From 0a7de7458d150b5d4dffc935ba399be265ef0a1a Mon Sep 17 00:00:00 2001
From: Apple
Date: Fri, 31 Jan 2020 18:14:00 +0000
Subject: [PATCH] xnu-4903.270.47.tar.gz

---
 EXTERNAL_HEADERS/architecture/i386/desc.h | 5 +-
 EXTERNAL_HEADERS/img4/api.h | 15 +-
 EXTERNAL_HEADERS/img4/environment.h | 313 +-
 EXTERNAL_HEADERS/img4/img4.h | 324 +-
 EXTERNAL_HEADERS/img4/nonce.h | 185 +
 EXTERNAL_HEADERS/img4/payload.h | 38 +-
 Makefile | 2 +-
 README.md | 18 +-
 SETUP/config/config.h | 90 +-
 SETUP/config/externs.c | 23 +-
 SETUP/config/main.c | 135 +-
 SETUP/config/mkheaders.c | 60 +-
 SETUP/config/mkioconf.c | 25 +-
 SETUP/config/mkmakefile.c | 319 +-
 SETUP/config/openp.c | 30 +-
 SETUP/config/searchp.c | 28 +-
 SETUP/decomment/decomment.c | 143 +-
 SETUP/installfile/installfile.c | 75 +-
 .../json_compilation_db/json_compilation_db.c | 49 +-
 SETUP/kextsymboltool/kextsymboltool.c | 1585 +-
 SETUP/replacecontents/replacecontents.c | 46 +-
 SETUP/setsegname/setsegname.c | 419 +-
 bsd/arm/_limits.h | 8 +-
 bsd/arm/_mcontext.h | 42 +-
 bsd/arm/_param.h | 4 +-
 bsd/arm/_types.h | 72 +-
 bsd/arm/disklabel.h | 10 +-
 bsd/arm/endian.h | 20 +-
 bsd/arm/exec.h | 36 +-
 bsd/arm/fasttrap_isa.h | 230 +-
 bsd/arm/limits.h | 62 +-
 bsd/arm/param.h | 84 +-
 bsd/arm/profile.h | 4 +-
 bsd/arm/psl.h | 10 +-
 bsd/arm/reboot.h | 30 +-
 bsd/arm/reg.h | 8 +-
 bsd/arm/signal.h | 9 +-
 bsd/arm/types.h | 78 +-
 bsd/arm/vmparam.h | 32 +-
 bsd/bsm/audit.h | 368 +-
 bsd/bsm/audit_domain.h | 138 +-
 bsd/bsm/audit_errno.h | 318 +-
 bsd/bsm/audit_fcntl.h | 168 +-
 bsd/bsm/audit_internal.h | 76 +-
 bsd/bsm/audit_kernel.h | 20 +-
 bsd/bsm/audit_kevents.h | 1474 +-
 bsd/bsm/audit_record.h | 366 +-
 bsd/bsm/audit_socket_type.h | 16 +-
 bsd/conf/files | 2 +
 bsd/conf/param.c | 48 +-
 bsd/crypto/rc4/rc4.c | 18 +-
 bsd/crypto/rc4/rc4.h | 14 +-
 bsd/crypto/sha1.h | 8 +-
 bsd/crypto/sha2.h | 8 +-
 bsd/dev/arm/conf.c | 182 +-
 bsd/dev/arm/cons.c | 30 +-
 bsd/dev/arm/disassembler.c | 668 +-
 bsd/dev/arm/dtrace_isa.c | 235 +-
 bsd/dev/arm/dtrace_subr_arm.c | 9 +-
 bsd/dev/arm/fasttrap_isa.c | 978 +-
 bsd/dev/arm/fbt_arm.c | 240 +-
 bsd/dev/arm/kern_machdep.c | 54 +-
 bsd/dev/arm/km.c | 99 +-
 bsd/dev/arm/munge.c | 290 +-
 bsd/dev/arm/pci_device.h | 64 +-
 bsd/dev/arm/pio.h | 254 +-
 bsd/dev/arm/sdt_arm.c | 38 +-
 bsd/dev/arm/stubs.c | 12 +-
 bsd/dev/arm/sysctl.c | 90 +-
 bsd/dev/arm/systemcalls.c | 8 +-
 bsd/dev/arm/table_inline.h | 8 +-
 bsd/dev/arm/unix_signal.c | 243 +-
 bsd/dev/arm64/conf.c | 172 +-
 bsd/dev/arm64/disassembler.c | 721 +-
 bsd/dev/arm64/dtrace_isa.c | 209 +-
 bsd/dev/arm64/dtrace_subr_arm.c | 9 +-
 bsd/dev/arm64/fasttrap_isa.c | 1784 +-
 bsd/dev/arm64/fbt_arm.c | 190 +-
 bsd/dev/arm64/sdt_arm.c | 40 +-
 bsd/dev/arm64/sysctl.c | 91 +-
 bsd/dev/busvar.h | 22 +-
 bsd/dev/dtrace/blist.c | 389 +-
 bsd/dev/dtrace/blist.h | 98 +-
 bsd/dev/dtrace/dtrace.c | 67 +-
 bsd/dev/dtrace/dtrace_glue.c | 426 +-
 bsd/dev/dtrace/dtrace_ptss.c | 92 +-
 bsd/dev/dtrace/fbt.c | 542 +-
 bsd/dev/dtrace/lockprof.c | 350 +
 bsd/dev/dtrace/lockstat.c | 190 +-
 bsd/dev/dtrace/profile_prvd.c | 257 +-
 bsd/dev/dtrace/sdt.c | 231 +-
 bsd/dev/dtrace/sdt_subr.c | 579 +-
 bsd/dev/dtrace/systrace.c | 386 +-
 bsd/dev/dtrace/systrace.h | 16 +-
 bsd/dev/i386/conf.c | 295 +-
 bsd/dev/i386/cons.c | 31 +-
 bsd/dev/i386/instr_size.c | 2 +-
 bsd/dev/i386/kern_machdep.c | 42 +-
 bsd/dev/i386/km.c | 153 +-
 bsd/dev/i386/sdt_x86.c | 35 +-
 bsd/dev/i386/stubs.c | 42 +-
 bsd/dev/i386/sysctl.c | 926 +-
 bsd/dev/i386/systemcalls.c | 236 +-
 bsd/dev/i386/unix_signal.c | 715 +-
 bsd/dev/kmreg_com.h | 88 +-
 bsd/dev/ldd.h | 17 +-
 bsd/dev/mem.c | 110 +-
 bsd/dev/memdev.c | 795 +-
 bsd/dev/memdev.h | 3 +-
 bsd/dev/monotonic.c | 72 +-
 bsd/dev/munge.c | 106 +-
 bsd/dev/random/randomdev.c | 114 +-
 bsd/dev/random/randomdev.h | 27 +-
 bsd/dev/unix_startup.c | 185 +-
 bsd/dev/vn/shadow.c | 890 +-
 bsd/dev/vn/shadow.h | 19 +-
 bsd/dev/vn/vn.c | 674 +-
 bsd/i386/_limits.h | 14 +-
 bsd/i386/_mcontext.h | 166 +-
 bsd/i386/_param.h | 12 +-
 bsd/i386/_types.h | 80 +-
 bsd/i386/dis_tables.h | 75 +-
 bsd/i386/disklabel.h | 18 +-
 bsd/i386/endian.h | 28 +-
 bsd/i386/exec.h | 44 +-
 bsd/i386/fasttrap_isa.h | 124 +-
 bsd/i386/limits.h | 62 +-
 bsd/i386/param.h | 92 +-
 bsd/i386/profile.h | 12 +-
 bsd/i386/psl.h | 42 +-
 bsd/i386/ptrace.h | 8 +-
 bsd/i386/reboot.h | 38 +-
 bsd/i386/reg.h | 16 +-
 bsd/i386/signal.h | 17 +-
 bsd/i386/types.h | 64 +-
 bsd/i386/vmparam.h | 40 +-
 bsd/kern/ast.h | 8 +-
 bsd/kern/bsd_init.c | 446 +-
 bsd/kern/bsd_stubs.c | 86 +-
 bsd/kern/chunklist.h | 1 -
 bsd/kern/decmpfs.c | 2579 ++-
 bsd/kern/imageboot.c | 132 +-
 bsd/kern/kdebug.c | 1738 +-
 bsd/kern/kern_acct.c | 96 +-
 bsd/kern/kern_aio.c | 1739 +-
 bsd/kern/kern_asl.c | 44 +-
 bsd/kern/kern_authorization.c | 421 +-
 bsd/kern/kern_backtrace.c | 40 +-
 bsd/kern/kern_clock.c | 118 +-
 bsd/kern/kern_control.c | 1374 +-
 bsd/kern/kern_core.c | 289 +-
 bsd/kern/kern_credential.c | 2147 +-
 bsd/kern/kern_cs.c | 414 +-
 bsd/kern/kern_csr.c | 33 +-
 bsd/kern/kern_descrip.c | 1497 +-
 bsd/kern/kern_ecc.c | 68 +-
 bsd/kern/kern_event.c | 1573 +-
 bsd/kern/kern_exec.c | 1580 +-
 bsd/kern/kern_exit.c | 931 +-
 bsd/kern/kern_fork.c | 266 +-
 bsd/kern/kern_guarded.c | 503 +-
 bsd/kern/kern_kpc.c | 222 +-
 bsd/kern/kern_ktrace.c | 32 +-
 bsd/kern/kern_lockf.c | 374 +-
 bsd/kern/kern_malloc.c | 862 +-
 bsd/kern/kern_memorystatus.c | 3003 ++-
 bsd/kern/kern_mib.c | 343 +-
 bsd/kern/kern_mman.c | 838 +-
 bsd/kern/kern_newsysctl.c | 738 +-
 bsd/kern/kern_ntptime.c | 244 +-
 bsd/kern/kern_overrides.c | 155 +-
 bsd/kern/kern_pcsamples.c | 546 +-
 bsd/kern/kern_persona.c | 368 +-
 bsd/kern/kern_physio.c | 183 +-
 bsd/kern/kern_priv.c | 27 +-
 bsd/kern/kern_proc.c | 1608 +-
 bsd/kern/kern_prot.c | 461 +-
 bsd/kern/kern_resource.c | 970 +-
 bsd/kern/kern_sfi.c | 317 +-
 bsd/kern/kern_shutdown.c | 167 +-
 bsd/kern/kern_sig.c | 1160 +-
 bsd/kern/kern_subr.c | 684 +-
 bsd/kern/kern_symfile.c | 1134 +-
 bsd/kern/kern_synch.c | 341 +-
 bsd/kern/kern_sysctl.c | 2351 +-
 bsd/kern/kern_time.c | 255 +-
 bsd/kern/kern_xxx.c | 36 +-
 bsd/kern/kpi_mbuf.c | 804 +-
 bsd/kern/kpi_mbuf_internal.h | 8 +-
 bsd/kern/kpi_socket.c | 475 +-
 bsd/kern/kpi_socketfilter.c | 423 +-
 bsd/kern/mach_fat.c | 174 +-
 bsd/kern/mach_fat.h | 12 +-
 bsd/kern/mach_loader.c | 1628 +-
 bsd/kern/mach_loader.h | 104 +-
 bsd/kern/mach_process.c | 187 +-
 bsd/kern/mcache.c | 337 +-
 bsd/kern/netboot.c | 1116 +-
 bsd/kern/policy_check.c | 75 +-
 bsd/kern/posix_sem.c | 438 +-
 bsd/kern/posix_shm.c | 1330 +-
 bsd/kern/proc_info.c | 2366 +-
 bsd/kern/proc_uuid_policy.c | 104 +-
 bsd/kern/process_policy.c | 694 +-
 bsd/kern/socket_info.c | 26 +-
 bsd/kern/stackshot.c | 115 +-
 bsd/kern/subr_eventhandler.c | 85 +-
 bsd/kern/subr_log.c | 476 +-
 bsd/kern/subr_prf.c | 102 +-
 bsd/kern/subr_prof.c | 398 +-
 bsd/kern/subr_sbuf.c | 220 +-
 bsd/kern/subr_xxx.c | 35 +-
 bsd/kern/sys_coalition.c | 94 +-
 bsd/kern/sys_domain.c | 16 +-
 bsd/kern/sys_generic.c | 1824 +-
 bsd/kern/sys_persona.c | 125 +-
 bsd/kern/sys_pipe.c | 669 +-
 bsd/kern/sys_reason.c | 26 +-
 bsd/kern/sys_socket.c | 141 +-
 bsd/kern/sys_ulock.c | 69 +-
 bsd/kern/sys_work_interval.c | 235 +-
 bsd/kern/syscalls.master | 2 +-
 bsd/kern/sysv_ipc.c | 57 +-
 bsd/kern/sysv_msg.c | 422 +-
 bsd/kern/sysv_sem.c | 576 +-
 bsd/kern/sysv_shm.c | 407 +-
 bsd/kern/trace_codes | 3 +
 bsd/kern/tty.c | 994 +-
 bsd/kern/tty_compat.c | 387 +-
 bsd/kern/tty_conf.c | 61 +-
 bsd/kern/tty_dev.c | 365 +-
 bsd/kern/tty_dev.h | 44 +-
 bsd/kern/tty_ptmx.c | 175 +-
 bsd/kern/tty_pty.c | 32 +-
 bsd/kern/tty_subr.c | 180 +-
 bsd/kern/tty_tty.c | 85 +-
 bsd/kern/ubc_subr.c | 1668 +-
 bsd/kern/uipc_domain.c | 337 +-
 bsd/kern/uipc_mbuf.c | 2187 +-
 bsd/kern/uipc_mbuf2.c | 322 +-
 bsd/kern/uipc_proto.c | 79 +-
 bsd/kern/uipc_socket.c | 2116 +-
 bsd/kern/uipc_socket2.c | 820 +-
 bsd/kern/uipc_syscalls.c | 953 +-
 bsd/kern/uipc_usrreq.c | 832 +-
 bsd/libkern/bcd.c | 10 +-
 bsd/libkern/copyio.h | 6 +-
 bsd/libkern/crc16.c | 5 +-
 bsd/libkern/crc32.c | 17 +-
 bsd/libkern/libkern.h | 101 +-
 bsd/libkern/memchr.c | 15 +-
 bsd/libkern/random.c | 11 +-
 bsd/libkern/scanc.c | 14 +-
 bsd/libkern/skpc.c | 14 +-
 bsd/libkern/strsep.c | 12 +-
 bsd/libkern/url_encode.c | 2 +-
 bsd/machine/_limits.h | 8 +-
 bsd/machine/_mcontext.h | 8 +-
 bsd/machine/_param.h | 8 +-
 bsd/machine/_types.h | 8 +-
 bsd/machine/byte_order.h | 14 +-
 bsd/machine/cons.h | 47 +-
 bsd/machine/dis_tables.h | 8 +-
 bsd/machine/disklabel.h | 8 +-
 bsd/machine/endian.h | 8 +-
 bsd/machine/exec.h | 18 +-
 bsd/machine/fasttrap_isa.h | 8 +-
 bsd/machine/limits.h | 6 +-
 bsd/machine/param.h | 8 +-
 bsd/machine/profile.h | 8 +-
 bsd/machine/psl.h | 8 +-
 bsd/machine/ptrace.h | 8 +-
 bsd/machine/reboot.h | 8 +-
 bsd/machine/reg.h | 8 +-
 bsd/machine/signal.h | 8 +-
 bsd/machine/smp.h | 8 +-
 bsd/machine/types.h | 8 +-
 bsd/machine/vmparam.h | 8 +-
 bsd/man/man2/chflags.2 | 5 -
 bsd/man/man2/fcntl.2 | 5 +-
 bsd/miscfs/deadfs/dead_vnops.c | 180 +-
 bsd/miscfs/devfs/devfs.h | 62 +-
 bsd/miscfs/devfs/devfs_fdesc_support.c | 287 +-
 bsd/miscfs/devfs/devfs_proto.h | 38 +-
 bsd/miscfs/devfs/devfs_tree.c | 1005 +-
 bsd/miscfs/devfs/devfs_vfsops.c | 285 +-
 bsd/miscfs/devfs/devfs_vnops.c | 995 +-
 bsd/miscfs/devfs/devfsdefs.h | 245 +-
 bsd/miscfs/devfs/fdesc.h | 42 +-
 bsd/miscfs/fifofs/fifo.h | 52 +-
 bsd/miscfs/fifofs/fifo_vnops.c | 280 +-
 bsd/miscfs/mockfs/mockfs.h | 9 +-
 bsd/miscfs/mockfs/mockfs_fsnode.c | 122 +-
 bsd/miscfs/mockfs/mockfs_fsnode.h | 9 +-
 bsd/miscfs/mockfs/mockfs_vfsops.c | 52 +-
 bsd/miscfs/mockfs/mockfs_vnops.c | 72 +-
 bsd/miscfs/mockfs/mockfs_vnops.h | 11 +-
 bsd/miscfs/nullfs/null_subr.c | 33 +-
 bsd/miscfs/nullfs/null_vfsops.c | 95 +-
 bsd/miscfs/nullfs/null_vnops.c | 103 +-
 bsd/miscfs/nullfs/nullfs.h | 14 +-
 bsd/miscfs/routefs/routefs.h | 23 +-
 bsd/miscfs/routefs/routefs_ops.c | 536 +-
 bsd/miscfs/specfs/spec_vnops.c | 1227 +-
 bsd/miscfs/specfs/specdev.h | 88 +-
 bsd/miscfs/union/union.h | 8 +-
 bsd/net/altq/altq.h | 16 +-
 bsd/net/altq/altq_cbq.h | 2 +-
 bsd/net/altq/altq_fairq.h | 12 +-
 bsd/net/altq/altq_hfsc.h | 2 +-
 bsd/net/altq/altq_priq.h | 2 +-
 bsd/net/altq/altq_qfq.h | 2 +-
 bsd/net/bpf.c | 1273 +-
 bsd/net/bpf.h | 744 +-
 bsd/net/bpf_compat.h | 8 +-
 bsd/net/bpf_filter.c | 412 +-
 bsd/net/bpfdesc.h | 142 +-
 bsd/net/bridgestp.c | 1339 +-
 bsd/net/bridgestp.h | 477 +-
 bsd/net/classq/classq.c | 104 +-
 bsd/net/classq/classq.h | 84 +-
 bsd/net/classq/classq_blue.h | 12 +-
 bsd/net/classq/classq_fq_codel.c | 70 +-
 bsd/net/classq/classq_fq_codel.h | 52 +-
 bsd/net/classq/classq_red.h | 18 +-
 bsd/net/classq/classq_rio.h | 4 +-
 bsd/net/classq/classq_sfb.c | 464 +-
 bsd/net/classq/classq_sfb.h | 140 +-
 bsd/net/classq/classq_subr.c | 212 +-
 bsd/net/classq/classq_util.c | 164 +-
 bsd/net/classq/if_classq.h | 264 +-
 bsd/net/content_filter.c | 3050 +--
 bsd/net/content_filter.h | 331 +-
 bsd/net/devtimer.c | 281 +-
 bsd/net/devtimer.h | 30 +-
 bsd/net/dlil.c | 2294 +-
 bsd/net/dlil.h | 206 +-
 bsd/net/ether_if_module.c | 207 +-
 bsd/net/ether_if_module.h | 8 +-
 bsd/net/ether_inet6_pr_module.c | 52 +-
 bsd/net/ether_inet_pr_module.c | 163 +-
 bsd/net/etherdefs.h | 26 +-
 bsd/net/ethernet.h | 88 +-
 bsd/net/firewire.h | 46 +-
 bsd/net/flowadv.c | 36 +-
 bsd/net/flowadv.h | 14 +-
 bsd/net/flowhash.c | 412 +-
 bsd/net/flowhash.h | 4 +-
 bsd/net/ieee8023ad.h | 22 +-
 bsd/net/if.c | 1811 +-
 bsd/net/if.h | 918 +-
 bsd/net/if_arp.h | 100 +-
 bsd/net/if_bond.c | 6793 +++---
 bsd/net/if_bond_internal.h | 9 +-
 bsd/net/if_bond_var.h | 94 +-
 bsd/net/if_bridge.c | 3021 +--
 bsd/net/if_bridgevar.h | 452 +-
 bsd/net/if_dl.h | 36 +-
 bsd/net/if_ether.h | 18 +-
 bsd/net/if_fake.c | 389 +-
 bsd/net/if_fake_var.h | 50 +-
 bsd/net/if_gif.c | 316 +-
 bsd/net/if_gif.h | 36 +-
 bsd/net/if_ipsec.c | 2237 +-
 bsd/net/if_ipsec.h | 42 +-
 bsd/net/if_llatbl.c | 131 +-
 bsd/net/if_llatbl.h | 214 +-
 bsd/net/if_llc.h | 162 +-
 bsd/net/if_llreach.c | 153 +-
 bsd/net/if_llreach.h | 88 +-
 bsd/net/if_loop.c | 253 +-
 bsd/net/if_low_power_mode.c | 15 +-
 bsd/net/if_media.h | 437 +-
 bsd/net/if_mib.c | 64 +-
 bsd/net/if_mib.h | 104 +-
 bsd/net/if_pflog.c | 75 +-
 bsd/net/if_pflog.h | 54 +-
 bsd/net/if_ports_used.c | 89 +-
 bsd/net/if_ports_used.h | 56 +-
 bsd/net/if_ppp.h | 132 +-
 bsd/net/if_stf.c | 268 +-
 bsd/net/if_types.h | 146 +-
 bsd/net/if_utun.c | 1492 +-
 bsd/net/if_utun.h | 58 +-
 bsd/net/if_var.h | 1268 +-
 bsd/net/if_vlan.c | 2808 ++-
 bsd/net/if_vlan_var.h | 36 +-
 bsd/net/init.c | 44 +-
 bsd/net/init.h | 50 +-
 bsd/net/iptap.c | 308 +-
 bsd/net/iptap.h | 2 +-
 bsd/net/kext_net.h | 58 +-
 bsd/net/kpi_interface.c | 1423 +-
 bsd/net/kpi_interface.h | 4844 ++--
 bsd/net/kpi_interfacefilter.c | 22 +-
 bsd/net/kpi_interfacefilter.h | 306 +-
 bsd/net/kpi_protocol.c | 147 +-
 bsd/net/kpi_protocol.h | 216 +-
 bsd/net/lacp.h | 288 +-
 bsd/net/multicast_list.c | 144 +-
 bsd/net/multicast_list.h | 16 +-
 bsd/net/nat464_utils.c | 562 +-
 bsd/net/nat464_utils.h | 68 +-
 bsd/net/ndrv.c | 4 +-
 bsd/net/ndrv.h | 92 +-
 bsd/net/ndrv_var.h | 40 +-
 bsd/net/necp.c | 3552 ++-
 bsd/net/necp.h | 1047 +-
 bsd/net/necp_client.c | 1609 +-
 bsd/net/net_api_stats.h | 72 +-
 bsd/net/net_kev.h | 160 +-
 bsd/net/net_osdep.h | 18 +-
 bsd/net/net_perf.c | 19 +-
 bsd/net/net_perf.h | 23 +-
 bsd/net/net_str_id.c | 100 +-
 bsd/net/net_str_id.h | 26 +-
 bsd/net/net_stubs.c | 15 +-
 bsd/net/netsrc.c | 91 +-
 bsd/net/netsrc.h | 48 +-
 bsd/net/network_agent.c | 656 +-
 bsd/net/network_agent.h | 274 +-
 bsd/net/ntstat.c | 3480 ++-
 bsd/net/ntstat.h | 1515 +-
 bsd/net/nwk_wq.c | 6 +-
 bsd/net/nwk_wq.h | 3 +-
 bsd/net/packet_mangler.c | 890 +-
 bsd/net/packet_mangler.h | 56 +-
 bsd/net/pf.c | 5043 ++--
 bsd/net/pf_if.c | 354 +-
 bsd/net/pf_ioctl.c | 1726 +-
 bsd/net/pf_norm.c | 1320 +-
 bsd/net/pf_osfp.c | 249 +-
 bsd/net/pf_pbuf.c | 131 +-
 bsd/net/pf_pbuf.h | 77 +-
 bsd/net/pf_ruleset.c | 257 +-
 bsd/net/pf_table.c | 1589 +-
 bsd/net/pfkeyv2.h | 430 +-
 bsd/net/pfvar.h | 2312 +-
 bsd/net/pktap.c | 599 +-
 bsd/net/pktap.h | 160 +-
 bsd/net/pktsched/pktsched.c | 63 +-
 bsd/net/pktsched/pktsched.h | 86 +-
 bsd/net/pktsched/pktsched_cbq.h | 90 +-
 bsd/net/pktsched/pktsched_fairq.h | 72 +-
 bsd/net/pktsched/pktsched_fq_codel.c | 234 +-
 bsd/net/pktsched/pktsched_fq_codel.h | 158 +-
 bsd/net/pktsched/pktsched_hfsc.h | 136 +-
 bsd/net/pktsched/pktsched_priq.h | 54 +-
 bsd/net/pktsched/pktsched_qfq.c | 436 +-
 bsd/net/pktsched/pktsched_qfq.h | 156 +-
 bsd/net/pktsched/pktsched_rmclass.h | 24 +-
 bsd/net/pktsched/pktsched_tcq.c | 291 +-
 bsd/net/pktsched/pktsched_tcq.h | 92 +-
 bsd/net/ppp_comp.h | 132 +-
 bsd/net/ppp_defs.h | 138 +-
 bsd/net/radix.c | 604 +-
 bsd/net/radix.h | 178 +-
 bsd/net/raw_cb.c | 47 +-
 bsd/net/raw_cb.h | 26 +-
 bsd/net/raw_usrreq.c | 128 +-
 bsd/net/route.c | 5 +-
 bsd/net/route.h | 514 +-
 bsd/net/rtsock.c | 566 +-
 bsd/net/skywalk_stubs.c | 143 +
 bsd/net/zlib.h | 8 +-
 bsd/netinet/bootp.h | 109 +-
 bsd/netinet/cbrtf.c | 754 +-
 bsd/netinet/cpu_in_cksum_gen.c | 81 +-
 bsd/netinet/dhcp.h | 103 +-
 bsd/netinet/dhcp_options.c | 499 +-
 bsd/netinet/dhcp_options.h | 255 +-
 bsd/netinet/flow_divert.c | 912 +-
 bsd/netinet/flow_divert.h | 87 +-
 bsd/netinet/flow_divert_proto.h | 96 +-
 bsd/netinet/icmp6.h | 868 +-
 bsd/netinet/icmp_var.h | 46 +-
 bsd/netinet/if_ether.h | 82 +-
 bsd/netinet/if_tun.h | 44 +-
 bsd/netinet/igmp.c | 1033 +-
 bsd/netinet/igmp.h | 118 +-
 bsd/netinet/igmp_var.h | 256 +-
 bsd/netinet/in.c | 885 +-
 bsd/netinet/in.h | 772 +-
 bsd/netinet/in_arp.c | 448 +-
 bsd/netinet/in_arp.h | 6 +-
 bsd/netinet/in_cksum.c | 134 +-
 bsd/netinet/in_gif.c | 157 +-
 bsd/netinet/in_gif.h | 4 +-
 bsd/netinet/in_mcast.c | 1239 +-
 bsd/netinet/in_pcb.c | 934 +-
 bsd/netinet/in_pcb.h | 582 +-
 bsd/netinet/in_pcblist.c | 201 +-
 bsd/netinet/in_proto.c | 347 +-
 bsd/netinet/in_rmx.c | 143 +-
 bsd/netinet/in_stat.c | 22 +-
 bsd/netinet/in_stat.h | 4 +-
 bsd/netinet/in_systm.h | 14 +-
 bsd/netinet/in_tclass.c | 968 +-
 bsd/netinet/in_tclass.h | 50 +-
 bsd/netinet/in_var.h | 324 +-
 bsd/netinet/ip.h | 202 +-
 bsd/netinet/ip6.h | 247 +-
 bsd/netinet/ip_compat.h | 772 +-
 bsd/netinet/ip_divert.c | 225 +-
 bsd/netinet/ip_divert.h | 36 +-
 bsd/netinet/ip_dummynet.h | 863 +-
 bsd/netinet/ip_ecn.c | 44 +-
 bsd/netinet/ip_ecn.h | 14 +-
 bsd/netinet/ip_encap.c | 190 +-
 bsd/netinet/ip_encap.h | 38 +-
 bsd/netinet/ip_flowid.h | 22 +-
 bsd/netinet/ip_fw.h | 296 +-
 bsd/netinet/ip_fw2.c | 1969 +-
 bsd/netinet/ip_fw2.h | 584 +-
 bsd/netinet/ip_fw2_compat.c | 4027 ++--
 bsd/netinet/ip_fw2_compat.h | 574 +-
 bsd/netinet/ip_icmp.c | 609 +-
 bsd/netinet/ip_icmp.h | 192 +-
 bsd/netinet/ip_id.c | 34 +-
 bsd/netinet/ip_input.c | 1119 +-
 bsd/netinet/ip_output.c | 898 +-
 bsd/netinet/ip_var.h | 252 +-
 bsd/netinet/isakmp.h | 40 +-
 bsd/netinet/kpi_ipfilter.c | 226 +-
 bsd/netinet/kpi_ipfilter.h | 274 +-
 bsd/netinet/kpi_ipfilter_var.h | 20 +-
 bsd/netinet/lro_ext.h | 24 +-
 bsd/netinet/mp_pcb.c | 79 +-
 bsd/netinet/mp_pcb.h | 62 +-
 bsd/netinet/mp_proto.c | 30 +-
 bsd/netinet/mptcp.c | 382 +-
 bsd/netinet/mptcp.h | 289 +-
 bsd/netinet/mptcp_opt.c | 929 +-
 bsd/netinet/mptcp_opt.h | 8 +-
 bsd/netinet/mptcp_seq.h | 12 +-
 bsd/netinet/mptcp_subr.c | 1763 +-
 bsd/netinet/mptcp_timer.c | 29 +-
 bsd/netinet/mptcp_timer.h | 6 +-
 bsd/netinet/mptcp_usrreq.c | 653 +-
 bsd/netinet/mptcp_var.h | 548 +-
 bsd/netinet/raw_ip.c | 562 +-
 bsd/netinet/tcp.h | 600 +-
 bsd/netinet/tcp_cache.c | 495 +-
 bsd/netinet/tcp_cache.h | 17 +-
 bsd/netinet/tcp_cc.c | 147 +-
 bsd/netinet/tcp_cc.h | 31 +-
 bsd/netinet/tcp_cubic.c | 135 +-
 bsd/netinet/tcp_debug.c | 109 +-
 bsd/netinet/tcp_debug.h | 50 +-
 bsd/netinet/tcp_fsm.h | 108 +-
 bsd/netinet/tcp_input.c | 1715 +-
 bsd/netinet/tcp_ledbat.c | 177 +-
 bsd/netinet/tcp_lro.c | 394 +-
 bsd/netinet/tcp_lro.h | 42 +-
 bsd/netinet/tcp_newreno.c | 89 +-
 bsd/netinet/tcp_sack.c | 188 +-
 bsd/netinet/tcp_seq.h | 44 +-
 bsd/netinet/tcp_subr.c | 1077 +-
 bsd/netinet/tcp_timer.c | 635 +-
 bsd/netinet/tcp_timer.h | 204 +-
 bsd/netinet/tcp_usrreq.c | 988 +-
 bsd/netinet/tcp_var.h | 1919 +-
 bsd/netinet/tcpip.h | 48 +-
 bsd/netinet/udp.h | 36 +-
 bsd/netinet/udp_usrreq.c | 814 +-
 bsd/netinet/udp_var.h | 120 +-
 bsd/netinet6/ah.h | 28 +-
 bsd/netinet6/ah6.h | 4 +-
 bsd/netinet6/ah_core.c | 406 +-
 bsd/netinet6/ah_input.c | 334 +-
 bsd/netinet6/ah_output.c | 112 +-
 bsd/netinet6/dest6.c | 13 +-
 bsd/netinet6/esp.h | 36 +-
 bsd/netinet6/esp6.h | 10 +-
 bsd/netinet6/esp_chachapoly.c | 158 +-
 bsd/netinet6/esp_chachapoly.h | 14 +-
 bsd/netinet6/esp_core.c | 269 +-
 bsd/netinet6/esp_input.c | 425 +-
 bsd/netinet6/esp_output.c | 741 +-
 bsd/netinet6/esp_rijndael.c | 275 +-
 bsd/netinet6/esp_rijndael.h | 16 +-
 bsd/netinet6/frag6.c | 224 +-
 bsd/netinet6/icmp6.c | 1145 +-
 bsd/netinet6/in6.c | 1390 +-
 bsd/netinet6/in6.h | 552 +-
 bsd/netinet6/in6_cga.c | 116 +-
 bsd/netinet6/in6_cksum.c | 35 +-
 bsd/netinet6/in6_gif.c | 150 +-
 bsd/netinet6/in6_gif.h | 4 +-
 bsd/netinet6/in6_ifattach.c | 322 +-
 bsd/netinet6/in6_ifattach.h | 2 +-
 bsd/netinet6/in6_mcast.c | 1217 +-
 bsd/netinet6/in6_pcb.c | 393 +-
 bsd/netinet6/in6_pcb.h | 2 +-
 bsd/netinet6/in6_proto.c | 570 +-
 bsd/netinet6/in6_rmx.c | 138 +-
 bsd/netinet6/in6_src.c | 547 +-
 bsd/netinet6/in6_var.h | 696 +-
 bsd/netinet6/ip6_ecn.h | 8 +-
 bsd/netinet6/ip6_forward.c | 200 +-
 bsd/netinet6/ip6_fw.c | 707 +-
 bsd/netinet6/ip6_fw.h | 334 +-
 bsd/netinet6/ip6_id.c | 81 +-
 bsd/netinet6/ip6_input.c | 605 +-
 bsd/netinet6/ip6_output.c | 1246 +-
 bsd/netinet6/ip6_var.h | 336 +-
 bsd/netinet6/ip6protosw.h | 70 +-
 bsd/netinet6/ipcomp.h | 19 +-
 bsd/netinet6/ipcomp6.h | 2 +-
 bsd/netinet6/ipcomp_core.c | 172 +-
 bsd/netinet6/ipcomp_input.c | 91 +-
 bsd/netinet6/ipcomp_output.c | 154 +-
 bsd/netinet6/ipsec.c | 2529 +-
 bsd/netinet6/ipsec.h | 226 +-
 bsd/netinet6/ipsec6.h | 12 +-
 bsd/netinet6/mld6.c | 905 +-
 bsd/netinet6/mld6.h | 74 +-
 bsd/netinet6/mld6_var.h | 170 +-
 bsd/netinet6/nd6.c | 933 +-
 bsd/netinet6/nd6.h | 622 +-
 bsd/netinet6/nd6_nbr.c | 497 +-
 bsd/netinet6/nd6_prproxy.c | 229 +-
 bsd/netinet6/nd6_rtr.c | 734 +-
 bsd/netinet6/nd6_send.c | 75 +-
 bsd/netinet6/nd6_var.h | 2 +-
 bsd/netinet6/raw_ip6.c | 376 +-
 bsd/netinet6/raw_ip6.h | 22 +-
 bsd/netinet6/route6.c | 6 +-
 bsd/netinet6/scope6.c | 95 +-
 bsd/netinet6/scope6_var.h | 10 +-
 bsd/netinet6/tcp6_var.h | 22 +-
 bsd/netinet6/udp6_output.c | 162 +-
 bsd/netinet6/udp6_usrreq.c | 290 +-
 bsd/netkey/key.c | 7783 ++++---
 bsd/netkey/key.h | 58 +-
 bsd/netkey/key_debug.h | 35 +-
 bsd/netkey/key_var.h | 31 +-
 bsd/netkey/keydb.c | 45 +-
 bsd/netkey/keydb.h | 106 +-
 bsd/netkey/keysock.c | 121 +-
 bsd/netkey/keysock.h | 52 +-
 bsd/nfs/gss/ccrypto.c | 57 +-
 bsd/nfs/gss/gss_krb5_mech.c | 1138 +-
 bsd/nfs/gss/gss_krb5_mech.h | 124 +-
 bsd/nfs/krpc.h | 74 +-
 bsd/nfs/krpc_subr.c | 218 +-
 bsd/nfs/nfs.h | 1770 +-
 bsd/nfs/nfs4_subs.c | 1167 +-
 bsd/nfs/nfs4_vnops.c | 3153 ++-
 bsd/nfs/nfs_bio.c | 1133 +-
 bsd/nfs/nfs_boot.c | 279 +-
 bsd/nfs/nfs_gss.c | 1084 +-
 bsd/nfs/nfs_gss.h | 218 +-
 bsd/nfs/nfs_ioctl.h | 46 +-
 bsd/nfs/nfs_lock.c | 222 +-
 bsd/nfs/nfs_lock.h | 102 +-
 bsd/nfs/nfs_node.c | 419 +-
 bsd/nfs/nfs_serv.c | 1626 +-
 bsd/nfs/nfs_socket.c | 2430 +-
 bsd/nfs/nfs_srvcache.c | 99 +-
 bsd/nfs/nfs_subs.c | 1409 +-
 bsd/nfs/nfs_syscalls.c | 751 +-
 bsd/nfs/nfs_upcall.c | 46 +-
 bsd/nfs/nfs_vfsops.c | 2564 ++-
 bsd/nfs/nfs_vnops.c | 3856 ++--
 bsd/nfs/nfsdiskless.h | 28 +-
 bsd/nfs/nfsm_subs.h | 744 +-
 bsd/nfs/nfsmount.h | 534 +-
 bsd/nfs/nfsnode.h | 843 +-
 bsd/nfs/nfsproto.h | 780 +-
 bsd/nfs/nfsrvcache.h | 66 +-
 bsd/nfs/rpcv2.h | 132 +-
 bsd/nfs/xdr_subs.h | 194 +-
 bsd/pgo/profile_runtime.c | 465 +-
 bsd/pthread/bsdthread_private.h | 20 +-
 bsd/pthread/priority_private.h | 66 +-
 bsd/pthread/pthread_priority.c | 10 +-
 bsd/pthread/pthread_shims.c | 78 +-
 bsd/pthread/pthread_workqueue.c | 444 +-
 bsd/pthread/workqueue_internal.h | 74 +-
 bsd/pthread/workqueue_syscalls.h | 50 +-
 bsd/pthread/workqueue_trace.h | 58 +-
 bsd/security/audit/audit.c | 238 +-
 bsd/security/audit/audit.h | 450 +-
 bsd/security/audit/audit_arg.c | 163 +-
 bsd/security/audit/audit_bsd.c | 217 +-
 bsd/security/audit/audit_bsd.h | 306 +-
 bsd/security/audit/audit_bsm.c | 461 +-
 bsd/security/audit/audit_bsm_domain.c | 260 +-
 bsd/security/audit/audit_bsm_errno.c | 436 +-
 bsd/security/audit/audit_bsm_fcntl.c | 316 +-
 bsd/security/audit/audit_bsm_klib.c | 195 +-
 bsd/security/audit/audit_bsm_socket_type.c | 36 +-
 bsd/security/audit/audit_bsm_token.c | 279 +-
 bsd/security/audit/audit_ioctl.h | 86 +-
 bsd/security/audit/audit_mac.c | 79 +-
 bsd/security/audit/audit_pipe.c | 301 +-
 bsd/security/audit/audit_private.h | 484 +-
 bsd/security/audit/audit_session.c | 766 +-
 bsd/security/audit/audit_syscalls.c | 467 +-
 bsd/security/audit/audit_worker.c | 106 +-
 bsd/sys/_endian.h | 50 +-
 bsd/sys/_select.h | 22 +-
 bsd/sys/_structs.h | 8 +-
 bsd/sys/_types.h | 48 +-
 bsd/sys/_types/_blkcnt_t.h | 14 +-
 bsd/sys/_types/_blksize_t.h | 14 +-
 bsd/sys/_types/_caddr_t.h | 10 +-
 bsd/sys/_types/_clock_t.h | 8 +-
 bsd/sys/_types/_ct_rune_t.h | 8 +-
 bsd/sys/_types/_dev_t.h | 14 +-
 bsd/sys/_types/_errno_t.h | 8 +-
 bsd/sys/_types/_fd_clr.h | 10 +-
 bsd/sys/_types/_fd_copy.h | 10 +-
 bsd/sys/_types/_fd_def.h | 36 +-
 bsd/sys/_types/_fd_isset.h | 10 +-
 bsd/sys/_types/_fd_set.h | 10 +-
 bsd/sys/_types/_fd_setsize.h | 12 +-
 bsd/sys/_types/_fd_zero.h | 10 +-
 bsd/sys/_types/_filesec_t.h | 10 +-
 bsd/sys/_types/_fsblkcnt_t.h | 10 +-
 bsd/sys/_types/_fsfilcnt_t.h | 10 +-
 bsd/sys/_types/_fsid_t.h | 2 +-
 bsd/sys/_types/_gid_t.h | 16 +-
 bsd/sys/_types/_guid_t.h | 10 +-
 bsd/sys/_types/_id_t.h | 14 +-
 bsd/sys/_types/_in_addr_t.h | 10 +-
 bsd/sys/_types/_in_port_t.h | 10 +-
 bsd/sys/_types/_ino64_t.h | 14 +-
 bsd/sys/_types/_ino_t.h | 14 +-
 bsd/sys/_types/_int16_t.h | 10 +-
 bsd/sys/_types/_int32_t.h | 10 +-
 bsd/sys/_types/_int64_t.h | 10 +-
 bsd/sys/_types/_int8_t.h | 10 +-
 bsd/sys/_types/_intptr_t.h | 10 +-
 bsd/sys/_types/_iovec_t.h | 14 +-
 bsd/sys/_types/_key_t.h | 14 +-
 bsd/sys/_types/_mach_port_t.h | 14 +-
 bsd/sys/_types/_mbstate_t.h | 8 +-
 bsd/sys/_types/_mode_t.h | 14 +-
 bsd/sys/_types/_nlink_t.h | 14 +-
 bsd/sys/_types/_null.h | 8 +-
 bsd/sys/_types/_o_dsync.h | 12 +-
 bsd/sys/_types/_o_sync.h | 12 +-
 bsd/sys/_types/_off_t.h | 14 +-
 bsd/sys/_types/_offsetof.h | 8 +-
 bsd/sys/_types/_os_inline.h | 8 +-
 bsd/sys/_types/_pid_t.h | 14 +-
 bsd/sys/_types/_posix_vdisable.h | 10 +-
 bsd/sys/_types/_ptrdiff_t.h | 8 +-
 bsd/sys/_types/_rsize_t.h | 8 +-
 bsd/sys/_types/_rune_t.h | 14 +-
 bsd/sys/_types/_s_ifmt.h | 66 +-
 bsd/sys/_types/_sa_family_t.h | 14 +-
 bsd/sys/_types/_seek_set.h | 20 +-
 bsd/sys/_types/_sigaltstack.h | 16 +-
 bsd/sys/_types/_sigset_t.h | 14 +-
 bsd/sys/_types/_size_t.h | 14 +-
 bsd/sys/_types/_socklen_t.h | 11 +-
 bsd/sys/_types/_ssize_t.h | 14 +-
 bsd/sys/_types/_suseconds_t.h | 14 +-
 bsd/sys/_types/_time_t.h | 14 +-
 bsd/sys/_types/_timespec.h | 12 +-
 bsd/sys/_types/_timeval.h | 12 +-
 bsd/sys/_types/_timeval32.h | 14 +-
 bsd/sys/_types/_timeval64.h | 7 +-
 bsd/sys/_types/_u_char.h | 10 +-
 bsd/sys/_types/_u_int.h | 10 +-
 bsd/sys/_types/_u_int16_t.h | 10 +-
 bsd/sys/_types/_u_int32_t.h | 10 +-
 bsd/sys/_types/_u_int64_t.h | 10 +-
 bsd/sys/_types/_u_int8_t.h | 10 +-
 bsd/sys/_types/_u_short.h | 10 +-
 bsd/sys/_types/_ucontext.h | 16 +-
 bsd/sys/_types/_ucontext64.h | 18 +-
 bsd/sys/_types/_uid_t.h | 14 +-
 bsd/sys/_types/_uintptr_t.h | 10 +-
 bsd/sys/_types/_useconds_t.h | 10 +-
 bsd/sys/_types/_user32_itimerval.h | 10 +-
 bsd/sys/_types/_user32_ntptimeval.h | 3 +-
 bsd/sys/_types/_user32_timespec.h | 12 +-
 bsd/sys/_types/_user32_timeval.h | 12 +-
 bsd/sys/_types/_user32_timex.h | 35 +-
 bsd/sys/_types/_user64_itimerval.h | 10 +-
 bsd/sys/_types/_user64_ntptimeval.h | 3 +-
 bsd/sys/_types/_user64_timespec.h | 12 +-
 bsd/sys/_types/_user64_timeval.h | 10 +-
 bsd/sys/_types/_user64_timex.h | 35 +-
 bsd/sys/_types/_user_timespec.h | 14 +-
 bsd/sys/_types/_user_timeval.h | 10 +-
 bsd/sys/_types/_uuid_t.h | 10 +-
 bsd/sys/_types/_va_list.h | 8 +-
 bsd/sys/_types/_wchar_t.h | 8 +-
 bsd/sys/_types/_wint_t.h | 8 +-
 bsd/sys/acct.h | 46 +-
 bsd/sys/aio.h | 240 +-
 bsd/sys/aio_kern.h | 85 +-
 bsd/sys/appleapiopts.h | 9 +-
 bsd/sys/attr.h | 430 +-
 bsd/sys/bitstring.h | 168 +-
 bsd/sys/bsdtask_info.h | 114 +-
 bsd/sys/buf.h | 1626 +-
 bsd/sys/buf_internal.h | 286 +-
 bsd/sys/callout.h | 22 +-
 bsd/sys/cdefs.h | 340 +-
 bsd/sys/clist.h | 23 +-
 bsd/sys/coalition.h | 116 +-
 bsd/sys/codedir_internal.h | 14 +-
 bsd/sys/codesign.h | 100 +-
 bsd/sys/commpage.h | 14 +-
 bsd/sys/conf.h | 194 +-
 bsd/sys/content_protection.h | 14 +-
 bsd/sys/cprotect.h | 40 +-
 bsd/sys/csr.h | 50 +-
 bsd/sys/decmpfs.h | 102 +-
 bsd/sys/dir.h | 10 +-
 bsd/sys/dirent.h | 52 +-
 bsd/sys/dis_tables.h | 14 +-
 bsd/sys/disk.h | 145 +-
 bsd/sys/disklabel.h | 263 +-
 bsd/sys/disktab.h | 102 +-
 bsd/sys/dkstat.h | 14 +-
 bsd/sys/dmap.h | 24 +-
 bsd/sys/doc_tombstone.h | 16 +-
 bsd/sys/domain.h | 82 +-
 bsd/sys/dtrace_glue.h | 210 +-
 bsd/sys/dtrace_ptss.h | 45 +-
 bsd/sys/errno.h | 248 +-
 bsd/sys/ev.h | 70 +-
 bsd/sys/event.h | 524 +-
 bsd/sys/eventhandler.h | 162 +-
 bsd/sys/eventvar.h | 30 +-
 bsd/sys/exec.h | 13 +-
 bsd/sys/fcntl.h | 500 +-
 bsd/sys/file.h | 18 +-
 bsd/sys/file_internal.h | 148 +-
 bsd/sys/filedesc.h | 80 +-
 bsd/sys/fileport.h | 16 +-
 bsd/sys/filio.h | 32 +-
 bsd/sys/fsctl.h | 151 +-
 bsd/sys/fsevents.h | 46 +-
 bsd/sys/fsgetpath.h | 20 +-
 bsd/sys/fslog.h | 10 +-
 bsd/sys/gmon.h | 183 +-
 bsd/sys/guarded.h | 92 +-
 bsd/sys/imageboot.h | 22 +-
 bsd/sys/imgact.h | 116 +-
 bsd/sys/imgsrc.h | 45 +-
 bsd/sys/ioccom.h | 52 +-
 bsd/sys/ioctl.h | 26 +-
 bsd/sys/ioctl_compat.h | 216 +-
 bsd/sys/ipc.h | 83 +-
 bsd/sys/ipcs.h | 60 +-
 bsd/sys/kas_info.h | 18 +-
 bsd/sys/kasl.h | 6 +-
 bsd/sys/kauth.h | 677 +-
 bsd/sys/kdebug.h | 720 +-
 bsd/sys/kdebug_signpost.h | 8 +-
 bsd/sys/kern_control.h | 932 +-
 bsd/sys/kern_event.h | 357 +-
 bsd/sys/kern_memorystatus.h | 186 +-
 bsd/sys/kern_overrides.h | 42 +-
 bsd/sys/kernel.h | 26 +-
 bsd/sys/kernel_types.h | 58 +-
 bsd/sys/kpi_mbuf.h | 2644 +--
 bsd/sys/kpi_private.h | 16 +-
 bsd/sys/kpi_socket.h | 726 +-
 bsd/sys/kpi_socketfilter.h | 1114 +-
 bsd/sys/ktrace.h | 8 +-
 bsd/sys/lctx.h | 2 +-
 bsd/sys/linker_set.h | 60 +-
 bsd/sys/loadable_fs.h | 86 +-
 bsd/sys/lock.h | 16 +-
 bsd/sys/lockf.h | 42 +-
 bsd/sys/lockstat.h | 235 +-
 bsd/sys/mach_swapon.h | 18 +-
 bsd/sys/malloc.h | 421 +-
 bsd/sys/mbuf.h | 1224 +-
 bsd/sys/mcache.h | 272 +-
 bsd/sys/md5.h | 10 +-
 bsd/sys/memory_maintenance.h | 25 +-
 bsd/sys/mman.h | 150 +-
 bsd/sys/monotonic.h | 38 +-
 bsd/sys/mount.h | 1684 +-
 bsd/sys/mount_internal.h | 382 +-
 bsd/sys/msg.h | 275 +-
 bsd/sys/msgbuf.h | 32 +-
 bsd/sys/munge.h | 8 +-
 bsd/sys/namei.h | 192 +-
 bsd/sys/netboot.h | 24 +-
 bsd/sys/netport.h | 29 +-
 bsd/sys/param.h | 154 +-
 bsd/sys/paths.h | 10 +-
 bsd/sys/persona.h | 66 +-
 bsd/sys/pgo.h | 40 +-
 bsd/sys/pipe.h | 112 +-
 bsd/sys/poll.h | 47 +-
 bsd/sys/posix_sem.h | 47 +-
 bsd/sys/posix_shm.h | 45 +-
 bsd/sys/priv.h | 78 +-
 bsd/sys/proc.h | 248 +-
 bsd/sys/proc_info.h | 960 +-
 bsd/sys/proc_internal.h | 747 +-
 bsd/sys/proc_uuid_policy.h | 24 +-
 bsd/sys/process_policy.h | 174 +-
 bsd/sys/protosw.h | 420 +-
 bsd/sys/pthread_internal.h | 9 +-
 bsd/sys/pthread_shims.h | 30 +-
 bsd/sys/ptrace.h | 52 +-
 bsd/sys/queue.h | 986 +-
 bsd/sys/quota.h | 227 +-
 bsd/sys/random.h | 11 +-
 bsd/sys/reason.h | 22 +-
 bsd/sys/reboot.h | 74 +-
 bsd/sys/resource.h | 278 +-
 bsd/sys/resourcevar.h | 86 +-
 bsd/sys/sbuf.h | 66 +-
 bsd/sys/sdt.h | 4 +-
 bsd/sys/sdt_impl.h | 66 +-
 bsd/sys/select.h | 48 +-
 bsd/sys/sem.h | 114 +-
 bsd/sys/sem_internal.h | 166 +-
 bsd/sys/semaphore.h | 18 +-
 bsd/sys/sfi.h | 30 +-
 bsd/sys/shm.h | 76 +-
 bsd/sys/shm_internal.h | 64 +-
 bsd/sys/signal.h | 558 +-
 bsd/sys/signalvar.h | 222 +-
 bsd/sys/snapshot.h | 2 +-
 bsd/sys/socket.h | 1106 +-
 bsd/sys/socketvar.h | 893 +-
 bsd/sys/sockio.h | 349 +-
 bsd/sys/spawn.h | 56 +-
 bsd/sys/spawn_internal.h | 204 +-
 bsd/sys/stackshot.h | 14 +-
 bsd/sys/stat.h | 490 +-
 bsd/sys/stdio.h | 18 +-
 bsd/sys/subr_prf.h | 25 +-
 bsd/sys/sys_domain.h | 36 +-
 bsd/sys/sysctl.h | 776 +-
 bsd/sys/sysent.h | 58 +-
 bsd/sys/syslimits.h | 66 +-
 bsd/sys/syslog.h | 226 +-
 bsd/sys/systm.h | 172 +-
 bsd/sys/termios.h | 364 +-
 bsd/sys/time.h | 162 +-
 bsd/sys/timeb.h | 18 +-
 bsd/sys/times.h | 22 +-
 bsd/sys/timex.h | 60 +-
 bsd/sys/tprintf.h | 16 +-
 bsd/sys/trace.h | 101 +-
 bsd/sys/tty.h | 328 +-
 bsd/sys/ttychars.h | 39 +-
 bsd/sys/ttycom.h | 210 +-
 bsd/sys/ttydefaults.h | 72 +-
 bsd/sys/ttydev.h | 42 +-
 bsd/sys/types.h | 83 +-
 bsd/sys/ubc.h | 105 +-
 bsd/sys/ubc_internal.h | 197 +-
 bsd/sys/ucontext.h | 36 +-
 bsd/sys/ucred.h | 90 +-
 bsd/sys/uio.h | 76 +-
 bsd/sys/uio_internal.h | 72 +-
 bsd/sys/ulock.h | 48 +-
 bsd/sys/un.h | 48 +-
 bsd/sys/unistd.h | 142 +-
 bsd/sys/unpcb.h | 174 +-
 bsd/sys/user.h | 154 +-
 bsd/sys/utfconv.h | 32 +-
 bsd/sys/utsname.h | 30 +-
 bsd/sys/ux_exception.h | 27 +-
 bsd/sys/vadvise.h | 20 +-
 bsd/sys/vcmd.h | 20 +-
 bsd/sys/vlimit.h | 30 +-
 bsd/sys/vm.h | 82 +-
 bsd/sys/vmmeter.h | 109 +-
 bsd/sys/vmparam.h | 14 +-
 bsd/sys/vnioctl.h | 68 +-
 bsd/sys/vnode.h | 2725 +--
 bsd/sys/vnode_if.h | 1428 +-
 bsd/sys/vnode_internal.h | 504 +-
 bsd/sys/vstat.h | 58 +-
 bsd/sys/wait.h | 122 +-
 bsd/sys/work_interval.h | 23 +-
 bsd/sys/xattr.h | 26 +-
 bsd/tests/bsd_tests.c | 59 +-
 bsd/tests/ctrr_test_sysctl.c | 8 +-
 bsd/tests/pmap_test_sysctl.c | 22 +-
 bsd/uuid/uuid.h | 8 +-
 bsd/uxkern/ux_exception.c | 80 +-
 bsd/vfs/doc_tombstone.c | 29 +-
 bsd/vfs/kpi_vfs.c | 2288 +-
 bsd/vfs/vfs_attrlist.c | 1292 +-
 bsd/vfs/vfs_bio.c | 2484 +-
 bsd/vfs/vfs_cache.c | 1150 +-
 bsd/vfs/vfs_cluster.c | 4396 ++--
 bsd/vfs/vfs_conf.c | 32 +-
 bsd/vfs/vfs_cprotect.c | 261 +-
 bsd/vfs/vfs_disk_conditioner.c | 15 +-
 bsd/vfs/vfs_fsevents.c | 3583 ++-
 bsd/vfs/vfs_fslog.c | 104 +-
 bsd/vfs/vfs_init.c | 151 +-
 bsd/vfs/vfs_lookup.c | 684 +-
 bsd/vfs/vfs_quota.c | 414 +-
 bsd/vfs/vfs_subr.c | 3543 +--
 bsd/vfs/vfs_support.c | 650 +-
 bsd/vfs/vfs_support.h | 16 +-
 bsd/vfs/vfs_syscalls.c | 4395 ++--
 bsd/vfs/vfs_utfconv.c | 408 +-
 bsd/vfs/vfs_utfconvdata.h | 528 +-
 bsd/vfs/vfs_vnops.c | 706 +-
 bsd/vfs/vfs_xattr.c | 1050 +-
 bsd/vfs/vnode_if.c | 246 +-
 bsd/vm/dp_backing_file.c | 44 +-
 bsd/vm/vm_compressor_backing_file.c | 168 +-
 bsd/vm/vm_pager.h | 46 +-
 bsd/vm/vm_unix.c | 1073 +-
 bsd/vm/vnode_pager.c | 429 +-
 bsd/vm/vnode_pager.h | 82 +-
 config/Libkern.exports | 21 +-
 config/MASTER | 20 +-
 config/MASTER.arm64 | 4 +-
 config/MASTER.x86_64 | 13 +-
 config/MasterVersion | 2 +-
 config/Private.arm.exports | 1 +
 config/Private.arm64.exports | 4 +
 config/Private.exports | 6 +
 config/Private.x86_64.exports | 9 +
 config/version.c | 8 +-
 iokit/.clang-format | 30 -
 .../GenericInterruptController.cpp | 283 +-
 .../GenericInterruptController.h | 84 +-
 iokit/Families/IONVRAM/IONVRAMController.cpp | 14 +-
 .../IOSystemManagement/IOWatchDogTimer.cpp | 146 +-
 iokit/IOKit/AppleKeyStoreInterface.h | 24 +-
 iokit/IOKit/IOBSD.h | 19 +-
 iokit/IOKit/IOBufferMemoryDescriptor.h | 390 +-
 iokit/IOKit/IOCPU.h | 180 +-
 iokit/IOKit/IOCatalogue.h | 356 +-
 iokit/IOKit/IOCommand.h | 24 +-
 iokit/IOKit/IOCommandGate.h | 360 +-
 iokit/IOKit/IOCommandPool.h | 287 +-
 iokit/IOKit/IOCommandQueue.h | 82 +-
 iokit/IOKit/IOConditionLock.h | 46 +-
 iokit/IOKit/IODMACommand.h | 921 +-
 iokit/IOKit/IODMAController.h | 66 +-
 iokit/IOKit/IODMAEventSource.h | 106 +-
 iokit/IOKit/IODataQueue.h | 162 +-
 iokit/IOKit/IODataQueueShared.h | 29 +-
 iokit/IOKit/IODeviceMemory.h | 86 +-
 iokit/IOKit/IODeviceTreeSupport.h | 91 +-
 iokit/IOKit/IOEventSource.h | 345 +-
 iokit/IOKit/IOFilterInterruptEventSource.h | 219 +-
 iokit/IOKit/IOHibernatePrivate.h | 513 +-
 iokit/IOKit/IOInterleavedMemoryDescriptor.h | 118 +-
 iokit/IOKit/IOInterruptAccounting.h | 31 +-
 iokit/IOKit/IOInterruptAccountingPrivate.h | 69 +-
 iokit/IOKit/IOInterruptController.h | 178 +-
 iokit/IOKit/IOInterruptEventSource.h | 280 +-
 iokit/IOKit/IOInterrupts.h | 18 +-
 iokit/IOKit/IOKernelReportStructs.h | 28 +-
 iokit/IOKit/IOKernelReporters.h | 3063 ++-
 iokit/IOKit/IOKitDebug.h | 270 +-
 iokit/IOKit/IOKitDiagnosticsUserClient.h | 14 +-
 iokit/IOKit/IOKitKeys.h | 136 +-
 iokit/IOKit/IOKitKeysPrivate.h | 16 +-
 iokit/IOKit/IOKitServer.h | 115 +-
 iokit/IOKit/IOLib.h | 278 +-
 iokit/IOKit/IOLocks.h | 413 +-
 iokit/IOKit/IOLocksPrivate.h | 11 +-
 iokit/IOKit/IOMapper.h | 145 +-
 iokit/IOKit/IOMemoryCursor.h | 503 +-
 iokit/IOKit/IOMemoryDescriptor.h | 1570 +-
 iokit/IOKit/IOMessage.h | 40 +-
 iokit/IOKit/IOMultiMemoryDescriptor.h | 140 +-
 iokit/IOKit/IONVRAM.h | 244 +-
 iokit/IOKit/IONotifier.h | 41 +-
 iokit/IOKit/IOPlatformExpert.h | 379 +-
 iokit/IOKit/IOPolledInterface.h | 260 +-
 iokit/IOKit/IORangeAllocator.h | 187 +-
 iokit/IOKit/IORegistryEntry.h | 1246 +-
 iokit/IOKit/IOReportMacros.h | 286 +-
 iokit/IOKit/IOReportTypes.h | 312 +-
 iokit/IOKit/IOReturn.h | 96 +-
 iokit/IOKit/IOService.h | 2722 +--
 iokit/IOKit/IOServicePM.h | 40 +-
 iokit/IOKit/IOSharedDataQueue.h | 208 +-
 iokit/IOKit/IOSharedLock.h | 6 +-
 iokit/IOKit/IOStatistics.h | 22 +-
 iokit/IOKit/IOStatisticsPrivate.h | 95 +-
 iokit/IOKit/IOSubMemoryDescriptor.h | 136 +-
 iokit/IOKit/IOSyncer.h | 35 +-
 iokit/IOKit/IOTimeStamp.h | 206 +-
 iokit/IOKit/IOTimerEventSource.h | 373 +-
 iokit/IOKit/IOTypes.h | 179 +-
 iokit/IOKit/IOUserClient.h | 563 +-
 iokit/IOKit/IOWorkLoop.h | 448 +-
 iokit/IOKit/OSMessageNotification.h | 115 +-
 iokit/IOKit/assert.h | 21 +-
 iokit/IOKit/nvram/IONVRAMController.h | 30 +-
 iokit/IOKit/perfcontrol/IOPerfControl.h | 366 +-
 iokit/IOKit/platform/AppleMacIO.h | 50 +-
 iokit/IOKit/platform/AppleMacIODevice.h | 34 +-
 iokit/IOKit/platform/AppleNMI.h | 47 +-
 iokit/IOKit/platform/ApplePlatformExpert.h | 72 +-
 iokit/IOKit/power/IOPwrController.h | 14 +-
 iokit/IOKit/pwr_mgt/IOPM.h | 774 +-
 iokit/IOKit/pwr_mgt/IOPMLibDefs.h | 22 +-
 iokit/IOKit/pwr_mgt/IOPMPowerSource.h | 210 +-
 iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h | 39 +-
 iokit/IOKit/pwr_mgt/IOPMinformee.h | 30 +-
 iokit/IOKit/pwr_mgt/IOPMinformeeList.h | 53 +-
 iokit/IOKit/pwr_mgt/IOPMlog.h | 136 +-
 iokit/IOKit/pwr_mgt/IOPMpowerState.h | 75 +-
 iokit/IOKit/pwr_mgt/IOPowerConnection.h | 207 +-
 iokit/IOKit/pwr_mgt/RootDomain.h | 1354 +-
 iokit/IOKit/rtc/IORTCController.h | 60 +-
 iokit/IOKit/system.h | 16 +-
 .../IOKit/system_management/IOWatchDogTimer.h | 42 +-
 iokit/Kernel/IOBufferMemoryDescriptor.cpp | 842 +-
 iokit/Kernel/IOCPU.cpp | 1263 +-
 iokit/Kernel/IOCatalogue.cpp | 1482 +-
 iokit/Kernel/IOCommand.cpp | 23 +-
 iokit/Kernel/IOCommandGate.cpp | 393 +-
 iokit/Kernel/IOCommandPool.cpp | 189 +-
 iokit/Kernel/IOCommandQueue.cpp | 408 +-
 iokit/Kernel/IOConditionLock.cpp | 246 +-
 iokit/Kernel/IODMACommand.cpp | 2289 +-
 iokit/Kernel/IODMAController.cpp | 117 +-
 iokit/Kernel/IODMAEventSource.cpp | 307 +-
 iokit/Kernel/IODataQueue.cpp | 349 +-
 iokit/Kernel/IODeviceMemory.cpp | 77 +-
 iokit/Kernel/IODeviceTreeSupport.cpp | 2205 +-
 iokit/Kernel/IOEventSource.cpp | 244 +-
 iokit/Kernel/IOFilterInterruptEventSource.cpp | 335 +-
 iokit/Kernel/IOHibernateIO.cpp | 4196 ++--
 iokit/Kernel/IOHibernateInternal.h | 70 +-
 iokit/Kernel/IOHibernateRestoreKernel.c | 1186 +-
 iokit/Kernel/IOHistogramReporter.cpp | 622 +-
 .../Kernel/IOInterleavedMemoryDescriptor.cpp | 398 +-
 iokit/Kernel/IOInterruptAccounting.cpp | 55 +-
 iokit/Kernel/IOInterruptController.cpp | 1258 +-
 iokit/Kernel/IOInterruptEventSource.cpp | 563 +-
 iokit/Kernel/IOKitDebug.cpp | 2090 +-
 iokit/Kernel/IOKitKernelInternal.h | 169 +-
 iokit/Kernel/IOLib.cpp | 1652 +-
 iokit/Kernel/IOLocks.cpp | 359 +-
 iokit/Kernel/IOMapper.cpp | 468 +-
 iokit/Kernel/IOMemoryCursor.cpp | 416 +-
 iokit/Kernel/IOMemoryDescriptor.cpp | 7550 +++---
 iokit/Kernel/IOMultiMemoryDescriptor.cpp | 728 +-
 iokit/Kernel/IONVRAM.cpp | 2846 +--
 iokit/Kernel/IOPMPowerSource.cpp | 643 +-
 iokit/Kernel/IOPMPowerSourceList.cpp | 154 +-
 iokit/Kernel/IOPMPowerStateQueue.cpp | 98 +-
 iokit/Kernel/IOPMPowerStateQueue.h | 38 +-
 iokit/Kernel/IOPMinformee.cpp | 45 +-
 iokit/Kernel/IOPMinformeeList.cpp | 283 +-
 iokit/Kernel/IOPMrootDomain.cpp | 16723 +++++++-------
 iokit/Kernel/IOPMrootDomainInternal.h | 44 +-
 iokit/Kernel/IOPerfControl.cpp | 290 +-
 iokit/Kernel/IOPlatformExpert.cpp | 2394 +-
 iokit/Kernel/IOPolledInterface.cpp | 1667 +-
 iokit/Kernel/IOPowerConnection.cpp | 86 +-
 iokit/Kernel/IORTC.cpp | 35 +-
 iokit/Kernel/IORangeAllocator.cpp | 555 +-
 iokit/Kernel/IORegistryEntry.cpp | 3232 +--
 iokit/Kernel/IOReportLegend.cpp | 288 +-
 iokit/Kernel/IOReporter.cpp | 1639 +-
 iokit/Kernel/IOReporterDefs.h | 35 +-
 iokit/Kernel/IOService.cpp | 11096 ++++-----
 iokit/Kernel/IOServicePM.cpp | 13134 ++++++-----
 iokit/Kernel/IOServicePMPrivate.h | 843 +-
 iokit/Kernel/IOServicePrivate.h | 237 +-
 iokit/Kernel/IOSharedDataQueue.cpp | 558 +-
 iokit/Kernel/IOSimpleReporter.cpp | 195 +-
 iokit/Kernel/IOStartIOKit.cpp | 217 +-
 iokit/Kernel/IOStateReporter.cpp | 1384 +-
 iokit/Kernel/IOStatistics.cpp | 564 +-
 iokit/Kernel/IOStringFuncs.c | 180 +-
 iokit/Kernel/IOSubMemoryDescriptor.cpp | 237 +-
 iokit/Kernel/IOSyncer.cpp | 126 +-
 iokit/Kernel/IOTimerEventSource.cpp | 632 +-
 iokit/Kernel/IOUserClient.cpp | 7632 +++---
 iokit/Kernel/IOWorkLoop.cpp | 825 +-
 iokit/Kernel/RootDomainUserClient.cpp | 588 +-
 iokit/Kernel/RootDomainUserClient.h | 72 +-
 iokit/Kernel/i386/IOKeyStoreHelper.cpp | 159 +-
 iokit/KernelConfigTables.cpp | 22 +-
 iokit/Tests/TestCollections.cpp | 1855 +-
 iokit/Tests/TestContainers.cpp | 763 +-
 iokit/Tests/TestDevice.cpp | 197 +-
 iokit/Tests/TestIOMemoryDescriptor.cpp | 1462 +-
 iokit/Tests/Tests.cpp | 412 +-
 iokit/Tests/Tests.h | 8 +-
 iokit/bsddev/DINetBootHook.cpp | 334 +-
 iokit/bsddev/DINetBootHook.h | 32 +-
 iokit/bsddev/IOKitBSDInit.cpp | 1507 +-
 iokit/bsddev/IOKitBSDInit.h | 9 +-
 iokit/bsddev/skywalk/IOSkywalkSupport.cpp | 52 +
 iokit/conf/files | 3 +
 libkdd/kcdata.h | 260 +-
 libkdd/kcdtypes.c | 35 +-
 libkdd/kdd.h | 2 +-
 libkern/.clang-format | 1 -
 libkern/OSKextLib.cpp | 574 +-
 libkern/OSKextVersion.c | 756 +-
 libkern/c++/OSArray.cpp | 683 +-
 libkern/c++/OSBoolean.cpp | 125 +-
 libkern/c++/OSCPPDebug.cpp | 37 +-
 libkern/c++/OSCollection.cpp | 135 +-
 libkern/c++/OSCollectionIterator.cpp | 146 +-
 libkern/c++/OSData.cpp | 751 +-
 libkern/c++/OSDictionary.cpp | 1112 +-
 libkern/c++/OSIterator.cpp | 11 +-
 libkern/c++/OSKext.cpp | 19113 ++++++++--------
 libkern/c++/OSMetaClass.cpp | 1688 +-
 libkern/c++/OSNumber.cpp | 174 +-
 libkern/c++/OSObject.cpp | 372 +-
 libkern/c++/OSOrderedSet.cpp | 569 +-
 libkern/c++/OSRuntime.cpp | 714 +-
 libkern/c++/OSRuntimeSupport.c | 13 +-
 libkern/c++/OSSerialize.cpp | 339 +-
 libkern/c++/OSSerializeBinary.cpp | 629 +-
 libkern/c++/OSSet.cpp | 550 +-
 libkern/c++/OSString.cpp | 456 +-
 libkern/c++/OSSymbol.cpp | 996 +-
 libkern/c++/OSUnserialize.cpp | 2161 +-
 libkern/c++/OSUnserializeXML.cpp | 2945 +--
 .../TestSerialization/test1/test1_main.cpp | 66 +-
 .../TestSerialization/test2/test2_main.cpp | 193 +-
 libkern/crypto/corecrypto_aes.c | 173 +-
 libkern/crypto/corecrypto_aesxts.c | 100 +-
 libkern/crypto/corecrypto_chacha20poly1305.c | 55 +-
 libkern/crypto/corecrypto_des.c | 39 +-
 libkern/crypto/corecrypto_md5.c | 55 +-
 libkern/crypto/corecrypto_rand.c | 3 +-
 libkern/crypto/corecrypto_rsa.c | 34 +-
 libkern/crypto/corecrypto_sha1.c | 68 +-
 libkern/crypto/corecrypto_sha2.c | 93 +-
 libkern/crypto/register_crypto.c | 18 +-
 libkern/firehose/chunk_private.h | 78 +-
 libkern/firehose/firehose_types_private.h | 170 +-
 libkern/firehose/ioctl_private.h | 2 +-
 libkern/firehose/tracepoint_private.h | 22 +-
 libkern/gen/OSAtomicOperations.c | 128 +-
 libkern/gen/OSDebug.cpp | 231 +-
 libkern/kernel_mach_header.c | 222 +-
 libkern/kmod/c_start.c | 65 +-
 libkern/kmod/c_stop.c | 50 +-
 libkern/kmod/cplus_start.c | 38 +-
 libkern/kmod/cplus_stop.c | 38 +-
 libkern/kmod/libkmodtest/libkmodtest.cpp | 12 +-
 libkern/kmod/libkmodtest/libkmodtest.h | 15 +-
 libkern/kxld/WKdmCompress.c | 491 +-
 libkern/kxld/WKdmDecompress.c | 413 +-
 libkern/kxld/kxld.c | 959 +-
 libkern/kxld/kxld_array.c | 615 +-
 libkern/kxld/kxld_array.h | 74 +-
 libkern/kxld/kxld_copyright.c | 322 +-
 libkern/kxld/kxld_demangle.c | 47 +-
 libkern/kxld/kxld_demangle.h | 20 +-
 libkern/kxld/kxld_dict.c | 543 +-
 libkern/kxld/kxld_dict.h | 68 +-
 libkern/kxld/kxld_kext.c | 1635 +-
 libkern/kxld/kxld_kext.h | 54 +-
 libkern/kxld/kxld_object.c | 3700 ++-
 libkern/kxld/kxld_object.h | 101 +-
 libkern/kxld/kxld_reloc.c | 2300 +-
 libkern/kxld/kxld_reloc.h | 110 +-
 libkern/kxld/kxld_sect.c | 909 +-
 libkern/kxld/kxld_sect.h | 91 +-
 libkern/kxld/kxld_seg.c | 1318 +-
 libkern/kxld/kxld_seg.h | 75 +-
 libkern/kxld/kxld_splitinfolc.c | 129 +-
 libkern/kxld/kxld_splitinfolc.h | 34 +-
 libkern/kxld/kxld_srcversion.c | 61 +-
 libkern/kxld/kxld_srcversion.h | 24 +-
 libkern/kxld/kxld_stubs.c | 38 +-
 libkern/kxld/kxld_sym.c | 945 +-
 libkern/kxld/kxld_sym.h | 189 +-
 libkern/kxld/kxld_symtab.c | 837 +-
 libkern/kxld/kxld_symtab.h | 85 +-
 libkern/kxld/kxld_util.c | 1114 +-
 libkern/kxld/kxld_util.h | 101 +-
 libkern/kxld/kxld_uuid.c | 51 +-
 libkern/kxld/kxld_uuid.h | 23 +-
 libkern/kxld/kxld_versionmin.c | 171 +-
 libkern/kxld/kxld_versionmin.h | 35 +-
 libkern/kxld/kxld_vtable.c | 1063 +-
 libkern/kxld/kxld_vtable.h | 41 +-
 libkern/kxld/tests/kextcopyright.c | 291 +-
 libkern/kxld/tests/kxld_array_test.c | 240 +-
 libkern/kxld/tests/kxld_dict_test.c | 239 +-
 libkern/kxld/tests/kxld_test.c | 22 +-
 libkern/kxld/tests/kxld_test.h | 9 +-
 libkern/libclosure/libclosuredata.c | 12 +-
 libkern/libclosure/runtime.cpp | 784 +-
 libkern/libkern/Block.h | 14 +-
 libkern/libkern/Block_private.h | 432 +-
 libkern/libkern/OSAtomic.h | 149 +-
 libkern/libkern/OSBase.h | 50 +-
 libkern/libkern/OSByteOrder.h | 121 +-
 libkern/libkern/OSCrossEndian.h | 30 +-
 libkern/libkern/OSDebug.h | 24 +-
 libkern/libkern/OSKextLib.h | 130 +-
 libkern/libkern/OSKextLibPrivate.h | 64 +-
 libkern/libkern/OSMalloc.h | 52 +-
 libkern/libkern/OSReturn.h | 20 +-
 libkern/libkern/OSRuntime.h | 8 +-
 libkern/libkern/OSSerializeBinary.h | 35 +-
 libkern/libkern/OSTypes.h | 62 +-
 libkern/libkern/_OSByteOrder.h | 48 +-
 libkern/libkern/arm/OSByteOrder.h | 134 +-
 libkern/libkern/c++/OSArray.h | 1272 +-
 libkern/libkern/c++/OSBoolean.h | 314 +-
 libkern/libkern/c++/OSCPPDebug.h | 8 +-
 libkern/libkern/c++/OSCollection.h | 802 +-
 libkern/libkern/c++/OSCollectionIterator.h | 228 +-
 libkern/libkern/c++/OSContainers.h | 8 +-
 libkern/libkern/c++/OSData.h | 1313 +-
 libkern/libkern/c++/OSDictionary.h | 1691 +-
 libkern/libkern/c++/OSEndianTypes.h | 31 +-
 libkern/libkern/c++/OSIterator.h | 130 +-
 libkern/libkern/c++/OSKext.h | 1029 +-
 libkern/libkern/c++/OSLib.h | 13 +-
 libkern/libkern/c++/OSMetaClass.h | 3395 +--
 libkern/libkern/c++/OSNumber.h | 692 +-
 libkern/libkern/c++/OSObject.h | 553 +-
 libkern/libkern/c++/OSOrderedSet.h | 1334 +-
 libkern/libkern/c++/OSSerialize.h | 472 +-
 libkern/libkern/c++/OSSet.h | 1376 +-
 libkern/libkern/c++/OSString.h | 675 +-
 libkern/libkern/c++/OSSymbol.h | 611 +-
 libkern/libkern/c++/OSUnserialize.h | 20 +-
 libkern/libkern/crc.h | 4 +-
 libkern/libkern/crypto/aes.h | 15 +-
 libkern/libkern/crypto/aesxts.h | 32 +-
 libkern/libkern/crypto/chacha20poly1305.h | 18 +-
 libkern/libkern/crypto/crypto_internal.h | 8 +-
 libkern/libkern/crypto/des.h | 6 +-
 libkern/libkern/crypto/md5.h | 18 +-
 libkern/libkern/crypto/register_crypto.h | 188 +-
 libkern/libkern/crypto/rsa.h | 12 +-
 libkern/libkern/crypto/sha1.h | 36 +-
 libkern/libkern/crypto/sha2.h | 28 +-
 libkern/libkern/i386/OSByteOrder.h | 74 +-
 libkern/libkern/i386/_OSByteOrder.h | 52 +-
 libkern/libkern/img4/interface.h | 235 +-
 libkern/libkern/kernel_mach_header.h | 56 +-
 libkern/libkern/kext_panic_report.h | 44 +-
 libkern/libkern/kext_request_keys.h | 16 +-
 libkern/libkern/kxld.h | 107 +-
 libkern/libkern/kxld_types.h | 81 +-
 libkern/libkern/locks.h | 12 +-
 libkern/libkern/machine/OSByteOrder.h | 86 +-
 libkern/libkern/mkext.h | 46 +-
 libkern/libkern/prelink.h | 9 +-
 libkern/libkern/section_keywords.h | 14 +-
 libkern/libkern/stack_protector.h | 9 +-
 libkern/libkern/sysctl.h | 12 +-
 libkern/libkern/tree.h | 1286 +-
 libkern/libkern/zconf.h | 72 +-
 libkern/libkern/zlib.h | 2130 +-
 libkern/net/inet_aton.c | 51 +-
 libkern/net/inet_ntoa.c | 18 +-
 libkern/net/inet_ntop.c | 50 +-
 libkern/net/inet_pton.c | 101 +-
 libkern/os/Makefile | 1 +
 libkern/os/base.h | 10 +-
 libkern/os/firehose.h | 2 +-
 libkern/os/hash.h | 108 +
 libkern/os/internal.c | 72 +-
 libkern/os/log.c | 821 +-
 libkern/os/log.h | 31 +-
 libkern/os/log_encode.h | 864 +-
 libkern/os/log_encode_types.h | 152 +-
 libkern/os/log_private.h | 8 +-
 libkern/os/object.h | 20 +-
 libkern/os/object_private.h | 36 +-
 libkern/os/overflow.h | 48 +-
 libkern/os/reason_private.h | 12 +-
 libkern/os/refcnt.c | 94 +-
 libkern/os/refcnt.h | 24 +-
 libkern/os/trace.h | 423 +-
 libkern/os/trace_internal.h | 4 +-
 libkern/stack_protector.c | 11 +-
 libkern/stdio/scanf.c | 280 +-
 libkern/uuid/uuid.c | 75 +-
 libsa/bootstrap.cpp | 1496 +-
 libsa/lastkernelconstructor.c | 14 +-
 libsa/lastkerneldataconst.c | 14 +-
 libsyscall/Libsyscall.xcconfig | 2 +-
 libsyscall/custom/custom.s | 4 +-
 libsyscall/custom/errno.c | 8 +-
 libsyscall/mach/abort.h | 11 +-
 libsyscall/mach/clock_sleep.c | 16 +-
 libsyscall/mach/error_codes.c | 26 +-
 libsyscall/mach/errorlib.h | 74 +-
 libsyscall/mach/exc_catcher.c | 23 +-
 libsyscall/mach/exc_catcher.h | 58 +-
 libsyscall/mach/exc_catcher_state.c | 29 +-
 libsyscall/mach/exc_catcher_state_identity.c | 35 +-
 libsyscall/mach/externs.h | 8 +-
 libsyscall/mach/fprintf_stderr.c | 12 +-
 libsyscall/mach/host.c | 30 +-
 libsyscall/mach/mach/errorlib.h | 58 +-
 libsyscall/mach/mach/mach.h | 110 +-
 libsyscall/mach/mach/mach_error.h | 46 +-
 libsyscall/mach/mach/mach_init.h | 42 +-
 libsyscall/mach/mach/mach_interface.h | 10 +-
 libsyscall/mach/mach/mach_right_private.h | 16 +-
 libsyscall/mach/mach/mach_sync_ipc.h | 26 +-
 libsyscall/mach/mach/port_obj.h | 72 +-
 libsyscall/mach/mach/sync.h | 8 +-
 libsyscall/mach/mach/thread_state.h | 12 +-
 libsyscall/mach/mach/vm_page_size.h | 24 +-
 libsyscall/mach/mach/vm_task.h | 12 +-
 libsyscall/mach/mach_error.c | 22 +-
 libsyscall/mach/mach_error_string.c | 71 +-
 libsyscall/mach/mach_init.c | 26 +-
 libsyscall/mach/mach_legacy.c | 8 +-
 libsyscall/mach/mach_msg.c | 560 +-
 libsyscall/mach/mach_port.c | 221 +-
 libsyscall/mach/mach_right.c | 32 +-
 libsyscall/mach/mach_vm.c | 96 +-
 libsyscall/mach/mig_allocate.c | 31 +-
 libsyscall/mach/mig_deallocate.c | 26 +-
 libsyscall/mach/mig_reply_port.c | 8 +-
 libsyscall/mach/mig_reply_setup.c | 28 +-
 libsyscall/mach/mig_strncpy.c | 50 +-
 libsyscall/mach/ms_thread_switch.c | 24 +-
 libsyscall/mach/panic.c | 24 +-
 libsyscall/mach/port_descriptions.c | 8 +-
 libsyscall/mach/port_obj.c | 19 +-
 libsyscall/mach/semaphore.c | 36 +-
 libsyscall/mach/servers/key_defs.h | 60 +-
 libsyscall/mach/servers/ls_defs.h | 302 +-
 libsyscall/mach/servers/netname_defs.h | 32 +-
 libsyscall/mach/servers/nm_defs.h | 39 +-
 libsyscall/mach/slot_name.c | 24 +-
 libsyscall/mach/stack_logging_internal.h | 10 +-
 libsyscall/mach/string.c | 47 +-
 libsyscall/mach/string.h | 8 +-
 libsyscall/os/alloc_once.c | 10 +-
 libsyscall/os/thread_self_restrict.h | 1 -
 libsyscall/wrappers/__commpage_gettimeofday.c | 31 +-
 libsyscall/wrappers/_errno.h | 8 +-
 libsyscall/wrappers/_libc_funcptr.c | 28 +-
 libsyscall/wrappers/_libkernel_init.c | 14 +-
 libsyscall/wrappers/_libkernel_init.h | 10 +-
 libsyscall/wrappers/cancelable/fcntl-base.c | 64 +-
 libsyscall/wrappers/cancelable/fcntl-cancel.c | 8 +-
 libsyscall/wrappers/cancelable/fcntl.c | 8 +-
 .../cancelable/pselect-darwinext-cancel.c | 6 +-
 .../wrappers/cancelable/pselect-darwinext.c | 6 +-
 .../wrappers/cancelable/select-cancel.c | 6 +-
 libsyscall/wrappers/cancelable/select.c | 6 +-
 .../wrappers/cancelable/sigsuspend-cancel.c | 8 +-
 libsyscall/wrappers/cancelable/sigsuspend.c | 8 +-
 libsyscall/wrappers/carbon_delete.c | 11 +-
 libsyscall/wrappers/clonefile.c | 2 +-
 libsyscall/wrappers/coalition.c | 28 +-
 libsyscall/wrappers/csr.c | 12 +-
 libsyscall/wrappers/fs_snapshot.c | 14 +-
 libsyscall/wrappers/gethostuuid.c | 5 +-
 libsyscall/wrappers/getiopolicy_np.c | 12 +-
 .../wrappers/guarded_open_dprotected_np.c | 10 +-
 libsyscall/wrappers/guarded_open_np.c | 10 +-
 libsyscall/wrappers/init_cpu_capabilities.c | 12 +-
 libsyscall/wrappers/ioctl.c | 10 +-
 libsyscall/wrappers/kdebug_trace.c | 10 +-
 libsyscall/wrappers/kill.c | 14 +-
 libsyscall/wrappers/legacy/accept.c | 9 +-
 libsyscall/wrappers/legacy/bind.c | 9 +-
 libsyscall/wrappers/legacy/connect.c | 9 +-
 libsyscall/wrappers/legacy/getattrlist.c | 13 +-
 libsyscall/wrappers/legacy/getaudit.c | 19 +-
 libsyscall/wrappers/legacy/getpeername.c | 11 +-
 libsyscall/wrappers/legacy/getsockname.c | 11 +-
 libsyscall/wrappers/legacy/kill.c | 6 +-
 libsyscall/wrappers/legacy/lchown.c | 9 +-
 libsyscall/wrappers/legacy/listen.c | 9 +-
 libsyscall/wrappers/legacy/mprotect.c | 12 +-
 libsyscall/wrappers/legacy/msync.c | 8 +-
 libsyscall/wrappers/legacy/munmap.c | 8 +-
 libsyscall/wrappers/legacy/open.c | 10 +-
 libsyscall/wrappers/legacy/recvfrom.c | 9 +-
 libsyscall/wrappers/legacy/recvmsg.c | 9 +-
 libsyscall/wrappers/legacy/select-pre1050.c | 8 +-
 libsyscall/wrappers/legacy/select.c | 6 +-
 libsyscall/wrappers/legacy/sendmsg.c | 9 +-
 libsyscall/wrappers/legacy/sendto.c | 9 +-
 libsyscall/wrappers/legacy/setattrlist.c | 13 +-
 libsyscall/wrappers/legacy/sigsuspend.c | 8 +-
 libsyscall/wrappers/legacy/socketpair.c | 11 +-
 libsyscall/wrappers/libproc/libproc.c | 580 +-
 libsyscall/wrappers/libproc/libproc.h | 90 +-
 .../wrappers/libproc/libproc_internal.h | 45 +-
 .../wrappers/libproc/proc_listpidspath.c | 201 +-
 libsyscall/wrappers/mach_approximate_time.c | 17 +-
 libsyscall/wrappers/mach_boottime.c | 6 +-
 libsyscall/wrappers/mach_bridge_remote_time.c | 4 +-
 libsyscall/wrappers/mach_continuous_time.c | 46 +-
 libsyscall/wrappers/mach_get_times.c | 11 +-
 libsyscall/wrappers/mach_timebase_info.c | 23 +-
 libsyscall/wrappers/open_dprotected_np.c | 15 +-
 libsyscall/wrappers/persona.c | 27 +-
 libsyscall/wrappers/pid_shutdown_networking.c | 6 +-
 libsyscall/wrappers/posix_sem_obsolete.c | 9 +-
 libsyscall/wrappers/quota_obsolete.c | 6 +-
 libsyscall/wrappers/reboot.c | 6 +-
 libsyscall/wrappers/remove-counter.c | 9 +-
 libsyscall/wrappers/rename.c | 10 +-
 libsyscall/wrappers/renameat.c | 4 +-
 libsyscall/wrappers/renamex.c | 4 +-
 libsyscall/wrappers/rmdir.c | 10 +-
 libsyscall/wrappers/select-base.c | 37 +-
 libsyscall/wrappers/sfi.c | 18 +-
 libsyscall/wrappers/sigsuspend-base.c | 22 +-
 libsyscall/wrappers/spawn/posix_spawn.c | 548 +-
 libsyscall/wrappers/spawn/spawn.h | 126 +-
 libsyscall/wrappers/spawn/spawn_private.h | 24 +-
 libsyscall/wrappers/stackshot.c | 6 +-
 libsyscall/wrappers/string/index.c | 10 +-
 libsyscall/wrappers/string/memcpy.c | 36 +-
 libsyscall/wrappers/string/memset.c | 23 +-
 libsyscall/wrappers/string/strcmp.c | 10 +-
 libsyscall/wrappers/string/strcpy.c | 19 +-
 libsyscall/wrappers/string/strings.h | 28 +-
 libsyscall/wrappers/string/strlcpy.c | 25 +-
 libsyscall/wrappers/string/strlen.c | 46 +-
 libsyscall/wrappers/string/strsep.c | 12 +-
 libsyscall/wrappers/terminate_with_reason.c | 42 +-
 libsyscall/wrappers/thread_register_state.c | 166 +-
 libsyscall/wrappers/unix03/chmod.c | 18 +-
 libsyscall/wrappers/unix03/fchmod.c | 18 +-
 libsyscall/wrappers/unix03/getrlimit.c | 8 +-
 libsyscall/wrappers/unix03/mmap.c | 14 +-
 libsyscall/wrappers/unix03/munmap.c | 18 +-
 libsyscall/wrappers/unix03/setrlimit.c | 8 +-
 libsyscall/wrappers/unlink.c | 10 +-
 libsyscall/wrappers/unlinkat.c | 4 +-
 libsyscall/wrappers/utimensat.c | 12 +-
 libsyscall/wrappers/work_interval.c | 24 +-
 makedefs/MakeInc.def | 16 +-
 makedefs/MakeInc.top | 17 +-
 .../UserNotification/KUNCUserNotifications.c | 207 +-
 .../UserNotification/KUNCUserNotifications.h | 136 +-
 osfmk/UserNotification/UNDTypes.h | 13 +-
 osfmk/arm/arch.h | 6 +-
 osfmk/arm/arm_init.c | 146 +-
 osfmk/arm/arm_timer.c | 83 +-
 osfmk/arm/arm_vm_init.c | 83 +-
 osfmk/arm/atomic.h | 196 +-
 osfmk/arm/bsd_arm.c | 6 +-
 osfmk/arm/caches.c | 380 +-
 osfmk/arm/caches_internal.h | 10 +-
 osfmk/arm/commpage/commpage.c | 243 +-
 osfmk/arm/commpage/commpage.h | 28 +-
 osfmk/arm/commpage/commpage_sigs.h | 32 +-
 osfmk/arm/cpu.c | 171 +-
 osfmk/arm/cpu_capabilities.h | 204 +-
 osfmk/arm/cpu_common.c | 285 +-
 osfmk/arm/cpu_data.h | 53 +-
 osfmk/arm/cpu_data_internal.h | 365 +-
 osfmk/arm/cpu_internal.h | 54 +-
 osfmk/arm/cpu_number.h | 26 +-
 osfmk/arm/cpuid.c | 52 +-
 osfmk/arm/cpuid.h | 148 +-
 osfmk/arm/data.s | 11 -
 osfmk/arm/dbgwrap.c | 3 +-
 osfmk/arm/dbgwrap.h | 28 +-
 osfmk/arm/exception.h | 18 +-
 osfmk/arm/genassym.c | 283 +-
 osfmk/arm/hw_lock_types.h | 22 +-
 osfmk/arm/io_map.c | 30 +-
 osfmk/arm/io_map_entries.h | 17 +-
 osfmk/arm/kpc_arm.c | 289 +-
 osfmk/arm/lock.h | 22 +-
 osfmk/arm/locks.h | 296 +-
 osfmk/arm/locks_arm.c | 1721 +-
 osfmk/arm/loose_ends.c | 310 +-
 osfmk/arm/lowglobals.h | 64 +-
 osfmk/arm/lowmem_vectors.c | 27 +-
 osfmk/arm/machdep_call.c | 15 +-
 osfmk/arm/machdep_call.h | 31 +-
 osfmk/arm/machine_cpu.h | 2 +-
 osfmk/arm/machine_cpuid.c | 38 +-
 osfmk/arm/machine_cpuid.h | 75 +-
 osfmk/arm/machine_kpc.h | 18 +-
 osfmk/arm/machine_routines.c | 375 +-
 osfmk/arm/machine_routines.h | 449 +-
 osfmk/arm/machine_routines_common.c | 214 +-
 osfmk/arm/machine_task.c | 84 +-
 osfmk/arm/machlimits.h | 46 +-
 osfmk/arm/machparam.h | 13 +-
 osfmk/arm/misc_protos.h | 18 +-
 osfmk/arm/model_dep.c | 317 +-
 osfmk/arm/monotonic_arm.c | 3 +-
 osfmk/arm/pal_routines.c | 8 +-
 osfmk/arm/pal_routines.h | 20 +-
 osfmk/arm/pcb.c | 26 +-
 osfmk/arm/pmap.c | 5404 ++---
 osfmk/arm/pmap.h | 287 +-
 osfmk/arm/pmap_public.h | 4 +-
 osfmk/arm/proc_reg.h | 1104 +-
 osfmk/arm/rtclock.c | 83 +-
 osfmk/arm/rtclock.h | 65 +-
 osfmk/arm/sched_param.h | 18 +-
 osfmk/arm/setjmp.h | 22 +-
 osfmk/arm/simple_lock.h | 200 +-
 osfmk/arm/smp.h | 6 +-
 osfmk/arm/status_shared.c | 10 +-
 osfmk/arm/strlcpy.c | 19 +-
 osfmk/arm/strncpy.c | 19 +-
 osfmk/arm/task.h | 16 +-
 osfmk/arm/thread.h | 138 +-
 osfmk/arm/trap.c | 290 +-
 osfmk/arm/trap.h | 284 +-
 osfmk/arm/vm_tuning.h | 18 +-
 osfmk/arm/xpr.h | 2 +-
 osfmk/arm64/Makefile | 5 +-
 osfmk/arm64/alternate_debugger.c | 97 +-
 osfmk/arm64/alternate_debugger.h | 9 +-
 osfmk/arm64/bsd_arm64.c | 32 +-
 osfmk/arm64/copyio.c | 80 +-
 osfmk/arm64/cpu.c | 208 +-
 osfmk/arm64/dbgwrap.c | 97 +-
 osfmk/arm64/genassym.c | 258 +-
 osfmk/arm64/kpc.c | 185 +-
 osfmk/arm64/loose_ends.c | 338 +-
 osfmk/arm64/lowglobals.h | 62 +-
 osfmk/arm64/lowmem_vectors.c | 25 +-
 osfmk/arm64/machine_cpuid.h | 44 +-
 osfmk/arm64/machine_kpc.h | 8 +-
 osfmk/arm64/machine_machdep.h | 6 +-
 osfmk/arm64/machine_remote_time.c | 89 +
 osfmk/arm64/machine_remote_time.h | 39 +
 osfmk/arm64/machine_routines.c | 579 +-
 osfmk/arm64/machine_routines_asm.s | 7 +-
 osfmk/arm64/machine_task.c | 128 +-
 osfmk/arm64/monotonic.h | 51 +-
 osfmk/arm64/monotonic_arm64.c | 138 +-
 osfmk/arm64/pcb.c | 22 +-
 osfmk/arm64/pgtrace.c | 752 +-
 osfmk/arm64/pgtrace.h | 106 +-
 osfmk/arm64/pgtrace_decoder.c | 2835 +--
 osfmk/arm64/pgtrace_decoder.h | 1 -
 osfmk/arm64/platform_tests.c | 414 +-
 osfmk/arm64/proc_reg.h | 1185 +-
 osfmk/arm64/sleh.c | 338 +-
 osfmk/arm64/status.c | 1214 +-
 osfmk/atm/atm.c | 215 +-
 osfmk/atm/atm_internal.h | 46 +-
 osfmk/atm/atm_types.h | 35 +-
 osfmk/bank/bank.c | 320 +-
 osfmk/bank/bank_internal.h | 66 +-
 osfmk/bank/bank_types.h | 12 +-
 osfmk/conf/files | 2 +
 osfmk/conf/files.arm64 | 1 +
 osfmk/conf/files.x86_64 | 1 +
 osfmk/console/art/scalegear.c | 71 +-
 osfmk/console/iso_font.c | 522 +-
 osfmk/console/progress_meter_data.c | 726 +-
 osfmk/console/serial_console.c | 82 +-
 osfmk/console/serial_general.c | 59 +-
 osfmk/console/serial_protos.h | 12 +-
 osfmk/console/video_console.c | 249 +-
 osfmk/console/video_console.h | 135 +-
 osfmk/console/video_scroll.c | 7 +-
 osfmk/corecrypto/cc/src/cc_clear.c | 36 +-
 osfmk/corecrypto/cc/src/cc_cmp_safe.c | 22 +-
 osfmk/corecrypto/cc/src/cc_try_abort.c | 17 +-
 osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c | 684 +-
 osfmk/corecrypto/ccdigest/src/ccdigest_init.c | 10 +-
 .../corecrypto/ccdigest/src/ccdigest_update.c | 77 +-
 osfmk/corecrypto/cchmac/src/cchmac.c | 18 +-
 osfmk/corecrypto/cchmac/src/cchmac_final.c | 25 +-
 osfmk/corecrypto/cchmac/src/cchmac_init.c | 74 +-
 osfmk/corecrypto/cchmac/src/cchmac_update.c | 8 +-
 osfmk/corecrypto/ccn/src/ccn_set.c | 5 +-
 .../ccsha1/src/ccdigest_final_64be.c | 48 +-
 .../corecrypto/ccsha1/src/ccdigest_internal.h | 6 +-
 osfmk/corecrypto/ccsha1/src/ccsha1_eay.c | 310 +-
 .../ccsha1/src/ccsha1_initial_state.c | 10 +-
 .../corecrypto/ccsha2/src/ccdigest_internal.h | 6 +-
 osfmk/corecrypto/ccsha2/src/ccsha256_K.c | 26 +-
 osfmk/corecrypto/ccsha2/src/ccsha256_di.c | 26 +-
 .../ccsha2/src/ccsha256_initial_state.c | 16 +-
 .../ccsha2/src/ccsha256_ltc_compress.c | 110 +-
 osfmk/corecrypto/ccsha2/src/ccsha256_ltc_di.c | 16 +-
 osfmk/corpses/corpse.c | 116 +-
 osfmk/corpses/task_corpse.h | 42 +-
 osfmk/default_pager/default_pager_types.h | 30 +-
 osfmk/device/device_init.c | 37 +-
 osfmk/device/device_port.h | 44 +-
 osfmk/device/device_types.h | 87 +-
 osfmk/device/iokit_rpc.c | 487 +-
 osfmk/device/subrs.c | 478 +-
 osfmk/gssd/gssd_mach_types.h | 48 +-
 osfmk/i386/AT386/model_dep.c | 1094 +-
 osfmk/i386/Diagnostics.c | 125 +-
 osfmk/i386/Diagnostics.h | 31 +-
 osfmk/i386/acpi.c | 135 +-
 osfmk/i386/acpi.h | 16 +-
 osfmk/i386/apic.h | 57 +-
 osfmk/i386/arch_types.h | 10 +-
 osfmk/i386/atomic.h | 14 +-
 osfmk/i386/bit_routines.h | 72 -
 osfmk/i386/bsd_i386.c | 293 +-
 osfmk/i386/bsd_i386_native.c | 68 +-
 osfmk/i386/commpage/commpage.c | 722 +-
 osfmk/i386/commpage/commpage.h | 97 +-
 osfmk/i386/cpu.c | 147 +-
 osfmk/i386/cpu_affinity.h | 25 +-
 osfmk/i386/cpu_capabilities.h | 346 +-
 osfmk/i386/cpu_data.h | 613 +-
 osfmk/i386/cpu_number.h | 37 +-
 osfmk/i386/cpu_threads.c | 1865 +-
 osfmk/i386/cpu_threads.h | 64 +-
 osfmk/i386/cpu_topology.c | 220 +-
 osfmk/i386/cpu_topology.h | 265 +-
 osfmk/i386/cpuid.c | 1074 +-
 osfmk/i386/cpuid.h | 549 +-
 osfmk/i386/eflags.h | 72 +-
 osfmk/i386/endian.h | 44 +-
 osfmk/i386/exec.h | 61 +-
 osfmk/i386/fpu.c | 702 +-
 osfmk/i386/fpu.h | 138 +-
 osfmk/i386/gdt.c | 80 +-
 osfmk/i386/genassym.c | 613 +-
 osfmk/i386/hibernate_i386.c | 352 +-
 osfmk/i386/hibernate_restore.c | 64 +-
 osfmk/i386/hpet.c | 244 +-
 osfmk/i386/hpet.h | 94 +-
 osfmk/i386/hw_defs.h | 28 +-
 osfmk/i386/hw_lock_types.h | 31 +-
 osfmk/i386/i386_init.c | 463 +-
 osfmk/i386/i386_lowmem.h | 14 +-
 osfmk/i386/i386_timer.c | 199 +-
 osfmk/i386/i386_vm_init.c | 459 +-
 osfmk/i386/io_map.c | 60 +-
 osfmk/i386/io_map_entries.h | 25 +-
 osfmk/i386/io_port.h | 42 +-
 osfmk/i386/iopb.h | 46 +-
 osfmk/i386/ktss.c | 27 +-
 osfmk/i386/lapic.c | 34 +-
 osfmk/i386/lapic.h | 466 +-
 osfmk/i386/lapic_native.c | 429 +-
 osfmk/i386/ldt.c | 36 +-
 osfmk/i386/lock.h | 30 +-
 osfmk/i386/locks.h | 289 +-
 osfmk/i386/locks_i386.c | 1443 +-
 osfmk/i386/locks_i386_inlines.h | 55 +-
 osfmk/i386/locks_i386_opt.c | 47 +-
 osfmk/i386/machdep_call.c | 53 +-
 osfmk/i386/machdep_call.h | 74 +-
 osfmk/i386/machine_check.c | 188 +-
 osfmk/i386/machine_check.h | 214 +-
 osfmk/i386/machine_cpu.h | 22 +-
 osfmk/i386/machine_routines.c | 619 +-
 osfmk/i386/machine_routines.h | 143 +-
 osfmk/i386/machine_rpc.h | 8 +-
 osfmk/i386/machine_task.c | 279 +-
 osfmk/i386/machlimits.h | 41 +-
 osfmk/i386/machparam.h | 21 +-
 osfmk/i386/misc_protos.h | 142 +-
 osfmk/i386/mp.c | 960 +-
 osfmk/i386/mp.h | 219 +-
 osfmk/i386/mp_desc.c | 404 +-
 osfmk/i386/mp_desc.h | 72 +-
 osfmk/i386/mp_events.h | 40 +-
 osfmk/i386/mp_native.c | 53 +-
 osfmk/i386/mtrr.c | 227 +-
 osfmk/i386/mtrr.h | 20 +-
 osfmk/i386/pal_hibernate.h | 24 +-
 osfmk/i386/pal_native.h | 44 +-
 osfmk/i386/pal_routines.c | 332 +-
 osfmk/i386/pal_routines.h | 73 +-
 osfmk/i386/panic_hooks.c | 105 +-
 osfmk/i386/panic_hooks.h | 14 +-
 osfmk/i386/pcb.c | 1182 +-
 osfmk/i386/pcb_native.c | 190 +-
 osfmk/i386/phys.c | 84 +-
 osfmk/i386/pio.h | 22 +-
 osfmk/i386/pmCPU.c | 743 +-
 osfmk/i386/pmCPU.h | 167 +-
 osfmk/i386/pmap.h | 689 +-
 osfmk/i386/pmap_common.c | 250 +-
 osfmk/i386/pmap_internal.h | 833 +-
 osfmk/i386/pmap_pcid.h | 80 +-
 osfmk/i386/pmap_x86_common.c | 1242 +-
 osfmk/i386/postcode.h | 212 +-
 osfmk/i386/proc_reg.h | 837 +-
 osfmk/i386/rtclock.c | 196 +-
 osfmk/i386/rtclock_native.c | 46 +-
 osfmk/i386/rtclock_protos.h | 38 +-
 osfmk/i386/sched_param.h | 26 +-
 osfmk/i386/seg.h | 312 +-
 osfmk/i386/serial_io.h | 8 +-
 osfmk/i386/setjmp.h | 30 +-
 osfmk/i386/simple_lock.h | 88 +-
 osfmk/i386/smp.h | 12 +-
 osfmk/i386/stab.h | 60 +-
 osfmk/i386/startup64.c | 85 +-
 osfmk/i386/task.h | 34 +-
 osfmk/i386/thread.h | 130 +-
 osfmk/i386/trap.c | 715 +-
 osfmk/i386/trap.h | 148 +-
 osfmk/i386/trap_native.c | 105 +-
 osfmk/i386/tsc.c | 181 +-
 osfmk/i386/tsc.h | 54 +-
 osfmk/i386/tss.h | 128 +-
 osfmk/i386/ucode.c | 119 +-
 osfmk/i386/user_ldt.c | 501 +-
 osfmk/i386/user_ldt.h | 50 +-
 osfmk/i386/vm_tuning.h | 26 +-
 osfmk/i386/vmx.h | 16 +-
 osfmk/i386/vmx/vmx_cpu.c | 186 +-
 osfmk/i386/vmx/vmx_cpu.h | 42 +-
 osfmk/i386/vmx/vmx_shims.c | 22 +-
 osfmk/i386/vmx/vmx_shims.h | 12 +-
 osfmk/i386/xpr.h | 27 +-
 osfmk/ipc/flipc.c | 779 +-
 osfmk/ipc/flipc.h | 39 +-
 osfmk/ipc/ipc_entry.c | 182 +-
 osfmk/ipc/ipc_entry.h | 158 +-
 osfmk/ipc/ipc_hash.c | 228 +-
 osfmk/ipc/ipc_hash.h | 84 +-
 osfmk/ipc/ipc_importance.c | 757 +-
 osfmk/ipc/ipc_importance.h | 197 +-
 osfmk/ipc/ipc_init.c | 105 +-
 osfmk/ipc/ipc_init.h | 70 +-
 osfmk/ipc/ipc_kmsg.c | 4122 ++--
 osfmk/ipc/ipc_kmsg.h | 253 +-
 osfmk/ipc/ipc_machdep.h | 51 +-
 osfmk/ipc/ipc_mqueue.c | 463 +-
 osfmk/ipc/ipc_mqueue.h | 247 +-
 osfmk/ipc/ipc_notify.c | 42 +-
 osfmk/ipc/ipc_notify.h | 48 +-
 osfmk/ipc/ipc_object.c | 306 +-
 osfmk/ipc/ipc_object.h | 208 +-
 osfmk/ipc/ipc_port.c | 554 +-
 osfmk/ipc/ipc_port.h | 425 +-
 osfmk/ipc/ipc_pset.c | 181 +-
 osfmk/ipc/ipc_pset.h | 88 +-
 osfmk/ipc/ipc_right.c | 887 +-
 osfmk/ipc/ipc_right.h | 230 +-
 osfmk/ipc/ipc_space.c | 105 +-
 osfmk/ipc/ipc_space.h | 148 +-
 osfmk/ipc/ipc_table.c | 68 +-
 osfmk/ipc/ipc_table.h | 60 +-
 osfmk/ipc/ipc_types.h | 64 +-
 osfmk/ipc/ipc_voucher.c | 1077 +-
 osfmk/ipc/ipc_voucher.h | 232 +-
 osfmk/ipc/mach_debug.c | 208 +-
 osfmk/ipc/mach_kernelrpc.c | 275 +-
 osfmk/ipc/mach_msg.c | 305 +-
 osfmk/ipc/mach_port.c | 1104 +-
 osfmk/ipc/mig_log.c | 102 +-
 osfmk/ipc/port.h | 48 +-
 osfmk/kdp/kdp.h | 18 +-
 osfmk/kdp/kdp_callout.h | 9 +-
 osfmk/kdp/kdp_core.c | 2 +-
 osfmk/kdp/kdp_core.h | 82 +-
 osfmk/kdp/kdp_dyld.h | 83 +-
 osfmk/kdp/kdp_en_debugger.h | 15 +-
 osfmk/kdp/kdp_internal.h | 151 +-
 osfmk/kdp/kdp_private.h | 204 +-
 osfmk/kdp/kdp_protocol.h | 658 +-
 osfmk/kdp/kdp_serial.c | 127 +-
 osfmk/kdp/kdp_serial.h | 8 +-
 osfmk/kdp/kdp_udp.c | 1237 +-
 osfmk/kdp/kdp_udp.h | 22 +-
 osfmk/kdp/ml/arm/kdp_machdep.c | 134 +-
 osfmk/kdp/ml/arm/kdp_vm.c | 208 +-
 osfmk/kdp/ml/i386/kdp_x86_common.c | 206 +-
 osfmk/kdp/ml/i386/kdp_x86_common.h | 30 +-
 osfmk/kdp/ml/x86_64/kdp_machdep.c | 667 +-
 osfmk/kdp/ml/x86_64/kdp_vm.c | 53 +-
 osfmk/kdp/processor_core.c | 134 +-
 osfmk/kdp/processor_core.h | 26 +-
 osfmk/kern/Makefile | 7 +-
 osfmk/kern/affinity.c | 174 +-
 osfmk/kern/affinity.h | 66 +-
 osfmk/kern/arithmetic_128.h | 81 +-
 osfmk/kern/assert.h | 56 +-
 osfmk/kern/ast.c | 83 +-
 osfmk/kern/ast.h | 68 +-
 osfmk/kern/audit_sessionport.c | 30 +-
 osfmk/kern/backtrace.c | 11 +-
 osfmk/kern/backtrace.h | 8 +-
 osfmk/kern/bits.h | 56 +-
 osfmk/kern/block_hint.h | 6 +-
 osfmk/kern/bsd_kern.c | 525 +-
 osfmk/kern/btlog.c | 373 +-
 osfmk/kern/btlog.h | 46 +-
 osfmk/kern/build_config.c | 1 -
 osfmk/kern/call_entry.h | 88 +-
 osfmk/kern/clock.c | 504 +-
 osfmk/kern/clock.h | 352 +-
 osfmk/kern/clock_oldops.c | 538 +-
 osfmk/kern/coalition.c | 478 +-
 osfmk/kern/coalition.h | 43 +-
 osfmk/kern/copyout_shim.c | 74 +-
 osfmk/kern/copyout_shim.h | 20 +-
 osfmk/kern/counters.c | 24 +-
 osfmk/kern/counters.h | 41 +-
 osfmk/kern/cpu_data.h | 30 +-
 osfmk/kern/cpu_number.h | 32 +-
 osfmk/kern/cpu_quiesce.c | 30 +-
 osfmk/kern/cpu_quiesce.h | 31 +-
 osfmk/kern/cs_blobs.h | 108 +-
 osfmk/kern/debug.c | 408 +-
 osfmk/kern/debug.h | 427 +-
 osfmk/kern/ecc.h | 28 +-
 osfmk/kern/ecc_logging.c | 72 +-
 osfmk/kern/energy_perf.c | 43 +-
 osfmk/kern/energy_perf.h | 8 +-
 osfmk/kern/exc_guard.h | 20 +-
 osfmk/kern/exc_resource.h | 40 +-
 osfmk/kern/exception.c | 301 +-
 osfmk/kern/exception.h | 34 +-
 osfmk/kern/extmod_statistics.c | 20 +-
 osfmk/kern/extmod_statistics.h | 12 +-
 osfmk/kern/gzalloc.c | 123 +-
 osfmk/kern/hibernate.c | 176 +-
 osfmk/kern/host.c | 506 +-
 osfmk/kern/host.h | 46 +-
 osfmk/kern/host_notify.c | 101 +-
 osfmk/kern/host_notify.h | 22 +-
 osfmk/kern/host_statistics.h | 24 +-
 osfmk/kern/hv_support.c | 83 +-
 osfmk/kern/hv_support.h | 10 +-
 osfmk/kern/ipc_clock.c | 71 +-
 osfmk/kern/ipc_host.c | 270 +-
 osfmk/kern/ipc_host.h | 68 +-
 osfmk/kern/ipc_kobject.c | 520 +-
 osfmk/kern/ipc_kobject.h | 137 +-
 osfmk/kern/ipc_mig.c | 424 +-
 osfmk/kern/ipc_mig.h | 212 +-
 osfmk/kern/ipc_misc.c | 70 +-
 osfmk/kern/ipc_sync.c | 39 +-
 osfmk/kern/ipc_sync.h | 22 +-
 osfmk/kern/ipc_tt.c | 883 +-
 osfmk/kern/ipc_tt.h | 84 +-
 osfmk/kern/kalloc.c | 578 +-
 osfmk/kern/kalloc.h | 103 +-
 osfmk/kern/kcdata.h | 260 +-
 osfmk/kern/kern_cdata.c | 134 +-
 osfmk/kern/kern_cdata.h | 14 +-
 osfmk/kern/kern_monotonic.c | 30 +-
 osfmk/kern/kern_stackshot.c | 636 +-
 osfmk/kern/kern_types.h | 218 +-
 osfmk/kern/kext_alloc.c | 232 +-
 osfmk/kern/kext_alloc.h | 9 +-
 osfmk/kern/kmod.c | 110 +-
 osfmk/kern/kpc.h | 47 +-
 osfmk/kern/kpc_common.c | 192 +-
 osfmk/kern/kpc_thread.c | 63 +-
 osfmk/kern/ledger.c | 612 +-
 osfmk/kern/ledger.h | 114 +-
 osfmk/kern/lock.h | 30 +-
 osfmk/kern/lock_group.h | 163 +
 osfmk/kern/lock_stat.h | 325 +
 osfmk/kern/locks.c | 871 +-
 osfmk/kern/locks.h | 647 +-
 osfmk/kern/ltable.c | 268 +-
 osfmk/kern/ltable.h | 39 +-
 osfmk/kern/mach_node.c | 666 +-
 osfmk/kern/mach_node.h | 71 +-
 osfmk/kern/mach_node_link.h | 50 +-
 osfmk/kern/mach_param.h | 48 +-
 osfmk/kern/machine.c | 326 +-
 osfmk/kern/machine.h | 113 +-
 osfmk/kern/macro_help.h | 48 +-
 osfmk/kern/memset_s.c | 13 +-
 osfmk/kern/misc_protos.h | 114 +-
 osfmk/kern/mk_sp.c | 136 +-
 osfmk/kern/mk_timer.c | 167 +-
 osfmk/kern/mk_timer.h | 32 +-
 osfmk/kern/monotonic.h | 2 +-
 osfmk/kern/mpqueue.h | 75 +
 osfmk/kern/page_decrypt.c | 18 +-
 osfmk/kern/page_decrypt.h | 46 +-
 osfmk/kern/pms.h | 134 +-
 osfmk/kern/policy_internal.h | 96 +-
 osfmk/kern/printf.c | 876 +-
 osfmk/kern/priority.c | 248 +-
 osfmk/kern/priority_queue.c | 8 +-
 osfmk/kern/priority_queue.h | 114 +-
 osfmk/kern/processor.c | 770 +-
 osfmk/kern/processor.h | 320 +-
 osfmk/kern/processor_data.c | 12 +-
 osfmk/kern/processor_data.h | 132 +-
 osfmk/kern/queue.h | 688 +-
 osfmk/kern/remote_time.c | 526 +
 osfmk/kern/remote_time.h | 69 +
 osfmk/kern/sched.h | 233 +-
 osfmk/kern/sched_average.c | 149 +-
osfmk/kern/sched_dualq.c | 185 +- osfmk/kern/sched_grrr.c | 238 +- osfmk/kern/sched_multiq.c | 222 +- osfmk/kern/sched_prim.c | 2747 ++- osfmk/kern/sched_prim.h | 590 +- osfmk/kern/sched_proto.c | 301 +- osfmk/kern/sched_traditional.c | 128 +- osfmk/kern/sched_urgency.h | 63 + osfmk/kern/sfi.c | 435 +- osfmk/kern/sfi.h | 10 +- osfmk/kern/simple_lock.h | 232 +- osfmk/kern/smp.h | 18 +- osfmk/kern/spl.c | 18 +- osfmk/kern/spl.h | 24 +- osfmk/kern/stack.c | 197 +- osfmk/kern/startup.c | 160 +- osfmk/kern/startup.h | 34 +- osfmk/kern/sync_lock.c | 41 +- osfmk/kern/sync_lock.h | 18 +- osfmk/kern/sync_sema.c | 552 +- osfmk/kern/sync_sema.h | 30 +- osfmk/kern/syscall_emulation.c | 46 +- osfmk/kern/syscall_subr.c | 127 +- osfmk/kern/syscall_subr.h | 1 - osfmk/kern/syscall_sw.c | 551 +- osfmk/kern/task.c | 2025 +- osfmk/kern/task.h | 669 +- osfmk/kern/task_policy.c | 1792 +- osfmk/kern/task_swap.c | 26 +- osfmk/kern/task_swap.h | 114 +- osfmk/kern/telemetry.c | 235 +- osfmk/kern/telemetry.h | 10 +- osfmk/kern/test_lock.c | 169 +- osfmk/kern/thread.c | 1163 +- osfmk/kern/thread.h | 1200 +- osfmk/kern/thread_act.c | 541 +- osfmk/kern/thread_call.c | 669 +- osfmk/kern/thread_call.h | 500 +- osfmk/kern/thread_group.c | 7 +- osfmk/kern/thread_group.h | 8 +- osfmk/kern/thread_kernel_state.h | 6 +- osfmk/kern/thread_policy.c | 1141 +- osfmk/kern/timer.c | 6 +- osfmk/kern/timer.h | 20 +- osfmk/kern/timer_call.c | 960 +- osfmk/kern/timer_call.h | 129 +- osfmk/kern/timer_queue.h | 157 +- osfmk/kern/tlock.c | 205 + osfmk/kern/trustcache.h | 28 +- osfmk/kern/turnstile.c | 482 +- osfmk/kern/turnstile.h | 30 +- osfmk/kern/ux_handler.c | 151 +- osfmk/kern/ux_handler.h | 1 - osfmk/kern/waitq.c | 1523 +- osfmk/kern/waitq.h | 207 +- osfmk/kern/work_interval.c | 108 +- osfmk/kern/xpr.c | 52 +- osfmk/kern/xpr.h | 106 +- osfmk/kern/zalloc.c | 2560 ++- osfmk/kern/zalloc.h | 354 +- osfmk/kern/zcache.c | 234 +- osfmk/kern/zcache.h | 26 +- osfmk/kperf/action.c | 39 +- osfmk/kperf/action.h | 12 +- osfmk/kperf/arm/kperf_mp.c | 16 +- osfmk/kperf/ast.h | 8 +- osfmk/kperf/buffer.h | 4 +- osfmk/kperf/callstack.c | 598 +- osfmk/kperf/kdebug_trigger.c | 10 +- osfmk/kperf/kperf.c | 22 +- osfmk/kperf/kperf.h | 6 +- osfmk/kperf/kperf_kpc.c | 48 +- osfmk/kperf/kperf_kpc.h | 13 +- osfmk/kperf/kperf_timer.c | 85 +- osfmk/kperf/kperf_timer.h | 14 +- osfmk/kperf/kperfbsd.c | 301 +- osfmk/kperf/kperfbsd.h | 8 +- osfmk/kperf/lazy.c | 27 +- osfmk/kperf/lazy.h | 2 +- osfmk/kperf/meminfo.c | 8 +- osfmk/kperf/meminfo.h | 8 +- osfmk/kperf/pet.c | 26 +- osfmk/kperf/pet.h | 10 +- osfmk/kperf/task_samplers.c | 22 +- osfmk/kperf/thread_samplers.c | 97 +- osfmk/kperf/thread_samplers.h | 20 +- osfmk/kperf/x86_64/kperf_mp.c | 15 +- osfmk/libsa/arm/types.h | 36 +- osfmk/libsa/i386/types.h | 45 +- osfmk/libsa/machine/types.h | 10 +- osfmk/libsa/stdlib.h | 52 +- osfmk/libsa/string.h | 68 +- osfmk/libsa/sys/timers.h | 30 +- osfmk/libsa/types.h | 66 +- osfmk/lockd/lockd_mach_types.h | 12 +- osfmk/mach/arm/_structs.h | 179 +- osfmk/mach/arm/boolean.h | 26 +- osfmk/mach/arm/exception.h | 16 +- osfmk/mach/arm/kern_return.h | 22 +- osfmk/mach/arm/ndr_def.h | 8 +- osfmk/mach/arm/processor_info.h | 53 +- osfmk/mach/arm/rpc.h | 4 +- osfmk/mach/arm/thread_state.h | 6 +- osfmk/mach/arm/thread_status.h | 284 +- osfmk/mach/arm/vm_param.h | 126 +- osfmk/mach/arm/vm_types.h | 76 +- osfmk/mach/boolean.h | 42 +- osfmk/mach/bootstrap.h | 10 +- osfmk/mach/clock_types.h | 98 +- osfmk/mach/coalition.h | 18 +- osfmk/mach/error.h | 74 +- osfmk/mach/events_info.h | 34 +- 
osfmk/mach/exception.h | 26 +- osfmk/mach/exception_types.h | 186 +- osfmk/mach/host_info.h | 250 +- osfmk/mach/host_notify.h | 22 +- osfmk/mach/host_reboot.h | 12 +- osfmk/mach/host_special_ports.h | 176 +- osfmk/mach/i386/_structs.h | 26 + osfmk/mach/i386/boolean.h | 28 +- osfmk/mach/i386/exception.h | 90 +- osfmk/mach/i386/fp_reg.h | 218 +- osfmk/mach/i386/kern_return.h | 30 +- osfmk/mach/i386/ndr_def.h | 16 +- osfmk/mach/i386/processor_info.h | 12 +- osfmk/mach/i386/rpc.h | 15 +- osfmk/mach/i386/thread_state.h | 16 +- osfmk/mach/i386/thread_status.h | 350 +- osfmk/mach/i386/vm_param.h | 165 +- osfmk/mach/i386/vm_types.h | 76 +- osfmk/mach/kern_return.h | 540 +- osfmk/mach/kmod.h | 98 +- osfmk/mach/mach.h | 18 +- osfmk/mach/mach_interface.h | 10 +- osfmk/mach/mach_param.h | 26 +- osfmk/mach/mach_syscalls.h | 12 +- osfmk/mach/mach_time.h | 48 +- osfmk/mach/mach_traps.h | 536 +- osfmk/mach/mach_types.defs | 27 +- osfmk/mach/mach_types.h | 262 +- osfmk/mach/mach_voucher_types.h | 114 +- osfmk/mach/machine.h | 384 +- osfmk/mach/machine/_structs.h | 8 +- osfmk/mach/machine/boolean.h | 8 +- osfmk/mach/machine/exception.h | 8 +- osfmk/mach/machine/kern_return.h | 8 +- osfmk/mach/machine/ndr_def.h | 8 +- osfmk/mach/machine/processor_info.h | 8 +- osfmk/mach/machine/rpc.h | 10 +- osfmk/mach/machine/sdt.h | 360 +- osfmk/mach/machine/thread_state.h | 8 +- osfmk/mach/machine/thread_status.h | 8 +- osfmk/mach/machine/vm_param.h | 8 +- osfmk/mach/machine/vm_types.h | 8 +- osfmk/mach/memory_object.h | 244 +- osfmk/mach/memory_object_types.h | 624 +- osfmk/mach/message.h | 918 +- osfmk/mach/mig.h | 146 +- osfmk/mach/mig_errors.h | 51 +- osfmk/mach/mig_log.h | 17 +- osfmk/mach/mk_timer.h | 46 +- osfmk/mach/mk_traps.h | 12 +- osfmk/mach/msg_type.h | 72 +- osfmk/mach/ndr.h | 84 +- osfmk/mach/notify.h | 98 +- osfmk/mach/policy.h | 180 +- osfmk/mach/port.h | 300 +- osfmk/mach/processor.defs | 11 + osfmk/mach/processor_info.h | 116 +- osfmk/mach/prof_types.h | 52 +- osfmk/mach/resource_monitors.h | 6 +- osfmk/mach/rpc.h | 68 +- osfmk/mach/sdt.h | 4 +- osfmk/mach/semaphore.h | 114 +- osfmk/mach/sfi_class.h | 48 +- osfmk/mach/shared_memory_server.h | 96 +- osfmk/mach/shared_region.h | 158 +- osfmk/mach/std_types.h | 26 +- osfmk/mach/sync_policy.h | 28 +- osfmk/mach/task_info.h | 482 +- osfmk/mach/task_ledger.h | 18 +- osfmk/mach/task_policy.h | 213 +- osfmk/mach/task_special_ports.h | 82 +- osfmk/mach/thread_info.h | 148 +- osfmk/mach/thread_policy.h | 214 +- osfmk/mach/thread_special_ports.h | 38 +- osfmk/mach/thread_status.h | 40 +- osfmk/mach/thread_switch.h | 40 +- osfmk/mach/time_value.h | 56 +- osfmk/mach/vm_attributes.h | 54 +- osfmk/mach/vm_behavior.h | 42 +- osfmk/mach/vm_inherit.h | 38 +- osfmk/mach/vm_param.h | 143 +- osfmk/mach/vm_prot.h | 60 +- osfmk/mach/vm_purgable.h | 122 +- osfmk/mach/vm_region.h | 316 +- osfmk/mach/vm_statistics.h | 497 +- osfmk/mach/vm_sync.h | 30 +- osfmk/mach/vm_types.h | 149 +- osfmk/mach_debug/hash_info.h | 26 +- osfmk/mach_debug/ipc_info.h | 64 +- osfmk/mach_debug/lockgroup_info.h | 71 +- osfmk/mach_debug/mach_debug.h | 12 +- osfmk/mach_debug/mach_debug_types.h | 50 +- osfmk/mach_debug/page_info.h | 28 +- osfmk/mach_debug/vm_info.h | 98 +- osfmk/mach_debug/zone_info.h | 110 +- osfmk/machine/Makefile | 5 +- osfmk/machine/atomic.h | 52 +- osfmk/machine/commpage.h | 18 +- osfmk/machine/config.h | 8 +- osfmk/machine/cpu_affinity.h | 12 +- osfmk/machine/cpu_capabilities.h | 10 +- osfmk/machine/cpu_data.h | 8 +- osfmk/machine/cpu_number.h | 12 +- osfmk/machine/endian.h | 8 +- 
osfmk/machine/io_map_entries.h | 12 +- osfmk/machine/lock.h | 10 +- osfmk/machine/locks.h | 8 +- osfmk/machine/lowglobals.h | 10 +- osfmk/machine/machine_cpu.h | 8 +- osfmk/machine/machine_cpuid.h | 12 +- osfmk/machine/machine_kpc.h | 8 +- osfmk/machine/machine_remote_time.h | 41 + osfmk/machine/machine_routines.h | 8 +- osfmk/machine/machine_rpc.h | 8 +- osfmk/machine/machlimits.h | 8 +- osfmk/machine/machparam.h | 8 +- osfmk/machine/monotonic.h | 2 +- osfmk/machine/pal_hibernate.h | 8 +- osfmk/machine/pal_routines.h | 8 +- osfmk/machine/pmap.h | 8 +- osfmk/machine/sched_param.h | 8 +- osfmk/machine/setjmp.h | 8 +- osfmk/machine/simple_lock.h | 10 +- osfmk/machine/smp.h | 8 +- osfmk/machine/task.h | 8 +- osfmk/machine/thread.h | 8 +- osfmk/machine/trap.h | 8 +- osfmk/machine/vm_tuning.h | 8 +- osfmk/machine/xpr.h | 8 +- osfmk/prng/prng_random.c | 67 +- osfmk/profiling/i386/profile-md.h | 420 +- osfmk/profiling/machine/profile-md.h | 8 +- osfmk/profiling/profile-internal.h | 296 +- osfmk/profiling/profile-kgmon.c | 127 +- osfmk/profiling/profile-mk.c | 65 +- osfmk/profiling/profile-mk.h | 10 +- osfmk/tests/bitmap_test.c | 10 +- osfmk/tests/kernel_tests.c | 176 +- osfmk/tests/ktest.c | 45 +- osfmk/tests/ktest.h | 310 +- osfmk/tests/ktest_accessor.c | 26 +- osfmk/tests/ktest_emit.c | 181 +- osfmk/tests/ktest_global.c | 9 +- osfmk/tests/ktest_internal.h | 7 +- osfmk/tests/pmap_tests.c | 27 +- osfmk/tests/test_thread_call.c | 16 +- osfmk/tests/xnupost.h | 4 +- osfmk/vm/WKdm_new.h | 62 +- osfmk/vm/bsd_vm.c | 839 +- osfmk/vm/cpm.h | 16 +- osfmk/vm/device_vm.c | 291 +- osfmk/vm/lz4.c | 848 +- osfmk/vm/lz4.h | 126 +- osfmk/vm/lz4_assembly_select.h | 8 +- osfmk/vm/lz4_constants.h | 8 +- osfmk/vm/memory_object.c | 1370 +- osfmk/vm/memory_object.h | 106 +- osfmk/vm/pmap.h | 970 +- osfmk/vm/vm32_user.c | 481 +- osfmk/vm/vm_apple_protect.c | 803 +- osfmk/vm/vm_compressor.c | 2027 +- osfmk/vm/vm_compressor.h | 444 +- osfmk/vm/vm_compressor_algorithms.c | 116 +- osfmk/vm/vm_compressor_algorithms.h | 10 +- osfmk/vm/vm_compressor_backing_store.c | 850 +- osfmk/vm/vm_compressor_backing_store.h | 62 +- osfmk/vm/vm_compressor_pager.c | 673 +- osfmk/vm/vm_compressor_pager.h | 140 +- osfmk/vm/vm_debug.c | 431 +- osfmk/vm/vm_debug.h | 18 +- osfmk/vm/vm_external.h | 32 +- osfmk/vm/vm_fault.c | 2548 +- osfmk/vm/vm_fault.h | 168 +- osfmk/vm/vm_fourk_pager.c | 627 +- osfmk/vm/vm_init.c | 90 +- osfmk/vm/vm_init.h | 8 +- osfmk/vm/vm_kern.c | 914 +- osfmk/vm/vm_kern.h | 538 +- osfmk/vm/vm_map.c | 8282 +++---- osfmk/vm/vm_map.h | 1839 +- osfmk/vm/vm_map_store.c | 76 +- osfmk/vm/vm_map_store.h | 115 +- osfmk/vm/vm_map_store_ll.c | 176 +- osfmk/vm/vm_map_store_ll.h | 14 +- osfmk/vm/vm_map_store_rb.c | 212 +- osfmk/vm/vm_map_store_rb.h | 16 +- osfmk/vm/vm_object.c | 3271 +-- osfmk/vm/vm_object.h | 1430 +- osfmk/vm/vm_options.h | 8 +- osfmk/vm/vm_page.h | 1750 +- osfmk/vm/vm_pageout.c | 5113 +++-- osfmk/vm/vm_pageout.h | 777 +- osfmk/vm/vm_phantom_cache.c | 230 +- osfmk/vm/vm_phantom_cache.h | 46 +- osfmk/vm/vm_protos.h | 426 +- osfmk/vm/vm_purgeable.c | 731 +- osfmk/vm/vm_purgeable_internal.h | 36 +- osfmk/vm/vm_resident.c | 6122 ++--- osfmk/vm/vm_shared_region.c | 1409 +- osfmk/vm/vm_shared_region.h | 313 +- osfmk/vm/vm_shared_region_pager.c | 484 +- osfmk/vm/vm_swapfile_pager.c | 393 +- osfmk/vm/vm_user.c | 2939 +-- osfmk/voucher/ipc_pthread_priority.c | 62 +- osfmk/voucher/ipc_pthread_priority_internal.h | 1 - osfmk/voucher/ipc_pthread_priority_types.h | 4 +- osfmk/x86_64/Makefile | 3 +- osfmk/x86_64/bcopy.s | 13 +- 
osfmk/x86_64/boot_pt.c | 60 +- osfmk/x86_64/bzero.s | 25 +- osfmk/x86_64/copyio.c | 275 +- osfmk/x86_64/idt64.s | 696 +- osfmk/x86_64/idt_table.h | 80 +- osfmk/x86_64/kpc_x86.c | 107 +- osfmk/x86_64/loose_ends.c | 680 +- osfmk/x86_64/lowglobals.h | 56 +- osfmk/x86_64/lowmem_vectors.c | 69 +- osfmk/x86_64/machine_kpc.h | 8 +- osfmk/x86_64/machine_remote_time.c | 87 + osfmk/x86_64/machine_remote_time.h | 39 + osfmk/x86_64/monotonic_x86_64.c | 19 +- osfmk/x86_64/pmap.c | 1992 +- osfmk/x86_64/pmap_pcid.c | 235 +- osfmk/x86_64/start.s | 4 +- pexpert/arm/pe_consistent_debug.c | 43 +- pexpert/arm/pe_identify_machine.c | 297 +- pexpert/arm/pe_init.c | 184 +- pexpert/arm/pe_kprintf.c | 45 +- pexpert/arm/pe_serial.c | 397 +- pexpert/gen/bootargs.c | 279 +- pexpert/gen/device_tree.c | 69 +- pexpert/gen/pe_gen.c | 193 +- pexpert/i386/boot_images.h | 224 +- pexpert/i386/pe_bootargs.c | 10 +- pexpert/i386/pe_identify_machine.c | 71 +- pexpert/i386/pe_init.c | 467 +- pexpert/i386/pe_interrupt.c | 45 +- pexpert/i386/pe_kprintf.c | 54 +- pexpert/i386/pe_serial.c | 635 +- pexpert/pexpert/AppleBoot.h | 274 +- pexpert/pexpert/Clut.h | 136 +- pexpert/pexpert/GearImage.h | 8080 +++---- pexpert/pexpert/arm/AIC.h | 118 +- pexpert/pexpert/arm/PL192_VIC.h | 42 +- pexpert/pexpert/arm/S3cUART.h | 2 +- pexpert/pexpert/arm/S7002.h | 52 +- pexpert/pexpert/arm/T8002.h | 40 +- pexpert/pexpert/arm/board_config.h | 6 +- pexpert/pexpert/arm/boot.h | 64 +- pexpert/pexpert/arm/consistent_debug.h | 51 +- pexpert/pexpert/arm/protos.h | 2 +- pexpert/pexpert/arm64/AIC.h | 15 +- pexpert/pexpert/arm64/BCM2837.h | 84 +- pexpert/pexpert/arm64/S3c2410x.h | 48 +- pexpert/pexpert/arm64/S5L8960X.h | 4 +- pexpert/pexpert/arm64/S8000.h | 2 +- pexpert/pexpert/arm64/T7000.h | 4 +- pexpert/pexpert/arm64/T8010.h | 32 +- pexpert/pexpert/arm64/board_config.h | 9 +- pexpert/pexpert/arm64/boot.h | 65 +- pexpert/pexpert/arm64/cyclone.h | 12 +- pexpert/pexpert/arm64/hurricane.h | 10 +- pexpert/pexpert/arm64/twister.h | 6 +- pexpert/pexpert/arm64/typhoon.h | 6 +- pexpert/pexpert/boot.h | 8 +- pexpert/pexpert/device_tree.h | 229 +- pexpert/pexpert/i386/boot.h | 236 +- pexpert/pexpert/i386/efi.h | 503 +- pexpert/pexpert/i386/protos.h | 20 +- pexpert/pexpert/machine/boot.h | 8 +- pexpert/pexpert/machine/protos.h | 8 +- pexpert/pexpert/pe_images.h | 34 +- pexpert/pexpert/pexpert.h | 198 +- pexpert/pexpert/protos.h | 30 +- san/Kasan_kasan.exports | 1 + san/kasan-arm64.c | 24 +- san/kasan-fakestack.c | 7 +- san/kasan-test.c | 175 +- san/kasan-x86_64.c | 80 +- san/kasan.c | 3 +- san/kasan.h | 6 +- san/kasan_dynamic_blacklist.c | 12 +- san/kasan_internal.h | 20 +- san/memintrinsics.h | 78 +- san/ubsan.c | 86 +- san/ubsan_log.c | 10 +- security/_label.h | 26 +- security/mac.h | 84 +- security/mac_alloc.c | 47 +- security/mac_alloc.h | 38 +- security/mac_audit.c | 140 +- security/mac_base.c | 823 +- security/mac_data.c | 17 +- security/mac_data.h | 98 +- security/mac_file.c | 61 +- security/mac_framework.h | 880 +- security/mac_inet.c | 57 +- security/mac_internal.h | 272 +- security/mac_iokit.c | 27 +- security/mac_kext.c | 16 +- security/mac_label.c | 23 +- security/mac_mach.c | 54 +- security/mac_net.c | 116 +- security/mac_pipe.c | 94 +- security/mac_policy.h | 9984 ++++---- security/mac_posix_sem.c | 72 +- security/mac_posix_shm.c | 81 +- security/mac_priv.c | 18 +- security/mac_process.c | 189 +- security/mac_pty.c | 6 +- security/mac_skywalk.c | 5 +- security/mac_socket.c | 362 +- security/mac_system.c | 95 +- security/mac_sysv_msg.c | 103 +- 
security/mac_sysv_sem.c | 47 +- security/mac_sysv_shm.c | 56 +- security/mac_vfs.c | 1118 +- security/mac_vfs_subr.c | 74 +- tests/Makefile | 37 +- tests/atm_diagnostic_flag.c | 82 +- tests/avx.c | 584 +- tests/backtracing.c | 331 +- tests/contextswitch.c | 375 +- tests/cpucount.c | 57 +- tests/data_protection.c | 366 +- tests/disk_mount_conditioner.c | 54 +- tests/drop_priv.c | 44 +- tests/exc_resource_threads.c | 71 +- tests/freebsd_waitpid_nohang.c | 2 +- tests/gettimeofday.c | 4 +- tests/host_notifications.c | 26 +- tests/host_statistics_rate_limiting.c | 102 +- tests/ioperf.c | 326 +- tests/jumbo_va_spaces_28530648.c | 4 + tests/kdebug.c | 401 +- tests/kernel_mtx_perf.c | 93 +- tests/kevent_continuous_time.c | 45 +- tests/kevent_pty.c | 44 +- tests/kevent_qos.c | 559 +- tests/kpc.c | 16 +- tests/kperf.c | 228 +- tests/kperf_backtracing.c | 578 +- tests/kperf_helpers.c | 22 +- tests/kqueue_add_and_trigger.c | 4 +- tests/kqueue_close.c | 6 +- tests/kqueue_fifo_18776047.c | 5 +- tests/kqueue_file_tests.c | 937 +- tests/kqueue_timer_tests.c | 60 +- tests/ldt.c | 1164 + tests/ldt_code32.s | 285 + tests/legacy_footprint.entitlement | 8 + tests/ltable_exhaustion_test.c | 8 +- tests/mach_boottime_usec.c | 2 +- tests/mach_continuous_time.c | 57 +- tests/mach_exc.defs | 57 + tests/mach_get_times.c | 8 +- tests/mach_port_deallocate_21692215.c | 16 +- tests/mach_port_insert_right.c | 2 +- tests/mach_port_mod_refs.c | 4 +- tests/mach_timebase_info.c | 2 +- tests/memorystatus_freeze_test.c | 103 +- tests/memorystatus_vm_map_fork.c | 84 +- tests/memorystatus_zone_test.c | 281 +- tests/mktimer_kobject.c | 3 +- tests/monotonic_core.c | 62 +- tests/net_tun_pr_35136664.c | 5 +- tests/net_tuntests.c | 167 +- tests/netbsd_utimensat.c | 13 +- tests/ntp_adjtime_29192647.c | 107 +- tests/perf_compressor.c | 172 +- tests/perf_exit.c | 25 +- tests/perf_exit_proc.c | 24 +- tests/perf_kdebug.c | 71 +- tests/perf_spawn_fork.c | 44 +- tests/perf_vmfault.c | 377 +- tests/phys_footprint_interval_max.c | 13 +- tests/poll.c | 38 +- tests/poll_select_kevent_paired_fds.c | 313 +- tests/port_descriptions.c | 44 +- tests/proc_core_name_24152432.c | 26 +- tests/proc_info.c | 1020 +- tests/proc_info_list_kthreads.c | 23 +- tests/proc_info_udata.c | 12 +- tests/processor_info.c | 104 + tests/pwrite_avoid_sigxfsz_28581610.c | 22 +- tests/quiesce_counter.c | 8 +- tests/regression_17272465.c | 2 +- tests/remote_time.c | 4 +- tests/settimeofday_29193041.c | 45 +- tests/settimeofday_29193041_entitled.c | 50 +- tests/sigchld_return.c | 52 +- tests/sigcont_return.c | 27 +- tests/socket_bind_35243417.c | 42 +- tests/socket_bind_35685803.c | 79 +- tests/socket_poll_close_25786011.c | 2 +- tests/stackshot_spawn_exit_stress.c | 38 +- tests/suspended_spawn_26184412.c | 5 +- tests/task_info.c | 128 +- tests/task_info_28439149.c | 19 +- tests/task_inspect.c | 38 +- tests/telemetry.c | 153 +- tests/testposixshm.c | 218 + tests/tty_hang.c | 45 +- tests/turnstile_multihop.c | 238 +- tests/turnstile_multihop_helper.h | 20 +- tests/turnstile_multihop_types.h | 30 +- tests/turnstiles_test.c | 18 +- tests/utimensat.c | 10 +- tests/verify_kalloc_config.c | 20 +- tests/vm_phys_footprint.c | 1221 + tests/vm_phys_footprint_legacy.c | 1223 + tests/vm_set_max_addr_helper.c | 4 +- tests/vm_set_max_addr_test.c | 7 +- tests/voucher_entry_18826844.c | 5 +- tests/voucher_traps.c | 26 +- tests/wired_mem_bench.c | 30 +- tests/work_interval_test.c | 5 +- tests/workq_sigprof.c | 73 +- tests/xnu_quick_test.c | 80 +- tests/xnu_quick_test_entitled.c | 34 +- 
tests/xnu_quick_test_helpers.c | 192 +- tests/xnu_quick_test_helpers.h | 2 +- tools/cred_dump_backtraces.c | 51 +- tools/cred_dump_creds.c | 74 +- tools/lldbmacros/Makefile | 10 +- tools/lldbmacros/kauth.py | 7 +- tools/lldbmacros/kcdata.py | 94 +- tools/lldbmacros/memory.py | 7 +- tools/lldbmacros/misc.py | 31 + tools/lldbmacros/pmap.py | 3 +- tools/lldbmacros/scheduler.py | 4 + tools/lldbmacros/xnu.py | 129 + tools/lockstat/lockstat.c | 173 +- tools/reindent.sh | 27 - tools/tests/MPMMTest/KQMPMMtest.c | 557 +- tools/tests/MPMMTest/MPMMtest.c | 558 +- tools/tests/TLBcoherency/TLBcoherency.c | 80 +- tools/tests/affinity/pool.c | 270 +- tools/tests/affinity/sets.c | 276 +- tools/tests/affinity/tags.c | 93 +- tools/tests/execperf/exit.c | 10 +- tools/tests/execperf/printexecinfo.c | 40 +- tools/tests/execperf/run.c | 138 +- tools/tests/jitter/timer_jitter.c | 174 +- tools/tests/mktimer/mktimer_test.c | 12 +- .../perf_index/PerfIndex_COPS_Module/PITest.h | 24 +- tools/tests/perf_index/md5.c | 220 +- tools/tests/perf_index/perf_index.c | 349 +- tools/tests/perf_index/perfindex-compile.c | 56 +- tools/tests/perf_index/perfindex-cpu.c | 12 +- tools/tests/perf_index/perfindex-fault.c | 4 +- .../tests/perf_index/perfindex-file_create.c | 20 +- tools/tests/perf_index/perfindex-file_read.c | 25 +- tools/tests/perf_index/perfindex-file_write.c | 25 +- tools/tests/perf_index/perfindex-iperf.c | 18 +- tools/tests/perf_index/perfindex-memory.c | 120 +- .../perf_index/perfindex-ram_file_create.c | 20 +- .../perf_index/perfindex-ram_file_read.c | 24 +- .../perf_index/perfindex-ram_file_write.c | 24 +- tools/tests/perf_index/perfindex-syscall.c | 10 +- tools/tests/perf_index/perfindex-zfod.c | 4 +- tools/tests/perf_index/ramdisk.c | 44 +- tools/tests/perf_index/test_fault_helper.c | 135 +- tools/tests/perf_index/test_fault_helper.h | 4 +- tools/tests/perf_index/test_file_helper.c | 313 +- tools/tests/personas/persona_mgr.c | 81 +- tools/tests/personas/persona_spawn.c | 61 +- tools/tests/personas/persona_test.h | 106 +- tools/tests/personas/persona_test_run.sh | 12 +- tools/tests/superpages/measure_tlbs.c | 120 +- tools/tests/superpages/testsp.c | 452 +- tools/tests/testkext/pgokext/pgokext.c | 10 +- tools/tests/testkext/testthreadcall.cpp | 128 +- tools/tests/testkext/testthreadcall.h | 8 +- tools/tests/testkext/testvmx.cpp | 40 +- tools/tests/testkext/testvmx.h | 11 +- tools/tests/zero-to-n/zero-to-n.c | 438 +- 2590 files changed, 431718 insertions(+), 388430 deletions(-) create mode 100644 EXTERNAL_HEADERS/img4/nonce.h create mode 100644 bsd/dev/dtrace/lockprof.c create mode 100644 bsd/net/skywalk_stubs.c delete mode 100644 iokit/.clang-format create mode 100644 iokit/bsddev/skywalk/IOSkywalkSupport.cpp delete mode 120000 libkern/.clang-format create mode 100644 libkern/os/hash.h create mode 100644 osfmk/arm64/machine_remote_time.c create mode 100644 osfmk/arm64/machine_remote_time.h create mode 100644 osfmk/kern/lock_group.h create mode 100644 osfmk/kern/lock_stat.h create mode 100644 osfmk/kern/mpqueue.h create mode 100644 osfmk/kern/remote_time.c create mode 100644 osfmk/kern/remote_time.h create mode 100644 osfmk/kern/sched_urgency.h create mode 100644 osfmk/kern/tlock.c create mode 100644 osfmk/machine/machine_remote_time.h create mode 100644 osfmk/x86_64/machine_remote_time.c create mode 100644 osfmk/x86_64/machine_remote_time.h create mode 100644 tests/ldt.c create mode 100644 tests/ldt_code32.s create mode 100644 tests/legacy_footprint.entitlement create mode 100644 tests/mach_exc.defs create 
mode 100644 tests/processor_info.c create mode 100644 tests/testposixshm.c create mode 100644 tests/vm_phys_footprint.c create mode 100644 tests/vm_phys_footprint_legacy.c delete mode 100755 tools/reindent.sh diff --git a/EXTERNAL_HEADERS/architecture/i386/desc.h b/EXTERNAL_HEADERS/architecture/i386/desc.h index ee7891783..a13a5590e 100644 --- a/EXTERNAL_HEADERS/architecture/i386/desc.h +++ b/EXTERNAL_HEADERS/architecture/i386/desc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -50,7 +50,8 @@ typedef struct code_desc { dpl :2, present :1; unsigned char limit16 :4, - :2, + :1, + Lflag :1, opsz :1, #define DESC_CODE_16B 0 #define DESC_CODE_32B 1 diff --git a/EXTERNAL_HEADERS/img4/api.h b/EXTERNAL_HEADERS/img4/api.h index 56b875bc2..ecaf2efed 100644 --- a/EXTERNAL_HEADERS/img4/api.h +++ b/EXTERNAL_HEADERS/img4/api.h @@ -9,6 +9,10 @@ #error "Please #include <img4/img4.h> instead of this file directly" #endif // __IMG4_INDIRECT +#if IMG4_TAPI +#include +#endif + #ifndef KERNEL #include <os/availability.h> #endif @@ -36,14 +40,23 @@ * individual preprocessor macros in this header that declare new behavior as * required. */ -#define IMG4_API_VERSION (20180112u) +#define IMG4_API_VERSION (20181106u) #if !defined(KERNEL) && !IMG4_PROJECT_BUILD #define IMG4_API_AVAILABLE_20180112 \ __API_UNAVAILABLE(macos) \ API_AVAILABLE(ios(12.0), tvos(12.0), watchos(5.0)) +#define IMG4_API_AVAILABLE_20181004 \ + __API_UNAVAILABLE(macos) \ + API_AVAILABLE(ios(12.2), tvos(12.2), watchos(5.2)) +#define IMG4_API_AVAILABLE_20181106 \ + __API_UNAVAILABLE(macos) \ + API_AVAILABLE(ios(12.2), tvos(12.2), watchos(5.2)) +#define IMG4_API_AVAILABLE_20181106 #else #define IMG4_API_AVAILABLE_20180112 +#define IMG4_API_AVAILABLE_20181004 +#define IMG4_API_AVAILABLE_20181106 #endif /*! diff --git a/EXTERNAL_HEADERS/img4/environment.h b/EXTERNAL_HEADERS/img4/environment.h index d5c4f4902..5f5ba1d02 100644 --- a/EXTERNAL_HEADERS/img4/environment.h +++ b/EXTERNAL_HEADERS/img4/environment.h @@ -1,6 +1,6 @@ /*! * @header - * Image4 environment interfaces. + * Image4 environments. */ #ifndef __IMG4_ENVIRONMENT_H #define __IMG4_ENVIRONMENT_H @@ -9,306 +9,41 @@ #error "Please #include <img4/img4.h> instead of this file directly" #endif // __IMG4_INDIRECT -/*! - * @const IMG4_ENVIRONMENT_VERSION - * The version of the {@link img4_environment_t} structure supported by the - * implementation. See {@link _img4_environment} for complete definition. - */ -#define IMG4_ENVIRONMENT_VERSION ((img4_struct_version_t)0) - -/*! - * @typedef img4_crypto_selector_t - * A CoreCrypto selector routine. - */ -IMG4_API_AVAILABLE_20180112 -typedef const struct ccdigest_info *(*img4_crypto_selector_t)(void); - -/*! - * @typedef img4_crypto_t - * A structure describing a crypto algorithm used by Image4. - * - * @property i4c_name - * The human-readable string for the crypto algorithm (e.g. "sha1"). - * - * @property i4c_select - * The CoreCrypto selector routine for the algorithm - * - * @property i4c_hash_len - * The length of the hash computed by the algorithm. - * - * @property i4c_truncated_hash_len - * The truncated length of the hash computed by the algorithm. - * - * @property __opaque - * Reserved for the implementation. 
- */ -IMG4_API_AVAILABLE_20180112 -typedef struct _img4_crypto { - const char *i4c_name; - img4_crypto_selector_t i4c_select; - uint32_t i4c_hash_len; - uint32_t i4c_truncated_hash_len; - const void *__opaque; -} img4_crypto_t; - -/*! - * @const IMG4_CRYPTO_SHA1 - * The Image4 SHA1 implementation. - */ -IMG4_API_AVAILABLE_20180112 -OS_EXPORT -const img4_crypto_t _img4_crypto_sha1; -#define IMG4_CRYPTO_SHA1 (&_img4_crypto_sha1) - -/*! - * @const IMG4_CRYPTO_SHA384 - * The Image4 SHA-384 implementation. - */ -IMG4_API_AVAILABLE_20180112 -OS_EXPORT -const img4_crypto_t _img4_crypto_sha384; -#define IMG4_CRYPTO_SHA384 (&_img4_crypto_sha384) - /*! * @typedef img4_environment_t - * A type describing an Image4 environment. + * An opaque type describing an Image4 environment. */ -IMG4_API_AVAILABLE_20180112 typedef struct _img4_environment img4_environment_t; -/*! - * @typedef img4_environment_get_crypto_t - * A function which obtains a crypto descriptor for the host environment. - * - * @param i4e - * The environment descriptor. - * - * @param crypto - * A pointer to the storage in which the pointer to the host's crypto descriptor - * will be written. - * - * @param ctx - * The context pointer supplied to {@link img4_init}. - * - * @result - * Upon successfully fetching the property value, zero should be returned. - * Otherwise, the following error codes should be returned: - * - * [ENOENT] The property does not exist in the environment - */ -IMG4_API_AVAILABLE_20180112 -typedef errno_t (*img4_environment_get_crypto_t)( - const img4_environment_t *i4e, - const img4_crypto_t **crypto, - const void *ctx); - -/*! - * @typedef img4_environment_get_bool_t - * A function which obtains a Boolean property from the host environment. - * - * @param val - * A pointer to storage in which the value will be written. - * - * @param ctx - * The context pointer supplied to {@link img4_init}. - * - * @result - * Upon successfully fetching the property value, zero should be returned. - * Otherwise, the following error codes should be returned: - * - * [ENOENT] The property does not exist in the environment - * [EFTYPE] The property is not expressible as a Boolean - */ -IMG4_API_AVAILABLE_20180112 -typedef errno_t (*img4_environment_get_bool_t)( - const img4_environment_t *i4e, - bool *val, - const void *ctx); - -/*! - * @typedef img4_environment_get_uint32_t - * A function which obtains an unsigned 32-bit integer property from the host - * environment. - * - * @param val - * A pointer to storage in which the value will be written. - * - * @param ctx - * The context pointer supplied to {@link img4_init}. - * - * @result - * Upon successfully fetching the property value, zero should be returned. - * Otherwise, the following error codes should be returned: - * - * [ENOENT] The property does not exist in the environment - * [EFTYPE] The property is not expressible as an unsigned 32-bit integer - */ -IMG4_API_AVAILABLE_20180112 -typedef errno_t (*img4_environment_get_uint32_t)( - const img4_environment_t *i4e, - uint32_t *val, - const void *ctx); - -/*! - * @typedef img4_environment_get_uint64_t - * A function which obtains an unsigned 64-bit integer property from the host - * environment. - * - * @param val - * A pointer to storage in which the value will be written. - * - * @param ctx - * The context pointer supplied to {@link img4_init}. - * - * @result - * Upon successfully fetching the property value, zero should be returned. 
- * Otherwise, the following error codes should be returned: - * - * [ENOENT] The property does not exist in the environment - * [EFTYPE] The property is not expressible as an unsigned 64-bit - * integer - */ -IMG4_API_AVAILABLE_20180112 -typedef errno_t (*img4_environment_get_uint64_t)( - const img4_environment_t *i4e, - uint64_t *val, - const void *ctx); - -/*! - * @typedef img4_environment_get_data_t - * A function which obtains a property which is a raw sequence of bytes from the - * host environment. - * - * @param bytes - * A pointer to storage in which the value will be written. - * - * @param len - * A pointer to the length of the buffer referred to be {@link val}. Upon - * successful return, this storage should contain the number of bytes written. - * - * @param ctx - * The context pointer supplied to {@link img4_init}. - * - * @result - * Upon successfully fetching the property value, zero should be returned. - * Otherwise, the following error codes should be returned: - * - * [ENOENT] The property does not exist in the environment - * [EFTYPE] The property is not expressible as a raw sequence of bytes - * [ERANGE] The buffer was not large enough to hold the property - */ -IMG4_API_AVAILABLE_20180112 -typedef errno_t (*img4_environment_get_data_t)( - const img4_environment_t *i4e, - uint8_t *bytes, - uint32_t *len, - const void *ctx); - -/*! - * @struct _img4_environment - * A type describing a host environment. - * - * @property i4e_version - * The version of the environment structure. Pass - * {@link IMG4_ENVIRONMENT_VERSION}. - * - * @property i4e_name - * A human-readable description of the environment. - * - * @property i4e_crypto - * A pointer to a function which returns the crypto implementation for the - * environment. - * - * @property i4e_cert_epoch - * A pointer to a function which returns the certificate epoch for the - * environment. - * - * @property i4e_board_id - * A pointer to a function which returns the board identifier for the - * environment. - * - * @property i4e_chip_id - * A pointer to a function which returns the chip design identifier for the - * environment. - * - * @property i4e_ecid - * A pointer to a function which returns the unique chip identifier for the - * environment. - * - * @property i4e_security_domain - * A pointer to a function which returns the security domain for the - * environment. - * - * @property i4e_cert_prod - * A pointer to a function which returns the certificate production status for - * the environment. This indicates whether the environment's leaf certificate - * must be production or development. - * - * - true the environment's leaf certificate must be production - * - false the environment's leaf certificate may be development - * - * @property i4e_cert_security - * A pointer to a function which returns the certificate security mode for the - * environment. This indicates Whether the leaf certificate must be secure. - * - * @property i4e_ap_nonce_hash - * A pointer to a function which returns the hash of the AP nonce for the - * environment. - * - * @property i4e_prevent_mixnmatch - * A pointer to a function which returns whether the environment prevents mix- - * n-match. - * - * - true the environment disallows mix-n-match - * - false the environment allows mix-n-match - * - * @property i4e_boot_manifest_hash - * A pointer to a function which returns the hash of the manifest from which - * mix-n-match policy derives. 
- * - * @property i4e_eff_security - * A pointer to a function which returns the effective security mode for the - * environment. - * - * @property i4e_eff_prod - * A pointer to a function which returns the effective production status for the - * environment. - * - * @property i4e_ap_nonce_trust - * A pointer to a function which returns whether the AP nonce must be - * exclusively fetched from main memory. - * - * - true the AP nonce hash must be fetched from main memory exclusively; - * persistent storage is not trustworthy - * - false the AP nonce hash may be fetched from persistent storage - */ -struct _img4_environment { - img4_struct_version_t i4e_version; - const char *i4e_name; - img4_environment_get_crypto_t i4e_crypto; - img4_environment_get_uint32_t i4e_cert_epoch; - img4_environment_get_uint32_t i4e_board_id; - img4_environment_get_uint32_t i4e_chip_id; - img4_environment_get_uint64_t i4e_ecid; - img4_environment_get_uint32_t i4e_security_domain; - img4_environment_get_bool_t i4e_cert_prod; - img4_environment_get_bool_t i4e_cert_security; - img4_environment_get_data_t i4e_ap_nonce_hash; - img4_environment_get_bool_t i4e_prevent_mixnmatch; - img4_environment_get_data_t i4e_boot_manifest_hash; - img4_environment_get_bool_t i4e_eff_prod; - img4_environment_get_bool_t i4e_eff_security; - img4_environment_get_bool_t i4e_ap_nonce_trust; -} IMG4_API_AVAILABLE_20180112; - /*! * @const IMG4_ENVIRONMENT_PLATFORM * The environment for the host that uses the default platform implementation to - * resolve the environment. + * resolve the environment. This is the environment against which manifests are + * personalized. */ +#if !MACH_KERNEL_PRIVATE IMG4_API_AVAILABLE_20180112 OS_EXPORT const struct _img4_environment _img4_environment_platform; #define IMG4_ENVIRONMENT_PLATFORM (&_img4_environment_platform) +#else +#define IMG4_ENVIRONMENT_PLATFORM (img4if->i4if_environment_platform) +#endif + + +/*! + * @const IMG4_ENVIRONMENT_TRUST_CACHE + * The software environment for globally-signed loadable trust caches. This + * environment should be used as a fallback when validation against the platform + * fails, and the caller is handling a loadable trust cache. + */ +#if !MACH_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20181004 +OS_EXPORT +const struct _img4_environment _img4_environment_trust_cache; +#define IMG4_ENVIRONMENT_TRUST_CACHE (&_img4_environment_trust_cache) +#else +#define IMG4_ENVIRONMENT_TRUST_CACHE (img4if->i4if_environment_trust_cache) +#endif #endif // __IMG4_ENVIRONMENT_H diff --git a/EXTERNAL_HEADERS/img4/img4.h b/EXTERNAL_HEADERS/img4/img4.h index 13b053fc9..c3faf5a28 100644 --- a/EXTERNAL_HEADERS/img4/img4.h +++ b/EXTERNAL_HEADERS/img4/img4.h @@ -4,7 +4,7 @@ * for authenticating and validating Image4 manifests as being authoritative. * These concepts are: * - * Environment + * @section Environment * An environment is a description of a host comprised of hardware identifiers * and policy configurations. For example, the environment of an iPhone may * include the following hardware identifiers (among others): @@ -26,7 +26,7 @@ * should be prevented from being executed on the host environment. The * default is true. * - * Manifest + * @section Manifest * An Image4 manifest is a set of constraints that describe a host environment. * For example, a manifest may have been signed such that it is only valid for a * single host environment. 
In this case, the manifest may include specific @@ -39,7 +39,7 @@ * The manifest also includes one or more objects which may be executed in the * environment. * - * Object + * @section Object * An object is a description of a payload. An object can describe any payload, * not just the payload that is in the Image4. An object describes a payload by * means of its digest. Examples of objects present in a secure boot manifest @@ -69,6 +69,63 @@ * - a type of object (e.g. 'krnl') * * Tags comprised of all-caps are reserved for the Image4 specification. + * + * @section Secure Boot Policy + * Manifests are evaluated with the Secure Boot evaluation policy. Broadly + * speaking, this policy: + * + * - enforces that manifest identifiers match the host's silicon + * identifiers, + * - enforces that the epoch of the certificate which signed the manifest is + * greater than or equal to the host silicon's epoch + * - enforces that the current manifest is the same one that was used in the + * previous stage of Secure Boot unless mix-n-match is allowed + * + * For manifests which lack a CHMH property, mix-n-match policy is enforced as + * follows + * + * (1) If the previous stage of Secure Boot disallows mix-n-match and the + * manifest does not possess the AMNM entitlement, the hash of the + * manifest will be enforced against the hash of the manifest which was + * evaluated by the previous stage of Secure Boot. + * + * (2) If the previous stage of Secure Boot allows mix-n-match or the manifest + * possesses the AMNM entitlement, the manifest's constraints will be + * enforced on the environment, but the manifest will not be expected to + * be consistent with the manifest evaluated in the previous stage of + * Secure Boot, i.e. the hash of the previous manifest will not be + * enforced against the manifest being evaluated. + * + * Enforcement of the manifest's constraints will include the value of the + * BNCH tag in the manifest, if any. Therefore the caller should always + * provide a nonce value to the implementation via {@link img4_set_nonce} + * if this option is used. + * + * For manifests which possess a CHMH property, mix-n-match policy is enforced + * as follows: + * + * (1) If the previous stage of Secure Boot disallows mix-n-match or the + * manifest does not possess the AMNM entitlement, the value of the CHMH + * property will be enforced against the hash of the manifest which was + * evaluated by the previous stage of Secure Boot. + * + * (2) If the previous stage of Secure Boot allows mix-n-match and the + * manifest possesses the AMNM entitlement, all of the manifest's + * constraints will be enforced on the environment except for the CHMH + * constraint, which will be ignored. + * + * Enforcement of the manifest's constraints will include the value of the + * BNCH tag in the manifest, if any. Therefore the caller should always + * provide a nonce value to the implementation via {@link img4_set_nonce} + * if this option is used. + * + * The CHMH policy may be expressed as the following truth table: + * + * AMNM [manifest] Verify Manifest Hash [environment] Enforce CHMH + * 0 0 Y + * 0 1 Y + * 1 0 N + * 1 1 Y */ @@ -86,7 +143,7 @@ * This header is used in the pmap layer in xnu, which is in osfmk, which does * not have access to most of the BSD headers. (But for some reason it does have * access to sys/cdefs.h.) 
The only thing we need from that header is the - * errno_t typedef though, so if we can't get to it, then just typeded it + * errno_t typedef though, so if we can't get to it, then just typedef it * ourselves. */ #if MACH_KERNEL_PRIVATE @@ -127,47 +184,6 @@ OS_ENUM(img4_section, uint8_t, IMG4_SECTION_RESTOREINFO, ) IMG4_API_AVAILABLE_20180112; -/*! - * @typedef img4_custom_tag_handler_t - * A handler for a tag unrecognized by the implementation. - * - * @param tag - * The FourCC tag. - * - * @param ctx - * The user-provided context pointer given to either - * {@link img4_get_trusted_payload} or - * {@link img4_get_trusted_external_payload}. - */ -IMG4_API_AVAILABLE_20180112 -typedef errno_t (*img4_custom_tag_handler_t)( - img4_tag_t tag, - img4_section_t section, - void *ctx); - -/*! - * @typedef img4_custom_tag_t - * A type describing a custom tag and its handler. - * - * @property i4ct_tag - * The FourCC tag. - * - * @property i4ct_section - * The section in which the tag is expected. If {@link IMG4_SECTION_OBJECT} is - * given, the object corresponding to the tag given to - * {@link img4_get_trusted_payload} or {@link img4_get_trusted_external_payload} - * will be consulted for the tag. - * - * @property i4ct_handler - * The handler for the tag. - */ -IMG4_API_AVAILABLE_20180112 -typedef struct _img4_custom_tag { - img4_tag_t i4ct_tag; - img4_section_t i4ct_section; - img4_custom_tag_handler_t i4ct_handler; -} img4_custom_tag_t; - /*! * @typedef img4_destructor_t * A type describing a destructor routine for an Image4 object. @@ -201,35 +217,37 @@ typedef void (*img4_destructor_t)( * @const I4F_FORCE_MIXNMATCH * Causes the implementation to bypass mix-n-match policy evaluation and always * allow mix-n-match, irrespective of the previous boot stage's conclusion or - * manifest policy. + * manifest policy. This also allows replay of manifests whose personalization + * has been invalidated by rolling the nonce. * * This option is for testing purposes only and is not respected on the RELEASE * variant of the implementation. + * + * @const I4F_FIRST_STAGE + * Indicates that the manifest being evaluated is the first link in the secure + * boot chain. This causes the implementation to enforce the manifest directly + * on the environment rather than requiring that a previous stage has already + * done so by way of checking the previous stage's boot manifest hash. In effect + * this disables the mix-n-match enforcement policy. + * + * The critical difference between this flag and {@link I4F_FORCE_MIXNMATCH} is + * that this flag will cause the entire manifest to be enforced on the + * environment, including the anti-replay token in BNCH. + * {@link I4F_FORCE_MIXNMATCH} will ignore the nonce. + * + * It is illegal to use a manifest which possesses a CHMH tag as a first-stage + * manifest. 
*/ OS_ENUM(img4_flags, uint64_t, I4F_INIT = 0, I4F_TRUST_MANIFEST = (1 << 0), I4F_FORCE_MIXNMATCH = (1 << 1), + I4F_FIRST_STAGE = (1 << 2), ) IMG4_API_AVAILABLE_20180112; -#if TARGET_OS_OSX || defined(PLATFORM_MacOSX) -typedef char _img4_opaque_data_64[656]; -typedef char _img4_opaque_data_32[476]; -#elif TARGET_OS_IOS || defined(PLATFORM_iPhoneOS) -typedef char _img4_opaque_data_64[656]; -typedef char _img4_opaque_data_32[476]; -#elif TARGET_OS_WATCH || defined(PLATFORM_WatchOS) -typedef char _img4_opaque_data_64[656]; -typedef char _img4_opaque_data_32[488]; -#elif TARGET_OS_TV || defined(PLATFORM_tvOS) || defined(PLATFORM_AppleTVOS) -typedef char _img4_opaque_data_64[656]; -typedef char _img4_opaque_data_32[476]; -#elif TARGET_OS_BRIDGE || defined(PLATFORM_BridgeOS) -typedef char _img4_opaque_data_64[656]; -typedef char _img4_opaque_data_32[476]; -#else -#error "Unsupported platform" -#endif +typedef char _img4_opaque_data_64[696]; + +typedef char _img4_opaque_data_32[520]; /*! * @typedef img4_t @@ -246,23 +264,12 @@ typedef struct _img4 { #endif } img4_t; -#if TARGET_OS_OSX || defined(PLATFORM_MacOSX) -typedef char _img4_payload_opaque_data_64[488]; -typedef char _img4_payload_opaque_data_32[316]; -#elif TARGET_OS_IOS || defined(PLATFORM_iPhoneOS) -typedef char _img4_payload_opaque_data_64[488]; -typedef char _img4_payload_opaque_data_32[316]; -#elif TARGET_OS_WATCH || defined(PLATFORM_WatchOS) -typedef char _img4_payload_opaque_data_64[488]; -typedef char _img4_payload_opaque_data_32[316]; -#elif TARGET_OS_TV || defined(PLATFORM_tvOS) || defined(PLATFORM_AppleTVOS) -typedef char _img4_payload_opaque_data_64[488]; -typedef char _img4_payload_opaque_data_32[316]; -#elif TARGET_OS_BRIDGE || defined(PLATFORM_BridgeOS) -typedef char _img4_payload_opaque_data_64[488]; -typedef char _img4_payload_opaque_data_32[316]; +typedef char _img4_payload_opaque_data_64[496]; + +#if __ARM_ARCH_7S__ || __i386__ +typedef char _img4_payload_opaque_data_32[324]; #else -#error "Unsupported platform" +typedef char _img4_payload_opaque_data_32[328]; #endif /*! @@ -281,9 +288,16 @@ typedef struct _img4_payload { #if !IMG4_PROJECT_BUILD #include <img4/environment.h> +#include <img4/nonce.h> #include <img4/payload.h> #endif +#if IMG4_TAPI +#include "environment.h" +#include "nonce.h" +#include "payload.h" +#endif + /*! * @function img4_init * Initializes an Image4. @@ -316,40 +330,69 @@ typedef struct _img4_payload { * The bytes given to this routine must represent an Image4 manifest. They may * optionally also represent an Image4 payload. */ +#if !MACH_KERNEL_PRIVATE IMG4_API_AVAILABLE_20180112 OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 errno_t img4_init(img4_t *i4, img4_flags_t flags, const uint8_t *bytes, size_t len, img4_destructor_t destructor); +#else +#define img4_init(...) (img4if->i4if_init(__VA_ARGS__)) +#endif /*! - * @function img4_set_custom_tag_handler - * Sets custom tag handlers for an Image4. These handlers are invoked during - * trust evaluation of the Image4. + * @function img4_set_nonce + * Sets the anti-replay token to be used during manifest enforcement. This value + * will be compared against the value of the manifest's BNCH property. * * @param i4 * The Image4 to modify. * - * @param tags - * An array of custom tag structures which specify the custom tags expected. - * This must be constant storage. Passing heap or stack storage will result in - * undefined behavior. + * @param bytes + * The bytes which comprise the anti-replay token. * - * @param tags_cnt - * The number of items in the {@link tags} array.
+ * @param len + * The length of the anti-replay token. * * @discussion - * Invocations of custom tag handlers occur during trust evaluation. You should - * not assume that the Image4 is trusted within the scope of a custom tag - * handler. Trustworthiness can only be determined by consulting the return - * value of {@link img4_get_trusted_payload} or - * {@link img4_get_trusted_external_payload}. + * If a nonce is not set prior to a call to either + * {@link img4_get_trusted_payload} or + * {@link img4_get_trusted_external_payload}, the implementation will act as + * though there is no nonce in the environment. Therefore, any manifests which + * have a BNCH property constraint will fail to validate. */ +#if !MACH_KERNEL_PRIVATE IMG4_API_AVAILABLE_20180112 OS_EXPORT OS_NONNULL1 OS_NONNULL2 void -img4_set_custom_tag_handler(img4_t *i4, - const img4_custom_tag_t *tags, size_t tags_cnt); +img4_set_nonce(img4_t *i4, const void *bytes, size_t len); +#else +#define img4_set_nonce(...) (img4if->i4if_set_nonce(__VA_ARGS__)) +#endif + +/*! + * @function img4_set_nonce_domain + * Sets the nonce domain to be consulted for the anti-replay token during + * manifest enforcement. + * + * @param i4 + * The Image4 to modify. + * + * @param nd + * The nonce domain to use for anti-replay. + * + * @discussion + * See discussion for {@link img4_set_nonce}. + */ +#if !MACH_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20181106 +OS_EXPORT OS_NONNULL1 OS_NONNULL2 +void +img4_set_nonce_domain(img4_t *i4, const img4_nonce_domain_t *nd); +#else +#define img4_set_nonce_domain(...) \ + (img4if->i4if_v1.set_nonce_domain(__VA_ARGS__)) +#endif /*! * @function img4_get_trusted_payload @@ -364,10 +407,6 @@ img4_set_custom_tag_handler(img4_t *i4, * @param env * The environment against which to validate the Image4. * - * @param ctx - * The context pointer to pass to the routines defined in the environment (if - * a custom environment was passed) and to any custom tag handlers. - * * @param bytes * A pointer to the storage where the pointer to the payload buffer will be * written on success. @@ -384,6 +423,7 @@ img4_set_custom_tag_handler(img4_t *i4, * [EAUTH] The Image4 manifest was not authentic * [EACCES] The environment given does not satisfy the manifest * constraints + * [ESTALE] The nonce specified is not valid * [EACCES] The environment and manifest do not agree on a digest * algorithm * [EILSEQ] The payload for the given tag does not match its description @@ -406,12 +446,16 @@ img4_set_custom_tag_handler(img4_t *i4, * If any one of these validation checks fails, the payload is considered * untrustworthy and is not returned. */ +#if !MACH_KERNEL_PRIVATE IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 OS_NONNULL5 OS_NONNULL6 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 OS_NONNULL4 OS_NONNULL5 errno_t img4_get_trusted_payload(img4_t *i4, img4_tag_t tag, - const img4_environment_t *env, void *ctx, - const uint8_t **bytes, size_t *len); + const img4_environment_t *env, const uint8_t **bytes, size_t *len); +#else +#define img4_get_trusted_payload(...) \ + (img4if->i4if_get_trusted_payload(__VA_ARGS__)) +#endif /*! * @function img4_get_trusted_external_payload @@ -427,10 +471,6 @@ img4_get_trusted_payload(img4_t *i4, img4_tag_t tag, * @param env * The environment against which to validate the Image4. * - * @param ctx - * The context pointer to pass to the routines defined in the environment and to - * any custom tag handlers. 
- * * @param bytes * A pointer to the storage where the pointer to the payload buffer will be * written on success. @@ -448,6 +488,7 @@ img4_get_trusted_payload(img4_t *i4, img4_tag_t tag, * [EAUTH] The Image4 manifest was not authentic * [EACCES] The environment given does not satisfy the manifest * constraints + * [ESTALE] The nonce specified is not valid * [EACCES] The environment and manifest do not agree on a digest * algorithm * [EILSEQ] The payload for the given tag does not match its description @@ -461,66 +502,17 @@ img4_get_trusted_payload(img4_t *i4, img4_tag_t tag, * This routine performs the same validation steps as * {@link img4_get_trusted_payload}. */ +#if !MACH_KERNEL_PRIVATE IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL2 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL2 OS_NONNULL3 OS_NONNULL4 +OS_NONNULL5 errno_t img4_get_trusted_external_payload(img4_t *i4, img4_payload_t *payload, - const img4_environment_t *env, void *ctx, - const uint8_t **bytes, size_t *len); - -/*! - * @function img4_get_entitlement_bool - * Queries the Image4 manifest for a Boolean entitlement value. - * - * @param i4 - * The Image4 to query. - * - * @param entitlement - * The tag for the entitlement to query. - * - * @result - * The Boolean value of the entitlement. If the entitlement was not present, - * false is returned. If the entitlement was present but did not have a Boolean - * value, false is returned. - * - * @discussion - * This routine does not trigger validation of the Image4. Therefore the result - * result of this routine cannot be used to confer trust without also having - * obtained a valid payload. - */ -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 -bool -img4_get_entitlement_bool(img4_t *i4, img4_tag_t entitlement); - -/*! - * @function img4_get_object_entitlement_bool - * Queries the specified object in the Image4 manifest for a Boolean entitlement - * value. - * - * @param i4 - * The Image4 to query. - * - * @param object - * The tag for the object to query. - * - * @param entitlement - * The tag for the entitlement to query. - * - * @result - * The Boolean value of the entitlement. If the entitlement was not present, - * false is returned. If the entitlement was present but did not have a Boolean - * value, false is returned. If the object specified was not present, false is - * returned. - * - * @discussion - * See discussion for {@link img4_get_entitlement_bool}. - */ -IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 -bool -img4_get_object_entitlement_bool(img4_t *i4, img4_tag_t object, - img4_tag_t entitlement); + const img4_environment_t *env, const uint8_t **bytes, size_t *len); +#else +#define img4_get_trusted_external_payload(...) \ + (img4if->i4if_get_trusted_external_payload(__VA_ARGS__)) +#endif /*! * @function img4_destroy @@ -533,10 +525,14 @@ img4_get_object_entitlement_bool(img4_t *i4, img4_tag_t object, * The destructor passed to {@link img4_init} is called as a result of this * routine, if any was set. */ +#if !MACH_KERNEL_PRIVATE IMG4_API_AVAILABLE_20180112 OS_EXPORT OS_NONNULL1 void img4_destroy(img4_t *i4); +#else +#define img4_destroy(...) (img4if->i4if_destroy(__VA_ARGS__)) +#endif __END_DECLS; diff --git a/EXTERNAL_HEADERS/img4/nonce.h b/EXTERNAL_HEADERS/img4/nonce.h new file mode 100644 index 000000000..c9571f704 --- /dev/null +++ b/EXTERNAL_HEADERS/img4/nonce.h @@ -0,0 +1,185 @@ +/*! 
+ * @header + * Provides an interface for managing nonces to govern the lifetime of a + * personalization performed with Tatsu. A nonce managed by this interface may + * be used in a Tatsu signing request as the value for the BNCH tag. + * + * These interfaces require the caller to possess the + * + * com.apple.private.security.AppleImage4.user-client + * + * entitlement. + * + * @section Threat Model + * The adversary possesses the following: + * + * 1. a manifest which was previously valid but has since been invalidated + * by rolling the nonce associated with it + * 2. user-level code execution + * 3. knowledge of the raw nonce value for the previously-valid manifest + * + * The defense against this adversary is a system in which knowledge of the raw + * nonce is insufficient to impact the evaluation of a personalization. This + * system has the following characteristics: + * + * 1. A nonce seed is stored in an nvram variable which is only writable by + * the kernel + * 2. When making a new signing request, the nonce seed is encrypted by a + * UID1-derived key in-kernel and then hashed -- the output of this + * operation is the nonce to be used in the signing request + * 3. On boot, AppleImage4 obtains the nonce seed from nvram and stores it + * in a data structure which will be covered by KTRR + * 4. When evaluating a manifest, AppleImage4 reads the raw nonce from the + * KTRR-covered data structure and validates it with the same + * transformation as was done in (2) + * 5. When the nonce is to be invalidated, AppleImage4 sets a flag in an + * nvram variable which is only writable by the kernel + * 6. On the subsequent boot, AppleImage4 notices the flag, generates a new + * nonce and repeats the procedure in (3) + * + * In this system, the raw nonce seed never leaves the kernel, and the nonce + * itself is a non-reversible representation of the seed. + */ + + +#ifndef __IMG4_NONCE_H +#define __IMG4_NONCE_H + +#ifndef __IMG4_INDIRECT +#error "Please #include <img4/img4.h> instead of this file directly" +#endif // __IMG4_INDIRECT + +/*! + * @typedef img4_nonce_domain_t + * An opaque type describing a nonce domain. + */ +IMG4_API_AVAILABLE_20181106 +typedef struct _img4_nonce_domain img4_nonce_domain_t; + +/*! + * @const IMG4_NONCE_VERSION + * The version of the {@link img4_nonce_t} structure supported by the + * implementation. + */ +#define IMG4_NONCE_VERSION ((img4_struct_version_t)0) + +/*! + * @const IMG4_NONCE_MAX_LENGTH + * The maximum length of a nonce. Currently, this is the length of a SHA2-384 + * hash. + */ +#define IMG4_NONCE_MAX_LENGTH (48) + +/*! + * @typedef img4_nonce_t + * A structure describing a nonce. + * + * @field i4n_version + * The version of the structure. When declaring this structure, you must + * initialize this field to {@link IMG4_NONCE_VERSION}. + * + * @field i4n_nonce + * The bytes comprising the nonce. + * + * @field i4n_length + * The length of the nonce. Will be at most {@link IMG4_NONCE_MAX_LENGTH}. + */ +IMG4_API_AVAILABLE_20181106 +typedef struct _img4_nonce { + img4_struct_version_t i4n_version; + const uint8_t i4n_nonce[IMG4_NONCE_MAX_LENGTH]; + uint32_t i4n_length; +} img4_nonce_t; + +/*! + * @const IMG4_NONCE_INIT + * A convenience initializer for {@link img4_nonce_t} which ensures that the + * {@link i4n_version} field is properly initialized.
+ */ +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +#define IMG4_NONCE_INIT (img4_nonce_t){.i4n_version = IMG4_NONCE_VERSION} +#elif defined(__cplusplus) && __cplusplus >= 201103L +#define IMG4_NONCE_INIT (img4_nonce_t{IMG4_NONCE_VERSION}) +#elif defined(__cplusplus) +#define IMG4_NONCE_INIT \ + (img4_nonce_t((img4_nonce_t){IMG4_NONCE_VERSION})) +#else +#define IMG4_NONCE_INIT {IMG4_NONCE_VERSION} +#endif + +/*! + * @const IMG4_NONCE_DOMAIN_TRUST_CACHE + * The nonce domain governing trust cache personalizations. Use of this domain + * requires the + * + * com.apple.private.img4.nonce.trust-cache + * + * entitlement. + */ +#if !MACH_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20181106 +OS_EXPORT +const struct _img4_nonce_domain _img4_nonce_domain_trust_cache; +#define IMG4_NONCE_DOMAIN_TRUST_CACHE (&_img4_nonce_domain_trust_cache) +#else +#define IMG4_NONCE_DOMAIN_TRUST_CACHE (img4if->i4if_v1.nonce_domain_trust_cache) +#endif + +/*! + * @function img4_nonce_domain_copy_nonce + * Copies the current value of the nonce in the given domain. + * + * @param nd + * The nonce domain. + * + * @param n + * Upon successful return, storage that will contain the current nonce. The + * provided structure's {@link i4n_version} must be initialized to + * {@link IMG4_NONCE_VERSION}. + * + * @result + * Upon success, zero is returned. The implementation may also return one of the + * following error codes directly: + * + * [ESTALE] The nonce for the given domain has been invalidated, and the + * host must reboot in order to generate a new one + * [EPERM] The caller lacked the entitlement necessary to read the + * given nonce + */ +#if !MACH_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20181106 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL2 +errno_t +img4_nonce_domain_copy_nonce(const img4_nonce_domain_t *nd, img4_nonce_t *n); +#else +#define img4_nonce_domain_copy_nonce(...) \ + (img4if->i4if_v1.nonce_domain_copy_nonce(__VA_ARGS__)) +#endif + +/*! + * @function img4_nonce_domain_roll_nonce + * Invalidates the current nonce for the given domain and forces a re-generation + * of the domain's nonce seed at the next boot. + * + * @param nd + * The nonce domain. + * + * @result + * Upon success, zero is returned. The kernel implementation will never return + * a non-zero code. The userspace implementation may return one of the following + * error codes directly: + * + * [EPERM] The caller lacked the entitlement necessary to roll the + * given nonce + */ +#if !MACH_KERNEL_PRIVATE +IMG4_API_AVAILABLE_20181106 +OS_EXPORT OS_NONNULL1 +errno_t +img4_nonce_domain_roll_nonce(const img4_nonce_domain_t *nd); +#else +#define img4_nonce_domain_roll_nonce(...) \ + (img4if->i4if_v1.nonce_domain_roll_nonce(__VA_ARGS__)) +#endif + +#endif // __IMG4_NONCE_H diff --git a/EXTERNAL_HEADERS/img4/payload.h b/EXTERNAL_HEADERS/img4/payload.h index 5a3ba810d..4a1d119d3 100644 --- a/EXTERNAL_HEADERS/img4/payload.h +++ b/EXTERNAL_HEADERS/img4/payload.h @@ -17,8 +17,29 @@ #error "Please #include <img4/img4.h> instead of this file directly" #endif // __IMG4_INDIRECT +/*! + * @typedef img4_payload_flags_t + * Flags modifying the behavior of an Image4 payload object. + * + * @const I4PLF_INIT + * No flags set. This value is suitable for initialization purposes. + * + * @const I4PLF_UNWRAPPED + * Indicates that the payload bytes are not wrapped in an Image4 payload object + * (.im4p file). If this flag is given, the payload tag is ignored.
+ * + * This should be used in scenarios such as x86 SecureBoot, which use Image4 to + * describe portable executable files which must be fed directly to the firmware + * and cannot tolerate being wrapped in an intermediary format. + */ +OS_ENUM(img4_payload_flags, uint64_t, + I4PLF_INIT = 0, + I4PLF_UNWRAPPED = (1 << 0), +); + /*! * @function img4_payload_init + * Initializes an Image4 payload object. * * @param i4p * A pointer to the payload object to initialize. @@ -26,6 +47,9 @@ * @param tag * The expected tag for the payload. * + * @param flags + * Flags modifying the behavior of the payload object. + * * @param bytes * The buffer containing the Image4 payload. * @@ -43,11 +67,16 @@ * [EFTYPE] The data does not contain an Image4 payload * [ENOENT] The bytes do not contain a payload for the specified tag */ +#if !MACH_KERNEL_PRIVATE IMG4_API_AVAILABLE_20180112 -OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL3 OS_NONNULL5 +OS_EXPORT OS_WARN_RESULT OS_NONNULL1 OS_NONNULL4 errno_t img4_payload_init(img4_payload_t *i4p, img4_tag_t tag, - const uint8_t *bytes, size_t len, img4_destructor_t destructor); + img4_payload_flags_t flags, const uint8_t *bytes, size_t len, + img4_destructor_t destructor); +#else +#define img4_payload_init(...) img4if->i4if_payload_init(__VA_ARGS__) +#endif /*! * @function img4_payload_destroy @@ -61,10 +90,13 @@ img4_payload_init(img4_payload_t *i4p, img4_tag_t tag, * only the associated resources. This routine will cause the destructor given * in {@link img4_payload_init} to be called, if any. */ +#if !MACH_KERNEL_PRIVATE IMG4_API_AVAILABLE_20180112 OS_EXPORT OS_NONNULL1 void img4_payload_destroy(img4_payload_t *i4p); +#else +#define img4_payload_destroy(...) img4if->i4if_payload_destroy(__VA_ARGS__) +#endif #endif // __IMG4_PAYLOAD_H - diff --git a/Makefile b/Makefile index 31de51ae8..38cb74935 100644 --- a/Makefile +++ b/Makefile @@ -159,7 +159,7 @@ TOP_TARGETS = \ install install_desktop install_embedded \ install_release_embedded install_development_embedded \ installopensource \ - cscope tags TAGS reindent \ + cscope tags TAGS checkstyle restyle check_uncrustify uncrustify \ help DEFAULT_TARGET = all diff --git a/README.md b/README.md index 0e9d6b708..a65afae99 100644 --- a/README.md +++ b/README.md @@ -156,21 +156,17 @@ Set up your build environment and from the top directory, run: $ make cscope # this will build cscope database -Coding styles (Reindenting files) -================================= +Code Style +========== -Source files can be reindented using clang-format setup in .clang-format. -XNU follows a variant of WebKit style for source code formatting. -Please refer to format styles at [WebKit website](http://www.webkit.org/coding/coding-style.html). -Further options about style options is available at [clang docs](http://clang.llvm.org/docs/ClangFormatStyleOptions.html) +Source files can be reformatted to comply with the xnu code style using the "restyle" make target invoked from the +top-level project directory. - Note: clang-format binary may not be part of base installation. It can be compiled from llvm clang sources and is reachable in $PATH. - - From the top directory, run: - - $ make reindent # reindent all source files using clang format. + $ make restyle # re-format all source files to be xnu code style conformant. +Compliance can be checked using the "checkstyle" make target. + $ make checkstyle # Check all relevant source files for xnu code style conformance. 
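For illustration, here is a rough before/after sketch of the kind of transformation the "restyle" target applies. The code is hypothetical and not taken from the tree; the conventions it demonstrates -- braces on every conditional and loop body, and return expressions without parentheses -- account for most of the mechanical churn in the SETUP tool diffs later in this patch.

    /* Before restyling: unbraced single-statement bodies and a
     * parenthesized return value. */
    static int
    count_nonzero(const int *v, int n)
    {
        int i, count = 0;

        for (i = 0; i < n; i++)
            if (v[i] != 0)
                count++;
        return (count);
    }

    /* After restyling: every body is braced and the return value
     * is bare. Behavior is unchanged. */
    static int
    count_nonzero(const int *v, int n)
    {
        int i, count = 0;

        for (i = 0; i < n; i++) {
            if (v[i] != 0) {
                count++;
            }
        }
        return count;
    }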
How to install a new header file from XNU ========================================= diff --git a/SETUP/config/config.h b/SETUP/config/config.h index 3692c798a..54c441289 100644 --- a/SETUP/config/config.h +++ b/SETUP/config/config.h @@ -2,7 +2,7 @@ * Copyright (c) 1999-2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights * Reserved. This file contains Original Code and/or Modifications of * Original Code as defined in and that are subject to the Apple Public @@ -10,7 +10,7 @@ * except in compliance with the License. Please obtain a copy of the * License at http://www.apple.com/publicsource and read it before using * this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,10 +18,10 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License." - * + * * @APPLE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1990 Carnegie-Mellon University * Copyright (c) 1989 Carnegie-Mellon University @@ -61,38 +61,38 @@ #include struct file_list { - struct file_list *f_next; - char *f_fn; /* the name */ - u_char f_type; /* see below */ - u_char f_flags; /* see below */ - short f_special; /* requires special make rule */ - char *f_needs; - char *f_extra; /* stuff to add to make line */ + struct file_list *f_next; + char *f_fn; /* the name */ + u_char f_type; /* see below */ + u_char f_flags; /* see below */ + short f_special; /* requires special make rule */ + char *f_needs; + char *f_extra; /* stuff to add to make line */ }; /* * Types. */ -#define DRIVER 1 -#define NORMAL 2 -#define INVISIBLE 3 -#define PROFILING 4 +#define DRIVER 1 +#define NORMAL 2 +#define INVISIBLE 3 +#define PROFILING 4 /* * Attributes (flags). */ -#define CONFIGDEP 0x01 /* obsolete? */ -#define OPTIONSDEF 0x02 /* options definition entry */ +#define CONFIGDEP 0x01 /* obsolete? */ +#define OPTIONSDEF 0x02 /* options definition entry */ struct device { - int d_type; /* CONTROLLER, DEVICE, bus adaptor */ - const char *d_name; /* name of device (e.g. rk11) */ - int d_slave; /* slave number */ -#define QUES -1 /* -1 means '?' */ -#define UNKNOWN -2 /* -2 means not set yet */ - int d_flags; /* nlags for device init */ - struct device *d_next; /* Next one in list */ - char *d_init; /* pseudo device init routine name */ + int d_type; /* CONTROLLER, DEVICE, bus adaptor */ + const char *d_name; /* name of device (e.g. rk11) */ + int d_slave; /* slave number */ +#define QUES -1 /* -1 means '?' */ +#define UNKNOWN -2 /* -2 means not set yet */ + int d_flags; /* nlags for device init */ + struct device *d_next; /* Next one in list */ + char *d_init; /* pseudo device init routine name */ }; /* @@ -102,7 +102,7 @@ struct device { * it will build from ``Makefile.vax'' and use ``../vax/inline'' * in the makerules, etc. 
*/ -extern const char *machinename; +extern const char *machinename; /* * In order to configure and build outside the kernel source tree, @@ -114,7 +114,7 @@ extern char *config_directory; FILE *fopenp(const char *fpath, char *file, char *complete, const char *ftype); const char *get_VPATH(void); -#define VPATH get_VPATH() +#define VPATH get_VPATH() /* * A set of options may also be specified which are like CPU types, @@ -122,36 +122,36 @@ const char *get_VPATH(void); * A separate set of options may be defined for make-style options. */ struct opt { - char *op_name; - char *op_value; - struct opt *op_next; + char *op_name; + char *op_value; + struct opt *op_next; }; extern struct opt *opt, *mkopt, *opt_tail, *mkopt_tail; -const char *get_word(FILE *fp); -char *ns(const char *str); -char *qu(int num); -char *path(const char *file); +const char *get_word(FILE *fp); +char *ns(const char *str); +char *qu(int num); +char *path(const char *file); -extern int do_trace; +extern int do_trace; -extern struct device *dtab; -dev_t nametodev(char *name, int defunit, char defpartition); -char *devtoname(dev_t dev); +extern struct device *dtab; +dev_t nametodev(char *name, int defunit, char defpartition); +char *devtoname(dev_t dev); -extern char errbuf[80]; -extern int yyline; +extern char errbuf[80]; +extern int yyline; -extern struct file_list *ftab, *conf_list, **confp; -extern char *build_directory; +extern struct file_list *ftab, *conf_list, **confp; +extern char *build_directory; -extern int profiling; +extern int profiling; -#define eq(a,b) (!strcmp(a,b)) +#define eq(a, b) (!strcmp(a,b)) #define DEV_MASK 0x7 -#define DEV_SHIFT 3 +#define DEV_SHIFT 3 /* External function references */ char *get_rest(FILE *fp); diff --git a/SETUP/config/externs.c b/SETUP/config/externs.c index 6f69a3340..58a6045fe 100644 --- a/SETUP/config/externs.c +++ b/SETUP/config/externs.c @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights * Reserved. This file contains Original Code and/or Modifications of * Original Code as defined in and that are subject to the Apple Public @@ -10,7 +10,7 @@ * except in compliance with the License. Please obtain a copy of the * License at http://www.apple.com/publicsource and read it before using * this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,7 +18,7 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License." - * + * * @APPLE_LICENSE_HEADER_END@ */ /* Copyright (c) Apple Computer, Inc. All rights reserved. */ @@ -33,7 +33,7 @@ * it will build from ``Makefile.vax'' and use ``../vax/inline'' * in the makerules, etc. 
*/ -const char *machinename; +const char *machinename; /* * In order to configure and build outside the kernel source tree, @@ -50,15 +50,14 @@ char *config_directory; */ struct opt *opt, *mkopt, *opt_tail, *mkopt_tail; -int do_trace; - -struct device *dtab; +int do_trace; -char errbuf[80]; -int yyline; +struct device *dtab; -struct file_list *ftab, *conf_list, **confp; -char *build_directory; +char errbuf[80]; +int yyline; -int profiling = 0; +struct file_list *ftab, *conf_list, **confp; +char *build_directory; +int profiling = 0; diff --git a/SETUP/config/main.c b/SETUP/config/main.c index f485b4e39..3a570c92b 100644 --- a/SETUP/config/main.c +++ b/SETUP/config/main.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights * Reserved. This file contains Original Code and/or Modifications of * Original Code as defined in and that are subject to the Apple Public @@ -10,7 +10,7 @@ * except in compliance with the License. Please obtain a copy of the * License at http://www.apple.com/publicsource and read it before using * this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,10 +18,10 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License." - * + * * @APPLE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1990 Carnegie-Mellon University * Copyright (c) 1989 Carnegie-Mellon University @@ -50,7 +50,7 @@ #ifndef lint char copyright[] = -"@(#) Copyright (c) 1980 Regents of the University of California.\n\ + "@(#) Copyright (c) 1980 Regents of the University of California.\n\ All rights reserved.\n"; #endif /* not lint */ @@ -70,57 +70,58 @@ static char sccsid[] __attribute__((used)) = "@(#)main.c 5.9 (Berkeley) 6/18/88" int main(int argc, char *argv[]) { - - source_directory = ".."; /* default */ + source_directory = ".."; /* default */ object_directory = ".."; config_directory = (char *) 0; while ((argc > 1) && (argv[1][0] == '-')) { - char *c; + char *c; argv++; argc--; - for (c = &argv[0][1]; *c ; c++) { + for (c = &argv[0][1]; *c; c++) { switch (*c) { - case 'b': - build_directory = argv[1]; - goto check_arg; - - case 'd': - source_directory = argv[1]; - goto check_arg; - - case 'o': - object_directory = argv[1]; - goto check_arg; - - case 'c': - config_directory = argv[1]; - - check_arg: - if (argv[1] == (char *) 0) - goto usage_error; - argv++; argc--; - break; - - case 'p': - profiling++; - break; - default: + case 'b': + build_directory = argv[1]; + goto check_arg; + + case 'd': + source_directory = argv[1]; + goto check_arg; + + case 'o': + object_directory = argv[1]; + goto check_arg; + + case 'c': + config_directory = argv[1]; + +check_arg: + if (argv[1] == (char *) 0) { goto usage_error; + } + argv++; argc--; + break; + + case 'p': + profiling++; + break; + default: + goto usage_error; } } } if (config_directory == (char *) 0) { config_directory = - malloc((unsigned) strlen(source_directory) + 6); + malloc((unsigned) strlen(source_directory) + 6); (void) sprintf(config_directory, "%s/conf", source_directory); } if (argc != 2) { - usage_error: ; +usage_error: ; fprintf(stderr, "usage: config [ -bcdo dir ] [ -p ] sysname\n"); exit(1); } - if 
(!build_directory) + if (!build_directory) { build_directory = argv[1]; + } if (freopen(argv[1], "r", stdin) == NULL) { perror(argv[1]); exit(2); @@ -128,12 +129,13 @@ main(int argc, char *argv[]) dtab = NULL; confp = &conf_list; opt = 0; - if (yyparse()) + if (yyparse()) { exit(3); + } - mkioconf(); /* ioconf.c */ - makefile(); /* build Makefile */ - headers(); /* make a lot of .h files */ + mkioconf(); /* ioconf.c */ + makefile(); /* build Makefile */ + headers(); /* make a lot of .h files */ return 0; } @@ -151,27 +153,34 @@ get_word(FILE *fp) int ch; char *cp; - while ((ch = getc(fp)) != EOF) - if (ch != ' ' && ch != '\t') + while ((ch = getc(fp)) != EOF) { + if (ch != ' ' && ch != '\t') { break; - if (ch == EOF) - return ((char *)EOF); - if (ch == '\n') - return (NULL); - if (ch == '|') - return( "|"); + } + } + if (ch == EOF) { + return (char *)EOF; + } + if (ch == '\n') { + return NULL; + } + if (ch == '|') { + return "|"; + } cp = line; *cp++ = ch; while ((ch = getc(fp)) != EOF) { - if (isspace(ch)) + if (isspace(ch)) { break; + } *cp++ = ch; } *cp = 0; - if (ch == EOF) - return ((char *)EOF); + if (ch == EOF) { + return (char *)EOF; + } (void) ungetc(ch, fp); - return (line); + return line; } /* @@ -189,14 +198,16 @@ get_rest(FILE *fp) cp = line; while ((ch = getc(fp)) != EOF) { - if (ch == '\n') + if (ch == '\n') { break; + } *cp++ = ch; } *cp = 0; - if (ch == EOF) - return ((char *)EOF); - return (line); + if (ch == EOF) { + return (char *)EOF; + } + return line; } /* @@ -207,10 +218,10 @@ path(const char *file) { char *cp; - cp = malloc((unsigned)(strlen(build_directory)+ - strlen(file)+ - strlen(object_directory)+ - 3)); + cp = malloc((unsigned)(strlen(build_directory) + + strlen(file) + + strlen(object_directory) + + 3)); (void) sprintf(cp, "%s/%s/%s", object_directory, build_directory, file); - return (cp); + return cp; } diff --git a/SETUP/config/mkheaders.c b/SETUP/config/mkheaders.c index 2c345b7c9..e3d2f34ac 100644 --- a/SETUP/config/mkheaders.c +++ b/SETUP/config/mkheaders.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights * Reserved. This file contains Original Code and/or Modifications of * Original Code as defined in and that are subject to the Apple Public @@ -10,7 +10,7 @@ * except in compliance with the License. Please obtain a copy of the * License at http://www.apple.com/publicsource and read it before using * this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,10 +18,10 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License." 
- * + * * @APPLE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1990 Carnegie-Mellon University * Copyright (c) 1989 Carnegie-Mellon University @@ -57,24 +57,26 @@ static char sccsid[] __attribute__((used)) = "@(#)mkheaders.c 5.5 (Berkeley) 6/1 */ #include -#include /* unlink */ +#include /* unlink */ #include #include "config.h" #include "parser.h" -static void do_count(const char *dev, const char *hname, int search); -static void do_header(const char *dev, const char *hname, int count); -static char *toheader(const char *dev); -static char *tomacro(const char *dev); +static void do_count(const char *dev, const char *hname, int search); +static void do_header(const char *dev, const char *hname, int count); +static char *toheader(const char *dev); +static char *tomacro(const char *dev); void headers(void) { struct file_list *fl; - for (fl = ftab; fl != 0; fl = fl->f_next) - if (fl->f_needs != 0) + for (fl = ftab; fl != 0; fl = fl->f_next) { + if (fl->f_needs != 0) { do_count(fl->f_needs, fl->f_needs, 1); + } + } } /* @@ -87,16 +89,18 @@ do_count(const char *dev, const char *hname, int search) struct device *dp; int count; - for (count = 0,dp = dtab; dp != 0; dp = dp->d_next) + for (count = 0, dp = dtab; dp != 0; dp = dp->d_next) { if (eq(dp->d_name, dev)) { if (dp->d_type == PSEUDO_DEVICE) { count = dp->d_slave != UNKNOWN ? dp->d_slave : 1; - if (dp->d_flags) + if (dp->d_flags) { dev = NULL; + } break; } } + } do_header(dev, hname, count); } @@ -106,7 +110,7 @@ do_header(const char *dev, const char *hname, int count) char *file, *name; const char *inw; char *inwcopy; - struct file_list *fl = NULL; /* may exit for(;;) uninitted */ + struct file_list *fl = NULL; /* may exit for(;;) uninitted */ struct file_list *fl_head, *fl_prev; FILE *inf, *outf; int inc, oldcount; @@ -137,22 +141,26 @@ do_header(const char *dev, const char *hname, int count) fl_head = 0; for (;;) { const char *cp; - if ((inw = get_word(inf)) == 0 || inw == (char *)EOF) + if ((inw = get_word(inf)) == 0 || inw == (char *)EOF) { break; - if ((inw = get_word(inf)) == 0 || inw == (char *)EOF) + } + if ((inw = get_word(inf)) == 0 || inw == (char *)EOF) { break; + } inwcopy = ns(inw); cp = get_word(inf); - if (cp == 0 || cp == (char *)EOF) + if (cp == 0 || cp == (char *)EOF) { break; + } inc = atoi(cp); if (eq(inwcopy, name)) { oldcount = inc; inc = count; } cp = get_word(inf); - if (cp == (char *)EOF) + if (cp == (char *)EOF) { break; + } fl = (struct file_list *) malloc(sizeof *fl); fl->f_fn = inwcopy; fl->f_type = inc; @@ -161,7 +169,7 @@ do_header(const char *dev, const char *hname, int count) } (void) fclose(inf); if (count == oldcount) { - while (fl !=0) { + while (fl != 0) { fl_prev = fl; fl = fl->f_next; free((char *)fl_prev); @@ -197,8 +205,8 @@ toheader(const char *dev) { static char hbuf[MAXPATHLEN]; (void) snprintf(hbuf, sizeof hbuf, "%s.h", path(dev)); - hbuf[MAXPATHLEN-1] = '\0'; - return (hbuf); + hbuf[MAXPATHLEN - 1] = '\0'; + return hbuf; } /* @@ -212,11 +220,13 @@ tomacro(const char *dev) cp = mbuf; *cp++ = 'N'; - while (*dev) - if (!islower(*dev)) + while (*dev) { + if (!islower(*dev)) { *cp++ = *dev++; - else + } else { *cp++ = toupper(*dev++); + } + } *cp++ = 0; - return (mbuf); + return mbuf; } diff --git a/SETUP/config/mkioconf.c b/SETUP/config/mkioconf.c index 9f210daa0..b20eb73d4 100644 --- a/SETUP/config/mkioconf.c +++ b/SETUP/config/mkioconf.c @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ - * + * * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights * Reserved. This file contains Original Code and/or Modifications of * Original Code as defined in and that are subject to the Apple Public @@ -10,7 +10,7 @@ * except in compliance with the License. Please obtain a copy of the * License at http://www.apple.com/publicsource and read it before using * this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,10 +18,10 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License." - * + * * @APPLE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1990 Carnegie-Mellon University * Copyright (c) 1989 Carnegie-Mellon University @@ -30,7 +30,7 @@ * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ - + /* * Copyright (c) 1980 Regents of the University of California. * All rights reserved. @@ -49,14 +49,14 @@ */ #include -#include /* for unlink */ +#include /* for unlink */ #include "parser.h" #include "config.h" /* * build the ioconf.c file */ -void pseudo_inits(FILE *fp); +void pseudo_inits(FILE *fp); void mkioconf(void) @@ -71,7 +71,7 @@ mkioconf(void) } fprintf(fp, "#include \n"); fprintf(fp, "\n"); - pseudo_inits (fp); + pseudo_inits(fp); (void) fclose(fp); } @@ -83,17 +83,20 @@ pseudo_inits(FILE *fp) fprintf(fp, "\n"); for (dp = dtab; dp != 0; dp = dp->d_next) { - if (dp->d_type != PSEUDO_DEVICE || dp->d_init == 0) + if (dp->d_type != PSEUDO_DEVICE || dp->d_init == 0) { continue; + } fprintf(fp, "extern int %s(int);\n", dp->d_init); } fprintf(fp, "\nstruct pseudo_init pseudo_inits[] = {\n"); for (dp = dtab; dp != 0; dp = dp->d_next) { - if (dp->d_type != PSEUDO_DEVICE || dp->d_init == 0) + if (dp->d_type != PSEUDO_DEVICE || dp->d_init == 0) { continue; + } count = dp->d_slave; - if (count <= 0) + if (count <= 0) { count = 1; + } fprintf(fp, "\t{%d,\t%s},\n", count, dp->d_init); } fprintf(fp, "\t{0,\t0},\n};\n"); diff --git a/SETUP/config/mkmakefile.c b/SETUP/config/mkmakefile.c index 9a8dc5c79..a32236fd1 100644 --- a/SETUP/config/mkmakefile.c +++ b/SETUP/config/mkmakefile.c @@ -59,34 +59,34 @@ static char sccsid[] __attribute__((used)) = "@(#)mkmakefile.c 5.21 (Berkeley) 6 */ #include -#include /* for unlink */ +#include /* for unlink */ #include #include "parser.h" #include "config.h" -void read_files(void); -void do_objs(FILE *fp, const char *msg, int ext); -void do_files(FILE *fp, const char *msg, char ext); -void do_machdep(FILE *ofp); -void do_rules(FILE *f); -void copy_dependencies(FILE *makin, FILE *makout); +void read_files(void); +void do_objs(FILE *fp, const char *msg, int ext); +void do_files(FILE *fp, const char *msg, char ext); +void do_machdep(FILE *ofp); +void do_rules(FILE *f); +void copy_dependencies(FILE *makin, FILE *makout); struct file_list *fl_lookup(char *file); struct file_list *fltail_lookup(char *file); struct file_list *new_fent(void); -void put_source_file_name(FILE *fp, struct file_list *tp); +void put_source_file_name(FILE *fp, struct file_list *tp); #define next_word(fp, wd) \ { const char *word = get_word(fp); \ if (word == (char *)EOF) \ - return; \ + return; \ else \ - wd = word; \ + wd = word; \ } -static struct file_list *fcur; 
+static struct file_list *fcur; const char *tail(const char *fn); char *allCaps(char *str); @@ -98,11 +98,12 @@ fl_lookup(char *file) { struct file_list *fp; - for (fp = ftab ; fp != 0; fp = fp->f_next) { - if (eq(fp->f_fn, file)) - return (fp); + for (fp = ftab; fp != 0; fp = fp->f_next) { + if (eq(fp->f_fn, file)) { + return fp; + } } - return (0); + return 0; } /* @@ -113,11 +114,12 @@ fltail_lookup(char *file) { struct file_list *fp; - for (fp = ftab ; fp != 0; fp = fp->f_next) { - if (eq(tail(fp->f_fn), tail(file))) - return (fp); + for (fp = ftab; fp != 0; fp = fp->f_next) { + if (eq(tail(fp->f_fn), tail(file))) { + return fp; + } } - return (0); + return 0; } /* @@ -134,30 +136,31 @@ new_fent(void) fp->f_flags = 0; fp->f_type = 0; fp->f_extra = (char *) 0; - if (fcur == 0) + if (fcur == 0) { fcur = ftab = fp; - else + } else { fcur->f_next = fp; + } fcur = fp; - return (fp); + return fp; } -char *COPTS; +char *COPTS; const char * get_VPATH(void) { - static char *vpath = NULL; + static char *vpath = NULL; - if ((vpath == NULL) && - ((vpath = getenv("VPATH")) != NULL) && - (*vpath != ':')) { - char *buf = malloc((unsigned)(strlen(vpath) + 2)); + if ((vpath == NULL) && + ((vpath = getenv("VPATH")) != NULL) && + (*vpath != ':')) { + char *buf = malloc((unsigned)(strlen(vpath) + 2)); - vpath = strcat(strcpy(buf, ":"), vpath); - } + vpath = strcat(strcpy(buf, ":"), vpath); + } - return vpath ? vpath : ""; + return vpath ? vpath : ""; } @@ -191,34 +194,42 @@ makefile(void) fprintf(ofp, "SOURCE_DIR=%s\n", source_directory); fprintf(ofp, "export CONFIG_DEFINES ="); - if (profiling) + if (profiling) { fprintf(ofp, " -DGPROF"); + } - for (op = opt; op; op = op->op_next) - if (op->op_value) + for (op = opt; op; op = op->op_next) { + if (op->op_value) { fprintf(ofp, " -D%s=\"%s\"", op->op_name, op->op_value); - else + } else { fprintf(ofp, " -D%s", op->op_name); + } + } fprintf(ofp, "\n"); - for (op = mkopt; op; op = op->op_next) - if (op->op_value) + for (op = mkopt; op; op = op->op_next) { + if (op->op_value) { fprintf(ofp, "%s=%s\n", op->op_name, op->op_value); - else + } else { fprintf(ofp, "%s\n", op->op_name); + } + } while (fgets(line, BUFSIZ, ifp) != 0) { - if (*line == '%') + if (*line == '%') { goto percent; + } if (profiling && strncmp(line, "COPTS=", 6) == 0) { char *cp; fprintf(ofp, - "GPROF.EX=$(SOURCE_DIR)/machdep/%s/gmon.ex\n", machinename); + "GPROF.EX=$(SOURCE_DIR)/machdep/%s/gmon.ex\n", machinename); cp = index(line, '\n'); - if (cp) + if (cp) { *cp = 0; + } cp = line + 6; - while (*cp && (*cp == ' ' || *cp == '\t')) + while (*cp && (*cp == ' ' || *cp == '\t')) { cp++; + } COPTS = malloc((unsigned)(strlen(cp) + 1)); if (COPTS == 0) { printf("config: out of memory\n"); @@ -230,7 +241,7 @@ makefile(void) } fprintf(ofp, "%s", line); continue; - percent: +percent: if (eq(line, "%OBJS\n")) { do_objs(ofp, "OBJS=", -1); } else if (eq(line, "%CFILES\n")) { @@ -244,15 +255,15 @@ makefile(void) do_objs(ofp, "SOBJS=", 's'); } else if (eq(line, "%MACHDEP\n")) { do_machdep(ofp); - } else if (eq(line, "%RULES\n")) + } else if (eq(line, "%RULES\n")) { do_rules(ofp); - else + } else { fprintf(stderr, "Unknown %% construct in generic makefile: %s", line); + } } - if (dfp != NULL) - { + if (dfp != NULL) { copy_dependencies(dfp, ofp); (void) fclose(dfp); } @@ -306,15 +317,16 @@ next: } return; } - if (wd == 0) + if (wd == 0) { goto next; + } /* * Allow comment lines beginning witha '#' character. 
*/ - if (*wd == '#') - { - while ((wd=get_word(fp)) && wd != (char *)EOF) + if (*wd == '#') { + while ((wd = get_word(fp)) && wd != (char *)EOF) { ; + } goto next; } @@ -325,27 +337,31 @@ next: fname, this); exit(1); } - if ((pf = fl_lookup(this)) && (pf->f_type != INVISIBLE || pf->f_flags)) + if ((pf = fl_lookup(this)) && (pf->f_type != INVISIBLE || pf->f_flags)) { isdup = 1; - else + } else { isdup = 0; + } tp = 0; nreqs = 0; devorprof = ""; needs = 0; - if (eq(wd, "standard")) + if (eq(wd, "standard")) { goto checkdev; + } if (!eq(wd, "optional")) { printf("%s: %s must be optional or standard\n", fname, this); exit(1); } - if (strncmp(this, "OPTIONS/", 8) == 0) + if (strncmp(this, "OPTIONS/", 8) == 0) { options++; + } not_option = 0; nextopt: next_word(fp, wd); - if (wd == 0) + if (wd == 0) { goto doneopt; + } if (eq(wd, "not")) { not_option = !not_option; goto nextopt; @@ -356,12 +372,13 @@ nextopt: goto save; } nreqs++; - if (needs == 0 && nreqs == 1) + if (needs == 0 && nreqs == 1) { needs = ns(wd); - if (isdup) + } + if (isdup) { goto invis; - if (options) - { + } + if (options) { struct opt *lop = 0; struct device tdev; @@ -379,8 +396,7 @@ nextopt: tdev.d_flags++; tdev.d_slave = 0; - for (op=opt; op; lop=op, op=op->op_next) - { + for (op = opt; op; lop = op, op = op->op_next) { char *od = allCaps(ns(wd)); /* @@ -388,34 +404,36 @@ nextopt: * dependency identifier. Set the slave field to * define the option in the header file. */ - if (strcmp(op->op_name, od) == 0) - { + if (strcmp(op->op_name, od) == 0) { tdev.d_slave = 1; - if (lop == 0) + if (lop == 0) { opt = op->op_next; - else + } else { lop->op_next = op->op_next; + } free(op); op = 0; - } + } free(od); - if (op == 0) + if (op == 0) { break; + } } newdev(&tdev); } - for (dp = dtab; dp != 0; dp = dp->d_next) { + for (dp = dtab; dp != 0; dp = dp->d_next) { if (eq(dp->d_name, wd) && (dp->d_type != PSEUDO_DEVICE || dp->d_slave)) { - if (not_option) - goto invis; /* dont want file if option present */ - else + if (not_option) { + goto invis; /* dont want file if option present */ + } else { goto nextopt; + } } } - if (not_option) - goto nextopt; /* want file if option missing */ - - for (op = opt; op != 0; op = op->op_next) + if (not_option) { + goto nextopt; /* want file if option missing */ + } + for (op = opt; op != 0; op = op->op_next) { if (op->op_value == 0 && opteq(op->op_name, wd)) { if (nreqs == 1) { free(needs); @@ -423,12 +441,15 @@ nextopt: } goto nextopt; } + } invis: - while ((wd = get_word(fp)) != 0) + while ((wd = get_word(fp)) != 0) { ; - if (tp == 0) + } + if (tp == 0) { tp = new_fent(); + } tp->f_fn = this; tp->f_type = INVISIBLE; tp->f_needs = needs; @@ -444,8 +465,9 @@ doneopt: checkdev: if (wd) { - if (*wd == '|') + if (*wd == '|') { goto getrest; + } next_word(fp, wd); if (wd) { devorprof = wd; @@ -460,29 +482,32 @@ getrest: rest = ns(get_rest(fp)); } else { printf("%s: syntax error describing %s\n", - fname, this); + fname, this); exit(1); } } - if (eq(devorprof, "profiling-routine") && profiling == 0) + if (eq(devorprof, "profiling-routine") && profiling == 0) { goto next; - if (tp == 0) + } + if (tp == 0) { tp = new_fent(); + } tp->f_fn = this; tp->f_extra = rest; - if (options) + if (options) { tp->f_type = INVISIBLE; - else - if (eq(devorprof, "device-driver")) + } else if (eq(devorprof, "device-driver")) { tp->f_type = DRIVER; - else if (eq(devorprof, "profiling-routine")) + } else if (eq(devorprof, "profiling-routine")) { tp->f_type = PROFILING; - else + } else { tp->f_type = NORMAL; + } tp->f_flags = 
0; tp->f_needs = needs; - if (pf && pf->f_type == INVISIBLE) - pf->f_flags = 1; /* mark as duplicate */ + if (pf && pf->f_type == INVISIBLE) { + pf->f_flags = 1; /* mark as duplicate */ + } goto next; } @@ -491,25 +516,28 @@ opteq(const char *cp, const char *dp) { char c, d; - for (; ; cp++, dp++) { + for (;; cp++, dp++) { if (*cp != *dp) { c = isupper(*cp) ? tolower(*cp) : *cp; d = isupper(*dp) ? tolower(*dp) : *dp; - if (c != d) - return (0); + if (c != d) { + return 0; + } + } + if (*cp == 0) { + return 1; } - if (*cp == 0) - return (1); } } void put_source_file_name(FILE *fp, struct file_list *tp) { - if ((tp->f_fn[0] == '.') && (tp->f_fn[1] == '/')) + if ((tp->f_fn[0] == '.') && (tp->f_fn[1] == '/')) { fprintf(fp, "%s ", tp->f_fn); - else + } else { fprintf(fp, "$(SOURCE_DIR)/%s ", tp->f_fn); + } } void @@ -524,16 +552,17 @@ do_objs(FILE *fp, const char *msg, int ext) fprintf(fp, "%s", msg); lpos = strlen(msg); for (tp = ftab; tp != 0; tp = tp->f_next) { - if (tp->f_type == INVISIBLE) + if (tp->f_type == INVISIBLE) { continue; + } /* * Check for '.o' file in list */ cp = tp->f_fn + (len = strlen(tp->f_fn)) - 1; - if (ext != -1 && *cp != ext) + if (ext != -1 && *cp != ext) { continue; - else if (*cp == 'o') { + } else if (*cp == 'o') { if (len + lpos > 72) { lpos = 8; fprintf(fp, "\\\n\t"); @@ -562,15 +591,17 @@ void do_files(FILE *fp, const char *msg, char ext) { struct file_list *tp; - int lpos, len=0; /* dvw: init to 0 */ + int lpos, len = 0; /* dvw: init to 0 */ fprintf(fp, "%s", msg); lpos = 8; for (tp = ftab; tp != 0; tp = tp->f_next) { - if (tp->f_type == INVISIBLE) + if (tp->f_type == INVISIBLE) { continue; - if (tp->f_fn[strlen(tp->f_fn)-1] != ext) + } + if (tp->f_fn[strlen(tp->f_fn) - 1] != ext) { continue; + } /* * Always generate a newline. * Our Makefile's aren't readable anyway. 
@@ -602,10 +633,11 @@ do_machdep(FILE *ofp) exit(1); } while (fgets(line, BUFSIZ, ifp) != 0) { - if (profiling && (strncmp(line, "LIBS=", 5) == 0)) - fprintf(ofp,"LIBS=${LIBS_P}\n"); - else + if (profiling && (strncmp(line, "LIBS=", 5) == 0)) { + fprintf(ofp, "LIBS=${LIBS_P}\n"); + } else { fputs(line, ofp); + } } fclose(ifp); } @@ -616,9 +648,10 @@ tail(const char *fn) const char *cp; cp = rindex(fn, '/'); - if (cp == 0) - return (fn); - return (cp+1); + if (cp == 0) { + return fn; + } + return cp + 1; } /* @@ -641,86 +674,92 @@ do_rules(FILE *f) const char *nl = ""; for (ftp = ftab; ftp != 0; ftp = ftp->f_next) { - if (ftp->f_type == INVISIBLE) + if (ftp->f_type == INVISIBLE) { continue; + } cp = (np = ftp->f_fn) + strlen(ftp->f_fn) - 1; och = *cp; /* - * Don't compile '.o' files - */ - if (och == 'o') + * Don't compile '.o' files + */ + if (och == 'o') { continue; + } /* - * Determine where sources should come from - */ + * Determine where sources should come from + */ if ((np[0] == '.') && (np[1] == '/')) { source_dir = ""; np += 2; - } else + } else { source_dir = "$(SOURCE_DIR)/"; + } *cp = '\0'; - tp = tail(np); /* dvw: init tp before 'if' */ + tp = tail(np); /* dvw: init tp before 'if' */ fprintf(f, "-include %sd\n", tp); fprintf(f, "%so: %s%s%c\n", tp, source_dir, np, och); if (och == 's') { fprintf(f, "\t${S_RULE_0}\n"); fprintf(f, "\t${S_RULE_1A}%s%.*s${S_RULE_1B}%s\n", - source_dir, (int)(tp-np), np, nl); + source_dir, (int)(tp - np), np, nl); fprintf(f, "\t${S_RULE_2}%s\n", nl); continue; } extras = ""; switch (ftp->f_type) { - case NORMAL: goto common; break; - + case DRIVER: extras = "_D"; goto common; break; - + case PROFILING: - if (!profiling) + if (!profiling) { continue; + } if (COPTS == 0) { fprintf(stderr, - "config: COPTS undefined in generic makefile"); + "config: COPTS undefined in generic makefile"); COPTS = ""; } extras = "_P"; goto common; - - common: + +common: och_upper = och + 'A' - 'a'; fprintf(f, "\t${%c_RULE_0%s}\n", och_upper, extras); fprintf(f, "\t${%c_RULE_1A%s}", och_upper, extras); - if (ftp->f_extra) + if (ftp->f_extra) { fprintf(f, "%s", ftp->f_extra); + } fprintf(f, "%s%.*s${%c_RULE_1B%s}%s\n", - source_dir, (int)(tp-np), np, och_upper, extras, nl); + source_dir, (int)(tp - np), np, och_upper, extras, nl); /* While we are still using CTF, any build that normally does not support CTF will * a "standard" compile done as well that we can harvest CTF information from; do * that here. 
*/ fprintf(f, "\t${%c_CTFRULE_1A%s}", och_upper, extras); - if (ftp->f_extra) + if (ftp->f_extra) { fprintf(f, "%s", ftp->f_extra); + } fprintf(f, "%s%.*s${%c_CTFRULE_1B%s}%s\n", - source_dir, (int)(tp-np), np, och_upper, extras, nl); + source_dir, (int)(tp - np), np, och_upper, extras, nl); fprintf(f, "\t${%c_RULE_2%s}%s\n", och_upper, extras, nl); fprintf(f, "\t${%c_CTFRULE_2%s}%s\n", och_upper, extras, nl); fprintf(f, "\t${%c_RULE_3%s}%s\n", och_upper, extras, nl); fprintf(f, "\t${%c_RULE_4A%s}", och_upper, extras); - if (ftp->f_extra) + if (ftp->f_extra) { fprintf(f, "%s", ftp->f_extra); + } fprintf(f, "%s%.*s${%c_RULE_4B%s}%s\n", - source_dir, (int)(tp-np), np, och_upper, extras, nl); + source_dir, (int)(tp - np), np, och_upper, extras, nl); break; - + default: printf("Don't know rules for %s\n", np); break; @@ -735,17 +774,18 @@ allCaps(char *str) char *cp = str; while (*str) { - if (islower(*str)) + if (islower(*str)) { *str = toupper(*str); + } str++; } - return (cp); + return cp; } #define OLDSALUTATION "# DO NOT DELETE THIS LINE" #define LINESIZE 1024 -static char makbuf[LINESIZE]; /* one line buffer for makefile */ +static char makbuf[LINESIZE]; /* one line buffer for makefile */ void copy_dependencies(FILE *makin, FILE *makout) @@ -753,16 +793,17 @@ copy_dependencies(FILE *makin, FILE *makout) int oldlen = (sizeof OLDSALUTATION - 1); while (fgets(makbuf, LINESIZE, makin) != NULL) { - if (! strncmp(makbuf, OLDSALUTATION, oldlen)) + if (!strncmp(makbuf, OLDSALUTATION, oldlen)) { break; + } } while (fgets(makbuf, LINESIZE, makin) != NULL) { - if (oldlen != 0) - { - if (makbuf[0] == '\n') + if (oldlen != 0) { + if (makbuf[0] == '\n') { continue; - else + } else { oldlen = 0; + } } fputs(makbuf, makout); } diff --git a/SETUP/config/openp.c b/SETUP/config/openp.c index c05cd9daf..e18b63913 100644 --- a/SETUP/config/openp.c +++ b/SETUP/config/openp.c @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights * Reserved. This file contains Original Code and/or Modifications of * Original Code as defined in and that are subject to the Apple Public @@ -10,7 +10,7 @@ * except in compliance with the License. Please obtain a copy of the * License at http://www.apple.com/publicsource and read it before using * this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,7 +18,7 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License." 
- * + * * @APPLE_LICENSE_HEADER_END@ */ /* openp, fopenp -- search pathlist and open file @@ -51,28 +51,28 @@ */ #include -#include /* open */ +#include /* open */ #include "config.h" int openp(const char *fpath, char *file, char *complete, int flags, int mode); -static int flgs,mod,value; +static int flgs, mod, value; static const char *ftyp; static FILE *fvalue; static int func(char *fnam) { - value = open (fnam,flgs,mod); - return (value < 0); + value = open(fnam, flgs, mod); + return value < 0; } static int ffunc(char *fnam) { - fvalue = fopen (fnam,ftyp); - return (fvalue == 0); + fvalue = fopen(fnam, ftyp); + return fvalue == 0; } int @@ -80,14 +80,18 @@ openp(const char *fpath, char *file, char *complete, int flags, int mode) { flgs = flags; mod = mode; - if (searchp(fpath,file,complete,func) < 0) return (-1); - return (value); + if (searchp(fpath, file, complete, func) < 0) { + return -1; + } + return value; } FILE * fopenp(const char *fpath, char *file, char *complete, const char *ftype) { ftyp = ftype; - if (searchp(fpath,file,complete,ffunc) < 0) return (0); - return (fvalue); + if (searchp(fpath, file, complete, ffunc) < 0) { + return 0; + } + return fvalue; } diff --git a/SETUP/config/searchp.c b/SETUP/config/searchp.c index b79ca6a44..453cd6f9d 100644 --- a/SETUP/config/searchp.c +++ b/SETUP/config/searchp.c @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights * Reserved. This file contains Original Code and/or Modifications of * Original Code as defined in and that are subject to the Apple Public @@ -10,7 +10,7 @@ * except in compliance with the License. Please obtain a copy of the * License at http://www.apple.com/publicsource and read it before using * this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,7 +18,7 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License." - * + * * @APPLE_LICENSE_HEADER_END@ */ /* searchp -- search through pathlist for file @@ -45,7 +45,7 @@ * * HISTORY * 01-Apr-86 Rudy Nedved (ern) at Carnegie-Mellon University - * 4.1BSD system ignores trailing slashes. 4.2BSD does not. + * 4.1BSD system ignores trailing slashes. 4.2BSD does not. * Therefore don't add a seperating slash if there is a null * filename. * @@ -75,16 +75,20 @@ searchp(const char *spath, char *file, char *fullname, int (*func)(char *)) do { fname = fullname; nextchar = nextpath; - while (*nextchar && (*nextchar != ':')) + while (*nextchar && (*nextchar != ':')) { *fname++ = *nextchar++; - if (nextchar != nextpath && *file) *fname++ = '/'; + } + if (nextchar != nextpath && *file) { + *fname++ = '/'; + } lastchar = nextchar; nextpath = ((*nextchar) ? nextchar + 1 : nextchar); - nextchar = file; /* append file */ - while (*nextchar) *fname++ = *nextchar++; + nextchar = file; /* append file */ + while (*nextchar) { + *fname++ = *nextchar++; + } *fname = '\0'; - failure = (*func) (fullname); - } - while (failure && (*lastchar)); - return (failure ? -1 : 0); + failure = (*func)(fullname); + }while (failure && (*lastchar)); + return failure ? 
-1 : 0; } diff --git a/SETUP/decomment/decomment.c b/SETUP/decomment/decomment.c index f95bdb69d..b2cceac7f 100644 --- a/SETUP/decomment/decomment.c +++ b/SETUP/decomment/decomment.c @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * "Portions Copyright (c) 1999 Apple Computer, Inc. All Rights * Reserved. This file contains Original Code and/or Modifications of * Original Code as defined in and that are subject to the Apple Public @@ -10,7 +10,7 @@ * except in compliance with the License. Please obtain a copy of the * License at http://www.apple.com/publicsource and read it before using * this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,18 +18,18 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License." - * + * * @APPLE_LICENSE_HEADER_END@ */ /* * decomment.c * - * Removes all comments and (optionally) whitespace from an input file. + * Removes all comments and (optionally) whitespace from an input file. * Writes result on stdout. */ - + #include -#include /* for isspace */ +#include /* for isspace */ #include /* @@ -37,15 +37,16 @@ */ typedef enum { IS_NORMAL, - IS_SLASH, // encountered opening '/' - IS_IN_COMMENT, // within / * * / comment - IS_STAR, // encountered closing '*' - IS_IN_END_COMMENT // within / / comment + IS_SLASH, // encountered opening '/' + IS_IN_COMMENT, // within / * * / comment + IS_STAR, // encountered closing '*' + IS_IN_END_COMMENT // within / / comment } input_state_t; static void usage(char **argv); -int main(int argc, char **argv) +int +main(int argc, char **argv) { FILE *fp; char bufchar; @@ -53,136 +54,136 @@ int main(int argc, char **argv) int exit_code = 0; int remove_whitespace = 0; int arg; - - if(argc < 2) + + if (argc < 2) { usage(argv); - for(arg=2; arg] \n", - getprogname()); + getprogname()); exit(EX_USAGE); } diff --git a/SETUP/json_compilation_db/json_compilation_db.c b/SETUP/json_compilation_db/json_compilation_db.c index 7a148aebc..eb0dfdb50 100644 --- a/SETUP/json_compilation_db/json_compilation_db.c +++ b/SETUP/json_compilation_db/json_compilation_db.c @@ -56,7 +56,8 @@ char *escape_string(const char *); * "]" */ -int main(int argc, char * argv[]) +int +main(int argc, char * argv[]) { struct stat sb; int ret; @@ -82,47 +83,55 @@ int main(int argc, char * argv[]) argc -= 4; input_file_len = strlen(input_file); - if (!(input_file_len > 2 && 0 == strcmp(".c", input_file + input_file_len - 2)) && - !(input_file_len > 3 && 0 == strcmp(".cp", input_file + input_file_len - 3)) && - !(input_file_len > 4 && 0 == strcmp(".cpp", input_file + input_file_len - 4))) { + if (!(input_file_len > 2 && 0 == strcmp(".c", input_file + input_file_len - 2)) && + !(input_file_len > 3 && 0 == strcmp(".cp", input_file + input_file_len - 3)) && + !(input_file_len > 4 && 0 == strcmp(".cpp", input_file + input_file_len - 4))) { /* Not a C/C++ file, just skip it */ return 0; } dstfd = open(json_output, O_RDWR | O_CREAT | O_EXLOCK, DEFFILEMODE); - if (dstfd < 0) + if (dstfd < 0) { err(EX_NOINPUT, "open(%s)", json_output); + } ret = fstat(dstfd, &sb); - if (ret < 0) + if (ret < 0) { err(EX_NOINPUT, "fstat(%s)", json_output); + } - if (!S_ISREG(sb.st_mode)) + if (!S_ISREG(sb.st_mode)) { err(EX_USAGE, "%s is not a 
regular file", json_output); + } dst = fdopen(dstfd, "w+"); - if (dst == NULL) + if (dst == NULL) { err(EX_UNAVAILABLE, "fdopen"); + } - read_bytes = fread(start, sizeof(start[0]), sizeof(start)/sizeof(start[0]), dst); - if ((read_bytes != sizeof(start)) || (0 != memcmp(start, "[\n", sizeof(start)/sizeof(start[0])))) { + read_bytes = fread(start, sizeof(start[0]), sizeof(start) / sizeof(start[0]), dst); + if ((read_bytes != sizeof(start)) || (0 != memcmp(start, "[\n", sizeof(start) / sizeof(start[0])))) { /* no JSON start, we don't really care why */ ret = fseeko(dst, 0, SEEK_SET); - if (ret < 0) + if (ret < 0) { err(EX_UNAVAILABLE, "fseeko"); + } ret = fputs("[", dst); - if (ret < 0) + if (ret < 0) { err(EX_UNAVAILABLE, "fputs"); + } } else { /* has at least two bytes at the start. Seek to 3 bytes before the end */ ret = fseeko(dst, -3, SEEK_END); - if (ret < 0) + if (ret < 0) { err(EX_UNAVAILABLE, "fseeko"); + } ret = fputs(",", dst); - if (ret < 0) + if (ret < 0) { err(EX_UNAVAILABLE, "fputs"); + } } fprintf(dst, "\n"); @@ -130,9 +139,9 @@ int main(int argc, char * argv[]) fprintf(dst, " \"directory\": \"%s\",\n", cwd); fprintf(dst, " \"file\": \"%s\",\n", input_file); fprintf(dst, " \"command\": \""); - for (i=0; i < argc; i++) { + for (i = 0; i < argc; i++) { bool needs_escape = strchr(argv[i], '\\') || strchr(argv[i], '"') || strchr(argv[i], ' '); - + if (needs_escape) { char *escaped_string = escape_string(argv[i]); fprintf(dst, "%s\\\"%s\\\"", i == 0 ? "" : " ", escaped_string); @@ -146,13 +155,15 @@ int main(int argc, char * argv[]) fprintf(dst, "]\n"); ret = fclose(dst); - if (ret < 0) + if (ret < 0) { err(EX_UNAVAILABLE, "fclose"); + } return 0; } -void usage(void) +void +usage(void) { fprintf(stderr, "Usage: %s [ ...]\n", getprogname()); exit(EX_USAGE); @@ -171,7 +182,7 @@ escape_string(const char *input) size_t i, j; char *output = malloc(len * 4 + 1); - for (i=0, j=0; i < len; i++) { + for (i = 0, j = 0; i < len; i++) { char ch = input[i]; if (ch == '\\' || ch == '"') { diff --git a/SETUP/kextsymboltool/kextsymboltool.c b/SETUP/kextsymboltool/kextsymboltool.c index 7e0d49a2d..46f644b55 100644 --- a/SETUP/kextsymboltool/kextsymboltool.c +++ b/SETUP/kextsymboltool/kextsymboltool.c @@ -2,14 +2,14 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -44,11 +44,11 @@ * Typedefs, Enums, Constants *********************************************************************/ typedef enum { - kErrorNone = 0, - kError, - kErrorFileAccess, - kErrorDiskFull, - kErrorDuplicate + kErrorNone = 0, + kError, + kErrorFileAccess, + kErrorDiskFull, + kErrorDuplicate } ToolError; #pragma mark Function Protos @@ -64,10 +64,10 @@ writeFile(int fd, const void * data, size_t length); __private_extern__ ToolError seekFile(int fd, off_t offset); -extern char* __cxa_demangle (const char* mangled_name, - char* buf, - size_t* n, - int* status); +extern char* __cxa_demangle(const char* mangled_name, + char* buf, + size_t* n, + int* status); #pragma mark Functions /********************************************************************* @@ -75,35 +75,39 @@ extern char* __cxa_demangle (const char* mangled_name, __private_extern__ ToolError writeFile(int fd, const void * data, size_t length) { - ToolError err; + ToolError err; - if (length != (size_t)write(fd, data, length)) - err = kErrorDiskFull; - else - err = kErrorNone; + if (length != (size_t)write(fd, data, length)) { + err = kErrorDiskFull; + } else { + err = kErrorNone; + } - if (kErrorNone != err) - perror("couldn't write output"); + if (kErrorNone != err) { + perror("couldn't write output"); + } - return( err ); + return err; } - /********************************************************************* - *********************************************************************/ +/********************************************************************* +*********************************************************************/ __private_extern__ ToolError seekFile(int fd, off_t offset) { - ToolError err; + ToolError err; - if (offset != lseek(fd, offset, SEEK_SET)) - err = kErrorDiskFull; - else - err = kErrorNone; + if (offset != lseek(fd, offset, SEEK_SET)) { + err = kErrorDiskFull; + } else { + err = kErrorNone; + } - if (kErrorNone != err) - perror("couldn't write output"); + if (kErrorNone != err) { + perror("couldn't write output"); + } - return( err ); + return err; } /********************************************************************* @@ -111,81 +115,82 @@ seekFile(int fd, off_t offset) __private_extern__ ToolError readFile(const char *path, vm_offset_t * objAddr, vm_size_t * objSize) { - ToolError err = kErrorFileAccess; - int fd; - struct stat stat_buf; - - *objAddr = 0; - *objSize = 0; - - do - { - if((fd = open(path, O_RDONLY)) == -1) - continue; - - if(fstat(fd, &stat_buf) == -1) - continue; - - if (0 == (stat_buf.st_mode & S_IFREG)) - continue; - - /* Don't try to map an empty file, it fails now due to conformance - * stuff (PR 4611502). - */ - if (0 == stat_buf.st_size) { - err = kErrorNone; - continue; - } - - *objSize = stat_buf.st_size; - - *objAddr = (vm_offset_t)mmap(NULL /* address */, *objSize, - PROT_READ|PROT_WRITE, MAP_FILE|MAP_PRIVATE /* flags */, - fd, 0 /* offset */); - - if ((void *)*objAddr == MAP_FAILED) { - *objAddr = 0; - *objSize = 0; - continue; - } + ToolError err = kErrorFileAccess; + int fd; + struct stat stat_buf; + + *objAddr = 0; + *objSize = 0; + + do{ + if ((fd = open(path, O_RDONLY)) == -1) { + continue; + } + + if (fstat(fd, &stat_buf) == -1) { + continue; + } + + if (0 == (stat_buf.st_mode & S_IFREG)) { + continue; + } - err = kErrorNone; + /* Don't try to map an empty file, it fails now due to conformance + * stuff (PR 4611502). 
+ */ + if (0 == stat_buf.st_size) { + err = kErrorNone; + continue; + } + + *objSize = stat_buf.st_size; - } while( false ); + *objAddr = (vm_offset_t)mmap(NULL /* address */, *objSize, + PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE /* flags */, + fd, 0 /* offset */); - if (-1 != fd) - { - close(fd); - } - if (kErrorNone != err) - { - fprintf(stderr, "couldn't read %s: %s\n", path, strerror(errno)); - } + if ((void *)*objAddr == MAP_FAILED) { + *objAddr = 0; + *objSize = 0; + continue; + } - return( err ); + err = kErrorNone; + } while (false); + + if (-1 != fd) { + close(fd); + } + if (kErrorNone != err) { + fprintf(stderr, "couldn't read %s: %s\n", path, strerror(errno)); + } + + return err; } enum { kExported = 0x00000001, kObsolete = 0x00000002 }; struct symbol { - char * name; - unsigned int name_len; - char * indirect; - unsigned int indirect_len; - unsigned int flags; - struct symbol * list; - unsigned int list_count; + char * name; + unsigned int name_len; + char * indirect; + unsigned int indirect_len; + unsigned int flags; + struct symbol * list; + unsigned int list_count; }; -static bool issymchar( char c ) +static bool +issymchar( char c ) { - return ((c > ' ') && (c <= '~') && (c != ':') && (c != '#')); + return (c > ' ') && (c <= '~') && (c != ':') && (c != '#'); } -static bool iswhitespace( char c ) +static bool +iswhitespace( char c ) { - return ((c == ' ') || (c == '\t')); + return (c == ' ') || (c == '\t'); } /* @@ -194,10 +199,10 @@ static bool iswhitespace( char c ) static int qsort_cmp(const void * _left, const void * _right) { - struct symbol * left = (struct symbol *) _left; - struct symbol * right = (struct symbol *) _right; + struct symbol * left = (struct symbol *) _left; + struct symbol * right = (struct symbol *) _right; - return (strcmp(left->name, right->name)); + return strcmp(left->name, right->name); } /* @@ -207,261 +212,254 @@ qsort_cmp(const void * _left, const void * _right) static int bsearch_cmp( const void * _key, const void * _cmp) { - char * key = (char *)_key; - struct symbol * cmp = (struct symbol *) _cmp; + char * key = (char *)_key; + struct symbol * cmp = (struct symbol *) _cmp; - return(strcmp(key, cmp->name)); + return strcmp(key, cmp->name); } -struct bsearch_key -{ - char * name; - unsigned int name_len; +struct bsearch_key { + char * name; + unsigned int name_len; }; static int bsearch_cmp_prefix( const void * _key, const void * _cmp) { - struct bsearch_key * key = (struct bsearch_key *)_key; - struct symbol * cmp = (struct symbol *) _cmp; + struct bsearch_key * key = (struct bsearch_key *)_key; + struct symbol * cmp = (struct symbol *) _cmp; - return(strncmp(key->name, cmp->name, key->name_len)); + return strncmp(key->name, cmp->name, key->name_len); } static uint32_t count_symbols(char * file, vm_size_t file_size) { - uint32_t nsyms = 0; - char * scan; - char * eol; - char * next; - - for (scan = file; true; scan = next) { - - eol = memchr(scan, '\n', file_size - (scan - file)); - if (eol == NULL) { - break; - } - next = eol + 1; - - /* Skip empty lines. - */ - if (eol == scan) { - continue; - } - - /* Skip comment lines. - */ - if (scan[0] == '#') { - continue; - } - - /* Scan past any non-symbol characters at the beginning of the line. */ - while ((scan < eol) && !issymchar(*scan)) { - scan++; - } - - /* No symbol on line? Move along. - */ - if (scan == eol) { - continue; - } - - /* Skip symbols starting with '.'. 
- */ - if (scan[0] == '.') { - continue; - } - nsyms++; - } - - return nsyms; + uint32_t nsyms = 0; + char * scan; + char * eol; + char * next; + + for (scan = file; true; scan = next) { + eol = memchr(scan, '\n', file_size - (scan - file)); + if (eol == NULL) { + break; + } + next = eol + 1; + + /* Skip empty lines. + */ + if (eol == scan) { + continue; + } + + /* Skip comment lines. + */ + if (scan[0] == '#') { + continue; + } + + /* Scan past any non-symbol characters at the beginning of the line. */ + while ((scan < eol) && !issymchar(*scan)) { + scan++; + } + + /* No symbol on line? Move along. + */ + if (scan == eol) { + continue; + } + + /* Skip symbols starting with '.'. + */ + if (scan[0] == '.') { + continue; + } + nsyms++; + } + + return nsyms; } static uint32_t store_symbols(char * file, vm_size_t file_size, struct symbol * symbols, uint32_t idx, uint32_t max_symbols) { - char * scan; - char * line; - char * eol; - char * next; - - uint32_t strtabsize; - - strtabsize = 0; - - for (scan = file, line = file; true; scan = next, line = next) { - - char * name = NULL; - char * name_term = NULL; - unsigned int name_len = 0; - char * indirect = NULL; - char * indirect_term = NULL; - unsigned int indirect_len = 0; - char * option = NULL; - char * option_term = NULL; - unsigned int option_len = 0; - char optionstr[256]; - boolean_t obsolete = 0; - - eol = memchr(scan, '\n', file_size - (scan - file)); - if (eol == NULL) { - break; - } - next = eol + 1; - - /* Skip empty lines. - */ - if (eol == scan) { - continue; - } - - *eol = '\0'; - - /* Skip comment lines. - */ - if (scan[0] == '#') { - continue; - } - - /* Scan past any non-symbol characters at the beginning of the line. */ - while ((scan < eol) && !issymchar(*scan)) { - scan++; - } - - /* No symbol on line? Move along. - */ - if (scan == eol) { - continue; - } - - /* Skip symbols starting with '.'. - */ - if (scan[0] == '.') { - continue; - } - - name = scan; - - /* Find the end of the symbol. - */ - while ((*scan != '\0') && issymchar(*scan)) { - scan++; - } - - /* Note char past end of symbol. - */ - name_term = scan; - - /* Stored length must include the terminating nul char. - */ - name_len = name_term - name + 1; - - /* Now look for an indirect. - */ - if (*scan != '\0') { - while ((*scan != '\0') && iswhitespace(*scan)) { - scan++; - } - if (*scan == ':') { - scan++; - while ((*scan != '\0') && iswhitespace(*scan)) { - scan++; - } - if (issymchar(*scan)) { - indirect = scan; - - /* Find the end of the symbol. - */ - while ((*scan != '\0') && issymchar(*scan)) { - scan++; - } - - /* Note char past end of symbol. - */ - indirect_term = scan; - - /* Stored length must include the terminating nul char. - */ - indirect_len = indirect_term - indirect + 1; - - } else if (*scan == '\0') { - fprintf(stderr, "bad format in symbol line: %s\n", line); - exit(1); - } - } else if (*scan != '\0' && *scan != '-') { - fprintf(stderr, "bad format in symbol line: %s\n", line); - exit(1); - } - } - - /* Look for options. - */ - if (*scan != '\0') { - while ((*scan != '\0') && iswhitespace(*scan)) { - scan++; - } - - if (*scan == '-') { - scan++; - - if (isalpha(*scan)) { - option = scan; - - /* Find the end of the option. - */ - while ((*scan != '\0') && isalpha(*scan)) { - scan++; - } - - /* Note char past end of option. 
- */ - option_term = scan; - option_len = option_term - option; - - if (option_len >= sizeof(optionstr)) { - fprintf(stderr, "option too long in symbol line: %s\n", line); - exit(1); - } - memcpy(optionstr, option, option_len); - optionstr[option_len] = '\0'; - - /* Find the option. - */ - if (!strncmp(optionstr, "obsolete", option_len)) { - obsolete = TRUE; - } - - } else if (*scan == '\0') { - fprintf(stderr, "bad format in symbol line: %s\n", line); - exit(1); - } - - } - - } - - if(idx >= max_symbols) { - fprintf(stderr, "symbol[%d/%d] overflow: %s\n", idx, max_symbols, line); - exit(1); - } - - *name_term = '\0'; - if (indirect_term) { - *indirect_term = '\0'; - } - - symbols[idx].name = name; - symbols[idx].name_len = name_len; - symbols[idx].indirect = indirect; - symbols[idx].indirect_len = indirect_len; - symbols[idx].flags = (obsolete) ? kObsolete : 0; - - strtabsize += symbols[idx].name_len + symbols[idx].indirect_len; - idx++; - } - - return strtabsize; + char * scan; + char * line; + char * eol; + char * next; + + uint32_t strtabsize; + + strtabsize = 0; + + for (scan = file, line = file; true; scan = next, line = next) { + char * name = NULL; + char * name_term = NULL; + unsigned int name_len = 0; + char * indirect = NULL; + char * indirect_term = NULL; + unsigned int indirect_len = 0; + char * option = NULL; + char * option_term = NULL; + unsigned int option_len = 0; + char optionstr[256]; + boolean_t obsolete = 0; + + eol = memchr(scan, '\n', file_size - (scan - file)); + if (eol == NULL) { + break; + } + next = eol + 1; + + /* Skip empty lines. + */ + if (eol == scan) { + continue; + } + + *eol = '\0'; + + /* Skip comment lines. + */ + if (scan[0] == '#') { + continue; + } + + /* Scan past any non-symbol characters at the beginning of the line. */ + while ((scan < eol) && !issymchar(*scan)) { + scan++; + } + + /* No symbol on line? Move along. + */ + if (scan == eol) { + continue; + } + + /* Skip symbols starting with '.'. + */ + if (scan[0] == '.') { + continue; + } + + name = scan; + + /* Find the end of the symbol. + */ + while ((*scan != '\0') && issymchar(*scan)) { + scan++; + } + + /* Note char past end of symbol. + */ + name_term = scan; + + /* Stored length must include the terminating nul char. + */ + name_len = name_term - name + 1; + + /* Now look for an indirect. + */ + if (*scan != '\0') { + while ((*scan != '\0') && iswhitespace(*scan)) { + scan++; + } + if (*scan == ':') { + scan++; + while ((*scan != '\0') && iswhitespace(*scan)) { + scan++; + } + if (issymchar(*scan)) { + indirect = scan; + + /* Find the end of the symbol. + */ + while ((*scan != '\0') && issymchar(*scan)) { + scan++; + } + + /* Note char past end of symbol. + */ + indirect_term = scan; + + /* Stored length must include the terminating nul char. + */ + indirect_len = indirect_term - indirect + 1; + } else if (*scan == '\0') { + fprintf(stderr, "bad format in symbol line: %s\n", line); + exit(1); + } + } else if (*scan != '\0' && *scan != '-') { + fprintf(stderr, "bad format in symbol line: %s\n", line); + exit(1); + } + } + + /* Look for options. + */ + if (*scan != '\0') { + while ((*scan != '\0') && iswhitespace(*scan)) { + scan++; + } + + if (*scan == '-') { + scan++; + + if (isalpha(*scan)) { + option = scan; + + /* Find the end of the option. + */ + while ((*scan != '\0') && isalpha(*scan)) { + scan++; + } + + /* Note char past end of option. 
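+				 * (An option follows the symbol as "-name"; the only option
+				 * recognized here is "obsolete", which flags the symbol so its
+				 * nlist entry is later written with N_DESC_DISCARDED set.)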
+ */ + option_term = scan; + option_len = option_term - option; + + if (option_len >= sizeof(optionstr)) { + fprintf(stderr, "option too long in symbol line: %s\n", line); + exit(1); + } + memcpy(optionstr, option, option_len); + optionstr[option_len] = '\0'; + + /* Find the option. + */ + if (!strncmp(optionstr, "obsolete", option_len)) { + obsolete = TRUE; + } + } else if (*scan == '\0') { + fprintf(stderr, "bad format in symbol line: %s\n", line); + exit(1); + } + } + } + + if (idx >= max_symbols) { + fprintf(stderr, "symbol[%d/%d] overflow: %s\n", idx, max_symbols, line); + exit(1); + } + + *name_term = '\0'; + if (indirect_term) { + *indirect_term = '\0'; + } + + symbols[idx].name = name; + symbols[idx].name_len = name_len; + symbols[idx].indirect = indirect; + symbols[idx].indirect_len = indirect_len; + symbols[idx].flags = (obsolete) ? kObsolete : 0; + + strtabsize += symbols[idx].name_len + symbols[idx].indirect_len; + idx++; + } + + return strtabsize; } static const NXArchInfo * @@ -481,7 +479,7 @@ lookup_arch(const char *archstring) }; unsigned long i; - for (i=0; i < sizeof(archlist)/sizeof(archlist[0]); i++) { + for (i = 0; i < sizeof(archlist) / sizeof(archlist[0]); i++) { if (0 == strcmp(archstring, archlist[i].name)) { return &archlist[i]; } @@ -492,510 +490,479 @@ lookup_arch(const char *archstring) /********************************************************************* *********************************************************************/ -int main(int argc, char * argv[]) +int +main(int argc, char * argv[]) { - ToolError err; - int i, fd; - const char * output_name = NULL; - uint32_t zero = 0, num_files = 0; - uint32_t filenum; - uint32_t strx, strtabsize, strtabpad; - struct symbol * import_symbols; - struct symbol * export_symbols; - uint32_t num_import_syms, num_export_syms; - uint32_t result_count, num_removed_syms; - uint32_t import_idx, export_idx; - const NXArchInfo * host_arch; - const NXArchInfo * target_arch; - boolean_t require_imports = true; - boolean_t diff = false; - - - struct file { - vm_offset_t mapped; - vm_size_t mapped_size; - uint32_t nsyms; - boolean_t import; - const char * path; - }; - struct file files[64]; - - host_arch = NXGetLocalArchInfo(); - target_arch = host_arch; - - for( i = 1; i < argc; i += 2) - { - boolean_t import; - - if (!strcmp("-sect", argv[i])) - { - require_imports = false; - i--; - continue; - } - if (!strcmp("-diff", argv[i])) - { - require_imports = false; - diff = true; - i--; - continue; - } - - if (i == (argc - 1)) - { - fprintf(stderr, "bad arguments: %s\n", argv[i]); - exit(1); + ToolError err; + int i, fd; + const char * output_name = NULL; + uint32_t zero = 0, num_files = 0; + uint32_t filenum; + uint32_t strx, strtabsize, strtabpad; + struct symbol * import_symbols; + struct symbol * export_symbols; + uint32_t num_import_syms, num_export_syms; + uint32_t result_count, num_removed_syms; + uint32_t import_idx, export_idx; + const NXArchInfo * host_arch; + const NXArchInfo * target_arch; + boolean_t require_imports = true; + boolean_t diff = false; + + + struct file { + vm_offset_t mapped; + vm_size_t mapped_size; + uint32_t nsyms; + boolean_t import; + const char * path; + }; + struct file files[64]; + + host_arch = NXGetLocalArchInfo(); + target_arch = host_arch; + + for (i = 1; i < argc; i += 2) { + boolean_t import; + + if (!strcmp("-sect", argv[i])) { + require_imports = false; + i--; + continue; + } + if (!strcmp("-diff", argv[i])) { + require_imports = false; + diff = true; + i--; + continue; + } + + if (i == 
(argc - 1)) { + fprintf(stderr, "bad arguments: %s\n", argv[i]); + exit(1); + } + + if (!strcmp("-arch", argv[i])) { + target_arch = lookup_arch(argv[i + 1]); + if (!target_arch) { + fprintf(stderr, "unknown architecture name: %s\n", argv[i + 1]); + exit(1); + } + continue; + } + if (!strcmp("-output", argv[i])) { + output_name = argv[i + 1]; + continue; + } + + if (!strcmp("-import", argv[i])) { + import = true; + } else if (!strcmp("-export", argv[i])) { + import = false; + } else { + fprintf(stderr, "unknown option: %s\n", argv[i]); + exit(1); + } + + err = readFile(argv[i + 1], &files[num_files].mapped, &files[num_files].mapped_size); + if (kErrorNone != err) { + exit(1); + } + + if (files[num_files].mapped && files[num_files].mapped_size) { + files[num_files].import = import; + files[num_files].path = argv[i + 1]; + num_files++; + } } - if (!strcmp("-arch", argv[i])) - { - target_arch = lookup_arch(argv[i + 1]); - if (!target_arch) - { - fprintf(stderr, "unknown architecture name: %s\n", argv[i+1]); + if (!output_name) { + fprintf(stderr, "no output file\n"); exit(1); - } - continue; - } - if (!strcmp("-output", argv[i])) - { - output_name = argv[i+1]; - continue; - } - - if (!strcmp("-import", argv[i])) - import = true; - else if (!strcmp("-export", argv[i])) - import = false; - else - { - fprintf(stderr, "unknown option: %s\n", argv[i]); - exit(1); } - err = readFile(argv[i+1], &files[num_files].mapped, &files[num_files].mapped_size); - if (kErrorNone != err) - exit(1); + num_import_syms = 0; + num_export_syms = 0; + for (filenum = 0; filenum < num_files; filenum++) { + files[filenum].nsyms = count_symbols((char *) files[filenum].mapped, files[filenum].mapped_size); + if (files[filenum].import) { + num_import_syms += files[filenum].nsyms; + } else { + num_export_syms += files[filenum].nsyms; + } + } - if (files[num_files].mapped && files[num_files].mapped_size) - { - files[num_files].import = import; - files[num_files].path = argv[i+1]; - num_files++; + import_symbols = calloc(num_import_syms, sizeof(struct symbol)); + export_symbols = calloc(num_export_syms, sizeof(struct symbol)); + + import_idx = 0; + export_idx = 0; + + for (filenum = 0; filenum < num_files; filenum++) { + if (files[filenum].import) { + store_symbols((char *) files[filenum].mapped, files[filenum].mapped_size, + import_symbols, import_idx, num_import_syms); + import_idx += files[filenum].nsyms; + } else { + store_symbols((char *) files[filenum].mapped, files[filenum].mapped_size, + export_symbols, export_idx, num_export_syms); + export_idx += files[filenum].nsyms; + } + if (false && !files[filenum].nsyms) { + fprintf(stderr, "warning: file %s contains no names\n", files[filenum].path); + } } - } - - if (!output_name) - { - fprintf(stderr, "no output file\n"); - exit(1); - } - - num_import_syms = 0; - num_export_syms = 0; - for (filenum = 0; filenum < num_files; filenum++) - { - files[filenum].nsyms = count_symbols((char *) files[filenum].mapped, files[filenum].mapped_size); - if (files[filenum].import) - num_import_syms += files[filenum].nsyms; - else - num_export_syms += files[filenum].nsyms; - } - - import_symbols = calloc(num_import_syms, sizeof(struct symbol)); - export_symbols = calloc(num_export_syms, sizeof(struct symbol)); - - import_idx = 0; - export_idx = 0; - - for (filenum = 0; filenum < num_files; filenum++) - { - if (files[filenum].import) - { - store_symbols((char *) files[filenum].mapped, files[filenum].mapped_size, - import_symbols, import_idx, num_import_syms); - import_idx += 
files[filenum].nsyms; + + + qsort(import_symbols, num_import_syms, sizeof(struct symbol), &qsort_cmp); + qsort(export_symbols, num_export_syms, sizeof(struct symbol), &qsort_cmp); + + result_count = 0; + num_removed_syms = 0; + strtabsize = 4; + if (num_import_syms) { + for (export_idx = 0; export_idx < num_export_syms; export_idx++) { + struct symbol * result; + char * name; + size_t len; + boolean_t wild; + + name = export_symbols[export_idx].indirect; + len = export_symbols[export_idx].indirect_len; + if (!name) { + name = export_symbols[export_idx].name; + len = export_symbols[export_idx].name_len; + } + wild = ((len > 2) && ('*' == name[len -= 2])); + if (wild) { + struct bsearch_key key; + key.name = name; + key.name_len = len; + result = bsearch(&key, import_symbols, + num_import_syms, sizeof(struct symbol), &bsearch_cmp_prefix); + + if (result) { + struct symbol * first; + struct symbol * last; + + strtabsize += (result->name_len + result->indirect_len); + + first = result; + while (--first >= &import_symbols[0]) { + if (bsearch_cmp_prefix(&key, first)) { + break; + } + strtabsize += (first->name_len + first->indirect_len); + } + first++; + + last = result; + while (++last < (&import_symbols[0] + num_import_syms)) { + if (bsearch_cmp_prefix(&key, last)) { + break; + } + strtabsize += (last->name_len + last->indirect_len); + } + result_count += last - first; + result = first; + export_symbols[export_idx].list = first; + export_symbols[export_idx].list_count = last - first; + export_symbols[export_idx].flags |= kExported; + } + } else { + result = bsearch(name, import_symbols, + num_import_syms, sizeof(struct symbol), &bsearch_cmp); + } + + if (!result && require_imports) { + int status; + char * demangled_result = + __cxa_demangle(export_symbols[export_idx].name + 1, NULL, NULL, &status); + fprintf(stderr, "exported name not in import list: %s\n", + demangled_result ? 
demangled_result : export_symbols[export_idx].name); +// fprintf(stderr, " : %s\n", export_symbols[export_idx].name); + if (demangled_result) { + free(demangled_result); + } + num_removed_syms++; + } + if (diff) { + if (!result) { + result = &export_symbols[export_idx]; + } else { + result = NULL; + } + } + if (result && !wild) { + export_symbols[export_idx].flags |= kExported; + strtabsize += (export_symbols[export_idx].name_len + export_symbols[export_idx].indirect_len); + result_count++; + export_symbols[export_idx].list = &export_symbols[export_idx]; + export_symbols[export_idx].list_count = 1; + } + } } - else - { - store_symbols((char *) files[filenum].mapped, files[filenum].mapped_size, - export_symbols, export_idx, num_export_syms); - export_idx += files[filenum].nsyms; + strtabpad = (strtabsize + 3) & ~3; + + if (require_imports && num_removed_syms) { + err = kError; + goto finish; } - if (false && !files[filenum].nsyms) - { - fprintf(stderr, "warning: file %s contains no names\n", files[filenum].path); + + fd = open(output_name, O_WRONLY | O_CREAT | O_TRUNC, 0755); + if (-1 == fd) { + perror("couldn't write output"); + err = kErrorFileAccess; + goto finish; } - } - - - qsort(import_symbols, num_import_syms, sizeof(struct symbol), &qsort_cmp); - qsort(export_symbols, num_export_syms, sizeof(struct symbol), &qsort_cmp); - - result_count = 0; - num_removed_syms = 0; - strtabsize = 4; - if (num_import_syms) - { - for (export_idx = 0; export_idx < num_export_syms; export_idx++) - { - struct symbol * result; - char * name; - size_t len; - boolean_t wild; - - name = export_symbols[export_idx].indirect; - len = export_symbols[export_idx].indirect_len; - if (!name) - { - name = export_symbols[export_idx].name; - len = export_symbols[export_idx].name_len; - } - wild = ((len > 2) && ('*' == name[len-=2])); - if (wild) - { - struct bsearch_key key; - key.name = name; - key.name_len = len; - result = bsearch(&key, import_symbols, - num_import_syms, sizeof(struct symbol), &bsearch_cmp_prefix); - if (result) - { - struct symbol * first; - struct symbol * last; - - strtabsize += (result->name_len + result->indirect_len); - - first = result; - while (--first >= &import_symbols[0]) - { - if (bsearch_cmp_prefix(&key, first)) - break; - strtabsize += (first->name_len + first->indirect_len); - } - first++; - - last = result; - while (++last < (&import_symbols[0] + num_import_syms)) - { - if (bsearch_cmp_prefix(&key, last)) - break; - strtabsize += (last->name_len + last->indirect_len); - } - result_count += last - first; - result = first; - export_symbols[export_idx].list = first; - export_symbols[export_idx].list_count = last - first; - export_symbols[export_idx].flags |= kExported; - } - } - else - result = bsearch(name, import_symbols, - num_import_syms, sizeof(struct symbol), &bsearch_cmp); + struct symtab_command symcmd; + struct uuid_command uuidcmd; + off_t symsoffset; + + symcmd.cmd = LC_SYMTAB; + symcmd.cmdsize = sizeof(symcmd); + symcmd.nsyms = result_count; + symcmd.strsize = strtabpad; + + uuidcmd.cmd = LC_UUID; + uuidcmd.cmdsize = sizeof(uuidcmd); + uuid_generate(uuidcmd.uuid); + + if (CPU_ARCH_ABI64 & target_arch->cputype) { + struct mach_header_64 hdr; + struct segment_command_64 segcmd; + + hdr.magic = MH_MAGIC_64; + hdr.cputype = target_arch->cputype; + hdr.cpusubtype = target_arch->cpusubtype; + hdr.filetype = MH_KEXT_BUNDLE; + hdr.ncmds = 3; + hdr.sizeofcmds = sizeof(segcmd) + sizeof(symcmd) + sizeof(uuidcmd); + hdr.flags = MH_INCRLINK; + symsoffset = 
mach_vm_round_page(hdr.sizeofcmds); + + segcmd.cmd = LC_SEGMENT_64; + segcmd.cmdsize = sizeof(segcmd); + strncpy(segcmd.segname, SEG_LINKEDIT, sizeof(segcmd.segname)); + segcmd.vmaddr = 0; + segcmd.vmsize = result_count * sizeof(struct nlist_64) + strtabpad; + segcmd.fileoff = symsoffset; + segcmd.filesize = segcmd.vmsize; + segcmd.maxprot = PROT_READ; + segcmd.initprot = PROT_READ; + segcmd.nsects = 0; + segcmd.flags = SG_NORELOC; + + symcmd.symoff = symsoffset; + symcmd.stroff = result_count * sizeof(struct nlist_64) + + symcmd.symoff; + + if (target_arch->byteorder != host_arch->byteorder) { + swap_mach_header_64(&hdr, target_arch->byteorder); + swap_segment_command_64(&segcmd, target_arch->byteorder); + } + err = writeFile(fd, &hdr, sizeof(hdr)); + if (kErrorNone != err) { + goto finish; + } + err = writeFile(fd, &segcmd, sizeof(segcmd)); + } else { + struct mach_header hdr; + struct segment_command segcmd; + + hdr.magic = MH_MAGIC; + hdr.cputype = target_arch->cputype; + hdr.cpusubtype = target_arch->cpusubtype; + hdr.filetype = MH_KEXT_BUNDLE; + hdr.ncmds = 3; + hdr.sizeofcmds = sizeof(segcmd) + sizeof(symcmd) + sizeof(uuidcmd); + hdr.flags = MH_INCRLINK; + symsoffset = mach_vm_round_page(hdr.sizeofcmds); + + segcmd.cmd = LC_SEGMENT; + segcmd.cmdsize = sizeof(segcmd); + strncpy(segcmd.segname, SEG_LINKEDIT, sizeof(segcmd.segname)); + segcmd.vmaddr = 0; + segcmd.vmsize = result_count * sizeof(struct nlist) + strtabpad; + segcmd.fileoff = symsoffset; + segcmd.filesize = segcmd.vmsize; + segcmd.maxprot = PROT_READ; + segcmd.initprot = PROT_READ; + segcmd.nsects = 0; + segcmd.flags = SG_NORELOC; + + symcmd.symoff = symsoffset; + symcmd.stroff = result_count * sizeof(struct nlist) + + symcmd.symoff; + + if (target_arch->byteorder != host_arch->byteorder) { + swap_mach_header(&hdr, target_arch->byteorder); + swap_segment_command(&segcmd, target_arch->byteorder); + } + err = writeFile(fd, &hdr, sizeof(hdr)); + if (kErrorNone != err) { + goto finish; + } + err = writeFile(fd, &segcmd, sizeof(segcmd)); + } - if (!result && require_imports) - { - int status; - char * demangled_result = - __cxa_demangle(export_symbols[export_idx].name + 1, NULL, NULL, &status); - fprintf(stderr, "exported name not in import list: %s\n", - demangled_result ? 
demangled_result : export_symbols[export_idx].name); -// fprintf(stderr, " : %s\n", export_symbols[export_idx].name); - if (demangled_result) { - free(demangled_result); - } - num_removed_syms++; - } - if (diff) - { - if (!result) - result = &export_symbols[export_idx]; - else - result = NULL; - } - if (result && !wild) - { - export_symbols[export_idx].flags |= kExported; - strtabsize += (export_symbols[export_idx].name_len + export_symbols[export_idx].indirect_len); - result_count++; - export_symbols[export_idx].list = &export_symbols[export_idx]; - export_symbols[export_idx].list_count = 1; - } + if (kErrorNone != err) { + goto finish; } - } - strtabpad = (strtabsize + 3) & ~3; - - if (require_imports && num_removed_syms) - { - err = kError; - goto finish; - } - - fd = open(output_name, O_WRONLY|O_CREAT|O_TRUNC, 0755); - if (-1 == fd) - { - perror("couldn't write output"); - err = kErrorFileAccess; - goto finish; - } - - struct symtab_command symcmd; - struct uuid_command uuidcmd; - off_t symsoffset; - - symcmd.cmd = LC_SYMTAB; - symcmd.cmdsize = sizeof(symcmd); - symcmd.nsyms = result_count; - symcmd.strsize = strtabpad; - - uuidcmd.cmd = LC_UUID; - uuidcmd.cmdsize = sizeof(uuidcmd); - uuid_generate(uuidcmd.uuid); - - if (CPU_ARCH_ABI64 & target_arch->cputype) - { - struct mach_header_64 hdr; - struct segment_command_64 segcmd; - - hdr.magic = MH_MAGIC_64; - hdr.cputype = target_arch->cputype; - hdr.cpusubtype = target_arch->cpusubtype; - hdr.filetype = MH_KEXT_BUNDLE; - hdr.ncmds = 3; - hdr.sizeofcmds = sizeof(segcmd) + sizeof(symcmd) + sizeof(uuidcmd); - hdr.flags = MH_INCRLINK; - symsoffset = mach_vm_round_page(hdr.sizeofcmds); - - segcmd.cmd = LC_SEGMENT_64; - segcmd.cmdsize = sizeof(segcmd); - strncpy(segcmd.segname, SEG_LINKEDIT, sizeof(segcmd.segname)); - segcmd.vmaddr = 0; - segcmd.vmsize = result_count * sizeof(struct nlist_64) + strtabpad; - segcmd.fileoff = symsoffset; - segcmd.filesize = segcmd.vmsize; - segcmd.maxprot = PROT_READ; - segcmd.initprot = PROT_READ; - segcmd.nsects = 0; - segcmd.flags = SG_NORELOC; - - symcmd.symoff = symsoffset; - symcmd.stroff = result_count * sizeof(struct nlist_64) - + symcmd.symoff; - - if (target_arch->byteorder != host_arch->byteorder) - { - swap_mach_header_64(&hdr, target_arch->byteorder); - swap_segment_command_64(&segcmd, target_arch->byteorder); + + if (target_arch->byteorder != host_arch->byteorder) { + swap_symtab_command(&symcmd, target_arch->byteorder); + swap_uuid_command(&uuidcmd, target_arch->byteorder); } - err = writeFile(fd, &hdr, sizeof(hdr)); - if (kErrorNone != err) - goto finish; - err = writeFile(fd, &segcmd, sizeof(segcmd)); - } - else - { - struct mach_header hdr; - struct segment_command segcmd; - - hdr.magic = MH_MAGIC; - hdr.cputype = target_arch->cputype; - hdr.cpusubtype = target_arch->cpusubtype; - hdr.filetype = MH_KEXT_BUNDLE; - hdr.ncmds = 3; - hdr.sizeofcmds = sizeof(segcmd) + sizeof(symcmd) + sizeof(uuidcmd); - hdr.flags = MH_INCRLINK; - symsoffset = mach_vm_round_page(hdr.sizeofcmds); - - segcmd.cmd = LC_SEGMENT; - segcmd.cmdsize = sizeof(segcmd); - strncpy(segcmd.segname, SEG_LINKEDIT, sizeof(segcmd.segname)); - segcmd.vmaddr = 0; - segcmd.vmsize = result_count * sizeof(struct nlist) + strtabpad; - segcmd.fileoff = symsoffset; - segcmd.filesize = segcmd.vmsize; - segcmd.maxprot = PROT_READ; - segcmd.initprot = PROT_READ; - segcmd.nsects = 0; - segcmd.flags = SG_NORELOC; - - symcmd.symoff = symsoffset; - symcmd.stroff = result_count * sizeof(struct nlist) - + symcmd.symoff; - - if (target_arch->byteorder 
!= host_arch->byteorder) - { - swap_mach_header(&hdr, target_arch->byteorder); - swap_segment_command(&segcmd, target_arch->byteorder); + err = writeFile(fd, &symcmd, sizeof(symcmd)); + if (kErrorNone != err) { + goto finish; } - err = writeFile(fd, &hdr, sizeof(hdr)); - if (kErrorNone != err) - goto finish; - err = writeFile(fd, &segcmd, sizeof(segcmd)); - } - - if (kErrorNone != err) - goto finish; - - if (target_arch->byteorder != host_arch->byteorder) { - swap_symtab_command(&symcmd, target_arch->byteorder); - swap_uuid_command(&uuidcmd, target_arch->byteorder); - } - err = writeFile(fd, &symcmd, sizeof(symcmd)); - if (kErrorNone != err) - goto finish; - err = writeFile(fd, &uuidcmd, sizeof(uuidcmd)); - if (kErrorNone != err) - goto finish; - - err = seekFile(fd, symsoffset); - if (kErrorNone != err) - goto finish; - - strx = 4; - for (export_idx = 0; export_idx < num_export_syms; export_idx++) - { - if (!export_symbols[export_idx].name) - continue; - if (!(kExported & export_symbols[export_idx].flags)) - continue; - - if (export_idx - && export_symbols[export_idx - 1].name - && !strcmp(export_symbols[export_idx - 1].name, export_symbols[export_idx].name)) - { - fprintf(stderr, "duplicate export: %s\n", export_symbols[export_idx - 1].name); - err = kErrorDuplicate; - goto finish; + err = writeFile(fd, &uuidcmd, sizeof(uuidcmd)); + if (kErrorNone != err) { + goto finish; } - for (import_idx = 0; import_idx < export_symbols[export_idx].list_count; import_idx++) - { - - if (export_symbols[export_idx].list != &export_symbols[export_idx]) - { - printf("wild: %s, %s\n", export_symbols[export_idx].name, - export_symbols[export_idx].list[import_idx].name); - } - if (CPU_ARCH_ABI64 & target_arch->cputype) - { - struct nlist_64 nl; - - nl.n_sect = 0; - nl.n_desc = 0; - nl.n_un.n_strx = strx; - strx += export_symbols[export_idx].list[import_idx].name_len; - - if (export_symbols[export_idx].flags & kObsolete) { - nl.n_desc |= N_DESC_DISCARDED; - } - - if (export_symbols[export_idx].list[import_idx].indirect) - { - nl.n_type = N_INDR | N_EXT; - nl.n_value = strx; - strx += export_symbols[export_idx].list[import_idx].indirect_len; - } - else - { - nl.n_type = N_UNDF | N_EXT; - nl.n_value = 0; - } - - if (target_arch->byteorder != host_arch->byteorder) - swap_nlist_64(&nl, 1, target_arch->byteorder); - - err = writeFile(fd, &nl, sizeof(nl)); - } - else - { - struct nlist nl; - - nl.n_sect = 0; - nl.n_desc = 0; - nl.n_un.n_strx = strx; - strx += export_symbols[export_idx].list[import_idx].name_len; - - if (export_symbols[export_idx].flags & kObsolete) { - nl.n_desc |= N_DESC_DISCARDED; - } - - if (export_symbols[export_idx].list[import_idx].indirect) - { - nl.n_type = N_INDR | N_EXT; - nl.n_value = strx; - strx += export_symbols[export_idx].list[import_idx].indirect_len; - } - else - { - nl.n_type = N_UNDF | N_EXT; - nl.n_value = 0; - } - - if (target_arch->byteorder != host_arch->byteorder) - swap_nlist(&nl, 1, target_arch->byteorder); - - err = writeFile(fd, &nl, sizeof(nl)); - } + err = seekFile(fd, symsoffset); + if (kErrorNone != err) { + goto finish; } - if (kErrorNone != err) - goto finish; - } - - strx = sizeof(uint32_t); - err = writeFile(fd, &zero, strx); - if (kErrorNone != err) - goto finish; - - for (export_idx = 0; export_idx < num_export_syms; export_idx++) - { - if (!export_symbols[export_idx].name) - continue; - - for (import_idx = 0; import_idx < export_symbols[export_idx].list_count; import_idx++) - { - err = writeFile(fd, export_symbols[export_idx].list[import_idx].name, - 
export_symbols[export_idx].list[import_idx].name_len); - if (kErrorNone != err) + strx = 4; + for (export_idx = 0; export_idx < num_export_syms; export_idx++) { + if (!export_symbols[export_idx].name) { + continue; + } + if (!(kExported & export_symbols[export_idx].flags)) { + continue; + } + + if (export_idx + && export_symbols[export_idx - 1].name + && !strcmp(export_symbols[export_idx - 1].name, export_symbols[export_idx].name)) { + fprintf(stderr, "duplicate export: %s\n", export_symbols[export_idx - 1].name); + err = kErrorDuplicate; + goto finish; + } + + for (import_idx = 0; import_idx < export_symbols[export_idx].list_count; import_idx++) { + if (export_symbols[export_idx].list != &export_symbols[export_idx]) { + printf("wild: %s, %s\n", export_symbols[export_idx].name, + export_symbols[export_idx].list[import_idx].name); + } + if (CPU_ARCH_ABI64 & target_arch->cputype) { + struct nlist_64 nl; + + nl.n_sect = 0; + nl.n_desc = 0; + nl.n_un.n_strx = strx; + strx += export_symbols[export_idx].list[import_idx].name_len; + + if (export_symbols[export_idx].flags & kObsolete) { + nl.n_desc |= N_DESC_DISCARDED; + } + + if (export_symbols[export_idx].list[import_idx].indirect) { + nl.n_type = N_INDR | N_EXT; + nl.n_value = strx; + strx += export_symbols[export_idx].list[import_idx].indirect_len; + } else { + nl.n_type = N_UNDF | N_EXT; + nl.n_value = 0; + } + + if (target_arch->byteorder != host_arch->byteorder) { + swap_nlist_64(&nl, 1, target_arch->byteorder); + } + + err = writeFile(fd, &nl, sizeof(nl)); + } else { + struct nlist nl; + + nl.n_sect = 0; + nl.n_desc = 0; + nl.n_un.n_strx = strx; + strx += export_symbols[export_idx].list[import_idx].name_len; + + if (export_symbols[export_idx].flags & kObsolete) { + nl.n_desc |= N_DESC_DISCARDED; + } + + if (export_symbols[export_idx].list[import_idx].indirect) { + nl.n_type = N_INDR | N_EXT; + nl.n_value = strx; + strx += export_symbols[export_idx].list[import_idx].indirect_len; + } else { + nl.n_type = N_UNDF | N_EXT; + nl.n_value = 0; + } + + if (target_arch->byteorder != host_arch->byteorder) { + swap_nlist(&nl, 1, target_arch->byteorder); + } + + err = writeFile(fd, &nl, sizeof(nl)); + } + } + + if (kErrorNone != err) { + goto finish; + } + } + + strx = sizeof(uint32_t); + err = writeFile(fd, &zero, strx); + if (kErrorNone != err) { + goto finish; + } + + for (export_idx = 0; export_idx < num_export_syms; export_idx++) { + if (!export_symbols[export_idx].name) { + continue; + } + + for (import_idx = 0; import_idx < export_symbols[export_idx].list_count; import_idx++) { + err = writeFile(fd, export_symbols[export_idx].list[import_idx].name, + export_symbols[export_idx].list[import_idx].name_len); + if (kErrorNone != err) { + goto finish; + } + if (export_symbols[export_idx].list[import_idx].indirect) { + err = writeFile(fd, export_symbols[export_idx].list[import_idx].indirect, + export_symbols[export_idx].list[import_idx].indirect_len); + if (kErrorNone != err) { + goto finish; + } + } + } + } + + err = writeFile(fd, &zero, strtabpad - strtabsize); + if (kErrorNone != err) { goto finish; - if (export_symbols[export_idx].list[import_idx].indirect) - { - err = writeFile(fd, export_symbols[export_idx].list[import_idx].indirect, - export_symbols[export_idx].list[import_idx].indirect_len); - if (kErrorNone != err) - goto finish; - } } - } - err = writeFile(fd, &zero, strtabpad - strtabsize); - if (kErrorNone != err) - goto finish; - - close(fd); + close(fd); finish: - for (filenum = 0; filenum < num_files; filenum++) { - // unmap file - if 
(files[filenum].mapped_size) - { - munmap((caddr_t)files[filenum].mapped, files[filenum].mapped_size); - files[filenum].mapped = 0; - files[filenum].mapped_size = 0; - } - - } - - if (kErrorNone != err) - { - if (output_name && strncmp(output_name, "/dev/", 5)) - unlink(output_name); - exit(1); - } - else - exit(0); - return(0); -} + for (filenum = 0; filenum < num_files; filenum++) { + // unmap file + if (files[filenum].mapped_size) { + munmap((caddr_t)files[filenum].mapped, files[filenum].mapped_size); + files[filenum].mapped = 0; + files[filenum].mapped_size = 0; + } + } + if (kErrorNone != err) { + if (output_name && strncmp(output_name, "/dev/", 5)) { + unlink(output_name); + } + exit(1); + } else { + exit(0); + } + return 0; +} diff --git a/SETUP/replacecontents/replacecontents.c b/SETUP/replacecontents/replacecontents.c index 3e72b2d9d..5b14bc3da 100644 --- a/SETUP/replacecontents/replacecontents.c +++ b/SETUP/replacecontents/replacecontents.c @@ -37,7 +37,8 @@ void usage(void); -int main(int argc, char * argv[]) +int +main(int argc, char * argv[]) { struct stat sb; char *newcontent = NULL; @@ -55,18 +56,19 @@ int main(int argc, char * argv[]) dst = argv[1]; - for (i=2; i < argc; i++) { + for (i = 2; i < argc; i++) { newcontentlength += strlen(argv[i]) + 1 /* space or newline */; } newcontentlength += 1; /* NUL */ newcontent = malloc(newcontentlength); - if (newcontent == NULL) + if (newcontent == NULL) { err(EX_UNAVAILABLE, "malloc() failed"); + } newcontent[0] = '\0'; - for (i=2; i < argc; i++) { + for (i = 2; i < argc; i++) { strlcat(newcontent, argv[i], newcontentlength); if (i < argc - 1) { strlcat(newcontent, " ", newcontentlength); @@ -76,15 +78,18 @@ int main(int argc, char * argv[]) } dstfd = open(dst, O_RDWR | O_CREAT | O_APPEND, DEFFILEMODE); - if (dstfd < 0) + if (dstfd < 0) { err(EX_NOINPUT, "open(%s)", dst); + } ret = fstat(dstfd, &sb); - if (ret < 0) + if (ret < 0) { err(EX_NOINPUT, "fstat(%s)", dst); + } - if (!S_ISREG(sb.st_mode)) + if (!S_ISREG(sb.st_mode)) { err(EX_USAGE, "%s is not a regular file", dst); + } if (sb.st_size != newcontentlength) { /* obvious new content must be different than old */ @@ -92,46 +97,53 @@ int main(int argc, char * argv[]) } oldcontent = malloc(newcontentlength); - if (oldcontent == NULL) + if (oldcontent == NULL) { err(EX_UNAVAILABLE, "malloc(%lu) failed", newcontentlength); + } readsize = read(dstfd, oldcontent, newcontentlength); - if (readsize == -1) + if (readsize == -1) { err(EX_UNAVAILABLE, "read() failed"); - else if (readsize != newcontentlength) + } else if (readsize != newcontentlength) { errx(EX_UNAVAILABLE, "short read of file"); + } if (0 == memcmp(oldcontent, newcontent, newcontentlength)) { /* binary comparison succeeded, just exit */ free(oldcontent); ret = close(dstfd); - if (ret < 0) + if (ret < 0) { err(EX_UNAVAILABLE, "close() failed"); + } exit(0); } replace: ret = ftruncate(dstfd, 0); - if (ret < 0) + if (ret < 0) { err(EX_UNAVAILABLE, "ftruncate() failed"); + } writesize = write(dstfd, newcontent, newcontentlength); - if (writesize == -1) + if (writesize == -1) { err(EX_UNAVAILABLE, "write() failed"); - else if (writesize != newcontentlength) + } else if (writesize != newcontentlength) { errx(EX_UNAVAILABLE, "short write of file"); + } ret = close(dstfd); - if (ret < 0) + if (ret < 0) { err(EX_NOINPUT, "close(dst)"); + } return 0; } -void usage(void) +void +usage(void) { fprintf(stderr, "Usage: %s <...>\n", - getprogname()); + getprogname()); exit(EX_USAGE); } diff --git a/SETUP/setsegname/setsegname.c 
b/SETUP/setsegname/setsegname.c index ab3dd410f..bd15b0025 100644 --- a/SETUP/setsegname/setsegname.c +++ b/SETUP/setsegname/setsegname.c @@ -36,17 +36,17 @@ static int writeFile(int fd, const void * data, size_t length) { - int error = 0; + int error = 0; - if (length != (size_t)write(fd, data, length)) { - error = -1; - } + if (length != (size_t)write(fd, data, length)) { + error = -1; + } - if (error != 0) { - perror("couldn't write output"); - } + if (error != 0) { + perror("couldn't write output"); + } - return error; + return error; } /********************************************************************* @@ -54,217 +54,218 @@ writeFile(int fd, const void * data, size_t length) static int readFile(const char *path, vm_offset_t * objAddr, vm_size_t * objSize) { - int error = -1; - int fd; - struct stat stat_buf; - - *objAddr = 0; - *objSize = 0; - - do { - if ((fd = open(path, O_RDONLY)) == -1) { - continue; - } - - if (fstat(fd, &stat_buf) == -1) { - continue; - } - - if (0 == (stat_buf.st_mode & S_IFREG)) { - continue; - } - - if (0 == stat_buf.st_size) { - error = 0; - continue; - } - - *objSize = stat_buf.st_size; - - *objAddr = (vm_offset_t)mmap(NULL /* address */, *objSize, - PROT_READ|PROT_WRITE, MAP_FILE|MAP_PRIVATE /* flags */, - fd, 0 /* offset */); - - if ((void *)*objAddr == MAP_FAILED) { - *objAddr = 0; - *objSize = 0; - continue; - } - - error = 0; - - } while (false); - - if (-1 != fd) { - close(fd); - } - if (error) { - fprintf(stderr, "couldn't read %s: %s\n", path, strerror(errno)); - } - - return error; + int error = -1; + int fd; + struct stat stat_buf; + + *objAddr = 0; + *objSize = 0; + + do { + if ((fd = open(path, O_RDONLY)) == -1) { + continue; + } + + if (fstat(fd, &stat_buf) == -1) { + continue; + } + + if (0 == (stat_buf.st_mode & S_IFREG)) { + continue; + } + + if (0 == stat_buf.st_size) { + error = 0; + continue; + } + + *objSize = stat_buf.st_size; + + *objAddr = (vm_offset_t)mmap(NULL /* address */, *objSize, + PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE /* flags */, + fd, 0 /* offset */); + + if ((void *)*objAddr == MAP_FAILED) { + *objAddr = 0; + *objSize = 0; + continue; + } + + error = 0; + } while (false); + + if (-1 != fd) { + close(fd); + } + if (error) { + fprintf(stderr, "couldn't read %s: %s\n", path, strerror(errno)); + } + + return error; } static void usage(void) { - fprintf(stderr, "Usage: %s [-s OLDSEGNAME] -n NEWSEGNAME input -o output\n", getprogname()); - exit(1); + fprintf(stderr, "Usage: %s [-s OLDSEGNAME] -n NEWSEGNAME input -o output\n", getprogname()); + exit(1); } /********************************************************************* *********************************************************************/ -int main(int argc, char * argv[]) +int +main(int argc, char * argv[]) { - int error; - const char * output_name = NULL; - const char * input_name = NULL; - const char * oldseg_name = NULL; - const char * newseg_name = NULL; - struct mach_header * hdr; - struct mach_header_64 * hdr64; - struct load_command * cmds; - boolean_t swap = false; - uint32_t ncmds, cmdtype; - uint32_t len; - vm_offset_t input; - vm_size_t input_size; - uint32_t nsects = 0; - uint32_t * flags = NULL; - uint32_t attr; - typedef char segname_t[16]; - segname_t * names = NULL; - int ch; - - - while ((ch = getopt(argc, argv, "s:n:o:")) != -1) { - switch (ch) { - case 's': - oldseg_name = optarg; - break; - case 'n': - newseg_name = optarg; - break; - case 'o': - output_name = optarg; - break; - case '?': - default: - usage(); - } - } - - argc -= optind; - 
argv += optind; + int error; + const char * output_name = NULL; + const char * input_name = NULL; + const char * oldseg_name = NULL; + const char * newseg_name = NULL; + struct mach_header * hdr; + struct mach_header_64 * hdr64; + struct load_command * cmds; + boolean_t swap = false; + uint32_t ncmds, cmdtype; + uint32_t len; + vm_offset_t input; + vm_size_t input_size; + uint32_t nsects = 0; + uint32_t * flags = NULL; + uint32_t attr; + typedef char segname_t[16]; + segname_t * names = NULL; + int ch; + + + while ((ch = getopt(argc, argv, "s:n:o:")) != -1) { + switch (ch) { + case 's': + oldseg_name = optarg; + break; + case 'n': + newseg_name = optarg; + break; + case 'o': + output_name = optarg; + break; + case '?': + default: + usage(); + } + } + + argc -= optind; + argv += optind; if ((argc != 1) || !newseg_name || !output_name) { - usage(); - } - - input_name = argv[0]; - - error = readFile(input_name, &input, &input_size); - if (error) { - exit(1); - } - - hdr = (typeof(hdr)) input; - switch (hdr->magic) { - case MH_CIGAM: - swap = true; - // fall thru - case MH_MAGIC: - ncmds = hdr->ncmds; - cmds = (typeof(cmds)) (hdr+1); - break; - - case MH_CIGAM_64: - swap = true; - // fall thru - case MH_MAGIC_64: - hdr64 = (typeof(hdr64)) hdr; - ncmds = hdr64->ncmds; - cmds = (typeof(cmds)) (hdr64+1); - break; - - default: - fprintf(stderr, "not macho input file\n"); - exit(1); - break; - } - - if (swap) { - ncmds = OSSwapInt32(ncmds); - } - while (ncmds--) { - cmdtype = cmds->cmd; - if (swap) { - cmdtype = OSSwapInt32(cmdtype); - } - nsects = 0; - len = 0; - if (LC_SEGMENT == cmdtype) { - struct segment_command * segcmd; - struct section * sects; - - segcmd = (typeof(segcmd)) cmds; - nsects = segcmd->nsects; - sects = (typeof(sects))(segcmd + 1); - names = §s->segname; - flags = §s->flags; - len = sizeof(*sects); - } else if (LC_SEGMENT_64 == cmdtype) { - struct segment_command_64 * segcmd; - struct section_64 * sects; - - segcmd = (typeof(segcmd)) cmds; - nsects = segcmd->nsects; - sects = (typeof(sects))(segcmd + 1); - names = §s->segname; - flags = §s->flags; - len = sizeof(*sects); - } - - if (swap) - nsects = OSSwapInt32(nsects); - while (nsects--) { - attr = *flags; - if (swap) { - attr = OSSwapInt32(attr); - } - - if (!(S_ATTR_DEBUG & attr)) { - if (!oldseg_name || - 0 == strncmp(oldseg_name, (char *)names, sizeof(*names))) { - memset(names, 0x0, sizeof(*names)); - strncpy((char *)names, newseg_name, sizeof(*names)); - } - } - - names = (typeof(names))(((uintptr_t) names) + len); - flags = (typeof(flags))(((uintptr_t) flags) + len); - } - - len = cmds->cmdsize; - if (swap) { - len = OSSwapInt32(len); - } - cmds = (typeof(cmds))(((uintptr_t) cmds) + len); - } - - int fd = open(output_name, O_WRONLY|O_CREAT|O_TRUNC, 0755); - if (-1 == fd) { - error = -1; - } else { - error = writeFile(fd, (const void *) input, input_size); - close(fd); - } - - if (error) { - fprintf(stderr, "couldn't write output: %s\n", strerror(errno)); - exit(1); - } - - exit(0); - return 0; + usage(); + } + + input_name = argv[0]; + + error = readFile(input_name, &input, &input_size); + if (error) { + exit(1); + } + + hdr = (typeof(hdr))input; + switch (hdr->magic) { + case MH_CIGAM: + swap = true; + // fall thru + case MH_MAGIC: + ncmds = hdr->ncmds; + cmds = (typeof(cmds))(hdr + 1); + break; + + case MH_CIGAM_64: + swap = true; + // fall thru + case MH_MAGIC_64: + hdr64 = (typeof(hdr64))hdr; + ncmds = hdr64->ncmds; + cmds = (typeof(cmds))(hdr64 + 1); + break; + + default: + fprintf(stderr, "not macho input 
file\n"); + exit(1); + break; + } + + if (swap) { + ncmds = OSSwapInt32(ncmds); + } + while (ncmds--) { + cmdtype = cmds->cmd; + if (swap) { + cmdtype = OSSwapInt32(cmdtype); + } + nsects = 0; + len = 0; + if (LC_SEGMENT == cmdtype) { + struct segment_command * segcmd; + struct section * sects; + + segcmd = (typeof(segcmd))cmds; + nsects = segcmd->nsects; + sects = (typeof(sects))(segcmd + 1); + names = §s->segname; + flags = §s->flags; + len = sizeof(*sects); + } else if (LC_SEGMENT_64 == cmdtype) { + struct segment_command_64 * segcmd; + struct section_64 * sects; + + segcmd = (typeof(segcmd))cmds; + nsects = segcmd->nsects; + sects = (typeof(sects))(segcmd + 1); + names = §s->segname; + flags = §s->flags; + len = sizeof(*sects); + } + + if (swap) { + nsects = OSSwapInt32(nsects); + } + while (nsects--) { + attr = *flags; + if (swap) { + attr = OSSwapInt32(attr); + } + + if (!(S_ATTR_DEBUG & attr)) { + if (!oldseg_name || + 0 == strncmp(oldseg_name, (char *)names, sizeof(*names))) { + memset(names, 0x0, sizeof(*names)); + strncpy((char *)names, newseg_name, sizeof(*names)); + } + } + + names = (typeof(names))(((uintptr_t) names) + len); + flags = (typeof(flags))(((uintptr_t) flags) + len); + } + + len = cmds->cmdsize; + if (swap) { + len = OSSwapInt32(len); + } + cmds = (typeof(cmds))(((uintptr_t) cmds) + len); + } + + int fd = open(output_name, O_WRONLY | O_CREAT | O_TRUNC, 0755); + if (-1 == fd) { + error = -1; + } else { + error = writeFile(fd, (const void *) input, input_size); + close(fd); + } + + if (error) { + fprintf(stderr, "couldn't write output: %s\n", strerror(errno)); + exit(1); + } + + exit(0); + return 0; } diff --git a/bsd/arm/_limits.h b/bsd/arm/_limits.h index f3d3fcb2c..08234b7ff 100644 --- a/bsd/arm/_limits.h +++ b/bsd/arm/_limits.h @@ -1,9 +1,9 @@ /* * Copyright (c) 2004-2007 Apple Inc. All rights reserved. */ -#ifndef _ARM__LIMITS_H_ -#define _ARM__LIMITS_H_ +#ifndef _ARM__LIMITS_H_ +#define _ARM__LIMITS_H_ -#define __DARWIN_CLK_TCK 100 /* ticks per second */ +#define __DARWIN_CLK_TCK 100 /* ticks per second */ -#endif /* _ARM__LIMITS_H_ */ +#endif /* _ARM__LIMITS_H_ */ diff --git a/bsd/arm/_mcontext.h b/bsd/arm/_mcontext.h index 7d03ebe75..a78adee3b 100644 --- a/bsd/arm/_mcontext.h +++ b/bsd/arm/_mcontext.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,18 +38,18 @@ #define _STRUCT_MCONTEXT32 struct __darwin_mcontext32 _STRUCT_MCONTEXT32 { - _STRUCT_ARM_EXCEPTION_STATE __es; - _STRUCT_ARM_THREAD_STATE __ss; - _STRUCT_ARM_VFP_STATE __fs; + _STRUCT_ARM_EXCEPTION_STATE __es; + _STRUCT_ARM_THREAD_STATE __ss; + _STRUCT_ARM_VFP_STATE __fs; }; #else /* !__DARWIN_UNIX03 */ #define _STRUCT_MCONTEXT32 struct mcontext32 _STRUCT_MCONTEXT32 { - _STRUCT_ARM_EXCEPTION_STATE es; - _STRUCT_ARM_THREAD_STATE ss; - _STRUCT_ARM_VFP_STATE fs; + _STRUCT_ARM_EXCEPTION_STATE es; + _STRUCT_ARM_THREAD_STATE ss; + _STRUCT_ARM_VFP_STATE fs; }; #endif /* __DARWIN_UNIX03 */ @@ -58,21 +58,21 @@ _STRUCT_MCONTEXT32 #ifndef _STRUCT_MCONTEXT64 #if __DARWIN_UNIX03 -#define _STRUCT_MCONTEXT64 struct __darwin_mcontext64 +#define _STRUCT_MCONTEXT64 struct __darwin_mcontext64 _STRUCT_MCONTEXT64 { - _STRUCT_ARM_EXCEPTION_STATE64 __es; - _STRUCT_ARM_THREAD_STATE64 __ss; - _STRUCT_ARM_NEON_STATE64 __ns; + _STRUCT_ARM_EXCEPTION_STATE64 __es; + _STRUCT_ARM_THREAD_STATE64 __ss; + _STRUCT_ARM_NEON_STATE64 __ns; }; #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_MCONTEXT64 struct mcontext64 +#define _STRUCT_MCONTEXT64 struct mcontext64 _STRUCT_MCONTEXT64 { - _STRUCT_ARM_EXCEPTION_STATE64 es; - _STRUCT_ARM_THREAD_STATE64 ss; - _STRUCT_ARM_NEON_STATE64 ns; + _STRUCT_ARM_EXCEPTION_STATE64 es; + _STRUCT_ARM_THREAD_STATE64 ss; + _STRUCT_ARM_NEON_STATE64 ns; }; #endif /* __DARWIN_UNIX03 */ #endif /* _STRUCT_MCONTEXT32 */ @@ -80,11 +80,11 @@ _STRUCT_MCONTEXT64 #ifndef _MCONTEXT_T #define _MCONTEXT_T #if defined(__arm64__) -typedef _STRUCT_MCONTEXT64 *mcontext_t; +typedef _STRUCT_MCONTEXT64 *mcontext_t; #define _STRUCT_MCONTEXT _STRUCT_MCONTEXT64 #else -typedef _STRUCT_MCONTEXT32 *mcontext_t; -#define _STRUCT_MCONTEXT _STRUCT_MCONTEXT32 +typedef _STRUCT_MCONTEXT32 *mcontext_t; +#define _STRUCT_MCONTEXT _STRUCT_MCONTEXT32 #endif #endif /* _MCONTEXT_T */ diff --git a/bsd/arm/_param.h b/bsd/arm/_param.h index 2d1e03a96..1a8787637 100644 --- a/bsd/arm/_param.h +++ b/bsd/arm/_param.h @@ -12,8 +12,8 @@ * data types (int, long, ...). The result is unsigned int and must be * cast to any desired pointer type. */ -#define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1) -#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES) +#define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1) +#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES) #define __DARWIN_ALIGNBYTES32 (sizeof(__uint32_t) - 1) #define __DARWIN_ALIGN32(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32) diff --git a/bsd/arm/_types.h b/bsd/arm/_types.h index d76d8a64b..6b98f12bf 100644 --- a/bsd/arm/_types.h +++ b/bsd/arm/_types.h @@ -1,8 +1,8 @@ /* * Copyright (c) 2000-2007 Apple Inc. All rights reserved. */ -#ifndef _BSD_ARM__TYPES_H_ -#define _BSD_ARM__TYPES_H_ +#ifndef _BSD_ARM__TYPES_H_ +#define _BSD_ARM__TYPES_H_ /* * This header file contains integer types. 
It's intended to also contain @@ -10,20 +10,20 @@ */ #ifdef __GNUC__ -typedef __signed char __int8_t; -#else /* !__GNUC__ */ -typedef char __int8_t; -#endif /* !__GNUC__ */ -typedef unsigned char __uint8_t; -typedef short __int16_t; -typedef unsigned short __uint16_t; -typedef int __int32_t; -typedef unsigned int __uint32_t; -typedef long long __int64_t; -typedef unsigned long long __uint64_t; +typedef __signed char __int8_t; +#else /* !__GNUC__ */ +typedef char __int8_t; +#endif /* !__GNUC__ */ +typedef unsigned char __uint8_t; +typedef short __int16_t; +typedef unsigned short __uint16_t; +typedef int __int32_t; +typedef unsigned int __uint32_t; +typedef long long __int64_t; +typedef unsigned long long __uint64_t; -typedef long __darwin_intptr_t; -typedef unsigned int __darwin_natural_t; +typedef long __darwin_intptr_t; +typedef unsigned int __darwin_natural_t; /* * The rune type below is declared to be an ``int'' instead of the more natural @@ -43,56 +43,56 @@ typedef unsigned int __darwin_natural_t; * character set plus one extra value (WEOF). wint_t must be at least 16 bits. */ -typedef int __darwin_ct_rune_t; /* ct_rune_t */ +typedef int __darwin_ct_rune_t; /* ct_rune_t */ /* * mbstate_t is an opaque object to keep conversion state, during multibyte * stream conversions. The content must not be referenced by user programs. */ typedef union { - char __mbstate8[128]; - long long _mbstateL; /* for alignment */ + char __mbstate8[128]; + long long _mbstateL; /* for alignment */ } __mbstate_t; -typedef __mbstate_t __darwin_mbstate_t; /* mbstate_t */ +typedef __mbstate_t __darwin_mbstate_t; /* mbstate_t */ #if defined(__PTRDIFF_TYPE__) -typedef __PTRDIFF_TYPE__ __darwin_ptrdiff_t; /* ptr1 - ptr2 */ +typedef __PTRDIFF_TYPE__ __darwin_ptrdiff_t; /* ptr1 - ptr2 */ #elif defined(__LP64__) -typedef long __darwin_ptrdiff_t; /* ptr1 - ptr2 */ +typedef long __darwin_ptrdiff_t; /* ptr1 - ptr2 */ #else -typedef int __darwin_ptrdiff_t; /* ptr1 - ptr2 */ +typedef int __darwin_ptrdiff_t; /* ptr1 - ptr2 */ #endif /* __GNUC__ */ #if defined(__SIZE_TYPE__) -typedef __SIZE_TYPE__ __darwin_size_t; /* sizeof() */ +typedef __SIZE_TYPE__ __darwin_size_t; /* sizeof() */ #else -typedef unsigned long __darwin_size_t; /* sizeof() */ +typedef unsigned long __darwin_size_t; /* sizeof() */ #endif #if (__GNUC__ > 2) -typedef __builtin_va_list __darwin_va_list; /* va_list */ +typedef __builtin_va_list __darwin_va_list; /* va_list */ #else -typedef void * __darwin_va_list; /* va_list */ +typedef void * __darwin_va_list; /* va_list */ #endif #if defined(__WCHAR_TYPE__) -typedef __WCHAR_TYPE__ __darwin_wchar_t; /* wchar_t */ +typedef __WCHAR_TYPE__ __darwin_wchar_t; /* wchar_t */ #else -typedef __darwin_ct_rune_t __darwin_wchar_t; /* wchar_t */ +typedef __darwin_ct_rune_t __darwin_wchar_t; /* wchar_t */ #endif -typedef __darwin_wchar_t __darwin_rune_t; /* rune_t */ +typedef __darwin_wchar_t __darwin_rune_t; /* rune_t */ #if defined(__WINT_TYPE__) -typedef __WINT_TYPE__ __darwin_wint_t; /* wint_t */ +typedef __WINT_TYPE__ __darwin_wint_t; /* wint_t */ #else -typedef __darwin_ct_rune_t __darwin_wint_t; /* wint_t */ +typedef __darwin_ct_rune_t __darwin_wint_t; /* wint_t */ #endif -typedef unsigned long __darwin_clock_t; /* clock() */ -typedef __uint32_t __darwin_socklen_t; /* socklen_t (duh) */ -typedef long __darwin_ssize_t; /* byte count or error */ -typedef long __darwin_time_t; /* time() */ +typedef unsigned long __darwin_clock_t; /* clock() */ +typedef __uint32_t __darwin_socklen_t; /* socklen_t (duh) */ +typedef long 
__darwin_ssize_t; /* byte count or error */ +typedef long __darwin_time_t; /* time() */ -#endif /* _BSD_ARM__TYPES_H_ */ +#endif /* _BSD_ARM__TYPES_H_ */ diff --git a/bsd/arm/disklabel.h b/bsd/arm/disklabel.h index 966f66d50..0bc7a994a 100644 --- a/bsd/arm/disklabel.h +++ b/bsd/arm/disklabel.h @@ -7,14 +7,14 @@ #include #ifdef __APPLE_API_OBSOLETE -#define LABELSECTOR (1024 / DEV_BSIZE) /* sector containing label */ -#define LABELOFFSET 0 /* offset of label in sector */ -#define MAXPARTITIONS 8 /* number of partitions */ -#define RAW_PART 2 /* raw partition: xx?c */ +#define LABELSECTOR (1024 / DEV_BSIZE) /* sector containing label */ +#define LABELOFFSET 0 /* offset of label in sector */ +#define MAXPARTITIONS 8 /* number of partitions */ +#define RAW_PART 2 /* raw partition: xx?c */ /* Just a dummy */ struct cpu_disklabel { - int cd_dummy; /* must have one element. */ + int cd_dummy; /* must have one element. */ }; #endif /* __APPLE_API_OBSOLETE */ diff --git a/bsd/arm/endian.h b/bsd/arm/endian.h index 6cd67268d..9a23f103e 100644 --- a/bsd/arm/endian.h +++ b/bsd/arm/endian.h @@ -40,7 +40,7 @@ */ #ifndef _ARM__ENDIAN_H_ -#define _ARM__ENDIAN_H_ +#define _ARM__ENDIAN_H_ #include /* @@ -58,19 +58,19 @@ * Definitions for byte order, according to byte significance from low * address to high. */ -#define __DARWIN_LITTLE_ENDIAN 1234 /* LSB first: i386, vax */ -#define __DARWIN_BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */ -#define __DARWIN_PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */ +#define __DARWIN_LITTLE_ENDIAN 1234 /* LSB first: i386, vax */ +#define __DARWIN_BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */ +#define __DARWIN_PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */ -#define __DARWIN_BYTE_ORDER __DARWIN_LITTLE_ENDIAN +#define __DARWIN_BYTE_ORDER __DARWIN_LITTLE_ENDIAN -#if defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) +#if defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -#define LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN -#define BIG_ENDIAN __DARWIN_BIG_ENDIAN -#define PDP_ENDIAN __DARWIN_PDP_ENDIAN +#define LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN +#define BIG_ENDIAN __DARWIN_BIG_ENDIAN +#define PDP_ENDIAN __DARWIN_PDP_ENDIAN -#define BYTE_ORDER __DARWIN_BYTE_ORDER +#define BYTE_ORDER __DARWIN_BYTE_ORDER #include diff --git a/bsd/arm/exec.h b/bsd/arm/exec.h index e1266aacd..ed29b140f 100644 --- a/bsd/arm/exec.h +++ b/bsd/arm/exec.h @@ -42,15 +42,15 @@ #ifdef BSD_KERNEL_PRIVATE /* Size of a page in an object file. */ -#define __LDPGSZ 4096 +#define __LDPGSZ 4096 /* Valid magic number check. */ -#define N_BADMAG(ex) \ +#define N_BADMAG(ex) \ ((ex).a_magic != NMAGIC && (ex).a_magic != OMAGIC && \ (ex).a_magic != ZMAGIC) /* Address of the bottom of the text segment. */ -#define N_TXTADDR(X) 0 +#define N_TXTADDR(X) 0 /* Address of the bottom of the data segment. */ #define N_DATADDR(ex) \ @@ -58,11 +58,11 @@ : __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1)))) /* Text segment offset. */ -#define N_TXTOFF(ex) \ +#define N_TXTOFF(ex) \ ((ex).a_magic == ZMAGIC ? __LDPGSZ : sizeof(struct exec)) /* Data segment offset. */ -#define N_DATOFF(ex) \ +#define N_DATOFF(ex) \ (N_TXTOFF(ex) + ((ex).a_magic != ZMAGIC ? (ex).a_text : \ __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1)))) @@ -72,23 +72,23 @@ (ex).a_drsize) /* String table offset. */ -#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms) +#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms) /* Description of the object file header (a.out format). 
*/ struct exec { -#define OMAGIC 0407 /* old impure format */ -#define NMAGIC 0410 /* read-only text */ -#define ZMAGIC 0413 /* demand load format */ -#define QMAGIC 0314 /* demand load format. Header in text. */ - unsigned int a_magic; /* magic number */ +#define OMAGIC 0407 /* old impure format */ +#define NMAGIC 0410 /* read-only text */ +#define ZMAGIC 0413 /* demand load format */ +#define QMAGIC 0314 /* demand load format. Header in text. */ + unsigned int a_magic; /* magic number */ - unsigned int a_text; /* text segment size */ - unsigned int a_data; /* initialized data size */ - unsigned int a_bss; /* uninitialized data size */ - unsigned int a_syms; /* symbol table size */ - unsigned int a_entry; /* entry point */ - unsigned int a_trsize; /* text relocation size */ - unsigned int a_drsize; /* data relocation size */ + unsigned int a_text; /* text segment size */ + unsigned int a_data; /* initialized data size */ + unsigned int a_bss; /* uninitialized data size */ + unsigned int a_syms; /* symbol table size */ + unsigned int a_entry; /* entry point */ + unsigned int a_trsize; /* text relocation size */ + unsigned int a_drsize; /* data relocation size */ }; #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/arm/fasttrap_isa.h b/bsd/arm/fasttrap_isa.h index e72118ebc..823ecc583 100644 --- a/bsd/arm/fasttrap_isa.h +++ b/bsd/arm/fasttrap_isa.h @@ -27,15 +27,15 @@ * Use is subject to license terms. */ -#ifndef _FASTTRAP_ISA_H -#define _FASTTRAP_ISA_H +#ifndef _FASTTRAP_ISA_H +#define _FASTTRAP_ISA_H /* #pragma ident "@(#)fasttrap_isa.h 1.4 05/06/08 SMI" */ #include #include -#ifdef __cplusplus +#ifdef __cplusplus extern "C" { #endif @@ -50,72 +50,72 @@ typedef union { typedef struct fasttrap_machtp { fasttrap_instr_t ftmt_instr; /* original instruction */ - uint8_t ftmt_fntype; /* One of the FASTTRAP_FN* constants defined below */ + uint8_t ftmt_fntype; /* One of the FASTTRAP_FN* constants defined below */ /* Once the tracepoint is initialized, fntype will be FN_DONE_INIT and thumb will be 0 for ARM, 1 for Thumb */ - uint8_t ftmt_thumb; + uint8_t ftmt_thumb; - uint8_t ftmt_type; - uint8_t ftmt_installed:1; - uint8_t ftmt_retired:1; + uint8_t ftmt_type; + uint8_t ftmt_installed:1; + uint8_t ftmt_retired:1; } fasttrap_machtp_t; -#define ftt_instr ftt_mtp.ftmt_instr.instr32 -#define ftt_instr1 ftt_mtp.ftmt_instr.instr16.instr1 -#define ftt_instr2 ftt_mtp.ftmt_instr.instr16.instr2 -#define ftt_fntype ftt_mtp.ftmt_fntype -#define ftt_thumb ftt_mtp.ftmt_thumb -#define ftt_type ftt_mtp.ftmt_type -#define ftt_installed ftt_mtp.ftmt_installed -#define ftt_retired ftt_mtp.ftmt_retired - -#define FASTTRAP_T_INV 1 -#define FASTTRAP_T_COMMON 2 -#define FASTTRAP_T_BLX 3 -#define FASTTRAP_T_B_COND 4 -#define FASTTRAP_T_B_UNCOND 5 -#define FASTTRAP_T_BX_REG 6 -#define FASTTRAP_T_PUSH_LR 7 -#define FASTTRAP_T_POP_PC 8 -#define FASTTRAP_T_STM_LR 9 -#define FASTTRAP_T_LDM_PC 10 -#define FASTTRAP_T_CPY_PC 11 -#define FASTTRAP_T_MOV_PC_REG 12 -#define FASTTRAP_T_LDR_PC_IMMED 13 -#define FASTTRAP_T_VLDR_PC_IMMED 14 -#define FASTTRAP_T_CB_N_Z 15 +#define ftt_instr ftt_mtp.ftmt_instr.instr32 +#define ftt_instr1 ftt_mtp.ftmt_instr.instr16.instr1 +#define ftt_instr2 ftt_mtp.ftmt_instr.instr16.instr2 +#define ftt_fntype ftt_mtp.ftmt_fntype +#define ftt_thumb ftt_mtp.ftmt_thumb +#define ftt_type ftt_mtp.ftmt_type +#define ftt_installed ftt_mtp.ftmt_installed +#define ftt_retired ftt_mtp.ftmt_retired + +#define FASTTRAP_T_INV 1 +#define FASTTRAP_T_COMMON 2 +#define FASTTRAP_T_BLX 3 +#define FASTTRAP_T_B_COND 4 
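/*
 * Illustrative sketch (assumed usage, not from the original header): the
 * FASTTRAP_T_* constants classify the instruction a tracepoint overwrote,
 * so the trap handler can emulate its control flow. A hypothetical dispatch
 * on tp->ftt_type (ftt_type maps to the ftmt_type field, per the macros
 * above) might read:
 *
 *	switch (tp->ftt_type) {
 *	case FASTTRAP_T_B_COND:		// conditional branch: branch only if condition holds
 *		break;
 *	case FASTTRAP_T_BX_REG:		// branch-and-exchange: PC = register value
 *		break;
 *	default:			// FASTTRAP_T_COMMON: emulate a saved copy out of line
 *		break;
 *	}
 *
 * The case handling shown is an assumption for illustration only.
 */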
+#define FASTTRAP_T_B_UNCOND 5 +#define FASTTRAP_T_BX_REG 6 +#define FASTTRAP_T_PUSH_LR 7 +#define FASTTRAP_T_POP_PC 8 +#define FASTTRAP_T_STM_LR 9 +#define FASTTRAP_T_LDM_PC 10 +#define FASTTRAP_T_CPY_PC 11 +#define FASTTRAP_T_MOV_PC_REG 12 +#define FASTTRAP_T_LDR_PC_IMMED 13 +#define FASTTRAP_T_VLDR_PC_IMMED 14 +#define FASTTRAP_T_CB_N_Z 15 #if defined(__arm64__) -#define FASTTRAP_T_ARM64_STANDARD_FUNCTION_ENTRY 16 /* stp fp, lr, [sp, #-16]! */ -#define FASTTRAP_T_ARM64_LDR_S_PC_REL 17 -#define FASTTRAP_T_ARM64_LDR_W_PC_REL 18 -#define FASTTRAP_T_ARM64_LDR_D_PC_REL 19 -#define FASTTRAP_T_ARM64_LDR_X_PC_REL 20 -#define FASTTRAP_T_ARM64_LDR_Q_PC_REL 21 -#define FASTTRAP_T_ARM64_LDRSW_PC_REL 22 -#define FASTTRAP_T_ARM64_B_COND 23 -#define FASTTRAP_T_ARM64_CBNZ_W 24 -#define FASTTRAP_T_ARM64_CBNZ_X 25 -#define FASTTRAP_T_ARM64_CBZ_W 26 -#define FASTTRAP_T_ARM64_CBZ_X 27 -#define FASTTRAP_T_ARM64_TBNZ 28 -#define FASTTRAP_T_ARM64_TBZ 29 -#define FASTTRAP_T_ARM64_B 30 -#define FASTTRAP_T_ARM64_BL 31 -#define FASTTRAP_T_ARM64_BLR 32 -#define FASTTRAP_T_ARM64_BR 33 -#define FASTTRAP_T_ARM64_RET 34 -#define FASTTRAP_T_ARM64_ADRP 35 -#define FASTTRAP_T_ARM64_ADR 36 -#define FASTTRAP_T_ARM64_PRFM 37 -#define FASTTRAP_T_ARM64_EXCLUSIVE_MEM 38 -#define FASTTRAP_T_ARM64_RETAB 39 +#define FASTTRAP_T_ARM64_STANDARD_FUNCTION_ENTRY 16 /* stp fp, lr, [sp, #-16]! */ +#define FASTTRAP_T_ARM64_LDR_S_PC_REL 17 +#define FASTTRAP_T_ARM64_LDR_W_PC_REL 18 +#define FASTTRAP_T_ARM64_LDR_D_PC_REL 19 +#define FASTTRAP_T_ARM64_LDR_X_PC_REL 20 +#define FASTTRAP_T_ARM64_LDR_Q_PC_REL 21 +#define FASTTRAP_T_ARM64_LDRSW_PC_REL 22 +#define FASTTRAP_T_ARM64_B_COND 23 +#define FASTTRAP_T_ARM64_CBNZ_W 24 +#define FASTTRAP_T_ARM64_CBNZ_X 25 +#define FASTTRAP_T_ARM64_CBZ_W 26 +#define FASTTRAP_T_ARM64_CBZ_X 27 +#define FASTTRAP_T_ARM64_TBNZ 28 +#define FASTTRAP_T_ARM64_TBZ 29 +#define FASTTRAP_T_ARM64_B 30 +#define FASTTRAP_T_ARM64_BL 31 +#define FASTTRAP_T_ARM64_BLR 32 +#define FASTTRAP_T_ARM64_BR 33 +#define FASTTRAP_T_ARM64_RET 34 +#define FASTTRAP_T_ARM64_ADRP 35 +#define FASTTRAP_T_ARM64_ADR 36 +#define FASTTRAP_T_ARM64_PRFM 37 +#define FASTTRAP_T_ARM64_EXCLUSIVE_MEM 38 +#define FASTTRAP_T_ARM64_RETAB 39 #endif -#if defined (__arm__) +#if defined (__arm__) #define FASTTRAP_ARM_INSTR 0xe7ffdefc #define FASTTRAP_THUMB_INSTR 0xdefc #define FASTTRAP_ARM_RET_INSTR 0xe7ffdefb #define FASTTRAP_THUMB_RET_INSTR 0xdefb - + #elif defined (__arm64__) #define FASTTRAP_ARM32_INSTR 0xe7ffdefc #define FASTTRAP_THUMB32_INSTR 0xdefc @@ -124,7 +124,7 @@ typedef struct fasttrap_machtp { #define FASTTRAP_ARM32_RET_INSTR 0xe7ffdefb #define FASTTRAP_THUMB32_RET_INSTR 0xdefb #define FASTTRAP_ARM64_RET_INSTR 0xe7eeee7d -#endif +#endif #define FASTTRAP_FN_DONE_INIT 255 #define FASTTRAP_FN_UNKNOWN 0 @@ -143,94 +143,94 @@ typedef struct fasttrap_machtp { #define THUMB16_HRM(x) (((x) >> 3) & 0xF) #define THUMB16_HRD(x) (((x) & 0x7) | ((((x) >> 4) & 0x8))) -#define THUMB32_RM(x,y) ((y) & 0xF) -#define THUMB32_RD(x,y) (((y) >> 8) & 0xF) -#define THUMB32_RT(x,y) (((y) >> 12) & 0xF) -#define THUMB32_RN(x,y) ((x) & 0xF) +#define THUMB32_RM(x, y) ((y) & 0xF) +#define THUMB32_RD(x, y) (((y) >> 8) & 0xF) +#define THUMB32_RT(x, y) (((y) >> 12) & 0xF) +#define THUMB32_RN(x, y) ((x) & 0xF) #define REG_SP 13 #define REG_LR 14 #define REG_PC 15 -#define FASTTRAP_RETURN_AFRAMES 6 -#define FASTTRAP_ENTRY_AFRAMES 5 -#define FASTTRAP_OFFSET_AFRAMES 5 +#define FASTTRAP_RETURN_AFRAMES 6 +#define FASTTRAP_ENTRY_AFRAMES 5 +#define FASTTRAP_OFFSET_AFRAMES 5 #if 
defined(__arm64__) -#define FASTTRAP_ARM64_OP_VALUE_FUNC_ENTRY 0xa9bf7bfd /* stp fp, lr, [sp, #-16]! */ +#define FASTTRAP_ARM64_OP_VALUE_FUNC_ENTRY 0xa9bf7bfd /* stp fp, lr, [sp, #-16]! */ -#define FASTTRAP_ARM64_OP_MASK_LDR_S_PC_REL 0xff000000 /* Bits to check for ldr St, label */ -#define FASTTRAP_ARM64_OP_VALUE_LDR_S_PC_REL 0x1c000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_LDR_S_PC_REL 0xff000000 /* Bits to check for ldr St, label */ +#define FASTTRAP_ARM64_OP_VALUE_LDR_S_PC_REL 0x1c000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_LDR_W_PC_REL 0xff000000 /* Bits to check for ldr Wt, label */ -#define FASTTRAP_ARM64_OP_VALUE_LDR_W_PC_REL 0x18000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_LDR_W_PC_REL 0xff000000 /* Bits to check for ldr Wt, label */ +#define FASTTRAP_ARM64_OP_VALUE_LDR_W_PC_REL 0x18000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_LDR_D_PC_REL 0xff000000 /* Bits to check for ldr Dt, label */ -#define FASTTRAP_ARM64_OP_VALUE_LDR_D_PC_REL 0x5c000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_LDR_D_PC_REL 0xff000000 /* Bits to check for ldr Dt, label */ +#define FASTTRAP_ARM64_OP_VALUE_LDR_D_PC_REL 0x5c000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_LDR_X_PC_REL 0xff000000 /* Bits to check for ldr Xt, label */ -#define FASTTRAP_ARM64_OP_VALUE_LDR_X_PC_REL 0x58000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_LDR_X_PC_REL 0xff000000 /* Bits to check for ldr Xt, label */ +#define FASTTRAP_ARM64_OP_VALUE_LDR_X_PC_REL 0x58000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_LDR_Q_PC_REL 0xff000000 /* Bits to check for ldr Qt, label */ -#define FASTTRAP_ARM64_OP_VALUE_LDR_Q_PC_REL 0x9c000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_LDR_Q_PC_REL 0xff000000 /* Bits to check for ldr Qt, label */ +#define FASTTRAP_ARM64_OP_VALUE_LDR_Q_PC_REL 0x9c000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_LRDSW_PC_REL 0xff000000 /* Bits to check for ldrsw , label */ -#define FASTTRAP_ARM64_OP_VALUE_LRDSW_PC_REL 0x98000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_LRDSW_PC_REL 0xff000000 /* Bits to check for ldrsw , label */ +#define FASTTRAP_ARM64_OP_VALUE_LRDSW_PC_REL 0x98000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_B_COND_PC_REL 0xff000010 /* Bits to check for b.cond label */ -#define FASTTRAP_ARM64_OP_VALUE_B_COND_PC_REL 0x54000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_B_COND_PC_REL 0xff000010 /* Bits to check for b.cond label */ +#define FASTTRAP_ARM64_OP_VALUE_B_COND_PC_REL 0x54000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_CBNZ_W_PC_REL 0xff000000 /* Bits to check for cbnz Wt, _label */ -#define FASTTRAP_ARM64_OP_VALUE_CBNZ_W_PC_REL 0x35000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_CBNZ_W_PC_REL 0xff000000 /* Bits to check for cbnz Wt, _label */ +#define FASTTRAP_ARM64_OP_VALUE_CBNZ_W_PC_REL 0x35000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_CBNZ_X_PC_REL 0xff000000 /* Bits to check for cbnz Xt, _label */ -#define FASTTRAP_ARM64_OP_VALUE_CBNZ_X_PC_REL 0xb5000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_CBNZ_X_PC_REL 0xff000000 /* Bits to check for cbnz Xt, _label */ +#define FASTTRAP_ARM64_OP_VALUE_CBNZ_X_PC_REL 0xb5000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_CBZ_W_PC_REL 0xff000000 /* Bits to check for cbz Wt, _label */ -#define FASTTRAP_ARM64_OP_VALUE_CBZ_W_PC_REL 0x34000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_CBZ_W_PC_REL 0xff000000 /* Bits to check for cbz Wt, _label */ 
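/*
 * Illustrative sketch (assumed helper, not from the original header): each
 * OP_MASK_* / OP_VALUE_* pair above matches an instruction family by masked
 * comparison -- keep the fixed encoding bits, discard the register and
 * immediate fields, and compare:
 *
 *	static inline int
 *	fasttrap_op_matches(uint32_t opcode, uint32_t mask, uint32_t value)
 *	{
 *		return (opcode & mask) == value;
 *	}
 *
 *	// e.g. fasttrap_op_matches(op, FASTTRAP_ARM64_OP_MASK_CBZ_W_PC_REL,
 *	//          FASTTRAP_ARM64_OP_VALUE_CBZ_W_PC_REL) detects "cbz Wt, label".
 */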
+#define FASTTRAP_ARM64_OP_VALUE_CBZ_W_PC_REL 0x34000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_CBZ_X_PC_REL 0xff000000 /* Bits to check for cbz Xt, _label */ -#define FASTTRAP_ARM64_OP_VALUE_CBZ_X_PC_REL 0xb4000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_CBZ_X_PC_REL 0xff000000 /* Bits to check for cbz Xt, _label */ +#define FASTTRAP_ARM64_OP_VALUE_CBZ_X_PC_REL 0xb4000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_TBNZ_PC_REL 0x7f000000 /* Bits to check for tbnz Xt, _label */ -#define FASTTRAP_ARM64_OP_VALUE_TBNZ_PC_REL 0x37000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_TBNZ_PC_REL 0x7f000000 /* Bits to check for tbnz Xt, _label */ +#define FASTTRAP_ARM64_OP_VALUE_TBNZ_PC_REL 0x37000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_TBZ_PC_REL 0x7f000000 /* Bits to check for tbz Xt, _label */ -#define FASTTRAP_ARM64_OP_VALUE_TBZ_PC_REL 0x36000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_TBZ_PC_REL 0x7f000000 /* Bits to check for tbz Xt, _label */ +#define FASTTRAP_ARM64_OP_VALUE_TBZ_PC_REL 0x36000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_B_PC_REL 0xfc000000 /* Bits to check for b _label */ -#define FASTTRAP_ARM64_OP_VALUE_B_PC_REL 0x14000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_B_PC_REL 0xfc000000 /* Bits to check for b _label */ +#define FASTTRAP_ARM64_OP_VALUE_B_PC_REL 0x14000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_BL_PC_REL 0xfc000000 /* Bits to check for bl _label */ -#define FASTTRAP_ARM64_OP_VALUE_BL_PC_REL 0x94000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_BL_PC_REL 0xfc000000 /* Bits to check for bl _label */ +#define FASTTRAP_ARM64_OP_VALUE_BL_PC_REL 0x94000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_BLR 0xfffffe1f /* Bits to check for blr Xt */ -#define FASTTRAP_ARM64_OP_VALUE_BLR 0xd63f0000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_BLR 0xfffffe1f /* Bits to check for blr Xt */ +#define FASTTRAP_ARM64_OP_VALUE_BLR 0xd63f0000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_BR 0xfffffe1f /* Bits to check for br Xt */ -#define FASTTRAP_ARM64_OP_VALUE_BR 0xd61f0000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_BR 0xfffffe1f /* Bits to check for br Xt */ +#define FASTTRAP_ARM64_OP_VALUE_BR 0xd61f0000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_RET 0xfffffc1f /* Bits to check for ret Rt */ -#define FASTTRAP_ARM64_OP_VALUE_RET 0xd65f0000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_RET 0xfffffc1f /* Bits to check for ret Rt */ +#define FASTTRAP_ARM64_OP_VALUE_RET 0xd65f0000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_ADRP 0x9f000000 /* Bits to check for adrp Xt, label*/ -#define FASTTRAP_ARM64_OP_VALUE_ADRP 0x90000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_ADRP 0x9f000000 /* Bits to check for adrp Xt, label*/ +#define FASTTRAP_ARM64_OP_VALUE_ADRP 0x90000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_ADR 0x9f000000 /* Bits to check for adr Xt, label*/ -#define FASTTRAP_ARM64_OP_VALUE_ADR 0x10000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_ADR 0x9f000000 /* Bits to check for adr Xt, label*/ +#define FASTTRAP_ARM64_OP_VALUE_ADR 0x10000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_PRFM 0xff000000 /* Bits to check for adr Xt, label*/ -#define FASTTRAP_ARM64_OP_VALUE_PRFM 0xd8000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_PRFM 0xff000000 /* Bits to check for adr Xt, label*/ +#define FASTTRAP_ARM64_OP_VALUE_PRFM 0xd8000000 /* Value to find */ -#define 
FASTTRAP_ARM64_OP_MASK_EXCL_MEM 0x3f000000 /* Bits to check for exclusive memory operation */ -#define FASTTRAP_ARM64_OP_VALUE_EXCL_MEM 0x08000000 /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_EXCL_MEM 0x3f000000 /* Bits to check for exclusive memory operation */ +#define FASTTRAP_ARM64_OP_VALUE_EXCL_MEM 0x08000000 /* Value to find */ -#define FASTTRAP_ARM64_OP_MASK_RETAB 0xfffffc1f /* Bits to check for retab Rt */ -#define FASTTRAP_ARM64_OP_VALUE_RETAB 0xd65f0c1f /* Value to find */ +#define FASTTRAP_ARM64_OP_MASK_RETAB 0xfffffc1f /* Bits to check for retab Rt */ +#define FASTTRAP_ARM64_OP_VALUE_RETAB 0xd65f0c1f /* Value to find */ #endif /* defined(__arm64__) */ -#ifdef __cplusplus +#ifdef __cplusplus } #endif -#endif /* _FASTTRAP_ISA_H */ +#endif /* _FASTTRAP_ISA_H */ diff --git a/bsd/arm/limits.h b/bsd/arm/limits.h index 32c8033b9..1223d3ce0 100644 --- a/bsd/arm/limits.h +++ b/bsd/arm/limits.h @@ -42,11 +42,11 @@ #include #include -#define CHAR_BIT 8 /* number of bits in a char */ -#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */ +#define CHAR_BIT 8 /* number of bits in a char */ +#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */ #if !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -#define CLK_TCK __DARWIN_CLK_TCK /* ticks per second */ +#define CLK_TCK __DARWIN_CLK_TCK /* ticks per second */ #endif /* !_ANSI_SOURCE && (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* @@ -59,50 +59,50 @@ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values * are written as hex so that GCC will be quiet about large integer constants. */ -#define SCHAR_MAX 127 /* min value for a signed char */ -#define SCHAR_MIN (-128) /* max value for a signed char */ +#define SCHAR_MAX 127 /* min value for a signed char */ +#define SCHAR_MIN (-128) /* max value for a signed char */ -#define UCHAR_MAX 255 /* max value for an unsigned char */ -#define CHAR_MAX 127 /* max value for a char */ -#define CHAR_MIN (-128) /* min value for a char */ +#define UCHAR_MAX 255 /* max value for an unsigned char */ +#define CHAR_MAX 127 /* max value for a char */ +#define CHAR_MIN (-128) /* min value for a char */ -#define USHRT_MAX 65535 /* max value for an unsigned short */ -#define SHRT_MAX 32767 /* max value for a short */ -#define SHRT_MIN (-32768) /* min value for a short */ +#define USHRT_MAX 65535 /* max value for an unsigned short */ +#define SHRT_MAX 32767 /* max value for a short */ +#define SHRT_MIN (-32768) /* min value for a short */ -#define UINT_MAX 0xffffffff /* max value for an unsigned int */ -#define INT_MAX 2147483647 /* max value for an int */ -#define INT_MIN (-2147483647-1) /* min value for an int */ +#define UINT_MAX 0xffffffff /* max value for an unsigned int */ +#define INT_MAX 2147483647 /* max value for an int */ +#define INT_MIN (-2147483647-1) /* min value for an int */ #ifdef __LP64__ -#define ULONG_MAX 0xffffffffffffffffUL /* max unsigned long */ -#define LONG_MAX 0x7fffffffffffffffL /* max signed long */ -#define LONG_MIN (-0x7fffffffffffffffL-1) /* min signed long */ +#define ULONG_MAX 0xffffffffffffffffUL /* max unsigned long */ +#define LONG_MAX 0x7fffffffffffffffL /* max signed long */ +#define LONG_MIN (-0x7fffffffffffffffL-1) /* min signed long */ #else /* !__LP64__ */ -#define ULONG_MAX 0xffffffffUL /* max unsigned long */ -#define LONG_MAX 2147483647L /* max signed long */ -#define LONG_MIN (-2147483647L-1) /* min signed long */ +#define ULONG_MAX 0xffffffffUL /* max unsigned long */ +#define LONG_MAX 2147483647L /* max signed long */ +#define 
LONG_MIN (-2147483647L-1) /* min signed long */ #endif /* __LP64__ */ -#define ULLONG_MAX 0xffffffffffffffffULL /* max unsigned long long */ -#define LLONG_MAX 0x7fffffffffffffffLL /* max signed long long */ -#define LLONG_MIN (-0x7fffffffffffffffLL-1) /* min signed long long */ +#define ULLONG_MAX 0xffffffffffffffffULL /* max unsigned long long */ +#define LLONG_MAX 0x7fffffffffffffffLL /* max signed long long */ +#define LLONG_MIN (-0x7fffffffffffffffLL-1) /* min signed long long */ #if !defined(_ANSI_SOURCE) #ifdef __LP64__ -#define LONG_BIT 64 +#define LONG_BIT 64 #else /* !__LP64__ */ -#define LONG_BIT 32 +#define LONG_BIT 32 #endif /* __LP64__ */ -#define SSIZE_MAX LONG_MAX /* max value for a ssize_t */ -#define WORD_BIT 32 +#define SSIZE_MAX LONG_MAX /* max value for a ssize_t */ +#define WORD_BIT 32 #if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE) -#define SIZE_T_MAX ULONG_MAX /* max value for a size_t */ +#define SIZE_T_MAX ULONG_MAX /* max value for a size_t */ -#define UQUAD_MAX ULLONG_MAX -#define QUAD_MAX LLONG_MAX -#define QUAD_MIN LLONG_MIN +#define UQUAD_MAX ULLONG_MAX +#define QUAD_MAX LLONG_MAX +#define QUAD_MIN LLONG_MIN #endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */ #endif /* !_ANSI_SOURCE */ diff --git a/bsd/arm/param.h b/bsd/arm/param.h index 538e53418..3f7d2d89c 100644 --- a/bsd/arm/param.h +++ b/bsd/arm/param.h @@ -55,20 +55,20 @@ * data types (int, long, ...). The result is unsigned int and must be * cast to any desired pointer type. */ -#define ALIGNBYTES __DARWIN_ALIGNBYTES -#define ALIGN(p) __DARWIN_ALIGN(p) +#define ALIGNBYTES __DARWIN_ALIGNBYTES +#define ALIGN(p) __DARWIN_ALIGN(p) -#define NBPG 4096 /* bytes/page */ -#define PGOFSET (NBPG-1) /* byte offset into page */ -#define PGSHIFT 12 /* LOG2(NBPG) */ +#define NBPG 4096 /* bytes/page */ +#define PGOFSET (NBPG-1) /* byte offset into page */ +#define PGSHIFT 12 /* LOG2(NBPG) */ -#define DEV_BSIZE 512 -#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ -#define BLKDEV_IOSIZE 2048 -#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */ +#define DEV_BSIZE 512 +#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ +#define BLKDEV_IOSIZE 2048 +#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */ -#define CLSIZE 1 -#define CLSIZELOG2 0 +#define CLSIZE 1 +#define CLSIZELOG2 0 /* * Constants related to network buffer management. @@ -77,47 +77,47 @@ * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple * of the hardware page size. 
*/ -#define MSIZESHIFT 8 /* 256 */ -#define MSIZE (1 << MSIZESHIFT) /* size of an mbuf */ -#define MCLSHIFT 11 /* 2048 */ -#define MCLBYTES (1 << MCLSHIFT) /* size of an mbuf cluster */ -#define MBIGCLSHIFT 12 /* 4096 */ -#define MBIGCLBYTES (1 << MBIGCLSHIFT) /* size of a big cluster */ -#define M16KCLSHIFT 14 /* 16384 */ -#define M16KCLBYTES (1 << M16KCLSHIFT) /* size of a jumbo cluster */ - -#define MCLOFSET (MCLBYTES - 1) +#define MSIZESHIFT 8 /* 256 */ +#define MSIZE (1 << MSIZESHIFT) /* size of an mbuf */ +#define MCLSHIFT 11 /* 2048 */ +#define MCLBYTES (1 << MCLSHIFT) /* size of an mbuf cluster */ +#define MBIGCLSHIFT 12 /* 4096 */ +#define MBIGCLBYTES (1 << MBIGCLSHIFT) /* size of a big cluster */ +#define M16KCLSHIFT 14 /* 16384 */ +#define M16KCLBYTES (1 << M16KCLSHIFT) /* size of a jumbo cluster */ + +#define MCLOFSET (MCLBYTES - 1) #ifndef NMBCLUSTERS -#define NMBCLUSTERS CONFIG_NMBCLUSTERS /* cl map size */ +#define NMBCLUSTERS CONFIG_NMBCLUSTERS /* cl map size */ #endif /* * Some macros for units conversion */ /* Core clicks (NeXT_page_size bytes) to segments and vice versa */ -#define ctos(x) (x) -#define stoc(x) (x) +#define ctos(x) (x) +#define stoc(x) (x) /* Core clicks (4096 bytes) to disk blocks */ -#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT)) -#define dtoc(x) ((x)>>(PGSHIFT-DEV_BSHIFT)) -#define dtob(x) ((x)<>(PGSHIFT-DEV_BSHIFT)) +#define dtob(x) ((x)<>PGSHIFT) +#define btoc(x) (((unsigned)(x)+(NBPG-1))>>PGSHIFT) #ifdef __APPLE__ #define btodb(bytes, devBlockSize) \ - ((unsigned)(bytes) / devBlockSize) + ((unsigned)(bytes) / devBlockSize) #define dbtob(db, devBlockSize) \ - ((unsigned)(db) * devBlockSize) + ((unsigned)(db) * devBlockSize) #else -#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ +#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ ((unsigned)(bytes) >> DEV_BSHIFT) -#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ +#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ ((unsigned)(db) << DEV_BSHIFT) #endif @@ -127,21 +127,21 @@ * add an entry to cdevsw/bdevsw for that purpose. * For now though just use DEV_BSIZE. */ -#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE)) +#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE)) /* * Macros to decode (and encode) processor status word. */ -#define STATUS_WORD(rpl, ipl) (((ipl) << 8) | (rpl)) -#define USERMODE(x) (((x) & 3) == 3) -#define BASEPRI(x) (((x) & (255 << 8)) == 0) +#define STATUS_WORD(rpl, ipl) (((ipl) << 8) | (rpl)) +#define USERMODE(x) (((x) & 3) == 3) +#define BASEPRI(x) (((x) & (255 << 8)) == 0) -#if defined(KERNEL) || defined(STANDALONE) -#define DELAY(n) delay(n) +#if defined(KERNEL) || defined(STANDALONE) +#define DELAY(n) delay(n) -#else /* defined(KERNEL) || defined(STANDALONE) */ -#define DELAY(n) { int N = (n); while (--N > 0); } -#endif /* defined(KERNEL) || defined(STANDALONE) */ +#else /* defined(KERNEL) || defined(STANDALONE) */ +#define DELAY(n) { int N = (n); while (--N > 0); } +#endif /* defined(KERNEL) || defined(STANDALONE) */ #endif /* _ARM_PARAM_H_ */ diff --git a/bsd/arm/profile.h b/bsd/arm/profile.h index 728d3f99b..f5b994842 100644 --- a/bsd/arm/profile.h +++ b/bsd/arm/profile.h @@ -23,8 +23,8 @@ #warning MCOUNT_* not implemented yet. 
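/*
 * Illustrative sketch (semantics assumed from the stubbed-out bodies below,
 * not from the original header): once implemented, MCOUNT_ENTER/MCOUNT_EXIT
 * would bracket the profiler's mcount() bookkeeping with an interrupt-
 * priority raise and restore:
 *
 *	int s;
 *	MCOUNT_ENTER;	// intended: s = splhigh(); mask interrupts while counting
 *	// ... update call-graph profiling counters here ...
 *	MCOUNT_EXIT;	// intended: splx(s); restore the prior interrupt level
 */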
#define MCOUNT_INIT -#define MCOUNT_ENTER /* s = splhigh(); */ /* XXX TODO */ -#define MCOUNT_EXIT /* (void) splx(s); */ /* XXX TODO */ +#define MCOUNT_ENTER /* s = splhigh(); */ /* XXX TODO */ +#define MCOUNT_EXIT /* (void) splx(s); */ /* XXX TODO */ #endif /* __APPLE_API_UNSTABLE */ #endif /* KERNEL */ diff --git a/bsd/arm/psl.h b/bsd/arm/psl.h index 313ba2d20..b0ddca0dd 100644 --- a/bsd/arm/psl.h +++ b/bsd/arm/psl.h @@ -5,12 +5,12 @@ * Copyright (c) 1992 NeXT Computer, Inc. * */ - -#if KERNEL_PRIVATE + +#if KERNEL_PRIVATE #ifndef _BSD_ARM_PSL_H_ #define _BSD_ARM_PSL_H_ - -#endif /* _BSD_ARM_PSL_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* _BSD_ARM_PSL_H_ */ + +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/arm/reboot.h b/bsd/arm/reboot.h index 5d47728b4..0bb3b5aec 100644 --- a/bsd/arm/reboot.h +++ b/bsd/arm/reboot.h @@ -1,32 +1,32 @@ /* * Copyright (c) 2000-2007 Apple Inc. All rights reserved. */ - -#ifndef _BSD_ARM_REBOOT_H_ + +#ifndef _BSD_ARM_REBOOT_H_ #define _BSD_ARM_REBOOT_H_ /* * Empty file (publicly) */ - + #include -#ifdef BSD_KERNEL_PRIVATE +#ifdef BSD_KERNEL_PRIVATE /* * Use most significant 16 bits to avoid collisions with * machine independent flags. */ -#define RB_POWERDOWN 0x00010000 /* power down on halt */ -#define RB_NOBOOTRC 0x00020000 /* don't run '/etc/rc.boot' */ -#define RB_DEBUG 0x00040000 /* drop into mini monitor on panic */ -#define RB_EJECT 0x00080000 /* eject disks on halt */ -#define RB_COMMAND 0x00100000 /* new boot command specified */ -#define RB_NOFP 0x00200000 /* don't use floating point */ -#define RB_BOOTNEXT 0x00400000 /* reboot into NeXT */ -#define RB_BOOTDOS 0x00800000 /* reboot into DOS */ -#define RB_PRETTY 0x01000000 /* shutdown with pretty graphics */ +#define RB_POWERDOWN 0x00010000 /* power down on halt */ +#define RB_NOBOOTRC 0x00020000 /* don't run '/etc/rc.boot' */ +#define RB_DEBUG 0x00040000 /* drop into mini monitor on panic */ +#define RB_EJECT 0x00080000 /* eject disks on halt */ +#define RB_COMMAND 0x00100000 /* new boot command specified */ +#define RB_NOFP 0x00200000 /* don't use floating point */ +#define RB_BOOTNEXT 0x00400000 /* reboot into NeXT */ +#define RB_BOOTDOS 0x00800000 /* reboot into DOS */ +#define RB_PRETTY 0x01000000 /* shutdown with pretty graphics */ -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ -#endif /* _BSD_ARM_REBOOT_H_ */ +#endif /* _BSD_ARM_REBOOT_H_ */ diff --git a/bsd/arm/reg.h b/bsd/arm/reg.h index bffce0700..353887b2e 100644 --- a/bsd/arm/reg.h +++ b/bsd/arm/reg.h @@ -5,12 +5,12 @@ * Copyright (c) 1992 NeXT Computer, Inc. * */ - -#ifdef KERNEL_PRIVATE + +#ifdef KERNEL_PRIVATE #ifndef _BSD_ARM_REG_H_ #define _BSD_ARM_REG_H_ -#endif /* _BSD_ARM_REG_H_ */ +#endif /* _BSD_ARM_REG_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/arm/signal.h b/bsd/arm/signal.h index e6ed0e24c..bf19eded2 100644 --- a/bsd/arm/signal.h +++ b/bsd/arm/signal.h @@ -6,14 +6,13 @@ * */ -#ifndef _ARM_SIGNAL_ -#define _ARM_SIGNAL_ 1 +#ifndef _ARM_SIGNAL_ +#define _ARM_SIGNAL_ 1 #include #ifndef _ANSI_SOURCE -typedef int sig_atomic_t; +typedef int sig_atomic_t; #endif /* ! 
_ANSI_SOURCE */ -#endif /* _ARM_SIGNAL_ */ - +#endif /* _ARM_SIGNAL_ */ diff --git a/bsd/arm/types.h b/bsd/arm/types.h index e84405b12..f98ca5b84 100644 --- a/bsd/arm/types.h +++ b/bsd/arm/types.h @@ -39,8 +39,8 @@ * @(#)types.h 8.3 (Berkeley) 1/5/94 */ -#ifndef _MACHTYPES_H_ -#define _MACHTYPES_H_ +#ifndef _MACHTYPES_H_ +#define _MACHTYPES_H_ #ifndef __ASSEMBLER__ #include @@ -60,9 +60,9 @@ #include #if __LP64__ -typedef int64_t register_t; +typedef int64_t register_t; #else -typedef int32_t register_t; +typedef int32_t register_t; #endif #include @@ -71,24 +71,24 @@ typedef int32_t register_t; #if !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) /* These types are used for reserving the largest possible size. */ #ifdef __arm64__ -typedef u_int64_t user_addr_t; -typedef u_int64_t user_size_t; -typedef int64_t user_ssize_t; -typedef int64_t user_long_t; -typedef u_int64_t user_ulong_t; -typedef int64_t user_time_t; -typedef int64_t user_off_t; +typedef u_int64_t user_addr_t; +typedef u_int64_t user_size_t; +typedef int64_t user_ssize_t; +typedef int64_t user_long_t; +typedef u_int64_t user_ulong_t; +typedef int64_t user_time_t; +typedef int64_t user_off_t; #else -typedef u_int32_t user_addr_t; -typedef u_int32_t user_size_t; -typedef int32_t user_ssize_t; -typedef int32_t user_long_t; -typedef u_int32_t user_ulong_t; -typedef int32_t user_time_t; -typedef int64_t user_off_t; +typedef u_int32_t user_addr_t; +typedef u_int32_t user_size_t; +typedef int32_t user_ssize_t; +typedef int32_t user_long_t; +typedef u_int32_t user_ulong_t; +typedef int32_t user_time_t; +typedef int64_t user_off_t; #endif -#define USER_ADDR_NULL ((user_addr_t) 0) +#define USER_ADDR_NULL ((user_addr_t) 0) #define CAST_USER_ADDR_T(a_ptr) ((user_addr_t)((uintptr_t)(a_ptr))) #ifdef KERNEL @@ -114,20 +114,20 @@ typedef int64_t user_off_t; * of 4, even for 8-byte quantites. */ -typedef __uint64_t user64_addr_t; -typedef __uint64_t user64_size_t; -typedef __int64_t user64_ssize_t; -typedef __int64_t user64_long_t; -typedef __uint64_t user64_ulong_t; -typedef __int64_t user64_time_t; -typedef __int64_t user64_off_t; - -typedef __uint32_t user32_addr_t; -typedef __uint32_t user32_size_t; -typedef __int32_t user32_ssize_t; -typedef __int32_t user32_long_t; -typedef __uint32_t user32_ulong_t; -typedef __int32_t user32_time_t; +typedef __uint64_t user64_addr_t; +typedef __uint64_t user64_size_t; +typedef __int64_t user64_ssize_t; +typedef __int64_t user64_long_t; +typedef __uint64_t user64_ulong_t; +typedef __int64_t user64_time_t; +typedef __int64_t user64_off_t; + +typedef __uint32_t user32_addr_t; +typedef __uint32_t user32_size_t; +typedef __int32_t user32_ssize_t; +typedef __int32_t user32_long_t; +typedef __uint32_t user32_ulong_t; +typedef __int32_t user32_time_t; /* * This alignment is required to ensure symmetry between userspace and kernelspace @@ -136,9 +136,9 @@ typedef __int32_t user32_time_t; * ABI so this alignment isn't needed for ARM. 
*/ #if defined(__x86_64__) -typedef __int64_t user32_off_t __attribute__((aligned(4))); +typedef __int64_t user32_off_t __attribute__((aligned(4))); #else -typedef __int64_t user32_off_t; +typedef __int64_t user32_off_t; #endif #endif /* KERNEL */ @@ -147,12 +147,12 @@ typedef __int64_t user32_off_t; /* This defines the size of syscall arguments after copying into the kernel: */ #if defined(__arm__) -typedef u_int32_t syscall_arg_t; +typedef u_int32_t syscall_arg_t; #elif defined(__arm64__) -typedef u_int64_t syscall_arg_t; +typedef u_int64_t syscall_arg_t; #else #error Unknown architecture. -#endif +#endif #endif /* __ASSEMBLER__ */ -#endif /* _MACHTYPES_H_ */ +#endif /* _MACHTYPES_H_ */ diff --git a/bsd/arm/vmparam.h b/bsd/arm/vmparam.h index dbee6526d..cfa45d66e 100644 --- a/bsd/arm/vmparam.h +++ b/bsd/arm/vmparam.h @@ -2,34 +2,34 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. */ -#ifndef _BSD_ARM_VMPARAM_H_ -#define _BSD_ARM_VMPARAM_H_ 1 +#ifndef _BSD_ARM_VMPARAM_H_ +#define _BSD_ARM_VMPARAM_H_ 1 #include -#define USRSTACK (0x27E00000) /* ASLR slides stack down by up to 1MB */ -#define USRSTACK64 (0x000000016FE00000ULL) +#define USRSTACK (0x27E00000) /* ASLR slides stack down by up to 1MB */ +#define USRSTACK64 (0x000000016FE00000ULL) /* * Virtual memory related constants, all in bytes */ #ifndef DFLDSIZ -#define DFLDSIZ (RLIM_INFINITY) /* initial data size limit */ +#define DFLDSIZ (RLIM_INFINITY) /* initial data size limit */ #endif #ifndef MAXDSIZ -#define MAXDSIZ (RLIM_INFINITY) /* max data size */ +#define MAXDSIZ (RLIM_INFINITY) /* max data size */ #endif -#ifndef DFLSSIZ -#define DFLSSIZ (1024*1024 - 16*1024) /* initial stack size limit */ +#ifndef DFLSSIZ +#define DFLSSIZ (1024*1024 - 16*1024) /* initial stack size limit */ #endif -#ifndef MAXSSIZ -#define MAXSSIZ (1024*1024) /* max stack size */ +#ifndef MAXSSIZ +#define MAXSSIZ (1024*1024) /* max stack size */ #endif -#ifndef DFLCSIZ -#define DFLCSIZ (0) /* initial core size limit */ +#ifndef DFLCSIZ +#define DFLCSIZ (0) /* initial core size limit */ #endif -#ifndef MAXCSIZ -#define MAXCSIZ (RLIM_INFINITY) /* max core size */ -#endif /* MAXCSIZ */ +#ifndef MAXCSIZ +#define MAXCSIZ (RLIM_INFINITY) /* max core size */ +#endif /* MAXCSIZ */ -#endif /* _BSD_ARM_VMPARAM_H_ */ +#endif /* _BSD_ARM_VMPARAM_H_ */ diff --git a/bsd/bsm/audit.h b/bsd/bsm/audit.h index 1f6b2476e..2bac16e91 100644 --- a/bsd/bsm/audit.h +++ b/bsd/bsm/audit.h @@ -29,17 +29,17 @@ * $P4: //depot/projects/trustedbsd/openbsm/sys/bsm/audit.h#10 $ */ -#ifndef _BSM_AUDIT_H -#define _BSM_AUDIT_H +#ifndef _BSM_AUDIT_H +#define _BSM_AUDIT_H #include #include -#define AUDIT_RECORD_MAGIC 0x828a0f1b -#define MAX_AUDIT_RECORDS 20 -#define MAXAUDITDATA (0x8000 - 1) -#define MAX_AUDIT_RECORD_SIZE MAXAUDITDATA -#define MIN_AUDIT_FILE_SIZE (512 * 1024) +#define AUDIT_RECORD_MAGIC 0x828a0f1b +#define MAX_AUDIT_RECORDS 20 +#define MAXAUDITDATA (0x8000 - 1) +#define MAX_AUDIT_RECORD_SIZE MAXAUDITDATA +#define MIN_AUDIT_FILE_SIZE (512 * 1024) /* * Minimum noumber of free blocks on the filesystem containing the audit @@ -47,129 +47,129 @@ * as the kernel does an unsigned compare, plus we want to leave a few blocks * free so userspace can terminate the log, etc. */ -#define AUDIT_HARD_LIMIT_FREE_BLOCKS 4 +#define AUDIT_HARD_LIMIT_FREE_BLOCKS 4 /* * Triggers for the audit daemon. */ -#define AUDIT_TRIGGER_MIN 1 -#define AUDIT_TRIGGER_LOW_SPACE 1 /* Below low watermark. */ -#define AUDIT_TRIGGER_ROTATE_KERNEL 2 /* Kernel requests rotate. 
*/ -#define AUDIT_TRIGGER_READ_FILE 3 /* Re-read config file. */ -#define AUDIT_TRIGGER_CLOSE_AND_DIE 4 /* Terminate audit. */ -#define AUDIT_TRIGGER_NO_SPACE 5 /* Below min free space. */ -#define AUDIT_TRIGGER_ROTATE_USER 6 /* User requests rotate. */ -#define AUDIT_TRIGGER_INITIALIZE 7 /* User initialize of auditd. */ -#define AUDIT_TRIGGER_EXPIRE_TRAILS 8 /* User expiration of trails. */ -#define AUDIT_TRIGGER_MAX 8 +#define AUDIT_TRIGGER_MIN 1 +#define AUDIT_TRIGGER_LOW_SPACE 1 /* Below low watermark. */ +#define AUDIT_TRIGGER_ROTATE_KERNEL 2 /* Kernel requests rotate. */ +#define AUDIT_TRIGGER_READ_FILE 3 /* Re-read config file. */ +#define AUDIT_TRIGGER_CLOSE_AND_DIE 4 /* Terminate audit. */ +#define AUDIT_TRIGGER_NO_SPACE 5 /* Below min free space. */ +#define AUDIT_TRIGGER_ROTATE_USER 6 /* User requests rotate. */ +#define AUDIT_TRIGGER_INITIALIZE 7 /* User initialize of auditd. */ +#define AUDIT_TRIGGER_EXPIRE_TRAILS 8 /* User expiration of trails. */ +#define AUDIT_TRIGGER_MAX 8 /* * The special device filename (FreeBSD). */ -#define AUDITDEV_FILENAME "audit" -#define AUDIT_TRIGGER_FILE ("/dev/" AUDITDEV_FILENAME) +#define AUDITDEV_FILENAME "audit" +#define AUDIT_TRIGGER_FILE ("/dev/" AUDITDEV_FILENAME) /* * Pre-defined audit IDs */ -#define AU_DEFAUDITID (uid_t)(-1) -#define AU_DEFAUDITSID 0 -#define AU_ASSIGN_ASID -1 +#define AU_DEFAUDITID (uid_t)(-1) +#define AU_DEFAUDITSID 0 +#define AU_ASSIGN_ASID -1 /* * IPC types. */ -#define AT_IPC_MSG ((unsigned char)1) /* Message IPC id. */ -#define AT_IPC_SEM ((unsigned char)2) /* Semaphore IPC id. */ -#define AT_IPC_SHM ((unsigned char)3) /* Shared mem IPC id. */ +#define AT_IPC_MSG ((unsigned char)1) /* Message IPC id. */ +#define AT_IPC_SEM ((unsigned char)2) /* Semaphore IPC id. */ +#define AT_IPC_SHM ((unsigned char)3) /* Shared mem IPC id. */ /* * Audit conditions. */ -#define AUC_UNSET 0 -#define AUC_AUDITING 1 -#define AUC_NOAUDIT 2 -#define AUC_DISABLED -1 +#define AUC_UNSET 0 +#define AUC_AUDITING 1 +#define AUC_NOAUDIT 2 +#define AUC_DISABLED -1 /* * auditon(2) commands. 
*/ -#define A_OLDGETPOLICY 2 -#define A_OLDSETPOLICY 3 -#define A_GETKMASK 4 -#define A_SETKMASK 5 -#define A_OLDGETQCTRL 6 -#define A_OLDSETQCTRL 7 -#define A_GETCWD 8 -#define A_GETCAR 9 -#define A_GETSTAT 12 -#define A_SETSTAT 13 -#define A_SETUMASK 14 -#define A_SETSMASK 15 -#define A_OLDGETCOND 20 -#define A_OLDSETCOND 21 -#define A_GETCLASS 22 -#define A_SETCLASS 23 -#define A_GETPINFO 24 -#define A_SETPMASK 25 -#define A_SETFSIZE 26 -#define A_GETFSIZE 27 -#define A_GETPINFO_ADDR 28 -#define A_GETKAUDIT 29 -#define A_SETKAUDIT 30 -#define A_SENDTRIGGER 31 -#define A_GETSINFO_ADDR 32 -#define A_GETPOLICY 33 -#define A_SETPOLICY 34 -#define A_GETQCTRL 35 -#define A_SETQCTRL 36 -#define A_GETCOND 37 -#define A_SETCOND 38 -#define A_GETSFLAGS 39 -#define A_SETSFLAGS 40 -#define A_GETCTLMODE 41 -#define A_SETCTLMODE 42 -#define A_GETEXPAFTER 43 -#define A_SETEXPAFTER 44 +#define A_OLDGETPOLICY 2 +#define A_OLDSETPOLICY 3 +#define A_GETKMASK 4 +#define A_SETKMASK 5 +#define A_OLDGETQCTRL 6 +#define A_OLDSETQCTRL 7 +#define A_GETCWD 8 +#define A_GETCAR 9 +#define A_GETSTAT 12 +#define A_SETSTAT 13 +#define A_SETUMASK 14 +#define A_SETSMASK 15 +#define A_OLDGETCOND 20 +#define A_OLDSETCOND 21 +#define A_GETCLASS 22 +#define A_SETCLASS 23 +#define A_GETPINFO 24 +#define A_SETPMASK 25 +#define A_SETFSIZE 26 +#define A_GETFSIZE 27 +#define A_GETPINFO_ADDR 28 +#define A_GETKAUDIT 29 +#define A_SETKAUDIT 30 +#define A_SENDTRIGGER 31 +#define A_GETSINFO_ADDR 32 +#define A_GETPOLICY 33 +#define A_SETPOLICY 34 +#define A_GETQCTRL 35 +#define A_SETQCTRL 36 +#define A_GETCOND 37 +#define A_SETCOND 38 +#define A_GETSFLAGS 39 +#define A_SETSFLAGS 40 +#define A_GETCTLMODE 41 +#define A_SETCTLMODE 42 +#define A_GETEXPAFTER 43 +#define A_SETEXPAFTER 44 /* * Audit policy controls. */ -#define AUDIT_CNT 0x0001 -#define AUDIT_AHLT 0x0002 -#define AUDIT_ARGV 0x0004 -#define AUDIT_ARGE 0x0008 -#define AUDIT_SEQ 0x0010 -#define AUDIT_WINDATA 0x0020 -#define AUDIT_USER 0x0040 -#define AUDIT_GROUP 0x0080 -#define AUDIT_TRAIL 0x0100 -#define AUDIT_PATH 0x0200 -#define AUDIT_SCNT 0x0400 -#define AUDIT_PUBLIC 0x0800 -#define AUDIT_ZONENAME 0x1000 -#define AUDIT_PERZONE 0x2000 +#define AUDIT_CNT 0x0001 +#define AUDIT_AHLT 0x0002 +#define AUDIT_ARGV 0x0004 +#define AUDIT_ARGE 0x0008 +#define AUDIT_SEQ 0x0010 +#define AUDIT_WINDATA 0x0020 +#define AUDIT_USER 0x0040 +#define AUDIT_GROUP 0x0080 +#define AUDIT_TRAIL 0x0100 +#define AUDIT_PATH 0x0200 +#define AUDIT_SCNT 0x0400 +#define AUDIT_PUBLIC 0x0800 +#define AUDIT_ZONENAME 0x1000 +#define AUDIT_PERZONE 0x2000 /* * Default audit queue control parameters. */ -#define AQ_HIWATER 100 -#define AQ_MAXHIGH 10000 -#define AQ_LOWATER 10 -#define AQ_BUFSZ MAXAUDITDATA -#define AQ_MAXBUFSZ 1048576 +#define AQ_HIWATER 100 +#define AQ_MAXHIGH 10000 +#define AQ_LOWATER 10 +#define AQ_BUFSZ MAXAUDITDATA +#define AQ_MAXBUFSZ 1048576 /* * Default minimum percentage free space on file system. */ -#define AU_FS_MINFREE 20 +#define AU_FS_MINFREE 20 /* * Type definitions used indicating the length of variable length addresses * in tokens containing addresses, such as header fields. 
*/ -#define AU_IPv4 4 -#define AU_IPv6 16 +#define AU_IPv4 4 +#define AU_IPv6 16 /* * Reserved audit class mask indicating which classes are unable to have @@ -191,78 +191,78 @@ __BEGIN_DECLS -typedef uid_t au_id_t; -typedef pid_t au_asid_t; -typedef u_int16_t au_event_t; -typedef u_int16_t au_emod_t; -typedef u_int32_t au_class_t; -typedef u_int64_t au_asflgs_t __attribute__ ((aligned (8))); -typedef unsigned char au_ctlmode_t; +typedef uid_t au_id_t; +typedef pid_t au_asid_t; +typedef u_int16_t au_event_t; +typedef u_int16_t au_emod_t; +typedef u_int32_t au_class_t; +typedef u_int64_t au_asflgs_t __attribute__ ((aligned(8))); +typedef unsigned char au_ctlmode_t; struct au_tid { - dev_t port; - u_int32_t machine; + dev_t port; + u_int32_t machine; }; -typedef struct au_tid au_tid_t; +typedef struct au_tid au_tid_t; struct au_tid_addr { - dev_t at_port; - u_int32_t at_type; - u_int32_t at_addr[4]; + dev_t at_port; + u_int32_t at_type; + u_int32_t at_addr[4]; }; -typedef struct au_tid_addr au_tid_addr_t; +typedef struct au_tid_addr au_tid_addr_t; struct au_mask { unsigned int am_success; /* Success bits. */ unsigned int am_failure; /* Failure bits. */ }; -typedef struct au_mask au_mask_t; +typedef struct au_mask au_mask_t; struct auditinfo { - au_id_t ai_auid; /* Audit user ID. */ - au_mask_t ai_mask; /* Audit masks. */ - au_tid_t ai_termid; /* Terminal ID. */ - au_asid_t ai_asid; /* Audit session ID. */ + au_id_t ai_auid; /* Audit user ID. */ + au_mask_t ai_mask; /* Audit masks. */ + au_tid_t ai_termid; /* Terminal ID. */ + au_asid_t ai_asid; /* Audit session ID. */ }; -typedef struct auditinfo auditinfo_t; +typedef struct auditinfo auditinfo_t; struct auditinfo_addr { - au_id_t ai_auid; /* Audit user ID. */ - au_mask_t ai_mask; /* Audit masks. */ - au_tid_addr_t ai_termid; /* Terminal ID. */ - au_asid_t ai_asid; /* Audit session ID. */ - au_asflgs_t ai_flags; /* Audit session flags. */ + au_id_t ai_auid; /* Audit user ID. */ + au_mask_t ai_mask; /* Audit masks. */ + au_tid_addr_t ai_termid; /* Terminal ID. */ + au_asid_t ai_asid; /* Audit session ID. */ + au_asflgs_t ai_flags; /* Audit session flags. */ }; -typedef struct auditinfo_addr auditinfo_addr_t; +typedef struct auditinfo_addr auditinfo_addr_t; struct auditpinfo { - pid_t ap_pid; /* ID of target process. */ - au_id_t ap_auid; /* Audit user ID. */ - au_mask_t ap_mask; /* Audit masks. */ - au_tid_t ap_termid; /* Terminal ID. */ - au_asid_t ap_asid; /* Audit session ID. */ + pid_t ap_pid; /* ID of target process. */ + au_id_t ap_auid; /* Audit user ID. */ + au_mask_t ap_mask; /* Audit masks. */ + au_tid_t ap_termid; /* Terminal ID. */ + au_asid_t ap_asid; /* Audit session ID. */ }; -typedef struct auditpinfo auditpinfo_t; +typedef struct auditpinfo auditpinfo_t; struct auditpinfo_addr { - pid_t ap_pid; /* ID of target process. */ - au_id_t ap_auid; /* Audit user ID. */ - au_mask_t ap_mask; /* Audit masks. */ - au_tid_addr_t ap_termid; /* Terminal ID. */ - au_asid_t ap_asid; /* Audit session ID. */ - au_asflgs_t ap_flags; /* Audit session flags. */ + pid_t ap_pid; /* ID of target process. */ + au_id_t ap_auid; /* Audit user ID. */ + au_mask_t ap_mask; /* Audit masks. */ + au_tid_addr_t ap_termid; /* Terminal ID. */ + au_asid_t ap_asid; /* Audit session ID. */ + au_asflgs_t ap_flags; /* Audit session flags. */ }; -typedef struct auditpinfo_addr auditpinfo_addr_t; +typedef struct auditpinfo_addr auditpinfo_addr_t; struct au_session { - auditinfo_addr_t *as_aia_p; /* Ptr to full audit info. */ - au_mask_t as_mask; /* Process Audit Masks. 
*/ + auditinfo_addr_t *as_aia_p; /* Ptr to full audit info. */ + au_mask_t as_mask; /* Process Audit Masks. */ }; typedef struct au_session au_session_t; struct au_expire_after { - time_t age; /* Age after which trail files should be expired */ - size_t size; /* Aggregate trail size when files should be expired */ + time_t age; /* Age after which trail files should be expired */ + size_t size; /* Aggregate trail size when files should be expired */ unsigned char op_type; /* Operator used with the above values to determine when files should be expired */ }; typedef struct au_expire_after au_expire_after_t; @@ -270,79 +270,79 @@ typedef struct au_expire_after au_expire_after_t; /* * Contents of token_t are opaque outside of libbsm. */ -typedef struct au_token token_t; +typedef struct au_token token_t; /* * Kernel audit queue control parameters: - * Default: Maximum: - * aq_hiwater: AQ_HIWATER (100) AQ_MAXHIGH (10000) - * aq_lowater: AQ_LOWATER (10) @@ -352,23 +352,23 @@ int setaudit_addr(const struct auditinfo_addr *, int); * wrappers to the getaudit_addr()/setaudit_addr() syscalls above. */ -int getaudit(struct auditinfo *) - __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, - __IPHONE_2_0, __IPHONE_6_0); -int setaudit(const struct auditinfo *) - __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, - __IPHONE_2_0, __IPHONE_6_0); +int getaudit(struct auditinfo *) +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, + __IPHONE_2_0, __IPHONE_6_0); +int setaudit(const struct auditinfo *) +__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, + __IPHONE_2_0, __IPHONE_6_0); #else -int getaudit(struct auditinfo *); -int setaudit(const struct auditinfo *); +int getaudit(struct auditinfo *); +int setaudit(const struct auditinfo *); #endif /* !__APPLE__ */ #ifdef __APPLE_API_PRIVATE #include mach_port_name_t audit_session_self(void); -au_asid_t audit_session_join(mach_port_name_t port); -int audit_session_port(au_asid_t asid, mach_port_name_t *portname); +au_asid_t audit_session_join(mach_port_name_t port); +int audit_session_port(au_asid_t asid, mach_port_name_t *portname); #endif /* __APPLE_API_PRIVATE */ #endif /* defined(_KERNEL) || defined(KERNEL) */ diff --git a/bsd/bsm/audit_domain.h b/bsd/bsm/audit_domain.h index 9edcb4fbc..be1bc920f 100644 --- a/bsd/bsm/audit_domain.h +++ b/bsd/bsm/audit_domain.h @@ -24,91 +24,91 @@ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. + * POSSIBILITY OF SUCH DAMAGE. * * $P4: //depot/projects/trustedbsd/openbsm/sys/bsm/audit_domain.h#1 $ */ #ifndef _BSM_AUDIT_DOMAIN_H_ -#define _BSM_AUDIT_DOMAIN_H_ +#define _BSM_AUDIT_DOMAIN_H_ /* * BSM protocol domain constants - protocol domains defined in Solaris. */ -#define BSM_PF_UNSPEC 0 -#define BSM_PF_LOCAL 1 -#define BSM_PF_INET 2 -#define BSM_PF_IMPLINK 3 -#define BSM_PF_PUP 4 -#define BSM_PF_CHAOS 5 -#define BSM_PF_NS 6 -#define BSM_PF_NBS 7 /* Solaris-specific. */ -#define BSM_PF_ECMA 8 -#define BSM_PF_DATAKIT 9 -#define BSM_PF_CCITT 10 -#define BSM_PF_SNA 11 -#define BSM_PF_DECnet 12 -#define BSM_PF_DLI 13 -#define BSM_PF_LAT 14 -#define BSM_PF_HYLINK 15 -#define BSM_PF_APPLETALK 16 -#define BSM_PF_NIT 17 /* Solaris-specific. */ -#define BSM_PF_802 18 /* Solaris-specific. */ -#define BSM_PF_OSI 19 -#define BSM_PF_X25 20 /* Solaris/Linux-specific. */ -#define BSM_PF_OSINET 21 /* Solaris-specific. 
*/ -#define BSM_PF_GOSIP 22 /* Solaris-specific. */ -#define BSM_PF_IPX 23 -#define BSM_PF_ROUTE 24 -#define BSM_PF_LINK 25 -#define BSM_PF_INET6 26 -#define BSM_PF_KEY 27 -#define BSM_PF_NCA 28 /* Solaris-specific. */ -#define BSM_PF_POLICY 29 /* Solaris-specific. */ -#define BSM_PF_INET_OFFLOAD 30 /* Solaris-specific. */ +#define BSM_PF_UNSPEC 0 +#define BSM_PF_LOCAL 1 +#define BSM_PF_INET 2 +#define BSM_PF_IMPLINK 3 +#define BSM_PF_PUP 4 +#define BSM_PF_CHAOS 5 +#define BSM_PF_NS 6 +#define BSM_PF_NBS 7 /* Solaris-specific. */ +#define BSM_PF_ECMA 8 +#define BSM_PF_DATAKIT 9 +#define BSM_PF_CCITT 10 +#define BSM_PF_SNA 11 +#define BSM_PF_DECnet 12 +#define BSM_PF_DLI 13 +#define BSM_PF_LAT 14 +#define BSM_PF_HYLINK 15 +#define BSM_PF_APPLETALK 16 +#define BSM_PF_NIT 17 /* Solaris-specific. */ +#define BSM_PF_802 18 /* Solaris-specific. */ +#define BSM_PF_OSI 19 +#define BSM_PF_X25 20 /* Solaris/Linux-specific. */ +#define BSM_PF_OSINET 21 /* Solaris-specific. */ +#define BSM_PF_GOSIP 22 /* Solaris-specific. */ +#define BSM_PF_IPX 23 +#define BSM_PF_ROUTE 24 +#define BSM_PF_LINK 25 +#define BSM_PF_INET6 26 +#define BSM_PF_KEY 27 +#define BSM_PF_NCA 28 /* Solaris-specific. */ +#define BSM_PF_POLICY 29 /* Solaris-specific. */ +#define BSM_PF_INET_OFFLOAD 30 /* Solaris-specific. */ /* * BSM protocol domain constants - protocol domains not defined in Solaris. */ -#define BSM_PF_NETBIOS 500 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_ISO 501 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_XTP 502 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_COIP 503 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_CNT 504 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_RTIP 505 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_SIP 506 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_PIP 507 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_ISDN 508 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_E164 509 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_NATM 510 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_ATM 511 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_NETGRAPH 512 /* FreeBSD/Darwin-specific. */ -#define BSM_PF_SLOW 513 /* FreeBSD-specific. */ -#define BSM_PF_SCLUSTER 514 /* FreeBSD-specific. */ -#define BSM_PF_ARP 515 /* FreeBSD-specific. */ -#define BSM_PF_BLUETOOTH 516 /* FreeBSD-specific. */ -#define BSM_PF_IEEE80211 517 /* FreeBSD-specific. */ -#define BSM_PF_AX25 518 /* Linux-specific. */ -#define BSM_PF_ROSE 519 /* Linux-specific. */ -#define BSM_PF_NETBEUI 520 /* Linux-specific. */ -#define BSM_PF_SECURITY 521 /* Linux-specific. */ -#define BSM_PF_PACKET 522 /* Linux-specific. */ -#define BSM_PF_ASH 523 /* Linux-specific. */ -#define BSM_PF_ECONET 524 /* Linux-specific. */ -#define BSM_PF_ATMSVC 525 /* Linux-specific. */ -#define BSM_PF_IRDA 526 /* Linux-specific. */ -#define BSM_PF_PPPOX 527 /* Linux-specific. */ -#define BSM_PF_WANPIPE 528 /* Linux-specific. */ -#define BSM_PF_LLC 529 /* Linux-specific. */ -#define BSM_PF_CAN 530 /* Linux-specific. */ -#define BSM_PF_TIPC 531 /* Linux-specific. */ -#define BSM_PF_IUCV 532 /* Linux-specific. */ -#define BSM_PF_RXRPC 533 /* Linux-specific. */ -#define BSM_PF_PHONET 534 /* Linux-specific. */ +#define BSM_PF_NETBIOS 500 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_ISO 501 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_XTP 502 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_COIP 503 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_CNT 504 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_RTIP 505 /* FreeBSD/Darwin-specific. 
*/ +#define BSM_PF_SIP 506 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_PIP 507 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_ISDN 508 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_E164 509 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_NATM 510 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_ATM 511 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_NETGRAPH 512 /* FreeBSD/Darwin-specific. */ +#define BSM_PF_SLOW 513 /* FreeBSD-specific. */ +#define BSM_PF_SCLUSTER 514 /* FreeBSD-specific. */ +#define BSM_PF_ARP 515 /* FreeBSD-specific. */ +#define BSM_PF_BLUETOOTH 516 /* FreeBSD-specific. */ +#define BSM_PF_IEEE80211 517 /* FreeBSD-specific. */ +#define BSM_PF_AX25 518 /* Linux-specific. */ +#define BSM_PF_ROSE 519 /* Linux-specific. */ +#define BSM_PF_NETBEUI 520 /* Linux-specific. */ +#define BSM_PF_SECURITY 521 /* Linux-specific. */ +#define BSM_PF_PACKET 522 /* Linux-specific. */ +#define BSM_PF_ASH 523 /* Linux-specific. */ +#define BSM_PF_ECONET 524 /* Linux-specific. */ +#define BSM_PF_ATMSVC 525 /* Linux-specific. */ +#define BSM_PF_IRDA 526 /* Linux-specific. */ +#define BSM_PF_PPPOX 527 /* Linux-specific. */ +#define BSM_PF_WANPIPE 528 /* Linux-specific. */ +#define BSM_PF_LLC 529 /* Linux-specific. */ +#define BSM_PF_CAN 530 /* Linux-specific. */ +#define BSM_PF_TIPC 531 /* Linux-specific. */ +#define BSM_PF_IUCV 532 /* Linux-specific. */ +#define BSM_PF_RXRPC 533 /* Linux-specific. */ +#define BSM_PF_PHONET 534 /* Linux-specific. */ /* * Used when there is no mapping from a local to BSM protocol domain. */ -#define BSM_PF_UNKNOWN 700 /* OpenBSM-specific. */ +#define BSM_PF_UNKNOWN 700 /* OpenBSM-specific. */ #endif /* !_BSM_AUDIT_DOMAIN_H_ */ diff --git a/bsd/bsm/audit_errno.h b/bsd/bsm/audit_errno.h index c6f058017..a44e0dec9 100644 --- a/bsd/bsm/audit_errno.h +++ b/bsd/bsm/audit_errno.h @@ -24,13 +24,13 @@ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. + * POSSIBILITY OF SUCH DAMAGE. * * $P4: //depot/projects/trustedbsd/openbsm/sys/bsm/audit_errno.h#5 $ */ #ifndef _BSM_AUDIT_ERRNO_H_ -#define _BSM_AUDIT_ERRNO_H_ +#define _BSM_AUDIT_ERRNO_H_ /* * For the purposes of portable encoding, we convert between local error @@ -41,129 +41,129 @@ * * When adding constants here, also add them to bsm_errno.c. 
*/ -#define BSM_ERRNO_ESUCCESS 0 -#define BSM_ERRNO_EPERM 1 -#define BSM_ERRNO_ENOENT 2 -#define BSM_ERRNO_ESRCH 3 -#define BSM_ERRNO_EINTR 4 -#define BSM_ERRNO_EIO 5 -#define BSM_ERRNO_ENXIO 6 -#define BSM_ERRNO_E2BIG 7 -#define BSM_ERRNO_ENOEXEC 8 -#define BSM_ERRNO_EBADF 9 -#define BSM_ERRNO_ECHILD 10 -#define BSM_ERRNO_EAGAIN 11 -#define BSM_ERRNO_ENOMEM 12 -#define BSM_ERRNO_EACCES 13 -#define BSM_ERRNO_EFAULT 14 -#define BSM_ERRNO_ENOTBLK 15 -#define BSM_ERRNO_EBUSY 16 -#define BSM_ERRNO_EEXIST 17 -#define BSM_ERRNO_EXDEV 18 -#define BSM_ERRNO_ENODEV 19 -#define BSM_ERRNO_ENOTDIR 20 -#define BSM_ERRNO_EISDIR 21 -#define BSM_ERRNO_EINVAL 22 -#define BSM_ERRNO_ENFILE 23 -#define BSM_ERRNO_EMFILE 24 -#define BSM_ERRNO_ENOTTY 25 -#define BSM_ERRNO_ETXTBSY 26 -#define BSM_ERRNO_EFBIG 27 -#define BSM_ERRNO_ENOSPC 28 -#define BSM_ERRNO_ESPIPE 29 -#define BSM_ERRNO_EROFS 30 -#define BSM_ERRNO_EMLINK 31 -#define BSM_ERRNO_EPIPE 32 -#define BSM_ERRNO_EDOM 33 -#define BSM_ERRNO_ERANGE 34 -#define BSM_ERRNO_ENOMSG 35 -#define BSM_ERRNO_EIDRM 36 -#define BSM_ERRNO_ECHRNG 37 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EL2NSYNC 38 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EL3HLT 39 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EL3RST 40 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ELNRNG 41 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EUNATCH 42 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ENOCSI 43 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EL2HLT 44 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EDEADLK 45 -#define BSM_ERRNO_ENOLCK 46 -#define BSM_ERRNO_ECANCELED 47 -#define BSM_ERRNO_ENOTSUP 48 -#define BSM_ERRNO_EDQUOT 49 -#define BSM_ERRNO_EBADE 50 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EBADR 51 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EXFULL 52 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ENOANO 53 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EBADRQC 54 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EBADSLT 55 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EDEADLOCK 56 /* Solaris-specific. */ -#define BSM_ERRNO_EBFONT 57 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EOWNERDEAD 58 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ENOTRECOVERABLE 59 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ENOSTR 60 /* Solaris/Darwin/Linux-specific. */ -#define BSM_ERRNO_ENODATA 61 /* Solaris/Darwin/Linux-specific. */ -#define BSM_ERRNO_ETIME 62 /* Solaris/Darwin/Linux-specific. */ -#define BSM_ERRNO_ENOSR 63 /* Solaris/Darwin/Linux-specific. */ -#define BSM_ERRNO_ENONET 64 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ENOPKG 65 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EREMOTE 66 -#define BSM_ERRNO_ENOLINK 67 -#define BSM_ERRNO_EADV 68 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ESRMNT 69 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ECOMM 70 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EPROTO 71 -#define BSM_ERRNO_ELOCKUNMAPPED 72 /* Solaris-specific. */ -#define BSM_ERRNO_ENOTACTIVE 73 /* Solaris-specific. */ -#define BSM_ERRNO_EMULTIHOP 74 -#define BSM_ERRNO_EBADMSG 77 -#define BSM_ERRNO_ENAMETOOLONG 78 -#define BSM_ERRNO_EOVERFLOW 79 -#define BSM_ERRNO_ENOTUNIQ 80 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EBADFD 81 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EREMCHG 82 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ELIBACC 83 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ELIBBAD 84 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ELIBSCN 85 /* Solaris/Linux-specific. 
*/ -#define BSM_ERRNO_ELIBMAX 86 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ELIBEXEC 87 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_EILSEQ 88 -#define BSM_ERRNO_ENOSYS 89 -#define BSM_ERRNO_ELOOP 90 -#define BSM_ERRNO_ERESTART 91 -#define BSM_ERRNO_ESTRPIPE 92 /* Solaris/Linux-specific. */ -#define BSM_ERRNO_ENOTEMPTY 93 -#define BSM_ERRNO_EUSERS 94 -#define BSM_ERRNO_ENOTSOCK 95 -#define BSM_ERRNO_EDESTADDRREQ 96 -#define BSM_ERRNO_EMSGSIZE 97 -#define BSM_ERRNO_EPROTOTYPE 98 -#define BSM_ERRNO_ENOPROTOOPT 99 -#define BSM_ERRNO_EPROTONOSUPPORT 120 -#define BSM_ERRNO_ESOCKTNOSUPPORT 121 -#define BSM_ERRNO_EOPNOTSUPP 122 -#define BSM_ERRNO_EPFNOSUPPORT 123 -#define BSM_ERRNO_EAFNOSUPPORT 124 -#define BSM_ERRNO_EADDRINUSE 125 -#define BSM_ERRNO_EADDRNOTAVAIL 126 -#define BSM_ERRNO_ENETDOWN 127 -#define BSM_ERRNO_ENETUNREACH 128 -#define BSM_ERRNO_ENETRESET 129 -#define BSM_ERRNO_ECONNABORTED 130 -#define BSM_ERRNO_ECONNRESET 131 -#define BSM_ERRNO_ENOBUFS 132 -#define BSM_ERRNO_EISCONN 133 -#define BSM_ERRNO_ENOTCONN 134 -#define BSM_ERRNO_ESHUTDOWN 143 -#define BSM_ERRNO_ETOOMANYREFS 144 -#define BSM_ERRNO_ETIMEDOUT 145 -#define BSM_ERRNO_ECONNREFUSED 146 -#define BSM_ERRNO_EHOSTDOWN 147 -#define BSM_ERRNO_EHOSTUNREACH 148 -#define BSM_ERRNO_EALREADY 149 -#define BSM_ERRNO_EINPROGRESS 150 -#define BSM_ERRNO_ESTALE 151 -#define BSM_ERRNO_EQFULL 152 +#define BSM_ERRNO_ESUCCESS 0 +#define BSM_ERRNO_EPERM 1 +#define BSM_ERRNO_ENOENT 2 +#define BSM_ERRNO_ESRCH 3 +#define BSM_ERRNO_EINTR 4 +#define BSM_ERRNO_EIO 5 +#define BSM_ERRNO_ENXIO 6 +#define BSM_ERRNO_E2BIG 7 +#define BSM_ERRNO_ENOEXEC 8 +#define BSM_ERRNO_EBADF 9 +#define BSM_ERRNO_ECHILD 10 +#define BSM_ERRNO_EAGAIN 11 +#define BSM_ERRNO_ENOMEM 12 +#define BSM_ERRNO_EACCES 13 +#define BSM_ERRNO_EFAULT 14 +#define BSM_ERRNO_ENOTBLK 15 +#define BSM_ERRNO_EBUSY 16 +#define BSM_ERRNO_EEXIST 17 +#define BSM_ERRNO_EXDEV 18 +#define BSM_ERRNO_ENODEV 19 +#define BSM_ERRNO_ENOTDIR 20 +#define BSM_ERRNO_EISDIR 21 +#define BSM_ERRNO_EINVAL 22 +#define BSM_ERRNO_ENFILE 23 +#define BSM_ERRNO_EMFILE 24 +#define BSM_ERRNO_ENOTTY 25 +#define BSM_ERRNO_ETXTBSY 26 +#define BSM_ERRNO_EFBIG 27 +#define BSM_ERRNO_ENOSPC 28 +#define BSM_ERRNO_ESPIPE 29 +#define BSM_ERRNO_EROFS 30 +#define BSM_ERRNO_EMLINK 31 +#define BSM_ERRNO_EPIPE 32 +#define BSM_ERRNO_EDOM 33 +#define BSM_ERRNO_ERANGE 34 +#define BSM_ERRNO_ENOMSG 35 +#define BSM_ERRNO_EIDRM 36 +#define BSM_ERRNO_ECHRNG 37 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EL2NSYNC 38 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EL3HLT 39 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EL3RST 40 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ELNRNG 41 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EUNATCH 42 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ENOCSI 43 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EL2HLT 44 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EDEADLK 45 +#define BSM_ERRNO_ENOLCK 46 +#define BSM_ERRNO_ECANCELED 47 +#define BSM_ERRNO_ENOTSUP 48 +#define BSM_ERRNO_EDQUOT 49 +#define BSM_ERRNO_EBADE 50 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EBADR 51 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EXFULL 52 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ENOANO 53 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EBADRQC 54 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EBADSLT 55 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EDEADLOCK 56 /* Solaris-specific. */ +#define BSM_ERRNO_EBFONT 57 /* Solaris/Linux-specific. 
*/ +#define BSM_ERRNO_EOWNERDEAD 58 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ENOTRECOVERABLE 59 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ENOSTR 60 /* Solaris/Darwin/Linux-specific. */ +#define BSM_ERRNO_ENODATA 61 /* Solaris/Darwin/Linux-specific. */ +#define BSM_ERRNO_ETIME 62 /* Solaris/Darwin/Linux-specific. */ +#define BSM_ERRNO_ENOSR 63 /* Solaris/Darwin/Linux-specific. */ +#define BSM_ERRNO_ENONET 64 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ENOPKG 65 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EREMOTE 66 +#define BSM_ERRNO_ENOLINK 67 +#define BSM_ERRNO_EADV 68 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ESRMNT 69 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ECOMM 70 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EPROTO 71 +#define BSM_ERRNO_ELOCKUNMAPPED 72 /* Solaris-specific. */ +#define BSM_ERRNO_ENOTACTIVE 73 /* Solaris-specific. */ +#define BSM_ERRNO_EMULTIHOP 74 +#define BSM_ERRNO_EBADMSG 77 +#define BSM_ERRNO_ENAMETOOLONG 78 +#define BSM_ERRNO_EOVERFLOW 79 +#define BSM_ERRNO_ENOTUNIQ 80 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EBADFD 81 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EREMCHG 82 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ELIBACC 83 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ELIBBAD 84 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ELIBSCN 85 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ELIBMAX 86 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ELIBEXEC 87 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_EILSEQ 88 +#define BSM_ERRNO_ENOSYS 89 +#define BSM_ERRNO_ELOOP 90 +#define BSM_ERRNO_ERESTART 91 +#define BSM_ERRNO_ESTRPIPE 92 /* Solaris/Linux-specific. */ +#define BSM_ERRNO_ENOTEMPTY 93 +#define BSM_ERRNO_EUSERS 94 +#define BSM_ERRNO_ENOTSOCK 95 +#define BSM_ERRNO_EDESTADDRREQ 96 +#define BSM_ERRNO_EMSGSIZE 97 +#define BSM_ERRNO_EPROTOTYPE 98 +#define BSM_ERRNO_ENOPROTOOPT 99 +#define BSM_ERRNO_EPROTONOSUPPORT 120 +#define BSM_ERRNO_ESOCKTNOSUPPORT 121 +#define BSM_ERRNO_EOPNOTSUPP 122 +#define BSM_ERRNO_EPFNOSUPPORT 123 +#define BSM_ERRNO_EAFNOSUPPORT 124 +#define BSM_ERRNO_EADDRINUSE 125 +#define BSM_ERRNO_EADDRNOTAVAIL 126 +#define BSM_ERRNO_ENETDOWN 127 +#define BSM_ERRNO_ENETUNREACH 128 +#define BSM_ERRNO_ENETRESET 129 +#define BSM_ERRNO_ECONNABORTED 130 +#define BSM_ERRNO_ECONNRESET 131 +#define BSM_ERRNO_ENOBUFS 132 +#define BSM_ERRNO_EISCONN 133 +#define BSM_ERRNO_ENOTCONN 134 +#define BSM_ERRNO_ESHUTDOWN 143 +#define BSM_ERRNO_ETOOMANYREFS 144 +#define BSM_ERRNO_ETIMEDOUT 145 +#define BSM_ERRNO_ECONNREFUSED 146 +#define BSM_ERRNO_EHOSTDOWN 147 +#define BSM_ERRNO_EHOSTUNREACH 148 +#define BSM_ERRNO_EALREADY 149 +#define BSM_ERRNO_EINPROGRESS 150 +#define BSM_ERRNO_ESTALE 151 +#define BSM_ERRNO_EQFULL 152 /* * OpenBSM constants for error numbers not defined in Solaris. In the event @@ -172,44 +172,44 @@ * * ELAST doesn't get a constant in the BSM space. */ -#define BSM_ERRNO_EPROCLIM 190 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_EBADRPC 191 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_ERPCMISMATCH 192 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_EPROGUNAVAIL 193 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_EPROGMISMATCH 194 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_EPROCUNAVAIL 195 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_EFTYPE 196 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_EAUTH 197 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_ENEEDAUTH 198 /* FreeBSD/Darwin-specific. */ -#define BSM_ERRNO_ENOATTR 199 /* FreeBSD/Darwin-specific. 
*/ -#define BSM_ERRNO_EDOOFUS 200 /* FreeBSD-specific. */ -#define BSM_ERRNO_EJUSTRETURN 201 /* FreeBSD-specific. */ -#define BSM_ERRNO_ENOIOCTL 202 /* FreeBSD-specific. */ -#define BSM_ERRNO_EDIRIOCTL 203 /* FreeBSD-specific. */ -#define BSM_ERRNO_EPWROFF 204 /* Darwin-specific. */ -#define BSM_ERRNO_EDEVERR 205 /* Darwin-specific. */ -#define BSM_ERRNO_EBADEXEC 206 /* Darwin-specific. */ -#define BSM_ERRNO_EBADARCH 207 /* Darwin-specific. */ -#define BSM_ERRNO_ESHLIBVERS 208 /* Darwin-specific. */ -#define BSM_ERRNO_EBADMACHO 209 /* Darwin-specific. */ -#define BSM_ERRNO_EPOLICY 210 /* Darwin-specific. */ -#define BSM_ERRNO_EDOTDOT 211 /* Linux-specific. */ -#define BSM_ERRNO_EUCLEAN 212 /* Linux-specific. */ -#define BSM_ERRNO_ENOTNAM 213 /* Linux(Xenix?)-specific. */ -#define BSM_ERRNO_ENAVAIL 214 /* Linux(Xenix?)-specific. */ -#define BSM_ERRNO_EISNAM 215 /* Linux(Xenix?)-specific. */ -#define BSM_ERRNO_EREMOTEIO 216 /* Linux-specific. */ -#define BSM_ERRNO_ENOMEDIUM 217 /* Linux-specific. */ -#define BSM_ERRNO_EMEDIUMTYPE 218 /* Linux-specific. */ -#define BSM_ERRNO_ENOKEY 219 /* Linux-specific. */ -#define BSM_ERRNO_EKEYEXPIRED 220 /* Linux-specific. */ -#define BSM_ERRNO_EKEYREVOKED 221 /* Linux-specific. */ -#define BSM_ERRNO_EKEYREJECTED 222 /* Linux-specific. */ +#define BSM_ERRNO_EPROCLIM 190 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_EBADRPC 191 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_ERPCMISMATCH 192 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_EPROGUNAVAIL 193 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_EPROGMISMATCH 194 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_EPROCUNAVAIL 195 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_EFTYPE 196 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_EAUTH 197 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_ENEEDAUTH 198 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_ENOATTR 199 /* FreeBSD/Darwin-specific. */ +#define BSM_ERRNO_EDOOFUS 200 /* FreeBSD-specific. */ +#define BSM_ERRNO_EJUSTRETURN 201 /* FreeBSD-specific. */ +#define BSM_ERRNO_ENOIOCTL 202 /* FreeBSD-specific. */ +#define BSM_ERRNO_EDIRIOCTL 203 /* FreeBSD-specific. */ +#define BSM_ERRNO_EPWROFF 204 /* Darwin-specific. */ +#define BSM_ERRNO_EDEVERR 205 /* Darwin-specific. */ +#define BSM_ERRNO_EBADEXEC 206 /* Darwin-specific. */ +#define BSM_ERRNO_EBADARCH 207 /* Darwin-specific. */ +#define BSM_ERRNO_ESHLIBVERS 208 /* Darwin-specific. */ +#define BSM_ERRNO_EBADMACHO 209 /* Darwin-specific. */ +#define BSM_ERRNO_EPOLICY 210 /* Darwin-specific. */ +#define BSM_ERRNO_EDOTDOT 211 /* Linux-specific. */ +#define BSM_ERRNO_EUCLEAN 212 /* Linux-specific. */ +#define BSM_ERRNO_ENOTNAM 213 /* Linux(Xenix?)-specific. */ +#define BSM_ERRNO_ENAVAIL 214 /* Linux(Xenix?)-specific. */ +#define BSM_ERRNO_EISNAM 215 /* Linux(Xenix?)-specific. */ +#define BSM_ERRNO_EREMOTEIO 216 /* Linux-specific. */ +#define BSM_ERRNO_ENOMEDIUM 217 /* Linux-specific. */ +#define BSM_ERRNO_EMEDIUMTYPE 218 /* Linux-specific. */ +#define BSM_ERRNO_ENOKEY 219 /* Linux-specific. */ +#define BSM_ERRNO_EKEYEXPIRED 220 /* Linux-specific. */ +#define BSM_ERRNO_EKEYREVOKED 221 /* Linux-specific. */ +#define BSM_ERRNO_EKEYREJECTED 222 /* Linux-specific. */ /* * In the event that OpenBSM doesn't have a file representation of a local * error number, use this. */ -#define BSM_ERRNO_UNKNOWN 250 /* OpenBSM-specific. */ +#define BSM_ERRNO_UNKNOWN 250 /* OpenBSM-specific. 
*/ #endif /* !_BSM_AUDIT_ERRNO_H_ */ diff --git a/bsd/bsm/audit_fcntl.h b/bsd/bsm/audit_fcntl.h index 5024fb9cf..20b73988a 100644 --- a/bsd/bsm/audit_fcntl.h +++ b/bsd/bsm/audit_fcntl.h @@ -24,120 +24,120 @@ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. + * POSSIBILITY OF SUCH DAMAGE. * * $P4: //depot/projects/trustedbsd/openbsm/sys/bsm/audit_fcntl.h#2 $ */ -#ifndef _BSM_AUDIT_FCNTL_H_ -#define _BSM_AUDIT_FCNTL_H_ +#ifndef _BSM_AUDIT_FCNTL_H_ +#define _BSM_AUDIT_FCNTL_H_ /* * Shared and Solaris-specific: (0-99). */ -#define BSM_F_DUPFD 0 -#define BSM_F_GETFD 1 -#define BSM_F_SETFD 2 -#define BSM_F_GETFL 3 -#define BSM_F_SETFL 4 -#define BSM_F_O_GETLK 5 /* Solaris-specific. */ -#define BSM_F_SETLK 6 -#define BSM_F_SETLKW 7 -#define BSM_F_CHKFL 8 /* Solaris-specific. */ -#define BSM_F_DUP2FD 9 /* FreeBSD/Solaris-specific. */ -#define BSM_F_ALLOCSP 10 /* Solaris-specific. */ -#define BSM_F_FREESP 11 /* Solaris-specific. */ +#define BSM_F_DUPFD 0 +#define BSM_F_GETFD 1 +#define BSM_F_SETFD 2 +#define BSM_F_GETFL 3 +#define BSM_F_SETFL 4 +#define BSM_F_O_GETLK 5 /* Solaris-specific. */ +#define BSM_F_SETLK 6 +#define BSM_F_SETLKW 7 +#define BSM_F_CHKFL 8 /* Solaris-specific. */ +#define BSM_F_DUP2FD 9 /* FreeBSD/Solaris-specific. */ +#define BSM_F_ALLOCSP 10 /* Solaris-specific. */ +#define BSM_F_FREESP 11 /* Solaris-specific. */ -#define BSM_F_ISSTREAM 13 /* Solaris-specific. */ -#define BSM_F_GETLK 14 -#define BSM_F_PRIV 15 /* Solaris-specific. */ -#define BSM_F_NPRIV 16 /* Solaris-specific. */ -#define BSM_F_QUOTACTL 17 /* Solaris-specific. */ -#define BSM_F_BLOCKS 18 /* Solaris-specific. */ -#define BSM_F_BLKSIZE 19 /* Solaris-specific. */ +#define BSM_F_ISSTREAM 13 /* Solaris-specific. */ +#define BSM_F_GETLK 14 +#define BSM_F_PRIV 15 /* Solaris-specific. */ +#define BSM_F_NPRIV 16 /* Solaris-specific. */ +#define BSM_F_QUOTACTL 17 /* Solaris-specific. */ +#define BSM_F_BLOCKS 18 /* Solaris-specific. */ +#define BSM_F_BLKSIZE 19 /* Solaris-specific. */ -#define BSM_F_GETOWN 23 -#define BSM_F_SETOWN 24 -#define BSM_F_REVOKE 25 /* Solaris-specific. */ -#define BSM_F_HASREMOTELOCKS 26 /* Solaris-specific. */ -#define BSM_F_FREESP64 27 /* Solaris-specific. */ -#define BSM_F_ALLOCSP64 28 /* Solaris-specific. */ +#define BSM_F_GETOWN 23 +#define BSM_F_SETOWN 24 +#define BSM_F_REVOKE 25 /* Solaris-specific. */ +#define BSM_F_HASREMOTELOCKS 26 /* Solaris-specific. */ +#define BSM_F_FREESP64 27 /* Solaris-specific. */ +#define BSM_F_ALLOCSP64 28 /* Solaris-specific. */ -#define BSM_F_GETLK64 33 /* Solaris-specific. */ -#define BSM_F_SETLK64 34 /* Solaris-specific. */ -#define BSM_F_SETLKW64 35 /* Solaris-specific. */ +#define BSM_F_GETLK64 33 /* Solaris-specific. */ +#define BSM_F_SETLK64 34 /* Solaris-specific. */ +#define BSM_F_SETLKW64 35 /* Solaris-specific. */ -#define BSM_F_SHARE 40 /* Solaris-specific. */ -#define BSM_F_UNSHARE 41 /* Solaris-specific. */ -#define BSM_F_SETLK_NBMAND 42 /* Solaris-specific. */ -#define BSM_F_SHARE_NBMAND 43 /* Solaris-specific. */ -#define BSM_F_SETLK64_NBMAND 44 /* Solaris-specific. */ -#define BSM_F_GETXFL 45 /* Solaris-specific. */ -#define BSM_F_BADFD 46 /* Solaris-specific. */ +#define BSM_F_SHARE 40 /* Solaris-specific. */ +#define BSM_F_UNSHARE 41 /* Solaris-specific. */ +#define BSM_F_SETLK_NBMAND 42 /* Solaris-specific. 
*/ +#define BSM_F_SHARE_NBMAND 43 /* Solaris-specific. */ +#define BSM_F_SETLK64_NBMAND 44 /* Solaris-specific. */ +#define BSM_F_GETXFL 45 /* Solaris-specific. */ +#define BSM_F_BADFD 46 /* Solaris-specific. */ /* * FreeBSD-specific (100-199). */ -#define BSM_F_OGETLK 107 /* FreeBSD-specific. */ -#define BSM_F_OSETLK 108 /* FreeBSD-specific. */ -#define BSM_F_OSETLKW 109 /* FreeBSD-specific. */ +#define BSM_F_OGETLK 107 /* FreeBSD-specific. */ +#define BSM_F_OSETLK 108 /* FreeBSD-specific. */ +#define BSM_F_OSETLKW 109 /* FreeBSD-specific. */ -#define BSM_F_SETLK_REMOTE 114 /* FreeBSD-specific. */ +#define BSM_F_SETLK_REMOTE 114 /* FreeBSD-specific. */ /* * Linux-specific (200-299). */ -#define BSM_F_SETSIG 210 /* Linux-specific. */ -#define BSM_F_GETSIG 211 /* Linux-specific. */ +#define BSM_F_SETSIG 210 /* Linux-specific. */ +#define BSM_F_GETSIG 211 /* Linux-specific. */ /* * Darwin-specific (300-399). */ -#define BSM_F_CHKCLEAN 341 /* Darwin-specific. */ -#define BSM_F_PREALLOCATE 342 /* Darwin-specific. */ -#define BSM_F_SETSIZE 343 /* Darwin-specific. */ -#define BSM_F_RDADVISE 344 /* Darwin-specific. */ -#define BSM_F_RDAHEAD 345 /* Darwin-specific. */ -#define BSM_F_READBOOTSTRAP 346 /* Darwin-specific. */ -#define BSM_F_WRITEBOOTSTRAP 347 /* Darwin-specific. */ -#define BSM_F_NOCACHE 348 /* Darwin-specific. */ -#define BSM_F_LOG2PHYS 349 /* Darwin-specific. */ -#define BSM_F_GETPATH 350 /* Darwin-specific. */ -#define BSM_F_FULLFSYNC 351 /* Darwin-specific. */ -#define BSM_F_PATHPKG_CHECK 352 /* Darwin-specific. */ -#define BSM_F_FREEZE_FS 353 /* Darwin-specific. */ -#define BSM_F_THAW_FS 354 /* Darwin-specific. */ -#define BSM_F_GLOBAL_NOCACHE 355 /* Darwin-specific. */ -#define BSM_F_OPENFROM 356 /* Darwin-specific. */ -#define BSM_F_UNLINKFROM 357 /* Darwin-specific. */ -#define BSM_F_CHECK_OPENEVT 358 /* Darwin-specific. */ -#define BSM_F_ADDSIGS 359 /* Darwin-specific. */ -#define BSM_F_MARKDEPENDENCY 360 /* Darwin-specific. */ -#define BSM_F_BARRIERFSYNC 361 /* Darwin-specific. */ -#define BSM_F_PUNCHHOLE 362 /* Darwin-specific. */ -#define BSM_F_TRIM_ACTIVE_FILE 363 /* Darwin-specific. */ +#define BSM_F_CHKCLEAN 341 /* Darwin-specific. */ +#define BSM_F_PREALLOCATE 342 /* Darwin-specific. */ +#define BSM_F_SETSIZE 343 /* Darwin-specific. */ +#define BSM_F_RDADVISE 344 /* Darwin-specific. */ +#define BSM_F_RDAHEAD 345 /* Darwin-specific. */ +#define BSM_F_READBOOTSTRAP 346 /* Darwin-specific. */ +#define BSM_F_WRITEBOOTSTRAP 347 /* Darwin-specific. */ +#define BSM_F_NOCACHE 348 /* Darwin-specific. */ +#define BSM_F_LOG2PHYS 349 /* Darwin-specific. */ +#define BSM_F_GETPATH 350 /* Darwin-specific. */ +#define BSM_F_FULLFSYNC 351 /* Darwin-specific. */ +#define BSM_F_PATHPKG_CHECK 352 /* Darwin-specific. */ +#define BSM_F_FREEZE_FS 353 /* Darwin-specific. */ +#define BSM_F_THAW_FS 354 /* Darwin-specific. */ +#define BSM_F_GLOBAL_NOCACHE 355 /* Darwin-specific. */ +#define BSM_F_OPENFROM 356 /* Darwin-specific. */ +#define BSM_F_UNLINKFROM 357 /* Darwin-specific. */ +#define BSM_F_CHECK_OPENEVT 358 /* Darwin-specific. */ +#define BSM_F_ADDSIGS 359 /* Darwin-specific. */ +#define BSM_F_MARKDEPENDENCY 360 /* Darwin-specific. */ +#define BSM_F_BARRIERFSYNC 361 /* Darwin-specific. */ +#define BSM_F_PUNCHHOLE 362 /* Darwin-specific. */ +#define BSM_F_TRIM_ACTIVE_FILE 363 /* Darwin-specific. */ /* * Darwin file system specific (400-499). */ -#define BSM_F_FS_SPECIFIC_0 400 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_1 401 /* Darwin-fs-specific. 
*/ -#define BSM_F_FS_SPECIFIC_2 402 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_3 403 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_4 404 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_5 405 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_6 406 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_7 407 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_8 408 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_9 409 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_10 410 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_11 411 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_12 412 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_13 413 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_14 414 /* Darwin-fs-specific. */ -#define BSM_F_FS_SPECIFIC_15 415 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_0 400 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_1 401 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_2 402 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_3 403 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_4 404 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_5 405 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_6 406 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_7 407 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_8 408 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_9 409 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_10 410 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_11 411 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_12 412 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_13 413 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_14 414 /* Darwin-fs-specific. */ +#define BSM_F_FS_SPECIFIC_15 415 /* Darwin-fs-specific. */ -#define BSM_F_UNKNOWN 0xFFFF +#define BSM_F_UNKNOWN 0xFFFF #endif /* !_BSM_AUDIT_FCNTL_H_ */ diff --git a/bsd/bsm/audit_internal.h b/bsd/bsm/audit_internal.h index c2103f32c..f62514d74 100644 --- a/bsd/bsm/audit_internal.h +++ b/bsd/bsm/audit_internal.h @@ -34,10 +34,10 @@ */ #ifndef _AUDIT_INTERNAL_H -#define _AUDIT_INTERNAL_H +#define _AUDIT_INTERNAL_H #if defined(__linux__) && !defined(__unused) -#define __unused +#define __unused #endif /* @@ -48,20 +48,20 @@ * otherwise break these interfaces or the assumptions they rely on. */ struct au_token { - u_char *t_data; - size_t len; - TAILQ_ENTRY(au_token) tokens; + u_char *t_data; + size_t len; + TAILQ_ENTRY(au_token) tokens; }; struct au_record { - char used; /* Record currently in use? */ - int desc; /* Descriptor for record. */ - TAILQ_HEAD(, au_token) token_q; /* Queue of BSM tokens. */ - u_char *data; - size_t len; - LIST_ENTRY(au_record) au_rec_q; + char used; /* Record currently in use? */ + int desc; /* Descriptor for record. */ + TAILQ_HEAD(, au_token) token_q; /* Queue of BSM tokens. */ + u_char *data; + size_t len; + LIST_ENTRY(au_record) au_rec_q; }; -typedef struct au_record au_record_t; +typedef struct au_record au_record_t; /* @@ -71,48 +71,48 @@ typedef struct au_record au_record_t; * token structures may contain pointers of whose contents we do not know the * size (e.g text tokens). 
*/ -#define AUDIT_HEADER_EX_SIZE(a) ((a)->ai_termid.at_type+18+sizeof(u_int32_t)) -#define AUDIT_HEADER_SIZE 18 -#define MAX_AUDIT_HEADER_SIZE (5*sizeof(u_int32_t)+18) -#define AUDIT_TRAILER_SIZE 7 -#define MAX_AUDIT_IDENTITY_SIZE 179 +#define AUDIT_HEADER_EX_SIZE(a) ((a)->ai_termid.at_type+18+sizeof(u_int32_t)) +#define AUDIT_HEADER_SIZE 18 +#define MAX_AUDIT_HEADER_SIZE (5*sizeof(u_int32_t)+18) +#define AUDIT_TRAILER_SIZE 7 +#define MAX_AUDIT_IDENTITY_SIZE 179 /* * BSM token streams store fields in big endian byte order, so as to be * portable; when encoding and decoding, we must convert byte orders for * typed values. */ -#define ADD_U_CHAR(loc, val) \ - do { \ - *(loc) = (val); \ - (loc) += sizeof(u_char); \ +#define ADD_U_CHAR(loc, val) \ + do { \ + *(loc) = (val); \ + (loc) += sizeof(u_char); \ } while(0) -#define ADD_U_INT16(loc, val) \ - do { \ - be16enc((loc), (val)); \ - (loc) += sizeof(u_int16_t); \ +#define ADD_U_INT16(loc, val) \ + do { \ + be16enc((loc), (val)); \ + (loc) += sizeof(u_int16_t); \ } while(0) -#define ADD_U_INT32(loc, val) \ - do { \ - be32enc((loc), (val)); \ - (loc) += sizeof(u_int32_t); \ +#define ADD_U_INT32(loc, val) \ + do { \ + be32enc((loc), (val)); \ + (loc) += sizeof(u_int32_t); \ } while(0) -#define ADD_U_INT64(loc, val) \ - do { \ - be64enc((loc), (val)); \ - (loc) += sizeof(u_int64_t); \ +#define ADD_U_INT64(loc, val) \ + do { \ + be64enc((loc), (val)); \ + (loc) += sizeof(u_int64_t); \ } while(0) -#define ADD_MEM(loc, data, size) \ - do { \ - memcpy((loc), (data), (size)); \ - (loc) += size; \ +#define ADD_MEM(loc, data, size) \ + do { \ + memcpy((loc), (data), (size)); \ + (loc) += size; \ } while(0) -#define ADD_STRING(loc, data, size) ADD_MEM(loc, data, size) +#define ADD_STRING(loc, data, size) ADD_MEM(loc, data, size) #endif /* !_AUDIT_INTERNAL_H_ */ diff --git a/bsd/bsm/audit_kernel.h b/bsd/bsm/audit_kernel.h index 3abea7ddd..200a41a3a 100644 --- a/bsd/bsm/audit_kernel.h +++ b/bsd/bsm/audit_kernel.h @@ -1,21 +1,21 @@ /* * Copyright (c) 2004-2008, Apple Inc. All rights reserved. - * + * * @APPLE_BSD_LICENSE_HEADER_START@ - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. + * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. + * documentation and/or other materials provided with the distribution. * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * + * from this software without specific prior written permission. + * * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -26,9 +26,9 @@ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
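The ADD_U_* helpers in the audit_internal.h hunk above are the whole of BSM's serialization convention: each typed field is written at the current record cursor in big-endian (network) byte order, and the cursor advances by the field's width. A minimal userspace sketch of that pattern follows; it is illustrative only. be16enc()/be32enc() are re-implemented locally so the sketch builds outside the kernel, the macro bodies mirror the header's, and the three-field "token" layout is a made-up example, not a real BSM header token.

/*
 * Sketch of the BSM big-endian encoding convention (illustrative, not
 * the kernel's implementation).
 */
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the BSD be16enc()/be32enc() encoders. */
static void be16enc(unsigned char *p, uint16_t v)
{
	p[0] = (unsigned char)(v >> 8);
	p[1] = (unsigned char)v;
}

static void be32enc(unsigned char *p, uint32_t v)
{
	p[0] = (unsigned char)(v >> 24);
	p[1] = (unsigned char)(v >> 16);
	p[2] = (unsigned char)(v >> 8);
	p[3] = (unsigned char)v;
}

/* Same shape as the ADD_U_* macros above: store, then advance the cursor. */
#define ADD_U_CHAR(loc, val)	do { *(loc) = (val); (loc) += 1; } while (0)
#define ADD_U_INT16(loc, val)	do { be16enc((loc), (val)); (loc) += 2; } while (0)
#define ADD_U_INT32(loc, val)	do { be32enc((loc), (val)); (loc) += 4; } while (0)

int main(void)
{
	unsigned char rec[16], *dptr = rec;

	/* Hypothetical token: 1-byte ID, 16-bit length, 32-bit event number. */
	ADD_U_CHAR(dptr, 0x14);		/* token ID */
	ADD_U_INT16(dptr, 7);		/* record length */
	ADD_U_INT32(dptr, 43001);	/* e.g. AUE_GETFSSTAT */

	/* Bytes land in network order regardless of host endianness. */
	for (unsigned char *p = rec; p < dptr; p++)
		printf("%02x ", *p);
	printf("\n");			/* prints: 14 00 07 00 00 a7 f9 */
	return 0;
}

Because every field is stored through an explicit byte-order encoder rather than by struct assignment, the resulting trail is portable across architectures, which is why the header insists on these macros for both encoding and decoding.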
- * + * @APPLE_BSD_LICENSE_HEADER_END@ -*/ + */ /* * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce * support for mandatory and extensible security protections. This notice * is included in support of clause 2.2 (b) of the Apple Public Source * License, Version 2.0. */ @@ -37,7 +37,7 @@ */ #ifndef _BSM_AUDIT_KERNEL_H -#define _BSM_AUDIT_KERNEL_H +#define _BSM_AUDIT_KERNEL_H #warning "<bsm/audit_kernel.h> is deprecated. Please use <security/audit/audit.h> instead." diff --git a/bsd/bsm/audit_kevents.h b/bsd/bsm/audit_kevents.h index 31e6353d7..3f4ddea63 100644 --- a/bsd/bsm/audit_kevents.h +++ b/bsd/bsm/audit_kevents.h @@ -30,13 +30,13 @@ */ #ifndef _BSM_AUDIT_KEVENTS_H_ -#define _BSM_AUDIT_KEVENTS_H_ +#define _BSM_AUDIT_KEVENTS_H_ /* * The reserved event numbers for kernel events are 1...2047 and 43001..44900. */ -#define AUE_IS_A_KEVENT(e) (((e) > 0 && (e) < 2048) || \ - ((e) > 43000 && (e) < 44901)) +#define AUE_IS_A_KEVENT(e) (((e) > 0 && (e) < 2048) || \ + ((e) > 43000 && (e) < 44901)) /* * Values marked as AUE_NULL are not required to be audited as per CAPP. @@ -47,272 +47,272 @@ * been inserted for the Darwin variants. If necessary, other tags will be * added in the future. */ -#define AUE_NULL 0 -#define AUE_EXIT 1 -#define AUE_FORK 2 -#define AUE_FORKALL AUE_FORK /* Solaris-specific. */ -#define AUE_OPEN 3 -#define AUE_CREAT 4 -#define AUE_LINK 5 -#define AUE_UNLINK 6 -#define AUE_DELETE AUE_UNLINK /* Darwin-specific. */ -#define AUE_EXEC 7 -#define AUE_CHDIR 8 -#define AUE_MKNOD 9 -#define AUE_CHMOD 10 -#define AUE_CHOWN 11 -#define AUE_UMOUNT 12 -#define AUE_JUNK 13 /* Solaris-specific. */ -#define AUE_ACCESS 14 -#define AUE_KILL 15 -#define AUE_STAT 16 -#define AUE_LSTAT 17 -#define AUE_ACCT 18 -#define AUE_MCTL 19 /* Solaris-specific. */ -#define AUE_REBOOT 20 /* XXX: Darwin conflict. */ -#define AUE_SYMLINK 21 -#define AUE_READLINK 22 -#define AUE_EXECVE 23 -#define AUE_CHROOT 24 -#define AUE_VFORK 25 -#define AUE_SETGROUPS 26 -#define AUE_SETPGRP 27 -#define AUE_SWAPON 28 -#define AUE_SETHOSTNAME 29 /* XXX: Darwin conflict. */ -#define AUE_FCNTL 30 -#define AUE_SETPRIORITY 31 /* XXX: Darwin conflict. */ -#define AUE_CONNECT 32 -#define AUE_ACCEPT 33 -#define AUE_BIND 34 -#define AUE_SETSOCKOPT 35 -#define AUE_VTRACE 36 /* Solaris-specific. */ -#define AUE_SETTIMEOFDAY 37 /* XXX: Darwin conflict. */ -#define AUE_FCHOWN 38 -#define AUE_FCHMOD 39 -#define AUE_SETREUID 40 -#define AUE_SETREGID 41 -#define AUE_RENAME 42 -#define AUE_TRUNCATE 43 /* XXX: Darwin conflict. */ -#define AUE_FTRUNCATE 44 /* XXX: Darwin conflict. */ -#define AUE_FLOCK 45 /* XXX: Darwin conflict. */ -#define AUE_SHUTDOWN 46 -#define AUE_MKDIR 47 -#define AUE_RMDIR 48 -#define AUE_UTIMES 49 -#define AUE_ADJTIME 50 -#define AUE_SETRLIMIT 51 -#define AUE_KILLPG 52 -#define AUE_NFS_SVC 53 /* XXX: Darwin conflict. */ -#define AUE_STATFS 54 -#define AUE_FSTATFS 55 -#define AUE_UNMOUNT 56 /* XXX: Darwin conflict. */ -#define AUE_ASYNC_DAEMON 57 -#define AUE_NFS_GETFH 58 /* XXX: Darwin conflict. */ -#define AUE_SETDOMAINNAME 59 -#define AUE_QUOTACTL 60 /* XXX: Darwin conflict. */ -#define AUE_EXPORTFS 61 -#define AUE_MOUNT 62 -#define AUE_SEMSYS 63 -#define AUE_MSGSYS 64 -#define AUE_SHMSYS 65 -#define AUE_BSMSYS 66 /* Solaris-specific. */ -#define AUE_RFSSYS 67 /* Solaris-specific. */ -#define AUE_FCHDIR 68 -#define AUE_FCHROOT 69 -#define AUE_VPIXSYS 70 /* Solaris-specific.
*/ -#define AUE_PATHCONF 71 -#define AUE_OPEN_R 72 -#define AUE_OPEN_RC 73 -#define AUE_OPEN_RT 74 -#define AUE_OPEN_RTC 75 -#define AUE_OPEN_W 76 -#define AUE_OPEN_WC 77 -#define AUE_OPEN_WT 78 -#define AUE_OPEN_WTC 79 -#define AUE_OPEN_RW 80 -#define AUE_OPEN_RWC 81 -#define AUE_OPEN_RWT 82 -#define AUE_OPEN_RWTC 83 -#define AUE_MSGCTL 84 -#define AUE_MSGCTL_RMID 85 -#define AUE_MSGCTL_SET 86 -#define AUE_MSGCTL_STAT 87 -#define AUE_MSGGET 88 -#define AUE_MSGRCV 89 -#define AUE_MSGSND 90 -#define AUE_SHMCTL 91 -#define AUE_SHMCTL_RMID 92 -#define AUE_SHMCTL_SET 93 -#define AUE_SHMCTL_STAT 94 -#define AUE_SHMGET 95 -#define AUE_SHMAT 96 -#define AUE_SHMDT 97 -#define AUE_SEMCTL 98 -#define AUE_SEMCTL_RMID 99 -#define AUE_SEMCTL_SET 100 -#define AUE_SEMCTL_STAT 101 -#define AUE_SEMCTL_GETNCNT 102 -#define AUE_SEMCTL_GETPID 103 -#define AUE_SEMCTL_GETVAL 104 -#define AUE_SEMCTL_GETALL 105 -#define AUE_SEMCTL_GETZCNT 106 -#define AUE_SEMCTL_SETVAL 107 -#define AUE_SEMCTL_SETALL 108 -#define AUE_SEMGET 109 -#define AUE_SEMOP 110 -#define AUE_CORE 111 /* Solaris-specific, currently. */ -#define AUE_CLOSE 112 -#define AUE_SYSTEMBOOT 113 /* Solaris-specific. */ -#define AUE_ASYNC_DAEMON_EXIT 114 /* Solaris-specific. */ -#define AUE_NFSSVC_EXIT 115 /* Solaris-specific. */ -#define AUE_WRITEL 128 /* Solaris-specific. */ -#define AUE_WRITEVL 129 /* Solaris-specific. */ -#define AUE_GETAUID 130 -#define AUE_SETAUID 131 -#define AUE_GETAUDIT 132 -#define AUE_SETAUDIT 133 -#define AUE_GETUSERAUDIT 134 /* Solaris-specific. */ -#define AUE_SETUSERAUDIT 135 /* Solaris-specific. */ -#define AUE_AUDITSVC 136 /* Solaris-specific. */ -#define AUE_AUDITUSER 137 /* Solaris-specific. */ -#define AUE_AUDITON 138 -#define AUE_AUDITON_GTERMID 139 /* Solaris-specific. */ -#define AUE_AUDITON_STERMID 140 /* Solaris-specific. */ -#define AUE_AUDITON_GPOLICY 141 -#define AUE_AUDITON_SPOLICY 142 -#define AUE_AUDITON_GQCTRL 145 -#define AUE_AUDITON_SQCTRL 146 -#define AUE_GETKERNSTATE 147 /* Solaris-specific. */ -#define AUE_SETKERNSTATE 148 /* Solaris-specific. */ -#define AUE_GETPORTAUDIT 149 /* Solaris-specific. */ -#define AUE_AUDITSTAT 150 /* Solaris-specific. */ -#define AUE_REVOKE 151 -#define AUE_MAC 152 /* Solaris-specific. */ -#define AUE_ENTERPROM 153 /* Solaris-specific. */ -#define AUE_EXITPROM 154 /* Solaris-specific. */ -#define AUE_IFLOAT 155 /* Solaris-specific. */ -#define AUE_PFLOAT 156 /* Solaris-specific. */ -#define AUE_UPRIV 157 /* Solaris-specific. */ -#define AUE_IOCTL 158 -#define AUE_SOCKET 183 -#define AUE_SENDTO 184 -#define AUE_PIPE 185 -#define AUE_SOCKETPAIR 186 /* XXX: Darwin conflict. */ -#define AUE_SEND 187 -#define AUE_SENDMSG 188 -#define AUE_RECV 189 -#define AUE_RECVMSG 190 -#define AUE_RECVFROM 191 -#define AUE_READ 192 -#define AUE_GETDENTS 193 -#define AUE_LSEEK 194 -#define AUE_WRITE 195 -#define AUE_WRITEV 196 -#define AUE_NFS 197 /* Solaris-specific. */ -#define AUE_READV 198 -#define AUE_OSTAT 199 /* Solaris-specific. */ -#define AUE_SETUID 200 /* XXXRW: Solaris old setuid? */ -#define AUE_STIME 201 /* XXXRW: Solaris old stime? */ -#define AUE_UTIME 202 /* XXXRW: Solaris old utime? */ -#define AUE_NICE 203 /* XXXRW: Solaris old nice? */ -#define AUE_OSETPGRP 204 /* Solaris-specific. */ -#define AUE_SETGID 205 -#define AUE_READL 206 /* Solaris-specific. */ -#define AUE_READVL 207 /* Solaris-specific. */ -#define AUE_FSTAT 208 -#define AUE_DUP2 209 -#define AUE_MMAP 210 -#define AUE_AUDIT 211 -#define AUE_PRIOCNTLSYS 212 /* Solaris-specific. 
*/ -#define AUE_MUNMAP 213 -#define AUE_SETEGID 214 -#define AUE_SETEUID 215 -#define AUE_PUTMSG 216 /* Solaris-specific. */ -#define AUE_GETMSG 217 /* Solaris-specific. */ -#define AUE_PUTPMSG 218 /* Solaris-specific. */ -#define AUE_GETPMSG 219 /* Solaris-specific. */ -#define AUE_AUDITSYS 220 /* Solaris-specific. */ -#define AUE_AUDITON_GETKMASK 221 -#define AUE_AUDITON_SETKMASK 222 -#define AUE_AUDITON_GETCWD 223 -#define AUE_AUDITON_GETCAR 224 -#define AUE_AUDITON_GETSTAT 225 -#define AUE_AUDITON_SETSTAT 226 -#define AUE_AUDITON_SETUMASK 227 -#define AUE_AUDITON_SETSMASK 228 -#define AUE_AUDITON_GETCOND 229 -#define AUE_AUDITON_SETCOND 230 -#define AUE_AUDITON_GETCLASS 231 -#define AUE_AUDITON_SETCLASS 232 -#define AUE_FUSERS 233 /* Solaris-specific; also UTSSYS? */ -#define AUE_STATVFS 234 -#define AUE_XSTAT 235 /* Solaris-specific. */ -#define AUE_LXSTAT 236 /* Solaris-specific. */ -#define AUE_LCHOWN 237 -#define AUE_MEMCNTL 238 /* Solaris-specific. */ -#define AUE_SYSINFO 239 /* Solaris-specific. */ -#define AUE_XMKNOD 240 /* Solaris-specific. */ -#define AUE_FORK1 241 -#define AUE_MODCTL 242 /* Solaris-specific. */ -#define AUE_MODLOAD 243 -#define AUE_MODUNLOAD 244 -#define AUE_MODCONFIG 245 /* Solaris-specific. */ -#define AUE_MODADDMAJ 246 /* Solaris-specific. */ -#define AUE_SOCKACCEPT 247 /* Solaris-specific. */ -#define AUE_SOCKCONNECT 248 /* Solaris-specific. */ -#define AUE_SOCKSEND 249 /* Solaris-specific. */ -#define AUE_SOCKRECEIVE 250 /* Solaris-specific. */ -#define AUE_ACLSET 251 -#define AUE_FACLSET 252 -#define AUE_DOORFS 253 /* Solaris-specific. */ -#define AUE_DOORFS_DOOR_CALL 254 /* Solaris-specific. */ -#define AUE_DOORFS_DOOR_RETURN 255 /* Solaris-specific. */ -#define AUE_DOORFS_DOOR_CREATE 256 /* Solaris-specific. */ -#define AUE_DOORFS_DOOR_REVOKE 257 /* Solaris-specific. */ -#define AUE_DOORFS_DOOR_INFO 258 /* Solaris-specific. */ -#define AUE_DOORFS_DOOR_CRED 259 /* Solaris-specific. */ -#define AUE_DOORFS_DOOR_BIND 260 /* Solaris-specific. */ -#define AUE_DOORFS_DOOR_UNBIND 261 /* Solaris-specific. */ -#define AUE_P_ONLINE 262 /* Solaris-specific. */ -#define AUE_PROCESSOR_BIND 263 /* Solaris-specific. */ -#define AUE_INST_SYNC 264 /* Solaris-specific. */ -#define AUE_SOCKCONFIG 265 /* Solaris-specific. */ -#define AUE_SETAUDIT_ADDR 266 -#define AUE_GETAUDIT_ADDR 267 -#define AUE_UMOUNT2 268 /* Solaris-specific. */ -#define AUE_FSAT 269 /* Solaris-specific. */ -#define AUE_OPENAT_R 270 -#define AUE_OPENAT_RC 271 -#define AUE_OPENAT_RT 272 -#define AUE_OPENAT_RTC 273 -#define AUE_OPENAT_W 274 -#define AUE_OPENAT_WC 275 -#define AUE_OPENAT_WT 276 -#define AUE_OPENAT_WTC 277 -#define AUE_OPENAT_RW 278 -#define AUE_OPENAT_RWC 279 -#define AUE_OPENAT_RWT 280 -#define AUE_OPENAT_RWTC 281 -#define AUE_RENAMEAT 282 -#define AUE_FSTATAT 283 -#define AUE_FCHOWNAT 284 -#define AUE_FUTIMESAT 285 -#define AUE_UNLINKAT 286 -#define AUE_CLOCK_SETTIME 287 -#define AUE_NTP_ADJTIME 288 -#define AUE_SETPPRIV 289 /* Solaris-specific. */ -#define AUE_MODDEVPLCY 290 /* Solaris-specific. */ -#define AUE_MODADDPRIV 291 /* Solaris-specific. */ -#define AUE_CRYPTOADM 292 /* Solaris-specific. */ -#define AUE_CONFIGKSSL 293 /* Solaris-specific. */ -#define AUE_BRANDSYS 294 /* Solaris-specific. */ -#define AUE_PF_POLICY_ADDRULE 295 /* Solaris-specific. */ -#define AUE_PF_POLICY_DELRULE 296 /* Solaris-specific. */ -#define AUE_PF_POLICY_CLONE 297 /* Solaris-specific. */ -#define AUE_PF_POLICY_FLIP 298 /* Solaris-specific. */ -#define AUE_PF_POLICY_FLUSH 299 /* Solaris-specific. 
*/ -#define AUE_PF_POLICY_ALGS 300 /* Solaris-specific. */ -#define AUE_PORTFS 301 /* Solaris-specific. */ +#define AUE_NULL 0 +#define AUE_EXIT 1 +#define AUE_FORK 2 +#define AUE_FORKALL AUE_FORK /* Solaris-specific. */ +#define AUE_OPEN 3 +#define AUE_CREAT 4 +#define AUE_LINK 5 +#define AUE_UNLINK 6 +#define AUE_DELETE AUE_UNLINK /* Darwin-specific. */ +#define AUE_EXEC 7 +#define AUE_CHDIR 8 +#define AUE_MKNOD 9 +#define AUE_CHMOD 10 +#define AUE_CHOWN 11 +#define AUE_UMOUNT 12 +#define AUE_JUNK 13 /* Solaris-specific. */ +#define AUE_ACCESS 14 +#define AUE_KILL 15 +#define AUE_STAT 16 +#define AUE_LSTAT 17 +#define AUE_ACCT 18 +#define AUE_MCTL 19 /* Solaris-specific. */ +#define AUE_REBOOT 20 /* XXX: Darwin conflict. */ +#define AUE_SYMLINK 21 +#define AUE_READLINK 22 +#define AUE_EXECVE 23 +#define AUE_CHROOT 24 +#define AUE_VFORK 25 +#define AUE_SETGROUPS 26 +#define AUE_SETPGRP 27 +#define AUE_SWAPON 28 +#define AUE_SETHOSTNAME 29 /* XXX: Darwin conflict. */ +#define AUE_FCNTL 30 +#define AUE_SETPRIORITY 31 /* XXX: Darwin conflict. */ +#define AUE_CONNECT 32 +#define AUE_ACCEPT 33 +#define AUE_BIND 34 +#define AUE_SETSOCKOPT 35 +#define AUE_VTRACE 36 /* Solaris-specific. */ +#define AUE_SETTIMEOFDAY 37 /* XXX: Darwin conflict. */ +#define AUE_FCHOWN 38 +#define AUE_FCHMOD 39 +#define AUE_SETREUID 40 +#define AUE_SETREGID 41 +#define AUE_RENAME 42 +#define AUE_TRUNCATE 43 /* XXX: Darwin conflict. */ +#define AUE_FTRUNCATE 44 /* XXX: Darwin conflict. */ +#define AUE_FLOCK 45 /* XXX: Darwin conflict. */ +#define AUE_SHUTDOWN 46 +#define AUE_MKDIR 47 +#define AUE_RMDIR 48 +#define AUE_UTIMES 49 +#define AUE_ADJTIME 50 +#define AUE_SETRLIMIT 51 +#define AUE_KILLPG 52 +#define AUE_NFS_SVC 53 /* XXX: Darwin conflict. */ +#define AUE_STATFS 54 +#define AUE_FSTATFS 55 +#define AUE_UNMOUNT 56 /* XXX: Darwin conflict. */ +#define AUE_ASYNC_DAEMON 57 +#define AUE_NFS_GETFH 58 /* XXX: Darwin conflict. */ +#define AUE_SETDOMAINNAME 59 +#define AUE_QUOTACTL 60 /* XXX: Darwin conflict. */ +#define AUE_EXPORTFS 61 +#define AUE_MOUNT 62 +#define AUE_SEMSYS 63 +#define AUE_MSGSYS 64 +#define AUE_SHMSYS 65 +#define AUE_BSMSYS 66 /* Solaris-specific. */ +#define AUE_RFSSYS 67 /* Solaris-specific. */ +#define AUE_FCHDIR 68 +#define AUE_FCHROOT 69 +#define AUE_VPIXSYS 70 /* Solaris-specific. */ +#define AUE_PATHCONF 71 +#define AUE_OPEN_R 72 +#define AUE_OPEN_RC 73 +#define AUE_OPEN_RT 74 +#define AUE_OPEN_RTC 75 +#define AUE_OPEN_W 76 +#define AUE_OPEN_WC 77 +#define AUE_OPEN_WT 78 +#define AUE_OPEN_WTC 79 +#define AUE_OPEN_RW 80 +#define AUE_OPEN_RWC 81 +#define AUE_OPEN_RWT 82 +#define AUE_OPEN_RWTC 83 +#define AUE_MSGCTL 84 +#define AUE_MSGCTL_RMID 85 +#define AUE_MSGCTL_SET 86 +#define AUE_MSGCTL_STAT 87 +#define AUE_MSGGET 88 +#define AUE_MSGRCV 89 +#define AUE_MSGSND 90 +#define AUE_SHMCTL 91 +#define AUE_SHMCTL_RMID 92 +#define AUE_SHMCTL_SET 93 +#define AUE_SHMCTL_STAT 94 +#define AUE_SHMGET 95 +#define AUE_SHMAT 96 +#define AUE_SHMDT 97 +#define AUE_SEMCTL 98 +#define AUE_SEMCTL_RMID 99 +#define AUE_SEMCTL_SET 100 +#define AUE_SEMCTL_STAT 101 +#define AUE_SEMCTL_GETNCNT 102 +#define AUE_SEMCTL_GETPID 103 +#define AUE_SEMCTL_GETVAL 104 +#define AUE_SEMCTL_GETALL 105 +#define AUE_SEMCTL_GETZCNT 106 +#define AUE_SEMCTL_SETVAL 107 +#define AUE_SEMCTL_SETALL 108 +#define AUE_SEMGET 109 +#define AUE_SEMOP 110 +#define AUE_CORE 111 /* Solaris-specific, currently. */ +#define AUE_CLOSE 112 +#define AUE_SYSTEMBOOT 113 /* Solaris-specific. */ +#define AUE_ASYNC_DAEMON_EXIT 114 /* Solaris-specific. 
*/ +#define AUE_NFSSVC_EXIT 115 /* Solaris-specific. */ +#define AUE_WRITEL 128 /* Solaris-specific. */ +#define AUE_WRITEVL 129 /* Solaris-specific. */ +#define AUE_GETAUID 130 +#define AUE_SETAUID 131 +#define AUE_GETAUDIT 132 +#define AUE_SETAUDIT 133 +#define AUE_GETUSERAUDIT 134 /* Solaris-specific. */ +#define AUE_SETUSERAUDIT 135 /* Solaris-specific. */ +#define AUE_AUDITSVC 136 /* Solaris-specific. */ +#define AUE_AUDITUSER 137 /* Solaris-specific. */ +#define AUE_AUDITON 138 +#define AUE_AUDITON_GTERMID 139 /* Solaris-specific. */ +#define AUE_AUDITON_STERMID 140 /* Solaris-specific. */ +#define AUE_AUDITON_GPOLICY 141 +#define AUE_AUDITON_SPOLICY 142 +#define AUE_AUDITON_GQCTRL 145 +#define AUE_AUDITON_SQCTRL 146 +#define AUE_GETKERNSTATE 147 /* Solaris-specific. */ +#define AUE_SETKERNSTATE 148 /* Solaris-specific. */ +#define AUE_GETPORTAUDIT 149 /* Solaris-specific. */ +#define AUE_AUDITSTAT 150 /* Solaris-specific. */ +#define AUE_REVOKE 151 +#define AUE_MAC 152 /* Solaris-specific. */ +#define AUE_ENTERPROM 153 /* Solaris-specific. */ +#define AUE_EXITPROM 154 /* Solaris-specific. */ +#define AUE_IFLOAT 155 /* Solaris-specific. */ +#define AUE_PFLOAT 156 /* Solaris-specific. */ +#define AUE_UPRIV 157 /* Solaris-specific. */ +#define AUE_IOCTL 158 +#define AUE_SOCKET 183 +#define AUE_SENDTO 184 +#define AUE_PIPE 185 +#define AUE_SOCKETPAIR 186 /* XXX: Darwin conflict. */ +#define AUE_SEND 187 +#define AUE_SENDMSG 188 +#define AUE_RECV 189 +#define AUE_RECVMSG 190 +#define AUE_RECVFROM 191 +#define AUE_READ 192 +#define AUE_GETDENTS 193 +#define AUE_LSEEK 194 +#define AUE_WRITE 195 +#define AUE_WRITEV 196 +#define AUE_NFS 197 /* Solaris-specific. */ +#define AUE_READV 198 +#define AUE_OSTAT 199 /* Solaris-specific. */ +#define AUE_SETUID 200 /* XXXRW: Solaris old setuid? */ +#define AUE_STIME 201 /* XXXRW: Solaris old stime? */ +#define AUE_UTIME 202 /* XXXRW: Solaris old utime? */ +#define AUE_NICE 203 /* XXXRW: Solaris old nice? */ +#define AUE_OSETPGRP 204 /* Solaris-specific. */ +#define AUE_SETGID 205 +#define AUE_READL 206 /* Solaris-specific. */ +#define AUE_READVL 207 /* Solaris-specific. */ +#define AUE_FSTAT 208 +#define AUE_DUP2 209 +#define AUE_MMAP 210 +#define AUE_AUDIT 211 +#define AUE_PRIOCNTLSYS 212 /* Solaris-specific. */ +#define AUE_MUNMAP 213 +#define AUE_SETEGID 214 +#define AUE_SETEUID 215 +#define AUE_PUTMSG 216 /* Solaris-specific. */ +#define AUE_GETMSG 217 /* Solaris-specific. */ +#define AUE_PUTPMSG 218 /* Solaris-specific. */ +#define AUE_GETPMSG 219 /* Solaris-specific. */ +#define AUE_AUDITSYS 220 /* Solaris-specific. */ +#define AUE_AUDITON_GETKMASK 221 +#define AUE_AUDITON_SETKMASK 222 +#define AUE_AUDITON_GETCWD 223 +#define AUE_AUDITON_GETCAR 224 +#define AUE_AUDITON_GETSTAT 225 +#define AUE_AUDITON_SETSTAT 226 +#define AUE_AUDITON_SETUMASK 227 +#define AUE_AUDITON_SETSMASK 228 +#define AUE_AUDITON_GETCOND 229 +#define AUE_AUDITON_SETCOND 230 +#define AUE_AUDITON_GETCLASS 231 +#define AUE_AUDITON_SETCLASS 232 +#define AUE_FUSERS 233 /* Solaris-specific; also UTSSYS? */ +#define AUE_STATVFS 234 +#define AUE_XSTAT 235 /* Solaris-specific. */ +#define AUE_LXSTAT 236 /* Solaris-specific. */ +#define AUE_LCHOWN 237 +#define AUE_MEMCNTL 238 /* Solaris-specific. */ +#define AUE_SYSINFO 239 /* Solaris-specific. */ +#define AUE_XMKNOD 240 /* Solaris-specific. */ +#define AUE_FORK1 241 +#define AUE_MODCTL 242 /* Solaris-specific. */ +#define AUE_MODLOAD 243 +#define AUE_MODUNLOAD 244 +#define AUE_MODCONFIG 245 /* Solaris-specific. 
*/ +#define AUE_MODADDMAJ 246 /* Solaris-specific. */ +#define AUE_SOCKACCEPT 247 /* Solaris-specific. */ +#define AUE_SOCKCONNECT 248 /* Solaris-specific. */ +#define AUE_SOCKSEND 249 /* Solaris-specific. */ +#define AUE_SOCKRECEIVE 250 /* Solaris-specific. */ +#define AUE_ACLSET 251 +#define AUE_FACLSET 252 +#define AUE_DOORFS 253 /* Solaris-specific. */ +#define AUE_DOORFS_DOOR_CALL 254 /* Solaris-specific. */ +#define AUE_DOORFS_DOOR_RETURN 255 /* Solaris-specific. */ +#define AUE_DOORFS_DOOR_CREATE 256 /* Solaris-specific. */ +#define AUE_DOORFS_DOOR_REVOKE 257 /* Solaris-specific. */ +#define AUE_DOORFS_DOOR_INFO 258 /* Solaris-specific. */ +#define AUE_DOORFS_DOOR_CRED 259 /* Solaris-specific. */ +#define AUE_DOORFS_DOOR_BIND 260 /* Solaris-specific. */ +#define AUE_DOORFS_DOOR_UNBIND 261 /* Solaris-specific. */ +#define AUE_P_ONLINE 262 /* Solaris-specific. */ +#define AUE_PROCESSOR_BIND 263 /* Solaris-specific. */ +#define AUE_INST_SYNC 264 /* Solaris-specific. */ +#define AUE_SOCKCONFIG 265 /* Solaris-specific. */ +#define AUE_SETAUDIT_ADDR 266 +#define AUE_GETAUDIT_ADDR 267 +#define AUE_UMOUNT2 268 /* Solaris-specific. */ +#define AUE_FSAT 269 /* Solaris-specific. */ +#define AUE_OPENAT_R 270 +#define AUE_OPENAT_RC 271 +#define AUE_OPENAT_RT 272 +#define AUE_OPENAT_RTC 273 +#define AUE_OPENAT_W 274 +#define AUE_OPENAT_WC 275 +#define AUE_OPENAT_WT 276 +#define AUE_OPENAT_WTC 277 +#define AUE_OPENAT_RW 278 +#define AUE_OPENAT_RWC 279 +#define AUE_OPENAT_RWT 280 +#define AUE_OPENAT_RWTC 281 +#define AUE_RENAMEAT 282 +#define AUE_FSTATAT 283 +#define AUE_FCHOWNAT 284 +#define AUE_FUTIMESAT 285 +#define AUE_UNLINKAT 286 +#define AUE_CLOCK_SETTIME 287 +#define AUE_NTP_ADJTIME 288 +#define AUE_SETPPRIV 289 /* Solaris-specific. */ +#define AUE_MODDEVPLCY 290 /* Solaris-specific. */ +#define AUE_MODADDPRIV 291 /* Solaris-specific. */ +#define AUE_CRYPTOADM 292 /* Solaris-specific. */ +#define AUE_CONFIGKSSL 293 /* Solaris-specific. */ +#define AUE_BRANDSYS 294 /* Solaris-specific. */ +#define AUE_PF_POLICY_ADDRULE 295 /* Solaris-specific. */ +#define AUE_PF_POLICY_DELRULE 296 /* Solaris-specific. */ +#define AUE_PF_POLICY_CLONE 297 /* Solaris-specific. */ +#define AUE_PF_POLICY_FLIP 298 /* Solaris-specific. */ +#define AUE_PF_POLICY_FLUSH 299 /* Solaris-specific. */ +#define AUE_PF_POLICY_ALGS 300 /* Solaris-specific. */ +#define AUE_PORTFS 301 /* Solaris-specific. */ /* * Events added for Apple Darwin that potentially collide with future Solaris @@ -320,68 +320,68 @@ * new trails. Systems generating these events should switch to the new * identifiers that avoid colliding with the Solaris identifier space. */ -#define AUE_DARWIN_GETFSSTAT 301 -#define AUE_DARWIN_PTRACE 302 -#define AUE_DARWIN_CHFLAGS 303 -#define AUE_DARWIN_FCHFLAGS 304 -#define AUE_DARWIN_PROFILE 305 -#define AUE_DARWIN_KTRACE 306 -#define AUE_DARWIN_SETLOGIN 307 -#define AUE_DARWIN_REBOOT 308 -#define AUE_DARWIN_REVOKE 309 -#define AUE_DARWIN_UMASK 310 -#define AUE_DARWIN_MPROTECT 311 -#define AUE_DARWIN_SETPRIORITY 312 -#define AUE_DARWIN_SETTIMEOFDAY 313 -#define AUE_DARWIN_FLOCK 314 -#define AUE_DARWIN_MKFIFO 315 -#define AUE_DARWIN_POLL 316 -#define AUE_DARWIN_SOCKETPAIR 317 -#define AUE_DARWIN_FUTIMES 318 -#define AUE_DARWIN_SETSID 319 -#define AUE_DARWIN_SETPRIVEXEC 320 /* Darwin-specific. */ -#define AUE_DARWIN_NFSSVC 321 -#define AUE_DARWIN_GETFH 322 -#define AUE_DARWIN_QUOTACTL 323 -#define AUE_DARWIN_ADDPROFILE 324 /* Darwin-specific. */ -#define AUE_DARWIN_KDEBUGTRACE 325 /* Darwin-specific. 
*/ -#define AUE_DARWIN_KDBUGTRACE AUE_KDEBUGTRACE -#define AUE_DARWIN_FSTAT 326 -#define AUE_DARWIN_FPATHCONF 327 -#define AUE_DARWIN_GETDIRENTRIES 328 -#define AUE_DARWIN_TRUNCATE 329 -#define AUE_DARWIN_FTRUNCATE 330 -#define AUE_DARWIN_SYSCTL 331 -#define AUE_DARWIN_MLOCK 332 -#define AUE_DARWIN_MUNLOCK 333 -#define AUE_DARWIN_UNDELETE 334 -#define AUE_DARWIN_GETATTRLIST 335 /* Darwin-specific. */ -#define AUE_DARWIN_SETATTRLIST 336 /* Darwin-specific. */ -#define AUE_DARWIN_GETDIRENTRIESATTR 337 /* Darwin-specific. */ -#define AUE_DARWIN_EXCHANGEDATA 338 /* Darwin-specific. */ -#define AUE_DARWIN_SEARCHFS 339 /* Darwin-specific. */ -#define AUE_DARWIN_MINHERIT 340 -#define AUE_DARWIN_SEMCONFIG 341 -#define AUE_DARWIN_SEMOPEN 342 -#define AUE_DARWIN_SEMCLOSE 343 -#define AUE_DARWIN_SEMUNLINK 344 -#define AUE_DARWIN_SHMOPEN 345 -#define AUE_DARWIN_SHMUNLINK 346 -#define AUE_DARWIN_LOADSHFILE 347 /* Darwin-specific. */ -#define AUE_DARWIN_RESETSHFILE 348 /* Darwin-specific. */ -#define AUE_DARWIN_NEWSYSTEMSHREG 349 /* Darwin-specific. */ -#define AUE_DARWIN_PTHREADKILL 350 /* Darwin-specific. */ -#define AUE_DARWIN_PTHREADSIGMASK 351 /* Darwin-specific. */ -#define AUE_DARWIN_AUDITCTL 352 -#define AUE_DARWIN_RFORK 353 -#define AUE_DARWIN_LCHMOD 354 -#define AUE_DARWIN_SWAPOFF 355 -#define AUE_DARWIN_INITPROCESS 356 /* Darwin-specific. */ -#define AUE_DARWIN_MAPFD 357 /* Darwin-specific. */ -#define AUE_DARWIN_TASKFORPID 358 /* Darwin-specific. */ -#define AUE_DARWIN_PIDFORTASK 359 /* Darwin-specific. */ -#define AUE_DARWIN_SYSCTL_NONADMIN 360 -#define AUE_DARWIN_COPYFILE 361 /* Darwin-specific. */ +#define AUE_DARWIN_GETFSSTAT 301 +#define AUE_DARWIN_PTRACE 302 +#define AUE_DARWIN_CHFLAGS 303 +#define AUE_DARWIN_FCHFLAGS 304 +#define AUE_DARWIN_PROFILE 305 +#define AUE_DARWIN_KTRACE 306 +#define AUE_DARWIN_SETLOGIN 307 +#define AUE_DARWIN_REBOOT 308 +#define AUE_DARWIN_REVOKE 309 +#define AUE_DARWIN_UMASK 310 +#define AUE_DARWIN_MPROTECT 311 +#define AUE_DARWIN_SETPRIORITY 312 +#define AUE_DARWIN_SETTIMEOFDAY 313 +#define AUE_DARWIN_FLOCK 314 +#define AUE_DARWIN_MKFIFO 315 +#define AUE_DARWIN_POLL 316 +#define AUE_DARWIN_SOCKETPAIR 317 +#define AUE_DARWIN_FUTIMES 318 +#define AUE_DARWIN_SETSID 319 +#define AUE_DARWIN_SETPRIVEXEC 320 /* Darwin-specific. */ +#define AUE_DARWIN_NFSSVC 321 +#define AUE_DARWIN_GETFH 322 +#define AUE_DARWIN_QUOTACTL 323 +#define AUE_DARWIN_ADDPROFILE 324 /* Darwin-specific. */ +#define AUE_DARWIN_KDEBUGTRACE 325 /* Darwin-specific. */ +#define AUE_DARWIN_KDBUGTRACE AUE_KDEBUGTRACE +#define AUE_DARWIN_FSTAT 326 +#define AUE_DARWIN_FPATHCONF 327 +#define AUE_DARWIN_GETDIRENTRIES 328 +#define AUE_DARWIN_TRUNCATE 329 +#define AUE_DARWIN_FTRUNCATE 330 +#define AUE_DARWIN_SYSCTL 331 +#define AUE_DARWIN_MLOCK 332 +#define AUE_DARWIN_MUNLOCK 333 +#define AUE_DARWIN_UNDELETE 334 +#define AUE_DARWIN_GETATTRLIST 335 /* Darwin-specific. */ +#define AUE_DARWIN_SETATTRLIST 336 /* Darwin-specific. */ +#define AUE_DARWIN_GETDIRENTRIESATTR 337 /* Darwin-specific. */ +#define AUE_DARWIN_EXCHANGEDATA 338 /* Darwin-specific. */ +#define AUE_DARWIN_SEARCHFS 339 /* Darwin-specific. */ +#define AUE_DARWIN_MINHERIT 340 +#define AUE_DARWIN_SEMCONFIG 341 +#define AUE_DARWIN_SEMOPEN 342 +#define AUE_DARWIN_SEMCLOSE 343 +#define AUE_DARWIN_SEMUNLINK 344 +#define AUE_DARWIN_SHMOPEN 345 +#define AUE_DARWIN_SHMUNLINK 346 +#define AUE_DARWIN_LOADSHFILE 347 /* Darwin-specific. */ +#define AUE_DARWIN_RESETSHFILE 348 /* Darwin-specific. 
*/ +#define AUE_DARWIN_NEWSYSTEMSHREG 349 /* Darwin-specific. */ +#define AUE_DARWIN_PTHREADKILL 350 /* Darwin-specific. */ +#define AUE_DARWIN_PTHREADSIGMASK 351 /* Darwin-specific. */ +#define AUE_DARWIN_AUDITCTL 352 +#define AUE_DARWIN_RFORK 353 +#define AUE_DARWIN_LCHMOD 354 +#define AUE_DARWIN_SWAPOFF 355 +#define AUE_DARWIN_INITPROCESS 356 /* Darwin-specific. */ +#define AUE_DARWIN_MAPFD 357 /* Darwin-specific. */ +#define AUE_DARWIN_TASKFORPID 358 /* Darwin-specific. */ +#define AUE_DARWIN_PIDFORTASK 359 /* Darwin-specific. */ +#define AUE_DARWIN_SYSCTL_NONADMIN 360 +#define AUE_DARWIN_COPYFILE 361 /* Darwin-specific. */ /* * Audit event identifiers added as part of OpenBSM, generally corresponding @@ -395,230 +395,230 @@ * identifier so that old trails can still be processed, but new trails use * the Solaris identifier. */ -#define AUE_GETFSSTAT 43001 -#define AUE_PTRACE 43002 -#define AUE_CHFLAGS 43003 -#define AUE_FCHFLAGS 43004 -#define AUE_PROFILE 43005 -#define AUE_KTRACE 43006 -#define AUE_SETLOGIN 43007 -#define AUE_OPENBSM_REVOKE 43008 /* Solaris event now preferred. */ -#define AUE_UMASK 43009 -#define AUE_MPROTECT 43010 -#define AUE_MKFIFO 43011 -#define AUE_POLL 43012 -#define AUE_FUTIMES 43013 -#define AUE_SETSID 43014 -#define AUE_SETPRIVEXEC 43015 /* Darwin-specific. */ -#define AUE_ADDPROFILE 43016 /* Darwin-specific. */ -#define AUE_KDEBUGTRACE 43017 /* Darwin-specific. */ -#define AUE_KDBUGTRACE AUE_KDEBUGTRACE -#define AUE_OPENBSM_FSTAT 43018 /* Solaris event now preferred. */ -#define AUE_FPATHCONF 43019 -#define AUE_GETDIRENTRIES 43020 -#define AUE_SYSCTL 43021 -#define AUE_MLOCK 43022 -#define AUE_MUNLOCK 43023 -#define AUE_UNDELETE 43024 -#define AUE_GETATTRLIST 43025 /* Darwin-specific. */ -#define AUE_SETATTRLIST 43026 /* Darwin-specific. */ -#define AUE_GETDIRENTRIESATTR 43027 /* Darwin-specific. */ -#define AUE_EXCHANGEDATA 43028 /* Darwin-specific. */ -#define AUE_SEARCHFS 43029 /* Darwin-specific. */ -#define AUE_MINHERIT 43030 -#define AUE_SEMCONFIG 43031 -#define AUE_SEMOPEN 43032 -#define AUE_SEMCLOSE 43033 -#define AUE_SEMUNLINK 43034 -#define AUE_SHMOPEN 43035 -#define AUE_SHMUNLINK 43036 -#define AUE_LOADSHFILE 43037 /* Darwin-specific. */ -#define AUE_RESETSHFILE 43038 /* Darwin-specific. */ -#define AUE_NEWSYSTEMSHREG 43039 /* Darwin-specific. */ -#define AUE_PTHREADKILL 43040 /* Darwin-specific. */ -#define AUE_PTHREADSIGMASK 43041 /* Darwin-specific. */ -#define AUE_AUDITCTL 43042 -#define AUE_RFORK 43043 -#define AUE_LCHMOD 43044 -#define AUE_SWAPOFF 43045 -#define AUE_INITPROCESS 43046 /* Darwin-specific. */ -#define AUE_MAPFD 43047 /* Darwin-specific. */ -#define AUE_TASKFORPID 43048 /* Darwin-specific. */ -#define AUE_PIDFORTASK 43049 /* Darwin-specific. */ -#define AUE_SYSCTL_NONADMIN 43050 -#define AUE_COPYFILE 43051 /* Darwin-specific. */ +#define AUE_GETFSSTAT 43001 +#define AUE_PTRACE 43002 +#define AUE_CHFLAGS 43003 +#define AUE_FCHFLAGS 43004 +#define AUE_PROFILE 43005 +#define AUE_KTRACE 43006 +#define AUE_SETLOGIN 43007 +#define AUE_OPENBSM_REVOKE 43008 /* Solaris event now preferred. */ +#define AUE_UMASK 43009 +#define AUE_MPROTECT 43010 +#define AUE_MKFIFO 43011 +#define AUE_POLL 43012 +#define AUE_FUTIMES 43013 +#define AUE_SETSID 43014 +#define AUE_SETPRIVEXEC 43015 /* Darwin-specific. */ +#define AUE_ADDPROFILE 43016 /* Darwin-specific. */ +#define AUE_KDEBUGTRACE 43017 /* Darwin-specific. */ +#define AUE_KDBUGTRACE AUE_KDEBUGTRACE +#define AUE_OPENBSM_FSTAT 43018 /* Solaris event now preferred. 
*/ +#define AUE_FPATHCONF 43019 +#define AUE_GETDIRENTRIES 43020 +#define AUE_SYSCTL 43021 +#define AUE_MLOCK 43022 +#define AUE_MUNLOCK 43023 +#define AUE_UNDELETE 43024 +#define AUE_GETATTRLIST 43025 /* Darwin-specific. */ +#define AUE_SETATTRLIST 43026 /* Darwin-specific. */ +#define AUE_GETDIRENTRIESATTR 43027 /* Darwin-specific. */ +#define AUE_EXCHANGEDATA 43028 /* Darwin-specific. */ +#define AUE_SEARCHFS 43029 /* Darwin-specific. */ +#define AUE_MINHERIT 43030 +#define AUE_SEMCONFIG 43031 +#define AUE_SEMOPEN 43032 +#define AUE_SEMCLOSE 43033 +#define AUE_SEMUNLINK 43034 +#define AUE_SHMOPEN 43035 +#define AUE_SHMUNLINK 43036 +#define AUE_LOADSHFILE 43037 /* Darwin-specific. */ +#define AUE_RESETSHFILE 43038 /* Darwin-specific. */ +#define AUE_NEWSYSTEMSHREG 43039 /* Darwin-specific. */ +#define AUE_PTHREADKILL 43040 /* Darwin-specific. */ +#define AUE_PTHREADSIGMASK 43041 /* Darwin-specific. */ +#define AUE_AUDITCTL 43042 +#define AUE_RFORK 43043 +#define AUE_LCHMOD 43044 +#define AUE_SWAPOFF 43045 +#define AUE_INITPROCESS 43046 /* Darwin-specific. */ +#define AUE_MAPFD 43047 /* Darwin-specific. */ +#define AUE_TASKFORPID 43048 /* Darwin-specific. */ +#define AUE_PIDFORTASK 43049 /* Darwin-specific. */ +#define AUE_SYSCTL_NONADMIN 43050 +#define AUE_COPYFILE 43051 /* Darwin-specific. */ /* * Events added to OpenBSM for FreeBSD and Linux; may also be used by Darwin * in the future. */ -#define AUE_LUTIMES 43052 -#define AUE_LCHFLAGS 43053 /* FreeBSD-specific. */ -#define AUE_SENDFILE 43054 /* BSD/Linux-specific. */ -#define AUE_USELIB 43055 /* Linux-specific. */ -#define AUE_GETRESUID 43056 -#define AUE_SETRESUID 43057 -#define AUE_GETRESGID 43058 -#define AUE_SETRESGID 43059 -#define AUE_WAIT4 43060 /* FreeBSD-specific. */ -#define AUE_LGETFH 43061 /* FreeBSD-specific. */ -#define AUE_FHSTATFS 43062 /* FreeBSD-specific. */ -#define AUE_FHOPEN 43063 /* FreeBSD-specific. */ -#define AUE_FHSTAT 43064 /* FreeBSD-specific. */ -#define AUE_JAIL 43065 /* FreeBSD-specific. */ -#define AUE_EACCESS 43066 /* FreeBSD-specific. */ -#define AUE_KQUEUE 43067 /* FreeBSD-specific. */ -#define AUE_KEVENT 43068 /* FreeBSD-specific. */ -#define AUE_FSYNC 43069 -#define AUE_NMOUNT 43070 /* FreeBSD-specific. */ -#define AUE_BDFLUSH 43071 /* Linux-specific. */ -#define AUE_SETFSUID 43072 /* Linux-specific. */ -#define AUE_SETFSGID 43073 /* Linux-specific. */ -#define AUE_PERSONALITY 43074 /* Linux-specific. */ -#define AUE_SCHED_GETSCHEDULER 43075 /* POSIX.1b. */ -#define AUE_SCHED_SETSCHEDULER 43076 /* POSIX.1b. */ -#define AUE_PRCTL 43077 /* Linux-specific. */ -#define AUE_GETCWD 43078 /* FreeBSD/Linux-specific. */ -#define AUE_CAPGET 43079 /* Linux-specific. */ -#define AUE_CAPSET 43080 /* Linux-specific. */ -#define AUE_PIVOT_ROOT 43081 /* Linux-specific. */ -#define AUE_RTPRIO 43082 /* FreeBSD-specific. */ -#define AUE_SCHED_GETPARAM 43083 /* POSIX.1b. */ -#define AUE_SCHED_SETPARAM 43084 /* POSIX.1b. */ -#define AUE_SCHED_GET_PRIORITY_MAX 43085 /* POSIX.1b. */ -#define AUE_SCHED_GET_PRIORITY_MIN 43086 /* POSIX.1b. */ -#define AUE_SCHED_RR_GET_INTERVAL 43087 /* POSIX.1b. */ -#define AUE_ACL_GET_FILE 43088 /* FreeBSD. */ -#define AUE_ACL_SET_FILE 43089 /* FreeBSD. */ -#define AUE_ACL_GET_FD 43090 /* FreeBSD. */ -#define AUE_ACL_SET_FD 43091 /* FreeBSD. */ -#define AUE_ACL_DELETE_FILE 43092 /* FreeBSD. */ -#define AUE_ACL_DELETE_FD 43093 /* FreeBSD. */ -#define AUE_ACL_CHECK_FILE 43094 /* FreeBSD. */ -#define AUE_ACL_CHECK_FD 43095 /* FreeBSD. */ -#define AUE_ACL_GET_LINK 43096 /* FreeBSD. 
*/ -#define AUE_ACL_SET_LINK 43097 /* FreeBSD. */ -#define AUE_ACL_DELETE_LINK 43098 /* FreeBSD. */ -#define AUE_ACL_CHECK_LINK 43099 /* FreeBSD. */ -#define AUE_SYSARCH 43100 /* FreeBSD. */ -#define AUE_EXTATTRCTL 43101 /* FreeBSD. */ -#define AUE_EXTATTR_GET_FILE 43102 /* FreeBSD. */ -#define AUE_EXTATTR_SET_FILE 43103 /* FreeBSD. */ -#define AUE_EXTATTR_LIST_FILE 43104 /* FreeBSD. */ -#define AUE_EXTATTR_DELETE_FILE 43105 /* FreeBSD. */ -#define AUE_EXTATTR_GET_FD 43106 /* FreeBSD. */ -#define AUE_EXTATTR_SET_FD 43107 /* FreeBSD. */ -#define AUE_EXTATTR_LIST_FD 43108 /* FreeBSD. */ -#define AUE_EXTATTR_DELETE_FD 43109 /* FreeBSD. */ -#define AUE_EXTATTR_GET_LINK 43110 /* FreeBSD. */ -#define AUE_EXTATTR_SET_LINK 43111 /* FreeBSD. */ -#define AUE_EXTATTR_LIST_LINK 43112 /* FreeBSD. */ -#define AUE_EXTATTR_DELETE_LINK 43113 /* FreeBSD. */ -#define AUE_KENV 43114 /* FreeBSD. */ -#define AUE_JAIL_ATTACH 43115 /* FreeBSD. */ -#define AUE_SYSCTL_WRITE 43116 /* FreeBSD. */ -#define AUE_IOPERM 43117 /* Linux. */ -#define AUE_READDIR 43118 /* Linux. */ -#define AUE_IOPL 43119 /* Linux. */ -#define AUE_VM86 43120 /* Linux. */ -#define AUE_MAC_GET_PROC 43121 /* FreeBSD/Darwin. */ -#define AUE_MAC_SET_PROC 43122 /* FreeBSD/Darwin. */ -#define AUE_MAC_GET_FD 43123 /* FreeBSD/Darwin. */ -#define AUE_MAC_GET_FILE 43124 /* FreeBSD/Darwin. */ -#define AUE_MAC_SET_FD 43125 /* FreeBSD/Darwin. */ -#define AUE_MAC_SET_FILE 43126 /* FreeBSD/Darwin. */ -#define AUE_MAC_SYSCALL 43127 /* FreeBSD. */ -#define AUE_MAC_GET_PID 43128 /* FreeBSD/Darwin. */ -#define AUE_MAC_GET_LINK 43129 /* FreeBSD/Darwin. */ -#define AUE_MAC_SET_LINK 43130 /* FreeBSD/Darwin. */ -#define AUE_MAC_EXECVE 43131 /* FreeBSD/Darwin. */ -#define AUE_GETPATH_FROMFD 43132 /* FreeBSD. */ -#define AUE_GETPATH_FROMADDR 43133 /* FreeBSD. */ -#define AUE_MQ_OPEN 43134 /* FreeBSD. */ -#define AUE_MQ_SETATTR 43135 /* FreeBSD. */ -#define AUE_MQ_TIMEDRECEIVE 43136 /* FreeBSD. */ -#define AUE_MQ_TIMEDSEND 43137 /* FreeBSD. */ -#define AUE_MQ_NOTIFY 43138 /* FreeBSD. */ -#define AUE_MQ_UNLINK 43139 /* FreeBSD. */ -#define AUE_LISTEN 43140 /* FreeBSD/Darwin/Linux. */ -#define AUE_MLOCKALL 43141 /* FreeBSD. */ -#define AUE_MUNLOCKALL 43142 /* FreeBSD. */ -#define AUE_CLOSEFROM 43143 /* FreeBSD. */ -#define AUE_FEXECVE 43144 /* FreeBSD. */ -#define AUE_FACCESSAT 43145 /* FreeBSD. */ -#define AUE_FCHMODAT 43146 /* FreeBSD. */ -#define AUE_LINKAT 43147 /* FreeBSD. */ -#define AUE_MKDIRAT 43148 /* FreeBSD. */ -#define AUE_MKFIFOAT 43149 /* FreeBSD. */ -#define AUE_MKNODAT 43150 /* FreeBSD. */ -#define AUE_READLINKAT 43151 /* FreeBSD. */ -#define AUE_SYMLINKAT 43152 /* FreeBSD. */ -#define AUE_MAC_GETFSSTAT 43153 /* Darwin. */ -#define AUE_MAC_GET_MOUNT 43154 /* Darwin. */ -#define AUE_MAC_GET_LCID 43155 /* Darwin. */ -#define AUE_MAC_GET_LCTX 43156 /* Darwin. */ -#define AUE_MAC_SET_LCTX 43157 /* Darwin. */ -#define AUE_MAC_MOUNT 43158 /* Darwin. */ -#define AUE_GETLCID 43159 /* Darwin. */ -#define AUE_SETLCID 43160 /* Darwin. */ -#define AUE_TASKNAMEFORPID 43161 /* Darwin. */ -#define AUE_ACCESS_EXTENDED 43162 /* Darwin. */ -#define AUE_CHMOD_EXTENDED 43163 /* Darwin. */ -#define AUE_FCHMOD_EXTENDED 43164 /* Darwin. */ -#define AUE_FSTAT_EXTENDED 43165 /* Darwin. */ -#define AUE_LSTAT_EXTENDED 43166 /* Darwin. */ -#define AUE_MKDIR_EXTENDED 43167 /* Darwin. */ -#define AUE_MKFIFO_EXTENDED 43168 /* Darwin. */ -#define AUE_OPEN_EXTENDED 43169 /* Darwin. */ -#define AUE_OPEN_EXTENDED_R 43170 /* Darwin. */ -#define AUE_OPEN_EXTENDED_RC 43171 /* Darwin. 
*/ -#define AUE_OPEN_EXTENDED_RT 43172 /* Darwin. */ -#define AUE_OPEN_EXTENDED_RTC 43173 /* Darwin. */ -#define AUE_OPEN_EXTENDED_W 43174 /* Darwin. */ -#define AUE_OPEN_EXTENDED_WC 43175 /* Darwin. */ -#define AUE_OPEN_EXTENDED_WT 43176 /* Darwin. */ -#define AUE_OPEN_EXTENDED_WTC 43177 /* Darwin. */ -#define AUE_OPEN_EXTENDED_RW 43178 /* Darwin. */ -#define AUE_OPEN_EXTENDED_RWC 43179 /* Darwin. */ -#define AUE_OPEN_EXTENDED_RWT 43180 /* Darwin. */ -#define AUE_OPEN_EXTENDED_RWTC 43181 /* Darwin. */ -#define AUE_STAT_EXTENDED 43182 /* Darwin. */ -#define AUE_UMASK_EXTENDED 43183 /* Darwin. */ -#define AUE_OPENAT 43184 /* FreeBSD. */ -#define AUE_POSIX_OPENPT 43185 /* FreeBSD. */ -#define AUE_CAP_NEW 43186 /* TrustedBSD. */ -#define AUE_CAP_GETRIGHTS 43187 /* TrustedBSD. */ -#define AUE_CAP_ENTER 43188 /* TrustedBSD. */ -#define AUE_CAP_GETMODE 43189 /* TrustedBSD. */ -#define AUE_POSIX_SPAWN 43190 /* Darwin. */ -#define AUE_FSGETPATH 43191 /* Darwin. */ -#define AUE_PREAD 43192 /* Darwin/FreeBSD. */ -#define AUE_PWRITE 43193 /* Darwin/FreeBSD. */ -#define AUE_FSCTL 43194 /* Darwin. */ -#define AUE_FFSCTL 43195 /* Darwin. */ -#define AUE_LPATHCONF 43196 /* FreeBSD. */ -#define AUE_PDFORK 43197 /* FreeBSD. */ -#define AUE_PDKILL 43198 /* FreeBSD. */ -#define AUE_PDGETPID 43199 /* FreeBSD. */ -#define AUE_PDWAIT 43200 /* FreeBSD. */ -#define AUE_GETATTRLISTBULK 43201 /* Darwin. */ -#define AUE_GETATTRLISTAT 43202 /* Darwin. */ -#define AUE_OPENBYID 43203 /* Darwin. */ -#define AUE_OPENBYID_R 43204 /* Darwin. */ -#define AUE_OPENBYID_RT 43205 /* Darwin. */ -#define AUE_OPENBYID_W 43206 /* Darwin. */ -#define AUE_OPENBYID_WT 43207 /* Darwin. */ -#define AUE_OPENBYID_RW 43208 /* Darwin. */ -#define AUE_OPENBYID_RWT 43209 /* Darwin. */ -#define AUE_CLONEFILEAT 43210 /* Darwin. */ -#define AUE_FCLONEFILEAT 43211 /* Darwin. */ -#define AUE_SETATTRLISTAT 43212 /* Darwin. */ -#define AUE_FMOUNT 43213 /* Darwin. */ +#define AUE_LUTIMES 43052 +#define AUE_LCHFLAGS 43053 /* FreeBSD-specific. */ +#define AUE_SENDFILE 43054 /* BSD/Linux-specific. */ +#define AUE_USELIB 43055 /* Linux-specific. */ +#define AUE_GETRESUID 43056 +#define AUE_SETRESUID 43057 +#define AUE_GETRESGID 43058 +#define AUE_SETRESGID 43059 +#define AUE_WAIT4 43060 /* FreeBSD-specific. */ +#define AUE_LGETFH 43061 /* FreeBSD-specific. */ +#define AUE_FHSTATFS 43062 /* FreeBSD-specific. */ +#define AUE_FHOPEN 43063 /* FreeBSD-specific. */ +#define AUE_FHSTAT 43064 /* FreeBSD-specific. */ +#define AUE_JAIL 43065 /* FreeBSD-specific. */ +#define AUE_EACCESS 43066 /* FreeBSD-specific. */ +#define AUE_KQUEUE 43067 /* FreeBSD-specific. */ +#define AUE_KEVENT 43068 /* FreeBSD-specific. */ +#define AUE_FSYNC 43069 +#define AUE_NMOUNT 43070 /* FreeBSD-specific. */ +#define AUE_BDFLUSH 43071 /* Linux-specific. */ +#define AUE_SETFSUID 43072 /* Linux-specific. */ +#define AUE_SETFSGID 43073 /* Linux-specific. */ +#define AUE_PERSONALITY 43074 /* Linux-specific. */ +#define AUE_SCHED_GETSCHEDULER 43075 /* POSIX.1b. */ +#define AUE_SCHED_SETSCHEDULER 43076 /* POSIX.1b. */ +#define AUE_PRCTL 43077 /* Linux-specific. */ +#define AUE_GETCWD 43078 /* FreeBSD/Linux-specific. */ +#define AUE_CAPGET 43079 /* Linux-specific. */ +#define AUE_CAPSET 43080 /* Linux-specific. */ +#define AUE_PIVOT_ROOT 43081 /* Linux-specific. */ +#define AUE_RTPRIO 43082 /* FreeBSD-specific. */ +#define AUE_SCHED_GETPARAM 43083 /* POSIX.1b. */ +#define AUE_SCHED_SETPARAM 43084 /* POSIX.1b. */ +#define AUE_SCHED_GET_PRIORITY_MAX 43085 /* POSIX.1b. 
*/ +#define AUE_SCHED_GET_PRIORITY_MIN 43086 /* POSIX.1b. */ +#define AUE_SCHED_RR_GET_INTERVAL 43087 /* POSIX.1b. */ +#define AUE_ACL_GET_FILE 43088 /* FreeBSD. */ +#define AUE_ACL_SET_FILE 43089 /* FreeBSD. */ +#define AUE_ACL_GET_FD 43090 /* FreeBSD. */ +#define AUE_ACL_SET_FD 43091 /* FreeBSD. */ +#define AUE_ACL_DELETE_FILE 43092 /* FreeBSD. */ +#define AUE_ACL_DELETE_FD 43093 /* FreeBSD. */ +#define AUE_ACL_CHECK_FILE 43094 /* FreeBSD. */ +#define AUE_ACL_CHECK_FD 43095 /* FreeBSD. */ +#define AUE_ACL_GET_LINK 43096 /* FreeBSD. */ +#define AUE_ACL_SET_LINK 43097 /* FreeBSD. */ +#define AUE_ACL_DELETE_LINK 43098 /* FreeBSD. */ +#define AUE_ACL_CHECK_LINK 43099 /* FreeBSD. */ +#define AUE_SYSARCH 43100 /* FreeBSD. */ +#define AUE_EXTATTRCTL 43101 /* FreeBSD. */ +#define AUE_EXTATTR_GET_FILE 43102 /* FreeBSD. */ +#define AUE_EXTATTR_SET_FILE 43103 /* FreeBSD. */ +#define AUE_EXTATTR_LIST_FILE 43104 /* FreeBSD. */ +#define AUE_EXTATTR_DELETE_FILE 43105 /* FreeBSD. */ +#define AUE_EXTATTR_GET_FD 43106 /* FreeBSD. */ +#define AUE_EXTATTR_SET_FD 43107 /* FreeBSD. */ +#define AUE_EXTATTR_LIST_FD 43108 /* FreeBSD. */ +#define AUE_EXTATTR_DELETE_FD 43109 /* FreeBSD. */ +#define AUE_EXTATTR_GET_LINK 43110 /* FreeBSD. */ +#define AUE_EXTATTR_SET_LINK 43111 /* FreeBSD. */ +#define AUE_EXTATTR_LIST_LINK 43112 /* FreeBSD. */ +#define AUE_EXTATTR_DELETE_LINK 43113 /* FreeBSD. */ +#define AUE_KENV 43114 /* FreeBSD. */ +#define AUE_JAIL_ATTACH 43115 /* FreeBSD. */ +#define AUE_SYSCTL_WRITE 43116 /* FreeBSD. */ +#define AUE_IOPERM 43117 /* Linux. */ +#define AUE_READDIR 43118 /* Linux. */ +#define AUE_IOPL 43119 /* Linux. */ +#define AUE_VM86 43120 /* Linux. */ +#define AUE_MAC_GET_PROC 43121 /* FreeBSD/Darwin. */ +#define AUE_MAC_SET_PROC 43122 /* FreeBSD/Darwin. */ +#define AUE_MAC_GET_FD 43123 /* FreeBSD/Darwin. */ +#define AUE_MAC_GET_FILE 43124 /* FreeBSD/Darwin. */ +#define AUE_MAC_SET_FD 43125 /* FreeBSD/Darwin. */ +#define AUE_MAC_SET_FILE 43126 /* FreeBSD/Darwin. */ +#define AUE_MAC_SYSCALL 43127 /* FreeBSD. */ +#define AUE_MAC_GET_PID 43128 /* FreeBSD/Darwin. */ +#define AUE_MAC_GET_LINK 43129 /* FreeBSD/Darwin. */ +#define AUE_MAC_SET_LINK 43130 /* FreeBSD/Darwin. */ +#define AUE_MAC_EXECVE 43131 /* FreeBSD/Darwin. */ +#define AUE_GETPATH_FROMFD 43132 /* FreeBSD. */ +#define AUE_GETPATH_FROMADDR 43133 /* FreeBSD. */ +#define AUE_MQ_OPEN 43134 /* FreeBSD. */ +#define AUE_MQ_SETATTR 43135 /* FreeBSD. */ +#define AUE_MQ_TIMEDRECEIVE 43136 /* FreeBSD. */ +#define AUE_MQ_TIMEDSEND 43137 /* FreeBSD. */ +#define AUE_MQ_NOTIFY 43138 /* FreeBSD. */ +#define AUE_MQ_UNLINK 43139 /* FreeBSD. */ +#define AUE_LISTEN 43140 /* FreeBSD/Darwin/Linux. */ +#define AUE_MLOCKALL 43141 /* FreeBSD. */ +#define AUE_MUNLOCKALL 43142 /* FreeBSD. */ +#define AUE_CLOSEFROM 43143 /* FreeBSD. */ +#define AUE_FEXECVE 43144 /* FreeBSD. */ +#define AUE_FACCESSAT 43145 /* FreeBSD. */ +#define AUE_FCHMODAT 43146 /* FreeBSD. */ +#define AUE_LINKAT 43147 /* FreeBSD. */ +#define AUE_MKDIRAT 43148 /* FreeBSD. */ +#define AUE_MKFIFOAT 43149 /* FreeBSD. */ +#define AUE_MKNODAT 43150 /* FreeBSD. */ +#define AUE_READLINKAT 43151 /* FreeBSD. */ +#define AUE_SYMLINKAT 43152 /* FreeBSD. */ +#define AUE_MAC_GETFSSTAT 43153 /* Darwin. */ +#define AUE_MAC_GET_MOUNT 43154 /* Darwin. */ +#define AUE_MAC_GET_LCID 43155 /* Darwin. */ +#define AUE_MAC_GET_LCTX 43156 /* Darwin. */ +#define AUE_MAC_SET_LCTX 43157 /* Darwin. */ +#define AUE_MAC_MOUNT 43158 /* Darwin. */ +#define AUE_GETLCID 43159 /* Darwin. */ +#define AUE_SETLCID 43160 /* Darwin. 
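These AUE_* numbers are a stable identifier space shared with OpenBSM, so trails written on one system can be decoded on another. Userspace resolves a number to its registered name through the audit_event(5) database; a minimal sketch using OpenBSM's libbsm — getauevnum() and struct au_event_ent are libbsm facilities, not part of this header:

#include <stdio.h>
#include <bsm/libbsm.h>

static void
print_event_name(au_event_t ev)
{
	/* getauevnum() consults the audit_event(5) database */
	struct au_event_ent *ent = getauevnum(ev);

	if (ent != NULL) {
		printf("%u -> %s (%s)\n", (unsigned)ev, ent->ae_name, ent->ae_desc);
	} else {
		printf("%u is not registered\n", (unsigned)ev);
	}
}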
*/ +#define AUE_TASKNAMEFORPID 43161 /* Darwin. */ +#define AUE_ACCESS_EXTENDED 43162 /* Darwin. */ +#define AUE_CHMOD_EXTENDED 43163 /* Darwin. */ +#define AUE_FCHMOD_EXTENDED 43164 /* Darwin. */ +#define AUE_FSTAT_EXTENDED 43165 /* Darwin. */ +#define AUE_LSTAT_EXTENDED 43166 /* Darwin. */ +#define AUE_MKDIR_EXTENDED 43167 /* Darwin. */ +#define AUE_MKFIFO_EXTENDED 43168 /* Darwin. */ +#define AUE_OPEN_EXTENDED 43169 /* Darwin. */ +#define AUE_OPEN_EXTENDED_R 43170 /* Darwin. */ +#define AUE_OPEN_EXTENDED_RC 43171 /* Darwin. */ +#define AUE_OPEN_EXTENDED_RT 43172 /* Darwin. */ +#define AUE_OPEN_EXTENDED_RTC 43173 /* Darwin. */ +#define AUE_OPEN_EXTENDED_W 43174 /* Darwin. */ +#define AUE_OPEN_EXTENDED_WC 43175 /* Darwin. */ +#define AUE_OPEN_EXTENDED_WT 43176 /* Darwin. */ +#define AUE_OPEN_EXTENDED_WTC 43177 /* Darwin. */ +#define AUE_OPEN_EXTENDED_RW 43178 /* Darwin. */ +#define AUE_OPEN_EXTENDED_RWC 43179 /* Darwin. */ +#define AUE_OPEN_EXTENDED_RWT 43180 /* Darwin. */ +#define AUE_OPEN_EXTENDED_RWTC 43181 /* Darwin. */ +#define AUE_STAT_EXTENDED 43182 /* Darwin. */ +#define AUE_UMASK_EXTENDED 43183 /* Darwin. */ +#define AUE_OPENAT 43184 /* FreeBSD. */ +#define AUE_POSIX_OPENPT 43185 /* FreeBSD. */ +#define AUE_CAP_NEW 43186 /* TrustedBSD. */ +#define AUE_CAP_GETRIGHTS 43187 /* TrustedBSD. */ +#define AUE_CAP_ENTER 43188 /* TrustedBSD. */ +#define AUE_CAP_GETMODE 43189 /* TrustedBSD. */ +#define AUE_POSIX_SPAWN 43190 /* Darwin. */ +#define AUE_FSGETPATH 43191 /* Darwin. */ +#define AUE_PREAD 43192 /* Darwin/FreeBSD. */ +#define AUE_PWRITE 43193 /* Darwin/FreeBSD. */ +#define AUE_FSCTL 43194 /* Darwin. */ +#define AUE_FFSCTL 43195 /* Darwin. */ +#define AUE_LPATHCONF 43196 /* FreeBSD. */ +#define AUE_PDFORK 43197 /* FreeBSD. */ +#define AUE_PDKILL 43198 /* FreeBSD. */ +#define AUE_PDGETPID 43199 /* FreeBSD. */ +#define AUE_PDWAIT 43200 /* FreeBSD. */ +#define AUE_GETATTRLISTBULK 43201 /* Darwin. */ +#define AUE_GETATTRLISTAT 43202 /* Darwin. */ +#define AUE_OPENBYID 43203 /* Darwin. */ +#define AUE_OPENBYID_R 43204 /* Darwin. */ +#define AUE_OPENBYID_RT 43205 /* Darwin. */ +#define AUE_OPENBYID_W 43206 /* Darwin. */ +#define AUE_OPENBYID_WT 43207 /* Darwin. */ +#define AUE_OPENBYID_RW 43208 /* Darwin. */ +#define AUE_OPENBYID_RWT 43209 /* Darwin. */ +#define AUE_CLONEFILEAT 43210 /* Darwin. */ +#define AUE_FCLONEFILEAT 43211 /* Darwin. */ +#define AUE_SETATTRLISTAT 43212 /* Darwin. */ +#define AUE_FMOUNT 43213 /* Darwin. */ -#define AUE_SESSION_START 44901 /* Darwin. */ -#define AUE_SESSION_UPDATE 44902 /* Darwin. */ -#define AUE_SESSION_END 44903 /* Darwin. */ -#define AUE_SESSION_CLOSE 44904 /* Darwin. */ +#define AUE_SESSION_START 44901 /* Darwin. */ +#define AUE_SESSION_UPDATE 44902 /* Darwin. */ +#define AUE_SESSION_END 44903 /* Darwin. */ +#define AUE_SESSION_CLOSE 44904 /* Darwin. */ /* * Darwin BSM uses a number of AUE_O_* definitions, which are aliased to the @@ -628,197 +628,197 @@ * import. Happily, these have different names than the AUE_O* definitions * in Solaris BSM. 
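The _R/_W/_T/_C suffixes in the AUE_OPEN_EXTENDED_* (and AUE_OPENBYID_*) families above encode the effective open(2) mode — read, write, O_TRUNC, O_CREAT — so one syscall fans out to per-mode audit events. A hedged sketch of that fan-out; open_flags_to_aue_extended() is a hypothetical helper (the kernel's real mapping lives elsewhere in the audit sources), this only mirrors the naming pattern:

#include <fcntl.h>
#include <bsm/audit.h>	/* au_event_t */

static au_event_t
open_flags_to_aue_extended(int oflags)
{
	int creat = (oflags & O_CREAT) != 0;
	int trunc = (oflags & O_TRUNC) != 0;

	switch (oflags & O_ACCMODE) {
	case O_RDONLY:
		if (creat && trunc) { return AUE_OPEN_EXTENDED_RTC; }
		if (creat)          { return AUE_OPEN_EXTENDED_RC; }
		if (trunc)          { return AUE_OPEN_EXTENDED_RT; }
		return AUE_OPEN_EXTENDED_R;
	case O_WRONLY:
		if (creat && trunc) { return AUE_OPEN_EXTENDED_WTC; }
		if (creat)          { return AUE_OPEN_EXTENDED_WC; }
		if (trunc)          { return AUE_OPEN_EXTENDED_WT; }
		return AUE_OPEN_EXTENDED_W;
	default: /* O_RDWR */
		if (creat && trunc) { return AUE_OPEN_EXTENDED_RWTC; }
		if (creat)          { return AUE_OPEN_EXTENDED_RWC; }
		if (trunc)          { return AUE_OPEN_EXTENDED_RWT; }
		return AUE_OPEN_EXTENDED_RW;
	}
}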
*/ -#define AUE_O_CREAT AUE_OPEN_RWTC /* Darwin */ -#define AUE_O_EXECVE AUE_NULL /* Darwin */ -#define AUE_O_SBREAK AUE_NULL /* Darwin */ -#define AUE_O_LSEEK AUE_NULL /* Darwin */ -#define AUE_O_MOUNT AUE_NULL /* Darwin */ -#define AUE_O_UMOUNT AUE_NULL /* Darwin */ -#define AUE_O_STAT AUE_STAT /* Darwin */ -#define AUE_O_LSTAT AUE_LSTAT /* Darwin */ -#define AUE_O_FSTAT AUE_FSTAT /* Darwin */ -#define AUE_O_GETPAGESIZE AUE_NULL /* Darwin */ -#define AUE_O_VREAD AUE_NULL /* Darwin */ -#define AUE_O_VWRITE AUE_NULL /* Darwin */ -#define AUE_O_MMAP AUE_MMAP /* Darwin */ -#define AUE_O_VADVISE AUE_NULL /* Darwin */ -#define AUE_O_VHANGUP AUE_NULL /* Darwin */ -#define AUE_O_VLIMIT AUE_NULL /* Darwin */ -#define AUE_O_WAIT AUE_NULL /* Darwin */ -#define AUE_O_GETHOSTNAME AUE_NULL /* Darwin */ -#define AUE_O_SETHOSTNAME AUE_SYSCTL /* Darwin */ -#define AUE_O_GETDOPT AUE_NULL /* Darwin */ -#define AUE_O_SETDOPT AUE_NULL /* Darwin */ -#define AUE_O_ACCEPT AUE_NULL /* Darwin */ -#define AUE_O_SEND AUE_SENDMSG /* Darwin */ -#define AUE_O_RECV AUE_RECVMSG /* Darwin */ -#define AUE_O_VTIMES AUE_NULL /* Darwin */ -#define AUE_O_SIGVEC AUE_NULL /* Darwin */ -#define AUE_O_SIGBLOCK AUE_NULL /* Darwin */ -#define AUE_O_SIGSETMASK AUE_NULL /* Darwin */ -#define AUE_O_SIGSTACK AUE_NULL /* Darwin */ -#define AUE_O_RECVMSG AUE_RECVMSG /* Darwin */ -#define AUE_O_SENDMSG AUE_SENDMSG /* Darwin */ -#define AUE_O_VTRACE AUE_NULL /* Darwin */ -#define AUE_O_RESUBA AUE_NULL /* Darwin */ -#define AUE_O_RECVFROM AUE_RECVFROM /* Darwin */ -#define AUE_O_SETREUID AUE_SETREUID /* Darwin */ -#define AUE_O_SETREGID AUE_SETREGID /* Darwin */ -#define AUE_O_GETDIRENTRIES AUE_GETDIRENTRIES /* Darwin */ -#define AUE_O_TRUNCATE AUE_TRUNCATE /* Darwin */ -#define AUE_O_FTRUNCATE AUE_FTRUNCATE /* Darwin */ -#define AUE_O_GETPEERNAME AUE_NULL /* Darwin */ -#define AUE_O_GETHOSTID AUE_NULL /* Darwin */ -#define AUE_O_SETHOSTID AUE_NULL /* Darwin */ -#define AUE_O_GETRLIMIT AUE_NULL /* Darwin */ -#define AUE_O_SETRLIMIT AUE_SETRLIMIT /* Darwin */ -#define AUE_O_KILLPG AUE_KILL /* Darwin */ -#define AUE_O_SETQUOTA AUE_NULL /* Darwin */ -#define AUE_O_QUOTA AUE_NULL /* Darwin */ -#define AUE_O_GETSOCKNAME AUE_NULL /* Darwin */ -#define AUE_O_GETDIREENTRIES AUE_GETDIREENTRIES /* Darwin */ -#define AUE_O_ASYNCDAEMON AUE_NULL /* Darwin */ -#define AUE_O_GETDOMAINNAME AUE_NULL /* Darwin */ -#define AUE_O_SETDOMAINNAME AUE_SYSCTL /* Darwin */ -#define AUE_O_PCFS_MOUNT AUE_NULL /* Darwin */ -#define AUE_O_EXPORTFS AUE_NULL /* Darwin */ -#define AUE_O_USTATE AUE_NULL /* Darwin */ -#define AUE_O_WAIT3 AUE_NULL /* Darwin */ -#define AUE_O_RPAUSE AUE_NULL /* Darwin */ -#define AUE_O_GETDENTS AUE_NULL /* Darwin */ +#define AUE_O_CREAT AUE_OPEN_RWTC /* Darwin */ +#define AUE_O_EXECVE AUE_NULL /* Darwin */ +#define AUE_O_SBREAK AUE_NULL /* Darwin */ +#define AUE_O_LSEEK AUE_NULL /* Darwin */ +#define AUE_O_MOUNT AUE_NULL /* Darwin */ +#define AUE_O_UMOUNT AUE_NULL /* Darwin */ +#define AUE_O_STAT AUE_STAT /* Darwin */ +#define AUE_O_LSTAT AUE_LSTAT /* Darwin */ +#define AUE_O_FSTAT AUE_FSTAT /* Darwin */ +#define AUE_O_GETPAGESIZE AUE_NULL /* Darwin */ +#define AUE_O_VREAD AUE_NULL /* Darwin */ +#define AUE_O_VWRITE AUE_NULL /* Darwin */ +#define AUE_O_MMAP AUE_MMAP /* Darwin */ +#define AUE_O_VADVISE AUE_NULL /* Darwin */ +#define AUE_O_VHANGUP AUE_NULL /* Darwin */ +#define AUE_O_VLIMIT AUE_NULL /* Darwin */ +#define AUE_O_WAIT AUE_NULL /* Darwin */ +#define AUE_O_GETHOSTNAME AUE_NULL /* Darwin */ +#define AUE_O_SETHOSTNAME AUE_SYSCTL /* 
Darwin */ +#define AUE_O_GETDOPT AUE_NULL /* Darwin */ +#define AUE_O_SETDOPT AUE_NULL /* Darwin */ +#define AUE_O_ACCEPT AUE_NULL /* Darwin */ +#define AUE_O_SEND AUE_SENDMSG /* Darwin */ +#define AUE_O_RECV AUE_RECVMSG /* Darwin */ +#define AUE_O_VTIMES AUE_NULL /* Darwin */ +#define AUE_O_SIGVEC AUE_NULL /* Darwin */ +#define AUE_O_SIGBLOCK AUE_NULL /* Darwin */ +#define AUE_O_SIGSETMASK AUE_NULL /* Darwin */ +#define AUE_O_SIGSTACK AUE_NULL /* Darwin */ +#define AUE_O_RECVMSG AUE_RECVMSG /* Darwin */ +#define AUE_O_SENDMSG AUE_SENDMSG /* Darwin */ +#define AUE_O_VTRACE AUE_NULL /* Darwin */ +#define AUE_O_RESUBA AUE_NULL /* Darwin */ +#define AUE_O_RECVFROM AUE_RECVFROM /* Darwin */ +#define AUE_O_SETREUID AUE_SETREUID /* Darwin */ +#define AUE_O_SETREGID AUE_SETREGID /* Darwin */ +#define AUE_O_GETDIRENTRIES AUE_GETDIRENTRIES /* Darwin */ +#define AUE_O_TRUNCATE AUE_TRUNCATE /* Darwin */ +#define AUE_O_FTRUNCATE AUE_FTRUNCATE /* Darwin */ +#define AUE_O_GETPEERNAME AUE_NULL /* Darwin */ +#define AUE_O_GETHOSTID AUE_NULL /* Darwin */ +#define AUE_O_SETHOSTID AUE_NULL /* Darwin */ +#define AUE_O_GETRLIMIT AUE_NULL /* Darwin */ +#define AUE_O_SETRLIMIT AUE_SETRLIMIT /* Darwin */ +#define AUE_O_KILLPG AUE_KILL /* Darwin */ +#define AUE_O_SETQUOTA AUE_NULL /* Darwin */ +#define AUE_O_QUOTA AUE_NULL /* Darwin */ +#define AUE_O_GETSOCKNAME AUE_NULL /* Darwin */ +#define AUE_O_GETDIREENTRIES AUE_GETDIREENTRIES /* Darwin */ +#define AUE_O_ASYNCDAEMON AUE_NULL /* Darwin */ +#define AUE_O_GETDOMAINNAME AUE_NULL /* Darwin */ +#define AUE_O_SETDOMAINNAME AUE_SYSCTL /* Darwin */ +#define AUE_O_PCFS_MOUNT AUE_NULL /* Darwin */ +#define AUE_O_EXPORTFS AUE_NULL /* Darwin */ +#define AUE_O_USTATE AUE_NULL /* Darwin */ +#define AUE_O_WAIT3 AUE_NULL /* Darwin */ +#define AUE_O_RPAUSE AUE_NULL /* Darwin */ +#define AUE_O_GETDENTS AUE_NULL /* Darwin */ /* * Possible desired future values based on review of BSD/Darwin system calls. 
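Everything aliased to AUE_NULL below is a syscall that has been reviewed but is deliberately not audited: the syscall-to-event table still needs an entry, and AUE_NULL is the "no record" placeholder. A sketch of the effect — audit_commit_sketch() is an invented name; the real check sits in the audit commit path:

#include <bsm/audit.h>	/* au_event_t, AUE_NULL */

static void
audit_commit_sketch(au_event_t event)
{
	if (event == AUE_NULL) {
		return;	/* reviewed, not audited: no record is assembled */
	}
	/* ... otherwise build header, body tokens and trailer ... */
}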
*/ -#define AUE_ATGETMSG AUE_NULL -#define AUE_ATPUTMSG AUE_NULL -#define AUE_ATSOCKET AUE_NULL -#define AUE_ATPGETREQ AUE_NULL -#define AUE_ATPGETRSP AUE_NULL -#define AUE_ATPSNDREQ AUE_NULL -#define AUE_ATPSNDRSP AUE_NULL -#define AUE_BSDTHREADCREATE AUE_NULL -#define AUE_BSDTHREADTERMINATE AUE_NULL -#define AUE_BSDTHREADREGISTER AUE_NULL -#define AUE_CSOPS AUE_NULL -#define AUE_DUP AUE_NULL -#define AUE_FDATASYNC AUE_NULL -#define AUE_FGETATTRLIST AUE_NULL -#define AUE_FGETXATTR AUE_NULL -#define AUE_FLISTXATTR AUE_NULL -#define AUE_FREMOVEXATTR AUE_NULL -#define AUE_FSETATTRLIST AUE_NULL -#define AUE_FSETXATTR AUE_NULL -#define AUE_FSTATFS64 AUE_NULL -#define AUE_FSTATV AUE_NULL -#define AUE_FSTAT64 AUE_NULL -#define AUE_FSTAT64_EXTENDED AUE_NULL -#define AUE_GCCONTROL AUE_NULL -#define AUE_GETDIRENTRIES64 AUE_NULL -#define AUE_GETDTABLESIZE AUE_NULL -#define AUE_GETEGID AUE_NULL -#define AUE_GETEUID AUE_NULL -#define AUE_GETFSSTAT64 AUE_NULL -#define AUE_GETGID AUE_NULL -#define AUE_GETGROUPS AUE_NULL -#define AUE_GETITIMER AUE_NULL -#define AUE_GETLOGIN AUE_NULL -#define AUE_GETPEERNAME AUE_NULL -#define AUE_GETPGID AUE_NULL -#define AUE_GETPGRP AUE_NULL -#define AUE_GETPID AUE_NULL -#define AUE_GETPPID AUE_NULL -#define AUE_GETPRIORITY AUE_NULL -#define AUE_GETRLIMIT AUE_NULL -#define AUE_GETRUSAGE AUE_NULL -#define AUE_GETSGROUPS AUE_NULL -#define AUE_GETSID AUE_NULL -#define AUE_GETSOCKNAME AUE_NULL -#define AUE_GETTIMEOFDAY AUE_NULL -#define AUE_GETTID AUE_NULL -#define AUE_GETUID AUE_NULL -#define AUE_GETSOCKOPT AUE_NULL -#define AUE_GETWGROUPS AUE_NULL -#define AUE_GETXATTR AUE_NULL -#define AUE_IDENTITYSVC AUE_NULL -#define AUE_INITGROUPS AUE_NULL -#define AUE_IOPOLICYSYS AUE_NULL -#define AUE_ISSETUGID AUE_NULL -#define AUE_LEDGER AUE_NULL -#define AUE_LIOLISTIO AUE_NULL -#define AUE_LISTXATTR AUE_NULL -#define AUE_LSTATV AUE_NULL -#define AUE_LSTAT64 AUE_NULL -#define AUE_LSTAT64_EXTENDED AUE_NULL -#define AUE_MADVISE AUE_NULL -#define AUE_MINCORE AUE_NULL -#define AUE_MKCOMPLEX AUE_NULL -#define AUE_MODWATCH AUE_NULL -#define AUE_MSGCL AUE_NULL -#define AUE_MSYNC AUE_NULL -#define AUE_NECP AUE_NULL -#define AUE_NETAGENT AUE_NULL -#define AUE_PREADV AUE_NULL -#define AUE_PROCINFO AUE_NULL -#define AUE_PTHREADCANCELED AUE_NULL -#define AUE_PTHREADCHDIR AUE_NULL -#define AUE_PTHREADCONDBROADCAST AUE_NULL -#define AUE_PTHREADCONDDESTORY AUE_NULL -#define AUE_PTHREADCONDINIT AUE_NULL -#define AUE_PTHREADCONDSIGNAL AUE_NULL -#define AUE_PTHREADCONDWAIT AUE_NULL -#define AUE_PTHREADFCHDIR AUE_NULL -#define AUE_PTHREADMARK AUE_NULL -#define AUE_PTHREADMUTEXDESTROY AUE_NULL -#define AUE_PTHREADMUTEXINIT AUE_NULL -#define AUE_PTHREADMUTEXTRYLOCK AUE_NULL -#define AUE_PTHREADMUTEXUNLOCK AUE_NULL -#define AUE_PWRITEV AUE_NULL -#define AUE_REMOVEXATTR AUE_NULL -#define AUE_SBRK AUE_NULL -#define AUE_SELECT AUE_NULL -#define AUE_SEMDESTROY AUE_NULL -#define AUE_SEMGETVALUE AUE_NULL -#define AUE_SEMINIT AUE_NULL -#define AUE_SEMPOST AUE_NULL -#define AUE_SEMTRYWAIT AUE_NULL -#define AUE_SEMWAIT AUE_NULL -#define AUE_SEMWAITSIGNAL AUE_NULL -#define AUE_SETITIMER AUE_NULL -#define AUE_SETSGROUPS AUE_NULL -#define AUE_SETTID AUE_NULL -#define AUE_SETTIDWITHPID AUE_NULL -#define AUE_SETWGROUPS AUE_NULL -#define AUE_SETXATTR AUE_NULL -#define AUE_SHAREDREGIONCHECK AUE_NULL -#define AUE_SHAREDREGIONMAP AUE_NULL -#define AUE_SIGACTION AUE_NULL -#define AUE_SIGALTSTACK AUE_NULL -#define AUE_SIGPENDING AUE_NULL -#define AUE_SIGPROCMASK AUE_NULL -#define AUE_SIGRETURN AUE_NULL -#define 
AUE_SIGSUSPEND AUE_NULL -#define AUE_SIGWAIT AUE_NULL -#define AUE_SSTK AUE_NULL -#define AUE_STACKSNAPSHOT AUE_NULL -#define AUE_STATFS64 AUE_NULL -#define AUE_STATV AUE_NULL -#define AUE_STAT64 AUE_NULL -#define AUE_STAT64_EXTENDED AUE_NULL -#define AUE_SYNC AUE_NULL -#define AUE_SYSCALL AUE_NULL -#define AUE_TABLE AUE_NULL -#define AUE_VMPRESSUREMONITOR AUE_NULL -#define AUE_WAITEVENT AUE_NULL -#define AUE_WAITID AUE_NULL -#define AUE_WATCHEVENT AUE_NULL -#define AUE_WORKQOPEN AUE_NULL -#define AUE_WORKQOPS AUE_NULL -#define AUE_WORKLOOPCTL AUE_NULL -#define AUE_PERSONA AUE_NULL -#define AUE_USRCTL AUE_NULL -#define AUE_NEXUS AUE_NULL -#define AUE_CHANNEL AUE_NULL -#define AUE_NET AUE_NULL +#define AUE_ATGETMSG AUE_NULL +#define AUE_ATPUTMSG AUE_NULL +#define AUE_ATSOCKET AUE_NULL +#define AUE_ATPGETREQ AUE_NULL +#define AUE_ATPGETRSP AUE_NULL +#define AUE_ATPSNDREQ AUE_NULL +#define AUE_ATPSNDRSP AUE_NULL +#define AUE_BSDTHREADCREATE AUE_NULL +#define AUE_BSDTHREADTERMINATE AUE_NULL +#define AUE_BSDTHREADREGISTER AUE_NULL +#define AUE_CSOPS AUE_NULL +#define AUE_DUP AUE_NULL +#define AUE_FDATASYNC AUE_NULL +#define AUE_FGETATTRLIST AUE_NULL +#define AUE_FGETXATTR AUE_NULL +#define AUE_FLISTXATTR AUE_NULL +#define AUE_FREMOVEXATTR AUE_NULL +#define AUE_FSETATTRLIST AUE_NULL +#define AUE_FSETXATTR AUE_NULL +#define AUE_FSTATFS64 AUE_NULL +#define AUE_FSTATV AUE_NULL +#define AUE_FSTAT64 AUE_NULL +#define AUE_FSTAT64_EXTENDED AUE_NULL +#define AUE_GCCONTROL AUE_NULL +#define AUE_GETDIRENTRIES64 AUE_NULL +#define AUE_GETDTABLESIZE AUE_NULL +#define AUE_GETEGID AUE_NULL +#define AUE_GETEUID AUE_NULL +#define AUE_GETFSSTAT64 AUE_NULL +#define AUE_GETGID AUE_NULL +#define AUE_GETGROUPS AUE_NULL +#define AUE_GETITIMER AUE_NULL +#define AUE_GETLOGIN AUE_NULL +#define AUE_GETPEERNAME AUE_NULL +#define AUE_GETPGID AUE_NULL +#define AUE_GETPGRP AUE_NULL +#define AUE_GETPID AUE_NULL +#define AUE_GETPPID AUE_NULL +#define AUE_GETPRIORITY AUE_NULL +#define AUE_GETRLIMIT AUE_NULL +#define AUE_GETRUSAGE AUE_NULL +#define AUE_GETSGROUPS AUE_NULL +#define AUE_GETSID AUE_NULL +#define AUE_GETSOCKNAME AUE_NULL +#define AUE_GETTIMEOFDAY AUE_NULL +#define AUE_GETTID AUE_NULL +#define AUE_GETUID AUE_NULL +#define AUE_GETSOCKOPT AUE_NULL +#define AUE_GETWGROUPS AUE_NULL +#define AUE_GETXATTR AUE_NULL +#define AUE_IDENTITYSVC AUE_NULL +#define AUE_INITGROUPS AUE_NULL +#define AUE_IOPOLICYSYS AUE_NULL +#define AUE_ISSETUGID AUE_NULL +#define AUE_LEDGER AUE_NULL +#define AUE_LIOLISTIO AUE_NULL +#define AUE_LISTXATTR AUE_NULL +#define AUE_LSTATV AUE_NULL +#define AUE_LSTAT64 AUE_NULL +#define AUE_LSTAT64_EXTENDED AUE_NULL +#define AUE_MADVISE AUE_NULL +#define AUE_MINCORE AUE_NULL +#define AUE_MKCOMPLEX AUE_NULL +#define AUE_MODWATCH AUE_NULL +#define AUE_MSGCL AUE_NULL +#define AUE_MSYNC AUE_NULL +#define AUE_NECP AUE_NULL +#define AUE_NETAGENT AUE_NULL +#define AUE_PREADV AUE_NULL +#define AUE_PROCINFO AUE_NULL +#define AUE_PTHREADCANCELED AUE_NULL +#define AUE_PTHREADCHDIR AUE_NULL +#define AUE_PTHREADCONDBROADCAST AUE_NULL +#define AUE_PTHREADCONDDESTORY AUE_NULL +#define AUE_PTHREADCONDINIT AUE_NULL +#define AUE_PTHREADCONDSIGNAL AUE_NULL +#define AUE_PTHREADCONDWAIT AUE_NULL +#define AUE_PTHREADFCHDIR AUE_NULL +#define AUE_PTHREADMARK AUE_NULL +#define AUE_PTHREADMUTEXDESTROY AUE_NULL +#define AUE_PTHREADMUTEXINIT AUE_NULL +#define AUE_PTHREADMUTEXTRYLOCK AUE_NULL +#define AUE_PTHREADMUTEXUNLOCK AUE_NULL +#define AUE_PWRITEV AUE_NULL +#define AUE_REMOVEXATTR AUE_NULL +#define AUE_SBRK AUE_NULL +#define 
AUE_SELECT AUE_NULL +#define AUE_SEMDESTROY AUE_NULL +#define AUE_SEMGETVALUE AUE_NULL +#define AUE_SEMINIT AUE_NULL +#define AUE_SEMPOST AUE_NULL +#define AUE_SEMTRYWAIT AUE_NULL +#define AUE_SEMWAIT AUE_NULL +#define AUE_SEMWAITSIGNAL AUE_NULL +#define AUE_SETITIMER AUE_NULL +#define AUE_SETSGROUPS AUE_NULL +#define AUE_SETTID AUE_NULL +#define AUE_SETTIDWITHPID AUE_NULL +#define AUE_SETWGROUPS AUE_NULL +#define AUE_SETXATTR AUE_NULL +#define AUE_SHAREDREGIONCHECK AUE_NULL +#define AUE_SHAREDREGIONMAP AUE_NULL +#define AUE_SIGACTION AUE_NULL +#define AUE_SIGALTSTACK AUE_NULL +#define AUE_SIGPENDING AUE_NULL +#define AUE_SIGPROCMASK AUE_NULL +#define AUE_SIGRETURN AUE_NULL +#define AUE_SIGSUSPEND AUE_NULL +#define AUE_SIGWAIT AUE_NULL +#define AUE_SSTK AUE_NULL +#define AUE_STACKSNAPSHOT AUE_NULL +#define AUE_STATFS64 AUE_NULL +#define AUE_STATV AUE_NULL +#define AUE_STAT64 AUE_NULL +#define AUE_STAT64_EXTENDED AUE_NULL +#define AUE_SYNC AUE_NULL +#define AUE_SYSCALL AUE_NULL +#define AUE_TABLE AUE_NULL +#define AUE_VMPRESSUREMONITOR AUE_NULL +#define AUE_WAITEVENT AUE_NULL +#define AUE_WAITID AUE_NULL +#define AUE_WATCHEVENT AUE_NULL +#define AUE_WORKQOPEN AUE_NULL +#define AUE_WORKQOPS AUE_NULL +#define AUE_WORKLOOPCTL AUE_NULL +#define AUE_PERSONA AUE_NULL +#define AUE_USRCTL AUE_NULL +#define AUE_NEXUS AUE_NULL +#define AUE_CHANNEL AUE_NULL +#define AUE_NET AUE_NULL #endif /* !_BSM_AUDIT_KEVENTS_H_ */ diff --git a/bsd/bsm/audit_record.h b/bsd/bsm/audit_record.h index bedcb800a..8cd2cebec 100644 --- a/bsd/bsm/audit_record.h +++ b/bsd/bsm/audit_record.h @@ -32,88 +32,88 @@ #ifndef _BSM_AUDIT_RECORD_H_ #define _BSM_AUDIT_RECORD_H_ -#include /* token_t */ -#include /* struct timeval */ +#include /* token_t */ +#include /* struct timeval */ /* * Token type identifiers. 
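Each audit record is a flat byte stream of tokens, and the first byte of every token is one of the AUT_* identifiers below; a record is framed by a header token in front and a trailer token behind. As a sketch — this struct is illustrative, not from this header, since on the wire the fields are serialized individually in big-endian order — the AUT_HEADER32 token that opens a record carries:

#include <stdint.h>

struct bsm_header32_sketch {
	uint8_t  token_id;        /* AUT_HEADER32 (0x14, below) */
	uint32_t record_bytes;    /* whole record length, trailer included */
	uint8_t  version;         /* AUDIT_HEADER_VERSION_OPENBSM */
	uint16_t event_type;      /* an AUE_* event number */
	uint16_t event_modifier;
	uint32_t seconds;         /* record timestamp */
	uint32_t milliseconds;
} __attribute__((packed));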
*/ -#define AUT_INVALID 0x00 -#define AUT_OTHER_FILE32 0x11 -#define AUT_OHEADER 0x12 -#define AUT_TRAILER 0x13 -#define AUT_HEADER32 0x14 -#define AUT_HEADER32_EX 0x15 -#define AUT_DATA 0x21 -#define AUT_IPC 0x22 -#define AUT_PATH 0x23 -#define AUT_SUBJECT32 0x24 -#define AUT_XATPATH 0x25 -#define AUT_PROCESS32 0x26 -#define AUT_RETURN32 0x27 -#define AUT_TEXT 0x28 -#define AUT_OPAQUE 0x29 -#define AUT_IN_ADDR 0x2a -#define AUT_IP 0x2b -#define AUT_IPORT 0x2c -#define AUT_ARG32 0x2d -#define AUT_SOCKET 0x2e -#define AUT_SEQ 0x2f -#define AUT_ACL 0x30 -#define AUT_ATTR 0x31 -#define AUT_IPC_PERM 0x32 -#define AUT_LABEL 0x33 -#define AUT_GROUPS 0x34 -#define AUT_ACE 0x35 -#define AUT_PRIV 0x38 -#define AUT_UPRIV 0x39 -#define AUT_LIAISON 0x3a -#define AUT_NEWGROUPS 0x3b -#define AUT_EXEC_ARGS 0x3c -#define AUT_EXEC_ENV 0x3d -#define AUT_ATTR32 0x3e -#define AUT_UNAUTH 0x3f -#define AUT_XATOM 0x40 -#define AUT_XOBJ 0x41 -#define AUT_XPROTO 0x42 -#define AUT_XSELECT 0x43 -#define AUT_XCOLORMAP 0x44 -#define AUT_XCURSOR 0x45 -#define AUT_XFONT 0x46 -#define AUT_XGC 0x47 -#define AUT_XPIXMAP 0x48 -#define AUT_XPROPERTY 0x49 -#define AUT_XWINDOW 0x4a -#define AUT_XCLIENT 0x4b -#define AUT_CMD 0x51 -#define AUT_EXIT 0x52 -#define AUT_ZONENAME 0x60 -#define AUT_HOST 0x70 -#define AUT_ARG64 0x71 -#define AUT_RETURN64 0x72 -#define AUT_ATTR64 0x73 -#define AUT_HEADER64 0x74 -#define AUT_SUBJECT64 0x75 -#define AUT_PROCESS64 0x77 -#define AUT_OTHER_FILE64 0x78 -#define AUT_HEADER64_EX 0x79 -#define AUT_SUBJECT32_EX 0x7a -#define AUT_PROCESS32_EX 0x7b -#define AUT_SUBJECT64_EX 0x7c -#define AUT_PROCESS64_EX 0x7d -#define AUT_IN_ADDR_EX 0x7e -#define AUT_SOCKET_EX 0x7f +#define AUT_INVALID 0x00 +#define AUT_OTHER_FILE32 0x11 +#define AUT_OHEADER 0x12 +#define AUT_TRAILER 0x13 +#define AUT_HEADER32 0x14 +#define AUT_HEADER32_EX 0x15 +#define AUT_DATA 0x21 +#define AUT_IPC 0x22 +#define AUT_PATH 0x23 +#define AUT_SUBJECT32 0x24 +#define AUT_XATPATH 0x25 +#define AUT_PROCESS32 0x26 +#define AUT_RETURN32 0x27 +#define AUT_TEXT 0x28 +#define AUT_OPAQUE 0x29 +#define AUT_IN_ADDR 0x2a +#define AUT_IP 0x2b +#define AUT_IPORT 0x2c +#define AUT_ARG32 0x2d +#define AUT_SOCKET 0x2e +#define AUT_SEQ 0x2f +#define AUT_ACL 0x30 +#define AUT_ATTR 0x31 +#define AUT_IPC_PERM 0x32 +#define AUT_LABEL 0x33 +#define AUT_GROUPS 0x34 +#define AUT_ACE 0x35 +#define AUT_PRIV 0x38 +#define AUT_UPRIV 0x39 +#define AUT_LIAISON 0x3a +#define AUT_NEWGROUPS 0x3b +#define AUT_EXEC_ARGS 0x3c +#define AUT_EXEC_ENV 0x3d +#define AUT_ATTR32 0x3e +#define AUT_UNAUTH 0x3f +#define AUT_XATOM 0x40 +#define AUT_XOBJ 0x41 +#define AUT_XPROTO 0x42 +#define AUT_XSELECT 0x43 +#define AUT_XCOLORMAP 0x44 +#define AUT_XCURSOR 0x45 +#define AUT_XFONT 0x46 +#define AUT_XGC 0x47 +#define AUT_XPIXMAP 0x48 +#define AUT_XPROPERTY 0x49 +#define AUT_XWINDOW 0x4a +#define AUT_XCLIENT 0x4b +#define AUT_CMD 0x51 +#define AUT_EXIT 0x52 +#define AUT_ZONENAME 0x60 +#define AUT_HOST 0x70 +#define AUT_ARG64 0x71 +#define AUT_RETURN64 0x72 +#define AUT_ATTR64 0x73 +#define AUT_HEADER64 0x74 +#define AUT_SUBJECT64 0x75 +#define AUT_PROCESS64 0x77 +#define AUT_OTHER_FILE64 0x78 +#define AUT_HEADER64_EX 0x79 +#define AUT_SUBJECT32_EX 0x7a +#define AUT_PROCESS32_EX 0x7b +#define AUT_SUBJECT64_EX 0x7c +#define AUT_PROCESS64_EX 0x7d +#define AUT_IN_ADDR_EX 0x7e +#define AUT_SOCKET_EX 0x7f /* * Pre-64-bit BSM, 32-bit tokens weren't explicitly named as '32'. We have * compatibility defines. 
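A practical consequence of the fixed token ids above: a parser can resynchronize inside a trail by scanning for the AUT_TRAILER token, whose layout is id (1 byte), magic (2 bytes — the AUT_TRAILER_MAGIC value defined just below) and record byte count (4 bytes), all big-endian. A minimal sketch:

#include <stdint.h>

static int
is_plausible_trailer(const uint8_t *p)
{
	uint16_t magic = (uint16_t)((p[1] << 8) | p[2]);

	return p[0] == AUT_TRAILER && magic == AUT_TRAILER_MAGIC;
}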
*/ -#define AUT_HEADER AUT_HEADER32 -#define AUT_ARG AUT_ARG32 -#define AUT_RETURN AUT_RETURN32 -#define AUT_SUBJECT AUT_SUBJECT32 -#define AUT_PROCESS AUT_PROCESS32 -#define AUT_OTHER_FILE AUT_OTHER_FILE32 +#define AUT_HEADER AUT_HEADER32 +#define AUT_ARG AUT_ARG32 +#define AUT_RETURN AUT_RETURN32 +#define AUT_SUBJECT AUT_SUBJECT32 +#define AUT_PROCESS AUT_PROCESS32 +#define AUT_OTHER_FILE AUT_OTHER_FILE32 /* * The values for the following token ids are not defined by BSM. @@ -122,14 +122,14 @@ * names more consistent with Sun's BSM. These originally came from Apple's * BSM. */ -#define AUT_SOCKINET32 0x80 /* XXX */ -#define AUT_SOCKINET128 0x81 /* XXX */ -#define AUT_SOCKUNIX 0x82 /* XXX */ +#define AUT_SOCKINET32 0x80 /* XXX */ +#define AUT_SOCKINET128 0x81 /* XXX */ +#define AUT_SOCKUNIX 0x82 /* XXX */ /* Apple specific tokens*/ -#define AUT_IDENTITY 0xed -#define AUT_KRB5_PRINCIPAL 0xee -#define AUT_CERT_HASH 0xef +#define AUT_IDENTITY 0xed +#define AUT_KRB5_PRINCIPAL 0xee +#define AUT_CERT_HASH 0xef /* print values for the arbitrary token */ #define AUP_BINARY 0 @@ -166,15 +166,15 @@ * Solaris BSM version, but has a separate version number in order to * identify a potentially different event identifier name space. */ -#define AUDIT_HEADER_VERSION_OLDDARWIN 1 /* In retrospect, a mistake. */ -#define AUDIT_HEADER_VERSION_SOLARIS 2 -#define AUDIT_HEADER_VERSION_TSOL25 3 -#define AUDIT_HEADER_VERSION_TSOL 4 -#define AUDIT_HEADER_VERSION_OPENBSM10 10 -#define AUDIT_HEADER_VERSION_OPENBSM11 11 -#define AUDIT_HEADER_VERSION_OPENBSM AUDIT_HEADER_VERSION_OPENBSM11 +#define AUDIT_HEADER_VERSION_OLDDARWIN 1 /* In retrospect, a mistake. */ +#define AUDIT_HEADER_VERSION_SOLARIS 2 +#define AUDIT_HEADER_VERSION_TSOL25 3 +#define AUDIT_HEADER_VERSION_TSOL 4 +#define AUDIT_HEADER_VERSION_OPENBSM10 10 +#define AUDIT_HEADER_VERSION_OPENBSM11 11 +#define AUDIT_HEADER_VERSION_OPENBSM AUDIT_HEADER_VERSION_OPENBSM11 -#define AUT_TRAILER_MAGIC 0xb105 +#define AUT_TRAILER_MAGIC 0xb105 /* BSM library calls */ @@ -193,118 +193,118 @@ struct sockaddr_un; struct vnode_au_info; #endif -int au_open(void); -int au_write(int d, token_t *m); -int au_close(int d, int keep, short event); -int au_close_buffer(int d, short event, u_char *buffer, size_t *buflen); -int au_close_token(token_t *tok, u_char *buffer, size_t *buflen); +int au_open(void); +int au_write(int d, token_t *m); +int au_close(int d, int keep, short event); +int au_close_buffer(int d, short event, u_char *buffer, size_t *buflen); +int au_close_token(token_t *tok, u_char *buffer, size_t *buflen); -token_t *au_to_file(const char *file, struct timeval tm); +token_t *au_to_file(const char *file, struct timeval tm); -token_t *au_to_header32_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm); -token_t *au_to_header32_ex_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm, struct auditinfo_addr *aia); -token_t *au_to_header64_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, - struct timeval tm); +token_t *au_to_header32_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, + struct timeval tm); +token_t *au_to_header32_ex_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, + struct timeval tm, struct auditinfo_addr *aia); +token_t *au_to_header64_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, + struct timeval tm); #if !defined(KERNEL) && !defined(_KERNEL) -token_t *au_to_header(int rec_size, au_event_t e_type, au_emod_t e_mod); -token_t *au_to_header_ex(int rec_size, au_event_t e_type, au_emod_t e_mod); 
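The au_open()/au_write()/au_close() calls prototyped above are the whole record-building cycle: allocate a record descriptor, append tokens (the record takes ownership of each token_t), then commit or discard the record under an event type. A minimal userspace sketch, assuming OpenBSM's installed headers; AU_TO_WRITE is libbsm's "commit" value for the keep argument, and AUE_OPENAT is just an arbitrary event for illustration:

#include <bsm/audit_kevents.h>
#include <bsm/audit_record.h>
#include <bsm/libbsm.h>

static int
emit_text_record_sketch(void)
{
	int d = au_open();

	if (d < 0) {
		return -1;
	}
	au_write(d, au_to_text("example record"));	/* token now owned by record d */
	return au_close(d, AU_TO_WRITE, AUE_OPENAT);
}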
-token_t *au_to_header32(int rec_size, au_event_t e_type, au_emod_t e_mod); -token_t *au_to_header64(int rec_size, au_event_t e_type, au_emod_t e_mod); -token_t *au_to_header32_ex(int rec_size, au_event_t e_type, au_emod_t e_mod); +token_t *au_to_header(int rec_size, au_event_t e_type, au_emod_t e_mod); +token_t *au_to_header_ex(int rec_size, au_event_t e_type, au_emod_t e_mod); +token_t *au_to_header32(int rec_size, au_event_t e_type, au_emod_t e_mod); +token_t *au_to_header64(int rec_size, au_event_t e_type, au_emod_t e_mod); +token_t *au_to_header32_ex(int rec_size, au_event_t e_type, au_emod_t e_mod); #endif -token_t *au_to_me(void); -token_t *au_to_arg(char n, const char *text, uint32_t v); -token_t *au_to_arg32(char n, const char *text, uint32_t v); -token_t *au_to_arg64(char n, const char *text, uint64_t v); +token_t *au_to_me(void); +token_t *au_to_arg(char n, const char *text, uint32_t v); +token_t *au_to_arg32(char n, const char *text, uint32_t v); +token_t *au_to_arg64(char n, const char *text, uint64_t v); #if defined(_KERNEL) || defined(KERNEL) -token_t *au_to_attr(struct vnode_au_info *vni); -token_t *au_to_attr32(struct vnode_au_info *vni); -token_t *au_to_attr64(struct vnode_au_info *vni); +token_t *au_to_attr(struct vnode_au_info *vni); +token_t *au_to_attr32(struct vnode_au_info *vni); +token_t *au_to_attr64(struct vnode_au_info *vni); #endif -token_t *au_to_data(char unit_print, char unit_type, char unit_count, - const char *p); -token_t *au_to_exit(int retval, int err); -token_t *au_to_groups(int *groups); -token_t *au_to_newgroups(uint16_t n, gid_t *groups); -token_t *au_to_in_addr(struct in_addr *internet_addr); -token_t *au_to_in_addr_ex(struct in6_addr *internet_addr); -token_t *au_to_ip(struct ip *ip); -token_t *au_to_ipc(char type, int id); -token_t *au_to_ipc_perm(struct ipc_perm *perm); -token_t *au_to_iport(uint16_t iport); -token_t *au_to_opaque(const char *data, uint16_t bytes); -token_t *au_to_path(const char *path); -token_t *au_to_process(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); -token_t *au_to_process32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); -token_t *au_to_process64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); -token_t *au_to_process_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); -token_t *au_to_process32_ex(au_id_t auid, uid_t euid, gid_t egid, - uid_t ruid, gid_t rgid, pid_t pid, au_asid_t sid, - au_tid_addr_t *tid); -token_t *au_to_process64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); -token_t *au_to_return(char status, uint32_t ret); -token_t *au_to_return32(char status, uint32_t ret); -token_t *au_to_return64(char status, uint64_t ret); -token_t *au_to_seq(long audit_count); -token_t *au_to_socket_ex(u_short so_domain, u_short so_type, - struct sockaddr *sa_local, struct sockaddr *sa_remote); -token_t *au_to_sock_inet(struct sockaddr_in *so); -token_t *au_to_sock_inet32(struct sockaddr_in *so); -token_t *au_to_sock_inet128(struct sockaddr_in6 *so); -token_t *au_to_sock_unix(struct sockaddr_un *so); -token_t *au_to_subject(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); -token_t *au_to_subject32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, 
au_asid_t sid, au_tid_t *tid); -token_t *au_to_subject64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); -token_t *au_to_subject_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); -token_t *au_to_subject32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); -token_t *au_to_subject64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, - gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_data(char unit_print, char unit_type, char unit_count, + const char *p); +token_t *au_to_exit(int retval, int err); +token_t *au_to_groups(int *groups); +token_t *au_to_newgroups(uint16_t n, gid_t *groups); +token_t *au_to_in_addr(struct in_addr *internet_addr); +token_t *au_to_in_addr_ex(struct in6_addr *internet_addr); +token_t *au_to_ip(struct ip *ip); +token_t *au_to_ipc(char type, int id); +token_t *au_to_ipc_perm(struct ipc_perm *perm); +token_t *au_to_iport(uint16_t iport); +token_t *au_to_opaque(const char *data, uint16_t bytes); +token_t *au_to_path(const char *path); +token_t *au_to_process(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); +token_t *au_to_process32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); +token_t *au_to_process64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); +token_t *au_to_process_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_process32_ex(au_id_t auid, uid_t euid, gid_t egid, + uid_t ruid, gid_t rgid, pid_t pid, au_asid_t sid, + au_tid_addr_t *tid); +token_t *au_to_process64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_return(char status, uint32_t ret); +token_t *au_to_return32(char status, uint32_t ret); +token_t *au_to_return64(char status, uint64_t ret); +token_t *au_to_seq(long audit_count); +token_t *au_to_socket_ex(u_short so_domain, u_short so_type, + struct sockaddr *sa_local, struct sockaddr *sa_remote); +token_t *au_to_sock_inet(struct sockaddr_in *so); +token_t *au_to_sock_inet32(struct sockaddr_in *so); +token_t *au_to_sock_inet128(struct sockaddr_in6 *so); +token_t *au_to_sock_unix(struct sockaddr_un *so); +token_t *au_to_subject(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); +token_t *au_to_subject32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); +token_t *au_to_subject64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid); +token_t *au_to_subject_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_subject32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); +token_t *au_to_subject64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, + gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid); #if defined(_KERNEL) || defined(KERNEL) -token_t *au_to_exec_args(char *args, int argc); -token_t *au_to_exec_env(char *envs, int envc); -token_t *au_to_certificate_hash(char *hash, int hashc); -token_t 
*au_to_krb5_principal(char *principal, int princ); +token_t *au_to_exec_args(char *args, int argc); +token_t *au_to_exec_env(char *envs, int envc); +token_t *au_to_certificate_hash(char *hash, int hashc); +token_t *au_to_krb5_principal(char *principal, int princ); #else -token_t *au_to_exec_args(char **argv); -token_t *au_to_exec_env(char **envp); -token_t *au_to_certificate_hash(char **hash); -token_t *au_to_krb5_principal(char **principal); +token_t *au_to_exec_args(char **argv); +token_t *au_to_exec_env(char **envp); +token_t *au_to_certificate_hash(char **hash); +token_t *au_to_krb5_principal(char **principal); #endif -token_t *au_to_text(const char *text); -token_t *au_to_kevent(struct kevent *kev); -token_t *au_to_trailer(int rec_size); -token_t *au_to_zonename(const char *zonename); -token_t *au_to_identity(uint32_t signer_type, const char* signing_id, - u_char signing_id_trunc, const char* team_id, u_char team_id_trunc, - uint8_t* cdhash, uint16_t cdhash_len); +token_t *au_to_text(const char *text); +token_t *au_to_kevent(struct kevent *kev); +token_t *au_to_trailer(int rec_size); +token_t *au_to_zonename(const char *zonename); +token_t *au_to_identity(uint32_t signer_type, const char* signing_id, + u_char signing_id_trunc, const char* team_id, u_char team_id_trunc, + uint8_t* cdhash, uint16_t cdhash_len); /* * BSM library routines for converting between local and BSM constant spaces. */ -int au_bsm_to_domain(u_short bsm_domain, int *local_domainp); -int au_bsm_to_errno(u_char bsm_error, int *errorp); -int au_bsm_to_fcntl_cmd(u_short bsm_fcntl_cmd, int *local_fcntl_cmdp); -int au_bsm_to_socket_type(u_short bsm_socket_type, - int *local_socket_typep); -u_short au_domain_to_bsm(int local_domain); -u_char au_errno_to_bsm(int local_errno); -u_short au_fcntl_cmd_to_bsm(int local_fcntl_command); -u_short au_socket_type_to_bsm(int local_socket_type); +int au_bsm_to_domain(u_short bsm_domain, int *local_domainp); +int au_bsm_to_errno(u_char bsm_error, int *errorp); +int au_bsm_to_fcntl_cmd(u_short bsm_fcntl_cmd, int *local_fcntl_cmdp); +int au_bsm_to_socket_type(u_short bsm_socket_type, + int *local_socket_typep); +u_short au_domain_to_bsm(int local_domain); +u_char au_errno_to_bsm(int local_errno); +u_short au_fcntl_cmd_to_bsm(int local_fcntl_command); +u_short au_socket_type_to_bsm(int local_socket_type); __END_DECLS diff --git a/bsd/bsm/audit_socket_type.h b/bsd/bsm/audit_socket_type.h index 85f6aef7a..d4b45d3d8 100644 --- a/bsd/bsm/audit_socket_type.h +++ b/bsd/bsm/audit_socket_type.h @@ -24,23 +24,23 @@ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. + * POSSIBILITY OF SUCH DAMAGE. * * $P4: //depot/projects/trustedbsd/openbsm/sys/bsm/audit_socket_type.h#1 $ */ #ifndef _BSM_AUDIT_SOCKET_TYPE_H_ -#define _BSM_AUDIT_SOCKET_TYPE_H_ +#define _BSM_AUDIT_SOCKET_TYPE_H_ /* * BSM socket type constants. 
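The conversion routines declared above translate between local constant spaces and the portable BSM wire values — for socket types, the BSM_SOCK_* constants defined just below. A sketch of a round trip, assuming the usual 0-on-success convention for the au_bsm_to_* direction:

#include <sys/socket.h>

static int
roundtrip_socket_type_sketch(int local_type)
{
	u_short bsm = au_socket_type_to_bsm(local_type);	/* unknown -> BSM_SOCK_UNKNOWN */
	int back = -1;

	if (au_bsm_to_socket_type(bsm, &back) != 0) {
		return -1;
	}
	return back;	/* equals local_type for every known type */
}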
*/ -#define BSM_SOCK_DGRAM 1 -#define BSM_SOCK_STREAM 2 -#define BSM_SOCK_RAW 4 -#define BSM_SOCK_RDM 5 -#define BSM_SOCK_SEQPACKET 6 +#define BSM_SOCK_DGRAM 1 +#define BSM_SOCK_STREAM 2 +#define BSM_SOCK_RAW 4 +#define BSM_SOCK_RDM 5 +#define BSM_SOCK_SEQPACKET 6 -#define BSM_SOCK_UNKNOWN 500 +#define BSM_SOCK_UNKNOWN 500 #endif /* !_BSM_AUDIT_SOCKET_TYPE_H_ */ diff --git a/bsd/conf/files b/bsd/conf/files index 65e1393f6..e5a34f794 100644 --- a/bsd/conf/files +++ b/bsd/conf/files @@ -113,6 +113,7 @@ bsd/nfs/gss/ccrypto.c optional nfsserver bsd/kern/netboot.c optional nfsclient bsd/dev/dtrace/dtrace.c optional config_dtrace +bsd/dev/dtrace/lockprof.c optional config_dtrace bsd/dev/dtrace/lockstat.c optional config_dtrace bsd/dev/dtrace/dtrace_ptss.c optional config_dtrace bsd/dev/dtrace/dtrace_subr.c optional config_dtrace @@ -510,3 +511,4 @@ bsd/miscfs/nullfs/null_vnops.c optional nullfs bsd/tests/bsd_tests.c optional config_xnupost bsd/tests/pmap_test_sysctl.c optional config_xnupost +bsd/net/skywalk_stubs.c standard diff --git a/bsd/conf/param.c b/bsd/conf/param.c index d78d06c4a..401b05a3d 100644 --- a/bsd/conf/param.c +++ b/bsd/conf/param.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -80,39 +80,39 @@ #include #include -struct timezone tz = { 0, 0 }; +struct timezone tz = { 0, 0 }; #if CONFIG_EMBEDDED -#define NPROC 1000 /* Account for TOTAL_CORPSES_ALLOWED by making this slightly lower than we can. */ -#define NPROC_PER_UID 950 +#define NPROC 1000 /* Account for TOTAL_CORPSES_ALLOWED by making this slightly lower than we can. 
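The NPROC figures here are only the seeds: maxproc and hard_maxproc are scaled per device in bsd_scale_setup (per the note below), and the effective values surface as sysctls. A userspace sketch reading them:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

static int
print_proc_limits_sketch(void)
{
	int maxproc = 0, maxperuid = 0;
	size_t len = sizeof(maxproc);

	if (sysctlbyname("kern.maxproc", &maxproc, &len, NULL, 0) != 0) {
		return -1;
	}
	len = sizeof(maxperuid);
	if (sysctlbyname("kern.maxprocperuid", &maxperuid, &len, NULL, 0) != 0) {
		return -1;
	}
	printf("maxproc=%d, per-uid=%d\n", maxproc, maxperuid);
	return 0;
}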
*/ +#define NPROC_PER_UID 950 #else -#define NPROC (20 + 16 * 32) -#define NPROC_PER_UID (NPROC/2) +#define NPROC (20 + 16 * 32) +#define NPROC_PER_UID (NPROC/2) #endif /* NOTE: maxproc and hard_maxproc values are subject to device specific scaling in bsd_scale_setup */ -#define HNPROC 2500 /* based on thread_max */ -int maxproc = NPROC; -int maxprocperuid = NPROC_PER_UID; +#define HNPROC 2500 /* based on thread_max */ +int maxproc = NPROC; +int maxprocperuid = NPROC_PER_UID; #if CONFIG_EMBEDDED -int hard_maxproc = NPROC; /* hardcoded limit -- for embedded the number of processes is limited by the ASID space */ +int hard_maxproc = NPROC; /* hardcoded limit -- for embedded the number of processes is limited by the ASID space */ #else -int hard_maxproc = HNPROC; /* hardcoded limit */ +int hard_maxproc = HNPROC; /* hardcoded limit */ #endif int nprocs = 0; /* XXX */ //#define NTEXT (80 + NPROC / 8) /* actually the object cache */ -int desiredvnodes = 0; /* desiredvnodes is set explicitly in unix_startup.c */ -uint32_t kern_maxvnodes = 0; /* global, to be read from the device tree */ +int desiredvnodes = 0; /* desiredvnodes is set explicitly in unix_startup.c */ +uint32_t kern_maxvnodes = 0; /* global, to be read from the device tree */ #define MAXFILES (OPEN_MAX + 2048) -int maxfiles = MAXFILES; +int maxfiles = MAXFILES; -unsigned int ncallout = 16 + 2*NPROC; +unsigned int ncallout = 16 + 2 * NPROC; unsigned int nmbclusters = NMBCLUSTERS; -int nport = NPROC / 2; +int nport = NPROC / 2; /* * async IO (aio) configurable limits @@ -126,9 +126,9 @@ int aio_worker_threads = CONFIG_AIO_THREAD_COUNT; * them here forces loader errors if this file is omitted * (if they've been externed everywhere else; hah!). */ -struct callout *callout; -struct cblock *cfree; -struct cblock *cfreelist = NULL; -int cfreecount = 0; -struct buf *buf_headers; +struct callout *callout; +struct cblock *cfree; +struct cblock *cfreelist = NULL; +int cfreecount = 0; +struct buf *buf_headers; struct domains_head domains = TAILQ_HEAD_INITIALIZER(domains); diff --git a/bsd/crypto/rc4/rc4.c b/bsd/crypto/rc4/rc4.c index 2154cc404..da9559fea 100644 --- a/bsd/crypto/rc4/rc4.c +++ b/bsd/crypto/rc4/rc4.c @@ -1,10 +1,9 @@ - /* * rc4.c * * Copyright (c) 1996-2000 Whistle Communications, Inc. * All rights reserved. - * + * * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; @@ -15,7 +14,7 @@ * Communications, Inc. trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. 
- * + * * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, @@ -61,14 +60,15 @@ rc4_init(struct rc4_state *const state, const u_char *key, int keylen) int i; /* Initialize state with identity permutation */ - for (i = 0; i < 256; i++) - state->perm[i] = (u_char)i; + for (i = 0; i < 256; i++) { + state->perm[i] = (u_char)i; + } state->index1 = 0; state->index2 = 0; - + /* Randomize the permutation using key data */ for (j = i = 0; i < 256; i++) { - j += state->perm[i] + key[i % keylen]; + j += state->perm[i] + key[i % keylen]; swap_bytes(&state->perm[i], &state->perm[j]); } } @@ -81,13 +81,12 @@ rc4_init(struct rc4_state *const state, const u_char *key, int keylen) */ void rc4_crypt(struct rc4_state *const state, - const u_char *inbuf, u_char *outbuf, int buflen) + const u_char *inbuf, u_char *outbuf, int buflen) { int i; u_char j; for (i = 0; i < buflen; i++) { - /* Update modification indicies */ state->index1++; state->index2 += state->perm[state->index1]; @@ -101,4 +100,3 @@ rc4_crypt(struct rc4_state *const state, outbuf[i] = inbuf[i] ^ state->perm[j]; } } - diff --git a/bsd/crypto/rc4/rc4.h b/bsd/crypto/rc4/rc4.h index 45971adbf..40a1f4fae 100644 --- a/bsd/crypto/rc4/rc4.h +++ b/bsd/crypto/rc4/rc4.h @@ -1,10 +1,9 @@ - /* * rc4.h * * Copyright (c) 1996-2000 Whistle Communications, Inc. * All rights reserved. - * + * * Subject to the following obligations and disclaimer of warranty, use and * redistribution of this software, in source or object code forms, with or * without modifications are expressly permitted by Whistle Communications; @@ -15,7 +14,7 @@ * Communications, Inc. trademarks, including the mark "WHISTLE * COMMUNICATIONS" on advertising, endorsements, or otherwise except as * such appears in the above copyright notice or in the software. - * + * * THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND * TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE, @@ -41,14 +40,13 @@ #define _SYS_CRYPTO_RC4_RC4_H_ struct rc4_state { - u_char perm[256]; - u_char index1; - u_char index2; + u_char perm[256]; + u_char index1; + u_char index2; }; extern void rc4_init(struct rc4_state *state, const u_char *key, int keylen); extern void rc4_crypt(struct rc4_state *state, - const u_char *inbuf, u_char *outbuf, int buflen); + const u_char *inbuf, u_char *outbuf, int buflen); #endif - diff --git a/bsd/crypto/sha1.h b/bsd/crypto/sha1.h index bfc874833..e069a88bb 100644 --- a/bsd/crypto/sha1.h +++ b/bsd/crypto/sha1.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
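For context on the rc4.c/rc4.h cleanups above: the cipher is a pure keystream XOR, so encrypting and decrypting are the same call, and each message needs a freshly keyed state. A usage sketch (header path as in this tree; RC4 itself has long been broken for confidentiality and survives here only for legacy consumers):

#include <sys/types.h>
#include <crypto/rc4/rc4.h>

static void
rc4_roundtrip_sketch(void)
{
	struct rc4_state st;
	u_char key[] = "example-key";
	u_char buf[] = "plaintext";

	rc4_init(&st, key, (int)sizeof(key) - 1);
	rc4_crypt(&st, buf, buf, (int)sizeof(buf) - 1);	/* in place: buf is now ciphertext */

	rc4_init(&st, key, (int)sizeof(key) - 1);	/* re-key to restart the keystream */
	rc4_crypt(&st, buf, buf, (int)sizeof(buf) - 1);	/* buf is plaintext again */
}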
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/crypto/sha2.h b/bsd/crypto/sha2.h index 7e1dea80c..e1aeb8db2 100644 --- a/bsd/crypto/sha2.h +++ b/bsd/crypto/sha2.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/dev/arm/conf.c b/bsd/dev/arm/conf.c index ef78baad1..0e0e8a435 100644 --- a/bsd/dev/arm/conf.c +++ b/bsd/dev/arm/conf.c @@ -10,7 +10,7 @@ * HISTORY * * 30 July 1997 Umesh Vaishampayan (umeshv@apple.com) - * enabled file descriptor pseudo-device. + * enabled file descriptor pseudo-device. * 18 June 1993 ? at NeXT * Cleaned up a lot of stuff in this file. 
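The conf.c tables that follow are the static device switches: bdevsw[] and cdevsw[] are indexed by major number, and NO_BDEVICE fills slots that are reserved or unclaimed. Loadable drivers claim a slot at runtime instead of editing these tables; a sketch using the bdevsw_add()/cdevsw_add() KPIs — my understanding is that an index of -1 requests any free slot and the call returns the chosen major number, or -1 on failure:

#include <sys/conf.h>

static struct cdevsw my_cdevsw_sketch = {
	.d_open  = eno_opcl,	/* replace with real entry points */
	.d_close = eno_opcl,
};

static int
claim_major_sketch(void)
{
	return cdevsw_add(-1, &my_cdevsw_sketch);
}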
*/ @@ -38,34 +38,34 @@ struct bdevsw bdevsw[] = /* 0 - 7 are reserved to NeXT */ - NO_BDEVICE, /* 0 */ - NO_BDEVICE, /* 1 */ - NO_BDEVICE, /* 2 */ - NO_BDEVICE, /* 3 */ - NO_BDEVICE, /* 4 */ - NO_BDEVICE, /* 5 */ - NO_BDEVICE, /* 6 */ - NO_BDEVICE, /* 7 */ + NO_BDEVICE, /* 0 */ + NO_BDEVICE, /* 1 */ + NO_BDEVICE, /* 2 */ + NO_BDEVICE, /* 3 */ + NO_BDEVICE, /* 4 */ + NO_BDEVICE, /* 5 */ + NO_BDEVICE, /* 6 */ + NO_BDEVICE, /* 7 */ /* 8 - 15 are reserved to the user */ - NO_BDEVICE, /* 8 */ - NO_BDEVICE, /* 9 */ - NO_BDEVICE, /* 10 */ - NO_BDEVICE, /* 11 */ - NO_BDEVICE, /* 12 */ - NO_BDEVICE, /* 13 */ - NO_BDEVICE, /* 14 */ - NO_BDEVICE, /* 15 */ + NO_BDEVICE, /* 8 */ + NO_BDEVICE, /* 9 */ + NO_BDEVICE, /* 10 */ + NO_BDEVICE, /* 11 */ + NO_BDEVICE, /* 12 */ + NO_BDEVICE, /* 13 */ + NO_BDEVICE, /* 14 */ + NO_BDEVICE, /* 15 */ /* 16 - 23 are reserved to NeXT */ - NO_BDEVICE, /* 16 */ - NO_BDEVICE, /* 17 */ - NO_BDEVICE, /* 18 */ - NO_BDEVICE, /* 18 */ - NO_BDEVICE, /* 20 */ - NO_BDEVICE, /* 21 */ - NO_BDEVICE, /* 22 */ - NO_BDEVICE, /* 23 */ + NO_BDEVICE, /* 16 */ + NO_BDEVICE, /* 17 */ + NO_BDEVICE, /* 18 */ + NO_BDEVICE, /* 18 */ + NO_BDEVICE, /* 20 */ + NO_BDEVICE, /* 21 */ + NO_BDEVICE, /* 22 */ + NO_BDEVICE, /* 23 */ }; const int nblkdev = sizeof(bdevsw) / sizeof(bdevsw[0]); @@ -91,9 +91,9 @@ extern d_open_t volopen; extern d_close_t volclose; extern d_ioctl_t volioctl; #else -#define volopen eno_opcl -#define volclose eno_opcl -#define volioctl eno_ioctl +#define volopen eno_opcl +#define volclose eno_opcl +#define volioctl eno_ioctl #endif extern d_open_t cttyopen; @@ -105,8 +105,8 @@ extern d_select_t cttyselect; extern d_read_t mmread; extern d_write_t mmwrite; extern d_ioctl_t mmioctl; -#define mmselect (select_fcn_t *)seltrue -#define mmmmap eno_mmap +#define mmselect (select_fcn_t *)seltrue +#define mmmmap eno_mmap #include #if NPTY > 0 @@ -123,18 +123,18 @@ extern d_write_t ptcwrite; extern d_select_t ptcselect; extern d_ioctl_t ptyioctl; #else -#define ptsopen eno_opcl -#define ptsclose eno_opcl -#define ptsread eno_rdwrt -#define ptswrite eno_rdwrt -#define ptsstop nulldev +#define ptsopen eno_opcl +#define ptsclose eno_opcl +#define ptsread eno_rdwrt +#define ptswrite eno_rdwrt +#define ptsstop nulldev -#define ptcopen eno_opcl -#define ptcclose eno_opcl -#define ptcread eno_rdwrt -#define ptcwrite eno_rdwrt -#define ptcselect eno_select -#define ptyioctl eno_ioctl +#define ptcopen eno_opcl +#define ptcclose eno_opcl +#define ptcread eno_rdwrt +#define ptcwrite eno_rdwrt +#define ptcselect eno_select +#define ptyioctl eno_ioctl #endif extern d_open_t logopen; @@ -149,19 +149,19 @@ extern d_read_t oslog_streamread; extern d_ioctl_t oslog_streamioctl; extern d_select_t oslog_streamselect; -extern d_open_t oslogopen; -extern d_close_t oslogclose; -extern d_select_t oslogselect; -extern d_ioctl_t oslogioctl; +extern d_open_t oslogopen; +extern d_close_t oslogclose; +extern d_select_t oslogselect; +extern d_ioctl_t oslogioctl; -#define nullopen (d_open_t *)&nulldev -#define nullclose (d_close_t *)&nulldev -#define nullread (d_read_t *)&nulldev -#define nullwrite (d_write_t *)&nulldev -#define nullioctl (d_ioctl_t *)&nulldev -#define nullselect (d_select_t *)&nulldev -#define nullstop (d_stop_t *)&nulldev -#define nullreset (d_reset_t *)&nulldev +#define nullopen (d_open_t *)&nulldev +#define nullclose (d_close_t *)&nulldev +#define nullread (d_read_t *)&nulldev +#define nullwrite (d_write_t *)&nulldev +#define nullioctl (d_ioctl_t *)&nulldev +#define nullselect (d_select_t 
*)&nulldev +#define nullstop (d_stop_t *)&nulldev +#define nullreset (d_reset_t *)&nulldev struct cdevsw cdevsw[] = { /* @@ -226,7 +226,7 @@ const int nchrdev = sizeof(cdevsw) / sizeof(cdevsw[0]); uint64_t cdevsw_flags[sizeof(cdevsw) / sizeof(cdevsw[0])]; -#include /* for VCHR and VBLK */ +#include /* for VCHR and VBLK */ /* * return true if a disk */ @@ -241,41 +241,41 @@ isdisk(dev_t dev, int type) if (maj == NODEV) { break; } - /* FALL THROUGH */ + /* FALL THROUGH */ case VBLK: if (bdevsw[maj].d_type == D_DISK) { - return (1); + return 1; } break; } - return (0); + return 0; } static int chrtoblktab[] = { /* CHR *//* BLK *//* CHR *//* BLK */ - /* 0 */ NODEV, /* 1 */ NODEV, - /* 2 */ NODEV, /* 3 */ NODEV, - /* 4 */ NODEV, /* 5 */ NODEV, - /* 6 */ NODEV, /* 7 */ NODEV, - /* 8 */ NODEV, /* 9 */ NODEV, - /* 10 */ NODEV, /* 11 */ NODEV, - /* 12 */ NODEV, /* 13 */ NODEV, - /* 14 */ NODEV, /* 15 */ NODEV, - /* 16 */ NODEV, /* 17 */ NODEV, - /* 18 */ NODEV, /* 19 */ NODEV, - /* 20 */ NODEV, /* 21 */ NODEV, - /* 22 */ NODEV, /* 23 */ NODEV, - /* 24 */ NODEV, /* 25 */ NODEV, - /* 26 */ NODEV, /* 27 */ NODEV, - /* 28 */ NODEV, /* 29 */ NODEV, - /* 30 */ NODEV, /* 31 */ NODEV, - /* 32 */ NODEV, /* 33 */ NODEV, - /* 34 */ NODEV, /* 35 */ NODEV, - /* 36 */ NODEV, /* 37 */ NODEV, - /* 38 */ NODEV, /* 39 */ NODEV, - /* 40 */ NODEV, /* 41 */ NODEV, - /* 42 */ NODEV, /* 43 */ NODEV, - /* 44 */ NODEV, + /* 0 */ NODEV, /* 1 */ NODEV, + /* 2 */ NODEV, /* 3 */ NODEV, + /* 4 */ NODEV, /* 5 */ NODEV, + /* 6 */ NODEV, /* 7 */ NODEV, + /* 8 */ NODEV, /* 9 */ NODEV, + /* 10 */ NODEV, /* 11 */ NODEV, + /* 12 */ NODEV, /* 13 */ NODEV, + /* 14 */ NODEV, /* 15 */ NODEV, + /* 16 */ NODEV, /* 17 */ NODEV, + /* 18 */ NODEV, /* 19 */ NODEV, + /* 20 */ NODEV, /* 21 */ NODEV, + /* 22 */ NODEV, /* 23 */ NODEV, + /* 24 */ NODEV, /* 25 */ NODEV, + /* 26 */ NODEV, /* 27 */ NODEV, + /* 28 */ NODEV, /* 29 */ NODEV, + /* 30 */ NODEV, /* 31 */ NODEV, + /* 32 */ NODEV, /* 33 */ NODEV, + /* 34 */ NODEV, /* 35 */ NODEV, + /* 36 */ NODEV, /* 37 */ NODEV, + /* 38 */ NODEV, /* 39 */ NODEV, + /* 40 */ NODEV, /* 41 */ NODEV, + /* 42 */ NODEV, /* 43 */ NODEV, + /* 44 */ NODEV, }; /* @@ -286,21 +286,25 @@ chrtoblk(dev_t dev) { int blkmaj; - if (major(dev) >= nchrdev) - return (NODEV); + if (major(dev) >= nchrdev) { + return NODEV; + } blkmaj = chrtoblktab[major(dev)]; - if (blkmaj == NODEV) - return (NODEV); - return (makedev(blkmaj, minor(dev))); + if (blkmaj == NODEV) { + return NODEV; + } + return makedev(blkmaj, minor(dev)); } int chrtoblk_set(int cdev, int bdev) { - if (cdev >= nchrdev) - return (-1); - if (bdev != NODEV && bdev >= nblkdev) - return (-1); + if (cdev >= nchrdev) { + return -1; + } + if (bdev != NODEV && bdev >= nblkdev) { + return -1; + } chrtoblktab[cdev] = bdev; return 0; } diff --git a/bsd/dev/arm/cons.c b/bsd/dev/arm/cons.c index 910c15257..5ac18e219 100644 --- a/bsd/dev/arm/cons.c +++ b/bsd/dev/arm/cons.c @@ -3,9 +3,9 @@ */ /* * Copyright (c) 1987, 1988 NeXT, Inc. - * + * * HISTORY 7-Jan-93 Mac Gillon (mgillon) at NeXT Integrated POSIX support - * + * * 12-Aug-87 John Seamons (jks) at NeXT Ported to NeXT. 
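On the chrtoblk machinery above: a driver that exposes both a character and a block front end registers the pairing with chrtoblk_set(), after which the BSD layer can translate a character dev_t into its block twin with chrtoblk(). A sketch with invented majors:

#include <sys/conf.h>
#include <sys/systm.h>	/* printf */

static void
pair_majors_sketch(int char_major, int block_major)
{
	if (chrtoblk_set(char_major, block_major) == -1) {
		printf("chrtoblk_set: major out of range\n");
		return;
	}
	/* chrtoblk(makedev(char_major, 0)) now yields the block dev_t */
}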
*/ @@ -20,7 +20,7 @@ #include #include -struct tty *constty; /* current console device */ +struct tty *constty; /* current console device */ /* * The km driver supplied the default console device for the systems @@ -41,17 +41,18 @@ int cnselect(__unused dev_t dev, int flag, void * wql, proc_t p); static dev_t cndev(void) { - if (constty) + if (constty) { return constty->t_dev; - else + } else { return km_tty[0]->t_dev; + } } int cnopen(__unused dev_t dev, int flag, int devtype, struct proc *pp) { dev = cndev(); - return ((*cdevsw[major(dev)].d_open)(dev, flag, devtype, pp)); + return (*cdevsw[major(dev)].d_open)(dev, flag, devtype, pp); } @@ -59,7 +60,7 @@ int cnclose(__unused dev_t dev, int flag, int mode, struct proc *pp) { dev = cndev(); - return ((*cdevsw[major(dev)].d_close)(dev, flag, mode, pp)); + return (*cdevsw[major(dev)].d_close)(dev, flag, mode, pp); } @@ -67,7 +68,7 @@ int cnread(__unused dev_t dev, struct uio *uio, int ioflag) { dev = cndev(); - return ((*cdevsw[major(dev)].d_read)(dev, uio, ioflag)); + return (*cdevsw[major(dev)].d_read)(dev, uio, ioflag); } @@ -75,7 +76,7 @@ int cnwrite(__unused dev_t dev, struct uio *uio, int ioflag) { dev = cndev(); - return ((*cdevsw[major(dev)].d_write)(dev, uio, ioflag)); + return (*cdevsw[major(dev)].d_write)(dev, uio, ioflag); } @@ -94,12 +95,13 @@ cnioctl(__unused dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p) */ if ((unsigned) cmd == TIOCCONS && constty) { int error = proc_suser(p); - if (error) - return (error); + if (error) { + return error; + } constty = NULL; - return (0); + return 0; } - return ((*cdevsw[major(dev)].d_ioctl)(dev, cmd, addr, flag, p)); + return (*cdevsw[major(dev)].d_ioctl)(dev, cmd, addr, flag, p); } @@ -107,5 +109,5 @@ int cnselect(__unused dev_t dev, int flag, void *wql, struct proc *p) { dev = cndev(); - return ((*cdevsw[major(dev)].d_select)(dev, flag, wql, p)); + return (*cdevsw[major(dev)].d_select)(dev, flag, wql, p); } diff --git a/bsd/dev/arm/disassembler.c b/bsd/dev/arm/disassembler.c index a5db2033e..cc7c2e95d 100644 --- a/bsd/dev/arm/disassembler.c +++ b/bsd/dev/arm/disassembler.c @@ -12,9 +12,10 @@ #include -#define BITS(x,n,mask) (((x) >> (n)) & (mask)) +#define BITS(x, n, mask) (((x) >> (n)) & (mask)) -static uint32_t thumb32_instword_to_arm(uint16_t hw1, uint16_t hw2) +static uint32_t +thumb32_instword_to_arm(uint16_t hw1, uint16_t hw2) { return (hw1 << 16) | hw2; } @@ -27,46 +28,55 @@ int dtrace_decode_thumb(uint32_t instr); */ static -int vfp_struct_loadstore(uint32_t instr) +int +vfp_struct_loadstore(uint32_t instr) { - if (ARM_RM(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int vfp_64transfer(uint32_t instr) +int +vfp_64transfer(uint32_t instr) { /* These instructions all use RD and RN */ - if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int vfp_transfer(uint32_t instr) +int +vfp_transfer(uint32_t instr) { /* These instructions all use RD only */ - if (ARM_RD(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int vfp_loadstore(uint32_t instr) +int +vfp_loadstore(uint32_t instr) { - int opcode = BITS(instr,20,0x1F); + int opcode = BITS(instr, 20, 0x1F); /* Instrument VLDR */ - if ((opcode & 0x13) == 0x11 && ARM_RN(instr) == REG_PC) + if ((opcode & 0x13) == 0x11 && 
ARM_RN(instr) == REG_PC) { return FASTTRAP_T_VLDR_PC_IMMED; - + } + /* These instructions all use RN only */ - if (ARM_RN(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } @@ -76,9 +86,10 @@ int vfp_loadstore(uint32_t instr) */ static -int arm_unconditional_misc(uint32_t instr) +int +arm_unconditional_misc(uint32_t instr) { - int op = BITS(instr,20,0x7F); + int op = BITS(instr, 20, 0x7F); if ((op & 0x60) == 0x20) { /* VFP data processing uses its own registers */ @@ -93,10 +104,12 @@ int arm_unconditional_misc(uint32_t instr) } static -int arm_unconditional(uint32_t instr) +int +arm_unconditional(uint32_t instr) { - if (BITS(instr,27,0x1) == 0) + if (BITS(instr, 27, 0x1) == 0) { return arm_unconditional_misc(instr); + } /* The rest are privileged or BL/BLX, do not instrument */ @@ -106,11 +119,12 @@ int arm_unconditional(uint32_t instr) } static -int arm_syscall_coproc(uint32_t instr) +int +arm_syscall_coproc(uint32_t instr) { /* Instrument any VFP data processing instructions, ignore the rest */ - int op1 = BITS(instr,20,0x3F), coproc = BITS(instr,8,0xF), op = BITS(instr,4,0x1); + int op1 = BITS(instr, 20, 0x3F), coproc = BITS(instr, 8, 0xF), op = BITS(instr, 4, 0x1); if ((op1 & 0x3E) == 0 || (op1 & 0x30) == 0x30) { /* Undefined or swi */ @@ -120,11 +134,13 @@ int arm_syscall_coproc(uint32_t instr) if ((coproc & 0xE) == 0xA) { /* VFP instruction */ - if ((op1 & 0x20) == 0 && (op1 & 0x3A) != 0) + if ((op1 & 0x20) == 0 && (op1 & 0x3A) != 0) { return vfp_loadstore(instr); + } - if ((op1 & 0x3E) == 0x04) + if ((op1 & 0x3E) == 0x04) { return vfp_64transfer(instr); + } if ((op1 & 0x30) == 0x20) { /* VFP data processing or 8, 16, or 32 bit move between ARM reg and VFP reg */ @@ -141,98 +157,118 @@ int arm_syscall_coproc(uint32_t instr) } static -int arm_branch_link_blockdata(uint32_t instr) +int +arm_branch_link_blockdata(uint32_t instr) { - int branch = BITS(instr,25,0x1), link = BITS(instr,24,0x1), op = BITS(instr,20,0x1F), uses_pc = BITS(instr,15,0x1), uses_lr = BITS(instr,14,0x1); + int branch = BITS(instr, 25, 0x1), link = BITS(instr, 24, 0x1), op = BITS(instr, 20, 0x1F), uses_pc = BITS(instr, 15, 0x1), uses_lr = BITS(instr, 14, 0x1); if (branch == 1) { - if (link == 0) + if (link == 0) { return FASTTRAP_T_B_COND; + } return FASTTRAP_T_INV; } else { /* Only emulate a use of the pc if it's a return from function: ldmia sp!, { ... pc } */ - if (op == 0x0B && ARM_RN(instr) == REG_SP && uses_pc == 1) + if (op == 0x0B && ARM_RN(instr) == REG_SP && uses_pc == 1) { return FASTTRAP_T_LDM_PC; + } /* stmia sp!, { ... 
lr } doesn't touch the pc, but it is very common, so special case it */ - if (op == 0x12 && ARM_RN(instr) == REG_SP && uses_lr == 1) + if (op == 0x12 && ARM_RN(instr) == REG_SP && uses_lr == 1) { return FASTTRAP_T_STM_LR; + } - if (ARM_RN(instr) != REG_PC && uses_pc == 0) + if (ARM_RN(instr) != REG_PC && uses_pc == 0) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int arm_signed_multiplies(uint32_t instr) +int +arm_signed_multiplies(uint32_t instr) { - int op1 = BITS(instr,20,0x7), op2 = BITS(instr,5,0x7); + int op1 = BITS(instr, 20, 0x7), op2 = BITS(instr, 5, 0x7); /* smlald, smlsld, smmls use RD in addition to RM, RS, and RN */ if ((op1 == 0x4 && (op2 & 0x4) == 0) || (op1 == 0x5 && (op2 & 0x6) == 0x6)) { - if (ARM_RD(instr) == REG_PC) + if (ARM_RD(instr) == REG_PC) { return FASTTRAP_T_INV; + } } - if (ARM_RM(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_pack_unpack_sat_reversal(uint32_t instr) +int +arm_pack_unpack_sat_reversal(uint32_t instr) { - int op1 = BITS(instr,20,0x7), op2 = BITS(instr,5,0x7); + int op1 = BITS(instr, 20, 0x7), op2 = BITS(instr, 5, 0x7); /* pkh, sel use RN in addition to RD and RM */ if ((op1 == 0 && (op2 & 0x1) == 0) || (op1 == 0 && op2 == 0x5)) { - if (ARM_RN(instr) == REG_PC) + if (ARM_RN(instr) == REG_PC) { return FASTTRAP_T_INV; + } } - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_parallel_addsub_unsigned(uint32_t instr) +int +arm_parallel_addsub_unsigned(uint32_t instr) { - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_parallel_addsub_signed(uint32_t instr) +int +arm_parallel_addsub_signed(uint32_t instr) { - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_media(uint32_t instr) +int +arm_media(uint32_t instr) { - int op1 = BITS(instr,20,0x1F), op2 = BITS(instr,5,0x7); + int op1 = BITS(instr, 20, 0x1F), op2 = BITS(instr, 5, 0x7); - if ((op1 & 0x1C) == 0) + if ((op1 & 0x1C) == 0) { return arm_parallel_addsub_signed(instr); + } - if ((op1 & 0x1C) == 0x04) + if ((op1 & 0x1C) == 0x04) { return arm_parallel_addsub_unsigned(instr); + } - if ((op1 & 0x18) == 0x08) + if ((op1 & 0x18) == 0x08) { return arm_pack_unpack_sat_reversal(instr); + } - if ((op1 & 0x18) == 0x10) + if ((op1 & 0x18) == 0x10) { return arm_signed_multiplies(instr); + } if (op1 == 0x1F && op2 == 0x7) { /* Undefined instruction */ @@ -244,24 +280,27 @@ int arm_media(uint32_t instr) /* The registers are named differently in the reference manual for this instruction * but the following positions are correct */ - if (ARM_RM(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } if ((op1 & 0x1E) == 0x1C && (op2 & 0x3) == 0) { /* bfc bfi */ - if (ARM_RD(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; 
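/*
 * Editor's note (not part of the patch): a minimal, self-contained sketch of
 * how the reformatted BITS() macro above drives the decode tables in this
 * file. Feeding it 0xE59F0008 (ldr r0, [pc, #8]) yields exactly the fields
 * the decoder uses to classify a pc-relative load. Illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS(x, n, mask) (((x) >> (n)) & (mask))

int
main(void)
{
	uint32_t instr = 0xE59F0008;                  /* ldr r0, [pc, #8] */
	printf("cond=%x op1=%x L=%x rn=%x rd=%x\n",
	    (unsigned)BITS(instr, 28, 0xF),           /* 0xE: always */
	    (unsigned)BITS(instr, 25, 0x7),           /* 0x2: load/store word/byte */
	    (unsigned)BITS(instr, 20, 0x1),           /* 1: load */
	    (unsigned)BITS(instr, 16, 0xF),           /* 0xF: base register is the pc */
	    (unsigned)BITS(instr, 12, 0xF));          /* 0x0: destination is r0 */
	return 0;
}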
+ } return FASTTRAP_T_INV; } if (((op1 & 0x1E) == 0x1A || (op1 & 0x1E) == 0x1E) && ((op2 & 0x3) == 0x2)) { /* sbfx ubfx */ - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } @@ -270,57 +309,67 @@ int arm_media(uint32_t instr) } static -int arm_loadstore_wordbyte(uint32_t instr) +int +arm_loadstore_wordbyte(uint32_t instr) { /* Instrument PC relative load with immediate, ignore any other uses of the PC */ - int R = BITS(instr,25,0x1), L = BITS(instr,20,0x1); + int R = BITS(instr, 25, 0x1), L = BITS(instr, 20, 0x1); if (R == 1) { /* Three register load/store */ - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { /* Immediate load/store, but still do not support ldr pc, [pc...] */ - if (L == 1 && ARM_RN(instr) == REG_PC && ARM_RD(instr) != REG_PC) + if (L == 1 && ARM_RN(instr) == REG_PC && ARM_RD(instr) != REG_PC) { return FASTTRAP_T_LDR_PC_IMMED; + } - if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int arm_saturating(uint32_t instr) +int +arm_saturating(uint32_t instr) { - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_misc(uint32_t instr) +int +arm_misc(uint32_t instr) { - int op = BITS(instr,21,0x3), __unused op1 = BITS(instr,16,0xF), op2 = BITS(instr,4,0x7); + int op = BITS(instr, 21, 0x3), __unused op1 = BITS(instr, 16, 0xF), op2 = BITS(instr, 4, 0x7); - if (op2 == 1 && op == 1) + if (op2 == 1 && op == 1) { return FASTTRAP_T_BX_REG; + } /* We do not need to emulate BLX for entry/return probes; if we eventually support full offset * tracing, then we will. This is because BLX overwrites the link register, so a function that * can execute this as its first instruction is a special function indeed. */ - if (op2 == 0x5) + if (op2 == 0x5) { return arm_saturating(instr); + } return FASTTRAP_T_INV; } static -int arm_msr_hints(__unused uint32_t instr) +int +arm_msr_hints(__unused uint32_t instr) { /* These deal with the psr, not instrumented */ @@ -328,7 +377,8 @@ int arm_msr_hints(__unused uint32_t instr) } static -int arm_sync_primitive(__unused uint32_t instr) +int +arm_sync_primitive(__unused uint32_t instr) { /* TODO will instrumenting these interfere with any kernel usage of these instructions? 
*/ /* Don't instrument for now */ @@ -337,9 +387,10 @@ int arm_sync_primitive(__unused uint32_t instr) } static -int arm_extra_loadstore_unpriv(uint32_t instr) +int +arm_extra_loadstore_unpriv(uint32_t instr) { - int op = BITS(instr,20,0x1), __unused op2 = BITS(instr,5,0x3), immed = BITS(instr,22,0x1); + int op = BITS(instr, 20, 0x1), __unused op2 = BITS(instr, 5, 0x3), immed = BITS(instr, 22, 0x1); if (op == 0 && (op2 & 0x2) == 0x2) { /* Unpredictable or undefined */ @@ -347,91 +398,106 @@ int arm_extra_loadstore_unpriv(uint32_t instr) } if (immed == 1) { - if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } - + return FASTTRAP_T_INV; } static -int arm_extra_loadstore(uint32_t instr) +int +arm_extra_loadstore(uint32_t instr) { - int op1 = BITS(instr,20,0x1F); + int op1 = BITS(instr, 20, 0x1F); /* There are two variants, and we do not instrument either of them that use the PC */ if ((op1 & 0x4) == 0) { /* Variant 1, register */ - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { /* Variant 2, immediate */ - if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int arm_halfword_multiply(uint32_t instr) +int +arm_halfword_multiply(uint32_t instr) { /* Not all multiply instructions use all four registers. The ones that don't should have those * register locations set to 0, so we can test them anyway. */ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_multiply(uint32_t instr) +int +arm_multiply(uint32_t instr) { /* Not all multiply instructions use all four registers. The ones that don't should have those * register locations set to 0, so we can test them anyway. 
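/*
 * Editor's note: a small sketch (not from the patch) of why the blanket
 * four-register test in the multiply decoders above is safe. Encodings that
 * lack a register operand keep those bits should-be-zero, so the unused
 * field can never compare equal to REG_PC. Assumes REG_PC == 15 and that
 * the decoder's RD position for this class is bits 15:12, as the register
 * checks above imply.
 */
#include <stdint.h>
#include <assert.h>

#define BITS(x, n, mask)  (((x) >> (n)) & (mask))
#define REG_PC            15

int
main(void)
{
	uint32_t mul = 0xE0000291;                /* mul r0, r1, r2 */
	assert(BITS(mul, 12, 0xF) != REG_PC);     /* unused field reads as 0 */
	assert(BITS(mul, 16, 0xF) != REG_PC);     /* Rd (r0) */
	return 0;
}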
*/ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_dataproc_immed(uint32_t instr) +int +arm_dataproc_immed(uint32_t instr) { /* All these instructions are either two registers, or one register and have 0 where the other reg would be used */ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_dataproc_regshift(uint32_t instr) +int +arm_dataproc_regshift(uint32_t instr) { /* All these instructions are either four registers, or three registers and have 0 where there last reg would be used */ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_dataproc_reg(uint32_t instr) +int +arm_dataproc_reg(uint32_t instr) { - int op1 = BITS(instr,20,0x1F), op2 = BITS(instr,7,0x1F), op3 = BITS(instr,5,0x3); + int op1 = BITS(instr, 20, 0x1F), op2 = BITS(instr, 7, 0x1F), op3 = BITS(instr, 5, 0x3); if (op1 == 0x11 || op1 == 0x13 || op1 == 0x15 || op1 == 0x17) { /* These are comparison flag setting instructions and do not have RD */ - if (ARM_RN(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } @@ -440,96 +506,119 @@ int arm_dataproc_reg(uint32_t instr) * movs pc, reg is a privileged instruction so we don't instrument that variant. The s bit * is bit 0 of op1 and should be zero. 
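/*
 * Editor's note: a worked example (illustrative only) of the mov-pc-reg
 * special case discussed above. For 0xE1A0F001 (mov pc, r1), the fields
 * come out as op1 = 0x1A, op2 = 0, op3 = 0 with Rd = pc, so the decoder
 * reports FASTTRAP_T_MOV_PC_REG and the probe emulation can branch to the
 * value held in r1 instead of single-stepping.
 */
#include <stdint.h>
#include <assert.h>

#define BITS(x, n, mask) (((x) >> (n)) & (mask))
#define REG_PC 15

int
main(void)
{
	uint32_t instr = 0xE1A0F001;               /* mov pc, r1 */
	assert(BITS(instr, 20, 0x1F) == 0x1A);     /* op1: mov, S bit clear */
	assert(BITS(instr, 7, 0x1F) == 0);         /* op2: no shift amount */
	assert(BITS(instr, 5, 0x3) == 0);          /* op3: LSL */
	assert(BITS(instr, 12, 0xF) == REG_PC);    /* Rd is the pc */
	return 0;
}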
*/ - if (op1 == 0x1A && op2 == 0 && op3 == 0 && ARM_RD(instr) == REG_PC) + if (op1 == 0x1A && op2 == 0 && op3 == 0 && ARM_RD(instr) == REG_PC) { return FASTTRAP_T_MOV_PC_REG; + } /* Any instruction at this point is a three register instruction or two register instruction with RN = 0 */ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_dataproc_misc(uint32_t instr) +int +arm_dataproc_misc(uint32_t instr) { - int op = BITS(instr,25,0x1), op1 = BITS(instr,20,0x1F), op2 = BITS(instr,4,0xF); + int op = BITS(instr, 25, 0x1), op1 = BITS(instr, 20, 0x1F), op2 = BITS(instr, 4, 0xF); if (op == 0) { - if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0) + if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0) { return arm_dataproc_reg(instr); + } - if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1) + if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1) { return arm_dataproc_regshift(instr); + } - if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0) + if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0) { return arm_misc(instr); + } - if ((op1 & 0x19) == 0x19 && (op2 & 0x9) == 0x8) + if ((op1 & 0x19) == 0x19 && (op2 & 0x9) == 0x8) { return arm_halfword_multiply(instr); + } - if ((op1 & 0x10) == 0 && op2 == 0x9) + if ((op1 & 0x10) == 0 && op2 == 0x9) { return arm_multiply(instr); + } - if ((op1 & 0x10) == 0x10 && op2 == 0x9) + if ((op1 & 0x10) == 0x10 && op2 == 0x9) { return arm_sync_primitive(instr); + } - if ((op1 & 0x12) != 0x02 && (op2 == 0xB || (op2 & 0xD) == 0xD)) + if ((op1 & 0x12) != 0x02 && (op2 == 0xB || (op2 & 0xD) == 0xD)) { return arm_extra_loadstore(instr); + } - if ((op1 & 0x12) == 0x02 && (op2 == 0xB || (op2 & 0xD) == 0xD)) + if ((op1 & 0x12) == 0x02 && (op2 == 0xB || (op2 & 0xD) == 0xD)) { return arm_extra_loadstore_unpriv(instr); + } } else { - if ((op1 & 0x19) != 0x10) + if ((op1 & 0x19) != 0x10) { return arm_dataproc_immed(instr); + } if (op1 == 0x10) { /* 16 bit immediate load (mov (immed)) [encoding A2] */ - if (ARM_RD(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } if (op1 == 0x14) { /* high halfword 16 bit immediate load (movt) [encoding A1] */ - if (ARM_RD(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } - if ((op1 & 0x1B) == 0x12) + if ((op1 & 0x1B) == 0x12) { return arm_msr_hints(instr); + } } return FASTTRAP_T_INV; } -int dtrace_decode_arm(uint32_t instr) +int +dtrace_decode_arm(uint32_t instr) { - int cond = BITS(instr,28,0xF), op1 = BITS(instr,25,0x7), op = BITS(instr,4,0x1); + int cond = BITS(instr, 28, 0xF), op1 = BITS(instr, 25, 0x7), op = BITS(instr, 4, 0x1); - if (cond == 0xF) + if (cond == 0xF) { return arm_unconditional(instr); + } - if ((op1 & 0x6) == 0) + if ((op1 & 0x6) == 0) { return arm_dataproc_misc(instr); + } - if (op1 == 0x2) + if (op1 == 0x2) { return arm_loadstore_wordbyte(instr); + } - if (op1 == 0x3 && op == 0) + if (op1 == 0x3 && op == 0) { return arm_loadstore_wordbyte(instr); + } - if (op1 == 0x3 && op == 1) + if (op1 == 0x3 && op == 1) { return arm_media(instr); + } - if ((op1 & 0x6) == 0x4) + if ((op1 & 0x6) == 0x4) { return arm_branch_link_blockdata(instr); + } - if ((op1 & 0x6) == 0x6) + if ((op1 & 0x6) == 0x6) { return arm_syscall_coproc(instr); + } return FASTTRAP_T_INV; } @@ -540,20 +629,23 @@ int dtrace_decode_arm(uint32_t instr) */ static -int thumb16_cond_supervisor(uint16_t 
instr) +int +thumb16_cond_supervisor(uint16_t instr) { - int opcode = BITS(instr,8,0xF); + int opcode = BITS(instr, 8, 0xF); - if ((opcode & 0xE) != 0xE) + if ((opcode & 0xE) != 0xE) { return FASTTRAP_T_B_COND; + } return FASTTRAP_T_INV; } static -int thumb16_misc(uint16_t instr) +int +thumb16_misc(uint16_t instr) { - int opcode = BITS(instr,5,0x7F); + int opcode = BITS(instr, 5, 0x7F); if ((opcode & 0x70) == 0x30 || (opcode & 0x70) == 0x70) { /* setend, cps, breakpoint, or if-then, not instrumentable */ @@ -572,16 +664,18 @@ int thumb16_misc(uint16_t instr) } static -int thumb16_loadstore_single(__unused uint16_t instr) +int +thumb16_loadstore_single(__unused uint16_t instr) { /* These all access the low registers or SP only */ return FASTTRAP_T_COMMON; } static -int thumb16_data_special_and_branch(uint16_t instr) +int +thumb16_data_special_and_branch(uint16_t instr) { - int opcode = BITS(instr,6,0xF); + int opcode = BITS(instr, 6, 0xF); if (opcode == 0x4) { /* Unpredictable */ @@ -589,56 +683,66 @@ int thumb16_data_special_and_branch(uint16_t instr) } else if ((opcode & 0xC) == 0xC) { /* bx or blx */ /* Only instrument the bx */ - if ((opcode & 0x2) == 0) + if ((opcode & 0x2) == 0) { return FASTTRAP_T_BX_REG; + } return FASTTRAP_T_INV; } else { /* Data processing on high registers, only instrument mov pc, reg */ - if ((opcode & 0xC) == 0x8 && THUMB16_HRD(instr) == REG_PC) + if ((opcode & 0xC) == 0x8 && THUMB16_HRD(instr) == REG_PC) { return FASTTRAP_T_CPY_PC; + } - if (THUMB16_HRM(instr) != REG_PC && THUMB16_HRD(instr) != REG_PC) + if (THUMB16_HRM(instr) != REG_PC && THUMB16_HRD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int thumb16_data_proc(__unused uint16_t instr) +int +thumb16_data_proc(__unused uint16_t instr) { /* These all access the low registers only */ return FASTTRAP_T_COMMON; } static -int thumb16_shift_addsub_move_compare(__unused uint16_t instr) +int +thumb16_shift_addsub_move_compare(__unused uint16_t instr) { /* These all access the low registers only */ return FASTTRAP_T_COMMON; } static -int dtrace_decode_thumb16(uint16_t instr) +int +dtrace_decode_thumb16(uint16_t instr) { - int opcode = BITS(instr,10,0x3F); + int opcode = BITS(instr, 10, 0x3F); - if ((opcode & 0x30) == 0) + if ((opcode & 0x30) == 0) { return thumb16_shift_addsub_move_compare(instr); + } - if (opcode == 0x10) + if (opcode == 0x10) { return thumb16_data_proc(instr); + } - if (opcode == 0x11) + if (opcode == 0x11) { return thumb16_data_special_and_branch(instr); + } if ((opcode & 0x3E) == 0x12) { /* ldr (literal) */ return FASTTRAP_T_LDR_PC_IMMED; } - if ((opcode & 0x3C) == 0x14 || (opcode & 0x38) == 0x18 || (opcode & 0x38) == 0x20) + if ((opcode & 0x3C) == 0x14 || (opcode & 0x38) == 0x18 || (opcode & 0x38) == 0x20) { return thumb16_loadstore_single(instr); + } if ((opcode & 0x3E) == 0x28) { /* adr, uses the pc */ @@ -650,8 +754,9 @@ int dtrace_decode_thumb16(uint16_t instr) return FASTTRAP_T_COMMON; } - if ((opcode & 0x3C) == 0x2C) + if ((opcode & 0x3C) == 0x2C) { return thumb16_misc(instr); + } if ((opcode & 0x3E) == 0x30) { /* stm - can't access high registers */ @@ -680,11 +785,12 @@ int dtrace_decode_thumb16(uint16_t instr) */ static -int thumb32_coproc(uint16_t instr1, uint16_t instr2) +int +thumb32_coproc(uint16_t instr1, uint16_t instr2) { /* Instrument any VFP data processing instructions, ignore the rest */ - int op1 = BITS(instr1,4,0x3F), coproc = BITS(instr2,8,0xF), op = BITS(instr2,4,0x1); + int op1 = BITS(instr1, 4, 0x3F), coproc = BITS(instr2, 8, 
0xF), op = BITS(instr2, 4, 0x1); if ((op1 & 0x3E) == 0) { /* Undefined */ @@ -693,18 +799,20 @@ int thumb32_coproc(uint16_t instr1, uint16_t instr2) if ((coproc & 0xE) == 0xA || (op1 & 0x30) == 0x30) { /* VFP instruction */ - uint32_t instr = thumb32_instword_to_arm(instr1,instr2); + uint32_t instr = thumb32_instword_to_arm(instr1, instr2); if ((op1 & 0x30) == 0x30) { /* VFP data processing uses its own registers */ return FASTTRAP_T_COMMON; } - if ((op1 & 0x3A) == 0x02 || (op1 & 0x38) == 0x08 || (op1 & 0x30) == 0x10) + if ((op1 & 0x3A) == 0x02 || (op1 & 0x38) == 0x08 || (op1 & 0x30) == 0x10) { return vfp_loadstore(instr); + } - if ((op1 & 0x3E) == 0x04) + if ((op1 & 0x3E) == 0x04) { return vfp_64transfer(instr); + } if ((op1 & 0x30) == 0x20) { /* VFP data processing or 8, 16, or 32 bit move between ARM reg and VFP reg */ @@ -721,192 +829,229 @@ int thumb32_coproc(uint16_t instr1, uint16_t instr2) } static -int thumb32_longmultiply(uint16_t instr1, uint16_t instr2) +int +thumb32_longmultiply(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,4,0x7), op2 = BITS(instr2,4,0xF); + int op1 = BITS(instr1, 4, 0x7), op2 = BITS(instr2, 4, 0xF); if ((op1 == 1 && op2 == 0xF) || (op1 == 0x3 && op2 == 0xF)) { /* Three register instruction */ - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { /* Four register instruction */ - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && - THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && + THUMB32_RT(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int thumb32_multiply(uint16_t instr1, uint16_t instr2) +int +thumb32_multiply(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,4,0x7), op2 = BITS(instr2,4,0x3); + int op1 = BITS(instr1, 4, 0x7), op2 = BITS(instr2, 4, 0x3); if ((op1 == 0 && op2 == 1) || (op1 == 0x6 && (op2 & 0x2) == 0)) { - if (THUMB32_RT(instr1,instr2) == REG_PC) + if (THUMB32_RT(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } } - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_misc(uint16_t instr1, uint16_t instr2) +int +thumb32_misc(uint16_t instr1, uint16_t instr2) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_parallel_addsub_unsigned(uint16_t instr1, uint16_t instr2) +int +thumb32_parallel_addsub_unsigned(uint16_t instr1, uint16_t instr2) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return 
FASTTRAP_T_INV; } static -int thumb32_parallel_addsub_signed(uint16_t instr1, uint16_t instr2) +int +thumb32_parallel_addsub_signed(uint16_t instr1, uint16_t instr2) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_dataproc_reg(uint16_t instr1, uint16_t instr2) +int +thumb32_dataproc_reg(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,4,0xF), op2 = BITS(instr2,4,0xF); + int op1 = BITS(instr1, 4, 0xF), op2 = BITS(instr2, 4, 0xF); if (((0 <= op1) && (op1 <= 5)) && (op2 & 0x8) == 0x8) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } if ((op1 & 0x8) == 0 && op2 == 0) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } - if ((op1 & 0x8) == 0x8 && (op2 & 0xC) == 0) - return thumb32_parallel_addsub_signed(instr1,instr2); + if ((op1 & 0x8) == 0x8 && (op2 & 0xC) == 0) { + return thumb32_parallel_addsub_signed(instr1, instr2); + } - if ((op1 & 0x8) == 0x8 && (op2 & 0xC) == 0x4) - return thumb32_parallel_addsub_unsigned(instr1,instr2); + if ((op1 & 0x8) == 0x8 && (op2 & 0xC) == 0x4) { + return thumb32_parallel_addsub_unsigned(instr1, instr2); + } - if ((op1 & 0xC) == 0x8 && (op2 & 0xC) == 0x8) - return thumb32_misc(instr1,instr2); + if ((op1 & 0xC) == 0x8 && (op2 & 0xC) == 0x8) { + return thumb32_misc(instr1, instr2); + } return FASTTRAP_T_INV; } static -int thumb32_dataproc_regshift(uint16_t instr1, uint16_t instr2) +int +thumb32_dataproc_regshift(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,5,0xF), S = BITS(instr1,4,0x1); + int op = BITS(instr1, 5, 0xF), S = BITS(instr1, 4, 0x1); if (op == 0 || op == 0x4 || op == 0x8 || op == 0xD) { /* These become test instructions if S is 1 and Rd is PC, otherwise they are data instructions. */ if (S == 1) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && - THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && + THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } } else if (op == 0x2 || op == 0x3) { /* These become moves if RN is PC, otherwise they are data insts. 
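/*
 * Editor's note: illustrative sketch only. In the Thumb-2 data-processing
 * (shifted register) space handled above, op values 0x0/0x4/0x8/0xD with
 * S = 1 and Rd = pc encode tst/teq/cmn/cmp, which is why the flag-setting
 * branch drops the Rd check. Field positions follow the BITS() usage in
 * thumb32_dataproc_regshift(); the encoding is tst.w r0, r1.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS(x, n, mask) (((x) >> (n)) & (mask))

int
main(void)
{
	uint16_t hw1 = 0xEA10, hw2 = 0x0F01;       /* tst.w r0, r1 */
	printf("op=%x S=%d rd=%x (rd==15 marks a flag-setting test)\n",
	    (unsigned)BITS(hw1, 5, 0xF),           /* 0x0: AND family */
	    (int)BITS(hw1, 4, 0x1),                /* S = 1 */
	    (unsigned)BITS(hw2, 8, 0xF));          /* Rd = 0xF */
	return 0;
}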
We don't instrument mov pc, reg here */ - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { /* Normal three register instruction */ - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int thumb32_store_single(uint16_t instr1, uint16_t instr2) +int +thumb32_store_single(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,5,0x7), op2 = BITS(instr2,6,0x3F); + int op1 = BITS(instr1, 5, 0x7), op2 = BITS(instr2, 6, 0x3F); /* Do not support any use of the pc yet */ if ((op1 == 0 || op1 == 1 || op1 == 2) && (op2 & 0x20) == 0) { /* str (register) uses RM */ - if (THUMB32_RM(instr1,instr2) == REG_PC) + if (THUMB32_RM(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } } - if (THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RT(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_loadbyte_memhint(uint16_t instr1, uint16_t instr2) +int +thumb32_loadbyte_memhint(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,7,0x3), __unused op2 = BITS(instr2,6,0x3F); + int op1 = BITS(instr1, 7, 0x3), __unused op2 = BITS(instr2, 6, 0x3F); /* Do not support any use of the pc yet */ - if ((op1 == 0 || op1 == 0x2) && THUMB32_RM(instr1,instr2) == REG_PC) + if ((op1 == 0 || op1 == 0x2) && THUMB32_RM(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } - if (THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RT(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_loadhalfword_memhint(uint16_t instr1, uint16_t instr2) +int +thumb32_loadhalfword_memhint(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,7,0x3), op2 = BITS(instr2,6,0x3F); + int op1 = BITS(instr1, 7, 0x3), op2 = BITS(instr2, 6, 0x3F); /* Do not support any use of the PC yet */ - if (op1 == 0 && op2 == 0 && THUMB32_RM(instr1,instr2) == REG_PC) + if (op1 == 0 && op2 == 0 && THUMB32_RM(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } - if (THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RT(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_loadword(uint16_t instr1, uint16_t instr2) +int +thumb32_loadword(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,7,0x3), op2 = BITS(instr2,6,0x3F); + int op1 = BITS(instr1, 7, 0x3), op2 = BITS(instr2, 6, 0x3F); - if ((op1 & 0x2) == 0 && THUMB32_RN(instr1,instr2) == REG_PC && THUMB32_RT(instr1,instr2) != REG_PC) + if ((op1 & 0x2) == 0 && THUMB32_RN(instr1, instr2) == REG_PC && THUMB32_RT(instr1, instr2) != REG_PC) { return FASTTRAP_T_LDR_PC_IMMED; + } if (op1 == 0 && op2 == 0) { /* ldr (register) uses an additional reg */ - if (THUMB32_RM(instr1,instr2) == REG_PC) + if (THUMB32_RM(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } } - if (THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RT(instr1, instr2) != REG_PC && 
THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_loadstore_double_exclusive_table(__unused uint16_t instr1, __unused uint16_t instr2) +int +thumb32_loadstore_double_exclusive_table(__unused uint16_t instr1, __unused uint16_t instr2) { /* Don't instrument any of these */ @@ -914,9 +1059,10 @@ int thumb32_loadstore_double_exclusive_table(__unused uint16_t instr1, __unused } static -int thumb32_loadstore_multiple(uint16_t instr1, uint16_t instr2) +int +thumb32_loadstore_multiple(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,7,0x3), L = BITS(instr1,4,0x1), uses_pc = BITS(instr2,15,0x1), uses_lr = BITS(instr2,14,0x1); + int op = BITS(instr1, 7, 0x3), L = BITS(instr1, 4, 0x1), uses_pc = BITS(instr2, 15, 0x1), uses_lr = BITS(instr2, 14, 0x1); if (op == 0 || op == 0x3) { /* Privileged instructions: srs, rfe */ @@ -924,174 +1070,208 @@ int thumb32_loadstore_multiple(uint16_t instr1, uint16_t instr2) } /* Only emulate a use of the pc if it's a return from function: ldmia sp!, { ... pc }, aka pop { ... pc } */ - if (op == 0x1 && L == 1 && THUMB32_RN(instr1,instr2) == REG_SP && uses_pc == 1) + if (op == 0x1 && L == 1 && THUMB32_RN(instr1, instr2) == REG_SP && uses_pc == 1) { return FASTTRAP_T_LDM_PC; + } /* stmia sp!, { ... lr }, aka push { ... lr } doesn't touch the pc, but it is very common, so special case it */ - if (op == 0x2 && L == 0 && THUMB32_RN(instr1,instr2) == REG_SP && uses_lr == 1) + if (op == 0x2 && L == 0 && THUMB32_RN(instr1, instr2) == REG_SP && uses_lr == 1) { return FASTTRAP_T_STM_LR; + } - if (THUMB32_RN(instr1,instr2) != REG_PC && uses_pc == 0) + if (THUMB32_RN(instr1, instr2) != REG_PC && uses_pc == 0) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_misc_control(__unused uint16_t instr1, __unused uint16_t instr2) +int +thumb32_misc_control(__unused uint16_t instr1, __unused uint16_t instr2) { /* Privileged, and instructions dealing with ThumbEE */ return FASTTRAP_T_INV; } static -int thumb32_cps_hints(__unused uint16_t instr1, __unused uint16_t instr2) +int +thumb32_cps_hints(__unused uint16_t instr1, __unused uint16_t instr2) { /* Privileged */ return FASTTRAP_T_INV; } static -int thumb32_b_misc_control(uint16_t instr1, uint16_t instr2) +int +thumb32_b_misc_control(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,4,0x7F), op1 = BITS(instr2,12,0x7), __unused op2 = BITS(instr2,8,0xF); + int op = BITS(instr1, 4, 0x7F), op1 = BITS(instr2, 12, 0x7), __unused op2 = BITS(instr2, 8, 0xF); if ((op1 & 0x5) == 0) { - if ((op & 0x38) != 0x38) + if ((op & 0x38) != 0x38) { return FASTTRAP_T_B_COND; + } - if (op == 0x3A) - return thumb32_cps_hints(instr1,instr2); + if (op == 0x3A) { + return thumb32_cps_hints(instr1, instr2); + } - if (op == 0x3B) - return thumb32_misc_control(instr1,instr2); + if (op == 0x3B) { + return thumb32_misc_control(instr1, instr2); + } } - if ((op1 & 0x5) == 1) + if ((op1 & 0x5) == 1) { return FASTTRAP_T_B_UNCOND; + } return FASTTRAP_T_INV; } static -int thumb32_dataproc_plain_immed(uint16_t instr1, uint16_t instr2) +int +thumb32_dataproc_plain_immed(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,4,0x1F); + int op = BITS(instr1, 4, 0x1F); if (op == 0x04 || op == 0x0C || op == 0x16) { /* mov, movt, bfi, bfc */ /* These use only RD */ - if (THUMB32_RD(instr1,instr2) != REG_PC) + if (THUMB32_RD(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { - if (THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) 
+ if (THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int thumb32_dataproc_mod_immed(uint16_t instr1, uint16_t instr2) +int +thumb32_dataproc_mod_immed(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,5,0xF), S = BITS(instr1,4,0x1); + int op = BITS(instr1, 5, 0xF), S = BITS(instr1, 4, 0x1); if (op == 0x2 || op == 0x3) { /* These allow REG_PC in RN, but it doesn't mean use the PC! */ - if (THUMB32_RD(instr1,instr2) != REG_PC) + if (THUMB32_RD(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } if (op == 0 || op == 0x4 || op == 0x8 || op == 0xD) { /* These are test instructions, if the sign bit is set and RD is the PC. */ - if (S && THUMB32_RD(instr1,instr2) == REG_PC) + if (S && THUMB32_RD(instr1, instr2) == REG_PC) { return FASTTRAP_T_COMMON; + } } - if (THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int dtrace_decode_thumb32(uint16_t instr1, uint16_t instr2) +int +dtrace_decode_thumb32(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,11,0x3), op2 = BITS(instr1,4,0x7F), op = BITS(instr2,15,0x1); + int op1 = BITS(instr1, 11, 0x3), op2 = BITS(instr1, 4, 0x7F), op = BITS(instr2, 15, 0x1); if (op1 == 0x1) { - if ((op2 & 0x64) == 0) - return thumb32_loadstore_multiple(instr1,instr2); + if ((op2 & 0x64) == 0) { + return thumb32_loadstore_multiple(instr1, instr2); + } - if ((op2 & 0x64) == 0x04) - return thumb32_loadstore_double_exclusive_table(instr1,instr2); + if ((op2 & 0x64) == 0x04) { + return thumb32_loadstore_double_exclusive_table(instr1, instr2); + } - if ((op2 & 0x60) == 0x20) - return thumb32_dataproc_regshift(instr1,instr2); + if ((op2 & 0x60) == 0x20) { + return thumb32_dataproc_regshift(instr1, instr2); + } - if ((op2 & 0x40) == 0x40) - return thumb32_coproc(instr1,instr2); + if ((op2 & 0x40) == 0x40) { + return thumb32_coproc(instr1, instr2); + } } if (op1 == 0x2) { - if ((op2 & 0x20) == 0 && op == 0) - return thumb32_dataproc_mod_immed(instr1,instr2); + if ((op2 & 0x20) == 0 && op == 0) { + return thumb32_dataproc_mod_immed(instr1, instr2); + } - if ((op2 & 0x20) == 0x20 && op == 0) - return thumb32_dataproc_plain_immed(instr1,instr2); + if ((op2 & 0x20) == 0x20 && op == 0) { + return thumb32_dataproc_plain_immed(instr1, instr2); + } - if (op == 1) - return thumb32_b_misc_control(instr1,instr2); + if (op == 1) { + return thumb32_b_misc_control(instr1, instr2); + } } if (op1 == 0x3) { - if ((op2 & 0x71) == 0) - return thumb32_store_single(instr1,instr2); + if ((op2 & 0x71) == 0) { + return thumb32_store_single(instr1, instr2); + } if ((op2 & 0x71) == 0x10) { - return vfp_struct_loadstore(thumb32_instword_to_arm(instr1,instr2)); + return vfp_struct_loadstore(thumb32_instword_to_arm(instr1, instr2)); } - if ((op2 & 0x67) == 0x01) - return thumb32_loadbyte_memhint(instr1,instr2); + if ((op2 & 0x67) == 0x01) { + return thumb32_loadbyte_memhint(instr1, instr2); + } - if ((op2 & 0x67) == 0x03) - return thumb32_loadhalfword_memhint(instr1,instr2); + if ((op2 & 0x67) == 0x03) { + return thumb32_loadhalfword_memhint(instr1, instr2); + } - if ((op2 & 0x67) == 0x05) - return thumb32_loadword(instr1,instr2); + if ((op2 & 0x67) == 0x05) { + return thumb32_loadword(instr1, instr2); + } if ((op2 & 0x67) == 0x07) { /* Undefined instruction */ return FASTTRAP_T_INV; } - if ((op2 & 0x70) == 0x20) - 
return thumb32_dataproc_reg(instr1,instr2); + if ((op2 & 0x70) == 0x20) { + return thumb32_dataproc_reg(instr1, instr2); + } - if ((op2 & 0x78) == 0x30) - return thumb32_multiply(instr1,instr2); + if ((op2 & 0x78) == 0x30) { + return thumb32_multiply(instr1, instr2); + } - if ((op2 & 0x78) == 0x38) - return thumb32_longmultiply(instr1,instr2); + if ((op2 & 0x78) == 0x38) { + return thumb32_longmultiply(instr1, instr2); + } - if ((op2 & 0x40) == 0x40) - return thumb32_coproc(instr1,instr2); + if ((op2 & 0x40) == 0x40) { + return thumb32_coproc(instr1, instr2); + } } return FASTTRAP_T_INV; } -int dtrace_decode_thumb(uint32_t instr) +int +dtrace_decode_thumb(uint32_t instr) { uint16_t* pInstr = (uint16_t*) &instr; uint16_t hw1 = pInstr[0], hw2 = pInstr[1]; - int size = BITS(hw1,11,0x1F); + int size = BITS(hw1, 11, 0x1F); - if (size == 0x1D || size == 0x1E || size == 0x1F) - return dtrace_decode_thumb32(hw1,hw2); - else + if (size == 0x1D || size == 0x1E || size == 0x1F) { + return dtrace_decode_thumb32(hw1, hw2); + } else { return dtrace_decode_thumb16(hw1); + } } - diff --git a/bsd/dev/arm/dtrace_isa.c b/bsd/dev/arm/dtrace_isa.c index 07397b4b8..23d09f6a0 100644 --- a/bsd/dev/arm/dtrace_isa.c +++ b/bsd/dev/arm/dtrace_isa.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from - * mach/ppc/thread_status.h */ +#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from + * mach/ppc/thread_status.h */ #include #include @@ -46,7 +46,7 @@ #include #include #include -#include /* for thread_wakeup() */ +#include /* for thread_wakeup() */ #include #include #include @@ -57,8 +57,8 @@ extern struct arm_saved_state *find_kern_regs(thread_t); extern dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ typedef arm_saved_state_t savearea_t; -extern lck_attr_t *dtrace_lck_attr; -extern lck_grp_t *dtrace_lck_grp; +extern lck_attr_t *dtrace_lck_attr; +extern lck_grp_t *dtrace_lck_grp; int dtrace_arm_condition_true(int condition, int cpsr); @@ -69,9 +69,9 @@ inline void dtrace_membar_producer(void) { #if __ARM_SMP__ - __asm__ volatile("dmb ish" : : : "memory"); + __asm__ volatile ("dmb ish" : : : "memory"); #else - __asm__ volatile("nop" : : : "memory"); + __asm__ volatile ("nop" : : : "memory"); #endif } @@ -79,9 +79,9 @@ inline void dtrace_membar_consumer(void) { #if __ARM_SMP__ - __asm__ volatile("dmb ish" : : : "memory"); + __asm__ volatile ("dmb ish" : : : "memory"); #else - __asm__ volatile("nop" : : : "memory"); + __asm__ volatile ("nop" : : : "memory"); #endif } @@ -97,7 +97,7 @@ dtrace_getipl(void) * in osfmk/kern/cpu_data.h */ /* return get_interrupt_level(); */ - return (ml_at_interrupt_context() ? 1 : 0); + return ml_at_interrupt_context() ? 1 : 0; } #if __ARM_SMP__ @@ -119,11 +119,13 @@ xcRemote(void *foo) { xcArg_t *pArg = (xcArg_t *) foo; - if (pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL) - (pArg->f) (pArg->arg); + if (pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL) { + (pArg->f)(pArg->arg); + } - if (hw_atomic_sub(&dt_xc_sync, 1) == 0) + if (hw_atomic_sub(&dt_xc_sync, 1) == 0) { thread_wakeup((event_t) &dt_xc_sync); + } } #endif @@ -178,12 +180,12 @@ dtrace_getreg(struct regs * savearea, uint_t reg) struct arm_saved_state *regs = (struct arm_saved_state *) savearea; if (regs == NULL) { DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - return (0); + return 0; } /* beyond register limit? 
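/*
 * Editor's note: an illustrative reconstruction of the range check that
 * dtrace_getreg() performs here -- validate the register index against the
 * saved-state array before dereferencing, and flag an illegal operation
 * otherwise. The array size and the illop flag are stand-ins for
 * ARM_SAVED_STATE32_COUNT and CPU_DTRACE_ILLOP; sketch only.
 */
#include <stdint.h>
#include <stdio.h>

#define NREGS 17    /* assumed ARM_SAVED_STATE32_COUNT, illustration only */

static uint64_t
getreg_checked(const uint32_t saved[NREGS], unsigned reg, int *illop)
{
	if (reg > NREGS - 1) {      /* beyond register limit? */
		*illop = 1;         /* caller would set CPU_DTRACE_ILLOP */
		return 0;
	}
	return (uint64_t)saved[reg];
}

int
main(void)
{
	uint32_t regs[NREGS] = { 0 };
	int illop = 0;
	regs[3] = 0xDEAD;
	printf("r3=%llx\n", (unsigned long long)getreg_checked(regs, 3, &illop));
	getreg_checked(regs, 42, &illop);
	printf("illop=%d\n", illop);
	return 0;
}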
*/ if (reg > ARM_SAVED_STATE32_COUNT - 1) { DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - return (0); + return 0; } return (uint64_t) ((unsigned int *) (&(regs->r)))[reg]; @@ -193,10 +195,10 @@ dtrace_getreg(struct regs * savearea, uint_t reg) static int dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, - user_addr_t sp) + user_addr_t sp) { int ret = 0; - + ASSERT(pcstack == NULL || pcstack_limit > 0); while (pc != 0) { @@ -204,18 +206,20 @@ dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, if (pcstack != NULL) { *pcstack++ = (uint64_t) pc; pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { break; + } } - if (sp == 0) + if (sp == 0) { break; + } pc = dtrace_fuword32((sp + RETURN_OFFSET)); sp = dtrace_fuword32(sp); } - return (ret); + return ret; } void @@ -224,30 +228,35 @@ dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit) thread_t thread = current_thread(); savearea_t *regs; user_addr_t pc, sp; - volatile uint16_t *flags = (volatile uint16_t *) & cpu_core[CPU->cpu_id].cpuc_dtrace_flags; + volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; int n; - if (*flags & CPU_DTRACE_FAULT) + if (*flags & CPU_DTRACE_FAULT) { return; + } - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } /* * If there's no user context we still need to zero the stack. */ - if (thread == NULL) + if (thread == NULL) { goto zero; + } regs = (savearea_t *) find_user_regs(thread); - if (regs == NULL) + if (regs == NULL) { goto zero; + } *pcstack++ = (uint64_t)dtrace_proc_selfpid(); pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } pc = regs->pc; sp = regs->sp; @@ -255,8 +264,9 @@ dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit) if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { *pcstack++ = (uint64_t) pc; pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } pc = regs->lr; } @@ -270,8 +280,9 @@ dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit) pcstack_limit -= n; zero: - while (pcstack_limit-- > 0) + while (pcstack_limit-- > 0) { *pcstack++ = 0ULL; + } } int @@ -282,15 +293,18 @@ dtrace_getustackdepth(void) user_addr_t pc, sp; int n = 0; - if (thread == NULL) + if (thread == NULL) { return 0; + } - if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) - return (-1); + if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) { + return -1; + } regs = (savearea_t *) find_user_regs(thread); - if (regs == NULL) + if (regs == NULL) { return 0; + } pc = regs->pc; sp = regs->sp; @@ -300,7 +314,7 @@ dtrace_getustackdepth(void) pc = regs->lr; } - /* + /* * Note that unlike ppc, the arm code does not use * CPU_DTRACE_USTACK_FP. 
This is because arm always * traces from the sp, even in syscall/profile/fbt @@ -309,7 +323,7 @@ dtrace_getustackdepth(void) n += dtrace_getustack_common(NULL, 0, pc, regs->r[7]); - return (n); + return n; } void @@ -319,40 +333,45 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) thread_t thread = current_thread(); savearea_t *regs; user_addr_t pc, sp; - - volatile uint16_t *flags = (volatile uint16_t *) & cpu_core[CPU->cpu_id].cpuc_dtrace_flags; + + volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; #if 0 uintptr_t oldcontext; size_t s1, s2; #endif - if (*flags & CPU_DTRACE_FAULT) + if (*flags & CPU_DTRACE_FAULT) { return; + } - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } - /* + /* * If there's no user context we still need to zero the stack. */ - if (thread == NULL) + if (thread == NULL) { goto zero; + } regs = (savearea_t *) find_user_regs(thread); - if (regs == NULL) + if (regs == NULL) { goto zero; - + } + *pcstack++ = (uint64_t)dtrace_proc_selfpid(); pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; - + } + pc = regs->pc; sp = regs->sp; -#if 0 /* XXX signal stack crawl */ +#if 0 /* XXX signal stack crawl */ oldcontext = lwp->lwp_oldcontext; if (p->p_model == DATAMODEL_NATIVE) { @@ -368,8 +387,9 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) *pcstack++ = (uint64_t) pc; *fpstack++ = 0; pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } pc = dtrace_fuword32(sp); } @@ -377,10 +397,11 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) *pcstack++ = (uint64_t) pc; *fpstack++ = sp; pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { break; + } -#if 0 /* XXX signal stack crawl */ +#if 0 /* XXX signal stack crawl */ if (oldcontext == sp + s1 || oldcontext == sp + s2) { if (p->p_model == DATAMODEL_NATIVE) { ucontext_t *ucp = (ucontext_t *) oldcontext; @@ -421,13 +442,14 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) } zero: - while (pcstack_limit-- > 0) + while (pcstack_limit-- > 0) { *pcstack++ = 0ULL; + } } void dtrace_getpcstack(pc_t * pcstack, int pcstack_limit, int aframes, - uint32_t * intrpc) + uint32_t * intrpc) { struct frame *fp = (struct frame *) __builtin_frame_address(0); struct frame *nextfp, *minfp, *stacktop; @@ -437,17 +459,19 @@ dtrace_getpcstack(pc_t * pcstack, int pcstack_limit, int aframes, uintptr_t pc; uintptr_t caller = CPU->cpu_dtrace_caller; - if ((on_intr = CPU_ON_INTR(CPU)) != 0) + if ((on_intr = CPU_ON_INTR(CPU)) != 0) { stacktop = (struct frame *) dtrace_get_cpu_int_stack_top(); - else + } else { stacktop = (struct frame *) (dtrace_get_kernel_stack(current_thread()) + kernel_stack_size); + } minfp = fp; aframes++; - if (intrpc != NULL && depth < pcstack_limit) + if (intrpc != NULL && depth < pcstack_limit) { pcstack[depth++] = (pc_t) intrpc; + } while (depth < pcstack_limit) { nextfp = *(struct frame **) fp; @@ -500,13 +524,15 @@ dtrace_getpcstack(pc_t * pcstack, int pcstack_limit, int aframes, caller = (uintptr_t)NULL; } } else { - if (depth < pcstack_limit) + if (depth < pcstack_limit) { pcstack[depth++] = (pc_t) pc; + } } if (last) { - while (depth < pcstack_limit) + while (depth < pcstack_limit) { pcstack[depth++] = (pc_t) NULL; + } return; } fp = nextfp; @@ -519,10 +545,11 @@ dtrace_instr_size(uint32_t instr, int thumb_mode) { if (thumb_mode) { uint16_t instr16 = *(uint16_t*) &instr; - if 
(((instr16 >> 11) & 0x1F) > 0x1C) + if (((instr16 >> 11) & 0x1F) > 0x1C) { return 4; - else + } else { return 2; + } } else { return 4; } @@ -534,7 +561,7 @@ dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vs #pragma unused(arg, aframes, mstate, vstate) #if 0 /* XXX ARMTODO */ - uint64_t val; + uint64_t val; uintptr_t *fp = (uintptr_t *)__builtin_frame_address(0); uintptr_t *stack; uintptr_t pc; @@ -545,23 +572,23 @@ dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vs pc = fp[1]; if (dtrace_invop_callsite_pre != NULL - && pc > (uintptr_t)dtrace_invop_callsite_pre - && pc <= (uintptr_t)dtrace_invop_callsite_post) { - /* - * If we pass through the invalid op handler, we will - * use the pointer that it passed to the stack as the - * second argument to dtrace_invop() as the pointer to - * the frame we're hunting for. - */ - - stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */ - fp = (struct frame *)stack[1]; /* Grab *second* argument */ - stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */ - DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - val = (uint64_t)(stack[arg]); - DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return val; - } + && pc > (uintptr_t)dtrace_invop_callsite_pre + && pc <= (uintptr_t)dtrace_invop_callsite_post) { + /* + * If we pass through the invalid op handler, we will + * use the pointer that it passed to the stack as the + * second argument to dtrace_invop() as the pointer to + * the frame we're hunting for. + */ + + stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */ + fp = (struct frame *)stack[1]; /* Grab *second* argument */ + stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */ + DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); + val = (uint64_t)(stack[arg]); + DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); + return val; + } } /* @@ -573,14 +600,14 @@ dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vs DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); val = *(((uint64_t *)stack) + arg); /* dtrace_probe arguments arg0 .. arg4 are 64bits wide */ DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return (val); + return val; #endif return 0xfeedfacedeafbeadLL; } void dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which, - int fltoffs, int fault, uint64_t illval) + int fltoffs, int fault, uint64_t illval) { /* XXX ARMTODO */ /* @@ -595,13 +622,14 @@ void dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit)) { /* XXX ARMTODO check copied from ppc/x86*/ - /* + /* * "base" is the smallest toxic address in the range, "limit" is the first * VALID address greater than "base". - */ + */ func(0x0, VM_MIN_KERNEL_ADDRESS); - if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0) - func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0); + if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0) { + func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0); + } } int @@ -613,29 +641,30 @@ dtrace_arm_condition_true(int cond, int cpsr) cf = (cpsr & PSR_CF) ? 1 : 0, vf = (cpsr & PSR_VF) ? 
1 : 0; - switch(cond) { - case 0: taken = zf; break; - case 1: taken = !zf; break; - case 2: taken = cf; break; - case 3: taken = !cf; break; - case 4: taken = nf; break; - case 5: taken = !nf; break; - case 6: taken = vf; break; - case 7: taken = !vf; break; - case 8: taken = (cf && !zf); break; - case 9: taken = (!cf || zf); break; - case 10: taken = (nf == vf); break; - case 11: taken = (nf != vf); break; - case 12: taken = (!zf && (nf == vf)); break; - case 13: taken = (zf || (nf != vf)); break; - case 14: taken = 1; break; - case 15: taken = 1; break; /* always "true" for ARM, unpredictable for THUMB. */ + switch (cond) { + case 0: taken = zf; break; + case 1: taken = !zf; break; + case 2: taken = cf; break; + case 3: taken = !cf; break; + case 4: taken = nf; break; + case 5: taken = !nf; break; + case 6: taken = vf; break; + case 7: taken = !vf; break; + case 8: taken = (cf && !zf); break; + case 9: taken = (!cf || zf); break; + case 10: taken = (nf == vf); break; + case 11: taken = (nf != vf); break; + case 12: taken = (!zf && (nf == vf)); break; + case 13: taken = (zf || (nf != vf)); break; + case 14: taken = 1; break; + case 15: taken = 1; break; /* always "true" for ARM, unpredictable for THUMB. */ } return taken; } -void dtrace_flush_caches(void) +void +dtrace_flush_caches(void) { /* TODO There were some problems with flushing just the cache line that had been modified. * For now, we'll flush the entire cache, until we figure out how to flush just the patched block. diff --git a/bsd/dev/arm/dtrace_subr_arm.c b/bsd/dev/arm/dtrace_subr_arm.c index ab0bc4820..f29583129 100644 --- a/bsd/dev/arm/dtrace_subr_arm.c +++ b/bsd/dev/arm/dtrace_subr_arm.c @@ -41,7 +41,7 @@ #include #include -int (*dtrace_pid_probe_ptr) (arm_saved_state_t *); +int (*dtrace_pid_probe_ptr)(arm_saved_state_t *); int (*dtrace_return_probe_ptr) (arm_saved_state_t *); kern_return_t @@ -88,12 +88,12 @@ dtrace_user_probe(arm_saved_state_t *regs, unsigned int instr) */ if (step == 0) { /* - * APPLE NOTE: We're returning KERN_FAILURE, which causes + * APPLE NOTE: We're returning KERN_FAILURE, which causes * the generic signal handling code to take over, which will effectively * deliver a EXC_BAD_INSTRUCTION to the user process. */ return KERN_FAILURE; - } + } /* * If we hit this trap unrelated to a return probe, we're @@ -113,8 +113,9 @@ dtrace_user_probe(arm_saved_state_t *regs, unsigned int instr) rwp = &CPU->cpu_ft_lock; lck_rw_lock_shared(rwp); - if (dtrace_return_probe_ptr != NULL) + if (dtrace_return_probe_ptr != NULL) { (void) (*dtrace_return_probe_ptr)(regs); + } lck_rw_unlock_shared(rwp); regs->pc = npc; diff --git a/bsd/dev/arm/fasttrap_isa.c b/bsd/dev/arm/fasttrap_isa.c index 07d41a228..c45a95288 100644 --- a/bsd/dev/arm/fasttrap_isa.c +++ b/bsd/dev/arm/fasttrap_isa.c @@ -33,7 +33,7 @@ #ifdef KERNEL #ifndef _KERNEL -#define _KERNEL /* Solaris vs. Darwin */ +#define _KERNEL /* Solaris vs. 
Darwin */ #endif #endif @@ -88,13 +88,13 @@ extern int dtrace_decode_thumb(uint32_t instr); #define THUMB_INSTR(x) (*(uint16_t*) &(x)) -#define SIGNEXTEND(x,v) ((((int) (x)) << (32-(v))) >> (32-(v))) -#define ALIGNADDR(x,v) (((x) >> (v)) << (v)) +#define SIGNEXTEND(x, v) ((((int) (x)) << (32-(v))) >> (32-(v))) +#define ALIGNADDR(x, v) (((x) >> (v)) << (v)) #define GETITSTATE(x) ((((x) >> 8) & 0xFC) | (((x) >> 25) & 0x3)) #define ISLASTINIT(x) (((x) & 0xF) == 8) -#define SET16(x,w) *((uint16_t*) (x)) = (w) -#define SET32(x,w) *((uint32_t*) (x)) = (w) +#define SET16(x, w) *((uint16_t*) (x)) = (w) +#define SET32(x, w) *((uint32_t*) (x)) = (w) #define IS_ARM_NOP(x) ((x) == 0xE1A00000) /* Marker for is-enabled probes */ @@ -115,7 +115,7 @@ extern int dtrace_arm_condition_true(int cond, int cpsr); int fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, - user_addr_t pc, fasttrap_probe_type_t type) + user_addr_t pc, fasttrap_probe_type_t type) { #pragma unused(type) uint32_t instr; @@ -128,12 +128,13 @@ fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, * pages, we potentially read the instruction in two parts. If the * second part fails, we just zero out that part of the instruction. */ - /* + /* * APPLE NOTE: Of course, we do not have a P_PR_LOCK, so this is racey... - */ + */ - if (uread(p, &instr, 4, pc) != 0) - return (-1); + if (uread(p, &instr, 4, pc) != 0) { + return -1; + } /* We want &instr to always point to the saved instruction, so just copy the * whole thing When cast to a pointer to a uint16_t, that will give us a @@ -142,37 +143,37 @@ fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, tp->ftt_instr = instr; if (tp->ftt_fntype != FASTTRAP_FN_DONE_INIT) { - switch(tp->ftt_fntype) { - case FASTTRAP_FN_UNKNOWN: - /* Can't instrument without any information. We can add some heuristics later if necessary. */ - return (-1); - - case FASTTRAP_FN_USDT: - if (IS_ARM_NOP(instr) || IS_ARM_IS_ENABLED(instr)) { - tp->ftt_thumb = 0; - } else if (IS_THUMB_NOP(THUMB_INSTR(instr)) || IS_THUMB_IS_ENABLED(THUMB_INSTR(instr))) { - tp->ftt_thumb = 1; - } else { - /* Shouldn't reach here - this means we don't recognize - * the instruction at one of the USDT probe locations - */ - return (-1); - } - tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; - break; + switch (tp->ftt_fntype) { + case FASTTRAP_FN_UNKNOWN: + /* Can't instrument without any information. We can add some heuristics later if necessary. 
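/*
 * Editor's note: a sketch of the USDT marker probing done in the switch
 * below. The ARM nop marker 0xE1A00000 (mov r0, r0) is taken from this
 * file's IS_ARM_NOP(); 0x46C0 (mov r8, r8) as the Thumb nop is an assumed
 * value for illustration. Whichever marker matches decides tp->ftt_thumb.
 */
#include <stdint.h>
#include <stdio.h>

#define IS_ARM_NOP(x)    ((x) == 0xE1A00000)
#define IS_THUMB_NOP(x)  ((x) == 0x46C0)     /* assumed marker value */

static int
classify_usdt_site(uint32_t instr, int *thumb)
{
	if (IS_ARM_NOP(instr)) {
		*thumb = 0;
		return 0;
	}
	if (IS_THUMB_NOP((uint16_t)instr)) {     /* low halfword, like THUMB_INSTR() */
		*thumb = 1;
		return 0;
	}
	return -1;                               /* unrecognized probe site */
}

int
main(void)
{
	int thumb;
	printf("arm: %d thumb=%d\n", classify_usdt_site(0xE1A00000, &thumb), thumb);
	printf("thumb: %d thumb=%d\n", classify_usdt_site(0xBF0046C0, &thumb), thumb);
	return 0;
}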
*/ + return -1; - case FASTTRAP_FN_ARM: + case FASTTRAP_FN_USDT: + if (IS_ARM_NOP(instr) || IS_ARM_IS_ENABLED(instr)) { tp->ftt_thumb = 0; - tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; - break; - - case FASTTRAP_FN_THUMB: + } else if (IS_THUMB_NOP(THUMB_INSTR(instr)) || IS_THUMB_IS_ENABLED(THUMB_INSTR(instr))) { tp->ftt_thumb = 1; - tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; - break; + } else { + /* Shouldn't reach here - this means we don't recognize + * the instruction at one of the USDT probe locations + */ + return -1; + } + tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; + break; + + case FASTTRAP_FN_ARM: + tp->ftt_thumb = 0; + tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; + break; + + case FASTTRAP_FN_THUMB: + tp->ftt_thumb = 1; + tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; + break; - default: - return (-1); + default: + return -1; } } @@ -185,11 +186,11 @@ fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, if (tp->ftt_type == FASTTRAP_T_INV) { /* This is an instruction we either don't recognize or can't instrument */ printf("dtrace: fasttrap: Unrecognized instruction: %08x at %08x\n", - (tp->ftt_thumb && dtrace_instr_size(tp->ftt_instr,tp->ftt_thumb) == 2) ? tp->ftt_instr1 : instr, pc); - return (-1); + (tp->ftt_thumb && dtrace_instr_size(tp->ftt_instr, tp->ftt_thumb) == 2) ? tp->ftt_instr1 : instr, pc); + return -1; } - return (0); + return 0; } int @@ -205,12 +206,13 @@ fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp) instr = FASTTRAP_ARM_INSTR; } - if (uwrite(p, &instr, size, tp->ftt_pc) != 0) - return (-1); + if (uwrite(p, &instr, size, tp->ftt_pc) != 0) { + return -1; + } tp->ftt_installed = 1; - return (0); + return 0; } int @@ -224,22 +226,26 @@ fasttrap_tracepoint_remove(proc_t *p, fasttrap_tracepoint_t *tp) * Distinguish between read or write failures and a changed * instruction. */ - if (uread(p, &instr, size, tp->ftt_pc) != 0) + if (uread(p, &instr, size, tp->ftt_pc) != 0) { goto end; + } if (tp->ftt_thumb) { - if (*((uint16_t*) &instr) != FASTTRAP_THUMB_INSTR) + if (*((uint16_t*) &instr) != FASTTRAP_THUMB_INSTR) { goto end; + } } else { - if (instr != FASTTRAP_ARM_INSTR) + if (instr != FASTTRAP_ARM_INSTR) { goto end; + } + } + if (uwrite(p, &tp->ftt_instr, size, tp->ftt_pc) != 0) { + return -1; } - if (uwrite(p, &tp->ftt_instr, size, tp->ftt_pc) != 0) - return (-1); end: tp->ftt_installed = 0; - return (0); + return 0; } static void @@ -258,15 +264,16 @@ fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_ for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { if (pid == tp->ftt_pid && pc == tp->ftt_pc && - tp->ftt_proc->ftpc_acount != 0) + tp->ftt_proc->ftpc_acount != 0) { break; + } } /* * Don't sweat it if we can't find the tracepoint again; unlike * when we're in fasttrap_pid_probe(), finding the tracepoint here * is not essential to the correct execution of the process. 
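/*
 * Editor's note: the (pid, pc) lookup pattern used in
 * fasttrap_return_common() below, reduced to a sketch. The chain layout and
 * field names are simplified stand-ins for fasttrap_tracepoint_t and the
 * FASTTRAP_TPOINTS_INDEX() hash; retired tracepoints (acount == 0) are
 * skipped, just as in the kernel walk.
 */
#include <stdint.h>
#include <stddef.h>

struct tracepoint {
	int                pid;
	uintptr_t          pc;
	unsigned           acount;   /* active-provider count, 0 == retired */
	struct tracepoint *next;
};

static struct tracepoint *
lookup(struct tracepoint *bucket, int pid, uintptr_t pc)
{
	struct tracepoint *tp;

	for (tp = bucket; tp != NULL; tp = tp->next) {
		if (pid == tp->pid && pc == tp->pc && tp->acount != 0) {
			return tp;
		}
	}
	return NULL;    /* not fatal here, unlike in fasttrap_pid_probe() */
}

int
main(void)
{
	struct tracepoint b = { .pid = 42, .pc = 0x1000, .acount = 1, .next = NULL };
	return lookup(&b, 42, 0x1000) != NULL ? 0 : 1;
}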
- */ + */ if (tp == NULL) { lck_mtx_unlock(pid_mtx); return; @@ -281,8 +288,9 @@ fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_ */ if (tp->ftt_type != FASTTRAP_T_LDM_PC && tp->ftt_type != FASTTRAP_T_POP_PC && - new_pc - probe->ftp_faddr < probe->ftp_fsize) + new_pc - probe->ftp_faddr < probe->ftp_fsize) { continue; + } if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) { uint8_t already_triggered = atomic_or_8(&probe->ftp_triggered, 1); @@ -301,14 +309,14 @@ fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_ #ifndef CONFIG_EMBEDDED if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id, - 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); + 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { #endif } else { dtrace_probe(id->fti_probe->ftp_id, - pc - id->fti_probe->ftp_faddr, - regs->r[0], 0, 0, 0); + pc - id->fti_probe->ftp_faddr, + regs->r[0], 0, 0, 0); } } if (retire_tp) { @@ -340,16 +348,16 @@ fasttrap_sigsegv(proc_t *p, uthread_t t, user_addr_t addr, arm_saved_state_t *re t->uu_code = addr; t->uu_siglist |= sigmask(SIGSEGV); - /* + /* * XXX These two line may be redundant; if not, then we need * XXX to potentially set the data address in the machine * XXX specific thread state structure to indicate the address. - */ + */ t->uu_exception = KERN_INVALID_ADDRESS; /* SIGSEGV */ t->uu_subcode = 0; /* XXX pad */ - - proc_unlock(p); - + + proc_unlock(p); + /* raise signal */ signal_setast(t->uu_context.vc_thread); #endif @@ -376,7 +384,8 @@ fasttrap_usdt_args(fasttrap_probe_t *probe, arm_saved_state_t *regs, int argc, } } -static void set_thumb_flag(arm_saved_state_t *regs, user_addr_t pc) +static void +set_thumb_flag(arm_saved_state_t *regs, user_addr_t pc) { if (pc & 1) { regs->cpsr |= PSR_TF; @@ -412,7 +421,7 @@ fasttrap_pid_probe(arm_saved_state_t *regs) if (uthread->t_dtrace_step) { ASSERT(uthread->t_dtrace_on); fasttrap_sigtrap(p, uthread, pc); - return (0); + return 0; } /* @@ -431,23 +440,25 @@ fasttrap_pid_probe(arm_saved_state_t *regs) */ if (p->p_lflag & P_LINVFORK) { proc_list_lock(); - while (p->p_lflag & P_LINVFORK) + while (p->p_lflag & P_LINVFORK) { p = p->p_pptr; + } proc_list_unlock(); } pid = p->p_pid; pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock; lck_mtx_lock(pid_mtx); - bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid,pc)]; + bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; /* * Lookup the tracepoint that the process just hit. */ for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { if (pid == tp->ftt_pid && pc == tp->ftt_pc && - tp->ftt_proc->ftpc_acount != 0) + tp->ftt_proc->ftpc_acount != 0) { break; + } } /* @@ -457,7 +468,7 @@ fasttrap_pid_probe(arm_saved_state_t *regs) */ if (tp == NULL) { lck_mtx_unlock(pid_mtx); - return (-1); + return -1; } /* Default to always execute */ @@ -470,11 +481,11 @@ fasttrap_pid_probe(arm_saved_state_t *regs) condition_code = itstate >> 4; } else { printf("dtrace: fasttrap: Tried to trace instruction %08x at %08x but not at end of IT block\n", - (tp->ftt_thumb && dtrace_instr_size(tp->ftt_instr,tp->ftt_thumb) == 2) ? tp->ftt_instr1 : tp->ftt_instr, pc); + (tp->ftt_thumb && dtrace_instr_size(tp->ftt_instr, tp->ftt_thumb) == 2) ? 
tp->ftt_instr1 : tp->ftt_instr, pc); fasttrap_tracepoint_remove(p, tp); lck_mtx_unlock(pid_mtx); - return (-1); + return -1; } } } else { @@ -487,7 +498,7 @@ fasttrap_pid_probe(arm_saved_state_t *regs) */ fasttrap_tracepoint_remove(p, tp); lck_mtx_unlock(pid_mtx); - return (-1); + return -1; } if (tp->ftt_ids != NULL) { @@ -505,7 +516,7 @@ fasttrap_pid_probe(arm_saved_state_t *regs) #ifndef CONFIG_EMBEDDED if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, probe->ftp_id, - 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); + 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { #endif @@ -592,498 +603,498 @@ fasttrap_pid_probe(arm_saved_state_t *regs) goto done; } - instr_size = dtrace_instr_size(tp->ftt_instr,tp->ftt_thumb); + instr_size = dtrace_instr_size(tp->ftt_instr, tp->ftt_thumb); switch (tp->ftt_type) { - case FASTTRAP_T_MOV_PC_REG: - case FASTTRAP_T_CPY_PC: - { - if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { - new_pc = pc + instr_size; - break; - } + case FASTTRAP_T_MOV_PC_REG: + case FASTTRAP_T_CPY_PC: + { + if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { + new_pc = pc + instr_size; + break; + } - int rm; - if (tp->ftt_thumb) { - rm = THUMB16_HRM(tp->ftt_instr1); - } else { - rm = tp->ftt_instr & 0xF; - } - new_pc = regs->r[rm]; + int rm; + if (tp->ftt_thumb) { + rm = THUMB16_HRM(tp->ftt_instr1); + } else { + rm = tp->ftt_instr & 0xF; + } + new_pc = regs->r[rm]; - /* This instruction does not change the Thumb state */ + /* This instruction does not change the Thumb state */ - break; - } + break; + } - case FASTTRAP_T_STM_LR: - case FASTTRAP_T_PUSH_LR: - { - /* - * This is a very common case, so we want to emulate this instruction if - * possible. However, on a push, it is possible that we might reach the end - * of a page and have to allocate a new page. Most of the time this will not - * happen, and we know that the push instruction can store at most 16 words, - * so check to see if we are far from the boundary, and if so, emulate. This - * can be made more aggressive by checking the actual number of words being - * pushed, but we won't do that for now. - * - * Some of the same issues that apply to POP_PC probably apply here also. - */ + case FASTTRAP_T_STM_LR: + case FASTTRAP_T_PUSH_LR: + { + /* + * This is a very common case, so we want to emulate this instruction if + * possible. However, on a push, it is possible that we might reach the end + * of a page and have to allocate a new page. Most of the time this will not + * happen, and we know that the push instruction can store at most 16 words, + * so check to see if we are far from the boundary, and if so, emulate. This + * can be made more aggressive by checking the actual number of words being + * pushed, but we won't do that for now. + * + * Some of the same issues that apply to POP_PC probably apply here also. 
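/*
 * The fast-path test described in the comment above, as a standalone
 * predicate: a push of at most 16 words is emulated in place only when the
 * worst-case store area cannot cross a page boundary. DEMO_PAGE_SHIFT of
 * 12 (4 KiB pages) is an assumption for the sketch.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_PAGE_SHIFT 12

static bool
push_crosses_page(uintptr_t sp)
{
        return ((sp - 16 * 4) >> DEMO_PAGE_SHIFT) != (sp >> DEMO_PAGE_SHIFT);
}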
+ */ - int reglist; - int ret; - uintptr_t* base; + int reglist; + int ret; + uintptr_t* base; - if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { - new_pc = pc + instr_size; - break; - } + if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { + new_pc = pc + instr_size; + break; + } - base = (uintptr_t*) regs->sp; - if (((((uintptr_t) base)-16*4) >> PAGE_SHIFT) != (((uintptr_t) base) >> PAGE_SHIFT)) { - /* Crosses the page boundary, go to emulation */ - goto instr_emulate; - } + base = (uintptr_t*) regs->sp; + if (((((uintptr_t) base) - 16 * 4) >> PAGE_SHIFT) != (((uintptr_t) base) >> PAGE_SHIFT)) { + /* Crosses the page boundary, go to emulation */ + goto instr_emulate; + } - if (tp->ftt_thumb) { - if (instr_size == 4) { - /* We know we have to push lr, never push sp or pc */ - reglist = tp->ftt_instr2 & 0x1FFF; - } else { - reglist = tp->ftt_instr1 & 0xFF; - } - } else { + if (tp->ftt_thumb) { + if (instr_size == 4) { /* We know we have to push lr, never push sp or pc */ - reglist = tp->ftt_instr & 0x1FFF; + reglist = tp->ftt_instr2 & 0x1FFF; + } else { + reglist = tp->ftt_instr1 & 0xFF; } + } else { + /* We know we have to push lr, never push sp or pc */ + reglist = tp->ftt_instr & 0x1FFF; + } - /* Push the link register */ - base--; - ret = fasttrap_suword32((uint32_t) base, regs->lr); - if (ret == -1) { - fasttrap_sigsegv(p, uthread, (user_addr_t) base, regs); - new_pc = regs->pc; - break; - } + /* Push the link register */ + base--; + ret = fasttrap_suword32((uint32_t) base, regs->lr); + if (ret == -1) { + fasttrap_sigsegv(p, uthread, (user_addr_t) base, regs); + new_pc = regs->pc; + break; + } - /* Start pushing from $r12 */ - int regmask = 1 << 12; - int regnum = 12; - - while (regmask) { - if (reglist & regmask) { - base--; - ret = fasttrap_suword32((uint32_t) base, regs->r[regnum]); - if (ret == -1) { - fasttrap_sigsegv(p, uthread, (user_addr_t) base, regs); - new_pc = regs->pc; - break; - } + /* Start pushing from $r12 */ + int regmask = 1 << 12; + int regnum = 12; + + while (regmask) { + if (reglist & regmask) { + base--; + ret = fasttrap_suword32((uint32_t) base, regs->r[regnum]); + if (ret == -1) { + fasttrap_sigsegv(p, uthread, (user_addr_t) base, regs); + new_pc = regs->pc; + break; } - regmask >>= 1; - regnum--; } - - regs->sp = (uintptr_t) base; - - new_pc = pc + instr_size; - - break; + regmask >>= 1; + regnum--; } + regs->sp = (uintptr_t) base; - case FASTTRAP_T_LDM_PC: - case FASTTRAP_T_POP_PC: - { - /* TODO Two issues that will eventually need to be resolved: - * - * 1. Understand what the hardware does if we have to segfault (data abort) in - * the middle of a load multiple. We currently don't have a working segfault - * handler anyway, and with no swapfile we should never segfault on this load. - * If we do, we'll just kill the process by setting the pc to 0. - * - * 2. The emulation is no longer atomic. We currently only emulate pop for - * function epilogues, and so we should never have a race here because one - * thread should never be trying to manipulate another thread's stack frames. - * That is almost certainly a bug in the program. - * - * This will need to be fixed if we ever: - * a. Ship dtrace externally, as this could be a potential attack vector - * b. Support instruction level tracing, as we might then pop/ldm non epilogues. - * - */ - - /* Assume ldmia! sp/pop ... 
pc */ - - int regnum = 0, reglist; - int ret; - uintptr_t* base; + new_pc = pc + instr_size; - if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { - new_pc = pc + instr_size; - break; - } - - if (tp->ftt_thumb) { - if (instr_size == 4) { - /* We know we have to load the pc, don't do it twice */ - reglist = tp->ftt_instr2 & 0x7FFF; - } else { - reglist = tp->ftt_instr1 & 0xFF; - } - } else { - /* We know we have to load the pc, don't do it twice */ - reglist = tp->ftt_instr & 0x7FFF; - } + break; + } - base = (uintptr_t*) regs->sp; - while (reglist) { - if (reglist & 1) { - ret = fasttrap_fuword32((uint32_t) base, ®s->r[regnum]); - if (ret == -1) { - fasttrap_sigsegv(p, uthread, (user_addr_t) base, regs); - new_pc = regs->pc; - break; - } - base++; - } - reglist >>= 1; - regnum++; - } - ret = fasttrap_fuword32((uint32_t) base, &new_pc); - if (ret == -1) { - fasttrap_sigsegv(p, uthread, (user_addr_t) base, regs); - new_pc = regs->pc; - break; - } - base++; + case FASTTRAP_T_LDM_PC: + case FASTTRAP_T_POP_PC: + { + /* TODO Two issues that will eventually need to be resolved: + * + * 1. Understand what the hardware does if we have to segfault (data abort) in + * the middle of a load multiple. We currently don't have a working segfault + * handler anyway, and with no swapfile we should never segfault on this load. + * If we do, we'll just kill the process by setting the pc to 0. + * + * 2. The emulation is no longer atomic. We currently only emulate pop for + * function epilogues, and so we should never have a race here because one + * thread should never be trying to manipulate another thread's stack frames. + * That is almost certainly a bug in the program. + * + * This will need to be fixed if we ever: + * a. Ship dtrace externally, as this could be a potential attack vector + * b. Support instruction level tracing, as we might then pop/ldm non epilogues. + * + */ - regs->sp = (uintptr_t) base; + /* Assume ldmia! sp/pop ... 
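/*
 * The pop/ldm emulation here, reduced to plain memory as a hedged sketch:
 * registers named in the low bits of reglist load from ascending stack
 * slots, then the new pc loads last, exactly the order the kernel loop
 * walks. Hypothetical helper, not kernel code.
 */
#include <stdint.h>

static uint32_t
emulate_pop_pc(uint32_t regs[16], uint16_t reglist, const uint32_t *stack)
{
        int regnum = 0;

        while (reglist) {
                if (reglist & 1) {
                        regs[regnum] = *stack++;
                }
                reglist >>= 1;
                regnum++;
        }
        return *stack; /* pc pops after the listed registers */
}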
pc */ - set_thumb_flag(regs, new_pc); + int regnum = 0, reglist; + int ret; + uintptr_t* base; + if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { + new_pc = pc + instr_size; break; } - case FASTTRAP_T_CB_N_Z: - { - /* Thumb mode instruction, and not permitted in IT block, so skip the condition code check */ - int rn = tp->ftt_instr1 & 0x7; - int offset = (((tp->ftt_instr1 & 0x00F8) >> 2) | ((tp->ftt_instr1 & 0x0200) >> 3)) + 4; - int nonzero = tp->ftt_instr1 & 0x0800; - if (!nonzero != !(regs->r[rn] == 0)) { - new_pc = pc + offset; + if (tp->ftt_thumb) { + if (instr_size == 4) { + /* We know we have to load the pc, don't do it twice */ + reglist = tp->ftt_instr2 & 0x7FFF; } else { - new_pc = pc + instr_size; + reglist = tp->ftt_instr1 & 0xFF; } - break; + } else { + /* We know we have to load the pc, don't do it twice */ + reglist = tp->ftt_instr & 0x7FFF; } - case FASTTRAP_T_B_COND: - { - /* Use the condition code in the instruction and ignore the ITSTATE */ - - int code, offset; - if (tp->ftt_thumb) { - if (instr_size == 4) { - code = (tp->ftt_instr1 >> 6) & 0xF; - if (code == 14 || code == 15) { - panic("fasttrap: Emulation of invalid branch"); - } - int S = (tp->ftt_instr1 >> 10) & 1, - J1 = (tp->ftt_instr2 >> 13) & 1, - J2 = (tp->ftt_instr2 >> 11) & 1; - offset = 4 + SIGNEXTEND( - (S << 20) | (J2 << 19) | (J1 << 18) | - ((tp->ftt_instr1 & 0x003F) << 12) | - ((tp->ftt_instr2 & 0x07FF) << 1), - 21); - } else { - code = (tp->ftt_instr1 >> 8) & 0xF; - if (code == 14 || code == 15) { - panic("fasttrap: Emulation of invalid branch"); - } - offset = 4 + (SIGNEXTEND(tp->ftt_instr1 & 0xFF, 8) << 1); - } - } else { - code = ARM_CONDCODE(tp->ftt_instr); - if (code == 15) { - panic("fasttrap: Emulation of invalid branch"); + base = (uintptr_t*) regs->sp; + while (reglist) { + if (reglist & 1) { + ret = fasttrap_fuword32((uint32_t) base, ®s->r[regnum]); + if (ret == -1) { + fasttrap_sigsegv(p, uthread, (user_addr_t) base, regs); + new_pc = regs->pc; + break; } - offset = 8 + (SIGNEXTEND(tp->ftt_instr & 0x00FFFFFF, 24) << 2); - } - - if (dtrace_arm_condition_true(code, regs->cpsr)) { - new_pc = pc + offset; - } else { - new_pc = pc + instr_size; + base++; } + reglist >>= 1; + regnum++; + } + ret = fasttrap_fuword32((uint32_t) base, &new_pc); + if (ret == -1) { + fasttrap_sigsegv(p, uthread, (user_addr_t) base, regs); + new_pc = regs->pc; break; } + base++; - case FASTTRAP_T_B_UNCOND: - { - int offset; + regs->sp = (uintptr_t) base; - /* Unconditional branches can only be taken from Thumb mode */ - /* (This is different from an ARM branch with condition code "always") */ - ASSERT(tp->ftt_thumb == 1); + set_thumb_flag(regs, new_pc); - if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { - new_pc = pc + instr_size; - break; - } + break; + } + + case FASTTRAP_T_CB_N_Z: + { + /* Thumb mode instruction, and not permitted in IT block, so skip the condition code check */ + int rn = tp->ftt_instr1 & 0x7; + int offset = (((tp->ftt_instr1 & 0x00F8) >> 2) | ((tp->ftt_instr1 & 0x0200) >> 3)) + 4; + int nonzero = tp->ftt_instr1 & 0x0800; + if (!nonzero != !(regs->r[rn] == 0)) { + new_pc = pc + offset; + } else { + new_pc = pc + instr_size; + } + break; + } + + case FASTTRAP_T_B_COND: + { + /* Use the condition code in the instruction and ignore the ITSTATE */ + int code, offset; + if (tp->ftt_thumb) { if (instr_size == 4) { + code = (tp->ftt_instr1 >> 6) & 0xF; + if (code == 14 || code == 15) { + panic("fasttrap: Emulation of invalid branch"); + } int S = (tp->ftt_instr1 >> 10) & 1, J1 = 
(tp->ftt_instr2 >> 13) & 1, J2 = (tp->ftt_instr2 >> 11) & 1; - int I1 = (J1 != S) ? 0 : 1, I2 = (J2 != S) ? 0 : 1; offset = 4 + SIGNEXTEND( - (S << 24) | (I1 << 23) | (I2 << 22) | - ((tp->ftt_instr1 & 0x03FF) << 12) | - ((tp->ftt_instr2 & 0x07FF) << 1), - 25); + (S << 20) | (J2 << 19) | (J1 << 18) | + ((tp->ftt_instr1 & 0x003F) << 12) | + ((tp->ftt_instr2 & 0x07FF) << 1), + 21); } else { - uint32_t instr1 = tp->ftt_instr1; - offset = 4 + (SIGNEXTEND(instr1 & 0x7FF, 11) << 1); + code = (tp->ftt_instr1 >> 8) & 0xF; + if (code == 14 || code == 15) { + panic("fasttrap: Emulation of invalid branch"); + } + offset = 4 + (SIGNEXTEND(tp->ftt_instr1 & 0xFF, 8) << 1); } + } else { + code = ARM_CONDCODE(tp->ftt_instr); + if (code == 15) { + panic("fasttrap: Emulation of invalid branch"); + } + offset = 8 + (SIGNEXTEND(tp->ftt_instr & 0x00FFFFFF, 24) << 2); + } + if (dtrace_arm_condition_true(code, regs->cpsr)) { new_pc = pc + offset; + } else { + new_pc = pc + instr_size; + } + + break; + } + + case FASTTRAP_T_B_UNCOND: + { + int offset; + + /* Unconditional branches can only be taken from Thumb mode */ + /* (This is different from an ARM branch with condition code "always") */ + ASSERT(tp->ftt_thumb == 1); + if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { + new_pc = pc + instr_size; break; } - case FASTTRAP_T_BX_REG: - { - int reg; + if (instr_size == 4) { + int S = (tp->ftt_instr1 >> 10) & 1, + J1 = (tp->ftt_instr2 >> 13) & 1, + J2 = (tp->ftt_instr2 >> 11) & 1; + int I1 = (J1 != S) ? 0 : 1, I2 = (J2 != S) ? 0 : 1; + offset = 4 + SIGNEXTEND( + (S << 24) | (I1 << 23) | (I2 << 22) | + ((tp->ftt_instr1 & 0x03FF) << 12) | + ((tp->ftt_instr2 & 0x07FF) << 1), + 25); + } else { + uint32_t instr1 = tp->ftt_instr1; + offset = 4 + (SIGNEXTEND(instr1 & 0x7FF, 11) << 1); + } - if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { - new_pc = pc + instr_size; - break; - } + new_pc = pc + offset; - if (tp->ftt_thumb) { - reg = THUMB16_HRM(tp->ftt_instr1); - } else { - reg = ARM_RM(tp->ftt_instr); - } - new_pc = regs->r[reg]; - set_thumb_flag(regs, new_pc); + break; + } + + case FASTTRAP_T_BX_REG: + { + int reg; + if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { + new_pc = pc + instr_size; break; } - case FASTTRAP_T_LDR_PC_IMMED: - case FASTTRAP_T_VLDR_PC_IMMED: - /* Handle these instructions by replacing the PC in the instruction with another - * register. They are common, so we'd like to support them, and this way we do so - * without any risk of having to simulate a segfault. - */ + if (tp->ftt_thumb) { + reg = THUMB16_HRM(tp->ftt_instr1); + } else { + reg = ARM_RM(tp->ftt_instr); + } + new_pc = regs->r[reg]; + set_thumb_flag(regs, new_pc); - /* Fall through */ + break; + } - instr_emulate: - case FASTTRAP_T_COMMON: - { - user_addr_t addr; - uint8_t scratch[32]; - uint_t i = 0; - fasttrap_instr_t emul_instr; - emul_instr.instr32 = tp->ftt_instr; - int emul_instr_size; + case FASTTRAP_T_LDR_PC_IMMED: + case FASTTRAP_T_VLDR_PC_IMMED: + /* Handle these instructions by replacing the PC in the instruction with another + * register. They are common, so we'd like to support them, and this way we do so + * without any risk of having to simulate a segfault. + */ - /* - * Unfortunately sometimes when we emulate the instruction and have to replace the - * PC, there is no longer a thumb mode equivalent. We end up having to run the - * modified instruction in ARM mode. We use this variable to keep track of which - * mode we should emulate in. 
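/*
 * Self-contained version of the 32-bit Thumb unconditional-branch decode
 * (encoding T4) used in the B_UNCOND case above: I1/I2 derive from J1/J2
 * and S, and the 25-bit result is sign-extended with the same
 * arithmetic-shift idiom as SIGNEXTEND. The emulation then adds 4 because
 * the Thumb pc reads one fetch ahead.
 */
#include <stdint.h>

static int32_t
thumb32_b_offset(uint16_t instr1, uint16_t instr2)
{
        int S = (instr1 >> 10) & 1;
        int J1 = (instr2 >> 13) & 1;
        int J2 = (instr2 >> 11) & 1;
        int I1 = (J1 != S) ? 0 : 1;
        int I2 = (J2 != S) ? 0 : 1;
        int32_t raw = (S << 24) | (I1 << 23) | (I2 << 22) |
            ((instr1 & 0x03FF) << 12) | ((instr2 & 0x07FF) << 1);

        return (raw << 7) >> 7; /* sign-extend from 25 bits */
}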
We still use the original variable to determine - * what mode to return to. - */ - uint8_t emul_thumb = tp->ftt_thumb; - int save_reg = -1; - uint32_t save_val = 0; + /* Fall through */ - /* - * Dealing with condition codes and emulation: - * We can't just uniformly do a condition code check here because not all instructions - * have condition codes. We currently do not support an instruction by instruction trace, - * so we can assume that either: 1. We are executing a Thumb instruction, in which case - * we either are not in an IT block and should execute always, or we are last in an IT - * block. Either way, the traced instruction will run correctly, and we won't have any - * problems when we return to the original code, because we will no longer be in the IT - * block. 2. We are executing an ARM instruction, in which case we are ok as long as - * we don't attempt to change the condition code. +instr_emulate: + case FASTTRAP_T_COMMON: + { + user_addr_t addr; + uint8_t scratch[32]; + uint_t i = 0; + fasttrap_instr_t emul_instr; + emul_instr.instr32 = tp->ftt_instr; + int emul_instr_size; + + /* + * Unfortunately sometimes when we emulate the instruction and have to replace the + * PC, there is no longer a thumb mode equivalent. We end up having to run the + * modified instruction in ARM mode. We use this variable to keep track of which + * mode we should emulate in. We still use the original variable to determine + * what mode to return to. + */ + uint8_t emul_thumb = tp->ftt_thumb; + int save_reg = -1; + uint32_t save_val = 0; + + /* + * Dealing with condition codes and emulation: + * We can't just uniformly do a condition code check here because not all instructions + * have condition codes. We currently do not support an instruction by instruction trace, + * so we can assume that either: 1. We are executing a Thumb instruction, in which case + * we either are not in an IT block and should execute always, or we are last in an IT + * block. Either way, the traced instruction will run correctly, and we won't have any + * problems when we return to the original code, because we will no longer be in the IT + * block. 2. We are executing an ARM instruction, in which case we are ok as long as + * we don't attempt to change the condition code. + */ + if (tp->ftt_type == FASTTRAP_T_LDR_PC_IMMED) { + /* We know we always have a free register (the one we plan to write the + * result value to!). So we'll replace the pc with that one. */ - if (tp->ftt_type == FASTTRAP_T_LDR_PC_IMMED) { - /* We know we always have a free register (the one we plan to write the - * result value to!). So we'll replace the pc with that one. - */ - int new_reg; - if (tp->ftt_thumb) { - /* Check to see if thumb or thumb2 */ - if (instr_size == 2) { - /* - * Sadness. We need to emulate this instruction in ARM mode - * because it has an 8 bit immediate offset. Instead of having - * to deal with condition codes in the ARM instruction, we'll - * just check the condition and abort if the condition is false. - */ - if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { - new_pc = pc + instr_size; - break; - } - - new_reg = (tp->ftt_instr1 >> 8) & 0x7; - regs->r[new_reg] = ALIGNADDR(regs->pc + 4, 2); - emul_thumb = 0; - emul_instr.instr32 = 0xE5900000 | (new_reg << 16) | (new_reg << 12) | ((tp->ftt_instr1 & 0xFF) << 2); - } else { - /* Thumb2. Just replace the register. 
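/*
 * How the 16-bit Thumb "ldr rN, [pc, #imm8]" handled above gets rebuilt as
 * an ARM instruction: 0xE5900000 is LDR (immediate, condition AL) with the
 * Rn/Rt/imm12 fields zeroed, so the chosen register and the word-scaled
 * 8-bit offset are OR'd in. A sketch of the same constant arithmetic only.
 */
#include <stdint.h>

static uint32_t
arm_ldr_replacement(uint32_t reg, uint16_t thumb_op)
{
        return 0xE5900000u | (reg << 16)        /* Rn: base register */
               | (reg << 12)                    /* Rt: destination */
               | ((thumb_op & 0xFFu) << 2);     /* imm12: words to bytes */
}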
*/ - new_reg = (tp->ftt_instr2 >> 12) & 0xF; - regs->r[new_reg] = ALIGNADDR(regs->pc + 4, 2); - emul_instr.instr16.instr1 &= ~0x000F; - emul_instr.instr16.instr1 |= new_reg; + int new_reg; + if (tp->ftt_thumb) { + /* Check to see if thumb or thumb2 */ + if (instr_size == 2) { + /* + * Sadness. We need to emulate this instruction in ARM mode + * because it has an 8 bit immediate offset. Instead of having + * to deal with condition codes in the ARM instruction, we'll + * just check the condition and abort if the condition is false. + */ + if (!dtrace_arm_condition_true(condition_code, regs->cpsr)) { + new_pc = pc + instr_size; + break; } + + new_reg = (tp->ftt_instr1 >> 8) & 0x7; + regs->r[new_reg] = ALIGNADDR(regs->pc + 4, 2); + emul_thumb = 0; + emul_instr.instr32 = 0xE5900000 | (new_reg << 16) | (new_reg << 12) | ((tp->ftt_instr1 & 0xFF) << 2); } else { - /* ARM. Just replace the register. */ - new_reg = (tp->ftt_instr >> 12) & 0xF; - regs->r[new_reg] = ALIGNADDR(regs->pc + 8,2); - emul_instr.instr32 &= ~0x000F0000; - emul_instr.instr32 |= new_reg << 16; - } - } else if (tp->ftt_type == FASTTRAP_T_VLDR_PC_IMMED) { - /* This instruction only uses one register, and if we're here, we know - * it must be the pc. So we'll just replace it with R0. - */ - save_reg = 0; - save_val = regs->r[0]; - regs->r[save_reg] = ALIGNADDR(regs->pc + (tp->ftt_thumb ? 4 : 8), 2); - if (tp->ftt_thumb) { + /* Thumb2. Just replace the register. */ + new_reg = (tp->ftt_instr2 >> 12) & 0xF; + regs->r[new_reg] = ALIGNADDR(regs->pc + 4, 2); emul_instr.instr16.instr1 &= ~0x000F; - } else { - emul_instr.instr32 &= ~0x000F0000; + emul_instr.instr16.instr1 |= new_reg; } + } else { + /* ARM. Just replace the register. */ + new_reg = (tp->ftt_instr >> 12) & 0xF; + regs->r[new_reg] = ALIGNADDR(regs->pc + 8, 2); + emul_instr.instr32 &= ~0x000F0000; + emul_instr.instr32 |= new_reg << 16; } - - emul_instr_size = dtrace_instr_size(emul_instr.instr32, emul_thumb); - - /* - * At this point: - * tp->ftt_thumb = thumb mode of original instruction - * emul_thumb = thumb mode for emulation - * emul_instr = instruction we are using to emulate original instruction - * emul_instr_size = size of emulating instruction + } else if (tp->ftt_type == FASTTRAP_T_VLDR_PC_IMMED) { + /* This instruction only uses one register, and if we're here, we know + * it must be the pc. So we'll just replace it with R0. */ + save_reg = 0; + save_val = regs->r[0]; + regs->r[save_reg] = ALIGNADDR(regs->pc + (tp->ftt_thumb ? 4 : 8), 2); + if (tp->ftt_thumb) { + emul_instr.instr16.instr1 &= ~0x000F; + } else { + emul_instr.instr32 &= ~0x000F0000; + } + } - addr = uthread->t_dtrace_scratch->addr; + emul_instr_size = dtrace_instr_size(emul_instr.instr32, emul_thumb); - if (addr == 0LL) { - fasttrap_sigtrap(p, uthread, pc); // Should be killing target proc - new_pc = pc; - break; - } + /* + * At this point: + * tp->ftt_thumb = thumb mode of original instruction + * emul_thumb = thumb mode for emulation + * emul_instr = instruction we are using to emulate original instruction + * emul_instr_size = size of emulating instruction + */ - uthread->t_dtrace_scrpc = addr; - if (emul_thumb) { - /* - * No way to do an unconditional branch in Thumb mode, shove the address - * onto the user stack and go to the next location with a pop. This can - * segfault if this push happens to cross a stack page, but that's ok, since - * we are running in userland, and the kernel knows how to handle userland - * stack expansions correctly. 
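/*
 * The fixed Thumb tail of the scratch page laid out in the comment here
 * (push {r0,r1}; ldr r0,[pc,#4]; str r0,[sp,#4]; pop {r0,pc}; padding;
 * return address), assembled into plain memory. A sketch under the same
 * layout and little-endian assumptions, not the kernel emitter.
 */
#include <stdint.h>
#include <string.h>

static unsigned
emit_thumb_tail(uint8_t *scratch, unsigned i, uint32_t return_pc)
{
        static const uint16_t ops[] = { 0xB403, 0x4801, 0x9001, 0xBD01 };

        memcpy(scratch + i, ops, sizeof(ops));
        i += sizeof(ops);
        if (i % 4) {
                scratch[i++] = 0; /* saved 32-bit words must be aligned */
                scratch[i++] = 0;
        }
        memcpy(scratch + i, &return_pc, sizeof(return_pc));
        return i + sizeof(return_pc);
}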
- * - * Layout of scratch space for Thumb mode: - * Emulated instruction - * ldr save_reg, [pc, #16] (if necessary, restore any register we clobbered) - * push { r0, r1 } - * ldr r0, [pc, #4] - * str r0, [sp, #4] - * pop { r0, pc } - * Location we should return to in original program - * Saved value of clobbered register (if necessary) - */ + addr = uthread->t_dtrace_scratch->addr; - bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; + if (addr == 0LL) { + fasttrap_sigtrap(p, uthread, pc); // Should be killing target proc + new_pc = pc; + break; + } - if (save_reg != -1) { - uint16_t restore_inst = 0x4803; - restore_inst |= (save_reg & 0x7) << 8; - SET16(scratch+i, restore_inst); i += 2; // ldr reg, [pc , #16] - } + uthread->t_dtrace_scrpc = addr; + if (emul_thumb) { + /* + * No way to do an unconditional branch in Thumb mode, shove the address + * onto the user stack and go to the next location with a pop. This can + * segfault if this push happens to cross a stack page, but that's ok, since + * we are running in userland, and the kernel knows how to handle userland + * stack expansions correctly. + * + * Layout of scratch space for Thumb mode: + * Emulated instruction + * ldr save_reg, [pc, #16] (if necessary, restore any register we clobbered) + * push { r0, r1 } + * ldr r0, [pc, #4] + * str r0, [sp, #4] + * pop { r0, pc } + * Location we should return to in original program + * Saved value of clobbered register (if necessary) + */ - SET16(scratch+i, 0xB403); i += 2; // push { r0, r1 } - SET16(scratch+i, 0x4801); i += 2; // ldr r0, [pc, #4] - SET16(scratch+i, 0x9001); i += 2; // str r0, [sp, #4] - SET16(scratch+i, 0xBD01); i += 2; // pop { r0, pc } + bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; - if (i % 4) { - SET16(scratch+i, 0); i += 2; // padding - saved 32 bit words must be aligned - } - SET32(scratch+i, pc + instr_size + (tp->ftt_thumb ? 1 : 0)); i += 4; // Return address - if (save_reg != -1) { - SET32(scratch+i, save_val); i += 4; // saved value of clobbered register - } + if (save_reg != -1) { + uint16_t restore_inst = 0x4803; + restore_inst |= (save_reg & 0x7) << 8; + SET16(scratch + i, restore_inst); i += 2; // ldr reg, [pc , #16] + } - uthread->t_dtrace_astpc = addr + i; - bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; - SET16(scratch+i, FASTTRAP_THUMB_RET_INSTR); i += 2; - } else { - /* - * Layout of scratch space for ARM mode: - * Emulated instruction - * ldr save_reg, [pc, #12] (if necessary, restore any register we clobbered) - * ldr pc, [pc, #4] - * Location we should return to in original program - * Saved value of clobbered register (if necessary) - */ + SET16(scratch + i, 0xB403); i += 2; // push { r0, r1 } + SET16(scratch + i, 0x4801); i += 2; // ldr r0, [pc, #4] + SET16(scratch + i, 0x9001); i += 2; // str r0, [sp, #4] + SET16(scratch + i, 0xBD01); i += 2; // pop { r0, pc } - bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; + if (i % 4) { + SET16(scratch + i, 0); i += 2; // padding - saved 32 bit words must be aligned + } + SET32(scratch + i, pc + instr_size + (tp->ftt_thumb ? 
1 : 0)); i += 4; // Return address + if (save_reg != -1) { + SET32(scratch + i, save_val); i += 4; // saved value of clobbered register + } - if (save_reg != -1) { - uint32_t restore_inst = 0xE59F0004; - restore_inst |= save_reg << 12; - SET32(scratch+i, restore_inst); i += 4; // ldr reg, [pc, #12] - } - SET32(scratch+i, 0xE51FF004); i += 4; // ldr pc, [pc, #4] + uthread->t_dtrace_astpc = addr + i; + bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; + SET16(scratch + i, FASTTRAP_THUMB_RET_INSTR); i += 2; + } else { + /* + * Layout of scratch space for ARM mode: + * Emulated instruction + * ldr save_reg, [pc, #12] (if necessary, restore any register we clobbered) + * ldr pc, [pc, #4] + * Location we should return to in original program + * Saved value of clobbered register (if necessary) + */ - SET32(scratch+i, pc + instr_size + (tp->ftt_thumb ? 1 : 0)); i += 4; // Return address - if (save_reg != -1) { - SET32(scratch+i, save_val); i += 4; // Saved value of clobbered register - } + bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; - uthread->t_dtrace_astpc = addr + i; - bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; - SET32(scratch+i, FASTTRAP_ARM_RET_INSTR); i += 4; + if (save_reg != -1) { + uint32_t restore_inst = 0xE59F0004; + restore_inst |= save_reg << 12; + SET32(scratch + i, restore_inst); i += 4; // ldr reg, [pc, #12] } + SET32(scratch + i, 0xE51FF004); i += 4; // ldr pc, [pc, #4] - if (uwrite(p, scratch, i, uthread->t_dtrace_scratch->write_addr) != KERN_SUCCESS) { - fasttrap_sigtrap(p, uthread, pc); - new_pc = pc; - break; + SET32(scratch + i, pc + instr_size + (tp->ftt_thumb ? 1 : 0)); i += 4; // Return address + if (save_reg != -1) { + SET32(scratch + i, save_val); i += 4; // Saved value of clobbered register } - if (tp->ftt_retids != NULL) { - uthread->t_dtrace_step = 1; - uthread->t_dtrace_ret = 1; - new_pc = uthread->t_dtrace_astpc + (emul_thumb ? 1 : 0); - } else { - new_pc = uthread->t_dtrace_scrpc + (emul_thumb ? 1 : 0); - } + uthread->t_dtrace_astpc = addr + i; + bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; + SET32(scratch + i, FASTTRAP_ARM_RET_INSTR); i += 4; + } - uthread->t_dtrace_pc = pc; - uthread->t_dtrace_npc = pc + instr_size; - uthread->t_dtrace_on = 1; - was_simulated = 0; - set_thumb_flag(regs, new_pc); + if (uwrite(p, scratch, i, uthread->t_dtrace_scratch->write_addr) != KERN_SUCCESS) { + fasttrap_sigtrap(p, uthread, pc); + new_pc = pc; break; } - default: - panic("fasttrap: mishandled an instruction"); + if (tp->ftt_retids != NULL) { + uthread->t_dtrace_step = 1; + uthread->t_dtrace_ret = 1; + new_pc = uthread->t_dtrace_astpc + (emul_thumb ? 1 : 0); + } else { + new_pc = uthread->t_dtrace_scrpc + (emul_thumb ? 1 : 0); + } + + uthread->t_dtrace_pc = pc; + uthread->t_dtrace_npc = pc + instr_size; + uthread->t_dtrace_on = 1; + was_simulated = 0; + set_thumb_flag(regs, new_pc); + break; + } + + default: + panic("fasttrap: mishandled an instruction"); } done: @@ -1097,12 +1108,12 @@ done: */ regs->pc = new_pc; - /* + /* * If there were no return probes when we first found the tracepoint, * we should feel no obligation to honor any return probes that were * subsequently enabled -- they'll just have to wait until the next - * time around. - */ + * time around. 
+ */ if (tp->ftt_retids != NULL) { /* * We need to wait until the results of the instruction are @@ -1127,7 +1138,7 @@ done: } } - return (0); + return 0; } int @@ -1150,8 +1161,9 @@ fasttrap_return_probe(arm_saved_state_t *regs) */ if (p->p_lflag & P_LINVFORK) { proc_list_lock(); - while (p->p_lflag & P_LINVFORK) + while (p->p_lflag & P_LINVFORK) { p = p->p_pptr; + } proc_list_unlock(); } @@ -1162,29 +1174,30 @@ fasttrap_return_probe(arm_saved_state_t *regs) * complex web of lies. dtrace_return_probe() (our caller) * will correctly set %pc after we return. */ - regs->pc = pc; + regs->pc = pc; fasttrap_return_common(p, regs, pc, npc); - return (0); + return 0; } uint64_t fasttrap_pid_getarg(void *arg, dtrace_id_t id, void *parg, int argno, - int aframes) + int aframes) { #pragma unused(arg, id, parg, aframes) arm_saved_state_t* regs = find_user_regs(current_thread()); /* First four arguments are in registers */ - if (argno < 4) + if (argno < 4) { return regs->r[argno]; + } /* Look on the stack for the rest */ uint32_t value; uint32_t* sp = (uint32_t*) regs->sp; DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - value = dtrace_fuword32((user_addr_t) (sp+argno-4)); + value = dtrace_fuword32((user_addr_t) (sp + argno - 4)); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR); return value; @@ -1195,9 +1208,8 @@ fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int afram { #pragma unused(arg, id, parg, argno, aframes) #if 0 - return (fasttrap_anarg(ttolwp(curthread)->lwp_regs, 0, argno)); + return fasttrap_anarg(ttolwp(curthread)->lwp_regs, 0, argno); #endif return 0; } - diff --git a/bsd/dev/arm/fbt_arm.c b/bsd/dev/arm/fbt_arm.c index 9205cfb21..95ee1dfe2 100644 --- a/bsd/dev/arm/fbt_arm.c +++ b/bsd/dev/arm/fbt_arm.c @@ -31,12 +31,12 @@ #ifdef KERNEL #ifndef _KERNEL -#define _KERNEL /* Solaris vs. Darwin */ +#define _KERNEL /* Solaris vs. 
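/*
 * The AAPCS argument lookup fasttrap_pid_getarg() performs above, minus
 * the fault guards: arguments 0-3 arrive in r0-r3, and argument n >= 4
 * lives at sp + (n - 4) * 4. Illustrative helper only.
 */
#include <stdint.h>

static uint32_t
arm32_arg(const uint32_t r[4], const uint32_t *sp, int argno)
{
        return (argno < 4) ? r[argno] : sp[argno - 4];
}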
Darwin */ #endif #endif -#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from - * mach/ppc/thread_status.h */ +#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from + * mach/ppc/thread_status.h */ #include #include #include @@ -71,31 +71,31 @@ #define DTRACE_INVOP_THUMB_SET_R7_SKIP 2 #define DTRACE_INVOP_THUMB_MOV_SP_TO_R7_SKIP 2 -#define FBT_IS_THUMB_PUSH_LR(x) (((x) & 0x0000ff00) == 0x0000b500) -#define FBT_IS_THUMB_POP_R7(x) (((x) & 0x0000ff80) == 0x0000bc80) -#define FBT_IS_THUMB32_POP_R7LR(x,y) (((x) == 0x0000e8bd) && (((y) & 0x00004080) == 0x00004080)) -#define FBT_IS_THUMB_POP_PC(x) (((x) & 0x0000ff00) == 0x0000bd00) -#define FBT_IS_THUMB_SET_R7(x) (((x) & 0x0000ff00) == 0x0000af00) -#define FBT_IS_THUMB_MOV_SP_TO_R7(x) (((x) & 0x0000ffff) == 0x0000466f) -#define FBT_THUMB_SET_R7_OFFSET(x) (((x) & 0x000000ff) << 2) -#define FBT_IS_THUMB_LDR_PC(x) (((x) & 0x0000f800) == 0x00004800) -#define FBT_IS_THUMB32_LDR_PC(x,y) ((x) == 0x0000f8df) /* Only for positive offset PC relative loads */ -#define FBT_THUMB_STACK_REGS(x) ((x) & 0x00FF) -#define FBT_IS_THUMB_BX_REG(x) (((x) & 0x0000ff87) == 0x00004700) +#define FBT_IS_THUMB_PUSH_LR(x) (((x) & 0x0000ff00) == 0x0000b500) +#define FBT_IS_THUMB_POP_R7(x) (((x) & 0x0000ff80) == 0x0000bc80) +#define FBT_IS_THUMB32_POP_R7LR(x, y) (((x) == 0x0000e8bd) && (((y) & 0x00004080) == 0x00004080)) +#define FBT_IS_THUMB_POP_PC(x) (((x) & 0x0000ff00) == 0x0000bd00) +#define FBT_IS_THUMB_SET_R7(x) (((x) & 0x0000ff00) == 0x0000af00) +#define FBT_IS_THUMB_MOV_SP_TO_R7(x) (((x) & 0x0000ffff) == 0x0000466f) +#define FBT_THUMB_SET_R7_OFFSET(x) (((x) & 0x000000ff) << 2) +#define FBT_IS_THUMB_LDR_PC(x) (((x) & 0x0000f800) == 0x00004800) +#define FBT_IS_THUMB32_LDR_PC(x, y) ((x) == 0x0000f8df) /* Only for positive offset PC relative loads */ +#define FBT_THUMB_STACK_REGS(x) ((x) & 0x00FF) +#define FBT_IS_THUMB_BX_REG(x) (((x) & 0x0000ff87) == 0x00004700) -#define FBT_PATCHVAL 0xdefc -#define FBT_AFRAMES_ENTRY 8 -#define FBT_AFRAMES_RETURN 6 +#define FBT_PATCHVAL 0xdefc +#define FBT_AFRAMES_ENTRY 8 +#define FBT_AFRAMES_RETURN 6 -#define FBT_ENTRY "entry" -#define FBT_RETURN "return" -#define FBT_ADDR2NDX(addr) ((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask) +#define FBT_ENTRY "entry" +#define FBT_RETURN "return" +#define FBT_ADDR2NDX(addr) ((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask) -#define VFPSAVE_ALIGN_DTRACE 16 /* This value should come from VFPSAVE_ALIGN */ +#define VFPSAVE_ALIGN_DTRACE 16 /* This value should come from VFPSAVE_ALIGN */ -extern dtrace_provider_id_t fbt_id; -extern fbt_probe_t **fbt_probetab; -extern int fbt_probetab_mask; +extern dtrace_provider_id_t fbt_id; +extern fbt_probe_t **fbt_probetab; +extern int fbt_probetab_mask; kern_return_t fbt_perfCallback(int, struct arm_saved_state *, __unused int, __unused int); @@ -105,25 +105,29 @@ extern int dtrace_arm_condition_true(int cond, int cpsr); /* Calculate the address of the ldr. 
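/*
 * The FBT_IS_THUMB_* matchers above are mask/value tests on 16-bit Thumb
 * encodings; two written out as predicates for illustration. push with lr
 * encodes as 0xB5xx and pop with pc as 0xBDxx, the low byte naming r0-r7.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
is_thumb_push_lr(uint16_t op)
{
        return (op & 0xFF00) == 0xB500;
}

static bool
is_thumb_pop_pc(uint16_t op)
{
        return (op & 0xFF00) == 0xBD00;
}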
(From the ARM Architecture reference) */ /* Does not check to see if it's really a load instruction, caller must do that */ -static uint32_t thumb_ldr_pc_address(uint32_t address) +static uint32_t +thumb_ldr_pc_address(uint32_t address) { return (address & 0xFFFFFFFC) + (*(uint16_t*) address & 0xFF) * 4 + 4; } -static uint32_t thumb32_ldr_pc_address(uint32_t address) +static uint32_t +thumb32_ldr_pc_address(uint32_t address) { - return (address & 0xFFFFFFFC) + (*(uint16_t*) (address+2) & 0xFFF) + 4; + return (address & 0xFFFFFFFC) + (*(uint16_t*) (address + 2) & 0xFFF) + 4; } /* Extract the current ITSTATE from the CPSR */ -static uint32_t get_itstate(uint32_t cpsr) +static uint32_t +get_itstate(uint32_t cpsr) { return - ((cpsr & 0x06000000) >> 25) | - ((cpsr & 0x0000FC00) >> 8); + ((cpsr & 0x06000000) >> 25) | + ((cpsr & 0x0000FC00) >> 8); } -static void clear_itstate(uint32_t* cpsr) +static void +clear_itstate(uint32_t* cpsr) { *cpsr &= ~0x0600FC00; } @@ -136,8 +140,8 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) for (; fbt != NULL; fbt = fbt->fbtp_hashnext) { if ((uintptr_t) fbt->fbtp_patchpoint == addr) { if (0 == CPU->cpu_dtrace_invop_underway) { - CPU->cpu_dtrace_invop_underway = 1; /* Race not possible on - * this per-cpu state */ + CPU->cpu_dtrace_invop_underway = 1; /* Race not possible on + * this per-cpu state */ struct arm_saved_state* regs = (struct arm_saved_state*) stack; uintptr_t stack4 = *((uintptr_t*) regs->sp); @@ -149,7 +153,7 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) * most of the time we can't do that successfully anyway. * Instead, we just panic now so we fail fast. */ - panic("dtrace: fbt: The probe at %08x was called from FIQ_MODE",(unsigned) addr); + panic("dtrace: fbt: The probe at %08x was called from FIQ_MODE", (unsigned) addr); } /* @@ -158,28 +162,29 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) */ uint32_t itstate = get_itstate(regs->cpsr); if ((itstate & 0x7) != 0) { - panic("dtrace: fbt: Instruction stream error: Middle of IT block at %08x",(unsigned) addr); + panic("dtrace: fbt: Instruction stream error: Middle of IT block at %08x", (unsigned) addr); } if (fbt->fbtp_roffset == 0) { /* - We need the frames to set up the backtrace, but we won't have the frame pointers - until after the instruction is emulated. So here we calculate the address of the - frame pointer from the saved instruction and put it in the stack. Yes, we end up - repeating this work again when we emulate the instruction. - - This assumes that the frame area is immediately after the saved reg storage! - */ + * We need the frames to set up the backtrace, but we won't have the frame pointers + * until after the instruction is emulated. So here we calculate the address of the + * frame pointer from the saved instruction and put it in the stack. Yes, we end up + * repeating this work again when we emulate the instruction. + * + * This assumes that the frame area is immediately after the saved reg storage! 
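/*
 * Why get_itstate() above shuffles bits: ARMv7 stores ITSTATE split across
 * the CPSR, IT[1:0] in bits 26-25 and IT[7:2] in bits 15-10. A nonzero
 * low three bits of the reassembled value is the "middle of an IT block"
 * condition that fbt_invop() panics on. A standalone restatement:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
in_it_block(uint32_t cpsr)
{
        uint32_t it = ((cpsr & 0x06000000u) >> 25) | ((cpsr & 0x0000FC00u) >> 8);

        return (it & 0x7) != 0;
}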
+ */ uint32_t offset = ((uint32_t) regs) + sizeof(struct arm_saved_state); #if __ARM_VFP__ /* Match the stack alignment required for arm_vfpsaved_state */ offset &= ~(VFPSAVE_ALIGN_DTRACE - 1); offset += VFPSAVE_ALIGN_DTRACE + sizeof(struct arm_vfpsaved_state); #endif /* __ARM_VFP__ */ - if (FBT_IS_THUMB_SET_R7(fbt->fbtp_savedval)) + if (FBT_IS_THUMB_SET_R7(fbt->fbtp_savedval)) { *((uint32_t*) offset) = regs->sp + FBT_THUMB_SET_R7_OFFSET(fbt->fbtp_savedval); - else + } else { *((uint32_t*) offset) = regs->sp; + } CPU->cpu_dtrace_caller = regs->lr; dtrace_probe(fbt->fbtp_id, regs->r[0], regs->r[1], regs->r[2], regs->r[3], stack4); @@ -211,15 +216,15 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) } /* - On other architectures, we return a DTRACE constant to let the callback function - know what was replaced. On the ARM, since the function prologue/epilogue machine code - can vary, we need the actual bytes of the instruction, so return the savedval instead. - */ - return (fbt->fbtp_savedval); + * On other architectures, we return a DTRACE constant to let the callback function + * know what was replaced. On the ARM, since the function prologue/epilogue machine code + * can vary, we need the actual bytes of the instruction, so return the savedval instead. + */ + return fbt->fbtp_savedval; } } - return (0); + return 0; } #define IS_USER_TRAP(regs) (((regs)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) @@ -228,10 +233,10 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) kern_return_t fbt_perfCallback( - int trapno, - struct arm_saved_state * regs, - __unused int unused1, - __unused int unused2) + int trapno, + struct arm_saved_state * regs, + __unused int unused1, + __unused int unused2) { #pragma unused (unused1) #pragma unused (unused2) @@ -243,25 +248,25 @@ fbt_perfCallback( oldlevel = ml_set_interrupts_enabled(FALSE); - __asm__ volatile( - "Ldtrace_invop_callsite_pre_label:\n" - ".data\n" - ".private_extern _dtrace_invop_callsite_pre\n" - "_dtrace_invop_callsite_pre:\n" - " .long Ldtrace_invop_callsite_pre_label\n" - ".text\n" - ); + __asm__ volatile ( + "Ldtrace_invop_callsite_pre_label:\n" + ".data\n" + ".private_extern _dtrace_invop_callsite_pre\n" + "_dtrace_invop_callsite_pre:\n" + " .long Ldtrace_invop_callsite_pre_label\n" + ".text\n" + ); emul = dtrace_invop(regs->pc, (uintptr_t*) regs, regs->r[0]); - __asm__ volatile( - "Ldtrace_invop_callsite_post_label:\n" - ".data\n" - ".private_extern _dtrace_invop_callsite_post\n" - "_dtrace_invop_callsite_post:\n" - " .long Ldtrace_invop_callsite_post_label\n" - ".text\n" - ); + __asm__ volatile ( + "Ldtrace_invop_callsite_post_label:\n" + ".data\n" + ".private_extern _dtrace_invop_callsite_post\n" + "_dtrace_invop_callsite_post:\n" + " .long Ldtrace_invop_callsite_post_label\n" + ".text\n" + ); /* * The following emulation code does not execute properly if we are in the middle of @@ -271,7 +276,7 @@ fbt_perfCallback( */ uint32_t itstate = get_itstate(regs->cpsr); if (itstate != 0) { - panic("dtrace: fbt: Not emulated: Middle of IT block at %08x",(unsigned) regs->pc); + panic("dtrace: fbt: Not emulated: Middle of IT block at %08x", (unsigned) regs->pc); } if (emul == DTRACE_INVOP_NOP) { @@ -334,11 +339,11 @@ fbt_perfCallback( void fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolName, machine_inst_t* symbolStart, machine_inst_t *instrHigh) { - unsigned int j; - int doenable = 0; - dtrace_id_t thisid; + unsigned int j; + int doenable = 0; + dtrace_id_t thisid; - fbt_probe_t *newfbt, *retfbt, 
*entryfbt; + fbt_probe_t *newfbt, *retfbt, *entryfbt; machine_inst_t *instr, *pushinstr = NULL, *limit, theInstr; int foundPushLR, savedRegs; @@ -357,8 +362,7 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam savedRegs = -1; limit = (machine_inst_t *)instrHigh; for (j = 0, instr = symbolStart, theInstr = 0; - (j < 8) && instr < instrHigh; j++, instr++) - { + (j < 8) && instr < instrHigh; j++, instr++) { theInstr = *instr; if (FBT_IS_THUMB_PUSH_LR(theInstr)) { foundPushLR = 1; @@ -366,17 +370,21 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam savedRegs = FBT_THUMB_STACK_REGS(theInstr); pushinstr = instr; } - if (foundPushLR && (FBT_IS_THUMB_SET_R7(theInstr) || FBT_IS_THUMB_MOV_SP_TO_R7(theInstr))) + if (foundPushLR && (FBT_IS_THUMB_SET_R7(theInstr) || FBT_IS_THUMB_MOV_SP_TO_R7(theInstr))) { /* Guard against a random setting of r7 from sp, we make sure we found the push first */ break; - if (FBT_IS_THUMB_BX_REG(theInstr)) /* We've gone too far, bail. */ + } + if (FBT_IS_THUMB_BX_REG(theInstr)) { /* We've gone too far, bail. */ break; - if (FBT_IS_THUMB_POP_PC(theInstr)) /* We've gone too far, bail. */ + } + if (FBT_IS_THUMB_POP_PC(theInstr)) { /* We've gone too far, bail. */ break; + } /* Check for 4 byte thumb instruction */ - if (dtrace_instr_size(theInstr,1) == 4) + if (dtrace_instr_size(theInstr, 1) == 4) { instr++; + } } if (!(foundPushLR && (FBT_IS_THUMB_SET_R7(theInstr) || FBT_IS_THUMB_MOV_SP_TO_R7(theInstr)))) { @@ -386,7 +394,7 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam thisid = dtrace_probe_lookup(fbt_id, modname, symbolName, FBT_ENTRY); newfbt = kmem_zalloc(sizeof(fbt_probe_t), KM_SLEEP); newfbt->fbtp_next = NULL; - strlcpy( (char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS ); + strlcpy((char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS ); if (thisid != 0) { /* @@ -397,11 +405,12 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam * fire, (as indicated by the current patched value), then * we want to enable this newfbt on the spot. */ - entryfbt = dtrace_probe_arg (fbt_id, thisid); - ASSERT (entryfbt != NULL); - for(; entryfbt != NULL; entryfbt = entryfbt->fbtp_next) { - if (entryfbt->fbtp_currentval == entryfbt->fbtp_patchval) + entryfbt = dtrace_probe_arg(fbt_id, thisid); + ASSERT(entryfbt != NULL); + for (; entryfbt != NULL; entryfbt = entryfbt->fbtp_next) { + if (entryfbt->fbtp_currentval == entryfbt->fbtp_patchval) { doenable++; + } if (entryfbt->fbtp_next == NULL) { entryfbt->fbtp_next = newfbt; @@ -409,8 +418,7 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam break; } } - } - else { + } else { /* * The dtrace_probe did not previously exist, so we * create it and hook in the newfbt. Since the probe is @@ -430,8 +438,9 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam newfbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)]; fbt_probetab[FBT_ADDR2NDX(instr)] = newfbt; - if (doenable) + if (doenable) { fbt_enable(NULL, newfbt->fbtp_id, newfbt); + } /* * The fbt entry chain is in place, one entry point per symbol. @@ -440,7 +449,7 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam * Here we find the end of the fbt return chain. 
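/*
 * Shape of the entry-probe chain walk above: the new probe is enabled on
 * the spot iff some sibling on the chain is currently patched in, i.e. its
 * live value equals the patch value. demo_probe is a minimal stand-in for
 * the fbt_probe_t fields used, not the real structure.
 */
#include <stddef.h>
#include <stdint.h>

struct demo_probe {
        struct demo_probe *next;
        uint16_t currentval;
        uint16_t patchval;
};

static int
chain_needs_enable(const struct demo_probe *p)
{
        for (; p != NULL; p = p->next) {
                if (p->currentval == p->patchval) {
                        return 1;
                }
        }
        return 0;
}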
*/ - doenable=0; + doenable = 0; thisid = dtrace_probe_lookup(fbt_id, modname, symbolName, FBT_RETURN); @@ -451,16 +460,17 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam * (as indicated by the currrent patched value), then * we want to enable any new fbts on the spot. */ - retfbt = dtrace_probe_arg (fbt_id, thisid); + retfbt = dtrace_probe_arg(fbt_id, thisid); ASSERT(retfbt != NULL); - for (; retfbt != NULL; retfbt = retfbt->fbtp_next) { - if (retfbt->fbtp_currentval == retfbt->fbtp_patchval) + for (; retfbt != NULL; retfbt = retfbt->fbtp_next) { + if (retfbt->fbtp_currentval == retfbt->fbtp_patchval) { doenable++; - if(retfbt->fbtp_next == NULL) + } + if (retfbt->fbtp_next == NULL) { break; + } } - } - else { + } else { doenable = 0; retfbt = NULL; } @@ -472,8 +482,9 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam */ instr = pushinstr + 1; again: - if (instr >= limit) + if (instr >= limit) { return; + } /* * We (desperately) want to avoid erroneously instrumenting a @@ -501,8 +512,9 @@ again: /* Walked onto the start of the next routine? If so, bail out from this function */ if (FBT_IS_THUMB_PUSH_LR(theInstr)) { - if (!retfbt) - kprintf("dtrace: fbt: No return probe for %s, walked to next routine at %08x\n",symbolName,(unsigned)instr); + if (!retfbt) { + kprintf("dtrace: fbt: No return probe for %s, walked to next routine at %08x\n", symbolName, (unsigned)instr); + } return; } @@ -511,22 +523,25 @@ again: * of the function. */ if (FBT_IS_THUMB_LDR_PC(theInstr)) { uint32_t newlimit = thumb_ldr_pc_address((uint32_t) instr); - if (newlimit < (uint32_t) limit) + if (newlimit < (uint32_t) limit) { limit = (machine_inst_t*) newlimit; + } } - if ((instr+1) < limit && FBT_IS_THUMB32_LDR_PC(*instr,*(instr+1))) { + if ((instr + 1) < limit && FBT_IS_THUMB32_LDR_PC(*instr, *(instr + 1))) { uint32_t newlimit = thumb32_ldr_pc_address((uint32_t) instr); - if (newlimit < (uint32_t) limit) + if (newlimit < (uint32_t) limit) { limit = (machine_inst_t*) newlimit; + } } /* Look for the 1. pop { ..., pc } or 2. pop { ..., r7 } ... bx reg or 3. ldmia.w sp!, { ..., r7, lr } ... bx reg */ if (!FBT_IS_THUMB_POP_PC(theInstr) && !FBT_IS_THUMB_POP_R7(theInstr) && - !FBT_IS_THUMB32_POP_R7LR(theInstr,*(instr+1))) { + !FBT_IS_THUMB32_POP_R7LR(theInstr, *(instr + 1))) { instr++; - if (dtrace_instr_size(theInstr,1) == 4) + if (dtrace_instr_size(theInstr, 1) == 4) { instr++; + } goto again; } @@ -535,21 +550,24 @@ again: /* What we're popping doesn't match what we're pushing, assume that we've * gone too far in the function. Bail. 
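/*
 * Two guards from the return-probe scan around here, as predicates: a pop
 * only ends the instrumented function if it restores the same r0-r7 set
 * the prologue pushed (FBT_THUMB_STACK_REGS masks the low byte), and any
 * pc-relative load target below the current limit marks a literal pool the
 * scan must not walk into. Hypothetical helpers for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
pop_matches_push(uint16_t push_op, uint16_t pop_op)
{
        return (push_op & 0x00FF) == (pop_op & 0x00FF);
}

static uint32_t
shrink_limit(uint32_t limit, uint32_t literal_addr)
{
        return (literal_addr < limit) ? literal_addr : limit;
}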
*/ - kprintf("dtrace: fbt: No return probe for %s, popped regs don't match at %08x\n",symbolName,(unsigned)instr); + kprintf("dtrace: fbt: No return probe for %s, popped regs don't match at %08x\n", symbolName, (unsigned)instr); return; } } else { /* Scan ahead for the bx */ for (j = 0; (j < 4) && (instr < limit); j++, instr++) { theInstr = *instr; - if (FBT_IS_THUMB_BX_REG(theInstr)) + if (FBT_IS_THUMB_BX_REG(theInstr)) { break; - if (dtrace_instr_size(theInstr,1) == 4) + } + if (dtrace_instr_size(theInstr, 1) == 4) { instr++; + } } - if (!FBT_IS_THUMB_BX_REG(theInstr)) + if (!FBT_IS_THUMB_BX_REG(theInstr)) { return; + } } /* @@ -558,7 +576,7 @@ again: newfbt = kmem_zalloc(sizeof(fbt_probe_t), KM_SLEEP); newfbt->fbtp_next = NULL; - strlcpy( (char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS ); + strlcpy((char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS ); if (retfbt == NULL) { newfbt->fbtp_id = dtrace_probe_create(fbt_id, modname, @@ -583,10 +601,10 @@ again: newfbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)]; fbt_probetab[FBT_ADDR2NDX(instr)] = newfbt; - if (doenable) + if (doenable) { fbt_enable(NULL, newfbt->fbtp_id, newfbt); + } instr++; goto again; } - diff --git a/bsd/dev/arm/kern_machdep.c b/bsd/dev/arm/kern_machdep.c index 2c27afaf2..695c74ff0 100644 --- a/bsd/dev/arm/kern_machdep.c +++ b/bsd/dev/arm/kern_machdep.c @@ -10,14 +10,14 @@ * Machine-specific kernel routines. */ -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include #if __arm64__ -extern int bootarg_no64exec; /* bsd_init.c */ +extern int bootarg_no64exec; /* bsd_init.c */ static cpu_subtype_t cpu_subtype32(void); #endif /* __arm64__ */ @@ -39,19 +39,19 @@ cpu_subtype32() #endif /* __arm64__*/ /********************************************************************** - * Routine: grade_binary() - * - * Function: Return a relative preference for exectypes and - * execsubtypes in fat executable files. The higher the - * grade, the higher the preference. A grade of 0 means - * not acceptable. - **********************************************************************/ +* Routine: grade_binary() +* +* Function: Return a relative preference for exectypes and +* execsubtypes in fat executable files. The higher the +* grade, the higher the preference. A grade of 0 means +* not acceptable. +**********************************************************************/ int grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype) { #if __arm64__ cpu_subtype_t hostsubtype = - (exectype & CPU_ARCH_ABI64) ? cpu_subtype() : cpu_subtype32(); + (exectype & CPU_ARCH_ABI64) ? 
cpu_subtype() : cpu_subtype32(); #else cpu_subtype_t hostsubtype = cpu_subtype(); #endif /* __arm64__ */ @@ -59,7 +59,9 @@ grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype) switch (exectype) { #if __arm64__ case CPU_TYPE_ARM64: - if (bootarg_no64exec) return 0; + if (bootarg_no64exec) { + return 0; + } switch (hostsubtype) { case CPU_SUBTYPE_ARM64_V8: @@ -87,10 +89,10 @@ grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype) } goto v7s; - /* - * For Swift and later, we prefer to run a swift slice, but fall back - * to v7 as Cortex A9 errata should not apply - */ + /* + * For Swift and later, we prefer to run a swift slice, but fall back + * to v7 as Cortex A9 errata should not apply + */ v7s: case CPU_SUBTYPE_ARM_V7S: switch (execsubtype) { @@ -107,7 +109,7 @@ v7s: case CPU_SUBTYPE_ARM_V7K: return 6; } - break; + break; /* * For Cortex A9, we prefer the A9 slice, but will run v7 albeit @@ -126,21 +128,21 @@ v7: case CPU_SUBTYPE_ARM_V7: return 5; } - // fall through... + // fall through... case CPU_SUBTYPE_ARM_V6: switch (execsubtype) { case CPU_SUBTYPE_ARM_V6: return 4; } - // fall through... + // fall through... case CPU_SUBTYPE_ARM_V5TEJ: switch (execsubtype) { case CPU_SUBTYPE_ARM_V5TEJ: return 3; } - // fall through + // fall through case CPU_SUBTYPE_ARM_V4T: switch (execsubtype) { @@ -181,9 +183,9 @@ pie_required(cpu_type_t exectype, cpu_subtype_t execsubtype) case CPU_TYPE_ARM: switch (execsubtype) { - case CPU_SUBTYPE_ARM_V7K: - return TRUE; - } + case CPU_SUBTYPE_ARM_V7K: + return TRUE; + } break; } return FALSE; diff --git a/bsd/dev/arm/km.c b/bsd/dev/arm/km.c index cd77c9fb1..92a26ca84 100644 --- a/bsd/dev/arm/km.c +++ b/bsd/dev/arm/km.c @@ -3,9 +3,9 @@ */ /* * Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. - * + * * km.m - kernel keyboard/monitor module, procedural interface. - * + * * HISTORY */ #include @@ -15,9 +15,9 @@ #include #include #include -#include /* for kmopen */ +#include /* for kmopen */ #include -#include /* for kmopen */ +#include /* for kmopen */ #include #include #include @@ -31,8 +31,8 @@ extern void cnputsusr(char *, int); extern int cngetc(void); -void kminit(void); -void cons_cinput(char ch); +void kminit(void); +void cons_cinput(char ch); /* * 'Global' variables, shared only by this file and conf.c. @@ -59,7 +59,7 @@ void kminit(void) { km_tty[0] = ttymalloc(); - km_tty[0]->t_dev = makedev(12, 0); + km_tty[0]->t_dev = makedev(12, 0); initialized = 1; } @@ -75,8 +75,9 @@ kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) int ret; unit = minor(dev); - if (unit >= 1) - return (ENXIO); + if (unit >= 1) { + return ENXIO; + } tp = km_tty[unit]; @@ -99,9 +100,9 @@ kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) goto out; } - tp->t_state |= TS_CARR_ON; /* lie and say carrier exists and is - * on. */ - ret = ((*linesw[tp->t_line].l_open) (dev, tp)); + tp->t_state |= TS_CARR_ON; /* lie and say carrier exists and is + * on. 
*/ + ret = ((*linesw[tp->t_line].l_open)(dev, tp)); { PE_Video video; wp = &tp->t_winsize; @@ -112,10 +113,11 @@ kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) wp->ws_xpixel = 8; wp->ws_ypixel = 16; - tty_unlock(tp); /* XXX race window */ + tty_unlock(tp); /* XXX race window */ - if (flag & O_POPUP) + if (flag & O_POPUP) { PE_initialize_console(0, kPETextScreen); + } bzero(&video, sizeof(video)); PE_current_console(&video); @@ -151,7 +153,7 @@ kmclose(dev_t dev, int flag, __unused int mode, __unused proc_t p) ttyclose(tp); tty_unlock(tp); - return (ret); + return ret; } int @@ -164,7 +166,7 @@ kmread(dev_t dev, struct uio * uio, int ioflag) ret = (*linesw[tp->t_line].l_read)(tp, uio, ioflag); tty_unlock(tp); - return (ret); + return ret; } int @@ -177,7 +179,7 @@ kmwrite(dev_t dev, struct uio * uio, int ioflag) ret = (*linesw[tp->t_line].l_write)(tp, uio, ioflag); tty_unlock(tp); - return (ret); + return ret; } int @@ -203,36 +205,37 @@ kmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, proc_t p) error = EINVAL; break; - /* Bodge in the CLOCAL flag as the km device is always local */ + /* Bodge in the CLOCAL flag as the km device is always local */ case TIOCSETA_32: case TIOCSETAW_32: case TIOCSETAF_32: - { - struct termios32 *t = (struct termios32 *)data; - t->c_cflag |= CLOCAL; - /* No Break */ - } + { + struct termios32 *t = (struct termios32 *)data; + t->c_cflag |= CLOCAL; + /* No Break */ + } goto fallthrough; case TIOCSETA_64: case TIOCSETAW_64: case TIOCSETAF_64: - { - struct user_termios *t = (struct user_termios *)data; - t->c_cflag |= CLOCAL; - /* No Break */ - } + { + struct user_termios *t = (struct user_termios *)data; + t->c_cflag |= CLOCAL; + /* No Break */ + } fallthrough: default: - error = (*linesw[tp->t_line].l_ioctl) (tp, cmd, data, flag, p); - if (ENOTTY != error) + error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p); + if (ENOTTY != error) { break; + } error = ttioctl_locked(tp, cmd, data, flag, p); break; } tty_unlock(tp); - return (error); + return error; } @@ -249,17 +252,18 @@ fallthrough: * assumptions here, this routine should be static (and * inlined, given there is only one call site). */ -int +int kmputc(__unused dev_t dev, char c) { - if(!disableConsoleOutput && initialized) { + if (!disableConsoleOutput && initialized) { /* OCRNL */ - if(c == '\n') + if (c == '\n') { cnputcusr('\r'); + } cnputcusr(c); } - return (0); + return 0; } @@ -267,7 +271,7 @@ kmputc(__unused dev_t dev, char c) * Callouts from linesw. */ -#define KM_LOWAT_DELAY ((ns_time_t)1000) +#define KM_LOWAT_DELAY ((ns_time_t)1000) /* * t_oproc for this driver; called from within the line discipline @@ -277,10 +281,12 @@ kmputc(__unused dev_t dev, char c) static void kmstart(struct tty *tp) { - if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) + if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) { goto out; - if (tp->t_outq.c_cc == 0) + } + if (tp->t_outq.c_cc == 0) { goto out; + } tp->t_state |= TS_BUSY; if (tp->t_outq.c_cc > tp->t_lowat) { /* @@ -301,7 +307,7 @@ kmstart(struct tty *tp) return; out: - (*linesw[tp->t_line].l_start) (tp); + (*linesw[tp->t_line].l_start)(tp); return; } @@ -337,15 +343,16 @@ kmtimeout(void *arg) static int kmoutput(struct tty * tp) { - unsigned char buf[80]; /* buffer; limits output per call */ - unsigned char *cp; - int cc = -1; + unsigned char buf[80]; /* buffer; limits output per call */ + unsigned char *cp; + int cc = -1; /* While there is data available to be output... 
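/*
 * The manual OCRNL mapping kmputc() applies above, in miniature: the
 * console always receives "\r\n" for a newline. putchar() stands in for
 * cnputcusr() purely for the sketch.
 */
#include <stdio.h>

static void
console_putc(char c)
{
        if (c == '\n') {
                putchar('\r');
        }
        putchar(c);
}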
*/ while (tp->t_outq.c_cc > 0) { cc = ndqb(&tp->t_outq, 0); - if (cc == 0) + if (cc == 0) { break; + } /* * attempt to output as many characters as are available, * up to the available transfer buffer size. @@ -372,7 +379,7 @@ kmoutput(struct tty * tp) } tp->t_state &= ~TS_BUSY; /* Start the output processing for the line discipline */ - (*linesw[tp->t_line].l_start) (tp); + (*linesw[tp->t_line].l_start)(tp); return 0; } @@ -395,9 +402,9 @@ kmoutput(struct tty * tp) void cons_cinput(char ch) { - struct tty *tp = km_tty[0]; /* XXX */ + struct tty *tp = km_tty[0]; /* XXX */ tty_lock(tp); - (*linesw[tp->t_line].l_rint) (ch, tp); + (*linesw[tp->t_line].l_rint)(ch, tp); tty_unlock(tp); } diff --git a/bsd/dev/arm/munge.c b/bsd/dev/arm/munge.c index d98953ad2..65eb5a2ae 100644 --- a/bsd/dev/arm/munge.c +++ b/bsd/dev/arm/munge.c @@ -2,7 +2,7 @@ * Coyright (c) 2005-2015 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * For arm32 ABI where 64-bit types are aligned to even registers and * 64-bits on stack, we need to unpack registers differently. So * we use the mungers for that. Currently this is just ARMv7k. @@ -43,7 +43,7 @@ #include -/* +/* * Userspace args are in r0-r6, then r8, then stack unless this is an * indirect call in which case the syscall number is in r0 then args * are in registers r1-r6, then r8, then stack. 
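The register layout this comment describes can be made concrete. A minimal sketch, assuming stand-in types — toy_saved_state and toy_munge_wwl mirror the shape of arm_saved_state_t and munge_wwl() from later in this file, but are not the real declarations:

#include <stdint.h>
#include <string.h>

struct toy_saved_state { uint32_t r[13]; uint32_t sp; };

/* "wwl" = two 32-bit words then one 64-bit long; args[] receives the
 * long as two 32-bit halves, just as the real mungers produce it */
static int
toy_munge_wwl(const struct toy_saved_state *ss, uint32_t *args, int indirect)
{
        if (!indirect) {
                /* direct: args start at r0, and r2/r3 is already an
                 * even register pair, so no padding is needed */
                memcpy(args, ss->r, 4 * sizeof(uint32_t));
        } else {
                /* indirect: r0 carries the syscall number, so args
                 * shift to r1; the long then skips odd r3 and lands
                 * on the even r4/r5 pair */
                args[0] = ss->r[1];
                args[1] = ss->r[2];
                args[2] = ss->r[4];
                args[3] = ss->r[5];
        }
        return 0;
}

Arguments that overflow the registers are fetched with copyin() starting at sp + ARG_SP_BYTE_OFFSET (32 bytes), past the four registers the user-space syscall handler pushed, as marshal_no_pad() below shows.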
This is for mach and @@ -60,11 +60,11 @@ typedef enum { } style_t; #define DECLARE_AND_CAST(regs, args, ss, uu_args) const arm_saved_state_t *ss = (const arm_saved_state_t *)regs; \ - uint32_t *uu_args = (uint32_t *)args; + uint32_t *uu_args = (uint32_t *)args; -/* +/* * We start 32 bytes after sp since 4 registers are pushed onto the stack - * in the userspace syscall handler, and the first 4 stack argumnets are moved + * in the userspace syscall handler, and the first 4 stack argumnets are moved * into registers already */ #define ARG_SP_BYTE_OFFSET 32 @@ -101,9 +101,10 @@ marshal_no_pad(const arm_saved_state_t *ss, uint32_t *args, const uint32_t word_ /* stack */ if (word_count > copy_count) { error = copyin(ss->sp + ARG_SP_BYTE_OFFSET, - args, (word_count - copy_count) * sizeof(uint32_t)); - if (error) + args, (word_count - copy_count) * sizeof(uint32_t)); + if (error) { return error; + } } } return error; @@ -119,70 +120,70 @@ munge_w(const void *regs, void *args) return marshal_no_pad(regs, args, 1); } -int +int munge_ww(const void *regs, void *args) { return marshal_no_pad(regs, args, 2); } -int +int munge_www(const void *regs, void *args) { return marshal_no_pad(regs, args, 3); } -int +int munge_wwww(const void *regs, void *args) { return marshal_no_pad(regs, args, 4); } -int +int munge_wwwww(const void *regs, void *args) { return marshal_no_pad(regs, args, 5); } -int +int munge_wwwwww(const void *regs, void *args) { return marshal_no_pad(regs, args, 6); } -int +int munge_wwwwwww(const void *regs, void *args) { return marshal_no_pad(regs, args, 7); } -int +int munge_wwwwwwww(const void *regs, void *args) { return marshal_no_pad(regs, args, 8); } -int +int munge_wwl(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 3); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w uu_args[1] = ss->r[2]; // w uu_args[2] = ss->r[4]; // l (longs are aligned to even registers for armv7k, so skip r3) - uu_args[3] = ss->r[5]; // + uu_args[3] = ss->r[5]; // return 0; } } -int +int munge_wwlw(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 5); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); int error = munge_wwl(regs, args); // wwl @@ -194,11 +195,11 @@ munge_wwlw(const void *regs, void *args) int munge_wwlww(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { // the long-long here is aligned on an even register // so there shouldn't be any padding return marshal_no_pad(regs, args, 6); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); int error = munge_wwlw(regs, args); // wwlw @@ -207,21 +208,22 @@ munge_wwlww(const void *regs, void *args) } } -int +int munge_wwlll(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 8); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); int error = munge_wwl(regs, args); // wwl - if (error) + if (error) { return error; + } uu_args[4] = ss->r[6]; // l uu_args[5] = ss->r[8]; // return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // l - &(uu_args[6]), 2 * sizeof(uint32_t)); + &(uu_args[6]), 2 * sizeof(uint32_t)); } } @@ -234,9 +236,9 @@ munge_wwllww(const void *regs, void *args) int munge_wl(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if 
(REGS_TO_STYLE(regs) == kDirect) { memcpy(args, regs, 4 * sizeof(uint32_t)); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -249,9 +251,9 @@ munge_wl(const void *regs, void *args) int munge_wlw(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) - memcpy(args, regs, 5 * sizeof(uint32_t)); - else { + if (REGS_TO_STYLE(regs) == kDirect) { + memcpy(args, regs, 5 * sizeof(uint32_t)); + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -265,9 +267,9 @@ munge_wlw(const void *regs, void *args) int munge_wlww(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { memcpy(args, regs, 6 * sizeof(uint32_t)); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -287,17 +289,16 @@ munge_wlwwwll(const void *regs, void *args) if (REGS_TO_STYLE(regs) == kDirect) { memcpy(args, regs, 7 * sizeof(uint32_t)); // wlwww return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // ll - uu_args + 8, 4 * sizeof(uint32_t)); - } - else { + uu_args + 8, 4 * sizeof(uint32_t)); + } else { uu_args[0] = ss->r[1]; // w uu_args[2] = ss->r[2]; // l - uu_args[3] = ss->r[3]; // + uu_args[3] = ss->r[3]; // uu_args[4] = ss->r[4]; // w uu_args[5] = ss->r[5]; // w uu_args[6] = ss->r[6]; // w return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // ll - uu_args + 8, 4 * sizeof(uint32_t)); + uu_args + 8, 4 * sizeof(uint32_t)); } } @@ -309,30 +310,29 @@ munge_wlwwwllw(const void *regs, void *args) if (REGS_TO_STYLE(regs) == kDirect) { memcpy(args, regs, 7 * sizeof(uint32_t)); // wlwww return copyin(ss->sp + ARG_SP_BYTE_OFFSET, - uu_args + 8, 5 * sizeof(uint32_t)); // ll - } - else { + uu_args + 8, 5 * sizeof(uint32_t)); // ll + } else { uu_args[0] = ss->r[1]; // w uu_args[2] = ss->r[2]; // l - uu_args[3] = ss->r[3]; // + uu_args[3] = ss->r[3]; // uu_args[4] = ss->r[4]; // w uu_args[5] = ss->r[5]; // w uu_args[6] = ss->r[6]; // w return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // llw - uu_args + 8, 5 * sizeof(uint32_t)); + uu_args + 8, 5 * sizeof(uint32_t)); } } -int +int munge_wlwwlwlw(const void *regs, void *args) { DECLARE_AND_CAST(regs, args, ss, uu_args); - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { uu_args[0] = ss->r[0]; // w - else + } else { uu_args[0] = ss->r[1]; // w - + } uu_args[2] = ss->r[2]; // l uu_args[3] = ss->r[3]; // uu_args[4] = ss->r[4]; // w @@ -340,15 +340,15 @@ munge_wlwwlwlw(const void *regs, void *args) uu_args[6] = ss->r[6]; // l uu_args[7] = ss->r[8]; // return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // wlw - uu_args + 8, 5 * sizeof(uint32_t)); + uu_args + 8, 5 * sizeof(uint32_t)); } -int +int munge_wll(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) - memcpy(args, regs, 6 * sizeof(uint32_t)); - else { + if (REGS_TO_STYLE(regs) == kDirect) { + memcpy(args, regs, 6 * sizeof(uint32_t)); + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -360,7 +360,7 @@ munge_wll(const void *regs, void *args) return 0; } -int +int munge_wlll(const void *regs, void *args) { DECLARE_AND_CAST(regs, args, ss, uu_args); @@ -371,14 +371,14 @@ munge_wlll(const void *regs, void *args) return error; } -int +int munge_wllll(const void *regs, void *args) { DECLARE_AND_CAST(regs, args, ss, uu_args); munge_wlll(regs, args); // wlll return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // l - uu_args + 8, 2 * sizeof(uint32_t)); + uu_args + 8, 2 * sizeof(uint32_t)); } int @@ -387,24 +387,25 @@ 
munge_wllww(const void *regs, void *args) return munge_wlll(regs, args); } -int +int munge_wllwwll(const void *regs, void *args) { DECLARE_AND_CAST(regs, args, ss, uu_args); int error = munge_wlll(regs, args); // wllww - if (error) + if (error) { return error; + } return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // ll - uu_args + 8, 4 * sizeof(uint32_t)); + uu_args + 8, 4 * sizeof(uint32_t)); } -int +int munge_wwwlw(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { memcpy(args, regs, 7 * sizeof(uint32_t)); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -420,9 +421,9 @@ munge_wwwlw(const void *regs, void *args) int munge_wwwlww(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return munge_wlll(regs, args); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -435,13 +436,13 @@ munge_wwwlww(const void *regs, void *args) return 0; } } - -int + +int munge_wwwl(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return munge_wll(regs, args); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -453,12 +454,12 @@ munge_wwwl(const void *regs, void *args) } } -int +int munge_wwwwl(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 6); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -474,25 +475,26 @@ munge_wwwwl(const void *regs, void *args) int munge_wwwwlw(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 7); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); int error = munge_wwwwl(regs, args); // wwwwl - if (error) + if (error) { return error; + } return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // w - uu_args + 6, sizeof(uint32_t)); + uu_args + 6, sizeof(uint32_t)); } } -int +int munge_wwwwwl(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return munge_wlll(regs, args); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -506,19 +508,20 @@ munge_wwwwwl(const void *regs, void *args) } } -int +int munge_wwwwwlww(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return munge_wllll(regs, args); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); int error = munge_wwwwwl(regs, args); // wwwwwl - if (error) + if (error) { return error; + } return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // ww - uu_args + 8, 2 * sizeof(uint32_t)); + uu_args + 8, 2 * sizeof(uint32_t)); } } @@ -528,10 +531,11 @@ munge_wwwwwllw(const void *regs, void *args) DECLARE_AND_CAST(regs, args, ss, uu_args); int error = munge_wwwwwl(regs, args); // wwwwwl - if (error) + if (error) { return error; + } return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // lw - uu_args + 8, 3 * sizeof(uint32_t)); + uu_args + 8, 3 * sizeof(uint32_t)); } int @@ -542,17 +546,18 @@ munge_wwwwwlll(const void *regs, void *args) if (REGS_TO_STYLE(regs) == kDirect) { error = munge_wlll(regs, args); // wlll - if (error) + if (error) { return error; + } return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // ll - uu_args + 8, 4 * sizeof(uint32_t)); - } - else { + uu_args + 8, 4 
* sizeof(uint32_t)); + } else { error = munge_wwwwwl(regs, args); // wwwwwl - if (error) + if (error) { return error; + } return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // ll - uu_args + 8, 4 * sizeof(uint32_t)); + uu_args + 8, 4 * sizeof(uint32_t)); } } @@ -561,52 +566,52 @@ munge_wwwwwwl(const void *regs, void *args) { munge_wwlll(regs, args); - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 8); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); memcpy(args, &(ss->r[1]), 6 * sizeof(uint32_t)); // wwwwww return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // l - &(uu_args[6]), 2 * sizeof(uint32_t)); + &(uu_args[6]), 2 * sizeof(uint32_t)); } } -int +int munge_wwwwwwlw(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 9); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); memcpy(args, &(ss->r[1]), 6 * sizeof(uint32_t)); // wwwwww return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // lw - &(uu_args[6]), 3 * sizeof(uint32_t)); + &(uu_args[6]), 3 * sizeof(uint32_t)); } } - -int + +int munge_wwwwwwll(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 10); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); memcpy(args, &(ss->r[1]), 6 * sizeof(uint32_t)); // wwwwww return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // ll - &(uu_args[6]), 4 * sizeof(uint32_t)); + &(uu_args[6]), 4 * sizeof(uint32_t)); } } -int +int munge_wsw(const void *regs, void *args) { return munge_wlw(regs, args); } -int +int munge_wws(const void *regs, void *args) { return munge_wwl(regs, args); @@ -624,12 +629,12 @@ munge_wwwsw(const void *regs, void *args) return munge_wwwlw(regs, args); } -int +int munge_llllll(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 12); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[2]; // l @@ -639,75 +644,79 @@ munge_llllll(const void *regs, void *args) uu_args[4] = ss->r[6]; // l uu_args[5] = ss->r[8]; // return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // lll - uu_args + 6, 6 * sizeof(uint32_t)); + uu_args + 6, 6 * sizeof(uint32_t)); } } -int +int munge_ll(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 4); - else + } else { memcpy(args, (const uint32_t*)regs + 2, 4 * sizeof(uint32_t)); + } return 0; } -int +int munge_l(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 2); - else + } else { memcpy(args, (const uint32_t*)regs + 2, 2 * sizeof(uint32_t)); + } return 0; } -int +int munge_lw(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 3); - else + } else { memcpy(args, (const uint32_t*)regs + 2, 3 * sizeof(uint32_t)); + } return 0; } int munge_lwww(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 5); - else + } else { memcpy(args, (const uint32_t*)regs + 2, 5 * sizeof(uint32_t)); + } return 0; } -int +int munge_lwwwwwww(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return 
marshal_no_pad(regs, args, 9); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[2]; // l - uu_args[1] = ss->r[3]; // + uu_args[1] = ss->r[3]; // uu_args[2] = ss->r[4]; // w uu_args[3] = ss->r[5]; // w uu_args[4] = ss->r[6]; // w uu_args[5] = ss->r[8]; // w return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // www - uu_args + 6, 3 * sizeof(uint32_t)); + uu_args + 6, 3 * sizeof(uint32_t)); } } int munge_wwlwww(const void *regs, void *args) { - if (REGS_TO_STYLE(regs) == kDirect) + if (REGS_TO_STYLE(regs) == kDirect) { return marshal_no_pad(regs, args, 7); - else { + } else { DECLARE_AND_CAST(regs, args, ss, uu_args); uu_args[0] = ss->r[1]; // w @@ -717,9 +726,8 @@ munge_wwlwww(const void *regs, void *args) uu_args[4] = ss->r[6]; // w uu_args[5] = ss->r[8]; // w return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // w - uu_args + 6, sizeof(uint32_t)); + uu_args + 6, sizeof(uint32_t)); } - } int @@ -728,9 +736,9 @@ munge_wlwwwl(const void *regs, void *args) DECLARE_AND_CAST(regs, args, ss, uu_args); if (REGS_TO_STYLE(regs) == kDirect) { - memcpy(args, regs, 7 * sizeof(uint32_t)); // wlwww + memcpy(args, regs, 7 * sizeof(uint32_t)); // wlwww return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // l - uu_args + 8, 2 * sizeof(uint32_t)); + uu_args + 8, 2 * sizeof(uint32_t)); } else { uu_args[0] = ss->r[1]; // w uu_args[2] = ss->r[2]; // l @@ -739,19 +747,19 @@ munge_wlwwwl(const void *regs, void *args) uu_args[5] = ss->r[5]; // w uu_args[6] = ss->r[6]; // w return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // l - uu_args + 8, 2 * sizeof(uint32_t)); + uu_args + 8, 2 * sizeof(uint32_t)); } } int munge_wwlwwwl(const void *regs, void *args) { - DECLARE_AND_CAST(regs, args, ss, uu_args); + DECLARE_AND_CAST(regs, args, ss, uu_args); if (REGS_TO_STYLE(regs) == kDirect) { - memcpy(args, regs, 7 * sizeof(uint32_t)); // wwlwww + memcpy(args, regs, 7 * sizeof(uint32_t)); // wwlwww return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // l - uu_args + 8, 2 * sizeof(uint32_t)); + uu_args + 8, 2 * sizeof(uint32_t)); } else { uu_args[0] = ss->r[1]; // w uu_args[1] = ss->r[2]; // w @@ -760,7 +768,7 @@ munge_wwlwwwl(const void *regs, void *args) uu_args[4] = ss->r[6]; // w uu_args[5] = ss->r[8]; // w return copyin(ss->sp + ARG_SP_BYTE_OFFSET, // wl - uu_args + 6, 4 * sizeof(uint32_t)); + uu_args + 6, 4 * sizeof(uint32_t)); } } diff --git a/bsd/dev/arm/pci_device.h b/bsd/dev/arm/pci_device.h index 32844c3ce..f624a4215 100644 --- a/bsd/dev/arm/pci_device.h +++ b/bsd/dev/arm/pci_device.h @@ -3,11 +3,11 @@ */ /* * @OSF_FREE_COPYRIGHT@ - * + * */ /* * HISTORY - * + * * Revision 1.2 1998/09/30 21:20:44 wsanchez * Merged in IntelMerge1 (mburg: Intel support) * @@ -18,17 +18,17 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.6.2 1995/12/15 10:52:14 bernadat - * Split dev and vendor ids. - * [95/11/15 bernadat] + * Split dev and vendor ids. + * [95/11/15 bernadat] * * Revision 1.1.6.1 1995/02/23 17:22:27 alanl - * Taken from DIPC2_SHARED - * [1995/01/03 19:09:31 alanl] - * + * Taken from DIPC2_SHARED + * [1995/01/03 19:09:31 alanl] + * * Revision 1.1.2.1 1994/10/11 18:24:42 rwd - * Created. - * [1994/10/11 18:15:31 rwd] - * + * Created. + * [1994/10/11 18:15:31 rwd] + * * $EndLog$ */ /* @@ -46,30 +46,30 @@ * Per driver structure. 
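The pci_device.h changes that follow are whitespace and comment re-tabbing; the structure this comment introduces is a classic probe/attach driver descriptor. A hypothetical instantiation, assuming the header's own declarations are in scope — the IDs, strings and functions here are made up for illustration:

#include "pci_device.h"         /* the header shown in this hunk */

static int
toy_probe(pcici_t tag)
{
        (void)tag;
        return 1;               /* claim the device is present */
}

static int
toy_attach(pcici_t tag)
{
        (void)tag;
        return 0;
}

static void
toy_intr(int unit)
{
        (void)unit;
}

static struct pci_driver toy_pci_driver = {
        .probe     = toy_probe,
        .attach    = toy_attach,
        .vendor_id = 0x106b,    /* made-up vendor id */
        .device_id = 0x0001,    /* made-up device id */
        .name      = "toy",
        .vendor    = "Example toy device",
        .intr      = toy_intr,
};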
* *------------------------------------------------------------ -*/ + */ typedef unsigned short pci_vendor_id_t; typedef unsigned short pci_dev_id_t; typedef union { - unsigned long cfg1; - struct { - unsigned char enable; - unsigned char forward; - unsigned short port; - } cfg2; - } pcici_t; + unsigned long cfg1; + struct { + unsigned char enable; + unsigned char forward; + unsigned short port; + } cfg2; +} pcici_t; struct pci_driver { - int (*probe )(pcici_t pci_ident); /* test whether device - is present */ - int (*attach)(pcici_t pci_ident); /* setup driver for a - device */ - pci_vendor_id_t vendor_id; /* vendor pci id */ - pci_dev_id_t device_id; /* device pci id */ - char *name; /* device name */ - char *vendor; /* device long name */ - void (*intr)(int); /* interupt handler */ + int (*probe )(pcici_t pci_ident);/* test whether device + * is present */ + int (*attach)(pcici_t pci_ident);/* setup driver for a + * device */ + pci_vendor_id_t vendor_id; /* vendor pci id */ + pci_dev_id_t device_id; /* device pci id */ + char *name; /* device name */ + char *vendor; /* device long name */ + void (*intr)(int); /* interupt handler */ }; /*----------------------------------------------------------- @@ -83,7 +83,7 @@ struct pci_driver { * So this structure may grow .. * *----------------------------------------------------------- -*/ + */ struct pci_device { struct pci_driver * pd_driver; @@ -97,10 +97,10 @@ struct pci_device { * on entry, the mapping function assigns an address. * *----------------------------------------------------------- -*/ + */ int pci_map_mem(pcici_t tag, - unsigned long entry, - vm_offset_t *va, - vm_offset_t *pa); + unsigned long entry, + vm_offset_t *va, + vm_offset_t *pa); #endif /*__PCI_DEVICE_H__*/ diff --git a/bsd/dev/arm/pio.h b/bsd/dev/arm/pio.h index fd9c1ecca..9cbdc6517 100644 --- a/bsd/dev/arm/pio.h +++ b/bsd/dev/arm/pio.h @@ -6,7 +6,7 @@ */ /* * HISTORY - * + * * Revision 1.2 1998/09/30 21:20:45 wsanchez * Merged in IntelMerge1 (mburg: Intel support) * @@ -17,104 +17,104 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.8.2 1996/07/31 09:46:36 paire - * Merged with nmk20b7_shared (1.1.11.2 -> 1.1.11.1) - * [96/06/10 paire] + * Merged with nmk20b7_shared (1.1.11.2 -> 1.1.11.1) + * [96/06/10 paire] * * Revision 1.1.11.2 1996/06/13 12:38:25 bernadat - * Do not use inline macros when MACH_ASSERT is configured. - * [96/05/24 bernadat] - * + * Do not use inline macros when MACH_ASSERT is configured. + * [96/05/24 bernadat] + * * Revision 1.1.11.1 1996/05/14 13:50:23 paire - * Added new linl and loutl __inline__. - * Added conditional compilation for [l]{in|oub}[bwl]() __inline__. - * [95/11/24 paire] - * + * Added new linl and loutl __inline__. + * Added conditional compilation for [l]{in|oub}[bwl]() __inline__. + * [95/11/24 paire] + * * Revision 1.1.8.1 1994/09/23 02:00:28 ezf - * change marker to not FREE - * [1994/09/22 21:25:52 ezf] - * + * change marker to not FREE + * [1994/09/22 21:25:52 ezf] + * * Revision 1.1.4.5 1993/08/09 19:40:41 dswartz - * Add ANSI prototypes - CR#9523 - * [1993/08/06 17:45:57 dswartz] - * + * Add ANSI prototypes - CR#9523 + * [1993/08/06 17:45:57 dswartz] + * * Revision 1.1.4.4 1993/06/11 15:17:37 jeffc - * CR9176 - ANSI C violations: inb/outb macros must be changed from - * ({ ... }) to inline functions, with proper type definitions. Callers - * must pass proper types to these functions: 386 I/O port addresses - * are unsigned shorts (not pointers). 
- * [1993/06/10 14:26:10 jeffc] - * + * CR9176 - ANSI C violations: inb/outb macros must be changed from + * ({ ... }) to inline functions, with proper type definitions. Callers + * must pass proper types to these functions: 386 I/O port addresses + * are unsigned shorts (not pointers). + * [1993/06/10 14:26:10 jeffc] + * * Revision 1.1.4.3 1993/06/07 22:09:28 jeffc - * CR9176 - ANSI C violations: trailing tokens on CPP - * directives, extra semicolons after decl_ ..., asm keywords - * [1993/06/07 19:00:26 jeffc] - * + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:00:26 jeffc] + * * Revision 1.1.4.2 1993/06/04 15:28:45 jeffc - * CR9176 - ANSI problems - - * Added casts to get macros to take caddr_t as an I/O space address. - * [1993/06/04 13:45:55 jeffc] - * + * CR9176 - ANSI problems - + * Added casts to get macros to take caddr_t as an I/O space address. + * [1993/06/04 13:45:55 jeffc] + * * Revision 1.1 1992/09/30 02:25:51 robert - * Initial revision - * + * Initial revision + * * $EndLog$ */ /* CMU_HIST */ /* * Revision 2.5 91/05/14 16:14:20 mrt - * Correcting copyright - * + * Correcting copyright + * * Revision 2.4 91/02/05 17:13:56 mrt - * Changed to new Mach copyright - * [91/02/01 17:37:08 mrt] - * + * Changed to new Mach copyright + * [91/02/01 17:37:08 mrt] + * * Revision 2.3 90/12/20 16:36:37 jeffreyh - * changes for __STDC__ - * [90/12/07 jeffreyh] - * + * changes for __STDC__ + * [90/12/07 jeffreyh] + * * Revision 2.2 90/11/26 14:48:41 rvb - * Pulled from 2.5 - * [90/11/22 10:09:38 rvb] - * - * [90/08/14 mg32] - * - * Now we know how types are factor in. - * Cleaned up a bunch: eliminated ({ for output and flushed unused - * output variables. - * [90/08/14 rvb] - * - * This is how its done in gcc: - * Created. - * [90/03/26 rvb] - * + * Pulled from 2.5 + * [90/11/22 10:09:38 rvb] + * + * [90/08/14 mg32] + * + * Now we know how types are factor in. + * Cleaned up a bunch: eliminated ({ for output and flushed unused + * output variables. + * [90/08/14 rvb] + * + * This is how its done in gcc: + * Created. + * [90/03/26 rvb] + * */ /* CMU_ENDHIST */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ -/* +/* */ #ifndef ARM_PIO_H #define ARM_PIO_H @@ -122,103 +122,109 @@ typedef unsigned short i386_ioport_t; /* read a longword */ -extern unsigned long inl( - i386_ioport_t port); +extern unsigned long inl( + i386_ioport_t port); /* read a shortword */ -extern unsigned short inw( - i386_ioport_t port); +extern unsigned short inw( + i386_ioport_t port); /* read a byte */ -extern unsigned char inb( - i386_ioport_t port); +extern unsigned char inb( + i386_ioport_t port); /* write a longword */ -extern void outl( - i386_ioport_t port, - unsigned long datum); +extern void outl( + i386_ioport_t port, + unsigned long datum); /* write a word */ -extern void outw( - i386_ioport_t port, - unsigned short datum); +extern void outw( + i386_ioport_t port, + unsigned short datum); /* write a longword */ -extern void outb( - i386_ioport_t port, - unsigned char datum); +extern void outb( + i386_ioport_t port, + unsigned char datum); /* input an array of longwords */ -extern void linl( - i386_ioport_t port, - int * data, - int count); +extern void linl( + i386_ioport_t port, + int * data, + int count); /* output an array of longwords */ -extern void loutl( - i386_ioport_t port, - int * data, - int count); +extern void loutl( + i386_ioport_t port, + int * data, + int count); /* input an array of words */ -extern void linw( - i386_ioport_t port, - int * data, - int count); +extern void linw( + i386_ioport_t port, + int * data, + int count); /* output an array of words */ -extern void loutw( - i386_ioport_t port, - int * data, - int count); +extern void loutw( + i386_ioport_t port, + int * data, + int count); /* input an array of bytes */ -extern void linb( - i386_ioport_t port, - char * data, - int count); +extern void linb( + i386_ioport_t port, + char * data, + int count); /* output an array of bytes */ -extern void loutb( - i386_ioport_t port, - char * data, - int count); +extern void loutb( + i386_ioport_t port, + char * data, + int count); -extern __inline__ unsigned long inl( - i386_ioport_t port) +extern __inline__ unsigned long +inl( + i386_ioport_t port) { unsigned long datum; - __asm__ volatile("inl %1, %0" : "=a" (datum) : "d" (port)); - return(datum); + __asm__ volatile ("inl %1, %0" : "=a" (datum) : "d" (port)); + return datum; } -extern __inline__ unsigned short inw( - i386_ioport_t port) +extern __inline__ unsigned short +inw( + i386_ioport_t port) { unsigned short datum; - __asm__ volatile(".byte 0x66; inl %1, %0" : "=a" (datum) : "d" (port)); - return(datum); + __asm__ volatile (".byte 0x66; inl %1, %0" : "=a" (datum) : "d" (port)); + return datum; } -extern __inline__ unsigned char inb( - i386_ioport_t port) +extern __inline__ unsigned char +inb( + i386_ioport_t port) { unsigned char datum; - __asm__ volatile("inb %1, %0" : "=a" (datum) : "d" (port)); - return(datum); + __asm__ volatile ("inb %1, %0" : "=a" (datum) : "d" (port)); + return datum; } -extern __inline__ void outl( - i386_ioport_t port, - unsigned long datum) +extern __inline__ void +outl( + i386_ioport_t port, + unsigned long datum) { - __asm__ volatile("outl %0, %1" : : "a" (datum), "d" (port)); + __asm__ volatile ("outl %0, %1" : : "a" (datum), "d" (port)); } -extern __inline__ void outw( - i386_ioport_t port, - unsigned short datum) +extern __inline__ void +outw( + i386_ioport_t port, + unsigned short datum) { - __asm__ volatile(".byte 0x66; outl %0, %1" : : "a" (datum), "d" (port)); + __asm__ volatile (".byte 0x66; outl %0, %1" : : "a" (datum), "d" (port)); } -extern __inline__ void outb( - i386_ioport_t port, - unsigned 
char datum) +extern __inline__ void +outb( + i386_ioport_t port, + unsigned char datum) { - __asm__ volatile("outb %0, %1" : : "a" (datum), "d" (port)); + __asm__ volatile ("outb %0, %1" : : "a" (datum), "d" (port)); } #endif /* ARM_PIO_H */ diff --git a/bsd/dev/arm/sdt_arm.c b/bsd/dev/arm/sdt_arm.c index b8db51b52..938aa048b 100644 --- a/bsd/dev/arm/sdt_arm.c +++ b/bsd/dev/arm/sdt_arm.c @@ -58,12 +58,12 @@ sdt_invop(__unused uintptr_t addr, __unused uintptr_t *stack, __unused uintptr_t uintptr_t stack4 = *((uintptr_t*) regs->sp); dtrace_probe(sdt->sdp_id, regs->r[0], regs->r[1], regs->r[2], regs->r[3], stack4); - - return (DTRACE_INVOP_NOP); + + return DTRACE_INVOP_NOP; } } - return (0); + return 0; } struct frame { @@ -75,8 +75,8 @@ struct frame { uint64_t sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) { -#pragma unused(arg,id,parg) /* __APPLE__ */ - uint64_t val = 0; +#pragma unused(arg,id,parg) /* __APPLE__ */ + uint64_t val = 0; struct frame *fp = (struct frame *)__builtin_frame_address(0); uintptr_t *stack; uintptr_t pc; @@ -88,17 +88,16 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) * e.g. arg==5 refers to the 6th arg passed to the probed function. */ int inreg = 4; - + for (i = 1; i <= aframes; i++) { fp = fp->backchain; pc = fp->retaddr; if (dtrace_invop_callsite_pre != NULL - && pc > (uintptr_t)dtrace_invop_callsite_pre - && pc <= (uintptr_t)dtrace_invop_callsite_post) { - - /* - * When we pass through the invalid op handler, + && pc > (uintptr_t)dtrace_invop_callsite_pre + && pc <= (uintptr_t)dtrace_invop_callsite_post) { + /* + * When we pass through the invalid op handler, * we expect to find the save area structure, * pushed on the stack where we took the trap. * If the argument we seek is passed in a register, then @@ -106,7 +105,7 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) * If the argument we seek is passed on the stack, then * we increment the frame pointer further, to find the * pushed args - */ + */ /* fp points to the dtrace_invop activation */ fp = fp->backchain; /* to the fbt_perfCallback activation */ @@ -131,7 +130,7 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) argno -= inreg; } goto load; - } + } } /* @@ -143,24 +142,23 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) */ argno++; /* Advance past probeID */ - if (argno <= inreg) { + if (argno <= inreg) { /* * This shouldn't happen. If the argument is passed in a * register then it should have been, well, passed in a * register... */ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - return (0); + return 0; } - + argno -= (inreg + 1); stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */ load: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - /* dtrace_probe arguments arg0 .. arg4 are 64bits wide */ + /* dtrace_probe arguments arg0 .. 
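sdt_getarg() above splits probe arguments between the registers saved at the trap (inreg = 4 on arm) and the words marshalled on the stack beyond them. A toy version of that final split-and-load step, with illustrative names (toy_getarg is not the kernel function):

#include <stdint.h>

#define TOY_INREG 4     /* arm: the first four args travel in r0-r3 */

/* decide where probe argument `argno` lives and fetch it, widening to
 * 64 bits as dtrace_probe expects; `saved_regs` stands in for the trap
 * save area, `stack_args` for the caller's spilled arguments */
static uint64_t
toy_getarg(const uintptr_t saved_regs[TOY_INREG],
    const uintptr_t *stack_args, int argno)
{
        if (argno < TOY_INREG) {
                return (uint64_t)saved_regs[argno];
        }
        return (uint64_t)stack_args[argno - TOY_INREG];
}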
arg4 are 64bits wide */ val = (uint64_t)(*(((uintptr_t *)stack) + argno)); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return (val); - -} + return val; +} diff --git a/bsd/dev/arm/stubs.c b/bsd/dev/arm/stubs.c index 644dae630..298450d4e 100644 --- a/bsd/dev/arm/stubs.c +++ b/bsd/dev/arm/stubs.c @@ -36,12 +36,14 @@ copyoutstr(const void *from, user_addr_t to, size_t maxlen, size_t * lencopied) int error = 0; slen = strlen(from) + 1; - if (slen > maxlen) + if (slen > maxlen) { error = ENAMETOOLONG; + } len = min(maxlen, slen); - if (copyout(from, to, len)) + if (copyout(from, to, len)) { error = EFAULT; + } *lencopied = len; return error; @@ -65,13 +67,15 @@ copystr(const void *vfrom, void *vto, size_t maxlen, size_t * lencopied) for (l = 0; l < maxlen; l++) { if ((*to++ = *from++) == '\0') { - if (lencopied) + if (lencopied) { *lencopied = l + 1; + } return 0; } } - if (lencopied) + if (lencopied) { *lencopied = maxlen; + } return ENAMETOOLONG; } diff --git a/bsd/dev/arm/sysctl.c b/bsd/dev/arm/sysctl.c index d97e80e21..c5ae6100b 100644 --- a/bsd/dev/arm/sysctl.c +++ b/bsd/dev/arm/sysctl.c @@ -12,18 +12,19 @@ #include #include -extern int trap_on_alignment_fault; -extern uint64_t wake_abstime; +extern int trap_on_alignment_fault; +extern uint64_t wake_abstime; +extern int lck_mtx_adaptive_spin_mode; static SYSCTL_INT(_machdep, OID_AUTO, alignmenttrap, - CTLFLAG_RW, &trap_on_alignment_fault, 0, - "trap on alignment faults (number of alignment faults per trap)"); + CTLFLAG_RW, &trap_on_alignment_fault, 0, + "trap on alignment faults (number of alignment faults per trap)"); static SYSCTL_QUAD(_machdep, OID_AUTO, wake_abstime, - CTLFLAG_RD | CTLFLAG_KERN, &wake_abstime, - "Absolute Time at the last wakeup"); + CTLFLAG_RD | CTLFLAG_KERN, &wake_abstime, + "Absolute Time at the last wakeup"); static int sysctl_time_since_reset SYSCTL_HANDLER_ARGS @@ -40,9 +41,9 @@ sysctl_time_since_reset SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_machdep, OID_AUTO, time_since_reset, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_time_since_reset, "I", - "Continuous time since last SOC boot/wake started"); + CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, + 0, 0, sysctl_time_since_reset, "I", + "Continuous time since last SOC boot/wake started"); static int sysctl_wake_conttime SYSCTL_HANDLER_ARGS @@ -59,17 +60,17 @@ sysctl_wake_conttime SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_machdep, OID_AUTO, wake_conttime, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_wake_conttime, "I", - "Continuous Time at the last wakeup"); + CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, + 0, 0, sysctl_wake_conttime, "I", + "Continuous Time at the last wakeup"); /* * For source compatibility, here's some machdep.cpu mibs that * use host_info() to simulate reasonable answers. 
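The handler introduced just below, arm_host_info(), serves several machdep.cpu MIBs from one function by passing each field's byte offset in arg1 and its size in arg2. A user-space sketch of the same offset trick — toy_basic_info and toy_field are illustrative stand-ins for host_basic_info_data_t and the handler:

#include <stdint.h>
#include <stddef.h>

struct toy_basic_info { int32_t physical_cpu; int32_t physical_cpu_max; };

/* return the 32-bit word at byte offset `off` inside `info` */
static uint32_t
toy_field(const struct toy_basic_info *info, size_t off)
{
        return *(const uint32_t *)((const uint8_t *)info + off);
}

/* usage, mirroring the sysctl registrations below:
 *   toy_field(&hinfo, offsetof(struct toy_basic_info, physical_cpu)) */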
*/ -SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "CPU info"); +SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "CPU info"); static int arm_host_info SYSCTL_HANDLER_ARGS @@ -78,18 +79,20 @@ arm_host_info SYSCTL_HANDLER_ARGS host_basic_info_data_t hinfo; mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; -#define BSD_HOST 1 +#define BSD_HOST 1 kern_return_t kret = host_info((host_t)BSD_HOST, - HOST_BASIC_INFO, (host_info_t)&hinfo, &count); - if (KERN_SUCCESS != kret) - return (EINVAL); + HOST_BASIC_INFO, (host_info_t)&hinfo, &count); + if (KERN_SUCCESS != kret) { + return EINVAL; + } - if (sizeof (uint32_t) != arg2) + if (sizeof(uint32_t) != arg2) { panic("size mismatch"); + } - uintptr_t woffset = (uintptr_t)arg1 / sizeof (uint32_t); + uintptr_t woffset = (uintptr_t)arg1 / sizeof(uint32_t); uint32_t datum = *(uint32_t *)(((uint32_t *)&hinfo) + woffset); - return (SYSCTL_OUT(req, &datum, sizeof (datum))); + return SYSCTL_OUT(req, &datum, sizeof(datum)); } /* @@ -100,10 +103,10 @@ arm_host_info SYSCTL_HANDLER_ARGS */ static SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(host_basic_info_data_t, physical_cpu_max), - sizeof (integer_t), - arm_host_info, "I", "CPU cores per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(host_basic_info_data_t, physical_cpu_max), + sizeof(integer_t), + arm_host_info, "I", "CPU cores per package"); /* * machdep.cpu.core_count @@ -113,10 +116,10 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package, */ static SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(host_basic_info_data_t, physical_cpu), - sizeof (integer_t), - arm_host_info, "I", "Number of enabled cores per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(host_basic_info_data_t, physical_cpu), + sizeof(integer_t), + arm_host_info, "I", "Number of enabled cores per package"); /* * machdep.cpu.logical_per_package @@ -127,10 +130,10 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count, */ static SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(host_basic_info_data_t, logical_cpu_max), - sizeof (integer_t), - arm_host_info, "I", "CPU logical cpus per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(host_basic_info_data_t, logical_cpu_max), + sizeof(integer_t), + arm_host_info, "I", "CPU logical cpus per package"); /* * machdep.cpu.thread_count @@ -140,10 +143,10 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package, */ static SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(host_basic_info_data_t, logical_cpu), - sizeof (integer_t), - arm_host_info, "I", "Number of enabled threads per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(host_basic_info_data_t, logical_cpu), + sizeof(integer_t), + arm_host_info, "I", "Number of enabled threads per package"); /* * machdep.cpu.brand_string @@ -174,10 +177,15 @@ make_brand_string SYSCTL_HANDLER_ARGS break; } char buf[80]; - snprintf(buf, sizeof (buf), "%s processor", impl); - return (SYSCTL_OUT(req, buf, strlen(buf) + 1)); + snprintf(buf, sizeof(buf), "%s processor", impl); + return SYSCTL_OUT(req, buf, strlen(buf) + 1); } SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string, - CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, 
make_brand_string, "A", "CPU brand string"); + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, make_brand_string, "A", "CPU brand string"); + +static +SYSCTL_INT(_machdep, OID_AUTO, lck_mtx_adaptive_spin_mode, + CTLFLAG_RW, &lck_mtx_adaptive_spin_mode, 0, + "Enable adaptive spin behavior for kernel mutexes"); diff --git a/bsd/dev/arm/systemcalls.c b/bsd/dev/arm/systemcalls.c index 2fa6a6580..5ac5fcde2 100644 --- a/bsd/dev/arm/systemcalls.c +++ b/bsd/dev/arm/systemcalls.c @@ -182,6 +182,7 @@ unix_syscall( arm_prepare_syscall_return(callp, state, uthread, error); uthread->uu_flag &= ~UT_NOTCANCELPT; + uthread->syscall_code = 0; if (uthread->uu_lowpri_window) { /* @@ -251,6 +252,7 @@ unix_syscall_return(int error) arm_prepare_syscall_return(callp, regs, uthread, error); uthread->uu_flag &= ~UT_NOTCANCELPT; + uthread->syscall_code = 0; if (uthread->uu_lowpri_window) { /* @@ -463,20 +465,18 @@ arm_get_syscall_args(uthread_t uthread, struct arm_saved_state *state, struct sy static int arm_get_u64_syscall_args(uthread_t uthread, arm_saved_state64_t *regs, struct sysent *callp) { - int indirect_offset, regparams; + int indirect_offset; #if CONFIG_REQUIRES_U32_MUNGING sy_munge_t *mungerp; #endif indirect_offset = (regs->x[ARM64_SYSCALL_CODE_REG_NUM] == 0) ? 1 : 0; - regparams = 9 - indirect_offset; /* * Everything should fit in registers for now. */ - assert(callp->sy_narg <= 8); - if (callp->sy_narg > regparams) { + if (callp->sy_narg > (int)(sizeof(uthread->uu_arg) / sizeof(uthread->uu_arg[0]))) { return -1; } diff --git a/bsd/dev/arm/table_inline.h b/bsd/dev/arm/table_inline.h index 8f358e423..f5996137d 100644 --- a/bsd/dev/arm/table_inline.h +++ b/bsd/dev/arm/table_inline.h @@ -11,7 +11,7 @@ * 2 April 1992 ? at NeXT * Created. */ - + #include #include @@ -20,17 +20,17 @@ static inline gdt_entry_t * sel_to_gdt_entry(sel_t sel) { - return (&gdt[sel.index]); + return &gdt[sel.index]; } static inline idt_entry_t * sel_to_idt_entry(sel_t sel) { - return (&idt[sel.index]); + return &idt[sel.index]; } static inline ldt_entry_t * sel_to_ldt_entry(ldt_t *tbl, sel_t sel) { - return (&tbl[sel.index]); + return &tbl[sel.index]; } diff --git a/bsd/dev/arm/unix_signal.c b/bsd/dev/arm/unix_signal.c index 0bc010816..12d7b69f7 100644 --- a/bsd/dev/arm/unix_signal.c +++ b/bsd/dev/arm/unix_signal.c @@ -29,30 +29,30 @@ extern struct arm_saved_state *get_user_regs(thread_t); extern user_addr_t thread_get_cthread_self(void); extern kern_return_t thread_getstatus(thread_t act, int flavor, - thread_state_t tstate, mach_msg_type_number_t *count); + thread_state_t tstate, mach_msg_type_number_t *count); extern kern_return_t thread_getstatus_to_user(thread_t act, int flavor, - thread_state_t tstate, mach_msg_type_number_t *count); + thread_state_t tstate, mach_msg_type_number_t *count); extern kern_return_t machine_thread_state_convert_to_user(thread_t act, int flavor, - thread_state_t tstate, mach_msg_type_number_t *count); + thread_state_t tstate, mach_msg_type_number_t *count); extern kern_return_t thread_setstatus(thread_t thread, int flavor, - thread_state_t tstate, mach_msg_type_number_t count); + thread_state_t tstate, mach_msg_type_number_t count); extern kern_return_t thread_setstatus_from_user(thread_t thread, int flavor, - thread_state_t tstate, mach_msg_type_number_t count); + thread_state_t tstate, mach_msg_type_number_t count); /* XXX Put these someplace smarter... 
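One functional change buried in the re-indentation above: arm_get_u64_syscall_args() now bounds sy_narg against the actual capacity of uu_arg[] instead of asserting narg <= 8 against a derived register count. A sketch of that countof-style check with stand-in types (toy_uthread is illustrative):

#include <stdint.h>

#define TOY_COUNTOF(a) (sizeof(a) / sizeof((a)[0]))

struct toy_uthread { uint64_t uu_arg[8]; };

static int
toy_args_fit(const struct toy_uthread *ut, int narg)
{
        /* reject up front rather than asserting, as the patch now does */
        return narg <= (int)TOY_COUNTOF(ut->uu_arg);
}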
*/ -typedef struct mcontext32 mcontext32_t; +typedef struct mcontext32 mcontext32_t; typedef struct mcontext64 mcontext64_t; /* Signal handler flavors supported */ /* These defns should match the Libc implmn */ -#define UC_TRAD 1 -#define UC_FLAVOR 30 +#define UC_TRAD 1 +#define UC_FLAVOR 30 /* The following are valid mcontext sizes */ #define UC_FLAVOR_SIZE32 ((ARM_THREAD_STATE_COUNT + ARM_EXCEPTION_STATE_COUNT + ARM_VFP_STATE_COUNT) * sizeof(int)) #define UC_FLAVOR_SIZE64 ((ARM_THREAD_STATE64_COUNT + ARM_EXCEPTION_STATE64_COUNT + ARM_NEON_STATE64_COUNT) * sizeof(int)) #if __arm64__ -#define C_64_REDZONE_LEN 128 +#define C_64_REDZONE_LEN 128 #endif static int @@ -65,24 +65,28 @@ sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp) tstate = (void *) ts; state_count = ARM_THREAD_STATE_COUNT; - if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) + if (thread_getstatus(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) { return EINVAL; + } mcp->ss = *ts; tstate = (void *) &mcp->ss; state_count = ARM_THREAD_STATE_COUNT; - if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) + if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) { return EINVAL; + } tstate = (void *) &mcp->es; state_count = ARM_EXCEPTION_STATE_COUNT; - if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) + if (thread_getstatus(th_act, ARM_EXCEPTION_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) { return EINVAL; + } tstate = (void *) &mcp->fs; state_count = ARM_VFP_STATE_COUNT; - if (thread_getstatus_to_user(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) + if (thread_getstatus_to_user(th_act, ARM_VFP_STATE, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) { return EINVAL; + } return 0; } @@ -90,9 +94,9 @@ sendsig_get_state32(thread_t th_act, arm_thread_state_t *ts, mcontext32_t *mcp) #if defined(__arm64__) struct user_sigframe64 { /* We can pass the last two args in registers for ARM64 */ - user64_siginfo_t sinfo; - struct user_ucontext64 uctx; - mcontext64_t mctx; + user64_siginfo_t sinfo; + struct user_ucontext64 uctx; + mcontext64_t mctx; }; static int @@ -105,24 +109,28 @@ sendsig_get_state64(thread_t th_act, arm_thread_state64_t *ts, mcontext64_t *mcp tstate = (void *) ts; state_count = ARM_THREAD_STATE64_COUNT; - if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) + if (thread_getstatus(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) { return EINVAL; + } mcp->ss = *ts; tstate = (void *) &mcp->ss; state_count = ARM_THREAD_STATE64_COUNT; - if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) + if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) { return EINVAL; + } tstate = (void *) &mcp->es; state_count = ARM_EXCEPTION_STATE64_COUNT; - if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) + if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) { return EINVAL; + } tstate = (void *) &mcp->ns; state_count = ARM_NEON_STATE64_COUNT; - if 
(thread_getstatus_to_user(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) + if (thread_getstatus_to_user(th_act, ARM_NEON_STATE64, (thread_state_t) tstate, &state_count) != KERN_SUCCESS) { return EINVAL; + } return 0; } @@ -133,19 +141,20 @@ sendsig_fill_uctx64(user_ucontext64_t *uctx, int oonstack, int mask, user64_addr bzero(uctx, sizeof(*uctx)); uctx->uc_onstack = oonstack; uctx->uc_sigmask = mask; - uctx->uc_stack.ss_sp = sp; + uctx->uc_stack.ss_sp = sp; uctx->uc_stack.ss_size = stack_size; - if (oonstack) + if (oonstack) { uctx->uc_stack.ss_flags |= SS_ONSTACK; + } uctx->uc_link = (user64_addr_t)0; - uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64; + uctx->uc_mcsize = (user64_size_t) UC_FLAVOR_SIZE64; uctx->uc_mcontext64 = (user64_addr_t) p_mctx; } static kern_return_t -sendsig_set_thread_state64(arm_thread_state64_t *regs, - user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo, - user64_addr_t p_uctx, user64_addr_t token, user64_addr_t trampact, user64_addr_t sp, thread_t th_act) +sendsig_set_thread_state64(arm_thread_state64_t *regs, + user64_addr_t catcher, int infostyle, int sig, user64_addr_t p_sinfo, + user64_addr_t p_uctx, user64_addr_t token, user64_addr_t trampact, user64_addr_t sp, thread_t th_act) { assert(proc_is64bit_data(current_proc())); @@ -169,21 +178,21 @@ sendsig_fill_uctx32(user_ucontext32_t *uctx, int oonstack, int mask, user_addr_t bzero(uctx, sizeof(*uctx)); uctx->uc_onstack = oonstack; uctx->uc_sigmask = mask; - uctx->uc_stack.ss_sp = (user32_addr_t) sp; + uctx->uc_stack.ss_sp = (user32_addr_t) sp; uctx->uc_stack.ss_size = (user32_size_t) stack_size; - if (oonstack) + if (oonstack) { uctx->uc_stack.ss_flags |= SS_ONSTACK; + } uctx->uc_link = (user32_addr_t)0; - uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32; + uctx->uc_mcsize = (user32_size_t) UC_FLAVOR_SIZE32; uctx->uc_mcontext = (user32_addr_t) p_mctx; } static kern_return_t -sendsig_set_thread_state32(arm_thread_state_t *regs, - user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo, - user32_addr_t trampact, user32_addr_t sp, thread_t th_act) +sendsig_set_thread_state32(arm_thread_state_t *regs, + user32_addr_t catcher, int infostyle, int sig, user32_addr_t p_sinfo, + user32_addr_t trampact, user32_addr_t sp, thread_t th_act) { - assert(!proc_is64bit_data(current_proc())); regs->r[0] = catcher; @@ -212,14 +221,14 @@ sendsig_set_thread_state32(arm_thread_state_t *regs, static void sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catcher) { - bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo)); + bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo)); ut->t_dtrace_siginfo.si_signo = sinfo->si_signo; ut->t_dtrace_siginfo.si_code = sinfo->si_code; ut->t_dtrace_siginfo.si_pid = sinfo->si_pid; ut->t_dtrace_siginfo.si_uid = sinfo->si_uid; ut->t_dtrace_siginfo.si_status = sinfo->si_status; - /* XXX truncates faulting address to void * */ + /* XXX truncates faulting address to void * */ ut->t_dtrace_siginfo.si_addr = CAST_DOWN_EXPLICIT(void *, sinfo->si_addr); /* Fire DTrace proc:::fault probe when signal is generated by hardware. 
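sendsig_fill_uctx64() and its 32-bit twin above initialize the user-visible ucontext before it is copied out with the signal frame. A condensed user-space sketch of that fill — toy_uctx, toy_stack and TOY_SS_ONSTACK are stand-ins for the user_ucontext64 layout, not the real types:

#include <stdint.h>
#include <string.h>

struct toy_stack { uint64_t ss_sp; uint64_t ss_size; int ss_flags; };
struct toy_uctx {
        int             uc_onstack;
        uint32_t        uc_sigmask;
        struct toy_stack uc_stack;
        uint64_t        uc_link;
        uint64_t        uc_mcsize;
        uint64_t        uc_mcontext64;
};
#define TOY_SS_ONSTACK 0x1

static void
toy_fill_uctx(struct toy_uctx *u, int oonstack, uint32_t mask,
    uint64_t sp, uint64_t stack_size, uint64_t p_mctx, uint64_t mcsize)
{
        memset(u, 0, sizeof(*u));
        u->uc_onstack = oonstack;
        u->uc_sigmask = mask;
        u->uc_stack.ss_sp = sp;
        u->uc_stack.ss_size = stack_size;
        if (oonstack) {
                u->uc_stack.ss_flags |= TOY_SS_ONSTACK;
        }
        u->uc_mcsize = mcsize;
        u->uc_mcontext64 = p_mctx;      /* mcontext lives in the user frame */
}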
*/ @@ -230,19 +239,19 @@ sendsig_do_dtrace(uthread_t ut, user_siginfo_t *sinfo, int sig, user_addr_t catc default: break; } - + /* XXX truncates faulting address to uintptr_t */ DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo), void (*)(void), CAST_DOWN(sig_t, catcher)); } -#endif - +#endif + struct user_sigframe32 { - user32_addr_t puctx; - user32_addr_t token; - user32_siginfo_t sinfo; - struct user_ucontext32 uctx; - mcontext32_t mctx; + user32_addr_t puctx; + user32_addr_t token; + user32_siginfo_t sinfo; + struct user_ucontext32 uctx; + mcontext32_t mctx; }; /* @@ -256,7 +265,7 @@ sendsig( int sig, int mask, __unused uint32_t code -) + ) { union { struct ts32 { @@ -268,7 +277,7 @@ sendsig( } ts64; #endif } ts; - union { + union { struct user_sigframe32 uf32; #if defined(__arm64__) struct user_sigframe64 uf64; @@ -276,12 +285,12 @@ sendsig( } user_frame; user_siginfo_t sinfo; - user_addr_t sp = 0, trampact; + user_addr_t sp = 0, trampact; struct sigacts *ps = p->p_sigacts; int oonstack, infostyle; thread_t th_act; struct uthread *ut; - user_size_t stack_size = 0; + user_size_t stack_size = 0; user_addr_t p_uctx, token_uctx; kern_return_t kr; @@ -291,10 +300,11 @@ sendsig( bzero(&ts, sizeof(ts)); bzero(&user_frame, sizeof(user_frame)); - if (p->p_sigacts->ps_siginfo & sigmask(sig)) + if (p->p_sigacts->ps_siginfo & sigmask(sig)) { infostyle = UC_FLAVOR; - else + } else { infostyle = UC_TRAD; + } trampact = ps->ps_trampact[sig]; oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK; @@ -308,7 +318,7 @@ sendsig( goto bad2; } #else - panic("Shouldn't have 64-bit thread states on a 32-bit kernel."); + panic("Shouldn't have 64-bit thread states on a 32-bit kernel."); #endif } else { if (sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx) != 0) { @@ -354,19 +364,19 @@ sendsig( if (proc_is64bit_data(p)) { #if defined(__arm64__) sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size, - (user64_addr_t)&((struct user_sigframe64*)sp)->mctx); + (user64_addr_t)&((struct user_sigframe64*)sp)->mctx); #else panic("Shouldn't have 64-bit thread states on a 32-bit kernel."); #endif } else { - sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size, - (user32_addr_t)&((struct user_sigframe32*)sp)->mctx); + sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size, + (user32_addr_t)&((struct user_sigframe32*)sp)->mctx); } /* * Setup siginfo. 
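The switch that follows classifies si_code per signal; for SIGCHLD it decodes the child's wait status with the standard macros. A user-space sketch of that decode (toy_cld_code is illustrative; the stopped/continued cases are omitted here as they are in the hunk):

#include <signal.h>
#include <sys/wait.h>

static int
toy_cld_code(int status)
{
        if (WIFEXITED(status)) {
                return CLD_EXITED;
        }
        if (WIFSIGNALED(status)) {
                return WCOREDUMP(status) ? CLD_DUMPED : CLD_KILLED;
        }
        return 0;
}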
*/ - bzero((caddr_t) & sinfo, sizeof(sinfo)); + bzero((caddr_t) &sinfo, sizeof(sinfo)); sinfo.si_signo = sig; if (proc_is64bit_data(p)) { @@ -383,15 +393,16 @@ sendsig( switch (sig) { case SIGILL: -#ifdef BER_XXX - if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) +#ifdef BER_XXX + if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) { sinfo.si_code = ILL_ILLOPC; - else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) + } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) { sinfo.si_code = ILL_PRVOPC; - else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) + } else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) { sinfo.si_code = ILL_ILLTRP; - else + } else { sinfo.si_code = ILL_NOOP; + } #else sinfo.si_code = ILL_ILLTRP; #endif @@ -425,14 +436,15 @@ sendsig( sinfo.si_addr = user_frame.uf32.mctx.es.far; } -#ifdef BER_XXX +#ifdef BER_XXX /* First check in srr1 and then in dsisr */ - if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) + if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) { sinfo.si_code = SEGV_ACCERR; - else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) + } else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) { sinfo.si_code = SEGV_ACCERR; - else + } else { sinfo.si_code = SEGV_MAPERR; + } #else sinfo.si_code = SEGV_ACCERR; #endif @@ -462,15 +474,15 @@ sendsig( p->si_code = 0; proc_unlock(p); if (sinfo.si_code == CLD_EXITED) { - if (WIFEXITED(status_and_exitcode)) + if (WIFEXITED(status_and_exitcode)) { sinfo.si_code = CLD_EXITED; - else if (WIFSIGNALED(status_and_exitcode)) { + } else if (WIFSIGNALED(status_and_exitcode)) { if (WCOREDUMP(status_and_exitcode)) { sinfo.si_code = CLD_DUMPED; - status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode); + status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode); } else { sinfo.si_code = CLD_KILLED; - status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode); + status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode); } } } @@ -486,11 +498,11 @@ sendsig( } } -#if CONFIG_DTRACE +#if CONFIG_DTRACE sendsig_do_dtrace(ut, &sinfo, sig, catcher); #endif /* CONFIG_DTRACE */ - /* + /* * Copy signal-handling frame out to user space, set thread state. */ if (proc_is64bit_data(p)) { @@ -498,7 +510,7 @@ sendsig( user64_addr_t token; /* - * mctx filled in when we get state. uctx filled in by + * mctx filled in when we get state. uctx filled in by * sendsig_fill_uctx64(). We fill in the sinfo now. */ siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo); @@ -513,22 +525,23 @@ sendsig( token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token; if (copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64)) != 0) { - goto bad; - } + goto bad; + } if (sendsig_set_thread_state64(&ts.ts64.ss, - catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo, - (user64_addr_t)p_uctx, token, trampact, sp, th_act) != KERN_SUCCESS) + catcher, infostyle, sig, (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo, + (user64_addr_t)p_uctx, token, trampact, sp, th_act) != KERN_SUCCESS) { goto bad; + } #else - panic("Shouldn't have 64-bit thread states on a 32-bit kernel."); + panic("Shouldn't have 64-bit thread states on a 32-bit kernel."); #endif } else { user32_addr_t token; /* - * mctx filled in when we get state. uctx filled in by + * mctx filled in when we get state. uctx filled in by * sendsig_fill_uctx32(). We fill in the sinfo, *pointer* * to uctx and token now. 
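The token written into the frame a few lines up is the user ucontext pointer XORed with a per-process secret (ps_sigreturn_token); sigreturn() later recomputes and compares it so a forged signal frame is rejected, unless validation has been disabled for the process. A minimal sketch of the scheme:

#include <stdint.h>

static uint64_t
toy_sigreturn_token(uint64_t uctx_addr, uint64_t per_proc_secret)
{
        return uctx_addr ^ per_proc_secret;
}

static int
toy_token_valid(uint64_t uctx_addr, uint64_t per_proc_secret, uint64_t token)
{
        return toy_sigreturn_token(uctx_addr, per_proc_secret) == token;
}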
*/ @@ -547,13 +560,14 @@ sendsig( user_frame.uf32.token = token; if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) { - goto bad; - } + goto bad; + } if (sendsig_set_thread_state32(&ts.ts32.ss, - CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo, - CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS) + CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig, (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo, + CAST_DOWN_EXPLICIT(user32_addr_t, trampact), CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS) { goto bad; + } } proc_lock(p); @@ -591,7 +605,7 @@ sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_ad error = copyin(uctx_addr, uctx, sizeof(*uctx)); if (error) { - return (error); + return error; } /* validate the machine context size */ @@ -599,20 +613,20 @@ sigreturn_copyin_ctx32(struct user_ucontext32 *uctx, mcontext32_t *mctx, user_ad case UC_FLAVOR_SIZE32: break; default: - return (EINVAL); + return EINVAL; } assert(uctx->uc_mcsize == sizeof(*mctx)); error = copyin((user_addr_t)uctx->uc_mcontext, mctx, uctx->uc_mcsize); if (error) { - return (error); + return error; } return 0; } static int -sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx) +sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx) { assert(!proc_is64bit_data(current_proc())); @@ -626,10 +640,10 @@ sigreturn_set_state32(thread_t th_act, mcontext32_t *mctx) #endif if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE, (void *)&mctx->ss, ARM_THREAD_STATE_COUNT) != KERN_SUCCESS) { - return (EINVAL); + return EINVAL; } if (thread_setstatus_from_user(th_act, ARM_VFP_STATE, (void *)&mctx->fs, ARM_VFP_STATE_COUNT) != KERN_SUCCESS) { - return (EINVAL); + return EINVAL; } return 0; @@ -645,7 +659,7 @@ sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_ad error = copyin(uctx_addr, uctx, sizeof(*uctx)); if (error) { - return (error); + return error; } /* validate the machine context size */ @@ -653,20 +667,20 @@ sigreturn_copyin_ctx64(struct user_ucontext64 *uctx, mcontext64_t *mctx, user_ad case UC_FLAVOR_SIZE64: break; default: - return (EINVAL); + return EINVAL; } assert(uctx->uc_mcsize == sizeof(*mctx)); error = copyin((user_addr_t)uctx->uc_mcontext64, mctx, uctx->uc_mcsize); if (error) { - return (error); + return error; } return 0; } static int -sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx) +sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx) { assert(proc_is64bit_data(current_proc())); @@ -674,10 +688,10 @@ sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx) mctx->ss.cpsr = (mctx->ss.cpsr & ~PSR64_MODE_MASK) | PSR64_USER64_DEFAULT; if (thread_setstatus_from_user(th_act, ARM_THREAD_STATE64, (void *)&mctx->ss, ARM_THREAD_STATE64_COUNT) != KERN_SUCCESS) { - return (EINVAL); + return EINVAL; } if (thread_setstatus_from_user(th_act, ARM_NEON_STATE64, (void *)&mctx->ns, ARM_NEON_STATE64_COUNT) != KERN_SUCCESS) { - return (EINVAL); + return EINVAL; } return 0; @@ -687,9 +701,9 @@ sigreturn_set_state64(thread_t th_act, mcontext64_t *mctx) /* ARGSUSED */ int sigreturn( - struct proc * p, - struct sigreturn_args * uap, - __unused int *retval) + struct proc * p, + struct sigreturn_args * uap, + __unused int *retval) { union { user_ucontext32_t uc32; @@ -698,7 +712,7 @@ sigreturn( #endif } uctx; - union { + union { mcontext32_t mc32; #if defined(__arm64__) mcontext64_t mc64; @@ -738,17 
+752,19 @@ sigreturn( sigmask = uctx.uc32.uc_sigmask; } - if ((onstack & 01)) + if ((onstack & 01)) { p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK; - else + } else { p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK; + } ut->uu_sigmask = sigmask & ~sigcantmask; - if (ut->uu_siglist & ~ut->uu_sigmask) + if (ut->uu_siglist & ~ut->uu_sigmask) { signal_setast(current_thread()); + } sigreturn_validation = atomic_load_explicit( - &ps->ps_sigreturn_validation, memory_order_relaxed); + &ps->ps_sigreturn_validation, memory_order_relaxed); token_uctx = uap->uctx; kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx); assert(kr == KERN_SUCCESS); @@ -760,7 +776,7 @@ sigreturn( if ((user64_addr_t)uap->token != token) { #if DEVELOPMENT || DEBUG printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n", - p->p_comm, p->p_pid, (user64_addr_t)uap->token, token); + p->p_comm, p->p_pid, (user64_addr_t)uap->token, token); #endif /* DEVELOPMENT || DEBUG */ if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) { return EINVAL; @@ -769,8 +785,8 @@ sigreturn( error = sigreturn_set_state64(th_act, &mctx.mc64); if (error != 0) { #if DEVELOPMENT || DEBUG - printf("process %s[%d] sigreturn set_state64 error %d\n", - p->p_comm, p->p_pid, error); + printf("process %s[%d] sigreturn set_state64 error %d\n", + p->p_comm, p->p_pid, error); #endif /* DEVELOPMENT || DEBUG */ return error; } @@ -783,7 +799,7 @@ sigreturn( if ((user32_addr_t)uap->token != token) { #if DEVELOPMENT || DEBUG printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n", - p->p_comm, p->p_pid, (user32_addr_t)uap->token, token); + p->p_comm, p->p_pid, (user32_addr_t)uap->token, token); #endif /* DEVELOPMENT || DEBUG */ if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) { return EINVAL; @@ -792,14 +808,14 @@ sigreturn( error = sigreturn_set_state32(th_act, &mctx.mc32); if (error != 0) { #if DEVELOPMENT || DEBUG - printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n", - p->p_comm, p->p_pid, error); + printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n", + p->p_comm, p->p_pid, error); #endif /* DEVELOPMENT || DEBUG */ return error; } } - return (EJUSTRETURN); + return EJUSTRETURN; } /* @@ -808,17 +824,16 @@ sigreturn( */ int machine_exception(int exception, - __unused mach_exception_code_t code, - __unused mach_exception_subcode_t subcode) + __unused mach_exception_code_t code, + __unused mach_exception_subcode_t subcode) { switch (exception) { - case EXC_BAD_INSTRUCTION: - return SIGILL; + case EXC_BAD_INSTRUCTION: + return SIGILL; - case EXC_ARITHMETIC: - return SIGFPE; + case EXC_ARITHMETIC: + return SIGFPE; } return 0; } - diff --git a/bsd/dev/arm64/conf.c b/bsd/dev/arm64/conf.c index 761484db1..e40f4340a 100644 --- a/bsd/dev/arm64/conf.c +++ b/bsd/dev/arm64/conf.c @@ -37,34 +37,34 @@ struct bdevsw bdevsw[] = /* 0 - 7 are reserved to Apple */ - NO_BDEVICE, /* 0 */ - NO_BDEVICE, /* 1 */ - NO_BDEVICE, /* 2 */ - NO_BDEVICE, /* 3 */ - NO_BDEVICE, /* 4 */ - NO_BDEVICE, /* 5 */ - NO_BDEVICE, /* 6 */ - NO_BDEVICE, /* 7 */ + NO_BDEVICE, /* 0 */ + NO_BDEVICE, /* 1 */ + NO_BDEVICE, /* 2 */ + NO_BDEVICE, /* 3 */ + NO_BDEVICE, /* 4 */ + NO_BDEVICE, /* 5 */ + NO_BDEVICE, /* 6 */ + NO_BDEVICE, /* 7 */ /* 8 - 15 are reserved to the user */ - NO_BDEVICE, /* 8 */ - NO_BDEVICE, /* 9 */ - NO_BDEVICE, /* 10 */ - NO_BDEVICE, /* 11 */ - NO_BDEVICE, /* 12 */ - NO_BDEVICE, /* 13 */ - NO_BDEVICE, /* 14 */ - NO_BDEVICE, /* 15 */ + NO_BDEVICE, /* 8 */ + NO_BDEVICE, 
/* 9 */ + NO_BDEVICE, /* 10 */ + NO_BDEVICE, /* 11 */ + NO_BDEVICE, /* 12 */ + NO_BDEVICE, /* 13 */ + NO_BDEVICE, /* 14 */ + NO_BDEVICE, /* 15 */ /* 16 - 23 are reserved to Apple */ - NO_BDEVICE, /* 16 */ - NO_BDEVICE, /* 17 */ - NO_BDEVICE, /* 18 */ - NO_BDEVICE, /* 18 */ - NO_BDEVICE, /* 20 */ - NO_BDEVICE, /* 21 */ - NO_BDEVICE, /* 22 */ - NO_BDEVICE, /* 23 */ + NO_BDEVICE, /* 16 */ + NO_BDEVICE, /* 17 */ + NO_BDEVICE, /* 18 */ + NO_BDEVICE, /* 19 */ + NO_BDEVICE, /* 20 */ + NO_BDEVICE, /* 21 */ + NO_BDEVICE, /* 22 */ + NO_BDEVICE, /* 23 */ }; const int nblkdev = sizeof(bdevsw) / sizeof(bdevsw[0]); @@ -90,9 +90,9 @@ extern d_open_t volopen; extern d_close_t volclose; extern d_ioctl_t volioctl; #else -#define volopen eno_opcl -#define volclose eno_opcl -#define volioctl eno_ioctl +#define volopen eno_opcl +#define volclose eno_opcl +#define volioctl eno_ioctl #endif extern d_open_t cttyopen; @@ -104,8 +104,8 @@ extern d_select_t cttyselect; extern d_read_t mmread; extern d_write_t mmwrite; extern d_ioctl_t mmioctl; -#define mmselect (select_fcn_t *)seltrue -#define mmmmap eno_mmap +#define mmselect (select_fcn_t *)seltrue +#define mmmmap eno_mmap #include #if NPTY > 0 @@ -122,18 +122,18 @@ extern d_write_t ptcwrite; extern d_select_t ptcselect; extern d_ioctl_t ptyioctl; #else -#define ptsopen eno_opcl -#define ptsclose eno_opcl -#define ptsread eno_rdwrt -#define ptswrite eno_rdwrt -#define ptsstop nulldev +#define ptsopen eno_opcl +#define ptsclose eno_opcl +#define ptsread eno_rdwrt +#define ptswrite eno_rdwrt +#define ptsstop nulldev -#define ptcopen eno_opcl -#define ptcclose eno_opcl -#define ptcread eno_rdwrt -#define ptcwrite eno_rdwrt -#define ptcselect eno_select -#define ptyioctl eno_ioctl +#define ptcopen eno_opcl +#define ptcclose eno_opcl +#define ptcread eno_rdwrt +#define ptcwrite eno_rdwrt +#define ptcselect eno_select +#define ptyioctl eno_ioctl #endif extern d_open_t logopen; @@ -154,14 +154,14 @@ extern d_close_t oslogclose; extern d_ioctl_t oslogioctl; extern d_select_t oslogselect; -#define nullopen (d_open_t *)&nulldev -#define nullclose (d_close_t *)&nulldev -#define nullread (d_read_t *)&nulldev -#define nullwrite (d_write_t *)&nulldev -#define nullioctl (d_ioctl_t *)&nulldev -#define nullselect (d_select_t *)&nulldev -#define nullstop (d_stop_t *)&nulldev -#define nullreset (d_reset_t *)&nulldev +#define nullopen (d_open_t *)&nulldev +#define nullclose (d_close_t *)&nulldev +#define nullread (d_read_t *)&nulldev +#define nullwrite (d_write_t *)&nulldev +#define nullioctl (d_ioctl_t *)&nulldev +#define nullselect (d_select_t *)&nulldev +#define nullstop (d_stop_t *)&nulldev +#define nullreset (d_reset_t *)&nulldev struct cdevsw cdevsw[] = { /* @@ -226,7 +226,7 @@ const int nchrdev = sizeof(cdevsw) / sizeof(cdevsw[0]); uint64_t cdevsw_flags[sizeof(cdevsw) / sizeof(cdevsw[0])]; -#include /* for VCHR and VBLK */ +#include /* for VCHR and VBLK */ /* * return true if a disk */ @@ -241,41 +241,41 @@ isdisk(dev_t dev, int type) if (maj == NODEV) { break; } - /* FALL THROUGH */ + /* FALL THROUGH */ case VBLK: if (bdevsw[maj].d_type == D_DISK) { - return (1); + return 1; } break; } - return (0); + return 0; } static int chrtoblktab[] = { /* CHR *//* BLK *//* CHR *//* BLK */ - /* 0 */ NODEV, /* 1 */ NODEV, - /* 2 */ NODEV, /* 3 */ NODEV, - /* 4 */ NODEV, /* 5 */ NODEV, - /* 6 */ NODEV, /* 7 */ NODEV, - /* 8 */ NODEV, /* 9 */ NODEV, - /* 10 */ NODEV, /* 11 */ NODEV, - /* 12 */ NODEV, /* 13 */ NODEV, - /* 14 */ NODEV, /* 15 */ NODEV, - /* 16 */ NODEV, /* 17 */ NODEV, - /* 18 
*/ NODEV, /* 19 */ NODEV, - /* 20 */ NODEV, /* 21 */ NODEV, - /* 22 */ NODEV, /* 23 */ NODEV, - /* 24 */ NODEV, /* 25 */ NODEV, - /* 26 */ NODEV, /* 27 */ NODEV, - /* 28 */ NODEV, /* 29 */ NODEV, - /* 30 */ NODEV, /* 31 */ NODEV, - /* 32 */ NODEV, /* 33 */ NODEV, - /* 34 */ NODEV, /* 35 */ NODEV, - /* 36 */ NODEV, /* 37 */ NODEV, - /* 38 */ NODEV, /* 39 */ NODEV, - /* 40 */ NODEV, /* 41 */ NODEV, - /* 42 */ NODEV, /* 43 */ NODEV, - /* 44 */ NODEV, + /* 0 */ NODEV, /* 1 */ NODEV, + /* 2 */ NODEV, /* 3 */ NODEV, + /* 4 */ NODEV, /* 5 */ NODEV, + /* 6 */ NODEV, /* 7 */ NODEV, + /* 8 */ NODEV, /* 9 */ NODEV, + /* 10 */ NODEV, /* 11 */ NODEV, + /* 12 */ NODEV, /* 13 */ NODEV, + /* 14 */ NODEV, /* 15 */ NODEV, + /* 16 */ NODEV, /* 17 */ NODEV, + /* 18 */ NODEV, /* 19 */ NODEV, + /* 20 */ NODEV, /* 21 */ NODEV, + /* 22 */ NODEV, /* 23 */ NODEV, + /* 24 */ NODEV, /* 25 */ NODEV, + /* 26 */ NODEV, /* 27 */ NODEV, + /* 28 */ NODEV, /* 29 */ NODEV, + /* 30 */ NODEV, /* 31 */ NODEV, + /* 32 */ NODEV, /* 33 */ NODEV, + /* 34 */ NODEV, /* 35 */ NODEV, + /* 36 */ NODEV, /* 37 */ NODEV, + /* 38 */ NODEV, /* 39 */ NODEV, + /* 40 */ NODEV, /* 41 */ NODEV, + /* 42 */ NODEV, /* 43 */ NODEV, + /* 44 */ NODEV, }; /* @@ -286,21 +286,25 @@ chrtoblk(dev_t dev) { int blkmaj; - if (major(dev) >= nchrdev) - return (NODEV); + if (major(dev) >= nchrdev) { + return NODEV; + } blkmaj = chrtoblktab[major(dev)]; - if (blkmaj == NODEV) - return (NODEV); - return (makedev(blkmaj, minor(dev))); + if (blkmaj == NODEV) { + return NODEV; + } + return makedev(blkmaj, minor(dev)); } int chrtoblk_set(int cdev, int bdev) { - if (cdev >= nchrdev) - return (-1); - if (bdev != NODEV && bdev >= nblkdev) - return (-1); + if (cdev >= nchrdev) { + return -1; + } + if (bdev != NODEV && bdev >= nblkdev) { + return -1; + } chrtoblktab[cdev] = bdev; return 0; } diff --git a/bsd/dev/arm64/disassembler.c b/bsd/dev/arm64/disassembler.c index 7195d0d72..48bf43cb8 100644 --- a/bsd/dev/arm64/disassembler.c +++ b/bsd/dev/arm64/disassembler.c @@ -12,9 +12,10 @@ #include -#define BITS(x,n,mask) (((x) >> (n)) & (mask)) +#define BITS(x, n, mask) (((x) >> (n)) & (mask)) -static uint32_t thumb32_instword_to_arm(uint16_t hw1, uint16_t hw2) +static uint32_t +thumb32_instword_to_arm(uint16_t hw1, uint16_t hw2) { return (hw1 << 16) | hw2; } @@ -28,46 +29,55 @@ int dtrace_decode_thumb(uint32_t instr); */ static -int vfp_struct_loadstore(uint32_t instr) +int +vfp_struct_loadstore(uint32_t instr) { - if (ARM_RM(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int vfp_64transfer(uint32_t instr) +int +vfp_64transfer(uint32_t instr) { /* These instructions all use RD and RN */ - if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int vfp_transfer(uint32_t instr) +int +vfp_transfer(uint32_t instr) { /* These instructions all use RD only */ - if (ARM_RD(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int vfp_loadstore(uint32_t instr) +int +vfp_loadstore(uint32_t instr) { - int opcode = BITS(instr,20,0x1F); + int opcode = BITS(instr, 20, 0x1F); /* Instrument VLDR */ - if ((opcode & 0x13) == 0x11 && ARM_RN(instr) == REG_PC) + if ((opcode & 0x13) == 0x11 && ARM_RN(instr) == REG_PC) { return FASTTRAP_T_VLDR_PC_IMMED; - + } + /* These instructions all use RN only */ 
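/*
 * A minimal, self-contained sketch of the BITS() field-extraction idiom that
 * vfp_loadstore() and the other decode helpers here share: shift the
 * instruction word right by n, then mask off the field. This aside is not
 * part of the xnu patch; the word 0xED9F0A04 is a hand-assembled VLDR
 * encoding (vldr s0, [pc, #16]) chosen only to make the fields concrete.
 */
#include <stdint.h>
#include <stdio.h>

#define BITS(x, n, mask) (((x) >> (n)) & (mask))

int
main(void)
{
	uint32_t instr = 0xED9F0A04;

	int cond = BITS(instr, 28, 0xF);    /* condition field, bits 31:28 -> 0xE (always) */
	int opcode = BITS(instr, 20, 0x1F); /* coproc load/store opcode, bits 24:20 -> 0x19 */
	int rn = BITS(instr, 16, 0xF);      /* base register, bits 19:16 -> 15, i.e. the PC */

	/* (opcode & 0x13) == 0x11 with rn == REG_PC is the case vfp_loadstore()
	 * above tags as FASTTRAP_T_VLDR_PC_IMMED; the RN-only check announced by
	 * the preceding comment continues immediately below. */
	printf("cond=%#x opcode=%#x rn=%d\n", cond, opcode, rn);
	return 0;
}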
- if (ARM_RN(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } @@ -77,9 +87,10 @@ int vfp_loadstore(uint32_t instr) */ static -int arm_unconditional_misc(uint32_t instr) +int +arm_unconditional_misc(uint32_t instr) { - int op = BITS(instr,20,0x7F); + int op = BITS(instr, 20, 0x7F); if ((op & 0x60) == 0x20) { /* VFP data processing uses its own registers */ @@ -94,10 +105,12 @@ int arm_unconditional_misc(uint32_t instr) } static -int arm_unconditional(uint32_t instr) +int +arm_unconditional(uint32_t instr) { - if (BITS(instr,27,0x1) == 0) + if (BITS(instr, 27, 0x1) == 0) { return arm_unconditional_misc(instr); + } /* The rest are privileged or BL/BLX, do not instrument */ @@ -107,11 +120,12 @@ int arm_unconditional(uint32_t instr) } static -int arm_syscall_coproc(uint32_t instr) +int +arm_syscall_coproc(uint32_t instr) { /* Instrument any VFP data processing instructions, ignore the rest */ - int op1 = BITS(instr,20,0x3F), coproc = BITS(instr,8,0xF), op = BITS(instr,4,0x1); + int op1 = BITS(instr, 20, 0x3F), coproc = BITS(instr, 8, 0xF), op = BITS(instr, 4, 0x1); if ((op1 & 0x3E) == 0 || (op1 & 0x30) == 0x30) { /* Undefined or swi */ @@ -121,11 +135,13 @@ int arm_syscall_coproc(uint32_t instr) if ((coproc & 0xE) == 0xA) { /* VFP instruction */ - if ((op1 & 0x20) == 0 && (op1 & 0x3A) != 0) + if ((op1 & 0x20) == 0 && (op1 & 0x3A) != 0) { return vfp_loadstore(instr); + } - if ((op1 & 0x3E) == 0x04) + if ((op1 & 0x3E) == 0x04) { return vfp_64transfer(instr); + } if ((op1 & 0x30) == 0x20) { /* VFP data processing or 8, 16, or 32 bit move between ARM reg and VFP reg */ @@ -142,98 +158,118 @@ int arm_syscall_coproc(uint32_t instr) } static -int arm_branch_link_blockdata(uint32_t instr) +int +arm_branch_link_blockdata(uint32_t instr) { - int branch = BITS(instr,25,0x1), link = BITS(instr,24,0x1), op = BITS(instr,20,0x1F), uses_pc = BITS(instr,15,0x1), uses_lr = BITS(instr,14,0x1); + int branch = BITS(instr, 25, 0x1), link = BITS(instr, 24, 0x1), op = BITS(instr, 20, 0x1F), uses_pc = BITS(instr, 15, 0x1), uses_lr = BITS(instr, 14, 0x1); if (branch == 1) { - if (link == 0) + if (link == 0) { return FASTTRAP_T_B_COND; + } return FASTTRAP_T_INV; } else { /* Only emulate a use of the pc if it's a return from function: ldmia sp!, { ... pc } */ - if (op == 0x0B && ARM_RN(instr) == REG_SP && uses_pc == 1) + if (op == 0x0B && ARM_RN(instr) == REG_SP && uses_pc == 1) { return FASTTRAP_T_LDM_PC; + } /* stmia sp!, { ... 
lr } doesn't touch the pc, but it is very common, so special case it */ - if (op == 0x12 && ARM_RN(instr) == REG_SP && uses_lr == 1) + if (op == 0x12 && ARM_RN(instr) == REG_SP && uses_lr == 1) { return FASTTRAP_T_STM_LR; + } - if (ARM_RN(instr) != REG_PC && uses_pc == 0) + if (ARM_RN(instr) != REG_PC && uses_pc == 0) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int arm_signed_multiplies(uint32_t instr) +int +arm_signed_multiplies(uint32_t instr) { - int op1 = BITS(instr,20,0x7), op2 = BITS(instr,5,0x7); + int op1 = BITS(instr, 20, 0x7), op2 = BITS(instr, 5, 0x7); /* smlald, smlsld, smmls use RD in addition to RM, RS, and RN */ if ((op1 == 0x4 && (op2 & 0x4) == 0) || (op1 == 0x5 && (op2 & 0x6) == 0x6)) { - if (ARM_RD(instr) == REG_PC) + if (ARM_RD(instr) == REG_PC) { return FASTTRAP_T_INV; + } } - if (ARM_RM(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_pack_unpack_sat_reversal(uint32_t instr) +int +arm_pack_unpack_sat_reversal(uint32_t instr) { - int op1 = BITS(instr,20,0x7), op2 = BITS(instr,5,0x7); + int op1 = BITS(instr, 20, 0x7), op2 = BITS(instr, 5, 0x7); /* pkh, sel use RN in addition to RD and RM */ if ((op1 == 0 && (op2 & 0x1) == 0) || (op1 == 0 && op2 == 0x5)) { - if (ARM_RN(instr) == REG_PC) + if (ARM_RN(instr) == REG_PC) { return FASTTRAP_T_INV; + } } - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_parallel_addsub_unsigned(uint32_t instr) +int +arm_parallel_addsub_unsigned(uint32_t instr) { - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_parallel_addsub_signed(uint32_t instr) +int +arm_parallel_addsub_signed(uint32_t instr) { - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_media(uint32_t instr) +int +arm_media(uint32_t instr) { - int op1 = BITS(instr,20,0x1F), op2 = BITS(instr,5,0x7); + int op1 = BITS(instr, 20, 0x1F), op2 = BITS(instr, 5, 0x7); - if ((op1 & 0x1C) == 0) + if ((op1 & 0x1C) == 0) { return arm_parallel_addsub_signed(instr); + } - if ((op1 & 0x1C) == 0x04) + if ((op1 & 0x1C) == 0x04) { return arm_parallel_addsub_unsigned(instr); + } - if ((op1 & 0x18) == 0x08) + if ((op1 & 0x18) == 0x08) { return arm_pack_unpack_sat_reversal(instr); + } - if ((op1 & 0x18) == 0x10) + if ((op1 & 0x18) == 0x10) { return arm_signed_multiplies(instr); + } if (op1 == 0x1F && op2 == 0x7) { /* Undefined instruction */ @@ -245,24 +281,27 @@ int arm_media(uint32_t instr) /* The registers are named differently in the reference manual for this instruction * but the following positions are correct */ - if (ARM_RM(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } if ((op1 & 0x1E) == 0x1C && (op2 & 0x3) == 0) { /* bfc bfi */ - if (ARM_RD(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; 
+ } return FASTTRAP_T_INV; } if (((op1 & 0x1E) == 0x1A || (op1 & 0x1E) == 0x1E) && ((op2 & 0x3) == 0x2)) { /* sbfx ubfx */ - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } @@ -271,57 +310,67 @@ int arm_media(uint32_t instr) } static -int arm_loadstore_wordbyte(uint32_t instr) +int +arm_loadstore_wordbyte(uint32_t instr) { /* Instrument PC relative load with immediate, ignore any other uses of the PC */ - int R = BITS(instr,25,0x1), L = BITS(instr,20,0x1); + int R = BITS(instr, 25, 0x1), L = BITS(instr, 20, 0x1); if (R == 1) { /* Three register load/store */ - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { /* Immediate load/store, but still do not support ldr pc, [pc...] */ - if (L == 1 && ARM_RN(instr) == REG_PC && ARM_RD(instr) != REG_PC) + if (L == 1 && ARM_RN(instr) == REG_PC && ARM_RD(instr) != REG_PC) { return FASTTRAP_T_LDR_PC_IMMED; + } - if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int arm_saturating(uint32_t instr) +int +arm_saturating(uint32_t instr) { - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_misc(uint32_t instr) +int +arm_misc(uint32_t instr) { - int op = BITS(instr,21,0x3), __unused op1 = BITS(instr,16,0xF), op2 = BITS(instr,4,0x7); + int op = BITS(instr, 21, 0x3), __unused op1 = BITS(instr, 16, 0xF), op2 = BITS(instr, 4, 0x7); - if (op2 == 1 && op == 1) + if (op2 == 1 && op == 1) { return FASTTRAP_T_BX_REG; + } /* We do not need to emulate BLX for entry/return probes; if we eventually support full offset * tracing, then we will. This is because BLX overwrites the link register, so a function that * can execute this as its first instruction is a special function indeed. */ - if (op2 == 0x5) + if (op2 == 0x5) { return arm_saturating(instr); + } return FASTTRAP_T_INV; } static -int arm_msr_hints(__unused uint32_t instr) +int +arm_msr_hints(__unused uint32_t instr) { /* These deal with the psr, not instrumented */ @@ -329,7 +378,8 @@ int arm_msr_hints(__unused uint32_t instr) } static -int arm_sync_primitive(__unused uint32_t instr) +int +arm_sync_primitive(__unused uint32_t instr) { /* TODO will instrumenting these interfere with any kernel usage of these instructions? 
*/ /* Don't instrument for now */ @@ -338,9 +388,10 @@ int arm_sync_primitive(__unused uint32_t instr) } static -int arm_extra_loadstore_unpriv(uint32_t instr) +int +arm_extra_loadstore_unpriv(uint32_t instr) { - int op = BITS(instr,20,0x1), __unused op2 = BITS(instr,5,0x3), immed = BITS(instr,22,0x1); + int op = BITS(instr, 20, 0x1), __unused op2 = BITS(instr, 5, 0x3), immed = BITS(instr, 22, 0x1); if (op == 0 && (op2 & 0x2) == 0x2) { /* Unpredictable or undefined */ @@ -348,91 +399,106 @@ int arm_extra_loadstore_unpriv(uint32_t instr) } if (immed == 1) { - if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } - + return FASTTRAP_T_INV; } static -int arm_extra_loadstore(uint32_t instr) +int +arm_extra_loadstore(uint32_t instr) { - int op1 = BITS(instr,20,0x1F); + int op1 = BITS(instr, 20, 0x1F); /* There are two variants, and we do not instrument either of them that use the PC */ if ((op1 & 0x4) == 0) { /* Variant 1, register */ - if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RM(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { /* Variant 2, immediate */ - if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC && ARM_RN(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int arm_halfword_multiply(uint32_t instr) +int +arm_halfword_multiply(uint32_t instr) { /* Not all multiply instructions use all four registers. The ones that don't should have those * register locations set to 0, so we can test them anyway. */ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_multiply(uint32_t instr) +int +arm_multiply(uint32_t instr) { /* Not all multiply instructions use all four registers. The ones that don't should have those * register locations set to 0, so we can test them anyway. 
*/ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_dataproc_immed(uint32_t instr) +int +arm_dataproc_immed(uint32_t instr) { /* All these instructions are either two registers, or one register and have 0 where the other reg would be used */ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_dataproc_regshift(uint32_t instr) +int +arm_dataproc_regshift(uint32_t instr) { /* All these instructions are either four registers, or three registers and have 0 where the last reg would be used */ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RS(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_dataproc_reg(uint32_t instr) +int +arm_dataproc_reg(uint32_t instr) { - int op1 = BITS(instr,20,0x1F), op2 = BITS(instr,7,0x1F), op3 = BITS(instr,5,0x3); + int op1 = BITS(instr, 20, 0x1F), op2 = BITS(instr, 7, 0x1F), op3 = BITS(instr, 5, 0x3); if (op1 == 0x11 || op1 == 0x13 || op1 == 0x15 || op1 == 0x17) { /* These are comparison flag setting instructions and do not have RD */ - if (ARM_RN(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } @@ -441,96 +507,119 @@ int arm_dataproc_reg(uint32_t instr) * movs pc, reg is a privileged instruction so we don't instrument that variant. The s bit * is bit 0 of op1 and should be zero. 
*/ - if (op1 == 0x1A && op2 == 0 && op3 == 0 && ARM_RD(instr) == REG_PC) + if (op1 == 0x1A && op2 == 0 && op3 == 0 && ARM_RD(instr) == REG_PC) { return FASTTRAP_T_MOV_PC_REG; + } /* Any instruction at this point is a three register instruction or two register instruction with RN = 0 */ - if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RM(instr) != REG_PC) + if (ARM_RN(instr) != REG_PC && ARM_RD(instr) != REG_PC && ARM_RM(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int arm_dataproc_misc(uint32_t instr) +int +arm_dataproc_misc(uint32_t instr) { - int op = BITS(instr,25,0x1), op1 = BITS(instr,20,0x1F), op2 = BITS(instr,4,0xF); + int op = BITS(instr, 25, 0x1), op1 = BITS(instr, 20, 0x1F), op2 = BITS(instr, 4, 0xF); if (op == 0) { - if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0) + if ((op1 & 0x19) != 0x10 && (op2 & 0x1) == 0) { return arm_dataproc_reg(instr); + } - if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1) + if ((op1 & 0x19) != 0x10 && (op2 & 0x9) == 0x1) { return arm_dataproc_regshift(instr); + } - if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0) + if ((op1 & 0x19) == 0x10 && (op2 & 0x8) == 0) { return arm_misc(instr); + } - if ((op1 & 0x19) == 0x19 && (op2 & 0x9) == 0x8) + if ((op1 & 0x19) == 0x19 && (op2 & 0x9) == 0x8) { return arm_halfword_multiply(instr); + } - if ((op1 & 0x10) == 0 && op2 == 0x9) + if ((op1 & 0x10) == 0 && op2 == 0x9) { return arm_multiply(instr); + } - if ((op1 & 0x10) == 0x10 && op2 == 0x9) + if ((op1 & 0x10) == 0x10 && op2 == 0x9) { return arm_sync_primitive(instr); + } - if ((op1 & 0x12) != 0x02 && (op2 == 0xB || (op2 & 0xD) == 0xD)) + if ((op1 & 0x12) != 0x02 && (op2 == 0xB || (op2 & 0xD) == 0xD)) { return arm_extra_loadstore(instr); + } - if ((op1 & 0x12) == 0x02 && (op2 == 0xB || (op2 & 0xD) == 0xD)) + if ((op1 & 0x12) == 0x02 && (op2 == 0xB || (op2 & 0xD) == 0xD)) { return arm_extra_loadstore_unpriv(instr); + } } else { - if ((op1 & 0x19) != 0x10) + if ((op1 & 0x19) != 0x10) { return arm_dataproc_immed(instr); + } if (op1 == 0x10) { /* 16 bit immediate load (mov (immed)) [encoding A2] */ - if (ARM_RD(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } if (op1 == 0x14) { /* high halfword 16 bit immediate load (movt) [encoding A1] */ - if (ARM_RD(instr) != REG_PC) + if (ARM_RD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } - if ((op1 & 0x1B) == 0x12) + if ((op1 & 0x1B) == 0x12) { return arm_msr_hints(instr); + } } return FASTTRAP_T_INV; } -int dtrace_decode_arm(uint32_t instr) +int +dtrace_decode_arm(uint32_t instr) { - int cond = BITS(instr,28,0xF), op1 = BITS(instr,25,0x7), op = BITS(instr,4,0x1); + int cond = BITS(instr, 28, 0xF), op1 = BITS(instr, 25, 0x7), op = BITS(instr, 4, 0x1); - if (cond == 0xF) + if (cond == 0xF) { return arm_unconditional(instr); + } - if ((op1 & 0x6) == 0) + if ((op1 & 0x6) == 0) { return arm_dataproc_misc(instr); + } - if (op1 == 0x2) + if (op1 == 0x2) { return arm_loadstore_wordbyte(instr); + } - if (op1 == 0x3 && op == 0) + if (op1 == 0x3 && op == 0) { return arm_loadstore_wordbyte(instr); + } - if (op1 == 0x3 && op == 1) + if (op1 == 0x3 && op == 1) { return arm_media(instr); + } - if ((op1 & 0x6) == 0x4) + if ((op1 & 0x6) == 0x4) { return arm_branch_link_blockdata(instr); + } - if ((op1 & 0x6) == 0x6) + if ((op1 & 0x6) == 0x6) { return arm_syscall_coproc(instr); + } return FASTTRAP_T_INV; } @@ -540,20 +629,23 @@ int dtrace_decode_arm(uint32_t instr) */ static -int thumb16_cond_supervisor(uint16_t 
instr) +int +thumb16_cond_supervisor(uint16_t instr) { - int opcode = BITS(instr,8,0xF); + int opcode = BITS(instr, 8, 0xF); - if ((opcode & 0xE) != 0xE) + if ((opcode & 0xE) != 0xE) { return FASTTRAP_T_B_COND; + } return FASTTRAP_T_INV; } static -int thumb16_misc(uint16_t instr) +int +thumb16_misc(uint16_t instr) { - int opcode = BITS(instr,5,0x7F); + int opcode = BITS(instr, 5, 0x7F); if ((opcode & 0x70) == 0x30 || (opcode & 0x70) == 0x70) { /* setend, cps, breakpoint, or if-then, not instrumentable */ @@ -572,16 +664,18 @@ int thumb16_misc(uint16_t instr) } static -int thumb16_loadstore_single(__unused uint16_t instr) +int +thumb16_loadstore_single(__unused uint16_t instr) { /* These all access the low registers or SP only */ return FASTTRAP_T_COMMON; } static -int thumb16_data_special_and_branch(uint16_t instr) +int +thumb16_data_special_and_branch(uint16_t instr) { - int opcode = BITS(instr,6,0xF); + int opcode = BITS(instr, 6, 0xF); if (opcode == 0x4) { /* Unpredictable */ @@ -589,56 +683,66 @@ int thumb16_data_special_and_branch(uint16_t instr) } else if ((opcode & 0xC) == 0xC) { /* bx or blx */ /* Only instrument the bx */ - if ((opcode & 0x2) == 0) + if ((opcode & 0x2) == 0) { return FASTTRAP_T_BX_REG; + } return FASTTRAP_T_INV; } else { /* Data processing on high registers, only instrument mov pc, reg */ - if ((opcode & 0xC) == 0x8 && THUMB16_HRD(instr) == REG_PC) + if ((opcode & 0xC) == 0x8 && THUMB16_HRD(instr) == REG_PC) { return FASTTRAP_T_CPY_PC; + } - if (THUMB16_HRM(instr) != REG_PC && THUMB16_HRD(instr) != REG_PC) + if (THUMB16_HRM(instr) != REG_PC && THUMB16_HRD(instr) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int thumb16_data_proc(__unused uint16_t instr) +int +thumb16_data_proc(__unused uint16_t instr) { /* These all access the low registers only */ return FASTTRAP_T_COMMON; } static -int thumb16_shift_addsub_move_compare(__unused uint16_t instr) +int +thumb16_shift_addsub_move_compare(__unused uint16_t instr) { /* These all access the low registers only */ return FASTTRAP_T_COMMON; } static -int dtrace_decode_thumb16(uint16_t instr) +int +dtrace_decode_thumb16(uint16_t instr) { - int opcode = BITS(instr,10,0x3F); + int opcode = BITS(instr, 10, 0x3F); - if ((opcode & 0x30) == 0) + if ((opcode & 0x30) == 0) { return thumb16_shift_addsub_move_compare(instr); + } - if (opcode == 0x10) + if (opcode == 0x10) { return thumb16_data_proc(instr); + } - if (opcode == 0x11) + if (opcode == 0x11) { return thumb16_data_special_and_branch(instr); + } if ((opcode & 0x3E) == 0x12) { /* ldr (literal) */ return FASTTRAP_T_LDR_PC_IMMED; } - if ((opcode & 0x3C) == 0x14 || (opcode & 0x38) == 0x18 || (opcode & 0x38) == 0x20) + if ((opcode & 0x3C) == 0x14 || (opcode & 0x38) == 0x18 || (opcode & 0x38) == 0x20) { return thumb16_loadstore_single(instr); + } if ((opcode & 0x3E) == 0x28) { /* adr, uses the pc */ @@ -650,8 +754,9 @@ int dtrace_decode_thumb16(uint16_t instr) return FASTTRAP_T_COMMON; } - if ((opcode & 0x3C) == 0x2C) + if ((opcode & 0x3C) == 0x2C) { return thumb16_misc(instr); + } if ((opcode & 0x3E) == 0x30) { /* stm - can't access high registers */ @@ -680,11 +785,12 @@ int dtrace_decode_thumb16(uint16_t instr) */ static -int thumb32_coproc(uint16_t instr1, uint16_t instr2) +int +thumb32_coproc(uint16_t instr1, uint16_t instr2) { /* Instrument any VFP data processing instructions, ignore the rest */ - int op1 = BITS(instr1,4,0x3F), coproc = BITS(instr2,8,0xF), op = BITS(instr2,4,0x1); + int op1 = BITS(instr1, 4, 0x3F), coproc = BITS(instr2, 8, 
0xF), op = BITS(instr2, 4, 0x1); if ((op1 & 0x3E) == 0) { /* Undefined */ @@ -693,18 +799,20 @@ int thumb32_coproc(uint16_t instr1, uint16_t instr2) if ((coproc & 0xE) == 0xA || (op1 & 0x30) == 0x30) { /* VFP instruction */ - uint32_t instr = thumb32_instword_to_arm(instr1,instr2); + uint32_t instr = thumb32_instword_to_arm(instr1, instr2); if ((op1 & 0x30) == 0x30) { /* VFP data processing uses its own registers */ return FASTTRAP_T_COMMON; } - if ((op1 & 0x3A) == 0x02 || (op1 & 0x38) == 0x08 || (op1 & 0x30) == 0x10) + if ((op1 & 0x3A) == 0x02 || (op1 & 0x38) == 0x08 || (op1 & 0x30) == 0x10) { return vfp_loadstore(instr); + } - if ((op1 & 0x3E) == 0x04) + if ((op1 & 0x3E) == 0x04) { return vfp_64transfer(instr); + } if ((op1 & 0x30) == 0x20) { /* VFP data processing or 8, 16, or 32 bit move between ARM reg and VFP reg */ @@ -721,192 +829,229 @@ int thumb32_coproc(uint16_t instr1, uint16_t instr2) } static -int thumb32_longmultiply(uint16_t instr1, uint16_t instr2) +int +thumb32_longmultiply(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,4,0x7), op2 = BITS(instr2,4,0xF); + int op1 = BITS(instr1, 4, 0x7), op2 = BITS(instr2, 4, 0xF); if ((op1 == 1 && op2 == 0xF) || (op1 == 0x3 && op2 == 0xF)) { /* Three register instruction */ - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { /* Four register instruction */ - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && - THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && + THUMB32_RT(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int thumb32_multiply(uint16_t instr1, uint16_t instr2) +int +thumb32_multiply(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,4,0x7), op2 = BITS(instr2,4,0x3); + int op1 = BITS(instr1, 4, 0x7), op2 = BITS(instr2, 4, 0x3); if ((op1 == 0 && op2 == 1) || (op1 == 0x6 && (op2 & 0x2) == 0)) { - if (THUMB32_RT(instr1,instr2) == REG_PC) + if (THUMB32_RT(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } } - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_misc(uint16_t instr1, uint16_t instr2) +int +thumb32_misc(uint16_t instr1, uint16_t instr2) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_parallel_addsub_unsigned(uint16_t instr1, uint16_t instr2) +int +thumb32_parallel_addsub_unsigned(uint16_t instr1, uint16_t instr2) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return 
FASTTRAP_T_INV; } static -int thumb32_parallel_addsub_signed(uint16_t instr1, uint16_t instr2) +int +thumb32_parallel_addsub_signed(uint16_t instr1, uint16_t instr2) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_dataproc_reg(uint16_t instr1, uint16_t instr2) +int +thumb32_dataproc_reg(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,4,0xF), op2 = BITS(instr2,4,0xF); + int op1 = BITS(instr1, 4, 0xF), op2 = BITS(instr2, 4, 0xF); if (((0 <= op1) && (op1 <= 5)) && (op2 & 0x8) == 0x8) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } if ((op1 & 0x8) == 0 && op2 == 0) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } - if ((op1 & 0x8) == 0x8 && (op2 & 0xC) == 0) - return thumb32_parallel_addsub_signed(instr1,instr2); + if ((op1 & 0x8) == 0x8 && (op2 & 0xC) == 0) { + return thumb32_parallel_addsub_signed(instr1, instr2); + } - if ((op1 & 0x8) == 0x8 && (op2 & 0xC) == 0x4) - return thumb32_parallel_addsub_unsigned(instr1,instr2); + if ((op1 & 0x8) == 0x8 && (op2 & 0xC) == 0x4) { + return thumb32_parallel_addsub_unsigned(instr1, instr2); + } - if ((op1 & 0xC) == 0x8 && (op2 & 0xC) == 0x8) - return thumb32_misc(instr1,instr2); + if ((op1 & 0xC) == 0x8 && (op2 & 0xC) == 0x8) { + return thumb32_misc(instr1, instr2); + } return FASTTRAP_T_INV; } static -int thumb32_dataproc_regshift(uint16_t instr1, uint16_t instr2) +int +thumb32_dataproc_regshift(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,5,0xF), S = BITS(instr1,4,0x1); + int op = BITS(instr1, 5, 0xF), S = BITS(instr1, 4, 0x1); if (op == 0 || op == 0x4 || op == 0x8 || op == 0xD) { /* These become test instructions if S is 1 and Rd is PC, otherwise they are data instructions. */ if (S == 1) { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && - THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && + THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } } else if (op == 0x2 || op == 0x3) { /* These become moves if RN is PC, otherwise they are data insts. 
We don't instrument mov pc, reg here */ - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { /* Normal three register instruction */ - if (THUMB32_RM(instr1,instr2) != REG_PC && THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RM(instr1, instr2) != REG_PC && THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int thumb32_store_single(uint16_t instr1, uint16_t instr2) +int +thumb32_store_single(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,5,0x7), op2 = BITS(instr2,6,0x3F); + int op1 = BITS(instr1, 5, 0x7), op2 = BITS(instr2, 6, 0x3F); /* Do not support any use of the pc yet */ if ((op1 == 0 || op1 == 1 || op1 == 2) && (op2 & 0x20) == 0) { /* str (register) uses RM */ - if (THUMB32_RM(instr1,instr2) == REG_PC) + if (THUMB32_RM(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } } - if (THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RT(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_loadbyte_memhint(uint16_t instr1, uint16_t instr2) +int +thumb32_loadbyte_memhint(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,7,0x3), __unused op2 = BITS(instr2,6,0x3F); + int op1 = BITS(instr1, 7, 0x3), __unused op2 = BITS(instr2, 6, 0x3F); /* Do not support any use of the pc yet */ - if ((op1 == 0 || op1 == 0x2) && THUMB32_RM(instr1,instr2) == REG_PC) + if ((op1 == 0 || op1 == 0x2) && THUMB32_RM(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } - if (THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RT(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_loadhalfword_memhint(uint16_t instr1, uint16_t instr2) +int +thumb32_loadhalfword_memhint(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,7,0x3), op2 = BITS(instr2,6,0x3F); + int op1 = BITS(instr1, 7, 0x3), op2 = BITS(instr2, 6, 0x3F); /* Do not support any use of the PC yet */ - if (op1 == 0 && op2 == 0 && THUMB32_RM(inst1,instr2) == REG_PC) + if (op1 == 0 && op2 == 0 && THUMB32_RM(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } - if (THUMB32_RT(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RT(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_loadword(uint16_t instr1, uint16_t instr2) +int +thumb32_loadword(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,7,0x3), op2 = BITS(instr2,6,0x3F); + int op1 = BITS(instr1, 7, 0x3), op2 = BITS(instr2, 6, 0x3F); - if ((op1 & 0x2) == 0 && THUMB32_RN(instr1,instr2) == REG_PC && THUMB32_RT(instr1,instr2) != REG_PC) + if ((op1 & 0x2) == 0 && THUMB32_RN(instr1, instr2) == REG_PC && THUMB32_RT(instr1, instr2) != REG_PC) { return FASTTRAP_T_LDR_PC_IMMED; + } if (op1 == 0 && op2 == 0) { /* ldr (register) uses an additional reg */ - if (THUMB32_RM(instr1,instr2) == REG_PC) + if (THUMB32_RM(instr1, instr2) == REG_PC) { return FASTTRAP_T_INV; + } } - if (THUMB32_RT(instr1,instr2) != REG_PC && 
THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_loadstore_double_exclusive_table(__unused uint16_t instr1, __unused uint16_t instr2) +int +thumb32_loadstore_double_exclusive_table(__unused uint16_t instr1, __unused uint16_t instr2) { /* Don't instrument any of these */ @@ -914,9 +1059,10 @@ int thumb32_loadstore_double_exclusive_table(__unused uint16_t instr1, __unused } static -int thumb32_loadstore_multiple(uint16_t instr1, uint16_t instr2) +int +thumb32_loadstore_multiple(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,7,0x3), L = BITS(instr1,4,0x1), uses_pc = BITS(instr2,15,0x1), uses_lr = BITS(instr2,14,0x1); + int op = BITS(instr1, 7, 0x3), L = BITS(instr1, 4, 0x1), uses_pc = BITS(instr2, 15, 0x1), uses_lr = BITS(instr2, 14, 0x1); if (op == 0 || op == 0x3) { /* Privileged instructions: srs, rfe */ @@ -924,175 +1070,210 @@ int thumb32_loadstore_multiple(uint16_t instr1, uint16_t instr2) } /* Only emulate a use of the pc if it's a return from function: ldmia sp!, { ... pc }, aka pop { ... pc } */ - if (op == 0x1 && L == 1 && THUMB32_RN(instr1,instr2) == REG_SP && uses_pc == 1) + if (op == 0x1 && L == 1 && THUMB32_RN(instr1, instr2) == REG_SP && uses_pc == 1) { return FASTTRAP_T_LDM_PC; + } /* stmia sp!, { ... lr }, aka push { ... lr } doesn't touch the pc, but it is very common, so special case it */ - if (op == 0x2 && L == 0 && THUMB32_RN(instr1,instr2) == REG_SP && uses_lr == 1) + if (op == 0x2 && L == 0 && THUMB32_RN(instr1, instr2) == REG_SP && uses_lr == 1) { return FASTTRAP_T_STM_LR; + } - if (THUMB32_RN(instr1,instr2) != REG_PC && uses_pc == 0) + if (THUMB32_RN(instr1, instr2) != REG_PC && uses_pc == 0) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int thumb32_misc_control(__unused uint16_t instr1, __unused uint16_t instr2) +int +thumb32_misc_control(__unused uint16_t instr1, __unused uint16_t instr2) { /* Privileged, and instructions dealing with ThumbEE */ return FASTTRAP_T_INV; } static -int thumb32_cps_hints(__unused uint16_t instr1, __unused uint16_t instr2) +int +thumb32_cps_hints(__unused uint16_t instr1, __unused uint16_t instr2) { /* Privileged */ return FASTTRAP_T_INV; } static -int thumb32_b_misc_control(uint16_t instr1, uint16_t instr2) +int +thumb32_b_misc_control(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,4,0x7F), op1 = BITS(instr2,12,0x7), __unused op2 = BITS(instr2,8,0xF); + int op = BITS(instr1, 4, 0x7F), op1 = BITS(instr2, 12, 0x7), __unused op2 = BITS(instr2, 8, 0xF); if ((op1 & 0x5) == 0) { - if ((op & 0x38) != 0x38) + if ((op & 0x38) != 0x38) { return FASTTRAP_T_B_COND; + } - if (op == 0x3A) - return thumb32_cps_hints(instr1,instr2); + if (op == 0x3A) { + return thumb32_cps_hints(instr1, instr2); + } - if (op == 0x3B) - return thumb32_misc_control(instr1,instr2); + if (op == 0x3B) { + return thumb32_misc_control(instr1, instr2); + } } - if ((op1 & 0x5) == 1) + if ((op1 & 0x5) == 1) { return FASTTRAP_T_B_UNCOND; + } return FASTTRAP_T_INV; } static -int thumb32_dataproc_plain_immed(uint16_t instr1, uint16_t instr2) +int +thumb32_dataproc_plain_immed(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,4,0x1F); + int op = BITS(instr1, 4, 0x1F); if (op == 0x04 || op == 0x0C || op == 0x16) { /* mov, movt, bfi, bfc */ /* These use only RD */ - if (THUMB32_RD(instr1,instr2) != REG_PC) + if (THUMB32_RD(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } else { - if (THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) 
+ if (THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } return FASTTRAP_T_INV; } static -int thumb32_dataproc_mod_immed(uint16_t instr1, uint16_t instr2) +int +thumb32_dataproc_mod_immed(uint16_t instr1, uint16_t instr2) { - int op = BITS(instr1,5,0xF), S = BITS(instr1,4,0x1); + int op = BITS(instr1, 5, 0xF), S = BITS(instr1, 4, 0x1); if (op == 0x2 || op == 0x3) { /* These allow REG_PC in RN, but it doesn't mean use the PC! */ - if (THUMB32_RD(instr1,instr2) != REG_PC) + if (THUMB32_RD(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } } if (op == 0 || op == 0x4 || op == 0x8 || op == 0xD) { /* These are test instructions, if the sign bit is set and RD is the PC. */ - if (S && THUMB32_RD(instr1,instr2) == REG_PC) + if (S && THUMB32_RD(instr1, instr2) == REG_PC) { return FASTTRAP_T_COMMON; + } } - if (THUMB32_RD(instr1,instr2) != REG_PC && THUMB32_RN(instr1,instr2) != REG_PC) + if (THUMB32_RD(instr1, instr2) != REG_PC && THUMB32_RN(instr1, instr2) != REG_PC) { return FASTTRAP_T_COMMON; + } return FASTTRAP_T_INV; } static -int dtrace_decode_thumb32(uint16_t instr1, uint16_t instr2) +int +dtrace_decode_thumb32(uint16_t instr1, uint16_t instr2) { - int op1 = BITS(instr1,11,0x3), op2 = BITS(instr1,4,0x7F), op = BITS(instr2,15,0x1); + int op1 = BITS(instr1, 11, 0x3), op2 = BITS(instr1, 4, 0x7F), op = BITS(instr2, 15, 0x1); if (op1 == 0x1) { - if ((op2 & 0x64) == 0) - return thumb32_loadstore_multiple(instr1,instr2); + if ((op2 & 0x64) == 0) { + return thumb32_loadstore_multiple(instr1, instr2); + } - if ((op2 & 0x64) == 0x04) - return thumb32_loadstore_double_exclusive_table(instr1,instr2); + if ((op2 & 0x64) == 0x04) { + return thumb32_loadstore_double_exclusive_table(instr1, instr2); + } - if ((op2 & 0x60) == 0x20) - return thumb32_dataproc_regshift(instr1,instr2); + if ((op2 & 0x60) == 0x20) { + return thumb32_dataproc_regshift(instr1, instr2); + } - if ((op2 & 0x40) == 0x40) - return thumb32_coproc(instr1,instr2); + if ((op2 & 0x40) == 0x40) { + return thumb32_coproc(instr1, instr2); + } } if (op1 == 0x2) { - if ((op2 & 0x20) == 0 && op == 0) - return thumb32_dataproc_mod_immed(instr1,instr2); + if ((op2 & 0x20) == 0 && op == 0) { + return thumb32_dataproc_mod_immed(instr1, instr2); + } - if ((op2 & 0x20) == 0x20 && op == 0) - return thumb32_dataproc_plain_immed(instr1,instr2); + if ((op2 & 0x20) == 0x20 && op == 0) { + return thumb32_dataproc_plain_immed(instr1, instr2); + } - if (op == 1) - return thumb32_b_misc_control(instr1,instr2); + if (op == 1) { + return thumb32_b_misc_control(instr1, instr2); + } } if (op1 == 0x3) { - if ((op2 & 0x71) == 0) - return thumb32_store_single(instr1,instr2); + if ((op2 & 0x71) == 0) { + return thumb32_store_single(instr1, instr2); + } if ((op2 & 0x71) == 0x10) { - return vfp_struct_loadstore(thumb32_instword_to_arm(instr1,instr2)); + return vfp_struct_loadstore(thumb32_instword_to_arm(instr1, instr2)); } - if ((op2 & 0x67) == 0x01) - return thumb32_loadbyte_memhint(instr1,instr2); + if ((op2 & 0x67) == 0x01) { + return thumb32_loadbyte_memhint(instr1, instr2); + } - if ((op2 & 0x67) == 0x03) - return thumb32_loadhalfword_memhint(instr1,instr2); + if ((op2 & 0x67) == 0x03) { + return thumb32_loadhalfword_memhint(instr1, instr2); + } - if ((op2 & 0x67) == 0x05) - return thumb32_loadword(instr1,instr2); + if ((op2 & 0x67) == 0x05) { + return thumb32_loadword(instr1, instr2); + } if ((op2 & 0x67) == 0x07) { /* Undefined instruction */ return FASTTRAP_T_INV; } - if ((op2 & 0x70) == 0x20) - 
return thumb32_dataproc_reg(instr1,instr2); + if ((op2 & 0x70) == 0x20) { + return thumb32_dataproc_reg(instr1, instr2); + } - if ((op2 & 0x78) == 0x30) - return thumb32_multiply(instr1,instr2); + if ((op2 & 0x78) == 0x30) { + return thumb32_multiply(instr1, instr2); + } - if ((op2 & 0x78) == 0x38) - return thumb32_longmultiply(instr1,instr2); + if ((op2 & 0x78) == 0x38) { + return thumb32_longmultiply(instr1, instr2); + } - if ((op2 & 0x40) == 0x40) - return thumb32_coproc(instr1,instr2); + if ((op2 & 0x40) == 0x40) { + return thumb32_coproc(instr1, instr2); + } } return FASTTRAP_T_INV; } -int dtrace_decode_thumb(uint32_t instr) +int +dtrace_decode_thumb(uint32_t instr) { uint16_t* pInstr = (uint16_t*) &instr; uint16_t hw1 = pInstr[0], hw2 = pInstr[1]; - int size = BITS(hw1,11,0x1F); + int size = BITS(hw1, 11, 0x1F); - if (size == 0x1D || size == 0x1E || size == 0x1F) - return dtrace_decode_thumb32(hw1,hw2); - else + if (size == 0x1D || size == 0x1E || size == 0x1F) { + return dtrace_decode_thumb32(hw1, hw2); + } else { return dtrace_decode_thumb16(hw1); + } } struct arm64_decode_entry { @@ -1102,36 +1283,38 @@ struct arm64_decode_entry { }; struct arm64_decode_entry arm64_decode_table[] = { - { .mask = 0xFFFFFFFF, .value = FASTTRAP_ARM64_OP_VALUE_FUNC_ENTRY, .type = FASTTRAP_T_ARM64_STANDARD_FUNCTION_ENTRY }, - { .mask = FASTTRAP_ARM64_OP_MASK_LDR_S_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_S_PC_REL, .type = FASTTRAP_T_ARM64_LDR_S_PC_REL }, - { .mask = FASTTRAP_ARM64_OP_MASK_LDR_W_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_W_PC_REL, .type = FASTTRAP_T_ARM64_LDR_W_PC_REL }, - { .mask = FASTTRAP_ARM64_OP_MASK_LDR_D_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_D_PC_REL, .type = FASTTRAP_T_ARM64_LDR_D_PC_REL }, - { .mask = FASTTRAP_ARM64_OP_MASK_LDR_X_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_X_PC_REL, .type = FASTTRAP_T_ARM64_LDR_X_PC_REL }, - { .mask = FASTTRAP_ARM64_OP_MASK_LDR_Q_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_Q_PC_REL, .type = FASTTRAP_T_ARM64_LDR_Q_PC_REL }, - { .mask = FASTTRAP_ARM64_OP_MASK_LRDSW_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LRDSW_PC_REL, .type = FASTTRAP_T_ARM64_LDRSW_PC_REL }, - { .mask = FASTTRAP_ARM64_OP_MASK_B_COND_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_B_COND_PC_REL, .type = FASTTRAP_T_ARM64_B_COND }, - { .mask = FASTTRAP_ARM64_OP_MASK_CBNZ_W_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_CBNZ_W_PC_REL, .type = FASTTRAP_T_ARM64_CBNZ_W }, - { .mask = FASTTRAP_ARM64_OP_MASK_CBNZ_X_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_CBNZ_X_PC_REL, .type = FASTTRAP_T_ARM64_CBNZ_X }, - { .mask = FASTTRAP_ARM64_OP_MASK_CBZ_W_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_CBZ_W_PC_REL, .type = FASTTRAP_T_ARM64_CBZ_W }, - { .mask = FASTTRAP_ARM64_OP_MASK_CBZ_X_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_CBZ_X_PC_REL, .type = FASTTRAP_T_ARM64_CBZ_X }, - { .mask = FASTTRAP_ARM64_OP_MASK_TBNZ_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_TBNZ_PC_REL, .type = FASTTRAP_T_ARM64_TBNZ }, - { .mask = FASTTRAP_ARM64_OP_MASK_TBZ_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_TBZ_PC_REL, .type = FASTTRAP_T_ARM64_TBZ }, - { .mask = FASTTRAP_ARM64_OP_MASK_B_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_B_PC_REL, .type = FASTTRAP_T_ARM64_B }, - { .mask = FASTTRAP_ARM64_OP_MASK_BL_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_BL_PC_REL, .type = FASTTRAP_T_ARM64_BL }, - { .mask = FASTTRAP_ARM64_OP_MASK_BLR, .value = FASTTRAP_ARM64_OP_VALUE_BLR, .type = FASTTRAP_T_ARM64_BLR }, - { .mask = FASTTRAP_ARM64_OP_MASK_BR, .value = FASTTRAP_ARM64_OP_VALUE_BR, .type = FASTTRAP_T_ARM64_BR }, - { .mask = 
FASTTRAP_ARM64_OP_MASK_RET, .value = FASTTRAP_ARM64_OP_VALUE_RET, .type = FASTTRAP_T_ARM64_RET }, - { .mask = FASTTRAP_ARM64_OP_MASK_ADRP, .value = FASTTRAP_ARM64_OP_VALUE_ADRP, .type = FASTTRAP_T_ARM64_ADRP }, - { .mask = FASTTRAP_ARM64_OP_MASK_ADR, .value = FASTTRAP_ARM64_OP_VALUE_ADR, .type = FASTTRAP_T_ARM64_ADR }, - { .mask = FASTTRAP_ARM64_OP_MASK_PRFM, .value = FASTTRAP_ARM64_OP_VALUE_PRFM, .type = FASTTRAP_T_ARM64_PRFM }, - { .mask = FASTTRAP_ARM64_OP_MASK_EXCL_MEM, .value = FASTTRAP_ARM64_OP_VALUE_EXCL_MEM, .type = FASTTRAP_T_ARM64_EXCLUSIVE_MEM }, - { .mask = FASTTRAP_ARM64_OP_MASK_RETAB, .value = FASTTRAP_ARM64_OP_VALUE_RETAB, .type = FASTTRAP_T_ARM64_RETAB }}; + { .mask = 0xFFFFFFFF, .value = FASTTRAP_ARM64_OP_VALUE_FUNC_ENTRY, .type = FASTTRAP_T_ARM64_STANDARD_FUNCTION_ENTRY }, + { .mask = FASTTRAP_ARM64_OP_MASK_LDR_S_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_S_PC_REL, .type = FASTTRAP_T_ARM64_LDR_S_PC_REL }, + { .mask = FASTTRAP_ARM64_OP_MASK_LDR_W_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_W_PC_REL, .type = FASTTRAP_T_ARM64_LDR_W_PC_REL }, + { .mask = FASTTRAP_ARM64_OP_MASK_LDR_D_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_D_PC_REL, .type = FASTTRAP_T_ARM64_LDR_D_PC_REL }, + { .mask = FASTTRAP_ARM64_OP_MASK_LDR_X_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_X_PC_REL, .type = FASTTRAP_T_ARM64_LDR_X_PC_REL }, + { .mask = FASTTRAP_ARM64_OP_MASK_LDR_Q_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LDR_Q_PC_REL, .type = FASTTRAP_T_ARM64_LDR_Q_PC_REL }, + { .mask = FASTTRAP_ARM64_OP_MASK_LRDSW_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_LRDSW_PC_REL, .type = FASTTRAP_T_ARM64_LDRSW_PC_REL }, + { .mask = FASTTRAP_ARM64_OP_MASK_B_COND_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_B_COND_PC_REL, .type = FASTTRAP_T_ARM64_B_COND }, + { .mask = FASTTRAP_ARM64_OP_MASK_CBNZ_W_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_CBNZ_W_PC_REL, .type = FASTTRAP_T_ARM64_CBNZ_W }, + { .mask = FASTTRAP_ARM64_OP_MASK_CBNZ_X_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_CBNZ_X_PC_REL, .type = FASTTRAP_T_ARM64_CBNZ_X }, + { .mask = FASTTRAP_ARM64_OP_MASK_CBZ_W_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_CBZ_W_PC_REL, .type = FASTTRAP_T_ARM64_CBZ_W }, + { .mask = FASTTRAP_ARM64_OP_MASK_CBZ_X_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_CBZ_X_PC_REL, .type = FASTTRAP_T_ARM64_CBZ_X }, + { .mask = FASTTRAP_ARM64_OP_MASK_TBNZ_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_TBNZ_PC_REL, .type = FASTTRAP_T_ARM64_TBNZ }, + { .mask = FASTTRAP_ARM64_OP_MASK_TBZ_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_TBZ_PC_REL, .type = FASTTRAP_T_ARM64_TBZ }, + { .mask = FASTTRAP_ARM64_OP_MASK_B_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_B_PC_REL, .type = FASTTRAP_T_ARM64_B }, + { .mask = FASTTRAP_ARM64_OP_MASK_BL_PC_REL, .value = FASTTRAP_ARM64_OP_VALUE_BL_PC_REL, .type = FASTTRAP_T_ARM64_BL }, + { .mask = FASTTRAP_ARM64_OP_MASK_BLR, .value = FASTTRAP_ARM64_OP_VALUE_BLR, .type = FASTTRAP_T_ARM64_BLR }, + { .mask = FASTTRAP_ARM64_OP_MASK_BR, .value = FASTTRAP_ARM64_OP_VALUE_BR, .type = FASTTRAP_T_ARM64_BR }, + { .mask = FASTTRAP_ARM64_OP_MASK_RET, .value = FASTTRAP_ARM64_OP_VALUE_RET, .type = FASTTRAP_T_ARM64_RET }, + { .mask = FASTTRAP_ARM64_OP_MASK_ADRP, .value = FASTTRAP_ARM64_OP_VALUE_ADRP, .type = FASTTRAP_T_ARM64_ADRP }, + { .mask = FASTTRAP_ARM64_OP_MASK_ADR, .value = FASTTRAP_ARM64_OP_VALUE_ADR, .type = FASTTRAP_T_ARM64_ADR }, + { .mask = FASTTRAP_ARM64_OP_MASK_PRFM, .value = FASTTRAP_ARM64_OP_VALUE_PRFM, .type = FASTTRAP_T_ARM64_PRFM }, + { .mask = FASTTRAP_ARM64_OP_MASK_EXCL_MEM, .value = FASTTRAP_ARM64_OP_VALUE_EXCL_MEM, .type = 
FASTTRAP_T_ARM64_EXCLUSIVE_MEM }, + { .mask = FASTTRAP_ARM64_OP_MASK_RETAB, .value = FASTTRAP_ARM64_OP_VALUE_RETAB, .type = FASTTRAP_T_ARM64_RETAB } +}; #define NUM_DECODE_ENTRIES (sizeof(arm64_decode_table) / sizeof(struct arm64_decode_entry)) -int dtrace_decode_arm64(uint32_t instr) +int +dtrace_decode_arm64(uint32_t instr) { unsigned i; @@ -1143,5 +1326,3 @@ int dtrace_decode_arm64(uint32_t instr) return FASTTRAP_T_COMMON; } - - diff --git a/bsd/dev/arm64/dtrace_isa.c b/bsd/dev/arm64/dtrace_isa.c index bd2716b95..6a9296fb0 100644 --- a/bsd/dev/arm64/dtrace_isa.c +++ b/bsd/dev/arm64/dtrace_isa.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from - * mach/ppc/thread_status.h */ +#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from + * mach/ppc/thread_status.h */ #include #include @@ -49,7 +49,7 @@ #include #include #include -#include /* for thread_wakeup() */ +#include /* for thread_wakeup() */ #include #include #include @@ -60,8 +60,8 @@ extern struct arm_saved_state *find_kern_regs(thread_t); extern dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ typedef arm_saved_state_t savearea_t; -extern lck_attr_t *dtrace_lck_attr; -extern lck_grp_t *dtrace_lck_grp; +extern lck_attr_t *dtrace_lck_attr; +extern lck_grp_t *dtrace_lck_grp; struct frame { @@ -76,9 +76,9 @@ inline void dtrace_membar_producer(void) { #if __ARM_SMP__ - __asm__ volatile("dmb ish" : : : "memory"); + __asm__ volatile ("dmb ish" : : : "memory"); #else - __asm__ volatile("nop" : : : "memory"); + __asm__ volatile ("nop" : : : "memory"); #endif } @@ -86,9 +86,9 @@ inline void dtrace_membar_consumer(void) { #if __ARM_SMP__ - __asm__ volatile("dmb ish" : : : "memory"); + __asm__ volatile ("dmb ish" : : : "memory"); #else - __asm__ volatile("nop" : : : "memory"); + __asm__ volatile ("nop" : : : "memory"); #endif } @@ -104,7 +104,7 @@ dtrace_getipl(void) * in osfmk/kern/cpu_data.h */ /* return get_interrupt_level(); */ - return (ml_at_interrupt_context() ? 1 : 0); + return ml_at_interrupt_context() ? 
1 : 0; } #if __ARM_SMP__ @@ -126,11 +126,13 @@ xcRemote(void *foo) { xcArg_t *pArg = (xcArg_t *) foo; - if (pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL) - (pArg->f) (pArg->arg); + if (pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL) { + (pArg->f)(pArg->arg); + } - if (hw_atomic_sub(&dt_xc_sync, 1) == 0) + if (hw_atomic_sub(&dt_xc_sync, 1) == 0) { thread_wakeup((event_t) &dt_xc_sync); + } } #endif @@ -200,36 +202,36 @@ dtrace_getreg(struct regs * savearea, uint_t reg) if (regs == NULL) { DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - return (0); + return 0; } if (is_saved_state32(regs)) { // Fix special registers if user is 32 bits switch (reg) { - case ARM64_FP: - reg = ARM_FP; + case ARM64_FP: + reg = ARM_FP; break; - case ARM64_SP: - reg = ARM_SP; + case ARM64_SP: + reg = ARM_SP; break; - case ARM64_LR: - reg = ARM_LR; + case ARM64_LR: + reg = ARM_LR; break; - case ARM64_PC: - reg = ARM_PC; + case ARM64_PC: + reg = ARM_PC; break; - case ARM64_CPSR: - reg = ARM_CPSR; + case ARM64_CPSR: + reg = ARM_CPSR; break; } } if (!check_saved_state_reglimit(regs, reg)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - return (0); + return 0; } - return ((uint64_t)get_saved_state_reg(regs, reg)); + return (uint64_t)get_saved_state_reg(regs, reg); } #define RETURN_OFFSET 4 @@ -237,11 +239,11 @@ dtrace_getreg(struct regs * savearea, uint_t reg) static int dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, - user_addr_t sp) + user_addr_t sp) { int ret = 0; boolean_t is64bit = proc_is64bit_data(current_proc()); - + ASSERT(pcstack == NULL || pcstack_limit > 0); while (pc != 0) { @@ -249,12 +251,14 @@ dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, if (pcstack != NULL) { *pcstack++ = (uint64_t) pc; pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { break; + } } - if (sp == 0) + if (sp == 0) { break; + } if (is64bit) { pc = dtrace_fuword64((sp + RETURN_OFFSET64)); @@ -265,7 +269,7 @@ dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc, } } - return (ret); + return ret; } void @@ -274,40 +278,46 @@ dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit) thread_t thread = current_thread(); savearea_t *regs; user_addr_t pc, sp, fp; - volatile uint16_t *flags = (volatile uint16_t *) & cpu_core[CPU->cpu_id].cpuc_dtrace_flags; + volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; int n; - if (*flags & CPU_DTRACE_FAULT) + if (*flags & CPU_DTRACE_FAULT) { return; + } - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } /* * If there's no user context we still need to zero the stack. 
*/ - if (thread == NULL) + if (thread == NULL) { goto zero; + } regs = (savearea_t *) find_user_regs(thread); - if (regs == NULL) + if (regs == NULL) { goto zero; + } *pcstack++ = (uint64_t)dtrace_proc_selfpid(); pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } pc = get_saved_state_pc(regs); sp = get_saved_state_sp(regs); - fp = get_saved_state_fp(regs); + fp = get_saved_state_fp(regs); if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) { *pcstack++ = (uint64_t) pc; pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } pc = get_saved_state_lr(regs); } @@ -321,8 +331,9 @@ dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit) pcstack_limit -= n; zero: - while (pcstack_limit-- > 0) + while (pcstack_limit-- > 0) { *pcstack++ = 0ULL; + } } int @@ -333,16 +344,19 @@ dtrace_getustackdepth(void) user_addr_t pc, sp, fp; int n = 0; - if (thread == NULL) + if (thread == NULL) { return 0; + } - if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) - return (-1); + if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) { + return -1; + } regs = (savearea_t *) find_user_regs(thread); - if (regs == NULL) + if (regs == NULL) { return 0; - + } + pc = get_saved_state_pc(regs); sp = get_saved_state_sp(regs); fp = get_saved_state_fp(regs); @@ -358,10 +372,10 @@ dtrace_getustackdepth(void) * traces from the sp, even in syscall/profile/fbt * providers. */ - + n += dtrace_getustack_common(NULL, 0, pc, fp); - return (n); + return n; } void @@ -371,39 +385,44 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) boolean_t is64bit = proc_is64bit_data(current_proc()); savearea_t *regs; user_addr_t pc, sp; - volatile uint16_t *flags = (volatile uint16_t *) & cpu_core[CPU->cpu_id].cpuc_dtrace_flags; + volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; #if 0 uintptr_t oldcontext; size_t s1, s2; #endif - if (*flags & CPU_DTRACE_FAULT) + if (*flags & CPU_DTRACE_FAULT) { return; + } - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } - /* + /* * If there's no user context we still need to zero the stack. 
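[Editor's note] dtrace_getustack_common() above is the core user-backtrace walker; each iteration is two user loads. A condensed sketch of the 64-bit step, assuming the standard {fp, lr} frame record that RETURN_OFFSET64 (8 bytes) implies; the 32-bit path with RETURN_OFFSET (4) is analogous:

	/* One step up the user call stack. */
	pc = dtrace_fuword64(sp + RETURN_OFFSET64); /* caller's saved lr */
	sp = dtrace_fuword64(sp);                   /* caller's frame record */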
*/ - if (thread == NULL) + if (thread == NULL) { goto zero; - + } + regs = (savearea_t *) find_user_regs(thread); - if (regs == NULL) + if (regs == NULL) { goto zero; + } *pcstack++ = (uint64_t)dtrace_proc_selfpid(); pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } pc = get_saved_state_pc(regs); sp = get_saved_state_lr(regs); -#if 0 /* XXX signal stack crawl */ +#if 0 /* XXX signal stack crawl */ oldcontext = lwp->lwp_oldcontext; if (p->p_model == DATAMODEL_NATIVE) { @@ -419,22 +438,25 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) *pcstack++ = (uint64_t) pc; *fpstack++ = 0; pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { return; + } - if (is64bit) + if (is64bit) { pc = dtrace_fuword64(sp); - else + } else { pc = dtrace_fuword32(sp); + } } while (pc != 0 && sp != 0) { *pcstack++ = (uint64_t) pc; *fpstack++ = sp; pcstack_limit--; - if (pcstack_limit <= 0) + if (pcstack_limit <= 0) { break; + } -#if 0 /* XXX signal stack crawl */ +#if 0 /* XXX signal stack crawl */ if (oldcontext == sp + s1 || oldcontext == sp + s2) { if (p->p_model == DATAMODEL_NATIVE) { ucontext_t *ucp = (ucontext_t *) oldcontext; @@ -479,15 +501,16 @@ dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit) #endif } -zero: - while (pcstack_limit-- > 0) +zero: + while (pcstack_limit-- > 0) { *pcstack++ = 0ULL; + } } void dtrace_getpcstack(pc_t * pcstack, int pcstack_limit, int aframes, - uint32_t * intrpc) + uint32_t * intrpc) { struct frame *fp = (struct frame *) __builtin_frame_address(0); struct frame *nextfp, *minfp, *stacktop; @@ -497,17 +520,20 @@ dtrace_getpcstack(pc_t * pcstack, int pcstack_limit, int aframes, uintptr_t pc; uintptr_t caller = CPU->cpu_dtrace_caller; - if ((on_intr = CPU_ON_INTR(CPU)) != 0) + if ((on_intr = CPU_ON_INTR(CPU)) != 0) { stacktop = (struct frame *) dtrace_get_cpu_int_stack_top(); - else + } + else { stacktop = (struct frame *) (dtrace_get_kernel_stack(current_thread()) + kernel_stack_size); + } minfp = fp; aframes++; - if (intrpc != NULL && depth < pcstack_limit) + if (intrpc != NULL && depth < pcstack_limit) { pcstack[depth++] = (pc_t) intrpc; + } while (depth < pcstack_limit) { nextfp = *(struct frame **) fp; @@ -564,13 +590,15 @@ dtrace_getpcstack(pc_t * pcstack, int pcstack_limit, int aframes, caller = (uintptr_t)NULL; } } else { - if (depth < pcstack_limit) + if (depth < pcstack_limit) { pcstack[depth++] = (pc_t) pc; + } } if (last) { - while (depth < pcstack_limit) + while (depth < pcstack_limit) { pcstack[depth++] = (pc_t) NULL; + } return; } fp = nextfp; @@ -590,10 +618,11 @@ dtrace_instr_size(uint32_t instr, int thumb_mode) { if (thumb_mode) { uint16_t instr16 = *(uint16_t*) &instr; - if (((instr16 >> 11) & 0x1F) > 0x1C) + if (((instr16 >> 11) & 0x1F) > 0x1C) { return 4; - else + } else { return 2; + } } else { return 4; } @@ -624,16 +653,15 @@ dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vs #endif if (dtrace_invop_callsite_pre != NULL - && pc > (uintptr_t) dtrace_invop_callsite_pre - && pc <= (uintptr_t) dtrace_invop_callsite_post) - { + && pc > (uintptr_t) dtrace_invop_callsite_pre + && pc <= (uintptr_t) dtrace_invop_callsite_post) { /* fp points to frame of dtrace_invop() activation */ fp = fp->backchain; /* to fbt_perfCallback activation */ fp = fp->backchain; /* to sleh_synchronous activation */ fp = fp->backchain; /* to fleh_synchronous activation */ - arm_saved_state_t *tagged_regs = (arm_saved_state_t*) ((void*) &fp[1]); - 
arm_saved_state64_t *saved_state = saved_state64(tagged_regs); + arm_saved_state_t *tagged_regs = (arm_saved_state_t*) ((void*) &fp[1]); + arm_saved_state64_t *saved_state = saved_state64(tagged_regs); if (arg <= inreg) { /* the argument will be found in a register */ @@ -665,7 +693,7 @@ dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vs * register... */ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - return (0); + return 0; } arg -= (inreg + 1); @@ -673,17 +701,17 @@ dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vs load: if (dtrace_canload((uint64_t)(stack + arg), sizeof(uint64_t), - mstate, vstate)) { + mstate, vstate)) { /* dtrace_probe arguments arg0 ... arg4 are 64bits wide */ val = dtrace_load64((uint64_t)(stack + arg)); } - return (val); + return val; } void dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which, - int fltoffs, int fault, uint64_t illval) + int fltoffs, int fault, uint64_t illval) { /* XXX ARMTODO */ /* @@ -698,16 +726,18 @@ void dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit)) { /* XXX ARMTODO check copied from ppc/x86*/ - /* + /* * "base" is the smallest toxic address in the range, "limit" is the first * VALID address greater than "base". - */ + */ func(0x0, VM_MIN_KERNEL_ADDRESS); - if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0) - func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0); + if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0) { + func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0); + } } -void dtrace_flush_caches(void) +void +dtrace_flush_caches(void) { /* TODO There were some problems with flushing just the cache line that had been modified. * For now, we'll flush the entire cache, until we figure out how to flush just the patched block. @@ -715,4 +745,3 @@ void dtrace_flush_caches(void) FlushPoU_Dcache(); InvalidatePoU_Icache(); } - diff --git a/bsd/dev/arm64/dtrace_subr_arm.c b/bsd/dev/arm64/dtrace_subr_arm.c index efdbb54f5..584317298 100644 --- a/bsd/dev/arm64/dtrace_subr_arm.c +++ b/bsd/dev/arm64/dtrace_subr_arm.c @@ -41,7 +41,7 @@ #include #include -int (*dtrace_pid_probe_ptr) (arm_saved_state_t *); +int (*dtrace_pid_probe_ptr)(arm_saved_state_t *); int (*dtrace_return_probe_ptr) (arm_saved_state_t *); kern_return_t @@ -110,12 +110,12 @@ dtrace_user_probe(arm_saved_state_t *regs) */ if (step == 0) { /* - * APPLE NOTE: We're returning KERN_FAILURE, which causes + * APPLE NOTE: We're returning KERN_FAILURE, which causes * the generic signal handling code to take over, which will effectively * deliver an EXC_BAD_INSTRUCTION to the user process. */ return KERN_FAILURE; - } + } /* * If we hit this trap unrelated to a return probe, we're @@ -139,8 +139,9 @@ dtrace_user_probe(arm_saved_state_t *regs) rwp = &CPU->cpu_ft_lock; lck_rw_lock_shared(rwp); - if (dtrace_return_probe_ptr != NULL) + if (dtrace_return_probe_ptr != NULL) { (void) (*dtrace_return_probe_ptr)(regs); + } lck_rw_unlock_shared(rwp); set_saved_state_pc(regs, npc); diff --git a/bsd/dev/arm64/fasttrap_isa.c b/bsd/dev/arm64/fasttrap_isa.c index 8643cbd92..50f980f2c 100644 --- a/bsd/dev/arm64/fasttrap_isa.c +++ b/bsd/dev/arm64/fasttrap_isa.c @@ -33,7 +33,7 @@ #ifdef KERNEL #ifndef _KERNEL -#define _KERNEL /* Solaris vs. 
Darwin */ #endif #endif #include @@ -94,13 +94,13 @@ extern int dtrace_decode_thumb(uint32_t instr); #define THUMB_INSTR(x) (*(uint16_t*) &(x)) -#define SIGNEXTEND(x,v) ((((int) (x)) << (32-(v))) >> (32-(v))) -#define ALIGNADDR(x,v) (((x) >> (v)) << (v)) +#define SIGNEXTEND(x, v) ((((int) (x)) << (32-(v))) >> (32-(v))) +#define ALIGNADDR(x, v) (((x) >> (v)) << (v)) #define GETITSTATE(x) ((((x) >> 8) & 0xFC) | (((x) >> 25) & 0x3)) #define ISLASTINIT(x) (((x) & 0xF) == 8) -#define SET16(x,w) *((uint16_t*) (x)) = (w) -#define SET32(x,w) *((uint32_t*) (x)) = (w) +#define SET16(x, w) *((uint16_t*) (x)) = (w) +#define SET32(x, w) *((uint32_t*) (x)) = (w) #define IS_ARM32_NOP(x) ((x) == 0xE1A00000) /* Marker for is-enabled probes */ @@ -121,12 +121,12 @@ extern int dtrace_decode_thumb(uint32_t instr); #define ARM_LDR_UF (1 << 23) #define ARM_LDR_BF (1 << 22) -static int fasttrap_tracepoint_init32 (proc_t *, fasttrap_tracepoint_t *, user_addr_t, fasttrap_probe_type_t); -static int fasttrap_tracepoint_init64 (proc_t *, fasttrap_tracepoint_t *, user_addr_t, fasttrap_probe_type_t); +static int fasttrap_tracepoint_init32(proc_t *, fasttrap_tracepoint_t *, user_addr_t, fasttrap_probe_type_t); +static int fasttrap_tracepoint_init64(proc_t *, fasttrap_tracepoint_t *, user_addr_t, fasttrap_probe_type_t); int fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, - user_addr_t pc, fasttrap_probe_type_t type) + user_addr_t pc, fasttrap_probe_type_t type) { if (proc_is64bit_data(p)) { return fasttrap_tracepoint_init64(p, tp, pc, type); @@ -137,7 +137,7 @@ fasttrap_tracepoint_init(proc_t *p, fasttrap_tracepoint_t *tp, static int fasttrap_tracepoint_init32(proc_t *p, fasttrap_tracepoint_t *tp, - user_addr_t pc, fasttrap_probe_type_t type) + user_addr_t pc, fasttrap_probe_type_t type) { #pragma unused(type) uint32_t instr; @@ -150,12 +150,13 @@ fasttrap_tracepoint_init32(proc_t *p, fasttrap_tracepoint_t *tp, * pages, we potentially read the instruction in two parts. If the * second part fails, we just zero out that part of the instruction. */ - /* + /* * APPLE NOTE: Of course, we do not have a P_PR_LOCK, so this is racy... - */ + */ - if (uread(p, &instr, 4, pc) != 0) - return (-1); + if (uread(p, &instr, 4, pc) != 0) { + return -1; + } /* We want &instr to always point to the saved instruction, so just copy the * whole thing. When cast to a pointer to a uint16_t, that will give us a @@ -164,37 +165,37 @@ fasttrap_tracepoint_init32(proc_t *p, fasttrap_tracepoint_t *tp, tp->ftt_instr = instr; if (tp->ftt_fntype != FASTTRAP_FN_DONE_INIT) { - switch(tp->ftt_fntype) { - case FASTTRAP_FN_UNKNOWN: - /* Can't instrument without any information. We can add some heuristics later if necessary. */ - return (-1); - - case FASTTRAP_FN_USDT: - if (IS_ARM32_NOP(instr) || IS_ARM32_IS_ENABLED(instr)) { - tp->ftt_thumb = 0; - } else if (IS_THUMB32_NOP(THUMB_INSTR(instr)) || IS_THUMB32_IS_ENABLED(THUMB_INSTR(instr))) { - tp->ftt_thumb = 1; - } else { - /* Shouldn't reach here - this means we don't recognize - * the instruction at one of the USDT probe locations - */ - return (-1); - } - tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; - break; + switch (tp->ftt_fntype) { + case FASTTRAP_FN_UNKNOWN: + /* Can't instrument without any information. We can add some heuristics later if necessary. 
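[Editor's note] The SIGNEXTEND and ALIGNADDR macros reworked above are easy to misread; a few worked values, illustrative only (SIGNEXTEND leans on the arithmetic right shift that xnu's supported compilers provide):

	assert(SIGNEXTEND(0xFF, 8) == -1);      /* bit 7 becomes the sign bit */
	assert(SIGNEXTEND(0x7F, 8) == 127);     /* sign bit clear: value unchanged */
	assert(ALIGNADDR(0x1003, 2) == 0x1000); /* clear the low two bits */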
*/ + return -1; - case FASTTRAP_FN_ARM: + case FASTTRAP_FN_USDT: + if (IS_ARM32_NOP(instr) || IS_ARM32_IS_ENABLED(instr)) { tp->ftt_thumb = 0; + } else if (IS_THUMB32_NOP(THUMB_INSTR(instr)) || IS_THUMB32_IS_ENABLED(THUMB_INSTR(instr))) { tp->ftt_thumb = 1; - tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; - break; + } else { + /* Shouldn't reach here - this means we don't recognize + * the instruction at one of the USDT probe locations + */ + return -1; + } + tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; + break; + + case FASTTRAP_FN_ARM: + tp->ftt_thumb = 0; + tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; + break; - case FASTTRAP_FN_THUMB: + case FASTTRAP_FN_THUMB: + tp->ftt_thumb = 1; + tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; + break; + + default: + return -1; } } @@ -207,17 +208,17 @@ fasttrap_tracepoint_init32(proc_t *p, fasttrap_tracepoint_t *tp, if (tp->ftt_type == FASTTRAP_T_INV) { /* This is an instruction we either don't recognize or can't instrument */ printf("dtrace: fasttrap init32: Unrecognized instruction: %08x at %08llx\n", - (tp->ftt_thumb && dtrace_instr_size(tp->ftt_instr,tp->ftt_thumb) == 2) ? tp->ftt_instr1 : instr, pc); - return (-1); + (tp->ftt_thumb && dtrace_instr_size(tp->ftt_instr, tp->ftt_thumb) == 2) ? tp->ftt_instr1 : instr, pc); + return -1; } - return (0); + return 0; } static int fasttrap_tracepoint_init64(proc_t *p, fasttrap_tracepoint_t *tp, - user_addr_t pc, fasttrap_probe_type_t type) + user_addr_t pc, fasttrap_probe_type_t type) { #pragma unused(type) uint32_t instr = 0; @@ -230,18 +231,19 @@ fasttrap_tracepoint_init64(proc_t *p, fasttrap_tracepoint_t *tp, * pages, we potentially read the instruction in two parts. If the * second part fails, we just zero out that part of the instruction. */ - /* + /* * APPLE NOTE: Of course, we do not have a P_PR_LOCK, so this is racy... - */ + */ - if (uread(p, &instr, 4, pc) != 0) - return (-1); + if (uread(p, &instr, 4, pc) != 0) { + return -1; + } tp->ftt_instr = instr; - tp->ftt_thumb = 0; /* Always zero on 64bit */ + tp->ftt_thumb = 0; /* Always zero on 64bit */ if (tp->ftt_fntype != FASTTRAP_FN_DONE_INIT) { - switch(tp->ftt_fntype) { + switch (tp->ftt_fntype) { case FASTTRAP_FN_UNKNOWN: case FASTTRAP_FN_ARM64: case FASTTRAP_FN_ARM64_32: @@ -254,14 +256,14 @@ fasttrap_tracepoint_init64(proc_t *p, fasttrap_tracepoint_t *tp, case FASTTRAP_FN_USDT: if (IS_ARM64_NOP(instr) || IS_ARM64_IS_ENABLED(instr)) { - tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; + tp->ftt_fntype = FASTTRAP_FN_DONE_INIT; } else { /* * Shouldn't reach here - this means we don't * recognize the instruction at one of the * USDT probe locations */ - return (-1); + return -1; } break; @@ -272,8 +274,8 @@ fasttrap_tracepoint_init64(proc_t *p, fasttrap_tracepoint_t *tp, /* * If we get an arm or thumb mode type * then we are clearly in the wrong path. 
- */ - return (-1); + */ + return -1; } } @@ -282,16 +284,16 @@ fasttrap_tracepoint_init64(proc_t *p, fasttrap_tracepoint_t *tp, if (tp->ftt_type == FASTTRAP_T_ARM64_EXCLUSIVE_MEM) { kprintf("Detected attempt to place DTrace probe on exclusive memory instruction (pc = 0x%llx); refusing to trace (or exclusive operation could never succeed).\n", pc); tp->ftt_type = FASTTRAP_T_INV; - return (-1); + return -1; } if (tp->ftt_type == FASTTRAP_T_INV) { /* This is an instruction we either don't recognize or can't instrument */ printf("dtrace: fasttrap init64: Unrecognized instruction: %08x at %08llx\n", instr, pc); - return (-1); + return -1; } - return (0); + return 0; } int @@ -304,8 +306,7 @@ fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp) if (proc_is64bit_data(p)) { size = 4; instr = FASTTRAP_ARM64_INSTR; - } - else { + } else { size = tp->ftt_thumb ? 2 : 4; if (tp->ftt_thumb) { *((uint16_t*) &instr) = FASTTRAP_THUMB32_INSTR; @@ -314,12 +315,13 @@ fasttrap_tracepoint_install(proc_t *p, fasttrap_tracepoint_t *tp) } } - if (uwrite(p, &instr, size, tp->ftt_pc) != 0) - return (-1); + if (uwrite(p, &instr, size, tp->ftt_pc) != 0) { + return -1; + } tp->ftt_installed = 1; - return (0); + return 0; } int @@ -335,36 +337,42 @@ fasttrap_tracepoint_remove(proc_t *p, fasttrap_tracepoint_t *tp) * instruction. */ size = 4; - if (uread(p, &instr, size, tp->ftt_pc) != 0) + if (uread(p, &instr, size, tp->ftt_pc) != 0) { goto end; + } - if (instr != FASTTRAP_ARM64_INSTR) + if (instr != FASTTRAP_ARM64_INSTR) { goto end; + } } else { /* * Distinguish between read or write failures and a changed * instruction. */ - size = tp->ftt_thumb ? 2 : 4; - if (uread(p, &instr, size, tp->ftt_pc) != 0) + size = tp->ftt_thumb ? 2 : 4; + if (uread(p, &instr, size, tp->ftt_pc) != 0) { goto end; - + } + if (tp->ftt_thumb) { - if (*((uint16_t*) &instr) != FASTTRAP_THUMB32_INSTR) + if (*((uint16_t*) &instr) != FASTTRAP_THUMB32_INSTR) { goto end; + } } else { - if (instr != FASTTRAP_ARM32_INSTR) + if (instr != FASTTRAP_ARM32_INSTR) { goto end; + } } } - if (uwrite(p, &tp->ftt_instr, size, tp->ftt_pc) != 0) - return (-1); + if (uwrite(p, &tp->ftt_instr, size, tp->ftt_pc) != 0) { + return -1; + } end: tp->ftt_installed = 0; - return (0); + return 0; } static void @@ -382,15 +390,16 @@ fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_ for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { if (pid == tp->ftt_pid && pc == tp->ftt_pc && - tp->ftt_proc->ftpc_acount != 0) + tp->ftt_proc->ftpc_acount != 0) { break; + } } /* * Don't sweat it if we can't find the tracepoint again; unlike * when we're in fasttrap_pid_probe(), finding the tracepoint here * is not essential to the correct execution of the process. - */ + */ if (tp == NULL) { lck_mtx_unlock(pid_mtx); return; @@ -403,18 +412,18 @@ fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_ * need to trace it, and check here if the program counter is * external to the function. 
*/ - if (is_saved_state32(regs)) - { if (tp->ftt_type != FASTTRAP_T_LDM_PC && tp->ftt_type != FASTTRAP_T_POP_PC && - new_pc - probe->ftp_faddr < probe->ftp_fsize) continue; - } - else { + } + } else { /* ARM64_TODO - check for FASTTRAP_T_RET */ if ((tp->ftt_type != FASTTRAP_T_ARM64_RET && tp->ftt_type != FASTTRAP_T_ARM64_RETAB) && - new_pc - probe->ftp_faddr < probe->ftp_fsize) + new_pc - probe->ftp_faddr < probe->ftp_fsize) { continue; + } } if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) { uint8_t already_triggered = atomic_or_8(&probe->ftp_triggered, 1); @@ -434,19 +443,19 @@ fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_ #ifndef CONFIG_EMBEDDED if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id, - 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); + 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { #endif } else { if (is_saved_state32(regs)) { dtrace_probe(probe->ftp_id, - pc - id->fti_probe->ftp_faddr, - saved_state32(regs)->r[0], 0, 0, 0); + pc - id->fti_probe->ftp_faddr, + saved_state32(regs)->r[0], 0, 0, 0); } else { dtrace_probe(probe->ftp_id, - pc - id->fti_probe->ftp_faddr, - saved_state64(regs)->x[0], 0, 0, 0); + pc - id->fti_probe->ftp_faddr, + saved_state64(regs)->x[0], 0, 0, 0); } } } @@ -479,16 +488,16 @@ fasttrap_sigsegv(proc_t *p, uthread_t t, user_addr_t addr, arm_saved_state_t *re t->uu_code = addr; t->uu_siglist |= sigmask(SIGSEGV); - /* + /* * XXX These two lines may be redundant; if not, then we need * XXX to potentially set the data address in the machine * XXX specific thread state structure to indicate the address. - */ + */ t->uu_exception = KERN_INVALID_ADDRESS; /* SIGSEGV */ t->uu_subcode = 0; /* XXX pad */ - - proc_unlock(p); - + + proc_unlock(p); + /* raise signal */ signal_setast(t->uu_context.vc_thread); #endif @@ -538,10 +547,11 @@ fasttrap_usdt_args64(fasttrap_probe_t *probe, arm_saved_state64_t *regs64, int a for (; i < argc; i++) { argv[i] = 0; - } + } } -static int condition_true(int cond, int cpsr) +static int +condition_true(int cond, int cpsr) { int taken = 0; int zf = (cpsr & PSR_ZF) ? 1 : 0, @@ -549,29 +559,30 @@ static int condition_true(int cond, int cpsr) cf = (cpsr & PSR_CF) ? 1 : 0, vf = (cpsr & PSR_VF) ? 1 : 0; - switch(cond) { - case 0: taken = zf; break; - case 1: taken = !zf; break; - case 2: taken = cf; break; - case 3: taken = !cf; break; - case 4: taken = nf; break; - case 5: taken = !nf; break; - case 6: taken = vf; break; - case 7: taken = !vf; break; - case 8: taken = (cf && !zf); break; - case 9: taken = (!cf || zf); break; - case 10: taken = (nf == vf); break; - case 11: taken = (nf != vf); break; - case 12: taken = (!zf && (nf == vf)); break; - case 13: taken = (zf || (nf != vf)); break; - case 14: taken = 1; break; - case 15: taken = 1; break; /* always "true" for ARM, unpredictable for THUMB. 
*/ + switch (cond) { + case 0: taken = zf; break; + case 1: taken = !zf; break; + case 2: taken = cf; break; + case 3: taken = !cf; break; + case 4: taken = nf; break; + case 5: taken = !nf; break; + case 6: taken = vf; break; + case 7: taken = !vf; break; + case 8: taken = (cf && !zf); break; + case 9: taken = (!cf || zf); break; + case 10: taken = (nf == vf); break; + case 11: taken = (nf != vf); break; + case 12: taken = (!zf && (nf == vf)); break; + case 13: taken = (zf || (nf != vf)); break; + case 14: taken = 1; break; + case 15: taken = 1; break; /* always "true" for ARM, unpredictable for THUMB. */ } return taken; } -static void set_thumb_flag(arm_saved_state32_t *regs32, user_addr_t pc) +static void +set_thumb_flag(arm_saved_state32_t *regs32, user_addr_t pc) { if (pc & 1) { regs32->cpsr |= PSR_TF; @@ -580,7 +591,7 @@ static void set_thumb_flag(arm_saved_state32_t *regs32, user_addr_t pc) } } -static int +static int fasttrap_pid_probe_thumb_state_valid(arm_saved_state32_t *state32, fasttrap_tracepoint_t *tp) { uint32_t cpsr = state32->cpsr; @@ -589,7 +600,7 @@ fasttrap_pid_probe_thumb_state_valid(arm_saved_state32_t *state32, fasttrap_trac /* If in IT block, make sure it's the last statement in the block */ if ((itstate != 0) && !ISLASTINIT(itstate)) { printf("dtrace: fasttrap: Tried to trace instruction %08x at %08x but not at end of IT block\n", - (tp->ftt_thumb && dtrace_instr_size(tp->ftt_instr,tp->ftt_thumb) == 2) ? tp->ftt_instr1 : tp->ftt_instr, state32->pc); + (tp->ftt_thumb && dtrace_instr_size(tp->ftt_instr, tp->ftt_thumb) == 2) ? tp->ftt_instr1 : tp->ftt_instr, state32->pc); return 0; } @@ -600,28 +611,28 @@ fasttrap_pid_probe_thumb_state_valid(arm_saved_state32_t *state32, fasttrap_trac return 1; } -static int +static int fasttrap_get_condition_code(arm_saved_state32_t *regs32, fasttrap_tracepoint_t *tp) { /* Default to always execute */ - int condition_code = 0xE; + int condition_code = 0xE; if (tp->ftt_thumb) { uint32_t itstate = GETITSTATE(regs32->cpsr); - if (itstate != 0) { + if (itstate != 0) { /* In IT block, make sure it's the last statement in the block */ assert(ISLASTINIT(itstate)); condition_code = itstate >> 4; - } + } } else { condition_code = ARM_CONDCODE(tp->ftt_instr); - } + } return condition_code; } -static void -fasttrap_pid_probe_handle_patched_instr32(arm_saved_state_t *state, fasttrap_tracepoint_t *tp, uthread_t uthread, - proc_t *p, uint_t is_enabled, int *was_simulated) +static void +fasttrap_pid_probe_handle_patched_instr32(arm_saved_state_t *state, fasttrap_tracepoint_t *tp, uthread_t uthread, + proc_t *p, uint_t is_enabled, int *was_simulated) { arm_saved_state32_t *regs32 = saved_state32(state); uint32_t new_pc = 0; @@ -642,7 +653,7 @@ fasttrap_pid_probe_handle_patched_instr32(arm_saved_state_t *state, fasttrap_tra * probe was on some other instruction, but that would be a rather * exotic way to shoot oneself in the foot. */ - + if (is_enabled) { regs32->r[0] = 1; new_pc = regs32->pc + (tp->ftt_thumb ? 
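[Editor's note] condition_true() above transcribes the ARM condition table (EQ, NE, CS, CC, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL); code 10, for example, is GE, taken when N == V. A hypothetical caller, mirroring the pattern the 32-bit emulation cases below use:

	/* Gate emulation of a trapped ARM-mode instruction on its condition field. */
	int cond = ARM_CONDCODE(tp->ftt_instr); /* bits 31:28 */
	if (!condition_true(cond, regs32->cpsr)) {
		new_pc = pc + instr_size;           /* not taken: just step over it */
	}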
2 : 4); @@ -657,508 +668,508 @@ fasttrap_pid_probe_handle_patched_instr32(arm_saved_state_t *state, fasttrap_tra } condition_code = fasttrap_get_condition_code(regs32, tp); - instr_size = dtrace_instr_size(tp->ftt_instr,tp->ftt_thumb); + instr_size = dtrace_instr_size(tp->ftt_instr, tp->ftt_thumb); switch (tp->ftt_type) { - case FASTTRAP_T_MOV_PC_REG: - case FASTTRAP_T_CPY_PC: - { - if (!condition_true(condition_code, regs32->cpsr)) { - new_pc = pc + instr_size; - break; - } + case FASTTRAP_T_MOV_PC_REG: + case FASTTRAP_T_CPY_PC: + { + if (!condition_true(condition_code, regs32->cpsr)) { + new_pc = pc + instr_size; + break; + } - int rm; - if (tp->ftt_thumb) { - rm = THUMB16_HRM(tp->ftt_instr1); - } else { - rm = tp->ftt_instr & 0xF; - } - new_pc = regs32->r[rm]; + int rm; + if (tp->ftt_thumb) { + rm = THUMB16_HRM(tp->ftt_instr1); + } else { + rm = tp->ftt_instr & 0xF; + } + new_pc = regs32->r[rm]; - /* This instruction does not change the Thumb state */ + /* This instruction does not change the Thumb state */ - break; - } + break; + } - case FASTTRAP_T_STM_LR: - case FASTTRAP_T_PUSH_LR: - { - /* - * This is a very common case, so we want to emulate this instruction if - * possible. However, on a push, it is possible that we might reach the end - * of a page and have to allocate a new page. Most of the time this will not - * happen, and we know that the push instruction can store at most 16 words, - * so check to see if we are far from the boundary, and if so, emulate. This - * can be made more aggressive by checking the actual number of words being - * pushed, but we won't do that for now. - * - * Some of the same issues that apply to POP_PC probably apply here also. - */ + case FASTTRAP_T_STM_LR: + case FASTTRAP_T_PUSH_LR: + { + /* + * This is a very common case, so we want to emulate this instruction if + * possible. However, on a push, it is possible that we might reach the end + * of a page and have to allocate a new page. Most of the time this will not + * happen, and we know that the push instruction can store at most 16 words, + * so check to see if we are far from the boundary, and if so, emulate. This + * can be made more aggressive by checking the actual number of words being + * pushed, but we won't do that for now. + * + * Some of the same issues that apply to POP_PC probably apply here also. 
+ */ - int reglist; - int ret; - uint32_t base; + int reglist; + int ret; + uint32_t base; - if (!condition_true(condition_code, regs32->cpsr)) { - new_pc = pc + instr_size; - break; - } + if (!condition_true(condition_code, regs32->cpsr)) { + new_pc = pc + instr_size; + break; + } - base = regs32->sp; - if (((base-16*4) >> PAGE_SHIFT) != (base >> PAGE_SHIFT)) { - /* Crosses the page boundary, go to emulation */ - goto instr_emulate; - } + base = regs32->sp; + if (((base - 16 * 4) >> PAGE_SHIFT) != (base >> PAGE_SHIFT)) { + /* Crosses the page boundary, go to emulation */ + goto instr_emulate; + } - if (tp->ftt_thumb) { - if (instr_size == 4) { - /* We know we have to push lr, never push sp or pc */ - reglist = tp->ftt_instr2 & 0x1FFF; - } else { - reglist = tp->ftt_instr1 & 0xFF; - } - } else { + if (tp->ftt_thumb) { + if (instr_size == 4) { /* We know we have to push lr, never push sp or pc */ - reglist = tp->ftt_instr & 0x1FFF; + reglist = tp->ftt_instr2 & 0x1FFF; + } else { + reglist = tp->ftt_instr1 & 0xFF; } + } else { + /* We know we have to push lr, never push sp or pc */ + reglist = tp->ftt_instr & 0x1FFF; + } - /* Push the link register */ - base -= 4; - ret = fasttrap_suword32(base, regs32->lr); - if (ret == -1) { - fasttrap_sigsegv(p, uthread, (user_addr_t) base, state); - new_pc = regs32->pc; - break; - } + /* Push the link register */ + base -= 4; + ret = fasttrap_suword32(base, regs32->lr); + if (ret == -1) { + fasttrap_sigsegv(p, uthread, (user_addr_t) base, state); + new_pc = regs32->pc; + break; + } - /* Start pushing from $r12 */ - int regmask = 1 << 12; - int regnum = 12; - - while (regmask) { - if (reglist & regmask) { - base -= 4; - ret = fasttrap_suword32(base, regs32->r[regnum]); - if (ret == -1) { - fasttrap_sigsegv(p, uthread, (user_addr_t) base, state); - new_pc = regs32->pc; - break; - } + /* Start pushing from $r12 */ + int regmask = 1 << 12; + int regnum = 12; + + while (regmask) { + if (reglist & regmask) { + base -= 4; + ret = fasttrap_suword32(base, regs32->r[regnum]); + if (ret == -1) { + fasttrap_sigsegv(p, uthread, (user_addr_t) base, state); + new_pc = regs32->pc; + break; } - regmask >>= 1; - regnum--; } - - regs32->sp = base; - - new_pc = pc + instr_size; - - break; + regmask >>= 1; + regnum--; } + regs32->sp = base; - case FASTTRAP_T_LDM_PC: - case FASTTRAP_T_POP_PC: - { - /* TODO Two issues that will eventually need to be resolved: - * - * 1. Understand what the hardware does if we have to segfault (data abort) in - * the middle of a load multiple. We currently don't have a working segfault - * handler anyway, and with no swapfile we should never segfault on this load. - * If we do, we'll just kill the process by setting the pc to 0. - * - * 2. The emulation is no longer atomic. We currently only emulate pop for - * function epilogues, and so we should never have a race here because one - * thread should never be trying to manipulate another thread's stack frames. - * That is almost certainly a bug in the program. - * - * This will need to be fixed if we ever: - * a. Ship dtrace externally, as this could be a potential attack vector - * b. Support instruction level tracing, as we might then pop/ldm non epilogues. - * - */ - - /* Assume ldmia! sp/pop ... 
pc */ - - int regnum = 0, reglist; - int ret; - uint32_t base; - - if (!condition_true(condition_code, regs32->cpsr)) { - new_pc = pc + instr_size; - break; - } + new_pc = pc + instr_size; - if (tp->ftt_thumb) { - if (instr_size == 4) { - /* We know we have to load the pc, don't do it twice */ - reglist = tp->ftt_instr2 & 0x7FFF; - } else { - reglist = tp->ftt_instr1 & 0xFF; - } - } else { - /* We know we have to load the pc, don't do it twice */ - reglist = tp->ftt_instr & 0x7FFF; - } + break; + } - base = regs32->sp; - while (reglist) { - if (reglist & 1) { - ret = fasttrap_fuword32((user_addr_t)base, ®s32->r[regnum]); - if (ret == -1) { - fasttrap_sigsegv(p, uthread, (user_addr_t) base, state); - new_pc = regs32->pc; - break; - } - base += 4; - } - reglist >>= 1; - regnum++; - } - ret = fasttrap_fuword32((user_addr_t)base, &new_pc); - if (ret == -1) { - fasttrap_sigsegv(p, uthread, (user_addr_t) base, state); - new_pc = regs32->pc; - break; - } - base += 4; + case FASTTRAP_T_LDM_PC: + case FASTTRAP_T_POP_PC: + { + /* TODO Two issues that will eventually need to be resolved: + * + * 1. Understand what the hardware does if we have to segfault (data abort) in + * the middle of a load multiple. We currently don't have a working segfault + * handler anyway, and with no swapfile we should never segfault on this load. + * If we do, we'll just kill the process by setting the pc to 0. + * + * 2. The emulation is no longer atomic. We currently only emulate pop for + * function epilogues, and so we should never have a race here because one + * thread should never be trying to manipulate another thread's stack frames. + * That is almost certainly a bug in the program. + * + * This will need to be fixed if we ever: + * a. Ship dtrace externally, as this could be a potential attack vector + * b. Support instruction level tracing, as we might then pop/ldm non epilogues. + * + */ - regs32->sp = base; + /* Assume ldmia! sp/pop ... 
pc */ - set_thumb_flag(regs32, new_pc); + int regnum = 0, reglist; + int ret; + uint32_t base; + if (!condition_true(condition_code, regs32->cpsr)) { + new_pc = pc + instr_size; break; } - case FASTTRAP_T_CB_N_Z: - { - /* Thumb mode instruction, and not permitted in IT block, so skip the condition code check */ - int rn = tp->ftt_instr1 & 0x7; - int offset = (((tp->ftt_instr1 & 0x00F8) >> 2) | ((tp->ftt_instr1 & 0x0200) >> 3)) + 4; - int nonzero = tp->ftt_instr1 & 0x0800; - if (!nonzero != !(regs32->r[rn] == 0)) { - new_pc = pc + offset; + if (tp->ftt_thumb) { + if (instr_size == 4) { + /* We know we have to load the pc, don't do it twice */ + reglist = tp->ftt_instr2 & 0x7FFF; } else { - new_pc = pc + instr_size; + reglist = tp->ftt_instr1 & 0xFF; } - break; + } else { + /* We know we have to load the pc, don't do it twice */ + reglist = tp->ftt_instr & 0x7FFF; } - case FASTTRAP_T_B_COND: - { - /* Use the condition code in the instruction and ignore the ITSTATE */ - - int code, offset; - if (tp->ftt_thumb) { - if (instr_size == 4) { - code = (tp->ftt_instr1 >> 6) & 0xF; - if (code == 14 || code == 15) { - panic("fasttrap: Emulation of invalid branch"); - } - int S = (tp->ftt_instr1 >> 10) & 1, - J1 = (tp->ftt_instr2 >> 13) & 1, - J2 = (tp->ftt_instr2 >> 11) & 1; - offset = 4 + SIGNEXTEND( - (S << 20) | (J2 << 19) | (J1 << 18) | - ((tp->ftt_instr1 & 0x003F) << 12) | - ((tp->ftt_instr2 & 0x07FF) << 1), - 21); - } else { - code = (tp->ftt_instr1 >> 8) & 0xF; - if (code == 14 || code == 15) { - panic("fasttrap: Emulation of invalid branch"); - } - offset = 4 + (SIGNEXTEND(tp->ftt_instr1 & 0xFF, 8) << 1); - } - } else { - code = ARM_CONDCODE(tp->ftt_instr); - if (code == 15) { - panic("fasttrap: Emulation of invalid branch"); + base = regs32->sp; + while (reglist) { + if (reglist & 1) { + ret = fasttrap_fuword32((user_addr_t)base, ®s32->r[regnum]); + if (ret == -1) { + fasttrap_sigsegv(p, uthread, (user_addr_t) base, state); + new_pc = regs32->pc; + break; } - offset = 8 + (SIGNEXTEND(tp->ftt_instr & 0x00FFFFFF, 24) << 2); - } - - if (condition_true(code, regs32->cpsr)) { - new_pc = pc + offset; - } else { - new_pc = pc + instr_size; + base += 4; } + reglist >>= 1; + regnum++; + } + ret = fasttrap_fuword32((user_addr_t)base, &new_pc); + if (ret == -1) { + fasttrap_sigsegv(p, uthread, (user_addr_t) base, state); + new_pc = regs32->pc; break; } + base += 4; - case FASTTRAP_T_B_UNCOND: - { - int offset; + regs32->sp = base; - /* Unconditional branches can only be taken from Thumb mode */ - /* (This is different from an ARM branch with condition code "always") */ - ASSERT(tp->ftt_thumb == 1); + set_thumb_flag(regs32, new_pc); - if (!condition_true(condition_code, regs32->cpsr)) { - new_pc = pc + instr_size; - break; - } + break; + } + case FASTTRAP_T_CB_N_Z: + { + /* Thumb mode instruction, and not permitted in IT block, so skip the condition code check */ + int rn = tp->ftt_instr1 & 0x7; + int offset = (((tp->ftt_instr1 & 0x00F8) >> 2) | ((tp->ftt_instr1 & 0x0200) >> 3)) + 4; + int nonzero = tp->ftt_instr1 & 0x0800; + if (!nonzero != !(regs32->r[rn] == 0)) { + new_pc = pc + offset; + } else { + new_pc = pc + instr_size; + } + break; + } + + case FASTTRAP_T_B_COND: + { + /* Use the condition code in the instruction and ignore the ITSTATE */ + + int code, offset; + if (tp->ftt_thumb) { if (instr_size == 4) { + code = (tp->ftt_instr1 >> 6) & 0xF; + if (code == 14 || code == 15) { + panic("fasttrap: Emulation of invalid branch"); + } int S = (tp->ftt_instr1 >> 10) & 1, J1 = (tp->ftt_instr2 >> 13) & 
1, J2 = (tp->ftt_instr2 >> 11) & 1; - int I1 = (J1 != S) ? 0 : 1, I2 = (J2 != S) ? 0 : 1; offset = 4 + SIGNEXTEND( - (S << 24) | (I1 << 23) | (I2 << 22) | - ((tp->ftt_instr1 & 0x03FF) << 12) | - ((tp->ftt_instr2 & 0x07FF) << 1), - 25); + (S << 20) | (J2 << 19) | (J1 << 18) | + ((tp->ftt_instr1 & 0x003F) << 12) | + ((tp->ftt_instr2 & 0x07FF) << 1), + 21); } else { - uint32_t instr1 = tp->ftt_instr1; - offset = 4 + (SIGNEXTEND(instr1 & 0x7FF, 11) << 1); + code = (tp->ftt_instr1 >> 8) & 0xF; + if (code == 14 || code == 15) { + panic("fasttrap: Emulation of invalid branch"); + } + offset = 4 + (SIGNEXTEND(tp->ftt_instr1 & 0xFF, 8) << 1); } + } else { + code = ARM_CONDCODE(tp->ftt_instr); + if (code == 15) { + panic("fasttrap: Emulation of invalid branch"); + } + offset = 8 + (SIGNEXTEND(tp->ftt_instr & 0x00FFFFFF, 24) << 2); + } + if (condition_true(code, regs32->cpsr)) { new_pc = pc + offset; + } else { + new_pc = pc + instr_size; + } + break; + } + + case FASTTRAP_T_B_UNCOND: + { + int offset; + + /* Unconditional branches can only be taken from Thumb mode */ + /* (This is different from an ARM branch with condition code "always") */ + ASSERT(tp->ftt_thumb == 1); + + if (!condition_true(condition_code, regs32->cpsr)) { + new_pc = pc + instr_size; break; } - case FASTTRAP_T_BX_REG: - { - int reg; + if (instr_size == 4) { + int S = (tp->ftt_instr1 >> 10) & 1, + J1 = (tp->ftt_instr2 >> 13) & 1, + J2 = (tp->ftt_instr2 >> 11) & 1; + int I1 = (J1 != S) ? 0 : 1, I2 = (J2 != S) ? 0 : 1; + offset = 4 + SIGNEXTEND( + (S << 24) | (I1 << 23) | (I2 << 22) | + ((tp->ftt_instr1 & 0x03FF) << 12) | + ((tp->ftt_instr2 & 0x07FF) << 1), + 25); + } else { + uint32_t instr1 = tp->ftt_instr1; + offset = 4 + (SIGNEXTEND(instr1 & 0x7FF, 11) << 1); + } - if (!condition_true(condition_code, regs32->cpsr)) { - new_pc = pc + instr_size; - break; - } + new_pc = pc + offset; - if (tp->ftt_thumb) { - reg = THUMB16_HRM(tp->ftt_instr1); - } else { - reg = ARM_RM(tp->ftt_instr); - } - new_pc = regs32->r[reg]; - set_thumb_flag(regs32, new_pc); + break; + } + + case FASTTRAP_T_BX_REG: + { + int reg; + if (!condition_true(condition_code, regs32->cpsr)) { + new_pc = pc + instr_size; break; } - case FASTTRAP_T_LDR_PC_IMMED: - case FASTTRAP_T_VLDR_PC_IMMED: - /* Handle these instructions by replacing the PC in the instruction with another - * register. They are common, so we'd like to support them, and this way we do so - * without any risk of having to simulate a segfault. - */ + if (tp->ftt_thumb) { + reg = THUMB16_HRM(tp->ftt_instr1); + } else { + reg = ARM_RM(tp->ftt_instr); + } + new_pc = regs32->r[reg]; + set_thumb_flag(regs32, new_pc); - /* Fall through */ + break; + } - instr_emulate: - case FASTTRAP_T_COMMON: - { - user_addr_t addr; - uint8_t scratch[32]; - uint_t i = 0; - fasttrap_instr_t emul_instr; - emul_instr.instr32 = tp->ftt_instr; - int emul_instr_size; + case FASTTRAP_T_LDR_PC_IMMED: + case FASTTRAP_T_VLDR_PC_IMMED: + /* Handle these instructions by replacing the PC in the instruction with another + * register. They are common, so we'd like to support them, and this way we do so + * without any risk of having to simulate a segfault. + */ - /* - * Unfortunately sometimes when we emulate the instruction and have to replace the - * PC, there is no longer a thumb mode equivalent. We end up having to run the - * modified instruction in ARM mode. We use this variable to keep track of which - * mode we should emulate in. We still use the original variable to determine - * what mode to return to. 
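[Editor's note] The FASTTRAP_T_LDR_PC_IMMED strategy in the hunk below is worth restating compactly: the load's destination register is about to be overwritten anyway, so it can stand in for the pc. A sketch of the ARM-mode rewrite, assuming the A1 LDR-literal encoding:

	/* Turn "ldr rT, [pc, #imm]" into "ldr rT, [rT, #imm]". */
	int rT = (instr >> 12) & 0xF;                 /* Rt field */
	regs32->r[rT] = ALIGNADDR(regs32->pc + 8, 2); /* ARM reads pc as instr + 8, word-aligned */
	instr = (instr & ~0x000F0000) | (rT << 16);   /* base register Rn := rT */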
- */ - uint8_t emul_thumb = tp->ftt_thumb; - int save_reg = -1; - uint32_t save_val = 0; + /* Fall through */ - /* - * Dealing with condition codes and emulation: - * We can't just uniformly do a condition code check here because not all instructions - * have condition codes. We currently do not support an instruction by instruction trace, - * so we can assume that either: 1. We are executing a Thumb instruction, in which case - * we either are not in an IT block and should execute always, or we are last in an IT - * block. Either way, the traced instruction will run correctly, and we won't have any - * problems when we return to the original code, because we will no longer be in the IT - * block. 2. We are executing an ARM instruction, in which case we are ok as long as - * we don't attempt to change the condition code. +instr_emulate: + case FASTTRAP_T_COMMON: + { + user_addr_t addr; + uint8_t scratch[32]; + uint_t i = 0; + fasttrap_instr_t emul_instr; + emul_instr.instr32 = tp->ftt_instr; + int emul_instr_size; + + /* + * Unfortunately sometimes when we emulate the instruction and have to replace the + * PC, there is no longer a thumb mode equivalent. We end up having to run the + * modified instruction in ARM mode. We use this variable to keep track of which + * mode we should emulate in. We still use the original variable to determine + * what mode to return to. + */ + uint8_t emul_thumb = tp->ftt_thumb; + int save_reg = -1; + uint32_t save_val = 0; + + /* + * Dealing with condition codes and emulation: + * We can't just uniformly do a condition code check here because not all instructions + * have condition codes. We currently do not support an instruction by instruction trace, + * so we can assume that either: 1. We are executing a Thumb instruction, in which case + * we either are not in an IT block and should execute always, or we are last in an IT + * block. Either way, the traced instruction will run correctly, and we won't have any + * problems when we return to the original code, because we will no longer be in the IT + * block. 2. We are executing an ARM instruction, in which case we are ok as long as + * we don't attempt to change the condition code. + */ + if (tp->ftt_type == FASTTRAP_T_LDR_PC_IMMED) { + /* We know we always have a free register (the one we plan to write the + * result value to!). So we'll replace the pc with that one. */ - if (tp->ftt_type == FASTTRAP_T_LDR_PC_IMMED) { - /* We know we always have a free register (the one we plan to write the - * result value to!). So we'll replace the pc with that one. - */ - int new_reg; - if (tp->ftt_thumb) { - /* Check to see if thumb or thumb2 */ - if (instr_size == 2) { - /* - * Sadness. We need to emulate this instruction in ARM mode - * because it has an 8 bit immediate offset. Instead of having - * to deal with condition codes in the ARM instruction, we'll - * just check the condition and abort if the condition is false. - */ - if (!condition_true(condition_code, regs32->cpsr)) { - new_pc = pc + instr_size; - break; - } - - new_reg = (tp->ftt_instr1 >> 8) & 0x7; - regs32->r[new_reg] = ALIGNADDR(regs32->pc + 4, 2); - emul_thumb = 0; - emul_instr.instr32 = 0xE5900000 | (new_reg << 16) | (new_reg << 12) | ((tp->ftt_instr1 & 0xFF) << 2); - } else { - /* Thumb2. Just replace the register. 
*/ - new_reg = (tp->ftt_instr2 >> 12) & 0xF; - regs32->r[new_reg] = ALIGNADDR(regs32->pc + 4, 2); - emul_instr.instr16.instr1 &= ~0x000F; - emul_instr.instr16.instr1 |= new_reg; + int new_reg; + if (tp->ftt_thumb) { + /* Check to see if thumb or thumb2 */ + if (instr_size == 2) { + /* + * Sadness. We need to emulate this instruction in ARM mode + * because it has an 8 bit immediate offset. Instead of having + * to deal with condition codes in the ARM instruction, we'll + * just check the condition and abort if the condition is false. + */ + if (!condition_true(condition_code, regs32->cpsr)) { + new_pc = pc + instr_size; + break; } + + new_reg = (tp->ftt_instr1 >> 8) & 0x7; + regs32->r[new_reg] = ALIGNADDR(regs32->pc + 4, 2); + emul_thumb = 0; + emul_instr.instr32 = 0xE5900000 | (new_reg << 16) | (new_reg << 12) | ((tp->ftt_instr1 & 0xFF) << 2); } else { - /* ARM. Just replace the register. */ - new_reg = (tp->ftt_instr >> 12) & 0xF; - regs32->r[new_reg] = ALIGNADDR(regs32->pc + 8,2); - emul_instr.instr32 &= ~0x000F0000; - emul_instr.instr32 |= new_reg << 16; - } - } else if (tp->ftt_type == FASTTRAP_T_VLDR_PC_IMMED) { - /* This instruction only uses one register, and if we're here, we know - * it must be the pc. So we'll just replace it with R0. - */ - save_reg = 0; - save_val = regs32->r[0]; - regs32->r[save_reg] = ALIGNADDR(regs32->pc + (tp->ftt_thumb ? 4 : 8), 2); - if (tp->ftt_thumb) { + /* Thumb2. Just replace the register. */ + new_reg = (tp->ftt_instr2 >> 12) & 0xF; + regs32->r[new_reg] = ALIGNADDR(regs32->pc + 4, 2); emul_instr.instr16.instr1 &= ~0x000F; - } else { - emul_instr.instr32 &= ~0x000F0000; + emul_instr.instr16.instr1 |= new_reg; } + } else { + /* ARM. Just replace the register. */ + new_reg = (tp->ftt_instr >> 12) & 0xF; + regs32->r[new_reg] = ALIGNADDR(regs32->pc + 8, 2); + emul_instr.instr32 &= ~0x000F0000; + emul_instr.instr32 |= new_reg << 16; } - - emul_instr_size = dtrace_instr_size(emul_instr.instr32, emul_thumb); - - /* - * At this point: - * tp->ftt_thumb = thumb mode of original instruction - * emul_thumb = thumb mode for emulation - * emul_instr = instruction we are using to emulate original instruction - * emul_instr_size = size of emulating instruction + } else if (tp->ftt_type == FASTTRAP_T_VLDR_PC_IMMED) { + /* This instruction only uses one register, and if we're here, we know + * it must be the pc. So we'll just replace it with R0. */ + save_reg = 0; + save_val = regs32->r[0]; + regs32->r[save_reg] = ALIGNADDR(regs32->pc + (tp->ftt_thumb ? 4 : 8), 2); + if (tp->ftt_thumb) { + emul_instr.instr16.instr1 &= ~0x000F; + } else { + emul_instr.instr32 &= ~0x000F0000; + } + } - addr = uthread->t_dtrace_scratch->addr; + emul_instr_size = dtrace_instr_size(emul_instr.instr32, emul_thumb); - if (addr == 0LL) { - fasttrap_sigtrap(p, uthread, pc); // Should be killing target proc - new_pc = pc; - break; - } + /* + * At this point: + * tp->ftt_thumb = thumb mode of original instruction + * emul_thumb = thumb mode for emulation + * emul_instr = instruction we are using to emulate original instruction + * emul_instr_size = size of emulating instruction + */ - uthread->t_dtrace_scrpc = addr; - if (emul_thumb) { - /* - * No way to do an unconditional branch in Thumb mode, shove the address - * onto the user stack and go to the next location with a pop. This can - * segfault if this push happens to cross a stack page, but that's ok, since - * we are running in userland, and the kernel knows how to handle userland - * stack expansions correctly. 
- * - * Layout of scratch space for Thumb mode: - * Emulated instruction - * ldr save_reg, [pc, #16] (if necessary, restore any register we clobbered) - * push { r0, r1 } - * ldr r0, [pc, #4] - * str r0, [sp, #4] - * pop { r0, pc } - * Location we should return to in original program - * Saved value of clobbered register (if necessary) - */ + addr = uthread->t_dtrace_scratch->addr; - bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; + if (addr == 0LL) { + fasttrap_sigtrap(p, uthread, pc); // Should be killing target proc + new_pc = pc; + break; + } - if (save_reg != -1) { - uint16_t restore_inst = 0x4803; - restore_inst |= (save_reg & 0x7) << 8; - SET16(scratch+i, restore_inst); i += 2; // ldr reg, [pc , #16] - } + uthread->t_dtrace_scrpc = addr; + if (emul_thumb) { + /* + * No way to do an unconditional branch in Thumb mode, shove the address + * onto the user stack and go to the next location with a pop. This can + * segfault if this push happens to cross a stack page, but that's ok, since + * we are running in userland, and the kernel knows how to handle userland + * stack expansions correctly. + * + * Layout of scratch space for Thumb mode: + * Emulated instruction + * ldr save_reg, [pc, #16] (if necessary, restore any register we clobbered) + * push { r0, r1 } + * ldr r0, [pc, #4] + * str r0, [sp, #4] + * pop { r0, pc } + * Location we should return to in original program + * Saved value of clobbered register (if necessary) + */ - SET16(scratch+i, 0xB403); i += 2; // push { r0, r1 } - SET16(scratch+i, 0x4801); i += 2; // ldr r0, [pc, #4] - SET16(scratch+i, 0x9001); i += 2; // str r0, [sp, #4] - SET16(scratch+i, 0xBD01); i += 2; // pop { r0, pc } + bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; - if (i % 4) { - SET16(scratch+i, 0); i += 2; // padding - saved 32 bit words must be aligned - } - SET32(scratch+i, pc + instr_size + (tp->ftt_thumb ? 1 : 0)); i += 4; // Return address - if (save_reg != -1) { - SET32(scratch+i, save_val); i += 4; // saved value of clobbered register - } + if (save_reg != -1) { + uint16_t restore_inst = 0x4803; + restore_inst |= (save_reg & 0x7) << 8; + SET16(scratch + i, restore_inst); i += 2; // ldr reg, [pc , #16] + } - uthread->t_dtrace_astpc = addr + i; - bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; - SET16(scratch+i, FASTTRAP_THUMB32_RET_INSTR); i += 2; - } else { - /* - * Layout of scratch space for ARM mode: - * Emulated instruction - * ldr save_reg, [pc, #12] (if necessary, restore any register we clobbered) - * ldr pc, [pc, #4] - * Location we should return to in original program - * Saved value of clobbered register (if necessary) - */ + SET16(scratch + i, 0xB403); i += 2; // push { r0, r1 } + SET16(scratch + i, 0x4801); i += 2; // ldr r0, [pc, #4] + SET16(scratch + i, 0x9001); i += 2; // str r0, [sp, #4] + SET16(scratch + i, 0xBD01); i += 2; // pop { r0, pc } - bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; + if (i % 4) { + SET16(scratch + i, 0); i += 2; // padding - saved 32 bit words must be aligned + } + SET32(scratch + i, pc + instr_size + (tp->ftt_thumb ? 
1 : 0)); i += 4; // Return address + if (save_reg != -1) { + SET32(scratch + i, save_val); i += 4; // saved value of clobbered register + } - if (save_reg != -1) { - uint32_t restore_inst = 0xE59F0004; - restore_inst |= save_reg << 12; - SET32(scratch+i, restore_inst); i += 4; // ldr reg, [pc, #12] - } - SET32(scratch+i, 0xE51FF004); i += 4; // ldr pc, [pc, #4] + uthread->t_dtrace_astpc = addr + i; + bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; + SET16(scratch + i, FASTTRAP_THUMB32_RET_INSTR); i += 2; + } else { + /* + * Layout of scratch space for ARM mode: + * Emulated instruction + * ldr save_reg, [pc, #12] (if necessary, restore any register we clobbered) + * ldr pc, [pc, #4] + * Location we should return to in original program + * Saved value of clobbered register (if necessary) + */ - SET32(scratch+i, pc + instr_size + (tp->ftt_thumb ? 1 : 0)); i += 4; // Return address - if (save_reg != -1) { - SET32(scratch+i, save_val); i += 4; // Saved value of clobbered register - } + bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; - uthread->t_dtrace_astpc = addr + i; - bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; - SET32(scratch+i, FASTTRAP_ARM32_RET_INSTR); i += 4; + if (save_reg != -1) { + uint32_t restore_inst = 0xE59F0004; + restore_inst |= save_reg << 12; + SET32(scratch + i, restore_inst); i += 4; // ldr reg, [pc, #12] } + SET32(scratch + i, 0xE51FF004); i += 4; // ldr pc, [pc, #4] - if (uwrite(p, scratch, i, uthread->t_dtrace_scratch->write_addr) != KERN_SUCCESS) { - fasttrap_sigtrap(p, uthread, pc); - new_pc = pc; - break; + SET32(scratch + i, pc + instr_size + (tp->ftt_thumb ? 1 : 0)); i += 4; // Return address + if (save_reg != -1) { + SET32(scratch + i, save_val); i += 4; // Saved value of clobbered register } - if (tp->ftt_retids != NULL) { - uthread->t_dtrace_step = 1; - uthread->t_dtrace_ret = 1; - new_pc = uthread->t_dtrace_astpc + (emul_thumb ? 1 : 0); - } else { - new_pc = uthread->t_dtrace_scrpc + (emul_thumb ? 1 : 0); - } + uthread->t_dtrace_astpc = addr + i; + bcopy(&emul_instr, &scratch[i], emul_instr_size); i += emul_instr_size; + SET32(scratch + i, FASTTRAP_ARM32_RET_INSTR); i += 4; + } - uthread->t_dtrace_pc = pc; - uthread->t_dtrace_npc = pc + instr_size; - uthread->t_dtrace_on = 1; - *was_simulated = 0; - set_thumb_flag(regs32, new_pc); + if (uwrite(p, scratch, i, uthread->t_dtrace_scratch->write_addr) != KERN_SUCCESS) { + fasttrap_sigtrap(p, uthread, pc); + new_pc = pc; break; } - default: - panic("fasttrap: mishandled an instruction"); + if (tp->ftt_retids != NULL) { + uthread->t_dtrace_step = 1; + uthread->t_dtrace_ret = 1; + new_pc = uthread->t_dtrace_astpc + (emul_thumb ? 1 : 0); + } else { + new_pc = uthread->t_dtrace_scrpc + (emul_thumb ? 1 : 0); + } + + uthread->t_dtrace_pc = pc; + uthread->t_dtrace_npc = pc + instr_size; + uthread->t_dtrace_on = 1; + *was_simulated = 0; + set_thumb_flag(regs32, new_pc); + break; + } + + default: + panic("fasttrap: mishandled an instruction"); } done: - set_saved_state_pc(state, new_pc); + set_saved_state_pc(state, new_pc); return; } /* * Copy out an instruction for execution in userland. * Trap back to kernel to handle return to original flow of execution, because - * direct branches don't have sufficient range (+/- 128MB) and we + * direct branches don't have sufficient range (+/- 128MB) and we * cannot clobber a GPR. Note that we have to specially handle PC-rel loads/stores * as well, which have range +/- 1MB (convert to an indirect load). 
Instruction buffer * layout: @@ -1171,7 +1182,7 @@ done: */ static void fasttrap_pid_probe_thunk_instr64(arm_saved_state_t *state, fasttrap_tracepoint_t *tp, proc_t *p, uthread_t uthread, - const uint32_t *instructions, uint32_t num_instrs, user_addr_t *pc_out) + const uint32_t *instructions, uint32_t num_instrs, user_addr_t *pc_out) { uint32_t local_scratch[8]; user_addr_t pc = get_saved_state_pc(state); @@ -1222,7 +1233,7 @@ fasttrap_pid_probe_thunk_instr64(arm_saved_state_t *state, fasttrap_tracepoint_t * Sign-extend bit "sign_bit_index" out to bit 64. */ static int64_t -sign_extend(int64_t input, uint32_t sign_bit_index) +sign_extend(int64_t input, uint32_t sign_bit_index) { assert(sign_bit_index < 63); if (input & (1ULL << sign_bit_index)) { @@ -1236,50 +1247,50 @@ sign_extend(int64_t input, uint32_t sign_bit_index) /* * Handle xzr vs. sp, fp, lr, etc. Will *not* read the SP. */ -static uint64_t +static uint64_t get_saved_state64_regno(arm_saved_state64_t *regs64, uint32_t regno, int use_xzr) { /* Set PC to register value */ switch (regno) { - case 29: - return regs64->fp; - case 30: - return regs64->lr; - case 31: - /* xzr */ - if (use_xzr) { - return 0; - } else { - return regs64->sp; - } - default: - return regs64->x[regno]; + case 29: + return regs64->fp; + case 30: + return regs64->lr; + case 31: + /* xzr */ + if (use_xzr) { + return 0; + } else { + return regs64->sp; + } + default: + return regs64->x[regno]; } } -static void +static void set_saved_state64_regno(arm_saved_state64_t *regs64, uint32_t regno, int use_xzr, register_t value) { /* Set PC to register value */ switch (regno) { - case 29: - regs64->fp = value; - break; - case 30: - regs64->lr = value; - break; - case 31: - if (!use_xzr) { - regs64->sp = value; - } - break; - default: - regs64->x[regno] = value; - break; + case 29: + regs64->fp = value; + break; + case 30: + regs64->lr = value; + break; + case 31: + if (!use_xzr) { + regs64->sp = value; + } + break; + default: + regs64->x[regno] = value; + break; } } -/* +/* * Common operation: extract sign-extended PC offset from instruction * Left-shifts result by two bits. 
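[Editor's note] sign_extend() above, and the extract_address_literal_sign_extended() helper it serves, reconstruct the signed literals used by every pc-relative case that follows. One worked value, illustrative only:

	/* A 19-bit literal with its top (sign) bit set. */
	int64_t imm19 = sign_extend(0x40000, 18); /* == -0x40000 (bit 18 replicated upward) */
	int64_t off = imm19 << 2;                 /* == -0x100000, the byte offset */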
*/ @@ -1313,7 +1324,7 @@ do_cbz_cnbz(arm_saved_state64_t *regs64, uint32_t regwidth, uint32_t instr, int } /* Extract offset */ - offset = extract_address_literal_sign_extended(instr, 5, 19); + offset = extract_address_literal_sign_extended(instr, 5, 19); /* Do test */ if ((is_cbz && regval == 0) || ((!is_cbz) && regval != 0)) { @@ -1359,14 +1370,14 @@ do_tbz_tbnz(arm_saved_state64_t *regs64, uint32_t instr, int is_tbz, user_addr_t static void -fasttrap_pid_probe_handle_patched_instr64(arm_saved_state_t *state, fasttrap_tracepoint_t *tp __unused, uthread_t uthread, - proc_t *p, uint_t is_enabled, int *was_simulated) +fasttrap_pid_probe_handle_patched_instr64(arm_saved_state_t *state, fasttrap_tracepoint_t *tp __unused, uthread_t uthread, + proc_t *p, uint_t is_enabled, int *was_simulated) { int res1, res2; arm_saved_state64_t *regs64 = saved_state64(state); uint32_t instr = tp->ftt_instr; user_addr_t new_pc = 0; - + /* Neon state should be threaded through, but hack it until we have better arm/arm64 integration */ arm_neon_saved_state64_t *ns64 = &(get_user_neon_regs(uthread->uu_thread)->ns_64); @@ -1377,349 +1388,347 @@ fasttrap_pid_probe_handle_patched_instr64(arm_saved_state_t *state, fasttrap_tra return; } - /* For USDT probes, bypass all the emulation logic for the nop instruction */ + /* For USDT probes, bypass all the emulation logic for the nop instruction */ if (IS_ARM64_NOP(tp->ftt_instr)) { set_saved_state_pc(state, regs64->pc + 4); return; } - - - /* Only one of many cases in the switch doesn't simulate */ - switch(tp->ftt_type) { - /* - * Function entry: emulate for speed. - * stp fp, lr, [sp, #-16]! - */ - case FASTTRAP_T_ARM64_STANDARD_FUNCTION_ENTRY: - { - /* Store values to stack */ - res1 = fasttrap_suword64(regs64->sp - 16, regs64->fp); - res2 = fasttrap_suword64(regs64->sp - 8, regs64->lr); - if (res1 != 0 || res2 != 0) { - fasttrap_sigsegv(p, uthread, regs64->sp - (res1 ? 16 : 8), state); - new_pc = regs64->pc; /* Bit of a hack */ - break; - } - /* Move stack pointer */ - regs64->sp -= 16; - /* Move PC forward */ - new_pc = regs64->pc + 4; - *was_simulated = 1; + /* Only one of many cases in the switch doesn't simulate */ + switch (tp->ftt_type) { + /* + * Function entry: emulate for speed. + * stp fp, lr, [sp, #-16]! + */ + case FASTTRAP_T_ARM64_STANDARD_FUNCTION_ENTRY: + { + /* Store values to stack */ + res1 = fasttrap_suword64(regs64->sp - 16, regs64->fp); + res2 = fasttrap_suword64(regs64->sp - 8, regs64->lr); + if (res1 != 0 || res2 != 0) { + fasttrap_sigsegv(p, uthread, regs64->sp - (res1 ? 16 : 8), state); + new_pc = regs64->pc; /* Bit of a hack */ break; } - /* - * PC-relative loads/stores: emulate for correctness. - * All loads are 32bits or greater (no need to handle byte or halfword accesses). - * LDR Wt, addr - * LDR Xt, addr - * LDRSW Xt, addr - * - * LDR St, addr - * LDR Dt, addr - * LDR Qt, addr - * PRFM label -> becomes a NOP - */ + /* Move stack pointer */ + regs64->sp -= 16; + + /* Move PC forward */ + new_pc = regs64->pc + 4; + *was_simulated = 1; + break; + } + + /* + * PC-relative loads/stores: emulate for correctness. + * All loads are 32bits or greater (no need to handle byte or halfword accesses).
+ * LDR Wt, addr + * LDR Xt, addr + * LDRSW Xt, addr + * + * LDR St, addr + * LDR Dt, addr + * LDR Qt, addr + * PRFM label -> becomes a NOP + */ + case FASTTRAP_T_ARM64_LDR_S_PC_REL: + case FASTTRAP_T_ARM64_LDR_W_PC_REL: + case FASTTRAP_T_ARM64_LDR_D_PC_REL: + case FASTTRAP_T_ARM64_LDR_X_PC_REL: + case FASTTRAP_T_ARM64_LDR_Q_PC_REL: + case FASTTRAP_T_ARM64_LDRSW_PC_REL: + { + uint64_t offset; + uint32_t valsize, regno; + user_addr_t address; + union { + uint32_t val32; + uint64_t val64; + uint128_t val128; + } value; + + /* Extract 19-bit offset, add to pc */ + offset = extract_address_literal_sign_extended(instr, 5, 19); + address = regs64->pc + offset; + + /* Extract destination register */ + regno = (instr & 0x1f); + assert(regno <= 31); + + /* Read value of desired size from memory */ + switch (tp->ftt_type) { case FASTTRAP_T_ARM64_LDR_S_PC_REL: case FASTTRAP_T_ARM64_LDR_W_PC_REL: + case FASTTRAP_T_ARM64_LDRSW_PC_REL: + valsize = 4; + break; case FASTTRAP_T_ARM64_LDR_D_PC_REL: case FASTTRAP_T_ARM64_LDR_X_PC_REL: + valsize = 8; + break; case FASTTRAP_T_ARM64_LDR_Q_PC_REL: - case FASTTRAP_T_ARM64_LDRSW_PC_REL: - { - uint64_t offset; - uint32_t valsize, regno; - user_addr_t address; - union { - uint32_t val32; - uint64_t val64; - uint128_t val128; - } value; - - /* Extract 19-bit offset, add to pc */ - offset = extract_address_literal_sign_extended(instr, 5, 19); - address = regs64->pc + offset; - - /* Extract destination register */ - regno = (instr & 0x1f); - assert(regno <= 31); - - /* Read value of desired size from memory */ - switch (tp->ftt_type) { - case FASTTRAP_T_ARM64_LDR_S_PC_REL: - case FASTTRAP_T_ARM64_LDR_W_PC_REL: - case FASTTRAP_T_ARM64_LDRSW_PC_REL: - valsize = 4; - break; - case FASTTRAP_T_ARM64_LDR_D_PC_REL: - case FASTTRAP_T_ARM64_LDR_X_PC_REL: - valsize = 8; - break; - case FASTTRAP_T_ARM64_LDR_Q_PC_REL: - valsize = 16; - break; - default: - panic("Should never get here!"); - valsize = -1; - break; - } - - if (copyin(address, &value, valsize) != 0) { - fasttrap_sigsegv(p, uthread, address, state); - new_pc = regs64->pc; /* Bit of a hack, we know about update in fasttrap_sigsegv() */ - break; - } - - /* Stash in correct register slot */ - switch (tp->ftt_type) { - case FASTTRAP_T_ARM64_LDR_W_PC_REL: - set_saved_state64_regno(regs64, regno, 1, value.val32); - break; - case FASTTRAP_T_ARM64_LDRSW_PC_REL: - set_saved_state64_regno(regs64, regno, 1, sign_extend(value.val32, 31)); - break; - case FASTTRAP_T_ARM64_LDR_X_PC_REL: - set_saved_state64_regno(regs64, regno, 1, value.val64); - break; - case FASTTRAP_T_ARM64_LDR_S_PC_REL: - ns64->v.s[regno][0] = value.val32; - break; - case FASTTRAP_T_ARM64_LDR_D_PC_REL: - ns64->v.d[regno][0] = value.val64; - break; - case FASTTRAP_T_ARM64_LDR_Q_PC_REL: - ns64->v.q[regno] = value.val128; - break; - default: - panic("Should never get here!"); - } - - - /* Move PC forward */ - new_pc = regs64->pc + 4; - *was_simulated = 1; + valsize = 16; break; - - } - - case FASTTRAP_T_ARM64_PRFM: - { - /* Becomes a NOP (architecturally permitted). Just move PC forward */ - new_pc = regs64->pc + 4; - *was_simulated = 1; + default: + panic("Should never get here!"); + valsize = -1; break; } - /* - * End explicit memory accesses. 
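Concretely, for the literal-load case just above: the 19-bit word offset sits at bits [23:5], the destination register at bits [4:0], and the kernel then copyin()s valsize bytes from pc + offset. A runnable decode of one made-up ldr literal:

#include <stdint.h>
#include <stdio.h>

static int64_t
literal_offset(uint32_t instr)
{
        int64_t imm19 = (instr >> 5) & 0x7ffff;     /* bits [23:5] */
        if (imm19 & (1LL << 18)) {                  /* sign bit of the field */
                imm19 -= 1LL << 19;
        }
        return imm19 * 4;                           /* words -> bytes */
}

int
main(void)
{
        uint64_t pc = 0x100003f90ULL;               /* fabricated */
        /* ldr x7, .+0x1000 : 0x58000000 | imm19 << 5 | Rt */
        uint32_t instr = 0x58000000u | ((0x1000u / 4) << 5) | 7;

        uint32_t regno = instr & 0x1f;              /* destination register */
        uint64_t address = pc + (uint64_t)literal_offset(instr);

        /* The kernel would now copyin() 8 bytes (valsize for the X form)
         * from "address" and store them via set_saved_state64_regno(). */
        printf("x%u <- [0x%llx]\n", regno, (unsigned long long)address);
        return 0;
}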
- */ - - /* - * Branches: parse condition codes if needed, emulate for correctness and - * in the case of the indirect branches, convenience - * B.cond - * CBNZ Wn, label - * CBNZ Xn, label - * CBZ Wn, label - * CBZ Xn, label - * TBNZ, Xn|Wn, #uimm16, label - * TBZ, Xn|Wn, #uimm16, label - * - * B label - * BL label - * - * BLR Xm - * BR Xm - * RET Xm - */ - case FASTTRAP_T_ARM64_B_COND: - { - int cond; - - /* Extract condition code */ - cond = (instr & 0xf); - - /* Determine if it passes */ - if (condition_true(cond, regs64->cpsr)) { - uint64_t offset; - - /* Extract 19-bit target offset, add to PC */ - offset = extract_address_literal_sign_extended(instr, 5, 19); - new_pc = regs64->pc + offset; - } else { - /* Move forwards */ - new_pc = regs64->pc + 4; - } - - *was_simulated = 1; + if (copyin(address, &value, valsize) != 0) { + fasttrap_sigsegv(p, uthread, address, state); + new_pc = regs64->pc; /* Bit of a hack, we know about update in fasttrap_sigsegv() */ break; } - case FASTTRAP_T_ARM64_CBNZ_W: - { - do_cbz_cnbz(regs64, 32, instr, 0, &new_pc); - *was_simulated = 1; + /* Stash in correct register slot */ + switch (tp->ftt_type) { + case FASTTRAP_T_ARM64_LDR_W_PC_REL: + set_saved_state64_regno(regs64, regno, 1, value.val32); break; - } - case FASTTRAP_T_ARM64_CBNZ_X: - { - do_cbz_cnbz(regs64, 64, instr, 0, &new_pc); - *was_simulated = 1; + case FASTTRAP_T_ARM64_LDRSW_PC_REL: + set_saved_state64_regno(regs64, regno, 1, sign_extend(value.val32, 31)); break; - } - case FASTTRAP_T_ARM64_CBZ_W: - { - do_cbz_cnbz(regs64, 32, instr, 1, &new_pc); - *was_simulated = 1; + case FASTTRAP_T_ARM64_LDR_X_PC_REL: + set_saved_state64_regno(regs64, regno, 1, value.val64); break; - } - case FASTTRAP_T_ARM64_CBZ_X: - { - do_cbz_cnbz(regs64, 64, instr, 1, &new_pc); - *was_simulated = 1; + case FASTTRAP_T_ARM64_LDR_S_PC_REL: + ns64->v.s[regno][0] = value.val32; break; - } - - case FASTTRAP_T_ARM64_TBNZ: - { - do_tbz_tbnz(regs64, instr, 0, &new_pc); - *was_simulated = 1; + case FASTTRAP_T_ARM64_LDR_D_PC_REL: + ns64->v.d[regno][0] = value.val64; break; - } - case FASTTRAP_T_ARM64_TBZ: - { - do_tbz_tbnz(regs64, instr, 1, &new_pc); - *was_simulated = 1; + case FASTTRAP_T_ARM64_LDR_Q_PC_REL: + ns64->v.q[regno] = value.val128; break; + default: + panic("Should never get here!"); } - case FASTTRAP_T_ARM64_B: - case FASTTRAP_T_ARM64_BL: - { - uint64_t offset; - /* Extract offset from instruction */ - offset = extract_address_literal_sign_extended(instr, 0, 26); - /* Update LR if appropriate */ - if (tp->ftt_type == FASTTRAP_T_ARM64_BL) { - regs64->lr = regs64->pc + 4; - } + /* Move PC forward */ + new_pc = regs64->pc + 4; + *was_simulated = 1; + break; + } + + case FASTTRAP_T_ARM64_PRFM: + { + /* Becomes a NOP (architecturally permitted). Just move PC forward */ + new_pc = regs64->pc + 4; + *was_simulated = 1; + break; + } + + /* + * End explicit memory accesses. 
+ */ - /* Compute PC (unsigned addition for defined overflow) */ + /* + * Branches: parse condition codes if needed, emulate for correctness and + * in the case of the indirect branches, convenience + * B.cond + * CBNZ Wn, label + * CBNZ Xn, label + * CBZ Wn, label + * CBZ Xn, label + * TBNZ, Xn|Wn, #uimm16, label + * TBZ, Xn|Wn, #uimm16, label + * + * B label + * BL label + * + * BLR Xm + * BR Xm + * RET Xm + */ + case FASTTRAP_T_ARM64_B_COND: + { + int cond; + + /* Extract condition code */ + cond = (instr & 0xf); + + /* Determine if it passes */ + if (condition_true(cond, regs64->cpsr)) { + uint64_t offset; + + /* Extract 19-bit target offset, add to PC */ + offset = extract_address_literal_sign_extended(instr, 5, 19); new_pc = regs64->pc + offset; - *was_simulated = 1; - break; + } else { + /* Move forwards */ + new_pc = regs64->pc + 4; } - case FASTTRAP_T_ARM64_BLR: - case FASTTRAP_T_ARM64_BR: - { - uint32_t regno; + *was_simulated = 1; + break; + } - /* Extract register from instruction */ - regno = ((instr >> 5) & 0x1f); - assert(regno <= 31); + case FASTTRAP_T_ARM64_CBNZ_W: + { + do_cbz_cnbz(regs64, 32, instr, 0, &new_pc); + *was_simulated = 1; + break; + } + case FASTTRAP_T_ARM64_CBNZ_X: + { + do_cbz_cnbz(regs64, 64, instr, 0, &new_pc); + *was_simulated = 1; + break; + } + case FASTTRAP_T_ARM64_CBZ_W: + { + do_cbz_cnbz(regs64, 32, instr, 1, &new_pc); + *was_simulated = 1; + break; + } + case FASTTRAP_T_ARM64_CBZ_X: + { + do_cbz_cnbz(regs64, 64, instr, 1, &new_pc); + *was_simulated = 1; + break; + } - /* Update LR if appropriate */ - if (tp->ftt_type == FASTTRAP_T_ARM64_BLR) { - regs64->lr = regs64->pc + 4; - } + case FASTTRAP_T_ARM64_TBNZ: + { + do_tbz_tbnz(regs64, instr, 0, &new_pc); + *was_simulated = 1; + break; + } + case FASTTRAP_T_ARM64_TBZ: + { + do_tbz_tbnz(regs64, instr, 1, &new_pc); + *was_simulated = 1; + break; + } + case FASTTRAP_T_ARM64_B: + case FASTTRAP_T_ARM64_BL: + { + uint64_t offset; - /* Update PC in saved state */ - new_pc = get_saved_state64_regno(regs64, regno, 1); - *was_simulated = 1; - break; + /* Extract offset from instruction */ + offset = extract_address_literal_sign_extended(instr, 0, 26); + + /* Update LR if appropriate */ + if (tp->ftt_type == FASTTRAP_T_ARM64_BL) { + regs64->lr = regs64->pc + 4; } - case FASTTRAP_T_ARM64_RET: - { - /* Extract register */ - unsigned regno = ((instr >> 5) & 0x1f); - assert(regno <= 31); + /* Compute PC (unsigned addition for defined overflow) */ + new_pc = regs64->pc + offset; + *was_simulated = 1; + break; + } - /* Set PC to register value (xzr, not sp) */ - new_pc = get_saved_state64_regno(regs64, regno, 1); + case FASTTRAP_T_ARM64_BLR: + case FASTTRAP_T_ARM64_BR: + { + uint32_t regno; - *was_simulated = 1; - break; + /* Extract register from instruction */ + regno = ((instr >> 5) & 0x1f); + assert(regno <= 31); + + /* Update LR if appropriate */ + if (tp->ftt_type == FASTTRAP_T_ARM64_BLR) { + regs64->lr = regs64->pc + 4; } - case FASTTRAP_T_ARM64_RETAB: - { - /* Set PC to register value (xzr, not sp) */ - new_pc = get_saved_state64_regno(regs64, 30, 1); + + /* Update PC in saved state */ + new_pc = get_saved_state64_regno(regs64, regno, 1); + *was_simulated = 1; + break; + } + + case FASTTRAP_T_ARM64_RET: + { + /* Extract register */ + unsigned regno = ((instr >> 5) & 0x1f); + assert(regno <= 31); + + /* Set PC to register value (xzr, not sp) */ + new_pc = get_saved_state64_regno(regs64, regno, 1); + + *was_simulated = 1; + break; + } + case FASTTRAP_T_ARM64_RETAB: + { + /* Set PC to register value (xzr, not sp) 
*/ + new_pc = get_saved_state64_regno(regs64, 30, 1); #if __has_feature(ptrauth_calls) - new_pc = (user_addr_t) ptrauth_strip((void *)new_pc, ptrauth_key_return_address); + new_pc = (user_addr_t) ptrauth_strip((void *)new_pc, ptrauth_key_return_address); #endif - *was_simulated = 1; - break; + *was_simulated = 1; + break; + } + /* + * End branches. + */ + /* + * Address calculations: emulate for correctness. + * + * ADRP Xd, label + * ADR Xd, label + */ + case FASTTRAP_T_ARM64_ADRP: + case FASTTRAP_T_ARM64_ADR: + { + uint64_t immhi, immlo, offset, result; + uint32_t regno; + + /* Extract destination register */ + regno = (instr & 0x1f); + assert(regno <= 31); + + /* Extract offset */ + immhi = ((instr & 0x00ffffe0) >> 5); /* bits [23,5]: 19 bits */ + immlo = ((instr & 0x60000000) >> 29); /* bits [30,29]: 2 bits */ + + /* Add to PC. Use unsigned addition so that overflow wraps (rather than being undefined). */ + if (tp->ftt_type == FASTTRAP_T_ARM64_ADRP) { + offset = (immhi << 14) | (immlo << 12); /* Concatenate bits into [32,12]*/ + offset = sign_extend(offset, 32); /* Sign extend from bit 32 */ + result = (regs64->pc & ~0xfffULL) + offset; /* And add to page of current pc */ + } else { + assert(tp->ftt_type == FASTTRAP_T_ARM64_ADR); + offset = (immhi << 2) | immlo; /* Concatenate bits into [20,0] */ + offset = sign_extend(offset, 20); /* Sign-extend */ + result = regs64->pc + offset; /* And add to page of current pc */ } - /* - * End branches. - */ - /* - * Address calculations: emulate for correctness. - * - * ADRP Xd, label - * ADR Xd, label - */ - case FASTTRAP_T_ARM64_ADRP: - case FASTTRAP_T_ARM64_ADR: - { - uint64_t immhi, immlo, offset, result; - uint32_t regno; - - /* Extract destination register */ - regno = (instr & 0x1f); - assert(regno <= 31); - - /* Extract offset */ - immhi = ((instr & 0x00ffffe0) >> 5); /* bits [23,5]: 19 bits */ - immlo = ((instr & 0x60000000) >> 29); /* bits [30,29]: 2 bits */ - - /* Add to PC. Use unsigned addition so that overflow wraps (rather than being undefined). */ - if (tp->ftt_type == FASTTRAP_T_ARM64_ADRP) { - offset = (immhi << 14) | (immlo << 12); /* Concatenate bits into [32,12]*/ - offset = sign_extend(offset, 32); /* Sign extend from bit 32 */ - result = (regs64->pc & ~0xfffULL) + offset; /* And add to page of current pc */ - } else { - assert(tp->ftt_type == FASTTRAP_T_ARM64_ADR); - offset = (immhi << 2) | immlo; /* Concatenate bits into [20,0] */ - offset = sign_extend(offset, 20); /* Sign-extend */ - result = regs64->pc + offset; /* And add to page of current pc */ - } + /* xzr, not sp */ + set_saved_state64_regno(regs64, regno, 1, result); - /* xzr, not sp */ - set_saved_state64_regno(regs64, regno, 1, result); - - /* Move PC forward */ - new_pc = regs64->pc + 4; - *was_simulated = 1; - break; - } + /* Move PC forward */ + new_pc = regs64->pc + 4; + *was_simulated = 1; + break; + } - /* - * End address calculations. - */ + /* + * End address calculations. 
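The ADRP arithmetic above is easy to check in isolation: immhi:immlo form a signed 21-bit count, which ADRP scales to 4 KB pages and adds to the PC's page, while ADR adds the raw byte count to the PC. A stand-alone replay of the ADRP path with a fabricated PC:

#include <stdint.h>
#include <stdio.h>

static int64_t
sext(int64_t v, uint32_t bit)   /* sign-extend from the given bit */
{
        return (v & (1LL << bit)) ? (v | ~((1LL << (bit + 1)) - 1)) : v;
}

int
main(void)
{
        uint64_t pc = 0x100003fa4ULL;                /* fabricated */
        uint32_t instr = 0x90000001u | (2u << 29);   /* adrp x1, #0x2000 */

        uint64_t immhi = (instr & 0x00ffffe0) >> 5;  /* bits [23:5]: 19 bits */
        uint64_t immlo = (instr & 0x60000000) >> 29; /* bits [30:29]: 2 bits */

        /* ADRP: the offset occupies bits [32:12] of the result */
        int64_t offset = sext((int64_t)((immhi << 14) | (immlo << 12)), 32);
        uint64_t result = (pc & ~0xfffULL) + (uint64_t)offset;

        printf("x1 = 0x%llx\n", (unsigned long long)result); /* 0x100005000 */
        return 0;
}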
+ */ - /* - * Everything else: thunk to userland - */ - case FASTTRAP_T_COMMON: - { - fasttrap_pid_probe_thunk_instr64(state, tp, p, uthread, &tp->ftt_instr, 1, &new_pc); - *was_simulated = 0; - break; - } - default: - { - panic("An instruction DTrace doesn't expect: %d\n", tp->ftt_type); - break; - } + /* + * Everything else: thunk to userland + */ + case FASTTRAP_T_COMMON: + { + fasttrap_pid_probe_thunk_instr64(state, tp, p, uthread, &tp->ftt_instr, 1, &new_pc); + *was_simulated = 0; + break; + } + default: + { + panic("An instruction DTrace doesn't expect: %d\n", tp->ftt_type); + break; + } } set_saved_state_pc(state, new_pc); @@ -1754,7 +1763,7 @@ fasttrap_pid_probe(arm_saved_state_t *state) if (uthread->t_dtrace_step) { ASSERT(uthread->t_dtrace_on); fasttrap_sigtrap(p, uthread, (user_addr_t)pc); - return (0); + return 0; } /* @@ -1774,23 +1783,25 @@ fasttrap_pid_probe(arm_saved_state_t *state) */ if (p->p_lflag & P_LINVFORK) { proc_list_lock(); - while (p->p_lflag & P_LINVFORK) + while (p->p_lflag & P_LINVFORK) { p = p->p_pptr; + } proc_list_unlock(); } pid = p->p_pid; pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock; lck_mtx_lock(pid_mtx); - bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid,pc)]; + bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; /* * Lookup the tracepoint that the process just hit. */ for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { if (pid == tp->ftt_pid && pc == tp->ftt_pc && - tp->ftt_proc->ftpc_acount != 0) + tp->ftt_proc->ftpc_acount != 0) { break; + } } /* @@ -1800,7 +1811,7 @@ fasttrap_pid_probe(arm_saved_state_t *state) */ if (tp == NULL) { lck_mtx_unlock(pid_mtx); - return (-1); + return -1; } /* Validation of THUMB-related state */ @@ -1808,7 +1819,7 @@ fasttrap_pid_probe(arm_saved_state_t *state) if (!fasttrap_pid_probe_thumb_state_valid(saved_state32(state), tp)) { fasttrap_tracepoint_remove(p, tp); lck_mtx_unlock(pid_mtx); - return (-1); + return -1; } } @@ -1836,7 +1847,7 @@ fasttrap_pid_probe(arm_saved_state_t *state) #ifndef CONFIG_EMBEDDED if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, probe->ftp_id, - 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); + 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { #endif @@ -1864,11 +1875,11 @@ fasttrap_pid_probe(arm_saved_state_t *state) cookie = dtrace_interrupt_disable(); DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY); dtrace_probe(probe->ftp_id, - get_saved_state_reg(state, 0), - get_saved_state_reg(state, 1), - get_saved_state_reg(state, 2), - get_saved_state_reg(state, 3), - arg4); + get_saved_state_reg(state, 0), + get_saved_state_reg(state, 1), + get_saved_state_reg(state, 2), + get_saved_state_reg(state, 3), + arg4); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY); dtrace_interrupt_enable(cookie); } else if (id->fti_ptype == DTFTP_IS_ENABLED) { @@ -1882,12 +1893,11 @@ fasttrap_pid_probe(arm_saved_state_t *state) is_enabled = 1; } else if (probe->ftp_argmap == NULL) { dtrace_probe(probe->ftp_id, - get_saved_state_reg(state, 0), - get_saved_state_reg(state, 1), - get_saved_state_reg(state, 2), - get_saved_state_reg(state, 3), - arg4); - + get_saved_state_reg(state, 0), + get_saved_state_reg(state, 1), + get_saved_state_reg(state, 2), + get_saved_state_reg(state, 3), + arg4); } else { uint64_t t[5]; @@ -1928,12 +1938,12 @@ fasttrap_pid_probe(arm_saved_state_t *state) fasttrap_pid_probe_handle_patched_instr32(state, tp, uthread, p, is_enabled, &was_simulated); } - /* + /* * If there were no return probes when we 
first found the tracepoint, * we should feel no obligation to honor any return probes that were * subsequently enabled -- they'll just have to wait until the next - * time around. - */ + * time around. + */ if (tp->ftt_retids != NULL) { /* * We need to wait until the results of the instruction are @@ -1958,7 +1968,7 @@ fasttrap_pid_probe(arm_saved_state_t *state) } } - return (0); + return 0; } int @@ -1981,8 +1991,9 @@ fasttrap_return_probe(arm_saved_state_t *regs) */ if (p->p_lflag & P_LINVFORK) { proc_list_lock(); - while (p->p_lflag & P_LINVFORK) + while (p->p_lflag & P_LINVFORK) { p = p->p_pptr; + } proc_list_unlock(); } @@ -1997,45 +2008,45 @@ fasttrap_return_probe(arm_saved_state_t *regs) fasttrap_return_common(p, regs, pc, npc); - return (0); + return 0; } uint64_t fasttrap_pid_getarg(void *arg, dtrace_id_t id, void *parg, int argno, - int aframes) + int aframes) { #pragma unused(arg, id, parg, aframes) arm_saved_state_t* regs = find_user_regs(current_thread()); if (is_saved_state32(regs)) { /* First four arguments are in registers */ - if (argno < 4) + if (argno < 4) { return saved_state32(regs)->r[argno]; + } /* Look on the stack for the rest */ uint32_t value; uint32_t* sp = (uint32_t*)(uintptr_t) saved_state32(regs)->sp; DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - value = dtrace_fuword32((user_addr_t) (sp+argno-4)); + value = dtrace_fuword32((user_addr_t) (sp + argno - 4)); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR); return value; - } - else { + } else { /* First eight arguments are in registers */ - if (argno < 8) + if (argno < 8) { return saved_state64(regs)->x[argno]; + } /* Look on the stack for the rest */ uint64_t value; uint64_t* sp = (uint64_t*) saved_state64(regs)->sp; DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - value = dtrace_fuword64((user_addr_t) (sp+argno-8)); + value = dtrace_fuword64((user_addr_t) (sp + argno - 8)); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR); - return value; + return value; } - } uint64_t @@ -2043,9 +2054,8 @@ fasttrap_usdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int afram { #pragma unused(arg, id, parg, argno, aframes) #if 0 - return (fasttrap_anarg(ttolwp(curthread)->lwp_regs, 0, argno)); + return fasttrap_anarg(ttolwp(curthread)->lwp_regs, 0, argno); #endif return 0; } - diff --git a/bsd/dev/arm64/fbt_arm.c b/bsd/dev/arm64/fbt_arm.c index 3364a066e..083f98665 100644 --- a/bsd/dev/arm64/fbt_arm.c +++ b/bsd/dev/arm64/fbt_arm.c @@ -31,12 +31,12 @@ #ifdef KERNEL #ifndef _KERNEL -#define _KERNEL /* Solaris vs. Darwin */ +#define _KERNEL /* Solaris vs. Darwin */ #endif #endif -#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from - * mach/ppc/thread_status.h */ +#define MACH__POSIX_C_SOURCE_PRIVATE 1 /* pulls in suitable savearea from + * mach/ppc/thread_status.h */ #include #include #include @@ -67,8 +67,8 @@ #define DTRACE_INVOP_PUSH_FRAME 11 -#define DTRACE_INVOP_NOP_SKIP 4 -#define DTRACE_INVOP_ADD_FP_SP_SKIP 4 +#define DTRACE_INVOP_NOP_SKIP 4 +#define DTRACE_INVOP_ADD_FP_SP_SKIP 4 #define DTRACE_INVOP_POP_PC_SKIP 2 @@ -76,45 +76,45 @@ * stp fp, lr, [sp, #val] * stp fp, lr, [sp, #val]! */ -#define FBT_IS_ARM64_FRAME_PUSH(x) \ +#define FBT_IS_ARM64_FRAME_PUSH(x) \ (((x) & 0xffc07fff) == 0xa9007bfd || ((x) & 0xffc07fff) == 0xa9807bfd) /* * stp Xt1, Xt2, [sp, #val] * stp Xt1, Xt2, [sp, #val]! 
*/ -#define FBT_IS_ARM64_PUSH(x) \ +#define FBT_IS_ARM64_PUSH(x) \ (((x) & 0xffc003e0) == 0xa90003e0 || ((x) & 0xffc003e0) == 0xa98003e0) /* * ldp fp, lr, [sp, #val] * ldp fp, lr, [sp], #val */ -#define FBT_IS_ARM64_FRAME_POP(x) \ +#define FBT_IS_ARM64_FRAME_POP(x) \ (((x) & 0xffc07fff) == 0xa9407bfd || ((x) & 0xffc07fff) == 0xa8c07bfd) -#define FBT_IS_ARM64_ADD_FP_SP(x) (((x) & 0xffc003ff) == 0x910003fd) /* add fp, sp, #val (add fp, sp, #0 == mov fp, sp) */ -#define FBT_IS_ARM64_RET(x) (((x) == 0xd65f03c0) || ((x) == 0xd65f0fff)) /* ret, retab */ +#define FBT_IS_ARM64_ADD_FP_SP(x) (((x) & 0xffc003ff) == 0x910003fd) /* add fp, sp, #val (add fp, sp, #0 == mov fp, sp) */ +#define FBT_IS_ARM64_RET(x) (((x) == 0xd65f03c0) || ((x) == 0xd65f0fff)) /* ret, retab */ -#define FBT_B_MASK 0xff000000 -#define FBT_B_IMM_MASK 0x00ffffff -#define FBT_B_INSTR 0x14000000 +#define FBT_B_MASK 0xff000000 +#define FBT_B_IMM_MASK 0x00ffffff +#define FBT_B_INSTR 0x14000000 -#define FBT_IS_ARM64_B_INSTR(x) ((x & FBT_B_MASK) == FBT_B_INSTR) -#define FBT_GET_ARM64_B_IMM(x) ((x & FBT_B_IMM_MASK) << 2) +#define FBT_IS_ARM64_B_INSTR(x) ((x & FBT_B_MASK) == FBT_B_INSTR) +#define FBT_GET_ARM64_B_IMM(x) ((x & FBT_B_IMM_MASK) << 2) -#define FBT_PATCHVAL 0xe7eeee7e -#define FBT_AFRAMES_ENTRY 7 -#define FBT_AFRAMES_RETURN 7 +#define FBT_PATCHVAL 0xe7eeee7e +#define FBT_AFRAMES_ENTRY 7 +#define FBT_AFRAMES_RETURN 7 -#define FBT_ENTRY "entry" -#define FBT_RETURN "return" -#define FBT_ADDR2NDX(addr) ((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask) +#define FBT_ENTRY "entry" +#define FBT_RETURN "return" +#define FBT_ADDR2NDX(addr) ((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask) -extern dtrace_provider_id_t fbt_id; -extern fbt_probe_t **fbt_probetab; -extern int fbt_probetab_mask; +extern dtrace_provider_id_t fbt_id; +extern fbt_probe_t **fbt_probetab; +extern int fbt_probetab_mask; kern_return_t fbt_perfCallback(int, struct arm_saved_state *, __unused int, __unused int); @@ -126,8 +126,8 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) for (; fbt != NULL; fbt = fbt->fbtp_hashnext) { if ((uintptr_t) fbt->fbtp_patchpoint == addr) { if (0 == CPU->cpu_dtrace_invop_underway) { - CPU->cpu_dtrace_invop_underway = 1; /* Race not possible on - * this per-cpu state */ + CPU->cpu_dtrace_invop_underway = 1; /* Race not possible on + * this per-cpu state */ if (fbt->fbtp_roffset == 0) { /* @@ -168,7 +168,7 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) */ CPU->cpu_dtrace_caller = get_saved_state_lr(regs); dtrace_probe(fbt->fbtp_id, get_saved_state_reg(regs, 0), get_saved_state_reg(regs, 1), - get_saved_state_reg(regs, 2), get_saved_state_reg(regs, 3),get_saved_state_reg(regs, 4)); + get_saved_state_reg(regs, 2), get_saved_state_reg(regs, 3), get_saved_state_reg(regs, 4)); CPU->cpu_dtrace_caller = 0; } else { /* @@ -204,15 +204,15 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) } /* - On other architectures, we return a DTRACE constant to let the callback function - know what was replaced. On the ARM, since the function prologue/epilogue machine code - can vary, we need the actual bytes of the instruction, so return the savedval instead. - */ - return (fbt->fbtp_savedval); + * On other architectures, we return a DTRACE constant to let the callback function + * know what was replaced. On the ARM, since the function prologue/epilogue machine code + * can vary, we need the actual bytes of the instruction, so return the savedval instead. 
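The prologue/epilogue matchers above work by masking out the variable fields (the scaled immediate and, for the generic push, the register numbers) and comparing what remains. A quick self-check against real encodings; the instruction words are standard A64 and the macros are copied from the block above:

#include <stdint.h>
#include <stdio.h>

/* "stp fp, lr, [sp, #-16]!" assembles to 0xa9bf7bfd; the pre-index pattern
 * ((x) & 0xffc07fff) == 0xa9807bfd must accept it (the 7-bit scaled
 * immediate lives in the masked-out bits [21:15]). */
#define FBT_IS_ARM64_FRAME_PUSH(x) \
        (((x) & 0xffc07fff) == 0xa9007bfd || ((x) & 0xffc07fff) == 0xa9807bfd)
#define FBT_IS_ARM64_ADD_FP_SP(x)  (((x) & 0xffc003ff) == 0x910003fd)

int
main(void)
{
        uint32_t prologue[] = {
                0xa9bf7bfd,     /* stp fp, lr, [sp, #-16]!  -> frame push */
                0x910003fd,     /* mov fp, sp (add fp, sp, #0) */
        };
        for (unsigned i = 0; i < 2; i++) {
                printf("%08x push=%d add_fp_sp=%d\n", prologue[i],
                    FBT_IS_ARM64_FRAME_PUSH(prologue[i]),
                    FBT_IS_ARM64_ADD_FP_SP(prologue[i]));
        }
        return 0;
}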
+ */ + return fbt->fbtp_savedval; } } - return (0); + return 0; } #define IS_USER_TRAP(regs) (PSR64_IS_USER(get_saved_state_cpsr(regs))) @@ -221,10 +221,10 @@ fbt_invop(uintptr_t addr, uintptr_t * stack, uintptr_t rval) kern_return_t fbt_perfCallback( - int trapno, - struct arm_saved_state * regs, - __unused int unused1, - __unused int unused2) + int trapno, + struct arm_saved_state * regs, + __unused int unused1, + __unused int unused2) { kern_return_t retval = KERN_FAILURE; @@ -235,25 +235,25 @@ fbt_perfCallback( oldlevel = ml_set_interrupts_enabled(FALSE); - __asm__ volatile( - "Ldtrace_invop_callsite_pre_label:\n" - ".data\n" - ".private_extern _dtrace_invop_callsite_pre\n" - "_dtrace_invop_callsite_pre:\n" - " .quad Ldtrace_invop_callsite_pre_label\n" - ".text\n" - ); - - emul = dtrace_invop(get_saved_state_pc(regs), (uintptr_t*) regs, get_saved_state_reg(regs,0)); - - __asm__ volatile( - "Ldtrace_invop_callsite_post_label:\n" - ".data\n" - ".private_extern _dtrace_invop_callsite_post\n" - "_dtrace_invop_callsite_post:\n" - " .quad Ldtrace_invop_callsite_post_label\n" - ".text\n" - ); + __asm__ volatile ( + "Ldtrace_invop_callsite_pre_label:\n" + ".data\n" + ".private_extern _dtrace_invop_callsite_pre\n" + "_dtrace_invop_callsite_pre:\n" + " .quad Ldtrace_invop_callsite_pre_label\n" + ".text\n" + ); + + emul = dtrace_invop(get_saved_state_pc(regs), (uintptr_t*) regs, get_saved_state_reg(regs, 0)); + + __asm__ volatile ( + "Ldtrace_invop_callsite_post_label:\n" + ".data\n" + ".private_extern _dtrace_invop_callsite_post\n" + "_dtrace_invop_callsite_post:\n" + " .quad Ldtrace_invop_callsite_post_label\n" + ".text\n" + ); if (emul == DTRACE_INVOP_NOP) { /* @@ -272,7 +272,7 @@ fbt_perfCallback( /* * emulate the instruction: - * add fp, sp, #val + * add fp, sp, #val */ assert(sp < (UINT64_MAX - val)); set_saved_state_fp(regs, sp + val); @@ -310,10 +310,10 @@ fbt_perfCallback( void fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolName, machine_inst_t* symbolStart, machine_inst_t *instrHigh) { - int doenable = 0; - dtrace_id_t thisid; + int doenable = 0; + dtrace_id_t thisid; - fbt_probe_t *newfbt, *retfbt, *entryfbt; + fbt_probe_t *newfbt, *retfbt, *entryfbt; machine_inst_t *instr, *pushinstr = NULL, *limit, theInstr; int foundPushLR, savedRegs; @@ -334,8 +334,7 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam assert(sizeof(*instr) == 4); - for (instr = symbolStart, theInstr = 0; instr < instrHigh; instr++) - { + for (instr = symbolStart, theInstr = 0; instr < instrHigh; instr++) { /* * Count the number of time we pushed something onto the stack * before hitting a frame push. That will give us an estimation @@ -348,13 +347,16 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam pushinstr = instr; } - if (foundPushLR && (FBT_IS_ARM64_ADD_FP_SP(theInstr))) + if (foundPushLR && (FBT_IS_ARM64_ADD_FP_SP(theInstr))) { /* Guard against a random setting of fp from sp, we make sure we found the push first */ break; - if (FBT_IS_ARM64_RET(theInstr)) /* We've gone too far, bail. */ + } + if (FBT_IS_ARM64_RET(theInstr)) { /* We've gone too far, bail. */ break; - if (FBT_IS_ARM64_FRAME_POP(theInstr)) /* We've gone too far, bail. */ + } + if (FBT_IS_ARM64_FRAME_POP(theInstr)) { /* We've gone too far, bail. 
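When fbt_perfCallback() above emulates "add fp, sp, #val", the immediate it needs is the 12-bit field at bits [21:10] of the A64 ADD (immediate) encoding. The exact extraction is not shown in this hunk, so this is a sketch of the standard decode with fabricated register state:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t sp = 0xfffffff01234abc0ULL;        /* fabricated */
        uint32_t instr = 0x910043fd;                /* add fp, sp, #0x10 */

        /* Same test as FBT_IS_ARM64_ADD_FP_SP above */
        assert((instr & 0xffc003ff) == 0x910003fd);

        uint64_t val = (instr >> 10) & 0xfff;       /* imm12, unshifted form */

        printf("fp = sp + %llu = 0x%llx\n", (unsigned long long)val,
            (unsigned long long)(sp + val));        /* fp = sp + 16 */
        return 0;
}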
*/ break; + } } if (!(foundPushLR && (FBT_IS_ARM64_ADD_FP_SP(theInstr)))) { @@ -364,7 +366,7 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam thisid = dtrace_probe_lookup(fbt_id, modname, symbolName, FBT_ENTRY); newfbt = kmem_zalloc(sizeof(fbt_probe_t), KM_SLEEP); newfbt->fbtp_next = NULL; - strlcpy( (char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS ); + strlcpy((char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS ); if (thisid != 0) { /* @@ -375,11 +377,12 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam * fire, (as indicated by the current patched value), then * we want to enable this newfbt on the spot. */ - entryfbt = dtrace_probe_arg (fbt_id, thisid); - ASSERT (entryfbt != NULL); - for(; entryfbt != NULL; entryfbt = entryfbt->fbtp_next) { - if (entryfbt->fbtp_currentval == entryfbt->fbtp_patchval) + entryfbt = dtrace_probe_arg(fbt_id, thisid); + ASSERT(entryfbt != NULL); + for (; entryfbt != NULL; entryfbt = entryfbt->fbtp_next) { + if (entryfbt->fbtp_currentval == entryfbt->fbtp_patchval) { doenable++; + } if (entryfbt->fbtp_next == NULL) { entryfbt->fbtp_next = newfbt; @@ -387,8 +390,7 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam break; } } - } - else { + } else { /* * The dtrace_probe did not previously exist, so we * create it and hook in the newfbt. Since the probe is @@ -408,8 +410,9 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam newfbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)]; fbt_probetab[FBT_ADDR2NDX(instr)] = newfbt; - if (doenable) + if (doenable) { fbt_enable(NULL, newfbt->fbtp_id, newfbt); + } /* * The fbt entry chain is in place, one entry point per symbol. @@ -418,7 +421,7 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam * Here we find the end of the fbt return chain. */ - doenable=0; + doenable = 0; thisid = dtrace_probe_lookup(fbt_id, modname, symbolName, FBT_RETURN); @@ -429,16 +432,17 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam * (as indicated by the currrent patched value), then * we want to enable any new fbts on the spot. */ - retfbt = dtrace_probe_arg (fbt_id, thisid); + retfbt = dtrace_probe_arg(fbt_id, thisid); ASSERT(retfbt != NULL); - for (; retfbt != NULL; retfbt = retfbt->fbtp_next) { - if (retfbt->fbtp_currentval == retfbt->fbtp_patchval) + for (; retfbt != NULL; retfbt = retfbt->fbtp_next) { + if (retfbt->fbtp_currentval == retfbt->fbtp_patchval) { doenable++; - if(retfbt->fbtp_next == NULL) + } + if (retfbt->fbtp_next == NULL) { break; + } } - } - else { + } else { doenable = 0; retfbt = NULL; } @@ -450,8 +454,9 @@ fbt_provide_probe(struct modctl *ctl, const char *modname, const char* symbolNam */ instr = pushinstr + 1; again: - if (instr >= limit) + if (instr >= limit) { return; + } /* XXX FIXME ... extra jump table detection? */ @@ -462,8 +467,9 @@ again: /* Walked onto the start of the next routine? 
If so, bail out from this function */ if (FBT_IS_ARM64_FRAME_PUSH(theInstr)) { - if (!retfbt) - kprintf("dtrace: fbt: No return probe for %s, walked to next routine at 0x%016llx\n",symbolName,(uint64_t)instr); + if (!retfbt) { + kprintf("dtrace: fbt: No return probe for %s, walked to next routine at 0x%016llx\n", symbolName, (uint64_t)instr); + } return; } @@ -471,8 +477,8 @@ again: /* * Look for: - * ldp fp, lr, [sp], #val - * ldp fp, lr, [sp, #val] + * ldp fp, lr, [sp], #val + * ldp fp, lr, [sp, #val] */ if (!FBT_IS_ARM64_FRAME_POP(theInstr)) { instr++; @@ -485,25 +491,28 @@ again: /* Scan ahead for a ret or a branch outside the function */ for (; instr < limit; instr++) { theInstr = *instr; - if (FBT_IS_ARM64_RET(theInstr)) + if (FBT_IS_ARM64_RET(theInstr)) { break; + } if (FBT_IS_ARM64_B_INSTR(theInstr)) { machine_inst_t *dest = instr + FBT_GET_ARM64_B_IMM(theInstr); /* * Check whether the destination of the branch * is outside of the function */ - if (dest >= limit || dest < symbolStart) + if (dest >= limit || dest < symbolStart) { break; + } } } - if (!FBT_IS_ARM64_RET(theInstr) && !FBT_IS_ARM64_B_INSTR(theInstr)) + if (!FBT_IS_ARM64_RET(theInstr) && !FBT_IS_ARM64_B_INSTR(theInstr)) { return; + } newfbt = kmem_zalloc(sizeof(fbt_probe_t), KM_SLEEP); newfbt->fbtp_next = NULL; - strlcpy( (char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS ); + strlcpy((char *)&(newfbt->fbtp_name), symbolName, MAX_FBTP_NAME_CHARS ); if (retfbt == NULL) { newfbt->fbtp_id = dtrace_probe_create(fbt_id, modname, @@ -527,8 +536,9 @@ again: newfbt->fbtp_hashnext = fbt_probetab[FBT_ADDR2NDX(instr)]; fbt_probetab[FBT_ADDR2NDX(instr)] = newfbt; - if (doenable) + if (doenable) { fbt_enable(NULL, newfbt->fbtp_id, newfbt); + } instr++; goto again; diff --git a/bsd/dev/arm64/sdt_arm.c b/bsd/dev/arm64/sdt_arm.c index 17bb69327..598bd05b7 100644 --- a/bsd/dev/arm64/sdt_arm.c +++ b/bsd/dev/arm64/sdt_arm.c @@ -57,13 +57,13 @@ sdt_invop(__unused uintptr_t addr, __unused uintptr_t *stack, __unused uintptr_t struct arm_saved_state* regs = (struct arm_saved_state*) stack; dtrace_probe(sdt->sdp_id, get_saved_state_reg(regs, 0), get_saved_state_reg(regs, 1), - get_saved_state_reg(regs, 2), get_saved_state_reg(regs, 3),get_saved_state_reg(regs, 4)); + get_saved_state_reg(regs, 2), get_saved_state_reg(regs, 3), get_saved_state_reg(regs, 4)); - return (DTRACE_INVOP_NOP); + return DTRACE_INVOP_NOP; } } - return (0); + return 0; } struct frame { @@ -75,10 +75,9 @@ struct frame { uint64_t sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) { +#pragma unused(arg,id,parg) /* __APPLE__ */ -#pragma unused(arg,id,parg) /* __APPLE__ */ - - uint64_t val = 0; + uint64_t val = 0; struct frame *fp = (struct frame *)__builtin_frame_address(0); uintptr_t *stack; uintptr_t pc; @@ -88,18 +87,17 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) * A total of eight arguments are passed via registers; any argument * with an index of 7 or lower is therefore in a register. */ - + int inreg = 7; - + for (i = 1; i <= aframes; i++) { fp = fp->backchain; pc = fp->retaddr; - - if (dtrace_invop_callsite_pre != NULL - && pc > (uintptr_t)dtrace_invop_callsite_pre - && pc <= (uintptr_t)dtrace_invop_callsite_post) { - /* + if (dtrace_invop_callsite_pre != NULL + && pc > (uintptr_t)dtrace_invop_callsite_pre + && pc <= (uintptr_t)dtrace_invop_callsite_post) { + /* * When we pass through the invalid op handler, * we expect to find the save area structure, * pushed on the stack where we took the trap. 
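fasttrap_pid_probe_getarg() earlier and sdt_getarg() here share one rule: on arm64 the first eight arguments live in x0-x7 (inreg = 7) and anything later is read from the marshalled area on the stack. A minimal model with fabricated register and stack contents:

#include <stdint.h>
#include <stdio.h>

static uint64_t
getarg(const uint64_t x[8], const uint64_t *sp, int argno)
{
        int inreg = 7;                       /* highest register-passed index */

        if (argno <= inreg) {
                return x[argno];             /* still in a register */
        }
        return sp[argno - (inreg + 1)];      /* marshalled on the stack */
}

int
main(void)
{
        uint64_t x[8]   = { 10, 11, 12, 13, 14, 15, 16, 17 };
        uint64_t stk[2] = { 18, 19 };

        printf("arg3 = %llu arg9 = %llu\n",
            (unsigned long long)getarg(x, stk, 3),
            (unsigned long long)getarg(x, stk, 9));  /* 13 and 19 */
        return 0;
}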
@@ -108,7 +106,7 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) * If the argument we seek is passed on the stack, then * we increment the frame pointer further, to find the * pushed args - */ + */ /* fp points to the dtrace_invop activation */ fp = fp->backchain; /* fbt_perfCallback */ @@ -128,7 +126,7 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) argno -= (inreg + 1); } goto load; - } + } } /* @@ -140,23 +138,23 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) */ argno++; /* Advance past probeID */ - if (argno <= inreg) { + if (argno <= inreg) { /* * This shouldn't happen. If the argument is passed in a * register then it should have been, well, passed in a * register... */ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - return (0); + return 0; } - + argno -= (inreg + 1); stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */ load: DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); - /* dtrace_probe arguments arg0 .. arg4 are 64bits wide */ + /* dtrace_probe arguments arg0 .. arg4 are 64bits wide */ val = (uint64_t)(*(((uintptr_t *)stack) + argno)); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return (val); -} + return val; +} diff --git a/bsd/dev/arm64/sysctl.c b/bsd/dev/arm64/sysctl.c index deb952d44..e2715281d 100644 --- a/bsd/dev/arm64/sysctl.c +++ b/bsd/dev/arm64/sysctl.c @@ -12,12 +12,13 @@ #include #include -extern uint64_t wake_abstime; +extern uint64_t wake_abstime; +extern int lck_mtx_adaptive_spin_mode; static SYSCTL_QUAD(_machdep, OID_AUTO, wake_abstime, - CTLFLAG_RD, &wake_abstime, - "Absolute Time at the last wakeup"); + CTLFLAG_RD, &wake_abstime, + "Absolute Time at the last wakeup"); static int sysctl_time_since_reset SYSCTL_HANDLER_ARGS @@ -34,9 +35,9 @@ sysctl_time_since_reset SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_machdep, OID_AUTO, time_since_reset, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_time_since_reset, "I", - "Continuous time since last SOC boot/wake started"); + CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, + 0, 0, sysctl_time_since_reset, "I", + "Continuous time since last SOC boot/wake started"); static int sysctl_wake_conttime SYSCTL_HANDLER_ARGS @@ -53,9 +54,9 @@ sysctl_wake_conttime SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_machdep, OID_AUTO, wake_conttime, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_wake_conttime, "I", - "Continuous Time at the last wakeup"); + CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, + 0, 0, sysctl_wake_conttime, "I", + "Continuous Time at the last wakeup"); /* @@ -63,8 +64,8 @@ SYSCTL_PROC(_machdep, OID_AUTO, wake_conttime, * use host_info() to simulate reasonable answers. 
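The machdep.cpu handlers that follow all funnel into arm_host_info(), whose trick is that each OID stores an offsetof() into host_basic_info_data_t as arg1, and the shared handler indexes the struct as an array of 32-bit words. Modeled here with a stand-in struct rather than the real Mach definition:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for host_basic_info_data_t; field order is illustrative only. */
typedef struct {
        int32_t max_cpus;
        int32_t avail_cpus;
        int32_t physical_cpu;
        int32_t physical_cpu_max;
        int32_t logical_cpu;
        int32_t logical_cpu_max;
} fake_host_basic_info_t;

static uint32_t
read_field(const fake_host_basic_info_t *hinfo, size_t arg1)
{
        uintptr_t woffset = arg1 / sizeof(uint32_t);    /* word index */
        return *(((const uint32_t *)hinfo) + woffset);
}

int
main(void)
{
        fake_host_basic_info_t hinfo = { 8, 8, 6, 8, 6, 8 };
        size_t arg1 = offsetof(fake_host_basic_info_t, physical_cpu_max);

        printf("cores_per_package = %u\n", read_field(&hinfo, arg1)); /* 8 */
        return 0;
}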
*/ -SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "CPU info"); +SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "CPU info"); static int arm_host_info SYSCTL_HANDLER_ARGS @@ -73,18 +74,20 @@ arm_host_info SYSCTL_HANDLER_ARGS host_basic_info_data_t hinfo; mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; -#define BSD_HOST 1 +#define BSD_HOST 1 kern_return_t kret = host_info((host_t)BSD_HOST, - HOST_BASIC_INFO, (host_info_t)&hinfo, &count); - if (KERN_SUCCESS != kret) - return (EINVAL); + HOST_BASIC_INFO, (host_info_t)&hinfo, &count); + if (KERN_SUCCESS != kret) { + return EINVAL; + } - if (sizeof (uint32_t) != arg2) + if (sizeof(uint32_t) != arg2) { panic("size mismatch"); + } - uintptr_t woffset = (uintptr_t)arg1 / sizeof (uint32_t); + uintptr_t woffset = (uintptr_t)arg1 / sizeof(uint32_t); uint32_t datum = *(uint32_t *)(((uint32_t *)&hinfo) + woffset); - return (SYSCTL_OUT(req, &datum, sizeof (datum))); + return SYSCTL_OUT(req, &datum, sizeof(datum)); } /* @@ -95,10 +98,10 @@ arm_host_info SYSCTL_HANDLER_ARGS */ static SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(host_basic_info_data_t, physical_cpu_max), - sizeof (integer_t), - arm_host_info, "I", "CPU cores per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(host_basic_info_data_t, physical_cpu_max), + sizeof(integer_t), + arm_host_info, "I", "CPU cores per package"); /* * machdep.cpu.core_count @@ -108,10 +111,10 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package, */ static SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(host_basic_info_data_t, physical_cpu), - sizeof (integer_t), - arm_host_info, "I", "Number of enabled cores per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(host_basic_info_data_t, physical_cpu), + sizeof(integer_t), + arm_host_info, "I", "Number of enabled cores per package"); /* * machdep.cpu.logical_per_package @@ -122,10 +125,10 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count, */ static SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(host_basic_info_data_t, logical_cpu_max), - sizeof (integer_t), - arm_host_info, "I", "CPU logical cpus per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(host_basic_info_data_t, logical_cpu_max), + sizeof(integer_t), + arm_host_info, "I", "CPU logical cpus per package"); /* * machdep.cpu.thread_count @@ -135,10 +138,10 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package, */ static SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(host_basic_info_data_t, logical_cpu), - sizeof (integer_t), - arm_host_info, "I", "Number of enabled threads per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(host_basic_info_data_t, logical_cpu), + sizeof(integer_t), + arm_host_info, "I", "Number of enabled threads per package"); /* * machdep.cpu.brand_string @@ -169,10 +172,22 @@ make_brand_string SYSCTL_HANDLER_ARGS break; } char buf[80]; - snprintf(buf, sizeof (buf), "%s processor", impl); - return (SYSCTL_OUT(req, buf, strlen(buf) + 1)); + snprintf(buf, sizeof(buf), "%s processor", impl); + return SYSCTL_OUT(req, buf, strlen(buf) + 1); } SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string, - CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, make_brand_string, 
"A", "CPU brand string"); + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, make_brand_string, "A", "CPU brand string"); + +static +SYSCTL_INT(_machdep, OID_AUTO, lck_mtx_adaptive_spin_mode, + CTLFLAG_RW, &lck_mtx_adaptive_spin_mode, 0, + "Enable adaptive spin behavior for kernel mutexes"); + +#if DEVELOPMENT || DEBUG +extern uint64_t TLockTimeOut; +SYSCTL_QUAD(_machdep, OID_AUTO, tlto, + CTLFLAG_RW | CTLFLAG_LOCKED, &TLockTimeOut, + "Ticket spinlock timeout (MATUs): use with care"); +#endif diff --git a/bsd/dev/busvar.h b/bsd/dev/busvar.h index 2674eb6d7..9aebabab7 100644 --- a/bsd/dev/busvar.h +++ b/bsd/dev/busvar.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,29 +22,29 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Copyright (c) 1987 Next, Inc. * * HISTORY * 23-Jan-93 Doug Mitchell at NeXT * Broke out machine-independent portion. - */ + */ -#ifdef DRIVER_PRIVATE +#ifdef DRIVER_PRIVATE -#ifndef _BUSVAR_ +#ifndef _BUSVAR_ #define _BUSVAR_ /* pseudo device initialization routine support */ struct pseudo_init { - int ps_count; - int (*ps_func)(int count); + int ps_count; + int (*ps_func)(int count); }; extern struct pseudo_init pseudo_inits[]; #endif /* _BUSVAR_ */ -#endif /* DRIVER_PRIVATE */ +#endif /* DRIVER_PRIVATE */ diff --git a/bsd/dev/dtrace/blist.c b/bsd/dev/dtrace/blist.c index bbaaec0a9..180d30ffb 100644 --- a/bsd/dev/dtrace/blist.c +++ b/bsd/dev/dtrace/blist.c @@ -5,8 +5,8 @@ * are covered by the BSD Copyright as found in /usr/src/COPYRIGHT. * * This module implements a general bitmap allocator/deallocator. The - * allocator eats around 2 bits per 'block'. The module does not - * try to interpret the meaning of a 'block' other then to return + * allocator eats around 2 bits per 'block'. The module does not + * try to interpret the meaning of a 'block' other then to return * SWAPBLK_NONE on an allocation failure. * * A radix tree is used to maintain the bitmap. Two radix constants are @@ -14,9 +14,9 @@ * 32), and one for the meta nodes (typically 16). Both meta and leaf * nodes have a hint field. This field gives us a hint as to the largest * free contiguous range of blocks under the node. It may contain a - * value that is too high, but will never contain a value that is too + * value that is too high, but will never contain a value that is too * low. When the radix tree is searched, allocation failures in subtrees - * update the hint. + * update the hint. 
* * The radix tree also implements two collapsed states for meta nodes: * the ALL-ALLOCATED state and the ALL-FREE state. If a meta node is @@ -24,35 +24,35 @@ * the node is considered stale. These states are used to optimize * allocation and freeing operations. * - * The hinting greatly increases code efficiency for allocations while + * The hinting greatly increases code efficiency for allocations while * the general radix structure optimizes both allocations and frees. The - * radix tree should be able to operate well no matter how much + * radix tree should be able to operate well no matter how much * fragmentation there is and no matter how large a bitmap is used. * * Unlike the rlist code, the blist code wires all necessary memory at * creation time. Neither allocations nor frees require interaction with - * the memory subsystem. In contrast, the rlist code may allocate memory + * the memory subsystem. In contrast, the rlist code may allocate memory * on an rlist_free() call. The non-blocking features of the blist code * are used to great advantage in the swap code (vm/nswap_pager.c). The * rlist code uses a little less overall memory then the blist code (but - * due to swap interleaving not all that much less), but the blist code + * due to swap interleaving not all that much less), but the blist code * scales much, much better. * * LAYOUT: The radix tree is layed out recursively using a * linear array. Each meta node is immediately followed (layed out * sequentially in memory) by BLIST_META_RADIX lower level nodes. This * is a recursive structure but one that can be easily scanned through - * a very simple 'skip' calculation. In order to support large radixes, - * portions of the tree may reside outside our memory allocation. We - * handle this with an early-termination optimization (when bighint is - * set to -1) on the scan. The memory allocation is only large enough + * a very simple 'skip' calculation. In order to support large radixes, + * portions of the tree may reside outside our memory allocation. We + * handle this with an early-termination optimization (when bighint is + * set to -1) on the scan. The memory allocation is only large enough * to cover the number of blocks requested at creation time even if it * must be encompassed in larger root-node radix. * - * NOTE: the allocator cannot currently allocate more then - * BLIST_BMAP_RADIX blocks per call. It will panic with 'allocation too - * large' if you try. This is an area that could use improvement. The - * radix is large enough that this restriction does not effect the swap + * NOTE: the allocator cannot currently allocate more then + * BLIST_BMAP_RADIX blocks per call. It will panic with 'allocation too + * large' if you try. This is an area that could use improvement. The + * radix is large enough that this restriction does not effect the swap * system, though. Currently only the allocation code is effected by * this algorithmic unfeature. The freeing code can handle arbitrary * ranges. 
@@ -91,8 +91,8 @@ #include #include -#define malloc(a,b,c) malloc(a) -#define free(a,b) free(a) +#define malloc(a, b, c) malloc(a) +#define free(a, b) free(a) typedef unsigned int u_daddr_t; @@ -130,18 +130,18 @@ typedef unsigned int u_daddr_t; */ static daddr_t blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count); -static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t blk, - daddr_t count, daddr_t radix, int skip); +static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t blk, + daddr_t count, daddr_t radix, int skip); static void blst_leaf_free(blmeta_t *scan, daddr_t relblk, int count); -static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, - daddr_t radix, int skip, daddr_t blk); -static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, - daddr_t skip, blist_t dest, daddr_t count); -static daddr_t blst_radix_init(blmeta_t *scan, daddr_t radix, - int skip, daddr_t count); +static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, + daddr_t radix, int skip, daddr_t blk); +static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, + daddr_t skip, blist_t dest, daddr_t count); +static daddr_t blst_radix_init(blmeta_t *scan, daddr_t radix, + int skip, daddr_t count); #ifndef _KERNEL -static void blst_radix_print(blmeta_t *scan, daddr_t blk, - daddr_t radix, int skip, int tab); +static void blst_radix_print(blmeta_t *scan, daddr_t blk, + daddr_t radix, int skip, int tab); #endif #if !defined(__APPLE__) @@ -156,11 +156,11 @@ static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space"); * * blocks must be greater than 0 * - * The smallest blist consists of a single leaf node capable of + * The smallest blist consists of a single leaf node capable of * managing BLIST_BMAP_RADIX blocks. */ -blist_t +blist_t blist_create(daddr_t blocks) { blist_t bl; @@ -195,15 +195,15 @@ blist_create(daddr_t blocks) bl->bl_blocks, bl->bl_blocks * 4 / 1024, (bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024 - ); + ); printf("BLIST raw radix tree contains %d records\n", bl->bl_rootblks); #endif blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks); - return(bl); + return bl; } -void +void blist_destroy(blist_t bl) { free(bl->bl_root, M_SWAP); @@ -216,38 +216,41 @@ blist_destroy(blist_t bl) * not be allocated. */ -daddr_t +daddr_t blist_alloc(blist_t bl, daddr_t count) { daddr_t blk = SWAPBLK_NONE; if (bl) { - if (bl->bl_radix == BLIST_BMAP_RADIX) + if (bl->bl_radix == BLIST_BMAP_RADIX) { blk = blst_leaf_alloc(bl->bl_root, 0, count); - else + } else { blk = blst_meta_alloc(bl->bl_root, 0, count, - bl->bl_radix, bl->bl_skip); + bl->bl_radix, bl->bl_skip); + } - if (blk != SWAPBLK_NONE) + if (blk != SWAPBLK_NONE) { bl->bl_free -= count; + } } - return(blk); + return blk; } /* * blist_free() - free up space in the block bitmap. Return the base - * of a contiguous region. Panic if an inconsistency is + * of a contiguous region. Panic if an inconsistency is found.
*/ -void +void blist_free(blist_t bl, daddr_t blkno, daddr_t count) { if (bl) { - if (bl->bl_radix == BLIST_BMAP_RADIX) + if (bl->bl_radix == BLIST_BMAP_RADIX) { blst_leaf_free(bl->bl_root, blkno, count); - else + } else { blst_meta_free(bl->bl_root, blkno, count, - bl->bl_radix, bl->bl_skip, 0); + bl->bl_radix, bl->bl_skip, 0); + } bl->bl_free += count; } } @@ -263,20 +266,22 @@ blist_free(blist_t bl, daddr_t blkno, daddr_t count) void blist_resize(blist_t *pbl, daddr_t count, int freenew) { - blist_t newbl = blist_create(count); - blist_t save = *pbl; - - *pbl = newbl; - if (count > save->bl_blocks) - count = save->bl_blocks; - blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count); - - /* - * If resizing upwards, should we free the new space or not? - */ - if (freenew && count < newbl->bl_blocks) - blist_free(newbl, count, newbl->bl_blocks - count); - blist_destroy(save); + blist_t newbl = blist_create(count); + blist_t save = *pbl; + + *pbl = newbl; + if (count > save->bl_blocks) { + count = save->bl_blocks; + } + blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count); + + /* + * If resizing upwards, should we free the new space or not? + */ + if (freenew && count < newbl->bl_blocks) { + blist_free(newbl, count, newbl->bl_blocks - count); + } + blist_destroy(save); } #ifdef BLIST_DEBUG @@ -299,7 +304,7 @@ blist_print(blist_t bl) * ALLOCATION SUPPORT FUNCTIONS * ************************************************************************ * - * These support functions do all the actual work. They may seem + * These support functions do all the actual work. They may seem * rather longish, but that's because I've commented them up. The * actual code is straight forward. * @@ -326,28 +331,28 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count) * we have to take care of this case here. */ scan->bm_bighint = 0; - return(SWAPBLK_NONE); + return SWAPBLK_NONE; } if (count == 1) { /* * Optimized code to allocate one bit out of the bitmap */ u_daddr_t mask; - int j = BLIST_BMAP_RADIX/2; + int j = BLIST_BMAP_RADIX / 2; int r = 0; - mask = (u_daddr_t)-1 >> (BLIST_BMAP_RADIX/2); + mask = (u_daddr_t)-1 >> (BLIST_BMAP_RADIX / 2); while (j) { if ((orig & mask) == 0) { - r += j; - orig >>= j; + r += j; + orig >>= j; } j >>= 1; mask >>= j; } scan->u.bmu_bitmap &= ~(1 << r); - return(blk + r); + return blk + r; } #if !defined(__APPLE__) if (count <= BLIST_BMAP_RADIX) { @@ -370,7 +375,7 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count) for (j = 0; j <= n; ++j) { if ((orig & mask) == mask) { scan->u.bmu_bitmap &= ~mask; - return(blk + j); + return blk + j; } mask = (mask << 1); } @@ -379,7 +384,7 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count) * We couldn't allocate count in this subtree, update bighint. */ scan->bm_bighint = count - 1; - return(SWAPBLK_NONE); + return SWAPBLK_NONE; } /* @@ -393,17 +398,17 @@ blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count) static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t blk, daddr_t count, daddr_t radix, - int skip) + int skip) { int i; int next_skip = (skip >> BLIST_META_RADIX_SHIFT); - if (scan->u.bmu_avail == 0) { + if (scan->u.bmu_avail == 0) { /* * ALL-ALLOCATED special case */ scan->bm_bighint = count; - return(SWAPBLK_NONE); + return SWAPBLK_NONE; } if (scan->u.bmu_avail == radix) { @@ -414,8 +419,9 @@ blst_meta_alloc(blmeta_t *scan, daddr_t blk, daddr_t count, daddr_t radix, * sublevel. 
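The count == 1 fast path in blst_leaf_alloc() above deserves a closer look: set bits mean free, and the loop binary-searches for one by testing ever-smaller low halves of the map, shifting past any half that is fully allocated. A stand-alone replay of that loop:

#include <stdint.h>
#include <stdio.h>

#define BLIST_BMAP_RADIX 32

static int
find_free_bit(uint32_t orig)    /* orig = the leaf bitmap; set bits are free */
{
        uint32_t mask = (uint32_t)-1 >> (BLIST_BMAP_RADIX / 2);
        int j = BLIST_BMAP_RADIX / 2;
        int r = 0;

        while (j) {
                if ((orig & mask) == 0) {  /* low j bits all busy: skip them */
                        r += j;
                        orig >>= j;
                }
                j >>= 1;
                mask >>= j;
        }
        return r;                          /* index of a free bit */
}

int
main(void)
{
        /* bitmap where only bit 9 is free; the kernel then clears it
         * with bitmap &= ~(1 << r) and returns blk + r */
        printf("free bit = %d\n", find_free_bit(1u << 9));   /* 9 */
        return 0;
}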
*/ for (i = 1; i <= skip; i += next_skip) { - if (scan[i].bm_bighint == (daddr_t)-1) + if (scan[i].bm_bighint == (daddr_t)-1) { break; + } if (next_skip == 1) { scan[i].u.bmu_bitmap = (u_daddr_t)-1; scan[i].bm_bighint = BLIST_BMAP_RADIX; @@ -434,15 +440,17 @@ blst_meta_alloc(blmeta_t *scan, daddr_t blk, daddr_t count, daddr_t radix, * count fits in object */ daddr_t r; - if (next_skip == 1) + if (next_skip == 1) { r = blst_leaf_alloc(&scan[i], blk, count); - else + } else { r = blst_meta_alloc(&scan[i], blk, count, - radix, next_skip - 1); + radix, next_skip - 1); + } if (r != SWAPBLK_NONE) { scan->u.bmu_avail -= count; - if (scan->bm_bighint > scan->u.bmu_avail) + if (scan->bm_bighint > scan->u.bmu_avail) { scan->bm_bighint = scan->u.bmu_avail; + } return r; } } else if (scan[i].bm_bighint == (daddr_t)-1) { @@ -463,9 +471,10 @@ blst_meta_alloc(blmeta_t *scan, daddr_t blk, daddr_t count, daddr_t radix, /* * We couldn't allocate count in this subtree, update bighint. */ - if (scan->bm_bighint >= count) + if (scan->bm_bighint >= count) { scan->bm_bighint = count - 1; - return(SWAPBLK_NONE); + } + return SWAPBLK_NONE; } /* @@ -490,13 +499,14 @@ blst_leaf_free(blmeta_t *scan, daddr_t blk, int count) mask = ((u_daddr_t)-1 << n) & ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n)); - if (scan->u.bmu_bitmap & mask) + if (scan->u.bmu_bitmap & mask) { panic("blst_radix_free: freeing free block"); + } scan->u.bmu_bitmap |= mask; /* * We could probably do a better job here. We are required to make - * bighint at least as large as the biggest contiguous block of + * bighint at least as large as the biggest contiguous block of * data. If we just shoehorn it, a little extra overhead will * be incured on the next allocation (but only that one typically). */ @@ -514,9 +524,9 @@ blst_leaf_free(blmeta_t *scan, daddr_t blk, int count) * range). */ -static void +static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, daddr_t radix, - int skip, daddr_t blk) + int skip, daddr_t blk) { int i; int next_skip = (skip >> BLIST_META_RADIX_SHIFT); @@ -525,7 +535,7 @@ blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, daddr_t radix, printf("FREE (%x,%d) FROM (%x,%d)\n", freeBlk, count, blk, radix - ); + ); #endif if (scan->u.bmu_avail == 0) { @@ -536,15 +546,17 @@ blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, daddr_t radix, scan->u.bmu_avail = count; scan->bm_bighint = count; - if (count != radix) { + if (count != radix) { for (i = 1; i <= skip; i += next_skip) { - if (scan[i].bm_bighint == (daddr_t)-1) + if (scan[i].bm_bighint == (daddr_t)-1) { break; + } scan[i].bm_bighint = 0; - if (next_skip == 1) + if (next_skip == 1) { scan[i].u.bmu_bitmap = 0; - else + } else { scan[i].u.bmu_avail = 0; + } } /* fall through */ } @@ -557,10 +569,12 @@ blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, daddr_t radix, * ALL-FREE special case. 
*/ - if (scan->u.bmu_avail == radix) + if (scan->u.bmu_avail == radix) { return; - if (scan->u.bmu_avail > radix) + } + if (scan->u.bmu_avail > radix) { panic("blst_meta_free: freeing already free blocks (%d) %d/%d", count, scan->u.bmu_avail, radix); + } /* * Break the free down into its components @@ -576,19 +590,23 @@ blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, daddr_t radix, daddr_t v; v = blk + radix - freeBlk; - if (v > count) + if (v > count) { v = count; + } - if (scan->bm_bighint == (daddr_t)-1) + if (scan->bm_bighint == (daddr_t)-1) { panic("blst_meta_free: freeing unexpected range"); + } - if (next_skip == 1) + if (next_skip == 1) { blst_leaf_free(&scan[i], freeBlk, v); - else + } else { blst_meta_free(&scan[i], freeBlk, v, radix, - next_skip - 1, blk); - if (scan->bm_bighint < scan[i].bm_bighint) - scan->bm_bighint = scan[i].bm_bighint; + next_skip - 1, blk); + } + if (scan->bm_bighint < scan[i].bm_bighint) { + scan->bm_bighint = scan[i].bm_bighint; + } count -= v; freeBlk += v; blk += radix; @@ -603,8 +621,9 @@ blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, daddr_t radix, * tree. The space may not already be free in the destination. */ -static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, - daddr_t skip, blist_t dest, daddr_t count) +static void +blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, + daddr_t skip, blist_t dest, daddr_t count) { int next_skip; int i; @@ -622,15 +641,19 @@ static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, #if !defined(__APPLE__) int i; - for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) - if (v & (1 << i)) + for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) { + if (v & (1 << i)) { blist_free(dest, blk + i, 1); + } + } #else int j; /* Avoid shadow warnings */ - for (j = 0; j < (int)BLIST_BMAP_RADIX && j < count; ++j) - if (v & (1 << j)) + for (j = 0; j < (int)BLIST_BMAP_RADIX && j < count; ++j) { + if (v & (1 << j)) { blist_free(dest, blk + j, 1); + } + } #endif /* __APPLE__ */ } return; @@ -643,16 +666,18 @@ static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, /* * Source all allocated, leave dest allocated */ - if (scan->u.bmu_avail == 0) + if (scan->u.bmu_avail == 0) { return; + } if (scan->u.bmu_avail == radix) { /* * Source all free, free entire dest */ - if (count < radix) + if (count < radix) { blist_free(dest, blk, count); - else + } else { blist_free(dest, blk, radix); + } return; } @@ -660,29 +685,30 @@ static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, next_skip = (skip >> BLIST_META_RADIX_SHIFT); for (i = 1; count && i <= skip; i += next_skip) { - if (scan[i].bm_bighint == (daddr_t)-1) + if (scan[i].bm_bighint == (daddr_t)-1) { break; + } if (count >= radix) { blst_copy( - &scan[i], - blk, - radix, - next_skip - 1, - dest, - radix - ); + &scan[i], + blk, + radix, + next_skip - 1, + dest, + radix + ); count -= radix; } else { if (count) { blst_copy( - &scan[i], - blk, - radix, - next_skip - 1, - dest, - count - ); + &scan[i], + blk, + radix, + next_skip - 1, + dest, + count + ); } count = 0; } @@ -699,7 +725,7 @@ static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, * RADIX values we use. 
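 *
 * (Worked example under the same defaults: blist_create(512) settles on
 * radix = 512 with skip = 16, and blst_radix_init() then counts one root
 * meta node plus 16 bitmap leaves, so 512 blocks cost well under twenty
 * blmeta_t nodes of bookkeeping rather than anything close to 512.)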
*/ -static daddr_t +static daddr_t blst_radix_init(blmeta_t *scan, daddr_t radix, int skip, daddr_t count) { int i; @@ -715,7 +741,7 @@ blst_radix_init(blmeta_t *scan, daddr_t radix, int skip, daddr_t count) scan->bm_bighint = 0; scan->u.bmu_bitmap = 0; } - return(memindex); + return memindex; } /* @@ -738,40 +764,42 @@ blst_radix_init(blmeta_t *scan, daddr_t radix, int skip, daddr_t count) * Allocate the entire object */ memindex = i + blst_radix_init( - ((scan) ? &scan[i] : NULL), - radix, - next_skip - 1, - radix - ); + ((scan) ? &scan[i] : NULL), + radix, + next_skip - 1, + radix + ); count -= radix; } else if (count > 0) { /* * Allocate a partial object */ memindex = i + blst_radix_init( - ((scan) ? &scan[i] : NULL), - radix, - next_skip - 1, - count - ); + ((scan) ? &scan[i] : NULL), + radix, + next_skip - 1, + count + ); count = 0; } else { /* * Add terminator and break out */ - if (scan) + if (scan) { scan[i].bm_bighint = (daddr_t)-1; + } break; } } - if (memindex < i) + if (memindex < i) { memindex = i; - return(memindex); + } + return memindex; } #ifdef BLIST_DEBUG -static void +static void blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int skip, int tab) { int i; @@ -780,42 +808,42 @@ blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int skip, int tab) if (radix == BLIST_BMAP_RADIX) { printf( - "%*.*s(%04x,%d): bitmap %08x big=%d\n", - tab, tab, "", - blk, radix, - scan->u.bmu_bitmap, - scan->bm_bighint - ); + "%*.*s(%04x,%d): bitmap %08x big=%d\n", + tab, tab, "", + blk, radix, + scan->u.bmu_bitmap, + scan->bm_bighint + ); return; } if (scan->u.bmu_avail == 0) { printf( - "%*.*s(%04x,%d) ALL ALLOCATED\n", - tab, tab, "", - blk, - radix - ); + "%*.*s(%04x,%d) ALL ALLOCATED\n", + tab, tab, "", + blk, + radix + ); return; } if (scan->u.bmu_avail == radix) { printf( - "%*.*s(%04x,%d) ALL FREE\n", - tab, tab, "", - blk, - radix - ); + "%*.*s(%04x,%d) ALL FREE\n", + tab, tab, "", + blk, + radix + ); return; } printf( - "%*.*s(%04x,%d): subtree (%d/%d) big=%d {\n", - tab, tab, "", - blk, radix, - scan->u.bmu_avail, - radix, - scan->bm_bighint - ); + "%*.*s(%04x,%d): subtree (%d/%d) big=%d {\n", + tab, tab, "", + blk, radix, + scan->u.bmu_avail, + radix, + scan->bm_bighint + ); radix >>= BLIST_META_RADIX_SHIFT; next_skip = (skip >> BLIST_META_RADIX_SHIFT); @@ -824,28 +852,28 @@ blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int skip, int tab) for (i = 1; i <= skip; i += next_skip) { if (scan[i].bm_bighint == (daddr_t)-1) { printf( - "%*.*s(%04x,%d): Terminator\n", - tab, tab, "", - blk, radix - ); + "%*.*s(%04x,%d): Terminator\n", + tab, tab, "", + blk, radix + ); lastState = 0; break; } blst_radix_print( - &scan[i], - blk, - radix, - next_skip - 1, - tab - ); + &scan[i], + blk, + radix, + next_skip - 1, + tab + ); blk += radix; } tab -= 4; printf( - "%*.*s}\n", - tab, tab, "" - ); + "%*.*s}\n", + tab, tab, "" + ); } #endif @@ -880,9 +908,10 @@ main(int ac, char **av) printf("%d/%d/%d> ", bl->bl_free, size, bl->bl_radix); fflush(stdout); - if (fgets(buf, sizeof(buf), stdin) == NULL) + if (fgets(buf, sizeof(buf), stdin) == NULL) { break; - switch(buf[0]) { + } + switch (buf[0]) { case 'r': if (sscanf(buf + 1, "%d", &count) == 1) { blist_resize(&bl, count, 1); @@ -910,19 +939,19 @@ main(int ac, char **av) case '?': case 'h': puts( - "p -print\n" - "a %d -allocate\n" - "f %x %d -free\n" - "r %d -resize\n" - "h/? -help" - ); + "p -print\n" + "a %d -allocate\n" + "f %x %d -free\n" + "r %d -resize\n" + "h/? 
-help" + ); break; default: printf("?\n"); break; } } - return(0); + return 0; } void diff --git a/bsd/dev/dtrace/blist.h b/bsd/dev/dtrace/blist.h index 6988eb4f4..b670ca156 100644 --- a/bsd/dev/dtrace/blist.h +++ b/bsd/dev/dtrace/blist.h @@ -10,7 +10,7 @@ * blkno = blist_alloc(blist, count) * (void) blist_free(blist, blkno, count) * (void) blist_resize(&blist, count, freeextra) - * + * * * Notes: * on creation, the entire list is marked reserved. You should @@ -21,8 +21,8 @@ * SWAPBLK_NONE is returned on failure. This module is typically * capable of managing up to (2^31) blocks per blist, though * the memory utilization would be insane if you actually did - * that. Managing something like 512MB worth of 4K blocks - * eats around 32 KBytes of memory. + * that. Managing something like 512MB worth of 4K blocks + * eats around 32 KBytes of memory. * * $FreeBSD: src/sys/sys/blist.h,v 1.2 1999/08/28 00:51:33 peter Exp $ */ @@ -30,37 +30,37 @@ #ifndef _SYS_BLIST_H_ #define _SYS_BLIST_H_ -#define LOG2(v) (((u_daddr_t)(v) >= 0x80000000U) ? 31 : \ - ((u_daddr_t)(v) >= 0x40000000U) ? 30 : \ - ((u_daddr_t)(v) >= 0x20000000U) ? 29 : \ - ((u_daddr_t)(v) >= 0x10000000U) ? 28 : \ - ((u_daddr_t)(v) >= 0x08000000U) ? 27 : \ - ((u_daddr_t)(v) >= 0x04000000U) ? 26 : \ - ((u_daddr_t)(v) >= 0x02000000U) ? 25 : \ - ((u_daddr_t)(v) >= 0x01000000U) ? 24 : \ - ((u_daddr_t)(v) >= 0x00800000U) ? 23 : \ - ((u_daddr_t)(v) >= 0x00400000U) ? 22 : \ - ((u_daddr_t)(v) >= 0x00200000U) ? 21 : \ - ((u_daddr_t)(v) >= 0x00100000U) ? 20 : \ - ((u_daddr_t)(v) >= 0x00080000U) ? 19 : \ - ((u_daddr_t)(v) >= 0x00040000U) ? 18 : \ - ((u_daddr_t)(v) >= 0x00020000U) ? 17 : \ - ((u_daddr_t)(v) >= 0x00010000U) ? 16 : \ - ((u_daddr_t)(v) >= 0x00008000U) ? 15 : \ - ((u_daddr_t)(v) >= 0x00004000U) ? 14 : \ - ((u_daddr_t)(v) >= 0x00002000U) ? 13 : \ - ((u_daddr_t)(v) >= 0x00001000U) ? 12 : \ - ((u_daddr_t)(v) >= 0x00000800U) ? 11 : \ - ((u_daddr_t)(v) >= 0x00000400U) ? 10 : \ - ((u_daddr_t)(v) >= 0x00000200U) ? 9 : \ - ((u_daddr_t)(v) >= 0x00000100U) ? 8 : \ - ((u_daddr_t)(v) >= 0x00000080U) ? 7 : \ - ((u_daddr_t)(v) >= 0x00000040U) ? 6 : \ - ((u_daddr_t)(v) >= 0x00000020U) ? 5 : \ - ((u_daddr_t)(v) >= 0x00000010U) ? 4 : \ - ((u_daddr_t)(v) >= 0x00000008U) ? 3 : \ - ((u_daddr_t)(v) >= 0x00000004U) ? 2 : \ - ((u_daddr_t)(v) >= 0x00000002U) ? 1 : 0) +#define LOG2(v) (((u_daddr_t)(v) >= 0x80000000U) ? 31 : \ + ((u_daddr_t)(v) >= 0x40000000U) ? 30 : \ + ((u_daddr_t)(v) >= 0x20000000U) ? 29 : \ + ((u_daddr_t)(v) >= 0x10000000U) ? 28 : \ + ((u_daddr_t)(v) >= 0x08000000U) ? 27 : \ + ((u_daddr_t)(v) >= 0x04000000U) ? 26 : \ + ((u_daddr_t)(v) >= 0x02000000U) ? 25 : \ + ((u_daddr_t)(v) >= 0x01000000U) ? 24 : \ + ((u_daddr_t)(v) >= 0x00800000U) ? 23 : \ + ((u_daddr_t)(v) >= 0x00400000U) ? 22 : \ + ((u_daddr_t)(v) >= 0x00200000U) ? 21 : \ + ((u_daddr_t)(v) >= 0x00100000U) ? 20 : \ + ((u_daddr_t)(v) >= 0x00080000U) ? 19 : \ + ((u_daddr_t)(v) >= 0x00040000U) ? 18 : \ + ((u_daddr_t)(v) >= 0x00020000U) ? 17 : \ + ((u_daddr_t)(v) >= 0x00010000U) ? 16 : \ + ((u_daddr_t)(v) >= 0x00008000U) ? 15 : \ + ((u_daddr_t)(v) >= 0x00004000U) ? 14 : \ + ((u_daddr_t)(v) >= 0x00002000U) ? 13 : \ + ((u_daddr_t)(v) >= 0x00001000U) ? 12 : \ + ((u_daddr_t)(v) >= 0x00000800U) ? 11 : \ + ((u_daddr_t)(v) >= 0x00000400U) ? 10 : \ + ((u_daddr_t)(v) >= 0x00000200U) ? 9 : \ + ((u_daddr_t)(v) >= 0x00000100U) ? 8 : \ + ((u_daddr_t)(v) >= 0x00000080U) ? 7 : \ + ((u_daddr_t)(v) >= 0x00000040U) ? 6 : \ + ((u_daddr_t)(v) >= 0x00000020U) ? 5 : \ + ((u_daddr_t)(v) >= 0x00000010U) ? 
4 : \ + ((u_daddr_t)(v) >= 0x00000008U) ? 3 : \ + ((u_daddr_t)(v) >= 0x00000004U) ? 2 : \ + ((u_daddr_t)(v) >= 0x00000002U) ? 1 : 0) /* * blmeta and bl_bitmap_t MUST be a power of 2 in size. @@ -68,27 +68,27 @@ typedef struct blmeta { union { - daddr_t bmu_avail; /* space available under us */ - u_daddr_t bmu_bitmap; /* bitmap if we are a leaf */ + daddr_t bmu_avail; /* space available under us */ + u_daddr_t bmu_bitmap; /* bitmap if we are a leaf */ } u; - daddr_t bm_bighint; /* biggest contiguous block hint*/ + daddr_t bm_bighint; /* biggest contiguous block hint*/ } blmeta_t; typedef struct blist { - daddr_t bl_blocks; /* area of coverage */ - daddr_t bl_radix; /* coverage radix */ - daddr_t bl_skip; /* starting skip */ - daddr_t bl_free; /* number of free blocks */ - blmeta_t *bl_root; /* root of radix tree */ - daddr_t bl_rootblks; /* daddr_t blks allocated for tree */ + daddr_t bl_blocks; /* area of coverage */ + daddr_t bl_radix; /* coverage radix */ + daddr_t bl_skip; /* starting skip */ + daddr_t bl_free; /* number of free blocks */ + blmeta_t *bl_root; /* root of radix tree */ + daddr_t bl_rootblks; /* daddr_t blks allocated for tree */ } *blist_t; -#define BLIST_META_RADIX 16 -#define BLIST_META_RADIX_SHIFT LOG2(BLIST_META_RADIX) -#define BLIST_BMAP_RADIX (sizeof(u_daddr_t)*8) -#define BLIST_BMAP_RADIX_SHIFT LOG2(BLIST_BMAP_RADIX) +#define BLIST_META_RADIX 16 +#define BLIST_META_RADIX_SHIFT LOG2(BLIST_META_RADIX) +#define BLIST_BMAP_RADIX (sizeof(u_daddr_t)*8) +#define BLIST_BMAP_RADIX_SHIFT LOG2(BLIST_BMAP_RADIX) -#define BLIST_MAX_ALLOC BLIST_BMAP_RADIX +#define BLIST_MAX_ALLOC BLIST_BMAP_RADIX extern blist_t blist_create(daddr_t blocks); extern void blist_destroy(blist_t blist); @@ -97,4 +97,4 @@ extern void blist_free(blist_t blist, daddr_t blkno, daddr_t count); extern void blist_print(blist_t blist); extern void blist_resize(blist_t *pblist, daddr_t count, int freenew); -#endif /* _SYS_BLIST_H_ */ +#endif /* _SYS_BLIST_H_ */ diff --git a/bsd/dev/dtrace/dtrace.c b/bsd/dev/dtrace/dtrace.c index a83adc712..6de3c98c4 100644 --- a/bsd/dev/dtrace/dtrace.c +++ b/bsd/dev/dtrace/dtrace.c @@ -323,26 +323,60 @@ static dtrace_pattr_t dtrace_provider_attr = { }; static void -dtrace_nullop(void) -{} +dtrace_provide_nullop(void *arg, const dtrace_probedesc_t *desc) +{ +#pragma unused(arg, desc) +} + +static void +dtrace_provide_module_nullop(void *arg, struct modctl *ctl) +{ +#pragma unused(arg, ctl) +} static int -dtrace_enable_nullop(void) +dtrace_enable_nullop(void *arg, dtrace_id_t id, void *parg) { +#pragma unused(arg, id, parg) return (0); } +static void +dtrace_disable_nullop(void *arg, dtrace_id_t id, void *parg) +{ +#pragma unused(arg, id, parg) +} + +static void +dtrace_suspend_nullop(void *arg, dtrace_id_t id, void *parg) +{ +#pragma unused(arg, id, parg) +} + +static void +dtrace_resume_nullop(void *arg, dtrace_id_t id, void *parg) +{ +#pragma unused(arg, id, parg) +} + +static void +dtrace_destroy_nullop(void *arg, dtrace_id_t id, void *parg) +{ +#pragma unused(arg, id, parg) +} + + static dtrace_pops_t dtrace_provider_ops = { - .dtps_provide = (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop, - .dtps_provide_module = (void (*)(void *, struct modctl *))dtrace_nullop, - .dtps_enable = (int (*)(void *, dtrace_id_t, void *))dtrace_nullop, - .dtps_disable = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, - .dtps_suspend = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, - .dtps_resume = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, + .dtps_provide = 
dtrace_provide_nullop, + .dtps_provide_module = dtrace_provide_module_nullop, + .dtps_enable = dtrace_enable_nullop, + .dtps_disable = dtrace_disable_nullop, + .dtps_suspend = dtrace_suspend_nullop, + .dtps_resume = dtrace_resume_nullop, .dtps_getargdesc = NULL, .dtps_getargval = NULL, .dtps_usermode = NULL, - .dtps_destroy = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, + .dtps_destroy = dtrace_destroy_nullop, }; static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */ @@ -550,7 +584,7 @@ dtrace_load##bits(uintptr_t addr) \ { \ volatile vm_offset_t recover = (vm_offset_t)&&dtraceLoadRecover##bits; \ *flags |= CPU_DTRACE_NOFAULT; \ - recover = dtrace_set_thread_recover(current_thread(), recover); \ + recover = dtrace_sign_and_set_thread_recover(current_thread(), recover); \ /*CSTYLED*/ \ /* \ * PR6394061 - avoid device memory that is unpredictably \ @@ -7939,22 +7973,19 @@ dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, if (pops->dtps_provide == NULL) { ASSERT(pops->dtps_provide_module != NULL); - provider->dtpv_pops.dtps_provide = - (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; + provider->dtpv_pops.dtps_provide = dtrace_provide_nullop; } if (pops->dtps_provide_module == NULL) { ASSERT(pops->dtps_provide != NULL); provider->dtpv_pops.dtps_provide_module = - (void (*)(void *, struct modctl *))dtrace_nullop; + dtrace_provide_module_nullop; } if (pops->dtps_suspend == NULL) { ASSERT(pops->dtps_resume == NULL); - provider->dtpv_pops.dtps_suspend = - (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; - provider->dtpv_pops.dtps_resume = - (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; + provider->dtpv_pops.dtps_suspend = dtrace_suspend_nullop; + provider->dtpv_pops.dtps_resume = dtrace_resume_nullop; } provider->dtpv_arg = arg; diff --git a/bsd/dev/dtrace/dtrace_glue.c b/bsd/dev/dtrace/dtrace_glue.c index bfda934bc..d33a8f030 100644 --- a/bsd/dev/dtrace/dtrace_glue.c +++ b/bsd/dev/dtrace/dtrace_glue.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -84,7 +84,6 @@ void dtrace_sprunlock(proc_t *p) { lck_mtx_unlock(&p->p_dtrace_sprlock); - } /* Not called from probe context */ @@ -152,9 +151,10 @@ uread(proc_t *p, void *buf, user_size_t len, user_addr_t a) if (map) { ret = vm_map_read_user( map, (vm_map_address_t)a, buf, (vm_size_t)len); vm_map_deallocate(map); - } else + } else { ret = KERN_TERMINATED; - + } + return (int)ret; } @@ -181,15 +181,16 @@ uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a) vm_map_t map = get_task_map_reference(task); if (map) { /* Find the memory permissions. */ - uint32_t nestingDepth=999999; + uint32_t nestingDepth = 999999; vm_region_submap_short_info_data_64_t info; mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; mach_vm_address_t address = (mach_vm_address_t)a; mach_vm_size_t sizeOfRegion = (mach_vm_size_t)len; - + ret = mach_vm_region_recurse(map, &address, &sizeOfRegion, &nestingDepth, (vm_region_recurse_info_t)&info, &count); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { goto done; + } vm_prot_t reprotect; @@ -199,43 +200,45 @@ uwrite(proc_t *p, void *buf, user_size_t len, user_addr_t a) if (info.max_protection & VM_PROT_WRITE) { /* The memory is not currently writable, but can be made writable. */ - ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE); + ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, (reprotect & ~VM_PROT_EXECUTE) | VM_PROT_WRITE); } else { /* * The memory is not currently writable, and cannot be made writable. We need to COW this memory. * * Strange, we can't just say "reprotect | VM_PROT_COPY", that fails. */ - ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE); + ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE); } - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { goto done; - + } } else { /* The memory was already writable. */ reprotect = VM_PROT_NONE; } ret = vm_map_write_user( map, - buf, - (vm_map_address_t)a, - (vm_size_t)len); + buf, + (vm_map_address_t)a, + (vm_size_t)len); dtrace_flush_caches(); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { goto done; + } if (reprotect != VM_PROT_NONE) { ASSERT(reprotect & VM_PROT_EXECUTE); - ret = mach_vm_protect (map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect); + ret = mach_vm_protect(map, (mach_vm_offset_t)a, (mach_vm_size_t)len, 0, reprotect); } done: vm_map_deallocate(map); - } else + } else { ret = KERN_TERMINATED; + } return (int)ret; } @@ -257,30 +260,32 @@ cpu_core_t *cpu_core; /* XXX TLB lockdown? */ /* * dtrace_CRED() can be called from probe context. We cannot simply call kauth_cred_get() since * that function may try to resolve a lazy credential binding, which entails taking the proc_lock. - */ + */ cred_t * dtrace_CRED(void) { struct uthread *uthread = get_bsdthread_info(current_thread()); - if (uthread == NULL) + if (uthread == NULL) { return NULL; - else + } else { return uthread->uu_ucred; /* May return NOCRED which is defined to be 0 */ + } } -#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr)) -#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? \ - HAS_ALLPRIVS(cr) : \ - PRIV_ISASSERT(&CR_OEPRIV(cr), pr)) +#define HAS_ALLPRIVS(cr) priv_isfullset(&CR_OEPRIV(cr)) +#define HAS_PRIVILEGE(cr, pr) ((pr) == PRIV_ALL ? 
\ + HAS_ALLPRIVS(cr) : \ + PRIV_ISASSERT(&CR_OEPRIV(cr), pr)) -int PRIV_POLICY_CHOICE(void* cred, int priv, int all) +int +PRIV_POLICY_CHOICE(void* cred, int priv, int all) { #pragma unused(priv, all) return kauth_cred_issuser(cred); /* XXX TODO: How is this different from PRIV_POLICY_ONLY? */ } -int +int PRIV_POLICY_ONLY(void *cr, int priv, int boolean) { #pragma unused(priv, boolean) @@ -288,7 +293,10 @@ PRIV_POLICY_ONLY(void *cr, int priv, int boolean) } uid_t -crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); } +crgetuid(const cred_t *cr) +{ + cred_t copy_cr = *cr; return kauth_cred_getuid(&copy_cr); +} /* * "cyclic" */ @@ -296,26 +304,26 @@ crgetuid(const cred_t *cr) { cred_t copy_cr = *cr; return kauth_cred_getuid(&cop typedef struct wrap_timer_call { /* node attributes */ - cyc_handler_t hdlr; - cyc_time_t when; - uint64_t deadline; - int cpuid; - boolean_t suspended; - struct timer_call call; + cyc_handler_t hdlr; + cyc_time_t when; + uint64_t deadline; + int cpuid; + boolean_t suspended; + struct timer_call call; /* next item in the linked list */ LIST_ENTRY(wrap_timer_call) entries; } wrap_timer_call_t; -#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL -#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL +#define WAKEUP_REAPER 0x7FFFFFFFFFFFFFFFLL +#define NEARLY_FOREVER 0x7FFFFFFFFFFFFFFELL typedef struct cyc_list { cyc_omni_handler_t cyl_omni; wrap_timer_call_t cyl_wrap_by_cpus[]; #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) -} __attribute__ ((aligned (8))) cyc_list_t; +} __attribute__ ((aligned(8))) cyc_list_t; #else } cyc_list_t; #endif @@ -325,16 +333,18 @@ void (*dtrace_cpu_state_changed_hook)(int, boolean_t) = NULL; void dtrace_cpu_state_changed(int, boolean_t); void -dtrace_install_cpu_hooks(void) { +dtrace_install_cpu_hooks(void) +{ dtrace_cpu_state_changed_hook = dtrace_cpu_state_changed; } void -dtrace_cpu_state_changed(int cpuid, boolean_t is_running) { +dtrace_cpu_state_changed(int cpuid, boolean_t is_running) +{ #pragma unused(cpuid) - wrap_timer_call_t *wrapTC = NULL; - boolean_t suspend = (is_running ? FALSE : TRUE); - dtrace_icookie_t s; + wrap_timer_call_t *wrapTC = NULL; + boolean_t suspend = (is_running ? FALSE : TRUE); + dtrace_icookie_t s; /* Ensure that we're not going to leave the CPU */ s = dtrace_interrupt_disable(); @@ -345,19 +355,19 @@ dtrace_cpu_state_changed(int cpuid, boolean_t is_running) { if (suspend) { assert(!wrapTC->suspended); /* If this fails, we'll panic anyway, so let's do this now. */ - if (!timer_call_cancel(&wrapTC->call)) + if (!timer_call_cancel(&wrapTC->call)) { panic("timer_call_set_suspend() failed to cancel a timer call"); + } wrapTC->suspended = TRUE; } else { /* Rearm the timer, but ensure it was suspended first. */ assert(wrapTC->suspended); clock_deadline_for_periodic_event(wrapTC->when.cyt_interval, mach_absolute_time(), - &wrapTC->deadline); + &wrapTC->deadline); timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline, - TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL); + TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL); wrapTC->suspended = FALSE; } - } /* Restore the previous interrupt state.
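 * Interrupts were disabled up front ("Ensure that we're not going to
 * leave the CPU" above) so this thread cannot migrate, which is what
 * makes the lockless walk of this CPU's cyc_list safe.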
*/ @@ -372,7 +382,7 @@ _timer_call_apply_cyclic( void *ignore, void *vTChdl ) (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg ); - clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) ); + clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline)); timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline, TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL ); } @@ -382,7 +392,7 @@ timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_tim uint64_t now; dtrace_icookie_t s; - timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL ); + timer_call_setup( &(wrapTC->call), _timer_call_apply_cyclic, NULL ); wrapTC->hdlr = *handler; wrapTC->when = *when; @@ -391,15 +401,15 @@ timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_tim now = mach_absolute_time(); wrapTC->deadline = now; - clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) ); + clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline)); /* Insert the timer to the list of the running timers on this CPU, and start it. */ s = dtrace_interrupt_disable(); - wrapTC->cpuid = cpu_number(); - LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries); - timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline, - TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL); - wrapTC->suspended = FALSE; + wrapTC->cpuid = cpu_number(); + LIST_INSERT_HEAD(&cpu_list[wrapTC->cpuid].cpu_cyc_list, wrapTC, entries); + timer_call_enter1(&wrapTC->call, (void*) wrapTC, wrapTC->deadline, + TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL); + wrapTC->suspended = FALSE; dtrace_interrupt_enable(s); return (cyclic_id_t)wrapTC; @@ -414,29 +424,31 @@ timer_call_remove_cyclic(wrap_timer_call_t *wrapTC) assert(wrapTC); assert(cpu_number() == wrapTC->cpuid); - if (!timer_call_cancel(&wrapTC->call)) + if (!timer_call_cancel(&wrapTC->call)) { panic("timer_call_remove_cyclic() failed to cancel a timer call"); + } - LIST_REMOVE(wrapTC, entries); + LIST_REMOVE(wrapTC, entries); } static void * timer_call_get_cyclic_arg(wrap_timer_call_t *wrapTC) { - return (wrapTC ? wrapTC->hdlr.cyh_arg : NULL); + return wrapTC ? wrapTC->hdlr.cyh_arg : NULL; } cyclic_id_t cyclic_timer_add(cyc_handler_t *handler, cyc_time_t *when) { wrap_timer_call_t *wrapTC = _MALLOC(sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK); - if (NULL == wrapTC) + if (NULL == wrapTC) { return CYCLIC_NONE; - else + } else { return timer_call_add_cyclic( wrapTC, handler, when ); + } } -void +void cyclic_timer_remove(cyclic_id_t cyclic) { ASSERT( cyclic != CYCLIC_NONE ); @@ -465,10 +477,11 @@ cyclic_id_list_t cyclic_add_omni(cyc_omni_handler_t *omni) { cyc_list_t *cyc_list = - _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK); + _MALLOC(sizeof(cyc_list_t) + NCPU * sizeof(wrap_timer_call_t), M_TEMP, M_ZERO | M_WAITOK); - if (NULL == cyc_list) + if (NULL == cyc_list) { return NULL; + } cyc_list->cyl_omni = *omni; @@ -512,7 +525,7 @@ typedef struct wrap_thread_call { } wrap_thread_call_t; /* - * _cyclic_apply will run on some thread under kernel_task. That's OK for the + * _cyclic_apply will run on some thread under kernel_task. That's OK for the * cleaner and the deadman, but too distant in time and place for the profile provider. 
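 *
 * In other words, a sketch of the two registration paths in this file:
 *
 *   cyclic_timer_add(&handler, &when);  // per-CPU timer_call; fires in
 *                                       // interrupt context on that CPU
 *   cyclic_add(&handler, &when);        // thread_call under kernel_task;
 *                                       // subject to scheduling latency
 *
 * The profile provider therefore wants the timer_call flavor, while the
 * cleaner and the deadman can tolerate this one.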
*/ static void @@ -523,12 +536,13 @@ _cyclic_apply( void *ignore, void *vTChdl ) (*(wrapTC->hdlr.cyh_func))( wrapTC->hdlr.cyh_arg ); - clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline) ); + clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, mach_absolute_time(), &(wrapTC->deadline)); (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline ); /* Did cyclic_remove request a wakeup call when this thread call was re-armed? */ - if (wrapTC->when.cyt_interval == WAKEUP_REAPER) + if (wrapTC->when.cyt_interval == WAKEUP_REAPER) { thread_wakeup((event_t)wrapTC); + } } cyclic_id_t @@ -537,8 +551,9 @@ cyclic_add(cyc_handler_t *handler, cyc_time_t *when) uint64_t now; wrap_thread_call_t *wrapTC = _MALLOC(sizeof(wrap_thread_call_t), M_TEMP, M_ZERO | M_WAITOK); - if (NULL == wrapTC) + if (NULL == wrapTC) { return CYCLIC_NONE; + } wrapTC->TChdl = thread_call_allocate( _cyclic_apply, NULL ); wrapTC->hdlr = *handler; @@ -552,7 +567,7 @@ cyclic_add(cyc_handler_t *handler, cyc_time_t *when) now = mach_absolute_time(); wrapTC->deadline = now; - clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) ); + clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline)); (void)thread_call_enter1_delayed( wrapTC->TChdl, (void *)wrapTC, wrapTC->deadline ); return (cyclic_id_t)wrapTC; @@ -581,9 +596,9 @@ cyclic_remove(cyclic_id_t cyclic) ASSERT(ret == THREAD_AWAKENED); } - if (thread_call_free(wrapTC->TChdl)) + if (thread_call_free(wrapTC->TChdl)) { _FREE(wrapTC, M_TEMP); - else { + } else { /* Gut this cyclic and move on ... */ wrapTC->hdlr.cyh_func = noop_cyh_func; wrapTC->when.cyt_interval = NEARLY_FOREVER; @@ -600,7 +615,10 @@ _dtrace_register_anon_DOF(char *name, uchar_t *data, uint_t nelements) } int -ddi_driver_major(dev_info_t *devi) { return (int)major(CAST_DOWN_EXPLICIT(int,devi)); } +ddi_driver_major(dev_info_t *devi) +{ + return (int)major(CAST_DOWN_EXPLICIT(int, devi)); +} int ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type, @@ -609,11 +627,12 @@ ddi_create_minor_node(dev_info_t *dip, const char *name, int spec_type, #pragma unused(spec_type,node_type,flag) dev_t dev = makedev( ddi_driver_major(dip), minor_num ); - if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 )) + if (NULL == devfs_make_node( dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, name, 0 )) { return DDI_FAILURE; - else + } else { return DDI_SUCCESS; -} + } +} void ddi_remove_minor_node(dev_info_t *dip, char *name) @@ -629,7 +648,7 @@ getemajor( dev_t d ) } minor_t -getminor ( dev_t d ) +getminor( dev_t d ) { return (minor_t) minor(d); } @@ -637,7 +656,10 @@ getminor ( dev_t d ) extern void Debugger(const char*); void -debug_enter(char *c) { Debugger(c); } +debug_enter(char *c) +{ + Debugger(c); +} /* * kmem @@ -668,8 +690,9 @@ dt_kmem_zalloc_site(size_t size, int kmflag, vm_allocation_site_t *site) vm_size_t vsize = size; void* buf = kalloc_canblock(&vsize, TRUE, site); - if(!buf) + if (!buf) { return NULL; + } bzero(buf, size); @@ -684,7 +707,9 @@ dt_kmem_free(void *buf, size_t size) * DTrace relies on this, its doing a lot of NULL frees. * A null free causes the debug builds to panic. 
*/ - if (buf == NULL) return; + if (buf == NULL) { + return; + } ASSERT(size > 0); @@ -715,8 +740,9 @@ dt_kmem_alloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation_ */ hdr_size = sizeof(size_t) + sizeof(void*); mem = dt_kmem_alloc_site(size + align + hdr_size, kmflag, site); - if (mem == NULL) + if (mem == NULL) { return NULL; + } mem_aligned = (intptr_t) (((intptr_t) mem + align + hdr_size) & ~(align - 1)); @@ -738,8 +764,9 @@ dt_kmem_zalloc_aligned_site(size_t size, size_t align, int kmflag, vm_allocation buf = dt_kmem_alloc_aligned_site(size, align, kmflag, s); - if(!buf) + if (!buf) { return NULL; + } bzero(buf, size); @@ -754,8 +781,9 @@ dt_kmem_free_aligned(void* buf, size_t size) void **addr_to_free = (void**) (ptr - sizeof(void*)); size_t *size_to_free = (size_t*) (ptr - (sizeof(size_t) + sizeof(void*))); - if (buf == NULL) + if (buf == NULL) { return; + } dt_kmem_free(*addr_to_free, *size_to_free); } @@ -768,20 +796,20 @@ dt_kmem_free_aligned(void* buf, size_t size) */ kmem_cache_t * kmem_cache_create( - const char *name, /* descriptive name for this cache */ - size_t bufsize, /* size of the objects it manages */ - size_t align, /* required object alignment */ - int (*constructor)(void *, void *, int), /* object constructor */ - void (*destructor)(void *, void *), /* object destructor */ - void (*reclaim)(void *), /* memory reclaim callback */ - void *private, /* pass-thru arg for constr/destr/reclaim */ - vmem_t *vmp, /* vmem source for slab allocation */ - int cflags) /* cache creation flags */ + const char *name, /* descriptive name for this cache */ + size_t bufsize, /* size of the objects it manages */ + size_t align, /* required object alignment */ + int (*constructor)(void *, void *, int), /* object constructor */ + void (*destructor)(void *, void *), /* object destructor */ + void (*reclaim)(void *), /* memory reclaim callback */ + void *private, /* pass-thru arg for constr/destr/reclaim */ + vmem_t *vmp, /* vmem source for slab allocation */ + int cflags) /* cache creation flags */ { #pragma unused(name,align,constructor,destructor,reclaim,private,vmp,cflags) return (kmem_cache_t *)bufsize; /* A cookie that tracks the single object size. */ } - + void * kmem_cache_alloc(kmem_cache_t *cp, int kmflag) { @@ -811,33 +839,34 @@ typedef unsigned int u_daddr_t; /* By passing around blist *handles*, the underlying blist can be resized as needed. 
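   For instance, vmem_alloc() below doubles the blist via blist_resize()
   when an allocation fails and retries once; callers hold a stable vmem_t
   pointer throughout because only the handle's blist field is swapped. */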
*/ struct blist_hdl { - blist_t blist; + blist_t blist; }; -vmem_t * +vmem_t * vmem_create(const char *name, void *base, size_t size, size_t quantum, void *ignore5, - void *ignore6, vmem_t *source, size_t qcache_max, int vmflag) + void *ignore6, vmem_t *source, size_t qcache_max, int vmflag) { #pragma unused(name,quantum,ignore5,ignore6,source,qcache_max,vmflag) blist_t bl; struct blist_hdl *p = _MALLOC(sizeof(struct blist_hdl), M_TEMP, M_WAITOK); - + ASSERT(quantum == 1); ASSERT(NULL == ignore5); ASSERT(NULL == ignore6); ASSERT(NULL == source); ASSERT(0 == qcache_max); ASSERT(vmflag & VMC_IDENTIFIER); - + size = MIN(128, size); /* Clamp to 128 initially, since the underlying data structure is pre-allocated */ - + p->blist = bl = blist_create( size ); blist_free(bl, 0, size); - if (base) blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */ - + if (base) { + blist_alloc( bl, (daddr_t)(uintptr_t)base ); /* Chomp off initial ID(s) */ + } return (vmem_t *)p; } - + void * vmem_alloc(vmem_t *vmp, size_t size, int vmflag) { @@ -845,17 +874,18 @@ vmem_alloc(vmem_t *vmp, size_t size, int vmflag) struct blist_hdl *q = (struct blist_hdl *)vmp; blist_t bl = q->blist; daddr_t p; - + p = blist_alloc(bl, (daddr_t)size); - + if ((daddr_t)-1 == p) { blist_resize(&bl, (bl->bl_blocks) << 1, 1); q->blist = bl; p = blist_alloc(bl, (daddr_t)size); - if ((daddr_t)-1 == p) + if ((daddr_t)-1 == p) { panic("vmem_alloc: failure after blist_resize!"); + } } - + return (void *)(uintptr_t)p; } @@ -863,7 +893,7 @@ void vmem_free(vmem_t *vmp, void *vaddr, size_t size) { struct blist_hdl *p = (struct blist_hdl *)vmp; - + blist_free( p->blist, (daddr_t)(uintptr_t)vaddr, (daddr_t)size ); } @@ -871,9 +901,9 @@ void vmem_destroy(vmem_t *vmp) { struct blist_hdl *p = (struct blist_hdl *)vmp; - + blist_destroy( p->blist ); - _FREE( p, sizeof(struct blist_hdl) ); + _FREE( p, sizeof(struct blist_hdl)); } /* @@ -881,17 +911,17 @@ vmem_destroy(vmem_t *vmp) */ /* - * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at + * dtrace_gethrestime() provides the "walltimestamp", a value that is anchored at * January 1, 1970. Because it can be called from probe context, it must take no locks. */ hrtime_t dtrace_gethrestime(void) { - clock_sec_t secs; - clock_nsec_t nanosecs; - uint64_t secs64, ns64; - + clock_sec_t secs; + clock_nsec_t nanosecs; + uint64_t secs64, ns64; + clock_get_calendar_nanotime_nowait(&secs, &nanosecs); secs64 = (uint64_t)secs; ns64 = (uint64_t)nanosecs; @@ -917,7 +947,7 @@ dtrace_abs_to_nano(uint64_t elapsed) * denominator in a fraction. */ - if ( sTimebaseInfo.denom == 0 ) { + if (sTimebaseInfo.denom == 0) { (void) clock_timebase_info(&sTimebaseInfo); } @@ -928,11 +958,11 @@ dtrace_abs_to_nano(uint64_t elapsed) * Provided the final result is representable in 64 bits the following maneuver will * deliver that result without intermediate overflow. 
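 *
 * Spelled out, consistent with the code below: write
 * elapsed = eta32 * 2^32 + eps32.  Then
 *
 *   elapsed * numer / denom
 *       = (numer * eta32 / denom) * 2^32
 *         + ((numer * eta32 % denom) * 2^32 + numer * eps32) / denom
 *
 * and every intermediate term fits in 64 bits, because numer, denom,
 * eta32 and eps32 are all 32-bit quantities.  As a worked check, with the
 * 125/3 timebase of some Apple ARM hardware (24 MHz counter) and
 * elapsed = 2^32 ticks: q32 = 41, r32 = 2, giving
 * 41 * 2^32 + (2 * 2^32) / 3 = 178956970666 ns, about 179 seconds,
 * which matches 2^32 / 24 MHz.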
*/ - if (sTimebaseInfo.denom == sTimebaseInfo.numer) + if (sTimebaseInfo.denom == sTimebaseInfo.numer) { return elapsed; - else if (sTimebaseInfo.denom == 1) + } else if (sTimebaseInfo.denom == 1) { return elapsed * (uint64_t)sTimebaseInfo.numer; - else { + } else { /* Decompose elapsed = eta32 * 2^32 + eps32: */ uint64_t eta32 = elapsed >> 32; uint64_t eps32 = elapsed & 0x00000000ffffffffLL; @@ -944,22 +974,23 @@ dtrace_abs_to_nano(uint64_t elapsed) uint64_t lambda64 = numer * eps32; /* Divide the constituents by denom: */ - uint64_t q32 = mu64/denom; + uint64_t q32 = mu64 / denom; uint64_t r32 = mu64 - (q32 * denom); /* mu64 % denom */ - return (q32 << 32) + ((r32 << 32) + lambda64)/denom; + return (q32 << 32) + ((r32 << 32) + lambda64) / denom; } } hrtime_t dtrace_gethrtime(void) { - static uint64_t start = 0; - - if (start == 0) + static uint64_t start = 0; + + if (start == 0) { start = mach_absolute_time(); - - return dtrace_abs_to_nano(mach_absolute_time() - start); + } + + return dtrace_abs_to_nano(mach_absolute_time() - start); } /* @@ -968,19 +999,21 @@ dtrace_gethrtime(void) uint32_t dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new) { - if (OSCompareAndSwap( (UInt32)cmp, (UInt32)new, (volatile UInt32 *)target )) + if (OSCompareAndSwap((UInt32)cmp, (UInt32)new, (volatile UInt32 *)target )) { return cmp; - else + } else { return ~cmp; /* Must return something *other* than cmp */ + } } void * dtrace_casptr(void *target, void *cmp, void *new) { - if (OSCompareAndSwapPtr( cmp, new, (void**)target )) + if (OSCompareAndSwapPtr( cmp, new, (void**)target )) { return cmp; - else + } else { return (void *)(~(uintptr_t)cmp); /* Must return something *other* than cmp */ + } } /* @@ -1002,7 +1035,9 @@ dtrace_interrupt_enable(dtrace_icookie_t reenable) * MP coordination */ static void -dtrace_sync_func(void) {} +dtrace_sync_func(void) +{ +} /* * dtrace_sync() is not called from probe context. @@ -1019,7 +1054,7 @@ dtrace_sync(void) extern kern_return_t dtrace_copyio_preflight(addr64_t); extern kern_return_t dtrace_copyio_postflight(addr64_t); - + static int dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size) { @@ -1030,21 +1065,20 @@ dtrace_copycheck(user_addr_t uaddr, uintptr_t kaddr, size_t size) ASSERT(kaddr + size >= kaddr); - if ( uaddr + size < uaddr || /* Avoid address wrap. */ - KERN_FAILURE == dtrace_copyio_preflight(uaddr)) /* Machine specific setup/constraints. */ - { + if (uaddr + size < uaddr || /* Avoid address wrap. */ + KERN_FAILURE == dtrace_copyio_preflight(uaddr)) { /* Machine specific setup/constraints. */ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; - return (0); + return 0; } - return (1); + return 1; } void dtrace_copyin(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags) { #pragma unused(flags) - + if (dtrace_copycheck( src, dst, len )) { if (copyin((const user_addr_t)src, (char *)dst, (vm_size_t)len)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); @@ -1058,9 +1092,9 @@ void dtrace_copyinstr(user_addr_t src, uintptr_t dst, size_t len, volatile uint16_t *flags) { #pragma unused(flags) - + size_t actual; - + if (dtrace_copycheck( src, dst, len )) { /* copyin as many as 'len' bytes. 
*/ int error = copyinstr((const user_addr_t)src, (char *)dst, (vm_size_t)len, &actual); @@ -1083,7 +1117,7 @@ void dtrace_copyout(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags) { #pragma unused(flags) - + if (dtrace_copycheck( dst, src, len )) { if (copyout((const void *)src, dst, (vm_size_t)len)) { DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); @@ -1097,11 +1131,10 @@ void dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t *flags) { #pragma unused(flags) - + size_t actual; if (dtrace_copycheck( dst, src, len )) { - /* * ENAMETOOLONG is returned when 'len' bytes have been copied out but the NUL terminator was * not encountered. We raise CPU_DTRACE_BADADDR in that case. @@ -1118,7 +1151,8 @@ dtrace_copyoutstr(uintptr_t src, user_addr_t dst, size_t len, volatile uint16_t extern const int copysize_limit_panic; -int dtrace_copy_maxsize(void) +int +dtrace_copy_maxsize(void) { return copysize_limit_panic; } @@ -1132,19 +1166,21 @@ dtrace_buffer_copyout(const void *kaddr, user_addr_t uaddr, vm_size_t nbytes) * Partition the copyout in copysize_limit_panic-sized chunks */ while (nbytes >= (vm_size_t)maxsize) { - if (copyout(kaddr, uaddr, maxsize) != 0) - return (EFAULT); + if (copyout(kaddr, uaddr, maxsize) != 0) { + return EFAULT; + } nbytes -= maxsize; uaddr += maxsize; kaddr += maxsize; } if (nbytes > 0) { - if (copyout(kaddr, uaddr, nbytes) != 0) - return (EFAULT); + if (copyout(kaddr, uaddr, nbytes) != 0) { + return EFAULT; + } } - return (0); + return 0; } uint8_t @@ -1162,7 +1198,7 @@ dtrace_fuword8(user_addr_t uaddr) } DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return(ret); + return ret; } uint16_t @@ -1180,7 +1216,7 @@ dtrace_fuword16(user_addr_t uaddr) } DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return(ret); + return ret; } uint32_t @@ -1198,7 +1234,7 @@ dtrace_fuword32(user_addr_t uaddr) } DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return(ret); + return ret; } uint64_t @@ -1216,7 +1252,7 @@ dtrace_fuword64(user_addr_t uaddr) } DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return(ret); + return ret; } /* @@ -1310,7 +1346,7 @@ dtrace_tally_fault(user_addr_t uaddr) { DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); cpu_core[CPU->cpu_id].cpuc_dtrace_illval = uaddr; - return( DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE ); + return DTRACE_CPUFLAG_ISSET(CPU_DTRACE_NOFAULT) ? TRUE : FALSE; } #define TOTTY 0x02 @@ -1323,7 +1359,8 @@ vuprintf(const char *format, va_list ap) } /* Not called from probe context */ -void cmn_err( int level, const char *format, ... ) +void +cmn_err( int level, const char *format, ... ) { #pragma unused(level) va_list alist; @@ -1336,33 +1373,35 @@ void cmn_err( int level, const char *format, ... 
) /* * History: - * 2002-01-24 gvdl Initial implementation of strstr + * 2002-01-24 gvdl Initial implementation of strstr */ __private_extern__ const char * strstr(const char *in, const char *str) { - char c; - size_t len; - if (!in || !str) - return in; - - c = *str++; - if (!c) - return (const char *) in; // Trivial empty string case - - len = strlen(str); - do { - char sc; + char c; + size_t len; + if (!in || !str) { + return in; + } - do { - sc = *in++; - if (!sc) - return (char *) 0; - } while (sc != c); - } while (strncmp(in, str, len) != 0); + c = *str++; + if (!c) { + return (const char *) in; // Trivial empty string case + } + len = strlen(str); + do { + char sc; + + do { + sc = *in++; + if (!sc) { + return (char *) 0; + } + } while (sc != c); + } while (strncmp(in, str, len) != 0); - return (const char *) (in - 1); + return (const char *) (in - 1); } const void* @@ -1375,14 +1414,15 @@ bsearch(const void *key, const void *base0, size_t nmemb, size_t size, int (*com for (lim = nmemb; lim != 0; lim >>= 1) { p = base + (lim >> 1) * size; cmp = (*compar)(key, p); - if (cmp == 0) + if (cmp == 0) { return p; - if (cmp > 0) { /* key > p: move right */ + } + if (cmp > 0) { /* key > p: move right */ base = (const char *)p + size; lim--; - } /* else move left */ + } /* else move left */ } - return (NULL); + return NULL; } /* @@ -1403,10 +1443,11 @@ dtrace_getstackdepth(int aframes) int depth = 0; int on_intr; - if ((on_intr = CPU_ON_INTR(CPU)) != 0) + if ((on_intr = CPU_ON_INTR(CPU)) != 0) { stacktop = (struct frame *)dtrace_get_cpu_int_stack_top(); - else + } else { stacktop = (struct frame *)(dtrace_get_kernel_stack(current_thread()) + kernel_stack_size); + } minfp = fp; @@ -1422,10 +1463,10 @@ dtrace_getstackdepth(int aframes) /* * Hop from interrupt stack to thread stack. */ - vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread()); + vm_offset_t kstack_base = dtrace_get_kernel_stack(current_thread()); - minfp = (struct frame *)kstack_base; - stacktop = (struct frame *)(kstack_base + kernel_stack_size); + minfp = (struct frame *)kstack_base; + stacktop = (struct frame *)(kstack_base + kernel_stack_size); on_intr = 0; continue; @@ -1437,10 +1478,11 @@ dtrace_getstackdepth(int aframes) minfp = fp; } - if (depth <= aframes) - return (0); + if (depth <= aframes) { + return 0; + } - return (depth - aframes); + return depth - aframes; } int @@ -1453,10 +1495,14 @@ dtrace_addr_in_module(void* addr, struct modctl *ctl) * Unconsidered */ void -dtrace_vtime_enable(void) {} +dtrace_vtime_enable(void) +{ +} void -dtrace_vtime_disable(void) {} +dtrace_vtime_disable(void) +{ +} #else /* else ! CONFIG_DTRACE */ @@ -1470,10 +1516,12 @@ dtrace_vtime_disable(void) {} kern_return_t _dtrace_register_anon_DOF(char *, unsigned char *, uint32_t); -kern_return_t _dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) { +kern_return_t +_dtrace_register_anon_DOF(char *arg1, unsigned char *arg2, uint32_t arg3) +{ #pragma unused(arg1, arg2, arg3) - return KERN_FAILURE; + return KERN_FAILURE; } #endif /* CONFIG_DTRACE */ diff --git a/bsd/dev/dtrace/dtrace_ptss.c b/bsd/dev/dtrace/dtrace_ptss.c index 6741f5563..b2cc59819 100644 --- a/bsd/dev/dtrace/dtrace_ptss.c +++ b/bsd/dev/dtrace/dtrace_ptss.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -48,7 +48,8 @@ * page of memory, the underlying kernel _MALLOC may block. */ struct dtrace_ptss_page_entry* -dtrace_ptss_claim_entry_locked(struct proc* p) { +dtrace_ptss_claim_entry_locked(struct proc* p) +{ LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED); struct dtrace_ptss_page_entry* entry = NULL; @@ -61,8 +62,9 @@ dtrace_ptss_claim_entry_locked(struct proc* p) { struct dtrace_ptss_page* page = dtrace_ptss_allocate_page(p); // Make sure we actually got a page - if (page == NULL) + if (page == NULL) { return NULL; + } // Add the page to the page list page->next = p->p_dtrace_ptss_pages; @@ -70,18 +72,19 @@ dtrace_ptss_claim_entry_locked(struct proc* p) { // CAS the entries onto the free list. do { - page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE-1].next = p->p_dtrace_ptss_free_list; - } while (!OSCompareAndSwapPtr((void *)page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE-1].next, - (void *)&page->entries[0], - (void * volatile *)&p->p_dtrace_ptss_free_list)); - + page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE - 1].next = p->p_dtrace_ptss_free_list; + } while (!OSCompareAndSwapPtr((void *)page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE - 1].next, + (void *)&page->entries[0], + (void * volatile *)&p->p_dtrace_ptss_free_list)); + // Now that we've added to the free list, try again. continue; } // Claim temp - if (!OSCompareAndSwapPtr((void *)temp, (void *)temp->next, (void * volatile *)&p->p_dtrace_ptss_free_list)) + if (!OSCompareAndSwapPtr((void *)temp, (void *)temp->next, (void * volatile *)&p->p_dtrace_ptss_free_list)) { continue; + } // At this point, we own temp. entry = temp; @@ -96,7 +99,8 @@ dtrace_ptss_claim_entry_locked(struct proc* p) { * This function does not require any locks to be held on entry. */ struct dtrace_ptss_page_entry* -dtrace_ptss_claim_entry(struct proc* p) { +dtrace_ptss_claim_entry(struct proc* p) +{ // Verify no locks held on entry LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED); LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_NOTOWNED); @@ -114,8 +118,9 @@ dtrace_ptss_claim_entry(struct proc* p) { } // Claim temp - if (!OSCompareAndSwapPtr((void *)temp, (void *)temp->next, (void * volatile *)&p->p_dtrace_ptss_free_list)) + if (!OSCompareAndSwapPtr((void *)temp, (void *)temp->next, (void * volatile *)&p->p_dtrace_ptss_free_list)) { continue; + } // At this point, we own temp. 
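// (The claim above is a lock-free stack pop: OSCompareAndSwapPtr swings
// p_dtrace_ptss_free_list from temp to temp->next only if no other thread
// raced us; on failure the loop simply re-reads the head and retries.)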
entry = temp; @@ -134,7 +139,8 @@ dtrace_ptss_claim_entry(struct proc* p) { * of releasing an entry to the free list is ignored. */ void -dtrace_ptss_release_entry(struct proc* p, struct dtrace_ptss_page_entry* e) { +dtrace_ptss_release_entry(struct proc* p, struct dtrace_ptss_page_entry* e) +{ if (p && p->p_dtrace_ptss_pages && e) { do { e->next = p->p_dtrace_ptss_free_list; @@ -144,7 +150,7 @@ dtrace_ptss_release_entry(struct proc* p, struct dtrace_ptss_page_entry* e) { /* * This function allocates a new page in the target process's address space. - * + * * It returns a dtrace_ptss_page that has its entries chained, with the last * entries next field set to NULL. It does not add the page or the entries to * the process's page/entry lists. @@ -156,24 +162,26 @@ dtrace_ptss_allocate_page(struct proc* p) { // Allocate the kernel side data struct dtrace_ptss_page* ptss_page = _MALLOC(sizeof(struct dtrace_ptss_page), M_TEMP, M_ZERO | M_WAITOK); - if (ptss_page == NULL) + if (ptss_page == NULL) { return NULL; + } // Now allocate a page in user space and set its protections to allow execute. task_t task = p->task; vm_map_t map = get_task_map_reference(task); - if (map == NULL) - goto err; + if (map == NULL) { + goto err; + } mach_vm_size_t size = PAGE_MAX_SIZE; mach_vm_offset_t addr = 0; mach_vm_offset_t write_addr = 0; - /* + /* * The embedded OS has extra permissions for writable and executable pages. * To ensure correct permissions, we must set the page protections separately. */ - vm_prot_t cur_protection = VM_PROT_READ|VM_PROT_EXECUTE; - vm_prot_t max_protection = VM_PROT_READ|VM_PROT_EXECUTE|VM_PROT_WRITE; + vm_prot_t cur_protection = VM_PROT_READ | VM_PROT_EXECUTE; + vm_prot_t max_protection = VM_PROT_READ | VM_PROT_EXECUTE | VM_PROT_WRITE; kern_return_t kr = mach_vm_map_kernel(map, &addr, size, 0, VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, IPC_PORT_NULL, 0, FALSE, cur_protection, max_protection, VM_INHERIT_DEFAULT); if (kr != KERN_SUCCESS) { @@ -183,24 +191,26 @@ dtrace_ptss_allocate_page(struct proc* p) * If on embedded, remap the scratch space as writable at another * virtual address */ - kr = mach_vm_remap_kernel(map, &write_addr, size, 0, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE, map, addr, FALSE, &cur_protection, &max_protection, VM_INHERIT_DEFAULT); - if (kr != KERN_SUCCESS || !(max_protection & VM_PROT_WRITE)) + kr = mach_vm_remap_kernel(map, &write_addr, size, 0, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE, map, addr, FALSE, &cur_protection, &max_protection, VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS || !(max_protection & VM_PROT_WRITE)) { goto err; + } - kr = mach_vm_protect (map, (mach_vm_offset_t)write_addr, (mach_vm_size_t)size, 0, VM_PROT_READ | VM_PROT_WRITE); - if (kr != KERN_SUCCESS) + kr = mach_vm_protect(map, (mach_vm_offset_t)write_addr, (mach_vm_size_t)size, 0, VM_PROT_READ | VM_PROT_WRITE); + if (kr != KERN_SUCCESS) { goto err; + } // Chain the page entries. 
int i; - for (i=0; i<DTRACE_PTSS_ENTRIES_PER_PAGE-1; i++) { + for (i = 0; i < DTRACE_PTSS_ENTRIES_PER_PAGE - 1; i++) { ptss_page->entries[i].addr = addr + (i * DTRACE_PTSS_SCRATCH_SPACE_PER_THREAD); ptss_page->entries[i].write_addr = write_addr + (i * DTRACE_PTSS_SCRATCH_SPACE_PER_THREAD); - ptss_page->entries[i].next = &ptss_page->entries[i+1]; + ptss_page->entries[i].next = &ptss_page->entries[i + 1]; } // The last entry should point to NULL - ptss_page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE-1].next = NULL; + ptss_page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE - 1].next = NULL; vm_map_deallocate(map); @@ -209,15 +219,16 @@ dtrace_ptss_allocate_page(struct proc* p) err: _FREE(ptss_page, M_TEMP); - if (map) - vm_map_deallocate(map); + if (map) { + vm_map_deallocate(map); + } return NULL; } /* * This function frees an existing page in the target process's address space. - * + * * It does not alter any of the process's page/entry lists. * * TODO: Inline in dtrace_ptrace_exec_exit? @@ -243,10 +254,11 @@ dtrace_ptss_free_page(struct proc* p, struct dtrace_ptss_page* ptss_page) /* * This function assumes that the target process has been - * suspended, and the proc_lock & sprlock is held + * suspended, and the proc_lock & sprlock is held */ void -dtrace_ptss_enable(struct proc* p) { +dtrace_ptss_enable(struct proc* p) +{ LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED); LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED); @@ -267,7 +279,8 @@ dtrace_ptss_enable(struct proc* p) { * It assumes the sprlock is held, and the proc_lock is not. */ void -dtrace_ptss_exec_exit(struct proc* p) { +dtrace_ptss_exec_exit(struct proc* p) +{ /* * Should hold sprlock to touch the pages list. Must not * hold the proc lock to avoid deadlock. @@ -282,11 +295,11 @@ dtrace_ptss_exec_exit(struct proc* p) { while (temp != NULL) { struct dtrace_ptss_page* next = temp->next; - + // Do we need to specifically mach_vm_deallocate the user pages? // This can be called when the process is exiting, I believe the proc's // vm_map_t may already be toast. - + // Must be certain to free the kernel memory! _FREE(temp, M_TEMP); temp = next; @@ -303,7 +316,8 @@ dtrace_ptss_exec_exit(struct proc* p) { * Parent and child sprlock should be held, and proc_lock must NOT be held. */ void -dtrace_ptss_fork(struct proc* parent, struct proc* child) { +dtrace_ptss_fork(struct proc* parent, struct proc* child) +{ // The child should not have any pages/entries allocated at this point.
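// (Context for the loop below, as the code reads: fork duplicated the
// parent's vm_map, so the child inherits scratch-page mappings it has no
// kernel-side bookkeeping for; dtrace_ptss_free_page(child, ...) tears
// those down, and the child re-allocates lazily on its first probe hit.)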
// ASSERT(child->p_dtrace_ptss_pages == NULL); // ASSERT(child->p_dtrace_ptss_free_list == NULL); @@ -323,7 +337,7 @@ dtrace_ptss_fork(struct proc* parent, struct proc* child) { // Get page list from *PARENT* struct dtrace_ptss_page* temp = parent->p_dtrace_ptss_pages; - while (temp != NULL) { + while (temp != NULL) { // Freeing the page in the *CHILD* dtrace_ptss_free_page(child, temp); diff --git a/bsd/dev/dtrace/fbt.c b/bsd/dev/dtrace/fbt.c index d785547ca..036d85bcb 100644 --- a/bsd/dev/dtrace/fbt.c +++ b/bsd/dev/dtrace/fbt.c @@ -71,14 +71,14 @@ __private_extern__ void qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *)); -#define FBT_ADDR2NDX(addr) ((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask) -#define FBT_PROBETAB_SIZE 0x8000 /* 32k entries -- 128K total */ +#define FBT_ADDR2NDX(addr) ((((uintptr_t)(addr)) >> 4) & fbt_probetab_mask) +#define FBT_PROBETAB_SIZE 0x8000 /* 32k entries -- 128K total */ -static int fbt_probetab_size; -dtrace_provider_id_t fbt_id; -fbt_probe_t **fbt_probetab; -int fbt_probetab_mask; -static int fbt_verbose = 0; +static int fbt_probetab_size; +dtrace_provider_id_t fbt_id; +fbt_probe_t **fbt_probetab; +int fbt_probetab_mask; +static int fbt_verbose = 0; int ignore_fbt_blacklist = 0; @@ -107,7 +107,7 @@ static const char * critical_blacklist[] = "_ZNK6OSData14getBytesNoCopyEv", /* Data::getBytesNoCopy, IOHibernateSystemWake path */ "__ZN16IOPlatformExpert11haltRestartEj", "__ZN18IODTPlatformExpert11haltRestartEj", - "__ZN9IODTNVRAM13savePanicInfoEPhy" + "__ZN9IODTNVRAM13savePanicInfoEPhy", "_disable_preemption", "_enable_preemption", "alternate_debugger_enter", @@ -280,12 +280,14 @@ static const char * probe_ctx_closure[] = "mt_update_thread", "ovbcopy", "panic", - "pmap64_pde", "pmap64_pdpt", "pmap_find_phys", "pmap_get_mapwindow", "pmap_pde", + "pmap_pde_internal0", + "pmap_pde_internal1", "pmap_pte", + "pmap_pte_internal", "pmap_put_mapwindow", "pmap_valid_page", "prf", @@ -305,9 +307,10 @@ static const char * probe_ctx_closure[] = #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" -static int _cmp(const void *a, const void *b) +static int +_cmp(const void *a, const void *b) { - return strncmp((const char *)a, *(const char **)b, strlen((const char *)a) + 1); + return strncmp((const char *)a, *(const char **)b, strlen((const char *)a) + 1); } #pragma clang diagnostic pop /* @@ -323,14 +326,15 @@ fbt_module_excluded(struct modctl* ctl) } if (ctl->mod_loaded == 0) { - return TRUE; + return TRUE; } - /* + /* * If the user sets this, trust they know what they are doing. 
*/ - if (ignore_fbt_blacklist) + if (ignore_fbt_blacklist) { return FALSE; + } /* * These drivers control low level functions that when traced @@ -341,35 +345,44 @@ fbt_module_excluded(struct modctl* ctl) */ #ifdef __x86_64__ - if (strstr(ctl->mod_modname, "AppleACPIEC") != NULL) + if (strstr(ctl->mod_modname, "AppleACPIEC") != NULL) { return TRUE; + } - if (strstr(ctl->mod_modname, "AppleACPIPlatform") != NULL) + if (strstr(ctl->mod_modname, "AppleACPIPlatform") != NULL) { return TRUE; + } - if (strstr(ctl->mod_modname, "AppleRTC") != NULL) + if (strstr(ctl->mod_modname, "AppleRTC") != NULL) { return TRUE; + } - if (strstr(ctl->mod_modname, "IOACPIFamily") != NULL) + if (strstr(ctl->mod_modname, "IOACPIFamily") != NULL) { return TRUE; + } - if (strstr(ctl->mod_modname, "AppleIntelCPUPowerManagement") != NULL) + if (strstr(ctl->mod_modname, "AppleIntelCPUPowerManagement") != NULL) { return TRUE; + } - if (strstr(ctl->mod_modname, "AppleProfile") != NULL) + if (strstr(ctl->mod_modname, "AppleProfile") != NULL) { return TRUE; + } - if (strstr(ctl->mod_modname, "AppleIntelProfile") != NULL) + if (strstr(ctl->mod_modname, "AppleIntelProfile") != NULL) { return TRUE; + } - if (strstr(ctl->mod_modname, "AppleEFI") != NULL) + if (strstr(ctl->mod_modname, "AppleEFI") != NULL) { return TRUE; + } #elif __arm__ || __arm64__ if (LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleARMPlatform") || - LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleARMPL192VIC") || - LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleInterruptController")) + LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleARMPL192VIC") || + LIT_STRNEQL(ctl->mod_modname, "com.apple.driver.AppleInterruptController")) { return TRUE; + } #endif return FALSE; @@ -384,8 +397,9 @@ fbt_excluded(const char* name) /* * If the user set this, trust they know what they are doing. */ - if (ignore_fbt_blacklist) + if (ignore_fbt_blacklist) { return FALSE; + } if (LIT_STRNSTART(name, "dtrace_") && !LIT_STRNSTART(name, "dtrace_safe_")) { /* @@ -398,117 +412,124 @@ fbt_excluded(const char* name) } /* - * Place no probes on critical routines (5221096) - */ - if (bsearch( name, critical_blacklist, CRITICAL_BLACKLIST_COUNT, sizeof(name), _cmp ) != NULL) + * Place no probes on critical routines (5221096) + */ + if (bsearch( name, critical_blacklist, CRITICAL_BLACKLIST_COUNT, sizeof(name), _cmp ) != NULL) { return TRUE; + } /* - * Place no probes that could be hit in probe context. - */ + * Place no probes that could be hit in probe context. + */ if (bsearch( name, probe_ctx_closure, PROBE_CTX_CLOSURE_COUNT, sizeof(name), _cmp ) != NULL) { return TRUE; } /* - * Place no probes that could be hit in probe context. - * In the interests of safety, some of these may be overly cautious. - * Also exclude very low-level "firmware" class calls. 
- */ - if (LIT_STRNSTART(name, "cpu_") || /* Coarse */ - LIT_STRNSTART(name, "platform_") || /* Coarse */ - LIT_STRNSTART(name, "machine_") || /* Coarse */ - LIT_STRNSTART(name, "ml_") || /* Coarse */ - LIT_STRNSTART(name, "PE_") || /* Coarse */ - LIT_STRNSTART(name, "rtc_") || /* Coarse */ - LIT_STRNSTART(name, "_rtc_") || - LIT_STRNSTART(name, "rtclock_") || - LIT_STRNSTART(name, "clock_") || - LIT_STRNSTART(name, "bcopy") || - LIT_STRNSTART(name, "pmap_") || - LIT_STRNSTART(name, "hw_") || /* Coarse */ - LIT_STRNSTART(name, "lapic_") || /* Coarse */ - LIT_STRNSTART(name, "OSAdd") || - LIT_STRNSTART(name, "OSBit") || - LIT_STRNSTART(name, "OSDecrement") || - LIT_STRNSTART(name, "OSIncrement") || - LIT_STRNSTART(name, "OSCompareAndSwap") || - LIT_STRNSTART(name, "etimer_") || - LIT_STRNSTART(name, "dtxnu_kern_") || - LIT_STRNSTART(name, "flush_mmu_tlb_")) + * Place no probes that could be hit in probe context. + * In the interests of safety, some of these may be overly cautious. + * Also exclude very low-level "firmware" class calls. + */ + if (LIT_STRNSTART(name, "cpu_") || /* Coarse */ + LIT_STRNSTART(name, "platform_") || /* Coarse */ + LIT_STRNSTART(name, "machine_") || /* Coarse */ + LIT_STRNSTART(name, "ml_") || /* Coarse */ + LIT_STRNSTART(name, "PE_") || /* Coarse */ + LIT_STRNSTART(name, "rtc_") || /* Coarse */ + LIT_STRNSTART(name, "_rtc_") || + LIT_STRNSTART(name, "rtclock_") || + LIT_STRNSTART(name, "clock_") || + LIT_STRNSTART(name, "bcopy") || + LIT_STRNSTART(name, "pmap_") || + LIT_STRNSTART(name, "hw_") || /* Coarse */ + LIT_STRNSTART(name, "lapic_") || /* Coarse */ + LIT_STRNSTART(name, "OSAdd") || + LIT_STRNSTART(name, "OSBit") || + LIT_STRNSTART(name, "OSDecrement") || + LIT_STRNSTART(name, "OSIncrement") || + LIT_STRNSTART(name, "OSCompareAndSwap") || + LIT_STRNSTART(name, "etimer_") || + LIT_STRNSTART(name, "dtxnu_kern_") || + LIT_STRNSTART(name, "flush_mmu_tlb_")) { return TRUE; + } /* * Fasttrap inner-workings we can't instrument * on Intel (6230149) - */ + */ if (LIT_STRNSTART(name, "fasttrap_") || - LIT_STRNSTART(name, "fuword") || - LIT_STRNSTART(name, "suword")) + LIT_STRNSTART(name, "fuword") || + LIT_STRNSTART(name, "suword")) { return TRUE; + } - if (LIT_STRNSTART(name, "_dtrace")) + if (LIT_STRNSTART(name, "_dtrace")) { return TRUE; /* Shims in dtrace.c */ - - if (LIT_STRNSTART(name, "hibernate_")) + } + if (LIT_STRNSTART(name, "hibernate_")) { return TRUE; + } /* * Place no probes in the exception handling path */ #if __arm__ || __arm64__ if (LIT_STRNSTART(name, "fleh_") || - LIT_STRNSTART(name, "sleh_") || - LIT_STRNSTART(name, "timer_state_event") || - LIT_STRNEQL(name, "get_vfp_enabled")) + LIT_STRNSTART(name, "sleh_") || + LIT_STRNSTART(name, "timer_state_event") || + LIT_STRNEQL(name, "get_vfp_enabled")) { return TRUE; + } if (LIT_STRNSTART(name, "_ZNK15OSMetaClassBase8metaCastEPK11OSMetaClass") || - LIT_STRNSTART(name, "_ZN15OSMetaClassBase12safeMetaCastEPKS_PK11OSMetaClass") || - LIT_STRNSTART(name, "_ZNK11OSMetaClass13checkMetaCastEPK15OSMetaClassBase")) + LIT_STRNSTART(name, "_ZN15OSMetaClassBase12safeMetaCastEPKS_PK11OSMetaClass") || + LIT_STRNSTART(name, "_ZNK11OSMetaClass13checkMetaCastEPK15OSMetaClassBase")) { return TRUE; + } #endif #ifdef __x86_64__ if (LIT_STRNSTART(name, "machine_") || - LIT_STRNSTART(name, "idt64") || - LIT_STRNSTART(name, "ks_") || - LIT_STRNSTART(name, "hndl_") || - LIT_STRNSTART(name, "_intr_") || - LIT_STRNSTART(name, "mapping_") || - LIT_STRNSTART(name, "tsc_") || - LIT_STRNSTART(name, "pmCPU") || - 
LIT_STRNSTART(name, "pms") || - LIT_STRNSTART(name, "usimple_") || - LIT_STRNSTART(name, "lck_spin_lock") || - LIT_STRNSTART(name, "lck_spin_unlock") || - LIT_STRNSTART(name, "absolutetime_to_") || - LIT_STRNSTART(name, "commpage_") || - LIT_STRNSTART(name, "ml_") || - LIT_STRNSTART(name, "PE_") || - LIT_STRNSTART(name, "act_machine") || - LIT_STRNSTART(name, "acpi_") || - LIT_STRNSTART(name, "pal_")) { + LIT_STRNSTART(name, "idt64") || + LIT_STRNSTART(name, "ks_") || + LIT_STRNSTART(name, "hndl_") || + LIT_STRNSTART(name, "_intr_") || + LIT_STRNSTART(name, "mapping_") || + LIT_STRNSTART(name, "tsc_") || + LIT_STRNSTART(name, "pmCPU") || + LIT_STRNSTART(name, "pms") || + LIT_STRNSTART(name, "usimple_") || + LIT_STRNSTART(name, "lck_spin_lock") || + LIT_STRNSTART(name, "lck_spin_unlock") || + LIT_STRNSTART(name, "absolutetime_to_") || + LIT_STRNSTART(name, "commpage_") || + LIT_STRNSTART(name, "ml_") || + LIT_STRNSTART(name, "PE_") || + LIT_STRNSTART(name, "act_machine") || + LIT_STRNSTART(name, "acpi_") || + LIT_STRNSTART(name, "pal_")) { return TRUE; } // Don't Steal Mac OS X - if (LIT_STRNSTART(name, "dsmos_")) + if (LIT_STRNSTART(name, "dsmos_")) { return TRUE; + } #endif /* - * Place no probes that could be hit on the way to the debugger. - */ + * Place no probes that could be hit on the way to the debugger. + */ if (LIT_STRNSTART(name, "kdp_") || - LIT_STRNSTART(name, "kdb_") || - LIT_STRNSTART(name, "debug_")) { + LIT_STRNSTART(name, "kdb_") || + LIT_STRNSTART(name, "debug_")) { return TRUE; } #if KASAN if (LIT_STRNSTART(name, "kasan") || - LIT_STRNSTART(name, "__kasan") || - LIT_STRNSTART(name, "__asan")) { + LIT_STRNSTART(name, "__kasan") || + LIT_STRNSTART(name, "__asan")) { return TRUE; } #endif @@ -516,8 +537,9 @@ fbt_excluded(const char* name) /* * Place no probes that could be hit on the way to a panic. */ - if (NULL != strstr(name, "panic_")) + if (NULL != strstr(name, "panic_")) { return TRUE; + } return FALSE; } @@ -552,7 +574,7 @@ fbt_destroy(void *arg, dtrace_id_t id, void *parg) } next = fbt->fbtp_next; - kmem_free(fbt, sizeof (fbt_probe_t)); + kmem_free(fbt, sizeof(fbt_probe_t)); fbt = next; } while (fbt != NULL); @@ -566,71 +588,69 @@ fbt_enable(void *arg, dtrace_id_t id, void *parg) fbt_probe_t *fbt = parg; struct modctl *ctl = NULL; - for (; fbt != NULL; fbt = fbt->fbtp_next) { + for (; fbt != NULL; fbt = fbt->fbtp_next) { + ctl = fbt->fbtp_ctl; - ctl = fbt->fbtp_ctl; + if (!ctl->mod_loaded) { + if (fbt_verbose) { + cmn_err(CE_NOTE, "fbt is failing for probe %s " + "(module %s unloaded)", + fbt->fbtp_name, ctl->mod_modname); + } - if (!ctl->mod_loaded) { - if (fbt_verbose) { - cmn_err(CE_NOTE, "fbt is failing for probe %s " - "(module %s unloaded)", - fbt->fbtp_name, ctl->mod_modname); + continue; } - continue; - } + /* + * Now check that our modctl has the expected load count. If it + * doesn't, this module must have been unloaded and reloaded -- and + * we're not going to touch it. + */ + if (ctl->mod_loadcnt != fbt->fbtp_loadcnt) { + if (fbt_verbose) { + cmn_err(CE_NOTE, "fbt is failing for probe %s " + "(module %s reloaded)", + fbt->fbtp_name, ctl->mod_modname); + } - /* - * Now check that our modctl has the expected load count. If it - * doesn't, this module must have been unloaded and reloaded -- and - * we're not going to touch it. 
- */ - if (ctl->mod_loadcnt != fbt->fbtp_loadcnt) { - if (fbt_verbose) { - cmn_err(CE_NOTE, "fbt is failing for probe %s " - "(module %s reloaded)", - fbt->fbtp_name, ctl->mod_modname); + continue; } - continue; - } - - dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback); - if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) { - if (fbt_verbose) { - cmn_err(CE_NOTE, "fbt_enable is failing for probe %s " - "in module %s: tempDTraceTrapHook already occupied.", - fbt->fbtp_name, ctl->mod_modname); + dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback); + if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) { + if (fbt_verbose) { + cmn_err(CE_NOTE, "fbt_enable is failing for probe %s " + "in module %s: tempDTraceTrapHook already occupied.", + fbt->fbtp_name, ctl->mod_modname); + } + continue; } - continue; - } - if (fbt->fbtp_currentval != fbt->fbtp_patchval) { + if (fbt->fbtp_currentval != fbt->fbtp_patchval) { #if KASAN - /* Since dtrace probes can call into KASan and vice versa, things can get - * very slow if we have a lot of probes. This call will disable the KASan - * fakestack after a threshold of probes is reached. */ - kasan_fakestack_suspend(); + /* Since dtrace probes can call into KASan and vice versa, things can get + * very slow if we have a lot of probes. This call will disable the KASan + * fakestack after a threshold of probes is reached. */ + kasan_fakestack_suspend(); #endif - (void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint, - sizeof(fbt->fbtp_patchval)); - /* - * Make the patched instruction visible via a data + instruction - * cache flush for the platforms that need it - */ - flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0); - invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0); - fbt->fbtp_currentval = fbt->fbtp_patchval; + (void)ml_nofault_copy((vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint, + sizeof(fbt->fbtp_patchval)); + /* + * Make the patched instruction visible via a data + instruction + * cache flush for the platforms that need it + */ + flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0); + invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0); + fbt->fbtp_currentval = fbt->fbtp_patchval; - ctl->mod_nenabled++; + ctl->mod_nenabled++; + } } - } - - dtrace_membar_consumer(); + dtrace_membar_consumer(); - return (0); + return 0; } /*ARGSUSED*/ @@ -642,29 +662,30 @@ fbt_disable(void *arg, dtrace_id_t id, void *parg) struct modctl *ctl = NULL; for (; fbt != NULL; fbt = fbt->fbtp_next) { - ctl = fbt->fbtp_ctl; + ctl = fbt->fbtp_ctl; - if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt)) - continue; + if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt)) { + continue; + } - if (fbt->fbtp_currentval != fbt->fbtp_savedval) { - (void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint, - sizeof(fbt->fbtp_savedval)); - /* - * Make the patched instruction visible via a data + instruction - * cache flush for the platforms that need it - */ - flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0); - invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0); + if (fbt->fbtp_currentval != fbt->fbtp_savedval) { + (void)ml_nofault_copy((vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint, + 
sizeof(fbt->fbtp_savedval)); + /* + * Make the patched instruction visible via a data + instruction + * cache flush for the platforms that need it + */ + flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0); + invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0); - fbt->fbtp_currentval = fbt->fbtp_savedval; - ASSERT(ctl->mod_nenabled > 0); - ctl->mod_nenabled--; + fbt->fbtp_currentval = fbt->fbtp_savedval; + ASSERT(ctl->mod_nenabled > 0); + ctl->mod_nenabled--; #if KASAN - kasan_fakestack_resume(); + kasan_fakestack_resume(); #endif - } + } } dtrace_membar_consumer(); } @@ -678,21 +699,22 @@ fbt_suspend(void *arg, dtrace_id_t id, void *parg) struct modctl *ctl = NULL; for (; fbt != NULL; fbt = fbt->fbtp_next) { - ctl = fbt->fbtp_ctl; + ctl = fbt->fbtp_ctl; - ASSERT(ctl->mod_nenabled > 0); - if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt)) - continue; + ASSERT(ctl->mod_nenabled > 0); + if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt)) { + continue; + } - (void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint, - sizeof(fbt->fbtp_savedval)); + (void)ml_nofault_copy((vm_offset_t)&fbt->fbtp_savedval, (vm_offset_t)fbt->fbtp_patchpoint, + sizeof(fbt->fbtp_savedval)); /* * Make the patched instruction visible via a data + instruction * cache flush for the platforms that need it */ - flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_savedval), 0); - invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_savedval), 0); + flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_savedval), 0); + invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_savedval), 0); fbt->fbtp_currentval = fbt->fbtp_savedval; } @@ -709,32 +731,33 @@ fbt_resume(void *arg, dtrace_id_t id, void *parg) struct modctl *ctl = NULL; for (; fbt != NULL; fbt = fbt->fbtp_next) { - ctl = fbt->fbtp_ctl; - - ASSERT(ctl->mod_nenabled > 0); - if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt)) - continue; - - dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback); - if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) { - if (fbt_verbose) { - cmn_err(CE_NOTE, "fbt_resume is failing for probe %s " - "in module %s: tempDTraceTrapHook already occupied.", - fbt->fbtp_name, ctl->mod_modname); + ctl = fbt->fbtp_ctl; + + ASSERT(ctl->mod_nenabled > 0); + if (!ctl->mod_loaded || (ctl->mod_loadcnt != fbt->fbtp_loadcnt)) { + continue; } - return; - } - (void)ml_nofault_copy( (vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint, - sizeof(fbt->fbtp_patchval)); + dtrace_casptr(&tempDTraceTrapHook, NULL, fbt_perfCallback); + if (tempDTraceTrapHook != (perfCallback)fbt_perfCallback) { + if (fbt_verbose) { + cmn_err(CE_NOTE, "fbt_resume is failing for probe %s " + "in module %s: tempDTraceTrapHook already occupied.", + fbt->fbtp_name, ctl->mod_modname); + } + return; + } + + (void)ml_nofault_copy((vm_offset_t)&fbt->fbtp_patchval, (vm_offset_t)fbt->fbtp_patchpoint, + sizeof(fbt->fbtp_patchval)); /* * Make the patched instruction visible via a data + instruction cache flush. 
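 * (Cleaning the d-cache publishes the rewritten bytes; invalidating the
 * i-cache keeps a CPU from executing a stale copy of the old instruction.)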
*/ - flush_dcache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0); - invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint,(vm_size_t)sizeof(fbt->fbtp_patchval), 0); + flush_dcache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0); + invalidate_icache((vm_offset_t)fbt->fbtp_patchpoint, (vm_size_t)sizeof(fbt->fbtp_patchval), 0); - fbt->fbtp_currentval = fbt->fbtp_patchval; + fbt->fbtp_currentval = fbt->fbtp_patchval; } dtrace_membar_consumer(); @@ -748,9 +771,8 @@ fbt_provide_module_user_syms(struct modctl *ctl) dtrace_module_symbols_t* module_symbols = ctl->mod_user_symbols; if (module_symbols) { - for (i=0; idtmodsyms_count; i++) { - - /* + for (i = 0; i < module_symbols->dtmodsyms_count; i++) { + /* * symbol->dtsym_addr (the symbol address) passed in from * user space, is already slid for both kexts and kernel. */ @@ -759,23 +781,27 @@ fbt_provide_module_user_syms(struct modctl *ctl) char* name = symbol->dtsym_name; /* Lop off omnipresent leading underscore. */ - if (*name == '_') + if (*name == '_') { name += 1; + } - if (MOD_IS_MACH_KERNEL(ctl) && fbt_excluded(name)) + if (MOD_IS_MACH_KERNEL(ctl) && fbt_excluded(name)) { continue; + } /* * Ignore symbols with a null address */ - if (!symbol->dtsym_addr) + if (!symbol->dtsym_addr) { continue; + } /* * Ignore symbols not part of this module */ - if (!dtrace_addr_in_module((void*)symbol->dtsym_addr, ctl)) + if (!dtrace_addr_in_module((void*)symbol->dtsym_addr, ctl)) { continue; + } fbt_provide_probe(ctl, modname, name, (machine_inst_t*)(uintptr_t)symbol->dtsym_addr, (machine_inst_t*)(uintptr_t)(symbol->dtsym_addr + symbol->dtsym_size)); } @@ -797,19 +823,23 @@ fbt_provide_kernel_section(struct modctl *ctl, kernel_section_t *sect, kernel_nl const char *name = strings + sym[i].n_un.n_strx; uint64_t limit; - if (sym[i].n_value < sect_start || sym[i].n_value > sect_end) + if (sym[i].n_value < sect_start || sym[i].n_value > sect_end) { continue; + } /* Check that the symbol is a global and that it has a name. */ - if (((N_SECT | N_EXT) != n_type && (N_ABS | N_EXT) != n_type)) + if (((N_SECT | N_EXT) != n_type && (N_ABS | N_EXT) != n_type)) { continue; + } - if (0 == sym[i].n_un.n_strx) /* iff a null, "", name. */ + if (0 == sym[i].n_un.n_strx) { /* iff a null, "", name. */ continue; + } /* Lop off omnipresent leading underscore. 
*/ - if (*name == '_') + if (*name == '_') { name += 1; + } #if defined(__arm__) // Skip non-thumb functions on arm32 @@ -818,8 +848,9 @@ fbt_provide_kernel_section(struct modctl *ctl, kernel_section_t *sect, kernel_nl } #endif /* defined(__arm__) */ - if (MOD_IS_MACH_KERNEL(ctl) && fbt_excluded(name)) + if (MOD_IS_MACH_KERNEL(ctl) && fbt_excluded(name)) { continue; + } /* * Find the function boundary by looking at either the @@ -827,14 +858,12 @@ fbt_provide_kernel_section(struct modctl *ctl, kernel_section_t *sect, kernel_nl */ if (i == nsyms - 1) { limit = sect_end; - } - else { + } else { limit = sym[i + 1].n_value; } fbt_provide_probe(ctl, ctl->mod_modname, name, (machine_inst_t*)sym[i].n_value, (machine_inst_t*)limit); } - } static int @@ -856,15 +885,17 @@ fbt_provide_module_kernel_syms(struct modctl *ctl) unsigned int i; size_t symlen; - if (mh->magic != MH_MAGIC_KERNEL) + if (mh->magic != MH_MAGIC_KERNEL) { return; + } cmd = (struct load_command *) &mh[1]; for (i = 0; i < mh->ncmds; i++) { if (cmd->cmd == LC_SEGMENT_KERNEL) { kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd; - if (LIT_STRNEQL(orig_sg->segname, SEG_LINKEDIT)) + if (LIT_STRNEQL(orig_sg->segname, SEG_LINKEDIT)) { linkedit = orig_sg; + } } else if (cmd->cmd == LC_SYMTAB) { symtab = (struct symtab_command *) cmd; } @@ -914,11 +945,13 @@ fbt_provide_module(void *arg, struct modctl *ctl) LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED); // Update the "ignore blacklist" bit - if (ignore_fbt_blacklist) + if (ignore_fbt_blacklist) { ctl->mod_flags |= MODCTL_FBT_PROVIDE_BLACKLISTED_PROBES; + } - if (MOD_FBT_DONE(ctl)) + if (MOD_FBT_DONE(ctl)) { return; + } if (fbt_module_excluded(ctl)) { ctl->mod_flags |= MODCTL_FBT_INVALID; @@ -928,41 +961,44 @@ fbt_provide_module(void *arg, struct modctl *ctl) if (MOD_HAS_KERNEL_SYMBOLS(ctl)) { fbt_provide_module_kernel_syms(ctl); ctl->mod_flags |= MODCTL_FBT_PROBES_PROVIDED; - if (MOD_FBT_PROVIDE_BLACKLISTED_PROBES(ctl)) + if (MOD_FBT_PROVIDE_BLACKLISTED_PROBES(ctl)) { ctl->mod_flags |= MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED; + } return; } if (MOD_HAS_USERSPACE_SYMBOLS(ctl)) { fbt_provide_module_user_syms(ctl); ctl->mod_flags |= MODCTL_FBT_PROBES_PROVIDED; - if (MOD_FBT_PROVIDE_PRIVATE_PROBES(ctl)) + if (MOD_FBT_PROVIDE_PRIVATE_PROBES(ctl)) { ctl->mod_flags |= MODCTL_FBT_PRIVATE_PROBES_PROVIDED; - if (MOD_FBT_PROVIDE_BLACKLISTED_PROBES(ctl)) + } + if (MOD_FBT_PROVIDE_BLACKLISTED_PROBES(ctl)) { ctl->mod_flags |= MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED; + } return; } } static dtrace_pattr_t fbt_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, }; static dtrace_pops_t fbt_pops = { - .dtps_provide = NULL, - .dtps_provide_module = fbt_provide_module, - .dtps_enable = fbt_enable, - .dtps_disable = fbt_disable, - .dtps_suspend = fbt_suspend, - 
.dtps_resume = fbt_resume, - .dtps_getargdesc = NULL, /* APPLE NOTE: fbt_getargdesc implemented in userspace */ - .dtps_getargval = NULL, - .dtps_usermode = NULL, - .dtps_destroy = fbt_destroy + .dtps_provide = NULL, + .dtps_provide_module = fbt_provide_module, + .dtps_enable = fbt_enable, + .dtps_disable = fbt_disable, + .dtps_suspend = fbt_suspend, + .dtps_resume = fbt_resume, + .dtps_getargdesc = NULL, /* APPLE NOTE: fbt_getargdesc implemented in userspace */ + .dtps_getargval = NULL, + .dtps_usermode = NULL, + .dtps_destroy = fbt_destroy }; static void @@ -970,7 +1006,7 @@ fbt_cleanup(dev_info_t *devi) { dtrace_invop_remove(fbt_invop); ddi_remove_minor_node(devi, NULL); - kmem_free(fbt_probetab, fbt_probetab_size * sizeof (fbt_probe_t *)); + kmem_free(fbt_probetab, fbt_probetab_size * sizeof(fbt_probe_t *)); fbt_probetab = NULL; fbt_probetab_mask = 0; } @@ -978,12 +1014,13 @@ fbt_cleanup(dev_info_t *devi) static int fbt_attach(dev_info_t *devi) { - if (fbt_probetab_size == 0) + if (fbt_probetab_size == 0) { fbt_probetab_size = FBT_PROBETAB_SIZE; + } fbt_probetab_mask = fbt_probetab_size - 1; fbt_probetab = - kmem_zalloc(fbt_probetab_size * sizeof (fbt_probe_t *), KM_SLEEP); + kmem_zalloc(fbt_probetab_size * sizeof(fbt_probe_t *), KM_SLEEP); dtrace_invop_add(fbt_invop); @@ -992,10 +1029,10 @@ fbt_attach(dev_info_t *devi) dtrace_register("fbt", &fbt_attr, DTRACE_PRIV_KERNEL, NULL, &fbt_pops, NULL, &fbt_id) != 0) { fbt_cleanup(devi); - return (DDI_FAILURE); + return DDI_FAILURE; } - return (DDI_SUCCESS); + return DDI_SUCCESS; } static d_open_t _fbt_open; @@ -1019,11 +1056,13 @@ sysctl_dtrace_ignore_fbt_blacklist SYSCTL_HANDLER_ARGS int value = *(int*)arg1; err = sysctl_io_number(req, value, sizeof(value), &value, NULL); - if (err) - return (err); + if (err) { + return err; + } if (req->newptr) { - if (!(value == 0 || value == 1)) - return (ERANGE); + if (!(value == 0 || value == 1)) { + return ERANGE; + } /* * We do not allow setting the blacklist back to on, as we have no way @@ -1036,19 +1075,20 @@ sysctl_dtrace_ignore_fbt_blacklist SYSCTL_HANDLER_ARGS * are permanent and do not change after boot. 
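 * (Hence the checks below: anything but 0 or 1 fails with ERANGE, writing
 * 0 fails with EPERM, and even 1 fails with EPERM when kernel symbols
 * cannot be re-read.)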
*/ if (value != 1 || dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER || - dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL) - return (EPERM); + dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL) { + return EPERM; + } ignore_fbt_blacklist = 1; } - return (0); + return 0; } SYSCTL_PROC(_kern_dtrace, OID_AUTO, ignore_fbt_blacklist, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &ignore_fbt_blacklist, 0, - sysctl_dtrace_ignore_fbt_blacklist, "I", "fbt provider ignore blacklist"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ignore_fbt_blacklist, 0, + sysctl_dtrace_ignore_fbt_blacklist, "I", "fbt provider ignore blacklist"); /* * A struct describing which functions will get invoked for certain @@ -1056,20 +1096,20 @@ SYSCTL_PROC(_kern_dtrace, OID_AUTO, ignore_fbt_blacklist, */ static struct cdevsw fbt_cdevsw = { - _fbt_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ + _fbt_open, /* open */ + eno_opcl, /* close */ + eno_rdwrt, /* read */ + eno_rdwrt, /* write */ + eno_ioctl, /* ioctl */ (stop_fcn_t *)nulldev, /* stop */ (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + NULL, /* tty's */ + eno_select, /* select */ + eno_mmap, /* mmap */ + eno_strat, /* strategy */ + eno_getc, /* getc */ + eno_putc, /* putc */ + 0 /* type */ }; #undef kmem_alloc /* from its binding to dt_kmem_alloc glue */ @@ -1086,7 +1126,7 @@ fbt_init( void ) return; } - PE_parse_boot_argn("IgnoreFBTBlacklist", &ignore_fbt_blacklist, sizeof (ignore_fbt_blacklist)); + PE_parse_boot_argn("IgnoreFBTBlacklist", &ignore_fbt_blacklist, sizeof(ignore_fbt_blacklist)); fbt_attach((dev_info_t*)(uintptr_t)majdevno); } diff --git a/bsd/dev/dtrace/lockprof.c b/bsd/dev/dtrace/lockprof.c new file mode 100644 index 000000000..f7ea6085e --- /dev/null +++ b/bsd/dev/dtrace/lockprof.c @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2018 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include + +#if LOCK_STATS +#define SPIN_HELD 0 +#define SPIN_MISS 1 +#define SPIN_SPIN 2 + +#define SPIN_HELD_PREFIX "spin-held-" +#define SPIN_MISS_PREFIX "spin-miss-" +#define SPIN_SPIN_PREFIX "spin-spin-" + +#define LOCKGROUPSTAT_AFRAMES 1 +#define LOCKGROUPSTAT_LEN 64 + +static dtrace_provider_id_t lockprof_id; + +decl_lck_mtx_data(extern, lck_grp_lock) +extern queue_head_t lck_grp_queue; +extern unsigned int lck_grp_cnt; + +#define LOCKPROF_MAX 10000 /* maximum number of lockprof probes */ +static uint32_t lockprof_count; /* current number of lockprof probes */ + +static const struct { + int kind; + const char *prefix; + bool time_event; +} events[] = { + {SPIN_HELD, SPIN_HELD_PREFIX, false}, + {SPIN_MISS, SPIN_MISS_PREFIX, false}, + {SPIN_SPIN, SPIN_SPIN_PREFIX, true}, + {0, NULL, false} +}; + +const static int hold_defaults[] = { + 100, 1000 +}; + +const static struct { + unsigned int time; + const char *suffix; + uint64_t mult; +} cont_defaults[] = { + {100, "ms", NANOSEC / MILLISEC} +}; + +typedef struct lockprof_probe { + int lockprof_kind; + dtrace_id_t lockprof_id; + uint64_t lockprof_limit; + lck_grp_t *lockprof_grp; +} lockprof_probe_t; + +void +lockprof_invoke(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t val) +{ + dtrace_probe(stat->lgs_probeid, (uintptr_t)grp, val, 0, 0, 0); +} + +static void +probe_create(int kind, const char *suffix, const char *grp_name, uint64_t count, uint64_t mult) +{ + char name[LOCKGROUPSTAT_LEN]; + lck_mtx_lock(&lck_grp_lock); + lck_grp_t *grp = (lck_grp_t*)queue_first(&lck_grp_queue); + uint64_t limit = count * mult; + + if (events[kind].time_event) { + nanoseconds_to_absolutetime(limit, &limit); + } + + for (unsigned int i = 0; i < lck_grp_cnt; i++, grp = (lck_grp_t*)queue_next((queue_entry_t)grp)) { + if (!grp_name || grp_name[0] == '\0' || strcmp(grp_name, grp->lck_grp_name) == 0) { + snprintf(name, sizeof(name), "%s%llu%s", events[kind].prefix, count, suffix ?: ""); + + if (dtrace_probe_lookup(lockprof_id, grp->lck_grp_name, NULL, name) != 0) { + continue; + } + if (lockprof_count >= LOCKPROF_MAX) { + break; + } + + lockprof_probe_t *probe = kmem_zalloc(sizeof(lockprof_probe_t), KM_SLEEP); + probe->lockprof_kind = kind; + probe->lockprof_limit = limit; + probe->lockprof_grp = grp; + + probe->lockprof_id = dtrace_probe_create(lockprof_id, grp->lck_grp_name, NULL, name, + LOCKGROUPSTAT_AFRAMES, probe); + + lockprof_count++; + } + } + lck_mtx_unlock(&lck_grp_lock); +} + +static void +lockprof_provide(void *arg, const dtrace_probedesc_t *desc) +{ +#pragma unused(arg) + size_t event_id, i, len; + + if (desc == NULL) { + for (i = 0; i < sizeof(hold_defaults) / sizeof(hold_defaults[0]); i++) { + probe_create(SPIN_HELD, NULL, NULL, hold_defaults[i], 1); + probe_create(SPIN_MISS, NULL, NULL, hold_defaults[i], 1); + } + for (i = 0; i < sizeof(cont_defaults) / sizeof(cont_defaults[0]); i++) { + probe_create(SPIN_SPIN, cont_defaults[i].suffix, NULL, cont_defaults[i].time, cont_defaults[i].mult); + } + return; + } + + const char *name, *suffix = NULL; + hrtime_t val = 0, mult = 1; + + const struct { + const char *name; + hrtime_t mult; + } suffixes[] = { + { "us", NANOSEC / MICROSEC }, + { "usec", NANOSEC / MICROSEC }, + { "ms", NANOSEC / MILLISEC }, + { "msec", NANOSEC / MILLISEC }, + { "s", NANOSEC / SEC }, + { "sec", NANOSEC / SEC }, + { NULL, 0 } + }; + + name = desc->dtpd_name; + + for (event_id = 0; events[event_id].prefix != NULL; event_id++) { + len = 
strlen(events[event_id].prefix); + + if (strncmp(name, events[event_id].prefix, len) != 0) { + continue; + } + break; + } + + if (events[event_id].prefix == NULL) { + return; + } + + + /* + * We need to start before any time suffix. + */ + for (i = strlen(name); i >= len; i--) { + if (name[i] >= '0' && name[i] <= '9') { + break; + } + suffix = &name[i]; + } + + /* + * Now determine the numerical value present in the probe name. + */ + for (uint64_t m = 1; i >= len; i--) { + if (name[i] < '0' || name[i] > '9') { + return; + } + + val += (name[i] - '0') * m; + m *= (hrtime_t)10; + } + + if (val == 0) { + return; + } + + if (events[event_id].time_event) { + for (i = 0, mult = 0; suffixes[i].name != NULL; i++) { + if (strncasecmp(suffixes[i].name, suffix, strlen(suffixes[i].name) + 1) == 0) { + mult = suffixes[i].mult; + break; + } + } + if (suffixes[i].name == NULL) { + return; + } + } else if (*suffix != '\0') { + return; + } + + probe_create(events[event_id].kind, suffix, desc->dtpd_mod, val, mult); +} + + +static lck_grp_stat_t* +lockprof_stat(lck_grp_t *grp, int kind) +{ + switch (kind) { + case SPIN_HELD: + return &grp->lck_grp_stats.lgss_spin_held; + case SPIN_MISS: + return &grp->lck_grp_stats.lgss_spin_miss; + case SPIN_SPIN: + return &grp->lck_grp_stats.lgss_spin_spin; + default: + return NULL; + } +} + +static int +lockprof_enable(void *arg, dtrace_id_t id, void *parg) +{ +#pragma unused(arg, id, parg) + lockprof_probe_t *probe = (lockprof_probe_t*)parg; + lck_grp_t *grp = probe->lockprof_grp; + lck_grp_stat_t *stat; + + if (grp == NULL) { + return -1; + } + + if ((stat = lockprof_stat(grp, probe->lockprof_kind)) == NULL) { + return -1; + } + + /* + * lockprof_enable/disable are called with + * dtrace_lock held + */ + if (stat->lgs_limit != 0) { + return -1; + } + + stat->lgs_limit = probe->lockprof_limit; + stat->lgs_enablings++; + stat->lgs_probeid = probe->lockprof_id; + + return 0; +} + +static void +lockprof_disable(void *arg, dtrace_id_t id, void *parg) +{ +#pragma unused(arg, id) + lockprof_probe_t *probe = (lockprof_probe_t*)parg; + lck_grp_t *grp = probe->lockprof_grp; + lck_grp_stat_t *stat; + + if (grp == NULL) { + return; + } + + if ((stat = lockprof_stat(grp, probe->lockprof_kind)) == NULL) { + return; + } + + if (stat->lgs_limit == 0 || stat->lgs_enablings == 0) { + return; + } + + stat->lgs_limit = 0; + stat->lgs_enablings--; + stat->lgs_probeid = 0; +} + +static void +lockprof_destroy(void *arg, dtrace_id_t id, void *parg) +{ +#pragma unused(arg, id) + lockprof_probe_t *probe = (lockprof_probe_t*)parg; + kmem_free(probe, sizeof(lockprof_probe_t)); + lockprof_count--; +} + +static void +lockprof_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc) +{ +#pragma unused(arg, id, parg) + const char *argdesc = NULL; + switch (desc->dtargd_ndx) { + case 0: + argdesc = "lck_grp_t*"; + break; + case 1: + argdesc = "uint64_t"; + break; + } + + if (argdesc) { + strlcpy(desc->dtargd_native, argdesc, DTRACE_ARGTYPELEN); + } else { + desc->dtargd_ndx = DTRACE_ARGNONE; + } +} +static dtrace_pattr_t lockprof_attr = { + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, +}; + +static dtrace_pops_t lockprof_pops = { + 
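	/* Probes are created from lck_grp names at provide time, so there is
	 * no per-module callback and no suspend/resume support; hence the
	 * NULL entries below. */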
.dtps_provide = lockprof_provide, + .dtps_provide_module = NULL, + .dtps_enable = lockprof_enable, + .dtps_disable = lockprof_disable, + .dtps_suspend = NULL, + .dtps_resume = NULL, + .dtps_getargdesc = lockprof_getargdesc, + .dtps_getargval = NULL, + .dtps_usermode = NULL, + .dtps_destroy = lockprof_destroy +}; +#endif /* LOCK_STATS */ +void lockprof_init(void); +void +lockprof_init(void) +{ +#if LOCK_STATS + dtrace_register("lockprof", &lockprof_attr, + DTRACE_PRIV_KERNEL, NULL, + &lockprof_pops, NULL, &lockprof_id); +#endif /* LOCK_STATS */ +} diff --git a/bsd/dev/dtrace/lockstat.c b/bsd/dev/dtrace/lockstat.c index 3d06c46f4..f28db3a39 100644 --- a/bsd/dev/dtrace/lockstat.c +++ b/bsd/dev/dtrace/lockstat.c @@ -48,6 +48,8 @@ #include +#include + #include #define membar_producer dtrace_membar_producer @@ -68,27 +70,27 @@ * Hot patch values, x86 */ #if defined(__x86_64__) -#define NOP 0x90 -#define RET 0xc3 +#define NOP 0x90 +#define RET 0xc3 #define LOCKSTAT_AFRAMES 1 -#elif defined(__arm__) -#define NOP 0xE1A00000 -#define BXLR 0xE12FFF1E +#elif defined(__arm__) +#define NOP 0xE1A00000 +#define BXLR 0xE12FFF1E #define LOCKSTAT_AFRAMES 2 #elif defined(__arm64__) -#define NOP 0xD503201F -#define RET 0xD65f03c0 +#define NOP 0xD503201F +#define RET 0xD65f03c0 #define LOCKSTAT_AFRAMES 2 #else #error "not ported to this architecture" #endif typedef struct lockstat_probe { - const char *lsp_func; - const char *lsp_name; - int lsp_probe; - dtrace_id_t lsp_id; - const char *lsp_args; + const char *lsp_func; + const char *lsp_name; + int lsp_probe; + dtrace_id_t lsp_id; + const char *lsp_args; } lockstat_probe_t; lockstat_probe_t lockstat_probes[] = @@ -163,7 +165,7 @@ lockstat_probe_t lockstat_probes[] = #endif /* Interlock measurements would be nice, but later */ -#ifdef LATER +#ifdef LATER LOCKSTAT_PROBE(LS_LCK_RW_LOCK_EXCL_TO_SHARED, LSA_ILK_SPIN, LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN), LOCKSTAT_PROBE(LS_LCK_MTX_LOCK, LSA_ILK_SPIN, LS_LCK_MTX_LOCK_ILK_SPIN), LOCKSTAT_PROBE(LS_LCK_MTX_EXT_LOCK, LSA_ILK_SPIN, LS_LCK_MTX_EXT_LOCK_ILK_SPIN), @@ -171,87 +173,14 @@ lockstat_probe_t lockstat_probes[] = LOCKSTAT_PROBE(LS_LCK_RW_TRY_LOCK_SHARED, LSA_SPIN, LS_LCK_RW_TRY_LOCK_SHARED_SPIN) #endif - { NULL, NULL, 0, 0, NULL} + { + NULL, NULL, 0, 0, NULL + } }; dtrace_id_t lockstat_probemap[LS_NPROBES]; - -typedef struct lockstat_assembly_probe { - int lsap_probe; - vm_offset_t * lsap_patch_point; -} lockstat_assembly_probe_t; - - - lockstat_assembly_probe_t assembly_probes[] = - { - { LS_LCK_INVALID, NULL } - }; - - -/* - * APPLE NOTE: - * Hot patch is used to manipulate probe points by swapping between - * no-op and return instructions. - * The active flag indicates whether the probe point will turn on or off. - * on == plant a NOP and thus fall through to the probe call - * off == plant a RET and thus avoid the probe call completely - * The ls_probe identifies which probe we will patch. - */ -static -void lockstat_hot_patch(boolean_t active, int ls_probe) -{ -#pragma unused(active) - int i; - - /* - * Loop through entire table, in case there are - * multiple patch points per probe. - */ - for (i = 0; assembly_probes[i].lsap_patch_point; i++) { - if (ls_probe == assembly_probes[i].lsap_probe) -#if defined(__x86_64__) - { - uint8_t instr; - instr = (active ? NOP : RET ); - (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i].lsap_patch_point), - sizeof(instr)); - } -#elif defined (__arm__) - { - uint32_t instr; - instr = (active ? 
NOP : BXLR ); - (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i].lsap_patch_point), - sizeof(instr)); - } -#elif defined (__arm64__) - { - uint32_t instr; - instr = (active ? NOP : RET ); - (void) ml_nofault_copy( (vm_offset_t)&instr, *(assembly_probes[i].lsap_patch_point), - sizeof(instr)); - } -#endif - } /* for */ -} - void (*lockstat_probe)(dtrace_id_t, uint64_t, uint64_t, - uint64_t, uint64_t, uint64_t); - - -/* - * APPLE NOTE: - * This wrapper is used only by assembler hot patched probes. - */ -void -lockstat_probe_wrapper(int probe, uintptr_t lp, int rwflag) -{ - dtrace_id_t id; - id = lockstat_probemap[probe]; - if (id != 0) - { - (*lockstat_probe)(id, (uintptr_t)lp, (uint64_t)rwflag, 0,0,0); - } -} + uint64_t, uint64_t, uint64_t); static dtrace_provider_id_t lockstat_id; @@ -260,7 +189,7 @@ static int lockstat_enable(void *arg, dtrace_id_t id, void *parg) { #pragma unused(arg) /* __APPLE__ */ - + lockstat_probe_t *probe = parg; ASSERT(!lockstat_probemap[probe->lsp_probe]); @@ -268,10 +197,8 @@ lockstat_enable(void *arg, dtrace_id_t id, void *parg) lockstat_probemap[probe->lsp_probe] = id; membar_producer(); - lockstat_hot_patch(TRUE, probe->lsp_probe); membar_producer(); - return(0); - + return 0; } /*ARGSUSED*/ @@ -286,7 +213,6 @@ lockstat_disable(void *arg, dtrace_id_t id, void *parg) ASSERT(lockstat_probemap[probe->lsp_probe]); lockstat_probemap[probe->lsp_probe] = 0; - lockstat_hot_patch(FALSE, probe->lsp_probe); membar_producer(); /* @@ -302,7 +228,6 @@ lockstat_disable(void *arg, dtrace_id_t id, void *parg) return; } } - } /*ARGSUSED*/ @@ -310,15 +235,16 @@ static void lockstat_provide(void *arg, const dtrace_probedesc_t *desc) { #pragma unused(arg, desc) /* __APPLE__ */ - + int i = 0; for (i = 0; lockstat_probes[i].lsp_func != NULL; i++) { lockstat_probe_t *probe = &lockstat_probes[i]; if (dtrace_probe_lookup(lockstat_id, "mach_kernel", - probe->lsp_func, probe->lsp_name) != 0) + probe->lsp_func, probe->lsp_name) != 0) { continue; + } ASSERT(!probe->lsp_id); probe->lsp_id = dtrace_probe_create(lockstat_id, @@ -333,7 +259,7 @@ static void lockstat_destroy(void *arg, dtrace_id_t id, void *parg) { #pragma unused(arg, id) /* __APPLE__ */ - + lockstat_probe_t *probe = parg; ASSERT(!lockstat_probemap[probe->lsp_probe]); @@ -351,7 +277,7 @@ lockstat_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *des desc->dtargd_native[0] = '\0'; desc->dtargd_xlate[0] = '\0'; - while(argdesc[0] != '\0') { + while (argdesc[0] != '\0') { if (narg == desc->dtargd_ndx) { strlcpy(desc->dtargd_native, argdesc, DTRACE_ARGTYPELEN); return; @@ -364,24 +290,24 @@ lockstat_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *des } static dtrace_pattr_t lockstat_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, 
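	/* rows: provider, module, function, name and args stability */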
}; static dtrace_pops_t lockstat_pops = { - .dtps_provide = lockstat_provide, - .dtps_provide_module = NULL, - .dtps_enable = lockstat_enable, - .dtps_disable = lockstat_disable, - .dtps_suspend = NULL, - .dtps_resume = NULL, - .dtps_getargdesc = lockstat_getargdesc, - .dtps_getargval = NULL, - .dtps_usermode = NULL, - .dtps_destroy = lockstat_destroy + .dtps_provide = lockstat_provide, + .dtps_provide_module = NULL, + .dtps_enable = lockstat_enable, + .dtps_disable = lockstat_disable, + .dtps_suspend = NULL, + .dtps_resume = NULL, + .dtps_getargdesc = lockstat_getargdesc, + .dtps_getargval = NULL, + .dtps_usermode = NULL, + .dtps_destroy = lockstat_destroy }; static int @@ -392,18 +318,19 @@ lockstat_attach(dev_info_t *devi) dtrace_register("lockstat", &lockstat_attr, DTRACE_PRIV_KERNEL, NULL, &lockstat_pops, NULL, &lockstat_id) != 0) { ddi_remove_minor_node(devi, NULL); - return (DDI_FAILURE); + return DDI_FAILURE; } lockstat_probe = dtrace_probe; membar_producer(); - return (DDI_SUCCESS); + return DDI_SUCCESS; } d_open_t _lockstat_open; -int _lockstat_open(dev_t dev, int flags, int devtype, struct proc *p) +int +_lockstat_open(dev_t dev, int flags, int devtype, struct proc *p) { #pragma unused(dev,flags,devtype,p) return 0; @@ -417,25 +344,26 @@ int _lockstat_open(dev_t dev, int flags, int devtype, struct proc *p) */ static struct cdevsw lockstat_cdevsw = { - _lockstat_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ + _lockstat_open, /* open */ + eno_opcl, /* close */ + eno_rdwrt, /* read */ + eno_rdwrt, /* write */ + eno_ioctl, /* ioctl */ (stop_fcn_t *)nulldev, /* stop */ (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + NULL, /* tty's */ + eno_select, /* select */ + eno_mmap, /* mmap */ + eno_strat, /* strategy */ + eno_getc, /* getc */ + eno_putc, /* putc */ + 0 /* type */ }; void lockstat_init( void ); -void lockstat_init( void ) +void +lockstat_init( void ) { int majdevno = cdevsw_add(LOCKSTAT_MAJOR, &lockstat_cdevsw); diff --git a/bsd/dev/dtrace/profile_prvd.c b/bsd/dev/dtrace/profile_prvd.c index a76f901c4..6d36e4cde 100644 --- a/bsd/dev/dtrace/profile_prvd.c +++ b/bsd/dev/dtrace/profile_prvd.c @@ -84,7 +84,7 @@ static dtrace_provider_id_t profile_id; * On SPARC, the picture is further complicated because the compiler * optimizes away tail-calls -- so the following frames are optimized away: * - * profile_fire + * profile_fire * cyclic_expire * * This gives three frames. 
However, on DEBUG kernels, the cyclic_expire @@ -105,41 +105,41 @@ static dtrace_provider_id_t profile_id; #error Unknown architecture #endif -#define PROF_NAMELEN 15 +#define PROF_NAMELEN 15 -#define PROF_PROFILE 0 -#define PROF_TICK 1 -#define PROF_PREFIX_PROFILE "profile-" -#define PROF_PREFIX_TICK "tick-" +#define PROF_PROFILE 0 +#define PROF_TICK 1 +#define PROF_PREFIX_PROFILE "profile-" +#define PROF_PREFIX_TICK "tick-" typedef struct profile_probe { - char prof_name[PROF_NAMELEN]; - dtrace_id_t prof_id; - int prof_kind; - hrtime_t prof_interval; - cyclic_id_t prof_cyclic; + char prof_name[PROF_NAMELEN]; + dtrace_id_t prof_id; + int prof_kind; + hrtime_t prof_interval; + cyclic_id_t prof_cyclic; } profile_probe_t; typedef struct profile_probe_percpu { - hrtime_t profc_expected; - hrtime_t profc_interval; - profile_probe_t *profc_probe; + hrtime_t profc_expected; + hrtime_t profc_interval; + profile_probe_t *profc_probe; } profile_probe_percpu_t; -hrtime_t profile_interval_min = NANOSEC / 5000; /* 5000 hz */ -int profile_aframes = 0; /* override */ +hrtime_t profile_interval_min = NANOSEC / 5000; /* 5000 hz */ +int profile_aframes = 0; /* override */ static int profile_rates[] = { - 97, 199, 499, 997, 1999, - 4001, 4999, 0, 0, 0, - 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0 + 97, 199, 499, 997, 1999, + 4001, 4999, 0, 0, 0, + 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0 }; static int profile_ticks[] = { - 1, 10, 100, 500, 1000, - 5000, 0, 0, 0, 0, - 0, 0, 0, 0, 0 + 1, 10, 100, 500, 1000, + 5000, 0, 0, 0, 0, + 0, 0, 0, 0, 0 }; /* @@ -149,9 +149,9 @@ static int profile_ticks[] = { * this gets its value from PROFILE_MAX_DEFAULT or profile-max-probes if it's * present in the profile.conf file. */ -#define PROFILE_MAX_DEFAULT 1000 /* default max. number of probes */ -static uint32_t profile_max; /* maximum number of profile probes */ -static uint32_t profile_total; /* current number of profile probes */ +#define PROFILE_MAX_DEFAULT 1000 /* default max. number of probes */ +static uint32_t profile_max; /* maximum number of profile probes */ +static uint32_t profile_total; /* current number of profile probes */ static void profile_fire(void *arg) @@ -168,8 +168,7 @@ profile_fire(void *arg) if (NULL != kern_regs) { /* Kernel was interrupted. */ - dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, late, 0, 0); - + dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, late, 0, 0); } else { pal_register_cache_state(current_thread(), VALID); /* Possibly a user interrupt */ @@ -178,7 +177,7 @@ profile_fire(void *arg) if (NULL == tagged_regs) { /* Too bad, so sad, no useful interrupt state. */ dtrace_probe(prof->prof_id, 0xcafebabe, - 0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */ + 0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */ } else if (is_saved_state64(tagged_regs)) { x86_saved_state64_t *regs = saved_state64(tagged_regs); @@ -198,7 +197,7 @@ profile_fire(void *arg) if (arm_kern_regs->cpsr & 0xF) { /* Kernel was interrupted. */ - dtrace_probe(prof->prof_id, arm_kern_regs->pc, 0x0, late, 0, 0); + dtrace_probe(prof->prof_id, arm_kern_regs->pc, 0x0, late, 0, 0); } else { /* Possibly a user interrupt */ arm_saved_state_t *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread()); @@ -220,7 +219,7 @@ profile_fire(void *arg) if (saved_state64(arm_kern_regs)->cpsr & 0xF) { /* Kernel was interrupted. 
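 * (find_kern_regs() returned saved state, i.e. the timer interrupt
 * arrived while the CPU was in the kernel; the else path below looks
 * for user registers instead.)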
*/ - dtrace_probe(prof->prof_id, saved_state64(arm_kern_regs)->pc, 0x0, late, 0, 0); + dtrace_probe(prof->prof_id, saved_state64(arm_kern_regs)->pc, 0x0, late, 0, 0); } else { /* Possibly a user interrupt */ arm_saved_state_t *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread()); @@ -248,7 +247,7 @@ profile_tick(void *arg) if (NULL != kern_regs) { /* Kernel was interrupted. */ - dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, 0, 0, 0); + dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, 0, 0, 0); } else { pal_register_cache_state(current_thread(), VALID); /* Possibly a user interrupt */ @@ -257,7 +256,7 @@ profile_tick(void *arg) if (NULL == tagged_regs) { /* Too bad, so sad, no useful interrupt state. */ dtrace_probe(prof->prof_id, 0xcafebabe, - 0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */ + 0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */ } else if (is_saved_state64(tagged_regs)) { x86_saved_state64_t *regs = saved_state64(tagged_regs); @@ -274,7 +273,7 @@ profile_tick(void *arg) if (NULL != arm_kern_regs) { /* Kernel was interrupted. */ - dtrace_probe(prof->prof_id, arm_kern_regs->pc, 0x0, 0, 0, 0); + dtrace_probe(prof->prof_id, arm_kern_regs->pc, 0x0, 0, 0, 0); } else { /* Possibly a user interrupt */ arm_saved_state_t *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread()); @@ -293,7 +292,7 @@ profile_tick(void *arg) if (NULL != arm_kern_regs) { /* Kernel was interrupted. */ - dtrace_probe(prof->prof_id, saved_state64(arm_kern_regs)->pc, 0x0, 0, 0, 0); + dtrace_probe(prof->prof_id, saved_state64(arm_kern_regs)->pc, 0x0, 0, 0, 0); } else { /* Possibly a user interrupt */ arm_saved_state_t *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread()); @@ -317,11 +316,13 @@ profile_create(hrtime_t interval, const char *name, int kind) { profile_probe_t *prof; - if (interval < profile_interval_min) + if (interval < profile_interval_min) { return; + } - if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0) + if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0) { return; + } atomic_add_32(&profile_total, 1); if (profile_total > profile_max) { @@ -329,10 +330,11 @@ profile_create(hrtime_t interval, const char *name, int kind) return; } - if (PROF_TICK == kind) - prof = kmem_zalloc(sizeof (profile_probe_t), KM_SLEEP); - else - prof = kmem_zalloc(sizeof (profile_probe_t) + NCPU*sizeof(profile_probe_percpu_t), KM_SLEEP); + if (PROF_TICK == kind) { + prof = kmem_zalloc(sizeof(profile_probe_t), KM_SLEEP); + } else { + prof = kmem_zalloc(sizeof(profile_probe_t) + NCPU * sizeof(profile_probe_percpu_t), KM_SLEEP); + } (void) strlcpy(prof->prof_name, name, sizeof(prof->prof_name)); prof->prof_interval = interval; @@ -365,23 +367,23 @@ profile_provide(void *arg, const dtrace_probedesc_t *desc) const char *name; hrtime_t mult; } suffixes[] = { - { "ns", NANOSEC / NANOSEC }, - { "nsec", NANOSEC / NANOSEC }, - { "us", NANOSEC / MICROSEC }, - { "usec", NANOSEC / MICROSEC }, - { "ms", NANOSEC / MILLISEC }, - { "msec", NANOSEC / MILLISEC }, - { "s", NANOSEC / SEC }, - { "sec", NANOSEC / SEC }, - { "m", NANOSEC * (hrtime_t)60 }, - { "min", NANOSEC * (hrtime_t)60 }, - { "h", NANOSEC * (hrtime_t)(60 * 60) }, - { "hour", NANOSEC * (hrtime_t)(60 * 60) }, - { "d", NANOSEC * (hrtime_t)(24 * 60 * 60) }, - { "day", NANOSEC * (hrtime_t)(24 * 60 * 60) }, - { "hz", 0 }, + { "ns", NANOSEC / NANOSEC }, + { "nsec", NANOSEC / NANOSEC }, + { "us", NANOSEC / MICROSEC }, + { "usec", NANOSEC / 
MICROSEC }, + { "ms", NANOSEC / MILLISEC }, + { "msec", NANOSEC / MILLISEC }, + { "s", NANOSEC / SEC }, + { "sec", NANOSEC / SEC }, + { "m", NANOSEC * (hrtime_t)60 }, + { "min", NANOSEC * (hrtime_t)60 }, + { "h", NANOSEC * (hrtime_t)(60 * 60) }, + { "hour", NANOSEC * (hrtime_t)(60 * 60) }, + { "d", NANOSEC * (hrtime_t)(24 * 60 * 60) }, + { "day", NANOSEC * (hrtime_t)(24 * 60 * 60) }, + { "hz", 0 }, { NULL, 0 } - }; + }; if (desc == NULL) { char n[PROF_NAMELEN]; @@ -389,18 +391,20 @@ profile_provide(void *arg, const dtrace_probedesc_t *desc) /* * If no description was provided, provide all of our probes. */ - for (i = 0; i < (int)(sizeof (profile_rates) / sizeof (int)); i++) { - if ((rate = profile_rates[i]) == 0) + for (i = 0; i < (int)(sizeof(profile_rates) / sizeof(int)); i++) { + if ((rate = profile_rates[i]) == 0) { continue; + } (void) snprintf(n, PROF_NAMELEN, "%s%d", PROF_PREFIX_PROFILE, rate); profile_create(NANOSEC / rate, n, PROF_PROFILE); } - for (i = 0; i < (int)(sizeof (profile_ticks) / sizeof (int)); i++) { - if ((rate = profile_ticks[i]) == 0) + for (i = 0; i < (int)(sizeof(profile_ticks) / sizeof(int)); i++) { + if ((rate = profile_ticks[i]) == 0) { continue; + } (void) snprintf(n, PROF_NAMELEN, "%s%d", PROF_PREFIX_TICK, rate); @@ -415,13 +419,15 @@ profile_provide(void *arg, const dtrace_probedesc_t *desc) for (i = 0; types[i].prefix != NULL; i++) { len = strlen(types[i].prefix); - if (strncmp(name, types[i].prefix, len) != 0) + if (strncmp(name, types[i].prefix, len) != 0) { continue; + } break; } - if (types[i].prefix == NULL) + if (types[i].prefix == NULL) { return; + } kind = types[i].kind; j = strlen(name) - len; @@ -430,8 +436,9 @@ profile_provide(void *arg, const dtrace_probedesc_t *desc) * We need to start before any time suffix. */ for (j = strlen(name); j >= len; j--) { - if (name[j] >= '0' && name[j] <= '9') + if (name[j] >= '0' && name[j] <= '9') { break; + } suffix = &name[j]; } @@ -441,15 +448,17 @@ profile_provide(void *arg, const dtrace_probedesc_t *desc) * Now determine the numerical value present in the probe name. */ for (; j >= len; j--) { - if (name[j] < '0' || name[j] > '9') + if (name[j] < '0' || name[j] > '9') { return; + } val += (name[j] - '0') * mult; mult *= (hrtime_t)10; } - if (val == 0) + if (val == 0) { return; + } /* * Look-up the suffix to determine the multiplier. 
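 */

Both profile_provide() above and the new lockprof_provide() earlier in this
patch parse probe names with the same grammar: a fixed prefix, a decimal
count scanned back-to-front, and an optional time suffix resolved through a
multiplier table. The following is a minimal userland sketch of that parse,
useful for studying the backwards digit scan in isolation; parse_probe_name()
and its trimmed-down suffix table are our illustration only, not kernel API.

#include <stdio.h>
#include <string.h>
#include <strings.h>

#define NANOSEC  1000000000LL
#define MICROSEC 1000000LL
#define MILLISEC 1000LL

/*
 * Given "prefix<digits><suffix>", recover the decimal value and the
 * nanosecond multiplier encoded by the suffix. Returns 0 on success,
 * -1 if the name does not match the grammar.
 */
static int
parse_probe_name(const char *name, const char *prefix,
    long long *value, long long *mult)
{
	static const struct { const char *suf; long long mult; } suffixes[] = {
		{ "us", NANOSEC / MICROSEC }, { "usec", NANOSEC / MICROSEC },
		{ "ms", NANOSEC / MILLISEC }, { "msec", NANOSEC / MILLISEC },
		{ "s", NANOSEC }, { "sec", NANOSEC },
		{ NULL, 0 }
	};
	size_t len = strlen(prefix);
	size_t i = strlen(name);
	const char *suffix = name + i;
	long long val = 0, m = 1;

	if (strncmp(name, prefix, len) != 0) {
		return -1;
	}

	/* Walk backwards until we reach the trailing run of digits. */
	while (i > len && (name[i - 1] < '0' || name[i - 1] > '9')) {
		suffix = &name[--i];
	}

	/* Accumulate the value, least-significant digit first. */
	while (i > len) {
		char c = name[--i];
		if (c < '0' || c > '9') {
			return -1;	/* stray character inside the number */
		}
		val += (c - '0') * m;
		m *= 10;
	}
	if (val == 0) {
		return -1;	/* zero and missing counts are rejected */
	}

	*value = val;
	*mult = 1;
	if (*suffix == '\0') {
		return 0;	/* plain count, e.g. "spin-held-1000" */
	}
	for (int j = 0; suffixes[j].suf != NULL; j++) {
		if (strcasecmp(suffixes[j].suf, suffix) == 0) {
			*mult = suffixes[j].mult;
			return 0;
		}
	}
	return -1;	/* unrecognized time suffix */
}

int
main(void)
{
	long long val, mult;

	if (parse_probe_name("spin-spin-100ms", "spin-spin-", &val, &mult) == 0) {
		printf("spin-spin-100ms -> %lld ns\n", val * mult);	/* 100000000 */
	}
	return 0;
}

Scanning from the least-significant digit keeps the parse allocation-free,
and rejecting val == 0 mirrors the kernel's refusal to create zero-count or
zero-interval probes.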
@@ -462,8 +471,9 @@ profile_provide(void *arg, const dtrace_probedesc_t *desc) } } - if (suffixes[i].name == NULL && *suffix != '\0') + if (suffixes[i].name == NULL && *suffix != '\0') { return; + } if (mult == 0) { /* @@ -486,10 +496,11 @@ profile_destroy(void *arg, dtrace_id_t id, void *parg) ASSERT(prof->prof_cyclic == CYCLIC_NONE); - if (prof->prof_kind == PROF_TICK) - kmem_free(prof, sizeof (profile_probe_t)); - else - kmem_free(prof, sizeof (profile_probe_t) + NCPU*sizeof(profile_probe_percpu_t)); + if (prof->prof_kind == PROF_TICK) { + kmem_free(prof, sizeof(profile_probe_t)); + } else { + kmem_free(prof, sizeof(profile_probe_t) + NCPU * sizeof(profile_probe_percpu_t)); + } ASSERT(profile_total >= 1); atomic_add_32(&profile_total, -1); @@ -564,7 +575,7 @@ profile_enable(void *arg, dtrace_id_t id, void *parg) prof->prof_cyclic = (cyclic_id_t)cyclic_add_omni(&omni); /* cast puns cyclic_id_list_t with cyclic_id_t */ } - return(0); + return 0; } /*ARGSUSED*/ @@ -605,22 +616,21 @@ profile_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc profile_probe_t *prof = parg; const char *argdesc = NULL; switch (desc->dtargd_ndx) { - case 0: - argdesc = "void*"; - break; - case 1: - argdesc = "user_addr_t"; - break; - case 2: - if (prof->prof_kind == PROF_PROFILE) { - argdesc = "hrtime_t"; - } - break; + case 0: + argdesc = "void*"; + break; + case 1: + argdesc = "user_addr_t"; + break; + case 2: + if (prof->prof_kind == PROF_PROFILE) { + argdesc = "hrtime_t"; + } + break; } if (argdesc) { strlcpy(desc->dtargd_native, argdesc, DTRACE_ARGTYPELEN); - } - else { + } else { desc->dtargd_ndx = DTRACE_ARGNONE; } } @@ -636,24 +646,24 @@ profile_usermode(void *arg, dtrace_id_t id, void *parg) } static dtrace_pattr_t profile_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, -{ DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, }; static dtrace_pops_t profile_pops = { - .dtps_provide = profile_provide, - .dtps_provide_module = NULL, - .dtps_enable = profile_enable, - .dtps_disable = profile_disable, - .dtps_suspend = NULL, - .dtps_resume = NULL, - .dtps_getargdesc = profile_getargdesc, - .dtps_getargval = profile_getarg, - .dtps_usermode = profile_usermode, - .dtps_destroy = profile_destroy + .dtps_provide = profile_provide, + .dtps_provide_module = NULL, + .dtps_enable = profile_enable, + .dtps_disable = profile_disable, + .dtps_suspend = NULL, + .dtps_resume = NULL, + .dtps_getargdesc = profile_getargdesc, + .dtps_getargval = profile_getarg, + .dtps_usermode = profile_usermode, + .dtps_destroy = profile_destroy }; static int @@ -665,12 +675,12 @@ profile_attach(dev_info_t *devi) DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER, NULL, &profile_pops, NULL, &profile_id) != 0) { ddi_remove_minor_node(devi, NULL); - return (DDI_FAILURE); + return DDI_FAILURE; } profile_max = PROFILE_MAX_DEFAULT; - 
return (DDI_SUCCESS); + return DDI_SUCCESS; } /* @@ -684,22 +694,24 @@ profile_detach(dev_info_t *devi, ddi_detach_cmd_t cmd) case DDI_DETACH: break; case DDI_SUSPEND: - return (DDI_SUCCESS); + return DDI_SUCCESS; default: - return (DDI_FAILURE); + return DDI_FAILURE; } - if (dtrace_unregister(profile_id) != 0) - return (DDI_FAILURE); + if (dtrace_unregister(profile_id) != 0) { + return DDI_FAILURE; + } ddi_remove_minor_node(devi, NULL); - return (DDI_SUCCESS); + return DDI_SUCCESS; } #endif /* __APPLE__ */ d_open_t _profile_open; -int _profile_open(dev_t dev, int flags, int devtype, struct proc *p) +int +_profile_open(dev_t dev, int flags, int devtype, struct proc *p) { #pragma unused(dev,flags,devtype,p) return 0; @@ -713,23 +725,24 @@ int _profile_open(dev_t dev, int flags, int devtype, struct proc *p) */ static struct cdevsw profile_cdevsw = { - _profile_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ + _profile_open, /* open */ + eno_opcl, /* close */ + eno_rdwrt, /* read */ + eno_rdwrt, /* write */ + eno_ioctl, /* ioctl */ (stop_fcn_t *)nulldev, /* stop */ (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + NULL, /* tty's */ + eno_select, /* select */ + eno_mmap, /* mmap */ + eno_strat, /* strategy */ + eno_getc, /* getc */ + eno_putc, /* putc */ + 0 /* type */ }; -void profile_init( void ) +void +profile_init( void ) { int majdevno = cdevsw_add(PROFILE_MAJOR, &profile_cdevsw); @@ -738,6 +751,6 @@ void profile_init( void ) return; } - profile_attach( (dev_info_t*)(uintptr_t)majdevno); + profile_attach((dev_info_t*)(uintptr_t)majdevno); } #undef PROFILE_MAJOR diff --git a/bsd/dev/dtrace/sdt.c b/bsd/dev/dtrace/sdt.c index 1abf6f14c..d851fb659 100644 --- a/bsd/dev/dtrace/sdt.c +++ b/bsd/dev/dtrace/sdt.c @@ -59,32 +59,32 @@ struct savearea_t; /* Used anonymously */ typedef kern_return_t (*perfCallback)(int, struct savearea_t *, __unused int, __unused int); extern perfCallback tempDTraceTrapHook; extern kern_return_t fbt_perfCallback(int, struct savearea_t *, __unused int, __unused int); -#define SDT_PATCHVAL 0xdefc -#define SDT_AFRAMES 7 +#define SDT_PATCHVAL 0xdefc +#define SDT_AFRAMES 7 #elif defined(__arm64__) typedef kern_return_t (*perfCallback)(int, struct savearea_t *, __unused int, __unused int); extern perfCallback tempDTraceTrapHook; extern kern_return_t fbt_perfCallback(int, struct savearea_t *, __unused int, __unused int); -#define SDT_PATCHVAL 0xe7eeee7e -#define SDT_AFRAMES 7 +#define SDT_PATCHVAL 0xe7eeee7e +#define SDT_AFRAMES 7 #elif defined(__x86_64__) typedef kern_return_t (*perfCallback)(int, struct savearea_t *, uintptr_t *, int); extern perfCallback tempDTraceTrapHook; extern kern_return_t fbt_perfCallback(int, struct savearea_t *, uintptr_t *, int); -#define SDT_PATCHVAL 0xf0 -#define SDT_AFRAMES 6 +#define SDT_PATCHVAL 0xf0 +#define SDT_AFRAMES 6 #else #error Unknown architecture #endif -#define SDT_PROBETAB_SIZE 0x1000 /* 4k entries -- 16K total */ +#define SDT_PROBETAB_SIZE 0x1000 /* 4k entries -- 16K total */ #define DTRACE_PROBE_PREFIX "_dtrace_probe$" -static int sdt_verbose = 0; -sdt_probe_t **sdt_probetab; -int sdt_probetab_size; -int sdt_probetab_mask; +static int sdt_verbose = 0; +sdt_probe_t **sdt_probetab; +int sdt_probetab_size; +int sdt_probetab_mask; /*ARGSUSED*/ static void @@ -103,16 +103,18 @@ __sdt_provide_module(void *arg, struct modctl 
*ctl) * our providers, we'll refuse to provide anything. */ for (prov = sdt_providers; prov->sdtp_name != NULL; prov++) { - if (prov->sdtp_id == DTRACE_PROVNONE) + if (prov->sdtp_id == DTRACE_PROVNONE) { return; + } } - if (!mp || mp->sdt_nprobes != 0 || (sdpd = mp->sdt_probes) == NULL) + if (!mp || mp->sdt_nprobes != 0 || (sdpd = mp->sdt_probes) == NULL) { return; + } for (sdpd = mp->sdt_probes; sdpd != NULL; sdpd = sdpd->sdpd_next) { - const char *name = sdpd->sdpd_name, *func; - char *nname; + const char *name = sdpd->sdpd_name, *func; + char *nname; int i, j; dtrace_id_t id; @@ -138,7 +140,7 @@ __sdt_provide_module(void *arg, struct modctl *ctl) nname[i] = '\0'; - sdp = kmem_zalloc(sizeof (sdt_probe_t), KM_SLEEP); + sdp = kmem_zalloc(sizeof(sdt_probe_t), KM_SLEEP); sdp->sdp_loadcnt = ctl->mod_loadcnt; sdp->sdp_ctl = ctl; sdp->sdp_name = nname; @@ -147,8 +149,9 @@ __sdt_provide_module(void *arg, struct modctl *ctl) func = sdpd->sdpd_func; - if (func == NULL) + if (func == NULL) { func = ""; + } /* * We have our provider. Now create the probe. @@ -168,9 +171,9 @@ __sdt_provide_module(void *arg, struct modctl *ctl) mp->sdt_nprobes++; } -#if 0 - printf ("__sdt_provide_module: sdpd=0x%p sdp=0x%p name=%s, id=%d\n", sdpd, sdp, nname, sdp->sdp_id); -#endif +#if 0 + printf("__sdt_provide_module: sdpd=0x%p sdp=0x%p name=%s, id=%d\n", sdpd, sdp, nname, sdp->sdp_id); +#endif sdp->sdp_hashnext = sdt_probetab[SDT_ADDR2NDX(sdpd->sdpd_offset)]; @@ -228,7 +231,7 @@ sdt_destroy(void *arg, dtrace_id_t id, void *parg) kmem_free(sdp->sdp_name, sdp->sdp_namelen); sdp = sdp->sdp_next; - kmem_free(old, sizeof (sdt_probe_t)); + kmem_free(old, sizeof(sdt_probe_t)); } } @@ -276,25 +279,25 @@ sdt_enable(void *arg, dtrace_id_t id, void *parg) "in module %s: tempDTraceTrapHook already occupied.", sdp->sdp_name, ctl->mod_modname); } - return (0); + return 0; } while (sdp != NULL) { - (void)ml_nofault_copy( (vm_offset_t)&sdp->sdp_patchval, (vm_offset_t)sdp->sdp_patchpoint, - (vm_size_t)sizeof(sdp->sdp_patchval)); + (void)ml_nofault_copy((vm_offset_t)&sdp->sdp_patchval, (vm_offset_t)sdp->sdp_patchpoint, + (vm_size_t)sizeof(sdp->sdp_patchval)); /* * Make the patched instruction visible via a data + instruction * cache fush on platforms that need it */ - flush_dcache((vm_offset_t)sdp->sdp_patchpoint,(vm_size_t)sizeof(sdp->sdp_patchval), 0); - invalidate_icache((vm_offset_t)sdp->sdp_patchpoint,(vm_size_t)sizeof(sdp->sdp_patchval), 0); + flush_dcache((vm_offset_t)sdp->sdp_patchpoint, (vm_size_t)sizeof(sdp->sdp_patchval), 0); + invalidate_icache((vm_offset_t)sdp->sdp_patchpoint, (vm_size_t)sizeof(sdp->sdp_patchval), 0); sdp = sdp->sdp_next; } err: - return (0); + return 0; } /*ARGSUSED*/ @@ -307,18 +310,19 @@ sdt_disable(void *arg, dtrace_id_t id, void *parg) ctl->mod_nenabled--; - if (!ctl->mod_loaded || ctl->mod_loadcnt != sdp->sdp_loadcnt) + if (!ctl->mod_loaded || ctl->mod_loadcnt != sdp->sdp_loadcnt) { goto err; + } while (sdp != NULL) { - (void)ml_nofault_copy( (vm_offset_t)&sdp->sdp_savedval, (vm_offset_t)sdp->sdp_patchpoint, - (vm_size_t)sizeof(sdp->sdp_savedval)); + (void)ml_nofault_copy((vm_offset_t)&sdp->sdp_savedval, (vm_offset_t)sdp->sdp_patchpoint, + (vm_size_t)sizeof(sdp->sdp_savedval)); /* * Make the patched instruction visible via a data + instruction * cache flush on platforms that need it */ - flush_dcache((vm_offset_t)sdp->sdp_patchpoint,(vm_size_t)sizeof(sdp->sdp_savedval), 0); - invalidate_icache((vm_offset_t)sdp->sdp_patchpoint,(vm_size_t)sizeof(sdp->sdp_savedval), 0); + 
flush_dcache((vm_offset_t)sdp->sdp_patchpoint, (vm_size_t)sizeof(sdp->sdp_savedval), 0); + invalidate_icache((vm_offset_t)sdp->sdp_patchpoint, (vm_size_t)sizeof(sdp->sdp_savedval), 0); sdp = sdp->sdp_next; } @@ -327,16 +331,16 @@ err: } static dtrace_pops_t sdt_pops = { - .dtps_provide = NULL, - .dtps_provide_module = sdt_provide_module, - .dtps_enable = sdt_enable, - .dtps_disable = sdt_disable, - .dtps_suspend = NULL, - .dtps_resume = NULL, - .dtps_getargdesc = sdt_getargdesc, - .dtps_getargval = sdt_getarg, - .dtps_usermode = NULL, - .dtps_destroy = sdt_destroy, + .dtps_provide = NULL, + .dtps_provide_module = sdt_provide_module, + .dtps_enable = sdt_enable, + .dtps_disable = sdt_disable, + .dtps_suspend = NULL, + .dtps_resume = NULL, + .dtps_getargdesc = sdt_getargdesc, + .dtps_getargval = sdt_getarg, + .dtps_usermode = NULL, + .dtps_destroy = sdt_destroy, }; /*ARGSUSED*/ @@ -349,15 +353,16 @@ sdt_attach(dev_info_t *devi) 0, DDI_PSEUDO, 0) == DDI_FAILURE) { cmn_err(CE_NOTE, "/dev/sdt couldn't create minor node"); ddi_remove_minor_node(devi, NULL); - return (DDI_FAILURE); + return DDI_FAILURE; } - if (sdt_probetab_size == 0) + if (sdt_probetab_size == 0) { sdt_probetab_size = SDT_PROBETAB_SIZE; + } sdt_probetab_mask = sdt_probetab_size - 1; sdt_probetab = - kmem_zalloc(sdt_probetab_size * sizeof (sdt_probe_t *), KM_SLEEP); + kmem_zalloc(sdt_probetab_size * sizeof(sdt_probe_t *), KM_SLEEP); dtrace_invop_add(sdt_invop); for (prov = sdt_providers; prov->sdtp_name != NULL; prov++) { @@ -369,7 +374,7 @@ sdt_attach(dev_info_t *devi) } } - return (DDI_SUCCESS); + return DDI_SUCCESS; } /* @@ -387,31 +392,33 @@ sdt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) break; case DDI_SUSPEND: - return (DDI_SUCCESS); + return DDI_SUCCESS; default: - return (DDI_FAILURE); + return DDI_FAILURE; } for (prov = sdt_providers; prov->sdtp_name != NULL; prov++) { if (prov->sdtp_id != DTRACE_PROVNONE) { - if (dtrace_unregister(prov->sdtp_id) != 0) - return (DDI_FAILURE); + if (dtrace_unregister(prov->sdtp_id) != 0) { + return DDI_FAILURE; + } prov->sdtp_id = DTRACE_PROVNONE; } } dtrace_invop_remove(sdt_invop); - kmem_free(sdt_probetab, sdt_probetab_size * sizeof (sdt_probe_t *)); + kmem_free(sdt_probetab, sdt_probetab_size * sizeof(sdt_probe_t *)); - return (DDI_SUCCESS); + return DDI_SUCCESS; } #endif /* __APPLE__ */ d_open_t _sdt_open; -int _sdt_open(dev_t dev, int flags, int devtype, struct proc *p) +int +_sdt_open(dev_t dev, int flags, int devtype, struct proc *p) { #pragma unused(dev,flags,devtype,p) return 0; @@ -425,20 +432,20 @@ int _sdt_open(dev_t dev, int flags, int devtype, struct proc *p) */ static struct cdevsw sdt_cdevsw = { - _sdt_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ + _sdt_open, /* open */ + eno_opcl, /* close */ + eno_rdwrt, /* read */ + eno_rdwrt, /* write */ + eno_ioctl, /* ioctl */ (stop_fcn_t *)nulldev, /* stop */ (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + NULL, /* tty's */ + eno_select, /* select */ + eno_mmap, /* mmap */ + eno_strat, /* strategy */ + eno_getc, /* getc */ + eno_putc, /* putc */ + 0 /* type */ }; static struct modctl g_sdt_kernctl; @@ -447,7 +454,8 @@ static struct module g_sdt_mach_module; #include #include -void sdt_early_init( void ) +void +sdt_early_init( void ) { if (dtrace_sdt_probes_restricted()) { return; @@ -460,17 +468,17 @@ void 
sdt_early_init( void ) struct load_command *cmd; kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL; struct symtab_command *orig_st = NULL; - kernel_nlist_t *sym = NULL; + kernel_nlist_t *sym = NULL; char *strings; - unsigned int i; - + unsigned int i; + g_sdt_mach_module.sdt_nprobes = 0; g_sdt_mach_module.sdt_probes = NULL; - + g_sdt_kernctl.mod_address = (vm_address_t)&g_sdt_mach_module; g_sdt_kernctl.mod_size = 0; strncpy((char *)&(g_sdt_kernctl.mod_modname), "mach_kernel", KMOD_MAX_NAME); - + g_sdt_kernctl.mod_next = NULL; g_sdt_kernctl.mod_stale = NULL; g_sdt_kernctl.mod_id = 0; @@ -478,60 +486,65 @@ void sdt_early_init( void ) g_sdt_kernctl.mod_loaded = 1; g_sdt_kernctl.mod_flags = 0; g_sdt_kernctl.mod_nenabled = 0; - + mh = &_mh_execute_header; cmd = (struct load_command*) &mh[1]; for (i = 0; i < mh->ncmds; i++) { if (cmd->cmd == LC_SEGMENT_KERNEL) { kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd; - - if (LIT_STRNEQL(orig_sg->segname, SEG_TEXT)) + + if (LIT_STRNEQL(orig_sg->segname, SEG_TEXT)) { orig_ts = orig_sg; - else if (LIT_STRNEQL(orig_sg->segname, SEG_LINKEDIT)) + } else if (LIT_STRNEQL(orig_sg->segname, SEG_LINKEDIT)) { orig_le = orig_sg; - else if (LIT_STRNEQL(orig_sg->segname, "")) + } else if (LIT_STRNEQL(orig_sg->segname, "")) { orig_ts = orig_sg; /* kexts have a single unnamed segment */ - } - else if (cmd->cmd == LC_SYMTAB) + } + } else if (cmd->cmd == LC_SYMTAB) { orig_st = (struct symtab_command *) cmd; - + } + cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize); } - - if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL)) + + if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL)) { return; - + } + sym = (kernel_nlist_t *)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff); strings = (char *)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff); - + for (i = 0; i < orig_st->nsyms; i++) { uint8_t n_type = sym[i].n_type & (N_TYPE | N_EXT); char *name = strings + sym[i].n_un.n_strx; const char *prev_name; unsigned long best; unsigned int j; - + /* Check that the symbol is a global and that it has a name. */ - if (((N_SECT | N_EXT) != n_type && (N_ABS | N_EXT) != n_type)) + if (((N_SECT | N_EXT) != n_type && (N_ABS | N_EXT) != n_type)) { continue; - - if (0 == sym[i].n_un.n_strx) /* iff a null, "", name. */ + } + + if (0 == sym[i].n_un.n_strx) { /* iff a null, "", name. */ continue; - + } + /* Lop off omnipresent leading underscore. */ - if (*name == '_') + if (*name == '_') { name += 1; - + } + if (strncmp(name, DTRACE_PROBE_PREFIX, sizeof(DTRACE_PROBE_PREFIX) - 1) == 0) { sdt_probedesc_t *sdpd = kmem_alloc(sizeof(sdt_probedesc_t), KM_SLEEP); int len = strlen(name) + 1; - + sdpd->sdpd_name = kmem_alloc(len, KM_SLEEP); strncpy(sdpd->sdpd_name, name, len); /* NUL termination is ensured. */ - + prev_name = ""; best = 0; - + /* * Find the symbol immediately preceding the sdt probe site just discovered, * that symbol names the function containing the sdt probe. @@ -540,17 +553,21 @@ void sdt_early_init( void ) uint8_t jn_type = sym[j].n_type & N_TYPE; char *jname = strings + sym[j].n_un.n_strx; - if ((N_SECT != jn_type && N_ABS != jn_type)) + if ((N_SECT != jn_type && N_ABS != jn_type)) { continue; + } - if (0 == sym[j].n_un.n_strx) /* iff a null, "", name. */ + if (0 == sym[j].n_un.n_strx) { /* iff a null, "", name. 
*/ continue; + } - if (*jname == '_') + if (*jname == '_') { jname += 1; + } - if (*(unsigned long *)sym[i].n_value <= (unsigned long)sym[j].n_value) + if (*(unsigned long *)sym[i].n_value <= (unsigned long)sym[j].n_value) { continue; + } if ((unsigned long)sym[j].n_value > best) { best = (unsigned long)sym[j].n_value; @@ -560,7 +577,7 @@ void sdt_early_init( void ) sdpd->sdpd_func = kmem_alloc((len = strlen(prev_name) + 1), KM_SLEEP); strncpy(sdpd->sdpd_func, prev_name, len); /* NUL termination is ensured. */ - + sdpd->sdpd_offset = *(unsigned long *)sym[i].n_value; #if defined(__arm__) /* PR8353094 - mask off thumb-bit */ @@ -571,7 +588,7 @@ void sdt_early_init( void ) #if 0 printf("sdt_init: sdpd_offset=0x%lx, n_value=0x%lx, name=%s\n", - sdpd->sdpd_offset, *(unsigned long *)sym[i].n_value, name); + sdpd->sdpd_offset, *(unsigned long *)sym[i].n_value, name); #endif sdpd->sdpd_next = g_sdt_mach_module.sdt_probes; @@ -583,10 +600,11 @@ void sdt_early_init( void ) } } -void sdt_init( void ) +void +sdt_init( void ) { int majdevno = cdevsw_add(SDT_MAJOR, &sdt_cdevsw); - + if (majdevno < 0) { printf("sdt_init: failed to allocate a major number!\n"); return; @@ -609,13 +627,14 @@ sdt_provide_module(void *arg, struct modctl *ctl) ASSERT(ctl != NULL); ASSERT(dtrace_kernel_symbol_mode != DTRACE_KERNEL_SYMBOLS_NEVER); LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED); - - if (MOD_SDT_DONE(ctl)) + + if (MOD_SDT_DONE(ctl)) { return; - + } + if (MOD_IS_MACH_KERNEL(ctl)) { __sdt_provide_module(arg, &g_sdt_kernctl); - + sdt_probedesc_t *sdpd = g_sdt_mach_module.sdt_probes; while (sdpd) { sdt_probedesc_t *this_sdpd = sdpd; @@ -630,7 +649,7 @@ sdt_provide_module(void *arg, struct modctl *ctl) * APPLE NOTE: sdt probes for kexts not yet implemented */ } - + /* Need to mark this module as completed */ ctl->mod_flags |= MODCTL_SDT_PROBES_PROVIDED; } diff --git a/bsd/dev/dtrace/sdt_subr.c b/bsd/dev/dtrace/sdt_subr.c index 03174ee08..65ae963fd 100644 --- a/bsd/dev/dtrace/sdt_subr.c +++ b/bsd/dev/dtrace/sdt_subr.c @@ -28,51 +28,51 @@ #include static dtrace_pattr_t vtrace_attr = { -{ DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_ISA }, }; static dtrace_pattr_t info_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { 
DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, }; static dtrace_pattr_t fpu_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_CPU }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_CPU }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, }; static dtrace_pattr_t fsinfo_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, }; static dtrace_pattr_t stab_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, }; static dtrace_pattr_t sdt_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, }; sdt_provider_t sdt_providers[] = { @@ -188,622 +188,622 @@ sdt_argdesc_t sdt_args[] = { { "fsinfo", NULL, 1, 1, "int", 
NULL }, { "nfsv3", "op-getattr-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-getattr-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-getattr-start", 2, 3, "GETATTR3args *", NULL }, { "nfsv3", "op-getattr-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-getattr-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-getattr-done", 2, 3, "GETATTR3res *", NULL }, { "nfsv3", "op-setattr-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-setattr-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-setattr-start", 2, 3, "SETATTR3args *", NULL }, { "nfsv3", "op-setattr-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-setattr-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-setattr-done", 2, 3, "SETATTR3res *", NULL }, { "nfsv3", "op-lookup-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-lookup-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-lookup-start", 2, 3, "LOOKUP3args *", NULL }, { "nfsv3", "op-lookup-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-lookup-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-lookup-done", 2, 3, "LOOKUP3res *", NULL }, { "nfsv3", "op-access-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-access-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-access-start", 2, 3, "ACCESS3args *", NULL }, { "nfsv3", "op-access-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-access-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-access-done", 2, 3, "ACCESS3res *", NULL }, { "nfsv3", "op-commit-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-commit-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-commit-start", 2, 3, "COMMIT3args *", NULL }, { "nfsv3", "op-commit-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-commit-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-commit-done", 2, 3, "COMMIT3res *", NULL }, { "nfsv3", "op-create-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-create-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-create-start", 2, 3, "CREATE3args *", NULL }, { "nfsv3", "op-create-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-create-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-create-done", 2, 3, "CREATE3res *", NULL }, { "nfsv3", "op-fsinfo-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-fsinfo-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-fsinfo-start", 2, 3, "FSINFO3args *", NULL }, { "nfsv3", "op-fsinfo-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-fsinfo-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", 
"op-fsinfo-done", 2, 3, "FSINFO3res *", NULL }, { "nfsv3", "op-fsstat-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-fsstat-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-fsstat-start", 2, 3, "FSSTAT3args *", NULL }, { "nfsv3", "op-fsstat-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-fsstat-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-fsstat-done", 2, 3, "FSSTAT3res *", NULL }, { "nfsv3", "op-link-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-link-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-link-start", 2, 3, "LINK3args *", NULL }, { "nfsv3", "op-link-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-link-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-link-done", 2, 3, "LINK3res *", NULL }, { "nfsv3", "op-mkdir-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-mkdir-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-mkdir-start", 2, 3, "MKDIR3args *", NULL }, { "nfsv3", "op-mkdir-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-mkdir-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-mkdir-done", 2, 3, "MKDIR3res *", NULL }, { "nfsv3", "op-mknod-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-mknod-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-mknod-start", 2, 3, "MKNOD3args *", NULL }, { "nfsv3", "op-mknod-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-mknod-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-mknod-done", 2, 3, "MKNOD3res *", NULL }, { "nfsv3", "op-null-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-null-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-null-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-null-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-pathconf-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-pathconf-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-pathconf-start", 2, 3, "PATHCONF3args *", NULL }, { "nfsv3", "op-pathconf-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-pathconf-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-pathconf-done", 2, 3, "PATHCONF3res *", NULL }, { "nfsv3", "op-read-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-read-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-read-start", 2, 3, "READ3args *", NULL }, { "nfsv3", "op-read-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-read-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-read-done", 2, 3, "READ3res *", NULL }, { "nfsv3", "op-readdir-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, 
{ "nfsv3", "op-readdir-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-readdir-start", 2, 3, "READDIR3args *", NULL }, { "nfsv3", "op-readdir-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-readdir-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-readdir-done", 2, 3, "READDIR3res *", NULL }, { "nfsv3", "op-readdirplus-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-readdirplus-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-readdirplus-start", 2, 3, "READDIRPLUS3args *", NULL }, { "nfsv3", "op-readdirplus-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-readdirplus-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-readdirplus-done", 2, 3, "READDIRPLUS3res *", NULL }, { "nfsv3", "op-readlink-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-readlink-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-readlink-start", 2, 3, "READLINK3args *", NULL }, { "nfsv3", "op-readlink-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-readlink-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-readlink-done", 2, 3, "READLINK3res *", NULL }, { "nfsv3", "op-remove-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-remove-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-remove-start", 2, 3, "REMOVE3args *", NULL }, { "nfsv3", "op-remove-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-remove-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-remove-done", 2, 3, "REMOVE3res *", NULL }, { "nfsv3", "op-rename-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-rename-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-rename-start", 2, 3, "RENAME3args *", NULL }, { "nfsv3", "op-rename-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-rename-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-rename-done", 2, 3, "RENAME3res *", NULL }, { "nfsv3", "op-rmdir-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-rmdir-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-rmdir-start", 2, 3, "RMDIR3args *", NULL }, { "nfsv3", "op-rmdir-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-rmdir-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-rmdir-done", 2, 3, "RMDIR3res *", NULL }, { "nfsv3", "op-setattr-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-setattr-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-setattr-start", 2, 3, "SETATTR3args *", NULL }, { "nfsv3", "op-setattr-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-setattr-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-setattr-done", 2, 3, "SETATTR3res *", NULL }, { "nfsv3", 
"op-symlink-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-symlink-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-symlink-start", 2, 3, "SYMLINK3args *", NULL }, { "nfsv3", "op-symlink-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-symlink-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-symlink-done", 2, 3, "SYMLINK3res *", NULL }, { "nfsv3", "op-write-start", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-write-start", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-write-start", 2, 3, "WRITE3args *", NULL }, { "nfsv3", "op-write-done", 0, 0, "struct svc_req *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv3", "op-write-done", 1, 1, "nfsv3oparg_t *", - "nfsv3opinfo_t *" }, + "nfsv3opinfo_t *" }, { "nfsv3", "op-write-done", 2, 3, "WRITE3res *", NULL }, { "nfsv4", "null-start", 0, 0, "struct svc_req *", "conninfo_t *" }, { "nfsv4", "null-done", 0, 0, "struct svc_req *", "conninfo_t *" }, { "nfsv4", "compound-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "compound-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "compound-start", 2, 1, "COMPOUND4args *", NULL }, { "nfsv4", "compound-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "compound-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "compound-done", 2, 1, "COMPOUND4res *", NULL }, { "nfsv4", "op-access-start", 0, 0, "struct compound_state *", - "conninfo_t *"}, + "conninfo_t *"}, { "nfsv4", "op-access-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-access-start", 2, 1, "ACCESS4args *", NULL }, { "nfsv4", "op-access-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-access-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-access-done", 2, 1, "ACCESS4res *", NULL }, { "nfsv4", "op-close-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-close-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-close-start", 2, 1, "CLOSE4args *", NULL }, { "nfsv4", "op-close-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-close-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-close-done", 2, 1, "CLOSE4res *", NULL }, { "nfsv4", "op-commit-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-commit-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-commit-start", 2, 1, "COMMIT4args *", NULL }, { "nfsv4", "op-commit-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-commit-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-commit-done", 2, 1, "COMMIT4res *", NULL }, { "nfsv4", "op-create-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-create-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-create-start", 2, 1, 
"CREATE4args *", NULL }, { "nfsv4", "op-create-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-create-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-create-done", 2, 1, "CREATE4res *", NULL }, { "nfsv4", "op-delegpurge-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-delegpurge-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-delegpurge-start", 2, 1, "DELEGPURGE4args *", NULL }, { "nfsv4", "op-delegpurge-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-delegpurge-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-delegpurge-done", 2, 1, "DELEGPURGE4res *", NULL }, { "nfsv4", "op-delegreturn-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-delegreturn-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-delegreturn-start", 2, 1, "DELEGRETURN4args *", NULL }, { "nfsv4", "op-delegreturn-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-delegreturn-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-delegreturn-done", 2, 1, "DELEGRETURN4res *", NULL }, { "nfsv4", "op-getattr-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-getattr-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-getattr-start", 2, 1, "GETATTR4args *", NULL }, { "nfsv4", "op-getattr-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-getattr-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-getattr-done", 2, 1, "GETATTR4res *", NULL }, { "nfsv4", "op-getfh-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-getfh-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-getfh-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-getfh-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-getfh-done", 2, 1, "GETFH4res *", NULL }, { "nfsv4", "op-link-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-link-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-link-start", 2, 1, "LINK4args *", NULL }, { "nfsv4", "op-link-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-link-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-link-done", 2, 1, "LINK4res *", NULL }, { "nfsv4", "op-lock-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-lock-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-lock-start", 2, 1, "LOCK4args *", NULL }, { "nfsv4", "op-lock-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-lock-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-lock-done", 2, 1, "LOCK4res *", NULL }, { "nfsv4", 
"op-lockt-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-lockt-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-lockt-start", 2, 1, "LOCKT4args *", NULL }, { "nfsv4", "op-lockt-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-lockt-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-lockt-done", 2, 1, "LOCKT4res *", NULL }, { "nfsv4", "op-locku-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-locku-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-locku-start", 2, 1, "LOCKU4args *", NULL }, { "nfsv4", "op-locku-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-locku-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-locku-done", 2, 1, "LOCKU4res *", NULL }, { "nfsv4", "op-lookup-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-lookup-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-lookup-start", 2, 1, "LOOKUP4args *", NULL }, { "nfsv4", "op-lookup-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-lookup-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-lookup-done", 2, 1, "LOOKUP4res *", NULL }, { "nfsv4", "op-lookupp-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-lookupp-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-lookupp-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-lookupp-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-lookupp-done", 2, 1, "LOOKUPP4res *", NULL }, { "nfsv4", "op-nverify-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-nverify-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-nverify-start", 2, 1, "NVERIFY4args *", NULL }, { "nfsv4", "op-nverify-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-nverify-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-nverify-done", 2, 1, "NVERIFY4res *", NULL }, { "nfsv4", "op-open-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-open-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-open-start", 2, 1, "OPEN4args *", NULL }, { "nfsv4", "op-open-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-open-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-open-done", 2, 1, "OPEN4res *", NULL }, { "nfsv4", "op-open-confirm-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-open-confirm-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-open-confirm-start", 2, 1, "OPEN_CONFIRM4args *", NULL }, { "nfsv4", "op-open-confirm-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + 
"conninfo_t *" }, { "nfsv4", "op-open-confirm-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-open-confirm-done", 2, 1, "OPEN_CONFIRM4res *", NULL }, { "nfsv4", "op-open-downgrade-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-open-downgrade-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-open-downgrade-start", 2, 1, "OPEN_DOWNGRADE4args *", NULL }, { "nfsv4", "op-open-downgrade-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-open-downgrade-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-open-downgrade-done", 2, 1, "OPEN_DOWNGRADE4res *", NULL }, { "nfsv4", "op-openattr-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-openattr-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-openattr-start", 2, 1, "OPENATTR4args *", NULL }, { "nfsv4", "op-openattr-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-openattr-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-openattr-done", 2, 1, "OPENATTR4res *", NULL }, { "nfsv4", "op-putfh-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-putfh-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-putfh-start", 2, 1, "PUTFH4args *", NULL }, { "nfsv4", "op-putfh-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-putfh-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-putfh-done", 2, 1, "PUTFH4res *", NULL }, { "nfsv4", "op-putpubfh-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-putpubfh-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-putpubfh-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-putpubfh-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-putpubfh-done", 2, 1, "PUTPUBFH4res *", NULL }, { "nfsv4", "op-putrootfh-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-putrootfh-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-putrootfh-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-putrootfh-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-putrootfh-done", 2, 1, "PUTROOTFH4res *", NULL }, { "nfsv4", "op-read-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-read-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-read-start", 2, 1, "READ4args *", NULL }, { "nfsv4", "op-read-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-read-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-read-done", 2, 1, "READ4res *", NULL }, { "nfsv4", "op-readdir-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-readdir-start", 
1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-readdir-start", 2, 1, "READDIR4args *", NULL }, { "nfsv4", "op-readdir-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-readdir-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-readdir-done", 2, 1, "READDIR4res *", NULL }, { "nfsv4", "op-readlink-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-readlink-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-readlink-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-readlink-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-readlink-done", 2, 1, "READLINK4res *", NULL }, { "nfsv4", "op-release-lockowner-start", 0, 0, - "struct compound_state *", "conninfo_t *" }, + "struct compound_state *", "conninfo_t *" }, { "nfsv4", "op-release-lockowner-start", 1, 0, - "struct compound_state *", "nfsv4opinfo_t *" }, + "struct compound_state *", "nfsv4opinfo_t *" }, { "nfsv4", "op-release-lockowner-start", 2, 1, - "RELEASE_LOCKOWNER4args *", NULL }, + "RELEASE_LOCKOWNER4args *", NULL }, { "nfsv4", "op-release-lockowner-done", 0, 0, - "struct compound_state *", "conninfo_t *" }, + "struct compound_state *", "conninfo_t *" }, { "nfsv4", "op-release-lockowner-done", 1, 0, - "struct compound_state *", "nfsv4opinfo_t *" }, + "struct compound_state *", "nfsv4opinfo_t *" }, { "nfsv4", "op-release-lockowner-done", 2, 1, - "RELEASE_LOCKOWNER4res *", NULL }, + "RELEASE_LOCKOWNER4res *", NULL }, { "nfsv4", "op-remove-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-remove-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-remove-start", 2, 1, "REMOVE4args *", NULL }, { "nfsv4", "op-remove-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-remove-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-remove-done", 2, 1, "REMOVE4res *", NULL }, { "nfsv4", "op-rename-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-rename-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-rename-start", 2, 1, "RENAME4args *", NULL }, { "nfsv4", "op-rename-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-rename-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-rename-done", 2, 1, "RENAME4res *", NULL }, { "nfsv4", "op-renew-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-renew-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-renew-start", 2, 1, "RENEW4args *", NULL }, { "nfsv4", "op-renew-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-renew-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-renew-done", 2, 1, "RENEW4res *", NULL }, { "nfsv4", "op-restorefh-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-restorefh-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t 
*" }, { "nfsv4", "op-restorefh-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-restorefh-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-restorefh-done", 2, 1, "RESTOREFH4res *", NULL }, { "nfsv4", "op-savefh-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-savefh-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-savefh-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-savefh-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-savefh-done", 2, 1, "SAVEFH4res *", NULL }, { "nfsv4", "op-secinfo-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-secinfo-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-secinfo-start", 2, 1, "SECINFO4args *", NULL }, { "nfsv4", "op-secinfo-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-secinfo-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-secinfo-done", 2, 1, "SECINFO4res *", NULL }, { "nfsv4", "op-setattr-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-setattr-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-setattr-start", 2, 1, "SETATTR4args *", NULL }, { "nfsv4", "op-setattr-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-setattr-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-setattr-done", 2, 1, "SETATTR4res *", NULL }, { "nfsv4", "op-setclientid-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-setclientid-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-setclientid-start", 2, 1, "SETCLIENTID4args *", NULL }, { "nfsv4", "op-setclientid-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-setclientid-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-setclientid-done", 2, 1, "SETCLIENTID4res *", NULL }, { "nfsv4", "op-setclientid-confirm-start", 0, 0, - "struct compound_state *", "conninfo_t *" }, + "struct compound_state *", "conninfo_t *" }, { "nfsv4", "op-setclientid-confirm-start", 1, 0, - "struct compound_state *", "nfsv4opinfo_t *" }, + "struct compound_state *", "nfsv4opinfo_t *" }, { "nfsv4", "op-setclientid-confirm-start", 2, 1, - "SETCLIENTID_CONFIRM4args *", NULL }, + "SETCLIENTID_CONFIRM4args *", NULL }, { "nfsv4", "op-setclientid-confirm-done", 0, 0, - "struct compound_state *", "conninfo_t *" }, + "struct compound_state *", "conninfo_t *" }, { "nfsv4", "op-setclientid-confirm-done", 1, 0, - "struct compound_state *", "nfsv4opinfo_t *" }, + "struct compound_state *", "nfsv4opinfo_t *" }, { "nfsv4", "op-setclientid-confirm-done", 2, 1, - "SETCLIENTID_CONFIRM4res *", NULL }, + "SETCLIENTID_CONFIRM4res *", NULL }, { "nfsv4", "op-verify-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-verify-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-verify-start", 2, 1, "VERIFY4args *", 
NULL }, { "nfsv4", "op-verify-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-verify-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-verify-done", 2, 1, "VERIFY4res *", NULL }, { "nfsv4", "op-write-start", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-write-start", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-write-start", 2, 1, "WRITE4args *", NULL }, { "nfsv4", "op-write-done", 0, 0, "struct compound_state *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "op-write-done", 1, 0, "struct compound_state *", - "nfsv4opinfo_t *" }, + "nfsv4opinfo_t *" }, { "nfsv4", "op-write-done", 2, 1, "WRITE4res *", NULL }, { "nfsv4", "cb-recall-start", 0, 0, "rfs4_client_t *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "cb-recall-start", 1, 1, "rfs4_deleg_state_t *", - "nfsv4cbinfo_t *" }, + "nfsv4cbinfo_t *" }, { "nfsv4", "cb-recall-start", 2, 2, "CB_RECALL4args *", NULL }, { "nfsv4", "cb-recall-done", 0, 0, "rfs4_client_t *", - "conninfo_t *" }, + "conninfo_t *" }, { "nfsv4", "cb-recall-done", 1, 1, "rfs4_deleg_state_t *", - "nfsv4cbinfo_t *" }, + "nfsv4cbinfo_t *" }, { "nfsv4", "cb-recall-done", 2, 2, "CB_RECALL4res *", NULL }, { "ip", "send", 0, 0, "struct mbuf *", "pktinfo_t *" }, @@ -974,21 +974,26 @@ sdt_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc) for (i = 0; sdt_args[i].sda_provider != NULL; i++) { sdt_argdesc_t *a = &sdt_args[i]; - if (strncmp(sdp->sdp_provider->sdtp_name, a->sda_provider, strlen(a->sda_provider) + 1) != 0) + if (strncmp(sdp->sdp_provider->sdtp_name, a->sda_provider, strlen(a->sda_provider) + 1) != 0) { continue; + } if (a->sda_name != NULL && - strncmp(sdp->sdp_name, a->sda_name, strlen(a->sda_name) + 1) != 0) + strncmp(sdp->sdp_name, a->sda_name, strlen(a->sda_name) + 1) != 0) { continue; + } - if (desc->dtargd_ndx != a->sda_ndx) + if (desc->dtargd_ndx != a->sda_ndx) { continue; + } - if (a->sda_native != NULL) + if (a->sda_native != NULL) { (void) strlcpy(desc->dtargd_native, a->sda_native, DTRACE_ARGTYPELEN); + } - if (a->sda_xlate != NULL) + if (a->sda_xlate != NULL) { (void) strlcpy(desc->dtargd_xlate, a->sda_xlate, DTRACE_ARGTYPELEN); + } desc->dtargd_mapping = a->sda_mapping; return; diff --git a/bsd/dev/dtrace/systrace.c b/bsd/dev/dtrace/systrace.c index 10ba83433..27d199eeb 100644 --- a/bsd/dev/dtrace/systrace.c +++ b/bsd/dev/dtrace/systrace.c @@ -64,7 +64,7 @@ #include #if defined (__x86_64__) -#define SYSTRACE_ARTIFICIAL_FRAMES 2 +#define SYSTRACE_ARTIFICIAL_FRAMES 2 #define MACHTRACE_ARTIFICIAL_FRAMES 3 #elif defined(__arm__) || defined(__arm64__) #define SYSTRACE_ARTIFICIAL_FRAMES 2 @@ -90,7 +90,7 @@ extern const char *syscallnames[]; extern lck_attr_t* dtrace_lck_attr; extern lck_grp_t* dtrace_lck_grp; -static lck_mtx_t dtrace_systrace_lock; /* probe state lock */ +static lck_mtx_t dtrace_systrace_lock; /* probe state lock */ systrace_sysent_t *systrace_sysent = NULL; void (*systrace_probe)(dtrace_id_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); @@ -108,7 +108,7 @@ systrace_stub(dtrace_id_t id, uint64_t arg0, uint64_t arg1, int32_t dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv) { - unsigned short code; /* The system call number */ + unsigned short code; /* The system call number */ systrace_sysent_t *sy; dtrace_id_t id; @@ -135,7 +135,7 @@ dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv) code = 
saved_state32(tagged_regs)->eax & I386_SYSCALL_NUMBER_MASK; if (code == 0) { - vm_offset_t params = (vm_offset_t) (saved_state32(tagged_regs)->uesp + sizeof (int)); + vm_offset_t params = (vm_offset_t) (saved_state32(tagged_regs)->uesp + sizeof(int)); code = fuword(params); } } @@ -151,10 +151,11 @@ dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv) arm_saved_state_t *arm_regs = (arm_saved_state_t *) find_user_regs(current_thread()); /* Check for indirect system call */ - if (arm_regs->r[12] != 0) + if (arm_regs->r[12] != 0) { code = arm_regs->r[12]; - else + } else { code = arm_regs->r[0]; + } } #elif defined(__arm64__) { @@ -164,23 +165,21 @@ dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv) * ... and for u64 can be in either x0 or x16 */ - /* see bsd/dev/arm/systemcalls.c:arm_get_syscall_number */ + /* see bsd/dev/arm/systemcalls.c:arm_get_syscall_number */ arm_saved_state_t *arm_regs = (arm_saved_state_t *) find_user_regs(current_thread()); if (is_saved_state32(arm_regs)) { - /* Check for indirect system call */ + /* Check for indirect system call */ if (saved_state32(arm_regs)->r[12] != 0) { code = saved_state32(arm_regs)->r[12]; - } - else { + } else { code = saved_state32(arm_regs)->r[0]; } } else { /* Check for indirect system call */ - if (saved_state64(arm_regs)->x[ARM64_SYSCALL_CODE_REG_NUM] != 0 ) { + if (saved_state64(arm_regs)->x[ARM64_SYSCALL_CODE_REG_NUM] != 0) { code = saved_state64(arm_regs)->x[ARM64_SYSCALL_CODE_REG_NUM]; - } - else { + } else { code = saved_state64(arm_regs)->x[0]; } } @@ -195,15 +194,17 @@ dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv) systrace_args(code, ip, uargs); if ((id = sy->stsy_entry) != DTRACE_IDNONE) { - uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); - if (uthread) + uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); + if (uthread) { uthread->t_dtrace_syscall_args = uargs; - + } + static_assert(SYSTRACE_NARGS >= 5, "not enough system call arguments"); (*systrace_probe)(id, uargs[0], uargs[1], uargs[2], uargs[3], uargs[4]); - - if (uthread) + + if (uthread) { uthread->t_dtrace_syscall_args = NULL; + } } @@ -227,14 +228,14 @@ dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv) if ((id = sy->stsy_return) != DTRACE_IDNONE) { uint64_t munged_rv0, munged_rv1; - uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); + uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); - if (uthread) + if (uthread) { uthread->t_dtrace_errno = rval; /* Establish t_dtrace_errno now in case this enabling refers to it. */ - + } /* - * "Decode" rv for use in the call to dtrace_probe() - */ + * "Decode" rv for use in the call to dtrace_probe() + */ if (rval == ERESTART) { munged_rv0 = -1LL; /* System call will be reissued in user mode. Make DTrace report a -1 return. */ munged_rv1 = -1LL; @@ -284,20 +285,20 @@ dtrace_systrace_syscall(struct proc *pp, void *uap, int *rv) * "This is a bit of an historical artifact. At first, the syscall provider just * had its return value in arg0, and the fbt and pid providers had their return * values in arg1 (so that we could use arg0 for the offset of the return site). - * + * * We inevitably started writing scripts where we wanted to see the return * values from probes in all three providers, and we made this script easier * to write by replicating the syscall return values in arg1 to match fbt and * pid. 
We debated briefly about removing the return value from arg0, but * decided that it would be less confusing to have the same data in two places * than to have some non-helpful, non-intuitive value in arg0. - * + * * This change was made 4/23/2003 according to the DTrace project's putback log." - */ + */ (*systrace_probe)(id, munged_rv0, munged_rv0, munged_rv1, (uint64_t)rval, 0); } - return (rval); + return rval; } void @@ -311,14 +312,14 @@ dtrace_systrace_syscall_return(unsigned short code, int rval, int *rv) if ((id = sy->stsy_return) != DTRACE_IDNONE) { uint64_t munged_rv0, munged_rv1; - uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); + uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); - if (uthread) + if (uthread) { uthread->t_dtrace_errno = rval; /* Establish t_dtrace_errno now in case this enabling refers to it. */ - + } /* - * "Decode" rv for use in the call to dtrace_probe() - */ + * "Decode" rv for use in the call to dtrace_probe() + */ if (rval == ERESTART) { munged_rv0 = -1LL; /* System call will be reissued in user mode. Make DTrace report a -1 return. */ munged_rv1 = -1LL; @@ -366,11 +367,11 @@ dtrace_systrace_syscall_return(unsigned short code, int rval, int *rv) } } -#define SYSTRACE_SHIFT 16 -#define SYSTRACE_ISENTRY(x) ((int)(x) >> SYSTRACE_SHIFT) -#define SYSTRACE_SYSNUM(x) ((int)(x) & ((1 << SYSTRACE_SHIFT) - 1)) -#define SYSTRACE_ENTRY(id) ((1 << SYSTRACE_SHIFT) | (id)) -#define SYSTRACE_RETURN(id) (id) +#define SYSTRACE_SHIFT 16 +#define SYSTRACE_ISENTRY(x) ((int)(x) >> SYSTRACE_SHIFT) +#define SYSTRACE_SYSNUM(x) ((int)(x) & ((1 << SYSTRACE_SHIFT) - 1)) +#define SYSTRACE_ENTRY(id) ((1 << SYSTRACE_SHIFT) | (id)) +#define SYSTRACE_RETURN(id) (id) #if ((1 << SYSTRACE_SHIFT) <= NSYSCALL) #error 1 << SYSTRACE_SHIFT must exceed number of system calls @@ -387,13 +388,12 @@ static dtrace_provider_id_t systrace_id; static void systrace_init(struct sysent *actual, systrace_sysent_t **interposed) { - systrace_sysent_t *ssysent = *interposed; /* Avoid sysent shadow warning - from bsd/sys/sysent.h */ + systrace_sysent_t *ssysent = *interposed; /* Avoid sysent shadow warning + * from bsd/sys/sysent.h */ unsigned int i; if (ssysent == NULL) { - *interposed = ssysent = kmem_zalloc(sizeof (systrace_sysent_t) * + *interposed = ssysent = kmem_zalloc(sizeof(systrace_sysent_t) * NSYSCALL, KM_SLEEP); } @@ -401,11 +401,13 @@ systrace_init(struct sysent *actual, systrace_sysent_t **interposed) struct sysent *a = &actual[i]; systrace_sysent_t *s = &ssysent[i]; - if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a)) + if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a)) { continue; + } - if (a->sy_callc == dtrace_systrace_syscall) + if (a->sy_callc == dtrace_systrace_syscall) { continue; + } s->stsy_underlying = a->sy_callc; s->stsy_return_type = a->sy_return_type; @@ -421,18 +423,21 @@ systrace_provide(void *arg, const dtrace_probedesc_t *desc) #pragma unused(arg) /* __APPLE__ */ unsigned int i; - if (desc != NULL) + if (desc != NULL) { return; + } systrace_init(sysent, &systrace_sysent); for (i = 0; i < NSYSCALL; i++) { - if (systrace_sysent[i].stsy_underlying == NULL) + if (systrace_sysent[i].stsy_underlying == NULL) { continue; + } if (dtrace_probe_lookup(systrace_id, NULL, - syscallnames[i], "entry") != 0) + syscallnames[i], "entry") != 0) { continue; + } (void) dtrace_probe_create(systrace_id, NULL, syscallnames[i], "entry", SYSTRACE_ARTIFICIAL_FRAMES, @@ -472,7 +477,7 @@ static int systrace_enable(void *arg, dtrace_id_t id, void *parg) { #pragma unused(arg) /* __APPLE__ */ - + int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg); int
enabled = (systrace_sysent[sysnum].stsy_entry != DTRACE_IDNONE || systrace_sysent[sysnum].stsy_return != DTRACE_IDNONE); @@ -485,7 +490,7 @@ systrace_enable(void *arg, dtrace_id_t id, void *parg) if (enabled) { ASSERT(sysent[sysnum].sy_callc == dtrace_systrace_syscall); - return(0); + return 0; } lck_mtx_lock(&dtrace_systrace_lock); @@ -494,7 +499,7 @@ systrace_enable(void *arg, dtrace_id_t id, void *parg) ml_nofault_copy((vm_offset_t)&dss, (vm_offset_t)&sysent[sysnum].sy_callc, sizeof(vm_offset_t)); } lck_mtx_unlock(&dtrace_systrace_lock); - return (0); + return 0; } /*ARGSUSED*/ @@ -502,17 +507,17 @@ static void systrace_disable(void *arg, dtrace_id_t id, void *parg) { #pragma unused(arg,id) /* __APPLE__ */ - + int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg); int disable = (systrace_sysent[sysnum].stsy_entry == DTRACE_IDNONE || systrace_sysent[sysnum].stsy_return == DTRACE_IDNONE); if (disable) { lck_mtx_lock(&dtrace_systrace_lock); - if (sysent[sysnum].sy_callc == dtrace_systrace_syscall) + if (sysent[sysnum].sy_callc == dtrace_systrace_syscall) { ml_nofault_copy((vm_offset_t)&systrace_sysent[sysnum].stsy_underlying, (vm_offset_t)&sysent[sysnum].sy_callc, sizeof(systrace_sysent[sysnum].stsy_underlying)); + } lck_mtx_unlock(&dtrace_systrace_lock); - } if (SYSTRACE_ISENTRY((uintptr_t)parg)) { @@ -523,24 +528,24 @@ systrace_disable(void *arg, dtrace_id_t id, void *parg) } static dtrace_pattr_t systrace_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, }; static dtrace_pops_t systrace_pops = { - .dtps_provide = systrace_provide, - .dtps_provide_module = NULL, - .dtps_enable = systrace_enable, - .dtps_disable = systrace_disable, - .dtps_suspend = NULL, - .dtps_resume = NULL, - .dtps_getargdesc = systrace_getargdesc, - .dtps_getargval = systrace_getargval, - .dtps_usermode = NULL, - .dtps_destroy = systrace_destroy + .dtps_provide = systrace_provide, + .dtps_provide_module = NULL, + .dtps_enable = systrace_enable, + .dtps_disable = systrace_disable, + .dtps_suspend = NULL, + .dtps_resume = NULL, + .dtps_getargdesc = systrace_getargdesc, + .dtps_getargval = systrace_getargval, + .dtps_usermode = NULL, + .dtps_destroy = systrace_destroy }; static int @@ -555,10 +560,10 @@ systrace_attach(dev_info_t *devi) &systrace_pops, NULL, &systrace_id) != 0) { systrace_probe = systrace_stub; ddi_remove_minor_node(devi, NULL); - return (DDI_FAILURE); + return DDI_FAILURE; } - return (DDI_SUCCESS); + return DDI_SUCCESS; } @@ -573,17 +578,18 @@ systrace_detach(dev_info_t *devi, ddi_detach_cmd_t cmd) case DDI_DETACH: break; case DDI_SUSPEND: - return (DDI_SUCCESS); + return DDI_SUCCESS; default: - return (DDI_FAILURE); + return DDI_FAILURE; } - if (dtrace_unregister(systrace_id) != 0) - return (DDI_FAILURE); + if (dtrace_unregister(systrace_id) != 0) { + return DDI_FAILURE; + } 
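/*
 * Illustrative userland analogue (editor's addition, not part of the patch)
 * of the interposition pattern in systrace_enable()/systrace_disable() above:
 * save the table's function pointer, swap in a tracing wrapper under a lock,
 * and restore the saved original on disable.  The kernel performs the store
 * with ml_nofault_copy(); plain assignment stands in for it here, and
 * pthreads stands in for lck_mtx.
 */
#include <pthread.h>
#include <stdio.h>

typedef int (*callc_t)(int);

static int real_syscall(int x) { return x + 1; }

static callc_t table[1] = { real_syscall };    /* sysent analogue */
static callc_t underlying;                     /* stsy_underlying analogue */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int
traced_syscall(int x)                  /* dtrace_systrace_syscall analogue */
{
	printf("entry probe: arg0=%d\n", x);
	int rv = underlying(x);
	printf("return probe: rv=%d\n", rv);
	return rv;
}

static void
enable(void)
{
	pthread_mutex_lock(&lock);
	if (table[0] != traced_syscall) {
		underlying = table[0];
		table[0] = traced_syscall;
	}
	pthread_mutex_unlock(&lock);
}

static void
disable(void)
{
	pthread_mutex_lock(&lock);
	if (table[0] == traced_syscall) {
		table[0] = underlying;
	}
	pthread_mutex_unlock(&lock);
}

int
main(void)
{
	enable();
	(void)table[0](41);
	disable();
	return 0;
}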
ddi_remove_minor_node(devi, NULL); systrace_probe = systrace_stub; - return (DDI_SUCCESS); + return DDI_SUCCESS; } #endif /* __APPLE__ */ @@ -594,14 +600,14 @@ typedef kern_return_t (*mach_call_t)(void *); typedef void mach_munge_t(void *); typedef struct { - int mach_trap_arg_count; - kern_return_t (*mach_trap_function)(void *); + int mach_trap_arg_count; + kern_return_t (*mach_trap_function)(void *); #if defined(__arm64__) || defined(__x86_64__) - mach_munge_t *mach_trap_arg_munge32; /* system call arguments for 32-bit */ + mach_munge_t *mach_trap_arg_munge32; /* system call arguments for 32-bit */ #endif - int mach_trap_u32_words; -#if MACH_ASSERT - const char* mach_trap_name; + int mach_trap_u32_words; +#if MACH_ASSERT + const char* mach_trap_name; #endif /* MACH_ASSERT */ } mach_trap_t; @@ -612,15 +618,15 @@ extern const char *mach_syscall_name_table[]; /* XXX From osfmk/i386/bsd_i386.c */ struct mach_call_args { - syscall_arg_t arg1; - syscall_arg_t arg2; - syscall_arg_t arg3; - syscall_arg_t arg4; - syscall_arg_t arg5; - syscall_arg_t arg6; - syscall_arg_t arg7; - syscall_arg_t arg8; - syscall_arg_t arg9; + syscall_arg_t arg1; + syscall_arg_t arg2; + syscall_arg_t arg3; + syscall_arg_t arg4; + syscall_arg_t arg5; + syscall_arg_t arg6; + syscall_arg_t arg7; + syscall_arg_t arg8; + syscall_arg_t arg9; }; #undef NSYSCALL @@ -631,10 +637,10 @@ struct mach_call_args { #endif typedef struct machtrace_sysent { - dtrace_id_t stsy_entry; - dtrace_id_t stsy_return; - kern_return_t (*stsy_underlying)(void *); - int32_t stsy_return_type; + dtrace_id_t stsy_entry; + dtrace_id_t stsy_return; + kern_return_t (*stsy_underlying)(void *); + int32_t stsy_return_type; } machtrace_sysent_t; static machtrace_sysent_t *machtrace_sysent = NULL; @@ -642,14 +648,14 @@ static machtrace_sysent_t *machtrace_sysent = NULL; void (*machtrace_probe)(dtrace_id_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t); -static uint64_t machtrace_getarg(void *, dtrace_id_t, void *, int, int); +static uint64_t machtrace_getarg(void *, dtrace_id_t, void *, int, int); static dtrace_provider_id_t machtrace_id; static kern_return_t dtrace_machtrace_syscall(struct mach_call_args *args) { - int code; /* The mach call number */ + int code; /* The mach call number */ machtrace_sysent_t *sy; dtrace_id_t id; @@ -689,9 +695,9 @@ dtrace_machtrace_syscall(struct mach_call_args *args) code = (int)saved_state64(arm_regs)->x[ARM64_SYSCALL_CODE_REG_NUM]; } - /* From bsd/arm64.c:mach_syscall */ + /* From bsd/arm64.c:mach_syscall */ ASSERT(code < 0); /* Otherwise it would be a Unix syscall */ - code = -code; + code = -code; } #else #error Unknown Architecture @@ -700,15 +706,17 @@ dtrace_machtrace_syscall(struct mach_call_args *args) sy = &machtrace_sysent[code]; if ((id = sy->stsy_entry) != DTRACE_IDNONE) { - uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); + uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); - if (uthread) + if (uthread) { uthread->t_dtrace_syscall_args = (void *)ip; - - (*machtrace_probe)(id, *ip, *(ip+1), *(ip+2), *(ip+3), *(ip+4)); - - if (uthread) - uthread->t_dtrace_syscall_args = (void *)0; + } + + (*machtrace_probe)(id, *ip, *(ip + 1), *(ip + 2), *(ip + 3), *(ip + 4)); + + if (uthread) { + uthread->t_dtrace_syscall_args = (void *)0; + } } #if 0 /* XXX */ @@ -729,10 +737,11 @@ dtrace_machtrace_syscall(struct mach_call_args *args) mach_call = (mach_call_t)(*sy->stsy_underlying); rval = mach_call(args); - if ((id = sy->stsy_return) != DTRACE_IDNONE) + if ((id = sy->stsy_return) != 
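/*
 * Illustrative sketch (editor's addition, not part of the patch) of the
 * convention dtrace_machtrace_syscall() relies on above: Mach traps arrive as
 * negative numbers in the syscall-code register while Unix syscalls are
 * positive, so the handler asserts code < 0 and negates it before indexing
 * mach_trap_table[].  The trap number below is only an example.
 */
#include <assert.h>
#include <stdio.h>

int
main(void)
{
	int reg = -28;          /* hypothetically, task_self_trap */
	assert(reg < 0);        /* otherwise it would be a Unix syscall */
	int code = -reg;
	printf("dispatch via mach_trap_table[%d]\n", code);
	return 0;
}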
DTRACE_IDNONE) { (*machtrace_probe)(id, (uint64_t)rval, 0, 0, 0, 0); + } - return (rval); + return rval; } static void @@ -742,19 +751,21 @@ machtrace_init(const mach_trap_t *actual, machtrace_sysent_t **interposed) int i; if (msysent == NULL) { - *interposed = msysent = kmem_zalloc(sizeof (machtrace_sysent_t) * - NSYSCALL, KM_SLEEP); + *interposed = msysent = kmem_zalloc(sizeof(machtrace_sysent_t) * + NSYSCALL, KM_SLEEP); } for (i = 0; i < NSYSCALL; i++) { const mach_trap_t *a = &actual[i]; machtrace_sysent_t *s = &msysent[i]; - if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a)) + if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a)) { continue; + } - if (a->mach_trap_function == (mach_call_t)(dtrace_machtrace_syscall)) + if (a->mach_trap_function == (mach_call_t)(dtrace_machtrace_syscall)) { continue; + } s->stsy_underlying = a->mach_trap_function; } @@ -765,29 +776,31 @@ static void machtrace_provide(void *arg, const dtrace_probedesc_t *desc) { #pragma unused(arg) /* __APPLE__ */ - + int i; - if (desc != NULL) + if (desc != NULL) { return; + } machtrace_init(mach_trap_table, &machtrace_sysent); for (i = 0; i < NSYSCALL; i++) { - - if (machtrace_sysent[i].stsy_underlying == NULL) + if (machtrace_sysent[i].stsy_underlying == NULL) { continue; + } if (dtrace_probe_lookup(machtrace_id, NULL, - mach_syscall_name_table[i], "entry") != 0) + mach_syscall_name_table[i], "entry") != 0) { continue; + } (void) dtrace_probe_create(machtrace_id, NULL, mach_syscall_name_table[i], - "entry", MACHTRACE_ARTIFICIAL_FRAMES, - (void *)((uintptr_t)SYSTRACE_ENTRY(i))); + "entry", MACHTRACE_ARTIFICIAL_FRAMES, + (void *)((uintptr_t)SYSTRACE_ENTRY(i))); (void) dtrace_probe_create(machtrace_id, NULL, mach_syscall_name_table[i], - "return", MACHTRACE_ARTIFICIAL_FRAMES, - (void *)((uintptr_t)SYSTRACE_RETURN(i))); + "return", MACHTRACE_ARTIFICIAL_FRAMES, + (void *)((uintptr_t)SYSTRACE_RETURN(i))); machtrace_sysent[i].stsy_entry = DTRACE_IDNONE; machtrace_sysent[i].stsy_return = DTRACE_IDNONE; @@ -800,7 +813,7 @@ machtrace_destroy(void *arg, dtrace_id_t id, void *parg) { #pragma unused(arg,id) /* __APPLE__ */ int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg); - + #pragma unused(sysnum) /* __APPLE__ */ /* @@ -819,10 +832,10 @@ static int machtrace_enable(void *arg, dtrace_id_t id, void *parg) { #pragma unused(arg) /* __APPLE__ */ - + int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg); int enabled = (machtrace_sysent[sysnum].stsy_entry != DTRACE_IDNONE || - machtrace_sysent[sysnum].stsy_return != DTRACE_IDNONE); + machtrace_sysent[sysnum].stsy_return != DTRACE_IDNONE); if (SYSTRACE_ISENTRY((uintptr_t)parg)) { machtrace_sysent[sysnum].stsy_entry = id; @@ -831,8 +844,8 @@ machtrace_enable(void *arg, dtrace_id_t id, void *parg) } if (enabled) { - ASSERT(mach_trap_table[sysnum].mach_trap_function == (void *)dtrace_machtrace_syscall); - return(0); + ASSERT(mach_trap_table[sysnum].mach_trap_function == (void *)dtrace_machtrace_syscall); + return 0; } lck_mtx_lock(&dtrace_systrace_lock); @@ -844,7 +857,7 @@ machtrace_enable(void *arg, dtrace_id_t id, void *parg) lck_mtx_unlock(&dtrace_systrace_lock); - return(0); + return 0; } /*ARGSUSED*/ @@ -852,13 +865,12 @@ static void machtrace_disable(void *arg, dtrace_id_t id, void *parg) { #pragma unused(arg,id) /* __APPLE__ */ - + int sysnum = SYSTRACE_SYSNUM((uintptr_t)parg); int disable = (machtrace_sysent[sysnum].stsy_entry == DTRACE_IDNONE || - machtrace_sysent[sysnum].stsy_return == DTRACE_IDNONE); + machtrace_sysent[sysnum].stsy_return == DTRACE_IDNONE); if (disable) { - 
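/*
 * Illustrative sketch (editor's addition, not part of the patch) of the
 * lookup-before-create pattern in machtrace_provide() above, which makes the
 * provide callback idempotent: probes are created only for traps that do not
 * already have one.  Toy names and flags stand in for the DTrace framework.
 */
#include <stdio.h>

#define NTRAP 3
static const char *trap_names[NTRAP] = {
	"mach_reply_port", "thread_self_trap", "task_self_trap"
};
static unsigned char have_probe[NTRAP];  /* dtrace_probe_lookup() analogue */

static void
provide(void)
{
	for (int i = 0; i < NTRAP; i++) {
		if (have_probe[i]) {
			continue;        /* already provided */
		}
		have_probe[i] = 1;       /* dtrace_probe_create() analogue */
		printf("created mach_trap:::%s entry+return probes\n", trap_names[i]);
	}
}

int
main(void)
{
	provide();
	provide();                       /* second call creates nothing */
	return 0;
}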
lck_mtx_lock(&dtrace_systrace_lock); if (mach_trap_table[sysnum].mach_trap_function == (mach_call_t)dtrace_machtrace_syscall) { @@ -875,24 +887,24 @@ machtrace_disable(void *arg, dtrace_id_t id, void *parg) } static dtrace_pattr_t machtrace_attr = { -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, -{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, -{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, + { DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON }, + { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_ISA }, }; static dtrace_pops_t machtrace_pops = { - .dtps_provide = machtrace_provide, - .dtps_provide_module = NULL, - .dtps_enable = machtrace_enable, - .dtps_disable = machtrace_disable, - .dtps_suspend = NULL, - .dtps_resume = NULL, - .dtps_getargdesc = NULL, - .dtps_getargval = machtrace_getarg, - .dtps_usermode = NULL, - .dtps_destroy = machtrace_destroy + .dtps_provide = machtrace_provide, + .dtps_provide_module = NULL, + .dtps_enable = machtrace_enable, + .dtps_disable = machtrace_disable, + .dtps_suspend = NULL, + .dtps_resume = NULL, + .dtps_getargdesc = NULL, + .dtps_getargval = machtrace_getarg, + .dtps_usermode = NULL, + .dtps_destroy = machtrace_destroy }; static int @@ -900,22 +912,23 @@ machtrace_attach(dev_info_t *devi) { machtrace_probe = dtrace_probe; membar_enter(); - + if (ddi_create_minor_node(devi, "machtrace", S_IFCHR, 0, - DDI_PSEUDO, 0) == DDI_FAILURE || - dtrace_register("mach_trap", &machtrace_attr, DTRACE_PRIV_USER, NULL, - &machtrace_pops, NULL, &machtrace_id) != 0) { - machtrace_probe = (void*)&systrace_stub; + DDI_PSEUDO, 0) == DDI_FAILURE || + dtrace_register("mach_trap", &machtrace_attr, DTRACE_PRIV_USER, NULL, + &machtrace_pops, NULL, &machtrace_id) != 0) { + machtrace_probe = (void*)&systrace_stub; ddi_remove_minor_node(devi, NULL); - return (DDI_FAILURE); + return DDI_FAILURE; } - return (DDI_SUCCESS); + return DDI_SUCCESS; } d_open_t _systrace_open; -int _systrace_open(dev_t dev, int flags, int devtype, struct proc *p) +int +_systrace_open(dev_t dev, int flags, int devtype, struct proc *p) { #pragma unused(dev,flags,devtype,p) return 0; @@ -929,25 +942,26 @@ int _systrace_open(dev_t dev, int flags, int devtype, struct proc *p) */ static struct cdevsw systrace_cdevsw = { - _systrace_open, /* open */ - eno_opcl, /* close */ - eno_rdwrt, /* read */ - eno_rdwrt, /* write */ - eno_ioctl, /* ioctl */ + _systrace_open, /* open */ + eno_opcl, /* close */ + eno_rdwrt, /* read */ + eno_rdwrt, /* write */ + eno_ioctl, /* ioctl */ (stop_fcn_t *)nulldev, /* stop */ (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + NULL, /* tty's */ + eno_select, /* select */ + eno_mmap, /* mmap */ + eno_strat, /* strategy */ + eno_getc, /* getc */ + eno_putc, /* putc */ + 0 /* type */ }; void systrace_init( void ); -void systrace_init( void ) +void +systrace_init( void ) { if (dtrace_sdt_probes_restricted()) { return; @@ -972,24 
+986,27 @@ systrace_getargval(void *arg, dtrace_id_t id, void *parg, int argno, int aframes uint64_t val = 0; uint64_t *uargs = NULL; - uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); + uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); - if (uthread) + if (uthread) { uargs = uthread->t_dtrace_syscall_args; - if (!uargs) - return(0); - if (argno < 0 || argno >= SYSTRACE_NARGS) - return(0); + } + if (!uargs) { + return 0; + } + if (argno < 0 || argno >= SYSTRACE_NARGS) { + return 0; + } DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); val = uargs[argno]; DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return (val); + return val; } static void systrace_getargdesc(void *arg, dtrace_id_t id, void *parg, - dtrace_argdesc_t *desc) + dtrace_argdesc_t *desc) { #pragma unused(arg, id) int sysnum = SYSTRACE_SYSNUM(parg); @@ -1005,15 +1022,15 @@ systrace_getargdesc(void *arg, dtrace_id_t id, void *parg, if (SYSTRACE_ISENTRY((uintptr_t)parg)) { systrace_entry_setargdesc(sysnum, desc->dtargd_ndx, - desc->dtargd_native, sizeof(desc->dtargd_native)); - } - else { + desc->dtargd_native, sizeof(desc->dtargd_native)); + } else { systrace_return_setargdesc(sysnum, desc->dtargd_ndx, - desc->dtargd_native, sizeof(desc->dtargd_native)); + desc->dtargd_native, sizeof(desc->dtargd_native)); } - if (desc->dtargd_native[0] == '\0') + if (desc->dtargd_native[0] == '\0') { desc->dtargd_ndx = DTRACE_ARGNONE; + } } static uint64_t @@ -1024,17 +1041,18 @@ machtrace_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) syscall_arg_t *stack = (syscall_arg_t *)NULL; uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread()); - - if (uthread) + + if (uthread) { stack = (syscall_arg_t *)uthread->t_dtrace_syscall_args; - - if (!stack) - return(0); + } + + if (!stack) { + return 0; + } DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); /* dtrace_probe arguments arg0 .. arg4 are 64bits wide */ - val = (uint64_t)*(stack+argno); + val = (uint64_t)*(stack + argno); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return (val); + return val; } - diff --git a/bsd/dev/dtrace/systrace.h b/bsd/dev/dtrace/systrace.h index c86f324a9..f7b92bc9f 100644 --- a/bsd/dev/dtrace/systrace.h +++ b/bsd/dev/dtrace/systrace.h @@ -25,7 +25,7 @@ */ #ifndef _SYS_SYSTRACE_H -#define _SYS_SYSTRACE_H +#define _SYS_SYSTRACE_H /* #pragma ident "@(#)systrace.h 1.3 06/09/19 SMI" */ @@ -37,17 +37,17 @@ #include -#ifdef __cplusplus +#ifdef __cplusplus extern "C" { #endif #ifdef _KERNEL typedef struct systrace_sysent { - dtrace_id_t stsy_entry; - dtrace_id_t stsy_return; - int32_t (*stsy_underlying)(struct proc *, void *, int *); - int32_t stsy_return_type; + dtrace_id_t stsy_entry; + dtrace_id_t stsy_return; + int32_t (*stsy_underlying)(struct proc *, void *, int *); + int32_t stsy_return_type; } systrace_sysent_t; extern systrace_sysent_t *systrace_sysent; @@ -64,8 +64,8 @@ extern void dtrace_systrace_syscall_return(unsigned short, int, int *); #endif /* _KERNEL */ -#ifdef __cplusplus +#ifdef __cplusplus } #endif -#endif /* _SYS_SYSTRACE_H */ +#endif /* _SYS_SYSTRACE_H */ diff --git a/bsd/dev/i386/conf.c b/bsd/dev/i386/conf.c index 36de4f945..094506014 100644 --- a/bsd/dev/i386/conf.c +++ b/bsd/dev/i386/conf.c @@ -2,7 +2,7 @@ * Copyright (c) 1997-2017 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
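/*
 * Illustrative sketch (editor's addition, not part of the patch) of the
 * guarded argument fetch in systrace_getargval() above: with no stashed
 * argument block, or an index outside [0, SYSTRACE_NARGS), it returns 0
 * instead of faulting.
 */
#include <stdint.h>
#include <stdio.h>

#define SYSTRACE_NARGS 5

static uint64_t
getargval(const uint64_t *uargs, int argno)
{
	if (!uargs) {
		return 0;
	}
	if (argno < 0 || argno >= SYSTRACE_NARGS) {
		return 0;
	}
	return uargs[argno];
}

int
main(void)
{
	uint64_t args[SYSTRACE_NARGS] = { 3, 0x1000, 512 }; /* e.g. read(3, buf, 512) */

	printf("arg2 = %llu\n", (unsigned long long)getargval(args, 2));
	printf("arg9 = %llu (out of range)\n", (unsigned long long)getargval(args, 9));
	return 0;
}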
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,7 +33,7 @@ * HISTORY * * 30 July 1997 Umesh Vaishampayan (umeshv@apple.com) - * enabled file descriptor pseudo-device. + * enabled file descriptor pseudo-device. * 18 June 1993 ? at NeXT * Cleaned up a lot of stuff in this file. */ @@ -45,13 +45,13 @@ #include /* Prototypes that should be elsewhere: */ -extern dev_t chrtoblk(dev_t dev); -extern int chrtoblk_set(int cdev, int bdev); +extern dev_t chrtoblk(dev_t dev); +extern int chrtoblk_set(int cdev, int bdev); -struct bdevsw bdevsw[] = +struct bdevsw bdevsw[] = { /* - * For block devices, every other block of 8 slots is + * For block devices, every other block of 8 slots is * reserved for Apple. The other slots are available for * the user. This way we can both add new entries without * running into each other. Be sure to fill in Apple's @@ -61,130 +61,130 @@ struct bdevsw bdevsw[] = /* 0 - 7 are reserved for Apple */ - NO_BDEVICE, /* 0*/ - NO_BDEVICE, /* 1*/ - NO_BDEVICE, /* 2*/ - NO_BDEVICE, /* 3*/ - NO_BDEVICE, /* 4*/ - NO_BDEVICE, /* 5*/ - NO_BDEVICE, /* 6*/ - NO_BDEVICE, /* 7*/ + NO_BDEVICE, /* 0*/ + NO_BDEVICE, /* 1*/ + NO_BDEVICE, /* 2*/ + NO_BDEVICE, /* 3*/ + NO_BDEVICE, /* 4*/ + NO_BDEVICE, /* 5*/ + NO_BDEVICE, /* 6*/ + NO_BDEVICE, /* 7*/ /* 8 - 15 are reserved to the user */ - NO_BDEVICE, /* 8*/ - NO_BDEVICE, /* 9*/ - NO_BDEVICE, /*10*/ - NO_BDEVICE, /*11*/ - NO_BDEVICE, /*12*/ - NO_BDEVICE, /*13*/ - NO_BDEVICE, /*14*/ - NO_BDEVICE, /*15*/ + NO_BDEVICE, /* 8*/ + NO_BDEVICE, /* 9*/ + NO_BDEVICE, /*10*/ + NO_BDEVICE, /*11*/ + NO_BDEVICE, /*12*/ + NO_BDEVICE, /*13*/ + NO_BDEVICE, /*14*/ + NO_BDEVICE, /*15*/ /* 16 - 23 are reserved for Apple */ - NO_BDEVICE, /*16*/ - NO_BDEVICE, /*17*/ - NO_BDEVICE, /*18*/ - NO_BDEVICE, /*18*/ - NO_BDEVICE, /*20*/ - NO_BDEVICE, /*21*/ - NO_BDEVICE, /*22*/ - NO_BDEVICE, /*23*/ + NO_BDEVICE, /*16*/ + NO_BDEVICE, /*17*/ + NO_BDEVICE, /*18*/ + NO_BDEVICE, /*18*/ + NO_BDEVICE, /*20*/ + NO_BDEVICE, /*21*/ + NO_BDEVICE, /*22*/ + NO_BDEVICE, /*23*/ }; const int nblkdev = sizeof(bdevsw) / sizeof(bdevsw[0]); extern struct tty *km_tty[]; -extern d_open_t cnopen; -extern d_close_t cnclose; -extern d_read_t cnread; -extern d_write_t cnwrite; -extern d_ioctl_t cnioctl; -extern d_select_t cnselect; -extern d_open_t kmopen; -extern d_close_t kmclose; -extern d_read_t kmread; -extern d_write_t kmwrite; -extern d_ioctl_t kmioctl; -extern d_open_t sgopen; -extern d_close_t sgclose; -extern d_ioctl_t sgioctl; +extern d_open_t cnopen; +extern d_close_t cnclose; +extern d_read_t cnread; +extern d_write_t cnwrite; +extern d_ioctl_t cnioctl; +extern d_select_t cnselect; +extern d_open_t kmopen; +extern d_close_t kmclose; +extern d_read_t kmread; +extern d_write_t kmwrite; 
+extern d_ioctl_t kmioctl; +extern d_open_t sgopen; +extern d_close_t sgclose; +extern d_ioctl_t sgioctl; #if NVOL > 0 -extern d_open_t volopen; -extern d_close_t volclose; -extern d_ioctl_t volioctl; +extern d_open_t volopen; +extern d_close_t volclose; +extern d_ioctl_t volioctl; #else -#define volopen eno_opcl -#define volclose eno_opcl -#define volioctl eno_ioctl +#define volopen eno_opcl +#define volclose eno_opcl +#define volioctl eno_ioctl #endif -extern d_open_t cttyopen; -extern d_read_t cttyread; -extern d_write_t cttywrite; -extern d_ioctl_t cttyioctl; -extern d_select_t cttyselect; +extern d_open_t cttyopen; +extern d_read_t cttyread; +extern d_write_t cttywrite; +extern d_ioctl_t cttyioctl; +extern d_select_t cttyselect; -extern d_read_t mmread; -extern d_write_t mmwrite; -extern d_ioctl_t mmioctl; -#define mmselect (select_fcn_t *)seltrue -#define mmmmap eno_mmap +extern d_read_t mmread; +extern d_write_t mmwrite; +extern d_ioctl_t mmioctl; +#define mmselect (select_fcn_t *)seltrue +#define mmmmap eno_mmap #include #if NPTY > 0 -extern d_open_t ptsopen; -extern d_close_t ptsclose; -extern d_read_t ptsread; -extern d_write_t ptswrite; -extern d_stop_t ptsstop; -extern d_select_t ptsselect; -extern d_open_t ptcopen; -extern d_close_t ptcclose; -extern d_read_t ptcread; -extern d_write_t ptcwrite; -extern d_select_t ptcselect; -extern d_ioctl_t ptyioctl; +extern d_open_t ptsopen; +extern d_close_t ptsclose; +extern d_read_t ptsread; +extern d_write_t ptswrite; +extern d_stop_t ptsstop; +extern d_select_t ptsselect; +extern d_open_t ptcopen; +extern d_close_t ptcclose; +extern d_read_t ptcread; +extern d_write_t ptcwrite; +extern d_select_t ptcselect; +extern d_ioctl_t ptyioctl; #else -#define ptsopen eno_opcl -#define ptsclose eno_opcl -#define ptsread eno_rdwrt -#define ptswrite eno_rdwrt -#define ptsstop nulldev +#define ptsopen eno_opcl +#define ptsclose eno_opcl +#define ptsread eno_rdwrt +#define ptswrite eno_rdwrt +#define ptsstop nulldev -#define ptcopen eno_opcl -#define ptcclose eno_opcl -#define ptcread eno_rdwrt -#define ptcwrite eno_rdwrt -#define ptcselect eno_select -#define ptyioctl eno_ioctl +#define ptcopen eno_opcl +#define ptcclose eno_opcl +#define ptcread eno_rdwrt +#define ptcwrite eno_rdwrt +#define ptcselect eno_select +#define ptyioctl eno_ioctl #endif -extern d_open_t logopen; -extern d_close_t logclose; -extern d_read_t logread; -extern d_ioctl_t logioctl; -extern d_select_t logselect; +extern d_open_t logopen; +extern d_close_t logclose; +extern d_read_t logread; +extern d_ioctl_t logioctl; +extern d_select_t logselect; -extern d_open_t oslog_streamopen; -extern d_close_t oslog_streamclose; -extern d_read_t oslog_streamread; -extern d_ioctl_t oslog_streamioctl; -extern d_select_t oslog_streamselect; +extern d_open_t oslog_streamopen; +extern d_close_t oslog_streamclose; +extern d_read_t oslog_streamread; +extern d_ioctl_t oslog_streamioctl; +extern d_select_t oslog_streamselect; -extern d_open_t oslogopen; -extern d_close_t oslogclose; -extern d_select_t oslogselect; -extern d_ioctl_t oslogioctl; +extern d_open_t oslogopen; +extern d_close_t oslogclose; +extern d_select_t oslogselect; +extern d_ioctl_t oslogioctl; -#define nullopen (d_open_t *)&nulldev -#define nullclose (d_close_t *)&nulldev -#define nullread (d_read_t *)&nulldev -#define nullwrite (d_write_t *)&nulldev -#define nullioctl (d_ioctl_t *)&nulldev -#define nullselect (d_select_t *)&nulldev -#define nullstop (d_stop_t *)&nulldev -#define nullreset (d_reset_t *)&nulldev +#define nullopen 
(d_open_t *)&nulldev +#define nullclose (d_close_t *)&nulldev +#define nullread (d_read_t *)&nulldev +#define nullwrite (d_write_t *)&nulldev +#define nullioctl (d_ioctl_t *)&nulldev +#define nullselect (d_select_t *)&nulldev +#define nullstop (d_stop_t *)&nulldev +#define nullreset (d_reset_t *)&nulldev struct cdevsw cdevsw[] = { /* @@ -249,14 +249,14 @@ const int nchrdev = sizeof(cdevsw) / sizeof(cdevsw[0]); uint64_t cdevsw_flags[sizeof(cdevsw) / sizeof(cdevsw[0])]; -#include /* for VCHR and VBLK */ +#include /* for VCHR and VBLK */ /* * return true if a disk */ int isdisk(dev_t dev, int type) { - dev_t maj = major(dev); + dev_t maj = major(dev); switch (type) { case VCHR: @@ -264,41 +264,41 @@ isdisk(dev_t dev, int type) if (maj == NODEV) { break; } - /* FALL THROUGH */ + /* FALL THROUGH */ case VBLK: if (bdevsw[maj].d_type == D_DISK) { - return (1); + return 1; } break; } - return(0); + return 0; } static int chrtoblktab[] = { /* CHR*/ /* BLK*/ /* CHR*/ /* BLK*/ - /* 0 */ NODEV, /* 1 */ NODEV, - /* 2 */ NODEV, /* 3 */ NODEV, - /* 4 */ NODEV, /* 5 */ NODEV, - /* 6 */ NODEV, /* 7 */ NODEV, - /* 8 */ NODEV, /* 9 */ NODEV, - /* 10 */ NODEV, /* 11 */ NODEV, - /* 12 */ NODEV, /* 13 */ NODEV, - /* 14 */ NODEV, /* 15 */ NODEV, - /* 16 */ NODEV, /* 17 */ NODEV, - /* 18 */ NODEV, /* 19 */ NODEV, - /* 20 */ NODEV, /* 21 */ NODEV, - /* 22 */ NODEV, /* 23 */ NODEV, - /* 24 */ NODEV, /* 25 */ NODEV, - /* 26 */ NODEV, /* 27 */ NODEV, - /* 28 */ NODEV, /* 29 */ NODEV, - /* 30 */ NODEV, /* 31 */ NODEV, - /* 32 */ NODEV, /* 33 */ NODEV, - /* 34 */ NODEV, /* 35 */ NODEV, - /* 36 */ NODEV, /* 37 */ NODEV, - /* 38 */ NODEV, /* 39 */ NODEV, - /* 40 */ NODEV, /* 41 */ NODEV, - /* 42 */ NODEV, /* 43 */ NODEV, - /* 44 */ NODEV, + /* 0 */ NODEV, /* 1 */ NODEV, + /* 2 */ NODEV, /* 3 */ NODEV, + /* 4 */ NODEV, /* 5 */ NODEV, + /* 6 */ NODEV, /* 7 */ NODEV, + /* 8 */ NODEV, /* 9 */ NODEV, + /* 10 */ NODEV, /* 11 */ NODEV, + /* 12 */ NODEV, /* 13 */ NODEV, + /* 14 */ NODEV, /* 15 */ NODEV, + /* 16 */ NODEV, /* 17 */ NODEV, + /* 18 */ NODEV, /* 19 */ NODEV, + /* 20 */ NODEV, /* 21 */ NODEV, + /* 22 */ NODEV, /* 23 */ NODEV, + /* 24 */ NODEV, /* 25 */ NODEV, + /* 26 */ NODEV, /* 27 */ NODEV, + /* 28 */ NODEV, /* 29 */ NODEV, + /* 30 */ NODEV, /* 31 */ NODEV, + /* 32 */ NODEV, /* 33 */ NODEV, + /* 34 */ NODEV, /* 35 */ NODEV, + /* 36 */ NODEV, /* 37 */ NODEV, + /* 38 */ NODEV, /* 39 */ NODEV, + /* 40 */ NODEV, /* 41 */ NODEV, + /* 42 */ NODEV, /* 43 */ NODEV, + /* 44 */ NODEV, }; /* @@ -309,22 +309,25 @@ chrtoblk(dev_t dev) { int blkmaj; - if (major(dev) >= nchrdev) - return(NODEV); + if (major(dev) >= nchrdev) { + return NODEV; + } blkmaj = chrtoblktab[major(dev)]; - if (blkmaj == NODEV) - return(NODEV); - return(makedev(blkmaj, minor(dev))); + if (blkmaj == NODEV) { + return NODEV; + } + return makedev(blkmaj, minor(dev)); } int chrtoblk_set(int cdev, int bdev) { - if (cdev >= nchrdev) - return (-1); - if (bdev != NODEV && bdev >= nblkdev) - return (-1); + if (cdev >= nchrdev) { + return -1; + } + if (bdev != NODEV && bdev >= nblkdev) { + return -1; + } chrtoblktab[cdev] = bdev; return 0; } - diff --git a/bsd/dev/i386/cons.c b/bsd/dev/i386/cons.c index dbd46a1cc..8a9ee5255 100644 --- a/bsd/dev/i386/cons.c +++ b/bsd/dev/i386/cons.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
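/*
 * Illustrative userland sketch (editor's addition, not part of the patch) of
 * the chrtoblk()/chrtoblk_set() pair above: a table maps each character
 * major to its block-device twin, NODEV meaning "none", and registration
 * bounds-checks both majors.  Table sizes and majors here are made up.
 */
#include <stdio.h>

#define NODEV   (-1)
#define NCHRDEV 8
#define NBLKDEV 8

static int chrtoblktab[NCHRDEV] = {
	NODEV, NODEV, NODEV, NODEV, NODEV, NODEV, NODEV, NODEV
};

static int
chrtoblk_set(int cdev, int bdev)
{
	if (cdev < 0 || cdev >= NCHRDEV) {
		return -1;
	}
	if (bdev != NODEV && bdev >= NBLKDEV) {
		return -1;
	}
	chrtoblktab[cdev] = bdev;
	return 0;
}

int
main(void)
{
	(void)chrtoblk_set(3, 1);        /* hypothetical disk's char/block pair */
	printf("char major 3 -> block major %d\n", chrtoblktab[3]);
	printf("char major 4 -> block major %d\n", chrtoblktab[4]);
	return 0;
}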
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -55,7 +55,7 @@ #include #include -struct tty *constty; /* current console device */ +struct tty *constty; /* current console device */ /* * The km driver supplied the default console device for the systems @@ -76,17 +76,18 @@ int cnselect(__unused dev_t dev, int flag, void * wql, proc_t p); static dev_t cndev(void) { - if (constty) + if (constty) { return constty->t_dev; - else + } else { return km_tty[0]->t_dev; + } } int cnopen(__unused dev_t dev, int flag, int devtype, struct proc *pp) { dev = cndev(); - return ((*cdevsw[major(dev)].d_open)(dev, flag, devtype, pp)); + return (*cdevsw[major(dev)].d_open)(dev, flag, devtype, pp); } @@ -94,7 +95,7 @@ int cnclose(__unused dev_t dev, int flag, int mode, struct proc *pp) { dev = cndev(); - return ((*cdevsw[major(dev)].d_close)(dev, flag, mode, pp)); + return (*cdevsw[major(dev)].d_close)(dev, flag, mode, pp); } @@ -102,7 +103,7 @@ int cnread(__unused dev_t dev, struct uio *uio, int ioflag) { dev = cndev(); - return ((*cdevsw[major(dev)].d_read)(dev, uio, ioflag)); + return (*cdevsw[major(dev)].d_read)(dev, uio, ioflag); } @@ -110,7 +111,7 @@ int cnwrite(__unused dev_t dev, struct uio *uio, int ioflag) { dev = cndev(); - return ((*cdevsw[major(dev)].d_write)(dev, uio, ioflag)); + return (*cdevsw[major(dev)].d_write)(dev, uio, ioflag); } @@ -133,11 +134,11 @@ cnioctl(__unused dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p) if (!error) { constty = NULL; } - return(error); + return error; } -#endif /* 0 */ +#endif /* 0 */ - return ((*cdevsw[major(dev)].d_ioctl)(dev, cmd, addr, flag, p)); + return (*cdevsw[major(dev)].d_ioctl)(dev, cmd, addr, flag, p); } @@ -145,5 +146,5 @@ int cnselect(__unused dev_t dev, int flag, void *wql, struct proc *p) { dev = cndev(); - return ((*cdevsw[major(dev)].d_select)(dev, flag, wql, p)); + return (*cdevsw[major(dev)].d_select)(dev, flag, wql, p); } diff --git a/bsd/dev/i386/instr_size.c b/bsd/dev/i386/instr_size.c index 4d8c11dd2..14f7ea974 100644 --- a/bsd/dev/i386/instr_size.c +++ b/bsd/dev/i386/instr_size.c @@ -82,7 +82,7 @@ dtrace_dis_get_byte(void *p) * reported as having no memory impact. 
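/*
 * Illustrative sketch (editor's addition, not part of the patch) of the
 * indirection shared by every cn*() entry point in cons.c above: resolve the
 * current console device, then tail-call that device's own handler.
 * Function pointers stand in for cdevsw[major(dev)] and for
 * constty/km_tty[0].
 */
#include <stdio.h>

typedef int (*d_write_t)(const char *);

static int km_write(const char *s)  { return printf("[km]  %s\n", s); }
static int tty_write(const char *s) { return printf("[tty] %s\n", s); }

static d_write_t constty;                  /* current console, may be NULL */

static d_write_t
cndev(void)
{
	return constty ? constty : km_write;   /* fall back to the km driver */
}

static int
cnwrite(const char *s)
{
	return (*cndev())(s);
}

int
main(void)
{
	cnwrite("boot message");               /* routed to km */
	constty = tty_write;                   /* console redirected */
	cnwrite("login message");              /* routed to the new console */
	return 0;
}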
*/ /* ARGSUSED2 */ -static int +static __attribute__((noinline)) int dtrace_dis_isize(uchar_t *instr, dis_isize_t which, model_t model, int *rmindex) { int sz; diff --git a/bsd/dev/i386/kern_machdep.c b/bsd/dev/i386/kern_machdep.c index 4d7891ba4..1b3d774f3 100644 --- a/bsd/dev/i386/kern_machdep.c +++ b/bsd/dev/i386/kern_machdep.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,28 +34,28 @@ * Machine-specific kernel routines. */ -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include /********************************************************************** - * Routine: grade_binary() - * - * Function: Say OK to CPU types that we can actually execute on the given - * system. 64-bit binaries have the highest preference, followed - * by 32-bit binaries. 0 means unsupported. - **********************************************************************/ +* Routine: grade_binary() +* +* Function: Say OK to CPU types that we can actually execute on the given +* system. 64-bit binaries have the highest preference, followed +* by 32-bit binaries. 0 means unsupported. +**********************************************************************/ int grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype) { cpu_subtype_t hostsubtype = cpu_subtype(); - switch(exectype) { - case CPU_TYPE_X86_64: /* native 64-bit */ - switch(hostsubtype) { - case CPU_SUBTYPE_X86_64_H: /* x86_64h can execute anything */ + switch (exectype) { + case CPU_TYPE_X86_64: /* native 64-bit */ + switch (hostsubtype) { + case CPU_SUBTYPE_X86_64_H: /* x86_64h can execute anything */ switch (execsubtype) { case CPU_SUBTYPE_X86_64_H: return 3; @@ -63,7 +63,7 @@ grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype) return 2; } break; - case CPU_SUBTYPE_X86_ARCH1: /* generic systems can only execute ALL subtype */ + case CPU_SUBTYPE_X86_ARCH1: /* generic systems can only execute ALL subtype */ switch (execsubtype) { case CPU_SUBTYPE_X86_64_ALL: return 2; @@ -71,7 +71,7 @@ grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype) break; } break; - case CPU_TYPE_X86: /* native */ + case CPU_TYPE_X86: /* native */ return 1; } diff --git a/bsd/dev/i386/km.c b/bsd/dev/i386/km.c index 19923cdc2..aee4edee8 100644 --- a/bsd/dev/i386/km.c +++ b/bsd/dev/i386/km.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
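/*
 * Illustrative sketch (editor's addition, not part of the patch) of the
 * grading scheme grade_binary() implements above: when the loader picks
 * among fat-binary slices, the highest nonzero grade wins and 0 means
 * "cannot run here".  This compresses the real type/subtype pairs into one
 * enum, so it is a simplification of the actual nested switch.
 */
#include <stdio.h>

enum slice { I386, X86_64, X86_64H };   /* stand-ins for cpu (sub)types */

static int
grade(enum slice exec, enum slice host)
{
	switch (exec) {
	case X86_64H:
		return host == X86_64H ? 3 : 0; /* only x86_64h hosts run x86_64h */
	case X86_64:
		return 2;
	case I386:
		return 1;
	}
	return 0;
}

int
main(void)
{
	printf("x86_64h slice on x86_64h host: %d\n", grade(X86_64H, X86_64H));
	printf("x86_64  slice on x86_64h host: %d\n", grade(X86_64, X86_64H));
	printf("i386    slice on x86_64h host: %d\n", grade(I386, X86_64H));
	return 0;
}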
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. * * km.m - kernel keyboard/monitor module, procedural interface. * @@ -39,9 +39,9 @@ #include #include #include -#include /* for kmopen */ -#include -#include /* for kmopen */ +#include /* for kmopen */ +#include +#include /* for kmopen */ #include #include #include @@ -54,8 +54,8 @@ extern void cnputcusr(char); extern void cnputsusr(char *, int); extern int cngetc(void); -void kminit(void); -void cons_cinput(char ch); +void kminit(void); +void cons_cinput(char ch); /* * 'Global' variables, shared only by this file and conf.c. @@ -82,24 +82,25 @@ void kminit(void) { km_tty[0] = ttymalloc(); - km_tty[0]->t_dev = makedev(12, 0); + km_tty[0]->t_dev = makedev(12, 0); initialized = 1; } /* * cdevsw interface to km driver. */ -int +int kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) { int unit; struct tty *tp; struct winsize *wp; int ret; - + unit = minor(dev); - if(unit >= 1) - return (ENXIO); + if (unit >= 1) { + return ENXIO; + } tp = km_tty[unit]; @@ -108,8 +109,8 @@ kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) tp->t_oproc = kmstart; tp->t_param = NULL; tp->t_dev = dev; - - if ( !(tp->t_state & TS_ISOPEN) ) { + + if (!(tp->t_state & TS_ISOPEN)) { tp->t_iflag = TTYDEF_IFLAG; tp->t_oflag = TTYDEF_OFLAG; tp->t_cflag = (CREAD | CS8 | CLOCAL); @@ -129,23 +130,24 @@ kmopen(dev_t dev, int flag, __unused int devtype, proc_t pp) PE_Video video; wp = &tp->t_winsize; /* - * Magic numbers. These are CHARWIDTH and CHARHEIGHT + * Magic numbers. 
These are CHARWIDTH and CHARHEIGHT * from pexpert/i386/video_console.c */ wp->ws_xpixel = 8; wp->ws_ypixel = 16; - tty_unlock(tp); /* XXX race window */ + tty_unlock(tp); /* XXX race window */ - if (flag & O_POPUP) + if (flag & O_POPUP) { PE_initialize_console(0, kPETextScreen); + } bzero(&video, sizeof(video)); PE_current_console(&video); tty_lock(tp); - if( video.v_display == FB_TEXT_MODE && video.v_width != 0 && video.v_height != 0 ) { + if (video.v_display == FB_TEXT_MODE && video.v_width != 0 && video.v_height != 0) { wp->ws_col = video.v_width / wp->ws_xpixel; wp->ws_row = video.v_height / wp->ws_ypixel; } else { @@ -160,21 +162,21 @@ out: return ret; } -int +int kmclose(dev_t dev, int flag, __unused int mode, __unused proc_t p) { int ret; struct tty *tp = km_tty[minor(dev)]; tty_lock(tp); - ret = (*linesw[tp->t_line].l_close)(tp,flag); + ret = (*linesw[tp->t_line].l_close)(tp, flag); ttyclose(tp); tty_unlock(tp); - return (ret); + return ret; } -int +int kmread(dev_t dev, struct uio *uio, int ioflag) { int ret; @@ -184,10 +186,10 @@ kmread(dev_t dev, struct uio *uio, int ioflag) ret = (*linesw[tp->t_line].l_read)(tp, uio, ioflag); tty_unlock(tp); - return (ret); + return ret; } -int +int kmwrite(dev_t dev, struct uio *uio, int ioflag) { int ret; @@ -197,10 +199,10 @@ kmwrite(dev_t dev, struct uio *uio, int ioflag) ret = (*linesw[tp->t_line].l_write)(tp, uio, ioflag); tty_unlock(tp); - return (ret); + return ret; } -int +int kmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, proc_t p) { int error = 0; @@ -208,14 +210,14 @@ kmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, proc_t p) struct winsize *wp; tty_lock(tp); - + switch (cmd) { - case KMIOCSIZE: + case KMIOCSIZE: wp = (struct winsize *)data; *wp = tp->t_winsize; break; - - case TIOCSWINSZ: + + case TIOCSWINSZ: /* Prevent changing of console size -- * this ensures that login doesn't revert to the * termcap-defined size @@ -223,36 +225,37 @@ kmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, proc_t p) error = EINVAL; break; - /* Bodge in the CLOCAL flag as the km device is always local */ - case TIOCSETA_32: - case TIOCSETAW_32: - case TIOCSETAF_32: - { - struct termios32 *t = (struct termios32 *)data; - t->c_cflag |= CLOCAL; - /* No Break */ - } + /* Bodge in the CLOCAL flag as the km device is always local */ + case TIOCSETA_32: + case TIOCSETAW_32: + case TIOCSETAF_32: + { + struct termios32 *t = (struct termios32 *)data; + t->c_cflag |= CLOCAL; + /* No Break */ + } goto fallthrough; - case TIOCSETA_64: - case TIOCSETAW_64: - case TIOCSETAF_64: - { - struct user_termios *t = (struct user_termios *)data; - t->c_cflag |= CLOCAL; - /* No Break */ - } + case TIOCSETA_64: + case TIOCSETAW_64: + case TIOCSETAF_64: + { + struct user_termios *t = (struct user_termios *)data; + t->c_cflag |= CLOCAL; + /* No Break */ + } fallthrough: - default: + default: error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p); - if (ENOTTY != error) + if (ENOTTY != error) { break; + } error = ttioctl_locked(tp, cmd, data, flag, p); break; } tty_unlock(tp); - return (error); + return error; } /* @@ -268,38 +271,41 @@ fallthrough: * assumptions here, this routine should be static (and * inlined, given there is only one call site). */ -int +int kmputc(__unused dev_t dev, char c) { - if(!disableConsoleOutput && initialized) { + if (!disableConsoleOutput && initialized) { /* OCRNL */ - if(c == '\n') + if (c == '\n') { cnputcusr('\r'); + } cnputcusr(c); } - return (0); + return 0; } /* * Callouts from linesw. 
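/*
 * Illustrative sketch (editor's addition, not part of the patch) of the
 * window-size arithmetic in kmopen() above: the console's text grid is the
 * framebuffer dimensions divided by the 8x16 glyph cell.  The framebuffer
 * size below is hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	unsigned v_width = 1024, v_height = 768; /* hypothetical text-mode FB */
	unsigned ws_xpixel = 8, ws_ypixel = 16;  /* CHARWIDTH x CHARHEIGHT */

	printf("console grid: %u cols x %u rows\n",
	    v_width / ws_xpixel, v_height / ws_ypixel);
	return 0;
}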
*/ - -#define KM_LOWAT_DELAY ((ns_time_t)1000) + +#define KM_LOWAT_DELAY ((ns_time_t)1000) /* * t_oproc for this driver; called from within the line discipline * * Locks: Assumes tp is locked on entry, remains locked on exit */ -static void +static void kmstart(struct tty *tp) { - if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) + if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) { goto out; - if (tp->t_outq.c_cc == 0) + } + if (tp->t_outq.c_cc == 0) { goto out; + } tp->t_state |= TS_BUSY; kmoutput(tp); return; @@ -309,18 +315,18 @@ out: return; } -/* +/* * One-shot output retry timeout from kmoutput(); re-calls kmoutput() at * intervals until the output queue for the tty is empty, at which point * the timeout is not rescheduled by kmoutput() - * + * * This function must take the tty_lock() around the kmoutput() call; it * ignores the return value. */ static void kmtimeout(void *arg) { - struct tty *tp = (struct tty *)arg; + struct tty *tp = (struct tty *)arg; tty_lock(tp); (void)kmoutput(tp); @@ -338,19 +344,20 @@ kmtimeout(void *arg) * of sizeof(buf) charatcers at a time before dropping into * the timeout code). */ -static int +static int kmoutput(struct tty *tp) { - unsigned char buf[80]; /* buffer; limits output per call */ - unsigned char *cp; - int cc = -1; + unsigned char buf[80]; /* buffer; limits output per call */ + unsigned char *cp; + int cc = -1; /* While there is data available to be output... */ while (tp->t_outq.c_cc > 0) { cc = ndqb(&tp->t_outq, 0); - if (cc == 0) + if (cc == 0) { break; + } /* * attempt to output as many characters as are available, * up to the available transfer buffer size. @@ -373,7 +380,7 @@ kmoutput(struct tty *tp) * XXX This is likely not necessary, as the tty output queue is not * XXX writeable while we hold the tty_lock(). */ - if (tp->t_outq.c_cc > 0) { + if (tp->t_outq.c_cc > 0) { timeout(kmtimeout, tp, hz); } tp->t_state &= ~TS_BUSY; @@ -401,9 +408,9 @@ kmoutput(struct tty *tp) void cons_cinput(char ch) { - struct tty *tp = km_tty[0]; /* XXX */ + struct tty *tp = km_tty[0]; /* XXX */ tty_lock(tp); - (*linesw[tp->t_line].l_rint) (ch, tp); + (*linesw[tp->t_line].l_rint)(ch, tp); tty_unlock(tp); } diff --git a/bsd/dev/i386/sdt_x86.c b/bsd/dev/i386/sdt_x86.c index 9bd151891..b5c244cd8 100644 --- a/bsd/dev/i386/sdt_x86.c +++ b/bsd/dev/i386/sdt_x86.c @@ -59,43 +59,43 @@ sdt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax) dtrace_probe(sdt->sdp_id, regs->rdi, regs->rsi, regs->rdx, regs->rcx, regs->r8); - return (DTRACE_INVOP_NOP); + return DTRACE_INVOP_NOP; } } - return (0); + return 0; } struct frame { - struct frame *backchain; - uintptr_t retaddr; + struct frame *backchain; + uintptr_t retaddr; }; /*ARGSUSED*/ uint64_t sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) { -#pragma unused(arg, id, parg) +#pragma unused(arg, id, parg) uint64_t val; struct frame *fp = (struct frame *)__builtin_frame_address(0); uintptr_t *stack; uintptr_t pc; int i; - /* - * A total of 6 arguments are passed via registers; any argument with - * index of 5 or lower is therefore in a register. - */ - int inreg = 5; + /* + * A total of 6 arguments are passed via registers; any argument with + * index of 5 or lower is therefore in a register. 
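/*
 * Illustrative sketch (editor's addition, not part of the patch) of the
 * argument-indexing rule described in the comment above and applied by
 * sdt_getarg(): on x86_64 the first six integer arguments travel in
 * registers (inreg == 5 is the highest register index), so larger indexes
 * are rebased by inreg + 1 and read from the marshalled stack area.
 */
#include <stdio.h>

#define INREG 5

static const char *
locate_arg(int argno)
{
	static const char *regs[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
	static char buf[32];

	if (argno <= INREG) {
		return regs[argno];
	}
	snprintf(buf, sizeof(buf), "stack[%d]", argno - (INREG + 1));
	return buf;
}

int
main(void)
{
	for (int i = 0; i < 8; i++) {
		printf("arg%d -> %s\n", i, locate_arg(i));
	}
	return 0;
}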
+ */ + int inreg = 5; for (i = 1; i <= aframes; i++) { fp = fp->backchain; pc = fp->retaddr; if (dtrace_invop_callsite_pre != NULL - && pc > (uintptr_t)dtrace_invop_callsite_pre - && pc <= (uintptr_t)dtrace_invop_callsite_post) { + && pc > (uintptr_t)dtrace_invop_callsite_pre + && pc <= (uintptr_t)dtrace_invop_callsite_post) { /* * In the case of x86_64, we will use the pointer to the * save area structure that was pushed when we took the @@ -113,7 +113,7 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) fp = fp->backchain; /* to fbt_perfcallback() activation. */ fp = fp->backchain; /* to kernel_trap() activation. */ fp = fp->backchain; /* to trap_from_kernel() activation. */ - + x86_saved_state_t *tagged_regs = (x86_saved_state_t *)&fp[1]; x86_saved_state64_t *saved_state = saved_state64(tagged_regs); @@ -122,8 +122,8 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) } else { fp = (struct frame *)(saved_state->isf.rsp); stack = (uintptr_t *)&fp[0]; /* Find marshalled - arguments */ - argno -= (inreg +1); + * arguments */ + argno -= (inreg + 1); } goto load; } @@ -145,7 +145,7 @@ sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes) * register... */ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); - return (0); + return 0; } argno -= (inreg + 1); @@ -157,6 +157,5 @@ load: val = (uint64_t)(*(((uintptr_t *)stack) + argno)); DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); - return (val); + return val; } - diff --git a/bsd/dev/i386/stubs.c b/bsd/dev/i386/stubs.c index 4dd2830f5..aeb74b11f 100644 --- a/bsd/dev/i386/stubs.c +++ b/bsd/dev/i386/stubs.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -45,7 +45,7 @@ #include #include -/* +/* * copy a null terminated string from the kernel address space into * the user address space. * - if the user is denied write access, return EFAULT. 
@@ -58,25 +58,27 @@ int copyoutstr(const void *from, user_addr_t to, size_t maxlen, size_t *lencopied) { - size_t slen; - size_t len; - int error = 0; + size_t slen; + size_t len; + int error = 0; slen = strlen(from) + 1; - if (slen > maxlen) + if (slen > maxlen) { error = ENAMETOOLONG; + } - len = min(maxlen,slen); - if (copyout(from, to, len)) + len = min(maxlen, slen); + if (copyout(from, to, len)) { error = EFAULT; + } *lencopied = len; return error; } -/* - * copy a null terminated string from one point to another in +/* + * copy a null terminated string from one point to another in * the kernel address space. * - no access checks are performed. * - if the end of string isn't found before @@ -89,25 +91,27 @@ copyoutstr(const void *from, user_addr_t to, size_t maxlen, size_t *lencopied) int copystr(const void *vfrom, void *vto, size_t maxlen, size_t *lencopied) { - size_t l; - char const *from = (char const *) vfrom; - char *to = (char *) vto; + size_t l; + char const *from = (char const *) vfrom; + char *to = (char *) vto; for (l = 0; l < maxlen; l++) { if ((*to++ = *from++) == '\0') { - if (lencopied) + if (lencopied) { *lencopied = l + 1; + } return 0; } } - if (lencopied) + if (lencopied) { *lencopied = maxlen; + } return ENAMETOOLONG; } int copywithin(void *src, void *dst, size_t count) { - bcopy(src,dst,count); + bcopy(src, dst, count); return 0; } diff --git a/bsd/dev/i386/sysctl.c b/bsd/dev/i386/sysctl.c index 274e807c5..39dd11110 100644 --- a/bsd/dev/i386/sysctl.c +++ b/bsd/dev/i386/sysctl.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2003-2018 Apple Inc. All rights reserved. + * Copyright (c) 2003-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -45,130 +45,142 @@ static int _i386_cpu_info SYSCTL_HANDLER_ARGS { - __unused struct sysctl_oid *unused_oidp = oidp; - void *ptr = arg1; - int value; - - if (arg2 == -1) { - ptr = *(void **)ptr; - arg2 = 0; - } - - if (arg2 == 0 && ((char *)ptr)[0] == '\0') { - return ENOENT; - } - - if (arg2 == sizeof(uint8_t)) { - value = (uint32_t) *(uint8_t *)ptr; - ptr = &value; - arg2 = sizeof(uint32_t); - } - return SYSCTL_OUT(req, ptr, arg2 ? 
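/*
 * Illustrative userland rendition (editor's addition, not part of the patch)
 * of copystr() above: copy at most maxlen bytes including the terminating
 * NUL, report how many bytes were copied, and return ENAMETOOLONG when the
 * source did not fit.
 */
#include <errno.h>
#include <stdio.h>

static int
copystr_sketch(const void *vfrom, void *vto, size_t maxlen, size_t *lencopied)
{
	const char *from = (const char *)vfrom;
	char *to = (char *)vto;

	for (size_t l = 0; l < maxlen; l++) {
		if ((*to++ = *from++) == '\0') {
			if (lencopied) {
				*lencopied = l + 1;
			}
			return 0;
		}
	}
	if (lencopied) {
		*lencopied = maxlen;
	}
	return ENAMETOOLONG;
}

int
main(void)
{
	char buf[8];
	size_t n = 0;

	printf("fits:     rv=%d, n=%zu\n",
	    copystr_sketch("km", buf, sizeof(buf), &n), n);
	printf("too long: rv=%d\n",
	    copystr_sketch("much-too-long", buf, sizeof(buf), &n));
	return 0;
}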
(size_t) arg2 : strlen((char *)ptr)+1); + __unused struct sysctl_oid *unused_oidp = oidp; + void *ptr = arg1; + int value; + + if (arg2 == -1) { + ptr = *(void **)ptr; + arg2 = 0; + } + + if (arg2 == 0 && ((char *)ptr)[0] == '\0') { + return ENOENT; + } + + if (arg2 == sizeof(uint8_t)) { + value = (uint32_t) *(uint8_t *)ptr; + ptr = &value; + arg2 = sizeof(uint32_t); + } + return SYSCTL_OUT(req, ptr, arg2 ? (size_t) arg2 : strlen((char *)ptr) + 1); } static int i386_cpu_info SYSCTL_HANDLER_ARGS { - void *ptr = (uint8_t *)cpuid_info() + (uintptr_t)arg1; - return _i386_cpu_info(oidp, ptr, arg2, req); + void *ptr = (uint8_t *)cpuid_info() + (uintptr_t)arg1; + return _i386_cpu_info(oidp, ptr, arg2, req); } static int i386_cpu_info_nonzero SYSCTL_HANDLER_ARGS { - void *ptr = (uint8_t *)cpuid_info() + (uintptr_t)arg1; - int value = *(uint32_t *)ptr; + void *ptr = (uint8_t *)cpuid_info() + (uintptr_t)arg1; + int value = *(uint32_t *)ptr; - if (value == 0) - return ENOENT; + if (value == 0) { + return ENOENT; + } - return _i386_cpu_info(oidp, ptr, arg2, req); + return _i386_cpu_info(oidp, ptr, arg2, req); } static int cpu_mwait SYSCTL_HANDLER_ARGS { - i386_cpu_info_t *cpu_info = cpuid_info(); - void *ptr = (uint8_t *)cpu_info->cpuid_mwait_leafp + (uintptr_t)arg1; - if (cpu_info->cpuid_mwait_leafp == NULL) - return ENOENT; - return _i386_cpu_info(oidp, ptr, arg2, req); + i386_cpu_info_t *cpu_info = cpuid_info(); + void *ptr = (uint8_t *)cpu_info->cpuid_mwait_leafp + (uintptr_t)arg1; + if (cpu_info->cpuid_mwait_leafp == NULL) { + return ENOENT; + } + return _i386_cpu_info(oidp, ptr, arg2, req); } static int cpu_thermal SYSCTL_HANDLER_ARGS { - i386_cpu_info_t *cpu_info = cpuid_info(); - void *ptr = (uint8_t *)cpu_info->cpuid_thermal_leafp + (uintptr_t)arg1; - if (cpu_info->cpuid_thermal_leafp == NULL) - return ENOENT; - return _i386_cpu_info(oidp, ptr, arg2, req); + i386_cpu_info_t *cpu_info = cpuid_info(); + void *ptr = (uint8_t *)cpu_info->cpuid_thermal_leafp + (uintptr_t)arg1; + if (cpu_info->cpuid_thermal_leafp == NULL) { + return ENOENT; + } + return _i386_cpu_info(oidp, ptr, arg2, req); } static int cpu_arch_perf SYSCTL_HANDLER_ARGS { - i386_cpu_info_t *cpu_info = cpuid_info(); - void *ptr = (uint8_t *)cpu_info->cpuid_arch_perf_leafp + (uintptr_t)arg1; - if (cpu_info->cpuid_arch_perf_leafp == NULL) - return ENOENT; - return _i386_cpu_info(oidp, ptr, arg2, req); + i386_cpu_info_t *cpu_info = cpuid_info(); + void *ptr = (uint8_t *)cpu_info->cpuid_arch_perf_leafp + (uintptr_t)arg1; + if (cpu_info->cpuid_arch_perf_leafp == NULL) { + return ENOENT; + } + return _i386_cpu_info(oidp, ptr, arg2, req); } static int cpu_xsave SYSCTL_HANDLER_ARGS { - i386_cpu_info_t *cpu_info = cpuid_info(); - void *ptr = (uint8_t *)cpu_info->cpuid_xsave_leafp + (uintptr_t)arg1; - if (cpu_info->cpuid_xsave_leafp == NULL) - return ENOENT; - return _i386_cpu_info(oidp, ptr, arg2, req); + i386_cpu_info_t *cpu_info = cpuid_info(); + void *ptr = (uint8_t *)cpu_info->cpuid_xsave_leafp + (uintptr_t)arg1; + if (cpu_info->cpuid_xsave_leafp == NULL) { + return ENOENT; + } + return _i386_cpu_info(oidp, ptr, arg2, req); } static int cpu_features SYSCTL_HANDLER_ARGS { - __unused struct sysctl_oid *unused_oidp = oidp; - __unused void *unused_arg1 = arg1; - __unused int unused_arg2 = arg2; - char buf[512]; + __unused struct sysctl_oid *unused_oidp = oidp; + __unused void *unused_arg1 = arg1; + __unused int unused_arg2 = arg2; + char buf[512]; - buf[0] = '\0'; - cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)); + buf[0] = 
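/*
 * Illustrative consumer (editor's addition, not part of the patch): the
 * handlers above publish CPUID data through sysctl, so a userland program on
 * macOS can read the strings that cpu_features() and cpu_leaf7_features()
 * build via sysctlbyname(3).
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
	char buf[1024];
	size_t len = sizeof(buf);

	if (sysctlbyname("machdep.cpu.features", buf, &len, NULL, 0) == 0) {
		printf("machdep.cpu.features: %s\n", buf);
	}

	len = sizeof(buf);
	if (sysctlbyname("machdep.cpu.leaf7_features", buf, &len, NULL, 0) == 0) {
		printf("machdep.cpu.leaf7_features: %s\n", buf);
	}
	return 0;
}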
'\0'; + cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf)); - return SYSCTL_OUT(req, buf, strlen(buf) + 1); + return SYSCTL_OUT(req, buf, strlen(buf) + 1); } static int cpu_extfeatures SYSCTL_HANDLER_ARGS { - __unused struct sysctl_oid *unused_oidp = oidp; - __unused void *unused_arg1 = arg1; - __unused int unused_arg2 = arg2; - char buf[512]; + __unused struct sysctl_oid *unused_oidp = oidp; + __unused void *unused_arg1 = arg1; + __unused int unused_arg2 = arg2; + char buf[512]; - buf[0] = '\0'; - cpuid_get_extfeature_names(cpuid_extfeatures(), buf, sizeof(buf)); + buf[0] = '\0'; + cpuid_get_extfeature_names(cpuid_extfeatures(), buf, sizeof(buf)); - return SYSCTL_OUT(req, buf, strlen(buf) + 1); + return SYSCTL_OUT(req, buf, strlen(buf) + 1); } static int cpu_leaf7_features SYSCTL_HANDLER_ARGS { - __unused struct sysctl_oid *unused_oidp = oidp; - __unused void *unused_arg1 = arg1; - __unused int unused_arg2 = arg2; - char buf[512]; + __unused struct sysctl_oid *unused_oidp = oidp; + __unused void *unused_arg1 = arg1; + __unused int unused_arg2 = arg2; + char buf[512]; - uint32_t leaf7_features = cpuid_info()->cpuid_leaf7_features; - if (leaf7_features == 0) - return ENOENT; + uint64_t leaf7_features = cpuid_info()->cpuid_leaf7_features; + uint64_t leaf7_extfeatures = cpuid_info()->cpuid_leaf7_extfeatures; + if (leaf7_features == 0 && leaf7_extfeatures == 0) { + return ENOENT; + } - buf[0] = '\0'; - cpuid_get_leaf7_feature_names(leaf7_features, buf, sizeof(buf)); + buf[0] = '\0'; + cpuid_get_leaf7_feature_names(leaf7_features, buf, sizeof(buf)); + if (leaf7_extfeatures != 0) { + strlcat(buf, " ", sizeof(buf)); + cpuid_get_leaf7_extfeature_names(leaf7_extfeatures, buf + strlen(buf), + sizeof(buf) - strlen(buf)); + } - return SYSCTL_OUT(req, buf, strlen(buf) + 1); + return SYSCTL_OUT(req, buf, strlen(buf) + 1); } static int @@ -179,11 +191,12 @@ cpu_logical_per_package SYSCTL_HANDLER_ARGS __unused int unused_arg2 = arg2; i386_cpu_info_t *cpu_info = cpuid_info(); - if (!(cpuid_features() & CPUID_FEATURE_HTT)) + if (!(cpuid_features() & CPUID_FEATURE_HTT)) { return ENOENT; + } return SYSCTL_OUT(req, &cpu_info->cpuid_logical_per_package, - sizeof(cpu_info->cpuid_logical_per_package)); + sizeof(cpu_info->cpuid_logical_per_package)); } static int @@ -194,8 +207,9 @@ cpu_flex_ratio_desired SYSCTL_HANDLER_ARGS __unused int unused_arg2 = arg2; i386_cpu_info_t *cpu_info = cpuid_info(); - if (cpu_info->cpuid_model != 26) + if (cpu_info->cpuid_model != 26) { return ENOENT; + } return SYSCTL_OUT(req, &flex_ratio, sizeof(flex_ratio)); } @@ -208,8 +222,9 @@ cpu_flex_ratio_min SYSCTL_HANDLER_ARGS __unused int unused_arg2 = arg2; i386_cpu_info_t *cpu_info = cpuid_info(); - if (cpu_info->cpuid_model != 26) + if (cpu_info->cpuid_model != 26) { return ENOENT; + } return SYSCTL_OUT(req, &flex_ratio_min, sizeof(flex_ratio_min)); } @@ -222,8 +237,9 @@ cpu_flex_ratio_max SYSCTL_HANDLER_ARGS __unused int unused_arg2 = arg2; i386_cpu_info_t *cpu_info = cpuid_info(); - if (cpu_info->cpuid_model != 26) + if (cpu_info->cpuid_model != 26) { return ENOENT; + } return SYSCTL_OUT(req, &flex_ratio_max, sizeof(flex_ratio_max)); } @@ -236,10 +252,11 @@ cpu_ucode_update SYSCTL_HANDLER_ARGS __unused int unused_arg2 = arg2; uint64_t addr; int error; - + error = SYSCTL_IN(req, &addr, sizeof(addr)); - if (error) + if (error) { return error; + } int ret = ucode_interface(addr); return ret; @@ -313,8 +330,8 @@ static uint64_t kernel_timeout_spin = 0; static int misc_kernel_timeout_spin(__unused struct sysctl_oid *oidp, 
__unused void *arg1, __unused int arg2, struct sysctl_req *req) { - uint64_t old_value; - uint64_t new_value; + uint64_t old_value; + uint64_t new_value; int changed = 0, error; char buf[128]; buf[0] = '\0'; @@ -331,415 +348,421 @@ misc_kernel_timeout_spin(__unused struct sysctl_oid *oidp, __unused void *arg1, #endif /* DEVELOPMENT || DEBUG */ -SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "CPU info"); +SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "CPU info"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, max_basic, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_max_basic),sizeof(uint32_t), - i386_cpu_info, "IU", "Max Basic Information value"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, max_basic, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_max_basic), sizeof(uint32_t), + i386_cpu_info, "IU", "Max Basic Information value"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, max_ext, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_max_ext), sizeof(uint32_t), - i386_cpu_info, "IU", "Max Extended Function Information value"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, max_ext, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_max_ext), sizeof(uint32_t), + i386_cpu_info, "IU", "Max Extended Function Information value"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, vendor, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_vendor), 0, - i386_cpu_info, "A", "CPU vendor"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, vendor, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_vendor), 0, + i386_cpu_info, "A", "CPU vendor"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_brand_string), 0, - i386_cpu_info, "A", "CPU brand string"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_brand_string), 0, + i386_cpu_info, "A", "CPU brand string"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, family, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_family), sizeof(uint8_t), - i386_cpu_info, "I", "CPU family"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, family, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_family), sizeof(uint8_t), + i386_cpu_info, "I", "CPU family"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, model, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_model), sizeof(uint8_t), - i386_cpu_info, "I", "CPU model"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, model, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_model), sizeof(uint8_t), + i386_cpu_info, "I", "CPU model"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, extmodel, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_extmodel), sizeof(uint8_t), - i386_cpu_info, "I", "CPU extended model"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, extmodel, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_extmodel), sizeof(uint8_t), + i386_cpu_info, "I", "CPU extended model"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_extfamily), sizeof(uint8_t), - i386_cpu_info, "I", "CPU extended family"); 
+SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfamily, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_extfamily), sizeof(uint8_t), + i386_cpu_info, "I", "CPU extended family"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, stepping, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_stepping), sizeof(uint8_t), - i386_cpu_info, "I", "CPU stepping"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, stepping, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_stepping), sizeof(uint8_t), + i386_cpu_info, "I", "CPU stepping"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, feature_bits, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_features), sizeof(uint64_t), - i386_cpu_info, "IU", "CPU features"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, feature_bits, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_features), sizeof(uint64_t), + i386_cpu_info, "IU", "CPU features"); SYSCTL_PROC(_machdep_cpu, OID_AUTO, leaf7_feature_bits, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_leaf7_features), - sizeof(uint32_t), - i386_cpu_info_nonzero, "IU", "CPU Leaf7 features"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_leaf7_features), + sizeof(uint64_t), + i386_cpu_info_nonzero, "IU", "CPU Leaf7 features [EBX ECX]"); + +SYSCTL_PROC(_machdep_cpu, OID_AUTO, leaf7_feature_bits_edx, + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_leaf7_extfeatures), + sizeof(uint32_t), + i386_cpu_info_nonzero, "IU", "CPU Leaf7 features [EDX]"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfeature_bits, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_extfeatures), sizeof(uint64_t), - i386_cpu_info, "IU", "CPU extended features"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfeature_bits, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_extfeatures), sizeof(uint64_t), + i386_cpu_info, "IU", "CPU extended features"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, signature, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_signature), sizeof(uint32_t), - i386_cpu_info, "I", "CPU signature"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, signature, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_signature), sizeof(uint32_t), + i386_cpu_info, "I", "CPU signature"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_brand), sizeof(uint8_t), - i386_cpu_info, "I", "CPU brand"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_brand), sizeof(uint8_t), + i386_cpu_info, "I", "CPU brand"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, features, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, - cpu_features, "A", "CPU feature names"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, features, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, + cpu_features, "A", "CPU feature names"); SYSCTL_PROC(_machdep_cpu, OID_AUTO, leaf7_features, - CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, - cpu_leaf7_features, "A", "CPU Leaf7 feature names"); + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, + cpu_leaf7_features, "A", "CPU Leaf7 feature names"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfeatures, CTLTYPE_STRING | 
CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, - cpu_extfeatures, "A", "CPU extended feature names"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfeatures, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, + cpu_extfeatures, "A", "CPU extended feature names"); SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, - cpu_logical_per_package, "I", "CPU logical cpus per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, + cpu_logical_per_package, "I", "CPU logical cpus per package"); SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_cores_per_package), - sizeof(uint32_t), - i386_cpu_info, "I", "CPU cores per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_cores_per_package), + sizeof(uint32_t), + i386_cpu_info, "I", "CPU cores per package"); SYSCTL_PROC(_machdep_cpu, OID_AUTO, microcode_version, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_microcode_version), - sizeof(uint32_t), - i386_cpu_info, "I", "Microcode version number"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_microcode_version), + sizeof(uint32_t), + i386_cpu_info, "I", "Microcode version number"); SYSCTL_PROC(_machdep_cpu, OID_AUTO, processor_flag, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_processor_flag), - sizeof(uint32_t), - i386_cpu_info, "I", "CPU processor flag"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_processor_flag), + sizeof(uint32_t), + i386_cpu_info, "I", "CPU processor flag"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, mwait, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "mwait"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, mwait, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "mwait"); SYSCTL_PROC(_machdep_cpu_mwait, OID_AUTO, linesize_min, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_mwait_leaf_t, linesize_min), - sizeof(uint32_t), - cpu_mwait, "I", "Monitor/mwait minimum line size"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_mwait_leaf_t, linesize_min), + sizeof(uint32_t), + cpu_mwait, "I", "Monitor/mwait minimum line size"); SYSCTL_PROC(_machdep_cpu_mwait, OID_AUTO, linesize_max, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_mwait_leaf_t, linesize_max), - sizeof(uint32_t), - cpu_mwait, "I", "Monitor/mwait maximum line size"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_mwait_leaf_t, linesize_max), + sizeof(uint32_t), + cpu_mwait, "I", "Monitor/mwait maximum line size"); SYSCTL_PROC(_machdep_cpu_mwait, OID_AUTO, extensions, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_mwait_leaf_t, extensions), - sizeof(uint32_t), - cpu_mwait, "I", "Monitor/mwait extensions"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_mwait_leaf_t, extensions), + sizeof(uint32_t), + cpu_mwait, "I", "Monitor/mwait extensions"); SYSCTL_PROC(_machdep_cpu_mwait, OID_AUTO, sub_Cstates, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_mwait_leaf_t, sub_Cstates), - sizeof(uint32_t), - cpu_mwait, "I", "Monitor/mwait sub C-states"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_mwait_leaf_t, sub_Cstates), + sizeof(uint32_t), + cpu_mwait, "I", "Monitor/mwait sub C-states"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, thermal, 
CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "thermal"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, thermal, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "thermal"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, sensor, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, sensor), - sizeof(boolean_t), - cpu_thermal, "I", "Thermal sensor present"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, sensor), + sizeof(boolean_t), + cpu_thermal, "I", "Thermal sensor present"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, dynamic_acceleration, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, dynamic_acceleration), - sizeof(boolean_t), - cpu_thermal, "I", "Dynamic Acceleration Technology (Turbo Mode)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, dynamic_acceleration), + sizeof(boolean_t), + cpu_thermal, "I", "Dynamic Acceleration Technology (Turbo Mode)"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, invariant_APIC_timer, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, invariant_APIC_timer), - sizeof(boolean_t), - cpu_thermal, "I", "Invariant APIC Timer"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, invariant_APIC_timer), + sizeof(boolean_t), + cpu_thermal, "I", "Invariant APIC Timer"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, thresholds, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, thresholds), - sizeof(uint32_t), - cpu_thermal, "I", "Number of interrupt thresholds"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, thresholds), + sizeof(uint32_t), + cpu_thermal, "I", "Number of interrupt thresholds"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, ACNT_MCNT, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, ACNT_MCNT), - sizeof(boolean_t), - cpu_thermal, "I", "ACNT_MCNT capability"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, ACNT_MCNT), + sizeof(boolean_t), + cpu_thermal, "I", "ACNT_MCNT capability"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, core_power_limits, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, core_power_limits), - sizeof(boolean_t), - cpu_thermal, "I", "Power Limit Notifications at a Core Level"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, core_power_limits), + sizeof(boolean_t), + cpu_thermal, "I", "Power Limit Notifications at a Core Level"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, fine_grain_clock_mod, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, fine_grain_clock_mod), - sizeof(boolean_t), - cpu_thermal, "I", "Fine Grain Clock Modulation"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, fine_grain_clock_mod), + sizeof(boolean_t), + cpu_thermal, "I", "Fine Grain Clock Modulation"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, package_thermal_intr, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, package_thermal_intr), - sizeof(boolean_t), - cpu_thermal, "I", "Package Thermal interrupt and Status"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, package_thermal_intr), + sizeof(boolean_t), + cpu_thermal, "I", "Package Thermal interrupt and Status"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, 
hardware_feedback, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, hardware_feedback), - sizeof(boolean_t), - cpu_thermal, "I", "Hardware Coordination Feedback"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, hardware_feedback), + sizeof(boolean_t), + cpu_thermal, "I", "Hardware Coordination Feedback"); SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, energy_policy, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_thermal_leaf_t, energy_policy), - sizeof(boolean_t), - cpu_thermal, "I", "Energy Efficient Policy Support"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_thermal_leaf_t, energy_policy), + sizeof(boolean_t), + cpu_thermal, "I", "Energy Efficient Policy Support"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, xsave, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "xsave"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, xsave, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "xsave"); SYSCTL_PROC(_machdep_cpu_xsave, OID_AUTO, extended_state, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) 0, - sizeof(cpuid_xsave_leaf_t), - cpu_xsave, "IU", "XSAVE Extended State Main Leaf"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) 0, + sizeof(cpuid_xsave_leaf_t), + cpu_xsave, "IU", "XSAVE Extended State Main Leaf"); SYSCTL_PROC(_machdep_cpu_xsave, OID_AUTO, extended_state1, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) sizeof(cpuid_xsave_leaf_t), - sizeof(cpuid_xsave_leaf_t), - cpu_xsave, "IU", "XSAVE Extended State Sub-leaf 1"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) sizeof(cpuid_xsave_leaf_t), + sizeof(cpuid_xsave_leaf_t), + cpu_xsave, "IU", "XSAVE Extended State Sub-leaf 1"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, arch_perf, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "arch_perf"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, arch_perf, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "arch_perf"); SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, version, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_arch_perf_leaf_t, version), - sizeof(uint8_t), - cpu_arch_perf, "I", "Architectural Performance Version Number"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_arch_perf_leaf_t, version), + sizeof(uint8_t), + cpu_arch_perf, "I", "Architectural Performance Version Number"); SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, number, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_arch_perf_leaf_t, number), - sizeof(uint8_t), - cpu_arch_perf, "I", "Number of counters per logical cpu"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_arch_perf_leaf_t, number), + sizeof(uint8_t), + cpu_arch_perf, "I", "Number of counters per logical cpu"); SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, width, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_arch_perf_leaf_t, width), - sizeof(uint8_t), - cpu_arch_perf, "I", "Bit width of counters"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_arch_perf_leaf_t, width), + sizeof(uint8_t), + cpu_arch_perf, "I", "Bit width of counters"); SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, events_number, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_arch_perf_leaf_t, events_number), - sizeof(uint8_t), - cpu_arch_perf, "I", "Number of monitoring events"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_arch_perf_leaf_t, events_number), + sizeof(uint8_t), + cpu_arch_perf, "I", "Number of monitoring events"); 
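
A note on the handler wiring visible above: each registration passes (void *)offsetof(...) as arg1 and the field size as arg2, and the handler (i386_cpu_info, cpu_thermal, cpu_arch_perf, and the other handlers earlier in this file) adds that offset to the base of the relevant cpuid structure before copying the field out with SYSCTL_OUT, treating arg2 == 0 as "NUL-terminated string". The stand-alone sketch below shows only that offset plumbing; fake_cpu_info_t and demo_handler are hypothetical stand-ins for the kernel types, not xnu symbols.

/*
 * User-space analogue of the offsetof(arg1) dispatch used by the
 * machdep.cpu handlers. fake_cpu_info_t and demo_handler are
 * hypothetical names for illustration only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct {
	char		vendor[16];
	uint32_t	cores_per_package;
} fake_cpu_info_t;

static fake_cpu_info_t info = { "GenuineDemo", 8 };

/* arg1 is a field offset; arg2 is the field size, 0 meaning a
 * NUL-terminated string (the same convention the kernel handler uses). */
static int
demo_handler(void *arg1, int arg2)
{
	void *ptr = (uint8_t *)&info + (uintptr_t)arg1;

	if (arg2 == 0) {
		printf("%s\n", (char *)ptr);
	} else {
		uint32_t value;
		memcpy(&value, ptr, sizeof(value));
		printf("%u\n", value);
	}
	return 0;
}

int
main(void)
{
	demo_handler((void *)offsetof(fake_cpu_info_t, vendor), 0);
	demo_handler((void *)offsetof(fake_cpu_info_t, cores_per_package),
	    (int)sizeof(uint32_t));
	return 0;
}

The kernel handlers follow the same shape, with SYSCTL_OUT(req, ptr, len) in place of printf.
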
SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, events, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_arch_perf_leaf_t, events), - sizeof(uint32_t), - cpu_arch_perf, "I", "Bit vector of events"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_arch_perf_leaf_t, events), + sizeof(uint32_t), + cpu_arch_perf, "I", "Bit vector of events"); SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, fixed_number, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_arch_perf_leaf_t, fixed_number), - sizeof(uint8_t), - cpu_arch_perf, "I", "Number of fixed-function counters"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_arch_perf_leaf_t, fixed_number), + sizeof(uint8_t), + cpu_arch_perf, "I", "Number of fixed-function counters"); SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, fixed_width, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(cpuid_arch_perf_leaf_t, fixed_width), - sizeof(uint8_t), - cpu_arch_perf, "I", "Bit-width of fixed-function counters"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(cpuid_arch_perf_leaf_t, fixed_width), + sizeof(uint8_t), + cpu_arch_perf, "I", "Bit-width of fixed-function counters"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, cache, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "cache"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, cache, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "cache"); SYSCTL_PROC(_machdep_cpu_cache, OID_AUTO, linesize, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_cache_linesize), - sizeof(uint32_t), - i386_cpu_info, "I", "Cacheline size"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_cache_linesize), + sizeof(uint32_t), + i386_cpu_info, "I", "Cacheline size"); SYSCTL_PROC(_machdep_cpu_cache, OID_AUTO, L2_associativity, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_cache_L2_associativity), - sizeof(uint32_t), - i386_cpu_info, "I", "L2 cache associativity"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_cache_L2_associativity), + sizeof(uint32_t), + i386_cpu_info, "I", "L2 cache associativity"); SYSCTL_PROC(_machdep_cpu_cache, OID_AUTO, size, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_cache_size), - sizeof(uint32_t), - i386_cpu_info, "I", "Cache size (in Kbytes)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_cache_size), + sizeof(uint32_t), + i386_cpu_info, "I", "Cache size (in Kbytes)"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, tlb, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "tlb"); -SYSCTL_NODE(_machdep_cpu_tlb, OID_AUTO, inst, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "inst"); -SYSCTL_NODE(_machdep_cpu_tlb, OID_AUTO, data, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "data"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, tlb, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "tlb"); +SYSCTL_NODE(_machdep_cpu_tlb, OID_AUTO, inst, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "inst"); +SYSCTL_NODE(_machdep_cpu_tlb, OID_AUTO, data, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "data"); SYSCTL_PROC(_machdep_cpu_tlb_inst, OID_AUTO, small, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, - cpuid_tlb[TLB_INST][TLB_SMALL][0]), - sizeof(uint32_t), - i386_cpu_info_nonzero, "I", - "Number of small page instruction TLBs"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, + cpuid_tlb[TLB_INST][TLB_SMALL][0]), + sizeof(uint32_t), + i386_cpu_info_nonzero, "I", + 
"Number of small page instruction TLBs"); SYSCTL_PROC(_machdep_cpu_tlb_data, OID_AUTO, small, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, - cpuid_tlb[TLB_DATA][TLB_SMALL][0]), - sizeof(uint32_t), - i386_cpu_info_nonzero, "I", - "Number of small page data TLBs (1st level)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, + cpuid_tlb[TLB_DATA][TLB_SMALL][0]), + sizeof(uint32_t), + i386_cpu_info_nonzero, "I", + "Number of small page data TLBs (1st level)"); SYSCTL_PROC(_machdep_cpu_tlb_data, OID_AUTO, small_level1, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, - cpuid_tlb[TLB_DATA][TLB_SMALL][1]), - sizeof(uint32_t), - i386_cpu_info_nonzero, "I", - "Number of small page data TLBs (2nd level)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, + cpuid_tlb[TLB_DATA][TLB_SMALL][1]), + sizeof(uint32_t), + i386_cpu_info_nonzero, "I", + "Number of small page data TLBs (2nd level)"); SYSCTL_PROC(_machdep_cpu_tlb_inst, OID_AUTO, large, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, - cpuid_tlb[TLB_INST][TLB_LARGE][0]), - sizeof(uint32_t), - i386_cpu_info_nonzero, "I", - "Number of large page instruction TLBs"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, + cpuid_tlb[TLB_INST][TLB_LARGE][0]), + sizeof(uint32_t), + i386_cpu_info_nonzero, "I", + "Number of large page instruction TLBs"); SYSCTL_PROC(_machdep_cpu_tlb_data, OID_AUTO, large, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, - cpuid_tlb[TLB_DATA][TLB_LARGE][0]), - sizeof(uint32_t), - i386_cpu_info_nonzero, "I", - "Number of large page data TLBs (1st level)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, + cpuid_tlb[TLB_DATA][TLB_LARGE][0]), + sizeof(uint32_t), + i386_cpu_info_nonzero, "I", + "Number of large page data TLBs (1st level)"); SYSCTL_PROC(_machdep_cpu_tlb_data, OID_AUTO, large_level1, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, - cpuid_tlb[TLB_DATA][TLB_LARGE][1]), - sizeof(uint32_t), - i386_cpu_info_nonzero, "I", - "Number of large page data TLBs (2nd level)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, + cpuid_tlb[TLB_DATA][TLB_LARGE][1]), + sizeof(uint32_t), + i386_cpu_info_nonzero, "I", + "Number of large page data TLBs (2nd level)"); SYSCTL_PROC(_machdep_cpu_tlb, OID_AUTO, shared, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_stlb), - sizeof(uint32_t), - i386_cpu_info_nonzero, "I", - "Number of shared TLBs"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_stlb), + sizeof(uint32_t), + i386_cpu_info_nonzero, "I", + "Number of shared TLBs"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, address_bits, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "address_bits"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, address_bits, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "address_bits"); SYSCTL_PROC(_machdep_cpu_address_bits, OID_AUTO, physical, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_address_bits_physical), - sizeof(uint32_t), - i386_cpu_info, "I", "Number of physical address bits"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_address_bits_physical), + sizeof(uint32_t), + i386_cpu_info, "I", "Number of physical address bits"); 
SYSCTL_PROC(_machdep_cpu_address_bits, OID_AUTO, virtual, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_address_bits_virtual), - sizeof(uint32_t), - i386_cpu_info, "I", "Number of virtual address bits"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_address_bits_virtual), + sizeof(uint32_t), + i386_cpu_info, "I", "Number of virtual address bits"); SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, core_count), - sizeof(uint32_t), - i386_cpu_info, "I", "Number of enabled cores per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, core_count), + sizeof(uint32_t), + i386_cpu_info, "I", "Number of enabled cores per package"); SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, thread_count), - sizeof(uint32_t), - i386_cpu_info, "I", "Number of enabled threads per package"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, thread_count), + sizeof(uint32_t), + i386_cpu_info, "I", "Number of enabled threads per package"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, flex_ratio, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Flex ratio"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, flex_ratio, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Flex ratio"); SYSCTL_PROC(_machdep_cpu_flex_ratio, OID_AUTO, desired, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, - cpu_flex_ratio_desired, "I", "Flex ratio desired (0 disabled)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, + cpu_flex_ratio_desired, "I", "Flex ratio desired (0 disabled)"); SYSCTL_PROC(_machdep_cpu_flex_ratio, OID_AUTO, min, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, - cpu_flex_ratio_min, "I", "Flex ratio min (efficiency)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, + cpu_flex_ratio_min, "I", "Flex ratio min (efficiency)"); SYSCTL_PROC(_machdep_cpu_flex_ratio, OID_AUTO, max, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, - cpu_flex_ratio_max, "I", "Flex ratio max (non-turbo)"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, + cpu_flex_ratio_max, "I", "Flex ratio max (non-turbo)"); -SYSCTL_PROC(_machdep_cpu, OID_AUTO, ucupdate, - CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED, 0, 0, - cpu_ucode_update, "S", "Microcode update interface"); +SYSCTL_PROC(_machdep_cpu, OID_AUTO, ucupdate, + CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED, 0, 0, + cpu_ucode_update, "S", "Microcode update interface"); -SYSCTL_NODE(_machdep_cpu, OID_AUTO, tsc_ccc, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "TSC/CCC frequency information"); +SYSCTL_NODE(_machdep_cpu, OID_AUTO, tsc_ccc, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "TSC/CCC frequency information"); SYSCTL_PROC(_machdep_cpu_tsc_ccc, OID_AUTO, numerator, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_tsc_leaf.numerator), - sizeof(uint32_t), - i386_cpu_info, "I", "Numerator of TSC/CCC ratio"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)offsetof(i386_cpu_info_t, cpuid_tsc_leaf.numerator), + sizeof(uint32_t), + i386_cpu_info, "I", "Numerator of TSC/CCC ratio"); SYSCTL_PROC(_machdep_cpu_tsc_ccc, OID_AUTO, denominator, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)offsetof(i386_cpu_info_t, cpuid_tsc_leaf.denominator), - sizeof(uint32_t), - i386_cpu_info, "I", "Denominator of TSC/CCC ratio"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void 
*)offsetof(i386_cpu_info_t, cpuid_tsc_leaf.denominator), + sizeof(uint32_t), + i386_cpu_info, "I", "Denominator of TSC/CCC ratio"); static const uint32_t apic_timer_vector = (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT); static const uint32_t apic_IPI_vector = (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT); SYSCTL_NODE(_machdep, OID_AUTO, vectors, CTLFLAG_RD | CTLFLAG_LOCKED, 0, - "Interrupt vector assignments"); + "Interrupt vector assignments"); -SYSCTL_UINT (_machdep_vectors, OID_AUTO, timer, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, __DECONST(uint32_t *,&apic_timer_vector), 0, ""); -SYSCTL_UINT (_machdep_vectors, OID_AUTO, IPI, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, __DECONST(uint32_t *,&apic_IPI_vector), 0, ""); +SYSCTL_UINT(_machdep_vectors, OID_AUTO, timer, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, __DECONST(uint32_t *, &apic_timer_vector), 0, ""); +SYSCTL_UINT(_machdep_vectors, OID_AUTO, IPI, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, __DECONST(uint32_t *, &apic_IPI_vector), 0, ""); uint64_t pmap_pv_hashlist_walks; uint64_t pmap_pv_hashlist_cnts; @@ -749,16 +772,16 @@ extern uint32_t pv_hashed_kern_low_water_mark; /*extern struct sysctl_oid_list sysctl__machdep_pmap_children;*/ -SYSCTL_NODE(_machdep, OID_AUTO, pmap, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "PMAP info"); +SYSCTL_NODE(_machdep, OID_AUTO, pmap, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "PMAP info"); -SYSCTL_QUAD (_machdep_pmap, OID_AUTO, hashwalks, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &pmap_pv_hashlist_walks, ""); -SYSCTL_QUAD (_machdep_pmap, OID_AUTO, hashcnts, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &pmap_pv_hashlist_cnts, ""); -SYSCTL_INT (_machdep_pmap, OID_AUTO, hashmax, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &pmap_pv_hashlist_max, 0, ""); -SYSCTL_INT (_machdep_pmap, OID_AUTO, kernel_text_ps, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &pmap_kernel_text_ps, 0, ""); -SYSCTL_INT (_machdep_pmap, OID_AUTO, kern_pv_reserve, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, &pv_hashed_kern_low_water_mark, 0, ""); +SYSCTL_QUAD(_machdep_pmap, OID_AUTO, hashwalks, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &pmap_pv_hashlist_walks, ""); +SYSCTL_QUAD(_machdep_pmap, OID_AUTO, hashcnts, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &pmap_pv_hashlist_cnts, ""); +SYSCTL_INT(_machdep_pmap, OID_AUTO, hashmax, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &pmap_pv_hashlist_max, 0, ""); +SYSCTL_INT(_machdep_pmap, OID_AUTO, kernel_text_ps, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &pmap_kernel_text_ps, 0, ""); +SYSCTL_INT(_machdep_pmap, OID_AUTO, kern_pv_reserve, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, &pv_hashed_kern_low_water_mark, 0, ""); -SYSCTL_NODE(_machdep, OID_AUTO, memmap, CTLFLAG_RD|CTLFLAG_LOCKED, NULL, "physical memory map"); +SYSCTL_NODE(_machdep, OID_AUTO, memmap, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "physical memory map"); uint64_t firmware_Conventional_bytes = 0; uint64_t firmware_RuntimeServices_bytes = 0; @@ -769,86 +792,107 @@ uint64_t firmware_Reserved_bytes = 0; uint64_t firmware_Unusable_bytes = 0; uint64_t firmware_other_bytes = 0; -SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Conventional, CTLFLAG_RD|CTLFLAG_LOCKED, &firmware_Conventional_bytes, ""); -SYSCTL_QUAD(_machdep_memmap, OID_AUTO, RuntimeServices, CTLFLAG_RD|CTLFLAG_LOCKED, &firmware_RuntimeServices_bytes, ""); -SYSCTL_QUAD(_machdep_memmap, OID_AUTO, ACPIReclaim, CTLFLAG_RD|CTLFLAG_LOCKED, &firmware_ACPIReclaim_bytes, ""); -SYSCTL_QUAD(_machdep_memmap, OID_AUTO, ACPINVS, CTLFLAG_RD|CTLFLAG_LOCKED, &firmware_ACPINVS_bytes, ""); 
-SYSCTL_QUAD(_machdep_memmap, OID_AUTO, PalCode, CTLFLAG_RD|CTLFLAG_LOCKED, &firmware_PalCode_bytes, ""); -SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Reserved, CTLFLAG_RD|CTLFLAG_LOCKED, &firmware_Reserved_bytes, ""); -SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Unusable, CTLFLAG_RD|CTLFLAG_LOCKED, &firmware_Unusable_bytes, ""); -SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Other, CTLFLAG_RD|CTLFLAG_LOCKED, &firmware_other_bytes, ""); +SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Conventional, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_Conventional_bytes, ""); +SYSCTL_QUAD(_machdep_memmap, OID_AUTO, RuntimeServices, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_RuntimeServices_bytes, ""); +SYSCTL_QUAD(_machdep_memmap, OID_AUTO, ACPIReclaim, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_ACPIReclaim_bytes, ""); +SYSCTL_QUAD(_machdep_memmap, OID_AUTO, ACPINVS, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_ACPINVS_bytes, ""); +SYSCTL_QUAD(_machdep_memmap, OID_AUTO, PalCode, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_PalCode_bytes, ""); +SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Reserved, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_Reserved_bytes, ""); +SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Unusable, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_Unusable_bytes, ""); +SYSCTL_QUAD(_machdep_memmap, OID_AUTO, Other, CTLFLAG_RD | CTLFLAG_LOCKED, &firmware_other_bytes, ""); -SYSCTL_NODE(_machdep, OID_AUTO, tsc, CTLFLAG_RD|CTLFLAG_LOCKED, NULL, "Timestamp counter parameters"); +SYSCTL_NODE(_machdep, OID_AUTO, tsc, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "Timestamp counter parameters"); SYSCTL_QUAD(_machdep_tsc, OID_AUTO, frequency, - CTLFLAG_RD|CTLFLAG_LOCKED, &tscFreq, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &tscFreq, ""); extern uint32_t deep_idle_rebase; SYSCTL_UINT(_machdep_tsc, OID_AUTO, deep_idle_rebase, - CTLFLAG_RD|CTLFLAG_LOCKED, &deep_idle_rebase, 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &deep_idle_rebase, 0, ""); SYSCTL_QUAD(_machdep_tsc, OID_AUTO, at_boot, - CTLFLAG_RD|CTLFLAG_LOCKED, &tsc_at_boot, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &tsc_at_boot, ""); SYSCTL_QUAD(_machdep_tsc, OID_AUTO, rebase_abs_time, - CTLFLAG_RD|CTLFLAG_LOCKED, &tsc_rebase_abs_time, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &tsc_rebase_abs_time, ""); SYSCTL_NODE(_machdep_tsc, OID_AUTO, nanotime, - CTLFLAG_RD|CTLFLAG_LOCKED, NULL, "TSC to ns conversion"); + CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "TSC to ns conversion"); SYSCTL_QUAD(_machdep_tsc_nanotime, OID_AUTO, tsc_base, - CTLFLAG_RD | CTLFLAG_LOCKED, - __DECONST(uint64_t *, &pal_rtc_nanotime_info.tsc_base), ""); + CTLFLAG_RD | CTLFLAG_LOCKED, + __DECONST(uint64_t *, &pal_rtc_nanotime_info.tsc_base), ""); SYSCTL_QUAD(_machdep_tsc_nanotime, OID_AUTO, ns_base, - CTLFLAG_RD | CTLFLAG_LOCKED, - __DECONST(uint64_t *, &pal_rtc_nanotime_info.ns_base), ""); + CTLFLAG_RD | CTLFLAG_LOCKED, + __DECONST(uint64_t *, &pal_rtc_nanotime_info.ns_base), ""); SYSCTL_UINT(_machdep_tsc_nanotime, OID_AUTO, scale, - CTLFLAG_RD | CTLFLAG_LOCKED, - __DECONST(uint32_t *, &pal_rtc_nanotime_info.scale), 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, + __DECONST(uint32_t *, &pal_rtc_nanotime_info.scale), 0, ""); SYSCTL_UINT(_machdep_tsc_nanotime, OID_AUTO, shift, - CTLFLAG_RD | CTLFLAG_LOCKED, - __DECONST(uint32_t *, &pal_rtc_nanotime_info.shift), 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, + __DECONST(uint32_t *, &pal_rtc_nanotime_info.shift), 0, ""); SYSCTL_UINT(_machdep_tsc_nanotime, OID_AUTO, generation, - CTLFLAG_RD | CTLFLAG_LOCKED, - __DECONST(uint32_t *, &pal_rtc_nanotime_info.generation), 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, + __DECONST(uint32_t *, 
&pal_rtc_nanotime_info.generation), 0, ""); -SYSCTL_NODE(_machdep, OID_AUTO, misc, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Miscellaneous x86 kernel parameters"); +SYSCTL_NODE(_machdep, OID_AUTO, misc, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Miscellaneous x86 kernel parameters"); #if (DEVELOPMENT || DEBUG) extern uint32_t mp_interrupt_watchdog_events; SYSCTL_UINT(_machdep_misc, OID_AUTO, interrupt_watchdog_events, - CTLFLAG_RW|CTLFLAG_LOCKED, &mp_interrupt_watchdog_events, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &mp_interrupt_watchdog_events, 0, ""); #endif SYSCTL_PROC(_machdep_misc, OID_AUTO, panic_restart_timeout, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, - panic_set_restart_timeout, "I", "Panic restart timeout in seconds"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, + panic_set_restart_timeout, "I", "Panic restart timeout in seconds"); SYSCTL_PROC(_machdep_misc, OID_AUTO, interrupt_latency_max, - CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, - misc_interrupt_latency_max, "A", "Maximum Interrupt latency"); + CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, + misc_interrupt_latency_max, "A", "Maximum Interrupt latency"); #if DEVELOPMENT || DEBUG SYSCTL_PROC(_machdep_misc, OID_AUTO, machine_check_panic, - CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, - misc_machine_check_panic, "A", "Machine-check exception test"); + CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, + misc_machine_check_panic, "A", "Machine-check exception test"); SYSCTL_PROC(_machdep_misc, OID_AUTO, kernel_timeout_spin, - CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, sizeof(kernel_timeout_spin), - misc_kernel_timeout_spin, "Q", "Kernel timeout panic test"); + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, sizeof(kernel_timeout_spin), + misc_kernel_timeout_spin, "Q", "Kernel timeout panic test"); SYSCTL_QUAD(_machdep, OID_AUTO, reportphyreadabs, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &reportphyreaddelayabs, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &reportphyreaddelayabs, ""); +SYSCTL_QUAD(_machdep, OID_AUTO, reportphywriteabs, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &reportphywritedelayabs, ""); +SYSCTL_QUAD(_machdep, OID_AUTO, tracephyreadabs, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &tracephyreaddelayabs, ""); +SYSCTL_QUAD(_machdep, OID_AUTO, tracephywriteabs, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &tracephywritedelayabs, ""); SYSCTL_INT(_machdep, OID_AUTO, reportphyreadosbt, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &reportphyreadosbt, 0, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &reportphyreadosbt, 0, ""); +SYSCTL_INT(_machdep, OID_AUTO, reportphywriteosbt, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &reportphywriteosbt, 0, ""); SYSCTL_INT(_machdep, OID_AUTO, phyreaddelaypanic, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &phyreadpanic, 0, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &phyreadpanic, 0, ""); +SYSCTL_INT(_machdep, OID_AUTO, phywritedelaypanic, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &phywritepanic, 0, ""); +#if DEVELOPMENT || DEBUG +extern uint64_t simulate_stretched_io; +SYSCTL_QUAD(_machdep, OID_AUTO, sim_stretched_io_ns, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &simulate_stretched_io, ""); +#endif extern int pmap_pagezero_mitigation; extern int pmap_asserts_enabled, pmap_asserts_traced; @@ -882,9 +926,9 @@ misc_svisor_read(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused } SYSCTL_PROC(_machdep_misc, OID_AUTO, misc_svisor_read, - CTLTYPE_QUAD | CTLFLAG_RW | 
CTLFLAG_LOCKED, - 0, 0, - misc_svisor_read, "I", "supervisor mode read"); + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, + misc_svisor_read, "I", "supervisor mode read"); #endif /* DEVELOPMENT || DEBUG */ @@ -905,9 +949,9 @@ misc_timer_queue_trace(__unused struct sysctl_oid *oidp, __unused void *arg1, __ } SYSCTL_PROC(_machdep_misc, OID_AUTO, timer_queue_trace, - CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, - misc_timer_queue_trace, "A", "Cut timer queue tracepoint"); + CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, + misc_timer_queue_trace, "A", "Cut timer queue tracepoint"); extern long NMI_count; extern void NMI_cpus(void); @@ -927,23 +971,23 @@ misc_nmis(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int ar } SYSCTL_PROC(_machdep_misc, OID_AUTO, nmis, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, - misc_nmis, "I", "Report/increment NMI count"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, + misc_nmis, "I", "Report/increment NMI count"); /* Parameters related to timer coalescing tuning, to be replaced * with a dedicated systemcall in the future. */ /* Enable processing pending timers in the context of any other interrupt */ SYSCTL_INT(_kern, OID_AUTO, interrupt_timer_coalescing_enabled, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &interrupt_timer_coalescing_enabled, 0, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &interrupt_timer_coalescing_enabled, 0, ""); /* Upon entering idle, process pending timers with HW deadlines * this far in the future. */ SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_idle_entry_hard_deadline_max, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &idle_entry_timer_processing_hdeadline_threshold, 0, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &idle_entry_timer_processing_hdeadline_threshold, 0, ""); /* Track potentially expensive eager timer evaluations on QoS tier * switches. 
@@ -951,29 +995,41 @@ SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_idle_entry_hard_deadline_max, extern uint32_t ml_timer_eager_evaluations; SYSCTL_INT(_machdep, OID_AUTO, eager_timer_evaluations, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &ml_timer_eager_evaluations, 0, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &ml_timer_eager_evaluations, 0, ""); extern uint64_t ml_timer_eager_evaluation_max; SYSCTL_QUAD(_machdep, OID_AUTO, eager_timer_evaluation_max, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &ml_timer_eager_evaluation_max, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &ml_timer_eager_evaluation_max, ""); extern uint64_t x86_isr_fp_simd_use; SYSCTL_QUAD(_machdep, OID_AUTO, x86_fp_simd_isr_uses, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &x86_isr_fp_simd_use, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &x86_isr_fp_simd_use, ""); #if DEVELOPMENT || DEBUG extern int plctrace_enabled; SYSCTL_INT(_machdep, OID_AUTO, pltrace, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &plctrace_enabled, 0, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &plctrace_enabled, 0, ""); + +/* Intentionally not declared as volatile here: */ +extern int mmiotrace_enabled; + +SYSCTL_INT(_machdep, OID_AUTO, MMIOtrace, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &mmiotrace_enabled, 0, ""); extern int fpsimd_fault_popc; SYSCTL_INT(_machdep, OID_AUTO, fpsimd_fault_popc, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &fpsimd_fault_popc, 0, ""); + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &fpsimd_fault_popc, 0, ""); + +extern int allow_64bit_proc_LDT_ops; +SYSCTL_INT(_machdep, OID_AUTO, ldt64, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &allow_64bit_proc_LDT_ops, 0, ""); #endif /* DEVELOPMENT || DEBUG */ diff --git a/bsd/dev/i386/systemcalls.c b/bsd/dev/i386/systemcalls.c index 1fa041589..ebbca2d75 100644 --- a/bsd/dev/i386/systemcalls.c +++ b/bsd/dev/i386/systemcalls.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -69,8 +69,8 @@ extern void *find_user_regs(thread_t); extern const char *syscallnames[]; #define code_is_kdebug_trace(code) (((code) == SYS_kdebug_trace) || \ - ((code) == SYS_kdebug_trace64) || \ - ((code) == SYS_kdebug_trace_string)) + ((code) == SYS_kdebug_trace64) || \ + ((code) == SYS_kdebug_trace_string)) /* * Function: unix_syscall @@ -83,24 +83,25 @@ __attribute__((noreturn)) void unix_syscall(x86_saved_state_t *state) { - thread_t thread; - void *vt; - unsigned int code; - struct sysent *callp; - - int error; - vm_offset_t params; - struct proc *p; - struct uthread *uthread; - x86_saved_state32_t *regs; - boolean_t is_vfork; - pid_t pid; + thread_t thread; + void *vt; + unsigned int code; + struct sysent *callp; + + int error; + vm_offset_t params; + struct proc *p; + struct uthread *uthread; + x86_saved_state32_t *regs; + boolean_t is_vfork; + pid_t pid; assert(is_saved_state32(state)); regs = saved_state32(state); #if DEBUG - if (regs->eax == 0x800) + if (regs->eax == 0x800) { thread_exception_return(); + } #endif thread = current_thread(); uthread = get_bsdthread_info(thread); @@ -109,15 +110,16 @@ unix_syscall(x86_saved_state_t *state) /* Get the appropriate proc; may be different from task's for vfork() */ is_vfork = uthread->uu_flag & UT_VFORK; - if (__improbable(is_vfork != 0)) + if (__improbable(is_vfork != 0)) { p = current_proc(); - else + } else { p = (struct proc *)get_bsdtask_info(current_task()); + } code = regs->eax & I386_SYSCALL_NUMBER_MASK; DEBUG_KPRINT_SYSCALL_UNIX("unix_syscall: code=%d(%s) eip=%u\n", - code, syscallnames[code >= nsysent ? SYS_invalid : code], (uint32_t)regs->eip); - params = (vm_offset_t) (regs->uesp + sizeof (int)); + code, syscallnames[code >= nsysent ?
SYS_invalid : code], (uint32_t)regs->eip); + params = (vm_offset_t) (regs->uesp + sizeof(int)); regs->efl &= ~(EFL_CF); @@ -133,13 +135,13 @@ unix_syscall(x86_saved_state_t *state) if (callp->sy_arg_bytes != 0) { #if CONFIG_REQUIRES_U32_MUNGING - sy_munge_t *mungerp; + sy_munge_t *mungerp; #else #error U32 syscalls on x86_64 kernel requires munging #endif - uint32_t nargs; + uint32_t nargs; - assert((unsigned) callp->sy_arg_bytes <= sizeof (uthread->uu_arg)); + assert((unsigned) callp->sy_arg_bytes <= sizeof(uthread->uu_arg)); nargs = callp->sy_arg_bytes; error = copyin((user_addr_t) params, (char *) vt, nargs); if (error) { @@ -153,20 +155,22 @@ unix_syscall(x86_saved_state_t *state) int *ip = (int *)vt; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, - *ip, *(ip+1), *(ip+2), *(ip+3), 0); + BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + *ip, *(ip + 1), *(ip + 2), *(ip + 3), 0); } #if CONFIG_REQUIRES_U32_MUNGING mungerp = callp->sy_arg_munge32; - if (mungerp != NULL) + if (mungerp != NULL) { (*mungerp)(vt); + } #endif - } else - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, - 0, 0, 0, 0, 0); + } else { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + 0, 0, 0, 0, 0); + } /* * Delayed binding of thread credential to process credential, if we @@ -181,8 +185,8 @@ unix_syscall(x86_saved_state_t *state) pid = proc_pid(p); #ifdef JOE_DEBUG - uthread->uu_iocount = 0; - uthread->uu_vpindex = 0; + uthread->uu_iocount = 0; + uthread->uu_vpindex = 0; #endif AUDIT_SYSCALL_ENTER(code, p, uthread); @@ -190,8 +194,9 @@ unix_syscall(x86_saved_state_t *state) AUDIT_SYSCALL_EXIT(code, p, uthread, error); #ifdef JOE_DEBUG - if (uthread->uu_iocount) - printf("system call returned with uu_iocount != 0\n"); + if (uthread->uu_iocount) { + printf("system call returned with uu_iocount != 0\n"); + } #endif #if CONFIG_DTRACE uthread->t_dtrace_errno = error; @@ -206,20 +211,19 @@ unix_syscall(x86_saved_state_t *state) */ pal_syscall_restart(thread, state); - } - else if (error != EJUSTRETURN) { + } else if (error != EJUSTRETURN) { if (__improbable(error)) { - regs->eax = error; - regs->efl |= EFL_CF; /* carry bit */ + regs->eax = error; + regs->efl |= EFL_CF; /* carry bit */ } else { /* (not error) */ - /* - * We split retval across two registers, in case the - * syscall had a 64-bit return value, in which case - * eax/edx matches the function call ABI. - */ - regs->eax = uthread->uu_rval[0]; - regs->edx = uthread->uu_rval[1]; - } + /* + * We split retval across two registers, in case the + * syscall had a 64-bit return value, in which case + * eax/edx matches the function call ABI. + */ + regs->eax = uthread->uu_rval[0]; + regs->edx = uthread->uu_rval[1]; + } } DEBUG_KPRINT_SYSCALL_UNIX( @@ -227,6 +231,7 @@ unix_syscall(x86_saved_state_t *state) error, regs->eax, regs->edx); uthread->uu_flag &= ~UT_NOTCANCELPT; + uthread->syscall_code = 0; #if DEBUG || DEVELOPMENT kern_allocation_name_t @@ -235,7 +240,7 @@ unix_syscall(x86_saved_state_t *state) #endif /* DEBUG || DEVELOPMENT */ if (__improbable(uthread->uu_lowpri_window)) { - /* + /* * task is marked as a low priority I/O type * and the I/O we issued while in this system call * collided with normal I/O operations... 
we'll @@ -244,10 +249,11 @@ */ throttle_lowpri_io(1); } - if (__probable(!code_is_kdebug_trace(code))) - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, - error, uthread->uu_rval[0], uthread->uu_rval[1], pid, 0); + if (__probable(!code_is_kdebug_trace(code))) { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, uthread->uu_rval[0], uthread->uu_rval[1], pid, 0); + } if (__improbable(!is_vfork && callp->sy_call == (sy_call_t *)execve && !error)) { pal_execve_return(thread); @@ -267,23 +273,24 @@ __attribute__((noreturn)) void unix_syscall64(x86_saved_state_t *state) { - thread_t thread; - void *vt; - unsigned int code; - struct sysent *callp; - int args_in_regs; - boolean_t args_start_at_rdi; - int error; - struct proc *p; - struct uthread *uthread; + thread_t thread; + void *vt; + unsigned int code; + struct sysent *callp; + int args_in_regs; + boolean_t args_start_at_rdi; + int error; + struct proc *p; + struct uthread *uthread; x86_saved_state64_t *regs; - pid_t pid; + pid_t pid; assert(is_saved_state64(state)); regs = saved_state64(state); -#if DEBUG - if (regs->rax == 0x2000800) +#if DEBUG + if (regs->rax == 0x2000800) { thread_exception_return(); + } #endif thread = current_thread(); uthread = get_bsdthread_info(thread); @@ -291,10 +298,11 @@ unix_syscall64(x86_saved_state_t *state) uthread_reset_proc_refcount(uthread); /* Get the appropriate proc; may be different from task's for vfork() */ - if (__probable(!(uthread->uu_flag & UT_VFORK))) + if (__probable(!(uthread->uu_flag & UT_VFORK))) { p = (struct proc *)get_bsdtask_info(current_task()); - else + } else { p = current_proc(); + } /* Verify that we are not being called from a task without a proc */ if (__improbable(p == NULL)) { @@ -314,7 +322,7 @@ unix_syscall64(x86_saved_state_t *state) vt = (void *)uthread->uu_arg; if (__improbable(callp == sysent)) { - /* + /* * indirect system call...
system call number * passed as 'arg0' */ @@ -337,9 +345,9 @@ unix_syscall64(x86_saved_state_t *state) if (!code_is_kdebug_trace(code)) { uint64_t *ip = (uint64_t *)vt; - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, - (int)(*ip), (int)(*(ip+1)), (int)(*(ip+2)), (int)(*(ip+3)), 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + (int)(*ip), (int)(*(ip + 1)), (int)(*(ip + 2)), (int)(*(ip + 3)), 0); } if (__improbable(callp->sy_narg > args_in_regs)) { @@ -355,10 +363,11 @@ unix_syscall64(x86_saved_state_t *state) /* NOTREACHED */ } } - } else + } else { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, - 0, 0, 0, 0, 0); + BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + 0, 0, 0, 0, 0); + } /* * Delayed binding of thread credential to process credential, if we @@ -373,8 +382,8 @@ unix_syscall64(x86_saved_state_t *state) pid = proc_pid(p); #ifdef JOE_DEBUG - uthread->uu_iocount = 0; - uthread->uu_vpindex = 0; + uthread->uu_iocount = 0; + uthread->uu_vpindex = 0; #endif AUDIT_SYSCALL_ENTER(code, p, uthread); @@ -382,14 +391,15 @@ unix_syscall64(x86_saved_state_t *state) AUDIT_SYSCALL_EXIT(code, p, uthread, error); #ifdef JOE_DEBUG - if (uthread->uu_iocount) - printf("system call returned with uu_iocount != 0\n"); + if (uthread->uu_iocount) { + printf("system call returned with uu_iocount != 0\n"); + } #endif #if CONFIG_DTRACE uthread->t_dtrace_errno = error; #endif /* CONFIG_DTRACE */ - + if (__improbable(error == ERESTART)) { /* * all system calls come through via the syscall instruction @@ -397,13 +407,11 @@ unix_syscall64(x86_saved_state_t *state) * move the user's pc back to repeat the syscall: */ pal_syscall_restart( thread, state ); - } - else if (error != EJUSTRETURN) { + } else if (error != EJUSTRETURN) { if (__improbable(error)) { regs->rax = error; - regs->isf.rflags |= EFL_CF; /* carry bit */ + regs->isf.rflags |= EFL_CF; /* carry bit */ } else { /* (not error) */ - switch (callp->sy_return_type) { case _SYSCALL_RET_INT_T: regs->rax = uthread->uu_rval[0]; @@ -418,7 +426,7 @@ unix_syscall64(x86_saved_state_t *state) case _SYSCALL_RET_SIZE_T: case _SYSCALL_RET_SSIZE_T: case _SYSCALL_RET_UINT64_T: - regs->rax = *((uint64_t *)(&uthread->uu_rval[0])); + regs->rax = *((uint64_t *)(&uthread->uu_rval[0])); regs->rdx = 0; break; case _SYSCALL_RET_NONE: @@ -428,14 +436,15 @@ unix_syscall64(x86_saved_state_t *state) break; } regs->isf.rflags &= ~EFL_CF; - } + } } DEBUG_KPRINT_SYSCALL_UNIX( "unix_syscall64: error=%d retval=(%llu,%llu)\n", error, regs->rax, regs->rdx); - + uthread->uu_flag &= ~UT_NOTCANCELPT; + uthread->syscall_code = 0; #if DEBUG || DEVELOPMENT kern_allocation_name_t @@ -444,7 +453,7 @@ unix_syscall64(x86_saved_state_t *state) #endif /* DEBUG || DEVELOPMENT */ if (__improbable(uthread->uu_lowpri_window)) { - /* + /* * task is marked as a low priority I/O type * and the I/O we issued while in this system call * collided with normal I/O operations... 
we'll @@ -453,10 +462,11 @@ unix_syscall64(x86_saved_state_t *state) */ throttle_lowpri_io(1); } - if (__probable(!code_is_kdebug_trace(code))) - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, - error, uthread->uu_rval[0], uthread->uu_rval[1], pid, 0); + if (__probable(!code_is_kdebug_trace(code))) { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, uthread->uu_rval[0], uthread->uu_rval[1], pid, 0); + } #if PROC_REF_DEBUG if (__improbable(uthread_get_proc_refcount(uthread))) { @@ -472,8 +482,8 @@ unix_syscall64(x86_saved_state_t *state) void unix_syscall_return(int error) { - thread_t thread; - struct uthread *uthread; + thread_t thread; + struct uthread *uthread; struct proc *p; unsigned int code; struct sysent *callp; @@ -494,8 +504,9 @@ unix_syscall_return(int error) callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code]; #if CONFIG_DTRACE - if (callp->sy_call == dtrace_systrace_syscall) + if (callp->sy_call == dtrace_systrace_syscall) { dtrace_systrace_syscall_return( code, error, uthread->uu_rval ); + } #endif /* CONFIG_DTRACE */ AUDIT_SYSCALL_EXIT(code, p, uthread, error); @@ -503,14 +514,12 @@ unix_syscall_return(int error) /* * repeat the syscall */ - pal_syscall_restart( thread, find_user_regs(thread) ); - } - else if (error != EJUSTRETURN) { + pal_syscall_restart( thread, find_user_regs(thread)); + } else if (error != EJUSTRETURN) { if (error) { regs->rax = error; - regs->isf.rflags |= EFL_CF; /* carry bit */ + regs->isf.rflags |= EFL_CF; /* carry bit */ } else { /* (not error) */ - switch (callp->sy_return_type) { case _SYSCALL_RET_INT_T: regs->rax = uthread->uu_rval[0]; @@ -535,13 +544,13 @@ unix_syscall_return(int error) break; } regs->isf.rflags &= ~EFL_CF; - } + } } DEBUG_KPRINT_SYSCALL_UNIX( "unix_syscall_return: error=%d retval=(%llu,%llu)\n", error, regs->rax, regs->rdx); } else { - x86_saved_state32_t *regs; + x86_saved_state32_t *regs; regs = saved_state32(find_user_regs(thread)); @@ -551,22 +560,22 @@ unix_syscall_return(int error) callp = (code >= nsysent) ? &sysent[SYS_invalid] : &sysent[code]; #if CONFIG_DTRACE - if (callp->sy_call == dtrace_systrace_syscall) + if (callp->sy_call == dtrace_systrace_syscall) { dtrace_systrace_syscall_return( code, error, uthread->uu_rval ); + } #endif /* CONFIG_DTRACE */ AUDIT_SYSCALL_EXIT(code, p, uthread, error); if (error == ERESTART) { - pal_syscall_restart( thread, find_user_regs(thread) ); - } - else if (error != EJUSTRETURN) { + pal_syscall_restart( thread, find_user_regs(thread)); + } else if (error != EJUSTRETURN) { if (error) { regs->eax = error; - regs->efl |= EFL_CF; /* carry bit */ + regs->efl |= EFL_CF; /* carry bit */ } else { /* (not error) */ regs->eax = uthread->uu_rval[0]; regs->edx = uthread->uu_rval[1]; - } + } } DEBUG_KPRINT_SYSCALL_UNIX( "unix_syscall_return: error=%d retval=(%u,%u)\n", @@ -583,7 +592,7 @@ unix_syscall_return(int error) #endif /* DEBUG || DEVELOPMENT */ if (uthread->uu_lowpri_window) { - /* + /* * task is marked as a low priority I/O type * and the I/O we issued while in this system call * collided with normal I/O operations... 
we'll @@ -592,10 +601,11 @@ unix_syscall_return(int error) */ throttle_lowpri_io(1); } - if (!code_is_kdebug_trace(code)) - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, - error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0); + if (!code_is_kdebug_trace(code)) { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0); + } thread_exception_return(); /* NOTREACHED */ diff --git a/bsd/dev/i386/unix_signal.c b/bsd/dev/i386/unix_signal.c index 88e615b8b..603b21614 100644 --- a/bsd/dev/i386/unix_signal.c +++ b/bsd/dev/i386/unix_signal.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Copyright (c) 1992 NeXT, Inc. * * HISTORY @@ -49,8 +49,8 @@ #include -#include /* for thread_abort_safely */ -#include +#include /* for thread_abort_safely */ +#include #include #include @@ -66,22 +66,22 @@ /* Forward: */ extern kern_return_t thread_getstatus(thread_t act, int flavor, - thread_state_t tstate, mach_msg_type_number_t *count); + thread_state_t tstate, mach_msg_type_number_t *count); extern kern_return_t thread_setstatus(thread_t thread, int flavor, - thread_state_t tstate, mach_msg_type_number_t count); + thread_state_t tstate, mach_msg_type_number_t count); /* Signal handler flavors supported */ /* These defns should match the Libc implmn */ -#define UC_TRAD 1 -#define UC_FLAVOR 30 -#define UC_SET_ALT_STACK 0x40000000 -#define UC_RESET_ALT_STACK 0x80000000 +#define UC_TRAD 1 +#define UC_FLAVOR 30 +#define UC_SET_ALT_STACK 0x40000000 +#define UC_RESET_ALT_STACK 0x80000000 -#define C_32_STK_ALIGN 16 -#define C_64_STK_ALIGN 16 -#define C_64_REDZONE_LEN 128 -#define TRUNC_DOWN32(a,c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c)))) -#define TRUNC_DOWN64(a,c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c)))) +#define C_32_STK_ALIGN 16 +#define C_64_STK_ALIGN 16 +#define C_64_REDZONE_LEN 128 +#define TRUNC_DOWN32(a, c) ((((uint32_t)a)-(c)) & ((uint32_t)(-(c)))) +#define TRUNC_DOWN64(a, c) ((((uint64_t)a)-(c)) & ((uint64_t)(-(c)))) /* * Send an interrupt to process. @@ -89,18 +89,18 @@ extern kern_return_t thread_setstatus(thread_t thread, int flavor, * Stack is set up to allow sigcode stored * in u. to call routine, followed by chmk * to sigreturn routine below. 
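
Before the signal-delivery code below, a note on the two syscall-exit paths above: both funnel results through the same register convention. A minimal sketch, with sketch_-prefixed stand-ins for the saved-state struct and the EFL carry bit (values illustrative, not XNU definitions):

#include <stdint.h>

/* Stand-in for the x86_saved_state64_t fields the exit paths touch. */
struct sketch_regs {
	uint64_t rax;
	uint64_t rdx;
	uint64_t rflags;
};
#define SKETCH_EFL_CF 0x1ULL /* carry flag, bit 0 of rflags */

/* Mirrors the error/result dispatch in unix_syscall_return() above:
 * errno rides in rax with carry set; on success carry is cleared and
 * up to two result words are returned. */
static void
sketch_set_result(struct sketch_regs *regs, int error, const int rval[2])
{
	if (error) {
		regs->rax = (uint64_t)error;    /* libc stub sees carry, sets errno */
		regs->rflags |= SKETCH_EFL_CF;
	} else {
		regs->rax = (uint64_t)rval[0];  /* primary result */
		regs->rdx = (uint64_t)rval[1];  /* secondary result, e.g. pipe(2) */
		regs->rflags &= ~SKETCH_EFL_CF; /* carry clear: success */
	}
}

ERESTART and EJUSTRETURN never reach this dispatch: the former rewinds the user PC via pal_syscall_restart() so the syscall instruction re-executes, and the latter leaves the registers untouched.
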
After sigreturn - * resets the signal mask, the stack, the frame + * resets the signal mask, the stack, the frame * pointer, and the argument pointer, it returns * to the user specified pc, psl. */ struct sigframe32 { - int retaddr; - user32_addr_t catcher; /* sig_t */ - int sigstyle; - int sig; - user32_addr_t sinfo; /* siginfo32_t* */ - user32_addr_t uctx; /* struct ucontext32 */ - user32_addr_t token; + int retaddr; + user32_addr_t catcher; /* sig_t */ + int sigstyle; + int sig; + user32_addr_t sinfo; /* siginfo32_t* */ + user32_addr_t uctx; /* struct ucontext32 */ + user32_addr_t token; }; /* @@ -108,21 +108,20 @@ struct sigframe32 { * for the cases of extended states (plain FP, or AVX): */ typedef struct { - int flavor; natural_t state_count; size_t mcontext_size; -} xstate_info_t; + int flavor; natural_t state_count; size_t mcontext_size; +} xstate_info_t; static const xstate_info_t thread_state64[] = { - [FP] = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64) }, - [AVX] = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64) }, -#if !defined(RC_HIDE_XNU_J137) - [AVX512] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64) } -#endif + [FP] = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64) }, + [FP_FULL] = { x86_FLOAT_STATE64, x86_FLOAT_STATE64_COUNT, sizeof(struct mcontext64_full) }, + [AVX] = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64) }, + [AVX_FULL] = { x86_AVX_STATE64, x86_AVX_STATE64_COUNT, sizeof(struct mcontext_avx64_full) }, + [AVX512] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64) }, + [AVX512_FULL] = { x86_AVX512_STATE64, x86_AVX512_STATE64_COUNT, sizeof(struct mcontext_avx512_64_full) } }; static const xstate_info_t thread_state32[] = { - [FP] = { x86_FLOAT_STATE32, x86_FLOAT_STATE32_COUNT, sizeof(struct mcontext32) }, - [AVX] = { x86_AVX_STATE32, x86_AVX_STATE32_COUNT, sizeof(struct mcontext_avx32) }, -#if !defined(RC_HIDE_XNU_J137) - [AVX512] = { x86_AVX512_STATE32, x86_AVX512_STATE32_COUNT, sizeof(struct mcontext_avx512_32) } -#endif + [FP] = { x86_FLOAT_STATE32, x86_FLOAT_STATE32_COUNT, sizeof(struct mcontext32) }, + [AVX] = { x86_AVX_STATE32, x86_AVX_STATE32_COUNT, sizeof(struct mcontext_avx32) }, + [AVX512] = { x86_AVX512_STATE32, x86_AVX512_STATE32_COUNT, sizeof(struct mcontext_avx512_32) } }; /* @@ -132,74 +131,76 @@ static const xstate_info_t thread_state32[] = { static void siginfo_user_to_user32_x86(user_siginfo_t *in, user32_siginfo_t *out) { - out->si_signo = in->si_signo; - out->si_errno = in->si_errno; - out->si_code = in->si_code; - out->si_pid = in->si_pid; - out->si_uid = in->si_uid; - out->si_status = in->si_status; - out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr); + out->si_signo = in->si_signo; + out->si_errno = in->si_errno; + out->si_code = in->si_code; + out->si_pid = in->si_pid; + out->si_uid = in->si_uid; + out->si_status = in->si_status; + out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr); /* following cast works for sival_int because of padding */ - out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr); - out->si_band = in->si_band; /* range reduction */ - out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */ + out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr); + out->si_band = in->si_band; /* range reduction */ + out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */ } static void 
siginfo_user_to_user64_x86(user_siginfo_t *in, user64_siginfo_t *out) { - out->si_signo = in->si_signo; - out->si_errno = in->si_errno; - out->si_code = in->si_code; - out->si_pid = in->si_pid; - out->si_uid = in->si_uid; - out->si_status = in->si_status; - out->si_addr = in->si_addr; - out->si_value.sival_ptr = in->si_value.sival_ptr; - out->si_band = in->si_band; /* range reduction */ - out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */ + out->si_signo = in->si_signo; + out->si_errno = in->si_errno; + out->si_code = in->si_code; + out->si_pid = in->si_pid; + out->si_uid = in->si_uid; + out->si_status = in->si_status; + out->si_addr = in->si_addr; + out->si_value.sival_ptr = in->si_value.sival_ptr; + out->si_band = in->si_band; /* range reduction */ + out->__pad[0] = in->pad[0]; /* mcontext.ss.r1 */ } void sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code) { union { - struct mcontext_avx32 mctx_avx32; - struct mcontext_avx64 mctx_avx64; -#if !defined(RC_HIDE_XNU_J137) - struct mcontext_avx512_32 mctx_avx512_32; - struct mcontext_avx512_64 mctx_avx512_64; -#endif + struct mcontext_avx32 mctx_avx32; + struct mcontext_avx64 mctx_avx64; + struct mcontext_avx64_full mctx_avx64_full; + struct mcontext_avx512_32 mctx_avx512_32; + struct mcontext_avx512_64 mctx_avx512_64; + struct mcontext_avx512_64_full mctx_avx512_64_full; } mctx_store, *mctxp = &mctx_store; - user_addr_t ua_sp; - user_addr_t ua_fp; - user_addr_t ua_cr2; - user_addr_t ua_sip; - user_addr_t ua_uctxp; - user_addr_t ua_mctxp; - user_siginfo_t sinfo64; + user_addr_t ua_sp; + user_addr_t ua_fp; + user_addr_t ua_cr2; + user_addr_t ua_sip; + user_addr_t ua_uctxp; + user_addr_t ua_mctxp; + user_siginfo_t sinfo64; struct sigacts *ps = p->p_sigacts; - int oonstack, flavor; + int oonstack, flavor; user_addr_t trampact; int sigonstack; - void * state; + void * state, *fpstate; mach_msg_type_number_t state_count; thread_t thread; struct uthread * ut; int stack_size = 0; int infostyle = UC_TRAD; - xstate_t sig_xstate; + xstate_t sig_xstate; user_addr_t token_uctx; kern_return_t kr; + boolean_t reset_ss = TRUE; thread = current_thread(); ut = get_bsdthread_info(thread); - if (p->p_sigacts->ps_siginfo & sigmask(sig)) + if (p->p_sigacts->ps_siginfo & sigmask(sig)) { infostyle = UC_FLAVOR; + } oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK; trampact = ps->ps_trampact[sig]; @@ -218,27 +219,61 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint sig_xstate = current_xstate(); if (proc_is64bit(p)) { - x86_thread_state64_t *tstate64; - struct user_ucontext64 uctx64; + x86_thread_state64_t *tstate64; + struct user_ucontext64 uctx64; user64_addr_t token; + int task_has_ldt = thread_task_has_ldt(thread); - flavor = x86_THREAD_STATE64; - state_count = x86_THREAD_STATE64_COUNT; + if (task_has_ldt) { + flavor = x86_THREAD_FULL_STATE64; + state_count = x86_THREAD_FULL_STATE64_COUNT; + fpstate = (void *)&mctxp->mctx_avx64_full.fs; + sig_xstate |= STATE64_FULL; + } else { + flavor = x86_THREAD_STATE64; + state_count = x86_THREAD_STATE64_COUNT; + fpstate = (void *)&mctxp->mctx_avx64.fs; + } state = (void *)&mctxp->mctx_avx64.ss; - if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) - goto bad; + + /* + * The state copying is performed with pointers to fields in the state + * struct. This works specifically because the mcontext is layed-out with the + * variable-sized FP-state as the last member. 
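
The sizing trick this comment describes reduces to an array lookup; a minimal sketch, assuming the FULL flag doubles as an index bit, which is what the OR makes work (all names and sizes below are illustrative, not the real xstate_info_t values):

#include <stddef.h>

enum { SK_FP = 0, SK_AVX = 2 };
#define SK_STATE64_FULL 0x1 /* illustrative flag-as-index bit */

typedef struct { size_t mcontext_size; } sk_xinfo_t;

static const sk_xinfo_t sk_xtable[] = {
	[SK_FP]                    = { 600 }, /* sizes illustrative only */
	[SK_FP | SK_STATE64_FULL]  = { 616 }, /* "full" variant: larger ss */
	[SK_AVX]                   = { 856 },
	[SK_AVX | SK_STATE64_FULL] = { 872 },
};

/* Mirrors the OR-in done by sendsig()/sigreturn(): one lookup yields
 * the mcontext size matching the chosen thread-state flavor. */
static size_t
sk_sig_mcontext_size(int xstate, int task_has_ldt)
{
	if (task_has_ldt) {
		xstate |= SK_STATE64_FULL;
	}
	return sk_xtable[xstate].mcontext_size;
}
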
However, with the requirement + * to support passing "full" 64-bit state to the signal handler, that layout has now + * changed (since the "full" state has a larger "ss" member than the non-"full" + * structure. Because of this, and to retain the array-lookup method of determining + * structure sizes, we OR-in STATE64_FULL to sig_xstate to ensure the proper mcontext + * size is passed. + */ + + if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) { + goto bad; + } + + if ((sig_xstate & STATE64_FULL) && mctxp->mctx_avx64.ss.cs != USER64_CS) { + if ((ut->uu_flag & UT_ALTSTACK) && !oonstack && + (sigonstack)) { + reset_ss = TRUE; + } else { + reset_ss = FALSE; + } + } else { + reset_ss = FALSE; + } flavor = thread_state64[sig_xstate].flavor; state_count = thread_state64[sig_xstate].state_count; - state = (void *)&mctxp->mctx_avx64.fs; - if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) - goto bad; + if (thread_getstatus(thread, flavor, (thread_state_t)fpstate, &state_count) != KERN_SUCCESS) { + goto bad; + } flavor = x86_EXCEPTION_STATE64; state_count = x86_EXCEPTION_STATE64_COUNT; state = (void *)&mctxp->mctx_avx64.es; - if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) - goto bad; + if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) { + goto bad; + } tstate64 = &mctxp->mctx_avx64.ss; @@ -250,17 +285,20 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint ua_sp += stack_size; ut->uu_sigstk.ss_flags |= SA_ONSTACK; } else { - ua_sp = tstate64->rsp; + if ((sig_xstate & STATE64_FULL) && tstate64->cs != USER64_CS) { + reset_ss = FALSE; + } + ua_sp = tstate64->rsp; } ua_cr2 = mctxp->mctx_avx64.es.faultvaddr; /* The x86_64 ABI defines a 128-byte red zone. */ ua_sp -= C_64_REDZONE_LEN; - ua_sp -= sizeof (struct user_ucontext64); - ua_uctxp = ua_sp; // someone tramples the first word! + ua_sp -= sizeof(struct user_ucontext64); + ua_uctxp = ua_sp; // someone tramples the first word! - ua_sp -= sizeof (user64_siginfo_t); + ua_sp -= sizeof(user64_siginfo_t); ua_sip = ua_sp; ua_sp -= thread_state64[sig_xstate].mcontext_size; @@ -296,18 +334,21 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint uctx64.uc_stack.ss_sp = ua_fp; uctx64.uc_stack.ss_size = stack_size; - if (oonstack) - uctx64.uc_stack.ss_flags |= SS_ONSTACK; + if (oonstack) { + uctx64.uc_stack.ss_flags |= SS_ONSTACK; + } uctx64.uc_link = 0; uctx64.uc_mcsize = thread_state64[sig_xstate].mcontext_size; uctx64.uc_mcontext64 = ua_mctxp; - - if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof (uctx64))) - goto bad; - if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state64[sig_xstate].mcontext_size)) - goto bad; + if (copyout((caddr_t)&uctx64, ua_uctxp, sizeof(uctx64))) { + goto bad; + } + + if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state64[sig_xstate].mcontext_size)) { + goto bad; + } sinfo64.pad[0] = tstate64->rsp; sinfo64.si_addr = tstate64->rip; @@ -315,14 +356,34 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint tstate64->rip = trampact; tstate64->rsp = ua_fp; tstate64->rflags = get_eflags_exportmask(); + /* - * JOE - might not need to set these + * SETH - need to set these for processes with LDTs */ tstate64->cs = USER64_CS; tstate64->fs = NULL_SEG; - tstate64->gs = USER_CTHREAD; + /* + * Set gs to 0 here to prevent restoration of %gs on return-to-user. 
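
Taken together with the %ss/%ds/%es handling a few lines down, the selector scrub amounts to this sketch (the struct and selector value are placeholders, not the real x86 state layout):

#include <stdint.h>

/* Placeholder for the selector slots of a "full" 64-bit thread state. */
struct sketch_full_ss {
	uint16_t cs, ds, es, ss, gs;
};
#define SKETCH_USER64_DS 0x23 /* illustrative selector, not the real value */

/* Mirrors sendsig(): %gs is zeroed so the kernel-exit trampoline cannot
 * clobber gsbase by reloading it; %ds/%es get known values; %ss is reset
 * only when the reset_ss computation above said it is safe to do so. */
static void
sketch_scrub_selectors(struct sketch_full_ss *ss, int reset_ss)
{
	ss->gs = 0;
	ss->ds = SKETCH_USER64_DS;
	ss->es = 0;
	if (reset_ss) {
		ss->ss = SKETCH_USER64_DS;
	}
}
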
If we + * did NOT do that here and %gs was non-zero, we'd blow away gsbase when + * we restore %gs in the kernel exit trampoline. + */ + tstate64->gs = 0; + + if (sig_xstate & STATE64_FULL) { + /* Reset DS, ES, and possibly SS */ + if (reset_ss) { + /* + * Restore %ss if (a) an altstack was used for signal delivery + * or (b) %cs at the time of the signal was the default + * (USER64_CS) + */ + mctxp->mctx_avx64_full.ss.ss = USER64_DS; + } + mctxp->mctx_avx64_full.ss.ds = USER64_DS; + mctxp->mctx_avx64_full.ss.es = 0; + } - /* + /* * Build the argument list for the signal handler. * Handler should call sigreturn to get out of it */ @@ -333,28 +394,31 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint tstate64->r8 = ua_uctxp; tstate64->r9 = token; } else { - x86_thread_state32_t *tstate32; - struct user_ucontext32 uctx32; - struct sigframe32 frame32; + x86_thread_state32_t *tstate32; + struct user_ucontext32 uctx32; + struct sigframe32 frame32; user32_addr_t token; - flavor = x86_THREAD_STATE32; + flavor = x86_THREAD_STATE32; state_count = x86_THREAD_STATE32_COUNT; state = (void *)&mctxp->mctx_avx32.ss; - if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) - goto bad; + if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) { + goto bad; + } flavor = thread_state32[sig_xstate].flavor; state_count = thread_state32[sig_xstate].state_count; state = (void *)&mctxp->mctx_avx32.fs; - if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) - goto bad; + if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) { + goto bad; + } flavor = x86_EXCEPTION_STATE32; state_count = x86_EXCEPTION_STATE32_COUNT; state = (void *)&mctxp->mctx_avx32.es; - if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) - goto bad; + if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS) { + goto bad; + } tstate32 = &mctxp->mctx_avx32.ss; @@ -366,20 +430,20 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint ua_sp += stack_size; ut->uu_sigstk.ss_flags |= SA_ONSTACK; } else { - ua_sp = tstate32->esp; + ua_sp = tstate32->esp; } ua_cr2 = mctxp->mctx_avx32.es.faultvaddr; - ua_sp -= sizeof (struct user_ucontext32); - ua_uctxp = ua_sp; // someone tramples the first word! + ua_sp -= sizeof(struct user_ucontext32); + ua_uctxp = ua_sp; // someone tramples the first word! - ua_sp -= sizeof (user32_siginfo_t); + ua_sp -= sizeof(user32_siginfo_t); ua_sip = ua_sp; ua_sp -= thread_state32[sig_xstate].mcontext_size; ua_mctxp = ua_sp; - ua_sp -= sizeof (struct sigframe32); + ua_sp -= sizeof(struct sigframe32); ua_fp = ua_sp; /* @@ -401,13 +465,13 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx); assert(kr == KERN_SUCCESS); token = CAST_DOWN_EXPLICIT(user32_addr_t, token_uctx) ^ - CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token); + CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token); - /* + /* * Build the argument list for the signal handler. 
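
The token set up just above pairs with the check in sigreturn() further down; the whole handshake is a sketch-sized XOR (names hypothetical):

#include <stdint.h>

/* sendsig() side: the handler receives uctx ^ per-process secret. */
static uint64_t
sketch_make_token(uint64_t uctx_addr, uint64_t secret)
{
	return uctx_addr ^ secret;
}

/* sigreturn() side: recompute and compare. A forged or relocated
 * ucontext pointer fails the check, yielding EINVAL unless validation
 * has been explicitly disabled for the process. */
static int
sketch_token_ok(uint64_t uctx_addr, uint64_t token, uint64_t secret)
{
	return (uctx_addr ^ secret) == token;
}
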
* Handler should call sigreturn to get out of it */ - frame32.retaddr = -1; + frame32.retaddr = -1; frame32.sigstyle = infostyle; frame32.sig = sig; frame32.catcher = CAST_DOWN_EXPLICIT(user32_addr_t, ua_catcher); @@ -415,8 +479,9 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint frame32.uctx = CAST_DOWN_EXPLICIT(user32_addr_t, ua_uctxp); frame32.token = token; - if (copyout((caddr_t)&frame32, ua_fp, sizeof (frame32))) - goto bad; + if (copyout((caddr_t)&frame32, ua_fp, sizeof(frame32))) { + goto bad; + } /* * Build the signal context to be used by sigreturn. @@ -428,152 +493,153 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint uctx32.uc_stack.ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, ua_fp); uctx32.uc_stack.ss_size = stack_size; - if (oonstack) - uctx32.uc_stack.ss_flags |= SS_ONSTACK; + if (oonstack) { + uctx32.uc_stack.ss_flags |= SS_ONSTACK; + } uctx32.uc_link = 0; uctx32.uc_mcsize = thread_state64[sig_xstate].mcontext_size; uctx32.uc_mcontext = CAST_DOWN_EXPLICIT(user32_addr_t, ua_mctxp); - - if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof (uctx32))) - goto bad; - if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state32[sig_xstate].mcontext_size)) - goto bad; + if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof(uctx32))) { + goto bad; + } + + if (copyout((caddr_t)&mctx_store, ua_mctxp, thread_state32[sig_xstate].mcontext_size)) { + goto bad; + } sinfo64.pad[0] = tstate32->esp; sinfo64.si_addr = tstate32->eip; } switch (sig) { - case SIGILL: - switch (ut->uu_code) { - case EXC_I386_INVOP: - sinfo64.si_code = ILL_ILLOPC; - break; - default: - sinfo64.si_code = ILL_NOOP; - } + case SIGILL: + switch (ut->uu_code) { + case EXC_I386_INVOP: + sinfo64.si_code = ILL_ILLOPC; break; - case SIGFPE: + default: + sinfo64.si_code = ILL_NOOP; + } + break; + case SIGFPE: #define FP_IE 0 /* Invalid operation */ #define FP_DE 1 /* Denormalized operand */ #define FP_ZE 2 /* Zero divide */ #define FP_OE 3 /* overflow */ #define FP_UE 4 /* underflow */ #define FP_PE 5 /* precision */ - if (ut->uu_code == EXC_I386_DIV) { - sinfo64.si_code = FPE_INTDIV; - } - else if (ut->uu_code == EXC_I386_INTO) { - sinfo64.si_code = FPE_INTOVF; - } - else if (ut->uu_subcode & (1 << FP_ZE)) { - sinfo64.si_code = FPE_FLTDIV; - } else if (ut->uu_subcode & (1 << FP_OE)) { - sinfo64.si_code = FPE_FLTOVF; - } else if (ut->uu_subcode & (1 << FP_UE)) { - sinfo64.si_code = FPE_FLTUND; - } else if (ut->uu_subcode & (1 << FP_PE)) { - sinfo64.si_code = FPE_FLTRES; - } else if (ut->uu_subcode & (1 << FP_IE)) { - sinfo64.si_code = FPE_FLTINV; - } else { - sinfo64.si_code = FPE_NOOP; - } + if (ut->uu_code == EXC_I386_DIV) { + sinfo64.si_code = FPE_INTDIV; + } else if (ut->uu_code == EXC_I386_INTO) { + sinfo64.si_code = FPE_INTOVF; + } else if (ut->uu_subcode & (1 << FP_ZE)) { + sinfo64.si_code = FPE_FLTDIV; + } else if (ut->uu_subcode & (1 << FP_OE)) { + sinfo64.si_code = FPE_FLTOVF; + } else if (ut->uu_subcode & (1 << FP_UE)) { + sinfo64.si_code = FPE_FLTUND; + } else if (ut->uu_subcode & (1 << FP_PE)) { + sinfo64.si_code = FPE_FLTRES; + } else if (ut->uu_subcode & (1 << FP_IE)) { + sinfo64.si_code = FPE_FLTINV; + } else { + sinfo64.si_code = FPE_NOOP; + } + break; + case SIGBUS: + sinfo64.si_code = BUS_ADRERR; + sinfo64.si_addr = ua_cr2; + break; + case SIGTRAP: + sinfo64.si_code = TRAP_BRKPT; + break; + case SIGSEGV: + sinfo64.si_addr = ua_cr2; + + switch (ut->uu_code) { + case EXC_I386_GPFLT: + /* CR2 is meaningless after GP fault */ + /* XXX namespace clash! 
*/ + sinfo64.si_addr = 0ULL; + sinfo64.si_code = 0; break; - case SIGBUS: - sinfo64.si_code = BUS_ADRERR; - sinfo64.si_addr = ua_cr2; + case KERN_PROTECTION_FAILURE: + sinfo64.si_code = SEGV_ACCERR; break; - case SIGTRAP: - sinfo64.si_code = TRAP_BRKPT; + case KERN_INVALID_ADDRESS: + sinfo64.si_code = SEGV_MAPERR; break; - case SIGSEGV: - sinfo64.si_addr = ua_cr2; - - switch (ut->uu_code) { - case EXC_I386_GPFLT: - /* CR2 is meaningless after GP fault */ - /* XXX namespace clash! */ - sinfo64.si_addr = 0ULL; - sinfo64.si_code = 0; - break; - case KERN_PROTECTION_FAILURE: - sinfo64.si_code = SEGV_ACCERR; - break; - case KERN_INVALID_ADDRESS: - sinfo64.si_code = SEGV_MAPERR; - break; - default: - sinfo64.si_code = FPE_NOOP; - } - break; default: - { - int status_and_exitcode; + sinfo64.si_code = FPE_NOOP; + } + break; + default: + { + int status_and_exitcode; - /* - * All other signals need to fill out a minimum set of - * information for the siginfo structure passed into - * the signal handler, if SA_SIGINFO was specified. - * - * p->si_status actually contains both the status and - * the exit code; we save it off in its own variable - * for later breakdown. - */ - proc_lock(p); - sinfo64.si_pid = p->si_pid; - p->si_pid =0; - status_and_exitcode = p->si_status; - p->si_status = 0; - sinfo64.si_uid = p->si_uid; - p->si_uid =0; - sinfo64.si_code = p->si_code; - p->si_code = 0; - proc_unlock(p); - if (sinfo64.si_code == CLD_EXITED) { - if (WIFEXITED(status_and_exitcode)) - sinfo64.si_code = CLD_EXITED; - else if (WIFSIGNALED(status_and_exitcode)) { - if (WCOREDUMP(status_and_exitcode)) { - sinfo64.si_code = CLD_DUMPED; - status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode); - } else { - sinfo64.si_code = CLD_KILLED; - status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode); - } + /* + * All other signals need to fill out a minimum set of + * information for the siginfo structure passed into + * the signal handler, if SA_SIGINFO was specified. + * + * p->si_status actually contains both the status and + * the exit code; we save it off in its own variable + * for later breakdown. + */ + proc_lock(p); + sinfo64.si_pid = p->si_pid; + p->si_pid = 0; + status_and_exitcode = p->si_status; + p->si_status = 0; + sinfo64.si_uid = p->si_uid; + p->si_uid = 0; + sinfo64.si_code = p->si_code; + p->si_code = 0; + proc_unlock(p); + if (sinfo64.si_code == CLD_EXITED) { + if (WIFEXITED(status_and_exitcode)) { + sinfo64.si_code = CLD_EXITED; + } else if (WIFSIGNALED(status_and_exitcode)) { + if (WCOREDUMP(status_and_exitcode)) { + sinfo64.si_code = CLD_DUMPED; + status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode); + } else { + sinfo64.si_code = CLD_KILLED; + status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode); } } - /* - * The recorded status contains the exit code and the - * signal information, but the information to be passed - * in the siginfo to the handler is supposed to only - * contain the status, so we have to shift it out. - */ - sinfo64.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000); - p->p_xhighbits = 0; - break; } + /* + * The recorded status contains the exit code and the + * signal information, but the information to be passed + * in the siginfo to the handler is supposed to only + * contain the status, so we have to shift it out. 
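
That shift is the one performed in the statement immediately below; as a standalone helper (WEXITSTATUS is the standard wait(2) macro, the rest comes from the hunk):

#include <stdint.h>
#include <sys/wait.h>

/* Low 24 bits: the wait(2) exit status the handler should see;
 * top byte: the saved p_xhighbits. */
static uint32_t
sketch_pack_si_status(int status_and_exitcode, uint8_t xhighbits)
{
	return (uint32_t)(WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) |
	       (((uint32_t)xhighbits << 24) & 0xFF000000);
}
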
+ */ + sinfo64.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) | (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000); + p->p_xhighbits = 0; + break; + } } if (proc_is64bit(p)) { user64_siginfo_t sinfo64_user64; - + bzero((caddr_t)&sinfo64_user64, sizeof(sinfo64_user64)); - - siginfo_user_to_user64_x86(&sinfo64,&sinfo64_user64); + + siginfo_user_to_user64_x86(&sinfo64, &sinfo64_user64); #if CONFIG_DTRACE - bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo)); + bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo)); - ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo; - ut->t_dtrace_siginfo.si_code = sinfo64.si_code; - ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid; - ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid; - ut->t_dtrace_siginfo.si_status = sinfo64.si_status; + ut->t_dtrace_siginfo.si_signo = sinfo64.si_signo; + ut->t_dtrace_siginfo.si_code = sinfo64.si_code; + ut->t_dtrace_siginfo.si_pid = sinfo64.si_pid; + ut->t_dtrace_siginfo.si_uid = sinfo64.si_uid; + ut->t_dtrace_siginfo.si_status = sinfo64.si_status; /* XXX truncates faulting address to void * on K32 */ - ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr); + ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo64.si_addr); /* Fire DTrace proc:::fault probe when signal is generated by hardware. */ switch (sig) { @@ -586,32 +652,38 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint /* XXX truncates catcher address to uintptr_t */ DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo), - void (*)(void), CAST_DOWN(sig_t, ua_catcher)); + void (*)(void), CAST_DOWN(sig_t, ua_catcher)); #endif /* CONFIG_DTRACE */ - if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof (sinfo64_user64))) + if (copyout((caddr_t)&sinfo64_user64, ua_sip, sizeof(sinfo64_user64))) { goto bad; + } - flavor = x86_THREAD_STATE64; - state_count = x86_THREAD_STATE64_COUNT; + if (sig_xstate & STATE64_FULL) { + flavor = x86_THREAD_FULL_STATE64; + state_count = x86_THREAD_FULL_STATE64_COUNT; + } else { + flavor = x86_THREAD_STATE64; + state_count = x86_THREAD_STATE64_COUNT; + } state = (void *)&mctxp->mctx_avx64.ss; } else { - x86_thread_state32_t *tstate32; + x86_thread_state32_t *tstate32; user32_siginfo_t sinfo32; bzero((caddr_t)&sinfo32, sizeof(sinfo32)); - siginfo_user_to_user32_x86(&sinfo64,&sinfo32); + siginfo_user_to_user32_x86(&sinfo64, &sinfo32); #if CONFIG_DTRACE - bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo)); + bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo)); - ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo; - ut->t_dtrace_siginfo.si_code = sinfo32.si_code; - ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid; - ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid; - ut->t_dtrace_siginfo.si_status = sinfo32.si_status; - ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr); + ut->t_dtrace_siginfo.si_signo = sinfo32.si_signo; + ut->t_dtrace_siginfo.si_code = sinfo32.si_code; + ut->t_dtrace_siginfo.si_pid = sinfo32.si_pid; + ut->t_dtrace_siginfo.si_uid = sinfo32.si_uid; + ut->t_dtrace_siginfo.si_status = sinfo32.si_status; + ut->t_dtrace_siginfo.si_addr = CAST_DOWN(void *, sinfo32.si_addr); /* Fire DTrace proc:::fault probe when signal is generated by hardware. 
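
For the 32-bit leg above, siginfo_user_to_user32_x86() is a field-wise narrowing copy; an abridged sketch with stand-in types (the real user64/user32 siginfo structs carry more fields):

#include <stdint.h>

typedef struct { int si_signo, si_code; uint64_t si_addr; } sketch_si64_t;
typedef struct { int si_signo, si_code; uint32_t si_addr; } sketch_si32_t;

static void
sketch_narrow_siginfo(const sketch_si64_t *in, sketch_si32_t *out)
{
	out->si_signo = in->si_signo;
	out->si_code  = in->si_code;
	/* CAST_DOWN_EXPLICIT analogue: truncates the faulting address,
	 * as the "XXX truncates" comments above concede. */
	out->si_addr  = (uint32_t)in->si_addr;
}
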
*/ switch (sig) { @@ -623,12 +695,13 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint } DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &(ut->t_dtrace_siginfo), - void (*)(void), CAST_DOWN(sig_t, ua_catcher)); + void (*)(void), CAST_DOWN(sig_t, ua_catcher)); #endif /* CONFIG_DTRACE */ - if (copyout((caddr_t)&sinfo32, ua_sip, sizeof (sinfo32))) + if (copyout((caddr_t)&sinfo32, ua_sip, sizeof(sinfo32))) { goto bad; - + } + tstate32 = &mctxp->mctx_avx32.ss; tstate32->eip = CAST_DOWN_EXPLICIT(user32_addr_t, trampact); @@ -647,8 +720,9 @@ sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint state_count = x86_THREAD_STATE32_COUNT; state = (void *)tstate32; } - if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS) - goto bad; + if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS) { + goto bad; + } ml_fp_setvalid(FALSE); /* Tell the PAL layer about the signal */ @@ -688,28 +762,28 @@ int sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval) { union { - struct mcontext_avx32 mctx_avx32; - struct mcontext_avx64 mctx_avx64; -#if !defined(RC_HIDE_XNU_J137) - struct mcontext_avx512_32 mctx_avx512_32; - struct mcontext_avx512_64 mctx_avx512_64; -#endif + struct mcontext_avx32 mctx_avx32; + struct mcontext_avx64 mctx_avx64; + struct mcontext_avx64_full mctx_avx64_full; + struct mcontext_avx512_32 mctx_avx512_32; + struct mcontext_avx512_64 mctx_avx512_64; + struct mcontext_avx512_64_full mctx_avx512_64_full; } mctx_store, *mctxp = &mctx_store; thread_t thread = current_thread(); struct uthread * ut; struct sigacts *ps = p->p_sigacts; - int error; - int onstack = 0; + int error; + int onstack = 0; mach_msg_type_number_t ts_count; unsigned int ts_flavor; - void * ts; + void * ts; mach_msg_type_number_t fs_count; unsigned int fs_flavor; - void * fs; - int rval = EJUSTRETURN; - xstate_t sig_xstate; + void * fs; + int rval = EJUSTRETURN; + xstate_t sig_xstate; uint32_t sigreturn_validation; user_addr_t token_uctx; kern_return_t kr; @@ -722,10 +796,10 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval) */ if ((unsigned int)uap->infostyle == UC_SET_ALT_STACK) { ut->uu_sigstk.ss_flags |= SA_ONSTACK; - return (0); + return 0; } else if ((unsigned int)uap->infostyle == UC_RESET_ALT_STACK) { ut->uu_sigstk.ss_flags &= ~SA_ONSTACK; - return (0); + return 0; } bzero(mctxp, sizeof(*mctxp)); @@ -733,56 +807,69 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval) sig_xstate = current_xstate(); sigreturn_validation = atomic_load_explicit( - &ps->ps_sigreturn_validation, memory_order_relaxed); + &ps->ps_sigreturn_validation, memory_order_relaxed); token_uctx = uap->uctx; kr = machine_thread_siguctx_pointer_convert_to_user(thread, &token_uctx); assert(kr == KERN_SUCCESS); if (proc_is64bit(p)) { - struct user_ucontext64 uctx64; + struct user_ucontext64 uctx64; user64_addr_t token; + int task_has_ldt = thread_task_has_ldt(thread); - if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof (uctx64)))) - return(error); - - if ((error = copyin(uctx64.uc_mcontext64, (void *)mctxp, thread_state64[sig_xstate].mcontext_size))) - return(error); + if ((error = copyin(uap->uctx, (void *)&uctx64, sizeof(uctx64)))) { + return error; + } onstack = uctx64.uc_onstack & 01; ut->uu_sigmask = uctx64.uc_sigmask & ~sigcantmask; - ts_flavor = x86_THREAD_STATE64; - ts_count = x86_THREAD_STATE64_COUNT; + if (task_has_ldt) { + ts_flavor = 
x86_THREAD_FULL_STATE64; + ts_count = x86_THREAD_FULL_STATE64_COUNT; + fs = (void *)&mctxp->mctx_avx64_full.fs; + sig_xstate |= STATE64_FULL; + } else { + ts_flavor = x86_THREAD_STATE64; + ts_count = x86_THREAD_STATE64_COUNT; + fs = (void *)&mctxp->mctx_avx64.fs; + } + + if ((error = copyin(uctx64.uc_mcontext64, (void *)mctxp, thread_state64[sig_xstate].mcontext_size))) { + return error; + } + ts = (void *)&mctxp->mctx_avx64.ss; fs_flavor = thread_state64[sig_xstate].flavor; fs_count = thread_state64[sig_xstate].state_count; - fs = (void *)&mctxp->mctx_avx64.fs; token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token; if ((user64_addr_t)uap->token != token) { #if DEVELOPMENT || DEBUG printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n", - p->p_comm, p->p_pid, (user64_addr_t)uap->token, token); + p->p_comm, p->p_pid, (user64_addr_t)uap->token, token); #endif /* DEVELOPMENT || DEBUG */ if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) { rval = EINVAL; } } - } else { - struct user_ucontext32 uctx32; + } else { + struct user_ucontext32 uctx32; user32_addr_t token; - if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof (uctx32)))) - return(error); + if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof(uctx32)))) { + return error; + } - if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)mctxp, thread_state32[sig_xstate].mcontext_size))) - return(error); + if ((error = copyin(CAST_USER_ADDR_T(uctx32.uc_mcontext), (void *)mctxp, thread_state32[sig_xstate].mcontext_size))) { + return error; + } onstack = uctx32.uc_onstack & 01; ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask; - ts_flavor = x86_THREAD_STATE32; + ts_flavor = x86_THREAD_STATE32; ts_count = x86_THREAD_STATE32_COUNT; ts = (void *)&mctxp->mctx_avx32.ss; @@ -791,11 +878,11 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval) fs = (void *)&mctxp->mctx_avx32.fs; token = CAST_DOWN_EXPLICIT(user32_addr_t, uap->uctx) ^ - CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token); + CAST_DOWN_EXPLICIT(user32_addr_t, ps->ps_sigreturn_token); if ((user32_addr_t)uap->token != token) { #if DEVELOPMENT || DEBUG printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n", - p->p_comm, p->p_pid, (user32_addr_t)uap->token, token); + p->p_comm, p->p_pid, (user32_addr_t)uap->token, token); #endif /* DEVELOPMENT || DEBUG */ if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) { rval = EINVAL; @@ -803,13 +890,15 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval) } } - if (onstack) + if (onstack) { ut->uu_sigstk.ss_flags |= SA_ONSTACK; - else + } else { ut->uu_sigstk.ss_flags &= ~SA_ONSTACK; + } - if (ut->uu_siglist & ~ut->uu_sigmask) + if (ut->uu_siglist & ~ut->uu_sigmask) { signal_setast(thread); + } if (rval == EINVAL) { goto error_ret; @@ -823,21 +912,20 @@ sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval) rval = EINVAL; #if DEVELOPMENT || DEBUG printf("process %s[%d] sigreturn thread_setstatus error %d\n", - p->p_comm, p->p_pid, rval); + p->p_comm, p->p_pid, rval); #endif /* DEVELOPMENT || DEBUG */ goto error_ret; } - + ml_fp_setvalid(TRUE); - if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) { + if (thread_setstatus(thread, fs_flavor, fs, fs_count) != KERN_SUCCESS) { rval = EINVAL; #if DEVELOPMENT || DEBUG printf("process %s[%d] sigreturn thread_setstatus error %d\n", - p->p_comm, p->p_pid, rval); + p->p_comm, p->p_pid, rval); #endif /* 
DEVELOPMENT || DEBUG */ goto error_ret; - } error_ret: return rval; @@ -850,34 +938,33 @@ error_ret: */ int machine_exception(int exception, - mach_exception_code_t code, - __unused mach_exception_subcode_t subcode) + mach_exception_code_t code, + __unused mach_exception_subcode_t subcode) { - switch(exception) { - case EXC_BAD_ACCESS: - /* Map GP fault to SIGSEGV, otherwise defer to caller */ - if (code == EXC_I386_GPFLT) { - return SIGSEGV; - } - break; + switch (exception) { + case EXC_BAD_ACCESS: + /* Map GP fault to SIGSEGV, otherwise defer to caller */ + if (code == EXC_I386_GPFLT) { + return SIGSEGV; + } + break; - case EXC_BAD_INSTRUCTION: - return SIGILL; + case EXC_BAD_INSTRUCTION: + return SIGILL; - case EXC_ARITHMETIC: - return SIGFPE; + case EXC_ARITHMETIC: + return SIGFPE; - case EXC_SOFTWARE: - if (code == EXC_I386_BOUND) { - /* - * Map #BR, the Bound Range Exceeded exception, to - * SIGTRAP. - */ - return SIGTRAP; - } - break; + case EXC_SOFTWARE: + if (code == EXC_I386_BOUND) { + /* + * Map #BR, the Bound Range Exceeded exception, to + * SIGTRAP. + */ + return SIGTRAP; + } + break; } return 0; } - diff --git a/bsd/dev/kmreg_com.h b/bsd/dev/kmreg_com.h index 032d74acc..6905f8b86 100644 --- a/bsd/dev/kmreg_com.h +++ b/bsd/dev/kmreg_com.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,56 +22,56 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. +/* Copyright (c) 1992 NeXT Computer, Inc. All rights reserved. * * kmreg_com.h - machine independent km ioctl interface. * * HISTORY - * 16-Jan-92 Doug Mitchell at NeXT - * Created. + * 16-Jan-92 Doug Mitchell at NeXT + * Created. */ - -#ifdef KERNEL_PRIVATE -#ifndef _BSD_DEV_KMREG_COM_ +#ifdef KERNEL_PRIVATE + +#ifndef _BSD_DEV_KMREG_COM_ #define _BSD_DEV_KMREG_COM_ #include -#include +#include -/* - * Colors for fg, bg in struct km_drawrect +/* + * Colors for fg, bg in struct km_drawrect */ -#define KM_COLOR_WHITE 0 -#define KM_COLOR_LTGRAY 1 -#define KM_COLOR_DKGRAY 2 -#define KM_COLOR_BLACK 3 +#define KM_COLOR_WHITE 0 +#define KM_COLOR_LTGRAY 1 +#define KM_COLOR_DKGRAY 2 +#define KM_COLOR_BLACK 3 /* * The data to be rendered is treated as a pixmap of 2 bit pixels. - * The most significant bits of each byte is the leftmost pixel in that + * The most significant bits of each byte is the leftmost pixel in that * byte. Pixel values are assigned as described above. 
* * Each scanline should start on a 4 pixel boundry within the bitmap, * and should be a multiple of 4 pixels in length. * - * For the KMIOCERASERECT call, 'data' should be an integer set to the + * For the KMIOCERASERECT call, 'data' should be an integer set to the * color to be used for the clear operation (data.fill). - * A rect at (x,y) measuring 'width' by 'height' will be cleared to + * A rect at (x,y) measuring 'width' by 'height' will be cleared to * the specified value. */ struct km_drawrect { - unsigned short x; /* Upper left corner of rect to be imaged. */ + unsigned short x; /* Upper left corner of rect to be imaged. */ unsigned short y; - unsigned short width; /* Width and height of rect to be imaged, - * in pixels */ + unsigned short width; /* Width and height of rect to be imaged, + * in pixels */ unsigned short height; union { - void *bits; /* Pointer to 2 bit per pixel raster data. */ - int fill; /* Const color for erase operation. */ + void *bits; /* Pointer to 2 bit per pixel raster data. */ + int fill; /* Const color for erase operation. */ } data; }; @@ -79,32 +79,32 @@ struct km_drawrect { * Argument to KMIOCANIMCTL. */ typedef enum { - KM_ANIM_STOP, /* stop permanently */ - KM_ANIM_SUSPEND, /* suspend */ - KM_ANIM_RESUME /* resume */ + KM_ANIM_STOP, /* stop permanently */ + KM_ANIM_SUSPEND, /* suspend */ + KM_ANIM_RESUME /* resume */ } km_anim_ctl_t; -#define KMIOCPOPUP _IO('k', 1) /* popup new window */ -#define KMIOCRESTORE _IO('k', 2) /* restore background */ -#define KMIOCDUMPLOG _IO('k', 3) /* dump message log */ -#define KMIOCDRAWRECT _IOW('k', 5, struct km_drawrect) /* Draw rect from - * bits */ -#define KMIOCERASERECT _IOW('k', 6, struct km_drawrect) /* Erase a rect */ +#define KMIOCPOPUP _IO('k', 1) /* popup new window */ +#define KMIOCRESTORE _IO('k', 2) /* restore background */ +#define KMIOCDUMPLOG _IO('k', 3) /* dump message log */ +#define KMIOCDRAWRECT _IOW('k', 5, struct km_drawrect) /* Draw rect from + * bits */ +#define KMIOCERASERECT _IOW('k', 6, struct km_drawrect) /* Erase a rect */ -#ifdef KERNEL_PRIVATE -#define KMIOCDISABLCONS _IO('k', 8) /* disable console messages */ -#endif /* KERNEL_PRIVATE */ +#ifdef KERNEL_PRIVATE +#define KMIOCDISABLCONS _IO('k', 8) /* disable console messages */ +#endif /* KERNEL_PRIVATE */ -#define KMIOCANIMCTL _IOW('k',9, km_anim_ctl_t) - /* stop animation */ -#define KMIOCSTATUS _IOR('k',10, int) /* get status bits */ -#define KMIOCSIZE _IOR('k',11, struct winsize) /* get screen size */ +#define KMIOCANIMCTL _IOW('k',9, km_anim_ctl_t) +/* stop animation */ +#define KMIOCSTATUS _IOR('k',10, int) /* get status bits */ +#define KMIOCSIZE _IOR('k',11, struct winsize) /* get screen size */ /* * Status bits returned via KMIOCSTATUS. */ -#define KMS_SEE_MSGS 0x00000001 +#define KMS_SEE_MSGS 0x00000001 -#endif /* _BSD_DEV_KMREG_COM_ */ +#endif /* _BSD_DEV_KMREG_COM_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/dev/ldd.h b/bsd/dev/ldd.h index ff8c020c4..e8b925b61 100644 --- a/bsd/dev/ldd.h +++ b/bsd/dev/ldd.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
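
Looping back to km_drawrect above: with 2-bit pixels and the 4-pixel scanline rule, the raster size is fixed by width and height alone. A sketch of the arithmetic (4 two-bit pixels fit in one byte):

#include <stddef.h>

/* Bytes needed for the 2-bit-per-pixel raster a km_drawrect describes,
 * assuming each scanline is padded to the required 4-pixel boundary. */
static size_t
sketch_drawrect_bytes(unsigned short width, unsigned short height)
{
	size_t row_pixels = ((size_t)width + 3) & ~(size_t)3;
	return (row_pixels / 4) * height;
}
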
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* @(#)ldd.h 2.0 03/20/90 (c) 1990 NeXT +/* @(#)ldd.h 2.0 03/20/90 (c) 1990 NeXT * * ldd.h - kernel prototypes used by loadable device drivers * @@ -34,7 +34,7 @@ * Split out public interface. * * 16-Aug-90 Gregg Kellogg (gk) at NeXT - * Removed a lot of stuff that's defined in other header files. + * Removed a lot of stuff that's defined in other header files. * Eventually this file should either go away or contain only imports of * other files. * @@ -43,7 +43,7 @@ * */ -#ifndef _BSD_DEV_LDD_PRIV_ +#ifndef _BSD_DEV_LDD_PRIV_ #define _BSD_DEV_LDD_PRIV_ #include @@ -51,5 +51,4 @@ typedef int (*PFI)(); -#endif /* _BSD_DEV_LDD_PRIV_ */ - +#endif /* _BSD_DEV_LDD_PRIV_ */ diff --git a/bsd/dev/mem.c b/bsd/dev/mem.c index 2b133e64e..758c358f6 100644 --- a/bsd/dev/mem.c +++ b/bsd/dev/mem.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -82,9 +82,9 @@ #include #include -#include /* for kernel_map */ +#include /* for kernel_map */ -#include /* for PE_parse_boot_argn */ +#include /* for PE_parse_boot_argn */ boolean_t iskmemdev(dev_t dev); @@ -95,7 +95,7 @@ boolean_t dev_kmem_mask_top_bit; void dev_kmem_init(void); #if defined(__x86_64__) -extern addr64_t kvtophys(vm_offset_t va); +extern addr64_t kvtophys(vm_offset_t va); #else #error need kvtophys prototype #endif @@ -113,28 +113,29 @@ int mmrw(dev_t dev, struct uio *uio, enum uio_rw rw); int mmread(dev_t dev, struct uio *uio) { - return (mmrw(dev, uio, UIO_READ)); + return mmrw(dev, uio, UIO_READ); } int mmwrite(dev_t dev, struct uio *uio) { - return (mmrw(dev, uio, UIO_WRITE)); + return mmrw(dev, uio, UIO_WRITE); } int -mmioctl(dev_t dev, u_long cmd, __unused caddr_t data, - __unused int flag, __unused struct proc *p) +mmioctl(dev_t dev, u_long cmd, __unused caddr_t data, + __unused int flag, __unused struct proc *p) { int minnum = minor(dev); if (0 == minnum || 1 == minnum) { /* /dev/mem and /dev/kmem */ #if CONFIG_DEV_KMEM - if (!dev_kmem_enabled) - return (ENODEV); + if (!dev_kmem_enabled) { + return ENODEV; + } #else - return (ENODEV); + return ENODEV; #endif } @@ -147,7 +148,7 @@ mmioctl(dev_t dev, u_long cmd, __unused caddr_t data, return ENODEV; } - return (0); + return 0; } int @@ -160,18 +161,18 @@ mmrw(dev_t dev, struct uio *uio, enum uio_rw rw) uio_update(uio, 0); switch (minor(dev)) { - /* minor device 0 is physical memory */ case 0: - return (ENODEV); + return ENODEV; /* minor device 1 is kernel memory */ case 1: #if !CONFIG_DEV_KMEM - return (ENODEV); + return ENODEV; #else /* CONFIG_DEV_KMEM */ - if (!dev_kmem_enabled) - return (ENODEV); + if (!dev_kmem_enabled) { + return ENODEV; + } vm_address_t kaddr = (vm_address_t)uio->uio_offset; if (dev_kmem_mask_top_bit) { @@ -184,7 +185,7 @@ mmrw(dev_t dev, struct uio *uio, enum uio_rw rw) const vm_address_t top_bit = (~((vm_address_t)0)) ^ (~((vm_address_t)0) >> 1UL); if (kaddr & top_bit) { /* top bit should not be set already */ - return (EFAULT); + return EFAULT; } kaddr |= top_bit; } @@ -193,67 +194,74 @@ mmrw(dev_t dev, struct uio *uio, enum uio_rw rw) /* Do some sanity checking */ if ((kaddr > (VM_MAX_KERNEL_ADDRESS - c)) || - (kaddr <= VM_MIN_KERNEL_AND_KEXT_ADDRESS)) + (kaddr <= VM_MIN_KERNEL_AND_KEXT_ADDRESS)) { goto fault; - if (!kernacc(kaddr, c)) + } + if (!kernacc(kaddr, c)) { goto fault; + } error = uiomove((const char *)(uintptr_t)kaddr, - (int)c, uio); - if (error) + (int)c, uio); + if (error) { break; + } continue; /* Keep going until UIO is done */ #endif /* CONFIG_DEV_KMEM */ /* minor device 2 is EOF/RATHOLE */ case 2: - if (rw == UIO_READ) - return (0); + if (rw == UIO_READ) { + return 0; + } c = uio_curriovlen(uio); error = 0; /* Always succeeds, always consumes all input */ break; case 3: - if(devzerobuf == NULL) { - MALLOC(devzerobuf, caddr_t,PAGE_SIZE, M_TEMP, M_WAITOK); + if (devzerobuf == NULL) { + MALLOC(devzerobuf, caddr_t, PAGE_SIZE, M_TEMP, M_WAITOK); bzero(devzerobuf, PAGE_SIZE); } - if(uio->uio_rw == UIO_WRITE) { + if (uio->uio_rw == UIO_WRITE) { c = uio_curriovlen(uio); error = 0; /* Always succeeds, always consumes all input */ break; } - c = min(uio_curriovlen(uio), PAGE_SIZE); + c = min(uio_curriovlen(uio), PAGE_SIZE); error = uiomove(devzerobuf, (int)c, uio); - if (error) + if (error) { break; + } continue; /* Keep going until UIO is done */ default: - return (ENODEV); + return ENODEV; } - - if (error) + + if (error) { 
break; + } uio_update(uio, c); } - return (error); + return error; #if CONFIG_DEV_KMEM fault: - return (EFAULT); + return EFAULT; #endif } #if CONFIG_DEV_KMEM -void dev_kmem_init(void) +void +dev_kmem_init(void) { uint32_t kmem; if (PE_i_can_has_debugger(NULL) && - PE_parse_boot_argn("kmem", &kmem, sizeof (kmem))) { + PE_parse_boot_argn("kmem", &kmem, sizeof(kmem))) { if (kmem & 0x1) { dev_kmem_enabled = TRUE; } @@ -265,23 +273,24 @@ void dev_kmem_init(void) boolean_t kernacc( - off_t start, - size_t len -) + off_t start, + size_t len + ) { off_t base; off_t end; - + base = trunc_page(start); end = start + len; - + while (base < end) { - if(kvtophys((vm_offset_t)base) == 0ULL) - return(FALSE); + if (kvtophys((vm_offset_t)base) == 0ULL) { + return FALSE; + } base += page_size; - } + } - return (TRUE); + return TRUE; } #endif /* CONFIG_DEV_KMEM */ @@ -289,7 +298,8 @@ kernacc( /* * Returns true if dev is /dev/mem or /dev/kmem. */ -boolean_t iskmemdev(dev_t dev) +boolean_t +iskmemdev(dev_t dev) { - return (major(dev) == 3 && minor(dev) < 2); + return major(dev) == 3 && minor(dev) < 2; } diff --git a/bsd/dev/memdev.c b/bsd/dev/memdev.c index 434fcbdf7..ffac54d04 100644 --- a/bsd/dev/memdev.c +++ b/bsd/dev/memdev.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -71,7 +71,7 @@ /* * RAM disk driver. * - * Block interface to a ramdisk. + * Block interface to a ramdisk. 
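
The kernacc() loop above validates a kernel range one page at a time; a condensed sketch, with a stubbed identity-map kvtophys() standing in for the real translation:

#include <stddef.h>
#include <stdint.h>

#define SK_PAGE_SIZE 4096ULL

/* Stub translation: identity map for illustration. The real kvtophys()
 * returns 0 for an unmapped virtual address. */
static uint64_t
sk_kvtophys(uintptr_t va)
{
	return (uint64_t)va;
}

/* Condensed kernacc(): a range is accessible only if every page in it
 * has a physical translation. */
static int
sk_kernacc(uintptr_t start, size_t len)
{
	uintptr_t base = start & ~(uintptr_t)(SK_PAGE_SIZE - 1); /* trunc_page */
	uintptr_t end = start + len;

	for (; base < end; base += SK_PAGE_SIZE) {
		if (sk_kvtophys(base) == 0) {
			return 0; /* FALSE: would fault */
		}
	}
	return 1; /* TRUE */
}
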
* */ @@ -99,25 +99,25 @@ #include -void mdevinit(int the_cnt); +void mdevinit(int the_cnt); -static open_close_fcn_t mdevopen; -static open_close_fcn_t mdevclose; -static psize_fcn_t mdevsize; -static strategy_fcn_t mdevstrategy; -static int mdevbioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); -static int mdevcioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); -static int mdevrw(dev_t dev, struct uio *uio, int ioflag); +static open_close_fcn_t mdevopen; +static open_close_fcn_t mdevclose; +static psize_fcn_t mdevsize; +static strategy_fcn_t mdevstrategy; +static int mdevbioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); +static int mdevcioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); +static int mdevrw(dev_t dev, struct uio *uio, int ioflag); #ifdef CONFIG_MEMDEV_INSECURE -static char * nonspace(char *pos, char *end); -static char * getspace(char *pos, char *end); -static char * cvtnum(char *pos, char *end, uint64_t *num); +static char * nonspace(char *pos, char *end); +static char * getspace(char *pos, char *end); +static char * cvtnum(char *pos, char *end, uint64_t *num); #endif /* CONFIG_MEMDEV_INSECURE */ -extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes); -extern void mapping_set_mod(ppnum_t pn); -extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); +extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes); +extern void mapping_set_mod(ppnum_t pn); +extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); /* * Maximal number of memory devices. @@ -131,203 +131,211 @@ extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); */ static struct bdevsw mdevbdevsw = { - /* open */ mdevopen, - /* close */ mdevclose, - /* strategy */ mdevstrategy, - /* ioctl */ mdevbioctl, - /* dump */ eno_dump, - /* psize */ mdevsize, - /* flags */ D_DISK, + /* open */ mdevopen, + /* close */ mdevclose, + /* strategy */ mdevstrategy, + /* ioctl */ mdevbioctl, + /* dump */ eno_dump, + /* psize */ mdevsize, + /* flags */ D_DISK, }; static struct cdevsw mdevcdevsw = { - /* open */ mdevopen, - /* close */ mdevclose, - /* read */ mdevrw, - /* write */ mdevrw, - /* ioctl */ mdevcioctl, - /* stop */ eno_stop, - /* reset */ eno_reset, - /* ttys */ NULL, - /* select */ eno_select, - /* mmap */ eno_mmap, - /* strategy */ eno_strat, - /* getc */ eno_getc, - /* putc */ eno_putc, - /* flags */ D_DISK, + /* open */ mdevopen, + /* close */ mdevclose, + /* read */ mdevrw, + /* write */ mdevrw, + /* ioctl */ mdevcioctl, + /* stop */ eno_stop, + /* reset */ eno_reset, + /* ttys */ NULL, + /* select */ eno_select, + /* mmap */ eno_mmap, + /* strategy */ eno_strat, + /* getc */ eno_getc, + /* putc */ eno_putc, + /* flags */ D_DISK, }; struct mdev { - uint64_t mdBase; /* file size in bytes */ - uint32_t mdSize; /* file size in bytes */ - int mdFlags; /* flags */ - int mdSecsize; /* sector size */ - int mdBDev; /* Block device number */ - int mdCDev; /* Character device number */ - void * mdbdevb; - void * mdcdevb; + uint64_t mdBase; /* file size in bytes */ + uint32_t mdSize; /* file size in bytes */ + int mdFlags; /* flags */ + int mdSecsize; /* sector size */ + int mdBDev; /* Block device number */ + int mdCDev; /* Character device number */ + void * mdbdevb; + void * mdcdevb; } mdev[NB_MAX_MDEVICES]; /* mdFlags */ -#define mdInited 0x01 /* This device defined */ -#define mdRO 0x02 /* This device is read-only */ -#define mdPhys 0x04 /* This device is in physical memory */ +#define mdInited 0x01 /* This device 
defined */ +#define mdRO 0x02 /* This device is read-only */ +#define mdPhys 0x04 /* This device is in physical memory */ int mdevBMajor = -1; int mdevCMajor = -1; -static int mdevioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p, int is_char); -dev_t mdevadd(int devid, uint64_t base, unsigned int size, int phys); -dev_t mdevlookup(int devid); -void mdevremoveall(void); -int mdevgetrange(int devid, uint64_t *base, uint64_t *size); +static int mdevioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p, int is_char); +dev_t mdevadd(int devid, uint64_t base, unsigned int size, int phys); +dev_t mdevlookup(int devid); +void mdevremoveall(void); +int mdevgetrange(int devid, uint64_t *base, uint64_t *size); -static int mdevclose(__unused dev_t dev, __unused int flags, - __unused int devtype, __unused struct proc *p) { - - return (0); +static int +mdevclose(__unused dev_t dev, __unused int flags, + __unused int devtype, __unused struct proc *p) +{ + return 0; } -static int mdevopen(dev_t dev, int flags, __unused int devtype, __unused struct proc *p) { - +static int +mdevopen(dev_t dev, int flags, __unused int devtype, __unused struct proc *p) +{ int devid; - devid = minor(dev); /* Get minor device number */ - - if (devid >= NB_MAX_MDEVICES) return (ENXIO); /* Not valid */ + devid = minor(dev); /* Get minor device number */ - if ((flags & FWRITE) && (mdev[devid].mdFlags & mdRO)) return (EACCES); /* Currently mounted RO */ - - return(0); + if (devid >= NB_MAX_MDEVICES) { + return ENXIO; /* Not valid */ + } + if ((flags & FWRITE) && (mdev[devid].mdFlags & mdRO)) { + return EACCES; /* Currently mounted RO */ + } + return 0; } -static int mdevrw(dev_t dev, struct uio *uio, __unused int ioflag) { - int status; - addr64_t mdata; - int devid; - enum uio_seg saveflag; +static int +mdevrw(dev_t dev, struct uio *uio, __unused int ioflag) +{ + int status; + addr64_t mdata; + int devid; + enum uio_seg saveflag; - devid = minor(dev); /* Get minor device number */ + devid = minor(dev); /* Get minor device number */ - if (devid >= NB_MAX_MDEVICES) return (ENXIO); /* Not valid */ - if (!(mdev[devid].mdFlags & mdInited)) return (ENXIO); /* Have we actually been defined yet? */ + if (devid >= NB_MAX_MDEVICES) { + return ENXIO; /* Not valid */ + } + if (!(mdev[devid].mdFlags & mdInited)) { + return ENXIO; /* Have we actually been defined yet? 
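
The same gates recur across the mdev entry points; condensed into one helper (the limit and struct are illustrative, the flag bits come from the hunk above):

#include <errno.h>

#define SK_NB_MAX_MDEVICES 16 /* illustrative; the real limit is a build constant */
#define SK_mdInited 0x01
#define SK_mdRO     0x02

struct sk_mdev { int mdFlags; };

/* Returns 0 if the minor device may be used, else the errno the entry
 * points above hand back: ENXIO for an invalid or undefined device,
 * EACCES for a write attempt on a read-only one. */
static int
sk_mdev_check(const struct sk_mdev *devs, int devid, int writing)
{
	if (devid >= SK_NB_MAX_MDEVICES) {
		return ENXIO;
	}
	if (!(devs[devid].mdFlags & SK_mdInited)) {
		return ENXIO;
	}
	if (writing && (devs[devid].mdFlags & SK_mdRO)) {
		return EACCES;
	}
	return 0;
}
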
*/ + } + mdata = ((addr64_t)mdev[devid].mdBase << 12) + uio->uio_offset; /* Point to the area in "file" */ - mdata = ((addr64_t)mdev[devid].mdBase << 12) + uio->uio_offset; /* Point to the area in "file" */ - - saveflag = uio->uio_segflg; /* Remember what the request is */ + saveflag = uio->uio_segflg; /* Remember what the request is */ #if LP64_DEBUG if (UIO_IS_USER_SPACE(uio) == 0 && UIO_IS_SYS_SPACE(uio) == 0) { - panic("mdevrw - invalid uio_segflg\n"); + panic("mdevrw - invalid uio_segflg\n"); } #endif /* LP64_DEBUG */ /* Make sure we are moving from physical ram if physical device */ if (mdev[devid].mdFlags & mdPhys) { - if (uio->uio_segflg == UIO_USERSPACE64) - uio->uio_segflg = UIO_PHYS_USERSPACE64; - else if (uio->uio_segflg == UIO_USERSPACE32) - uio->uio_segflg = UIO_PHYS_USERSPACE32; - else - uio->uio_segflg = UIO_PHYS_USERSPACE; + if (uio->uio_segflg == UIO_USERSPACE64) { + uio->uio_segflg = UIO_PHYS_USERSPACE64; + } else if (uio->uio_segflg == UIO_USERSPACE32) { + uio->uio_segflg = UIO_PHYS_USERSPACE32; + } else { + uio->uio_segflg = UIO_PHYS_USERSPACE; + } } - status = uiomove64(mdata, uio_resid(uio), uio); /* Move the data */ - uio->uio_segflg = saveflag; /* Restore the flag */ + status = uiomove64(mdata, uio_resid(uio), uio); /* Move the data */ + uio->uio_segflg = saveflag; /* Restore the flag */ - return (status); + return status; } -static void mdevstrategy(struct buf *bp) { +static void +mdevstrategy(struct buf *bp) +{ unsigned int left, lop, csize; vm_offset_t vaddr, blkoff; int devid; addr64_t paddr, fvaddr; ppnum_t pp; - devid = minor(buf_device(bp)); /* Get minor device number */ + devid = minor(buf_device(bp)); /* Get minor device number */ - if ((mdev[devid].mdFlags & mdInited) == 0) { /* Have we actually been defined yet? */ - buf_seterror(bp, ENXIO); + if ((mdev[devid].mdFlags & mdInited) == 0) { /* Have we actually been defined yet? */ + buf_seterror(bp, ENXIO); buf_biodone(bp); return; } - buf_setresid(bp, buf_count(bp)); /* Set byte count */ - - blkoff = buf_blkno(bp) * mdev[devid].mdSecsize; /* Get offset into file */ + buf_setresid(bp, buf_count(bp)); /* Set byte count */ + + blkoff = buf_blkno(bp) * mdev[devid].mdSecsize; /* Get offset into file */ /* * Note that reading past end is an error, but reading at end is an EOF. For these * we just return with resid == count. */ - if (blkoff >= (mdev[devid].mdSize << 12)) { /* Are they trying to read/write at/after end? */ - if(blkoff != (mdev[devid].mdSize << 12)) { /* Are we trying to read after EOF? */ - buf_seterror(bp, EINVAL); /* Yeah, this is an error */ + if (blkoff >= (mdev[devid].mdSize << 12)) { /* Are they trying to read/write at/after end? */ + if (blkoff != (mdev[devid].mdSize << 12)) { /* Are we trying to read after EOF? */ + buf_seterror(bp, EINVAL); /* Yeah, this is an error */ } - buf_biodone(bp); /* Return */ + buf_biodone(bp); /* Return */ return; } - if ((blkoff + buf_count(bp)) > (mdev[devid].mdSize << 12)) { /* Will this read go past end? */ - buf_setcount(bp, ((mdev[devid].mdSize << 12) - blkoff)); /* Yes, trim to max */ + if ((blkoff + buf_count(bp)) > (mdev[devid].mdSize << 12)) { /* Will this read go past end? 
*/ + buf_setcount(bp, ((mdev[devid].mdSize << 12) - blkoff)); /* Yes, trim to max */ } /* * make sure the buffer's data area is * accessible */ - if (buf_map(bp, (caddr_t *)&vaddr)) - panic("ramstrategy: buf_map failed\n"); + if (buf_map(bp, (caddr_t *)&vaddr)) { + panic("ramstrategy: buf_map failed\n"); + } + + fvaddr = (mdev[devid].mdBase << 12) + blkoff; /* Point to offset into ram disk */ - fvaddr = (mdev[devid].mdBase << 12) + blkoff; /* Point to offset into ram disk */ - - if (buf_flags(bp) & B_READ) { /* Is this a read? */ - if(!(mdev[devid].mdFlags & mdPhys)) { /* Physical mapped disk? */ + if (buf_flags(bp) & B_READ) { /* Is this a read? */ + if (!(mdev[devid].mdFlags & mdPhys)) { /* Physical mapped disk? */ bcopy((void *)((uintptr_t)fvaddr), - (void *)vaddr, (size_t)buf_count(bp)); /* This is virtual, just get the data */ - } - else { - left = buf_count(bp); /* Init the amount left to copy */ - while(left) { /* Go until it is all copied */ - - lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095))); /* Get smallest amount left on sink and source */ - csize = min(lop, left); /* Don't move more than we need to */ - - pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr)); /* Get the sink physical address */ - if(!pp) { /* Not found, what gives? */ + (void *)vaddr, (size_t)buf_count(bp)); /* This is virtual, just get the data */ + } else { + left = buf_count(bp); /* Init the amount left to copy */ + while (left) { /* Go until it is all copied */ + lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095))); /* Get smallest amount left on sink and source */ + csize = min(lop, left); /* Don't move more than we need to */ + + pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr)); /* Get the sink physical address */ + if (!pp) { /* Not found, what gives? */ panic("mdevstrategy: sink address %016llX not mapped\n", (addr64_t)((uintptr_t)vaddr)); } - paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095)); /* Get actual address */ - bcopy_phys(fvaddr, paddr, csize); /* Copy this on in */ - mapping_set_mod(paddr >> 12); /* Make sure we know that it is modified */ - - left = left - csize; /* Calculate what is left */ - vaddr = vaddr + csize; /* Move to next sink address */ - fvaddr = fvaddr + csize; /* Bump to next physical address */ + paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095)); /* Get actual address */ + bcopy_phys(fvaddr, paddr, csize); /* Copy this on in */ + mapping_set_mod(paddr >> 12); /* Make sure we know that it is modified */ + + left = left - csize; /* Calculate what is left */ + vaddr = vaddr + csize; /* Move to next sink address */ + fvaddr = fvaddr + csize; /* Bump to next physical address */ } } - } - else { /* This is a write */ - if(!(mdev[devid].mdFlags & mdPhys)) { /* Physical mapped disk? */ + } else { /* This is a write */ + if (!(mdev[devid].mdFlags & mdPhys)) { /* Physical mapped disk? */ bcopy((void *)vaddr, (void *)((uintptr_t)fvaddr), - (size_t)buf_count(bp)); /* This is virtual, just put the data */ - } - else { - left = buf_count(bp); /* Init the amount left to copy */ - while(left) { /* Go until it is all copied */ - - lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095))); /* Get smallest amount left on sink and source */ - csize = min(lop, left); /* Don't move more than we need to */ - - pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr)); /* Get the source physical address */ - if(!pp) { /* Not found, what gives? 
*/ + (size_t)buf_count(bp)); /* This is virtual, just put the data */ + } else { + left = buf_count(bp); /* Init the amount left to copy */ + while (left) { /* Go until it is all copied */ + lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095))); /* Get smallest amount left on sink and source */ + csize = min(lop, left); /* Don't move more than we need to */ + + pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr)); /* Get the source physical address */ + if (!pp) { /* Not found, what gives? */ panic("mdevstrategy: source address %016llX not mapped\n", (addr64_t)((uintptr_t)vaddr)); } - paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095)); /* Get actual address */ - - bcopy_phys(paddr, fvaddr, csize); /* Move this on out */ - - left = left - csize; /* Calculate what is left */ - vaddr = vaddr + csize; /* Move to next sink address */ - fvaddr = fvaddr + csize; /* Bump to next physical address */ + paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095)); /* Get actual address */ + + bcopy_phys(paddr, fvaddr, csize); /* Move this on out */ + + left = left - csize; /* Calculate what is left */ + vaddr = vaddr + csize; /* Move to next sink address */ + fvaddr = fvaddr + csize; /* Bump to next physical address */ } } } @@ -339,115 +347,132 @@ static void mdevstrategy(struct buf *bp) { */ buf_unmap(bp); - buf_setresid(bp, 0); /* Nothing more to do */ - buf_biodone(bp); /* Say we've finished */ + buf_setresid(bp, 0); /* Nothing more to do */ + buf_biodone(bp); /* Say we've finished */ } -static int mdevbioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) { - return (mdevioctl(dev, cmd, data, flag, p, 0)); +static int +mdevbioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) +{ + return mdevioctl(dev, cmd, data, flag, p, 0); } -static int mdevcioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) { - return (mdevioctl(dev, cmd, data, flag, p, 1)); +static int +mdevcioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) +{ + return mdevioctl(dev, cmd, data, flag, p, 1); } -static int mdevioctl(dev_t dev, u_long cmd, caddr_t data, __unused int flag, - struct proc *p, int is_char) { +static int +mdevioctl(dev_t dev, u_long cmd, caddr_t data, __unused int flag, + struct proc *p, int is_char) +{ int error; u_int32_t *f; u_int64_t *o; int devid; dk_memdev_info_t * memdev_info; - devid = minor(dev); /* Get minor device number */ - - if (devid >= NB_MAX_MDEVICES) return (ENXIO); /* Not valid */ - - error = proc_suser(p); /* Are we superman? */ - if (error) return (error); /* Nope... */ + devid = minor(dev); /* Get minor device number */ + if (devid >= NB_MAX_MDEVICES) { + return ENXIO; /* Not valid */ + } + error = proc_suser(p); /* Are we superman? */ + if (error) { + return error; /* Nope... */ + } f = (u_int32_t*)data; o = (u_int64_t *)data; memdev_info = (dk_memdev_info_t *) data; switch (cmd) { + case DKIOCGETMAXBLOCKCOUNTREAD: + *o = 32; + break; - case DKIOCGETMAXBLOCKCOUNTREAD: - *o = 32; - break; - - case DKIOCGETMAXBLOCKCOUNTWRITE: - *o = 32; - break; - - case DKIOCGETMAXSEGMENTCOUNTREAD: - *o = 32; - break; - - case DKIOCGETMAXSEGMENTCOUNTWRITE: - *o = 32; - break; - - case DKIOCGETBLOCKSIZE: - *f = mdev[devid].mdSecsize; - break; - - case DKIOCSETBLOCKSIZE: - if (is_char) return (ENODEV); /* We can only do this for a block */ - - if (*f < DEV_BSIZE) return (EINVAL); /* Too short? 
*/ - - mdev[devid].mdSecsize = *f; /* set the new block size */ - break; - - case DKIOCISWRITABLE: - *f = 1; - break; - - case DKIOCGETBLOCKCOUNT: - if(!(mdev[devid].mdFlags & mdInited)) return (ENXIO); - *o = ((mdev[devid].mdSize << 12) + mdev[devid].mdSecsize - 1) / mdev[devid].mdSecsize; - break; - - /* - * We're interested in the following bits of information: - * Are you a memory-backed device (always yes, in this case)? - * Physical memory (mdPhys)? - * What is your base page? - * What is your size? - */ - case DKIOCGETMEMDEVINFO: - if (!(mdev[devid].mdFlags & mdInited)) return (ENXIO); - memdev_info->mi_mdev = TRUE; - memdev_info->mi_phys = (mdev[devid].mdFlags & mdPhys) ? TRUE : FALSE; - memdev_info->mi_base = mdev[devid].mdBase; - memdev_info->mi_size = mdev[devid].mdSize; - break; - - default: - error = ENOTTY; - break; - } - return(error); -} + case DKIOCGETMAXBLOCKCOUNTWRITE: + *o = 32; + break; + case DKIOCGETMAXSEGMENTCOUNTREAD: + *o = 32; + break; -static int mdevsize(dev_t dev) { + case DKIOCGETMAXSEGMENTCOUNTWRITE: + *o = 32; + break; - int devid; + case DKIOCGETBLOCKSIZE: + *f = mdev[devid].mdSecsize; + break; + + case DKIOCSETBLOCKSIZE: + if (is_char) { + return ENODEV; /* We can only do this for a block */ + } + if (*f < DEV_BSIZE) { + return EINVAL; /* Too short? */ + } + mdev[devid].mdSecsize = *f; /* set the new block size */ + break; - devid = minor(dev); /* Get minor device number */ - if (devid >= NB_MAX_MDEVICES) return (ENXIO); /* Not valid */ + case DKIOCISWRITABLE: + *f = 1; + break; - if ((mdev[devid].mdFlags & mdInited) == 0) return(-1); /* Not inited yet */ + case DKIOCGETBLOCKCOUNT: + if (!(mdev[devid].mdFlags & mdInited)) { + return ENXIO; + } + *o = ((mdev[devid].mdSize << 12) + mdev[devid].mdSecsize - 1) / mdev[devid].mdSecsize; + break; - return(mdev[devid].mdSecsize); + /* + * We're interested in the following bits of information: + * Are you a memory-backed device (always yes, in this case)? + * Physical memory (mdPhys)? + * What is your base page? + * What is your size? + */ + case DKIOCGETMEMDEVINFO: + if (!(mdev[devid].mdFlags & mdInited)) { + return ENXIO; + } + memdev_info->mi_mdev = TRUE; + memdev_info->mi_phys = (mdev[devid].mdFlags & mdPhys) ? TRUE : FALSE; + memdev_info->mi_base = mdev[devid].mdBase; + memdev_info->mi_size = mdev[devid].mdSize; + break; + + default: + error = ENOTTY; + break; + } + return error; } -#include -void mdevinit(__unused int the_cnt) { +static int +mdevsize(dev_t dev) +{ + int devid; + devid = minor(dev); /* Get minor device number */ + if (devid >= NB_MAX_MDEVICES) { + return ENXIO; /* Not valid */ + } + if ((mdev[devid].mdFlags & mdInited) == 0) { + return -1; /* Not inited yet */ + } + return mdev[devid].mdSecsize; +} + +#include + +void +mdevinit(__unused int the_cnt) +{ #ifdef CONFIG_MEMDEV_INSECURE int devid, phys; @@ -455,201 +480,250 @@ void mdevinit(__unused int the_cnt) { uint64_t size; char *ba, *lp; dev_t dev; - - - ba = PE_boot_args(); /* Get the boot arguments */ - lp = ba + 256; /* Point to the end */ - - while(1) { /* Step through, looking for our keywords */ - phys = 0; /* Assume virtual memory device */ - ba = nonspace(ba, lp); /* Find non-space */ - if(ba >= lp) return; /* We are done if no more... */ - if(((ba[0] != 'v') && (ba[0] != 'p')) - || (ba[1] != 'm') || (ba[2] != 'd') || (ba[4] != '=') - || (ba[3] < '0') || (ba[3] > 'f') - || ((ba[3] > '9') && (ba[3] < 'a'))) { /* Is this of form "vmdx=" or "pmdx=" where x is hex digit? 
 */
-
-		ba = getspace(ba, lp);				/* Find next white space or end */
-		continue;					/* Start looking for the next one */
-	}
-
-	if(ba[0] == 'p') phys = 1;				/* Set physical memory disk */
-
-	devid = ba[3] & 0xF;					/* Assume digit */
-	if(ba[3] > '9') devid += 9;				/* Adjust for hex digits */
-
-	ba = &ba[5];						/* Step past keyword */
-	ba = cvtnum(ba, lp, &base);				/* Convert base of memory disk */
-	if(ba >= lp) return;					/* Malformed one at the end, leave */
-	if(ba[0] != '.') continue;				/* If not length separator, try next... */
-	if(base & 0xFFF) continue;				/* Only allow page aligned stuff */
-
-	ba++;							/* Step past '.' */
-	ba = cvtnum(ba, lp, &size);				/* Try to convert it */
-	if(!size || (size & 0xFFF)) continue;			/* Allow only non-zero page size multiples */
-	if(ba < lp) {						/* If we are not at end, check end character */
-		if((ba[0] != ' ') && (ba[0] != 0)) continue;	/* End must be null or space */
-	}
-
-	dev = mdevadd(devid, base >> 12, (unsigned)size >> 12, phys);	/* Go add the device */
+
+
+	ba = PE_boot_args();					/* Get the boot arguments */
+	lp = ba + 256;						/* Point to the end */
+
+	while (1) {						/* Step through, looking for our keywords */
+		phys = 0;					/* Assume virtual memory device */
+		ba = nonspace(ba, lp);				/* Find non-space */
+		if (ba >= lp) {
+			return;					/* We are done if no more... */
+		}
+		if (((ba[0] != 'v') && (ba[0] != 'p'))
+		    || (ba[1] != 'm') || (ba[2] != 'd') || (ba[4] != '=')
+		    || (ba[3] < '0') || (ba[3] > 'f')
+		    || ((ba[3] > '9') && (ba[3] < 'a'))) {	/* Is this of form "vmdx=" or "pmdx=" where x is hex digit? */
+			ba = getspace(ba, lp);			/* Find next white space or end */
+			continue;				/* Start looking for the next one */
+		}
+
+		if (ba[0] == 'p') {
+			phys = 1;				/* Set physical memory disk */
+		}
+		devid = ba[3] & 0xF;				/* Assume digit */
+		if (ba[3] > '9') {
+			devid += 9;				/* Adjust for hex digits */
+		}
+		ba = &ba[5];					/* Step past keyword */
+		ba = cvtnum(ba, lp, &base);			/* Convert base of memory disk */
+		if (ba >= lp) {
+			return;					/* Malformed one at the end, leave */
+		}
+		if (ba[0] != '.') {
+			continue;				/* If not length separator, try next... */
+		}
+		if (base & 0xFFF) {
+			continue;				/* Only allow page aligned stuff */
+		}
+		ba++;						/* Step past '.' */
+		ba = cvtnum(ba, lp, &size);			/* Try to convert it */
+		if (!size || (size & 0xFFF)) {
+			continue;				/* Allow only non-zero page size multiples */
+		}
+		if (ba < lp) {					/* If we are not at end, check end character */
+			if ((ba[0] != ' ') && (ba[0] != 0)) {
+				continue;			/* End must be null or space */
+			}
+		}
+
+		dev = mdevadd(devid, base >> 12, (unsigned)size >> 12, phys);	/* Go add the device */
 	}
 #endif /* CONFIG_MEMDEV_INSECURE */

 	return;
-
 }

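The loop above accepts memory-disk boot arguments of the form vmdN=base.size (virtual) or pmdN=base.size (physical), where N is a single hex digit and base and size are page-aligned byte counts in decimal or 0x-prefixed hex. A minimal user-space sketch of the same grammar, with strtoull standing in for the kernel's cvtnum (parse_mdev_spec and main are illustrative names, not part of xnu, and full digit validation is elided):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int
parse_mdev_spec(const char *arg, int *devid, uint64_t *base, uint64_t *size, int *phys)
{
	char *cursor;

	if ((arg[0] != 'v' && arg[0] != 'p') || arg[1] != 'm'
	    || arg[2] != 'd' || arg[4] != '=') {
		return -1;			/* Not of the form [vp]mdN= */
	}
	*phys = (arg[0] == 'p');		/* 'p' selects physical memory */
	*devid = (arg[3] <= '9') ? arg[3] - '0' : (arg[3] & 0xF) + 9;	/* One hex digit */
	*base = strtoull(&arg[5], &cursor, 0);	/* Base 0 accepts decimal and 0x hex */
	if (*cursor != '.') {
		return -1;			/* '.' separates base from size */
	}
	*size = strtoull(cursor + 1, &cursor, 0);
	if (*size == 0 || (*base & 0xFFF) || (*size & 0xFFF)) {
		return -1;			/* Both must be non-zero page multiples */
	}
	return 0;
}

int
main(void)
{
	int devid, phys;
	uint64_t base, size;

	if (parse_mdev_spec("pmd0=0x10000000.0x800000", &devid, &base, &size, &phys) == 0) {
		printf("md%x: %s base=%llx size=%llx\n", devid,
		    phys ? "physical" : "virtual",
		    (unsigned long long)base, (unsigned long long)size);
	}
	return 0;
}

Feeding it "pmd0=0x10000000.0x800000" reports a physical device covering 8 MB at 256 MB, which matches what mdevadd() is handed above after the two values are shifted right by 12 to convert bytes to pages.
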
 #ifdef CONFIG_MEMDEV_INSECURE

-char *nonspace(char *pos, char *end) {			/* Find next non-space in string */
-
-	if(pos >= end) return end;			/* Don't go past end */
-	if(pos[0] == 0) return end;			/* If at null, make end */
-
-	while(1) {					/* Keep going */
-		if(pos[0] != ' ') return pos;		/* Leave if we found one */
-		pos++;					/* Stop */
-		if(pos >= end) return end;		/* Quit if we run off end */
+char *
+nonspace(char *pos, char *end)				/* Find next non-space in string */
+{
+	if (pos >= end) {
+		return end;				/* Don't go past end */
+	}
+	if (pos[0] == 0) {
+		return end;				/* If at null, make end */
+	}
+	while (1) {					/* Keep going */
+		if (pos[0] != ' ') {
+			return pos;			/* Leave if we found one */
+		}
+		pos++;					/* Stop */
+		if (pos >= end) {
+			return end;			/* Quit if we run off end */
+		}
 	}
 }

-char *getspace(char *pos, char *end) {			/* Find next space in string */
-
-	while(1) {					/* Keep going */
-		if(pos >= end) return end;		/* Don't go past end */
-		if(pos[0] == 0) return end;		/* Leave if we hit null */
-		if(pos[0] == ' ') return pos;		/* Leave if we found one */
-		pos++;					/* Stop */
+char *
+getspace(char *pos, char *end)				/* Find next space in string */
+{
+	while (1) {					/* Keep going */
+		if (pos >= end) {
+			return end;			/* Don't go past end */
+		}
+		if (pos[0] == 0) {
+			return end;			/* Leave if we hit null */
+		}
+		if (pos[0] == ' ') {
+			return pos;			/* Leave if we found one */
+		}
+		pos++;					/* Stop */
+	}
 }

-char *cvtnum(char *pos, char *end, uint64_t *num) {	/* Convert to a number */
-
+char *
+cvtnum(char *pos, char *end, uint64_t *num)		/* Convert to a number */
+{
 	int rad, dig;
-
-	*num = 0;					/* Set answer to 0 to start */
+
+	*num = 0;					/* Set answer to 0 to start */
 	rad = 10;
-	if(pos >= end) return end;			/* Don't go past end */
-	if(pos[0] == 0) return end;			/* If at null, make end */
-
-	if(pos[0] == '0' && ((pos[1] == 'x') || (pos[1] == 'X'))) {	/* A hex constant? */
+	if (pos >= end) {
+		return end;				/* Don't go past end */
+	}
+	if (pos[0] == 0) {
+		return end;				/* If at null, make end */
+	}
+	if (pos[0] == '0' && ((pos[1] == 'x') || (pos[1] == 'X'))) {	/* A hex constant? (0x or 0X) */
 		rad = 16;
-		pos += 2;				/* Point to the number */
-	}
-
-	while(1) {					/* Convert it */
-
-		if(pos >= end) return end;		/* Don't go past end */
-		if(pos[0] == 0) return end;		/* If at null, make end */
-		if(pos[0] < '0') return pos;		/* Leave if non-digit */
-		dig = pos[0] & 0xF;			/* Extract digit */
-		if(pos[0] > '9') {			/* Is it bigger than 9? 
*/ - if(rad == 10) return pos; /* Leave if not base 10 */ - if(!(((pos[0] >= 'A') && (pos[0] <= 'F')) - || ((pos[0] >= 'a') && (pos[0] <= 'f')))) return pos; /* Leave if bogus char */ - dig = dig + 9; /* Adjust for character */ - } - *num = (*num * rad) + dig; /* Accumulate the number */ - pos++; /* Step on */ + pos += 2; /* Point to the number */ + } + + while (1) { /* Convert it */ + if (pos >= end) { + return end; /* Don't go past end */ + } + if (pos[0] == 0) { + return end; /* If at null, make end */ + } + if (pos[0] < '0') { + return pos; /* Leave if non-digit */ + } + dig = pos[0] & 0xF; /* Extract digit */ + if (pos[0] > '9') { /* Is it bigger than 9? */ + if (rad == 10) { + return pos; /* Leave if not base 10 */ + } + if (!(((pos[0] >= 'A') && (pos[0] <= 'F')) + || ((pos[0] >= 'a') && (pos[0] <= 'f')))) { + return pos; /* Leave if bogus char */ + } + dig = dig + 9; /* Adjust for character */ + } + *num = (*num * rad) + dig; /* Accumulate the number */ + pos++; /* Step on */ } } #endif /* CONFIG_MEMDEV_INSECURE */ -dev_t mdevadd(int devid, uint64_t base, unsigned int size, int phys) { - +dev_t +mdevadd(int devid, uint64_t base, unsigned int size, int phys) +{ int i; - - if(devid < 0) { + if (devid < 0) { devid = -1; - for(i = 0; i < NB_MAX_MDEVICES; i++) { /* Search all known memory devices */ - if(!(mdev[i].mdFlags & mdInited)) { /* Is this a free one? */ - if(devid < 0)devid = i; /* Remember first free one */ - continue; /* Skip check */ + for (i = 0; i < NB_MAX_MDEVICES; i++) { /* Search all known memory devices */ + if (!(mdev[i].mdFlags & mdInited)) { /* Is this a free one? */ + if (devid < 0) { + devid = i; /* Remember first free one */ + } + continue; /* Skip check */ } - if(!(((base + size -1 ) < mdev[i].mdBase) || ((mdev[i].mdBase + mdev[i].mdSize - 1) < base))) { /* Is there any overlap? */ + if (!(((base + size - 1) < mdev[i].mdBase) || ((mdev[i].mdBase + mdev[i].mdSize - 1) < base))) { /* Is there any overlap? */ panic("mdevadd: attempt to add overlapping memory device at %016llX-%016llX\n", mdev[i].mdBase, mdev[i].mdBase + mdev[i].mdSize - 1); } } - if(devid < 0) { /* Do we have free slots? */ + if (devid < 0) { /* Do we have free slots? */ panic("mdevadd: attempt to add more than %d memory devices\n", NB_MAX_MDEVICES); } - } - else { - if(devid >= NB_MAX_MDEVICES) { /* Giving us something bogus? */ + } else { + if (devid >= NB_MAX_MDEVICES) { /* Giving us something bogus? */ panic("mdevadd: attempt to explicitly add a bogus memory device: %08X\n", devid); } - if(mdev[devid].mdFlags & mdInited) { /* Already there? */ + if (mdev[devid].mdFlags & mdInited) { /* Already there? */ panic("mdevadd: attempt to explicitly add a previously defined memory device: %08X\n", devid); } } - - if(mdevBMajor < 0) { /* Have we gotten a major number yet? */ - mdevBMajor = bdevsw_add(-1, &mdevbdevsw); /* Add to the table and figure out a major number */ + + if (mdevBMajor < 0) { /* Have we gotten a major number yet? */ + mdevBMajor = bdevsw_add(-1, &mdevbdevsw); /* Add to the table and figure out a major number */ if (mdevBMajor < 0) { printf("mdevadd: error - bdevsw_add() returned %d\n", mdevBMajor); return -1; } } - - if(mdevCMajor < 0) { /* Have we gotten a major number yet? */ - mdevCMajor = cdevsw_add_with_bdev(-1, &mdevcdevsw, mdevBMajor); /* Add to the table and figure out a major number */ + + if (mdevCMajor < 0) { /* Have we gotten a major number yet? 
*/ + mdevCMajor = cdevsw_add_with_bdev(-1, &mdevcdevsw, mdevBMajor); /* Add to the table and figure out a major number */ if (mdevCMajor < 0) { printf("ramdevice_init: error - cdevsw_add() returned %d\n", mdevCMajor); return -1; } } - mdev[devid].mdBDev = makedev(mdevBMajor, devid); /* Get the device number */ - mdev[devid].mdbdevb = devfs_make_node(mdev[devid].mdBDev, DEVFS_BLOCK, /* Make the node */ - UID_ROOT, GID_OPERATOR, - 0600, "md%d", devid); - if (mdev[devid].mdbdevb == NULL) { /* Did we make one? */ + mdev[devid].mdBDev = makedev(mdevBMajor, devid); /* Get the device number */ + mdev[devid].mdbdevb = devfs_make_node(mdev[devid].mdBDev, DEVFS_BLOCK, /* Make the node */ + UID_ROOT, GID_OPERATOR, + 0600, "md%d", devid); + if (mdev[devid].mdbdevb == NULL) { /* Did we make one? */ printf("mdevadd: devfs_make_node for block failed!\n"); - return -1; /* Nope... */ + return -1; /* Nope... */ } - mdev[devid].mdCDev = makedev(mdevCMajor, devid); /* Get the device number */ - mdev[devid].mdcdevb = devfs_make_node(mdev[devid].mdCDev, DEVFS_CHAR, /* Make the node */ - UID_ROOT, GID_OPERATOR, - 0600, "rmd%d", devid); - if (mdev[devid].mdcdevb == NULL) { /* Did we make one? */ + mdev[devid].mdCDev = makedev(mdevCMajor, devid); /* Get the device number */ + mdev[devid].mdcdevb = devfs_make_node(mdev[devid].mdCDev, DEVFS_CHAR, /* Make the node */ + UID_ROOT, GID_OPERATOR, + 0600, "rmd%d", devid); + if (mdev[devid].mdcdevb == NULL) { /* Did we make one? */ printf("mdevadd: devfs_make_node for character failed!\n"); - return -1; /* Nope... */ - } - - mdev[devid].mdBase = base; /* Set the base address of ram disk */ - mdev[devid].mdSize = size; /* Set the length of the ram disk */ - mdev[devid].mdSecsize = DEV_BSIZE; /* Set starting block size */ - if(phys) mdev[devid].mdFlags |= mdPhys; /* Show that we are in physical memory */ - mdev[devid].mdFlags |= mdInited; /* Show we are all set up */ - printf("Added memory device md%x/rmd%x (%08X/%08X) at %016llX for %016llX\n", - devid, devid, mdev[devid].mdBDev, mdev[devid].mdCDev, base << 12, (uint64_t)size << 12); + return -1; /* Nope... 
*/ + } + + mdev[devid].mdBase = base; /* Set the base address of ram disk */ + mdev[devid].mdSize = size; /* Set the length of the ram disk */ + mdev[devid].mdSecsize = DEV_BSIZE; /* Set starting block size */ + if (phys) { + mdev[devid].mdFlags |= mdPhys; /* Show that we are in physical memory */ + } + mdev[devid].mdFlags |= mdInited; /* Show we are all set up */ + printf("Added memory device md%x/rmd%x (%08X/%08X) at %016llX for %016llX\n", + devid, devid, mdev[devid].mdBDev, mdev[devid].mdCDev, base << 12, (uint64_t)size << 12); return mdev[devid].mdBDev; } -dev_t mdevlookup(int devid) { - - if((devid < 0) || (devid >= NB_MAX_MDEVICES)) return -1; /* Filter any bogus requests */ - if(!(mdev[devid].mdFlags & mdInited)) return -1; /* This one hasn't been defined */ - return mdev[devid].mdBDev; /* Return the device number */ +dev_t +mdevlookup(int devid) +{ + if ((devid < 0) || (devid >= NB_MAX_MDEVICES)) { + return -1; /* Filter any bogus requests */ + } + if (!(mdev[devid].mdFlags & mdInited)) { + return -1; /* This one hasn't been defined */ + } + return mdev[devid].mdBDev; /* Return the device number */ } -void mdevremoveall(void) { - +void +mdevremoveall(void) +{ int i; - for(i = 0; i < NB_MAX_MDEVICES; i++) { - if(!(mdev[i].mdFlags & mdInited)) continue; /* Ignore unused mdevs */ - - devfs_remove(mdev[i].mdbdevb); /* Remove the block device */ - devfs_remove(mdev[i].mdcdevb); /* Remove the character device */ + for (i = 0; i < NB_MAX_MDEVICES; i++) { + if (!(mdev[i].mdFlags & mdInited)) { + continue; /* Ignore unused mdevs */ + } + devfs_remove(mdev[i].mdbdevb); /* Remove the block device */ + devfs_remove(mdev[i].mdcdevb); /* Remove the character device */ - mdev[i].mdBase = 0; /* Clear the mdev's storage */ + mdev[i].mdBase = 0; /* Clear the mdev's storage */ mdev[i].mdSize = 0; mdev[i].mdSecsize = 0; mdev[i].mdFlags = 0; @@ -684,4 +758,3 @@ mdevgetrange(int devid, uint64_t *base, uint64_t *size) return 0; } - diff --git a/bsd/dev/memdev.h b/bsd/dev/memdev.h index bcf4e05ec..a8472296c 100644 --- a/bsd/dev/memdev.h +++ b/bsd/dev/memdev.h @@ -1,4 +1,3 @@ - #ifndef _SYS_MEMDEV_H_ #define _SYS_MEMDEV_H_ @@ -14,4 +13,4 @@ void mdevinit(vm_offset_t base, unsigned int size); #endif /* KERNEL_PRIVATE */ -#endif /* _SYS_MEMDEV_H_*/ +#endif /* _SYS_MEMDEV_H_*/ diff --git a/bsd/dev/monotonic.c b/bsd/dev/monotonic.c index 375a0ca4e..4a320cbbb 100644 --- a/bsd/dev/monotonic.c +++ b/bsd/dev/monotonic.c @@ -43,7 +43,7 @@ static int mt_cdev_open(dev_t dev, int flags, int devtype, proc_t p); static int mt_cdev_close(dev_t dev, int flags, int devtype, proc_t p); static int mt_cdev_ioctl(dev_t dev, unsigned long cmd, char *uptr, int fflag, - proc_t p); + proc_t p); #define MT_NODE "monotonic" @@ -116,10 +116,10 @@ mt_dev_init(void) char name[128]; snprintf(name, sizeof(name), MT_NODE "/%s", mt_devices[i].mtd_name); void *node = devfs_make_node(dev, DEVFS_CHAR, UID_ROOT, - GID_WINDOWSERVER, 0666, name); + GID_WINDOWSERVER, 0666, name); if (!node) { panic("monotonic: devfs_make_node failed for '%s'", - mt_devices[i].mtd_name); + mt_devices[i].mtd_name); __builtin_unreachable(); } @@ -131,7 +131,7 @@ mt_dev_init(void) static int mt_cdev_open(dev_t devnum, __unused int flags, __unused int devtype, - __unused proc_t p) + __unused proc_t p) { int error = 0; @@ -149,7 +149,7 @@ mt_cdev_open(dev_t devnum, __unused int flags, __unused int devtype, static int mt_cdev_close(dev_t devnum, __unused int flags, __unused int devtype, - __unused struct proc *p) + __unused struct proc *p) { mt_device_t dev = 
mt_get_device(devnum); @@ -211,7 +211,7 @@ mt_ctl_counts(mt_device_t dev, user_addr_t uptr) { uint64_t counts[dev->mtd_nmonitors][dev->mtd_ncounters]; memset(counts, 0, - dev->mtd_ncounters * dev->mtd_nmonitors * sizeof(counts[0][0])); + dev->mtd_ncounters * dev->mtd_nmonitors * sizeof(counts[0][0])); error = dev->mtd_read(ctl.in.ctr_mask, (uint64_t *)counts); if (error) { return error; @@ -254,7 +254,7 @@ mt_ctl_reset(mt_device_t dev) static int mt_cdev_ioctl(dev_t devnum, unsigned long cmd, char *arg, __unused int flags, - __unused proc_t p) + __unused proc_t p) { int error = ENODEV; user_addr_t uptr = *(user_addr_t *)(void *)arg; @@ -300,8 +300,9 @@ mt_cdev_ioctl(dev_t devnum, unsigned long cmd, char *arg, __unused int flags, return error; } -int thread_selfcounts(__unused struct proc *p, - struct thread_selfcounts_args *uap, __unused int *ret_out) +int +thread_selfcounts(__unused struct proc *p, + struct thread_selfcounts_args *uap, __unused int *ret_out) { switch (uap->type) { case 1: { @@ -416,28 +417,31 @@ copyout_counts: SYSCTL_DECL(_kern_monotonic); SYSCTL_NODE(_kern, OID_AUTO, monotonic, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "monotonic"); - -#define MT_SYSCTL(NAME, ARG, SIZE, SIZESTR, DESC) \ - SYSCTL_PROC(_kern_monotonic, OID_AUTO, NAME, \ - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MASKED | CTLFLAG_LOCKED, \ - (void *)(ARG), SIZE, mt_sysctl, SIZESTR, DESC) - -MT_SYSCTL(supported, MT_SUPPORTED, sizeof(int), "I", - "whether monotonic is supported"); -MT_SYSCTL(debug, MT_DEBUG, sizeof(int), "I", - "whether monotonic is printing debug messages"); -MT_SYSCTL(pmis, MT_PMIS, sizeof(uint64_t), "Q", - "number of PMIs seen"); -MT_SYSCTL(retrograde_updates, MT_RETROGRADE, sizeof(uint64_t), "Q", - "number of times a counter appeared to go backwards"); -MT_SYSCTL(task_thread_counting, MT_TASK_THREAD, sizeof(int), "I", - "whether task and thread counting is enabled"); -MT_SYSCTL(kdebug_test, MT_KDBG_TEST, sizeof(int), "O", - "whether task and thread counting is enabled"); -MT_SYSCTL(fixed_cpu_perf, MT_FIX_CPU_PERF, sizeof(uint64_t) * 2, "O", - "overhead of accessing the current CPU's counters"); -MT_SYSCTL(fixed_thread_perf, MT_FIX_THREAD_PERF, sizeof(uint64_t) * 2, "O", - "overhead of accessing the current thread's counters"); -MT_SYSCTL(fixed_task_perf, MT_FIX_TASK_PERF, sizeof(uint64_t) * 2, "O", - "overhead of accessing the current task's counters"); + "monotonic"); + +#define MT_SYSCTL(NAME, ARG, FLAGS, SIZE, SIZESTR, DESC) \ + SYSCTL_PROC(_kern_monotonic, OID_AUTO, NAME, \ + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | (FLAGS), \ + (void *)(ARG), SIZE, mt_sysctl, SIZESTR, DESC) + +MT_SYSCTL(supported, MT_SUPPORTED, 0, sizeof(int), "I", + "whether monotonic is supported"); +MT_SYSCTL(debug, MT_DEBUG, CTLFLAG_MASKED, sizeof(int), "I", + "whether monotonic is printing debug messages"); +MT_SYSCTL(pmis, MT_PMIS, 0, sizeof(uint64_t), "Q", + "number of PMIs seen"); +MT_SYSCTL(retrograde_updates, MT_RETROGRADE, 0, sizeof(uint64_t), "Q", + "number of times a counter appeared to go backwards"); +MT_SYSCTL(task_thread_counting, MT_TASK_THREAD, 0, sizeof(int), "I", + "whether task and thread counting is enabled"); +MT_SYSCTL(kdebug_test, MT_KDBG_TEST, CTLFLAG_MASKED, sizeof(int), "O", + "whether task and thread counting is enabled"); +MT_SYSCTL(fixed_cpu_perf, MT_FIX_CPU_PERF, CTLFLAG_MASKED, + sizeof(uint64_t) * 2, "O", + "overhead of accessing the current CPU's counters"); +MT_SYSCTL(fixed_thread_perf, MT_FIX_THREAD_PERF, CTLFLAG_MASKED, + sizeof(uint64_t) * 2, "O", + "overhead of accessing the current 
thread's counters"); +MT_SYSCTL(fixed_task_perf, MT_FIX_TASK_PERF, CTLFLAG_MASKED, + sizeof(uint64_t) * 2, "O", + "overhead of accessing the current task's counters"); diff --git a/bsd/dev/munge.c b/bsd/dev/munge.c index e44a638d6..7d2433cd1 100644 --- a/bsd/dev/munge.c +++ b/bsd/dev/munge.c @@ -2,7 +2,7 @@ * Coyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,58 +35,58 @@ * Refer to comments in bsd/sys/munge.h */ -static inline __attribute__((always_inline)) void +static inline __attribute__((always_inline)) void munge_32_to_64_unsigned(volatile uint64_t *dest, volatile uint32_t *src, int count); -void +void munge_w(void *args) { munge_32_to_64_unsigned(args, args, 1); } -void +void munge_ww(void *args) { munge_32_to_64_unsigned(args, args, 2); } -void +void munge_www(void *args) { munge_32_to_64_unsigned(args, args, 3); } -void +void munge_wwww(void *args) { munge_32_to_64_unsigned(args, args, 4); } -void +void munge_wwwww(void *args) { munge_32_to_64_unsigned(args, args, 5); } -void +void munge_wwwwww(void *args) { munge_32_to_64_unsigned(args, args, 6); } -void +void munge_wwwwwww(void *args) { munge_32_to_64_unsigned(args, args, 7); } -void +void munge_wwwwwwww(void *args) { munge_32_to_64_unsigned(args, args, 8); } -void +void munge_wl(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -96,7 +96,7 @@ munge_wl(void *args) out_args[0] = in_args[0]; } -void +void munge_wwl(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -107,7 +107,7 @@ munge_wwl(void *args) out_args[0] = in_args[0]; } -void +void munge_wwlw(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -118,7 +118,7 @@ munge_wwlw(void *args) out_args[1] = in_args[1]; out_args[0] = in_args[0]; } -void +void munge_wwlll(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -193,7 +193,7 @@ munge_wlwwwllw(void *args) munge_wlwwwll(args); } -void +void munge_wlwwlwlw(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -209,7 +209,7 @@ munge_wlwwlwlw(void *args) out_args[0] = in_args[0]; } -void +void munge_wll(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -220,7 +220,7 @@ munge_wll(void *args) out_args[0] = in_args[0]; } -void +void munge_wlll(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -232,7 +232,7 @@ munge_wlll(void *args) out_args[0] = in_args[0]; } -void +void munge_wllll(void *args) { 
volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -258,7 +258,7 @@ munge_wllww(void *args) out_args[0] = in_args[0]; } -void +void munge_wllwwll(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -273,7 +273,7 @@ munge_wllwwll(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwlw(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -286,7 +286,7 @@ munge_wwwlw(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwlww(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -299,8 +299,8 @@ munge_wwwlww(void *args) out_args[1] = in_args[1]; out_args[0] = in_args[0]; } - -void + +void munge_wwwl(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -312,7 +312,7 @@ munge_wwwl(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwwlw(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -326,7 +326,7 @@ munge_wwwwlw(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwwl(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -339,7 +339,7 @@ munge_wwwwl(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwwwl(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -353,7 +353,7 @@ munge_wwwwwl(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwwwlww(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -369,7 +369,7 @@ munge_wwwwwlww(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwwwllw(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -385,7 +385,7 @@ munge_wwwwwllw(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwwwlll(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -401,7 +401,7 @@ munge_wwwwwlll(void *args) out_args[0] = in_args[0]; } -void +void munge_wwwwwwl(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -415,8 +415,8 @@ munge_wwwwwwl(void *args) out_args[1] = in_args[1]; out_args[0] = in_args[0]; } - -void + +void munge_wwwwwwlw(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -431,8 +431,8 @@ munge_wwwwwwlw(void *args) out_args[1] = in_args[1]; out_args[0] = in_args[0]; } - -void + +void munge_wwwwwwll(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -448,7 +448,7 @@ munge_wwwwwwll(void *args) out_args[0] = in_args[0]; } -void +void munge_wsw(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -459,7 +459,7 @@ munge_wsw(void *args) out_args[0] = in_args[0]; } -void +void munge_wws(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -470,7 +470,7 @@ munge_wws(void *args) out_args[0] = in_args[0]; } -void +void munge_wwws(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -496,25 +496,25 @@ munge_wwwsw(void *args) out_args[0] = in_args[0]; } -void +void munge_llllll(void *args __unused) { /* Nothing to do, already all 64-bit */ } -void +void munge_ll(void *args __unused) { /* Nothing to do, already all 64-bit */ } -void +void munge_l(void *args __unused) { /* Nothing to do, already all 64-bit */ } -void +void munge_lw(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; @@ -524,29 +524,29 @@ munge_lw(void *args) out_args[0] = *(volatile uint64_t*)&in_args[0]; } -void +void munge_lwww(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; volatile uint32_t *in_args = (volatile uint32_t*)args; - out_args[3] = in_args[4]; + out_args[3] = 
in_args[4]; out_args[2] = in_args[3]; out_args[1] = in_args[2]; out_args[0] = *(volatile uint64_t*)&in_args[0]; } -void +void munge_lwwwwwww(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; volatile uint32_t *in_args = (volatile uint32_t*)args; out_args[7] = in_args[8]; - out_args[6] = in_args[7]; + out_args[6] = in_args[7]; out_args[5] = in_args[6]; out_args[4] = in_args[5]; - out_args[3] = in_args[4]; + out_args[3] = in_args[4]; out_args[2] = in_args[3]; out_args[1] = in_args[2]; out_args[0] = *(volatile uint64_t*)&in_args[0]; @@ -570,7 +570,7 @@ munge_wwlwww(void *args) { volatile uint64_t *out_args = (volatile uint64_t*)args; volatile uint32_t *in_args = (volatile uint32_t*)args; - + out_args[5] = in_args[6]; out_args[4] = in_args[5]; out_args[3] = in_args[4]; @@ -610,15 +610,15 @@ munge_wwlwwwl(void *args) /* * Munge array of 32-bit values into an array of 64-bit values, - * without sign extension. Note, src and dest can be the same + * without sign extension. Note, src and dest can be the same * (copies from end of array) */ -static inline __attribute__((always_inline)) void +static inline __attribute__((always_inline)) void munge_32_to_64_unsigned(volatile uint64_t *dest, volatile uint32_t *src, int count) { int i; for (i = count - 1; i >= 0; i--) { dest[i] = src[i]; - } + } } diff --git a/bsd/dev/random/randomdev.c b/bsd/dev/random/randomdev.c index 6482f6094..7e5e10e3e 100644 --- a/bsd/dev/random/randomdev.c +++ b/bsd/dev/random/randomdev.c @@ -62,20 +62,20 @@ d_ioctl_t random_ioctl; */ static struct cdevsw random_cdevsw = { - random_open, /* open */ - random_close, /* close */ - random_read, /* read */ - random_write, /* write */ - random_ioctl, /* ioctl */ + random_open, /* open */ + random_close, /* close */ + random_read, /* read */ + random_write, /* write */ + random_ioctl, /* ioctl */ (stop_fcn_t *)nulldev, /* stop */ (reset_fcn_t *)nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + NULL, /* tty's */ + eno_select, /* select */ + eno_mmap, /* mmap */ + eno_strat, /* strategy */ + eno_getc, /* getc */ + eno_putc, /* putc */ + 0 /* type */ }; @@ -93,21 +93,20 @@ random_init(void) panic("random_init: failed to allocate a major number!"); } - devfs_make_node(makedev (ret, RANDOM_MINOR), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0666, "random", 0); + devfs_make_node(makedev(ret, RANDOM_MINOR), DEVFS_CHAR, + UID_ROOT, GID_WHEEL, 0666, "random", 0); /* * also make urandom * (which is exactly the same thing in our context) */ - devfs_make_node(makedev (ret, URANDOM_MINOR), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0666, "urandom", 0); - + devfs_make_node(makedev(ret, URANDOM_MINOR), DEVFS_CHAR, + UID_ROOT, GID_WHEEL, 0666, "urandom", 0); } int -random_ioctl( __unused dev_t dev, u_long cmd, __unused caddr_t data, - __unused int flag, __unused struct proc *p ) +random_ioctl( __unused dev_t dev, u_long cmd, __unused caddr_t data, + __unused int flag, __unused struct proc *p ) { switch (cmd) { case FIONBIO: @@ -117,7 +116,7 @@ random_ioctl( __unused dev_t dev, u_long cmd, __unused caddr_t data, return ENODEV; } - return (0); + return 0; } /* @@ -133,15 +132,17 @@ random_open(__unused dev_t dev, int flags, __unused int devtype, __unused struct * make sure that we have privledges do so */ if (flags & FWRITE) { - if (securelevel >= 2) - return (EPERM); + if (securelevel >= 2) { + return EPERM; + } #ifndef __APPLE__ - if ((securelevel >= 1) && 
proc_suser(p)) - return (EPERM); -#endif /* !__APPLE__ */ + if ((securelevel >= 1) && proc_suser(p)) { + return EPERM; + } +#endif /* !__APPLE__ */ } - return (0); + return 0; } @@ -152,7 +153,7 @@ random_open(__unused dev_t dev, int flags, __unused int devtype, __unused struct int random_close(__unused dev_t dev, __unused int flags, __unused int mode, __unused struct proc *p) { - return (0); + return 0; } @@ -161,29 +162,32 @@ random_close(__unused dev_t dev, __unused int flags, __unused int mode, __unused * prng. */ int -random_write (dev_t dev, struct uio *uio, __unused int ioflag) +random_write(dev_t dev, struct uio *uio, __unused int ioflag) { - int retCode = 0; - char rdBuffer[256]; - - if (minor(dev) != RANDOM_MINOR) - return EPERM; - - /* Security server is sending us entropy */ - - while (uio_resid(uio) > 0 && retCode == 0) { - /* get the user's data */ - int bytesToInput = MIN(uio_resid(uio), - (user_ssize_t) sizeof(rdBuffer)); - retCode = uiomove(rdBuffer, bytesToInput, uio); - if (retCode != 0) - break; - retCode = write_random(rdBuffer, bytesToInput); - if (retCode != 0) - break; - } - - return retCode; + int retCode = 0; + char rdBuffer[256]; + + if (minor(dev) != RANDOM_MINOR) { + return EPERM; + } + + /* Security server is sending us entropy */ + + while (uio_resid(uio) > 0 && retCode == 0) { + /* get the user's data */ + int bytesToInput = MIN(uio_resid(uio), + (user_ssize_t) sizeof(rdBuffer)); + retCode = uiomove(rdBuffer, bytesToInput, uio); + if (retCode != 0) { + break; + } + retCode = write_random(rdBuffer, bytesToInput); + if (retCode != 0) { + break; + } + } + + return retCode; } /* @@ -198,12 +202,13 @@ random_read(__unused dev_t dev, struct uio *uio, __unused int ioflag) user_ssize_t bytes_remaining = uio_resid(uio); while (bytes_remaining > 0 && retCode == 0) { int bytesToRead = MIN(bytes_remaining, - (user_ssize_t) sizeof(buffer)); + (user_ssize_t) sizeof(buffer)); read_random(buffer, bytesToRead); retCode = uiomove(buffer, bytesToRead, uio); - if (retCode != 0) + if (retCode != 0) { break; + } bytes_remaining = uio_resid(uio); } @@ -218,13 +223,14 @@ u_int32_t RandomULong(void) { u_int32_t buf; - read_random(&buf, sizeof (buf)); - return (buf); + read_random(&buf, sizeof(buf)); + return buf; } int -getentropy(__unused struct proc * p, struct getentropy_args *gap, __unused int * ret) { +getentropy(__unused struct proc * p, struct getentropy_args *gap, __unused int * ret) +{ user_addr_t user_addr; uint32_t user_size; char buffer[256]; diff --git a/bsd/dev/random/randomdev.h b/bsd/dev/random/randomdev.h index 2d3b1a33a..f3e3e3bca 100644 --- a/bsd/dev/random/randomdev.h +++ b/bsd/dev/random/randomdev.h @@ -2,7 +2,7 @@ * Copyright (c) 1999, 2000-2002, 2009 Apple, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,20 +22,20 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */

 /*
- WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!
-
- THIS FILE IS NEEDED TO PASS FIPS ACCEPTANCE FOR THE RANDOM NUMBER GENERATOR.
- IF YOU ALTER IT IN ANY WAY, WE WILL NEED TO GO THROUGH FIPS ACCEPTANCE AGAIN,
- AN OPERATION THAT IS VERY EXPENSIVE AND TIME CONSUMING. IN OTHER WORDS,
- DON'T MESS WITH THIS FILE.
-
- WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!
-*/
+ * WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!
+ *
+ * THIS FILE IS NEEDED TO PASS FIPS ACCEPTANCE FOR THE RANDOM NUMBER GENERATOR.
+ * IF YOU ALTER IT IN ANY WAY, WE WILL NEED TO GO THROUGH FIPS ACCEPTANCE AGAIN,
+ * AN OPERATION THAT IS VERY EXPENSIVE AND TIME CONSUMING. IN OTHER WORDS,
+ * DON'T MESS WITH THIS FILE.
+ *
+ * WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!
+ */

 #ifndef __DEV_RANDOMDEV_H__
 #define __DEV_RANDOMDEV_H__
@@ -57,4 +57,3 @@ u_int32_t RandomULong( void );
 #endif /* __APPLE_API_PRIVATE */

 #endif /* __DEV_RANDOMDEV_H__ */
-
diff --git a/bsd/dev/unix_startup.c b/bsd/dev/unix_startup.c
index a63db5b5f..439eb25f9 100644
--- a/bsd/dev/unix_startup.c
+++ b/bsd/dev/unix_startup.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -64,7 +64,7 @@ extern uint32_t tcp_recvspace;

 void bsd_bufferinit(void);

-unsigned int bsd_mbuf_cluster_reserve(boolean_t *);
+unsigned int	bsd_mbuf_cluster_reserve(boolean_t *);
 void bsd_scale_setup(int);
 void bsd_exec_setup(int);

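The bsd_startupearly() hunk below is mostly brace normalization, but the sizing policy it reflows is worth stating plainly: buffer headers default to 2% of RAM in pages (sane_size / 50, converted with atop_kernel), clipped to 16384 unless a custom nbuf= boot-arg was given, and I/O buffer headers default to half that plus 2048. A user-space sketch of the arithmetic, assuming 4 KB pages and illustrative floor constants in place of xnu's CONFIG_MIN_NBUF / CONFIG_MIN_NIOBUF (which vary by configuration):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12		/* assume 4 KB pages */
#define MIN_NBUF	256		/* illustrative floor, not xnu's value */
#define MIN_NIOBUF	128		/* illustrative floor, not xnu's value */

static int
nbuf_for_memsize(uint64_t sane_size, int customnbuf)
{
	int nbuf = (int)((sane_size / 50) >> PAGE_SHIFT);	/* 2% of RAM, in pages */

	if (!customnbuf && nbuf > 16384) {
		nbuf = 16384;			/* clip to 16k unless overridden */
	}
	if (nbuf < MIN_NBUF) {
		nbuf = MIN_NBUF;
	}
	return nbuf;
}

int
main(void)
{
	uint64_t gb;

	for (gb = 1; gb <= 32; gb <<= 1) {
		int nbuf = nbuf_for_memsize(gb << 30, 0);
		int niobuf = (nbuf < 4096) ? nbuf : (nbuf / 2) + 2048;

		if (niobuf < MIN_NIOBUF) {
			niobuf = MIN_NIOBUF;
		}
		printf("%2llu GB -> %5d buf headers, %5d iobuf headers\n",
		    (unsigned long long)gb, nbuf, niobuf);
	}
	return 0;
}

At 1 GB this yields about 5243 buffer headers; at 4 GB and beyond the 16384 clip dominates, which is why the kmem_suballoc() below sizes bufferhdr_map from the clipped totals.
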
@@ -72,23 +72,23 @@ void bsd_exec_setup(int);
  * Declare these as initialized data so we can patch them.
  */

-#ifdef	NBUF
+#ifdef NBUF
 int             max_nbuf_headers = NBUF;
 int             niobuf_headers = (NBUF / 2) + 2048;
-int 		nbuf_hashelements = NBUF;
-int 		nbuf_headers = NBUF;
+int             nbuf_hashelements = NBUF;
+int             nbuf_headers = NBUF;
 #else
 int             max_nbuf_headers = 0;
 int             niobuf_headers = 0;
-int 		nbuf_hashelements = 0;
-int		nbuf_headers = 0;
+int             nbuf_hashelements = 0;
+int             nbuf_headers = 0;
 #endif

-SYSCTL_INT (_kern, OID_AUTO, nbuf, CTLFLAG_RD | CTLFLAG_LOCKED, &nbuf_headers, 0, "");
-SYSCTL_INT (_kern, OID_AUTO, maxnbuf, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, &max_nbuf_headers, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, nbuf, CTLFLAG_RD | CTLFLAG_LOCKED, &nbuf_headers, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, maxnbuf, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_KERN, &max_nbuf_headers, 0, "");

 __private_extern__ int customnbuf = 0;
-int serverperfmode = 0;		/* Flag indicates a server boot when set */
+int serverperfmode = 0;         /* Flag indicates a server boot when set */
 int ncl = 0;

 #if SOCKETS
@@ -109,61 +109,70 @@ bsd_startupearly(void)
 	kern_return_t ret;

 	/* clip the number of buf headers up to 16k */
-	if (max_nbuf_headers == 0)
-		max_nbuf_headers = atop_kernel(sane_size / 50);	/* Get 2% of ram, but no more than we can map */
-	if ((customnbuf == 0) && (max_nbuf_headers > 16384))
+	if (max_nbuf_headers == 0) {
+		max_nbuf_headers = atop_kernel(sane_size / 50); /* Get 2% of ram, but no more than we can map */
+	}
+	if ((customnbuf == 0) && (max_nbuf_headers > 16384)) {
 		max_nbuf_headers = 16384;
-	if (max_nbuf_headers < CONFIG_MIN_NBUF)
+	}
+	if (max_nbuf_headers < CONFIG_MIN_NBUF) {
 		max_nbuf_headers = CONFIG_MIN_NBUF;
+	}

 	/* clip the number of hash elements to 200000 */
-	if ( (customnbuf == 0 ) && nbuf_hashelements == 0) {
+	if ((customnbuf == 0) && nbuf_hashelements == 0) {
 		nbuf_hashelements = atop_kernel(sane_size / 50);
-		if (nbuf_hashelements > 200000)
+		if (nbuf_hashelements > 200000) {
 			nbuf_hashelements = 200000;
-	} else
+		}
+	} else {
 		nbuf_hashelements = max_nbuf_headers;
+	}

 	if (niobuf_headers == 0) {
-		if (max_nbuf_headers < 4096)
+		if (max_nbuf_headers < 4096) {
 			niobuf_headers = max_nbuf_headers;
-		else
+		} else {
 			niobuf_headers = (max_nbuf_headers / 2) + 2048;
+		}
 	}
-	if (niobuf_headers < CONFIG_MIN_NIOBUF)
+	if (niobuf_headers < CONFIG_MIN_NIOBUF) {
 		niobuf_headers = CONFIG_MIN_NIOBUF;
+	}

 	size = (max_nbuf_headers + niobuf_headers) * sizeof(struct buf);
 	size = round_page(size);

 	ret = kmem_suballoc(kernel_map,
-			    &firstaddr,
-			    size,
-			    FALSE,
-			    VM_FLAGS_ANYWHERE,
-			    VM_MAP_KERNEL_FLAGS_NONE,
-			    VM_KERN_MEMORY_FILE,
-			    &bufferhdr_map);
-
-	if (ret != KERN_SUCCESS)
+	    &firstaddr,
+	    size,
+	    FALSE,
+	    VM_FLAGS_ANYWHERE,
+	    VM_MAP_KERNEL_FLAGS_NONE,
+	    VM_KERN_MEMORY_FILE,
+	    &bufferhdr_map);
+
+	if (ret != KERN_SUCCESS) {
 		panic("Failed to create bufferhdr_map");
+	}

 	ret = kernel_memory_allocate(bufferhdr_map,
-				     &firstaddr,
-				     size,
-				     0,
-				     KMA_HERE | KMA_KOBJECT,
-				     VM_KERN_MEMORY_FILE);
+	    &firstaddr,
+	    size,
+	    0,
+	    KMA_HERE | KMA_KOBJECT,
+	    VM_KERN_MEMORY_FILE);

-	if (ret != KERN_SUCCESS)
+	if (ret != KERN_SUCCESS) {
 		panic("Failed to allocate bufferhdr_map");
+	}

 	buf_headers = (struct buf *) firstaddr;
 	bzero(buf_headers, size);

 #if SOCKETS
 	{
-		static const unsigned int	maxspace = 128 * 1024;
+		static const unsigned int maxspace = 128 * 1024;
 		int scale;

 		nmbclusters = bsd_mbuf_cluster_reserve(NULL) / MCLBYTES;
@@ -173,10 +182,12 @@ bsd_startupearly(void)
 			tcp_sendspace *= scale;
 			tcp_recvspace *= scale;

-			if (tcp_sendspace > maxspace)
+			if (tcp_sendspace > maxspace) {
 				tcp_sendspace = maxspace;
-			if (tcp_recvspace > maxspace)
+			}
+			if 
(tcp_recvspace > maxspace) { tcp_recvspace = maxspace; + } } #endif /* INET || INET6 */ } @@ -185,18 +196,19 @@ bsd_startupearly(void) if (vnodes_sized == 0) { if (!PE_get_default("kern.maxvnodes", &desiredvnodes, sizeof(desiredvnodes))) { /* - * Size vnodes based on memory - * Number vnodes is (memsize/64k) + 1024 + * Size vnodes based on memory + * Number vnodes is (memsize/64k) + 1024 * This is the calculation that is used by launchd in tiger - * we are clipping the max based on 16G + * we are clipping the max based on 16G * ie ((16*1024*1024*1024)/(64 *1024)) + 1024 = 263168; * CONFIG_VNODES is set to 263168 for "medium" configurations (the default) - * but can be smaller or larger. + * but can be smaller or larger. */ - desiredvnodes = (sane_size/65536) + 1024; + desiredvnodes = (sane_size / 65536) + 1024; #ifdef CONFIG_VNODES - if (desiredvnodes > CONFIG_VNODES) - desiredvnodes = CONFIG_VNODES; + if (desiredvnodes > CONFIG_VNODES) { + desiredvnodes = CONFIG_VNODES; + } #endif } vnodes_sized = 1; @@ -218,16 +230,17 @@ bsd_bufferinit(void) #if SOCKETS ret = kmem_suballoc(kernel_map, - (vm_offset_t *) &mbutl, - (vm_size_t) (nmbclusters * MCLBYTES), - FALSE, - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_MBUF, - &mb_map); - - if (ret != KERN_SUCCESS) + (vm_offset_t *) &mbutl, + (vm_size_t) (nmbclusters * MCLBYTES), + FALSE, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_MBUF, + &mb_map); + + if (ret != KERN_SUCCESS) { panic("Failed to allocate mb_map\n"); + } #endif /* SOCKETS */ /* @@ -238,11 +251,11 @@ bsd_bufferinit(void) /* 512 MB (K32) or 2 GB (K64) hard limit on size of the mbuf pool */ #if !defined(__LP64__) -#define MAX_MBUF_POOL (512 << MBSHIFT) +#define MAX_MBUF_POOL (512 << MBSHIFT) #else -#define MAX_MBUF_POOL (2ULL << GBSHIFT) +#define MAX_MBUF_POOL (2ULL << GBSHIFT) #endif /* !__LP64__ */ -#define MAX_NCL (MAX_MBUF_POOL >> MCLSHIFT) +#define MAX_NCL (MAX_MBUF_POOL >> MCLSHIFT) #if SOCKETS /* @@ -259,8 +272,9 @@ bsd_mbuf_cluster_reserve(boolean_t *overridden) static boolean_t was_overridden = FALSE; /* If called more than once, return the previously calculated size */ - if (mbuf_poolsz != 0) + if (mbuf_poolsz != 0) { goto done; + } /* * Some of these are parsed in parse_bsd_args(), but for x86 we get @@ -268,32 +282,35 @@ bsd_mbuf_cluster_reserve(boolean_t *overridden) * to correctly compute the size of the low-memory VM pool. It is * redundant but rather harmless. */ - (void) PE_parse_boot_argn("ncl", &ncl, sizeof (ncl)); - (void) PE_parse_boot_argn("mbuf_pool", &mbuf_pool, sizeof (mbuf_pool)); + (void) PE_parse_boot_argn("ncl", &ncl, sizeof(ncl)); + (void) PE_parse_boot_argn("mbuf_pool", &mbuf_pool, sizeof(mbuf_pool)); /* * Convert "mbuf_pool" from MB to # of 2KB clusters; it is * equivalent to "ncl", except that it uses different unit. 
*/ - if (mbuf_pool != 0) + if (mbuf_pool != 0) { ncl = (mbuf_pool << MBSHIFT) >> MCLSHIFT; + } - if (sane_size > (64 * 1024 * 1024) || ncl != 0) { - - if (ncl || serverperfmode) + if (sane_size > (64 * 1024 * 1024) || ncl != 0) { + if (ncl || serverperfmode) { was_overridden = TRUE; + } - if ((nmbclusters = ncl) == 0) { + if ((nmbclusters = ncl) == 0) { /* Auto-configure the mbuf pool size */ nmbclusters = mbuf_default_ncl(serverperfmode, sane_size); } else { /* Make sure it's not odd in case ncl is manually set */ - if (nmbclusters & 0x1) + if (nmbclusters & 0x1) { --nmbclusters; + } /* And obey the upper limit */ - if (nmbclusters > MAX_NCL) + if (nmbclusters > MAX_NCL) { nmbclusters = MAX_NCL; + } } /* Round it down to nearest multiple of PAGE_SIZE */ @@ -301,17 +318,18 @@ bsd_mbuf_cluster_reserve(boolean_t *overridden) } mbuf_poolsz = nmbclusters << MCLSHIFT; done: - if (overridden) + if (overridden) { *overridden = was_overridden; + } - return (mbuf_poolsz); + return mbuf_poolsz; } #endif #if defined(__LP64__) extern int tcp_tcbhashsize; extern int max_cached_sock_count; -#endif +#endif void @@ -323,17 +341,17 @@ bsd_scale_setup(int scale) maxprocperuid = (maxproc * 2) / 3; if (scale > 2) { maxfiles *= scale; - maxfilesperproc = maxfiles/2; + maxfilesperproc = maxfiles / 2; } } /* Apply server scaling rules */ - if ((scale > 0) && (serverperfmode !=0)) { + if ((scale > 0) && (serverperfmode != 0)) { maxproc = 2500 * scale; hard_maxproc = maxproc; /* no fp usage */ - maxprocperuid = (maxproc*3)/4; + maxprocperuid = (maxproc * 3) / 4; maxfiles = (150000 * scale); - maxfilesperproc = maxfiles/2; + maxfilesperproc = maxfiles / 2; desiredvnodes = maxfiles; vnodes_sized = 1; tcp_tfo_backlog = 100 * scale; @@ -344,25 +362,24 @@ bsd_scale_setup(int scale) * For scale > 4 (> 32G), clip * tcp_tcbhashsize to 32K */ - tcp_tcbhashsize = 32 *1024; + tcp_tcbhashsize = 32 * 1024; if (scale > 7) { /* clip at 64G level */ max_cached_sock_count = 165000; } else { - max_cached_sock_count = 60000 + ((scale-1) * 15000); + max_cached_sock_count = 60000 + ((scale - 1) * 15000); } } else { - somaxconn = 512*scale; - tcp_tcbhashsize = 4*1024*scale; - max_cached_sock_count = 60000 + ((scale-1) * 15000); + somaxconn = 512 * scale; + tcp_tcbhashsize = 4 * 1024 * scale; + max_cached_sock_count = 60000 + ((scale - 1) * 15000); } } - if(maxproc > hard_maxproc) { + if (maxproc > hard_maxproc) { hard_maxproc = maxproc; } #endif bsd_exec_setup(scale); } - diff --git a/bsd/dev/vn/shadow.c b/bsd/dev/vn/shadow.c index 731f49989..50e391264 100644 --- a/bsd/dev/vn/shadow.c +++ b/bsd/dev/vn/shadow.c @@ -2,7 +2,7 @@ * Copyright (c) 2001-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -39,7 +39,7 @@ * 2) a band map to map a "band" within the original file to a corresponding * "band" in the shadow file. Each band has the same size. * - * The band map is used to ensure that blocks that are contiguous in the + * The band map is used to ensure that blocks that are contiguous in the * original file will remain contiguous in the shadow file. * * For debugging purposes, this file can be compiled standalone using: @@ -49,7 +49,7 @@ /* * Modification History * - * December 21, 2001 Dieter Siegmund (dieter@apple.com) + * December 21, 2001 Dieter Siegmund (dieter@apple.com) * - initial revision */ #include @@ -61,55 +61,55 @@ #ifdef TEST_SHADOW #include #include -#define my_malloc(a) malloc(a) -#define my_free(a) free(a) +#define my_malloc(a) malloc(a) +#define my_free(a) free(a) #else /* !TEST_SHADOW */ #include -#define my_malloc(a) _MALLOC(a, M_TEMP, M_WAITOK) -#define my_free(a) FREE(a, M_TEMP) +#define my_malloc(a) _MALLOC(a, M_TEMP, M_WAITOK) +#define my_free(a) FREE(a, M_TEMP) #include #endif /* TEST_SHADOW */ #include "shadow.h" -#define UINT32_ALL_ONES ((uint32_t)(-1)) -#define USHORT_ALL_ONES ((u_short)(-1)) -#define UCHAR_ALL_ONES ((u_char)(-1)) +#define UINT32_ALL_ONES ((uint32_t)(-1)) +#define USHORT_ALL_ONES ((u_short)(-1)) +#define UCHAR_ALL_ONES ((u_char)(-1)) -#define my_trunc(value, divisor) ((value) / (divisor) * (divisor)) +#define my_trunc(value, divisor) ((value) / (divisor) * (divisor)) /* a band size of 128K can represent a file up to 8GB */ -#define BAND_SIZE_DEFAULT_POWER_2 17 -#define BAND_SIZE_DEFAULT (1 << BAND_SIZE_DEFAULT_POWER_2) +#define BAND_SIZE_DEFAULT_POWER_2 17 +#define BAND_SIZE_DEFAULT (1 << BAND_SIZE_DEFAULT_POWER_2) -typedef u_short band_number_t; -#define BAND_ZERO ((band_number_t)0) -#define BAND_MAX ((band_number_t)65535) +typedef u_short band_number_t; +#define BAND_ZERO ((band_number_t)0) +#define BAND_MAX ((band_number_t)65535) struct shadow_map { - uint32_t blocks_per_band;/* size in blocks */ - uint32_t block_size; - u_char * block_bitmap; /* 1 bit per block; 1=written */ - band_number_t * bands; /* band map array */ - uint32_t file_size_blocks; /* size of file in bands */ - uint32_t shadow_size_bands; /* size of shadow in bands */ - uint32_t next_band; /* next free band */ - uint32_t zeroth_band; /* special-case 0th band */ + uint32_t blocks_per_band;/* size in blocks */ + uint32_t block_size; + u_char * block_bitmap; /* 1 bit per block; 1=written */ + band_number_t * bands; /* band map array */ + uint32_t file_size_blocks; /* size of file in bands */ + uint32_t shadow_size_bands; /* size of shadow in bands */ + uint32_t next_band; /* next free band */ + uint32_t zeroth_band; /* special-case 0th band */ }; typedef struct { - uint32_t byte; - uint32_t bit; + uint32_t byte; + uint32_t bit; } bitmap_offset_t; static __inline__ u_char bit(int b) { - return ((u_char)(1 << b)); + return (u_char)(1 << b); } -/* +/* * Function: bits_lower * Purpose: * Return a byte value in which bits numbered lower than 'b' are set. 
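The helpers being reflowed in these shadow.c hunks are simple mask builders: bit(b) is 1 << b, bits_lower(b) sets every bit strictly below b, and byte_set_bits(start, end) (in the next hunk) combines them to set an inclusive bit range within one byte. A standalone sketch with worked values (main is illustrative; the kernel versions are static __inline__):

#include <stdio.h>

typedef unsigned char u_char;

static u_char
bit(int b)
{
	return (u_char)(1 << b);		/* single bit b */
}

static u_char
bits_lower(int b)
{
	return (u_char)(bit(b) - 1);		/* bits 0 .. b-1 */
}

static u_char
byte_set_bits(int start, int end)
{
	/* bits start .. end, inclusive */
	return (u_char)((~bits_lower(start)) & (bits_lower(end) | bit(end)));
}

int
main(void)
{
	/* prints "F0 3C": bits 4..7 and bits 2..5 */
	printf("%02X %02X\n", byte_set_bits(4, 7), byte_set_bits(2, 5));
	return 0;
}

bitmap_set() and bitmap_get() below lean on byte_set_bits() only for the ragged bytes at either end of a range, filling the whole bytes and words in between with UCHAR/USHORT/UINT32_ALL_ONES.
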
@@ -117,7 +117,7 @@ bit(int b) static __inline__ u_char bits_lower(int b) { - return ((u_char)(bit(b) - 1)); + return (u_char)(bit(b) - 1); } /* @@ -128,20 +128,20 @@ bits_lower(int b) static __inline__ u_char byte_set_bits(int start, int end) { - return ((u_char)((~bits_lower(start)) & (bits_lower(end) | bit(end)))); + return (u_char)((~bits_lower(start)) & (bits_lower(end) | bit(end))); } static __inline__ bitmap_offset_t bitmap_offset(off_t where) { - bitmap_offset_t b; + bitmap_offset_t b; - b.byte = where / NBBY; - b.bit = where % NBBY; - return (b); + b.byte = where / NBBY; + b.bit = where % NBBY; + return b; } -/* +/* * Function: bitmap_set * * Purpose: @@ -153,47 +153,48 @@ bitmap_offset(off_t where) static void bitmap_set(u_char * map, uint32_t start_bit, uint32_t bit_count) { - bitmap_offset_t start; - bitmap_offset_t end; - - start = bitmap_offset(start_bit); - end = bitmap_offset(start_bit + bit_count); - if (start.byte < end.byte) { - uint32_t n_bytes; - - if (start.bit) { - map[start.byte] |= byte_set_bits(start.bit, NBBY - 1); - start.bit = 0; - start.byte++; - if (start.byte == end.byte) - goto end; - } - - n_bytes = end.byte - start.byte; - - while (n_bytes >= (sizeof(uint32_t))) { - *((uint32_t *)(map + start.byte)) = UINT32_ALL_ONES; - start.byte += sizeof(uint32_t); - n_bytes -= sizeof(uint32_t); - } - if (n_bytes >= sizeof(u_short)) { - *((u_short *)(map + start.byte)) = USHORT_ALL_ONES; - start.byte += sizeof(u_short); - n_bytes -= sizeof(u_short); - } - if (n_bytes == 1) { - map[start.byte] = UCHAR_ALL_ONES; - start.byte++; - n_bytes = 0; + bitmap_offset_t start; + bitmap_offset_t end; + + start = bitmap_offset(start_bit); + end = bitmap_offset(start_bit + bit_count); + if (start.byte < end.byte) { + uint32_t n_bytes; + + if (start.bit) { + map[start.byte] |= byte_set_bits(start.bit, NBBY - 1); + start.bit = 0; + start.byte++; + if (start.byte == end.byte) { + goto end; + } + } + + n_bytes = end.byte - start.byte; + + while (n_bytes >= (sizeof(uint32_t))) { + *((uint32_t *)(map + start.byte)) = UINT32_ALL_ONES; + start.byte += sizeof(uint32_t); + n_bytes -= sizeof(uint32_t); + } + if (n_bytes >= sizeof(u_short)) { + *((u_short *)(map + start.byte)) = USHORT_ALL_ONES; + start.byte += sizeof(u_short); + n_bytes -= sizeof(u_short); + } + if (n_bytes == 1) { + map[start.byte] = UCHAR_ALL_ONES; + start.byte++; + n_bytes = 0; + } } - } - end: - if (end.bit > start.bit) { - map[start.byte] |= byte_set_bits(start.bit, end.bit - 1); - } +end: + if (end.bit > start.bit) { + map[start.byte] |= byte_set_bits(start.bit, end.bit - 1); + } - return; + return; } /* @@ -209,110 +210,110 @@ bitmap_set(u_char * map, uint32_t start_bit, uint32_t bit_count) */ static uint32_t -bitmap_get(u_char * map, uint32_t start_bit, uint32_t bit_count, - boolean_t * ret_is_set) +bitmap_get(u_char * map, uint32_t start_bit, uint32_t bit_count, + boolean_t * ret_is_set) { - uint32_t count; - int i; - boolean_t is_set; - bitmap_offset_t start; - bitmap_offset_t end; - - start = bitmap_offset(start_bit); - end = bitmap_offset(start_bit + bit_count); - - is_set = (map[start.byte] & bit(start.bit)) ? 
TRUE : FALSE; - count = 0; - - if (start.byte < end.byte) { - uint32_t n_bytes; - - if (start.bit) { /* try to align to a byte */ - for (i = start.bit; i < NBBY; i++) { - boolean_t this_is_set; + uint32_t count; + int i; + boolean_t is_set; + bitmap_offset_t start; + bitmap_offset_t end; + + start = bitmap_offset(start_bit); + end = bitmap_offset(start_bit + bit_count); + + is_set = (map[start.byte] & bit(start.bit)) ? TRUE : FALSE; + count = 0; + + if (start.byte < end.byte) { + uint32_t n_bytes; + + if (start.bit) { /* try to align to a byte */ + for (i = start.bit; i < NBBY; i++) { + boolean_t this_is_set; + + this_is_set = (map[start.byte] & bit(i)) ? TRUE : FALSE; + if (this_is_set != is_set) { + goto done; /* found bit that was different, we're done */ + } + count++; + } + start.bit = 0; /* made it to the next byte */ + start.byte++; + if (start.byte == end.byte) { + goto end; /* no more bytes, check for any leftover bits */ + } + } + /* calculate how many bytes are left in the range */ + n_bytes = end.byte - start.byte; + + /* check for 4 bytes of the same bits */ + while (n_bytes >= sizeof(uint32_t)) { + uint32_t * valPtr = (uint32_t *)(map + start.byte); + if ((is_set && *valPtr == UINT32_ALL_ONES) + || (!is_set && *valPtr == 0)) { + count += sizeof(*valPtr) * NBBY; + start.byte += sizeof(*valPtr); + n_bytes -= sizeof(*valPtr); + } else { + break; /* bits differ */ + } + } + /* check for 2 bytes of the same bits */ + if (n_bytes >= sizeof(u_short)) { + u_short * valPtr = (u_short *)(map + start.byte); + + if ((is_set && *valPtr == USHORT_ALL_ONES) + || (!is_set && (*valPtr == 0))) { + count += sizeof(*valPtr) * NBBY; + start.byte += sizeof(*valPtr); + n_bytes -= sizeof(*valPtr); + } + } - this_is_set = (map[start.byte] & bit(i)) ? TRUE : FALSE; - if (this_is_set != is_set) { - goto done; /* found bit that was different, we're done */ + /* check for 1 byte of the same bits */ + if (n_bytes) { + if ((is_set && map[start.byte] == UCHAR_ALL_ONES) + || (!is_set && map[start.byte] == 0)) { + count += NBBY; + start.byte++; + n_bytes--; + } + /* we found bits that were different, find the first one */ + if (n_bytes) { + for (i = 0; i < NBBY; i++) { + boolean_t this_is_set; + + this_is_set = (map[start.byte] & bit(i)) ? TRUE : FALSE; + if (this_is_set != is_set) { + break; + } + count++; + } + goto done; + } } - count++; - } - start.bit = 0; /* made it to the next byte */ - start.byte++; - if (start.byte == end.byte) - goto end; /* no more bytes, check for any leftover bits */ } - /* calculate how many bytes are left in the range */ - n_bytes = end.byte - start.byte; - - /* check for 4 bytes of the same bits */ - while (n_bytes >= sizeof(uint32_t)) { - uint32_t * valPtr = (uint32_t *)(map + start.byte); - if ((is_set && *valPtr == UINT32_ALL_ONES) - || (!is_set && *valPtr == 0)) { - count += sizeof(*valPtr) * NBBY; - start.byte += sizeof(*valPtr); - n_bytes -= sizeof(*valPtr); - } - else - break; /* bits differ */ - } - /* check for 2 bytes of the same bits */ - if (n_bytes >= sizeof(u_short)) { - u_short * valPtr = (u_short *)(map + start.byte); - - if ((is_set && *valPtr == USHORT_ALL_ONES) - || (!is_set && (*valPtr == 0))) { - count += sizeof(*valPtr) * NBBY; - start.byte += sizeof(*valPtr); - n_bytes -= sizeof(*valPtr); - } - } +end: + for (i = start.bit; i < (int)end.bit; i++) { + boolean_t this_is_set = (map[start.byte] & bit(i)) ? 
TRUE : FALSE; - /* check for 1 byte of the same bits */ - if (n_bytes) { - if ((is_set && map[start.byte] == UCHAR_ALL_ONES) - || (!is_set && map[start.byte] == 0)) { - count += NBBY; - start.byte++; - n_bytes--; - } - /* we found bits that were different, find the first one */ - if (n_bytes) { - for (i = 0; i < NBBY; i++) { - boolean_t this_is_set; - - this_is_set = (map[start.byte] & bit(i)) ? TRUE : FALSE; - if (this_is_set != is_set) { + if (this_is_set != is_set) { break; - } - count++; } - goto done; - } - } - } - - end: - for (i = start.bit; i < (int)end.bit; i++) { - boolean_t this_is_set = (map[start.byte] & bit(i)) ? TRUE : FALSE; - - if (this_is_set != is_set) { - break; + count++; } - count++; - } - done: - *ret_is_set = is_set; - return (count); +done: + *ret_is_set = is_set; + return count; } static __inline__ band_number_t shadow_map_block_to_band(shadow_map_t * map, uint32_t block) { - return (block / map->blocks_per_band); + return block / map->blocks_per_band; } /* @@ -324,90 +325,88 @@ shadow_map_block_to_band(shadow_map_t * map, uint32_t block) */ static boolean_t shadow_map_mapped_band(shadow_map_t * map, band_number_t band, - boolean_t map_it, band_number_t * mapped_band) + boolean_t map_it, band_number_t * mapped_band) { - boolean_t is_mapped = FALSE; - - if (band == map->zeroth_band) { - *mapped_band = BAND_ZERO; - is_mapped = TRUE; - } - else { - *mapped_band = map->bands[band]; - if (*mapped_band == BAND_ZERO) { - if (map_it) { - /* grow the file */ - if (map->next_band == 0) { - /* remember the zero'th band */ - map->zeroth_band = band; - } - *mapped_band = map->bands[band] = map->next_band++; + boolean_t is_mapped = FALSE; + + if (band == map->zeroth_band) { + *mapped_band = BAND_ZERO; is_mapped = TRUE; - } - } - else { - is_mapped = TRUE; + } else { + *mapped_band = map->bands[band]; + if (*mapped_band == BAND_ZERO) { + if (map_it) { + /* grow the file */ + if (map->next_band == 0) { + /* remember the zero'th band */ + map->zeroth_band = band; + } + *mapped_band = map->bands[band] = map->next_band++; + is_mapped = TRUE; + } + } else { + is_mapped = TRUE; + } } - } - return (is_mapped); + return is_mapped; } -/* +/* * Function: shadow_map_contiguous * * Purpose: - * Return the first offset within the range position..(position + count) + * Return the first offset within the range position..(position + count) * that is not a contiguous mapped band. * * If called with is_write = TRUE, this function will map bands as it goes. 
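 *
 * Worked example (illustrative numbers, not taken from this change):
 * with blocks_per_band = 256, a request for blocks 250..600 starts in
 * band 0 and crosses into bands 1 and 2.  If band 0 maps to shadow
 * band 0 but band 1 maps to shadow band 3, the mapped bands are not
 * contiguous, so the function returns 256 -- the first block of band 1.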
*/ static uint32_t shadow_map_contiguous(shadow_map_t * map, uint32_t start_block, - uint32_t num_blocks, boolean_t is_write) + uint32_t num_blocks, boolean_t is_write) { - band_number_t band = shadow_map_block_to_band(map, start_block); - uint32_t end_block = start_block + num_blocks; - boolean_t is_mapped; - band_number_t mapped_band; - uint32_t ret_end_block = end_block; - uint32_t p; - - is_mapped = shadow_map_mapped_band(map, band, is_write, &mapped_band); - if (is_write == FALSE && is_mapped == FALSE) { - static int happened = 0; - /* this can't happen */ - if (happened == 0) { - printf("shadow_map_contiguous: this can't happen!\n"); - happened = 1; - } - return (start_block); - } - for (p = my_trunc(start_block + map->blocks_per_band, - map->blocks_per_band); - p < end_block; p += map->blocks_per_band) { - band_number_t next_mapped_band; - - band++; - is_mapped = shadow_map_mapped_band(map, band, is_write, - &next_mapped_band); + band_number_t band = shadow_map_block_to_band(map, start_block); + uint32_t end_block = start_block + num_blocks; + boolean_t is_mapped; + band_number_t mapped_band; + uint32_t ret_end_block = end_block; + uint32_t p; + + is_mapped = shadow_map_mapped_band(map, band, is_write, &mapped_band); if (is_write == FALSE && is_mapped == FALSE) { - return (p); + static int happened = 0; + /* this can't happen */ + if (happened == 0) { + printf("shadow_map_contiguous: this can't happen!\n"); + happened = 1; + } + return start_block; } - if ((mapped_band + 1) != next_mapped_band) { - /* not contiguous */ - ret_end_block = p; - break; + for (p = my_trunc(start_block + map->blocks_per_band, + map->blocks_per_band); + p < end_block; p += map->blocks_per_band) { + band_number_t next_mapped_band; + + band++; + is_mapped = shadow_map_mapped_band(map, band, is_write, + &next_mapped_band); + if (is_write == FALSE && is_mapped == FALSE) { + return p; + } + if ((mapped_band + 1) != next_mapped_band) { + /* not contiguous */ + ret_end_block = p; + break; + } + mapped_band = next_mapped_band; } - mapped_band = next_mapped_band; - } - return (ret_end_block); + return ret_end_block; } -/* +/* * Function: block_bitmap_size * Purpose: - * The number of bytes required in a block bitmap to represent a file of size + * The number of bytes required in a block bitmap to represent a file of size * file_size. * * The bytes required is the number of blocks in the file, @@ -422,8 +421,8 @@ shadow_map_contiguous(shadow_map_t * map, uint32_t start_block, static __inline__ uint32_t block_bitmap_size(off_t file_size, uint32_t block_size) { - off_t blocks = howmany(file_size, block_size); - return (howmany(blocks, NBBY)); + off_t blocks = howmany(file_size, block_size); + return howmany(blocks, NBBY); } /* @@ -437,10 +436,10 @@ block_bitmap_size(off_t file_size, uint32_t block_size) * The output values (*incr_block_offset, *incr_block_count) refer to the * shadow file if the return value is TRUE. They refer to the original * file if the return value is FALSE. - + * * Blocks within a band may or may not have been written, in addition, * Bands are not necessarily contiguous, therefore: - * *incr_block_count <= block_count + * *incr_block_count <= block_count * The caller must be prepared to call this function interatively * to complete the whole i/o. 
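 *
 * A minimal caller sketch (illustrative; do_read() stands in for the
 * file_io() helper that the real callers in bsd/dev/vn/vn.c use):
 *
 *	while (block_count > 0) {
 *		uint32_t incr_off, incr_cnt;
 *		boolean_t from_shadow;
 *
 *		from_shadow = shadow_map_read(map, block_offset,
 *		    block_count, &incr_off, &incr_cnt);
 *		do_read(from_shadow ? shadow_vp : file_vp,
 *		    incr_off, incr_cnt);
 *		block_offset += incr_cnt;
 *		block_count -= incr_cnt;
 *	}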
* Returns: @@ -449,38 +448,37 @@ block_bitmap_size(off_t file_size, uint32_t block_size) */ boolean_t shadow_map_read(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, - uint32_t * incr_block_offset, uint32_t * incr_block_count) + uint32_t * incr_block_offset, uint32_t * incr_block_count) { - boolean_t written = FALSE; - uint32_t n_blocks; - - if (block_offset >= map->file_size_blocks - || (block_offset + block_count) > map->file_size_blocks) { - printf("shadow_map_read: request (%d, %d) exceeds file size %d\n", - block_offset, block_count, map->file_size_blocks); - *incr_block_count = 0; - } - n_blocks = bitmap_get(map->block_bitmap, block_offset, block_count, - &written); - if (written == FALSE) { - *incr_block_count = n_blocks; - *incr_block_offset = block_offset; - } - else { /* start has been written, and therefore mapped */ - band_number_t mapped_band; - uint32_t band_limit; - - mapped_band = map->bands[shadow_map_block_to_band(map, block_offset)]; - *incr_block_offset = mapped_band * map->blocks_per_band - + (block_offset % map->blocks_per_band); - band_limit - = shadow_map_contiguous(map, block_offset, block_count, FALSE); - *incr_block_count = band_limit - block_offset; - if (*incr_block_count > n_blocks) { - *incr_block_count = n_blocks; + boolean_t written = FALSE; + uint32_t n_blocks; + + if (block_offset >= map->file_size_blocks + || (block_offset + block_count) > map->file_size_blocks) { + printf("shadow_map_read: request (%d, %d) exceeds file size %d\n", + block_offset, block_count, map->file_size_blocks); + *incr_block_count = 0; } - } - return (written); + n_blocks = bitmap_get(map->block_bitmap, block_offset, block_count, + &written); + if (written == FALSE) { + *incr_block_count = n_blocks; + *incr_block_offset = block_offset; + } else { /* start has been written, and therefore mapped */ + band_number_t mapped_band; + uint32_t band_limit; + + mapped_band = map->bands[shadow_map_block_to_band(map, block_offset)]; + *incr_block_offset = mapped_band * map->blocks_per_band + + (block_offset % map->blocks_per_band); + band_limit + = shadow_map_contiguous(map, block_offset, block_count, FALSE); + *incr_block_count = band_limit - block_offset; + if (*incr_block_count > n_blocks) { + *incr_block_count = n_blocks; + } + } + return written; } /* @@ -489,55 +487,55 @@ shadow_map_read(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, * Purpose: * Calculate the block offset within the shadow to write, and the number * blocks to write. The input values (block_offset, block_count) refer - * to the original file. The output values + * to the original file. The output values * (*incr_block_offset, *incr_block_count) refer to the shadow file. * * Bands are not necessarily contiguous, therefore: - * *incr_block_count <= block_count + * *incr_block_count <= block_count * The caller must be prepared to call this function interatively * to complete the whole i/o. * Returns: - * TRUE if the shadow file was grown, FALSE otherwise. + * TRUE if the shadow file was grown, FALSE otherwise. 
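 *
 * A TRUE return means map->shadow_size_bands grew; a caller that keeps
 * the shadow file preallocated can then extend it to
 * shadow_map_shadow_size(map) * block_size bytes before issuing the
 * write, as the (currently disabled) vnode_setsize() calls in
 * bsd/dev/vn/vn.c illustrate.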
*/ boolean_t -shadow_map_write(shadow_map_t * map, uint32_t block_offset, - uint32_t block_count, uint32_t * incr_block_offset, - uint32_t * incr_block_count) +shadow_map_write(shadow_map_t * map, uint32_t block_offset, + uint32_t block_count, uint32_t * incr_block_offset, + uint32_t * incr_block_count) { - uint32_t band_limit; - band_number_t mapped_band; - boolean_t shadow_grew = FALSE; - - if (block_offset >= map->file_size_blocks - || (block_offset + block_count) > map->file_size_blocks) { - printf("shadow_map_write: request (%d, %d) exceeds file size %d\n", - block_offset, block_count, map->file_size_blocks); - *incr_block_count = 0; - } - - band_limit = shadow_map_contiguous(map, block_offset, block_count, TRUE); - mapped_band = map->bands[shadow_map_block_to_band(map, block_offset)]; - *incr_block_offset = mapped_band * map->blocks_per_band - + (block_offset % map->blocks_per_band); - *incr_block_count = band_limit - block_offset; - - /* mark these blocks as written */ - bitmap_set(map->block_bitmap, block_offset, *incr_block_count); - - if (map->next_band > map->shadow_size_bands) { - map->shadow_size_bands = map->next_band; - shadow_grew = TRUE; - } - return (shadow_grew); + uint32_t band_limit; + band_number_t mapped_band; + boolean_t shadow_grew = FALSE; + + if (block_offset >= map->file_size_blocks + || (block_offset + block_count) > map->file_size_blocks) { + printf("shadow_map_write: request (%d, %d) exceeds file size %d\n", + block_offset, block_count, map->file_size_blocks); + *incr_block_count = 0; + } + + band_limit = shadow_map_contiguous(map, block_offset, block_count, TRUE); + mapped_band = map->bands[shadow_map_block_to_band(map, block_offset)]; + *incr_block_offset = mapped_band * map->blocks_per_band + + (block_offset % map->blocks_per_band); + *incr_block_count = band_limit - block_offset; + + /* mark these blocks as written */ + bitmap_set(map->block_bitmap, block_offset, *incr_block_count); + + if (map->next_band > map->shadow_size_bands) { + map->shadow_size_bands = map->next_band; + shadow_grew = TRUE; + } + return shadow_grew; } boolean_t shadow_map_is_written(shadow_map_t * map, uint32_t block_offset) { - bitmap_offset_t b; + bitmap_offset_t b; - b = bitmap_offset(block_offset); - return ((map->block_bitmap[b.byte] & bit(b.bit)) ? TRUE : FALSE); + b = bitmap_offset(block_offset); + return (map->block_bitmap[b.byte] & bit(b.bit)) ? TRUE : FALSE; } /* @@ -549,10 +547,10 @@ shadow_map_is_written(shadow_map_t * map, uint32_t block_offset) uint32_t shadow_map_shadow_size(shadow_map_t * map) { - return (map->shadow_size_bands * map->blocks_per_band); + return map->shadow_size_bands * map->blocks_per_band; } -/* +/* * Function: shadow_map_create * * Purpose: @@ -562,64 +560,66 @@ shadow_map_shadow_size(shadow_map_t * map) * NULL if an error occurred. 
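 *
 * Example (borrowed from the TEST_SHADOW harness below): an 8GB file
 * with 512-byte blocks,
 *
 *	map = shadow_map_create(1024 * 1024 * 1024 * 8ULL, 0, 0, 512);
 *
 * where band_size == 0 selects BAND_SIZE_DEFAULT and shadow_size == 0
 * starts with an empty shadow.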
*/ shadow_map_t * -shadow_map_create(off_t file_size, off_t shadow_size, - uint32_t band_size, uint32_t block_size) +shadow_map_create(off_t file_size, off_t shadow_size, + uint32_t band_size, uint32_t block_size) { - void * block_bitmap = NULL; - uint32_t bitmap_size; - band_number_t * bands = NULL; - shadow_map_t * map; - uint32_t n_bands = 0; - - if (band_size == 0) { - band_size = BAND_SIZE_DEFAULT; - } - - n_bands = howmany(file_size, band_size); - if (n_bands > (BAND_MAX + 1)) { - printf("file is too big: %d > %d\n", - n_bands, BAND_MAX); - goto failure; - } - - /* create a block bitmap, one bit per block */ - bitmap_size = block_bitmap_size(file_size, block_size); - block_bitmap = my_malloc(bitmap_size); - if (block_bitmap == NULL) { - printf("failed to allocate bitmap\n"); - goto failure; - } - bzero(block_bitmap, bitmap_size); - - /* get the band map */ - bands = (band_number_t *)my_malloc(n_bands * sizeof(band_number_t)); - if (bands == NULL) { - printf("failed to allocate bands\n"); - goto failure; - } - bzero(bands, n_bands * sizeof(band_number_t)); - - map = my_malloc(sizeof(*map)); - if (map == NULL) { - printf("failed to allocate map\n"); - goto failure; - } - map->blocks_per_band = band_size / block_size; - map->block_bitmap = block_bitmap; - map->bands = bands; - map->file_size_blocks = n_bands * map->blocks_per_band; - map->next_band = 0; - map->zeroth_band = -1; - map->shadow_size_bands = howmany(shadow_size, band_size); - map->block_size = block_size; - return (map); - - failure: - if (block_bitmap) - my_free(block_bitmap); - if (bands) - my_free(bands); - return (NULL); + void * block_bitmap = NULL; + uint32_t bitmap_size; + band_number_t * bands = NULL; + shadow_map_t * map; + uint32_t n_bands = 0; + + if (band_size == 0) { + band_size = BAND_SIZE_DEFAULT; + } + + n_bands = howmany(file_size, band_size); + if (n_bands > (BAND_MAX + 1)) { + printf("file is too big: %d > %d\n", + n_bands, BAND_MAX); + goto failure; + } + + /* create a block bitmap, one bit per block */ + bitmap_size = block_bitmap_size(file_size, block_size); + block_bitmap = my_malloc(bitmap_size); + if (block_bitmap == NULL) { + printf("failed to allocate bitmap\n"); + goto failure; + } + bzero(block_bitmap, bitmap_size); + + /* get the band map */ + bands = (band_number_t *)my_malloc(n_bands * sizeof(band_number_t)); + if (bands == NULL) { + printf("failed to allocate bands\n"); + goto failure; + } + bzero(bands, n_bands * sizeof(band_number_t)); + + map = my_malloc(sizeof(*map)); + if (map == NULL) { + printf("failed to allocate map\n"); + goto failure; + } + map->blocks_per_band = band_size / block_size; + map->block_bitmap = block_bitmap; + map->bands = bands; + map->file_size_blocks = n_bands * map->blocks_per_band; + map->next_band = 0; + map->zeroth_band = -1; + map->shadow_size_bands = howmany(shadow_size, band_size); + map->block_size = block_size; + return map; + +failure: + if (block_bitmap) { + my_free(block_bitmap); + } + if (bands) { + my_free(bands); + } + return NULL; } /* @@ -629,113 +629,115 @@ shadow_map_create(off_t file_size, off_t shadow_size, */ void shadow_map_free(shadow_map_t * map) -{ - if (map->block_bitmap) - my_free(map->block_bitmap); - if (map->bands) - my_free(map->bands); - map->block_bitmap = NULL; - map->bands = NULL; - my_free(map); - return; +{ + if (map->block_bitmap) { + my_free(map->block_bitmap); + } + if (map->bands) { + my_free(map->bands); + } + map->block_bitmap = NULL; + map->bands = NULL; + my_free(map); + return; } #ifdef TEST_SHADOW -#define 
BAND_SIZE_BLOCKS (BAND_SIZE_DEFAULT / 512) +#define BAND_SIZE_BLOCKS (BAND_SIZE_DEFAULT / 512) enum { - ReadRequest, - WriteRequest, + ReadRequest, + WriteRequest, }; typedef struct { - int type; - uint32_t offset; - uint32_t count; + int type; + uint32_t offset; + uint32_t count; } block_request_t; int main() { - shadow_map_t * map; - int i; - block_request_t requests[] = { - { WriteRequest, BAND_SIZE_BLOCKS * 2, 1 }, - { ReadRequest, BAND_SIZE_BLOCKS / 2, BAND_SIZE_BLOCKS * 2 - 2 }, - { WriteRequest, BAND_SIZE_BLOCKS * 1, 5 * BAND_SIZE_BLOCKS + 3}, - { ReadRequest, 0, BAND_SIZE_BLOCKS * 10 }, - { WriteRequest, BAND_SIZE_BLOCKS * (BAND_MAX - 1), - BAND_SIZE_BLOCKS * 2}, - { 0, 0 }, - }; - - map = shadow_map_create(1024 * 1024 * 1024 * 8ULL, 0, 0, 512); - if (map == NULL) { - printf("shadow_map_create failed\n"); - exit(1); - } - for (i = 0; TRUE; i++) { - uint32_t offset; - uint32_t resid; - boolean_t shadow_grew; - boolean_t read_shadow; - - if (requests[i].count == 0) { - break; + shadow_map_t * map; + int i; + block_request_t requests[] = { + { WriteRequest, BAND_SIZE_BLOCKS * 2, 1 }, + { ReadRequest, BAND_SIZE_BLOCKS / 2, BAND_SIZE_BLOCKS * 2 - 2 }, + { WriteRequest, BAND_SIZE_BLOCKS * 1, 5 * BAND_SIZE_BLOCKS + 3}, + { ReadRequest, 0, BAND_SIZE_BLOCKS * 10 }, + { WriteRequest, BAND_SIZE_BLOCKS * (BAND_MAX - 1), + BAND_SIZE_BLOCKS * 2}, + { 0, 0 }, + }; + + map = shadow_map_create(1024 * 1024 * 1024 * 8ULL, 0, 0, 512); + if (map == NULL) { + printf("shadow_map_create failed\n"); + exit(1); } - offset = requests[i].offset; - resid = requests[i].count; - printf("\n%s REQUEST (%ld, %ld)\n", - requests[i].type == WriteRequest ? "WRITE" : "READ", - offset, resid); - switch (requests[i].type) { - case WriteRequest: - while (resid > 0) { - uint32_t this_offset; - uint32_t this_count; - - shadow_grew = shadow_map_write(map, offset, - resid, - &this_offset, - &this_count); - printf("\t(%ld, %ld) => (%ld, %ld)", - offset, resid, this_offset, this_count); - resid -= this_count; - offset += this_count; - if (shadow_grew) { - printf(" shadow grew to %ld", shadow_map_shadow_size(map)); + for (i = 0; TRUE; i++) { + uint32_t offset; + uint32_t resid; + boolean_t shadow_grew; + boolean_t read_shadow; + + if (requests[i].count == 0) { + break; } - printf("\n"); - } - break; - case ReadRequest: - while (resid > 0) { - uint32_t this_offset; - uint32_t this_count; - - read_shadow = shadow_map_read(map, offset, - resid, - &this_offset, - &this_count); - printf("\t(%ld, %ld) => (%ld, %ld)%s\n", - offset, resid, this_offset, this_count, - read_shadow ? " from shadow" : ""); - if (this_count == 0) { - printf("this_count is 0, aborting\n"); - break; + offset = requests[i].offset; + resid = requests[i].count; + printf("\n%s REQUEST (%ld, %ld)\n", + requests[i].type == WriteRequest ? 
"WRITE" : "READ", + offset, resid); + switch (requests[i].type) { + case WriteRequest: + while (resid > 0) { + uint32_t this_offset; + uint32_t this_count; + + shadow_grew = shadow_map_write(map, offset, + resid, + &this_offset, + &this_count); + printf("\t(%ld, %ld) => (%ld, %ld)", + offset, resid, this_offset, this_count); + resid -= this_count; + offset += this_count; + if (shadow_grew) { + printf(" shadow grew to %ld", shadow_map_shadow_size(map)); + } + printf("\n"); + } + break; + case ReadRequest: + while (resid > 0) { + uint32_t this_offset; + uint32_t this_count; + + read_shadow = shadow_map_read(map, offset, + resid, + &this_offset, + &this_count); + printf("\t(%ld, %ld) => (%ld, %ld)%s\n", + offset, resid, this_offset, this_count, + read_shadow ? " from shadow" : ""); + if (this_count == 0) { + printf("this_count is 0, aborting\n"); + break; + } + resid -= this_count; + offset += this_count; + } + break; + default: + break; } - resid -= this_count; - offset += this_count; - } - break; - default: - break; } - } - if (map) { - shadow_map_free(map); - } - exit(0); - return (0); + if (map) { + shadow_map_free(map); + } + exit(0); + return 0; } #endif diff --git a/bsd/dev/vn/shadow.h b/bsd/dev/vn/shadow.h index ce2c677dc..72d68d3a3 100644 --- a/bsd/dev/vn/shadow.h +++ b/bsd/dev/vn/shadow.h @@ -2,7 +2,7 @@ * Copyright (c) 1999, 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,10 +37,10 @@ typedef struct shadow_map shadow_map_t; boolean_t shadow_map_read(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, - uint32_t * incr_block_offset, uint32_t * incr_block_count); + uint32_t * incr_block_offset, uint32_t * incr_block_count); boolean_t shadow_map_write(shadow_map_t * map, uint32_t block_offset, uint32_t block_count, - uint32_t * incr_block_offset, uint32_t * incr_block_count); + uint32_t * incr_block_offset, uint32_t * incr_block_count); boolean_t shadow_map_is_written(shadow_map_t * map, uint32_t block_offset); @@ -48,13 +48,10 @@ uint32_t shadow_map_shadow_size(shadow_map_t * map); shadow_map_t * -shadow_map_create(off_t file_size, off_t shadow_size, - uint32_t band_size, uint32_t block_size); +shadow_map_create(off_t file_size, off_t shadow_size, + uint32_t band_size, uint32_t block_size); void shadow_map_free(shadow_map_t * map); #endif /* __APPLE_API_PRIVATE */ #endif /* __VN_SHADOW_H__ */ - - - diff --git a/bsd/dev/vn/vn.c b/bsd/dev/vn/vn.c index 703a8ad7a..c0819facb 100644 --- a/bsd/dev/vn/vn.c +++ b/bsd/dev/vn/vn.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -120,20 +120,20 @@ #include "shadow.h" -static void +static void vndevice_do_init(void); -static ioctl_fcn_t vnioctl_chr; -static ioctl_fcn_t vnioctl_blk; -static open_close_fcn_t vnopen; -static open_close_fcn_t vnclose; -static psize_fcn_t vnsize; -static strategy_fcn_t vnstrategy; -static read_write_fcn_t vnread; -static read_write_fcn_t vnwrite; +static ioctl_fcn_t vnioctl_chr; +static ioctl_fcn_t vnioctl_blk; +static open_close_fcn_t vnopen; +static open_close_fcn_t vnclose; +static psize_fcn_t vnsize; +static strategy_fcn_t vnstrategy; +static read_write_fcn_t vnread; +static read_write_fcn_t vnwrite; -static int vndevice_bdev_major; -static int vndevice_cdev_major; +static int vndevice_bdev_major; +static int vndevice_cdev_major; /* * cdevsw @@ -142,92 +142,92 @@ static int vndevice_cdev_major; */ static struct bdevsw vn_bdevsw = { - /* open */ vnopen, - /* close */ vnclose, - /* strategy */ vnstrategy, - /* ioctl */ vnioctl_blk, - /* dump */ eno_dump, - /* psize */ vnsize, - /* flags */ D_DISK, + /* open */ vnopen, + /* close */ vnclose, + /* strategy */ vnstrategy, + /* ioctl */ vnioctl_blk, + /* dump */ eno_dump, + /* psize */ vnsize, + /* flags */ D_DISK, }; static struct cdevsw vn_cdevsw = { - /* open */ vnopen, - /* close */ vnclose, - /* read */ vnread, - /* write */ vnwrite, - /* ioctl */ vnioctl_chr, - /* stop */ eno_stop, - /* reset */ eno_reset, - /* ttys */ NULL, - /* select */ eno_select, - /* mmap */ eno_mmap, - /* strategy */ eno_strat, - /* getc */ eno_getc, - /* putc */ eno_putc, - /* flags */ D_DISK, + /* open */ vnopen, + /* close */ vnclose, + /* read */ vnread, + /* write */ vnwrite, + /* ioctl */ vnioctl_chr, + /* stop */ eno_stop, + /* reset */ eno_reset, + /* ttys */ NULL, + /* select */ eno_select, + /* mmap */ eno_mmap, + /* strategy */ eno_strat, + /* getc */ eno_getc, + /* putc */ eno_putc, + /* flags */ D_DISK, }; struct vn_softc { - u_int64_t sc_fsize; /* file size in bytes */ - u_int64_t sc_size; /* size of vn, sc_secsize scale */ - int sc_flags; /* flags */ - u_int32_t sc_secsize; /* sector size */ - struct vnode *sc_vp; /* vnode if not NULL */ - uint32_t sc_vid; - int sc_open_flags; - struct vnode *sc_shadow_vp; /* shadow vnode if not NULL */ - uint32_t sc_shadow_vid; - shadow_map_t * sc_shadow_map; /* shadow map if not NULL */ - kauth_cred_t sc_cred; /* credentials */ - u_int32_t sc_options; /* options */ - void * sc_bdev; - void * sc_cdev; + u_int64_t sc_fsize; /* file size in bytes */ + u_int64_t sc_size; /* size of vn, sc_secsize scale */ + int sc_flags; /* flags */ + u_int32_t sc_secsize; /* sector size */ + struct vnode *sc_vp; /* vnode if not NULL */ + uint32_t sc_vid; + int sc_open_flags; + struct vnode *sc_shadow_vp; /* shadow vnode if not NULL */ + uint32_t sc_shadow_vid; + shadow_map_t * sc_shadow_map; /* shadow map if not NULL */ + kauth_cred_t sc_cred; /* credentials */ + u_int32_t sc_options; /* options */ + void * sc_bdev; + void * sc_cdev; } vn_table[NVNDEVICE]; -#define ROOT_IMAGE_UNIT 0 +#define ROOT_IMAGE_UNIT 0 /* sc_flags */ -#define VNF_INITED 0x01 -#define VNF_READONLY 0x02 +#define VNF_INITED 0x01 +#define VNF_READONLY 0x02 -static u_int32_t vn_options; +static u_int32_t vn_options; -#define IFOPT(vn,opt) if (((vn)->sc_options|vn_options) & (opt)) -#define TESTOPT(vn,opt) (((vn)->sc_options|vn_options) & (opt)) +#define IFOPT(vn, opt) if (((vn)->sc_options|vn_options) & (opt)) +#define TESTOPT(vn, opt) (((vn)->sc_options|vn_options) & (opt)) -static int setcred(struct 
vnode * vp, kauth_cred_t cred); -static void vnclear (struct vn_softc *vn, vfs_context_t ctx); +static int setcred(struct vnode * vp, kauth_cred_t cred); +static void vnclear(struct vn_softc *vn, vfs_context_t ctx); static void vn_ioctl_to_64(struct vn_ioctl_32 *from, struct vn_ioctl_64 *to); void vndevice_init(void); int vndevice_root_image(char * path, char devname[], dev_t * dev_p); static int vniocattach_file(struct vn_softc *vn, - struct vn_ioctl_64 *vniop, - dev_t dev, - int in_kernel, - proc_t p); + struct vn_ioctl_64 *vniop, + dev_t dev, + int in_kernel, + proc_t p); static int vniocattach_shadow(struct vn_softc * vn, - struct vn_ioctl_64 *vniop, - dev_t dev, - int in_kernel, - proc_t p); + struct vn_ioctl_64 *vniop, + dev_t dev, + int in_kernel, + proc_t p); static __inline__ int vnunit(dev_t dev) { - return (minor(dev)); + return minor(dev); } -static int -vnclose(__unused dev_t dev, __unused int flags, - __unused int devtype, __unused proc_t p) +static int +vnclose(__unused dev_t dev, __unused int flags, + __unused int devtype, __unused proc_t p) { - return (0); + return 0; } -static int +static int vnopen(dev_t dev, int flags, __unused int devtype, __unused proc_t p) { struct vn_softc *vn; @@ -235,87 +235,88 @@ vnopen(dev_t dev, int flags, __unused int devtype, __unused proc_t p) unit = vnunit(dev); if (vnunit(dev) >= NVNDEVICE) { - return (ENXIO); + return ENXIO; } vn = vn_table + unit; - if ((flags & FWRITE) && (vn->sc_flags & VNF_READONLY)) - return (EACCES); + if ((flags & FWRITE) && (vn->sc_flags & VNF_READONLY)) { + return EACCES; + } - return(0); + return 0; } static int -file_io(struct vnode * vp, vfs_context_t ctx, - enum uio_rw op, char * base, off_t offset, user_ssize_t count, - user_ssize_t * resid) +file_io(struct vnode * vp, vfs_context_t ctx, + enum uio_rw op, char * base, off_t offset, user_ssize_t count, + user_ssize_t * resid) { - uio_t auio; - int error; - char uio_buf[UIO_SIZEOF(1)]; - - auio = uio_createwithbuffer(1, offset, UIO_SYSSPACE, op, - &uio_buf[0], sizeof(uio_buf)); + uio_t auio; + int error; + char uio_buf[UIO_SIZEOF(1)]; + + auio = uio_createwithbuffer(1, offset, UIO_SYSSPACE, op, + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, CAST_USER_ADDR_T(base), count); - if (op == UIO_READ) + if (op == UIO_READ) { error = VNOP_READ(vp, auio, IO_SYNC, ctx); - else + } else { error = VNOP_WRITE(vp, auio, IO_SYNC, ctx); + } if (resid != NULL) { *resid = uio_resid(auio); } - return (error); + return error; } static __inline__ off_t block_round(off_t o, int blocksize) { - return ((o + blocksize - 1) / blocksize); + return (o + blocksize - 1) / blocksize; } static __inline__ off_t block_truncate(off_t o, int blocksize) { - return (o / blocksize); + return o / blocksize; } static __inline__ int block_remainder(off_t o, int blocksize) { - return (o % blocksize); + return o % blocksize; } static int -vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, - vfs_context_t ctx) +vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, + vfs_context_t ctx) { - u_int32_t blocksize = vn->sc_secsize; - int error = 0; - off_t offset; - user_ssize_t resid; - off_t orig_offset; - user_ssize_t orig_resid; + u_int32_t blocksize = vn->sc_secsize; + int error = 0; + off_t offset; + user_ssize_t resid; + off_t orig_offset; + user_ssize_t orig_resid; orig_resid = resid = uio_resid(uio); orig_offset = offset = uio_offset(uio); while (resid > 0) { - u_int32_t remainder; - u_int32_t this_block_number; - u_int32_t this_block_count; - off_t this_offset; - 
user_ssize_t this_resid; - struct vnode * vp; + u_int32_t remainder; + u_int32_t this_block_number; + u_int32_t this_block_count; + off_t this_offset; + user_ssize_t this_resid; + struct vnode * vp; /* figure out which blocks to read */ remainder = block_remainder(offset, blocksize); if (shadow_map_read(vn->sc_shadow_map, - block_truncate(offset, blocksize), - block_round(resid + remainder, blocksize), - &this_block_number, &this_block_count)) { + block_truncate(offset, blocksize), + block_round(resid + remainder, blocksize), + &this_block_number, &this_block_count)) { vp = vn->sc_shadow_vp; - } - else { + } else { vp = vn->sc_vp; } @@ -343,34 +344,34 @@ vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, } uio_setresid(uio, resid); uio_setoffset(uio, offset); - return (error); + return error; } static int vncopy_block_to_shadow(struct vn_softc * vn, vfs_context_t ctx, - u_int32_t file_block, u_int32_t shadow_block) + u_int32_t file_block, u_int32_t shadow_block) { - int error; - char * tmpbuf; + int error; + char * tmpbuf; tmpbuf = _MALLOC(vn->sc_secsize, M_TEMP, M_WAITOK); if (tmpbuf == NULL) { - return (ENOMEM); + return ENOMEM; } /* read one block from file at file_block offset */ error = file_io(vn->sc_vp, ctx, UIO_READ, - tmpbuf, (off_t)file_block * vn->sc_secsize, - vn->sc_secsize, NULL); + tmpbuf, (off_t)file_block * vn->sc_secsize, + vn->sc_secsize, NULL); if (error) { goto done; } /* write one block to shadow file at shadow_block offset */ error = file_io(vn->sc_shadow_vp, ctx, UIO_WRITE, - tmpbuf, (off_t)shadow_block * vn->sc_secsize, - vn->sc_secsize, NULL); - done: + tmpbuf, (off_t)shadow_block * vn->sc_secsize, + vn->sc_secsize, NULL); +done: FREE(tmpbuf, M_TEMP); - return (error); + return error; } enum { @@ -379,25 +380,25 @@ enum { }; static int -vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, - vfs_context_t ctx) +vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, + vfs_context_t ctx) { - u_int32_t blocksize = vn->sc_secsize; - int error = 0; - user_ssize_t resid; - off_t offset; + u_int32_t blocksize = vn->sc_secsize; + int error = 0; + user_ssize_t resid; + off_t offset; resid = uio_resid(uio); offset = uio_offset(uio); while (resid > 0) { - int flags = 0; - u_int32_t offset_block_number; - u_int32_t remainder; - u_int32_t resid_block_count; - u_int32_t shadow_block_count; - u_int32_t shadow_block_number; - user_ssize_t this_resid; + int flags = 0; + u_int32_t offset_block_number; + u_int32_t remainder; + u_int32_t resid_block_count; + u_int32_t shadow_block_count; + u_int32_t shadow_block_number; + user_ssize_t this_resid; /* figure out which blocks to write */ offset_block_number = block_truncate(offset, blocksize); @@ -406,51 +407,51 @@ vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, /* figure out if the first or last blocks are partial writes */ if (remainder > 0 && !shadow_map_is_written(vn->sc_shadow_map, - offset_block_number)) { + offset_block_number)) { /* the first block is a partial write */ flags |= FLAGS_FIRST_BLOCK_PARTIAL; } if (resid_block_count > 1 && !shadow_map_is_written(vn->sc_shadow_map, - offset_block_number - + resid_block_count - 1) + offset_block_number + + resid_block_count - 1) && block_remainder(offset + resid, blocksize) > 0) { /* the last block is a partial write */ flags |= FLAGS_LAST_BLOCK_PARTIAL; } if (shadow_map_write(vn->sc_shadow_map, - offset_block_number, resid_block_count, - &shadow_block_number, - &shadow_block_count)) { + offset_block_number, resid_block_count, + 
&shadow_block_number, + &shadow_block_count)) { /* shadow file is growing */ #if 0 /* truncate the file to its new length before write */ - off_t size; - size = (off_t)shadow_map_shadow_size(vn->sc_shadow_map) - * vn->sc_secsize; + off_t size; + size = (off_t)shadow_map_shadow_size(vn->sc_shadow_map) + * vn->sc_secsize; vnode_setsize(vn->sc_shadow_vp, size, IO_SYNC, ctx); #endif } /* write the blocks (or parts thereof) */ uio_setoffset(uio, (off_t) - shadow_block_number * blocksize + remainder); + shadow_block_number * blocksize + remainder); this_resid = (off_t)shadow_block_count * blocksize - remainder; if (this_resid >= resid) { this_resid = resid; if ((flags & FLAGS_LAST_BLOCK_PARTIAL) != 0) { /* copy the last block to the shadow */ - u_int32_t d; - u_int32_t s; + u_int32_t d; + u_int32_t s; - s = offset_block_number - + resid_block_count - 1; - d = shadow_block_number - + shadow_block_count - 1; + s = offset_block_number + + resid_block_count - 1; + d = shadow_block_number + + shadow_block_count - 1; error = vncopy_block_to_shadow(vn, ctx, s, d); if (error) { printf("vnwrite_shadow: failed to copy" - " block %u to shadow block %u\n", - s, d); + " block %u to shadow block %u\n", + s, d); break; } } @@ -459,13 +460,13 @@ vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, if ((flags & FLAGS_FIRST_BLOCK_PARTIAL) != 0) { /* copy the first block to the shadow */ error = vncopy_block_to_shadow(vn, ctx, - offset_block_number, - shadow_block_number); + offset_block_number, + shadow_block_number); if (error) { printf("vnwrite_shadow: failed to" - " copy block %u to shadow block %u\n", - offset_block_number, - shadow_block_number); + " copy block %u to shadow block %u\n", + offset_block_number, + shadow_block_number); break; } } @@ -484,23 +485,23 @@ vnwrite_shadow(struct vn_softc * vn, struct uio *uio, int ioflag, } uio_setresid(uio, resid); uio_setoffset(uio, offset); - return (error); + return error; } -static int +static int vnread(dev_t dev, struct uio *uio, int ioflag) { - struct vfs_context context; - int error = 0; - off_t offset; - proc_t p; - user_ssize_t resid; - struct vn_softc * vn; - int unit; + struct vfs_context context; + int error = 0; + off_t offset; + proc_t p; + user_ssize_t resid; + struct vn_softc * vn; + int unit; unit = vnunit(dev); if (vnunit(dev) >= NVNDEVICE) { - return (ENXIO); + return ENXIO; } p = current_proc(); vn = vn_table + unit; @@ -543,7 +544,7 @@ vnread(dev_t dev, struct uio *uio, int ioflag) if (vn->sc_shadow_vp != NULL) { error = vnode_getwithvid(vn->sc_shadow_vp, - vn->sc_shadow_vid); + vn->sc_shadow_vid); if (error != 0) { /* the vnode is no longer available, abort */ error = ENXIO; @@ -557,24 +558,24 @@ vnread(dev_t dev, struct uio *uio, int ioflag) error = VNOP_READ(vn->sc_vp, uio, ioflag, &context); } vnode_put(vn->sc_vp); - done: - return (error); +done: + return error; } -static int +static int vnwrite(dev_t dev, struct uio *uio, int ioflag) { - struct vfs_context context; - int error; - off_t offset; - proc_t p; - user_ssize_t resid; - struct vn_softc * vn; - int unit; + struct vfs_context context; + int error; + off_t offset; + proc_t p; + user_ssize_t resid; + struct vn_softc * vn; + int unit; unit = vnunit(dev); if (vnunit(dev) >= NVNDEVICE) { - return (ENXIO); + return ENXIO; } p = current_proc(); vn = vn_table + unit; @@ -620,7 +621,7 @@ vnwrite(dev_t dev, struct uio *uio, int ioflag) if (vn->sc_shadow_vp != NULL) { error = vnode_getwithvid(vn->sc_shadow_vp, - vn->sc_shadow_vid); + vn->sc_shadow_vid); if (error != 0) { /* the 
vnode is no longer available, abort */ error = ENXIO; @@ -634,42 +635,41 @@ vnwrite(dev_t dev, struct uio *uio, int ioflag) error = VNOP_WRITE(vn->sc_vp, uio, ioflag, &context); } vnode_put(vn->sc_vp); - done: - return (error); +done: + return error; } static int shadow_read(struct vn_softc * vn, struct buf * bp, char * base, - vfs_context_t ctx) + vfs_context_t ctx) { - u_int32_t blocksize = vn->sc_secsize; - int error = 0; - u_int32_t offset; - boolean_t read_shadow; - u_int32_t resid; - u_int32_t start = 0; + u_int32_t blocksize = vn->sc_secsize; + int error = 0; + u_int32_t offset; + boolean_t read_shadow; + u_int32_t resid; + u_int32_t start = 0; offset = buf_blkno(bp); resid = buf_resid(bp) / blocksize; while (resid > 0) { - user_ssize_t temp_resid; - u_int32_t this_offset; - u_int32_t this_resid; - struct vnode * vp; + user_ssize_t temp_resid; + u_int32_t this_offset; + u_int32_t this_resid; + struct vnode * vp; read_shadow = shadow_map_read(vn->sc_shadow_map, - offset, resid, - &this_offset, &this_resid); + offset, resid, + &this_offset, &this_resid); if (read_shadow) { vp = vn->sc_shadow_vp; - } - else { + } else { vp = vn->sc_vp; } error = file_io(vp, ctx, UIO_READ, base + start, - (off_t)this_offset * blocksize, - (user_ssize_t)this_resid * blocksize, - &temp_resid); + (off_t)this_offset * blocksize, + (user_ssize_t)this_resid * blocksize, + &temp_resid); if (error) { break; } @@ -683,44 +683,44 @@ shadow_read(struct vn_softc * vn, struct buf * bp, char * base, start += this_resid * blocksize; } buf_setresid(bp, resid * blocksize); - return (error); + return error; } static int -shadow_write(struct vn_softc * vn, struct buf * bp, char * base, - vfs_context_t ctx) +shadow_write(struct vn_softc * vn, struct buf * bp, char * base, + vfs_context_t ctx) { - u_int32_t blocksize = vn->sc_secsize; - int error = 0; - u_int32_t offset; - boolean_t shadow_grew; - u_int32_t resid; - u_int32_t start = 0; + u_int32_t blocksize = vn->sc_secsize; + int error = 0; + u_int32_t offset; + boolean_t shadow_grew; + u_int32_t resid; + u_int32_t start = 0; offset = buf_blkno(bp); resid = buf_resid(bp) / blocksize; while (resid > 0) { - user_ssize_t temp_resid; - u_int32_t this_offset; - u_int32_t this_resid; + user_ssize_t temp_resid; + u_int32_t this_offset; + u_int32_t this_resid; - shadow_grew = shadow_map_write(vn->sc_shadow_map, - offset, resid, - &this_offset, &this_resid); + shadow_grew = shadow_map_write(vn->sc_shadow_map, + offset, resid, + &this_offset, &this_resid); if (shadow_grew) { #if 0 - off_t size; + off_t size; /* truncate the file to its new length before write */ - size = (off_t)shadow_map_shadow_size(vn->sc_shadow_map) - * blocksize; + size = (off_t)shadow_map_shadow_size(vn->sc_shadow_map) + * blocksize; vnode_setsize(vn->sc_shadow_vp, size, IO_SYNC, ctx); #endif } - error = file_io(vn->sc_shadow_vp, ctx, UIO_WRITE, - base + start, - (off_t)this_offset * blocksize, - (user_ssize_t)this_resid * blocksize, - &temp_resid); + error = file_io(vn->sc_shadow_vp, ctx, UIO_WRITE, + base + start, + (off_t)this_offset * blocksize, + (user_ssize_t)this_resid * blocksize, + &temp_resid); if (error) { break; } @@ -734,39 +734,40 @@ shadow_write(struct vn_softc * vn, struct buf * bp, char * base, start += this_resid * blocksize; } buf_setresid(bp, resid * blocksize); - return (error); + return error; } static int vn_readwrite_io(struct vn_softc * vn, struct buf * bp, vfs_context_t ctx) { - int error = 0; - char * iov_base; - caddr_t vaddr; + int error = 0; + char * iov_base; + caddr_t vaddr; - 
if (buf_map(bp, &vaddr)) - panic("vn device: buf_map failed"); + if (buf_map(bp, &vaddr)) { + panic("vn device: buf_map failed"); + } iov_base = (char *)vaddr; if (vn->sc_shadow_vp == NULL) { - user_ssize_t temp_resid; + user_ssize_t temp_resid; error = file_io(vn->sc_vp, ctx, - buf_flags(bp) & B_READ ? UIO_READ : UIO_WRITE, - iov_base, - (off_t)buf_blkno(bp) * vn->sc_secsize, - buf_resid(bp), &temp_resid); + buf_flags(bp) & B_READ ? UIO_READ : UIO_WRITE, + iov_base, + (off_t)buf_blkno(bp) * vn->sc_secsize, + buf_resid(bp), &temp_resid); buf_setresid(bp, temp_resid); - } - else { - if (buf_flags(bp) & B_READ) + } else { + if (buf_flags(bp) & B_READ) { error = shadow_read(vn, bp, iov_base, ctx); - else + } else { error = shadow_write(vn, bp, iov_base, ctx); + } } buf_unmap(bp); - return (error); + return error; } static void @@ -774,11 +775,11 @@ vnstrategy(struct buf *bp) { struct vn_softc *vn; int error = 0; - long sz; /* in sc_secsize chunks */ + long sz; /* in sc_secsize chunks */ daddr64_t blk_num; - struct vnode * shadow_vp = NULL; - struct vnode * vp = NULL; - struct vfs_context context; + struct vnode * shadow_vp = NULL; + struct vnode * vp = NULL; + struct vfs_context context; vn = vn_table + vnunit(buf_device(bp)); if ((vn->sc_flags & VNF_INITED) == 0) { @@ -834,7 +835,7 @@ vnstrategy(struct buf *bp) shadow_vp = vn->sc_shadow_vp; if (shadow_vp != NULL) { error = vnode_getwithvid(shadow_vp, - vn->sc_shadow_vid); + vn->sc_shadow_vid); if (error != 0) { /* the vnode is no longer available, abort */ error = ENXIO; @@ -850,19 +851,19 @@ vnstrategy(struct buf *bp) vnode_put(shadow_vp); } - done: +done: if (error) { - buf_seterror(bp, error); + buf_seterror(bp, error); } buf_biodone(bp); return; } /* ARGSUSED */ -static int +static int vnioctl(dev_t dev, u_long cmd, caddr_t data, - __unused int flag, proc_t p, - int is_char) + __unused int flag, proc_t p, + int is_char) { struct vn_softc *vn; struct vn_ioctl_64 *viop; @@ -872,11 +873,11 @@ vnioctl(dev_t dev, u_long cmd, caddr_t data, int unit; struct vfsioattr ioattr; struct vn_ioctl_64 user_vnio; - struct vfs_context context; + struct vfs_context context; unit = vnunit(dev); if (vnunit(dev) >= NVNDEVICE) { - return (ENXIO); + return ENXIO; } vn = vn_table + unit; @@ -917,10 +918,11 @@ vnioctl(dev_t dev, u_long cmd, caddr_t data, break; } - if (vn->sc_vp != NULL) + if (vn->sc_vp != NULL) { vfs_ioattr(vnode_mount(vn->sc_vp), &ioattr); - else + } else { bzero(&ioattr, sizeof(ioattr)); + } switch (cmd) { case DKIOCISVIRTUAL: @@ -951,7 +953,7 @@ vnioctl(dev_t dev, u_long cmd, caddr_t data, *o = ioattr.io_maxsegwritesize; break; case DKIOCGETBLOCKSIZE: - *f = vn->sc_secsize; + *f = vn->sc_secsize; break; case DKIOCSETBLOCKSIZE: if (is_char) { @@ -1086,20 +1088,20 @@ vnioctl(dev_t dev, u_long cmd, caddr_t data, error = ENOTTY; break; } - done: - return(error); +done: + return error; } -static int +static int vnioctl_chr(dev_t dev, u_long cmd, caddr_t data, int flag, proc_t p) { - return (vnioctl(dev, cmd, data, flag, p, TRUE)); + return vnioctl(dev, cmd, data, flag, p, TRUE); } -static int +static int vnioctl_blk(dev_t dev, u_long cmd, caddr_t data, int flag, proc_t p) { - return (vnioctl(dev, cmd, data, flag, p, FALSE)); + return vnioctl(dev, cmd, data, flag, p, FALSE); } /* @@ -1111,58 +1113,55 @@ vnioctl_blk(dev_t dev, u_long cmd, caddr_t data, int flag, proc_t p) static int vniocattach_file(struct vn_softc *vn, - struct vn_ioctl_64 *vniop, - dev_t dev, - int in_kernel, - proc_t p) + struct vn_ioctl_64 *vniop, + dev_t dev, + int in_kernel, 
+ proc_t p) { - dev_t cdev; + dev_t cdev; vfs_context_t ctx = vfs_context_current(); kauth_cred_t cred; struct nameidata nd; off_t file_size; int error, flags; - flags = FREAD|FWRITE; + flags = FREAD | FWRITE; if (in_kernel) { NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE, vniop->vn_file, ctx); - } - else { - NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, - (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), - vniop->vn_file, ctx); + } else { + NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, + (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), + vniop->vn_file, ctx); } /* vn_open gives both long- and short-term references */ error = vn_open(&nd, flags, 0); if (error) { if (error != EACCES && error != EPERM && error != EROFS) { - return (error); + return error; } flags &= ~FWRITE; if (in_kernel) { - NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE, - vniop->vn_file, ctx); - } - else { - NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, - (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), - vniop->vn_file, ctx); + NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE, + vniop->vn_file, ctx); + } else { + NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, + (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), + vniop->vn_file, ctx); } error = vn_open(&nd, flags, 0); if (error) { - return (error); + return error; } } if (nd.ni_vp->v_type != VREG) { error = EINVAL; - } - else { + } else { error = vnode_size(nd.ni_vp, &file_size, ctx); } if (error != 0) { (void) vn_close(nd.ni_vp, flags, ctx); vnode_put(nd.ni_vp); - return (error); + return error; } cred = kauth_cred_proc_ref(p); nd.ni_vp->v_flag |= VNOCACHE_DATA; @@ -1171,7 +1170,7 @@ vniocattach_file(struct vn_softc *vn, (void)vn_close(nd.ni_vp, flags, ctx); vnode_put(nd.ni_vp); kauth_cred_unref(&cred); - return(error); + return error; } vn->sc_secsize = DEV_BSIZE; vn->sc_fsize = file_size; @@ -1182,55 +1181,55 @@ vniocattach_file(struct vn_softc *vn, vn->sc_cred = cred; cdev = makedev(vndevice_cdev_major, minor(dev)); vn->sc_cdev = devfs_make_node(cdev, DEVFS_CHAR, - UID_ROOT, GID_OPERATOR, - 0600, "rvn%d", - minor(dev)); + UID_ROOT, GID_OPERATOR, + 0600, "rvn%d", + minor(dev)); vn->sc_flags |= VNF_INITED; - if (flags == FREAD) + if (flags == FREAD) { vn->sc_flags |= VNF_READONLY; + } /* lose the short-term reference */ vnode_put(nd.ni_vp); - return(0); + return 0; } static int -vniocattach_shadow(struct vn_softc *vn, struct vn_ioctl_64 *vniop, - __unused dev_t dev, int in_kernel, proc_t p) +vniocattach_shadow(struct vn_softc *vn, struct vn_ioctl_64 *vniop, + __unused dev_t dev, int in_kernel, proc_t p) { vfs_context_t ctx = vfs_context_current(); struct nameidata nd; int error, flags; - shadow_map_t * map; + shadow_map_t * map; off_t file_size; - flags = FREAD|FWRITE; + flags = FREAD | FWRITE; if (in_kernel) { NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE, vniop->vn_file, ctx); - } - else { - NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, - (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), - vniop->vn_file, ctx); + } else { + NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, + (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), + vniop->vn_file, ctx); } /* vn_open gives both long- and short-term references */ error = vn_open(&nd, flags, 0); if (error) { /* shadow MUST be writable! */ - return (error); + return error; } - if (nd.ni_vp->v_type != VREG + if (nd.ni_vp->v_type != VREG || (error = vnode_size(nd.ni_vp, &file_size, ctx))) { (void)vn_close(nd.ni_vp, flags, ctx); vnode_put(nd.ni_vp); - return (error ? error : EINVAL); + return error ? 
error : EINVAL; } map = shadow_map_create(vn->sc_fsize, file_size, - 0, vn->sc_secsize); + 0, vn->sc_secsize); if (map == NULL) { (void)vn_close(nd.ni_vp, flags, ctx); vnode_put(nd.ni_vp); vn->sc_shadow_vp = NULL; - return (ENOMEM); + return ENOMEM; } vn->sc_shadow_vp = nd.ni_vp; vn->sc_shadow_vid = vnode_vid(nd.ni_vp); @@ -1240,25 +1239,25 @@ vniocattach_shadow(struct vn_softc *vn, struct vn_ioctl_64 *vniop, /* lose the short-term reference */ vnode_put(nd.ni_vp); - return(0); + return 0; } int vndevice_root_image(char * path, char devname[], dev_t * dev_p) { - int error = 0; - struct vn_softc * vn; - struct vn_ioctl_64 vnio; + int error = 0; + struct vn_softc * vn; + struct vn_ioctl_64 vnio; vnio.vn_file = CAST_USER_ADDR_T(path); vnio.vn_size = 0; vn = vn_table + ROOT_IMAGE_UNIT; - *dev_p = makedev(vndevice_bdev_major, - ROOT_IMAGE_UNIT); + *dev_p = makedev(vndevice_bdev_major, + ROOT_IMAGE_UNIT); snprintf(devname, 16, "vn%d", ROOT_IMAGE_UNIT); error = vniocattach_file(vn, &vnio, *dev_p, 1, current_proc()); - return (error); + return error; } /* @@ -1272,7 +1271,7 @@ setcred(struct vnode * vp, kauth_cred_t cred) { char *tmpbuf; int error = 0; - struct vfs_context context; + struct vfs_context context; /* * Horrible kludge to establish credentials for NFS XXX. @@ -1282,7 +1281,7 @@ setcred(struct vnode * vp, kauth_cred_t cred) tmpbuf = _MALLOC(DEV_BSIZE, M_TEMP, M_WAITOK); error = file_io(vp, &context, UIO_READ, tmpbuf, 0, DEV_BSIZE, NULL); FREE(tmpbuf, M_TEMP); - return (error); + return error; } void @@ -1314,72 +1313,75 @@ vnclear(struct vn_softc *vn, vfs_context_t ctx) } } -static int +static int vnsize(dev_t dev) { - int secsize; + int secsize; struct vn_softc *vn; int unit; unit = vnunit(dev); if (vnunit(dev) >= NVNDEVICE) { - return (-1); + return -1; } vn = vn_table + unit; - if ((vn->sc_flags & VNF_INITED) == 0) + if ((vn->sc_flags & VNF_INITED) == 0) { secsize = -1; - else + } else { secsize = vn->sc_secsize; + } - return (secsize); + return secsize; } -#define CDEV_MAJOR -1 -#define BDEV_MAJOR -1 +#define CDEV_MAJOR -1 +#define BDEV_MAJOR -1 static int vndevice_inited = 0; -void +void vndevice_init(void) { - if (vndevice_inited) + if (vndevice_inited) { return; - + } + vndevice_do_init(); } - -static void + +static void vndevice_do_init( void ) { - int i; + int i; vndevice_bdev_major = bdevsw_add(BDEV_MAJOR, &vn_bdevsw); if (vndevice_bdev_major < 0) { printf("vndevice_init: bdevsw_add() returned %d\n", - vndevice_bdev_major); + vndevice_bdev_major); return; } vndevice_cdev_major = cdevsw_add_with_bdev(CDEV_MAJOR, &vn_cdevsw, - vndevice_bdev_major); + vndevice_bdev_major); if (vndevice_cdev_major < 0) { printf("vndevice_init: cdevsw_add() returned %d\n", - vndevice_cdev_major); + vndevice_cdev_major); return; } for (i = 0; i < NVNDEVICE; i++) { - dev_t dev = makedev(vndevice_bdev_major, i); + dev_t dev = makedev(vndevice_bdev_major, i); vn_table[i].sc_bdev = devfs_make_node(dev, DEVFS_BLOCK, - UID_ROOT, GID_OPERATOR, - 0600, "vn%d", - i); - if (vn_table[i].sc_bdev == NULL) + UID_ROOT, GID_OPERATOR, + 0600, "vn%d", + i); + if (vn_table[i].sc_bdev == NULL) { printf("vninit: devfs_make_node failed!\n"); + } } } -static void -vn_ioctl_to_64(struct vn_ioctl_32 *from, struct vn_ioctl_64 *to) +static void +vn_ioctl_to_64(struct vn_ioctl_32 *from, struct vn_ioctl_64 *to) { to->vn_file = CAST_USER_ADDR_T(from->vn_file); to->vn_size = from->vn_size; diff --git a/bsd/i386/_limits.h b/bsd/i386/_limits.h index 3b9e7a6f7..0d46e8511 100644 --- a/bsd/i386/_limits.h +++ b/bsd/i386/_limits.h @@ 
-2,13 +2,13 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * The contents of this file constitute Original Code as defined in and * are subject to the Apple Public Source License Version 1.1 (the * "License"). You may not use this file except in compliance with the * License. Please obtain a copy of the License at * http://www.apple.com/publicsource and read it before using this file. - * + * * This Original Code and all software distributed under the License are * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -16,12 +16,12 @@ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the * License for the specific language governing rights and limitations * under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ -#ifndef _I386__LIMITS_H_ -#define _I386__LIMITS_H_ +#ifndef _I386__LIMITS_H_ +#define _I386__LIMITS_H_ -#define __DARWIN_CLK_TCK 100 /* ticks per second */ +#define __DARWIN_CLK_TCK 100 /* ticks per second */ -#endif /* _I386__LIMITS_H_ */ +#endif /* _I386__LIMITS_H_ */ diff --git a/bsd/i386/_mcontext.h b/bsd/i386/_mcontext.h index e2544d076..ee3dfe990 100644 --- a/bsd/i386/_mcontext.h +++ b/bsd/i386/_mcontext.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,124 +35,164 @@ #ifndef _STRUCT_MCONTEXT32 #if __DARWIN_UNIX03 -#define _STRUCT_MCONTEXT32 struct __darwin_mcontext32 +#define _STRUCT_MCONTEXT32 struct __darwin_mcontext32 _STRUCT_MCONTEXT32 { - _STRUCT_X86_EXCEPTION_STATE32 __es; - _STRUCT_X86_THREAD_STATE32 __ss; - _STRUCT_X86_FLOAT_STATE32 __fs; + _STRUCT_X86_EXCEPTION_STATE32 __es; + _STRUCT_X86_THREAD_STATE32 __ss; + _STRUCT_X86_FLOAT_STATE32 __fs; }; -#define _STRUCT_MCONTEXT_AVX32 struct __darwin_mcontext_avx32 +#define _STRUCT_MCONTEXT_AVX32 struct __darwin_mcontext_avx32 _STRUCT_MCONTEXT_AVX32 { - _STRUCT_X86_EXCEPTION_STATE32 __es; - _STRUCT_X86_THREAD_STATE32 __ss; - _STRUCT_X86_AVX_STATE32 __fs; + _STRUCT_X86_EXCEPTION_STATE32 __es; + _STRUCT_X86_THREAD_STATE32 __ss; + _STRUCT_X86_AVX_STATE32 __fs; }; -#if !defined(RC_HIDE_XNU_J137) #if defined(_STRUCT_X86_AVX512_STATE32) -#define _STRUCT_MCONTEXT_AVX512_32 struct __darwin_mcontext_avx512_32 +#define _STRUCT_MCONTEXT_AVX512_32 struct __darwin_mcontext_avx512_32 _STRUCT_MCONTEXT_AVX512_32 { - _STRUCT_X86_EXCEPTION_STATE32 __es; - _STRUCT_X86_THREAD_STATE32 __ss; - _STRUCT_X86_AVX512_STATE32 __fs; + _STRUCT_X86_EXCEPTION_STATE32 __es; + _STRUCT_X86_THREAD_STATE32 __ss; + _STRUCT_X86_AVX512_STATE32 __fs; }; #endif /* _STRUCT_X86_AVX512_STATE32 */ -#endif /* RC_HIDE_XNU_J137 */ #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_MCONTEXT32 struct mcontext32 +#define _STRUCT_MCONTEXT32 struct mcontext32 _STRUCT_MCONTEXT32 { - _STRUCT_X86_EXCEPTION_STATE32 es; - _STRUCT_X86_THREAD_STATE32 ss; - _STRUCT_X86_FLOAT_STATE32 fs; + _STRUCT_X86_EXCEPTION_STATE32 es; + _STRUCT_X86_THREAD_STATE32 ss; + _STRUCT_X86_FLOAT_STATE32 fs; }; -#define _STRUCT_MCONTEXT_AVX32 struct mcontext_avx32 +#define _STRUCT_MCONTEXT_AVX32 struct mcontext_avx32 _STRUCT_MCONTEXT_AVX32 { - _STRUCT_X86_EXCEPTION_STATE32 es; - _STRUCT_X86_THREAD_STATE32 ss; - _STRUCT_X86_AVX_STATE32 fs; + _STRUCT_X86_EXCEPTION_STATE32 es; + _STRUCT_X86_THREAD_STATE32 ss; + _STRUCT_X86_AVX_STATE32 fs; }; -#if !defined(RC_HIDE_XNU_J137) #if defined(_STRUCT_X86_AVX512_STATE32) -#define _STRUCT_MCONTEXT_AVX512_32 struct mcontext_avx512_32 +#define _STRUCT_MCONTEXT_AVX512_32 struct mcontext_avx512_32 _STRUCT_MCONTEXT_AVX512_32 { - _STRUCT_X86_EXCEPTION_STATE32 es; - _STRUCT_X86_THREAD_STATE32 ss; - _STRUCT_X86_AVX512_STATE32 fs; + _STRUCT_X86_EXCEPTION_STATE32 es; + _STRUCT_X86_THREAD_STATE32 ss; + _STRUCT_X86_AVX512_STATE32 fs; }; #endif /* _STRUCT_X86_AVX512_STATE32 */ -#endif /* RC_HIDE_XNU_J137 */ #endif /* __DARWIN_UNIX03 */ #endif /* _STRUCT_MCONTEXT32 */ #ifndef _STRUCT_MCONTEXT64 #if __DARWIN_UNIX03 -#define _STRUCT_MCONTEXT64 struct __darwin_mcontext64 +#define _STRUCT_MCONTEXT64 struct __darwin_mcontext64 _STRUCT_MCONTEXT64 { - _STRUCT_X86_EXCEPTION_STATE64 __es; - _STRUCT_X86_THREAD_STATE64 __ss; - _STRUCT_X86_FLOAT_STATE64 __fs; + _STRUCT_X86_EXCEPTION_STATE64 __es; + _STRUCT_X86_THREAD_STATE64 __ss; + _STRUCT_X86_FLOAT_STATE64 __fs; }; -#define _STRUCT_MCONTEXT_AVX64 struct __darwin_mcontext_avx64 +#define _STRUCT_MCONTEXT64_FULL struct __darwin_mcontext64_full +_STRUCT_MCONTEXT64_FULL +{ + _STRUCT_X86_EXCEPTION_STATE64 __es; + _STRUCT_X86_THREAD_FULL_STATE64 __ss; + _STRUCT_X86_FLOAT_STATE64 __fs; +}; + +#define _STRUCT_MCONTEXT_AVX64 struct __darwin_mcontext_avx64 _STRUCT_MCONTEXT_AVX64 { - _STRUCT_X86_EXCEPTION_STATE64 __es; - _STRUCT_X86_THREAD_STATE64 __ss; - _STRUCT_X86_AVX_STATE64 __fs; + _STRUCT_X86_EXCEPTION_STATE64 __es; + _STRUCT_X86_THREAD_STATE64 __ss; + 
_STRUCT_X86_AVX_STATE64 __fs; +}; + +#define _STRUCT_MCONTEXT_AVX64_FULL struct __darwin_mcontext_avx64_full +_STRUCT_MCONTEXT_AVX64_FULL +{ + _STRUCT_X86_EXCEPTION_STATE64 __es; + _STRUCT_X86_THREAD_FULL_STATE64 __ss; + _STRUCT_X86_AVX_STATE64 __fs; }; -#if !defined(RC_HIDE_XNU_J137) #if defined(_STRUCT_X86_AVX512_STATE64) -#define _STRUCT_MCONTEXT_AVX512_64 struct __darwin_mcontext_avx512_64 +#define _STRUCT_MCONTEXT_AVX512_64 struct __darwin_mcontext_avx512_64 _STRUCT_MCONTEXT_AVX512_64 { - _STRUCT_X86_EXCEPTION_STATE64 __es; - _STRUCT_X86_THREAD_STATE64 __ss; - _STRUCT_X86_AVX512_STATE64 __fs; + _STRUCT_X86_EXCEPTION_STATE64 __es; + _STRUCT_X86_THREAD_STATE64 __ss; + _STRUCT_X86_AVX512_STATE64 __fs; +}; + +#define _STRUCT_MCONTEXT_AVX512_64_FULL struct __darwin_mcontext_avx512_64_full +_STRUCT_MCONTEXT_AVX512_64_FULL +{ + _STRUCT_X86_EXCEPTION_STATE64 __es; + _STRUCT_X86_THREAD_FULL_STATE64 __ss; + _STRUCT_X86_AVX512_STATE64 __fs; }; #endif /* _STRUCT_X86_AVX512_STATE64 */ -#endif /* RC_HIDE_XNU_J137 */ #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_MCONTEXT64 struct mcontext64 +#define _STRUCT_MCONTEXT64 struct mcontext64 _STRUCT_MCONTEXT64 { - _STRUCT_X86_EXCEPTION_STATE64 es; - _STRUCT_X86_THREAD_STATE64 ss; - _STRUCT_X86_FLOAT_STATE64 fs; + _STRUCT_X86_EXCEPTION_STATE64 es; + _STRUCT_X86_THREAD_STATE64 ss; + _STRUCT_X86_FLOAT_STATE64 fs; +}; + +#define _STRUCT_MCONTEXT64_FULL struct mcontext64_full +_STRUCT_MCONTEXT64_FULL +{ + _STRUCT_X86_EXCEPTION_STATE64 es; + _STRUCT_X86_THREAD_FULL_STATE64 ss; + _STRUCT_X86_FLOAT_STATE64 fs; }; -#define _STRUCT_MCONTEXT_AVX64 struct mcontext_avx64 +#define _STRUCT_MCONTEXT_AVX64 struct mcontext_avx64 _STRUCT_MCONTEXT_AVX64 { - _STRUCT_X86_EXCEPTION_STATE64 es; - _STRUCT_X86_THREAD_STATE64 ss; - _STRUCT_X86_AVX_STATE64 fs; + _STRUCT_X86_EXCEPTION_STATE64 es; + _STRUCT_X86_THREAD_STATE64 ss; + _STRUCT_X86_AVX_STATE64 fs; +}; + +#define _STRUCT_MCONTEXT_AVX64_FULL struct mcontext_avx64_full +_STRUCT_MCONTEXT_AVX64_FULL +{ + _STRUCT_X86_EXCEPTION_STATE64 es; + _STRUCT_X86_THREAD_FULL_STATE64 ss; + _STRUCT_X86_AVX_STATE64 fs; }; -#if !defined(RC_HIDE_XNU_J137) #if defined(_STRUCT_X86_AVX512_STATE64) -#define _STRUCT_MCONTEXT_AVX512_64 struct mcontext_avx512_64 +#define _STRUCT_MCONTEXT_AVX512_64 struct mcontext_avx512_64 _STRUCT_MCONTEXT_AVX512_64 { - _STRUCT_X86_EXCEPTION_STATE64 es; - _STRUCT_X86_THREAD_STATE64 ss; - _STRUCT_X86_AVX512_STATE64 fs; + _STRUCT_X86_EXCEPTION_STATE64 es; + _STRUCT_X86_THREAD_STATE64 ss; + _STRUCT_X86_AVX512_STATE64 fs; +}; + +#define _STRUCT_MCONTEXT_AVX512_64_FULL struct mcontext_avx512_64_full +_STRUCT_MCONTEXT_AVX512_64_FULL +{ + _STRUCT_X86_EXCEPTION_STATE64 es; + _STRUCT_X86_THREAD_FULL_STATE64 ss; + _STRUCT_X86_AVX512_STATE64 fs; }; #endif /* _STRUCT_X86_AVX512_STATE64 */ -#endif /* RC_HIDE_XNU_J137 */ #endif /* __DARWIN_UNIX03 */ #endif /* _STRUCT_MCONTEXT64 */ @@ -161,11 +201,11 @@ _STRUCT_MCONTEXT_AVX512_64 #ifndef _MCONTEXT_T #define _MCONTEXT_T #if defined(__LP64__) -typedef _STRUCT_MCONTEXT64 *mcontext_t; +typedef _STRUCT_MCONTEXT64 *mcontext_t; #define _STRUCT_MCONTEXT _STRUCT_MCONTEXT64 #else -typedef _STRUCT_MCONTEXT32 *mcontext_t; -#define _STRUCT_MCONTEXT _STRUCT_MCONTEXT32 +typedef _STRUCT_MCONTEXT32 *mcontext_t; +#define _STRUCT_MCONTEXT _STRUCT_MCONTEXT32 #endif #endif /* _MCONTEXT_T */ diff --git a/bsd/i386/_param.h b/bsd/i386/_param.h index 61f0d24c3..3a0ac8bba 100644 --- a/bsd/i386/_param.h +++ b/bsd/i386/_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -36,8 +36,8 @@ * data types (int, long, ...). The result is unsigned int and must be * cast to any desired pointer type. */ -#define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1) -#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES) +#define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1) +#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES) #define __DARWIN_ALIGNBYTES32 (sizeof(__uint32_t) - 1) #define __DARWIN_ALIGN32(p) ((__darwin_size_t)((char *)(__darwin_size_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32) diff --git a/bsd/i386/_types.h b/bsd/i386/_types.h index 4df007203..b115ed12d 100644 --- a/bsd/i386/_types.h +++ b/bsd/i386/_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _BSD_I386__TYPES_H_ -#define _BSD_I386__TYPES_H_ +#ifndef _BSD_I386__TYPES_H_ +#define _BSD_I386__TYPES_H_ /* * This header file contains integer types. 
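[Editorial illustration] The __DARWIN_ALIGN/__DARWIN_ALIGNBYTES pair retabbed in the _param.h hunk above rounds a pointer or byte count up to the next multiple of sizeof(__darwin_size_t). A minimal userspace sketch of the same arithmetic, with size_t standing in for __darwin_size_t and demo names that are not part of the header:

    #include <stdio.h>
    #include <stddef.h>

    /*
     * Same round-up-to-word-size trick as __DARWIN_ALIGN above,
     * written against plain size_t for a standalone demo.
     */
    #define DEMO_ALIGNBYTES (sizeof(size_t) - 1)
    #define DEMO_ALIGN(v)   (((size_t)(v) + DEMO_ALIGNBYTES) & ~DEMO_ALIGNBYTES)

    int main(void)
    {
        for (size_t n = 1; n <= 16; n++) {
            printf("%2zu -> %2zu\n", n, DEMO_ALIGN(n));
        }
        return 0;
    }

On an LP64 build sizeof(size_t) is 8, so 1..8 print as 8 and 9..16 as 16.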
It's intended to also contain @@ -34,20 +34,20 @@ */ #ifdef __GNUC__ -typedef __signed char __int8_t; -#else /* !__GNUC__ */ -typedef char __int8_t; -#endif /* !__GNUC__ */ -typedef unsigned char __uint8_t; -typedef short __int16_t; -typedef unsigned short __uint16_t; -typedef int __int32_t; -typedef unsigned int __uint32_t; -typedef long long __int64_t; -typedef unsigned long long __uint64_t; +typedef __signed char __int8_t; +#else /* !__GNUC__ */ +typedef char __int8_t; +#endif /* !__GNUC__ */ +typedef unsigned char __uint8_t; +typedef short __int16_t; +typedef unsigned short __uint16_t; +typedef int __int32_t; +typedef unsigned int __uint32_t; +typedef long long __int64_t; +typedef unsigned long long __uint64_t; -typedef long __darwin_intptr_t; -typedef unsigned int __darwin_natural_t; +typedef long __darwin_intptr_t; +typedef unsigned int __darwin_natural_t; /* * The rune type below is declared to be an ``int'' instead of the more natural @@ -67,56 +67,56 @@ typedef unsigned int __darwin_natural_t; * character set plus one extra value (WEOF). wint_t must be at least 16 bits. */ -typedef int __darwin_ct_rune_t; /* ct_rune_t */ +typedef int __darwin_ct_rune_t; /* ct_rune_t */ /* * mbstate_t is an opaque object to keep conversion state, during multibyte * stream conversions. The content must not be referenced by user programs. */ typedef union { - char __mbstate8[128]; - long long _mbstateL; /* for alignment */ + char __mbstate8[128]; + long long _mbstateL; /* for alignment */ } __mbstate_t; -typedef __mbstate_t __darwin_mbstate_t; /* mbstate_t */ +typedef __mbstate_t __darwin_mbstate_t; /* mbstate_t */ #if defined(__PTRDIFF_TYPE__) -typedef __PTRDIFF_TYPE__ __darwin_ptrdiff_t; /* ptr1 - ptr2 */ +typedef __PTRDIFF_TYPE__ __darwin_ptrdiff_t; /* ptr1 - ptr2 */ #elif defined(__LP64__) -typedef long __darwin_ptrdiff_t; /* ptr1 - ptr2 */ +typedef long __darwin_ptrdiff_t; /* ptr1 - ptr2 */ #else -typedef int __darwin_ptrdiff_t; /* ptr1 - ptr2 */ +typedef int __darwin_ptrdiff_t; /* ptr1 - ptr2 */ #endif /* __GNUC__ */ #if defined(__SIZE_TYPE__) -typedef __SIZE_TYPE__ __darwin_size_t; /* sizeof() */ +typedef __SIZE_TYPE__ __darwin_size_t; /* sizeof() */ #else -typedef unsigned long __darwin_size_t; /* sizeof() */ +typedef unsigned long __darwin_size_t; /* sizeof() */ #endif #if (__GNUC__ > 2) -typedef __builtin_va_list __darwin_va_list; /* va_list */ +typedef __builtin_va_list __darwin_va_list; /* va_list */ #else -typedef void * __darwin_va_list; /* va_list */ +typedef void * __darwin_va_list; /* va_list */ #endif #if defined(__WCHAR_TYPE__) -typedef __WCHAR_TYPE__ __darwin_wchar_t; /* wchar_t */ +typedef __WCHAR_TYPE__ __darwin_wchar_t; /* wchar_t */ #else -typedef __darwin_ct_rune_t __darwin_wchar_t; /* wchar_t */ +typedef __darwin_ct_rune_t __darwin_wchar_t; /* wchar_t */ #endif -typedef __darwin_wchar_t __darwin_rune_t; /* rune_t */ +typedef __darwin_wchar_t __darwin_rune_t; /* rune_t */ #if defined(__WINT_TYPE__) -typedef __WINT_TYPE__ __darwin_wint_t; /* wint_t */ +typedef __WINT_TYPE__ __darwin_wint_t; /* wint_t */ #else -typedef __darwin_ct_rune_t __darwin_wint_t; /* wint_t */ +typedef __darwin_ct_rune_t __darwin_wint_t; /* wint_t */ #endif -typedef unsigned long __darwin_clock_t; /* clock() */ -typedef __uint32_t __darwin_socklen_t; /* socklen_t (duh) */ -typedef long __darwin_ssize_t; /* byte count or error */ -typedef long __darwin_time_t; /* time() */ +typedef unsigned long __darwin_clock_t; /* clock() */ +typedef __uint32_t __darwin_socklen_t; /* socklen_t (duh) */ +typedef long 
__darwin_ssize_t; /* byte count or error */ +typedef long __darwin_time_t; /* time() */ -#endif /* _BSD_I386__TYPES_H_ */ +#endif /* _BSD_I386__TYPES_H_ */ diff --git a/bsd/i386/dis_tables.h b/bsd/i386/dis_tables.h index 6e2ec7f54..b627e5201 100644 --- a/bsd/i386/dis_tables.h +++ b/bsd/i386/dis_tables.h @@ -24,11 +24,11 @@ */ /* Copyright (c) 1988 AT&T */ -/* All Rights Reserved */ +/* All Rights Reserved */ #ifndef _DIS_TABLES_H -#define _DIS_TABLES_H +#define _DIS_TABLES_H /* #pragma ident "@(#)dis_tables.h 1.10 07/07/10 SMI" */ @@ -55,56 +55,56 @@ extern "C" { /* * values for cpu mode */ -#define SIZE16 1 -#define SIZE32 2 -#define SIZE64 3 +#define SIZE16 1 +#define SIZE32 2 +#define SIZE64 3 -#define OPLEN 256 -#define PFIXLEN 8 -#define NCPS 20 /* number of chars per symbol */ +#define OPLEN 256 +#define PFIXLEN 8 +#define NCPS 20 /* number of chars per symbol */ /* * data structures that must be provided to dtrace_dis86() */ typedef struct d86opnd { - char d86_opnd[OPLEN]; /* symbolic rep of operand */ - char d86_prefix[PFIXLEN]; /* any prefix string or "" */ - uint_t d86_mode; /* mode for immediate */ - uint_t d86_value_size; /* size in bytes of d86_value */ - uint64_t d86_value; /* immediate value of opnd */ + char d86_opnd[OPLEN]; /* symbolic rep of operand */ + char d86_prefix[PFIXLEN]; /* any prefix string or "" */ + uint_t d86_mode; /* mode for immediate */ + uint_t d86_value_size; /* size in bytes of d86_value */ + uint64_t d86_value; /* immediate value of opnd */ } d86opnd_t; typedef struct dis86 { - uint_t d86_mode; - uint_t d86_error; - uint_t d86_len; /* instruction length */ - int d86_rmindex; /* index of modrm byte or -1 */ - uint_t d86_memsize; /* size of memory referenced */ - char d86_bytes[16]; /* bytes of instruction */ - char d86_mnem[OPLEN]; - uint_t d86_numopnds; - uint_t d86_rex_prefix; /* value of REX prefix if !0 */ - char *d86_seg_prefix; /* segment prefix, if any */ - uint_t d86_opnd_size; - uint_t d86_addr_size; - uint_t d86_got_modrm; - uint_t d86_vsib; /* Has a VSIB */ - struct d86opnd d86_opnd[4]; /* up to 4 operands */ - int (*d86_check_func)(void *); - int (*d86_get_byte)(void *); + uint_t d86_mode; + uint_t d86_error; + uint_t d86_len; /* instruction length */ + int d86_rmindex; /* index of modrm byte or -1 */ + uint_t d86_memsize; /* size of memory referenced */ + char d86_bytes[16]; /* bytes of instruction */ + char d86_mnem[OPLEN]; + uint_t d86_numopnds; + uint_t d86_rex_prefix; /* value of REX prefix if !0 */ + char *d86_seg_prefix; /* segment prefix, if any */ + uint_t d86_opnd_size; + uint_t d86_addr_size; + uint_t d86_got_modrm; + uint_t d86_vsib; /* Has a VSIB */ + struct d86opnd d86_opnd[4]; /* up to 4 operands */ + int (*d86_check_func)(void *); + int (*d86_get_byte)(void *); #ifdef DIS_TEXT - int (*d86_sym_lookup)(void *, uint64_t, char *, size_t); - int (*d86_sprintf_func)(char *, size_t, const char *, ...); - int d86_flags; - uint_t d86_imm_bytes; + int (*d86_sym_lookup)(void *, uint64_t, char *, size_t); + int (*d86_sprintf_func)(char *, size_t, const char *, ...); + int d86_flags; + uint_t d86_imm_bytes; #endif - void *d86_data; + void *d86_data; } dis86_t; extern int dtrace_disx86(dis86_t *x, uint_t cpu_mode); -#define DIS_F_OCTAL 0x1 /* Print all numbers in octal */ -#define DIS_F_NOIMMSYM 0x2 /* Don't print symbols for immediates (.o) */ +#define DIS_F_OCTAL 0x1 /* Print all numbers in octal */ +#define DIS_F_NOIMMSYM 0x2 /* Don't print symbols for immediates (.o) */ #ifdef DIS_TEXT extern void dtrace_disx86_str(dis86_t *x, uint_t 
cpu_mode, uint64_t pc, @@ -116,4 +116,3 @@ extern void dtrace_disx86_str(dis86_t *x, uint_t cpu_mode, uint64_t pc, #endif #endif /* _DIS_TABLES_H */ - diff --git a/bsd/i386/disklabel.h b/bsd/i386/disklabel.h index 283c9174d..13026f8fa 100644 --- a/bsd/i386/disklabel.h +++ b/bsd/i386/disklabel.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_DISKLABEL_H_ @@ -31,14 +31,14 @@ #include #ifdef __APPLE_API_OBSOLETE -#define LABELSECTOR (1024 / DEV_BSIZE) /* sector containing label */ -#define LABELOFFSET 0 /* offset of label in sector */ -#define MAXPARTITIONS 8 /* number of partitions */ -#define RAW_PART 2 /* raw partition: xx?c */ +#define LABELSECTOR (1024 / DEV_BSIZE) /* sector containing label */ +#define LABELOFFSET 0 /* offset of label in sector */ +#define MAXPARTITIONS 8 /* number of partitions */ +#define RAW_PART 2 /* raw partition: xx?c */ /* Just a dummy */ struct cpu_disklabel { - int cd_dummy; /* must have one element. */ + int cd_dummy; /* must have one element. */ }; #endif /* __APPLE_API_OBSOLETE */ diff --git a/bsd/i386/endian.h b/bsd/i386/endian.h index 9f64aa0c8..06854fe46 100644 --- a/bsd/i386/endian.h +++ b/bsd/i386/endian.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -64,7 +64,7 @@ */ #ifndef _I386__ENDIAN_H_ -#define _I386__ENDIAN_H_ +#define _I386__ENDIAN_H_ #include /* @@ -82,19 +82,19 @@ * Definitions for byte order, according to byte significance from low * address to high. */ -#define __DARWIN_LITTLE_ENDIAN 1234 /* LSB first: i386, vax */ -#define __DARWIN_BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */ -#define __DARWIN_PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */ +#define __DARWIN_LITTLE_ENDIAN 1234 /* LSB first: i386, vax */ +#define __DARWIN_BIG_ENDIAN 4321 /* MSB first: 68000, ibm, net */ +#define __DARWIN_PDP_ENDIAN 3412 /* LSB first in word, MSW first in long */ -#define __DARWIN_BYTE_ORDER __DARWIN_LITTLE_ENDIAN +#define __DARWIN_BYTE_ORDER __DARWIN_LITTLE_ENDIAN -#if defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) +#if defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -#define LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN -#define BIG_ENDIAN __DARWIN_BIG_ENDIAN -#define PDP_ENDIAN __DARWIN_PDP_ENDIAN +#define LITTLE_ENDIAN __DARWIN_LITTLE_ENDIAN +#define BIG_ENDIAN __DARWIN_BIG_ENDIAN +#define PDP_ENDIAN __DARWIN_PDP_ENDIAN -#define BYTE_ORDER __DARWIN_BYTE_ORDER +#define BYTE_ORDER __DARWIN_BYTE_ORDER #include diff --git a/bsd/i386/exec.h b/bsd/i386/exec.h index 677bd83fa..24de8642d 100644 --- a/bsd/i386/exec.h +++ b/bsd/i386/exec.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -66,15 +66,15 @@ #ifdef BSD_KERNEL_PRIVATE /* Size of a page in an object file. */ -#define __LDPGSZ 4096 +#define __LDPGSZ 4096 /* Valid magic number check. */ -#define N_BADMAG(ex) \ +#define N_BADMAG(ex) \ ((ex).a_magic != NMAGIC && (ex).a_magic != OMAGIC && \ (ex).a_magic != ZMAGIC) /* Address of the bottom of the text segment. */ -#define N_TXTADDR(X) 0 +#define N_TXTADDR(X) 0 /* Address of the bottom of the data segment. */ #define N_DATADDR(ex) \ @@ -82,11 +82,11 @@ : __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1)))) /* Text segment offset. */ -#define N_TXTOFF(ex) \ +#define N_TXTOFF(ex) \ ((ex).a_magic == ZMAGIC ? __LDPGSZ : sizeof(struct exec)) /* Data segment offset. */ -#define N_DATOFF(ex) \ +#define N_DATOFF(ex) \ (N_TXTOFF(ex) + ((ex).a_magic != ZMAGIC ? (ex).a_text : \ __LDPGSZ + ((ex).a_text - 1 & ~(__LDPGSZ - 1)))) @@ -96,23 +96,23 @@ (ex).a_drsize) /* String table offset. 
*/ -#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms) +#define N_STROFF(ex) (N_SYMOFF(ex) + (ex).a_syms) /* Description of the object file header (a.out format). */ struct exec { -#define OMAGIC 0407 /* old impure format */ -#define NMAGIC 0410 /* read-only text */ -#define ZMAGIC 0413 /* demand load format */ -#define QMAGIC 0314 /* demand load format. Header in text. */ - unsigned int a_magic; /* magic number */ +#define OMAGIC 0407 /* old impure format */ +#define NMAGIC 0410 /* read-only text */ +#define ZMAGIC 0413 /* demand load format */ +#define QMAGIC 0314 /* demand load format. Header in text. */ + unsigned int a_magic; /* magic number */ - unsigned int a_text; /* text segment size */ - unsigned int a_data; /* initialized data size */ - unsigned int a_bss; /* uninitialized data size */ - unsigned int a_syms; /* symbol table size */ - unsigned int a_entry; /* entry point */ - unsigned int a_trsize; /* text relocation size */ - unsigned int a_drsize; /* data relocation size */ + unsigned int a_text; /* text segment size */ + unsigned int a_data; /* initialized data size */ + unsigned int a_bss; /* uninitialized data size */ + unsigned int a_syms; /* symbol table size */ + unsigned int a_entry; /* entry point */ + unsigned int a_trsize; /* text relocation size */ + unsigned int a_drsize; /* data relocation size */ }; #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/i386/fasttrap_isa.h b/bsd/i386/fasttrap_isa.h index c2392eb0c..974b59c5b 100644 --- a/bsd/i386/fasttrap_isa.h +++ b/bsd/i386/fasttrap_isa.h @@ -23,8 +23,8 @@ * Use is subject to license terms. */ -#ifndef _FASTTRAP_ISA_H -#define _FASTTRAP_ISA_H +#ifndef _FASTTRAP_ISA_H +#define _FASTTRAP_ISA_H /* * #pragma ident "@(#)fasttrap_isa.h 1.6 06/09/19 SMI" @@ -33,85 +33,85 @@ #include #include -#ifdef __cplusplus +#ifdef __cplusplus extern "C" { #endif -#define FASTTRAP_MAX_INSTR_SIZE 15 +#define FASTTRAP_MAX_INSTR_SIZE 15 -#define FASTTRAP_INSTR 0xcc +#define FASTTRAP_INSTR 0xcc -#define FASTTRAP_SUNWDTRACE_SIZE 64 +#define FASTTRAP_SUNWDTRACE_SIZE 64 -typedef uint8_t fasttrap_instr_t; +typedef uint8_t fasttrap_instr_t; typedef struct fasttrap_machtp { - uint8_t ftmt_instr[FASTTRAP_MAX_INSTR_SIZE]; /* orig. instr. */ - uint8_t ftmt_size; /* instruction size */ - uint8_t ftmt_ripmode; /* %rip-relative handling mode */ - uint8_t ftmt_modrm; /* saved modrm byte */ - uint8_t ftmt_type; /* emulation type */ - uint8_t ftmt_code; /* branch condition */ - uint8_t ftmt_base; /* branch base */ - uint8_t ftmt_index; /* branch index */ - uint8_t ftmt_scale; /* branch scale */ - uint8_t ftmt_segment; /* segment for memory accesses */ - user_addr_t ftmt_dest; /* destination of control flow */ - uint8_t ftmt_installed:1; - uint8_t ftmt_retired:1; + uint8_t ftmt_instr[FASTTRAP_MAX_INSTR_SIZE]; /* orig. instr. 
*/ + uint8_t ftmt_size; /* instruction size */ + uint8_t ftmt_ripmode; /* %rip-relative handling mode */ + uint8_t ftmt_modrm; /* saved modrm byte */ + uint8_t ftmt_type; /* emulation type */ + uint8_t ftmt_code; /* branch condition */ + uint8_t ftmt_base; /* branch base */ + uint8_t ftmt_index; /* branch index */ + uint8_t ftmt_scale; /* branch scale */ + uint8_t ftmt_segment; /* segment for memory accesses */ + user_addr_t ftmt_dest; /* destination of control flow */ + uint8_t ftmt_installed:1; + uint8_t ftmt_retired:1; } fasttrap_machtp_t; -#define ftt_instr ftt_mtp.ftmt_instr -#define ftt_ripmode ftt_mtp.ftmt_ripmode -#define ftt_modrm ftt_mtp.ftmt_modrm -#define ftt_size ftt_mtp.ftmt_size -#define ftt_type ftt_mtp.ftmt_type -#define ftt_code ftt_mtp.ftmt_code -#define ftt_base ftt_mtp.ftmt_base -#define ftt_index ftt_mtp.ftmt_index -#define ftt_scale ftt_mtp.ftmt_scale -#define ftt_segment ftt_mtp.ftmt_segment -#define ftt_dest ftt_mtp.ftmt_dest -#define ftt_installed ftt_mtp.ftmt_installed -#define ftt_retired ftt_mtp.ftmt_retired - - -#define FASTTRAP_T_COMMON 0x00 /* common case -- no emulation */ -#define FASTTRAP_T_JCC 0x01 /* near and far conditional jumps */ -#define FASTTRAP_T_LOOP 0x02 /* loop instructions */ -#define FASTTRAP_T_JCXZ 0x03 /* jump if %ecx/%rcx is zero */ -#define FASTTRAP_T_JMP 0x04 /* relative jump */ -#define FASTTRAP_T_CALL 0x05 /* near call (and link) */ -#define FASTTRAP_T_RET 0x06 /* ret */ -#define FASTTRAP_T_RET16 0x07 /* ret */ +#define ftt_instr ftt_mtp.ftmt_instr +#define ftt_ripmode ftt_mtp.ftmt_ripmode +#define ftt_modrm ftt_mtp.ftmt_modrm +#define ftt_size ftt_mtp.ftmt_size +#define ftt_type ftt_mtp.ftmt_type +#define ftt_code ftt_mtp.ftmt_code +#define ftt_base ftt_mtp.ftmt_base +#define ftt_index ftt_mtp.ftmt_index +#define ftt_scale ftt_mtp.ftmt_scale +#define ftt_segment ftt_mtp.ftmt_segment +#define ftt_dest ftt_mtp.ftmt_dest +#define ftt_installed ftt_mtp.ftmt_installed +#define ftt_retired ftt_mtp.ftmt_retired + + +#define FASTTRAP_T_COMMON 0x00 /* common case -- no emulation */ +#define FASTTRAP_T_JCC 0x01 /* near and far conditional jumps */ +#define FASTTRAP_T_LOOP 0x02 /* loop instructions */ +#define FASTTRAP_T_JCXZ 0x03 /* jump if %ecx/%rcx is zero */ +#define FASTTRAP_T_JMP 0x04 /* relative jump */ +#define FASTTRAP_T_CALL 0x05 /* near call (and link) */ +#define FASTTRAP_T_RET 0x06 /* ret */ +#define FASTTRAP_T_RET16 0x07 /* ret */ /* * For performance rather than correctness. */ -#define FASTTRAP_T_PUSHL_EBP 0x10 /* pushl %ebp (for function entry) */ -#define FASTTRAP_T_NOP 0x11 /* nop */ +#define FASTTRAP_T_PUSHL_EBP 0x10 /* pushl %ebp (for function entry) */ +#define FASTTRAP_T_NOP 0x11 /* nop */ -#define FASTTRAP_RIP_1 0x1 -#define FASTTRAP_RIP_2 0x2 -#define FASTTRAP_RIP_X 0x4 +#define FASTTRAP_RIP_1 0x1 +#define FASTTRAP_RIP_2 0x2 +#define FASTTRAP_RIP_X 0x4 /* * Segment values. 
*/ -#define FASTTRAP_SEG_NONE 0 -#define FASTTRAP_SEG_CS 1 -#define FASTTRAP_SEG_DS 2 -#define FASTTRAP_SEG_ES 3 -#define FASTTRAP_SEG_FS 4 -#define FASTTRAP_SEG_GS 5 -#define FASTTRAP_SEG_SS 6 - -#define FASTTRAP_RETURN_AFRAMES 6 -#define FASTTRAP_ENTRY_AFRAMES 5 -#define FASTTRAP_OFFSET_AFRAMES 5 - -#ifdef __cplusplus +#define FASTTRAP_SEG_NONE 0 +#define FASTTRAP_SEG_CS 1 +#define FASTTRAP_SEG_DS 2 +#define FASTTRAP_SEG_ES 3 +#define FASTTRAP_SEG_FS 4 +#define FASTTRAP_SEG_GS 5 +#define FASTTRAP_SEG_SS 6 + +#define FASTTRAP_RETURN_AFRAMES 6 +#define FASTTRAP_ENTRY_AFRAMES 5 +#define FASTTRAP_OFFSET_AFRAMES 5 + +#ifdef __cplusplus } #endif -#endif /* _FASTTRAP_ISA_H */ +#endif /* _FASTTRAP_ISA_H */ diff --git a/bsd/i386/limits.h b/bsd/i386/limits.h index 9ee7c03be..9bc2e5718 100644 --- a/bsd/i386/limits.h +++ b/bsd/i386/limits.h @@ -39,11 +39,11 @@ #include #include -#define CHAR_BIT 8 /* number of bits in a char */ -#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */ +#define CHAR_BIT 8 /* number of bits in a char */ +#define MB_LEN_MAX 6 /* Allow 31 bit UTF2 */ #if !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -#define CLK_TCK __DARWIN_CLK_TCK /* ticks per second */ +#define CLK_TCK __DARWIN_CLK_TCK /* ticks per second */ #endif /* !_ANSI_SOURCE && (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* @@ -56,50 +56,50 @@ * These numbers work for pcc as well. The UINT_MAX and ULONG_MAX values * are written as hex so that GCC will be quiet about large integer constants. */ -#define SCHAR_MAX 127 /* min value for a signed char */ -#define SCHAR_MIN (-128) /* max value for a signed char */ +#define SCHAR_MAX 127 /* min value for a signed char */ +#define SCHAR_MIN (-128) /* max value for a signed char */ -#define UCHAR_MAX 255 /* max value for an unsigned char */ -#define CHAR_MAX 127 /* max value for a char */ -#define CHAR_MIN (-128) /* min value for a char */ +#define UCHAR_MAX 255 /* max value for an unsigned char */ +#define CHAR_MAX 127 /* max value for a char */ +#define CHAR_MIN (-128) /* min value for a char */ -#define USHRT_MAX 65535 /* max value for an unsigned short */ -#define SHRT_MAX 32767 /* max value for a short */ -#define SHRT_MIN (-32768) /* min value for a short */ +#define USHRT_MAX 65535 /* max value for an unsigned short */ +#define SHRT_MAX 32767 /* max value for a short */ +#define SHRT_MIN (-32768) /* min value for a short */ -#define UINT_MAX 0xffffffff /* max value for an unsigned int */ -#define INT_MAX 2147483647 /* max value for an int */ -#define INT_MIN (-2147483647-1) /* min value for an int */ +#define UINT_MAX 0xffffffff /* max value for an unsigned int */ +#define INT_MAX 2147483647 /* max value for an int */ +#define INT_MIN (-2147483647-1) /* min value for an int */ #ifdef __LP64__ -#define ULONG_MAX 0xffffffffffffffffUL /* max unsigned long */ -#define LONG_MAX 0x7fffffffffffffffL /* max signed long */ -#define LONG_MIN (-0x7fffffffffffffffL-1) /* min signed long */ +#define ULONG_MAX 0xffffffffffffffffUL /* max unsigned long */ +#define LONG_MAX 0x7fffffffffffffffL /* max signed long */ +#define LONG_MIN (-0x7fffffffffffffffL-1) /* min signed long */ #else /* !__LP64__ */ -#define ULONG_MAX 0xffffffffUL /* max unsigned long */ -#define LONG_MAX 2147483647L /* max signed long */ -#define LONG_MIN (-2147483647L-1) /* min signed long */ +#define ULONG_MAX 0xffffffffUL /* max unsigned long */ +#define LONG_MAX 2147483647L /* max signed long */ +#define LONG_MIN (-2147483647L-1) /* min signed long */ #endif /* __LP64__ */ -#define 
ULLONG_MAX 0xffffffffffffffffULL /* max unsigned long long */ -#define LLONG_MAX 0x7fffffffffffffffLL /* max signed long long */ -#define LLONG_MIN (-0x7fffffffffffffffLL-1) /* min signed long long */ +#define ULLONG_MAX 0xffffffffffffffffULL /* max unsigned long long */ +#define LLONG_MAX 0x7fffffffffffffffLL /* max signed long long */ +#define LLONG_MIN (-0x7fffffffffffffffLL-1) /* min signed long long */ #if !defined(_ANSI_SOURCE) #ifdef __LP64__ -#define LONG_BIT 64 +#define LONG_BIT 64 #else /* !__LP64__ */ -#define LONG_BIT 32 +#define LONG_BIT 32 #endif /* __LP64__ */ -#define SSIZE_MAX LONG_MAX /* max value for a ssize_t */ -#define WORD_BIT 32 +#define SSIZE_MAX LONG_MAX /* max value for a ssize_t */ +#define WORD_BIT 32 #if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE) -#define SIZE_T_MAX ULONG_MAX /* max value for a size_t */ +#define SIZE_T_MAX ULONG_MAX /* max value for a size_t */ -#define UQUAD_MAX ULLONG_MAX -#define QUAD_MAX LLONG_MAX -#define QUAD_MIN LLONG_MIN +#define UQUAD_MAX ULLONG_MAX +#define QUAD_MAX LLONG_MAX +#define QUAD_MIN LLONG_MIN #endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */ #endif /* !_ANSI_SOURCE */ diff --git a/bsd/i386/param.h b/bsd/i386/param.h index 221318fe1..bff89a516 100644 --- a/bsd/i386/param.h +++ b/bsd/i386/param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -79,20 +79,20 @@ * data types (int, long, ...). The result is unsigned int and must be * cast to any desired pointer type. */ -#define ALIGNBYTES __DARWIN_ALIGNBYTES -#define ALIGN(p) __DARWIN_ALIGN(p) +#define ALIGNBYTES __DARWIN_ALIGNBYTES +#define ALIGN(p) __DARWIN_ALIGN(p) -#define NBPG 4096 /* bytes/page */ -#define PGOFSET (NBPG-1) /* byte offset into page */ -#define PGSHIFT 12 /* LOG2(NBPG) */ +#define NBPG 4096 /* bytes/page */ +#define PGOFSET (NBPG-1) /* byte offset into page */ +#define PGSHIFT 12 /* LOG2(NBPG) */ -#define DEV_BSIZE 512 -#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ -#define BLKDEV_IOSIZE 2048 -#define MAXPHYS (128 * 1024) /* max raw I/O transfer size */ +#define DEV_BSIZE 512 +#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ +#define BLKDEV_IOSIZE 2048 +#define MAXPHYS (128 * 1024) /* max raw I/O transfer size */ -#define CLSIZE 1 -#define CLSIZELOG2 0 +#define CLSIZE 1 +#define CLSIZELOG2 0 /* * Constants related to network buffer management. 
@@ -101,47 +101,47 @@ * clusters (MAPPED_MBUFS), MCLBYTES must also be an integral multiple * of the hardware page size. */ -#define MSIZESHIFT 8 /* 256 */ -#define MSIZE (1 << MSIZESHIFT) /* size of an mbuf */ -#define MCLSHIFT 11 /* 2048 */ -#define MCLBYTES (1 << MCLSHIFT) /* size of an mbuf cluster */ -#define MBIGCLSHIFT 12 /* 4096 */ -#define MBIGCLBYTES (1 << MBIGCLSHIFT) /* size of a big cluster */ -#define M16KCLSHIFT 14 /* 16384 */ -#define M16KCLBYTES (1 << M16KCLSHIFT) /* size of a jumbo cluster */ - -#define MCLOFSET (MCLBYTES - 1) +#define MSIZESHIFT 8 /* 256 */ +#define MSIZE (1 << MSIZESHIFT) /* size of an mbuf */ +#define MCLSHIFT 11 /* 2048 */ +#define MCLBYTES (1 << MCLSHIFT) /* size of an mbuf cluster */ +#define MBIGCLSHIFT 12 /* 4096 */ +#define MBIGCLBYTES (1 << MBIGCLSHIFT) /* size of a big cluster */ +#define M16KCLSHIFT 14 /* 16384 */ +#define M16KCLBYTES (1 << M16KCLSHIFT) /* size of a jumbo cluster */ + +#define MCLOFSET (MCLBYTES - 1) #ifndef NMBCLUSTERS -#define NMBCLUSTERS ((1024 * 1024) / MCLBYTES) /* cl map size: 1MB */ +#define NMBCLUSTERS ((1024 * 1024) / MCLBYTES) /* cl map size: 1MB */ #endif /* * Some macros for units conversion */ /* Core clicks (NeXT_page_size bytes) to segments and vice versa */ -#define ctos(x) (x) -#define stoc(x) (x) +#define ctos(x) (x) +#define stoc(x) (x) /* Core clicks (4096 bytes) to disk blocks */ -#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT)) -#define dtoc(x) ((x)>>(PGSHIFT-DEV_BSHIFT)) -#define dtob(x) ((x)<<DEV_BSHIFT) -#define btoc(x) (((unsigned)(x)+(NBPG-1))>>PGSHIFT) +#define ctod(x) ((x)<<(PGSHIFT-DEV_BSHIFT)) +#define dtoc(x) ((x)>>(PGSHIFT-DEV_BSHIFT)) +#define dtob(x) ((x)<<DEV_BSHIFT) +#define btoc(x) (((unsigned)(x)+(NBPG-1))>>PGSHIFT) #ifdef __APPLE__ #define btodb(bytes, devBlockSize) \ - ((unsigned)(bytes) / devBlockSize) + ((unsigned)(bytes) / devBlockSize) #define dbtob(db, devBlockSize) \ - ((unsigned)(db) * devBlockSize) + ((unsigned)(db) * devBlockSize) #else -#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ +#define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ ((unsigned)(bytes) >> DEV_BSHIFT) -#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ +#define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ ((unsigned)(db) << DEV_BSHIFT) #endif @@ -151,21 +151,21 @@ * add an entry to cdevsw/bdevsw for that purpose. * For now though just use DEV_BSIZE. */ -#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE)) +#define bdbtofsb(bn) ((bn) / (BLKDEV_IOSIZE/DEV_BSIZE)) /* * Macros to decode (and encode) processor status word. */ -#define STATUS_WORD(rpl, ipl) (((ipl) << 8) | (rpl)) -#define USERMODE(x) (((x) & 3) == 3) -#define BASEPRI(x) (((x) & (255 << 8)) == 0) +#define STATUS_WORD(rpl, ipl) (((ipl) << 8) | (rpl)) +#define USERMODE(x) (((x) & 3) == 3) +#define BASEPRI(x) (((x) & (255 << 8)) == 0) -#if defined(KERNEL) || defined(STANDALONE) -#define DELAY(n) delay(n) +#if defined(KERNEL) || defined(STANDALONE) +#define DELAY(n) delay(n) -#else /* defined(KERNEL) || defined(STANDALONE) */ -#define DELAY(n) { int N = (n); while (--N > 0); } +#else /* defined(KERNEL) || defined(STANDALONE) */ +#define DELAY(n) { int N = (n); while (--N > 0); } -#endif /* defined(KERNEL) || defined(STANDALONE) */ +#endif /* defined(KERNEL) || defined(STANDALONE) */ #endif /* _I386_PARAM_H_ */ diff --git a/bsd/i386/profile.h b/bsd/i386/profile.h index fce3663d8..e630636e0 100644 --- a/bsd/i386/profile.h +++ b/bsd/i386/profile.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
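[Editorial illustration] The units-conversion macros in the param.h hunk above translate between pages ("core clicks"), 512-byte disk blocks, and raw bytes purely by shifting. A small standalone sketch, with the i386 constants inlined rather than pulled from the kernel header:

    #include <stdio.h>

    /* i386 constants from bsd/i386/param.h, inlined for the demo. */
    #define NBPG        4096   /* bytes per page */
    #define PGSHIFT     12     /* log2(NBPG) */
    #define DEV_BSHIFT  9      /* log2(DEV_BSIZE), DEV_BSIZE == 512 */

    #define ctod(x) ((x) << (PGSHIFT - DEV_BSHIFT))           /* pages -> disk blocks */
    #define dtoc(x) ((x) >> (PGSHIFT - DEV_BSHIFT))           /* disk blocks -> pages */
    #define dtob(x) ((x) << DEV_BSHIFT)                       /* disk blocks -> bytes */
    #define btoc(x) (((unsigned)(x) + (NBPG - 1)) >> PGSHIFT) /* bytes -> pages, rounded up */

    int main(void)
    {
        printf("1 page     = %d disk blocks\n", ctod(1)); /* 8 */
        printf("8 blocks   = %d pages\n", dtoc(8));       /* 1 */
        printf("8 blocks   = %d bytes\n", dtob(8));       /* 4096 */
        printf("4097 bytes = %u pages\n", btoc(4097));    /* 2 */
        return 0;
    }

Note that btoc rounds up, so any non-empty byte range costs at least one full page.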
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -47,8 +47,8 @@ #warning MCOUNT_* not implemented yet. #define MCOUNT_INIT -#define MCOUNT_ENTER /* s = splhigh(); */ /* XXX TODO */ -#define MCOUNT_EXIT /* (void) splx(s); */ /* XXX TODO */ +#define MCOUNT_ENTER /* s = splhigh(); */ /* XXX TODO */ +#define MCOUNT_EXIT /* (void) splx(s); */ /* XXX TODO */ #endif /* __APPLE_API_UNSTABLE */ #endif /* KERNEL */ diff --git a/bsd/i386/psl.h b/bsd/i386/psl.h index f11c97ef2..89d26971f 100644 --- a/bsd/i386/psl.h +++ b/bsd/i386/psl.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -31,26 +31,26 @@ * Intel386 Family: Definition of eflags register. 
* */ - -#if KERNEL_PRIVATE + +#if KERNEL_PRIVATE #ifndef _BSD_I386_PSL_H_ #define _BSD_I386_PSL_H_ - -#define EFL_ALLCC ( \ - EFL_CF | \ - EFL_PF | \ - EFL_AF | \ - EFL_ZF | \ - EFL_SF | \ - EFL_OF \ - ) -#define EFL_USERSET ( EFL_IF | EFL_SET ) -#define EFL_USERCLR ( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR ) -#define PSL_ALLCC EFL_ALLCC -#define PSL_T EFL_TF +#define EFL_ALLCC ( \ + EFL_CF | \ + EFL_PF | \ + EFL_AF | \ + EFL_ZF | \ + EFL_SF | \ + EFL_OF \ + ) +#define EFL_USERSET ( EFL_IF | EFL_SET ) +#define EFL_USERCLR ( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR ) + +#define PSL_ALLCC EFL_ALLCC +#define PSL_T EFL_TF -#endif /* _BSD_I386_PSL_H_ */ +#endif /* _BSD_I386_PSL_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/i386/ptrace.h b/bsd/i386/ptrace.h index b76395119..043e3f175 100644 --- a/bsd/i386/ptrace.h +++ b/bsd/i386/ptrace.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/bsd/i386/reboot.h b/bsd/i386/reboot.h index 5e43cb309..0fbfa53e5 100644 --- a/bsd/i386/reboot.h +++ b/bsd/i386/reboot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - -#ifndef _BSD_I386_REBOOT_H_ + +#ifndef _BSD_I386_REBOOT_H_ #define _BSD_I386_REBOOT_H_ /* * Empty file (publicly) */ - + #include -#ifdef BSD_KERNEL_PRIVATE +#ifdef BSD_KERNEL_PRIVATE /* * Use most significant 16 bits to avoid collisions with * machine independent flags. */ -#define RB_POWERDOWN 0x00010000 /* power down on halt */ -#define RB_NOBOOTRC 0x00020000 /* don't run '/etc/rc.boot' */ -#define RB_DEBUG 0x00040000 /* drop into mini monitor on panic */ -#define RB_EJECT 0x00080000 /* eject disks on halt */ -#define RB_COMMAND 0x00100000 /* new boot command specified */ -#define RB_NOFP 0x00200000 /* don't use floating point */ -#define RB_BOOTNEXT 0x00400000 /* reboot into NeXT */ -#define RB_BOOTDOS 0x00800000 /* reboot into DOS */ -#define RB_PRETTY 0x01000000 /* shutdown with pretty graphics */ +#define RB_POWERDOWN 0x00010000 /* power down on halt */ +#define RB_NOBOOTRC 0x00020000 /* don't run '/etc/rc.boot' */ +#define RB_DEBUG 0x00040000 /* drop into mini monitor on panic */ +#define RB_EJECT 0x00080000 /* eject disks on halt */ +#define RB_COMMAND 0x00100000 /* new boot command specified */ +#define RB_NOFP 0x00200000 /* don't use floating point */ +#define RB_BOOTNEXT 0x00400000 /* reboot into NeXT */ +#define RB_BOOTDOS 0x00800000 /* reboot into DOS */ +#define RB_PRETTY 0x01000000 /* shutdown with pretty graphics */ -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ -#endif /* _BSD_I386_REBOOT_H_ */ +#endif /* _BSD_I386_REBOOT_H_ */ diff --git a/bsd/i386/reg.h b/bsd/i386/reg.h index 415533afd..d12802f21 100644 --- a/bsd/i386/reg.h +++ b/bsd/i386/reg.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -31,13 +31,13 @@ * Intel386 Family: User registers for U**X. * */ - -#ifdef KERNEL_PRIVATE + +#ifdef KERNEL_PRIVATE #ifndef _BSD_I386_REG_H_ #define _BSD_I386_REG_H_ -#endif /* _BSD_I386_REG_H_ */ +#endif /* _BSD_I386_REG_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/bsd/i386/signal.h b/bsd/i386/signal.h index 2e3ce85ec..1843b79e5 100644 --- a/bsd/i386/signal.h +++ b/bsd/i386/signal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,15 +30,14 @@ * */ -#ifndef _I386_SIGNAL_H_ -#define _I386_SIGNAL_H_ 1 +#ifndef _I386_SIGNAL_H_ +#define _I386_SIGNAL_H_ 1 #include #ifndef _ANSI_SOURCE -typedef int sig_atomic_t; +typedef int sig_atomic_t; #endif /* ! _ANSI_SOURCE */ -#endif /* _I386_SIGNAL_H_ */ - +#endif /* _I386_SIGNAL_H_ */ diff --git a/bsd/i386/types.h b/bsd/i386/types.h index 30f0bc591..5ae48ccd2 100644 --- a/bsd/i386/types.h +++ b/bsd/i386/types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -63,8 +63,8 @@ * @(#)types.h 8.3 (Berkeley) 1/5/94 */ -#ifndef _MACHTYPES_H_ -#define _MACHTYPES_H_ +#ifndef _MACHTYPES_H_ +#define _MACHTYPES_H_ #ifndef __ASSEMBLER__ #include @@ -84,9 +84,9 @@ #include #if __LP64__ -typedef int64_t register_t; +typedef int64_t register_t; #else -typedef int32_t register_t; +typedef int32_t register_t; #endif #include @@ -94,14 +94,14 @@ typedef int32_t register_t; #if !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) /* These types are used for reserving the largest possible size. 
*/ -typedef u_int64_t user_addr_t; -typedef u_int64_t user_size_t; -typedef int64_t user_ssize_t; -typedef int64_t user_long_t; -typedef u_int64_t user_ulong_t; -typedef int64_t user_time_t; -typedef int64_t user_off_t; -#define USER_ADDR_NULL ((user_addr_t) 0) +typedef u_int64_t user_addr_t; +typedef u_int64_t user_size_t; +typedef int64_t user_ssize_t; +typedef int64_t user_long_t; +typedef u_int64_t user_ulong_t; +typedef int64_t user_time_t; +typedef int64_t user_off_t; +#define USER_ADDR_NULL ((user_addr_t) 0) #define CAST_USER_ADDR_T(a_ptr) ((user_addr_t)((uintptr_t)(a_ptr))) #ifdef KERNEL @@ -122,28 +122,28 @@ typedef int64_t user_off_t; * running under translation must conform to the 32-bit Intel ABI. */ -typedef __uint64_t user64_addr_t __attribute__((aligned(8))); -typedef __uint64_t user64_size_t __attribute__((aligned(8))); -typedef __int64_t user64_ssize_t __attribute__((aligned(8))); -typedef __int64_t user64_long_t __attribute__((aligned(8))); -typedef __uint64_t user64_ulong_t __attribute__((aligned(8))); -typedef __int64_t user64_time_t __attribute__((aligned(8))); -typedef __int64_t user64_off_t __attribute__((aligned(8))); +typedef __uint64_t user64_addr_t __attribute__((aligned(8))); +typedef __uint64_t user64_size_t __attribute__((aligned(8))); +typedef __int64_t user64_ssize_t __attribute__((aligned(8))); +typedef __int64_t user64_long_t __attribute__((aligned(8))); +typedef __uint64_t user64_ulong_t __attribute__((aligned(8))); +typedef __int64_t user64_time_t __attribute__((aligned(8))); +typedef __int64_t user64_off_t __attribute__((aligned(8))); -typedef __uint32_t user32_addr_t; -typedef __uint32_t user32_size_t; -typedef __int32_t user32_ssize_t; -typedef __int32_t user32_long_t; -typedef __uint32_t user32_ulong_t; -typedef __int32_t user32_time_t; -typedef __int64_t user32_off_t __attribute__((aligned(4))); +typedef __uint32_t user32_addr_t; +typedef __uint32_t user32_size_t; +typedef __int32_t user32_ssize_t; +typedef __int32_t user32_long_t; +typedef __uint32_t user32_ulong_t; +typedef __int32_t user32_time_t; +typedef __int64_t user32_off_t __attribute__((aligned(4))); #endif /* KERNEL */ #endif /* !_ANSI_SOURCE && (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* This defines the size of syscall arguments after copying into the kernel: */ -typedef u_int64_t syscall_arg_t; +typedef u_int64_t syscall_arg_t; #endif /* __ASSEMBLER__ */ -#endif /* _MACHTYPES_H_ */ +#endif /* _MACHTYPES_H_ */ diff --git a/bsd/i386/vmparam.h b/bsd/i386/vmparam.h index 6ea9e94b9..abade8925 100644 --- a/bsd/i386/vmparam.h +++ b/bsd/i386/vmparam.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
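[Editorial illustration] The user64_*/user32_* typedefs in the types.h hunk above pin structure layout across the user/kernel boundary: aligned(8) forces 64-bit fields to 8-byte offsets even under an ABI that would naturally place them at 4, and aligned(4) serves the converse purpose for the 32-bit view. A hedged sketch (demo names only, not the kernel typedefs) showing the aligned(8) half of that bargain:

    #include <stdio.h>
    #include <stddef.h>

    /*
     * Stand-in for the user64_* pattern: force 8-byte alignment so the
     * struct layout matches the 64-bit ABI however this file is built.
     */
    typedef long long demo64_long_t __attribute__((aligned(8)));

    struct demo_args {
        int           flag;  /* offset 0 */
        demo64_long_t value; /* offset 8, guaranteed by aligned(8) */
    };

    int main(void)
    {
        printf("value offset = %zu, sizeof = %zu\n",
            offsetof(struct demo_args, value), sizeof(struct demo_args));
        return 0;
    }

Without the attribute, an i386 build would place value at offset 4, and the kernel's 64-bit view of the same structure would disagree with the process's.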
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,39 +22,39 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _BSD_I386_VMPARAM_H_ -#define _BSD_I386_VMPARAM_H_ 1 +#ifndef _BSD_I386_VMPARAM_H_ +#define _BSD_I386_VMPARAM_H_ 1 #include -#define USRSTACK VM_USRSTACK32 -#define USRSTACK64 VM_USRSTACK64 +#define USRSTACK VM_USRSTACK32 +#define USRSTACK64 VM_USRSTACK64 /* * Virtual memory related constants, all in bytes */ #ifndef DFLDSIZ -#define DFLDSIZ (RLIM_INFINITY) /* initial data size limit */ +#define DFLDSIZ (RLIM_INFINITY) /* initial data size limit */ #endif #ifndef MAXDSIZ -#define MAXDSIZ (RLIM_INFINITY) /* max data size */ +#define MAXDSIZ (RLIM_INFINITY) /* max data size */ #endif -#ifndef DFLSSIZ -#define DFLSSIZ (8*1024*1024) /* initial stack size limit */ +#ifndef DFLSSIZ +#define DFLSSIZ (8*1024*1024) /* initial stack size limit */ #endif -#ifndef MAXSSIZ -#define MAXSSIZ (64*1024*1024) /* max stack size */ +#ifndef MAXSSIZ +#define MAXSSIZ (64*1024*1024) /* max stack size */ #endif -#ifndef DFLCSIZ -#define DFLCSIZ (0) /* initial core size limit */ +#ifndef DFLCSIZ +#define DFLCSIZ (0) /* initial core size limit */ #endif -#ifndef MAXCSIZ -#define MAXCSIZ (RLIM_INFINITY) /* max core size */ -#endif /* MAXCSIZ */ +#ifndef MAXCSIZ +#define MAXCSIZ (RLIM_INFINITY) /* max core size */ +#endif /* MAXCSIZ */ -#endif /* _BSD_I386_VMPARAM_H_ */ +#endif /* _BSD_I386_VMPARAM_H_ */ diff --git a/bsd/kern/ast.h b/bsd/kern/ast.h index 43d896fd9..7fc56d217 100644 --- a/bsd/kern/ast.h +++ b/bsd/kern/ast.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* CMU_HIST */ diff --git a/bsd/kern/bsd_init.c b/bsd/kern/bsd_init.c index 23e115db0..73be8cd43 100644 --- a/bsd/kern/bsd_init.c +++ b/bsd/kern/bsd_init.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
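The vmparam.h defaults above encode the policy for this architecture: data and core sizes are unlimited by default, while the stack starts at an 8 MB soft limit under a 64 MB hard cap (bsd_init.c further on seeds vm_initial_limit_stack with MAXSSIZ - PAGE_MAX_SIZE as the hard value). A runnable sketch of how those constants populate a standard rlimit pair; the DFL/MAX names are copied from the hunk, everything else is illustrative:

    #include <stdio.h>
    #include <sys/resource.h>

    #define DFLSSIZ (8*1024*1024)     /* initial stack size limit */
    #define MAXSSIZ (64*1024*1024)    /* max stack size */

    int
    main(void)
    {
        /* soft = what a fresh process gets, hard = the raisable ceiling */
        struct rlimit stack = { DFLSSIZ, MAXSSIZ };
        printf("stack: soft %llu MB, hard %llu MB\n",
            (unsigned long long)stack.rlim_cur >> 20,
            (unsigned long long)stack.rlim_max >> 20);
        return 0;
    }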
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,9 +22,9 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - * + * * * Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993 * The Regents of the University of California. All rights reserved. @@ -65,7 +65,7 @@ * @(#)init_main.c 8.16 (Berkeley) 5/14/95 */ -/* +/* * * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University @@ -115,7 +115,7 @@ #include #include -#include /* for pseudo_inits */ +#include /* for pseudo_inits */ #include #include #include @@ -126,53 +126,53 @@ #include #include #include -#include /* for thread_resume() */ -#include /* for ubc_init() */ -#include /* for mcache_init() */ -#include /* for mbinit() */ -#include /* for knote_init() */ -#include /* for eventhandler_init() */ -#include /* for memorystatus_init() */ -#include /* for aio_init() */ -#include /* for psem_cache_init() */ -#include /* for dlil_init() */ -#include /* for proto_kpi_init() */ -#include /* for iptap_init() */ -#include /* for pipeinit() */ -#include /* for socketinit() */ -#include /* for domaininit() */ -#include /* for thread_wakeup() */ -#include /* for ether_family_init() */ -#include /* for gif_init() */ -#include /* for vnode_pager_bootstrap() */ -#include /* for devfs_kernel_mount() */ -#include /* for kmem_suballoc() */ -#include /* for psem_lock_init() */ -#include /* for log_setsize() */ -#include /* for tty_init() */ -#include /* proc_uuid_policy_init() */ -#include /* flow_divert_init() */ -#include /* for cfil_init() */ -#include /* for necp_init() */ -#include /* for netagent_init() */ -#include /* for pkt_mnglr_init() */ -#include /* for utun_register_control() */ -#include /* for ipsec_register_control() */ -#include /* for net_str_id_init() */ -#include /* for netsrc_init() */ -#include /* for nstat_init() */ -#include /* for tcp_cc_init() */ -#include /* for mptcp_control_register() */ -#include /* for nwk_wq_init */ -#include /* for assert() */ -#include /* for init_system_override() */ +#include /* for thread_resume() */ +#include /* for ubc_init() */ +#include /* for mcache_init() */ +#include /* for mbinit() */ +#include /* for knote_init() */ +#include /* for eventhandler_init() */ +#include /* for memorystatus_init() */ +#include /* for aio_init() */ +#include /* for psem_cache_init() */ +#include /* for dlil_init() */ +#include /* for proto_kpi_init() */ +#include /* for iptap_init() */ +#include /* for pipeinit() */ +#include /* for socketinit() */ +#include /* for domaininit() */ +#include /* for thread_wakeup() */ +#include /* for ether_family_init() */ +#include /* for gif_init() */ +#include /* for vnode_pager_bootstrap() */ +#include /* for devfs_kernel_mount() */ +#include /* for kmem_suballoc() */ +#include /* for psem_lock_init() */ +#include /* for log_setsize() */ 
+#include /* for tty_init() */ +#include /* proc_uuid_policy_init() */ +#include /* flow_divert_init() */ +#include /* for cfil_init() */ +#include /* for necp_init() */ +#include /* for netagent_init() */ +#include /* for pkt_mnglr_init() */ +#include /* for utun_register_control() */ +#include /* for ipsec_register_control() */ +#include /* for net_str_id_init() */ +#include /* for netsrc_init() */ +#include /* for nstat_init() */ +#include /* for tcp_cc_init() */ +#include /* for mptcp_control_register() */ +#include /* for nwk_wq_init */ +#include /* for assert() */ +#include /* for init_system_override() */ #include #if CONFIG_MACF #include -#include /* mac_init_bsd() */ -#include /* mac_update_task_label() */ +#include /* mac_init_bsd() */ +#include /* mac_update_task_label() */ #endif #include @@ -198,24 +198,24 @@ #include #endif -void * get_user_regs(thread_t); /* XXX kludge for */ -void IOKitInitializeTime(void); /* XXX */ -void IOSleep(unsigned int); /* XXX */ -void loopattach(void); /* XXX */ +void * get_user_regs(thread_t); /* XXX kludge for */ +void IOKitInitializeTime(void); /* XXX */ +void IOSleep(unsigned int); /* XXX */ +void loopattach(void); /* XXX */ const char copyright[] = -"Copyright (c) 1982, 1986, 1989, 1991, 1993\n\t" -"The Regents of the University of California. " -"All rights reserved.\n\n"; + "Copyright (c) 1982, 1986, 1989, 1991, 1993\n\t" + "The Regents of the University of California. " + "All rights reserved.\n\n"; /* Components of the first process -- never freed. */ -struct proc proc0; -struct session session0; -struct pgrp pgrp0; -struct filedesc filedesc0; -struct plimit limit0; -struct pstats pstats0; -struct sigacts sigacts0; +struct proc proc0; +struct session session0; +struct pgrp pgrp0; +struct filedesc filedesc0; +struct plimit limit0; +struct pstats pstats0; +struct sigacts sigacts0; proc_t kernproc; proc_t initproc; @@ -231,22 +231,22 @@ int nswapmap; void *swapmap; struct swdevt swdevt[1]; -dev_t rootdev; /* device of the root */ -dev_t dumpdev; /* device to take dumps on */ -long dumplo; /* offset into dumpdev */ -long hostid; -char hostname[MAXHOSTNAMELEN]; -int hostnamelen; -char domainname[MAXDOMNAMELEN]; -int domainnamelen; +dev_t rootdev; /* device of the root */ +dev_t dumpdev; /* device to take dumps on */ +long dumplo; /* offset into dumpdev */ +long hostid; +char hostname[MAXHOSTNAMELEN]; +int hostnamelen; +char domainname[MAXDOMNAMELEN]; +int domainnamelen; char rootdevice[DEVMAXNAMESIZE]; #if KMEMSTATS -struct kmemstats kmemstats[M_LAST]; +struct kmemstats kmemstats[M_LAST]; #endif -struct vnode *rootvp; +struct vnode *rootvp; int boothowto = RB_DEBUG; int minimalboot = 0; #if CONFIG_EMBEDDED @@ -275,8 +275,8 @@ extern void acct_init(void); extern int serverperfmode; extern int ncl; -vm_map_t bsd_pageable_map; -vm_map_t mb_map; +vm_map_t bsd_pageable_map; +vm_map_t mb_map; static int bsd_simul_execs; static int bsd_pageable_map_size; @@ -314,7 +314,7 @@ char dyld_alt_path[MAXPATHLEN]; int use_alt_dyld = 0; #endif -int cmask = CMASK; +int cmask = CMASK; extern int customnbuf; kern_return_t bsd_autoconf(void); @@ -349,11 +349,11 @@ int policy_check_flags = 0; extern int check_policy_init(int); #endif -#endif /* CONFIG_MACF */ +#endif /* CONFIG_MACF */ /* If we are using CONFIG_DTRACE */ #if CONFIG_DTRACE - extern void dtrace_postinit(void); +extern void dtrace_postinit(void); #endif /* @@ -374,8 +374,8 @@ extern int check_policy_init(int); static void process_name(const char *s, proc_t p) { - strlcpy(p->p_comm, s, 
sizeof(p->p_comm)); - strlcpy(p->p_name, s, sizeof(p->p_name)); + strlcpy(p->p_comm, s, sizeof(p->p_comm)); + strlcpy(p->p_name, s, sizeof(p->p_name)); } /* To allow these values to be patched, they're globals here */ @@ -384,8 +384,8 @@ struct rlimit vm_initial_limit_stack = { DFLSSIZ, MAXSSIZ - PAGE_MAX_SIZE }; struct rlimit vm_initial_limit_data = { DFLDSIZ, MAXDSIZ }; struct rlimit vm_initial_limit_core = { DFLCSIZ, MAXCSIZ }; -extern thread_t cloneproc(task_t, coalition_t, proc_t, int, int); -extern int (*mountroot)(void); +extern thread_t cloneproc(task_t, coalition_t, proc_t, int, int); +extern int (*mountroot)(void); lck_grp_t * proc_lck_grp; lck_grp_t * proc_slock_grp; @@ -442,7 +442,7 @@ bsd_init(void) struct uthread *ut; unsigned int i; struct vfs_context context; - kern_return_t ret; + kern_return_t ret; struct ucred temp_cred; struct posix_cred temp_pcred; #if NFSCLIENT || CONFIG_IMAGEBOOT @@ -454,10 +454,10 @@ bsd_init(void) throttle_init(); printf(copyright); - + bsd_init_kprintf("calling kmeminit\n"); kmeminit(); - + bsd_init_kprintf("calling parse_bsd_args\n"); parse_bsd_args(); @@ -477,10 +477,10 @@ bsd_init(void) /* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/ tty_init(); - kernproc = &proc0; /* implicitly bzero'ed */ + kernproc = &proc0; /* implicitly bzero'ed */ /* kernel_task->proc = kernproc; */ - set_bsdtask_info(kernel_task,(void *)kernproc); + set_bsdtask_info(kernel_task, (void *)kernproc); /* give kernproc a name */ bsd_init_kprintf("calling process_name\n"); @@ -488,47 +488,31 @@ bsd_init(void) /* allocate proc lock group attribute and group */ bsd_init_kprintf("calling lck_grp_attr_alloc_init\n"); - proc_lck_grp_attr= lck_grp_attr_alloc_init(); + proc_lck_grp_attr = lck_grp_attr_alloc_init(); - proc_lck_grp = lck_grp_alloc_init("proc", proc_lck_grp_attr); + proc_lck_grp = lck_grp_alloc_init("proc", proc_lck_grp_attr); -#if CONFIG_FINE_LOCK_GROUPS - proc_slock_grp = lck_grp_alloc_init("proc-slock", proc_lck_grp_attr); - proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock", proc_lck_grp_attr); - proc_mlock_grp = lck_grp_alloc_init("proc-mlock", proc_lck_grp_attr); - proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock", proc_lck_grp_attr); -#endif - proc_kqhashlock_grp = lck_grp_alloc_init("proc-kqhashlock", proc_lck_grp_attr); - proc_knhashlock_grp = lck_grp_alloc_init("proc-knhashlock", proc_lck_grp_attr); + proc_slock_grp = lck_grp_alloc_init("proc-slock", proc_lck_grp_attr); + proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock", proc_lck_grp_attr); + proc_mlock_grp = lck_grp_alloc_init("proc-mlock", proc_lck_grp_attr); + proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock", proc_lck_grp_attr); + proc_kqhashlock_grp = lck_grp_alloc_init("proc-kqhashlock", proc_lck_grp_attr); + proc_knhashlock_grp = lck_grp_alloc_init("proc-knhashlock", proc_lck_grp_attr); #if CONFIG_XNUPOST sysctl_debug_test_stackshot_owner_grp = lck_grp_alloc_init("test-stackshot-owner-grp", LCK_GRP_ATTR_NULL); sysctl_debug_test_stackshot_owner_init_mtx = lck_mtx_alloc_init( - sysctl_debug_test_stackshot_owner_grp, - LCK_ATTR_NULL); + sysctl_debug_test_stackshot_owner_grp, + LCK_ATTR_NULL); #endif /* !CONFIG_XNUPOST */ /* Allocate proc lock attribute */ proc_lck_attr = lck_attr_alloc_init(); -#if 0 -#if __PROC_INTERNAL_DEBUG - lck_attr_setdebug(proc_lck_attr); -#endif -#endif -#if CONFIG_FINE_LOCK_GROUPS proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); 
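With CONFIG_FINE_LOCK_GROUPS removed, the hunk above unconditionally allocates one named lck_grp_t per lock family and carves the proc mutexes and spinlock out of those groups. A kernel-side fragment of that bring-up pattern under an assumed subsystem name ("my-subsystem" and the my_* identifiers are hypothetical; the lck_* KPIs are the ones the hunk calls):

    #include <kern/locks.h>

    static lck_grp_attr_t *my_grp_attr;
    static lck_grp_t      *my_grp;
    static lck_mtx_t      *my_mtx;

    static void
    my_subsystem_lock_init(void)
    {
        /* one attribute object, shared by every group we create */
        my_grp_attr = lck_grp_attr_alloc_init();
        /* a named group keeps these locks distinct in lock statistics */
        my_grp = lck_grp_alloc_init("my-subsystem", my_grp_attr);
        /* individual mutexes are then allocated out of the group */
        my_mtx = lck_mtx_alloc_init(my_grp, LCK_ATTR_NULL);
    }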
lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr); -#else - proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); - proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); - lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr); - lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr); - lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr); - lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr); -#endif assert(bsd_simul_execs != 0); execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); @@ -536,11 +520,12 @@ bsd_init(void) execargs_free_count = bsd_simul_execs; execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t)); bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t)); - - if (current_task() != kernel_task) + + if (current_task() != kernel_task) { printf("bsd_init: We have a problem, " - "current task is not kernel task\n"); - + "current task is not kernel task\n"); + } + bsd_init_kprintf("calling get_bsdthread_info\n"); ut = (uthread_t)get_bsdthread_info(current_thread()); @@ -569,11 +554,7 @@ bsd_init(void) kernproc->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); -#ifdef CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr); -#else - lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr); -#endif /* There is no other bsd thread this point and is safe without pgrp lock */ LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist); kernproc->p_listflag |= P_LIST_INPGRP; @@ -586,11 +567,7 @@ bsd_init(void) session0.s_count = 1; session0.s_leader = kernproc; session0.s_listflags = 0; -#ifdef CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr); -#else - lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr); -#endif LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash); proc_list_unlock(); @@ -599,7 +576,7 @@ bsd_init(void) #endif kernproc->task = kernel_task; - + kernproc->p_stat = SRUN; kernproc->p_flag = P_SYSTEM; kernproc->p_lflag = 0; @@ -610,8 +587,9 @@ bsd_init(void) #endif #if DEVELOPMENT || DEBUG - if (bootarg_disable_aslr) + if (bootarg_disable_aslr) { kernproc->p_flag |= P_DISABLE_ASLR; + } #endif kernproc->p_nice = NZERO; @@ -619,7 +597,7 @@ bsd_init(void) TAILQ_INIT(&kernproc->p_uthlist); TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list); - + kernproc->sigwait = FALSE; kernproc->sigwait_thread = THREAD_NULL; kernproc->exit_thread = THREAD_NULL; @@ -635,14 +613,14 @@ bsd_init(void) /* kern_proc, shouldn't call up to DS for group membership */ temp_pcred.cr_flags = CRF_NOMEMBERD; temp_cred.cr_audit.as_aia_p = audit_default_aia_p; - + bsd_init_kprintf("calling kauth_cred_create\n"); /* * We have to label the temp cred before we create from it to * properly set cr_ngroups, or the create will fail. */ posix_cred_label(&temp_cred, &temp_pcred); - kernproc->p_ucred = kauth_cred_create(&temp_cred); + kernproc->p_ucred = kauth_cred_create(&temp_cred); /* update cred on proc */ PROC_UPDATE_CREDS_ONPROC(kernproc); @@ -677,9 +655,10 @@ bsd_init(void) /* Create the limits structures. 
*/ kernproc->p_limit = &limit0; - for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++) - limit0.pl_rlimit[i].rlim_cur = - limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; + for (i = 0; i < sizeof(kernproc->p_rlimit) / sizeof(kernproc->p_rlimit[0]); i++) { + limit0.pl_rlimit[i].rlim_cur = + limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; + } limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid; limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; @@ -702,20 +681,21 @@ bsd_init(void) * for temporary copying (execve()). */ { - vm_offset_t minimum; + vm_offset_t minimum; bsd_init_kprintf("calling kmem_suballoc\n"); assert(bsd_pageable_map_size != 0); ret = kmem_suballoc(kernel_map, - &minimum, - (vm_size_t)bsd_pageable_map_size, - TRUE, - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_BSD, - &bsd_pageable_map); - if (ret != KERN_SUCCESS) + &minimum, + (vm_size_t)bsd_pageable_map_size, + TRUE, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_BSD, + &bsd_pageable_map); + if (ret != KERN_SUCCESS) { panic("bsd_init: Failed to allocate bsd pageable map"); + } } bsd_init_kprintf("calling fpxlog_init\n"); @@ -766,7 +746,7 @@ bsd_init(void) */ #if CONFIG_AUDIT bsd_init_kprintf("calling audit_init\n"); - audit_init(); + audit_init(); #endif /* Initialize kqueues */ @@ -836,7 +816,7 @@ bsd_init(void) iptap_init(); #if FLOW_DIVERT flow_divert_init(); -#endif /* FLOW_DIVERT */ +#endif /* FLOW_DIVERT */ #endif /* SOCKETS */ kernproc->p_fd->fd_cdir = NULL; kernproc->p_fd->fd_rdir = NULL; @@ -885,7 +865,7 @@ bsd_init(void) #include #if NLOOP > 0 bsd_init_kprintf("calling loopattach\n"); - loopattach(); /* XXX */ + loopattach(); /* XXX */ #endif #if NGIF /* Initialize gif interface (after lo0) */ @@ -907,14 +887,14 @@ bsd_init(void) /* Call any kext code that wants to run just after network init */ bsd_init_kprintf("calling net_init_run\n"); net_init_run(); - + #if CONTENT_FILTER cfil_init(); #endif #if PACKET_MANGLER pkt_mnglr_init(); -#endif +#endif #if NECP /* Initialize Network Extension Control Policies */ @@ -943,7 +923,7 @@ bsd_init(void) inittodr(0); /* Mount the root file system. */ - while( TRUE) { + while (TRUE) { int err; bsd_init_kprintf("calling setconf\n"); @@ -953,18 +933,19 @@ bsd_init(void) #endif bsd_init_kprintf("vfs_mountroot\n"); - if (0 == (err = vfs_mountroot())) + if (0 == (err = vfs_mountroot())) { break; + } rootdevice[0] = '\0'; #if NFSCLIENT if (netboot) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); - for (i=1; 1; i*=2) { + for (i = 1; 1; i *= 2) { printf("bsd_init: failed to mount network root, error %d, %s\n", - err, PE_boot_args()); + err, PE_boot_args()); printf("We are hanging here...\n"); - IOSleep(i*60*1000); + IOSleep(i * 60 * 1000); } /*NOTREACHED*/ } @@ -981,8 +962,9 @@ bsd_init(void) bsd_init_kprintf("calling VFS_ROOT\n"); /* Get the vnode for '/'. Set fdp->fd_fd.fd_cdir to reference it. 
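When the network root refuses to mount, the loop above complains and sleeps with a doubling delay of 1, 2, 4, ... minutes, deliberately forever. An annotated userland restatement of that back-off (sleep() stands in for the kernel's millisecond IOSleep(); err is a placeholder argument):

    #include <stdio.h>
    #include <unistd.h>

    static void
    hang_with_backoff(int err)
    {
        /* unsigned doubling: after 32 passes i wraps to 0 and the delay
         * vanishes, but by then the machine has long been power-cycled */
        for (unsigned int i = 1; 1; i *= 2) {
            printf("bsd_init: failed to mount network root, error %d\n", err);
            printf("We are hanging here...\n");
            sleep(i * 60);   /* kernel form: IOSleep(i * 60 * 1000) ms */
        }
        /* NOTREACHED */
    }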
*/ - if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) + if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) { panic("bsd_init: cannot find root vnode: %s", PE_boot_args()); + } rootvnode->v_flag |= VROOT; (void)vnode_ref(rootvnode); (void)vnode_put(rootvnode); @@ -997,41 +979,41 @@ bsd_init(void) if ((err = netboot_setup()) != 0) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); - for (i=1; 1; i*=2) { + for (i = 1; 1; i *= 2) { printf("bsd_init: NetBoot could not find root, error %d: %s\n", - err, PE_boot_args()); + err, PE_boot_args()); printf("We are hanging here...\n"); - IOSleep(i*60*1000); + IOSleep(i * 60 * 1000); } /*NOTREACHED*/ } } #endif - + #if CONFIG_IMAGEBOOT /* * See if a system disk image is present. If so, mount it and * switch the root vnode to point to it - */ + */ if (netboot == FALSE && imageboot_needed()) { - /* + /* * An image was found. No turning back: we're booted * with a kernel from the disk image. */ - imageboot_setup(); + imageboot_setup(); } #endif /* CONFIG_IMAGEBOOT */ - + /* set initial time; all other resource data is already zero'ed */ microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start); #if DEVFS { - char mounthere[] = "/dev"; /* !const because of internal casting */ + char mounthere[] = "/dev"; /* !const because of internal casting */ - bsd_init_kprintf("calling devfs_kernel_mount\n"); - devfs_kernel_mount(mounthere); + bsd_init_kprintf("calling devfs_kernel_mount\n"); + devfs_kernel_mount(mounthere); } #endif /* DEVFS */ @@ -1047,8 +1029,9 @@ bsd_init(void) bsd_init_kprintf("calling mountroot_post_hook\n"); /* invoke post-root-mount hook */ - if (mountroot_post_hook != NULL) + if (mountroot_post_hook != NULL) { mountroot_post_hook(); + } #if 0 /* not yet */ consider_zone_gc(FALSE); @@ -1056,7 +1039,7 @@ bsd_init(void) /* Initialize System Override call */ init_system_override(); - + bsd_init_kprintf("done\n"); } @@ -1074,7 +1057,7 @@ bsdinit_task(void) mac_cred_label_associate_user(p->p_ucred); #endif - vm_init_before_launchd(); + vm_init_before_launchd(); #if CONFIG_XNUPOST int result = bsd_list_tests(); @@ -1096,17 +1079,18 @@ bsd_autoconf(void) kprintf("bsd_autoconf: calling kminit\n"); kminit(); - /* + /* * Early startup for bsd pseudodevices. 
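bsd_autoconf's pseudo-device pass just below walks a compile-time table until it hits a NULL function pointer. A sketch of the table shape that loop implies; the field types are inferred from the pi->ps_func(pi->ps_count) call and are illustrative, not read from a header in this patch:

    struct pseudo_init {
        int   ps_count;            /* instance count handed to the attach hook */
        void (*ps_func)(int);      /* attach routine; NULL terminates the table */
    };

    /* hypothetical terminated table, in the shape the loop expects */
    static struct pseudo_init pseudo_inits_sketch[] = {
        /* { 1, my_pseudo_attach }, */
        { 0, NULL },
    };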
*/ { - struct pseudo_init *pi; - - for (pi = pseudo_inits; pi->ps_func; pi++) - (*pi->ps_func) (pi->ps_count); + struct pseudo_init *pi; + + for (pi = pseudo_inits; pi->ps_func; pi++) { + (*pi->ps_func)(pi->ps_count); + } } - return( IOKitBSDInit()); + return IOKitBSDInit(); } @@ -1114,31 +1098,30 @@ bsd_autoconf(void) static void setconf(void) -{ - u_int32_t flags; - kern_return_t err; +{ + u_int32_t flags; + kern_return_t err; err = IOFindBSDRoot(rootdevice, sizeof(rootdevice), &rootdev, &flags); - if( err) { + if (err) { printf("setconf: IOFindBSDRoot returned an error (%d);" - "setting rootdevice to 'sd0a'.\n", err); /* XXX DEBUG TEMP */ + "setting rootdevice to 'sd0a'.\n", err); /* XXX DEBUG TEMP */ rootdev = makedev( 6, 0 ); strlcpy(rootdevice, "sd0a", sizeof(rootdevice)); flags = 0; } #if NFSCLIENT - if( flags & 1 ) { + if (flags & 1) { /* network device */ mountroot = netboot_mountroot; } else { #endif - /* otherwise have vfs determine root filesystem */ - mountroot = NULL; + /* otherwise have vfs determine root filesystem */ + mountroot = NULL; #if NFSCLIENT - } +} #endif - } void @@ -1154,10 +1137,11 @@ bsd_utaskbootstrap(void) thread = cloneproc(TASK_NULL, COALITION_NULL, kernproc, FALSE, TRUE); /* Hold the reference as it will be dropped during shutdown */ - initproc = proc_find(1); + initproc = proc_find(1); #if __PROC_INTERNAL_DEBUG - if (initproc == PROC_NULL) + if (initproc == PROC_NULL) { panic("bsd_utaskbootstrap: initproc not set\n"); + } #endif /* * Since we aren't going back out the normal way to our parent, @@ -1178,14 +1162,17 @@ parse_bsd_args(void) char namep[16]; int msgbuf; - if ( PE_parse_boot_argn("-s", namep, sizeof (namep))) + if (PE_parse_boot_argn("-s", namep, sizeof(namep))) { boothowto |= RB_SINGLE; + } - if (PE_parse_boot_argn("-b", namep, sizeof (namep))) + if (PE_parse_boot_argn("-b", namep, sizeof(namep))) { boothowto |= RB_NOBOOTRC; + } - if (PE_parse_boot_argn("-x", namep, sizeof (namep))) /* safe boot */ + if (PE_parse_boot_argn("-x", namep, sizeof(namep))) { /* safe boot */ boothowto |= RB_SAFEBOOT; + } if (PE_parse_boot_argn("-minimalboot", namep, sizeof(namep))) { /* @@ -1198,37 +1185,41 @@ parse_bsd_args(void) #if __arm64__ /* disable 64 bit grading */ - if (PE_parse_boot_argn("-no64exec", namep, sizeof (namep))) + if (PE_parse_boot_argn("-no64exec", namep, sizeof(namep))) { bootarg_no64exec = 1; + } #endif #if __x86_64__ /* disable 32 bit grading */ - if (PE_parse_boot_argn("-no32exec", namep, sizeof (namep))) + if (PE_parse_boot_argn("-no32exec", namep, sizeof(namep))) { bootarg_no32exec = 1; + } #endif /* disable vnode_cache_is_authorized() by setting vnode_cache_defeat */ - if (PE_parse_boot_argn("-vnode_cache_defeat", namep, sizeof (namep))) + if (PE_parse_boot_argn("-vnode_cache_defeat", namep, sizeof(namep))) { bootarg_vnode_cache_defeat = 1; + } #if DEVELOPMENT || DEBUG - if (PE_parse_boot_argn("-disable_aslr", namep, sizeof (namep))) + if (PE_parse_boot_argn("-disable_aslr", namep, sizeof(namep))) { bootarg_disable_aslr = 1; + } #endif - PE_parse_boot_argn("ncl", &ncl, sizeof (ncl)); + PE_parse_boot_argn("ncl", &ncl, sizeof(ncl)); if (PE_parse_boot_argn("nbuf", &max_nbuf_headers, - sizeof (max_nbuf_headers))) { + sizeof(max_nbuf_headers))) { customnbuf = 1; } #if CONFIG_MACF #if defined (__i386__) || defined (__x86_64__) - PE_parse_boot_argn("policy_check", &policy_check_flags, sizeof (policy_check_flags)); + PE_parse_boot_argn("policy_check", &policy_check_flags, sizeof(policy_check_flags)); #endif -#endif /* CONFIG_MACF */ +#endif /* 
CONFIG_MACF */ - if (PE_parse_boot_argn("msgbuf", &msgbuf, sizeof (msgbuf))) { + if (PE_parse_boot_argn("msgbuf", &msgbuf, sizeof(msgbuf))) { log_setsize(msgbuf); oslog_setsize(msgbuf); } @@ -1238,8 +1229,9 @@ parse_bsd_args(void) } #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) - if (PE_parse_boot_argn("-no_vnode_jetsam", namep, sizeof(namep))) - bootarg_no_vnode_jetsam = 1; + if (PE_parse_boot_argn("-no_vnode_jetsam", namep, sizeof(namep))) { + bootarg_no_vnode_jetsam = 1; + } #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ @@ -1270,17 +1262,17 @@ parse_bsd_args(void) PE_parse_boot_argn("sigrestrict", &sigrestrict_arg, sizeof(sigrestrict_arg)); -#if DEVELOPMENT|| DEBUG +#if DEVELOPMENT || DEBUG if (PE_parse_boot_argn("-no_sigsys", namep, sizeof(namep))) { send_sigsys = false; } #endif -#if (DEVELOPMENT|| DEBUG) +#if (DEVELOPMENT || DEBUG) if (PE_parse_boot_argn("alt-dyld", dyld_alt_path, sizeof(dyld_alt_path))) { - if (strlen(dyld_alt_path) > 0) { - use_alt_dyld = 1; - } + if (strlen(dyld_alt_path) > 0) { + use_alt_dyld = 1; + } } #endif } @@ -1288,39 +1280,37 @@ parse_bsd_args(void) void bsd_exec_setup(int scale) { - switch (scale) { - case 0: - case 1: - bsd_simul_execs = BSD_SIMUL_EXECS; - break; - case 2: - case 3: - bsd_simul_execs = 65; - break; - case 4: - case 5: - bsd_simul_execs = 129; - break; - case 6: - case 7: - bsd_simul_execs = 257; - break; - default: - bsd_simul_execs = 513; - break; - + case 0: + case 1: + bsd_simul_execs = BSD_SIMUL_EXECS; + break; + case 2: + case 3: + bsd_simul_execs = 65; + break; + case 4: + case 5: + bsd_simul_execs = 129; + break; + case 6: + case 7: + bsd_simul_execs = 257; + break; + default: + bsd_simul_execs = 513; + break; } bsd_pageable_map_size = (bsd_simul_execs * BSD_PAGEABLE_SIZE_PER_EXEC); } #if !NFSCLIENT -int +int netboot_root(void); -int +int netboot_root(void) { - return(0); + return 0; } #endif diff --git a/bsd/kern/bsd_stubs.c b/bsd/kern/bsd_stubs.c index aa2ed85ad..fb33955de 100644 --- a/bsd/kern/bsd_stubs.c +++ b/bsd/kern/bsd_stubs.c @@ -63,15 +63,18 @@ kmem_mb_alloc(vm_map_t mbmap, int size, int physContig, kern_return_t *err) vm_offset_t addr = 0; kern_return_t kr = KERN_SUCCESS; - if (!physContig) + if (!physContig) { kr = kernel_memory_allocate(mbmap, &addr, size, 0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF); - else + } else { kr = kmem_alloc_contig(mbmap, &addr, size, PAGE_MASK, 0xfffff, 0, KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_MBUF); + } - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { addr = 0; - if (err) + } + if (err) { *err = kr; + } return addr; } @@ -95,19 +98,22 @@ current_proc(void) ut = (struct uthread *)get_bsdthread_info(thread); if (ut && (ut->uu_flag & UT_VFORK) && ut->uu_proc) { p = ut->uu_proc; - if ((p->p_lflag & P_LINVFORK) == 0) + if ((p->p_lflag & P_LINVFORK) == 0) { panic("returning child proc not under vfork"); - if (p->p_vforkact != (void *)thread) + } + if (p->p_vforkact != (void *)thread) { panic("returning child proc which is not cur_act"); - return (p); + } + return p; } p = (struct proc *)get_bsdtask_info(current_task()); - if (p == NULL) - return (kernproc); + if (p == NULL) { + return kernproc; + } - return (p); + return p; } /* Device switch add delete routines */ @@ -129,25 +135,28 @@ bdevsw_isfree(int index) struct bdevsw * devsw; if (index < 0) { - if (index == -1) + if (index == -1) { index = 1; /* start at 1 to avoid collision with volfs (Radar 2842228) */ - else + } else { index = -index; /* start at least this far up in the table */ + } devsw = &bdevsw[index]; for (; 
index < nblkdev; index++, devsw++) { - if (memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) == 0) + if (memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) == 0) { break; + } } } - if (index < 0 || index >= nblkdev) - return (-1); + if (index < 0 || index >= nblkdev) { + return -1; + } devsw = &bdevsw[index]; if ((memcmp((char *)devsw, (char *)&nobdev, sizeof(struct bdevsw)) != 0)) { - return (-1); + return -1; } - return (index); + return index; } /* @@ -170,7 +179,7 @@ bdevsw_add(int index, struct bdevsw * bsw) bdevsw[index] = *bsw; } lck_mtx_unlock(&devsw_lock_list_mtx); - return (index); + return index; } /* * if the slot has the same bsw, then remove @@ -181,8 +190,9 @@ bdevsw_remove(int index, struct bdevsw * bsw) { struct bdevsw * devsw; - if (index < 0 || index >= nblkdev) - return (-1); + if (index < 0 || index >= nblkdev) { + return -1; + } devsw = &bdevsw[index]; lck_mtx_lock_spin(&devsw_lock_list_mtx); @@ -192,7 +202,7 @@ bdevsw_remove(int index, struct bdevsw * bsw) bdevsw[index] = nobdev; } lck_mtx_unlock(&devsw_lock_list_mtx); - return (index); + return index; } /* @@ -210,25 +220,28 @@ cdevsw_isfree(int index) struct cdevsw * devsw; if (index < 0) { - if (index == -1) + if (index == -1) { index = 0; - else + } else { index = -index; /* start at least this far up in the table */ + } devsw = &cdevsw[index]; for (; index < nchrdev; index++, devsw++) { - if (memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) == 0) + if (memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) == 0) { break; + } } } - if (index < 0 || index >= nchrdev) - return (-1); + if (index < 0 || index >= nchrdev) { + return -1; + } devsw = &cdevsw[index]; if ((memcmp((char *)devsw, (char *)&nocdev, sizeof(struct cdevsw)) != 0)) { - return (-1); + return -1; } - return (index); + return index; } /* @@ -256,7 +269,7 @@ cdevsw_add(int index, struct cdevsw * csw) cdevsw[index] = *csw; } lck_mtx_unlock(&devsw_lock_list_mtx); - return (index); + return index; } /* * if the slot has the same csw, then remove @@ -267,8 +280,9 @@ cdevsw_remove(int index, struct cdevsw * csw) { struct cdevsw * devsw; - if (index < 0 || index >= nchrdev) - return (-1); + if (index < 0 || index >= nchrdev) { + return -1; + } devsw = &cdevsw[index]; lck_mtx_lock_spin(&devsw_lock_list_mtx); @@ -279,13 +293,13 @@ cdevsw_remove(int index, struct cdevsw * csw) cdevsw_flags[index] = 0; } lck_mtx_unlock(&devsw_lock_list_mtx); - return (index); + return index; } static int cdev_set_bdev(int cdev, int bdev) { - return (chrtoblk_set(cdev, bdev)); + return chrtoblk_set(cdev, bdev); } int @@ -293,13 +307,13 @@ cdevsw_add_with_bdev(int index, struct cdevsw * csw, int bdev) { index = cdevsw_add(index, csw); if (index < 0) { - return (index); + return index; } if (cdev_set_bdev(index, bdev) < 0) { cdevsw_remove(index, csw); - return (-1); + return -1; } - return (index); + return index; } int diff --git a/bsd/kern/chunklist.h b/bsd/kern/chunklist.h index bae213357..b4fe59d01 100644 --- a/bsd/kern/chunklist.h +++ b/bsd/kern/chunklist.h @@ -47,7 +47,6 @@ struct chunklist_pubkey { }; const struct chunklist_pubkey chunklist_pubkeys[] = { - }; #define CHUNKLIST_NPUBKEYS (sizeof(chunklist_pubkeys)/sizeof(chunklist_pubkeys[0])) diff --git a/bsd/kern/decmpfs.c b/bsd/kern/decmpfs.c index 3e62edb97..84866b9e2 100644 --- a/bsd/kern/decmpfs.c +++ b/bsd/kern/decmpfs.c @@ -2,7 +2,7 @@ * Copyright (c) 2008-2018 Apple Inc. All rights reserved. 
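The bsd_stubs.c hunks above are style-only (return x; plus mandatory braces), but they also surface the devsw slot conventions: a negative index requests a search, with -1 meaning "first free slot" (cdevsw scans from 0, bdevsw from 1 to avoid the old volfs collision noted in the comment), -N meaning "first free slot at or above N", and a negative return meaning failure. A kernel-side fragment of a driver registering through that interface (my_cdevsw and my_driver_attach are hypothetical; cdevsw_add is the KPI shown in the hunk):

    #include <sys/conf.h>

    extern struct cdevsw my_cdevsw;    /* hypothetical, fully populated switch */

    int
    my_driver_attach(void)
    {
        int major = cdevsw_add(-1, &my_cdevsw);  /* take the first free slot */
        if (major < 0) {
            return -1;                           /* table full or slot busy */
        }
        /* devfs node creation for `major` would follow here */
        return major;
    }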
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if !FS_COMPRESSION /* We need these symbols even though compression is turned off */ -#define UNUSED_SYMBOL(x) asm(".global _" #x "\n.set _" #x ", 0\n"); +#define UNUSED_SYMBOL(x) asm(".global _" #x "\n.set _" #x ", 0\n"); UNUSED_SYMBOL(register_decmpfs_decompressor) UNUSED_SYMBOL(unregister_decmpfs_decompressor) @@ -79,25 +79,27 @@ UNUSED_SYMBOL(decmpfs_validate_compressed_file) static const char * baseName(const char *path) { - if (!path) - return NULL; - const char *ret = path; - int i; - for (i = 0; path[i] != 0; i++) { - if (path[i] == '/') - ret = &path[i + 1]; - } - return ret; + if (!path) { + return NULL; + } + const char *ret = path; + int i; + for (i = 0; path[i] != 0; i++) { + if (path[i] == '/') { + ret = &path[i + 1]; + } + } + return ret; } static char* vnpath(vnode_t vp, char *path, int len) { - int origlen = len; - path[0] = 0; - vn_getpath(vp, path, &len); - path[origlen - 1] = 0; - return path; + int origlen = len; + path[0] = 0; + vn_getpath(vp, path, &len); + path[origlen - 1] = 0; + return path; } #define ErrorLog(x, args...) 
printf("%s:%d:%s: " x, baseName(__FILE__), __LINE__, __FUNCTION__, ## args) @@ -124,71 +126,71 @@ vnpath(vnode_t vp, char *path, int len) static SInt32 totalAlloc; typedef struct { - uint32_t allocSz; - uint32_t magic; - const char *file; - int line; + uint32_t allocSz; + uint32_t magic; + const char *file; + int line; } allocated; static void * _malloc(uint32_t sz, __unused int type, __unused int flags, const char *file, int line) { - uint32_t allocSz = sz + 2 * sizeof(allocated); - - allocated *alloc = NULL; - MALLOC(alloc, allocated *, allocSz, type, flags); - if (!alloc) { - ErrorLog("malloc failed\n"); - return NULL; - } - - char *ret = (char*)&alloc[1]; - allocated *alloc2 = (allocated*)(ret + sz); - - alloc->allocSz = allocSz; - alloc->magic = 0xdadadada; - alloc->file = file; - alloc->line = line; - - *alloc2 = *alloc; - - int s = OSAddAtomic(sz, &totalAlloc); - ErrorLog("malloc(%d) -> %p, total allocations %d\n", sz, ret, s + sz); - - return ret; + uint32_t allocSz = sz + 2 * sizeof(allocated); + + allocated *alloc = NULL; + MALLOC(alloc, allocated *, allocSz, type, flags); + if (!alloc) { + ErrorLog("malloc failed\n"); + return NULL; + } + + char *ret = (char*)&alloc[1]; + allocated *alloc2 = (allocated*)(ret + sz); + + alloc->allocSz = allocSz; + alloc->magic = 0xdadadada; + alloc->file = file; + alloc->line = line; + + *alloc2 = *alloc; + + int s = OSAddAtomic(sz, &totalAlloc); + ErrorLog("malloc(%d) -> %p, total allocations %d\n", sz, ret, s + sz); + + return ret; } static void _free(char *ret, __unused int type, const char *file, int line) { - if (!ret) { - ErrorLog("freeing null\n"); - return; - } - allocated *alloc = (allocated*)ret; - alloc--; - uint32_t sz = alloc->allocSz - 2 * sizeof(allocated); - allocated *alloc2 = (allocated*)(ret + sz); - - if (alloc->magic != 0xdadadada) { - panic("freeing bad pointer"); - } - - if (memcmp(alloc, alloc2, sizeof(*alloc)) != 0) { - panic("clobbered data"); - } - - memset(ret, 0xce, sz); - alloc2->file = file; - alloc2->line = line; - FREE(alloc, type); - int s = OSAddAtomic(-sz, &totalAlloc); - ErrorLog("free(%p,%d) -> total allocations %d\n", ret, sz, s - sz); + if (!ret) { + ErrorLog("freeing null\n"); + return; + } + allocated *alloc = (allocated*)ret; + alloc--; + uint32_t sz = alloc->allocSz - 2 * sizeof(allocated); + allocated *alloc2 = (allocated*)(ret + sz); + + if (alloc->magic != 0xdadadada) { + panic("freeing bad pointer"); + } + + if (memcmp(alloc, alloc2, sizeof(*alloc)) != 0) { + panic("clobbered data"); + } + + memset(ret, 0xce, sz); + alloc2->file = file; + alloc2->line = line; + FREE(alloc, type); + int s = OSAddAtomic(-sz, &totalAlloc); + ErrorLog("free(%p,%d) -> total allocations %d\n", ret, sz, s - sz); } #undef MALLOC #undef FREE -#define MALLOC(space, cast, size, type, flags) (space) = (cast)_malloc(size, type, flags, __FILE__, __LINE__) +#define MALLOC(space, cast, size, type, flags) (space) = (cast)_malloc(size, type, flags, __FILE__, __LINE__) #define FREE(addr, type) _free((void *)addr, type, __FILE__, __LINE__) #endif /* MALLOC_DEBUG */ @@ -211,26 +213,28 @@ vfs_context_t decmpfs_ctx; static void * _func_from_offset(uint32_t type, uintptr_t offset) { - /* get the function at the given offset in the registration for the given type */ - const decmpfs_registration *reg = decompressors[type]; - const char *regChar = (const char*)reg; - const char *func = ®Char[offset]; - void * const * funcPtr = (void * const *) func; - - switch (reg->decmpfs_registration) { - case DECMPFS_REGISTRATION_VERSION_V1: - if (offset 
> offsetof_func(free_data)) - return NULL; - break; - case DECMPFS_REGISTRATION_VERSION_V3: - if (offset > offsetof_func(get_flags)) - return NULL; - break; - default: - return NULL; - } - - return funcPtr[0]; + /* get the function at the given offset in the registration for the given type */ + const decmpfs_registration *reg = decompressors[type]; + const char *regChar = (const char*)reg; + const char *func = ®Char[offset]; + void * const * funcPtr = (void * const *) func; + + switch (reg->decmpfs_registration) { + case DECMPFS_REGISTRATION_VERSION_V1: + if (offset > offsetof_func(free_data)) { + return NULL; + } + break; + case DECMPFS_REGISTRATION_VERSION_V3: + if (offset > offsetof_func(get_flags)) { + return NULL; + } + break; + default: + return NULL; + } + + return funcPtr[0]; } extern void IOServicePublishResource( const char * property, boolean_t value ); @@ -241,52 +245,53 @@ static void * _decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset) { /* - this function should be called while holding a shared lock to decompressorsLock, - and will return with the lock held + * this function should be called while holding a shared lock to decompressorsLock, + * and will return with the lock held */ - - if (type >= CMP_MAX) + + if (type >= CMP_MAX) { return NULL; - + } + if (decompressors[type] != NULL) { // the compressor has already registered but the function might be null return _func_from_offset(type, offset); } - - // does IOKit know about a kext that is supposed to provide this type? - char providesName[80]; - snprintf(providesName, sizeof(providesName), "com.apple.AppleFSCompression.providesType%u", type); - if (IOCatalogueMatchingDriversPresent(providesName)) { - // there is a kext that says it will register for this type, so let's wait for it - char resourceName[80]; - uint64_t delay = 10000000ULL; // 10 milliseconds. - snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", type); - ErrorLogWithPath("waiting for %s\n", resourceName); - while(decompressors[type] == NULL) { - lck_rw_unlock_shared(decompressorsLock); // we have to unlock to allow the kext to register - if (IOServiceWaitForMatchingResource(resourceName, delay)) { - lck_rw_lock_shared(decompressorsLock); - break; - } - if (!IOCatalogueMatchingDriversPresent(providesName)) { - // - ErrorLogWithPath("the kext with %s is no longer present\n", providesName); - lck_rw_lock_shared(decompressorsLock); - break; - } - ErrorLogWithPath("still waiting for %s\n", resourceName); - delay *= 2; - lck_rw_lock_shared(decompressorsLock); - } - // IOKit says the kext is loaded, so it should be registered too! - if (decompressors[type] == NULL) { - ErrorLogWithPath("we found %s, but the type still isn't registered\n", providesName); - return NULL; - } - // it's now registered, so let's return the function - return _func_from_offset(type, offset); - } - + + // does IOKit know about a kext that is supposed to provide this type? + char providesName[80]; + snprintf(providesName, sizeof(providesName), "com.apple.AppleFSCompression.providesType%u", type); + if (IOCatalogueMatchingDriversPresent(providesName)) { + // there is a kext that says it will register for this type, so let's wait for it + char resourceName[80]; + uint64_t delay = 10000000ULL; // 10 milliseconds. 
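The registration wait above cannot block while holding decompressorsLock shared, because the arriving kext needs that lock exclusively to register; so each probe drops the lock, waits with a timeout that doubles from 10 ms, and retakes it before rechecking the table. A condensed fragment of that dance (names mirror the hunk; the IOCatalogue presence recheck is elided here):

    uint64_t delay = 10000000ULL;   /* 10 ms; doubles on every miss */

    while (decompressors[type] == NULL) {
        /* unlock so the kext's registration path can take the lock */
        lck_rw_unlock_shared(decompressorsLock);
        if (IOServiceWaitForMatchingResource(resourceName, delay)) {
            lck_rw_lock_shared(decompressorsLock);
            break;              /* resource published; recheck the table */
        }
        lck_rw_lock_shared(decompressorsLock);
        delay *= 2;             /* back off before the next probe */
    }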
+ snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", type); + ErrorLogWithPath("waiting for %s\n", resourceName); + while (decompressors[type] == NULL) { + lck_rw_unlock_shared(decompressorsLock); // we have to unlock to allow the kext to register + if (IOServiceWaitForMatchingResource(resourceName, delay)) { + lck_rw_lock_shared(decompressorsLock); + break; + } + if (!IOCatalogueMatchingDriversPresent(providesName)) { + // + ErrorLogWithPath("the kext with %s is no longer present\n", providesName); + lck_rw_lock_shared(decompressorsLock); + break; + } + ErrorLogWithPath("still waiting for %s\n", resourceName); + delay *= 2; + lck_rw_lock_shared(decompressorsLock); + } + // IOKit says the kext is loaded, so it should be registered too! + if (decompressors[type] == NULL) { + ErrorLogWithPath("we found %s, but the type still isn't registered\n", providesName); + return NULL; + } + // it's now registered, so let's return the function + return _func_from_offset(type, offset); + } + // the compressor hasn't registered, so it never will unless someone manually kextloads it ErrorLogWithPath("tried to access a compressed file of unregistered type %d\n", type); return NULL; @@ -300,29 +305,31 @@ _decmp_get_func(vnode_t vp, uint32_t type, uintptr_t offset) static int vnsize(vnode_t vp, uint64_t *size) { - struct vnode_attr va; - VATTR_INIT(&va); - VATTR_WANTED(&va, va_data_size); + struct vnode_attr va; + VATTR_INIT(&va); + VATTR_WANTED(&va, va_data_size); int error = vnode_getattr(vp, &va, decmpfs_ctx); - if (error != 0) { - ErrorLogWithPath("vnode_getattr err %d\n", error); - return error; - } - *size = va.va_data_size; - return 0; + if (error != 0) { + ErrorLogWithPath("vnode_getattr err %d\n", error); + return error; + } + *size = va.va_data_size; + return 0; } #endif /* COMPRESSION_DEBUG */ #pragma mark --- cnode routines --- -decmpfs_cnode *decmpfs_cnode_alloc(void) +decmpfs_cnode * +decmpfs_cnode_alloc(void) { decmpfs_cnode *dp; MALLOC_ZONE(dp, decmpfs_cnode *, sizeof(decmpfs_cnode), M_DECMPFS_CNODE, M_WAITOK); return dp; } -void decmpfs_cnode_free(decmpfs_cnode *dp) +void +decmpfs_cnode_free(decmpfs_cnode *dp) { FREE_ZONE(dp, sizeof(*dp), M_DECMPFS_CNODE); } @@ -330,7 +337,7 @@ void decmpfs_cnode_free(decmpfs_cnode *dp) void decmpfs_cnode_init(decmpfs_cnode *cp) { - memset(cp, 0, sizeof(*cp)); + memset(cp, 0, sizeof(*cp)); lck_rw_init(&cp->compressed_data_lock, decmpfs_lockgrp, NULL); } @@ -367,7 +374,7 @@ void decmpfs_lock_compressed_data(decmpfs_cnode *cp, int exclusive) { void *thread = current_thread(); - + if (cp->lockowner == thread) { /* this thread is already holding an exclusive lock, so bump the count */ cp->lockcount++; @@ -385,7 +392,7 @@ void decmpfs_unlock_compressed_data(decmpfs_cnode *cp, __unused int exclusive) { void *thread = current_thread(); - + if (cp->lockowner == thread) { /* this thread is holding an exclusive lock, so decrement the count */ if ((--cp->lockcount) > 0) { @@ -394,83 +401,96 @@ decmpfs_unlock_compressed_data(decmpfs_cnode *cp, __unused int exclusive) } cp->lockowner = NULL; } - + lck_rw_done(&cp->compressed_data_lock); } uint32_t decmpfs_cnode_get_vnode_state(decmpfs_cnode *cp) { - return cp->cmp_state; + return cp->cmp_state; } void decmpfs_cnode_set_vnode_state(decmpfs_cnode *cp, uint32_t state, int skiplock) { - if (!skiplock) decmpfs_lock_compressed_data(cp, 1); + if (!skiplock) { + decmpfs_lock_compressed_data(cp, 1); + } cp->cmp_state = state; - if (state == FILE_TYPE_UNKNOWN) { - /* clear out the compression type 
too */ - cp->cmp_type = 0; - } - if (!skiplock) decmpfs_unlock_compressed_data(cp, 1); + if (state == FILE_TYPE_UNKNOWN) { + /* clear out the compression type too */ + cp->cmp_type = 0; + } + if (!skiplock) { + decmpfs_unlock_compressed_data(cp, 1); + } } static void decmpfs_cnode_set_vnode_cmp_type(decmpfs_cnode *cp, uint32_t cmp_type, int skiplock) { - if (!skiplock) decmpfs_lock_compressed_data(cp, 1); - cp->cmp_type = cmp_type; - if (!skiplock) decmpfs_unlock_compressed_data(cp, 1); + if (!skiplock) { + decmpfs_lock_compressed_data(cp, 1); + } + cp->cmp_type = cmp_type; + if (!skiplock) { + decmpfs_unlock_compressed_data(cp, 1); + } } static void decmpfs_cnode_set_vnode_minimal_xattr(decmpfs_cnode *cp, int minimal_xattr, int skiplock) { - if (!skiplock) decmpfs_lock_compressed_data(cp, 1); - cp->cmp_minimal_xattr = minimal_xattr; - if (!skiplock) decmpfs_unlock_compressed_data(cp, 1); + if (!skiplock) { + decmpfs_lock_compressed_data(cp, 1); + } + cp->cmp_minimal_xattr = minimal_xattr; + if (!skiplock) { + decmpfs_unlock_compressed_data(cp, 1); + } } uint64_t decmpfs_cnode_get_vnode_cached_size(decmpfs_cnode *cp) { - return cp->uncompressed_size; + return cp->uncompressed_size; } static void decmpfs_cnode_set_vnode_cached_size(decmpfs_cnode *cp, uint64_t size) { - while(1) { - uint64_t old = cp->uncompressed_size; - if (OSCompareAndSwap64(old, size, (UInt64*)&cp->uncompressed_size)) { - return; - } else { - /* failed to write our value, so loop */ - } - } + while (1) { + uint64_t old = cp->uncompressed_size; + if (OSCompareAndSwap64(old, size, (UInt64*)&cp->uncompressed_size)) { + return; + } else { + /* failed to write our value, so loop */ + } + } } static uint64_t decmpfs_cnode_get_decompression_flags(decmpfs_cnode *cp) { - return cp->decompression_flags; + return cp->decompression_flags; } static void decmpfs_cnode_set_decompression_flags(decmpfs_cnode *cp, uint64_t flags) { - while(1) { - uint64_t old = cp->decompression_flags; - if (OSCompareAndSwap64(old, flags, (UInt64*)&cp->decompression_flags)) { - return; - } else { - /* failed to write our value, so loop */ - } - } + while (1) { + uint64_t old = cp->decompression_flags; + if (OSCompareAndSwap64(old, flags, (UInt64*)&cp->decompression_flags)) { + return; + } else { + /* failed to write our value, so loop */ + } + } } -uint32_t decmpfs_cnode_cmp_type(decmpfs_cnode *cp) +uint32_t +decmpfs_cnode_cmp_type(decmpfs_cnode *cp) { return cp->cmp_type; } @@ -480,421 +500,434 @@ uint32_t decmpfs_cnode_cmp_type(decmpfs_cnode *cp) static int decmpfs_fetch_compressed_header(vnode_t vp, decmpfs_cnode *cp, decmpfs_header **hdrOut, int returnInvalid) { - /* - fetches vp's compression xattr, converting it into a decmpfs_header; returns 0 or errno - if returnInvalid == 1, returns the header even if the type was invalid (out of range), - and return ERANGE in that case - */ - - size_t read_size = 0; - size_t attr_size = 0; - uio_t attr_uio = NULL; - int err = 0; - char *data = NULL; - const bool no_additional_data= ((cp != NULL) - && (cp->cmp_type != 0) - && (cp->cmp_minimal_xattr != 0)); - char uio_buf[ UIO_SIZEOF(1) ]; - decmpfs_header *hdr = NULL; - - /* - * Trace the following parameters on entry with event-id 0x03120004 - * - * @vp->v_id: vnode-id for which to fetch compressed header. - * @no_additional_data: If set true then xattr didn't have any extra data. - * @returnInvalid: return the header even though the type is out of range. 
- */ - DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, - no_additional_data, returnInvalid); - - if (no_additional_data) { - /* this file's xattr didn't have any extra data when we fetched it, so we can synthesize a header from the data in the cnode */ - - MALLOC(data, char *, sizeof(decmpfs_header), M_TEMP, M_WAITOK); - if (!data) { - err = ENOMEM; - goto out; - } - hdr = (decmpfs_header*)data; - hdr->attr_size = sizeof(decmpfs_disk_header); - hdr->compression_magic = DECMPFS_MAGIC; - hdr->compression_type = cp->cmp_type; - hdr->uncompressed_size = decmpfs_cnode_get_vnode_cached_size(cp); - } else { - /* figure out how big the xattr is on disk */ - err = vn_getxattr(vp, DECMPFS_XATTR_NAME, NULL, &attr_size, XATTR_NOSECURITY, decmpfs_ctx); - if (err != 0) - goto out; - - if (attr_size < sizeof(decmpfs_disk_header) || attr_size > MAX_DECMPFS_XATTR_SIZE) { - err = EINVAL; - goto out; - } - - /* allocation includes space for the extra attr_size field of a compressed_header */ - MALLOC(data, char *, attr_size + sizeof(hdr->attr_size), M_TEMP, M_WAITOK); - if (!data) { - err = ENOMEM; - goto out; - } - - /* read the xattr into our buffer, skipping over the attr_size field at the beginning */ - attr_uio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); - uio_addiov(attr_uio, CAST_USER_ADDR_T(data + sizeof(hdr->attr_size)), attr_size); - - err = vn_getxattr(vp, DECMPFS_XATTR_NAME, attr_uio, &read_size, XATTR_NOSECURITY, decmpfs_ctx); - if (err != 0) - goto out; - if (read_size != attr_size) { - err = EINVAL; - goto out; - } - hdr = (decmpfs_header*)data; - hdr->attr_size = attr_size; - /* swap the fields to native endian */ - hdr->compression_magic = OSSwapLittleToHostInt32(hdr->compression_magic); - hdr->compression_type = OSSwapLittleToHostInt32(hdr->compression_type); - hdr->uncompressed_size = OSSwapLittleToHostInt64(hdr->uncompressed_size); - } - - if (hdr->compression_magic != DECMPFS_MAGIC) { - ErrorLogWithPath("invalid compression_magic 0x%08x, should be 0x%08x\n", hdr->compression_magic, DECMPFS_MAGIC); - err = EINVAL; + /* + * fetches vp's compression xattr, converting it into a decmpfs_header; returns 0 or errno + * if returnInvalid == 1, returns the header even if the type was invalid (out of range), + * and return ERANGE in that case + */ + + size_t read_size = 0; + size_t attr_size = 0; + uio_t attr_uio = NULL; + int err = 0; + char *data = NULL; + const bool no_additional_data = ((cp != NULL) + && (cp->cmp_type != 0) + && (cp->cmp_minimal_xattr != 0)); + char uio_buf[UIO_SIZEOF(1)]; + decmpfs_header *hdr = NULL; + + /* + * Trace the following parameters on entry with event-id 0x03120004 + * + * @vp->v_id: vnode-id for which to fetch compressed header. + * @no_additional_data: If set true then xattr didn't have any extra data. + * @returnInvalid: return the header even though the type is out of range. 
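decmpfs_fetch_compressed_header builds its result in two shapes: when the cnode already caches the type and size it synthesizes a header in memory, otherwise it reads the on-disk xattr, whose fixed fields are little-endian and get swapped to host order, and fills in the attr_size bookkeeping field that exists only in memory. A compilable sketch of that layout; the field names follow the hunk, while the exact packing and any trailing type-specific payload are assumptions:

    #include <stdint.h>

    /* fixed, little-endian-on-disk prefix of the decmpfs xattr */
    typedef struct {
        uint32_t compression_magic;   /* must equal DECMPFS_MAGIC */
        uint32_t compression_type;    /* usable only if < CMP_MAX */
        uint64_t uncompressed_size;   /* logical size of the file */
    } disk_header_sketch;

    /* in-memory form: the kernel prepends the size it actually read */
    typedef struct {
        uint32_t attr_size;           /* not on disk; set after vn_getxattr */
        disk_header_sketch disk;
    } header_sketch;

The ERANGE case is the interesting one: with returnInvalid set, an out-of-range compression_type still hands the caller the header, so conversion paths can inspect types no registered decompressor claims.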
+ */ + DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, + no_additional_data, returnInvalid); + + if (no_additional_data) { + /* this file's xattr didn't have any extra data when we fetched it, so we can synthesize a header from the data in the cnode */ + + MALLOC(data, char *, sizeof(decmpfs_header), M_TEMP, M_WAITOK); + if (!data) { + err = ENOMEM; + goto out; + } + hdr = (decmpfs_header*)data; + hdr->attr_size = sizeof(decmpfs_disk_header); + hdr->compression_magic = DECMPFS_MAGIC; + hdr->compression_type = cp->cmp_type; + hdr->uncompressed_size = decmpfs_cnode_get_vnode_cached_size(cp); + } else { + /* figure out how big the xattr is on disk */ + err = vn_getxattr(vp, DECMPFS_XATTR_NAME, NULL, &attr_size, XATTR_NOSECURITY, decmpfs_ctx); + if (err != 0) { + goto out; + } + + if (attr_size < sizeof(decmpfs_disk_header) || attr_size > MAX_DECMPFS_XATTR_SIZE) { + err = EINVAL; + goto out; + } + + /* allocation includes space for the extra attr_size field of a compressed_header */ + MALLOC(data, char *, attr_size + sizeof(hdr->attr_size), M_TEMP, M_WAITOK); + if (!data) { + err = ENOMEM; + goto out; + } + + /* read the xattr into our buffer, skipping over the attr_size field at the beginning */ + attr_uio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); + uio_addiov(attr_uio, CAST_USER_ADDR_T(data + sizeof(hdr->attr_size)), attr_size); + + err = vn_getxattr(vp, DECMPFS_XATTR_NAME, attr_uio, &read_size, XATTR_NOSECURITY, decmpfs_ctx); + if (err != 0) { + goto out; + } + if (read_size != attr_size) { + err = EINVAL; + goto out; + } + hdr = (decmpfs_header*)data; + hdr->attr_size = attr_size; + /* swap the fields to native endian */ + hdr->compression_magic = OSSwapLittleToHostInt32(hdr->compression_magic); + hdr->compression_type = OSSwapLittleToHostInt32(hdr->compression_type); + hdr->uncompressed_size = OSSwapLittleToHostInt64(hdr->uncompressed_size); + } + + if (hdr->compression_magic != DECMPFS_MAGIC) { + ErrorLogWithPath("invalid compression_magic 0x%08x, should be 0x%08x\n", hdr->compression_magic, DECMPFS_MAGIC); + err = EINVAL; goto out; - } - - if (hdr->compression_type >= CMP_MAX) { - if (returnInvalid) { - /* return the header even though the type is out of range */ - err = ERANGE; - } else { - ErrorLogWithPath("compression_type %d out of range\n", hdr->compression_type); - err = EINVAL; - } + } + + if (hdr->compression_type >= CMP_MAX) { + if (returnInvalid) { + /* return the header even though the type is out of range */ + err = ERANGE; + } else { + ErrorLogWithPath("compression_type %d out of range\n", hdr->compression_type); + err = EINVAL; + } goto out; - } - + } + out: - if (err && (err != ERANGE)) { - DebugLogWithPath("err %d\n", err); - if (data) FREE(data, M_TEMP); - *hdrOut = NULL; - } else { - *hdrOut = hdr; - } - /* - * Trace the following parameters on return with event-id 0x03120004. - * - * @vp->v_id: vnode-id for which to fetch compressed header. - * @err: value returned from this function. - */ - DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, err); - return err; + if (err && (err != ERANGE)) { + DebugLogWithPath("err %d\n", err); + if (data) { + FREE(data, M_TEMP); + } + *hdrOut = NULL; + } else { + *hdrOut = hdr; + } + /* + * Trace the following parameters on return with event-id 0x03120004. + * + * @vp->v_id: vnode-id for which to fetch compressed header. + * @err: value returned from this function. 
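The fast-path state getters that follow are only legal after decmpfs_file_is_compressed has populated the cnode, which is why they log FILE_TYPE_UNKNOWN as an error and degrade it to "not compressed". A self-contained sketch of the mapping (the constant names are the hunk's; the numbering is illustrative):

    enum {
        FILE_TYPE_UNKNOWN      = 0,   /* never probed: fast path is invalid */
        FILE_IS_NOT_COMPRESSED = 1,
        FILE_IS_COMPRESSED     = 2,
        FILE_IS_CONVERTING     = 3,   /* decompression in flight */
    };

    static int
    fast_is_compressed(uint32_t cached_state)
    {
        switch (cached_state) {
        case FILE_IS_COMPRESSED:
        case FILE_IS_CONVERTING:
            return 1;   /* converting blocks readers, so report compressed */
        default:
            return 0;   /* unknown degrades safely to "not compressed" */
        }
    }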
+ */ + DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_COMPRESSED_HEADER, vp->v_id, err); + return err; } static int decmpfs_fast_get_state(decmpfs_cnode *cp) { - /* - return the cached state - this should *only* be called when we know that decmpfs_file_is_compressed has already been called, - because this implies that the cached state is valid - */ - int cmp_state = decmpfs_cnode_get_vnode_state(cp); - - switch(cmp_state) { - case FILE_IS_NOT_COMPRESSED: - case FILE_IS_COMPRESSED: - case FILE_IS_CONVERTING: - return cmp_state; - case FILE_TYPE_UNKNOWN: - /* - we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode, - which should not be possible - */ - ErrorLog("decmpfs_fast_get_state called on unknown file\n"); - return FILE_IS_NOT_COMPRESSED; - default: - /* */ - ErrorLog("unknown cmp_state %d\n", cmp_state); - return FILE_IS_NOT_COMPRESSED; - } + /* + * return the cached state + * this should *only* be called when we know that decmpfs_file_is_compressed has already been called, + * because this implies that the cached state is valid + */ + int cmp_state = decmpfs_cnode_get_vnode_state(cp); + + switch (cmp_state) { + case FILE_IS_NOT_COMPRESSED: + case FILE_IS_COMPRESSED: + case FILE_IS_CONVERTING: + return cmp_state; + case FILE_TYPE_UNKNOWN: + /* + * we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode, + * which should not be possible + */ + ErrorLog("decmpfs_fast_get_state called on unknown file\n"); + return FILE_IS_NOT_COMPRESSED; + default: + /* */ + ErrorLog("unknown cmp_state %d\n", cmp_state); + return FILE_IS_NOT_COMPRESSED; + } } static int decmpfs_fast_file_is_compressed(decmpfs_cnode *cp) { - int cmp_state = decmpfs_cnode_get_vnode_state(cp); - - switch(cmp_state) { - case FILE_IS_NOT_COMPRESSED: - return 0; - case FILE_IS_COMPRESSED: - case FILE_IS_CONVERTING: - return 1; - case FILE_TYPE_UNKNOWN: - /* - we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode, - which should not be possible - */ - ErrorLog("decmpfs_fast_get_state called on unknown file\n"); - return 0; - default: - /* */ - ErrorLog("unknown cmp_state %d\n", cmp_state); - return 0; - } + int cmp_state = decmpfs_cnode_get_vnode_state(cp); + + switch (cmp_state) { + case FILE_IS_NOT_COMPRESSED: + return 0; + case FILE_IS_COMPRESSED: + case FILE_IS_CONVERTING: + return 1; + case FILE_TYPE_UNKNOWN: + /* + * we should only get here if decmpfs_file_is_compressed was not called earlier on this vnode, + * which should not be possible + */ + ErrorLog("decmpfs_fast_get_state called on unknown file\n"); + return 0; + default: + /* */ + ErrorLog("unknown cmp_state %d\n", cmp_state); + return 0; + } } errno_t decmpfs_validate_compressed_file(vnode_t vp, decmpfs_cnode *cp) { - /* give a compressor a chance to indicate that a compressed file is invalid */ - - decmpfs_header *hdr = NULL; - errno_t err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); - if (err) { - /* we couldn't get the header */ - if (decmpfs_fast_get_state(cp) == FILE_IS_NOT_COMPRESSED) { - /* the file is no longer compressed, so return success */ - err = 0; - } - goto out; - } - - lck_rw_lock_shared(decompressorsLock); - decmpfs_validate_compressed_file_func validate = decmp_get_func(vp, hdr->compression_type, validate); - if (validate) { /* make sure this validation function is valid */ - /* is the data okay? 
*/ + /* give a compressor a chance to indicate that a compressed file is invalid */ + + decmpfs_header *hdr = NULL; + errno_t err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); + if (err) { + /* we couldn't get the header */ + if (decmpfs_fast_get_state(cp) == FILE_IS_NOT_COMPRESSED) { + /* the file is no longer compressed, so return success */ + err = 0; + } + goto out; + } + + lck_rw_lock_shared(decompressorsLock); + decmpfs_validate_compressed_file_func validate = decmp_get_func(vp, hdr->compression_type, validate); + if (validate) { /* make sure this validation function is valid */ + /* is the data okay? */ err = validate(vp, decmpfs_ctx, hdr); - } else if (decmp_get_func(vp, hdr->compression_type, fetch) == NULL) { - /* the type isn't registered */ - err = EIO; - } else { - /* no validate registered, so nothing to do */ - err = 0; - } - lck_rw_unlock_shared(decompressorsLock); + } else if (decmp_get_func(vp, hdr->compression_type, fetch) == NULL) { + /* the type isn't registered */ + err = EIO; + } else { + /* no validate registered, so nothing to do */ + err = 0; + } + lck_rw_unlock_shared(decompressorsLock); out: - if (hdr) FREE(hdr, M_TEMP); + if (hdr) { + FREE(hdr, M_TEMP); + } #if COMPRESSION_DEBUG - if (err) { - DebugLogWithPath("decmpfs_validate_compressed_file ret %d, vp->v_flag %d\n", err, vp->v_flag); - } + if (err) { + DebugLogWithPath("decmpfs_validate_compressed_file ret %d, vp->v_flag %d\n", err, vp->v_flag); + } #endif - return err; + return err; } int decmpfs_file_is_compressed(vnode_t vp, decmpfs_cnode *cp) { - /* - determines whether vp points to a compressed file - - to speed up this operation, we cache the result in the cnode, and do as little as possible - in the case where the cnode already has a valid cached state - - */ - - int ret = 0; - int error = 0; - uint32_t cmp_state; - struct vnode_attr va_fetch; - decmpfs_header *hdr = NULL; - mount_t mp = NULL; - int cnode_locked = 0; - int saveInvalid = 0; // save the header data even though the type was out of range - uint64_t decompression_flags = 0; - bool is_mounted, is_local_fs; - - if (vnode_isnamedstream(vp)) { - /* - named streams can't be compressed - since named streams of the same file share the same cnode, - we don't want to get/set the state in the cnode, just return 0 - */ - return 0; - } - - /* examine the cached a state in this cnode */ - cmp_state = decmpfs_cnode_get_vnode_state(cp); - switch(cmp_state) { - case FILE_IS_NOT_COMPRESSED: - return 0; - case FILE_IS_COMPRESSED: - return 1; - case FILE_IS_CONVERTING: - /* treat the file as compressed, because this gives us a way to block future reads until decompression is done */ - return 1; - case FILE_TYPE_UNKNOWN: - /* the first time we encountered this vnode, so we need to check it out */ - break; - default: - /* unknown state, assume file is not compressed */ - ErrorLogWithPath("unknown cmp_state %d\n", cmp_state); - return 0; - } - - if (!vnode_isreg(vp)) { - /* only regular files can be compressed */ - ret = FILE_IS_NOT_COMPRESSED; - goto done; - } - - is_mounted = false; - is_local_fs = false; - mp = vnode_mount(vp); - if (mp) - is_mounted = true; - if (is_mounted) - is_local_fs = ((mp->mnt_flag & MNT_LOCAL)); - /* - * Trace the following parameters on entry with event-id 0x03120014. - * - * @vp->v_id: vnode-id of the file being queried. - * @is_mounted: set to true if @vp belongs to a mounted fs. - * @is_local_fs: set to true if @vp belongs to local fs. 
- */ - DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, - is_mounted, is_local_fs); - - if (!is_mounted) { - /* - this should only be true before we mount the root filesystem - we short-cut this return to avoid the call to getattr below, which - will fail before root is mounted - */ - ret = FILE_IS_NOT_COMPRESSED; - goto done; - } - - if (!is_local_fs) { - /* compression only supported on local filesystems */ - ret = FILE_IS_NOT_COMPRESSED; - goto done; - } - + /* + * determines whether vp points to a compressed file + * + * to speed up this operation, we cache the result in the cnode, and do as little as possible + * in the case where the cnode already has a valid cached state + * + */ + + int ret = 0; + int error = 0; + uint32_t cmp_state; + struct vnode_attr va_fetch; + decmpfs_header *hdr = NULL; + mount_t mp = NULL; + int cnode_locked = 0; + int saveInvalid = 0; // save the header data even though the type was out of range + uint64_t decompression_flags = 0; + bool is_mounted, is_local_fs; + + if (vnode_isnamedstream(vp)) { + /* + * named streams can't be compressed + * since named streams of the same file share the same cnode, + * we don't want to get/set the state in the cnode, just return 0 + */ + return 0; + } + + /* examine the cached state in this cnode */ + cmp_state = decmpfs_cnode_get_vnode_state(cp); + switch (cmp_state) { + case FILE_IS_NOT_COMPRESSED: + return 0; + case FILE_IS_COMPRESSED: + return 1; + case FILE_IS_CONVERTING: + /* treat the file as compressed, because this gives us a way to block future reads until decompression is done */ + return 1; + case FILE_TYPE_UNKNOWN: + /* the first time we encountered this vnode, so we need to check it out */ + break; + default: + /* unknown state, assume file is not compressed */ + ErrorLogWithPath("unknown cmp_state %d\n", cmp_state); + return 0; + } + + if (!vnode_isreg(vp)) { + /* only regular files can be compressed */ + ret = FILE_IS_NOT_COMPRESSED; + goto done; + } + + is_mounted = false; + is_local_fs = false; + mp = vnode_mount(vp); + if (mp) { + is_mounted = true; + } + if (is_mounted) { + is_local_fs = ((mp->mnt_flag & MNT_LOCAL)); + } + /* + * Trace the following parameters on entry with event-id 0x03120014. + * + * @vp->v_id: vnode-id of the file being queried. + * @is_mounted: set to true if @vp belongs to a mounted fs. + * @is_local_fs: set to true if @vp belongs to local fs.
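+ *
+ * [Editor's note] The UF_COMPRESSED test below has a direct user-space
+ * analogue via st_flags; a set flag only marks a candidate, since the
+ * kernel still verifies the decmpfs xattr. Illustration only, not part
+ * of this change:
+ */
+#if 0 /* editor's sketch, fenced out of the build */
+#include <sys/stat.h>
+
+/* 1 if path carries the UF_COMPRESSED BSD flag, 0 if not, -1 on error */
+static int
+has_compressed_flag(const char *path)
+{
+	struct stat sb;
+	if (lstat(path, &sb) != 0) {
+		return -1;
+	}
+	return (sb.st_flags & UF_COMPRESSED) ? 1 : 0;
+}
+#endif
+ /*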
+ */ + DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, + is_mounted, is_local_fs); + + if (!is_mounted) { + /* + * this should only be true before we mount the root filesystem + * we short-cut this return to avoid the call to getattr below, which + * will fail before root is mounted + */ + ret = FILE_IS_NOT_COMPRESSED; + goto done; + } + + if (!is_local_fs) { + /* compression only supported on local filesystems */ + ret = FILE_IS_NOT_COMPRESSED; + goto done; + } + /* lock our cnode data so that another caller doesn't change the state under us */ decmpfs_lock_compressed_data(cp, 1); cnode_locked = 1; - + VATTR_INIT(&va_fetch); VATTR_WANTED(&va_fetch, va_flags); error = vnode_getattr(vp, &va_fetch, decmpfs_ctx); if (error) { - /* failed to get the bsd flags so the file is not compressed */ - ret = FILE_IS_NOT_COMPRESSED; - goto done; - } + /* failed to get the bsd flags so the file is not compressed */ + ret = FILE_IS_NOT_COMPRESSED; + goto done; + } if (va_fetch.va_flags & UF_COMPRESSED) { /* UF_COMPRESSED is on, make sure the file has the DECMPFS_XATTR_NAME xattr */ - error = decmpfs_fetch_compressed_header(vp, cp, &hdr, 1); - if ((hdr != NULL) && (error == ERANGE)) { - saveInvalid = 1; - } - if (error) { - /* failed to get the xattr so the file is not compressed */ - ret = FILE_IS_NOT_COMPRESSED; - goto done; - } - /* we got the xattr, so the file is compressed */ - ret = FILE_IS_COMPRESSED; - goto done; - } - /* UF_COMPRESSED isn't on, so the file isn't compressed */ - ret = FILE_IS_NOT_COMPRESSED; - + error = decmpfs_fetch_compressed_header(vp, cp, &hdr, 1); + if ((hdr != NULL) && (error == ERANGE)) { + saveInvalid = 1; + } + if (error) { + /* failed to get the xattr so the file is not compressed */ + ret = FILE_IS_NOT_COMPRESSED; + goto done; + } + /* we got the xattr, so the file is compressed */ + ret = FILE_IS_COMPRESSED; + goto done; + } + /* UF_COMPRESSED isn't on, so the file isn't compressed */ + ret = FILE_IS_NOT_COMPRESSED; + done: - if (((ret == FILE_IS_COMPRESSED) || saveInvalid) && hdr) { + if (((ret == FILE_IS_COMPRESSED) || saveInvalid) && hdr) { /* - cache the uncompressed size away in the cnode + * cache the uncompressed size away in the cnode */ - + if (!cnode_locked) { /* - we should never get here since the only place ret is set to FILE_IS_COMPRESSED - is after the call to decmpfs_lock_compressed_data above + * we should never get here since the only place ret is set to FILE_IS_COMPRESSED + * is after the call to decmpfs_lock_compressed_data above */ decmpfs_lock_compressed_data(cp, 1); cnode_locked = 1; } - - decmpfs_cnode_set_vnode_cached_size(cp, hdr->uncompressed_size); + + decmpfs_cnode_set_vnode_cached_size(cp, hdr->uncompressed_size); decmpfs_cnode_set_vnode_state(cp, ret, 1); - decmpfs_cnode_set_vnode_cmp_type(cp, hdr->compression_type, 1); - /* remember if the xattr's size was equal to the minimal xattr */ - if (hdr->attr_size == sizeof(decmpfs_disk_header)) { - decmpfs_cnode_set_vnode_minimal_xattr(cp, 1, 1); - } - if (ret == FILE_IS_COMPRESSED) { - /* update the ubc's size for this file */ - ubc_setsize(vp, hdr->uncompressed_size); - - /* update the decompression flags in the decmpfs cnode */ - lck_rw_lock_shared(decompressorsLock); - decmpfs_get_decompression_flags_func get_flags = decmp_get_func(vp, hdr->compression_type, get_flags); - if (get_flags) { - decompression_flags = get_flags(vp, decmpfs_ctx, hdr); - } - lck_rw_unlock_shared(decompressorsLock); - decmpfs_cnode_set_decompression_flags(cp, decompression_flags); - } + 
decmpfs_cnode_set_vnode_cmp_type(cp, hdr->compression_type, 1); + /* remember if the xattr's size was equal to the minimal xattr */ + if (hdr->attr_size == sizeof(decmpfs_disk_header)) { + decmpfs_cnode_set_vnode_minimal_xattr(cp, 1, 1); + } + if (ret == FILE_IS_COMPRESSED) { + /* update the ubc's size for this file */ + ubc_setsize(vp, hdr->uncompressed_size); + + /* update the decompression flags in the decmpfs cnode */ + lck_rw_lock_shared(decompressorsLock); + decmpfs_get_decompression_flags_func get_flags = decmp_get_func(vp, hdr->compression_type, get_flags); + if (get_flags) { + decompression_flags = get_flags(vp, decmpfs_ctx, hdr); + } + lck_rw_unlock_shared(decompressorsLock); + decmpfs_cnode_set_decompression_flags(cp, decompression_flags); + } } else { /* we might have already taken the lock above; if so, skip taking it again by passing cnode_locked as the skiplock parameter */ decmpfs_cnode_set_vnode_state(cp, ret, cnode_locked); } - - if (cnode_locked) decmpfs_unlock_compressed_data(cp, 1); - - if (hdr) FREE(hdr, M_TEMP); - /* - * Trace the following parameters on return with event-id 0x03120014. - * - * @vp->v_id: vnode-id of the file being queried. - * @return: set to 1 is file is compressed. - */ - switch(ret) { - case FILE_IS_NOT_COMPRESSED: - DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0); - return 0; - case FILE_IS_COMPRESSED: - case FILE_IS_CONVERTING: - DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 1); - return 1; - default: - /* unknown state, assume file is not compressed */ - DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0); - ErrorLogWithPath("unknown ret %d\n", ret); - return 0; - } + + if (cnode_locked) { + decmpfs_unlock_compressed_data(cp, 1); + } + + if (hdr) { + FREE(hdr, M_TEMP); + } + /* + * Trace the following parameters on return with event-id 0x03120014. + * + * @vp->v_id: vnode-id of the file being queried. + * @return: set to 1 if file is compressed. + */ + switch (ret) { + case FILE_IS_NOT_COMPRESSED: + DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0); + return 0; + case FILE_IS_COMPRESSED: + case FILE_IS_CONVERTING: + DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 1); + return 1; + default: + /* unknown state, assume file is not compressed */ + DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FILE_IS_COMPRESSED, vp->v_id, 0); + ErrorLogWithPath("unknown ret %d\n", ret); + return 0; + } } int decmpfs_update_attributes(vnode_t vp, struct vnode_attr *vap) { - int error = 0; - - if (VATTR_IS_ACTIVE(vap, va_flags)) { - /* the BSD flags are being updated */ - if (vap->va_flags & UF_COMPRESSED) { - /* the compressed bit is being set, did it change? */ - struct vnode_attr va_fetch; - int old_flags = 0; - VATTR_INIT(&va_fetch); - VATTR_WANTED(&va_fetch, va_flags); + int error = 0; + + if (VATTR_IS_ACTIVE(vap, va_flags)) { + /* the BSD flags are being updated */ + if (vap->va_flags & UF_COMPRESSED) { + /* the compressed bit is being set, did it change? */ + struct vnode_attr va_fetch; + int old_flags = 0; + VATTR_INIT(&va_fetch); + VATTR_WANTED(&va_fetch, va_flags); error = vnode_getattr(vp, &va_fetch, decmpfs_ctx); - if (error) - return error; - - old_flags = va_fetch.va_flags; - - if (!(old_flags & UF_COMPRESSED)) { - /* - * Compression bit was turned on, make sure the file has the DECMPFS_XATTR_NAME attribute.
- * This precludes anyone from using the UF_COMPRESSED bit for anything else, and it enforces - * an order of operation -- you must first do the setxattr and then the chflags. - */ - + if (error) { + return error; + } + + old_flags = va_fetch.va_flags; + + if (!(old_flags & UF_COMPRESSED)) { + /* + * Compression bit was turned on, make sure the file has the DECMPFS_XATTR_NAME attribute. + * This precludes anyone from using the UF_COMPRESSED bit for anything else, and it enforces + * an order of operation -- you must first do the setxattr and then the chflags. + */ + if (VATTR_IS_ACTIVE(vap, va_data_size)) { /* * don't allow the caller to set the BSD flag and the size in the same call @@ -903,44 +936,46 @@ decmpfs_update_attributes(vnode_t vp, struct vnode_attr *vap) vap->va_flags &= ~UF_COMPRESSED; return 0; } - - decmpfs_header *hdr = NULL; - error = decmpfs_fetch_compressed_header(vp, NULL, &hdr, 1); - if (error == 0) { - /* - allow the flag to be set since the decmpfs attribute is present - in that case, we also want to truncate the data fork of the file - */ - VATTR_SET_ACTIVE(vap, va_data_size); - vap->va_data_size = 0; - } else if (error == ERANGE) { - /* the file had a decmpfs attribute but the type was out of range, so don't muck with the file's data size */ - } else { - /* no DECMPFS_XATTR_NAME attribute, so deny the update */ + + decmpfs_header *hdr = NULL; + error = decmpfs_fetch_compressed_header(vp, NULL, &hdr, 1); + if (error == 0) { + /* + * allow the flag to be set since the decmpfs attribute is present + * in that case, we also want to truncate the data fork of the file + */ + VATTR_SET_ACTIVE(vap, va_data_size); + vap->va_data_size = 0; + } else if (error == ERANGE) { + /* the file had a decmpfs attribute but the type was out of range, so don't muck with the file's data size */ + } else { + /* no DECMPFS_XATTR_NAME attribute, so deny the update */ vap->va_flags &= ~UF_COMPRESSED; - } - if (hdr) FREE(hdr, M_TEMP); - } - } - } - - return 0; + } + if (hdr) { + FREE(hdr, M_TEMP); + } + } + } + } + + return 0; } static int wait_for_decompress(decmpfs_cnode *cp) { - int state; - lck_mtx_lock(decompress_channel_mtx); - do { - state = decmpfs_fast_get_state(cp); - if (state != FILE_IS_CONVERTING) { - /* file is not decompressing */ - lck_mtx_unlock(decompress_channel_mtx); - return state; - } - msleep((caddr_t)&decompress_channel, decompress_channel_mtx, PINOD, "wait_for_decompress", NULL); - } while(1); + int state; + lck_mtx_lock(decompress_channel_mtx); + do { + state = decmpfs_fast_get_state(cp); + if (state != FILE_IS_CONVERTING) { + /* file is not decompressing */ + lck_mtx_unlock(decompress_channel_mtx); + return state; + } + msleep((caddr_t)&decompress_channel, decompress_channel_mtx, PINOD, "wait_for_decompress", NULL); + } while (1); } #pragma mark --- decmpfs hide query routines --- @@ -949,19 +984,21 @@ int decmpfs_hides_rsrc(vfs_context_t ctx, decmpfs_cnode *cp) { /* - WARNING!!! - callers may (and do) pass NULL for ctx, so we should only use it - for this equality comparison - - This routine should only be called after a file has already been through decmpfs_file_is_compressed + * WARNING!!! 
+ * callers may (and do) pass NULL for ctx, so we should only use it + * for this equality comparison + * + * This routine should only be called after a file has already been through decmpfs_file_is_compressed */ - - if (ctx == decmpfs_ctx) + + if (ctx == decmpfs_ctx) { return 0; - - if (!decmpfs_fast_file_is_compressed(cp)) + } + + if (!decmpfs_fast_file_is_compressed(cp)) { return 0; - + } + /* all compressed files hide their resource fork */ return 1; } @@ -970,106 +1007,115 @@ int decmpfs_hides_xattr(vfs_context_t ctx, decmpfs_cnode *cp, const char *xattr) { /* - WARNING!!! - callers may (and do) pass NULL for ctx, so we should only use it - for this equality comparison - - This routine should only be called after a file has already been through decmpfs_file_is_compressed + * WARNING!!! + * callers may (and do) pass NULL for ctx, so we should only use it + * for this equality comparison + * + * This routine should only be called after a file has already been through decmpfs_file_is_compressed */ - - if (ctx == decmpfs_ctx) + + if (ctx == decmpfs_ctx) { return 0; - if (strncmp(xattr, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME) - 1) == 0) + } + if (strncmp(xattr, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME) - 1) == 0) { return decmpfs_hides_rsrc(ctx, cp); - if (!decmpfs_fast_file_is_compressed(cp)) - /* file is not compressed, so don't hide this xattr */ + } + if (!decmpfs_fast_file_is_compressed(cp)) { + /* file is not compressed, so don't hide this xattr */ return 0; - if (strncmp(xattr, DECMPFS_XATTR_NAME, sizeof(DECMPFS_XATTR_NAME) - 1) == 0) - /* it's our xattr, so hide it */ + } + if (strncmp(xattr, DECMPFS_XATTR_NAME, sizeof(DECMPFS_XATTR_NAME) - 1) == 0) { + /* it's our xattr, so hide it */ return 1; + } /* don't hide this xattr */ return 0; } #pragma mark --- registration/validation routines --- -static inline int registration_valid(const decmpfs_registration *registration) +static inline int +registration_valid(const decmpfs_registration *registration) { - return registration && ((registration->decmpfs_registration == DECMPFS_REGISTRATION_VERSION_V1) || (registration->decmpfs_registration == DECMPFS_REGISTRATION_VERSION_V3)); + return registration && ((registration->decmpfs_registration == DECMPFS_REGISTRATION_VERSION_V1) || (registration->decmpfs_registration == DECMPFS_REGISTRATION_VERSION_V3)); } errno_t register_decmpfs_decompressor(uint32_t compression_type, const decmpfs_registration *registration) { - /* called by kexts to register decompressors */ - - errno_t ret = 0; - int locked = 0; - char resourceName[80]; - - if ((compression_type >= CMP_MAX) || !registration_valid(registration)) { - ret = EINVAL; - goto out; - } - - lck_rw_lock_exclusive(decompressorsLock); locked = 1; - - /* make sure the registration for this type is zero */ + /* called by kexts to register decompressors */ + + errno_t ret = 0; + int locked = 0; + char resourceName[80]; + + if ((compression_type >= CMP_MAX) || !registration_valid(registration)) { + ret = EINVAL; + goto out; + } + + lck_rw_lock_exclusive(decompressorsLock); locked = 1; + + /* make sure the registration for this type is zero */ if (decompressors[compression_type] != NULL) { ret = EEXIST; goto out; } - decompressors[compression_type] = registration; - snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type); - IOServicePublishResource(resourceName, TRUE); - + decompressors[compression_type] = registration; + snprintf(resourceName, sizeof(resourceName), 
"com.apple.AppleFSCompression.Type%u", compression_type); + IOServicePublishResource(resourceName, TRUE); + out: - if (locked) lck_rw_unlock_exclusive(decompressorsLock); - return ret; + if (locked) { + lck_rw_unlock_exclusive(decompressorsLock); + } + return ret; } errno_t unregister_decmpfs_decompressor(uint32_t compression_type, decmpfs_registration *registration) { - /* called by kexts to unregister decompressors */ - - errno_t ret = 0; - int locked = 0; - char resourceName[80]; - - if ((compression_type >= CMP_MAX) || !registration_valid(registration)) { - ret = EINVAL; - goto out; - } - - lck_rw_lock_exclusive(decompressorsLock); locked = 1; - if (decompressors[compression_type] != registration) { - ret = EEXIST; - goto out; - } - decompressors[compression_type] = NULL; - snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type); - IOServicePublishResource(resourceName, FALSE); - + /* called by kexts to unregister decompressors */ + + errno_t ret = 0; + int locked = 0; + char resourceName[80]; + + if ((compression_type >= CMP_MAX) || !registration_valid(registration)) { + ret = EINVAL; + goto out; + } + + lck_rw_lock_exclusive(decompressorsLock); locked = 1; + if (decompressors[compression_type] != registration) { + ret = EEXIST; + goto out; + } + decompressors[compression_type] = NULL; + snprintf(resourceName, sizeof(resourceName), "com.apple.AppleFSCompression.Type%u", compression_type); + IOServicePublishResource(resourceName, FALSE); + out: - if (locked) lck_rw_unlock_exclusive(decompressorsLock); - return ret; + if (locked) { + lck_rw_unlock_exclusive(decompressorsLock); + } + return ret; } static int compression_type_valid(vnode_t vp, decmpfs_header *hdr) { - /* fast pre-check to determine if the given compressor has checked in */ - int ret = 0; - - /* every compressor must have at least a fetch function */ - lck_rw_lock_shared(decompressorsLock); - if (decmp_get_func(vp, hdr->compression_type, fetch) != NULL) { - ret = 1; - } - lck_rw_unlock_shared(decompressorsLock); - - return ret; + /* fast pre-check to determine if the given compressor has checked in */ + int ret = 0; + + /* every compressor must have at least a fetch function */ + lck_rw_lock_shared(decompressorsLock); + if (decmp_get_func(vp, hdr->compression_type, fetch) != NULL) { + ret = 1; + } + lck_rw_unlock_shared(decompressorsLock); + + return ret; } #pragma mark --- compression/decompression routines --- @@ -1077,552 +1123,567 @@ compression_type_valid(vnode_t vp, decmpfs_header *hdr) static int decmpfs_fetch_uncompressed_data(vnode_t vp, decmpfs_cnode *cp, decmpfs_header *hdr, off_t offset, user_ssize_t size, int nvec, decmpfs_vector *vec, uint64_t *bytes_read) { - /* get the uncompressed bytes for the specified region of vp by calling out to the registered compressor */ - - int err = 0; - - *bytes_read = 0; - - if ((uint64_t)offset >= hdr->uncompressed_size) { - /* reading past end of file; nothing to do */ - err = 0; - goto out; - } - if (offset < 0) { - /* tried to read from before start of file */ - err = EINVAL; - goto out; - } - if ((uint64_t)(offset + size) > hdr->uncompressed_size) { - /* adjust size so we don't read past the end of the file */ + /* get the uncompressed bytes for the specified region of vp by calling out to the registered compressor */ + + int err = 0; + + *bytes_read = 0; + + if ((uint64_t)offset >= hdr->uncompressed_size) { + /* reading past end of file; nothing to do */ + err = 0; + goto out; + } + if (offset < 0) { + /* tried to read from 
before start of file */ + err = EINVAL; + goto out; + } + if ((uint64_t)(offset + size) > hdr->uncompressed_size) { + /* adjust size so we don't read past the end of the file */ size = hdr->uncompressed_size - offset; } - if (size == 0) { - /* nothing to read */ - err = 0; - goto out; - } - - /* - * Trace the following parameters on entry with event-id 0x03120008. - * - * @vp->v_id: vnode-id of the file being decompressed. - * @hdr->compression_type: compression type. - * @offset: offset from where to fetch uncompressed data. - * @size: amount of uncompressed data to fetch. - * - * Please NOTE: @offset and @size can overflow in theory but - * here it is safe. - */ - DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_UNCOMPRESSED_DATA, vp->v_id, - hdr->compression_type, (int)offset, (int)size); - lck_rw_lock_shared(decompressorsLock); - decmpfs_fetch_uncompressed_data_func fetch = decmp_get_func(vp, hdr->compression_type, fetch); - if (fetch) { + if (size == 0) { + /* nothing to read */ + err = 0; + goto out; + } + + /* + * Trace the following parameters on entry with event-id 0x03120008. + * + * @vp->v_id: vnode-id of the file being decompressed. + * @hdr->compression_type: compression type. + * @offset: offset from where to fetch uncompressed data. + * @size: amount of uncompressed data to fetch. + * + * Please NOTE: @offset and @size can overflow in theory but + * here it is safe. + */ + DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FETCH_UNCOMPRESSED_DATA, vp->v_id, + hdr->compression_type, (int)offset, (int)size); + lck_rw_lock_shared(decompressorsLock); + decmpfs_fetch_uncompressed_data_func fetch = decmp_get_func(vp, hdr->compression_type, fetch); + if (fetch) { err = fetch(vp, decmpfs_ctx, hdr, offset, size, nvec, vec, bytes_read); lck_rw_unlock_shared(decompressorsLock); - if (err == 0) { - uint64_t decompression_flags = decmpfs_cnode_get_decompression_flags(cp); - if (decompression_flags & DECMPFS_FLAGS_FORCE_FLUSH_ON_DECOMPRESS) { -#if !defined(__i386__) && !defined(__x86_64__) - int i; - for (i = 0; i < nvec; i++) { - flush_dcache64((addr64_t)(uintptr_t)vec[i].buf, vec[i].size, FALSE); - } + if (err == 0) { + uint64_t decompression_flags = decmpfs_cnode_get_decompression_flags(cp); + if (decompression_flags & DECMPFS_FLAGS_FORCE_FLUSH_ON_DECOMPRESS) { +#if !defined(__i386__) && !defined(__x86_64__) + int i; + for (i = 0; i < nvec; i++) { + flush_dcache64((addr64_t)(uintptr_t)vec[i].buf, vec[i].size, FALSE); + } #endif - } - } - } else { - err = ENOTSUP; - lck_rw_unlock_shared(decompressorsLock); - } - /* - * Trace the following parameters on return with event-id 0x03120008. - * - * @vp->v_id: vnode-id of the file being decompressed. - * @bytes_read: amount of uncompressed bytes fetched in bytes. - * @err: value returned from this function. - * - * Please NOTE: @bytes_read can overflow in theory but here it is safe. - */ - DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_UNCOMPRESSED_DATA, vp->v_id, - (int)*bytes_read, err); + } + } + } else { + err = ENOTSUP; + lck_rw_unlock_shared(decompressorsLock); + } + /* + * Trace the following parameters on return with event-id 0x03120008. + * + * @vp->v_id: vnode-id of the file being decompressed. + * @bytes_read: amount of uncompressed bytes fetched in bytes. + * @err: value returned from this function. + * + * Please NOTE: @bytes_read can overflow in theory but here it is safe. 
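+ *
+ * [Editor's note] The fetch callback invoked above scatters uncompressed
+ * bytes across the caller's decmpfs_vector array and reports how many it
+ * produced. A self-contained sketch of that contract (illustration only;
+ * ssize_t stands in for user_ssize_t):
+ */
+#if 0 /* editor's sketch, fenced out of the build */
+#include <string.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+typedef struct { void *buf; ssize_t size; } vec_t; /* stands in for decmpfs_vector */
+
+static uint64_t
+scatter(const char *src, ssize_t len, int nvec, vec_t *vec)
+{
+	ssize_t remaining = len;
+	for (int i = 0; i < nvec && remaining > 0; i++) {
+		ssize_t chunk = (vec[i].size < remaining) ? vec[i].size : remaining;
+		memcpy(vec[i].buf, src, (size_t)chunk);
+		src += chunk;
+		remaining -= chunk;
+	}
+	return (uint64_t)(len - remaining); /* what *bytes_read gets */
+}
+#endif
+ /*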
+ */ + DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FETCH_UNCOMPRESSED_DATA, vp->v_id, + (int)*bytes_read, err); out: - return err; + return err; } static kern_return_t commit_upl(upl_t upl, upl_offset_t pl_offset, size_t uplSize, int flags, int abort) { - kern_return_t kr = 0; + kern_return_t kr = 0; #if CONFIG_IOSCHED - upl_unmark_decmp(upl); + upl_unmark_decmp(upl); #endif /* CONFIG_IOSCHED */ - - /* commit the upl pages */ - if (abort) { - VerboseLog("aborting upl, flags 0x%08x\n", flags); + + /* commit the upl pages */ + if (abort) { + VerboseLog("aborting upl, flags 0x%08x\n", flags); kr = ubc_upl_abort_range(upl, pl_offset, uplSize, flags); - if (kr != KERN_SUCCESS) - ErrorLog("ubc_upl_abort_range error %d\n", (int)kr); - } else { - VerboseLog("committing upl, flags 0x%08x\n", flags | UPL_COMMIT_CLEAR_DIRTY); + if (kr != KERN_SUCCESS) { + ErrorLog("ubc_upl_abort_range error %d\n", (int)kr); + } + } else { + VerboseLog("committing upl, flags 0x%08x\n", flags | UPL_COMMIT_CLEAR_DIRTY); kr = ubc_upl_commit_range(upl, pl_offset, uplSize, flags | UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_WRITTEN_BY_KERNEL); - if (kr != KERN_SUCCESS) - ErrorLog("ubc_upl_commit_range error %d\n", (int)kr); - } - return kr; + if (kr != KERN_SUCCESS) { + ErrorLog("ubc_upl_commit_range error %d\n", (int)kr); + } + } + return kr; } errno_t decmpfs_pagein_compressed(struct vnop_pagein_args *ap, int *is_compressed, decmpfs_cnode *cp) { - /* handles a page-in request from vfs for a compressed file */ - - int err = 0; - vnode_t vp = ap->a_vp; - upl_t pl = ap->a_pl; + /* handles a page-in request from vfs for a compressed file */ + + int err = 0; + vnode_t vp = ap->a_vp; + upl_t pl = ap->a_pl; upl_offset_t pl_offset = ap->a_pl_offset; - off_t f_offset = ap->a_f_offset; - size_t size = ap->a_size; + off_t f_offset = ap->a_f_offset; + size_t size = ap->a_size; int flags = ap->a_flags; - off_t uplPos = 0; - user_ssize_t uplSize = 0; + off_t uplPos = 0; + user_ssize_t uplSize = 0; void *data = NULL; - decmpfs_header *hdr = NULL; - uint64_t cachedSize = 0; + decmpfs_header *hdr = NULL; + uint64_t cachedSize = 0; int cmpdata_locked = 0; - - if(!decmpfs_trylock_compressed_data(cp, 0)) { - return EAGAIN; - } - cmpdata_locked = 1; - - + + if (!decmpfs_trylock_compressed_data(cp, 0)) { + return EAGAIN; + } + cmpdata_locked = 1; + + if (flags & ~(UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD)) { DebugLogWithPath("pagein: unknown flags 0x%08x\n", (flags & ~(UPL_IOSYNC | UPL_NOCOMMIT | UPL_NORDAHEAD))); } - - err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); - if (err != 0) { - goto out; - } - - cachedSize = hdr->uncompressed_size; - - if (!compression_type_valid(vp, hdr)) { - /* compressor not registered */ - err = ENOTSUP; - goto out; - } + + err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); + if (err != 0) { + goto out; + } + + cachedSize = hdr->uncompressed_size; + + if (!compression_type_valid(vp, hdr)) { + /* compressor not registered */ + err = ENOTSUP; + goto out; + } #if CONFIG_IOSCHED /* Mark the UPL as the requesting UPL for decompression */ upl_mark_decmp(pl); #endif /* CONFIG_IOSCHED */ - /* map the upl so we can fetch into it */ + /* map the upl so we can fetch into it */ kern_return_t kr = ubc_upl_map(pl, (vm_offset_t*)&data); if ((kr != KERN_SUCCESS) || (data == NULL)) { err = ENOSPC; data = NULL; #if CONFIG_IOSCHED upl_unmark_decmp(pl); -#endif /* CONFIG_IOSCHED */ +#endif /* CONFIG_IOSCHED */ goto out; } - - uplPos = f_offset; - uplSize = size; - - /* clip the size to the size of the file */ - if ((uint64_t)uplPos + 
uplSize > cachedSize) { - /* truncate the read to the size of the file */ - uplSize = cachedSize - uplPos; - } - - /* do the fetch */ - decmpfs_vector vec; - + + uplPos = f_offset; + uplSize = size; + + /* clip the size to the size of the file */ + if ((uint64_t)uplPos + uplSize > cachedSize) { + /* truncate the read to the size of the file */ + uplSize = cachedSize - uplPos; + } + + /* do the fetch */ + decmpfs_vector vec; + decompress: - /* the mapped data pointer points to the first page of the page list, so we want to start filling in at an offset of pl_offset */ - vec.buf = (char*)data + pl_offset; - vec.size = size; - - uint64_t did_read = 0; + /* the mapped data pointer points to the first page of the page list, so we want to start filling in at an offset of pl_offset */ + vec.buf = (char*)data + pl_offset; + vec.size = size; + + uint64_t did_read = 0; if (decmpfs_fast_get_state(cp) == FILE_IS_CONVERTING) { ErrorLogWithPath("unexpected pagein during decompress\n"); /* - if the file is converting, this must be a recursive call to pagein from underneath a call to decmpfs_decompress_file; - pretend that it succeeded but don't do anything since we're just going to write over the pages anyway + * if the file is converting, this must be a recursive call to pagein from underneath a call to decmpfs_decompress_file; + * pretend that it succeeded but don't do anything since we're just going to write over the pages anyway */ err = 0; did_read = 0; } else { - err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, uplPos, uplSize, 1, &vec, &did_read); - } - if (err) { - DebugLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); - int cmp_state = decmpfs_fast_get_state(cp); - if (cmp_state == FILE_IS_CONVERTING) { - DebugLogWithPath("cmp_state == FILE_IS_CONVERTING\n"); - cmp_state = wait_for_decompress(cp); - if (cmp_state == FILE_IS_COMPRESSED) { - DebugLogWithPath("cmp_state == FILE_IS_COMPRESSED\n"); - /* a decompress was attempted but it failed, let's try calling fetch again */ - goto decompress; - } - } - if (cmp_state == FILE_IS_NOT_COMPRESSED) { - DebugLogWithPath("cmp_state == FILE_IS_NOT_COMPRESSED\n"); - /* the file was decompressed after we started reading it */ - *is_compressed = 0; /* instruct caller to fall back to its normal path */ - } - } - - /* zero out whatever we didn't read, and zero out the end of the last page(s) */ - uint64_t total_size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); - if (did_read < total_size) { - memset((char*)vec.buf + did_read, 0, total_size - did_read); - } - + err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, uplPos, uplSize, 1, &vec, &did_read); + } + if (err) { + DebugLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); + int cmp_state = decmpfs_fast_get_state(cp); + if (cmp_state == FILE_IS_CONVERTING) { + DebugLogWithPath("cmp_state == FILE_IS_CONVERTING\n"); + cmp_state = wait_for_decompress(cp); + if (cmp_state == FILE_IS_COMPRESSED) { + DebugLogWithPath("cmp_state == FILE_IS_COMPRESSED\n"); + /* a decompress was attempted but it failed, let's try calling fetch again */ + goto decompress; + } + } + if (cmp_state == FILE_IS_NOT_COMPRESSED) { + DebugLogWithPath("cmp_state == FILE_IS_NOT_COMPRESSED\n"); + /* the file was decompressed after we started reading it */ + *is_compressed = 0; /* instruct caller to fall back to its normal path */ + } + } + + /* zero out whatever we didn't read, and zero out the end of the last page(s) */ + uint64_t total_size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); + if (did_read < total_size) { + 
memset((char*)vec.buf + did_read, 0, total_size - did_read); + } + #if CONFIG_IOSCHED upl_unmark_decmp(pl); -#endif /* CONFIG_IOSCHED */ - +#endif /* CONFIG_IOSCHED */ + kr = ubc_upl_unmap(pl); data = NULL; /* make sure to set data to NULL so we don't try to unmap again below */ - if (kr != KERN_SUCCESS) - ErrorLogWithPath("ubc_upl_unmap error %d\n", (int)kr); - else { - if (!err) { - /* commit our pages */ + if (kr != KERN_SUCCESS) { + ErrorLogWithPath("ubc_upl_unmap error %d\n", (int)kr); + } else { + if (!err) { + /* commit our pages */ kr = commit_upl(pl, pl_offset, total_size, UPL_COMMIT_FREE_ON_EMPTY, 0); - } - } - + } + } + out: - if (data) ubc_upl_unmap(pl); - if (hdr) FREE(hdr, M_TEMP); - if (cmpdata_locked) decmpfs_unlock_compressed_data(cp, 0); - if (err) { + if (data) { + ubc_upl_unmap(pl); + } + if (hdr) { + FREE(hdr, M_TEMP); + } + if (cmpdata_locked) { + decmpfs_unlock_compressed_data(cp, 0); + } + if (err) { #if 0 - if (err != ENXIO && err != ENOSPC) { - char *path; - MALLOC(path, char *, PATH_MAX, M_TEMP, M_WAITOK); - panic("%s: decmpfs_pagein_compressed: err %d", vnpath(vp, path, PATH_MAX), err); - FREE(path, M_TEMP); - } + if (err != ENXIO && err != ENOSPC) { + char *path; + MALLOC(path, char *, PATH_MAX, M_TEMP, M_WAITOK); + panic("%s: decmpfs_pagein_compressed: err %d", vnpath(vp, path, PATH_MAX), err); + FREE(path, M_TEMP); + } #endif /* 0 */ - ErrorLogWithPath("err %d\n", err); - } + ErrorLogWithPath("err %d\n", err); + } return err; } -errno_t +errno_t decmpfs_read_compressed(struct vnop_read_args *ap, int *is_compressed, decmpfs_cnode *cp) { - /* handles a read request from vfs for a compressed file */ - - uio_t uio = ap->a_uio; - vnode_t vp = ap->a_vp; - int err = 0; - int countInt = 0; - off_t uplPos = 0; - user_ssize_t uplSize = 0; - user_ssize_t uplRemaining = 0; - off_t curUplPos = 0; - user_ssize_t curUplSize = 0; - kern_return_t kr = KERN_SUCCESS; - int abort_read = 0; - void *data = NULL; - uint64_t did_read = 0; - upl_t upl = NULL; - upl_page_info_t *pli = NULL; - decmpfs_header *hdr = NULL; - uint64_t cachedSize = 0; - off_t uioPos = 0; - user_ssize_t uioRemaining = 0; + /* handles a read request from vfs for a compressed file */ + + uio_t uio = ap->a_uio; + vnode_t vp = ap->a_vp; + int err = 0; + int countInt = 0; + off_t uplPos = 0; + user_ssize_t uplSize = 0; + user_ssize_t uplRemaining = 0; + off_t curUplPos = 0; + user_ssize_t curUplSize = 0; + kern_return_t kr = KERN_SUCCESS; + int abort_read = 0; + void *data = NULL; + uint64_t did_read = 0; + upl_t upl = NULL; + upl_page_info_t *pli = NULL; + decmpfs_header *hdr = NULL; + uint64_t cachedSize = 0; + off_t uioPos = 0; + user_ssize_t uioRemaining = 0; int cmpdata_locked = 0; - + decmpfs_lock_compressed_data(cp, 0); cmpdata_locked = 1; - - uplPos = uio_offset(uio); - uplSize = uio_resid(uio); - VerboseLogWithPath("uplPos %lld uplSize %lld\n", uplPos, uplSize); - - cachedSize = decmpfs_cnode_get_vnode_cached_size(cp); - - if ((uint64_t)uplPos + uplSize > cachedSize) { - /* truncate the read to the size of the file */ - uplSize = cachedSize - uplPos; - } - - /* give the cluster layer a chance to fill in whatever it already has */ - countInt = (uplSize > INT_MAX) ? 
INT_MAX : uplSize; - err = cluster_copy_ubc_data(vp, uio, &countInt, 0); - if (err != 0) - goto out; - - /* figure out what's left */ - uioPos = uio_offset(uio); - uioRemaining = uio_resid(uio); - if ((uint64_t)uioPos + uioRemaining > cachedSize) { - /* truncate the read to the size of the file */ - uioRemaining = cachedSize - uioPos; - } - - if (uioRemaining <= 0) { - /* nothing left */ - goto out; - } - - err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); - if (err != 0) { - goto out; - } - if (!compression_type_valid(vp, hdr)) { - err = ENOTSUP; - goto out; - } - - uplPos = uioPos; - uplSize = uioRemaining; + + uplPos = uio_offset(uio); + uplSize = uio_resid(uio); + VerboseLogWithPath("uplPos %lld uplSize %lld\n", uplPos, uplSize); + + cachedSize = decmpfs_cnode_get_vnode_cached_size(cp); + + if ((uint64_t)uplPos + uplSize > cachedSize) { + /* truncate the read to the size of the file */ + uplSize = cachedSize - uplPos; + } + + /* give the cluster layer a chance to fill in whatever it already has */ + countInt = (uplSize > INT_MAX) ? INT_MAX : uplSize; + err = cluster_copy_ubc_data(vp, uio, &countInt, 0); + if (err != 0) { + goto out; + } + + /* figure out what's left */ + uioPos = uio_offset(uio); + uioRemaining = uio_resid(uio); + if ((uint64_t)uioPos + uioRemaining > cachedSize) { + /* truncate the read to the size of the file */ + uioRemaining = cachedSize - uioPos; + } + + if (uioRemaining <= 0) { + /* nothing left */ + goto out; + } + + err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); + if (err != 0) { + goto out; + } + if (!compression_type_valid(vp, hdr)) { + err = ENOTSUP; + goto out; + } + + uplPos = uioPos; + uplSize = uioRemaining; #if COMPRESSION_DEBUG - DebugLogWithPath("uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); + DebugLogWithPath("uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); #endif - - lck_rw_lock_shared(decompressorsLock); - decmpfs_adjust_fetch_region_func adjust_fetch = decmp_get_func(vp, hdr->compression_type, adjust_fetch); - if (adjust_fetch) { - /* give the compressor a chance to adjust the portion of the file that we read */ + + lck_rw_lock_shared(decompressorsLock); + decmpfs_adjust_fetch_region_func adjust_fetch = decmp_get_func(vp, hdr->compression_type, adjust_fetch); + if (adjust_fetch) { + /* give the compressor a chance to adjust the portion of the file that we read */ adjust_fetch(vp, decmpfs_ctx, hdr, &uplPos, &uplSize); - VerboseLogWithPath("adjusted uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); - } - lck_rw_unlock_shared(decompressorsLock); - - /* clip the adjusted size to the size of the file */ - if ((uint64_t)uplPos + uplSize > cachedSize) { - /* truncate the read to the size of the file */ - uplSize = cachedSize - uplPos; - } - - if (uplSize <= 0) { - /* nothing left */ - goto out; - } - - /* - since we're going to create a upl for the given region of the file, - make sure we're on page boundaries - */ - - if (uplPos & (PAGE_SIZE - 1)) { - /* round position down to page boundary */ - uplSize += (uplPos & (PAGE_SIZE - 1)); - uplPos &= ~(PAGE_SIZE - 1); - } - /* round size up to page multiple */ - uplSize = (uplSize + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); - - VerboseLogWithPath("new uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); - - uplRemaining = uplSize; - curUplPos = uplPos; - curUplSize = 0; - - while(uplRemaining > 0) { - /* start after the last upl */ - curUplPos += curUplSize; - - /* clip to max upl size */ - curUplSize = uplRemaining; - if 
(curUplSize > MAX_UPL_SIZE_BYTES) { - curUplSize = MAX_UPL_SIZE_BYTES; - } - - /* create the upl */ - kr = ubc_create_upl_kernel(vp, curUplPos, curUplSize, &upl, &pli, UPL_SET_LITE, VM_KERN_MEMORY_FILE); - if (kr != KERN_SUCCESS) { - ErrorLogWithPath("ubc_create_upl error %d\n", (int)kr); - err = EINVAL; - goto out; - } - VerboseLogWithPath("curUplPos %lld curUplSize %lld\n", (uint64_t)curUplPos, (uint64_t)curUplSize); - -#if CONFIG_IOSCHED - /* Mark the UPL as the requesting UPL for decompression */ - upl_mark_decmp(upl); -#endif /* CONFIG_IOSCHED */ + VerboseLogWithPath("adjusted uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); + } + lck_rw_unlock_shared(decompressorsLock); + + /* clip the adjusted size to the size of the file */ + if ((uint64_t)uplPos + uplSize > cachedSize) { + /* truncate the read to the size of the file */ + uplSize = cachedSize - uplPos; + } + + if (uplSize <= 0) { + /* nothing left */ + goto out; + } + + /* + * since we're going to create a upl for the given region of the file, + * make sure we're on page boundaries + */ + + if (uplPos & (PAGE_SIZE - 1)) { + /* round position down to page boundary */ + uplSize += (uplPos & (PAGE_SIZE - 1)); + uplPos &= ~(PAGE_SIZE - 1); + } + /* round size up to page multiple */ + uplSize = (uplSize + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); + + VerboseLogWithPath("new uplPos %lld uplSize %lld\n", (uint64_t)uplPos, (uint64_t)uplSize); - /* map the upl */ - kr = ubc_upl_map(upl, (vm_offset_t*)&data); - if (kr != KERN_SUCCESS) { + uplRemaining = uplSize; + curUplPos = uplPos; + curUplSize = 0; - commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1); + while (uplRemaining > 0) { + /* start after the last upl */ + curUplPos += curUplSize; + + /* clip to max upl size */ + curUplSize = uplRemaining; + if (curUplSize > MAX_UPL_SIZE_BYTES) { + curUplSize = MAX_UPL_SIZE_BYTES; + } + + /* create the upl */ + kr = ubc_create_upl_kernel(vp, curUplPos, curUplSize, &upl, &pli, UPL_SET_LITE, VM_KERN_MEMORY_FILE); + if (kr != KERN_SUCCESS) { + ErrorLogWithPath("ubc_create_upl error %d\n", (int)kr); + err = EINVAL; + goto out; + } + VerboseLogWithPath("curUplPos %lld curUplSize %lld\n", (uint64_t)curUplPos, (uint64_t)curUplSize); + +#if CONFIG_IOSCHED + /* Mark the UPL as the requesting UPL for decompression */ + upl_mark_decmp(upl); +#endif /* CONFIG_IOSCHED */ + + /* map the upl */ + kr = ubc_upl_map(upl, (vm_offset_t*)&data); + if (kr != KERN_SUCCESS) { + commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1); #if 0 - char *path; - MALLOC(path, char *, PATH_MAX, M_TEMP, M_WAITOK); - panic("%s: decmpfs_read_compressed: ubc_upl_map error %d", vnpath(vp, path, PATH_MAX), (int)kr); - FREE(path, M_TEMP); + char *path; + MALLOC(path, char *, PATH_MAX, M_TEMP, M_WAITOK); + panic("%s: decmpfs_read_compressed: ubc_upl_map error %d", vnpath(vp, path, PATH_MAX), (int)kr); + FREE(path, M_TEMP); #else /* 0 */ - ErrorLogWithPath("ubc_upl_map kr=0x%x\n", (int)kr); + ErrorLogWithPath("ubc_upl_map kr=0x%x\n", (int)kr); #endif /* 0 */ - err = EINVAL; - goto out; - } - - /* make sure the map succeeded */ - if (!data) { - - commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1); - - ErrorLogWithPath("ubc_upl_map mapped null\n"); - err = EINVAL; - goto out; - } - - /* fetch uncompressed data into the mapped upl */ - decmpfs_vector vec; - decompress: - vec = (decmpfs_vector){ .buf = data, .size = curUplSize }; - err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, curUplPos, curUplSize, 1, &vec, &did_read); - if (err) { - 
ErrorLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); - - /* maybe the file is converting to decompressed */ - int cmp_state = decmpfs_fast_get_state(cp); - if (cmp_state == FILE_IS_CONVERTING) { - ErrorLogWithPath("cmp_state == FILE_IS_CONVERTING\n"); - cmp_state = wait_for_decompress(cp); - if (cmp_state == FILE_IS_COMPRESSED) { - ErrorLogWithPath("cmp_state == FILE_IS_COMPRESSED\n"); - /* a decompress was attempted but it failed, let's try fetching again */ - goto decompress; - } - } - if (cmp_state == FILE_IS_NOT_COMPRESSED) { - ErrorLogWithPath("cmp_state == FILE_IS_NOT_COMPRESSED\n"); - /* the file was decompressed after we started reading it */ - abort_read = 1; /* we're not going to commit our data */ - *is_compressed = 0; /* instruct caller to fall back to its normal path */ - } - kr = KERN_FAILURE; - did_read = 0; - } - /* zero out the remainder of the last page */ - memset((char*)data + did_read, 0, curUplSize - did_read); - kr = ubc_upl_unmap(upl); - if (kr == KERN_SUCCESS) { - if (abort_read) { + err = EINVAL; + goto out; + } + + /* make sure the map succeeded */ + if (!data) { + commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1); + + ErrorLogWithPath("ubc_upl_map mapped null\n"); + err = EINVAL; + goto out; + } + + /* fetch uncompressed data into the mapped upl */ + decmpfs_vector vec; +decompress: + vec = (decmpfs_vector){ .buf = data, .size = curUplSize }; + err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, curUplPos, curUplSize, 1, &vec, &did_read); + if (err) { + ErrorLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); + + /* maybe the file is converting to decompressed */ + int cmp_state = decmpfs_fast_get_state(cp); + if (cmp_state == FILE_IS_CONVERTING) { + ErrorLogWithPath("cmp_state == FILE_IS_CONVERTING\n"); + cmp_state = wait_for_decompress(cp); + if (cmp_state == FILE_IS_COMPRESSED) { + ErrorLogWithPath("cmp_state == FILE_IS_COMPRESSED\n"); + /* a decompress was attempted but it failed, let's try fetching again */ + goto decompress; + } + } + if (cmp_state == FILE_IS_NOT_COMPRESSED) { + ErrorLogWithPath("cmp_state == FILE_IS_NOT_COMPRESSED\n"); + /* the file was decompressed after we started reading it */ + abort_read = 1; /* we're not going to commit our data */ + *is_compressed = 0; /* instruct caller to fall back to its normal path */ + } + kr = KERN_FAILURE; + did_read = 0; + } + /* zero out the remainder of the last page */ + memset((char*)data + did_read, 0, curUplSize - did_read); + kr = ubc_upl_unmap(upl); + if (kr == KERN_SUCCESS) { + if (abort_read) { kr = commit_upl(upl, 0, curUplSize, UPL_ABORT_FREE_ON_EMPTY, 1); - } else { - VerboseLogWithPath("uioPos %lld uioRemaining %lld\n", (uint64_t)uioPos, (uint64_t)uioRemaining); - if (uioRemaining) { - off_t uplOff = uioPos - curUplPos; - if (uplOff < 0) { - ErrorLogWithPath("uplOff %lld should never be negative\n", (int64_t)uplOff); - err = EINVAL; - } else { - off_t count = curUplPos + curUplSize - uioPos; - if (count < 0) { - /* this upl is entirely before the uio */ - } else { - if (count > uioRemaining) - count = uioRemaining; - int io_resid = count; - err = cluster_copy_upl_data(uio, upl, uplOff, &io_resid); - int copied = count - io_resid; - VerboseLogWithPath("uplOff %lld count %lld copied %lld\n", (uint64_t)uplOff, (uint64_t)count, (uint64_t)copied); - if (err) { - ErrorLogWithPath("cluster_copy_upl_data err %d\n", err); - } - uioPos += copied; - uioRemaining -= copied; - } - } - } + } else { + VerboseLogWithPath("uioPos %lld uioRemaining %lld\n", (uint64_t)uioPos, 
(uint64_t)uioRemaining); + if (uioRemaining) { + off_t uplOff = uioPos - curUplPos; + if (uplOff < 0) { + ErrorLogWithPath("uplOff %lld should never be negative\n", (int64_t)uplOff); + err = EINVAL; + } else { + off_t count = curUplPos + curUplSize - uioPos; + if (count < 0) { + /* this upl is entirely before the uio */ + } else { + if (count > uioRemaining) { + count = uioRemaining; + } + int io_resid = count; + err = cluster_copy_upl_data(uio, upl, uplOff, &io_resid); + int copied = count - io_resid; + VerboseLogWithPath("uplOff %lld count %lld copied %lld\n", (uint64_t)uplOff, (uint64_t)count, (uint64_t)copied); + if (err) { + ErrorLogWithPath("cluster_copy_upl_data err %d\n", err); + } + uioPos += copied; + uioRemaining -= copied; + } + } + } kr = commit_upl(upl, 0, curUplSize, UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_INACTIVATE, 0); - if (err) { - goto out; - } - } - } else { - ErrorLogWithPath("ubc_upl_unmap error %d\n", (int)kr); - } - - uplRemaining -= curUplSize; - } - + if (err) { + goto out; + } + } + } else { + ErrorLogWithPath("ubc_upl_unmap error %d\n", (int)kr); + } + + uplRemaining -= curUplSize; + } + out: - if (hdr) FREE(hdr, M_TEMP); - if (cmpdata_locked) decmpfs_unlock_compressed_data(cp, 0); - if (err) {/* something went wrong */ - ErrorLogWithPath("err %d\n", err); - return err; - } - + if (hdr) { + FREE(hdr, M_TEMP); + } + if (cmpdata_locked) { + decmpfs_unlock_compressed_data(cp, 0); + } + if (err) {/* something went wrong */ + ErrorLogWithPath("err %d\n", err); + return err; + } + #if COMPRESSION_DEBUG - uplSize = uio_resid(uio); - if (uplSize) - VerboseLogWithPath("still %lld bytes to copy\n", uplSize); + uplSize = uio_resid(uio); + if (uplSize) { + VerboseLogWithPath("still %lld bytes to copy\n", uplSize); + } #endif - return 0; + return 0; } int decmpfs_free_compressed_data(vnode_t vp, decmpfs_cnode *cp) { - /* - call out to the decompressor to free remove any data associated with this compressed file - then delete the file's compression xattr - */ - decmpfs_header *hdr = NULL; - - /* - * Trace the following parameters on entry with event-id 0x03120010. - * - * @vp->v_id: vnode-id of the file for which to free compressed data. - */ - DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id); - - int err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); - if (err) { - ErrorLogWithPath("decmpfs_fetch_compressed_header err %d\n", err); - } else { - lck_rw_lock_shared(decompressorsLock); - decmpfs_free_compressed_data_func free_data = decmp_get_func(vp, hdr->compression_type, free_data); - if (free_data) { + /* + * call out to the decompressor to free/remove any data associated with this compressed file + * then delete the file's compression xattr + */ + decmpfs_header *hdr = NULL; + + /* + * Trace the following parameters on entry with event-id 0x03120010. + * + * @vp->v_id: vnode-id of the file for which to free compressed data.
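+ *
+ * [Editor's note] The free_data hook consulted below comes from the table
+ * populated by register_decmpfs_decompressor(). A sketch of how a
+ * compressor kext registers (illustration only; the type number 5 is
+ * hypothetical and the fields mirror Type1Reg at the end of this file):
+ */
+#if 0 /* editor's sketch, fenced out of the build */
+static int
+my_fetch(vnode_t vp, vfs_context_t ctx, decmpfs_header *hdr, off_t offset,
+    user_ssize_t size, int nvec, decmpfs_vector *vec, uint64_t *bytes_read)
+{
+	/* decode hdr->attr_bytes into vec[] and set *bytes_read */
+	return 0;
+}
+
+static const decmpfs_registration my_reg = {
+	.decmpfs_registration = DECMPFS_REGISTRATION_VERSION,
+	.fetch = my_fetch, /* the one mandatory hook; see compression_type_valid() */
+	.validate = NULL,
+	.adjust_fetch = NULL,
+	.free_data = NULL,
+	.get_flags = NULL,
+};
+
+static void
+my_kext_start(void)
+{
+	(void)register_decmpfs_decompressor(5 /* hypothetical type < CMP_MAX */, &my_reg);
+}
+#endif
+ /*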
+ */ + DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id); + + int err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); + if (err) { + ErrorLogWithPath("decmpfs_fetch_compressed_header err %d\n", err); + } else { + lck_rw_lock_shared(decompressorsLock); + decmpfs_free_compressed_data_func free_data = decmp_get_func(vp, hdr->compression_type, free_data); + if (free_data) { err = free_data(vp, decmpfs_ctx, hdr); - } else { - /* nothing to do, so no error */ - err = 0; - } - lck_rw_unlock_shared(decompressorsLock); - - if (err != 0) { - ErrorLogWithPath("decompressor err %d\n", err); - } - } - /* - * Trace the following parameters on return with event-id 0x03120010. - * - * @vp->v_id: vnode-id of the file for which to free compressed data. - * @err: value returned from this function. - */ - DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id, err); - - /* delete the xattr */ + } else { + /* nothing to do, so no error */ + err = 0; + } + lck_rw_unlock_shared(decompressorsLock); + + if (err != 0) { + ErrorLogWithPath("decompressor err %d\n", err); + } + } + /* + * Trace the following parameters on return with event-id 0x03120010. + * + * @vp->v_id: vnode-id of the file for which to free compressed data. + * @err: value returned from this function. + */ + DECMPFS_EMIT_TRACE_RETURN(DECMPDBG_FREE_COMPRESSED_DATA, vp->v_id, err); + + /* delete the xattr */ err = vn_removexattr(vp, DECMPFS_XATTR_NAME, 0, decmpfs_ctx); - if (err != 0) { - goto out; - } - + if (err != 0) { + goto out; + } + out: - if (hdr) FREE(hdr, M_TEMP); - return err; + if (hdr) { + FREE(hdr, M_TEMP); + } + return err; } #pragma mark --- file conversion routines --- @@ -1630,34 +1691,34 @@ out: static int unset_compressed_flag(vnode_t vp) { - int err = 0; - struct vnode_attr va; - int new_bsdflags = 0; - - VATTR_INIT(&va); - VATTR_WANTED(&va, va_flags); + int err = 0; + struct vnode_attr va; + int new_bsdflags = 0; + + VATTR_INIT(&va); + VATTR_WANTED(&va, va_flags); err = vnode_getattr(vp, &va, decmpfs_ctx); - - if (err != 0) { - ErrorLogWithPath("vnode_getattr err %d\n", err); - } else { - new_bsdflags = va.va_flags & ~UF_COMPRESSED; - - VATTR_INIT(&va); - VATTR_SET(&va, va_flags, new_bsdflags); + + if (err != 0) { + ErrorLogWithPath("vnode_getattr err %d\n", err); + } else { + new_bsdflags = va.va_flags & ~UF_COMPRESSED; + + VATTR_INIT(&va); + VATTR_SET(&va, va_flags, new_bsdflags); err = vnode_setattr(vp, &va, decmpfs_ctx); - if (err != 0) { - ErrorLogWithPath("vnode_setattr err %d\n", err); - } - } - return err; + if (err != 0) { + ErrorLogWithPath("vnode_setattr err %d\n", err); + } + } + return err; } int decmpfs_decompress_file(vnode_t vp, decmpfs_cnode *cp, off_t toSize, int truncate_okay, int skiplock) { /* convert a compressed file to an uncompressed file */ - + int err = 0; char *data = NULL; uio_t uio_w = 0; @@ -1682,59 +1743,60 @@ decmpfs_decompress_file(vnode_t vp, decmpfs_cnode *cp, off_t toSize, int truncat * Please NOTE: @toSize can overflow in theory but here it is safe. 
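 *
 * [Editor's note] A typical caller that needs the data fork materialized
 * before an in-place write passes toSize == -1, which the code below maps
 * to hdr->uncompressed_size. Sketch only; fs_uncompress_before_write is a
 * hypothetical helper name:
 */
#if 0 /* editor's sketch, fenced out of the build */
static int
fs_uncompress_before_write(vnode_t vp, decmpfs_cnode *cp)
{
	return decmpfs_decompress_file(vp, cp, -1 /* toSize: whole file */,
	           0 /* truncate_okay */, 0 /* skiplock */);
}
#endif
 /*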
*/ DECMPFS_EMIT_TRACE_ENTRY(DECMPDBG_DECOMPRESS_FILE, vp->v_id, - (int)toSize, truncate_okay, skiplock); - + (int)toSize, truncate_okay, skiplock); + if (!skiplock) { decmpfs_lock_compressed_data(cp, 1); cmpdata_locked = 1; } - + decompress: old_state = decmpfs_fast_get_state(cp); - - switch(old_state) { - case FILE_IS_NOT_COMPRESSED: - { - /* someone else decompressed the file */ - err = 0; - goto out; - } - - case FILE_TYPE_UNKNOWN: - { - /* the file is in an unknown state, so update the state and retry */ - (void)decmpfs_file_is_compressed(vp, cp); - - /* try again */ - goto decompress; - } - - case FILE_IS_COMPRESSED: - { - /* the file is compressed, so decompress it */ - break; - } - - default: - { - /* - this shouldn't happen since multiple calls to decmpfs_decompress_file lock each other out, - and when decmpfs_decompress_file returns, the state should be always be set back to - FILE_IS_NOT_COMPRESSED or FILE_IS_UNKNOWN - */ - err = EINVAL; - goto out; - } + + switch (old_state) { + case FILE_IS_NOT_COMPRESSED: + { + /* someone else decompressed the file */ + err = 0; + goto out; + } + + case FILE_TYPE_UNKNOWN: + { + /* the file is in an unknown state, so update the state and retry */ + (void)decmpfs_file_is_compressed(vp, cp); + + /* try again */ + goto decompress; + } + + case FILE_IS_COMPRESSED: + { + /* the file is compressed, so decompress it */ + break; + } + + default: + { + /* + * this shouldn't happen since multiple calls to decmpfs_decompress_file lock each other out, + * and when decmpfs_decompress_file returns, the state should always be set back to + * FILE_IS_NOT_COMPRESSED or FILE_TYPE_UNKNOWN + */ + err = EINVAL; + goto out; + } } - - err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); + } + + err = decmpfs_fetch_compressed_header(vp, cp, &hdr, 0); if (err != 0) { goto out; } - + uncompressed_size = hdr->uncompressed_size; - if (toSize == -1) + if (toSize == -1) { toSize = hdr->uncompressed_size; - + } + if (toSize == 0) { /* special case truncating the file to zero bytes */ goto nodecmp; @@ -1742,32 +1804,32 @@ decompress: /* the caller is trying to grow the file, so we should decompress all the data */ toSize = hdr->uncompressed_size; } - - allocSize = MIN(64*1024, toSize); + + allocSize = MIN(64 * 1024, toSize); MALLOC(data, char *, allocSize, M_TEMP, M_WAITOK); if (!data) { err = ENOMEM; goto out; } - + uio_w = uio_create(1, 0LL, UIO_SYSSPACE, UIO_WRITE); if (!uio_w) { err = ENOMEM; goto out; } uio_w->uio_flags |= UIO_FLAGS_IS_COMPRESSED_FILE; - + remaining = toSize; - + /* tell the buffer cache that this is an empty file */ ubc_setsize(vp, 0); - + /* if we got here, we need to decompress the file */ decmpfs_cnode_set_vnode_state(cp, FILE_IS_CONVERTING, 1); - - while(remaining > 0) { + + while (remaining > 0) { /* loop decompressing data from the file and writing it into the data fork */ - + uint64_t bytes_read = 0; decmpfs_vector vec = { .buf = data, .size = MIN(allocSize, remaining) }; err = decmpfs_fetch_uncompressed_data(vp, cp, hdr, offset, vec.size, 1, &vec, &bytes_read); @@ -1775,12 +1837,12 @@ decompress: ErrorLogWithPath("decmpfs_fetch_uncompressed_data err %d\n", err); goto out; } - + if (bytes_read == 0) { /* we're done reading data */ break; } - + uio_reset(uio_w, offset, UIO_SYSSPACE, UIO_WRITE); err = uio_addiov(uio_w, CAST_USER_ADDR_T(data), bytes_read); if (err != 0) { @@ -1788,7 +1850,7 @@ decompress: err = ENOMEM; goto out; } - + err = VNOP_WRITE(vp, uio_w, 0, decmpfs_ctx); if (err != 0) { /* if the write failed, truncate the file to zero bytes
*/ @@ -1798,7 +1860,7 @@ decompress: offset += bytes_read; remaining -= bytes_read; } - + if (err == 0) { if (offset != toSize) { ErrorLogWithPath("file decompressed to %lld instead of %lld\n", offset, toSize); @@ -1806,7 +1868,7 @@ decompress: goto out; } } - + if (err == 0) { /* sync the data and metadata */ err = VNOP_FSYNC(vp, MNT_WAIT, decmpfs_ctx); @@ -1815,7 +1877,7 @@ decompress: goto out; } } - + if (err != 0) { /* write, setattr, or fsync failed */ ErrorLogWithPath("aborting decompress, err %d\n", err); @@ -1826,29 +1888,29 @@ decompress: } goto out; } - + nodecmp: /* if we're truncating the file to zero bytes, we'll skip ahead to here */ - + /* unset the compressed flag */ unset_compressed_flag(vp); - + /* free the compressed data associated with this file */ err = decmpfs_free_compressed_data(vp, cp); if (err != 0) { ErrorLogWithPath("decmpfs_free_compressed_data err %d\n", err); } - + /* - even if free_compressed_data or vnode_getattr/vnode_setattr failed, return success - since we succeeded in writing all of the file data to the data fork + * even if free_compressed_data or vnode_getattr/vnode_setattr failed, return success + * since we succeeded in writing all of the file data to the data fork */ err = 0; - + /* if we got this far, the file was successfully decompressed */ update_file_state = 1; new_state = FILE_IS_NOT_COMPRESSED; - + #if COMPRESSION_DEBUG { uint64_t filesize = 0; @@ -1856,12 +1918,18 @@ nodecmp: DebugLogWithPath("new file size %lld\n", filesize); } #endif - + out: - if (hdr) FREE(hdr, M_TEMP); - if (data) FREE(data, M_TEMP); - if (uio_w) uio_free(uio_w); - + if (hdr) { + FREE(hdr, M_TEMP); + } + if (data) { + FREE(data, M_TEMP); + } + if (uio_w) { + uio_free(uio_w); + } + if (err != 0) { /* if there was a failure, reset compression flags to unknown and clear the buffer cache data */ update_file_state = 1; @@ -1869,17 +1937,19 @@ out: if (uncompressed_size) { ubc_setsize(vp, 0); ubc_setsize(vp, uncompressed_size); - } + } } - + if (update_file_state) { lck_mtx_lock(decompress_channel_mtx); decmpfs_cnode_set_vnode_state(cp, new_state, 1); wakeup((caddr_t)&decompress_channel); /* wake up anyone who might have been waiting for decompression */ lck_mtx_unlock(decompress_channel_mtx); } - - if (cmpdata_locked) decmpfs_unlock_compressed_data(cp, 1); + + if (cmpdata_locked) { + decmpfs_unlock_compressed_data(cp, 1); + } /* * Trace the following parameters on return with event-id 0x03120000. 
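The decmpfs hunks above are a pure whitespace/brace reformatting pass, but the loop they touch is worth restating: decmpfs_decompress_file() claims the cnode, re-checks the compression state, then streams the payload into the data fork through a bounded 64 KB scratch buffer, and treats stopping short of the expected size as an error. A minimal user-space sketch of that loop follows; fetch_fn, decompress_to_fd, and the cookie argument are hypothetical stand-ins for decmpfs_fetch_uncompressed_data() and its vnode plumbing.

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Hypothetical producer: fills buf with up to size bytes of the
     * uncompressed stream starting at offset; returns bytes produced,
     * 0 at end of stream, or -1 on error. Stands in for
     * decmpfs_fetch_uncompressed_data() in the hunk above. */
    typedef ssize_t (*fetch_fn)(void *cookie, uint64_t offset,
        void *buf, size_t size);

    /* Mirror of the decompress loop: bounded scratch buffer, advance
     * offset/remaining by what was actually produced, stop early on a
     * zero-byte fetch, and verify we reached the expected size. */
    static int
    decompress_to_fd(int fd, fetch_fn fetch, void *cookie, uint64_t to_size)
    {
        size_t alloc = (size_t)(to_size < 64 * 1024 ? to_size : 64 * 1024);
        char *data = malloc(alloc ? alloc : 1);
        uint64_t offset = 0, remaining = to_size;
        int err = 0;

        if (data == NULL) {
            return -1;                      /* ENOMEM analogue */
        }
        while (remaining > 0) {
            size_t want = (size_t)(remaining < alloc ? remaining : alloc);
            ssize_t got = fetch(cookie, offset, data, want);
            if (got < 0) { err = -1; break; }
            if (got == 0) { break;          /* we're done reading data */ }
            if (write(fd, data, (size_t)got) != got) { err = -1; break; }
            offset += (uint64_t)got;
            remaining -= (uint64_t)got;
        }
        /* As in the kernel: a short result is a failure, not success. */
        if (err == 0 && offset != to_size) { err = -1; }
        free(data);
        return err;
    }

Capping the scratch buffer at 64 KB keeps heap pressure constant regardless of file size; correctness comes from the final offset check, not from trusting the header.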
* @@ -1893,83 +1963,88 @@ out: #pragma mark --- Type1 compressor --- /* - The "Type1" compressor stores the data fork directly in the compression xattr + * The "Type1" compressor stores the data fork directly in the compression xattr */ static int decmpfs_validate_compressed_file_Type1(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr) { - int err = 0; - - if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) { - err = EINVAL; - goto out; - } + int err = 0; + + if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) { + err = EINVAL; + goto out; + } out: - return err; + return err; } static int decmpfs_fetch_uncompressed_data_Type1(__unused vnode_t vp, __unused vfs_context_t ctx, decmpfs_header *hdr, off_t offset, user_ssize_t size, int nvec, decmpfs_vector *vec, uint64_t *bytes_read) { - int err = 0; - int i; - user_ssize_t remaining; - - if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) { - err = EINVAL; - goto out; - } - + int err = 0; + int i; + user_ssize_t remaining; + + if (hdr->uncompressed_size + sizeof(decmpfs_disk_header) != (uint64_t)hdr->attr_size) { + err = EINVAL; + goto out; + } + #if COMPRESSION_DEBUG - static int dummy = 0; // prevent syslog from coalescing printfs - DebugLogWithPath("%d memcpy %lld at %lld\n", dummy++, size, (uint64_t)offset); + static int dummy = 0; // prevent syslog from coalescing printfs + DebugLogWithPath("%d memcpy %lld at %lld\n", dummy++, size, (uint64_t)offset); #endif - - remaining = size; - for (i = 0; (i < nvec) && (remaining > 0); i++) { - user_ssize_t curCopy = vec[i].size; - if (curCopy > remaining) - curCopy = remaining; - memcpy(vec[i].buf, hdr->attr_bytes + offset, curCopy); - offset += curCopy; - remaining -= curCopy; - } - - if ((bytes_read) && (err == 0)) - *bytes_read = (size - remaining); - + + remaining = size; + for (i = 0; (i < nvec) && (remaining > 0); i++) { + user_ssize_t curCopy = vec[i].size; + if (curCopy > remaining) { + curCopy = remaining; + } + memcpy(vec[i].buf, hdr->attr_bytes + offset, curCopy); + offset += curCopy; + remaining -= curCopy; + } + + if ((bytes_read) && (err == 0)) { + *bytes_read = (size - remaining); + } + out: - return err; + return err; } SECURITY_READ_ONLY_EARLY(static decmpfs_registration) Type1Reg = { - .decmpfs_registration = DECMPFS_REGISTRATION_VERSION, - .validate = decmpfs_validate_compressed_file_Type1, - .adjust_fetch = NULL, /* no adjust necessary */ - .fetch = decmpfs_fetch_uncompressed_data_Type1, - .free_data = NULL, /* no free necessary */ - .get_flags = NULL /* no flags */ + .decmpfs_registration = DECMPFS_REGISTRATION_VERSION, + .validate = decmpfs_validate_compressed_file_Type1, + .adjust_fetch = NULL,/* no adjust necessary */ + .fetch = decmpfs_fetch_uncompressed_data_Type1, + .free_data = NULL,/* no free necessary */ + .get_flags = NULL/* no flags */ }; #pragma mark --- decmpfs initialization --- -void decmpfs_init() +void +decmpfs_init() { - static int done = 0; - if (done) return; - + static int done = 0; + if (done) { + return; + } + decmpfs_ctx = vfs_context_create(vfs_context_kernel()); - - lck_grp_attr_t *attr = lck_grp_attr_alloc_init(); - decmpfs_lockgrp = lck_grp_alloc_init("VFSCOMP", attr); - lck_grp_attr_free(attr); - decompressorsLock = lck_rw_alloc_init(decmpfs_lockgrp, NULL); - decompress_channel_mtx = lck_mtx_alloc_init(decmpfs_lockgrp, NULL); - - register_decmpfs_decompressor(CMP_Type1, &Type1Reg); - - done = 1; + + lck_grp_attr_t *attr = 
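The Type1 block above is the simplest possible decompressor: the "compressed" payload is stored verbatim in the compression xattr immediately after the header, so validation is a single size identity (uncompressed_size + sizeof(decmpfs_disk_header) == attr_size) and fetching is a scatter memcpy out of hdr->attr_bytes. A self-contained sketch of that scatter walk, with struct vec standing in for decmpfs_vector:

    #include <stddef.h>
    #include <string.h>
    #include <sys/types.h>

    /* Analogue of decmpfs_vector: one destination segment. */
    struct vec { void *buf; size_t size; };

    /* Scatter `size` bytes from src+offset across up to nvec segments,
     * clamping the last copy, and report how much was delivered --
     * the same walk decmpfs_fetch_uncompressed_data_Type1() does over
     * hdr->attr_bytes. */
    static size_t
    scatter_copy(const char *src, off_t offset, size_t size,
        const struct vec *vec, int nvec)
    {
        size_t remaining = size;
        for (int i = 0; i < nvec && remaining > 0; i++) {
            size_t cur = vec[i].size;
            if (cur > remaining) {
                cur = remaining;
            }
            memcpy(vec[i].buf, src + offset, cur);
            offset += (off_t)cur;
            remaining -= cur;
        }
        return size - remaining;    /* what the kernel stores in *bytes_read */
    }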
lck_grp_attr_alloc_init(); + decmpfs_lockgrp = lck_grp_alloc_init("VFSCOMP", attr); + lck_grp_attr_free(attr); + decompressorsLock = lck_rw_alloc_init(decmpfs_lockgrp, NULL); + decompress_channel_mtx = lck_mtx_alloc_init(decmpfs_lockgrp, NULL); + + register_decmpfs_decompressor(CMP_Type1, &Type1Reg); + + done = 1; } #endif /* FS_COMPRESSION */ diff --git a/bsd/kern/imageboot.c b/bsd/kern/imageboot.c index 493d740f8..96c0a1e73 100644 --- a/bsd/kern/imageboot.c +++ b/bsd/kern/imageboot.c @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -75,12 +75,12 @@ static boolean_t imageboot_setup_new(void); __private_extern__ int imageboot_format_is_valid(const char *root_path) { - return (strncmp(root_path, kIBFilePrefix, - strlen(kIBFilePrefix)) == 0); + return strncmp(root_path, kIBFilePrefix, + strlen(kIBFilePrefix)) == 0; } static void -vnode_get_and_drop_always(vnode_t vp) +vnode_get_and_drop_always(vnode_t vp) { vnode_getalways(vp); vnode_rele(vp); @@ -92,18 +92,19 @@ imageboot_needed(void) { int result = 0; char *root_path = NULL; - + DBG_TRACE("%s: checking for presence of root path\n", __FUNCTION__); MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (root_path == NULL) + if (root_path == NULL) { panic("%s: M_NAMEI zone exhausted", __FUNCTION__); + } /* Check for first layer */ if (!(PE_parse_boot_argn("rp0", root_path, MAXPATHLEN) || - PE_parse_boot_argn("rp", root_path, MAXPATHLEN) || - PE_parse_boot_argn(IMAGEBOOT_ROOT_ARG, root_path, MAXPATHLEN) || - PE_parse_boot_argn(IMAGEBOOT_AUTHROOT_ARG, root_path, MAXPATHLEN))) { + PE_parse_boot_argn("rp", root_path, MAXPATHLEN) || + PE_parse_boot_argn(IMAGEBOOT_ROOT_ARG, root_path, MAXPATHLEN) || + PE_parse_boot_argn(IMAGEBOOT_AUTHROOT_ARG, root_path, MAXPATHLEN))) { goto out; } @@ -118,7 +119,7 @@ imageboot_needed(void) /* Check for second layer */ if (!(PE_parse_boot_argn("rp1", root_path, MAXPATHLEN) || - PE_parse_boot_argn(IMAGEBOOT_CONTAINER_ARG, root_path, MAXPATHLEN))) { + PE_parse_boot_argn(IMAGEBOOT_CONTAINER_ARG, root_path, MAXPATHLEN))) { goto out; } @@ -127,31 +128,31 @@ imageboot_needed(void) DBG_TRACE("%s: Found %s\n", __FUNCTION__, root_path); } else { panic("%s: Invalid URL scheme for %s\n", - __FUNCTION__, root_path); + __FUNCTION__, root_path); } out: FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); - return (result); + return result; } /* * Swaps in new root filesystem based on image path. 
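imageboot_needed() above only reports success when one of the rp0/rp/IMAGEBOOT_ROOT_ARG/IMAGEBOOT_AUTHROOT_ARG boot-args both exists and passes imageboot_format_is_valid(), i.e. names the image by URL prefix. A sketch of that check; kPrefix is a hypothetical stand-in, since kIBFilePrefix's actual value is defined elsewhere in the tree:

    #include <stdbool.h>
    #include <string.h>

    /* Hypothetical stand-in for kIBFilePrefix (defined in imageboot.h,
     * not shown in this hunk). */
    static const char kPrefix[] = "file://";

    /* Same shape as imageboot_format_is_valid(): a root path is usable
     * only if it names a disk image by URL prefix. */
    static bool
    format_is_valid(const char *root_path)
    {
        return strncmp(root_path, kPrefix, strlen(kPrefix)) == 0;
    }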
* Current root filesystem is removed from mount list and - * tagged MNTK_BACKS_ROOT, MNT_ROOTFS is cleared on it, and - * "rootvnode" is reset. Root vnode of currentroot filesystem + * tagged MNTK_BACKS_ROOT, MNT_ROOTFS is cleared on it, and + * "rootvnode" is reset. Root vnode of currentroot filesystem * is returned with usecount (no iocount). */ __private_extern__ int imageboot_mount_image(const char *root_path, int height) { - dev_t dev; - int error; - vnode_t old_rootvnode = NULL; - vnode_t newdp; - mount_t new_rootfs; + dev_t dev; + int error; + vnode_t old_rootvnode = NULL; + vnode_t newdp; + mount_t new_rootfs; error = di_root_image(root_path, rootdevice, DEVMAXNAMESIZE, &dev); if (error) { @@ -170,18 +171,19 @@ imageboot_mount_image(const char *root_path, int height) * Get the vnode for '/'. * Set fdp->fd_fd.fd_cdir to reference it. */ - if (VFS_ROOT(TAILQ_LAST(&mountlist,mntlist), &newdp, vfs_context_kernel())) + if (VFS_ROOT(TAILQ_LAST(&mountlist, mntlist), &newdp, vfs_context_kernel())) { panic("%s: cannot find root vnode", __FUNCTION__); + } if (rootvnode != NULL) { /* remember the old rootvnode, but remove it from mountlist */ - mount_t old_rootfs; + mount_t old_rootfs; old_rootvnode = rootvnode; old_rootfs = rootvnode->v_mount; - + mount_list_remove(old_rootfs); - + mount_lock(old_rootfs); #ifdef CONFIG_IMGSRC_ACCESS old_rootfs->mnt_kern_flag |= MNTK_BACKS_ROOT; @@ -205,14 +207,14 @@ imageboot_mount_image(const char *root_path, int height) if (old_rootvnode != NULL) { #ifdef CONFIG_IMGSRC_ACCESS - if (height >= 0 && PE_imgsrc_mount_supported()) { - imgsrc_rootvnodes[height] = old_rootvnode; - } else { + if (height >= 0 && PE_imgsrc_mount_supported()) { + imgsrc_rootvnodes[height] = old_rootvnode; + } else { + vnode_get_and_drop_always(old_rootvnode); + } +#else + height = 0; /* keep the compiler from complaining */ vnode_get_and_drop_always(old_rootvnode); - } -#else - height = 0; /* keep the compiler from complaining */ - vnode_get_and_drop_always(old_rootvnode); #endif /* CONFIG_IMGSRC_ACCESS */ } return 0; @@ -243,7 +245,7 @@ key_byteswap(void *_dst, const void *_src, size_t len) len = len / sizeof(uint32_t); for (size_t i = 0; i < len; i++) { - dst[len-i-1] = OSSwapInt32(src[i]); + dst[len - i - 1] = OSSwapInt32(src[i]); } } @@ -348,17 +350,17 @@ validate_signature(const uint8_t *key_msb, size_t keylen, uint8_t *sig_msb, size key_byteswap(sig, sig_msb, siglen); err = rsa_make_pub(rsa_ctx, - sizeof(exponent), exponent, - CHUNKLIST_PUBKEY_LEN, modulus); + sizeof(exponent), exponent, + CHUNKLIST_PUBKEY_LEN, modulus); if (err) { AUTHPRNT("rsa_make_pub() failed"); goto out; } err = rsa_verify_pkcs1v15(rsa_ctx, CC_DIGEST_OID_SHA256, - SHA256_DIGEST_LENGTH, digest, - siglen, sig, - &sig_valid); + SHA256_DIGEST_LENGTH, digest, + siglen, sig, + &sig_valid); if (err) { sig_valid = false; AUTHPRNT("rsa_verify() failed"); @@ -397,26 +399,26 @@ validate_chunklist(void *buf, size_t len) /* recognized file format? */ if (hdr->cl_magic != CHUNKLIST_MAGIC || - hdr->cl_file_ver != CHUNKLIST_FILE_VERSION_10 || - hdr->cl_chunk_method != CHUNKLIST_SIGNATURE_METHOD_10 || - hdr->cl_sig_method != CHUNKLIST_SIGNATURE_METHOD_10) { + hdr->cl_file_ver != CHUNKLIST_FILE_VERSION_10 || + hdr->cl_chunk_method != CHUNKLIST_SIGNATURE_METHOD_10 || + hdr->cl_sig_method != CHUNKLIST_SIGNATURE_METHOD_10) { AUTHPRNT("unrecognized chunklist format"); return EINVAL; } /* does the chunk list fall within the bounds of the buffer? 
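key_byteswap() in the hunk above converts the MSB-first keys and signatures carried by the chunklist into the word order the RSA routines consume: it reverses the array of 32-bit words and byte-swaps each one. A portable sketch, with swap32 standing in for OSSwapInt32:

    #include <stddef.h>
    #include <stdint.h>

    /* Portable analogue of OSSwapInt32(). */
    static uint32_t
    swap32(uint32_t x)
    {
        return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) |
               ((x & 0x00ff0000U) >> 8)  | ((x & 0xff000000U) >> 24);
    }

    /* Mirror of key_byteswap(): reverse the order of the 32-bit words
     * *and* the bytes within each word, turning a most-significant-
     * byte-first buffer into a least-significant-word-first one.
     * len is in bytes and must be a multiple of 4. */
    static void
    key_byteswap(uint32_t *dst, const uint32_t *src, size_t len)
    {
        len /= sizeof(uint32_t);
        for (size_t i = 0; i < len; i++) {
            dst[len - i - 1] = swap32(src[i]);
        }
    }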
*/ if (os_mul_and_add_overflow(hdr->cl_chunk_count, sizeof(struct chunklist_chunk), hdr->cl_chunk_offset, &chunks_end) || - hdr->cl_chunk_offset < sizeof(struct chunklist_hdr) || chunks_end > len) { + hdr->cl_chunk_offset < sizeof(struct chunklist_hdr) || chunks_end > len) { AUTHPRNT("invalid chunk_count (%llu) or chunk_offset (%llu)", - hdr->cl_chunk_count, hdr->cl_chunk_offset); + hdr->cl_chunk_count, hdr->cl_chunk_offset); return EINVAL; } /* does the signature fall within the bounds of the buffer? */ if (os_add_overflow(hdr->cl_sig_offset, sizeof(struct chunklist_sig), &sig_end) || - hdr->cl_sig_offset < sizeof(struct chunklist_hdr) || - hdr->cl_sig_offset < chunks_end || - hdr->cl_sig_offset > len) { + hdr->cl_sig_offset < sizeof(struct chunklist_hdr) || + hdr->cl_sig_offset < chunks_end || + hdr->cl_sig_offset > len) { AUTHPRNT("invalid signature offset (%llu)", hdr->cl_sig_offset); return EINVAL; } @@ -439,7 +441,7 @@ validate_chunklist(void *buf, size_t len) for (size_t i = 0; i < CHUNKLIST_NPUBKEYS; i++) { const struct chunklist_pubkey *key = &chunklist_pubkeys[i]; err = validate_signature(key->key, CHUNKLIST_PUBKEY_LEN, - buf + hdr->cl_sig_offset, sigsz, sha_digest); + buf + hdr->cl_sig_offset, sigsz, sha_digest); if (err == 0) { AUTHDBG("validated chunklist signature with key %lu (prod=%d)", i, key->isprod); valid_sig = key->isprod; @@ -646,7 +648,7 @@ authenticate_root(const char *root_path) { char *chunklist_path = NULL; void *chunklist_buf = NULL; - size_t chunklist_len = 32*1024*1024UL; + size_t chunklist_len = 32 * 1024 * 1024UL; int err = 0; err = construct_chunklist_path(root_path, &chunklist_path); @@ -691,7 +693,7 @@ authenticate_root(const char *root_path) /* everything checked out - go ahead and mount this */ AUTHDBG("root image authenticated"); - out: +out: kfree_safe(chunklist_buf); kfree_safe(chunklist_path); return err; @@ -726,7 +728,7 @@ getuuidfromheader_safe(const void *buf, size_t bufsz, size_t *uuidsz) } if (os_add_overflow(cmd->cmdsize, offset, &offset) || - offset > bufsz - sizeof(struct uuid_command)) { + offset > bufsz - sizeof(struct uuid_command)) { return NULL; } } @@ -745,7 +747,7 @@ auth_version_check(void) { int err = 0; void *buf = NULL; - size_t bufsz = 4*1024*1024UL; + size_t bufsz = 4 * 1024 * 1024UL; /* get the UUID of the libkern in /S/L/E */ @@ -901,7 +903,7 @@ imageboot_mount_ramdisk(const char *path) } /* Switch to new root vnode */ - if (VFS_ROOT(TAILQ_LAST(&mountlist,mntlist), &newdp, vfs_context_kernel())) { + if (VFS_ROOT(TAILQ_LAST(&mountlist, mntlist), &newdp, vfs_context_kernel())) { panic("%s: cannot find root vnode", __func__); } rootvnode = newdp; @@ -1004,7 +1006,7 @@ imageboot_setup_new() if (error) { panic("Failed to mount root image (err=%d, auth=%d, ramdisk=%d)\n", - error, auth_root, ramdisk_root); + error, auth_root, ramdisk_root); } if (auth_root) { @@ -1039,9 +1041,9 @@ imageboot_setup() /* * New boot-arg scheme: - * root-dmg : the dmg that will be the root filesystem. - * auth-root-dmg : same as root-dmg but with image authentication. - * container-dmg : an optional dmg that contains the root-dmg. + * root-dmg : the dmg that will be the root filesystem. + * auth-root-dmg : same as root-dmg but with image authentication. + * container-dmg : an optional dmg that contains the root-dmg. */ if (imageboot_setup_new()) { return; @@ -1054,16 +1056,16 @@ imageboot_setup() * Look for outermost disk image to root from. 
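validate_chunklist() above is a textbook case of overflow-safe bounds checking on attacker-controlled headers: every derived offset (chunks_end, sig_end) is computed through os_mul_and_add_overflow()/os_add_overflow() before being compared against the buffer length, so a huge cl_chunk_count cannot wrap the arithmetic past the check. The same discipline, sketched with the clang/gcc checked-arithmetic builtins that the libkern macros wrap:

    #include <stdbool.h>
    #include <stdint.h>

    /* Validate that `count` records of `recsize` bytes starting at
     * `offset` sit entirely inside a `len`-byte buffer, computing
     * end = count * recsize + offset without ever overflowing.
     * __builtin_*_overflow stand in for the os_*_overflow wrappers. */
    static bool
    range_ok(uint64_t count, uint64_t recsize, uint64_t offset,
        uint64_t hdrsize, uint64_t len)
    {
        uint64_t bytes, end;

        if (__builtin_mul_overflow(count, recsize, &bytes) ||
            __builtin_add_overflow(bytes, offset, &end)) {
            return false;       /* arithmetic wrapped: reject */
        }
        /* Records may not overlap the header or run past the buffer. */
        return offset >= hdrsize && end <= len;
    }

Rejecting on the overflow flag first means the later comparisons only ever see honest values.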
If we're doing a nested boot, * there's some sense in which the outer image never needs to be the root filesystem, * but it does need very similar treatment: it must not be unmounted, needs a fake - * device vnode created for it, and should not show up in getfsstat() until exposed + * device vnode created for it, and should not show up in getfsstat() until exposed * with MNT_IMGSRC. We just make it the temporary root. */ - if((PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == FALSE) && - (PE_parse_boot_argn("rp0", root_path, MAXPATHLEN) == FALSE)) { + if ((PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == FALSE) && + (PE_parse_boot_argn("rp0", root_path, MAXPATHLEN) == FALSE)) { panic("%s: no valid path to image.\n", __FUNCTION__); } printf("%s: root image url is %s\n", __FUNCTION__, root_path); - + error = imageboot_mount_image(root_path, 0); if (error) { panic("Failed on first stage of imageboot."); @@ -1072,19 +1074,19 @@ imageboot_setup() /* * See if we are rooting from a nested image */ - if(PE_parse_boot_argn("rp1", root_path, MAXPATHLEN) == FALSE) { + if (PE_parse_boot_argn("rp1", root_path, MAXPATHLEN) == FALSE) { goto done; } - + printf("%s: second level root image url is %s\n", __FUNCTION__, root_path); /* * If we fail to set up second image, it's not a given that we - * can safely root off the first. + * can safely root off the first. */ error = imageboot_mount_image(root_path, 1); if (error) { - panic("Failed on second stage of imageboot."); + panic("Failed on second stage of imageboot."); } done: diff --git a/bsd/kern/kdebug.c b/bsd/kern/kdebug.c index 66361ad0d..f901211e0 100644 --- a/bsd/kern/kdebug.c +++ b/bsd/kern/kdebug.c @@ -72,9 +72,9 @@ #include #include #include -#include /* for isset() */ +#include /* for isset() */ -#include /* for host_info() */ +#include /* for host_info() */ #include #include @@ -108,10 +108,10 @@ */ typedef struct kd_iop { - kd_callback_t callback; - uint32_t cpu_id; - uint64_t last_timestamp; /* Prevent timer rollback */ - struct kd_iop* next; + kd_callback_t callback; + uint32_t cpu_id; + uint64_t last_timestamp; /* Prevent timer rollback */ + struct kd_iop* next; } kd_iop_t; static kd_iop_t* kd_iops = NULL; @@ -152,7 +152,8 @@ static mach_port_t kdbg_typefilter_memory_entry; */ #define TYPEFILTER_ALLOC_SIZE MAX(round_page_32(KDBG_TYPEFILTER_BITMAP_SIZE), KDBG_TYPEFILTER_BITMAP_SIZE) -static typefilter_t typefilter_create(void) +static typefilter_t +typefilter_create(void) { typefilter_t tf; if (KERN_SUCCESS == kmem_alloc(kernel_map, (vm_offset_t*)&tf, TYPEFILTER_ALLOC_SIZE, VM_KERN_MEMORY_DIAG)) { @@ -162,52 +163,60 @@ static typefilter_t typefilter_create(void) return NULL; } -static void typefilter_deallocate(typefilter_t tf) +static void +typefilter_deallocate(typefilter_t tf) { assert(tf != NULL); assert(tf != kdbg_typefilter); kmem_free(kernel_map, (vm_offset_t)tf, TYPEFILTER_ALLOC_SIZE); } -static void typefilter_copy(typefilter_t dst, typefilter_t src) +static void +typefilter_copy(typefilter_t dst, typefilter_t src) { assert(src != NULL); assert(dst != NULL); memcpy(dst, src, KDBG_TYPEFILTER_BITMAP_SIZE); } -static void typefilter_reject_all(typefilter_t tf) +static void +typefilter_reject_all(typefilter_t tf) { assert(tf != NULL); memset(tf, 0, KDBG_TYPEFILTER_BITMAP_SIZE); } -static void typefilter_allow_all(typefilter_t tf) +static void +typefilter_allow_all(typefilter_t tf) { assert(tf != NULL); memset(tf, ~0, KDBG_TYPEFILTER_BITMAP_SIZE); } -static void typefilter_allow_class(typefilter_t tf, uint8_t class) +static void 
+typefilter_allow_class(typefilter_t tf, uint8_t class) { assert(tf != NULL); const uint32_t BYTES_PER_CLASS = 256 / 8; // 256 subclasses, 1 bit each memset(&tf[class * BYTES_PER_CLASS], 0xFF, BYTES_PER_CLASS); } -static void typefilter_allow_csc(typefilter_t tf, uint16_t csc) +static void +typefilter_allow_csc(typefilter_t tf, uint16_t csc) { assert(tf != NULL); setbit(tf, csc); } -static bool typefilter_is_debugid_allowed(typefilter_t tf, uint32_t id) +static bool +typefilter_is_debugid_allowed(typefilter_t tf, uint32_t id) { assert(tf != NULL); return isset(tf, KDBG_EXTRACT_CSC(id)); } -static mach_port_t typefilter_create_memory_entry(typefilter_t tf) +static mach_port_t +typefilter_create_memory_entry(typefilter_t tf) { assert(tf != NULL); @@ -215,11 +224,11 @@ static mach_port_t typefilter_create_memory_entry(typefilter_t tf) memory_object_size_t size = TYPEFILTER_ALLOC_SIZE; mach_make_memory_entry_64(kernel_map, - &size, - (memory_object_offset_t)tf, - VM_PROT_READ, - &memory_entry, - MACH_PORT_NULL); + &size, + (memory_object_offset_t)tf, + VM_PROT_READ, + &memory_entry, + MACH_PORT_NULL); return memory_entry; } @@ -232,8 +241,8 @@ static void kdbg_disable_typefilter(void); * External prototypes */ -void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *); -int cpu_number(void); /* XXX include path broken */ +void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *); +int cpu_number(void); /* XXX include path broken */ void commpage_update_kdebug_state(void); /* XXX sign */ extern int log_leaks; @@ -259,8 +268,8 @@ static int kdbg_debug = 0; #if KDEBUG_MOJO_TRACE #include static void kdebug_serial_print( /* forward */ - uint32_t, uint32_t, uint64_t, - uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); + uint32_t, uint32_t, uint64_t, + uintptr_t, uintptr_t, uintptr_t, uintptr_t, uintptr_t); #endif int kdbg_control(int *, u_int, user_addr_t, size_t *); @@ -286,23 +295,23 @@ static boolean_t kdbg_wait(uint64_t timeout_ms, boolean_t locked_wait); static void kdbg_wakeup(void); int kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, - uint8_t** cpumap, uint32_t* cpumap_size); + uint8_t** cpumap, uint32_t* cpumap_size); static kd_threadmap *kdbg_thrmap_init_internal(unsigned int count, - unsigned int *mapsize, - unsigned int *mapcount); + unsigned int *mapsize, + unsigned int *mapcount); static boolean_t kdebug_current_proc_enabled(uint32_t debugid); static errno_t kdebug_check_trace_string(uint32_t debugid, uint64_t str_id); int kdbg_write_v3_header(user_addr_t, size_t *, int); int kdbg_write_v3_chunk_header(user_addr_t buffer, uint32_t tag, - uint32_t sub_tag, uint64_t length, - vnode_t vp, vfs_context_t ctx); + uint32_t sub_tag, uint64_t length, + vnode_t vp, vfs_context_t ctx); user_addr_t kdbg_write_v3_event_chunk_header(user_addr_t buffer, uint32_t tag, - uint64_t length, vnode_t vp, - vfs_context_t ctx); + uint64_t length, vnode_t vp, + vfs_context_t ctx); // Helper functions @@ -338,8 +347,8 @@ static bool kd_early_done = false; #define SLOW_NOLOG 0x01 #define SLOW_CHECKS 0x02 -#define EVENTS_PER_STORAGE_UNIT 2048 -#define MIN_STORAGE_UNITS_PER_CPU 4 +#define EVENTS_PER_STORAGE_UNIT 2048 +#define MIN_STORAGE_UNITS_PER_CPU 4 #define POINTER_FROM_KDS_PTR(x) (&kd_bufs[x.buffer_index].kdsb_addr[x.offset]) @@ -352,24 +361,24 @@ union kds_ptr { }; struct kd_storage { - union kds_ptr kds_next; + union kds_ptr kds_next; uint32_t kds_bufindx; uint32_t kds_bufcnt; uint32_t kds_readlast; boolean_t kds_lostevents; uint64_t kds_timestamp; - 
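The typefilter reformatted above is a flat bitmap with one bit per class/subclass pair: 256 classes x 256 subclasses = 65,536 bits = 8 KB, which is why typefilter_allow_class() can memset 256/8 = 32 bytes per class and typefilter_is_debugid_allowed() is a single isset() on KDBG_EXTRACT_CSC(id). A self-contained sketch with local stand-ins for the setbit()/isset() macros; the csc extraction assumes the kdebug layout with class and subclass in the top 16 bits of the debugid:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* One bit per (class, subclass) pair: 256 * 256 bits == 8 KiB. */
    #define TF_NBYTES   (256 * 256 / 8)
    typedef uint8_t typefilter_t[TF_NBYTES];

    /* Local stand-in for KDBG_EXTRACT_CSC(): class:8 | subclass:8. */
    static inline uint16_t
    extract_csc(uint32_t debugid)
    {
        return (uint16_t)(debugid >> 16);
    }

    static inline void
    allow_class(typefilter_t tf, uint8_t class)
    {
        /* 256 subclasses, 1 bit each -> 32 bytes per class. */
        memset(&tf[class * (256 / 8)], 0xFF, 256 / 8);
    }

    /* Stand-in for setbit(). */
    static inline void
    allow_csc(typefilter_t tf, uint16_t csc)
    {
        tf[csc / 8] |= (uint8_t)(1u << (csc % 8));
    }

    /* Stand-in for isset(): the hot-path check is one load and mask. */
    static inline bool
    debugid_allowed(const typefilter_t tf, uint32_t debugid)
    {
        uint16_t csc = extract_csc(debugid);
        return tf[csc / 8] & (1u << (csc % 8));
    }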
kd_buf kds_records[EVENTS_PER_STORAGE_UNIT]; + kd_buf kds_records[EVENTS_PER_STORAGE_UNIT]; }; #define MAX_BUFFER_SIZE (1024 * 1024 * 128) #define N_STORAGE_UNITS_PER_BUFFER (MAX_BUFFER_SIZE / sizeof(struct kd_storage)) static_assert(N_STORAGE_UNITS_PER_BUFFER <= 0x7ff, - "shoudn't overflow kds_ptr.offset"); + "shoudn't overflow kds_ptr.offset"); struct kd_storage_buffers { - struct kd_storage *kdsb_addr; - uint32_t kdsb_size; + struct kd_storage *kdsb_addr; + uint32_t kdsb_size; }; #define KDS_PTR_NULL 0xffffffff @@ -387,7 +396,7 @@ struct kd_bufinfo { uint32_t _pad; uint64_t kd_prev_timebase; uint32_t num_bufs; -} __attribute__(( aligned(MAX_CPU_CACHE_LINE_SIZE) )); +} __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE))); /* @@ -396,9 +405,9 @@ struct kd_bufinfo { */ struct kd_ctrl_page_t { union kds_ptr kds_free_list; - uint32_t enabled :1; - uint32_t _pad0 :31; - int kds_inuse_count; + uint32_t enabled :1; + uint32_t _pad0 :31; + int kds_inuse_count; uint32_t kdebug_flags; uint32_t kdebug_slowcheck; uint64_t oldest_time; @@ -421,21 +430,21 @@ struct kd_ctrl_page_t { struct kd_bufinfo *kdbip = NULL; -#define KDCOPYBUF_COUNT 8192 -#define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf)) +#define KDCOPYBUF_COUNT 8192 +#define KDCOPYBUF_SIZE (KDCOPYBUF_COUNT * sizeof(kd_buf)) -#define PAGE_4KB 4096 -#define PAGE_16KB 16384 +#define PAGE_4KB 4096 +#define PAGE_16KB 16384 kd_buf *kdcopybuf = NULL; unsigned int nkdbufs = 0; -unsigned int kdlog_beg=0; -unsigned int kdlog_end=0; -unsigned int kdlog_value1=0; -unsigned int kdlog_value2=0; -unsigned int kdlog_value3=0; -unsigned int kdlog_value4=0; +unsigned int kdlog_beg = 0; +unsigned int kdlog_end = 0; +unsigned int kdlog_value1 = 0; +unsigned int kdlog_value2 = 0; +unsigned int kdlog_value3 = 0; +unsigned int kdlog_value4 = 0; static lck_spin_t * kdw_spin_lock; static lck_spin_t * kds_spin_lock; @@ -444,10 +453,10 @@ kd_threadmap *kd_mapptr = 0; unsigned int kd_mapsize = 0; unsigned int kd_mapcount = 0; -off_t RAW_file_offset = 0; -int RAW_file_written = 0; +off_t RAW_file_offset = 0; +int RAW_file_written = 0; -#define RAW_FLUSH_SIZE (2 * 1024 * 1024) +#define RAW_FLUSH_SIZE (2 * 1024 * 1024) /* * A globally increasing counter for identifying strings in trace. Starts at @@ -466,23 +475,21 @@ static uint64_t g_curr_str_id = 1; */ static uint64_t g_str_id_signature = (0x70acULL << STR_ID_SIG_OFFSET); -#define INTERRUPT 0x01050000 -#define MACH_vmfault 0x01300008 -#define BSC_SysCall 0x040c0000 -#define MACH_SysCall 0x010c0000 +#define INTERRUPT 0x01050000 +#define MACH_vmfault 0x01300008 +#define BSC_SysCall 0x040c0000 +#define MACH_SysCall 0x010c0000 /* task to string structure */ -struct tts -{ - task_t task; /* from procs task */ - pid_t pid; /* from procs p_pid */ - char task_comm[20]; /* from procs p_comm */ +struct tts { + task_t task; /* from procs task */ + pid_t pid; /* from procs p_pid */ + char task_comm[20];/* from procs p_comm */ }; typedef struct tts tts_t; -struct krt -{ +struct krt { kd_threadmap *map; /* pointer to the map buffer */ int count; int maxcount; @@ -514,31 +521,32 @@ kdbg_cpu_count(boolean_t early_trace) static boolean_t kdbg_iop_list_is_valid(kd_iop_t* iop) { - if (iop) { - /* Is list sorted by cpu_id? */ - kd_iop_t* temp = iop; - do { - assert(!temp->next || temp->next->cpu_id == temp->cpu_id - 1); - assert(temp->next || (temp->cpu_id == kdbg_cpu_count(FALSE) || temp->cpu_id == kdbg_cpu_count(TRUE))); - } while ((temp = temp->next)); - - /* Does each entry have a function and a name? 
*/ - temp = iop; - do { - assert(temp->callback.func); - assert(strlen(temp->callback.iop_name) < sizeof(temp->callback.iop_name)); - } while ((temp = temp->next)); - } - - return TRUE; + if (iop) { + /* Is list sorted by cpu_id? */ + kd_iop_t* temp = iop; + do { + assert(!temp->next || temp->next->cpu_id == temp->cpu_id - 1); + assert(temp->next || (temp->cpu_id == kdbg_cpu_count(FALSE) || temp->cpu_id == kdbg_cpu_count(TRUE))); + } while ((temp = temp->next)); + + /* Does each entry have a function and a name? */ + temp = iop; + do { + assert(temp->callback.func); + assert(strlen(temp->callback.iop_name) < sizeof(temp->callback.iop_name)); + } while ((temp = temp->next)); + } + + return TRUE; } static boolean_t kdbg_iop_list_contains_cpu_id(kd_iop_t* list, uint32_t cpu_id) { while (list) { - if (list->cpu_id == cpu_id) + if (list->cpu_id == cpu_id) { return TRUE; + } list = list->next; } @@ -556,11 +564,13 @@ kdbg_iop_list_callback(kd_iop_t* iop, kd_callback_type type, void* arg) } } +static lck_grp_t *kdebug_lck_grp = NULL; + static void kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type) { int s = ml_set_interrupts_enabled(FALSE); - lck_spin_lock(kds_spin_lock); + lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp); if (enabled) { /* * The oldest valid time is now; reject old events from IOPs. @@ -571,7 +581,7 @@ kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type) kd_ctrl_page.enabled = 1; commpage_update_kdebug_state(); } else { - kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT); + kdebug_enable &= ~(KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT); kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG; kd_ctrl_page.enabled = 0; commpage_update_kdebug_state(); @@ -596,7 +606,7 @@ static void kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled) { int s = ml_set_interrupts_enabled(FALSE); - lck_spin_lock(kds_spin_lock); + lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp); if (enabled) { kd_ctrl_page.kdebug_slowcheck |= slowflag; @@ -605,7 +615,7 @@ kdbg_set_flags(int slowflag, int enableflag, boolean_t enabled) kd_ctrl_page.kdebug_slowcheck &= ~slowflag; kdebug_enable &= ~enableflag; } - + lck_spin_unlock(kds_spin_lock); ml_set_interrupts_enabled(s); } @@ -618,7 +628,7 @@ disable_wrap(uint32_t *old_slowcheck, uint32_t *old_flags) { boolean_t wrapped; int s = ml_set_interrupts_enabled(FALSE); - lck_spin_lock(kds_spin_lock); + lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp); *old_slowcheck = kd_ctrl_page.kdebug_slowcheck; *old_flags = kd_ctrl_page.kdebug_flags; @@ -637,12 +647,13 @@ static void enable_wrap(uint32_t old_slowcheck) { int s = ml_set_interrupts_enabled(FALSE); - lck_spin_lock(kds_spin_lock); + lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp); kd_ctrl_page.kdebug_flags &= ~KDBG_NOWRAP; - if ( !(old_slowcheck & SLOW_NOLOG)) + if (!(old_slowcheck & SLOW_NOLOG)) { kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG; + } lck_spin_unlock(kds_spin_lock); ml_set_interrupts_enabled(s); @@ -683,10 +694,11 @@ create_buffers(boolean_t early_trace) goto out; } - if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU)) + if (nkdbufs < (kd_ctrl_page.kdebug_cpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU)) { n_storage_units = kd_ctrl_page.kdebug_cpus * MIN_STORAGE_UNITS_PER_CPU; - else + } else { n_storage_units = nkdbufs / EVENTS_PER_STORAGE_UNIT; + } nkdbufs = n_storage_units * EVENTS_PER_STORAGE_UNIT; @@ -696,13 +708,14 @@ create_buffers(boolean_t early_trace) f_buffer_size = N_STORAGE_UNITS_PER_BUFFER * sizeof(struct 
kd_storage); p_buffer_size = (n_storage_units % N_STORAGE_UNITS_PER_BUFFER) * sizeof(struct kd_storage); - if (p_buffer_size) + if (p_buffer_size) { n_storage_buffers++; + } kd_bufs = NULL; if (kdcopybuf == 0) { - if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) { + if (kmem_alloc(kernel_map, (vm_offset_t *)&kdcopybuf, (vm_size_t)KDCOPYBUF_SIZE, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) { error = ENOSPC; goto out; } @@ -735,8 +748,8 @@ create_buffers(boolean_t early_trace) for (i = 0; i < n_storage_buffers; i++) { struct kd_storage *kds; - int n_elements; - int n; + int n_elements; + int n; n_elements = kd_bufs[i].kdsb_size / sizeof(struct kd_storage); kds = kd_bufs[i].kdsb_addr; @@ -759,23 +772,24 @@ create_buffers(boolean_t early_trace) kdbip[i].kd_lostevents = FALSE; kdbip[i].num_bufs = 0; } - + kd_ctrl_page.kdebug_flags |= KDBG_BUFINIT; kd_ctrl_page.kds_inuse_count = 0; n_storage_threshold = n_storage_units / 2; out: - if (error) + if (error) { delete_buffers(); + } - return(error); + return error; } static void delete_buffers(void) { unsigned int i; - + if (kd_bufs) { for (i = 0; i < n_storage_buffers; i++) { if (kd_bufs[i].kdsb_addr) { @@ -796,10 +810,10 @@ delete_buffers(void) if (kdbip) { kmem_free(kernel_map, (vm_offset_t)kdbip, sizeof(struct kd_bufinfo) * kd_ctrl_page.kdebug_cpus); - + kdbip = NULL; } - kd_ctrl_page.kdebug_iops = NULL; + kd_ctrl_page.kdebug_iops = NULL; kd_ctrl_page.kdebug_cpus = 0; kd_ctrl_page.kdebug_flags &= ~KDBG_BUFINIT; } @@ -808,14 +822,14 @@ void release_storage_unit(int cpu, uint32_t kdsp_raw) { int s = 0; - struct kd_storage *kdsp_actual; + struct kd_storage *kdsp_actual; struct kd_bufinfo *kdbp; union kds_ptr kdsp; kdsp.raw = kdsp_raw; s = ml_set_interrupts_enabled(FALSE); - lck_spin_lock(kds_spin_lock); + lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp); kdbp = &kdbip[cpu]; @@ -824,7 +838,7 @@ release_storage_unit(int cpu, uint32_t kdsp_raw) * it's possible for the storage unit pointed to * by kdsp to have already been stolen... so * check to see if it's still the head of the list - * now that we're behind the lock that protects + * now that we're behind the lock that protects * adding and removing from the queue... 
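create_buffers() above sizes the trace buffers in three steps: enforce a floor of MIN_STORAGE_UNITS_PER_CPU storage units per CPU, round nkdbufs down to whole 2048-event units, then carve the units into at most 128 MB allocations plus one partial trailing buffer. A sketch of just that arithmetic; STORAGE_UNIT_SIZE approximates sizeof(struct kd_storage) as the record payload alone, so the numbers are illustrative rather than exact:

    #include <stdint.h>
    #include <stdio.h>

    #define EVENTS_PER_STORAGE_UNIT    2048
    #define MIN_STORAGE_UNITS_PER_CPU  4
    #define MAX_BUFFER_SIZE            (1024 * 1024 * 128)
    /* Approximation: a kd_buf record is 64 bytes on 64-bit kernels;
     * the real struct also carries a small header. */
    #define STORAGE_UNIT_SIZE          (EVENTS_PER_STORAGE_UNIT * 64)

    static void
    size_buffers(uint32_t nkdbufs, uint32_t ncpus)
    {
        uint32_t units_per_buffer = MAX_BUFFER_SIZE / STORAGE_UNIT_SIZE;
        uint32_t n_units, n_full, partial;

        /* Floor: every CPU gets at least MIN_STORAGE_UNITS_PER_CPU. */
        if (nkdbufs < ncpus * EVENTS_PER_STORAGE_UNIT * MIN_STORAGE_UNITS_PER_CPU) {
            n_units = ncpus * MIN_STORAGE_UNITS_PER_CPU;
        } else {
            n_units = nkdbufs / EVENTS_PER_STORAGE_UNIT;
        }
        nkdbufs = n_units * EVENTS_PER_STORAGE_UNIT;   /* whole units only */

        n_full  = n_units / units_per_buffer;
        partial = n_units % units_per_buffer;
        printf("%u events -> %u units in %u full + %u partial buffer(s)\n",
            nkdbufs, n_units, n_full, partial ? 1 : 0);
    }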
* since we only ever release and steal units from * that position, if it's no longer the head @@ -854,7 +868,7 @@ allocate_storage_unit(int cpu) int s = 0; s = ml_set_interrupts_enabled(FALSE); - lck_spin_lock(kds_spin_lock); + lck_spin_lock_grp(kds_spin_lock, kdebug_lck_grp); kdbp = &kdbip[cpu]; @@ -862,8 +876,9 @@ allocate_storage_unit(int cpu) if (kdbp->kd_list_tail.raw != KDS_PTR_NULL) { kdsp_actual = POINTER_FROM_KDS_PTR(kdbp->kd_list_tail); - if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT) + if (kdsp_actual->kds_bufindx < EVENTS_PER_STORAGE_UNIT) { goto out; + } } if ((kdsp = kd_ctrl_page.kds_free_list).raw != KDS_PTR_NULL) { @@ -889,7 +904,6 @@ allocate_storage_unit(int cpu) oldest_ts = UINT64_MAX; for (kdbp_try = &kdbip[0]; kdbp_try < &kdbip[kd_ctrl_page.kdebug_cpus]; kdbp_try++) { - if (kdbp_try->kd_list_head.raw == KDS_PTR_NULL) { /* * no storage unit to steal @@ -937,8 +951,9 @@ allocate_storage_unit(int cpu) if (kdbp_vict->kd_list_head.raw != KDS_PTR_NULL) { kdsp_next_actual = POINTER_FROM_KDS_PTR(kdbp_vict->kd_list_head); kdsp_next_actual->kds_lostevents = TRUE; - } else + } else { kdbp_vict->kd_lostevents = TRUE; + } if (kd_ctrl_page.oldest_time < oldest_ts) { kd_ctrl_page.oldest_time = oldest_ts; @@ -947,23 +962,24 @@ allocate_storage_unit(int cpu) } kdsp_actual->kds_timestamp = kdbg_timestamp(); kdsp_actual->kds_next.raw = KDS_PTR_NULL; - kdsp_actual->kds_bufcnt = 0; + kdsp_actual->kds_bufcnt = 0; kdsp_actual->kds_readlast = 0; kdsp_actual->kds_lostevents = kdbp->kd_lostevents; kdbp->kd_lostevents = FALSE; kdsp_actual->kds_bufindx = 0; - if (kdbp->kd_list_head.raw == KDS_PTR_NULL) + if (kdbp->kd_list_head.raw == KDS_PTR_NULL) { kdbp->kd_list_head = kdsp; - else + } else { POINTER_FROM_KDS_PTR(kdbp->kd_list_tail)->kds_next = kdsp; + } kdbp->kd_list_tail = kdsp; out: lck_spin_unlock(kds_spin_lock); ml_set_interrupts_enabled(s); - return (retval); + return retval; } int @@ -972,7 +988,7 @@ kernel_debug_register_callback(kd_callback_t callback) kd_iop_t* iop; if (kmem_alloc(kernel_map, (vm_offset_t *)&iop, sizeof(kd_iop_t), VM_KERN_MEMORY_DIAG) == KERN_SUCCESS) { memcpy(&iop->callback, &callback, sizeof(kd_callback_t)); - + /* * Some IOP clients are not providing a name. * @@ -980,24 +996,26 @@ kernel_debug_register_callback(kd_callback_t callback) */ { boolean_t is_valid_name = FALSE; - for (uint32_t length=0; length 0x20 && callback.iop_name[length] < 0x7F) + if (callback.iop_name[length] > 0x20 && callback.iop_name[length] < 0x7F) { continue; + } if (callback.iop_name[length] == 0) { - if (length) + if (length) { is_valid_name = TRUE; + } break; } } - + if (!is_valid_name) { strlcpy(iop->callback.iop_name, "IOP-???", sizeof(iop->callback.iop_name)); } } iop->last_timestamp = 0; - + do { /* * We use two pieces of state, the old list head @@ -1008,7 +1026,7 @@ kernel_debug_register_callback(kd_callback_t callback) * TLDR; Must not read kd_iops more than once per loop. */ iop->next = kd_iops; - iop->cpu_id = iop->next ? (iop->next->cpu_id+1) : kdbg_cpu_count(FALSE); + iop->cpu_id = iop->next ? 
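kernel_debug_register_callback(), visible at the end of the hunk above, prepends to the IOP list without taking a lock; as its comment warns, the head must be read exactly once per attempt so that iop->next and the cpu_id derived from it come from the same snapshot. The same protocol sketched in C11 atomics in place of OSCompareAndSwapPtr:

    #include <stdatomic.h>
    #include <stdint.h>

    struct kd_iop {
        uint32_t cpu_id;
        struct kd_iop *next;
    };

    static _Atomic(struct kd_iop *) kd_iops;

    /* Lock-free prepend: snapshot the head once per attempt, derive
     * the new node's cpu_id from that same snapshot, and publish with
     * compare-and-swap. If another registrant won the race, loop and
     * re-derive -- reading the head twice in one iteration could pair
     * a stale next pointer with a fresh id. */
    static void
    iop_register(struct kd_iop *iop, uint32_t base_cpu_count)
    {
        struct kd_iop *head;
        do {
            head = atomic_load(&kd_iops);
            iop->next = head;
            iop->cpu_id = head ? head->cpu_id + 1 : base_cpu_count;
        } while (!atomic_compare_exchange_weak(&kd_iops, &head, iop));
    }

This is also why kdbg_iop_list_is_valid() can assert the list is sorted by descending cpu_id: each successful CAS appends exactly one id above the previous head's.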
(iop->next->cpu_id + 1) : kdbg_cpu_count(FALSE); /* * Header says OSCompareAndSwapPtr has a memory barrier @@ -1023,43 +1041,44 @@ kernel_debug_register_callback(kd_callback_t callback) void kernel_debug_enter( - uint32_t coreid, - uint32_t debugid, - uint64_t timestamp, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4, - uintptr_t threadid + uint32_t coreid, + uint32_t debugid, + uint64_t timestamp, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4, + uintptr_t threadid ) { - uint32_t bindx; - kd_buf *kd; + uint32_t bindx; + kd_buf *kd; struct kd_bufinfo *kdbp; struct kd_storage *kdsp_actual; union kds_ptr kds_raw; if (kd_ctrl_page.kdebug_slowcheck) { - - if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT))) + if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || !(kdebug_enable & (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT))) { goto out1; - + } + if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) { - if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid)) + if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid)) { goto record_event; + } goto out1; - } - else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) { - if (debugid >= kdlog_beg && debugid <= kdlog_end) + } else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) { + if (debugid >= kdlog_beg && debugid <= kdlog_end) { goto record_event; + } goto out1; - } - else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) { + } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) { if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 && - (debugid & KDBG_EVENTID_MASK) != kdlog_value2 && - (debugid & KDBG_EVENTID_MASK) != kdlog_value3 && - (debugid & KDBG_EVENTID_MASK) != kdlog_value4) + (debugid & KDBG_EVENTID_MASK) != kdlog_value2 && + (debugid & KDBG_EVENTID_MASK) != kdlog_value3 && + (debugid & KDBG_EVENTID_MASK) != kdlog_value4) { goto out1; + } } } @@ -1070,29 +1089,31 @@ record_event: #if CONFIG_EMBEDDED /* - * When start_kern_tracing is called by the kernel to trace very - * early kernel events, it saves data to a secondary buffer until - * it is possible to initialize ktrace, and then dumps the events - * into the ktrace buffer using this method. In this case, iops will - * be NULL, and the coreid will be zero. It is not possible to have - * a valid IOP coreid of zero, so pass if both iops is NULL and coreid - * is zero. - */ + * When start_kern_tracing is called by the kernel to trace very + * early kernel events, it saves data to a secondary buffer until + * it is possible to initialize ktrace, and then dumps the events + * into the ktrace buffer using this method. In this case, iops will + * be NULL, and the coreid will be zero. It is not possible to have + * a valid IOP coreid of zero, so pass if both iops is NULL and coreid + * is zero. 
+ */ assert(kdbg_iop_list_contains_cpu_id(kd_ctrl_page.kdebug_iops, coreid) || (kd_ctrl_page.kdebug_iops == NULL && coreid == 0)); #endif disable_preemption(); - if (kd_ctrl_page.enabled == 0) + if (kd_ctrl_page.enabled == 0) { goto out; + } kdbp = &kdbip[coreid]; timestamp &= KDBG_TIMESTAMP_MASK; #if KDEBUG_MOJO_TRACE - if (kdebug_enable & KDEBUG_ENABLE_SERIAL) + if (kdebug_enable & KDEBUG_ENABLE_SERIAL) { kdebug_serial_print(coreid, debugid, timestamp, - arg1, arg2, arg3, arg4, threadid); + arg1, arg2, arg3, arg4, threadid); + } #endif retry_q: @@ -1105,7 +1126,7 @@ retry_q: kdsp_actual = NULL; bindx = EVENTS_PER_STORAGE_UNIT; } - + if (kdsp_actual == NULL || bindx >= EVENTS_PER_STORAGE_UNIT) { if (allocate_storage_unit(coreid) == FALSE) { /* @@ -1116,12 +1137,14 @@ retry_q: } goto retry_q; } - if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx)) + if (!OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx)) { goto retry_q; + } // IOP entries can be allocated before xnu allocates and inits the buffer - if (timestamp < kdsp_actual->kds_timestamp) + if (timestamp < kdsp_actual->kds_timestamp) { kdsp_actual->kds_timestamp = timestamp; + } kd = &kdsp_actual->kds_records[bindx]; @@ -1131,7 +1154,7 @@ retry_q: kd->arg3 = arg3; kd->arg4 = arg4; kd->arg5 = threadid; - + kdbg_set_timestamp_and_cpu(kd, timestamp, coreid); OSAddAtomic(1, &kdsp_actual->kds_bufcnt); @@ -1152,7 +1175,7 @@ static inline bool kdebug_debugid_procfilt_allowed(uint32_t debugid) { uint32_t procfilt_flags = kd_ctrl_page.kdebug_flags & - (KDBG_PIDCHECK | KDBG_PIDEXCLUDE); + (KDBG_PIDCHECK | KDBG_PIDEXCLUDE); if (!procfilt_flags) { return true; @@ -1162,7 +1185,7 @@ kdebug_debugid_procfilt_allowed(uint32_t debugid) * DBG_TRACE and MACH_SCHED tracepoints ignore the process filter. 
*/ if ((debugid & 0xffff0000) == MACHDBG_CODE(DBG_MACH_SCHED, 0) || - (debugid >> 24 == DBG_TRACE)) { + (debugid >> 24 == DBG_TRACE)) { return true; } @@ -1212,42 +1235,44 @@ kernel_debug_internal( if (kd_ctrl_page.kdebug_slowcheck) { if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) || - !(kdebug_enable & (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT))) - { + !(kdebug_enable & (KDEBUG_ENABLE_TRACE | KDEBUG_ENABLE_PPT))) { goto out1; } if (!ml_at_interrupt_context() && observe_procfilt && - !kdebug_debugid_procfilt_allowed(debugid)) { + !kdebug_debugid_procfilt_allowed(debugid)) { goto out1; } if (kd_ctrl_page.kdebug_flags & KDBG_TYPEFILTER_CHECK) { - if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid)) + if (typefilter_is_debugid_allowed(kdbg_typefilter, debugid)) { goto record_event; + } goto out1; } else if (only_filter) { goto out1; - } - else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) { + } else if (kd_ctrl_page.kdebug_flags & KDBG_RANGECHECK) { /* Always record trace system info */ - if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) + if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) { goto record_event; + } - if (debugid < kdlog_beg || debugid > kdlog_end) + if (debugid < kdlog_beg || debugid > kdlog_end) { goto out1; - } - else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) { + } + } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) { /* Always record trace system info */ - if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) + if (KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE) { goto record_event; + } if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 && (debugid & KDBG_EVENTID_MASK) != kdlog_value2 && (debugid & KDBG_EVENTID_MASK) != kdlog_value3 && - (debugid & KDBG_EVENTID_MASK) != kdlog_value4) + (debugid & KDBG_EVENTID_MASK) != kdlog_value4) { goto out1; + } } } else if (only_filter) { goto out1; @@ -1256,17 +1281,19 @@ kernel_debug_internal( record_event: disable_preemption(); - if (kd_ctrl_page.enabled == 0) + if (kd_ctrl_page.enabled == 0) { goto out; + } cpu = cpu_number(); kdbp = &kdbip[cpu]; #if KDEBUG_MOJO_TRACE - if (kdebug_enable & KDEBUG_ENABLE_SERIAL) + if (kdebug_enable & KDEBUG_ENABLE_SERIAL) { kdebug_serial_print(cpu, debugid, - kdbg_timestamp() & KDBG_TIMESTAMP_MASK, - arg1, arg2, arg3, arg4, arg5); + kdbg_timestamp() & KDBG_TIMESTAMP_MASK, + arg1, arg2, arg3, arg4, arg5); + } #endif retry_q: @@ -1293,8 +1320,9 @@ retry_q: now = kdbg_timestamp() & KDBG_TIMESTAMP_MASK; - if ( !OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx)) + if (!OSCompareAndSwap(bindx, bindx + 1, &kdsp_actual->kds_bufindx)) { goto retry_q; + } kd = &kdsp_actual->kds_records[bindx]; @@ -1316,8 +1344,8 @@ out: enable_preemption(); out1: if (kds_waiter && kd_ctrl_page.kds_inuse_count >= n_storage_threshold) { - uint32_t etype; - uint32_t stype; + uint32_t etype; + uint32_t stype; etype = debugid & KDBG_EVENTID_MASK; stype = debugid & KDBG_CSC_MASK; @@ -1331,25 +1359,25 @@ out1: void kernel_debug( - uint32_t debugid, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4, + uint32_t debugid, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4, __unused uintptr_t arg5) { kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, - (uintptr_t)thread_tid(current_thread()), 0); + (uintptr_t)thread_tid(current_thread()), 0); } void kernel_debug1( - uint32_t debugid, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4, - uintptr_t arg5) + uint32_t debugid, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4, + uintptr_t arg5) { 
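Both kernel_debug_enter() and kernel_debug_internal() above commit an event with the same lock-free protocol: read the tail storage unit's kds_bufindx, fall into allocate_storage_unit() when the unit is full, and otherwise claim slot bindx with a compare-and-swap, retrying on contention. A C11-atomics sketch of just the claim; the allocate path and the per-CPU kd_bufinfo indirection are elided:

    #include <stdatomic.h>
    #include <stdint.h>

    #define EVENTS_PER_STORAGE_UNIT 2048

    struct record { uint64_t timestamp; uintptr_t args[5]; };

    struct storage_unit {
        _Atomic uint32_t bufindx;   /* next free slot (claim index) */
        _Atomic uint32_t bufcnt;    /* slots actually filled        */
        struct record    records[EVENTS_PER_STORAGE_UNIT];
    };

    static struct record *
    claim_slot(struct storage_unit *kds)
    {
        uint32_t bindx;
        do {
            bindx = atomic_load(&kds->bufindx);
            if (bindx >= EVENTS_PER_STORAGE_UNIT) {
                return NULL;   /* full: caller must allocate a new unit */
            }
        } while (!atomic_compare_exchange_weak(&kds->bufindx, &bindx, bindx + 1));
        /* Slot bindx is now exclusively ours. As in the kernel, the
         * caller fills the record first and only then bumps bufcnt,
         * so readers never observe a half-written entry. */
        return &kds->records[bindx];
    }

Keeping the claim index and the filled count separate is what lets writers run with preemption disabled but no spinlock on the hot path.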
kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, arg5, 0); } @@ -1364,7 +1392,7 @@ kernel_debug_flags( uint64_t flags) { kernel_debug_internal(debugid, arg1, arg2, arg3, arg4, - (uintptr_t)thread_tid(current_thread()), flags); + (uintptr_t)thread_tid(current_thread()), flags); } void @@ -1413,9 +1441,9 @@ kernel_debug_string_simple(uint32_t eventid, const char *str) } kernel_debug_internal(debugid, str_buf[0], - str_buf[1], - str_buf[2], - str_buf[3], thread_id, 0); + str_buf[1], + str_buf[2], + str_buf[3], thread_id, 0); debugid &= KDBG_EVENTID_MASK; int i = 4; @@ -1427,25 +1455,37 @@ kernel_debug_string_simple(uint32_t eventid, const char *str) debugid |= DBG_FUNC_END; } kernel_debug_internal(debugid, str_buf[i], - str_buf[i + 1], - str_buf[i + 2], - str_buf[i + 3], thread_id, 0); + str_buf[i + 1], + str_buf[i + 2], + str_buf[i + 3], thread_id, 0); } } -extern int master_cpu; /* MACH_KERNEL_PRIVATE */ +extern int master_cpu; /* MACH_KERNEL_PRIVATE */ /* * Used prior to start_kern_tracing() being called. * Log temporarily into a static buffer. */ void kernel_debug_early( - uint32_t debugid, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4) + uint32_t debugid, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4) { +#if defined(__x86_64__) + extern int early_boot; + /* + * Note that "early" isn't early enough in some cases where + * we're invoked before gsbase is set on x86, hence the + * check of "early_boot". + */ + if (early_boot) { + return; + } +#endif + /* If early tracing is over, use the normal path. */ if (kd_early_done) { KERNEL_DEBUG_CONSTANT(debugid, arg1, arg2, arg3, arg4, 0); @@ -1486,23 +1526,23 @@ kernel_debug_early_end(void) #if !CONFIG_EMBEDDED /* Fake sentinel marking the start of kernel time relative to TSC */ kernel_debug_enter(0, - TRACE_TIMESTAMPS, - 0, - (uint32_t)(tsc_rebase_abs_time >> 32), - (uint32_t)tsc_rebase_abs_time, - tsc_at_boot, - 0, - 0); + TRACE_TIMESTAMPS, + 0, + (uint32_t)(tsc_rebase_abs_time >> 32), + (uint32_t)tsc_rebase_abs_time, + tsc_at_boot, + 0, + 0); #endif for (unsigned int i = 0; i < kd_early_index; i++) { kernel_debug_enter(0, - kd_early_buffer[i].debugid, - kd_early_buffer[i].timestamp, - kd_early_buffer[i].arg1, - kd_early_buffer[i].arg2, - kd_early_buffer[i].arg3, - kd_early_buffer[i].arg4, - 0); + kd_early_buffer[i].debugid, + kd_early_buffer[i].timestamp, + kd_early_buffer[i].arg1, + kd_early_buffer[i].arg2, + kd_early_buffer[i].arg3, + kd_early_buffer[i].arg4, + 0); } /* Cut events-lost event on overflow */ @@ -1534,8 +1574,8 @@ kdebug_validate_debugid(uint32_t debugid) debugid_class = KDBG_EXTRACT_CLASS(debugid); switch (debugid_class) { - case DBG_TRACE: - return EPERM; + case DBG_TRACE: + return EPERM; } return 0; @@ -1546,8 +1586,8 @@ kdebug_validate_debugid(uint32_t debugid) */ int kdebug_typefilter(__unused struct proc* p, - struct kdebug_typefilter_args* uap, - __unused int *retval) + struct kdebug_typefilter_args* uap, + __unused int *retval) { int ret = KERN_SUCCESS; @@ -1594,19 +1634,19 @@ kdebug_typefilter(__unused struct proc* p, vm_map_t user_map = current_map(); ret = mach_to_bsd_errno( - mach_vm_map_kernel(user_map, // target map - &user_addr, // [in, out] target address - TYPEFILTER_ALLOC_SIZE, // initial size - 0, // mask (alignment?) - VM_FLAGS_ANYWHERE, // flags - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - kdbg_typefilter_memory_entry, // port (memory entry!) 
- 0, // offset (in memory entry) - FALSE, // should copy - VM_PROT_READ, // cur_prot - VM_PROT_READ, // max_prot - VM_INHERIT_SHARE)); // inherit behavior on fork + mach_vm_map_kernel(user_map, // target map + &user_addr, // [in, out] target address + TYPEFILTER_ALLOC_SIZE, // initial size + 0, // mask (alignment?) + VM_FLAGS_ANYWHERE, // flags + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + kdbg_typefilter_memory_entry, // port (memory entry!) + 0, // offset (in memory entry) + FALSE, // should copy + VM_PROT_READ, // cur_prot + VM_PROT_READ, // max_prot + VM_INHERIT_SHARE)); // inherit behavior on fork if (ret == KERN_SUCCESS) { vm_size_t user_ptr_size = vm_map_is_64bit(user_map) ? 8 : 4; @@ -1645,22 +1685,24 @@ kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval) * enabled. This is to match the userspace wrapper behavior, which is optimizing * for non-error case performance. */ -int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval) +int +kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval) { int err; - if ( __probable(kdebug_enable == 0) ) - return(0); + if (__probable(kdebug_enable == 0)) { + return 0; + } if ((err = kdebug_validate_debugid(uap->code)) != 0) { return err; } kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, - (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, - (uintptr_t)thread_tid(current_thread()), 0); + (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, + (uintptr_t)thread_tid(current_thread()), 0); - return(0); + return 0; } /* @@ -1689,7 +1731,7 @@ int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __u */ static uint64_t kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr, - size_t str_len) + size_t str_len) { /* str must be word-aligned */ uintptr_t *str = vstr; @@ -1697,14 +1739,14 @@ kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr, uintptr_t thread_id; int i; uint32_t trace_debugid = TRACEDBG_CODE(DBG_TRACE_STRING, - TRACE_STRING_GLOBAL); + TRACE_STRING_GLOBAL); thread_id = (uintptr_t)thread_tid(current_thread()); /* if the ID is being invalidated, just emit that */ if (str_id != 0 && str_len == 0) { kernel_debug_internal(trace_debugid | DBG_FUNC_START | DBG_FUNC_END, - (uintptr_t)debugid, (uintptr_t)str_id, 0, 0, thread_id, 0); + (uintptr_t)debugid, (uintptr_t)str_id, 0, 0, thread_id, 0); return str_id; } @@ -1721,7 +1763,7 @@ kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr, } kernel_debug_internal(trace_debugid, (uintptr_t)debugid, (uintptr_t)str_id, - str[0], str[1], thread_id, 0); + str[0], str[1], thread_id, 0); trace_debugid &= KDBG_EVENTID_MASK; i = 2; @@ -1732,9 +1774,9 @@ kernel_debug_string_internal(uint32_t debugid, uint64_t str_id, void *vstr, trace_debugid |= DBG_FUNC_END; } kernel_debug_internal(trace_debugid, str[i], - str[i + 1], - str[i + 2], - str[i + 3], thread_id, 0); + str[i + 1], + str[i + 2], + str[i + 3], thread_id, 0); } return str_id; @@ -1755,8 +1797,7 @@ kdebug_current_proc_enabled(uint32_t debugid) /* always emit trace system and scheduling events */ if ((KDBG_EXTRACT_CLASS(debugid) == DBG_TRACE || - (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0))) - { + (debugid & KDBG_CSC_MASK) == MACHDBG_CODE(DBG_MACH_SCHED, 0))) { return TRUE; } @@ -1803,10 +1844,9 @@ kdebug_debugid_explicitly_enabled(uint32_t debugid) } } else if (kd_ctrl_page.kdebug_flags & KDBG_VALCHECK) 
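kernel_debug_string_simple() and kernel_debug_string_internal() above stream arbitrary strings through fixed-size trace events: the string is copied into a word-aligned, zero-padded buffer and emitted four pointer-sized words per event, with DBG_FUNC_START on the first event and DBG_FUNC_END on the last so a reader can stitch the pieces back together. A sketch of the simple variant's packing; emit() is a hypothetical sink standing in for kernel_debug_internal():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define DBG_FUNC_START 1u
    #define DBG_FUNC_END   2u

    /* Hypothetical sink: one trace event carries four payload words. */
    static void
    emit(uint32_t debugid, uintptr_t a, uintptr_t b, uintptr_t c, uintptr_t d)
    {
        printf("%#x: %lx %lx %lx %lx\n", debugid, (unsigned long)a,
            (unsigned long)b, (unsigned long)c, (unsigned long)d);
    }

    static void
    emit_string(uint32_t debugid, const char *str)
    {
        uintptr_t buf[16] = { 0 };          /* bounded, like str_buf */
        size_t len = strnlen(str, sizeof(buf) - 1);
        /* Round up so the terminating NUL byte lands in a word too. */
        size_t nwords = (len + sizeof(uintptr_t)) / sizeof(uintptr_t);

        memcpy(buf, str, len);              /* zero padding already there */

        for (size_t i = 0; i < nwords; i += 4) {
            uint32_t id = debugid;
            if (i == 0) {
                id |= DBG_FUNC_START;       /* first chunk */
            }
            if (i + 4 >= nwords) {
                id |= DBG_FUNC_END;         /* last chunk (may also be first) */
            }
            emit(id, buf[i], buf[i + 1], buf[i + 2], buf[i + 3]);
        }
    }

A short string produces a single event carrying both START and END, matching the kernel's behavior.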
{ if ((debugid & KDBG_EVENTID_MASK) != kdlog_value1 && - (debugid & KDBG_EVENTID_MASK) != kdlog_value2 && - (debugid & KDBG_EVENTID_MASK) != kdlog_value3 && - (debugid & KDBG_EVENTID_MASK) != kdlog_value4) - { + (debugid & KDBG_EVENTID_MASK) != kdlog_value2 && + (debugid & KDBG_EVENTID_MASK) != kdlog_value3 && + (debugid & KDBG_EVENTID_MASK) != kdlog_value4) { return FALSE; } } @@ -1879,7 +1919,7 @@ kernel_debug_string(uint32_t debugid, uint64_t *str_id, const char *str) memset(str_buf, 0, sizeof(str_buf)); len_copied = strlcpy(str_buf, str, MAX_STR_LEN + 1); *str_id = kernel_debug_string_internal(debugid, *str_id, str_buf, - len_copied); + len_copied); return 0; } @@ -1888,8 +1928,8 @@ kernel_debug_string(uint32_t debugid, uint64_t *str_id, const char *str) */ int kdebug_trace_string(__unused struct proc *p, - struct kdebug_trace_string_args *uap, - uint64_t *retval) + struct kdebug_trace_string_args *uap, + uint64_t *retval) { __attribute__((aligned(sizeof(uintptr_t)))) char str_buf[STR_BUF_SIZE]; static_assert(sizeof(str_buf) > MAX_STR_LEN); @@ -1918,7 +1958,7 @@ kdebug_trace_string(__unused struct proc *p, } *retval = kernel_debug_string_internal(uap->debugid, uap->str_id, - NULL, 0); + NULL, 0); return 0; } @@ -1940,7 +1980,7 @@ kdebug_trace_string(__unused struct proc *p, len_copied--; *retval = kernel_debug_string_internal(uap->debugid, uap->str_id, str_buf, - len_copied); + len_copied); return 0; } @@ -1948,7 +1988,6 @@ static void kdbg_lock_init(void) { static lck_grp_attr_t *kdebug_lck_grp_attr = NULL; - static lck_grp_t *kdebug_lck_grp = NULL; static lck_attr_t *kdebug_lck_attr = NULL; if (kd_ctrl_page.kdebug_flags & KDBG_LOCKINIT) { @@ -1971,7 +2010,7 @@ kdbg_bootstrap(boolean_t early_trace) { kd_ctrl_page.kdebug_flags &= ~KDBG_WRAPPED; - return (create_buffers(early_trace)); + return create_buffers(early_trace); } int @@ -2001,20 +2040,20 @@ kdbg_reinit(boolean_t early_trace) RAW_file_offset = 0; RAW_file_written = 0; - return(ret); + return ret; } void kdbg_trace_data(struct proc *proc, long *arg_pid, long *arg_uniqueid) { - if (!proc) { + if (!proc) { *arg_pid = 0; - *arg_uniqueid = 0; - } else { + *arg_uniqueid = 0; + } else { *arg_pid = proc->p_pid; *arg_uniqueid = proc->p_uniqueid; - if ((uint64_t) *arg_uniqueid != proc->p_uniqueid) { - *arg_uniqueid = 0; + if ((uint64_t) *arg_uniqueid != proc->p_uniqueid) { + *arg_uniqueid = 0; } } } @@ -2023,7 +2062,7 @@ kdbg_trace_data(struct proc *proc, long *arg_pid, long *arg_uniqueid) void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *arg4) { - char *dbg_nameptr; + char *dbg_nameptr; int dbg_namelen; long dbg_parms[4]; @@ -2039,20 +2078,21 @@ kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, long *a */ dbg_nameptr = proc->p_comm; dbg_namelen = (int)strlen(proc->p_comm); - dbg_parms[0]=0L; - dbg_parms[1]=0L; - dbg_parms[2]=0L; - dbg_parms[3]=0L; - - if(dbg_namelen > (int)sizeof(dbg_parms)) + dbg_parms[0] = 0L; + dbg_parms[1] = 0L; + dbg_parms[2] = 0L; + dbg_parms[3] = 0L; + + if (dbg_namelen > (int)sizeof(dbg_parms)) { dbg_namelen = (int)sizeof(dbg_parms); - + } + strncpy((char *)dbg_parms, dbg_nameptr, dbg_namelen); - *arg1=dbg_parms[0]; - *arg2=dbg_parms[1]; - *arg3=dbg_parms[2]; - *arg4=dbg_parms[3]; + *arg1 = dbg_parms[0]; + *arg2 = dbg_parms[1]; + *arg3 = dbg_parms[2]; + *arg4 = dbg_parms[3]; } static void @@ -2065,16 +2105,17 @@ kdbg_resolve_map(thread_t th_act, void *opaque) mapptr = &t->map[t->count]; mapptr->thread = (uintptr_t)thread_tid(th_act); - (void) strlcpy 
(mapptr->command, t->atts->task_comm, - sizeof(t->atts->task_comm)); + (void) strlcpy(mapptr->command, t->atts->task_comm, + sizeof(t->atts->task_comm)); /* * Some kernel threads have no associated pid. * We still need to mark the entry as valid. */ - if (t->atts->pid) + if (t->atts->pid) { mapptr->valid = t->atts->pid; - else + } else { mapptr->valid = 1; + } t->count++; } @@ -2116,7 +2157,7 @@ kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, uint32_t bytes_needed = sizeof(kd_cpumap_header) + cpu_count * sizeof(kd_cpumap); uint32_t bytes_available = *cpumap_size; *cpumap_size = bytes_needed; - + if (*cpumap == NULL) { if (kmem_alloc(kernel_map, (vm_offset_t*)cpumap, (vm_size_t)*cpumap_size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) { return ENOMEM; @@ -2138,11 +2179,11 @@ kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, cpus[index].cpu_id = iops->cpu_id; cpus[index].flags = KDBG_CPUMAP_IS_IOP; strlcpy(cpus[index].name, iops->callback.iop_name, sizeof(cpus->name)); - + iops = iops->next; index--; } - + while (index >= 0) { cpus[index].cpu_id = index; cpus[index].flags = 0; @@ -2150,7 +2191,7 @@ kdbg_cpumap_init_internal(kd_iop_t* iops, uint32_t cpu_count, uint8_t** cpumap, index--; } - + return KERN_SUCCESS; } @@ -2190,7 +2231,7 @@ kdbg_thrmap_init_internal(unsigned int count, unsigned int *mapsize, unsigned in /* * The proc count could change during buffer allocation, * so introduce a small fudge factor to bump up the - * buffer sizes. This gives new tasks some chance of + * buffer sizes. This gives new tasks some chance of * making into the tables. Bump up by 25%. */ *mapcount += *mapcount / 4; @@ -2337,16 +2378,16 @@ int kdbg_setpid(kd_regtype *kdr) { pid_t pid; - int flag, ret=0; + int flag, ret = 0; struct proc *p; pid = (pid_t)kdr->value1; flag = (int)kdr->value2; if (pid >= 0) { - if ((p = proc_find(pid)) == NULL) + if ((p = proc_find(pid)) == NULL) { ret = ESRCH; - else { + } else { if (flag == 1) { /* * turn on pid check for this and all pids @@ -2362,16 +2403,16 @@ kdbg_setpid(kd_regtype *kdr) * Don't turn off all pid checking though * * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDCHECK; - */ + */ p->p_kdebug = 0; } proc_rele(p); } - } - else + } else { ret = EINVAL; + } - return(ret); + return ret; } /* This is for pid exclusion in the trace buffer */ @@ -2379,16 +2420,16 @@ int kdbg_setpidex(kd_regtype *kdr) { pid_t pid; - int flag, ret=0; + int flag, ret = 0; struct proc *p; pid = (pid_t)kdr->value1; flag = (int)kdr->value2; if (pid >= 0) { - if ((p = proc_find(pid)) == NULL) + if ((p = proc_find(pid)) == NULL) { ret = ESRCH; - else { + } else { if (flag == 1) { /* * turn on pid exclusion @@ -2398,22 +2439,22 @@ kdbg_setpidex(kd_regtype *kdr) kdbg_set_flags(SLOW_CHECKS, 0, TRUE); p->p_kdebug = 1; - } - else { + } else { /* * turn off pid exclusion for this pid value * Don't turn off all pid exclusion though * * kd_ctrl_page.kdebug_flags &= ~KDBG_PIDEXCLUDE; - */ + */ p->p_kdebug = 0; } proc_rele(p); } - } else + } else { ret = EINVAL; + } - return(ret); + return ret; } /* @@ -2477,7 +2518,7 @@ kdbg_copyin_typefilter(user_addr_t addr, size_t size) * * Allocating a typefilter for the copyin allows * the kernel to hold the invariant that DBG_TRACE - * must always be allowed. + * must always be allowed. 
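kdbg_trace_string() above ships the process name the same way: p_comm is overlaid onto four zeroed longs, so the name rides in the four trace arguments and anything past 4*sizeof(long) bytes is silently dropped. Compactly:

    #include <stddef.h>
    #include <string.h>

    /* Mirror of kdbg_trace_string()'s packing: zero the four longs,
     * then overlay the command string on top of them. memcpy here
     * does what the original's strncpy does, since the destination
     * is pre-zeroed and no trailing NUL is required. */
    static void
    pack_comm(const char *comm, long out[4])
    {
        size_t len = strlen(comm);

        out[0] = out[1] = out[2] = out[3] = 0L;
        if (len > 4 * sizeof(long)) {
            len = 4 * sizeof(long);
        }
        memcpy(out, comm, len);
    }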
*/ if (!kdbg_typefilter) { if ((ret = kdbg_initialize_typefilter(tf))) { @@ -2492,8 +2533,9 @@ kdbg_copyin_typefilter(user_addr_t addr, size_t size) kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, KD_CALLBACK_TYPEFILTER_CHANGED, kdbg_typefilter); } - if (tf) + if (tf) { typefilter_deallocate(tf); + } } return ret; @@ -2539,7 +2581,7 @@ kdbg_disable_typefilter(void) */ typefilter_allow_all(kdbg_typefilter); kdbg_iop_list_callback(kd_ctrl_page.kdebug_iops, - KD_CALLBACK_TYPEFILTER_CHANGED, kdbg_typefilter); + KD_CALLBACK_TYPEFILTER_CHANGED, kdbg_typefilter); } } @@ -2560,32 +2602,31 @@ kdebug_commpage_state(void) int kdbg_setreg(kd_regtype * kdr) { - int ret=0; + int ret = 0; unsigned int val_1, val_2, val; switch (kdr->type) { - - case KDBG_CLASSTYPE : + case KDBG_CLASSTYPE: val_1 = (kdr->value1 & 0xff); val_2 = (kdr->value2 & 0xff); - kdlog_beg = (val_1<<24); - kdlog_end = (val_2<<24); + kdlog_beg = (val_1 << 24); + kdlog_end = (val_2 << 24); kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES; kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */ kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_CLASSTYPE); kdbg_set_flags(SLOW_CHECKS, 0, TRUE); break; - case KDBG_SUBCLSTYPE : + case KDBG_SUBCLSTYPE: val_1 = (kdr->value1 & 0xff); val_2 = (kdr->value2 & 0xff); val = val_2 + 1; - kdlog_beg = ((val_1<<24) | (val_2 << 16)); - kdlog_end = ((val_1<<24) | (val << 16)); + kdlog_beg = ((val_1 << 24) | (val_2 << 16)); + kdlog_end = ((val_1 << 24) | (val << 16)); kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES; kd_ctrl_page.kdebug_flags &= ~KDBG_VALCHECK; /* Turn off specific value check */ kd_ctrl_page.kdebug_flags |= (KDBG_RANGECHECK | KDBG_SUBCLSTYPE); kdbg_set_flags(SLOW_CHECKS, 0, TRUE); break; - case KDBG_RANGETYPE : + case KDBG_RANGETYPE: kdlog_beg = (kdr->value1); kdlog_end = (kdr->value2); kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES; @@ -2603,31 +2644,32 @@ kdbg_setreg(kd_regtype * kdr) kd_ctrl_page.kdebug_flags |= KDBG_VALCHECK; /* Turn on specific value check */ kdbg_set_flags(SLOW_CHECKS, 0, TRUE); break; - case KDBG_TYPENONE : + case KDBG_TYPENONE: kd_ctrl_page.kdebug_flags &= (unsigned int)~KDBG_CKTYPES; - if ( (kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | - KDBG_PIDCHECK | KDBG_PIDEXCLUDE | - KDBG_TYPEFILTER_CHECK)) ) + if ((kd_ctrl_page.kdebug_flags & (KDBG_RANGECHECK | KDBG_VALCHECK | + KDBG_PIDCHECK | KDBG_PIDEXCLUDE | + KDBG_TYPEFILTER_CHECK))) { kdbg_set_flags(SLOW_CHECKS, 0, TRUE); - else + } else { kdbg_set_flags(SLOW_CHECKS, 0, FALSE); + } kdlog_beg = 0; kdlog_end = 0; break; - default : + default: ret = EINVAL; break; } - return(ret); + return ret; } static int kdbg_write_to_vnode(caddr_t buffer, size_t size, vnode_t vp, vfs_context_t ctx, off_t file_offset) { - return vn_rdwr(UIO_WRITE, vp, buffer, size, file_offset, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, - vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); + return vn_rdwr(UIO_WRITE, vp, buffer, size, file_offset, UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, + vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); } int @@ -2652,8 +2694,7 @@ kdbg_write_v3_chunk_header(user_addr_t buffer, uint32_t tag, uint32_t sub_tag, u goto write_error; } RAW_file_offset += (sizeof(kd_chunk_header_v3)); - } - else { + } else { ret = copyout(&header, buffer, sizeof(kd_chunk_header_v3)); if (ret) { goto write_error; @@ -2679,7 +2720,7 @@ kdbg_write_v3_chunk_header_to_buffer(void * buffer, uint32_t tag, uint32_t sub_t memcpy(buffer, &header, 
sizeof(kd_chunk_header_v3)); - return (sizeof(kd_chunk_header_v3)); + return sizeof(kd_chunk_header_v3); } int @@ -2692,7 +2733,7 @@ kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void p = current_proc(); proc_fdlock(p); - if ( (fp_lookup(p, fd, &fp, 1)) ) { + if ((fp_lookup(p, fd, &fp, 1))) { proc_fdunlock(p); return EFAULT; } @@ -2708,7 +2749,7 @@ kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void vp = (struct vnode *) fp->f_fglob->fg_data; proc_fdunlock(p); - if ( (vnode_getwithref(vp)) == 0 ) { + if ((vnode_getwithref(vp)) == 0) { RAW_file_offset = fp->f_fglob->fg_offset; kd_chunk_header_v3 chunk_header = { @@ -2738,180 +2779,176 @@ kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void user_addr_t kdbg_write_v3_event_chunk_header(user_addr_t buffer, uint32_t tag, uint64_t length, vnode_t vp, vfs_context_t ctx) { - uint64_t future_chunk_timestamp = 0; - length += sizeof(uint64_t); - - if (kdbg_write_v3_chunk_header(buffer, tag, V3_EVENT_DATA_VERSION, length, vp, ctx)) { - return 0; - } - if (buffer) { - buffer += sizeof(kd_chunk_header_v3); - } - - // Check that only one of them is valid - assert(!buffer ^ !vp); - assert((vp == NULL) || (ctx != NULL)); - - // Write the 8-byte future_chunk_timestamp field in the payload - if (buffer || vp) { - if (vp) { - int ret = kdbg_write_to_vnode((caddr_t)&future_chunk_timestamp, sizeof(uint64_t), vp, ctx, RAW_file_offset); - if (!ret) { - RAW_file_offset += (sizeof(uint64_t)); - } - } - else { - if (copyout(&future_chunk_timestamp, buffer, sizeof(uint64_t))) { - return 0; - } - } - } - - return (buffer + sizeof(uint64_t)); + uint64_t future_chunk_timestamp = 0; + length += sizeof(uint64_t); + + if (kdbg_write_v3_chunk_header(buffer, tag, V3_EVENT_DATA_VERSION, length, vp, ctx)) { + return 0; + } + if (buffer) { + buffer += sizeof(kd_chunk_header_v3); + } + + // Check that only one of them is valid + assert(!buffer ^ !vp); + assert((vp == NULL) || (ctx != NULL)); + + // Write the 8-byte future_chunk_timestamp field in the payload + if (buffer || vp) { + if (vp) { + int ret = kdbg_write_to_vnode((caddr_t)&future_chunk_timestamp, sizeof(uint64_t), vp, ctx, RAW_file_offset); + if (!ret) { + RAW_file_offset += (sizeof(uint64_t)); + } + } else { + if (copyout(&future_chunk_timestamp, buffer, sizeof(uint64_t))) { + return 0; + } + } + } + + return buffer + sizeof(uint64_t); } int kdbg_write_v3_header(user_addr_t user_header, size_t *user_header_size, int fd) { - int ret = KERN_SUCCESS; - - uint8_t* cpumap = 0; - uint32_t cpumap_size = 0; - uint32_t thrmap_size = 0; - - size_t bytes_needed = 0; - - // Check that only one of them is valid - assert(!user_header ^ !fd); - assert(user_header_size); - - if ( !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) ) { - ret = EINVAL; - goto bail; - } - - if ( !(user_header || fd) ) { - ret = EINVAL; - goto bail; - } - - // Initialize the cpu map - ret = kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size); - if (ret != KERN_SUCCESS) { - goto bail; - } - - // Check if a thread map is initialized - if ( !kd_mapptr ) { - ret = EINVAL; - goto bail; - } - thrmap_size = kd_mapcount * sizeof(kd_threadmap); - - mach_timebase_info_data_t timebase = {0, 0}; - clock_timebase_info(&timebase); - - // Setup the header. - // See v3 header description in sys/kdebug.h for more inforamtion. 
- kd_header_v3 header = { - .tag = RAW_VERSION3, - .sub_tag = V3_HEADER_VERSION, - .length = (sizeof(kd_header_v3) + cpumap_size - sizeof(kd_cpumap_header)), - .timebase_numer = timebase.numer, - .timebase_denom = timebase.denom, - .timestamp = 0, /* FIXME rdar://problem/22053009 */ - .walltime_secs = 0, - .walltime_usecs = 0, - .timezone_minuteswest = 0, - .timezone_dst = 0, + int ret = KERN_SUCCESS; + + uint8_t* cpumap = 0; + uint32_t cpumap_size = 0; + uint32_t thrmap_size = 0; + + size_t bytes_needed = 0; + + // Check that only one of them is valid + assert(!user_header ^ !fd); + assert(user_header_size); + + if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT)) { + ret = EINVAL; + goto bail; + } + + if (!(user_header || fd)) { + ret = EINVAL; + goto bail; + } + + // Initialize the cpu map + ret = kdbg_cpumap_init_internal(kd_ctrl_page.kdebug_iops, kd_ctrl_page.kdebug_cpus, &cpumap, &cpumap_size); + if (ret != KERN_SUCCESS) { + goto bail; + } + + // Check if a thread map is initialized + if (!kd_mapptr) { + ret = EINVAL; + goto bail; + } + thrmap_size = kd_mapcount * sizeof(kd_threadmap); + + mach_timebase_info_data_t timebase = {0, 0}; + clock_timebase_info(&timebase); + + // Setup the header. + // See v3 header description in sys/kdebug.h for more information. + kd_header_v3 header = { + .tag = RAW_VERSION3, + .sub_tag = V3_HEADER_VERSION, + .length = (sizeof(kd_header_v3) + cpumap_size - sizeof(kd_cpumap_header)), + .timebase_numer = timebase.numer, + .timebase_denom = timebase.denom, + .timestamp = 0, /* FIXME rdar://problem/22053009 */ + .walltime_secs = 0, + .walltime_usecs = 0, + .timezone_minuteswest = 0, + .timezone_dst = 0, #if defined(__LP64__) - .flags = 1, + .flags = 1, #else - .flags = 0, + .flags = 0, #endif - }; - - // If its a buffer, check if we have enough space to copy the header and the maps. - if (user_header) { - bytes_needed = header.length + thrmap_size + (2 * sizeof(kd_chunk_header_v3)); - if (*user_header_size < bytes_needed) { - ret = EINVAL; - goto bail; - } - } - - // Start writing the header - if (fd) { - void *hdr_ptr = (void *)(((uintptr_t) &header) + sizeof(kd_chunk_header_v3)); - size_t payload_size = (sizeof(kd_header_v3) - sizeof(kd_chunk_header_v3)); - - ret = kdbg_write_v3_chunk_to_fd(RAW_VERSION3, V3_HEADER_VERSION, header.length, hdr_ptr, payload_size, fd); - if (ret) { - goto bail; - } - } - else { - if (copyout(&header, user_header, sizeof(kd_header_v3))) { - ret = EFAULT; - goto bail; - } - // Update the user pointer - user_header += sizeof(kd_header_v3); - } - - // Write a cpu map.
This is a sub chunk of the header - cpumap = (uint8_t*)((uintptr_t) cpumap + sizeof(kd_cpumap_header)); - size_t payload_size = (size_t)(cpumap_size - sizeof(kd_cpumap_header)); - if (fd) { - ret = kdbg_write_v3_chunk_to_fd(V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, (void *)cpumap, payload_size, fd); - if (ret) { - goto bail; - } - } - else { - ret = kdbg_write_v3_chunk_header(user_header, V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, NULL, NULL); - if (ret) { - goto bail; - } - user_header += sizeof(kd_chunk_header_v3); - if (copyout(cpumap, user_header, payload_size)) { - ret = EFAULT; - goto bail; - } - // Update the user pointer - user_header += payload_size; - } - - // Write a thread map - if (fd) { - ret = kdbg_write_v3_chunk_to_fd(V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, (void *)kd_mapptr, thrmap_size, fd); - if (ret) { - goto bail; - } - } - else { - ret = kdbg_write_v3_chunk_header(user_header, V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, NULL, NULL); - if (ret) { - goto bail; - } - user_header += sizeof(kd_chunk_header_v3); - if (copyout(kd_mapptr, user_header, thrmap_size)) { - ret = EFAULT; - goto bail; - } - user_header += thrmap_size; - } - - if (fd) { - RAW_file_written += bytes_needed; - } - - *user_header_size = bytes_needed; + }; + + // If it's a buffer, check if we have enough space to copy the header and the maps. + if (user_header) { + bytes_needed = header.length + thrmap_size + (2 * sizeof(kd_chunk_header_v3)); + if (*user_header_size < bytes_needed) { + ret = EINVAL; + goto bail; + } + } + + // Start writing the header + if (fd) { + void *hdr_ptr = (void *)(((uintptr_t) &header) + sizeof(kd_chunk_header_v3)); + size_t payload_size = (sizeof(kd_header_v3) - sizeof(kd_chunk_header_v3)); + + ret = kdbg_write_v3_chunk_to_fd(RAW_VERSION3, V3_HEADER_VERSION, header.length, hdr_ptr, payload_size, fd); + if (ret) { + goto bail; + } + } else { + if (copyout(&header, user_header, sizeof(kd_header_v3))) { + ret = EFAULT; + goto bail; + } + // Update the user pointer + user_header += sizeof(kd_header_v3); + } + + // Write a cpu map.
This is a sub chunk of the header + cpumap = (uint8_t*)((uintptr_t) cpumap + sizeof(kd_cpumap_header)); + size_t payload_size = (size_t)(cpumap_size - sizeof(kd_cpumap_header)); + if (fd) { + ret = kdbg_write_v3_chunk_to_fd(V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, (void *)cpumap, payload_size, fd); + if (ret) { + goto bail; + } + } else { + ret = kdbg_write_v3_chunk_header(user_header, V3_CPU_MAP, V3_CPUMAP_VERSION, payload_size, NULL, NULL); + if (ret) { + goto bail; + } + user_header += sizeof(kd_chunk_header_v3); + if (copyout(cpumap, user_header, payload_size)) { + ret = EFAULT; + goto bail; + } + // Update the user pointer + user_header += payload_size; + } + + // Write a thread map + if (fd) { + ret = kdbg_write_v3_chunk_to_fd(V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, (void *)kd_mapptr, thrmap_size, fd); + if (ret) { + goto bail; + } + } else { + ret = kdbg_write_v3_chunk_header(user_header, V3_THREAD_MAP, V3_THRMAP_VERSION, thrmap_size, NULL, NULL); + if (ret) { + goto bail; + } + user_header += sizeof(kd_chunk_header_v3); + if (copyout(kd_mapptr, user_header, thrmap_size)) { + ret = EFAULT; + goto bail; + } + user_header += thrmap_size; + } + + if (fd) { + RAW_file_written += bytes_needed; + } + + *user_header_size = bytes_needed; bail: - if (cpumap) { - kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size); - } - return (ret); + if (cpumap) { + kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size); + } + return ret; } int @@ -2931,12 +2968,14 @@ kdbg_readcpumap(user_addr_t user_cpumap, size_t *user_cpumap_size) } *user_cpumap_size = cpumap_size; kmem_free(kernel_map, (vm_offset_t)cpumap, cpumap_size); - } else + } else { ret = EINVAL; - } else + } + } else { ret = EINVAL; + } - return (ret); + return ret; } int @@ -2948,20 +2987,22 @@ kdbg_readcurthrmap(user_addr_t buffer, size_t *bufsize) unsigned int count = 0; int ret = 0; - count = *bufsize/sizeof(kd_threadmap); + count = *bufsize / sizeof(kd_threadmap); *bufsize = 0; - if ( (mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount)) ) { - if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap))) + if ((mapptr = kdbg_thrmap_init_internal(count, &mapsize, &mapcount))) { + if (copyout(mapptr, buffer, mapcount * sizeof(kd_threadmap))) { ret = EFAULT; - else + } else { *bufsize = (mapcount * sizeof(kd_threadmap)); + } kmem_free(kernel_map, (vm_offset_t)mapptr, mapsize); - } else + } else { ret = EINVAL; + } - return (ret); + return ret; } static int @@ -3020,7 +3061,7 @@ kdbg_write_v1_header(boolean_t write_thread_map, vnode_t vp, vfs_context_t ctx) * the cpumap is embedded in the last 4K page before when the event data is expected. 
* This way the tools can read the data starting the next page boundary on both * 4K and 16K systems preserving compatibility with older versions of the tools - */ + */ if (pad_size > PAGE_4KB) { pad_size -= PAGE_4KB; extra_thread_count = (pad_size / sizeof(kd_threadmap)) + 1; @@ -3035,7 +3076,7 @@ kdbg_write_v1_header(boolean_t write_thread_map, vnode_t vp, vfs_context_t ctx) header.TOD_usecs = usecs; ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)&header, sizeof(RAW_header), RAW_file_offset, - UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); + UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); if (ret) { goto write_error; } @@ -3044,7 +3085,7 @@ kdbg_write_v1_header(boolean_t write_thread_map, vnode_t vp, vfs_context_t ctx) if (write_thread_map) { ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)kd_mapptr, map_size, RAW_file_offset, - UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); + UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); if (ret) { goto write_error; } @@ -3063,7 +3104,7 @@ kdbg_write_v1_header(boolean_t write_thread_map, vnode_t vp, vfs_context_t ctx) memset(pad_buf, 0, pad_size); ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset, - UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); + UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); kfree(pad_buf, pad_size); if (ret) { goto write_error; @@ -3093,7 +3134,7 @@ kdbg_write_v1_header(boolean_t write_thread_map, vnode_t vp, vfs_context_t ctx) } ret = vn_rdwr(UIO_WRITE, vp, (caddr_t)pad_buf, pad_size, RAW_file_offset, - UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); + UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); kfree(pad_buf, pad_size); if (ret) { goto write_error; @@ -3204,8 +3245,7 @@ kdbg_readthrmap_v3(user_addr_t buffer, size_t buffer_size, int fd) map_initialized = (kd_ctrl_page.kdebug_flags & KDBG_MAPINIT); map_size = kd_mapcount * sizeof(kd_threadmap); - if (map_initialized && (buffer_size >= map_size)) - { + if (map_initialized && (buffer_size >= map_size)) { ret = kdbg_write_v3_header(buffer, &buffer_size, fd); if (ret == 0) { @@ -3215,24 +3255,17 @@ kdbg_readthrmap_v3(user_addr_t buffer, size_t buffer_size, int fd) ret = EINVAL; } - return ret; + return ret; } static void -kdbg_set_nkdbufs(unsigned int value) +kdbg_set_nkdbufs(unsigned int req_nkdbufs) { /* - * We allow a maximum buffer size of 50% of either ram or max mapped - * address, whichever is smaller 'value' is the desired number of trace - * entries + * Only allow allocation up to half the available memory (sane_size). */ - unsigned int max_entries = (sane_size / 2) / sizeof(kd_buf); - - if (value <= max_entries) { - nkdbufs = value; - } else { - nkdbufs = max_entries; - } + uint64_t max_nkdbufs = (sane_size / 2) / sizeof(kd_buf); + nkdbufs = (req_nkdbufs > max_nkdbufs) ? 
max_nkdbufs : req_nkdbufs; } /* @@ -3255,7 +3288,7 @@ kdbg_wait(uint64_t timeout_ms, boolean_t locked_wait) if (timeout_ms != 0) { uint64_t ns = timeout_ms * NSEC_PER_MSEC; - nanoseconds_to_absolutetime(ns, &abstime); + nanoseconds_to_absolutetime(ns, &abstime); clock_absolutetime_interval_to_deadline(abstime, &abstime); } @@ -3263,7 +3296,7 @@ kdbg_wait(uint64_t timeout_ms, boolean_t locked_wait) if (!s) { panic("kdbg_wait() called with interrupts disabled"); } - lck_spin_lock(kdw_spin_lock); + lck_spin_lock_grp(kdw_spin_lock, kdebug_lck_grp); if (!locked_wait) { /* drop the mutex to allow others to access trace */ @@ -3271,8 +3304,7 @@ kdbg_wait(uint64_t timeout_ms, boolean_t locked_wait) } while (wait_result == THREAD_AWAKENED && - kd_ctrl_page.kds_inuse_count < n_storage_threshold) - { + kd_ctrl_page.kds_inuse_count < n_storage_threshold) { kds_waiter = 1; if (abstime) { @@ -3320,8 +3352,7 @@ kdbg_wakeup(void) if (lck_spin_try_lock(kdw_spin_lock)) { if (kds_waiter && - (kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) - { + (kd_ctrl_page.kds_inuse_count >= n_storage_threshold)) { kds_waiter = 0; need_kds_wakeup = TRUE; } @@ -3346,14 +3377,13 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep) proc_t p; if (name[0] == KERN_KDWRITETR || - name[0] == KERN_KDWRITETR_V3 || - name[0] == KERN_KDWRITEMAP || - name[0] == KERN_KDWRITEMAP_V3 || - name[0] == KERN_KDEFLAGS || - name[0] == KERN_KDDFLAGS || - name[0] == KERN_KDENABLE || - name[0] == KERN_KDSETBUF) - { + name[0] == KERN_KDWRITETR_V3 || + name[0] == KERN_KDWRITEMAP || + name[0] == KERN_KDWRITEMAP_V3 || + name[0] == KERN_KDEFLAGS || + name[0] == KERN_KDDFLAGS || + name[0] == KERN_KDENABLE || + name[0] == KERN_KDSETBUF) { if (namelen < 2) { return EINVAL; } @@ -3372,8 +3402,7 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep) */ if (name[0] != KERN_KDGETBUF && name[0] != KERN_KDGETREG && - name[0] != KERN_KDREADCURTHRMAP) - { + name[0] != KERN_KDREADCURTHRMAP) { if ((ret = ktrace_configure(KTRACE_KDEBUG))) { goto out; } @@ -3383,242 +3412,244 @@ kdbg_control(int *name, u_int namelen, user_addr_t where, size_t *sizep) } } - switch(name[0]) { - case KERN_KDGETBUF: - if (size < sizeof(kd_bufinfo.nkdbufs)) { - /* - * There is not enough room to return even - * the first element of the info structure. - */ - ret = EINVAL; - break; - } + switch (name[0]) { + case KERN_KDGETBUF: + if (size < sizeof(kd_bufinfo.nkdbufs)) { + /* + * There is not enough room to return even + * the first element of the info structure. + */ + ret = EINVAL; + break; + } - memset(&kd_bufinfo, 0, sizeof(kd_bufinfo)); + memset(&kd_bufinfo, 0, sizeof(kd_bufinfo)); - kd_bufinfo.nkdbufs = nkdbufs; - kd_bufinfo.nkdthreads = kd_mapcount; + kd_bufinfo.nkdbufs = nkdbufs; + kd_bufinfo.nkdthreads = kd_mapcount; - if ( (kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG) ) - kd_bufinfo.nolog = 1; - else - kd_bufinfo.nolog = 0; + if ((kd_ctrl_page.kdebug_slowcheck & SLOW_NOLOG)) { + kd_bufinfo.nolog = 1; + } else { + kd_bufinfo.nolog = 0; + } - kd_bufinfo.flags = kd_ctrl_page.kdebug_flags; + kd_bufinfo.flags = kd_ctrl_page.kdebug_flags; #if defined(__LP64__) - kd_bufinfo.flags |= KDBG_LP64; + kd_bufinfo.flags |= KDBG_LP64; #endif - { - int pid = ktrace_get_owning_pid(); - kd_bufinfo.bufid = (pid == 0 ? -1 : pid); - } + { + int pid = ktrace_get_owning_pid(); + kd_bufinfo.bufid = (pid == 0 ? 
-1 : pid); + } - if (size >= sizeof(kd_bufinfo)) { - /* - * Provide all the info we have - */ - if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) - ret = EINVAL; - } else { - /* - * For backwards compatibility, only provide - * as much info as there is room for. - */ - if (copyout(&kd_bufinfo, where, size)) - ret = EINVAL; + if (size >= sizeof(kd_bufinfo)) { + /* + * Provide all the info we have + */ + if (copyout(&kd_bufinfo, where, sizeof(kd_bufinfo))) { + ret = EINVAL; } - break; + } else { + /* + * For backwards compatibility, only provide + * as much info as there is room for. + */ + if (copyout(&kd_bufinfo, where, size)) { + ret = EINVAL; + } + } + break; - case KERN_KDREADCURTHRMAP: - ret = kdbg_readcurthrmap(where, sizep); - break; + case KERN_KDREADCURTHRMAP: + ret = kdbg_readcurthrmap(where, sizep); + break; - case KERN_KDEFLAGS: - value &= KDBG_USERFLAGS; - kd_ctrl_page.kdebug_flags |= value; - break; + case KERN_KDEFLAGS: + value &= KDBG_USERFLAGS; + kd_ctrl_page.kdebug_flags |= value; + break; - case KERN_KDDFLAGS: - value &= KDBG_USERFLAGS; - kd_ctrl_page.kdebug_flags &= ~value; - break; + case KERN_KDDFLAGS: + value &= KDBG_USERFLAGS; + kd_ctrl_page.kdebug_flags &= ~value; + break; - case KERN_KDENABLE: + case KERN_KDENABLE: + /* + * Enable tracing mechanism. Two types: + * KDEBUG_TRACE is the standard one, + * and KDEBUG_PPT which is a carefully + * chosen subset to avoid performance impact. + */ + if (value) { /* - * Enable tracing mechanism. Two types: - * KDEBUG_TRACE is the standard one, - * and KDEBUG_PPT which is a carefully - * chosen subset to avoid performance impact. + * enable only if buffer is initialized */ - if (value) { - /* - * enable only if buffer is initialized - */ - if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || - !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) { - ret = EINVAL; - break; - } - kdbg_thrmap_init(); - - kdbg_set_tracing_enabled(TRUE, value); + if (!(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || + !(value == KDEBUG_ENABLE_TRACE || value == KDEBUG_ENABLE_PPT)) { + ret = EINVAL; + break; } - else - { - if (!kdebug_enable) { - break; - } + kdbg_thrmap_init(); - kernel_debug_disable(); + kdbg_set_tracing_enabled(TRUE, value); + } else { + if (!kdebug_enable) { + break; } - break; - case KERN_KDSETBUF: - kdbg_set_nkdbufs(value); - break; + kernel_debug_disable(); + } + break; - case KERN_KDSETUP: - ret = kdbg_reinit(FALSE); - break; + case KERN_KDSETBUF: + kdbg_set_nkdbufs(value); + break; - case KERN_KDREMOVE: - ktrace_reset(KTRACE_KDEBUG); - break; + case KERN_KDSETUP: + ret = kdbg_reinit(FALSE); + break; - case KERN_KDSETREG: - if(size < sizeof(kd_regtype)) { - ret = EINVAL; - break; - } - if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { - ret = EINVAL; - break; - } + case KERN_KDREMOVE: + ktrace_reset(KTRACE_KDEBUG); + break; - ret = kdbg_setreg(&kd_Reg); + case KERN_KDSETREG: + if (size < sizeof(kd_regtype)) { + ret = EINVAL; break; - - case KERN_KDGETREG: + } + if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { ret = EINVAL; break; + } - case KERN_KDREADTR: - ret = kdbg_read(where, sizep, NULL, NULL, RAW_VERSION1); - break; + ret = kdbg_setreg(&kd_Reg); + break; - case KERN_KDWRITETR: - case KERN_KDWRITETR_V3: - case KERN_KDWRITEMAP: - case KERN_KDWRITEMAP_V3: - { - struct vfs_context context; - struct fileproc *fp; - size_t number; - vnode_t vp; - int fd; + case KERN_KDGETREG: + ret = EINVAL; + break; - if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) { - (void)kdbg_wait(size, TRUE); - } - p = 
current_proc(); - fd = value; + case KERN_KDREADTR: + ret = kdbg_read(where, sizep, NULL, NULL, RAW_VERSION1); + break; - proc_fdlock(p); - if ( (ret = fp_lookup(p, fd, &fp, 1)) ) { - proc_fdunlock(p); - break; - } - context.vc_thread = current_thread(); - context.vc_ucred = fp->f_fglob->fg_cred; + case KERN_KDWRITETR: + case KERN_KDWRITETR_V3: + case KERN_KDWRITEMAP: + case KERN_KDWRITEMAP_V3: + { + struct vfs_context context; + struct fileproc *fp; + size_t number; + vnode_t vp; + int fd; + + if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) { + (void)kdbg_wait(size, TRUE); + } + p = current_proc(); + fd = value; - if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) { - fp_drop(p, fd, fp, 1); - proc_fdunlock(p); + proc_fdlock(p); + if ((ret = fp_lookup(p, fd, &fp, 1))) { + proc_fdunlock(p); + break; + } + context.vc_thread = current_thread(); + context.vc_ucred = fp->f_fglob->fg_cred; - ret = EBADF; - break; - } - vp = (struct vnode *)fp->f_fglob->fg_data; + if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) { + fp_drop(p, fd, fp, 1); proc_fdunlock(p); - if ((ret = vnode_getwithref(vp)) == 0) { - RAW_file_offset = fp->f_fglob->fg_offset; - if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) { - number = nkdbufs * sizeof(kd_buf); + ret = EBADF; + break; + } + vp = (struct vnode *)fp->f_fglob->fg_data; + proc_fdunlock(p); + + if ((ret = vnode_getwithref(vp)) == 0) { + RAW_file_offset = fp->f_fglob->fg_offset; + if (name[0] == KERN_KDWRITETR || name[0] == KERN_KDWRITETR_V3) { + number = nkdbufs * sizeof(kd_buf); - KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_START); - if (name[0] == KERN_KDWRITETR_V3) - ret = kdbg_read(0, &number, vp, &context, RAW_VERSION3); - else - ret = kdbg_read(0, &number, vp, &context, RAW_VERSION1); - KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_END, number); + KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_START); + if (name[0] == KERN_KDWRITETR_V3) { + ret = kdbg_read(0, &number, vp, &context, RAW_VERSION3); + } else { + ret = kdbg_read(0, &number, vp, &context, RAW_VERSION1); + } + KDBG_RELEASE(TRACE_WRITING_EVENTS | DBG_FUNC_END, number); - *sizep = number; + *sizep = number; + } else { + number = kd_mapcount * sizeof(kd_threadmap); + if (name[0] == KERN_KDWRITEMAP_V3) { + ret = kdbg_readthrmap_v3(0, number, fd); } else { - number = kd_mapcount * sizeof(kd_threadmap); - if (name[0] == KERN_KDWRITEMAP_V3) { - ret = kdbg_readthrmap_v3(0, number, fd); - } else { - ret = kdbg_write_thread_map(vp, &context); - } + ret = kdbg_write_thread_map(vp, &context); } - fp->f_fglob->fg_offset = RAW_file_offset; - vnode_put(vp); } - fp_drop(p, fd, fp, 0); + fp->f_fglob->fg_offset = RAW_file_offset; + vnode_put(vp); + } + fp_drop(p, fd, fp, 0); + break; + } + case KERN_KDBUFWAIT: + *sizep = kdbg_wait(size, FALSE); + break; + + case KERN_KDPIDTR: + if (size < sizeof(kd_regtype)) { + ret = EINVAL; break; } - case KERN_KDBUFWAIT: - *sizep = kdbg_wait(size, FALSE); + if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { + ret = EINVAL; break; + } - case KERN_KDPIDTR: - if (size < sizeof(kd_regtype)) { - ret = EINVAL; - break; - } - if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { - ret = EINVAL; - break; - } + ret = kdbg_setpid(&kd_Reg); + break; - ret = kdbg_setpid(&kd_Reg); + case KERN_KDPIDEX: + if (size < sizeof(kd_regtype)) { + ret = EINVAL; break; - - case KERN_KDPIDEX: - if (size < sizeof(kd_regtype)) { - ret = EINVAL; - break; - } - if (copyin(where, &kd_Reg, sizeof(kd_regtype))) { - ret = EINVAL; - break; - } - - ret = kdbg_setpidex(&kd_Reg); + } + if 
(copyin(where, &kd_Reg, sizeof(kd_regtype))) { + ret = EINVAL; break; + } - case KERN_KDCPUMAP: - ret = kdbg_readcpumap(where, sizep); - break; + ret = kdbg_setpidex(&kd_Reg); + break; - case KERN_KDTHRMAP: - ret = kdbg_copyout_thread_map(where, sizep); - break; + case KERN_KDCPUMAP: + ret = kdbg_readcpumap(where, sizep); + break; - case KERN_KDSET_TYPEFILTER: { - ret = kdbg_copyin_typefilter(where, size); - break; - } + case KERN_KDTHRMAP: + ret = kdbg_copyout_thread_map(where, sizep); + break; - case KERN_KDTEST: - ret = kdbg_test(size); - break; + case KERN_KDSET_TYPEFILTER: { + ret = kdbg_copyin_typefilter(where, size); + break; + } - default: - ret = EINVAL; - break; + case KERN_KDTEST: + ret = kdbg_test(size); + break; + + default: + ret = EINVAL; + break; } out: ktrace_unlock(); @@ -3656,13 +3687,14 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx, uin boolean_t wrapped = FALSE; assert(number); - count = *number/sizeof(kd_buf); + count = *number / sizeof(kd_buf); *number = 0; ktrace_assert_lock_held(); - if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0) + if (count == 0 || !(kd_ctrl_page.kdebug_flags & KDBG_BUFINIT) || kdcopybuf == 0) { return EINVAL; + } thread_set_eager_preempt(current_thread()); @@ -3697,8 +3729,9 @@ kdbg_read(user_addr_t buffer, size_t *number, vnode_t vp, vfs_context_t ctx, uin */ wrapped = disable_wrap(&old_kdebug_slowcheck, &old_kdebug_flags); - if (count > nkdbufs) + if (count > nkdbufs) { count = nkdbufs; + } if ((tempbuf_count = count) > KDCOPYBUF_COUNT) { tempbuf_count = KDCOPYBUF_COUNT; @@ -3783,7 +3816,7 @@ next_event: * stolen storage unit. */ uint64_t lost_time = - kdbg_get_timestamp(&kdsp_actual->kds_records[0]); + kdbg_get_timestamp(&kdsp_actual->kds_records[0]); if (kd_ctrl_page.oldest_time < lost_time) { /* * If this is the first time we've seen lost events for @@ -3801,10 +3834,10 @@ next_event: if ((t > barrier_max) && (barrier_max > 0)) { if (kdbg_debug) { printf("kdebug: FUTURE EVENT: debugid %#8x: " - "time %lld from CPU %u " - "(barrier at time %lld, read %lu events)\n", - kdsp_actual->kds_records[rcursor].debugid, - t, cpu, barrier_max, *number + tempbuf_number); + "time %lld from CPU %u " + "(barrier at time %lld, read %lu events)\n", + kdsp_actual->kds_records[rcursor].debugid, + t, cpu, barrier_max, *number + tempbuf_number); } /* * Need to flush IOPs again before we can sort any more @@ -3885,8 +3918,9 @@ next_event: /* Copy earliest event into merged events scratch buffer. */ *tempbuf = kdsp_actual->kds_records[kdsp_actual->kds_readlast++]; - if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT) + if (kdsp_actual->kds_readlast == EVENTS_PER_STORAGE_UNIT) { release_storage_unit(min_cpu, kdsp.raw); + } /* * Watch for out of order timestamps (from IOPs). 
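As the trailing comment of the hunk above notes, kdbg_read() has to merge per-CPU storage units that are each ordered internally but interleave arbitrarily across CPUs and IOPs: every pass picks the CPU whose next unread record carries the earliest timestamp, and records past barrier_max are deferred until the IOPs are flushed again. The following is a minimal sketch of that selection step only, using a hypothetical cpu_queue type in place of the kernel's kd_bufinfo/kd_storage machinery (an illustration under those assumptions, not the kernel implementation):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical stand-in for one CPU's buffered trace records. */
struct cpu_queue {
	const uint64_t *timestamps; /* per-CPU records, already in timestamp order */
	size_t read;                /* index of the next unread record */
	size_t count;               /* number of records buffered for this CPU */
};

/*
 * Return the CPU whose next unread record has the earliest timestamp,
 * or -1 if every queue is drained. Records newer than barrier_max
 * (when the barrier is active) are skipped, mirroring how kdbg_read()
 * defers "future" IOP events until the next flush.
 */
static int
earliest_cpu(const struct cpu_queue *queues, int ncpus, uint64_t barrier_max)
{
	int min_cpu = -1;
	uint64_t min_time = UINT64_MAX;

	for (int cpu = 0; cpu < ncpus; cpu++) {
		const struct cpu_queue *q = &queues[cpu];
		if (q->read == q->count) {
			continue; /* this CPU's buffer is fully consumed */
		}
		uint64_t t = q->timestamps[q->read];
		if (barrier_max > 0 && t > barrier_max) {
			continue; /* future event: needs another IOP flush */
		}
		if (t < min_time) {
			min_time = t;
			min_cpu = cpu;
		}
	}
	return min_cpu;
}

The real loop additionally hands an exhausted storage unit back to the free pool via release_storage_unit() once its last record is consumed, and restarts the scan when a future-timestamped event forces another IOP flush.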
@@ -3915,8 +3949,9 @@ nextevent: tempbuf_number++; tempbuf++; - if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE) + if ((RAW_file_written += sizeof(kd_buf)) >= RAW_FLUSH_SIZE) { break; + } } if (tempbuf_number) { /* @@ -3928,12 +3963,13 @@ nextevent: kd_ctrl_page.oldest_time = latest_time; } if (file_version == RAW_VERSION3) { - if ( !(kdbg_write_v3_event_chunk_header(buffer, V3_RAW_EVENTS, (tempbuf_number * sizeof(kd_buf)), vp, ctx))) { + if (!(kdbg_write_v3_event_chunk_header(buffer, V3_RAW_EVENTS, (tempbuf_number * sizeof(kd_buf)), vp, ctx))) { error = EFAULT; goto check_error; } - if (buffer) + if (buffer) { buffer += (sizeof(kd_chunk_header_v3) + sizeof(uint64_t)); + } assert(count >= (sizeof(kd_chunk_header_v3) + sizeof(uint64_t))); count -= (sizeof(kd_chunk_header_v3) + sizeof(uint64_t)); @@ -3942,8 +3978,9 @@ nextevent: if (vp) { size_t write_size = tempbuf_number * sizeof(kd_buf); error = kdbg_write_to_vnode((caddr_t)kdcopybuf, write_size, vp, ctx, RAW_file_offset); - if (!error) + if (!error) { RAW_file_offset += write_size; + } if (RAW_file_written >= RAW_FLUSH_SIZE) { error = VNOP_FSYNC(vp, MNT_NOWAIT, ctx); @@ -3963,20 +4000,22 @@ check_error: count -= tempbuf_number; *number += tempbuf_number; } - if (out_of_events == TRUE) - /* - * all trace buffers are empty - */ - break; + if (out_of_events == TRUE) { + /* + * all trace buffers are empty + */ + break; + } - if ((tempbuf_count = count) > KDCOPYBUF_COUNT) - tempbuf_count = KDCOPYBUF_COUNT; + if ((tempbuf_count = count) > KDCOPYBUF_COUNT) { + tempbuf_count = KDCOPYBUF_COUNT; + } } - if ( !(old_kdebug_flags & KDBG_NOWRAP)) { + if (!(old_kdebug_flags & KDBG_NOWRAP)) { enable_wrap(old_kdebug_slowcheck); } thread_clear_eager_preempt(current_thread()); - return (error); + return error; } static int @@ -4028,12 +4067,12 @@ kdbg_test(size_t flavor) /* ensure old timestamps are not emitted from kernel_debug_enter */ kernel_debug_enter(dummy_iop, KDEBUG_TEST_CODE(code), - 100 /* very old timestamp */, 0, 0, 0, - 0, (uintptr_t)thread_tid(current_thread())); + 100 /* very old timestamp */, 0, 0, 0, + 0, (uintptr_t)thread_tid(current_thread())); code++; kernel_debug_enter(dummy_iop, KDEBUG_TEST_CODE(code), - kdbg_timestamp(), 0, 0, 0, 0, - (uintptr_t)thread_tid(current_thread())); + kdbg_timestamp(), 0, 0, 0, 0, + (uintptr_t)thread_tid(current_thread())); code++; break; @@ -4082,7 +4121,7 @@ kdbg_set_typefilter_string(const char *filter_desc) typefilter_allow_class(kdbg_typefilter, DBG_TRACE); /* if the filter description starts with a number, assume it's a csc */ - if (filter_desc[0] >= '0' && filter_desc[0] <= '9'){ + if (filter_desc[0] >= '0' && filter_desc[0] <= '9') { unsigned long csc = strtoul(filter_desc, NULL, 0); if (filter_desc != end && csc <= KDBG_CSC_MAX) { typefilter_allow_csc(kdbg_typefilter, csc); @@ -4106,24 +4145,24 @@ kdbg_set_typefilter_string(const char *filter_desc) } switch (filter_type) { - case 'C': - if (allow_value <= KDBG_CLASS_MAX) { - typefilter_allow_class(kdbg_typefilter, allow_value); - } else { - /* illegal class */ - return; - } - break; - case 'S': - if (allow_value <= KDBG_CSC_MAX) { - typefilter_allow_csc(kdbg_typefilter, allow_value); - } else { - /* illegal class subclass */ - return; - } - break; - default: + case 'C': + if (allow_value <= KDBG_CLASS_MAX) { + typefilter_allow_class(kdbg_typefilter, allow_value); + } else { + /* illegal class */ + return; + } + break; + case 'S': + if (allow_value <= KDBG_CSC_MAX) { + typefilter_allow_csc(kdbg_typefilter, allow_value); + } else { + /* 
illegal class subclass */ return; + } + break; + default: + return; } /* advance to next filter entry */ @@ -4140,7 +4179,7 @@ kdbg_set_typefilter_string(const char *filter_desc) */ void kdebug_trace_start(unsigned int n_events, const char *filter_desc, - boolean_t wrapping, boolean_t at_wake) + boolean_t wrapping, boolean_t at_wake) { if (!n_events) { kd_early_done = true; @@ -4189,7 +4228,7 @@ kdebug_trace_start(unsigned int n_events, const char *filter_desc, } kdbg_set_tracing_enabled(TRUE, KDEBUG_ENABLE_TRACE | (kdebug_serial ? - KDEBUG_ENABLE_SERIAL : 0)); + KDEBUG_ENABLE_SERIAL : 0)); if (!at_wake) { /* @@ -4206,7 +4245,7 @@ kdebug_trace_start(unsigned int n_events, const char *filter_desc, #if KDEBUG_MOJO_TRACE if (kdebug_serial) { printf("serial output enabled with %lu named events\n", - sizeof(kd_events)/sizeof(kd_event_t)); + sizeof(kd_events) / sizeof(kd_event_t)); } #endif /* KDEBUG_MOJO_TRACE */ @@ -4275,11 +4314,11 @@ kdbg_dump_trace_to_file(const char *filename) .arg5 = thread_tid(current_thread()), }; kdbg_set_timestamp_and_cpu(&end_event, kdbg_timestamp(), - cpu_number()); + cpu_number()); /* this is best effort -- ignore any errors */ (void)kdbg_write_to_vnode((caddr_t)&end_event, sizeof(kd_buf), vp, ctx, - RAW_file_offset); + RAW_file_offset); out_close: vnode_close(vp, FWRITE, ctx); @@ -4305,21 +4344,21 @@ kdbg_sysctl_continuous SYSCTL_HANDLER_ARGS } SYSCTL_NODE(_kern, OID_AUTO, kdbg, CTLFLAG_RD | CTLFLAG_LOCKED, 0, - "kdbg"); + "kdbg"); SYSCTL_PROC(_kern_kdbg, OID_AUTO, experimental_continuous, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, - sizeof(int), kdbg_sysctl_continuous, "I", - "Set kdebug to use mach_continuous_time"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, + sizeof(int), kdbg_sysctl_continuous, "I", + "Set kdebug to use mach_continuous_time"); SYSCTL_INT(_kern_kdbg, OID_AUTO, debug, - CTLFLAG_RW | CTLFLAG_LOCKED, - &kdbg_debug, 0, "Set kdebug debug mode"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &kdbg_debug, 0, "Set kdebug debug mode"); SYSCTL_QUAD(_kern_kdbg, OID_AUTO, oldest_time, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - &kd_ctrl_page.oldest_time, - "Find the oldest timestamp still in trace"); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + &kd_ctrl_page.oldest_time, + "Find the oldest timestamp still in trace"); #if KDEBUG_MOJO_TRACE static kd_event_t * @@ -4328,28 +4367,28 @@ binary_search(uint32_t id) int low, high, mid; low = 0; - high = sizeof(kd_events)/sizeof(kd_event_t) - 1; + high = (int)(sizeof(kd_events) / sizeof(kd_event_t)) - 1; - while (TRUE) - { + while (TRUE) { mid = (low + high) / 2; - if (low > high) + if (low > high) { return NULL; /* failed */ - else if ( low + 1 >= high) { + } else if (low + 1 >= high) { /* We have a match */ - if (kd_events[high].id == id) + if (kd_events[high].id == id) { return &kd_events[high]; - else if (kd_events[low].id == id) + } else if (kd_events[low].id == id) { return &kd_events[low]; - else + } else { return NULL; /* search failed */ - } - else if (id < kd_events[mid].id) + } + } else if (id < kd_events[mid].id) { high = mid; - else + } else { low = mid; - } + } + } } /* @@ -4357,47 +4396,48 @@ binary_search(uint32_t id) * Using a per-cpu cache of a single entry * before resorting to a binary search of the full table. 
*/ -#define NCACHE 1 -static kd_event_t *last_hit[MAX_CPUS]; +#define NCACHE 1 +static kd_event_t *last_hit[MAX_CPUS]; static kd_event_t * event_lookup_cache(uint32_t cpu, uint32_t id) { - if (last_hit[cpu] == NULL || last_hit[cpu]->id != id) + if (last_hit[cpu] == NULL || last_hit[cpu]->id != id) { last_hit[cpu] = binary_search(id); + } return last_hit[cpu]; } -static uint64_t kd_last_timstamp; +static uint64_t kd_last_timstamp; static void kdebug_serial_print( - uint32_t cpunum, - uint32_t debugid, - uint64_t timestamp, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4, - uintptr_t threadid + uint32_t cpunum, + uint32_t debugid, + uint64_t timestamp, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4, + uintptr_t threadid ) { - char kprintf_line[192]; - char event[40]; - uint64_t us = timestamp / NSEC_PER_USEC; - uint64_t us_tenth = (timestamp % NSEC_PER_USEC) / 100; - uint64_t delta = timestamp - kd_last_timstamp; - uint64_t delta_us = delta / NSEC_PER_USEC; - uint64_t delta_us_tenth = (delta % NSEC_PER_USEC) / 100; - uint32_t event_id = debugid & KDBG_EVENTID_MASK; - const char *command; - const char *bra; - const char *ket; - kd_event_t *ep; + char kprintf_line[192]; + char event[40]; + uint64_t us = timestamp / NSEC_PER_USEC; + uint64_t us_tenth = (timestamp % NSEC_PER_USEC) / 100; + uint64_t delta = timestamp - kd_last_timstamp; + uint64_t delta_us = delta / NSEC_PER_USEC; + uint64_t delta_us_tenth = (delta % NSEC_PER_USEC) / 100; + uint32_t event_id = debugid & KDBG_EVENTID_MASK; + const char *command; + const char *bra; + const char *ket; + kd_event_t *ep; /* event time and delta from last */ snprintf(kprintf_line, sizeof(kprintf_line), - "%11llu.%1llu %8llu.%1llu ", - us, us_tenth, delta_us, delta_us_tenth); + "%11llu.%1llu %8llu.%1llu ", + us, us_tenth, delta_us, delta_us_tenth); /* event (id or name) - start prefixed by "[", end postfixed by "]" */ @@ -4405,60 +4445,62 @@ kdebug_serial_print( ket = (debugid & DBG_FUNC_END) ? "]" : " "; ep = event_lookup_cache(cpunum, event_id); if (ep) { - if (strlen(ep->name) < sizeof(event) - 3) + if (strlen(ep->name) < sizeof(event) - 3) { snprintf(event, sizeof(event), "%s%s%s", - bra, ep->name, ket); - else + bra, ep->name, ket); + } else { snprintf(event, sizeof(event), "%s%x(name too long)%s", - bra, event_id, ket); + bra, event_id, ket); + } } else { snprintf(event, sizeof(event), "%s%x%s", - bra, event_id, ket); + bra, event_id, ket); } snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - "%-40s ", event); + sizeof(kprintf_line) - strlen(kprintf_line), + "%-40s ", event); /* arg1 .. 
arg4 with special cases for strings */ switch (event_id) { - case VFS_LOOKUP: - case VFS_LOOKUP_DONE: + case VFS_LOOKUP: + case VFS_LOOKUP_DONE: if (debugid & DBG_FUNC_START) { /* arg1 hex then arg2..arg4 chars */ snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - "%-16lx %-8s%-8s%-8s ", - arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4); + sizeof(kprintf_line) - strlen(kprintf_line), + "%-16lx %-8s%-8s%-8s ", + arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4); break; } - /* else fall through for arg1..arg4 chars */ - case TRACE_STRING_EXEC: - case TRACE_STRING_NEWTHREAD: - case TRACE_INFO_STRING: + /* else fall through for arg1..arg4 chars */ + case TRACE_STRING_EXEC: + case TRACE_STRING_NEWTHREAD: + case TRACE_INFO_STRING: snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - "%-8s%-8s%-8s%-8s ", - (char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4); + sizeof(kprintf_line) - strlen(kprintf_line), + "%-8s%-8s%-8s%-8s ", + (char*)&arg1, (char*)&arg2, (char*)&arg3, (char*)&arg4); break; - default: + default: snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - "%-16lx %-16lx %-16lx %-16lx", - arg1, arg2, arg3, arg4); + sizeof(kprintf_line) - strlen(kprintf_line), + "%-16lx %-16lx %-16lx %-16lx", + arg1, arg2, arg3, arg4); } /* threadid, cpu and command name */ if (threadid == (uintptr_t)thread_tid(current_thread()) && current_proc() && - current_proc()->p_comm[0]) + current_proc()->p_comm[0]) { command = current_proc()->p_comm; - else + } else { command = "-"; + } snprintf(kprintf_line + strlen(kprintf_line), - sizeof(kprintf_line) - strlen(kprintf_line), - " %-16lx %-2d %s\n", - threadid, cpunum, command); - + sizeof(kprintf_line) - strlen(kprintf_line), + " %-16lx %-2d %s\n", + threadid, cpunum, command); + kprintf("%s", kprintf_line); kd_last_timstamp = timestamp; } diff --git a/bsd/kern/kern_acct.c b/bsd/kern/kern_acct.c index 8bca8fa41..b9a3dde38 100644 --- a/bsd/kern/kern_acct.c +++ b/bsd/kern/kern_acct.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -68,7 +68,7 @@ /* HISTORY * 08-May-95 Mac Gillon (mgillon) at NeXT * Purged old history - * New version based on 4.4 + * New version based on 4.4 */ /* * NOTICE: This file was modified by SPARTA, Inc. 
in 2005 to introduce @@ -113,9 +113,9 @@ * The former's operation is described in Leffler, et al., and the latter * was provided by UCB with the 4.4BSD-Lite release */ -comp_t encode_comp_t(uint32_t, uint32_t); -void acctwatch(void *); -void acct_init(void); +comp_t encode_comp_t(uint32_t, uint32_t); +void acctwatch(void *); +void acct_init(void); /* * Accounting vnode pointer, and suspended accounting vnode pointer. States @@ -128,15 +128,15 @@ void acct_init(void); * NULL !NULL Accounting enabled, but suspended * !NULL !NULL */ -struct vnode *acctp; -struct vnode *suspend_acctp; +struct vnode *acctp; +struct vnode *suspend_acctp; /* * Values associated with enabling and disabling accounting */ -int acctsuspend = 2; /* stop accounting when < 2% free space left */ -int acctresume = 4; /* resume when free space risen to > 4% */ -int acctchkfreq = 15; /* frequency (in seconds) to check space */ +int acctsuspend = 2; /* stop accounting when < 2% free space left */ +int acctresume = 4; /* resume when free space risen to > 4% */ +int acctchkfreq = 15; /* frequency (in seconds) to check space */ static lck_grp_t *acct_subsys_lck_grp; @@ -162,13 +162,14 @@ acct(proc_t p, struct acct_args *uap, __unused int *retval) { struct nameidata nd; int error; - struct vfs_context *ctx; + struct vfs_context *ctx; ctx = vfs_context_current(); /* Make sure that the caller is root. */ - if ((error = suser(vfs_context_ucred(ctx), &p->p_acflag))) - return (error); + if ((error = suser(vfs_context_ucred(ctx), &p->p_acflag))) { + return error; + } /* * If accounting is to be started to a file, open that file for @@ -176,28 +177,30 @@ acct(proc_t p, struct acct_args *uap, __unused int *retval) */ if (uap->path != USER_ADDR_NULL) { NDINIT(&nd, LOOKUP, OP_OPEN, NOFOLLOW, UIO_USERSPACE, uap->path, ctx); - if ((error = vn_open(&nd, FWRITE, 0))) - return (error); + if ((error = vn_open(&nd, FWRITE, 0))) { + return error; + } #if CONFIG_MACF error = mac_system_check_acct(vfs_context_ucred(ctx), nd.ni_vp); if (error) { vnode_put(nd.ni_vp); vn_close(nd.ni_vp, FWRITE, ctx); - return (error); + return error; } #endif vnode_put(nd.ni_vp); if (nd.ni_vp->v_type != VREG) { vn_close(nd.ni_vp, FWRITE, ctx); - return (EACCES); + return EACCES; } } #if CONFIG_MACF else { error = mac_system_check_acct(vfs_context_ucred(ctx), NULL); - if (error) - return (error); + if (error) { + return error; + } } #endif @@ -209,13 +212,13 @@ acct(proc_t p, struct acct_args *uap, __unused int *retval) if (acctp != NULLVP || suspend_acctp != NULLVP) { untimeout(acctwatch, NULL); error = vn_close((acctp != NULLVP ? 
acctp : suspend_acctp), - FWRITE, vfs_context_current()); + FWRITE, vfs_context_current()); acctp = suspend_acctp = NULLVP; } if (uap->path == USER_ADDR_NULL) { ACCT_SUBSYS_UNLOCK(); - return (error); + return error; } /* @@ -226,7 +229,7 @@ acct(proc_t p, struct acct_args *uap, __unused int *retval) ACCT_SUBSYS_UNLOCK(); acctwatch(NULL); - return (error); + return error; } /* @@ -253,7 +256,7 @@ acct_process(proc_t p) vp = acctp; if (vp == NULLVP) { ACCT_SUBSYS_UNLOCK(); - return (0); + return 0; } /* @@ -282,10 +285,11 @@ acct_process(proc_t p) tmp = ut; timevaladd(&tmp, &st); t = tmp.tv_sec * hz + tmp.tv_usec / tick; - if (t) + if (t) { an_acct.ac_mem = (r->ru_ixrss + r->ru_idrss + r->ru_isrss) / t; - else + } else { an_acct.ac_mem = 0; + } /* (5) The number of disk I/O operations done */ an_acct.ac_io = encode_comp_t(r->ru_inblock + r->ru_oublock, 0); @@ -297,17 +301,19 @@ acct_process(proc_t p) an_acct.ac_gid = kauth_cred_getrgid(safecred); /* (7) The terminal from which the process was started */ - + sessp = proc_session(p); if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) && ((tp = SESSION_TP(sessp)) != TTY_NULL)) { tty_lock(tp); an_acct.ac_tty = tp->t_dev; tty_unlock(tp); - }else + } else { an_acct.ac_tty = NODEV; + } - if (sessp != SESSION_NULL) + if (sessp != SESSION_NULL) { session_rele(sessp); + } /* (8) The boolean flags that tell how the process terminated, etc. */ an_acct.ac_flag = p->p_acflag; @@ -316,16 +322,16 @@ acct_process(proc_t p) * Now, just write the accounting information to the file. */ if ((error = vnode_getwithref(vp)) == 0) { - error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&an_acct, sizeof (an_acct), - (off_t)0, UIO_SYSSPACE, IO_APPEND|IO_UNIT, safecred, - (int *)0, p); + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&an_acct, sizeof(an_acct), + (off_t)0, UIO_SYSSPACE, IO_APPEND | IO_UNIT, safecred, + (int *)0, p); vnode_put(vp); } kauth_cred_unref(&safecred); ACCT_SUBSYS_UNLOCK(); - return (error); + return error; } /* @@ -334,9 +340,9 @@ acct_process(proc_t p) * Leffler, et al., on page 63. */ -#define MANTSIZE 13 /* 13 bit mantissa. */ -#define EXPSIZE 3 /* Base 8 (3 bit) exponent. */ -#define MAXFRACT ((1 << MANTSIZE) - 1) /* Maximum fractional value. */ +#define MANTSIZE 13 /* 13 bit mantissa. */ +#define EXPSIZE 3 /* Base 8 (3 bit) exponent. */ +#define MAXFRACT ((1 << MANTSIZE) - 1) /* Maximum fractional value. */ comp_t encode_comp_t(uint32_t s, uint32_t us) @@ -346,11 +352,11 @@ encode_comp_t(uint32_t s, uint32_t us) exp = 0; rnd = 0; s *= AHZ; - s += us / (1000000 / AHZ); /* Maximize precision. */ + s += us / (1000000 / AHZ); /* Maximize precision. */ while (s > MAXFRACT) { - rnd = s & (1 << (EXPSIZE - 1)); /* Round up? */ - s >>= EXPSIZE; /* Base 8 exponent == 3 bit shift. */ + rnd = s & (1 << (EXPSIZE - 1)); /* Round up? */ + s >>= EXPSIZE; /* Base 8 exponent == 3 bit shift. */ exp++; } @@ -361,9 +367,9 @@ encode_comp_t(uint32_t s, uint32_t us) } /* Clean it up and polish it off. */ - exp <<= MANTSIZE; /* Shift the exponent into place */ - exp += s; /* and add on the mantissa. */ - return (exp); + exp <<= MANTSIZE; /* Shift the exponent into place */ + exp += s; /* and add on the mantissa. */ + return exp; } /* @@ -425,6 +431,6 @@ acctwatch(__unused void *a) return; } ACCT_SUBSYS_UNLOCK(); - + timeout(acctwatch, NULL, acctchkfreq * hz); } diff --git a/bsd/kern/kern_aio.c b/bsd/kern/kern_aio.c index b0a82bb82..ee523dff5 100644 --- a/bsd/kern/kern_aio.c +++ b/bsd/kern/kern_aio.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2016 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,11 +30,11 @@ /* * todo: * 1) ramesh is looking into how to replace taking a reference on - * the user's map (vm_map_reference()) since it is believed that + * the user's map (vm_map_reference()) since it is believed that * would not hold the process for us. * 2) david is looking into a way for us to set the priority of the - * worker threads to match that of the user's thread when the - * async IO was queued. + * worker threads to match that of the user's thread when the + * async IO was queued. */ @@ -73,78 +73,76 @@ #include #include -#define AIO_work_queued 1 -#define AIO_worker_wake 2 -#define AIO_completion_sig 3 -#define AIO_completion_cleanup_wait 4 -#define AIO_completion_cleanup_wake 5 -#define AIO_completion_suspend_wake 6 -#define AIO_fsync_delay 7 -#define AIO_cancel 10 -#define AIO_cancel_async_workq 11 -#define AIO_cancel_sync_workq 12 -#define AIO_cancel_activeq 13 -#define AIO_cancel_doneq 14 -#define AIO_fsync 20 -#define AIO_read 30 -#define AIO_write 40 -#define AIO_listio 50 -#define AIO_error 60 -#define AIO_error_val 61 -#define AIO_error_activeq 62 -#define AIO_error_workq 63 -#define AIO_return 70 -#define AIO_return_val 71 -#define AIO_return_activeq 72 -#define AIO_return_workq 73 -#define AIO_exec 80 -#define AIO_exit 90 -#define AIO_exit_sleep 91 -#define AIO_close 100 -#define AIO_close_sleep 101 -#define AIO_suspend 110 -#define AIO_suspend_sleep 111 -#define AIO_worker_thread 120 +#define AIO_work_queued 1 +#define AIO_worker_wake 2 +#define AIO_completion_sig 3 +#define AIO_completion_cleanup_wait 4 +#define AIO_completion_cleanup_wake 5 +#define AIO_completion_suspend_wake 6 +#define AIO_fsync_delay 7 +#define AIO_cancel 10 +#define AIO_cancel_async_workq 11 +#define AIO_cancel_sync_workq 12 +#define AIO_cancel_activeq 13 +#define AIO_cancel_doneq 14 +#define AIO_fsync 20 +#define AIO_read 30 +#define AIO_write 40 +#define AIO_listio 50 +#define AIO_error 60 +#define AIO_error_val 61 +#define AIO_error_activeq 62 +#define AIO_error_workq 63 +#define AIO_return 70 +#define AIO_return_val 71 +#define AIO_return_activeq 72 +#define AIO_return_workq 73 +#define AIO_exec 80 +#define AIO_exit 90 +#define AIO_exit_sleep 91 +#define AIO_close 100 +#define AIO_close_sleep 101 +#define AIO_suspend 110 +#define AIO_suspend_sleep 111 +#define AIO_worker_thread 120 #if 0 #undef KERNEL_DEBUG #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT #endif -/* - * aio requests queue up on the aio_async_workq or 
lio_sync_workq (for - * lio_listio LIO_WAIT). Requests then move to the per process aio_activeq - * (proc.aio_activeq) when one of our worker threads start the IO. +/* + * aio requests queue up on the aio_async_workq or lio_sync_workq (for + * lio_listio LIO_WAIT). Requests then move to the per process aio_activeq + * (proc.aio_activeq) when one of our worker threads start the IO. * And finally, requests move to the per process aio_doneq (proc.aio_doneq) - * when the IO request completes. The request remains on aio_doneq until - * user process calls aio_return or the process exits, either way that is our - * trigger to release aio resources. + * when the IO request completes. The request remains on aio_doneq until + * user process calls aio_return or the process exits, either way that is our + * trigger to release aio resources. */ typedef struct aio_workq { - TAILQ_HEAD(, aio_workq_entry) aioq_entries; - int aioq_count; - lck_mtx_t aioq_mtx; - struct waitq aioq_waitq; + TAILQ_HEAD(, aio_workq_entry) aioq_entries; + int aioq_count; + lck_mtx_t aioq_mtx; + struct waitq aioq_waitq; } *aio_workq_t; #define AIO_NUM_WORK_QUEUES 1 -struct aio_anchor_cb -{ - volatile int32_t aio_inflight_count; /* entries that have been taken from a workq */ - volatile int32_t aio_done_count; /* entries on all done queues (proc.aio_doneq) */ - volatile int32_t aio_total_count; /* total extant entries */ - +struct aio_anchor_cb { + volatile int32_t aio_inflight_count; /* entries that have been taken from a workq */ + volatile int32_t aio_done_count; /* entries on all done queues (proc.aio_doneq) */ + volatile int32_t aio_total_count; /* total extant entries */ + /* Hash table of queues here */ - int aio_num_workqs; - struct aio_workq aio_async_workqs[AIO_NUM_WORK_QUEUES]; + int aio_num_workqs; + struct aio_workq aio_async_workqs[AIO_NUM_WORK_QUEUES]; }; typedef struct aio_anchor_cb aio_anchor_cb; -struct aio_lio_context -{ - int io_waiter; - int io_issued; - int io_completed; +struct aio_lio_context { + int io_waiter; + int io_issued; + int io_completed; }; typedef struct aio_lio_context aio_lio_context; @@ -156,80 +154,80 @@ typedef struct aio_lio_context aio_lio_context; * At this time, for binary compatibility reasons, we cannot create new proc fields. 
*/ #define AIO_SUSPEND_SLEEP_CHAN p_aio_active_count -#define AIO_CLEANUP_SLEEP_CHAN p_aio_total_count +#define AIO_CLEANUP_SLEEP_CHAN p_aio_total_count -#define ASSERT_AIO_FROM_PROC(aiop, theproc) \ - if ((aiop)->procp != (theproc)) { \ - panic("AIO on a proc list that does not belong to that proc.\n"); \ +#define ASSERT_AIO_FROM_PROC(aiop, theproc) \ + if ((aiop)->procp != (theproc)) { \ + panic("AIO on a proc list that does not belong to that proc.\n"); \ } /* * LOCAL PROTOTYPES */ -static void aio_proc_lock(proc_t procp); -static void aio_proc_lock_spin(proc_t procp); -static void aio_proc_unlock(proc_t procp); -static lck_mtx_t* aio_proc_mutex(proc_t procp); -static void aio_proc_move_done_locked(proc_t procp, aio_workq_entry *entryp); -static void aio_proc_remove_done_locked(proc_t procp, aio_workq_entry *entryp); -static int aio_get_process_count(proc_t procp ); -static int aio_active_requests_for_process(proc_t procp ); -static int aio_proc_active_requests_for_file(proc_t procp, int fd); -static boolean_t is_already_queued(proc_t procp, user_addr_t aiocbp ); -static boolean_t should_cancel(aio_workq_entry *entryp, user_addr_t aiocbp, int fd); - -static void aio_entry_lock(aio_workq_entry *entryp); -static void aio_entry_lock_spin(aio_workq_entry *entryp); -static aio_workq_t aio_entry_workq(aio_workq_entry *entryp); -static lck_mtx_t* aio_entry_mutex(__unused aio_workq_entry *entryp); -static void aio_workq_remove_entry_locked(aio_workq_t queue, aio_workq_entry *entryp); -static void aio_workq_add_entry_locked(aio_workq_t queue, aio_workq_entry *entryp); -static void aio_entry_ref_locked(aio_workq_entry *entryp); -static void aio_entry_unref_locked(aio_workq_entry *entryp); -static void aio_entry_ref(aio_workq_entry *entryp); -static void aio_entry_unref(aio_workq_entry *entryp); -static void aio_entry_update_for_cancel(aio_workq_entry *entryp, boolean_t cancelled, - int wait_for_completion, boolean_t disable_notification); -static int aio_entry_try_workq_remove(aio_workq_entry *entryp); -static boolean_t aio_delay_fsync_request( aio_workq_entry *entryp ); -static int aio_free_request(aio_workq_entry *entryp); - -static void aio_workq_init(aio_workq_t wq); -static void aio_workq_lock_spin(aio_workq_t wq); -static void aio_workq_unlock(aio_workq_t wq); -static lck_mtx_t* aio_workq_mutex(aio_workq_t wq); - -static void aio_work_thread( void ); +static void aio_proc_lock(proc_t procp); +static void aio_proc_lock_spin(proc_t procp); +static void aio_proc_unlock(proc_t procp); +static lck_mtx_t* aio_proc_mutex(proc_t procp); +static void aio_proc_move_done_locked(proc_t procp, aio_workq_entry *entryp); +static void aio_proc_remove_done_locked(proc_t procp, aio_workq_entry *entryp); +static int aio_get_process_count(proc_t procp ); +static int aio_active_requests_for_process(proc_t procp ); +static int aio_proc_active_requests_for_file(proc_t procp, int fd); +static boolean_t is_already_queued(proc_t procp, user_addr_t aiocbp ); +static boolean_t should_cancel(aio_workq_entry *entryp, user_addr_t aiocbp, int fd); + +static void aio_entry_lock(aio_workq_entry *entryp); +static void aio_entry_lock_spin(aio_workq_entry *entryp); +static aio_workq_t aio_entry_workq(aio_workq_entry *entryp); +static lck_mtx_t* aio_entry_mutex(__unused aio_workq_entry *entryp); +static void aio_workq_remove_entry_locked(aio_workq_t queue, aio_workq_entry *entryp); +static void aio_workq_add_entry_locked(aio_workq_t queue, aio_workq_entry *entryp); +static void aio_entry_ref_locked(aio_workq_entry *entryp); 
+static void aio_entry_unref_locked(aio_workq_entry *entryp); +static void aio_entry_ref(aio_workq_entry *entryp); +static void aio_entry_unref(aio_workq_entry *entryp); +static void aio_entry_update_for_cancel(aio_workq_entry *entryp, boolean_t cancelled, + int wait_for_completion, boolean_t disable_notification); +static int aio_entry_try_workq_remove(aio_workq_entry *entryp); +static boolean_t aio_delay_fsync_request( aio_workq_entry *entryp ); +static int aio_free_request(aio_workq_entry *entryp); + +static void aio_workq_init(aio_workq_t wq); +static void aio_workq_lock_spin(aio_workq_t wq); +static void aio_workq_unlock(aio_workq_t wq); +static lck_mtx_t* aio_workq_mutex(aio_workq_t wq); + +static void aio_work_thread( void ); static aio_workq_entry *aio_get_some_work( void ); -static int aio_get_all_queues_count( void ); -static int aio_queue_async_request(proc_t procp, user_addr_t aiocbp, int kindOfIO ); -static int aio_validate( aio_workq_entry *entryp ); -static int aio_increment_total_count(void); -static int aio_decrement_total_count(void); - -static int do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, int wait_for_completion, boolean_t disable_notification ); -static void do_aio_completion( aio_workq_entry *entryp ); -static int do_aio_fsync( aio_workq_entry *entryp ); -static int do_aio_read( aio_workq_entry *entryp ); -static int do_aio_write( aio_workq_entry *entryp ); -static void do_munge_aiocb_user32_to_user( struct user32_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ); -static void do_munge_aiocb_user64_to_user( struct user64_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ); -static int lio_create_entry(proc_t procp, - user_addr_t aiocbp, - void *group_tag, - aio_workq_entry **entrypp ); +static int aio_get_all_queues_count( void ); +static int aio_queue_async_request(proc_t procp, user_addr_t aiocbp, int kindOfIO ); +static int aio_validate( aio_workq_entry *entryp ); +static int aio_increment_total_count(void); +static int aio_decrement_total_count(void); + +static int do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, int wait_for_completion, boolean_t disable_notification ); +static void do_aio_completion( aio_workq_entry *entryp ); +static int do_aio_fsync( aio_workq_entry *entryp ); +static int do_aio_read( aio_workq_entry *entryp ); +static int do_aio_write( aio_workq_entry *entryp ); +static void do_munge_aiocb_user32_to_user( struct user32_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ); +static void do_munge_aiocb_user64_to_user( struct user64_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ); +static int lio_create_entry(proc_t procp, + user_addr_t aiocbp, + void *group_tag, + aio_workq_entry **entrypp ); static aio_workq_entry *aio_create_queue_entry(proc_t procp, - user_addr_t aiocbp, - void *group_tag, - int kindOfIO); + user_addr_t aiocbp, + void *group_tag, + int kindOfIO); static user_addr_t *aio_copy_in_list(proc_t procp, user_addr_t aiocblist, int nent); -static void free_lio_context(aio_lio_context* context); -static void aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked); +static void free_lio_context(aio_lio_context* context); +static void aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked); -#define ASSERT_AIO_PROC_LOCK_OWNED(p) lck_mtx_assert(aio_proc_mutex((p)), LCK_MTX_ASSERT_OWNED) -#define ASSERT_AIO_WORKQ_LOCK_OWNED(q) lck_mtx_assert(aio_workq_mutex((q)), LCK_MTX_ASSERT_OWNED) -#define ASSERT_AIO_ENTRY_LOCK_OWNED(e) lck_mtx_assert(aio_entry_mutex((e)), 
LCK_MTX_ASSERT_OWNED) +#define ASSERT_AIO_PROC_LOCK_OWNED(p) lck_mtx_assert(aio_proc_mutex((p)), LCK_MTX_ASSERT_OWNED) +#define ASSERT_AIO_WORKQ_LOCK_OWNED(q) lck_mtx_assert(aio_workq_mutex((q)), LCK_MTX_ASSERT_OWNED) +#define ASSERT_AIO_ENTRY_LOCK_OWNED(e) lck_mtx_assert(aio_entry_mutex((e)), LCK_MTX_ASSERT_OWNED) /* * EXTERNAL PROTOTYPES @@ -237,35 +235,35 @@ static void aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_ /* in ...bsd/kern/sys_generic.c */ extern int dofileread(vfs_context_t ctx, struct fileproc *fp, - user_addr_t bufp, user_size_t nbyte, - off_t offset, int flags, user_ssize_t *retval ); + user_addr_t bufp, user_size_t nbyte, + off_t offset, int flags, user_ssize_t *retval ); extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp, - user_addr_t bufp, user_size_t nbyte, off_t offset, - int flags, user_ssize_t *retval ); + user_addr_t bufp, user_size_t nbyte, off_t offset, + int flags, user_ssize_t *retval ); #if DEBUG -static uint32_t lio_contexts_alloced = 0; +static uint32_t lio_contexts_alloced = 0; #endif /* DEBUG */ /* * aio external global variables. */ -extern int aio_max_requests; /* AIO_MAX - configurable */ -extern int aio_max_requests_per_process; /* AIO_PROCESS_MAX - configurable */ -extern int aio_worker_threads; /* AIO_THREAD_COUNT - configurable */ +extern int aio_max_requests; /* AIO_MAX - configurable */ +extern int aio_max_requests_per_process; /* AIO_PROCESS_MAX - configurable */ +extern int aio_worker_threads; /* AIO_THREAD_COUNT - configurable */ /* * aio static variables. */ -static aio_anchor_cb aio_anchor; -static lck_grp_t *aio_proc_lock_grp; -static lck_grp_t *aio_entry_lock_grp; -static lck_grp_t *aio_queue_lock_grp; -static lck_attr_t *aio_lock_attr; -static lck_grp_attr_t *aio_lock_grp_attr; -static struct zone *aio_workq_zonep; -static lck_mtx_t aio_entry_mtx; -static lck_mtx_t aio_proc_mtx; +static aio_anchor_cb aio_anchor; +static lck_grp_t *aio_proc_lock_grp; +static lck_grp_t *aio_entry_lock_grp; +static lck_grp_t *aio_queue_lock_grp; +static lck_attr_t *aio_lock_attr; +static lck_grp_attr_t *aio_lock_grp_attr; +static struct zone *aio_workq_zonep; +static lck_mtx_t aio_entry_mtx; +static lck_mtx_t aio_proc_mtx; static void aio_entry_lock(__unused aio_workq_entry *entryp) @@ -273,13 +271,13 @@ aio_entry_lock(__unused aio_workq_entry *entryp) lck_mtx_lock(&aio_entry_mtx); } -static void +static void aio_entry_lock_spin(__unused aio_workq_entry *entryp) { lck_mtx_lock_spin(&aio_entry_mtx); } -static void +static void aio_entry_unlock(__unused aio_workq_entry *entryp) { lck_mtx_unlock(&aio_entry_mtx); @@ -287,18 +285,18 @@ aio_entry_unlock(__unused aio_workq_entry *entryp) /* Hash */ static aio_workq_t -aio_entry_workq(__unused aio_workq_entry *entryp) +aio_entry_workq(__unused aio_workq_entry *entryp) { return &aio_anchor.aio_async_workqs[0]; } static lck_mtx_t* -aio_entry_mutex(__unused aio_workq_entry *entryp) +aio_entry_mutex(__unused aio_workq_entry *entryp) { return &aio_entry_mtx; } -static void +static void aio_workq_init(aio_workq_t wq) { TAILQ_INIT(&wq->aioq_entries); @@ -308,10 +306,10 @@ aio_workq_init(aio_workq_t wq) } -/* +/* * Can be passed a queue which is locked spin. 
*/ -static void +static void aio_workq_remove_entry_locked(aio_workq_t queue, aio_workq_entry *entryp) { ASSERT_AIO_WORKQ_LOCK_OWNED(queue); @@ -319,35 +317,35 @@ aio_workq_remove_entry_locked(aio_workq_t queue, aio_workq_entry *entryp) if (entryp->aio_workq_link.tqe_prev == NULL) { panic("Trying to remove an entry from a work queue, but it is not on a queue\n"); } - + TAILQ_REMOVE(&queue->aioq_entries, entryp, aio_workq_link); queue->aioq_count--; entryp->aio_workq_link.tqe_prev = NULL; /* Not on a workq */ - - if (queue->aioq_count < 0) { + + if (queue->aioq_count < 0) { panic("Negative count on a queue.\n"); } } -static void +static void aio_workq_add_entry_locked(aio_workq_t queue, aio_workq_entry *entryp) { ASSERT_AIO_WORKQ_LOCK_OWNED(queue); TAILQ_INSERT_TAIL(&queue->aioq_entries, entryp, aio_workq_link); - if (queue->aioq_count < 0) { + if (queue->aioq_count < 0) { panic("Negative count on a queue.\n"); } queue->aioq_count++; } -static void -aio_proc_lock(proc_t procp) +static void +aio_proc_lock(proc_t procp) { lck_mtx_lock(aio_proc_mutex(procp)); } -static void +static void aio_proc_lock_spin(proc_t procp) { lck_mtx_lock_spin(aio_proc_mutex(procp)); @@ -373,7 +371,7 @@ aio_proc_remove_done_locked(proc_t procp, aio_workq_entry *entryp) procp->p_aio_total_count--; } -static void +static void aio_proc_unlock(proc_t procp) { lck_mtx_unlock(aio_proc_mutex(procp)); @@ -385,7 +383,7 @@ aio_proc_mutex(proc_t procp) return &procp->p_mlock; } -static void +static void aio_entry_ref_locked(aio_workq_entry *entryp) { ASSERT_AIO_ENTRY_LOCK_OWNED(entryp); @@ -409,14 +407,14 @@ aio_entry_unref_locked(aio_workq_entry *entryp) } } -static void +static void aio_entry_ref(aio_workq_entry *entryp) { aio_entry_lock_spin(entryp); aio_entry_ref_locked(entryp); aio_entry_unlock(entryp); } -static void +static void aio_entry_unref(aio_workq_entry *entryp) { aio_entry_lock_spin(entryp); @@ -428,11 +426,11 @@ aio_entry_unref(aio_workq_entry *entryp) } else { aio_entry_unlock(entryp); } - + return; } -static void +static void aio_entry_update_for_cancel(aio_workq_entry *entryp, boolean_t cancelled, int wait_for_completion, boolean_t disable_notification) { aio_entry_lock_spin(entryp); @@ -442,21 +440,21 @@ aio_entry_update_for_cancel(aio_workq_entry *entryp, boolean_t cancelled, int wa entryp->errorval = ECANCELED; entryp->returnval = -1; } - - if ( wait_for_completion ) { + + if (wait_for_completion) { entryp->flags |= wait_for_completion; /* flag for special completion processing */ } - - if ( disable_notification ) { + + if (disable_notification) { entryp->flags |= AIO_DISABLE; /* Don't want a signal */ } - aio_entry_unlock(entryp); + aio_entry_unlock(entryp); } static int aio_entry_try_workq_remove(aio_workq_entry *entryp) -{ +{ /* Can only be cancelled if it's still on a work queue */ if (entryp->aio_workq_link.tqe_prev != NULL) { aio_workq_t queue; @@ -468,7 +466,7 @@ aio_entry_try_workq_remove(aio_workq_entry *entryp) aio_workq_remove_entry_locked(queue, entryp); aio_workq_unlock(queue); return 1; - } else { + } else { aio_workq_unlock(queue); } } @@ -476,13 +474,13 @@ aio_entry_try_workq_remove(aio_workq_entry *entryp) return 0; } -static void +static void aio_workq_lock_spin(aio_workq_t wq) { lck_mtx_lock_spin(aio_workq_mutex(wq)); } -static void +static void aio_workq_unlock(aio_workq_t wq) { lck_mtx_unlock(aio_workq_mutex(wq)); @@ -496,7 +494,7 @@ aio_workq_mutex(aio_workq_t wq) /* * aio_cancel - attempt to cancel one or more async IO requests currently - * outstanding against file descriptor 
uap->fd. If uap->aiocbp is not + * outstanding against file descriptor uap->fd. If uap->aiocbp is not * NULL then only one specific IO is cancelled (if possible). If uap->aiocbp * is NULL then all outstanding async IO request for the given file * descriptor are cancelled (if possible). @@ -504,11 +502,11 @@ aio_workq_mutex(aio_workq_t wq) int aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval ) { - struct user_aiocb my_aiocb; - int result; + struct user_aiocb my_aiocb; + int result; - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel)) | DBG_FUNC_START, - (int)p, (int)uap->aiocbp, 0, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, 0, 0, 0 ); /* quick check to see if there are any async IO requests queued up */ if (aio_get_all_queues_count() < 1) { @@ -516,26 +514,27 @@ aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval ) *retval = AIO_ALLDONE; goto ExitRoutine; } - - *retval = -1; - if ( uap->aiocbp != USER_ADDR_NULL ) { - if ( proc_is64bit(p) ) { + + *retval = -1; + if (uap->aiocbp != USER_ADDR_NULL) { + if (proc_is64bit(p)) { struct user64_aiocb aiocb64; - - result = copyin( uap->aiocbp, &aiocb64, sizeof(aiocb64) ); - if (result == 0 ) - do_munge_aiocb_user64_to_user(&aiocb64, &my_aiocb); + result = copyin( uap->aiocbp, &aiocb64, sizeof(aiocb64)); + if (result == 0) { + do_munge_aiocb_user64_to_user(&aiocb64, &my_aiocb); + } } else { struct user32_aiocb aiocb32; - result = copyin( uap->aiocbp, &aiocb32, sizeof(aiocb32) ); - if ( result == 0 ) + result = copyin( uap->aiocbp, &aiocb32, sizeof(aiocb32)); + if (result == 0) { do_munge_aiocb_user32_to_user( &aiocb32, &my_aiocb ); + } } - if ( result != 0 ) { - result = EAGAIN; + if (result != 0) { + result = EAGAIN; goto ExitRoutine; } @@ -543,7 +542,7 @@ aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval ) /* descriptor passed in and the file descriptor embedded in */ /* the aiocb causes unspecified results. We return EBADF in */ /* that situation. */ - if ( uap->fd != my_aiocb.aio_fildes ) { + if (uap->fd != my_aiocb.aio_fildes) { result = EBADF; goto ExitRoutine; } @@ -554,71 +553,68 @@ aio_cancel(proc_t p, struct aio_cancel_args *uap, int *retval ) ASSERT_AIO_PROC_LOCK_OWNED(p); aio_proc_unlock(p); - if ( result != -1 ) { + if (result != -1) { *retval = result; result = 0; goto ExitRoutine; } - + result = EBADF; - -ExitRoutine: - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel)) | DBG_FUNC_END, - (int)p, (int)uap->aiocbp, result, 0, 0 ); - return( result ); +ExitRoutine: + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, result, 0, 0 ); + return result; } /* aio_cancel */ /* - * _aio_close - internal function used to clean up async IO requests for - * a file descriptor that is closing. + * _aio_close - internal function used to clean up async IO requests for + * a file descriptor that is closing. * THIS MAY BLOCK. 
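Seen from the caller, the result codes aio_cancel() surfaces map directly onto the queues above: cancelled off the work queue, stuck in flight, or already done. A brief caller-side sketch, assuming only the standard <aio.h> interface:

#include <aio.h>
#include <stdio.h>

static void
report_cancel(int fd, struct aiocb *cb)
{
	/* cb == NULL cancels every outstanding request on fd */
	switch (aio_cancel(fd, cb)) {
	case AIO_CANCELED:
		printf("pulled off the work queue before a worker started it\n");
		break;
	case AIO_NOTCANCELED:
		printf("already in flight; poll aio_error() until it completes\n");
		break;
	case AIO_ALLDONE:
		printf("already on the done queue\n");
		break;
	default:
		perror("aio_cancel"); /* e.g. EBADF when fd disagrees with the aiocb */
		break;
	}
}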
*/ __private_extern__ void _aio_close(proc_t p, int fd ) { - int error; + int error; /* quick check to see if there are any async IO requests queued up */ if (aio_get_all_queues_count() < 1) { return; } - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_close)) | DBG_FUNC_START, - (int)p, fd, 0, 0, 0 ); - + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_close)) | DBG_FUNC_START, + (int)p, fd, 0, 0, 0 ); + /* cancel all async IO requests on our todo queues for this file descriptor */ aio_proc_lock(p); error = do_aio_cancel_locked( p, fd, 0, AIO_CLOSE_WAIT, FALSE ); ASSERT_AIO_PROC_LOCK_OWNED(p); - if ( error == AIO_NOTCANCELED ) { - /* - * AIO_NOTCANCELED is returned when we find an aio request for this process - * and file descriptor on the active async IO queue. Active requests cannot - * be cancelled so we must wait for them to complete. We will get a special - * wake up call on our channel used to sleep for ALL active requests to - * complete. This sleep channel (proc.AIO_CLEANUP_SLEEP_CHAN) is only used - * when we must wait for all active aio requests. + if (error == AIO_NOTCANCELED) { + /* + * AIO_NOTCANCELED is returned when we find an aio request for this process + * and file descriptor on the active async IO queue. Active requests cannot + * be cancelled so we must wait for them to complete. We will get a special + * wake up call on our channel used to sleep for ALL active requests to + * complete. This sleep channel (proc.AIO_CLEANUP_SLEEP_CHAN) is only used + * when we must wait for all active aio requests. */ - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_close_sleep)) | DBG_FUNC_NONE, - (int)p, fd, 0, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_close_sleep)) | DBG_FUNC_NONE, + (int)p, fd, 0, 0, 0 ); while (aio_proc_active_requests_for_file(p, fd) > 0) { msleep(&p->AIO_CLEANUP_SLEEP_CHAN, aio_proc_mutex(p), PRIBIO, "aio_close", 0 ); } - } - + aio_proc_unlock(p); - - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_close)) | DBG_FUNC_END, - (int)p, fd, 0, 0, 0 ); + + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_close)) | DBG_FUNC_END, + (int)p, fd, 0, 0, 0 ); return; - } /* _aio_close */ @@ -631,122 +627,121 @@ _aio_close(proc_t p, int fd ) int aio_error(proc_t p, struct aio_error_args *uap, int *retval ) { - aio_workq_entry *entryp; - int error; + aio_workq_entry *entryp; + int error; - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error)) | DBG_FUNC_START, - (int)p, (int)uap->aiocbp, 0, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_error)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, 0, 0, 0 ); /* see if there are any aios to check */ if (aio_get_all_queues_count() < 1) { return EINVAL; } - + aio_proc_lock(p); - + /* look for a match on our queue of async IO requests that have completed */ TAILQ_FOREACH( entryp, &p->p_aio_doneq, aio_proc_link) { - if ( entryp->uaiocbp == uap->aiocbp ) { + if (entryp->uaiocbp == uap->aiocbp) { ASSERT_AIO_FROM_PROC(entryp, p); aio_entry_lock_spin(entryp); *retval = entryp->errorval; error = 0; aio_entry_unlock(entryp); - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error_val)) | DBG_FUNC_NONE, - (int)p, (int)uap->aiocbp, *retval, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_error_val)) | DBG_FUNC_NONE, + (int)p, (int)uap->aiocbp, *retval, 0, 0 ); goto ExitRoutine; } } - + /* look for a match on our queue of active async IO requests */ TAILQ_FOREACH( entryp, &p->p_aio_activeq, aio_proc_link) { - if ( entryp->uaiocbp == uap->aiocbp ) { + if (entryp->uaiocbp == uap->aiocbp) { ASSERT_AIO_FROM_PROC(entryp, p); *retval = EINPROGRESS; error = 0; - 
KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error_activeq)) | DBG_FUNC_NONE, - (int)p, (int)uap->aiocbp, *retval, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_error_activeq)) | DBG_FUNC_NONE, + (int)p, (int)uap->aiocbp, *retval, 0, 0 ); goto ExitRoutine; } } error = EINVAL; - + ExitRoutine: - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_error)) | DBG_FUNC_END, - (int)p, (int)uap->aiocbp, error, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_error)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, error, 0, 0 ); aio_proc_unlock(p); - return( error ); - + return error; } /* aio_error */ /* - * aio_fsync - asynchronously force all IO operations associated - * with the file indicated by the file descriptor (uap->aiocbp->aio_fildes) and + * aio_fsync - asynchronously force all IO operations associated + * with the file indicated by the file descriptor (uap->aiocbp->aio_fildes) and * queued at the time of the call to the synchronized completion state. - * NOTE - we do not support op O_DSYNC at this point since we do not support the + * NOTE - we do not support op O_DSYNC at this point since we do not support the * fdatasync() call. */ int aio_fsync(proc_t p, struct aio_fsync_args *uap, int *retval ) { - int error; - int fsync_kind; + int error; + int fsync_kind; - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync)) | DBG_FUNC_START, - (int)p, (int)uap->aiocbp, uap->op, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, uap->op, 0, 0 ); *retval = 0; /* 0 := O_SYNC for binary backward compatibility with Panther */ - if (uap->op == O_SYNC || uap->op == 0) + if (uap->op == O_SYNC || uap->op == 0) { fsync_kind = AIO_FSYNC; - else if ( uap->op == O_DSYNC ) + } else if (uap->op == O_DSYNC) { fsync_kind = AIO_DSYNC; - else { + } else { *retval = -1; error = EINVAL; goto ExitRoutine; } - + error = aio_queue_async_request( p, uap->aiocbp, fsync_kind ); - if ( error != 0 ) + if (error != 0) { *retval = -1; + } -ExitRoutine: - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync)) | DBG_FUNC_END, - (int)p, (int)uap->aiocbp, error, 0, 0 ); - - return( error ); +ExitRoutine: + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, error, 0, 0 ); + return error; } /* aio_fsync */ -/* aio_read - asynchronously read uap->aiocbp->aio_nbytes bytes from the - * file descriptor (uap->aiocbp->aio_fildes) into the buffer +/* aio_read - asynchronously read uap->aiocbp->aio_nbytes bytes from the + * file descriptor (uap->aiocbp->aio_fildes) into the buffer * (uap->aiocbp->aio_buf). */ int aio_read(proc_t p, struct aio_read_args *uap, int *retval ) { - int error; + int error; + + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_read)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, 0, 0, 0 ); - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_read)) | DBG_FUNC_START, - (int)p, (int)uap->aiocbp, 0, 0, 0 ); - *retval = 0; error = aio_queue_async_request( p, uap->aiocbp, AIO_READ ); - if ( error != 0 ) + if (error != 0) { *retval = -1; + } - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_read)) | DBG_FUNC_END, - (int)p, (int)uap->aiocbp, error, 0, 0 ); - - return( error ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_read)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, error, 0, 0 ); + return error; } /* aio_read */ @@ -754,18 +749,18 @@ aio_read(proc_t p, struct aio_read_args *uap, int *retval ) * aio_return - return the return status associated with the async IO * request referred to by uap->aiocbp. 
The return status is the value * that would be returned by corresponding IO request (read, write, - * fdatasync, or sync). This is where we release kernel resources + * fdatasync, or sync). This is where we release kernel resources * held for async IO call associated with the given aiocb pointer. */ int aio_return(proc_t p, struct aio_return_args *uap, user_ssize_t *retval ) { - aio_workq_entry *entryp; - int error; - boolean_t proc_lock_held = FALSE; - - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return)) | DBG_FUNC_START, - (int)p, (int)uap->aiocbp, 0, 0, 0 ); + aio_workq_entry *entryp; + int error; + boolean_t proc_lock_held = FALSE; + + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_return)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, 0, 0, 0 ); /* See if there are any entries to check */ if (aio_get_all_queues_count() < 1) { @@ -780,10 +775,10 @@ aio_return(proc_t p, struct aio_return_args *uap, user_ssize_t *retval ) /* look for a match on our queue of async IO requests that have completed */ TAILQ_FOREACH( entryp, &p->p_aio_doneq, aio_proc_link) { ASSERT_AIO_FROM_PROC(entryp, p); - if ( entryp->uaiocbp == uap->aiocbp ) { + if (entryp->uaiocbp == uap->aiocbp) { /* Done and valid for aio_return(), pull it off the list */ aio_proc_remove_done_locked(p, entryp); - + /* Drop the proc lock, but keep the entry locked */ aio_entry_lock(entryp); aio_proc_unlock(p); @@ -796,79 +791,76 @@ aio_return(proc_t p, struct aio_return_args *uap, user_ssize_t *retval ) if (entryp->aio_refcount == 0) { aio_entry_unlock(entryp); aio_free_request(entryp); - } - else { + } else { /* Whoever has the refcount will have to free it */ entryp->flags |= AIO_DO_FREE; aio_entry_unlock(entryp); } - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return_val)) | DBG_FUNC_NONE, - (int)p, (int)uap->aiocbp, *retval, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_return_val)) | DBG_FUNC_NONE, + (int)p, (int)uap->aiocbp, *retval, 0, 0 ); goto ExitRoutine; } } - + /* look for a match on our queue of active async IO requests */ TAILQ_FOREACH( entryp, &p->p_aio_activeq, aio_proc_link) { ASSERT_AIO_FROM_PROC(entryp, p); - if ( entryp->uaiocbp == uap->aiocbp ) { + if (entryp->uaiocbp == uap->aiocbp) { error = EINPROGRESS; - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return_activeq)) | DBG_FUNC_NONE, - (int)p, (int)uap->aiocbp, *retval, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_return_activeq)) | DBG_FUNC_NONE, + (int)p, (int)uap->aiocbp, *retval, 0, 0 ); goto ExitRoutine; } } - + error = EINVAL; - + ExitRoutine: - if (proc_lock_held) + if (proc_lock_held) { aio_proc_unlock(p); - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_return)) | DBG_FUNC_END, - (int)p, (int)uap->aiocbp, error, 0, 0 ); - - return( error ); + } + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_return)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, error, 0, 0 ); + return error; } /* aio_return */ /* - * _aio_exec - internal function used to clean up async IO requests for - * a process that is going away due to exec(). We cancel any async IOs + * _aio_exec - internal function used to clean up async IO requests for + * a process that is going away due to exec(). We cancel any async IOs * we can and wait for those already active. We also disable signaling - * for cancelled or active aio requests that complete. + * for cancelled or active aio requests that complete. * This routine MAY block! 
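One user-visible consequence of the remove-then-free logic in aio_return() above: it is a reap that may happen exactly once per request, and only after completion. A small polling sketch of that discipline (standard <aio.h>; the helper name is illustrative):

#include <aio.h>
#include <errno.h>
#include <unistd.h>

static ssize_t
reap(struct aiocb *cb)
{
	/* EINPROGRESS means the entry is still on the active queue */
	while (aio_error(cb) == EINPROGRESS) {
		usleep(1000);
	}
	/*
	 * Exactly one aio_return() per request: it pulls the entry off the
	 * done queue and frees it; a second call fails with EINVAL.
	 */
	return aio_return(cb);
}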
*/ __private_extern__ void _aio_exec(proc_t p ) { - - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exec)) | DBG_FUNC_START, - (int)p, 0, 0, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_exec)) | DBG_FUNC_START, + (int)p, 0, 0, 0, 0 ); _aio_exit( p ); - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exec)) | DBG_FUNC_END, - (int)p, 0, 0, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_exec)) | DBG_FUNC_END, + (int)p, 0, 0, 0, 0 ); return; - } /* _aio_exec */ /* - * _aio_exit - internal function used to clean up async IO requests for - * a process that is terminating (via exit() or exec() ). We cancel any async IOs + * _aio_exit - internal function used to clean up async IO requests for + * a process that is terminating (via exit() or exec() ). We cancel any async IOs * we can and wait for those already active. We also disable signaling * for cancelled or active aio requests that complete. This routine MAY block! */ __private_extern__ void _aio_exit(proc_t p ) { - int error; - aio_workq_entry *entryp; + int error; + aio_workq_entry *entryp; /* quick check to see if there are any async IO requests queued up */ @@ -876,48 +868,48 @@ _aio_exit(proc_t p ) return; } - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exit)) | DBG_FUNC_START, - (int)p, 0, 0, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_exit)) | DBG_FUNC_START, + (int)p, 0, 0, 0, 0 ); aio_proc_lock(p); - /* - * cancel async IO requests on the todo work queue and wait for those - * already active to complete. + /* + * cancel async IO requests on the todo work queue and wait for those + * already active to complete. */ error = do_aio_cancel_locked( p, 0, 0, AIO_EXIT_WAIT, TRUE ); ASSERT_AIO_PROC_LOCK_OWNED(p); - if ( error == AIO_NOTCANCELED ) { - /* - * AIO_NOTCANCELED is returned when we find an aio request for this process - * on the active async IO queue. Active requests cannot be cancelled so we - * must wait for them to complete. We will get a special wake up call on - * our channel used to sleep for ALL active requests to complete. This sleep - * channel (proc.AIO_CLEANUP_SLEEP_CHAN) is only used when we must wait for all - * active aio requests. + if (error == AIO_NOTCANCELED) { + /* + * AIO_NOTCANCELED is returned when we find an aio request for this process + * on the active async IO queue. Active requests cannot be cancelled so we + * must wait for them to complete. We will get a special wake up call on + * our channel used to sleep for ALL active requests to complete. This sleep + * channel (proc.AIO_CLEANUP_SLEEP_CHAN) is only used when we must wait for all + * active aio requests. 
*/ - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exit_sleep)) | DBG_FUNC_NONE, - (int)p, 0, 0, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_exit_sleep)) | DBG_FUNC_NONE, + (int)p, 0, 0, 0, 0 ); while (p->p_aio_active_count != 0) { msleep(&p->AIO_CLEANUP_SLEEP_CHAN, aio_proc_mutex(p), PRIBIO, "aio_exit", 0 ); } } - + if (p->p_aio_active_count != 0) { panic("Exiting process has %d active AIOs after cancellation has completed.\n", p->p_aio_active_count); } - + /* release all aio resources used by this process */ entryp = TAILQ_FIRST( &p->p_aio_doneq ); - while ( entryp != NULL ) { + while (entryp != NULL) { ASSERT_AIO_FROM_PROC(entryp, p); - aio_workq_entry *next_entryp; - + aio_workq_entry *next_entryp; + next_entryp = TAILQ_NEXT( entryp, aio_proc_link); aio_proc_remove_done_locked(p, entryp); - + /* we cannot free requests that are still completing */ aio_entry_lock_spin(entryp); if (entryp->aio_refcount == 0) { @@ -930,31 +922,29 @@ _aio_exit(proc_t p ) aio_proc_lock(p); entryp = TAILQ_FIRST( &p->p_aio_doneq ); continue; - } - else { + } else { /* whoever has the reference will have to do the free */ entryp->flags |= AIO_DO_FREE; - } + } aio_entry_unlock(entryp); entryp = next_entryp; } - + aio_proc_unlock(p); - - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_exit)) | DBG_FUNC_END, - (int)p, 0, 0, 0, 0 ); + + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_exit)) | DBG_FUNC_END, + (int)p, 0, 0, 0, 0 ); return; - } /* _aio_exit */ static boolean_t -should_cancel(aio_workq_entry *entryp, user_addr_t aiocbp, int fd) +should_cancel(aio_workq_entry *entryp, user_addr_t aiocbp, int fd) { - if ( (aiocbp == USER_ADDR_NULL && fd == 0) || - (aiocbp != USER_ADDR_NULL && entryp->uaiocbp == aiocbp) || - (aiocbp == USER_ADDR_NULL && fd == entryp->aiocb.aio_fildes) ) { + if ((aiocbp == USER_ADDR_NULL && fd == 0) || + (aiocbp != USER_ADDR_NULL && entryp->uaiocbp == aiocbp) || + (aiocbp == USER_ADDR_NULL && fd == entryp->aiocb.aio_fildes)) { return TRUE; } @@ -963,36 +953,36 @@ should_cancel(aio_workq_entry *entryp, user_addr_t aiocbp, int fd) /* * do_aio_cancel_locked - cancel async IO requests (if possible). We get called by - * aio_cancel, close, and at exit. - * There are three modes of operation: 1) cancel all async IOs for a process - - * fd is 0 and aiocbp is NULL 2) cancel all async IOs for file descriptor - fd + * aio_cancel, close, and at exit. + * There are three modes of operation: 1) cancel all async IOs for a process - + * fd is 0 and aiocbp is NULL 2) cancel all async IOs for file descriptor - fd * is > 0 and aiocbp is NULL 3) cancel one async IO associated with the given * aiocbp. - * Returns -1 if no matches were found, AIO_CANCELED when we cancelled all - * target async IO requests, AIO_NOTCANCELED if we could not cancel all - * target async IO requests, and AIO_ALLDONE if all target async IO requests + * Returns -1 if no matches were found, AIO_CANCELED when we cancelled all + * target async IO requests, AIO_NOTCANCELED if we could not cancel all + * target async IO requests, and AIO_ALLDONE if all target async IO requests * were already complete. - * WARNING - do not deference aiocbp in this routine, it may point to user + * WARNING - do not dereference aiocbp in this routine, it may point to user * land data that has not been copied in (when called from aio_cancel() ) * * Called with proc locked, and returns the same way.
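Of the three modes just enumerated, only the per-descriptor and per-aiocb forms are reachable directly through the aio_cancel() syscall; the process-wide form is reserved for the exit/exec teardown paths. A brief caller-side sketch of the reachable two (standard <aio.h>):

#include <aio.h>

static void
cancel_sketch(int fd, struct aiocb *cb)
{
	aio_cancel(fd, NULL); /* mode 2: every request on fd (what close(2) triggers) */
	aio_cancel(fd, cb);   /* mode 3: only the request matching this aiocb */
	/*
	 * mode 1 (fd == 0, aiocbp == NULL) is the _aio_exit()/_aio_exec()
	 * teardown path and has no direct user-space entry point.
	 */
}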
*/ static int -do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, - int wait_for_completion, boolean_t disable_notification ) +do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, + int wait_for_completion, boolean_t disable_notification ) { ASSERT_AIO_PROC_LOCK_OWNED(p); - aio_workq_entry *entryp; - int result; + aio_workq_entry *entryp; + int result; result = -1; - + /* look for a match on our queue of async todo work. */ entryp = TAILQ_FIRST(&p->p_aio_activeq); - while ( entryp != NULL ) { + while (entryp != NULL) { ASSERT_AIO_FROM_PROC(entryp, p); - aio_workq_entry *next_entryp; + aio_workq_entry *next_entryp; next_entryp = TAILQ_NEXT( entryp, aio_proc_link); if (!should_cancel(entryp, aiocbp, fd)) { @@ -1011,23 +1001,23 @@ do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, /* Now it's officially cancelled. Do the completion */ result = AIO_CANCELED; - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_async_workq)) | DBG_FUNC_NONE, - (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_async_workq)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); do_aio_completion(entryp); /* This will free if the aio_return() has already happened ... */ aio_entry_unref(entryp); aio_proc_lock(p); - if ( aiocbp != USER_ADDR_NULL ) { - return( result ); + if (aiocbp != USER_ADDR_NULL) { + return result; } - /* - * Restart from the head of the proc active queue since it - * may have been changed while we were away doing completion - * processing. - * + /* + * Restart from the head of the proc active queue since it + * may have been changed while we were away doing completion + * processing. + * * Note that if we found an uncancellable AIO before, we will * either find it again or discover that it's been completed, * so resetting the result will not cause us to return success @@ -1036,51 +1026,50 @@ do_aio_cancel_locked(proc_t p, int fd, user_addr_t aiocbp, entryp = TAILQ_FIRST(&p->p_aio_activeq); result = -1; /* As if beginning anew */ } else { - /* + /* * It's been taken off the active queue already, i.e. is in flight. * All we can do is ask for notification. */ result = AIO_NOTCANCELED; - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_activeq)) | DBG_FUNC_NONE, - (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_activeq)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); /* Mark for waiting and such; will not take a ref if "cancelled" arg is FALSE */ aio_entry_update_for_cancel(entryp, FALSE, wait_for_completion, disable_notification); - if ( aiocbp != USER_ADDR_NULL ) { - return( result ); + if (aiocbp != USER_ADDR_NULL) { + return result; } entryp = next_entryp; } } /* while... */ - - /* - * if we didn't find any matches on the todo or active queues then look for a - * match on our queue of async IO requests that have completed and if found - * return AIO_ALLDONE result. + + /* + * if we didn't find any matches on the todo or active queues then look for a + * match on our queue of async IO requests that have completed and if found + * return AIO_ALLDONE result. * * Proc AIO lock is still held. 
*/ - if ( result == -1 ) { + if (result == -1) { TAILQ_FOREACH(entryp, &p->p_aio_doneq, aio_proc_link) { ASSERT_AIO_FROM_PROC(entryp, p); if (should_cancel(entryp, aiocbp, fd)) { result = AIO_ALLDONE; - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_doneq)) | DBG_FUNC_NONE, - (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_cancel_doneq)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, fd, 0, 0 ); - if ( aiocbp != USER_ADDR_NULL ) { - return( result ); + if (aiocbp != USER_ADDR_NULL) { + return result; } } } } - return( result ); - + return result; } - /* do_aio_cancel_locked */ +/* do_aio_cancel_locked */ /* @@ -1096,72 +1085,71 @@ int aio_suspend(proc_t p, struct aio_suspend_args *uap, int *retval ) { __pthread_testcancel(1); - return(aio_suspend_nocancel(p, (struct aio_suspend_nocancel_args *)uap, retval)); + return aio_suspend_nocancel(p, (struct aio_suspend_nocancel_args *)uap, retval); } int aio_suspend_nocancel(proc_t p, struct aio_suspend_nocancel_args *uap, int *retval ) { - int error; - int i, count; - uint64_t abstime; + int error; + int i, count; + uint64_t abstime; struct user_timespec ts; - aio_workq_entry *entryp; - user_addr_t *aiocbpp; - - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend)) | DBG_FUNC_START, - (int)p, uap->nent, 0, 0, 0 ); + aio_workq_entry *entryp; + user_addr_t *aiocbpp; + + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend)) | DBG_FUNC_START, + (int)p, uap->nent, 0, 0, 0 ); *retval = -1; abstime = 0; aiocbpp = NULL; - count = aio_get_all_queues_count( ); - if ( count < 1 ) { + count = aio_get_all_queues_count(); + if (count < 1) { error = EINVAL; goto ExitThisRoutine; } - if ( uap->nent < 1 || uap->nent > aio_max_requests_per_process ) { + if (uap->nent < 1 || uap->nent > aio_max_requests_per_process) { error = EINVAL; goto ExitThisRoutine; } - if ( uap->timeoutp != USER_ADDR_NULL ) { - if ( proc_is64bit(p) ) { + if (uap->timeoutp != USER_ADDR_NULL) { + if (proc_is64bit(p)) { struct user64_timespec temp; - error = copyin( uap->timeoutp, &temp, sizeof(temp) ); - if ( error == 0 ) { + error = copyin( uap->timeoutp, &temp, sizeof(temp)); + if (error == 0) { ts.tv_sec = temp.tv_sec; ts.tv_nsec = temp.tv_nsec; } - } - else { + } else { struct user32_timespec temp; - error = copyin( uap->timeoutp, &temp, sizeof(temp) ); - if ( error == 0 ) { + error = copyin( uap->timeoutp, &temp, sizeof(temp)); + if (error == 0) { ts.tv_sec = temp.tv_sec; ts.tv_nsec = temp.tv_nsec; } } - if ( error != 0 ) { + if (error != 0) { error = EAGAIN; goto ExitThisRoutine; } - - if ( ts.tv_sec < 0 || ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000 ) { + + if (ts.tv_sec < 0 || ts.tv_nsec < 0 || ts.tv_nsec >= 1000000000) { error = EINVAL; goto ExitThisRoutine; } - nanoseconds_to_absolutetime( (uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, - &abstime ); + nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec, + &abstime ); clock_absolutetime_interval_to_deadline( abstime, &abstime ); } aiocbpp = aio_copy_in_list(p, uap->aiocblist, uap->nent); - if ( aiocbpp == NULL ) { + if (aiocbpp == NULL) { error = EAGAIN; goto ExitThisRoutine; } @@ -1169,18 +1157,19 @@ aio_suspend_nocancel(proc_t p, struct aio_suspend_nocancel_args *uap, int *retva /* check list of aio requests to see if any have completed */ check_for_our_aiocbp: aio_proc_lock_spin(p); - for ( i = 0; i < uap->nent; i++ ) { - user_addr_t aiocbp; + for (i = 0; i < uap->nent; i++) { + user_addr_t aiocbp; /* NULL elements are legal so check for 'em 
*/ aiocbp = *(aiocbpp + i); - if ( aiocbp == USER_ADDR_NULL ) + if (aiocbp == USER_ADDR_NULL) { continue; - + } + /* return immediately if any aio request in the list is done */ TAILQ_FOREACH( entryp, &p->p_aio_doneq, aio_proc_link) { ASSERT_AIO_FROM_PROC(entryp, p); - if ( entryp->uaiocbp == aiocbp ) { + if (entryp->uaiocbp == aiocbp) { aio_proc_unlock(p); *retval = 0; error = 0; @@ -1189,92 +1178,91 @@ check_for_our_aiocbp: } } /* for ( ; i < uap->nent; ) */ - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend_sleep)) | DBG_FUNC_NONE, - (int)p, uap->nent, 0, 0, 0 ); - - /* - * wait for an async IO to complete or a signal fires or timeout expires. - * we return EAGAIN (35) for timeout expiration and EINTR (4) when a signal - * interrupts us. If an async IO completes before a signal fires or our + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend_sleep)) | DBG_FUNC_NONE, + (int)p, uap->nent, 0, 0, 0 ); + + /* + * wait for an async IO to complete or a signal fires or timeout expires. + * we return EAGAIN (35) for timeout expiration and EINTR (4) when a signal + * interrupts us. If an async IO completes before a signal fires or our * timeout expires, we get a wakeup call from aio_work_thread(). */ error = msleep1(&p->AIO_SUSPEND_SLEEP_CHAN, aio_proc_mutex(p), PCATCH | PWAIT | PDROP, "aio_suspend", abstime); /* XXX better priority? */ - if ( error == 0 ) { - /* + if (error == 0) { + /* * got our wakeup call from aio_work_thread(). - * Since we can get a wakeup on this channel from another thread in the - * same process we head back up to make sure this is for the correct aiocbp. - * If it is the correct aiocbp we will return from where we do the check + * Since we can get a wakeup on this channel from another thread in the + * same process we head back up to make sure this is for the correct aiocbp. + * If it is the correct aiocbp we will return from where we do the check * (see entryp->uaiocbp == aiocbp after check_for_our_aiocbp label) - * else we will fall out and just sleep again. + * else we will fall out and just sleep again. */ goto check_for_our_aiocbp; - } - else if ( error == EWOULDBLOCK ) { + } else if (error == EWOULDBLOCK) { /* our timeout expired */ error = EAGAIN; - } - else { + } else { /* we were interrupted */ error = EINTR; } ExitThisRoutine: - if ( aiocbpp != NULL ) + if (aiocbpp != NULL) { FREE( aiocbpp, M_TEMP ); + } - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend)) | DBG_FUNC_END, - (int)p, uap->nent, error, 0, 0 ); - - return( error ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_suspend)) | DBG_FUNC_END, + (int)p, uap->nent, error, 0, 0 ); + return error; } /* aio_suspend */ -/* aio_write - asynchronously write uap->aiocbp->aio_nbytes bytes to the - * file descriptor (uap->aiocbp->aio_fildes) from the buffer +/* aio_write - asynchronously write uap->aiocbp->aio_nbytes bytes to the + * file descriptor (uap->aiocbp->aio_fildes) from the buffer * (uap->aiocbp->aio_buf). 
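The EAGAIN/EINTR mapping in the suspend path above is exactly what a caller of aio_suspend() observes. A minimal sketch using the standard <aio.h> interface (the 500 ms timeout is illustrative):

#include <aio.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

static int
wait_up_to_half_second(const struct aiocb *cb)
{
	const struct aiocb *list[1] = { cb };
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 500000000L };

	if (aio_suspend(list, 1, &ts) == 0) {
		return 0; /* at least one request in the list completed */
	}
	if (errno == EAGAIN) {
		printf("timeout expired before any completion\n");
	} else if (errno == EINTR) {
		printf("interrupted by a signal\n");
	}
	return -1;
}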
*/ int aio_write(proc_t p, struct aio_write_args *uap, int *retval ) { - int error; - + int error; + *retval = 0; - - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_write)) | DBG_FUNC_START, - (int)p, (int)uap->aiocbp, 0, 0, 0 ); + + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_write)) | DBG_FUNC_START, + (int)p, (int)uap->aiocbp, 0, 0, 0 ); error = aio_queue_async_request( p, uap->aiocbp, AIO_WRITE ); - if ( error != 0 ) + if (error != 0) { *retval = -1; + } - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_write)) | DBG_FUNC_END, - (int)p, (int)uap->aiocbp, error, 0, 0 ); - - return( error ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_write)) | DBG_FUNC_END, + (int)p, (int)uap->aiocbp, error, 0, 0 ); + return error; } /* aio_write */ static user_addr_t * aio_copy_in_list(proc_t procp, user_addr_t aiocblist, int nent) { - user_addr_t *aiocbpp; - int i, result; + user_addr_t *aiocbpp; + int i, result; /* we reserve enough space for largest possible pointer size */ MALLOC( aiocbpp, user_addr_t *, (nent * sizeof(user_addr_t)), M_TEMP, M_WAITOK ); - if ( aiocbpp == NULL ) + if (aiocbpp == NULL) { goto err; + } /* copyin our aiocb pointers from list */ - result = copyin( aiocblist, aiocbpp, - proc_is64bit(procp) ? (nent * sizeof(user64_addr_t)) - : (nent * sizeof(user32_addr_t)) ); - if ( result) { + result = copyin( aiocblist, aiocbpp, + proc_is64bit(procp) ? (nent * sizeof(user64_addr_t)) + : (nent * sizeof(user32_addr_t))); + if (result) { FREE( aiocbpp, M_TEMP ); aiocbpp = NULL; goto err; @@ -1285,7 +1273,7 @@ aio_copy_in_list(proc_t procp, user_addr_t aiocblist, int nent) * munge and expand when these pointers came from a * 32-bit process */ - if ( !proc_is64bit(procp) ) { + if (!proc_is64bit(procp)) { /* copy from last to first to deal with overlap */ user32_addr_t *my_ptrp = ((user32_addr_t *)aiocbpp) + (nent - 1); user_addr_t *my_addrp = aiocbpp + (nent - 1); @@ -1296,17 +1284,18 @@ aio_copy_in_list(proc_t procp, user_addr_t aiocblist, int nent) } err: - return (aiocbpp); + return aiocbpp; } static int aio_copy_in_sigev(proc_t procp, user_addr_t sigp, struct user_sigevent *sigev) { - int result = 0; + int result = 0; - if (sigp == USER_ADDR_NULL) + if (sigp == USER_ADDR_NULL) { goto out; + } /* * We need to munge aio_sigevent since it contains pointers. @@ -1318,23 +1307,22 @@ aio_copy_in_sigev(proc_t procp, user_addr_t sigp, struct user_sigevent *sigev) * Notes: This does NOT affect us since we don't support * sigev_value yet in the aio context. 
*/ - if ( proc_is64bit(procp) ) { + if (proc_is64bit(procp)) { struct user64_sigevent sigevent64; - result = copyin( sigp, &sigevent64, sizeof(sigevent64) ); - if ( result == 0 ) { + result = copyin( sigp, &sigevent64, sizeof(sigevent64)); + if (result == 0) { sigev->sigev_notify = sigevent64.sigev_notify; sigev->sigev_signo = sigevent64.sigev_signo; sigev->sigev_value.size_equivalent.sival_int = sigevent64.sigev_value.size_equivalent.sival_int; sigev->sigev_notify_function = sigevent64.sigev_notify_function; sigev->sigev_notify_attributes = sigevent64.sigev_notify_attributes; } - } else { struct user32_sigevent sigevent32; - result = copyin( sigp, &sigevent32, sizeof(sigevent32) ); - if ( result == 0 ) { + result = copyin( sigp, &sigevent32, sizeof(sigevent32)); + if (result == 0) { sigev->sigev_notify = sigevent32.sigev_notify; sigev->sigev_signo = sigevent32.sigev_signo; sigev->sigev_value.size_equivalent.sival_int = sigevent32.sigev_value.sival_int; @@ -1343,12 +1331,12 @@ aio_copy_in_sigev(proc_t procp, user_addr_t sigp, struct user_sigevent *sigev) } } - if ( result != 0 ) { + if (result != 0) { result = EAGAIN; } out: - return (result); + return result; } /* @@ -1376,7 +1364,7 @@ static void aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked) { #if 0 - aio_workq_entry *my_entryp; /* used for insertion sort */ + aio_workq_entry *my_entryp; /* used for insertion sort */ #endif /* 0 */ aio_workq_t queue = aio_entry_workq(entryp); @@ -1387,7 +1375,7 @@ aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked) ASSERT_AIO_PROC_LOCK_OWNED(procp); /* Onto proc queue */ - TAILQ_INSERT_TAIL(&procp->p_aio_activeq, entryp, aio_proc_link); + TAILQ_INSERT_TAIL(&procp->p_aio_activeq, entryp, aio_proc_link); procp->p_aio_active_count++; procp->p_aio_total_count++; @@ -1395,9 +1383,9 @@ aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked) aio_workq_lock_spin(queue); aio_workq_add_entry_locked(queue, entryp); waitq_wakeup64_one(&queue->aioq_waitq, CAST_EVENT64_T(queue), - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); aio_workq_unlock(queue); - + if (proc_locked == 0) { aio_proc_unlock(procp); } @@ -1423,23 +1411,26 @@ aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked) entryp->priority = (((2 * NZERO) - 1) - procp->p_nice); /* only premit depressing the priority */ - if (entryp->aiocb.aio_reqprio < 0) + if (entryp->aiocb.aio_reqprio < 0) { entryp->aiocb.aio_reqprio = 0; + } if (entryp->aiocb.aio_reqprio > 0) { entryp->priority -= entryp->aiocb.aio_reqprio; - if (entryp->priority < 0) + if (entryp->priority < 0) { entryp->priority = 0; + } } /* Insertion sort the entry; lowest ->priority to highest */ TAILQ_FOREACH(my_entryp, &aio_anchor.aio_async_workq, aio_workq_link) { - if ( entryp->priority <= my_entryp->priority) { + if (entryp->priority <= my_entryp->priority) { TAILQ_INSERT_BEFORE(my_entryp, entryp, aio_workq_link); break; } } - if (my_entryp == NULL) + if (my_entryp == NULL) { TAILQ_INSERT_TAIL( &aio_anchor.aio_async_workq, entryp, aio_workq_link ); + } #endif /* 0 */ } @@ -1456,61 +1447,61 @@ aio_enqueue_work( proc_t procp, aio_workq_entry *entryp, int proc_locked) int lio_listio(proc_t p, struct lio_listio_args *uap, int *retval ) { - int i; - int call_result; - int result; - int old_count; - aio_workq_entry **entryp_listp; - user_addr_t *aiocbpp; - struct user_sigevent aiosigev; - aio_lio_context *lio_context; - boolean_t free_context = FALSE; - uint32_t *paio_offset; - 
uint32_t *paio_nbytes; - - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_listio)) | DBG_FUNC_START, - (int)p, uap->nent, uap->mode, 0, 0 ); - + int i; + int call_result; + int result; + int old_count; + aio_workq_entry **entryp_listp; + user_addr_t *aiocbpp; + struct user_sigevent aiosigev; + aio_lio_context *lio_context; + boolean_t free_context = FALSE; + uint32_t *paio_offset; + uint32_t *paio_nbytes; + + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_listio)) | DBG_FUNC_START, + (int)p, uap->nent, uap->mode, 0, 0 ); + entryp_listp = NULL; lio_context = NULL; aiocbpp = NULL; call_result = -1; *retval = -1; - if ( !(uap->mode == LIO_NOWAIT || uap->mode == LIO_WAIT) ) { + if (!(uap->mode == LIO_NOWAIT || uap->mode == LIO_WAIT)) { call_result = EINVAL; goto ExitRoutine; } - if ( uap->nent < 1 || uap->nent > AIO_LISTIO_MAX ) { + if (uap->nent < 1 || uap->nent > AIO_LISTIO_MAX) { call_result = EINVAL; goto ExitRoutine; } - - /* + + /* * allocate a list of aio_workq_entry pointers that we will use * to queue up all our requests at once while holding our lock. */ MALLOC( entryp_listp, void *, (uap->nent * sizeof(aio_workq_entry *)), M_TEMP, M_WAITOK ); - if ( entryp_listp == NULL ) { + if (entryp_listp == NULL) { call_result = EAGAIN; goto ExitRoutine; } - + MALLOC( lio_context, aio_lio_context*, sizeof(aio_lio_context), M_TEMP, M_WAITOK ); - if ( lio_context == NULL ) { + if (lio_context == NULL) { call_result = EAGAIN; goto ExitRoutine; } -#if DEBUG +#if DEBUG OSIncrementAtomic(&lio_contexts_alloced); #endif /* DEBUG */ free_context = TRUE; bzero(lio_context, sizeof(aio_lio_context)); - + aiocbpp = aio_copy_in_list(p, uap->aiocblist, uap->nent); - if ( aiocbpp == NULL ) { + if (aiocbpp == NULL) { call_result = EAGAIN; goto ExitRoutine; } @@ -1523,30 +1514,31 @@ lio_listio(proc_t p, struct lio_listio_args *uap, int *retval ) /* Only copy in an sigev if the user supplied one */ if (uap->sigp != USER_ADDR_NULL) { call_result = aio_copy_in_sigev(p, uap->sigp, &aiosigev); - if ( call_result) + if (call_result) { goto ExitRoutine; + } } /* process list of aio requests */ free_context = FALSE; lio_context->io_issued = uap->nent; lio_context->io_waiter = uap->mode == LIO_WAIT ? 1 : 0; /* Should it be freed by last AIO */ - for ( i = 0; i < uap->nent; i++ ) { - user_addr_t my_aiocbp; - aio_workq_entry *entryp; - + for (i = 0; i < uap->nent; i++) { + user_addr_t my_aiocbp; + aio_workq_entry *entryp; + *(entryp_listp + i) = NULL; my_aiocbp = *(aiocbpp + i); - + /* NULL elements are legal so check for 'em */ - if ( my_aiocbp == USER_ADDR_NULL ) { + if (my_aiocbp == USER_ADDR_NULL) { aio_proc_lock_spin(p); lio_context->io_issued--; aio_proc_unlock(p); continue; } - /* + /* * We use lio_context to mark IO requests for delayed completion * processing which means we wait until all IO requests in the * group have completed before we either return to the caller @@ -1555,20 +1547,21 @@ lio_listio(proc_t p, struct lio_listio_args *uap, int *retval ) * We use the address of the lio_context for this, since it is * unique in the address space. 
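From user space, the io_issued/io_completed accounting above corresponds to a single lio_listio() batch. A minimal LIO_WAIT sketch (standard <aio.h>; the descriptor, buffers, and sizes are illustrative):

#include <aio.h>
#include <sys/types.h>

static int
write_two(int fd, char *a, char *b, size_t len)
{
	struct aiocb cb0 = { .aio_fildes = fd, .aio_buf = a,
	    .aio_nbytes = len, .aio_offset = 0,
	    .aio_lio_opcode = LIO_WRITE };
	struct aiocb cb1 = { .aio_fildes = fd, .aio_buf = b,
	    .aio_nbytes = len, .aio_offset = (off_t)len,
	    .aio_lio_opcode = LIO_WRITE };
	struct aiocb *list[2] = { &cb0, &cb1 };

	/* LIO_WAIT: returns only once io_completed catches up to io_issued */
	return lio_listio(LIO_WAIT, list, 2, NULL);
}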
*/ - result = lio_create_entry( p, my_aiocbp, lio_context, (entryp_listp + i) ); - if ( result != 0 && call_result == -1 ) + result = lio_create_entry( p, my_aiocbp, lio_context, (entryp_listp + i)); + if (result != 0 && call_result == -1) { call_result = result; - + } + /* NULL elements are legal so check for 'em */ entryp = *(entryp_listp + i); - if ( entryp == NULL ) { + if (entryp == NULL) { aio_proc_lock_spin(p); lio_context->io_issued--; aio_proc_unlock(p); continue; } - - if ( uap->mode == LIO_NOWAIT ) { + + if (uap->mode == LIO_NOWAIT) { /* Set signal hander, if any */ entryp->aiocb.aio_sigevent = aiosigev; } else { @@ -1580,153 +1573,151 @@ lio_listio(proc_t p, struct lio_listio_args *uap, int *retval ) old_count = aio_increment_total_count(); aio_proc_lock_spin(p); - if ( old_count >= aio_max_requests || - aio_get_process_count( entryp->procp ) >= aio_max_requests_per_process || - is_already_queued( entryp->procp, entryp->uaiocbp ) == TRUE ) { - + if (old_count >= aio_max_requests || + aio_get_process_count( entryp->procp ) >= aio_max_requests_per_process || + is_already_queued( entryp->procp, entryp->uaiocbp ) == TRUE) { lio_context->io_issued--; aio_proc_unlock(p); - + aio_decrement_total_count(); - if ( call_result == -1 ) + if (call_result == -1) { call_result = EAGAIN; + } aio_free_request(entryp); entryp_listp[i] = NULL; continue; } - + lck_mtx_convert_spin(aio_proc_mutex(p)); aio_enqueue_work(p, entryp, 1); aio_proc_unlock(p); - - KERNEL_DEBUG_CONSTANT( (BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_START, - (int)p, (int)entryp->uaiocbp, entryp->flags, entryp->aiocb.aio_fildes, 0 ); - paio_offset = (uint32_t*) &entryp->aiocb.aio_offset; - paio_nbytes = (uint32_t*) &entryp->aiocb.aio_nbytes; - KERNEL_DEBUG_CONSTANT( (BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_END, - paio_offset[0], (sizeof(entryp->aiocb.aio_offset) == sizeof(uint64_t) ? paio_offset[1] : 0), - paio_nbytes[0], (sizeof(entryp->aiocb.aio_nbytes) == sizeof(uint64_t) ? paio_nbytes[1] : 0), - 0 ); - } - - switch(uap->mode) { + + KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_START, + (int)p, (int)entryp->uaiocbp, entryp->flags, entryp->aiocb.aio_fildes, 0 ); + paio_offset = (uint32_t*) &entryp->aiocb.aio_offset; + paio_nbytes = (uint32_t*) &entryp->aiocb.aio_nbytes; + KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_END, + paio_offset[0], (sizeof(entryp->aiocb.aio_offset) == sizeof(uint64_t) ? paio_offset[1] : 0), + paio_nbytes[0], (sizeof(entryp->aiocb.aio_nbytes) == sizeof(uint64_t) ? 
paio_nbytes[1] : 0), + 0 ); + } + + switch (uap->mode) { case LIO_WAIT: aio_proc_lock_spin(p); while (lio_context->io_completed < lio_context->io_issued) { result = msleep(lio_context, aio_proc_mutex(p), PCATCH | PRIBIO | PSPIN, "lio_listio", 0); - + /* If we were interrupted, fail out (even if all finished) */ if (result != 0) { call_result = EINTR; lio_context->io_waiter = 0; break; - } + } } /* If all IOs have finished must free it */ if (lio_context->io_completed == lio_context->io_issued) { free_context = TRUE; - } + } aio_proc_unlock(p); break; - + case LIO_NOWAIT: break; } - + /* call_result == -1 means we had no trouble queueing up requests */ - if ( call_result == -1 ) { + if (call_result == -1) { call_result = 0; *retval = 0; } -ExitRoutine: - if ( entryp_listp != NULL ) +ExitRoutine: + if (entryp_listp != NULL) { FREE( entryp_listp, M_TEMP ); - if ( aiocbpp != NULL ) + } + if (aiocbpp != NULL) { FREE( aiocbpp, M_TEMP ); + } if (free_context) { free_lio_context(lio_context); } - - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_listio)) | DBG_FUNC_END, - (int)p, call_result, 0, 0, 0 ); - - return( call_result ); - + + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_listio)) | DBG_FUNC_END, + (int)p, call_result, 0, 0, 0 ); + + return call_result; } /* lio_listio */ /* * aio worker thread. this is where all the real work gets done. - * we get a wake up call on sleep channel &aio_anchor.aio_async_workq + * we get a wake up call on sleep channel &aio_anchor.aio_async_workq * after new work is queued up. */ __attribute__((noreturn)) static void aio_work_thread(void) { - aio_workq_entry *entryp; - int error; - vm_map_t currentmap; - vm_map_t oldmap = VM_MAP_NULL; - task_t oldaiotask = TASK_NULL; - struct uthread *uthreadp = NULL; - - for( ;; ) { - /* + aio_workq_entry *entryp; + int error; + vm_map_t currentmap; + vm_map_t oldmap = VM_MAP_NULL; + task_t oldaiotask = TASK_NULL; + struct uthread *uthreadp = NULL; + + for (;;) { + /* * returns with the entry ref'ed. - * sleeps until work is available. + * sleeps until work is available. */ - entryp = aio_get_some_work(); + entryp = aio_get_some_work(); - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread)) | DBG_FUNC_START, - (int)entryp->procp, (int)entryp->uaiocbp, entryp->flags, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread)) | DBG_FUNC_START, + (int)entryp->procp, (int)entryp->uaiocbp, entryp->flags, 0, 0 ); /* * Assume the target's address space identity for the duration * of the IO. Note: don't need to have the entryp locked, * because the proc and map don't change until it's freed. 
*/ - currentmap = get_task_map( (current_proc())->task ); - if ( currentmap != entryp->aio_map ) { + currentmap = get_task_map((current_proc())->task ); + if (currentmap != entryp->aio_map) { uthreadp = (struct uthread *) get_bsdthread_info(current_thread()); oldaiotask = uthreadp->uu_aio_task; uthreadp->uu_aio_task = entryp->procp->task; oldmap = vm_map_switch( entryp->aio_map ); } - if ( (entryp->flags & AIO_READ) != 0 ) { + if ((entryp->flags & AIO_READ) != 0) { error = do_aio_read( entryp ); - } - else if ( (entryp->flags & AIO_WRITE) != 0 ) { + } else if ((entryp->flags & AIO_WRITE) != 0) { error = do_aio_write( entryp ); - } - else if ( (entryp->flags & (AIO_FSYNC | AIO_DSYNC)) != 0 ) { + } else if ((entryp->flags & (AIO_FSYNC | AIO_DSYNC)) != 0) { error = do_aio_fsync( entryp ); - } - else { - printf( "%s - unknown aio request - flags 0x%02X \n", - __FUNCTION__, entryp->flags ); + } else { + printf( "%s - unknown aio request - flags 0x%02X \n", + __FUNCTION__, entryp->flags ); error = EINVAL; } /* Restore old map */ - if ( currentmap != entryp->aio_map ) { + if (currentmap != entryp->aio_map) { (void) vm_map_switch( oldmap ); uthreadp->uu_aio_task = oldaiotask; } - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread)) | DBG_FUNC_END, - (int)entryp->procp, (int)entryp->uaiocbp, entryp->errorval, - entryp->returnval, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_worker_thread)) | DBG_FUNC_END, + (int)entryp->procp, (int)entryp->uaiocbp, entryp->errorval, + entryp->returnval, 0 ); + - /* XXX COUNTS */ aio_entry_lock_spin(entryp); - entryp->errorval = error; + entryp->errorval = error; aio_entry_unlock(entryp); /* we're done with the IO request so pop it off the active queue and */ @@ -1738,8 +1729,8 @@ aio_work_thread(void) OSDecrementAtomic(&aio_anchor.aio_inflight_count); /* remove our reference to the user land map. */ - if ( VM_MAP_NULL != entryp->aio_map ) { - vm_map_t my_map; + if (VM_MAP_NULL != entryp->aio_map) { + vm_map_t my_map; my_map = entryp->aio_map; entryp->aio_map = VM_MAP_NULL; @@ -1751,11 +1742,9 @@ aio_work_thread(void) /* Will free if needed */ aio_entry_unref(entryp); - } /* for ( ;; ) */ /* NOT REACHED */ - } /* aio_work_thread */ @@ -1768,78 +1757,78 @@ aio_work_thread(void) static aio_workq_entry * aio_get_some_work( void ) { - aio_workq_entry *entryp = NULL; - aio_workq_t queue = NULL; + aio_workq_entry *entryp = NULL; + aio_workq_t queue = NULL; /* Just one queue for the moment. In the future there will be many. */ - queue = &aio_anchor.aio_async_workqs[0]; + queue = &aio_anchor.aio_async_workqs[0]; aio_workq_lock_spin(queue); if (queue->aioq_count == 0) { goto nowork; } - /* + /* * Hold the queue lock. * * pop some work off the work queue and add to our active queue - * Always start with the queue lock held. + * Always start with the queue lock held. */ - for(;;) { - /* + for (;;) { + /* * Pull of of work queue. Once it's off, it can't be cancelled, * so we can take our ref once we drop the queue lock. */ entryp = TAILQ_FIRST(&queue->aioq_entries); - /* - * If there's no work or only fsyncs that need delay, go to sleep - * and then start anew from aio_work_thread + /* + * If there's no work or only fsyncs that need delay, go to sleep + * and then start anew from aio_work_thread */ if (entryp == NULL) { goto nowork; } aio_workq_remove_entry_locked(queue, entryp); - + aio_workq_unlock(queue); - /* + /* * Check if it's an fsync that must be delayed. No need to lock the entry; * that flag would have been set at initialization. 
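Seen from user space, the effect of this delay is that aio_fsync() behaves as an ordering point: it covers operations queued before it, and the requeue loop above holds it back until it reaches the head of the process's active queue. A hedged caller-side sketch (standard <aio.h>; the helper name and minimal error handling are illustrative):

#include <aio.h>
#include <fcntl.h>
#include <string.h>

/* Queue a write, then an aio_fsync() covering everything queued before it. */
static int
write_then_sync(int fd, struct aiocb *w, struct aiocb *s)
{
	if (aio_write(w) != 0) {
		return -1;
	}
	memset(s, 0, sizeof(*s));
	s->aio_fildes = fd;
	/* the kernel delays this until earlier requests on the queue have run */
	return aio_fsync(O_SYNC, s);
}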
*/ - if ( (entryp->flags & AIO_FSYNC) != 0 ) { - /* + if ((entryp->flags & AIO_FSYNC) != 0) { + /* * Check for unfinished operations on the same file * in this proc's queue. */ aio_proc_lock_spin(entryp->procp); - if ( aio_delay_fsync_request( entryp ) ) { + if (aio_delay_fsync_request( entryp )) { /* It needs to be delayed. Put it back on the end of the work queue */ - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync_delay)) | DBG_FUNC_NONE, - (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_fsync_delay)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 ); aio_proc_unlock(entryp->procp); aio_workq_lock_spin(queue); aio_workq_add_entry_locked(queue, entryp); continue; - } + } aio_proc_unlock(entryp->procp); } - + break; } aio_entry_ref(entryp); OSIncrementAtomic(&aio_anchor.aio_inflight_count); - return( entryp ); + return entryp; nowork: /* We will wake up when someone enqueues something */ waitq_assert_wait64(&queue->aioq_waitq, CAST_EVENT64_T(queue), THREAD_UNINT, 0); aio_workq_unlock(queue); - thread_block( (thread_continue_t)aio_work_thread ); + thread_block((thread_continue_t)aio_work_thread ); // notreached return NULL; @@ -1856,23 +1845,23 @@ aio_delay_fsync_request( aio_workq_entry *entryp ) if (entryp == TAILQ_FIRST(&entryp->procp->p_aio_activeq)) { return FALSE; } - + return TRUE; } /* aio_delay_fsync_request */ static aio_workq_entry * aio_create_queue_entry(proc_t procp, user_addr_t aiocbp, void *group_tag, int kindOfIO) { - aio_workq_entry *entryp; - int result = 0; + aio_workq_entry *entryp; + int result = 0; entryp = (aio_workq_entry *) zalloc( aio_workq_zonep ); - if ( entryp == NULL ) { - result = EAGAIN; + if (entryp == NULL) { + result = EAGAIN; goto error_exit; } - bzero( entryp, sizeof(*entryp) ); + bzero( entryp, sizeof(*entryp)); /* fill in the rest of the aio_workq_entry */ entryp->procp = procp; @@ -1882,22 +1871,23 @@ aio_create_queue_entry(proc_t procp, user_addr_t aiocbp, void *group_tag, int ki entryp->aio_map = VM_MAP_NULL; entryp->aio_refcount = 0; - if ( proc_is64bit(procp) ) { + if (proc_is64bit(procp)) { struct user64_aiocb aiocb64; - - result = copyin( aiocbp, &aiocb64, sizeof(aiocb64) ); - if (result == 0 ) + + result = copyin( aiocbp, &aiocb64, sizeof(aiocb64)); + if (result == 0) { do_munge_aiocb_user64_to_user(&aiocb64, &entryp->aiocb); - + } } else { struct user32_aiocb aiocb32; - - result = copyin( aiocbp, &aiocb32, sizeof(aiocb32) ); - if ( result == 0 ) + + result = copyin( aiocbp, &aiocb32, sizeof(aiocb32)); + if (result == 0) { do_munge_aiocb_user32_to_user( &aiocb32, &entryp->aiocb ); + } } - if ( result != 0 ) { + if (result != 0) { result = EAGAIN; goto error_exit; } @@ -1908,25 +1898,26 @@ aio_create_queue_entry(proc_t procp, user_addr_t aiocbp, void *group_tag, int ki /* do some more validation on the aiocb and embedded file descriptor */ result = aio_validate( entryp ); - if ( result != 0 ) + if (result != 0) { goto error_exit_with_ref; + } /* get a reference on the current_thread, which is passed in vfs_context. 
*/ entryp->thread = current_thread(); thread_reference( entryp->thread ); - return ( entryp ); + return entryp; error_exit_with_ref: - if ( VM_MAP_NULL != entryp->aio_map ) { + if (VM_MAP_NULL != entryp->aio_map) { vm_map_deallocate( entryp->aio_map ); } error_exit: - if ( result && entryp != NULL ) { + if (result && entryp != NULL) { zfree( aio_workq_zonep, entryp ); entryp = NULL; } - return ( entryp ); + return entryp; } @@ -1934,17 +1925,17 @@ error_exit: * aio_queue_async_request - queue up an async IO request on our work queue then * wake up one of our worker threads to do the actual work. We get a reference * to our caller's user land map in order to keep it around while we are - * processing the request. + * processing the request. */ static int aio_queue_async_request(proc_t procp, user_addr_t aiocbp, int kindOfIO ) { - aio_workq_entry *entryp; - int result; - int old_count; - uint32_t *paio_offset; - uint32_t *paio_nbytes; - + aio_workq_entry *entryp; + int result; + int old_count; + uint32_t *paio_offset; + uint32_t *paio_nbytes; + old_count = aio_increment_total_count(); if (old_count >= aio_max_requests) { result = EAGAIN; @@ -1952,7 +1943,7 @@ aio_queue_async_request(proc_t procp, user_addr_t aiocbp, int kindOfIO ) } entryp = aio_create_queue_entry( procp, aiocbp, 0, kindOfIO); - if ( entryp == NULL ) { + if (entryp == NULL) { result = EAGAIN; goto error_noalloc; } @@ -1960,35 +1951,35 @@ aio_queue_async_request(proc_t procp, user_addr_t aiocbp, int kindOfIO ) aio_proc_lock_spin(procp); - if ( is_already_queued( entryp->procp, entryp->uaiocbp ) == TRUE ) { - result = EAGAIN; + if (is_already_queued( entryp->procp, entryp->uaiocbp ) == TRUE) { + result = EAGAIN; goto error_exit; } /* check our aio limits to throttle bad or rude user land behavior */ if (aio_get_process_count( procp ) >= aio_max_requests_per_process) { printf("aio_queue_async_request(): too many in flight for proc: %d.\n", procp->p_aio_total_count); - result = EAGAIN; + result = EAGAIN; goto error_exit; } - + /* Add the IO to proc and work queues, wake up threads as appropriate */ lck_mtx_convert_spin(aio_proc_mutex(procp)); aio_enqueue_work(procp, entryp, 1); - + aio_proc_unlock(procp); - - paio_offset = (uint32_t*) &entryp->aiocb.aio_offset; - paio_nbytes = (uint32_t*) &entryp->aiocb.aio_nbytes; - KERNEL_DEBUG_CONSTANT( (BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_START, - (int)procp, (int)aiocbp, entryp->flags, entryp->aiocb.aio_fildes, 0 ); - KERNEL_DEBUG_CONSTANT( (BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_END, - paio_offset[0], (sizeof(entryp->aiocb.aio_offset) == sizeof(uint64_t) ? paio_offset[1] : 0), - paio_nbytes[0], (sizeof(entryp->aiocb.aio_nbytes) == sizeof(uint64_t) ? paio_nbytes[1] : 0), - 0 ); - - return( 0 ); - + + paio_offset = (uint32_t*) &entryp->aiocb.aio_offset; + paio_nbytes = (uint32_t*) &entryp->aiocb.aio_nbytes; + KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_START, + (int)procp, (int)aiocbp, entryp->flags, entryp->aiocb.aio_fildes, 0 ); + KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_AIO, AIO_work_queued)) | DBG_FUNC_END, + paio_offset[0], (sizeof(entryp->aiocb.aio_offset) == sizeof(uint64_t) ? paio_offset[1] : 0), + paio_nbytes[0], (sizeof(entryp->aiocb.aio_nbytes) == sizeof(uint64_t) ? 
paio_nbytes[1] : 0),
+	    0 );
+
+	return 0;
+
 error_exit:
 	/*
 	 * This entry has not been queued up so no worries about
@@ -2000,8 +1991,7 @@ error_exit:
 error_noalloc:
 	aio_decrement_total_count();

-	return( result );
-
+	return result;
 } /* aio_queue_async_request */


@@ -2023,26 +2013,26 @@ error_noalloc:
 *
 *
 * Notes:	We get a reference to our caller's user land map in order
- *		to keep it around while we are processing the request.
+ *		to keep it around while we are processing the request.
 *
 *		lio_listio calls behave differently at completion; they do
 *		completion notification when all async IO requests have
 *		completed.  We use group_tag to tag IO requests that behave
- *		in the delay notification manner.
+ *		in the delay notification manner.
 *
 *		All synchronous operations are considered to not have a
 *		signal routine associated with them (sigp == USER_ADDR_NULL).
 */
 static int
 lio_create_entry(proc_t procp, user_addr_t aiocbp, void *group_tag,
-		 aio_workq_entry **entrypp )
+    aio_workq_entry **entrypp )
 {
-	aio_workq_entry		*entryp;
-	int			result;
+	aio_workq_entry         *entryp;
+	int             result;

 	entryp = aio_create_queue_entry( procp, aiocbp, group_tag, AIO_LIO);
-	if ( entryp == NULL ) {
-		result = EAGAIN;
+	if (entryp == NULL) {
+		result = EAGAIN;
 		goto error_exit;
 	}

@@ -2050,26 +2040,25 @@ lio_create_entry(proc_t procp, user_addr_t aiocbp, void *group_tag,
 	 * Look for lio_listio LIO_NOP requests and ignore them; this is
 	 * not really an error, but we need to free our aio_workq_entry.
 	 */
-	if ( entryp->aiocb.aio_lio_opcode == LIO_NOP ) {
+	if (entryp->aiocb.aio_lio_opcode == LIO_NOP) {
 		result = 0;
 		goto error_exit;
 	}

 	*entrypp = entryp;
-	return( 0 );
-
+	return 0;
+
 error_exit:
-	if ( entryp != NULL ) {
+	if (entryp != NULL) {
 		/*
 		 * This entry has not been queued up so no worries about
 		 * unlocked state and aio_map
 		 */
 		aio_free_request(entryp);
 	}
-
-	return( result );
-
+
+	return result;
 } /* lio_create_entry */


@@ -2083,21 +2072,20 @@
 static int
 aio_free_request(aio_workq_entry *entryp)
 {
 	/* remove our reference to the user land map. */
-	if ( VM_MAP_NULL != entryp->aio_map) {
+	if (VM_MAP_NULL != entryp->aio_map) {
 		vm_map_deallocate(entryp->aio_map);
 	}

 	/* remove our reference to thread which enqueued the request */
-	if ( NULL != entryp->thread ) {
+	if (NULL != entryp->thread) {
 		thread_deallocate( entryp->thread );
 	}

 	entryp->aio_refcount = -1; /* A bit of poisoning in case of bad refcounting. */
-
+
 	zfree( aio_workq_zonep, entryp );

-	return( 0 );
-
+	return 0;
 } /* aio_free_request */


@@ -2107,35 +2095,37 @@ aio_free_request(aio_workq_entry *entryp)
 * validate the aiocb passed in by one of the aio syscalls.
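 *
 * [Editor's note: for reference, a request that passes the checks below is
 * shaped roughly like this in userland; a sketch with hypothetical fd/buf
 * names, not code from this file:
 *
 *	struct aiocb cb = {
 *		.aio_fildes = fd,      // open for the matching read/write access
 *		.aio_buf    = buf,     // must not be NULL for reads or writes
 *		.aio_nbytes = len,     // must not exceed INT_MAX
 *		.aio_offset = 0,       // must not be negative
 *	};
 *	cb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;   // or SIGEV_NONE
 *	cb.aio_sigevent.sigev_signo  = SIGUSR1;        // not SIGKILL/SIGSTOP
 *	aio_read(&cb);
 * ]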
 */
 static int
-aio_validate( aio_workq_entry *entryp )
+aio_validate( aio_workq_entry *entryp )
 {
-	struct fileproc		*fp;
-	int			flag;
-	int			result;
-
+	struct fileproc         *fp;
+	int             flag;
+	int             result;
+
 	result = 0;

-	if ( (entryp->flags & AIO_LIO) != 0 ) {
-		if ( entryp->aiocb.aio_lio_opcode == LIO_READ )
+	if ((entryp->flags & AIO_LIO) != 0) {
+		if (entryp->aiocb.aio_lio_opcode == LIO_READ) {
 			entryp->flags |= AIO_READ;
-		else if ( entryp->aiocb.aio_lio_opcode == LIO_WRITE )
+		} else if (entryp->aiocb.aio_lio_opcode == LIO_WRITE) {
 			entryp->flags |= AIO_WRITE;
-		else if ( entryp->aiocb.aio_lio_opcode == LIO_NOP )
-			return( 0 );
-		else
-			return( EINVAL );
+		} else if (entryp->aiocb.aio_lio_opcode == LIO_NOP) {
+			return 0;
+		} else {
+			return EINVAL;
+		}
 	}

 	flag = FREAD;
-	if ( (entryp->flags & (AIO_WRITE | AIO_FSYNC | AIO_DSYNC)) != 0 ) {
+	if ((entryp->flags & (AIO_WRITE | AIO_FSYNC | AIO_DSYNC)) != 0) {
 		flag = FWRITE;
 	}

-	if ( (entryp->flags & (AIO_READ | AIO_WRITE)) != 0 ) {
-		if ( entryp->aiocb.aio_nbytes > INT_MAX ||
-		     entryp->aiocb.aio_buf == USER_ADDR_NULL ||
-		     entryp->aiocb.aio_offset < 0 )
-			return( EINVAL );
+	if ((entryp->flags & (AIO_READ | AIO_WRITE)) != 0) {
+		if (entryp->aiocb.aio_nbytes > INT_MAX ||
+		    entryp->aiocb.aio_buf == USER_ADDR_NULL ||
+		    entryp->aiocb.aio_offset < 0) {
+			return EINVAL;
+		}
 	}

 	/*
@@ -2145,65 +2135,64 @@ aio_validate( aio_workq_entry *entryp )
 	 * are ignored, since SIGEV_THREAD is unsupported.  This is consistent
 	 * with no [RTS] (Realtime Signal) option group support.
 	 */
-	switch ( entryp->aiocb.aio_sigevent.sigev_notify ) {
+	switch (entryp->aiocb.aio_sigevent.sigev_notify) {
 	case SIGEV_SIGNAL:
-	    {
-		int		signum;
+	{
+		int             signum;

 		/* make sure we have a valid signal number */
 		signum = entryp->aiocb.aio_sigevent.sigev_signo;
-		if ( signum <= 0 || signum >= NSIG ||
-		     signum == SIGKILL || signum == SIGSTOP )
-			return (EINVAL);
-	    }
-	    break;
+		if (signum <= 0 || signum >= NSIG ||
+		    signum == SIGKILL || signum == SIGSTOP) {
+			return EINVAL;
+		}
+	}
+	break;

 	case SIGEV_NONE:
 		break;

 	case SIGEV_THREAD:
-		/* Unsupported [RTS] */
+	/* Unsupported [RTS] */

 	default:
-		return (EINVAL);
+		return EINVAL;
 	}
-
+
 	/* validate the file descriptor and that the file was opened
 	 * for the appropriate read / write access.
*/ proc_fdlock(entryp->procp); - result = fp_lookup( entryp->procp, entryp->aiocb.aio_fildes, &fp , 1); - if ( result == 0 ) { - if ( (fp->f_fglob->fg_flag & flag) == 0 ) { + result = fp_lookup( entryp->procp, entryp->aiocb.aio_fildes, &fp, 1); + if (result == 0) { + if ((fp->f_fglob->fg_flag & flag) == 0) { /* we don't have read or write access */ result = EBADF; - } - else if ( FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE ) { + } else if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) { /* this is not a file */ result = ESPIPE; - } else - fp->f_flags |= FP_AIOISSUED; + } else { + fp->f_flags |= FP_AIOISSUED; + } - fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp , 1); - } - else { + fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 1); + } else { result = EBADF; } - - proc_fdunlock(entryp->procp); - return( result ); + proc_fdunlock(entryp->procp); + return result; } /* aio_validate */ -static int +static int aio_increment_total_count() { return OSIncrementAtomic(&aio_anchor.aio_total_count); } -static int +static int aio_decrement_total_count() { int old = OSDecrementAtomic(&aio_anchor.aio_total_count); @@ -2215,77 +2204,70 @@ aio_decrement_total_count() } static int -aio_get_process_count(proc_t procp ) +aio_get_process_count(proc_t procp ) { return procp->p_aio_total_count; - } /* aio_get_process_count */ static int -aio_get_all_queues_count( void ) +aio_get_all_queues_count( void ) { return aio_anchor.aio_total_count; - } /* aio_get_all_queues_count */ /* - * do_aio_completion. Handle async IO completion. + * do_aio_completion. Handle async IO completion. */ static void -do_aio_completion( aio_workq_entry *entryp ) +do_aio_completion( aio_workq_entry *entryp ) { - - boolean_t lastLioCompleted = FALSE; - aio_lio_context *lio_context = NULL; + boolean_t lastLioCompleted = FALSE; + aio_lio_context *lio_context = NULL; int waiter = 0; - + lio_context = (aio_lio_context *)entryp->group_tag; - + if (lio_context != NULL) { - aio_proc_lock_spin(entryp->procp); /* Account for this I/O completing. */ - lio_context->io_completed++; - + lio_context->io_completed++; + /* Are we done with this lio context? */ - if (lio_context->io_issued == lio_context->io_completed) { - lastLioCompleted = TRUE; - } - + if (lio_context->io_issued == lio_context->io_completed) { + lastLioCompleted = TRUE; + } + waiter = lio_context->io_waiter; - + /* explicit wakeup of lio_listio() waiting in LIO_WAIT */ if ((entryp->flags & AIO_LIO_NOTIFY) && (lastLioCompleted) && (waiter != 0)) { /* wake up the waiter */ wakeup(lio_context); } - + aio_proc_unlock(entryp->procp); } - - if ( entryp->aiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL && - (entryp->flags & AIO_DISABLE) == 0 ) { - - boolean_t performSignal = FALSE; - if (lio_context == NULL) { - performSignal = TRUE; - } - else { - /* + + if (entryp->aiocb.aio_sigevent.sigev_notify == SIGEV_SIGNAL && + (entryp->flags & AIO_DISABLE) == 0) { + boolean_t performSignal = FALSE; + if (lio_context == NULL) { + performSignal = TRUE; + } else { + /* * If this was the last request in the group and a signal * is desired, send one. 
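 *
 * [Editor's note: the psignal() below is what completes the SIGEV_SIGNAL
 * notification a caller requested through aio_sigevent.  A rough sketch of
 * the userland side (hypothetical names; a real consumer would install the
 * handler with sigaction and keep handler work async-signal-safe):
 *
 *	void on_aio_done(int signo) {
 *		// later, outside the handler:
 *		//   if (aio_error(&cb) == 0) { ssize_t n = aio_return(&cb); }
 *	}
 *	signal(SIGUSR1, on_aio_done);
 * ]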
 			 */
 			performSignal = lastLioCompleted;
-		}
-
-		if (performSignal) {
-
-			KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_sig)) | DBG_FUNC_NONE,
-				      (int)entryp->procp, (int)entryp->uaiocbp,
-				      entryp->aiocb.aio_sigevent.sigev_signo, 0, 0 );
-
+		}
+
+		if (performSignal) {
+			KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_sig)) | DBG_FUNC_NONE,
+			    (int)entryp->procp, (int)entryp->uaiocbp,
+			    entryp->aiocb.aio_sigevent.sigev_signo, 0, 0 );
+
 			psignal( entryp->procp, entryp->aiocb.aio_sigevent.sigev_signo );
 		}
 	}

@@ -2293,81 +2275,80 @@ do_aio_completion( aio_workq_entry *entryp )
 	if ((entryp->flags & AIO_EXIT_WAIT) && (entryp->flags & AIO_CLOSE_WAIT)) {
 		panic("Close and exit flags set at the same time\n");
 	}
-
+
 	/*
 	 * need to handle case where a process is trying to exit, exec, or
 	 * close and is currently waiting for active aio requests to complete.
-	 * If AIO_CLEANUP_WAIT is set then we need to look to see if there are any
-	 * other requests in the active queue for this process.  If there are
+	 * If AIO_CLEANUP_WAIT is set then we need to look to see if there are any
+	 * other requests in the active queue for this process.  If there are
 	 * none then wakeup using the AIO_CLEANUP_SLEEP_CHAN tsleep channel.
 	 * If there are some still active then do nothing - we only want to
-	 * wakeup when all active aio requests for the process are complete.
+	 * wakeup when all active aio requests for the process are complete.
 	 *
 	 * Don't need to lock the entry or proc to check the cleanup flag.  It can only be
-	 * set for cancellation, while the entryp is still on a proc list; now it's
+	 * set for cancellation, while the entryp is still on a proc list; now it's
 	 * off, so that flag is already set if it's going to be.
 	 */
-	if ( (entryp->flags & AIO_EXIT_WAIT) != 0 ) {
-		int		active_requests;
+	if ((entryp->flags & AIO_EXIT_WAIT) != 0) {
+		int             active_requests;
+
+		KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wait)) | DBG_FUNC_NONE,
+		    (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );

-		KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wait)) | DBG_FUNC_NONE,
-			      (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
-
 		aio_proc_lock_spin(entryp->procp);
 		active_requests = aio_active_requests_for_process( entryp->procp );
-		if ( active_requests < 1 ) {
-			/*
+		if (active_requests < 1) {
+			/*
 			 * no active aio requests for this process, continue exiting.  In this
 			 * case, there should be no one else waiting on the proc in AIO...
 			 */
 			wakeup_one((caddr_t)&entryp->procp->AIO_CLEANUP_SLEEP_CHAN);
 			aio_proc_unlock(entryp->procp);

-			KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wake)) | DBG_FUNC_NONE,
-				      (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
+			KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wake)) | DBG_FUNC_NONE,
+			    (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
 		} else {
 			aio_proc_unlock(entryp->procp);
 		}
 	}
-
-	if ( (entryp->flags & AIO_CLOSE_WAIT) != 0 ) {
-		int		active_requests;
-		KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wait)) | DBG_FUNC_NONE,
-			      (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
-
+
+	if ((entryp->flags & AIO_CLOSE_WAIT) != 0) {
+		int             active_requests;
+
+		KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wait)) | DBG_FUNC_NONE,
+		    (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
+
 		aio_proc_lock_spin(entryp->procp);
 		active_requests = aio_proc_active_requests_for_file( entryp->procp, entryp->aiocb.aio_fildes);
-		if ( active_requests < 1 ) {
+		if (active_requests < 1) {
 			/* Can't wakeup_one(); multiple closes might be in progress. */
 			wakeup(&entryp->procp->AIO_CLEANUP_SLEEP_CHAN);
 			aio_proc_unlock(entryp->procp);

-			KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wake)) | DBG_FUNC_NONE,
-				      (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
+			KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_cleanup_wake)) | DBG_FUNC_NONE,
+			    (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 );
 		} else {
 			aio_proc_unlock(entryp->procp);
 		}
 	}

-	/*
+	/*
 	 * A thread in aio_suspend() wants to know about completed IOs.  If it checked
 	 * the done list before we moved our AIO there, then it already asserted its wait,
 	 * and we can wake it up without holding the lock.  If it checked the list after
 	 * we did our move, then it already has seen the AIO that we moved.  Hence, we
 	 * can do our wakeup without holding the lock.
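 *
 * [Editor's note: the waiter on AIO_SUSPEND_SLEEP_CHAN is a thread blocked
 * in aio_suspend(2).  A minimal sketch of that caller, with hypothetical
 * aiocb names and no error handling:
 *
 *	const struct aiocb *list[2] = { &cb1, &cb2 };
 *	// blocks until one of the listed requests completes, a signal
 *	// arrives, or the (here unused) timeout expires
 *	aio_suspend(list, 2, NULL);
 * ]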
*/ - wakeup( (caddr_t) &entryp->procp->AIO_SUSPEND_SLEEP_CHAN ); - KERNEL_DEBUG( (BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_suspend_wake)) | DBG_FUNC_NONE, - (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 ); + wakeup((caddr_t) &entryp->procp->AIO_SUSPEND_SLEEP_CHAN ); + KERNEL_DEBUG((BSDDBG_CODE(DBG_BSD_AIO, AIO_completion_suspend_wake)) | DBG_FUNC_NONE, + (int)entryp->procp, (int)entryp->uaiocbp, 0, 0, 0 ); - /* + /* * free the LIO context if the last lio completed and no thread is * waiting */ - if (lastLioCompleted && (waiter == 0)) - free_lio_context (lio_context); - - + if (lastLioCompleted && (waiter == 0)) { + free_lio_context(lio_context); + } } /* do_aio_completion */ @@ -2377,29 +2358,29 @@ do_aio_completion( aio_workq_entry *entryp ) static int do_aio_read( aio_workq_entry *entryp ) { - struct fileproc *fp; - int error; - struct vfs_context context; + struct fileproc *fp; + int error; + struct vfs_context context; - if ( (error = fp_lookup(entryp->procp, entryp->aiocb.aio_fildes, &fp , 0)) ) - return(error); - if ( (fp->f_fglob->fg_flag & FREAD) == 0 ) { + if ((error = fp_lookup(entryp->procp, entryp->aiocb.aio_fildes, &fp, 0))) { + return error; + } + if ((fp->f_fglob->fg_flag & FREAD) == 0) { fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); - return(EBADF); + return EBADF; } - context.vc_thread = entryp->thread; /* XXX */ + context.vc_thread = entryp->thread; /* XXX */ context.vc_ucred = fp->f_fglob->fg_cred; - error = dofileread(&context, fp, - entryp->aiocb.aio_buf, - entryp->aiocb.aio_nbytes, - entryp->aiocb.aio_offset, FOF_OFFSET, - &entryp->returnval); + error = dofileread(&context, fp, + entryp->aiocb.aio_buf, + entryp->aiocb.aio_nbytes, + entryp->aiocb.aio_offset, FOF_OFFSET, + &entryp->returnval); fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); - - return( error ); - + + return error; } /* do_aio_read */ @@ -2409,41 +2390,42 @@ do_aio_read( aio_workq_entry *entryp ) static int do_aio_write( aio_workq_entry *entryp ) { - struct fileproc *fp; - int error, flags; - struct vfs_context context; + struct fileproc *fp; + int error, flags; + struct vfs_context context; - if ( (error = fp_lookup(entryp->procp, entryp->aiocb.aio_fildes, &fp , 0)) ) - return(error); - if ( (fp->f_fglob->fg_flag & FWRITE) == 0 ) { + if ((error = fp_lookup(entryp->procp, entryp->aiocb.aio_fildes, &fp, 0))) { + return error; + } + if ((fp->f_fglob->fg_flag & FWRITE) == 0) { fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); - return(EBADF); + return EBADF; } flags = FOF_PCRED; - if ( (fp->f_fglob->fg_flag & O_APPEND) == 0 ) { + if ((fp->f_fglob->fg_flag & O_APPEND) == 0) { flags |= FOF_OFFSET; } - context.vc_thread = entryp->thread; /* XXX */ + context.vc_thread = entryp->thread; /* XXX */ context.vc_ucred = fp->f_fglob->fg_cred; /* NB: tell dofilewrite the offset, and to use the proc cred */ error = dofilewrite(&context, - fp, - entryp->aiocb.aio_buf, - entryp->aiocb.aio_nbytes, - entryp->aiocb.aio_offset, - flags, - &entryp->returnval); - - if (entryp->returnval) + fp, + entryp->aiocb.aio_buf, + entryp->aiocb.aio_nbytes, + entryp->aiocb.aio_offset, + flags, + &entryp->returnval); + + if (entryp->returnval) { fp_drop_written(entryp->procp, entryp->aiocb.aio_fildes, fp); - else + } else { fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0); + } - return( error ); - + return error; } /* do_aio_write */ @@ -2454,8 +2436,7 @@ do_aio_write( aio_workq_entry *entryp ) static int aio_active_requests_for_process(proc_t procp ) { - return( procp->p_aio_active_count ); - + return 
procp->p_aio_active_count;
 } /* aio_active_requests_for_process */

 /*
@@ -2483,11 +2464,11 @@ aio_proc_active_requests_for_file(proc_t procp, int fd)
 static int
 do_aio_fsync( aio_workq_entry *entryp )
 {
-	struct vfs_context	context;
-	struct vnode		*vp;
-	struct fileproc		*fp;
-	int			sync_flag;
-	int			error;
+	struct vfs_context      context;
+	struct vnode            *vp;
+	struct fileproc         *fp;
+	int             sync_flag;
+	int             error;

 	/*
 	 * We are never called unless either AIO_FSYNC or AIO_DSYNC are set.
@@ -2503,17 +2484,18 @@ do_aio_fsync( aio_workq_entry *entryp )
 	 * Metadata necessary for data retrieval must be committed to stable
 	 * storage in either case (file length, etc.).
 	 */
-	if (entryp->flags & AIO_FSYNC)
+	if (entryp->flags & AIO_FSYNC) {
 		sync_flag = MNT_WAIT;
-	else
+	} else {
 		sync_flag = MNT_DWAIT;
-
+	}
+
 	error = fp_getfvp( entryp->procp, entryp->aiocb.aio_fildes, &fp, &vp);
-	if ( error == 0 ) {
-		if ( (error = vnode_getwithref(vp)) ) {
-			fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
+	if (error == 0) {
+		if ((error = vnode_getwithref(vp))) {
+			fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
 			entryp->returnval = -1;
-			return(error);
+			return error;
 		}
 		context.vc_thread = current_thread();
 		context.vc_ucred = fp->f_fglob->fg_cred;
@@ -2524,62 +2506,59 @@ do_aio_fsync( aio_workq_entry *entryp )
 		fp_drop(entryp->procp, entryp->aiocb.aio_fildes, fp, 0);
 	}

-	if ( error != 0 )
+	if (error != 0) {
 		entryp->returnval = -1;
+	}

-	return( error );
-
+	return error;
 } /* do_aio_fsync */


 /*
- * is_already_queued - runs through our queues to see if the given
+ * is_already_queued - runs through our queues to see if the given
 * aiocbp / process is there.  Returns TRUE if there is a match
 * on any of our aio queues.
 *
 * Called with proc aio lock held (can be held spin)
 */
 static boolean_t
-is_already_queued(proc_t procp,
-		  user_addr_t aiocbp )
+is_already_queued(proc_t procp,
+    user_addr_t aiocbp )
 {
-	aio_workq_entry		*entryp;
-	boolean_t		result;
-
+	aio_workq_entry         *entryp;
+	boolean_t               result;
+
 	result = FALSE;
-
+
 	/* look for matches on our queue of async IO requests that have completed */
 	TAILQ_FOREACH( entryp, &procp->p_aio_doneq, aio_proc_link ) {
-		if ( aiocbp == entryp->uaiocbp ) {
+		if (aiocbp == entryp->uaiocbp) {
 			result = TRUE;
 			goto ExitThisRoutine;
 		}
 	}
-
+
 	/* look for matches on our queue of active async IO requests */
 	TAILQ_FOREACH( entryp, &procp->p_aio_activeq, aio_proc_link ) {
-		if ( aiocbp == entryp->uaiocbp ) {
+		if (aiocbp == entryp->uaiocbp) {
 			result = TRUE;
 			goto ExitThisRoutine;
 		}
 	}
-
+
 ExitThisRoutine:
-	return( result );
-
+	return result;
 } /* is_already_queued */


 static void
 free_lio_context(aio_lio_context* context)
 {
-
-#if DEBUG
+#if DEBUG
 	OSDecrementAtomic(&lio_contexts_alloced);
 #endif /* DEBUG */

 	FREE( context, M_TEMP );
-
 } /* free_lio_context */


@@ -2589,8 +2568,8 @@ free_lio_context(aio_lio_context* context)
 __private_extern__ void
 aio_init( void )
 {
-	int	i;
-
+	int             i;
+
 	aio_lock_grp_attr = lck_grp_attr_alloc_init();
 	aio_proc_lock_grp = lck_grp_alloc_init("aio_proc", aio_lock_grp_attr);
 	aio_entry_lock_grp = lck_grp_alloc_init("aio_entry", aio_lock_grp_attr);
@@ -2610,11 +2589,10 @@ aio_init( void )
 	}

-	i = sizeof( aio_workq_entry );
+	i = sizeof(aio_workq_entry);
 	aio_workq_zonep = zinit( i, i * aio_max_requests, i * aio_max_requests, "aiowq" );
-
+
 	_aio_create_worker_threads( aio_worker_threads );
-
 } /* aio_init */


 /*
@@ -2624,21 +2602,20 @@ aio_init( void )
 __private_extern__ void
 _aio_create_worker_threads( int num )
 {
-	int	i;
-
+	int             i;
+
 	/* create some worker threads
to handle the async IO requests */ - for ( i = 0; i < num; i++ ) { - thread_t myThread; - - if ( KERN_SUCCESS != kernel_thread_start((thread_continue_t)aio_work_thread, NULL, &myThread) ) { - printf( "%s - failed to create a work thread \n", __FUNCTION__ ); - } - else + for (i = 0; i < num; i++) { + thread_t myThread; + + if (KERN_SUCCESS != kernel_thread_start((thread_continue_t)aio_work_thread, NULL, &myThread)) { + printf( "%s - failed to create a work thread \n", __FUNCTION__ ); + } else { thread_deallocate(myThread); + } } - + return; - } /* _aio_create_worker_threads */ /* @@ -2647,7 +2624,7 @@ _aio_create_worker_threads( int num ) task_t get_aiotask(void) { - return ((struct uthread *)get_bsdthread_info(current_thread()))->uu_aio_task; + return ((struct uthread *)get_bsdthread_info(current_thread()))->uu_aio_task; } @@ -2657,8 +2634,8 @@ get_aiotask(void) * sizes in order to let downstream code always work on the same type of * aiocb (in our case that is a user_aiocb) */ -static void -do_munge_aiocb_user32_to_user( struct user32_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ) +static void +do_munge_aiocb_user32_to_user( struct user32_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ) { the_user_aiocbp->aio_fildes = my_aiocbp->aio_fildes; the_user_aiocbp->aio_offset = my_aiocbp->aio_offset; @@ -2676,19 +2653,19 @@ do_munge_aiocb_user32_to_user( struct user32_aiocb *my_aiocbp, struct user_aiocb //LP64 the_user_aiocbp->aio_sigevent.sigev_notify = my_aiocbp->aio_sigevent.sigev_notify; the_user_aiocbp->aio_sigevent.sigev_signo = my_aiocbp->aio_sigevent.sigev_signo; - the_user_aiocbp->aio_sigevent.sigev_value.size_equivalent.sival_int = - my_aiocbp->aio_sigevent.sigev_value.sival_int; - the_user_aiocbp->aio_sigevent.sigev_notify_function = - CAST_USER_ADDR_T(my_aiocbp->aio_sigevent.sigev_notify_function); - the_user_aiocbp->aio_sigevent.sigev_notify_attributes = - CAST_USER_ADDR_T(my_aiocbp->aio_sigevent.sigev_notify_attributes); + the_user_aiocbp->aio_sigevent.sigev_value.size_equivalent.sival_int = + my_aiocbp->aio_sigevent.sigev_value.sival_int; + the_user_aiocbp->aio_sigevent.sigev_notify_function = + CAST_USER_ADDR_T(my_aiocbp->aio_sigevent.sigev_notify_function); + the_user_aiocbp->aio_sigevent.sigev_notify_attributes = + CAST_USER_ADDR_T(my_aiocbp->aio_sigevent.sigev_notify_attributes); } /* Similar for 64-bit user process, so that we don't need to satisfy * the alignment constraints of the original user64_aiocb */ -static void -do_munge_aiocb_user64_to_user( struct user64_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ) +static void +do_munge_aiocb_user64_to_user( struct user64_aiocb *my_aiocbp, struct user_aiocb *the_user_aiocbp ) { the_user_aiocbp->aio_fildes = my_aiocbp->aio_fildes; the_user_aiocbp->aio_offset = my_aiocbp->aio_offset; @@ -2696,13 +2673,13 @@ do_munge_aiocb_user64_to_user( struct user64_aiocb *my_aiocbp, struct user_aiocb the_user_aiocbp->aio_nbytes = my_aiocbp->aio_nbytes; the_user_aiocbp->aio_reqprio = my_aiocbp->aio_reqprio; the_user_aiocbp->aio_lio_opcode = my_aiocbp->aio_lio_opcode; - + the_user_aiocbp->aio_sigevent.sigev_notify = my_aiocbp->aio_sigevent.sigev_notify; the_user_aiocbp->aio_sigevent.sigev_signo = my_aiocbp->aio_sigevent.sigev_signo; - the_user_aiocbp->aio_sigevent.sigev_value.size_equivalent.sival_int = - my_aiocbp->aio_sigevent.sigev_value.size_equivalent.sival_int; - the_user_aiocbp->aio_sigevent.sigev_notify_function = - my_aiocbp->aio_sigevent.sigev_notify_function; - the_user_aiocbp->aio_sigevent.sigev_notify_attributes 
=
-	    my_aiocbp->aio_sigevent.sigev_notify_attributes;
+	the_user_aiocbp->aio_sigevent.sigev_value.size_equivalent.sival_int =
+	    my_aiocbp->aio_sigevent.sigev_value.size_equivalent.sival_int;
+	the_user_aiocbp->aio_sigevent.sigev_notify_function =
+	    my_aiocbp->aio_sigevent.sigev_notify_function;
+	the_user_aiocbp->aio_sigevent.sigev_notify_attributes =
+	    my_aiocbp->aio_sigevent.sigev_notify_attributes;
 }
diff --git a/bsd/kern/kern_asl.c b/bsd/kern/kern_asl.c
index 3972cda2d..09e1cd059 100644
--- a/bsd/kern/kern_asl.c
+++ b/bsd/kern/kern_asl.c
@@ -47,17 +47,17 @@
 #include

 /* String to append as format modifier for each key-value pair */
-#define KASL_KEYVAL_FMT	"[%s %s] "
-#define KASL_KEYVAL_FMT_LEN	(sizeof(KASL_KEYVAL_FMT) - 1)
+#define KASL_KEYVAL_FMT         "[%s %s] "
+#define KASL_KEYVAL_FMT_LEN     (sizeof(KASL_KEYVAL_FMT) - 1)

-#define KASL_NEWLINE_CHAR	"\n"
-#define KASL_NEWLINE_CHAR_LEN	(sizeof(KASL_NEWLINE_CHAR) - 1)
+#define KASL_NEWLINE_CHAR       "\n"
+#define KASL_NEWLINE_CHAR_LEN   (sizeof(KASL_NEWLINE_CHAR) - 1)

 /* Length of entire ASL message in 10 characters.  Kernel defaults to zero */
-#define KASL_ASL_MSG_LEN	"         0"
+#define KASL_ASL_MSG_LEN        "         0"

 /* Length of default format string to be used by printf */
-#define MAX_FMT_LEN	256
+#define MAX_FMT_LEN     256


 /* Function to print input values as key-value pairs in format
@@ -68,13 +68,13 @@
 * (b) variable number of arguments passed to this function.
 *
 * Parameters -
- *	level - Priority level for this ASL message
+ *	level     - Priority level for this ASL message
 *	facility - Facility for this ASL message.
 *	num_pairs - Number of key-value pairs provided by vargs argument.
- *	vargs - List of key-value pairs.
- *	... - Additional key-value pairs (apart from vargs) as variable
- *	      argument list.  A NULL value indicates the end of the
- *	      variable argument list.
+ *	vargs     - List of key-value pairs.
+ *	...       - Additional key-value pairs (apart from vargs) as variable
+ *	            argument list.  A NULL value indicates the end of the
+ *	            variable argument list.
 *
 * Returns -
 *	zero	- On success, when it prints all key-value pairs provided.
@@ -85,7 +85,7 @@
 int
 kern_asl_msg_va(int level, const char *facility, int num_pairs, va_list vargs, ...)
 {
 	int err = 0;
-	char fmt[MAX_FMT_LEN];	/* Format string to use with vaddlog */
+	char fmt[MAX_FMT_LEN];  /* Format string to use with vaddlog */
 	int calc_pairs = 0;
 	size_t len;
 	int i;
@@ -100,13 +100,13 @@ kern_asl_msg_va(int level, const char *facility, int num_pairs, va_list vargs, .
 	 */
 	if (facility) {
 		snprintf(fmt, MAX_FMT_LEN, "%s [%s %d] [%s %s] ",
-		    KASL_ASL_MSG_LEN,
-		    KASL_KEY_LEVEL, level,
-		    KASL_KEY_FACILITY, facility);
+		    KASL_ASL_MSG_LEN,
+		    KASL_KEY_LEVEL, level,
+		    KASL_KEY_FACILITY, facility);
 	} else {
 		snprintf(fmt, MAX_FMT_LEN, "%s [%s %d] ",
-		    KASL_ASL_MSG_LEN,
-		    KASL_KEY_LEVEL, level);
+		    KASL_ASL_MSG_LEN,
+		    KASL_KEY_LEVEL, level);
 	}

 	/* Determine the number of key-value format string [%s %s] that
@@ -197,7 +197,7 @@ kern_asl_msg_va(int level, const char *facility, int num_pairs, va_list vargs, .
 	 * doesn't grok ASL either.
 	 */

-	return (err);
+	return err;
 }

 int
@@ -215,11 +215,11 @@ kern_asl_msg(int level, const char *facility, int num_pairs, ...)
 }

 /* Search if given string contains '[' and ']'.  If any, escape it by
- * prefixing with a '\'.  If the length of the string is not big enough,
+ * prefixing with a '\'.  If the length of the string is not big enough,
 * no changes are done and error is returned.
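 *
 * For example (editor's illustration, hypothetical buffer): with
 * str = "[ab]" and len = 5 (four characters plus the NUL), escaping
 * yields "\[ab\]", two bytes longer, so the call needs buflen >= 7
 * and otherwise fails with ENOSPC.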
 *
 * Parameters -
- *	str - string that can contain '[' or ']', should be NULL terminated
+ *	str    - string that can contain '[' or ']', should be NULL terminated
 *	len - length, in bytes, of valid data, including NULL character.
 *	buflen - size of buffer that contains the string
 */
@@ -244,7 +244,7 @@ escape_str(char *str, int len, int buflen)
 	 * characters */
 	if ((buflen - len) < count) {
-		return (ENOSPC);
+		return ENOSPC;
 	}

 	src = str + len;
@@ -260,5 +260,5 @@ escape_str(char *str, int len, int buflen)
 		}
 	}

-	return (0);
+	return 0;
 }
diff --git a/bsd/kern/kern_authorization.c b/bsd/kern/kern_authorization.c
index 574cc24c8..574d4b198 100644
--- a/bsd/kern/kern_authorization.c
+++ b/bsd/kern/kern_authorization.c
@@ -2,7 +2,7 @@
 * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- *
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
- *
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

@@ -31,7 +31,7 @@
 */

 #include
-#include	/* XXX trim includes */
+#include /* XXX trim includes */
 #include
 #include
 #include
@@ -59,8 +59,8 @@ lck_grp_t *kauth_lck_grp;
 static lck_mtx_t *kauth_scope_mtx;
-#define KAUTH_SCOPELOCK()	lck_mtx_lock(kauth_scope_mtx);
-#define KAUTH_SCOPEUNLOCK()	lck_mtx_unlock(kauth_scope_mtx);
+#define KAUTH_SCOPELOCK()       lck_mtx_lock(kauth_scope_mtx);
+#define KAUTH_SCOPEUNLOCK()     lck_mtx_unlock(kauth_scope_mtx);

 /*
 * We support listeners for scopes that have not been registered yet.
@@ -69,72 +69,72 @@ static lck_mtx_t *kauth_scope_mtx;
 * remove it from kauth_dangling_listeners and add it to the active scope.
 */
 struct kauth_listener {
-	TAILQ_ENTRY(kauth_listener)	kl_link;
-	const char *			kl_identifier;
-	kauth_scope_callback_t		kl_callback;
-	void *				kl_idata;
+	TAILQ_ENTRY(kauth_listener) kl_link;
+	const char *            kl_identifier;
+	kauth_scope_callback_t  kl_callback;
+	void *                  kl_idata;
 };

 /* XXX - kauth_todo - there is a race if a scope listener is removed while
 * we are in the kauth_authorize_action code path.  We intentionally do not take
- * a scope lock in order to get the best possible performance.  we will fix this
- * post Tiger.
- * Until the race is fixed our kext clients are responsible for all active
+ * a scope lock in order to get the best possible performance.  we will fix this
+ * post Tiger.
+ * Until the race is fixed our kext clients are responsible for all active
 * requests that may be in their callback code or on the way to their callback
 * code before they free kauth_listener.kl_callback or kauth_listener.kl_idata.
- * We keep copies of these in our kauth_local_listener in an attempt to limit
- * our expose to unlisten race.
+ * We keep copies of these in our kauth_local_listener in an attempt to limit
+ * our exposure to unlisten race.
 */
 struct kauth_local_listener {
-	kauth_listener_t	kll_listenerp;
-	kauth_scope_callback_t	kll_callback;
-	void *			kll_idata;
+	kauth_listener_t        kll_listenerp;
+	kauth_scope_callback_t  kll_callback;
+	void *                  kll_idata;
 };
 typedef struct kauth_local_listener *kauth_local_listener_t;

-static TAILQ_HEAD(,kauth_listener) kauth_dangling_listeners;
+static TAILQ_HEAD(, kauth_listener) kauth_dangling_listeners;

-/*
+/*
 * Scope listeners need to be reworked to be dynamic.
- * We intentionally used a static table to avoid locking issues with linked
+ * We intentionally used a static table to avoid locking issues with linked
 * lists.  The listeners may be called quite often.
 * XXX - kauth_todo
 */
 #define KAUTH_SCOPE_MAX_LISTENERS  15
 struct kauth_scope {
-	TAILQ_ENTRY(kauth_scope)	ks_link;
+	TAILQ_ENTRY(kauth_scope)        ks_link;
 	volatile struct kauth_local_listener  ks_listeners[KAUTH_SCOPE_MAX_LISTENERS];
-	const char *		ks_identifier;
-	kauth_scope_callback_t	ks_callback;
-	void *			ks_idata;
-	u_int			ks_flags;
+	const char *            ks_identifier;
+	kauth_scope_callback_t  ks_callback;
+	void *                  ks_idata;
+	u_int                   ks_flags;
 };

 /* values for kauth_scope.ks_flags */
-#define KS_F_HAS_LISTENERS	(1 << 0)
+#define KS_F_HAS_LISTENERS      (1 << 0)

-static TAILQ_HEAD(,kauth_scope) kauth_scopes;
+static TAILQ_HEAD(, kauth_scope) kauth_scopes;

 static int kauth_add_callback_to_scope(kauth_scope_t sp, kauth_listener_t klp);
-static void	kauth_scope_init(void);
+static void kauth_scope_init(void);
 static kauth_scope_t kauth_alloc_scope(const char *identifier, kauth_scope_callback_t callback, void *idata);
 static kauth_listener_t kauth_alloc_listener(const char *identifier, kauth_scope_callback_t callback, void *idata);
 #if 0
-static int	kauth_scope_valid(kauth_scope_t scope);
+static int      kauth_scope_valid(kauth_scope_t scope);
 #endif

-kauth_scope_t	kauth_scope_process;
-static int	kauth_authorize_process_callback(kauth_cred_t _credential, void *_idata, kauth_action_t _action,
+kauth_scope_t kauth_scope_process;
+static int kauth_authorize_process_callback(kauth_cred_t _credential, void *_idata, kauth_action_t _action,
    uintptr_t arg0, uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3);

-kauth_scope_t	kauth_scope_generic;
-static int	kauth_authorize_generic_callback(kauth_cred_t _credential, void *_idata, kauth_action_t _action,
+kauth_scope_t kauth_scope_generic;
+static int kauth_authorize_generic_callback(kauth_cred_t _credential, void *_idata, kauth_action_t _action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);

-kauth_scope_t	kauth_scope_fileop;
+kauth_scope_t kauth_scope_fileop;

-extern int	cansignal(struct proc *, kauth_cred_t, struct proc *, int);
-extern char *	get_pathbuff(void);
-extern void	release_pathbuff(char *path);
+extern int cansignal(struct proc *, kauth_cred_t, struct proc *, int);
+extern char * get_pathbuff(void);
+extern void release_pathbuff(char *path);

 /*
 * Initialization.
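 *
 * [Editor's note: the scope/listener declarations above are the kauth KPI
 * that kexts consume.  A rough sketch of a client, assuming the usual
 * <sys/kauth.h> declarations (hypothetical callback name; it defers every
 * decision, so it can only observe):
 *
 *	static kauth_listener_t fileop_listener;
 *
 *	static int
 *	log_fileop(kauth_cred_t cred, void *idata, kauth_action_t action,
 *	    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
 *	{
 *		if (action == KAUTH_FILEOP_OPEN && arg1 != 0) {
 *			printf("open: %s\n", (const char *)arg1);
 *		}
 *		return KAUTH_RESULT_DEFER;	// never ALLOW/DENY from a logger
 *	}
 *
 *	// e.g. in a kext start routine:
 *	fileop_listener = kauth_listen_scope(KAUTH_SCOPE_FILEOP, log_fileop, NULL);
 *	// and on teardown (subject to the unlisten race noted above):
 *	kauth_unlisten_scope(fileop_listener);
 * ]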
@@ -142,7 +142,7 @@ extern void release_pathbuff(char *path);
 void
 kauth_init(void)
 {
-	lck_grp_attr_t	*grp_attributes;
+	lck_grp_attr_t          *grp_attributes;

 	TAILQ_INIT(&kauth_scopes);
 	TAILQ_INIT(&kauth_dangling_listeners);
@@ -183,19 +183,20 @@ kauth_scope_init(void)
 static kauth_scope_t
 kauth_alloc_scope(const char *identifier, kauth_scope_callback_t callback, void *idata)
 {
-	kauth_scope_t	sp;
+	kauth_scope_t   sp;

 	/*
 	 * Allocate and populate the scope structure.
 	 */
 	MALLOC(sp, kauth_scope_t, sizeof(*sp), M_KAUTH, M_WAITOK | M_ZERO);
-	if (sp == NULL)
-		return(NULL);
+	if (sp == NULL) {
+		return NULL;
+	}
 	sp->ks_flags = 0;
 	sp->ks_identifier = identifier;
 	sp->ks_idata = idata;
 	sp->ks_callback = callback;
-	return(sp);
+	return sp;
 }

 static kauth_listener_t
@@ -207,22 +208,24 @@ kauth_alloc_listener(const char *identifier, kauth_scope_callback_t callback, vo
 	 * Allocate and populate the listener structure.
 	 */
 	MALLOC(lsp, kauth_listener_t, sizeof(*lsp), M_KAUTH, M_WAITOK);
-	if (lsp == NULL)
-		return(NULL);
+	if (lsp == NULL) {
+		return NULL;
+	}
 	lsp->kl_identifier = identifier;
 	lsp->kl_idata = idata;
 	lsp->kl_callback = callback;
-	return(lsp);
+	return lsp;
 }

 kauth_scope_t
 kauth_register_scope(const char *identifier, kauth_scope_callback_t callback, void *idata)
 {
-	kauth_scope_t	sp, tsp;
-	kauth_listener_t klp;
+	kauth_scope_t   sp, tsp;
+	kauth_listener_t        klp;

-	if ((sp = kauth_alloc_scope(identifier, callback, idata)) == NULL)
-		return(NULL);
+	if ((sp = kauth_alloc_scope(identifier, callback, idata)) == NULL) {
+		return NULL;
+	}

 	/*
 	 * Lock the list and insert.
@@ -230,11 +233,11 @@ kauth_register_scope(const char *identifier, kauth_scope_callback_t callback, vo
 	KAUTH_SCOPELOCK();
 	TAILQ_FOREACH(tsp, &kauth_scopes, ks_link) {
 		/* duplicate! */
-		if (strncmp(tsp->ks_identifier, identifier,
-			    strlen(tsp->ks_identifier) + 1) == 0) {
+		if (strncmp(tsp->ks_identifier, identifier,
+		    strlen(tsp->ks_identifier) + 1) == 0) {
 			KAUTH_SCOPEUNLOCK();
 			FREE(sp, M_KAUTH);
-			return(NULL);
+			return NULL;
 		}
 	}
 	TAILQ_INSERT_TAIL(&kauth_scopes, sp, ks_link);
@@ -248,14 +251,13 @@ kauth_register_scope(const char *identifier, kauth_scope_callback_t callback, vo
restart:
 	TAILQ_FOREACH(klp, &kauth_dangling_listeners, kl_link) {
 		if (strncmp(klp->kl_identifier, sp->ks_identifier,
-			    strlen(klp->kl_identifier) + 1) == 0) {
+		    strlen(klp->kl_identifier) + 1) == 0) {
 			/* found a match on the dangling listener list.  add it to
 			 * the active scope.
 			 */
 			if (kauth_add_callback_to_scope(sp, klp) == 0) {
 				TAILQ_REMOVE(&kauth_dangling_listeners, klp, kl_link);
-			}
-			else {
+			} else {
#if 0
 				printf("%s - failed to add listener to scope \"%s\" \n", __FUNCTION__, sp->ks_identifier);
#endif
@@ -266,7 +268,7 @@ restart:
 	}
 	KAUTH_SCOPEUNLOCK();
-	return(sp);
+	return sp;
 }

@@ -274,18 +276,18 @@ restart:
 void
 kauth_deregister_scope(kauth_scope_t scope)
 {
-	int		i;
+	int             i;

 	KAUTH_SCOPELOCK();

 	TAILQ_REMOVE(&kauth_scopes, scope, ks_link);
-
+
 	/* relocate listeners back to the waiting list */
 	for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
 		if (scope->ks_listeners[i].kll_listenerp != NULL) {
 			TAILQ_INSERT_TAIL(&kauth_dangling_listeners, scope->ks_listeners[i].kll_listenerp, kl_link);
 			scope->ks_listeners[i].kll_listenerp = NULL;
-			/*
+			/*
 			 * XXX - kauth_todo - WARNING, do not clear kll_callback or
 			 * kll_idata here.
 they are part of our scope unlisten race hack */
@@ -293,7 +295,7 @@ kauth_deregister_scope(kauth_scope_t scope)
 	}
 	KAUTH_SCOPEUNLOCK();
 	FREE(scope, M_KAUTH);
-
+
 	return;
 }

@@ -301,10 +303,11 @@ kauth_listen_scope(const char *identifier, kauth_scope_callback_t callback, void
 {
 	kauth_listener_t klp;
-	kauth_scope_t	sp;
+	kauth_scope_t   sp;

-	if ((klp = kauth_alloc_listener(identifier, callback, idata)) == NULL)
-		return(NULL);
+	if ((klp = kauth_alloc_listener(identifier, callback, idata)) == NULL) {
+		return NULL;
+	}

 	/*
 	 * Lock the scope list and check to see whether this scope already exists.
@@ -312,34 +315,34 @@ kauth_listen_scope(const char *identifier, kauth_scope_callback_t callback, void
 	KAUTH_SCOPELOCK();
 	TAILQ_FOREACH(sp, &kauth_scopes, ks_link) {
 		if (strncmp(sp->ks_identifier, identifier,
-			    strlen(sp->ks_identifier) + 1) == 0) {
+		    strlen(sp->ks_identifier) + 1) == 0) {
 			/* scope exists, add it to scope listener table */
 			if (kauth_add_callback_to_scope(sp, klp) == 0) {
 				KAUTH_SCOPEUNLOCK();
-				return(klp);
+				return klp;
 			}
 			/* table already full */
 			KAUTH_SCOPEUNLOCK();
 			FREE(klp, M_KAUTH);
-			return(NULL);
+			return NULL;
 		}
 	}
-
+
 	/* scope doesn't exist, put on waiting list. */
 	TAILQ_INSERT_TAIL(&kauth_dangling_listeners, klp, kl_link);

 	KAUTH_SCOPEUNLOCK();

-	return(klp);
+	return klp;
 }

 void
 kauth_unlisten_scope(kauth_listener_t listener)
 {
-	kauth_scope_t	sp;
-	kauth_listener_t klp;
-	int		i, listener_count, do_free;
-
+	kauth_scope_t   sp;
+	kauth_listener_t        klp;
+	int             i, listener_count, do_free;
+
 	KAUTH_SCOPELOCK();

 	/* search the active scope for this listener */
@@ -351,12 +354,11 @@ kauth_unlisten_scope(kauth_listener_t listener)
 			if (sp->ks_listeners[i].kll_listenerp == listener) {
 				sp->ks_listeners[i].kll_listenerp = NULL;
 				do_free = 1;
-				/*
+				/*
 				 * XXX - kauth_todo - WARNING, do not clear kll_callback or
 				 * kll_idata here.  they are part of our scope unlisten race hack
 				 */
-			}
-			else if (sp->ks_listeners[i].kll_listenerp != NULL) {
+			} else if (sp->ks_listeners[i].kll_listenerp != NULL) {
 				listener_count++;
 			}
 		}
@@ -401,36 +403,39 @@ kauth_unlisten_scope(kauth_listener_t listener)
 	int result, ret, i;

 	/* ask the scope */
-	if (scope->ks_callback != NULL)
+	if (scope->ks_callback != NULL) {
 		result = scope->ks_callback(credential, scope->ks_idata, action, arg0, arg1, arg2, arg3);
-	else
+	} else {
 		result = KAUTH_RESULT_DEFER;
+	}

 	/* check with listeners */
 	if ((scope->ks_flags & KS_F_HAS_LISTENERS) != 0) {
 		for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) {
-			/* XXX - kauth_todo - there is a race here if listener is removed - we will fix this post Tiger.
+			/* XXX - kauth_todo - there is a race here if listener is removed - we will fix this post Tiger.
 			 * Until the race is fixed our kext clients are responsible for all active requests that may
 			 * be in their callbacks or on the way to their callbacks before they free kl_callback or kl_idata.
-			 * We keep copies of these in our kauth_local_listener in an attempt to limit our expose to
-			 * unlisten race.
+			 * We keep copies of these in our kauth_local_listener in an attempt to limit our exposure to
+			 * the unlisten race.
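 *
 * [Editor's note on how verdicts combine in the loop below: each listener's
 * return value replaces the running result when the listener says DENY, or
 * when the running result is still DEFER.  So one DENY sticks, an ALLOW can
 * settle a DEFER, and the caller maps anything but a final ALLOW to EPERM.
 * For instance:
 *
 *	scope DEFER, listeners DEFER then ALLOW   -> ALLOW -> 0
 *	scope ALLOW, one listener DENY            -> DENY  -> EPERM
 *	scope DEFER, no listeners                 -> DEFER -> EPERM
 * ]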
*/ - if (scope->ks_listeners[i].kll_listenerp == NULL || - scope->ks_listeners[i].kll_callback == NULL) + if (scope->ks_listeners[i].kll_listenerp == NULL || + scope->ks_listeners[i].kll_callback == NULL) { continue; + } ret = scope->ks_listeners[i].kll_callback( - credential, scope->ks_listeners[i].kll_idata, - action, arg0, arg1, arg2, arg3); + credential, scope->ks_listeners[i].kll_idata, + action, arg0, arg1, arg2, arg3); if ((ret == KAUTH_RESULT_DENY) || - (result == KAUTH_RESULT_DEFER)) + (result == KAUTH_RESULT_DEFER)) { result = ret; + } } } /* we need an explicit allow, or the auth fails */ - /* XXX need a mechanism for auth failure to be signalled vs. denial */ - return(result == KAUTH_RESULT_ALLOW ? 0 : EPERM); + /* XXX need a mechanism for auth failure to be signalled vs. denial */ + return result == KAUTH_RESULT_ALLOW ? 0 : EPERM; } /* @@ -438,10 +443,9 @@ kauth_authorize_action(kauth_scope_t scope, kauth_cred_t credential, kauth_actio */ int kauth_authorize_allow(__unused kauth_cred_t credential, __unused void *idata, __unused kauth_action_t action, - __unused uintptr_t arg0, __unused uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3) + __unused uintptr_t arg0, __unused uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3) { - - return(KAUTH_RESULT_ALLOW); + return KAUTH_RESULT_ALLOW; } #if 0 @@ -451,15 +455,16 @@ kauth_authorize_allow(__unused kauth_cred_t credential, __unused void *idata, __ static int kauth_scope_valid(kauth_scope_t scope) { - kauth_scope_t sp; + kauth_scope_t sp; KAUTH_SCOPELOCK(); TAILQ_FOREACH(sp, &kauth_scopes, ks_link) { - if (sp == scope) + if (sp == scope) { break; + } } KAUTH_SCOPEUNLOCK(); - return((sp == NULL) ? 0 : 1); + return (sp == NULL) ? 0 : 1; } #endif @@ -470,35 +475,37 @@ kauth_scope_valid(kauth_scope_t scope) int kauth_authorize_process(kauth_cred_t credential, kauth_action_t action, struct proc *process, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3) { - return(kauth_authorize_action(kauth_scope_process, credential, action, (uintptr_t)process, arg1, arg2, arg3)); + return kauth_authorize_action(kauth_scope_process, credential, action, (uintptr_t)process, arg1, arg2, arg3); } static int kauth_authorize_process_callback(kauth_cred_t credential, __unused void *idata, kauth_action_t action, uintptr_t arg0, uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3) { - switch(action) { + switch (action) { case KAUTH_PROCESS_CANSIGNAL: panic("KAUTH_PROCESS_CANSIGNAL not implemented"); /* XXX credential wrong here */ /* arg0 - process to signal * arg1 - signal to send the process */ - if (cansignal(current_proc(), credential, (struct proc *)arg0, (int)arg1)) - return(KAUTH_RESULT_ALLOW); + if (cansignal(current_proc(), credential, (struct proc *)arg0, (int)arg1)) { + return KAUTH_RESULT_ALLOW; + } break; case KAUTH_PROCESS_CANTRACE: - /* current_proc() - process that will do the tracing - * arg0 - process to be traced - * arg1 - pointer to int - reason (errno) for denial + /* current_proc() - process that will do the tracing + * arg0 - process to be traced + * arg1 - pointer to int - reason (errno) for denial */ - if (cantrace(current_proc(), credential, (proc_t)arg0, (int *)arg1)) - return(KAUTH_RESULT_ALLOW); + if (cantrace(current_proc(), credential, (proc_t)arg0, (int *)arg1)) { + return KAUTH_RESULT_ALLOW; + } break; } /* no explicit result, so defer to others in the chain */ - return(KAUTH_RESULT_DEFER); + return KAUTH_RESULT_DEFER; } /* @@ -535,23 +542,23 @@ kauth_authorize_fileop_has_listeners(void) 
 * otherwise return 0
 */
 	if ((kauth_scope_fileop->ks_flags & KS_F_HAS_LISTENERS) != 0) {
-		return(1);
+		return 1;
 	}
-	return (0);
+	return 0;
 }

 int
 kauth_authorize_fileop(kauth_cred_t credential, kauth_action_t action, uintptr_t arg0, uintptr_t arg1)
 {
-	char		*namep = NULL;
-	int		name_len;
-	uintptr_t	arg2 = 0;
-
-	/* we do not have a primary handler for the fileop scope so bail out if
+	char            *namep = NULL;
+	int             name_len;
+	uintptr_t       arg2 = 0;
+
+	/* we do not have a primary handler for the fileop scope so bail out if
 	 * there are no listeners.
 	 */
 	if ((kauth_scope_fileop->ks_flags & KS_F_HAS_LISTENERS) == 0) {
-		return(0);
+		return 0;
 	}

 	if (action == KAUTH_FILEOP_OPEN ||
@@ -564,7 +571,7 @@ kauth_authorize_fileop(kauth_cred_t credential, kauth_action_t action, uintptr_t
 		name_len = MAXPATHLEN;
 		if (vn_getpath((vnode_t)arg0, namep, &name_len) != 0) {
 			release_pathbuff(namep);
-			return(0);
+			return 0;
 		}
 		if (action == KAUTH_FILEOP_CLOSE ||
 		    action == KAUTH_FILEOP_WILL_RENAME) {
@@ -577,14 +584,14 @@ kauth_authorize_fileop(kauth_cred_t credential, kauth_action_t action, uintptr_t
 			arg2 = arg1;
 		}
 		arg1 = (uintptr_t)namep;
-	}
+	}

 	kauth_authorize_action(kauth_scope_fileop, credential, action, arg0, arg1, arg2, 0);
-
+
 	if (namep != NULL) {
 		release_pathbuff(namep);
 	}
-
-	return(0);
+
+	return 0;
 }

 /*
@@ -594,26 +601,26 @@ kauth_authorize_fileop(kauth_cred_t credential, kauth_action_t action, uintptr_t
 int
 kauth_authorize_generic(kauth_cred_t credential, kauth_action_t action)
 {
-	if (credential == NULL)
+	if (credential == NULL) {
 		panic("auth against NULL credential");
+	}

-	return(kauth_authorize_action(kauth_scope_generic, credential, action, 0, 0, 0, 0));
-
+	return kauth_authorize_action(kauth_scope_generic, credential, action, 0, 0, 0, 0);
 }

 static int
 kauth_authorize_generic_callback(kauth_cred_t credential, __unused void *idata, kauth_action_t action,
-    __unused uintptr_t arg0, __unused uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3)
+    __unused uintptr_t arg0, __unused uintptr_t arg1, __unused uintptr_t arg2, __unused uintptr_t arg3)
 {
-	switch(action) {
+	switch (action) {
 	case KAUTH_GENERIC_ISSUSER:
 		/* XXX == 0 ? */
-		return((kauth_cred_getuid(credential) == 0) ?
-		    KAUTH_RESULT_ALLOW : KAUTH_RESULT_DENY);
+		return (kauth_cred_getuid(credential) == 0) ?
+		       KAUTH_RESULT_ALLOW : KAUTH_RESULT_DENY;
 	}

 	/* no explicit result, so defer to others in the chain */
-	return(KAUTH_RESULT_DEFER);
+	return KAUTH_RESULT_DEFER;
 }

 /*
@@ -637,7 +644,7 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval)
 	/* always allowed to do nothing */
 	if (eval->ae_requested == 0) {
 		eval->ae_result = KAUTH_RESULT_ALLOW;
-		return(0);
+		return 0;
 	}

 	eval->ae_residual = eval->ae_requested;
@@ -656,59 +663,66 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval)
 	KAUTH_DEBUG("    ACL - %d entries, initial residual %x", eval->ae_count, eval->ae_residual);
 	for (i = 0, ace = eval->ae_acl; i < eval->ae_count; i++, ace++) {
-
 		/*
 		 * Skip inherit-only entries.
 		 */
-		if (ace->ace_flags & KAUTH_ACE_ONLY_INHERIT)
+		if (ace->ace_flags & KAUTH_ACE_ONLY_INHERIT) {
 			continue;
+		}

 		/*
 		 * Expand generic rights, if appropriate.
*/ rights = ace->ace_rights; - if (rights & KAUTH_ACE_GENERIC_ALL) + if (rights & KAUTH_ACE_GENERIC_ALL) { rights |= eval->ae_exp_gall; - if (rights & KAUTH_ACE_GENERIC_READ) + } + if (rights & KAUTH_ACE_GENERIC_READ) { rights |= eval->ae_exp_gread; - if (rights & KAUTH_ACE_GENERIC_WRITE) + } + if (rights & KAUTH_ACE_GENERIC_WRITE) { rights |= eval->ae_exp_gwrite; - if (rights & KAUTH_ACE_GENERIC_EXECUTE) + } + if (rights & KAUTH_ACE_GENERIC_EXECUTE) { rights |= eval->ae_exp_gexec; + } /* * Determine whether this entry applies to the current request. This * saves us checking the GUID if the entry has nothing to do with what * we're currently doing. */ - switch(ace->ace_flags & KAUTH_ACE_KINDMASK) { + switch (ace->ace_flags & KAUTH_ACE_KINDMASK) { case KAUTH_ACE_PERMIT: - if (!(eval->ae_residual & rights)) + if (!(eval->ae_residual & rights)) { continue; + } break; case KAUTH_ACE_DENY: - if (!(eval->ae_requested & rights)) + if (!(eval->ae_requested & rights)) { continue; + } eval->ae_found_deny = TRUE; break; default: /* we don't recognise this ACE, skip it */ continue; } - + /* * Verify whether this entry applies to the credential. */ wkguid = kauth_wellknown_guid(&ace->ace_applicable); - switch(wkguid) { + switch (wkguid) { case KAUTH_WKG_OWNER: applies = eval->ae_options & KAUTH_AEVAL_IS_OWNER; break; case KAUTH_WKG_GROUP: - if (!gotguid || (eval->ae_options & KAUTH_AEVAL_IN_GROUP_UNKNOWN)) + if (!gotguid || (eval->ae_options & KAUTH_AEVAL_IN_GROUP_UNKNOWN)) { applies = ((ace->ace_flags & KAUTH_ACE_KINDMASK) == KAUTH_ACE_DENY); - else + } else { applies = eval->ae_options & KAUTH_AEVAL_IN_GROUP; + } break; /* we short-circuit these here rather than wasting time calling the group membership code */ case KAUTH_WKG_EVERYBODY: @@ -723,7 +737,7 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval) applies = !gotguid ? 0 : kauth_guid_equal(&guid, &ace->ace_applicable); KAUTH_DEBUG(" ACL - ACE applicable " K_UUID_FMT " caller " K_UUID_FMT " %smatched", K_UUID_ARG(ace->ace_applicable), K_UUID_ARG(guid), applies ? "" : "not "); - + if (!applies) { error = !gotguid ? ENOENT : kauth_cred_ismember_guid(cred, &ace->ace_applicable, &applies); /* @@ -734,7 +748,7 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval) */ if (error != 0) { KAUTH_DEBUG(" ACL[%d] - can't get membership, making pessimistic assumption", i); - switch(ace->ace_flags & KAUTH_ACE_KINDMASK) { + switch (ace->ace_flags & KAUTH_ACE_KINDMASK) { case KAUTH_ACE_PERMIT: applies = 0; break; @@ -749,13 +763,14 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval) KAUTH_DEBUG(" ACL - entry matches caller"); } } - if (!applies) + if (!applies) { continue; + } /* * Apply ACE to outstanding rights. */ - switch(ace->ace_flags & KAUTH_ACE_KINDMASK) { + switch (ace->ace_flags & KAUTH_ACE_KINDMASK) { case KAUTH_ACE_PERMIT: /* satisfy any rights that this ACE grants */ eval->ae_residual = eval->ae_residual & ~rights; @@ -763,7 +778,7 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval) /* all rights satisfied? 
*/ if (eval->ae_residual == 0) { eval->ae_result = KAUTH_RESULT_ALLOW; - return(0); + return 0; } break; case KAUTH_ACE_DENY: @@ -771,7 +786,7 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval) if (eval->ae_requested & rights) { KAUTH_DEBUG(" ACL[%d] - denying based on %x", i, rights); eval->ae_result = KAUTH_RESULT_DENY; - return(0); + return 0; } break; default: @@ -781,7 +796,7 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval) } /* if not permitted, defer to other modes of authorisation */ eval->ae_result = KAUTH_RESULT_DEFER; - return(0); + return 0; } /* @@ -796,7 +811,7 @@ kauth_acl_evaluate(kauth_cred_t cred, kauth_acl_eval_t eval) int kauth_acl_inherit(vnode_t dvp, kauth_acl_t initial, kauth_acl_t *product, int isdir, vfs_context_t ctx) { - int entries, error, index; + int entries, error, index; unsigned int i; struct vnode_attr dva; kauth_acl_t inherit, result; @@ -825,10 +840,11 @@ kauth_acl_inherit(vnode_t dvp, kauth_acl_t initial, kauth_acl_t *product, int is VATTR_WANTED(&dva, va_acl); if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) { KAUTH_DEBUG(" ERROR - could not get parent directory ACL for inheritance"); - return(error); + return error; } - if (VATTR_IS_SUPPORTED(&dva, va_acl)) + if (VATTR_IS_SUPPORTED(&dva, va_acl)) { inherit = dva.va_acl; + } } /* @@ -838,8 +854,9 @@ kauth_acl_inherit(vnode_t dvp, kauth_acl_t initial, kauth_acl_t *product, int is entries = 0; if (inherit != NULL) { for (i = 0; i < inherit->acl_entrycount; i++) { - if (inherit->acl_ace[i].ace_flags & (isdir ? KAUTH_ACE_DIRECTORY_INHERIT : KAUTH_ACE_FILE_INHERIT)) + if (inherit->acl_ace[i].ace_flags & (isdir ? KAUTH_ACE_DIRECTORY_INHERIT : KAUTH_ACE_FILE_INHERIT)) { entries++; + } } } @@ -852,10 +869,11 @@ kauth_acl_inherit(vnode_t dvp, kauth_acl_t initial, kauth_acl_t *product, int is } if (initial != NULL) { - if (initial->acl_entrycount != KAUTH_FILESEC_NOACL) + if (initial->acl_entrycount != KAUTH_FILESEC_NOACL) { entries += initial->acl_entrycount; - else + } else { initial = NULL; + } } /* @@ -869,7 +887,7 @@ kauth_acl_inherit(vnode_t dvp, kauth_acl_t initial, kauth_acl_t *product, int is error = 0; goto out; } - + /* * Allocate the result buffer. 
*/ @@ -926,9 +944,10 @@ kauth_acl_inherit(vnode_t dvp, kauth_acl_t initial, kauth_acl_t *product, int is KAUTH_DEBUG(" INHERIT - product ACL has %d entries", index); error = 0; out: - if (inherit != NULL) + if (inherit != NULL) { kauth_acl_free(inherit); - return(error); + } + return error; } /* @@ -966,7 +985,7 @@ kauth_copyinfilesec(user_addr_t xsecurity, kauth_filesec_t *xsecdestpp) kauth_filesec_t fsec; u_int32_t count; size_t copysize; - + error = 0; fsec = NULL; @@ -990,16 +1009,18 @@ kauth_copyinfilesec(user_addr_t xsecurity, kauth_filesec_t *xsecdestpp) user_addr_t uaddr = mach_vm_round_page(known_bound); count = (uaddr - known_bound) / sizeof(struct kauth_ace); } - if (count > 32) + if (count > 32) { count = 32; + } restart: if ((fsec = kauth_filesec_alloc(count)) == NULL) { error = ENOMEM; goto out; } copysize = KAUTH_FILESEC_SIZE(count); - if ((error = copyin(xsecurity, (caddr_t)fsec, copysize)) != 0) + if ((error = copyin(xsecurity, (caddr_t)fsec, copysize)) != 0) { goto out; + } /* validate the filesec header */ if (fsec->fsec_magic != KAUTH_FILESEC_MAGIC) { @@ -1021,16 +1042,17 @@ restart: kauth_filesec_free(fsec); goto restart; } - + out: if (error) { - if (fsec) + if (fsec) { kauth_filesec_free(fsec); + } } else { *xsecdestpp = fsec; AUDIT_ARG(opaque, fsec, copysize); } - return(error); + return error; } /* @@ -1055,11 +1077,12 @@ out: kauth_filesec_t kauth_filesec_alloc(int count) { - kauth_filesec_t fsp; - + kauth_filesec_t fsp; + /* if the caller hasn't given us a valid size hint, assume the worst */ - if ((count < 0) || (count > KAUTH_ACL_MAX_ENTRIES)) - return(NULL); + if ((count < 0) || (count > KAUTH_ACL_MAX_ENTRIES)) { + return NULL; + } MALLOC(fsp, kauth_filesec_t, KAUTH_FILESEC_SIZE(count), M_KAUTH, M_WAITOK); if (fsp != NULL) { @@ -1069,8 +1092,8 @@ kauth_filesec_alloc(int count) fsp->fsec_entrycount = KAUTH_FILESEC_NOACL; fsp->fsec_flags = 0; } - return(fsp); -} + return fsp; +} /* * Free a kauth_filesec_t that was previous allocated, either by a direct @@ -1088,16 +1111,18 @@ void kauth_filesec_free(kauth_filesec_t fsp) { #ifdef KAUTH_DEBUG_ENABLE - if (fsp == KAUTH_FILESEC_NONE) + if (fsp == KAUTH_FILESEC_NONE) { panic("freeing KAUTH_FILESEC_NONE"); - if (fsp == KAUTH_FILESEC_WANTED) + } + if (fsp == KAUTH_FILESEC_WANTED) { panic("freeing KAUTH_FILESEC_WANTED"); + } #endif FREE(fsp, M_KAUTH); } /* - * Set the endianness of a filesec and an ACL; if 'acl' is NULL, use the + * Set the endianness of a filesec and an ACL; if 'acl' is NULL, use the * ACL interior to 'fsec' instead. If the endianness doesn't change, then * this function will have no effect. * @@ -1115,17 +1140,19 @@ kauth_filesec_free(kauth_filesec_t fsp) void kauth_filesec_acl_setendian(int kendian, kauth_filesec_t fsec, kauth_acl_t acl) { - uint32_t compare_magic = KAUTH_FILESEC_MAGIC; - uint32_t invert_magic = ntohl(KAUTH_FILESEC_MAGIC); - uint32_t compare_acl_entrycount; - uint32_t i; + uint32_t compare_magic = KAUTH_FILESEC_MAGIC; + uint32_t invert_magic = ntohl(KAUTH_FILESEC_MAGIC); + uint32_t compare_acl_entrycount; + uint32_t i; - if (compare_magic == invert_magic) + if (compare_magic == invert_magic) { return; + } /* If no ACL, use ACL interior to 'fsec' instead */ - if (acl == NULL) + if (acl == NULL) { acl = &fsec->fsec_acl; + } compare_acl_entrycount = acl->acl_entrycount; @@ -1134,21 +1161,23 @@ kauth_filesec_acl_setendian(int kendian, kauth_filesec_t fsec, kauth_acl_t acl) * are valid. 
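Reviewer note: kauth_copyinfilesec() above is a good example of the guess-then-restart copyin idiom — guess a small ACE count (capped at 32), copy in, and if the validated header reports more entries, free the buffer and redo the copy with the exact count. A user-space analogue; REC_MAGIC, ENTRY_SIZE, and the record layout are invented for the sketch:

#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

#define REC_MAGIC   0x4b415531u   /* invented for the sketch */
#define ENTRY_SIZE  16
#define FIRST_GUESS 32            /* same initial cap the kernel uses */
#define REC_SIZE(n) (sizeof(struct rec_hdr) + (size_t)(n) * ENTRY_SIZE)

struct rec_hdr {
	uint32_t magic;
	uint32_t count;   /* number of ENTRY_SIZE entries that follow */
};

/* Read a header-prefixed record from fd, restarting once the real
 * entry count is known.  Caller frees *out on success.  (A real
 * implementation would also bound count, as kauth_filesec_alloc()
 * does with KAUTH_ACL_MAX_ENTRIES.) */
static int
read_record(int fd, struct rec_hdr **out)
{
	uint32_t count = FIRST_GUESS;
	struct rec_hdr *rec;

restart:
	if ((rec = malloc(REC_SIZE(count))) == NULL) {
		return ENOMEM;
	}
	if (pread(fd, rec, REC_SIZE(count), 0) < (ssize_t)sizeof(*rec)) {
		free(rec);
		return EINVAL;
	}
	if (rec->magic != REC_MAGIC) {
		free(rec);
		return EINVAL;
	}
	if (rec->count > count) {
		count = rec->count;   /* guessed too small: redo exactly */
		free(rec);
		goto restart;
	}
	*out = rec;
	return 0;
}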
The following switch and tests effectively reject * conversions on invalid magic numbers as a desirable side effect. */ - switch(kendian) { - case KAUTH_ENDIAN_HOST: /* not in host, convert to host */ - if (fsec->fsec_magic != invert_magic) + switch (kendian) { + case KAUTH_ENDIAN_HOST: /* not in host, convert to host */ + if (fsec->fsec_magic != invert_magic) { return; + } /* acl_entrycount is byteswapped */ compare_acl_entrycount = ntohl(acl->acl_entrycount); break; - case KAUTH_ENDIAN_DISK: /* not in disk, convert to disk */ - if (fsec->fsec_magic != compare_magic) + case KAUTH_ENDIAN_DISK: /* not in disk, convert to disk */ + if (fsec->fsec_magic != compare_magic) { return; + } break; - default: /* bad argument */ + default: /* bad argument */ return; } - + /* We are go for conversion */ fsec->fsec_magic = ntohl(fsec->fsec_magic); acl->acl_entrycount = ntohl(acl->acl_entrycount); @@ -1161,7 +1190,7 @@ kauth_filesec_acl_setendian(int kendian, kauth_filesec_t fsec, kauth_acl_t acl) acl->acl_ace[i].ace_rights = ntohl(acl->acl_ace[i].ace_rights); } } - } +} /* @@ -1170,19 +1199,20 @@ kauth_filesec_acl_setendian(int kendian, kauth_filesec_t fsec, kauth_acl_t acl) kauth_acl_t kauth_acl_alloc(int count) { - kauth_acl_t aclp; - + kauth_acl_t aclp; + /* if the caller hasn't given us a valid size hint, assume the worst */ - if ((count < 0) || (count > KAUTH_ACL_MAX_ENTRIES)) - return(NULL); + if ((count < 0) || (count > KAUTH_ACL_MAX_ENTRIES)) { + return NULL; + } MALLOC(aclp, kauth_acl_t, KAUTH_ACL_SIZE(count), M_KAUTH, M_WAITOK); if (aclp != NULL) { aclp->acl_entrycount = 0; aclp->acl_flags = 0; } - return(aclp); -} + return aclp; +} void kauth_acl_free(kauth_acl_t aclp) @@ -1194,9 +1224,10 @@ kauth_acl_free(kauth_acl_t aclp) /* * WARNING - caller must hold KAUTH_SCOPELOCK */ -static int kauth_add_callback_to_scope(kauth_scope_t sp, kauth_listener_t klp) +static int +kauth_add_callback_to_scope(kauth_scope_t sp, kauth_listener_t klp) { - int i; + int i; for (i = 0; i < KAUTH_SCOPE_MAX_LISTENERS; i++) { if (sp->ks_listeners[i].kll_listenerp == NULL) { @@ -1204,8 +1235,8 @@ static int kauth_add_callback_to_scope(kauth_scope_t sp, kauth_listener_t klp) sp->ks_listeners[i].kll_idata = klp->kl_idata; sp->ks_listeners[i].kll_listenerp = klp; sp->ks_flags |= KS_F_HAS_LISTENERS; - return(0); + return 0; } } - return(ENOSPC); + return ENOSPC; } diff --git a/bsd/kern/kern_backtrace.c b/bsd/kern/kern_backtrace.c index 925994950..29329bf77 100644 --- a/bsd/kern/kern_backtrace.c +++ b/bsd/kern/kern_backtrace.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. 
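Reviewer note: the reindented switch above encodes the key invariant of kauth_filesec_acl_setendian(): the magic field doubles as a byte-order detector, so the conversion is a no-op both when the blob is already in the requested representation and when the magic is invalid. A condensed stand-alone model — the struct and magic value are stand-ins, and ntohl() serves for both directions because a 32-bit byte swap is its own inverse:

#include <stdint.h>
#include <arpa/inet.h>   /* ntohl */

#define BLOB_MAGIC 0x012cc16du   /* stand-in magic */

struct blob {
	uint32_t magic;
	uint32_t count;
};

enum { TO_HOST, TO_DISK };

/* Swap 'b' toward the requested representation.  Invalid magic values
 * fall through every test and the blob is left untouched, mirroring
 * the "reject conversions on invalid magic" behaviour above. */
static void
blob_setendian(int dir, struct blob *b)
{
	const uint32_t compare_magic = BLOB_MAGIC;
	const uint32_t invert_magic  = ntohl(BLOB_MAGIC);

	if (compare_magic == invert_magic) {
		return;              /* big-endian host: never anything to do */
	}
	switch (dir) {
	case TO_HOST:
		if (b->magic != invert_magic) {
			return;      /* already host order, or garbage */
		}
		break;
	case TO_DISK:
		if (b->magic != compare_magic) {
			return;      /* already disk order, or garbage */
		}
		break;
	default:
		return;
	}
	b->magic = ntohl(b->magic);
	b->count = ntohl(b->count);
}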
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,6 +32,8 @@ #include #include +#if DEVELOPMENT || DEBUG + #define MAX_BACKTRACE (128) #define BACKTRACE_USER (0) @@ -39,21 +41,23 @@ static int backtrace_sysctl SYSCTL_HANDLER_ARGS; SYSCTL_NODE(_kern, OID_AUTO, backtrace, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "backtrace"); + "backtrace"); SYSCTL_PROC(_kern_backtrace, OID_AUTO, user, - CTLFLAG_RW | CTLFLAG_LOCKED, (void *)BACKTRACE_USER, - sizeof(uint64_t), backtrace_sysctl, "O", "take user backtrace of current thread"); + CTLFLAG_RW | CTLFLAG_LOCKED, (void *)BACKTRACE_USER, + sizeof(uint64_t), backtrace_sysctl, "O", + "take user backtrace of current thread"); static int backtrace_sysctl SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg2) - uintptr_t *bt; - uint32_t bt_len, bt_filled; uintptr_t type = (uintptr_t)arg1; - bool user_64; - int err = 0; + uintptr_t *bt = NULL; + uint32_t bt_len = 0, bt_filled = 0; + size_t bt_size = 0; + int error = 0; + bool user_64 = false; if (type != BACKTRACE_USER) { return EINVAL; @@ -64,23 +68,27 @@ backtrace_sysctl SYSCTL_HANDLER_ARGS } bt_len = req->oldlen > MAX_BACKTRACE ? MAX_BACKTRACE : req->oldlen; - bt = kalloc(sizeof(uintptr_t) * bt_len); + bt_size = sizeof(bt[0]) * bt_len; + bt = kalloc(bt_size); if (!bt) { return ENOBUFS; } - bzero(bt, sizeof(uintptr_t) * bt_len); - err = backtrace_user(bt, bt_len, &bt_filled, &user_64); - if (err) { + memset(bt, 0, bt_size); + error = backtrace_user(bt, bt_len, &bt_filled, &user_64); + if (error) { goto out; } + bt_filled = min(bt_filled, bt_len); - err = copyout(bt, req->oldptr, bt_filled * sizeof(uint64_t)); - if (err) { + error = copyout(bt, req->oldptr, sizeof(bt[0]) * bt_filled); + if (error) { goto out; } req->oldidx = bt_filled; out: - kfree(bt, sizeof(uintptr_t) * bt_len); - return err; + kfree(bt, bt_size); + return error; } + +#endif /* DEVELOPMENT || DEBUG */ diff --git a/bsd/kern/kern_clock.c b/bsd/kern/kern_clock.c index 08507cdc5..fd776ee2f 100644 --- a/bsd/kern/kern_clock.c +++ b/bsd/kern/kern_clock.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -101,7 +101,7 @@ int tvtohz(struct timeval *tv); * times per second, is used to do scheduling and timeout calculations. * The second timer does resource utilization estimation statistically * based on the state of the machine phz times a second. 
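Reviewer note: besides gating the handler under DEVELOPMENT || DEBUG, this hunk fixes a real sizing bug — the old copyout used sizeof(uint64_t) against a uintptr_t array, which on an ILP32 build would read past the allocation, and bt_filled was never clamped to bt_len. A condensed model of the corrected pattern; fill_frames() is a placeholder standing in for backtrace_user():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_BT 128

extern uint32_t fill_frames(uintptr_t *buf, uint32_t len);   /* placeholder */

/* One size expression, derived from the element type, used for the
 * allocation, the zeroing, the copy, and (in the kernel, the kfree).
 * 'out' is assumed to hold at least 'requested' elements. */
static int
snapshot(uintptr_t *out, uint32_t requested, uint32_t *out_filled)
{
	uint32_t len = requested > MAX_BT ? MAX_BT : requested;
	size_t size = sizeof(uintptr_t) * len;
	uintptr_t *buf = malloc(size);

	if (buf == NULL) {
		return -1;
	}
	memset(buf, 0, size);

	uint32_t filled = fill_frames(buf, len);
	if (filled > len) {
		filled = len;        /* never trust the producer's count */
	}
	/* sizeof(buf[0]), not a hard-coded sizeof(uint64_t): the old
	 * expression over-copied wherever uintptr_t is 4 bytes */
	memcpy(out, buf, sizeof(buf[0]) * filled);
	free(buf);
	*out_filled = filled;
	return 0;
}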
Both functions - * can be performed by a single clock (ie hz == phz), however the + * can be performed by a single clock (ie hz == phz), however the * statistics will be much more prone to errors. Ideally a machine * would have separate clocks measuring time spent in user state, system * state, interrupt state, and idle state. These clocks would allow a non- @@ -128,11 +128,11 @@ int tick = (1000000 / 100); /* GET RID OF THIS !!! */ */ void timeout( - timeout_fcn_t fcn, - void *param, - int interval) + timeout_fcn_t fcn, + void *param, + int interval) { - uint64_t deadline; + uint64_t deadline; clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline); thread_call_func_delayed((thread_call_func_t)fcn, param, deadline); @@ -148,13 +148,13 @@ timeout( */ void timeout_with_leeway( - timeout_fcn_t fcn, - void *param, - int interval, - int leeway_interval) + timeout_fcn_t fcn, + void *param, + int interval, + int leeway_interval) { - uint64_t deadline; - uint64_t leeway; + uint64_t deadline; + uint64_t leeway; clock_interval_to_deadline(interval, NSEC_PER_SEC / hz, &deadline); @@ -170,8 +170,8 @@ timeout_with_leeway( */ void untimeout( - timeout_fcn_t fcn, - void *param) + timeout_fcn_t fcn, + void *param) { thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE); } @@ -186,14 +186,14 @@ untimeout( */ void bsd_timeout( - timeout_fcn_t fcn, - void *param, + timeout_fcn_t fcn, + void *param, struct timespec *ts) { - uint64_t deadline = 0; + uint64_t deadline = 0; if (ts && (ts->tv_sec || ts->tv_nsec)) { - nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &deadline ); + nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &deadline ); clock_absolutetime_interval_to_deadline( deadline, &deadline ); } thread_call_func_delayed((thread_call_func_t)fcn, param, deadline); @@ -206,8 +206,8 @@ bsd_timeout( */ void bsd_untimeout( - timeout_fcn_t fcn, - void *param) + timeout_fcn_t fcn, + void *param) { thread_call_func_cancel((thread_call_func_t)fcn, param, FALSE); } @@ -236,16 +236,17 @@ hzto(struct timeval *tv) * Maximum value for any timeout in 10ms ticks is 250 days. 
*/ sec = tv->tv_sec - now.tv_sec; - if (sec <= 0x7fffffff / 1000 - 1000) + if (sec <= 0x7fffffff / 1000 - 1000) { ticks = ((tv->tv_sec - now.tv_sec) * 1000 + - (tv->tv_usec - now.tv_usec) / 1000) - / (tick / 1000); - else if (sec <= 0x7fffffff / hz) + (tv->tv_usec - now.tv_usec) / 1000) + / (tick / 1000); + } else if (sec <= 0x7fffffff / hz) { ticks = sec * hz; - else + } else { ticks = 0x7fffffff; + } - return (ticks); + return ticks; } /* @@ -267,8 +268,8 @@ sysctl_clockrate } SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_clockrate, "S,clockinfo", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_clockrate, "S,clockinfo", ""); /* @@ -313,20 +314,22 @@ tvtohz(struct timeval *tv) usec -= 1000000; } printf("tvotohz: negative time difference %ld sec %ld usec\n", - sec, usec); + sec, usec); #endif ticks = 1; - } else if (sec <= LONG_MAX / 1000000) + } else if (sec <= LONG_MAX / 1000000) { ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1)) - / tick + 1; - else if (sec <= LONG_MAX / hz) + / tick + 1; + } else if (sec <= LONG_MAX / hz) { ticks = sec * hz - + ((unsigned long)usec + (tick - 1)) / tick + 1; - else + + ((unsigned long)usec + (tick - 1)) / tick + 1; + } else { ticks = LONG_MAX; - if (ticks > INT_MAX) + } + if (ticks > INT_MAX) { ticks = INT_MAX; - return ((int)ticks); + } + return (int)ticks; } @@ -339,8 +342,9 @@ tvtohz(struct timeval *tv) void startprofclock(struct proc *p) { - if ((p->p_flag & P_PROFIL) == 0) + if ((p->p_flag & P_PROFIL) == 0) { OSBitOrAtomic(P_PROFIL, &p->p_flag); + } } /* @@ -349,8 +353,9 @@ startprofclock(struct proc *p) void stopprofclock(struct proc *p) { - if (p->p_flag & P_PROFIL) + if (p->p_flag & P_PROFIL) { OSBitAndAtomic(~((uint32_t)P_PROFIL), &p->p_flag); + } } /* TBD locking user profiling is not resolved yet */ @@ -358,14 +363,16 @@ void bsd_uprofil(struct time_value *syst, user_addr_t pc) { struct proc *p = current_proc(); - int ticks; - struct timeval *tv; + int ticks; + struct timeval *tv; struct timeval st; - if (p == NULL) - return; - if ( !(p->p_flag & P_PROFIL)) - return; + if (p == NULL) { + return; + } + if (!(p->p_flag & P_PROFIL)) { + return; + } st.tv_sec = syst->seconds; st.tv_usec = syst->microseconds; @@ -373,10 +380,11 @@ bsd_uprofil(struct time_value *syst, user_addr_t pc) tv = &(p->p_stats->p_ru.ru_stime); ticks = ((tv->tv_sec - st.tv_sec) * 1000 + - (tv->tv_usec - st.tv_usec) / 1000) / - (tick / 1000); - if (ticks) + (tv->tv_usec - st.tv_usec) / 1000) / + (tick / 1000); + if (ticks) { addupc_task(p, pc, ticks); + } } /* TBD locking user profiling is not resolved yet */ @@ -386,15 +394,17 @@ get_procrustime(time_value_t *tv) struct proc *p = current_proc(); struct timeval st; - if (p == NULL) + if (p == NULL) { return; - if ( !(p->p_flag & P_PROFIL)) - return; + } + if (!(p->p_flag & P_PROFIL)) { + return; + } //proc_lock(p); st = p->p_stats->p_ru.ru_stime; //proc_unlock(p); - + tv->seconds = st.tv_sec; tv->microseconds = st.tv_usec; } diff --git a/bsd/kern/kern_control.c b/bsd/kern/kern_control.c index 099201dac..dede2e6e8 100644 --- a/bsd/kern/kern_control.c +++ b/bsd/kern/kern_control.c @@ -58,70 +58,70 @@ #include struct kctl { - TAILQ_ENTRY(kctl) next; /* controller chain */ - kern_ctl_ref kctlref; + TAILQ_ENTRY(kctl) next; /* controller chain */ + kern_ctl_ref kctlref; /* controller information provided when registering */ - char name[MAX_KCTL_NAME]; /* unique identifier */ - u_int32_t id; - u_int32_t reg_unit; + char name[MAX_KCTL_NAME]; 
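Reviewer note: hzto() above picks the widest arithmetic that cannot overflow a 32-bit tick count. A user-space transcription, assuming hz = 100 so each tick is 10 ms — at that rate 0x7fffffff ticks saturates at roughly 248 days, the "250 days" the kernel comment cites:

#include <sys/time.h>

#define HZ   100
#define TICK (1000000 / HZ)   /* microseconds per tick */

/* Convert an absolute timeval into a tick count relative to 'now',
 * guarding each multiplication the way hzto() does. */
static int
timeval_to_ticks(const struct timeval *tv, const struct timeval *now)
{
	long sec = tv->tv_sec - now->tv_sec;
	long ticks;

	if (sec <= 0x7fffffff / 1000 - 1000) {
		/* small enough to do the millisecond math exactly */
		ticks = ((tv->tv_sec - now->tv_sec) * 1000 +
		    (tv->tv_usec - now->tv_usec) / 1000) / (TICK / 1000);
	} else if (sec <= 0x7fffffff / HZ) {
		/* seconds * HZ still fits; drop sub-second precision */
		ticks = sec * HZ;
	} else {
		ticks = 0x7fffffff;   /* saturate: ~248 days at HZ=100 */
	}
	return (int)ticks;
}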
/* unique identifier */ + u_int32_t id; + u_int32_t reg_unit; /* misc communication information */ - u_int32_t flags; /* support flags */ - u_int32_t recvbufsize; /* request more than the default buffer size */ - u_int32_t sendbufsize; /* request more than the default buffer size */ + u_int32_t flags; /* support flags */ + u_int32_t recvbufsize; /* request more than the default buffer size */ + u_int32_t sendbufsize; /* request more than the default buffer size */ /* Dispatch functions */ - ctl_bind_func bind; /* Prepare contact */ - ctl_connect_func connect; /* Make contact */ - ctl_disconnect_func disconnect; /* Break contact */ - ctl_send_func send; /* Send data to nke */ - ctl_send_list_func send_list; /* Send list of packets */ - ctl_setopt_func setopt; /* set kctl configuration */ - ctl_getopt_func getopt; /* get kctl configuration */ - ctl_rcvd_func rcvd; /* Notify nke when client reads data */ - - TAILQ_HEAD(, ctl_cb) kcb_head; - u_int32_t lastunit; + ctl_bind_func bind; /* Prepare contact */ + ctl_connect_func connect; /* Make contact */ + ctl_disconnect_func disconnect; /* Break contact */ + ctl_send_func send; /* Send data to nke */ + ctl_send_list_func send_list; /* Send list of packets */ + ctl_setopt_func setopt; /* set kctl configuration */ + ctl_getopt_func getopt; /* get kctl configuration */ + ctl_rcvd_func rcvd; /* Notify nke when client reads data */ + + TAILQ_HEAD(, ctl_cb) kcb_head; + u_int32_t lastunit; }; struct ctl_cb { - TAILQ_ENTRY(ctl_cb) next; /* controller chain */ - lck_mtx_t *mtx; - struct socket *so; /* controlling socket */ - struct kctl *kctl; /* back pointer to controller */ - void *userdata; - struct sockaddr_ctl sac; - u_int32_t usecount; + TAILQ_ENTRY(ctl_cb) next; /* controller chain */ + lck_mtx_t *mtx; + struct socket *so; /* controlling socket */ + struct kctl *kctl; /* back pointer to controller */ + void *userdata; + struct sockaddr_ctl sac; + u_int32_t usecount; }; #ifndef ROUNDUP64 -#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) +#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) #endif #ifndef ADVANCE64 -#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) +#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) #endif /* * Definitions and vars for we support */ -#define CTL_SENDSIZE (2 * 1024) /* default buffer size */ -#define CTL_RECVSIZE (8 * 1024) /* default buffer size */ +#define CTL_SENDSIZE (2 * 1024) /* default buffer size */ +#define CTL_RECVSIZE (8 * 1024) /* default buffer size */ /* * Definitions and vars for we support */ -static u_int32_t ctl_maxunit = 65536; -static lck_grp_attr_t *ctl_lck_grp_attr = 0; -static lck_attr_t *ctl_lck_attr = 0; -static lck_grp_t *ctl_lck_grp = 0; -static lck_mtx_t *ctl_mtx; +static u_int32_t ctl_maxunit = 65536; +static lck_grp_attr_t *ctl_lck_grp_attr = 0; +static lck_attr_t *ctl_lck_attr = 0; +static lck_grp_t *ctl_lck_grp = 0; +static lck_mtx_t *ctl_mtx; /* all the controllers are chained */ -TAILQ_HEAD(kctl_list, kctl) ctl_head; +TAILQ_HEAD(kctl_list, kctl) ctl_head; static int ctl_attach(struct socket *, int, struct proc *); static int ctl_detach(struct socket *); @@ -130,11 +130,11 @@ static int ctl_bind(struct socket *, struct sockaddr *, struct proc *); static int ctl_connect(struct socket *, struct sockaddr *, struct proc *); static int ctl_disconnect(struct socket *); static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data, - struct ifnet *ifp, struct proc *p); + struct ifnet *ifp, struct proc *p); static int ctl_send(struct socket *, int, struct mbuf 
*, - struct sockaddr *, struct mbuf *, struct proc *); + struct sockaddr *, struct mbuf *, struct proc *); static int ctl_send_list(struct socket *, int, struct mbuf *, - struct sockaddr *, struct mbuf *, struct proc *); + struct sockaddr *, struct mbuf *, struct proc *); static int ctl_ctloutput(struct socket *, struct sockopt *); static int ctl_peeraddr(struct socket *so, struct sockaddr **nam); static int ctl_usr_rcvd(struct socket *so, int flags); @@ -143,7 +143,7 @@ static struct kctl *ctl_find_by_name(const char *); static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit); static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, - u_int32_t *); + u_int32_t *); static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit); static void ctl_post_msg(u_int32_t event_code, u_int32_t id); @@ -152,43 +152,43 @@ static int ctl_unlock(struct socket *, int, void *); static lck_mtx_t * ctl_getlock(struct socket *, int); static struct pr_usrreqs ctl_usrreqs = { - .pru_attach = ctl_attach, - .pru_bind = ctl_bind, - .pru_connect = ctl_connect, - .pru_control = ctl_ioctl, - .pru_detach = ctl_detach, - .pru_disconnect = ctl_disconnect, - .pru_peeraddr = ctl_peeraddr, - .pru_rcvd = ctl_usr_rcvd, - .pru_send = ctl_send, - .pru_send_list = ctl_send_list, - .pru_sosend = sosend, - .pru_sosend_list = sosend_list, - .pru_soreceive = soreceive, - .pru_soreceive_list = soreceive_list, + .pru_attach = ctl_attach, + .pru_bind = ctl_bind, + .pru_connect = ctl_connect, + .pru_control = ctl_ioctl, + .pru_detach = ctl_detach, + .pru_disconnect = ctl_disconnect, + .pru_peeraddr = ctl_peeraddr, + .pru_rcvd = ctl_usr_rcvd, + .pru_send = ctl_send, + .pru_send_list = ctl_send_list, + .pru_sosend = sosend, + .pru_sosend_list = sosend_list, + .pru_soreceive = soreceive, + .pru_soreceive_list = soreceive_list, }; static struct protosw kctlsw[] = { -{ - .pr_type = SOCK_DGRAM, - .pr_protocol = SYSPROTO_CONTROL, - .pr_flags = PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD, - .pr_ctloutput = ctl_ctloutput, - .pr_usrreqs = &ctl_usrreqs, - .pr_lock = ctl_lock, - .pr_unlock = ctl_unlock, - .pr_getlock = ctl_getlock, -}, -{ - .pr_type = SOCK_STREAM, - .pr_protocol = SYSPROTO_CONTROL, - .pr_flags = PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD, - .pr_ctloutput = ctl_ctloutput, - .pr_usrreqs = &ctl_usrreqs, - .pr_lock = ctl_lock, - .pr_unlock = ctl_unlock, - .pr_getlock = ctl_getlock, -} + { + .pr_type = SOCK_DGRAM, + .pr_protocol = SYSPROTO_CONTROL, + .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD, + .pr_ctloutput = ctl_ctloutput, + .pr_usrreqs = &ctl_usrreqs, + .pr_lock = ctl_lock, + .pr_unlock = ctl_unlock, + .pr_getlock = ctl_getlock, + }, + { + .pr_type = SOCK_STREAM, + .pr_protocol = SYSPROTO_CONTROL, + .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD, + .pr_ctloutput = ctl_ctloutput, + .pr_usrreqs = &ctl_usrreqs, + .pr_lock = ctl_lock, + .pr_unlock = ctl_unlock, + .pr_getlock = ctl_getlock, + } }; __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS; @@ -197,7 +197,7 @@ __private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS; SYSCTL_NODE(_net_systm, OID_AUTO, kctl, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel control family"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family"); struct kctlstat kctlstat; SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats, @@ -205,26 +205,26 @@ SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats, kctl_getstat, "S,kctlstat", ""); SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - kctl_reg_list, 
"S,xkctl_reg", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + kctl_reg_list, "S,xkctl_reg", ""); SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - kctl_pcblist, "S,xkctlpcb", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + kctl_pcblist, "S,xkctlpcb", ""); u_int32_t ctl_autorcvbuf_max = 256 * 1024; SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax, - CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, ""); u_int32_t ctl_autorcvbuf_high = 0; SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh, - CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, ""); u_int32_t ctl_debug = 0; SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug, - CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, ""); -#define KCTL_TBL_INC 16 +#define KCTL_TBL_INC 16 static uintptr_t kctl_tbl_size = 0; static u_int32_t kctl_tbl_growing = 0; @@ -246,7 +246,7 @@ kern_control_init(struct domain *dp) { struct protosw *pr; int i; - int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw)); + int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw)); VERIFY(!(dp->dom_flags & DOM_INITIALIZED)); VERIFY(dp == systemdomain); @@ -277,16 +277,18 @@ kern_control_init(struct domain *dp) } TAILQ_INIT(&ctl_head); - for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) + for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) { net_add_proto(pr, dp, 1); + } } static void kcb_delete(struct ctl_cb *kcb) { if (kcb != 0) { - if (kcb->mtx != 0) + if (kcb->mtx != 0) { lck_mtx_free(kcb->mtx, ctl_lck_grp); + } FREE(kcb, M_TEMP); } } @@ -302,7 +304,7 @@ ctl_attach(struct socket *so, int proto, struct proc *p) { #pragma unused(proto, p) int error = 0; - struct ctl_cb *kcb = 0; + struct ctl_cb *kcb = 0; MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK); if (kcb == NULL) { @@ -324,18 +326,18 @@ quit: kcb_delete(kcb); kcb = 0; } - return (error); + return error; } static int ctl_sofreelastref(struct socket *so) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; so->so_pcb = 0; if (kcb != 0) { - struct kctl *kctl; + struct kctl *kctl; if ((kctl = kcb->kctl) != 0) { lck_mtx_lock(ctl_mtx); TAILQ_REMOVE(&kctl->kcb_head, kcb, next); @@ -346,16 +348,17 @@ ctl_sofreelastref(struct socket *so) kcb_delete(kcb); } sofreelastref(so, 1); - return (0); + return 0; } static int ctl_detach(struct socket *so) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - if (kcb == 0) - return (0); + if (kcb == 0) { + return 0; + } if (kcb->kctl != NULL && kcb->kctl->bind != NULL && kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) { @@ -371,7 +374,7 @@ ctl_detach(struct socket *so) soisdisconnected(so); so->so_flags |= SOF_PCBCLEARING; - return (0); + return 0; } static int @@ -379,7 +382,7 @@ ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p) { struct kctl *kctl = NULL; int error = 0; - struct sockaddr_ctl sa; + struct sockaddr_ctl sa; struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; struct ctl_cb *kcb_next = NULL; u_quad_t sbmaxsize; @@ -391,11 +394,11 @@ ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p) if (kcb->kctl != NULL) { // Already set up, skip - return (0); + return 0; } if (nam->sa_len != sizeof(struct sockaddr_ctl)) { 
- return (EINVAL); + return EINVAL; } bcopy(nam, &sa, sizeof(struct sockaddr_ctl)); @@ -404,32 +407,32 @@ ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p) kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit); if (kctl == NULL) { lck_mtx_unlock(ctl_mtx); - return (ENOENT); + return ENOENT; } if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && - (so->so_type != SOCK_STREAM)) || - (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && - (so->so_type != SOCK_DGRAM))) { - lck_mtx_unlock(ctl_mtx); - return (EPROTOTYPE); - } + (so->so_type != SOCK_STREAM)) || + (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && + (so->so_type != SOCK_DGRAM))) { + lck_mtx_unlock(ctl_mtx); + return EPROTOTYPE; + } if (kctl->flags & CTL_FLAG_PRIVILEGED) { if (p == 0) { lck_mtx_unlock(ctl_mtx); - return (EINVAL); + return EINVAL; } if (kauth_cred_issuser(kauth_cred_get()) == 0) { lck_mtx_unlock(ctl_mtx); - return (EPERM); + return EPERM; } } if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) { if (kcb_find(kctl, sa.sc_unit) != NULL) { lck_mtx_unlock(ctl_mtx); - return (EBUSY); + return EBUSY; } } else { /* Find an unused ID, assumes control IDs are in order */ @@ -448,7 +451,7 @@ ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p) if (unit == ctl_maxunit) { lck_mtx_unlock(ctl_mtx); - return (EBUSY); + return EBUSY; } sa.sc_unit = unit; @@ -486,10 +489,11 @@ ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p) error = soreserve(so, sendbufsize, recvbufsize); if (error) { - if (ctl_debug) + if (ctl_debug) { printf("%s - soreserve(%llx, %u, %u) error %d\n", - __func__, (uint64_t)VM_KERNEL_ADDRPERM(so), - sendbufsize, recvbufsize, error); + __func__, (uint64_t)VM_KERNEL_ADDRPERM(so), + sendbufsize, recvbufsize, error); + } goto done; } @@ -505,7 +509,7 @@ done: kctlstat.kcs_conn_fail++; lck_mtx_unlock(ctl_mtx); } - return (error); + return error; } static int @@ -520,7 +524,7 @@ ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p) error = ctl_setup_kctl(so, nam, p); if (error) { - return (error); + return error; } if (kcb->kctl == NULL) { @@ -528,14 +532,14 @@ ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p) } if (kcb->kctl->bind == NULL) { - return (EINVAL); + return EINVAL; } socket_unlock(so, 0); error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata); socket_lock(so, 0); - return (error); + return error; } static int @@ -550,7 +554,7 @@ ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p) error = ctl_setup_kctl(so, nam, p); if (error) { - return (error); + return error; } if (kcb->kctl == NULL) { @@ -592,16 +596,16 @@ end: kctlstat.kcs_conn_fail++; lck_mtx_unlock(ctl_mtx); } - return (error); + return error; } static int ctl_disconnect(struct socket *so) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; if ((kcb = (struct ctl_cb *)so->so_pcb)) { - struct kctl *kctl = kcb->kctl; + struct kctl *kctl = kcb->kctl; if (kctl && kctl->disconnect) { socket_unlock(so, 0); @@ -625,21 +629,23 @@ ctl_disconnect(struct socket *so) lck_mtx_unlock(ctl_mtx); socket_lock(so, 0); } - return (0); + return 0; } static int ctl_peeraddr(struct socket *so, struct sockaddr **nam) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; - struct sockaddr_ctl sc; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; + struct sockaddr_ctl sc; - if (kcb == NULL) /* sanity check */ - return (ENOTCONN); + if (kcb == NULL) { /* sanity check */ 
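Reviewer note: the connect path being reformatted here (ctl_setup_kctl() plus ctl_connect()) is what a user process exercises through a PF_SYSTEM socket. For context, this is the matching user-space side — "com.example.kctl" is a placeholder name, and some control must actually have been registered via ctl_register() for the calls to succeed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>

int
main(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Resolve the control name to its dynamically assigned id. */
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, "com.example.kctl", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
		perror("CTLIOCGINFO");
		return 1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;   /* 0 = let the kernel pick a unit */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
		perror("connect");
		return 1;
	}
	/* setsockopt()/getsockopt() now reach the control's setopt/getopt
	 * handlers via ctl_ctloutput(). */
	close(fd);
	return 0;
}

Passing sc_unit = 0 takes the auto-allocation branch in ctl_setup_kctl(); a nonzero unit requests that exact unit and fails with EBUSY if it is already taken.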
+ return ENOTCONN; + } - if ((kctl = kcb->kctl) == NULL) - return (EINVAL); + if ((kctl = kcb->kctl) == NULL) { + return EINVAL; + } bzero(&sc, sizeof(struct sockaddr_ctl)); sc.sc_len = sizeof(struct sockaddr_ctl); @@ -650,7 +656,7 @@ ctl_peeraddr(struct socket *so, struct sockaddr **nam) *nam = dup_sockaddr((struct sockaddr *)&sc, 1); - return (0); + return 0; } static void @@ -677,9 +683,10 @@ ctl_sbrcv_trim(struct socket *so) if (trim > 0) { sbreserve(sb, (sb->sb_hiwat - trim)); - if (ctl_debug) + if (ctl_debug) { printf("%s - shrunk to %d\n", __func__, sb->sb_hiwat); + } } } } @@ -687,11 +694,11 @@ ctl_sbrcv_trim(struct socket *so) static int ctl_usr_rcvd(struct socket *so, int flags) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; if ((kctl = kcb->kctl) == NULL) { - return (EINVAL); + return EINVAL; } if (kctl->rcvd) { @@ -702,27 +709,30 @@ ctl_usr_rcvd(struct socket *so, int flags) ctl_sbrcv_trim(so); - return (0); + return 0; } static int ctl_send(struct socket *so, int flags, struct mbuf *m, - struct sockaddr *addr, struct mbuf *control, - struct proc *p) + struct sockaddr *addr, struct mbuf *control, + struct proc *p) { #pragma unused(addr, p) - int error = 0; - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; + int error = 0; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; - if (control) + if (control) { m_freem(control); + } - if (kcb == NULL) /* sanity check */ + if (kcb == NULL) { /* sanity check */ error = ENOTCONN; + } - if (error == 0 && (kctl = kcb->kctl) == NULL) + if (error == 0 && (kctl = kcb->kctl) == NULL) { error = EINVAL; + } if (error == 0 && kctl->send) { so_tc_update_stats(m, so, m_get_service_class(m)); @@ -732,37 +742,43 @@ ctl_send(struct socket *so, int flags, struct mbuf *m, socket_lock(so, 0); } else { m_freem(m); - if (error == 0) + if (error == 0) { error = ENOTSUP; + } } - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail); - return (error); + } + return error; } static int ctl_send_list(struct socket *so, int flags, struct mbuf *m, - __unused struct sockaddr *addr, struct mbuf *control, - __unused struct proc *p) + __unused struct sockaddr *addr, struct mbuf *control, + __unused struct proc *p) { - int error = 0; - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; + int error = 0; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; - if (control) + if (control) { m_freem_list(control); + } - if (kcb == NULL) /* sanity check */ + if (kcb == NULL) { /* sanity check */ error = ENOTCONN; + } - if (error == 0 && (kctl = kcb->kctl) == NULL) + if (error == 0 && (kctl = kcb->kctl) == NULL) { error = EINVAL; + } if (error == 0 && kctl->send_list) { struct mbuf *nxt; - for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) + for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) { so_tc_update_stats(nxt, so, m_get_service_class(nxt)); + } socket_unlock(so, 0); error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit, @@ -780,40 +796,45 @@ ctl_send_list(struct socket *so, int flags, struct mbuf *m, socket_lock(so, 0); m = nextpkt; } - if (m != NULL) + if (m != NULL) { m_freem_list(m); + } } else { m_freem_list(m); - if (error == 0) + if (error == 0) { error = ENOTSUP; + } } - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail); - return (error); + } + return error; } static errno_t ctl_rcvbspace(struct 
socket *so, u_int32_t datasize, - u_int32_t kctlflags, u_int32_t flags) + u_int32_t kctlflags, u_int32_t flags) { struct sockbuf *sb = &so->so_rcv; u_int32_t space = sbspace(sb); errno_t error; if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) { - if ((u_int32_t) space >= datasize) + if ((u_int32_t) space >= datasize) { error = 0; - else + } else { error = ENOBUFS; + } } else if ((flags & CTL_DATA_CRIT) == 0) { /* * Reserve 25% for critical messages */ if (space < (sb->sb_hiwat >> 2) || - space < datasize) + space < datasize) { error = ENOBUFS; - else + } else { error = 0; + } } else { u_int32_t autorcvbuf_max; @@ -821,7 +842,7 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, * Allow overcommit of 25% */ autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2), - ctl_autorcvbuf_max); + ctl_autorcvbuf_max); if ((u_int32_t) space >= datasize) { error = 0; @@ -834,9 +855,9 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, if (sbreserve(sb, min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) { - - if (sb->sb_hiwat > ctl_autorcvbuf_high) + if (sb->sb_hiwat > ctl_autorcvbuf_high) { ctl_autorcvbuf_high = sb->sb_hiwat; + } /* * A final check @@ -847,9 +868,10 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, error = ENOBUFS; } - if (ctl_debug) + if (ctl_debug) { printf("%s - grown to %d error %d\n", __func__, sb->sb_hiwat, error); + } } else { error = ENOBUFS; } @@ -857,21 +879,21 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, error = ENOBUFS; } } - return (error); + return error; } errno_t ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags) { - struct socket *so; - errno_t error = 0; - int len = m->m_pkthdr.len; - u_int32_t kctlflags; + struct socket *so; + errno_t error = 0; + int len = m->m_pkthdr.len; + u_int32_t kctlflags; so = kcb_find_socket(kctlref, unit, &kctlflags); if (so == NULL) { - return (EINVAL); + return EINVAL; } if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) { @@ -879,28 +901,32 @@ ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m, OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock); goto bye; } - if ((flags & CTL_DATA_EOR)) + if ((flags & CTL_DATA_EOR)) { m->m_flags |= M_EOR; + } so_recv_data_stat(so, m, 0); if (sbappend(&so->so_rcv, m) != 0) { - if ((flags & CTL_DATA_NOWAKEUP) == 0) + if ((flags & CTL_DATA_NOWAKEUP) == 0) { sorwakeup(so); + } } else { error = ENOBUFS; OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock); } bye: - if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) + if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) { printf("%s - crit data err %d len %d hiwat %d cc: %d\n", - __func__, error, len, - so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + __func__, error, len, + so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + } socket_unlock(so, 1); - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail); + } - return (error); + return error; } /* @@ -912,15 +938,16 @@ m_space(struct mbuf *m) int space = 0; struct mbuf *nxt; - for (nxt = m; nxt != NULL; nxt = nxt->m_next) + for (nxt = m; nxt != NULL; nxt = nxt->m_next) { space += nxt->m_len; + } - return (space); + return space; } errno_t ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list, - u_int32_t flags, struct mbuf **m_remain) + u_int32_t flags, struct mbuf **m_remain) { struct socket *so = NULL; errno_t error = 0; @@ -955,9 +982,10 @@ ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list, for (m = m_list; m != NULL; m = nextpkt) { nextpkt 
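Reviewer note: the policy in ctl_rcvbspace() above is easy to miss in the whitespace churn. For a control registered CTL_FLAG_REG_CRIT, ordinary traffic may not consume the top quarter of the receive buffer, which stays free for CTL_DATA_CRIT messages; critical senders may additionally grow the buffer by a 25% overcommit, which this miniature of the middle branch omits:

#include <stdint.h>

#define DATA_CRIT 0x1   /* stand-in for CTL_DATA_CRIT */

/* With hiwat = 8192, ordinary data is refused once free space falls
 * below 2048 bytes; critical data may still use that reserve. */
static int
rcvspace_ok(uint32_t space, uint32_t hiwat, uint32_t datasize, uint32_t flags)
{
	if (flags & DATA_CRIT) {
		return space >= datasize;
	}
	return space >= datasize && space >= (hiwat >> 2);
}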
= m->m_nextpkt; - if (m->m_pkthdr.len == 0 && ctl_debug) + if (m->m_pkthdr.len == 0 && ctl_debug) { printf("%s: %llx m_pkthdr.len is 0", - __func__, (uint64_t)VM_KERNEL_ADDRPERM(m)); + __func__, (uint64_t)VM_KERNEL_ADDRPERM(m)); + } /* * The mbuf is either appended or freed by sbappendrecord() @@ -967,7 +995,7 @@ ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list, if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) { error = ENOBUFS; OSIncrementAtomic64( - (SInt64 *)&kctlstat.kcs_enqueue_fullsock); + (SInt64 *)&kctlstat.kcs_enqueue_fullsock); break; } else { /* @@ -985,20 +1013,22 @@ ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list, m = nextpkt; error = ENOBUFS; OSIncrementAtomic64( - (SInt64 *)&kctlstat.kcs_enqueue_fullsock); + (SInt64 *)&kctlstat.kcs_enqueue_fullsock); break; } } } - if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) + if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) { sorwakeup(so); + } done: if (so != NULL) { - if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) + if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) { printf("%s - crit data err %d len %d hiwat %d cc: %d\n", - __func__, error, len, - so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + __func__, error, len, + so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + } socket_unlock(so, 1); } @@ -1011,35 +1041,38 @@ done: printf("%s m_list %llx\n", __func__, (uint64_t) VM_KERNEL_ADDRPERM(m_list)); - for (n = m; n != NULL; n = n->m_nextpkt) + for (n = m; n != NULL; n = n->m_nextpkt) { printf(" remain %llx m_next %llx\n", (uint64_t) VM_KERNEL_ADDRPERM(n), (uint64_t) VM_KERNEL_ADDRPERM(n->m_next)); + } } } else { - if (m != NULL) + if (m != NULL) { m_freem_list(m); + } } - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail); - return (error); + } + return error; } errno_t ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags) { - struct socket *so; - struct mbuf *m; - errno_t error = 0; - unsigned int num_needed; - struct mbuf *n; - size_t curlen = 0; - u_int32_t kctlflags; + struct socket *so; + struct mbuf *m; + errno_t error = 0; + unsigned int num_needed; + struct mbuf *n; + size_t curlen = 0; + u_int32_t kctlflags; so = kcb_find_socket(kctlref, unit, &kctlflags); if (so == NULL) { - return (EINVAL); + return EINVAL; } if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) { @@ -1052,9 +1085,10 @@ ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0); if (m == NULL) { kctlstat.kcs_enqdata_mb_alloc_fail++; - if (ctl_debug) + if (ctl_debug) { printf("%s: m_allocpacket_internal(%lu) failed\n", __func__, len); + } error = ENOMEM; goto bye; } @@ -1062,20 +1096,23 @@ ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, for (n = m; n != NULL; n = n->m_next) { size_t mlen = mbuf_maxlen(n); - if (mlen + curlen > len) + if (mlen + curlen > len) { mlen = len - curlen; + } n->m_len = mlen; bcopy((char *)data + curlen, n->m_data, mlen); curlen += mlen; } mbuf_pkthdr_setlen(m, curlen); - if ((flags & CTL_DATA_EOR)) + if ((flags & CTL_DATA_EOR)) { m->m_flags |= M_EOR; + } so_recv_data_stat(so, m, 0); if (sbappend(&so->so_rcv, m) != 0) { - if ((flags & CTL_DATA_NOWAKEUP) == 0) + if ((flags & CTL_DATA_NOWAKEUP) == 0) { sorwakeup(so); + } } else { kctlstat.kcs_enqdata_sbappend_fail++; error = ENOBUFS; @@ -1083,30 +1120,33 @@ ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, } bye: - if (ctl_debug && 
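Reviewer note: the copy loop in ctl_enqueuedata() above scatters one flat buffer across a pre-allocated mbuf chain, clamping the final segment so exactly len bytes land in the packet. The same loop over a simplified stand-in segment type (allocation of the chain, which m_allocpacket_internal() handles in the kernel, is assumed done):

#include <stddef.h>
#include <string.h>

struct seg {
	struct seg *next;
	size_t      cap;    /* bytes this segment can hold */
	size_t      len;    /* bytes actually used */
	char        data[];
};

/* Fill each segment up to its capacity, clamping the last one so the
 * chain carries exactly 'len' bytes.  Returns the total written, which
 * becomes the packet-header length. */
static size_t
chain_fill(struct seg *head, const void *data, size_t len)
{
	size_t curlen = 0;

	for (struct seg *s = head; s != NULL && curlen < len; s = s->next) {
		size_t mlen = s->cap;
		if (mlen + curlen > len) {
			mlen = len - curlen;
		}
		memcpy(s->data, (const char *)data + curlen, mlen);
		s->len = mlen;
		curlen += mlen;
	}
	return curlen;
}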
error != 0 && (flags & CTL_DATA_CRIT)) + if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) { printf("%s - crit data err %d len %d hiwat %d cc: %d\n", - __func__, error, (int)len, - so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + __func__, error, (int)len, + so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + } socket_unlock(so, 1); - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail); - return (error); + } + return error; } errno_t ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt) { - struct socket *so; + struct socket *so; u_int32_t cnt; struct mbuf *m1; - if (pcnt == NULL) - return (EINVAL); + if (pcnt == NULL) { + return EINVAL; + } so = kcb_find_socket(kctlref, unit, NULL); if (so == NULL) { - return (EINVAL); + return EINVAL; } cnt = 0; @@ -1114,50 +1154,53 @@ ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt) while (m1 != NULL) { if (m1->m_type == MT_DATA || m1->m_type == MT_HEADER || - m1->m_type == MT_OOBDATA) + m1->m_type == MT_OOBDATA) { cnt += 1; + } m1 = m1->m_nextpkt; } *pcnt = cnt; socket_unlock(so, 1); - return (0); + return 0; } errno_t ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space) { - struct socket *so; + struct socket *so; long avail; - if (space == NULL) - return (EINVAL); + if (space == NULL) { + return EINVAL; + } so = kcb_find_socket(kctlref, unit, NULL); if (so == NULL) { - return (EINVAL); + return EINVAL; } avail = sbspace(&so->so_rcv); *space = (avail < 0) ? 0 : avail; socket_unlock(so, 1); - return (0); + return 0; } errno_t ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *difference) { - struct socket *so; + struct socket *so; - if (difference == NULL) - return (EINVAL); + if (difference == NULL) { + return EINVAL; + } so = kcb_find_socket(kctlref, unit, NULL); if (so == NULL) { - return (EINVAL); + return EINVAL; } if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) { @@ -1167,147 +1210,156 @@ ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit, } socket_unlock(so, 1); - return (0); + return 0; } static int ctl_ctloutput(struct socket *so, struct sockopt *sopt) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; - int error = 0; - void *data = NULL; - size_t len; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; + int error = 0; + void *data = NULL; + size_t len; if (sopt->sopt_level != SYSPROTO_CONTROL) { - return (EINVAL); + return EINVAL; } - if (kcb == NULL) /* sanity check */ - return (ENOTCONN); + if (kcb == NULL) { /* sanity check */ + return ENOTCONN; + } - if ((kctl = kcb->kctl) == NULL) - return (EINVAL); + if ((kctl = kcb->kctl) == NULL) { + return EINVAL; + } switch (sopt->sopt_dir) { - case SOPT_SET: - if (kctl->setopt == NULL) - return (ENOTSUP); - if (sopt->sopt_valsize != 0) { - MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, - M_WAITOK | M_ZERO); - if (data == NULL) - return (ENOMEM); - error = sooptcopyin(sopt, data, - sopt->sopt_valsize, sopt->sopt_valsize); - } - if (error == 0) { - socket_unlock(so, 0); - error = (*kctl->setopt)(kctl->kctlref, - kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name, - data, sopt->sopt_valsize); - socket_lock(so, 0); + case SOPT_SET: + if (kctl->setopt == NULL) { + return ENOTSUP; + } + if (sopt->sopt_valsize != 0) { + MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, + M_WAITOK | M_ZERO); + if (data == NULL) { + return ENOMEM; } + error = sooptcopyin(sopt, data, + sopt->sopt_valsize, sopt->sopt_valsize); + } + if (error == 
0) { + socket_unlock(so, 0); + error = (*kctl->setopt)(kctl->kctlref, + kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name, + data, sopt->sopt_valsize); + socket_lock(so, 0); + } - if (data != NULL) - FREE(data, M_TEMP); - break; + if (data != NULL) { + FREE(data, M_TEMP); + } + break; - case SOPT_GET: - if (kctl->getopt == NULL) - return (ENOTSUP); + case SOPT_GET: + if (kctl->getopt == NULL) { + return ENOTSUP; + } - if (sopt->sopt_valsize && sopt->sopt_val) { - MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, - M_WAITOK | M_ZERO); - if (data == NULL) - return (ENOMEM); - /* - * 4108337 - copy user data in case the - * kernel control needs it - */ - error = sooptcopyin(sopt, data, - sopt->sopt_valsize, sopt->sopt_valsize); + if (sopt->sopt_valsize && sopt->sopt_val) { + MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, + M_WAITOK | M_ZERO); + if (data == NULL) { + return ENOMEM; } + /* + * 4108337 - copy user data in case the + * kernel control needs it + */ + error = sooptcopyin(sopt, data, + sopt->sopt_valsize, sopt->sopt_valsize); + } + if (error == 0) { + len = sopt->sopt_valsize; + socket_unlock(so, 0); + error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit, + kcb->userdata, sopt->sopt_name, + data, &len); + if (data != NULL && len > sopt->sopt_valsize) { + panic_plain("ctl_ctloutput: ctl %s returned " + "len (%lu) > sopt_valsize (%lu)\n", + kcb->kctl->name, len, + sopt->sopt_valsize); + } + socket_lock(so, 0); if (error == 0) { - len = sopt->sopt_valsize; - socket_unlock(so, 0); - error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit, - kcb->userdata, sopt->sopt_name, - data, &len); - if (data != NULL && len > sopt->sopt_valsize) - panic_plain("ctl_ctloutput: ctl %s returned " - "len (%lu) > sopt_valsize (%lu)\n", - kcb->kctl->name, len, - sopt->sopt_valsize); - socket_lock(so, 0); - if (error == 0) { - if (data != NULL) - error = sooptcopyout(sopt, data, len); - else - sopt->sopt_valsize = len; + if (data != NULL) { + error = sooptcopyout(sopt, data, len); + } else { + sopt->sopt_valsize = len; } } - if (data != NULL) - FREE(data, M_TEMP); - break; + } + if (data != NULL) { + FREE(data, M_TEMP); + } + break; } - return (error); + return error; } static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data, - struct ifnet *ifp, struct proc *p) + struct ifnet *ifp, struct proc *p) { #pragma unused(so, ifp, p) - int error = ENOTSUP; + int error = ENOTSUP; switch (cmd) { - /* get the number of controllers */ - case CTLIOCGCOUNT: { - struct kctl *kctl; - u_int32_t n = 0; + /* get the number of controllers */ + case CTLIOCGCOUNT: { + struct kctl *kctl; + u_int32_t n = 0; - lck_mtx_lock(ctl_mtx); - TAILQ_FOREACH(kctl, &ctl_head, next) - n++; - lck_mtx_unlock(ctl_mtx); + lck_mtx_lock(ctl_mtx); + TAILQ_FOREACH(kctl, &ctl_head, next) + n++; + lck_mtx_unlock(ctl_mtx); - bcopy(&n, data, sizeof (n)); - error = 0; - break; - } - case CTLIOCGINFO: { - struct ctl_info ctl_info; - struct kctl *kctl = 0; - size_t name_len; + bcopy(&n, data, sizeof(n)); + error = 0; + break; + } + case CTLIOCGINFO: { + struct ctl_info ctl_info; + struct kctl *kctl = 0; + size_t name_len; - bcopy(data, &ctl_info, sizeof (ctl_info)); - name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME); + bcopy(data, &ctl_info, sizeof(ctl_info)); + name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME); - if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) { - error = EINVAL; - break; - } - lck_mtx_lock(ctl_mtx); - kctl = ctl_find_by_name(ctl_info.ctl_name); - lck_mtx_unlock(ctl_mtx); - if (kctl == 0) { - error = ENOENT; - break; 
- } - ctl_info.ctl_id = kctl->id; - bcopy(&ctl_info, data, sizeof (ctl_info)); - error = 0; + if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) { + error = EINVAL; break; } + lck_mtx_lock(ctl_mtx); + kctl = ctl_find_by_name(ctl_info.ctl_name); + lck_mtx_unlock(ctl_mtx); + if (kctl == 0) { + error = ENOENT; + break; + } + ctl_info.ctl_id = kctl->id; + bcopy(&ctl_info, data, sizeof(ctl_info)); + error = 0; + break; + } /* add controls to get list of NKEs */ - } - return (error); + return error; } static void @@ -1324,20 +1376,22 @@ kctl_tbl_grow() do { (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx, - PSOCK | PCATCH, "kctl_tbl_growing", 0); + PSOCK | PCATCH, "kctl_tbl_growing", 0); } while (kctl_tbl_growing); kctl_tbl_growing_waiting--; } /* Another thread grew the table */ - if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) + if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) { return; + } /* Verify we have a sane size */ if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) { kctlstat.kcs_tbl_size_too_big++; - if (ctl_debug) + if (ctl_debug) { printf("%s kctl_tbl_size %lu too big\n", __func__, kctl_tbl_size); + } return; } kctl_tbl_growing = 1; @@ -1378,8 +1432,9 @@ kctl_make_ref(struct kctl *kctl) lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); - if (kctl_tbl_count >= kctl_tbl_size) + if (kctl_tbl_count >= kctl_tbl_size) { kctl_tbl_grow(); + } kctl->kctlref = NULL; for (i = 0; i < kctl_tbl_size; i++) { @@ -1395,7 +1450,7 @@ kctl_make_ref(struct kctl *kctl) * Add generation count as salt to reference to prevent * use after deregister */ - ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) & + ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) & KCTLREF_GENCNT_MASK) + ((i + 1) & KCTLREF_INDEX_MASK); @@ -1406,14 +1461,16 @@ kctl_make_ref(struct kctl *kctl) } } - if (kctl->kctlref == NULL) + if (kctl->kctlref == NULL) { panic("%s no space in table", __func__); + } - if (ctl_debug > 0) + if (ctl_debug > 0) { printf("%s %p for %p\n", - __func__, kctl->kctlref, kctl); + __func__, kctl->kctlref, kctl); + } - return (kctl->kctlref); + return kctl->kctlref; } static void @@ -1453,14 +1510,14 @@ kctl_from_ref(kern_ctl_ref kctlref) if (i >= kctl_tbl_size) { kctlstat.kcs_bad_kctlref++; - return (NULL); + return NULL; } kctl = kctl_table[i]; if (kctl->kctlref != kctlref) { kctlstat.kcs_bad_kctlref++; - return (NULL); + return NULL; } - return (kctl); + return kctl; } /* @@ -1469,23 +1526,27 @@ kctl_from_ref(kern_ctl_ref kctlref) errno_t ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) { - struct kctl *kctl = NULL; - struct kctl *kctl_next = NULL; - u_int32_t id = 1; - size_t name_len; - int is_extended = 0; - - if (userkctl == NULL) /* sanity check */ - return (EINVAL); - if (userkctl->ctl_connect == NULL) - return (EINVAL); + struct kctl *kctl = NULL; + struct kctl *kctl_next = NULL; + u_int32_t id = 1; + size_t name_len; + int is_extended = 0; + + if (userkctl == NULL) { /* sanity check */ + return EINVAL; + } + if (userkctl->ctl_connect == NULL) { + return EINVAL; + } name_len = strlen(userkctl->ctl_name); - if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) - return (EINVAL); + if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) { + return EINVAL; + } MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK); - if (kctl == NULL) - return (ENOMEM); + if (kctl == NULL) { + return ENOMEM; + } bzero((char *)kctl, sizeof(*kctl)); lck_mtx_lock(ctl_mtx); @@ -1493,7 +1554,7 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) if (kctl_make_ref(kctl) == NULL) { 
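Reviewer note: kctl_make_ref() above salts each table index with a generation counter so that a reference held across a deregister/reregister cycle fails validation instead of silently aliasing the recycled slot. A compact stand-alone model of the encode/validate pair; the widths and masks are stand-ins for the KCTLREF_* constants:

#include <stdint.h>
#include <stddef.h>

#define IDX_BITS 8
#define IDX_MASK ((1u << IDX_BITS) - 1)
#define TBL_SIZE (1u << IDX_BITS)

static struct slot {
	void      *ptr;
	uintptr_t  ref;   /* the object remembers its own reference */
} slots[TBL_SIZE];

static uintptr_t gencnt;

/* Encode slot i (assumed free, with i < IDX_MASK) into an opaque handle
 * whose low bits index the table and whose high bits carry a
 * generation count. */
static uintptr_t
make_ref(size_t i, void *p)
{
	uintptr_t ref = (++gencnt << IDX_BITS) + ((i + 1) & IDX_MASK);

	slots[i].ptr = p;
	slots[i].ref = ref;
	return ref;
}

/* A handle minted before a slot was recycled carries a stale generation
 * and fails the comparison, as kctl_from_ref() does above. */
static void *
from_ref(uintptr_t ref)
{
	size_t i = (ref & IDX_MASK) - 1;

	if (i >= TBL_SIZE || slots[i].ref != ref) {
		return NULL;
	}
	return slots[i].ptr;
}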
lck_mtx_unlock(ctl_mtx); FREE(kctl, M_TEMP); - return (ENOMEM); + return ENOMEM; } /* @@ -1515,7 +1576,7 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) kctl_delete_ref(kctl->kctlref); lck_mtx_unlock(ctl_mtx); FREE(kctl, M_TEMP); - return (EEXIST); + return EEXIST; } /* Start with 1 in case the list is empty */ @@ -1551,15 +1612,16 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) kctl->reg_unit = -1; } else { TAILQ_FOREACH(kctl_next, &ctl_head, next) { - if (kctl_next->id > userkctl->ctl_id) + if (kctl_next->id > userkctl->ctl_id) { break; + } } if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) { kctl_delete_ref(kctl->kctlref); lck_mtx_unlock(ctl_mtx); FREE(kctl, M_TEMP); - return (EEXIST); + return EEXIST; } kctl->id = userkctl->ctl_id; kctl->reg_unit = userkctl->ctl_unit; @@ -1599,10 +1661,11 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) TAILQ_INIT(&kctl->kcb_head); - if (kctl_next) + if (kctl_next) { TAILQ_INSERT_BEFORE(kctl_next, kctl, next); - else + } else { TAILQ_INSERT_TAIL(&ctl_head, kctl, next); + } kctlstat.kcs_reg_count++; kctlstat.kcs_gencnt++; @@ -1612,27 +1675,28 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) *kctlref = kctl->kctlref; ctl_post_msg(KEV_CTL_REGISTERED, kctl->id); - return (0); + return 0; } errno_t ctl_deregister(void *kctlref) { - struct kctl *kctl; + struct kctl *kctl; lck_mtx_lock(ctl_mtx); if ((kctl = kctl_from_ref(kctlref)) == NULL) { kctlstat.kcs_bad_kctlref++; lck_mtx_unlock(ctl_mtx); - if (ctl_debug != 0) + if (ctl_debug != 0) { printf("%s invalid kctlref %p\n", - __func__, kctlref); - return (EINVAL); + __func__, kctlref); + } + return EINVAL; } if (!TAILQ_EMPTY(&kctl->kcb_head)) { lck_mtx_unlock(ctl_mtx); - return (EBUSY); + return EBUSY; } TAILQ_REMOVE(&ctl_head, kctl, next); @@ -1645,7 +1709,7 @@ ctl_deregister(void *kctlref) ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id); FREE(kctl, M_TEMP); - return (0); + return 0; } /* @@ -1654,53 +1718,57 @@ ctl_deregister(void *kctlref) static struct kctl * ctl_find_by_name(const char *name) { - struct kctl *kctl; + struct kctl *kctl; lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); TAILQ_FOREACH(kctl, &ctl_head, next) - if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) - return (kctl); + if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) { + return kctl; + } - return (NULL); + return NULL; } u_int32_t ctl_id_by_name(const char *name) { - u_int32_t ctl_id = 0; - struct kctl *kctl; + u_int32_t ctl_id = 0; + struct kctl *kctl; lck_mtx_lock(ctl_mtx); kctl = ctl_find_by_name(name); - if (kctl) + if (kctl) { ctl_id = kctl->id; + } lck_mtx_unlock(ctl_mtx); - return (ctl_id); + return ctl_id; } errno_t ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize) { - int found = 0; + int found = 0; struct kctl *kctl; lck_mtx_lock(ctl_mtx); TAILQ_FOREACH(kctl, &ctl_head, next) { - if (kctl->id == id) + if (kctl->id == id) { break; + } } if (kctl) { - if (maxsize > MAX_KCTL_NAME) + if (maxsize > MAX_KCTL_NAME) { maxsize = MAX_KCTL_NAME; + } strlcpy(out_name, kctl->name, maxsize); found = 1; } lck_mtx_unlock(ctl_mtx); - return (found ? 0 : ENOENT); + return found ? 
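Reviewer note: for readers unfamiliar with the KPI being reformatted here, this is roughly what a kext-side caller of ctl_register() looks like — a sketch only, with a placeholder name and empty handlers, not a buildable kext. Note that ctl_connect is the one handler the registration path above insists on:

#include <sys/systm.h>
#include <sys/kern_control.h>

static kern_ctl_ref hello_ref;

static errno_t
hello_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
{
#pragma unused(kctlref, sac)
	*unitinfo = NULL;   /* per-connection state would be hung here */
	return 0;
}

static errno_t
hello_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo)
{
#pragma unused(kctlref, unit, unitinfo)
	return 0;
}

static errno_t
hello_register(void)
{
	struct kern_ctl_reg reg;

	bzero(&reg, sizeof(reg));
	strlcpy(reg.ctl_name, "com.example.hello", sizeof(reg.ctl_name));
	/* CTL_FLAG_REG_ID_UNIT not set, so ctl_register() assigns a
	 * dynamic id (the "Start with 1 in case the list is empty" path) */
	reg.ctl_connect = hello_connect;        /* mandatory, per the check above */
	reg.ctl_disconnect = hello_disconnect;
	return ctl_register(&reg, &hello_ref);
}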
0 : ENOENT; } /* @@ -1710,17 +1778,18 @@ ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize) static struct kctl * ctl_find_by_id_unit(u_int32_t id, u_int32_t unit) { - struct kctl *kctl; + struct kctl *kctl; lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); TAILQ_FOREACH(kctl, &ctl_head, next) { - if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) - return (kctl); - else if (kctl->id == id && kctl->reg_unit == unit) - return (kctl); + if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) { + return kctl; + } else if (kctl->id == id && kctl->reg_unit == unit) { + return kctl; + } } - return (NULL); + return NULL; } /* @@ -1729,22 +1798,23 @@ ctl_find_by_id_unit(u_int32_t id, u_int32_t unit) static struct ctl_cb * kcb_find(struct kctl *kctl, u_int32_t unit) { - struct ctl_cb *kcb; + struct ctl_cb *kcb; lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); TAILQ_FOREACH(kcb, &kctl->kcb_head, next) - if (kcb->sac.sc_unit == unit) - return (kcb); + if (kcb->sac.sc_unit == unit) { + return kcb; + } - return (NULL); + return NULL; } static struct socket * kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags) { struct socket *so = NULL; - struct ctl_cb *kcb; + struct ctl_cb *kcb; void *lr_saved; struct kctl *kctl; int i; @@ -1758,16 +1828,17 @@ kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags) if ((kctl = kctl_from_ref(kctlref)) == NULL) { kctlstat.kcs_bad_kctlref++; lck_mtx_unlock(ctl_mtx); - if (ctl_debug != 0) + if (ctl_debug != 0) { printf("%s invalid kctlref %p\n", - __func__, kctlref); - return (NULL); + __func__, kctlref); + } + return NULL; } kcb = kcb_find(kctl, unit); if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) { lck_mtx_unlock(ctl_mtx); - return (NULL); + return NULL; } /* * This prevents the socket from being closed @@ -1798,19 +1869,20 @@ kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags) } kcb->usecount--; - if (kcb->usecount == 0) + if (kcb->usecount == 0) { wakeup((event_t)&kcb->usecount); + } lck_mtx_unlock(ctl_mtx); - return (so); + return so; } static void ctl_post_msg(u_int32_t event_code, u_int32_t id) { - struct ctl_event_data ctl_ev_data; - struct kev_msg ev_msg; + struct ctl_event_data ctl_ev_data; + struct kev_msg ev_msg; lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED); @@ -1837,14 +1909,15 @@ ctl_lock(struct socket *so, int refcount, void *lr) { void *lr_saved; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } if (so->so_pcb != NULL) { lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx); - } else { + } else { panic("ctl_lock: so=%p NO PCB! 
lr=%p lrh= %s\n", so, lr_saved, solockhistory_nr(so)); /* NOTREACHED */ @@ -1852,17 +1925,18 @@ ctl_lock(struct socket *so, int refcount, void *lr) if (so->so_usecount < 0) { panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n", - so, so->so_pcb, lr_saved, so->so_usecount, - solockhistory_nr(so)); + so, so->so_pcb, lr_saved, so->so_usecount, + solockhistory_nr(so)); /* NOTREACHED */ } - if (refcount) + if (refcount) { so->so_usecount++; + } so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; - return (0); + so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; + return 0; } static int @@ -1871,10 +1945,11 @@ ctl_unlock(struct socket *so, int refcount, void *lr) void *lr_saved; lck_mtx_t *mutex_held; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } #if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n", @@ -1883,8 +1958,9 @@ ctl_unlock(struct socket *so, int refcount, void *lr) (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx), so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved)); #endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */ - if (refcount) + if (refcount) { so->so_usecount--; + } if (so->so_usecount < 0) { panic("ctl_unlock: so=%p usecount=%x lrh= %s\n", @@ -1893,38 +1969,40 @@ ctl_unlock(struct socket *so, int refcount, void *lr) } if (so->so_pcb == NULL) { panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n", - so, so->so_usecount, (void *)lr_saved, - solockhistory_nr(so)); + so, so->so_usecount, (void *)lr_saved, + solockhistory_nr(so)); /* NOTREACHED */ } mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx; - lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED); - so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; - lck_mtx_unlock(mutex_held); + lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED); + so->unlock_lr[so->next_unlock_lr] = lr_saved; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; + lck_mtx_unlock(mutex_held); - if (so->so_usecount == 0) + if (so->so_usecount == 0) { ctl_sofreelastref(so); + } - return (0); + return 0; } static lck_mtx_t * ctl_getlock(struct socket *so, int flags) { #pragma unused(flags) - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - if (so->so_pcb) { - if (so->so_usecount < 0) - panic("ctl_getlock: so=%p usecount=%x lrh= %s\n", - so, so->so_usecount, solockhistory_nr(so)); - return (kcb->mtx); + if (so->so_pcb) { + if (so->so_usecount < 0) { + panic("ctl_getlock: so=%p usecount=%x lrh= %s\n", + so, so->so_usecount, solockhistory_nr(so)); + } + return kcb->mtx; } else { - panic("ctl_getlock: so=%p NULL NO so_pcb %s\n", - so, solockhistory_nr(so)); - return (so->so_proto->pr_domain->dom_mtx); + panic("ctl_getlock: so=%p NULL NO so_pcb %s\n", + so, solockhistory_nr(so)); + return so->so_proto->pr_domain->dom_mtx; } } @@ -1932,264 +2010,268 @@ __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - int error = 0; - int n, i; - struct xsystmgen xsg; - void *buf = NULL; - struct kctl *kctl; - size_t item_size = ROUNDUP64(sizeof (struct xkctl_reg)); - - buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO); - if (buf == NULL) - return (ENOMEM); - - lck_mtx_lock(ctl_mtx); - - n = kctlstat.kcs_reg_count; - - if (req->oldptr == USER_ADDR_NULL) { - req->oldidx = (n + n/8) * sizeof(struct 
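Reviewer note: the lock_lr/unlock_lr updates above maintain a small ring of caller return addresses so the panic paths can print recent lock history via solockhistory_nr(). The bookkeeping in isolation, with LCKDBG_MAX standing in for SO_LCKDBG_MAX:

#include <stddef.h>

#define LCKDBG_MAX 4   /* stand-in for SO_LCKDBG_MAX */

struct lock_history {
	void *lock_lr[LCKDBG_MAX];
	int   next_lock_lr;
};

/* Remember the last few lock takers, overwriting the oldest entry.
 * When no explicit return address is supplied, record our caller's,
 * as ctl_lock()/ctl_unlock() do. */
static void
record_locker(struct lock_history *h, void *lr)
{
	void *lr_saved = (lr != NULL) ? lr : __builtin_return_address(0);

	h->lock_lr[h->next_lock_lr] = lr_saved;
	h->next_lock_lr = (h->next_lock_lr + 1) % LCKDBG_MAX;
}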
xkctl_reg); - goto done; - } - if (req->newptr != USER_ADDR_NULL) { - error = EPERM; - goto done; - } - bzero(&xsg, sizeof (xsg)); - xsg.xg_len = sizeof (xsg); - xsg.xg_count = n; - xsg.xg_gen = kctlstat.kcs_gencnt; - xsg.xg_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xsg, sizeof (xsg)); - if (error) { - goto done; - } - /* - * We are done if there is no pcb - */ - if (n == 0) { - goto done; - } - - i = 0; - for (i = 0, kctl = TAILQ_FIRST(&ctl_head); - i < n && kctl != NULL; - i++, kctl = TAILQ_NEXT(kctl, next)) { - struct xkctl_reg *xkr = (struct xkctl_reg *)buf; - struct ctl_cb *kcb; - u_int32_t pcbcount = 0; - - TAILQ_FOREACH(kcb, &kctl->kcb_head, next) - pcbcount++; - - bzero(buf, item_size); - - xkr->xkr_len = sizeof(struct xkctl_reg); - xkr->xkr_kind = XSO_KCREG; - xkr->xkr_id = kctl->id; - xkr->xkr_reg_unit = kctl->reg_unit; - xkr->xkr_flags = kctl->flags; - xkr->xkr_kctlref = (uint64_t)(kctl->kctlref); - xkr->xkr_recvbufsize = kctl->recvbufsize; - xkr->xkr_sendbufsize = kctl->sendbufsize; - xkr->xkr_lastunit = kctl->lastunit; - xkr->xkr_pcbcount = pcbcount; - xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect); - xkr->xkr_disconnect = - (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect); - xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send); - xkr->xkr_send_list = - (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list); - xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt); - xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt); - xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd); - strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name)); - - error = SYSCTL_OUT(req, buf, item_size); - } - - if (error == 0) { - /* - * Give the user an updated idea of our state. - * If the generation differs from what we told - * her before, she knows that something happened - * while we were processing this request, and it - * might be necessary to retry. 
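The handler above follows the classic two-pass sysctl shape: a sizing probe when req->oldptr is NULL (the estimate gets 1/8 slack so a buffer allocated from it survives modest growth before the second call), a rejection of writes, then a walk under ctl_mtx that emits one fixed-size record per entry. A minimal kernel-context sketch of that shape; example_list_handler, example_rec, and example_count are hypothetical stand-ins, not names from this patch:

extern int example_count;               /* hypothetical live entry count */

static int
example_list_handler SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
        struct example_rec { uint32_t er_id; } rec;
        int error = 0;
        int i, n = example_count;

        if (req->oldptr == USER_ADDR_NULL) {
                /* sizing pass: report the estimate plus 1/8 slack */
                req->oldidx = (n + n / 8) * sizeof(rec);
                return 0;
        }
        if (req->newptr != USER_ADDR_NULL) {
                return EPERM;           /* read-only node */
        }
        for (i = 0; error == 0 && i < n; i++) {
                rec.er_id = i;
                error = SYSCTL_OUT(req, &rec, sizeof(rec));
        }
        return error;
}

The slack matters because the consumer's allocation happens between the two calls, outside the lock; without headroom, a burst of registrations in that window would force an immediate retry.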
- */ - bzero(&xsg, sizeof (xsg)); - xsg.xg_len = sizeof (xsg); - xsg.xg_count = n; - xsg.xg_gen = kctlstat.kcs_gencnt; - xsg.xg_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xsg, sizeof (xsg)); - if (error) { - goto done; + int error = 0; + int n, i; + struct xsystmgen xsg; + void *buf = NULL; + struct kctl *kctl; + size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg)); + + buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO); + if (buf == NULL) { + return ENOMEM; + } + + lck_mtx_lock(ctl_mtx); + + n = kctlstat.kcs_reg_count; + + if (req->oldptr == USER_ADDR_NULL) { + req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg); + goto done; + } + if (req->newptr != USER_ADDR_NULL) { + error = EPERM; + goto done; + } + bzero(&xsg, sizeof(xsg)); + xsg.xg_len = sizeof(xsg); + xsg.xg_count = n; + xsg.xg_gen = kctlstat.kcs_gencnt; + xsg.xg_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xsg, sizeof(xsg)); + if (error) { + goto done; + } + /* + * We are done if there is no pcb + */ + if (n == 0) { + goto done; + } + + i = 0; + for (i = 0, kctl = TAILQ_FIRST(&ctl_head); + i < n && kctl != NULL; + i++, kctl = TAILQ_NEXT(kctl, next)) { + struct xkctl_reg *xkr = (struct xkctl_reg *)buf; + struct ctl_cb *kcb; + u_int32_t pcbcount = 0; + + TAILQ_FOREACH(kcb, &kctl->kcb_head, next) + pcbcount++; + + bzero(buf, item_size); + + xkr->xkr_len = sizeof(struct xkctl_reg); + xkr->xkr_kind = XSO_KCREG; + xkr->xkr_id = kctl->id; + xkr->xkr_reg_unit = kctl->reg_unit; + xkr->xkr_flags = kctl->flags; + xkr->xkr_kctlref = (uint64_t)(kctl->kctlref); + xkr->xkr_recvbufsize = kctl->recvbufsize; + xkr->xkr_sendbufsize = kctl->sendbufsize; + xkr->xkr_lastunit = kctl->lastunit; + xkr->xkr_pcbcount = pcbcount; + xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect); + xkr->xkr_disconnect = + (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect); + xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send); + xkr->xkr_send_list = + (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list); + xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt); + xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt); + xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd); + strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name)); + + error = SYSCTL_OUT(req, buf, item_size); + } + + if (error == 0) { + /* + * Give the user an updated idea of our state. + * If the generation differs from what we told + * her before, she knows that something happened + * while we were processing this request, and it + * might be necessary to retry. 
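The xsystmgen bracketing gives userland a consistency check: the handler emits kcs_gencnt before and after the record walk, and a reader that sees the two xg_gen values disagree knows the registry changed mid-dump and should retry. A hedged userspace sketch, assuming this handler is the one exposed as "net.systm.kctl.reg_list"; the actual comparison of the two generation structs is elided:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

static void *
fetch_kctl_snapshot(size_t *lenp)
{
        const char *mib = "net.systm.kctl.reg_list"; /* assumed OID name */
        int attempt;

        for (attempt = 0; attempt < 5; attempt++) {
                size_t len = 0;
                void *buf;

                if (sysctlbyname(mib, NULL, &len, NULL, 0) == -1) {
                        return NULL;    /* sizing probe failed */
                }
                buf = malloc(len);
                if (buf == NULL) {
                        return NULL;
                }
                if (sysctlbyname(mib, buf, &len, NULL, 0) == 0) {
                        /* leading and trailing struct xsystmgen bracket
                         * the records; compare their xg_gen fields and
                         * retry on mismatch */
                        *lenp = len;
                        return buf;
                }
                free(buf);              /* likely raced with growth; retry */
        }
        return NULL;
}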
+ */ + bzero(&xsg, sizeof(xsg)); + xsg.xg_len = sizeof(xsg); + xsg.xg_count = n; + xsg.xg_gen = kctlstat.kcs_gencnt; + xsg.xg_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xsg, sizeof(xsg)); + if (error) { + goto done; } } done: - lck_mtx_unlock(ctl_mtx); + lck_mtx_unlock(ctl_mtx); - if (buf != NULL) - FREE(buf, M_TEMP); + if (buf != NULL) { + FREE(buf, M_TEMP); + } - return (error); + return error; } __private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - int error = 0; - int n, i; - struct xsystmgen xsg; - void *buf = NULL; - struct kctl *kctl; - size_t item_size = ROUNDUP64(sizeof (struct xkctlpcb)) + - ROUNDUP64(sizeof (struct xsocket_n)) + - 2 * ROUNDUP64(sizeof (struct xsockbuf_n)) + - ROUNDUP64(sizeof (struct xsockstat_n)); - - buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO); - if (buf == NULL) - return (ENOMEM); - - lck_mtx_lock(ctl_mtx); - - n = kctlstat.kcs_pcbcount; - - if (req->oldptr == USER_ADDR_NULL) { - req->oldidx = (n + n/8) * item_size; - goto done; - } - if (req->newptr != USER_ADDR_NULL) { - error = EPERM; - goto done; - } - bzero(&xsg, sizeof (xsg)); - xsg.xg_len = sizeof (xsg); - xsg.xg_count = n; - xsg.xg_gen = kctlstat.kcs_gencnt; - xsg.xg_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xsg, sizeof (xsg)); - if (error) { - goto done; - } - /* - * We are done if there is no pcb - */ - if (n == 0) { - goto done; - } - - i = 0; - for (i = 0, kctl = TAILQ_FIRST(&ctl_head); - i < n && kctl != NULL; - kctl = TAILQ_NEXT(kctl, next)) { - struct ctl_cb *kcb; - - for (kcb = TAILQ_FIRST(&kctl->kcb_head); - i < n && kcb != NULL; - i++, kcb = TAILQ_NEXT(kcb, next)) { - struct xkctlpcb *xk = (struct xkctlpcb *)buf; - struct xsocket_n *xso = (struct xsocket_n *) - ADVANCE64(xk, sizeof (*xk)); - struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *) - ADVANCE64(xso, sizeof (*xso)); - struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *) - ADVANCE64(xsbrcv, sizeof (*xsbrcv)); - struct xsockstat_n *xsostats = (struct xsockstat_n *) - ADVANCE64(xsbsnd, sizeof (*xsbsnd)); - - bzero(buf, item_size); - - xk->xkp_len = sizeof(struct xkctlpcb); - xk->xkp_kind = XSO_KCB; - xk->xkp_unit = kcb->sac.sc_unit; - xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb); - xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl); - xk->xkp_kctlid = kctl->id; - strlcpy(xk->xkp_kctlname, kctl->name, - sizeof(xk->xkp_kctlname)); - - sotoxsocket_n(kcb->so, xso); - sbtoxsockbuf_n(kcb->so ? - &kcb->so->so_rcv : NULL, xsbrcv); - sbtoxsockbuf_n(kcb->so ? 
- &kcb->so->so_snd : NULL, xsbsnd); - sbtoxsockstat_n(kcb->so, xsostats); - - error = SYSCTL_OUT(req, buf, item_size); + int error = 0; + int n, i; + struct xsystmgen xsg; + void *buf = NULL; + struct kctl *kctl; + size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) + + ROUNDUP64(sizeof(struct xsocket_n)) + + 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) + + ROUNDUP64(sizeof(struct xsockstat_n)); + + buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO); + if (buf == NULL) { + return ENOMEM; + } + + lck_mtx_lock(ctl_mtx); + + n = kctlstat.kcs_pcbcount; + + if (req->oldptr == USER_ADDR_NULL) { + req->oldidx = (n + n / 8) * item_size; + goto done; + } + if (req->newptr != USER_ADDR_NULL) { + error = EPERM; + goto done; + } + bzero(&xsg, sizeof(xsg)); + xsg.xg_len = sizeof(xsg); + xsg.xg_count = n; + xsg.xg_gen = kctlstat.kcs_gencnt; + xsg.xg_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xsg, sizeof(xsg)); + if (error) { + goto done; + } + /* + * We are done if there is no pcb + */ + if (n == 0) { + goto done; + } + + i = 0; + for (i = 0, kctl = TAILQ_FIRST(&ctl_head); + i < n && kctl != NULL; + kctl = TAILQ_NEXT(kctl, next)) { + struct ctl_cb *kcb; + + for (kcb = TAILQ_FIRST(&kctl->kcb_head); + i < n && kcb != NULL; + i++, kcb = TAILQ_NEXT(kcb, next)) { + struct xkctlpcb *xk = (struct xkctlpcb *)buf; + struct xsocket_n *xso = (struct xsocket_n *) + ADVANCE64(xk, sizeof(*xk)); + struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *) + ADVANCE64(xso, sizeof(*xso)); + struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *) + ADVANCE64(xsbrcv, sizeof(*xsbrcv)); + struct xsockstat_n *xsostats = (struct xsockstat_n *) + ADVANCE64(xsbsnd, sizeof(*xsbsnd)); + + bzero(buf, item_size); + + xk->xkp_len = sizeof(struct xkctlpcb); + xk->xkp_kind = XSO_KCB; + xk->xkp_unit = kcb->sac.sc_unit; + xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb); + xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl); + xk->xkp_kctlid = kctl->id; + strlcpy(xk->xkp_kctlname, kctl->name, + sizeof(xk->xkp_kctlname)); + + sotoxsocket_n(kcb->so, xso); + sbtoxsockbuf_n(kcb->so ? + &kcb->so->so_rcv : NULL, xsbrcv); + sbtoxsockbuf_n(kcb->so ? + &kcb->so->so_snd : NULL, xsbsnd); + sbtoxsockstat_n(kcb->so, xsostats); + + error = SYSCTL_OUT(req, buf, item_size); } } - if (error == 0) { - /* - * Give the user an updated idea of our state. - * If the generation differs from what we told - * her before, she knows that something happened - * while we were processing this request, and it - * might be necessary to retry. - */ - bzero(&xsg, sizeof (xsg)); - xsg.xg_len = sizeof (xsg); - xsg.xg_count = n; - xsg.xg_gen = kctlstat.kcs_gencnt; - xsg.xg_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xsg, sizeof (xsg)); - if (error) { - goto done; + if (error == 0) { + /* + * Give the user an updated idea of our state. + * If the generation differs from what we told + * her before, she knows that something happened + * while we were processing this request, and it + * might be necessary to retry. 
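Each item emitted by kctl_pcblist is really five records packed into one 64-bit-aligned buffer: the xkctlpcb, then xsocket_n, two xsockbuf_n (receive and send), and xsockstat_n, each placed by stepping past the previous record's size rounded up to 8 bytes, so userland can walk the same offsets. A sketch of that arithmetic with local stand-ins for ROUNDUP64/ADVANCE64 (the macros' assumed semantics, not their kernel definitions):

#include <stdint.h>
#include <stddef.h>

#define EX_ROUNDUP64(x)    (((x) + 7ULL) & ~7ULL)
#define EX_ADVANCE64(p, n) ((void *)((char *)(p) + EX_ROUNDUP64(n)))

struct ex_hdr  { uint64_t h; };
struct ex_body { uint32_t b; };

/* Pack a header and a body contiguously, both 8-byte aligned, the way
 * kctl_pcblist lays out its per-socket records. Returns the item size. */
static size_t
ex_pack_item(void *buf)
{
        struct ex_hdr  *hp = buf;
        struct ex_body *bp = EX_ADVANCE64(hp, sizeof(*hp));

        hp->h = 1;
        bp->b = 2;
        return EX_ROUNDUP64(sizeof(*hp)) + EX_ROUNDUP64(sizeof(*bp));
}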
+ */ + bzero(&xsg, sizeof(xsg)); + xsg.xg_len = sizeof(xsg); + xsg.xg_count = n; + xsg.xg_gen = kctlstat.kcs_gencnt; + xsg.xg_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xsg, sizeof(xsg)); + if (error) { + goto done; } } done: - lck_mtx_unlock(ctl_mtx); + lck_mtx_unlock(ctl_mtx); - return (error); + return error; } int kctl_getstat SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - int error = 0; + int error = 0; - lck_mtx_lock(ctl_mtx); + lck_mtx_lock(ctl_mtx); - if (req->newptr != USER_ADDR_NULL) { - error = EPERM; - goto done; + if (req->newptr != USER_ADDR_NULL) { + error = EPERM; + goto done; } - if (req->oldptr == USER_ADDR_NULL) { - req->oldidx = sizeof(struct kctlstat); - goto done; + if (req->oldptr == USER_ADDR_NULL) { + req->oldidx = sizeof(struct kctlstat); + goto done; } - error = SYSCTL_OUT(req, &kctlstat, - MIN(sizeof(struct kctlstat), req->oldlen)); + error = SYSCTL_OUT(req, &kctlstat, + MIN(sizeof(struct kctlstat), req->oldlen)); done: - lck_mtx_unlock(ctl_mtx); - return (error); + lck_mtx_unlock(ctl_mtx); + return error; } void kctl_fill_socketinfo(struct socket *so, struct socket_info *si) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kern_ctl_info *kcsi = - &si->soi_proto.pri_kern_ctl; - struct kctl *kctl = kcb->kctl; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kern_ctl_info *kcsi = + &si->soi_proto.pri_kern_ctl; + struct kctl *kctl = kcb->kctl; - si->soi_kind = SOCKINFO_KERN_CTL; + si->soi_kind = SOCKINFO_KERN_CTL; - if (kctl == 0) - return; + if (kctl == 0) { + return; + } - kcsi->kcsi_id = kctl->id; - kcsi->kcsi_reg_unit = kctl->reg_unit; - kcsi->kcsi_flags = kctl->flags; - kcsi->kcsi_recvbufsize = kctl->recvbufsize; - kcsi->kcsi_sendbufsize = kctl->sendbufsize; - kcsi->kcsi_unit = kcb->sac.sc_unit; - strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME); + kcsi->kcsi_id = kctl->id; + kcsi->kcsi_reg_unit = kctl->reg_unit; + kcsi->kcsi_flags = kctl->flags; + kcsi->kcsi_recvbufsize = kctl->recvbufsize; + kcsi->kcsi_sendbufsize = kctl->sendbufsize; + kcsi->kcsi_unit = kcb->sac.sc_unit; + strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME); } diff --git a/bsd/kern/kern_core.c b/bsd/kern/kern_core.c index 07acd675c..77c38ccbe 100644 --- a/bsd/kern/kern_core.c +++ b/bsd/kern/kern_core.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved. 
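The kern_core.c hunks that follow are mostly style normalization around coredump(), but they also make the dump gate easy to read: no cores at all when do_coredump is 0, and none for set-id processes (saved ids differing from real ids) unless sugid_coredump is set. A sketch of that gate with stand-in parameters in place of the globals and kauth accessors:

/* Mirrors the do_coredump / sugid_coredump check in coredump() below. */
static int
ex_coredump_allowed(int dumping, int sugid_ok,
    uid_t svuid, uid_t ruid, gid_t svgid, gid_t rgid)
{
        if (dumping == 0) {
                return 0;       /* dumping disabled outright */
        }
        if (sugid_ok == 0 && (svuid != ruid || svgid != rgid)) {
                return 0;       /* set-id binary, not allowed */
        }
        return 1;
}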
@@ -59,10 +59,10 @@ #include #include /* last */ -#include /* current_map() */ -#include /* mach_vm_region_recurse() */ -#include /* task_suspend() */ -#include /* get_task_numacts() */ +#include /* current_map() */ +#include /* mach_vm_region_recurse() */ +#include /* task_suspend() */ +#include /* get_task_numacts() */ #include @@ -72,39 +72,39 @@ #endif typedef struct { - int flavor; /* the number for this flavor */ - mach_msg_type_number_t count; /* count of ints in this flavor */ + int flavor; /* the number for this flavor */ + mach_msg_type_number_t count; /* count of ints in this flavor */ } mythread_state_flavor_t; #if defined (__i386__) || defined (__x86_64__) -mythread_state_flavor_t thread_flavor_array [] = { - {x86_THREAD_STATE, x86_THREAD_STATE_COUNT}, - {x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT}, - {x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT}, - }; -int mynum_flavors=3; +mythread_state_flavor_t thread_flavor_array[] = { + {x86_THREAD_STATE, x86_THREAD_STATE_COUNT}, + {x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT}, + {x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT}, +}; +int mynum_flavors = 3; #elif defined (__arm__) -mythread_state_flavor_t thread_flavor_array[]={ - {ARM_THREAD_STATE , ARM_THREAD_STATE_COUNT}, - {ARM_VFP_STATE, ARM_VFP_STATE_COUNT}, - {ARM_EXCEPTION_STATE, ARM_EXCEPTION_STATE_COUNT} - }; -int mynum_flavors=3; +mythread_state_flavor_t thread_flavor_array[] = { + {ARM_THREAD_STATE, ARM_THREAD_STATE_COUNT}, + {ARM_VFP_STATE, ARM_VFP_STATE_COUNT}, + {ARM_EXCEPTION_STATE, ARM_EXCEPTION_STATE_COUNT} +}; +int mynum_flavors = 3; #elif defined (__arm64__) -mythread_state_flavor_t thread_flavor_array[]={ - {ARM_THREAD_STATE64 , ARM_THREAD_STATE64_COUNT}, - /* ARM64_TODO: VFP */ - {ARM_EXCEPTION_STATE64, ARM_EXCEPTION_STATE64_COUNT} - }; -int mynum_flavors=2; +mythread_state_flavor_t thread_flavor_array[] = { + {ARM_THREAD_STATE64, ARM_THREAD_STATE64_COUNT}, + /* ARM64_TODO: VFP */ + {ARM_EXCEPTION_STATE64, ARM_EXCEPTION_STATE64_COUNT} +}; +int mynum_flavors = 2; #else #error architecture not supported #endif typedef struct { - vm_offset_t header; + vm_offset_t header; int hoffset; mythread_state_flavor_t *flavors; int tstate_size; @@ -115,13 +115,13 @@ extern int freespace_mb(vnode_t vp); /* XXX not in a Mach header anywhere */ kern_return_t thread_getstatus(thread_t act, int flavor, - thread_state_t tstate, mach_msg_type_number_t *count); -void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *); + thread_state_t tstate, mach_msg_type_number_t *count); +void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *); #ifdef SECURE_KERNEL -__XNU_PRIVATE_EXTERN int do_coredump = 0; /* default: don't dump cores */ +__XNU_PRIVATE_EXTERN int do_coredump = 0; /* default: don't dump cores */ #else -__XNU_PRIVATE_EXTERN int do_coredump = 1; /* default: dump cores */ +__XNU_PRIVATE_EXTERN int do_coredump = 1; /* default: dump cores */ #endif __XNU_PRIVATE_EXTERN int sugid_coredump = 0; /* default: but not SGUID binaries */ @@ -173,40 +173,40 @@ process_cpu_subtype(proc_t core_proc) static void collectth_state(thread_t th_act, void *tirp) { - vm_offset_t header; - int hoffset, i ; + vm_offset_t header; + int hoffset, i; mythread_state_flavor_t *flavors; - struct thread_command *tc; + struct thread_command *tc; tir_t *t = (tir_t *)tirp; - /* - * Fill in thread command structure. 
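collectth_state writes one LC_THREAD load command per thread: a struct thread_command, then for each flavor a mythread_state_flavor_t (flavor number plus word count) followed by that many 32-bit words of register state fetched with thread_getstatus(). tstate_size is precomputed from the per-architecture flavor table so cmdsize can be fixed before any state is gathered. A sketch of that sizing, reusing the mythread_state_flavor_t declared above:

/* Per-thread state size: a (flavor, count) header plus `count' ints for
 * each flavor; the caller adds sizeof(struct thread_command) on top. */
static int
ex_tstate_size(const mythread_state_flavor_t *flavors, int nflavors)
{
        int i, size = 0;

        for (i = 0; i < nflavors; i++) {
                size += sizeof(mythread_state_flavor_t) +
                    flavors[i].count * sizeof(int);
        }
        return size;
}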
- */ - header = t->header; - hoffset = t->hoffset; - flavors = t->flavors; - - tc = (struct thread_command *) (header + hoffset); - tc->cmd = LC_THREAD; - tc->cmdsize = sizeof(struct thread_command) - + t->tstate_size; - hoffset += sizeof(struct thread_command); - /* - * Follow with a struct thread_state_flavor and - * the appropriate thread state struct for each - * thread state flavor. - */ - for (i = 0; i < t->flavor_count; i++) { - *(mythread_state_flavor_t *)(header+hoffset) = - flavors[i]; - hoffset += sizeof(mythread_state_flavor_t); - thread_getstatus(th_act, flavors[i].flavor, - (thread_state_t)(header+hoffset), - &flavors[i].count); - hoffset += flavors[i].count*sizeof(int); - } + /* + * Fill in thread command structure. + */ + header = t->header; + hoffset = t->hoffset; + flavors = t->flavors; + + tc = (struct thread_command *) (header + hoffset); + tc->cmd = LC_THREAD; + tc->cmdsize = sizeof(struct thread_command) + + t->tstate_size; + hoffset += sizeof(struct thread_command); + /* + * Follow with a struct thread_state_flavor and + * the appropriate thread state struct for each + * thread state flavor. + */ + for (i = 0; i < t->flavor_count; i++) { + *(mythread_state_flavor_t *)(header + hoffset) = + flavors[i]; + hoffset += sizeof(mythread_state_flavor_t); + thread_getstatus(th_act, flavors[i].flavor, + (thread_state_t)(header + hoffset), + &flavors[i].count); + hoffset += flavors[i].count * sizeof(int); + } - t->hoffset = hoffset; + t->hoffset = hoffset; } /* @@ -227,60 +227,59 @@ collectth_state(thread_t th_act, void *tirp) * to assumptions below; see variable declaration section for * details. */ -#define MAX_TSTATE_FLAVORS 10 +#define MAX_TSTATE_FLAVORS 10 int coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) { /* Begin assumptions that limit us to only the current process */ vfs_context_t ctx = vfs_context_current(); - vm_map_t map = current_map(); - task_t task = current_task(); + vm_map_t map = current_map(); + task_t task = current_task(); /* End assumptions */ kauth_cred_t cred = vfs_context_ucred(ctx); int error = 0; struct vnode_attr va; - int thread_count, segment_count; - int command_size, header_size, tstate_size; - int hoffset; - off_t foffset; + int thread_count, segment_count; + int command_size, header_size, tstate_size; + int hoffset; + off_t foffset; mach_vm_offset_t vmoffset; - vm_offset_t header; - mach_vm_size_t vmsize; - vm_prot_t prot; - vm_prot_t maxprot; - vm_inherit_t inherit; - int error1 = 0; - char stack_name[MAXCOMLEN+6]; - char *alloced_name = NULL; - char *name; + vm_offset_t header; + mach_vm_size_t vmsize; + vm_prot_t prot; + vm_prot_t maxprot; + vm_inherit_t inherit; + int error1 = 0; + char stack_name[MAXCOMLEN + 6]; + char *alloced_name = NULL; + char *name; mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS]; - vm_size_t mapsize; - int i; + vm_size_t mapsize; + int i; uint32_t nesting_depth = 0; - kern_return_t kret; + kern_return_t kret; struct vm_region_submap_info_64 vbr; mach_msg_type_number_t vbrcount = 0; tir_t tir1; struct vnode * vp; - struct mach_header *mh = NULL; /* protected by is_64 */ - struct mach_header_64 *mh64 = NULL; /* protected by is_64 */ - int is_64 = 0; - size_t mach_header_sz = sizeof(struct mach_header); - size_t segment_command_sz = sizeof(struct segment_command); - + struct mach_header *mh = NULL; /* protected by is_64 */ + struct mach_header_64 *mh64 = NULL; /* protected by is_64 */ + int is_64 = 0; + size_t mach_header_sz = sizeof(struct mach_header); + size_t segment_command_sz = 
sizeof(struct segment_command); + if (current_proc() != core_proc) { panic("coredump() called against proc that is not current_proc: %p", core_proc); } - if (do_coredump == 0 || /* Not dumping at all */ - ( (sugid_coredump == 0) && /* Not dumping SUID/SGID binaries */ - ( (kauth_cred_getsvuid(cred) != kauth_cred_getruid(cred)) || - (kauth_cred_getsvgid(cred) != kauth_cred_getrgid(cred))))) { - + if (do_coredump == 0 || /* Not dumping at all */ + ((sugid_coredump == 0) && /* Not dumping SUID/SGID binaries */ + ((kauth_cred_getsvuid(cred) != kauth_cred_getruid(cred)) || + (kauth_cred_getsvgid(cred) != kauth_cred_getrgid(cred))))) { #if CONFIG_AUDIT audit_proc_coredump(core_proc, NULL, EFAULT); #endif - return (EFAULT); + return EFAULT; } #if CONFIG_CSR @@ -293,7 +292,7 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) #if CONFIG_AUDIT audit_proc_coredump(core_proc, NULL, EFAULT); #endif - return (EFAULT); + return EFAULT; } #endif @@ -306,8 +305,9 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) mapsize = get_vmmap_size(map); if (((coredump_flags & COREDUMP_IGNORE_ULIMIT) == 0) && - (mapsize >= core_proc->p_rlimit[RLIMIT_CORE].rlim_cur)) - return (EFAULT); + (mapsize >= core_proc->p_rlimit[RLIMIT_CORE].rlim_cur)) { + return EFAULT; + } (void) task_suspend_internal(task); @@ -317,15 +317,17 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) /* if name creation fails, fall back to historical behaviour... */ if (alloced_name == NULL || proc_core_name(core_proc->p_comm, kauth_cred_getuid(cred), - core_proc->p_pid, alloced_name, MAXPATHLEN)) { + core_proc->p_pid, alloced_name, MAXPATHLEN)) { snprintf(stack_name, sizeof(stack_name), - "/cores/core.%d", core_proc->p_pid); + "/cores/core.%d", core_proc->p_pid); name = stack_name; - } else + } else { name = alloced_name; + } - if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx))) + if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx))) { goto out2; + } VATTR_INIT(&va); VATTR_WANTED(&va, va_nlink); @@ -336,7 +338,7 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) goto out; } - VATTR_INIT(&va); /* better to do it here than waste more stack in vnode_setsize */ + VATTR_INIT(&va); /* better to do it here than waste more stack in vnode_setsize */ VATTR_SET(&va, va_data_size, 0); if (core_proc == initproc) { VATTR_SET(&va, va_dataprotect_class, PROTECTION_CLASS_D); @@ -357,16 +359,17 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) */ thread_count = get_task_numacts(task); - segment_count = get_vmmap_entries(map); /* XXX */ - tir1.flavor_count = sizeof(thread_flavor_array)/sizeof(mythread_state_flavor_t); - bcopy(thread_flavor_array, flavors,sizeof(thread_flavor_array)); + segment_count = get_vmmap_entries(map); /* XXX */ + tir1.flavor_count = sizeof(thread_flavor_array) / sizeof(mythread_state_flavor_t); + bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array)); tstate_size = 0; - for (i = 0; i < tir1.flavor_count; i++) + for (i = 0; i < tir1.flavor_count; i++) { tstate_size += sizeof(mythread_state_flavor_t) + - (flavors[i].count * sizeof(int)); + (flavors[i].count * sizeof(int)); + } command_size = segment_count * segment_command_sz + - thread_count*sizeof(struct thread_command) + - tstate_size*thread_count; + thread_count * sizeof(struct thread_command) + + tstate_size * thread_count; header_size = command_size + mach_header_sz; @@ 
-386,7 +389,7 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) mh64->filetype = MH_CORE; mh64->ncmds = segment_count + thread_count; mh64->sizeofcmds = command_size; - mh64->reserved = 0; /* 8 byte alignment */ + mh64->reserved = 0; /* 8 byte alignment */ } else { mh = (struct mach_header *)header; mh->magic = MH_MAGIC; @@ -397,28 +400,28 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) mh->sizeofcmds = command_size; } - hoffset = mach_header_sz; /* offset into header */ - foffset = round_page(header_size); /* offset into file */ - vmoffset = MACH_VM_MIN_ADDRESS; /* offset into VM */ + hoffset = mach_header_sz; /* offset into header */ + foffset = round_page(header_size); /* offset into file */ + vmoffset = MACH_VM_MIN_ADDRESS; /* offset into VM */ /* - * We use to check for an error, here, now we try and get + * We use to check for an error, here, now we try and get * as much as we can */ while (segment_count > 0) { - struct segment_command *sc; - struct segment_command_64 *sc64; + struct segment_command *sc; + struct segment_command_64 *sc64; /* * Get region information for next region. */ - + while (1) { vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64; - if((kret = mach_vm_region_recurse(map, - &vmoffset, &vmsize, &nesting_depth, - (vm_region_recurse_info_t)&vbr, - &vbrcount)) != KERN_SUCCESS) { + if ((kret = mach_vm_region_recurse(map, + &vmoffset, &vmsize, &nesting_depth, + (vm_region_recurse_info_t)&vbr, + &vbrcount)) != KERN_SUCCESS) { break; } /* @@ -429,18 +432,19 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) */ if (!(is_64) && (vmoffset + vmsize > VM_MAX_ADDRESS)) { - kret = KERN_INVALID_ADDRESS; + kret = KERN_INVALID_ADDRESS; break; } - if(vbr.is_submap) { + if (vbr.is_submap) { nesting_depth++; continue; } else { break; } } - if(kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { break; + } prot = vbr.protection; maxprot = vbr.max_protection; @@ -462,16 +466,16 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) sc64->initprot = prot; sc64->nsects = 0; sc64->flags = 0; - } else { + } else { sc = (struct segment_command *) (header + hoffset); sc->cmd = LC_SEGMENT; sc->cmdsize = sizeof(struct segment_command); /* segment name is zeroed by kmem_alloc */ sc->segname[0] = 0; - sc->vmaddr = CAST_DOWN_EXPLICIT(vm_offset_t,vmoffset); - sc->vmsize = CAST_DOWN_EXPLICIT(vm_size_t,vmsize); - sc->fileoff = CAST_DOWN_EXPLICIT(uint32_t,foffset); /* will never truncate */ - sc->filesize = CAST_DOWN_EXPLICIT(uint32_t,vmsize); /* will never truncate */ + sc->vmaddr = CAST_DOWN_EXPLICIT(vm_offset_t, vmoffset); + sc->vmsize = CAST_DOWN_EXPLICIT(vm_size_t, vmsize); + sc->fileoff = CAST_DOWN_EXPLICIT(uint32_t, foffset); /* will never truncate */ + sc->filesize = CAST_DOWN_EXPLICIT(uint32_t, vmsize); /* will never truncate */ sc->maxprot = maxprot; sc->initprot = prot; sc->nsects = 0; @@ -484,7 +488,7 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) */ if ((prot & VM_PROT_READ) == 0) { mach_vm_protect(map, vmoffset, vmsize, FALSE, - prot|VM_PROT_READ); + prot | VM_PROT_READ); } /* * Only actually perform write if we can read. @@ -492,13 +496,11 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) * a hole in the file. 
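The region loop above leans on the mach_vm_region_recurse() contract: a hit on a submap does not advance the address, it bumps nesting_depth and asks again, so the walk descends into submaps until it lands on a terminal entry; unreadable regions are then made readable with mach_vm_protect() just long enough to dump them. A kernel-context sketch of the descent, under that assumed contract:

static kern_return_t
ex_next_region(vm_map_t map, mach_vm_offset_t *addr, mach_vm_size_t *size,
    uint32_t *depth, struct vm_region_submap_info_64 *info)
{
        kern_return_t kr;
        mach_msg_type_number_t count;

        for (;;) {
                count = VM_REGION_SUBMAP_INFO_COUNT_64;
                kr = mach_vm_region_recurse(map, addr, size, depth,
                    (vm_region_recurse_info_t)info, &count);
                if (kr != KERN_SUCCESS || !info->is_submap) {
                        return kr;      /* terminal entry, or end of map */
                }
                (*depth)++;             /* descend; same address, deeper */
        }
}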
*/ if ((maxprot & VM_PROT_READ) == VM_PROT_READ - && vbr.user_tag != VM_MEMORY_IOKIT - && coredumpok(map,vmoffset)) { - + && vbr.user_tag != VM_MEMORY_IOKIT + && coredumpok(map, vmoffset)) { error = vn_rdwr_64(UIO_WRITE, vp, vmoffset, vmsize, foffset, - (IS_64BIT_PROCESS(core_proc) ? UIO_USERSPACE64 : UIO_USERSPACE32), - IO_NOCACHE|IO_NODELOCKED|IO_UNIT, cred, (int64_t *) 0, core_proc); - + (IS_64BIT_PROCESS(core_proc) ? UIO_USERSPACE64 : UIO_USERSPACE32), + IO_NOCACHE | IO_NODELOCKED | IO_UNIT, cred, (int64_t *) 0, core_proc); } hoffset += segment_command_sz; @@ -526,30 +528,33 @@ coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags) tir1.hoffset = hoffset; tir1.flavors = flavors; tir1.tstate_size = tstate_size; - task_act_iterate_wth_args(task, collectth_state,&tir1); + task_act_iterate_wth_args(task, collectth_state, &tir1); /* * Write out the Mach header at the beginning of the * file. OK to use a 32 bit write for this. */ error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, header_size, (off_t)0, - UIO_SYSSPACE, IO_NOCACHE|IO_NODELOCKED|IO_UNIT, cred, (int *) 0, core_proc); + UIO_SYSSPACE, IO_NOCACHE | IO_NODELOCKED | IO_UNIT, cred, (int *) 0, core_proc); kmem_free(kernel_map, header, header_size); - if ((coredump_flags & COREDUMP_FULLFSYNC) && error == 0) + if ((coredump_flags & COREDUMP_FULLFSYNC) && error == 0) { error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx); + } out: error1 = vnode_close(vp, FWRITE, ctx); out2: #if CONFIG_AUDIT audit_proc_coredump(core_proc, name, error); #endif - if (alloced_name != NULL) + if (alloced_name != NULL) { FREE(alloced_name, M_TEMP); - if (error == 0) + } + if (error == 0) { error = error1; + } - return (error); + return error; } #else /* CONFIG_COREDUMP */ diff --git a/bsd/kern/kern_credential.c b/bsd/kern/kern_credential.c index 141807dc8..34b65a24f 100644 --- a/bsd/kern/kern_credential.c +++ b/bsd/kern/kern_credential.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,7 +37,7 @@ * and identity information. 
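kern_credential.c's external resolver moves each lookup through the three queues declared below: unsubmitted (waiting for the userspace daemon to collect it), submitted (collected, awaiting a KAUTH_EXTLOOKUP_RESULT), and done, with the requesting thread asleep on its work item throughout. A compressed kernel-context sketch of that lifecycle, using simplified stand-in types rather than the real kauth_resolver_work, queue initialization and locking elided:

#include <sys/queue.h>

struct ex_work {
        TAILQ_ENTRY(ex_work) link;
        uint32_t             seqno;    /* matches a result to its request */
        int                  flags;    /* UNSUBMITTED -> SUBMITTED -> DONE */
};
static TAILQ_HEAD(, ex_work) ex_unsubmitted, ex_submitted, ex_done;

/* daemon collects a request (all under the resolver mutex) */
static void
ex_collect(struct ex_work *w)
{
        TAILQ_REMOVE(&ex_unsubmitted, w, link);
        TAILQ_INSERT_TAIL(&ex_submitted, w, link);
}

/* daemon posts the answer; the submitter is sleeping on `w' */
static void
ex_complete(struct ex_work *w)
{
        TAILQ_REMOVE(&ex_submitted, w, link);
        TAILQ_INSERT_TAIL(&ex_done, w, link);
        wakeup(w);
}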
*/ -#include /* XXX trim includes */ +#include /* XXX trim includes */ #include #include #include @@ -53,19 +53,19 @@ #include #include -#include /* For manifest constants in posix_cred_access */ +#include /* For manifest constants in posix_cred_access */ #include #include #include -#include +#include #include #include #ifdef MACH_ASSERT # undef MACH_ASSERT #endif -#define MACH_ASSERT 1 /* XXX so bogus */ +#define MACH_ASSERT 1 /* XXX so bogus */ #include #if CONFIG_MACF @@ -78,9 +78,7 @@ void mach_kauth_cred_uthread_update( void ); -#define CRED_DIAGNOSTIC 0 - -# define NULLCRED_CHECK(_c) do {if (!IS_VALID_CRED(_c)) panic("%s: bad credential %p", __FUNCTION__,_c);} while(0) +# define NULLCRED_CHECK(_c) do {if (!IS_VALID_CRED(_c)) panic("%s: bad credential %p", __FUNCTION__,_c);} while(0) /* Set to 1 to turn on KAUTH_DEBUG for kern_credential.c */ #if 0 @@ -110,42 +108,42 @@ void mach_kauth_cred_uthread_update( void ); */ #if DEBUG_CRED -#define DEBUG_CRED_ENTER printf -#define DEBUG_CRED_CHANGE printf +#define DEBUG_CRED_ENTER printf +#define DEBUG_CRED_CHANGE printf extern void kauth_cred_print(kauth_cred_t cred); -#include /* needed for get_backtrace( ) */ +#include /* needed for get_backtrace( ) */ int is_target_cred( kauth_cred_t the_cred ); void get_backtrace( void ); -static int sysctl_dump_creds( __unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req ); +static int sysctl_dump_creds( __unused struct sysctl_oid *oidp, __unused void *arg1, + __unused int arg2, struct sysctl_req *req ); static int -sysctl_dump_cred_backtraces( __unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req ); +sysctl_dump_cred_backtraces( __unused struct sysctl_oid *oidp, __unused void *arg1, + __unused int arg2, struct sysctl_req *req ); #define MAX_STACK_DEPTH 8 struct cred_backtrace { - int depth; - void * stack[ MAX_STACK_DEPTH ]; + int depth; + void * stack[MAX_STACK_DEPTH]; }; typedef struct cred_backtrace cred_backtrace; #define MAX_CRED_BUFFER_SLOTS 200 struct cred_debug_buffer { - int next_slot; - cred_backtrace stack_buffer[ MAX_CRED_BUFFER_SLOTS ]; + int next_slot; + cred_backtrace stack_buffer[MAX_CRED_BUFFER_SLOTS]; }; typedef struct cred_debug_buffer cred_debug_buffer; cred_debug_buffer * cred_debug_buf_p = NULL; -#else /* !DEBUG_CRED */ +#else /* !DEBUG_CRED */ -#define DEBUG_CRED_ENTER(fmt, ...) do {} while (0) -#define DEBUG_CRED_CHANGE(fmt, ...) do {} while (0) +#define DEBUG_CRED_ENTER(fmt, ...) do {} while (0) +#define DEBUG_CRED_CHANGE(fmt, ...) 
do {} while (0) -#endif /* !DEBUG_CRED */ +#endif /* !DEBUG_CRED */ #if CONFIG_EXT_RESOLVER /* @@ -158,31 +156,31 @@ cred_debug_buffer * cred_debug_buf_p = NULL; */ static lck_mtx_t *kauth_resolver_mtx; -#define KAUTH_RESOLVER_LOCK() lck_mtx_lock(kauth_resolver_mtx); -#define KAUTH_RESOLVER_UNLOCK() lck_mtx_unlock(kauth_resolver_mtx); +#define KAUTH_RESOLVER_LOCK() lck_mtx_lock(kauth_resolver_mtx); +#define KAUTH_RESOLVER_UNLOCK() lck_mtx_unlock(kauth_resolver_mtx); -static volatile pid_t kauth_resolver_identity; -static int kauth_identitysvc_has_registered; -static int kauth_resolver_registered; -static uint32_t kauth_resolver_sequence; -static int kauth_resolver_timeout = 30; /* default: 30 seconds */ +static volatile pid_t kauth_resolver_identity; +static int kauth_identitysvc_has_registered; +static int kauth_resolver_registered; +static uint32_t kauth_resolver_sequence; +static int kauth_resolver_timeout = 30; /* default: 30 seconds */ struct kauth_resolver_work { TAILQ_ENTRY(kauth_resolver_work) kr_link; struct kauth_identity_extlookup kr_work; - uint64_t kr_extend; - uint32_t kr_seqno; - int kr_refs; - int kr_flags; -#define KAUTH_REQUEST_UNSUBMITTED (1<<0) -#define KAUTH_REQUEST_SUBMITTED (1<<1) -#define KAUTH_REQUEST_DONE (1<<2) - int kr_result; + uint64_t kr_extend; + uint32_t kr_seqno; + int kr_refs; + int kr_flags; +#define KAUTH_REQUEST_UNSUBMITTED (1<<0) +#define KAUTH_REQUEST_SUBMITTED (1<<1) +#define KAUTH_REQUEST_DONE (1<<2) + int kr_result; }; TAILQ_HEAD(kauth_resolver_unsubmitted_head, kauth_resolver_work) kauth_resolver_unsubmitted; -TAILQ_HEAD(kauth_resolver_submitted_head, kauth_resolver_work) kauth_resolver_submitted; -TAILQ_HEAD(kauth_resolver_done_head, kauth_resolver_work) kauth_resolver_done; +TAILQ_HEAD(kauth_resolver_submitted_head, kauth_resolver_work) kauth_resolver_submitted; +TAILQ_HEAD(kauth_resolver_done_head, kauth_resolver_work) kauth_resolver_done; /* Number of resolver timeouts between logged complaints */ #define KAUTH_COMPLAINT_INTERVAL 1000 @@ -190,39 +188,39 @@ int kauth_resolver_timeout_cnt = 0; #if DEVELOPMENT || DEBUG /* Internal builds get different (less ambiguous) breadcrumbs. */ -#define KAUTH_RESOLVER_FAILED_ERRCODE EOWNERDEAD +#define KAUTH_RESOLVER_FAILED_ERRCODE EOWNERDEAD #else /* But non-Internal builds get errors that are allowed by standards. */ -#define KAUTH_RESOLVER_FAILED_ERRCODE EIO +#define KAUTH_RESOLVER_FAILED_ERRCODE EIO #endif /* DEVELOPMENT || DEBUG */ int kauth_resolver_failed_cnt = 0; -#define RESOLVER_FAILED_MESSAGE(fmt, args...) \ -do { \ - if (!(kauth_resolver_failed_cnt++ % 100)) { \ - printf("%s: " fmt "\n", __PRETTY_FUNCTION__, ##args); \ - } \ +#define RESOLVER_FAILED_MESSAGE(fmt, args...) 
\ +do { \ + if (!(kauth_resolver_failed_cnt++ % 100)) { \ + printf("%s: " fmt "\n", __PRETTY_FUNCTION__, ##args); \ + } \ } while (0) -static int kauth_resolver_submit(struct kauth_identity_extlookup *lkp, uint64_t extend_data); -static int kauth_resolver_complete(user_addr_t message); -static int kauth_resolver_getwork(user_addr_t message); -static int kauth_resolver_getwork2(user_addr_t message); +static int kauth_resolver_submit(struct kauth_identity_extlookup *lkp, uint64_t extend_data); +static int kauth_resolver_complete(user_addr_t message); +static int kauth_resolver_getwork(user_addr_t message); +static int kauth_resolver_getwork2(user_addr_t message); static __attribute__((noinline)) int __KERNEL_IS_WAITING_ON_EXTERNAL_CREDENTIAL_RESOLVER__( - struct kauth_resolver_work *); + struct kauth_resolver_work *); -#define KAUTH_CACHES_MAX_SIZE 10000 /* Max # entries for both groups and id caches */ +#define KAUTH_CACHES_MAX_SIZE 10000 /* Max # entries for both groups and id caches */ struct kauth_identity { TAILQ_ENTRY(kauth_identity) ki_link; - int ki_valid; - uid_t ki_uid; - gid_t ki_gid; - int ki_supgrpcnt; - gid_t ki_supgrps[NGROUPS]; - guid_t ki_guid; + int ki_valid; + uid_t ki_uid; + gid_t ki_gid; + int ki_supgrpcnt; + gid_t ki_supgrps[NGROUPS]; + guid_t ki_guid; ntsid_t ki_ntsid; - const char *ki_name; /* string name from string cache */ + const char *ki_name; /* string name from string cache */ /* * Expiry times are the earliest time at which we will disregard the * cached state and go to userland. Before then if the valid bit is @@ -230,77 +228,71 @@ struct kauth_identity { * not go to userland to resolve, just assume that there is no answer * available. */ - time_t ki_groups_expiry; - time_t ki_guid_expiry; - time_t ki_ntsid_expiry; + time_t ki_groups_expiry; + time_t ki_guid_expiry; + time_t ki_ntsid_expiry; }; static TAILQ_HEAD(kauth_identity_head, kauth_identity) kauth_identities; static lck_mtx_t *kauth_identity_mtx; -#define KAUTH_IDENTITY_LOCK() lck_mtx_lock(kauth_identity_mtx); -#define KAUTH_IDENTITY_UNLOCK() lck_mtx_unlock(kauth_identity_mtx); -#define KAUTH_IDENTITY_CACHEMAX_DEFAULT 100 /* XXX default sizing? */ +#define KAUTH_IDENTITY_LOCK() lck_mtx_lock(kauth_identity_mtx); +#define KAUTH_IDENTITY_UNLOCK() lck_mtx_unlock(kauth_identity_mtx); +#define KAUTH_IDENTITY_CACHEMAX_DEFAULT 100 /* XXX default sizing? 
*/ static int kauth_identity_cachemax = KAUTH_IDENTITY_CACHEMAX_DEFAULT; static int kauth_identity_count; static struct kauth_identity *kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry, - ntsid_t *ntsidp, time_t ntsid_expiry, int supgrpcnt, gid_t *supgrps, time_t groups_expiry, - const char *name, int nametype); -static void kauth_identity_register_and_free(struct kauth_identity *kip); -static void kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_identity *kip, uint64_t extend_data); -static void kauth_identity_trimcache(int newsize); -static void kauth_identity_lru(struct kauth_identity *kip); -static int kauth_identity_guid_expired(struct kauth_identity *kip); -static int kauth_identity_ntsid_expired(struct kauth_identity *kip); -static int kauth_identity_find_uid(uid_t uid, struct kauth_identity *kir, char *getname); -static int kauth_identity_find_gid(gid_t gid, struct kauth_identity *kir, char *getname); -static int kauth_identity_find_guid(guid_t *guidp, struct kauth_identity *kir, char *getname); -static int kauth_identity_find_ntsid(ntsid_t *ntsid, struct kauth_identity *kir, char *getname); -static int kauth_identity_find_nam(char *name, int valid, struct kauth_identity *kir); + ntsid_t *ntsidp, time_t ntsid_expiry, int supgrpcnt, gid_t *supgrps, time_t groups_expiry, + const char *name, int nametype); +static void kauth_identity_register_and_free(struct kauth_identity *kip); +static void kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_identity *kip, uint64_t extend_data); +static void kauth_identity_trimcache(int newsize); +static void kauth_identity_lru(struct kauth_identity *kip); +static int kauth_identity_guid_expired(struct kauth_identity *kip); +static int kauth_identity_ntsid_expired(struct kauth_identity *kip); +static int kauth_identity_find_uid(uid_t uid, struct kauth_identity *kir, char *getname); +static int kauth_identity_find_gid(gid_t gid, struct kauth_identity *kir, char *getname); +static int kauth_identity_find_guid(guid_t *guidp, struct kauth_identity *kir, char *getname); +static int kauth_identity_find_ntsid(ntsid_t *ntsid, struct kauth_identity *kir, char *getname); +static int kauth_identity_find_nam(char *name, int valid, struct kauth_identity *kir); struct kauth_group_membership { TAILQ_ENTRY(kauth_group_membership) gm_link; - uid_t gm_uid; /* the identity whose membership we're recording */ - gid_t gm_gid; /* group of which they are a member */ - time_t gm_expiry; /* TTL for the membership, or 0 for persistent entries */ - int gm_flags; -#define KAUTH_GROUP_ISMEMBER (1<<0) + uid_t gm_uid; /* the identity whose membership we're recording */ + gid_t gm_gid; /* group of which they are a member */ + time_t gm_expiry; /* TTL for the membership, or 0 for persistent entries */ + int gm_flags; +#define KAUTH_GROUP_ISMEMBER (1<<0) }; TAILQ_HEAD(kauth_groups_head, kauth_group_membership) kauth_groups; static lck_mtx_t *kauth_groups_mtx; -#define KAUTH_GROUPS_LOCK() lck_mtx_lock(kauth_groups_mtx); -#define KAUTH_GROUPS_UNLOCK() lck_mtx_unlock(kauth_groups_mtx); -#define KAUTH_GROUPS_CACHEMAX_DEFAULT 100 /* XXX default sizing? */ +#define KAUTH_GROUPS_LOCK() lck_mtx_lock(kauth_groups_mtx); +#define KAUTH_GROUPS_UNLOCK() lck_mtx_unlock(kauth_groups_mtx); +#define KAUTH_GROUPS_CACHEMAX_DEFAULT 100 /* XXX default sizing? 
*/ static int kauth_groups_cachemax = KAUTH_GROUPS_CACHEMAX_DEFAULT; static int kauth_groups_count; -static int kauth_groups_expired(struct kauth_group_membership *gm); -static void kauth_groups_lru(struct kauth_group_membership *gm); -static void kauth_groups_updatecache(struct kauth_identity_extlookup *el); -static void kauth_groups_trimcache(int newsize); - -#endif /* CONFIG_EXT_RESOLVER */ +static int kauth_groups_expired(struct kauth_group_membership *gm); +static void kauth_groups_lru(struct kauth_group_membership *gm); +static void kauth_groups_updatecache(struct kauth_identity_extlookup *el); +static void kauth_groups_trimcache(int newsize); -#define KAUTH_CRED_TABLE_SIZE 97 +#endif /* CONFIG_EXT_RESOLVER */ -TAILQ_HEAD(kauth_cred_entry_head, ucred); -static struct kauth_cred_entry_head * kauth_cred_table_anchor = NULL; +#define KAUTH_CRED_TABLE_SIZE 128 -#define KAUTH_CRED_HASH_DEBUG 0 +LIST_HEAD(kauth_cred_entry_head, ucred); +static struct kauth_cred_entry_head + kauth_cred_table_anchor[KAUTH_CRED_TABLE_SIZE]; -static int kauth_cred_add(kauth_cred_t new_cred); -static boolean_t kauth_cred_remove(kauth_cred_t cred); -static inline u_long kauth_cred_hash(const uint8_t *datap, int data_len, u_long start_key); -static u_long kauth_cred_get_hashkey(kauth_cred_t cred); +static struct kauth_cred_entry_head *kauth_cred_get_bucket(kauth_cred_t cred); +static kauth_cred_t kauth_cred_add(kauth_cred_t new_cred, struct kauth_cred_entry_head *bucket); +static void kauth_cred_remove_locked(kauth_cred_t cred); static kauth_cred_t kauth_cred_update(kauth_cred_t old_cred, kauth_cred_t new_cred, boolean_t retain_auditinfo); -static boolean_t kauth_cred_unref_hashlocked(kauth_cred_t *credp); - -#if KAUTH_CRED_HASH_DEBUG -static int kauth_cred_count = 0; -static void kauth_cred_hash_print(void); -static void kauth_cred_print(kauth_cred_t cred); -#endif +static kauth_cred_t kauth_cred_find_and_ref(kauth_cred_t cred, + struct kauth_cred_entry_head *bucket); +static bool kauth_cred_is_equal(kauth_cred_t cred1, kauth_cred_t cred2); #if CONFIG_EXT_RESOLVER @@ -308,7 +300,7 @@ static void kauth_cred_print(kauth_cred_t cred); * __KERNEL_IS_WAITING_ON_EXTERNAL_CREDENTIAL_RESOLVER__ * * Description: Waits for the user space daemon to respond to the request - * we made. Function declared non inline to be visible in + * we made. Function declared non inline to be visible in * stackshots and spindumps as well as debugging. * * Parameters: workp Work queue entry. @@ -320,7 +312,7 @@ static void kauth_cred_print(kauth_cred_t cred); * ERESTART returned by msleep. * */ -static __attribute__((noinline)) int +static __attribute__((noinline)) int __KERNEL_IS_WAITING_ON_EXTERNAL_CREDENTIAL_RESOLVER__( struct kauth_resolver_work *workp) { @@ -332,8 +324,9 @@ __KERNEL_IS_WAITING_ON_EXTERNAL_CREDENTIAL_RESOLVER__( ts.tv_nsec = 0; error = msleep(workp, kauth_resolver_mtx, PCATCH, "kr_submit", &ts); /* request has been completed? */ - if ((error == 0) && (workp->kr_flags & KAUTH_REQUEST_DONE)) + if ((error == 0) && (workp->kr_flags & KAUTH_REQUEST_DONE)) { break; + } /* woken because the resolver has died? */ if (kauth_resolver_identity == 0) { RESOLVER_FAILED_MESSAGE("kauth external resolver died while while waiting for work to complete"); @@ -341,8 +334,9 @@ __KERNEL_IS_WAITING_ON_EXTERNAL_CREDENTIAL_RESOLVER__( break; } /* an error? 
*/ - if (error != 0) + if (error != 0) { break; + } } return error; } @@ -382,7 +376,7 @@ kauth_resolver_init(void) TAILQ_INIT(&kauth_resolver_submitted); TAILQ_INIT(&kauth_resolver_done); kauth_resolver_sequence = 31337; - kauth_resolver_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0/*LCK_ATTR_NULL*/); + kauth_resolver_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0 /*LCK_ATTR_NULL*/); } @@ -428,8 +422,8 @@ kauth_resolver_submit(struct kauth_identity_extlookup *lkp, uint64_t extend_data { struct kauth_resolver_work *workp, *killp; struct timespec ts; - int error, shouldfree; - + int error, shouldfree; + /* no point actually blocking if the resolver isn't up yet */ if (kauth_resolver_identity == 0) { /* @@ -441,20 +435,21 @@ kauth_resolver_submit(struct kauth_identity_extlookup *lkp, uint64_t extend_data * process, so that memberd doesn't starve while we are in a * tight loop between user and kernel, eating all the CPU. */ - error = tsleep(&ts, PZERO | PCATCH, "kr_submit", hz/2); + error = tsleep(&ts, PZERO | PCATCH, "kr_submit", hz / 2); if (kauth_resolver_identity == 0) { /* * if things haven't changed while we were asleep, * tell the caller we couldn't get an authoritative * answer. */ - return(EWOULDBLOCK); + return EWOULDBLOCK; } } - + MALLOC(workp, struct kauth_resolver_work *, sizeof(*workp), M_KAUTH, M_WAITOK); - if (workp == NULL) - return(ENOMEM); + if (workp == NULL) { + return ENOMEM; + } workp->kr_work = *lkp; workp->kr_extend = extend_data; @@ -488,42 +483,43 @@ kauth_resolver_submit(struct kauth_identity_extlookup *lkp, uint64_t extend_data error = __KERNEL_IS_WAITING_ON_EXTERNAL_CREDENTIAL_RESOLVER__(workp); /* if the request was processed, copy the result */ - if (error == 0) + if (error == 0) { *lkp = workp->kr_work; - + } + if (error == EWOULDBLOCK) { - if ((kauth_resolver_timeout_cnt++ % KAUTH_COMPLAINT_INTERVAL) == 0) { - printf("kauth external resolver timed out (%d timeout(s) of %d seconds).\n", - kauth_resolver_timeout_cnt, kauth_resolver_timeout); - } - - if (workp->kr_flags & KAUTH_REQUEST_UNSUBMITTED) { - /* - * If the request timed out and was never collected, the resolver - * is dead and probably not coming back anytime soon. In this - * case we revert to no-resolver behaviour, and punt all the other - * sleeping requests to clear the backlog. - */ - KAUTH_DEBUG("RESOLVER - request timed out without being collected for processing, resolver dead"); - - /* - * Make the current resolver non-authoritative, and mark it as - * no longer registered to prevent kauth_cred_ismember_gid() - * enqueueing more work until a new one is registered. This - * mitigates the damage a crashing resolver may inflict. - */ - kauth_resolver_identity = 0; - kauth_resolver_registered = 0; - - /* kill all the other requestes that are waiting as well */ - TAILQ_FOREACH(killp, &kauth_resolver_submitted, kr_link) - wakeup(killp); - TAILQ_FOREACH(killp, &kauth_resolver_unsubmitted, kr_link) - wakeup(killp); - /* Cause all waiting-for-work threads to return EIO */ - wakeup((caddr_t)&kauth_resolver_unsubmitted); - } - } + if ((kauth_resolver_timeout_cnt++ % KAUTH_COMPLAINT_INTERVAL) == 0) { + printf("kauth external resolver timed out (%d timeout(s) of %d seconds).\n", + kauth_resolver_timeout_cnt, kauth_resolver_timeout); + } + + if (workp->kr_flags & KAUTH_REQUEST_UNSUBMITTED) { + /* + * If the request timed out and was never collected, the resolver + * is dead and probably not coming back anytime soon. 
In this + * case we revert to no-resolver behaviour, and punt all the other + * sleeping requests to clear the backlog. + */ + KAUTH_DEBUG("RESOLVER - request timed out without being collected for processing, resolver dead"); + + /* + * Make the current resolver non-authoritative, and mark it as + * no longer registered to prevent kauth_cred_ismember_gid() + * enqueueing more work until a new one is registered. This + * mitigates the damage a crashing resolver may inflict. + */ + kauth_resolver_identity = 0; + kauth_resolver_registered = 0; + + /* kill all the other requestes that are waiting as well */ + TAILQ_FOREACH(killp, &kauth_resolver_submitted, kr_link) + wakeup(killp); + TAILQ_FOREACH(killp, &kauth_resolver_unsubmitted, kr_link) + wakeup(killp); + /* Cause all waiting-for-work threads to return EIO */ + wakeup((caddr_t)&kauth_resolver_unsubmitted); + } + } /* * drop our reference on the work item, and note whether we should @@ -560,7 +556,7 @@ kauth_resolver_submit(struct kauth_identity_extlookup *lkp, uint64_t extend_data } KAUTH_DEBUG("RESOLVER - returning %d", error); - return(error); + return error; } @@ -599,7 +595,7 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3 if (!IOTaskHasEntitlement(current_task(), IDENTITYSVC_ENTITLEMENT)) { KAUTH_DEBUG("RESOLVER - pid %d not entitled to call identitysvc", current_proc()->p_pid); - return(EPERM); + return EPERM; } /* @@ -609,7 +605,7 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3 new_id = current_proc()->p_pid; if ((error = kauth_authorize_generic(kauth_cred_get(), KAUTH_GENERIC_ISSUSER)) != 0) { KAUTH_DEBUG("RESOLVER - pid %d refused permission to become identity resolver", new_id); - return(error); + return error; } KAUTH_RESOLVER_LOCK(); if (kauth_resolver_identity != new_id) { @@ -637,7 +633,7 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3 wakeup(&kauth_resolver_unsubmitted); } KAUTH_RESOLVER_UNLOCK(); - return(0); + return 0; } /* @@ -646,49 +642,49 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3 */ if ((kauth_cred_getuid(kauth_cred_get()) != 0) || (current_proc()->p_pid != kauth_resolver_identity)) { KAUTH_DEBUG("RESOLVER - call from bogus resolver %d\n", current_proc()->p_pid); - return(EPERM); + return EPERM; } - + if (opcode == KAUTH_GET_CACHE_SIZES) { KAUTH_IDENTITY_LOCK(); sz_arg.kcs_id_size = kauth_identity_cachemax; KAUTH_IDENTITY_UNLOCK(); - + KAUTH_GROUPS_LOCK(); sz_arg.kcs_group_size = kauth_groups_cachemax; KAUTH_GROUPS_UNLOCK(); - if ((error = copyout(&sz_arg, uap->message, sizeof (sz_arg))) != 0) { - return (error); + if ((error = copyout(&sz_arg, uap->message, sizeof(sz_arg))) != 0) { + return error; } - - return (0); + + return 0; } else if (opcode == KAUTH_SET_CACHE_SIZES) { - if ((error = copyin(uap->message, &sz_arg, sizeof (sz_arg))) != 0) { - return (error); + if ((error = copyin(uap->message, &sz_arg, sizeof(sz_arg))) != 0) { + return error; } - + if ((sz_arg.kcs_group_size > KAUTH_CACHES_MAX_SIZE) || (sz_arg.kcs_id_size > KAUTH_CACHES_MAX_SIZE)) { - return (EINVAL); + return EINVAL; } - + KAUTH_IDENTITY_LOCK(); kauth_identity_cachemax = sz_arg.kcs_id_size; kauth_identity_trimcache(kauth_identity_cachemax); KAUTH_IDENTITY_UNLOCK(); - + KAUTH_GROUPS_LOCK(); kauth_groups_cachemax = sz_arg.kcs_group_size; kauth_groups_trimcache(kauth_groups_cachemax); KAUTH_GROUPS_UNLOCK(); - - return (0); + + return 0; } else if (opcode == KAUTH_CLEAR_CACHES) { KAUTH_IDENTITY_LOCK(); 
kauth_identity_trimcache(0); KAUTH_IDENTITY_UNLOCK(); - + KAUTH_GROUPS_LOCK(); kauth_groups_trimcache(0); KAUTH_GROUPS_UNLOCK(); @@ -710,37 +706,39 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3 kauth_resolver_registered = 0; TAILQ_FOREACH(killp, &kauth_resolver_submitted, kr_link) - wakeup(killp); + wakeup(killp); TAILQ_FOREACH(killp, &kauth_resolver_unsubmitted, kr_link) - wakeup(killp); + wakeup(killp); /* Cause all waiting-for-work threads to return EIO */ wakeup((caddr_t)&kauth_resolver_unsubmitted); KAUTH_RESOLVER_UNLOCK(); } - + /* * Got a result returning? */ if (opcode & KAUTH_EXTLOOKUP_RESULT) { - if ((error = kauth_resolver_complete(message)) != 0) - return(error); + if ((error = kauth_resolver_complete(message)) != 0) { + return error; + } } /* * Caller wants to take more work? */ if (opcode & KAUTH_EXTLOOKUP_WORKER) { - if ((error = kauth_resolver_getwork(message)) != 0) - return(error); + if ((error = kauth_resolver_getwork(message)) != 0) { + return error; + } } - return(0); + return 0; } /* * kauth_resolver_getwork_continue - * + * * Description: Continuation for kauth_resolver_getwork * * Parameters: result Error code or 0 for the sleep @@ -762,7 +760,7 @@ kauth_resolver_getwork_continue(int result) if (result) { KAUTH_RESOLVER_UNLOCK(); - return(result); + return result; } /* @@ -784,13 +782,13 @@ kauth_resolver_getwork_continue(int result) error = KAUTH_RESOLVER_FAILED_ERRCODE; } KAUTH_RESOLVER_UNLOCK(); - return(error); + return error; } thread = current_thread(); ut = get_bsdthread_info(thread); message = ut->uu_save.uus_kauth.message; - return(kauth_resolver_getwork2(message)); + return kauth_resolver_getwork2(message); } @@ -817,7 +815,7 @@ static int kauth_resolver_getwork2(user_addr_t message) { struct kauth_resolver_work *workp; - int error; + int error; /* * Note: We depend on the caller protecting us from a NULL work item @@ -839,8 +837,8 @@ kauth_resolver_getwork2(user_addr_t message) } /* AFTER FIELD */ if ((error = copyout(&workp->kr_work.el_info_reserved_1, - message + offsetof(struct kauth_identity_extlookup, el_info_reserved_1), - sizeof(struct kauth_identity_extlookup) - offsetof(struct kauth_identity_extlookup, el_info_reserved_1))) != 0) { + message + offsetof(struct kauth_identity_extlookup, el_info_reserved_1), + sizeof(struct kauth_identity_extlookup) - offsetof(struct kauth_identity_extlookup, el_info_reserved_1))) != 0) { KAUTH_DEBUG("RESOLVER - error submitting work to resolve"); goto out; } @@ -861,13 +859,13 @@ kauth_resolver_getwork2(user_addr_t message) error = copyin(message + offsetof(struct kauth_identity_extlookup, el_extend), &uaddr, sizeof(uaddr)); if (!error) { - size_t actual; /* not used */ + size_t actual; /* not used */ /* * Use copyoutstr() to reduce the copy size; we let * this catch a NULL uaddr because we shouldn't be * asking in that case anyway. 
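The el_extend handling in kauth_resolver_getwork2() is a two-step user/kernel handshake: the daemon's buffer pointer is itself fetched from user memory with copyin(), then the kernel pushes a bounded, NUL-terminated string through it with copyoutstr(), which also catches a NULL target for free. A sketch with illustrative names:

static int
ex_copy_name_out(user_addr_t extend_field, const char *kname)
{
        user_addr_t uaddr;
        size_t done;            /* bytes copied, including the NUL */
        int error;

        /* read the daemon's destination pointer out of its message */
        error = copyin(extend_field, &uaddr, sizeof(uaddr));
        if (error == 0) {
                /* bounded string copy; a NULL uaddr faults here */
                error = copyoutstr(kname, uaddr, MAXPATHLEN, &done);
        }
        return error;
}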
*/ - error = copyoutstr(CAST_DOWN(void *,workp->kr_extend), uaddr, MAXPATHLEN, &actual); + error = copyoutstr(CAST_DOWN(void *, workp->kr_extend), uaddr, MAXPATHLEN, &actual); } if (error) { KAUTH_DEBUG("RESOLVER - error submitting work to resolve"); @@ -881,7 +879,7 @@ kauth_resolver_getwork2(user_addr_t message) out: KAUTH_RESOLVER_UNLOCK(); - return(error); + return error; } @@ -908,7 +906,7 @@ static int kauth_resolver_getwork(user_addr_t message) { struct kauth_resolver_work *workp; - int error; + int error; KAUTH_RESOLVER_LOCK(); error = 0; @@ -927,7 +925,7 @@ kauth_resolver_getwork(user_addr_t message) printf("external resolver died"); error = KAUTH_RESOLVER_FAILED_ERRCODE; } - return(error); + return error; } return kauth_resolver_getwork2(message); } @@ -947,7 +945,7 @@ kauth_resolver_getwork(user_addr_t message) static int kauth_resolver_complete(user_addr_t message) { - struct kauth_identity_extlookup extl; + struct kauth_identity_extlookup extl; struct kauth_resolver_work *workp; struct kauth_resolver_work *killp; int error, result, want_extend_data; @@ -958,7 +956,7 @@ kauth_resolver_complete(user_addr_t message) */ if ((error = copyin(message, &extl, sizeof(extl))) != 0) { KAUTH_DEBUG("RESOLVER - error getting completed work\n"); - return(error); + return error; } KAUTH_RESOLVER_LOCK(); @@ -996,9 +994,9 @@ kauth_resolver_complete(user_addr_t message) kauth_resolver_registered = 0; TAILQ_FOREACH(killp, &kauth_resolver_submitted, kr_link) - wakeup(killp); + wakeup(killp); TAILQ_FOREACH(killp, &kauth_resolver_unsubmitted, kr_link) - wakeup(killp); + wakeup(killp); /* Cause all waiting-for-work threads to return EIO */ wakeup((caddr_t)&kauth_resolver_unsubmitted); /* and return EIO to the caller */ @@ -1037,7 +1035,7 @@ kauth_resolver_complete(user_addr_t message) /* * Do we want extend_data? */ - want_extend_data = (workp->kr_work.el_flags & (KAUTH_EXTLOOKUP_WANT_PWNAM|KAUTH_EXTLOOKUP_WANT_GRNAM)); + want_extend_data = (workp->kr_work.el_flags & (KAUTH_EXTLOOKUP_WANT_PWNAM | KAUTH_EXTLOOKUP_WANT_GRNAM)); /* * Get the request of the submitted queue so @@ -1080,18 +1078,18 @@ kauth_resolver_complete(user_addr_t message) * part of a user's address space if they return * flags that mismatch the original request's flags. */ - if (want_extend_data && (extl.el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM|KAUTH_EXTLOOKUP_VALID_GRNAM))) { - size_t actual; /* notused */ + if (want_extend_data && (extl.el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM | KAUTH_EXTLOOKUP_VALID_GRNAM))) { + size_t actual; /* notused */ KAUTH_RESOLVER_UNLOCK(); error = copyinstr(extl.el_extend, CAST_DOWN(void *, workp->kr_extend), MAXPATHLEN, &actual); KAUTH_DEBUG("RESOLVER - resolver got name :%*s: len = %d\n", (int)actual, - actual ? "null" : (char *)extl.el_extend, actual); + actual ? "null" : (char *)extl.el_extend, actual); KAUTH_RESOLVER_LOCK(); - } else if (extl.el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM|KAUTH_EXTLOOKUP_VALID_GRNAM)) { + } else if (extl.el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM | KAUTH_EXTLOOKUP_VALID_GRNAM)) { error = EFAULT; KAUTH_DEBUG("RESOLVER - resolver returned mismatching extension flags (%d), request contained (%d)", - extl.el_flags, request_flags); + extl.el_flags, request_flags); } /* @@ -1109,8 +1107,8 @@ kauth_resolver_complete(user_addr_t message) * timed out the work record will be gone. */ KAUTH_RESOLVER_UNLOCK(); - - return(error); + + return error; } #endif /* CONFIG_EXT_RESOLVER */ @@ -1119,12 +1117,12 @@ kauth_resolver_complete(user_addr_t message) * Identity cache. 
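The cache that follows pairs each datum with a validity bit and an expiry time: before the expiry, a set bit answers from the cache and a clear bit answers "no mapping" without a userland round trip; after it, the resolver is consulted again (see the struct kauth_identity comment above). A sketch of the expiry test in the shape of the kauth_identity_*_expired() helpers, assuming an uptime-based clock:

static int
ex_entry_expired(time_t expiry)
{
        struct timeval tv;

        if (expiry == 0) {
                return 0;       /* persistent entry, never expires */
        }
        microuptime(&tv);
        return tv.tv_sec > expiry;
}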
*/ -#define KI_VALID_UID (1<<0) /* UID and GID are mutually exclusive */ -#define KI_VALID_GID (1<<1) -#define KI_VALID_GUID (1<<2) -#define KI_VALID_NTSID (1<<3) -#define KI_VALID_PWNAM (1<<4) /* Used for translation */ -#define KI_VALID_GRNAM (1<<5) /* Used for translation */ +#define KI_VALID_UID (1<<0) /* UID and GID are mutually exclusive */ +#define KI_VALID_GID (1<<1) +#define KI_VALID_GUID (1<<2) +#define KI_VALID_NTSID (1<<3) +#define KI_VALID_PWNAM (1<<4) /* Used for translation */ +#define KI_VALID_GRNAM (1<<5) /* Used for translation */ #define KI_VALID_GROUPS (1<<6) #if CONFIG_EXT_RESOLVER @@ -1148,7 +1146,7 @@ void kauth_identity_init(void) { TAILQ_INIT(&kauth_identities); - kauth_identity_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0/*LCK_ATTR_NULL*/); + kauth_identity_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0 /*LCK_ATTR_NULL*/); } @@ -1171,11 +1169,11 @@ kauth_identity_init(void) */ static struct kauth_identity * kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry, - ntsid_t *ntsidp, time_t ntsid_expiry, int supgrpcnt, gid_t *supgrps, time_t groups_expiry, - const char *name, int nametype) + ntsid_t *ntsidp, time_t ntsid_expiry, int supgrpcnt, gid_t *supgrps, time_t groups_expiry, + const char *name, int nametype) { struct kauth_identity *kip; - + /* get and fill in a new identity */ MALLOC(kip, struct kauth_identity *, sizeof(*kip), M_KAUTH, M_WAITOK | M_ZERO); if (kip != NULL) { @@ -1184,8 +1182,9 @@ kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry, kip->ki_valid = KI_VALID_GID; } if (uid != KAUTH_UID_NONE) { - if (kip->ki_valid & KI_VALID_GID) + if (kip->ki_valid & KI_VALID_GID) { panic("can't allocate kauth identity with both uid and gid"); + } kip->ki_uid = uid; kip->ki_valid = KI_VALID_UID; } @@ -1200,8 +1199,9 @@ kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry, if ((supgrpcnt < 0) || (supgrpcnt > NGROUPS) || (supgrps == NULL)) { return NULL; } - if (kip->ki_valid & KI_VALID_GID) + if (kip->ki_valid & KI_VALID_GID) { panic("can't allocate kauth identity with both gid and supplementary groups"); + } kip->ki_supgrpcnt = supgrpcnt; memcpy(kip->ki_supgrps, supgrps, sizeof(supgrps[0]) * supgrpcnt); kip->ki_valid |= KI_VALID_GROUPS; @@ -1222,7 +1222,7 @@ kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry, kip->ki_valid |= nametype; } } - return(kip); + return kip; } @@ -1252,19 +1252,22 @@ kauth_identity_register_and_free(struct kauth_identity *kip) ip = NULL; KAUTH_IDENTITY_LOCK(); if (kip->ki_valid & KI_VALID_UID) { - if (kip->ki_valid & KI_VALID_GID) + if (kip->ki_valid & KI_VALID_GID) { panic("kauth_identity: can't insert record with both UID and GID as key"); + } TAILQ_FOREACH(ip, &kauth_identities, ki_link) - if ((ip->ki_valid & KI_VALID_UID) && (ip->ki_uid == kip->ki_uid)) - break; + if ((ip->ki_valid & KI_VALID_UID) && (ip->ki_uid == kip->ki_uid)) { + break; + } } else if (kip->ki_valid & KI_VALID_GID) { TAILQ_FOREACH(ip, &kauth_identities, ki_link) - if ((ip->ki_valid & KI_VALID_GID) && (ip->ki_gid == kip->ki_gid)) - break; + if ((ip->ki_valid & KI_VALID_GID) && (ip->ki_gid == kip->ki_gid)) { + break; + } } else { panic("kauth_identity: can't insert record without UID or GID as key"); } - + if (ip != NULL) { /* we already have an entry, merge/overwrite */ if (kip->ki_valid & KI_VALID_GUID) { @@ -1281,8 +1284,9 @@ kauth_identity_register_and_free(struct kauth_identity *kip) if (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM)) { /* if there's an old one, discard 
it */ const char *oname = NULL; - if (ip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM)) + if (ip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM)) { oname = ip->ki_name; + } ip->ki_name = kip->ki_name; kip->ki_name = oname; } @@ -1304,8 +1308,9 @@ kauth_identity_register_and_free(struct kauth_identity *kip) /* have to drop lock before freeing expired entry (it may be in use) */ if (ip != NULL) { /* if the ki_name field is used, clear it first */ - if (ip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM)) + if (ip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM)) { vfs_removename(ip->ki_name); + } /* free the expired entry */ FREE(ip, M_KAUTH); } @@ -1358,10 +1363,10 @@ kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_id * over the allocation later. */ if (elp->el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM | KAUTH_EXTLOOKUP_VALID_GRNAM)) { - const char *tmp = CAST_DOWN(const char *,extend_data); + const char *tmp = CAST_DOWN(const char *, extend_data); speculative_name = vfs_addname(tmp, strnlen(tmp, MAXPATHLEN - 1), 0, 0); } - + /* user identity? */ if (elp->el_flags & KAUTH_EXTLOOKUP_VALID_UID) { KAUTH_IDENTITY_LOCK(); @@ -1372,7 +1377,7 @@ kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_id assert(elp->el_sup_grp_cnt <= NGROUPS); if (elp->el_sup_grp_cnt > NGROUPS) { KAUTH_DEBUG("CACHE - invalid sup_grp_cnt provided (%d), truncating to %d", - elp->el_sup_grp_cnt, NGROUPS); + elp->el_sup_grp_cnt, NGROUPS); elp->el_sup_grp_cnt = NGROUPS; } kip->ki_supgrpcnt = elp->el_sup_grp_cnt; @@ -1404,8 +1409,9 @@ kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_id } } kauth_identity_lru(kip); - if (rkip != NULL) + if (rkip != NULL) { *rkip = *kip; + } KAUTH_DEBUG("CACHE - refreshed %d is " K_UUID_FMT, kip->ki_uid, K_UUID_ARG(kip->ki_guid)); break; } @@ -1424,10 +1430,12 @@ kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_id (elp->el_flags & KAUTH_EXTLOOKUP_VALID_PWNAM) ? speculative_name : NULL, KI_VALID_PWNAM); if (kip != NULL) { - if (rkip != NULL) + if (rkip != NULL) { *rkip = *kip; - if (elp->el_flags & KAUTH_EXTLOOKUP_VALID_PWNAM) + } + if (elp->el_flags & KAUTH_EXTLOOKUP_VALID_PWNAM) { speculative_name = NULL; + } KAUTH_DEBUG("CACHE - learned %d is " K_UUID_FMT, kip->ki_uid, K_UUID_ARG(kip->ki_guid)); kauth_identity_register_and_free(kip); } @@ -1464,8 +1472,9 @@ kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_id } } kauth_identity_lru(kip); - if (rkip != NULL) + if (rkip != NULL) { *rkip = *kip; + } KAUTH_DEBUG("CACHE - refreshed %d is " K_UUID_FMT, kip->ki_uid, K_UUID_ARG(kip->ki_guid)); break; } @@ -1484,10 +1493,12 @@ kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_id (elp->el_flags & KAUTH_EXTLOOKUP_VALID_GRNAM) ? speculative_name : NULL, KI_VALID_GRNAM); if (kip != NULL) { - if (rkip != NULL) + if (rkip != NULL) { *rkip = *kip; - if (elp->el_flags & KAUTH_EXTLOOKUP_VALID_GRNAM) + } + if (elp->el_flags & KAUTH_EXTLOOKUP_VALID_GRNAM) { speculative_name = NULL; + } KAUTH_DEBUG("CACHE - learned %d is " K_UUID_FMT, kip->ki_uid, K_UUID_ARG(kip->ki_guid)); kauth_identity_register_and_free(kip); } @@ -1507,11 +1518,12 @@ kauth_identity_updatecache(struct kauth_identity_extlookup *elp, struct kauth_id * Must be called with the identity cache lock held. 
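
kauth_identity_trimcache, defined just below, keeps the identity cache bounded by evicting from the tail of the TAILQ, which the lookup paths maintain in LRU order via kauth_identity_lru. An equivalent self-contained sketch of that eviction loop (the ex_* names and M_TEMP are illustrative, not the kauth types):

struct ex_entry {
	TAILQ_ENTRY(ex_entry) ex_link;
};
TAILQ_HEAD(ex_head, ex_entry);

static struct ex_head ex_cache = TAILQ_HEAD_INITIALIZER(ex_cache);
static int ex_count;

/* caller must hold the cache lock */
static void
ex_trimcache(int newsize)
{
	struct ex_entry *ep;

	while (ex_count > newsize) {
		/* the tail is the least recently used entry */
		ep = TAILQ_LAST(&ex_cache, ex_head);
		TAILQ_REMOVE(&ex_cache, ep, ex_link);
		ex_count--;
		FREE(ep, M_TEMP);
	}
}
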
*/ static void -kauth_identity_trimcache(int newsize) { - struct kauth_identity *kip; - +kauth_identity_trimcache(int newsize) +{ + struct kauth_identity *kip; + lck_mtx_assert(kauth_identity_mtx, LCK_MTX_ASSERT_OWNED); - + while (kauth_identity_count > newsize) { kip = TAILQ_LAST(&kauth_identities, kauth_identity_head); TAILQ_REMOVE(&kauth_identities, kip, ki_link); @@ -1566,13 +1578,14 @@ kauth_identity_guid_expired(struct kauth_identity *kip) /* * Expiration time of 0 means this entry is persistent. */ - if (kip->ki_guid_expiry == 0) - return (0); + if (kip->ki_guid_expiry == 0) { + return 0; + } microuptime(&tv); KAUTH_DEBUG("CACHE - GUID expires @ %ld now %ld", kip->ki_guid_expiry, tv.tv_sec); - return((kip->ki_guid_expiry <= tv.tv_sec) ? 1 : 0); + return (kip->ki_guid_expiry <= tv.tv_sec) ? 1 : 0; } @@ -1595,13 +1608,14 @@ kauth_identity_ntsid_expired(struct kauth_identity *kip) /* * Expiration time of 0 means this entry is persistent. */ - if (kip->ki_ntsid_expiry == 0) - return (0); + if (kip->ki_ntsid_expiry == 0) { + return 0; + } microuptime(&tv); KAUTH_DEBUG("CACHE - NTSID expires @ %ld now %ld", kip->ki_ntsid_expiry, tv.tv_sec); - return((kip->ki_ntsid_expiry <= tv.tv_sec) ? 1 : 0); + return (kip->ki_ntsid_expiry <= tv.tv_sec) ? 1 : 0; } /* @@ -1623,13 +1637,14 @@ kauth_identity_groups_expired(struct kauth_identity *kip) /* * Expiration time of 0 means this entry is persistent. */ - if (kip->ki_groups_expiry == 0) - return (0); + if (kip->ki_groups_expiry == 0) { + return 0; + } microuptime(&tv); KAUTH_DEBUG("CACHE - GROUPS expires @ %ld now %ld\n", kip->ki_groups_expiry, tv.tv_sec); - return((kip->ki_groups_expiry <= tv.tv_sec) ? 1 : 0); + return (kip->ki_groups_expiry <= tv.tv_sec) ? 1 : 0; } /* @@ -1659,13 +1674,14 @@ kauth_identity_find_uid(uid_t uid, struct kauth_identity *kir, char *getname) /* Copy via structure assignment */ *kir = *kip; /* If a name is wanted and one exists, copy it out */ - if (getname != NULL && (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM))) + if (getname != NULL && (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM))) { strlcpy(getname, kip->ki_name, MAXPATHLEN); + } break; } } KAUTH_IDENTITY_UNLOCK(); - return((kip == NULL) ? ENOENT : 0); + return (kip == NULL) ? ENOENT : 0; } @@ -1696,13 +1712,14 @@ kauth_identity_find_gid(uid_t gid, struct kauth_identity *kir, char *getname) /* Copy via structure assignment */ *kir = *kip; /* If a name is wanted and one exists, copy it out */ - if (getname != NULL && (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM))) + if (getname != NULL && (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM))) { strlcpy(getname, kip->ki_name, MAXPATHLEN); + } break; } } KAUTH_IDENTITY_UNLOCK(); - return((kip == NULL) ? ENOENT : 0); + return (kip == NULL) ? ENOENT : 0; } @@ -1736,13 +1753,14 @@ kauth_identity_find_guid(guid_t *guidp, struct kauth_identity *kir, char *getnam /* Copy via structure assignment */ *kir = *kip; /* If a name is wanted and one exists, copy it out */ - if (getname != NULL && (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM))) + if (getname != NULL && (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM))) { strlcpy(getname, kip->ki_name, MAXPATHLEN); + } break; } } KAUTH_IDENTITY_UNLOCK(); - return((kip == NULL) ? ENOENT : 0); + return (kip == NULL) ? ENOENT : 0; } /* @@ -1775,7 +1793,7 @@ kauth_identity_find_nam(char *name, int valid, struct kauth_identity *kir) } } KAUTH_IDENTITY_UNLOCK(); - return((kip == NULL) ? ENOENT : 0); + return (kip == NULL) ? 
ENOENT : 0; } @@ -1809,15 +1827,16 @@ kauth_identity_find_ntsid(ntsid_t *ntsid, struct kauth_identity *kir, char *getn /* Copy via structure assignment */ *kir = *kip; /* If a name is wanted and one exists, copy it out */ - if (getname != NULL && (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM))) + if (getname != NULL && (kip->ki_valid & (KI_VALID_PWNAM | KI_VALID_GRNAM))) { strlcpy(getname, kip->ki_name, MAXPATHLEN); + } break; } } KAUTH_IDENTITY_UNLOCK(); - return((kip == NULL) ? ENOENT : 0); + return (kip == NULL) ? ENOENT : 0; } -#endif /* CONFIG_EXT_RESOLVER */ +#endif /* CONFIG_EXT_RESOLVER */ /* @@ -1840,7 +1859,7 @@ guid_t kauth_null_guid; int kauth_guid_equal(guid_t *guid1, guid_t *guid2) { - return(bcmp(guid1, guid2, sizeof(*guid1)) == 0); + return bcmp(guid1, guid2, sizeof(*guid1)) == 0; } @@ -1860,8 +1879,8 @@ kauth_guid_equal(guid_t *guid1, guid_t *guid2) int kauth_wellknown_guid(guid_t *guid) { - static char fingerprint[] = {0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef}; - uint32_t code; + static char fingerprint[] = {0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef}; + uint32_t code; /* * All WKGs begin with the same 12 bytes. */ @@ -1870,25 +1889,25 @@ kauth_wellknown_guid(guid_t *guid) * The final 4 bytes are our code (in network byte order). */ code = OSSwapHostToBigInt32(*(uint32_t *)&guid->g_guid[12]); - switch(code) { + switch (code) { case 0x0000000c: - return(KAUTH_WKG_EVERYBODY); + return KAUTH_WKG_EVERYBODY; case 0xfffffffe: - return(KAUTH_WKG_NOBODY); + return KAUTH_WKG_NOBODY; case 0x0000000a: - return(KAUTH_WKG_OWNER); + return KAUTH_WKG_OWNER; case 0x00000010: - return(KAUTH_WKG_GROUP); + return KAUTH_WKG_GROUP; } } - return(KAUTH_WKG_NOT); + return KAUTH_WKG_NOT; } /* * kauth_ntsid_equal * - * Description: Determine the equality of two NTSIDs (NT Security Identifiers) + * Description: Determine the equality of two NTSIDs (NT Security Identifiers) * * Parameters: sid1 Pointer to first NTSID * sid2 Pointer to second NTSID @@ -1902,9 +1921,10 @@ kauth_ntsid_equal(ntsid_t *sid1, ntsid_t *sid2) /* check sizes for equality, also sanity-check size while we're at it */ if ((KAUTH_NTSID_SIZE(sid1) == KAUTH_NTSID_SIZE(sid2)) && (KAUTH_NTSID_SIZE(sid1) <= sizeof(*sid1)) && - bcmp(sid1, sid2, KAUTH_NTSID_SIZE(sid1)) == 0) - return(1); - return(0); + bcmp(sid1, sid2, KAUTH_NTSID_SIZE(sid1)) == 0) { + return 1; + } + return 0; } @@ -1949,12 +1969,12 @@ kauth_ntsid_equal(ntsid_t *sid1, ntsid_t *sid2) static int kauth_cred_change_egid(kauth_cred_t cred, gid_t new_egid) { - int i; - int displaced = 1; + int i; + int displaced = 1; #if radar_4600026 - int is_member; -#endif /* radar_4600026 */ - gid_t old_egid = kauth_cred_getgid(cred); + int is_member; +#endif /* radar_4600026 */ + gid_t old_egid = kauth_cred_getgid(cred); posix_cred_t pcred = posix_cred_get(cred); /* Ignoring the first entry, scan for a match for the new egid */ @@ -1975,12 +1995,12 @@ kauth_cred_change_egid(kauth_cred_t cred, gid_t new_egid) #error Fix radar 4600026 first!!! /* -This is correct for memberd behaviour, but incorrect for POSIX; to address -this, we would need to automatically opt-out any SUID/SGID binary, and force -it to use initgroups to opt back in. We take the approach of considering it -opt'ed out in any group of 16 displacement instead, since it's a much more -conservative approach (i.e. less likely to cause things to break). 
-*/ + * This is correct for memberd behaviour, but incorrect for POSIX; to address + * this, we would need to automatically opt-out any SUID/SGID binary, and force + * it to use initgroups to opt back in. We take the approach of considering it + * opt'ed out in any group of 16 displacement instead, since it's a much more + * conservative approach (i.e. less likely to cause things to break). + */ /* * If we displaced a member of the supplementary groups list of the @@ -1993,15 +2013,15 @@ conservative approach (i.e. less likely to cause things to break). if (displaced && !(pcred->cr_flags & CRF_NOMEMBERD) && kauth_cred_ismember_gid(cred, new_egid, &is_member) == 0 && is_member) { - displaced = 0; + displaced = 0; DEBUG_CRED_CHANGE("kauth_cred_change_egid: reset displaced\n"); } -#endif /* radar_4600026 */ +#endif /* radar_4600026 */ /* set the new EGID into the old spot */ pcred->cr_groups[0] = new_egid; - return (displaced); + return displaced; } @@ -2018,7 +2038,7 @@ uid_t kauth_cred_getuid(kauth_cred_t cred) { NULLCRED_CHECK(cred); - return(posix_cred_get(cred)->cr_uid); + return posix_cred_get(cred)->cr_uid; } @@ -2035,7 +2055,7 @@ uid_t kauth_cred_getruid(kauth_cred_t cred) { NULLCRED_CHECK(cred); - return(posix_cred_get(cred)->cr_ruid); + return posix_cred_get(cred)->cr_ruid; } @@ -2052,7 +2072,7 @@ uid_t kauth_cred_getsvuid(kauth_cred_t cred) { NULLCRED_CHECK(cred); - return(posix_cred_get(cred)->cr_svuid); + return posix_cred_get(cred)->cr_svuid; } @@ -2069,7 +2089,7 @@ gid_t kauth_cred_getgid(kauth_cred_t cred) { NULLCRED_CHECK(cred); - return(posix_cred_get(cred)->cr_gid); + return posix_cred_get(cred)->cr_gid; } @@ -2086,7 +2106,7 @@ gid_t kauth_cred_getrgid(kauth_cred_t cred) { NULLCRED_CHECK(cred); - return(posix_cred_get(cred)->cr_rgid); + return posix_cred_get(cred)->cr_rgid; } @@ -2103,11 +2123,11 @@ gid_t kauth_cred_getsvgid(kauth_cred_t cred) { NULLCRED_CHECK(cred); - return(posix_cred_get(cred)->cr_svgid); + return posix_cred_get(cred)->cr_svgid; } -static int kauth_cred_cache_lookup(int from, int to, void *src, void *dst); +static int kauth_cred_cache_lookup(int from, int to, void *src, void *dst); #if CONFIG_EXT_RESOLVER == 0 /* @@ -2119,7 +2139,7 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) /* NB: These must match the definitions used by Libinfo's mbr_identifier_translate(). 
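
When no external resolver is compiled in, the stub translator just below round-trips IDs through "compatibility" UUIDs: the first 12 bytes are a fixed namespace prefix and the final 4 bytes carry the UID or GID in network byte order, matching Libinfo's mbr_identifier_translate(). A hedged sketch of that encoding and its inverse (the ex_* names are illustrative; the prefix constants are the ones defined below):

#define EX_PREFIX_LEN (sizeof(uuid_t) - sizeof(id_t))

static void
ex_id_to_compat_uuid(const uuid_t prefix, id_t id, uuid_t uu)
{
	id_t beid = htonl(id);  /* the ID travels in network byte order */

	memcpy(uu, prefix, EX_PREFIX_LEN);
	memcpy(&uu[EX_PREFIX_LEN], &beid, sizeof(beid));
}

static int
ex_compat_uuid_to_id(const uuid_t prefix, const uuid_t uu, id_t *idp)
{
	id_t beid;

	if (memcmp(uu, prefix, EX_PREFIX_LEN) != 0) {
		return ENOENT;  /* not a compatibility UUID */
	}
	memcpy(&beid, &uu[EX_PREFIX_LEN], sizeof(beid));
	*idp = ntohl(beid);
	return 0;
}
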
*/ static const uuid_t _user_compat_prefix = {0xff, 0xff, 0xee, 0xee, 0xdd, 0xdd, 0xcc, 0xcc, 0xbb, 0xbb, 0xaa, 0xaa, 0x00, 0x00, 0x00, 0x00}; static const uuid_t _group_compat_prefix = {0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0xab, 0xcd, 0xef, 0x00, 0x00, 0x00, 0x00}; -#define COMPAT_PREFIX_LEN (sizeof(uuid_t) - sizeof(id_t)) +#define COMPAT_PREFIX_LEN (sizeof(uuid_t) - sizeof(id_t)) assert(from != to); @@ -2131,7 +2151,7 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) uint8_t *uu = dst; memcpy(uu, _user_compat_prefix, sizeof(_user_compat_prefix)); memcpy(&uu[COMPAT_PREFIX_LEN], &uid, sizeof(uid)); - return (0); + return 0; } break; } @@ -2142,7 +2162,7 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) uint8_t *uu = dst; memcpy(uu, _group_compat_prefix, sizeof(_group_compat_prefix)); memcpy(&uu[COMPAT_PREFIX_LEN], &gid, sizeof(gid)); - return (0); + return 0; } break; } @@ -2154,14 +2174,14 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) id_t uid; memcpy(&uid, &uu[COMPAT_PREFIX_LEN], sizeof(uid)); *(id_t *)dst = ntohl(uid); - return (0); + return 0; } } else if (to == KI_VALID_GID) { if (memcmp(uu, _group_compat_prefix, COMPAT_PREFIX_LEN) == 0) { id_t gid; memcpy(&gid, &uu[COMPAT_PREFIX_LEN], sizeof(gid)); *(id_t *)dst = ntohl(gid); - return (0); + return 0; } } break; @@ -2170,13 +2190,13 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) /* NOT IMPLEMENTED */ break; } - return (ENOENT); + return ENOENT; } #endif #if defined(CONFIG_EXT_RESOLVER) && (CONFIG_EXT_RESOLVER) /* - * Structure to hold supplemental groups. Used for impedance matching with + * Structure to hold supplemental groups. Used for impedance matching with * kauth_cred_cache_lookup below. */ struct supgroups { @@ -2212,7 +2232,7 @@ kauth_cred_uid2groups(uid_t *uid, gid_t *groups, int *gcount) rv = kauth_cred_cache_lookup(KI_VALID_UID, KI_VALID_GROUPS, uid, &supgroups); - return (rv); + return rv; } #endif @@ -2235,7 +2255,7 @@ kauth_cred_uid2groups(uid_t *uid, gid_t *groups, int *gcount) int kauth_cred_guid2pwnam(guid_t *guidp, char *pwnam) { - return(kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_PWNAM, guidp, pwnam)); + return kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_PWNAM, guidp, pwnam); } @@ -2258,7 +2278,7 @@ kauth_cred_guid2pwnam(guid_t *guidp, char *pwnam) int kauth_cred_guid2grnam(guid_t *guidp, char *grnam) { - return(kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_GRNAM, guidp, grnam)); + return kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_GRNAM, guidp, grnam); } @@ -2282,7 +2302,7 @@ kauth_cred_guid2grnam(guid_t *guidp, char *grnam) int kauth_cred_pwnam2guid(char *pwnam, guid_t *guidp) { - return(kauth_cred_cache_lookup(KI_VALID_PWNAM, KI_VALID_GUID, pwnam, guidp)); + return kauth_cred_cache_lookup(KI_VALID_PWNAM, KI_VALID_GUID, pwnam, guidp); } @@ -2306,7 +2326,7 @@ kauth_cred_pwnam2guid(char *pwnam, guid_t *guidp) int kauth_cred_grnam2guid(char *grnam, guid_t *guidp) { - return(kauth_cred_cache_lookup(KI_VALID_GRNAM, KI_VALID_GUID, grnam, guidp)); + return kauth_cred_cache_lookup(KI_VALID_GRNAM, KI_VALID_GUID, grnam, guidp); } @@ -2327,7 +2347,7 @@ kauth_cred_grnam2guid(char *grnam, guid_t *guidp) int kauth_cred_guid2uid(guid_t *guidp, uid_t *uidp) { - return(kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_UID, guidp, uidp)); + return kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_UID, guidp, uidp); } @@ -2348,7 +2368,7 @@ kauth_cred_guid2uid(guid_t *guidp, uid_t *uidp) int kauth_cred_guid2gid(guid_t *guidp, gid_t 
*gidp) { - return(kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_GID, guidp, gidp)); + return kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_GID, guidp, gidp); } /* @@ -2368,7 +2388,7 @@ kauth_cred_guid2gid(guid_t *guidp, gid_t *gidp) int kauth_cred_nfs4domain2dsnode(__unused char *nfs4domain, __unused char *dsnode) { - return(ENOENT); + return ENOENT; } /* @@ -2388,7 +2408,7 @@ kauth_cred_nfs4domain2dsnode(__unused char *nfs4domain, __unused char *dsnode) int kauth_cred_dsnode2nfs4domain(__unused char *dsnode, __unused char *nfs4domain) { - return(ENOENT); + return ENOENT; } /* @@ -2408,7 +2428,7 @@ kauth_cred_dsnode2nfs4domain(__unused char *dsnode, __unused char *nfs4domain) int kauth_cred_ntsid2uid(ntsid_t *sidp, uid_t *uidp) { - return(kauth_cred_cache_lookup(KI_VALID_NTSID, KI_VALID_UID, sidp, uidp)); + return kauth_cred_cache_lookup(KI_VALID_NTSID, KI_VALID_UID, sidp, uidp); } @@ -2429,7 +2449,7 @@ kauth_cred_ntsid2uid(ntsid_t *sidp, uid_t *uidp) int kauth_cred_ntsid2gid(ntsid_t *sidp, gid_t *gidp) { - return(kauth_cred_cache_lookup(KI_VALID_NTSID, KI_VALID_GID, sidp, gidp)); + return kauth_cred_cache_lookup(KI_VALID_NTSID, KI_VALID_GID, sidp, gidp); } @@ -2450,7 +2470,7 @@ kauth_cred_ntsid2gid(ntsid_t *sidp, gid_t *gidp) int kauth_cred_ntsid2guid(ntsid_t *sidp, guid_t *guidp) { - return(kauth_cred_cache_lookup(KI_VALID_NTSID, KI_VALID_GUID, sidp, guidp)); + return kauth_cred_cache_lookup(KI_VALID_NTSID, KI_VALID_GUID, sidp, guidp); } @@ -2471,7 +2491,7 @@ kauth_cred_ntsid2guid(ntsid_t *sidp, guid_t *guidp) int kauth_cred_uid2guid(uid_t uid, guid_t *guidp) { - return(kauth_cred_cache_lookup(KI_VALID_UID, KI_VALID_GUID, &uid, guidp)); + return kauth_cred_cache_lookup(KI_VALID_UID, KI_VALID_GUID, &uid, guidp); } @@ -2493,7 +2513,7 @@ int kauth_cred_getguid(kauth_cred_t cred, guid_t *guidp) { NULLCRED_CHECK(cred); - return(kauth_cred_uid2guid(kauth_cred_getuid(cred), guidp)); + return kauth_cred_uid2guid(kauth_cred_getuid(cred), guidp); } @@ -2514,7 +2534,7 @@ kauth_cred_getguid(kauth_cred_t cred, guid_t *guidp) int kauth_cred_gid2guid(gid_t gid, guid_t *guidp) { - return(kauth_cred_cache_lookup(KI_VALID_GID, KI_VALID_GUID, &gid, guidp)); + return kauth_cred_cache_lookup(KI_VALID_GID, KI_VALID_GUID, &gid, guidp); } @@ -2535,7 +2555,7 @@ kauth_cred_gid2guid(gid_t gid, guid_t *guidp) int kauth_cred_uid2ntsid(uid_t uid, ntsid_t *sidp) { - return(kauth_cred_cache_lookup(KI_VALID_UID, KI_VALID_NTSID, &uid, sidp)); + return kauth_cred_cache_lookup(KI_VALID_UID, KI_VALID_NTSID, &uid, sidp); } @@ -2557,7 +2577,7 @@ int kauth_cred_getntsid(kauth_cred_t cred, ntsid_t *sidp) { NULLCRED_CHECK(cred); - return(kauth_cred_uid2ntsid(kauth_cred_getuid(cred), sidp)); + return kauth_cred_uid2ntsid(kauth_cred_getuid(cred), sidp); } @@ -2578,7 +2598,7 @@ kauth_cred_getntsid(kauth_cred_t cred, ntsid_t *sidp) int kauth_cred_gid2ntsid(gid_t gid, ntsid_t *sidp) { - return(kauth_cred_cache_lookup(KI_VALID_GID, KI_VALID_NTSID, &gid, sidp)); + return kauth_cred_cache_lookup(KI_VALID_GID, KI_VALID_NTSID, &gid, sidp); } @@ -2599,7 +2619,7 @@ kauth_cred_gid2ntsid(gid_t gid, ntsid_t *sidp) int kauth_cred_guid2ntsid(guid_t *guidp, ntsid_t *sidp) { - return(kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_NTSID, guidp, sidp)); + return kauth_cred_cache_lookup(KI_VALID_GUID, KI_VALID_NTSID, guidp, sidp); } @@ -2632,7 +2652,7 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) char *namebuf = NULL; KAUTH_DEBUG("CACHE - translate %d to %d", from, to); - + /* * Look for an existing cache entry for this 
association. * If the entry has not expired, return the cached information. @@ -2641,13 +2661,14 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) * atomically. */ if (to == KI_VALID_PWNAM || to == KI_VALID_GRNAM) { - if (dst == NULL) - return (EINVAL); + if (dst == NULL) { + return EINVAL; + } namebuf = dst; *namebuf = '\0'; } ki.ki_valid = 0; - switch(from) { + switch (from) { case KI_VALID_UID: error = kauth_identity_find_uid(*(uid_t *)src, &ki, namebuf); break; @@ -2666,22 +2687,23 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) error = kauth_identity_find_nam((char *)src, from, &ki); break; default: - return(EINVAL); + return EINVAL; } /* If we didn't get what we're asking for. Call the resolver */ - if (!error && !(to & ki.ki_valid)) + if (!error && !(to & ki.ki_valid)) { error = ENOENT; + } /* lookup failure or error */ if (error != 0) { /* any other error is fatal */ if (error != ENOENT) { /* XXX bogus check - this is not possible */ KAUTH_DEBUG("CACHE - cache search error %d", error); - return(error); + return error; } } else { /* found a valid cached entry, check expiry */ - switch(to) { + switch (to) { case KI_VALID_GUID: expired = kauth_identity_guid_expired; break; @@ -2692,7 +2714,7 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) expired = kauth_identity_groups_expired; break; default: - switch(from) { + switch (from) { case KI_VALID_GUID: expired = kauth_identity_guid_expired; break; @@ -2719,13 +2741,13 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) * get a result from the resolver, we will use it as * a better-than nothing alternative. */ - + KAUTH_DEBUG("CACHE - expired entry found"); } } else { KAUTH_DEBUG("CACHE - no expiry function"); } - + if (!expired) { /* do we have a translation? */ if (ki.ki_valid & to) { @@ -2734,26 +2756,26 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) goto found; } else { /* - * GUIDs and NTSIDs map to either a UID or a GID, but not both. - * If we went looking for a translation from GUID or NTSID and - * found a translation that wasn't for our desired type, then - * don't bother calling the resolver. We know that this + * GUIDs and NTSIDs map to either a UID or a GID, but not both. + * If we went looking for a translation from GUID or NTSID and + * found a translation that wasn't for our desired type, then + * don't bother calling the resolver. We know that this * GUID/NTSID can't translate to our desired type. 
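
All three expiry helpers wired into the dispatch above (kauth_identity_guid_expired, _ntsid_expired, _groups_expired) share one shape: an expiry of zero marks the entry persistent, anything else is compared against the monotonic clock. The common pattern, factored into a single illustrative helper (the real code deliberately repeats it per identity class):

static int
ex_entry_expired(time_t expiry)
{
	struct timeval tv;

	/* an expiry of 0 means the entry never expires */
	if (expiry == 0) {
		return 0;
	}
	microuptime(&tv);
	return (expiry <= tv.tv_sec) ? 1 : 0;
}
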
*/ - switch(from) { + switch (from) { case KI_VALID_GUID: case KI_VALID_NTSID: - switch(to) { + switch (to) { case KI_VALID_GID: if ((ki.ki_valid & KI_VALID_UID)) { KAUTH_DEBUG("CACHE - unexpected entry 0x%08x & %x", ki.ki_valid, KI_VALID_GID); - return (ENOENT); + return ENOENT; } break; case KI_VALID_UID: if ((ki.ki_valid & KI_VALID_GID)) { KAUTH_DEBUG("CACHE - unexpected entry 0x%08x & %x", ki.ki_valid, KI_VALID_UID); - return (ENOENT); + return ENOENT; } break; } @@ -2774,7 +2796,7 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) */ bzero(&el, sizeof(el)); el.el_info_pid = current_proc()->p_pid; - switch(from) { + switch (from) { case KI_VALID_UID: el.el_flags = KAUTH_EXTLOOKUP_VALID_UID; el.el_uid = *(uid_t *)src; @@ -2804,7 +2826,7 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) extend_data = CAST_USER_ADDR_T(src); break; default: - return(EINVAL); + return EINVAL; } /* * Here we ask for everything all at once, to avoid having to work @@ -2833,37 +2855,38 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) el.el_flags |= KAUTH_EXTLOOKUP_WANT_SUPGRPS; if (ki.ki_valid & KI_VALID_GROUPS) { /* - * Copy the current supplemental groups for the resolver. + * Copy the current supplemental groups for the resolver. * The resolver should check these groups first and if - * the user (uid) is still a member it should endeavor to + * the user (uid) is still a member it should endeavor to * keep them in the list. Otherwise NFS clients could get * changing access to server file system objects on each * expiration. */ if (ki.ki_supgrpcnt > NGROUPS) { panic("kauth data structure corrupted. kauth identity 0x%p with %d groups, greater than max of %d", - &ki, ki.ki_supgrpcnt, NGROUPS); + &ki, ki.ki_supgrpcnt, NGROUPS); } el.el_sup_grp_cnt = ki.ki_supgrpcnt; - memcpy(el.el_sup_groups, ki.ki_supgrps, sizeof (el.el_sup_groups[0]) * ki.ki_supgrpcnt); + memcpy(el.el_sup_groups, ki.ki_supgrps, sizeof(el.el_sup_groups[0]) * ki.ki_supgrpcnt); /* Let the resolver know these were the previous valid groups */ el.el_flags |= KAUTH_EXTLOOKUP_VALID_SUPGRPS; KAUTH_DEBUG("GROUPS: Sending previously valid GROUPS"); - } else + } else { KAUTH_DEBUG("GROUPS: no valid groups to send"); + } } /* Call resolver */ KAUTH_DEBUG("CACHE - calling resolver for %x", el.el_flags); DTRACE_PROC3(kauth__id__resolver__submitted, int, from, int, to, uintptr_t, src); - + error = kauth_resolver_submit(&el, extend_data); DTRACE_PROC2(kauth__id__resolver__returned, int, error, struct kauth_identity_extlookup *, &el) - + KAUTH_DEBUG("CACHE - resolver returned %d", error); /* was the external lookup successful? */ @@ -2875,10 +2898,11 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) * * If we came from a name, we know the extend_data is valid. */ - if (from == KI_VALID_PWNAM) + if (from == KI_VALID_PWNAM) { el.el_flags |= KAUTH_EXTLOOKUP_VALID_PWNAM; - else if (from == KI_VALID_GRNAM) + } else if (from == KI_VALID_GRNAM) { el.el_flags |= KAUTH_EXTLOOKUP_VALID_GRNAM; + } kauth_identity_updatecache(&el, &ki, extend_data); @@ -2890,14 +2914,15 @@ kauth_cred_cache_lookup(int from, int to, void *src, void *dst) error = ENOENT; } } - if (error) - return(error); + if (error) { + return error; + } found: /* * Copy from the appropriate struct kauth_identity cache entry * structure into the destination buffer area. 
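
The request built above carries two kinds of flag bits: VALID_* bits describe the identity the kernel already knows (the lookup key), and WANT_* bits list every translation it would like back, so a single upcall can refresh the whole cache entry instead of costing one round trip per attribute. A condensed sketch with hypothetical EX_* flags standing in for the KAUTH_EXTLOOKUP_* set:

#define EX_VALID_UID    (1u << 0)
#define EX_WANT_UGUID   (1u << 1)
#define EX_WANT_USID    (1u << 2)
#define EX_WANT_SUPGRPS (1u << 3)

struct ex_request {
	uint32_t el_flags;
	uid_t    el_uid;
};

static void
ex_build_request(struct ex_request *el, uid_t uid)
{
	bzero(el, sizeof(*el));
	/* what we know: the key for the lookup */
	el->el_flags = EX_VALID_UID;
	el->el_uid = uid;
	/* what we want: everything at once, to amortize the upcall */
	el->el_flags |= EX_WANT_UGUID | EX_WANT_USID | EX_WANT_SUPGRPS;
}
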
*/ - switch(to) { + switch (to) { case KI_VALID_UID: *(uid_t *)dst = ki.ki_uid; break; @@ -2911,26 +2936,26 @@ found: *(ntsid_t *)dst = ki.ki_ntsid; break; case KI_VALID_GROUPS: { - struct supgroups *gp = (struct supgroups *)dst; - u_int32_t limit = ki.ki_supgrpcnt; - - if (gp->count) { - limit = MIN(ki.ki_supgrpcnt, *gp->count); - *gp->count = limit; - } - - memcpy(gp->groups, ki.ki_supgrps, sizeof(gid_t) * limit); + struct supgroups *gp = (struct supgroups *)dst; + u_int32_t limit = ki.ki_supgrpcnt; + + if (gp->count) { + limit = MIN(ki.ki_supgrpcnt, *gp->count); + *gp->count = limit; } - break; + + memcpy(gp->groups, ki.ki_supgrps, sizeof(gid_t) * limit); + } + break; case KI_VALID_PWNAM: case KI_VALID_GRNAM: /* handled in kauth_resolver_complete() */ break; default: - return(EINVAL); + return EINVAL; } KAUTH_DEBUG("CACHE - returned successfully"); - return(0); + return 0; } @@ -2959,7 +2984,7 @@ void kauth_groups_init(void) { TAILQ_INIT(&kauth_groups); - kauth_groups_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0/*LCK_ATTR_NULL*/); + kauth_groups_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0 /*LCK_ATTR_NULL*/); } @@ -2982,12 +3007,13 @@ kauth_groups_expired(struct kauth_group_membership *gm) /* * Expiration time of 0 means this entry is persistent. */ - if (gm->gm_expiry == 0) - return (0); - + if (gm->gm_expiry == 0) { + return 0; + } + microuptime(&tv); - - return((gm->gm_expiry <= tv.tv_sec) ? 1 : 0); + + return (gm->gm_expiry <= tv.tv_sec) ? 1 : 0; } @@ -3036,12 +3062,13 @@ kauth_groups_updatecache(struct kauth_identity_extlookup *el) { struct kauth_group_membership *gm; struct timeval tv; - + /* need a valid response if we are to cache anything */ if ((el->el_flags & - (KAUTH_EXTLOOKUP_VALID_UID | KAUTH_EXTLOOKUP_VALID_GID | KAUTH_EXTLOOKUP_VALID_MEMBERSHIP)) != - (KAUTH_EXTLOOKUP_VALID_UID | KAUTH_EXTLOOKUP_VALID_GID | KAUTH_EXTLOOKUP_VALID_MEMBERSHIP)) + (KAUTH_EXTLOOKUP_VALID_UID | KAUTH_EXTLOOKUP_VALID_GID | KAUTH_EXTLOOKUP_VALID_MEMBERSHIP)) != + (KAUTH_EXTLOOKUP_VALID_UID | KAUTH_EXTLOOKUP_VALID_GID | KAUTH_EXTLOOKUP_VALID_MEMBERSHIP)) { return; + } microuptime(&tv); @@ -3066,8 +3093,9 @@ kauth_groups_updatecache(struct kauth_identity_extlookup *el) KAUTH_GROUPS_UNLOCK(); /* if we found an entry to update, stop here */ - if (gm != NULL) + if (gm != NULL) { return; + } /* allocate a new record */ MALLOC(gm, struct kauth_group_membership *, sizeof(*gm), M_KAUTH, M_WAITOK); @@ -3080,7 +3108,7 @@ kauth_groups_updatecache(struct kauth_identity_extlookup *el) gm->gm_flags &= ~KAUTH_GROUP_ISMEMBER; } gm->gm_expiry = (el->el_member_valid) ? el->el_member_valid + tv.tv_sec : 0; - } + } /* * Insert the new entry. Note that it's possible to race ourselves @@ -3100,8 +3128,9 @@ kauth_groups_updatecache(struct kauth_identity_extlookup *el) KAUTH_GROUPS_UNLOCK(); /* free expired cache entry */ - if (gm != NULL) + if (gm != NULL) { FREE(gm, M_KAUTH); + } } /* @@ -3110,11 +3139,12 @@ kauth_groups_updatecache(struct kauth_identity_extlookup *el) * Must be called with the group cache lock held. 
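
kauth_groups_updatecache above follows a common kernel caching discipline: search and update in place under the lock, allocate only after dropping it, and tolerate the benign race where another thread inserts the same (uid, gid) pair first, letting duplicates simply age out. A self-contained sketch (the ex_* names and the EX_GROUPS_LOCK/UNLOCK macros are stand-ins for the kauth equivalents):

struct ex_membership {
	TAILQ_ENTRY(ex_membership) gm_link;
	uid_t gm_uid;
	gid_t gm_gid;
	int   gm_ismember;
};
TAILQ_HEAD(ex_groups_head, ex_membership);
static struct ex_groups_head ex_groups = TAILQ_HEAD_INITIALIZER(ex_groups);

static void
ex_groups_update(uid_t uid, gid_t gid, int ismember)
{
	struct ex_membership *gm;

	EX_GROUPS_LOCK();
	TAILQ_FOREACH(gm, &ex_groups, gm_link) {
		if (gm->gm_uid == uid && gm->gm_gid == gid) {
			gm->gm_ismember = ismember;  /* refresh in place */
			break;
		}
	}
	EX_GROUPS_UNLOCK();
	if (gm != NULL) {
		return;
	}

	/* allocate outside the lock; a racing duplicate is tolerated */
	MALLOC(gm, struct ex_membership *, sizeof(*gm), M_TEMP,
	    M_WAITOK | M_ZERO);
	gm->gm_uid = uid;
	gm->gm_gid = gid;
	gm->gm_ismember = ismember;
	EX_GROUPS_LOCK();
	TAILQ_INSERT_HEAD(&ex_groups, gm, gm_link);
	EX_GROUPS_UNLOCK();
}
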
*/ static void -kauth_groups_trimcache(int new_size) { - struct kauth_group_membership *gm; +kauth_groups_trimcache(int new_size) +{ + struct kauth_group_membership *gm; lck_mtx_assert(kauth_groups_mtx, LCK_MTX_ASSERT_OWNED); - + while (kauth_groups_count > new_size) { gm = TAILQ_LAST(&kauth_groups, kauth_groups_head); TAILQ_REMOVE(&kauth_groups, gm, gm_link); @@ -3122,7 +3152,7 @@ kauth_groups_trimcache(int new_size) { FREE(gm, M_KAUTH); } } -#endif /* CONFIG_EXT_RESOLVER */ +#endif /* CONFIG_EXT_RESOLVER */ /* * Group membership KPI @@ -3174,7 +3204,7 @@ kauth_cred_ismember_gid(kauth_cred_t cred, gid_t gid, int *resultp) for (i = 0; i < pcred->cr_ngroups; i++) { if (gid == pcred->cr_groups[i]) { *resultp = 1; - return(0); + return 0; } } @@ -3184,9 +3214,9 @@ kauth_cred_ismember_gid(kauth_cred_t cred, gid_t gid, int *resultp) */ if (pcred->cr_gmuid == KAUTH_UID_NONE) { *resultp = 0; - return(0); + return 0; } - + #if CONFIG_EXT_RESOLVER struct kauth_group_membership *gm; struct kauth_identity_extlookup el; @@ -3198,7 +3228,7 @@ kauth_cred_ismember_gid(kauth_cred_t cred, gid_t gid, int *resultp) */ if (!kauth_resolver_registered) { *resultp = 0; - return(0); + return 0; } /* TODO: */ @@ -3218,14 +3248,15 @@ kauth_cred_ismember_gid(kauth_cred_t cred, gid_t gid, int *resultp) } /* did we find a membership entry? */ - if (gm != NULL) + if (gm != NULL) { *resultp = (gm->gm_flags & KAUTH_GROUP_ISMEMBER) ? 1 : 0; + } KAUTH_GROUPS_UNLOCK(); /* if we did, we can return now */ if (gm != NULL) { DTRACE_PROC2(kauth__group__cache__hit, int, pcred->cr_gmuid, int, gid); - return(0); + return 0; } /* nothing in the cache, need to go to userland */ @@ -3234,29 +3265,30 @@ kauth_cred_ismember_gid(kauth_cred_t cred, gid_t gid, int *resultp) el.el_flags = KAUTH_EXTLOOKUP_VALID_UID | KAUTH_EXTLOOKUP_VALID_GID | KAUTH_EXTLOOKUP_WANT_MEMBERSHIP; el.el_uid = pcred->cr_gmuid; el.el_gid = gid; - el.el_member_valid = 0; /* XXX set by resolver? */ + el.el_member_valid = 0; /* XXX set by resolver? */ DTRACE_PROC2(kauth__group__resolver__submitted, int, el.el_uid, int, el.el_gid); - + error = kauth_resolver_submit(&el, 0ULL); - + DTRACE_PROC2(kauth__group__resolver__returned, int, error, int, el.el_flags); - - if (error != 0) - return(error); + + if (error != 0) { + return error; + } /* save the results from the lookup */ kauth_groups_updatecache(&el); /* if we successfully ascertained membership, report */ if (el.el_flags & KAUTH_EXTLOOKUP_VALID_MEMBERSHIP) { *resultp = (el.el_flags & KAUTH_EXTLOOKUP_ISMEMBER) ? 
1 : 0; - return(0); + return 0; } - return(ENOENT); + return ENOENT; #else *resultp = 0; - return(0); + return 0; #endif } @@ -3327,7 +3359,7 @@ kauth_cred_ismember_guid(__unused kauth_cred_t cred, guid_t *guidp, int *resultp } if (ki.ki_valid & KI_VALID_UID) { *resultp = 0; - return (0); + return 0; } } #endif /* CONFIG_EXT_RESOLVER */ @@ -3348,24 +3380,24 @@ kauth_cred_ismember_guid(__unused kauth_cred_t cred, guid_t *guidp, int *resultp } } else { #if CONFIG_EXT_RESOLVER - do_check: +do_check: #endif /* CONFIG_EXT_RESOLVER */ error = kauth_cred_ismember_gid(cred, gid, resultp); } } - break; + break; } - return(error); + return error; } /* * kauth_cred_gid_subset * - * Description: Given two credentials, determine if all GIDs associated with - * the first are also associated with the second + * Description: Given two credentials, determine if all GIDs associated with + * the first are also associated with the second * * Parameters: cred1 Credential to check for - * cred2 Credential to check in + * cred2 Credential to check in * resultp Pointer to int to contain the * result of the call * @@ -3380,7 +3412,7 @@ kauth_cred_ismember_guid(__unused kauth_cred_t cred, guid_t *guidp, int *resultp * Notes: This function guarantees not to modify resultp when returning * an error. */ -int +int kauth_cred_gid_subset(kauth_cred_t cred1, kauth_cred_t cred2, int *resultp) { int i, err, res = 1; @@ -3407,18 +3439,18 @@ kauth_cred_gid_subset(kauth_cred_t cred1, kauth_cred_t cred2, int *resultp) } if (!res && pcred1->cr_rgid != pcred2->cr_rgid && - pcred1->cr_rgid != pcred2->cr_svgid) { + pcred1->cr_rgid != pcred2->cr_svgid) { *resultp = 0; return 0; } /* Finally, check saved gid */ - if ((err = kauth_cred_ismember_gid(cred2, pcred1->cr_svgid, &res)) != 0){ + if ((err = kauth_cred_ismember_gid(cred2, pcred1->cr_svgid, &res)) != 0) { return err; } if (!res && pcred1->cr_svgid != pcred2->cr_rgid && - pcred1->cr_svgid != pcred2->cr_svgid) { + pcred1->cr_svgid != pcred2->cr_svgid) { *resultp = 0; return 0; } @@ -3445,7 +3477,7 @@ kauth_cred_gid_subset(kauth_cred_t cred1, kauth_cred_t cred2, int *resultp) int kauth_cred_issuser(kauth_cred_t cred) { - return(kauth_cred_getuid(cred) == 0); + return kauth_cred_getuid(cred) == 0; } @@ -3454,14 +3486,10 @@ kauth_cred_issuser(kauth_cred_t cred) */ /* lock protecting credential hash table */ -static lck_mtx_t *kauth_cred_hash_mtx; -#define KAUTH_CRED_HASH_LOCK() lck_mtx_lock(kauth_cred_hash_mtx); -#define KAUTH_CRED_HASH_UNLOCK() lck_mtx_unlock(kauth_cred_hash_mtx); -#if KAUTH_CRED_HASH_DEBUG -#define KAUTH_CRED_HASH_LOCK_ASSERT() lck_mtx_assert(kauth_cred_hash_mtx, LCK_MTX_ASSERT_OWNED) -#else /* !KAUTH_CRED_HASH_DEBUG */ -#define KAUTH_CRED_HASH_LOCK_ASSERT() -#endif /* !KAUTH_CRED_HASH_DEBUG */ +static lck_mtx_t kauth_cred_hash_mtx; +#define KAUTH_CRED_HASH_LOCK() lck_mtx_lock(&kauth_cred_hash_mtx); +#define KAUTH_CRED_HASH_UNLOCK() lck_mtx_unlock(&kauth_cred_hash_mtx); +#define KAUTH_CRED_HASH_LOCK_ASSERT() LCK_MTX_ASSERT(&kauth_cred_hash_mtx, LCK_MTX_ASSERT_OWNED) /* @@ -3498,18 +3526,10 @@ static lck_mtx_t *kauth_cred_hash_mtx; void kauth_cred_init(void) { - int i; - - kauth_cred_hash_mtx = lck_mtx_alloc_init(kauth_lck_grp, 0/*LCK_ATTR_NULL*/); - - /*allocate credential hash table */ - MALLOC(kauth_cred_table_anchor, struct kauth_cred_entry_head *, - (sizeof(struct kauth_cred_entry_head) * KAUTH_CRED_TABLE_SIZE), - M_KAUTH, M_WAITOK | M_ZERO); - if (kauth_cred_table_anchor == NULL) - panic("startup: kauth_cred_init"); - for (i = 0; i < KAUTH_CRED_TABLE_SIZE; 
i++) { - TAILQ_INIT(&kauth_cred_table_anchor[i]); + lck_mtx_init(&kauth_cred_hash_mtx, kauth_lck_grp, 0 /*LCK_ATTR_NULL*/); + + for (int i = 0; i < KAUTH_CRED_TABLE_SIZE; i++) { + LIST_INIT(&kauth_cred_table_anchor[i]); } } @@ -3527,7 +3547,7 @@ kauth_cred_init(void) uid_t kauth_getuid(void) { - return(kauth_cred_getuid(kauth_cred_get())); + return kauth_cred_getuid(kauth_cred_get()); } @@ -3544,7 +3564,7 @@ kauth_getuid(void) uid_t kauth_getruid(void) { - return(kauth_cred_getruid(kauth_cred_get())); + return kauth_cred_getruid(kauth_cred_get()); } @@ -3561,7 +3581,7 @@ kauth_getruid(void) gid_t kauth_getgid(void) { - return(kauth_cred_getgid(kauth_cred_get())); + return kauth_cred_getgid(kauth_cred_get()); } @@ -3578,7 +3598,7 @@ kauth_getgid(void) gid_t kauth_getrgid(void) { - return(kauth_cred_getrgid(kauth_cred_get())); + return kauth_cred_getrgid(kauth_cred_get()); } @@ -3612,8 +3632,9 @@ kauth_cred_get(void) uthread = get_bsdthread_info(current_thread()); /* sanity */ - if (uthread == NULL) + if (uthread == NULL) { panic("thread wants credential but has no BSD thread info"); + } /* * We can lazy-bind credentials to threads, as long as their processes * have them. @@ -3622,11 +3643,12 @@ kauth_cred_get(void) * XXX should probably be called out in a function. */ if (uthread->uu_ucred == NOCRED) { - if ((p = (proc_t) get_bsdtask_info(get_threadtask(current_thread()))) == NULL) + if ((p = (proc_t) get_bsdtask_info(get_threadtask(current_thread()))) == NULL) { panic("thread wants credential but has no BSD process"); + } uthread->uu_ucred = kauth_cred_proc_ref(p); } - return(uthread->uu_ucred); + return uthread->uu_ucred; } void @@ -3665,8 +3687,9 @@ kauth_cred_uthread_update(uthread_t uthread, proc_t proc) (uthread->uu_flag & UT_SETUID) == 0) { kauth_cred_t old = uthread->uu_ucred; uthread->uu_ucred = kauth_cred_proc_ref(proc); - if (IS_VALID_CRED(old)) + if (IS_VALID_CRED(old)) { kauth_cred_unref(&old); + } } } @@ -3698,15 +3721,7 @@ kauth_cred_uthread_update(uthread_t uthread, proc_t proc) kauth_cred_t kauth_cred_get_with_ref(void) { - struct proc *procp; - struct uthread *uthread; - - uthread = get_bsdthread_info(current_thread()); - /* sanity checks */ - if (uthread == NULL) - panic("%s - thread wants credential but has no BSD thread info", __FUNCTION__); - if ((procp = (proc_t) get_bsdtask_info(get_threadtask(current_thread()))) == NULL) - panic("%s - thread wants credential but has no BSD process", __FUNCTION__); + struct uthread *uthread = current_uthread(); /* * We can lazy-bind credentials to threads, as long as their processes @@ -3717,11 +3732,11 @@ kauth_cred_get_with_ref(void) */ if (uthread->uu_ucred == NOCRED) { /* take reference for new cred in thread */ - uthread->uu_ucred = kauth_cred_proc_ref(procp); + uthread->uu_ucred = kauth_cred_proc_ref(current_proc()); } /* take a reference for our caller */ kauth_cred_ref(uthread->uu_ucred); - return(uthread->uu_ucred); + return uthread->uu_ucred; } @@ -3757,16 +3772,15 @@ kauth_cred_get_with_ref(void) kauth_cred_t kauth_cred_proc_ref(proc_t procp) { - kauth_cred_t cred; - + kauth_cred_t cred; + proc_ucred_lock(procp); cred = proc_ucred(procp); kauth_cred_ref(cred); proc_ucred_unlock(procp); - return(cred); + return cred; } - /* * kauth_cred_alloc * @@ -3779,8 +3793,8 @@ kauth_cred_proc_ref(proc_t procp) * * Notes: The newly allocated credential is zero'ed as part of the * allocation process, with the exception of the reference - * count, which is set to 1 to indicate a single reference - * held by the caller. 
+ * count, which is set to 0 to indicate the caller still has + * to call kauth_cred_add(). * * Since newly allocated credentials have no external pointers * referencing them, prior to making them visible in an externally @@ -3807,37 +3821,44 @@ kauth_cred_proc_ref(proc_t procp) * than returning a NULL pointer; the code that calls this * function needs to be audited before this can be changed. */ -kauth_cred_t +static kauth_cred_t kauth_cred_alloc(void) { kauth_cred_t newcred; - - MALLOC_ZONE(newcred, kauth_cred_t, sizeof(*newcred), M_CRED, M_WAITOK); + + MALLOC_ZONE(newcred, kauth_cred_t, sizeof(*newcred), M_CRED, M_WAITOK | M_ZERO); + assert(newcred); if (newcred != 0) { posix_cred_t newpcred = posix_cred_get(newcred); - bzero(newcred, sizeof(*newcred)); - newcred->cr_ref = 1; newcred->cr_audit.as_aia_p = audit_default_aia_p; /* must do this, or cred has same group membership as uid 0 */ newpcred->cr_gmuid = KAUTH_UID_NONE; -#if CRED_DIAGNOSTIC - } else { - panic("kauth_cred_alloc: couldn't allocate credential"); -#endif +#if CONFIG_MACF + mac_cred_label_init(newcred); +#endif } + return newcred; +} -#if KAUTH_CRED_HASH_DEBUG - kauth_cred_count++; -#endif +/* + * kauth_cred_free + * + * Description: Destroy a credential + * + * Parameters: cred Credential to destroy. + */ +static void +kauth_cred_free(kauth_cred_t cred) +{ + assert(os_atomic_load(&cred->cr_ref, relaxed) == 0); #if CONFIG_MACF - mac_cred_label_init(newcred); + mac_cred_label_destroy(cred); #endif - - return(newcred); + AUDIT_SESSION_UNREF(cred); + FREE_ZONE(cred, sizeof(*cred), M_CRED); } - /* * kauth_cred_create * @@ -3858,21 +3879,14 @@ kauth_cred_alloc(void) * maintain this field, we can't expect callers to know how it * needs to be set. Callers should be prepared for this field * to be overwritten. - * - * XXX: This code will tight-loop if memory for a new credential is - * persistently unavailable; this is perhaps not the wisest way - * to handle this condition, but current callers do not expect - * a failure. */ kauth_cred_t kauth_cred_create(kauth_cred_t cred) { - kauth_cred_t found_cred, new_cred = NULL; - posix_cred_t pcred = posix_cred_get(cred); + kauth_cred_t found_cred, new_cred = NULL; + posix_cred_t pcred = posix_cred_get(cred); int is_member = 0; - KAUTH_CRED_HASH_LOCK_ASSERT(); - if (pcred->cr_flags & CRF_NOMEMBERD) { pcred->cr_gmuid = KAUTH_UID_NONE; } else { @@ -3910,63 +3924,33 @@ kauth_cred_create(kauth_cred_t cred) } /* Caller *must* specify at least the egid in cr_groups[0] */ - if (pcred->cr_ngroups < 1) - return(NULL); - - for (;;) { - KAUTH_CRED_HASH_LOCK(); - found_cred = kauth_cred_find(cred); - if (found_cred != NULL) { - /* - * Found an existing credential so we'll bump - * reference count and return - */ - kauth_cred_ref(found_cred); - KAUTH_CRED_HASH_UNLOCK(); - return(found_cred); - } - KAUTH_CRED_HASH_UNLOCK(); - - /* - * No existing credential found. Create one and add it to - * our hash table. 
- */ - new_cred = kauth_cred_alloc(); - if (new_cred != NULL) { - int err; - posix_cred_t new_pcred = posix_cred_get(new_cred); - new_pcred->cr_uid = pcred->cr_uid; - new_pcred->cr_ruid = pcred->cr_ruid; - new_pcred->cr_svuid = pcred->cr_svuid; - new_pcred->cr_rgid = pcred->cr_rgid; - new_pcred->cr_svgid = pcred->cr_svgid; - new_pcred->cr_gmuid = pcred->cr_gmuid; - new_pcred->cr_ngroups = pcred->cr_ngroups; - bcopy(&pcred->cr_groups[0], &new_pcred->cr_groups[0], sizeof(new_pcred->cr_groups)); + if (pcred->cr_ngroups < 1) { + return NULL; + } + + struct kauth_cred_entry_head *bucket = kauth_cred_get_bucket(cred); + + KAUTH_CRED_HASH_LOCK(); + found_cred = kauth_cred_find_and_ref(cred, bucket); + KAUTH_CRED_HASH_UNLOCK(); + if (found_cred != NULL) { + return found_cred; + } + + /* + * No existing credential found. Create one and add it to + * our hash table. + */ + new_cred = kauth_cred_alloc(); + if (new_cred != NULL) { + *posix_cred_get(new_cred) = *pcred; #if CONFIG_AUDIT - bcopy(&cred->cr_audit, &new_cred->cr_audit, - sizeof(new_cred->cr_audit)); -#endif - new_pcred->cr_flags = pcred->cr_flags; - - KAUTH_CRED_HASH_LOCK(); - err = kauth_cred_add(new_cred); - KAUTH_CRED_HASH_UNLOCK(); - - /* Retry if kauth_cred_add returns non zero value */ - if (err == 0) - break; -#if CONFIG_MACF - mac_cred_label_destroy(new_cred); + new_cred->cr_audit = cred->cr_audit; #endif - AUDIT_SESSION_UNREF(new_cred); - - FREE_ZONE(new_cred, sizeof(*new_cred), M_CRED); - new_cred = NULL; - } + new_cred = kauth_cred_add(new_cred, bucket); } - return(new_cred); + return new_cred; } @@ -4020,7 +4004,7 @@ kauth_cred_setresuid(kauth_cred_t cred, uid_t ruid, uid_t euid, uid_t svuid, uid (svuid == KAUTH_UID_NONE || pcred->cr_svuid == svuid) && (pcred->cr_gmuid == gmuid)) { /* no change needed */ - return(cred); + return cred; } /* @@ -4047,7 +4031,7 @@ kauth_cred_setresuid(kauth_cred_t cred, uid_t ruid, uid_t euid, uid_t svuid, uid temp_pcred->cr_flags |= CRF_NOMEMBERD; } - return(kauth_cred_update(cred, &temp_cred, TRUE)); + return kauth_cred_update(cred, &temp_cred, TRUE); } @@ -4080,7 +4064,7 @@ kauth_cred_setresuid(kauth_cred_t cred, uid_t ruid, uid_t euid, uid_t svuid, uid kauth_cred_t kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid) { - struct ucred temp_cred; + struct ucred temp_cred; posix_cred_t temp_pcred = posix_cred_get(&temp_cred); posix_cred_t pcred = posix_cred_get(cred); @@ -4088,14 +4072,14 @@ kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid) DEBUG_CRED_ENTER("kauth_cred_setresgid %p %d %d %d\n", cred, rgid, egid, svgid); /* - * We don't need to do anything if the given GID are already the + * We don't need to do anything if the given GID are already the * same as the GIDs in the credential. */ if (pcred->cr_groups[0] == egid && pcred->cr_rgid == rgid && pcred->cr_svgid == svgid) { /* no change needed */ - return(cred); + return cred; } /* @@ -4120,13 +4104,13 @@ kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid) temp_pcred->cr_svgid = svgid; } - return(kauth_cred_update(cred, &temp_cred, TRUE)); + return kauth_cred_update(cred, &temp_cred, TRUE); } /* - * Update the given credential with the given groups. We only allocate a new - * credential when the given gid actually results in changes to the existing + * Update the given credential with the given groups. We only allocate a new + * credential when the given gid actually results in changes to the existing * credential. 
* The gmuid argument supplies a new uid (or KAUTH_UID_NONE to opt out) * which will be used for group membership checking. @@ -4176,7 +4160,7 @@ kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid) kauth_cred_t kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, int groupcount, uid_t gmuid) { - int i; + int i; struct ucred temp_cred; posix_cred_t temp_pcred = posix_cred_get(&temp_cred); posix_cred_t pcred; @@ -4191,12 +4175,13 @@ kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, int groupcount, uid_t gmu */ if ((pcred->cr_gmuid == gmuid) && (pcred->cr_ngroups == groupcount)) { for (i = 0; i < groupcount; i++) { - if (pcred->cr_groups[i] != groups[i]) + if (pcred->cr_groups[i] != groups[i]) { break; + } } if (i == groupcount) { /* no change needed */ - return(cred); + return cred; } } @@ -4211,12 +4196,13 @@ kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, int groupcount, uid_t gmu temp_pcred->cr_ngroups = groupcount; bcopy(groups, temp_pcred->cr_groups, sizeof(temp_pcred->cr_groups)); temp_pcred->cr_gmuid = gmuid; - if (gmuid == KAUTH_UID_NONE) + if (gmuid == KAUTH_UID_NONE) { temp_pcred->cr_flags |= CRF_NOMEMBERD; - else + } else { temp_pcred->cr_flags &= ~CRF_NOMEMBERD; + } - return(kauth_cred_update(cred, &temp_cred, TRUE)); + return kauth_cred_update(cred, &temp_cred, TRUE); } /* @@ -4235,22 +4221,23 @@ kauth_cred_getgroups(kauth_cred_t cred, gid_t *grouplist, int *countp) { int limit = NGROUPS; posix_cred_t pcred; - + pcred = posix_cred_get(cred); -#if CONFIG_EXT_RESOLVER +#if CONFIG_EXT_RESOLVER /* * If we've not opted out of using the resolver, then convert the cred to a list * of supplemental groups. We do this only if there has been a resolver to talk to, * since we may be too early in boot, or in an environment that isn't using DS. */ - if (kauth_identitysvc_has_registered && kauth_external_supplementary_groups_supported && (pcred->cr_flags & CRF_NOMEMBERD) == 0) { + if (kauth_identitysvc_has_registered && kauth_external_supplementary_groups_supported && (pcred->cr_flags & CRF_NOMEMBERD) == 0) { uid_t uid = kauth_cred_getuid(cred); int err; - + err = kauth_cred_uid2groups(&uid, grouplist, countp); - if (!err) + if (!err) { return 0; + } /* On error just fall through */ KAUTH_DEBUG("kauth_cred_getgroups failed %d\n", err); @@ -4325,9 +4312,9 @@ kauth_cred_setuidgid(kauth_cred_t cred, uid_t uid, gid_t gid) * user IDs are already the same as the user ID passed into us. 
*/ if (pcred->cr_uid == uid && pcred->cr_ruid == uid && pcred->cr_svuid == uid && - pcred->cr_gid == gid && pcred->cr_rgid == gid && pcred->cr_svgid == gid) { + pcred->cr_gid == gid && pcred->cr_rgid == gid && pcred->cr_svgid == gid) { /* no change needed */ - return(cred); + return cred; } /* @@ -4359,7 +4346,7 @@ kauth_cred_setuidgid(kauth_cred_t cred, uid_t uid, gid_t gid) temp_cred.cr_label = cred->cr_label; #endif - return(kauth_cred_update(cred, &temp_cred, TRUE)); + return kauth_cred_update(cred, &temp_cred, TRUE); } @@ -4407,7 +4394,7 @@ kauth_cred_setsvuidgid(kauth_cred_t cred, uid_t uid, gid_t gid) */ if (pcred->cr_svuid == uid && pcred->cr_svgid == gid) { /* no change needed */ - return(cred); + return cred; } DEBUG_CRED_CHANGE("kauth_cred_setsvuidgid: cred change\n"); @@ -4418,13 +4405,13 @@ kauth_cred_setsvuidgid(kauth_cred_t cred, uid_t uid, gid_t gid) temp_pcred->cr_svuid = uid; temp_pcred->cr_svgid = gid; - return(kauth_cred_update(cred, &temp_cred, TRUE)); + return kauth_cred_update(cred, &temp_cred, TRUE); } /* * kauth_cred_setauditinfo - * + * * Description: Update the given credential using the given au_session_t. * * Parameters: cred The original credential @@ -4457,19 +4444,19 @@ kauth_cred_setauditinfo(kauth_cred_t cred, au_session_t *auditinfo_p) */ if (bcmp(&cred->cr_audit, auditinfo_p, sizeof(cred->cr_audit)) == 0) { /* no change needed */ - return(cred); + return cred; } bcopy(cred, &temp_cred, sizeof(temp_cred)); bcopy(auditinfo_p, &temp_cred.cr_audit, sizeof(temp_cred.cr_audit)); - return(kauth_cred_update(cred, &temp_cred, FALSE)); + return kauth_cred_update(cred, &temp_cred, FALSE); } #if CONFIG_MACF /* * kauth_cred_label_update - * + * * Description: Update the MAC label associated with a credential * * Parameters: cred The original credential @@ -4503,12 +4490,12 @@ kauth_cred_label_update(kauth_cred_t cred, struct label *label) newcred = kauth_cred_update(cred, &temp_cred, TRUE); mac_cred_label_destroy(&temp_cred); - return (newcred); + return newcred; } /* * kauth_cred_label_update_execve - * + * * Description: Update the MAC label associated with a credential as * part of exec * @@ -4541,8 +4528,8 @@ kauth_cred_label_update(kauth_cred_t cred, struct label *label) static kauth_cred_t kauth_cred_label_update_execve(kauth_cred_t cred, vfs_context_t ctx, - struct vnode *vp, off_t offset, struct vnode *scriptvp, struct label *scriptl, - struct label *execl, unsigned int *csflags, void *macextensions, int *disjointp, int *labelupdateerror) + struct vnode *vp, off_t offset, struct vnode *scriptvp, struct label *scriptl, + struct label *execl, unsigned int *csflags, void *macextensions, int *disjointp, int *labelupdateerror) { kauth_cred_t newcred; struct ucred temp_cred; @@ -4551,13 +4538,13 @@ kauth_cred_label_update_execve(kauth_cred_t cred, vfs_context_t ctx, mac_cred_label_init(&temp_cred); mac_cred_label_associate(cred, &temp_cred); - mac_cred_label_update_execve(ctx, &temp_cred, - vp, offset, scriptvp, scriptl, execl, csflags, - macextensions, disjointp, labelupdateerror); + mac_cred_label_update_execve(ctx, &temp_cred, + vp, offset, scriptvp, scriptl, execl, csflags, + macextensions, disjointp, labelupdateerror); newcred = kauth_cred_update(cred, &temp_cred, TRUE); mac_cred_label_destroy(&temp_cred); - return (newcred); + return newcred; } /* @@ -4572,7 +4559,8 @@ kauth_cred_label_update_execve(kauth_cred_t cred, vfs_context_t ctx, * of this call. The caller should not assume the process reference to * the old credential still exists. 
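
The loop in kauth_proc_label_update below is the canonical credential-swap pattern: build a candidate credential, then install it only if the process still points at the credential the computation started from, retrying otherwise. A hedged sketch of just that skeleton (ex_update_cred() is a hypothetical stand-in for kauth_cred_label_update() and, like it, is assumed to consume the reference on its argument whenever it returns a different credential; the p_ucred access follows the convention of the surrounding code):

static void
ex_proc_cred_swap(proc_t p)
{
	kauth_cred_t my_cred, my_new_cred;

	my_cred = kauth_cred_proc_ref(p);
	for (;;) {
		my_new_cred = ex_update_cred(my_cred);
		if (my_new_cred == my_cred) {
			break;  /* no change was needed */
		}
		proc_ucred_lock(p);
		if (proc_ucred(p) == my_cred) {
			/* still unchanged: the proc takes the new ref */
			p->p_ucred = my_new_cred;
			proc_ucred_unlock(p);
			break;
		}
		/* lost a race with another updater; retry from the
		 * proc's current credential */
		proc_ucred_unlock(p);
		kauth_cred_unref(&my_new_cred);
		my_cred = kauth_cred_proc_ref(p);
	}
	/* drop the proc's old reference (or our extra one) */
	kauth_cred_unref(&my_cred);
}
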
*/ -int kauth_proc_label_update(struct proc *p, struct label *label) +int +kauth_proc_label_update(struct proc *p, struct label *label) { kauth_cred_t my_cred, my_new_cred; @@ -4582,17 +4570,15 @@ int kauth_proc_label_update(struct proc *p, struct label *label) /* get current credential and take a reference while we muck with it */ for (;;) { - - /* + /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is * a change, we drop the reference on the credential we * passed in. The subsequent compare is safe, because it is * a pointer compare rather than a contents compare. - */ + */ my_new_cred = kauth_cred_label_update(my_cred, label); if (my_cred != my_new_cred) { - DEBUG_CRED_CHANGE("kauth_proc_setlabel_unlocked CH(%d): %p/0x%08x -> %p/0x%08x\n", p->p_pid, my_cred, my_cred->cr_flags, my_new_cred, my_new_cred->cr_flags); proc_ucred_lock(p); @@ -4619,8 +4605,8 @@ int kauth_proc_label_update(struct proc *p, struct label *label) } /* Drop old proc reference or our extra reference */ kauth_cred_unref(&my_cred); - - return (0); + + return 0; } /* @@ -4636,7 +4622,7 @@ int kauth_proc_label_update(struct proc *p, struct label *label) * vp The vnode being exec'ed * scriptl The script MAC label * execl The executable MAC label - * lupdateerror The error place holder for MAC label authority + * lupdateerror The error place holder for MAC label authority * to update about possible termination * * Returns: 0 Label update did not make credential @@ -4648,11 +4634,11 @@ int kauth_proc_label_update(struct proc *p, struct label *label) * result of this call. The caller should not assume the process * reference to the old credential still exists. */ - + void kauth_proc_label_update_execve(struct proc *p, vfs_context_t ctx, - struct vnode *vp, off_t offset, struct vnode *scriptvp, struct label *scriptl, - struct label *execl, unsigned int *csflags, void *macextensions, int *disjoint, int *update_return) + struct vnode *vp, off_t offset, struct vnode *scriptvp, struct label *scriptl, + struct label *execl, unsigned int *csflags, void *macextensions, int *disjoint, int *update_return) { kauth_cred_t my_cred, my_new_cred; my_cred = kauth_cred_proc_ref(p); @@ -4661,17 +4647,15 @@ kauth_proc_label_update_execve(struct proc *p, vfs_context_t ctx, /* get current credential and take a reference while we muck with it */ for (;;) { - - /* + /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is * a change, we drop the reference on the credential we * passed in. The subsequent compare is safe, because it is * a pointer compare rather than a contents compare. 
- */ + */ my_new_cred = kauth_cred_label_update_execve(my_cred, ctx, vp, offset, scriptvp, scriptl, execl, csflags, macextensions, disjoint, update_return); if (my_cred != my_new_cred) { - DEBUG_CRED_CHANGE("kauth_proc_label_update_execve_unlocked CH(%d): %p/0x%08x -> %p/0x%08x\n", p->p_pid, my_cred, my_cred->cr_flags, my_new_cred, my_new_cred->cr_flags); proc_ucred_lock(p); @@ -4703,7 +4687,7 @@ kauth_proc_label_update_execve(struct proc *p, vfs_context_t ctx, /* * for temporary binary compatibility */ -kauth_cred_t kauth_cred_setlabel(kauth_cred_t cred, struct label *label); +kauth_cred_t kauth_cred_setlabel(kauth_cred_t cred, struct label *label); kauth_cred_t kauth_cred_setlabel(kauth_cred_t cred, struct label *label) { @@ -4720,27 +4704,27 @@ kauth_proc_setlabel(struct proc *p, struct label *label) #else -/* this is a temp hack to cover us when MACF is not built in a kernel configuration. +/* this is a temp hack to cover us when MACF is not built in a kernel configuration. * Since we cannot build our export lists based on the kernel configuration we need - * to define a stub. + * to define a stub. */ kauth_cred_t kauth_cred_label_update(__unused kauth_cred_t cred, __unused void *label) { - return(NULL); + return NULL; } int kauth_proc_label_update(__unused struct proc *p, __unused void *label) { - return (0); + return 0; } #if 1 /* * for temporary binary compatibility */ -kauth_cred_t kauth_cred_setlabel(kauth_cred_t cred, void *label); +kauth_cred_t kauth_cred_setlabel(kauth_cred_t cred, void *label); kauth_cred_t kauth_cred_setlabel(__unused kauth_cred_t cred, __unused void *label) { @@ -4751,11 +4735,72 @@ int kauth_proc_setlabel(struct proc *p, void *label); int kauth_proc_setlabel(__unused struct proc *p, __unused void *label) { - return (0); + return 0; } #endif #endif +// TODO: move to os_refcnt once the ABI issue is resolved + +#define KAUTH_CRED_REF_MAX 0x0ffffffful + +__attribute__((noinline, cold, not_tail_called, noreturn)) +static void +kauth_cred_panic_resurrection(kauth_cred_t cred) +{ + panic("kauth_cred_unref: cred %p resurrected", cred); + __builtin_unreachable(); +} + +__attribute__((noinline, cold, not_tail_called, noreturn)) +static void +kauth_cred_panic_over_released(kauth_cred_t cred) +{ + panic("kauth_cred_unref: cred %p over-released", cred); + __builtin_unreachable(); +} + +__attribute__((noinline, cold, not_tail_called, noreturn)) +static void +kauth_cred_panic_over_retain(kauth_cred_t cred) +{ + panic("kauth_cred_ref: cred %p over-retained", cred); + __builtin_unreachable(); +} + +/* + * kauth_cred_tryref + * + * Description: Tries to take a reference, used from kauth_cred_find_and_ref + * to debounce the race with kauth_cred_unref. 
+ * + * Parameters: cred The credential to reference + * + * Returns: (bool) Whether the reference was taken + */ +static inline bool +kauth_cred_tryref(kauth_cred_t cred) +{ + u_long old_ref, new_ref; + os_atomic_rmw_loop(&cred->cr_ref, old_ref, new_ref, relaxed, { + if (old_ref == 0) { + os_atomic_rmw_loop_give_up(return false); + } + new_ref = old_ref + 1; + }); + if (__improbable(old_ref >= KAUTH_CRED_REF_MAX)) { + kauth_cred_panic_over_retain(cred); + } + +#if 0 // use this to watch a specific credential + if (is_target_cred( *credp ) != 0) { + get_backtrace(); + } +#endif + + return true; +} + /* * kauth_cred_ref * @@ -4764,119 +4809,60 @@ kauth_proc_setlabel(__unused struct proc *p, __unused void *label) * Parameters: cred The credential to reference * * Returns: (void) - * - * Notes: This function adds a reference to the provided credential; - * the existing reference on the credential is assumed to be - * held stable over this operation by taking the appropriate - * lock to protect the pointer from which it is being referenced, - * if necessary (e.g. the proc lock is held over the call if the - * credential being referenced is from p_ucred, the vnode lock - * if from the per vnode name cache cred cache, and so on). - * - * This is safe from the kauth_cred_unref() path, since an atomic - * add is used, and the unref path specifically checks to see that - * the value has not been changed to add a reference between the - * time the credential is unreferenced by another pointer and the - * time it is unreferenced from the cred hash cache. */ void kauth_cred_ref(kauth_cred_t cred) { - int old_value; - - NULLCRED_CHECK(cred); - - old_value = OSAddAtomicLong(1, (long*)&cred->cr_ref); + u_long old_ref = os_atomic_inc_orig(&cred->cr_ref, relaxed); - if (old_value < 1) - panic("kauth_cred_ref: trying to take a reference on a cred with no references"); + if (__improbable(old_ref < 1)) { + kauth_cred_panic_resurrection(cred); + } + if (__improbable(old_ref >= KAUTH_CRED_REF_MAX)) { + kauth_cred_panic_over_retain(cred); + } #if 0 // use this to watch a specific credential - if ( is_target_cred( cred ) != 0 ) { - get_backtrace( ); + if (is_target_cred( cred ) != 0) { + get_backtrace(); } #endif - - return; } - /* - * kauth_cred_unref_hashlocked + * kauth_cred_unref_fast * - * Description: release a credential reference; when the last reference is - * released, the credential will be freed. + * Description: Release a credential reference. * * Parameters: credp Pointer to address containing * credential to be freed * - * Returns: TRUE if the credential must be destroyed by the caller. - * FALSE otherwise. - * - * Implicit returns: - * *credp Set to NOCRED - * - * Notes: This function assumes the credential hash lock is held. + * Returns: true This was the last reference. + * false The object has more refs. * - * This function is internal use only, since the hash lock is - * scoped to this compilation unit. - * - * This function destroys the contents of the pointer passed by - * the caller to prevent the caller accidentally attempting to - * release a given reference twice in error. - * - * The last reference is considered to be released when a release - * of a credential of a reference count of 2 occurs; this is an - * intended effect, to take into account the reference held by - * the credential hash, which is released at the same time. 
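os_atomic_rmw_loop is an xnu-internal primitive; in portable C11 atomics the same try-retain can be sketched as below (illustrative, not the kernel macro). The essential property is that a count observed at zero is never incremented, so an object that has begun teardown cannot be resurrected by a concurrent hash lookup.

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-in for kauth_cred_tryref(): succeed only while the
 * count is nonzero. */
static bool
tryref(atomic_ulong *ref)
{
    unsigned long old = atomic_load_explicit(ref, memory_order_relaxed);
    do {
        if (old == 0) {
            return false;   /* lost the race with the final unref */
        }
    } while (!atomic_compare_exchange_weak_explicit(ref, &old, old + 1,
        memory_order_relaxed, memory_order_relaxed));
    return true;
}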
*/ -static boolean_t -kauth_cred_unref_hashlocked(kauth_cred_t *credp) +static inline bool +kauth_cred_unref_fast(kauth_cred_t cred) { - int old_value; - boolean_t destroy_it = FALSE; - - KAUTH_CRED_HASH_LOCK_ASSERT(); - NULLCRED_CHECK(*credp); - - old_value = OSAddAtomicLong(-1, (long*)&(*credp)->cr_ref); - -#if DIAGNOSTIC - if (old_value == 0) - panic("%s:0x%08x kauth_cred_unref_hashlocked: dropping a reference on a cred with no references", current_proc()->p_comm, *credp); - if (old_value == 1) - panic("%s:0x%08x kauth_cred_unref_hashlocked: dropping a reference on a cred with no hash entry", current_proc()->p_comm, *credp); -#endif + u_long old_ref = os_atomic_dec_orig(&cred->cr_ref, relaxed); #if 0 // use this to watch a specific credential - if ( is_target_cred( *credp ) != 0 ) { - get_backtrace( ); + if (is_target_cred( *credp ) != 0) { + get_backtrace(); } #endif - /* - * If the old_value is 2, then we have just released the last external - * reference to this credential - */ - if (old_value < 3) { - /* The last absolute reference is our credential hash table */ - destroy_it = kauth_cred_remove(*credp); - } - - if (destroy_it == FALSE) { - *credp = NOCRED; + if (__improbable(old_ref <= 0)) { + kauth_cred_panic_over_released(cred); } - - return (destroy_it); + return old_ref == 1; } - /* * kauth_cred_unref * - * Description: Release a credential reference while holding the credential - * hash lock; when the last reference is released, the credential - * will be freed. + * Description: Release a credential reference. + * Frees the credential if it is the last ref. * * Parameters: credp Pointer to address containing * credential to be freed @@ -4886,29 +4872,18 @@ kauth_cred_unref_hashlocked(kauth_cred_t *credp) * Implicit returns: * *credp Set to NOCRED * - * Notes: See kauth_cred_unref_hashlocked() for more information. - * */ void kauth_cred_unref(kauth_cred_t *credp) { - boolean_t destroy_it; - - KAUTH_CRED_HASH_LOCK(); - destroy_it = kauth_cred_unref_hashlocked(credp); - KAUTH_CRED_HASH_UNLOCK(); - - if (destroy_it == TRUE) { - assert(*credp != NOCRED); -#if CONFIG_MACF - mac_cred_label_destroy(*credp); -#endif - AUDIT_SESSION_UNREF(*credp); - - (*credp)->cr_ref = 0; - FREE_ZONE(*credp, sizeof(*(*credp)), M_CRED); - *credp = NOCRED; + if (kauth_cred_unref_fast(*credp)) { + KAUTH_CRED_HASH_LOCK(); + kauth_cred_remove_locked(*credp); + KAUTH_CRED_HASH_UNLOCK(); + kauth_cred_free(*credp); } + + *credp = NOCRED; } @@ -4956,8 +4931,8 @@ kauth_cred_rele(kauth_cred_t cred) * * The newly allocated credential is copied as part of the * allocation process, with the exception of the reference - * count, which is set to 1 to indicate a single reference - * held by the caller. + * count, which is set to 0 to indicate the caller still has + * to call kauth_cred_add(). * * Since newly allocated credentials have no external pointers * referencing them, prior to making them visible in an externally @@ -4975,32 +4950,24 @@ kauth_cred_rele(kauth_cred_t cred) * will trigger if this protocol is not observed). 
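The split between kauth_cred_unref_fast() and the slow path can be summarized in a small portable analogue (illustrative names; obj and table stand in for the cred and its hash bucket). Only the thread that observes the 1 -> 0 transition takes the hash lock, unlinks the entry, and frees it; paired with the tryref() sketch above, a concurrent lookup either takes its reference before the count hits zero or fails cleanly and treats the entry as absent.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>
#include <sys/queue.h>

struct obj {
    atomic_ulong ref;
    LIST_ENTRY(obj) link;
};
static LIST_HEAD(, obj) table = LIST_HEAD_INITIALIZER(table);
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void
obj_unref(struct obj **op)
{
    struct obj *o = *op;
    *op = NULL;                       /* poison the caller's pointer */

    /* Fast path: a bare atomic decrement, no lock taken. */
    if (atomic_fetch_sub_explicit(&o->ref, 1, memory_order_release) != 1) {
        return;
    }

    /* Slow path: we performed the 1 -> 0 transition.  Unlink under the
     * same lock the lookup path holds; tryref() refuses a zero count,
     * so nobody can re-reference the object between here and free(). */
    atomic_thread_fence(memory_order_acquire);
    pthread_mutex_lock(&table_lock);
    LIST_REMOVE(o, link);
    pthread_mutex_unlock(&table_lock);
    free(o);
}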
* */ -kauth_cred_t +static kauth_cred_t kauth_cred_dup(kauth_cred_t cred) { kauth_cred_t newcred; -#if CONFIG_MACF - struct label *temp_label; -#endif - -#if CRED_DIAGNOSTIC - if (cred == NOCRED || cred == FSCRED) - panic("kauth_cred_dup: bad credential"); -#endif + + assert(cred != NOCRED && cred != FSCRED); newcred = kauth_cred_alloc(); if (newcred != NULL) { -#if CONFIG_MACF - temp_label = newcred->cr_label; + newcred->cr_posix = cred->cr_posix; +#if CONFIG_AUDIT + newcred->cr_audit = cred->cr_audit; #endif - bcopy(cred, newcred, sizeof(*newcred)); #if CONFIG_MACF - newcred->cr_label = temp_label; mac_cred_label_associate(cred, newcred); #endif AUDIT_SESSION_REF(cred); - newcred->cr_ref = 1; } - return(newcred); + return newcred; } /* @@ -5032,7 +4999,7 @@ kauth_cred_copy_real(kauth_cred_t cred) if ((pcred->cr_ruid == pcred->cr_uid) && (pcred->cr_rgid == pcred->cr_gid)) { kauth_cred_ref(cred); - return(cred); + return cred; } /* @@ -5050,50 +5017,26 @@ kauth_cred_copy_real(kauth_cred_t cred) * If the cred is not opted out, make sure we are using the r/euid * for group checks */ - if (temp_pcred->cr_gmuid != KAUTH_UID_NONE) + if (temp_pcred->cr_gmuid != KAUTH_UID_NONE) { temp_pcred->cr_gmuid = pcred->cr_ruid; + } - for (;;) { - int err; - - KAUTH_CRED_HASH_LOCK(); - found_cred = kauth_cred_find(&temp_cred); - if (found_cred == cred) { - /* same cred so just bail */ - KAUTH_CRED_HASH_UNLOCK(); - return(cred); - } - if (found_cred != NULL) { - /* - * Found a match so we bump reference count on new - * one. We leave the old one alone. - */ - kauth_cred_ref(found_cred); - KAUTH_CRED_HASH_UNLOCK(); - return(found_cred); - } - - /* - * Must allocate a new credential, copy in old credential - * data and update the real user and group IDs. - */ - newcred = kauth_cred_dup(&temp_cred); - err = kauth_cred_add(newcred); - KAUTH_CRED_HASH_UNLOCK(); + struct kauth_cred_entry_head *bucket = kauth_cred_get_bucket(cred); - /* Retry if kauth_cred_add() fails */ - if (err == 0) - break; -#if CONFIG_MACF - mac_cred_label_destroy(newcred); -#endif - AUDIT_SESSION_UNREF(newcred); + KAUTH_CRED_HASH_LOCK(); + found_cred = kauth_cred_find_and_ref(&temp_cred, bucket); + KAUTH_CRED_HASH_UNLOCK(); - FREE_ZONE(newcred, sizeof(*newcred), M_CRED); - newcred = NULL; + if (found_cred) { + return found_cred; } - - return(newcred); + + /* + * Must allocate a new credential, copy in old credential + * data and update the real user and group IDs. + */ + newcred = kauth_cred_dup(&temp_cred); + return kauth_cred_add(newcred, bucket); } @@ -5128,78 +5071,65 @@ kauth_cred_copy_real(kauth_cred_t cred) */ static kauth_cred_t kauth_cred_update(kauth_cred_t old_cred, kauth_cred_t model_cred, - boolean_t retain_auditinfo) -{ - kauth_cred_t found_cred, new_cred = NULL; - + boolean_t retain_auditinfo) +{ + kauth_cred_t cred; + /* * Make sure we carry the auditinfo forward to the new credential * unless we are actually updating the auditinfo. 
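The rewritten kauth_cred_copy_real() above follows a classic interning pattern: probe the cache, and only on a miss build a private copy and try to publish it. A compact sketch of that shape, reusing struct obj, table, table_lock, and tryref() from the sketches above; obj_equal() and obj_free() are hypothetical helpers:

bool obj_equal(const struct obj *a, const struct obj *b);  /* hypothetical */
void obj_free(struct obj *o);                              /* hypothetical */

/* Find-or-insert: the insert side probes under the lock, because an
 * equal entry may have been published since our unlocked decision. */
static struct obj *
intern(struct obj *candidate)           /* candidate: ref == 0, unlinked */
{
    struct obj *found;

    pthread_mutex_lock(&table_lock);
    LIST_FOREACH(found, &table, link) {
        if (obj_equal(found, candidate)) {
            /* Entries are inserted at the head, so a dying match means
             * no newer live duplicate exists: treat it as absent. */
            if (!tryref(&found->ref)) {
                found = NULL;
            }
            break;
        }
    }
    if (found != NULL) {
        pthread_mutex_unlock(&table_lock);
        obj_free(candidate);            /* lost the race: drop our copy */
        return found;
    }
    /* Publish: the table's entry carries the initial reference. */
    atomic_store_explicit(&candidate->ref, 1, memory_order_relaxed);
    LIST_INSERT_HEAD(&table, candidate, link);
    pthread_mutex_unlock(&table_lock);
    return candidate;
}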
*/ if (retain_auditinfo) { - bcopy(&old_cred->cr_audit, &model_cred->cr_audit, - sizeof(model_cred->cr_audit)); + model_cred->cr_audit = old_cred->cr_audit; } - - for (;;) { - int err; - KAUTH_CRED_HASH_LOCK(); - found_cred = kauth_cred_find(model_cred); - if (found_cred == old_cred) { - /* same cred so just bail */ - KAUTH_CRED_HASH_UNLOCK(); - return(old_cred); - } - if (found_cred != NULL) { - boolean_t destroy_it; - - DEBUG_CRED_CHANGE("kauth_cred_update(cache hit): %p -> %p\n", old_cred, found_cred); - /* - * Found a match so we bump reference count on new - * one and decrement reference count on the old one. - */ - kauth_cred_ref(found_cred); - destroy_it = kauth_cred_unref_hashlocked(&old_cred); - KAUTH_CRED_HASH_UNLOCK(); - if (destroy_it == TRUE) { - assert(old_cred != NOCRED); -#if CONFIG_MACF - mac_cred_label_destroy(old_cred); -#endif - AUDIT_SESSION_UNREF(old_cred); - - old_cred->cr_ref = 0; - FREE_ZONE(old_cred, sizeof(*old_cred), M_CRED); - old_cred = NOCRED; + if (kauth_cred_is_equal(old_cred, model_cred)) { + return old_cred; + } - } - return(found_cred); - } + struct kauth_cred_entry_head *bucket = kauth_cred_get_bucket(model_cred); + KAUTH_CRED_HASH_LOCK(); + cred = kauth_cred_find_and_ref(model_cred, bucket); + if (cred != NULL) { /* - * Must allocate a new credential using the model. also - * adds the new credential to the credential hash table. + * We found a hit, so we can get rid of the old_cred. + * If we didn't, then we need to keep the old_cred around, + * because `model_cred` has copies of things such as the cr_label + * or audit session that it holds no refcounts for. */ - new_cred = kauth_cred_dup(model_cred); - err = kauth_cred_add(new_cred); + bool needs_free = kauth_cred_unref_fast(old_cred); + if (needs_free) { + kauth_cred_remove_locked(old_cred); + } KAUTH_CRED_HASH_UNLOCK(); - /* retry if kauth_cred_add returns non zero value */ - if (err == 0) - break; -#if CONFIG_MACF - mac_cred_label_destroy(new_cred); -#endif - AUDIT_SESSION_UNREF(new_cred); - - FREE_ZONE(new_cred, sizeof(*new_cred), M_CRED); - new_cred = NULL; + DEBUG_CRED_CHANGE("kauth_cred_update(cache hit): %p -> %p\n", + old_cred, cred); + if (needs_free) { + kauth_cred_free(old_cred); + } + return cred; } - DEBUG_CRED_CHANGE("kauth_cred_update(cache miss): %p -> %p\n", old_cred, new_cred); + KAUTH_CRED_HASH_UNLOCK(); + + /* + * Must allocate a new credential using the model. Also + * adds the new credential to the credential hash table. + */ + cred = kauth_cred_dup(model_cred); + cred = kauth_cred_add(cred, bucket); + DEBUG_CRED_CHANGE("kauth_cred_update(cache miss): %p -> %p\n", + old_cred, cred); + + + /* + * This can't be done before the kauth_cred_dup() as the model_cred + * has pointers that old_cred owns references for. + */ kauth_cred_unref(&old_cred); - return(new_cred); + return cred; } @@ -5207,104 +5137,148 @@ kauth_cred_update(kauth_cred_t old_cred, kauth_cred_t model_cred, * kauth_cred_add * * Description: Add the given credential to our credential hash table and - * take an additional reference to account for our use of the - * credential in the hash table + * take an initial reference to account for the object being + * now valid. * * Parameters: new_cred Credential to insert into cred - * hash cache + * hash cache, or to destroy when + * a collision is detected.
* - * Returns: 0 Success - * -1 Hash insertion failed: caller - * should retry - * - * Locks: Caller is expected to hold KAUTH_CRED_HASH_LOCK + * Returns: (kauth_cred_t) The inserted cred, or the + * collision that was found. * * Notes: The 'new_cred' MUST NOT already be in the cred hash cache */ -static int -kauth_cred_add(kauth_cred_t new_cred) +static kauth_cred_t +kauth_cred_add(kauth_cred_t new_cred, struct kauth_cred_entry_head *bucket) { - u_long hash_key; + kauth_cred_t found_cred; + u_long old_ref; - KAUTH_CRED_HASH_LOCK_ASSERT(); - - hash_key = kauth_cred_get_hashkey(new_cred); - hash_key %= KAUTH_CRED_TABLE_SIZE; + KAUTH_CRED_HASH_LOCK(); + found_cred = kauth_cred_find_and_ref(new_cred, bucket); + if (found_cred) { + KAUTH_CRED_HASH_UNLOCK(); + kauth_cred_free(new_cred); + return found_cred; + } - /* race fix - there is a window where another matching credential - * could have been inserted between the time this one was created and we - * got the hash lock. If we find a match return an error and have the - * the caller retry. - */ - if (kauth_cred_find(new_cred) != NULL) { - return(-1); + old_ref = os_atomic_xchg(&new_cred->cr_ref, 1, relaxed); + if (old_ref != 0) { + panic("kauth_cred_add: invalid cred %p", new_cred); } - - /* take a reference for our use in credential hash table */ - kauth_cred_ref(new_cred); /* insert the credential into the hash table */ - TAILQ_INSERT_HEAD(&kauth_cred_table_anchor[hash_key], new_cred, cr_link); - - return(0); + LIST_INSERT_HEAD(bucket, new_cred, cr_link); + + KAUTH_CRED_HASH_UNLOCK(); + return new_cred; } +/* + * kauth_cred_remove_locked + * + * Description: Remove the given credential from our credential hash table. + * + * Parameters: cred Credential to remove. + * + * Locks: Caller is expected to hold KAUTH_CRED_HASH_LOCK + */ +static void +kauth_cred_remove_locked(kauth_cred_t cred) +{ + KAUTH_CRED_HASH_LOCK_ASSERT(); + + if (cred->cr_link.le_prev == NULL) { + panic("kauth_cred_unref: cred %p never added", cred); + } + + LIST_REMOVE(cred, cr_link); +} -/* - * kauth_cred_remove +/* + * kauth_cred_is_equal * - * Description: Remove the given credential from our credential hash table + * Description: Returns whether two credentials are identical. * - * Parameters: cred Credential to remove from cred + * Parameters: cred1 Credential to compare + * cred2 Credential to compare + * + * Returns: true Credentials are equal + * false Credentials are different + */ +static bool +kauth_cred_is_equal(kauth_cred_t cred1, kauth_cred_t cred2) +{ + posix_cred_t pcred1 = posix_cred_get(cred1); + posix_cred_t pcred2 = posix_cred_get(cred2); + + /* + * don't worry about the label unless the flags in + * either credential tell us to. + */ + if (memcmp(pcred1, pcred2, sizeof(*pcred1))) { + return false; + } + if (memcmp(&cred1->cr_audit, &cred2->cr_audit, sizeof(cred1->cr_audit))) { + return false; + } +#if CONFIG_MACF + /* Note: we know the flags are equal, so we only need to test one */ + if (pcred1->cr_flags & CRF_MAC_ENFORCE) { + if (!mac_cred_label_compare(cred1->cr_label, cred2->cr_label)) { + return false; + } + } +#endif + return true; +} + +/* + * kauth_cred_find_and_ref + * + * Description: Using the given credential data, look for a match in our + * credential hash table + * + * Parameters: cred Credential to lookup in cred * hash cache * - * Returns: TRUE if the cred was found & removed from the hash; FALSE if not.
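kauth_cred_is_equal() above orders its checks from cheapest to most expensive: two flat memcmp passes run before the label walk, which runs only when the flags say a label is in force. A reduced illustration of the same idea, with a hypothetical label_compare() standing in for mac_cred_label_compare():

#include <stdbool.h>
#include <string.h>

struct cred_data { int uid; int gid; int flags; };
#define FLAG_MAC_ENFORCE 0x1            /* illustrative flag bit */

/* Hypothetical stand-in for the label comparison, assumed costly. */
bool label_compare(const void *l1, const void *l2);

static bool
cred_is_equal(const struct cred_data *a, const void *alabel,
    const struct cred_data *b, const void *blabel)
{
    /* Cheap flat compare first. */
    if (memcmp(a, b, sizeof(*a)) != 0) {
        return false;
    }
    /* The expensive label walk only when the (now known equal) flags
     * say a label is enforced; testing one side of the pair suffices. */
    if (a->flags & FLAG_MAC_ENFORCE) {
        return label_compare(alabel, blabel);
    }
    return true;
}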
+ * Returns: NULL Not found + * !NULL Matching credential already in + * cred hash cache, with a +1 ref * * Locks: Caller is expected to hold KAUTH_CRED_HASH_LOCK - * - * Notes: The check for the reference increment after entry is generally - * agree to be safe, since we use atomic operations, and the - * following code occurs with the hash lock held; in theory, this - * protects us from the 2->1 reference that gets us here. */ -static boolean_t -kauth_cred_remove(kauth_cred_t cred) +static kauth_cred_t +kauth_cred_find_and_ref(kauth_cred_t cred, struct kauth_cred_entry_head *bucket) { - u_long hash_key; - kauth_cred_t found_cred; + kauth_cred_t found_cred; - hash_key = kauth_cred_get_hashkey(cred); - hash_key %= KAUTH_CRED_TABLE_SIZE; + KAUTH_CRED_HASH_LOCK_ASSERT(); - /* Avoid race */ - if (cred->cr_ref < 1) - panic("cred reference underflow"); - if (cred->cr_ref > 1) - return (FALSE); /* someone else got a ref */ - /* Find cred in the credential hash table */ - TAILQ_FOREACH(found_cred, &kauth_cred_table_anchor[hash_key], cr_link) { - if (found_cred == cred) { - /* found a match, remove it from the hash table */ - TAILQ_REMOVE(&kauth_cred_table_anchor[hash_key], found_cred, cr_link); -#if KAUTH_CRED_HASH_DEBUG - kauth_cred_count--; -#endif - return (TRUE); + LIST_FOREACH(found_cred, bucket, cr_link) { + if (kauth_cred_is_equal(found_cred, cred)) { + /* + * newer entries are inserted at the head, + * no hit further in the chain can possibly + * be successfully retained. + */ + if (!kauth_cred_tryref(found_cred)) { + found_cred = NULL; + } + break; } } - /* Did not find a match... this should not happen! XXX Make panic? */ - printf("%s:%d - %s - %s - did not find a match for %p\n", __FILE__, __LINE__, __FUNCTION__, current_proc()->p_comm, cred); - return (FALSE); + return found_cred; } - -/* +/* * kauth_cred_find * - * Description: Using the given credential data, look for a match in our - * credential hash table + * Description: This interface is sadly KPI but people can't possibly use it, + * as they need to hold a lock that isn't exposed. * * Parameters: cred Credential to lookup in cred * hash cache @@ -5318,51 +5292,19 @@ kauth_cred_remove(kauth_cred_t cred) kauth_cred_t kauth_cred_find(kauth_cred_t cred) { - u_long hash_key; - kauth_cred_t found_cred; - posix_cred_t pcred = posix_cred_get(cred); + struct kauth_cred_entry_head *bucket = kauth_cred_get_bucket(cred); + kauth_cred_t found_cred; KAUTH_CRED_HASH_LOCK_ASSERT(); -#if KAUTH_CRED_HASH_DEBUG - static int test_count = 0; - - test_count++; - if ((test_count % 200) == 0) { - kauth_cred_hash_print(); - } -#endif - - hash_key = kauth_cred_get_hashkey(cred); - hash_key %= KAUTH_CRED_TABLE_SIZE; - /* Find cred in the credential hash table */ - TAILQ_FOREACH(found_cred, &kauth_cred_table_anchor[hash_key], cr_link) { - boolean_t match; - posix_cred_t found_pcred = posix_cred_get(found_cred); - - /* - * don't worry about the label unless the flags in - * either credential tell us to. - */ - match = (bcmp(found_pcred, pcred, sizeof (*pcred)) == 0) ? TRUE : FALSE; - match = match && ((bcmp(&found_cred->cr_audit, &cred->cr_audit, - sizeof(cred->cr_audit)) == 0) ? 
TRUE : FALSE); -#if CONFIG_MACF - if (((found_pcred->cr_flags & CRF_MAC_ENFORCE) != 0) || - ((pcred->cr_flags & CRF_MAC_ENFORCE) != 0)) { - match = match && mac_cred_label_compare(found_cred->cr_label, - cred->cr_label); - } -#endif - if (match) { - /* found a match */ - return(found_cred); + LIST_FOREACH(found_cred, bucket, cr_link) { + if (kauth_cred_is_equal(found_cred, cred)) { + break; } } - /* No match found */ - return(NULL); + return found_cred; } @@ -5381,8 +5323,8 @@ kauth_cred_find(kauth_cred_t cred) static inline u_long kauth_cred_hash(const uint8_t *datap, int data_len, u_long start_key) { - u_long hash_key = start_key; - u_long temp; + u_long hash_key = start_key; + u_long temp; while (data_len > 0) { hash_key = (hash_key << 4) + *datap++; @@ -5393,92 +5335,56 @@ kauth_cred_hash(const uint8_t *datap, int data_len, u_long start_key) hash_key &= ~temp; data_len--; } - return(hash_key); + return hash_key; } /* - * kauth_cred_get_hashkey + * kauth_cred_get_bucket * * Description: Generate a hash key using data that makes up a credential; * based on ElfHash. We hash on the entire credential data, * not including the ref count or the TAILQ, which are mutable; * everything else isn't. * + * Returns the bucket corresponding to this hash key. * + * Parameters: cred Credential for which hash is * desired * - * Returns: (u_long) Returned hash key + * Returns: (kauth_cred_entry_head *) Returned bucket. * * Notes: When actually moving the POSIX credential into a real label, * remember to update this hash computation. */ -static u_long -kauth_cred_get_hashkey(kauth_cred_t cred) +static struct kauth_cred_entry_head * +kauth_cred_get_bucket(kauth_cred_t cred) { #if CONFIG_MACF posix_cred_t pcred = posix_cred_get(cred); #endif - u_long hash_key = 0; - - hash_key = kauth_cred_hash((uint8_t *)&cred->cr_posix, - sizeof (struct posix_cred), - hash_key); - hash_key = kauth_cred_hash((uint8_t *)&cred->cr_audit, - sizeof(struct au_session), - hash_key); + u_long hash_key = 0; + + hash_key = kauth_cred_hash((uint8_t *)&cred->cr_posix, + sizeof(struct posix_cred), + hash_key); + hash_key = kauth_cred_hash((uint8_t *)&cred->cr_audit, + sizeof(struct au_session), + hash_key); #if CONFIG_MACF if (pcred->cr_flags & CRF_MAC_ENFORCE) { - hash_key = kauth_cred_hash((uint8_t *)cred->cr_label, - sizeof (struct label), - hash_key); + hash_key = kauth_cred_hash((uint8_t *)cred->cr_label, + sizeof(struct label), + hash_key); } #endif - return(hash_key); -} - -#if KAUTH_CRED_HASH_DEBUG -/* - * kauth_cred_hash_print - * - * Description: Print out cred hash cache table information for debugging - * purposes, including the credential contents - * - * Parameters: (void) - * - * Returns: (void) - * - * Implicit returns: Results in console output - */ -static void -kauth_cred_hash_print(void) -{ - int i, j; - kauth_cred_t found_cred; - - printf("\n\t kauth credential hash table statistics - current cred count %d \n", kauth_cred_count); - /* count slot hits, misses, collisions, and max depth */ - for (i = 0; i < KAUTH_CRED_TABLE_SIZE; i++) { - printf("[%02d] ", i); - j = 0; - TAILQ_FOREACH(found_cred, &kauth_cred_table_anchor[i], cr_link) { - if (j > 0) { - printf("---- "); - } - j++; - kauth_cred_print(found_cred); - printf("\n"); - } - if (j == 0) { - printf("NOCRED \n"); - } - } + hash_key %= KAUTH_CRED_TABLE_SIZE; + return &kauth_cred_table_anchor[hash_key]; } -#endif /* KAUTH_CRED_HASH_DEBUG */ -#if (defined(KAUTH_CRED_HASH_DEBUG) && (KAUTH_CRED_HASH_DEBUG != 0)) || defined(DEBUG_CRED) +#ifdef DEBUG_CRED /* *
kauth_cred_print * @@ -5492,112 +5398,134 @@ kauth_cred_hash_print(void) * Implicit returns: Results in console output */ void -kauth_cred_print(kauth_cred_t cred) +kauth_cred_print(kauth_cred_t cred) { - int i; + int i; printf("%p - refs %lu flags 0x%08x uids e%d r%d sv%d gm%d ", cred, cred->cr_ref, cred->cr_flags, cred->cr_uid, cred->cr_ruid, cred->cr_svuid, cred->cr_gmuid); printf("group count %d gids ", cred->cr_ngroups); for (i = 0; i < NGROUPS; i++) { - if (i == 0) + if (i == 0) { printf("e"); + } printf("%d ", cred->cr_groups[i]); } printf("r%d sv%d ", cred->cr_rgid, cred->cr_svgid); - printf("auditinfo_addr %d %d %d %d %d %d\n", - cred->cr_audit.s_aia_p->ai_auid, - cred->cr_audit.as_mask.am_success, - cred->cr_audit.as_mask.am_failure, - cred->cr_audit.as_aia_p->ai_termid.at_port, - cred->cr_audit.as_aia_p->ai_termid.at_addr[0], - cred->cr_audit.as_aia_p->ai_asid); + printf("auditinfo_addr %d %d %d %d %d %d\n", + cred->cr_audit.s_aia_p->ai_auid, + cred->cr_audit.as_mask.am_success, + cred->cr_audit.as_mask.am_failure, + cred->cr_audit.as_aia_p->ai_termid.at_port, + cred->cr_audit.as_aia_p->ai_termid.at_addr[0], + cred->cr_audit.as_aia_p->ai_asid); } -int is_target_cred( kauth_cred_t the_cred ) +int +is_target_cred( kauth_cred_t the_cred ) { - if ( the_cred->cr_uid != 0 ) - return( 0 ); - if ( the_cred->cr_ruid != 0 ) - return( 0 ); - if ( the_cred->cr_svuid != 0 ) - return( 0 ); - if ( the_cred->cr_ngroups != 11 ) - return( 0 ); - if ( the_cred->cr_groups[0] != 11 ) - return( 0 ); - if ( the_cred->cr_groups[1] != 81 ) - return( 0 ); - if ( the_cred->cr_groups[2] != 63947 ) - return( 0 ); - if ( the_cred->cr_groups[3] != 80288 ) - return( 0 ); - if ( the_cred->cr_groups[4] != 89006 ) - return( 0 ); - if ( the_cred->cr_groups[5] != 52173 ) - return( 0 ); - if ( the_cred->cr_groups[6] != 84524 ) - return( 0 ); - if ( the_cred->cr_groups[7] != 79 ) - return( 0 ); - if ( the_cred->cr_groups[8] != 80292 ) - return( 0 ); - if ( the_cred->cr_groups[9] != 80 ) - return( 0 ); - if ( the_cred->cr_groups[10] != 90824 ) - return( 0 ); - if ( the_cred->cr_rgid != 11 ) - return( 0 ); - if ( the_cred->cr_svgid != 11 ) - return( 0 ); - if ( the_cred->cr_gmuid != 3475 ) - return( 0 ); - if ( the_cred->cr_audit.as_aia_p->ai_auid != 3475 ) - return( 0 ); + if (the_cred->cr_uid != 0) { + return 0; + } + if (the_cred->cr_ruid != 0) { + return 0; + } + if (the_cred->cr_svuid != 0) { + return 0; + } + if (the_cred->cr_ngroups != 11) { + return 0; + } + if (the_cred->cr_groups[0] != 11) { + return 0; + } + if (the_cred->cr_groups[1] != 81) { + return 0; + } + if (the_cred->cr_groups[2] != 63947) { + return 0; + } + if (the_cred->cr_groups[3] != 80288) { + return 0; + } + if (the_cred->cr_groups[4] != 89006) { + return 0; + } + if (the_cred->cr_groups[5] != 52173) { + return 0; + } + if (the_cred->cr_groups[6] != 84524) { + return 0; + } + if (the_cred->cr_groups[7] != 79) { + return 0; + } + if (the_cred->cr_groups[8] != 80292) { + return 0; + } + if (the_cred->cr_groups[9] != 80) { + return 0; + } + if (the_cred->cr_groups[10] != 90824) { + return 0; + } + if (the_cred->cr_rgid != 11) { + return 0; + } + if (the_cred->cr_svgid != 11) { + return 0; + } + if (the_cred->cr_gmuid != 3475) { + return 0; + } + if (the_cred->cr_audit.as_aia_p->ai_auid != 3475) { + return 0; + } /* - if ( the_cred->cr_audit.as_mask.am_success != 0 ) - return( 0 ); - if ( the_cred->cr_audit.as_mask.am_failure != 0 ) - return( 0 ); - if ( the_cred->cr_audit.as_aia_p->ai_termid.at_port != 0 ) - return( 0 ); - if ( 
the_cred->cr_audit.as_aia_p->ai_termid.at_addr[0] != 0 ) - return( 0 ); - if ( the_cred->cr_audit.as_aia_p->ai_asid != 0 ) - return( 0 ); - if ( the_cred->cr_flags != 0 ) - return( 0 ); -*/ - return( -1 ); // found target cred + * if ( the_cred->cr_audit.as_mask.am_success != 0 ) + * return( 0 ); + * if ( the_cred->cr_audit.as_mask.am_failure != 0 ) + * return( 0 ); + * if ( the_cred->cr_audit.as_aia_p->ai_termid.at_port != 0 ) + * return( 0 ); + * if ( the_cred->cr_audit.as_aia_p->ai_termid.at_addr[0] != 0 ) + * return( 0 ); + * if ( the_cred->cr_audit.as_aia_p->ai_asid != 0 ) + * return( 0 ); + * if ( the_cred->cr_flags != 0 ) + * return( 0 ); + */ + return -1; // found target cred } -void get_backtrace( void ) +void +get_backtrace( void ) { - int my_slot; - void * my_stack[ MAX_STACK_DEPTH ]; - int i, my_depth; - - if ( cred_debug_buf_p == NULL ) { + int my_slot; + void * my_stack[MAX_STACK_DEPTH]; + int i, my_depth; + + if (cred_debug_buf_p == NULL) { MALLOC(cred_debug_buf_p, cred_debug_buffer *, sizeof(*cred_debug_buf_p), M_KAUTH, M_WAITOK); bzero(cred_debug_buf_p, sizeof(*cred_debug_buf_p)); - } + } - if ( cred_debug_buf_p->next_slot > (MAX_CRED_BUFFER_SLOTS - 1) ) { + if (cred_debug_buf_p->next_slot > (MAX_CRED_BUFFER_SLOTS - 1)) { /* buffer is full */ return; } - + my_depth = OSBacktrace(&my_stack[0], MAX_STACK_DEPTH); - if ( my_depth == 0 ) { + if (my_depth == 0) { printf("%s - OSBacktrace failed \n", __FUNCTION__); return; } - + /* fill new backtrace */ my_slot = cred_debug_buf_p->next_slot; cred_debug_buf_p->next_slot++; - cred_debug_buf_p->stack_buffer[ my_slot ].depth = my_depth; - for ( i = 0; i < my_depth; i++ ) { - cred_debug_buf_p->stack_buffer[ my_slot ].stack[ i ] = my_stack[ i ]; + cred_debug_buf_p->stack_buffer[my_slot].depth = my_depth; + for (i = 0; i < my_depth; i++) { + cred_debug_buf_p->stack_buffer[my_slot].stack[i] = my_stack[i]; } return; @@ -5606,42 +5534,43 @@ void get_backtrace( void ) /* subset of struct ucred for use in sysctl_dump_creds */ struct debug_ucred { - void *credp; - u_long cr_ref; /* reference count */ - uid_t cr_uid; /* effective user id */ - uid_t cr_ruid; /* real user id */ - uid_t cr_svuid; /* saved user id */ - short cr_ngroups; /* number of groups in advisory list */ - gid_t cr_groups[NGROUPS]; /* advisory group list */ - gid_t cr_rgid; /* real group id */ - gid_t cr_svgid; /* saved group id */ - uid_t cr_gmuid; /* UID for group membership purposes */ - struct auditinfo_addr cr_audit; /* user auditing data. */ - void *cr_label; /* MACF label */ - int cr_flags; /* flags on credential */ + void *credp; + u_long cr_ref; /* reference count */ + uid_t cr_uid; /* effective user id */ + uid_t cr_ruid; /* real user id */ + uid_t cr_svuid; /* saved user id */ + short cr_ngroups; /* number of groups in advisory list */ + gid_t cr_groups[NGROUPS]; /* advisory group list */ + gid_t cr_rgid; /* real group id */ + gid_t cr_svgid; /* saved group id */ + uid_t cr_gmuid; /* UID for group membership purposes */ + struct auditinfo_addr cr_audit; /* user auditing data. 
*/ + void *cr_label; /* MACF label */ + int cr_flags; /* flags on credential */ }; typedef struct debug_ucred debug_ucred; SYSCTL_PROC(_kern, OID_AUTO, dump_creds, CTLFLAG_RD, NULL, 0, sysctl_dump_creds, "S,debug_ucred", "List of credentials in the cred hash"); -/* accessed by: +/* accessed by: * err = sysctlbyname( "kern.dump_creds", bufp, &len, NULL, 0 ); */ static int sysctl_dump_creds( __unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req ) { - int i, j, counter = 0; - int error; - size_t space; - kauth_cred_t found_cred; - debug_ucred * cred_listp; - debug_ucred * nextp; + int i, j, counter = 0; + int error; + size_t space; + kauth_cred_t found_cred; + debug_ucred * cred_listp; + debug_ucred * nextp; /* This is a readonly node. */ - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } /* calculate space needed */ for (i = 0; i < KAUTH_CRED_TABLE_SIZE; i++) { @@ -5658,10 +5587,10 @@ sysctl_dump_creds( __unused struct sysctl_oid *oidp, __unused void *arg1, __unus } MALLOC( cred_listp, debug_ucred *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO); - if ( cred_listp == NULL ) { - return (ENOMEM); + if (cred_listp == NULL) { + return ENOMEM; } - + /* fill in creds to send back */ nextp = cred_listp; space = 0; @@ -5673,8 +5602,8 @@ sysctl_dump_creds( __unused struct sysctl_oid *oidp, __unused void *arg1, __unus nextp->cr_ruid = found_cred->cr_ruid; nextp->cr_svuid = found_cred->cr_svuid; nextp->cr_ngroups = found_cred->cr_ngroups; - for ( j = 0; j < nextp->cr_ngroups; j++ ) { - nextp->cr_groups[ j ] = found_cred->cr_groups[ j ]; + for (j = 0; j < nextp->cr_ngroups; j++) { + nextp->cr_groups[j] = found_cred->cr_groups[j]; } nextp->cr_rgid = found_cred->cr_rgid; nextp->cr_svgid = found_cred->cr_svgid; @@ -5705,46 +5634,47 @@ sysctl_dump_creds( __unused struct sysctl_oid *oidp, __unused void *arg1, __unus nextp->cr_flags = found_cred->cr_flags; nextp++; space += sizeof(debug_ucred); - if ( space > req->oldlen ) { + if (space > req->oldlen) { FREE(cred_listp, M_TEMP); - return (ENOMEM); + return ENOMEM; } } } req->oldlen = space; error = SYSCTL_OUT(req, cred_listp, req->oldlen); FREE(cred_listp, M_TEMP); - return (error); + return error; } SYSCTL_PROC(_kern, OID_AUTO, cred_bt, CTLFLAG_RD, NULL, 0, sysctl_dump_cred_backtraces, "S,cred_debug_buffer", "dump credential backtrace"); -/* accessed by: +/* accessed by: * err = sysctlbyname( "kern.cred_bt", bufp, &len, NULL, 0 ); */ static int sysctl_dump_cred_backtraces( __unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req ) { - int i, j; - int error; - size_t space; - cred_debug_buffer * bt_bufp; - cred_backtrace * nextp; + int i, j; + int error; + size_t space; + cred_debug_buffer * bt_bufp; + cred_backtrace * nextp; /* This is a readonly node. */ - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } - if ( cred_debug_buf_p == NULL ) { - return (EAGAIN); + if (cred_debug_buf_p == NULL) { + return EAGAIN; } /* calculate space needed */ - space = sizeof( cred_debug_buf_p->next_slot ); - space += (sizeof( cred_backtrace ) * cred_debug_buf_p->next_slot); + space = sizeof(cred_debug_buf_p->next_slot); + space += (sizeof(cred_backtrace) * cred_debug_buf_p->next_slot); /* they are querying us so just return the space required. 
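These handlers implement the usual two-call sysctl protocol that the "accessed by" comments describe: a NULL output buffer asks for the required size, a second call fetches the data. A user-space consumer would look like the sketch below (kern.dump_creds exists only on kernels built with DEBUG_CRED; error handling trimmed):

/* User-space consumer of the size-query-then-fetch protocol (macOS). */
#include <stdio.h>
#include <stdlib.h>
#include <sys/sysctl.h>

int
main(void)
{
    size_t len = 0;

    /* Pass NULL to learn the required buffer size... */
    if (sysctlbyname("kern.dump_creds", NULL, &len, NULL, 0) != 0) {
        perror("sysctlbyname(size)");
        return 1;
    }
    void *buf = malloc(len);
    /* ...then fetch for real.  The handler returns ENOMEM if the cred
     * table grew past our buffer between the two calls. */
    if (sysctlbyname("kern.dump_creds", buf, &len, NULL, 0) != 0) {
        perror("sysctlbyname(fetch)");
        free(buf);
        return 1;
    }
    printf("%zu bytes of credential records\n", len);
    free(buf);
    return 0;
}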
*/ if (req->oldptr == USER_ADDR_NULL) { @@ -5752,24 +5682,24 @@ sysctl_dump_cred_backtraces( __unused struct sysctl_oid *oidp, __unused void *ar return 0; } - if ( space > req->oldlen ) { - return (ENOMEM); + if (space > req->oldlen) { + return ENOMEM; } MALLOC( bt_bufp, cred_debug_buffer *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO); - if ( bt_bufp == NULL ) { - return (ENOMEM); + if (bt_bufp == NULL) { + return ENOMEM; } - + /* fill in backtrace info to send back */ bt_bufp->next_slot = cred_debug_buf_p->next_slot; space = sizeof(bt_bufp->next_slot); - - nextp = &bt_bufp->stack_buffer[ 0 ]; + + nextp = &bt_bufp->stack_buffer[0]; for (i = 0; i < cred_debug_buf_p->next_slot; i++) { - nextp->depth = cred_debug_buf_p->stack_buffer[ i ].depth; - for ( j = 0; j < nextp->depth; j++ ) { - nextp->stack[ j ] = cred_debug_buf_p->stack_buffer[ i ].stack[ j ]; + nextp->depth = cred_debug_buf_p->stack_buffer[i].depth; + for (j = 0; j < nextp->depth; j++) { + nextp->stack[j] = cred_debug_buf_p->stack_buffer[i].stack[j]; } space += sizeof(*nextp); nextp++; @@ -5777,10 +5707,10 @@ sysctl_dump_cred_backtraces( __unused struct sysctl_oid *oidp, __unused void *ar req->oldlen = space; error = SYSCTL_OUT(req, bt_bufp, req->oldlen); FREE(bt_bufp, M_TEMP); - return (error); + return error; } -#endif /* KAUTH_CRED_HASH_DEBUG || DEBUG_CRED */ +#endif /* DEBUG_CRED */ /* @@ -5874,7 +5804,7 @@ posix_cred_create(posix_cred_t pcred) posix_cred_t posix_cred_get(kauth_cred_t cred) { - return(&cred->cr_posix); + return &cred->cr_posix; } @@ -5902,7 +5832,7 @@ posix_cred_get(kauth_cred_t cred) void posix_cred_label(kauth_cred_t cred, posix_cred_t pcred) { - cred->cr_posix = *pcred; /* structure assign for now */ + cred->cr_posix = *pcred; /* structure assign for now */ } @@ -5936,8 +5866,9 @@ posix_cred_access(kauth_cred_t cred, id_t object_uid, id_t object_gid, mode_t ob /* * Check first for owner rights */ - if (kauth_cred_getuid(cred) == object_uid && (mode_req & mode_owner) == mode_req) - return (0); + if (kauth_cred_getuid(cred) == object_uid && (mode_req & mode_owner) == mode_req) { + return 0; + } /* * Combined group and world rights check, if we don't have owner rights @@ -5947,7 +5878,7 @@ posix_cred_access(kauth_cred_t cred, id_t object_uid, id_t object_gid, mode_t ob * world rights, avoiding a group membership check, which is expensive. */ if ((mode_req & mode_group & mode_world) == mode_req) { - return (0); + return 0; } else { /* * NON-OPTIMIZED: requires group membership check. @@ -5962,18 +5893,18 @@ posix_cred_access(kauth_cred_t cred, id_t object_uid, id_t object_gid, mode_t ob /* * DENY: +group denies */ - return (EACCES); + return EACCES; } else { if ((mode_req & mode_world) != mode_req) { /* * DENY: both -group & world would deny */ - return (EACCES); + return EACCES; } else { /* * ALLOW: allowed by -group and +world */ - return (0); + return 0; } } } else { @@ -5987,18 +5918,18 @@ posix_cred_access(kauth_cred_t cred, id_t object_uid, id_t object_gid, mode_t ob /* * ALLOW: allowed by +group */ - return (0); + return 0; } else { if ((mode_req & mode_world) != mode_req) { /* * DENY: both -group & world would deny */ - return (EACCES); + return EACCES; } else { /* * ALLOW: allowed by -group and +world */ - return (0); + return 0; } } } diff --git a/bsd/kern/kern_cs.c b/bsd/kern/kern_cs.c index 2b40cea3e..c6ab1e5bf 100644 --- a/bsd/kern/kern_cs.c +++ b/bsd/kern/kern_cs.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -87,23 +87,23 @@ unsigned int cs_debug_unsigned_mmap_failures = 0; #if SECURE_KERNEL /* -Here we split cs_enforcement_enable into cs_system_enforcement_enable and cs_process_enforcement_enable - -cs_system_enforcement_enable governs whether or not system level code signing enforcement mechanisms -are applied on the system. Today, the only such mechanism is code signing enforcement of the dyld shared -cache. - -cs_process_enforcement_enable governs whether code signing enforcement mechanisms are applied to all -processes or only those that opt into such enforcement. - -(On iOS and related, both of these are set by default. On macOS, only cs_system_enforcement_enable -is set by default. Processes can then be opted into code signing enforcement on a case by case basis.) + * Here we split cs_enforcement_enable into cs_system_enforcement_enable and cs_process_enforcement_enable + * + * cs_system_enforcement_enable governs whether or not system level code signing enforcement mechanisms + * are applied on the system. Today, the only such mechanism is code signing enforcement of the dyld shared + * cache. + * + * cs_process_enforcement_enable governs whether code signing enforcement mechanisms are applied to all + * processes or only those that opt into such enforcement. + * + * (On iOS and related, both of these are set by default. On macOS, only cs_system_enforcement_enable + * is set by default. Processes can then be opted into code signing enforcement on a case by case basis.) 
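The policy split this comment describes reduces to a two-level check: a global switch short-circuits everything, otherwise a per-process flag decides. Schematically (illustrative types; the flag value here is only a placeholder, the real one lives in cs_blobs.h):

#define CS_ENFORCEMENT 0x1000           /* illustrative; see cs_blobs.h */

struct process { unsigned int csflags; };

/* Mirrors the shape of cs_process_enforcement(): globally on
 * (e.g. iOS), or opted in per process (e.g. macOS). */
static int
process_is_enforced(const struct process *p, int global_enable)
{
    if (global_enable) {
        return 1;
    }
    return (p != NULL && (p->csflags & CS_ENFORCEMENT)) ? 1 : 0;
}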
*/ const int cs_system_enforcement_enable = 1; const int cs_process_enforcement_enable = 1; const int cs_library_val_enable = 1; #else /* !SECURE_KERNEL */ -int cs_enforcement_panic=0; +int cs_enforcement_panic = 0; int cs_relax_platform_task_ports = 0; #if CONFIG_ENFORCE_SIGNED_CODE @@ -132,11 +132,11 @@ SYSCTL_INT(_vm, OID_AUTO, cs_force_kill, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_force_ SYSCTL_INT(_vm, OID_AUTO, cs_force_hard, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_force_hard, 0, ""); SYSCTL_INT(_vm, OID_AUTO, cs_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_debug, 0, ""); SYSCTL_INT(_vm, OID_AUTO, cs_debug_fail_on_unsigned_code, CTLFLAG_RW | CTLFLAG_LOCKED, - &cs_debug_fail_on_unsigned_code, 0, ""); + &cs_debug_fail_on_unsigned_code, 0, ""); SYSCTL_UINT(_vm, OID_AUTO, cs_debug_unsigned_exec_failures, CTLFLAG_RD | CTLFLAG_LOCKED, - &cs_debug_unsigned_exec_failures, 0, ""); + &cs_debug_unsigned_exec_failures, 0, ""); SYSCTL_UINT(_vm, OID_AUTO, cs_debug_unsigned_mmap_failures, CTLFLAG_RD | CTLFLAG_LOCKED, - &cs_debug_unsigned_mmap_failures, 0, ""); + &cs_debug_unsigned_mmap_failures, 0, ""); SYSCTL_INT(_vm, OID_AUTO, cs_all_vnodes, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_all_vnodes, 0, ""); @@ -161,11 +161,11 @@ cs_init(void) #endif /* watchos || x86_64 */ #endif /* MACH_ASSERT */ PE_parse_boot_argn("panic_on_cs_killed", &panic_on_cs_killed, - sizeof (panic_on_cs_killed)); + sizeof(panic_on_cs_killed)); #if !SECURE_KERNEL int disable_cs_enforcement = 0; - PE_parse_boot_argn("cs_enforcement_disable", &disable_cs_enforcement, - sizeof (disable_cs_enforcement)); + PE_parse_boot_argn("cs_enforcement_disable", &disable_cs_enforcement, + sizeof(disable_cs_enforcement)); if (disable_cs_enforcement && PE_i_can_has_debugger(NULL) != 0) { cs_system_enforcement_enable = 0; cs_process_enforcement_enable = 0; @@ -176,14 +176,14 @@ cs_init(void) } PE_parse_boot_argn("cs_relax_platform_task_ports", - &cs_relax_platform_task_ports, - sizeof(cs_relax_platform_task_ports)); + &cs_relax_platform_task_ports, + sizeof(cs_relax_platform_task_ports)); - PE_parse_boot_argn("cs_debug", &cs_debug, sizeof (cs_debug)); + PE_parse_boot_argn("cs_debug", &cs_debug, sizeof(cs_debug)); #if !CONFIG_ENFORCE_LIBRARY_VALIDATION PE_parse_boot_argn("cs_library_val_enable", &cs_library_val_enable, - sizeof (cs_library_val_enable)); + sizeof(cs_library_val_enable)); #endif #endif /* !SECURE_KERNEL */ @@ -200,28 +200,30 @@ cs_allow_invalid(struct proc *p) #endif #if CONFIG_MACF /* There needs to be a MAC policy to implement this hook, or else the - * kill bits will be cleared here every time. If we have + * kill bits will be cleared here every time. If we have * CONFIG_ENFORCE_SIGNED_CODE, we can assume there is a policy - * implementing the hook. + * implementing the hook. 
*/ - if( 0 != mac_proc_check_run_cs_invalid(p)) { - if(cs_debug) printf("CODE SIGNING: cs_allow_invalid() " - "not allowed: pid %d\n", - p->p_pid); + if (0 != mac_proc_check_run_cs_invalid(p)) { + if (cs_debug) { + printf("CODE SIGNING: cs_allow_invalid() " + "not allowed: pid %d\n", + p->p_pid); + } return 0; } - if(cs_debug) printf("CODE SIGNING: cs_allow_invalid() " - "allowed: pid %d\n", - p->p_pid); + if (cs_debug) { + printf("CODE SIGNING: cs_allow_invalid() " + "allowed: pid %d\n", + p->p_pid); + } proc_lock(p); p->p_csflags &= ~(CS_KILL | CS_HARD); - if (p->p_csflags & CS_VALID) - { + if (p->p_csflags & CS_VALID) { p->p_csflags |= CS_DEBUGGED; } - proc_unlock(p); - + vm_map_switch_protect(get_task_map(p->task), FALSE); #endif return (p->p_csflags & (CS_KILL | CS_HARD)) == 0; @@ -230,23 +232,26 @@ cs_allow_invalid(struct proc *p) int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed) { - struct proc *p; - int send_kill = 0, retval = 0, verbose = cs_debug; - uint32_t csflags; + struct proc *p; + int send_kill = 0, retval = 0, verbose = cs_debug; + uint32_t csflags; p = current_proc(); - if (verbose) + if (verbose) { printf("CODE SIGNING: cs_invalid_page(0x%llx): p=%d[%s]\n", vaddr, p->p_pid, p->p_comm); + } proc_lock(p); /* XXX for testing */ - if (cs_force_kill) + if (cs_force_kill) { p->p_csflags |= CS_KILL; - if (cs_force_hard) + } + if (cs_force_hard) { p->p_csflags |= CS_HARD; + } /* CS_KILL triggers a kill signal, and no you can't have the page. Nothing else. */ if (p->p_csflags & CS_KILL) { @@ -255,7 +260,7 @@ cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed) send_kill = 1; retval = 1; } - + /* CS_HARD means fail the mapping operation so the process stays valid. */ if (p->p_csflags & CS_HARD) { retval = 1; @@ -269,12 +274,13 @@ cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed) csflags = p->p_csflags; proc_unlock(p); - if (verbose) + if (verbose) { printf("CODE SIGNING: cs_invalid_page(0x%llx): " - "p=%d[%s] final status 0x%x, %s page%s\n", - vaddr, p->p_pid, p->p_comm, p->p_csflags, - retval ? "denying" : "allowing (remove VALID)", - send_kill ? " sending SIGKILL" : ""); + "p=%d[%s] final status 0x%x, %s page%s\n", + vaddr, p->p_pid, p->p_comm, p->p_csflags, + retval ? "denying" : "allowing (remove VALID)", + send_kill ? 
" sending SIGKILL" : ""); + } if (send_kill) { /* We will set the exit reason for the thread later */ @@ -297,15 +303,17 @@ cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed) int cs_process_enforcement(struct proc *p) { - - if (cs_process_enforcement_enable) + if (cs_process_enforcement_enable) { return 1; - - if (p == NULL) + } + + if (p == NULL) { p = current_proc(); + } - if (p != NULL && (p->p_csflags & CS_ENFORCEMENT)) + if (p != NULL && (p->p_csflags & CS_ENFORCEMENT)) { return 1; + } return 0; } @@ -328,32 +336,35 @@ cs_system_enforcement(void) int cs_valid(struct proc *p) { - - if (p == NULL) + if (p == NULL) { p = current_proc(); + } - if (p != NULL && (p->p_csflags & CS_VALID)) + if (p != NULL && (p->p_csflags & CS_VALID)) { return 1; + } return 0; } /* - * Library validation functions + * Library validation functions */ int cs_require_lv(struct proc *p) { - - if (cs_library_val_enable) + if (cs_library_val_enable) { return 1; + } - if (p == NULL) + if (p == NULL) { p = current_proc(); - - if (p != NULL && (p->p_csflags & CS_REQUIRE_LV)) + } + + if (p != NULL && (p->p_csflags & CS_REQUIRE_LV)) { return 1; - + } + return 0; } @@ -384,36 +395,36 @@ cs_system_require_lv(void) * * Description: This function returns the base offset into the (possibly universal) binary * for a given blob. -*/ + */ off_t csblob_get_base_offset(struct cs_blob *blob) { - return blob->csb_base_offset; + return blob->csb_base_offset; } /* * Function: csblob_get_size * * Description: This function returns the size of a given blob. -*/ + */ vm_size_t csblob_get_size(struct cs_blob *blob) { - return blob->csb_mem_size; + return blob->csb_mem_size; } /* * Function: csblob_get_addr * * Description: This function returns the address of a given blob. -*/ + */ vm_address_t csblob_get_addr(struct cs_blob *blob) { - return blob->csb_mem_kaddr; + return blob->csb_mem_kaddr; } /* @@ -421,38 +432,39 @@ csblob_get_addr(struct cs_blob *blob) * * Description: This function returns true if the binary is * in the trust cache. -*/ + */ int csblob_get_platform_binary(struct cs_blob *blob) { - if (blob && blob->csb_platform_binary) - return 1; - return 0; + if (blob && blob->csb_platform_binary) { + return 1; + } + return 0; } /* * Function: csblob_get_flags * * Description: This function returns the flags for a given blob -*/ + */ unsigned int csblob_get_flags(struct cs_blob *blob) { - return blob->csb_flags; + return blob->csb_flags; } /* * Function: csblob_get_hashtype * * Description: This function returns the hash type for a given blob -*/ + */ uint8_t csblob_get_hashtype(struct cs_blob const * const blob) { - return blob->csb_hashtype != NULL ? cs_hash_type(blob->csb_hashtype) : 0; + return blob->csb_hashtype != NULL ? 
cs_hash_type(blob->csb_hashtype) : 0; } /* @@ -464,11 +476,13 @@ csblob_get_hashtype(struct cs_blob const * const blob) struct cs_blob * csproc_get_blob(struct proc *p) { - if (NULL == p) + if (NULL == p) { return NULL; + } - if (NULL == p->p_textvp) + if (NULL == p->p_textvp) { return NULL; + } if ((p->p_csflags & CS_SIGNED) == 0) { return NULL; @@ -494,7 +508,7 @@ csvnode_get_blob(struct vnode *vp, off_t offset) * * Description: This function returns a pointer to the * team id of csblob -*/ + */ const char * csblob_get_teamid(struct cs_blob *csblob) { @@ -513,11 +527,13 @@ csblob_get_identity(struct cs_blob *csblob) const CS_CodeDirectory *cd; cd = (const CS_CodeDirectory *)csblob_find_blob(csblob, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY); - if (cd == NULL) + if (cd == NULL) { return NULL; + } - if (cd->identOffset == 0) + if (cd->identOffset == 0) { return NULL; + } return ((const char *)cd) + ntohl(cd->identOffset); } @@ -549,72 +565,80 @@ csblob_get_signer_type(struct cs_blob *csblob) void * csblob_entitlements_dictionary_copy(struct cs_blob *csblob) { - if (!csblob->csb_entitlements) return NULL; - osobject_retain(csblob->csb_entitlements); - return csblob->csb_entitlements; + if (!csblob->csb_entitlements) { + return NULL; + } + osobject_retain(csblob->csb_entitlements); + return csblob->csb_entitlements; } void csblob_entitlements_dictionary_set(struct cs_blob *csblob, void * entitlements) { - assert(csblob->csb_entitlements == NULL); - if (entitlements) osobject_retain(entitlements); - csblob->csb_entitlements = entitlements; + assert(csblob->csb_entitlements == NULL); + if (entitlements) { + osobject_retain(entitlements); + } + csblob->csb_entitlements = entitlements; } /* - * Function: csproc_get_teamid + * Function: csproc_get_teamid * * Description: This function returns a pointer to the * team id of the process p -*/ + */ const char * csproc_get_teamid(struct proc *p) { struct cs_blob *csblob; csblob = csproc_get_blob(p); - if (csblob == NULL) - return NULL; + if (csblob == NULL) { + return NULL; + } return csblob_get_teamid(csblob); } /* - * Function: csproc_get_signer_type + * Function: csproc_get_signer_type * * Description: This function returns the signer type * of the process p -*/ + */ unsigned int csproc_get_signer_type(struct proc *p) { struct cs_blob *csblob; csblob = csproc_get_blob(p); - if (csblob == NULL) - return CS_SIGNER_TYPE_UNKNOWN; + if (csblob == NULL) { + return CS_SIGNER_TYPE_UNKNOWN; + } return csblob_get_signer_type(csblob); } /* - * Function: csvnode_get_teamid + * Function: csvnode_get_teamid * * Description: This function returns a pointer to the * team id of the binary at the given offset in vnode vp -*/ + */ const char * csvnode_get_teamid(struct vnode *vp, off_t offset) { struct cs_blob *csblob; - if (vp == NULL) + if (vp == NULL) { return NULL; + } csblob = ubc_cs_blob_get(vp, -1, offset); - if (csblob == NULL) - return NULL; + if (csblob == NULL) { + return NULL; + } return csblob_get_teamid(csblob); } @@ -633,7 +657,7 @@ csproc_get_platform_binary(struct proc *p) csblob = csproc_get_blob(p); /* If there is no csblob this returns 0 because - it is true that it is not a platform binary */ + * it is true that it is not a platform binary */ return (csblob == NULL) ? 0 : csblob->csb_platform_binary; } @@ -642,7 +666,7 @@ csproc_get_platform_path(struct proc *p) { struct cs_blob *csblob; - csblob = csproc_get_blob(p); + csblob = csproc_get_blob(p); return (csblob == NULL) ? 
0 : csblob->csb_platform_path; } @@ -711,8 +735,9 @@ csproc_check_invalid_allowed(struct proc* __unused p) p = current_proc(); } - if (p != NULL && (p->p_csflags & CS_INVALID_ALLOWED)) + if (p != NULL && (p->p_csflags & CS_INVALID_ALLOWED)) { return 1; + } #endif return 0; } @@ -727,44 +752,49 @@ csproc_check_invalid_allowed(struct proc* __unused p) int csproc_get_prod_signed(struct proc *p) { - return ((p->p_csflags & CS_DEV_CODE) == 0); + return (p->p_csflags & CS_DEV_CODE) == 0; } /* * Function: csfg_get_platform_binary * - * Description: This function returns the - * platform binary field for the - * fileglob fg + * Description: This function returns the + * platform binary field for the + * fileglob fg */ -int +int csfg_get_platform_binary(struct fileglob *fg) { int platform_binary = 0; struct ubc_info *uip; vnode_t vp; - if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { return 0; - + } + vp = (struct vnode *)fg->fg_data; - if (vp == NULL) + if (vp == NULL) { return 0; + } vnode_lock(vp); - if (!UBCINFOEXISTS(vp)) + if (!UBCINFOEXISTS(vp)) { goto out; - + } + uip = vp->v_ubcinfo; - if (uip == NULL) + if (uip == NULL) { goto out; - - if (uip->cs_blobs == NULL) + } + + if (uip->cs_blobs == NULL) { goto out; + } /* It is OK to extract the teamid from the first blob - because all blobs of a vnode must have the same teamid */ + * because all blobs of a vnode must have the same teamid */ platform_binary = uip->cs_blobs->csb_platform_binary; out: vnode_unlock(vp); @@ -777,19 +807,23 @@ csfg_get_cdhash(struct fileglob *fg, uint64_t offset, size_t *cdhash_size) { vnode_t vp; - if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { return NULL; + } vp = (struct vnode *)fg->fg_data; - if (vp == NULL) + if (vp == NULL) { return NULL; + } struct cs_blob *csblob = NULL; - if ((csblob = ubc_cs_blob_get(vp, -1, offset)) == NULL) + if ((csblob = ubc_cs_blob_get(vp, -1, offset)) == NULL) { return NULL; + } - if (cdhash_size) + if (cdhash_size) { *cdhash_size = CS_CDHASH_LEN; + } return csblob->csb_cdhash; } @@ -798,7 +832,7 @@ csfg_get_cdhash(struct fileglob *fg, uint64_t offset, size_t *cdhash_size) * Function: csfg_get_signer_type * * Description: This returns the signer type - * for the fileglob fg + * for the fileglob fg */ unsigned int csfg_get_signer_type(struct fileglob *fg) @@ -807,26 +841,31 @@ csfg_get_signer_type(struct fileglob *fg) unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN; vnode_t vp; - if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { return CS_SIGNER_TYPE_UNKNOWN; - + } + vp = (struct vnode *)fg->fg_data; - if (vp == NULL) + if (vp == NULL) { return CS_SIGNER_TYPE_UNKNOWN; + } vnode_lock(vp); - if (!UBCINFOEXISTS(vp)) + if (!UBCINFOEXISTS(vp)) { goto out; - + } + uip = vp->v_ubcinfo; - if (uip == NULL) + if (uip == NULL) { goto out; - - if (uip->cs_blobs == NULL) + } + + if (uip->cs_blobs == NULL) { goto out; + } /* It is OK to extract the signer type from the first blob, - because all blobs of a vnode must have the same signer type. */ + * because all blobs of a vnode must have the same signer type. 
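csfg_get_platform_binary(), csfg_get_signer_type(), csfg_get_teamid(), and csfg_get_prod_signed() all share one shape: validate each link of the fileglob -> vnode -> ubc_info -> cs_blobs chain, then read the shared field from the first blob under the vnode lock. A reduced analogue of that shape (pthread mutex in place of vnode_lock(); illustrative types, not kernel code):

#include <pthread.h>
#include <stddef.h>

struct blob { const char *teamid; };      /* shared across all blobs */
struct node {
    pthread_mutex_t lock;
    struct blob    *blobs;                /* NULL until signed */
};

static const char *
node_get_teamid(struct node *n)
{
    const char *str = NULL;

    if (n == NULL) {
        return NULL;                      /* early out, no lock held */
    }
    pthread_mutex_lock(&n->lock);
    if (n->blobs != NULL) {
        /* All blobs of a node share one teamid, so the first suffices. */
        str = n->blobs->teamid;
    }
    pthread_mutex_unlock(&n->lock);       /* single unlock point */
    return str;
}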
*/ signer_type = uip->cs_blobs->csb_signer_type; out: vnode_unlock(vp); @@ -838,7 +877,7 @@ out: * Function: csfg_get_teamid * * Description: This returns a pointer to - * the teamid for the fileglob fg + * the teamid for the fileglob fg */ const char * csfg_get_teamid(struct fileglob *fg) @@ -847,26 +886,31 @@ csfg_get_teamid(struct fileglob *fg) const char *str = NULL; vnode_t vp; - if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { return NULL; - + } + vp = (struct vnode *)fg->fg_data; - if (vp == NULL) + if (vp == NULL) { return NULL; + } vnode_lock(vp); - if (!UBCINFOEXISTS(vp)) + if (!UBCINFOEXISTS(vp)) { goto out; - + } + uip = vp->v_ubcinfo; - if (uip == NULL) + if (uip == NULL) { goto out; - - if (uip->cs_blobs == NULL) + } + + if (uip->cs_blobs == NULL) { goto out; + } /* It is OK to extract the teamid from the first blob - because all blobs of a vnode must have the same teamid */ + * because all blobs of a vnode must have the same teamid */ str = uip->cs_blobs->csb_teamid; out: vnode_unlock(vp); @@ -888,26 +932,31 @@ csfg_get_prod_signed(struct fileglob *fg) vnode_t vp; int prod_signed = 0; - if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { return 0; - + } + vp = (struct vnode *)fg->fg_data; - if (vp == NULL) + if (vp == NULL) { return 0; + } vnode_lock(vp); - if (!UBCINFOEXISTS(vp)) + if (!UBCINFOEXISTS(vp)) { goto out; - + } + uip = vp->v_ubcinfo; - if (uip == NULL) + if (uip == NULL) { goto out; - - if (uip->cs_blobs == NULL) + } + + if (uip->cs_blobs == NULL) { goto out; + } /* It is OK to extract the flag from the first blob - because all blobs of a vnode must have the same cs_flags */ + * because all blobs of a vnode must have the same cs_flags */ prod_signed = (uip->cs_blobs->csb_flags & CS_DEV_CODE) == 0; out: vnode_unlock(vp); @@ -927,16 +976,19 @@ csfg_get_identity(struct fileglob *fg, off_t offset) vnode_t vp; struct cs_blob *csblob = NULL; - if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { return NULL; + } vp = (struct vnode *)fg->fg_data; - if (vp == NULL) + if (vp == NULL) { return NULL; + } csblob = ubc_cs_blob_get(vp, -1, offset); - if (csblob == NULL) + if (csblob == NULL) { return NULL; + } return csblob_get_identity(csblob); } @@ -953,12 +1005,14 @@ csfg_get_platform_identifier(struct fileglob *fg, off_t offset) { vnode_t vp; - if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { return 0; + } vp = (struct vnode *)fg->fg_data; - if (vp == NULL) + if (vp == NULL) { return 0; + } return csvnode_get_platform_identifier(vp, offset); } @@ -977,12 +1031,14 @@ csvnode_get_platform_identifier(struct vnode *vp, off_t offset) const CS_CodeDirectory *code_dir; csblob = ubc_cs_blob_get(vp, -1, offset); - if (csblob == NULL) + if (csblob == NULL) { return 0; + } code_dir = csblob->csb_cd; - if (code_dir == NULL || ntohl(code_dir->length) < 8) + if (code_dir == NULL || ntohl(code_dir->length) < 8) { return 0; + } return code_dir->platform; } @@ -997,8 +1053,9 @@ csvnode_get_platform_identifier(struct vnode *vp, off_t offset) uint8_t csproc_get_platform_identifier(struct proc *p) { - if (NULL == p->p_textvp) + if (NULL == p->p_textvp) { return 0; + } return csvnode_get_platform_identifier(p->p_textvp, p->p_textoff); } @@ -1006,7 +1063,7 @@ csproc_get_platform_identifier(struct proc *p) uint32_t cs_entitlement_flags(struct proc *p) { - return (p->p_csflags & CS_ENTITLEMENT_FLAGS); + return p->p_csflags & CS_ENTITLEMENT_FLAGS; } int @@ -1035,13 +1092,14 
@@ csfg_get_path(struct fileglob *fg, char *path, int *len) { vnode_t vp = NULL; - if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) + if (FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { return -1; - + } + vp = (struct vnode *)fg->fg_data; /* vn_getpath returns 0 for success, - or an error code */ + * or an error code */ return vn_getpath(vp, path, len); } @@ -1068,11 +1126,13 @@ cs_entitlements_blob_get(proc_t p, void **out_start, size_t *out_length) return 0; } - if (NULL == p->p_textvp) + if (NULL == p->p_textvp) { return EINVAL; + } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) + if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { return 0; + } return csblob_get_entitlements(csblob, out_start, out_length); } @@ -1092,11 +1152,13 @@ cs_identity_get(proc_t p) return NULL; } - if (NULL == p->p_textvp) + if (NULL == p->p_textvp) { return NULL; + } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) + if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { return NULL; + } return csblob_get_identity(csblob); } @@ -1118,11 +1180,13 @@ cs_blob_get(proc_t p, void **out_start, size_t *out_length) *out_start = NULL; *out_length = 0; - if (NULL == p->p_textvp) + if (NULL == p->p_textvp) { return EINVAL; + } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) + if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { return 0; + } *out_start = (void *)csblob->csb_mem_kaddr; *out_length = csblob->csb_mem_size; @@ -1143,11 +1207,13 @@ cs_get_cdhash(struct proc *p) return NULL; } - if (NULL == p->p_textvp) + if (NULL == p->p_textvp) { return NULL; + } - if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) + if ((csblob = ubc_cs_blob_get(p->p_textvp, -1, p->p_textoff)) == NULL) { return NULL; + } return csblob->csb_cdhash; } diff --git a/bsd/kern/kern_csr.c b/bsd/kern/kern_csr.c index 15a5ede70..6423d23ca 100644 --- a/bsd/kern/kern_csr.c +++ b/bsd/kern/kern_csr.c @@ -63,8 +63,9 @@ int csr_check(csr_config_t mask) { boot_args *args = (boot_args *)PE_state.bootArgs; - if (mask & CSR_ALLOW_DEVICE_CONFIGURATION) + if (mask & CSR_ALLOW_DEVICE_CONFIGURATION) { return (args->flags & kBootArgsFlagCSRConfigMode) ? 0 : EPERM; + } csr_config_t config; int ret = csr_get_active_config(&config); @@ -77,14 +78,16 @@ csr_check(csr_config_t mask) // CSR_ALLOW_UNTRUSTED_KEXTS as a proxy for "SIP is disabled" on the // grounds that you can do the same damage with a kernel debugger as // you can with an untrusted kext. - if ((config & (CSR_ALLOW_UNTRUSTED_KEXTS|CSR_ALLOW_APPLE_INTERNAL)) != 0) + if ((config & (CSR_ALLOW_UNTRUSTED_KEXTS | CSR_ALLOW_APPLE_INTERNAL)) != 0) { config |= CSR_ALLOW_KERNEL_DEBUGGER; + } ret = ((config & mask) == mask) ? 0 : EPERM; if (ret == EPERM) { // Override the return value if booted from the BaseSystem and the mask does not contain any flag that should always be enforced. 
- if (csr_allow_all && (mask & CSR_ALWAYS_ENFORCED_FLAGS) == 0) + if (csr_allow_all && (mask & CSR_ALWAYS_ENFORCED_FLAGS) == 0) { ret = 0; + } } return ret; @@ -104,12 +107,14 @@ syscall_csr_check(struct csrctl_args *args) csr_config_t mask = 0; int error = 0; - if (args->useraddr == 0 || args->usersize != sizeof(mask)) + if (args->useraddr == 0 || args->usersize != sizeof(mask)) { return EINVAL; + } error = copyin(args->useraddr, &mask, sizeof(mask)); - if (error) + if (error) { return error; + } return csr_check(mask); } @@ -120,12 +125,14 @@ syscall_csr_get_active_config(struct csrctl_args *args) csr_config_t config = 0; int error = 0; - if (args->useraddr == 0 || args->usersize != sizeof(config)) + if (args->useraddr == 0 || args->usersize != sizeof(config)) { return EINVAL; + } error = csr_get_active_config(&config); - if (error) + if (error) { return error; + } return copyout(&config, args->useraddr, sizeof(config)); } @@ -138,11 +145,11 @@ int csrctl(__unused proc_t p, struct csrctl_args *args, __unused int32_t *retval) { switch (args->op) { - case CSR_SYSCALL_CHECK: - return syscall_csr_check(args); - case CSR_SYSCALL_GET_ACTIVE_CONFIG: - return syscall_csr_get_active_config(args); - default: - return ENOSYS; + case CSR_SYSCALL_CHECK: + return syscall_csr_check(args); + case CSR_SYSCALL_GET_ACTIVE_CONFIG: + return syscall_csr_get_active_config(args); + default: + return ENOSYS; } } diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c index d906cf440..9d68de20e 100644 --- a/bsd/kern/kern_descrip.c +++ b/bsd/kern/kern_descrip.c @@ -154,7 +154,7 @@ static void _fdrelse(struct proc * p, int fd); extern void file_lock_init(void); -extern kauth_scope_t kauth_scope_fileop; +extern kauth_scope_t kauth_scope_fileop; /* Conflict wait queue for when selects collide (opaque type) */ extern struct waitq select_conflict_queue; @@ -179,15 +179,15 @@ extern struct waitq select_conflict_queue; #define f_offset f_fglob->fg_offset #define f_data f_fglob->fg_data #define CHECK_ADD_OVERFLOW_INT64L(x, y) \ - (((((x) > 0) && ((y) > 0) && ((x) > LLONG_MAX - (y))) || \ - (((x) < 0) && ((y) < 0) && ((x) < LLONG_MIN - (y)))) \ - ? 1 : 0) + (((((x) > 0) && ((y) > 0) && ((x) > LLONG_MAX - (y))) || \ + (((x) < 0) && ((y) < 0) && ((x) < LLONG_MIN - (y)))) \ + ? 1 : 0) /* * Descriptor management. */ -struct fmsglist fmsghead; /* head of list of open files */ -struct fmsglist fmsg_ithead; /* head of list of open files */ -int nfiles; /* actual number of open files */ +struct fmsglist fmsghead; /* head of list of open files */ +struct fmsglist fmsg_ithead; /* head of list of open files */ +int nfiles; /* actual number of open files */ lck_grp_attr_t * file_lck_grp_attr; @@ -205,9 +205,9 @@ lck_mtx_t * uipc_lock; * Parameters: fl Flock structure. * cur_file_offset Current offset in the file. * - * Returns: 0 on Success. + * Returns: 0 on Success. * EOVERFLOW on overflow. - * EINVAL on offset less than zero. + * EINVAL on offset less than zero. */ static int @@ -228,12 +228,12 @@ check_file_seek_range(struct flock *fl, off_t cur_file_offset) } /* Check if end marker is beyond LLONG_MAX. */ if ((fl->l_len > 0) && (CHECK_ADD_OVERFLOW_INT64L(fl->l_start + - cur_file_offset, fl->l_len - 1))) { + cur_file_offset, fl->l_len - 1))) { return EOVERFLOW; } /* Check if the end marker is negative. 
*/ if ((fl->l_len <= 0) && (fl->l_start + cur_file_offset + - fl->l_len < 0)) { + fl->l_len < 0)) { return EINVAL; } } else if (fl->l_whence == SEEK_SET) { @@ -247,7 +247,7 @@ check_file_seek_range(struct flock *fl, off_t cur_file_offset) return EOVERFLOW; } /* Check if the end marker is negative. */ - if ((fl->l_len < 0) && fl->l_start + fl->l_len < 0) { + if ((fl->l_len < 0) && fl->l_start + fl->l_len < 0) { return EINVAL; } } @@ -270,9 +270,9 @@ void file_lock_init(void) { /* allocate file lock group attribute and group */ - file_lck_grp_attr= lck_grp_attr_alloc_init(); + file_lck_grp_attr = lck_grp_attr_alloc_init(); - file_lck_grp = lck_grp_alloc_init("file", file_lck_grp_attr); + file_lck_grp = lck_grp_alloc_init("file", file_lck_grp_attr); /* Allocate file lock attribute */ file_lck_attr = lck_attr_alloc_init(); @@ -354,7 +354,7 @@ getdtablesize(proc_t p, __unused struct getdtablesize_args *uap, int32_t *retval *retval = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles); proc_fdunlock(p); - return (0); + return 0; } @@ -362,21 +362,22 @@ void procfdtbl_reservefd(struct proc * p, int fd) { p->p_fd->fd_ofiles[fd] = NULL; - p->p_fd->fd_ofileflags[fd] |= UF_RESERVED; + p->p_fd->fd_ofileflags[fd] |= UF_RESERVED; } void procfdtbl_markclosefd(struct proc * p, int fd) { - p->p_fd->fd_ofileflags[fd] |= (UF_RESERVED | UF_CLOSING); + p->p_fd->fd_ofileflags[fd] |= (UF_RESERVED | UF_CLOSING); } void procfdtbl_releasefd(struct proc * p, int fd, struct fileproc * fp) { - if (fp != NULL) - p->p_fd->fd_ofiles[fd] = fp; - p->p_fd->fd_ofileflags[fd] &= ~UF_RESERVED; + if (fp != NULL) { + p->p_fd->fd_ofiles[fd] = fp; + } + p->p_fd->fd_ofileflags[fd] &= ~UF_RESERVED; if ((p->p_fd->fd_ofileflags[fd] & UF_RESVWAIT) == UF_RESVWAIT) { p->p_fd->fd_ofileflags[fd] &= ~UF_RESVWAIT; wakeup(&p->p_fd); @@ -386,7 +387,7 @@ procfdtbl_releasefd(struct proc * p, int fd, struct fileproc * fp) void procfdtbl_waitfd(struct proc * p, int fd) { - p->p_fd->fd_ofileflags[fd] |= UF_RESVWAIT; + p->p_fd->fd_ofileflags[fd] |= UF_RESVWAIT; msleep(&p->p_fd, &p->p_fdmlock, PRIBIO, "ftbl_waitfd", NULL); } @@ -399,7 +400,7 @@ procfdtbl_clearfd(struct proc * p, int fd) waiting = (p->p_fd->fd_ofileflags[fd] & UF_RESVWAIT); p->p_fd->fd_ofiles[fd] = NULL; p->p_fd->fd_ofileflags[fd] = 0; - if ( waiting == UF_RESVWAIT) { + if (waiting == UF_RESVWAIT) { wakeup(&p->p_fd); } } @@ -424,19 +425,22 @@ _fdrelse(struct proc * p, int fd) struct filedesc *fdp = p->p_fd; int nfd = 0; - if (fd < fdp->fd_freefile) + if (fd < fdp->fd_freefile) { fdp->fd_freefile = fd; + } #if DIAGNOSTIC - if (fd > fdp->fd_lastfile) - panic("fdrelse: fd_lastfile inconsistent"); + if (fd > fdp->fd_lastfile) { + panic("fdrelse: fd_lastfile inconsistent"); + } #endif procfdtbl_clearfd(p, fd); while ((nfd = fdp->fd_lastfile) > 0 && - fdp->fd_ofiles[nfd] == NULL && - !(fdp->fd_ofileflags[nfd] & UF_RESERVED)) + fdp->fd_ofiles[nfd] == NULL && + !(fdp->fd_ofileflags[nfd] & UF_RESERVED)) { /* JMM - What about files with lingering EV_VANISHED knotes? 
*/ fdp->fd_lastfile--; + } } @@ -447,72 +451,78 @@ fd_rdwr( uint64_t base, int64_t len, enum uio_seg segflg, - off_t offset, - int io_flg, + off_t offset, + int io_flg, int64_t *aresid) { - struct fileproc *fp; - proc_t p; - int error = 0; + struct fileproc *fp; + proc_t p; + int error = 0; int flags = 0; int spacetype; uio_t auio = NULL; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; struct vfs_context context = *(vfs_context_current()); bool wrote_some = false; p = current_proc(); - error = fp_lookup(p, fd, &fp, 0); - if (error) - return(error); + error = fp_lookup(p, fd, &fp, 0); + if (error) { + return error; + } if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_PIPE && fp->f_type != DTYPE_SOCKET) { error = EINVAL; goto out; } if (rw == UIO_WRITE && !(fp->f_flag & FWRITE)) { - error = EBADF; + error = EBADF; goto out; } if (rw == UIO_READ && !(fp->f_flag & FREAD)) { - error = EBADF; - goto out; + error = EBADF; + goto out; } context.vc_ucred = fp->f_fglob->fg_cred; - if (UIO_SEG_IS_USER_SPACE(segflg)) + if (UIO_SEG_IS_USER_SPACE(segflg)) { spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; - else + } else { spacetype = UIO_SYSSPACE; + } auio = uio_createwithbuffer(1, offset, spacetype, rw, &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, base, len); - if ( !(io_flg & IO_APPEND)) + if (!(io_flg & IO_APPEND)) { flags = FOF_OFFSET; + } if (rw == UIO_WRITE) { user_ssize_t orig_resid = uio_resid(auio); error = fo_write(fp, auio, flags, &context); wrote_some = uio_resid(auio) < orig_resid; - } else + } else { error = fo_read(fp, auio, flags, &context); + } - if (aresid) + if (aresid) { *aresid = uio_resid(auio); - else { - if (uio_resid(auio) && error == 0) + } else { + if (uio_resid(auio) && error == 0) { error = EIO; + } } out: - if (wrote_some) - fp_drop_written(p, fd, fp); - else - fp_drop(p, fd, fp, 0); + if (wrote_some) { + fp_drop_written(p, fd, fp); + } else { + fp_drop(p, fd, fp, 0); + } return error; } @@ -543,20 +553,20 @@ dup(proc_t p, struct dup_args *uap, int32_t *retval) struct fileproc *fp; proc_fdlock(p); - if ( (error = fp_lookup(p, old, &fp, 1)) ) { + if ((error = fp_lookup(p, old, &fp, 1))) { proc_fdunlock(p); - return(error); + return error; } if (FP_ISGUARDED(fp, GUARD_DUP)) { error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP); (void) fp_drop(p, old, fp, 1); proc_fdunlock(p); - return (error); + return error; } - if ( (error = fdalloc(p, 0, &new)) ) { + if ((error = fdalloc(p, 0, &new))) { fp_drop(p, old, fp, 1); proc_fdunlock(p); - return (error); + return error; } error = finishdup(p, fdp, old, new, 0, retval); fp_drop(p, old, fp, 1); @@ -567,7 +577,7 @@ dup(proc_t p, struct dup_args *uap, int32_t *retval) new, 0, (int64_t)VM_KERNEL_ADDRPERM(fp->f_data)); } - return (error); + return error; } /* @@ -597,34 +607,34 @@ dup2(proc_t p, struct dup2_args *uap, int32_t *retval) proc_fdlock(p); startover: - if ( (error = fp_lookup(p, old, &fp, 1)) ) { + if ((error = fp_lookup(p, old, &fp, 1))) { proc_fdunlock(p); - return(error); + return error; } if (FP_ISGUARDED(fp, GUARD_DUP)) { error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP); (void) fp_drop(p, old, fp, 1); proc_fdunlock(p); - return (error); + return error; } if (new < 0 || - (rlim_t)new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur || + (rlim_t)new >= p->p_rlimit[RLIMIT_NOFILE].rlim_cur || new >= maxfiles) { fp_drop(p, old, fp, 1); proc_fdunlock(p); - return (EBADF); + return EBADF; } if (old == new) { fp_drop(p, old, fp, 1); *retval = new; proc_fdunlock(p); - return (0); + return 0; 
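The dup2() hunk above preserves the two early-exit paths while bracing them: a target descriptor outside the per-process limits fails up front with EBADF, and dup2(old, old) merely validates old and returns it, closing nothing and allocating nothing. A minimal userland sketch of those semantics (illustrative only, not part of the patch; the temporary descriptor is arbitrary):

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd = dup(STDOUT_FILENO);    /* any valid descriptor will do */

	/* Same-fd case: validated, then returned unchanged -- nothing
	 * is closed and no new descriptor is allocated. */
	if (dup2(fd, fd) == fd) {
		printf("dup2(%d, %d) -> %d (no-op)\n", fd, fd, fd);
	}

	/* Target beyond the per-process descriptor limits: rejected
	 * early with EBADF, matching the range check in the code above. */
	if (dup2(fd, INT_MAX) == -1) {
		printf("dup2(fd, INT_MAX): %s\n", strerror(errno));
	}

	close(fd);
	return 0;
}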
} if (new < 0 || new >= fdp->fd_nfiles) { - if ( (error = fdalloc(p, new, &i)) ) { + if ((error = fdalloc(p, new, &i))) { fp_drop(p, old, fp, 1); proc_fdunlock(p); - return (error); + return error; } if (new != i) { fdrelse(p, i); @@ -632,13 +642,13 @@ startover: } } else { closeit: - while ((fdp->fd_ofileflags[new] & UF_RESERVED) == UF_RESERVED) { - fp_drop(p, old, fp, 1); - procfdtbl_waitfd(p, new); + while ((fdp->fd_ofileflags[new] & UF_RESERVED) == UF_RESERVED) { + fp_drop(p, old, fp, 1); + procfdtbl_waitfd(p, new); #if DIAGNOSTIC - proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); + proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); #endif - goto startover; + goto startover; } if ((fdp->fd_ofiles[new] != NULL) && @@ -649,7 +659,7 @@ closeit: new, nfp, kGUARD_EXC_CLOSE); (void) fp_drop(p, new, nfp, 1); proc_fdunlock(p); - return (error); + return error; } (void)close_internal_locked(p, new, nfp, FD_DUP2RESV); #if DIAGNOSTIC @@ -657,10 +667,11 @@ closeit: #endif procfdtbl_clearfd(p, new); goto startover; - } else { + } else { #if DIAGNOSTIC - if (fdp->fd_ofiles[new] != NULL) + if (fdp->fd_ofiles[new] != NULL) { panic("dup2: no ref on fileproc %d", new); + } #endif procfdtbl_reservefd(p, new); } @@ -668,19 +679,20 @@ closeit: #if DIAGNOSTIC proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); #endif - } #if DIAGNOSTIC - if (fdp->fd_ofiles[new] != 0) + if (fdp->fd_ofiles[new] != 0) { panic("dup2: overwriting fd_ofiles with new %d", new); - if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) + } + if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) { panic("dup2: unreserved fileflags with new %d", new); + } #endif error = finishdup(p, fdp, old, new, 0, retval); fp_drop(p, old, fp, 1); proc_fdunlock(p); - return(error); + return error; } @@ -709,7 +721,7 @@ int fcntl(proc_t p, struct fcntl_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(fcntl_nocancel(p, (struct fcntl_nocancel_args *)uap, retval)); + return fcntl_nocancel(p, (struct fcntl_nocancel_args *)uap, retval); } @@ -774,7 +786,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) struct filedesc *fdp = p->p_fd; struct fileproc *fp; char *pop; - struct vnode *vp = NULLVP; /* for AUDIT_ARG() at end */ + struct vnode *vp = NULLVP; /* for AUDIT_ARG() at end */ int i, tmp, error, error2, flg = 0; struct flock fl = {}; struct flocktimeout fltimeout; @@ -791,9 +803,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) AUDIT_ARG(cmd, uap->cmd); proc_fdlock(p); - if ( (error = fp_lookup(p, fd, &fp, 1)) ) { + if ((error = fp_lookup(p, fd, &fp, 1))) { proc_fdunlock(p); - return(error); + return error; } context.vc_thread = current_thread(); context.vc_ucred = fp->f_cred; @@ -801,8 +813,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) is64bit = proc_is64bit(p); if (is64bit) { argp = uap->arg; - } - else { + } else { /* * Since the arg parameter is defined as a long but may be * either a long or a pointer we must take care to handle @@ -820,12 +831,12 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) #if CONFIG_MACF error = mac_file_check_fcntl(proc_ucred(p), fp->f_fglob, uap->cmd, uap->arg); - if (error) + if (error) { goto out; + } #endif switch (uap->cmd) { - case F_DUPFD: case F_DUPFD_CLOEXEC: if (FP_ISGUARDED(fp, GUARD_DUP)) { @@ -839,8 +850,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EINVAL; goto out; } - if ( (error = fdalloc(p, newmin, &i)) ) + if ((error = fdalloc(p, newmin, &i))) { goto 
out; + } error = finishdup(p, fdp, fd, i, uap->cmd == F_DUPFD_CLOEXEC ? UF_EXCLOSE : 0, retval); goto out; @@ -852,9 +864,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) case F_SETFD: AUDIT_ARG(value32, uap->arg); - if (uap->arg & FD_CLOEXEC) + if (uap->arg & FD_CLOEXEC) { *pop |= UF_EXCLOSE; - else { + } else { if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) { error = fp_guard_exception(p, fd, fp, kGUARD_EXC_NOCLOEXEC); @@ -877,12 +889,14 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) fp->f_flag |= FFLAGS(tmp) & FCNTLFLAGS; tmp = fp->f_flag & FNONBLOCK; error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context); - if (error) + if (error) { goto out; + } tmp = fp->f_flag & FASYNC; error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context); - if (!error) + if (!error) { goto out; + } fp->f_flag &= ~FNONBLOCK; tmp = 0; (void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context); @@ -903,7 +917,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) AUDIT_ARG(value32, tmp); if (fp->f_type == DTYPE_SOCKET) { ((struct socket *)fp->f_data)->so_pgid = tmp; - error =0; + error = 0; goto out; } if (fp->f_type == DTYPE_PIPE) { @@ -930,7 +944,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) if (fp->f_type == DTYPE_SOCKET) { #if SOCKETS error = sock_setsockopt((struct socket *)fp->f_data, - SOL_SOCKET, SO_NOSIGPIPE, &tmp, sizeof (tmp)); + SOL_SOCKET, SO_NOSIGPIPE, &tmp, sizeof(tmp)); #else error = EINVAL; #endif @@ -938,10 +952,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) struct fileglob *fg = fp->f_fglob; lck_mtx_lock_spin(&fg->fg_lock); - if (tmp) + if (tmp) { fg->fg_lflags |= FG_NOSIGPIPE; - else - fg->fg_lflags &= FG_NOSIGPIPE; + } else { + fg->fg_lflags &= ~FG_NOSIGPIPE; + } lck_mtx_unlock(&fg->fg_lock); error = 0; } @@ -950,7 +965,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) case F_GETNOSIGPIPE: if (fp->f_type == DTYPE_SOCKET) { #if SOCKETS - int retsize = sizeof (*retval); + int retsize = sizeof(*retval); error = sock_getsockopt((struct socket *)fp->f_data, SOL_SOCKET, SO_NOSIGPIPE, retval, &retsize); #else @@ -958,7 +973,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) #endif } else { *retval = (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) ? - 1 : 0; + 1 : 0; error = 0; } goto out; @@ -975,22 +990,23 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) struct fileglob *fg = fp->f_fglob; lck_mtx_lock_spin(&fg->fg_lock); - if (fg->fg_lflags & FG_CONFINED) + if (fg->fg_lflags & FG_CONFINED) { error = 0; - else if (1 != fg->fg_count) - error = EAGAIN; /* go close the dup .. */ - else if (UF_FORKCLOSE == (*pop & UF_FORKCLOSE)) { + } else if (1 != fg->fg_count) { + error = EAGAIN; /* go close the dup .. */ + } else if (UF_FORKCLOSE == (*pop & UF_FORKCLOSE)) { fg->fg_lflags |= FG_CONFINED; error = 0; - } else - error = EBADF; /* open without O_CLOFORK? */ + } else { + error = EBADF; /* open without O_CLOFORK? */ + } lck_mtx_unlock(&fg->fg_lock); } else { /* * Other subsystems may have built on the immutability * of FG_CONFINED; clearing it may be tricky. 
*/ - error = EPERM; /* immutable */ + error = EPERM; /* immutable */ } goto out; @@ -1004,7 +1020,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) case F_OFD_SETLKWTIMEOUT: case F_OFD_SETLKW: flg |= F_WAIT; - /* Fall into F_SETLK */ + /* Fall into F_SETLK */ case F_SETLK: case F_OFD_SETLK: @@ -1041,11 +1057,12 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { goto outdrop; } - if (fl.l_whence == SEEK_CUR) + if (fl.l_whence == SEEK_CUR) { fl.l_start += offset; + } #if CONFIG_MACF error = mac_file_check_lock(proc_ucred(p), fp->f_fglob, @@ -1152,8 +1169,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* Copy in the lock structure */ error = copyin(argp, (caddr_t)&fl, sizeof(fl)); - if (error) + if (error) { goto outdrop; + } /* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */ /* and ending byte for EOVERFLOW in SEEK_SET */ @@ -1187,9 +1205,10 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } - if ( (error = vnode_getwithref(vp)) == 0 ) { - if (fl.l_whence == SEEK_CUR) - fl.l_start += offset; + if ((error = vnode_getwithref(vp)) == 0) { + if (fl.l_whence == SEEK_CUR) { + fl.l_start += offset; + } #if CONFIG_MACF error = mac_file_check_lock(proc_ucred(p), fp->f_fglob, @@ -1213,8 +1232,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) (void)vnode_put(vp); - if (error == 0) + if (error == 0) { error = copyout((caddr_t)&fl, argp, sizeof(fl)); + } } goto outdrop; @@ -1237,8 +1257,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } error = copyin(argp, (caddr_t)&alloc_struct, sizeof(alloc_struct)); - if (error) + if (error) { goto outdrop; + } /* now set the space allocated to 0 */ alloc_struct.fst_bytesalloc = 0; @@ -1251,11 +1272,13 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) alloc_flags |= PREALLOCATE; - if (alloc_struct.fst_flags & F_ALLOCATECONTIG) + if (alloc_struct.fst_flags & F_ALLOCATECONTIG) { alloc_flags |= ALLOCATECONTIG; + } - if (alloc_struct.fst_flags & F_ALLOCATEALL) + if (alloc_struct.fst_flags & F_ALLOCATEALL) { alloc_flags |= ALLOCATEALL; + } /* * Do any position mode specific stuff. 
The only @@ -1263,7 +1286,6 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) */ switch (alloc_struct.fst_posmode) { - case F_PEOFPOSMODE: if (alloc_struct.fst_offset != 0) { error = EINVAL; @@ -1285,21 +1307,22 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) default: { error = EINVAL; goto outdrop; - } } - if ( (error = vnode_getwithref(vp)) == 0 ) { - /* + } + if ((error = vnode_getwithref(vp)) == 0) { + /* * call allocate to get the space */ - error = VNOP_ALLOCATE(vp,alloc_struct.fst_length,alloc_flags, - &alloc_struct.fst_bytesalloc, alloc_struct.fst_offset, - &context); + error = VNOP_ALLOCATE(vp, alloc_struct.fst_length, alloc_flags, + &alloc_struct.fst_bytesalloc, alloc_struct.fst_offset, + &context); (void)vnode_put(vp); error2 = copyout((caddr_t)&alloc_struct, argp, sizeof(alloc_struct)); - if (error == 0) + if (error == 0) { error = error2; + } } goto outdrop; } @@ -1383,14 +1406,16 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) vp = (struct vnode *)fp->f_data; proc_fdunlock(p); - error = copyin(argp, (caddr_t)&offset, sizeof (off_t)); - if (error) + error = copyin(argp, (caddr_t)&offset, sizeof(off_t)); + if (error) { goto outdrop; + } AUDIT_ARG(value64, offset); error = vnode_getwithref(vp); - if (error) + if (error) { goto outdrop; + } #if CONFIG_MACF error = mac_vnode_check_truncate(&context, @@ -1415,8 +1440,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) &context); #if CONFIG_MACF - if (error == 0) + if (error == 0) { mac_vnode_notify_truncate(&context, fp->f_fglob->fg_cred, vp); + } #endif } @@ -1428,10 +1454,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EBADF; goto out; } - if (uap->arg) - fp->f_fglob->fg_flag &= ~FNORDAHEAD; - else - fp->f_fglob->fg_flag |= FNORDAHEAD; + if (uap->arg) { + fp->f_fglob->fg_flag &= ~FNORDAHEAD; + } else { + fp->f_fglob->fg_flag |= FNORDAHEAD; + } goto out; @@ -1440,10 +1467,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EBADF; goto out; } - if (uap->arg) - fp->f_fglob->fg_flag |= FNOCACHE; - else - fp->f_fglob->fg_flag &= ~FNOCACHE; + if (uap->arg) { + fp->f_fglob->fg_flag |= FNOCACHE; + } else { + fp->f_fglob->fg_flag &= ~FNOCACHE; + } goto out; @@ -1452,10 +1480,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EBADF; goto out; } - if (uap->arg) - fp->f_fglob->fg_flag |= FNODIRECT; - else - fp->f_fglob->fg_flag &= ~FNODIRECT; + if (uap->arg) { + fp->f_fglob->fg_flag |= FNODIRECT; + } else { + fp->f_fglob->fg_flag &= ~FNODIRECT; + } goto out; @@ -1464,50 +1493,51 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EBADF; goto out; } - if (uap->arg) - fp->f_fglob->fg_flag |= FSINGLE_WRITER; - else - fp->f_fglob->fg_flag &= ~FSINGLE_WRITER; + if (uap->arg) { + fp->f_fglob->fg_flag |= FSINGLE_WRITER; + } else { + fp->f_fglob->fg_flag &= ~FSINGLE_WRITER; + } goto out; case F_GLOBAL_NOCACHE: - if (fp->f_type != DTYPE_VNODE) { - error = EBADF; + if (fp->f_type != DTYPE_VNODE) { + error = EBADF; goto out; } vp = (struct vnode *)fp->f_data; proc_fdunlock(p); - if ( (error = vnode_getwithref(vp)) == 0 ) { - - *retval = vnode_isnocache(vp); + if ((error = vnode_getwithref(vp)) == 0) { + *retval = vnode_isnocache(vp); - if (uap->arg) - vnode_setnocache(vp); - else - vnode_clearnocache(vp); + if (uap->arg) { + vnode_setnocache(vp); + } else { + vnode_clearnocache(vp); + } 
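The F_NOCACHE and F_GLOBAL_NOCACHE cases restyled above both toggle data caching, but at different scopes: F_NOCACHE flips FNOCACHE on the descriptor's fileglob, while F_GLOBAL_NOCACHE takes an iocount on the vnode, reports the previous state in *retval, and sets or clears the vnode-wide flag. A minimal userland sketch of the per-descriptor variant (illustrative only, not part of the patch; the file path is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tmp/nocache-demo", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		return 1;
	}

	/* arg != 0 sets FNOCACHE on the fileglob; 0 clears it again. */
	if (fcntl(fd, F_NOCACHE, 1) == -1) {
		perror("F_NOCACHE");
	} else {
		printf("caching disabled for fd %d\n", fd);
	}

	/* ... uncached I/O here; unaligned transfers may still be
	 * copied through the cache, so page-aligned, block-sized
	 * buffers give the intended effect ... */

	(void)fcntl(fd, F_NOCACHE, 0);  /* restore caching */
	close(fd);
	unlink("/tmp/nocache-demo");
	return 0;
}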
(void)vnode_put(vp); } goto outdrop; case F_CHECK_OPENEVT: - if (fp->f_type != DTYPE_VNODE) { - error = EBADF; + if (fp->f_type != DTYPE_VNODE) { + error = EBADF; goto out; } vp = (struct vnode *)fp->f_data; proc_fdunlock(p); - if ( (error = vnode_getwithref(vp)) == 0 ) { - - *retval = vnode_is_openevt(vp); + if ((error = vnode_getwithref(vp)) == 0) { + *retval = vnode_is_openevt(vp); - if (uap->arg) - vnode_set_openevt(vp); - else - vnode_clear_openevt(vp); + if (uap->arg) { + vnode_set_openevt(vp); + } else { + vnode_clear_openevt(vp); + } (void)vnode_put(vp); } @@ -1523,31 +1553,32 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) vp = (struct vnode *)fp->f_data; proc_fdunlock(p); - if ( (error = copyin(argp, (caddr_t)&ra_struct, sizeof(ra_struct))) ) + if ((error = copyin(argp, (caddr_t)&ra_struct, sizeof(ra_struct)))) { goto outdrop; - if ( (error = vnode_getwithref(vp)) == 0 ) { - error = VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, &context); + } + if ((error = vnode_getwithref(vp)) == 0) { + error = VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, &context); (void)vnode_put(vp); } goto outdrop; - } + } - case F_FLUSH_DATA: + case F_FLUSH_DATA: - if (fp->f_type != DTYPE_VNODE) { - error = EBADF; - goto out; - } - vp = (struct vnode *)fp->f_data; - proc_fdunlock(p); + if (fp->f_type != DTYPE_VNODE) { + error = EBADF; + goto out; + } + vp = (struct vnode *)fp->f_data; + proc_fdunlock(p); - if ( (error = vnode_getwithref(vp)) == 0 ) { - error = VNOP_FSYNC(vp, MNT_NOWAIT, &context); + if ((error = vnode_getwithref(vp)) == 0) { + error = VNOP_FSYNC(vp, MNT_NOWAIT, &context); - (void)vnode_put(vp); - } - goto outdrop; + (void)vnode_put(vp); + } + goto outdrop; case F_LOG2PHYS: case F_LOG2PHYS_EXT: { @@ -1560,8 +1591,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) if (uap->cmd == F_LOG2PHYS_EXT) { error = copyin(argp, (caddr_t)&l2p_struct, sizeof(l2p_struct)); - if (error) + if (error) { goto out; + } file_offset = l2p_struct.l2p_devoffset; } else { file_offset = fp->f_offset; @@ -1572,7 +1604,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } vp = (struct vnode *)fp->f_data; proc_fdunlock(p); - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { goto outdrop; } error = VNOP_OFFTOBLK(vp, file_offset, &lbn); @@ -1603,11 +1635,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) (void)vnode_put(vp); if (!error) { - l2p_struct.l2p_flags = 0; /* for now */ + l2p_struct.l2p_flags = 0; /* for now */ if (uap->cmd == F_LOG2PHYS_EXT) { l2p_struct.l2p_contigbytes = run - (file_offset - offset); } else { - l2p_struct.l2p_contigbytes = 0; /* for now */ + l2p_struct.l2p_contigbytes = 0; /* for now */ } /* @@ -1617,15 +1649,14 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) if (bn == -1) { /* Don't multiply it by the block size */ l2p_struct.l2p_devoffset = bn; - } - else { + } else { l2p_struct.l2p_devoffset = bn * devBlockSize; l2p_struct.l2p_devoffset += file_offset - offset; } error = copyout((caddr_t)&l2p_struct, argp, sizeof(l2p_struct)); } goto outdrop; - } + } case F_GETPATH: { char *pathbufp; int pathlen; @@ -1643,12 +1674,13 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = ENOMEM; goto outdrop; } - if ( (error = vnode_getwithref(vp)) == 0 ) { - error = vn_getpath(vp, pathbufp, &pathlen); - (void)vnode_put(vp); + if ((error = vnode_getwithref(vp)) == 0) { + error = 
vn_getpath(vp, pathbufp, &pathlen); + (void)vnode_put(vp); - if (error == 0) - error = copyout((caddr_t)pathbufp, argp, pathlen); + if (error == 0) { + error = copyout((caddr_t)pathbufp, argp, pathlen); + } } FREE(pathbufp, M_TEMP); goto outdrop; @@ -1659,7 +1691,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) size_t pathlen; if (fp->f_type != DTYPE_VNODE) { - error = EBADF; + error = EBADF; goto out; } vp = (struct vnode *)fp->f_data; @@ -1668,10 +1700,10 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) pathlen = MAXPATHLEN; pathbufp = kalloc(MAXPATHLEN); - if ( (error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0 ) { - if ( (error = vnode_getwithref(vp)) == 0 ) { + if ((error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0) { + if ((error = vnode_getwithref(vp)) == 0) { AUDIT_ARG(text, pathbufp); - error = vn_path_package_check(vp, pathbufp, pathlen, retval); + error = vn_path_package_check(vp, pathbufp, pathlen, retval); (void)vnode_put(vp); } @@ -1692,8 +1724,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) vp = (struct vnode *)fp->f_data; proc_fdunlock(p); - if ( (error = vnode_getwithref(vp)) == 0 ) { - error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)NULL, 0, &context); + if ((error = vnode_getwithref(vp)) == 0) { + error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)NULL, 0, &context); (void)vnode_put(vp); } @@ -1749,16 +1781,16 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) AUDIT_ARG(mode, fopen.o_mode); VATTR_INIT(&va); /* Mask off all but regular access permissions */ - cmode = ((fopen.o_mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; + cmode = ((fopen.o_mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; VATTR_SET(&va, va_mode, cmode & ACCESSPERMS); /* Start the lookup relative to the file descriptor's vnode. 
*/ NDINIT(&nd, LOOKUP, OP_OPEN, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE, - fopen.o_pathname, &context); + fopen.o_pathname, &context); nd.ni_dvp = vp; error = open1(&context, &nd, fopen.o_flags, &va, - fileproc_alloc_init, NULL, retval); + fileproc_alloc_init, NULL, retval); vnode_put(vp); break; @@ -1802,7 +1834,6 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) vnode_put(vp); break; - } case F_ADDSIGS: @@ -1834,15 +1865,16 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } error = vnode_getwithref(vp); - if (error) + if (error) { goto outdrop; + } if (IS_64BIT_PROCESS(p)) { - error = copyin(argp, &fs, sizeof (fs)); + error = copyin(argp, &fs, sizeof(fs)); } else { struct user32_fsignatures fs32; - error = copyin(argp, &fs32, sizeof (fs32)); + error = copyin(argp, &fs32, sizeof(fs32)); fs.fs_file_start = fs32.fs_file_start; fs.fs_blob_start = CAST_USER_ADDR_T(fs32.fs_blob_start); fs.fs_blob_size = fs32.fs_blob_size; @@ -1857,8 +1889,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * First check if we have something loaded a this offset */ blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, fs.fs_file_start); - if (blob != NULL) - { + if (blob != NULL) { /* If this is for dyld_sim revalidate the blob */ if (uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM) { error = ubc_cs_blob_revalidate(vp, blob, NULL, blob_add_flags); @@ -1897,23 +1928,23 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } - if(uap->cmd == F_ADDSIGS) { + if (uap->cmd == F_ADDSIGS) { error = copyin(fs.fs_blob_start, - (void *) kernel_blob_addr, - kernel_blob_size); - } else /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM */ { + (void *) kernel_blob_addr, + kernel_blob_size); + } else { /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM */ int resid; error = vn_rdwr(UIO_READ, - vp, - (caddr_t) kernel_blob_addr, - kernel_blob_size, - fs.fs_file_start + fs.fs_blob_start, - UIO_SYSSPACE, - 0, - kauth_cred_get(), - &resid, - p); + vp, + (caddr_t) kernel_blob_addr, + kernel_blob_size, + fs.fs_file_start + fs.fs_blob_start, + UIO_SYSSPACE, + 0, + kauth_cred_get(), + &resid, + p); if ((error == 0) && resid) { /* kernel_blob_size rounded to a page size, but signature may be at end of file */ memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid); @@ -1922,26 +1953,26 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) if (error) { ubc_cs_blob_deallocate(kernel_blob_addr, - kernel_blob_size); + kernel_blob_size); vnode_put(vp); goto outdrop; } blob = NULL; error = ubc_cs_blob_add(vp, - CPU_TYPE_ANY, /* not for a specific architecture */ - fs.fs_file_start, - &kernel_blob_addr, - kernel_blob_size, - NULL, - blob_add_flags, - &blob); + CPU_TYPE_ANY, /* not for a specific architecture */ + fs.fs_file_start, + &kernel_blob_addr, + kernel_blob_size, + NULL, + blob_add_flags, + &blob); /* ubc_blob_add() has consumed "kernel_blob_addr" if it is zeroed */ if (error) { if (kernel_blob_addr) { ubc_cs_blob_deallocate(kernel_blob_addr, - kernel_blob_size); + kernel_blob_size); } vnode_put(vp); goto outdrop; @@ -1959,9 +1990,10 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * all archs. Lets overwrite that. 
*/ off_t end_offset = 0; - if (blob) + if (blob) { end_offset = blob->csb_end_offset; - error = copyout(&end_offset, argp, sizeof (end_offset)); + } + error = copyout(&end_offset, argp, sizeof(end_offset)); } (void) vnode_put(vp); @@ -1974,7 +2006,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } case F_CHECK_LV: { struct fileglob *fg; - fchecklv_t lv; + fchecklv_t lv = {}; if (fp->f_type != DTYPE_VNODE) { error = EBADF; @@ -1984,17 +2016,18 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) proc_fdunlock(p); if (IS_64BIT_PROCESS(p)) { - error = copyin(argp, &lv, sizeof (lv)); + error = copyin(argp, &lv, sizeof(lv)); } else { - struct user32_fchecklv lv32; + struct user32_fchecklv lv32 = {}; - error = copyin(argp, &lv32, sizeof (lv32)); + error = copyin(argp, &lv32, sizeof(lv32)); lv.lv_file_start = lv32.lv_file_start; lv.lv_error_message = (void *)(uintptr_t)lv32.lv_error_message; - lv.lv_error_message_size = lv32.lv_error_message; + lv.lv_error_message_size = lv32.lv_error_message_size; } - if (error) + if (error) { goto outdrop; + } #if CONFIG_MACF error = mac_file_check_library_validation(p, fg, lv.lv_file_start, @@ -2024,10 +2057,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) VATTR_WANTED(&va, va_dataprotect_class); error = VNOP_GETATTR(vp, &va, &context); if (!error) { - if (VATTR_IS_SUPPORTED(&va, va_dataprotect_class)) + if (VATTR_IS_SUPPORTED(&va, va_dataprotect_class)) { *retval = va.va_dataprotect_class; - else + } else { error = ENOTSUP; + } } vnode_put(vp); @@ -2053,7 +2087,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* Only go forward if you have write access */ vfs_context_t ctx = vfs_context_current(); - if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { + if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { vnode_put(vp); error = EBADF; goto outdrop; @@ -2111,7 +2145,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } vp = (struct vnode*) fp->f_data; - proc_fdunlock (p); + proc_fdunlock(p); if (vnode_getwithref(vp)) { error = ENOENT; @@ -2120,7 +2154,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = VNOP_IOCTL(vp, F_GETPROTECTIONLEVEL, (caddr_t)retval, 0, &context); - vnode_put (vp); + vnode_put(vp); break; } @@ -2131,7 +2165,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } vp = (struct vnode*) fp->f_data; - proc_fdunlock (p); + proc_fdunlock(p); if (vnode_getwithref(vp)) { error = ENOENT; @@ -2145,7 +2179,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = VNOP_IOCTL(vp, F_GETDEFAULTPROTLEVEL, (caddr_t)retval, 0, &context); - vnode_put (vp); + vnode_put(vp); break; } @@ -2159,8 +2193,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg); error = priv_check_cred(kauth_cred_get(), PRIV_VFS_MOVE_DATA_EXTENTS, 0); - if (error) + if (error) { goto out; + } if (fp->f_type != DTYPE_VNODE) { error = EBADF; @@ -2178,7 +2213,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * Get the references before we start acquiring iocounts on the vnodes, * while we still hold the proc fd lock */ - if ( (error = fp_lookup(p, fd2, &fp2, 1)) ) { + if ((error = fp_lookup(p, fd2, &fp2, 1))) { error = EBADF; goto out; } @@ -2213,7 +2248,7 @@ 
fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } if (vnode_getwithref(dst_vp)) { - vnode_put (src_vp); + vnode_put(src_vp); fp_drop(p, fd2, fp2, 0); error = ENOENT; goto outdrop; @@ -2224,17 +2259,17 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * both live on the same filesystem. */ if (dst_vp == src_vp) { - vnode_put (src_vp); - vnode_put (dst_vp); - fp_drop (p, fd2, fp2, 0); + vnode_put(src_vp); + vnode_put(dst_vp); + fp_drop(p, fd2, fp2, 0); error = EINVAL; goto outdrop; } if (dst_vp->v_mount != src_vp->v_mount) { - vnode_put (src_vp); - vnode_put (dst_vp); - fp_drop (p, fd2, fp2, 0); + vnode_put(src_vp); + vnode_put(dst_vp); + fp_drop(p, fd2, fp2, 0); error = EXDEV; goto outdrop; } @@ -2242,8 +2277,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* Now we have a legit pair of FDs. Go to work */ /* Now check for write access to the target files */ - if(vnode_authorize(src_vp, NULLVP, - (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) { + if (vnode_authorize(src_vp, NULLVP, + (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) { vnode_put(src_vp); vnode_put(dst_vp); fp_drop(p, fd2, fp2, 0); @@ -2251,8 +2286,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } - if(vnode_authorize(dst_vp, NULLVP, - (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) { + if (vnode_authorize(dst_vp, NULLVP, + (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) { vnode_put(src_vp); vnode_put(dst_vp); fp_drop(p, fd2, fp2, 0); @@ -2261,11 +2296,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } /* Verify that both vps point to files and not directories */ - if ( !vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) { + if (!vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) { error = EINVAL; - vnode_put (src_vp); - vnode_put (dst_vp); - fp_drop (p, fd2, fp2, 0); + vnode_put(src_vp); + vnode_put(dst_vp); + fp_drop(p, fd2, fp2, 0); goto outdrop; } @@ -2276,8 +2311,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context); - vnode_put (src_vp); - vnode_put (dst_vp); + vnode_put(src_vp); + vnode_put(dst_vp); fp_drop(p, fd2, fp2, 0); break; } @@ -2294,7 +2329,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } vp = (struct vnode*) fp->f_data; - proc_fdunlock (p); + proc_fdunlock(p); /* get the vnode */ if (vnode_getwithref(vp)) { @@ -2312,7 +2347,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* invoke ioctl to pass off to FS */ /* Only go forward if you have write access */ vfs_context_t ctx = vfs_context_current(); - if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { + if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { vnode_put(vp); error = EBADF; goto outdrop; @@ -2320,7 +2355,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)&gcounter, 0, &context); - vnode_put (vp); + vnode_put(vp); break; } @@ -2329,9 +2364,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * the open FD will written to the Fastflow. */ case F_SET_GREEDY_MODE: - /* intentionally drop through to the same handler as F_SETSTATIC. 
- * both fcntls should pass the argument and their selector into VNOP_IOCTL. - */ + /* intentionally drop through to the same handler as F_SETSTATIC. + * both fcntls should pass the argument and their selector into VNOP_IOCTL. + */ /* * SPI (private) for indicating to a filesystem that subsequent writes to @@ -2359,7 +2394,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* Only go forward if you have write access */ vfs_context_t ctx = vfs_context_current(); - if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { + if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { vnode_put(vp); error = EBADF; goto outdrop; @@ -2384,8 +2419,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* extract 32 bits of flags from userland */ param_ptr = (caddr_t) uap->arg; param = (uint32_t) param_ptr; - } - else { + } else { /* If no argument is specified, error out */ error = EINVAL; goto out; @@ -2396,12 +2430,12 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * all of them are mutually exclusive for now. */ switch (param) { - case F_IOTYPE_ISOCHRONOUS: - break; + case F_IOTYPE_ISOCHRONOUS: + break; - default: - error = EINVAL; - goto out; + default: + error = EINVAL; + goto out; } @@ -2420,7 +2454,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* Only go forward if you have write access */ vfs_context_t ctx = vfs_context_current(); - if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { + if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { vnode_put(vp); error = EBADF; goto outdrop; @@ -2458,7 +2492,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* only proceed if you have write access */ vfs_context_t ctx = vfs_context_current(); - if(vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { + if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) { vnode_put(vp); error = EBADF; goto outdrop; @@ -2467,10 +2501,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) /* If arg != 0, set, otherwise unset */ if (uap->arg) { - error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)1, 0, &context); - } - else { - error = VNOP_IOCTL (vp, uap->cmd, (caddr_t)NULL, 0, &context); + error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)1, 0, &context); + } else { + error = VNOP_IOCTL(vp, uap->cmd, (caddr_t)NULL, 0, &context); } vnode_put(vp); @@ -2498,13 +2531,13 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = ENOMEM; goto outdrop; } - if ( (error = vnode_getwithref(vp)) == 0 ) { + if ((error = vnode_getwithref(vp)) == 0) { int backingstore = 0; /* Check for error from vn_getpath before moving on */ if ((error = vn_getpath(vp, pathbufp, &pathlen)) == 0) { if (vp->v_tag == VT_HFS) { - error = VNOP_IOCTL (vp, uap->cmd, (caddr_t) &backingstore, 0, &context); + error = VNOP_IOCTL(vp, uap->cmd, (caddr_t) &backingstore, 0, &context); } (void)vnode_put(vp); @@ -2521,8 +2554,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = EBUSY; } } - } else + } else { (void)vnode_put(vp); + } } FREE(pathbufp, M_TEMP); goto outdrop; @@ -2548,24 +2582,24 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * for this vnode; this can include special devices, and will * 
effectively overload fcntl() to send ioctl()'s. */ - if((uap->cmd & IOC_VOID) && (uap->cmd & IOC_INOUT)){ - error = EINVAL; + if ((uap->cmd & IOC_VOID) && (uap->cmd & IOC_INOUT)) { + error = EINVAL; goto out; } /* Catch any now-invalid fcntl() selectors */ switch (uap->cmd) { - case (int)APFSIOC_REVERT_TO_SNAPSHOT: - case (int)FSIOC_FIOSEEKHOLE: - case (int)FSIOC_FIOSEEKDATA: - case HFS_GET_BOOT_INFO: - case HFS_SET_BOOT_INFO: - case FIOPINSWAP: - case F_MARKDEPENDENCY: - error = EINVAL; - goto out; - default: - break; + case (int)APFSIOC_REVERT_TO_SNAPSHOT: + case (int)FSIOC_FIOSEEKHOLE: + case (int)FSIOC_FIOSEEKDATA: + case HFS_GET_BOOT_INFO: + case HFS_SET_BOOT_INFO: + case FIOPINSWAP: + case F_MARKDEPENDENCY: + error = EINVAL; + goto out; + default: + break; } if (fp->f_type != DTYPE_VNODE) { @@ -2575,7 +2609,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) vp = (struct vnode *)fp->f_data; proc_fdunlock(p); - if ( (error = vnode_getwithref(vp)) == 0 ) { + if ((error = vnode_getwithref(vp)) == 0) { #define STK_PARAMS 128 char stkbuf[STK_PARAMS] = {0}; unsigned int size; @@ -2598,7 +2632,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } memp = NULL; - if (size > sizeof (stkbuf)) { + if (size > sizeof(stkbuf)) { if ((memp = (caddr_t)kalloc(size)) == 0) { (void)vnode_put(vp); error = ENOMEM; @@ -2615,14 +2649,15 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) error = copyin(argp, data, size); if (error) { (void)vnode_put(vp); - if (memp) + if (memp) { kfree(memp, size); + } goto outdrop; } /* Bzero the section beyond that which was needed */ if (size <= sizeof(stkbuf)) { - bzero ( (((uint8_t*)data) + size), (sizeof(stkbuf) - size)); + bzero((((uint8_t*)data) + size), (sizeof(stkbuf) - size)); } } else { /* int */ @@ -2640,9 +2675,9 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) bzero(data, size); } else if (uap->cmd & IOC_VOID) { if (is64bit) { - *(user_addr_t *)data = argp; + *(user_addr_t *)data = argp; } else { - *(uint32_t *)data = (uint32_t)argp; + *(uint32_t *)data = (uint32_t)argp; } } @@ -2651,10 +2686,12 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) (void)vnode_put(vp); /* Copy any output data to user */ - if (error == 0 && (uap->cmd & IOC_OUT) && size) + if (error == 0 && (uap->cmd & IOC_OUT) && size) { error = copyout(data, argp, size); - if (memp) + } + if (memp) { kfree(memp, size); + } } break; } @@ -2662,11 +2699,11 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) outdrop: AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1); fp_drop(p, fd, fp, 0); - return(error); + return error; out: fp_drop(p, fd, fp, 1); proc_fdunlock(p); - return(error); + return error; } @@ -2711,7 +2748,7 @@ finishdup(proc_t p, if ((ofp = fdp->fd_ofiles[old]) == NULL || (fdp->fd_ofileflags[old] & UF_RESERVED)) { fdrelse(p, new); - return (EBADF); + return EBADF; } fg_ref(ofp); @@ -2720,7 +2757,7 @@ finishdup(proc_t p, if (error) { fg_drop(ofp); fdrelse(p, new); - return (error); + return error; } #endif @@ -2733,24 +2770,27 @@ finishdup(proc_t p, if (nfp == NULL) { fg_drop(ofp); fdrelse(p, new); - return (ENOMEM); + return ENOMEM; } nfp->f_fglob = ofp->f_fglob; #if DIAGNOSTIC - if (fdp->fd_ofiles[new] != 0) + if (fdp->fd_ofiles[new] != 0) { panic("finishdup: overwriting fd_ofiles with new %d", new); - if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) + } + if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) { panic("finishdup: 
unreserved fileflags with new %d", new); + } #endif - if (new > fdp->fd_lastfile) + if (new > fdp->fd_lastfile) { fdp->fd_lastfile = new; + } *fdflags(p, new) |= fd_flags; procfdtbl_releasefd(p, new, nfp); *retval = new; - return (0); + return 0; } @@ -2768,14 +2808,14 @@ finishdup(proc_t p, * fp_lookup:EBADF Bad file descriptor * fp_guard_exception:??? Guarded file descriptor * close_internal:EBADF - * close_internal:??? Anything returnable by a per-fileops + * close_internal:??? Anything returnable by a per-fileops * close function */ int close(proc_t p, struct close_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(close_nocancel(p, (struct close_nocancel_args *)uap, retval)); + return close_nocancel(p, (struct close_nocancel_args *)uap, retval); } @@ -2790,23 +2830,23 @@ close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retv proc_fdlock(p); - if ( (error = fp_lookup(p,fd,&fp, 1)) ) { + if ((error = fp_lookup(p, fd, &fp, 1))) { proc_fdunlock(p); - return(error); + return error; } if (FP_ISGUARDED(fp, GUARD_CLOSE)) { error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE); (void) fp_drop(p, fd, fp, 1); proc_fdunlock(p); - return (error); + return error; } error = close_internal_locked(p, fd, fp, 0); proc_fdunlock(p); - return (error); + return error; } @@ -2822,7 +2862,7 @@ close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retv * * Returns: 0 Success * EBADF fd already in close wait state - * closef_locked:??? Anything returnable by a per-fileops + * closef_locked:??? Anything returnable by a per-fileops * close function * * Locks: Assumes proc_fdlock for process is held by the caller and returns @@ -2836,7 +2876,7 @@ int close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags) { struct filedesc *fdp = p->p_fd; - int error =0; + int error = 0; int resvfd = flags & FD_DUP2RESV; @@ -2854,60 +2894,66 @@ close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags) #if DIAGNOSTIC - if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0) + if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0) { panic("close_internal: unreserved fileflags with fd %d", fd); + } #endif fp->f_flags |= FP_CLOSING; - if ( (fp->f_flags & FP_AIOISSUED) || kauth_authorize_fileop_has_listeners() ) { - - proc_fdunlock(p); + if ((fp->f_flags & FP_AIOISSUED) || kauth_authorize_fileop_has_listeners()) { + proc_fdunlock(p); - if ( (fp->f_type == DTYPE_VNODE) && kauth_authorize_fileop_has_listeners() ) { - /* + if ((fp->f_type == DTYPE_VNODE) && kauth_authorize_fileop_has_listeners()) { + /* * call out to allow 3rd party notification of close. * Ignore result of kauth_authorize_fileop call. */ - if (vnode_getwithref((vnode_t)fp->f_data) == 0) { - u_int fileop_flags = 0; - if ((fp->f_flags & FP_WRITTEN) != 0) - fileop_flags |= KAUTH_FILEOP_CLOSE_MODIFIED; - kauth_authorize_fileop(fp->f_fglob->fg_cred, KAUTH_FILEOP_CLOSE, - (uintptr_t)fp->f_data, (uintptr_t)fileop_flags); + if (vnode_getwithref((vnode_t)fp->f_data) == 0) { + u_int fileop_flags = 0; + if ((fp->f_flags & FP_WRITTEN) != 0) { + fileop_flags |= KAUTH_FILEOP_CLOSE_MODIFIED; + } + kauth_authorize_fileop(fp->f_fglob->fg_cred, KAUTH_FILEOP_CLOSE, + (uintptr_t)fp->f_data, (uintptr_t)fileop_flags); vnode_put((vnode_t)fp->f_data); } } - if (fp->f_flags & FP_AIOISSUED) - /* + if (fp->f_flags & FP_AIOISSUED) { + /* * cancel all async IO requests that can be cancelled. 
*/ - _aio_close( p, fd ); + _aio_close( p, fd ); + } proc_fdlock(p); } - if (fd < fdp->fd_knlistsize) + if (fd < fdp->fd_knlistsize) { knote_fdclose(p, fd); - - if (fp->f_flags & FP_WAITEVENT) - (void)waitevent_close(p, fp); + } fileproc_drain(p, fp); + if (fp->f_flags & FP_WAITEVENT) { + (void)waitevent_close(p, fp); + } + if (resvfd == 0) { _fdrelse(p, fd); } else { procfdtbl_reservefd(p, fd); } - if (ENTR_SHOULDTRACE && fp->f_type == DTYPE_SOCKET) + if (ENTR_SHOULDTRACE && fp->f_type == DTYPE_SOCKET) { KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_END, fd, 0, (int64_t)VM_KERNEL_ADDRPERM(fp->f_data)); + } error = closef_locked(fp, fp->f_fglob, p); - if ((fp->f_flags & FP_WAITCLOSE) == FP_WAITCLOSE) + if ((fp->f_flags & FP_WAITCLOSE) == FP_WAITCLOSE) { wakeup(&fp->f_flags); + } fp->f_flags &= ~(FP_WAITCLOSE | FP_CLOSING); proc_fdunlock(p); @@ -2918,12 +2964,13 @@ close_internal_locked(proc_t p, int fd, struct fileproc *fp, int flags) #if DIAGNOSTIC if (resvfd != 0) { - if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0) + if ((fdp->fd_ofileflags[fd] & UF_RESERVED) == 0) { panic("close with reserved fd returns with freed fd:%d: proc: %p", fd, p); + } } #endif - return(error); + return error; } @@ -2987,7 +3034,7 @@ fstat1(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsec AUDIT_ARG(fd, fd); if ((error = fp_lookup(p, fd, &fp, 0)) != 0) { - return(error); + return error; } type = fp->f_type; data = fp->f_data; @@ -2996,7 +3043,6 @@ fstat1(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsec sbptr = (void *)&source; switch (type) { - case DTYPE_VNODE: if ((error = vnode_getwithref((vnode_t)data)) == 0) { /* @@ -3006,7 +3052,7 @@ fstat1(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsec */ if (xsecurity == USER_ADDR_NULL) { error = vn_stat_noauth((vnode_t)data, sbptr, NULL, isstat64, ctx, - fp->f_fglob->fg_cred); + fp->f_fglob->fg_cred); } else { error = vn_stat((vnode_t)data, sbptr, &fsec, isstat64, ctx); } @@ -3075,9 +3121,8 @@ fstat1(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsec /* caller wants extended security information? */ if (xsecurity != USER_ADDR_NULL) { - /* did we get any? 
*/ - if (fsec == KAUTH_FILESEC_NONE) { + if (fsec == KAUTH_FILESEC_NONE) { if (susize(xsecurity_size, 0) != 0) { error = EFAULT; goto out; @@ -3093,15 +3138,17 @@ fstat1(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity, user_addr_t xsec } /* if the caller supplied enough room, copy out to it */ - if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec)) + if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec)) { error = copyout(fsec, xsecurity, KAUTH_FILESEC_COPYSIZE(fsec)); + } } } out: fp_drop(p, fd, fp, 0); - if (fsec != NULL) + if (fsec != NULL) { kauth_filesec_free(fsec); - return (error); + } + return error; } @@ -3124,7 +3171,7 @@ out: int fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retval) { - return(fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 0)); + return fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 0); } @@ -3143,7 +3190,7 @@ fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retv int fstat(proc_t p, struct fstat_args *uap, __unused int32_t *retval) { - return(fstat1(p, uap->fd, uap->ub, 0, 0, 0)); + return fstat1(p, uap->fd, uap->ub, 0, 0, 0); } @@ -3166,7 +3213,7 @@ fstat(proc_t p, struct fstat_args *uap, __unused int32_t *retval) int fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t *retval) { - return(fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 1)); + return fstat1(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 1); } @@ -3186,7 +3233,7 @@ fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t * int fstat64(proc_t p, struct fstat64_args *uap, __unused int32_t *retval) { - return(fstat1(p, uap->fd, uap->ub, 0, 0, 1)); + return fstat1(p, uap->fd, uap->ub, 0, 0, 1); } @@ -3221,13 +3268,13 @@ fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval) AUDIT_ARG(fd, uap->fd); - if ( (error = fp_lookup(p, fd, &fp, 0)) ) - return(error); + if ((error = fp_lookup(p, fd, &fp, 0))) { + return error; + } type = fp->f_type; data = fp->f_data; switch (type) { - case DTYPE_SOCKET: if (uap->name != _PC_PIPE_BUF) { error = EINVAL; @@ -3249,8 +3296,8 @@ fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval) case DTYPE_VNODE: vp = (struct vnode *)data; - if ( (error = vnode_getwithref(vp)) == 0) { - AUDIT_ARG(vnpath, vp, ARG_VNODE1); + if ((error = vnode_getwithref(vp)) == 0) { + AUDIT_ARG(vnpath, vp, ARG_VNODE1); error = vn_pathconf(vp, uap->name, retval, vfs_context_current()); @@ -3261,12 +3308,11 @@ fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval) default: error = EINVAL; goto out; - } /*NOTREACHED*/ out: fp_drop(p, fd, fp, 0); - return(error); + return error; } /* @@ -3315,38 +3361,44 @@ fdalloc(proc_t p, int want, int *result) lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles); for (;;) { last = min(fdp->fd_nfiles, lim); - if ((i = want) < fdp->fd_freefile) + if ((i = want) < fdp->fd_freefile) { i = fdp->fd_freefile; + } for (; i < last; i++) { if (fdp->fd_ofiles[i] == NULL && !(fdp->fd_ofileflags[i] & UF_RESERVED)) { procfdtbl_reservefd(p, i); - if (i > fdp->fd_lastfile) + if (i > fdp->fd_lastfile) { fdp->fd_lastfile = i; - if (want <= fdp->fd_freefile) + } + if (want <= fdp->fd_freefile) { fdp->fd_freefile = i; + } *result = i; - return (0); + return 0; } } /* * No space in current array. Expand? 
*/ - if (fdp->fd_nfiles >= lim) - return (EMFILE); - if (fdp->fd_nfiles < NDEXTENT) + if (fdp->fd_nfiles >= lim) { + return EMFILE; + } + if (fdp->fd_nfiles < NDEXTENT) { numfiles = NDEXTENT; - else + } else { numfiles = 2 * fdp->fd_nfiles; + } /* Enforce lim */ - if (numfiles > lim) + if (numfiles > lim) { numfiles = lim; + } proc_fdunlock(p); MALLOC_ZONE(newofiles, struct fileproc **, - numfiles * OFILESIZE, M_OFILETABL, M_WAITOK); + numfiles * OFILESIZE, M_OFILETABL, M_WAITOK); proc_fdlock(p); if (newofiles == NULL) { - return (ENOMEM); + return ENOMEM; } if (fdp->fd_nfiles >= numfiles) { FREE_ZONE(newofiles, numfiles * OFILESIZE, M_OFILETABL); @@ -3359,15 +3411,15 @@ fdalloc(proc_t p, int want, int *result) */ oldnfiles = fdp->fd_nfiles; (void) memcpy(newofiles, fdp->fd_ofiles, - oldnfiles * sizeof(*fdp->fd_ofiles)); + oldnfiles * sizeof(*fdp->fd_ofiles)); (void) memset(&newofiles[oldnfiles], 0, - (numfiles - oldnfiles) * sizeof(*fdp->fd_ofiles)); + (numfiles - oldnfiles) * sizeof(*fdp->fd_ofiles)); (void) memcpy(newofileflags, fdp->fd_ofileflags, - oldnfiles * sizeof(*fdp->fd_ofileflags)); + oldnfiles * sizeof(*fdp->fd_ofileflags)); (void) memset(&newofileflags[oldnfiles], 0, - (numfiles - oldnfiles) * - sizeof(*fdp->fd_ofileflags)); + (numfiles - oldnfiles) * + sizeof(*fdp->fd_ofileflags)); ofiles = fdp->fd_ofiles; fdp->fd_ofiles = newofiles; fdp->fd_ofileflags = newofileflags; @@ -3404,14 +3456,17 @@ fdavail(proc_t p, int n) int i, lim; lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles); - if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) - return (1); + if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) { + return 1; + } fpp = &fdp->fd_ofiles[fdp->fd_freefile]; flags = &fdp->fd_ofileflags[fdp->fd_freefile]; - for (i = fdp->fd_nfiles - fdp->fd_freefile; --i >= 0; fpp++, flags++) - if (*fpp == NULL && !(*flags & UF_RESERVED) && --n <= 0) - return (1); - return (0); + for (i = fdp->fd_nfiles - fdp->fd_freefile; --i >= 0; fpp++, flags++) { + if (*fpp == NULL && !(*flags & UF_RESERVED) && --n <= 0) { + return 1; + } + } + return 0; } @@ -3463,13 +3518,14 @@ fdgetf_noref(proc_t p, int fd, struct fileproc **resultfp) struct fileproc *fp; if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - return (EBADF); + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + return EBADF; } - if (resultfp) + if (resultfp) { *resultfp = fp; - return (0); + } + return 0; } @@ -3508,24 +3564,26 @@ fp_getfvp(proc_t p, int fd, struct fileproc **resultfp, struct vnode **resultvp) proc_fdlock_spin(p); if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) { proc_fdunlock(p); - return (EBADF); + return EBADF; } if (fp->f_type != DTYPE_VNODE) { proc_fdunlock(p); - return(ENOTSUP); + return ENOTSUP; } fp->f_iocount++; - if (resultfp) + if (resultfp) { *resultfp = fp; - if (resultvp) + } + if (resultvp) { *resultvp = (struct vnode *)fp->f_data; + } proc_fdunlock(p); - return (0); + return 0; } @@ -3560,33 +3618,36 @@ fp_getfvp(proc_t p, int fd, struct fileproc **resultfp, struct vnode **resultvp) */ int fp_getfvpandvid(proc_t p, int fd, struct fileproc **resultfp, - struct vnode **resultvp, uint32_t *vidp) + struct vnode **resultvp, uint32_t *vidp) { struct filedesc *fdp = p->p_fd; struct fileproc *fp; proc_fdlock_spin(p); if (fd < 0 || fd >= fdp->fd_nfiles 
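The expansion arm above sizes the new table before copying, and because MALLOC_ZONE runs with the fd lock dropped, the fdp->fd_nfiles >= numfiles re-check after relocking catches a concurrent grower (whose work is then kept and the fresh block freed). The sizing rule alone, as a sketch:

/*
 * Sketch: fdalloc()'s growth policy -- start at NDEXTENT, then double,
 * clamped to the RLIMIT_NOFILE/maxfiles limit computed earlier.
 */
static int
next_fd_table_size_example(int nfiles, int lim)
{
	int numfiles = (nfiles < NDEXTENT) ? NDEXTENT : 2 * nfiles;

	if (numfiles > lim) {
		numfiles = lim;         /* never allocate past the limit */
	}
	return numfiles;
}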
||
-	    (fp = fdp->fd_ofiles[fd]) == NULL ||
-	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
+	    (fp = fdp->fd_ofiles[fd]) == NULL ||
+	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
 		proc_fdunlock(p);
-		return (EBADF);
+		return EBADF;
 	}
 	if (fp->f_type != DTYPE_VNODE) {
 		proc_fdunlock(p);
-		return(ENOTSUP);
+		return ENOTSUP;
 	}
 	fp->f_iocount++;
-	if (resultfp)
+	if (resultfp) {
 		*resultfp = fp;
-	if (resultvp)
+	}
+	if (resultvp) {
 		*resultvp = (struct vnode *)fp->f_data;
-	if (vidp)
+	}
+	if (vidp) {
 		*vidp = (uint32_t)vnode_vid((struct vnode *)fp->f_data);
+	}
 	proc_fdunlock(p);
-	return (0);
+	return 0;
 }
@@ -3617,31 +3678,33 @@ fp_getfvpandvid(proc_t p, int fd, struct fileproc **resultfp,
  */
 int
 fp_getfsock(proc_t p, int fd, struct fileproc **resultfp,
-    struct socket **results)
+    struct socket **results)
 {
 	struct filedesc *fdp = p->p_fd;
 	struct fileproc *fp;
 	proc_fdlock_spin(p);
 	if (fd < 0 || fd >= fdp->fd_nfiles ||
-	    (fp = fdp->fd_ofiles[fd]) == NULL ||
-	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
+	    (fp = fdp->fd_ofiles[fd]) == NULL ||
+	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
 		proc_fdunlock(p);
-		return (EBADF);
+		return EBADF;
 	}
 	if (fp->f_type != DTYPE_SOCKET) {
 		proc_fdunlock(p);
-		return(EOPNOTSUPP);
+		return EOPNOTSUPP;
 	}
 	fp->f_iocount++;
-	if (resultfp)
+	if (resultfp) {
 		*resultfp = fp;
-	if (results)
+	}
+	if (results) {
 		*results = (struct socket *)fp->f_data;
+	}
 	proc_fdunlock(p);
-	return (0);
+	return 0;
 }
@@ -3672,31 +3735,33 @@ fp_getfsock(proc_t p, int fd, struct fileproc **resultfp,
  */
 int
 fp_getfkq(proc_t p, int fd, struct fileproc **resultfp,
-    struct kqueue **resultkq)
+    struct kqueue **resultkq)
 {
 	struct filedesc *fdp = p->p_fd;
 	struct fileproc *fp;
 	proc_fdlock_spin(p);
-	if ( fd < 0 || fd >= fdp->fd_nfiles ||
-	    (fp = fdp->fd_ofiles[fd]) == NULL ||
-	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
+	if (fd < 0 || fd >= fdp->fd_nfiles ||
+	    (fp = fdp->fd_ofiles[fd]) == NULL ||
+	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
 		proc_fdunlock(p);
-		return (EBADF);
+		return EBADF;
 	}
 	if (fp->f_type != DTYPE_KQUEUE) {
 		proc_fdunlock(p);
-		return(EBADF);
+		return EBADF;
 	}
 	fp->f_iocount++;
-	if (resultfp)
+	if (resultfp) {
 		*resultfp = fp;
-	if (resultkq)
+	}
+	if (resultkq) {
 		*resultkq = (struct kqueue *)fp->f_data;
+	}
 	proc_fdunlock(p);
-	return (0);
+	return 0;
 }
@@ -3729,32 +3794,33 @@ fp_getfkq(proc_t p, int fd, struct fileproc **resultfp,
  */
 int
 fp_getfpshm(proc_t p, int fd, struct fileproc **resultfp,
-    struct pshmnode **resultpshm)
+    struct pshmnode **resultpshm)
 {
 	struct filedesc *fdp = p->p_fd;
 	struct fileproc *fp;
 	proc_fdlock_spin(p);
 	if (fd < 0 || fd >= fdp->fd_nfiles ||
-	    (fp = fdp->fd_ofiles[fd]) == NULL ||
-	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
+	    (fp = fdp->fd_ofiles[fd]) == NULL ||
+	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
 		proc_fdunlock(p);
-		return (EBADF);
+		return EBADF;
 	}
 	if (fp->f_type != DTYPE_PSXSHM) {
 		proc_fdunlock(p);
-		return(EBADF);
+		return EBADF;
 	}
 	fp->f_iocount++;
-	if (resultfp)
+	if (resultfp) {
 		*resultfp = fp;
-	if (resultpshm)
+	}
+	if (resultpshm) {
 		*resultpshm = (struct pshmnode *)fp->f_data;
+	}
 	proc_fdunlock(p);
-	return (0);
+	return 0;
 }
@@ -3796,31 +3862,33 @@ fp_getfpshm(proc_t p, int fd, struct fileproc **resultfp,
  */
 int
 fp_getfpsem(proc_t p, int fd, struct fileproc **resultfp,
-    struct psemnode **resultpsem)
+    struct psemnode **resultpsem)
 {
 	struct filedesc *fdp = p->p_fd;
 	struct fileproc *fp;
 	proc_fdlock_spin(p);
 	if (fd < 0 || fd >= fdp->fd_nfiles ||
-	    (fp =
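fp_getfvp(), fp_getfsock(), fp_getfkq(), fp_getfpshm(), and fp_getfpsem() above are all instances of one shape. A sketch of the common skeleton, for illustration (the real helpers differ only in the DTYPE_* checked and the errno returned on a type mismatch):

/*
 * Sketch: the shared skeleton of the fp_getf*() helpers -- validate the
 * fd under the spin-held fd lock, check the descriptor type, take an
 * I/O reference, and hand back the fileproc.
 */
static int
fp_get_typed_example(proc_t p, int fd, int dtype, struct fileproc **resultfp)
{
	struct filedesc *fdp = p->p_fd;
	struct fileproc *fp;

	proc_fdlock_spin(p);
	if (fd < 0 || fd >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[fd]) == NULL ||
	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
		proc_fdunlock(p);
		return EBADF;
	}
	if (fp->f_type != dtype) {
		proc_fdunlock(p);
		return EBADF;           /* the real helpers vary this errno */
	}
	fp->f_iocount++;                /* pin the fileproc for the caller */
	if (resultfp) {
		*resultfp = fp;
	}
	proc_fdunlock(p);
	return 0;
}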
fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) { proc_fdunlock(p); - return (EBADF); + return EBADF; } if (fp->f_type != DTYPE_PSXSEM) { proc_fdunlock(p); - return(EBADF); + return EBADF; } fp->f_iocount++; - if (resultfp) + if (resultfp) { *resultfp = fp; - if (resultpsem) + } + if (resultpsem) { *resultpsem = (struct psemnode *)fp->f_data; + } proc_fdunlock(p); - return (0); + return 0; } @@ -3851,31 +3919,33 @@ fp_getfpsem(proc_t p, int fd, struct fileproc **resultfp, */ int fp_getfpipe(proc_t p, int fd, struct fileproc **resultfp, - struct pipe **resultpipe) + struct pipe **resultpipe) { struct filedesc *fdp = p->p_fd; struct fileproc *fp; proc_fdlock_spin(p); if (fd < 0 || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) { proc_fdunlock(p); - return (EBADF); + return EBADF; } if (fp->f_type != DTYPE_PIPE) { proc_fdunlock(p); - return(EBADF); + return EBADF; } fp->f_iocount++; - if (resultfp) + if (resultfp) { *resultfp = fp; - if (resultpipe) + } + if (resultpipe) { *resultpipe = (struct pipe *)fp->f_data; + } proc_fdunlock(p); - return (0); + return 0; } @@ -3909,23 +3979,27 @@ fp_lookup(proc_t p, int fd, struct fileproc **resultfp, int locked) struct filedesc *fdp = p->p_fd; struct fileproc *fp; - if (!locked) + if (!locked) { proc_fdlock_spin(p); + } if (fd < 0 || fdp == NULL || fd >= fdp->fd_nfiles || - (fp = fdp->fd_ofiles[fd]) == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - if (!locked) + (fp = fdp->fd_ofiles[fd]) == NULL || + (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + if (!locked) { proc_fdunlock(p); - return (EBADF); + } + return EBADF; } fp->f_iocount++; - if (resultfp) + if (resultfp) { *resultfp = fp; - if (!locked) + } + if (!locked) { proc_fdunlock(p); + } - return (0); + return 0; } @@ -3953,8 +4027,9 @@ fp_tryswap(proc_t p, int fd, struct fileproc *nfp) proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); - if (0 != (error = fp_lookup(p, fd, &fp, 1))) - return (error); + if (0 != (error = fp_lookup(p, fd, &fp, 1))) { + return error; + } /* * At this point, our caller (change_guardedfd_np) has * one f_iocount reference, and we just took another @@ -3963,11 +4038,10 @@ fp_tryswap(proc_t p, int fd, struct fileproc *nfp) if (fp->f_iocount < 2) { panic("f_iocount too small %d", fp->f_iocount); } else if (2 == fp->f_iocount) { - /* Copy the contents of *fp, preserving the "type" of *nfp */ nfp->f_flags = (nfp->f_flags & FP_TYPEMASK) | - (fp->f_flags & ~FP_TYPEMASK); + (fp->f_flags & ~FP_TYPEMASK); nfp->f_iocount = fp->f_iocount; nfp->f_fglob = fp->f_fglob; nfp->f_wset = fp->f_wset; @@ -3991,7 +4065,7 @@ fp_tryswap(proc_t p, int fd, struct fileproc *nfp) } (void) fp_drop(p, fd, fp, 1); } - return (error); + return error; } @@ -4017,7 +4091,7 @@ fp_tryswap(proc_t p, int fd, struct fileproc *nfp) int fp_drop_written(proc_t p, int fd, struct fileproc *fp) { - int error; + int error; proc_fdlock_spin(p); @@ -4027,7 +4101,7 @@ fp_drop_written(proc_t p, int fd, struct fileproc *fp) proc_fdunlock(p); - return (error); + return error; } @@ -4053,7 +4127,7 @@ fp_drop_written(proc_t p, int fd, struct fileproc *fp) int fp_drop_event(proc_t p, int fd, struct fileproc *fp) { - int error; + int error; proc_fdlock_spin(p); @@ -4063,7 +4137,7 @@ fp_drop_event(proc_t p, int fd, struct fileproc *fp) proc_fdunlock(p); - return (error); + return error; } @@ -4094,35 +4168,40 @@ int fp_drop(proc_t p, int fd, struct fileproc *fp, int 
locked)
 {
 	struct filedesc *fdp = p->p_fd;
-	int needwakeup = 0;
+	int needwakeup = 0;
-	if (!locked)
+	if (!locked) {
 		proc_fdlock_spin(p);
-	if ((fp == FILEPROC_NULL) && (fd < 0 || fd >= fdp->fd_nfiles ||
-	    (fp = fdp->fd_ofiles[fd]) == NULL ||
-	    ((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
-	    !(fdp->fd_ofileflags[fd] & UF_CLOSING)))) {
-		if (!locked)
+	}
+	if ((fp == FILEPROC_NULL) && (fd < 0 || fd >= fdp->fd_nfiles ||
+	    (fp = fdp->fd_ofiles[fd]) == NULL ||
+	    ((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
+	    !(fdp->fd_ofileflags[fd] & UF_CLOSING)))) {
+		if (!locked) {
 			proc_fdunlock(p);
-		return (EBADF);
+		}
+		return EBADF;
 	}
 	fp->f_iocount--;
 	if (fp->f_iocount == 0) {
-		if (fp->f_flags & FP_SELCONFLICT)
+		if (fp->f_flags & FP_SELCONFLICT) {
 			fp->f_flags &= ~FP_SELCONFLICT;
+		}
 		if (p->p_fpdrainwait) {
 			p->p_fpdrainwait = 0;
 			needwakeup = 1;
 		}
 	}
-	if (!locked)
+	if (!locked) {
 		proc_fdunlock(p);
-	if (needwakeup)
-		wakeup(&p->p_fpdrainwait);
+	}
+	if (needwakeup) {
+		wakeup(&p->p_fpdrainwait);
+	}
-	return (0);
+	return 0;
 }
@@ -4169,20 +4248,21 @@ file_vnode(int fd, struct vnode **vpp)
 	int error;
 	proc_fdlock_spin(p);
-	if ( (error = fp_lookup(p, fd, &fp, 1)) ) {
+	if ((error = fp_lookup(p, fd, &fp, 1))) {
 		proc_fdunlock(p);
-		return(error);
+		return error;
 	}
 	if (fp->f_type != DTYPE_VNODE) {
-		fp_drop(p, fd, fp,1);
+		fp_drop(p, fd, fp, 1);
 		proc_fdunlock(p);
-		return(EINVAL);
+		return EINVAL;
 	}
-	if (vpp != NULL)
+	if (vpp != NULL) {
 		*vpp = (struct vnode *)fp->f_data;
+	}
 	proc_fdunlock(p);
-	return(0);
+	return 0;
 }
@@ -4231,25 +4311,27 @@ file_vnode_withvid(int fd, struct vnode **vpp, uint32_t * vidp)
 	int error;
 	proc_fdlock_spin(p);
-	if ( (error = fp_lookup(p, fd, &fp, 1)) ) {
+	if ((error = fp_lookup(p, fd, &fp, 1))) {
 		proc_fdunlock(p);
-		return(error);
+		return error;
 	}
 	if (fp->f_type != DTYPE_VNODE) {
-		fp_drop(p, fd, fp,1);
+		fp_drop(p, fd, fp, 1);
 		proc_fdunlock(p);
-		return(EINVAL);
+		return EINVAL;
 	}
 	vp = (struct vnode *)fp->f_data;
-	if (vpp != NULL)
+	if (vpp != NULL) {
 		*vpp = vp;
+	}
-	if ((vidp != NULL) && (vp != NULLVP))
+	if ((vidp != NULL) && (vp != NULLVP)) {
 		*vidp = (uint32_t)vp->v_id;
+	}
 	proc_fdunlock(p);
-	return(0);
+	return 0;
 }
@@ -4295,19 +4377,19 @@ file_socket(int fd, struct socket **sp)
 	int error;
 	proc_fdlock_spin(p);
-	if ( (error = fp_lookup(p, fd, &fp, 1)) ) {
+	if ((error = fp_lookup(p, fd, &fp, 1))) {
 		proc_fdunlock(p);
-		return(error);
+		return error;
 	}
 	if (fp->f_type != DTYPE_SOCKET) {
-		fp_drop(p, fd, fp,1);
+		fp_drop(p, fd, fp, 1);
 		proc_fdunlock(p);
-		return(ENOTSOCK);
+		return ENOTSOCK;
 	}
 	*sp = (struct socket *)fp->f_data;
 	proc_fdunlock(p);
-	return(0);
+	return 0;
 }
@@ -4337,21 +4419,20 @@ file_socket(int fd, struct socket **sp)
 int
 file_flags(int fd, int *flags)
 {
 	proc_t p = current_proc();
 	struct fileproc *fp;
 	int error;
 	proc_fdlock_spin(p);
-	if ( (error = fp_lookup(p, fd, &fp, 1)) ) {
+	if ((error = fp_lookup(p, fd, &fp, 1))) {
 		proc_fdunlock(p);
-		return(error);
+		return error;
 	}
 	*flags = (int)fp->f_flag;
-	fp_drop(p, fd, fp,1);
+	fp_drop(p, fd, fp, 1);
 	proc_fdunlock(p);
-	return(0);
+	return 0;
 }
@@ -4396,21 +4477,22 @@ file_drop(int fd)
 {
 	struct fileproc *fp;
 	proc_t p = current_proc();
-	int needwakeup = 0;
+	int needwakeup = 0;
 	proc_fdlock_spin(p);
 	if (fd < 0 || fd >= p->p_fd->fd_nfiles ||
-	    (fp = p->p_fd->fd_ofiles[fd]) == NULL ||
-	    ((p->p_fd->fd_ofileflags[fd] & UF_RESERVED) &&
-	    !(p->p_fd->fd_ofileflags[fd] & UF_CLOSING))) {
+	    (fp = p->p_fd->fd_ofiles[fd]) == NULL ||
+	    ((p->p_fd->fd_ofileflags[fd] & UF_RESERVED) &&
+	    !(p->p_fd->fd_ofileflags[fd] &
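fp_drop() above is the producer half of a rendezvous with fileproc_drain(): the drainer sleeps on p->p_fpdrainwait until every outstanding I/O reference is returned. Reduced to its essentials, as a sketch only:

/*
 * Sketch: dropping the last I/O reference wakes a drainer blocked in
 * fileproc_drain(). The wakeup is issued after the fd lock is released
 * so the woken thread does not immediately block on that lock.
 */
static void
drop_iocount_example(proc_t p, struct fileproc *fp)
{
	int needwakeup = 0;

	proc_fdlock_spin(p);
	fp->f_iocount--;
	if (fp->f_iocount == 0 && p->p_fpdrainwait) {
		p->p_fpdrainwait = 0;   /* claim the pending wakeup */
		needwakeup = 1;
	}
	proc_fdunlock(p);
	if (needwakeup) {
		wakeup(&p->p_fpdrainwait);
	}
}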
UF_CLOSING))) { proc_fdunlock(p); - return (EBADF); + return EBADF; } - fp->f_iocount --; + fp->f_iocount--; if (fp->f_iocount == 0) { - if (fp->f_flags & FP_SELCONFLICT) + if (fp->f_flags & FP_SELCONFLICT) { fp->f_flags &= ~FP_SELCONFLICT; + } if (p->p_fpdrainwait) { p->p_fpdrainwait = 0; @@ -4419,9 +4501,10 @@ file_drop(int fd) } proc_fdunlock(p); - if (needwakeup) - wakeup(&p->p_fpdrainwait); - return(0); + if (needwakeup) { + wakeup(&p->p_fpdrainwait); + } + return 0; } @@ -4462,8 +4545,8 @@ static int falloc_withalloc_locked(proc_t, struct fileproc **, int *, int falloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx) { - return (falloc_withalloc(p, resultfp, resultfd, ctx, - fileproc_alloc_init, NULL)); + return falloc_withalloc(p, resultfp, resultfd, ctx, + fileproc_alloc_init, NULL); } /* @@ -4480,7 +4563,7 @@ falloc_withalloc(proc_t p, struct fileproc **resultfp, int *resultfd, resultfp, resultfd, ctx, fp_zalloc, arg, 1); proc_fdunlock(p); - return (error); + return error; } /* @@ -4532,10 +4615,10 @@ static const struct fileops uninitops; */ int falloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, - vfs_context_t ctx, int locked) + vfs_context_t ctx, int locked) { - return (falloc_withalloc_locked(p, resultfp, resultfd, ctx, - fileproc_alloc_init, NULL, locked)); + return falloc_withalloc_locked(p, resultfp, resultfd, ctx, + fileproc_alloc_init, NULL, locked); } static int @@ -4547,25 +4630,29 @@ falloc_withalloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, struct fileglob *fg; int error, nfd; - if (!locked) + if (!locked) { proc_fdlock(p); - if ( (error = fdalloc(p, 0, &nfd)) ) { - if (!locked) + } + if ((error = fdalloc(p, 0, &nfd))) { + if (!locked) { proc_fdunlock(p); - return (error); + } + return error; } if (nfiles >= maxfiles) { - if (!locked) + if (!locked) { proc_fdunlock(p); + } tablefull("file"); - return (ENFILE); + return ENFILE; } #if CONFIG_MACF error = mac_file_check_create(proc_ucred(p)); if (error) { - if (!locked) + if (!locked) { proc_fdunlock(p); - return (error); + } + return error; } #endif @@ -4579,16 +4666,18 @@ falloc_withalloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, fp = (*fp_zalloc)(crarg); if (fp == NULL) { - if (locked) + if (locked) { proc_fdlock(p); - return (ENOMEM); + } + return ENOMEM; } MALLOC_ZONE(fg, struct fileglob *, sizeof(struct fileglob), M_FILEGLOB, M_WAITOK); if (fg == NULL) { fileproc_free(fp); - if (locked) + if (locked) { proc_fdlock(p); - return (ENOMEM); + } + return ENOMEM; } bzero(fg, sizeof(struct fileglob)); lck_mtx_init(&fg->fg_lock, file_lck_grp, file_lck_attr); @@ -4615,15 +4704,18 @@ falloc_withalloc_locked(proc_t p, struct fileproc **resultfp, int *resultfd, p->p_fd->fd_ofiles[nfd] = fp; - if (!locked) + if (!locked) { proc_fdunlock(p); + } - if (resultfp) + if (resultfp) { *resultfp = fp; - if (resultfd) + } + if (resultfd) { *resultfd = nfd; + } - return (0); + return 0; } @@ -4712,7 +4804,6 @@ fdexec(proc_t p, short flags, int self_exec) assert(fdp->fd_knhashmask == 0); for (i = fdp->fd_lastfile; i >= 0; i--) { - struct fileproc *fp = fdp->fd_ofiles[i]; char *flagp = &fdp->fd_ofileflags[i]; @@ -4723,22 +4814,25 @@ fdexec(proc_t p, short flags, int self_exec) * except files marked explicitly as "inherit" and * not marked close-on-exec. 
*/ - if ((*flagp & (UF_EXCLOSE|UF_INHERIT)) != UF_INHERIT) + if ((*flagp & (UF_EXCLOSE | UF_INHERIT)) != UF_INHERIT) { *flagp |= UF_EXCLOSE; + } *flagp &= ~UF_INHERIT; } if ( - ((*flagp & (UF_RESERVED|UF_EXCLOSE)) == UF_EXCLOSE) + ((*flagp & (UF_RESERVED | UF_EXCLOSE)) == UF_EXCLOSE) #if CONFIG_MACF - || (fp && mac_file_check_inherit(proc_ucred(p), fp->f_fglob)) + || (fp && mac_file_check_inherit(proc_ucred(p), fp->f_fglob)) #endif - ) { + ) { procfdtbl_clearfd(p, i); - if (i == fdp->fd_lastfile && i > 0) + if (i == fdp->fd_lastfile && i > 0) { fdp->fd_lastfile--; - if (i < fdp->fd_freefile) + } + if (i < fdp->fd_freefile) { fdp->fd_freefile = i; + } /* * Wait for any third party viewers (e.g., lsof) @@ -4763,12 +4857,13 @@ fdexec(proc_t p, short flags, int self_exec) dealloc_kq = fdp->fd_wqkqueue; fdp->fd_wqkqueue = NULL; } - + proc_fdunlock(p); /* Anything to free? */ - if (dealloc_kq) + if (dealloc_kq) { kqueue_dealloc(dealloc_kq); + } } @@ -4814,12 +4909,13 @@ fdcopy(proc_t p, vnode_t uth_cdir) struct filedesc *newfdp, *fdp = p->p_fd; int i; struct fileproc *ofp, *fp; - vnode_t v_dir; + vnode_t v_dir; MALLOC_ZONE(newfdp, struct filedesc *, - sizeof(*newfdp), M_FILEDESC, M_WAITOK); - if (newfdp == NULL) - return(NULL); + sizeof(*newfdp), M_FILEDESC, M_WAITOK); + if (newfdp == NULL) { + return NULL; + } proc_fdlock(p); @@ -4833,8 +4929,9 @@ fdcopy(proc_t p, vnode_t uth_cdir) * inherit the new current working directory from the current thread * instead, before we take our references. */ - if (uth_cdir != NULLVP) + if (uth_cdir != NULLVP) { newfdp->fd_cdir = uth_cdir; + } /* * For both fd_cdir and fd_rdir make sure we get @@ -4844,16 +4941,18 @@ fdcopy(proc_t p, vnode_t uth_cdir) * and allows us to do the vnode_rele only on * a properly referenced vp */ - if ( (v_dir = newfdp->fd_cdir) ) { - if (vnode_getwithref(v_dir) == 0) { - if ( (vnode_ref(v_dir)) ) - newfdp->fd_cdir = NULL; + if ((v_dir = newfdp->fd_cdir)) { + if (vnode_getwithref(v_dir) == 0) { + if ((vnode_ref(v_dir))) { + newfdp->fd_cdir = NULL; + } vnode_put(v_dir); - } else - newfdp->fd_cdir = NULL; + } else { + newfdp->fd_cdir = NULL; + } } if (newfdp->fd_cdir == NULL && fdp->fd_cdir) { - /* + /* * we couldn't get a new reference on * the current working directory being * inherited... we might as well drop @@ -4862,22 +4961,23 @@ fdcopy(proc_t p, vnode_t uth_cdir) * it useless... by dropping it we'll * be that much closer to recycling it */ - vnode_rele(fdp->fd_cdir); + vnode_rele(fdp->fd_cdir); fdp->fd_cdir = NULL; } - if ( (v_dir = newfdp->fd_rdir) ) { + if ((v_dir = newfdp->fd_rdir)) { if (vnode_getwithref(v_dir) == 0) { - if ( (vnode_ref(v_dir)) ) - newfdp->fd_rdir = NULL; + if ((vnode_ref(v_dir))) { + newfdp->fd_rdir = NULL; + } vnode_put(v_dir); } else { - newfdp->fd_rdir = NULL; + newfdp->fd_rdir = NULL; } } /* Coming from a chroot environment and unable to get a reference... */ if (newfdp->fd_rdir == NULL && fdp->fd_rdir) { - /* + /* * We couldn't get a new reference on * the chroot directory being * inherited... this is fatal, since @@ -4885,10 +4985,11 @@ fdcopy(proc_t p, vnode_t uth_cdir) * escape from a chroot environment by * the new process. */ - if (newfdp->fd_cdir) - vnode_rele(newfdp->fd_cdir); + if (newfdp->fd_cdir) { + vnode_rele(newfdp->fd_cdir); + } FREE_ZONE(newfdp, sizeof *newfdp, M_FILEDESC); - return(NULL); + return NULL; } /* @@ -4897,30 +4998,33 @@ fdcopy(proc_t p, vnode_t uth_cdir) * additional memory for the number of descriptors currently * in use. 
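The POSIX_SPAWN_CLOEXEC_DEFAULT handling in fdexec() above reduces to a small truth table. A sketch of the per-fd decision, for illustration (the helper name is hypothetical):

/*
 * Sketch: fdexec()'s per-fd rule under POSIX_SPAWN_CLOEXEC_DEFAULT --
 * only an fd marked "inherit" and not already close-on-exec survives;
 * everything else is forced close-on-exec, and the transient
 * UF_INHERIT bit is cleared either way.
 */
static char
cloexec_default_flags_example(char flags)
{
	if ((flags & (UF_EXCLOSE | UF_INHERIT)) != UF_INHERIT) {
		flags |= UF_EXCLOSE;
	}
	return flags & ~UF_INHERIT;
}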
*/ - if (newfdp->fd_lastfile < NDFILE) + if (newfdp->fd_lastfile < NDFILE) { i = NDFILE; - else { + } else { /* * Compute the smallest multiple of NDEXTENT needed * for the file descriptors currently in use, * allowing the table to shrink. */ i = newfdp->fd_nfiles; - while (i > 1 + 2 * NDEXTENT && i > 1 + newfdp->fd_lastfile * 2) + while (i > 1 + 2 * NDEXTENT && i > 1 + newfdp->fd_lastfile * 2) { i /= 2; + } } proc_fdunlock(p); MALLOC_ZONE(newfdp->fd_ofiles, struct fileproc **, - i * OFILESIZE, M_OFILETABL, M_WAITOK); + i * OFILESIZE, M_OFILETABL, M_WAITOK); if (newfdp->fd_ofiles == NULL) { - if (newfdp->fd_cdir) - vnode_rele(newfdp->fd_cdir); - if (newfdp->fd_rdir) + if (newfdp->fd_cdir) { + vnode_rele(newfdp->fd_cdir); + } + if (newfdp->fd_rdir) { vnode_rele(newfdp->fd_rdir); + } FREE_ZONE(newfdp, sizeof(*newfdp), M_FILEDESC); - return(NULL); + return NULL; } (void) memset(newfdp->fd_ofiles, 0, i * OFILESIZE); proc_fdlock(p); @@ -4933,40 +5037,20 @@ fdcopy(proc_t p, vnode_t uth_cdir) char *flags; (void) memcpy(newfdp->fd_ofiles, fdp->fd_ofiles, - (newfdp->fd_lastfile + 1) * sizeof(*fdp->fd_ofiles)); + (newfdp->fd_lastfile + 1) * sizeof(*fdp->fd_ofiles)); (void) memcpy(newfdp->fd_ofileflags, fdp->fd_ofileflags, - (newfdp->fd_lastfile + 1) * sizeof(*fdp->fd_ofileflags)); + (newfdp->fd_lastfile + 1) * sizeof(*fdp->fd_ofileflags)); - /* - * kq descriptors cannot be copied. - */ - if (newfdp->fd_knlistsize != -1) { - fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile]; - flags = &newfdp->fd_ofileflags[newfdp->fd_lastfile]; - for (i = newfdp->fd_lastfile; - i >= 0; i--, fpp--, flags--) { - if (*flags & UF_RESERVED) - continue; /* (removed below) */ - if (*fpp != NULL && (*fpp)->f_type == DTYPE_KQUEUE) { - *fpp = NULL; - *flags = 0; - if (i < newfdp->fd_freefile) - newfdp->fd_freefile = i; - } - if (*fpp == NULL && i == newfdp->fd_lastfile && i > 0) - newfdp->fd_lastfile--; - } - } - fpp = newfdp->fd_ofiles; - flags = newfdp->fd_ofileflags; - - for (i = newfdp->fd_lastfile + 1; --i >= 0; fpp++, flags++) + fpp = &newfdp->fd_ofiles[newfdp->fd_lastfile]; + flags = &newfdp->fd_ofileflags[newfdp->fd_lastfile]; + for (i = newfdp->fd_lastfile; i >= 0; i--, fpp--, flags--) { if ((ofp = *fpp) != NULL && 0 == (ofp->f_fglob->fg_lflags & FG_CONFINED) && - 0 == (*flags & (UF_FORKCLOSE|UF_RESERVED))) { + 0 == (*flags & (UF_FORKCLOSE | UF_RESERVED))) { #if DEBUG - if (FILEPROC_TYPE(ofp) != FTYPE_SIMPLE) + if (FILEPROC_TYPE(ofp) != FTYPE_SIMPLE) { panic("complex fileproc"); + } #endif fp = fileproc_alloc_init(NULL); if (fp == NULL) { @@ -4983,11 +5067,18 @@ fdcopy(proc_t p, vnode_t uth_cdir) *fpp = fp; } } else { - if (i < newfdp->fd_freefile) - newfdp->fd_freefile = i; *fpp = NULL; *flags = 0; } + if (*fpp == NULL) { + if (i == newfdp->fd_lastfile && i > 0) { + newfdp->fd_lastfile--; + } + if (i < newfdp->fd_freefile) { + newfdp->fd_freefile = i; + } + } + } } proc_fdunlock(p); @@ -5005,7 +5096,7 @@ fdcopy(proc_t p, vnode_t uth_cdir) lck_mtx_init(&newfdp->fd_kqhashlock, proc_kqhashlock_grp, proc_lck_attr); lck_mtx_init(&newfdp->fd_knhashlock, proc_knhashlock_grp, proc_lck_attr); - return (newfdp); + return newfdp; } @@ -5034,16 +5125,17 @@ fdfree(proc_t p) proc_fdlock(p); if (p == kernproc || NULL == (fdp = p->p_fd)) { - proc_fdunlock(p); + proc_fdunlock(p); return; } extern struct filedesc filedesc0; - if (&filedesc0 == fdp) + if (&filedesc0 == fdp) { panic("filedesc0"); + } - /* + /* * deallocate all the knotes up front and claim empty * tables to make any subsequent kqueue closes faster. 
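The fdcopy() sizing loop near the top of the hunk above lets a sparse parent table shrink in the child. Isolated as a sketch:

/*
 * Sketch: fdcopy()'s table sizing -- the child's table starts at
 * NDFILE, or halves down from the parent's size while staying above
 * both 1 + 2*NDEXTENT and 1 + 2*fd_lastfile, i.e. the smallest table
 * that still covers the descriptors actually in use.
 */
static int
fdcopy_table_size_example(int parent_nfiles, int lastfile)
{
	int i = parent_nfiles;

	if (lastfile < NDFILE) {
		return NDFILE;
	}
	while (i > 1 + 2 * NDEXTENT && i > 1 + lastfile * 2) {
		i /= 2;                 /* shrink toward the working set */
	}
	return i;
}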
*/ @@ -5061,14 +5153,15 @@ fdfree(proc_t p) if (fdp->fd_nfiles > 0 && fdp->fd_ofiles) { for (i = fdp->fd_lastfile; i >= 0; i--) { if ((fp = fdp->fd_ofiles[i]) != NULL) { - - if (fdp->fd_ofileflags[i] & UF_RESERVED) - panic("fdfree: found fp with UF_RESERVED"); + if (fdp->fd_ofileflags[i] & UF_RESERVED) { + panic("fdfree: found fp with UF_RESERVED"); + } procfdtbl_reservefd(p, i); - if (fp->f_flags & FP_WAITEVENT) + if (fp->f_flags & FP_WAITEVENT) { (void)waitevent_close(p, fp); + } (void) closef_locked(fp, fp->f_fglob, p); fileproc_free(fp); } @@ -5085,21 +5178,25 @@ fdfree(proc_t p) proc_fdunlock(p); - if (dealloc_kq) + if (dealloc_kq) { kqueue_dealloc(dealloc_kq); + } - if (fdp->fd_cdir) + if (fdp->fd_cdir) { vnode_rele(fdp->fd_cdir); - if (fdp->fd_rdir) + } + if (fdp->fd_rdir) { vnode_rele(fdp->fd_rdir); + } proc_fdlock_spin(p); p->p_fd = NULL; proc_fdunlock(p); if (fdp->fd_kqhash) { - for (uint32_t j = 0; j <= fdp->fd_kqhashmask; j++) + for (uint32_t j = 0; j <= fdp->fd_kqhashmask; j++) { assert(SLIST_EMPTY(&fdp->fd_kqhash[j])); + } FREE(fdp->fd_kqhash, M_KQUEUE); } @@ -5119,7 +5216,7 @@ fdfree(proc_t p) * p Pointer to proc structure * * Returns: 0 Success - * closef_finish:??? Anything returnable by a per-fileops + * closef_finish:??? Anything returnable by a per-fileops * close function * * Note: Decrements reference count on file structure; if this was the @@ -5138,14 +5235,15 @@ closef_locked(struct fileproc *fp, struct fileglob *fg, proc_t p) int error; if (fg == NULL) { - return (0); + return 0; } /* Set up context with cred stashed in fg */ - if (p == current_proc()) + if (p == current_proc()) { context.vc_thread = current_thread(); - else + } else { context.vc_thread = NULL; + } context.vc_ucred = fg->fg_cred; /* @@ -5166,7 +5264,7 @@ closef_locked(struct fileproc *fp, struct fileglob *fg, proc_t p) lf.l_type = F_UNLCK; vp = (struct vnode *)fg->fg_data; - if ( (error = vnode_getwithref(vp)) == 0 ) { + if ((error = vnode_getwithref(vp)) == 0) { (void) VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context, NULL); (void)vnode_put(vp); } @@ -5177,21 +5275,24 @@ closef_locked(struct fileproc *fp, struct fileglob *fg, proc_t p) if (fg->fg_count > 0) { lck_mtx_unlock(&fg->fg_lock); - return (0); + return 0; } #if DIAGNOSTIC - if (fg->fg_count != 0) + if (fg->fg_count != 0) { panic("fg %p: being freed with bad fg_count (%d)", fg, fg->fg_count); + } #endif - if (fp && (fp->f_flags & FP_WRITTEN)) - fg->fg_flag |= FWASWRITTEN; + if (fp && (fp->f_flags & FP_WRITTEN)) { + fg->fg_flag |= FWASWRITTEN; + } fg->fg_lflags |= FG_TERM; lck_mtx_unlock(&fg->fg_lock); - if (p) + if (p) { proc_fdunlock(p); + } /* Since we ensure that fg->fg_ops is always initialized, * it is safe to invoke fo_close on the fg */ @@ -5199,10 +5300,11 @@ closef_locked(struct fileproc *fp, struct fileglob *fg, proc_t p) fg_free(fg); - if (p) + if (p) { proc_fdlock(p); + } - return(error); + return error; } @@ -5224,7 +5326,7 @@ closef_locked(struct fileproc *fp, struct fileglob *fg, proc_t p) * close to prevent fd's from being closed out from under * operations currently in progress and blocked * - * See Also: file_vnode(), file_socket(), file_drop(), and the cautions + * See Also: file_vnode(), file_socket(), file_drop(), and the cautions * regarding their use and interaction with this function. 
*/ void @@ -5232,39 +5334,41 @@ fileproc_drain(proc_t p, struct fileproc * fp) { struct vfs_context context; - context.vc_thread = proc_thread(p); /* XXX */ + context.vc_thread = proc_thread(p); /* XXX */ context.vc_ucred = fp->f_fglob->fg_cred; - fp->f_iocount-- ; /* (the one the close holds) */ + fp->f_iocount--; /* (the one the close holds) */ while (fp->f_iocount) { - - lck_mtx_convert_spin(&p->p_fdmlock); + lck_mtx_convert_spin(&p->p_fdmlock); if (fp->f_fglob->fg_ops->fo_drain) { (*fp->f_fglob->fg_ops->fo_drain)(fp, &context); } if ((fp->f_flags & FP_INSELECT) == FP_INSELECT) { if (waitq_wakeup64_all((struct waitq *)fp->f_wset, NO_EVENT64, - THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES) == KERN_INVALID_ARGUMENT) + THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES) == KERN_INVALID_ARGUMENT) { panic("bad wait queue for waitq_wakeup64_all %p (fp:%p)", fp->f_wset, fp); + } } if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) { if (waitq_wakeup64_all(&select_conflict_queue, NO_EVENT64, - THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES) == KERN_INVALID_ARGUMENT) + THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES) == KERN_INVALID_ARGUMENT) { panic("bad select_conflict_queue"); + } } p->p_fpdrainwait = 1; msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO, "fpdrain", NULL); - } #if DIAGNOSTIC - if ((fp->f_flags & FP_INSELECT) != 0) + if ((fp->f_flags & FP_INSELECT) != 0) { panic("FP_INSELECT set on drained fp"); + } #endif - if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) + if ((fp->f_flags & FP_SELCONFLICT) == FP_SELCONFLICT) { fp->f_flags &= ~FP_SELCONFLICT; + } } @@ -5287,13 +5391,13 @@ fileproc_drain(proc_t p, struct fileproc * fp) int fp_free(proc_t p, int fd, struct fileproc * fp) { - proc_fdlock_spin(p); + proc_fdlock_spin(p); fdrelse(p, fd); - proc_fdunlock(p); + proc_fdunlock(p); fg_free(fp->f_fglob); fileproc_free(fp); - return(0); + return 0; } @@ -5329,13 +5433,13 @@ flock(proc_t p, struct flock_args *uap, __unused int32_t *retval) struct vnode *vp; struct flock lf; vfs_context_t ctx = vfs_context_current(); - int error=0; + int error = 0; AUDIT_ARG(fd, uap->fd); - if ( (error = fp_getfvp(p, fd, &fp, &vp)) ) { - return(error); + if ((error = fp_getfvp(p, fd, &fp, &vp))) { + return error; } - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { goto out1; } AUDIT_ARG(vnpath, vp, ARG_VNODE1); @@ -5349,30 +5453,31 @@ flock(proc_t p, struct flock_args *uap, __unused int32_t *retval) error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx, NULL); goto out; } - if (how & LOCK_EX) + if (how & LOCK_EX) { lf.l_type = F_WRLCK; - else if (how & LOCK_SH) + } else if (how & LOCK_SH) { lf.l_type = F_RDLCK; - else { - error = EBADF; + } else { + error = EBADF; goto out; } #if CONFIG_MACF error = mac_file_check_lock(proc_ucred(p), fp->f_fglob, F_SETLK, &lf); - if (error) + if (error) { goto out; + } #endif error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, (how & LOCK_NB ? F_FLOCK : F_FLOCK | F_WAIT), ctx, NULL); - if (!error) + if (!error) { fp->f_flag |= FHASLOCK; + } out: (void)vnode_put(vp); out1: fp_drop(p, fd, fp, 0); - return(error); - + return error; } /* @@ -5381,14 +5486,14 @@ out1: * Description: Obtain a Mach send right for a given file descriptor. * * Parameters: p Process calling fileport - * uap->fd The fd to reference - * uap->portnamep User address at which to place port name. + * uap->fd The fd to reference + * uap->portnamep User address at which to place port name. * * Returns: 0 Success. - * EBADF Bad file descriptor. 
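flock(2) above folds the LOCK_* request into a whole-file advisory-lock record before calling VNOP_ADVLOCK(). A sketch of just the mapping; the l_whence/l_start/l_len initialization follows the usual whole-file convention and is an assumption here, as those lines are outside the visible hunk:

/*
 * Sketch: how flock()'s operation bits select the advisory lock type
 * handed to VNOP_ADVLOCK() above. LOCK_NB only changes whether F_WAIT
 * is added when the lock is applied.
 */
static int
flock_to_advisory_example(int how, struct flock *lf)
{
	lf->l_whence = SEEK_SET;        /* whole-file lock */
	lf->l_start = 0;
	lf->l_len = 0;

	if (how & LOCK_UN) {
		lf->l_type = F_UNLCK;
	} else if (how & LOCK_EX) {
		lf->l_type = F_WRLCK;
	} else if (how & LOCK_SH) {
		lf->l_type = F_RDLCK;
	} else {
		return EBADF;           /* mirrors the error path above */
	}
	return 0;
}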
- * EINVAL File descriptor had type that cannot be sent, misc. other errors. - * EFAULT Address at which to store port name is not valid. - * EAGAIN Resource shortage. + * EBADF Bad file descriptor. + * EINVAL File descriptor had type that cannot be sent, misc. other errors. + * EFAULT Address at which to store port name is not valid. + * EAGAIN Resource shortage. * * Implicit returns: * On success, name of send right is stored at user-specified address. @@ -5486,7 +5591,7 @@ fileport_releasefg(struct fileglob *fg) * Description: Obtain the file descriptor for a given Mach send right. * * Parameters: p Process calling fileport - * uap->port Name of send right to file port. + * uap->port Name of send right to file port. * * Returns: 0 Success * EINVAL Invalid Mach port name, or port is not for a file. @@ -5500,7 +5605,7 @@ int fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval) { struct fileglob *fg; - struct fileproc *fp = FILEPROC_NULL; + struct fileproc *fp = FILEPROC_NULL; ipc_port_t port = IPC_PORT_NULL; mach_port_name_t send = uap->port; kern_return_t res; @@ -5508,7 +5613,7 @@ fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval) int err; res = ipc_object_copyin(get_task_ipcspace(p->task), - send, MACH_MSG_TYPE_COPY_SEND, &port); + send, MACH_MSG_TYPE_COPY_SEND, &port); if (res != KERN_SUCCESS) { err = EINVAL; @@ -5530,7 +5635,7 @@ fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval) fp->f_fglob = fg; fg_ref(fp); - proc_fdlock(p); + proc_fdlock(p); err = fdalloc(p, 0, &fd); if (err != 0) { proc_fdunlock(p); @@ -5598,17 +5703,16 @@ dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error) fp = fdp->fd_ofiles[indx]; if (dfd < 0 || dfd >= fdp->fd_nfiles || - (wfp = fdp->fd_ofiles[dfd]) == NULL || wfp == fp || - (fdp->fd_ofileflags[dfd] & UF_RESERVED)) { - - proc_fdunlock(p); - return (EBADF); + (wfp = fdp->fd_ofiles[dfd]) == NULL || wfp == fp || + (fdp->fd_ofileflags[dfd] & UF_RESERVED)) { + proc_fdunlock(p); + return EBADF; } #if CONFIG_MACF myerror = mac_file_check_dup(proc_ucred(p), wfp->f_fglob, dfd); if (myerror) { proc_fdunlock(p); - return (myerror); + return myerror; } #endif /* @@ -5627,34 +5731,36 @@ dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error) case ENODEV: if (FP_ISGUARDED(wfp, GUARD_DUP)) { proc_fdunlock(p); - return (EPERM); + return EPERM; } /* * Check that the mode the file is being opened for is a * subset of the mode of the existing descriptor. */ - if (((flags & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) { - proc_fdunlock(p); - return (EACCES); + if (((flags & (FREAD | FWRITE)) | wfp->f_flag) != wfp->f_flag) { + proc_fdunlock(p); + return EACCES; } - if (indx > fdp->fd_lastfile) + if (indx > fdp->fd_lastfile) { fdp->fd_lastfile = indx; + } (void)fg_ref(wfp); - if (fp->f_fglob) - fg_free(fp->f_fglob); + if (fp->f_fglob) { + fg_free(fp->f_fglob); + } fp->f_fglob = wfp->f_fglob; fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd] | - (flags & O_CLOEXEC) ? UF_EXCLOSE : 0; + (flags & O_CLOEXEC) ? 
UF_EXCLOSE : 0; - proc_fdunlock(p); - return (0); + proc_fdunlock(p); + return 0; default: - proc_fdunlock(p); - return (error); + proc_fdunlock(p); + return error; } /* NOTREACHED */ } @@ -5682,12 +5788,14 @@ fg_ref(struct fileproc * fp) lck_mtx_lock_spin(&fg->fg_lock); #if DIAGNOSTIC - if ((fp->f_flags & ~((unsigned int)FP_VALID_FLAGS)) != 0) + if ((fp->f_flags & ~((unsigned int)FP_VALID_FLAGS)) != 0) { panic("fg_ref: invalid bits on fp %p", fp); + } - if (fg->fg_count == 0) + if (fg->fg_count == 0) { panic("fg_ref: adding fgcount to zeroed fg: fp %p fg %p", fp, fg); + } #endif fg->fg_count++; lck_mtx_unlock(&fg->fg_lock); @@ -5750,7 +5858,7 @@ fg_insertuipc_mark(struct fileglob * fg) insert = TRUE; } lck_mtx_unlock(&fg->fg_lock); - return (insert); + return insert; } /* @@ -5815,7 +5923,7 @@ fg_removeuipc_mark(struct fileglob * fg) remove = TRUE; } lck_mtx_unlock(&fg->fg_lock); - return (remove); + return remove; } /* @@ -5868,7 +5976,7 @@ fg_removeuipc(struct fileglob * fg) int fo_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) { - return ((*fp->f_ops->fo_read)(fp, uio, flags, ctx)); + return (*fp->f_ops->fo_read)(fp, uio, flags, ctx); } @@ -5889,7 +5997,7 @@ fo_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) int fo_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) { - return((*fp->f_ops->fo_write)(fp, uio, flags, ctx)); + return (*fp->f_ops->fo_write)(fp, uio, flags, ctx); } @@ -5923,7 +6031,7 @@ fo_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) proc_fdunlock(vfs_context_proc(ctx)); error = (*fp->f_ops->fo_ioctl)(fp, com, data, ctx); proc_fdlock(vfs_context_proc(ctx)); - return(error); + return error; } @@ -5944,7 +6052,7 @@ fo_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) int fo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) { - return((*fp->f_ops->fo_select)(fp, which, wql, ctx)); + return (*fp->f_ops->fo_select)(fp, which, wql, ctx); } @@ -5964,7 +6072,7 @@ fo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) int fo_close(struct fileglob *fg, vfs_context_t ctx) { - return((*fg->fg_ops->fo_close)(fg, ctx)); + return (*fg->fg_ops->fo_close)(fg, ctx); } @@ -5984,9 +6092,9 @@ fo_close(struct fileglob *fg, vfs_context_t ctx) */ int fo_kqfilter(struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx) + struct kevent_internal_s *kev, vfs_context_t ctx) { - return ((*fp->f_ops->fo_kqfilter)(fp, kn, kev, ctx)); + return (*fp->f_ops->fo_kqfilter)(fp, kn, kev, ctx); } /* @@ -6004,7 +6112,7 @@ file_issendable(proc_t p, struct fileproc *fp) case DTYPE_PIPE: case DTYPE_PSXSHM: case DTYPE_NETPOLICY: - return (0 == (fp->f_fglob->fg_lflags & FG_CONFINED)); + return 0 == (fp->f_fglob->fg_lflags & FG_CONFINED); default: /* DTYPE_KQUEUE, DTYPE_FSEVENTS, DTYPE_PSXSEM */ return FALSE; @@ -6017,11 +6125,12 @@ fileproc_alloc_init(__unused void *arg) { struct fileproc *fp; - MALLOC_ZONE(fp, struct fileproc *, sizeof (*fp), M_FILEPROC, M_WAITOK); - if (fp) - bzero(fp, sizeof (*fp)); + MALLOC_ZONE(fp, struct fileproc *, sizeof(*fp), M_FILEPROC, M_WAITOK); + if (fp) { + bzero(fp, sizeof(*fp)); + } - return (fp); + return fp; } void @@ -6029,7 +6138,7 @@ fileproc_free(struct fileproc *fp) { switch (FILEPROC_TYPE(fp)) { case FTYPE_SIMPLE: - FREE_ZONE(fp, sizeof (*fp), M_FILEPROC); + FREE_ZONE(fp, sizeof(*fp), M_FILEPROC); break; case FTYPE_GUARDED: guarded_fileproc_free(fp); diff --git a/bsd/kern/kern_ecc.c 
b/bsd/kern/kern_ecc.c index c2f018e52..5d1dbfad7 100644 --- a/bsd/kern/kern_ecc.c +++ b/bsd/kern/kern_ecc.c @@ -1,30 +1,30 @@ /* -* Copyright (c) 2013 Apple Inc. All rights reserved. -* -* @APPLE_OSREFERENCE_LICENSE_HEADER_START@ -* -* This file contains Original Code and/or Modifications of Original Code -* as defined in and that are subject to the Apple Public Source License -* Version 2.0 (the 'License'). You may not use this file except in -* compliance with the License. The rights granted to you under the License -* may not be used to create, or enable the creation or redistribution of, -* unlawful or unlicensed copies of an Apple operating system, or to -* circumvent, violate, or enable the circumvention or violation of, any -* terms of an Apple operating system software license agreement. -* -* Please obtain a copy of the License at -* http://www.opensource.apple.com/apsl/ and read it before using this file. -* -* The Original Code and all software distributed under the License are -* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER -* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, -* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. -* Please see the License for the specific language governing rights and -* limitations under the License. -* -* @APPLE_OSREFERENCE_LICENSE_HEADER_END@ -*/ + * Copyright (c) 2013 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
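Stepping back to the dupfdopen() hunk in kern_descrip.c above: the patch keeps the flag-propagation expression as it was, and it is worth noting how C parses it, since | binds tighter than the conditional operator. A sketch of the two readings, illustration only (the patch does not change the behavior):

static void
dupfdopen_flags_parse_example(struct filedesc *fdp, int indx, int dfd,
    int flags)
{
	/* As kept by the patch, the expression parses like this: the
	 * whole OR is the condition, so any nonzero source flag (or
	 * O_CLOEXEC) yields exactly UF_EXCLOSE. */
	fdp->fd_ofileflags[indx] =
	    (fdp->fd_ofileflags[dfd] | (flags & O_CLOEXEC)) ? UF_EXCLOSE : 0;

	/* The fully parenthesized "inherit flags, maybe add
	 * close-on-exec" reading would instead be: */
	fdp->fd_ofileflags[indx] =
	    fdp->fd_ofileflags[dfd] | ((flags & O_CLOEXEC) ? UF_EXCLOSE : 0);
}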
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ #include #include #include @@ -38,8 +38,8 @@ #include static int -get_ecc_data_handler(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, - struct sysctl_req *req) +get_ecc_data_handler(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, + struct sysctl_req *req) { struct ecc_event ev; int changed, retval; @@ -49,10 +49,10 @@ get_ecc_data_handler(__unused struct sysctl_oid *oidp, __unused void *arg1, __un } if (KERN_SUCCESS != ecc_log_get_next_event(&ev)) { - /* - * EAGAIN would be better, but sysctl infrastructure + /* + * EAGAIN would be better, but sysctl infrastructure * interprets that */ - return EBUSY; + return EBUSY; } retval = sysctl_io_opaque(req, &ev, sizeof(ev), &changed); @@ -62,6 +62,6 @@ get_ecc_data_handler(__unused struct sysctl_oid *oidp, __unused void *arg1, __un } SYSCTL_PROC(_kern, OID_AUTO, next_ecc_event, - CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLTYPE_STRUCT, - 0, 0, get_ecc_data_handler, - "-", ""); + CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLTYPE_STRUCT, + 0, 0, get_ecc_data_handler, + "-", ""); diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c index ecffc8db7..c45fbcffa 100644 --- a/bsd/kern/kern_event.c +++ b/bsd/kern/kern_event.c @@ -75,6 +75,7 @@ #include #include #include +#include // SYS_* constants #include #include #include @@ -118,26 +119,26 @@ #include #endif -extern thread_t port_name_to_thread(mach_port_name_t port_name); /* osfmk/kern/ipc_tt.h */ +extern thread_t port_name_to_thread(mach_port_name_t port_name); /* osfmk/kern/ipc_tt.h */ extern mach_port_name_t ipc_entry_name_mask(mach_port_name_t name); /* osfmk/ipc/ipc_entry.h */ #define KEV_EVTID(code) BSDDBG_CODE(DBG_BSD_KEVENT, (code)) MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); -#define KQ_EVENT NO_EVENT64 +#define KQ_EVENT NO_EVENT64 static int kqueue_read(struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); + int flags, vfs_context_t ctx); static int kqueue_write(struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); + int flags, vfs_context_t ctx); static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data, - vfs_context_t ctx); + vfs_context_t ctx); static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id, - vfs_context_t ctx); + vfs_context_t ctx); static int kqueue_close(struct fileglob *fg, vfs_context_t ctx); static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx); + struct kevent_internal_s *kev, vfs_context_t ctx); static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx); static const struct fileops kqueueops = { @@ -153,33 +154,33 @@ static const struct fileops kqueueops = { static void kevent_put_kq(struct proc *p, kqueue_id_t id, struct fileproc *fp, struct kqueue *kq); static int kevent_internal(struct proc *p, - kqueue_id_t id, kqueue_id_t *id_out, - user_addr_t changelist, int nchanges, - user_addr_t eventlist, int nevents, - user_addr_t data_out, uint64_t data_available, - unsigned int flags, user_addr_t utimeout, - kqueue_continue_t continuation, - int32_t *retval); + kqueue_id_t id, kqueue_id_t *id_out, + user_addr_t changelist, int nchanges, + user_addr_t eventlist, int nevents, + user_addr_t data_out, uint64_t data_available, + unsigned int flags, user_addr_t utimeout, + kqueue_continue_t continuation, + int32_t *retval); static int kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, - struct 
proc *p, unsigned int flags); + struct proc *p, unsigned int flags); static int kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, - struct proc *p, unsigned int flags); + struct proc *p, unsigned int flags); char * kevent_description(struct kevent_internal_s *kevp, char *s, size_t n); static int kevent_register_wait_prepare(struct knote *kn, struct kevent_internal_s *kev); static void kevent_register_wait_block(struct turnstile *ts, thread_t handoff_thread, - struct knote_lock_ctx *knlc, thread_continue_t cont, - struct _kevent_register *cont_args) __dead2; + struct knote_lock_ctx *knlc, thread_continue_t cont, + struct _kevent_register *cont_args) __dead2; static void kevent_register_wait_return(struct _kevent_register *cont_args) __dead2; static void kevent_register_wait_cleanup(struct knote *kn); static inline void kqueue_release_last(struct proc *p, kqueue_t kqu); static void kqueue_interrupt(struct kqueue *kq); static int kevent_callback(struct kqueue *kq, struct kevent_internal_s *kevp, - void *data); + void *data); static void kevent_continue(struct kqueue *kq, void *data, int error); static void kqueue_scan_continue(void *contp, wait_result_t wait_result); static int kqueue_process(struct kqueue *kq, kevent_callback_t callback, void *callback_data, - struct filt_process_s *process_data, int *countp); + struct filt_process_s *process_data, int *countp); static int kqueue_queue_empty(struct kqueue *kq, kq_index_t qos_index); static struct kqtailq *kqueue_get_suppressed_queue(kqueue_t kq, struct knote *kn); @@ -230,10 +231,10 @@ static void kqworkloop_request_help(struct kqworkloop *kqwl, kq_index_t qos_inde static int kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags); static int knote_process(struct knote *kn, kevent_callback_t callback, void *callback_data, - struct filt_process_s *process_data); + struct filt_process_s *process_data); static int kq_add_knote(struct kqueue *kq, struct knote *kn, - struct knote_lock_ctx *knlc, struct proc *p); + struct knote_lock_ctx *knlc, struct proc *p); static struct knote *kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_internal_s *kev, bool is_fd, struct proc *p); static void knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc); @@ -254,7 +255,7 @@ static void knote_unsuppress(struct knote *kn); static void knote_wakeup(struct knote *kn); static bool knote_should_apply_qos_override(struct kqueue *kq, struct knote *kn, - int result, thread_qos_t *qos_out); + int result, thread_qos_t *qos_out); static void knote_apply_qos_override(struct knote *kn, kq_index_t qos_index); static void knote_adjust_qos(struct kqueue *kq, struct knote *kn, int result); static void knote_reset_priority(struct knote *kn, pthread_priority_t pp); @@ -288,7 +289,7 @@ kevent_debug_flags(void) } #endif -#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) +#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) /* placeholder for not-yet-implemented filters */ static int filt_badattach(struct knote *kn, struct kevent_internal_s *kev); @@ -353,7 +354,7 @@ SECURITY_READ_ONLY_EARLY(static struct filterops *) sysfilt_ops[EVFILTID_MAX] = [~EVFILT_MACHPORT] = &machport_filtops, [~EVFILT_FS] = &fs_filtops, [~EVFILT_USER] = &user_filtops, - &bad_filtops, + &bad_filtops, [~EVFILT_VM] = &bad_filtops, [~EVFILT_SOCK] = &file_filtops, #if CONFIG_MEMORYSTATUS @@ -403,7 +404,7 @@ kqr_kqueue(proc_t p, struct kqrequest *kqr) } else { kqu.kqwq = (struct kqworkq *)p->p_fd->fd_wqkqueue; assert(kqr >= 
kqu.kqwq->kqwq_request && - kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS); + kqr < kqu.kqwq->kqwq_request + KQWQ_NBUCKETS); } return kqu; } @@ -411,7 +412,7 @@ kqr_kqueue(proc_t p, struct kqrequest *kqr) static inline boolean_t is_workqueue_thread(thread_t thread) { - return (thread_get_tag(thread) & THREAD_TAG_WORKQUEUE); + return thread_get_tag(thread) & THREAD_TAG_WORKQUEUE; } /* @@ -513,7 +514,8 @@ knhash_unlock(proc_t p) #if DEBUG || DEVELOPMENT __attribute__((noinline, not_tail_called, disable_tail_calls)) -void knote_lock_ctx_chk(struct knote_lock_ctx *knlc) +void +knote_lock_ctx_chk(struct knote_lock_ctx *knlc) { /* evil hackery to make sure no one forgets to unlock */ assert(knlc->knlc_state == KNOTE_LOCK_CTX_UNLOCKED); @@ -525,7 +527,9 @@ knote_lock_ctx_find(struct kqueue *kq, struct knote *kn) { struct knote_lock_ctx *ctx; LIST_FOREACH(ctx, &kq->kq_knlocks, knlc_le) { - if (ctx->knlc_knote == kn) return ctx; + if (ctx->knlc_knote == kn) { + return ctx; + } } panic("knote lock context not found: %p", kn); __builtin_trap(); @@ -535,7 +539,7 @@ knote_lock_ctx_find(struct kqueue *kq, struct knote *kn) __attribute__((noinline)) static bool __result_use_check knote_lock_slow(struct kqueue *kq, struct knote *kn, - struct knote_lock_ctx *knlc, int kqlocking) + struct knote_lock_ctx *knlc, int kqlocking) { kqlock_held(kq); @@ -553,7 +557,7 @@ knote_lock_slow(struct kqueue *kq, struct knote *kn, if (thread_handoff_deallocate(owner_thread) == THREAD_RESTART) { if (kqlocking == KNOTE_KQ_LOCK_ALWAYS || - kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { + kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { kqlock(kq); } #if DEBUG || DEVELOPMENT @@ -563,10 +567,10 @@ knote_lock_slow(struct kqueue *kq, struct knote *kn, return false; } #if DEBUG || DEVELOPMENT - assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED); + assert(knlc->knlc_state == KNOTE_LOCK_CTX_LOCKED); #endif if (kqlocking == KNOTE_KQ_LOCK_ALWAYS || - kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) { + kqlocking == KNOTE_KQ_LOCK_ON_SUCCESS) { kqlock(kq); } return true; @@ -581,7 +585,7 @@ knote_lock_slow(struct kqueue *kq, struct knote *kn, */ static bool __result_use_check knote_lock(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, - int kqlocking) + int kqlocking) { kqlock_held(kq); @@ -609,7 +613,7 @@ knote_lock(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, #endif if (kqlocking == KNOTE_KQ_UNLOCK || - kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { + kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { kqunlock(kq); } return true; @@ -624,7 +628,7 @@ knote_lock(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, */ static void knote_unlock(struct kqueue *kq, struct knote *kn, - struct knote_lock_ctx *knlc, int flags) + struct knote_lock_ctx *knlc, int flags) { kqlock_held(kq); @@ -679,7 +683,7 @@ knote_unlock(struct kqueue *kq, struct knote *kn, */ static void knote_unlock_cancel(struct kqueue *kq, struct knote *kn, - struct knote_lock_ctx *knlc, int kqlocking) + struct knote_lock_ctx *knlc, int kqlocking) { kqlock_held(kq); @@ -691,7 +695,7 @@ knote_unlock_cancel(struct kqueue *kq, struct knote *kn, kn->kn_status &= ~KN_LOCKED; if (kqlocking == KNOTE_KQ_UNLOCK || - kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { + kqlocking == KNOTE_KQ_LOCK_ON_FAILURE) { kqunlock(kq); } if (!TAILQ_EMPTY(&knlc->knlc_head)) { @@ -714,8 +718,9 @@ knote_call_filter_event(struct kqueue *kq, struct knote *kn, long hint) kqlock_held(kq); - if (kn->kn_status & (KN_DROPPING | KN_VANISHED)) + if (kn->kn_status & (KN_DROPPING | KN_VANISHED)) { return; + } 
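Before the kn_inuse bump that follows, a note on the locking dance in knote_call_filter_event(): the filter's f_event hook runs with the kq lock dropped, so kn_inuse pins the knote while a concurrent dropper waits for it to drain. A sketch of the protocol under that assumption, with a hypothetical wrapper name:

/*
 * Sketch: the kn_inuse pin used around filter callbacks. A dropper
 * that sees kn_inuse != 0 sleeps on the knote's address until the
 * last caller wakes it (compare knote_wait_for_filter_events() below).
 */
static void
call_filter_pinned_example(struct kqueue *kq, struct knote *kn, long hint,
    int (*f_event)(struct knote *, long))
{
	/* kq lock held on entry */
	kn->kn_inuse++;                 /* pin: knote cannot be freed */
	kqunlock(kq);
	(void)f_event(kn, hint);        /* may block; kq lock not held */
	kqlock(kq);
	if (--kn->kn_inuse == 0 && (kn->kn_status & KN_DROPPING)) {
		/* last user: let the dropper proceed */
		waitq_wakeup64_all((struct waitq *)&kq->kq_wqs,
		    CAST_EVENT64_T(&kn->kn_inuse),
		    THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
	}
}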
kn->kn_inuse++; kqunlock(kq); @@ -725,8 +730,9 @@ knote_call_filter_event(struct kqueue *kq, struct knote *kn, long hint) dropping = (kn->kn_status & KN_DROPPING); if (!dropping && (result & FILTER_ACTIVE)) { - if (result & FILTER_ADJUST_EVENT_QOS_BIT) + if (result & FILTER_ADJUST_EVENT_QOS_BIT) { knote_adjust_qos(kq, kn, result); + } knote_activate(kn); } @@ -742,8 +748,8 @@ knote_call_filter_event(struct kqueue *kq, struct knote *kn, long hint) } if (dropping) { waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, - CAST_EVENT64_T(&kn->kn_inuse), - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); + CAST_EVENT64_T(&kn->kn_inuse), + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); } } } @@ -765,8 +771,8 @@ knote_wait_for_filter_events(struct kqueue *kq, struct knote *kn) if (kn->kn_inuse) { wr = waitq_assert_wait64((struct waitq *)&kq->kq_wqs, - CAST_EVENT64_T(&kn->kn_inuse), - THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER); + CAST_EVENT64_T(&kn->kn_inuse), + THREAD_UNINT | THREAD_WAIT_NOREPORT, TIMEOUT_WAIT_FOREVER); } kqunlock(kq); if (wr == THREAD_WAITING) { @@ -789,9 +795,10 @@ SECURITY_READ_ONLY_EARLY(static struct filterops) file_filtops = { #pragma mark kqread_filtops -#define f_flag f_fglob->fg_flag -#define f_ops f_fglob->fg_ops -#define f_data f_fglob->fg_data +#define f_flag f_fglob->fg_flag +#define f_ops f_fglob->fg_ops +#define f_data f_fglob->fg_data +#define f_lflags f_fglob->fg_lflags static void filt_kqdetach(struct knote *kn) @@ -809,7 +816,7 @@ filt_kqueue(struct knote *kn, __unused long hint) { struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; - return (kq->kq_count > 0); + return kq->kq_count > 0; } static int @@ -840,8 +847,9 @@ filt_kqprocess(struct knote *kn, struct filt_process_s *data, struct kevent_inte res = (kn->kn_data > 0); if (res) { *kev = kn->kn_kevent; - if (kn->kn_flags & EV_CLEAR) + if (kn->kn_flags & EV_CLEAR) { kn->kn_data = 0; + } } kqunlock(kq); @@ -878,25 +886,26 @@ filt_procattach(struct knote *kn, __unused struct kevent_internal_s *kev) const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS; - if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) + if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) { do { pid_t selfpid = proc_selfpid(); - if (p->p_ppid == selfpid) - break; /* parent => ok */ - + if (p->p_ppid == selfpid) { + break; /* parent => ok */ + } if ((p->p_lflag & P_LTRACED) != 0 && - (p->p_oppid == selfpid)) - break; /* parent-in-waiting => ok */ - + (p->p_oppid == selfpid)) { + break; /* parent-in-waiting => ok */ + } proc_rele(p); knote_set_error(kn, EACCES); return 0; } while (0); + } proc_klist_lock(); - kn->kn_ptr.p_proc = p; /* store the proc handle */ + kn->kn_ptr.p_proc = p; /* store the proc handle */ KNOTE_ATTACH(&p->p_klist, kn); @@ -908,7 +917,7 @@ filt_procattach(struct knote *kn, __unused struct kevent_internal_s *kev) * only captures edge-triggered events after this point * so it can't already be fired. */ - return (0); + return 0; } @@ -970,8 +979,9 @@ filt_proc(struct knote *kn, long hint) /* * if the user is interested in this event, record it. 
*/ - if (kn->kn_sfflags & event) + if (kn->kn_sfflags & event) { kn->kn_fflags |= event; + } #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" @@ -996,11 +1006,11 @@ filt_proc(struct knote *kn, long hint) if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) { kn->kn_fflags |= NOTE_EXIT_DETAIL; if ((kn->kn_ptr.p_proc->p_lflag & - P_LTERM_DECRYPTFAIL) != 0) { + P_LTERM_DECRYPTFAIL) != 0) { kn->kn_data |= NOTE_EXIT_DECRYPTFAIL; } if ((kn->kn_ptr.p_proc->p_lflag & - P_LTERM_JETSAM) != 0) { + P_LTERM_JETSAM) != 0) { kn->kn_data |= NOTE_EXIT_MEMORY; switch (kn->kn_ptr.p_proc->p_lflag & P_JETSAM_MASK) { case P_JETSAM_VMPAGESHORTAGE: @@ -1027,14 +1037,14 @@ filt_proc(struct knote *kn, long hint) } } if ((kn->kn_ptr.p_proc->p_csflags & - CS_KILLED) != 0) { + CS_KILLED) != 0) { kn->kn_data |= NOTE_EXIT_CSERROR; } } } /* if we have any matching state, activate the knote */ - return (kn->kn_fflags != 0); + return kn->kn_fflags != 0; } static int @@ -1071,7 +1081,7 @@ filt_procprocess(struct knote *kn, struct filt_process_s *data, struct kevent_in res = (kn->kn_fflags != 0); if (res) { *kev = kn->kn_kevent; - kn->kn_flags |= EV_CLEAR; /* automatically set */ + kn->kn_flags |= EV_CLEAR; /* automatically set */ kn->kn_fflags = 0; kn->kn_data = 0; } @@ -1091,7 +1101,7 @@ SECURITY_READ_ONLY_EARLY(static struct filterops) proc_filtops = { struct filt_timer_params { uint64_t deadline; /* deadline in abs/cont time - (or 0 if NOTE_ABSOLUTE and deadline is in past) */ + * (or 0 if NOTE_ABSOLUTE and deadline is in past) */ uint64_t leeway; /* leeway in abstime, or 0 if none */ uint64_t interval; /* interval in abstime or 0 if non-repeating timer */ }; @@ -1122,7 +1132,7 @@ struct filt_timer_params { * dispatched. */ #define TIMER_IDLE 0x0 -#define TIMER_ARMED 0x1 +#define TIMER_ARMED 0x1 #define TIMER_FIRED 0x2 #define TIMER_IMMEDIATE 0x3 @@ -1155,7 +1165,7 @@ filt_timer_set_params(struct knote *kn, struct filt_timer_params *params) */ static int filt_timervalidate(const struct kevent_internal_s *kev, - struct filt_timer_params *params) + struct filt_timer_params *params) { /* * There are 5 knobs that need to be chosen for a timer registration: @@ -1192,7 +1202,7 @@ filt_timervalidate(const struct kevent_internal_s *kev, boolean_t use_abstime = FALSE; - switch (kev->fflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS|NOTE_MACHTIME)) { + switch (kev->fflags & (NOTE_SECONDS | NOTE_USECONDS | NOTE_NSECONDS | NOTE_MACHTIME)) { case NOTE_SECONDS: multiplier = NSEC_PER_SEC; break; @@ -1210,7 +1220,7 @@ filt_timervalidate(const struct kevent_internal_s *kev, multiplier = NSEC_PER_SEC / 1000; break; default: - return (EINVAL); + return EINVAL; } /* transform the leeway in kn_ext[1] to same time scale */ @@ -1219,10 +1229,11 @@ filt_timervalidate(const struct kevent_internal_s *kev, if (use_abstime) { leeway_abs = (uint64_t)kev->ext[1]; - } else { + } else { uint64_t leeway_ns; - if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) - return (ERANGE); + if (os_mul_overflow((uint64_t)kev->ext[1], multiplier, &leeway_ns)) { + return ERANGE; + } nanoseconds_to_absolutetime(leeway_ns, &leeway_abs); } @@ -1240,8 +1251,9 @@ filt_timervalidate(const struct kevent_internal_s *kev, } else { uint64_t calendar_deadline_ns; - if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) - return (ERANGE); + if (os_mul_overflow((uint64_t)kev->data, multiplier, &calendar_deadline_ns)) { + return ERANGE; + } /* calendar_deadline_ns is in nanoseconds since the epoch */ @@ 
-1274,12 +1286,13 @@ filt_timervalidate(const struct kevent_internal_s *kev, * it does not change the calendar timebase. */ - if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) + if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) { clock_continuoustime_interval_to_deadline(interval_abs, - &deadline_abs); - else + &deadline_abs); + } else { clock_absolutetime_interval_to_deadline(interval_abs, - &deadline_abs); + &deadline_abs); + } } else { deadline_abs = 0; /* cause immediate expiration */ } @@ -1310,24 +1323,26 @@ filt_timervalidate(const struct kevent_internal_s *kev, interval_abs = (uint64_t)kev->data; } else { uint64_t interval_ns; - if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) - return (ERANGE); + if (os_mul_overflow((uint64_t)kev->data, multiplier, &interval_ns)) { + return ERANGE; + } nanoseconds_to_absolutetime(interval_ns, &interval_abs); } uint64_t deadline = 0; - if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) + if (kev->fflags & NOTE_MACH_CONTINUOUS_TIME) { clock_continuoustime_interval_to_deadline(interval_abs, &deadline); - else + } else { clock_absolutetime_interval_to_deadline(interval_abs, &deadline); + } params->deadline = deadline; params->interval = interval_abs; } - return (0); + return 0; } /* @@ -1340,7 +1355,7 @@ filt_timerexpire(void *knx, __unused void *spare) int v; if (os_atomic_cmpxchgv(&kn->kn_hookid, TIMER_ARMED, TIMER_FIRED, - &v, relaxed)) { + &v, relaxed)) { // our f_event always would say FILTER_ACTIVE, // so be leaner and just do it. struct kqueue *kq = knote_get_kq(kn); @@ -1405,22 +1420,25 @@ filt_timerarm(struct knote *kn) assert(os_atomic_load(&kn->kn_hookid, relaxed) == TIMER_IDLE); - if (filter_flags & NOTE_CRITICAL) + if (filter_flags & NOTE_CRITICAL) { timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL; - else if (filter_flags & NOTE_BACKGROUND) + } else if (filter_flags & NOTE_BACKGROUND) { timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND; - else + } else { timer_flags |= THREAD_CALL_DELAY_USER_NORMAL; + } - if (filter_flags & NOTE_LEEWAY) + if (filter_flags & NOTE_LEEWAY) { timer_flags |= THREAD_CALL_DELAY_LEEWAY; + } - if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) + if (filter_flags & NOTE_MACH_CONTINUOUS_TIME) { timer_flags |= THREAD_CALL_CONTINUOUS; + } os_atomic_store(&kn->kn_hookid, TIMER_ARMED, relaxed); thread_call_enter_delayed_with_leeway((thread_call_t)kn->kn_hook, NULL, - deadline, leeway, timer_flags); + deadline, leeway, timer_flags); } /* @@ -1439,8 +1457,8 @@ filt_timerattach(struct knote *kn, struct kevent_internal_s *kev) } callout = thread_call_allocate_with_options(filt_timerexpire, - (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH, - THREAD_CALL_OPTIONS_ONCE); + (thread_call_param_t)kn, THREAD_CALL_PRIORITY_HIGH, + THREAD_CALL_OPTIONS_ONCE); if (NULL == callout) { knote_set_error(kn, ENOMEM); @@ -1453,8 +1471,9 @@ filt_timerattach(struct knote *kn, struct kevent_internal_s *kev) os_atomic_store(&kn->kn_hookid, TIMER_IDLE, relaxed); /* NOTE_ABSOLUTE implies EV_ONESHOT */ - if (kn->kn_sfflags & NOTE_ABSOLUTE) + if (kn->kn_sfflags & NOTE_ABSOLUTE) { kn->kn_flags |= EV_ONESHOT; + } if (filt_timer_is_ready(kn)) { os_atomic_store(&kn->kn_hookid, TIMER_IMMEDIATE, relaxed); @@ -1583,10 +1602,11 @@ filt_timerprocess( uint64_t now; - if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) + if (kn->kn_sfflags & NOTE_MACH_CONTINUOUS_TIME) { now = mach_continuous_time(); - else + } else { now = mach_absolute_time(); + } uint64_t first_deadline = kn->kn_ext[0]; uint64_t interval_abs = kn->kn_sdata; @@ -1649,7 +1669,7 @@ 
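/*
 * Illustrative sketch, not part of the patch: per the filt_timerattach hunk
 * above, NOTE_ABSOLUTE implies EV_ONESHOT, and filt_timervalidate interprets
 * the deadline as calendar time since the epoch in the selected unit (a
 * deadline already in the past simply fires immediately). Fire once, two
 * seconds from now, in whole seconds:
 */
#include <sys/event.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
	int kq = kqueue();
	struct kevent kev, out;

	/* EV_ONESHOT is implied by NOTE_ABSOLUTE per the change above;
	 * stating it anyway keeps the intent obvious. */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
	    NOTE_ABSOLUTE | NOTE_SECONDS, time(NULL) + 2, NULL);

	if (kevent(kq, &kev, 1, &out, 1, NULL) == 1) {
		printf("absolute timer fired at %ld\n", (long)time(NULL));
	}
	return 0;
}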
filt_userattach(struct knote *kn, __unused struct kevent_internal_s *kev) } else { kn->kn_hookid = 0; } - return (kn->kn_hookid); + return kn->kn_hookid; } static void @@ -1748,12 +1768,12 @@ filt_wlturnstile_interlock_is_workq(struct kqworkloop *kqwl) { struct kqrequest *kqr = &kqwl->kqwl_request; return (kqr->kqr_state & KQR_THREQUESTED) && - (kqr->kqr_thread == THREAD_NULL); + (kqr->kqr_thread == THREAD_NULL); } static void filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts, - turnstile_update_flags_t flags) + turnstile_update_flags_t flags) { turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL; struct kqrequest *kqr = &kqwl->kqwl_request; @@ -1780,7 +1800,7 @@ filt_wlupdate_inheritor(struct kqworkloop *kqwl, struct turnstile *ts, __result_use_check static int filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn, - struct kevent_internal_s *kev, kq_index_t qos_index, int op) + struct kevent_internal_s *kev, kq_index_t qos_index, int op) { user_addr_t uaddr = CAST_USER_ADDR_T(kev->ext[EV_EXTIDX_WL_ADDR]); struct kqrequest *kqr = &kqwl->kqwl_request; @@ -1888,7 +1908,7 @@ filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn, } } else if (op == FILT_WLDROP) { if ((kn->kn_sfflags & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE)) == - NOTE_WL_SYNC_WAIT) { + NOTE_WL_SYNC_WAIT) { /* * When deleting a SYNC_WAIT knote that hasn't been woken up * explicitly, issue a wake up. @@ -1966,7 +1986,7 @@ filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn, } else if (filt_wlturnstile_interlock_is_workq(kqwl)) { workq_kern_threadreq_lock(kqwl->kqwl_p); workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, kqr, new_owner, - ts, TURNSTILE_IMMEDIATE_UPDATE); + ts, TURNSTILE_IMMEDIATE_UPDATE); workq_kern_threadreq_unlock(kqwl->kqwl_p); if (!filt_wlturnstile_interlock_is_workq(kqwl)) { /* @@ -1993,7 +2013,7 @@ filt_wlupdate(struct kqworkloop *kqwl, struct knote *kn, if (needs_wake && ts) { waitq_wakeup64_thread(&ts->ts_waitq, CAST_EVENT64_T((event_t)kn), - (thread_t)kn->kn_hook, THREAD_AWAKENED); + (thread_t)kn->kn_hook, THREAD_AWAKENED); } kq_req_unlock(kqwl); @@ -2053,7 +2073,7 @@ out: */ static inline void filt_wlremember_last_update(struct knote *kn, struct kevent_internal_s *kev, - int error) + int error) { kn->kn_fflags = kev->fflags; kn->kn_data = error; @@ -2198,7 +2218,7 @@ filt_wlwait_continue(void *parameter, wait_result_t wr) */ static void __dead2 filt_wlpost_register_wait(struct uthread *uth, struct knote_lock_ctx *knlc, - struct _kevent_register *cont_args) + struct _kevent_register *cont_args) { struct kqworkloop *kqwl = (struct kqworkloop *)cont_args->kq; struct kqrequest *kqr = &kqwl->kqwl_request; @@ -2215,12 +2235,12 @@ filt_wlpost_register_wait(struct uthread *uth, struct knote_lock_ctx *knlc, } ts = turnstile_prepare((uintptr_t)kqwl, &kqwl->kqwl_turnstile, - TURNSTILE_NULL, TURNSTILE_WORKLOOPS); + TURNSTILE_NULL, TURNSTILE_WORKLOOPS); if (workq_locked) { workq_kern_threadreq_update_inheritor(kqwl->kqwl_p, - &kqwl->kqwl_request, kqwl->kqwl_owner, ts, - TURNSTILE_DELAYED_UPDATE); + &kqwl->kqwl_request, kqwl->kqwl_owner, ts, + TURNSTILE_DELAYED_UPDATE); if (!filt_wlturnstile_interlock_is_workq(kqwl)) { /* * if the interlock is no longer the workqueue lock, @@ -2240,7 +2260,7 @@ filt_wlpost_register_wait(struct uthread *uth, struct knote_lock_ctx *knlc, thread_set_pending_block_hint(uth->uu_thread, kThreadWaitWorkloopSyncWait); waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(cont_args->knote), - THREAD_ABORTSAFE, TIMEOUT_WAIT_FOREVER); + THREAD_ABORTSAFE, 
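/*
 * Illustrative sketch, not part of the patch: filt_userattach (top of this
 * stretch) arms an EVFILT_USER knote that stays inactive until userspace
 * posts NOTE_TRIGGER at it. The minimal post/consume handshake:
 */
#include <sys/event.h>
#include <stdio.h>

int
main(void)
{
	int kq = kqueue();
	struct kevent kev, out;

	/* attach: inactive until triggered; EV_CLEAR rearms after delivery */
	EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	/* trigger: typically done from another thread */
	EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	if (kevent(kq, NULL, 0, &out, 1, NULL) == 1) {
		printf("user event %lu delivered\n", (unsigned long)out.ident);
	}
	return 0;
}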
TIMEOUT_WAIT_FOREVER); if (workq_locked) { workq_kern_threadreq_unlock(kqwl->kqwl_p); @@ -2258,7 +2278,7 @@ filt_wlpost_register_wait(struct uthread *uth, struct knote_lock_ctx *knlc, /* called in stackshot context to report the thread responsible for blocking this thread */ void kdp_workloop_sync_wait_find_owner(__assert_only thread_t thread, - event64_t event, thread_waitinfo_t *waitinfo) + event64_t event, thread_waitinfo_t *waitinfo) { struct knote *kn = (struct knote *)event; assert(kdp_is_in_zone(kn, "knote zone")); @@ -2303,7 +2323,7 @@ filt_wldetach(__assert_only struct knote *kn) static int filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_internal_s *kev, - thread_qos_t *qos_index) + thread_qos_t *qos_index) { int new_commands = kev->fflags & NOTE_WL_COMMANDS_MASK; int sav_commands = kn->kn_sfflags & NOTE_WL_COMMANDS_MASK; @@ -2326,21 +2346,25 @@ filt_wlvalidate_kev_flags(struct knote *kn, struct kevent_internal_s *kev, switch (new_commands) { case NOTE_WL_THREAD_REQUEST: /* thread requests can only update themselves */ - if (sav_commands != NOTE_WL_THREAD_REQUEST) + if (sav_commands != NOTE_WL_THREAD_REQUEST) { return EINVAL; + } break; case NOTE_WL_SYNC_WAIT: - if (kev->fflags & NOTE_WL_END_OWNERSHIP) + if (kev->fflags & NOTE_WL_END_OWNERSHIP) { return EINVAL; + } goto sync_checks; case NOTE_WL_SYNC_WAKE: - sync_checks: - if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) +sync_checks: + if (!(sav_commands & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) { return EINVAL; - if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) + } + if ((kev->flags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) { return EINVAL; + } break; default: @@ -2454,12 +2478,12 @@ filt_wlprocess( task_t t = current_task(); uint64_t val; if (addr && task_is_active(t) && !task_is_halting(t) && - copyin_word(addr, &val, sizeof(val)) == 0 && - val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 && - (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) { + copyin_word(addr, &val, sizeof(val)) == 0 && + val && (val & DISPATCH_QUEUE_ENQUEUED) == 0 && + (val >> 48) != 0xdead && (val >> 48) != 0 && (val >> 48) != 0xffff) { panic("kevent: workloop %#016llx is not enqueued " - "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)", - kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]); + "(kn:%p dq_state:%#016llx kev.dq_state:%#016llx)", + kn->kn_udata, kn, val, kn->kn_ext[EV_EXTIDX_WL_VALUE]); } } #endif @@ -2521,11 +2545,12 @@ kqueue_alloc(struct proc *p, unsigned int flags) int i; kqwq = (struct kqworkq *)zalloc(kqworkq_zone); - if (kqwq == NULL) + if (kqwq == NULL) { return NULL; + } kq = &kqwq->kqwq_kqueue; - bzero(kqwq, sizeof (struct kqworkq)); + bzero(kqwq, sizeof(struct kqworkq)); kqwq->kqwq_state = KQ_WORKQ; @@ -2558,10 +2583,11 @@ kqueue_alloc(struct proc *p, unsigned int flags) int i; kqwl = (struct kqworkloop *)zalloc(kqworkloop_zone); - if (kqwl == NULL) + if (kqwl == NULL) { return NULL; + } - bzero(kqwl, sizeof (struct kqworkloop)); + bzero(kqwl, sizeof(struct kqworkloop)); kqwl->kqwl_state = KQ_WORKLOOP | KQ_DYNAMIC; kqwl->kqwl_retains = 1; /* donate a retain to creator */ @@ -2581,11 +2607,12 @@ kqueue_alloc(struct proc *p, unsigned int flags) struct kqfile *kqf; kqf = (struct kqfile *)zalloc(kqfile_zone); - if (kqf == NULL) + if (kqf == NULL) { return NULL; + } kq = &kqf->kqf_kqueue; - bzero(kqf, sizeof (struct kqfile)); + bzero(kqf, sizeof(struct kqfile)); TAILQ_INIT(&kqf->kqf_queue); TAILQ_INIT(&kqf->kqf_suppressed); @@ -2599,12 +2626,13 @@ kqueue_alloc(struct proc *p, unsigned 
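/*
 * Illustrative sketch, not part of the patch: filt_wlvalidate_kev_flags above
 * is a small decision table for workloop knote updates. Restated here as a
 * standalone predicate; the NOTE_WL_* bit values below are stand-ins (the
 * real ones are private SPI in XNU's sys/event.h) -- only the relationships
 * are transcribed from the code above.
 */
#include <sys/event.h>   /* EV_ENABLE, EV_DELETE */
#include <errno.h>

#define NOTE_WL_THREAD_REQUEST 0x01   /* stand-in values, see note above */
#define NOTE_WL_SYNC_WAIT      0x04
#define NOTE_WL_SYNC_WAKE      0x08
#define NOTE_WL_END_OWNERSHIP  0x40

static int
wl_update_allowed(int new_cmd, int sav_cmd, int new_fflags, int new_evflags)
{
	switch (new_cmd) {
	case NOTE_WL_THREAD_REQUEST:
		/* thread requests can only update themselves */
		return sav_cmd == NOTE_WL_THREAD_REQUEST ? 0 : EINVAL;

	case NOTE_WL_SYNC_WAIT:
		/* a sync waiter cannot also end ownership */
		if (new_fflags & NOTE_WL_END_OWNERSHIP) {
			return EINVAL;
		}
		/* FALLTHROUGH: shared checks with SYNC_WAKE */
	case NOTE_WL_SYNC_WAKE:
		/* wait/wake may only touch an existing wait/wake knote */
		if (!(sav_cmd & (NOTE_WL_SYNC_WAIT | NOTE_WL_SYNC_WAKE))) {
			return EINVAL;
		}
		/* EV_ENABLE is only accepted together with EV_DELETE */
		if ((new_evflags & (EV_ENABLE | EV_DELETE)) == EV_ENABLE) {
			return EINVAL;
		}
		return 0;

	default:
		return EINVAL;
	}
}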
int flags) if (fdp->fd_knlistsize < 0) { proc_fdlock(p); - if (fdp->fd_knlistsize < 0) - fdp->fd_knlistsize = 0; /* this process has had a kq */ + if (fdp->fd_knlistsize < 0) { + fdp->fd_knlistsize = 0; /* this process has had a kq */ + } proc_fdunlock(p); } - return (kq); + return kq; } /* @@ -2664,8 +2692,9 @@ knotes_dealloc(proc_t p) knhash_unlock(p); /* free the kn_hash table */ - if (kn_hash) + if (kn_hash) { FREE(kn_hash, M_KQUEUE); + } proc_fdlock(p); } @@ -2729,8 +2758,9 @@ kqueue_dealloc(struct kqueue *kq) struct knote *kn; int i; - if (kq == NULL) + if (kq == NULL) { return; + } p = kq->kq_p; fdp = p->p_fd; @@ -2790,7 +2820,9 @@ kqueue_dealloc(struct kqueue *kq) struct kqworkloop *kqwl = (struct kqworkloop *)kq; thread_t cur_owner = kqworkloop_invalidate(kqwl); - if (cur_owner) thread_deallocate(cur_owner); + if (cur_owner) { + thread_deallocate(cur_owner); + } if (kqwl->kqwl_request.kqr_state & KQR_ALLOCATED_TURNSTILE) { struct turnstile *ts; @@ -2811,7 +2843,7 @@ kqueue_dealloc(struct kqueue *kq) lck_spin_destroy(&kq->kq_reqlock, kq_lck_grp); if (kq->kq_state & KQ_WORKQ) { - zfree(kqworkq_zone, (struct kqworkq *)kq); + zfree(kqworkq_zone, kq); } else if (kq->kq_state & KQ_WORKLOOP) { struct kqworkloop *kqwl = (struct kqworkloop *)kq; @@ -2819,7 +2851,7 @@ kqueue_dealloc(struct kqueue *kq) lck_mtx_destroy(&kqwl->kqwl_statelock, kq_lck_grp); zfree(kqworkloop_zone, kqwl); } else { - zfree(kqfile_zone, (struct kqfile *)kq); + zfree(kqfile_zone, kq); } } @@ -2829,15 +2861,18 @@ kqueue_retain(struct kqueue *kq) struct kqworkloop *kqwl = (struct kqworkloop *)kq; uint32_t previous; - if ((kq->kq_state & KQ_DYNAMIC) == 0) + if ((kq->kq_state & KQ_DYNAMIC) == 0) { return; + } previous = OSIncrementAtomic(&kqwl->kqwl_retains); - if (previous == KQ_WORKLOOP_RETAINS_MAX) + if (previous == KQ_WORKLOOP_RETAINS_MAX) { panic("kq(%p) retain overflow", kq); + } - if (previous == 0) + if (previous == 0) { panic("kq(%p) resurrection", kq); + } } #define KQUEUE_CANT_BE_LAST_REF 0 @@ -2871,52 +2906,54 @@ kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval) error = falloc_withalloc(p, &fp, &fd, vfs_context_current(), fp_zalloc, cra); if (error) { - return (error); + return error; } kq = kqueue_alloc(p, 0); if (kq == NULL) { fp_free(p, fd, fp); - return (ENOMEM); + return ENOMEM; } fp->f_flag = FREAD | FWRITE; fp->f_ops = &kqueueops; fp->f_data = kq; + fp->f_lflags |= FG_CONFINED; proc_fdlock(p); - *fdflags(p, fd) |= UF_EXCLOSE; + *fdflags(p, fd) |= UF_EXCLOSE | UF_FORKCLOSE; procfdtbl_releasefd(p, fd, NULL); fp_drop(p, fd, fp, 1); proc_fdunlock(p); *retval = fd; - return (error); + return error; } int kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval) { - return (kqueue_body(p, fileproc_alloc_init, NULL, retval)); + return kqueue_body(p, fileproc_alloc_init, NULL, retval); } static int kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p, - unsigned int flags) + unsigned int flags) { int advance; int error; if (flags & KEVENT_FLAG_LEGACY32) { - bzero(kevp, sizeof (*kevp)); + bzero(kevp, sizeof(*kevp)); if (IS_64BIT_PROCESS(p)) { struct user64_kevent kev64; - advance = sizeof (kev64); + advance = sizeof(kev64); error = copyin(*addrp, (caddr_t)&kev64, advance); - if (error) - return (error); + if (error) { + return error; + } kevp->ident = kev64.ident; kevp->filter = kev64.filter; kevp->flags = kev64.flags; @@ -2926,10 +2963,11 @@ kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p } else { struct 
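/*
 * Illustrative sketch, not part of the patch: the kqueue_body hunk above
 * extends the descriptor flags from UF_EXCLOSE alone to UF_EXCLOSE |
 * UF_FORKCLOSE and marks the fileglob FG_CONFINED, so new kqueue fds stay
 * confined to the creating process. The pre-existing close-on-exec half is
 * observable from userspace:
 */
#include <sys/event.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	int kq = kqueue();
	int fdflags = fcntl(kq, F_GETFD);
	/* expected to print 1: UF_EXCLOSE surfaces as FD_CLOEXEC */
	printf("FD_CLOEXEC set: %d\n", (fdflags & FD_CLOEXEC) ? 1 : 0);
	return 0;
}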
user32_kevent kev32; - advance = sizeof (kev32); + advance = sizeof(kev32); error = copyin(*addrp, (caddr_t)&kev32, advance); - if (error) - return (error); + if (error) { + return error; + } kevp->ident = (uintptr_t)kev32.ident; kevp->filter = kev32.filter; kevp->flags = kev32.flags; @@ -2940,12 +2978,13 @@ kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p } else if (flags & KEVENT_FLAG_LEGACY64) { struct kevent64_s kev64; - bzero(kevp, sizeof (*kevp)); + bzero(kevp, sizeof(*kevp)); - advance = sizeof (struct kevent64_s); + advance = sizeof(struct kevent64_s); error = copyin(*addrp, (caddr_t)&kev64, advance); - if (error) - return(error); + if (error) { + return error; + } kevp->ident = kev64.ident; kevp->filter = kev64.filter; kevp->flags = kev64.flags; @@ -2954,16 +2993,16 @@ kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p kevp->data = kev64.data; kevp->ext[0] = kev64.ext[0]; kevp->ext[1] = kev64.ext[1]; - } else { struct kevent_qos_s kevqos; - bzero(kevp, sizeof (*kevp)); + bzero(kevp, sizeof(*kevp)); - advance = sizeof (struct kevent_qos_s); + advance = sizeof(struct kevent_qos_s); error = copyin(*addrp, (caddr_t)&kevqos, advance); - if (error) + if (error) { return error; + } kevp->ident = kevqos.ident; kevp->filter = kevqos.filter; kevp->flags = kevqos.flags; @@ -2977,14 +3016,15 @@ kevent_copyin(user_addr_t *addrp, struct kevent_internal_s *kevp, struct proc *p kevp->ext[2] = kevqos.ext[2]; kevp->ext[3] = kevqos.ext[3]; } - if (!error) + if (!error) { *addrp += advance; - return (error); + } + return error; } static int kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc *p, - unsigned int flags) + unsigned int flags) { user_addr_t addr = *addrp; int advance; @@ -3002,7 +3042,7 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc * if (IS_64BIT_PROCESS(p)) { struct user64_kevent kev64; - advance = sizeof (kev64); + advance = sizeof(kev64); bzero(&kev64, advance); /* @@ -3010,7 +3050,7 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc * * value of (uintptr_t)-1. */ kev64.ident = (kevp->ident == (uintptr_t)-1) ? 
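/*
 * Illustrative sketch, not part of the patch: the KEVENT_FLAG_LEGACY64 path
 * above copies struct kevent64_s, the smallest public ABI that carries the
 * ext[] words filt_timervalidate reads (ext[1] holds the leeway when
 * NOTE_LEEWAY is set). A 100ms one-shot that tolerates 10ms of timer
 * coalescing, via the public kevent64(2) wrapper:
 */
#include <sys/event.h>
#include <stdio.h>

int
main(void)
{
	int kq = kqueue();
	struct kevent64_s kev, out;

	EV_SET64(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
	    NOTE_USECONDS | NOTE_LEEWAY,
	    100000 /* deadline: 100ms */, 0 /* udata */,
	    0 /* ext[0] */, 10000 /* ext[1]: 10ms leeway */);

	if (kevent64(kq, &kev, 1, &out, 1, 0 /* flags */, NULL) == 1) {
		printf("timer fired\n");
	}
	return 0;
}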
- (uint64_t)-1LL : (uint64_t)kevp->ident; + (uint64_t)-1LL : (uint64_t)kevp->ident; kev64.filter = kevp->filter; kev64.flags = kevp->flags; @@ -3021,7 +3061,7 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc * } else { struct user32_kevent kev32; - advance = sizeof (kev32); + advance = sizeof(kev32); bzero(&kev32, advance); kev32.ident = (uint32_t)kevp->ident; kev32.filter = kevp->filter; @@ -3034,7 +3074,7 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc * } else if (flags & KEVENT_FLAG_LEGACY64) { struct kevent64_s kev64; - advance = sizeof (struct kevent64_s); + advance = sizeof(struct kevent64_s); if (flags & KEVENT_FLAG_STACK_EVENTS) { addr -= advance; } @@ -3051,7 +3091,7 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc * } else { struct kevent_qos_s kevqos; - advance = sizeof (struct kevent_qos_s); + advance = sizeof(struct kevent_qos_s); if (flags & KEVENT_FLAG_STACK_EVENTS) { addr -= advance; } @@ -3071,20 +3111,21 @@ kevent_copyout(struct kevent_internal_s *kevp, user_addr_t *addrp, struct proc * error = copyout((caddr_t)&kevqos, addr, advance); } if (!error) { - if (flags & KEVENT_FLAG_STACK_EVENTS) + if (flags & KEVENT_FLAG_STACK_EVENTS) { *addrp = addr; - else + } else { *addrp = addr + advance; + } } - return (error); + return error; } static int kevent_get_data_size( - struct proc *p, - uint64_t data_available, - unsigned int flags, - user_size_t *residp) + struct proc *p, + uint64_t data_available, + unsigned int flags, + user_size_t *residp) { user_size_t resid; int error = 0; @@ -3101,8 +3142,9 @@ kevent_get_data_size( error = copyin((user_addr_t)data_available, &usize, sizeof(usize)); resid = (user_size_t)usize; } - if (error) - return(error); + if (error) { + return error; + } } else { resid = 0; } @@ -3112,10 +3154,10 @@ kevent_get_data_size( static int kevent_put_data_size( - struct proc *p, - uint64_t data_available, - unsigned int flags, - user_size_t resid) + struct proc *p, + uint64_t data_available, + unsigned int flags, + user_size_t resid) { int error = 0; @@ -3171,12 +3213,14 @@ kevent_continue(__unused struct kqueue *kq, void *data, int error) } /* don't restart after signals... 
*/ - if (error == ERESTART) + if (error == ERESTART) { error = EINTR; - else if (error == EWOULDBLOCK) + } else if (error == EWOULDBLOCK) { error = 0; - if (error == 0) + } + if (error == 0) { *retval = noutputs; + } unix_syscall_return(error); } @@ -3190,14 +3234,14 @@ kevent(struct proc *p, struct kevent_args *uap, int32_t *retval) unsigned int flags = KEVENT_FLAG_LEGACY32; return kevent_internal(p, - (kqueue_id_t)uap->fd, NULL, - uap->changelist, uap->nchanges, - uap->eventlist, uap->nevents, - 0ULL, 0ULL, - flags, - uap->timeout, - kevent_continue, - retval); + (kqueue_id_t)uap->fd, NULL, + uap->changelist, uap->nchanges, + uap->eventlist, uap->nevents, + 0ULL, 0ULL, + flags, + uap->timeout, + kevent_continue, + retval); } int @@ -3210,14 +3254,14 @@ kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval) flags |= KEVENT_FLAG_LEGACY64; return kevent_internal(p, - (kqueue_id_t)uap->fd, NULL, - uap->changelist, uap->nchanges, - uap->eventlist, uap->nevents, - 0ULL, 0ULL, - flags, - uap->timeout, - kevent_continue, - retval); + (kqueue_id_t)uap->fd, NULL, + uap->changelist, uap->nchanges, + uap->eventlist, uap->nevents, + 0ULL, 0ULL, + flags, + uap->timeout, + kevent_continue, + retval); } int @@ -3227,33 +3271,33 @@ kevent_qos(struct proc *p, struct kevent_qos_args *uap, int32_t *retval) uap->flags &= KEVENT_FLAG_USER; return kevent_internal(p, - (kqueue_id_t)uap->fd, NULL, - uap->changelist, uap->nchanges, - uap->eventlist, uap->nevents, - uap->data_out, (uint64_t)uap->data_available, - uap->flags, - 0ULL, - kevent_continue, - retval); + (kqueue_id_t)uap->fd, NULL, + uap->changelist, uap->nchanges, + uap->eventlist, uap->nevents, + uap->data_out, (uint64_t)uap->data_available, + uap->flags, + 0ULL, + kevent_continue, + retval); } int kevent_qos_internal(struct proc *p, int fd, - user_addr_t changelist, int nchanges, - user_addr_t eventlist, int nevents, - user_addr_t data_out, user_size_t *data_available, - unsigned int flags, - int32_t *retval) + user_addr_t changelist, int nchanges, + user_addr_t eventlist, int nevents, + user_addr_t data_out, user_size_t *data_available, + unsigned int flags, + int32_t *retval) { return kevent_internal(p, - (kqueue_id_t)fd, NULL, - changelist, nchanges, - eventlist, nevents, - data_out, (uint64_t)data_available, - (flags | KEVENT_FLAG_KERNEL), - 0ULL, - NULL, - retval); + (kqueue_id_t)fd, NULL, + changelist, nchanges, + eventlist, nevents, + data_out, (uint64_t)data_available, + (flags | KEVENT_FLAG_KERNEL), + 0ULL, + NULL, + retval); } int @@ -3263,40 +3307,40 @@ kevent_id(struct proc *p, struct kevent_id_args *uap, int32_t *retval) uap->flags &= KEVENT_FLAG_USER; return kevent_internal(p, - (kqueue_id_t)uap->id, NULL, - uap->changelist, uap->nchanges, - uap->eventlist, uap->nevents, - uap->data_out, (uint64_t)uap->data_available, - (uap->flags | KEVENT_FLAG_DYNAMIC_KQUEUE), - 0ULL, - kevent_continue, - retval); + (kqueue_id_t)uap->id, NULL, + uap->changelist, uap->nchanges, + uap->eventlist, uap->nevents, + uap->data_out, (uint64_t)uap->data_available, + (uap->flags | KEVENT_FLAG_DYNAMIC_KQUEUE), + 0ULL, + kevent_continue, + retval); } int kevent_id_internal(struct proc *p, kqueue_id_t *id, - user_addr_t changelist, int nchanges, - user_addr_t eventlist, int nevents, - user_addr_t data_out, user_size_t *data_available, - unsigned int flags, - int32_t *retval) + user_addr_t changelist, int nchanges, + user_addr_t eventlist, int nevents, + user_addr_t data_out, user_size_t *data_available, + unsigned int flags, + int32_t *retval) { 
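/*
 * Illustrative sketch, not part of the patch: kevent_internal (below) copies
 * per-change errors back into the eventlist as EV_ERROR events when there is
 * room; the public way to request a receipt for every change is EV_RECEIPT.
 * Registering a deliberately bogus change and reading its errno back:
 */
#include <sys/event.h>
#include <stdio.h>

int
main(void)
{
	int kq = kqueue();
	struct kevent kev, out;

	/* bogus on purpose: EVFILT_READ on an fd that is not open */
	EV_SET(&kev, 9999, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, NULL);

	int n = kevent(kq, &kev, 1, &out, 1, NULL);
	if (n == 1 && (out.flags & EV_ERROR)) {
		/* data is the per-change errno -- EBADF here */
		printf("change for ident %lu failed: errno %ld\n",
		    (unsigned long)out.ident, (long)out.data);
	}
	return 0;
}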
return kevent_internal(p, - *id, id, - changelist, nchanges, - eventlist, nevents, - data_out, (uint64_t)data_available, - (flags | KEVENT_FLAG_KERNEL | KEVENT_FLAG_DYNAMIC_KQUEUE), - 0ULL, - NULL, - retval); + *id, id, + changelist, nchanges, + eventlist, nevents, + data_out, (uint64_t)data_available, + (flags | KEVENT_FLAG_KERNEL | KEVENT_FLAG_DYNAMIC_KQUEUE), + 0ULL, + NULL, + retval); } static int kevent_get_timeout(struct proc *p, - user_addr_t utimeout, - unsigned int flags, - struct timeval *atvp) + user_addr_t utimeout, + unsigned int flags, + struct timeval *atvp) { struct timeval atv; int error = 0; @@ -3311,19 +3355,22 @@ kevent_get_timeout(struct proc *p, } else if (IS_64BIT_PROCESS(p)) { struct user64_timespec ts; error = copyin(utimeout, &ts, sizeof(ts)); - if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0) + if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0) { error = EINVAL; - else + } else { TIMESPEC_TO_TIMEVAL(&rtv, &ts); + } } else { struct user32_timespec ts; error = copyin(utimeout, &ts, sizeof(ts)); TIMESPEC_TO_TIMEVAL(&rtv, &ts); } - if (error) - return (error); - if (itimerfix(&rtv)) - return (EINVAL); + if (error) { + return error; + } + if (itimerfix(&rtv)) { + return EINVAL; + } getmicrouptime(&atv); timevaladd(&atv, &rtv); } else { @@ -3361,7 +3408,7 @@ kevent_set_kq_mode(struct kqueue *kq, unsigned int flags) return 0; } -#define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) +#define KQ_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) #define CONFIG_KQ_HASHSIZE CONFIG_KN_HASHSIZE static inline void @@ -3473,7 +3520,9 @@ kqueue_hash_lookup(struct proc *p, kqueue_id_t id) /* should hold the kq hash lock */ kqhash_lock_held(p); - if (fdp->fd_kqhashmask == 0) return NULL; + if (fdp->fd_kqhashmask == 0) { + return NULL; + } list = &fdp->fd_kqhash[KQ_HASH(id, fdp->fd_kqhashmask)]; SLIST_FOREACH(kqwl, list, kqwl_hashlink) { @@ -3498,7 +3547,9 @@ kqueue_release_last(struct proc *p, kqueue_t kqu) thread_t cur_owner = kqworkloop_invalidate(kqu.kqwl); kqueue_hash_remove(p, kq); kqhash_unlock(p); - if (cur_owner) thread_deallocate(cur_owner); + if (cur_owner) { + thread_deallocate(cur_owner); + } kqueue_dealloc(kq); } else { kqhash_unlock(p); @@ -3569,8 +3620,8 @@ kevent_get_bound_kqworkloop(thread_t thread) static int kevent_get_kq(struct proc *p, kqueue_id_t id, workq_threadreq_param_t *trp, - unsigned int flags, struct fileproc **fpp, int *fdp, - struct kqueue **kqp) + unsigned int flags, struct fileproc **fpp, int *fdp, + struct kqueue **kqp) { struct filedesc *descp = p->p_fd; struct fileproc *fp = NULL; @@ -3595,7 +3646,6 @@ kevent_get_kq(struct proc *p, kqueue_id_t id, workq_threadreq_param_t *trp, if (id == (kqueue_id_t)-1 && (flags & KEVENT_FLAG_KERNEL) && (flags & KEVENT_FLAG_WORKLOOP)) { - if (!is_workqueue_thread(th) || !kq) { return EINVAL; } @@ -3610,7 +3660,6 @@ kevent_get_kq(struct proc *p, kqueue_id_t id, workq_threadreq_param_t *trp, /* try shortcut on kq lookup for bound threads */ if (kq != NULL && ((struct kqworkloop *)kq)->kqwl_dynamicid == id) { - if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) { return EEXIST; } @@ -3663,10 +3712,9 @@ kevent_get_kq(struct proc *p, kqueue_id_t id, workq_threadreq_param_t *trp, kqueue_dealloc(alloc_kq); } } else { - if (flags & KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) { kqhash_unlock(p); - return EEXIST; + return EEXIST; } /* retain a reference while working with this kq. 
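/*
 * Illustrative sketch, not part of the patch: kevent_get_timeout above
 * validates the user timespec (itimerfix) and converts it to an absolute
 * uptime deadline; a NULL timeout means wait forever, a zero timespec means
 * poll. A bounded wait on an empty kqueue:
 */
#include <sys/event.h>
#include <time.h>
#include <stdio.h>

int
main(void)
{
	int kq = kqueue();                 /* empty kqueue: nothing will fire */
	struct kevent out;
	struct timespec ts = { 0, 250 * 1000 * 1000 };   /* 250ms */

	int n = kevent(kq, NULL, 0, &out, 1, &ts);
	/* n == 0: deadline passed with no events (kevent_continue maps
	 * EWOULDBLOCK to a zero event count rather than an error) */
	printf("kevent returned %d\n", n);
	return 0;
}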
*/ @@ -3674,7 +3722,6 @@ kevent_get_kq(struct proc *p, kqueue_id_t id, workq_threadreq_param_t *trp, kqueue_retain(kq); kqhash_unlock(p); } - } else if (flags & KEVENT_FLAG_WORKQ) { /* must already exist for bound threads. */ if (flags & KEVENT_FLAG_KERNEL) { @@ -3707,13 +3754,15 @@ kevent_get_kq(struct proc *p, kqueue_id_t id, workq_threadreq_param_t *trp, } else { /* get a usecount for the kq itself */ fd = (int)id; - if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0) - return (error); + if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0) { + return error; + } } if ((error = kevent_set_kq_mode(kq, flags)) != 0) { /* drop the usecount */ - if (fp != NULL) + if (fp != NULL) { fp_drop(p, fd, fp, 0); + } return error; } @@ -3788,7 +3837,7 @@ kevent_exit_on_workloop_ownership_leak(thread_t thread) kqhash_unlock(p); reason = os_reason_create(OS_REASON_LIBSYSTEM, - OS_REASON_LIBSYSTEM_CODE_WORKLOOP_OWNERSHIP_LEAK); + OS_REASON_LIBSYSTEM_CODE_WORKLOOP_OWNERSHIP_LEAK); if (reason == OS_REASON_NULL) { goto out; } @@ -3804,13 +3853,13 @@ kevent_exit_on_workloop_ownership_leak(thread_t thread) struct kcdata_descriptor *kcd = &reason->osr_kcd_descriptor; if (kcdata_get_memory_addr(kcd, EXIT_REASON_WORKLOOP_ID, - sizeof(workloop_id), &addr) == KERN_SUCCESS) { + sizeof(workloop_id), &addr) == KERN_SUCCESS) { kcdata_memcpy(kcd, addr, &workloop_id, sizeof(workloop_id)); } uint64_t serial_no = kevent_workloop_serial_no_copyin(p, workloop_id); if (serial_no && kcdata_get_memory_addr(kcd, EXIT_REASON_DISPATCH_QUEUE_NO, - sizeof(serial_no), &addr) == KERN_SUCCESS) { + sizeof(serial_no), &addr) == KERN_SUCCESS) { kcdata_memcpy(kcd, addr, &serial_no, sizeof(serial_no)); } } @@ -3818,32 +3867,32 @@ out: #if DEVELOPMENT || DEBUG if (kevent_debug_flags() & KEVENT_PANIC_ON_WORKLOOP_OWNERSHIP_LEAK) { panic("thread %p in task %p is leaked workloop 0x%016llx ownership", - thread, p->task, workloop_id); + thread, p->task, workloop_id); } psignal_try_thread_with_reason(p, thread, SIGABRT, reason); return 0; #else return exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL, - FALSE, FALSE, 0, reason); + FALSE, FALSE, 0, reason); #endif } static inline boolean_t kevent_args_requesting_events(unsigned int flags, int nevents) { - return (!(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0); + return !(flags & KEVENT_FLAG_ERROR_EVENTS) && nevents > 0; } static int kevent_internal(struct proc *p, - kqueue_id_t id, kqueue_id_t *id_out, - user_addr_t changelist, int nchanges, - user_addr_t ueventlist, int nevents, - user_addr_t data_out, uint64_t data_available, - unsigned int flags, - user_addr_t utimeout, - kqueue_continue_t continuation, - int32_t *retval) + kqueue_id_t id, kqueue_id_t *id_out, + user_addr_t changelist, int nchanges, + user_addr_t ueventlist, int nevents, + user_addr_t data_out, uint64_t data_available, + unsigned int flags, + user_addr_t utimeout, + kqueue_continue_t continuation, + int32_t *retval) { uthread_t ut; struct kqueue *kq; @@ -3860,47 +3909,53 @@ kevent_internal(struct proc *p, /* Don't allow user-space threads to process output events from the workq kqs */ if (((flags & (KEVENT_FLAG_WORKQ | KEVENT_FLAG_KERNEL)) == KEVENT_FLAG_WORKQ) && - kevent_args_requesting_events(flags, nevents)) + kevent_args_requesting_events(flags, nevents)) { return EINVAL; + } if (flags & KEVENT_FLAG_PARKING) { - if (!kevent_args_requesting_events(flags, nevents) || id != (kqueue_id_t)-1) + if (!kevent_args_requesting_events(flags, nevents) || id != (kqueue_id_t)-1) { return EINVAL; + } } /* restrict dynamic kqueue allocation 
to workloops (for now) */ - if ((flags & (KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP)) == KEVENT_FLAG_DYNAMIC_KQUEUE) + if ((flags & (KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP)) == KEVENT_FLAG_DYNAMIC_KQUEUE) { return EINVAL; + } - if ((flags & (KEVENT_FLAG_WORKLOOP)) && (flags & (KEVENT_FLAG_WORKQ))) + if ((flags & (KEVENT_FLAG_WORKLOOP)) && (flags & (KEVENT_FLAG_WORKQ))) { return EINVAL; + } if (flags & (KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST)) { - /* allowed only on workloops when calling kevent_id from user-space */ - if (!(flags & KEVENT_FLAG_WORKLOOP) || (flags & KEVENT_FLAG_KERNEL) || !(flags & KEVENT_FLAG_DYNAMIC_KQUEUE)) + if (!(flags & KEVENT_FLAG_WORKLOOP) || (flags & KEVENT_FLAG_KERNEL) || !(flags & KEVENT_FLAG_DYNAMIC_KQUEUE)) { return EINVAL; + } } /* prepare to deal with stack-wise allocation of out events */ if (flags & KEVENT_FLAG_STACK_EVENTS) { int scale = ((flags & KEVENT_FLAG_LEGACY32) ? - (IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) : - sizeof(struct user32_kevent)) : - ((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) : - sizeof(struct kevent_qos_s))); + (IS_64BIT_PROCESS(p) ? sizeof(struct user64_kevent) : + sizeof(struct user32_kevent)) : + ((flags & KEVENT_FLAG_LEGACY64) ? sizeof(struct kevent64_s) : + sizeof(struct kevent_qos_s))); ueventlist += nevents * scale; } /* convert timeout to absolute - if we have one (and not immediate) */ error = kevent_get_timeout(p, utimeout, flags, &atv); - if (error) + if (error) { return error; + } /* copyin initial value of data residual from data_available */ error = kevent_get_data_size(p, data_available, flags, &data_size); - if (error) + if (error) { return error; + } /* get the kq we are going to be working on */ error = kevent_get_kq(p, id, NULL, flags, &fp, &fd, &kq); @@ -3915,8 +3970,9 @@ kevent_internal(struct proc *p, .uu_flags = flags, }); #endif // CONFIG_WORKLOOP_DEBUG - if (error) + if (error) { return error; + } /* only bound threads can receive events on workloops */ if (flags & KEVENT_FLAG_WORKLOOP) { @@ -3945,15 +4001,15 @@ kevent_internal(struct proc *p, if (id_out) { *id_out = kqwl->kqwl_dynamicid; } - } /* register all the change requests the user provided... */ noutputs = 0; while (nchanges > 0 && error == 0) { error = kevent_copyin(&changelist, &kev, p, flags); - if (error) + if (error) { break; + } /* Make sure user doesn't pass in any system flags */ kev.flags &= ~EV_SYSFLAGS; @@ -3993,7 +4049,7 @@ kevent_internal(struct proc *p, } // keep in sync with kevent_register_wait_return() - if (nevents > 0 && (kev.flags & (EV_ERROR|EV_RECEIPT))) { + if (nevents > 0 && (kev.flags & (EV_ERROR | EV_RECEIPT))) { if ((kev.flags & EV_ERROR) == 0) { kev.flags |= EV_ERROR; kev.data = 0; @@ -4010,8 +4066,9 @@ kevent_internal(struct proc *p, } /* short-circuit the scan if we only want error events */ - if (flags & KEVENT_FLAG_ERROR_EVENTS) + if (flags & KEVENT_FLAG_ERROR_EVENTS) { nevents = 0; + } /* process pending events */ if (nevents > 0 && noutputs == 0 && error == 0) { @@ -4038,9 +4095,9 @@ kevent_internal(struct proc *p, needs_end_processing = false; error = kqueue_scan(kq, kevent_callback, - continuation, cont_args, - &cont_args->process_data, - &atv, p); + continuation, cont_args, + &cont_args->process_data, + &atv, p); /* process remaining outputs */ noutputs = cont_args->eventout; @@ -4066,13 +4123,15 @@ out: kevent_put_kq(p, id, fp, kq); /* don't restart after signals... 
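/*
 * Illustrative sketch, not part of the patch: the flow above -- drain the
 * changelist through kevent_register, then fall into kqueue_scan unless
 * KEVENT_FLAG_ERROR_EVENTS short-circuits it -- is why one kevent(2) call
 * can both register and reap:
 */
#include <sys/event.h>
#include <stdio.h>

int
main(void)
{
	int kq = kqueue();
	struct kevent change, out;

	EV_SET(&change, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0,
	    50 /* ms: the default unit when no NOTE_* unit flag is set */, NULL);

	/* one syscall: register the change, then block in the scan */
	if (kevent(kq, &change, 1, &out, 1, NULL) == 1) {
		printf("registered and reaped in one call\n");
	}
	return 0;
}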
*/ - if (error == ERESTART) + if (error == ERESTART) { error = EINTR; - else if (error == EWOULDBLOCK) + } else if (error == EWOULDBLOCK) { error = 0; - if (error == 0) + } + if (error == 0) { *retval = noutputs; - return (error); + } + return error; } @@ -4084,7 +4143,7 @@ out: */ static int kevent_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp, - void *data) + void *data) { struct _kevent *cont_args; int error; @@ -4096,15 +4155,16 @@ kevent_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp, * Copy out the appropriate amount of event data for this user. */ error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(), - cont_args->process_data.fp_flags); + cont_args->process_data.fp_flags); /* * If there isn't space for additional events, return * a harmless error to stop the processing here */ - if (error == 0 && ++cont_args->eventout == cont_args->eventcount) + if (error == 0 && ++cont_args->eventout == cont_args->eventcount) { error = EWOULDBLOCK; - return (error); + } + return error; } /* @@ -4128,12 +4188,12 @@ kevent_description(struct kevent_internal_s *kevp, char *s, size_t n) kevp->ext[0], kevp->ext[1] ); - return (s); + return s; } static int kevent_register_validate_priority(struct kqueue *kq, struct knote *kn, - struct kevent_internal_s *kev) + struct kevent_internal_s *kev) { /* We don't care about the priority of a disabled or deleted knote */ if (kev->flags & (EV_DISABLE | EV_DELETE)) { @@ -4207,8 +4267,8 @@ kevent_register_wait_cleanup(struct knote *kn) */ static void kevent_register_wait_block(struct turnstile *ts, thread_t thread, - struct knote_lock_ctx *knlc, thread_continue_t cont, - struct _kevent_register *cont_args) + struct knote_lock_ctx *knlc, thread_continue_t cont, + struct _kevent_register *cont_args) { knote_unlock(cont_args->kq, cont_args->knote, knlc, KNOTE_KQ_UNLOCK); turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD); @@ -4231,13 +4291,15 @@ kevent_register_wait_return(struct _kevent_register *cont_args) thread_deallocate(cont_args->handoff_thread); } - if (kev->flags & (EV_ERROR|EV_RECEIPT)) { + if (kev->flags & (EV_ERROR | EV_RECEIPT)) { if ((kev->flags & EV_ERROR) == 0) { kev->flags |= EV_ERROR; kev->data = 0; } error = kevent_copyout(kev, &cont_args->ueventlist, p, cont_args->flags); - if (error == 0) cont_args->eventout++; + if (error == 0) { + cont_args->eventout++; + } } kevent_put_kq(p, cont_args->fd, cont_args->fp, kq); @@ -4263,7 +4325,7 @@ kevent_register_wait_return(struct _kevent_register *cont_args) int kevent_register(struct kqueue *kq, struct kevent_internal_s *kev, - struct knote_lock_ctx *knlc) + struct knote_lock_ctx *knlc) { struct proc *p = kq->kq_p; const struct filterops *fops; @@ -4276,7 +4338,7 @@ kevent_register(struct kqueue *kq, struct kevent_internal_s *kev, error = EINVAL; goto out; } - fops = sysfilt_ops[~kev->filter]; /* to 0-base index */ + fops = sysfilt_ops[~kev->filter]; /* to 0-base index */ } else { error = EINVAL; goto out; @@ -4284,28 +4346,30 @@ kevent_register(struct kqueue *kq, struct kevent_internal_s *kev, /* restrict EV_VANISHED to adding udata-specific dispatch kevents */ if ((kev->flags & EV_VANISHED) && - (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) { + (kev->flags & (EV_ADD | EV_DISPATCH2)) != (EV_ADD | EV_DISPATCH2)) { error = EINVAL; goto out; } /* Simplify the flags - delete and disable overrule */ - if (kev->flags & EV_DELETE) + if (kev->flags & EV_DELETE) { kev->flags &= ~EV_ADD; - if (kev->flags & EV_DISABLE) + } + 
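/*
 * Illustrative sketch, not part of the patch: the sysfilt_ops[~kev->filter]
 * lookup above works because the public filters are small negative constants
 * (EVFILT_READ == -1, EVFILT_WRITE == -2, ...) and, in two's complement,
 * ~(-n) == n - 1, giving a dense 0-based index:
 */
#include <sys/event.h>
#include <stdio.h>

int
main(void)
{
	/* prints 0 and 1: valid 0-based indices into sysfilt_ops */
	printf("~EVFILT_READ  = %d\n", ~EVFILT_READ);
	printf("~EVFILT_WRITE = %d\n", ~EVFILT_WRITE);
	return 0;
}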
if (kev->flags & EV_DISABLE) { kev->flags &= ~EV_ENABLE; + } if (kq->kq_state & KQ_WORKLOOP) { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_REGISTER), - ((struct kqworkloop *)kq)->kqwl_dynamicid, - kev->udata, kev->flags, kev->filter); + ((struct kqworkloop *)kq)->kqwl_dynamicid, + kev->udata, kev->flags, kev->filter); } else if (kq->kq_state & KQ_WORKQ) { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_REGISTER), - 0, kev->udata, kev->flags, kev->filter); + 0, kev->udata, kev->flags, kev->filter); } else { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_REGISTER), - VM_KERNEL_UNSLIDE_OR_PERM(kq), - kev->udata, kev->flags, kev->filter); + VM_KERNEL_UNSLIDE_OR_PERM(kq), + kev->udata, kev->flags, kev->filter); } restart: @@ -4323,7 +4387,7 @@ restart: */ if ((kev_flags & EV_ADD) && (kev_flags & EV_DELETE) && - (kq->kq_state & KQ_WORKLOOP)) { + (kq->kq_state & KQ_WORKLOOP)) { /* * For workloops, understand EV_ADD|EV_DELETE as a "soft" delete * that doesn't care about ENOENT, so just pretend the deletion @@ -4333,7 +4397,6 @@ restart: error = ENOENT; } goto out; - } else if (kn == NULL) { /* * No knote found, need to attach a new one (attach) @@ -4351,8 +4414,9 @@ restart: kn = knote_alloc(); if (kn == NULL) { error = ENOMEM; - if (knote_fp != NULL) + if (knote_fp != NULL) { fp_drop(p, kev->ident, knote_fp, 0); + } goto out; } @@ -4369,12 +4433,15 @@ restart: } /* snapshot matching/dispatching protcol flags into knote */ - if (kev->flags & EV_DISPATCH) + if (kev->flags & EV_DISPATCH) { kn->kn_status |= KN_DISPATCH; - if (kev->flags & EV_UDATA_SPECIFIC) + } + if (kev->flags & EV_UDATA_SPECIFIC) { kn->kn_status |= KN_UDATA_SPECIFIC; - if (kev->flags & EV_DISABLE) + } + if (kev->flags & EV_DISABLE) { kn->kn_status |= KN_DISABLED; + } /* * copy the kevent state into knote @@ -4394,8 +4461,9 @@ restart: if (error) { (void)kqueue_release(kq, KQUEUE_CANT_BE_LAST_REF); knote_free(kn); - if (knote_fp != NULL) + if (knote_fp != NULL) { fp_drop(p, kev->ident, knote_fp, 0); + } if (error == ERESTART) { goto restart; @@ -4440,20 +4508,18 @@ restart: knote_set_qos_overcommit(kn); if (result & FILTER_ACTIVE) { - if (result & FILTER_ADJUST_EVENT_QOS_BIT) + if (result & FILTER_ADJUST_EVENT_QOS_BIT) { knote_adjust_qos(kq, kn, result); + } knote_activate(kn); } - } else if (!knote_lock(kq, kn, knlc, KNOTE_KQ_LOCK_ON_SUCCESS)) { - /* * The knote was dropped while we were waiting for the lock, * we need to re-evaluate entirely */ goto restart; - } else if (kev->flags & EV_DELETE) { /* * Deletion of a knote (drop) @@ -4472,12 +4538,14 @@ restart: drop = knote_fops(kn)->f_allow_drop(kn, kev); kqlock(kq); - if (!drop) goto out_unlock; + if (!drop) { + goto out_unlock; + } } if ((kev->flags & EV_ENABLE) == 0 && - (kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) == - (KN_DISPATCH2 | KN_DISABLED)) { + (kn->kn_status & (KN_DISPATCH2 | KN_DISABLED)) == + (KN_DISPATCH2 | KN_DISABLED)) { kn->kn_status |= KN_DEFERDELETE; error = EINPROGRESS; goto out_unlock; @@ -4485,7 +4553,6 @@ restart: knote_drop(kq, kn, knlc); goto out; - } else { /* * Regular update of a knote (touch) @@ -4514,14 +4581,17 @@ restart: result = 0; } else { /* accept new kevent state */ - if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) + if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0) { kn->kn_udata = kev->udata; - if (kev->flags & EV_DISABLE) + } + if (kev->flags & EV_DISABLE) { knote_disable(kn); - if (result & (FILTER_UPDATE_REQ_QOS | FILTER_ADJUST_EVENT_QOS_BIT)) + } + if (result & (FILTER_UPDATE_REQ_QOS | FILTER_ADJUST_EVENT_QOS_BIT)) { knote_dequeue(kn); + } if ((result & 
FILTER_UPDATE_REQ_QOS) && - kev->qos && kev->qos != kn->kn_qos) { + kev->qos && kev->qos != kn->kn_qos) { knote_reset_priority(kn, kev->qos); } if (result & FILTER_ACTIVE) { @@ -4538,8 +4608,9 @@ restart: knote_wakeup(kn); } } - if (kev->flags & EV_ENABLE) + if (kev->flags & EV_ENABLE) { knote_enable(kn); + } } } @@ -4584,9 +4655,9 @@ out: */ static int knote_process(struct knote *kn, - kevent_callback_t callback, - void *callback_data, - struct filt_process_s *process_data) + kevent_callback_t callback, + void *callback_data, + struct filt_process_s *process_data) { struct kevent_internal_s kev; struct kqueue *kq = knote_get_kq(kn); @@ -4602,26 +4673,26 @@ knote_process(struct knote *kn, * Must be queued and not disabled/suppressed */ assert(kn->kn_status & KN_QUEUED); - assert(kn->kn_status & (KN_ACTIVE|KN_STAYACTIVE)); - assert(!(kn->kn_status & (KN_DISABLED|KN_SUPPRESSED|KN_DROPPING))); + assert(kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)); + assert(!(kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING))); if (kq->kq_state & KQ_WORKLOOP) { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS), - ((struct kqworkloop *)kq)->kqwl_dynamicid, - kn->kn_udata, kn->kn_status | (kn->kn_id << 32), - kn->kn_filtid); + ((struct kqworkloop *)kq)->kqwl_dynamicid, + kn->kn_udata, kn->kn_status | (kn->kn_id << 32), + kn->kn_filtid); } else if (kq->kq_state & KQ_WORKQ) { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS), - 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32), - kn->kn_filtid); + 0, kn->kn_udata, kn->kn_status | (kn->kn_id << 32), + kn->kn_filtid); } else { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS), - VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata, - kn->kn_status | (kn->kn_id << 32), kn->kn_filtid); + VM_KERNEL_UNSLIDE_OR_PERM(kq), kn->kn_udata, + kn->kn_status | (kn->kn_id << 32), kn->kn_filtid); } if ((kn->kn_status & KN_DROPPING) || - !knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) { + !knote_lock(kq, kn, &knlc, KNOTE_KQ_LOCK_ALWAYS)) { /* * When the knote is dropping or has dropped, * then there's nothing we want to process. 
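/*
 * Illustrative sketch, not part of the patch: the registration path above
 * latches EV_DISPATCH into KN_DISPATCH, so each delivery disables the knote
 * until userspace re-enables it -- serializing handling of one event source
 * across threads. The rearm cycle, observed on a pipe:
 */
#include <sys/event.h>
#include <unistd.h>
#include <time.h>
#include <stdio.h>

int
main(void)
{
	int fds[2];
	pipe(fds);

	int kq = kqueue();
	struct kevent kev, out;
	struct timespec nowait = { 0, 0 };

	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	write(fds[1], "x", 1);
	kevent(kq, NULL, 0, &out, 1, NULL);        /* delivered once ... */

	int n = kevent(kq, NULL, 0, &out, 1, &nowait);
	printf("before rearm: %d event(s)\n", n);  /* 0: auto-disabled */

	EV_SET(&kev, fds[0], EVFILT_READ, EV_ENABLE, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);        /* rearm */

	n = kevent(kq, NULL, 0, &out, 1, &nowait);
	printf("after rearm:  %d event(s)\n", n);  /* 1: data still pending */
	return 0;
}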
@@ -4689,8 +4760,9 @@ knote_process(struct knote *kn, return EJUSTRETURN; } - if (result & FILTER_ADJUST_EVENT_QOS_BIT) + if (result & FILTER_ADJUST_EVENT_QOS_BIT) { knote_adjust_qos(kq, kn, result); + } kev.qos = _pthread_priority_combine(kn->kn_qos, kn->kn_qos_override); if (kev.flags & EV_ONESHOT) { @@ -4722,8 +4794,8 @@ knote_process(struct knote *kn, if (kev.flags & EV_VANISHED) { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KNOTE_VANISHED), - kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32), - kn->kn_filtid); + kev.ident, kn->kn_udata, kn->kn_status | (kn->kn_id << 32), + kn->kn_filtid); } error = (callback)(kq, &kev, callback_data); @@ -4739,7 +4811,7 @@ knote_process(struct knote *kn, #define KQWQAE_UNBIND 3 static int kqworkq_acknowledge_events(struct kqworkq *kqwq, struct kqrequest *kqr, - int kevent_flags, int kqwqae_op) + int kevent_flags, int kqwqae_op) { thread_qos_t old_override = THREAD_QOS_UNSPECIFIED; thread_t thread = kqr->kqr_thread; @@ -4800,7 +4872,7 @@ kqworkq_acknowledge_events(struct kqworkq *kqwq, struct kqrequest *kqr, */ if (kqr->kqr_state & KQR_WAKEUP) { kqueue_threadreq_initiate(&kqwq->kqwq_kqueue, kqr, - kqr->kqr_qos_index, 0); + kqr->kqr_qos_index, 0); } } @@ -4831,18 +4903,18 @@ kqworkq_acknowledge_events(struct kqworkq *kqwq, struct kqrequest *kqr, */ static int kqworkq_begin_processing(struct kqworkq *kqwq, struct kqrequest *kqr, - int kevent_flags) + int kevent_flags) { int rc = 0; KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_START, - 0, kqr->kqr_qos_index); + 0, kqr->kqr_qos_index); rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags, - KQWQAE_BEGIN_PROCESSING); + KQWQAE_BEGIN_PROCESSING); KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_PROCESS_BEGIN) | DBG_FUNC_END, - thread_tid(kqr->kqr_thread), kqr->kqr_state); + thread_tid(kqr->kqr_thread), kqr->kqr_state); return rc; } @@ -4881,8 +4953,8 @@ kqworkloop_acknowledge_events(struct kqworkloop *kqwl) * further overrides keep pushing. 
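/*
 * Illustrative sketch, not part of the patch: knote_process above turns an
 * EV_ONESHOT delivery into a knote_drop, so the registration itself vanishes
 * with the first event -- unlike EV_DISPATCH, there is nothing to re-enable:
 */
#include <sys/event.h>
#include <unistd.h>
#include <time.h>
#include <stdio.h>

int
main(void)
{
	int fds[2];
	pipe(fds);

	int kq = kqueue();
	struct kevent kev, out;
	struct timespec nowait = { 0, 0 };

	EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	write(fds[1], "x", 1);
	kevent(kq, NULL, 0, &out, 1, NULL);        /* first delivery */

	/* the knote is gone; even with unread data this reports nothing */
	int n = kevent(kq, NULL, 0, &out, 1, &nowait);
	printf("after oneshot: %d event(s)\n", n);  /* 0 */
	return 0;
}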
*/ if (knote_fops(kn)->f_adjusts_qos && (kn->kn_status & KN_DISABLED) && - (kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 && - (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) { + (kn->kn_status & (KN_STAYACTIVE | KN_DROPPING)) == 0 && + (kn->kn_flags & (EV_DISPATCH | EV_DISABLE)) == EV_DISPATCH) { qos = MAX(qos, knote_get_qos_override_index(kn)); continue; } @@ -4904,7 +4976,7 @@ kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags) kqlock_held(kq); KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_START, - kqwl->kqwl_dynamicid, 0, 0); + kqwl->kqwl_dynamicid, 0, 0); /* nobody else should still be processing */ assert((kq->kq_state & KQ_PROCESSING) == 0); @@ -4986,7 +5058,7 @@ kqworkloop_begin_processing(struct kqworkloop *kqwl, unsigned int kevent_flags) done: KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_BEGIN) | DBG_FUNC_END, - kqwl->kqwl_dynamicid, 0, 0); + kqwl->kqwl_dynamicid, 0, 0); return rc; } @@ -5008,25 +5080,26 @@ kqfile_begin_processing(struct kqueue *kq) assert((kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0); KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); + VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); /* wait to become the exclusive processing thread */ for (;;) { if (kq->kq_state & KQ_DRAIN) { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(kq), 2); + VM_KERNEL_UNSLIDE_OR_PERM(kq), 2); return -1; } - if ((kq->kq_state & KQ_PROCESSING) == 0) + if ((kq->kq_state & KQ_PROCESSING) == 0) { break; + } /* if someone else is processing the queue, wait */ kq->kq_state |= KQ_PROCWAIT; suppressq = kqueue_get_suppressed_queue(kq, NULL); waitq_assert_wait64((struct waitq *)&kq->kq_wqs, - CAST_EVENT64_T(suppressq), THREAD_UNINT | THREAD_WAIT_NOREPORT, - TIMEOUT_WAIT_FOREVER); + CAST_EVENT64_T(suppressq), THREAD_UNINT | THREAD_WAIT_NOREPORT, + TIMEOUT_WAIT_FOREVER); kqunlock(kq); thread_block(THREAD_CONTINUE_NULL); @@ -5042,7 +5115,7 @@ kqfile_begin_processing(struct kqueue *kq) /* anything left to process? */ if (kqueue_queue_empty(kq, QOS_INDEX_KQFILE)) { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(kq), 1); + VM_KERNEL_UNSLIDE_OR_PERM(kq), 1); return -1; } @@ -5050,7 +5123,7 @@ kqfile_begin_processing(struct kqueue *kq) kq->kq_state |= KQ_PROCESSING; KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_BEGIN) | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(kq)); + VM_KERNEL_UNSLIDE_OR_PERM(kq)); return 0; } @@ -5064,7 +5137,7 @@ kqfile_begin_processing(struct kqueue *kq) */ static int kqworkq_end_processing(struct kqworkq *kqwq, struct kqrequest *kqr, - int kevent_flags) + int kevent_flags) { if (!kqueue_queue_empty(&kqwq->kqwq_kqueue, kqr->kqr_qos_index)) { /* remember we didn't process everything */ @@ -5079,7 +5152,7 @@ kqworkq_end_processing(struct kqworkq *kqwq, struct kqrequest *kqr, * which is a failure condition for end_processing. 
*/ int rc = kqworkq_acknowledge_events(kqwq, kqr, kevent_flags, - KQWQAE_END_PROCESSING); + KQWQAE_END_PROCESSING); if (rc == 0) { return -1; } @@ -5110,7 +5183,7 @@ kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags) kqlock_held(kq); KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_START, - kqwl->kqwl_dynamicid, 0, 0); + kqwl->kqwl_dynamicid, 0, 0); if (flags & KQ_PROCESSING) { assert(kq->kq_state & KQ_PROCESSING); @@ -5123,7 +5196,7 @@ kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags) if (!TAILQ_EMPTY(&kqwl->kqwl_queue[KQWL_BUCKET_STAYACTIVE])) { kq_req_lock(kqwl); kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_QOS, - KQWL_BUCKET_STAYACTIVE); + KQWL_BUCKET_STAYACTIVE); kq_req_unlock(kqwl); } @@ -5173,7 +5246,7 @@ kqworkloop_end_processing(struct kqworkloop *kqwl, int flags, int kevent_flags) } KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_PROCESS_END) | DBG_FUNC_END, - kqwl->kqwl_dynamicid, 0, 0); + kqwl->kqwl_dynamicid, 0, 0); return rc; } @@ -5190,10 +5263,10 @@ kqfile_end_processing(struct kqueue *kq) kqlock_held(kq); - assert((kq->kq_state & (KQ_WORKQ|KQ_WORKLOOP)) == 0); + assert((kq->kq_state & (KQ_WORKQ | KQ_WORKLOOP)) == 0); KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQ_PROCESS_END), - VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); + VM_KERNEL_UNSLIDE_OR_PERM(kq), 0); /* * Return suppressed knotes to their original state. @@ -5210,15 +5283,15 @@ kqfile_end_processing(struct kqueue *kq) if (procwait) { /* first wake up any thread already waiting to process */ waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, - CAST_EVENT64_T(suppressq), - THREAD_AWAKENED, - WAITQ_ALL_PRIORITIES); + CAST_EVENT64_T(suppressq), + THREAD_AWAKENED, + WAITQ_ALL_PRIORITIES); } } static int kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options, - struct kqueue_workloop_params *params, int *retval) + struct kqueue_workloop_params *params, int *retval) { int error = 0; int fd; @@ -5236,23 +5309,23 @@ kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options, } if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_PRI) && - (params->kqwlp_sched_pri < 1 || - params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) { + (params->kqwlp_sched_pri < 1 || + params->kqwlp_sched_pri > 63 /* MAXPRI_USER */)) { error = EINVAL; break; } if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_SCHED_POL) && - invalid_policy(params->kqwlp_sched_pol)) { + invalid_policy(params->kqwlp_sched_pol)) { error = EINVAL; break; } if ((params->kqwlp_flags & KQ_WORKLOOP_CREATE_CPU_PERCENT) && - (params->kqwlp_cpu_percent <= 0 || - params->kqwlp_cpu_percent > 100 || - params->kqwlp_cpu_refillms <= 0 || - params->kqwlp_cpu_refillms > 0x00ffffff)) { + (params->kqwlp_cpu_percent <= 0 || + params->kqwlp_cpu_percent > 100 || + params->kqwlp_cpu_refillms <= 0 || + params->kqwlp_cpu_refillms > 0x00ffffff)) { error = EINVAL; break; } @@ -5272,8 +5345,8 @@ kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options, } error = kevent_get_kq(p, params->kqwlp_id, &trp, - KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP | - KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST , &fp, &fd, &kq); + KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP | + KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST, &fp, &fd, &kq); if (error) { break; } @@ -5290,8 +5363,8 @@ kqueue_workloop_ctl_internal(proc_t p, uintptr_t cmd, uint64_t __unused options, break; case KQ_WORKLOOP_DESTROY: error = kevent_get_kq(p, params->kqwlp_id, NULL, - KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP | 
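/*
 * Illustrative sketch, not part of the patch: a standalone restatement of the
 * parameter bounds kqueue_workloop_ctl_internal enforces above for
 * KQ_WORKLOOP_CREATE. The struct and flag names below are illustrative
 * mirrors of the private kqueue_workloop_params interface (the sched_pol /
 * invalid_policy check is kernel-internal and omitted); the numeric bounds
 * are transcribed from the code above.
 */
#include <errno.h>
#include <stdint.h>

struct wl_params {                 /* illustrative mirror, see note above */
	uint32_t flags;
	int sched_pri;
	int cpu_percent;
	int cpu_refillms;
};

#define WL_CREATE_SCHED_PRI   0x1  /* stand-ins for KQ_WORKLOOP_CREATE_* */
#define WL_CREATE_CPU_PERCENT 0x4

static int
wl_validate(const struct wl_params *p)
{
	if ((p->flags & WL_CREATE_SCHED_PRI) &&
	    (p->sched_pri < 1 || p->sched_pri > 63 /* MAXPRI_USER */)) {
		return EINVAL;
	}
	if ((p->flags & WL_CREATE_CPU_PERCENT) &&
	    (p->cpu_percent <= 0 || p->cpu_percent > 100 ||
	    p->cpu_refillms <= 0 || p->cpu_refillms > 0x00ffffff)) {
		return EINVAL;
	}
	return 0;
}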
- KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST , &fp, &fd, &kq); + KEVENT_FLAG_DYNAMIC_KQUEUE | KEVENT_FLAG_WORKLOOP | + KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST, &fp, &fd, &kq); if (error) { break; } @@ -5333,7 +5406,7 @@ kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval) } return kqueue_workloop_ctl_internal(p, uap->cmd, uap->options, ¶ms, - retval); + retval); } /* @@ -5351,10 +5424,10 @@ kqueue_workloop_ctl(proc_t p, struct kqueue_workloop_ctl_args *uap, int *retval) */ static int kqueue_process(struct kqueue *kq, - kevent_callback_t callback, - void *callback_data, - struct filt_process_s *process_data, - int *countp) + kevent_callback_t callback, + void *callback_data, + struct filt_process_s *process_data, + int *countp) { struct uthread *ut = get_bsdthread_info(current_thread()); struct kqrequest *kqr = ut->uu_kqr_bound; @@ -5435,7 +5508,9 @@ process_again: * * If we returned events however, end processing never fails. */ - if (error || nevents) flags &= ~KEVENT_FLAG_PARKING; + if (error || nevents) { + flags &= ~KEVENT_FLAG_PARKING; + } if (kq->kq_state & KQ_WORKQ) { rc = kqworkq_end_processing(kqu.kqwq, kqr, flags); } else if (kq->kq_state & KQ_WORKLOOP) { @@ -5449,7 +5524,7 @@ process_again: #if DEBUG || DEVELOPMENT if (retries-- == 0) { panic("kevent: way too many knote_process retries, kq: %p (0x%02x)", - kq, kq->kq_state); + kq, kq->kq_state); } #endif goto process_again; @@ -5472,28 +5547,29 @@ kqueue_scan_continue(void *data, wait_result_t wait_result) switch (wait_result) { case THREAD_AWAKENED: { kqlock(kq); - retry: +retry: error = kqueue_process(kq, cont_args->call, cont_args->data, - process_data, &count); + process_data, &count); if (error == 0 && count == 0) { if (kq->kq_state & KQ_DRAIN) { kqunlock(kq); goto drain; } - if (kq->kq_state & KQ_WAKEUP) + if (kq->kq_state & KQ_WAKEUP) { goto retry; + } waitq_assert_wait64((struct waitq *)&kq->kq_wqs, - KQ_EVENT, THREAD_ABORTSAFE, - cont_args->deadline); + KQ_EVENT, THREAD_ABORTSAFE, + cont_args->deadline); kq->kq_state |= KQ_SLEEP; kqunlock(kq); thread_block_parameter(kqueue_scan_continue, kq); /* NOTREACHED */ } kqunlock(kq); - } break; + } break; case THREAD_TIMED_OUT: error = EWOULDBLOCK; break; @@ -5501,7 +5577,7 @@ kqueue_scan_continue(void *data, wait_result_t wait_result) error = EINTR; break; case THREAD_RESTART: - drain: +drain: error = EBADF; break; default: @@ -5531,12 +5607,12 @@ kqueue_scan_continue(void *data, wait_result_t wait_result) */ int kqueue_scan(struct kqueue *kq, - kevent_callback_t callback, - kqueue_continue_t continuation, - void *callback_data, - struct filt_process_s *process_data, - struct timeval *atvp, - __unused struct proc *p) + kevent_callback_t callback, + kqueue_continue_t continuation, + void *callback_data, + struct filt_process_s *process_data, + struct timeval *atvp, + __unused struct proc *p) { thread_continue_t cont = THREAD_CONTINUE_NULL; unsigned int flags; @@ -5564,10 +5640,10 @@ kqueue_scan(struct kqueue *kq, */ kqlock(kq); error = kqueue_process(kq, callback, callback_data, - process_data, &count); - if (error || count) + process_data, &count); + if (error || count) { break; /* lock still held */ - + } /* looks like we have to consider blocking */ if (first) { first = 0; @@ -5577,8 +5653,8 @@ kqueue_scan(struct kqueue *kq, clock_get_uptime(&now); nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC + - atvp->tv_usec * (long)NSEC_PER_USEC, - &deadline); + atvp->tv_usec * (long)NSEC_PER_USEC, + &deadline); if (now >= deadline) { /* non-blocking call 
*/ error = EWOULDBLOCK; @@ -5587,7 +5663,7 @@ kqueue_scan(struct kqueue *kq, deadline -= now; clock_absolutetime_interval_to_deadline(deadline, &deadline); } else { - deadline = 0; /* block forever */ + deadline = 0; /* block forever */ } if (continuation) { @@ -5616,9 +5692,9 @@ kqueue_scan(struct kqueue *kq, /* go ahead and wait */ waitq_assert_wait64_leeway((struct waitq *)&kq->kq_wqs, - KQ_EVENT, THREAD_ABORTSAFE, - TIMEOUT_URGENCY_USER_NORMAL, - deadline, TIMEOUT_NO_LEEWAY); + KQ_EVENT, THREAD_ABORTSAFE, + TIMEOUT_URGENCY_USER_NORMAL, + deadline, TIMEOUT_NO_LEEWAY); kq->kq_state |= KQ_SLEEP; kqunlock(kq); wait_result = thread_block_parameter(cont, kq); @@ -5640,7 +5716,7 @@ kqueue_scan(struct kqueue *kq, } } kqunlock(kq); - return (error); + return error; } @@ -5651,37 +5727,37 @@ kqueue_scan(struct kqueue *kq, /*ARGSUSED*/ static int kqueue_read(__unused struct fileproc *fp, - __unused struct uio *uio, - __unused int flags, - __unused vfs_context_t ctx) + __unused struct uio *uio, + __unused int flags, + __unused vfs_context_t ctx) { - return (ENXIO); + return ENXIO; } /*ARGSUSED*/ static int kqueue_write(__unused struct fileproc *fp, - __unused struct uio *uio, - __unused int flags, - __unused vfs_context_t ctx) + __unused struct uio *uio, + __unused int flags, + __unused vfs_context_t ctx) { - return (ENXIO); + return ENXIO; } /*ARGSUSED*/ static int kqueue_ioctl(__unused struct fileproc *fp, - __unused u_long com, - __unused caddr_t data, - __unused vfs_context_t ctx) + __unused u_long com, + __unused caddr_t data, + __unused vfs_context_t ctx) { - return (ENOTTY); + return ENOTTY; } /*ARGSUSED*/ static int kqueue_select(struct fileproc *fp, int which, void *wq_link_id, - __unused vfs_context_t ctx) + __unused vfs_context_t ctx) { struct kqueue *kq = (struct kqueue *)fp->f_data; struct kqtailq *queue; @@ -5689,8 +5765,9 @@ kqueue_select(struct fileproc *fp, int which, void *wq_link_id, struct knote *kn; int retnum = 0; - if (which != FREAD) - return (0); + if (which != FREAD) { + return 0; + } kqlock(kq); @@ -5706,11 +5783,11 @@ kqueue_select(struct fileproc *fp, int which, void *wq_link_id, */ if (wq_link_id != NULL) { thread_t cur_act = current_thread(); - struct uthread * ut = get_bsdthread_info(cur_act); + struct uthread * ut = get_bsdthread_info(cur_act); kq->kq_state |= KQ_SEL; waitq_link((struct waitq *)&kq->kq_wqs, ut->uu_wqset, - WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id); + WAITQ_SHOULD_LOCK, (uint64_t *)wq_link_id); /* always consume the reserved link object */ waitq_link_release(*(uint64_t *)wq_link_id); @@ -5731,7 +5808,7 @@ kqueue_select(struct fileproc *fp, int which, void *wq_link_id, if (kqfile_begin_processing(kq) == -1) { kqunlock(kq); - return (0); + return 0; } queue = &kq->kq_queue[QOS_INDEX_KQFILE]; @@ -5763,7 +5840,7 @@ kqueue_select(struct fileproc *fp, int which, void *wq_link_id, /* If didn't vanish while suppressed - peek at it */ if ((kn->kn_status & KN_DROPPING) || !knote_lock(kq, kn, &knlc, - KNOTE_KQ_LOCK_ON_FAILURE)) { + KNOTE_KQ_LOCK_ON_FAILURE)) { continue; } @@ -5786,7 +5863,7 @@ kqueue_select(struct fileproc *fp, int which, void *wq_link_id, out: kqfile_end_processing(kq); kqunlock(kq); - return (retnum); + return retnum; } /* @@ -5801,7 +5878,7 @@ kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx) assert((kqf->kqf_state & KQ_WORKQ) == 0); kqueue_dealloc(&kqf->kqf_kqueue); fg->fg_data = NULL; - return (0); + return 0; } /* @@ -5819,7 +5896,7 @@ kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx) */ static int 
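/*
 * Illustrative sketch, not part of the patch: kqueue_select above is what
 * makes a kqueue descriptor itself selectable for reading (the read/write/
 * ioctl stubs fail with ENXIO/ENOTTY, as shown). Assuming select(2) wakes
 * when the inner queue has pending events:
 */
#include <sys/event.h>
#include <sys/select.h>
#include <time.h>
#include <stdio.h>

int
main(void)
{
	int kq = kqueue();
	struct kevent kev;

	/* arm a 50ms one-shot so the kqueue becomes readable shortly */
	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 50, NULL);
	kevent(kq, &kev, 1, NULL, 0, NULL);

	fd_set rfds;
	FD_ZERO(&rfds);
	FD_SET(kq, &rfds);
	if (select(kq + 1, &rfds, NULL, NULL, NULL) == 1) {
		struct kevent out;
		struct timespec nowait = { 0, 0 };
		int n = kevent(kq, NULL, 0, &out, 1, &nowait);
		printf("kqueue readable, drained %d event(s)\n", n);
	}
	return 0;
}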
kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, - __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) + __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) { struct kqfile *kqf = (struct kqfile *)kn->kn_fp->f_data; struct kqueue *kq = &kqf->kqf_kqueue; @@ -5848,8 +5925,7 @@ kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, kqlock(parentkq); if (parentkq->kq_level > 0 && - parentkq->kq_level < kq->kq_level) - { + parentkq->kq_level < kq->kq_level) { kqunlock(parentkq); knote_set_error(kn, EINVAL); return 0; @@ -5872,12 +5948,13 @@ kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, kqlock(kq); KNOTE_ATTACH(&kqf->kqf_sel.si_note, kn); /* indicate nesting in child, if needed */ - if (kq->kq_level == 0) + if (kq->kq_level == 0) { kq->kq_level = 1; + } int count = kq->kq_count; kqunlock(kq); - return (count > 0); + return count > 0; } } @@ -5896,7 +5973,7 @@ kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx) { kq->kq_state |= KQ_DRAIN; kqueue_interrupt(kq); kqunlock(kq); - return (0); + return 0; } /*ARGSUSED*/ @@ -5911,32 +5988,65 @@ kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p) bzero((void *)sb64, sizeof(*sb64)); sb64->st_size = kq->kq_count; - if (kq->kq_state & KQ_KEV_QOS) + if (kq->kq_state & KQ_KEV_QOS) { sb64->st_blksize = sizeof(struct kevent_qos_s); - else if (kq->kq_state & KQ_KEV64) + } else if (kq->kq_state & KQ_KEV64) { sb64->st_blksize = sizeof(struct kevent64_s); - else if (IS_64BIT_PROCESS(p)) + } else if (IS_64BIT_PROCESS(p)) { sb64->st_blksize = sizeof(struct user64_kevent); - else + } else { sb64->st_blksize = sizeof(struct user32_kevent); + } sb64->st_mode = S_IFIFO; } else { struct stat *sb = (struct stat *)ub; bzero((void *)sb, sizeof(*sb)); sb->st_size = kq->kq_count; - if (kq->kq_state & KQ_KEV_QOS) + if (kq->kq_state & KQ_KEV_QOS) { sb->st_blksize = sizeof(struct kevent_qos_s); - else if (kq->kq_state & KQ_KEV64) + } else if (kq->kq_state & KQ_KEV64) { sb->st_blksize = sizeof(struct kevent64_s); - else if (IS_64BIT_PROCESS(p)) + } else if (IS_64BIT_PROCESS(p)) { sb->st_blksize = sizeof(struct user64_kevent); - else + } else { sb->st_blksize = sizeof(struct user32_kevent); + } sb->st_mode = S_IFIFO; } kqunlock(kq); - return (0); + return 0; +} + +static inline bool +kqueue_threadreq_can_use_ast(struct kqueue *kq) +{ + if (current_proc() == kq->kq_p) { + /* + * Setting an AST from a non BSD syscall is unsafe: mach_msg_trap() can + * do combined send/receive and in the case of self-IPC, the AST may be + * set on a thread that will not return to userspace and needs the + * thread the AST would create to unblock itself. + * + * At this time, we really want to target: + * + * - kevent variants that can cause thread creations, and dispatch + * really only uses kevent_qos and kevent_id, + * + * - workq_kernreturn (directly about thread creations) + * + * - bsdthread_ctl which is used for qos changes and has direct impact + * on the creator thread scheduling decisions.
+ */ + switch (current_uthread()->syscall_code) { + case SYS_kevent_qos: + case SYS_kevent_id: + case SYS_workq_kernreturn: + case SYS_bsdthread_ctl: + return true; + } + } + return false; } /* @@ -5950,7 +6060,7 @@ kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p) */ static void kqueue_threadreq_initiate(struct kqueue *kq, struct kqrequest *kqr, - kq_index_t qos, int flags) + kq_index_t qos, int flags) { assert(kqr->kqr_state & KQR_WAKEUP); assert(kqr->kqr_thread == THREAD_NULL); @@ -5971,12 +6081,12 @@ kqueue_threadreq_initiate(struct kqueue *kq, struct kqrequest *kqr, assert(kqwl->kqwl_owner == THREAD_NULL); KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THREQUEST), - kqwl->kqwl_dynamicid, 0, qos, kqr->kqr_state); + kqwl->kqwl_dynamicid, 0, qos, kqr->kqr_state); ts = kqwl->kqwl_turnstile; } else { assert(kq->kq_state & KQ_WORKQ); KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_THREQUEST), - -1, 0, qos, kqr->kqr_state); + -1, 0, qos, kqr->kqr_state); } kqr->kqr_state |= KQR_THREQUESTED; @@ -5986,11 +6096,9 @@ kqueue_threadreq_initiate(struct kqueue *kq, struct kqrequest *kqr, * Provide the pthread kext a pointer to a workq_threadreq_s structure for * its use until a corresponding kqueue_threadreq_bind callback. */ -#if 0 // 45129862 - if ((kq->kq_state & KQ_WORKLOOP) && current_proc() == kq->kq_p) { + if (kqueue_threadreq_can_use_ast(kq)) { flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE; } -#endif if (qos == KQWQ_QOS_MANAGER) { qos = WORKQ_THREAD_QOS_MANAGER; } @@ -6013,7 +6121,7 @@ kqueue_threadreq_initiate(struct kqueue *kq, struct kqrequest *kqr, */ void kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t req, - thread_t thread) + thread_t thread) { struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req); struct uthread *ut = get_bsdthread_info(thread); @@ -6030,7 +6138,7 @@ kqueue_threadreq_bind_prepost(struct proc *p __unused, workq_threadreq_t req, * is the interlock for the turnstile and can update the inheritor. 
*/ turnstile_update_inheritor(ts, thread, TURNSTILE_IMMEDIATE_UPDATE | - TURNSTILE_INHERITOR_THREAD); + TURNSTILE_INHERITOR_THREAD); turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); } } @@ -6064,11 +6172,9 @@ kqueue_threadreq_modify(struct kqueue *kq, struct kqrequest *kqr, kq_index_t qos kq_req_held(kq); int flags = 0; -#if 0 // 45129862 - if ((kq->kq_state & KQ_WORKLOOP) && kq->kq_p == current_proc()) { + if (kqueue_threadreq_can_use_ast(kq)) { flags |= WORKQ_THREADREQ_SET_AST_ON_FAILURE; } -#endif workq_kern_threadreq_modify(kq->kq_p, kqr, qos, flags); } @@ -6081,7 +6187,7 @@ kqueue_threadreq_modify(struct kqueue *kq, struct kqrequest *kqr, kq_index_t qos */ void kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req, thread_t thread, - unsigned int flags) + unsigned int flags) { struct kqrequest *kqr = __container_of(req, struct kqrequest, kqr_req); kqueue_t kqu = kqr_kqueue(p, kqr); @@ -6140,8 +6246,8 @@ kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req, thread_t thread, } KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_BIND), kqu.kqwl->kqwl_dynamicid, - thread_tid(thread), kqr->kqr_qos_index, - (kqr->kqr_override_index << 16) | kqr->kqr_state); + thread_tid(thread), kqr->kqr_qos_index, + (kqr->kqr_override_index << 16) | kqr->kqr_state); ut->uu_kqueue_override = kqr->kqr_override_index; if (kqr->kqr_override_index) { @@ -6151,8 +6257,8 @@ kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req, thread_t thread, assert(kqr->kqr_override_index == 0); KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_BIND), -1, - thread_tid(thread), kqr->kqr_qos_index, - (kqr->kqr_override_index << 16) | kqr->kqr_state); + thread_tid(thread), kqr->kqr_qos_index, + (kqr->kqr_override_index << 16) | kqr->kqr_state); } } @@ -6313,7 +6419,7 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) case KQWL_UTQ_PARKING: case KQWL_UTQ_UNBINDING: kqr->kqr_override_index = qos; - /* FALLTHROUGH */ + /* FALLTHROUGH */ case KQWL_UTQ_RECOMPUTE_WAKEUP_QOS: if (op == KQWL_UTQ_RECOMPUTE_WAKEUP_QOS) { assert(qos == THREAD_QOS_UNSPECIFIED); @@ -6324,7 +6430,7 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) kqr->kqr_override_index = THREAD_QOS_UNSPECIFIED; } if (!TAILQ_EMPTY(&kqwl->kqwl_queue[i]) && - (kqr->kqr_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) { + (kqr->kqr_wakeup_indexes & KQWL_STAYACTIVE_FIRED_BIT)) { /* * If the KQWL_STAYACTIVE_FIRED_BIT is set, it means a stay active * knote may have fired, so we need to merge in kqr_stayactive_qos. 
@@ -6354,7 +6460,7 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) goto recompute; case KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE: - recompute: +recompute: /* * When modifying the wakeup QoS or the override QoS, we always need to * maintain our invariant that kqr_override_index is at least as large @@ -6394,8 +6500,8 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) #if 0 /* JMM - need new trace hooks for owner overrides */ KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), - kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->kqr_qos_index, - (kqr->kqr_override_index << 16) | kqr->kqr_state); + kqwl->kqwl_dynamicid, thread_tid(kqwl_owner), kqr->kqr_qos_index, + (kqr->kqr_override_index << 16) | kqr->kqr_state); #endif if (new_owner_override == old_owner_override) { // nothing to do @@ -6403,7 +6509,7 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) thread_add_ipc_override(kqwl_owner, new_owner_override); } else if (new_owner_override == THREAD_QOS_UNSPECIFIED) { thread_drop_ipc_override(kqwl_owner); - } else /* old_owner_override != new_owner_override */ { + } else { /* old_owner_override != new_owner_override */ thread_update_ipc_override(kqwl_owner, new_owner_override); } } @@ -6426,7 +6532,7 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) initiate_flags = WORKQ_THREADREQ_ATTEMPT_REBIND; } kqueue_threadreq_initiate(kq, kqr, new_owner_override, - initiate_flags); + initiate_flags); } } else if (servicer) { /* @@ -6440,7 +6546,7 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) thread_add_ipc_override(servicer, kqr->kqr_override_index); } else if (kqr->kqr_override_index == THREAD_QOS_UNSPECIFIED) { thread_drop_ipc_override(servicer); - } else /* ut->uu_kqueue_override != kqr->kqr_override_index */ { + } else { /* ut->uu_kqueue_override != kqr->kqr_override_index */ thread_update_ipc_override(servicer, kqr->kqr_override_index); } ut->uu_kqueue_override = kqr->kqr_override_index; @@ -6466,8 +6572,8 @@ kqworkloop_update_threads_qos(struct kqworkloop *kqwl, int op, kq_index_t qos) if (qos_changed) { KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_THADJUST), kqwl->kqwl_dynamicid, - thread_tid(kqr->kqr_thread), kqr->kqr_qos_index, - (kqr->kqr_override_index << 16) | kqr->kqr_state); + thread_tid(kqr->kqr_thread), kqr->kqr_qos_index, + (kqr->kqr_override_index << 16) | kqr->kqr_state); } } @@ -6493,7 +6599,7 @@ kqueue_get_queue(struct kqueue *kq, kq_index_t qos_index) assert(qos_index == QOS_INDEX_KQFILE); } static_assert(offsetof(struct kqueue, kq_queue) == sizeof(struct kqueue), - "struct kqueue::kq_queue must be exactly at the end"); + "struct kqueue::kq_queue must be exactly at the end"); return &kq->kq_queue[qos_index]; } @@ -6528,7 +6634,7 @@ kqueue_get_turnstile(kqueue_t kqu, bool can_alloc) if (kqr_state & KQR_ALLOCATED_TURNSTILE) { /* force a dependency to pair with the atomic or with release below */ return os_atomic_load_with_dependency_on(&kqu.kqwl->kqwl_turnstile, - kqr_state); + kqr_state); } if (!can_alloc) { @@ -6547,11 +6653,11 @@ kqueue_get_turnstile(kqueue_t kqu, bool can_alloc) ts = kqu.kqwl->kqwl_turnstile; } else { ts = turnstile_prepare((uintptr_t)kqu.kqwl, &kqu.kqwl->kqwl_turnstile, - ts, TURNSTILE_WORKLOOPS); + ts, TURNSTILE_WORKLOOPS); /* release-barrier to pair with the unlocked load of kqwl_turnstile above */ os_atomic_or(&kqu.kqwl->kqwl_request.kqr_state, - KQR_ALLOCATED_TURNSTILE, release); + KQR_ALLOCATED_TURNSTILE, 
release); } if (filt_wlturnstile_interlock_is_workq(kqu.kqwl)) { @@ -6632,7 +6738,7 @@ knote_set_qos_overcommit(struct knote *kn) /* turn overcommit on for the appropriate thread request? */ if ((kn->kn_qos & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) && - (kq->kq_state & KQ_WORKLOOP)) { + (kq->kq_state & KQ_WORKLOOP)) { struct kqworkloop *kqwl = (struct kqworkloop *)kq; struct kqrequest *kqr = &kqwl->kqwl_request; @@ -6661,7 +6767,7 @@ knote_get_qos_override_index(struct knote *kn) static void kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn, - kq_index_t override_index) + kq_index_t override_index) { struct kqrequest *kqr; kq_index_t old_override_index; @@ -6680,10 +6786,11 @@ kqworkq_update_override(struct kqworkq *kqwq, struct knote *kn, /* apply the override to [incoming?] servicing thread */ if (kqr->kqr_thread) { - if (old_override_index) + if (old_override_index) { thread_update_ipc_override(kqr->kqr_thread, override_index); - else + } else { thread_add_ipc_override(kqr->kqr_thread, override_index); + } } } kq_req_unlock(kqwq); @@ -6694,7 +6801,7 @@ kqworkloop_update_override(struct kqworkloop *kqwl, kq_index_t override_index) { kq_req_lock(kqwl); kqworkloop_update_threads_qos(kqwl, KQWL_UTQ_UPDATE_WAKEUP_OVERRIDE, - override_index); + override_index); kq_req_unlock(kqwl); } @@ -6706,7 +6813,7 @@ kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread) kq_index_t ipc_override = ut->uu_kqueue_override; KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWL_UNBIND), kqwl->kqwl_dynamicid, - thread_tid(thread), 0, 0); + thread_tid(thread), 0, 0); kq_req_held(kqwl); assert(ut->uu_kqr_bound == kqr); @@ -6715,9 +6822,9 @@ kqworkloop_unbind_locked(struct kqworkloop *kqwl, thread_t thread) if (kqwl->kqwl_owner == NULL && kqwl->kqwl_turnstile) { turnstile_update_inheritor(kqwl->kqwl_turnstile, - TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE); + TURNSTILE_INHERITOR_NULL, TURNSTILE_IMMEDIATE_UPDATE); turnstile_update_inheritor_complete(kqwl->kqwl_turnstile, - TURNSTILE_INTERLOCK_HELD); + TURNSTILE_INTERLOCK_HELD); } kqr->kqr_thread = NULL; @@ -6783,13 +6890,13 @@ kqworkloop_unbind(proc_t p, struct kqworkloop *kqwl) static thread_qos_t kqworkq_unbind_locked(__assert_only struct kqworkq *kqwq, - struct kqrequest *kqr, thread_t thread) + struct kqrequest *kqr, thread_t thread) { struct uthread *ut = get_bsdthread_info(thread); kq_index_t old_override = kqr->kqr_override_index; KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KQWQ_UNBIND), -1, - thread_tid(kqr->kqr_thread), kqr->kqr_qos_index, 0); + thread_tid(kqr->kqr_thread), kqr->kqr_qos_index, 0); kq_req_held(kqwq); assert(ut->uu_kqr_bound == kqr); @@ -6858,7 +6965,7 @@ knote_apply_qos_override(struct knote *kn, kq_index_t qos_index) static bool knote_should_apply_qos_override(struct kqueue *kq, struct knote *kn, int result, - thread_qos_t *qos_out) + thread_qos_t *qos_out) { thread_qos_t qos_index = (result >> FILTER_ADJUST_EVENT_QOS_SHIFT) & 7; @@ -6893,11 +7000,13 @@ knote_should_apply_qos_override(struct kqueue *kq, struct knote *kn, int result, * incoming event has no QoS, else, the registration QoS acts as a floor. 
*/ if (kn->kn_qos & _PTHREAD_PRIORITY_FALLBACK_FLAG) { - if (qos_index == THREAD_QOS_UNSPECIFIED) + if (qos_index == THREAD_QOS_UNSPECIFIED) { qos_index = kn->kn_req_index; + } } else { - if (qos_index < kn->kn_req_index) + if (qos_index < kn->kn_req_index) { qos_index = kn->kn_req_index; + } } if ((kn->kn_status & KN_MERGE_QOS) && (qos_index < kn->kn_qos_override)) { /* Never lower QoS when in "Merge" mode */ @@ -6967,14 +7076,15 @@ knote_wakeup(struct knote *kn) struct kqfile *kqf = (struct kqfile *)kq; /* flag wakeups during processing */ - if (kq->kq_state & KQ_PROCESSING) + if (kq->kq_state & KQ_PROCESSING) { kq->kq_state |= KQ_WAKEUP; + } /* wakeup a thread waiting on this queue */ if (kq->kq_state & (KQ_SLEEP | KQ_SEL)) { kq->kq_state &= ~(KQ_SLEEP | KQ_SEL); waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, KQ_EVENT, - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); } /* wakeup other kqueues/select sets we're inside */ @@ -6994,9 +7104,9 @@ kqueue_interrupt(struct kqueue *kq) if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0) { kq->kq_state &= ~(KQ_SLEEP | KQ_SEL); (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, - KQ_EVENT, - THREAD_RESTART, - WAITQ_ALL_PRIORITIES); + KQ_EVENT, + THREAD_RESTART, + WAITQ_ALL_PRIORITIES); } /* wakeup threads waiting their turn to process */ @@ -7008,9 +7118,9 @@ kqueue_interrupt(struct kqueue *kq) kq->kq_state &= ~KQ_PROCWAIT; suppressq = kqueue_get_suppressed_queue(kq, NULL); (void)waitq_wakeup64_all((struct waitq *)&kq->kq_wqs, - CAST_EVENT64_T(suppressq), - THREAD_RESTART, - WAITQ_ALL_PRIORITIES); + CAST_EVENT64_T(suppressq), + THREAD_RESTART, + WAITQ_ALL_PRIORITIES); } } @@ -7083,7 +7193,7 @@ knote_attach(struct klist *list, struct knote *kn) { int ret = SLIST_EMPTY(list); SLIST_INSERT_HEAD(list, kn, kn_selnext); - return (ret); + return ret; } /* @@ -7094,7 +7204,7 @@ int knote_detach(struct klist *list, struct knote *kn) { SLIST_REMOVE(list, kn, knote, kn_selnext); - return (SLIST_EMPTY(list)); + return SLIST_EMPTY(list); } /* @@ -7113,7 +7223,7 @@ knote_detach(struct klist *list, struct knote *kn) * recursively - which likely is not supported. */ void -knote_vanish(struct klist *list) +knote_vanish(struct klist *list, bool make_active) { struct knote *kn; struct knote *kn_next; @@ -7122,12 +7232,26 @@ knote_vanish(struct klist *list) struct kqueue *kq = knote_get_kq(kn); kqlock(kq); - if (kn->kn_status & KN_REQVANISH) { - /* If EV_VANISH supported - prepare to deliver one */ + if (__probable(kn->kn_status & KN_REQVANISH)) { + /* + * If EV_VANISH supported - prepare to deliver one + */ kn->kn_status |= KN_VANISHED; - knote_activate(kn); } else { - knote_call_filter_event(kq, kn, NOTE_REVOKE); + /* + * Handle the legacy way to indicate that the port/portset was + * deallocated or left the current Mach portspace (modern technique + * is with an EV_VANISHED protocol). + * + * Deliver an EV_EOF event for these changes (hopefully it will get + * delivered before the port name recycles to the same generation + * count and someone tries to re-register a kevent for it or the + * events are udata-specific - avoiding a conflict). 
+ */ + kn->kn_flags |= EV_EOF | EV_ONESHOT; + } + if (make_active) { + knote_activate(kn); } kqunlock(kq); } @@ -7180,9 +7304,9 @@ knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link) kr = waitq_link(wq, &kq->kq_wqs, WAITQ_ALREADY_LOCKED, reserved_link); if (kr == KERN_SUCCESS) { knote_markstayactive(kn); - return (0); + return 0; } else { - return (EINVAL); + return EINVAL; } } @@ -7203,7 +7327,7 @@ knote_unlink_waitq(struct knote *kn, struct waitq *wq) kr = waitq_unlink(wq, &kq->kq_wqs); knote_clearstayactive(kn); - return ((kr != KERN_SUCCESS) ? EINVAL : 0); + return (kr != KERN_SUCCESS) ? EINVAL : 0; } /* @@ -7226,9 +7350,10 @@ restart: kqlock(kq); - if (kq->kq_p != p) + if (kq->kq_p != p) { panic("%s: proc mismatch (kq->kq_p=%p != p=%p)", __func__, kq->kq_p, p); + } /* * If the knote supports EV_VANISHED delivery, @@ -7249,8 +7374,9 @@ restart: kqunlock(kq); knote_fops(kn)->f_detach(kn); - if (knote_fops(kn)->f_isfd) + if (knote_fops(kn)->f_isfd) { fp_drop(p, kn->kn_id, kn->kn_fp, 0); + } kqlock(kq); knote_activate(kn); @@ -7279,9 +7405,9 @@ restart: */ static struct knote * knote_fdfind(struct kqueue *kq, - struct kevent_internal_s *kev, - bool is_fd, - struct proc *p) + struct kevent_internal_s *kev, + bool is_fd, + struct proc *p) { struct filedesc *fdp = p->p_fd; struct klist *list = NULL; @@ -7338,17 +7464,18 @@ knote_fdfind(struct kqueue *kq, */ static int kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, - struct proc *p) + struct proc *p) { struct filedesc *fdp = p->p_fd; struct klist *list = NULL; int ret = 0; bool is_fd = knote_fops(kn)->f_isfd; - if (is_fd) + if (is_fd) { proc_fdlock(p); - else + } else { knhash_lock(p); + } if (knote_fdfind(kq, &kn->kn_kevent, is_fd, p) != NULL) { /* found an existing knote: we can't add this one */ @@ -7375,7 +7502,6 @@ kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, SLIST_INSERT_HEAD(list, kn, kn_link); ret = 0; goto out_locked; - } else { /* knote is fd based */ @@ -7389,10 +7515,11 @@ kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, } /* have to grow the fd_knlist */ size = fdp->fd_knlistsize; - while (size <= kn->kn_id) + while (size <= kn->kn_id) { size += KQEXTENT; + } - if (size >= (UINT_MAX/sizeof(struct klist *))) { + if (size >= (UINT_MAX / sizeof(struct klist *))) { ret = EINVAL; goto out_locked; } @@ -7418,7 +7545,6 @@ kq_add_knote(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc, SLIST_INSERT_HEAD(list, kn, kn_link); ret = 0; goto out_locked; - } out_locked: @@ -7427,10 +7553,11 @@ out_locked: assert((kn->kn_status & KN_LOCKED) == 0); (void)knote_lock(kq, kn, knlc, KNOTE_KQ_UNLOCK); } - if (is_fd) + if (is_fd) { proc_fdunlock(p); - else + } else { knhash_unlock(p); + } return ret; } @@ -7445,7 +7572,7 @@ out_locked: */ static void kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p, - struct knote_lock_ctx *knlc) + struct knote_lock_ctx *knlc) { struct filedesc *fdp = p->p_fd; struct klist *list = NULL; @@ -7454,13 +7581,14 @@ kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p, is_fd = knote_fops(kn)->f_isfd; - if (is_fd) + if (is_fd) { proc_fdlock(p); - else + } else { knhash_lock(p); + } if (is_fd) { - assert ((u_int)fdp->fd_knlistsize > kn->kn_id); + assert((u_int)fdp->fd_knlistsize > kn->kn_id); list = &fdp->fd_knlist[kn->kn_id]; } else { list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)]; @@ -7474,13 +7602,15 @@ kq_remove_knote(struct kqueue *kq, 
struct knote *kn, struct proc *p, } else { kqunlock(kq); } - if (is_fd) + if (is_fd) { proc_fdunlock(p); - else + } else { knhash_unlock(p); + } - if (kq_state & KQ_DYNAMIC) + if (kq_state & KQ_DYNAMIC) { kqueue_release_last(p, kq); + } } /* @@ -7492,14 +7622,15 @@ kq_remove_knote(struct kqueue *kq, struct knote *kn, struct proc *p, static struct knote * kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_internal_s *kev, - bool is_fd, struct proc *p) + bool is_fd, struct proc *p) { struct knote * ret; - if (is_fd) + if (is_fd) { proc_fdlock(p); - else + } else { knhash_lock(p); + } ret = knote_fdfind(kq, kev, is_fd, p); @@ -7507,10 +7638,11 @@ kq_find_knote_and_kq_lock(struct kqueue *kq, struct kevent_internal_s *kev, kqlock(kq); } - if (is_fd) + if (is_fd) { proc_fdunlock(p); - else + } else { knhash_unlock(p); + } return ret; } @@ -7548,8 +7680,9 @@ knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc) /* kq may be freed when kq_remove_knote() returns */ kq_remove_knote(kq, kn, p, knlc); - if (knote_fops(kn)->f_isfd && ((kn->kn_status & KN_VANISHED) == 0)) + if (knote_fops(kn)->f_isfd && ((kn->kn_status & KN_VANISHED) == 0)) { fp_drop(p, kn->kn_id, kn->kn_fp, 0); + } knote_free(kn); } @@ -7558,16 +7691,18 @@ knote_drop(struct kqueue *kq, struct knote *kn, struct knote_lock_ctx *knlc) static void knote_activate(struct knote *kn) { - if (kn->kn_status & KN_ACTIVE) + if (kn->kn_status & KN_ACTIVE) { return; + } KDBG_FILTERED(KEV_EVTID(BSD_KEVENT_KNOTE_ACTIVATE), - kn->kn_udata, kn->kn_status | (kn->kn_id << 32), - kn->kn_filtid); + kn->kn_udata, kn->kn_status | (kn->kn_id << 32), + kn->kn_filtid); kn->kn_status |= KN_ACTIVE; - if (knote_enqueue(kn)) + if (knote_enqueue(kn)) { knote_wakeup(kn); + } } /* called with kqueue lock held */ @@ -7575,16 +7710,18 @@ static void knote_deactivate(struct knote *kn) { kn->kn_status &= ~KN_ACTIVE; - if ((kn->kn_status & KN_STAYACTIVE) == 0) + if ((kn->kn_status & KN_STAYACTIVE) == 0) { knote_dequeue(kn); + } } /* called with kqueue lock held */ static void knote_enable(struct knote *kn) { - if ((kn->kn_status & KN_DISABLED) == 0) + if ((kn->kn_status & KN_DISABLED) == 0) { return; + } kn->kn_status &= ~KN_DISABLED; @@ -7616,8 +7753,9 @@ knote_enable(struct knote *kn) static void knote_disable(struct knote *kn) { - if (kn->kn_status & KN_DISABLED) + if (kn->kn_status & KN_DISABLED) { return; + } kn->kn_status |= KN_DISABLED; knote_dequeue(kn); @@ -7632,8 +7770,9 @@ knote_suppress(struct knote *kn) kqlock_held(kq); - if (kn->kn_status & KN_SUPPRESSED) + if (kn->kn_status & KN_SUPPRESSED) { return; + } knote_dequeue(kn); kn->kn_status |= KN_SUPPRESSED; @@ -7650,8 +7789,9 @@ knote_unsuppress(struct knote *kn) kqlock_held(kq); - if ((kn->kn_status & KN_SUPPRESSED) == 0) + if ((kn->kn_status & KN_SUPPRESSED) == 0) { return; + } kn->kn_status &= ~KN_SUPPRESSED; suppressq = kqueue_get_suppressed_queue(kq, kn); @@ -7693,8 +7833,9 @@ static int knote_enqueue(struct knote *kn) { if ((kn->kn_status & (KN_ACTIVE | KN_STAYACTIVE)) == 0 || - (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING))) + (kn->kn_status & (KN_DISABLED | KN_SUPPRESSED | KN_DROPPING))) { return 0; + } if ((kn->kn_status & KN_QUEUED) == 0) { struct kqtailq *queue = knote_get_queue(kn); @@ -7706,7 +7847,7 @@ knote_enqueue(struct knote *kn) kq->kq_count++; return 1; } - return ((kn->kn_status & KN_STAYACTIVE) != 0); + return (kn->kn_status & KN_STAYACTIVE) != 0; } @@ -7719,8 +7860,9 @@ knote_dequeue(struct knote *kn) kqlock_held(kq); - if ((kn->kn_status & 
KN_QUEUED) == 0) + if ((kn->kn_status & KN_QUEUED) == 0) { return; + } queue = knote_get_queue(kn); TAILQ_REMOVE(queue, kn, kn_tqe); @@ -7731,22 +7873,22 @@ knote_dequeue(struct knote *kn) void knote_init(void) { - knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote), - 8192, "knote zone"); + knote_zone = zinit(sizeof(struct knote), 8192 * sizeof(struct knote), + 8192, "knote zone"); - kqfile_zone = zinit(sizeof(struct kqfile), 8192*sizeof(struct kqfile), - 8192, "kqueue file zone"); + kqfile_zone = zinit(sizeof(struct kqfile), 8192 * sizeof(struct kqfile), + 8192, "kqueue file zone"); - kqworkq_zone = zinit(sizeof(struct kqworkq), 8192*sizeof(struct kqworkq), - 8192, "kqueue workq zone"); + kqworkq_zone = zinit(sizeof(struct kqworkq), 8192 * sizeof(struct kqworkq), + 8192, "kqueue workq zone"); - kqworkloop_zone = zinit(sizeof(struct kqworkloop), 8192*sizeof(struct kqworkloop), - 8192, "kqueue workloop zone"); + kqworkloop_zone = zinit(sizeof(struct kqworkloop), 8192 * sizeof(struct kqworkloop), + 8192, "kqueue workloop zone"); /* allocate kq lock group attribute and group */ kq_lck_grp_attr = lck_grp_attr_alloc_init(); - kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr); + kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr); /* Allocate kq lock attribute */ kq_lck_attr = lck_attr_alloc_init(); @@ -7792,23 +7934,23 @@ knote_free(struct knote *kn) #include #ifndef ROUNDUP64 -#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) +#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) #endif #ifndef ADVANCE64 -#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) +#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) #endif static lck_grp_attr_t *kev_lck_grp_attr; static lck_attr_t *kev_lck_attr; static lck_grp_t *kev_lck_grp; -static decl_lck_rw_data(,kev_lck_data); +static decl_lck_rw_data(, kev_lck_data); static lck_rw_t *kev_rwlock = &kev_lck_data; static int kev_attach(struct socket *so, int proto, struct proc *p); static int kev_detach(struct socket *so); static int kev_control(struct socket *so, u_long cmd, caddr_t data, - struct ifnet *ifp, struct proc *p); + struct ifnet *ifp, struct proc *p); static lck_mtx_t * event_getlock(struct socket *, int); static int event_lock(struct socket *, int, void *); static int event_unlock(struct socket *, int, void *); @@ -7817,38 +7959,38 @@ static int event_sofreelastref(struct socket *); static void kev_delete(struct kern_event_pcb *); static struct pr_usrreqs event_usrreqs = { - .pru_attach = kev_attach, - .pru_control = kev_control, - .pru_detach = kev_detach, - .pru_soreceive = soreceive, + .pru_attach = kev_attach, + .pru_control = kev_control, + .pru_detach = kev_detach, + .pru_soreceive = soreceive, }; static struct protosw eventsw[] = { -{ - .pr_type = SOCK_RAW, - .pr_protocol = SYSPROTO_EVENT, - .pr_flags = PR_ATOMIC, - .pr_usrreqs = &event_usrreqs, - .pr_lock = event_lock, - .pr_unlock = event_unlock, - .pr_getlock = event_getlock, -} + { + .pr_type = SOCK_RAW, + .pr_protocol = SYSPROTO_EVENT, + .pr_flags = PR_ATOMIC, + .pr_usrreqs = &event_usrreqs, + .pr_lock = event_lock, + .pr_unlock = event_unlock, + .pr_getlock = event_getlock, + } }; __private_extern__ int kevt_getstat SYSCTL_HANDLER_ARGS; __private_extern__ int kevt_pcblist SYSCTL_HANDLER_ARGS; SYSCTL_NODE(_net_systm, OID_AUTO, kevt, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel event family"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel event family"); struct kevtstat kevtstat; SYSCTL_PROC(_net_systm_kevt, OID_AUTO, stats, - CTLTYPE_STRUCT | 
CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - kevt_getstat, "S,kevtstat", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + kevt_getstat, "S,kevtstat", ""); SYSCTL_PROC(_net_systm_kevt, OID_AUTO, pcblist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - kevt_pcblist, "S,xkevtpcb", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + kevt_pcblist, "S,xkevtpcb", ""); static lck_mtx_t * event_getlock(struct socket *so, int flags) @@ -7856,17 +7998,18 @@ event_getlock(struct socket *so, int flags) #pragma unused(flags) struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb; - if (so->so_pcb != NULL) { - if (so->so_usecount < 0) + if (so->so_pcb != NULL) { + if (so->so_usecount < 0) { panic("%s: so=%p usecount=%d lrh= %s\n", __func__, so, so->so_usecount, solockhistory_nr(so)); - /* NOTREACHED */ + } + /* NOTREACHED */ } else { panic("%s: so=%p NULL NO so_pcb %s\n", __func__, so, solockhistory_nr(so)); /* NOTREACHED */ } - return (&ev_pcb->evp_mtx); + return &ev_pcb->evp_mtx; } static int @@ -7874,14 +8017,15 @@ event_lock(struct socket *so, int refcount, void *lr) { void *lr_saved; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } if (so->so_pcb != NULL) { lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx); - } else { + } else { panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__, so, lr_saved, solockhistory_nr(so)); /* NOTREACHED */ @@ -7894,12 +8038,13 @@ event_lock(struct socket *so, int refcount, void *lr) /* NOTREACHED */ } - if (refcount) + if (refcount) { so->so_usecount++; + } so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; - return (0); + so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; + return 0; } static int @@ -7908,10 +8053,11 @@ event_unlock(struct socket *so, int refcount, void *lr) void *lr_saved; lck_mtx_t *mutex_held; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } if (refcount) { so->so_usecount--; @@ -7931,7 +8077,7 @@ event_unlock(struct socket *so, int refcount, void *lr) LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; if (so->so_usecount == 0) { VERIFY(so->so_flags & SOF_PCBCLEARING); @@ -7940,7 +8086,7 @@ event_unlock(struct socket *so, int refcount, void *lr) lck_mtx_unlock(mutex_held); } - return (0); + return 0; } static int @@ -7971,18 +8117,18 @@ event_sofreelastref(struct socket *so) kev_delete(ev_pcb); sofreelastref(so, 1); - return (0); + return 0; } -static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw)); +static int event_proto_count = (sizeof(eventsw) / sizeof(struct protosw)); static struct kern_event_head kern_event_head; static u_int32_t static_event_id = 0; -#define EVPCB_ZONE_MAX 65536 -#define EVPCB_ZONE_NAME "kerneventpcb" +#define EVPCB_ZONE_MAX 65536 +#define EVPCB_ZONE_NAME "kerneventpcb" static struct zone *ev_pcb_zone; /* @@ -8022,8 +8168,9 @@ kern_event_init(struct domain *dp) /* NOTREACHED */ } - for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) + for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) { net_add_proto(pr, dp, 1); + } ev_pcb_zone = zinit(sizeof(struct kern_event_pcb), EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME); @@ -8042,11 +8189,12 @@ kev_attach(struct 
socket *so, __unused int proto, __unused struct proc *p) struct kern_event_pcb *ev_pcb; error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE); - if (error != 0) - return (error); + if (error != 0) { + return error; + } if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) { - return (ENOBUFS); + return ENOBUFS; } bzero(ev_pcb, sizeof(struct kern_event_pcb)); lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr); @@ -8061,7 +8209,7 @@ kev_attach(struct socket *so, __unused int proto, __unused struct proc *p) kevtstat.kes_gencnt++; lck_rw_done(kev_rwlock); - return (error); + return error; } static void @@ -8082,22 +8230,23 @@ kev_detach(struct socket *so) so->so_flags |= SOF_PCBCLEARING; } - return (0); + return 0; } /* * For now, kev_vendor_code and mbuf_tags use the same * mechanism. */ -errno_t kev_vendor_code_find( - const char *string, - u_int32_t *out_vendor_code) +errno_t +kev_vendor_code_find( + const char *string, + u_int32_t *out_vendor_code) { if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) { - return (EINVAL); + return EINVAL; } - return (net_str_id_find_internal(string, out_vendor_code, - NSI_VENDOR_CODE, 1)); + return net_str_id_find_internal(string, out_vendor_code, + NSI_VENDOR_CODE, 1); } errno_t @@ -8107,8 +8256,9 @@ kev_msg_post(struct kev_msg *event_msg) net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE); - if (event_msg == NULL) - return (EINVAL); + if (event_msg == NULL) { + return EINVAL; + } /* * Limit third parties to posting events for registered vendor codes @@ -8117,9 +8267,9 @@ kev_msg_post(struct kev_msg *event_msg) if (event_msg->vendor_code < min_vendor || event_msg->vendor_code > max_vendor) { OSIncrementAtomic64((SInt64 *)&kevtstat.kes_badvendor); - return (EINVAL); + return EINVAL; } - return (kev_post_msg(event_msg)); + return kev_post_msg(event_msg); } int @@ -8136,28 +8286,30 @@ kev_post_msg(struct kev_msg *event_msg) total_size = KEV_MSG_HEADER_SIZE; for (i = 0; i < 5; i++) { - if (event_msg->dv[i].data_length == 0) + if (event_msg->dv[i].data_length == 0) { break; + } total_size += event_msg->dv[i].data_length; } if (total_size > MLEN) { OSIncrementAtomic64((SInt64 *)&kevtstat.kes_toobig); - return (EMSGSIZE); + return EMSGSIZE; } m = m_get(M_WAIT, MT_DATA); if (m == 0) { OSIncrementAtomic64((SInt64 *)&kevtstat.kes_nomem); - return (ENOMEM); + return ENOMEM; } ev = mtod(m, struct kern_event_msg *); total_size = KEV_MSG_HEADER_SIZE; tmp = (char *) &ev->event_data[0]; for (i = 0; i < 5; i++) { - if (event_msg->dv[i].data_length == 0) + if (event_msg->dv[i].data_length == 0) { break; + } total_size += event_msg->dv[i].data_length; bcopy(event_msg->dv[i].data_ptr, tmp, @@ -8210,7 +8362,7 @@ kev_post_msg(struct kev_msg *event_msg) m_free(m); lck_mtx_unlock(&ev_pcb->evp_mtx); lck_rw_done(kev_rwlock); - return (ENOMEM); + return ENOMEM; } if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) { /* @@ -8230,15 +8382,15 @@ kev_post_msg(struct kev_msg *event_msg) m_free(m); lck_rw_done(kev_rwlock); - return (0); + return 0; } static int kev_control(struct socket *so, - u_long cmd, - caddr_t data, - __unused struct ifnet *ifp, - __unused struct proc *p) + u_long cmd, + caddr_t data, + __unused struct ifnet *ifp, + __unused struct proc *p) { struct kev_request *kev_req = (struct kev_request *) data; struct kern_event_pcb *ev_pcb; @@ -8246,32 +8398,32 @@ kev_control(struct socket *so, u_int32_t *id_value = (u_int32_t *) data; switch (cmd) { - case SIOCGKEVID: - *id_value = static_event_id; - break; - case SIOCSKEVFILT: - ev_pcb = 
(struct kern_event_pcb *) so->so_pcb; - ev_pcb->evp_vendor_code_filter = kev_req->vendor_code; - ev_pcb->evp_class_filter = kev_req->kev_class; - ev_pcb->evp_subclass_filter = kev_req->kev_subclass; - break; - case SIOCGKEVFILT: - ev_pcb = (struct kern_event_pcb *) so->so_pcb; - kev_req->vendor_code = ev_pcb->evp_vendor_code_filter; - kev_req->kev_class = ev_pcb->evp_class_filter; - kev_req->kev_subclass = ev_pcb->evp_subclass_filter; - break; - case SIOCGKEVVENDOR: - kev_vendor = (struct kev_vendor_code *)data; - /* Make sure string is NULL terminated */ - kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0; - return (net_str_id_find_internal(kev_vendor->vendor_string, - &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0)); - default: - return (ENOTSUP); + case SIOCGKEVID: + *id_value = static_event_id; + break; + case SIOCSKEVFILT: + ev_pcb = (struct kern_event_pcb *) so->so_pcb; + ev_pcb->evp_vendor_code_filter = kev_req->vendor_code; + ev_pcb->evp_class_filter = kev_req->kev_class; + ev_pcb->evp_subclass_filter = kev_req->kev_subclass; + break; + case SIOCGKEVFILT: + ev_pcb = (struct kern_event_pcb *) so->so_pcb; + kev_req->vendor_code = ev_pcb->evp_vendor_code_filter; + kev_req->kev_class = ev_pcb->evp_class_filter; + kev_req->kev_subclass = ev_pcb->evp_subclass_filter; + break; + case SIOCGKEVVENDOR: + kev_vendor = (struct kev_vendor_code *)data; + /* Make sure string is NULL terminated */ + kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN - 1] = 0; + return net_str_id_find_internal(kev_vendor->vendor_string, + &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0); + default: + return ENOTSUP; } - return (0); + return 0; } int @@ -8296,7 +8448,7 @@ kevt_getstat SYSCTL_HANDLER_ARGS done: lck_rw_done(kev_rwlock); - return (error); + return error; } __private_extern__ int @@ -8307,34 +8459,35 @@ kevt_pcblist SYSCTL_HANDLER_ARGS int n, i; struct xsystmgen xsg; void *buf = NULL; - size_t item_size = ROUNDUP64(sizeof (struct xkevtpcb)) + - ROUNDUP64(sizeof (struct xsocket_n)) + - 2 * ROUNDUP64(sizeof (struct xsockbuf_n)) + - ROUNDUP64(sizeof (struct xsockstat_n)); + size_t item_size = ROUNDUP64(sizeof(struct xkevtpcb)) + + ROUNDUP64(sizeof(struct xsocket_n)) + + 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) + + ROUNDUP64(sizeof(struct xsockstat_n)); struct kern_event_pcb *ev_pcb; buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO); - if (buf == NULL) - return (ENOMEM); + if (buf == NULL) { + return ENOMEM; + } lck_rw_lock_shared(kev_rwlock); n = kevtstat.kes_pcbcount; if (req->oldptr == USER_ADDR_NULL) { - req->oldidx = (n + n/8) * item_size; + req->oldidx = (n + n / 8) * item_size; goto done; } if (req->newptr != USER_ADDR_NULL) { error = EPERM; goto done; } - bzero(&xsg, sizeof (xsg)); - xsg.xg_len = sizeof (xsg); + bzero(&xsg, sizeof(xsg)); + xsg.xg_len = sizeof(xsg); xsg.xg_count = n; xsg.xg_gen = kevtstat.kes_gencnt; xsg.xg_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xsg, sizeof (xsg)); + error = SYSCTL_OUT(req, &xsg, sizeof(xsg)); if (error) { goto done; } @@ -8351,13 +8504,13 @@ kevt_pcblist SYSCTL_HANDLER_ARGS i++, ev_pcb = LIST_NEXT(ev_pcb, evp_link)) { struct xkevtpcb *xk = (struct xkevtpcb *)buf; struct xsocket_n *xso = (struct xsocket_n *) - ADVANCE64(xk, sizeof (*xk)); + ADVANCE64(xk, sizeof(*xk)); struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *) - ADVANCE64(xso, sizeof (*xso)); + ADVANCE64(xso, sizeof(*xso)); struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *) - ADVANCE64(xsbrcv, sizeof (*xsbrcv)); + ADVANCE64(xsbrcv, sizeof(*xsbrcv)); struct xsockstat_n *xsostats = (struct 
xsockstat_n *) - ADVANCE64(xsbsnd, sizeof (*xsbsnd)); + ADVANCE64(xsbsnd, sizeof(*xsbsnd)); bzero(buf, item_size); @@ -8372,9 +8525,9 @@ kevt_pcblist SYSCTL_HANDLER_ARGS sotoxsocket_n(ev_pcb->evp_socket, xso); sbtoxsockbuf_n(ev_pcb->evp_socket ? - &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv); + &ev_pcb->evp_socket->so_rcv : NULL, xsbrcv); sbtoxsockbuf_n(ev_pcb->evp_socket ? - &ev_pcb->evp_socket->so_snd : NULL, xsbsnd); + &ev_pcb->evp_socket->so_snd : NULL, xsbsnd); sbtoxsockstat_n(ev_pcb->evp_socket, xsostats); lck_mtx_unlock(&ev_pcb->evp_mtx); @@ -8390,12 +8543,12 @@ kevt_pcblist SYSCTL_HANDLER_ARGS * while we were processing this request, and it * might be necessary to retry. */ - bzero(&xsg, sizeof (xsg)); - xsg.xg_len = sizeof (xsg); + bzero(&xsg, sizeof(xsg)); + xsg.xg_len = sizeof(xsg); xsg.xg_count = n; xsg.xg_gen = kevtstat.kes_gencnt; xsg.xg_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xsg, sizeof (xsg)); + error = SYSCTL_OUT(req, &xsg, sizeof(xsg)); if (error) { goto done; } @@ -8404,7 +8557,7 @@ kevt_pcblist SYSCTL_HANDLER_ARGS done: lck_rw_done(kev_rwlock); - return (error); + return error; } #endif /* SOCKETS */ @@ -8418,21 +8571,22 @@ fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo) st = &kinfo->kq_stat; st->vst_size = kq->kq_count; - if (kq->kq_state & KQ_KEV_QOS) + if (kq->kq_state & KQ_KEV_QOS) { st->vst_blksize = sizeof(struct kevent_qos_s); - else if (kq->kq_state & KQ_KEV64) + } else if (kq->kq_state & KQ_KEV64) { st->vst_blksize = sizeof(struct kevent64_s); - else + } else { st->vst_blksize = sizeof(struct kevent); + } st->vst_mode = S_IFIFO; st->vst_ino = (kq->kq_state & KQ_DYNAMIC) ? - ((struct kqworkloop *)kq)->kqwl_dynamicid : 0; + ((struct kqworkloop *)kq)->kqwl_dynamicid : 0; /* flags exported to libproc as PROC_KQUEUE_* (sys/proc_info.h) */ #define PROC_KQUEUE_MASK (KQ_SEL|KQ_SLEEP|KQ_KEV32|KQ_KEV64|KQ_KEV_QOS|KQ_WORKQ|KQ_WORKLOOP) kinfo->kq_state = kq->kq_state & PROC_KQUEUE_MASK; - return (0); + return 0; } static int @@ -8462,20 +8616,23 @@ fill_kqueue_dyninfo(struct kqueue *kq, struct kqueue_dyninfo *kqdi) kqdi->kqdi_sync_waiter_qos = 0; trp.trp_value = kqwl->kqwl_params; - if (trp.trp_flags & TRP_PRIORITY) + if (trp.trp_flags & TRP_PRIORITY) { kqdi->kqdi_pri = trp.trp_pri; - else + } else { kqdi->kqdi_pri = 0; + } - if (trp.trp_flags & TRP_POLICY) + if (trp.trp_flags & TRP_POLICY) { kqdi->kqdi_pol = trp.trp_pol; - else + } else { kqdi->kqdi_pol = 0; + } - if (trp.trp_flags & TRP_CPUPERCENT) + if (trp.trp_flags & TRP_CPUPERCENT) { kqdi->kqdi_cpupercent = trp.trp_cpupercent; - else + } else { kqdi->kqdi_cpupercent = 0; + } kq_req_unlock(kqwl); @@ -8534,7 +8691,7 @@ knote_clearstayactive(struct knote *kn) static unsigned long kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo *buf, - unsigned long buflen, unsigned long nknotes) + unsigned long buflen, unsigned long nknotes) { for (; kn; kn = SLIST_NEXT(kn, kn_link)) { if (kq == knote_get_kq(kn)) { @@ -8574,7 +8731,7 @@ kevent_extinfo_emit(struct kqueue *kq, struct knote *kn, struct kevent_extinfo * int kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize, - int32_t *nkqueues_out) + int32_t *nkqueues_out) { proc_t p = (proc_t)proc; struct filedesc *fdp = p->p_fd; @@ -8648,7 +8805,7 @@ out: int kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf, - uint32_t ubufsize, int32_t *size_out) + uint32_t ubufsize, int32_t *size_out) { proc_t p = (proc_t)proc; struct kqueue *kq; @@ -8690,7 +8847,7 @@ kevent_copyout_dynkqinfo(void 
*proc, kqueue_id_t kq_id, user_addr_t ubuf, int kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf, - uint32_t ubufsize, int32_t *nknotes_out) + uint32_t ubufsize, int32_t *nknotes_out) { proc_t p = (proc_t)proc; struct kqueue *kq; @@ -8714,7 +8871,7 @@ kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf, int pid_kqueue_extinfo(proc_t p, struct kqueue *kq, user_addr_t ubuf, - uint32_t bufsize, int32_t *retval) + uint32_t bufsize, int32_t *retval) { struct knote *kn; int i; @@ -8767,7 +8924,7 @@ out: static unsigned int klist_copy_udata(struct klist *list, uint64_t *buf, - unsigned int buflen, unsigned int nknotes) + unsigned int buflen, unsigned int nknotes) { struct kevent_internal_s *kev; struct knote *kn; @@ -8788,7 +8945,7 @@ klist_copy_udata(struct klist *list, uint64_t *buf, static unsigned int kqlist_copy_dynamicids(__assert_only proc_t p, struct kqlist *list, - uint64_t *buf, unsigned int buflen, unsigned int nids) + uint64_t *buf, unsigned int buflen, unsigned int nids) { kqhash_lock_held(p); struct kqworkloop *kqwl; @@ -8830,7 +8987,7 @@ kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize) if (fdp->fd_kqhashmask != 0) { for (int i = 0; i < (int)fdp->fd_kqhashmask + 1; i++) { nuptrs = kqlist_copy_dynamicids(p, &fdp->fd_kqhash[i], buf, buflen, - nuptrs); + nuptrs); } } kqhash_unlock(p); @@ -8867,10 +9024,10 @@ kevent_set_return_to_kernel_user_tsd(proc_t p, thread_t thread) } if (copyout((proc_is_64bit ? (void *)&ast_flags64 : (void *)&ast_flags32), - (user_addr_t)ast_addr, - user_addr_size) != 0) { + (user_addr_t)ast_addr, + user_addr_size) != 0) { printf("pid %d (tid:%llu): copyout of return_to_kernel ast flags failed with " - "ast_addr = %llu\n", p->p_pid, thread_tid(current_thread()), ast_addr); + "ast_addr = %llu\n", p->p_pid, thread_tid(current_thread()), ast_addr); } } @@ -8924,12 +9081,12 @@ kevent_sysctl SYSCTL_HANDLER_ARGS } SYSCTL_NODE(_kern, OID_AUTO, kevent, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "kevent information"); + "kevent information"); SYSCTL_PROC(_kern_kevent, OID_AUTO, bound_id, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, - (void *)KEVENT_SYSCTL_BOUND_ID, - sizeof(kqueue_id_t), kevent_sysctl, "Q", - "get the ID of the bound kqueue"); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, + (void *)KEVENT_SYSCTL_BOUND_ID, + sizeof(kqueue_id_t), kevent_sysctl, "Q", + "get the ID of the bound kqueue"); #endif /* DEVELOPMENT || DEBUG */ diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c index 5e145cfac..fea51a172 100644 --- a/bsd/kern/kern_exec.c +++ b/bsd/kern/kern_exec.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -32,7 +32,7 @@ * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ - + /*- * Copyright (c) 1982, 1986, 1991, 1993 * The Regents of the University of California. All rights reserved. @@ -92,7 +92,7 @@ #include #include #include -#include +#include #include #include #include @@ -105,9 +105,9 @@ #include #include #if SYSV_SHM -#include /* shmexec() */ +#include /* shmexec() */ #endif -#include /* ubc_map() */ +#include /* ubc_map() */ #include #include #include @@ -162,6 +162,8 @@ #include #endif +#include + extern boolean_t vm_darkwake_mode; #if CONFIG_DTRACE @@ -180,12 +182,12 @@ static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL; /* support for child creation in exec after vfork */ thread_t fork_create_child(task_t parent_task, - coalition_t *parent_coalition, - proc_t child_proc, - int inherit_memory, - int is_64bit_addr, - int is_64bit_data, - int in_exec); + coalition_t *parent_coalition, + proc_t child_proc, + int inherit_memory, + int is_64bit_addr, + int is_64bit_data, + int in_exec); void vfork_exit(proc_t p, int rv); extern void proc_apply_task_networkbg_internal(proc_t, thread_t); extern void task_set_did_exec_flag(task_t task); @@ -200,15 +202,15 @@ extern void ipc_importance_release(void *elem); /* * Mach things for which prototypes are unavailable from Mach headers */ -void ipc_task_reset( - task_t task); -void ipc_thread_reset( - thread_t thread); +void ipc_task_reset( + task_t task); +void ipc_thread_reset( + thread_t thread); kern_return_t ipc_object_copyin( - ipc_space_t space, - mach_port_name_t name, - mach_msg_type_name_t msgt_name, - ipc_object_t *objectp); + ipc_space_t space, + mach_port_name_t name, + mach_msg_type_name_t msgt_name, + ipc_object_t *objectp); void ipc_port_release_send(ipc_port_t); #if DEVELOPMENT || DEBUG @@ -237,7 +239,7 @@ __attribute__((noinline)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL_ * activator in exec_activate_image() before treating * it as malformed/corrupt. */ -#define EAI_ITERLIMIT 3 +#define EAI_ITERLIMIT 3 /* * For #! 
interpreter parsing @@ -247,22 +249,23 @@ __attribute__((noinline)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL_ extern vm_map_t bsd_pageable_map; extern const struct fileops vnops; +extern int nextpidversion; -#define USER_ADDR_ALIGN(addr, val) \ +#define USER_ADDR_ALIGN(addr, val) \ ( ( (user_addr_t)(addr) + (val) - 1) \ - & ~((val) - 1) ) + & ~((val) - 1) ) - /* Platform Code Exec Logging */ +/* Platform Code Exec Logging */ static int platform_exec_logging = 0; SYSCTL_DECL(_security_mac); SYSCTL_INT(_security_mac, OID_AUTO, platform_exec_logging, CTLFLAG_RW, &platform_exec_logging, 0, - "log cdhashes for all platform binary executions"); + "log cdhashes for all platform binary executions"); static os_log_t peLog = OS_LOG_DEFAULT; -struct image_params; /* Forward */ +struct image_params; /* Forward */ static int exec_activate_image(struct image_params *imgp); static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp); static int load_return_to_errno(load_return_t lrtn); @@ -273,7 +276,7 @@ static int exec_extract_strings(struct image_params *imgp); static int exec_add_apple_strings(struct image_params *imgp, const load_result_t *load_result); static int exec_handle_sugid(struct image_params *imgp); static int sugid_scripts = 0; -SYSCTL_INT (_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW | CTLFLAG_LOCKED, &sugid_scripts, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW | CTLFLAG_LOCKED, &sugid_scripts, 0, ""); static kern_return_t create_unix_stack(vm_map_t map, load_result_t* load_result, proc_t p); static int copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size); static void exec_resettextvp(proc_t, struct image_params *); @@ -281,7 +284,7 @@ static int check_for_signature(proc_t, struct image_params *); static void exec_prefault_data(proc_t, struct image_params *, load_result_t *); static errno_t exec_handle_port_actions(struct image_params *imgp, boolean_t * portwatch_present, ipc_port_t * portwatch_ports); static errno_t exec_handle_spawnattr_policy(proc_t p, int psa_apptype, uint64_t psa_qos_clamp, uint64_t psa_darwin_role, - ipc_port_t * portwatch_ports, int portwatch_count); + ipc_port_t * portwatch_ports, int portwatch_count); /* * exec_add_user_string @@ -305,35 +308,36 @@ static int exec_add_user_string(struct image_params *imgp, user_addr_t str, int seg, boolean_t is_ncargs) { int error = 0; - + do { size_t len = 0; int space; - - if (is_ncargs) + + if (is_ncargs) { space = imgp->ip_argspace; /* by definition smaller than ip_strspace */ - else + } else { space = imgp->ip_strspace; - + } + if (space <= 0) { error = E2BIG; break; } - + if (!UIO_SEG_IS_USER_SPACE(seg)) { - char *kstr = CAST_DOWN(char *,str); /* SAFE */ + char *kstr = CAST_DOWN(char *, str); /* SAFE */ error = copystr(kstr, imgp->ip_strendp, space, &len); - } else { + } else { error = copyinstr(str, imgp->ip_strendp, space, &len); } imgp->ip_strendp += len; imgp->ip_strspace -= len; - if (is_ncargs) + if (is_ncargs) { imgp->ip_argspace -= len; - + } } while (error == ENAMETOOLONG); - + return error; } @@ -341,7 +345,7 @@ exec_add_user_string(struct image_params *imgp, user_addr_t str, int seg, boolea * dyld is now passed the executable path as a getenv-like variable * in the same fashion as the stack_guard and malloc_entropy keys. 
*/ -#define EXECUTABLE_KEY "executable_path=" +#define EXECUTABLE_KEY "executable_path=" /* * exec_save_path @@ -390,13 +394,13 @@ exec_save_path(struct image_params *imgp, user_addr_t path, int seg, const char len = MIN(MAXPATHLEN, imgp->ip_strspace); - switch(seg) { + switch (seg) { case UIO_USERSPACE32: - case UIO_USERSPACE64: /* Same for copyin()... */ + case UIO_USERSPACE64: /* Same for copyin()... */ error = copyinstr(path, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len); break; case UIO_SYSSPACE: - kpath = CAST_DOWN(char *,path); /* SAFE */ + kpath = CAST_DOWN(char *, path); /* SAFE */ error = copystr(kpath, imgp->ip_strings + strlen(EXECUTABLE_KEY), len, &len); break; default: @@ -416,7 +420,7 @@ exec_save_path(struct image_params *imgp, user_addr_t path, int seg, const char } } - return(error); + return error; } /* @@ -424,7 +428,7 @@ exec_save_path(struct image_params *imgp, user_addr_t path, int seg, const char * * If we detect a shell script, we need to reset the string area * state so that the interpreter can be saved onto the stack. - + * * Parameters; struct image_params * image parameter block * * Returns: int 0 Success @@ -441,9 +445,9 @@ exec_reset_save_path(struct image_params *imgp) { imgp->ip_strendp = imgp->ip_strings; imgp->ip_argspace = NCARGS; - imgp->ip_strspace = ( NCARGS + PAGE_SIZE ); + imgp->ip_strspace = (NCARGS + PAGE_SIZE); - return (0); + return 0; } /* @@ -482,12 +486,12 @@ exec_shell_imgact(struct image_params *imgp) if (vdata[0] != '#' || vdata[1] != '!' || (imgp->ip_flags & IMGPF_INTERPRET) != 0) { - return (-1); + return -1; } if (imgp->ip_origcputype != 0) { /* Fat header previously matched, don't allow shell script inside */ - return (-1); + return -1; } imgp->ip_flags |= IMGPF_INTERPRET; @@ -503,10 +507,10 @@ exec_shell_imgact(struct image_params *imgp) } /* Try to find the first non-whitespace character */ - for( ihp = &vdata[2]; ihp < &vdata[IMG_SHSIZE]; ihp++ ) { + for (ihp = &vdata[2]; ihp < &vdata[IMG_SHSIZE]; ihp++) { if (IS_EOL(*ihp)) { /* Did not find interpreter, "#!\n" */ - return (ENOEXEC); + return ENOEXEC; } else if (IS_WHITESPACE(*ihp)) { /* Whitespace, like "#! /bin/sh\n", keep going. */ } else { @@ -517,13 +521,13 @@ exec_shell_imgact(struct image_params *imgp) if (ihp == &vdata[IMG_SHSIZE]) { /* All whitespace, like "#! " */ - return (ENOEXEC); + return ENOEXEC; } line_startp = ihp; /* Try to find the end of the interpreter+args string */ - for ( ; ihp < &vdata[IMG_SHSIZE]; ihp++ ) { + for (; ihp < &vdata[IMG_SHSIZE]; ihp++) { if (IS_EOL(*ihp)) { /* Got it */ break; @@ -534,7 +538,7 @@ exec_shell_imgact(struct image_params *imgp) if (ihp == &vdata[IMG_SHSIZE]) { /* A long line, like "#! 
blah blah blah" without end */ - return (ENOEXEC); + return ENOEXEC; } /* Backtrack until we find the last non-whitespace */ @@ -554,18 +558,20 @@ exec_shell_imgact(struct image_params *imgp) /* copy the interpreter name */ interp = imgp->ip_interp_buffer; - for ( ihp = line_startp; (ihp < line_endp) && !IS_WHITESPACE(*ihp); ihp++) + for (ihp = line_startp; (ihp < line_endp) && !IS_WHITESPACE(*ihp); ihp++) { *interp++ = *ihp; + } *interp = '\0'; exec_reset_save_path(imgp); exec_save_path(imgp, CAST_USER_ADDR_T(imgp->ip_interp_buffer), - UIO_SYSSPACE, NULL); + UIO_SYSSPACE, NULL); /* Copy the entire interpreter + args for later processing into argv[] */ interp = imgp->ip_interp_buffer; - for ( ihp = line_startp; (ihp < line_endp); ihp++) + for (ihp = line_startp; (ihp < line_endp); ihp++) { *interp++ = *ihp; + } *interp = '\0'; #if !SECURE_KERNEL @@ -582,13 +588,14 @@ exec_shell_imgact(struct image_params *imgp) p = vfs_context_proc(imgp->ip_vfs_context); error = falloc(p, &fp, &fd, imgp->ip_vfs_context); - if (error) - return(error); + if (error) { + return error; + } fp->f_fglob->fg_flag = FREAD; fp->f_fglob->fg_ops = &vnops; fp->f_fglob->fg_data = (caddr_t)imgp->ip_vp; - + proc_fdlock(p); procfdtbl_releasefd(p, fd, NULL); fp_drop(p, fd, fp, 1); @@ -599,7 +606,7 @@ exec_shell_imgact(struct image_params *imgp) } #endif - return (-3); + return -3; } @@ -625,7 +632,7 @@ exec_shell_imgact(struct image_params *imgp) * activators should not be given the opportunity to attempt * to activate the image. * - * If we find an encapsulated binary, we make no assertions + * If we find an encapsulated binary, we make no assertions * about its validity; instead, we leave that up to a rescan * for an activator to claim it, and, if it is claimed by one, * that activator is responsible for determining validity. @@ -680,9 +687,9 @@ exec_fat_imgact(struct image_params *imgp) } lret = fatfile_getbestarch_for_cputype(pref, - (vm_offset_t)fat_header, - PAGE_SIZE, - &fat_arch); + (vm_offset_t)fat_header, + PAGE_SIZE, + &fat_arch); if (lret == LOAD_SUCCESS) { goto use_arch; } @@ -696,8 +703,8 @@ exec_fat_imgact(struct image_params *imgp) regular_grading: /* Look up our preferred architecture in the fat file. 
*/ lret = fatfile_getbestarch((vm_offset_t)fat_header, - PAGE_SIZE, - &fat_arch); + PAGE_SIZE, + &fat_arch); if (lret != LOAD_SUCCESS) { error = load_return_to_errno(lret); goto bad; @@ -706,9 +713,9 @@ regular_grading: use_arch: /* Read the Mach-O header out of fat_arch */ error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, - PAGE_SIZE, fat_arch.offset, - UIO_SYSSPACE, (IO_UNIT|IO_NODELOCKED), - cred, &resid, p); + PAGE_SIZE, fat_arch.offset, + UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED), + cred, &resid, p); if (error) { goto bad; } @@ -726,7 +733,7 @@ use_arch: bad: kauth_cred_unref(&cred); - return (error); + return error; } static int @@ -783,7 +790,7 @@ set_proc_name(struct image_params *imgp, proc_t p) } bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_name, - (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen); + (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen); p->p_name[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0'; if (imgp->ip_ndp->ni_cnd.cn_namelen > MAXCOMLEN) { @@ -791,19 +798,10 @@ set_proc_name(struct image_params *imgp, proc_t p) } bcopy((caddr_t)imgp->ip_ndp->ni_cnd.cn_nameptr, (caddr_t)p->p_comm, - (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen); + (unsigned)imgp->ip_ndp->ni_cnd.cn_namelen); p->p_comm[imgp->ip_ndp->ni_cnd.cn_namelen] = '\0'; } -static uint64_t get_va_fsid(struct vnode_attr *vap) -{ - if (VATTR_IS_SUPPORTED(vap, va_fsid64)) { - return *(uint64_t *)&vap->va_fsid64; - } else { - return vap->va_fsid; - } -} - /* * exec_mach_imgact * @@ -831,28 +829,28 @@ static int exec_mach_imgact(struct image_params *imgp) { struct mach_header *mach_header = (struct mach_header *)imgp->ip_vdata; - proc_t p = vfs_context_proc(imgp->ip_vfs_context); - int error = 0; - task_t task; - task_t new_task = NULL; /* protected by vfexec */ - thread_t thread; - struct uthread *uthread; + proc_t p = vfs_context_proc(imgp->ip_vfs_context); + int error = 0; + task_t task; + task_t new_task = NULL; /* protected by vfexec */ + thread_t thread; + struct uthread *uthread; vm_map_t old_map = VM_MAP_NULL; vm_map_t map = VM_MAP_NULL; - load_return_t lret; - load_result_t load_result = {}; + load_return_t lret; + load_result_t load_result = {}; struct _posix_spawnattr *psa = NULL; - int spawn = (imgp->ip_flags & IMGPF_SPAWN); - int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC); - int exec = (imgp->ip_flags & IMGPF_EXEC); - os_reason_t exec_failure_reason = OS_REASON_NULL; + int spawn = (imgp->ip_flags & IMGPF_SPAWN); + int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC); + int exec = (imgp->ip_flags & IMGPF_EXEC); + os_reason_t exec_failure_reason = OS_REASON_NULL; /* * make sure it's a Mach-O 1.0 or Mach-O 2.0 binary; the difference * is a reserved field on the end, so for the most part, we can * treat them as if they were identical. Reverse-endian Mach-O * binaries are recognized but not compatible. 
- */ + */ if ((mach_header->magic == MH_CIGAM) || (mach_header->magic == MH_CIGAM_64)) { error = EBADARCH; @@ -873,7 +871,7 @@ exec_mach_imgact(struct image_params *imgp) if (imgp->ip_origcputype != 0) { /* Fat header previously had an idea about this thin file */ if (imgp->ip_origcputype != mach_header->cputype || - imgp->ip_origcpusubtype != mach_header->cpusubtype) { + imgp->ip_origcpusubtype != mach_header->cpusubtype) { error = EBADARCH; goto bad; } @@ -922,13 +920,13 @@ grade: } - /* Copy in arguments/environment from the old process */ error = exec_extract_strings(imgp); - if (error) + if (error) { goto bad; + } - AUDIT_ARG(argv, imgp->ip_startargv, imgp->ip_argc, + AUDIT_ARG(argv, imgp->ip_startargv, imgp->ip_argc, imgp->ip_endargv - imgp->ip_startargv); AUDIT_ARG(envv, imgp->ip_endargv, imgp->ip_envc, imgp->ip_endenvv - imgp->ip_endargv); @@ -942,12 +940,12 @@ grade: */ if (vfexec) { imgp->ip_new_thread = fork_create_child(task, - NULL, - p, - FALSE, - (imgp->ip_flags & IMGPF_IS_64BIT_ADDR), - (imgp->ip_flags & IMGPF_IS_64BIT_DATA), - FALSE); + NULL, + p, + FALSE, + (imgp->ip_flags & IMGPF_IS_64BIT_ADDR), + (imgp->ip_flags & IMGPF_IS_64BIT_DATA), + FALSE); /* task and thread ref returned, will be released in __mac_execve */ if (imgp->ip_new_thread == NULL) { error = ENOMEM; @@ -981,7 +979,7 @@ grade: error = load_return_to_errno(lret); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_BAD_MACHO, 0, 0); if (lret == LOAD_BADMACHO_UPX) { /* set anything that might be useful in the crash report */ set_proc_name(imgp, p); @@ -1010,34 +1008,40 @@ grade: vm_map_set_user_wire_limit(map, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur); - /* + /* * Set code-signing flags if this binary is signed, or if parent has * requested them on exec. 
*/ if (load_result.csflags & CS_VALID) { - imgp->ip_csflags |= load_result.csflags & - (CS_VALID|CS_SIGNED|CS_DEV_CODE| - CS_HARD|CS_KILL|CS_RESTRICT|CS_ENFORCEMENT|CS_REQUIRE_LV| - CS_FORCED_LV|CS_ENTITLEMENTS_VALIDATED|CS_DYLD_PLATFORM|CS_RUNTIME| - CS_ENTITLEMENT_FLAGS| - CS_EXEC_SET_HARD|CS_EXEC_SET_KILL|CS_EXEC_SET_ENFORCEMENT); + imgp->ip_csflags |= load_result.csflags & + (CS_VALID | CS_SIGNED | CS_DEV_CODE | + CS_HARD | CS_KILL | CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV | + CS_FORCED_LV | CS_ENTITLEMENTS_VALIDATED | CS_DYLD_PLATFORM | CS_RUNTIME | + CS_ENTITLEMENT_FLAGS | + CS_EXEC_SET_HARD | CS_EXEC_SET_KILL | CS_EXEC_SET_ENFORCEMENT); } else { imgp->ip_csflags &= ~CS_VALID; } - if (p->p_csflags & CS_EXEC_SET_HARD) + if (p->p_csflags & CS_EXEC_SET_HARD) { imgp->ip_csflags |= CS_HARD; - if (p->p_csflags & CS_EXEC_SET_KILL) + } + if (p->p_csflags & CS_EXEC_SET_KILL) { imgp->ip_csflags |= CS_KILL; - if (p->p_csflags & CS_EXEC_SET_ENFORCEMENT) + } + if (p->p_csflags & CS_EXEC_SET_ENFORCEMENT) { imgp->ip_csflags |= CS_ENFORCEMENT; + } if (p->p_csflags & CS_EXEC_INHERIT_SIP) { - if (p->p_csflags & CS_INSTALLER) + if (p->p_csflags & CS_INSTALLER) { imgp->ip_csflags |= CS_INSTALLER; - if (p->p_csflags & CS_DATAVAULT_CONTROLLER) + } + if (p->p_csflags & CS_DATAVAULT_CONTROLLER) { imgp->ip_csflags |= CS_DATAVAULT_CONTROLLER; - if (p->p_csflags & CS_NVRAM_UNRESTRICTED) + } + if (p->p_csflags & CS_NVRAM_UNRESTRICTED) { imgp->ip_csflags |= CS_NVRAM_UNRESTRICTED; + } } /* @@ -1060,7 +1064,7 @@ grade: vm_map_deallocate(map); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE, 0, 0); exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SUGID_FAILURE); goto badtoolate; } @@ -1083,9 +1087,8 @@ grade: lret = activate_exec_state(task, p, thread, &load_result); if (lret != KERN_SUCCESS) { - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE, 0, 0); exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_ACTV_THREADSTATE); goto badtoolate; } @@ -1093,30 +1096,31 @@ grade: /* * deal with voucher on exec-calling thread. 
*/ - if (imgp->ip_new_thread == NULL) + if (imgp->ip_new_thread == NULL) { thread_set_mach_voucher(current_thread(), IPC_VOUCHER_NULL); + } /* Make sure we won't interrupt ourself signalling a partial process */ - if (!vfexec && !spawn && (p->p_lflag & P_LTRACED)) + if (!vfexec && !spawn && (p->p_lflag & P_LTRACED)) { psignal(p, SIGTRAP); + } if (load_result.unixproc && - create_unix_stack(get_task_map(task), - &load_result, - p) != KERN_SUCCESS) { + create_unix_stack(get_task_map(task), + &load_result, + p) != KERN_SUCCESS) { error = load_return_to_errno(LOAD_NOSPACE); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC, 0, 0); exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_STACK_ALLOC); goto badtoolate; } error = exec_add_apple_strings(imgp, &load_result); if (error) { - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT, 0, 0); exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_APPLE_STRING_INIT); goto badtoolate; } @@ -1125,7 +1129,7 @@ grade: old_map = vm_map_switch(get_task_map(task)); if (load_result.unixproc) { - user_addr_t ap; + user_addr_t ap; /* * Copy the strings area out into the new process address @@ -1137,7 +1141,7 @@ grade: vm_map_switch(old_map); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS, 0, 0); exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_STRINGS); goto badtoolate; } @@ -1146,8 +1150,8 @@ grade: } if (load_result.dynlinker) { - uint64_t ap; - int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; + uint64_t ap; + int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 
8 : 4; /* Adjust the stack */ ap = thread_adjuserstack(thread, -new_ptr_size); @@ -1157,7 +1161,7 @@ grade: vm_map_switch(old_map); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER, 0, 0); exec_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_COPYOUT_DYNLINKER); goto badtoolate; } @@ -1186,8 +1190,9 @@ grade: #if SYSV_SHM /* FIXME: Till vmspace inherit is fixed: */ - if (!vfexec && p->vm_shm) + if (!vfexec && p->vm_shm) { shmexec(p); + } #endif #if SYSV_SEM /* Clean up the semaphores */ @@ -1205,15 +1210,15 @@ grade: if (secluded_for_apps && load_result.platform_binary) { if (strncmp(p->p_name, - "Camera", - sizeof (p->p_name)) == 0) { + "Camera", + sizeof(p->p_name)) == 0) { task_set_could_use_secluded_mem(task, TRUE); } else { task_set_could_use_secluded_mem(task, FALSE); } if (strncmp(p->p_name, - "mediaserverd", - sizeof (p->p_name)) == 0) { + "mediaserverd", + sizeof(p->p_name)) == 0) { task_set_could_also_use_secluded_mem(task, TRUE); } } @@ -1221,6 +1226,10 @@ grade: #if __arm64__ if (load_result.legacy_footprint) { +#if DEVELOPMENT || DEBUG + printf("%s: %d[%s] legacy footprint (mach-o)\n", + __FUNCTION__, p->p_pid, p->p_name); +#endif /* DEVELOPMENT || DEBUG */ task_set_legacy_footprint(task, TRUE); } #endif /* __arm64__ */ @@ -1269,7 +1278,7 @@ grade: uintptr_t fsid = 0, fileid = 0; if (imgp->ip_vattr) { - uint64_t fsid64 = get_va_fsid(imgp->ip_vattr); + uint64_t fsid64 = vnode_get_va_fsid(imgp->ip_vattr); fsid = fsid64; fileid = imgp->ip_vattr->va_fileid; // check for (unexpected) overflow and trace zero in that case @@ -1278,14 +1287,14 @@ grade: } } KERNEL_DEBUG_CONSTANT_IST1(TRACE_DATA_EXEC, p->p_pid, fsid, fileid, 0, - (uintptr_t)thread_tid(thread)); + (uintptr_t)thread_tid(thread)); /* * Collect the pathname for tracing */ kdbg_trace_string(p, &args[0], &args[1], &args[2], &args[3]); KERNEL_DEBUG_CONSTANT_IST1(TRACE_STRING_EXEC, args[0], args[1], - args[2], args[3], (uintptr_t)thread_tid(thread)); + args[2], args[3], (uintptr_t)thread_tid(thread)); } /* @@ -1349,7 +1358,7 @@ badtoolate: os_reason_free(exec_failure_reason); exec_failure_reason = OS_REASON_NULL; } - + done: if (load_result.threadstate) { kfree(load_result.threadstate, load_result.threadstate_sz); @@ -1359,7 +1368,7 @@ done: bad: /* If we hit this, we likely would have leaked an exit reason */ assert(exec_failure_reason == OS_REASON_NULL); - return(error); + return error; } @@ -1376,9 +1385,9 @@ struct execsw { int (*ex_imgact)(struct image_params *); const char *ex_name; } execsw[] = { - { exec_mach_imgact, "Mach-o Binary" }, - { exec_fat_imgact, "Fat Binary" }, - { exec_shell_imgact, "Interpreter Script" }, + { exec_mach_imgact, "Mach-o Binary" }, + { exec_fat_imgact, "Fat Binary" }, + { exec_shell_imgact, "Interpreter Script" }, { NULL, NULL} }; @@ -1417,15 +1426,16 @@ exec_activate_image(struct image_params *imgp) const char *excpath; int error; int resid; - int once = 1; /* save SGUID-ness for interpreted files */ + int once = 1; /* save SGUID-ness for interpreted files */ int i; int itercount = 0; proc_t p = vfs_context_proc(imgp->ip_vfs_context); error = execargs_alloc(imgp); - if (error) + if (error) { goto bad_notrans; - + } + error = exec_save_path(imgp, imgp->ip_user_fname, imgp->ip_seg, &excpath); if (error) { goto bad_notrans; @@ -1441,14 +1451,15 @@ exec_activate_image(struct image_params *imgp) } 
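[Editor's note, illustration only, not part of the patch: the execsw[] table above and the activator loop reformatted in the following hunks implement a claim/rescan protocol that the patch's own comments spell out: -1 means "not claimed, try the next activator", -2 means "encapsulated binary, rescan" (the fat case), -3 means "interpreter script, restart the lookup on the interpreter". A minimal userspace sketch of that dispatch convention follows; the names activator_fn, try_activate, and execsw_sketch are hypothetical, and the single-byte magic checks are a deliberate simplification of the real header parsing.]

#include <stdio.h>

#define IMG_NOT_CLAIMED   (-1)  /* activator did not claim the image */
#define IMG_ENCAPSULATED  (-2)  /* e.g. fat binary: rescan with new data */
#define IMG_INTERPRETER   (-3)  /* "#!" script: restart lookup on interpreter */

typedef int (*activator_fn)(const unsigned char *hdr);

/* Simplified recognizers: first byte of MH_MAGIC_64 / FAT_MAGIC on disk. */
static int mach_act(const unsigned char *h)  { return h[0] == 0xcf ? 0 : IMG_NOT_CLAIMED; }
static int fat_act(const unsigned char *h)   { return h[0] == 0xca ? IMG_ENCAPSULATED : IMG_NOT_CLAIMED; }
static int shell_act(const unsigned char *h) { return (h[0] == '#' && h[1] == '!') ? IMG_INTERPRETER : IMG_NOT_CLAIMED; }

static const activator_fn execsw_sketch[] = { mach_act, fat_act, shell_act, NULL };

static int
try_activate(const unsigned char *hdr)
{
	int error = IMG_NOT_CLAIMED;

	/* Same shape as the kernel loop: stop as soon as someone claims it. */
	for (int i = 0; error == IMG_NOT_CLAIMED && execsw_sketch[i] != NULL; i++) {
		error = execsw_sketch[i](hdr);
	}
	return error;  /* caller loops again on -2/-3 after refreshing state */
}

int
main(void)
{
	const unsigned char script[] = "#!/bin/sh\n";
	printf("%d\n", try_activate(script));  /* prints -3 */
	return 0;
}

[In the kernel, the -2 path jumps back to re-read the embedded header and the -3 path re-runs namei() on the interpreter, which is why an unclaimed image (-1 surviving the loop) is later mapped to ENOEXEC.]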
NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, - UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context); + UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context); again: error = namei(ndp); - if (error) + if (error) { goto bad_notrans; - imgp->ip_ndp = ndp; /* successful namei(); call nameidone() later */ - imgp->ip_vp = ndp->ni_vp; /* if set, need to vnode_put() at some point */ + } + imgp->ip_ndp = ndp; /* successful namei(); call nameidone() later */ + imgp->ip_vp = ndp->ni_vp; /* if set, need to vnode_put() at some point */ /* * Before we start the transition from binary A to binary B, make @@ -1465,12 +1476,14 @@ again: } error = proc_transstart(p, 1, 0); proc_unlock(p); - if (error) + if (error) { goto bad_notrans; + } error = exec_check_permissions(imgp); - if (error) + if (error) { goto bad; + } /* Copy; avoid invocation of an interpreter overwriting the original */ if (once) { @@ -1479,11 +1492,12 @@ again: } error = vn_rdwr(UIO_READ, imgp->ip_vp, imgp->ip_vdata, PAGE_SIZE, 0, - UIO_SYSSPACE, IO_NODELOCKED, - vfs_context_ucred(imgp->ip_vfs_context), - &resid, vfs_context_proc(imgp->ip_vfs_context)); - if (error) + UIO_SYSSPACE, IO_NODELOCKED, + vfs_context_ucred(imgp->ip_vfs_context), + &resid, vfs_context_proc(imgp->ip_vfs_context)); + if (error) { goto bad; + } if (resid) { memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid); @@ -1496,50 +1510,52 @@ encapsulated_binary: goto bad; } error = -1; - for(i = 0; error == -1 && execsw[i].ex_imgact != NULL; i++) { - + for (i = 0; error == -1 && execsw[i].ex_imgact != NULL; i++) { error = (*execsw[i].ex_imgact)(imgp); switch (error) { /* case -1: not claimed: continue */ - case -2: /* Encapsulated binary, imgp->ip_XXX set for next iteration */ + case -2: /* Encapsulated binary, imgp->ip_XXX set for next iteration */ goto encapsulated_binary; - case -3: /* Interpreter */ + case -3: /* Interpreter */ #if CONFIG_MACF /* * Copy the script label for later use. Note that * the label can be different when the script is * actually read by the interpreter. */ - if (imgp->ip_scriptlabelp) + if (imgp->ip_scriptlabelp) { mac_vnode_label_free(imgp->ip_scriptlabelp); + } imgp->ip_scriptlabelp = mac_vnode_label_alloc(); if (imgp->ip_scriptlabelp == NULL) { error = ENOMEM; break; } mac_vnode_label_copy(imgp->ip_vp->v_label, - imgp->ip_scriptlabelp); + imgp->ip_scriptlabelp); /* * Take a ref of the script vnode for later use. 
*/ - if (imgp->ip_scriptvp) + if (imgp->ip_scriptvp) { vnode_put(imgp->ip_scriptvp); - if (vnode_getwithref(imgp->ip_vp) == 0) + } + if (vnode_getwithref(imgp->ip_vp) == 0) { imgp->ip_scriptvp = imgp->ip_vp; + } #endif nameidone(ndp); vnode_put(imgp->ip_vp); - imgp->ip_vp = NULL; /* already put */ + imgp->ip_vp = NULL; /* already put */ imgp->ip_ndp = NULL; /* already nameidone */ /* Use excpath, which exec_shell_imgact reset to the interpreter */ NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF, - UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context); + UIO_SYSSPACE, CAST_USER_ADDR_T(excpath), imgp->ip_vfs_context); proc_transend(p, 0); goto again; @@ -1560,22 +1576,25 @@ encapsulated_binary: */ if (kauth_authorize_fileop_has_listeners()) { kauth_authorize_fileop(vfs_context_ucred(imgp->ip_vfs_context), - KAUTH_FILEOP_EXEC, - (uintptr_t)ndp->ni_vp, 0); + KAUTH_FILEOP_EXEC, + (uintptr_t)ndp->ni_vp, 0); } } bad: proc_transend(p, 0); bad_notrans: - if (imgp->ip_strings) + if (imgp->ip_strings) { execargs_free(imgp); - if (imgp->ip_ndp) + } + if (imgp->ip_ndp) { nameidone(imgp->ip_ndp); - if (ndp) + } + if (ndp) { FREE(ndp, M_TEMP); + } - return (error); + return error; } @@ -1591,7 +1610,7 @@ bad_notrans: */ static errno_t exec_handle_spawnattr_policy(proc_t p, int psa_apptype, uint64_t psa_qos_clamp, uint64_t psa_darwin_role, - ipc_port_t * portwatch_ports, int portwatch_count) + ipc_port_t * portwatch_ports, int portwatch_count) { int apptype = TASK_APPTYPE_NONE; int qos_clamp = THREAD_QOS_UNSPECIFIED; @@ -1600,49 +1619,49 @@ exec_handle_spawnattr_policy(proc_t p, int psa_apptype, uint64_t psa_qos_clamp, if ((psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK) != 0) { int proctype = psa_apptype & POSIX_SPAWN_PROC_TYPE_MASK; - switch(proctype) { - case POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE: - apptype = TASK_APPTYPE_DAEMON_INTERACTIVE; - break; - case POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD: - apptype = TASK_APPTYPE_DAEMON_STANDARD; - break; - case POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE: - apptype = TASK_APPTYPE_DAEMON_ADAPTIVE; - break; - case POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND: - apptype = TASK_APPTYPE_DAEMON_BACKGROUND; - break; - case POSIX_SPAWN_PROC_TYPE_APP_DEFAULT: - apptype = TASK_APPTYPE_APP_DEFAULT; - break; + switch (proctype) { + case POSIX_SPAWN_PROC_TYPE_DAEMON_INTERACTIVE: + apptype = TASK_APPTYPE_DAEMON_INTERACTIVE; + break; + case POSIX_SPAWN_PROC_TYPE_DAEMON_STANDARD: + apptype = TASK_APPTYPE_DAEMON_STANDARD; + break; + case POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE: + apptype = TASK_APPTYPE_DAEMON_ADAPTIVE; + break; + case POSIX_SPAWN_PROC_TYPE_DAEMON_BACKGROUND: + apptype = TASK_APPTYPE_DAEMON_BACKGROUND; + break; + case POSIX_SPAWN_PROC_TYPE_APP_DEFAULT: + apptype = TASK_APPTYPE_APP_DEFAULT; + break; #if !CONFIG_EMBEDDED - case POSIX_SPAWN_PROC_TYPE_APP_TAL: - apptype = TASK_APPTYPE_APP_TAL; - break; + case POSIX_SPAWN_PROC_TYPE_APP_TAL: + apptype = TASK_APPTYPE_APP_TAL; + break; #endif /* !CONFIG_EMBEDDED */ - default: - apptype = TASK_APPTYPE_NONE; - /* TODO: Should an invalid value here fail the spawn? */ - break; + default: + apptype = TASK_APPTYPE_NONE; + /* TODO: Should an invalid value here fail the spawn? 
*/ + break; } } if (psa_qos_clamp != POSIX_SPAWN_PROC_CLAMP_NONE) { switch (psa_qos_clamp) { - case POSIX_SPAWN_PROC_CLAMP_UTILITY: - qos_clamp = THREAD_QOS_UTILITY; - break; - case POSIX_SPAWN_PROC_CLAMP_BACKGROUND: - qos_clamp = THREAD_QOS_BACKGROUND; - break; - case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE: - qos_clamp = THREAD_QOS_MAINTENANCE; - break; - default: - qos_clamp = THREAD_QOS_UNSPECIFIED; - /* TODO: Should an invalid value here fail the spawn? */ - break; + case POSIX_SPAWN_PROC_CLAMP_UTILITY: + qos_clamp = THREAD_QOS_UTILITY; + break; + case POSIX_SPAWN_PROC_CLAMP_BACKGROUND: + qos_clamp = THREAD_QOS_BACKGROUND; + break; + case POSIX_SPAWN_PROC_CLAMP_MAINTENANCE: + qos_clamp = THREAD_QOS_MAINTENANCE; + break; + default: + qos_clamp = THREAD_QOS_UNSPECIFIED; + /* TODO: Should an invalid value here fail the spawn? */ + break; } } @@ -1650,33 +1669,33 @@ exec_handle_spawnattr_policy(proc_t p, int psa_apptype, uint64_t psa_qos_clamp, proc_darwin_role_to_task_role(psa_darwin_role, &role); } - if (apptype != TASK_APPTYPE_NONE || + if (apptype != TASK_APPTYPE_NONE || qos_clamp != THREAD_QOS_UNSPECIFIED || - role != TASK_UNSPECIFIED) { + role != TASK_UNSPECIFIED) { proc_set_task_spawnpolicy(p->task, apptype, qos_clamp, role, - portwatch_ports, portwatch_count); + portwatch_ports, portwatch_count); } - return (0); + return 0; } /* * exec_handle_port_actions * - * Description: Go through the _posix_port_actions_t contents, - * calling task_set_special_port, task_set_exception_ports - * and/or audit_session_spawnjoin for the current task. + * Description: Go through the _posix_port_actions_t contents, + * calling task_set_special_port, task_set_exception_ports + * and/or audit_session_spawnjoin for the current task. * * Parameters: struct image_params * Image parameter block * * Returns: 0 Success - * EINVAL Failure - * ENOTSUP Illegal posix_spawn attr flag was set + * EINVAL Failure + * ENOTSUP Illegal posix_spawn attr flag was set */ static errno_t exec_handle_port_actions(struct image_params *imgp, boolean_t * portwatch_present, - ipc_port_t * portwatch_ports) + ipc_port_t * portwatch_ports) { _posix_spawn_port_actions_t pacts = imgp->ip_px_spa; #if CONFIG_AUDIT @@ -1696,8 +1715,8 @@ exec_handle_port_actions(struct image_params *imgp, boolean_t * portwatch_presen if (MACH_PORT_VALID(act->new_port)) { kr = ipc_object_copyin(get_task_ipcspace(current_task()), - act->new_port, MACH_MSG_TYPE_COPY_SEND, - (ipc_object_t *) &port); + act->new_port, MACH_MSG_TYPE_COPY_SEND, + (ipc_object_t *) &port); if (kr != KERN_SUCCESS) { ret = EINVAL; @@ -1712,15 +1731,17 @@ exec_handle_port_actions(struct image_params *imgp, boolean_t * portwatch_presen case PSPA_SPECIAL: kr = task_set_special_port(task, act->which, port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { ret = EINVAL; + } break; case PSPA_EXCEPTION: kr = task_set_exception_ports(task, act->mask, port, - act->behavior, act->flavor); - if (kr != KERN_SUCCESS) + act->behavior, act->flavor); + if (kr != KERN_SUCCESS) { ret = EINVAL; + } break; #if CONFIG_AUDIT case PSPA_AU_SESSION: @@ -1755,9 +1776,10 @@ exec_handle_port_actions(struct image_params *imgp, boolean_t * portwatch_presen } done: - if (0 != ret) + if (0 != ret) { DTRACE_PROC1(spawn__port__failure, mach_port_name_t, act->new_port); - return (ret); + } + return ret; } /* @@ -1785,12 +1807,12 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) int action; proc_t p = vfs_context_proc(imgp->ip_vfs_context); _posix_spawn_file_actions_t px_sfap = imgp->ip_px_sfa; 
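[Editor's note, illustration only, not part of the patch: the PSFA_OPEN hunks that follow keep the same three-step dance the original code used, open wherever a descriptor lands, dup2 it onto the slot the caller asked for, then close the original. A hedged userspace analogue; do_psfa_open is a hypothetical name, and the kernel goes through open1()/dup2()/close_nocancel() internally rather than these libc calls.]

#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

/* Open `path` so that the result lands exactly on `wanted_fd`. */
static int
do_psfa_open(const char *path, int oflag, mode_t mode, int wanted_fd)
{
	int fd = open(path, oflag, mode);   /* may land on any free slot */
	if (fd < 0) {
		return errno;
	}
	if (fd == wanted_fd) {
		return 0;                   /* lucky: already the right slot */
	}
	if (dup2(fd, wanted_fd) < 0) {      /* force it into the wanted slot */
		int err = errno;
		(void)close(fd);
		return err;
	}
	return close(fd) < 0 ? errno : 0;   /* drop the original descriptor */
}

int
main(void)
{
	/* e.g. force fd 5 to be /dev/null, as PSFA_OPEN would */
	return do_psfa_open("/dev/null", O_RDONLY, 0, 5);
}

[As the in-source comment notes, doing it this way avoids reworking the open path to preallocate fd slots; the cost is a transient extra descriptor between the open and the close.]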
- int ival[2]; /* dummy retval for system calls) */ + int ival[2]; /* dummy retval for system calls) */ for (action = 0; action < px_sfap->psfa_act_count; action++) { - _psfa_action_t *psfa = &px_sfap->psfa_act_acts[ action]; + _psfa_action_t *psfa = &px_sfap->psfa_act_acts[action]; - switch(psfa->psfaa_type) { + switch (psfa->psfaa_type) { case PSFA_OPEN: { /* * Open is different, in that it requires the use of @@ -1819,19 +1841,19 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) VATTR_INIT(vap); /* Mask off all but regular access permissions */ - mode = ((mode &~ p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT; + mode = ((mode & ~p->p_fd->fd_cmask) & ALLPERMS) & ~S_ISTXT; VATTR_SET(vap, va_mode, mode & ACCESSPERMS); NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_SYSSPACE, - CAST_USER_ADDR_T(psfa->psfaa_openargs.psfao_path), - imgp->ip_vfs_context); + CAST_USER_ADDR_T(psfa->psfaa_openargs.psfao_path), + imgp->ip_vfs_context); - error = open1(imgp->ip_vfs_context, - ndp, - psfa->psfaa_openargs.psfao_oflag, - vap, - fileproc_alloc_init, NULL, - ival); + error = open1(imgp->ip_vfs_context, + ndp, + psfa->psfaa_openargs.psfao_oflag, + vap, + fileproc_alloc_init, NULL, + ival); FREE(bufp, M_TEMP); @@ -1841,8 +1863,9 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) * reworking all the open code to preallocate fd * slots, and internally taking one as an argument. */ - if (error || ival[0] == psfa->psfaa_filedes) + if (error || ival[0] == psfa->psfaa_filedes) { break; + } origfd = ival[0]; /* @@ -1860,8 +1883,9 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) * fd we wanted, the error will stop us. */ error = dup2(p, &dup2a, ival); - if (error) + if (error) { break; + } /* * Finally, close the original fd. @@ -1869,8 +1893,8 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) ca.fd = origfd; error = close_nocancel(p, &ca, ival); - } - break; + } + break; case PSFA_DUP2: { struct dup2_args dup2a; @@ -1885,8 +1909,8 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) * fd we wanted, the error will stop us. 
*/ error = dup2(p, &dup2a, ival); - } - break; + } + break; case PSFA_CLOSE: { struct close_nocancel_args ca; @@ -1894,8 +1918,8 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) ca.fd = psfa->psfaa_filedes; error = close_nocancel(p, &ca, ival); - } - break; + } + break; case PSFA_INHERIT: { struct fcntl_nocancel_args fcntla; @@ -1909,8 +1933,9 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) */ fcntla.fd = psfa->psfaa_filedes; fcntla.cmd = F_GETFD; - if ((error = fcntl_nocancel(p, &fcntla, ival)) != 0) + if ((error = fcntl_nocancel(p, &fcntla, ival)) != 0) { break; + } if ((ival[0] & FD_CLOEXEC) == FD_CLOEXEC) { fcntla.fd = psfa->psfaa_filedes; @@ -1918,9 +1943,8 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) fcntla.arg = ival[0] & ~FD_CLOEXEC; error = fcntl_nocancel(p, &fcntla, ival); } - - } - break; + } + break; default: error = EINVAL; @@ -1932,7 +1956,7 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) if (error) { if (PSFA_OPEN == psfa->psfaa_type) { DTRACE_PROC1(spawn__open__failure, uintptr_t, - psfa->psfaa_openargs.psfao_path); + psfa->psfaa_openargs.psfao_path); } else { DTRACE_PROC1(spawn__fd__failure, int, psfa->psfaa_filedes); } @@ -1940,8 +1964,9 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) } } - if (error != 0 || (psa_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) == 0) - return (error); + if (error != 0 || (psa_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) == 0) { + return error; + } /* * If POSIX_SPAWN_CLOEXEC_DEFAULT is set, behave (during @@ -1960,7 +1985,7 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) switch (psfa->psfaa_type) { case PSFA_DUP2: fd = psfa->psfaa_openargs.psfao_oflag; - /*FALLTHROUGH*/ + /*FALLTHROUGH*/ case PSFA_OPEN: case PSFA_INHERIT: *fdflags(p, fd) |= UF_INHERIT; @@ -1972,7 +1997,7 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags) } proc_fdunlock(p); - return (0); + return 0; } #if CONFIG_MACF @@ -1985,20 +2010,23 @@ exec_spawnattr_getmacpolicyinfo(const void *macextensions, const char *policynam const struct _posix_spawn_mac_policy_extensions *psmx = macextensions; int i; - if (psmx == NULL) + if (psmx == NULL) { return NULL; + } for (i = 0; i < psmx->psmx_count; i++) { const _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i]; if (strncmp(extension->policyname, policyname, sizeof(extension->policyname)) == 0) { - if (lenp != NULL) + if (lenp != NULL) { *lenp = extension->datalen; + } return extension->datap; } } - if (lenp != NULL) + if (lenp != NULL) { *lenp = 0; + } return NULL; } @@ -2019,8 +2047,9 @@ spawn_copyin_macpolicyinfo(const struct user__posix_spawn_args_desc *px_args, _p } MALLOC(psmx, _posix_spawn_mac_policy_extensions_t, px_args->mac_extensions_size, M_TEMP, M_WAITOK); - if ((error = copyin(px_args->mac_extensions, psmx, px_args->mac_extensions_size)) != 0) + if ((error = copyin(px_args->mac_extensions, psmx, px_args->mac_extensions_size)) != 0) { goto bad; + } size_t extsize = PS_MAC_EXTENSIONS_SIZE(psmx->psmx_count); if (extsize == 0 || extsize > px_args->mac_extensions_size) { @@ -2053,8 +2082,9 @@ spawn_copyin_macpolicyinfo(const struct user__posix_spawn_args_desc *px_args, _p bad: if (psmx != NULL) { - for (i = 0; i < copycnt; i++) + for (i = 0; i < copycnt; i++) { FREE(psmx->psmx_extensions[i].datap, M_TEMP); + } FREE(psmx, M_TEMP); } return error; @@ -2065,16 +2095,19 @@ spawn_free_macpolicyinfo(_posix_spawn_mac_policy_extensions_t psmx) { int i; - if (psmx == NULL) 
+ if (psmx == NULL) { return; - for (i = 0; i < psmx->psmx_count; i++) + } + for (i = 0; i < psmx->psmx_count; i++) { FREE(psmx->psmx_extensions[i].datap, M_TEMP); + } FREE(psmx, M_TEMP); } #endif /* CONFIG_MACF */ #if CONFIG_COALITIONS -static inline void spawn_coalitions_release_all(coalition_t coal[COALITION_NUM_TYPES]) +static inline void +spawn_coalitions_release_all(coalition_t coal[COALITION_NUM_TYPES]) { for (int c = 0; c < COALITION_NUM_TYPES; c++) { if (coal[c]) { @@ -2086,7 +2119,8 @@ static inline void spawn_coalitions_release_all(coalition_t coal[COALITION_NUM_T #endif #if CONFIG_PERSONAS -static int spawn_validate_persona(struct _posix_spawn_persona_info *px_persona) +static int +spawn_validate_persona(struct _posix_spawn_persona_info *px_persona) { int error = 0; struct persona *persona = NULL; @@ -2096,8 +2130,9 @@ static int spawn_validate_persona(struct _posix_spawn_persona_info *px_persona) * TODO: rdar://problem/19981151 * Add entitlement check! */ - if (!kauth_cred_issuser(kauth_cred_get())) + if (!kauth_cred_issuser(kauth_cred_get())) { return EPERM; + } persona = persona_lookup(px_persona->pspi_id); if (!persona) { @@ -2123,7 +2158,7 @@ static int spawn_validate_persona(struct _posix_spawn_persona_info *px_persona) gid_t groups[NGROUPS_MAX]; if (persona_get_groups(persona, &ngroups, groups, - px_persona->pspi_ngroups) != 0) { + px_persona->pspi_ngroups) != 0) { error = EINVAL; goto out; } @@ -2145,29 +2180,33 @@ static int spawn_validate_persona(struct _posix_spawn_persona_info *px_persona) } out: - if (persona) + if (persona) { persona_put(persona); + } return error; } -static int spawn_persona_adopt(proc_t p, struct _posix_spawn_persona_info *px_persona) +static int +spawn_persona_adopt(proc_t p, struct _posix_spawn_persona_info *px_persona) { int ret; kauth_cred_t cred; struct persona *persona = NULL; int override = !!(px_persona->pspi_flags & POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE); - if (!override) + if (!override) { return persona_proc_adopt_id(p, px_persona->pspi_id, NULL); + } /* * we want to spawn into the given persona, but we want to override * the kauth with a different UID/GID combo */ persona = persona_lookup(px_persona->pspi_id); - if (!persona) + if (!persona) { return ESRCH; + } cred = persona_get_cred(persona); if (!cred) { @@ -2177,24 +2216,24 @@ static int spawn_persona_adopt(proc_t p, struct _posix_spawn_persona_info *px_pe if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_UID) { cred = kauth_cred_setresuid(cred, - px_persona->pspi_uid, - px_persona->pspi_uid, - px_persona->pspi_uid, - KAUTH_UID_NONE); + px_persona->pspi_uid, + px_persona->pspi_uid, + px_persona->pspi_uid, + KAUTH_UID_NONE); } if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GID) { cred = kauth_cred_setresgid(cred, - px_persona->pspi_gid, - px_persona->pspi_gid, - px_persona->pspi_gid); + px_persona->pspi_gid, + px_persona->pspi_gid, + px_persona->pspi_gid); } if (px_persona->pspi_flags & POSIX_SPAWN_PERSONA_GROUPS) { cred = kauth_cred_setgroups(cred, - px_persona->pspi_groups, - px_persona->pspi_ngroups, - px_persona->pspi_gmuid); + px_persona->pspi_groups, + px_persona->pspi_ngroups, + px_persona->pspi_gmuid); } ret = persona_proc_adopt(p, persona, cred); @@ -2205,6 +2244,22 @@ out: } #endif +#if __arm64__ +static inline void +proc_legacy_footprint(proc_t p, task_t task, const char *caller) +{ + boolean_t legacy_footprint_entitled; + + legacy_footprint_entitled = IOTaskHasEntitlement(task, + "com.apple.private.memory.legacy_footprint"); + if (legacy_footprint_entitled) { + 
printf("%s: %d[%s] legacy footprint (entitled)\n", + caller, p->p_pid, p->p_name); + task_set_legacy_footprint(task, TRUE); + } +} +#endif /* __arm64__ */ + /* * posix_spawn * @@ -2236,14 +2291,14 @@ out: int posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) { - proc_t p = ap; /* quiet bogus GCC vfork() warning */ + proc_t p = ap; /* quiet bogus GCC vfork() warning */ user_addr_t pid = uap->pid; - int ival[2]; /* dummy retval for setpgid() */ - char *bufp = NULL; + int ival[2]; /* dummy retval for setpgid() */ + char *bufp = NULL; struct image_params *imgp; struct vnode_attr *vap; struct vnode_attr *origvap; - struct uthread *uthread = 0; /* compiler complains if not set to 0*/ + struct uthread *uthread = 0; /* compiler complains if not set to 0*/ int error, sig; int is_64 = IS_64BIT_PROCESS(p); struct vfs_context context; @@ -2267,7 +2322,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) #endif /* - * Allocate a big chunk for locals instead of using stack since these + * Allocate a big chunk for locals instead of using stack since these * structures are pretty big. */ MALLOC(bufp, char *, (sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap)), M_TEMP, M_WAITOK | M_ZERO); @@ -2293,7 +2348,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) imgp->ip_cs_error = OS_REASON_NULL; if (uap->adesc != USER_ADDR_NULL) { - if(is_64) { + if (is_64) { error = copyin(uap->adesc, &px_args, sizeof(px_args)); } else { struct user32__posix_spawn_args_desc px_args32; @@ -2317,13 +2372,14 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) px_args.persona_info_size = px_args32.persona_info_size; px_args.persona_info = CAST_USER_ADDR_T(px_args32.persona_info); } - if (error) + if (error) { goto bad; + } if (px_args.attr_size != 0) { - /* - * We are not copying the port_actions pointer, - * because we already have it from px_args. + /* + * We are not copying the port_actions pointer, + * because we already have it from px_args. 
* This is a bit fragile: */ @@ -2331,7 +2387,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) goto bad; } - bzero( (void *)( (unsigned long) &px_sa + px_sa_offset), sizeof(px_sa) - px_sa_offset ); + bzero((void *)((unsigned long) &px_sa + px_sa_offset), sizeof(px_sa) - px_sa_offset ); imgp->ip_px_sa = &px_sa; } @@ -2351,9 +2407,10 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) } imgp->ip_px_sfa = px_sfap; - if ((error = copyin(px_args.file_actions, px_sfap, - px_args.file_actions_size)) != 0) + if ((error = copyin(px_args.file_actions, px_sfap, + px_args.file_actions_size)) != 0) { goto bad; + } /* Verify that the action count matches the struct size */ size_t psfsize = PSF_ACTIONS_SIZE(px_sfap->psfa_act_count); @@ -2365,22 +2422,23 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) if (px_args.port_actions_size != 0) { /* Limit port_actions to one page of data */ if (px_args.port_actions_size < PS_PORT_ACTIONS_SIZE(1) || - px_args.port_actions_size > PAGE_SIZE) { + px_args.port_actions_size > PAGE_SIZE) { error = EINVAL; goto bad; } - MALLOC(px_spap, _posix_spawn_port_actions_t, - px_args.port_actions_size, M_TEMP, M_WAITOK); + MALLOC(px_spap, _posix_spawn_port_actions_t, + px_args.port_actions_size, M_TEMP, M_WAITOK); if (px_spap == NULL) { error = ENOMEM; goto bad; } imgp->ip_px_spa = px_spap; - if ((error = copyin(px_args.port_actions, px_spap, - px_args.port_actions_size)) != 0) + if ((error = copyin(px_args.port_actions, px_spap, + px_args.port_actions_size)) != 0) { goto bad; + } /* Verify that the action count matches the struct size */ size_t pasize = PS_PORT_ACTIONS_SIZE(px_spap->pspa_count); @@ -2398,7 +2456,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) goto bad; } - MALLOC(px_persona, struct _posix_spawn_persona_info *, px_args.persona_info_size, M_TEMP, M_WAITOK|M_ZERO); + MALLOC(px_persona, struct _posix_spawn_persona_info *, px_args.persona_info_size, M_TEMP, M_WAITOK | M_ZERO); if (px_persona == NULL) { error = ENOMEM; goto bad; @@ -2406,16 +2464,19 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) imgp->ip_px_persona = px_persona; if ((error = copyin(px_args.persona_info, px_persona, - px_args.persona_info_size)) != 0) + px_args.persona_info_size)) != 0) { goto bad; - if ((error = spawn_validate_persona(px_persona)) != 0) + } + if ((error = spawn_validate_persona(px_persona)) != 0) { goto bad; + } } #endif #if CONFIG_MACF if (px_args.mac_extensions_size != 0) { - if ((error = spawn_copyin_macpolicyinfo(&px_args, (_posix_spawn_mac_policy_extensions_t *)&imgp->ip_px_smpx)) != 0) + if ((error = spawn_copyin_macpolicyinfo(&px_args, (_posix_spawn_mac_policy_extensions_t *)&imgp->ip_px_smpx)) != 0) { goto bad; + } } #endif /* CONFIG_MACF */ } @@ -2429,8 +2490,8 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) * which depends on it. */ if (uthread->uu_flag & UT_VFORK) { - error = EINVAL; - goto bad; + error = EINVAL; + goto bad; } /* @@ -2440,8 +2501,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) * which is one of the most expensive things about using fork() * and execve(). */ - if (imgp->ip_px_sa == NULL || !(px_sa.psa_flags & POSIX_SPAWN_SETEXEC)){ - + if (imgp->ip_px_sa == NULL || !(px_sa.psa_flags & POSIX_SPAWN_SETEXEC)) { /* Set the new task's coalition, if it is requested. 
*/ coalition_t coal[COALITION_NUM_TYPES] = { COALITION_NULL }; #if CONFIG_COALITIONS @@ -2450,17 +2510,20 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) struct _posix_spawn_coalition_info coal_info; int coal_role[COALITION_NUM_TYPES]; - if (imgp->ip_px_sa == NULL || !px_args.coal_info) + if (imgp->ip_px_sa == NULL || !px_args.coal_info) { goto do_fork1; + } memset(&coal_info, 0, sizeof(coal_info)); - if (px_args.coal_info_size > sizeof(coal_info)) + if (px_args.coal_info_size > sizeof(coal_info)) { px_args.coal_info_size = sizeof(coal_info); + } error = copyin(px_args.coal_info, - &coal_info, px_args.coal_info_size); - if (error != 0) + &coal_info, px_args.coal_info_size); + if (error != 0) { goto bad; + } ncoals = 0; for (i = 0; i < COALITION_NUM_TYPES; i++) { @@ -2473,8 +2536,8 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) */ if (!task_is_in_privileged_coalition(p->task, i)) { coal_dbg("ERROR: %d not in privilegd " - "coalition of type %d", - p->p_pid, i); + "coalition of type %d", + p->p_pid, i); spawn_coalitions_release_all(coal); error = EPERM; goto bad; @@ -2489,7 +2552,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) coal[i] = coalition_find_and_activate_by_id(cid); if (coal[i] == COALITION_NULL) { coal_dbg("could not find coalition id:%llu " - "(perhaps it has been terminated or reaped)", cid); + "(perhaps it has been terminated or reaped)", cid); /* * release any other coalition's we * may have a reference to @@ -2500,7 +2563,7 @@ posix_spawn(proc_t ap, struct posix_spawn_args *uap, int32_t *retval) } if (coalition_type(coal[i]) != i) { coal_dbg("coalition with id:%lld is not of type:%d" - " (it's type:%d)", cid, i, coalition_type(coal[i])); + " (it's type:%d)", cid, i, coalition_type(coal[i])); error = ESRCH; goto bad; } @@ -2541,10 +2604,11 @@ do_fork1: /* set the roles of this task within each given coalition */ if (error == 0) { kr = coalitions_set_roles(coal, new_task, coal_role); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { error = EINVAL; + } if (kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_COALITION, - MACH_COALITION_ADOPT))) { + MACH_COALITION_ADOPT))) { for (i = 0; i < COALITION_NUM_TYPES; i++) { if (coal[i] != COALITION_NULL) { /* @@ -2552,10 +2616,10 @@ do_fork1: * will get truncated to 32 bits */ KDBG_RELEASE(MACHDBG_CODE( - DBG_MACH_COALITION, - MACH_COALITION_ADOPT), - coalition_id(coal[i]), - get_task_uniqueid(new_task)); + DBG_MACH_COALITION, + MACH_COALITION_ADOPT), + coalition_id(coal[i]), + get_task_uniqueid(new_task)); } } } @@ -2567,8 +2631,8 @@ do_fork1: if (error != 0) { goto bad; } - imgp->ip_flags |= IMGPF_SPAWN; /* spawn w/o exec */ - spawn_no_exec = TRUE; /* used in later tests */ + imgp->ip_flags |= IMGPF_SPAWN; /* spawn w/o exec */ + spawn_no_exec = TRUE; /* used in later tests */ #if CONFIG_PERSONAS /* @@ -2584,7 +2648,7 @@ do_fork1: #if 0 if (!proc_has_persona(p) && imgp->ip_px_persona == NULL) { MALLOC(px_persona, struct _posix_spawn_persona_info *, - sizeof(*px_persona), M_TEMP, M_WAITOK|M_ZERO); + sizeof(*px_persona), M_TEMP, M_WAITOK | M_ZERO); if (px_persona == NULL) { error = ENOMEM; goto bad; @@ -2621,12 +2685,12 @@ do_fork1: * transition from proc->task, since it will modify old_task. 
*/ imgp->ip_new_thread = fork_create_child(old_task, - NULL, - p, - FALSE, - p->p_flag & P_LP64, - task_get_64bit_data(old_task), - TRUE); + NULL, + p, + FALSE, + p->p_flag & P_LP64, + task_get_64bit_data(old_task), + TRUE); /* task and thread ref returned by fork_create_child */ if (imgp->ip_new_thread == NULL) { error = ENOMEM; @@ -2639,7 +2703,7 @@ do_fork1: if (spawn_no_exec) { p = (proc_t)get_bsdthreadtask_info(imgp->ip_new_thread); - + /* * We had to wait until this point before firing the * proc:::create probe, otherwise p would not point to the @@ -2650,7 +2714,7 @@ do_fork1: assert(p != NULL); context.vc_thread = imgp->ip_new_thread; - context.vc_ucred = p->p_ucred; /* XXX must NOT be kauth_cred_get() */ + context.vc_ucred = p->p_ucred; /* XXX must NOT be kauth_cred_get() */ /* * Post fdcopy(), pre exec_handle_sugid() - this is where we want @@ -2666,8 +2730,9 @@ do_fork1: * is handled in exec_handle_file_actions(). */ if ((error = exec_handle_file_actions(imgp, - imgp->ip_px_sa != NULL ? px_sa.psa_flags : 0)) != 0) + imgp->ip_px_sa != NULL ? px_sa.psa_flags : 0)) != 0) { goto bad; + } } /* Has spawn port actions? */ @@ -2676,8 +2741,9 @@ do_fork1: boolean_t portwatch_present = FALSE; /* Will this process become adaptive? The apptype isn't ready yet, so we can't look there. */ - if (imgp->ip_px_sa != NULL && px_sa.psa_apptype == POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE) + if (imgp->ip_px_sa != NULL && px_sa.psa_apptype == POSIX_SPAWN_PROC_TYPE_DAEMON_ADAPTIVE) { is_adaptive = TRUE; + } /* * portwatch only: @@ -2691,8 +2757,9 @@ do_fork1: portwatch_ports = NULL; } - if ((error = exec_handle_port_actions(imgp, &portwatch_present, portwatch_ports)) != 0) + if ((error = exec_handle_port_actions(imgp, &portwatch_present, portwatch_ports)) != 0) { goto bad; + } if (portwatch_present == FALSE && portwatch_ports != NULL) { FREE(portwatch_ports, M_TEMP); @@ -2715,8 +2782,9 @@ do_fork1: * Effectively, call setpgid() system call; works * because there are no pointer arguments. */ - if((error = setpgid(p, &spga, ival)) != 0) + if ((error = setpgid(p, &spga, ival)) != 0) { goto bad; + } } /* @@ -2770,8 +2838,9 @@ do_fork1: * being executed. */ error = spawn_persona_adopt(p, imgp->ip_px_persona); - if (error != 0) + if (error != 0) { goto bad; + } } #endif /* CONFIG_PERSONAS */ #if !SECURE_KERNEL @@ -2785,20 +2854,23 @@ do_fork1: * useful or necessary to disable ASLR on a per-process * basis for unit testing and debugging. */ - if (px_sa.psa_flags & _POSIX_SPAWN_DISABLE_ASLR) + if (px_sa.psa_flags & _POSIX_SPAWN_DISABLE_ASLR) { OSBitOrAtomic(P_DISABLE_ASLR, &p->p_flag); + } #endif /* !SECURE_KERNEL */ /* Randomize high bits of ASLR slide */ - if (px_sa.psa_flags & _POSIX_SPAWN_HIGH_BITS_ASLR) + if (px_sa.psa_flags & _POSIX_SPAWN_HIGH_BITS_ASLR) { imgp->ip_flags |= IMGPF_HIGH_BITS_ASLR; + } /* * Forcibly disallow execution from data pages for the spawned process * even if it would otherwise be permitted by the architecture default. */ - if (px_sa.psa_flags & _POSIX_SPAWN_ALLOW_DATA_EXEC) + if (px_sa.psa_flags & _POSIX_SPAWN_ALLOW_DATA_EXEC) { imgp->ip_flags |= IMGPF_ALLOW_DATA_EXEC; + } } /* @@ -2806,10 +2878,11 @@ do_fork1: * _POSIX_SPAWN_DISABLE_ASLR attribute was found above or if * P_DISABLE_ASLR was inherited from the parent process. */ - if (p->p_flag & P_DISABLE_ASLR) + if (p->p_flag & P_DISABLE_ASLR) { imgp->ip_flags |= IMGPF_DISABLE_ASLR; + } - /* + /* * Clear transition flag so we won't hang if exec_activate_image() causes * an automount (and launchd does a proc sysctl to service it). 
* @@ -2820,11 +2893,12 @@ do_fork1: proc_transit_set = 0; } -#if MAC_SPAWN /* XXX */ +#if MAC_SPAWN /* XXX */ if (uap->mac_p != USER_ADDR_NULL) { error = mac_execve_enter(uap->mac_p, imgp); - if (error) + if (error) { goto bad; + } } #endif @@ -2832,7 +2906,7 @@ do_fork1: * Activate the image */ error = exec_activate_image(imgp); - + if (error == 0 && !spawn_no_exec) { p = proc_exec_switch_task(p, old_task, new_task, imgp->ip_new_thread); /* proc ref returned */ @@ -2874,8 +2948,9 @@ do_fork1: * they were unmasked in the parent; note that some signals * are not maskable. */ - if (px_sa.psa_flags & POSIX_SPAWN_SETSIGMASK) + if (px_sa.psa_flags & POSIX_SPAWN_SETSIGMASK) { child_uthread->uu_sigmask = (px_sa.psa_sigmask & ~sigcantmask); + } /* * Default a list of signals instead of ignoring them, if * they were ignored in the parent. Note that we pass @@ -2888,9 +2963,10 @@ do_fork1: vec.sa_tramp = 0; vec.sa_mask = 0; vec.sa_flags = 0; - for (sig = 1; sig < NSIG; sig++) - if (px_sa.psa_sigdefault & (1 << (sig-1))) { + for (sig = 1; sig < NSIG; sig++) { + if (px_sa.psa_sigdefault & (1 << (sig - 1))) { error = setsigvec(p, child_thread, sig, &vec, spawn_no_exec); + } } } @@ -2908,10 +2984,10 @@ do_fork1: * whomever is turning it on could just as easily choose not to do so. */ error = proc_set_task_ruse_cpu(p->task, - TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC, - px_sa.psa_cpumonitor_percent, - px_sa.psa_cpumonitor_interval * NSEC_PER_SEC, - 0, TRUE); + TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC, + px_sa.psa_cpumonitor_percent, + px_sa.psa_cpumonitor_interval * NSEC_PER_SEC, + 0, TRUE); } } @@ -2920,29 +2996,31 @@ bad: if (error == 0) { /* reset delay idle sleep status if set */ #if !CONFIG_EMBEDDED - if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) + if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) { OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP), &p->p_flag); + } #endif /* !CONFIG_EMBEDDED */ /* upon successful spawn, re/set the proc control state */ if (imgp->ip_px_sa != NULL) { switch (px_sa.psa_pcontrol) { - case POSIX_SPAWN_PCONTROL_THROTTLE: - p->p_pcaction = P_PCTHROTTLE; - break; - case POSIX_SPAWN_PCONTROL_SUSPEND: - p->p_pcaction = P_PCSUSP; - break; - case POSIX_SPAWN_PCONTROL_KILL: - p->p_pcaction = P_PCKILL; - break; - case POSIX_SPAWN_PCONTROL_NONE: - default: - p->p_pcaction = 0; - break; - }; + case POSIX_SPAWN_PCONTROL_THROTTLE: + p->p_pcaction = P_PCTHROTTLE; + break; + case POSIX_SPAWN_PCONTROL_SUSPEND: + p->p_pcaction = P_PCSUSP; + break; + case POSIX_SPAWN_PCONTROL_KILL: + p->p_pcaction = P_PCKILL; + break; + case POSIX_SPAWN_PCONTROL_NONE: + default: + p->p_pcaction = 0; + break; + } + ; } exec_resettextvp(p, imgp); - + #if CONFIG_MEMORYSTATUS /* Has jetsam attributes? 
*/ if (imgp->ip_px_sa != NULL && (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_SET)) { @@ -2956,20 +3034,19 @@ bad: */ if (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND) { memorystatus_update(p, px_sa.psa_priority, 0, - (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY), - TRUE, - -1, TRUE, - px_sa.psa_memlimit_inactive, FALSE); + (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY), + TRUE, + -1, TRUE, + px_sa.psa_memlimit_inactive, FALSE); } else { memorystatus_update(p, px_sa.psa_priority, 0, - (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY), - TRUE, - px_sa.psa_memlimit_active, - (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL), - px_sa.psa_memlimit_inactive, - (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL)); + (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY), + TRUE, + px_sa.psa_memlimit_active, + (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL), + px_sa.psa_memlimit_inactive, + (px_sa.psa_jetsam_flags & POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL)); } - } #endif /* CONFIG_MEMORYSTATUS */ if (imgp->ip_px_sa != NULL && px_sa.psa_thread_limit > 0) { @@ -2986,8 +3063,9 @@ bad: * before check_for_signature(), which uses psignal. */ if (spawn_no_exec) { - if (proc_transit_set) + if (proc_transit_set) { proc_transend(p, 0); + } /* * Drop the signal lock on the child which was taken on our @@ -3001,8 +3079,9 @@ bad: } /* flag exec has occurred, notify only if it has not failed due to FP Key error */ - if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) + if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) { proc_knote(p, NOTE_EXEC); + } if (error == 0) { @@ -3018,6 +3097,10 @@ bad: task_bank_init(new_task); proc_transend(p, 0); } + +#if __arm64__ + proc_legacy_footprint(p, new_task, __FUNCTION__); +#endif /* __arm64__ */ } /* Inherit task role from old task to new task for exec */ @@ -3038,7 +3121,7 @@ bad: struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa; exec_handle_spawnattr_policy(p, psa->psa_apptype, psa->psa_qos_clamp, psa->psa_darwin_role, - portwatch_ports, portwatch_count); + portwatch_ports, portwatch_count); } /* @@ -3101,34 +3184,44 @@ bad: psignal_vfork(p, p->task, imgp->ip_new_thread, SIGTRAP); } - if (error == 0 && !spawn_no_exec) - KDBG(BSDDBG_CODE(DBG_BSD_PROC,BSD_PROC_EXEC), - p->p_pid); + if (error == 0 && !spawn_no_exec) { + KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC), + p->p_pid); + } } if (imgp != NULL) { - if (imgp->ip_vp) + if (imgp->ip_vp) { vnode_put(imgp->ip_vp); - if (imgp->ip_scriptvp) + } + if (imgp->ip_scriptvp) { vnode_put(imgp->ip_scriptvp); - if (imgp->ip_strings) + } + if (imgp->ip_strings) { execargs_free(imgp); - if (imgp->ip_px_sfa != NULL) + } + if (imgp->ip_px_sfa != NULL) { FREE(imgp->ip_px_sfa, M_TEMP); - if (imgp->ip_px_spa != NULL) + } + if (imgp->ip_px_spa != NULL) { FREE(imgp->ip_px_spa, M_TEMP); + } #if CONFIG_PERSONAS - if (imgp->ip_px_persona != NULL) + if (imgp->ip_px_persona != NULL) { FREE(imgp->ip_px_persona, M_TEMP); + } #endif #if CONFIG_MACF - if (imgp->ip_px_smpx != NULL) + if (imgp->ip_px_smpx != NULL) { spawn_free_macpolicyinfo(imgp->ip_px_smpx); - if (imgp->ip_execlabelp) + } + if (imgp->ip_execlabelp) { mac_cred_label_free(imgp->ip_execlabelp); - if (imgp->ip_scriptlabelp) + } + if (imgp->ip_scriptlabelp) { mac_vnode_label_free(imgp->ip_scriptlabelp); + } if (imgp->ip_cs_error != OS_REASON_NULL) { os_reason_free(imgp->ip_cs_error); 
imgp->ip_cs_error = OS_REASON_NULL; @@ -3207,8 +3300,9 @@ bad: /* * If the parent wants the pid, copy it out */ - if (pid != USER_ADDR_NULL) + if (pid != USER_ADDR_NULL) { (void)suword(pid, p->p_pid); + } retval[0] = error; /* @@ -3267,8 +3361,8 @@ bad: if (inherit != NULL) { ipc_importance_release(inherit); } - - return(error); + + return error; } /* @@ -3433,7 +3527,7 @@ execve(proc_t p, struct execve_args *uap, int32_t *retval) muap.mac_p = USER_ADDR_NULL; err = __mac_execve(p, &muap, retval); - return(err); + return err; } /* @@ -3465,14 +3559,14 @@ execve(proc_t p, struct execve_args *uap, int32_t *retval) int __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) { - char *bufp = NULL; + char *bufp = NULL; struct image_params *imgp; struct vnode_attr *vap; struct vnode_attr *origvap; int error; int is_64 = IS_64BIT_PROCESS(p); struct vfs_context context; - struct uthread *uthread; + struct uthread *uthread; task_t old_task = current_task(); task_t new_task = NULL; boolean_t should_release_proc_ref = FALSE; @@ -3481,9 +3575,9 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) void *inherit = NULL; context.vc_thread = current_thread(); - context.vc_ucred = kauth_cred_proc_ref(p); /* XXX must NOT be kauth_cred_get() */ + context.vc_ucred = kauth_cred_proc_ref(p); /* XXX must NOT be kauth_cred_get() */ - /* Allocate a big chunk for locals instead of using stack since these + /* Allocate a big chunk for locals instead of using stack since these * structures a pretty big. */ MALLOC(bufp, char *, (sizeof(*imgp) + sizeof(*vap) + sizeof(*origvap)), M_TEMP, M_WAITOK | M_ZERO); @@ -3494,7 +3588,7 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) } vap = (struct vnode_attr *) (bufp + sizeof(*imgp)); origvap = (struct vnode_attr *) (bufp + sizeof(*imgp) + sizeof(*vap)); - + /* Initialize the common data in the image_params structure */ imgp->ip_user_fname = uap->fname; imgp->ip_user_argv = uap->argp; @@ -3549,12 +3643,12 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) * transition from proc->task, since it will modify old_task. */ imgp->ip_new_thread = fork_create_child(old_task, - NULL, - p, - FALSE, - p->p_flag & P_LP64, - task_get_64bit_data(old_task), - TRUE); + NULL, + p, + FALSE, + p->p_flag & P_LP64, + task_get_64bit_data(old_task), + TRUE); /* task and thread ref returned by fork_create_child */ if (imgp->ip_new_thread == NULL) { error = ENOMEM; @@ -3592,10 +3686,11 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) } kauth_cred_unref(&context.vc_ucred); - + /* Image not claimed by any activator? 
*/ - if (error == -1) + if (error == -1) { error = ENOEXEC; + } if (!error) { exec_done = TRUE; @@ -3606,20 +3701,26 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) } /* flag exec has occurred, notify only if it has not failed due to FP Key error */ - if (exec_done && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) + if (exec_done && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) { proc_knote(p, NOTE_EXEC); + } - if (imgp->ip_vp != NULLVP) + if (imgp->ip_vp != NULLVP) { vnode_put(imgp->ip_vp); - if (imgp->ip_scriptvp != NULLVP) + } + if (imgp->ip_scriptvp != NULLVP) { vnode_put(imgp->ip_scriptvp); - if (imgp->ip_strings) + } + if (imgp->ip_strings) { execargs_free(imgp); + } #if CONFIG_MACF - if (imgp->ip_execlabelp) + if (imgp->ip_execlabelp) { mac_cred_label_free(imgp->ip_execlabelp); - if (imgp->ip_scriptlabelp) + } + if (imgp->ip_scriptlabelp) { mac_vnode_label_free(imgp->ip_scriptlabelp); + } #endif if (imgp->ip_cs_error != OS_REASON_NULL) { os_reason_free(imgp->ip_cs_error); @@ -3640,6 +3741,10 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) task_bank_init(new_task); proc_transend(p, 0); +#if __arm64__ + proc_legacy_footprint(p, new_task, __FUNCTION__); +#endif /* __arm64__ */ + /* Sever any extant thread affinity */ thread_affinity_exec(current_thread()); @@ -3674,8 +3779,9 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) #if CONFIG_DTRACE dtrace_thread_didexec(imgp->ip_new_thread); - if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) + if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) { (*dtrace_proc_waitfor_hook)(p); + } #endif #if CONFIG_AUDIT @@ -3747,8 +3853,8 @@ exit_with_error: if (inherit != NULL) { ipc_importance_release(inherit); } - - return(error); + + return error; } @@ -3778,11 +3884,11 @@ copyinptr(user_addr_t froma, user_addr_t *toptr, int ptr_size) unsigned int i; error = copyin(froma, &i, 4); - *toptr = CAST_USER_ADDR_T(i); /* SAFE */ + *toptr = CAST_USER_ADDR_T(i); /* SAFE */ } else { error = copyin(froma, toptr, 8); } - return (error); + return error; } @@ -3807,13 +3913,13 @@ copyoutptr(user_addr_t ua, user_addr_t ptr, int ptr_size) if (ptr_size == 4) { /* 64 bit value containing 32 bit address */ - unsigned int i = CAST_DOWN_EXPLICIT(unsigned int,ua); /* SAFE */ + unsigned int i = CAST_DOWN_EXPLICIT(unsigned int, ua); /* SAFE */ error = copyout(&i, ptr, 4); } else { error = copyout(&ua, ptr, 8); } - return (error); + return error; } @@ -3895,25 +4001,25 @@ static int exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp) { proc_t p = vfs_context_proc(imgp->ip_vfs_context); - int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; - int ptr_area_size; + int ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 
8 : 4; + int ptr_area_size; void *ptr_buffer_start, *ptr_buffer; int string_size; - user_addr_t string_area; /* *argv[], *env[] */ - user_addr_t ptr_area; /* argv[], env[], applev[] */ - user_addr_t argc_area; /* argc */ - user_addr_t stack; + user_addr_t string_area; /* *argv[], *env[] */ + user_addr_t ptr_area; /* argv[], env[], applev[] */ + user_addr_t argc_area; /* argc */ + user_addr_t stack; int error; unsigned i; struct copyout_desc { - char *start_string; - int count; + char *start_string; + int count; #if CONFIG_DTRACE - user_addr_t *dtrace_cookie; + user_addr_t *dtrace_cookie; #endif - boolean_t null_term; + boolean_t null_term; } descriptors[] = { { .start_string = imgp->ip_startargv, @@ -4001,11 +4107,12 @@ exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp) * Copy out the entire strings area. */ error = copyout(imgp->ip_strings, string_area, - string_size); - if (error) + string_size); + if (error) { goto bad; + } - for (i = 0; i < sizeof(descriptors)/sizeof(descriptors[0]); i++) { + for (i = 0; i < sizeof(descriptors) / sizeof(descriptors[0]); i++) { char *cur_string = descriptors[i].start_string; int j; @@ -4023,14 +4130,14 @@ exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp) */ for (j = 0; j < descriptors[i].count; j++) { user_addr_t cur_address = string_area + (cur_string - imgp->ip_strings); - + /* Copy out the pointer to the current string. Alignment has been verified */ if (ptr_size == 8) { *(uint64_t *)ptr_buffer = (uint64_t)cur_address; } else { *(uint32_t *)ptr_buffer = (uint32_t)cur_address; } - + ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size); cur_string += strlen(cur_string) + 1; /* Only a NUL between strings in the same area */ } @@ -4041,7 +4148,7 @@ exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp) } else { *(uint32_t *)ptr_buffer = 0; } - + ptr_buffer = (void *)((uintptr_t)ptr_buffer + ptr_size); } } @@ -4050,17 +4157,19 @@ exec_copyout_strings(struct image_params *imgp, user_addr_t *stackp) * Copy out all our pointer arrays in bulk. */ error = copyout(ptr_buffer_start, ptr_area, - ptr_area_size); - if (error) + ptr_area_size); + if (error) { goto bad; + } /* argc (int32, stored in a ptr_size area) */ error = copyoutptr((user_addr_t)imgp->ip_argc, argc_area, ptr_size); - if (error) + if (error) { goto bad; + } bad: - return(error); + return error; } @@ -4095,14 +4204,14 @@ static int exec_extract_strings(struct image_params *imgp) { int error = 0; - int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT_ADDR) ? 8 : 4; + int ptr_size = (imgp->ip_flags & IMGPF_WAS_64BIT_ADDR) ? 8 : 4; int new_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; - user_addr_t argv = imgp->ip_user_argv; - user_addr_t envv = imgp->ip_user_envv; + user_addr_t argv = imgp->ip_user_argv; + user_addr_t envv = imgp->ip_user_envv; /* * Adjust space reserved for the path name by however much padding it - * needs. Doing this here since we didn't know if this would be a 32- + * needs. Doing this here since we didn't know if this would be a 32- * or 64-bit process back in exec_save_path. */ while (imgp->ip_strspace % new_ptr_size != 0) { @@ -4117,8 +4226,8 @@ exec_extract_strings(struct image_params *imgp) imgp->ip_startargv = imgp->ip_strendp; imgp->ip_argc = 0; - if((imgp->ip_flags & IMGPF_INTERPRET) != 0) { - user_addr_t arg; + if ((imgp->ip_flags & IMGPF_INTERPRET) != 0) { + user_addr_t arg; char *argstart, *ch; /* First, the arguments in the "#!" string are tokenized and extracted. 
*/ @@ -4149,8 +4258,9 @@ exec_extract_strings(struct image_params *imgp) } /* Error-check, regardless of whether this is the last interpreter arg or not */ - if (error) + if (error) { goto bad; + } if (imgp->ip_argspace < new_ptr_size) { error = E2BIG; goto bad; @@ -4167,8 +4277,9 @@ exec_extract_strings(struct image_params *imgp) * to locate their script arguments. */ error = copyinptr(argv, &arg, ptr_size); - if (error) + if (error) { goto bad; + } if (arg != 0LL) { argv += ptr_size; /* consume without using */ } @@ -4181,9 +4292,10 @@ exec_extract_strings(struct image_params *imgp) } else { error = exec_add_user_string(imgp, imgp->ip_user_fname, imgp->ip_seg, TRUE); } - - if (error) + + if (error) { goto bad; + } if (imgp->ip_argspace < new_ptr_size) { error = E2BIG; goto bad; @@ -4193,11 +4305,12 @@ exec_extract_strings(struct image_params *imgp) } while (argv != 0LL) { - user_addr_t arg; + user_addr_t arg; error = copyinptr(argv, &arg, ptr_size); - if (error) + if (error) { goto bad; + } if (arg == 0LL) { break; @@ -4206,18 +4319,19 @@ exec_extract_strings(struct image_params *imgp) argv += ptr_size; /* - * av[n...] = arg[n] - */ + * av[n...] = arg[n] + */ error = exec_add_user_string(imgp, arg, imgp->ip_seg, TRUE); - if (error) + if (error) { goto bad; + } if (imgp->ip_argspace < new_ptr_size) { error = E2BIG; goto bad; } imgp->ip_argspace -= new_ptr_size; /* to hold argv[] entry */ imgp->ip_argc++; - } + } /* Save space for argv[] NULL terminator */ if (imgp->ip_argspace < new_ptr_size) { @@ -4225,29 +4339,31 @@ exec_extract_strings(struct image_params *imgp) goto bad; } imgp->ip_argspace -= new_ptr_size; - + /* Note where the args ends and env begins. */ imgp->ip_endargv = imgp->ip_strendp; imgp->ip_envc = 0; /* Now, get the environment */ while (envv != 0LL) { - user_addr_t env; + user_addr_t env; error = copyinptr(envv, &env, ptr_size); - if (error) + if (error) { goto bad; + } envv += ptr_size; if (env == 0LL) { break; } /* - * av[n...] = env[n] - */ + * av[n...] = env[n] + */ error = exec_add_user_string(imgp, env, imgp->ip_seg, TRUE); - if (error) + if (error) { goto bad; + } if (imgp->ip_argspace < new_ptr_size) { error = E2BIG; goto bad; @@ -4273,7 +4389,7 @@ exec_extract_strings(struct image_params *imgp) imgp->ip_strspace--; imgp->ip_argspace--; } - + /* Note where the envv ends and applev begins. */ imgp->ip_endenvv = imgp->ip_strendp; @@ -4292,13 +4408,13 @@ bad: * 8-byte guard. Until somebody needs more than an 8-byte guard value, don't * do the work to construct them. */ -#define GUARD_VALUES 1 -#define GUARD_KEY "stack_guard=" +#define GUARD_VALUES 1 +#define GUARD_KEY "stack_guard=" /* * System malloc needs some entropy when it is initialized. 
*/ -#define ENTROPY_VALUES 2 +#define ENTROPY_VALUES 2 #define ENTROPY_KEY "malloc_entropy=" /* @@ -4329,9 +4445,9 @@ extern user64_addr_t commpage_text64_location; static int exec_add_entropy_key(struct image_params *imgp, - const char *key, - int values, - boolean_t embedNUL) + const char *key, + int values, + boolean_t embedNUL) { const int limit = 8; uint64_t entropy[limit]; @@ -4340,7 +4456,7 @@ exec_add_entropy_key(struct image_params *imgp, values = limit; } - read_random(entropy, sizeof(entropy[0]) * values); + read_random(entropy, sizeof(entropy[0]) * values); if (embedNUL) { entropy[0] &= ~(0xffull << 8); @@ -4362,7 +4478,7 @@ exec_add_entropy_key(struct image_params *imgp, */ static int exec_add_apple_strings(struct image_params *imgp, - const load_result_t *load_result) + const load_result_t *load_result) { int error; int img_ptr_size = (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) ? 8 : 4; @@ -4393,7 +4509,7 @@ exec_add_apple_strings(struct image_params *imgp, if ((proc_flags & _POSIX_SPAWN_NANO_ALLOCATOR) == _POSIX_SPAWN_NANO_ALLOCATOR) { const char *nano_string = NANO_ENGAGE_KEY; error = exec_add_user_string(imgp, CAST_USER_ADDR_T(nano_string), UIO_SYSSPACE, FALSE); - if (error){ + if (error) { goto bad; } imgp->ip_applec++; @@ -4432,7 +4548,7 @@ exec_add_apple_strings(struct image_params *imgp, } imgp->ip_applec++; - /* + /* * Add MAIN_STACK_KEY: Supplies the address and size of the main thread's * stack if it was allocated by the kernel. * @@ -4442,11 +4558,11 @@ exec_add_apple_strings(struct image_params *imgp, if (load_result->unixproc) { char stack_string[strlen(MAIN_STACK_KEY) + (HEX_STR_LEN + 1) * MAIN_STACK_VALUES + 1]; snprintf(stack_string, sizeof(stack_string), - MAIN_STACK_KEY "0x%llx,0x%llx,0x%llx,0x%llx", - (uint64_t)load_result->user_stack, - (uint64_t)load_result->user_stack_size, - (uint64_t)load_result->user_stack_alloc, - (uint64_t)load_result->user_stack_alloc_size); + MAIN_STACK_KEY "0x%llx,0x%llx,0x%llx,0x%llx", + (uint64_t)load_result->user_stack, + (uint64_t)load_result->user_stack_size, + (uint64_t)load_result->user_stack_alloc, + (uint64_t)load_result->user_stack_alloc_size); error = exec_add_user_string(imgp, CAST_USER_ADDR_T(stack_string), UIO_SYSSPACE, FALSE); if (error) { goto bad; @@ -4455,12 +4571,12 @@ exec_add_apple_strings(struct image_params *imgp, } if (imgp->ip_vattr) { - uint64_t fsid = get_va_fsid(imgp->ip_vattr); + uint64_t fsid = vnode_get_va_fsid(imgp->ip_vattr); uint64_t fsobjid = imgp->ip_vattr->va_fileid; char fsid_string[strlen(FSID_KEY) + strlen(FSID_MAX_STRING) + 1]; snprintf(fsid_string, sizeof(fsid_string), - FSID_KEY "0x%llx,0x%llx", fsid, fsobjid); + FSID_KEY "0x%llx,0x%llx", fsid, fsobjid); error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE); if (error) { goto bad; @@ -4468,26 +4584,26 @@ exec_add_apple_strings(struct image_params *imgp, imgp->ip_applec++; } - if (imgp->ip_dyld_fsid || imgp->ip_dyld_fsobjid ) { + if (imgp->ip_dyld_fsid || imgp->ip_dyld_fsobjid) { char fsid_string[strlen(DYLD_FSID_KEY) + strlen(FSID_MAX_STRING) + 1]; snprintf(fsid_string, sizeof(fsid_string), - DYLD_FSID_KEY "0x%llx,0x%llx", imgp->ip_dyld_fsid, imgp->ip_dyld_fsobjid); + DYLD_FSID_KEY "0x%llx,0x%llx", imgp->ip_dyld_fsid, imgp->ip_dyld_fsobjid); error = exec_add_user_string(imgp, CAST_USER_ADDR_T(fsid_string), UIO_SYSSPACE, FALSE); if (error) { goto bad; } imgp->ip_applec++; } - - uint8_t cdhash[SHA1_RESULTLEN]; + + uint8_t cdhash[SHA1_RESULTLEN]; int cdhash_error = ubc_cs_getcdhash(imgp->ip_vp,
imgp->ip_arch_offset, cdhash); if (cdhash_error == 0) { - char hash_string[strlen(CDHASH_KEY) + 2*SHA1_RESULTLEN + 1]; + char hash_string[strlen(CDHASH_KEY) + 2 * SHA1_RESULTLEN + 1]; strncpy(hash_string, CDHASH_KEY, sizeof(hash_string)); - char *p = hash_string + sizeof(CDHASH_KEY) - 1; - for (int i = 0; i < SHA1_RESULTLEN; i++) { + char *p = hash_string + sizeof(CDHASH_KEY) - 1; + for (int i = 0; i < SHA1_RESULTLEN; i++) { snprintf(p, 3, "%02x", (int) cdhash[i]); - p += 2; + p += 2; } error = exec_add_user_string(imgp, CAST_USER_ADDR_T(hash_string), UIO_SYSSPACE, FALSE); if (error) { @@ -4506,7 +4622,7 @@ bad: return error; } -#define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur) +#define unix_stack_size(p) (p->p_rlimit[RLIMIT_STACK].rlim_cur) /* * exec_check_permissions @@ -4534,9 +4650,10 @@ exec_check_permissions(struct image_params *imgp) kauth_action_t action; /* Only allow execution of regular files */ - if (!vnode_isreg(vp)) - return (EACCES); - + if (!vnode_isreg(vp)) { + return EACCES; + } + /* Get the file attributes that we will be using here and elsewhere */ VATTR_INIT(vap); VATTR_WANTED(vap, va_uid); @@ -4546,48 +4663,56 @@ exec_check_permissions(struct image_params *imgp) VATTR_WANTED(vap, va_fsid64); VATTR_WANTED(vap, va_fileid); VATTR_WANTED(vap, va_data_size); - if ((error = vnode_getattr(vp, vap, imgp->ip_vfs_context)) != 0) - return (error); + if ((error = vnode_getattr(vp, vap, imgp->ip_vfs_context)) != 0) { + return error; + } /* * Ensure that at least one execute bit is on - otherwise root * will always succeed, and we don't want that to happen unless the * file really is executable. */ - if (!vfs_authopaque(vnode_mount(vp)) && ((vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)) - return (EACCES); + if (!vfs_authopaque(vnode_mount(vp)) && ((vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0)) { + return EACCES; + } /* Disallow zero length files */ - if (vap->va_data_size == 0) - return (ENOEXEC); + if (vap->va_data_size == 0) { + return ENOEXEC; + } imgp->ip_arch_offset = (user_size_t)0; imgp->ip_arch_size = vap->va_data_size; /* Disable setuid-ness for traced programs or if MNT_NOSUID */ - if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_lflag & P_LTRACED)) + if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_lflag & P_LTRACED)) { vap->va_mode &= ~(VSUID | VSGID); + } /* * Disable _POSIX_SPAWN_ALLOW_DATA_EXEC and _POSIX_SPAWN_DISABLE_ASLR * flags for setuid/setgid binaries.
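Earlier in this hunk, exec_add_apple_strings() renders the cdhash two hex digits at a time: each snprintf writes into a three-byte window (two digits plus a NUL), and each NUL is overwritten by the next pair so only the final one terminates the string. A standalone sketch of that walk, with a made-up digest and a stand-in key literal (the real CDHASH_KEY definition lies outside this hunk):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SHA1_LEN 20     /* SHA1_RESULTLEN in the kernel sources */

int
main(void)
{
    uint8_t cdhash[SHA1_LEN];
    char hash_string[sizeof("cdhash=") - 1 + 2 * SHA1_LEN + 1];
    char *p;

    memset(cdhash, 0xab, sizeof(cdhash));   /* made-up digest */

    strcpy(hash_string, "cdhash=");         /* stand-in for CDHASH_KEY */
    p = hash_string + strlen("cdhash=");
    for (int i = 0; i < SHA1_LEN; i++) {
        /* Two digits plus a NUL per call; the NUL is overwritten by the
         * next pair, and the last one terminates the string. */
        snprintf(p, 3, "%02x", (unsigned)cdhash[i]);
        p += 2;
    }
    printf("%s\n", hash_string);            /* cdhash=abab...ab (40 digits) */
    return 0;
}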
*/ - if (vap->va_mode & (VSUID | VSGID)) + if (vap->va_mode & (VSUID | VSGID)) { imgp->ip_flags &= ~(IMGPF_ALLOW_DATA_EXEC | IMGPF_DISABLE_ASLR); + } #if CONFIG_MACF error = mac_vnode_check_exec(imgp->ip_vfs_context, vp, imgp); - if (error) - return (error); + if (error) { + return error; + } #endif - /* Check for execute permission */ - action = KAUTH_VNODE_EXECUTE; - /* Traced images must also be readable */ - if (p->p_lflag & P_LTRACED) - action |= KAUTH_VNODE_READ_DATA; - if ((error = vnode_authorize(vp, NULL, action, imgp->ip_vfs_context)) != 0) - return (error); + /* Check for execute permission */ + action = KAUTH_VNODE_EXECUTE; + /* Traced images must also be readable */ + if (p->p_lflag & P_LTRACED) { + action |= KAUTH_VNODE_READ_DATA; + } + if ((error = vnode_authorize(vp, NULL, action, imgp->ip_vfs_context)) != 0) { + return error; + } #if 0 /* Don't let it run if anyone had it open for writing */ @@ -4595,7 +4720,7 @@ exec_check_permissions(struct image_params *imgp) if (vp->v_writecount) { panic("going to return ETXTBSY %x", vp); vnode_unlock(vp); - return (ETXTBSY); + return ETXTBSY; } vnode_unlock(vp); #endif @@ -4603,7 +4728,7 @@ exec_check_permissions(struct image_params *imgp) /* XXX May want to indicate to underlying FS that vnode is open */ - return (error); + return error; } @@ -4636,17 +4761,17 @@ exec_check_permissions(struct image_params *imgp) static int exec_handle_sugid(struct image_params *imgp) { - proc_t p = vfs_context_proc(imgp->ip_vfs_context); - kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context); - kauth_cred_t my_cred, my_new_cred; - int i; - int leave_sugid_clear = 0; - int mac_reset_ipc = 0; - int error = 0; - task_t task = NULL; + proc_t p = vfs_context_proc(imgp->ip_vfs_context); + kauth_cred_t cred = vfs_context_ucred(imgp->ip_vfs_context); + kauth_cred_t my_cred, my_new_cred; + int i; + int leave_sugid_clear = 0; + int mac_reset_ipc = 0; + int error = 0; + task_t task = NULL; #if CONFIG_MACF - int mac_transition, disjoint_cred = 0; - int label_update_return = 0; + int mac_transition, disjoint_cred = 0; + int label_update_return = 0; /* * Determine whether a call to update the MAC label will result in the @@ -4658,14 +4783,14 @@ exec_handle_sugid(struct image_params *imgp) * slow down the exec fast path for normal binaries. */ mac_transition = mac_cred_check_label_update_execve( - imgp->ip_vfs_context, - imgp->ip_vp, - imgp->ip_arch_offset, - imgp->ip_scriptvp, - imgp->ip_scriptlabelp, - imgp->ip_execlabelp, - p, - imgp->ip_px_smpx); + imgp->ip_vfs_context, + imgp->ip_vp, + imgp->ip_arch_offset, + imgp->ip_scriptvp, + imgp->ip_scriptlabelp, + imgp->ip_execlabelp, + p, + imgp->ip_px_smpx); #endif OSBitAndAtomic(~((uint32_t)P_SUGID), &p->p_flag); @@ -4685,11 +4810,10 @@ exec_handle_sugid(struct image_params *imgp) * such a call. 
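The condition just below is the classic set-id test: sugid handling is needed when the setuid bit is on and the file's owner differs from the effective uid, or the setgid bit is on and the file's group is not one the credential already holds. A simplified sketch of the predicate (exec_is_sugid is a hypothetical name, and the kernel version additionally consults kauth_cred_ismember_gid() so supplementary group membership can leave the handling disarmed):

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Simplified form of the VSUID/VSGID decision that follows. */
static bool
exec_is_sugid(mode_t mode, uid_t euid, gid_t egid, uid_t file_uid, gid_t file_gid)
{
    if ((mode & S_ISUID) && euid != file_uid) {
        return true;    /* setuid to a different owner */
    }
    if ((mode & S_ISGID) && egid != file_gid) {
        return true;    /* setgid to a different group */
    }
    return false;
}

int
main(void)
{
    /* setuid-root binary run by uid 501: credentials will transition */
    printf("%d\n", exec_is_sugid(S_ISUID | 0755, 501, 20, 0, 0));   /* 1 */
    /* the same binary run by root itself: nothing to do */
    printf("%d\n", exec_is_sugid(S_ISUID | 0755, 0, 0, 0, 0));      /* 0 */
    return 0;
}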
*/ if (((imgp->ip_origvattr->va_mode & VSUID) != 0 && - kauth_cred_getuid(cred) != imgp->ip_origvattr->va_uid) || + kauth_cred_getuid(cred) != imgp->ip_origvattr->va_uid) || ((imgp->ip_origvattr->va_mode & VSGID) != 0 && - ((kauth_cred_ismember_gid(cred, imgp->ip_origvattr->va_gid, &leave_sugid_clear) || !leave_sugid_clear) || - (kauth_cred_getgid(cred) != imgp->ip_origvattr->va_gid)))) { - + ((kauth_cred_ismember_gid(cred, imgp->ip_origvattr->va_gid, &leave_sugid_clear) || !leave_sugid_clear) || + (kauth_cred_getgid(cred) != imgp->ip_origvattr->va_gid)))) { #if CONFIG_MACF /* label for MAC transition and neither VSUID nor VSGID */ handle_mac_transition: @@ -4776,13 +4900,13 @@ handle_mac_transition: #endif /* !SECURE_KERNEL */ #if CONFIG_MACF - /* + /* * If a policy has indicated that it will transition the label, * before making the call into the MAC policies, get a new * duplicate credential, so they can modify it without * modifying any others sharing it. */ - if (mac_transition) { + if (mac_transition) { /* * This hook may generate upcalls that require * importance donation from the kernel. @@ -4791,16 +4915,16 @@ handle_mac_transition: thread_t thread = current_thread(); thread_enable_send_importance(thread, TRUE); kauth_proc_label_update_execve(p, - imgp->ip_vfs_context, - imgp->ip_vp, - imgp->ip_arch_offset, - imgp->ip_scriptvp, - imgp->ip_scriptlabelp, - imgp->ip_execlabelp, - &imgp->ip_csflags, - imgp->ip_px_smpx, - &disjoint_cred, /* will be non zero if disjoint */ - &label_update_return); + imgp->ip_vfs_context, + imgp->ip_vp, + imgp->ip_arch_offset, + imgp->ip_scriptvp, + imgp->ip_scriptlabelp, + imgp->ip_execlabelp, + &imgp->ip_csflags, + imgp->ip_px_smpx, + &disjoint_cred, /* will be non zero if disjoint */ + &label_update_return); thread_enable_send_importance(thread, FALSE); if (disjoint_cred) { @@ -4815,13 +4939,13 @@ handle_mac_transition: */ leave_sugid_clear = 0; } - + imgp->ip_mac_return = label_update_return; } - + mac_reset_ipc = mac_proc_check_inherit_ipc_ports(p, p->p_textvp, p->p_textoff, imgp->ip_vp, imgp->ip_arch_offset, imgp->ip_scriptvp); -#endif /* CONFIG_MACF */ +#endif /* CONFIG_MACF */ /* * If 'leave_sugid_clear' is non-zero, then we passed the @@ -4829,7 +4953,7 @@ handle_mac_transition: * the previous cred was a member of the VSGID group, but * that it was not the default at the time of the execve, * and that the post-labelling credential was not disjoint. - * So we don't set the P_SUGID or reset mach ports and fds + * So we don't set the P_SUGID or reset mach ports and fds * on the basis of simply running this code. */ if (mac_reset_ipc || !leave_sugid_clear) { @@ -4840,9 +4964,9 @@ handle_mac_transition: * task/thread after. */ ipc_task_reset((imgp->ip_new_thread != NULL) ? - get_threadtask(imgp->ip_new_thread) : p->task); + get_threadtask(imgp->ip_new_thread) : p->task); ipc_thread_reset((imgp->ip_new_thread != NULL) ? - imgp->ip_new_thread : current_thread()); + imgp->ip_new_thread : current_thread()); } if (!leave_sugid_clear) { @@ -4860,17 +4984,17 @@ handle_mac_transition: * to libc. 
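The loop that follows is the kernel-side analog of a familiar userland idiom: guarantee that descriptors 0 through 2 exist before a set-id image runs, so its first open() cannot land on a standard descriptor. A sketch of the same guarantee in userland (ensure_std_fds is a hypothetical helper; the kernel loop instead uses falloc(), which likewise hands out the lowest free descriptor):

#include <fcntl.h>
#include <unistd.h>

/* Ensure fds 0..2 are open, pointing at /dev/null where they were closed. */
static void
ensure_std_fds(void)
{
    for (int fd = 0; fd < 3; fd++) {
        if (fcntl(fd, F_GETFD) == -1) {         /* fd is not open */
            int flags = (fd == 0) ? O_RDONLY : O_WRONLY;
            int nfd = open("/dev/null", flags); /* usually lands on fd itself */
            if (nfd != -1 && nfd != fd) {
                dup2(nfd, fd);                  /* move it onto the hole */
                close(nfd);
            }
        }
    }
}

int
main(void)
{
    ensure_std_fds();
    return 0;
}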
*/ for (i = 0; i < 3; i++) { - - if (p->p_fd->fd_ofiles[i] != NULL) + if (p->p_fd->fd_ofiles[i] != NULL) { continue; + } /* * Do the kernel equivalent of * - * if i == 0 - * (void) open("/dev/null", O_RDONLY); - * else - * (void) open("/dev/null", O_WRONLY); + * if i == 0 + * (void) open("/dev/null", O_RDONLY); + * else + * (void) open("/dev/null", O_WRONLY); */ struct fileproc *fp; @@ -4878,14 +5002,16 @@ handle_mac_transition: int flag; struct nameidata *ndp = NULL; - if (i == 0) + if (i == 0) { flag = FREAD; - else + } else { flag = FWRITE; + } if ((error = falloc(p, - &fp, &indx, imgp->ip_vfs_context)) != 0) + &fp, &indx, imgp->ip_vfs_context)) != 0) { continue; + } MALLOC(ndp, struct nameidata *, sizeof(*ndp), M_TEMP, M_WAITOK | M_ZERO); if (ndp == NULL) { @@ -4936,7 +5062,7 @@ handle_mac_transition: } } -#endif /* CONFIG_MACF */ +#endif /* CONFIG_MACF */ /* * Implement the semantic where the effective user and group become @@ -4948,7 +5074,7 @@ handle_mac_transition: */ for (;;) { my_cred = kauth_cred_proc_ref(p); - my_new_cred = kauth_cred_setsvuidgid(my_cred, kauth_cred_getuid(my_cred), kauth_cred_getgid(my_cred)); + my_new_cred = kauth_cred_setsvuidgid(my_cred, kauth_cred_getuid(my_cred), kauth_cred_getgid(my_cred)); if (my_new_cred == my_cred) { kauth_cred_unref(&my_cred); @@ -4977,7 +5103,7 @@ handle_mac_transition: /* Update the process' identity version and set the security token */ - p->p_idversion++; + p->p_idversion = OSIncrementAtomic(&nextpidversion); if (imgp->ip_new_thread != NULL) { task = get_threadtask(imgp->ip_new_thread); @@ -4986,7 +5112,7 @@ handle_mac_transition: } set_security_token_task_internal(p, task); - return(error); + return error; } @@ -5008,17 +5134,20 @@ handle_mac_transition: * !KERN_SUCCESS Mach failure code */ static kern_return_t -create_unix_stack(vm_map_t map, load_result_t* load_result, - proc_t p) +create_unix_stack(vm_map_t map, load_result_t* load_result, + proc_t p) { - mach_vm_size_t size, prot_size; - mach_vm_offset_t addr, prot_addr; - kern_return_t kr; + mach_vm_size_t size, prot_size; + mach_vm_offset_t addr, prot_addr; + kern_return_t kr; + + mach_vm_address_t user_stack = load_result->user_stack; - mach_vm_address_t user_stack = load_result->user_stack; - proc_lock(p); p->user_stack = user_stack; + if (load_result->custom_stack) { + p->p_lflag |= P_LCUSTOM_STACK; + } proc_unlock(p); if (load_result->user_stack_alloc_size > 0) { @@ -5034,12 +5163,12 @@ create_unix_stack(vm_map_t map, load_result_t* load_result, } addr = mach_vm_trunc_page(load_result->user_stack - size); kr = mach_vm_allocate_kernel(map, &addr, size, - VM_FLAGS_FIXED, VM_MEMORY_STACK); + VM_FLAGS_FIXED, VM_MEMORY_STACK); if (kr != KERN_SUCCESS) { // Can't allocate at default location, try anywhere addr = 0; kr = mach_vm_allocate_kernel(map, &addr, size, - VM_FLAGS_ANYWHERE, VM_MEMORY_STACK); + VM_FLAGS_ANYWHERE, VM_MEMORY_STACK); if (kr != KERN_SUCCESS) { return kr; } @@ -5059,7 +5188,9 @@ create_unix_stack(vm_map_t map, load_result_t* load_result, * size limit for this process. 
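In create_unix_stack() below, once the stack region is allocated, mach_vm_protect() marks the pages beneath the usable portion VM_PROT_NONE so a runaway stack faults instead of silently scribbling on adjacent memory. A userland analog with mmap/mprotect, assuming 4 KB pages purely for the sketch:

#include <stdio.h>
#include <sys/mman.h>

int
main(void)
{
    const size_t page = 4096;           /* sketch assumption, not vm_page_size */
    const size_t total = 16 * page;     /* guard region + usable stack area */

    char *base = mmap(NULL, total, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (base == MAP_FAILED) {
        return 1;
    }

    /* Protect the low end, like create_unix_stack() protecting
     * prot_size bytes at the bottom of the allocation. */
    if (mprotect(base, page, PROT_NONE) != 0) {
        return 1;
    }

    base[total - 1] = 'x';              /* top of the "stack": fine */
    /* base[0] = 'x'; */                /* would fault: guard page */
    printf("guard installed below %zu usable bytes\n", total - page);
    return 0;
}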
*/ if (load_result->user_stack_size == 0) { + proc_list_lock(); load_result->user_stack_size = unix_stack_size(p); + proc_list_unlock(); prot_size = mach_vm_trunc_page(size - load_result->user_stack_size); } else { prot_size = PAGE_SIZE; @@ -5067,10 +5198,10 @@ create_unix_stack(vm_map_t map, load_result_t* load_result, prot_addr = addr; kr = mach_vm_protect(map, - prot_addr, - prot_size, - FALSE, - VM_PROT_NONE); + prot_addr, + prot_size, + FALSE, + VM_PROT_NONE); if (kr != KERN_SUCCESS) { (void)mach_vm_deallocate(map, addr, size); return kr; @@ -5093,7 +5224,7 @@ create_unix_stack(vm_map_t map, load_result_t* load_result, * path NULL terminated path * * Returns: KERN_SUCCESS Success - * !KERN_SUCCESS See execve/mac_execve for error codes + * !KERN_SUCCESS See execve/mac_execve for error codes * * Notes: The process that is passed in is the first manufactured * process on the system, and gets here via bsd_ast() firing @@ -5125,8 +5256,9 @@ load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char* path) size_t path_length = strlen(path) + 1; argv0 = scratch_addr; error = copyout(path, argv0, path_length); - if (error) + if (error) { return error; + } scratch_addr = USER_ADDR_ALIGN(scratch_addr + path_length, sizeof(user_addr_t)); @@ -5136,12 +5268,13 @@ load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char* path) */ if (boothowto & RB_SINGLE) { const char *init_args = "-s"; - size_t init_args_length = strlen(init_args)+1; + size_t init_args_length = strlen(init_args) + 1; argv1 = scratch_addr; error = copyout(init_args, argv1, init_args_length); - if (error) + if (error) { return error; + } scratch_addr = USER_ADDR_ALIGN(scratch_addr + init_args_length, sizeof(user_addr_t)); } @@ -5154,8 +5287,9 @@ load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char* path) argv64bit[2] = USER_ADDR_NULL; error = copyout(argv64bit, scratch_addr, sizeof(argv64bit)); - if (error) + if (error) { return error; + } } else { user32_addr_t argv32bit[3] = {}; @@ -5164,8 +5298,9 @@ load_init_program_at_path(proc_t p, user_addr_t scratch_addr, const char* path) argv32bit[2] = USER_ADDR_NULL; error = copyout(argv32bit, scratch_addr, sizeof(argv32bit)); - if (error) + if (error) { return error; + } } /* @@ -5242,13 +5377,14 @@ load_init_program(proc_t p) if (PE_parse_boot_argn("launchdsuffix", launchd_suffix, sizeof(launchd_suffix))) { char launchd_path[128]; boolean_t is_release_suffix = ((launchd_suffix[0] == 0) || - (strcmp(launchd_suffix, "release") == 0)); + (strcmp(launchd_suffix, "release") == 0)); if (is_release_suffix) { printf("load_init_program: attempting to load /sbin/launchd\n"); error = load_init_program_at_path(p, (user_addr_t)scratch_addr, "/sbin/launchd"); - if (!error) + if (!error) { return; + } panic("Process 1 exec of launchd.release failed, errno %d", error); } else { @@ -5267,7 +5403,7 @@ load_init_program(proc_t p) #endif error = ENOENT; - for (i = 0; i < sizeof(init_programs)/sizeof(init_programs[0]); i++) { + for (i = 0; i < sizeof(init_programs) / sizeof(init_programs[0]); i++) { printf("load_init_program: attempting to load %s\n", init_programs[i]); error = load_init_program_at_path(p, (user_addr_t)scratch_addr, init_programs[i]); if (!error) { @@ -5277,7 +5413,7 @@ load_init_program(proc_t p) } } - panic("Process 1 exec of %s failed, errno %d", ((i == 0) ? "" : init_programs[i-1]), error); + panic("Process 1 exec of %s failed, errno %d", ((i == 0) ? 
"" : init_programs[i - 1]), error); } /* @@ -5374,29 +5510,34 @@ static int execargs_waiters = 0; lck_mtx_t *execargs_cache_lock; static void -execargs_lock_lock(void) { +execargs_lock_lock(void) +{ lck_mtx_lock_spin(execargs_cache_lock); } static void -execargs_lock_unlock(void) { +execargs_lock_unlock(void) +{ lck_mtx_unlock(execargs_cache_lock); } static wait_result_t -execargs_lock_sleep(void) { - return(lck_mtx_sleep(execargs_cache_lock, LCK_SLEEP_DEFAULT, &execargs_free_count, THREAD_INTERRUPTIBLE)); +execargs_lock_sleep(void) +{ + return lck_mtx_sleep(execargs_cache_lock, LCK_SLEEP_DEFAULT, &execargs_free_count, THREAD_INTERRUPTIBLE); } static kern_return_t -execargs_purgeable_allocate(char **execarg_address) { +execargs_purgeable_allocate(char **execarg_address) +{ kern_return_t kr = vm_allocate_kernel(bsd_pageable_map, (vm_offset_t *)execarg_address, BSD_PAGEABLE_SIZE_PER_EXEC, VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE, VM_KERN_MEMORY_NONE); assert(kr == KERN_SUCCESS); return kr; } static kern_return_t -execargs_purgeable_reference(void *execarg_address) { +execargs_purgeable_reference(void *execarg_address) +{ int state = VM_PURGABLE_NONVOLATILE; kern_return_t kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state); @@ -5405,7 +5546,8 @@ execargs_purgeable_reference(void *execarg_address) { } static kern_return_t -execargs_purgeable_volatilize(void *execarg_address) { +execargs_purgeable_volatilize(void *execarg_address) +{ int state = VM_PURGABLE_VOLATILE | VM_PURGABLE_ORDERING_OBSOLETE; kern_return_t kr; kr = vm_purgable_control(bsd_pageable_map, (vm_offset_t) execarg_address, VM_PURGABLE_SET_STATE, &state); @@ -5416,7 +5558,8 @@ execargs_purgeable_volatilize(void *execarg_address) { } static void -execargs_wakeup_waiters(void) { +execargs_wakeup_waiters(void) +{ thread_wakeup(&execargs_free_count); } @@ -5435,7 +5578,7 @@ execargs_alloc(struct image_params *imgp) execargs_waiters--; if (res != THREAD_AWAKENED) { execargs_lock_unlock(); - return (EINTR); + return EINTR; } } @@ -5454,25 +5597,25 @@ execargs_alloc(struct image_params *imgp) assert(execargs_free_count >= 0); execargs_lock_unlock(); - + if (cache_index == -1) { kret = execargs_purgeable_allocate(&imgp->ip_strings); - } - else + } else { kret = execargs_purgeable_reference(imgp->ip_strings); + } assert(kret == KERN_SUCCESS); if (kret != KERN_SUCCESS) { - return (ENOMEM); + return ENOMEM; } /* last page used to read in file headers */ - imgp->ip_vdata = imgp->ip_strings + ( NCARGS + PAGE_SIZE ); + imgp->ip_vdata = imgp->ip_strings + (NCARGS + PAGE_SIZE); imgp->ip_strendp = imgp->ip_strings; imgp->ip_argspace = NCARGS; - imgp->ip_strspace = ( NCARGS + PAGE_SIZE ); + imgp->ip_strspace = (NCARGS + PAGE_SIZE); - return (0); + return 0; } /* @@ -5494,7 +5637,7 @@ execargs_free(struct image_params *imgp) kern_return_t kret; int i; boolean_t needs_wakeup = FALSE; - + kret = execargs_purgeable_volatilize(imgp->ip_strings); execargs_lock_lock(); @@ -5511,15 +5654,17 @@ execargs_free(struct image_params *imgp) assert(imgp->ip_strings == NULL); - if (execargs_waiters > 0) + if (execargs_waiters > 0) { needs_wakeup = TRUE; - + } + execargs_lock_unlock(); - if (needs_wakeup == TRUE) + if (needs_wakeup == TRUE) { execargs_wakeup_waiters(); + } - return ((kret == KERN_SUCCESS ? 0 : EINVAL)); + return kret == KERN_SUCCESS ? 
0 : EINVAL; } static void @@ -5533,8 +5678,9 @@ exec_resettextvp(proc_t p, struct image_params *imgp) vp = imgp->ip_vp; offset = imgp->ip_arch_offset; - if (vp == NULLVP) + if (vp == NULLVP) { panic("exec_resettextvp: expected valid vp"); + } ret = vnode_ref(vp); proc_lock(p); @@ -5542,46 +5688,47 @@ exec_resettextvp(proc_t p, struct image_params *imgp) p->p_textvp = vp; p->p_textoff = offset; } else { - p->p_textvp = NULLVP; /* this is paranoia */ + p->p_textvp = NULLVP; /* this is paranoia */ p->p_textoff = 0; } proc_unlock(p); - if ( tvp != NULLVP) { + if (tvp != NULLVP) { if (vnode_getwithref(tvp) == 0) { vnode_rele(tvp); vnode_put(tvp); } - } - + } } // Includes the 0-byte (therefore "SIZE" instead of "LEN"). static const size_t CS_CDHASH_STRING_SIZE = CS_CDHASH_LEN * 2 + 1; -static void cdhash_to_string(char str[CS_CDHASH_STRING_SIZE], uint8_t const * const cdhash) { - static char const nibble[] = "0123456789abcdef"; - - /* Apparently still the safest way to get a hex representation - * of binary data. - * xnu's printf routines have %*D/%20D in theory, but "not really", see: - * confusion around %*D/%nD in printf - */ - for (int i = 0; i < CS_CDHASH_LEN; ++i) { - str[i*2] = nibble[(cdhash[i] & 0xf0) >> 4]; - str[i*2+1] = nibble[cdhash[i] & 0x0f]; - } - str[CS_CDHASH_STRING_SIZE - 1] = 0; +static void +cdhash_to_string(char str[CS_CDHASH_STRING_SIZE], uint8_t const * const cdhash) +{ + static char const nibble[] = "0123456789abcdef"; + + /* Apparently still the safest way to get a hex representation + * of binary data. + * xnu's printf routines have %*D/%20D in theory, but "not really", see: + * confusion around %*D/%nD in printf + */ + for (int i = 0; i < CS_CDHASH_LEN; ++i) { + str[i * 2] = nibble[(cdhash[i] & 0xf0) >> 4]; + str[i * 2 + 1] = nibble[cdhash[i] & 0x0f]; + } + str[CS_CDHASH_STRING_SIZE - 1] = 0; } /* * __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__ - * + * * Description: Waits for the userspace daemon to respond to the request - * we made. Function declared non inline to be visible in + * we made. Function declared non inline to be visible in * stackshots and spindumps as well as debugging. */ -__attribute__((noinline)) int +__attribute__((noinline)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL__(mach_port_t task_access_port, int32_t new_pid) { return find_code_signature(task_access_port, new_pid); @@ -5610,7 +5757,7 @@ check_for_signature(proc_t p, struct image_params *imgp) proc_unlock(p); /* Set the switch_protect flag on the map */ - if(p->p_csflags & (CS_HARD|CS_KILL)) { + if (p->p_csflags & (CS_HARD | CS_KILL)) { vm_map_switch_protect(get_task_map(p->task), TRUE); } @@ -5620,9 +5767,8 @@ check_for_signature(proc_t p, struct image_params *imgp) * approve of exec, kill and return immediately. */ if (imgp->ip_mac_return != 0) { - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY, 0, 0); signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_SECURITY_POLICY); error = imgp->ip_mac_return; unexpected_failure = TRUE; @@ -5637,22 +5783,22 @@ check_for_signature(proc_t p, struct image_params *imgp) } /* If the code signature came through the image activation path, we skip the - * taskgated / externally attached path. */ + * taskgated / externally attached path. 
*/ if (imgp->ip_csflags & CS_SIGNED) { error = 0; goto done; } - /* The rest of the code is for signatures that either already have been externally - * attached (likely, but not necessarily by a previous run through the taskgated - * path), or that will now be attached by taskgated. */ - + /* The rest of the code is for signatures that either already have been externally + * attached (likely, but not necessarily by a previous run through the taskgated + * path), or that will now be attached by taskgated. */ + kr = task_get_task_access_port(p->task, &port); if (KERN_SUCCESS != kr || !IPC_PORT_VALID(port)) { error = 0; if (require_success) { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT, 0, 0); + p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT, 0, 0); signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASK_ACCESS_PORT); error = EACCES; } @@ -5661,9 +5807,9 @@ check_for_signature(proc_t p, struct image_params *imgp) /* * taskgated returns KERN_SUCCESS if it has completed its work - * and the exec should continue, KERN_FAILURE if the exec should - * fail, or it may error out with different error code in an - * event of mig failure (e.g. process was signalled during the + * and the exec should continue, KERN_FAILURE if the exec should + * fail, or it may error out with different error code in an + * event of mig failure (e.g. process was signalled during the * rpc call, taskgated died, mig server died etc.). */ @@ -5676,14 +5822,14 @@ check_for_signature(proc_t p, struct image_params *imgp) error = EACCES; KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG, 0, 0); + p->p_pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG, 0, 0); signature_failure_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG); goto done; default: error = EACCES; KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER, 0, 0); signature_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_TASKGATED_OTHER); unexpected_failure = TRUE; goto done; @@ -5699,34 +5845,31 @@ check_for_signature(proc_t p, struct image_params *imgp) * Adhoc signed non-platform binaries without special cs_flags and without any * entitlements (unrestricted ones still pass AMFI). */ if ( - /* Revalidate the blob if necessary through bumped generation count. */ - (ubc_cs_generation_check(p->p_textvp) == 0 || - ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0) == 0) && - /* Only CS_ADHOC, no CS_KILL, CS_HARD etc. */ + /* Revalidate the blob if necessary through bumped generation count. */ + (ubc_cs_generation_check(p->p_textvp) == 0 || + ubc_cs_blob_revalidate(p->p_textvp, csb, imgp, 0) == 0) && + /* Only CS_ADHOC, no CS_KILL, CS_HARD etc. */ (csb->csb_flags & CS_ALLOWED_MACHO) == CS_ADHOC && /* If it has a CMS blob, it's not adhoc. The CS_ADHOC flag can lie. 
*/ csblob_find_blob_bytes((const uint8_t *)csb->csb_mem_kaddr, csb->csb_mem_size, - CSSLOT_SIGNATURESLOT, - CSMAGIC_BLOBWRAPPER) == NULL && + CSSLOT_SIGNATURESLOT, + CSMAGIC_BLOBWRAPPER) == NULL && /* It could still be in a trust cache (unlikely with CS_ADHOC), or a magic path. */ csb->csb_platform_binary == 0 && /* No entitlements, not even unrestricted ones. */ - csb->csb_entitlements_blob == NULL) { - + csb->csb_entitlements_blob == NULL) { proc_lock(p); p->p_csflags |= CS_SIGNED | CS_VALID; proc_unlock(p); - } else { uint8_t cdhash[CS_CDHASH_LEN]; char cdhash_string[CS_CDHASH_STRING_SIZE]; proc_getcdhash(p, cdhash); cdhash_to_string(cdhash_string, cdhash); printf("ignoring detached code signature on '%s' with cdhash '%s' " - "because it is invalid, or not a simple adhoc signature.\n", - p->p_name, cdhash_string); + "because it is invalid, or not a simple adhoc signature.\n", + p->p_name, cdhash_string); } - } } @@ -5742,16 +5885,17 @@ done: cdhash_to_string(cdhash_string, cdhash); os_log(peLog, "CS Platform Exec Logging: Executing platform signed binary " - "'%s' with cdhash %s\n", p->p_name, cdhash_string); + "'%s' with cdhash %s\n", p->p_name, cdhash_string); } } else { - if (!unexpected_failure) + if (!unexpected_failure) { p->p_csflags |= CS_KILLED; + } /* make very sure execution fails */ if (vfexec || spawn) { assert(signature_failure_reason != OS_REASON_NULL); psignal_vfork_with_reason(p, p->task, imgp->ip_new_thread, - SIGKILL, signature_failure_reason); + SIGKILL, signature_failure_reason); signature_failure_reason = OS_REASON_NULL; error = 0; } else { @@ -5776,7 +5920,8 @@ done: * in the process' page tables, we prefault some pages if * possible. Errors are non-fatal. */ -static void exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t *load_result) +static void +exec_prefault_data(proc_t p __unused, struct image_params *imgp, load_result_t *load_result) { int ret; size_t expected_all_image_infos_size; @@ -5785,12 +5930,12 @@ static void exec_prefault_data(proc_t p __unused, struct image_params *imgp, loa * Prefault executable or dyld entry point. */ vm_fault(current_map(), - vm_map_trunc_page(load_result->entry_point, - vm_map_page_mask(current_map())), - VM_PROT_READ | VM_PROT_EXECUTE, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); - + vm_map_trunc_page(load_result->entry_point, + vm_map_page_mask(current_map())), + VM_PROT_READ | VM_PROT_EXECUTE, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); + if (imgp->ip_flags & IMGPF_IS_64BIT_ADDR) { expected_all_image_infos_size = sizeof(struct user64_dyld_all_image_infos); } else { @@ -5799,11 +5944,11 @@ static void exec_prefault_data(proc_t p __unused, struct image_params *imgp, loa /* Decode dyld anchor structure from */ if (load_result->dynlinker && - load_result->all_image_info_addr && - load_result->all_image_info_size >= expected_all_image_infos_size) { + load_result->all_image_info_addr && + load_result->all_image_info_size >= expected_all_image_infos_size) { union { - struct user64_dyld_all_image_infos infos64; - struct user32_dyld_all_image_infos infos32; + struct user64_dyld_all_image_infos infos64; + struct user32_dyld_all_image_infos infos32; } all_image_infos; /* @@ -5811,26 +5956,25 @@ static void exec_prefault_data(proc_t p __unused, struct image_params *imgp, loa * and recovery path. 
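In exec_prefault_data() below, the test ((addr & PAGE_MASK) + size > PAGE_SIZE) decides whether all_image_infos crosses a page boundary and therefore needs a second prefault for its tail. The arithmetic is worth seeing in isolation (straddles_page is a hypothetical helper; a 4 KB page is assumed for the sketch):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096u         /* sketch value */
#define PAGE_MASK (PAGE_SIZE - 1)

/* True when [addr, addr + size) crosses a page boundary: the offset
 * within the page plus the object size exceeds one page. */
static bool
straddles_page(uint64_t addr, uint64_t size)
{
    return (addr & PAGE_MASK) + size > PAGE_SIZE;
}

int
main(void)
{
    printf("%d\n", straddles_page(0x1000, 0x100));  /* 0: fits in one page */
    printf("%d\n", straddles_page(0x1F80, 0x100));  /* 1: spills into the next */
    return 0;
}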
*/ vm_fault(current_map(), - vm_map_trunc_page(load_result->all_image_info_addr, - vm_map_page_mask(current_map())), - VM_PROT_READ | VM_PROT_WRITE, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); + vm_map_trunc_page(load_result->all_image_info_addr, + vm_map_page_mask(current_map())), + VM_PROT_READ | VM_PROT_WRITE, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); if ((load_result->all_image_info_addr & PAGE_MASK) + expected_all_image_infos_size > PAGE_SIZE) { /* all_image_infos straddles a page */ vm_fault(current_map(), - vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1, - vm_map_page_mask(current_map())), - VM_PROT_READ | VM_PROT_WRITE, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); + vm_map_trunc_page(load_result->all_image_info_addr + expected_all_image_infos_size - 1, + vm_map_page_mask(current_map())), + VM_PROT_READ | VM_PROT_WRITE, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); } ret = copyin(load_result->all_image_info_addr, - &all_image_infos, - expected_all_image_infos_size); + &all_image_infos, + expected_all_image_infos_size); if (ret == 0 && all_image_infos.infos32.version >= DYLD_ALL_IMAGE_INFOS_ADDRESS_MINIMUM_VERSION) { - user_addr_t notification_address; user_addr_t dyld_image_address; user_addr_t dyld_version_address; @@ -5866,38 +6010,38 @@ static void exec_prefault_data(proc_t p __unused, struct image_params *imgp, loa #if 0 kprintf("exec_prefault: 0x%016llx 0x%08x 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", - (uint64_t)load_result->all_image_info_addr, - all_image_infos.infos32.version, - (uint64_t)notification_address, - (uint64_t)dyld_image_address, - (uint64_t)dyld_version_address, - (uint64_t)dyld_all_image_infos_address); + (uint64_t)load_result->all_image_info_addr, + all_image_infos.infos32.version, + (uint64_t)notification_address, + (uint64_t)dyld_image_address, + (uint64_t)dyld_version_address, + (uint64_t)dyld_all_image_infos_address); #endif vm_fault(current_map(), - vm_map_trunc_page(notification_address + dyld_slide_amount, - vm_map_page_mask(current_map())), - VM_PROT_READ | VM_PROT_EXECUTE, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); + vm_map_trunc_page(notification_address + dyld_slide_amount, + vm_map_page_mask(current_map())), + VM_PROT_READ | VM_PROT_EXECUTE, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); vm_fault(current_map(), - vm_map_trunc_page(dyld_image_address + dyld_slide_amount, - vm_map_page_mask(current_map())), - VM_PROT_READ | VM_PROT_EXECUTE, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); + vm_map_trunc_page(dyld_image_address + dyld_slide_amount, + vm_map_page_mask(current_map())), + VM_PROT_READ | VM_PROT_EXECUTE, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); vm_fault(current_map(), - vm_map_trunc_page(dyld_version_address + dyld_slide_amount, - vm_map_page_mask(current_map())), - VM_PROT_READ, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); + vm_map_trunc_page(dyld_version_address + dyld_slide_amount, + vm_map_page_mask(current_map())), + VM_PROT_READ, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); vm_fault(current_map(), - vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount, - vm_map_page_mask(current_map())), - VM_PROT_READ | VM_PROT_WRITE, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); + vm_map_trunc_page(dyld_all_image_infos_address + dyld_slide_amount, + vm_map_page_mask(current_map())), + VM_PROT_READ | VM_PROT_WRITE, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); } 
} } diff --git a/bsd/kern/kern_exit.c b/bsd/kern/kern_exit.c index edffa1854..e958587b4 100644 --- a/bsd/kern/kern_exit.c +++ b/bsd/kern/kern_exit.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ @@ -71,7 +71,7 @@ * is included in support of clause 2.2 (b) of the Apple Public License, * Version 2.0. */ - + #include #include #include @@ -133,7 +133,7 @@ #include #if SYSV_SHM -#include /* shmexit */ +#include /* shmexit */ #endif /* SYSV_SHM */ #if CONFIG_PERSONAS #include @@ -154,23 +154,23 @@ void dtrace_proc_exit(proc_t p); void proc_prepareexit(proc_t p, int rv, boolean_t perf_notify); void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task, - mach_exception_data_type_t code, mach_exception_data_type_t subcode, - uint64_t *udata_buffer, int num_udata, void *reason); + mach_exception_data_type_t code, mach_exception_data_type_t subcode, + uint64_t *udata_buffer, int num_udata, void *reason); mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p); void vfork_exit(proc_t p, int rv); __private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p); __private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p); static int reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoinit, int locked, int droplock); static void populate_corpse_crashinfo(proc_t p, task_t corpse_task, - struct rusage_superset *rup, mach_exception_data_type_t code, - mach_exception_data_type_t subcode, uint64_t *udata_buffer, - int num_udata, os_reason_t reason); + struct rusage_superset *rup, mach_exception_data_type_t code, + mach_exception_data_type_t subcode, uint64_t *udata_buffer, + int num_udata, os_reason_t reason); static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode); extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, char *buffer, uint32_t buffersize, int32_t *retval); static __attribute__((noinline)) void launchd_crashed_panic(proc_t p, int rv); extern void proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo); extern void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]); -extern uint64_t get_task_phys_footprint_limit(task_t); +extern uint64_t 
get_task_phys_footprint_limit(task_t); int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size); extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task); @@ -178,14 +178,14 @@ extern uint64_t task_corpse_get_crashed_thread_id(task_t corpse_task); /* * Things which should have prototypes in headers, but don't */ -void proc_exit(proc_t p); -int wait1continue(int result); -int waitidcontinue(int result); +void proc_exit(proc_t p); +int wait1continue(int result); +int waitidcontinue(int result); kern_return_t sys_perf_notify(thread_t thread, int pid); kern_return_t task_exception_notify(exception_type_t exception, - mach_exception_data_type_t code, mach_exception_data_type_t subcode); + mach_exception_data_type_t code, mach_exception_data_type_t subcode); kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); -void delay(int); +void delay(int); void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor); /* @@ -195,31 +195,31 @@ void gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor); void siginfo_user_to_user32(user_siginfo_t *in, user32_siginfo_t *out) { - out->si_signo = in->si_signo; - out->si_errno = in->si_errno; - out->si_code = in->si_code; - out->si_pid = in->si_pid; - out->si_uid = in->si_uid; - out->si_status = in->si_status; - out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_addr); + out->si_signo = in->si_signo; + out->si_errno = in->si_errno; + out->si_code = in->si_code; + out->si_pid = in->si_pid; + out->si_uid = in->si_uid; + out->si_status = in->si_status; + out->si_addr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_addr); /* following cast works for sival_int because of padding */ - out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t,in->si_value.sival_ptr); - out->si_band = in->si_band; /* range reduction */ + out->si_value.sival_ptr = CAST_DOWN_EXPLICIT(user32_addr_t, in->si_value.sival_ptr); + out->si_band = in->si_band; /* range reduction */ } void siginfo_user_to_user64(user_siginfo_t *in, user64_siginfo_t *out) { - out->si_signo = in->si_signo; - out->si_errno = in->si_errno; - out->si_code = in->si_code; - out->si_pid = in->si_pid; - out->si_uid = in->si_uid; - out->si_status = in->si_status; - out->si_addr = in->si_addr; + out->si_signo = in->si_signo; + out->si_errno = in->si_errno; + out->si_code = in->si_code; + out->si_pid = in->si_pid; + out->si_uid = in->si_uid; + out->si_status = in->si_status; + out->si_addr = in->si_addr; /* following cast works for sival_int because of padding */ - out->si_value.sival_ptr = in->si_value.sival_ptr; - out->si_band = in->si_band; /* range reduction */ + out->si_value.sival_ptr = in->si_value.sival_ptr; + out->si_band = in->si_band; /* range reduction */ } static int @@ -228,31 +228,33 @@ copyoutsiginfo(user_siginfo_t *native, boolean_t is64, user_addr_t uaddr) if (is64) { user64_siginfo_t sinfo64; - bzero(&sinfo64, sizeof (sinfo64)); + bzero(&sinfo64, sizeof(sinfo64)); siginfo_user_to_user64(native, &sinfo64); - return (copyout(&sinfo64, uaddr, sizeof (sinfo64))); + return copyout(&sinfo64, uaddr, sizeof(sinfo64)); } else { user32_siginfo_t sinfo32; - bzero(&sinfo32, sizeof (sinfo32)); + bzero(&sinfo32, sizeof(sinfo32)); siginfo_user_to_user32(native, &sinfo32); - return (copyout(&sinfo32, uaddr, sizeof (sinfo32))); + return copyout(&sinfo32, uaddr, sizeof(sinfo32)); } } -void gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task, - mach_exception_data_type_t code, mach_exception_data_type_t subcode, - uint64_t *udata_buffer, 
int num_udata, void *reason) +void +gather_populate_corpse_crashinfo(proc_t p, task_t corpse_task, + mach_exception_data_type_t code, mach_exception_data_type_t subcode, + uint64_t *udata_buffer, int num_udata, void *reason) { struct rusage_superset rup; gather_rusage_info(p, &rup.ri, RUSAGE_INFO_CURRENT); rup.ri.ri_phys_footprint = 0; populate_corpse_crashinfo(p, corpse_task, &rup, code, subcode, - udata_buffer, num_udata, reason); + udata_buffer, num_udata, reason); } -static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode) +static void +proc_update_corpse_exception_codes(proc_t p, mach_exception_data_type_t *code, mach_exception_data_type_t *subcode) { mach_exception_data_type_t code_update = *code; mach_exception_data_type_t subcode_update = *subcode; @@ -261,19 +263,19 @@ static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_typ } switch (p->p_exit_reason->osr_namespace) { - case OS_REASON_JETSAM: - if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) { - /* Update the code with EXC_RESOURCE code for high memory watermark */ - EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY); - EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK); - EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(p->task)) >> 20)); - subcode_update = 0; - break; - } - - break; - default: + case OS_REASON_JETSAM: + if (p->p_exit_reason->osr_code == JETSAM_REASON_MEMORY_PERPROCESSLIMIT) { + /* Update the code with EXC_RESOURCE code for high memory watermark */ + EXC_RESOURCE_ENCODE_TYPE(code_update, RESOURCE_TYPE_MEMORY); + EXC_RESOURCE_ENCODE_FLAVOR(code_update, FLAVOR_HIGH_WATERMARK); + EXC_RESOURCE_HWM_ENCODE_LIMIT(code_update, ((get_task_phys_footprint_limit(p->task)) >> 20)); + subcode_update = 0; break; + } + + break; + default: + break; } *code = code_update; @@ -281,7 +283,8 @@ static void proc_update_corpse_exception_codes(proc_t p, mach_exception_data_typ return; } -mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p) +mach_exception_data_type_t +proc_encode_exit_exception_code(proc_t p) { uint64_t subcode = 0; @@ -297,8 +300,8 @@ mach_exception_data_type_t proc_encode_exit_exception_code(proc_t p) static void populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset *rup, - mach_exception_data_type_t code, mach_exception_data_type_t subcode, - uint64_t *udata_buffer, int num_udata, os_reason_t reason) + mach_exception_data_type_t code, mach_exception_data_type_t subcode, + uint64_t *udata_buffer, int num_udata, os_reason_t reason) { mach_vm_address_t uaddr = 0; mach_exception_data_type_t exc_codes[EXCEPTION_CODE_MAX]; @@ -313,19 +316,19 @@ populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset * uint64_t max_footprint_mb; uint64_t max_footprint; - uint64_t ledger_internal; - uint64_t ledger_internal_compressed; - uint64_t ledger_iokit_mapped; - uint64_t ledger_alternate_accounting; - uint64_t ledger_alternate_accounting_compressed; - uint64_t ledger_purgeable_nonvolatile; - uint64_t ledger_purgeable_nonvolatile_compressed; - uint64_t ledger_page_table; - uint64_t ledger_phys_footprint; - uint64_t ledger_phys_footprint_lifetime_max; - uint64_t ledger_network_nonvolatile; - uint64_t ledger_network_nonvolatile_compressed; - uint64_t ledger_wired_mem; + uint64_t ledger_internal; + uint64_t ledger_internal_compressed; + uint64_t ledger_iokit_mapped; + uint64_t ledger_alternate_accounting; + 
uint64_t ledger_alternate_accounting_compressed; + uint64_t ledger_purgeable_nonvolatile; + uint64_t ledger_purgeable_nonvolatile_compressed; + uint64_t ledger_page_table; + uint64_t ledger_phys_footprint; + uint64_t ledger_phys_footprint_lifetime_max; + uint64_t ledger_network_nonvolatile; + uint64_t ledger_network_nonvolatile_compressed; + uint64_t ledger_wired_mem; void *crash_info_ptr = task_get_corpseinfo(corpse_task); @@ -414,8 +417,9 @@ populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset * } cputype = cpu_type() & ~CPU_ARCH_MASK; - if (IS_64BIT_PROCESS(p)) + if (IS_64BIT_PROCESS(p)) { cputype |= CPU_ARCH_ABI64; + } if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_CPUTYPE, sizeof(cpu_type_t), &uaddr)) { kcdata_memcpy(crash_info_ptr, uaddr, &cputype, sizeof(cpu_type_t)); @@ -427,71 +431,71 @@ populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset * kcdata_memcpy(crash_info_ptr, uaddr, &max_footprint_mb, sizeof(max_footprint_mb)); } - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) { - ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(p->task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max)); - } - - // In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) { - ledger_internal = get_task_internal(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) { - ledger_internal_compressed = get_task_internal_compressed(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) { - ledger_iokit_mapped = get_task_iokit_mapped(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) { - ledger_alternate_accounting = get_task_alternate_accounting(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) { - ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) { - ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile)); - } - - if (KERN_SUCCESS 
== kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) { - ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) { - ledger_page_table = get_task_page_table(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) { - ledger_phys_footprint = get_task_phys_footprint(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) { - ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) { - ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed)); - } - - if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) { - ledger_wired_mem = get_task_wired_mem(corpse_task); - kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem)); - } + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT_LIFETIME_MAX, sizeof(ledger_phys_footprint_lifetime_max), &uaddr)) { + ledger_phys_footprint_lifetime_max = get_task_phys_footprint_lifetime_max(p->task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint_lifetime_max, sizeof(ledger_phys_footprint_lifetime_max)); + } + + // In the forking case, the current ledger info is copied into the corpse while the original task is suspended for consistency + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL, sizeof(ledger_internal), &uaddr)) { + ledger_internal = get_task_internal(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal, sizeof(ledger_internal)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_INTERNAL_COMPRESSED, sizeof(ledger_internal_compressed), &uaddr)) { + ledger_internal_compressed = get_task_internal_compressed(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_internal_compressed, sizeof(ledger_internal_compressed)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_IOKIT_MAPPED, sizeof(ledger_iokit_mapped), &uaddr)) { + ledger_iokit_mapped = get_task_iokit_mapped(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_iokit_mapped, sizeof(ledger_iokit_mapped)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING, sizeof(ledger_alternate_accounting), &uaddr)) 
{ + ledger_alternate_accounting = get_task_alternate_accounting(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting, sizeof(ledger_alternate_accounting)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_ALTERNATE_ACCOUNTING_COMPRESSED, sizeof(ledger_alternate_accounting_compressed), &uaddr)) { + ledger_alternate_accounting_compressed = get_task_alternate_accounting_compressed(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_alternate_accounting_compressed, sizeof(ledger_alternate_accounting_compressed)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE, sizeof(ledger_purgeable_nonvolatile), &uaddr)) { + ledger_purgeable_nonvolatile = get_task_purgeable_nonvolatile(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile, sizeof(ledger_purgeable_nonvolatile)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PURGEABLE_NONVOLATILE_COMPRESSED, sizeof(ledger_purgeable_nonvolatile_compressed), &uaddr)) { + ledger_purgeable_nonvolatile_compressed = get_task_purgeable_nonvolatile_compressed(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_purgeable_nonvolatile_compressed, sizeof(ledger_purgeable_nonvolatile_compressed)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PAGE_TABLE, sizeof(ledger_page_table), &uaddr)) { + ledger_page_table = get_task_page_table(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_page_table, sizeof(ledger_page_table)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_PHYS_FOOTPRINT, sizeof(ledger_phys_footprint), &uaddr)) { + ledger_phys_footprint = get_task_phys_footprint(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_phys_footprint, sizeof(ledger_phys_footprint)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE, sizeof(ledger_network_nonvolatile), &uaddr)) { + ledger_network_nonvolatile = get_task_network_nonvolatile(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile, sizeof(ledger_network_nonvolatile)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_NETWORK_NONVOLATILE_COMPRESSED, sizeof(ledger_network_nonvolatile_compressed), &uaddr)) { + ledger_network_nonvolatile_compressed = get_task_network_nonvolatile_compressed(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_network_nonvolatile_compressed, sizeof(ledger_network_nonvolatile_compressed)); + } + + if (KERN_SUCCESS == kcdata_get_memory_addr(crash_info_ptr, TASK_CRASHINFO_LEDGER_WIRED_MEM, sizeof(ledger_wired_mem), &uaddr)) { + ledger_wired_mem = get_task_wired_mem(corpse_task); + kcdata_memcpy(crash_info_ptr, uaddr, &ledger_wired_mem, sizeof(ledger_wired_mem)); + } bzero(&pwqinfo, sizeof(struct proc_workqueueinfo)); retval = fill_procworkqueue(p, &pwqinfo); @@ -546,7 +550,7 @@ populate_corpse_crashinfo(proc_t p, task_t corpse_task, struct rusage_superset * if (num_udata > 0) { if (KERN_SUCCESS == kcdata_get_memory_addr_for_array(crash_info_ptr, TASK_CRASHINFO_UDATA_PTRS, - sizeof(uint64_t), num_udata, &uaddr)) { + sizeof(uint64_t), num_udata, &uaddr)) { kcdata_memcpy(crash_info_ptr, uaddr, udata_buffer, sizeof(uint64_t) * num_udata); } } @@ -564,7 +568,7 @@ launchd_exit_reason_get_string_desc(os_reason_t exit_reason) kcdata_iter_t iter; if 
(exit_reason == OS_REASON_NULL || exit_reason->osr_kcd_buf == NULL || - exit_reason->osr_bufsize == 0) { + exit_reason->osr_bufsize == 0) { return NULL; } @@ -579,7 +583,7 @@ launchd_exit_reason_get_string_desc(os_reason_t exit_reason) if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_OS_REASON) { #if DEBUG || DEVELOPMENT printf("launchd exit reason buffer type mismatch, expected %d got %d\n", - KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter)); + KCDATA_BUFFER_BEGIN_OS_REASON, kcdata_iter_type(iter)); #endif return NULL; } @@ -599,11 +603,11 @@ launchd_crashed_panic(proc_t p, int rv) if (p->p_exit_reason == OS_REASON_NULL) { printf("pid 1 exited -- no exit reason available -- (signal %d, exit %d)\n", - WTERMSIG(rv), WEXITSTATUS(rv)); + WTERMSIG(rv), WEXITSTATUS(rv)); } else { printf("pid 1 exited -- exit reason namespace %d subcode 0x%llx, description %s\n", - p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, launchd_exit_reason_desc ? - launchd_exit_reason_desc : "none"); + p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, launchd_exit_reason_desc ? + launchd_exit_reason_desc : "none"); } const char *launchd_crashed_prefix_str; @@ -640,10 +644,10 @@ launchd_crashed_panic(proc_t p, int rv) if (err != 0) { printf("Failed to generate initproc core file: error %d, took %d.%03d seconds\n", - err, (uint32_t)tv_sec, tv_msec); + err, (uint32_t)tv_sec, tv_msec); } else { printf("Generated initproc core file in %d.%03d seconds\n", - (uint32_t)tv_sec, tv_msec); + (uint32_t)tv_sec, tv_msec); } #endif /* (DEVELOPMENT || DEBUG) && CONFIG_COREDUMP */ @@ -651,12 +655,12 @@ launchd_crashed_panic(proc_t p, int rv) if (p->p_exit_reason == OS_REASON_NULL) { panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s -- no exit reason available -- (signal %d, exit status %d %s)", - launchd_crashed_prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((p->p_csflags & CS_KILLED) ? "CS_KILLED" : "")); + launchd_crashed_prefix_str, WTERMSIG(rv), WEXITSTATUS(rv), ((p->p_csflags & CS_KILLED) ? "CS_KILLED" : "")); } else { panic_with_options(0, NULL, DEBUGGER_OPTION_INITPROC_PANIC, "%s %s -- exit reason namespace %d subcode 0x%llx description: %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s", - ((p->p_csflags & CS_KILLED) ? "CS_KILLED" : ""), - launchd_crashed_prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, - launchd_exit_reason_desc ? launchd_exit_reason_desc : "none"); + ((p->p_csflags & CS_KILLED) ? "CS_KILLED" : ""), + launchd_crashed_prefix_str, p->p_exit_reason->osr_namespace, p->p_exit_reason->osr_code, + launchd_exit_reason_desc ? 
launchd_exit_reason_desc : "none"); } } @@ -666,36 +670,36 @@ launchd_crashed_panic(proc_t p, int rv) static int abort_with_payload_internal(proc_t p, - uint32_t reason_namespace, uint64_t reason_code, - user_addr_t payload, uint32_t payload_size, - user_addr_t reason_string, uint64_t reason_flags, - uint32_t internal_flags) + uint32_t reason_namespace, uint64_t reason_code, + user_addr_t payload, uint32_t payload_size, + user_addr_t reason_string, uint64_t reason_flags, + uint32_t internal_flags) { os_reason_t exit_reason = OS_REASON_NULL; kern_return_t kr = KERN_SUCCESS; if (internal_flags & OS_REASON_IFLAG_USER_FAULT) { uint32_t old_value = atomic_load_explicit(&p->p_user_faults, - memory_order_relaxed); + memory_order_relaxed); for (;;) { if (old_value >= OS_REASON_TOTAL_USER_FAULTS_PER_PROC) { return EQFULL; } // this reloads the value in old_value if (atomic_compare_exchange_strong_explicit(&p->p_user_faults, - &old_value, old_value + 1, memory_order_relaxed, - memory_order_relaxed)) { + &old_value, old_value + 1, memory_order_relaxed, + memory_order_relaxed)) { break; } } } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, reason_namespace, - reason_code, 0, 0); + p->p_pid, reason_namespace, + reason_code, 0, 0); exit_reason = build_userspace_exit_reason(reason_namespace, reason_code, - payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT); + payload, payload_size, reason_string, reason_flags | OS_REASON_FLAG_ABORT); if (internal_flags & OS_REASON_IFLAG_USER_FAULT) { mach_exception_code_t code = 0; @@ -733,22 +737,22 @@ abort_with_payload_internal(proc_t p, int abort_with_payload(struct proc *cur_proc, struct abort_with_payload_args *args, - __unused void *retval) + __unused void *retval) { abort_with_payload_internal(cur_proc, args->reason_namespace, - args->reason_code, args->payload, args->payload_size, - args->reason_string, args->reason_flags, 0); + args->reason_code, args->payload, args->payload_size, + args->reason_string, args->reason_flags, 0); return 0; } int os_fault_with_payload(struct proc *cur_proc, - struct os_fault_with_payload_args *args, __unused int *retval) + struct os_fault_with_payload_args *args, __unused int *retval) { return abort_with_payload_internal(cur_proc, args->reason_namespace, - args->reason_code, args->payload, args->payload_size, - args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT); + args->reason_code, args->payload, args->payload_size, + args->reason_string, args->reason_flags, OS_REASON_IFLAG_USER_FAULT); } @@ -765,8 +769,9 @@ exit(proc_t p, struct exit_args *uap, int *retval) thread_exception_return(); /* NOTREACHED */ - while (TRUE) + while (TRUE) { thread_block(THREAD_CONTINUE_NULL); + } /* NOTREACHED */ } @@ -783,7 +788,7 @@ exit1(proc_t p, int rv, int *retval) int exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify, - int jetsam_flags) + int jetsam_flags) { return exit_with_reason(p, rv, retval, thread_can_terminate, perf_notify, jetsam_flags, OS_REASON_NULL); } @@ -793,7 +798,7 @@ exit1_internal(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, bo */ int exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, boolean_t perf_notify, - int jetsam_flags, struct os_reason *exit_reason) + int jetsam_flags, struct os_reason *exit_reason) { thread_t self = current_thread(); struct task *task = p->task; @@ -806,31 +811,31 @@ exit_with_reason(proc_t p, int rv, int 
*retval, boolean_t thread_can_terminate, * right here. */ - ut = get_bsdthread_info(self); - if ((p == current_proc()) && - (ut->uu_flag & UT_VFORK)) { + ut = get_bsdthread_info(self); + if ((p == current_proc()) && + (ut->uu_flag & UT_VFORK)) { os_reason_free(exit_reason); if (!thread_can_terminate) { return EINVAL; } vfork_exit(p, rv); - vfork_return(p , retval, p->p_pid); + vfork_return(p, retval, p->p_pid); unix_syscall_return(0); /* NOT REACHED */ - } + } - /* + /* * The parameter list of audit_syscall_exit() was augmented to * take the Darwin syscall number as the first parameter, * which is currently required by mac_audit_postselect(). */ - /* + /* * The BSM token contains two components: an exit status as passed - * to exit(), and a return value to indicate what sort of exit it + * to exit(), and a return value to indicate what sort of exit it * was. The exit status is WEXITSTATUS(rv), but it's not clear - * what the return value is. + * what the return value is. */ AUDIT_ARG(exit, WEXITSTATUS(rv), 0); /* @@ -844,9 +849,9 @@ exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, /* mark process is going to exit and pull out of DBG/disk throttle */ /* TODO: This should be done after becoming exit thread */ proc_set_task_policy(p->task, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE); + TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE); - proc_lock(p); + proc_lock(p); error = proc_transstart(p, 1, (jetsam_flags ? 1 : 0)); if (error == EDEADLK) { /* @@ -856,10 +861,10 @@ exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, */ proc_unlock(p); os_reason_free(exit_reason); - if (current_proc() == p){ + if (current_proc() == p) { if (p->exit_thread == self) { printf("exit_thread failed to exit, leaving process %s[%d] in unkillable limbo\n", - p->p_comm, p->p_pid); + p->p_comm, p->p_pid); } if (thread_can_terminate) { @@ -877,10 +882,10 @@ exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, if (get_threadtask(self) != task) { proc_unlock(p); - return(0); - } + return 0; + } proc_unlock(p); - + thread_terminate(self); if (!thread_can_terminate) { return 0; @@ -894,8 +899,8 @@ exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, if (exit_reason != OS_REASON_NULL) { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_COMMIT) | DBG_FUNC_NONE, - p->p_pid, exit_reason->osr_namespace, - exit_reason->osr_code, 0, 0); + p->p_pid, exit_reason->osr_namespace, + exit_reason->osr_code, 0, 0); } assert(p->p_exit_reason == OS_REASON_NULL); @@ -913,11 +918,11 @@ exit_with_reason(proc_t p, int rv, int *retval, boolean_t thread_can_terminate, /* Last thread to terminate will call proc_exit() */ task_terminate_internal(task); - return(0); + return 0; } void -proc_prepareexit(proc_t p, int rv, boolean_t perf_notify) +proc_prepareexit(proc_t p, int rv, boolean_t perf_notify) { mach_exception_data_type_t code = 0, subcode = 0; @@ -935,8 +940,8 @@ proc_prepareexit(proc_t p, int rv, boolean_t perf_notify) /* * Generate a corefile/crashlog if: - * The process doesn't have an exit reason that indicates no crash report should be created - * AND any of the following are true: + * The process doesn't have an exit reason that indicates no crash report should be created + * AND any of the following are true: * - The process was terminated due to a fatal signal that generates a core * - The process was killed due to a code signing violation * - The process has an exit reason that 
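The abort_with_payload_internal() hunk above caps per-process user faults with a load-then-compare-exchange retry loop. As a minimal C11 sketch of that bounded-increment pattern (this uses <stdatomic.h> standing in for the kernel's atomic wrappers; bounded_increment and LIMIT are illustrative names, not kernel symbols):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define LIMIT 5  /* stands in for OS_REASON_TOTAL_USER_FAULTS_PER_PROC */

    /* Atomically bump *counter unless it has already reached LIMIT.
     * Returns false once full (the kernel path returns EQFULL). */
    static bool
    bounded_increment(_Atomic uint32_t *counter)
    {
        uint32_t old_value = atomic_load_explicit(counter, memory_order_relaxed);
        for (;;) {
            if (old_value >= LIMIT) {
                return false;
            }
            /* On failure the CAS stores the current value back into
             * old_value, so the cap is re-checked before each retry. */
            if (atomic_compare_exchange_strong_explicit(counter,
                &old_value, old_value + 1,
                memory_order_relaxed, memory_order_relaxed)) {
                return true;
            }
        }
    }

Relaxed ordering suffices in this pattern because the counter only gates a quota; no other data is published through it, which matches the memory_order_relaxed usage in the hunk above.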
indicates we should generate a crash report @@ -945,26 +950,26 @@ proc_prepareexit(proc_t p, int rv, boolean_t perf_notify) * (which normally triggers a core) but may indicate that no crash report should be created. */ if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) && - (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0) || - (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & - OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) { - /* + (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0) || + (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & + OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) { + /* * Workaround for processes checking up on PT_DENY_ATTACH: * should be backed out post-Leopard (details in 5431025). */ - if ((SIGSEGV == WTERMSIG(rv)) && - (p->p_pptr->p_lflag & P_LNOATTACH)) { + if ((SIGSEGV == WTERMSIG(rv)) && + (p->p_pptr->p_lflag & P_LNOATTACH)) { goto skipcheck; } /* * Crash Reporter looks for the signal value, original exception - * type, and low 20 bits of the original code in code[0] - * (8, 4, and 20 bits respectively). code[1] is unmodified. + * type, and low 20 bits of the original code in code[0] + * (8, 4, and 20 bits respectively). code[1] is unmodified. */ code = ((WTERMSIG(rv) & 0xff) << 24) | - ((ut->uu_exception & 0x0f) << 20) | - ((int)ut->uu_code & 0xfffff); + ((ut->uu_exception & 0x0f) << 20) | + ((int)ut->uu_code & 0xfffff); subcode = ut->uu_subcode; kr = task_exception_notify(EXC_CRASH, code, subcode); @@ -1005,7 +1010,7 @@ skipcheck: * If the zombie allocation fails, just punt the stats. */ MALLOC_ZONE(rup, struct rusage_superset *, - sizeof (*rup), M_ZOMBIE, M_WAITOK); + sizeof(*rup), M_ZOMBIE, M_WAITOK); if (rup != NULL) { gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT); rup->ri.ri_phys_footprint = 0; @@ -1036,7 +1041,7 @@ skipcheck: /* Update the code, subcode based on exit reason */ proc_update_corpse_exception_codes(p, &code, &subcode); populate_corpse_crashinfo(p, p->task, rup, - code, subcode, buffer, num_knotes, NULL); + code, subcode, buffer, num_knotes, NULL); if (buffer != NULL) { kfree(buffer, buf_size); } @@ -1055,7 +1060,7 @@ skipcheck: #endif LIST_REMOVE(p, p_list); - LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */ + LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */ /* will not be visible via proc_find */ p->p_listflag |= P_LIST_EXITED; @@ -1076,7 +1081,7 @@ skipcheck: proc_unlock(p); } -void +void proc_exit(proc_t p) { proc_t q; @@ -1094,7 +1099,7 @@ proc_exit(proc_t p) proc_lock(p); proc_transstart(p, 1, 0); - if( !(p->p_lflag & P_LEXIT)) { + if (!(p->p_lflag & P_LEXIT)) { /* * This can happen if a thread_terminate() occurs * in a single-threaded process. 
@@ -1102,7 +1107,7 @@ proc_exit(proc_t p) p->p_lflag |= P_LEXIT; proc_transend(p, 1); proc_unlock(p); - proc_prepareexit(p, 0, TRUE); + proc_prepareexit(p, 0, TRUE); (void) task_terminate_internal(task); proc_lock(p); } else { @@ -1118,17 +1123,18 @@ proc_exit(proc_t p) */ if ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 0)) { p->p_sigwaitcnt++; - while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) + while ((p->p_lflag & P_LINSIGNAL) || (p->p_sigwaitcnt > 1)) { msleep(&p->p_sigmask, &p->p_mlock, PWAIT, "proc_sigdrain", NULL); + } p->p_sigwaitcnt--; } proc_unlock(p); pid = p->p_pid; exitval = p->p_xstat; - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, - BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START, - pid, exitval, 0, 0, 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, + BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_START, + pid, exitval, 0, 0, 0); #if CONFIG_DTRACE dtrace_proc_exit(p); @@ -1140,7 +1146,7 @@ proc_exit(proc_t p) * need to cancel async IO requests that can be cancelled and wait for those * already active. MAY BLOCK! */ - + proc_refdrain(p); /* if any pending cpu limits action, clear it */ @@ -1163,7 +1169,7 @@ proc_exit(proc_t p) workq_exit(p); if (uth->uu_lowpri_window) { - /* + /* * task is marked as a low priority I/O type * and the I/O we issued while in flushing files on close * collided with normal I/O operations... @@ -1175,21 +1181,21 @@ proc_exit(proc_t p) #if SYSV_SHM /* Close ref SYSV Shared memory*/ - if (p->vm_shm) + if (p->vm_shm) { shmexit(p); + } #endif #if SYSV_SEM /* Release SYSV semaphores */ semexit(p); #endif - + #if PSYNCH pth_proc_hashdelete(p); #endif /* PSYNCH */ sessp = proc_session(p); if (SESS_LEADER(p, sessp)) { - if (sessp->s_ttyvp != NULLVP) { struct vnode *ttyvp; int ttyvid; @@ -1287,8 +1293,9 @@ proc_exit(proc_t p) if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) { p->p_listflag &= ~P_LIST_EXITCOUNT; proc_shutdown_exitcount--; - if (proc_shutdown_exitcount == 0) + if (proc_shutdown_exitcount == 0) { wakeup(&proc_shutdown_exitcount); + } } /* wait till parentrefs are dropped and grant no more */ @@ -1296,8 +1303,9 @@ proc_exit(proc_t p) while ((q = p->p_children.lh_first) != NULL) { int reparentedtoinit = (q->p_listflag & P_LIST_DEADPARENT) ? 1 : 0; if (q->p_stat == SZOMB) { - if (p != q->p_pptr) + if (p != q->p_pptr) { panic("parent child linkage broken"); + } /* check for sysctl zomb lookup */ while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0); @@ -1309,15 +1317,15 @@ proc_exit(proc_t p) * the reference here exclusively and their can be * no waiters. So there is no need for a wakeup * after we are done. Also the reap frees the structure - * and the proc struct cannot be used for wakeups as well. + * and the proc struct cannot be used for wakeups as well. * It is safe to use q here as this is system reap */ (void)reap_child_locked(p, q, 1, reparentedtoinit, 1, 0); } else { /* - * Traced processes are killed - * since their existence means someone is messing up. - */ + * Traced processes are killed + * since their existence means someone is messing up. + */ if (q->p_lflag & P_LTRACED) { struct proc *opp; @@ -1327,8 +1335,9 @@ proc_exit(proc_t p) * the time we drop the list_lock and attempt * to acquire its proc_lock. */ - if (proc_ref_locked(q) != q) + if (proc_ref_locked(q) != q) { continue; + } proc_list_unlock(); @@ -1356,12 +1365,12 @@ proc_exit(proc_t p) proc_unlock(q); /* - * The sigwait_thread could be stopped at a - * breakpoint. 
Wake it up to kill. - * Need to do this as it could be a thread which is not - * the first thread in the task. So any attempts to kill - * the process would result into a deadlock on q->sigwait. - */ + * The sigwait_thread could be stopped at a + * breakpoint. Wake it up to kill. + * Need to do this as it could be a thread which is not + * the first thread in the task. So any attempts to kill + * the process would result into a deadlock on q->sigwait. + */ thread_resume(thread); clear_wait(thread, THREAD_INTERRUPTED); threadsignal(thread, SIGKILL, 0, TRUE); @@ -1406,10 +1415,10 @@ proc_exit(proc_t p) * structure, this information is lost. */ if (p->p_ru != NULL) { - calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL); - p->p_ru->ru = p->p_stats->p_ru; + calcru(p, &p->p_stats->p_ru.ru_utime, &p->p_stats->p_ru.ru_stime, NULL); + p->p_ru->ru = p->p_stats->p_ru; - ruadd(&(p->p_ru->ru), &p->p_stats->p_cru); + ruadd(&(p->p_ru->ru), &p->p_stats->p_cru); } /* @@ -1462,29 +1471,28 @@ proc_exit(proc_t p) */ pp = proc_parent(p); if (pp->p_flag & P_NOCLDWAIT) { - if (p->p_ru != NULL) { proc_lock(pp); #if 3839178 - /* - * If the parent is ignoring SIGCHLD, then POSIX requires - * us to not add the resource usage to the parent process - - * we are only going to hand it off to init to get reaped. - * We should contest the standard in this case on the basis - * of RLIMIT_CPU. - */ -#else /* !3839178 */ - /* - * Add child resource usage to parent before giving - * zombie to init. If we were unable to allocate a - * zombie structure, this information is lost. - */ + /* + * If the parent is ignoring SIGCHLD, then POSIX requires + * us to not add the resource usage to the parent process - + * we are only going to hand it off to init to get reaped. + * We should contest the standard in this case on the basis + * of RLIMIT_CPU. + */ +#else /* !3839178 */ + /* + * Add child resource usage to parent before giving + * zombie to init. If we were unable to allocate a + * zombie structure, this information is lost. + */ ruadd(&pp->p_stats->p_cru, &p->p_ru->ru); -#endif /* !3839178 */ +#endif /* !3839178 */ update_rusage_info_child(&pp->p_stats->ri_child, &p->p_ru->ri); proc_unlock(pp); } - + /* kernel can reap this one, no need to move it to launchd */ proc_list_lock(); p->p_listflag |= P_LIST_DEADPARENT; @@ -1513,16 +1521,16 @@ proc_exit(proc_t p) */ DTRACE_PROC2(exited, proc_t, p, int, exitval); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, - BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END, - pid, exitval, 0, 0, 0); + BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END, + pid, exitval, 0, 0, 0); p->p_stat = SZOMB; - /* + /* * The current process can be reaped so, no one * can depend on this */ psignal(pp, SIGCHLD); - + /* and now wakeup the parent */ proc_list_lock(); wakeup((caddr_t)pp); @@ -1538,8 +1546,8 @@ proc_exit(proc_t p) DTRACE_PROC2(exited, proc_t, p, int, exitval); proc_list_lock(); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, - BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END, - pid, exitval, 0, 0, 0); + BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT) | DBG_FUNC_END, + pid, exitval, 0, 0, 0); /* check for sysctl zomb lookup */ while ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { msleep(&p->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0); @@ -1554,14 +1562,14 @@ proc_exit(proc_t p) * the reference here exclusively and their can be * no waiters. So there is no need for a wakeup * after we are done. 
AlsO the reap frees the structure - * and the proc struct cannot be used for wakeups as well. + * and the proc struct cannot be used for wakeups as well. * It is safe to use p here as this is system reap */ (void)reap_child_locked(pp, p, 1, 0, 1, 1); /* list lock dropped by reap_child_locked */ } if (uth->uu_lowpri_window) { - /* + /* * task is marked as a low priority I/O type and we've * somehow picked up another throttle during exit processing... * no need to throttle this thread since its going away @@ -1571,7 +1579,6 @@ proc_exit(proc_t p) } proc_rele(pp); - } @@ -1594,20 +1601,21 @@ proc_exit(proc_t p) static int reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoinit, int locked, int droplock) { - proc_t trace_parent = PROC_NULL; /* Traced parent process, if tracing */ + proc_t trace_parent = PROC_NULL; /* Traced parent process, if tracing */ - if (locked == 1) + if (locked == 1) { proc_list_unlock(); - + } + /* * If we got the child via a ptrace 'attach', * we need to give it back to the old parent. * * Exception: someone who has been reparented to launchd before being * ptraced can simply be reaped, refer to radar 5677288 - * p_oppid -> ptraced - * trace_parent == initproc -> away from launchd - * reparentedtoinit -> came to launchd by reparenting + * p_oppid -> ptraced + * trace_parent == initproc -> away from launchd + * reparentedtoinit -> came to launchd by reparenting */ if (child->p_oppid) { int knote_hint; @@ -1620,13 +1628,12 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi proc_unlock(child); if ((trace_parent = proc_find(oppid)) - && !((trace_parent == initproc) && reparentedtoinit)) { - + && !((trace_parent == initproc) && reparentedtoinit)) { if (trace_parent != initproc) { - /* - * proc internal fileds and p_ucred usage safe - * here as child is dead and is not reaped or - * reparented yet + /* + * proc internal fileds and p_ucred usage safe + * here as child is dead and is not reaped or + * reparented yet */ proc_lock(trace_parent); trace_parent->si_pid = child->p_pid; @@ -1636,10 +1643,10 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi proc_unlock(trace_parent); } proc_reparentlocked(child, trace_parent, 1, 0); - + /* resend knote to original parent (and others) after reparenting */ proc_knote(child, knote_hint); - + psignal(trace_parent, SIGCHLD); proc_list_lock(); wakeup((caddr_t)trace_parent); @@ -1647,9 +1654,10 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi wakeup(&child->p_stat); proc_list_unlock(); proc_rele(trace_parent); - if ((locked == 1) && (droplock == 0)) + if ((locked == 1) && (droplock == 0)) { proc_list_lock(); - return (0); + } + return 0; } /* @@ -1664,7 +1672,7 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi proc_rele(trace_parent); } } - + #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" proc_knote(child, NOTE_REAP); @@ -1684,8 +1692,8 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi * of RLIMIT_CPU. 
*/ if (!(parent->p_flag & P_NOCLDWAIT)) -#endif /* 3839178 */ - ruadd(&parent->p_stats->p_cru, &child->p_ru->ru); +#endif /* 3839178 */ + ruadd(&parent->p_stats->p_cru, &child->p_ru->ru); update_rusage_info_child(&parent->p_stats->ri_child, &child->p_ru->ri); proc_unlock(parent); FREE_ZONE(child->p_ru, sizeof *child->p_ru, M_ZOMBIE); @@ -1729,12 +1737,13 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi leavepgrp(child); proc_list_lock(); - LIST_REMOVE(child, p_list); /* off zombproc */ + LIST_REMOVE(child, p_list); /* off zombproc */ parent->p_childrencnt--; LIST_REMOVE(child, p_sibling); /* If there are no more children wakeup parent */ - if ((deadparent != 0) && (LIST_EMPTY(&parent->p_children))) - wakeup((caddr_t)parent); /* with list lock held */ + if ((deadparent != 0) && (LIST_EMPTY(&parent->p_children))) { + wakeup((caddr_t)parent); /* with list lock held */ + } child->p_listflag &= ~P_LIST_WAITING; wakeup(&child->p_stat); @@ -1754,7 +1763,7 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi proc_list_unlock(); -#if CONFIG_FINE_LOCK_GROUPS + lck_mtx_destroy(&child->p_mlock, proc_mlock_grp); lck_mtx_destroy(&child->p_ucred_mlock, proc_ucred_mlock_grp); lck_mtx_destroy(&child->p_fdmlock, proc_fdmlock_grp); @@ -1762,21 +1771,13 @@ reap_child_locked(proc_t parent, proc_t child, int deadparent, int reparentedtoi lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp); #endif lck_spin_destroy(&child->p_slock, proc_slock_grp); -#else /* CONFIG_FINE_LOCK_GROUPS */ - lck_mtx_destroy(&child->p_mlock, proc_lck_grp); - lck_mtx_destroy(&child->p_ucred_mlock, proc_lck_grp); - lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp); -#if CONFIG_DTRACE - lck_mtx_destroy(&child->p_dtrace_sprlock, proc_lck_grp); -#endif - lck_spin_destroy(&child->p_slock, proc_lck_grp); -#endif /* CONFIG_FINE_LOCK_GROUPS */ FREE_ZONE(child, sizeof *child, M_PROC); - if ((locked == 1) && (droplock == 0)) + if ((locked == 1) && (droplock == 0)) { proc_list_lock(); + } - return (1); + return 1; } @@ -1790,8 +1791,9 @@ wait1continue(int result) struct wait4_nocancel_args *uap; int *retval; - if (result) - return(result); + if (result) { + return result; + } p = current_proc(); thread = current_thread(); @@ -1800,14 +1802,14 @@ wait1continue(int result) wait4_data = &uth->uu_save.uus_wait4_data; uap = wait4_data->args; retval = wait4_data->retval; - return(wait4_nocancel(p, uap, retval)); + return wait4_nocancel(p, uap, retval); } int wait4(proc_t q, struct wait4_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval)); + return wait4_nocancel(q, (struct wait4_nocancel_args *)uap, retval); } int @@ -1822,8 +1824,9 @@ wait4_nocancel(proc_t q, struct wait4_nocancel_args *uap, int32_t *retval) AUDIT_ARG(pid, uap->pid); - if (uap->pid == 0) + if (uap->pid == 0) { uap->pid = -q->p_pgrpid; + } loop: proc_list_lock(); @@ -1832,19 +1835,20 @@ loop1: sibling_count = 0; PCHILDREN_FOREACH(q, p) { - if ( p->p_sibling.le_next != 0 ) + if (p->p_sibling.le_next != 0) { sibling_count++; + } if (uap->pid != WAIT_ANY && p->p_pid != uap->pid && - p->p_pgrpid != -(uap->pid)) + p->p_pgrpid != -(uap->pid)) { continue; + } nfound++; /* XXX This is racy because we don't get the lock!!!! */ if (p->p_listflag & P_LIST_WAITING) { - /* we're not using a continuation here but we still need to stash * the args for stackshot. 
*/ uth = current_uthread(); @@ -1863,74 +1867,77 @@ loop1: proc_list_unlock(); #if CONFIG_MACF - if ((error = mac_proc_check_wait(q, p)) != 0) + if ((error = mac_proc_check_wait(q, p)) != 0) { goto out; + } #endif retval[0] = p->p_pid; if (uap->status) { /* Legacy apps expect only 8 bits of status */ - status = 0xffff & p->p_xstat; /* convert to int */ + status = 0xffff & p->p_xstat; /* convert to int */ error = copyout((caddr_t)&status, - uap->status, - sizeof(status)); - if (error) + uap->status, + sizeof(status)); + if (error) { goto out; + } } if (uap->rusage) { if (p->p_ru == NULL) { error = ENOMEM; } else { if (IS_64BIT_PROCESS(q)) { - struct user64_rusage my_rusage = {}; + struct user64_rusage my_rusage = {}; munge_user64_rusage(&p->p_ru->ru, &my_rusage); error = copyout((caddr_t)&my_rusage, - uap->rusage, - sizeof (my_rusage)); - } - else { - struct user32_rusage my_rusage = {}; + uap->rusage, + sizeof(my_rusage)); + } else { + struct user32_rusage my_rusage = {}; munge_user32_rusage(&p->p_ru->ru, &my_rusage); error = copyout((caddr_t)&my_rusage, - uap->rusage, - sizeof (my_rusage)); + uap->rusage, + sizeof(my_rusage)); } } /* information unavailable? */ - if (error) + if (error) { goto out; + } } /* Conformance change for 6577252. * When SIGCHLD is blocked and wait() returns because the status - * of a child process is available and there are no other + * of a child process is available and there are no other * children processes, then any pending SIGCHLD signal is cleared. */ - if ( sibling_count == 0 ) { + if (sibling_count == 0) { int mask = sigmask(SIGCHLD); uth = current_uthread(); - if ( (uth->uu_sigmask & mask) != 0 ) { + if ((uth->uu_sigmask & mask) != 0) { /* we are blocking SIGCHLD signals. clear any pending SIGCHLD. - * This locking looks funny but it is protecting access to the + * This locking looks funny but it is protecting access to the * thread via p_uthlist. */ - proc_lock(q); - uth->uu_siglist &= ~mask; /* clear pending signal */ + proc_lock(q); + uth->uu_siglist &= ~mask; /* clear pending signal */ proc_unlock(q); } } - + /* Clean up */ (void)reap_child_locked(q, p, 0, reparentedtoinit, 0, 0); - return (0); + return 0; } if (p->p_stat == SSTOP && (p->p_lflag & P_LWAITED) == 0 && (p->p_lflag & P_LTRACED || uap->options & WUNTRACED)) { proc_list_unlock(); #if CONFIG_MACF - if ((error = mac_proc_check_wait(q, p)) != 0) + if ((error = mac_proc_check_wait(q, p)) != 0) { goto out; + } #endif proc_lock(p); p->p_lflag |= P_LWAITED; @@ -1939,10 +1946,11 @@ loop1: if (uap->status) { status = W_STOPCODE(p->p_xstat); error = copyout((caddr_t)&status, - uap->status, + uap->status, sizeof(status)); - } else + } else { error = 0; + } goto out; } /* @@ -1953,8 +1961,9 @@ loop1: (p->p_flag & P_CONTINUED)) { proc_list_unlock(); #if CONFIG_MACF - if ((error = mac_proc_check_wait(q, p)) != 0) + if ((error = mac_proc_check_wait(q, p)) != 0) { goto out; + } #endif /* Prevent other process for waiting for this event */ @@ -1963,10 +1972,11 @@ loop1: if (uap->status) { status = W_STOPCODE(SIGCONT); error = copyout((caddr_t)&status, - uap->status, + uap->status, sizeof(status)); - } else + } else { error = 0; + } goto out; } p->p_listflag &= ~P_LIST_WAITING; @@ -1975,13 +1985,13 @@ loop1: /* list lock is held when we get here any which way */ if (nfound == 0) { proc_list_unlock(); - return (ECHILD); + return ECHILD; } if (uap->options & WNOHANG) { retval[0] = 0; proc_list_unlock(); - return (0); + return 0; } /* Save arguments for continuation. 
Backing storage is in uthread->uu_arg, and will not be deallocated */ @@ -1991,8 +2001,9 @@ loop1: wait4_data->retval = retval; thread_set_pending_block_hint(current_thread(), kThreadWaitOnProcess); - if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) - return (error); + if ((error = msleep0((caddr_t)q, proc_list_mlock, PWAIT | PCATCH | PDROP, "wait", 0, wait1continue))) { + return error; + } goto loop; out: @@ -2000,14 +2011,14 @@ out: p->p_listflag &= ~P_LIST_WAITING; wakeup(&p->p_stat); proc_list_unlock(); - return (error); + return error; } #if DEBUG -#define ASSERT_LCK_MTX_OWNED(lock) \ - lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED) +#define ASSERT_LCK_MTX_OWNED(lock) \ + lck_mtx_assert(lock, LCK_MTX_ASSERT_OWNED) #else -#define ASSERT_LCK_MTX_OWNED(lock) /* nothing */ +#define ASSERT_LCK_MTX_OWNED(lock) /* nothing */ #endif int @@ -2020,8 +2031,9 @@ waitidcontinue(int result) struct waitid_nocancel_args *uap; int *retval; - if (result) - return (result); + if (result) { + return result; + } p = current_proc(); thread = current_thread(); @@ -2030,7 +2042,7 @@ waitidcontinue(int result) waitid_data = &uth->uu_save.uus_waitid_data; uap = waitid_data->args; retval = waitid_data->retval; - return(waitid_nocancel(p, uap, retval)); + return waitid_nocancel(p, uap, retval); } /* @@ -2050,14 +2062,14 @@ int waitid(proc_t q, struct waitid_args *uap, int32_t *retval) { __pthread_testcancel(1); - return (waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval)); + return waitid_nocancel(q, (struct waitid_nocancel_args *)uap, retval); } int waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap, - __unused int32_t *retval) + __unused int32_t *retval) { - user_siginfo_t siginfo; /* siginfo data to return to caller */ + user_siginfo_t siginfo; /* siginfo data to return to caller */ boolean_t caller64 = IS_64BIT_PROCESS(q); int nfound; proc_t p; @@ -2066,16 +2078,17 @@ waitid_nocancel(proc_t q, struct waitid_nocancel_args *uap, struct _waitid_data *waitid_data; if (uap->options == 0 || - (uap->options & ~(WNOHANG|WNOWAIT|WCONTINUED|WSTOPPED|WEXITED))) - return (EINVAL); /* bits set that aren't recognized */ - + (uap->options & ~(WNOHANG | WNOWAIT | WCONTINUED | WSTOPPED | WEXITED))) { + return EINVAL; /* bits set that aren't recognized */ + } switch (uap->idtype) { - case P_PID: /* child with process ID equal to... */ - case P_PGID: /* child with process group ID equal to... */ - if (((int)uap->id) < 0) - return (EINVAL); + case P_PID: /* child with process ID equal to... */ + case P_PGID: /* child with process group ID equal to... */ + if (((int)uap->id) < 0) { + return EINVAL; + } break; - case P_ALL: /* any child */ + case P_ALL: /* any child */ break; } @@ -2086,15 +2099,17 @@ loop1: PCHILDREN_FOREACH(q, p) { switch (uap->idtype) { - case P_PID: /* child with process ID equal to... */ - if (p->p_pid != (pid_t)uap->id) + case P_PID: /* child with process ID equal to... */ + if (p->p_pid != (pid_t)uap->id) { continue; + } break; - case P_PGID: /* child with process group ID equal to... */ - if (p->p_pgrpid != (pid_t)uap->id) + case P_PGID: /* child with process group ID equal to... 
*/ + if (p->p_pgrpid != (pid_t)uap->id) { continue; + } break; - case P_ALL: /* any child */ + case P_ALL: /* any child */ break; } @@ -2106,23 +2121,25 @@ loop1: */ if (p->p_listflag & P_LIST_WAITING) { (void) msleep(&p->p_stat, proc_list_mlock, - PWAIT, "waitidcoll", 0); + PWAIT, "waitidcoll", 0); goto loop1; } - p->p_listflag |= P_LIST_WAITING; /* mark busy */ + p->p_listflag |= P_LIST_WAITING; /* mark busy */ nfound++; - bzero(&siginfo, sizeof (siginfo)); + bzero(&siginfo, sizeof(siginfo)); switch (p->p_stat) { - case SZOMB: /* Exited */ - if (!(uap->options & WEXITED)) + case SZOMB: /* Exited */ + if (!(uap->options & WEXITED)) { break; + } proc_list_unlock(); #if CONFIG_MACF - if ((error = mac_proc_check_wait(q, p)) != 0) + if ((error = mac_proc_check_wait(q, p)) != 0) { goto out; + } #endif siginfo.si_signo = SIGCHLD; siginfo.si_pid = p->p_pid; @@ -2130,48 +2147,54 @@ loop1: p->p_xhighbits = 0; if (WIFSIGNALED(p->p_xstat)) { siginfo.si_code = WCOREDUMP(p->p_xstat) ? - CLD_DUMPED : CLD_KILLED; - } else + CLD_DUMPED : CLD_KILLED; + } else { siginfo.si_code = CLD_EXITED; + } if ((error = copyoutsiginfo(&siginfo, - caller64, uap->infop)) != 0) + caller64, uap->infop)) != 0) { goto out; + } /* Prevent other process for waiting for this event? */ if (!(uap->options & WNOWAIT)) { (void) reap_child_locked(q, p, 0, 0, 0, 0); - return (0); + return 0; } goto out; - case SSTOP: /* Stopped */ + case SSTOP: /* Stopped */ /* * If we are not interested in stopped processes, then * ignore this one. */ - if (!(uap->options & WSTOPPED)) + if (!(uap->options & WSTOPPED)) { break; + } /* * If someone has already waited it, we lost a race * to be the one to return status. */ - if ((p->p_lflag & P_LWAITED) != 0) + if ((p->p_lflag & P_LWAITED) != 0) { break; + } proc_list_unlock(); #if CONFIG_MACF - if ((error = mac_proc_check_wait(q, p)) != 0) + if ((error = mac_proc_check_wait(q, p)) != 0) { goto out; + } #endif siginfo.si_signo = SIGCHLD; siginfo.si_pid = p->p_pid; - siginfo.si_status = p->p_xstat; /* signal number */ + siginfo.si_status = p->p_xstat; /* signal number */ siginfo.si_code = CLD_STOPPED; if ((error = copyoutsiginfo(&siginfo, - caller64, uap->infop)) != 0) + caller64, uap->infop)) != 0) { goto out; + } /* Prevent other process for waiting for this event? */ if (!(uap->options & WNOWAIT)) { @@ -2181,21 +2204,24 @@ loop1: } goto out; - default: /* All other states => Continued */ - if (!(uap->options & WCONTINUED)) + default: /* All other states => Continued */ + if (!(uap->options & WCONTINUED)) { break; + } /* * If the flag isn't set, then this process has not * been stopped and continued, or the status has * already been reaped by another caller of waitid(). */ - if ((p->p_flag & P_CONTINUED) == 0) + if ((p->p_flag & P_CONTINUED) == 0) { break; + } proc_list_unlock(); #if CONFIG_MACF - if ((error = mac_proc_check_wait(q, p)) != 0) + if ((error = mac_proc_check_wait(q, p)) != 0) { goto out; + } #endif siginfo.si_signo = SIGCHLD; siginfo.si_code = CLD_CONTINUED; @@ -2205,8 +2231,9 @@ loop1: proc_unlock(p); if ((error = copyoutsiginfo(&siginfo, - caller64, uap->infop)) != 0) + caller64, uap->infop)) != 0) { goto out; + } /* Prevent other process for waiting for this event? 
*/ if (!(uap->options & WNOWAIT)) { @@ -2228,14 +2255,15 @@ loop1: if (nfound == 0) { proc_list_unlock(); - return (ECHILD); + return ECHILD; } if (uap->options & WNOHANG) { proc_list_unlock(); #if CONFIG_MACF - if ((error = mac_proc_check_wait(q, p)) != 0) - return (error); + if ((error = mac_proc_check_wait(q, p)) != 0) { + return error; + } #endif /* * The state of the siginfo structure in this case @@ -2246,7 +2274,7 @@ loop1: * WNOHANG" is to store a zero into si_pid before * invocation, then check for a non-zero value afterwards. */ - return (0); + return 0; } /* Save arguments for continuation. Backing storage is in uthread->uu_arg, and will not be deallocated */ @@ -2256,8 +2284,9 @@ loop1: waitid_data->retval = retval; if ((error = msleep0(q, proc_list_mlock, - PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) - return (error); + PWAIT | PCATCH | PDROP, "waitid", 0, waitidcontinue)) != 0) { + return error; + } goto loop; out: @@ -2265,7 +2294,7 @@ out: p->p_listflag &= ~P_LIST_WAITING; wakeup(&p->p_stat); proc_list_unlock(); - return (error); + return error; } /* @@ -2276,39 +2305,46 @@ proc_reparentlocked(proc_t child, proc_t parent, int signallable, int locked) { proc_t oldparent = PROC_NULL; - if (child->p_pptr == parent) + if (child->p_pptr == parent) { return; + } - if (locked == 0) + if (locked == 0) { proc_list_lock(); + } oldparent = child->p_pptr; #if __PROC_INTERNAL_DEBUG - if (oldparent == PROC_NULL) + if (oldparent == PROC_NULL) { panic("proc_reparent: process %p does not have a parent\n", child); + } #endif LIST_REMOVE(child, p_sibling); #if __PROC_INTERNAL_DEBUG - if (oldparent->p_childrencnt == 0) + if (oldparent->p_childrencnt == 0) { panic("process children count already 0\n"); + } #endif oldparent->p_childrencnt--; #if __PROC_INTERNAL_DEBUG1 - if (oldparent->p_childrencnt < 0) + if (oldparent->p_childrencnt < 0) { panic("process children count -ve\n"); + } #endif LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); - parent->p_childrencnt++; + parent->p_childrencnt++; child->p_pptr = parent; child->p_ppid = parent->p_pid; proc_list_unlock(); - if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) + if ((signallable != 0) && (initproc == parent) && (child->p_stat == SZOMB)) { psignal(initproc, SIGCHLD); - if (locked == 1) + } + if (locked == 1) { proc_list_lock(); + } } /* @@ -2338,15 +2374,15 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) * right here. */ - ut = get_bsdthread_info(self); + ut = get_bsdthread_info(self); proc_lock(p); - if ((p->p_lflag & P_LPEXIT) == P_LPEXIT) { - /* - * This happens when a parent exits/killed and vfork is in progress - * other threads. But shutdown code for ex has already called exit1() - */ + if ((p->p_lflag & P_LPEXIT) == P_LPEXIT) { + /* + * This happens when a parent exits/killed and vfork is in progress + * other threads. But shutdown code for ex has already called exit1() + */ proc_unlock(p); return; } @@ -2355,7 +2391,7 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) if (forceexit == 0) { /* - * parent of a vfork child has already called exit() and the + * parent of a vfork child has already called exit() and the * thread that has vfork in proress terminates. So there is no * separate address space here and it has already been marked for * termination. 
This was never covered before and could cause problems @@ -2379,7 +2415,7 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) #endif LIST_REMOVE(p, p_list); - LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */ + LIST_INSERT_HEAD(&zombproc, p, p_list); /* Place onto zombproc. */ /* will not be visible via proc_find */ p->p_listflag |= P_LIST_EXITED; @@ -2406,7 +2442,7 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) /* XXX Zombie allocation may fail, in which case stats get lost */ MALLOC_ZONE(rup, struct rusage_superset *, - sizeof (*rup), M_ZOMBIE, M_WAITOK); + sizeof(*rup), M_ZOMBIE, M_WAITOK); proc_refdrain(p); @@ -2418,7 +2454,6 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) sessp = proc_session(p); if (SESS_LEADER(p, sessp)) { - if (sessp->s_ttyvp != NULLVP) { struct vnode *ttyvp; int ttyvid; @@ -2463,8 +2498,8 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) sessp->s_ttypgrpid = NO_PID; session_unlock(sessp); - if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) { - if (tp != TTY_NULL) { + if ((ttyvp != NULLVP) && (vnode_getwithvid(ttyvp, ttyvid) == 0)) { + if (tp != TTY_NULL) { tty_lock(tp); (void) ttywait(tp); tty_unlock(tp); @@ -2514,8 +2549,9 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) proc_childdrainstart(p); while ((q = p->p_children.lh_first) != NULL) { if (q->p_stat == SZOMB) { - if (p != q->p_pptr) + if (p != q->p_pptr) { panic("parent child linkage broken"); + } /* check for lookups by zomb sysctl */ while ((q->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { msleep(&q->p_stat, proc_list_mlock, PWAIT, "waitcoll", 0); @@ -2527,15 +2563,15 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) * the reference here exclusively and their can be * no waiters. So there is no need for a wakeup * after we are done. AlsO the reap frees the structure - * and the proc struct cannot be used for wakeups as well. + * and the proc struct cannot be used for wakeups as well. * It is safe to use q here as this is system reap */ (void)reap_child_locked(p, q, 1, 0, 1, 0); } else { /* - * Traced processes are killed - * since their existence means someone is messing up. - */ + * Traced processes are killed + * since their existence means someone is messing up. + */ if (q->p_lflag & P_LTRACED) { struct proc *opp; @@ -2565,12 +2601,12 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) proc_unlock(q); /* - * The sigwait_thread could be stopped at a - * breakpoint. Wake it up to kill. - * Need to do this as it could be a thread which is not - * the first thread in the task. So any attempts to kill - * the process would result into a deadlock on q->sigwait. - */ + * The sigwait_thread could be stopped at a + * breakpoint. Wake it up to kill. + * Need to do this as it could be a thread which is not + * the first thread in the task. So any attempts to kill + * the process would result into a deadlock on q->sigwait. + */ thread_resume(thread); clear_wait(thread, THREAD_INTERRUPTED); threadsignal(thread, SIGKILL, 0, TRUE); @@ -2605,39 +2641,39 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) * structure, this information is lost. 
*/ if (rup != NULL) { - rup->ru = p->p_stats->p_ru; - timerclear(&rup->ru.ru_utime); - timerclear(&rup->ru.ru_stime); + rup->ru = p->p_stats->p_ru; + timerclear(&rup->ru.ru_utime); + timerclear(&rup->ru.ru_stime); #ifdef FIXME - if (task) { - mach_task_basic_info_data_t tinfo; - task_thread_times_info_data_t ttimesinfo; - int task_info_stuff, task_ttimes_stuff; - struct timeval ut,st; - - task_info_stuff = MACH_TASK_BASIC_INFO_COUNT; - task_info(task, MACH_TASK_BASIC_INFO, - &tinfo, &task_info_stuff); - p->p_ru->ru.ru_utime.tv_sec = tinfo.user_time.seconds; - p->p_ru->ru.ru_utime.tv_usec = tinfo.user_time.microseconds; - p->p_ru->ru.ru_stime.tv_sec = tinfo.system_time.seconds; - p->p_ru->ru.ru_stime.tv_usec = tinfo.system_time.microseconds; - - task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT; - task_info(task, TASK_THREAD_TIMES_INFO, - &ttimesinfo, &task_ttimes_stuff); - - ut.tv_sec = ttimesinfo.user_time.seconds; - ut.tv_usec = ttimesinfo.user_time.microseconds; - st.tv_sec = ttimesinfo.system_time.seconds; - st.tv_usec = ttimesinfo.system_time.microseconds; - timeradd(&ut,&p->p_ru->ru.ru_utime,&p->p_ru->ru.ru_utime); - timeradd(&st,&p->p_ru->ru.ru_stime,&p->p_ru->ru.ru_stime); - } + if (task) { + mach_task_basic_info_data_t tinfo; + task_thread_times_info_data_t ttimesinfo; + int task_info_stuff, task_ttimes_stuff; + struct timeval ut, st; + + task_info_stuff = MACH_TASK_BASIC_INFO_COUNT; + task_info(task, MACH_TASK_BASIC_INFO, + &tinfo, &task_info_stuff); + p->p_ru->ru.ru_utime.tv_sec = tinfo.user_time.seconds; + p->p_ru->ru.ru_utime.tv_usec = tinfo.user_time.microseconds; + p->p_ru->ru.ru_stime.tv_sec = tinfo.system_time.seconds; + p->p_ru->ru.ru_stime.tv_usec = tinfo.system_time.microseconds; + + task_ttimes_stuff = TASK_THREAD_TIMES_INFO_COUNT; + task_info(task, TASK_THREAD_TIMES_INFO, + &ttimesinfo, &task_ttimes_stuff); + + ut.tv_sec = ttimesinfo.user_time.seconds; + ut.tv_usec = ttimesinfo.user_time.microseconds; + st.tv_sec = ttimesinfo.system_time.seconds; + st.tv_usec = ttimesinfo.system_time.microseconds; + timeradd(&ut, &p->p_ru->ru.ru_utime, &p->p_ru->ru.ru_utime); + timeradd(&st, &p->p_ru->ru.ru_stime, &p->p_ru->ru.ru_stime); + } #endif /* FIXME */ - ruadd(&rup->ru, &p->p_stats->p_cru); + ruadd(&rup->ru, &p->p_stats->p_cru); gather_rusage_info(p, &rup->ri, RUSAGE_INFO_CURRENT); rup->ri.ri_phys_footprint = 0; @@ -2740,7 +2776,7 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) * the reference here exclusively and their can be * no waiters. So there is no need for a wakeup * after we are done. AlsO the reap frees the structure - * and the proc struct cannot be used for wakeups as well. + * and the proc struct cannot be used for wakeups as well. * It is safe to use p here as this is system reap */ (void)reap_child_locked(pp, p, 0, 0, 1, 1); @@ -2749,14 +2785,14 @@ vfork_exit_internal(proc_t p, int rv, int forceexit) proc_rele(pp); } - + /* * munge_rusage * LP64 support - long is 64 bits if we are dealing with a 64 bit user * process. We munge the kernel version of rusage into the * 64 bit version. 
*/ -__private_extern__ void +__private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p) { /* Zero-out struct so that padding is cleared */ @@ -2788,9 +2824,11 @@ munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusa } /* For a 64-bit kernel and 32-bit userspace, munging may be needed */ -__private_extern__ void +__private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p) { + bzero(a_user_rusage_p, sizeof(struct user32_rusage)); + /* timeval changes size, so utime and stime need special handling */ a_user_rusage_p->ru_utime.tv_sec = a_rusage_p->ru_utime.tv_sec; a_user_rusage_p->ru_utime.tv_usec = a_rusage_p->ru_utime.tv_usec; @@ -2831,4 +2869,3 @@ kdp_wait4_find_process(thread_t thread, __unused event64_t wait_event, thread_wa // See man wait4 for other valid wait4 arguments. waitinfo->owner = args->pid; } - diff --git a/bsd/kern/kern_fork.c b/bsd/kern/kern_fork.c index 2fb8a03d6..772c16355 100644 --- a/bsd/kern/kern_fork.c +++ b/bsd/kern/kern_fork.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007, 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. 
All Rights Reserved */ @@ -137,9 +137,9 @@ static void (*dtrace_proc_waitfor_hook)(proc_t) = NULL; #include #include -#include /* for shmfork() */ -#include /* for thread_create() */ -#include /* for thread_resume() */ +#include /* for shmfork() */ +#include /* for thread_create() */ +#include /* for thread_resume() */ #include @@ -153,23 +153,24 @@ extern void act_thread_catt(void *ctx); void thread_set_child(thread_t child, int pid); void *act_thread_csave(void); extern boolean_t task_is_exec_copy(task_t); +int nextpidversion = 0; thread_t cloneproc(task_t, coalition_t *, proc_t, int, int); proc_t forkproc(proc_t); void forkproc_free(proc_t); thread_t fork_create_child(task_t parent_task, - coalition_t *parent_coalitions, - proc_t child, - int inherit_memory, - int is_64bit_addr, - int is_64bit_data, - int in_exec); + coalition_t *parent_coalitions, + proc_t child, + int inherit_memory, + int is_64bit_addr, + int is_64bit_data, + int in_exec); void proc_vfork_begin(proc_t parent_proc); void proc_vfork_end(proc_t parent_proc); -#define DOFORK 0x1 /* fork() system call */ -#define DOVFORK 0x2 /* vfork() system call */ +#define DOFORK 0x1 /* fork() system call */ +#define DOVFORK 0x2 /* vfork() system call */ /* * proc_vfork_begin @@ -212,10 +213,12 @@ proc_vfork_end(proc_t parent_proc) { proc_lock(parent_proc); parent_proc->p_vforkcnt--; - if (parent_proc->p_vforkcnt < 0) + if (parent_proc->p_vforkcnt < 0) { panic("vfork cnt is -ve"); - if (parent_proc->p_vforkcnt == 0) + } + if (parent_proc->p_vforkcnt == 0) { parent_proc->p_lflag &= ~P_LVFORK; + } proc_unlock(parent_proc); } @@ -305,7 +308,7 @@ vfork(proc_t parent_proc, __unused struct vfork_args *uap, int32_t *retval) proc_t child_proc = ut->uu_proc; retval[0] = child_proc->p_pid; - retval[1] = 1; /* flag child return for user space */ + retval[1] = 1; /* flag child return for user space */ /* * Drop the signal lock on the child which was taken on our @@ -320,7 +323,7 @@ vfork(proc_t parent_proc, __unused struct vfork_args *uap, int32_t *retval) ut->uu_flag &= ~UT_VFORKING; } - return (err); + return err; } @@ -376,7 +379,7 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit { thread_t parent_thread = (thread_t)current_thread(); uthread_t parent_uthread = (uthread_t)get_bsdthread_info(parent_thread); - proc_t child_proc = NULL; /* set in switch, but compiler... */ + proc_t child_proc = NULL; /* set in switch, but compiler... */ thread_t child_thread = NULL; uid_t uid; int count; @@ -403,7 +406,7 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit #endif proc_list_unlock(); tablefull("proc"); - return (EAGAIN); + return EAGAIN; } proc_list_unlock(); @@ -425,7 +428,7 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit */ panic("The per-user process limit has been hit; parent pid=%d, uid=%d", parent_proc->p_pid, uid); #endif - err = EAGAIN; + err = EAGAIN; goto bad; } @@ -435,12 +438,12 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit * it to fork. This is an advisory-only check. */ err = mac_proc_check_fork(parent_proc); - if (err != 0) { + if (err != 0) { goto bad; } #endif - switch(kind) { + switch (kind) { case PROC_CREATE_VFORK: /* * Prevent a vfork while we are in vfork(); we should @@ -551,7 +554,7 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit /* XXX is this actually necessary, given syscall return? 
*/ thread_set_child(parent_thread, child_proc->p_pid); - child_proc->p_acflag = AFORK; /* forked but not exec'ed */ + child_proc->p_acflag = AFORK; /* forked but not exec'ed */ /* * Preserve synchronization semantics of vfork. If @@ -559,7 +562,7 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit * on child, and sleep on our proc (in case of exit). */ child_proc->p_lflag |= P_LPPWAIT; - pinsertchild(parent_proc, child_proc); /* set visible */ + pinsertchild(parent_proc, child_proc); /* set visible */ break; @@ -572,7 +575,7 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit */ spawn = 1; - /* FALLSTHROUGH */ + /* FALLSTHROUGH */ case PROC_CREATE_FORK: /* @@ -582,10 +585,10 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit * differences. Contrarily, spawned processes do not inherit. */ if ((child_thread = cloneproc(parent_proc->task, - spawn ? coalitions : NULL, - parent_proc, - spawn ? FALSE : TRUE, - FALSE)) == NULL) { + spawn ? coalitions : NULL, + parent_proc, + spawn ? FALSE : TRUE, + FALSE)) == NULL) { /* Failed to create thread */ err = EAGAIN; goto bad; @@ -641,11 +644,11 @@ fork1(proc_t parent_proc, thread_t *child_threadp, int kind, coalition_t *coalit */ thread_set_child(child_thread, child_proc->p_pid); - child_proc->p_acflag = AFORK; /* forked but not exec'ed */ + child_proc->p_acflag = AFORK; /* forked but not exec'ed */ #if CONFIG_DTRACE dtrace_proc_fork(parent_proc, child_proc, spawn); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ if (!spawn) { /* * Of note, we need to initialize the bank context behind @@ -675,7 +678,7 @@ bad: (void)chgproccnt(uid, -1); } - return (err); + return err; } @@ -705,7 +708,7 @@ vfork_return(proc_t child_proc, int32_t *retval, int rval) proc_t parent_proc = get_bsdtask_info(parent_task); thread_t th = current_thread(); uthread_t uth = get_bsdthread_info(th); - + act_thread_catt(uth->uu_userstate); /* clear vfork state in parent proc structure */ @@ -731,7 +734,7 @@ vfork_return(proc_t child_proc, int32_t *retval, int rval) if (retval) { retval[0] = rval; - retval[1] = 0; /* mark parent */ + retval[1] = 0; /* mark parent */ } } @@ -750,7 +753,7 @@ vfork_return(proc_t child_proc, int32_t *retval, int rval) * is_64bit_addr TRUE, if the child being created will * be associated with a 64 bit address space * is_64bit_data TRUE if the child being created will use a - 64-bit register state + * 64-bit register state * in_exec TRUE, if called from execve or posix spawn set exec * FALSE, if called from fork or vfexec * @@ -772,26 +775,26 @@ vfork_return(proc_t child_proc, int32_t *retval, int rval) */ thread_t fork_create_child(task_t parent_task, - coalition_t *parent_coalitions, - proc_t child_proc, - int inherit_memory, - int is_64bit_addr, - int is_64bit_data, - int in_exec) + coalition_t *parent_coalitions, + proc_t child_proc, + int inherit_memory, + int is_64bit_addr, + int is_64bit_data, + int in_exec) { - thread_t child_thread = NULL; - task_t child_task; - kern_return_t result; + thread_t child_thread = NULL; + task_t child_task; + kern_return_t result; /* Create a new task for the child process */ result = task_create_internal(parent_task, - parent_coalitions, - inherit_memory, - is_64bit_addr, - is_64bit_data, - TF_LRETURNWAIT | TF_LRETURNWAITER, /* All created threads will wait in task_wait_to_return */ - in_exec ? 
TPF_EXEC_COPY : TPF_NONE, /* Mark the task exec copy if in execve */ - &child_task); + parent_coalitions, + inherit_memory, + is_64bit_addr, + is_64bit_data, + TF_LRETURNWAIT | TF_LRETURNWAITER, /* All created threads will wait in task_wait_to_return */ + in_exec ? TPF_EXEC_COPY : TPF_NONE, /* Mark the task exec copy if in execve */ + &child_task); if (result != KERN_SUCCESS) { printf("%s: task_create_internal failed. Code: %d\n", __func__, result); @@ -810,24 +813,26 @@ fork_create_child(task_t parent_task, set_bsdtask_info(child_task, child_proc); /* Propagate CPU limit timer from parent */ - if (timerisset(&child_proc->p_rlim_cpu)) + if (timerisset(&child_proc->p_rlim_cpu)) { task_vtimer_set(child_task, TASK_VTIMER_RLIM); + } /* * Set child process BSD visible scheduler priority if nice value * inherited from parent */ - if (child_proc->p_nice != 0) + if (child_proc->p_nice != 0) { resetpriority(child_proc); + } /* * Create a new thread for the child process * The new thread is waiting on the event triggered by 'task_clear_return_wait' */ result = thread_create_waiting(child_task, - (thread_continue_t)task_wait_to_return, - task_get_return_wait_event(child_task), - &child_thread); + (thread_continue_t)task_wait_to_return, + task_get_return_wait_event(child_task), + &child_thread); if (result != KERN_SUCCESS) { printf("%s: thread_create failed. Code: %d\n", @@ -837,14 +842,14 @@ fork_create_child(task_t parent_task, } /* - * Tag thread as being the first thread in its task. - */ + * Tag thread as being the first thread in its task. + */ thread_set_tag(child_thread, THREAD_TAG_MAINTHREAD); bad: thread_yield_internal(1); - return(child_thread); + return child_thread; } @@ -888,7 +893,7 @@ fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval) thread_t child_thread; int err; - retval[1] = 0; /* flag parent return for user space */ + retval[1] = 0; /* flag parent return for user space */ if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) { task_t child_task; @@ -911,8 +916,9 @@ fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval) DTRACE_PROC1(create, proc_t, child_proc); #if CONFIG_DTRACE - if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) + if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL) { (*dtrace_proc_waitfor_hook)(child_proc); + } #endif /* "Return" to the child */ @@ -925,7 +931,7 @@ fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval) thread_deallocate(child_thread); } - return(err); + return err; } @@ -996,12 +1002,12 @@ cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc const int parent_64bit_data = (parent_task == TASK_NULL) ? 
parent_64bit_addr : task_get_64bit_data(parent_task); child_thread = fork_create_child(parent_task, - parent_coalitions, - child_proc, - inherit_memory, - parent_64bit_addr, - parent_64bit_data, - FALSE); + parent_coalitions, + child_proc, + inherit_memory, + parent_64bit_addr, + parent_64bit_data, + FALSE); if (child_thread == NULL) { /* @@ -1035,7 +1041,7 @@ cloneproc(task_t parent_task, coalition_t *parent_coalitions, proc_t parent_proc */ child_proc->p_stat = SRUN; bad: - return(child_thread); + return child_thread; } @@ -1107,7 +1113,6 @@ forkproc_free(proc_t p) /* Update the audit session proc count */ AUDIT_SESSION_PROCEXIT(p); -#if CONFIG_FINE_LOCK_GROUPS lck_mtx_destroy(&p->p_mlock, proc_mlock_grp); lck_mtx_destroy(&p->p_fdmlock, proc_fdmlock_grp); lck_mtx_destroy(&p->p_ucred_mlock, proc_ucred_mlock_grp); @@ -1115,15 +1120,6 @@ forkproc_free(proc_t p) lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp); #endif lck_spin_destroy(&p->p_slock, proc_slock_grp); -#else /* CONFIG_FINE_LOCK_GROUPS */ - lck_mtx_destroy(&p->p_mlock, proc_lck_grp); - lck_mtx_destroy(&p->p_fdmlock, proc_lck_grp); - lck_mtx_destroy(&p->p_ucred_mlock, proc_lck_grp); -#if CONFIG_DTRACE - lck_mtx_destroy(&p->p_dtrace_sprlock, proc_lck_grp); -#endif - lck_spin_destroy(&p->p_slock, proc_lck_grp); -#endif /* CONFIG_FINE_LOCK_GROUPS */ /* Release the credential reference */ kauth_cred_unref(&p->p_ucred); @@ -1168,14 +1164,14 @@ forkproc_free(proc_t p) proc_t forkproc(proc_t parent_proc) { - proc_t child_proc; /* Our new process */ - static int nextpid = 0, pidwrap = 0, nextpidversion = 0; + proc_t child_proc; /* Our new process */ + static int nextpid = 0, pidwrap = 0; static uint64_t nextuniqueid = 0; int error = 0; struct session *sessp; uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread()); - MALLOC_ZONE(child_proc, proc_t , sizeof *child_proc, M_PROC, M_WAITOK); + MALLOC_ZONE(child_proc, proc_t, sizeof *child_proc, M_PROC, M_WAITOK); if (child_proc == NULL) { printf("forkproc: M_PROC zone exhausted\n"); goto bad; @@ -1184,7 +1180,7 @@ forkproc(proc_t parent_proc) bzero(child_proc, sizeof *child_proc); MALLOC_ZONE(child_proc->p_stats, struct pstats *, - sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK); + sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK); if (child_proc->p_stats == NULL) { printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n"); FREE_ZONE(child_proc, sizeof *child_proc, M_PROC); @@ -1192,7 +1188,7 @@ forkproc(proc_t parent_proc) goto bad; } MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *, - sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK); + sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK); if (child_proc->p_sigacts == NULL) { printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n"); FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS); @@ -1216,7 +1212,7 @@ forkproc(proc_t parent_proc) /* - * Find an unused PID. + * Find an unused PID. 
*/ proc_list_lock(); @@ -1233,9 +1229,8 @@ retry: pidwrap = 1; } if (pidwrap != 0) { - /* if the pid stays in hash both for zombie and runniing state */ - if (pfind_locked(nextpid) != PROC_NULL) { + if (pfind_locked(nextpid) != PROC_NULL) { nextpid++; goto retry; } @@ -1243,22 +1238,23 @@ retry: if (pgfind_internal(nextpid) != PGRP_NULL) { nextpid++; goto retry; - } + } if (session_find_internal(nextpid) != SESSION_NULL) { nextpid++; goto retry; - } + } } nprocs++; child_proc->p_pid = nextpid; - child_proc->p_responsible_pid = nextpid; /* initially responsible for self */ - child_proc->p_idversion = nextpidversion++; + child_proc->p_responsible_pid = nextpid; /* initially responsible for self */ + child_proc->p_idversion = OSIncrementAtomic(&nextpidversion); /* kernel process is handcrafted and not from fork, so start from 1 */ child_proc->p_uniqueid = ++nextuniqueid; #if 1 if (child_proc->p_pid != 0) { - if (pfind_locked(child_proc->p_pid) != PROC_NULL) + if (pfind_locked(child_proc->p_pid) != PROC_NULL) { panic("proc in the list already\n"); + } } #endif /* Insert in the hash */ @@ -1300,8 +1296,9 @@ retry: #else /* !CONFIG_EMBEDDED */ child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_DISABLE_ASLR | P_SUGID)); #endif /* !CONFIG_EMBEDDED */ - if (parent_proc->p_flag & P_PROFIL) + if (parent_proc->p_flag & P_PROFIL) { startprofclock(child_proc); + } child_proc->p_vfs_iopolicy = (parent_proc->p_vfs_iopolicy & (P_VFS_IOPOLICY_VALID_MASK)); @@ -1315,7 +1312,6 @@ retry: /* update audit session proc count */ AUDIT_SESSION_PROCNEW(child_proc); -#if CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&child_proc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&child_proc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); lck_mtx_init(&child_proc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr); @@ -1323,15 +1319,7 @@ retry: lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr); #endif lck_spin_init(&child_proc->p_slock, proc_slock_grp, proc_lck_attr); -#else /* !CONFIG_FINE_LOCK_GROUPS */ - lck_mtx_init(&child_proc->p_mlock, proc_lck_grp, proc_lck_attr); - lck_mtx_init(&child_proc->p_fdmlock, proc_lck_grp, proc_lck_attr); - lck_mtx_init(&child_proc->p_ucred_mlock, proc_lck_grp, proc_lck_attr); -#if CONFIG_DTRACE - lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr); -#endif - lck_spin_init(&child_proc->p_slock, proc_lck_grp, proc_lck_attr); -#endif /* !CONFIG_FINE_LOCK_GROUPS */ + klist_init(&child_proc->p_klist); if (child_proc->p_textvp != NULLVP) { @@ -1340,8 +1328,9 @@ retry: if (vnode_getwithref(child_proc->p_textvp) == 0) { error = vnode_ref(child_proc->p_textvp); vnode_put(child_proc->p_textvp); - if (error != 0) + if (error != 0) { child_proc->p_textvp = NULLVP; + } } } @@ -1376,15 +1365,17 @@ retry: bzero(child_proc->p_stats, sizeof(*child_proc->p_stats)); microtime_with_abstime(&child_proc->p_start, &child_proc->p_stats->ps_start); - if (parent_proc->p_sigacts != NULL) + if (parent_proc->p_sigacts != NULL) { (void)memcpy(child_proc->p_sigacts, - parent_proc->p_sigacts, sizeof *child_proc->p_sigacts); - else + parent_proc->p_sigacts, sizeof *child_proc->p_sigacts); + } else { (void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts); + } sessp = proc_session(parent_proc); - if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT) + if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT) { OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag); + } session_rele(sessp); /* @@ -1447,8 +1438,8 @@ retry: 
child_proc->p_memstat_effectivepriority = JETSAM_PRIORITY_DEFAULT; child_proc->p_memstat_requestedpriority = JETSAM_PRIORITY_DEFAULT; child_proc->p_memstat_userdata = 0; - child_proc->p_memstat_idle_start = 0; - child_proc->p_memstat_idle_delta = 0; + child_proc->p_memstat_idle_start = 0; + child_proc->p_memstat_idle_delta = 0; child_proc->p_memstat_memlimit = 0; child_proc->p_memstat_memlimit_active = 0; child_proc->p_memstat_memlimit_inactive = 0; @@ -1460,7 +1451,7 @@ retry: #endif /* CONFIG_MEMORYSTATUS */ bad: - return(child_proc); + return child_proc; } void @@ -1479,7 +1470,7 @@ proc_unlock(proc_t p) void proc_spinlock(proc_t p) { - lck_spin_lock(&p->p_slock); + lck_spin_lock_grp(&p->p_slock, proc_slock_grp); } void @@ -1488,25 +1479,25 @@ proc_spinunlock(proc_t p) lck_spin_unlock(&p->p_slock); } -void +void proc_list_lock(void) { lck_mtx_lock(proc_list_mlock); } -void +void proc_list_unlock(void) { lck_mtx_unlock(proc_list_mlock); } -void +void proc_ucred_lock(proc_t p) { lck_mtx_lock(&p->p_ucred_mlock); } -void +void proc_ucred_unlock(proc_t p) { lck_mtx_unlock(&p->p_ucred_mlock); @@ -1530,9 +1521,9 @@ uthread_zone_init(void) rethrottle_lock_attr = lck_attr_alloc_init(); uthread_zone = zinit(sizeof(struct uthread), - thread_max * sizeof(struct uthread), - THREAD_CHUNK * sizeof(struct uthread), - "uthreads"); + thread_max * sizeof(struct uthread), + THREAD_CHUNK * sizeof(struct uthread), + "uthreads"); } void * @@ -1543,8 +1534,9 @@ uthread_alloc(task_t task, thread_t thread, int noinherit) uthread_t uth_parent; void *ut; - if (uthread_zone == NULL) + if (uthread_zone == NULL) { uthread_zone_init(); + } ut = (void *)zalloc(uthread_zone); bzero(ut, sizeof(struct uthread)); @@ -1554,7 +1546,7 @@ uthread_alloc(task_t task, thread_t thread, int noinherit) uth->uu_thread = thread; lck_spin_init(&uth->uu_rethrottle_lock, rethrottle_lock_grp, - rethrottle_lock_attr); + rethrottle_lock_attr); /* * Thread inherits credential from the creating thread, if both @@ -1565,7 +1557,7 @@ uthread_alloc(task_t task, thread_t thread, int noinherit) * one later, it will be lazily assigned from the task's process. 
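 *
 * Condensed, the assignments that follow behave like this sketch:
 *
 *	if (!noinherit && task == current_task() &&
 *	    uth_parent && IS_VALID_CRED(uth_parent->uu_ucred)) {
 *		kauth_cred_ref(uth_parent->uu_ucred);
 *		uth->uu_ucred = uth_parent->uu_ucred;	(share parent's cred)
 *	} else if (task != kernel_task && p != PROC_NULL) {
 *		uth->uu_ucred = kauth_cred_proc_ref(p);	(take the proc's cred)
 *	} else {
 *		uth->uu_ucred = NOCRED;			(assigned lazily later)
 *	}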
 */
	uth_parent = (uthread_t)get_bsdthread_info(current_thread());
-	if ((noinherit == 0) && task == current_task() &&
+	if ((noinherit == 0) && task == current_task() &&
	    uth_parent != NULL && IS_VALID_CRED(uth_parent->uu_ucred)) {
 		/*
@@ -1576,28 +1568,30 @@ uthread_alloc(task_t task, thread_t thread, int noinherit)
 		kauth_cred_ref(uth_parent->uu_ucred);
 		uth->uu_ucred = uth_parent->uu_ucred;
 		/* the credential we just inherited is an assumed credential */
-		if (uth_parent->uu_flag & UT_SETUID)
+		if (uth_parent->uu_flag & UT_SETUID) {
 			uth->uu_flag |= UT_SETUID;
+		}
 	} else {
 		/* sometimes workqueue threads are created out of task context */
-		if ((task != kernel_task) && (p != PROC_NULL))
+		if ((task != kernel_task) && (p != PROC_NULL)) {
 			uth->uu_ucred = kauth_cred_proc_ref(p);
-		else
+		} else {
 			uth->uu_ucred = NOCRED;
+		}
 	}
-
+
 	if ((task != kernel_task) && p) {
-		proc_lock(p);
 		if (noinherit != 0) {
 			/* workq threads will not inherit masks */
 			uth->uu_sigmask = ~workq_threadmask;
 		} else if (uth_parent) {
-			if (uth_parent->uu_flag & UT_SAS_OLDMASK)
+			if (uth_parent->uu_flag & UT_SAS_OLDMASK) {
 				uth->uu_sigmask = uth_parent->uu_oldmask;
-			else
+			} else {
 				uth->uu_sigmask = uth_parent->uu_sigmask;
+			}
 		}
 		uth->uu_context.vc_thread = thread;
 		/*
@@ -1616,7 +1610,7 @@ uthread_alloc(task_t task, thread_t thread, int noinherit)
 #endif
 	}

-	return (ut);
+	return ut;
 }

 /*
@@ -1645,7 +1639,7 @@ uthread_cleanup_name(void *uthread)
 	return;
 }

-/*
+/*
 * This routine frees all the BSD context in uthread except the credential.
 * It does not free the uthread structure as well
 */
@@ -1701,8 +1695,9 @@ uthread_cleanup(task_t task, void *uthread, void * bsd_info)
 	}

 	if (uth->uu_wqset) {
-		if (waitq_set_is_valid(uth->uu_wqset))
+		if (waitq_set_is_valid(uth->uu_wqset)) {
 			waitq_set_deinit(uth->uu_wqset);
+		}
 		FREE(uth->uu_wqset, M_SELECT);
 		uth->uu_wqset = NULL;
 		uth->uu_wqstate_sz = 0;
@@ -1711,8 +1706,7 @@ uthread_cleanup(task_t task, void *uthread, void * bsd_info)
 	os_reason_free(uth->uu_exit_reason);

 	if ((task != kernel_task) && p) {
-
-		if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
+		if (((uth->uu_flag & UT_VFORK) == UT_VFORK) && (uth->uu_proc != PROC_NULL)) {
 			vfork_exit_internal(uth->uu_proc, 0, 1);
 		}
 		/*
diff --git a/bsd/kern/kern_guarded.c b/bsd/kern/kern_guarded.c
index 795eb5667..dc29cb531 100644
--- a/bsd/kern/kern_guarded.c
+++ b/bsd/kern/kern_guarded.c
@@ -2,7 +2,7 @@
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- *
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -60,8 +60,8 @@ #define f_flag f_fglob->fg_flag #define f_type f_fglob->fg_ops->fo_type extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp, - user_addr_t bufp, user_size_t nbyte, off_t offset, - int flags, user_ssize_t *retval ); + user_addr_t bufp, user_size_t nbyte, off_t offset, + int flags, user_ssize_t *retval ); extern int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval); /* @@ -69,7 +69,7 @@ extern int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t * */ kern_return_t task_exception_notify(exception_type_t exception, - mach_exception_data_type_t code, mach_exception_data_type_t subcode); + mach_exception_data_type_t code, mach_exception_data_type_t subcode); kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); /* @@ -87,17 +87,17 @@ kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_ struct guarded_fileproc { struct fileproc gf_fileproc; - u_int gf_magic; - u_int gf_attrs; - guardid_t gf_guard; + u_int gf_magic; + u_int gf_attrs; + guardid_t gf_guard; }; -const size_t sizeof_guarded_fileproc = sizeof (struct guarded_fileproc); +const size_t sizeof_guarded_fileproc = sizeof(struct guarded_fileproc); -#define FP_TO_GFP(fp) ((struct guarded_fileproc *)(fp)) -#define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc) +#define FP_TO_GFP(fp) ((struct guarded_fileproc *)(fp)) +#define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc) -#define GUARDED_FILEPROC_MAGIC 0x29083 +#define GUARDED_FILEPROC_MAGIC 0x29083 struct gfp_crarg { guardid_t gca_guard; @@ -110,16 +110,17 @@ guarded_fileproc_alloc_init(void *crarg) struct gfp_crarg *aarg = crarg; struct guarded_fileproc *gfp; - if ((gfp = kalloc(sizeof (*gfp))) == NULL) - return (NULL); + if ((gfp = kalloc(sizeof(*gfp))) == NULL) { + return NULL; + } - bzero(gfp, sizeof (*gfp)); + bzero(gfp, sizeof(*gfp)); gfp->gf_fileproc.f_flags = FTYPE_GUARDED; gfp->gf_magic = GUARDED_FILEPROC_MAGIC; gfp->gf_guard = aarg->gca_guard; gfp->gf_attrs = aarg->gca_attrs; - return (GFP_TO_FP(gfp)); + return GFP_TO_FP(gfp); } void @@ -128,10 +129,11 @@ guarded_fileproc_free(struct fileproc *fp) struct guarded_fileproc *gfp = FP_TO_GFP(fp); if (FILEPROC_TYPE(fp) != FTYPE_GUARDED || - GUARDED_FILEPROC_MAGIC != gfp->gf_magic) + GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { panic("%s: corrupt fp %p flags %x", __func__, fp, fp->f_flags); + } - kfree(gfp, sizeof (*gfp)); + kfree(gfp, sizeof(*gfp)); } static int @@ -141,31 +143,34 @@ fp_lookup_guarded(proc_t p, int fd, guardid_t guard, struct fileproc *fp; int error; - if ((error = fp_lookup(p, fd, &fp, locked)) != 0) - return (error); + if ((error = fp_lookup(p, fd, &fp, locked)) != 0) { + return error; + } if (FILEPROC_TYPE(fp) != FTYPE_GUARDED) { (void) fp_drop(p, fd, fp, locked); - return (EINVAL); + return EINVAL; } struct guarded_fileproc *gfp = FP_TO_GFP(fp); - if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) + if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { panic("%s: corrupt fp %p", __func__, fp); + } if (guard != gfp->gf_guard) { (void) fp_drop(p, fd, fp, locked); - return (EPERM); /* *not* a mismatch exception */ + return EPERM; /* *not* a mismatch exception */ } - if (gfpp) + if (gfpp) { *gfpp = gfp; - return (0); + } + return 0; } /* * Expected use pattern: * * if (FP_ISGUARDED(fp, GUARD_CLOSE)) { - * error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE); + * error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE); * proc_fdunlock(p); * return (error); * } @@ 
-177,12 +182,13 @@ fp_isguarded(struct fileproc *fp, u_int attrs) if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) { struct guarded_fileproc *gfp = FP_TO_GFP(fp); - if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) + if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { panic("%s: corrupt gfp %p flags %x", __func__, gfp, fp->f_flags); - return ((attrs & gfp->gf_attrs) == attrs); + } + return (attrs & gfp->gf_attrs) == attrs; } - return (0); + return 0; } extern char *proc_name_address(void *p); @@ -190,8 +196,9 @@ extern char *proc_name_address(void *p); int fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int flavor) { - if (FILEPROC_TYPE(fp) != FTYPE_GUARDED) + if (FILEPROC_TYPE(fp) != FTYPE_GUARDED) { panic("%s corrupt fp %p flags %x", __func__, fp, fp->f_flags); + } struct guarded_fileproc *gfp = FP_TO_GFP(fp); /* all gfd fields protected via proc_fdlock() */ @@ -205,7 +212,7 @@ fp_guard_exception(proc_t p, int fd, struct fileproc *fp, u_int flavor) thread_t t = current_thread(); thread_guard_violation(t, code, subcode); - return (EPERM); + return EPERM; } /* @@ -251,16 +258,18 @@ fd_guard_ast( int guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval) { - if ((uap->flags & O_CLOEXEC) == 0) - return (EINVAL); + if ((uap->flags & O_CLOEXEC) == 0) { + return EINVAL; + } #define GUARD_REQUIRED (GUARD_DUP) -#define GUARD_ALL (GUARD_REQUIRED | \ - (GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT | GUARD_WRITE)) +#define GUARD_ALL (GUARD_REQUIRED | \ + (GUARD_CLOSE | GUARD_SOCKET_IPC | GUARD_FILEPORT | GUARD_WRITE)) if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) || - ((uap->guardflags & ~GUARD_ALL) != 0)) - return (EINVAL); + ((uap->guardflags & ~GUARD_ALL) != 0)) { + return EINVAL; + } int error; struct gfp_crarg crarg = { @@ -268,14 +277,16 @@ guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval) }; if ((error = copyin(uap->guard, - &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0) - return (error); + &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) { + return error; + } /* * Disallow certain guard values -- is zero enough? 
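 *
 * A guard of zero is rejected below, so a typical userland call looks
 * something like this (illustrative values; see sys/guarded.h for the
 * actual prototype):
 *
 *	guardid_t guard = 0xfeedfacefeedfaceULL;	(any non-zero value)
 *	int fd = guarded_open_np("/tmp/f", &guard,
 *	    GUARD_DUP | GUARD_CLOSE, O_CREAT | O_RDWR | O_CLOEXEC, 0644);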
*/ - if (crarg.gca_guard == 0) - return (EINVAL); + if (crarg.gca_guard == 0) { + return EINVAL; + } struct filedesc *fdp = p->p_fd; struct vnode_attr va; @@ -288,10 +299,10 @@ guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval) VATTR_SET(&va, va_mode, cmode & ACCESSPERMS); NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, - uap->path, ctx); + uap->path, ctx); - return (open1(ctx, &nd, uap->flags | O_CLOFORK, &va, - guarded_fileproc_alloc_init, &crarg, retval)); + return open1(ctx, &nd, uap->flags | O_CLOFORK, &va, + guarded_fileproc_alloc_init, &crarg, retval); } /* @@ -305,12 +316,14 @@ guarded_open_np(proc_t p, struct guarded_open_np_args *uap, int32_t *retval) int guarded_open_dprotected_np(proc_t p, struct guarded_open_dprotected_np_args *uap, int32_t *retval) { - if ((uap->flags & O_CLOEXEC) == 0) - return (EINVAL); + if ((uap->flags & O_CLOEXEC) == 0) { + return EINVAL; + } if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) || - ((uap->guardflags & ~GUARD_ALL) != 0)) - return (EINVAL); + ((uap->guardflags & ~GUARD_ALL) != 0)) { + return EINVAL; + } int error; struct gfp_crarg crarg = { @@ -318,14 +331,16 @@ guarded_open_dprotected_np(proc_t p, struct guarded_open_dprotected_np_args *uap }; if ((error = copyin(uap->guard, - &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0) - return (error); + &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) { + return error; + } /* * Disallow certain guard values -- is zero enough? */ - if (crarg.gca_guard == 0) - return (EINVAL); + if (crarg.gca_guard == 0) { + return EINVAL; + } struct filedesc *fdp = p->p_fd; struct vnode_attr va; @@ -338,33 +353,33 @@ guarded_open_dprotected_np(proc_t p, struct guarded_open_dprotected_np_args *uap VATTR_SET(&va, va_mode, cmode & ACCESSPERMS); NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, - uap->path, ctx); + uap->path, ctx); - /* - * Initialize the extra fields in vnode_attr to pass down dataprotection + /* + * Initialize the extra fields in vnode_attr to pass down dataprotection * extra fields. * 1. target cprotect class. - * 2. set a flag to mark it as requiring open-raw-encrypted semantics. - */ - if (uap->flags & O_CREAT) { + * 2. set a flag to mark it as requiring open-raw-encrypted semantics. 
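 *
 * For example (illustrative values): a backup tool passing
 * dpflags = O_DP_GETRAWENCRYPTED with flags = O_RDONLY | O_CLOEXEC gets
 * VA_DP_RAWENCRYPTED set in va_dataprotect_flags below, while combining
 * it with O_RDWR or O_WRONLY is rejected with EINVAL, since raw
 * encrypted bytes may only be read.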
+ */ + if (uap->flags & O_CREAT) { VATTR_SET(&va, va_dataprotect_class, uap->dpclass); } - - if (uap->dpflags & (O_DP_GETRAWENCRYPTED|O_DP_GETRAWUNENCRYPTED)) { - if ( uap->flags & (O_RDWR | O_WRONLY)) { + + if (uap->dpflags & (O_DP_GETRAWENCRYPTED | O_DP_GETRAWUNENCRYPTED)) { + if (uap->flags & (O_RDWR | O_WRONLY)) { /* Not allowed to write raw encrypted bytes */ - return EINVAL; - } + return EINVAL; + } if (uap->dpflags & O_DP_GETRAWENCRYPTED) { - VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED); + VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED); } if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) { - VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED); + VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED); } } - return (open1(ctx, &nd, uap->flags | O_CLOFORK, &va, - guarded_fileproc_alloc_init, &crarg, retval)); + return open1(ctx, &nd, uap->flags | O_CLOFORK, &va, + guarded_fileproc_alloc_init, &crarg, retval); } /* @@ -380,8 +395,9 @@ int guarded_kqueue_np(proc_t p, struct guarded_kqueue_np_args *uap, int32_t *retval) { if (((uap->guardflags & GUARD_REQUIRED) != GUARD_REQUIRED) || - ((uap->guardflags & ~GUARD_ALL) != 0)) - return (EINVAL); + ((uap->guardflags & ~GUARD_ALL) != 0)) { + return EINVAL; + } int error; struct gfp_crarg crarg = { @@ -389,13 +405,15 @@ guarded_kqueue_np(proc_t p, struct guarded_kqueue_np_args *uap, int32_t *retval) }; if ((error = copyin(uap->guard, - &(crarg.gca_guard), sizeof (crarg.gca_guard))) != 0) - return (error); + &(crarg.gca_guard), sizeof(crarg.gca_guard))) != 0) { + return error; + } - if (crarg.gca_guard == 0) - return (EINVAL); + if (crarg.gca_guard == 0) { + return EINVAL; + } - return (kqueue_body(p, guarded_fileproc_alloc_init, &crarg, retval)); + return kqueue_body(p, guarded_fileproc_alloc_init, &crarg, retval); } /* @@ -412,17 +430,18 @@ guarded_close_np(proc_t p, struct guarded_close_np_args *uap, AUDIT_SYSCLOSE(p, fd); - if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0) - return (error); + if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) { + return error; + } proc_fdlock(p); if ((error = fp_lookup_guarded(p, fd, uguard, &gfp, 1)) != 0) { proc_fdunlock(p); - return (error); + return error; } error = close_internal_locked(p, fd, GFP_TO_FP(gfp), 0); proc_fdunlock(p); - return (error); + return error; } /* @@ -451,7 +470,7 @@ guarded_close_np(proc_t p, struct guarded_close_np_args *uap, * If 'nguard' is NULL, fd must be guarded at entry, * must match with what's already guarding the descriptor, and the * result will be to completely remove the guard. Note also that the - * fdflags are copied to the descriptor from the incoming *fdflagsp argument. + * fdflags are copied to the descriptor from the incoming *fdflagsp argument. 
* * If the descriptor is guarded, and neither 'guard' nor 'nguard' is NULL * and matches what's already guarding the descriptor, @@ -494,30 +513,30 @@ change_fdguard_np(proc_t p, struct change_fdguard_np_args *uap, int nfdflags = 0; if (0 != uap->guard && - 0 != (error = copyin(uap->guard, &oldg, sizeof (oldg)))) - return (error); /* can't copyin current guard */ - + 0 != (error = copyin(uap->guard, &oldg, sizeof(oldg)))) { + return error; /* can't copyin current guard */ + } if (0 != uap->nguard && - 0 != (error = copyin(uap->nguard, &newg, sizeof (newg)))) - return (error); /* can't copyin new guard */ - + 0 != (error = copyin(uap->nguard, &newg, sizeof(newg)))) { + return error; /* can't copyin new guard */ + } if (0 != uap->fdflagsp && - 0 != (error = copyin(uap->fdflagsp, &nfdflags, sizeof (nfdflags)))) - return (error); /* can't copyin new fdflags */ - + 0 != (error = copyin(uap->fdflagsp, &nfdflags, sizeof(nfdflags)))) { + return error; /* can't copyin new fdflags */ + } proc_fdlock(p); restart: if ((error = fp_lookup(p, fd, &fp, 1)) != 0) { proc_fdunlock(p); - return (error); + return error; } if (0 != uap->fdflagsp) { int ofdflags = FDFLAGS_GET(p, fd); int ofl = ((ofdflags & UF_EXCLOSE) ? FD_CLOEXEC : 0) | - ((ofdflags & UF_FORKCLOSE) ? FD_CLOFORK : 0); + ((ofdflags & UF_FORKCLOSE) ? FD_CLOFORK : 0); proc_fdunlock(p); - if (0 != (error = copyout(&ofl, uap->fdflagsp, sizeof (ofl)))) { + if (0 != (error = copyout(&ofl, uap->fdflagsp, sizeof(ofl)))) { proc_fdlock(p); goto dropout; /* can't copyout old fdflags */ } @@ -525,29 +544,34 @@ restart: } if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) { - if (0 == uap->guard || 0 == uap->guardflags) + if (0 == uap->guard || 0 == uap->guardflags) { error = EINVAL; /* missing guard! */ - else if (0 == oldg) + } else if (0 == oldg) { error = EPERM; /* guardids cannot be zero */ + } } else { - if (0 != uap->guard || 0 != uap->guardflags) + if (0 != uap->guard || 0 != uap->guardflags) { error = EINVAL; /* guard provided, but none needed! */ + } } - if (0 != error) + if (0 != error) { goto dropout; + } if (0 != uap->nguard) { /* * There's a new guard in town. */ - if (0 == newg) + if (0 == newg) { error = EINVAL; /* guards cannot contain zero */ - else if (((uap->nguardflags & GUARD_REQUIRED) != GUARD_REQUIRED) || - ((uap->nguardflags & ~GUARD_ALL) != 0)) + } else if (((uap->nguardflags & GUARD_REQUIRED) != GUARD_REQUIRED) || + ((uap->nguardflags & ~GUARD_ALL) != 0)) { error = EINVAL; /* must have valid attributes too */ - if (0 != error) + } + if (0 != error) { goto dropout; + } if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) { /* @@ -555,9 +579,10 @@ restart: */ struct guarded_fileproc *gfp = FP_TO_GFP(fp); - if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) + if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { panic("%s: corrupt gfp %p flags %x", - __func__, gfp, fp->f_flags); + __func__, gfp, fp->f_flags); + } if (oldg == gfp->gf_guard && uap->guardflags == gfp->gf_attrs) { @@ -567,12 +592,14 @@ restart: * fdflags "side-effects" as we go. Note that * userland can request FD_CLOFORK semantics. */ - if (gfp->gf_attrs & GUARD_CLOSE) + if (gfp->gf_attrs & GUARD_CLOSE) { FDFLAGS_CLR(p, fd, UF_FORKCLOSE); + } gfp->gf_guard = newg; gfp->gf_attrs = uap->nguardflags; - if (gfp->gf_attrs & GUARD_CLOSE) + if (gfp->gf_attrs & GUARD_CLOSE) { FDFLAGS_SET(p, fd, UF_FORKCLOSE); + } FDFLAGS_SET(p, fd, (nfdflags & FD_CLOFORK) ? 
UF_FORKCLOSE : 0); /* FG_CONFINED enforced regardless */ @@ -603,7 +630,7 @@ restart: .gca_attrs = uap->nguardflags }; struct fileproc *nfp = - guarded_fileproc_alloc_init(&crarg); + guarded_fileproc_alloc_init(&crarg); struct guarded_fileproc *gfp; proc_fdlock(p); @@ -611,8 +638,9 @@ restart: switch (error = fp_tryswap(p, fd, nfp)) { case 0: /* guarded-ness comes with side-effects */ gfp = FP_TO_GFP(nfp); - if (gfp->gf_attrs & GUARD_CLOSE) + if (gfp->gf_attrs & GUARD_CLOSE) { FDFLAGS_SET(p, fd, UF_FORKCLOSE); + } FDFLAGS_SET(p, fd, UF_EXCLOSE); (void) fp_drop(p, fd, nfp, 1); fileproc_free(fp); @@ -627,7 +655,7 @@ restart: break; } proc_fdunlock(p); - return (error); + return error; } } else { /* @@ -644,9 +672,10 @@ restart: goto dropout; } - if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) + if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) { panic("%s: corrupt gfp %p flags %x", - __func__, gfp, fp->f_flags); + __func__, gfp, fp->f_flags); + } if (oldg != gfp->gf_guard || uap->guardflags != gfp->gf_attrs) { @@ -679,7 +708,7 @@ restart: break; } proc_fdunlock(p); - return (error); + return error; } else { /* * Not already guarded, and no new guard? @@ -691,7 +720,7 @@ restart: dropout: (void) fp_drop(p, fd, fp, 1); proc_fdunlock(p); - return (error); + return error; } /* @@ -703,7 +732,7 @@ dropout: int guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t *retval) { - int error; + int error; int fd = uap->fd; guardid_t uguard; struct fileproc *fp; @@ -712,30 +741,32 @@ guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t AUDIT_ARG(fd, fd); - if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0) - return (error); + if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) { + return error; + } error = fp_lookup_guarded(p, fd, uguard, &gfp, 0); - if (error) - return(error); + if (error) { + return error; + } fp = GFP_TO_FP(gfp); if ((fp->f_flag & FWRITE) == 0) { error = EBADF; } else { - struct vfs_context context = *(vfs_context_current()); context.vc_ucred = fp->f_fglob->fg_cred; error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte, - (off_t)-1, 0, retval); + (off_t)-1, 0, retval); wrote_some = *retval > 0; } - if (wrote_some) - fp_drop_written(p, fd, fp); - else - fp_drop(p, fd, fp, 0); - return(error); + if (wrote_some) { + fp_drop_written(p, fd, fp); + } else { + fp_drop(p, fd, fp, 0); + } + return error; } /* @@ -744,11 +775,11 @@ guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t * * Initial implementation of guarded pwrites. 
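 *
 * Illustrative userland call (names per sys/guarded.h; values are
 * examples only):
 *
 *	ssize_t n = guarded_pwrite_np(fd, &guard, buf, nbyte, offset);
 *
 * The write proceeds only if 'guard' matches the guard installed on
 * fd; a mismatch fails the fp_lookup_guarded() check below with EPERM.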
 */
-	int
-	guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize_t *retval)
-	{
+int
+guarded_pwrite_np(struct proc *p, struct guarded_pwrite_np_args *uap, user_ssize_t *retval)
+{
 	struct fileproc *fp;
-	int error;
+	int error;
 	int fd = uap->fd;
 	vnode_t vp = (vnode_t)0;
 	guardid_t uguard;
@@ -757,12 +788,14 @@ guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t
 	AUDIT_ARG(fd, fd);

-	if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0)
-		return (error);
+	if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) {
+		return error;
+	}

 	error = fp_lookup_guarded(p, fd, uguard, &gfp, 0);
-	if (error)
-		return(error);
+	if (error) {
+		return error;
+	}

 	fp = GFP_TO_FP(gfp);
 	if ((fp->f_flag & FWRITE) == 0) {
@@ -779,7 +812,7 @@ guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t
 			if (vnode_isfifo(vp)) {
 				error = ESPIPE;
 				goto errout;
-			}
+			}
 			if ((vp->v_flag & VISTTY)) {
 				error = ENXIO;
 				goto errout;
@@ -790,19 +823,20 @@ guarded_write_np(struct proc *p, struct guarded_write_np_args *uap, user_ssize_t
 		}

 		error = dofilewrite(&context, fp, uap->buf, uap->nbyte,
-			uap->offset, FOF_OFFSET, retval);
+		    uap->offset, FOF_OFFSET, retval);
 		wrote_some = *retval > 0;
 	}
errout:
-	if (wrote_some)
-		fp_drop_written(p, fd, fp);
-	else
-		fp_drop(p, fd, fp, 0);
+	if (wrote_some) {
+		fp_drop_written(p, fd, fp);
+	} else {
+		fp_drop(p, fd, fp, 0);
+	}

 	KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_guarded_pwrite_np) | DBG_FUNC_NONE),
-	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
-
-	return(error);
+	    uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0);
+
+	return error;
 }

 /*
@@ -826,14 +860,15 @@ guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize
 	AUDIT_ARG(fd, uap->fd);

 	/* Verify range before calling uio_create() */
-	if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV)
-		return (EINVAL);
+	if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) {
+		return EINVAL;
+	}

 	/* allocate a uio large enough to hold the number of iovecs passed */
 	auio = uio_create(uap->iovcnt, 0,
-		(IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
-		UIO_WRITE);
-
+	    (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
+	    UIO_WRITE);
+
 	/* get location of iovecs within the uio. then copyin the iovecs from
	 * user space.
	 */
 	if (error) {
 		goto ExitThisRoutine;
 	}
 	error = copyin_user_iovec_array(uap->iovp,
-		IS_64BIT_PROCESS(p) ?
UIO_USERSPACE64 : UIO_USERSPACE32, + uap->iovcnt, iovp); if (error) { goto ExitThisRoutine; } - - /* finalize uio_t for use and do the IO + + /* finalize uio_t for use and do the IO */ error = uio_calculateresid(auio); if (error) { goto ExitThisRoutine; } - if ((error = copyin(uap->guard, &uguard, sizeof (uguard))) != 0) + if ((error = copyin(uap->guard, &uguard, sizeof(uguard))) != 0) { goto ExitThisRoutine; + } error = fp_lookup_guarded(p, uap->fd, uguard, &gfp, 0); - if (error) + if (error) { goto ExitThisRoutine; + } fp = GFP_TO_FP(gfp); if ((fp->f_flag & FWRITE) == 0) { @@ -870,16 +907,17 @@ guarded_writev_np(struct proc *p, struct guarded_writev_np_args *uap, user_ssize error = wr_uio(p, fp, auio, retval); wrote_some = *retval > 0; } - - if (wrote_some) - fp_drop_written(p, uap->fd, fp); - else - fp_drop(p, uap->fd, fp, 0); + + if (wrote_some) { + fp_drop_written(p, uap->fd, fp); + } else { + fp_drop(p, uap->fd, fp, 0); + } ExitThisRoutine: if (auio != NULL) { uio_free(auio); } - return (error); + return error; } /* @@ -896,15 +934,16 @@ falloc_guarded(struct proc *p, struct fileproc **fp, int *fd, struct gfp_crarg crarg; if (((attrs & GUARD_REQUIRED) != GUARD_REQUIRED) || - ((attrs & ~GUARD_ALL) != 0) || (*guard == 0)) - return (EINVAL); + ((attrs & ~GUARD_ALL) != 0) || (*guard == 0)) { + return EINVAL; + } - bzero(&crarg, sizeof (crarg)); + bzero(&crarg, sizeof(crarg)); crarg.gca_guard = *guard; crarg.gca_attrs = attrs; - return (falloc_withalloc(p, fp, fd, ctx, guarded_fileproc_alloc_init, - &crarg)); + return falloc_withalloc(p, fp, fd, ctx, guarded_fileproc_alloc_init, + &crarg); } #if CONFIG_MACF && CONFIG_VNGUARD @@ -951,7 +990,7 @@ struct vng_owner { /* lives on the fileglob label */ static struct vng_info * new_vgi(unsigned attrs, guardid_t guard) { - struct vng_info *vgi = kalloc(sizeof (*vgi)); + struct vng_info *vgi = kalloc(sizeof(*vgi)); vgi->vgi_guard = guard; vgi->vgi_attrs = attrs; TAILQ_INIT(&vgi->vgi_owners); @@ -961,8 +1000,8 @@ new_vgi(unsigned attrs, guardid_t guard) static struct vng_owner * new_vgo(proc_t p, struct fileglob *fg) { - struct vng_owner *vgo = kalloc(sizeof (*vgo)); - memset(vgo, 0, sizeof (*vgo)); + struct vng_owner *vgo = kalloc(sizeof(*vgo)); + memset(vgo, 0, sizeof(*vgo)); vgo->vgo_p = p; vgo->vgo_fg = fg; return vgo; @@ -988,18 +1027,18 @@ free_vgi(struct vng_info *vgi) { assert(TAILQ_EMPTY(&vgi->vgi_owners)); #if DEVELOP || DEBUG - memset(vgi, 0xbeadfade, sizeof (*vgi)); + memset(vgi, 0xbeadfade, sizeof(*vgi)); #endif - kfree(vgi, sizeof (*vgi)); + kfree(vgi, sizeof(*vgi)); } static void free_vgo(struct vng_owner *vgo) { #if DEVELOP || DEBUG - memset(vgo, 0x2bedf1d0, sizeof (*vgo)); + memset(vgo, 0x2bedf1d0, sizeof(*vgo)); #endif - kfree(vgo, sizeof (*vgo)); + kfree(vgo, sizeof(*vgo)); } static int label_slot; @@ -1011,10 +1050,11 @@ vng_lbl_get(struct label *label) { lck_rw_assert(&llock, LCK_RW_ASSERT_HELD); void *data; - if (NULL == label) + if (NULL == label) { data = NULL; - else + } else { data = (void *)mac_label_get(label, label_slot); + } return data; } @@ -1023,8 +1063,9 @@ vng_lbl_get_withattr(struct label *label, unsigned attrmask) { struct vng_info *vgi = vng_lbl_get(label); assert(NULL == vgi || (vgi->vgi_attrs & ~VNG_ALL) == 0); - if (NULL != vgi && 0 == (vgi->vgi_attrs & attrmask)) + if (NULL != vgi && 0 == (vgi->vgi_attrs & attrmask)) { vgi = NULL; + } return vgi; } @@ -1042,19 +1083,21 @@ vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns) const int fd = vns->vns_fd; if ((vns->vns_attrs & ~VNG_ALL) != 0 || - 0 == 
vns->vns_attrs || 0 == vns->vns_guard) + 0 == vns->vns_attrs || 0 == vns->vns_guard) { return EINVAL; + } int error; struct fileproc *fp; - if (0 != (error = fp_lookup(p, fd, &fp, 0))) + if (0 != (error = fp_lookup(p, fd, &fp, 0))) { return error; + } do { /* * To avoid trivial DoS, insist that the caller * has read/write access to the file. */ - if ((FREAD|FWRITE) != (fp->f_flag & (FREAD|FWRITE))) { + if ((FREAD | FWRITE) != (fp->f_flag & (FREAD | FWRITE))) { error = EBADF; break; } @@ -1101,9 +1144,10 @@ vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns) if (NULL == vgi) { /* vnode unguarded, add the first guard */ - if (NULL != vgo) + if (NULL != vgo) { panic("vnguard label on fileglob " - "but not vnode"); + "but not vnode"); + } /* add a kusecount so we can unlabel later */ error = vnode_ref_ext(vp, O_EVTONLY, 0); if (0 == error) { @@ -1118,10 +1162,11 @@ vnguard_sysc_setguard(proc_t p, const struct vnguard_set *vns) } else { /* vnode already guarded */ free_vgi(nvgi); - if (vgi->vgi_guard != vns->vns_guard) + if (vgi->vgi_guard != vns->vns_guard) { error = EPERM; /* guard mismatch */ - else if (vgi->vgi_attrs != vns->vns_attrs) + } else if (vgi->vgi_attrs != vns->vns_attrs) { error = EACCES; /* attr mismatch */ + } if (0 != error || NULL != vgo) { free_vgo(nvgo); break; @@ -1147,21 +1192,23 @@ vng_policy_syscall(proc_t p, int cmd, user_addr_t arg) switch (cmd) { case VNG_SYSC_PING: - if (0 == arg) + if (0 == arg) { error = 0; + } break; case VNG_SYSC_SET_GUARD: { struct vnguard_set vns; - error = copyin(arg, (void *)&vns, sizeof (vns)); - if (error) + error = copyin(arg, (void *)&vns, sizeof(vns)); + if (error) { break; + } error = vnguard_sysc_setguard(p, &vns); break; } default: break; } - return (error); + return error; } /* @@ -1207,8 +1254,9 @@ static os_reason_t vng_reason_from_pathname(const char *path, uint32_t pathlen) { os_reason_t r = os_reason_create(OS_REASON_GUARD, GUARD_REASON_VNODE); - if (NULL == r) - return (r); + if (NULL == r) { + return r; + } /* * If the pathname is very long, just keep the trailing part */ @@ -1224,11 +1272,11 @@ vng_reason_from_pathname(const char *path, uint32_t pathlen) if (kcdata_get_memory_addr(kcd, EXIT_REASON_USER_DESC, pathlen, &addr) == KERN_SUCCESS) { kcdata_memcpy(kcd, addr, path, pathlen); - return (r); + return r; } } os_reason_free(r); - return (OS_REASON_NULL); + return OS_REASON_NULL; } static int vng_policy_flags; @@ -1244,7 +1292,7 @@ vng_guard_violation(const struct vng_info *vgi, retval = EPERM; } - if (vng_policy_flags & (kVNG_POLICY_LOGMSG|kVNG_POLICY_UPRINTMSG)) { + if (vng_policy_flags & (kVNG_POLICY_LOGMSG | kVNG_POLICY_UPRINTMSG)) { /* log a message */ const char *op; switch (opval) { @@ -1298,11 +1346,12 @@ vng_guard_violation(const struct vng_info *vgi, proc_pid(vgo->vgo_p), vgi->vgi_guard); } } - if (NULL != nm) + if (NULL != nm) { vnode_putname(nm); + } } - if (vng_policy_flags & (kVNG_POLICY_EXC|kVNG_POLICY_EXC_CORPSE)) { + if (vng_policy_flags & (kVNG_POLICY_EXC | kVNG_POLICY_EXC_CORPSE)) { /* EXC_GUARD exception */ const struct vng_owner *vgo = TAILQ_FIRST(&vgi->vgi_owners); pid_t pid = vgo ? 
proc_pid(vgo->vgo_p) : 0; @@ -1322,14 +1371,17 @@ vng_guard_violation(const struct vng_info *vgi, os_reason_t r = NULL; if (NULL != path) { vn_getpath(vp, path, &len); - if (*path && len) + if (*path && len) { r = vng_reason_from_pathname(path, len); + } } task_violated_guard(code, subcode, r); /* not fatal */ - if (NULL != r) + if (NULL != r) { os_reason_free(r); - if (NULL != path) + } + if (NULL != path) { FREE(path, M_TEMP); + } } else { thread_t t = current_thread(); thread_guard_violation(t, code, subcode); @@ -1339,7 +1391,7 @@ vng_guard_violation(const struct vng_info *vgi, psignal(p, SIGKILL); } - return (retval); + return retval; } /* @@ -1374,17 +1426,19 @@ vng_vnode_check_rename(kauth_cred_t __unused cred, lck_rw_lock_shared(&llock); const struct vng_info *vgi = vng_lbl_get_withattr(label, VNG_RENAME_FROM); - if (NULL != vgi) + if (NULL != vgi) { error = vng_guard_violation(vgi, VNG_RENAME_FROM, vp); + } if (0 == error) { vgi = vng_lbl_get_withattr(tlabel, VNG_RENAME_TO); - if (NULL != vgi) + if (NULL != vgi) { error = vng_guard_violation(vgi, VNG_RENAME_TO, tvp); + } } lck_rw_unlock_shared(&llock); } - return (error); + return error; } static int @@ -1396,12 +1450,13 @@ vng_vnode_check_link(kauth_cred_t __unused cred, if (NULL != label) { lck_rw_lock_shared(&llock); const struct vng_info *vgi = - vng_lbl_get_withattr(label, VNG_LINK); - if (vgi) + vng_lbl_get_withattr(label, VNG_LINK); + if (vgi) { error = vng_guard_violation(vgi, VNG_LINK, vp); + } lck_rw_unlock_shared(&llock); } - return (error); + return error; } static int @@ -1414,11 +1469,12 @@ vng_vnode_check_unlink(kauth_cred_t __unused cred, lck_rw_lock_shared(&llock); const struct vng_info *vgi = vng_lbl_get_withattr(label, VNG_UNLINK); - if (vgi) + if (vgi) { error = vng_guard_violation(vgi, VNG_UNLINK, vp); + } lck_rw_unlock_shared(&llock); } - return (error); + return error; } /* @@ -1437,15 +1493,16 @@ vng_vnode_check_write(kauth_cred_t __unused actv_cred, proc_t p = current_proc(); const struct vng_owner *vgo; TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) { - if (vgo->vgo_p == p) + if (vgo->vgo_p == p) { goto done; + } } error = vng_guard_violation(vgi, VNG_WRITE_OTHER, vp); } - done: +done: lck_rw_unlock_shared(&llock); } - return (error); + return error; } /* @@ -1465,12 +1522,13 @@ vng_vnode_check_truncate(kauth_cred_t __unused actv_cred, proc_t p = current_proc(); const struct vng_owner *vgo; TAILQ_FOREACH(vgo, &vgi->vgi_owners, vgo_link) { - if (vgo->vgo_p == p) + if (vgo->vgo_p == p) { goto done; + } } error = vng_guard_violation(vgi, VNG_TRUNC_OTHER, vp); } - done: +done: lck_rw_unlock_shared(&llock); } return error; @@ -1485,18 +1543,20 @@ vng_vnode_check_exchangedata(kauth_cred_t __unused cred, if (NULL != flabel || NULL != slabel) { lck_rw_lock_shared(&llock); const struct vng_info *vgi = - vng_lbl_get_withattr(flabel, VNG_EXCHDATA); - if (NULL != vgi) + vng_lbl_get_withattr(flabel, VNG_EXCHDATA); + if (NULL != vgi) { error = vng_guard_violation(vgi, VNG_EXCHDATA, fvp); + } if (0 == error) { vgi = vng_lbl_get_withattr(slabel, VNG_EXCHDATA); - if (NULL != vgi) + if (NULL != vgi) { error = vng_guard_violation(vgi, VNG_EXCHDATA, svp); + } } lck_rw_unlock_shared(&llock); } - return (error); + return error; } /* Intercept open-time truncations (by "other") of a guarded vnode */ @@ -1505,9 +1565,10 @@ static int vng_vnode_check_open(kauth_cred_t cred, struct vnode *vp, struct label *label, int acc_mode) { - if (0 == (acc_mode & O_TRUNC)) - return (0); - return (vng_vnode_check_truncate(cred, NULL, vp, 
label)); + if (0 == (acc_mode & O_TRUNC)) { + return 0; + } + return vng_vnode_check_truncate(cred, NULL, vp, label); } /* @@ -1558,13 +1619,15 @@ static mac_policy_handle_t vng_policy_handle; void vnguard_policy_init(void) { - if (0 == PE_i_can_has_debugger(NULL)) + if (0 == PE_i_can_has_debugger(NULL)) { return; + } vng_policy_flags = kVNG_POLICY_LOGMSG | - kVNG_POLICY_EXC_CORPSE | kVNG_POLICY_UPRINTMSG; - PE_parse_boot_argn("vnguard", &vng_policy_flags, sizeof (vng_policy_flags)); - if (vng_policy_flags) + kVNG_POLICY_EXC_CORPSE | kVNG_POLICY_UPRINTMSG; + PE_parse_boot_argn("vnguard", &vng_policy_flags, sizeof(vng_policy_flags)); + if (vng_policy_flags) { mac_policy_register(&vng_policy_conf, &vng_policy_handle, NULL); + } } #if DEBUG || DEVELOPMENT @@ -1573,7 +1636,7 @@ vnguard_policy_init(void) SYSCTL_DECL(_kern_vnguard); SYSCTL_NODE(_kern, OID_AUTO, vnguard, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "vnguard"); SYSCTL_INT(_kern_vnguard, OID_AUTO, flags, CTLFLAG_RW | CTLFLAG_LOCKED, - &vng_policy_flags, 0, "vnguard policy flags"); + &vng_policy_flags, 0, "vnguard policy flags"); #endif #endif /* CONFIG_MACF && CONFIG_VNGUARD */ diff --git a/bsd/kern/kern_kpc.c b/bsd/kern/kern_kpc.c index 96700a924..5e23e548e 100644 --- a/bsd/kern/kern_kpc.c +++ b/bsd/kern/kern_kpc.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -113,13 +113,13 @@ kpc_get_bigarray(uint32_t *size_out) /* abstract sysctl handlers */ static int sysctl_get_int( struct sysctl_oid *oidp, struct sysctl_req *req, - uint32_t value ) + uint32_t value ) { int error = 0; - + /* copy out the old value */ error = sysctl_handle_int(oidp, &value, 0, req); - + return error; } @@ -128,11 +128,12 @@ sysctl_set_int( struct sysctl_req *req, int (*set_func)(int)) { int error = 0; int value = 0; - - error = SYSCTL_IN( req, &value, sizeof(value) ); - if( error ) + + error = SYSCTL_IN( req, &value, sizeof(value)); + if (error) { return error; - + } + error = set_func( value ); return error; @@ -140,18 +141,19 @@ sysctl_set_int( struct sysctl_req *req, int (*set_func)(int)) static int sysctl_getset_int( struct sysctl_oid *oidp, struct sysctl_req *req, - int (*get_func)(void), int (*set_func)(int) ) + int (*get_func)(void), int (*set_func)(int)) { int error = 0; uint32_t value = 0; - + /* get the old value and process it */ value = get_func(); /* copy out the old value, get the new value */ error = sysctl_handle_int(oidp, &value, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } /* if that worked, and we're writing... */ error = set_func( value ); @@ -162,78 +164,83 @@ sysctl_getset_int( struct sysctl_oid *oidp, struct sysctl_req *req, static int sysctl_setget_int( struct sysctl_req *req, - int (*setget_func)(int) ) + int (*setget_func)(int)) { int error = 0; int value = 0; - - error = SYSCTL_IN( req, &value, sizeof(value) ); - if( error ) + + error = SYSCTL_IN( req, &value, sizeof(value)); + if (error) { return error; - + } + value = setget_func(value); - error = SYSCTL_OUT( req, &value, sizeof(value) ); + error = SYSCTL_OUT( req, &value, sizeof(value)); return error; } static int sysctl_kpc_get_counters(uint32_t counters, - uint32_t *size, void *buf) + uint32_t *size, void *buf) { uint64_t *ctr_buf = (uint64_t*)buf; int curcpu; uint32_t count; count = kpc_get_cpu_counters(counters & KPC_ALL_CPUS, - counters, - &curcpu, &ctr_buf[1]); - if (!count) + counters, + &curcpu, &ctr_buf[1]); + if (!count) { return EINVAL; + } ctr_buf[0] = curcpu; - *size = (count+1) * sizeof(uint64_t); + *size = (count + 1) * sizeof(uint64_t); return 0; } -static int +static int sysctl_kpc_get_shadow_counters(uint32_t counters, - uint32_t *size, void *buf) + uint32_t *size, void *buf) { uint64_t *ctr_buf = (uint64_t*)buf; int curcpu; uint32_t count; count = kpc_get_shadow_counters(counters & KPC_ALL_CPUS, - counters, - &curcpu, &ctr_buf[1]); + counters, + &curcpu, &ctr_buf[1]); - if (!count) + if (!count) { return EINVAL; + } ctr_buf[0] = curcpu; - *size = (count+1) * sizeof(uint64_t); + *size = (count + 1) * sizeof(uint64_t); return 0; } static int sysctl_kpc_get_thread_counters(uint32_t tid, - uint32_t *size, void *buf) + uint32_t *size, void *buf) { uint32_t count = *size / sizeof(uint64_t); int r; - if( tid != 0 ) + if (tid != 0) { return EINVAL; + } r = kpc_get_curthread_counters(&count, buf); - if( !r ) + if (!r) { *size = count * sizeof(uint64_t); + } return r; } @@ -248,8 +255,9 @@ static int sysctl_kpc_set_config(uint32_t classes, void* buf) { /* userspace cannot reconfigure the power class */ - if (classes & KPC_CLASS_POWER_MASK) - return (EPERM); + if (classes & KPC_CLASS_POWER_MASK) { + return EPERM; + } return kpc_set_config( classes, buf); } @@ -263,8 +271,9 @@ static int sysctl_kpc_set_period(uint32_t classes, void* buf) { /* userspace cannot reconfigure 
the power class */ - if (classes & KPC_CLASS_POWER_MASK) - return (EPERM); + if (classes & KPC_CLASS_POWER_MASK) { + return EPERM; + } return kpc_set_period( classes, buf); } @@ -283,7 +292,7 @@ sysctl_kpc_set_actionid(uint32_t classes, void* buf) static int sysctl_get_bigarray(struct sysctl_req *req, - int (*get_fn)(uint32_t, uint32_t*, void*)) + int (*get_fn)(uint32_t, uint32_t*, void*)) { uint32_t bufsize = 0; uint64_t *buf = kpc_get_bigarray(&bufsize); @@ -324,7 +333,7 @@ sysctl_actionid_size( uint32_t classes ) static int sysctl_getset_bigarray(struct sysctl_req *req, int (*size_fn)(uint32_t arg), - int (*get_fn)(uint32_t, void*), int (*set_fn)(uint32_t, void*)) + int (*get_fn)(uint32_t, void*), int (*set_fn)(uint32_t, void*)) { int error = 0; uint64_t arg; @@ -390,7 +399,7 @@ kpc_sysctl SYSCTL_HANDLER_ARGS ktrace_lock(); // Most sysctls require an access check, but a few are public. - switch( (uintptr_t) arg1 ) { + switch ((uintptr_t) arg1) { case REQ_CLASSES: case REQ_CONFIG_COUNT: case REQ_COUNTER_COUNT: @@ -412,31 +421,30 @@ kpc_sysctl SYSCTL_HANDLER_ARGS lck_mtx_lock(&sysctl_lock); /* which request */ - switch( (uintptr_t) arg1 ) - { + switch ((uintptr_t) arg1) { case REQ_CLASSES: ret = sysctl_get_int( oidp, req, - kpc_get_classes() ); + kpc_get_classes()); break; case REQ_COUNTING: ret = sysctl_getset_int( oidp, req, - (getint_t)kpc_get_running, - (setint_t)kpc_set_running ); + (getint_t)kpc_get_running, + (setint_t)kpc_set_running ); break; case REQ_THREAD_COUNTING: ret = sysctl_getset_int( oidp, req, - (getint_t)kpc_get_thread_counting, - (setint_t)kpc_set_thread_counting ); + (getint_t)kpc_get_thread_counting, + (setint_t)kpc_set_thread_counting ); break; case REQ_CONFIG_COUNT: ret = sysctl_setget_int( req, - (setget_func_t)kpc_get_config_count ); + (setget_func_t)kpc_get_config_count ); break; case REQ_COUNTER_COUNT: ret = sysctl_setget_int( req, - (setget_func_t)kpc_get_counter_count ); + (setget_func_t)kpc_get_counter_count ); break; @@ -454,23 +462,23 @@ kpc_sysctl SYSCTL_HANDLER_ARGS case REQ_CONFIG: ret = sysctl_getset_bigarray( req, - sysctl_config_size, - sysctl_kpc_get_config, - sysctl_kpc_set_config ); + sysctl_config_size, + sysctl_kpc_get_config, + sysctl_kpc_set_config ); break; case REQ_PERIOD: ret = sysctl_getset_bigarray( req, - sysctl_counter_size, - sysctl_kpc_get_period, - sysctl_kpc_set_period ); + sysctl_counter_size, + sysctl_kpc_get_period, + sysctl_kpc_set_period ); break; case REQ_ACTIONID: ret = sysctl_getset_bigarray( req, - sysctl_actionid_size, - sysctl_kpc_get_actionid, - sysctl_kpc_set_actionid ); + sysctl_actionid_size, + sysctl_kpc_get_actionid, + sysctl_kpc_set_actionid ); break; @@ -496,81 +504,81 @@ kpc_sysctl SYSCTL_HANDLER_ARGS /*** sysctl definitions ***/ /* root kperf node */ -SYSCTL_NODE(, OID_AUTO, kpc, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "kpc"); +SYSCTL_NODE(, OID_AUTO, kpc, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "kpc"); /* values */ SYSCTL_PROC(_kpc, OID_AUTO, classes, - CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_CLASSES, - sizeof(int), kpc_sysctl, "I", "Available classes"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_CLASSES, + sizeof(int), kpc_sysctl, "I", "Available classes"); SYSCTL_PROC(_kpc, OID_AUTO, counting, - CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_COUNTING, - sizeof(int), kpc_sysctl, "I", "PMCs counting"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + 
(void*)REQ_COUNTING, + sizeof(int), kpc_sysctl, "I", "PMCs counting"); SYSCTL_PROC(_kpc, OID_AUTO, thread_counting, - CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_THREAD_COUNTING, - sizeof(int), kpc_sysctl, "I", "Thread accumulation"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_THREAD_COUNTING, + sizeof(int), kpc_sysctl, "I", "Thread accumulation"); SYSCTL_PROC(_kpc, OID_AUTO, pmu_version, - CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void *)REQ_PMU_VERSION, - sizeof(int), kpc_sysctl, "I", "PMU version for hardware"); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_PMU_VERSION, + sizeof(int), kpc_sysctl, "I", "PMU version for hardware"); /* faux values */ SYSCTL_PROC(_kpc, OID_AUTO, config_count, - CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_CONFIG_COUNT, - sizeof(int), kpc_sysctl, "S", "Config count"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_CONFIG_COUNT, + sizeof(int), kpc_sysctl, "S", "Config count"); SYSCTL_PROC(_kpc, OID_AUTO, counter_count, - CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_COUNTER_COUNT, - sizeof(int), kpc_sysctl, "S", "Counter count"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_COUNTER_COUNT, + sizeof(int), kpc_sysctl, "S", "Counter count"); SYSCTL_PROC(_kpc, OID_AUTO, sw_inc, - CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_SW_INC, - sizeof(int), kpc_sysctl, "S", "Software increment"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_SW_INC, + sizeof(int), kpc_sysctl, "S", "Software increment"); /* arrays */ SYSCTL_PROC(_kpc, OID_AUTO, thread_counters, - CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_THREAD_COUNTERS, - sizeof(uint64_t), kpc_sysctl, - "QU", "Current thread counters"); + CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_THREAD_COUNTERS, + sizeof(uint64_t), kpc_sysctl, + "QU", "Current thread counters"); SYSCTL_PROC(_kpc, OID_AUTO, counters, - CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_COUNTERS, - sizeof(uint64_t), kpc_sysctl, - "QU", "Current counters"); + CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_COUNTERS, + sizeof(uint64_t), kpc_sysctl, + "QU", "Current counters"); SYSCTL_PROC(_kpc, OID_AUTO, shadow_counters, - CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_SHADOW_COUNTERS, - sizeof(uint64_t), kpc_sysctl, - "QU", "Current shadow counters"); + CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_SHADOW_COUNTERS, + sizeof(uint64_t), kpc_sysctl, + "QU", "Current shadow counters"); SYSCTL_PROC(_kpc, OID_AUTO, config, - CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_CONFIG, - sizeof(uint64_t), kpc_sysctl, - "QU", "Set counter configs"); + CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_CONFIG, + sizeof(uint64_t), kpc_sysctl, + "QU", "Set counter configs"); SYSCTL_PROC(_kpc, OID_AUTO, period, - CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_PERIOD, - sizeof(uint64_t), kpc_sysctl, - "QU", "Set counter 
periods"); + CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_PERIOD, + sizeof(uint64_t), kpc_sysctl, + "QU", "Set counter periods"); SYSCTL_PROC(_kpc, OID_AUTO, actionid, - CTLFLAG_RD|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED, - (void*)REQ_ACTIONID, - sizeof(uint32_t), kpc_sysctl, - "QU", "Set counter actionids"); + CTLFLAG_RD | CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_ACTIONID, + sizeof(uint32_t), kpc_sysctl, + "QU", "Set counter actionids"); diff --git a/bsd/kern/kern_ktrace.c b/bsd/kern/kern_ktrace.c index 628a19a55..672bd151e 100644 --- a/bsd/kern/kern_ktrace.c +++ b/bsd/kern/kern_ktrace.c @@ -245,7 +245,7 @@ ktrace_promote_background(void) bool ktrace_background_active(void) { - return (ktrace_state == KTRACE_STATE_BG); + return ktrace_state == KTRACE_STATE_BG; } int @@ -253,8 +253,7 @@ ktrace_read_check(void) { ktrace_assert_lock_held(); - if (proc_uniqueid(current_proc()) == ktrace_owning_unique_id) - { + if (proc_uniqueid(current_proc()) == ktrace_owning_unique_id) { return 0; } @@ -304,8 +303,7 @@ ktrace_configure(uint32_t config_mask) /* background configure while foreground is active is not allowed */ if (proc_uniqueid(p) == ktrace_bg_unique_id && - ktrace_state == KTRACE_STATE_FG) - { + ktrace_state == KTRACE_STATE_FG) { return EBUSY; } @@ -370,7 +368,7 @@ ktrace_kernel_configure(uint32_t config_mask) ktrace_release_ownership(); strlcpy(ktrace_last_owner_execname, "kernel_task", - sizeof(ktrace_last_owner_execname)); + sizeof(ktrace_last_owner_execname)); } static errno_t @@ -483,7 +481,7 @@ ktrace_set_owning_proc(proc_t p) ktrace_owning_unique_id = proc_uniqueid(p); ktrace_owning_pid = proc_pid(p); strlcpy(ktrace_last_owner_execname, proc_name_address(p), - sizeof(ktrace_last_owner_execname)); + sizeof(ktrace_last_owner_execname)); } static void @@ -500,24 +498,24 @@ static int ktrace_sysctl SYSCTL_HANDLER_ARGS; SYSCTL_NODE(, OID_AUTO, ktrace, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "ktrace"); SYSCTL_UINT(_ktrace, OID_AUTO, state, CTLFLAG_RD | CTLFLAG_LOCKED, - &ktrace_state, 0, - ""); + &ktrace_state, 0, + ""); SYSCTL_INT(_ktrace, OID_AUTO, owning_pid, CTLFLAG_RD | CTLFLAG_LOCKED, - &ktrace_owning_pid, 0, - "pid of the process that owns ktrace"); + &ktrace_owning_pid, 0, + "pid of the process that owns ktrace"); SYSCTL_INT(_ktrace, OID_AUTO, background_pid, CTLFLAG_RD | CTLFLAG_LOCKED, - &ktrace_bg_pid, 0, - "pid of the background ktrace tool"); + &ktrace_bg_pid, 0, + "pid of the background ktrace tool"); SYSCTL_STRING(_ktrace, OID_AUTO, configured_by, CTLFLAG_RD | CTLFLAG_LOCKED, - ktrace_last_owner_execname, 0, - "execname of process that last configured ktrace"); + ktrace_last_owner_execname, 0, + "execname of process that last configured ktrace"); SYSCTL_PROC(_ktrace, OID_AUTO, init_background, CTLFLAG_RW | CTLFLAG_LOCKED, - (void *)SYSCTL_INIT_BACKGROUND, sizeof(int), - ktrace_sysctl, "I", "initialize calling process as background"); + (void *)SYSCTL_INIT_BACKGROUND, sizeof(int), + ktrace_sysctl, "I", "initialize calling process as background"); static int ktrace_sysctl SYSCTL_HANDLER_ARGS diff --git a/bsd/kern/kern_lockf.c b/bsd/kern/kern_lockf.c index 32ab96fd7..92ecc8164 100644 --- a/bsd/kern/kern_lockf.c +++ b/bsd/kern/kern_lockf.c @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -87,7 +87,7 @@ static int maxlockdepth = MAXDEPTH; #if (DEVELOPMENT || DEBUG) -#define LOCKF_DEBUGGING 1 +#define LOCKF_DEBUGGING 1 #endif #ifdef LOCKF_DEBUGGING @@ -95,34 +95,34 @@ static int maxlockdepth = MAXDEPTH; void lf_print(const char *tag, struct lockf *lock); void lf_printlist(const char *tag, struct lockf *lock); -#define LF_DBG_LOCKOP (1 << 0) /* setlk, getlk, clearlk */ -#define LF_DBG_LIST (1 << 1) /* split, coalesce */ -#define LF_DBG_IMPINH (1 << 2) /* importance inheritance */ -#define LF_DBG_TRACE (1 << 3) /* errors, exit */ +#define LF_DBG_LOCKOP (1 << 0) /* setlk, getlk, clearlk */ +#define LF_DBG_LIST (1 << 1) /* split, coalesce */ +#define LF_DBG_IMPINH (1 << 2) /* importance inheritance */ +#define LF_DBG_TRACE (1 << 3) /* errors, exit */ -static int lockf_debug = 0; /* was 2, could be 3 ;-) */ +static int lockf_debug = 0; /* was 2, could be 3 ;-) */ SYSCTL_INT(_debug, OID_AUTO, lockf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lockf_debug, 0, ""); /* * If there is no mask bit selector, or there is one, and the selector is * set, then output the debugging diagnostic. */ -#define LOCKF_DEBUG(mask, ...) \ - do { \ - if( !(mask) || ((mask) & lockf_debug)) { \ - printf(__VA_ARGS__); \ - } \ +#define LOCKF_DEBUG(mask, ...) \ + do { \ + if( !(mask) || ((mask) & lockf_debug)) { \ + printf(__VA_ARGS__); \ + } \ } while(0) -#else /* !LOCKF_DEBUGGING */ -#define LOCKF_DEBUG(mask, ...) /* mask */ -#endif /* !LOCKF_DEBUGGING */ +#else /* !LOCKF_DEBUGGING */ +#define LOCKF_DEBUG(mask, ...) 
/* mask */ +#endif /* !LOCKF_DEBUGGING */ MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures"); #define NOLOCKF (struct lockf *)0 -#define SELF 0x1 -#define OTHERS 0x2 -#define OFF_MAX 0x7fffffffffffffffULL /* max off_t */ +#define SELF 0x1 +#define OTHERS 0x2 +#define OFF_MAX 0x7fffffffffffffffULL /* max off_t */ /* * Overlapping lock states @@ -136,20 +136,20 @@ typedef enum { OVERLAP_ENDS_AFTER_LOCK } overlap_t; -static int lf_clearlock(struct lockf *); +static int lf_clearlock(struct lockf *); static overlap_t lf_findoverlap(struct lockf *, - struct lockf *, int, struct lockf ***, struct lockf **); + struct lockf *, int, struct lockf ***, struct lockf **); static struct lockf *lf_getblock(struct lockf *, pid_t); -static int lf_getlock(struct lockf *, struct flock *, pid_t); -static int lf_setlock(struct lockf *, struct timespec *); -static int lf_split(struct lockf *, struct lockf *); -static void lf_wakelock(struct lockf *, boolean_t); +static int lf_getlock(struct lockf *, struct flock *, pid_t); +static int lf_setlock(struct lockf *, struct timespec *); +static int lf_split(struct lockf *, struct lockf *); +static void lf_wakelock(struct lockf *, boolean_t); #if IMPORTANCE_INHERITANCE -static void lf_hold_assertion(task_t, struct lockf *); -static void lf_jump_to_queue_head(struct lockf *, struct lockf *); -static void lf_drop_assertion(struct lockf *); -static void lf_boost_blocking_proc(struct lockf *, struct lockf *); -static void lf_adjust_assertion(struct lockf *block); +static void lf_hold_assertion(task_t, struct lockf *); +static void lf_jump_to_queue_head(struct lockf *, struct lockf *); +static void lf_drop_assertion(struct lockf *); +static void lf_boost_blocking_proc(struct lockf *, struct lockf *); +static void lf_adjust_assertion(struct lockf *block); #endif /* IMPORTANCE_INHERITANCE */ /* @@ -200,7 +200,7 @@ lf_advlock(struct vnop_advlock_args *ap) LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: '%s' unlock without lock\n", vfs_context_proc(context)->p_comm); - return (0); + return 0; } } @@ -208,7 +208,6 @@ lf_advlock(struct vnop_advlock_args *ap) * Convert the flock structure into a start and end. 
*/ switch (fl->l_whence) { - case SEEK_SET: case SEEK_CUR: /* @@ -229,46 +228,47 @@ lf_advlock(struct vnop_advlock_args *ap) if ((error = vnode_size(vp, (off_t *)&size, context))) { LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: vnode_getattr failed: %d\n", error); - return (error); + return error; } if (size > OFF_MAX || (fl->l_start > 0 && - size > (u_quad_t)(OFF_MAX - fl->l_start))) - return (EOVERFLOW); + size > (u_quad_t)(OFF_MAX - fl->l_start))) { + return EOVERFLOW; + } start = size + fl->l_start; break; default: LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: unknown whence %d\n", fl->l_whence); - return (EINVAL); + return EINVAL; } if (start < 0) { LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: start < 0 (%qd)\n", start); - return (EINVAL); + return EINVAL; } if (fl->l_len < 0) { if (start == 0) { LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: len < 0 & start == 0\n"); - return (EINVAL); + return EINVAL; } end = start - 1; start += fl->l_len; if (start < 0) { LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: start < 0 (%qd)\n", start); - return (EINVAL); + return EINVAL; } - } else if (fl->l_len == 0) + } else if (fl->l_len == 0) { end = -1; - else { + } else { oadd = fl->l_len - 1; if (oadd > (off_t)(OFF_MAX - start)) { - LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: overflow\n"); - return (EOVERFLOW); + LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: overflow\n"); + return EOVERFLOW; } end = start + oadd; } @@ -276,8 +276,9 @@ lf_advlock(struct vnop_advlock_args *ap) * Create the lockf structure */ MALLOC(lock, struct lockf *, sizeof *lock, M_LOCKF, M_WAITOK); - if (lock == NULL) - return (ENOLCK); + if (lock == NULL) { + return ENOLCK; + } lock->lf_start = start; lock->lf_end = end; lock->lf_id = ap->a_id; @@ -290,19 +291,21 @@ lf_advlock(struct vnop_advlock_args *ap) #if IMPORTANCE_INHERITANCE lock->lf_boosted = LF_NOT_BOOSTED; #endif - if (ap->a_flags & F_POSIX) + if (ap->a_flags & F_POSIX) { lock->lf_owner = (struct proc *)lock->lf_id; - else + } else { lock->lf_owner = NULL; + } - if (ap->a_flags & F_FLOCK) - lock->lf_flags |= F_WAKE1_SAFE; + if (ap->a_flags & F_FLOCK) { + lock->lf_flags |= F_WAKE1_SAFE; + } - lck_mtx_lock(&vp->v_lock); /* protect the lockf list */ + lck_mtx_lock(&vp->v_lock); /* protect the lockf list */ /* * Do the requested operation. */ - switch(ap->a_op) { + switch (ap->a_op) { case F_SETLK: /* * For F_OFD_* locks, lf_id is the fileglob. @@ -314,8 +317,9 @@ lf_advlock(struct vnop_advlock_args *ap) */ if (ap->a_flags & F_OFD_LOCK) { struct fileglob *fg = (void *)lock->lf_id; - if (fg->fg_lflags & FG_CONFINED) + if (fg->fg_lflags & FG_CONFINED) { lock->lf_owner = current_proc(); + } } error = lf_setlock(lock, ap->a_timeout); break; @@ -340,10 +344,10 @@ lf_advlock(struct vnop_advlock_args *ap) error = EINVAL; break; } - lck_mtx_unlock(&vp->v_lock); /* done manipulating the list */ + lck_mtx_unlock(&vp->v_lock); /* done manipulating the list */ LOCKF_DEBUG(LF_DBG_TRACE, "lf_advlock: normal exit: %d\n", error); - return (error); + return error; } /* @@ -357,8 +361,9 @@ lf_abort_advlocks(vnode_t vp) { struct lockf *lock; - if ((lock = vp->v_lockf) == NULL) - return; + if ((lock = vp->v_lockf) == NULL) { + return; + } lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED); @@ -422,7 +427,7 @@ lf_coalesce_adjacent(struct lockf *lock) } /* - * NOTE: Assumes that if two locks are adjacent on the number line + * NOTE: Assumes that if two locks are adjacent on the number line * and belong to the same owner, then they are adjacent on the list. 
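The whence handling at the top of lf_advlock() reduces every flock request to an absolute byte range [start, end], where end == -1 means "through end of file". A minimal standalone sketch of that arithmetic, mirroring the negative-length and overflow cases in the hunk above (base stands for the offset implied by l_whence; the function name is illustrative, not from the patch):

    #include <sys/types.h>
    #include <errno.h>
    #include <stdio.h>

    #define OFF_MAX 0x7fffffffffffffffLL    /* max off_t */

    static int
    flock_to_range(off_t base, off_t l_start, off_t l_len,
        off_t *startp, off_t *endp)
    {
        off_t start, end, oadd;

        /* Guard base + l_start, as the SEEK_END path above guards
         * size + fl->l_start (there size is also checked against
         * OFF_MAX because it is an unsigned u_quad_t). */
        if (l_start > 0 && base > OFF_MAX - l_start) {
            return EOVERFLOW;
        }
        start = base + l_start;
        if (start < 0) {
            return EINVAL;
        }
        if (l_len < 0) {
            /* Negative length locks the |l_len| bytes ending at start-1. */
            if (start == 0) {
                return EINVAL;
            }
            end = start - 1;
            start += l_len;
            if (start < 0) {
                return EINVAL;
            }
        } else if (l_len == 0) {
            end = -1;                       /* lock through end of file */
        } else {
            oadd = l_len - 1;
            if (oadd > OFF_MAX - start) {
                return EOVERFLOW;
            }
            end = start + oadd;
        }
        *startp = start;
        *endp = end;
        return 0;
    }

    int
    main(void)
    {
        off_t s, e;

        /* l_len == -50 at offset 100 locks bytes [50, 99]. */
        if (flock_to_range(0, 100, -50, &s, &e) == 0) {
            printf("[%lld, %lld]\n", (long long)s, (long long)e);
        }
        return 0;
    }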
*/ if ((*lf)->lf_end != -1 && @@ -472,7 +477,7 @@ lf_coalesce_adjacent(struct lockf *lock) * set is unsuccessful. * * timeout Timeout specified in the case of - * SETLKWTIMEOUT. + * SETLKWTIMEOUT. * * Returns: 0 Success * EAGAIN @@ -508,8 +513,9 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) * Set the priority */ priority = PLOCK; - if (lock->lf_type == F_WRLCK) + if (lock->lf_type == F_WRLCK) { priority += 4; + } priority |= PCATCH; /* * Scan lock list for this file looking for locks that would block us. @@ -521,7 +527,7 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) if ((lock->lf_flags & F_WAIT) == 0) { DTRACE_FSINFO(advlock__nowait, vnode_t, vp); FREE(lock, M_LOCKF); - return (EAGAIN); + return EAGAIN; } /* @@ -571,8 +577,9 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) * proc_lock). */ waitblock = waitblock->lf_next; - if (waitblock == NULL) + if (waitblock == NULL) { break; + } /* * Make sure it's an advisory range @@ -580,8 +587,9 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) * if we mix lock types, it's our own * fault. */ - if ((waitblock->lf_flags & F_POSIX) == 0) + if ((waitblock->lf_flags & F_POSIX) == 0) { break; + } /* * If the owner of the lock that's @@ -593,7 +601,7 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) if (bproc == lock->lf_owner) { proc_unlock(wproc); FREE(lock, M_LOCKF); - return (EDEADLK); + return EDEADLK; } } } @@ -610,7 +618,7 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) lock->lf_type = F_UNLCK; if ((error = lf_clearlock(lock)) != 0) { FREE(lock, M_LOCKF); - return (error); + return error; } lock->lf_type = F_WRLCK; } @@ -621,8 +629,9 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) lock->lf_next = block; TAILQ_INSERT_TAIL(&block->lf_blkhd, lock, lf_block); - if ( !(lock->lf_flags & F_FLOCK)) - block->lf_flags &= ~F_WAKE1_SAFE; + if (!(lock->lf_flags & F_FLOCK)) { + block->lf_flags &= ~F_WAKE1_SAFE; + } #if IMPORTANCE_INHERITANCE /* @@ -639,12 +648,13 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) * created them, and thus have an "owner", in which case * we also attempt importance donation. */ - if ((lock->lf_flags & block->lf_flags & F_POSIX) != 0) + if ((lock->lf_flags & block->lf_flags & F_POSIX) != 0) { lf_boost_blocking_proc(lock, block); - else if ((lock->lf_flags & block->lf_flags & F_OFD_LOCK) && + } else if ((lock->lf_flags & block->lf_flags & F_OFD_LOCK) && lock->lf_owner != block->lf_owner && - NULL != lock->lf_owner && NULL != block->lf_owner) + NULL != lock->lf_owner && NULL != block->lf_owner) { lf_boost_blocking_proc(lock, block); + } #endif /* IMPORTANCE_INHERITANCE */ #ifdef LOCKF_DEBUGGING @@ -657,8 +667,9 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) error = msleep(lock, &vp->v_lock, priority, lockstr, timeout); - if (error == 0 && (lock->lf_flags & F_ABORT) != 0) + if (error == 0 && (lock->lf_flags & F_ABORT) != 0) { error = EBADF; + } if (lock->lf_next) { /* @@ -689,19 +700,21 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) } if (!TAILQ_EMPTY(&lock->lf_blkhd)) { - if ((block = lf_getblock(lock, -1)) != NULL) + if ((block = lf_getblock(lock, -1)) != NULL) { lf_move_blocked(block, lock); + } } if (error) { - if (!TAILQ_EMPTY(&lock->lf_blkhd)) - lf_wakelock(lock, TRUE); + if (!TAILQ_EMPTY(&lock->lf_blkhd)) { + lf_wakelock(lock, TRUE); + } FREE(lock, M_LOCKF); /* Return ETIMEDOUT if timeout occoured. 
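The EAGAIN, EDEADLK, and timeout paths in lf_setlock() above are what a process observes through fcntl(2). A userspace sketch of the usual try-then-block pattern, using only standard interfaces (nothing here is specific to this patch):

    #include <fcntl.h>
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Try to write-lock bytes [100, 149] of fd, first without blocking. */
    static int
    lock_range(int fd)
    {
        struct flock fl;

        memset(&fl, 0, sizeof(fl));
        fl.l_type = F_WRLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = 100;
        fl.l_len = 50;

        if (fcntl(fd, F_SETLK, &fl) == 0) {
            return 0;                 /* granted immediately */
        }
        if (errno != EAGAIN) {
            return -1;                /* unexpected failure */
        }
        /* Someone holds a conflicting lock; block until it is released.
         * EDEADLK here corresponds to the owner-graph walk in
         * lf_setlock() above. */
        if (fcntl(fd, F_SETLKW, &fl) == -1) {
            if (errno == EDEADLK) {
                fprintf(stderr, "deadlock detected\n");
            }
            return -1;
        }
        return 0;
    }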
*/ if (error == EWOULDBLOCK) { error = ETIMEDOUT; } - return (error); + return error; } } @@ -718,8 +731,9 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) needtolink = 1; for (;;) { ovcase = lf_findoverlap(block, lock, SELF, &prev, &overlap); - if (ovcase) + if (ovcase) { block = overlap->lf_next; + } /* * Six cases: * 0) no overlap @@ -743,8 +757,9 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) * able to acquire it. */ if (lock->lf_type == F_RDLCK && - overlap->lf_type == F_WRLCK) - lf_wakelock(overlap, TRUE); + overlap->lf_type == F_WRLCK) { + lf_wakelock(overlap, TRUE); + } overlap->lf_type = lock->lf_type; FREE(lock, M_LOCKF); lock = overlap; /* for lf_coalesce_adjacent() */ @@ -771,7 +786,7 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) */ if (lf_split(overlap, lock)) { FREE(lock, M_LOCKF); - return (ENOLCK); + return ENOLCK; } } lf_wakelock(overlap, TRUE); @@ -784,7 +799,7 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) */ if (lock->lf_type == F_RDLCK && overlap->lf_type == F_WRLCK) { - lf_wakelock(overlap, TRUE); + lf_wakelock(overlap, TRUE); } else { while (!TAILQ_EMPTY(&overlap->lf_blkhd)) { ltmp = TAILQ_FIRST(&overlap->lf_blkhd); @@ -803,8 +818,9 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) lock->lf_next = overlap->lf_next; prev = &lock->lf_next; needtolink = 0; - } else + } else { *prev = overlap->lf_next; + } FREE(overlap, M_LOCKF); continue; @@ -842,7 +858,7 @@ lf_setlock(struct lockf *lock, struct timespec *timeout) lf_printlist("lf_setlock(out)", lock); } #endif /* LOCKF_DEBUGGING */ - return (0); + return 0; } @@ -870,20 +886,23 @@ lf_clearlock(struct lockf *unlock) struct lockf *overlap, **prev; overlap_t ovcase; - if (lf == NOLOCKF) - return (0); + if (lf == NOLOCKF) { + return 0; + } #ifdef LOCKF_DEBUGGING - if (unlock->lf_type != F_UNLCK) + if (unlock->lf_type != F_UNLCK) { panic("lf_clearlock: bad type"); - if (lockf_debug & LF_DBG_LOCKOP) + } + if (lockf_debug & LF_DBG_LOCKOP) { lf_print("lf_clearlock", unlock); + } #endif /* LOCKF_DEBUGGING */ prev = head; while ((ovcase = lf_findoverlap(lf, unlock, SELF, &prev, &overlap)) != OVERLAP_NONE) { /* * Wakeup the list of locks to be retried. */ - lf_wakelock(overlap, FALSE); + lf_wakelock(overlap, FALSE); #if IMPORTANCE_INHERITANCE if (overlap->lf_boosted == LF_BOOSTED) { lf_drop_assertion(overlap); @@ -891,7 +910,7 @@ lf_clearlock(struct lockf *unlock) #endif /* IMPORTANCE_INHERITANCE */ switch (ovcase) { - case OVERLAP_NONE: /* satisfy compiler enum/switch */ + case OVERLAP_NONE: /* satisfy compiler enum/switch */ break; case OVERLAP_EQUALS_LOCK: @@ -908,8 +927,9 @@ lf_clearlock(struct lockf *unlock) * If we can't split the lock, we can't grant it. * Claim a system limit for the resource shortage. 
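lf_split(), used by both lf_setlock() and lf_clearlock() above, handles the case where a new lock or unlock punches a hole in the middle of an existing range: the original lock keeps the front piece and a freshly allocated copy takes the tail. A number-line illustration (annotation only, not part of the patch):

    /*
     * Before:   lock1 covers [0 ................................ 999]
     * Request:  lock2 covers            [400 ...... 599]
     *
     * After lf_split(lock1, lock2):
     *           lock1 covers [0 .. 399]
     *           lock2 covers            [400 ...... 599]
     *           splitlock (a copy of lock1) covers      [600 .. 999]
     *
     * If the MALLOC of splitlock fails, lf_split() returns ENOLCK and the
     * caller refuses the whole request ("claim a system limit").
     */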
*/ - if (lf_split(overlap, unlock)) - return (ENOLCK); + if (lf_split(overlap, unlock)) { + return ENOLCK; + } overlap->lf_next = unlock->lf_next; break; @@ -932,10 +952,11 @@ lf_clearlock(struct lockf *unlock) break; } #ifdef LOCKF_DEBUGGING - if (lockf_debug & LF_DBG_LOCKOP) + if (lockf_debug & LF_DBG_LOCKOP) { lf_printlist("lf_clearlock", unlock); + } #endif /* LOCKF_DEBUGGING */ - return (0); + return 0; } @@ -967,30 +988,33 @@ lf_getlock(struct lockf *lock, struct flock *fl, pid_t matchpid) struct lockf *block; #ifdef LOCKF_DEBUGGING - if (lockf_debug & LF_DBG_LOCKOP) + if (lockf_debug & LF_DBG_LOCKOP) { lf_print("lf_getlock", lock); + } #endif /* LOCKF_DEBUGGING */ if ((block = lf_getblock(lock, matchpid))) { fl->l_type = block->lf_type; fl->l_whence = SEEK_SET; fl->l_start = block->lf_start; - if (block->lf_end == -1) + if (block->lf_end == -1) { fl->l_len = 0; - else + } else { fl->l_len = block->lf_end - block->lf_start + 1; + } if (NULL != block->lf_owner) { /* * lf_owner is only non-NULL when the lock * "owner" can be unambiguously determined */ fl->l_pid = proc_pid(block->lf_owner); - } else + } else { fl->l_pid = -1; + } } else { fl->l_type = F_UNLCK; } - return (0); + return 0; } /* @@ -1025,17 +1049,19 @@ lf_getblock(struct lockf *lock, pid_t matchpid) * but the pid doesn't match, then keep on looking .. */ if (matchpid != -1 && - (overlap->lf_flags & (F_POSIX|F_OFD_LOCK)) != 0 && - proc_pid(overlap->lf_owner) != matchpid) + (overlap->lf_flags & (F_POSIX | F_OFD_LOCK)) != 0 && + proc_pid(overlap->lf_owner) != matchpid) { continue; + } /* * does it block us? */ - if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) - return (overlap); + if ((lock->lf_type == F_WRLCK || overlap->lf_type == F_WRLCK)) { + return overlap; + } } - return (NOLOCKF); + return NOLOCKF; } @@ -1085,30 +1111,32 @@ lf_getblock(struct lockf *lock, pid_t matchpid) */ static overlap_t lf_findoverlap(struct lockf *lf, struct lockf *lock, int type, - struct lockf ***prev, struct lockf **overlap) + struct lockf ***prev, struct lockf **overlap) { off_t start, end; int found_self = 0; *overlap = lf; - if (lf == NOLOCKF) - return (0); + if (lf == NOLOCKF) { + return 0; + } #ifdef LOCKF_DEBUGGING - if (lockf_debug & LF_DBG_LIST) + if (lockf_debug & LF_DBG_LIST) { lf_print("lf_findoverlap: looking for overlap in", lock); + } #endif /* LOCKF_DEBUGGING */ start = lock->lf_start; end = lock->lf_end; while (lf != NOLOCKF) { if (((type & SELF) && lf->lf_id != lock->lf_id) || ((type & OTHERS) && lf->lf_id == lock->lf_id)) { - /* + /* * Locks belonging to one process are adjacent on the * list, so if we've found any locks belonging to us, * and we're now seeing something else, then we've * examined all "self" locks. Note that bailing out - * here is quite important; for coalescing, we assume - * numerically adjacent locks from the same owner to + * here is quite important; for coalescing, we assume + * numerically adjacent locks from the same owner to * be adjacent on the list. 
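lf_getlock() above is the kernel half of F_GETLK: it copies the first blocking lock into the caller's struct flock, or sets l_type to F_UNLCK when nothing conflicts. A userspace sketch of querying it:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    /* Report who would block a write lock on the whole file. */
    static void
    who_blocks(int fd)
    {
        struct flock fl;

        memset(&fl, 0, sizeof(fl));
        fl.l_type = F_WRLCK;
        fl.l_whence = SEEK_SET;
        fl.l_start = 0;
        fl.l_len = 0;                  /* 0 == through end of file */

        if (fcntl(fd, F_GETLK, &fl) == -1) {
            perror("F_GETLK");
            return;
        }
        if (fl.l_type == F_UNLCK) {
            printf("no conflicting lock\n");
        } else {
            /* lf_getlock() filled these in from the blocking lock; l_pid
             * is -1 when the owner cannot be determined unambiguously. */
            printf("%s lock held by pid %d at [%lld, len %lld]\n",
                fl.l_type == F_WRLCK ? "exclusive" : "shared",
                (int)fl.l_pid, (long long)fl.l_start, (long long)fl.l_len);
        }
    }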
*/ if ((type & SELF) && found_self) { @@ -1125,8 +1153,9 @@ lf_findoverlap(struct lockf *lf, struct lockf *lock, int type, } #ifdef LOCKF_DEBUGGING - if (lockf_debug & LF_DBG_LIST) + if (lockf_debug & LF_DBG_LIST) { lf_print("\tchecking", lf); + } #endif /* LOCKF_DEBUGGING */ /* * OK, check for overlap @@ -1137,45 +1166,46 @@ lf_findoverlap(struct lockf *lf, struct lockf *lock, int type, LOCKF_DEBUG(LF_DBG_LIST, "no overlap\n"); /* - * NOTE: assumes that locks for the same process are + * NOTE: assumes that locks for the same process are * nonintersecting and ordered. */ - if ((type & SELF) && end != -1 && lf->lf_start > end) - return (OVERLAP_NONE); + if ((type & SELF) && end != -1 && lf->lf_start > end) { + return OVERLAP_NONE; + } *prev = &lf->lf_next; *overlap = lf = lf->lf_next; continue; } if ((lf->lf_start == start) && (lf->lf_end == end)) { LOCKF_DEBUG(LF_DBG_LIST, "overlap == lock\n"); - return (OVERLAP_EQUALS_LOCK); + return OVERLAP_EQUALS_LOCK; } if ((lf->lf_start <= start) && (end != -1) && ((lf->lf_end >= end) || (lf->lf_end == -1))) { LOCKF_DEBUG(LF_DBG_LIST, "overlap contains lock\n"); - return (OVERLAP_CONTAINS_LOCK); + return OVERLAP_CONTAINS_LOCK; } if (start <= lf->lf_start && - (end == -1 || - (lf->lf_end != -1 && end >= lf->lf_end))) { + (end == -1 || + (lf->lf_end != -1 && end >= lf->lf_end))) { LOCKF_DEBUG(LF_DBG_LIST, "lock contains overlap\n"); - return (OVERLAP_CONTAINED_BY_LOCK); + return OVERLAP_CONTAINED_BY_LOCK; } if ((lf->lf_start < start) && - ((lf->lf_end >= start) || (lf->lf_end == -1))) { + ((lf->lf_end >= start) || (lf->lf_end == -1))) { LOCKF_DEBUG(LF_DBG_LIST, "overlap starts before lock\n"); - return (OVERLAP_STARTS_BEFORE_LOCK); + return OVERLAP_STARTS_BEFORE_LOCK; } if ((lf->lf_start > start) && - (end != -1) && - ((lf->lf_end > end) || (lf->lf_end == -1))) { + (end != -1) && + ((lf->lf_end > end) || (lf->lf_end == -1))) { LOCKF_DEBUG(LF_DBG_LIST, "overlap ends after lock\n"); - return (OVERLAP_ENDS_AFTER_LOCK); + return OVERLAP_ENDS_AFTER_LOCK; } panic("lf_findoverlap: default"); } - return (OVERLAP_NONE); + return OVERLAP_NONE; } @@ -1219,21 +1249,22 @@ lf_split(struct lockf *lock1, struct lockf *lock2) if (lock1->lf_start == lock2->lf_start) { lock1->lf_start = lock2->lf_end + 1; lock2->lf_next = lock1; - return (0); + return 0; } if (lock1->lf_end == lock2->lf_end) { lock1->lf_end = lock2->lf_start - 1; lock2->lf_next = lock1->lf_next; lock1->lf_next = lock2; - return (0); + return 0; } /* * Make a new lock consisting of the last part of * the encompassing lock */ MALLOC(splitlock, struct lockf *, sizeof *splitlock, M_LOCKF, M_WAITOK); - if (splitlock == NULL) - return (ENOLCK); + if (splitlock == NULL) { + return ENOLCK; + } bcopy(lock1, splitlock, sizeof *splitlock); splitlock->lf_start = lock2->lf_end + 1; TAILQ_INIT(&splitlock->lf_blkhd); @@ -1245,7 +1276,7 @@ lf_split(struct lockf *lock1, struct lockf *lock2) lock2->lf_next = splitlock; lock1->lf_next = lock2; - return (0); + return 0; } @@ -1273,8 +1304,9 @@ lf_wakelock(struct lockf *listhead, boolean_t force_all) struct lockf *wakelock; boolean_t wake_all = TRUE; - if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE)) - wake_all = FALSE; + if (force_all == FALSE && (listhead->lf_flags & F_WAKE1_SAFE)) { + wake_all = FALSE; + } while (!TAILQ_EMPTY(&listhead->lf_blkhd)) { wakelock = TAILQ_FIRST(&listhead->lf_blkhd); @@ -1282,8 +1314,9 @@ lf_wakelock(struct lockf *listhead, boolean_t force_all) wakelock->lf_next = NOLOCKF; #ifdef LOCKF_DEBUGGING - if (lockf_debug & LF_DBG_LOCKOP) + 
if (lockf_debug & LF_DBG_LOCKOP) { lf_print("lf_wakelock: awakening", wakelock); + } #endif /* LOCKF_DEBUGGING */ if (wake_all == FALSE) { /* @@ -1294,27 +1327,28 @@ lf_wakelock(struct lockf *listhead, boolean_t force_all) if (!TAILQ_EMPTY(&listhead->lf_blkhd)) { TAILQ_CONCAT(&wakelock->lf_blkhd, &listhead->lf_blkhd, lf_block); - struct lockf *tlock; + struct lockf *tlock; - TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) { + TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) { if (TAILQ_NEXT(tlock, lf_block) == tlock) { /* See rdar://10887303 */ panic("cycle in wakelock list"); } - tlock->lf_next = wakelock; + tlock->lf_next = wakelock; } } } wakeup(wakelock); - if (wake_all == FALSE) - break; + if (wake_all == FALSE) { + break; + } } } #ifdef LOCKF_DEBUGGING -#define GET_LF_OWNER_PID(lf) (proc_pid((lf)->lf_owner)) +#define GET_LF_OWNER_PID(lf) (proc_pid((lf)->lf_owner)) /* * lf_print DEBUG @@ -1331,31 +1365,34 @@ void lf_print(const char *tag, struct lockf *lock) { printf("%s: lock %p for ", tag, (void *)lock); - if (lock->lf_flags & F_POSIX) + if (lock->lf_flags & F_POSIX) { printf("proc %p (owner %d)", lock->lf_id, GET_LF_OWNER_PID(lock)); - else if (lock->lf_flags & F_OFD_LOCK) + } else if (lock->lf_flags & F_OFD_LOCK) { printf("fg %p (owner %d)", lock->lf_id, GET_LF_OWNER_PID(lock)); - else + } else { printf("id %p", (void *)lock->lf_id); - if (lock->lf_vnode != 0) + } + if (lock->lf_vnode != 0) { printf(" in vno %p, %s, start 0x%016llx, end 0x%016llx", lock->lf_vnode, lock->lf_type == F_RDLCK ? "shared" : lock->lf_type == F_WRLCK ? "exclusive" : lock->lf_type == F_UNLCK ? "unlock" : "unknown", (intmax_t)lock->lf_start, (intmax_t)lock->lf_end); - else + } else { printf(" %s, start 0x%016llx, end 0x%016llx", lock->lf_type == F_RDLCK ? "shared" : lock->lf_type == F_WRLCK ? "exclusive" : lock->lf_type == F_UNLCK ? "unlock" : "unknown", (intmax_t)lock->lf_start, (intmax_t)lock->lf_end); - if (!TAILQ_EMPTY(&lock->lf_blkhd)) + } + if (!TAILQ_EMPTY(&lock->lf_blkhd)) { printf(" block %p\n", (void *)TAILQ_FIRST(&lock->lf_blkhd)); - else + } else { printf("\n"); + } } @@ -1376,21 +1413,23 @@ lf_printlist(const char *tag, struct lockf *lock) { struct lockf *lf, *blk; - if (lock->lf_vnode == 0) + if (lock->lf_vnode == 0) { return; + } printf("%s: Lock list for vno %p:\n", tag, lock->lf_vnode); for (lf = lock->lf_vnode->v_lockf; lf; lf = lf->lf_next) { - printf("\tlock %p for ",(void *)lf); - if (lf->lf_flags & F_POSIX) + printf("\tlock %p for ", (void *)lf); + if (lf->lf_flags & F_POSIX) { printf("proc %p (owner %d)", lf->lf_id, GET_LF_OWNER_PID(lf)); - else if (lf->lf_flags & F_OFD_LOCK) + } else if (lf->lf_flags & F_OFD_LOCK) { printf("fg %p (owner %d)", lf->lf_id, GET_LF_OWNER_PID(lf)); - else + } else { printf("id %p", (void *)lf->lf_id); + } printf(", %s, start 0x%016llx, end 0x%016llx", lf->lf_type == F_RDLCK ? "shared" : lf->lf_type == F_WRLCK ? "exclusive" : @@ -1398,22 +1437,24 @@ lf_printlist(const char *tag, struct lockf *lock) "unknown", (intmax_t)lf->lf_start, (intmax_t)lf->lf_end); TAILQ_FOREACH(blk, &lf->lf_blkhd, lf_block) { printf("\n\t\tlock request %p for ", (void *)blk); - if (blk->lf_flags & F_POSIX) + if (blk->lf_flags & F_POSIX) { printf("proc %p (owner %d)", blk->lf_id, GET_LF_OWNER_PID(blk)); - else if (blk->lf_flags & F_OFD_LOCK) + } else if (blk->lf_flags & F_OFD_LOCK) { printf("fg %p (owner %d)", blk->lf_id, GET_LF_OWNER_PID(blk)); - else + } else { printf("id %p", (void *)blk->lf_id); + } printf(", %s, start 0x%016llx, end 0x%016llx", blk->lf_type == F_RDLCK ? 
"shared" : blk->lf_type == F_WRLCK ? "exclusive" : blk->lf_type == F_UNLCK ? "unlock" : "unknown", (intmax_t)blk->lf_start, (intmax_t)blk->lf_end); - if (!TAILQ_EMPTY(&blk->lf_blkhd)) + if (!TAILQ_EMPTY(&blk->lf_blkhd)) { panic("lf_printlist: bad list"); + } } printf("\n"); } @@ -1427,20 +1468,20 @@ lf_printlist(const char *tag, struct lockf *lock) * * Call task importance hold assertion on the owner of the lock. * - * Parameters: block_task Owner of the lock blocking + * Parameters: block_task Owner of the lock blocking * current thread. * - * block lock on which the current thread + * block lock on which the current thread * is blocking on. * * Returns: * - * Notes: The task reference on block_task is not needed to be hold since - * the current thread has vnode lock and block_task has a file - * lock, thus removing file lock in exit requires block_task to + * Notes: The task reference on block_task is not needed to be hold since + * the current thread has vnode lock and block_task has a file + * lock, thus removing file lock in exit requires block_task to * grab the vnode lock. */ -static void +static void lf_hold_assertion(task_t block_task, struct lockf *block) { if (task_importance_hold_file_lock_assertion(block_task, 1) == 0) { @@ -1458,7 +1499,7 @@ lf_hold_assertion(task_t block_task, struct lockf *block) * Jump the lock from the tail of the block queue to the head of * the queue. * - * Parameters: block lockf struct containing the + * Parameters: block lockf struct containing the * block queue. * lock lockf struct to be jumped to the * front. @@ -1466,7 +1507,7 @@ lf_hold_assertion(task_t block_task, struct lockf *block) * Returns: */ static void -lf_jump_to_queue_head(struct lockf *block, struct lockf *lock) +lf_jump_to_queue_head(struct lockf *block, struct lockf *lock) { /* Move the lock to the head of the block queue. */ TAILQ_REMOVE(&block->lf_blkhd, lock, lf_block); @@ -1483,7 +1524,7 @@ lf_jump_to_queue_head(struct lockf *block, struct lockf *lock) * * Returns: */ -static void +static void lf_drop_assertion(struct lockf *block) { LOCKF_DEBUG(LF_DBG_IMPINH, "lf: %d: dropping assertion for lock %p\n", @@ -1520,9 +1561,8 @@ lf_adjust_assertion(struct lockf *block) /* Check if block and next are same type of locks */ if (((block->lf_flags & next->lf_flags & F_POSIX) != 0) || ((block->lf_flags & next->lf_flags & F_OFD_LOCK) && - (block->lf_owner != next->lf_owner) && - (NULL != block->lf_owner && NULL != next->lf_owner))) { - + (block->lf_owner != next->lf_owner) && + (NULL != block->lf_owner && NULL != next->lf_owner))) { /* Check if next would be boosting block */ if (task_is_importance_donor(proc_task(next->lf_owner)) && task_is_importance_receiver_type(proc_task(block->lf_owner))) { diff --git a/bsd/kern/kern_malloc.c b/bsd/kern/kern_malloc.c index 565839738..51fbadb8b 100644 --- a/bsd/kern/kern_malloc.c +++ b/bsd/kern/kern_malloc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ @@ -114,7 +114,7 @@ void kmeminit(void); /* Strings corresponding to types of memory. - * Must be in synch with the #defines is sys/malloc.h + * Must be in synch with the #defines is sys/malloc.h * NOTE - the reason we pass null strings in some cases is to reduce of foot * print as much as possible for systems where a tiny kernel is needed. * todo - We should probably redesign this and use enums for our types and only @@ -123,141 +123,141 @@ void kmeminit(void); * (for example see types M_UDFMNT, M_TEMP, etc in sys/malloc.h) */ const char *memname[] = { - "free", /* 0 M_FREE */ - "mbuf", /* 1 M_MBUF */ - "devbuf", /* 2 M_DEVBUF */ - "socket", /* 3 M_SOCKET */ - "pcb", /* 4 M_PCB */ - "routetbl", /* 5 M_RTABLE */ - "hosttbl", /* 6 M_HTABLE */ - "fragtbl", /* 7 M_FTABLE */ - "zombie", /* 8 M_ZOMBIE */ - "ifaddr", /* 9 M_IFADDR */ - "soopts", /* 10 M_SOOPTS */ - "soname", /* 11 M_SONAME */ - "namei", /* 12 M_NAMEI */ - "gprof", /* 13 M_GPROF */ - "ioctlops", /* 14 M_IOCTLOPS */ - "mapmem", /* 15 M_MAPMEM */ - "cred", /* 16 M_CRED */ - "pgrp", /* 17 M_PGRP */ - "session", /* 18 M_SESSION */ - "iov32", /* 19 M_IOV32 */ - "mount", /* 20 M_MOUNT */ - "fhandle", /* 21 M_FHANDLE */ + "free", /* 0 M_FREE */ + "mbuf", /* 1 M_MBUF */ + "devbuf", /* 2 M_DEVBUF */ + "socket", /* 3 M_SOCKET */ + "pcb", /* 4 M_PCB */ + "routetbl", /* 5 M_RTABLE */ + "hosttbl", /* 6 M_HTABLE */ + "fragtbl", /* 7 M_FTABLE */ + "zombie", /* 8 M_ZOMBIE */ + "ifaddr", /* 9 M_IFADDR */ + "soopts", /* 10 M_SOOPTS */ + "soname", /* 11 M_SONAME */ + "namei", /* 12 M_NAMEI */ + "gprof", /* 13 M_GPROF */ + "ioctlops", /* 14 M_IOCTLOPS */ + "mapmem", /* 15 M_MAPMEM */ + "cred", /* 16 M_CRED */ + "pgrp", /* 17 M_PGRP */ + "session", /* 18 M_SESSION */ + "iov32", /* 19 M_IOV32 */ + "mount", /* 20 M_MOUNT */ + "fhandle", /* 21 M_FHANDLE */ #if (NFSCLIENT || NFSSERVER) - "NFS req", /* 22 M_NFSREQ */ - "NFS mount", /* 23 M_NFSMNT */ - "NFS node", /* 24 M_NFSNODE */ + "NFS req", /* 22 M_NFSREQ */ + "NFS mount", /* 23 M_NFSMNT */ + "NFS node", /* 24 M_NFSNODE */ #else - "", /* 22 M_NFSREQ */ - "", /* 23 M_NFSMNT */ - "", /* 24 M_NFSNODE */ + "", /* 22 M_NFSREQ */ + "", /* 23 M_NFSMNT */ + "", /* 24 M_NFSNODE */ #endif - "vnodes", /* 25 M_VNODE */ - "namecache", /* 26 M_CACHE */ + "vnodes", /* 25 M_VNODE */ + "namecache", /* 26 M_CACHE */ #if QUOTA - "UFS quota", /* 27 M_DQUOT */ + "UFS quota", /* 27 M_DQUOT */ #else - "", /* 27 M_DQUOT */ + "", /* 27 M_DQUOT */ #endif - "proc uuid policy", /* 28 M_PROC_UUID_POLICY */ + "proc uuid policy", /* 28 M_PROC_UUID_POLICY */ #if (SYSV_SEM || SYSV_MSG || SYSV_SHM) - "shm", /* 29 M_SHM */ + "shm", /* 29 M_SHM */ #else - "", /* 29 M_SHM */ + "", /* 29 M_SHM */ #endif - "plimit", /* 30 M_VMMAP */ - "sigacts", /* 31 M_VMMAPENT */ - "VM object", /* 32 M_VMOBJ */ - "VM objhash", /* 33 M_VMOBJHASH */ - "VM pmap", /* 34 M_VMPMAP */ - "VM pvmap", /* 35 
M_VMPVENT */ - "VM pager", /* 36 M_VMPAGER */ - "VM pgdata", /* 37 M_VMPGDATA */ - "fileproc", /* 38 M_FILEPROC */ - "file desc", /* 39 M_FILEDESC */ - "lockf", /* 40 M_LOCKF */ - "proc", /* 41 M_PROC */ - "pstats", /* 42 M_SUBPROC */ - "LFS segment", /* 43 M_SEGMENT */ - "LFS node", /* 44 M_LFSNODE */ - "", /* 45 M_FFSNODE */ - "MFS node", /* 46 M_MFSNODE */ - "NQNFS Lease", /* 47 M_NQLEASE */ - "NQNFS Host", /* 48 M_NQMHOST */ - "Export Host", /* 49 M_NETADDR */ + "plimit", /* 30 M_VMMAP */ + "sigacts", /* 31 M_VMMAPENT */ + "VM object", /* 32 M_VMOBJ */ + "VM objhash", /* 33 M_VMOBJHASH */ + "VM pmap", /* 34 M_VMPMAP */ + "VM pvmap", /* 35 M_VMPVENT */ + "VM pager", /* 36 M_VMPAGER */ + "VM pgdata", /* 37 M_VMPGDATA */ + "fileproc", /* 38 M_FILEPROC */ + "file desc", /* 39 M_FILEDESC */ + "lockf", /* 40 M_LOCKF */ + "proc", /* 41 M_PROC */ + "pstats", /* 42 M_SUBPROC */ + "LFS segment", /* 43 M_SEGMENT */ + "LFS node", /* 44 M_LFSNODE */ + "", /* 45 M_FFSNODE */ + "MFS node", /* 46 M_MFSNODE */ + "NQNFS Lease", /* 47 M_NQLEASE */ + "NQNFS Host", /* 48 M_NQMHOST */ + "Export Host", /* 49 M_NETADDR */ #if (NFSCLIENT || NFSSERVER) - "NFS srvsock", /* 50 M_NFSSVC */ - "NFS uid", /* 51 M_NFSUID */ - "NFS daemon", /* 52 M_NFSD */ + "NFS srvsock", /* 50 M_NFSSVC */ + "NFS uid", /* 51 M_NFSUID */ + "NFS daemon", /* 52 M_NFSD */ #else - "", /* 50 M_NFSSVC */ - "", /* 51 M_NFSUID */ - "", /* 52 M_NFSD */ + "", /* 50 M_NFSSVC */ + "", /* 51 M_NFSUID */ + "", /* 52 M_NFSD */ #endif - "ip_moptions", /* 53 M_IPMOPTS */ - "in_multi", /* 54 M_IPMADDR */ - "ether_multi", /* 55 M_IFMADDR */ - "mrt", /* 56 M_MRTABLE */ - "", /* 57 unused entry */ - "", /* 58 unused entry */ + "ip_moptions", /* 53 M_IPMOPTS */ + "in_multi", /* 54 M_IPMADDR */ + "ether_multi", /* 55 M_IFMADDR */ + "mrt", /* 56 M_MRTABLE */ + "", /* 57 unused entry */ + "", /* 58 unused entry */ #if (NFSCLIENT || NFSSERVER) - "NFSV3 srvdesc",/* 59 M_NFSRVDESC */ - "NFSV3 diroff", /* 60 M_NFSDIROFF */ - "NFSV3 bigfh", /* 61 M_NFSBIGFH */ + "NFSV3 srvdesc",/* 59 M_NFSRVDESC */ + "NFSV3 diroff", /* 60 M_NFSDIROFF */ + "NFSV3 bigfh", /* 61 M_NFSBIGFH */ #else - "", /* 59 M_NFSRVDESC */ - "", /* 60 M_NFSDIROFF */ - "", /* 61 M_NFSBIGFH */ + "", /* 59 M_NFSRVDESC */ + "", /* 60 M_NFSDIROFF */ + "", /* 61 M_NFSBIGFH */ #endif - "MSDOSFS mount",/* 62 M_MSDOSFSMNT */ - "MSDOSFS fat", /* 63 M_MSDOSFSFAT */ - "MSDOSFS node", /* 64 M_MSDOSFSNODE */ - "ttys", /* 65 M_TTYS */ - "exec", /* 66 M_EXEC */ - "miscfs mount", /* 67 M_MISCFSMNT */ - "miscfs node", /* 68 M_MISCFSNODE */ - "adosfs mount", /* 69 M_ADOSFSMNT */ - "adosfs node", /* 70 M_ADOSFSNODE */ - "adosfs anode", /* 71 M_ANODE */ - "buf hdrs", /* 72 M_BUFHDR */ - "ofile tabl", /* 73 M_OFILETABL */ - "mbuf clust", /* 74 M_MCLUST */ - "", /* 75 unused */ - "", /* 76 unused */ - "", /* 77 unused */ - "", /* 78 unused */ - "", /* 79 unused */ - "temp", /* 80 M_TEMP */ - "key mgmt", /* 81 M_SECA */ - "DEVFS", /* 82 M_DEVFS */ - "IpFw/IpAcct", /* 83 M_IPFW */ - "UDF node", /* 84 M_UDFNODE */ - "UDF mount", /* 85 M_UDFMNT */ + "MSDOSFS mount",/* 62 M_MSDOSFSMNT */ + "MSDOSFS fat", /* 63 M_MSDOSFSFAT */ + "MSDOSFS node", /* 64 M_MSDOSFSNODE */ + "ttys", /* 65 M_TTYS */ + "exec", /* 66 M_EXEC */ + "miscfs mount", /* 67 M_MISCFSMNT */ + "miscfs node", /* 68 M_MISCFSNODE */ + "adosfs mount", /* 69 M_ADOSFSMNT */ + "adosfs node", /* 70 M_ADOSFSNODE */ + "adosfs anode", /* 71 M_ANODE */ + "buf hdrs", /* 72 M_BUFHDR */ + "ofile tabl", /* 73 M_OFILETABL */ + "mbuf clust", /* 74 M_MCLUST */ + "", /* 75 unused */ 
+ "", /* 76 unused */ + "", /* 77 unused */ + "", /* 78 unused */ + "", /* 79 unused */ + "temp", /* 80 M_TEMP */ + "key mgmt", /* 81 M_SECA */ + "DEVFS", /* 82 M_DEVFS */ + "IpFw/IpAcct", /* 83 M_IPFW */ + "UDF node", /* 84 M_UDFNODE */ + "UDF mount", /* 85 M_UDFMNT */ #if INET6 - "IPv6 NDP", /* 86 M_IP6NDP */ - "IPv6 options", /* 87 M_IP6OPT */ - "IPv6 Misc", /* 88 M_IP6MISC */ + "IPv6 NDP", /* 86 M_IP6NDP */ + "IPv6 options", /* 87 M_IP6OPT */ + "IPv6 Misc", /* 88 M_IP6MISC */ #else - "", /* 86 M_IP6NDP */ - "", /* 87 M_IP6OPT */ - "", /* 88 M_IP6MISC */ + "", /* 86 M_IP6NDP */ + "", /* 87 M_IP6OPT */ + "", /* 88 M_IP6MISC */ #endif "TCP Segment Q",/* 89 M_TSEGQ */ - "IGMP state", /* 90 M_IGMP */ - "", /* 91 unused */ - "", /* 92 unused */ - "specinfo", /* 93 M_SPECINFO */ - "kqueue", /* 94 M_KQUEUE */ - "", /* 95 unused */ - "cluster_read", /* 96 M_CLRDAHEAD */ - "cluster_write",/* 97 M_CLWRBEHIND */ - "iov64", /* 98 M_IOV64 */ - "fileglob", /* 99 M_FILEGLOB */ - "kauth", /* 100 M_KAUTH */ - "dummynet", /* 101 M_DUMMYNET */ - "", /* 102 M_UNSAFEFS */ + "IGMP state", /* 90 M_IGMP */ + "", /* 91 unused */ + "", /* 92 unused */ + "specinfo", /* 93 M_SPECINFO */ + "kqueue", /* 94 M_KQUEUE */ + "", /* 95 unused */ + "cluster_read", /* 96 M_CLRDAHEAD */ + "cluster_write",/* 97 M_CLWRBEHIND */ + "iov64", /* 98 M_IOV64 */ + "fileglob", /* 99 M_FILEGLOB */ + "kauth", /* 100 M_KAUTH */ + "dummynet", /* 101 M_DUMMYNET */ + "", /* 102 M_UNSAFEFS */ "macpipelabel", /* 103 M_MACPIPELABEL */ "mactemp", /* 104 M_MACTEMP */ "sbuf", /* 105 M_SBUF */ @@ -273,231 +273,231 @@ const char *memname[] = { #else "", /* 109 M_DECMPFS_CNODE */ #endif /* FS_COMPRESSION */ - "ipmfilter", /* 110 M_INMFILTER */ - "ipmsource", /* 111 M_IPMSOURCE */ - "in6mfilter", /* 112 M_IN6MFILTER */ - "ip6mopts", /* 113 M_IP6MOPTS */ - "ip6msource", /* 114 M_IP6MSOURCE */ + "ipmfilter", /* 110 M_INMFILTER */ + "ipmsource", /* 111 M_IPMSOURCE */ + "in6mfilter", /* 112 M_IN6MFILTER */ + "ip6mopts", /* 113 M_IP6MOPTS */ + "ip6msource", /* 114 M_IP6MSOURCE */ #if FLOW_DIVERT - "flow_divert_pcb", /* 115 M_FLOW_DIVERT_PCB */ - "flow_divert_group", /* 116 M_FLOW_DIVERT_GROUP */ + "flow_divert_pcb", /* 115 M_FLOW_DIVERT_PCB */ + "flow_divert_group", /* 116 M_FLOW_DIVERT_GROUP */ #else - "", /* 115 M_FLOW_DIVERT_PCB */ - "", /* 116 M_FLOW_DIVERT_GROUP */ + "", /* 115 M_FLOW_DIVERT_PCB */ + "", /* 116 M_FLOW_DIVERT_GROUP */ #endif - "ip6cga", /* 117 M_IP6CGA */ + "ip6cga", /* 117 M_IP6CGA */ #if NECP - "necp", /* 118 M_NECP */ - "necp_session_policy", /* 119 M_NECP_SESSION_POLICY */ - "necp_socket_policy", /* 120 M_NECP_SOCKET_POLICY */ - "necp_ip_policy", /* 121 M_NECP_IP_POLICY */ + "necp", /* 118 M_NECP */ + "necp_session_policy", /* 119 M_NECP_SESSION_POLICY */ + "necp_socket_policy", /* 120 M_NECP_SOCKET_POLICY */ + "necp_ip_policy", /* 121 M_NECP_IP_POLICY */ #else - "", /* 118 M_NECP */ - "", /* 119 M_NECP_SESSION_POLICY */ - "", /* 120 M_NECP_SOCKET_POLICY */ - "", /* 121 M_NECP_IP_POLICY */ + "", /* 118 M_NECP */ + "", /* 119 M_NECP_SESSION_POLICY */ + "", /* 120 M_NECP_SOCKET_POLICY */ + "", /* 121 M_NECP_IP_POLICY */ #endif - "fdvnodedata" /* 122 M_FD_VN_DATA */ - "fddirbuf", /* 123 M_FD_DIRBUF */ - "netagent", /* 124 M_NETAGENT */ + "fdvnodedata" /* 122 M_FD_VN_DATA */ + "fddirbuf", /* 123 M_FD_DIRBUF */ + "netagent", /* 124 M_NETAGENT */ "Event Handler",/* 125 M_EVENTHANDLER */ - "Link Layer Table", /* 126 M_LLTABLE */ - "Network Work Queue", /* 127 M_NWKWQ */ + "Link Layer Table", /* 126 M_LLTABLE */ + "Network Work Queue", /* 
127 M_NWKWQ */ "Content Filter", /* 128 M_CFIL */ - "" + "" }; /* for use with kmzones.kz_zalloczone */ -#define KMZ_CREATEZONE_ACCT ((void *)-3) -#define KMZ_CREATEZONE ((void *)-2) -#define KMZ_LOOKUPZONE ((void *)-1) -#define KMZ_MALLOC ((void *)0) -#define KMZ_SHAREZONE ((void *)1) +#define KMZ_CREATEZONE_ACCT ((void *)-3) +#define KMZ_CREATEZONE ((void *)-2) +#define KMZ_LOOKUPZONE ((void *)-1) +#define KMZ_MALLOC ((void *)0) +#define KMZ_SHAREZONE ((void *)1) struct kmzones { - size_t kz_elemsize; - void *kz_zalloczone; - boolean_t kz_noencrypt; + size_t kz_elemsize; + void *kz_zalloczone; + boolean_t kz_noencrypt; } kmzones[M_LAST] = { -#define SOS(sname) sizeof (struct sname) -#define SOX(sname) -1 - { -1, 0, FALSE }, /* 0 M_FREE */ - { MSIZE, KMZ_CREATEZONE, FALSE }, /* 1 M_MBUF */ - { 0, KMZ_MALLOC, FALSE }, /* 2 M_DEVBUF */ - { SOS(socket), KMZ_CREATEZONE, TRUE }, /* 3 M_SOCKET */ - { SOS(inpcb), KMZ_LOOKUPZONE, TRUE }, /* 4 M_PCB */ - { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 5 M_RTABLE */ - { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 6 M_HTABLE */ - { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 7 M_FTABLE */ - { SOS(rusage), KMZ_CREATEZONE, TRUE }, /* 8 M_ZOMBIE */ - { 0, KMZ_MALLOC, FALSE }, /* 9 M_IFADDR */ - { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 10 M_SOOPTS */ - { 0, KMZ_MALLOC, FALSE }, /* 11 M_SONAME */ - { MAXPATHLEN, KMZ_CREATEZONE, FALSE }, /* 12 M_NAMEI */ - { 0, KMZ_MALLOC, FALSE }, /* 13 M_GPROF */ - { 0, KMZ_MALLOC, FALSE }, /* 14 M_IOCTLOPS */ - { 0, KMZ_MALLOC, FALSE }, /* 15 M_MAPMEM */ - { SOS(ucred), KMZ_CREATEZONE, FALSE }, /* 16 M_CRED */ - { SOS(pgrp), KMZ_CREATEZONE, FALSE }, /* 17 M_PGRP */ - { SOS(session), KMZ_CREATEZONE, FALSE }, /* 18 M_SESSION */ - { SOS(user32_iovec), KMZ_LOOKUPZONE, FALSE },/* 19 M_IOV32 */ - { SOS(mount), KMZ_CREATEZONE, FALSE }, /* 20 M_MOUNT */ - { 0, KMZ_MALLOC, FALSE }, /* 21 M_FHANDLE */ +#define SOS(sname) sizeof (struct sname) +#define SOX(sname) -1 + { -1, 0, FALSE }, /* 0 M_FREE */ + { MSIZE, KMZ_CREATEZONE, FALSE }, /* 1 M_MBUF */ + { 0, KMZ_MALLOC, FALSE }, /* 2 M_DEVBUF */ + { SOS(socket), KMZ_CREATEZONE, TRUE }, /* 3 M_SOCKET */ + { SOS(inpcb), KMZ_LOOKUPZONE, TRUE }, /* 4 M_PCB */ + { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 5 M_RTABLE */ + { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 6 M_HTABLE */ + { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 7 M_FTABLE */ + { SOS(rusage), KMZ_CREATEZONE, TRUE }, /* 8 M_ZOMBIE */ + { 0, KMZ_MALLOC, FALSE }, /* 9 M_IFADDR */ + { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 10 M_SOOPTS */ + { 0, KMZ_MALLOC, FALSE }, /* 11 M_SONAME */ + { MAXPATHLEN, KMZ_CREATEZONE, FALSE }, /* 12 M_NAMEI */ + { 0, KMZ_MALLOC, FALSE }, /* 13 M_GPROF */ + { 0, KMZ_MALLOC, FALSE }, /* 14 M_IOCTLOPS */ + { 0, KMZ_MALLOC, FALSE }, /* 15 M_MAPMEM */ + { SOS(ucred), KMZ_CREATEZONE, FALSE }, /* 16 M_CRED */ + { SOS(pgrp), KMZ_CREATEZONE, FALSE }, /* 17 M_PGRP */ + { SOS(session), KMZ_CREATEZONE, FALSE }, /* 18 M_SESSION */ + { SOS(user32_iovec), KMZ_LOOKUPZONE, FALSE }, /* 19 M_IOV32 */ + { SOS(mount), KMZ_CREATEZONE, FALSE }, /* 20 M_MOUNT */ + { 0, KMZ_MALLOC, FALSE }, /* 21 M_FHANDLE */ #if (NFSCLIENT || NFSSERVER) - { SOS(nfsreq), KMZ_CREATEZONE, FALSE }, /* 22 M_NFSREQ */ - { SOS(nfsmount),KMZ_CREATEZONE, FALSE }, /* 23 M_NFSMNT */ - { SOS(nfsnode), KMZ_CREATEZONE, FALSE }, /* 24 M_NFSNODE */ + { SOS(nfsreq), KMZ_CREATEZONE, FALSE }, /* 22 M_NFSREQ */ + { SOS(nfsmount), KMZ_CREATEZONE, FALSE }, /* 23 M_NFSMNT */ + { SOS(nfsnode), KMZ_CREATEZONE, FALSE }, /* 24 M_NFSNODE */ #else - { 0, KMZ_MALLOC, FALSE }, /* 22 M_NFSREQ */ - { 0, KMZ_MALLOC, FALSE }, /* 
23 M_NFSMNT */ - { 0, KMZ_MALLOC, FALSE }, /* 24 M_NFSNODE */ + { 0, KMZ_MALLOC, FALSE }, /* 22 M_NFSREQ */ + { 0, KMZ_MALLOC, FALSE }, /* 23 M_NFSMNT */ + { 0, KMZ_MALLOC, FALSE }, /* 24 M_NFSNODE */ #endif - { SOS(vnode), KMZ_CREATEZONE, TRUE }, /* 25 M_VNODE */ - { SOS(namecache), KMZ_CREATEZONE, FALSE }, /* 26 M_CACHE */ + { SOS(vnode), KMZ_CREATEZONE, TRUE }, /* 25 M_VNODE */ + { SOS(namecache), KMZ_CREATEZONE, FALSE }, /* 26 M_CACHE */ #if QUOTA - { SOX(dquot), KMZ_LOOKUPZONE, FALSE }, /* 27 M_DQUOT */ + { SOX(dquot), KMZ_LOOKUPZONE, FALSE }, /* 27 M_DQUOT */ #else - { 0, KMZ_MALLOC, FALSE }, /* 27 M_DQUOT */ + { 0, KMZ_MALLOC, FALSE }, /* 27 M_DQUOT */ #endif - { 0, KMZ_MALLOC, FALSE }, /* 28 M_PROC_UUID_POLICY */ - { 0, KMZ_MALLOC, FALSE }, /* 29 M_SHM */ - { SOS(plimit), KMZ_CREATEZONE, TRUE }, /* 30 M_PLIMIT */ - { SOS(sigacts), KMZ_CREATEZONE_ACCT, TRUE }, /* 31 M_SIGACTS */ - { 0, KMZ_MALLOC, FALSE }, /* 32 M_VMOBJ */ - { 0, KMZ_MALLOC, FALSE }, /* 33 M_VMOBJHASH */ - { 0, KMZ_MALLOC, FALSE }, /* 34 M_VMPMAP */ - { 0, KMZ_MALLOC, FALSE }, /* 35 M_VMPVENT */ - { 0, KMZ_MALLOC, FALSE }, /* 36 M_VMPAGER */ - { 0, KMZ_MALLOC, FALSE }, /* 37 M_VMPGDATA */ - { SOS(fileproc),KMZ_CREATEZONE_ACCT, TRUE }, /* 38 M_FILEPROC */ - { SOS(filedesc),KMZ_CREATEZONE_ACCT, TRUE }, /* 39 M_FILEDESC */ - { SOX(lockf), KMZ_CREATEZONE_ACCT, TRUE }, /* 40 M_LOCKF */ - { SOS(proc), KMZ_CREATEZONE, FALSE }, /* 41 M_PROC */ - { SOS(pstats), KMZ_CREATEZONE, TRUE }, /* 42 M_PSTATS */ - { 0, KMZ_MALLOC, FALSE }, /* 43 M_SEGMENT */ - { M_FFSNODE, KMZ_SHAREZONE, FALSE }, /* 44 M_LFSNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 45 M_FFSNODE */ - { M_FFSNODE, KMZ_SHAREZONE, FALSE }, /* 46 M_MFSNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 47 M_NQLEASE */ - { 0, KMZ_MALLOC, FALSE }, /* 48 M_NQMHOST */ - { 0, KMZ_MALLOC, FALSE }, /* 49 M_NETADDR */ + { 0, KMZ_MALLOC, FALSE }, /* 28 M_PROC_UUID_POLICY */ + { 0, KMZ_MALLOC, FALSE }, /* 29 M_SHM */ + { SOS(plimit), KMZ_CREATEZONE, TRUE }, /* 30 M_PLIMIT */ + { SOS(sigacts), KMZ_CREATEZONE_ACCT, TRUE }, /* 31 M_SIGACTS */ + { 0, KMZ_MALLOC, FALSE }, /* 32 M_VMOBJ */ + { 0, KMZ_MALLOC, FALSE }, /* 33 M_VMOBJHASH */ + { 0, KMZ_MALLOC, FALSE }, /* 34 M_VMPMAP */ + { 0, KMZ_MALLOC, FALSE }, /* 35 M_VMPVENT */ + { 0, KMZ_MALLOC, FALSE }, /* 36 M_VMPAGER */ + { 0, KMZ_MALLOC, FALSE }, /* 37 M_VMPGDATA */ + { SOS(fileproc), KMZ_CREATEZONE_ACCT, TRUE }, /* 38 M_FILEPROC */ + { SOS(filedesc), KMZ_CREATEZONE_ACCT, TRUE }, /* 39 M_FILEDESC */ + { SOX(lockf), KMZ_CREATEZONE_ACCT, TRUE }, /* 40 M_LOCKF */ + { SOS(proc), KMZ_CREATEZONE, FALSE }, /* 41 M_PROC */ + { SOS(pstats), KMZ_CREATEZONE, TRUE }, /* 42 M_PSTATS */ + { 0, KMZ_MALLOC, FALSE }, /* 43 M_SEGMENT */ + { M_FFSNODE, KMZ_SHAREZONE, FALSE }, /* 44 M_LFSNODE */ + { 0, KMZ_MALLOC, FALSE }, /* 45 M_FFSNODE */ + { M_FFSNODE, KMZ_SHAREZONE, FALSE }, /* 46 M_MFSNODE */ + { 0, KMZ_MALLOC, FALSE }, /* 47 M_NQLEASE */ + { 0, KMZ_MALLOC, FALSE }, /* 48 M_NQMHOST */ + { 0, KMZ_MALLOC, FALSE }, /* 49 M_NETADDR */ #if (NFSCLIENT || NFSSERVER) { SOX(nfsrv_sock), - KMZ_CREATEZONE_ACCT, FALSE }, /* 50 M_NFSSVC */ - { 0, KMZ_MALLOC, FALSE }, /* 51 M_NFSUID */ + KMZ_CREATEZONE_ACCT, FALSE }, /* 50 M_NFSSVC */ + { 0, KMZ_MALLOC, FALSE }, /* 51 M_NFSUID */ { SOX(nfsrvcache), - KMZ_CREATEZONE_ACCT, FALSE }, /* 52 M_NFSD */ + KMZ_CREATEZONE_ACCT, FALSE }, /* 52 M_NFSD */ #else - { 0, KMZ_MALLOC, FALSE }, /* 50 M_NFSSVC */ - { 0, KMZ_MALLOC, FALSE }, /* 51 M_NFSUID */ - { 0, KMZ_MALLOC, FALSE }, /* 52 M_NFSD */ + { 0, KMZ_MALLOC, FALSE }, /* 50 M_NFSSVC 
*/ + { 0, KMZ_MALLOC, FALSE }, /* 51 M_NFSUID */ + { 0, KMZ_MALLOC, FALSE }, /* 52 M_NFSD */ #endif { SOX(ip_moptions), - KMZ_LOOKUPZONE, FALSE }, /* 53 M_IPMOPTS */ - { SOX(in_multi),KMZ_LOOKUPZONE, FALSE }, /* 54 M_IPMADDR */ + KMZ_LOOKUPZONE, FALSE }, /* 53 M_IPMOPTS */ + { SOX(in_multi), KMZ_LOOKUPZONE, FALSE }, /* 54 M_IPMADDR */ { SOX(ether_multi), - KMZ_LOOKUPZONE, FALSE }, /* 55 M_IFMADDR */ - { SOX(mrt), KMZ_CREATEZONE, TRUE }, /* 56 M_MRTABLE */ - { 0, KMZ_MALLOC, FALSE }, /* 57 unused entry */ - { 0, KMZ_MALLOC, FALSE }, /* 58 unused entry */ + KMZ_LOOKUPZONE, FALSE }, /* 55 M_IFMADDR */ + { SOX(mrt), KMZ_CREATEZONE, TRUE }, /* 56 M_MRTABLE */ + { 0, KMZ_MALLOC, FALSE }, /* 57 unused entry */ + { 0, KMZ_MALLOC, FALSE }, /* 58 unused entry */ #if (NFSCLIENT || NFSSERVER) { SOS(nfsrv_descript), - KMZ_CREATEZONE_ACCT, FALSE }, /* 59 M_NFSRVDESC */ - { SOS(nfsdmap), KMZ_CREATEZONE, FALSE }, /* 60 M_NFSDIROFF */ - { SOS(fhandle), KMZ_LOOKUPZONE, FALSE }, /* 61 M_NFSBIGFH */ + KMZ_CREATEZONE_ACCT, FALSE }, /* 59 M_NFSRVDESC */ + { SOS(nfsdmap), KMZ_CREATEZONE, FALSE }, /* 60 M_NFSDIROFF */ + { SOS(fhandle), KMZ_LOOKUPZONE, FALSE }, /* 61 M_NFSBIGFH */ #else - { 0, KMZ_MALLOC, FALSE }, /* 59 M_NFSRVDESC */ - { 0, KMZ_MALLOC, FALSE }, /* 60 M_NFSDIROFF */ - { 0, KMZ_MALLOC, FALSE }, /* 61 M_NFSBIGFH */ + { 0, KMZ_MALLOC, FALSE }, /* 59 M_NFSRVDESC */ + { 0, KMZ_MALLOC, FALSE }, /* 60 M_NFSDIROFF */ + { 0, KMZ_MALLOC, FALSE }, /* 61 M_NFSBIGFH */ #endif - { 0, KMZ_MALLOC, FALSE }, /* 62 M_MSDOSFSMNT */ - { 0, KMZ_MALLOC, FALSE }, /* 63 M_MSDOSFSFAT */ - { 0, KMZ_MALLOC, FALSE }, /* 64 M_MSDOSFSNODE */ - { SOS(tty), KMZ_CREATEZONE, FALSE }, /* 65 M_TTYS */ - { 0, KMZ_MALLOC, FALSE }, /* 66 M_EXEC */ - { 0, KMZ_MALLOC, FALSE }, /* 67 M_MISCFSMNT */ - { 0, KMZ_MALLOC, FALSE }, /* 68 M_MISCFSNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 69 M_ADOSFSMNT */ - { 0, KMZ_MALLOC, FALSE }, /* 70 M_ADOSFSNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 71 M_ANODE */ - { 0, KMZ_MALLOC, TRUE }, /* 72 M_BUFHDR */ + { 0, KMZ_MALLOC, FALSE }, /* 62 M_MSDOSFSMNT */ + { 0, KMZ_MALLOC, FALSE }, /* 63 M_MSDOSFSFAT */ + { 0, KMZ_MALLOC, FALSE }, /* 64 M_MSDOSFSNODE */ + { SOS(tty), KMZ_CREATEZONE, FALSE }, /* 65 M_TTYS */ + { 0, KMZ_MALLOC, FALSE }, /* 66 M_EXEC */ + { 0, KMZ_MALLOC, FALSE }, /* 67 M_MISCFSMNT */ + { 0, KMZ_MALLOC, FALSE }, /* 68 M_MISCFSNODE */ + { 0, KMZ_MALLOC, FALSE }, /* 69 M_ADOSFSMNT */ + { 0, KMZ_MALLOC, FALSE }, /* 70 M_ADOSFSNODE */ + { 0, KMZ_MALLOC, FALSE }, /* 71 M_ANODE */ + { 0, KMZ_MALLOC, TRUE }, /* 72 M_BUFHDR */ { (NDFILE * OFILESIZE), - KMZ_CREATEZONE_ACCT, FALSE }, /* 73 M_OFILETABL */ - { MCLBYTES, KMZ_CREATEZONE, FALSE }, /* 74 M_MCLUST */ - { 0, KMZ_MALLOC, FALSE }, /* 75 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 76 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 77 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 78 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 79 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 80 M_TEMP */ - { 0, KMZ_MALLOC, FALSE }, /* 81 M_SECA */ - { 0, KMZ_MALLOC, FALSE }, /* 82 M_DEVFS */ - { 0, KMZ_MALLOC, FALSE }, /* 83 M_IPFW */ - { 0, KMZ_MALLOC, FALSE }, /* 84 M_UDFNODE */ - { 0, KMZ_MALLOC, FALSE }, /* 85 M_UDFMOUNT */ - { 0, KMZ_MALLOC, FALSE }, /* 86 M_IP6NDP */ - { 0, KMZ_MALLOC, FALSE }, /* 87 M_IP6OPT */ - { 0, KMZ_MALLOC, FALSE }, /* 88 M_IP6MISC */ - { 0, KMZ_MALLOC, FALSE }, /* 89 M_TSEGQ */ - { 0, KMZ_MALLOC, FALSE }, /* 90 M_IGMP */ - { 0, KMZ_MALLOC, FALSE }, /* 91 unused */ - { 0, KMZ_MALLOC, FALSE }, /* 92 unused */ - { SOS(specinfo),KMZ_CREATEZONE, TRUE }, /* 93 
M_SPECINFO */ - { SOS(kqueue), KMZ_CREATEZONE, FALSE }, /* 94 M_KQUEUE */ - { 0, KMZ_MALLOC, FALSE }, /* 95 unused */ - { SOS(cl_readahead), KMZ_CREATEZONE, TRUE }, /* 96 M_CLRDAHEAD */ - { SOS(cl_writebehind),KMZ_CREATEZONE, TRUE }, /* 97 M_CLWRBEHIND */ - { SOS(user64_iovec), KMZ_LOOKUPZONE, FALSE },/* 98 M_IOV64 */ - { SOS(fileglob), KMZ_CREATEZONE, TRUE }, /* 99 M_FILEGLOB */ - { 0, KMZ_MALLOC, FALSE }, /* 100 M_KAUTH */ - { 0, KMZ_MALLOC, FALSE }, /* 101 M_DUMMYNET */ - { 0, KMZ_MALLOC, FALSE }, /* 102 M_UNSAFEFS */ - { 0, KMZ_MALLOC, FALSE }, /* 103 M_MACPIPELABEL */ - { 0, KMZ_MALLOC, FALSE }, /* 104 M_MACTEMP */ - { 0, KMZ_MALLOC, FALSE }, /* 105 M_SBUF */ - { 0, KMZ_MALLOC, FALSE }, /* 106 M_HFS_EXTATTR */ - { 0, KMZ_MALLOC, FALSE }, /* 107 M_SELECT */ - { 0, KMZ_MALLOC, FALSE }, /* 108 M_TRAFFIC_MGT */ + KMZ_CREATEZONE_ACCT, FALSE }, /* 73 M_OFILETABL */ + { MCLBYTES, KMZ_CREATEZONE, FALSE }, /* 74 M_MCLUST */ + { 0, KMZ_MALLOC, FALSE }, /* 75 unused */ + { 0, KMZ_MALLOC, FALSE }, /* 76 unused */ + { 0, KMZ_MALLOC, FALSE }, /* 77 unused */ + { 0, KMZ_MALLOC, FALSE }, /* 78 unused */ + { 0, KMZ_MALLOC, FALSE }, /* 79 unused */ + { 0, KMZ_MALLOC, FALSE }, /* 80 M_TEMP */ + { 0, KMZ_MALLOC, FALSE }, /* 81 M_SECA */ + { 0, KMZ_MALLOC, FALSE }, /* 82 M_DEVFS */ + { 0, KMZ_MALLOC, FALSE }, /* 83 M_IPFW */ + { 0, KMZ_MALLOC, FALSE }, /* 84 M_UDFNODE */ + { 0, KMZ_MALLOC, FALSE }, /* 85 M_UDFMOUNT */ + { 0, KMZ_MALLOC, FALSE }, /* 86 M_IP6NDP */ + { 0, KMZ_MALLOC, FALSE }, /* 87 M_IP6OPT */ + { 0, KMZ_MALLOC, FALSE }, /* 88 M_IP6MISC */ + { 0, KMZ_MALLOC, FALSE }, /* 89 M_TSEGQ */ + { 0, KMZ_MALLOC, FALSE }, /* 90 M_IGMP */ + { 0, KMZ_MALLOC, FALSE }, /* 91 unused */ + { 0, KMZ_MALLOC, FALSE }, /* 92 unused */ + { SOS(specinfo), KMZ_CREATEZONE, TRUE }, /* 93 M_SPECINFO */ + { SOS(kqueue), KMZ_CREATEZONE, FALSE }, /* 94 M_KQUEUE */ + { 0, KMZ_MALLOC, FALSE }, /* 95 unused */ + { SOS(cl_readahead), KMZ_CREATEZONE, TRUE }, /* 96 M_CLRDAHEAD */ + { SOS(cl_writebehind), KMZ_CREATEZONE, TRUE }, /* 97 M_CLWRBEHIND */ + { SOS(user64_iovec), KMZ_LOOKUPZONE, FALSE }, /* 98 M_IOV64 */ + { SOS(fileglob), KMZ_CREATEZONE, TRUE }, /* 99 M_FILEGLOB */ + { 0, KMZ_MALLOC, FALSE }, /* 100 M_KAUTH */ + { 0, KMZ_MALLOC, FALSE }, /* 101 M_DUMMYNET */ + { 0, KMZ_MALLOC, FALSE }, /* 102 M_UNSAFEFS */ + { 0, KMZ_MALLOC, FALSE }, /* 103 M_MACPIPELABEL */ + { 0, KMZ_MALLOC, FALSE }, /* 104 M_MACTEMP */ + { 0, KMZ_MALLOC, FALSE }, /* 105 M_SBUF */ + { 0, KMZ_MALLOC, FALSE }, /* 106 M_HFS_EXTATTR */ + { 0, KMZ_MALLOC, FALSE }, /* 107 M_SELECT */ + { 0, KMZ_MALLOC, FALSE }, /* 108 M_TRAFFIC_MGT */ #if FS_COMPRESSION - { SOS(decmpfs_cnode),KMZ_CREATEZONE , FALSE}, /* 109 M_DECMPFS_CNODE */ + { SOS(decmpfs_cnode), KMZ_CREATEZONE, FALSE}, /* 109 M_DECMPFS_CNODE */ #else - { 0, KMZ_MALLOC, FALSE }, /* 109 M_DECMPFS_CNODE */ + { 0, KMZ_MALLOC, FALSE }, /* 109 M_DECMPFS_CNODE */ #endif /* FS_COMPRESSION */ - { 0, KMZ_MALLOC, FALSE }, /* 110 M_INMFILTER */ - { 0, KMZ_MALLOC, FALSE }, /* 111 M_IPMSOURCE */ - { 0, KMZ_MALLOC, FALSE }, /* 112 M_IN6MFILTER */ - { 0, KMZ_MALLOC, FALSE }, /* 113 M_IP6MOPTS */ - { 0, KMZ_MALLOC, FALSE }, /* 114 M_IP6MSOURCE */ + { 0, KMZ_MALLOC, FALSE }, /* 110 M_INMFILTER */ + { 0, KMZ_MALLOC, FALSE }, /* 111 M_IPMSOURCE */ + { 0, KMZ_MALLOC, FALSE }, /* 112 M_IN6MFILTER */ + { 0, KMZ_MALLOC, FALSE }, /* 113 M_IP6MOPTS */ + { 0, KMZ_MALLOC, FALSE }, /* 114 M_IP6MSOURCE */ #if FLOW_DIVERT - { SOS(flow_divert_pcb), KMZ_CREATEZONE, TRUE }, /* 115 M_FLOW_DIVERT_PCB */ - { SOS(flow_divert_group), 
KMZ_CREATEZONE, TRUE }, /* 116 M_FLOW_DIVERT_GROUP */ + { SOS(flow_divert_pcb), KMZ_CREATEZONE, TRUE }, /* 115 M_FLOW_DIVERT_PCB */ + { SOS(flow_divert_group), KMZ_CREATEZONE, TRUE }, /* 116 M_FLOW_DIVERT_GROUP */ #else - { 0, KMZ_MALLOC, FALSE }, /* 115 M_FLOW_DIVERT_PCB */ - { 0, KMZ_MALLOC, FALSE }, /* 116 M_FLOW_DIVERT_GROUP */ -#endif /* FLOW_DIVERT */ - { 0, KMZ_MALLOC, FALSE }, /* 117 M_IP6CGA */ - { 0, KMZ_MALLOC, FALSE }, /* 118 M_NECP */ + { 0, KMZ_MALLOC, FALSE }, /* 115 M_FLOW_DIVERT_PCB */ + { 0, KMZ_MALLOC, FALSE }, /* 116 M_FLOW_DIVERT_GROUP */ +#endif /* FLOW_DIVERT */ + { 0, KMZ_MALLOC, FALSE }, /* 117 M_IP6CGA */ + { 0, KMZ_MALLOC, FALSE }, /* 118 M_NECP */ #if NECP - { SOS(necp_session_policy), KMZ_CREATEZONE, TRUE }, /* 119 M_NECP_SESSION_POLICY */ - { SOS(necp_kernel_socket_policy), KMZ_CREATEZONE, TRUE }, /* 120 M_NECP_SOCKET_POLICY */ - { SOS(necp_kernel_ip_output_policy), KMZ_CREATEZONE, TRUE }, /* 121 M_NECP_IP_POLICY */ + { SOS(necp_session_policy), KMZ_CREATEZONE, TRUE }, /* 119 M_NECP_SESSION_POLICY */ + { SOS(necp_kernel_socket_policy), KMZ_CREATEZONE, TRUE }, /* 120 M_NECP_SOCKET_POLICY */ + { SOS(necp_kernel_ip_output_policy), KMZ_CREATEZONE, TRUE }, /* 121 M_NECP_IP_POLICY */ #else - { 0, KMZ_MALLOC, FALSE }, /* 119 M_NECP_SESSION_POLICY */ - { 0, KMZ_MALLOC, FALSE }, /* 120 M_NECP_SOCKET_POLICY */ - { 0, KMZ_MALLOC, FALSE }, /* 121 M_NECP_IP_POLICY */ + { 0, KMZ_MALLOC, FALSE }, /* 119 M_NECP_SESSION_POLICY */ + { 0, KMZ_MALLOC, FALSE }, /* 120 M_NECP_SOCKET_POLICY */ + { 0, KMZ_MALLOC, FALSE }, /* 121 M_NECP_IP_POLICY */ #endif /* NECP */ - { 0, KMZ_MALLOC, FALSE }, /* 122 M_FD_VN_DATA */ - { 0, KMZ_MALLOC, FALSE }, /* 123 M_FD_DIRBUF */ - { 0, KMZ_MALLOC, FALSE }, /* 124 M_NETAGENT */ - { 0, KMZ_MALLOC, FALSE }, /* 125 M_EVENTHANDLER */ - { 0, KMZ_MALLOC, FALSE }, /* 126 M_LLTABLE */ - { 0, KMZ_MALLOC, FALSE }, /* 127 M_NWKWQ */ - { 0, KMZ_MALLOC, FALSE }, /* 128 M_CFIL */ -#undef SOS -#undef SOX + { 0, KMZ_MALLOC, FALSE }, /* 122 M_FD_VN_DATA */ + { 0, KMZ_MALLOC, FALSE }, /* 123 M_FD_DIRBUF */ + { 0, KMZ_MALLOC, FALSE }, /* 124 M_NETAGENT */ + { 0, KMZ_MALLOC, FALSE }, /* 125 M_EVENTHANDLER */ + { 0, KMZ_MALLOC, FALSE }, /* 126 M_LLTABLE */ + { 0, KMZ_MALLOC, FALSE }, /* 127 M_NWKWQ */ + { 0, KMZ_MALLOC, FALSE }, /* 128 M_CFIL */ +#undef SOS +#undef SOX }; -extern zone_t kalloc_zone(vm_size_t); /* XXX */ +extern zone_t kalloc_zone(vm_size_t); /* XXX */ /* * Initialize the kernel memory allocator @@ -505,33 +505,34 @@ extern zone_t kalloc_zone(vm_size_t); /* XXX */ void kmeminit(void) { - struct kmzones *kmz; + struct kmzones *kmz; - if ((sizeof(kmzones)/sizeof(kmzones[0])) != (sizeof(memname)/sizeof(memname[0]))) { + if ((sizeof(kmzones) / sizeof(kmzones[0])) != (sizeof(memname) / sizeof(memname[0]))) { panic("kmeminit: kmzones has %lu elements but memname has %lu\n", - (sizeof(kmzones)/sizeof(kmzones[0])), (sizeof(memname)/sizeof(memname[0]))); + (sizeof(kmzones) / sizeof(kmzones[0])), (sizeof(memname) / sizeof(memname[0]))); } kmz = kmzones; while (kmz < &kmzones[M_LAST]) { /* XXX */ - if (kmz->kz_elemsize == (size_t)(-1)) + if (kmz->kz_elemsize == (size_t)(-1)) { ; - else + } else /* XXX */ if (kmz->kz_zalloczone == KMZ_CREATEZONE || kmz->kz_zalloczone == KMZ_CREATEZONE_ACCT) { kmz->kz_zalloczone = zinit(kmz->kz_elemsize, - 1024 * 1024, PAGE_SIZE, - memname[kmz - kmzones]); + 1024 * 1024, PAGE_SIZE, + memname[kmz - kmzones]); zone_change(kmz->kz_zalloczone, Z_CALLERACCT, - (kmz->kz_zalloczone == KMZ_CREATEZONE_ACCT)); + (kmz->kz_zalloczone == 
KMZ_CREATEZONE_ACCT)); - if (kmz->kz_noencrypt == TRUE) + if (kmz->kz_noencrypt == TRUE) { zone_change(kmz->kz_zalloczone, Z_NOENCRYPT, TRUE); - } - else if (kmz->kz_zalloczone == KMZ_LOOKUPZONE) + } + } else if (kmz->kz_zalloczone == KMZ_LOOKUPZONE) { kmz->kz_zalloczone = kalloc_zone(kmz->kz_elemsize); + } kmz++; } @@ -539,15 +540,15 @@ kmeminit(void) kmz = kmzones; while (kmz < &kmzones[M_LAST]) { /* XXX */ - if (kmz->kz_elemsize == (size_t)(-1)) + if (kmz->kz_elemsize == (size_t)(-1)) { ; - else + } else /* XXX */ if (kmz->kz_zalloczone == KMZ_SHAREZONE) { kmz->kz_zalloczone = - kmzones[kmz->kz_elemsize].kz_zalloczone; + kmzones[kmz->kz_elemsize].kz_zalloczone; kmz->kz_elemsize = - kmzones[kmz->kz_elemsize].kz_elemsize; + kmzones[kmz->kz_elemsize].kz_elemsize; } kmz++; @@ -556,34 +557,36 @@ kmeminit(void) void * _MALLOC_external( - size_t size, - int type, - int flags); + size_t size, + int type, + int flags); void * _MALLOC_external( - size_t size, - int type, - int flags) + size_t size, + int type, + int flags) { - static vm_allocation_site_t site = { .tag = VM_KERN_MEMORY_KALLOC, .flags = VM_TAG_BT }; - return (__MALLOC(size, type, flags, &site)); + static vm_allocation_site_t site = { .tag = VM_KERN_MEMORY_KALLOC, .flags = VM_TAG_BT }; + return __MALLOC(size, type, flags, &site); } void * __MALLOC( - size_t size, - int type, - int flags, + size_t size, + int type, + int flags, vm_allocation_site_t *site) { - void *addr = NULL; - vm_size_t msize = size; + void *addr = NULL; + vm_size_t msize = size; - if (type >= M_LAST) + if (type >= M_LAST) { panic("_malloc TYPE"); + } - if (size == 0) - return (NULL); + if (size == 0) { + return NULL; + } if (msize != size) { panic("Requested size to __MALLOC is too large (%llx)!\n", (uint64_t)size); @@ -596,13 +599,13 @@ __MALLOC( if (addr == NULL) { /* * We get here when the caller told us to block waiting for memory, but - * kalloc said there's no memory left to get. Generally, this means there's a + * kalloc said there's no memory left to get. Generally, this means there's a * leak or the caller asked for an impossibly large amount of memory. If the caller - * is expecting a NULL return code then it should explicitly set the flag M_NULL. - * If the caller isn't expecting a NULL return code, we just panic. This is less - * than ideal, but returning NULL when the caller isn't expecting it doesn't help - * since the majority of callers don't check the return value and will just - * dereference the pointer and trap anyway. We may as well get a more + * is expecting a NULL return code then it should explicitly set the flag M_NULL. + * If the caller isn't expecting a NULL return code, we just panic. This is less + * than ideal, but returning NULL when the caller isn't expecting it doesn't help + * since the majority of callers don't check the return value and will just + * dereference the pointer and trap anyway. We may as well get a more * descriptive message out while we can. 
*/ if (flags & M_NULL) { @@ -611,113 +614,122 @@ __MALLOC( panic("_MALLOC: kalloc returned NULL (potential leak), size %llu", (uint64_t) size); } } - if (!addr) - return (0); + if (!addr) { + return 0; + } - if (flags & M_ZERO) + if (flags & M_ZERO) { bzero(addr, size); + } - return (addr); + return addr; } void _FREE( - void *addr, - int type) + void *addr, + int type) { - if (type >= M_LAST) + if (type >= M_LAST) { panic("_free TYPE"); + } - if (!addr) + if (!addr) { return; /* correct (convenient bsd kernel legacy) */ - + } kfree_addr(addr); } void * __REALLOC( - void *addr, - size_t size, - int type, - int flags, + void *addr, + size_t size, + int type, + int flags, vm_allocation_site_t *site) { - void *newaddr; - size_t alloc; + void *newaddr; + size_t alloc; /* realloc(NULL, ...) is equivalent to malloc(...) */ - if (addr == NULL) - return (__MALLOC(size, type, flags, site)); + if (addr == NULL) { + return __MALLOC(size, type, flags, site); + } alloc = kalloc_size(addr); - /* - * Find out the size of the bucket in which the new sized allocation - * would land. If it matches the bucket of the original allocation, + /* + * Find out the size of the bucket in which the new sized allocation + * would land. If it matches the bucket of the original allocation, * simply return the address. */ if (kalloc_bucket_size(size) == alloc) { - if (flags & M_ZERO) { - if (alloc < size) + if (flags & M_ZERO) { + if (alloc < size) { bzero(addr + alloc, (size - alloc)); - else + } else { bzero(addr + size, (alloc - size)); + } } return addr; } /* Allocate a new, bigger (or smaller) block */ - if ((newaddr = __MALLOC(size, type, flags, site)) == NULL) - return (NULL); + if ((newaddr = __MALLOC(size, type, flags, site)) == NULL) { + return NULL; + } /* Copy over original contents */ bcopy(addr, newaddr, MIN(size, alloc)); _FREE(addr, type); - return (newaddr); + return newaddr; } void * _MALLOC_ZONE_external( - size_t size, - int type, - int flags); + size_t size, + int type, + int flags); void * _MALLOC_ZONE_external( - size_t size, - int type, - int flags) + size_t size, + int type, + int flags) { - return (__MALLOC_ZONE(size, type, flags, NULL)); + return __MALLOC_ZONE(size, type, flags, NULL); } void * __MALLOC_ZONE( - size_t size, - int type, - int flags, + size_t size, + int type, + int flags, vm_allocation_site_t *site) { - struct kmzones *kmz; - void *elem; + struct kmzones *kmz; + void *elem; - if (type >= M_LAST) + if (type >= M_LAST) { panic("_malloc_zone TYPE"); + } kmz = &kmzones[type]; - if (kmz->kz_zalloczone == KMZ_MALLOC) + if (kmz->kz_zalloczone == KMZ_MALLOC) { panic("_malloc_zone ZONE: type = %d", type); + } /* XXX */ - if (kmz->kz_elemsize == (size_t)(-1)) + if (kmz->kz_elemsize == (size_t)(-1)) { panic("_malloc_zone XXX"); + } /* XXX */ - if (size == kmz->kz_elemsize) + if (size == kmz->kz_elemsize) { if (flags & M_NOWAIT) { - elem = (void *)zalloc_noblock(kmz->kz_zalloczone); + elem = (void *)zalloc_noblock(kmz->kz_zalloczone); } else { - elem = (void *)zalloc(kmz->kz_zalloczone); + elem = (void *)zalloc(kmz->kz_zalloczone); } - else { + } else { vm_size_t kalloc_size = size; if (size > kalloc_size) { elem = NULL; @@ -728,35 +740,40 @@ __MALLOC_ZONE( } } - if (elem && (flags & M_ZERO)) + if (elem && (flags & M_ZERO)) { bzero(elem, size); + } - return (elem); + return elem; } void _FREE_ZONE( - void *elem, - size_t size, - int type) + void *elem, + size_t size, + int type) { - struct kmzones *kmz; + struct kmzones *kmz; - if (type >= M_LAST) + if (type >= M_LAST) { panic("FREE_SIZE"); 
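__REALLOC above skips the allocate-copy-free cycle entirely when the old and new sizes land in the same kalloc bucket, since the existing block already has the capacity. A hedged sketch of that fast-path test; bucket_size() is a hypothetical stand-in for kalloc_bucket_size(), which need not be a power-of-two scheme in the real allocator:

    #include <stddef.h>

    /* Hypothetical stand-in for kalloc_bucket_size(): round a request up
     * to the allocator's bucket size (powers of two, for illustration). */
    static size_t
    bucket_size(size_t n)
    {
        size_t b = 16;
        while (b < n) {
            b <<= 1;
        }
        return b;
    }

    /* The fast path of __REALLOC: if old and new sizes share a bucket,
     * the existing block already has room and the pointer is returned
     * as-is (after zeroing the tail when M_ZERO is requested). */
    static int
    same_bucket(size_t oldsize, size_t newsize)
    {
        return bucket_size(oldsize) == bucket_size(newsize);
    }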
+ } kmz = &kmzones[type]; - if (kmz->kz_zalloczone == KMZ_MALLOC) + if (kmz->kz_zalloczone == KMZ_MALLOC) { panic("free_zone ZONE"); + } /* XXX */ - if (kmz->kz_elemsize == (size_t)(-1)) + if (kmz->kz_elemsize == (size_t)(-1)) { panic("FREE_SIZE XXX"); + } /* XXX */ - if (size == kmz->kz_elemsize) + if (size == kmz->kz_elemsize) { zfree(kmz->kz_zalloczone, elem); - else + } else { kfree(elem, size); + } } #if DEBUG || DEVELOPMENT @@ -772,7 +789,7 @@ sysctl_zone_map_jetsam_limit SYSCTL_HANDLER_ARGS oldval = zone_map_jetsam_limit; error = sysctl_io_number(req, oldval, sizeof(int), &val, NULL); if (error || !req->newptr) { - return (error); + return error; } if (val <= 0 || val > 100) { @@ -781,11 +798,11 @@ sysctl_zone_map_jetsam_limit SYSCTL_HANDLER_ARGS } zone_map_jetsam_limit = val; - return (0); + return 0; } -SYSCTL_PROC(_kern, OID_AUTO, zone_map_jetsam_limit, CTLTYPE_INT|CTLFLAG_RW, 0, 0, - sysctl_zone_map_jetsam_limit, "I", "Zone map jetsam limit"); +SYSCTL_PROC(_kern, OID_AUTO, zone_map_jetsam_limit, CTLTYPE_INT | CTLFLAG_RW, 0, 0, + sysctl_zone_map_jetsam_limit, "I", "Zone map jetsam limit"); extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity); @@ -801,8 +818,8 @@ sysctl_zone_map_size_and_capacity SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, zone_map_size_and_capacity, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, - 0, 0, &sysctl_zone_map_size_and_capacity, "Q", "Current size and capacity of the zone map"); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, + 0, 0, &sysctl_zone_map_size_and_capacity, "Q", "Current size and capacity of the zone map"); extern boolean_t run_zone_test(void); @@ -821,8 +838,8 @@ sysctl_run_zone_test SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, run_zone_test, - CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_LOCKED, - 0, 0, &sysctl_run_zone_test, "I", "Test zone allocator KPI"); + CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_LOCKED, + 0, 0, &sysctl_run_zone_test, "I", "Test zone allocator KPI"); #endif /* DEBUG || DEVELOPMENT */ @@ -845,8 +862,9 @@ sysctl_zleak_active SYSCTL_HANDLER_ARGS val = oldval = get_zleak_state(); error = sysctl_handle_int(oidp, &val, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } /* * Can only be activated if it's off (and not failed.) * Cannot be deactivated once it's on. 
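Most of the sysctl hunks in this patch are the same mechanical normalization: return (x); becomes return x;, single-statement branches gain braces, and flag masks gain spaces around |. The handler shape they all share is sketched below, mirroring sysctl_zone_map_jetsam_limit above; example_limit is a hypothetical tunable, while SYSCTL_HANDLER_ARGS, sysctl_io_number(), and req->newptr are the real BSD sysctl interfaces used throughout this file:

        static int example_limit = 95;

        static int
        sysctl_example_limit SYSCTL_HANDLER_ARGS
        {
        #pragma unused(oidp, arg1, arg2)
                int error, val;

                val = example_limit;    /* current value, copied out on reads */
                error = sysctl_io_number(req, val, sizeof(int), &val, NULL);
                if (error || !req->newptr) {
                        /* copy-in/out failed, or a read-only request */
                        return error;
                }
                if (val <= 0 || val > 100) {
                        /* validate before committing */
                        return EINVAL;
                }
                example_limit = val;
                return 0;
        }

A null req->newptr is how a handler tells a read from a write; the handlers in this file all return early on that test before validating and committing a new value.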
@@ -854,14 +872,16 @@ sysctl_zleak_active SYSCTL_HANDLER_ARGS if (val == 1 && oldval == 0) { kern_return_t kr = zleak_activate(); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { printf("zleak_active: failed to activate " "live zone leak debugging (%d).\n", kr); - } if (val == 0 && oldval == 1) { + } + } + if (val == 0 && oldval == 1) { printf("zleak_active: active, cannot be disabled.\n"); - return (EINVAL); + return EINVAL; } - return (0); + return 0; } SYSCTL_PROC(_kern_zleak, OID_AUTO, active, @@ -896,16 +916,18 @@ sysctl_zleak_threshold SYSCTL_HANDLER_ARGS int error; uint64_t value = *(vm_size_t *)arg1; - error = sysctl_io_number(req, value, sizeof (value), &value, NULL); + error = sysctl_io_number(req, value, sizeof(value), &value, NULL); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } - if (value > (uint64_t)zleak_max_zonemap_size) - return (ERANGE); + if (value > (uint64_t)zleak_max_zonemap_size) { + return ERANGE; + } *(vm_size_t *)arg1 = value; - return (0); + return 0; } /* @@ -937,7 +959,7 @@ SYSCTL_PROC(_kern_zleak, OID_AUTO, zone_threshold, &zleak_per_zone_tracking_threshold, 0, sysctl_zleak_threshold, "Q", "zleak per-zone threshold"); -#endif /* CONFIG_ZLEAKS */ +#endif /* CONFIG_ZLEAKS */ extern uint64_t get_zones_collectable_bytes(void); @@ -951,5 +973,5 @@ sysctl_zones_collectable_bytes SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, zones_collectable_bytes, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, - 0, 0, &sysctl_zones_collectable_bytes, "Q", "Collectable memory in zones"); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, + 0, 0, &sysctl_zones_collectable_bytes, "Q", "Collectable memory in zones"); diff --git a/bsd/kern/kern_memorystatus.c b/bsd/kern/kern_memorystatus.c index f52c05c1a..5c3410624 100644 --- a/bsd/kern/kern_memorystatus.c +++ b/bsd/kern/kern_memorystatus.c @@ -2,7 +2,7 @@ * Copyright (c) 2006-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ * */ @@ -67,7 +67,7 @@ #include #endif /* CONFIG_FREEZE */ -#include +#include #include #include @@ -75,19 +75,19 @@ /* For logging clarity */ static const char *memorystatus_kill_cause_name[] = { - "" , /* kMemorystatusInvalid */ - "jettisoned" , /* kMemorystatusKilled */ - "highwater" , /* kMemorystatusKilledHiwat */ - "vnode-limit" , /* kMemorystatusKilledVnodes */ - "vm-pageshortage" , /* kMemorystatusKilledVMPageShortage */ - "proc-thrashing" , /* kMemorystatusKilledProcThrashing */ - "fc-thrashing" , /* kMemorystatusKilledFCThrashing */ - "per-process-limit" , /* kMemorystatusKilledPerProcessLimit */ - "disk-space-shortage" , /* kMemorystatusKilledDiskSpaceShortage */ - "idle-exit" , /* kMemorystatusKilledIdleExit */ - "zone-map-exhaustion" , /* kMemorystatusKilledZoneMapExhaustion */ - "vm-compressor-thrashing" , /* kMemorystatusKilledVMCompressorThrashing */ - "vm-compressor-space-shortage" , /* kMemorystatusKilledVMCompressorSpaceShortage */ + "", /* kMemorystatusInvalid */ + "jettisoned", /* kMemorystatusKilled */ + "highwater", /* kMemorystatusKilledHiwat */ + "vnode-limit", /* kMemorystatusKilledVnodes */ + "vm-pageshortage", /* kMemorystatusKilledVMPageShortage */ + "proc-thrashing", /* kMemorystatusKilledProcThrashing */ + "fc-thrashing", /* kMemorystatusKilledFCThrashing */ + "per-process-limit", /* kMemorystatusKilledPerProcessLimit */ + "disk-space-shortage", /* kMemorystatusKilledDiskSpaceShortage */ + "idle-exit", /* kMemorystatusKilledIdleExit */ + "zone-map-exhaustion", /* kMemorystatusKilledZoneMapExhaustion */ + "vm-compressor-thrashing", /* kMemorystatusKilledVMCompressorThrashing */ + "vm-compressor-space-shortage", /* kMemorystatusKilledVMCompressorSpaceShortage */ }; static const char * @@ -110,7 +110,7 @@ memorystatus_priority_band_name(int32_t priority) return "CRITICAL"; } - return ("?"); + return "?"; } /* Does cause indicate vm or fc thrashing? */ @@ -131,8 +131,9 @@ is_reason_thrashing(unsigned cause) static boolean_t is_reason_zone_map_exhaustion(unsigned cause) { - if (cause == kMemorystatusKilledZoneMapExhaustion) + if (cause == kMemorystatusKilledZoneMapExhaustion) { return TRUE; + } return FALSE; } @@ -171,48 +172,48 @@ do { \ * soon be in effect down in the ledgers. 
*/ -#define SET_ACTIVE_LIMITS_LOCKED(p, limit, is_fatal) \ -MACRO_BEGIN \ -(p)->p_memstat_memlimit_active = (limit); \ - if (is_fatal) { \ - (p)->p_memstat_state |= P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL; \ - } else { \ - (p)->p_memstat_state &= ~P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL; \ - } \ +#define SET_ACTIVE_LIMITS_LOCKED(p, limit, is_fatal) \ +MACRO_BEGIN \ +(p)->p_memstat_memlimit_active = (limit); \ + if (is_fatal) { \ + (p)->p_memstat_state |= P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL; \ + } else { \ + (p)->p_memstat_state &= ~P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL; \ + } \ MACRO_END -#define SET_INACTIVE_LIMITS_LOCKED(p, limit, is_fatal) \ -MACRO_BEGIN \ -(p)->p_memstat_memlimit_inactive = (limit); \ - if (is_fatal) { \ - (p)->p_memstat_state |= P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL; \ - } else { \ - (p)->p_memstat_state &= ~P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL; \ - } \ +#define SET_INACTIVE_LIMITS_LOCKED(p, limit, is_fatal) \ +MACRO_BEGIN \ +(p)->p_memstat_memlimit_inactive = (limit); \ + if (is_fatal) { \ + (p)->p_memstat_state |= P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL; \ + } else { \ + (p)->p_memstat_state &= ~P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL; \ + } \ MACRO_END -#define CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal) \ -MACRO_BEGIN \ -(p)->p_memstat_memlimit = (p)->p_memstat_memlimit_active; \ - if ((p)->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL) { \ - (p)->p_memstat_state |= P_MEMSTAT_FATAL_MEMLIMIT; \ - is_fatal = TRUE; \ - } else { \ - (p)->p_memstat_state &= ~P_MEMSTAT_FATAL_MEMLIMIT; \ - is_fatal = FALSE; \ - } \ +#define CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal) \ +MACRO_BEGIN \ +(p)->p_memstat_memlimit = (p)->p_memstat_memlimit_active; \ + if ((p)->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL) { \ + (p)->p_memstat_state |= P_MEMSTAT_FATAL_MEMLIMIT; \ + is_fatal = TRUE; \ + } else { \ + (p)->p_memstat_state &= ~P_MEMSTAT_FATAL_MEMLIMIT; \ + is_fatal = FALSE; \ + } \ MACRO_END -#define CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal) \ -MACRO_BEGIN \ -(p)->p_memstat_memlimit = (p)->p_memstat_memlimit_inactive; \ - if ((p)->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL) { \ - (p)->p_memstat_state |= P_MEMSTAT_FATAL_MEMLIMIT; \ - is_fatal = TRUE; \ - } else { \ - (p)->p_memstat_state &= ~P_MEMSTAT_FATAL_MEMLIMIT; \ - is_fatal = FALSE; \ - } \ +#define CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal) \ +MACRO_BEGIN \ +(p)->p_memstat_memlimit = (p)->p_memstat_memlimit_inactive; \ + if ((p)->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL) { \ + (p)->p_memstat_state |= P_MEMSTAT_FATAL_MEMLIMIT; \ + is_fatal = TRUE; \ + } else { \ + (p)->p_memstat_state &= ~P_MEMSTAT_FATAL_MEMLIMIT; \ + is_fatal = FALSE; \ + } \ MACRO_END @@ -292,8 +293,8 @@ static int memorystatus_list_count = 0; #define MEMSTAT_BUCKET_COUNT (JETSAM_PRIORITY_MAX + 1) typedef struct memstat_bucket { - TAILQ_HEAD(, proc) list; - int count; + TAILQ_HEAD(, proc) list; + int count; } memstat_bucket_t; memstat_bucket_t memstat_bucket[MEMSTAT_BUCKET_COUNT]; @@ -305,7 +306,7 @@ uint64_t memstat_idle_demotion_deadline = 0; int system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; int applications_aging_band = JETSAM_PRIORITY_IDLE; -#define isProcessInAgingBands(p) ((isSysProc(p) && system_procs_aging_band && (p->p_memstat_effectivepriority == system_procs_aging_band)) || (isApp(p) && applications_aging_band && (p->p_memstat_effectivepriority == applications_aging_band))) +#define isProcessInAgingBands(p) ((isSysProc(p) && system_procs_aging_band && (p->p_memstat_effectivepriority == system_procs_aging_band)) || (isApp(p) && applications_aging_band && 
(p->p_memstat_effectivepriority == applications_aging_band))) /* * Checking the p_memstat_state almost always requires the proc_list_lock @@ -314,14 +315,14 @@ int applications_aging_band = JETSAM_PRIORITY_IDLE; * App -- almost always managed by a system process. Always have dirty tracking OFF. Can include extensions too. * System Processes -- not managed by anybody. Always have dirty tracking ON. Can include extensions (here) too. */ -#define isApp(p) ((p->p_memstat_state & P_MEMSTAT_MANAGED) || ! (p->p_memstat_dirty & P_DIRTY_TRACK)) -#define isSysProc(p) ( ! (p->p_memstat_state & P_MEMSTAT_MANAGED) || (p->p_memstat_dirty & P_DIRTY_TRACK)) +#define isApp(p) ((p->p_memstat_state & P_MEMSTAT_MANAGED) || ! (p->p_memstat_dirty & P_DIRTY_TRACK)) +#define isSysProc(p) ( ! (p->p_memstat_state & P_MEMSTAT_MANAGED) || (p->p_memstat_dirty & P_DIRTY_TRACK)) -#define kJetsamAgingPolicyNone (0) -#define kJetsamAgingPolicyLegacy (1) -#define kJetsamAgingPolicySysProcsReclaimedFirst (2) -#define kJetsamAgingPolicyAppsReclaimedFirst (3) -#define kJetsamAgingPolicyMax kJetsamAgingPolicyAppsReclaimedFirst +#define kJetsamAgingPolicyNone (0) +#define kJetsamAgingPolicyLegacy (1) +#define kJetsamAgingPolicySysProcsReclaimedFirst (2) +#define kJetsamAgingPolicyAppsReclaimedFirst (3) +#define kJetsamAgingPolicyMax kJetsamAgingPolicyAppsReclaimedFirst unsigned int jetsam_aging_policy = kJetsamAgingPolicyLegacy; @@ -352,7 +353,7 @@ sysctl_set_jetsam_aging_policy SYSCTL_HANDLER_ARGS error = sysctl_io_number(req, jetsam_aging_policy, sizeof(int), &val, NULL); if (error || !req->newptr) { - return (error); + return error; } if ((val < 0) || (val > kJetsamAgingPolicyMax)) { @@ -365,48 +366,45 @@ sysctl_set_jetsam_aging_policy SYSCTL_HANDLER_ARGS * that might be in progress currently. We use the proc_list_lock() just for * consistency with all the routines dealing with 'aging' processes. We need * a lighterweight lock. - */ + */ proc_list_lock(); old_system_procs_aging_band = system_procs_aging_band; old_applications_aging_band = applications_aging_band; - - switch (val) { - case kJetsamAgingPolicyNone: - new_system_procs_aging_band = JETSAM_PRIORITY_IDLE; - new_applications_aging_band = JETSAM_PRIORITY_IDLE; - break; + switch (val) { + case kJetsamAgingPolicyNone: + new_system_procs_aging_band = JETSAM_PRIORITY_IDLE; + new_applications_aging_band = JETSAM_PRIORITY_IDLE; + break; - case kJetsamAgingPolicyLegacy: - /* - * Legacy behavior where some daemons get a 10s protection once and only before the first clean->dirty->clean transition before going into IDLE band. - */ - new_system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; - new_applications_aging_band = JETSAM_PRIORITY_IDLE; - break; + case kJetsamAgingPolicyLegacy: + /* + * Legacy behavior where some daemons get a 10s protection once and only before the first clean->dirty->clean transition before going into IDLE band. 
+ */ + new_system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; + new_applications_aging_band = JETSAM_PRIORITY_IDLE; + break; - case kJetsamAgingPolicySysProcsReclaimedFirst: - new_system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; - new_applications_aging_band = JETSAM_PRIORITY_AGING_BAND2; - break; + case kJetsamAgingPolicySysProcsReclaimedFirst: + new_system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; + new_applications_aging_band = JETSAM_PRIORITY_AGING_BAND2; + break; - case kJetsamAgingPolicyAppsReclaimedFirst: - new_system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND2; - new_applications_aging_band = JETSAM_PRIORITY_AGING_BAND1; - break; + case kJetsamAgingPolicyAppsReclaimedFirst: + new_system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND2; + new_applications_aging_band = JETSAM_PRIORITY_AGING_BAND1; + break; - default: - break; + default: + break; } if (old_system_procs_aging_band && (old_system_procs_aging_band != new_system_procs_aging_band)) { - old_bucket = &memstat_bucket[old_system_procs_aging_band]; p = TAILQ_FIRST(&old_bucket->list); - + while (p) { - next_proc = TAILQ_NEXT(p, p_memstat_list); if (isSysProc(p)) { @@ -423,12 +421,10 @@ sysctl_set_jetsam_aging_policy SYSCTL_HANDLER_ARGS } if (old_applications_aging_band && (old_applications_aging_band != new_applications_aging_band)) { - old_bucket = &memstat_bucket[old_applications_aging_band]; p = TAILQ_FIRST(&old_bucket->list); - - while (p) { + while (p) { next_proc = TAILQ_NEXT(p, p_memstat_list); if (isApp(p)) { @@ -450,11 +446,11 @@ sysctl_set_jetsam_aging_policy SYSCTL_HANDLER_ARGS proc_list_unlock(); - return (0); + return 0; } -SYSCTL_PROC(_kern, OID_AUTO, set_jetsam_aging_policy, CTLTYPE_INT|CTLFLAG_RW, - 0, 0, sysctl_set_jetsam_aging_policy, "I", "Jetsam Aging Policy"); +SYSCTL_PROC(_kern, OID_AUTO, set_jetsam_aging_policy, CTLTYPE_INT | CTLFLAG_RW, + 0, 0, sysctl_set_jetsam_aging_policy, "I", "Jetsam Aging Policy"); #endif /*0*/ static int @@ -470,7 +466,7 @@ sysctl_jetsam_set_sysprocs_idle_delay_time SYSCTL_HANDLER_ARGS error = sysctl_io_number(req, old_time_in_secs, sizeof(int), &val, NULL); if (error || !req->newptr) { - return (error); + return error; } if ((val < 0) || (val > INT32_MAX)) { @@ -479,12 +475,12 @@ sysctl_jetsam_set_sysprocs_idle_delay_time SYSCTL_HANDLER_ARGS } nanoseconds_to_absolutetime((uint64_t)val * NSEC_PER_SEC, &memorystatus_sysprocs_idle_delay_time); - - return(0); + + return 0; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_sysprocs_idle_delay_time, CTLTYPE_INT|CTLFLAG_RW, - 0, 0, sysctl_jetsam_set_sysprocs_idle_delay_time, "I", "Aging window for system processes"); +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_sysprocs_idle_delay_time, CTLTYPE_INT | CTLFLAG_RW, + 0, 0, sysctl_jetsam_set_sysprocs_idle_delay_time, "I", "Aging window for system processes"); static int @@ -500,7 +496,7 @@ sysctl_jetsam_set_apps_idle_delay_time SYSCTL_HANDLER_ARGS error = sysctl_io_number(req, old_time_in_secs, sizeof(int), &val, NULL); if (error || !req->newptr) { - return (error); + return error; } if ((val < 0) || (val > INT32_MAX)) { @@ -509,36 +505,36 @@ sysctl_jetsam_set_apps_idle_delay_time SYSCTL_HANDLER_ARGS } nanoseconds_to_absolutetime((uint64_t)val * NSEC_PER_SEC, &memorystatus_apps_idle_delay_time); - - return(0); + + return 0; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_apps_idle_delay_time, CTLTYPE_INT|CTLFLAG_RW, - 0, 0, sysctl_jetsam_set_apps_idle_delay_time, "I", "Aging window for applications"); +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_apps_idle_delay_time, CTLTYPE_INT | 
CTLFLAG_RW, + 0, 0, sysctl_jetsam_set_apps_idle_delay_time, "I", "Aging window for applications"); -SYSCTL_INT(_kern, OID_AUTO, jetsam_aging_policy, CTLTYPE_INT|CTLFLAG_RD, &jetsam_aging_policy, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, jetsam_aging_policy, CTLTYPE_INT | CTLFLAG_RD, &jetsam_aging_policy, 0, ""); static unsigned int memorystatus_dirty_count = 0; -SYSCTL_INT(_kern, OID_AUTO, max_task_pmem, CTLFLAG_RD|CTLFLAG_LOCKED|CTLFLAG_MASKED, &max_task_footprint_mb, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, max_task_pmem, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, &max_task_footprint_mb, 0, ""); #if CONFIG_EMBEDDED -SYSCTL_INT(_kern, OID_AUTO, memorystatus_level, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_level, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, memorystatus_level, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_level, 0, ""); #endif /* CONFIG_EMBEDDED */ int memorystatus_get_level(__unused struct proc *p, struct memorystatus_get_level_args *args, __unused int *ret) { - user_addr_t level = 0; - + user_addr_t level = 0; + level = args->level; - + if (copyout(&memorystatus_level, level, sizeof(memorystatus_level)) != 0) { return EFAULT; } - + return 0; } @@ -571,9 +567,9 @@ static boolean_t memorystatus_idle_snapshot = 0; unsigned int memorystatus_delta = 0; /* Jetsam Loop Detection */ -static boolean_t memorystatus_jld_enabled = FALSE; /* Enable jetsam loop detection */ -static uint32_t memorystatus_jld_eval_period_msecs = 0; /* Init pass sets this based on device memory size */ -static int memorystatus_jld_eval_aggressive_count = 3; /* Raise the priority max after 'n' aggressive loops */ +static boolean_t memorystatus_jld_enabled = FALSE; /* Enable jetsam loop detection */ +static uint32_t memorystatus_jld_eval_period_msecs = 0; /* Init pass sets this based on device memory size */ +static int memorystatus_jld_eval_aggressive_count = 3; /* Raise the priority max after 'n' aggressive loops */ static int memorystatus_jld_eval_aggressive_priority_band_max = 15; /* Kill aggressively up through this band */ /* @@ -582,25 +578,25 @@ static int memorystatus_jld_eval_aggressive_priority_band_max = 15; /* Kil * * RESTRICTIONS: * - Such a request is respected/acknowledged only once while that 'requesting' app is in the FG band i.e. if aggressive jetsam was - * needed and the 'lenient' mode was deployed then that's it for this special mode while the app is in the FG band. + * needed and the 'lenient' mode was deployed then that's it for this special mode while the app is in the FG band. * * - If the app is still in the FG band and aggressive jetsam is needed again, there will be no stop-and-check the next time around. * * - Also, the transition of the 'requesting' app away from the FG band will void this special behavior. */ -#define AGGRESSIVE_JETSAM_LENIENT_MODE_THRESHOLD 25 -boolean_t memorystatus_aggressive_jetsam_lenient_allowed = FALSE; -boolean_t memorystatus_aggressive_jetsam_lenient = FALSE; +#define AGGRESSIVE_JETSAM_LENIENT_MODE_THRESHOLD 25 +boolean_t memorystatus_aggressive_jetsam_lenient_allowed = FALSE; +boolean_t memorystatus_aggressive_jetsam_lenient = FALSE; #if DEVELOPMENT || DEBUG -/* +/* * Jetsam Loop Detection tunables. 
*/ -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_period_msecs, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_jld_eval_period_msecs, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_aggressive_count, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_jld_eval_aggressive_count, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_aggressive_priority_band_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_jld_eval_aggressive_priority_band_max, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_period_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_jld_eval_period_msecs, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_aggressive_count, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_jld_eval_aggressive_count, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jld_eval_aggressive_priority_band_max, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_jld_eval_aggressive_priority_band_max, 0, ""); #endif /* DEVELOPMENT || DEBUG */ static uint32_t kill_under_pressure_cause = 0; @@ -631,10 +627,10 @@ static void memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t static void memorystatus_clear_errors(void); static void memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *max_footprint_lifetime, uint32_t *purgeable_pages); static void memorystatus_get_task_phys_footprint_page_counts(task_t task, - uint64_t *internal_pages, uint64_t *internal_compressed_pages, - uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages, - uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages, - uint64_t *iokit_mapped_pages, uint64_t *page_table_pages); + uint64_t *internal_pages, uint64_t *internal_compressed_pages, + uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages, + uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages, + uint64_t *iokit_mapped_pages, uint64_t *page_table_pages); static void memorystatus_get_task_memory_region_count(task_t task, uint64_t *count); @@ -668,7 +664,7 @@ extern unsigned int vm_page_throttled_count; extern unsigned int vm_page_purgeable_count; extern unsigned int vm_page_wire_count; #if CONFIG_SECLUDED_MEMORY -extern unsigned int vm_page_secluded_count; +extern unsigned int vm_page_secluded_count; #endif /* CONFIG_SECLUDED_MEMORY */ #if CONFIG_JETSAM @@ -716,7 +712,7 @@ unsigned int memorystatus_refreeze_eligible_count = 0; /* # of processes current #if VM_PRESSURE_EVENTS -boolean_t memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t exceeded); +boolean_t memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t exceeded); vm_pressure_level_t memorystatus_vm_pressure_level = kVMPressureNormal; @@ -740,15 +736,15 @@ static int memorystatus_send_note(int event_code, void *data, size_t data_length * This value is the threshold that a process must meet to be considered for scavenging. 
*/ #if CONFIG_EMBEDDED -#define VM_PRESSURE_MINIMUM_RSIZE 6 /* MB */ +#define VM_PRESSURE_MINIMUM_RSIZE 6 /* MB */ #else /* CONFIG_EMBEDDED */ -#define VM_PRESSURE_MINIMUM_RSIZE 10 /* MB */ +#define VM_PRESSURE_MINIMUM_RSIZE 10 /* MB */ #endif /* CONFIG_EMBEDDED */ uint32_t vm_pressure_task_footprint_min = VM_PRESSURE_MINIMUM_RSIZE; #if DEVELOPMENT || DEBUG -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_vm_pressure_task_footprint_min, CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pressure_task_footprint_min, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_vm_pressure_task_footprint_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pressure_task_footprint_min, 0, ""); #endif /* DEVELOPMENT || DEBUG */ #endif /* VM_PRESSURE_EVENTS */ @@ -804,7 +800,7 @@ static unsigned int memorystatus_freeze_pages_max = 0; static unsigned int memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_DEFAULT; static unsigned int memorystatus_freeze_daily_mb_max = FREEZE_DAILY_MB_MAX_DEFAULT; -static uint64_t memorystatus_freeze_budget_pages_remaining = 0; //remaining # of pages that can be frozen to disk +static uint64_t memorystatus_freeze_budget_pages_remaining = 0; //remaining # of pages that can be frozen to disk static boolean_t memorystatus_freeze_degradation = FALSE; //protected by the freezer mutex. Signals we are in a degraded freeze mode. static unsigned int memorystatus_max_frozen_demotions_daily = 0; @@ -814,8 +810,8 @@ static unsigned int memorystatus_thaw_count_demotion_threshold = 0; static uint64_t memorystatus_freeze_pageouts = 0; /* Throttling */ -#define DEGRADED_WINDOW_MINS (30) -#define NORMAL_WINDOW_MINS (24 * 60) +#define DEGRADED_WINDOW_MINS (30) +#define NORMAL_WINDOW_MINS (24 * 60) static throttle_interval_t throttle_intervals[] = { { DEGRADED_WINDOW_MINS, 1, 0, 0, { 0, 0 }}, @@ -831,10 +827,10 @@ static void memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed); static uint64_t memorystatus_freezer_thread_next_run_ts = 0; -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_count, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_frozen_count, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_thaw_count, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_thaw_count, 0, ""); -SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_pageouts, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_pageouts, ""); -SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_budget_pages_remaining, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_budget_pages_remaining, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_frozen_count, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_thaw_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_thaw_count, 0, ""); +SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_pageouts, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freeze_pageouts, ""); +SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_budget_pages_remaining, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freeze_budget_pages_remaining, ""); #endif /* CONFIG_FREEZE */ @@ -847,7 +843,7 @@ extern struct knote *vm_find_knote_from_pid(pid_t, struct klist *); static unsigned int memorystatus_debug_dump_this_bucket = 0; static void -memorystatus_debug_dump_bucket_locked (unsigned int bucket_index) +memorystatus_debug_dump_bucket_locked(unsigned int bucket_index) { proc_t p = NULL; uint64_t bytes = 0; @@ -855,10 +851,10 @@ memorystatus_debug_dump_bucket_locked (unsigned int bucket_index) unsigned int b = bucket_index; boolean_t traverse_all_buckets = FALSE; - if (bucket_index >= MEMSTAT_BUCKET_COUNT) { 
+ if (bucket_index >= MEMSTAT_BUCKET_COUNT) { traverse_all_buckets = TRUE; b = 0; - } else { + } else { traverse_all_buckets = FALSE; b = bucket_index; } @@ -873,49 +869,49 @@ memorystatus_debug_dump_bucket_locked (unsigned int bucket_index) * F==Fatal, NF==NonFatal */ - printf("memorystatus_debug_dump ***START*(PAGE_SIZE_64=%llu)**\n", PAGE_SIZE_64); + printf("memorystatus_debug_dump ***START*(PAGE_SIZE_64=%llu)**\n", PAGE_SIZE_64); printf("bucket [pid] [pages / MB] [state] [EP / RP] dirty deadline [L-limit / C-limit / A-limit / IA-limit] name\n"); p = memorystatus_get_first_proc_locked(&b, traverse_all_buckets); while (p) { bytes = get_task_phys_footprint(p->task); task_get_phys_footprint_limit(p->task, &ledger_limit); printf("%2d [%5d] [%5lld /%3lldMB] 0x%-8x [%2d / %2d] 0x%-3x %10lld [%3d / %3d%s / %3d%s / %3d%s] %s\n", - b, p->p_pid, - (bytes / PAGE_SIZE_64), /* task's footprint converted from bytes to pages */ - (bytes / (1024ULL * 1024ULL)), /* task's footprint converted from bytes to MB */ - p->p_memstat_state, p->p_memstat_effectivepriority, p->p_memstat_requestedpriority, p->p_memstat_dirty, p->p_memstat_idledeadline, - ledger_limit, - p->p_memstat_memlimit, - (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), - p->p_memstat_memlimit_active, - (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL ? "F " : "NF"), - p->p_memstat_memlimit_inactive, - (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL ? "F " : "NF"), - (*p->p_name ? p->p_name : "unknown")); + b, p->p_pid, + (bytes / PAGE_SIZE_64), /* task's footprint converted from bytes to pages */ + (bytes / (1024ULL * 1024ULL)), /* task's footprint converted from bytes to MB */ + p->p_memstat_state, p->p_memstat_effectivepriority, p->p_memstat_requestedpriority, p->p_memstat_dirty, p->p_memstat_idledeadline, + ledger_limit, + p->p_memstat_memlimit, + (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), + p->p_memstat_memlimit_active, + (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL ? "F " : "NF"), + p->p_memstat_memlimit_inactive, + (p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL ? "F " : "NF"), + (*p->p_name ? p->p_name : "unknown")); p = memorystatus_get_next_proc_locked(&b, p, traverse_all_buckets); - } - printf("memorystatus_debug_dump ***END***\n"); + } + printf("memorystatus_debug_dump ***END***\n"); } static int sysctl_memorystatus_debug_dump_bucket SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg2) - int bucket_index = 0; - int error; + int bucket_index = 0; + int error; error = SYSCTL_OUT(req, arg1, sizeof(int)); if (error || !req->newptr) { - return (error); + return error; + } + error = SYSCTL_IN(req, &bucket_index, sizeof(int)); + if (error || !req->newptr) { + return error; } - error = SYSCTL_IN(req, &bucket_index, sizeof(int)); - if (error || !req->newptr) { - return (error); - } if (bucket_index >= MEMSTAT_BUCKET_COUNT) { /* * All jetsam buckets will be dumped. */ - } else { + } else { /* * Only a single bucket will be dumped. */ @@ -925,17 +921,17 @@ sysctl_memorystatus_debug_dump_bucket SYSCTL_HANDLER_ARGS memorystatus_debug_dump_bucket_locked(bucket_index); proc_list_unlock(); memorystatus_debug_dump_this_bucket = bucket_index; - return (error); + return error; } /* * Debug aid to look at jetsam buckets and proc jetsam fields. * Use this sysctl to act on a particular jetsam bucket. * Writing the sysctl triggers the dump. 
- * Usage: sysctl kern.memorystatus_debug_dump_this_bucket= + * Usage: sysctl kern.memorystatus_debug_dump_this_bucket= */ -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_debug_dump_this_bucket, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_debug_dump_this_bucket, 0, sysctl_memorystatus_debug_dump_bucket, "I", ""); +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_debug_dump_this_bucket, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_debug_dump_this_bucket, 0, sysctl_memorystatus_debug_dump_bucket, "I", ""); /* Debug aid to aid determination of limit */ @@ -947,17 +943,17 @@ sysctl_memorystatus_highwater_enable SYSCTL_HANDLER_ARGS proc_t p; unsigned int b = 0; int error, enable = 0; - boolean_t use_active; /* use the active limit and active limit attributes */ + boolean_t use_active; /* use the active limit and active limit attributes */ boolean_t is_fatal; error = SYSCTL_OUT(req, arg1, sizeof(int)); if (error || !req->newptr) { - return (error); + return error; } error = SYSCTL_IN(req, &enable, sizeof(int)); if (error || !req->newptr) { - return (error); + return error; } if (!(enable == 0 || enable == 1)) { @@ -971,13 +967,11 @@ sysctl_memorystatus_highwater_enable SYSCTL_HANDLER_ARGS use_active = proc_jetsam_state_is_active_locked(p); if (enable) { - if (use_active == TRUE) { CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal); } else { CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal); } - } else { /* * Disabling limits does not touch the stored variants. @@ -995,16 +989,15 @@ sysctl_memorystatus_highwater_enable SYSCTL_HANDLER_ARGS p = memorystatus_get_next_proc_locked(&b, p, TRUE); } - + memorystatus_highwater_enabled = enable; proc_list_unlock(); return 0; - } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_highwater_enabled, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_highwater_enabled, 0, sysctl_memorystatus_highwater_enable, "I", ""); +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_highwater_enabled, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_highwater_enabled, 0, sysctl_memorystatus_highwater_enable, "I", ""); #if VM_PRESSURE_EVENTS @@ -1012,7 +1005,7 @@ SYSCTL_PROC(_kern, OID_AUTO, memorystatus_highwater_enabled, CTLTYPE_INT|CTLFLAG * This routine is used for targeted notifications regardless of system memory pressure * and regardless of whether or not the process has already been notified. * It bypasses and has no effect on the only-one-notification per soft-limit policy. - * + * * "memnote" is the current user. */ @@ -1024,12 +1017,13 @@ sysctl_memorystatus_vm_pressure_send SYSCTL_HANDLER_ARGS int error = 0, pid = 0; struct knote *kn = NULL; boolean_t found_knote = FALSE; - int fflags = 0; /* filter flags for EVFILT_MEMORYSTATUS */ + int fflags = 0; /* filter flags for EVFILT_MEMORYSTATUS */ uint64_t value = 0; error = sysctl_handle_quad(oidp, &value, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } /* * Find the pid in the low 32 bits of value passed in. @@ -1053,18 +1047,17 @@ sysctl_memorystatus_vm_pressure_send SYSCTL_HANDLER_ARGS /* * See event.h ... 
fflags for EVFILT_MEMORYSTATUS */ - if (!((fflags == NOTE_MEMORYSTATUS_PRESSURE_NORMAL)|| - (fflags == NOTE_MEMORYSTATUS_PRESSURE_WARN) || - (fflags == NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) || - (fflags == NOTE_MEMORYSTATUS_LOW_SWAP) || - (fflags == NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || - (fflags == NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || - (((fflags & NOTE_MEMORYSTATUS_MSL_STATUS) != 0 && - ((fflags & ~NOTE_MEMORYSTATUS_MSL_STATUS) == 0))))) { - + if (!((fflags == NOTE_MEMORYSTATUS_PRESSURE_NORMAL) || + (fflags == NOTE_MEMORYSTATUS_PRESSURE_WARN) || + (fflags == NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) || + (fflags == NOTE_MEMORYSTATUS_LOW_SWAP) || + (fflags == NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) || + (fflags == NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) || + (((fflags & NOTE_MEMORYSTATUS_MSL_STATUS) != 0 && + ((fflags & ~NOTE_MEMORYSTATUS_MSL_STATUS) == 0))))) { printf("memorystatus_vm_pressure_send: notification [0x%x] not supported \n", fflags); error = 1; - return (error); + return error; } /* @@ -1097,20 +1090,20 @@ sysctl_memorystatus_vm_pressure_send SYSCTL_HANDLER_ARGS memorystatus_klist_unlock(); - return (error); + return error; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_send, CTLTYPE_QUAD|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_send, CTLTYPE_QUAD | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_memorystatus_vm_pressure_send, "Q", ""); #endif /* VM_PRESSURE_EVENTS */ -SYSCTL_INT(_kern, OID_AUTO, memorystatus_idle_snapshot, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_idle_snapshot, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, memorystatus_idle_snapshot, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_idle_snapshot, 0, ""); #if CONFIG_JETSAM -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_available_pages_critical, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical_base, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_available_pages_critical_base, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical_idle_offset, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_available_pages_critical_idle_offset, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_available_pages_critical, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical_base, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_available_pages_critical_base, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical_idle_offset, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_available_pages_critical_idle_offset, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, memorystatus_policy_more_free_offset_pages, CTLFLAG_RW, &memorystatus_policy_more_free_offset_pages, 0, ""); static unsigned int memorystatus_jetsam_panic_debug = 0; @@ -1119,7 +1112,7 @@ static unsigned int memorystatus_jetsam_policy_offset_pages_diagnostic = 0; /* Diagnostic code */ enum { - kJetsamDiagnosticModeNone = 0, + kJetsamDiagnosticModeNone = 0, kJetsamDiagnosticModeAll = 1, kJetsamDiagnosticModeStopAtFirstActive = 2, kJetsamDiagnosticModeCount @@ -1135,27 +1128,28 @@ sysctl_jetsam_diagnostic_mode SYSCTL_HANDLER_ARGS const char *diagnosticStrings[] = { "jetsam: diagnostic mode: resetting critical level.", "jetsam: diagnostic mode: will examine all processes", - "jetsam: diagnostic mode: will stop at first active process" + "jetsam: diagnostic mode: will stop at first active process" }; - + int error, val = 
jetsam_diagnostic_mode; boolean_t changed = FALSE; error = sysctl_handle_int(oidp, &val, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } if ((val < 0) || (val >= kJetsamDiagnosticModeCount)) { printf("jetsam: diagnostic mode: invalid value - %d\n", val); return EINVAL; } - + proc_list_lock(); - + if ((unsigned int) val != jetsam_diagnostic_mode) { jetsam_diagnostic_mode = val; memorystatus_jetsam_policy &= ~kPolicyDiagnoseActive; - + switch (jetsam_diagnostic_mode) { case kJetsamDiagnosticModeNone: /* Already cleared */ @@ -1170,28 +1164,28 @@ sysctl_jetsam_diagnostic_mode SYSCTL_HANDLER_ARGS /* Already validated */ break; } - + memorystatus_update_levels_locked(FALSE); changed = TRUE; } - + proc_list_unlock(); - + if (changed) { printf("%s\n", diagnosticStrings[val]); } - - return (0); + + return 0; } -SYSCTL_PROC(_debug, OID_AUTO, jetsam_diagnostic_mode, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED|CTLFLAG_ANYBODY, - &jetsam_diagnostic_mode, 0, sysctl_jetsam_diagnostic_mode, "I", "Jetsam Diagnostic Mode"); +SYSCTL_PROC(_debug, OID_AUTO, jetsam_diagnostic_mode, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, + &jetsam_diagnostic_mode, 0, sysctl_jetsam_diagnostic_mode, "I", "Jetsam Diagnostic Mode"); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jetsam_policy_offset_pages_diagnostic, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_jetsam_policy_offset_pages_diagnostic, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jetsam_policy_offset_pages_diagnostic, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_jetsam_policy_offset_pages_diagnostic, 0, ""); #if VM_PRESSURE_EVENTS -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_pressure, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_available_pages_pressure, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_pressure, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_available_pages_pressure, 0, ""); #endif /* VM_PRESSURE_EVENTS */ @@ -1199,17 +1193,17 @@ SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_pressure, CTLFLAG_RW|C #if CONFIG_FREEZE -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_jetsam_band, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_jetsam_band, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_daily_mb_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_daily_mb_max, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_degraded_mode, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_freeze_degradation, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_jetsam_band, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_jetsam_band, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_daily_mb_max, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_daily_mb_max, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_degraded_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freeze_degradation, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_threshold, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_threshold, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_threshold, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_pages_min, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_pages_min, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_pages_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_pages_max, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_pages_min, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_pages_min, 0, ""); 
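The memorystatus_freeze_pages_min and memorystatus_freeze_pages_max tunables reformatted here bracket the freezer's per-process decision: a suspended task smaller than the minimum is skipped as not worth freezing (the freeze_has_pages_min check in memorystatus_freezer_get_status below), and larger tasks are budgeted at most pages_max pages (max_pages in sysctl_memorystatus_freeze below). A rough sketch of that clamp, assuming a purely illustrative freeze_page_budget() helper that is not an XNU function:

        #include <stdbool.h>
        #include <stdint.h>

        /*
         * Decide whether a task is worth freezing and, if so, how many
         * pages the freezer may compress or swap on its behalf.
         */
        static bool
        freeze_page_budget(uint32_t task_pages, uint32_t pages_min,
            uint32_t pages_max, uint32_t *budget)
        {
                if (task_pages < pages_min) {
                        return false;   /* too small to pay for a freeze */
                }
                *budget = (task_pages < pages_max) ? task_pages : pages_max;
                return true;
        }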
+SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_pages_max, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_pages_max, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_refreeze_eligible_count, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_refreeze_eligible_count, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_processes_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_frozen_processes_max, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_refreeze_eligible_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_refreeze_eligible_count, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_processes_max, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_frozen_processes_max, 0, ""); /* * Max. shared-anonymous memory in MB that can be held by frozen processes in the high jetsam band. @@ -1217,28 +1211,35 @@ SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_processes_max, CTLFLAG_RW|CTLFL * Default is 10% of system-wide task limit. */ -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_frozen_shared_mb_max, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_frozen_shared_mb, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb_max, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_frozen_shared_mb_max, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_frozen_shared_mb, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb_per_process_max, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_shared_mb_per_process_max, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_private_shared_pages_ratio, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_private_shared_pages_ratio, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_shared_mb_per_process_max, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_shared_mb_per_process_max, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_private_shared_pages_ratio, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_private_shared_pages_ratio, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_min_processes, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_suspended_threshold, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_min_processes, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_suspended_threshold, 0, ""); /* * max. # of frozen process demotions we will allow in our daily cycle. */ -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_max_freeze_demotions_daily, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_max_frozen_demotions_daily, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_max_freeze_demotions_daily, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_max_frozen_demotions_daily, 0, ""); /* * min # of thaws needed by a process to protect it from getting demoted into the IDLE band. 
*/ -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_thaw_count_demotion_threshold, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_thaw_count_demotion_threshold, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_thaw_count_demotion_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_thaw_count_demotion_threshold, 0, ""); boolean_t memorystatus_freeze_throttle_enabled = TRUE; -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_throttle_enabled, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_throttle_enabled, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_throttle_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_throttle_enabled, 0, ""); -#define VM_PAGES_FOR_ALL_PROCS (2) -/* +/* + * When set to true, this keeps frozen processes in the compressor pool in memory, instead of swapping them out to disk. + * Exposed via the sysctl kern.memorystatus_freeze_to_memory. + */ +boolean_t memorystatus_freeze_to_memory = FALSE; +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_to_memory, CTLFLAG_RW | CTLFLAG_LOCKED, &memorystatus_freeze_to_memory, 0, ""); + +#define VM_PAGES_FOR_ALL_PROCS (2) +/* * Manual trigger of freeze and thaw for dev / debug kernels only. */ static int @@ -1255,8 +1256,9 @@ sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS } error = sysctl_handle_int(oidp, &pid, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } if (pid == VM_PAGES_FOR_ALL_PROCS) { vm_pageout_anonymous_pages(); @@ -1276,6 +1278,11 @@ sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS * Freezer backed by the compressor and swap file(s) * will hold compressed data. * + * Set the sysctl kern.memorystatus_freeze_to_memory to true to keep compressed data from + * being swapped out to disk. Note that this disables freezer swap support globally, + * not just for the process being frozen. + * + * * We don't care about the global freezer budget or the process's (min/max) budget here. * The freeze sysctl is meant to force-freeze a process. * @@ -1284,7 +1291,6 @@ sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS * P_MEMSTAT_FROZEN bit, and elevate the process to a higher band (if the freezer is active). */ max_pages = memorystatus_freeze_pages_max; - } else { /* * We only have the compressor without any swap. @@ -1303,9 +1309,9 @@ sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS */ if (state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED)) { printf("sysctl_freeze: p_memstat_state check failed, process is%s%s%s\n", - (state & P_MEMSTAT_TERMINATED) ? " terminated" : "", - (state & P_MEMSTAT_LOCKED) ? " locked" : "", - (state & P_MEMSTAT_FREEZE_DISABLED) ? " unfreezable" : ""); + (state & P_MEMSTAT_TERMINATED) ? " terminated" : "", + (state & P_MEMSTAT_LOCKED) ? " locked" : "", + (state & P_MEMSTAT_FREEZE_DISABLED) ? " unfreezable" : ""); proc_rele(p); lck_mtx_unlock(&freezer_mutex); @@ -1331,7 +1337,7 @@ sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { strlcpy(reason, "no swap space", 128); } - + printf("sysctl_freeze: task_freeze failed: %s\n", reason); if (error == KERN_NO_SPACE) { @@ -1356,7 +1362,7 @@ sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS * We elevate only if we are going to swap out the data. 
*/ error = memorystatus_update_inactive_jetsam_priority_band(pid, MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE, - memorystatus_freeze_jetsam_band, TRUE); + memorystatus_freeze_jetsam_band, TRUE); if (error) { printf("sysctl_freeze: Elevating frozen process to higher jetsam band failed with %d\n", error); @@ -1377,7 +1383,7 @@ sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS return EINVAL; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_freeze, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_freeze, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_memorystatus_freeze, "I", ""); static int @@ -1393,8 +1399,9 @@ sysctl_memorystatus_available_pages_thaw SYSCTL_HANDLER_ARGS } error = sysctl_handle_int(oidp, &pid, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } if (pid == VM_PAGES_FOR_ALL_PROCS) { do_fastwake_warmup_all(); @@ -1427,41 +1434,41 @@ sysctl_memorystatus_available_pages_thaw SYSCTL_HANDLER_ARGS return EINVAL; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_thaw, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_thaw, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_memorystatus_available_pages_thaw, "I", ""); -typedef struct _global_freezable_status{ +typedef struct _global_freezable_status { boolean_t freeze_pages_threshold_crossed; boolean_t freeze_eligible_procs_available; boolean_t freeze_scheduled_in_future; }global_freezable_status_t; -typedef struct _proc_freezable_status{ - boolean_t freeze_has_memstat_state; - boolean_t freeze_has_pages_min; - int freeze_has_probability; - boolean_t freeze_attempted; - uint32_t p_memstat_state; - uint32_t p_pages; - int p_freeze_error_code; - int p_pid; - char p_name[MAXCOMLEN + 1]; +typedef struct _proc_freezable_status { + boolean_t freeze_has_memstat_state; + boolean_t freeze_has_pages_min; + int freeze_has_probability; + boolean_t freeze_attempted; + uint32_t p_memstat_state; + uint32_t p_pages; + int p_freeze_error_code; + int p_pid; + char p_name[MAXCOMLEN + 1]; }proc_freezable_status_t; #define MAX_FREEZABLE_PROCESSES 100 static int -memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t *retval) +memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t *retval) { - uint32_t proc_count = 0, i = 0; - global_freezable_status_t *list_head; - proc_freezable_status_t *list_entry; - size_t list_size = 0; - proc_t p; - memstat_bucket_t *bucket; - uint32_t state = 0, pages = 0, entry_count = 0; - boolean_t try_freeze = TRUE; - int error = 0, probability_of_use = 0; + uint32_t proc_count = 0, i = 0; + global_freezable_status_t *list_head; + proc_freezable_status_t *list_entry; + size_t list_size = 0; + proc_t p; + memstat_bucket_t *bucket; + uint32_t state = 0, pages = 0, entry_count = 0; + boolean_t try_freeze = TRUE; + int error = 0, probability_of_use = 0; if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE == FALSE) { @@ -1474,7 +1481,7 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t return EINVAL; } - list_head = (global_freezable_status_t*)kalloc(list_size); + list_head = (global_freezable_status_t*)kalloc(list_size); if (list_head == NULL) { return ENOMEM; } @@ -1494,16 +1501,15 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t list_entry = (proc_freezable_status_t*) ((uintptr_t)list_head + sizeof(global_freezable_status_t)); bucket 
= &memstat_bucket[JETSAM_PRIORITY_IDLE]; - + entry_count = (memorystatus_global_probabilities_size / sizeof(memorystatus_internal_probabilities_t)); p = memorystatus_get_first_proc_locked(&i, FALSE); proc_count++; while ((proc_count <= MAX_FREEZABLE_PROCESSES) && - (p) && - (list_size < buffer_size)) { - + (p) && + (list_size < buffer_size)) { if (isApp(p) == FALSE) { p = memorystatus_get_next_proc_locked(&i, p, FALSE); proc_count++; @@ -1513,12 +1519,11 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t strlcpy(list_entry->p_name, p->p_name, MAXCOMLEN + 1); list_entry->p_pid = p->p_pid; - + state = p->p_memstat_state; if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED | P_MEMSTAT_FREEZE_IGNORE)) || - !(state & P_MEMSTAT_SUSPENDED)) { - + !(state & P_MEMSTAT_SUSPENDED)) { try_freeze = list_entry->freeze_has_memstat_state = FALSE; } else { try_freeze = list_entry->freeze_has_memstat_state = TRUE; @@ -1537,14 +1542,13 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t } list_entry->p_pages = pages; - + if (entry_count) { uint32_t j = 0; - for (j = 0; j < entry_count; j++ ) { + for (j = 0; j < entry_count; j++) { if (strncmp(memorystatus_global_probabilities_table[j].proc_name, - p->p_name, - MAXCOMLEN + 1) == 0) { - + p->p_name, + MAXCOMLEN + 1) == 0) { probability_of_use = memorystatus_global_probabilities_table[j].use_probability; break; } @@ -1565,7 +1569,6 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t } if (try_freeze) { - uint32_t purgeable, wired, clean, dirty, shared; uint32_t max_pages = 0; int freezer_error_code = 0; @@ -1582,11 +1585,11 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t list_entry++; list_size += sizeof(proc_freezable_status_t); - + p = memorystatus_get_next_proc_locked(&i, p, FALSE); proc_count++; } - + proc_list_unlock(); buffer_size = list_size; @@ -1602,7 +1605,7 @@ memorystatus_freezer_get_status(user_addr_t buffer, size_t buffer_size, int32_t kfree(list_head, list_size); MEMORYSTATUS_DEBUG(1, "memorystatus_freezer_get_status: returning %d (%lu - size)\n", error, (unsigned long)*list_size); - + return error; } @@ -1623,9 +1626,9 @@ memorystatus_freezer_control(int32_t flags, user_addr_t buffer, size_t buffer_si #endif /* DEVELOPMENT || DEBUG */ extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, - void *parameter, - integer_t priority, - thread_t *new_thread); + void *parameter, + integer_t priority, + thread_t *new_thread); #if DEVELOPMENT || DEBUG @@ -1633,12 +1636,13 @@ static int sysctl_memorystatus_disconnect_page_mappings SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2) - int error = 0, pid = 0; - proc_t p; + int error = 0, pid = 0; + proc_t p; error = sysctl_handle_int(oidp, &pid, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } lck_mtx_lock(&disconnect_page_mappings_mutex); @@ -1652,17 +1656,19 @@ sysctl_memorystatus_disconnect_page_mappings SYSCTL_HANDLER_ARGS proc_rele(p); - if (error) + if (error) { error = EIO; - } else + } + } else { error = EINVAL; + } } lck_mtx_unlock(&disconnect_page_mappings_mutex); return error; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_disconnect_page_mappings, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_disconnect_page_mappings, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, 
&sysctl_memorystatus_disconnect_page_mappings, "I", ""); #endif /* DEVELOPMENT || DEBUG */ @@ -1677,41 +1683,42 @@ SYSCTL_PROC(_kern, OID_AUTO, memorystatus_disconnect_page_mappings, CTLTYPE_INT| * Currently sort_order is only meaningful when handling * coalitions. * - * Return: + * Return: * 0 on success - * non-0 on failure + * non-0 on failure */ -static int memorystatus_sort_bucket(unsigned int bucket_index, int sort_order) +static int +memorystatus_sort_bucket(unsigned int bucket_index, int sort_order) { int coal_sort_order; /* * Verify the jetsam priority */ - if (bucket_index >= MEMSTAT_BUCKET_COUNT) { - return(EINVAL); - } + if (bucket_index >= MEMSTAT_BUCKET_COUNT) { + return EINVAL; + } #if DEVELOPMENT || DEBUG - if (sort_order == JETSAM_SORT_DEFAULT) { + if (sort_order == JETSAM_SORT_DEFAULT) { coal_sort_order = COALITION_SORT_DEFAULT; } else { - coal_sort_order = sort_order; /* only used for testing scenarios */ + coal_sort_order = sort_order; /* only used for testing scenarios */ } #else /* Verify default */ - if (sort_order == JETSAM_SORT_DEFAULT) { + if (sort_order == JETSAM_SORT_DEFAULT) { coal_sort_order = COALITION_SORT_DEFAULT; } else { - return(EINVAL); + return EINVAL; } #endif proc_list_lock(); - + if (memstat_bucket[bucket_index].count == 0) { proc_list_unlock(); - return (0); + return 0; } switch (bucket_index) { @@ -1728,25 +1735,26 @@ static int memorystatus_sort_bucket(unsigned int bucket_index, int sort_order) break; } proc_list_unlock(); - - return(0); + + return 0; } /* * Sort processes by size for a single jetsam bucket. */ -static void memorystatus_sort_by_largest_process_locked(unsigned int bucket_index) +static void +memorystatus_sort_by_largest_process_locked(unsigned int bucket_index) { proc_t p = NULL, insert_after_proc = NULL, max_proc = NULL; proc_t next_p = NULL, prev_max_proc = NULL; uint32_t pages = 0, max_pages = 0; memstat_bucket_t *current_bucket; - + if (bucket_index >= MEMSTAT_BUCKET_COUNT) { return; } - + current_bucket = &memstat_bucket[bucket_index]; p = TAILQ_FIRST(¤t_bucket->list); @@ -1756,10 +1764,10 @@ static void memorystatus_sort_by_largest_process_locked(unsigned int bucket_inde max_pages = pages; max_proc = p; prev_max_proc = p; - + while ((next_p = TAILQ_NEXT(p, p_memstat_list)) != NULL) { /* traversing list until we find next largest process */ - p=next_p; + p = next_p; memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL); if (pages > max_pages) { max_pages = pages; @@ -1784,7 +1792,9 @@ static void memorystatus_sort_by_largest_process_locked(unsigned int bucket_inde } } -static proc_t memorystatus_get_first_proc_locked(unsigned int *bucket_index, boolean_t search) { +static proc_t +memorystatus_get_first_proc_locked(unsigned int *bucket_index, boolean_t search) +{ memstat_bucket_t *current_bucket; proc_t next_p; @@ -1800,14 +1810,16 @@ static proc_t memorystatus_get_first_proc_locked(unsigned int *bucket_index, boo next_p = TAILQ_FIRST(¤t_bucket->list); } } - + return next_p; } -static proc_t memorystatus_get_next_proc_locked(unsigned int *bucket_index, proc_t p, boolean_t search) { +static proc_t +memorystatus_get_next_proc_locked(unsigned int *bucket_index, proc_t p, boolean_t search) +{ memstat_bucket_t *current_bucket; proc_t next_p; - + if (!p || ((*bucket_index) >= MEMSTAT_BUCKET_COUNT)) { return NULL; } @@ -1827,10 +1839,10 @@ static proc_t memorystatus_get_next_proc_locked(unsigned int *bucket_index, proc * unless parallel jetsam is enabled. 
*/ struct jetsam_thread_state { - boolean_t inited; /* if the thread is initialized */ - int memorystatus_wakeup; /* wake channel */ - int index; /* jetsam thread index */ - thread_t thread; /* jetsam thread pointer */ + boolean_t inited; /* if the thread is initialized */ + int memorystatus_wakeup; /* wake channel */ + int index; /* jetsam thread index */ + thread_t thread; /* jetsam thread pointer */ } *jetsam_threads; /* Maximum number of jetsam threads allowed */ @@ -1856,8 +1868,9 @@ static inline struct jetsam_thread_state * jetsam_current_thread(void) { for (int thr_id = 0; thr_id < max_jetsam_threads; thr_id++) { - if (jetsam_threads[thr_id].thread == current_thread()) + if (jetsam_threads[thr_id].thread == current_thread()) { return &(jetsam_threads[thr_id]); + } } panic("jetsam_current_thread() is being called from a non-jetsam thread\n"); /* Control should not reach here */ @@ -1891,9 +1904,9 @@ memorystatus_init(void) if (kill_on_no_paging_space == TRUE) { max_kill_priority = JETSAM_PRIORITY_MAX; } -#endif +#endif + - /* Init buckets */ for (i = 0; i < MEMSTAT_BUCKET_COUNT; i++) { TAILQ_INIT(&memstat_bucket[i].list); @@ -1904,7 +1917,7 @@ memorystatus_init(void) #if CONFIG_JETSAM nanoseconds_to_absolutetime((uint64_t)DEFERRED_IDLE_EXIT_TIME_SECS * NSEC_PER_SEC, &memorystatus_sysprocs_idle_delay_time); nanoseconds_to_absolutetime((uint64_t)DEFERRED_IDLE_EXIT_TIME_SECS * NSEC_PER_SEC, &memorystatus_apps_idle_delay_time); - + /* Apply overrides */ PE_get_default("kern.jetsam_delta", &delta_percentage, sizeof(delta_percentage)); if (delta_percentage == 0) { @@ -1919,13 +1932,11 @@ memorystatus_init(void) assert(pressure_threshold_percentage < 100); PE_get_default("kern.jetsam_freeze_threshold", &freeze_threshold_percentage, sizeof(freeze_threshold_percentage)); assert(freeze_threshold_percentage < 100); - - if (!PE_parse_boot_argn("jetsam_aging_policy", &jetsam_aging_policy, - sizeof (jetsam_aging_policy))) { + if (!PE_parse_boot_argn("jetsam_aging_policy", &jetsam_aging_policy, + sizeof(jetsam_aging_policy))) { if (!PE_get_default("kern.jetsam_aging_policy", &jetsam_aging_policy, - sizeof(jetsam_aging_policy))) { - + sizeof(jetsam_aging_policy))) { jetsam_aging_policy = kJetsamAgingPolicyLegacy; } } @@ -1935,34 +1946,33 @@ memorystatus_init(void) } switch (jetsam_aging_policy) { + case kJetsamAgingPolicyNone: + system_procs_aging_band = JETSAM_PRIORITY_IDLE; + applications_aging_band = JETSAM_PRIORITY_IDLE; + break; - case kJetsamAgingPolicyNone: - system_procs_aging_band = JETSAM_PRIORITY_IDLE; - applications_aging_band = JETSAM_PRIORITY_IDLE; - break; - - case kJetsamAgingPolicyLegacy: - /* - * Legacy behavior where some daemons get a 10s protection once - * AND only before the first clean->dirty->clean transition before - * going into IDLE band. - */ - system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; - applications_aging_band = JETSAM_PRIORITY_IDLE; - break; + case kJetsamAgingPolicyLegacy: + /* + * Legacy behavior where some daemons get a 10s protection once + * AND only before the first clean->dirty->clean transition before + * going into IDLE band. 
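+ *
+ * For reference, the full set of band assignments this switch
+ * produces, per policy (an illustrative summary of the cases, not
+ * additional source):
+ *
+ *   kJetsamAgingPolicyNone:                   sysprocs IDLE,        apps IDLE
+ *   kJetsamAgingPolicyLegacy:                 sysprocs AGING_BAND1, apps IDLE
+ *   kJetsamAgingPolicySysProcsReclaimedFirst: sysprocs AGING_BAND1, apps AGING_BAND2
+ *   kJetsamAgingPolicyAppsReclaimedFirst:     sysprocs AGING_BAND2, apps AGING_BAND1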
+ */ + system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; + applications_aging_band = JETSAM_PRIORITY_IDLE; + break; - case kJetsamAgingPolicySysProcsReclaimedFirst: - system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; - applications_aging_band = JETSAM_PRIORITY_AGING_BAND2; - break; + case kJetsamAgingPolicySysProcsReclaimedFirst: + system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND1; + applications_aging_band = JETSAM_PRIORITY_AGING_BAND2; + break; - case kJetsamAgingPolicyAppsReclaimedFirst: - system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND2; - applications_aging_band = JETSAM_PRIORITY_AGING_BAND1; - break; + case kJetsamAgingPolicyAppsReclaimedFirst: + system_procs_aging_band = JETSAM_PRIORITY_AGING_BAND2; + applications_aging_band = JETSAM_PRIORITY_AGING_BAND1; + break; - default: - break; + default: + break; } /* @@ -1975,23 +1985,23 @@ memorystatus_init(void) assert(JETSAM_PRIORITY_ELEVATED_INACTIVE > applications_aging_band); /* Take snapshots for idle-exit kills by default? First check the boot-arg... */ - if (!PE_parse_boot_argn("jetsam_idle_snapshot", &memorystatus_idle_snapshot, sizeof (memorystatus_idle_snapshot))) { - /* ...no boot-arg, so check the device tree */ - PE_get_default("kern.jetsam_idle_snapshot", &memorystatus_idle_snapshot, sizeof(memorystatus_idle_snapshot)); + if (!PE_parse_boot_argn("jetsam_idle_snapshot", &memorystatus_idle_snapshot, sizeof(memorystatus_idle_snapshot))) { + /* ...no boot-arg, so check the device tree */ + PE_get_default("kern.jetsam_idle_snapshot", &memorystatus_idle_snapshot, sizeof(memorystatus_idle_snapshot)); } memorystatus_delta = delta_percentage * atop_64(max_mem) / 100; memorystatus_available_pages_critical_idle_offset = idle_offset_percentage * atop_64(max_mem) / 100; memorystatus_available_pages_critical_base = (critical_threshold_percentage / delta_percentage) * memorystatus_delta; memorystatus_policy_more_free_offset_pages = (policy_more_free_offset_percentage / delta_percentage) * memorystatus_delta; - + /* Jetsam Loop Detection */ if (max_mem <= (512 * 1024 * 1024)) { /* 512 MB devices */ - memorystatus_jld_eval_period_msecs = 8000; /* 8000 msecs == 8 second window */ + memorystatus_jld_eval_period_msecs = 8000; /* 8000 msecs == 8 second window */ } else { /* 1GB and larger devices */ - memorystatus_jld_eval_period_msecs = 6000; /* 6000 msecs == 6 second window */ + memorystatus_jld_eval_period_msecs = 6000; /* 6000 msecs == 6 second window */ } memorystatus_jld_enabled = TRUE; @@ -2004,16 +2014,16 @@ memorystatus_init(void) memorystatus_jetsam_snapshot_max = maxproc; memorystatus_jetsam_snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + - (sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_max); + (sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_max); - memorystatus_jetsam_snapshot = - (memorystatus_jetsam_snapshot_t*)kalloc(memorystatus_jetsam_snapshot_size); + memorystatus_jetsam_snapshot = + (memorystatus_jetsam_snapshot_t*)kalloc(memorystatus_jetsam_snapshot_size); if (!memorystatus_jetsam_snapshot) { panic("Could not allocate memorystatus_jetsam_snapshot"); } memorystatus_jetsam_snapshot_copy = - (memorystatus_jetsam_snapshot_t*)kalloc(memorystatus_jetsam_snapshot_size); + (memorystatus_jetsam_snapshot_t*)kalloc(memorystatus_jetsam_snapshot_size); if (!memorystatus_jetsam_snapshot_copy) { panic("Could not allocate memorystatus_jetsam_snapshot_copy"); } @@ -2025,21 +2035,21 @@ memorystatus_init(void) #if CONFIG_FREEZE memorystatus_freeze_threshold = 
(freeze_threshold_percentage / delta_percentage) * memorystatus_delta; #endif - + /* Check the boot-arg to see if fast jetsam is allowed */ - if (!PE_parse_boot_argn("fast_jetsam_enabled", &fast_jetsam_enabled, sizeof (fast_jetsam_enabled))) { + if (!PE_parse_boot_argn("fast_jetsam_enabled", &fast_jetsam_enabled, sizeof(fast_jetsam_enabled))) { fast_jetsam_enabled = 0; } /* Check the boot-arg to configure the maximum number of jetsam threads */ - if (!PE_parse_boot_argn("max_jetsam_threads", &max_jetsam_threads, sizeof (max_jetsam_threads))) { + if (!PE_parse_boot_argn("max_jetsam_threads", &max_jetsam_threads, sizeof(max_jetsam_threads))) { max_jetsam_threads = JETSAM_THREADS_LIMIT; } /* Restrict the maximum number of jetsam threads to JETSAM_THREADS_LIMIT */ if (max_jetsam_threads > JETSAM_THREADS_LIMIT) { max_jetsam_threads = JETSAM_THREADS_LIMIT; - } + } /* For low CPU systems disable fast jetsam mechanism */ if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { @@ -2052,7 +2062,6 @@ memorystatus_init(void) /* Initialize all the jetsam threads */ for (i = 0; i < max_jetsam_threads; i++) { - result = kernel_thread_start_priority(memorystatus_thread, NULL, 95 /* MAXPRI_KERNEL */, &jetsam_threads[i].thread); if (result == KERN_SUCCESS) { jetsam_threads[i].inited = FALSE; @@ -2070,27 +2079,28 @@ vm_run_compactor(void); /* * The jetsam no frills kill call - * Return: 0 on success + * Return: 0 on success * error code on failure (EINVAL...) */ static int -jetsam_do_kill(proc_t p, int jetsam_flags, os_reason_t jetsam_reason) { +jetsam_do_kill(proc_t p, int jetsam_flags, os_reason_t jetsam_reason) +{ int error = 0; error = exit_with_reason(p, W_EXITCODE(0, SIGKILL), (int *)NULL, FALSE, FALSE, jetsam_flags, jetsam_reason); - return(error); + return error; } /* * Wrapper for processes exiting with memorystatus details */ static boolean_t -memorystatus_do_kill(proc_t p, uint32_t cause, os_reason_t jetsam_reason) { - +memorystatus_do_kill(proc_t p, uint32_t cause, os_reason_t jetsam_reason) +{ int error = 0; __unused pid_t victim_pid = p->p_pid; - KERNEL_DEBUG_CONSTANT( (BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DO_KILL)) | DBG_FUNC_START, - victim_pid, cause, vm_page_free_count, 0, 0); + KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DO_KILL)) | DBG_FUNC_START, + victim_pid, cause, vm_page_free_count, 0, 0); DTRACE_MEMORYSTATUS3(memorystatus_do_kill, proc_t, p, os_reason_t, jetsam_reason, uint32_t, cause); #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) @@ -2103,9 +2113,9 @@ memorystatus_do_kill(proc_t p, uint32_t cause, os_reason_t jetsam_reason) { if (p->p_memstat_effectivepriority >= JETSAM_PRIORITY_FOREGROUND) { printf("memorystatus: killing process %d [%s] in high band %s (%d) - memorystatus_available_pages: %llu\n", p->p_pid, - (*p->p_name ? p->p_name : "unknown"), - memorystatus_priority_band_name(p->p_memstat_effectivepriority), p->p_memstat_effectivepriority, - (uint64_t)memorystatus_available_pages); + (*p->p_name ? 
p->p_name : "unknown"), + memorystatus_priority_band_name(p->p_memstat_effectivepriority), p->p_memstat_effectivepriority, + (uint64_t)memorystatus_available_pages); } /* @@ -2114,23 +2124,23 @@ memorystatus_do_kill(proc_t p, uint32_t cause, os_reason_t jetsam_reason) { */ int jetsam_flags = P_LTERM_JETSAM; switch (cause) { - case kMemorystatusKilledHiwat: jetsam_flags |= P_JETSAM_HIWAT; break; - case kMemorystatusKilledVnodes: jetsam_flags |= P_JETSAM_VNODE; break; - case kMemorystatusKilledVMPageShortage: jetsam_flags |= P_JETSAM_VMPAGESHORTAGE; break; - case kMemorystatusKilledVMCompressorThrashing: - case kMemorystatusKilledVMCompressorSpaceShortage: jetsam_flags |= P_JETSAM_VMTHRASHING; break; - case kMemorystatusKilledFCThrashing: jetsam_flags |= P_JETSAM_FCTHRASHING; break; - case kMemorystatusKilledPerProcessLimit: jetsam_flags |= P_JETSAM_PID; break; - case kMemorystatusKilledIdleExit: jetsam_flags |= P_JETSAM_IDLEEXIT; break; + case kMemorystatusKilledHiwat: jetsam_flags |= P_JETSAM_HIWAT; break; + case kMemorystatusKilledVnodes: jetsam_flags |= P_JETSAM_VNODE; break; + case kMemorystatusKilledVMPageShortage: jetsam_flags |= P_JETSAM_VMPAGESHORTAGE; break; + case kMemorystatusKilledVMCompressorThrashing: + case kMemorystatusKilledVMCompressorSpaceShortage: jetsam_flags |= P_JETSAM_VMTHRASHING; break; + case kMemorystatusKilledFCThrashing: jetsam_flags |= P_JETSAM_FCTHRASHING; break; + case kMemorystatusKilledPerProcessLimit: jetsam_flags |= P_JETSAM_PID; break; + case kMemorystatusKilledIdleExit: jetsam_flags |= P_JETSAM_IDLEEXIT; break; } error = jetsam_do_kill(p, jetsam_flags, jetsam_reason); - KERNEL_DEBUG_CONSTANT( (BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DO_KILL)) | DBG_FUNC_END, - victim_pid, cause, vm_page_free_count, error, 0); + KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DO_KILL)) | DBG_FUNC_END, + victim_pid, cause, vm_page_free_count, error, 0); vm_run_compactor(); - return (error == 0); + return error == 0; } /* @@ -2138,7 +2148,8 @@ memorystatus_do_kill(proc_t p, uint32_t cause, os_reason_t jetsam_reason) { */ static void -memorystatus_check_levels_locked(void) { +memorystatus_check_levels_locked(void) +{ #if CONFIG_JETSAM /* Update levels */ memorystatus_update_levels_locked(TRUE); @@ -2150,7 +2161,7 @@ memorystatus_check_levels_locked(void) { #endif /* CONFIG_JETSAM */ } -/* +/* * Pin a process to a particular jetsam band when it is in the background i.e. not doing active work. * For an application: that means no longer in the FG band * For a daemon: that means no longer in its 'requested' jetsam priority band @@ -2159,9 +2170,9 @@ memorystatus_check_levels_locked(void) { int memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, int jetsam_prio, boolean_t effective_now) { - int error = 0; + int error = 0; boolean_t enable = FALSE; - proc_t p = NULL; + proc_t p = NULL; if (op_flags == MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE) { enable = TRUE; @@ -2173,15 +2184,12 @@ memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, p = proc_find(pid); if (p != NULL) { - if ((enable && ((p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) == P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND)) || (!enable && ((p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) == 0))) { /* * No change in state. 
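 *
 * Restated for clarity: the request is a no-op whenever the desired
 * value of the flag already matches its current value, i.e. the test
 * above is equivalent to (illustrative form):
 *
 *   bool already_set =
 *       (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) != 0;
 *   bool no_change = (enable == already_set);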
*/ - } else { - proc_list_lock(); if (enable) { @@ -2190,7 +2198,7 @@ memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, if (effective_now) { if (p->p_memstat_effectivepriority < jetsam_prio) { - if(memorystatus_highwater_enabled) { + if (memorystatus_highwater_enabled) { /* * Process is about to transition from * inactive --> active @@ -2209,7 +2217,6 @@ memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, } } } else { - p->p_memstat_state &= ~P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND; memorystatus_invalidate_idle_demotion_locked(p, TRUE); @@ -2228,7 +2235,6 @@ memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, } proc_rele(p); error = 0; - } else { error = ESRCH; } @@ -2237,79 +2243,74 @@ memorystatus_update_inactive_jetsam_priority_band(pid_t pid, uint32_t op_flags, } static void -memorystatus_perform_idle_demotion(__unused void *spare1, __unused void *spare2) +memorystatus_perform_idle_demotion(__unused void *spare1, __unused void *spare2) { proc_t p; uint64_t current_time = 0, idle_delay_time = 0; int demote_prio_band = 0; memstat_bucket_t *demotion_bucket; - + MEMORYSTATUS_DEBUG(1, "memorystatus_perform_idle_demotion()\n"); - + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_IDLE_DEMOTE) | DBG_FUNC_START, 0, 0, 0, 0, 0); - - current_time = mach_absolute_time(); - + + current_time = mach_absolute_time(); + proc_list_lock(); demote_prio_band = JETSAM_PRIORITY_IDLE + 1; for (; demote_prio_band < JETSAM_PRIORITY_MAX; demote_prio_band++) { - - if (demote_prio_band != system_procs_aging_band && demote_prio_band != applications_aging_band) + if (demote_prio_band != system_procs_aging_band && demote_prio_band != applications_aging_band) { continue; + } demotion_bucket = &memstat_bucket[demote_prio_band]; p = TAILQ_FIRST(&demotion_bucket->list); - + while (p) { MEMORYSTATUS_DEBUG(1, "memorystatus_perform_idle_demotion() found %d\n", p->p_pid); - + assert(p->p_memstat_idledeadline); assert(p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS); if (current_time >= p->p_memstat_idledeadline) { - if ((isSysProc(p) && - ((p->p_memstat_dirty & (P_DIRTY_IDLE_EXIT_ENABLED|P_DIRTY_IS_DIRTY)) != P_DIRTY_IDLE_EXIT_ENABLED)) || /* system proc marked dirty*/ - task_has_assertions((struct task *)(p->task))) { /* has outstanding assertions which might indicate outstanding work too */ + ((p->p_memstat_dirty & (P_DIRTY_IDLE_EXIT_ENABLED | P_DIRTY_IS_DIRTY)) != P_DIRTY_IDLE_EXIT_ENABLED)) || /* system proc marked dirty*/ + task_has_assertions((struct task *)(p->task))) { /* has outstanding assertions which might indicate outstanding work too */ idle_delay_time = (isSysProc(p)) ? 
memorystatus_sysprocs_idle_delay_time : memorystatus_apps_idle_delay_time; p->p_memstat_idledeadline += idle_delay_time; p = TAILQ_NEXT(p, p_memstat_list); - } else { - proc_t next_proc = NULL; next_proc = TAILQ_NEXT(p, p_memstat_list); memorystatus_invalidate_idle_demotion_locked(p, TRUE); memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, false, true); - + p = next_proc; continue; - } } else { // No further candidates break; } } - } memorystatus_reschedule_idle_demotion_locked(); - + proc_list_unlock(); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_IDLE_DEMOTE) | DBG_FUNC_END, 0, 0, 0, 0, 0); } static void -memorystatus_schedule_idle_demotion_locked(proc_t p, boolean_t set_state) -{ +memorystatus_schedule_idle_demotion_locked(proc_t p, boolean_t set_state) +{ boolean_t present_in_sysprocs_aging_bucket = FALSE; boolean_t present_in_apps_aging_bucket = FALSE; uint64_t idle_delay_time = 0; @@ -2325,15 +2326,13 @@ memorystatus_schedule_idle_demotion_locked(proc_t p, boolean_t set_state) return; } - if (isProcessInAgingBands(p)){ - + if (isProcessInAgingBands(p)) { if (jetsam_aging_policy != kJetsamAgingPolicyLegacy) { assert((p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) != P_DIRTY_AGING_IN_PROGRESS); } if (isSysProc(p) && system_procs_aging_band) { present_in_sysprocs_aging_bucket = TRUE; - } else if (isApp(p) && applications_aging_band) { present_in_apps_aging_bucket = TRUE; } @@ -2342,10 +2341,10 @@ memorystatus_schedule_idle_demotion_locked(proc_t p, boolean_t set_state) assert(!present_in_sysprocs_aging_bucket); assert(!present_in_apps_aging_bucket); - MEMORYSTATUS_DEBUG(1, "memorystatus_schedule_idle_demotion_locked: scheduling demotion to idle band for pid %d (dirty:0x%x, set_state %d, demotions %d).\n", + MEMORYSTATUS_DEBUG(1, "memorystatus_schedule_idle_demotion_locked: scheduling demotion to idle band for pid %d (dirty:0x%x, set_state %d, demotions %d).\n", p->p_pid, p->p_memstat_dirty, set_state, (memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps)); - if(isSysProc(p)) { + if (isSysProc(p)) { assert((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED); } @@ -2355,19 +2354,18 @@ memorystatus_schedule_idle_demotion_locked(proc_t p, boolean_t set_state) p->p_memstat_dirty |= P_DIRTY_AGING_IN_PROGRESS; p->p_memstat_idledeadline = mach_absolute_time() + idle_delay_time; } - + assert(p->p_memstat_idledeadline); - - if (isSysProc(p) && present_in_sysprocs_aging_bucket == FALSE) { - memorystatus_scheduled_idle_demotions_sysprocs++; + if (isSysProc(p) && present_in_sysprocs_aging_bucket == FALSE) { + memorystatus_scheduled_idle_demotions_sysprocs++; } else if (isApp(p) && present_in_apps_aging_bucket == FALSE) { memorystatus_scheduled_idle_demotions_apps++; } } static void -memorystatus_invalidate_idle_demotion_locked(proc_t p, boolean_t clear_state) +memorystatus_invalidate_idle_demotion_locked(proc_t p, boolean_t clear_state) { boolean_t present_in_sysprocs_aging_bucket = FALSE; boolean_t present_in_apps_aging_bucket = FALSE; @@ -2381,7 +2379,6 @@ memorystatus_invalidate_idle_demotion_locked(proc_t p, boolean_t clear_state) } if (isProcessInAgingBands(p)) { - if (jetsam_aging_policy != kJetsamAgingPolicyLegacy) { assert((p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) == P_DIRTY_AGING_IN_PROGRESS); } @@ -2390,7 +2387,6 @@ memorystatus_invalidate_idle_demotion_locked(proc_t p, boolean_t clear_state) assert(p->p_memstat_effectivepriority == system_procs_aging_band); 
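/*
 * Illustrative note: the asserts on either side of this point encode the
 * invariant that a process sitting in an aging band always carries a live
 * demotion deadline. The bookkeeping being validated follows a simple
 * schedule/invalidate protocol (sketch; the deadline is zeroed only when
 * clear_state is set):
 *
 *   schedule:    p->p_memstat_idledeadline = now + delay;  count++;
 *   invalidate:  p->p_memstat_idledeadline = 0;            count--;
 *
 * where count is the per-class scheduled-demotions counter asserted to
 * remain non-negative below.
 */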
assert(p->p_memstat_idledeadline); present_in_sysprocs_aging_bucket = TRUE; - } else if (isApp(p) && applications_aging_band) { assert(p->p_memstat_effectivepriority == applications_aging_band); assert(p->p_memstat_idledeadline); @@ -2398,49 +2394,47 @@ memorystatus_invalidate_idle_demotion_locked(proc_t p, boolean_t clear_state) } } - MEMORYSTATUS_DEBUG(1, "memorystatus_invalidate_idle_demotion(): invalidating demotion to idle band for pid %d (clear_state %d, demotions %d).\n", + MEMORYSTATUS_DEBUG(1, "memorystatus_invalidate_idle_demotion(): invalidating demotion to idle band for pid %d (clear_state %d, demotions %d).\n", p->p_pid, clear_state, (memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps)); - - + + if (clear_state) { - p->p_memstat_idledeadline = 0; - p->p_memstat_dirty &= ~P_DIRTY_AGING_IN_PROGRESS; + p->p_memstat_idledeadline = 0; + p->p_memstat_dirty &= ~P_DIRTY_AGING_IN_PROGRESS; } - - if (isSysProc(p) &&present_in_sysprocs_aging_bucket == TRUE) { + + if (isSysProc(p) && present_in_sysprocs_aging_bucket == TRUE) { memorystatus_scheduled_idle_demotions_sysprocs--; assert(memorystatus_scheduled_idle_demotions_sysprocs >= 0); - } else if (isApp(p) && present_in_apps_aging_bucket == TRUE) { memorystatus_scheduled_idle_demotions_apps--; assert(memorystatus_scheduled_idle_demotions_apps >= 0); } - assert((memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps) >= 0); + assert((memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps) >= 0); } static void -memorystatus_reschedule_idle_demotion_locked(void) { - if (0 == (memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps)) { - if (memstat_idle_demotion_deadline) { - /* Transitioned 1->0, so cancel next call */ - thread_call_cancel(memorystatus_idle_demotion_call); - memstat_idle_demotion_deadline = 0; - } - } else { - memstat_bucket_t *demotion_bucket; - proc_t p = NULL, p1 = NULL, p2 = NULL; - - if (system_procs_aging_band) { - +memorystatus_reschedule_idle_demotion_locked(void) +{ + if (0 == (memorystatus_scheduled_idle_demotions_sysprocs + memorystatus_scheduled_idle_demotions_apps)) { + if (memstat_idle_demotion_deadline) { + /* Transitioned 1->0, so cancel next call */ + thread_call_cancel(memorystatus_idle_demotion_call); + memstat_idle_demotion_deadline = 0; + } + } else { + memstat_bucket_t *demotion_bucket; + proc_t p = NULL, p1 = NULL, p2 = NULL; + + if (system_procs_aging_band) { demotion_bucket = &memstat_bucket[system_procs_aging_band]; p1 = TAILQ_FIRST(&demotion_bucket->list); p = p1; } - if (applications_aging_band) { - + if (applications_aging_band) { demotion_bucket = &memstat_bucket[applications_aging_band]; p2 = TAILQ_FIRST(&demotion_bucket->list); @@ -2449,51 +2443,48 @@ memorystatus_reschedule_idle_demotion_locked(void) { } else { p = (p1 == NULL) ? 
p2 : p1; } - } assert(p); if (p != NULL) { assert(p && p->p_memstat_idledeadline); - if (memstat_idle_demotion_deadline != p->p_memstat_idledeadline){ + if (memstat_idle_demotion_deadline != p->p_memstat_idledeadline) { thread_call_enter_delayed(memorystatus_idle_demotion_call, p->p_memstat_idledeadline); memstat_idle_demotion_deadline = p->p_memstat_idledeadline; } } - } + } } -/* +/* * List manipulation */ - -int + +int memorystatus_add(proc_t p, boolean_t locked) { memstat_bucket_t *bucket; - + MEMORYSTATUS_DEBUG(1, "memorystatus_list_add(): adding pid %d with priority %d.\n", p->p_pid, p->p_memstat_effectivepriority); if (!locked) { - proc_list_lock(); - } + proc_list_lock(); + } DTRACE_MEMORYSTATUS2(memorystatus_add, proc_t, p, int32_t, p->p_memstat_effectivepriority); /* Processes marked internal do not have priority tracked */ if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { - goto exit; + goto exit; } - + bucket = &memstat_bucket[p->p_memstat_effectivepriority]; - + if (isSysProc(p) && system_procs_aging_band && (p->p_memstat_effectivepriority == system_procs_aging_band)) { assert(bucket->count == memorystatus_scheduled_idle_demotions_sysprocs - 1); - } else if (isApp(p) && applications_aging_band && (p->p_memstat_effectivepriority == applications_aging_band)) { assert(bucket->count == memorystatus_scheduled_idle_demotions_apps - 1); - } else if (p->p_memstat_effectivepriority == JETSAM_PRIORITY_IDLE) { /* * Entering the idle band. @@ -2508,12 +2499,12 @@ memorystatus_add(proc_t p, boolean_t locked) memorystatus_list_count++; memorystatus_check_levels_locked(); - + exit: - if (!locked) { - proc_list_unlock(); - } - + if (!locked) { + proc_list_unlock(); + } + return 0; } @@ -2544,32 +2535,32 @@ void memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_insert, boolean_t skip_demotion_check) { memstat_bucket_t *old_bucket, *new_bucket; - + assert(priority < MEMSTAT_BUCKET_COUNT); - + /* Ensure that exit isn't underway, leaving the proc retained but removed from its bucket */ if ((p->p_listflag & P_LIST_EXITED) != 0) { return; } MEMORYSTATUS_DEBUG(1, "memorystatus_update_priority_locked(): setting %s(%d) to priority %d, inserting at %s\n", - (*p->p_name ? p->p_name : "unknown"), p->p_pid, priority, head_insert ? "head" : "tail"); + (*p->p_name ? p->p_name : "unknown"), p->p_pid, priority, head_insert ? "head" : "tail"); DTRACE_MEMORYSTATUS3(memorystatus_update_priority, proc_t, p, int32_t, p->p_memstat_effectivepriority, int, priority); #if DEVELOPMENT || DEBUG if (priority == JETSAM_PRIORITY_IDLE && /* if the process is on its way into the IDLE band */ - skip_demotion_check == FALSE && /* and it isn't via the path that will set the INACTIVE memlimits */ + skip_demotion_check == FALSE && /* and it isn't via the path that will set the INACTIVE memlimits */ (p->p_memstat_dirty & P_DIRTY_TRACK) && /* and it has 'DIRTY' tracking enabled */ ((p->p_memstat_memlimit != p->p_memstat_memlimit_inactive) || /* and we notice that the current limit isn't the right value (inactive) */ - ((p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL) ? ( ! (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT)) : (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT)))) /* OR type (fatal vs non-fatal) */ + ((p->p_memstat_state & P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL) ? 
(!(p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT)) : (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT)))) { /* OR type (fatal vs non-fatal) */ panic("memorystatus_update_priority_locked: on %s with 0x%x, prio: %d and %d\n", p->p_name, p->p_memstat_state, priority, p->p_memstat_memlimit); /* then we must catch this */ + } #endif /* DEVELOPMENT || DEBUG */ old_bucket = &memstat_bucket[p->p_memstat_effectivepriority]; if (skip_demotion_check == FALSE) { - if (isSysProc(p)) { /* * For system processes, the memorystatus_dirty_* routines take care of adding/removing @@ -2588,7 +2579,7 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser if (p->p_memstat_state & P_MEMSTAT_FROZEN) { if (priority <= memorystatus_freeze_jetsam_band) { priority = memorystatus_freeze_jetsam_band; - } + } } else #endif /* CONFIG_FREEZE */ { @@ -2596,23 +2587,22 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser priority = JETSAM_PRIORITY_ELEVATED_INACTIVE; } } - assert(! (p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS)); + assert(!(p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS)); } } else if (isApp(p)) { - /* * Check to see if the application is being lowered in jetsam priority. If so, and: * - it has an 'elevated inactive jetsam band' attribute, then put it in the appropriate band. * - it is a normal application, then let it age in the aging band if that policy is in effect. */ - + if (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) { #if CONFIG_FREEZE if (p->p_memstat_state & P_MEMSTAT_FROZEN) { if (priority <= memorystatus_freeze_jetsam_band) { priority = memorystatus_freeze_jetsam_band; - } - } else + } + } else #endif /* CONFIG_FREEZE */ { if (priority <= JETSAM_PRIORITY_ELEVATED_INACTIVE) { @@ -2620,14 +2610,13 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser } } } else { - if (applications_aging_band) { - if (p->p_memstat_effectivepriority == applications_aging_band) { + if (p->p_memstat_effectivepriority == applications_aging_band) { assert(old_bucket->count == (memorystatus_scheduled_idle_demotions_apps + 1)); } if ((jetsam_aging_policy != kJetsamAgingPolicyLegacy) && (priority <= applications_aging_band)) { - assert(! (p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS)); + assert(!(p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS)); priority = applications_aging_band; memorystatus_schedule_idle_demotion_locked(p, TRUE); } @@ -2643,27 +2632,28 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser TAILQ_REMOVE(&old_bucket->list, p, p_memstat_list); old_bucket->count--; - new_bucket = &memstat_bucket[priority]; - if (head_insert) + new_bucket = &memstat_bucket[priority]; + if (head_insert) { TAILQ_INSERT_HEAD(&new_bucket->list, p, p_memstat_list); - else + } else { TAILQ_INSERT_TAIL(&new_bucket->list, p, p_memstat_list); + } new_bucket->count++; if (memorystatus_highwater_enabled) { boolean_t is_fatal; boolean_t use_active; - /* + /* * If cached limit data is updated, then the limits * will be enforced by writing to the ledgers. 
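 *
 * Condensed view of the transitions handled below (an illustrative
 * summary of the branches that follow):
 *
 *   dirty-tracked and headed to IDLE    ->  cache inactive limits
 *   dirty-tracked otherwise             ->  no ledger write needed
 *   crossing up through FOREGROUND      ->  cache active limits
 *   crossing down through FOREGROUND    ->  cache inactive limits
 *   all other moves                     ->  no ledger write needed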
*/ boolean_t ledger_update_needed = TRUE; /* - * Here, we must update the cached memory limit if the task + * Here, we must update the cached memory limit if the task * is transitioning between: - * active <--> inactive + * active <--> inactive * FG <--> BG * but: * dirty <--> clean is ignored @@ -2674,26 +2664,23 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser */ if (p->p_memstat_dirty & P_DIRTY_TRACK) { - if (skip_demotion_check == TRUE && priority == JETSAM_PRIORITY_IDLE) { CACHE_INACTIVE_LIMITS_LOCKED(p, is_fatal); use_active = FALSE; } else { ledger_update_needed = FALSE; } - } else if ((priority >= JETSAM_PRIORITY_FOREGROUND) && (p->p_memstat_effectivepriority < JETSAM_PRIORITY_FOREGROUND)) { /* - * inactive --> active + * inactive --> active * BG --> FG * assign active state */ CACHE_ACTIVE_LIMITS_LOCKED(p, is_fatal); use_active = TRUE; - } else if ((priority < JETSAM_PRIORITY_FOREGROUND) && (p->p_memstat_effectivepriority >= JETSAM_PRIORITY_FOREGROUND)) { /* - * active --> inactive + * active --> inactive * FG --> BG * assign inactive state */ @@ -2716,9 +2703,9 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser task_set_phys_footprint_limit_internal(p->task, (p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1, NULL, use_active, is_fatal); MEMORYSTATUS_DEBUG(3, "memorystatus_update_priority_locked: new limit on pid %d (%dMB %s) priority old --> new (%d --> %d) dirty?=0x%x %s\n", - p->p_pid, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), - (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), p->p_memstat_effectivepriority, priority, p->p_memstat_dirty, - (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : "")); + p->p_pid, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), + (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), p->p_memstat_effectivepriority, priority, p->p_memstat_dirty, + (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : "")); } } @@ -2726,7 +2713,7 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser * Record idle start or idle delta. */ if (p->p_memstat_effectivepriority == priority) { - /* + /* * This process is not transitioning between * jetsam priority buckets. Do nothing. */ @@ -2749,7 +2736,6 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser if (p->p_memstat_state & P_MEMSTAT_FREEZE_IGNORE) { p->p_memstat_state &= ~P_MEMSTAT_FREEZE_IGNORE; } - } else if (priority == JETSAM_PRIORITY_IDLE) { /* * Transitioning into the idle priority bucket. 
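 *
 * The entry timestamp is recorded so the stay can later be charged as
 * a delta on the way out; a sketch of the pattern (field names
 * abbreviated for illustration):
 *
 *   entering IDLE:  idle_start  = mach_absolute_time();
 *   leaving IDLE:   idle_delta += mach_absolute_time() - idle_start;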
@@ -2770,7 +2756,7 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser (priority >= JETSAM_PRIORITY_FOREGROUND)); } #endif /* CONFIG_SECLUDED_MEMORY */ - + memorystatus_check_levels_locked(); } @@ -2805,8 +2791,8 @@ memorystatus_update_priority_locked(proc_t p, int priority, boolean_t head_inser int memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effective, boolean_t update_memlimit, - int32_t memlimit_active, boolean_t memlimit_active_is_fatal, - int32_t memlimit_inactive, boolean_t memlimit_inactive_is_fatal) + int32_t memlimit_active, boolean_t memlimit_active_is_fatal, + int32_t memlimit_inactive, boolean_t memlimit_inactive_is_fatal) { int ret; boolean_t head_insert = false; @@ -2814,13 +2800,13 @@ memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effect MEMORYSTATUS_DEBUG(1, "memorystatus_update: changing (%s) pid %d: priority %d, user_data 0x%llx\n", (*p->p_name ? p->p_name : "unknown"), p->p_pid, priority, user_data); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_UPDATE) | DBG_FUNC_START, p->p_pid, priority, user_data, effective, 0); - + if (priority == -1) { /* Use as shorthand for default priority */ priority = JETSAM_PRIORITY_DEFAULT; } else if ((priority == system_procs_aging_band) || (priority == applications_aging_band)) { /* Both the aging bands are reserved for internal use; if requested, adjust to JETSAM_PRIORITY_IDLE. */ - priority = JETSAM_PRIORITY_IDLE; + priority = JETSAM_PRIORITY_IDLE; } else if (priority == JETSAM_PRIORITY_IDLE_HEAD) { /* JETSAM_PRIORITY_IDLE_HEAD inserts at the head of the idle queue */ priority = JETSAM_PRIORITY_IDLE; @@ -2832,14 +2818,14 @@ memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effect } proc_list_lock(); - + assert(!(p->p_memstat_state & P_MEMSTAT_INTERNAL)); if (effective && (p->p_memstat_state & P_MEMSTAT_PRIORITYUPDATED)) { ret = EALREADY; proc_list_unlock(); MEMORYSTATUS_DEBUG(1, "memorystatus_update: effective change specified for pid %d, but change already occurred.\n", p->p_pid); - goto out; + goto out; } if ((p->p_memstat_state & P_MEMSTAT_TERMINATED) || ((p->p_listflag & P_LIST_EXITED) != 0)) { @@ -2848,7 +2834,7 @@ memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effect */ ret = EBUSY; proc_list_unlock(); - goto out; + goto out; } p->p_memstat_state |= P_MEMSTAT_PRIORITYUPDATED; @@ -2866,9 +2852,9 @@ memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effect */ MEMORYSTATUS_DEBUG(3, "memorystatus_update(enter): pid %d, priority %d, dirty=0x%x, Active(%dMB %s), Inactive(%dMB, %s)\n", - p->p_pid, priority, p->p_memstat_dirty, - memlimit_active, (memlimit_active_is_fatal ? "F " : "NF"), - memlimit_inactive, (memlimit_inactive_is_fatal ? "F " : "NF")); + p->p_pid, priority, p->p_memstat_dirty, + memlimit_active, (memlimit_active_is_fatal ? "F " : "NF"), + memlimit_inactive, (memlimit_inactive_is_fatal ? "F " : "NF")); if (memlimit_active <= 0) { /* @@ -2881,7 +2867,7 @@ memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effect /* * For backward compatibility with some unexplained launchd behavior, * we allow a zero sized limit. But we still enforce system_wide limit - * when written to the ledgers. + * when written to the ledgers. */ if (memlimit_active < 0) { @@ -2934,9 +2920,9 @@ memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effect task_set_phys_footprint_limit_internal(p->task, ((p->p_memstat_memlimit > 0) ? 
p->p_memstat_memlimit : -1), NULL, use_active, is_fatal); MEMORYSTATUS_DEBUG(3, "memorystatus_update: init: limit on pid %d (%dMB %s) targeting priority(%d) dirty?=0x%x %s\n", - p->p_pid, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), - (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), priority, p->p_memstat_dirty, - (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : "")); + p->p_pid, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), + (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), priority, p->p_memstat_dirty, + (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : "")); } } @@ -2945,13 +2931,12 @@ memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effect * But, we could be removing it from those buckets. * Check and take appropriate steps if so. */ - + if (isProcessInAgingBands(p)) { - memorystatus_invalidate_idle_demotion_locked(p, TRUE); memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, FALSE, TRUE); } else { - if (jetsam_aging_policy == kJetsamAgingPolicyLegacy && priority == JETSAM_PRIORITY_IDLE) { + if (jetsam_aging_policy == kJetsamAgingPolicyLegacy && priority == JETSAM_PRIORITY_IDLE) { /* * Daemons with 'inactive' limits will go through the dirty tracking codepath. * This path deals with apps that may have 'inactive' limits e.g. WebContent processes. @@ -2980,25 +2965,22 @@ memorystatus_remove(proc_t p, boolean_t locked) { int ret; memstat_bucket_t *bucket; - boolean_t reschedule = FALSE; + boolean_t reschedule = FALSE; MEMORYSTATUS_DEBUG(1, "memorystatus_list_remove: removing pid %d\n", p->p_pid); - if (!locked) { - proc_list_lock(); - } + if (!locked) { + proc_list_lock(); + } assert(!(p->p_memstat_state & P_MEMSTAT_INTERNAL)); - + bucket = &memstat_bucket[p->p_memstat_effectivepriority]; if (isSysProc(p) && system_procs_aging_band && (p->p_memstat_effectivepriority == system_procs_aging_band)) { - assert(bucket->count == memorystatus_scheduled_idle_demotions_sysprocs); reschedule = TRUE; - } else if (isApp(p) && applications_aging_band && (p->p_memstat_effectivepriority == applications_aging_band)) { - assert(bucket->count == memorystatus_scheduled_idle_demotions_apps); reschedule = TRUE; } @@ -3022,14 +3004,13 @@ memorystatus_remove(proc_t p, boolean_t locked) /* If awaiting demotion to the idle band, clean up */ if (reschedule) { memorystatus_invalidate_idle_demotion_locked(p, TRUE); - memorystatus_reschedule_idle_demotion_locked(); + memorystatus_reschedule_idle_demotion_locked(); } memorystatus_check_levels_locked(); -#if CONFIG_FREEZE +#if CONFIG_FREEZE if (p->p_memstat_state & (P_MEMSTAT_FROZEN)) { - if (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) { p->p_memstat_state &= ~P_MEMSTAT_REFREEZE_ELIGIBLE; memorystatus_refreeze_eligible_count--; @@ -3045,12 +3026,12 @@ memorystatus_remove(proc_t p, boolean_t locked) } #endif - if (!locked) { - proc_list_unlock(); - } + if (!locked) { + proc_list_unlock(); + } if (p) { - ret = 0; + ret = 0; } else { ret = ESRCH; } @@ -3063,32 +3044,33 @@ memorystatus_remove(proc_t p, boolean_t locked) * * Return: * 0 on success - * non-0 on failure + * non-0 on failure * * The proc_list_lock is held by the caller. 
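 *
 * The checks below reduce to a few rules over the pcontrol bits
 * (restated here for quick reference):
 *
 *   PROC_DIRTY_ALLOW_IDLE_EXIT      requires PROC_DIRTY_TRACK
 *   PROC_DIRTY_LAUNCH_IN_PROGRESS   requires PROC_DIRTY_TRACK
 *   PROC_DIRTY_DEFER and PROC_DIRTY_DEFER_ALWAYS are mutually exclusive
 *   either DEFER flag               requires idle exit to be allowed
 *
 * and a process already marked P_DIRTY_TERMINATED rejects all of them
 * with EBUSY.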
*/ static int -memorystatus_validate_track_flags(struct proc *target_p, uint32_t pcontrol) { +memorystatus_validate_track_flags(struct proc *target_p, uint32_t pcontrol) +{ /* See that the process isn't marked for termination */ if (target_p->p_memstat_dirty & P_DIRTY_TERMINATED) { return EBUSY; } - + /* Idle exit requires that process be tracked */ if ((pcontrol & PROC_DIRTY_ALLOW_IDLE_EXIT) && - !(pcontrol & PROC_DIRTY_TRACK)) { + !(pcontrol & PROC_DIRTY_TRACK)) { return EINVAL; } /* 'Launch in progress' tracking requires that process have enabled dirty tracking too. */ if ((pcontrol & PROC_DIRTY_LAUNCH_IN_PROGRESS) && - !(pcontrol & PROC_DIRTY_TRACK)) { + !(pcontrol & PROC_DIRTY_TRACK)) { return EINVAL; } /* Only one type of DEFER behavior is allowed.*/ - if ((pcontrol & PROC_DIRTY_DEFER) && + if ((pcontrol & PROC_DIRTY_DEFER) && (pcontrol & PROC_DIRTY_DEFER_ALWAYS)) { return EINVAL; } @@ -3096,33 +3078,31 @@ memorystatus_validate_track_flags(struct proc *target_p, uint32_t pcontrol) { /* Deferral is only relevant if idle exit is specified */ if (((pcontrol & PROC_DIRTY_DEFER) || (pcontrol & PROC_DIRTY_DEFER_ALWAYS)) && - !(pcontrol & PROC_DIRTY_ALLOWS_IDLE_EXIT)) { + !(pcontrol & PROC_DIRTY_ALLOWS_IDLE_EXIT)) { return EINVAL; } - - return(0); + + return 0; } static void -memorystatus_update_idle_priority_locked(proc_t p) { +memorystatus_update_idle_priority_locked(proc_t p) +{ int32_t priority; MEMORYSTATUS_DEBUG(1, "memorystatus_update_idle_priority_locked(): pid %d dirty 0x%X\n", p->p_pid, p->p_memstat_dirty); - assert(isSysProc(p)); - - if ((p->p_memstat_dirty & (P_DIRTY_IDLE_EXIT_ENABLED|P_DIRTY_IS_DIRTY)) == P_DIRTY_IDLE_EXIT_ENABLED) { + assert(isSysProc(p)); + if ((p->p_memstat_dirty & (P_DIRTY_IDLE_EXIT_ENABLED | P_DIRTY_IS_DIRTY)) == P_DIRTY_IDLE_EXIT_ENABLED) { priority = (p->p_memstat_dirty & P_DIRTY_AGING_IN_PROGRESS) ? system_procs_aging_band : JETSAM_PRIORITY_IDLE; } else { priority = p->p_memstat_requestedpriority; } - - if (priority != p->p_memstat_effectivepriority) { + if (priority != p->p_memstat_effectivepriority) { if ((jetsam_aging_policy == kJetsamAgingPolicyLegacy) && (priority == JETSAM_PRIORITY_IDLE)) { - /* * This process is on its way into the IDLE band. The system is * using 'legacy' jetsam aging policy. That means, this process @@ -3133,7 +3113,6 @@ memorystatus_update_idle_priority_locked(proc_t p) { */ if (p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) { - /* * This process has the 'elevated inactive jetsam band' attribute. * So, there will be no trip to IDLE after all. @@ -3145,12 +3124,11 @@ memorystatus_update_idle_priority_locked(proc_t p) { } memorystatus_update_priority_locked(p, priority, false, true); - } else { memorystatus_update_priority_locked(p, priority, false, false); } } -} +} /* * Processes can opt to have their state tracked by the kernel, indicating when they are busy (dirty) or idle @@ -3170,18 +3148,19 @@ memorystatus_update_idle_priority_locked(proc_t p) { */ int -memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { +memorystatus_dirty_track(proc_t p, uint32_t pcontrol) +{ unsigned int old_dirty; boolean_t reschedule = FALSE; boolean_t already_deferred = FALSE; boolean_t defer_now = FALSE; int ret = 0; - + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DIRTY_TRACK), - p->p_pid, p->p_memstat_dirty, pcontrol, 0, 0); - + p->p_pid, p->p_memstat_dirty, pcontrol, 0, 0); + proc_list_lock(); - + if ((p->p_listflag & P_LIST_EXITED) != 0) { /* * Process is on its way out. 
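 *
 * For context, the userland half of this handshake is the private
 * libproc interface (a sketch only; wait_for_work/do_work are
 * placeholders). A daemon opts into tracking and idle exit once, then
 * toggles its dirty state around units of work:
 *
 *   proc_track_dirty(getpid(),
 *       PROC_DIRTY_TRACK | PROC_DIRTY_ALLOW_IDLE_EXIT | PROC_DIRTY_DEFER);
 *   for (;;) {
 *       wait_for_work();
 *       proc_set_dirty(getpid(), true);
 *       do_work();
 *       proc_set_dirty(getpid(), false);
 *   }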
@@ -3194,13 +3173,13 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { ret = EPERM; goto exit; } - + if ((ret = memorystatus_validate_track_flags(p, pcontrol)) != 0) { /* error */ goto exit; } - old_dirty = p->p_memstat_dirty; + old_dirty = p->p_memstat_dirty; /* These bits are cumulative, as per */ if (pcontrol & PROC_DIRTY_TRACK) { @@ -3208,7 +3187,7 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { } if (pcontrol & PROC_DIRTY_ALLOW_IDLE_EXIT) { - p->p_memstat_dirty |= P_DIRTY_ALLOW_IDLE_EXIT; + p->p_memstat_dirty |= P_DIRTY_ALLOW_IDLE_EXIT; } if (pcontrol & PROC_DIRTY_LAUNCH_IN_PROGRESS) { @@ -3222,7 +3201,6 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { /* This can be set and cleared exactly once. */ if (pcontrol & (PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) { - if ((pcontrol & (PROC_DIRTY_DEFER)) && !(old_dirty & P_DIRTY_DEFER)) { p->p_memstat_dirty |= P_DIRTY_DEFER; @@ -3237,27 +3215,23 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { } MEMORYSTATUS_DEBUG(1, "memorystatus_on_track_dirty(): set idle-exit %s / defer %s / dirty %s for pid %d\n", - ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) ? "Y" : "N", - defer_now ? "Y" : "N", - p->p_memstat_dirty & P_DIRTY ? "Y" : "N", - p->p_pid); + ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) ? "Y" : "N", + defer_now ? "Y" : "N", + p->p_memstat_dirty & P_DIRTY ? "Y" : "N", + p->p_pid); /* Kick off or invalidate the idle exit deferment if there's a state transition. */ if (!(p->p_memstat_dirty & P_DIRTY_IS_DIRTY)) { if ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) { - if (defer_now && !already_deferred) { - /* - * Request to defer a clean process that's idle-exit enabled + * Request to defer a clean process that's idle-exit enabled * and not already in the jetsam deferred band. Most likely a * new launch. */ memorystatus_schedule_idle_demotion_locked(p, TRUE); reschedule = TRUE; - } else if (!defer_now) { - /* * The process isn't asking for the 'aging' facility. * Could be that it is: @@ -3269,7 +3243,7 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { * some processes have tried to use this to * opt out of the 'aging' facility. */ - + memorystatus_invalidate_idle_demotion_locked(p, TRUE); } else { /* @@ -3277,7 +3251,7 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { * we'll go ahead and opt it in because this is likely * a new launch (clean process, dirty tracking enabled) */ - + memorystatus_schedule_idle_demotion_locked(p, TRUE); } @@ -3285,10 +3259,9 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { } } } else { - /* * We are trying to operate on a dirty process. Dirty processes have to - * be removed from the deferred band. The question is do we reset the + * be removed from the deferred band. The question is do we reset the * deferred state or not? * * This could be a legal request like: @@ -3313,7 +3286,6 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { memorystatus_invalidate_idle_demotion_locked(p, TRUE); reschedule = TRUE; } else { - boolean_t reset_state = (jetsam_aging_policy != kJetsamAgingPolicyLegacy) ? 
TRUE : FALSE; memorystatus_invalidate_idle_demotion_locked(p, reset_state); @@ -3322,21 +3294,22 @@ memorystatus_dirty_track(proc_t p, uint32_t pcontrol) { memorystatus_update_idle_priority_locked(p); - + if (reschedule) { memorystatus_reschedule_idle_demotion_locked(); } - + ret = 0; - -exit: + +exit: proc_list_unlock(); - + return ret; } int -memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { +memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) +{ int ret; boolean_t kill = false; boolean_t reschedule = FALSE; @@ -3361,18 +3334,19 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { goto exit; } - if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) + if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) { was_dirty = TRUE; + } if (!(p->p_memstat_dirty & P_DIRTY_TRACK)) { /* Dirty tracking not enabled */ - ret = EINVAL; + ret = EINVAL; } else if (pcontrol && (p->p_memstat_dirty & P_DIRTY_TERMINATED)) { - /* + /* * Process is set to be terminated and we're attempting to mark it dirty. * Set for termination and marking as clean is OK - see . */ - ret = EBUSY; + ret = EBUSY; } else { int flag = (self == TRUE) ? P_DIRTY : P_DIRTY_SHUTDOWN; if (pcontrol && !(p->p_memstat_dirty & flag)) { @@ -3387,7 +3361,7 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { kill = true; } else if ((flag == P_DIRTY) && (p->p_memstat_dirty & P_DIRTY_TERMINATED)) { /* Kill previously terminated processes if set clean */ - kill = true; + kill = true; } p->p_memstat_dirty &= ~flag; memorystatus_dirty_count--; @@ -3402,15 +3376,14 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { goto exit; } - if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) + if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) { now_dirty = TRUE; + } if ((was_dirty == TRUE && now_dirty == FALSE) || (was_dirty == FALSE && now_dirty == TRUE)) { - /* Manage idle exit deferral, if applied */ if ((p->p_memstat_dirty & P_DIRTY_IDLE_EXIT_ENABLED) == P_DIRTY_IDLE_EXIT_ENABLED) { - /* * Legacy mode: P_DIRTY_AGING_IN_PROGRESS means the process is in the aging band OR it might be heading back * there once it's clean again. For the legacy case, this only applies if it has some protection window left. @@ -3426,14 +3399,13 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { * New dirty process i.e. "was_dirty == FALSE && now_dirty == TRUE" * * The process will move from its aging band to its higher requested - * jetsam band. + * jetsam band. */ boolean_t reset_state = (jetsam_aging_policy != kJetsamAgingPolicyLegacy) ? TRUE : FALSE; memorystatus_invalidate_idle_demotion_locked(p, reset_state); reschedule = TRUE; } else { - /* * Process is back from "dirty" to "clean". */ @@ -3445,11 +3417,11 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { * The process hasn't enrolled in the "always defer after dirty" * mode and its deadline has expired. It currently * does not reside in any of the aging buckets. - * - * It's on its way to the JETSAM_PRIORITY_IDLE + * + * It's on its way to the JETSAM_PRIORITY_IDLE * bucket via memorystatus_update_idle_priority_locked() * below. - + * * So all we need to do is reset all the state on the * process that's related to the aging bucket i.e. * the AGING_IN_PROGRESS flag and the timer deadline. 
@@ -3474,7 +3446,6 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { } } } else { - memorystatus_schedule_idle_demotion_locked(p, TRUE); reschedule = TRUE; } @@ -3487,8 +3458,8 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { boolean_t ledger_update_needed = TRUE; boolean_t use_active; boolean_t is_fatal; - /* - * We are in this path because this process transitioned between + /* + * We are in this path because this process transitioned between * dirty <--> clean state. Update the cached memory limits. */ @@ -3508,7 +3479,7 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { * out and is entering the IDLE band. * See memorystatus_update_priority_locked() for that. */ - + if (p->p_memstat_dirty & P_DIRTY_ALLOW_IDLE_EXIT) { ledger_update_needed = FALSE; } else { @@ -3540,13 +3511,12 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { proc_rele_locked(p); MEMORYSTATUS_DEBUG(3, "memorystatus_dirty_set: new limit on pid %d (%dMB %s) priority(%d) dirty?=0x%x %s\n", - p->p_pid, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), - (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), p->p_memstat_effectivepriority, p->p_memstat_dirty, - (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : "")); + p->p_pid, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), + (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), p->p_memstat_effectivepriority, p->p_memstat_dirty, + (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : "")); } - } - + /* If the deferral state changed, reschedule the demotion timer */ if (reschedule) { memorystatus_reschedule_idle_demotion_locked(); @@ -3561,7 +3531,7 @@ memorystatus_dirty_set(proc_t p, boolean_t self, uint32_t pcontrol) { proc_rele_locked(p); } } - + exit: proc_list_unlock(); @@ -3569,12 +3539,12 @@ exit: } int -memorystatus_dirty_clear(proc_t p, uint32_t pcontrol) { - +memorystatus_dirty_clear(proc_t p, uint32_t pcontrol) +{ int ret = 0; MEMORYSTATUS_DEBUG(1, "memorystatus_dirty_clear(): %d 0x%x 0x%x\n", p->p_pid, pcontrol, p->p_memstat_dirty); - + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DIRTY_CLEAR), p->p_pid, pcontrol, 0, 0, 0); proc_list_lock(); @@ -3594,9 +3564,9 @@ memorystatus_dirty_clear(proc_t p, uint32_t pcontrol) { if (!(p->p_memstat_dirty & P_DIRTY_TRACK)) { /* Dirty tracking not enabled */ - ret = EINVAL; + ret = EINVAL; goto exit; - } + } if (!pcontrol || (pcontrol & (PROC_DIRTY_LAUNCH_IN_PROGRESS | PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) == 0) { ret = EINVAL; @@ -3609,7 +3579,6 @@ memorystatus_dirty_clear(proc_t p, uint32_t pcontrol) { /* This can be set and cleared exactly once. 
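 *
 * The distinction, illustrated: PROC_DIRTY_DEFER grants the aging-band
 * grace period once (consumed by the first dirty cycle under the
 * legacy policy), while PROC_DIRTY_DEFER_ALWAYS re-arms it on every
 * dirty -> clean transition:
 *
 *   DEFER:         launch -> aging band -> dirty -> clean -> IDLE
 *   DEFER_ALWAYS:  launch -> aging band -> dirty -> clean -> aging band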
*/ if (pcontrol & (PROC_DIRTY_DEFER | PROC_DIRTY_DEFER_ALWAYS)) { - if (p->p_memstat_dirty & P_DIRTY_DEFER) { p->p_memstat_dirty &= ~(P_DIRTY_DEFER); } @@ -3631,11 +3600,12 @@ exit: } int -memorystatus_dirty_get(proc_t p) { +memorystatus_dirty_get(proc_t p) +{ int ret = 0; - + proc_list_lock(); - + if (p->p_memstat_dirty & P_DIRTY_TRACK) { ret |= PROC_DIRTY_TRACKED; if (p->p_memstat_dirty & P_DIRTY_ALLOW_IDLE_EXIT) { @@ -3648,21 +3618,22 @@ memorystatus_dirty_get(proc_t p) { ret |= PROC_DIRTY_LAUNCH_IS_IN_PROGRESS; } } - + proc_list_unlock(); - + return ret; } int -memorystatus_on_terminate(proc_t p) { +memorystatus_on_terminate(proc_t p) +{ int sig; - + proc_list_lock(); - + p->p_memstat_dirty |= P_DIRTY_TERMINATED; - - if ((p->p_memstat_dirty & (P_DIRTY_TRACK|P_DIRTY_IS_DIRTY)) == P_DIRTY_TRACK) { + + if ((p->p_memstat_dirty & (P_DIRTY_TRACK | P_DIRTY_IS_DIRTY)) == P_DIRTY_TRACK) { /* Clean; mark as terminated and issue SIGKILL */ sig = SIGKILL; } else { @@ -3671,7 +3642,7 @@ memorystatus_on_terminate(proc_t p) { } proc_list_unlock(); - + return sig; } @@ -3724,7 +3695,7 @@ memorystatus_on_resume(proc_t p) } memorystatus_suspended_count--; - + pid = p->p_pid; #endif @@ -3735,7 +3706,7 @@ memorystatus_on_resume(proc_t p) p->p_memstat_state &= ~P_MEMSTAT_SUSPENDED; proc_list_unlock(); - + #if CONFIG_FREEZE if (frozen) { memorystatus_freeze_entry_t data = { pid, FALSE, 0 }; @@ -3751,16 +3722,17 @@ memorystatus_on_inactivity(proc_t p) #if CONFIG_FREEZE /* Wake the freeze thread */ thread_wakeup((event_t)&memorystatus_freeze_wakeup); -#endif +#endif } /* * The proc_list_lock is held by the caller. -*/ + */ static uint32_t -memorystatus_build_state(proc_t p) { +memorystatus_build_state(proc_t p) +{ uint32_t snapshot_state = 0; - + /* General */ if (p->p_memstat_state & P_MEMSTAT_SUSPENDED) { snapshot_state |= kMemorystatusSuspended; @@ -3769,9 +3741,9 @@ memorystatus_build_state(proc_t p) { snapshot_state |= kMemorystatusFrozen; } if (p->p_memstat_state & P_MEMSTAT_REFREEZE_ELIGIBLE) { - snapshot_state |= kMemorystatusWasThawed; + snapshot_state |= kMemorystatusWasThawed; } - + /* Tracking */ if (p->p_memstat_dirty & P_DIRTY_TRACK) { snapshot_state |= kMemorystatusTracked; @@ -3797,34 +3769,34 @@ kill_idle_exit_proc(void) /* Pick next idle exit victim. */ current_time = mach_absolute_time(); - + jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_IDLE_EXIT); if (jetsam_reason == OS_REASON_NULL) { printf("kill_idle_exit_proc: failed to allocate jetsam reason\n"); } proc_list_lock(); - + p = memorystatus_get_first_proc_locked(&i, FALSE); while (p) { /* No need to look beyond the idle band */ if (p->p_memstat_effectivepriority != JETSAM_PRIORITY_IDLE) { break; } - - if ((p->p_memstat_dirty & (P_DIRTY_ALLOW_IDLE_EXIT|P_DIRTY_IS_DIRTY|P_DIRTY_TERMINATED)) == (P_DIRTY_ALLOW_IDLE_EXIT)) { + + if ((p->p_memstat_dirty & (P_DIRTY_ALLOW_IDLE_EXIT | P_DIRTY_IS_DIRTY | P_DIRTY_TERMINATED)) == (P_DIRTY_ALLOW_IDLE_EXIT)) { if (current_time >= p->p_memstat_idledeadline) { p->p_memstat_dirty |= P_DIRTY_TERMINATED; victim_p = proc_ref_locked(p); break; } } - + p = memorystatus_get_next_proc_locked(&i, p, FALSE); } - + proc_list_unlock(); - + if (victim_p) { printf("memorystatus: killing_idle_process pid %d [%s]\n", victim_p->p_pid, (*victim_p->p_name ? 
victim_p->p_name : "unknown")); killed = memorystatus_do_kill(victim_p, kMemorystatusKilledIdleExit, jetsam_reason); @@ -3838,19 +3810,19 @@ kill_idle_exit_proc(void) static void memorystatus_thread_wake(void) -{ +{ int thr_id = 0; int active_thr = atomic_load(&active_jetsam_threads); /* Wakeup all the jetsam threads */ for (thr_id = 0; thr_id < active_thr; thr_id++) { - thread_wakeup((event_t)&jetsam_threads[thr_id].memorystatus_wakeup); - } -} - + thread_wakeup((event_t)&jetsam_threads[thr_id].memorystatus_wakeup); + } +} + #if CONFIG_JETSAM -static void +static void memorystatus_thread_pool_max() { /* Increase the jetsam thread pool to max_jetsam_threads */ @@ -3881,8 +3853,8 @@ memorystatus_thread_block(uint32_t interval_ms, thread_continue_t continuation) } else { assert_wait(&jetsam_thread->memorystatus_wakeup, THREAD_UNINT); } - - return thread_block(continuation); + + return thread_block(continuation); } static boolean_t @@ -3894,7 +3866,7 @@ memorystatus_avail_pages_below_pressure(void) * key off of the system having dynamic swap support. With full swap support, * the system shouldn't really need to worry about various page thresholds. */ - return (memorystatus_available_pages <= memorystatus_available_pages_pressure); + return memorystatus_available_pages <= memorystatus_available_pages_pressure; #else /* CONFIG_EMBEDDED */ return FALSE; #endif /* CONFIG_EMBEDDED */ @@ -3904,7 +3876,7 @@ static boolean_t memorystatus_avail_pages_below_critical(void) { #if CONFIG_EMBEDDED - return (memorystatus_available_pages <= memorystatus_available_pages_critical); + return memorystatus_available_pages <= memorystatus_available_pages_critical; #else /* CONFIG_EMBEDDED */ return FALSE; #endif /* CONFIG_EMBEDDED */ @@ -3921,7 +3893,7 @@ memorystatus_post_snapshot(int32_t priority, uint32_t cause) * tree. 
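 *
 * Net effect (summary): idle-exit kills are the common, expected case,
 * so they post a snapshot only when memorystatus_idle_snapshot is set;
 * every other kill cause always posts one.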
*/ - return ((priority != JETSAM_PRIORITY_IDLE) || memorystatus_idle_snapshot); + return (priority != JETSAM_PRIORITY_IDLE) || memorystatus_idle_snapshot; #else /* CONFIG_EMBEDDED */ /* @@ -3934,7 +3906,7 @@ memorystatus_post_snapshot(int32_t priority, uint32_t cause) */ boolean_t snapshot_eligible_kill_cause = (is_reason_thrashing(cause) || is_reason_zone_map_exhaustion(cause)); - return ((priority != JETSAM_PRIORITY_IDLE) || memorystatus_idle_snapshot || snapshot_eligible_kill_cause); + return (priority != JETSAM_PRIORITY_IDLE) || memorystatus_idle_snapshot || snapshot_eligible_kill_cause; #endif /* CONFIG_EMBEDDED */ } @@ -3942,17 +3914,17 @@ static boolean_t memorystatus_action_needed(void) { #if CONFIG_EMBEDDED - return (is_reason_thrashing(kill_under_pressure_cause) || - is_reason_zone_map_exhaustion(kill_under_pressure_cause) || - memorystatus_available_pages <= memorystatus_available_pages_pressure); + return is_reason_thrashing(kill_under_pressure_cause) || + is_reason_zone_map_exhaustion(kill_under_pressure_cause) || + memorystatus_available_pages <= memorystatus_available_pages_pressure; #else /* CONFIG_EMBEDDED */ - return (is_reason_thrashing(kill_under_pressure_cause) || - is_reason_zone_map_exhaustion(kill_under_pressure_cause)); + return is_reason_thrashing(kill_under_pressure_cause) || + is_reason_zone_map_exhaustion(kill_under_pressure_cause); #endif /* CONFIG_EMBEDDED */ } #if CONFIG_FREEZE -extern void vm_swap_consider_defragmenting(int); +extern void vm_swap_consider_defragmenting(int); /* * This routine will _jetsam_ all frozen processes @@ -4005,7 +3977,6 @@ again: next_p = memorystatus_get_first_proc_locked(&band, TRUE); while (next_p) { - p = next_p; next_p = memorystatus_get_next_proc_locked(&band, p, TRUE); @@ -4050,7 +4021,6 @@ again: errors_over_prev_iteration = 0; while (bucket_count) { - bucket_count--; /* @@ -4102,7 +4072,7 @@ again: } #if DEVELOPMENT || DEBUG panic("memorystatus_disable_freeze: Failed to kill all frozen processes, memorystatus_frozen_count = %d, errors = %d", - memorystatus_frozen_count, errors_over_prev_iteration); + memorystatus_frozen_count, errors_over_prev_iteration); #endif /* DEVELOPMENT || DEBUG */ } proc_list_unlock(); @@ -4110,17 +4080,16 @@ again: os_reason_free(jetsam_reason); if (killed) { - vm_swap_consider_defragmenting(VM_SWAP_FLAGS_FORCE_DEFRAG | VM_SWAP_FLAGS_FORCE_RECLAIM); proc_list_lock(); size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + - sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_count); + sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_count); uint64_t timestamp_now = mach_absolute_time(); memorystatus_jetsam_snapshot->notification_time = timestamp_now; memorystatus_jetsam_snapshot->js_gencount++; if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 || - timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { + timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { proc_list_unlock(); int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size)); if (!ret) { @@ -4157,7 +4126,7 @@ memorystatus_act_on_hiwat_processes(uint32_t *errors, uint32_t *hwm_kill, boolea #if CONFIG_JETSAM /* No highwater processes to kill. Continue or stop for now? 
*/ if (!is_reason_thrashing(kill_under_pressure_cause) && - !is_reason_zone_map_exhaustion(kill_under_pressure_cause) && + !is_reason_zone_map_exhaustion(kill_under_pressure_cause) && (memorystatus_available_pages > memorystatus_available_pages_critical)) { /* * We are _not_ out of pressure but we are above the critical threshold and there's: @@ -4166,7 +4135,7 @@ memorystatus_act_on_hiwat_processes(uint32_t *errors, uint32_t *hwm_kill, boolea * - no more HWM processes left. * For now, don't kill any other processes. */ - + if (*hwm_kill == 0) { memorystatus_thread_wasted_wakeup++; } @@ -4184,27 +4153,26 @@ static boolean_t memorystatus_act_aggressive(uint32_t cause, os_reason_t jetsam_reason, int *jld_idle_kills, boolean_t *corpse_list_purged, boolean_t *post_snapshot) { if (memorystatus_jld_enabled == TRUE) { - boolean_t killed; uint32_t errors = 0; /* Jetsam Loop Detection - locals */ memstat_bucket_t *bucket; - int jld_bucket_count = 0; - struct timeval jld_now_tstamp = {0,0}; - uint64_t jld_now_msecs = 0; - int elevated_bucket_count = 0; + int jld_bucket_count = 0; + struct timeval jld_now_tstamp = {0, 0}; + uint64_t jld_now_msecs = 0; + int elevated_bucket_count = 0; /* Jetsam Loop Detection - statics */ static uint64_t jld_timestamp_msecs = 0; - static int jld_idle_kill_candidates = 0; /* Number of available processes in band 0,1 at start */ - static int jld_eval_aggressive_count = 0; /* Bumps the max priority in aggressive loop */ + static int jld_idle_kill_candidates = 0; /* Number of available processes in band 0,1 at start */ + static int jld_eval_aggressive_count = 0; /* Bumps the max priority in aggressive loop */ static int32_t jld_priority_band_max = JETSAM_PRIORITY_UI_SUPPORT; /* * Jetsam Loop Detection: attempt to detect * rapid daemon relaunches in the lower bands. 
*/ - + microuptime(&jld_now_tstamp); /* @@ -4247,17 +4215,16 @@ memorystatus_act_aggressive(uint32_t cause, os_reason_t jetsam_reason, int *jld_ * memorystatus_jld_eval_aggressive_count is a tunable * memorystatus_jld_eval_aggressive_priority_band_max is a tunable */ - if ( (jld_bucket_count == 0) || - (jld_now_msecs > (jld_timestamp_msecs + memorystatus_jld_eval_period_msecs))) { - - /* - * Refresh evaluation parameters + if ((jld_bucket_count == 0) || + (jld_now_msecs > (jld_timestamp_msecs + memorystatus_jld_eval_period_msecs))) { + /* + * Refresh evaluation parameters */ - jld_timestamp_msecs = jld_now_msecs; + jld_timestamp_msecs = jld_now_msecs; jld_idle_kill_candidates = jld_bucket_count; - *jld_idle_kills = 0; + *jld_idle_kills = 0; jld_eval_aggressive_count = 0; - jld_priority_band_max = JETSAM_PRIORITY_UI_SUPPORT; + jld_priority_band_max = JETSAM_PRIORITY_UI_SUPPORT; } if (*jld_idle_kills > jld_idle_kill_candidates) { @@ -4265,13 +4232,13 @@ memorystatus_act_aggressive(uint32_t cause, os_reason_t jetsam_reason, int *jld_ #if DEVELOPMENT || DEBUG printf("memorystatus: aggressive%d: beginning of window: %lld ms, : timestamp now: %lld ms\n", - jld_eval_aggressive_count, - jld_timestamp_msecs, - jld_now_msecs); + jld_eval_aggressive_count, + jld_timestamp_msecs, + jld_now_msecs); printf("memorystatus: aggressive%d: idle candidates: %d, idle kills: %d\n", - jld_eval_aggressive_count, - jld_idle_kill_candidates, - *jld_idle_kills); + jld_eval_aggressive_count, + jld_idle_kill_candidates, + *jld_idle_kills); #endif /* DEVELOPMENT || DEBUG */ if ((jld_eval_aggressive_count == memorystatus_jld_eval_aggressive_count) && @@ -4283,13 +4250,12 @@ memorystatus_act_aggressive(uint32_t cause, os_reason_t jetsam_reason, int *jld_ */ task_purge_all_corpses(); *corpse_list_purged = TRUE; - } - else if (jld_eval_aggressive_count > memorystatus_jld_eval_aggressive_count) { - /* + } else if (jld_eval_aggressive_count > memorystatus_jld_eval_aggressive_count) { + /* * Bump up the jetsam priority limit (eg: the bucket index) * Enforce bucket index sanity. */ - if ((memorystatus_jld_eval_aggressive_priority_band_max < 0) || + if ((memorystatus_jld_eval_aggressive_priority_band_max < 0) || (memorystatus_jld_eval_aggressive_priority_band_max >= MEMSTAT_BUCKET_COUNT)) { /* * Do nothing. Stick with the default level. @@ -4301,7 +4267,6 @@ memorystatus_act_aggressive(uint32_t cause, os_reason_t jetsam_reason, int *jld_ /* Visit elevated processes first */ while (elevated_bucket_count) { - elevated_bucket_count--; /* @@ -4345,16 +4310,16 @@ memorystatus_act_aggressive(uint32_t cause, os_reason_t jetsam_reason, int *jld_ */ killed = memorystatus_kill_top_process_aggressive( kMemorystatusKilledProcThrashing, - jld_eval_aggressive_count, - jld_priority_band_max, + jld_eval_aggressive_count, + jld_priority_band_max, &errors); - + if (killed) { /* Always generate logs after aggressive kill */ *post_snapshot = TRUE; *jld_idle_kills = 0; return TRUE; - } + } } return FALSE; @@ -4372,11 +4337,11 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused) uint32_t hwm_kill = 0; boolean_t sort_flag = TRUE; boolean_t corpse_list_purged = FALSE; - int jld_idle_kills = 0; + int jld_idle_kills = 0; struct jetsam_thread_state *jetsam_thread = jetsam_current_thread(); if (jetsam_thread->inited == FALSE) { - /* + /* * It's the first time the thread has run, so just mark the thread as privileged and block. * This avoids a spurious pass with unset variables, as set out in . 
 */
@@ -4394,17 +4359,17 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused)
 jetsam_thread->inited = TRUE;
 memorystatus_thread_block(0, memorystatus_thread);
 }
-
+
 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_SCAN) | DBG_FUNC_START,
- memorystatus_available_pages, memorystatus_jld_enabled, memorystatus_jld_eval_period_msecs, memorystatus_jld_eval_aggressive_count,0);
+ memorystatus_available_pages, memorystatus_jld_enabled, memorystatus_jld_eval_period_msecs, memorystatus_jld_eval_aggressive_count, 0);
 /*
 * Jetsam aware version.
 *
 * The VM pressure notification thread is working its way through clients in parallel.
 *
- * So, while the pressure notification thread is targeting processes in order of
- * increasing jetsam priority, we can hopefully reduce / stop its work by killing
+ * So, while the pressure notification thread is targeting processes in order of
+ * increasing jetsam priority, we can hopefully reduce / stop its work by killing
 * any processes that have exceeded their highwater mark.
 *
 * If we run out of HWM processes and our available pages drops below the critical threshold, then,
@@ -4419,24 +4384,24 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused)
 cause = kill_under_pressure_cause;
 switch (cause) {
- case kMemorystatusKilledFCThrashing:
- jetsam_reason_code = JETSAM_REASON_MEMORY_FCTHRASHING;
- break;
- case kMemorystatusKilledVMCompressorThrashing:
- jetsam_reason_code = JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING;
- break;
- case kMemorystatusKilledVMCompressorSpaceShortage:
- jetsam_reason_code = JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE;
- break;
- case kMemorystatusKilledZoneMapExhaustion:
- jetsam_reason_code = JETSAM_REASON_ZONE_MAP_EXHAUSTION;
- break;
- case kMemorystatusKilledVMPageShortage:
- /* falls through */
- default:
- jetsam_reason_code = JETSAM_REASON_MEMORY_VMPAGESHORTAGE;
- cause = kMemorystatusKilledVMPageShortage;
- break;
+ case kMemorystatusKilledFCThrashing:
+ jetsam_reason_code = JETSAM_REASON_MEMORY_FCTHRASHING;
+ break;
+ case kMemorystatusKilledVMCompressorThrashing:
+ jetsam_reason_code = JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING;
+ break;
+ case kMemorystatusKilledVMCompressorSpaceShortage:
+ jetsam_reason_code = JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE;
+ break;
+ case kMemorystatusKilledZoneMapExhaustion:
+ jetsam_reason_code = JETSAM_REASON_ZONE_MAP_EXHAUSTION;
+ break;
+ case kMemorystatusKilledVMPageShortage:
+ /* falls through */
+ default:
+ jetsam_reason_code = JETSAM_REASON_MEMORY_VMPAGESHORTAGE;
+ cause = kMemorystatusKilledVMPageShortage;
+ break;
 }
 /* Highwater */
@@ -4474,8 +4439,7 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused)
 if (killed) {
 if (memorystatus_post_snapshot(priority, cause) == TRUE) {
-
- post_snapshot = TRUE;
+ post_snapshot = TRUE;
 }
 /* Jetsam Loop Detection */
@@ -4500,7 +4464,7 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused)
 }
 goto done;
 }
-
+
 if (memorystatus_avail_pages_below_critical()) {
 /*
 * Still under pressure and unable to kill a process - purge corpse memory
@@ -4517,8 +4481,8 @@ memorystatus_thread(void *param __unused, wait_result_t wr __unused)
 panic("memorystatus_jetsam_thread: no victim! available pages:%llu\n", (uint64_t)memorystatus_available_pages);
 }
 }
-
-done:
+
+done:
 /*
 * We do not want to over-kill when thrashing has been detected.
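The hunks above reindent the core of memorystatus_thread() without changing its policy: the pending kill cause is mapped to an exit-reason code (with unrecognized causes folded into a VM page shortage), highwater-mark offenders are killed first, and only while available pages stay below the critical threshold does the thread escalate to corpse purging and, failing that, a panic because no victim was found. A minimal user-space sketch of that control flow follows; it reuses the constant names visible in the diff, but the numeric values, the stub predicates, and scan_once() itself are illustrative assumptions, not the kernel's API:

#include <stdbool.h>
#include <stdio.h>

/* Named after the kernel constants in the hunk above; values are placeholders. */
enum {
	kMemorystatusKilledFCThrashing = 1,
	kMemorystatusKilledVMCompressorThrashing,
	kMemorystatusKilledVMCompressorSpaceShortage,
	kMemorystatusKilledZoneMapExhaustion,
	kMemorystatusKilledVMPageShortage
};

enum {
	JETSAM_REASON_MEMORY_FCTHRASHING = 1,
	JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING,
	JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE,
	JETSAM_REASON_ZONE_MAP_EXHAUSTION,
	JETSAM_REASON_MEMORY_VMPAGESHORTAGE
};

/*
 * Mirror of the switch in the hunk above: translate a kill cause into an
 * exit-reason code, normalizing anything unrecognized to a page shortage so
 * the reported cause and the reason code always agree.
 */
static int
jetsam_reason_for_cause(int *cause)
{
	switch (*cause) {
	case kMemorystatusKilledFCThrashing:
		return JETSAM_REASON_MEMORY_FCTHRASHING;
	case kMemorystatusKilledVMCompressorThrashing:
		return JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING;
	case kMemorystatusKilledVMCompressorSpaceShortage:
		return JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE;
	case kMemorystatusKilledZoneMapExhaustion:
		return JETSAM_REASON_ZONE_MAP_EXHAUSTION;
	case kMemorystatusKilledVMPageShortage:
	/* falls through */
	default:
		*cause = kMemorystatusKilledVMPageShortage;
		return JETSAM_REASON_MEMORY_VMPAGESHORTAGE;
	}
}

/* Stub predicates standing in for the memorystatus_* helpers. */
static bool kill_hiwat_offender(void)  { return false; } /* no HWM process left */
static bool kill_top_process(void)     { return false; } /* no eligible victim */
static bool pages_below_critical(void) { return true; }

/* One pass of the escalation policy. */
static void
scan_once(int cause)
{
	int reason = jetsam_reason_for_cause(&cause);

	if (kill_hiwat_offender()) {
		return; /* freed memory without touching higher bands */
	}
	if (kill_top_process()) {
		return; /* a snapshot note may be posted for the kill */
	}
	if (pages_below_critical()) {
		/* Out of victims: the kernel purges corpse memory here and, on
		 * embedded configurations, panics if there is still no victim. */
		printf("no victim for cause %d (reason %d)\n", cause, reason);
	}
}

int
main(void)
{
	scan_once(kMemorystatusKilledVMCompressorThrashing);
	return 0;
}

Rewriting an unknown cause to kMemorystatusKilledVMPageShortage in the default branch keeps the cause and the reason code consistent even if a new cause reaches the thread before this switch learns about it, which is presumably why the kernel updates cause there as well.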
@@ -4538,7 +4502,7 @@ done: } kill_under_pressure_cause = 0; - + if (errors) { memorystatus_clear_errors(); } @@ -4546,12 +4510,12 @@ done: if (post_snapshot) { proc_list_lock(); size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + - sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_count); + sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_count); uint64_t timestamp_now = mach_absolute_time(); memorystatus_jetsam_snapshot->notification_time = timestamp_now; memorystatus_jetsam_snapshot->js_gencount++; if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 || - timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { + timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { proc_list_unlock(); int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size)); if (!ret) { @@ -4565,20 +4529,21 @@ done: } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_SCAN) | DBG_FUNC_END, - memorystatus_available_pages, 0, 0, 0, 0); + memorystatus_available_pages, 0, 0, 0, 0); memorystatus_thread_block(0, memorystatus_thread); } /* * Returns TRUE: - * when an idle-exitable proc was killed + * when an idle-exitable proc was killed * Returns FALSE: * when there are no more idle-exitable procs found - * when the attempt to kill an idle-exitable proc failed + * when the attempt to kill an idle-exitable proc failed */ -boolean_t memorystatus_idle_exit_from_VM(void) { - +boolean_t +memorystatus_idle_exit_from_VM(void) +{ /* * This routine should no longer be needed since we are * now using jetsam bands on all platforms and so will deal @@ -4590,7 +4555,7 @@ boolean_t memorystatus_idle_exit_from_VM(void) { * to rather kill those processes than start swapping earlier. */ - return(kill_idle_exit_proc()); + return kill_idle_exit_proc(); } /* @@ -4613,7 +4578,7 @@ memorystatus_on_ledger_footprint_exceeded(boolean_t warning, boolean_t memlimit_ * This is a warning path which implies that the current process is close, but has * not yet exceeded its per-process memory limit. */ - if (memorystatus_warn_process(p->p_pid, memlimit_is_active, memlimit_is_fatal, FALSE /* not exceeded */) != TRUE) { + if (memorystatus_warn_process(p->p_pid, memlimit_is_active, memlimit_is_fatal, FALSE /* not exceeded */) != TRUE) { /* Print warning, since it's possible that task has not registered for pressure notifications */ os_log(OS_LOG_DEFAULT, "memorystatus_on_ledger_footprint_exceeded: failed to warn the current task (%d exiting, or no handler registered?).\n", p->p_pid); } @@ -4668,9 +4633,9 @@ memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_act */ os_log_with_startup_serial(OS_LOG_DEFAULT, "EXC_RESOURCE -> %s[%d] exceeded mem limit: %s%s %d MB (%s)\n", - (*p->p_name ? p->p_name : "unknown"), p->p_pid, (memlimit_is_active ? "Active" : "Inactive"), - (memlimit_is_fatal ? "Hard" : "Soft"), max_footprint_mb, - (memlimit_is_fatal ? "fatal" : "non-fatal")); + (*p->p_name ? p->p_name : "unknown"), p->p_pid, (memlimit_is_active ? "Active" : "Inactive"), + (memlimit_is_fatal ? "Hard" : "Soft"), max_footprint_mb, + (memlimit_is_fatal ? 
"fatal" : "non-fatal")); return; } @@ -4700,8 +4665,8 @@ memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_act */ static boolean_t -proc_jetsam_state_is_active_locked(proc_t p) { - +proc_jetsam_state_is_active_locked(proc_t p) +{ if ((p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) && (p->p_memstat_effectivepriority == JETSAM_PRIORITY_ELEVATED_INACTIVE)) { /* @@ -4743,8 +4708,9 @@ proc_jetsam_state_is_active_locked(proc_t p) { } } -static boolean_t -memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason) { +static boolean_t +memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason) +{ boolean_t res; uint32_t errors = 0; @@ -4755,7 +4721,7 @@ memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jet } else { res = memorystatus_kill_specific_process(victim_pid, cause, jetsam_reason); } - + if (errors) { memorystatus_clear_errors(); } @@ -4763,12 +4729,12 @@ memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jet if (res == TRUE) { /* Fire off snapshot notification */ proc_list_lock(); - size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + - sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_count; + size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + + sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_count; uint64_t timestamp_now = mach_absolute_time(); memorystatus_jetsam_snapshot->notification_time = timestamp_now; if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 || - timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { + timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { proc_list_unlock(); int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size)); if (!ret) { @@ -4787,14 +4753,15 @@ memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jet /* * Jetsam a specific process. */ -static boolean_t -memorystatus_kill_specific_process(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason) { +static boolean_t +memorystatus_kill_specific_process(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason) +{ boolean_t killed; proc_t p; uint64_t killtime = 0; - clock_sec_t tv_sec; - clock_usec_t tv_usec; - uint32_t tv_msec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; + uint32_t tv_msec; /* TODO - add a victim queue and push this into the main jetsam thread */ @@ -4807,24 +4774,24 @@ memorystatus_kill_specific_process(pid_t victim_pid, uint32_t cause, os_reason_t proc_list_lock(); if (memorystatus_jetsam_snapshot_count == 0) { - memorystatus_init_jetsam_snapshot_locked(NULL,0); + memorystatus_init_jetsam_snapshot_locked(NULL, 0); } killtime = mach_absolute_time(); - absolutetime_to_microtime(killtime, &tv_sec, &tv_usec); - tv_msec = tv_usec / 1000; + absolutetime_to_microtime(killtime, &tv_sec, &tv_usec); + tv_msec = tv_usec / 1000; memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime); proc_list_unlock(); os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: killing_specific_process pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n", - (unsigned long)tv_sec, tv_msec, victim_pid, (*p->p_name ? 
p->p_name : "unknown"), - memorystatus_kill_cause_name[cause], p->p_memstat_effectivepriority, (uint64_t)memorystatus_available_pages); - + (unsigned long)tv_sec, tv_msec, victim_pid, (*p->p_name ? p->p_name : "unknown"), + memorystatus_kill_cause_name[cause], p->p_memstat_effectivepriority, (uint64_t)memorystatus_available_pages); + killed = memorystatus_do_kill(p, cause, jetsam_reason); proc_rele(p); - + return killed; } @@ -4860,7 +4827,7 @@ proc_memstat_terminated(proc_t p, boolean_t set) /* * This is invoked when cpulimits have been exceeded while in fatal mode. * The jetsam_flags do not apply as those are for memory related kills. - * We call this routine so that the offending process is killed with + * We call this routine so that the offending process is killed with * a non-zero exit status. */ void @@ -4872,7 +4839,7 @@ jetsam_on_ledger_cpulimit_exceeded(void) os_reason_t jetsam_reason = OS_REASON_NULL; printf("task_exceeded_cpulimit: killing pid %d [%s]\n", - p->p_pid, (*p->p_name ? p->p_name : "(unknown)")); + p->p_pid, (*p->p_name ? p->p_name : "(unknown)")); jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_CPULIMIT); if (jetsam_reason == OS_REASON_NULL) { @@ -4880,7 +4847,7 @@ jetsam_on_ledger_cpulimit_exceeded(void) } retval = jetsam_do_kill(p, jetsam_flags, jetsam_reason); - + if (retval) { printf("task_exceeded_cpulimit: failed to kill current task (exiting?).\n"); } @@ -4921,9 +4888,9 @@ uint64_t memorystatus_vm_map_fork_pidwatch_val = 0; static int sysctl_memorystatus_vm_map_fork_pidwatch SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - uint64_t new_value = 0; + uint64_t new_value = 0; uint64_t old_value = 0; - int error = 0; + int error = 0; /* * The pid is held in the low 32 bits. @@ -4931,13 +4898,13 @@ static int sysctl_memorystatus_vm_map_fork_pidwatch SYSCTL_HANDLER_ARGS { */ old_value = memorystatus_vm_map_fork_pidwatch_val; - error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL); + error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL); - if (error || !req->newptr) { + if (error || !req->newptr) { /* * No new value passed in. 
*/ - return(error); + return error; } /* @@ -4947,11 +4914,11 @@ static int sysctl_memorystatus_vm_map_fork_pidwatch SYSCTL_HANDLER_ARGS { memorystatus_vm_map_fork_pidwatch_val = new_value & 0xFFFFFFFF; printf("memorystatus: pidwatch old_value = 0x%llx, new_value = 0x%llx \n", old_value, new_value); - return(error); + return error; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_map_fork_pidwatch, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED| CTLFLAG_MASKED, - 0, 0, sysctl_memorystatus_vm_map_fork_pidwatch, "Q", "get/set pid watched for in vm_map_fork"); +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_map_fork_pidwatch, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED, + 0, 0, sysctl_memorystatus_vm_map_fork_pidwatch, "Q", "get/set pid watched for in vm_map_fork"); /* @@ -5024,7 +4991,7 @@ memorystatus_allowed_vm_map_fork(task_t task) if (max_task_footprint_mb == 0) { set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED); - return (is_allowed); + return is_allowed; } footprint_in_bytes = get_task_phys_footprint(task); @@ -5037,13 +5004,12 @@ memorystatus_allowed_vm_map_fork(task_t task) if (footprint_in_bytes > max_allowed_bytes) { printf("memorystatus disallowed vm_map_fork %lld %lld\n", footprint_in_bytes, max_allowed_bytes); set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED); - return (!is_allowed); + return !is_allowed; } #endif /* CONFIG_EMBEDDED */ set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED); - return (is_allowed); - + return is_allowed; } static void @@ -5059,7 +5025,7 @@ memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *ma *footprint = (uint32_t)pages; if (max_footprint_lifetime) { - pages = (get_task_resident_max(task) / PAGE_SIZE_64); + pages = (get_task_phys_footprint_lifetime_max(task) / PAGE_SIZE_64); assert(((uint32_t)pages) == pages); *max_footprint_lifetime = (uint32_t)pages; } @@ -5072,10 +5038,10 @@ memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *ma static void memorystatus_get_task_phys_footprint_page_counts(task_t task, - uint64_t *internal_pages, uint64_t *internal_compressed_pages, - uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages, - uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages, - uint64_t *iokit_mapped_pages, uint64_t *page_table_pages) + uint64_t *internal_pages, uint64_t *internal_compressed_pages, + uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages, + uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages, + uint64_t *iokit_mapped_pages, uint64_t *page_table_pages) { assert(task); @@ -5147,7 +5113,6 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, for (i = 0; i < memorystatus_jetsam_snapshot_count; i++) { if (snapshot_list[i].pid == p->p_pid) { - entry = &snapshot_list[i]; if (entry->killed || entry->jse_killtime) { @@ -5178,56 +5143,55 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, * If a process has moved between bands since snapshot was * initialized, then likely these fields changed too. 
*/ - if (entry->priority != p->p_memstat_effectivepriority) { - + if (entry->priority != p->p_memstat_effectivepriority) { strlcpy(entry->name, p->p_name, sizeof(entry->name)); entry->priority = p->p_memstat_effectivepriority; entry->state = memorystatus_build_state(p); entry->user_data = p->p_memstat_userdata; entry->fds = p->p_fd->fd_nfiles; - } - - /* - * Always update the page counts on a kill. - */ - - uint32_t pages = 0; - uint32_t max_pages_lifetime = 0; - uint32_t purgeable_pages = 0; - - memorystatus_get_task_page_counts(p->task, &pages, &max_pages_lifetime, &purgeable_pages); - entry->pages = (uint64_t)pages; - entry->max_pages_lifetime = (uint64_t)max_pages_lifetime; - entry->purgeable_pages = (uint64_t)purgeable_pages; - - uint64_t internal_pages = 0; - uint64_t internal_compressed_pages = 0; - uint64_t purgeable_nonvolatile_pages = 0; - uint64_t purgeable_nonvolatile_compressed_pages = 0; - uint64_t alternate_accounting_pages = 0; - uint64_t alternate_accounting_compressed_pages = 0; - uint64_t iokit_mapped_pages = 0; - uint64_t page_table_pages = 0; - - memorystatus_get_task_phys_footprint_page_counts(p->task, &internal_pages, &internal_compressed_pages, - &purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages, - &alternate_accounting_pages, &alternate_accounting_compressed_pages, - &iokit_mapped_pages, &page_table_pages); + } - entry->jse_internal_pages = internal_pages; - entry->jse_internal_compressed_pages = internal_compressed_pages; - entry->jse_purgeable_nonvolatile_pages = purgeable_nonvolatile_pages; - entry->jse_purgeable_nonvolatile_compressed_pages = purgeable_nonvolatile_compressed_pages; - entry->jse_alternate_accounting_pages = alternate_accounting_pages; - entry->jse_alternate_accounting_compressed_pages = alternate_accounting_compressed_pages; - entry->jse_iokit_mapped_pages = iokit_mapped_pages; - entry->jse_page_table_pages = page_table_pages; + /* + * Always update the page counts on a kill. 
+ */
- uint64_t region_count = 0;
- memorystatus_get_task_memory_region_count(p->task, &region_count);
- entry->jse_memory_region_count = region_count;
+ uint32_t pages = 0;
+ uint32_t max_pages_lifetime = 0;
+ uint32_t purgeable_pages = 0;
+
+ memorystatus_get_task_page_counts(p->task, &pages, &max_pages_lifetime, &purgeable_pages);
+ entry->pages = (uint64_t)pages;
+ entry->max_pages_lifetime = (uint64_t)max_pages_lifetime;
+ entry->purgeable_pages = (uint64_t)purgeable_pages;
+
+ uint64_t internal_pages = 0;
+ uint64_t internal_compressed_pages = 0;
+ uint64_t purgeable_nonvolatile_pages = 0;
+ uint64_t purgeable_nonvolatile_compressed_pages = 0;
+ uint64_t alternate_accounting_pages = 0;
+ uint64_t alternate_accounting_compressed_pages = 0;
+ uint64_t iokit_mapped_pages = 0;
+ uint64_t page_table_pages = 0;
+
+ memorystatus_get_task_phys_footprint_page_counts(p->task, &internal_pages, &internal_compressed_pages,
+ &purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages,
+ &alternate_accounting_pages, &alternate_accounting_compressed_pages,
+ &iokit_mapped_pages, &page_table_pages);
+
+ entry->jse_internal_pages = internal_pages;
+ entry->jse_internal_compressed_pages = internal_compressed_pages;
+ entry->jse_purgeable_nonvolatile_pages = purgeable_nonvolatile_pages;
+ entry->jse_purgeable_nonvolatile_compressed_pages = purgeable_nonvolatile_compressed_pages;
+ entry->jse_alternate_accounting_pages = alternate_accounting_pages;
+ entry->jse_alternate_accounting_compressed_pages = alternate_accounting_compressed_pages;
+ entry->jse_iokit_mapped_pages = iokit_mapped_pages;
+ entry->jse_page_table_pages = page_table_pages;
+
+ uint64_t region_count = 0;
+ memorystatus_get_task_memory_region_count(p->task, &region_count);
+ entry->jse_memory_region_count = region_count;
- goto exit;
+ goto exit;
 }
 }
@@ -5246,8 +5210,7 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause,
 unsigned int next = memorystatus_jetsam_snapshot_count;
- if(memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[next], (snapshot->js_gencount)) == TRUE) {
-
+ if (memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[next], (snapshot->js_gencount)) == TRUE) {
 entry = &snapshot_list[next];
 entry->killed = kill_cause;
 entry->jse_killtime = killtime;
@@ -5262,7 +5225,7 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause,
 * when we notice we've hit the max.
 */
 printf("memorystatus: WARNING snapshot buffer is full, count %d\n",
- memorystatus_jetsam_snapshot_count);
+ memorystatus_jetsam_snapshot_count);
 }
 }
 }
@@ -5280,28 +5243,28 @@ exit:
 */
 MEMORYSTATUS_DEBUG(4, "memorystatus_update_jetsam_snapshot_entry_locked: failed to update pid %d, priority %d, count %d\n",
- p->p_pid, p->p_memstat_effectivepriority, memorystatus_jetsam_snapshot_count);
+ p->p_pid, p->p_memstat_effectivepriority, memorystatus_jetsam_snapshot_count);
 }
 return;
 }
 #if CONFIG_JETSAM
-void memorystatus_pages_update(unsigned int pages_avail)
+void
+memorystatus_pages_update(unsigned int pages_avail)
 {
 memorystatus_available_pages = pages_avail;
 #if VM_PRESSURE_EVENTS
 /*
 * Since memorystatus_available_pages changes, we should
- * re-evaluate the pressure levels on the system and
+ * re-evaluate the pressure levels on the system and
 * check if we need to wake the pressure thread.
 * We also update memorystatus_level in that routine.
- */ + */ vm_pressure_response(); if (memorystatus_available_pages <= memorystatus_available_pages_pressure) { - if (memorystatus_hwm_candidates || (memorystatus_available_pages <= memorystatus_available_pages_critical)) { memorystatus_thread_wake(); } @@ -5317,7 +5280,7 @@ void memorystatus_pages_update(unsigned int pages_avail) if (memorystatus_freeze_thread_should_run() == TRUE) { /* * The freezer thread is usually woken up by some user-space call i.e. pid_hibernate(any process). - * That trigger isn't invoked often enough and so we are enabling this explicit wakeup here. + * That trigger isn't invoked often enough and so we are enabling this explicit wakeup here. */ if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { thread_wakeup((event_t)&memorystatus_freeze_wakeup); @@ -5328,15 +5291,15 @@ void memorystatus_pages_update(unsigned int pages_avail) #else /* VM_PRESSURE_EVENTS */ boolean_t critical, delta; - + if (!memorystatus_delta) { - return; + return; } - + critical = (pages_avail < memorystatus_available_pages_critical) ? TRUE : FALSE; - delta = ((pages_avail >= (memorystatus_available_pages + memorystatus_delta)) - || (memorystatus_available_pages >= (pages_avail + memorystatus_delta))) ? TRUE : FALSE; - + delta = ((pages_avail >= (memorystatus_available_pages + memorystatus_delta)) + || (memorystatus_available_pages >= (pages_avail + memorystatus_delta))) ? TRUE : FALSE; + if (critical || delta) { unsigned int total_pages; @@ -5353,21 +5316,21 @@ void memorystatus_pages_update(unsigned int pages_avail) static boolean_t memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_snapshot_entry_t *entry, uint64_t gencount) -{ +{ clock_sec_t tv_sec; clock_usec_t tv_usec; uint32_t pages = 0; uint32_t max_pages_lifetime = 0; uint32_t purgeable_pages = 0; - uint64_t internal_pages = 0; - uint64_t internal_compressed_pages = 0; - uint64_t purgeable_nonvolatile_pages = 0; - uint64_t purgeable_nonvolatile_compressed_pages = 0; - uint64_t alternate_accounting_pages = 0; - uint64_t alternate_accounting_compressed_pages = 0; - uint64_t iokit_mapped_pages = 0; - uint64_t page_table_pages =0; - uint64_t region_count = 0; + uint64_t internal_pages = 0; + uint64_t internal_compressed_pages = 0; + uint64_t purgeable_nonvolatile_pages = 0; + uint64_t purgeable_nonvolatile_compressed_pages = 0; + uint64_t alternate_accounting_pages = 0; + uint64_t alternate_accounting_compressed_pages = 0; + uint64_t iokit_mapped_pages = 0; + uint64_t page_table_pages = 0; + uint64_t region_count = 0; uint64_t cids[COALITION_NUM_TYPES]; memset(entry, 0, sizeof(memorystatus_jetsam_snapshot_entry_t)); @@ -5382,9 +5345,9 @@ memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_sna entry->purgeable_pages = (uint64_t)purgeable_pages; memorystatus_get_task_phys_footprint_page_counts(p->task, &internal_pages, &internal_compressed_pages, - &purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages, - &alternate_accounting_pages, &alternate_accounting_compressed_pages, - &iokit_mapped_pages, &page_table_pages); + &purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages, + &alternate_accounting_pages, &alternate_accounting_compressed_pages, + &iokit_mapped_pages, &page_table_pages); entry->jse_internal_pages = internal_pages; entry->jse_internal_compressed_pages = internal_compressed_pages; @@ -5408,10 +5371,10 @@ memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_sna entry->cpu_time.tv_usec = (int64_t)tv_usec; assert(p->p_stats != NULL); - 
entry->jse_starttime = p->p_stats->ps_start; /* abstime process started */ - entry->jse_killtime = 0; /* abstime jetsam chose to kill process */ - entry->killed = 0; /* the jetsam kill cause */ - entry->jse_gencount = gencount; /* indicates a pass through jetsam thread, when process was targeted to be killed */ + entry->jse_starttime = p->p_stats->ps_start; /* abstime process started */ + entry->jse_killtime = 0; /* abstime jetsam chose to kill process */ + entry->killed = 0; /* the jetsam kill cause */ + entry->jse_gencount = gencount; /* indicates a pass through jetsam thread, when process was targeted to be killed */ entry->jse_idle_delta = p->p_memstat_idle_delta; /* Most recent timespan spent in idle-band */ @@ -5424,26 +5387,26 @@ memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_sna proc_coalitionids(p, cids); entry->jse_coalition_jetsam_id = cids[COALITION_TYPE_JETSAM]; - return TRUE; + return TRUE; } static void memorystatus_init_snapshot_vmstats(memorystatus_jetsam_snapshot_t *snapshot) { kern_return_t kr = KERN_SUCCESS; - mach_msg_type_number_t count = HOST_VM_INFO64_COUNT; - vm_statistics64_data_t vm_stat; + mach_msg_type_number_t count = HOST_VM_INFO64_COUNT; + vm_statistics64_data_t vm_stat; if ((kr = host_statistics64(host_self(), HOST_VM_INFO64, (host_info64_t)&vm_stat, &count)) != KERN_SUCCESS) { printf("memorystatus_init_jetsam_snapshot_stats: host_statistics64 failed with %d\n", kr); memset(&snapshot->stats, 0, sizeof(snapshot->stats)); } else { - snapshot->stats.free_pages = vm_stat.free_count; - snapshot->stats.active_pages = vm_stat.active_count; - snapshot->stats.inactive_pages = vm_stat.inactive_count; - snapshot->stats.throttled_pages = vm_stat.throttled_count; - snapshot->stats.purgeable_pages = vm_stat.purgeable_count; - snapshot->stats.wired_pages = vm_stat.wire_count; + snapshot->stats.free_pages = vm_stat.free_count; + snapshot->stats.active_pages = vm_stat.active_count; + snapshot->stats.inactive_pages = vm_stat.inactive_count; + snapshot->stats.throttled_pages = vm_stat.throttled_count; + snapshot->stats.purgeable_pages = vm_stat.purgeable_count; + snapshot->stats.wired_pages = vm_stat.wire_count; snapshot->stats.speculative_pages = vm_stat.speculative_count; snapshot->stats.filebacked_pages = vm_stat.external_page_count; @@ -5456,7 +5419,7 @@ memorystatus_init_snapshot_vmstats(memorystatus_jetsam_snapshot_t *snapshot) get_zone_map_size(&snapshot->stats.zone_map_size, &snapshot->stats.zone_map_capacity); get_largest_zone_info(snapshot->stats.largest_zone_name, sizeof(snapshot->stats.largest_zone_name), - &snapshot->stats.largest_zone_size); + &snapshot->stats.largest_zone_size); } /* @@ -5465,7 +5428,8 @@ memorystatus_init_snapshot_vmstats(memorystatus_jetsam_snapshot_t *snapshot) * Data can be consumed at any time. 
*/ void -memorystatus_init_at_boot_snapshot() { +memorystatus_init_at_boot_snapshot() +{ memorystatus_init_snapshot_vmstats(&memorystatus_at_boot_snapshot); memorystatus_at_boot_snapshot.entry_count = 0; memorystatus_at_boot_snapshot.notification_time = 0; /* updated when consumed */ @@ -5512,19 +5476,19 @@ memorystatus_init_jetsam_snapshot_locked(memorystatus_jetsam_snapshot_t *od_snap while (next_p) { p = next_p; next_p = memorystatus_get_next_proc_locked(&b, p, TRUE); - + if (FALSE == memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[i], snapshot->js_gencount)) { continue; } - + MEMORYSTATUS_DEBUG(0, "jetsam snapshot pid %d, uuid = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", - p->p_pid, - p->p_uuid[0], p->p_uuid[1], p->p_uuid[2], p->p_uuid[3], p->p_uuid[4], p->p_uuid[5], p->p_uuid[6], p->p_uuid[7], - p->p_uuid[8], p->p_uuid[9], p->p_uuid[10], p->p_uuid[11], p->p_uuid[12], p->p_uuid[13], p->p_uuid[14], p->p_uuid[15]); + p->p_pid, + p->p_uuid[0], p->p_uuid[1], p->p_uuid[2], p->p_uuid[3], p->p_uuid[4], p->p_uuid[5], p->p_uuid[6], p->p_uuid[7], + p->p_uuid[8], p->p_uuid[9], p->p_uuid[10], p->p_uuid[11], p->p_uuid[12], p->p_uuid[13], p->p_uuid[14], p->p_uuid[15]); if (++i == snapshot_max) { break; - } + } } snapshot->entry_count = i; @@ -5539,10 +5503,11 @@ memorystatus_init_jetsam_snapshot_locked(memorystatus_jetsam_snapshot_t *od_snap #if CONFIG_JETSAM static int -memorystatus_cmd_set_panic_bits(user_addr_t buffer, uint32_t buffer_size) { +memorystatus_cmd_set_panic_bits(user_addr_t buffer, uint32_t buffer_size) +{ int ret; memorystatus_jetsam_panic_options_t debug; - + if (buffer_size != sizeof(memorystatus_jetsam_panic_options_t)) { return EINVAL; } @@ -5551,14 +5516,14 @@ memorystatus_cmd_set_panic_bits(user_addr_t buffer, uint32_t buffer_size) { if (ret) { return ret; } - + /* Panic bits match kMemorystatusKilled* enum */ memorystatus_jetsam_panic_debug = (memorystatus_jetsam_panic_debug & ~debug.mask) | (debug.data & debug.mask); - + /* Copyout new value */ debug.data = memorystatus_jetsam_panic_debug; ret = copyout(&debug, buffer, sizeof(memorystatus_jetsam_panic_options_t)); - + return ret; } #endif /* CONFIG_JETSAM */ @@ -5569,8 +5534,8 @@ memorystatus_cmd_set_panic_bits(user_addr_t buffer, uint32_t buffer_size) { * function. 
*/ static int -memorystatus_cmd_test_jetsam_sort(int priority, int sort_order) { - +memorystatus_cmd_test_jetsam_sort(int priority, int sort_order) +{ int error = 0; unsigned int bucket_index = 0; @@ -5584,7 +5549,7 @@ memorystatus_cmd_test_jetsam_sort(int priority, int sort_order) { error = memorystatus_sort_bucket(bucket_index, sort_order); - return (error); + return error; } #endif /* DEVELOPMENT || DEBUG */ @@ -5600,12 +5565,12 @@ memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, bool pid_t aPid = 0; uint32_t aPid_ep = 0; - uint64_t killtime = 0; - clock_sec_t tv_sec; - clock_usec_t tv_usec; - uint32_t tv_msec; - boolean_t retval = FALSE; - uint64_t num_pages_purged = 0; + uint64_t killtime = 0; + clock_sec_t tv_sec; + clock_usec_t tv_usec; + uint32_t tv_msec; + boolean_t retval = FALSE; + uint64_t num_pages_purged = 0; aPid = p->p_pid; aPid_ep = p->p_memstat_effectivepriority; @@ -5626,18 +5591,17 @@ memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, bool */ if (cause == kMemorystatusKilledHiwat) { uint64_t footprint_in_bytes = get_task_phys_footprint(p->task); - uint64_t memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */ + uint64_t memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */ success = (footprint_in_bytes <= memlimit_in_bytes); } else { success = (memorystatus_avail_pages_below_pressure() == FALSE); } if (success) { - memorystatus_purge_before_jetsam_success++; os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: purged %llu pages from pid %d [%s] and avoided %s\n", - num_pages_purged, aPid, (*p->p_name ? p->p_name : "unknown"), memorystatus_kill_cause_name[cause]); + num_pages_purged, aPid, (*p->p_name ? p->p_name : "unknown"), memorystatus_kill_cause_name[cause]); *killed = FALSE; @@ -5648,10 +5612,10 @@ memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, bool #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) MEMORYSTATUS_DEBUG(1, "jetsam: %s pid %d [%s] - %lld Mb > 1 (%d Mb)\n", - (memorystatus_jetsam_policy & kPolicyDiagnoseActive) ? "suspending": "killing", - aPid, (*p->p_name ? p->p_name : "unknown"), - (footprint_in_bytes / (1024ULL * 1024ULL)), /* converted bytes to MB */ - p->p_memstat_memlimit); + (memorystatus_jetsam_policy & kPolicyDiagnoseActive) ? "suspending": "killing", + aPid, (*p->p_name ? p->p_name : "unknown"), + (footprint_in_bytes / (1024ULL * 1024ULL)), /* converted bytes to MB */ + p->p_memstat_memlimit); #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ killtime = mach_absolute_time(); @@ -5662,17 +5626,17 @@ memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, bool if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) { if (cause == kMemorystatusKilledHiwat) { MEMORYSTATUS_DEBUG(1, "jetsam: suspending pid %d [%s] for diagnosis - memorystatus_available_pages: %d\n", - aPid, (*p->p_name ? p->p_name: "(unknown)"), memorystatus_available_pages); + aPid, (*p->p_name ? p->p_name: "(unknown)"), memorystatus_available_pages); } else { int activeProcess = p->p_memstat_state & P_MEMSTAT_FOREGROUND; if (activeProcess) { MEMORYSTATUS_DEBUG(1, "jetsam: suspending pid %d [%s] (active) for diagnosis - memorystatus_available_pages: %d\n", - aPid, (*p->p_name ? p->p_name: "(unknown)"), memorystatus_available_pages); + aPid, (*p->p_name ? 
p->p_name: "(unknown)"), memorystatus_available_pages); - if (memorystatus_jetsam_policy & kPolicyDiagnoseFirst) { - jetsam_diagnostic_suspended_one_active_proc = 1; - printf("jetsam: returning after suspending first active proc - %d\n", aPid); - } + if (memorystatus_jetsam_policy & kPolicyDiagnoseFirst) { + jetsam_diagnostic_suspended_one_active_proc = 1; + printf("jetsam: returning after suspending first active proc - %d\n", aPid); + } } } @@ -5707,9 +5671,9 @@ memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, bool } os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: %s pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n", - (unsigned long)tv_sec, tv_msec, kill_reason_string, - aPid, (*p->p_name ? p->p_name : "unknown"), - memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); + (unsigned long)tv_sec, tv_msec, kill_reason_string, + aPid, (*p->p_name ? p->p_name : "unknown"), + memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); /* * memorystatus_do_kill drops a reference, so take another one so we can @@ -5731,21 +5695,21 @@ memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, bool */ static boolean_t memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause, os_reason_t jetsam_reason, - int32_t *priority, uint32_t *errors) + int32_t *priority, uint32_t *errors) { pid_t aPid; proc_t p = PROC_NULL, next_p = PROC_NULL; boolean_t new_snapshot = FALSE, force_new_snapshot = FALSE, killed = FALSE, freed_mem = FALSE; unsigned int i = 0; uint32_t aPid_ep; - int32_t local_max_kill_prio = JETSAM_PRIORITY_IDLE; + int32_t local_max_kill_prio = JETSAM_PRIORITY_IDLE; #ifndef CONFIG_FREEZE #pragma unused(any) #endif - + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, - memorystatus_available_pages, 0, 0, 0, 0); + memorystatus_available_pages, 0, 0, 0, 0); #if CONFIG_JETSAM @@ -5798,21 +5762,21 @@ memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause #if DEVELOPMENT || DEBUG int procSuspendedForDiagnosis; #endif /* DEVELOPMENT || DEBUG */ - + p = next_p; next_p = memorystatus_get_next_proc_locked(&i, p, TRUE); - + #if DEVELOPMENT || DEBUG procSuspendedForDiagnosis = p->p_memstat_state & P_MEMSTAT_DIAG_SUSPENDED; #endif /* DEVELOPMENT || DEBUG */ - + aPid = p->p_pid; aPid_ep = p->p_memstat_effectivepriority; if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED)) { continue; /* with lock held */ } - + #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) if ((memorystatus_jetsam_policy & kPolicyDiagnoseActive) && procSuspendedForDiagnosis) { printf("jetsam: continuing after ignoring proc suspended already for diagnosis - %d\n", aPid); @@ -5820,8 +5784,7 @@ memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause } #endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */ - if (cause == kMemorystatusKilledVnodes) - { + if (cause == kMemorystatusKilledVnodes) { /* * If the system runs out of vnodes, we systematically jetsam * processes in hopes of stumbling onto a vnode gain that helps @@ -5844,7 +5807,7 @@ memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause } else { skip = TRUE; } - + if (skip) { continue; } else @@ -5858,7 +5821,6 @@ memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause * acquisition of the proc lock. 
*/ p->p_memstat_state |= P_MEMSTAT_TERMINATED; - } else { /* * We need to restart the search again because @@ -5871,19 +5833,19 @@ memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause continue; } - /* - * Capture a snapshot if none exists and: + /* + * Capture a snapshot if none exists and: * - we are forcing a new snapshot creation, either because: - * - on a particular platform we need these snapshots every time, OR + * - on a particular platform we need these snapshots every time, OR * - a boot-arg/embedded device tree property has been set. - * - priority was not requested (this is something other than an ambient kill) - * - the priority was requested *and* the targeted process is not at idle priority - */ - if ((memorystatus_jetsam_snapshot_count == 0) && + * - priority was not requested (this is something other than an ambient kill) + * - the priority was requested *and* the targeted process is not at idle priority + */ + if ((memorystatus_jetsam_snapshot_count == 0) && (force_new_snapshot || memorystatus_idle_snapshot || ((!priority) || (priority && (aPid_ep != JETSAM_PRIORITY_IDLE))))) { - memorystatus_init_jetsam_snapshot_locked(NULL,0); - new_snapshot = TRUE; - } + memorystatus_init_jetsam_snapshot_locked(NULL, 0); + new_snapshot = TRUE; + } proc_list_unlock(); @@ -5903,7 +5865,7 @@ memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause proc_rele(p); goto exit; } - + /* * Failure - first unwind the state, * then fall through to restart the search. @@ -5918,9 +5880,9 @@ memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause next_p = memorystatus_get_first_proc_locked(&i, TRUE); } } - + proc_list_unlock(); - + exit: os_reason_free(jetsam_reason); @@ -5930,19 +5892,19 @@ exit: memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; proc_list_unlock(); } - + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, - memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0); + memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0); return killed; } /* - * Jetsam aggressively + * Jetsam aggressively */ static boolean_t memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, - int32_t priority_max, uint32_t *errors) + int32_t priority_max, uint32_t *errors) { pid_t aPid; proc_t p = PROC_NULL, next_p = PROC_NULL; @@ -5952,13 +5914,13 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, int32_t aPid_ep = 0; unsigned int memorystatus_level_snapshot = 0; uint64_t killtime = 0; - clock_sec_t tv_sec; - clock_usec_t tv_usec; - uint32_t tv_msec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; + uint32_t tv_msec; os_reason_t jetsam_reason = OS_REASON_NULL; KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, - memorystatus_available_pages, priority_max, 0, 0, 0); + memorystatus_available_pages, priority_max, 0, 0, 0); memorystatus_sort_bucket(JETSAM_PRIORITY_FOREGROUND, JETSAM_SORT_DEFAULT); @@ -5978,7 +5940,6 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, if (((next_p->p_listflag & P_LIST_EXITED) != 0) || ((unsigned int)(next_p->p_memstat_effectivepriority) != i)) { - /* * We have raced with next_p running on another core. 
* It may be exiting or it may have moved to a different @@ -5991,7 +5952,7 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, */ MEMORYSTATUS_DEBUG(1, "memorystatus: aggressive%d: rewinding band %d, %s(%d) moved or exiting.\n", - aggr_count, i, (*next_p->p_name ? next_p->p_name : "unknown"), next_p->p_pid); + aggr_count, i, (*next_p->p_name ? next_p->p_name : "unknown"), next_p->p_pid); next_p = memorystatus_get_first_proc_locked(&i, TRUE); continue; @@ -6001,28 +5962,28 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, next_p = memorystatus_get_next_proc_locked(&i, p, TRUE); if (p->p_memstat_effectivepriority > priority_max) { - /* + /* * Bail out of this killing spree if we have * reached beyond the priority_max jetsam band. - * That is, we kill up to and through the + * That is, we kill up to and through the * priority_max jetsam band. */ proc_list_unlock(); goto exit; } - + #if DEVELOPMENT || DEBUG activeProcess = p->p_memstat_state & P_MEMSTAT_FOREGROUND; procSuspendedForDiagnosis = p->p_memstat_state & P_MEMSTAT_DIAG_SUSPENDED; #endif /* DEVELOPMENT || DEBUG */ - + aPid = p->p_pid; aPid_ep = p->p_memstat_effectivepriority; if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED)) { continue; } - + #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) if ((memorystatus_jetsam_policy & kPolicyDiagnoseActive) && procSuspendedForDiagnosis) { printf("jetsam: continuing after ignoring proc suspended already for diagnosis - %d\n", aPid); @@ -6034,14 +5995,14 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, * Capture a snapshot if none exists. */ if (memorystatus_jetsam_snapshot_count == 0) { - memorystatus_init_jetsam_snapshot_locked(NULL,0); + memorystatus_init_jetsam_snapshot_locked(NULL, 0); new_snapshot = TRUE; } - - /* + + /* * Mark as terminated so that if exit1() indicates success, but the process (for example) - * is blocked in task_exception_notify(), it'll be skipped if encountered again - see - * . This is cheaper than examining P_LEXIT, which requires the + * is blocked in task_exception_notify(), it'll be skipped if encountered again - see + * . This is cheaper than examining P_LEXIT, which requires the * acquisition of the proc lock. */ p->p_memstat_state |= P_MEMSTAT_TERMINATED; @@ -6049,7 +6010,7 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, killtime = mach_absolute_time(); absolutetime_to_microtime(killtime, &tv_sec, &tv_usec); tv_msec = tv_usec / 1000; - + /* Shift queue, update stats */ memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime); @@ -6065,25 +6026,25 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, while (next_p && (proc_ref_locked(next_p) != next_p)) { proc_t temp_p; - /* - * We must have raced with next_p exiting on another core. - * Recover by getting the next eligible process in the band. - */ + /* + * We must have raced with next_p exiting on another core. + * Recover by getting the next eligible process in the band. + */ MEMORYSTATUS_DEBUG(1, "memorystatus: aggressive%d: skipping %d [%s] (exiting?)\n", - aggr_count, next_p->p_pid, (*next_p->p_name ? next_p->p_name : "(unknown)")); + aggr_count, next_p->p_pid, (*next_p->p_name ? 
next_p->p_name : "(unknown)")); temp_p = next_p; next_p = memorystatus_get_next_proc_locked(&i, temp_p, TRUE); - } + } } proc_list_unlock(); printf("%lu.%03d memorystatus: %s%d pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n", - (unsigned long)tv_sec, tv_msec, - ((aPid_ep == JETSAM_PRIORITY_IDLE) ? "killing_idle_process_aggressive" : "killing_top_process_aggressive"), - aggr_count, aPid, (*p->p_name ? p->p_name : "unknown"), - memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); + (unsigned long)tv_sec, tv_msec, + ((aPid_ep == JETSAM_PRIORITY_IDLE) ? "killing_idle_process_aggressive" : "killing_top_process_aggressive"), + aggr_count, aPid, (*p->p_name ? p->p_name : "unknown"), + memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); memorystatus_level_snapshot = memorystatus_level; @@ -6102,7 +6063,7 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, p = NULL; killed = FALSE; - /* + /* * Continue the killing spree. */ proc_list_lock(); @@ -6122,7 +6083,7 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, continue; } - + /* * Failure - first unwind the state, * then fall through to restart the search. @@ -6145,7 +6106,7 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, * We might have raced with "p" exiting on another core, resulting in no * ref on "p". Or, we may have failed to kill "p". * - * Either way, we fall thru to here, leaving the proc in the + * Either way, we fall thru to here, leaving the proc in the * P_MEMSTAT_TERMINATED or P_MEMSTAT_ERROR state. * * And, we hold the the proc_list_lock at this point. @@ -6153,27 +6114,26 @@ memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count, next_p = memorystatus_get_first_proc_locked(&i, TRUE); } - + proc_list_unlock(); - + exit: os_reason_free(jetsam_reason); /* Clear snapshot if freshly captured and no target was found */ if (new_snapshot && (kill_count == 0)) { - proc_list_lock(); - memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; - proc_list_unlock(); + proc_list_lock(); + memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; + proc_list_unlock(); } - + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, - memorystatus_available_pages, killed ? aPid : 0, kill_count, 0, 0); + memorystatus_available_pages, killed ? 
aPid : 0, kill_count, 0, 0); if (kill_count > 0) { - return(TRUE); - } - else { - return(FALSE); + return TRUE; + } else { + return FALSE; } } @@ -6187,15 +6147,15 @@ memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged) uint32_t aPid_ep; os_reason_t jetsam_reason = OS_REASON_NULL; KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_START, - memorystatus_available_pages, 0, 0, 0, 0); - + memorystatus_available_pages, 0, 0, 0, 0); + jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_HIGHWATER); if (jetsam_reason == OS_REASON_NULL) { printf("memorystatus_kill_hiwat_proc: failed to allocate exit reason\n"); } proc_list_lock(); - + next_p = memorystatus_get_first_proc_locked(&i, TRUE); while (next_p) { uint64_t footprint_in_bytes = 0; @@ -6204,21 +6164,21 @@ memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged) p = next_p; next_p = memorystatus_get_next_proc_locked(&i, p, TRUE); - + aPid = p->p_pid; aPid_ep = p->p_memstat_effectivepriority; - + if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED)) { continue; } - + /* skip if no limit set */ if (p->p_memstat_memlimit <= 0) { continue; } footprint_in_bytes = get_task_phys_footprint(p->task); - memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */ + memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */ skip = (footprint_in_bytes <= memlimit_in_bytes); #if CONFIG_JETSAM && (DEVELOPMENT || DEBUG) @@ -6235,19 +6195,18 @@ memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged) skip = TRUE; } else { skip = FALSE; - } + } } #endif if (skip) { continue; } else { - if (memorystatus_jetsam_snapshot_count == 0) { - memorystatus_init_jetsam_snapshot_locked(NULL,0); + memorystatus_init_jetsam_snapshot_locked(NULL, 0); new_snapshot = TRUE; } - + if (proc_ref_locked(p) == p) { /* * Mark as terminated so that if exit1() indicates success, but the process (for example) @@ -6269,7 +6228,7 @@ memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged) next_p = memorystatus_get_first_proc_locked(&i, TRUE); continue; } - + freed_mem = memorystatus_kill_proc(p, kMemorystatusKilledHiwat, jetsam_reason, &killed); /* purged and/or killed 'p' */ /* Success? */ @@ -6299,9 +6258,9 @@ memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged) next_p = memorystatus_get_first_proc_locked(&i, TRUE); } } - + proc_list_unlock(); - + exit: os_reason_free(jetsam_reason); @@ -6311,9 +6270,9 @@ exit: memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0; proc_list_unlock(); } - - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_END, - memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0); + + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_END, + memorystatus_available_pages, killed ? 
aPid : 0, 0, 0, 0); return killed; } @@ -6333,13 +6292,13 @@ memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, un int kill_count = 0; uint32_t aPid_ep; uint64_t killtime = 0; - clock_sec_t tv_sec; - clock_usec_t tv_usec; - uint32_t tv_msec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; + uint32_t tv_msec; KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START, - memorystatus_available_pages, 0, 0, 0, 0); + memorystatus_available_pages, 0, 0, 0, 0); #if CONFIG_FREEZE boolean_t consider_frozen_only = FALSE; @@ -6353,7 +6312,6 @@ memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, un next_p = memorystatus_get_first_proc_locked(&band, FALSE); while (next_p) { - p = next_p; next_p = memorystatus_get_next_proc_locked(&band, p, FALSE); @@ -6372,7 +6330,7 @@ memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, un } #if CONFIG_FREEZE - if (consider_frozen_only && ! (p->p_memstat_state & P_MEMSTAT_FROZEN)) { + if (consider_frozen_only && !(p->p_memstat_state & P_MEMSTAT_FROZEN)) { continue; } @@ -6383,13 +6341,13 @@ memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, un #if DEVELOPMENT || DEBUG MEMORYSTATUS_DEBUG(1, "jetsam: elevated%d process pid %d [%s] - memorystatus_available_pages: %d\n", - aggr_count, - aPid, (*p->p_name ? p->p_name : "unknown"), - memorystatus_available_pages); + aggr_count, + aPid, (*p->p_name ? p->p_name : "unknown"), + memorystatus_available_pages); #endif /* DEVELOPMENT || DEBUG */ if (memorystatus_jetsam_snapshot_count == 0) { - memorystatus_init_jetsam_snapshot_locked(NULL,0); + memorystatus_init_jetsam_snapshot_locked(NULL, 0); new_snapshot = TRUE; } @@ -6402,14 +6360,13 @@ memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, un memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime); if (proc_ref_locked(p) == p) { - proc_list_unlock(); - os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: killing_top_process_elevated%d pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n", - (unsigned long)tv_sec, tv_msec, - aggr_count, - aPid, (*p->p_name ? p->p_name : "unknown"), - memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); + os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: killing_top_process_elevated%d pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n", + (unsigned long)tv_sec, tv_msec, + aggr_count, + aPid, (*p->p_name ? p->p_name : "unknown"), + memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages); /* * memorystatus_do_kill drops a reference, so take another one so we can @@ -6465,13 +6422,14 @@ exit: } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END, - memorystatus_available_pages, killed ? aPid : 0, kill_count, 0, 0); + memorystatus_available_pages, killed ? aPid : 0, kill_count, 0, 0); - return (killed); + return killed; } -static boolean_t -memorystatus_kill_process_async(pid_t victim_pid, uint32_t cause) { +static boolean_t +memorystatus_kill_process_async(pid_t victim_pid, uint32_t cause) +{ /* * TODO: allow a general async path * @@ -6479,21 +6437,22 @@ memorystatus_kill_process_async(pid_t victim_pid, uint32_t cause) { * add the appropriate exit reason code mapping. 
*/ if ((victim_pid != -1) || - (cause != kMemorystatusKilledVMPageShortage && - cause != kMemorystatusKilledVMCompressorThrashing && - cause != kMemorystatusKilledVMCompressorSpaceShortage && - cause != kMemorystatusKilledFCThrashing && - cause != kMemorystatusKilledZoneMapExhaustion)) { + (cause != kMemorystatusKilledVMPageShortage && + cause != kMemorystatusKilledVMCompressorThrashing && + cause != kMemorystatusKilledVMCompressorSpaceShortage && + cause != kMemorystatusKilledFCThrashing && + cause != kMemorystatusKilledZoneMapExhaustion)) { return FALSE; } - + kill_under_pressure_cause = cause; memorystatus_thread_wake(); return TRUE; } boolean_t -memorystatus_kill_on_VM_compressor_space_shortage(boolean_t async) { +memorystatus_kill_on_VM_compressor_space_shortage(boolean_t async) +{ if (async) { return memorystatus_kill_process_async(-1, kMemorystatusKilledVMCompressorSpaceShortage); } else { @@ -6508,7 +6467,8 @@ memorystatus_kill_on_VM_compressor_space_shortage(boolean_t async) { #if CONFIG_JETSAM boolean_t -memorystatus_kill_on_VM_compressor_thrashing(boolean_t async) { +memorystatus_kill_on_VM_compressor_thrashing(boolean_t async) +{ if (async) { return memorystatus_kill_process_async(-1, kMemorystatusKilledVMCompressorThrashing); } else { @@ -6521,8 +6481,9 @@ memorystatus_kill_on_VM_compressor_thrashing(boolean_t async) { } } -boolean_t -memorystatus_kill_on_VM_page_shortage(boolean_t async) { +boolean_t +memorystatus_kill_on_VM_page_shortage(boolean_t async) +{ if (async) { return memorystatus_kill_process_async(-1, kMemorystatusKilledVMPageShortage); } else { @@ -6536,9 +6497,8 @@ memorystatus_kill_on_VM_page_shortage(boolean_t async) { } boolean_t -memorystatus_kill_on_FC_thrashing(boolean_t async) { - - +memorystatus_kill_on_FC_thrashing(boolean_t async) +{ if (async) { return memorystatus_kill_process_async(-1, kMemorystatusKilledFCThrashing); } else { @@ -6551,8 +6511,9 @@ memorystatus_kill_on_FC_thrashing(boolean_t async) { } } -boolean_t -memorystatus_kill_on_vnode_limit(void) { +boolean_t +memorystatus_kill_on_vnode_limit(void) +{ os_reason_t jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_VNODE); if (jetsam_reason == OS_REASON_NULL) { printf("memorystatus_kill_on_vnode_limit: failed to allocate jetsam reason\n"); @@ -6564,7 +6525,8 @@ memorystatus_kill_on_vnode_limit(void) { #endif /* CONFIG_JETSAM */ boolean_t -memorystatus_kill_on_zone_map_exhaustion(pid_t pid) { +memorystatus_kill_on_zone_map_exhaustion(pid_t pid) +{ boolean_t res = FALSE; if (pid == -1) { res = memorystatus_kill_process_async(-1, kMemorystatusKilledZoneMapExhaustion); @@ -6602,7 +6564,6 @@ memorystatus_freeze_init(void) result = kernel_thread_start(memorystatus_freeze_thread, NULL, &thread); if (result == KERN_SUCCESS) { - proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2); proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); thread_set_thread_name(thread, "VM_freezer"); @@ -6633,7 +6594,7 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) state = p->p_memstat_state; if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED | P_MEMSTAT_FREEZE_IGNORE)) || - !(state & P_MEMSTAT_SUSPENDED)) { + !(state & P_MEMSTAT_SUSPENDED)) { goto out; } @@ -6646,12 +6607,10 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) entry_count = (memorystatus_global_probabilities_size / sizeof(memorystatus_internal_probabilities_t)); if (entry_count) { - - for (i=0; i < 
entry_count; i++ ) { + for (i = 0; i < entry_count; i++) { if (strncmp(memorystatus_global_probabilities_table[i].proc_name, - p->p_name, - MAXCOMLEN + 1) == 0) { - + p->p_name, + MAXCOMLEN + 1) == 0) { probability_of_use = memorystatus_global_probabilities_table[i].use_probability; break; } @@ -6682,7 +6641,7 @@ memorystatus_freeze_process_sync(proc_t p) int ret = EINVAL; pid_t aPid = 0; boolean_t memorystatus_freeze_swap_low = FALSE; - int freezer_error_code = 0; + int freezer_error_code = 0; lck_mtx_lock(&freezer_mutex); @@ -6722,9 +6681,7 @@ memorystatus_freeze_process_sync(proc_t p) } if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { - max_pages = MIN(memorystatus_freeze_pages_max, memorystatus_freeze_budget_pages_remaining); - } else { /* * We only have the compressor without any swap. @@ -6737,26 +6694,25 @@ memorystatus_freeze_process_sync(proc_t p) proc_list_unlock(); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START, - memorystatus_available_pages, 0, 0, 0, 0); + memorystatus_available_pages, 0, 0, 0, 0); ret = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, &freezer_error_code, FALSE /* eval only */); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END, - memorystatus_available_pages, aPid, 0, 0, 0); + memorystatus_available_pages, aPid, 0, 0, 0); DTRACE_MEMORYSTATUS6(memorystatus_freeze, proc_t, p, unsigned int, memorystatus_available_pages, boolean_t, purgeable, unsigned int, wired, uint32_t, clean, uint32_t, dirty); MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_process_sync: task_freeze %s for pid %d [%s] - " - "memorystatus_pages: %d, purgeable: %d, wired: %d, clean: %d, dirty: %d, max_pages %d, shared %d\n", - (ret == KERN_SUCCESS) ? "SUCCEEDED" : "FAILED", aPid, (*p->p_name ? p->p_name : "(unknown)"), - memorystatus_available_pages, purgeable, wired, clean, dirty, max_pages, shared); + "memorystatus_pages: %d, purgeable: %d, wired: %d, clean: %d, dirty: %d, max_pages %d, shared %d\n", + (ret == KERN_SUCCESS) ? "SUCCEEDED" : "FAILED", aPid, (*p->p_name ? p->p_name : "(unknown)"), + memorystatus_available_pages, purgeable, wired, clean, dirty, max_pages, shared); proc_list_lock(); if (ret == KERN_SUCCESS) { - os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (specific) pid %d [%s]...done", - aPid, (*p->p_name ? p->p_name : "unknown")); + aPid, (*p->p_name ? p->p_name : "unknown")); memorystatus_freeze_entry_t data = { aPid, TRUE, dirty }; @@ -6780,9 +6736,8 @@ memorystatus_freeze_process_sync(proc_t p) memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data)); if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { - ret = memorystatus_update_inactive_jetsam_priority_band(p->p_pid, MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE, - memorystatus_freeze_jetsam_band, TRUE); + memorystatus_freeze_jetsam_band, TRUE); if (ret) { printf("Elevating the frozen process failed with %d\n", ret); @@ -6828,7 +6783,7 @@ memorystatus_freeze_process_sync(proc_t p) } os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (specific) pid %d [%s]...skipped (%s)", - aPid, (*p->p_name ? p->p_name : "unknown"), reason); + aPid, (*p->p_name ? 
p->p_name : "unknown"), reason); p->p_memstat_state |= P_MEMSTAT_FREEZE_IGNORE; } @@ -6864,15 +6819,15 @@ memorystatus_freeze_top_process(void) band = (unsigned int) memorystatus_freeze_jetsam_band; } - freeze_process: +freeze_process: next_p = memorystatus_get_first_proc_locked(&band, FALSE); while (next_p) { kern_return_t kr; uint32_t purgeable, wired, clean, dirty, shared; uint32_t max_pages = 0; - int freezer_error_code = 0; - + int freezer_error_code = 0; + p = next_p; next_p = memorystatus_get_next_proc_locked(&band, p, FALSE); @@ -6919,7 +6874,6 @@ memorystatus_freeze_top_process(void) p->p_memstat_state &= ~P_MEMSTAT_REFREEZE_ELIGIBLE; memorystatus_refreeze_eligible_count--; - } else { if (memorystatus_is_process_eligible_for_freeze(p) == FALSE) { continue; // with lock held @@ -6933,14 +6887,13 @@ memorystatus_freeze_top_process(void) */ max_pages = MIN(memorystatus_freeze_pages_max, memorystatus_freeze_budget_pages_remaining); - } else { /* * We only have the compressor pool. */ max_pages = UINT32_MAX - 1; } - + /* Mark as locked temporarily to avoid kill */ p->p_memstat_state |= P_MEMSTAT_LOCKED; @@ -6952,33 +6905,32 @@ memorystatus_freeze_top_process(void) proc_list_unlock(); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START, - memorystatus_available_pages, 0, 0, 0, 0); + memorystatus_available_pages, 0, 0, 0, 0); kr = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, &freezer_error_code, FALSE /* eval only */); - + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END, - memorystatus_available_pages, aPid, 0, 0, 0); + memorystatus_available_pages, aPid, 0, 0, 0); MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_top_process: task_freeze %s for pid %d [%s] - " - "memorystatus_pages: %d, purgeable: %d, wired: %d, clean: %d, dirty: %d, max_pages %d, shared %d\n", - (kr == KERN_SUCCESS) ? "SUCCEEDED" : "FAILED", aPid, (*p->p_name ? p->p_name : "(unknown)"), - memorystatus_available_pages, purgeable, wired, clean, dirty, max_pages, shared); - + "memorystatus_pages: %d, purgeable: %d, wired: %d, clean: %d, dirty: %d, max_pages %d, shared %d\n", + (kr == KERN_SUCCESS) ? "SUCCEEDED" : "FAILED", aPid, (*p->p_name ? p->p_name : "(unknown)"), + memorystatus_available_pages, purgeable, wired, clean, dirty, max_pages, shared); + proc_list_lock(); - + /* Success? */ if (KERN_SUCCESS == kr) { - if (refreeze_processes) { os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: Refreezing (general) pid %d [%s]...done", - aPid, (*p->p_name ? p->p_name : "unknown")); + aPid, (*p->p_name ? p->p_name : "unknown")); } else { os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (general) pid %d [%s]...done", - aPid, (*p->p_name ? p->p_name : "unknown")); + aPid, (*p->p_name ? 
p->p_name : "unknown")); } memorystatus_freeze_entry_t data = { aPid, TRUE, dirty }; - + p->p_memstat_freeze_sharedanon_pages += shared; memorystatus_frozen_shared_mb += shared; @@ -6999,7 +6951,6 @@ memorystatus_freeze_top_process(void) memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data)); if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { - ret = memorystatus_update_inactive_jetsam_priority_band(p->p_pid, MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE, memorystatus_freeze_jetsam_band, TRUE); if (ret) { @@ -7041,7 +6992,6 @@ memorystatus_freeze_top_process(void) break; } else { - p->p_memstat_state &= ~P_MEMSTAT_LOCKED; if (refreeze_processes == TRUE) { @@ -7084,7 +7034,7 @@ memorystatus_freeze_top_process(void) } os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (general) pid %d [%s]...skipped (%s)", - aPid, (*p->p_name ? p->p_name : "unknown"), reason); + aPid, (*p->p_name ? p->p_name : "unknown"), reason); if (vm_compressor_low_on_space() || vm_swap_low_on_space()) { break; @@ -7110,23 +7060,22 @@ memorystatus_freeze_top_process(void) goto freeze_process; } - + proc_list_unlock(); - + return ret; } -static inline boolean_t -memorystatus_can_freeze_processes(void) +static inline boolean_t +memorystatus_can_freeze_processes(void) { boolean_t ret; - + proc_list_lock(); - - if (memorystatus_suspended_count) { + if (memorystatus_suspended_count) { memorystatus_freeze_suspended_threshold = MIN(memorystatus_freeze_suspended_threshold, FREEZE_SUSPENDED_THRESHOLD_DEFAULT); - + if ((memorystatus_suspended_count - memorystatus_frozen_count) > memorystatus_freeze_suspended_threshold) { ret = TRUE; } else { @@ -7135,30 +7084,30 @@ memorystatus_can_freeze_processes(void) } else { ret = FALSE; } - + proc_list_unlock(); - + return ret; } -static boolean_t +static boolean_t memorystatus_can_freeze(boolean_t *memorystatus_freeze_swap_low) { boolean_t can_freeze = TRUE; /* Only freeze if we're sufficiently low on memory; this holds off freeze right - after boot, and is generally is a no-op once we've reached steady state. */ + * after boot, and is generally is a no-op once we've reached steady state. */ if (memorystatus_available_pages > memorystatus_freeze_threshold) { return FALSE; } - + /* Check minimum suspended process threshold. */ if (!memorystatus_can_freeze_processes()) { return FALSE; } assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); - if ( !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + if (!VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { /* * In-core compressor used for freezing WITHOUT on-disk swap support. */ @@ -7168,7 +7117,6 @@ memorystatus_can_freeze(boolean_t *memorystatus_freeze_swap_low) } can_freeze = FALSE; - } else { if (*memorystatus_freeze_swap_low) { *memorystatus_freeze_swap_low = FALSE; @@ -7189,9 +7137,8 @@ memorystatus_can_freeze(boolean_t *memorystatus_freeze_swap_low) can_freeze = FALSE; } - } - + return can_freeze; } @@ -7224,7 +7171,6 @@ memorystatus_demote_frozen_processes(void) next_p = memorystatus_get_first_proc_locked(&band, FALSE); while (next_p) { - p = next_p; next_p = memorystatus_get_next_proc_locked(&band, p, FALSE); @@ -7243,7 +7189,7 @@ memorystatus_demote_frozen_processes(void) memorystatus_update_priority_locked(p, JETSAM_PRIORITY_IDLE, TRUE, TRUE); #if DEVELOPMENT || DEBUG os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus_demote_frozen_process pid %d [%s]", - p->p_pid, (*p->p_name ? p->p_name : "unknown")); + p->p_pid, (*p->p_name ? 
p->p_name : "unknown")); #endif /* DEVELOPMENT || DEBUG */ /* @@ -7317,14 +7263,12 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) struct throttle_interval_t *interval = NULL; if (memorystatus_freeze_degradation == TRUE) { - interval = degraded_throttle_window; if (CMP_MACH_TIMESPEC(&ts, &interval->ts) >= 0) { memorystatus_freeze_degradation = FALSE; interval->pageouts = 0; interval->max_pageouts = 0; - } else { *budget_pages_allowed = interval->max_pageouts - interval->pageouts; } @@ -7363,7 +7307,6 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) *budget_pages_allowed = interval->max_pageouts; memorystatus_demote_frozen_processes(); - } else { /* * Current throttle window. @@ -7375,8 +7318,8 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) #if DEVELOPMENT || DEBUG /* - * This can only happen in the INTERNAL configs because we allow modifying the daily budget for testing. - */ + * This can only happen in the INTERNAL configs because we allow modifying the daily budget for testing. + */ if (freeze_daily_pageouts_max > interval->max_pageouts) { /* @@ -7388,17 +7331,13 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) #endif /* DEVELOPMENT || DEBUG */ if (memorystatus_freeze_degradation == FALSE) { - if (interval->pageouts >= interval->max_pageouts) { - *budget_pages_allowed = 0; - } else { - int budget_left = interval->max_pageouts - interval->pageouts; int budget_threshold = (freeze_daily_pageouts_max * FREEZE_DEGRADATION_BUDGET_THRESHOLD) / 100; - mach_timespec_t time_left = {0,0}; + mach_timespec_t time_left = {0, 0}; time_left.tv_sec = interval->ts.tv_sec; time_left.tv_nsec = 0; @@ -7406,7 +7345,6 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) SUB_MACH_TIMESPEC(&time_left, &ts); if (budget_left <= budget_threshold) { - /* * For the current normal window, calculate how much we would pageout in a DEGRADED_WINDOW_MINS duration. * And also calculate what we would pageout for the same DEGRADED_WINDOW_MINS duration if we had the full @@ -7422,7 +7360,6 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) */ if (current_budget_rate_allowed < normal_budget_rate_allowed) { - memorystatus_freeze_degradation = TRUE; degraded_throttle_window->max_pageouts = current_budget_rate_allowed; degraded_throttle_window->pageouts = 0; @@ -7441,8 +7378,8 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) } MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_update_throttle_interval: throttle updated - %d frozen (%d max) within %dm; %dm remaining; throttle %s\n", - interval->pageouts, interval->max_pageouts, interval->mins, (interval->ts.tv_sec - ts->tv_sec) / 60, - interval->throttle ? "on" : "off"); + interval->pageouts, interval->max_pageouts, interval->mins, (interval->ts.tv_sec - ts->tv_sec) / 60, + interval->throttle ? 
"on" : "off"); } static void @@ -7453,12 +7390,9 @@ memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused) lck_mtx_lock(&freezer_mutex); if (memorystatus_freeze_enabled) { - if ((memorystatus_frozen_count < memorystatus_frozen_processes_max) || (memorystatus_refreeze_eligible_count >= MIN_THAW_REFREEZE_THRESHOLD)) { - if (memorystatus_can_freeze(&memorystatus_freeze_swap_low)) { - /* Only freeze if we've not exceeded our pageout budgets.*/ memorystatus_freeze_update_throttle(&memorystatus_freeze_budget_pages_remaining); @@ -7480,7 +7414,7 @@ memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused) assert_wait((event_t) &memorystatus_freeze_wakeup, THREAD_UNINT); lck_mtx_unlock(&freezer_mutex); - thread_block((thread_continue_t) memorystatus_freeze_thread); + thread_block((thread_continue_t) memorystatus_freeze_thread); } static boolean_t @@ -7541,7 +7475,7 @@ sysctl_memorystatus_do_fastwake_warmup_all SYSCTL_HANDLER_ARGS return 0; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_do_fastwake_warmup_all, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_do_fastwake_warmup_all, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_memorystatus_do_fastwake_warmup_all, "I", ""); #endif /* CONFIG_FREEZE */ @@ -7551,7 +7485,8 @@ SYSCTL_PROC(_kern, OID_AUTO, memorystatus_do_fastwake_warmup_all, CTLTYPE_INT|CT #if CONFIG_MEMORYSTATUS static int -memorystatus_send_note(int event_code, void *data, size_t data_length) { +memorystatus_send_note(int event_code, void *data, size_t data_length) +{ int ret; struct kev_msg ev_msg; @@ -7569,13 +7504,13 @@ memorystatus_send_note(int event_code, void *data, size_t data_length) { if (ret) { printf("%s: kev_post_msg() failed, err %d\n", __func__, ret); } - + return ret; } boolean_t -memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t limit_exceeded) { - +memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t limit_exceeded) +{ boolean_t ret = FALSE; boolean_t found_knote = FALSE; struct knote *kn = NULL; @@ -7634,7 +7569,6 @@ memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused bool } #else /* CONFIG_EMBEDDED */ if (!limit_exceeded) { - /* * Processes on desktop are not expecting to handle a system-wide * critical or system-wide warning notification from this path. @@ -7645,7 +7579,7 @@ memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused bool */ if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) { - found_knote=TRUE; + found_knote = TRUE; if (!is_fatal) { /* * Restrict proc_limit_warn notifications when @@ -7653,28 +7587,28 @@ memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused bool */ if (is_active) { if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE) { - /* - * Mark this knote for delivery. - */ - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; + /* + * Mark this knote for delivery. + */ + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; /* * And suppress it from future notifications. - */ - kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE; + */ + kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE; send_knote_count++; - } + } } else { if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE) { - /* - * Mark this knote for delivery. - */ - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; + /* + * Mark this knote for delivery. 
+ */ + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; /* * And suppress it from future notifications. - */ - kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE; + */ + kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE; send_knote_count++; - } + } } } else { /* @@ -7706,7 +7640,6 @@ memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused bool kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE; kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; send_knote_count++; - } } else { if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE) { @@ -7763,18 +7696,19 @@ memorystatus_low_mem_privileged_listener(uint32_t op_flags) return EINVAL; } - return (task_low_mem_privileged_listener(current_task(), set_privilege, NULL)); + return task_low_mem_privileged_listener(current_task(), set_privilege, NULL); } int -memorystatus_send_pressure_note(pid_t pid) { - MEMORYSTATUS_DEBUG(1, "memorystatus_send_pressure_note(): pid %d\n", pid); - return memorystatus_send_note(kMemorystatusPressureNote, &pid, sizeof(pid)); +memorystatus_send_pressure_note(pid_t pid) +{ + MEMORYSTATUS_DEBUG(1, "memorystatus_send_pressure_note(): pid %d\n", pid); + return memorystatus_send_note(kMemorystatusPressureNote, &pid, sizeof(pid)); } void -memorystatus_send_low_swap_note(void) { - +memorystatus_send_low_swap_note(void) +{ struct knote *kn = NULL; memorystatus_klist_lock(); @@ -7793,16 +7727,17 @@ memorystatus_send_low_swap_note(void) { } boolean_t -memorystatus_bg_pressure_eligible(proc_t p) { - boolean_t eligible = FALSE; - +memorystatus_bg_pressure_eligible(proc_t p) +{ + boolean_t eligible = FALSE; + proc_list_lock(); - + MEMORYSTATUS_DEBUG(1, "memorystatus_bg_pressure_eligible: pid %d, state 0x%x\n", p->p_pid, p->p_memstat_state); - - /* Foreground processes have already been dealt with at this point, so just test for eligibility */ - if (!(p->p_memstat_state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_SUSPENDED | P_MEMSTAT_FROZEN))) { - eligible = TRUE; + + /* Foreground processes have already been dealt with at this point, so just test for eligibility */ + if (!(p->p_memstat_state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_SUSPENDED | P_MEMSTAT_FROZEN))) { + eligible = TRUE; } if (p->p_memstat_effectivepriority < JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC) { @@ -7816,25 +7751,27 @@ memorystatus_bg_pressure_eligible(proc_t p) { } proc_list_unlock(); - - return eligible; + + return eligible; } boolean_t -memorystatus_is_foreground_locked(proc_t p) { - return ((p->p_memstat_effectivepriority == JETSAM_PRIORITY_FOREGROUND) || - (p->p_memstat_effectivepriority == JETSAM_PRIORITY_FOREGROUND_SUPPORT)); +memorystatus_is_foreground_locked(proc_t p) +{ + return (p->p_memstat_effectivepriority == JETSAM_PRIORITY_FOREGROUND) || + (p->p_memstat_effectivepriority == JETSAM_PRIORITY_FOREGROUND_SUPPORT); } /* * This is meant for stackshot and kperf -- it does not take the proc_list_lock * to access the p_memstat_dirty field. 
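 *
 * Because no lock is held, callers get only a racy snapshot of the flags.
 * Illustrative use (hypothetical caller; suitable for reporting only):
 *
 *	boolean_t dirty, tracked, idle_exit;
 *	memorystatus_proc_flags_unsafe(p, &dirty, &tracked, &idle_exit);
 *	// Values may already be stale by the time they are consumed.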
*/ -void memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit) +void +memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit) { if (!v) { - *is_dirty = FALSE; - *is_dirty_tracked = FALSE; + *is_dirty = FALSE; + *is_dirty_tracked = FALSE; *allow_idle_exit = FALSE; } else { proc_t p = (proc_t)v; @@ -7850,21 +7787,21 @@ void memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is * Trigger levels to test the mechanism. * Can be used via a sysctl. */ -#define TEST_LOW_MEMORY_TRIGGER_ONE 1 -#define TEST_LOW_MEMORY_TRIGGER_ALL 2 -#define TEST_PURGEABLE_TRIGGER_ONE 3 -#define TEST_PURGEABLE_TRIGGER_ALL 4 -#define TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE 5 -#define TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL 6 +#define TEST_LOW_MEMORY_TRIGGER_ONE 1 +#define TEST_LOW_MEMORY_TRIGGER_ALL 2 +#define TEST_PURGEABLE_TRIGGER_ONE 3 +#define TEST_PURGEABLE_TRIGGER_ALL 4 +#define TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE 5 +#define TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL 6 -boolean_t memorystatus_manual_testing_on = FALSE; -vm_pressure_level_t memorystatus_manual_testing_level = kVMPressureNormal; +boolean_t memorystatus_manual_testing_on = FALSE; +vm_pressure_level_t memorystatus_manual_testing_level = kVMPressureNormal; extern struct knote * vm_pressure_select_optimal_candidate_to_notify(struct klist *, int, boolean_t); -#define VM_PRESSURE_NOTIFY_WAIT_PERIOD 10000 /* milliseconds */ +#define VM_PRESSURE_NOTIFY_WAIT_PERIOD 10000 /* milliseconds */ #if DEBUG #define VM_PRESSURE_DEBUG(cond, format, ...) \ @@ -7875,9 +7812,11 @@ do { \ #define VM_PRESSURE_DEBUG(cond, format, ...) #endif -#define INTER_NOTIFICATION_DELAY (250000) /* .25 second */ +#define INTER_NOTIFICATION_DELAY (250000) /* .25 second */ -void memorystatus_on_pageout_scan_end(void) { +void +memorystatus_on_pageout_scan_end(void) +{ /* No-op */ } @@ -7898,9 +7837,7 @@ boolean_t is_knote_registered_modify_task_pressure_bits(struct knote *kn_max, int knote_pressure_level, task_t task, vm_pressure_level_t pressure_level_to_clear, vm_pressure_level_t pressure_level_to_set) { if (kn_max->kn_sfflags & knote_pressure_level) { - if (pressure_level_to_clear && task_has_been_notified(task, pressure_level_to_clear) == TRUE) { - task_clear_has_been_notified(task, pressure_level_to_clear); } @@ -7917,10 +7854,9 @@ memorystatus_klist_reset_all_for_level(vm_pressure_level_t pressure_level_to_cle struct knote *kn = NULL; memorystatus_klist_lock(); - SLIST_FOREACH(kn, &memorystatus_klist, kn_selnext) { - - proc_t p = PROC_NULL; - struct task* t = TASK_NULL; + SLIST_FOREACH(kn, &memorystatus_klist, kn_selnext) { + proc_t p = PROC_NULL; + struct task* t = TASK_NULL; p = knote_get_kq(kn)->kq_p; proc_list_lock(); @@ -7953,11 +7889,13 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int static void vm_dispatch_memory_pressure(void); void consider_vm_pressure_events(void); -void consider_vm_pressure_events(void) +void +consider_vm_pressure_events(void) { vm_dispatch_memory_pressure(); } -static void vm_dispatch_memory_pressure(void) +static void +vm_dispatch_memory_pressure(void) { memorystatus_update_vm_pressure(FALSE); } @@ -7968,13 +7906,13 @@ convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t); struct knote * vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process) { - struct knote *kn = NULL, *kn_max = NULL; - 
uint64_t resident_max = 0; /* MB */ - struct timeval curr_tstamp = {0, 0}; - int elapsed_msecs = 0; - int selected_task_importance = 0; - static int pressure_snapshot = -1; - boolean_t pressure_increase = FALSE; + struct knote *kn = NULL, *kn_max = NULL; + uint64_t resident_max = 0; /* MB */ + struct timeval curr_tstamp = {0, 0}; + int elapsed_msecs = 0; + int selected_task_importance = 0; + static int pressure_snapshot = -1; + boolean_t pressure_increase = FALSE; if (pressure_snapshot == -1) { /* @@ -7983,7 +7921,6 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int pressure_snapshot = level; pressure_increase = TRUE; } else { - if (level && (level >= pressure_snapshot)) { pressure_increase = TRUE; } else { @@ -8009,14 +7946,13 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int microuptime(&curr_tstamp); - SLIST_FOREACH(kn, candidate_list, kn_selnext) { - - uint64_t resident_size = 0; /* MB */ - proc_t p = PROC_NULL; - struct task* t = TASK_NULL; - int curr_task_importance = 0; - boolean_t consider_knote = FALSE; - boolean_t privileged_listener = FALSE; + SLIST_FOREACH(kn, candidate_list, kn_selnext) { + uint64_t resident_size = 0; /* MB */ + proc_t p = PROC_NULL; + struct task* t = TASK_NULL; + int curr_task_importance = 0; + boolean_t consider_knote = FALSE; + boolean_t privileged_listener = FALSE; p = knote_get_kq(kn)->kq_p; proc_list_lock(); @@ -8068,14 +8004,11 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int * AND only if the pressure is increasing. */ if (level > 0) { - if (task_has_been_notified(t, level) == FALSE) { - /* * Is this a privileged listener? */ if (task_low_mem_privileged_listener(t, FALSE, &privileged_listener) == 0) { - if (privileged_listener) { kn_max = kn; proc_rele(p); @@ -8087,7 +8020,6 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int continue; } } else if (level == 0) { - /* * Task wasn't notified when the pressure was increasing and so * no need to notify it that the pressure is decreasing. @@ -8099,21 +8031,19 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int } /* - * We don't want a small process to block large processes from - * being notified again. - */ - resident_size = (get_task_phys_footprint(t))/(1024*1024ULL); /* MB */ - - if (resident_size >= vm_pressure_task_footprint_min) { + * We don't want a small process to block large processes from + * being notified again. + */ + resident_size = (get_task_phys_footprint(t)) / (1024 * 1024ULL); /* MB */ + if (resident_size >= vm_pressure_task_footprint_min) { if (level > 0) { /* * Warning or Critical Pressure. 
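 *
 * Policy recap as a sketch (restates the code below, adds nothing):
 * while pressure is increasing, prefer the candidate with the LOWEST
 * task importance; while decreasing, prefer the HIGHEST; ties go to
 * the larger resident footprint.
 *
 *	better = pressure_increase ? (imp < best_imp) : (imp > best_imp);
 *	if (better || (imp == best_imp && resident_size > resident_max)) {
 *		consider_knote = TRUE;
 *	}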
*/ - if (pressure_increase) { + if (pressure_increase) { if ((curr_task_importance < selected_task_importance) || ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) { - /* * We have found a candidate process which is: * a) at a lower importance than the current selected process @@ -8126,7 +8056,6 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int } else { if ((curr_task_importance > selected_task_importance) || ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) { - /* * We have found a candidate process which is: * a) at a higher importance than the current selected process @@ -8143,7 +8072,6 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int */ if ((curr_task_importance > selected_task_importance) || ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) { - consider_knote = TRUE; } } @@ -8154,12 +8082,12 @@ vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int selected_task_importance = curr_task_importance; consider_knote = FALSE; /* reset for the next candidate */ } - } else { - /* There was no candidate with enough resident memory to scavenge */ - VM_PRESSURE_DEBUG(0, "[vm_pressure] threshold failed for pid %d with %llu resident...\n", p->p_pid, resident_size); - } + } else { + /* There was no candidate with enough resident memory to scavenge */ + VM_PRESSURE_DEBUG(0, "[vm_pressure] threshold failed for pid %d with %llu resident...\n", p->p_pid, resident_size); + } proc_rele(p); - } + } done_scanning: if (kn_max) { @@ -8170,36 +8098,36 @@ done_scanning: return kn_max; } -#define VM_PRESSURE_DECREASED_SMOOTHING_PERIOD 5000 /* milliseconds */ -#define WARNING_NOTIFICATION_RESTING_PERIOD 25 /* seconds */ -#define CRITICAL_NOTIFICATION_RESTING_PERIOD 25 /* seconds */ +#define VM_PRESSURE_DECREASED_SMOOTHING_PERIOD 5000 /* milliseconds */ +#define WARNING_NOTIFICATION_RESTING_PERIOD 25 /* seconds */ +#define CRITICAL_NOTIFICATION_RESTING_PERIOD 25 /* seconds */ uint64_t next_warning_notification_sent_at_ts = 0; uint64_t next_critical_notification_sent_at_ts = 0; kern_return_t -memorystatus_update_vm_pressure(boolean_t target_foreground_process) -{ - struct knote *kn_max = NULL; - struct knote *kn_cur = NULL, *kn_temp = NULL; /* for safe list traversal */ - pid_t target_pid = -1; - struct klist dispatch_klist = { NULL }; - proc_t target_proc = PROC_NULL; - struct task *task = NULL; - boolean_t found_candidate = FALSE; - - static vm_pressure_level_t level_snapshot = kVMPressureNormal; - static vm_pressure_level_t prev_level_snapshot = kVMPressureNormal; - boolean_t smoothing_window_started = FALSE; - struct timeval smoothing_window_start_tstamp = {0, 0}; - struct timeval curr_tstamp = {0, 0}; - int elapsed_msecs = 0; - uint64_t curr_ts = mach_absolute_time(); +memorystatus_update_vm_pressure(boolean_t target_foreground_process) +{ + struct knote *kn_max = NULL; + struct knote *kn_cur = NULL, *kn_temp = NULL; /* for safe list traversal */ + pid_t target_pid = -1; + struct klist dispatch_klist = { NULL }; + proc_t target_proc = PROC_NULL; + struct task *task = NULL; + boolean_t found_candidate = FALSE; + + static vm_pressure_level_t level_snapshot = kVMPressureNormal; + static vm_pressure_level_t prev_level_snapshot = kVMPressureNormal; + boolean_t smoothing_window_started = FALSE; + struct timeval smoothing_window_start_tstamp = {0, 0}; + struct timeval curr_tstamp = {0, 0}; + int elapsed_msecs = 
0; + uint64_t curr_ts = mach_absolute_time(); #if !CONFIG_JETSAM -#define MAX_IDLE_KILLS 100 /* limit the number of idle kills allowed */ +#define MAX_IDLE_KILLS 100 /* limit the number of idle kills allowed */ - int idle_kill_counter = 0; + int idle_kill_counter = 0; /* * On desktop we take this opportunity to free up memory pressure @@ -8226,7 +8154,6 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) #endif /* !CONFIG_JETSAM */ if (level_snapshot != kVMPressureNormal) { - /* * Check to see if we are still in the 'resting' period * after having notified all clients interested in @@ -8236,7 +8163,6 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) level_snapshot = memorystatus_vm_pressure_level; if (level_snapshot == kVMPressureWarning || level_snapshot == kVMPressureUrgent) { - if (next_warning_notification_sent_at_ts) { if (curr_ts < next_warning_notification_sent_at_ts) { delay(INTER_NOTIFICATION_DELAY * 4 /* 1 sec */); @@ -8247,7 +8173,6 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) memorystatus_klist_reset_all_for_level(kVMPressureWarning); } } else if (level_snapshot == kVMPressureCritical) { - if (next_critical_notification_sent_at_ts) { if (curr_ts < next_critical_notification_sent_at_ts) { delay(INTER_NOTIFICATION_DELAY * 4 /* 1 sec */); @@ -8260,7 +8185,6 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) } while (1) { - /* * There is a race window here. But it's not clear * how much we benefit from having extra synchronization. @@ -8273,7 +8197,6 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) * and see if this condition stays. */ if (smoothing_window_started == FALSE) { - smoothing_window_started = TRUE; microuptime(&smoothing_window_start_tstamp); } @@ -8283,7 +8206,6 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000; if (elapsed_msecs < VM_PRESSURE_DECREASED_SMOOTHING_PERIOD) { - delay(INTER_NOTIFICATION_DELAY); continue; } @@ -8295,7 +8217,7 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) memorystatus_klist_lock(); kn_max = vm_pressure_select_optimal_candidate_to_notify(&memorystatus_klist, level_snapshot, target_foreground_process); - if (kn_max == NULL) { + if (kn_max == NULL) { memorystatus_klist_unlock(); /* @@ -8321,9 +8243,9 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) } return KERN_FAILURE; } - + target_proc = knote_get_kq(kn_max)->kq_p; - + proc_list_lock(); if (target_proc != proc_ref_locked(target_proc)) { target_proc = PROC_NULL; @@ -8332,21 +8254,18 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) continue; } proc_list_unlock(); - + target_pid = target_proc->p_pid; task = (struct task *)(target_proc->task); - - if (level_snapshot != kVMPressureNormal) { + if (level_snapshot != kVMPressureNormal) { if (level_snapshot == kVMPressureWarning || level_snapshot == kVMPressureUrgent) { - if (is_knote_registered_modify_task_pressure_bits(kn_max, NOTE_MEMORYSTATUS_PRESSURE_WARN, task, 0, kVMPressureWarning) == TRUE) { found_candidate = TRUE; } } else { if (level_snapshot == kVMPressureCritical) { - if (is_knote_registered_modify_task_pressure_bits(kn_max, NOTE_MEMORYSTATUS_PRESSURE_CRITICAL, task, 0, kVMPressureCritical) == TRUE) { found_candidate = TRUE; } @@ -8354,7 +8273,6 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) } } else { if (kn_max->kn_sfflags & 
NOTE_MEMORYSTATUS_PRESSURE_NORMAL) { - task_clear_has_been_notified(task, kVMPressureWarning); task_clear_has_been_notified(task, kVMPressureCritical); @@ -8369,7 +8287,6 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) } SLIST_FOREACH_SAFE(kn_cur, &memorystatus_klist, kn_selnext, kn_temp) { - int knote_pressure_level = convert_internal_pressure_level_to_dispatch_level(level_snapshot); if (is_knote_registered_modify_task_pressure_bits(kn_cur, knote_pressure_level, task, 0, level_snapshot) == TRUE) { @@ -8404,14 +8321,13 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) * No need for delays etc. */ } else { - uint32_t sleep_interval = INTER_NOTIFICATION_DELAY; #if CONFIG_JETSAM unsigned int page_delta = 0; unsigned int skip_delay_page_threshold = 0; assert(memorystatus_available_pages_pressure >= memorystatus_available_pages_critical_base); - + page_delta = (memorystatus_available_pages_pressure - memorystatus_available_pages_critical_base) / 2; skip_delay_page_threshold = memorystatus_available_pages_pressure - page_delta; @@ -8423,7 +8339,7 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) sleep_interval = 0; } #endif /* CONFIG_JETSAM */ - + if (sleep_interval) { delay(sleep_interval); } @@ -8436,31 +8352,30 @@ memorystatus_update_vm_pressure(boolean_t target_foreground_process) vm_pressure_level_t convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t internal_pressure_level) { - vm_pressure_level_t dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_NORMAL; - - switch (internal_pressure_level) { + vm_pressure_level_t dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_NORMAL; - case kVMPressureNormal: - { - dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_NORMAL; - break; - } + switch (internal_pressure_level) { + case kVMPressureNormal: + { + dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_NORMAL; + break; + } - case kVMPressureWarning: - case kVMPressureUrgent: - { - dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_WARN; - break; - } + case kVMPressureWarning: + case kVMPressureUrgent: + { + dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_WARN; + break; + } - case kVMPressureCritical: - { - dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_CRITICAL; - break; - } + case kVMPressureCritical: + { + dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_CRITICAL; + break; + } - default: - break; + default: + break; } return dispatch_level; @@ -8474,10 +8389,11 @@ sysctl_memorystatus_vm_pressure_level SYSCTL_HANDLER_ARGS int error = 0; error = priv_check_cred(kauth_cred_get(), PRIV_VM_PRESSURE, 0); - if (error) - return (error); + if (error) { + return error; + } -#endif /* CONFIG_EMBEDDED */ +#endif /* CONFIG_EMBEDDED */ vm_pressure_level_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(memorystatus_vm_pressure_level); return SYSCTL_OUT(req, &dispatch_level, sizeof(dispatch_level)); @@ -8485,12 +8401,12 @@ sysctl_memorystatus_vm_pressure_level SYSCTL_HANDLER_ARGS #if DEBUG || DEVELOPMENT -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED, +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &sysctl_memorystatus_vm_pressure_level, "I", ""); #else /* DEBUG || DEVELOPMENT */ -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED|CTLFLAG_MASKED, +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, 
&sysctl_memorystatus_vm_pressure_level, "I", ""); #endif /* DEBUG || DEVELOPMENT */ @@ -8509,13 +8425,13 @@ sysctl_memorypressure_manual_trigger SYSCTL_HANDLER_ARGS error = sysctl_handle_int(oidp, &level, 0, req); if (error || !req->newptr) { - return (error); + return error; } memorystatus_manual_testing_on = TRUE; trigger_request = (level >> 16) & 0xFFFF; - pressure_level = (level & 0xFFFF); + pressure_level = (level & 0xFFFF); if (trigger_request < TEST_LOW_MEMORY_TRIGGER_ONE || trigger_request > TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL) { @@ -8536,17 +8452,12 @@ sysctl_memorypressure_manual_trigger SYSCTL_HANDLER_ARGS * So we translate those events to our internal levels here. */ if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_NORMAL) { - memorystatus_manual_testing_level = kVMPressureNormal; force_purge = 0; - } else if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_WARN) { - memorystatus_manual_testing_level = kVMPressureWarning; force_purge = vm_pageout_state.memorystatus_purge_on_warning; - } else if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) { - memorystatus_manual_testing_level = kVMPressureCritical; force_purge = vm_pageout_state.memorystatus_purge_on_critical; } @@ -8569,24 +8480,24 @@ sysctl_memorypressure_manual_trigger SYSCTL_HANDLER_ARGS /* no purging requested */ break; } - while (vm_purgeable_object_purge_one_unlocked(force_purge)); + while (vm_purgeable_object_purge_one_unlocked(force_purge)) { + ; + } break; } if ((trigger_request == TEST_LOW_MEMORY_TRIGGER_ONE) || (trigger_request == TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE)) { - memorystatus_update_vm_pressure(TRUE); } if ((trigger_request == TEST_LOW_MEMORY_TRIGGER_ALL) || (trigger_request == TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL)) { - while (memorystatus_update_vm_pressure(FALSE) == KERN_SUCCESS) { continue; } } - + if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_NORMAL) { memorystatus_manual_testing_on = FALSE; } @@ -8594,42 +8505,42 @@ sysctl_memorypressure_manual_trigger SYSCTL_HANDLER_ARGS return 0; } -SYSCTL_PROC(_kern, OID_AUTO, memorypressure_manual_trigger, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, +SYSCTL_PROC(_kern, OID_AUTO, memorypressure_manual_trigger, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_memorypressure_manual_trigger, "I", ""); -SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_warning, CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_warning, 0, ""); -SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_urgent, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_urgent, 0, ""); -SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_critical, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_critical, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_warning, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_warning, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_urgent, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_urgent, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_critical, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.memorystatus_purge_on_critical, 0, ""); #if DEBUG || DEVELOPMENT -SYSCTL_UINT(_kern, OID_AUTO, memorystatus_vm_pressure_events_enabled, CTLFLAG_RW|CTLFLAG_LOCKED, &vm_pressure_events_enabled, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, memorystatus_vm_pressure_events_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pressure_events_enabled, 0, ""); #endif #endif /* 
VM_PRESSURE_EVENTS */ /* Return both allocated and actual size, since there's a race between allocation and list compilation */ static int -memorystatus_get_priority_list(memorystatus_priority_entry_t **list_ptr, size_t *buffer_size, size_t *list_size, boolean_t size_only) +memorystatus_get_priority_list(memorystatus_priority_entry_t **list_ptr, size_t *buffer_size, size_t *list_size, boolean_t size_only) { - uint32_t list_count, i = 0; + uint32_t list_count, i = 0; memorystatus_priority_entry_t *list_entry; proc_t p; - list_count = memorystatus_list_count; + list_count = memorystatus_list_count; *list_size = sizeof(memorystatus_priority_entry_t) * list_count; /* Just a size check? */ if (size_only) { return 0; } - + /* Otherwise, validate the size of the buffer */ if (*buffer_size < *list_size) { return EINVAL; } - *list_ptr = (memorystatus_priority_entry_t*)kalloc(*list_size); + *list_ptr = (memorystatus_priority_entry_t*)kalloc(*list_size); if (!*list_ptr) { return ENOMEM; } @@ -8650,62 +8561,64 @@ memorystatus_get_priority_list(memorystatus_priority_entry_t **list_ptr, size_t list_entry->user_data = p->p_memstat_userdata; if (p->p_memstat_memlimit <= 0) { - task_get_phys_footprint_limit(p->task, &list_entry->limit); - } else { - list_entry->limit = p->p_memstat_memlimit; - } + task_get_phys_footprint_limit(p->task, &list_entry->limit); + } else { + list_entry->limit = p->p_memstat_memlimit; + } list_entry->state = memorystatus_build_state(p); list_entry++; *list_size += sizeof(memorystatus_priority_entry_t); - + p = memorystatus_get_next_proc_locked(&i, p, TRUE); } - + proc_list_unlock(); - + MEMORYSTATUS_DEBUG(1, "memorystatus_get_priority_list: returning %lu for size\n", (unsigned long)*list_size); - + return 0; } static int -memorystatus_get_priority_pid(pid_t pid, user_addr_t buffer, size_t buffer_size) { - int error = 0; - memorystatus_priority_entry_t mp_entry; +memorystatus_get_priority_pid(pid_t pid, user_addr_t buffer, size_t buffer_size) +{ + int error = 0; + memorystatus_priority_entry_t mp_entry; - /* Validate inputs */ - if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(memorystatus_priority_entry_t))) { - return EINVAL; - } + /* Validate inputs */ + if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(memorystatus_priority_entry_t))) { + return EINVAL; + } proc_t p = proc_find(pid); - if (!p) { - return ESRCH; - } + if (!p) { + return ESRCH; + } - memset (&mp_entry, 0, sizeof(memorystatus_priority_entry_t)); + memset(&mp_entry, 0, sizeof(memorystatus_priority_entry_t)); - mp_entry.pid = p->p_pid; - mp_entry.priority = p->p_memstat_effectivepriority; - mp_entry.user_data = p->p_memstat_userdata; - if (p->p_memstat_memlimit <= 0) { - task_get_phys_footprint_limit(p->task, &mp_entry.limit); - } else { - mp_entry.limit = p->p_memstat_memlimit; - } - mp_entry.state = memorystatus_build_state(p); + mp_entry.pid = p->p_pid; + mp_entry.priority = p->p_memstat_effectivepriority; + mp_entry.user_data = p->p_memstat_userdata; + if (p->p_memstat_memlimit <= 0) { + task_get_phys_footprint_limit(p->task, &mp_entry.limit); + } else { + mp_entry.limit = p->p_memstat_memlimit; + } + mp_entry.state = memorystatus_build_state(p); - proc_rele(p); + proc_rele(p); - error = copyout(&mp_entry, buffer, buffer_size); + error = copyout(&mp_entry, buffer, buffer_size); - return (error); + return error; } static int -memorystatus_cmd_get_priority_list(pid_t pid, user_addr_t buffer, size_t buffer_size, int32_t *retval) { +memorystatus_cmd_get_priority_list(pid_t 
pid, user_addr_t buffer, size_t buffer_size, int32_t *retval) +{ int error = 0; boolean_t size_only; size_t list_size; @@ -8713,7 +8626,7 @@ memorystatus_cmd_get_priority_list(pid_t pid, user_addr_t buffer, size_t buffer_ /* * When a non-zero pid is provided, the 'list' has only one entry. */ - + size_only = ((buffer == USER_ADDR_NULL) ? TRUE: FALSE); if (pid != 0) { @@ -8740,19 +8653,19 @@ memorystatus_cmd_get_priority_list(pid_t pid, user_addr_t buffer, size_t buffer_ *retval = list_size; } - return (error); + return error; } -static void +static void memorystatus_clear_errors(void) { proc_t p; unsigned int i = 0; KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_CLEAR_ERRORS) | DBG_FUNC_START, 0, 0, 0, 0, 0); - + proc_list_lock(); - + p = memorystatus_get_first_proc_locked(&i, TRUE); while (p) { if (p->p_memstat_state & P_MEMSTAT_ERROR) { @@ -8760,7 +8673,7 @@ memorystatus_clear_errors(void) } p = memorystatus_get_next_proc_locked(&i, p, TRUE); } - + proc_list_unlock(); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_CLEAR_ERRORS) | DBG_FUNC_END, 0, 0, 0, 0, 0); @@ -8768,8 +8681,8 @@ memorystatus_clear_errors(void) #if CONFIG_JETSAM static void -memorystatus_update_levels_locked(boolean_t critical_only) { - +memorystatus_update_levels_locked(boolean_t critical_only) +{ memorystatus_available_pages_critical = memorystatus_available_pages_critical_base; /* @@ -8780,8 +8693,8 @@ memorystatus_update_levels_locked(boolean_t critical_only) { if (first_bucket->count) { memorystatus_available_pages_critical += memorystatus_available_pages_critical_idle_offset; - if (memorystatus_available_pages_critical > memorystatus_available_pages_pressure ) { - /* + if (memorystatus_available_pages_critical > memorystatus_available_pages_pressure) { + /* * The critical threshold must never exceed the pressure threshold */ memorystatus_available_pages_critical = memorystatus_available_pages_pressure; @@ -8792,8 +8705,8 @@ memorystatus_update_levels_locked(boolean_t critical_only) { if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) { memorystatus_available_pages_critical += memorystatus_jetsam_policy_offset_pages_diagnostic; - if (memorystatus_available_pages_critical > memorystatus_available_pages_pressure ) { - /* + if (memorystatus_available_pages_critical > memorystatus_available_pages_pressure) { + /* * The critical threshold must never exceed the pressure threshold */ memorystatus_available_pages_critical = memorystatus_available_pages_pressure; @@ -8808,7 +8721,7 @@ memorystatus_update_levels_locked(boolean_t critical_only) { if (critical_only) { return; } - + #if VM_PRESSURE_EVENTS memorystatus_available_pages_pressure = (pressure_threshold_percentage / delta_percentage) * memorystatus_delta; #if DEBUG || DEVELOPMENT @@ -8823,20 +8736,23 @@ void memorystatus_fast_jetsam_override(boolean_t enable_override) { /* If fast jetsam is not enabled, simply return */ - if (!fast_jetsam_enabled) + if (!fast_jetsam_enabled) { return; + } if (enable_override) { - if ((memorystatus_jetsam_policy & kPolicyMoreFree) == kPolicyMoreFree) + if ((memorystatus_jetsam_policy & kPolicyMoreFree) == kPolicyMoreFree) { return; + } proc_list_lock(); memorystatus_jetsam_policy |= kPolicyMoreFree; memorystatus_thread_pool_max(); memorystatus_update_levels_locked(TRUE); proc_list_unlock(); } else { - if ((memorystatus_jetsam_policy & kPolicyMoreFree) == 0) + if ((memorystatus_jetsam_policy & kPolicyMoreFree) == 0) { return; + } proc_list_lock(); memorystatus_jetsam_policy &= ~kPolicyMoreFree; 
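		/*
		 * Override cleared: shrink the jetsam thread pool back to its
		 * default size and recompute the kill thresholds.
		 */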
memorystatus_thread_pool_default(); @@ -8861,8 +8777,9 @@ sysctl_kern_memorystatus_policy_more_free SYSCTL_HANDLER_ARGS */ error = sysctl_handle_int(oidp, &more_free, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } if (more_free) { memorystatus_fast_jetsam_override(true); @@ -8872,7 +8789,7 @@ sysctl_kern_memorystatus_policy_more_free SYSCTL_HANDLER_ARGS return 0; } -SYSCTL_PROC(_kern, OID_AUTO, memorystatus_policy_more_free, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_policy_more_free, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_kern_memorystatus_policy_more_free, "I", ""); #endif /* CONFIG_JETSAM */ @@ -8881,7 +8798,8 @@ SYSCTL_PROC(_kern, OID_AUTO, memorystatus_policy_more_free, CTLTYPE_INT|CTLFLAG_ * Get the at_boot snapshot */ static int -memorystatus_get_at_boot_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) { +memorystatus_get_at_boot_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) +{ size_t input_size = *snapshot_size; /* @@ -8907,7 +8825,7 @@ memorystatus_get_at_boot_snapshot(memorystatus_jetsam_snapshot_t **snapshot, siz *snapshot = &memorystatus_at_boot_snapshot; MEMORYSTATUS_DEBUG(7, "memorystatus_get_at_boot_snapshot: returned inputsize (%ld), snapshot_size(%ld), listcount(%d)\n", - (long)input_size, (long)*snapshot_size, 0); + (long)input_size, (long)*snapshot_size, 0); return 0; } @@ -8915,7 +8833,8 @@ memorystatus_get_at_boot_snapshot(memorystatus_jetsam_snapshot_t **snapshot, siz * Get the previous fully populated snapshot */ static int -memorystatus_get_jetsam_snapshot_copy(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) { +memorystatus_get_jetsam_snapshot_copy(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) +{ size_t input_size = *snapshot_size; if (memorystatus_jetsam_snapshot_copy_count > 0) { @@ -8935,16 +8854,17 @@ memorystatus_get_jetsam_snapshot_copy(memorystatus_jetsam_snapshot_t **snapshot, *snapshot = memorystatus_jetsam_snapshot_copy; MEMORYSTATUS_DEBUG(7, "memorystatus_get_jetsam_snapshot_copy: returned inputsize (%ld), snapshot_size(%ld), listcount(%ld)\n", - (long)input_size, (long)*snapshot_size, (long)memorystatus_jetsam_snapshot_copy_count); + (long)input_size, (long)*snapshot_size, (long)memorystatus_jetsam_snapshot_copy_count); return 0; } static int -memorystatus_get_on_demand_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) { +memorystatus_get_on_demand_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) +{ size_t input_size = *snapshot_size; uint32_t ods_list_count = memorystatus_list_count; - memorystatus_jetsam_snapshot_t *ods = NULL; /* The on_demand snapshot buffer */ + memorystatus_jetsam_snapshot_t *ods = NULL; /* The on_demand snapshot buffer */ *snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + (sizeof(memorystatus_jetsam_snapshot_entry_t) * (ods_list_count)); @@ -8967,7 +8887,7 @@ memorystatus_get_on_demand_snapshot(memorystatus_jetsam_snapshot_t **snapshot, s */ ods = (memorystatus_jetsam_snapshot_t *)kalloc(*snapshot_size); if (!ods) { - return (ENOMEM); + return ENOMEM; } memset(ods, 0, *snapshot_size); @@ -8985,13 +8905,14 @@ memorystatus_get_on_demand_snapshot(memorystatus_jetsam_snapshot_t **snapshot, s *snapshot = ods; 
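	/*
	 * Illustrative user-space consumer of the size-probe convention used
	 * here (sketch; assumes the private memorystatus_control() syscall and
	 * the MEMORYSTATUS_SNAPSHOT_ON_DEMAND flag; error handling elided):
	 *
	 *	// First call with a NULL buffer returns the required size.
	 *	int sz = memorystatus_control(MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT,
	 *	    0, MEMORYSTATUS_SNAPSHOT_ON_DEMAND, NULL, 0);
	 *	memorystatus_jetsam_snapshot_t *snap = malloc(sz);
	 *	// Second call fills the buffer just sized.
	 *	memorystatus_control(MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT,
	 *	    0, MEMORYSTATUS_SNAPSHOT_ON_DEMAND, snap, sz);
	 */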
MEMORYSTATUS_DEBUG(7, "memorystatus_get_on_demand_snapshot: returned inputsize (%ld), snapshot_size(%ld), listcount(%ld)\n", - (long)input_size, (long)*snapshot_size, (long)ods_list_count); - + (long)input_size, (long)*snapshot_size, (long)ods_list_count); + return 0; } static int -memorystatus_get_jetsam_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) { +memorystatus_get_jetsam_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) +{ size_t input_size = *snapshot_size; if (memorystatus_jetsam_snapshot_count > 0) { @@ -9011,14 +8932,15 @@ memorystatus_get_jetsam_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size *snapshot = memorystatus_jetsam_snapshot; MEMORYSTATUS_DEBUG(7, "memorystatus_get_jetsam_snapshot: returned inputsize (%ld), snapshot_size(%ld), listcount(%ld)\n", - (long)input_size, (long)*snapshot_size, (long)memorystatus_jetsam_snapshot_count); + (long)input_size, (long)*snapshot_size, (long)memorystatus_jetsam_snapshot_count); return 0; } static int -memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t buffer_size, int32_t *retval) { +memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t buffer_size, int32_t *retval) +{ int error = EINVAL; boolean_t size_only; boolean_t is_default_snapshot = FALSE; @@ -9054,7 +8976,6 @@ memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t b * an on_demand snapshot buffer, which is freed below. */ error = memorystatus_get_on_demand_snapshot(&snapshot, &buffer_size, size_only); - } else if (flags & MEMORYSTATUS_SNAPSHOT_AT_BOOT) { is_at_boot_snapshot = TRUE; error = memorystatus_get_at_boot_snapshot(&snapshot, &buffer_size, size_only); @@ -9105,7 +9026,7 @@ memorystatus_cmd_get_jetsam_snapshot(int32_t flags, user_addr_t buffer, size_t b * The on_demand snapshot is always freed, * even if the copyout failed. */ - if(snapshot) { + if (snapshot) { kfree(snapshot, buffer_size); } } @@ -9119,7 +9040,7 @@ out: } /* - * Routine: memorystatus_cmd_grp_set_priorities + * Routine: memorystatus_cmd_grp_set_priorities * Purpose: Update priorities for a group of processes. * * [priority] @@ -9133,7 +9054,7 @@ out: * [17 | p55, p67, p19 ] * [12 | p103 p10 ] * [ 7 | p25 ] - * [ 0 | p71, p82, ] + * [ 0 | p71, p82, ] * * after [ new band | pid] * [ xxx | p71, p82, p25, p103, p10, p55, p67, p19, p101] @@ -9156,7 +9077,6 @@ out: static int memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) { - /* * We only handle setting priority * per process @@ -9180,7 +9100,7 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) uint32_t bucket_index = 0; boolean_t head_insert; int32_t new_priority; - + proc_t p; /* Verify inputs */ @@ -9211,8 +9131,8 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) error = EINVAL; goto out; } - - for (i=0; i < entry_count; i++) { + + for (i = 0; i < entry_count; i++) { if (entries[i].priority == -1) { /* Use as shorthand for default priority */ entries[i].priority = JETSAM_PRIORITY_DEFAULT; @@ -9220,7 +9140,7 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) /* Both the aging bands are reserved for internal use; * if requested, adjust to JETSAM_PRIORITY_IDLE. 
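 *
 * Net normalization applied per entry (recap of the surrounding checks):
 *
 *	-1				-> JETSAM_PRIORITY_DEFAULT
 *	either aging band (internal)	-> JETSAM_PRIORITY_IDLE
 *	JETSAM_PRIORITY_IDLE_HEAD	-> idle band, inserted at head
 *	out-of-range priority		-> rejected with EINVAL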
*/ entries[i].priority = JETSAM_PRIORITY_IDLE; - } else if (entries[i].priority == JETSAM_PRIORITY_IDLE_HEAD) { + } else if (entries[i].priority == JETSAM_PRIORITY_IDLE_HEAD) { /* JETSAM_PRIORITY_IDLE_HEAD inserts at the head of the idle * queue */ /* Deal with this later */ @@ -9232,7 +9152,7 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) } table_size = sizeof(memorystatus_internal_properties_t) * entry_count; - if ( (table = (memorystatus_internal_properties_t *)kalloc(table_size)) == NULL) { + if ((table = (memorystatus_internal_properties_t *)kalloc(table_size)) == NULL) { error = ENOMEM; goto out; } @@ -9247,14 +9167,14 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) * to highest priority. */ - bucket_index=0; - + bucket_index = 0; + proc_list_lock(); /* Create the ordered table */ - p = memorystatus_get_first_proc_locked(&bucket_index, TRUE); + p = memorystatus_get_first_proc_locked(&bucket_index, TRUE); while (p && (table_count < entry_count)) { - for (i=0; i < entry_count; i++ ) { + for (i = 0; i < entry_count; i++) { if (p->p_pid == entries[i].pid) { /* Build the table data */ table[table_count].proc = p; @@ -9265,9 +9185,9 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) } p = memorystatus_get_next_proc_locked(&bucket_index, p, TRUE); } - + /* We now have ordered list of procs ready to move */ - for (i=0; i < table_count; i++) { + for (i = 0; i < table_count; i++) { p = table[i].proc; assert(p != NULL); @@ -9279,7 +9199,7 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) new_priority = table[i].priority; head_insert = false; } - + /* Not allowed */ if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { continue; @@ -9305,13 +9225,15 @@ memorystatus_cmd_grp_set_priorities(user_addr_t buffer, size_t buffer_size) */ out: KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY, entry_count, table_count, 0, 0); - - if (entries) + + if (entries) { kfree(entries, buffer_size); - if (table) + } + if (table) { kfree(table, table_size); + } - return (error); + return error; } static int @@ -9351,14 +9273,14 @@ memorystatus_cmd_grp_set_probabilities(user_addr_t buffer, size_t buffer_size) error = EINVAL; goto out; } - + /* Verify sanity of input priorities */ - for (i=0; i < entry_count; i++) { + for (i = 0; i < entry_count; i++) { /* * 0 - low probability of use. * 1 - high probability of use. * - * Keeping this field an int (& not a bool) to allow + * Keeping this field an int (& not a bool) to allow * us to experiment with different values/approaches * later on. 
*/ @@ -9370,7 +9292,7 @@ memorystatus_cmd_grp_set_probabilities(user_addr_t buffer, size_t buffer_size) tmp_table_new_size = sizeof(memorystatus_internal_probabilities_t) * entry_count; - if ( (tmp_table_new = (memorystatus_internal_probabilities_t *) kalloc(tmp_table_new_size)) == NULL) { + if ((tmp_table_new = (memorystatus_internal_probabilities_t *) kalloc(tmp_table_new_size)) == NULL) { error = ENOMEM; goto out; } @@ -9387,14 +9309,14 @@ memorystatus_cmd_grp_set_probabilities(user_addr_t buffer, size_t buffer_size) memorystatus_global_probabilities_size = tmp_table_new_size; tmp_table_new = NULL; - for (i=0; i < entry_count; i++ ) { + for (i = 0; i < entry_count; i++) { /* Build the table data */ strlcpy(memorystatus_global_probabilities_table[i].proc_name, entries[i].proc_name, MAXCOMLEN + 1); memorystatus_global_probabilities_table[i].use_probability = entries[i].use_probability; } proc_list_unlock(); - + out: KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_GRP_SET_PROP) | DBG_FUNC_END, MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY, entry_count, tmp_table_new_size, 0, 0); @@ -9408,8 +9330,7 @@ out: tmp_table_old = NULL; } - return (error); - + return error; } static int @@ -9418,13 +9339,9 @@ memorystatus_cmd_grp_set_properties(int32_t flags, user_addr_t buffer, size_t bu int error = 0; if ((flags & MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY) == MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY) { - error = memorystatus_cmd_grp_set_priorities(buffer, buffer_size); - } else if ((flags & MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY) == MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY) { - error = memorystatus_cmd_grp_set_probabilities(buffer, buffer_size); - } else { error = EINVAL; } @@ -9437,9 +9354,10 @@ memorystatus_cmd_grp_set_properties(int32_t flags, user_addr_t buffer, size_t bu * It is not used for the setting of memory limits, which is why the last 6 args to the * memorystatus_update() call are 0 or FALSE. */ - + static int -memorystatus_cmd_set_priority_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) { +memorystatus_cmd_set_priority_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) +{ int error = 0; memorystatus_priority_properties_t mpp_entry; @@ -9447,31 +9365,32 @@ memorystatus_cmd_set_priority_properties(pid_t pid, user_addr_t buffer, size_t b if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(memorystatus_priority_properties_t))) { return EINVAL; } - + error = copyin(buffer, &mpp_entry, buffer_size); if (error == 0) { proc_t p; - + p = proc_find(pid); if (!p) { return ESRCH; } - + if (p->p_memstat_state & P_MEMSTAT_INTERNAL) { proc_rele(p); return EPERM; } - + error = memorystatus_update(p, mpp_entry.priority, mpp_entry.user_data, FALSE, FALSE, 0, 0, FALSE, FALSE); proc_rele(p); } - - return(error); + + return error; } static int -memorystatus_cmd_set_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) { +memorystatus_cmd_set_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) +{ int error = 0; memorystatus_memlimit_properties_t mmp_entry; @@ -9486,7 +9405,7 @@ memorystatus_cmd_set_memlimit_properties(pid_t pid, user_addr_t buffer, size_t b error = memorystatus_set_memlimit_properties(pid, &mmp_entry); } - return(error); + return error; } /* @@ -9497,7 +9416,8 @@ memorystatus_cmd_set_memlimit_properties(pid_t pid, user_addr_t buffer, size_t b * to the task's ledgers via task_set_phys_footprint_limit(). 
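 *
 * A minimal userspace sketch of reading these properties (illustrative
 * only; assumes the memorystatus_control() wrapper, and that the buffer
 * size matches the struct exactly, as the validation below requires):
 */
#if 0
#include <sys/kern_memorystatus.h>

static int
get_memlimit_props(pid_t pid, memorystatus_memlimit_properties_t *out)
{
	/* buffer_size != sizeof(memorystatus_memlimit_properties_t) is rejected */
	return memorystatus_control(MEMORYSTATUS_CMD_GET_MEMLIMIT_PROPERTIES,
	    pid, 0, out, sizeof(*out));
}
#endif
/*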
*/ static int -memorystatus_cmd_get_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) { +memorystatus_cmd_get_memlimit_properties(pid_t pid, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) +{ int error = 0; memorystatus_memlimit_properties_t mmp_entry; @@ -9506,7 +9426,7 @@ memorystatus_cmd_get_memlimit_properties(pid_t pid, user_addr_t buffer, size_t b return EINVAL; } - memset (&mmp_entry, 0, sizeof(memorystatus_memlimit_properties_t)); + memset(&mmp_entry, 0, sizeof(memorystatus_memlimit_properties_t)); proc_t p = proc_find(pid); if (!p) { @@ -9518,7 +9438,7 @@ memorystatus_cmd_get_memlimit_properties(pid_t pid, user_addr_t buffer, size_t b * No locks taken since we hold a reference to the proc. */ - if (p->p_memstat_memlimit_active > 0 ) { + if (p->p_memstat_memlimit_active > 0) { mmp_entry.memlimit_active = p->p_memstat_memlimit_active; } else { task_convert_phys_footprint_limit(-1, &mmp_entry.memlimit_active); @@ -9543,7 +9463,7 @@ memorystatus_cmd_get_memlimit_properties(pid_t pid, user_addr_t buffer, size_t b error = copyout(&mmp_entry, buffer, buffer_size); - return(error); + return error; } @@ -9555,7 +9475,8 @@ memorystatus_cmd_get_memlimit_properties(pid_t pid, user_addr_t buffer, size_t b * The delta is returned as bytes in excess or zero. */ static int -memorystatus_cmd_get_memlimit_excess_np(pid_t pid, uint32_t flags, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) { +memorystatus_cmd_get_memlimit_excess_np(pid_t pid, uint32_t flags, user_addr_t buffer, size_t buffer_size, __unused int32_t *retval) +{ int error = 0; uint64_t footprint_in_bytes = 0; uint64_t delta_in_bytes = 0; @@ -9564,7 +9485,7 @@ memorystatus_cmd_get_memlimit_excess_np(pid_t pid, uint32_t flags, user_addr_t b /* Validate inputs */ if ((pid == 0) || (buffer == USER_ADDR_NULL) || (buffer_size != sizeof(uint64_t)) || (flags != 0)) { - return EINVAL; + return EINVAL; } proc_t p = proc_find(pid); @@ -9587,7 +9508,7 @@ memorystatus_cmd_get_memlimit_excess_np(pid_t pid, uint32_t flags, user_addr_t b proc_rele(p); - memlimit_bytes = memlimit_mb * 1024 * 1024; /* MB to bytes */ + memlimit_bytes = memlimit_mb * 1024 * 1024; /* MB to bytes */ /* * Computed delta always returns >= 0 bytes @@ -9598,28 +9519,30 @@ memorystatus_cmd_get_memlimit_excess_np(pid_t pid, uint32_t flags, user_addr_t b error = copyout(&delta_in_bytes, buffer, sizeof(delta_in_bytes)); - return(error); + return error; } static int -memorystatus_cmd_get_pressure_status(int32_t *retval) { +memorystatus_cmd_get_pressure_status(int32_t *retval) +{ int error; - + /* Need privilege for check */ error = priv_check_cred(kauth_cred_get(), PRIV_VM_PRESSURE, 0); if (error) { - return (error); + return error; } - + /* Inherently racy, so it's not worth taking a lock here */ *retval = (kVMPressureNormal != memorystatus_vm_pressure_level) ? 1 : 0; - + return error; } int -memorystatus_get_pressure_status_kdp() { +memorystatus_get_pressure_status_kdp() +{ return (kVMPressureNormal != memorystatus_vm_pressure_level) ? 
1 : 0; } @@ -9642,7 +9565,8 @@ memorystatus_get_pressure_status_kdp() { #if CONFIG_JETSAM static int -memorystatus_cmd_set_jetsam_memory_limit(pid_t pid, int32_t high_water_mark, __unused int32_t *retval, boolean_t is_fatal_limit) { +memorystatus_cmd_set_jetsam_memory_limit(pid_t pid, int32_t high_water_mark, __unused int32_t *retval, boolean_t is_fatal_limit) +{ int error = 0; memorystatus_memlimit_properties_t entry; @@ -9657,20 +9581,20 @@ memorystatus_cmd_set_jetsam_memory_limit(pid_t pid, int32_t high_water_mark, __u } error = memorystatus_set_memlimit_properties(pid, &entry); - return (error); + return error; } #endif /* CONFIG_JETSAM */ static int -memorystatus_set_memlimit_properties(pid_t pid, memorystatus_memlimit_properties_t *entry) { - +memorystatus_set_memlimit_properties(pid_t pid, memorystatus_memlimit_properties_t *entry) +{ int32_t memlimit_active; boolean_t memlimit_active_is_fatal; int32_t memlimit_inactive; boolean_t memlimit_inactive_is_fatal; uint32_t valid_attrs = 0; int error = 0; - + proc_t p = proc_find(pid); if (!p) { return ESRCH; @@ -9766,15 +9690,15 @@ memorystatus_set_memlimit_properties(pid_t pid, memorystatus_memlimit_properties error = (task_set_phys_footprint_limit_internal(p->task, ((p->p_memstat_memlimit > 0) ? p->p_memstat_memlimit : -1), NULL, use_active, is_fatal) == 0) ? 0 : EINVAL; MEMORYSTATUS_DEBUG(3, "memorystatus_set_memlimit_properties: new limit on pid %d (%dMB %s) current priority (%d) dirty_state?=0x%x %s\n", - p->p_pid, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), - (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), p->p_memstat_effectivepriority, p->p_memstat_dirty, - (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : "")); + p->p_pid, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1), + (p->p_memstat_state & P_MEMSTAT_FATAL_MEMLIMIT ? "F " : "NF"), p->p_memstat_effectivepriority, p->p_memstat_dirty, + (p->p_memstat_dirty ? ((p->p_memstat_dirty & P_DIRTY) ? "isdirty" : "isclean") : "")); DTRACE_MEMORYSTATUS2(memorystatus_set_memlimit, proc_t, p, int32_t, (p->p_memstat_memlimit > 0 ? p->p_memstat_memlimit : -1)); } proc_list_unlock(); proc_rele(p); - + return error; } @@ -9904,11 +9828,11 @@ memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable) /* Freeze preference set to FALSE. Set the P_MEMSTAT_FREEZE_DISABLED bit. */ p->p_memstat_state |= P_MEMSTAT_FREEZE_DISABLED; printf("memorystatus_set_process_is_freezable: disabling freeze for pid %d [%s]\n", - p->p_pid, (*p->p_name ? p->p_name : "unknown")); + p->p_pid, (*p->p_name ? p->p_name : "unknown")); } else { p->p_memstat_state &= ~P_MEMSTAT_FREEZE_DISABLED; printf("memorystatus_set_process_is_freezable: enabling freeze for pid %d [%s]\n", - p->p_pid, (*p->p_name ? p->p_name : "unknown")); + p->p_pid, (*p->p_name ? 
p->p_name : "unknown")); } proc_rele_locked(p); proc_list_unlock(); @@ -9917,7 +9841,8 @@ memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable) } int -memorystatus_control(struct proc *p __unused, struct memorystatus_control_args *args, int *ret) { +memorystatus_control(struct proc *p __unused, struct memorystatus_control_args *args, int *ret) +{ int error = EINVAL; boolean_t skip_auth_check = FALSE; os_reason_t jetsam_reason = OS_REASON_NULL; @@ -9967,7 +9892,7 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args * break; case MEMORYSTATUS_CMD_GRP_SET_PROPERTIES: error = memorystatus_cmd_grp_set_properties((int32_t)args->flags, args->buffer, args->buffersize, ret); - break; + break; case MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT: error = memorystatus_cmd_get_jetsam_snapshot((int32_t)args->flags, args->buffer, args->buffersize, ret); break; @@ -9992,7 +9917,7 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args * error = memorystatus_cmd_set_jetsam_memory_limit(args->pid, (int32_t)args->flags, ret, TRUE); break; #endif /* CONFIG_JETSAM */ - /* Test commands */ + /* Test commands */ #if DEVELOPMENT || DEBUG case MEMORYSTATUS_CMD_TEST_JETSAM: jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_GENERIC); @@ -10076,7 +10001,7 @@ out: static int filt_memorystatusattach(struct knote *kn, __unused struct kevent_internal_s *kev) -{ +{ int error; kn->kn_flags |= EV_CLEAR; @@ -10110,7 +10035,6 @@ filt_memorystatus(struct knote *kn __unused, long hint) kn->kn_fflags = NOTE_MEMORYSTATUS_PRESSURE_WARN; } } else if (memorystatus_vm_pressure_level == kVMPressureCritical) { - if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) { kn->kn_fflags = NOTE_MEMORYSTATUS_PRESSURE_CRITICAL; } @@ -10124,15 +10048,15 @@ filt_memorystatus(struct knote *kn __unused, long hint) case kMemorystatusProcLimitWarn: if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) { - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; - } - break; + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN; + } + break; case kMemorystatusProcLimitCritical: if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) { - kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; - } - break; + kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL; + } + break; default: break; @@ -10145,11 +10069,11 @@ filt_memorystatus(struct knote *kn __unused, long hint) pid_t knote_pid = knote_proc->p_pid; printf("filt_memorystatus: sending kn 0x%lx (event 0x%x) for pid (%d)\n", - (unsigned long)kn, kn->kn_fflags, knote_pid); + (unsigned long)kn, kn->kn_fflags, knote_pid); } #endif - return (kn->kn_fflags != 0); + return kn->kn_fflags != 0; } static int @@ -10258,23 +10182,27 @@ filt_memorystatusprocess(struct knote *kn, struct filt_process_s *data, struct k } static void -memorystatus_klist_lock(void) { +memorystatus_klist_lock(void) +{ lck_mtx_lock(&memorystatus_klist_mutex); } static void -memorystatus_klist_unlock(void) { +memorystatus_klist_unlock(void) +{ lck_mtx_unlock(&memorystatus_klist_mutex); } -void -memorystatus_kevent_init(lck_grp_t *grp, lck_attr_t *attr) { +void +memorystatus_kevent_init(lck_grp_t *grp, lck_attr_t *attr) +{ lck_mtx_init(&memorystatus_klist_mutex, grp, attr); klist_init(&memorystatus_klist); } int -memorystatus_knote_register(struct knote *kn) { +memorystatus_knote_register(struct knote *kn) +{ int error = 0; memorystatus_klist_lock(); @@ -10282,8 +10210,7 @@ memorystatus_knote_register(struct knote *kn) { /* * Support only userspace visible 
flags. */ - if ((kn->kn_sfflags & EVFILT_MEMORYSTATUS_ALL_MASK) == (unsigned int) kn->kn_sfflags) { - + if ((kn->kn_sfflags & EVFILT_MEMORYSTATUS_ALL_MASK) == (unsigned int) kn->kn_sfflags) { #if !CONFIG_EMBEDDED if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) { kn->kn_sfflags |= NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE; @@ -10297,18 +10224,18 @@ memorystatus_knote_register(struct knote *kn) { #endif /* !CONFIG_EMBEDDED */ KNOTE_ATTACH(&memorystatus_klist, kn); - - } else { + } else { error = ENOTSUP; } - + memorystatus_klist_unlock(); - + return error; } void -memorystatus_knote_unregister(struct knote *kn __unused) { +memorystatus_knote_unregister(struct knote *kn __unused) +{ memorystatus_klist_lock(); KNOTE_DETACH(&memorystatus_klist, kn); memorystatus_klist_unlock(); @@ -10318,7 +10245,8 @@ memorystatus_knote_unregister(struct knote *kn __unused) { #if 0 #if CONFIG_JETSAM && VM_PRESSURE_EVENTS static boolean_t -memorystatus_issue_pressure_kevent(boolean_t pressured) { +memorystatus_issue_pressure_kevent(boolean_t pressured) +{ memorystatus_klist_lock(); KNOTE(&memorystatus_klist, pressured ? kMemorystatusPressure : kMemorystatusNoPressure); memorystatus_klist_unlock(); @@ -10331,25 +10259,26 @@ memorystatus_issue_pressure_kevent(boolean_t pressured) { /* sorting info for a particular priority bucket */ typedef struct memstat_sort_info { - coalition_t msi_coal; - uint64_t msi_page_count; - pid_t msi_pid; - int msi_ntasks; + coalition_t msi_coal; + uint64_t msi_page_count; + pid_t msi_pid; + int msi_ntasks; } memstat_sort_info_t; -/* +/* * qsort from smallest page count to largest page count * * return < 0 for a < b * 0 for a == b * > 0 for a > b */ -static int memstat_asc_cmp(const void *a, const void *b) +static int +memstat_asc_cmp(const void *a, const void *b) { - const memstat_sort_info_t *msA = (const memstat_sort_info_t *)a; - const memstat_sort_info_t *msB = (const memstat_sort_info_t *)b; + const memstat_sort_info_t *msA = (const memstat_sort_info_t *)a; + const memstat_sort_info_t *msB = (const memstat_sort_info_t *)b; - return (int)((uint64_t)msA->msi_page_count - (uint64_t)msB->msi_page_count); + return (int)((uint64_t)msA->msi_page_count - (uint64_t)msB->msi_page_count); } /* @@ -10358,8 +10287,8 @@ static int memstat_asc_cmp(const void *a, const void *b) static int memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coal_sort_order) { -#define MAX_SORT_PIDS 80 -#define MAX_COAL_LEADERS 10 +#define MAX_SORT_PIDS 80 +#define MAX_COAL_LEADERS 10 unsigned int b = bucket_index; int nleaders = 0; @@ -10370,7 +10299,7 @@ memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coa int total_pids_moved = 0; int i; - /* + /* * The system is typically under memory pressure when in this * path, hence, we want to avoid dynamic memory allocation. 
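 *
 * Note that memstat_asc_cmp() above truncates a uint64_t difference to int.
 * That is harmless at realistic page counts, but the overflow-safe spelling
 * of the same ascending comparator is the branching form (sketch only):
 */
#if 0
static int
memstat_asc_cmp_safe(const void *a, const void *b)
{
	uint64_t pa = ((const memstat_sort_info_t *)a)->msi_page_count;
	uint64_t pb = ((const memstat_sort_info_t *)b)->msi_page_count;

	/* Compare, never subtract: the difference may not fit in an int */
	return (pa < pb) ? -1 : ((pa > pb) ? 1 : 0);
}
#endif
/*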
*/ @@ -10378,51 +10307,51 @@ memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coa pid_t pid_list[MAX_SORT_PIDS]; if (bucket_index >= MEMSTAT_BUCKET_COUNT) { - return(0); - } + return 0; + } /* * Clear the array that holds coalition leader information */ - for (i=0; i < MAX_COAL_LEADERS; i++) { + for (i = 0; i < MAX_COAL_LEADERS; i++) { leaders[i].msi_coal = COALITION_NULL; - leaders[i].msi_page_count = 0; /* will hold total coalition page count */ - leaders[i].msi_pid = 0; /* will hold coalition leader pid */ - leaders[i].msi_ntasks = 0; /* will hold the number of tasks in a coalition */ + leaders[i].msi_page_count = 0; /* will hold total coalition page count */ + leaders[i].msi_pid = 0; /* will hold coalition leader pid */ + leaders[i].msi_ntasks = 0; /* will hold the number of tasks in a coalition */ } - p = memorystatus_get_first_proc_locked(&b, FALSE); - while (p) { - if (coalition_is_leader(p->task, COALITION_TYPE_JETSAM, &coal)) { + p = memorystatus_get_first_proc_locked(&b, FALSE); + while (p) { + if (coalition_is_leader(p->task, COALITION_TYPE_JETSAM, &coal)) { if (nleaders < MAX_COAL_LEADERS) { int coal_ntasks = 0; uint64_t coal_page_count = coalition_get_page_count(coal, &coal_ntasks); leaders[nleaders].msi_coal = coal; leaders[nleaders].msi_page_count = coal_page_count; - leaders[nleaders].msi_pid = p->p_pid; /* the coalition leader */ + leaders[nleaders].msi_pid = p->p_pid; /* the coalition leader */ leaders[nleaders].msi_ntasks = coal_ntasks; nleaders++; } else { - /* + /* * We've hit MAX_COAL_LEADERS meaning we can handle no more coalitions. - * Abandoned coalitions will linger at the tail of the priority band + * Abandoned coalitions will linger at the tail of the priority band * when this sort session ends. * TODO: should this be an assert? */ printf("%s: WARNING: more than %d leaders in priority band [%d]\n", - __FUNCTION__, MAX_COAL_LEADERS, bucket_index); + __FUNCTION__, MAX_COAL_LEADERS, bucket_index); break; } - } - p=memorystatus_get_next_proc_locked(&b, p, FALSE); - } + } + p = memorystatus_get_next_proc_locked(&b, p, FALSE); + } if (nleaders == 0) { /* Nothing to sort */ - return(0); + return 0; } - /* + /* * Sort the coalition leader array, from smallest coalition page count * to largest coalition page count. When inserted in the priority bucket, * smallest coalition is handled first, resulting in the last to be jetsammed. @@ -10434,8 +10363,8 @@ memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coa #if 0 for (i = 0; i < nleaders; i++) { printf("%s: coal_leader[%d of %d] pid[%d] pages[%llu] ntasks[%d]\n", - __FUNCTION__, i, nleaders, leaders[i].msi_pid, leaders[i].msi_page_count, - leaders[i].msi_ntasks); + __FUNCTION__, i, nleaders, leaders[i].msi_pid, leaders[i].msi_page_count, + leaders[i].msi_ntasks); } #endif @@ -10458,8 +10387,7 @@ memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coa * based on their coalition role. 
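 *
 * Net effect within the band, head to tail: undefined members, then
 * extensions, then xpc services, then the coalition leader. Since jetsam
 * consumes the band from the head, undefined members go first and the
 * leader goes last.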
*/ total_pids_moved = 0; - for (i=0; i < nleaders; i++) { - + for (i = 0; i < nleaders; i++) { /* a bit of bookkeeping */ pids_moved = 0; @@ -10468,30 +10396,30 @@ memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coa pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, 1); /* xpc services should jetsam after extensions */ - ntasks = coalition_get_pid_list (leaders[i].msi_coal, COALITION_ROLEMASK_XPC, - coal_sort_order, pid_list, MAX_SORT_PIDS); + ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_XPC, + coal_sort_order, pid_list, MAX_SORT_PIDS); if (ntasks > 0) { - pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, - (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS)); + pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, + (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS)); } /* extensions should jetsam after unmarked processes */ - ntasks = coalition_get_pid_list (leaders[i].msi_coal, COALITION_ROLEMASK_EXT, - coal_sort_order, pid_list, MAX_SORT_PIDS); + ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_EXT, + coal_sort_order, pid_list, MAX_SORT_PIDS); if (ntasks > 0) { pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, - (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS)); + (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS)); } /* undefined coalition members should be the first to jetsam */ - ntasks = coalition_get_pid_list (leaders[i].msi_coal, COALITION_ROLEMASK_UNDEF, - coal_sort_order, pid_list, MAX_SORT_PIDS); + ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_UNDEF, + coal_sort_order, pid_list, MAX_SORT_PIDS); if (ntasks > 0) { - pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, - (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS)); + pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, + (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS)); } #if 0 @@ -10500,27 +10428,26 @@ memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coa * All the pids in the coalition were found in this band. */ printf("%s: pids_moved[%d] equal total coalition ntasks[%d] \n", __FUNCTION__, - pids_moved, leaders[i].msi_ntasks); + pids_moved, leaders[i].msi_ntasks); } else if (pids_moved > leaders[i].msi_ntasks) { /* * Apparently new coalition members showed up during the sort? */ printf("%s: pids_moved[%d] were greater than expected coalition ntasks[%d] \n", __FUNCTION__, - pids_moved, leaders[i].msi_ntasks); + pids_moved, leaders[i].msi_ntasks); } else { /* * Apparently not all the pids in the coalition were found in this band? */ printf("%s: pids_moved[%d] were less than expected coalition ntasks[%d] \n", __FUNCTION__, - pids_moved, leaders[i].msi_ntasks); + pids_moved, leaders[i].msi_ntasks); } #endif total_pids_moved += pids_moved; - } /* end for */ - return(total_pids_moved); + return total_pids_moved; } @@ -10528,17 +10455,17 @@ memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coa * Traverse a list of pids, searching for each within the priority band provided. * If pid is found, move it to the front of the priority band. * Never searches outside the priority band provided. - * + * * Input: * bucket_index - jetsam priority band. * pid_list - pointer to a list of pids. * list_sz - number of pids in the list. * - * Pid list ordering is important in that, + * Pid list ordering is important in that, * pid_list[n] is expected to jetsam ahead of pid_list[n+1]. 
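 * To preserve that order, the loop below walks pid_list back to front and
 * re-inserts each process it finds at the head of the band, which leaves
 * pid_list[0] at the front.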
* The sort_order is set by the coalition default. * - * Return: + * Return: * the number of pids found and hence moved within the priority band. */ static int @@ -10549,15 +10476,15 @@ memorystatus_move_list_locked(unsigned int bucket_index, pid_t *pid_list, int li int found_pids = 0; if ((pid_list == NULL) || (list_sz <= 0)) { - return(0); + return 0; } if (bucket_index >= MEMSTAT_BUCKET_COUNT) { - return(0); - } + return 0; + } current_bucket = &memstat_bucket[bucket_index]; - for (i=0; i < list_sz; i++) { + for (i = 0; i < list_sz; i++) { unsigned int b = bucket_index; proc_t p = NULL; proc_t aProc = NULL; @@ -10565,41 +10492,41 @@ memorystatus_move_list_locked(unsigned int bucket_index, pid_t *pid_list, int li int list_index; list_index = ((list_sz - 1) - i); - aPid = pid_list[list_index]; - - /* never search beyond bucket_index provided */ - p = memorystatus_get_first_proc_locked(&b, FALSE); - while (p) { - if (p->p_pid == aPid) { - aProc = p; - break; - } - p = memorystatus_get_next_proc_locked(&b, p, FALSE); - } - - if (aProc == NULL) { + aPid = pid_list[list_index]; + + /* never search beyond bucket_index provided */ + p = memorystatus_get_first_proc_locked(&b, FALSE); + while (p) { + if (p->p_pid == aPid) { + aProc = p; + break; + } + p = memorystatus_get_next_proc_locked(&b, p, FALSE); + } + + if (aProc == NULL) { /* pid not found in this band, just skip it */ - continue; - } else { - TAILQ_REMOVE(&current_bucket->list, aProc, p_memstat_list); - TAILQ_INSERT_HEAD(&current_bucket->list, aProc, p_memstat_list); + continue; + } else { + TAILQ_REMOVE(&current_bucket->list, aProc, p_memstat_list); + TAILQ_INSERT_HEAD(&current_bucket->list, aProc, p_memstat_list); found_pids++; - } - } - return(found_pids); + } + } + return found_pids; } int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index) { - int32_t i = JETSAM_PRIORITY_IDLE; + int32_t i = JETSAM_PRIORITY_IDLE; int count = 0; if (max_bucket_index >= MEMSTAT_BUCKET_COUNT) { - return(-1); - } + return -1; + } - while(i <= max_bucket_index) { + while (i <= max_bucket_index) { count += memstat_bucket[i++].count; } @@ -10635,18 +10562,18 @@ memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap) * as an indication to modify ledgers. For that it needs the task lock * and since we came into this function with the task lock held, we'll deadlock. * - * Unfortunately we can't completely disable ledger updates because we still + * Unfortunately we can't completely disable ledger updates because we still * need the ledger updates for a subset of processes i.e. daemons. * When all processes on all platforms support memory limits, we can simply call * memorystatus_update(). - + * * It also has some logic to deal with 'aging' which, currently, is only applicable * on CONFIG_JETSAM configs. So, till every platform has CONFIG_JETSAM we'll need * to do this explicit band transition. */ memstat_bucket_t *current_bucket, *new_bucket; - int32_t priority = 0; + int32_t priority = 0; proc_list_lock(); @@ -10658,7 +10585,7 @@ memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap) * let's skip the whole jetsam band transition. */ proc_list_unlock(); - return(0); + return 0; } if (is_appnap) { @@ -10673,7 +10600,7 @@ memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap) * parameters. */ proc_list_unlock(); - return (0); + return 0; } current_bucket = &memstat_bucket[JETSAM_PRIORITY_IDLE]; @@ -10691,7 +10618,7 @@ memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap) * Record idle start or idle delta.
*/ if (p->p_memstat_effectivepriority == priority) { - /* * This process is not transitioning between * jetsam priority buckets. Do nothing. */ @@ -10720,7 +10647,7 @@ memorystatus_update_priority_for_appnap(proc_t p, boolean_t is_appnap) proc_list_unlock(); - return (0); + return 0; #else /* !CONFIG_JETSAM */ #pragma unused(p) diff --git a/bsd/kern/kern_mib.c b/bsd/kern/kern_mib.c index 08cd860d6..dd0cc669b 100644 --- a/bsd/kern/kern_mib.c +++ b/bsd/kern/kern_mib.c @@ -113,64 +113,64 @@ extern vm_map_t bsd_pageable_map; #include #include -#include <mach/mach_host.h> /* for host_info() */ +#include <mach/mach_host.h> /* for host_info() */ #if defined(__i386__) || defined(__x86_64__) -#include <i386/cpuid.h> /* for cpuid_info() */ +#include <i386/cpuid.h> /* for cpuid_info() */ #endif #if defined(__arm__) || defined(__arm64__) -#include <arm/cpuid.h> /* for cpuid_info() & cache_info() */ +#include <arm/cpuid.h> /* for cpuid_info() & cache_info() */ #endif #ifndef MAX -#define MAX(a,b) (a >= b ? a : b) +#define MAX(a, b) (a >= b ? a : b) #endif /* XXX This should be in a BSD accessible Mach header, but isn't. */ extern unsigned int vm_page_wire_count; -static int cputype, cpusubtype, cputhreadtype, cpufamily, cpu64bit; -static uint64_t cacheconfig[10], cachesize[10]; -static int packages; - -SYSCTL_NODE(, 0, sysctl, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Sysctl internal magic"); -SYSCTL_NODE(, CTL_KERN, kern, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "High kernel, proc, limits &c"); -SYSCTL_NODE(, CTL_VM, vm, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Virtual memory"); -SYSCTL_NODE(, CTL_VFS, vfs, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "File system"); -SYSCTL_NODE(, CTL_NET, net, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Network, (see socket.h)"); -SYSCTL_NODE(, CTL_DEBUG, debug, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Debugging"); -SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "hardware"); -SYSCTL_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "machine dependent"); -SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "user-level"); - -SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "bridge"); - -#define SYSCTL_RETURN(r, x) SYSCTL_OUT(r, &x, sizeof(x)) +static int cputype, cpusubtype, cputhreadtype, cpufamily, cpu64bit; +static uint64_t cacheconfig[10], cachesize[10]; +static int packages; + +SYSCTL_NODE(, 0, sysctl, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Sysctl internal magic"); +SYSCTL_NODE(, CTL_KERN, kern, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "High kernel, proc, limits &c"); +SYSCTL_NODE(, CTL_VM, vm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Virtual memory"); +SYSCTL_NODE(, CTL_VFS, vfs, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "File system"); +SYSCTL_NODE(, CTL_NET, net, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Network, (see socket.h)"); +SYSCTL_NODE(, CTL_DEBUG, debug, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Debugging"); +SYSCTL_NODE(, CTL_HW, hw, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "hardware"); +SYSCTL_NODE(, CTL_MACHDEP, machdep, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "machine dependent"); +SYSCTL_NODE(, CTL_USER, user, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "user-level"); + +SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "bridge"); + +#define SYSCTL_RETURN(r, x) SYSCTL_OUT(r, &x, sizeof(x)) /****************************************************************************** * hw.* MIB */ -#define CTLHW_RETQUAD (1 << 31) -#define CTLHW_LOCAL (1 << 30) +#define CTLHW_RETQUAD (1 << 31) +#define CTLHW_LOCAL (1 << 30) -#define HW_LOCAL_CPUTHREADTYPE (1 | CTLHW_LOCAL) -#define HW_LOCAL_PHYSICALCPU (2 | CTLHW_LOCAL) -#define HW_LOCAL_PHYSICALCPUMAX (3 | CTLHW_LOCAL) -#define HW_LOCAL_LOGICALCPU
(4 | CTLHW_LOCAL) -#define HW_LOCAL_LOGICALCPUMAX (5 | CTLHW_LOCAL) +#define HW_LOCAL_CPUTHREADTYPE (1 | CTLHW_LOCAL) +#define HW_LOCAL_PHYSICALCPU (2 | CTLHW_LOCAL) +#define HW_LOCAL_PHYSICALCPUMAX (3 | CTLHW_LOCAL) +#define HW_LOCAL_LOGICALCPU (4 | CTLHW_LOCAL) +#define HW_LOCAL_LOGICALCPUMAX (5 | CTLHW_LOCAL) /* @@ -179,7 +179,7 @@ SYSCTL_NODE(_kern, OID_AUTO, bridge, CTLFLAG_RW|CTLFLAG_LOCKED, 0, */ static int sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1, - int arg2, struct sysctl_req *req) + int arg2, struct sysctl_req *req) { char dummy[65]; int epochTemp; @@ -211,39 +211,39 @@ sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1, switch (arg2) { case HW_NCPU: if (kret == KERN_SUCCESS) { - return(SYSCTL_RETURN(req, hinfo.max_cpus)); + return SYSCTL_RETURN(req, hinfo.max_cpus); } else { - return(EINVAL); + return EINVAL; } case HW_AVAILCPU: if (kret == KERN_SUCCESS) { - return(SYSCTL_RETURN(req, hinfo.avail_cpus)); + return SYSCTL_RETURN(req, hinfo.avail_cpus); } else { - return(EINVAL); + return EINVAL; } case HW_LOCAL_PHYSICALCPU: if (kret == KERN_SUCCESS) { - return(SYSCTL_RETURN(req, hinfo.physical_cpu)); + return SYSCTL_RETURN(req, hinfo.physical_cpu); } else { - return(EINVAL); + return EINVAL; } case HW_LOCAL_PHYSICALCPUMAX: if (kret == KERN_SUCCESS) { - return(SYSCTL_RETURN(req, hinfo.physical_cpu_max)); + return SYSCTL_RETURN(req, hinfo.physical_cpu_max); } else { - return(EINVAL); + return EINVAL; } case HW_LOCAL_LOGICALCPU: if (kret == KERN_SUCCESS) { - return(SYSCTL_RETURN(req, hinfo.logical_cpu)); + return SYSCTL_RETURN(req, hinfo.logical_cpu); } else { - return(EINVAL); + return EINVAL; } case HW_LOCAL_LOGICALCPUMAX: if (kret == KERN_SUCCESS) { - return(SYSCTL_RETURN(req, hinfo.logical_cpu_max)); + return SYSCTL_RETURN(req, hinfo.logical_cpu_max); } else { - return(EINVAL); + return EINVAL; } case HW_PAGESIZE: { @@ -265,67 +265,74 @@ sysctl_hw_generic(__unused struct sysctl_oid *oidp, __unused void *arg1, qval = (long long)val; break; case HW_L2CACHESIZE: - if (cpu_info.l2_cache_size == 0xFFFFFFFF) - return(EINVAL); + if (cpu_info.l2_cache_size == 0xFFFFFFFF) { + return EINVAL; + } val = cpu_info.l2_cache_size; qval = (long long)val; break; case HW_L3CACHESIZE: - if (cpu_info.l3_cache_size == 0xFFFFFFFF) - return(EINVAL); + if (cpu_info.l3_cache_size == 0xFFFFFFFF) { + return EINVAL; + } val = cpu_info.l3_cache_size; qval = (long long)val; break; - /* - * Deprecated variables. We still support these for - * backwards compatibility purposes only. - */ + /* + * Deprecated variables. We still support these for + * backwards compatibility purposes only. 
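 *
 * New code should prefer the named interface. A minimal userspace sketch
 * using sysctlbyname(3) to read one of these nodes:
 */
#if 0
#include <stdio.h>
#include <sys/sysctl.h>

int
main(void)
{
	char model[65];
	size_t len = sizeof(model);

	/* "hw.model" is served by the HW_MODEL handler registered below */
	if (sysctlbyname("hw.model", model, &len, NULL, 0) == 0) {
		printf("hw.model = %s\n", model);
	}
	return 0;
}
#endif
/*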
+ */ case HW_MACHINE: bzero(dummy, sizeof(dummy)); - if(!PEGetMachineName(dummy,64)) - return(EINVAL); + if (!PEGetMachineName(dummy, 64)) { + return EINVAL; + } dummy[64] = 0; - return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1)); + return SYSCTL_OUT(req, dummy, strlen(dummy) + 1); case HW_MODEL: bzero(dummy, sizeof(dummy)); - if(!PEGetModelName(dummy,64)) - return(EINVAL); + if (!PEGetModelName(dummy, 64)) { + return EINVAL; + } dummy[64] = 0; - return(SYSCTL_OUT(req, dummy, strlen(dummy) + 1)); + return SYSCTL_OUT(req, dummy, strlen(dummy) + 1); case HW_USERMEM: - { + { int usermem = mem_size - vm_page_wire_count * page_size; - return(SYSCTL_RETURN(req, usermem)); - } + return SYSCTL_RETURN(req, usermem); + } case HW_EPOCH: - epochTemp = PEGetPlatformEpoch(); - if (epochTemp == -1) - return(EINVAL); - return(SYSCTL_RETURN(req, epochTemp)); + epochTemp = PEGetPlatformEpoch(); + if (epochTemp == -1) { + return EINVAL; + } + return SYSCTL_RETURN(req, epochTemp); case HW_VECTORUNIT: { int vector = cpu_info.vector_unit == 0? 0 : 1; - return(SYSCTL_RETURN(req, vector)); + return SYSCTL_RETURN(req, vector); } case HW_L2SETTINGS: - if (cpu_info.l2_cache_size == 0xFFFFFFFF) - return(EINVAL); - return(SYSCTL_RETURN(req, cpu_info.l2_settings)); + if (cpu_info.l2_cache_size == 0xFFFFFFFF) { + return EINVAL; + } + return SYSCTL_RETURN(req, cpu_info.l2_settings); case HW_L3SETTINGS: - if (cpu_info.l3_cache_size == 0xFFFFFFFF) - return(EINVAL); - return(SYSCTL_RETURN(req, cpu_info.l3_settings)); + if (cpu_info.l3_cache_size == 0xFFFFFFFF) { + return EINVAL; + } + return SYSCTL_RETURN(req, cpu_info.l3_settings); default: - return(ENOTSUP); + return ENOTSUP; } /* * Callers may come to us with either int or quad buffers. */ if (doquad) { - return(SYSCTL_RETURN(req, qval)); + return SYSCTL_RETURN(req, qval); } - return(SYSCTL_RETURN(req, val)); + return SYSCTL_RETURN(req, val); } /* hw.pagesize and hw.tbfrequency are expected as 64 bit values */ @@ -362,46 +369,46 @@ sysctl_tbfrequency /* * hw.* MIB variables. 
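 *
 * Variables with fixed numeric IDs (HW_NCPU, HW_MEMSIZE, ...) can also be
 * read with the MIB-array form of sysctl(3); a small userspace sketch:
 */
#if 0
#include <sys/sysctl.h>

static int
ncpu(void)
{
	int mib[2] = { CTL_HW, HW_NCPU };
	int n = 0;
	size_t len = sizeof(n);

	return (sysctl(mib, 2, &n, &len, NULL, 0) == 0) ? n : -1;
}
#endif
/*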
*/ -SYSCTL_PROC (_hw, HW_NCPU, ncpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_NCPU, sysctl_hw_generic, "I", ""); -SYSCTL_PROC (_hw, HW_AVAILCPU, activecpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_AVAILCPU, sysctl_hw_generic, "I", ""); -SYSCTL_PROC (_hw, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPU, sysctl_hw_generic, "I", ""); -SYSCTL_PROC (_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPUMAX, sysctl_hw_generic, "I", ""); -SYSCTL_PROC (_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", ""); -SYSCTL_PROC (_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", ""); -SYSCTL_INT (_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, ""); -SYSCTL_INT (_hw, OID_AUTO, cputype, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputype, 0, ""); -SYSCTL_INT (_hw, OID_AUTO, cpusubtype, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpusubtype, 0, ""); -SYSCTL_INT (_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, ""); -SYSCTL_INT (_hw, OID_AUTO, cpufamily, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpufamily, 0, ""); -SYSCTL_OPAQUE (_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", ""); -SYSCTL_OPAQUE (_hw, OID_AUTO, cachesize, CTLFLAG_RD | CTLFLAG_LOCKED, &cachesize, sizeof(cachesize), "Q", ""); -SYSCTL_PROC (_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, "Q", ""); -SYSCTL_PROC (_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", ""); +SYSCTL_PROC(_hw, HW_NCPU, ncpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_NCPU, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, HW_AVAILCPU, activecpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_AVAILCPU, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, OID_AUTO, physicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPU, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, OID_AUTO, physicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_PHYSICALCPUMAX, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, OID_AUTO, logicalcpu, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPU, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, OID_AUTO, logicalcpu_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_LOCAL_LOGICALCPUMAX, sysctl_hw_generic, "I", ""); +SYSCTL_INT(_hw, HW_BYTEORDER, byteorder, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, BYTE_ORDER, ""); +SYSCTL_INT(_hw, OID_AUTO, cputype, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputype, 0, ""); +SYSCTL_INT(_hw, OID_AUTO, cpusubtype, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpusubtype, 0, ""); +SYSCTL_INT(_hw, OID_AUTO, cpu64bit_capable, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpu64bit, 0, ""); +SYSCTL_INT(_hw, OID_AUTO, cpufamily, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &cpufamily, 0, ""); +SYSCTL_OPAQUE(_hw, OID_AUTO, cacheconfig, CTLFLAG_RD | CTLFLAG_LOCKED, &cacheconfig, sizeof(cacheconfig), "Q", ""); +SYSCTL_OPAQUE(_hw, OID_AUTO, cachesize, CTLFLAG_RD | CTLFLAG_LOCKED, 
&cachesize, sizeof(cachesize), "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, pagesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, pagesize32, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_pagesize32, "Q", ""); #if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__)) -SYSCTL_QUAD (_hw, OID_AUTO, busfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, busfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_min_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, busfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_max_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, cpufrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, cpufrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_min_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, cpufrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_max_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, busfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, busfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_min_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, busfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_frequency_max_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, cpufrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, cpufrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_min_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, cpufrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_frequency_max_hz, ""); #endif -SYSCTL_PROC (_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); -SYSCTL_PROC (_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); -SYSCTL_PROC (_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); -SYSCTL_PROC (_hw, OID_AUTO, l2cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); -SYSCTL_PROC (_hw, OID_AUTO, l3cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, cachelinesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_CACHELINE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, l1icachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, l1dcachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, l2cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); +SYSCTL_PROC(_hw, OID_AUTO, 
l3cachesize, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE | CTLHW_RETQUAD, sysctl_hw_generic, "Q", ""); #if (defined(__arm__) || defined(__arm64__)) && (DEBUG || DEVELOPMENT) -SYSCTL_QUAD (_hw, OID_AUTO, memfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, memfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_min_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, memfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_max_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, prffrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, prffrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_min_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_max_hz, ""); -SYSCTL_QUAD (_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, memfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_min_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, memfrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.mem_frequency_max_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, prffrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_min, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_min_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, prffrequency_max, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.prf_frequency_max_hz, ""); +SYSCTL_QUAD(_hw, OID_AUTO, fixfrequency, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.fix_frequency_hz, ""); #endif /* __arm__ || __arm64__ */ SYSCTL_PROC(_hw, OID_AUTO, tbfrequency, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, sysctl_tbfrequency, "Q", ""); -SYSCTL_QUAD (_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, ""); -SYSCTL_INT (_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, ""); +SYSCTL_QUAD(_hw, HW_MEMSIZE, memsize, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &max_mem, ""); +SYSCTL_INT(_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, &packages, 0, ""); /* * Optional CPU features can register nodes below hw.optional. @@ -411,9 +418,9 @@ SYSCTL_INT (_hw, OID_AUTO, packages, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOC * 0. If the feature is present and its use is advised, the node should * return 1. 
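 *
 * Userspace can probe any of these nodes by name; a node that was never
 * registered simply fails the lookup (expected to be ENOENT). A minimal
 * sketch using sysctlbyname(3):
 */
#if 0
#include <sys/sysctl.h>

static int
has_feature(const char *name) /* e.g. "hw.optional.avx2_0" */
{
	int val = 0;
	size_t len = sizeof(val);

	/* A failed lookup means the feature node is absent on this CPU */
	if (sysctlbyname(name, &val, &len, NULL, 0) != 0) {
		return 0;
	}
	return val;
}
#endif
/*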
*/ -SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW|CTLFLAG_LOCKED, NULL, "optional features"); +SYSCTL_NODE(_hw, OID_AUTO, optional, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "optional features"); -SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, 1, ""); /* always set */ +SYSCTL_INT(_hw_optional, OID_AUTO, floatingpoint, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (int *)NULL, 1, ""); /* always set */ /* * Optional device hardware features can be registered by drivers below hw.features @@ -432,71 +439,70 @@ SYSCTL_NODE(_hw, OID_AUTO, features, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "hardwar * * The *_compat nodes are *NOT* visible within the kernel. */ -SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, HW_PAGESIZE, pagesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_PAGESIZE, sysctl_hw_generic, "I", ""); #if DEBUG || DEVELOPMENT || (!defined(__arm__) && !defined(__arm64__)) -SYSCTL_COMPAT_INT (_hw, HW_BUS_FREQ, busfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_clock_rate_hz, 0, ""); -SYSCTL_COMPAT_INT (_hw, HW_CPU_FREQ, cpufrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_clock_rate_hz, 0, ""); +SYSCTL_COMPAT_INT(_hw, HW_BUS_FREQ, busfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.bus_clock_rate_hz, 0, ""); +SYSCTL_COMPAT_INT(_hw, HW_CPU_FREQ, cpufrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.cpu_clock_rate_hz, 0, ""); #endif -SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, HW_CACHELINE, cachelinesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_CACHELINE, sysctl_hw_generic, "I", ""); SYSCTL_PROC(_hw, HW_L1ICACHESIZE, l1icachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1ICACHESIZE, sysctl_hw_generic, "I", ""); SYSCTL_PROC(_hw, HW_L1DCACHESIZE, l1dcachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L1DCACHESIZE, sysctl_hw_generic, "I", ""); -SYSCTL_PROC(_hw, HW_L2CACHESIZE, l2cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE, sysctl_hw_generic, "I", ""); -SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE, sysctl_hw_generic, "I", ""); -SYSCTL_COMPAT_INT (_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, ""); -SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", ""); -SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", ""); -SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, ""); -SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, "I", ""); -SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", ""); -SYSCTL_PROC(_hw, HW_VECTORUNIT, 
vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", ""); -SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2SETTINGS, sysctl_hw_generic, "I", ""); -SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", ""); -SYSCTL_INT (_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, ""); +SYSCTL_PROC(_hw, HW_L2CACHESIZE, l2cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2CACHESIZE, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, HW_L3CACHESIZE, l3cachesize_compat, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3CACHESIZE, sysctl_hw_generic, "I", ""); +SYSCTL_COMPAT_INT(_hw, HW_TB_FREQ, tbfrequency_compat, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &gPEClockFrequencyInfo.timebase_frequency_hz, 0, ""); +SYSCTL_PROC(_hw, HW_MACHINE, machine, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MACHINE, sysctl_hw_generic, "A", ""); +SYSCTL_PROC(_hw, HW_MODEL, model, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_MODEL, sysctl_hw_generic, "A", ""); +SYSCTL_COMPAT_UINT(_hw, HW_PHYSMEM, physmem, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, &mem_size, 0, ""); +SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_USERMEM, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, HW_EPOCH, epoch, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_EPOCH, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, HW_VECTORUNIT, vectorunit, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_VECTORUNIT, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, HW_L2SETTINGS, l2settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L2SETTINGS, sysctl_hw_generic, "I", ""); +SYSCTL_PROC(_hw, HW_L3SETTINGS, l3settings, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_LOCKED, 0, HW_L3SETTINGS, sysctl_hw_generic, "I", ""); +SYSCTL_INT(_hw, OID_AUTO, cputhreadtype, CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, &cputhreadtype, 0, ""); #if defined(__i386__) || defined(__x86_64__) static int sysctl_cpu_capability (__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req) { - uint64_t mask = (uint64_t) (uintptr_t) arg1; - boolean_t is_capable = (_get_cpu_capabilities() & mask) != 0; + uint64_t mask = (uint64_t) (uintptr_t) arg1; + boolean_t is_capable = (_get_cpu_capabilities() & mask) != 0; return SYSCTL_OUT(req, &is_capable, sizeof(is_capable)); - } -SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasMMX, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE2, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE3, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSupplementalSSE3, 0, sysctl_cpu_capability, "I", ""); 
-SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE4_1, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE4_2, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, mmx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasMMX, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE2, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE3, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, supplementalsse3, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSupplementalSSE3, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE4_1, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sse4_2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSSE4_2, 0, sysctl_cpu_capability, "I", ""); /* "x86_64" is actually a preprocessor symbol on the x86_64 kernel, so we have to hack this */ #undef x86_64 -SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) k64Bit, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAES, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX1_0, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRDRAND, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasF16C, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasENFSTRG, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasFMA, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX2_0, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasBMI1, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasBMI2, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRTM, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasHLE, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasADX, 0, 
sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasMPX, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSGX, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, x86_64, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) k64Bit, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, aes, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAES, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx1_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX1_0, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, rdrand, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRDRAND, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, f16c, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasF16C, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, enfstrg, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasENFSTRG, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, fma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasFMA, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx2_0, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX2_0, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasBMI1, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasBMI2, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRTM, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasHLE, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasADX, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasMPX, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSGX, 0, sysctl_cpu_capability, "I", ""); #if !defined(RC_HIDE_XNU_J137) -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512F, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512CD, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512DQ, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512BW, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512VL, 0, sysctl_cpu_capability, "I", ""); 
-SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512IFMA, 0, sysctl_cpu_capability, "I", ""); -SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512VBMI, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512f, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512F, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512cd, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512CD, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512dq, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512DQ, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512bw, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512BW, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vl, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512VL, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512ifma, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512IFMA, 0, sysctl_cpu_capability, "I", ""); +SYSCTL_PROC(_hw_optional, OID_AUTO, avx512vbmi, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasAVX512VBMI, 0, sysctl_cpu_capability, "I", ""); #endif /* not RC_HIDE_XNU_J137 */ #elif defined (__arm__) || defined (__arm64__) int watchpoint_flag = -1; @@ -592,7 +598,7 @@ sysctl_mib_init(void) /* hw.packages */ packages = roundup(ml_cpu_cache_sharing(0), cpuid_info()->thread_count) - / cpuid_info()->thread_count; + / cpuid_info()->thread_count; #elif defined(__arm__) || defined(__arm64__) /* end __i386 */ @@ -624,5 +630,4 @@ sysctl_mib_init(void) #else #error unknown architecture #endif /* !__i386__ && !__x86_64 && !__arm__ && !__arm64__ */ - } diff --git a/bsd/kern/kern_mman.c b/bsd/kern/kern_mman.c index f373a0c79..29853fd43 100644 --- a/bsd/kern/kern_mman.c +++ b/bsd/kern/kern_mman.c @@ -1,8 +1,8 @@ /* * Copyright (c) 2007 Apple Inc. All Rights Reserved. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -144,33 +144,33 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * Map in special device (must be SHARED) or file */ struct fileproc *fp; - struct vnode *vp; - int flags; - int prot; - int err=0; - vm_map_t user_map; - kern_return_t result; - vm_map_offset_t user_addr; - vm_map_size_t user_size; - vm_object_offset_t pageoff; - vm_object_offset_t file_pos; - int alloc_flags = 0; - vm_tag_t tag = VM_KERN_MEMORY_NONE; - vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; - boolean_t docow; - vm_prot_t maxprot; - void *handle; - memory_object_t pager = MEMORY_OBJECT_NULL; - memory_object_control_t control; - int mapanon=0; - int fpref=0; - int error =0; + struct vnode *vp; + int flags; + int prot; + int err = 0; + vm_map_t user_map; + kern_return_t result; + vm_map_offset_t user_addr; + vm_map_size_t user_size; + vm_object_offset_t pageoff; + vm_object_offset_t file_pos; + int alloc_flags = 0; + vm_tag_t tag = VM_KERN_MEMORY_NONE; + vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + boolean_t docow; + vm_prot_t maxprot; + void *handle; + memory_object_t pager = MEMORY_OBJECT_NULL; + memory_object_control_t control; + int mapanon = 0; + int fpref = 0; + int error = 0; int fd = uap->fd; int num_retries = 0; /* * Note that for UNIX03 conformance, there is additional parameter checking for - * mmap() system call in libsyscall prior to entering the kernel. The sanity + * mmap() system call in libsyscall prior to entering the kernel. The sanity * checks and argument validation done in this function are not the only places * one can get returned errnos. */ @@ -191,24 +191,26 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * for write or execute access, we must imply read access as well; * otherwise programs expecting this to work will fail to operate. */ - if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) + if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) { prot |= VM_PROT_READ; -#endif /* radar 3777787 */ + } +#endif /* radar 3777787 */ flags = uap->flags; vp = NULLVP; /* * The vm code does not have prototypes & compiler doesn't do the' - * the right thing when you cast 64bit value and pass it in function + * the right thing when you cast 64bit value and pass it in function * call. So here it is. */ file_pos = (vm_object_offset_t)uap->pos; /* make sure mapping fits into numeric range etc */ - if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64) - return (EINVAL); + if (file_pos + user_size > (vm_object_offset_t)-PAGE_SIZE_64) { + return EINVAL; + } /* * Align the file position to a page boundary, @@ -219,9 +221,9 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) /* Adjust size for rounding (on both ends). */ - user_size += pageoff; /* low end... */ - user_size = vm_map_round_page(user_size, - vm_map_page_mask(user_map)); /* hi end */ + user_size += pageoff; /* low end... */ + user_size = vm_map_round_page(user_size, + vm_map_page_mask(user_map)); /* hi end */ if (flags & MAP_JIT) { if ((flags & MAP_FIXED) || @@ -255,8 +257,9 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * should be aligned after adjustment by pageoff. */ user_addr -= pageoff; - if (user_addr & vm_map_page_mask(user_map)) - return (EINVAL); + if (user_addr & vm_map_page_mask(user_map)) { + return EINVAL; + } } #ifdef notyet /* DO not have apis to get this info, need to wait till then*/ @@ -269,16 +272,16 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * location. 
*/ else if (addr < vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ, - vm_map_page_mask(user_map))) + vm_map_page_mask(user_map))) { addr = vm_map_round_page(p->p_vmspace->vm_daddr + MAXDSIZ, - vm_map_page_mask(user_map)); + vm_map_page_mask(user_map)); + } #endif alloc_flags = 0; if (flags & MAP_ANON) { - maxprot = VM_PROT_ALL; #if CONFIG_MACF /* @@ -287,12 +290,12 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) error = mac_proc_check_map_anon(p, user_addr, user_size, prot, flags, &maxprot); if (error) { return EINVAL; - } + } #endif /* MAC */ /* * Mapping blank space is trivial. Use positive fds as the alias - * value for memory tracking. + * value for memory tracking. */ if (fd != -1) { /* @@ -300,9 +303,9 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * (see the VM_FLAGS_* definitions). */ alloc_flags = fd & (VM_FLAGS_ALIAS_MASK | - VM_FLAGS_SUPERPAGE_MASK | - VM_FLAGS_PURGABLE | - VM_FLAGS_4GB_CHUNK); + VM_FLAGS_SUPERPAGE_MASK | + VM_FLAGS_PURGABLE | + VM_FLAGS_4GB_CHUNK); if (alloc_flags != fd) { /* reject if there are any extra flags */ return EINVAL; @@ -310,7 +313,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) VM_GET_FLAGS_ALIAS(alloc_flags, tag); alloc_flags &= ~VM_FLAGS_ALIAS_MASK; } - + handle = NULL; file_pos = 0; mapanon = 1; @@ -318,16 +321,18 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) struct vnode_attr va; vfs_context_t ctx = vfs_context_current(); - if (flags & MAP_JIT) + if (flags & MAP_JIT) { return EINVAL; + } /* * Mapping file, get fp for validation. Obtain vnode and make * sure it is of appropriate type. */ err = fp_lookup(p, fd, &fp, 0); - if (err) - return(err); + if (err) { + return err; + } fpref = 1; switch (FILEGLOB_DTYPE(fp->f_fglob)) { case DTYPE_PSXSHM: @@ -346,8 +351,9 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) } vp = (struct vnode *)fp->f_fglob->fg_data; error = vnode_getwithref(vp); - if(error != 0) + if (error != 0) { goto bad; + } if (vp->v_type != VREG && vp->v_type != VCHR) { (void)vnode_put(vp); @@ -356,7 +362,7 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) } AUDIT_ARG(vnpath, vp, ARG_VNODE1); - + /* * POSIX: mmap needs to update access time for mapped files */ @@ -385,10 +391,10 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * credentials do we use for determination? What if * proc does a setuid? */ - maxprot = VM_PROT_EXECUTE; /* ??? */ - if (fp->f_fglob->fg_flag & FREAD) + maxprot = VM_PROT_EXECUTE; /* ??? */ + if (fp->f_fglob->fg_flag & FREAD) { maxprot |= VM_PROT_READ; - else if (prot & PROT_READ) { + } else if (prot & PROT_READ) { (void)vnode_put(vp); error = EACCES; goto bad; @@ -398,41 +404,42 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * MAP_SHARED or via the implicit sharing of character * device mappings), and we are trying to get write * permission although we opened it without asking - * for it, bail out. + * for it, bail out. */ if ((flags & MAP_SHARED) != 0) { if ((fp->f_fglob->fg_flag & FWRITE) != 0 && /* - * Do not allow writable mappings of + * Do not allow writable mappings of * swap files (see vm_swapfile_pager.c). */ !vnode_isswap(vp)) { - /* - * check for write access - * - * Note that we already made this check when granting FWRITE - * against the file, so it seems redundant here. 
- */ - error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx); - - /* if not granted for any reason, but we wanted it, bad */ - if ((prot & PROT_WRITE) && (error != 0)) { - vnode_put(vp); - goto bad; - } - - /* if writable, remember */ - if (error == 0) - maxprot |= VM_PROT_WRITE; - + /* + * check for write access + * + * Note that we already made this check when granting FWRITE + * against the file, so it seems redundant here. + */ + error = vnode_authorize(vp, NULL, KAUTH_VNODE_CHECKIMMUTABLE, ctx); + + /* if not granted for any reason, but we wanted it, bad */ + if ((prot & PROT_WRITE) && (error != 0)) { + vnode_put(vp); + goto bad; + } + + /* if writable, remember */ + if (error == 0) { + maxprot |= VM_PROT_WRITE; + } } else if ((prot & PROT_WRITE) != 0) { (void)vnode_put(vp); error = EACCES; goto bad; } - } else + } else { maxprot |= VM_PROT_WRITE; + } handle = (void *)vp; #if CONFIG_MACF @@ -446,9 +453,10 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) } } - if (user_size == 0) { - if (!mapanon) + if (user_size == 0) { + if (!mapanon) { (void)vnode_put(vp); + } error = 0; goto bad; } @@ -458,11 +466,12 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) * to the nearest page boundary. */ user_size = vm_map_round_page(user_size, - vm_map_page_mask(user_map)); + vm_map_page_mask(user_map)); if (file_pos & vm_map_page_mask(user_map)) { - if (!mapanon) + if (!mapanon) { (void)vnode_put(vp); + } error = EINVAL; goto bad; } @@ -470,12 +479,13 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) if ((flags & MAP_FIXED) == 0) { alloc_flags |= VM_FLAGS_ANYWHERE; user_addr = vm_map_round_page(user_addr, - vm_map_page_mask(user_map)); + vm_map_page_mask(user_map)); } else { if (user_addr != vm_map_trunc_page(user_addr, - vm_map_page_mask(user_map))) { - if (!mapanon) - (void)vnode_put(vp); + vm_map_page_mask(user_map))) { + if (!mapanon) { + (void)vnode_put(vp); + } error = EINVAL; goto bad; } @@ -493,8 +503,9 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) alloc_flags |= VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE; } - if (flags & MAP_NOCACHE) + if (flags & MAP_NOCACHE) { alloc_flags |= VM_FLAGS_NO_CACHE; + } if (flags & MAP_JIT) { vmk_flags.vmkf_map_jit = TRUE; @@ -512,29 +523,33 @@ mmap(proc_t p, struct mmap_args *uap, user_addr_t *retval) #ifdef notyet /* Hmm .. */ #if defined(VM_PROT_READ_IS_EXEC) - if (prot & VM_PROT_READ) + if (prot & VM_PROT_READ) { prot |= VM_PROT_EXECUTE; - if (maxprot & VM_PROT_READ) + } + if (maxprot & VM_PROT_READ) { maxprot |= VM_PROT_EXECUTE; + } #endif #endif #if 3777787 - if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) + if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) { prot |= VM_PROT_READ; - if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) + } + if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) { maxprot |= VM_PROT_READ; -#endif /* radar 3777787 */ + } +#endif /* radar 3777787 */ map_anon_retry: result = vm_map_enter_mem_object(user_map, - &user_addr, user_size, - 0, alloc_flags, vmk_flags, - tag, - IPC_PORT_NULL, 0, FALSE, - prot, maxprot, - (flags & MAP_SHARED) ? - VM_INHERIT_SHARE : - VM_INHERIT_DEFAULT); + &user_addr, user_size, + 0, alloc_flags, vmk_flags, + tag, + IPC_PORT_NULL, 0, FALSE, + prot, maxprot, + (flags & MAP_SHARED) ? 
+ VM_INHERIT_SHARE : + VM_INHERIT_DEFAULT); /* If a non-binding address was specified for this anonymous * mapping, retry the mapping with a zero base @@ -559,7 +574,7 @@ map_anon_retry: } else { control = ubc_getobject(vp, UBC_FLAGS_NONE); } - + if (control == NULL) { (void)vnode_put(vp); error = ENOMEM; @@ -570,31 +585,35 @@ map_anon_retry: * Set credentials: * FIXME: if we're writing the file we need a way to * ensure that someone doesn't replace our R/W creds - * with ones that only work for read. + * with ones that only work for read. */ ubc_setthreadcred(vp, p, current_thread()); docow = FALSE; - if ((flags & (MAP_ANON|MAP_SHARED)) == 0) { + if ((flags & (MAP_ANON | MAP_SHARED)) == 0) { docow = TRUE; } #ifdef notyet /* Hmm .. */ #if defined(VM_PROT_READ_IS_EXEC) - if (prot & VM_PROT_READ) + if (prot & VM_PROT_READ) { prot |= VM_PROT_EXECUTE; - if (maxprot & VM_PROT_READ) + } + if (maxprot & VM_PROT_READ) { maxprot |= VM_PROT_EXECUTE; + } #endif #endif /* notyet */ #if 3777787 - if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) + if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) { prot |= VM_PROT_READ; - if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) + } + if (maxprot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) { maxprot |= VM_PROT_READ; -#endif /* radar 3777787 */ + } +#endif /* radar 3777787 */ map_file_retry: if ((flags & MAP_RESILIENT_CODESIGN) || @@ -617,14 +636,14 @@ map_file_retry: } result = vm_map_enter_mem_object_control(user_map, - &user_addr, user_size, - 0, alloc_flags, vmk_flags, - tag, - control, file_pos, - docow, prot, maxprot, - (flags & MAP_SHARED) ? - VM_INHERIT_SHARE : - VM_INHERIT_DEFAULT); + &user_addr, user_size, + 0, alloc_flags, vmk_flags, + tag, + control, file_pos, + docow, prot, maxprot, + (flags & MAP_SHARED) ? + VM_INHERIT_SHARE : + VM_INHERIT_DEFAULT); /* If a non-binding address was specified for this file backed * mapping, retry the mapping with a zero base @@ -666,22 +685,23 @@ bad: */ memory_object_deallocate(pager); } - if (fpref) + if (fpref) { fp_drop(p, fd, fp, 0); + } KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_mmap) | DBG_FUNC_NONE), fd, (uint32_t)(*retval), (uint32_t)user_size, error, 0); -#ifndef CONFIG_EMBEDDED +#ifndef CONFIG_EMBEDDED KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO2, SYS_mmap) | DBG_FUNC_NONE), (uint32_t)(*retval >> 32), (uint32_t)(user_size >> 32), - (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0); + (uint32_t)(file_pos >> 32), (uint32_t)file_pos, 0); #endif - return(error); + return error; } int msync(__unused proc_t p, struct msync_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(msync_nocancel(p, (struct msync_nocancel_args *)uap, retval)); + return msync_nocancel(p, (struct msync_nocancel_args *)uap, retval); } int @@ -692,12 +712,12 @@ msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused int3 int flags; vm_map_t user_map; int rv; - vm_sync_t sync_flags=0; + vm_sync_t sync_flags = 0; user_map = current_map(); addr = (mach_vm_offset_t) uap->addr; size = (mach_vm_size_t)uap->len; -#ifndef CONFIG_EMBEDDED +#ifndef CONFIG_EMBEDDED KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_msync) | DBG_FUNC_NONE), (uint32_t)(addr >> 32), (uint32_t)(size >> 32), 0, 0, 0); #endif if (addr & vm_map_page_mask(user_map)) { @@ -708,57 +728,62 @@ msync_nocancel(__unused proc_t p, struct msync_nocancel_args *uap, __unused int3 /* * We cannot support this properly without maintaining * list all mmaps done. 
Cannot use vm_map_entry as they could be - * split or coalesced by indepenedant actions. So instead of + * split or coalesced by indepenedant actions. So instead of * inaccurate results, lets just return error as invalid size * specified */ - return (EINVAL); /* XXX breaks posix apps */ + return EINVAL; /* XXX breaks posix apps */ } flags = uap->flags; /* disallow contradictory flags */ - if ((flags & (MS_SYNC|MS_ASYNC)) == (MS_SYNC|MS_ASYNC)) - return (EINVAL); - - if (flags & MS_KILLPAGES) - sync_flags |= VM_SYNC_KILLPAGES; - if (flags & MS_DEACTIVATE) - sync_flags |= VM_SYNC_DEACTIVATE; - if (flags & MS_INVALIDATE) - sync_flags |= VM_SYNC_INVALIDATE; - - if ( !(flags & (MS_KILLPAGES | MS_DEACTIVATE))) { - if (flags & MS_ASYNC) - sync_flags |= VM_SYNC_ASYNCHRONOUS; - else - sync_flags |= VM_SYNC_SYNCHRONOUS; + if ((flags & (MS_SYNC | MS_ASYNC)) == (MS_SYNC | MS_ASYNC)) { + return EINVAL; + } + + if (flags & MS_KILLPAGES) { + sync_flags |= VM_SYNC_KILLPAGES; + } + if (flags & MS_DEACTIVATE) { + sync_flags |= VM_SYNC_DEACTIVATE; + } + if (flags & MS_INVALIDATE) { + sync_flags |= VM_SYNC_INVALIDATE; + } + + if (!(flags & (MS_KILLPAGES | MS_DEACTIVATE))) { + if (flags & MS_ASYNC) { + sync_flags |= VM_SYNC_ASYNCHRONOUS; + } else { + sync_flags |= VM_SYNC_SYNCHRONOUS; + } } - sync_flags |= VM_SYNC_CONTIGUOUS; /* complain if holes */ + sync_flags |= VM_SYNC_CONTIGUOUS; /* complain if holes */ rv = mach_vm_msync(user_map, addr, size, sync_flags); switch (rv) { case KERN_SUCCESS: break; - case KERN_INVALID_ADDRESS: /* hole in region being sync'ed */ - return (ENOMEM); + case KERN_INVALID_ADDRESS: /* hole in region being sync'ed */ + return ENOMEM; case KERN_FAILURE: - return (EIO); + return EIO; default: - return (EINVAL); + return EINVAL; } - return (0); + return 0; } int munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval) { - mach_vm_offset_t user_addr; - mach_vm_size_t user_size; - kern_return_t result; - vm_map_t user_map; + mach_vm_offset_t user_addr; + mach_vm_size_t user_size; + kern_return_t result; + vm_map_t user_map; user_map = current_map(); user_addr = (mach_vm_offset_t) uap->addr; @@ -772,8 +797,9 @@ munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval) return EINVAL; } - if (user_addr + user_size < user_addr) - return(EINVAL); + if (user_addr + user_size < user_addr) { + return EINVAL; + } if (user_size == 0) { /* UNIX SPEC: size is 0, return EINVAL */ @@ -782,19 +808,19 @@ munmap(__unused proc_t p, struct munmap_args *uap, __unused int32_t *retval) result = mach_vm_deallocate(user_map, user_addr, user_size); if (result != KERN_SUCCESS) { - return(EINVAL); + return EINVAL; } - return(0); + return 0; } int mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval) { vm_prot_t prot; - mach_vm_offset_t user_addr; - mach_vm_size_t user_size; - kern_return_t result; - vm_map_t user_map; + mach_vm_offset_t user_addr; + mach_vm_size_t user_size; + kern_return_t result; + vm_map_t user_map; #if CONFIG_MACF int error; #endif @@ -812,23 +838,26 @@ mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval) /* UNIX SPEC: user address is not page-aligned, return EINVAL */ return EINVAL; } - + #ifdef notyet /* Hmm .. 
*/ #if defined(VM_PROT_READ_IS_EXEC) - if (prot & VM_PROT_READ) + if (prot & VM_PROT_READ) { prot |= VM_PROT_EXECUTE; + } #endif #endif /* notyet */ #if 3936456 - if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) + if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE)) { prot |= VM_PROT_READ; -#endif /* 3936456 */ + } +#endif /* 3936456 */ #if defined(__arm64__) - if (prot & VM_PROT_STRIP_READ) + if (prot & VM_PROT_STRIP_READ) { prot &= ~(VM_PROT_READ | VM_PROT_STRIP_READ); + } #endif #if CONFIG_MACF @@ -843,53 +872,54 @@ mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval) * e.g., making the stack executable. */ error = mac_proc_check_mprotect(p, user_addr, - user_size, prot); - if (error) - return (error); + user_size, prot); + if (error) { + return error; + } #endif - if(prot & VM_PROT_TRUSTED) { + if (prot & VM_PROT_TRUSTED) { #if CONFIG_DYNAMIC_CODE_SIGNING /* CODE SIGNING ENFORCEMENT - JIT support */ /* The special protection value VM_PROT_TRUSTED requests that we treat * this page as if it had a valid code signature. - * If this is enabled, there MUST be a MAC policy implementing the + * If this is enabled, there MUST be a MAC policy implementing the * mac_proc_check_mprotect() hook above. Otherwise, Codesigning will be * compromised because the check would always succeed and thusly any * process could sign dynamically. */ result = vm_map_sign( - user_map, + user_map, vm_map_trunc_page(user_addr, - vm_map_page_mask(user_map)), - vm_map_round_page(user_addr+user_size, - vm_map_page_mask(user_map))); + vm_map_page_mask(user_map)), + vm_map_round_page(user_addr + user_size, + vm_map_page_mask(user_map))); switch (result) { - case KERN_SUCCESS: - break; - case KERN_INVALID_ADDRESS: - /* UNIX SPEC: for an invalid address range, return ENOMEM */ - return ENOMEM; - default: - return EINVAL; + case KERN_SUCCESS: + break; + case KERN_INVALID_ADDRESS: + /* UNIX SPEC: for an invalid address range, return ENOMEM */ + return ENOMEM; + default: + return EINVAL; } #else return ENOTSUP; #endif } prot &= ~VM_PROT_TRUSTED; - + result = mach_vm_protect(user_map, user_addr, user_size, - FALSE, prot); + FALSE, prot); switch (result) { case KERN_SUCCESS: - return (0); + return 0; case KERN_PROTECTION_FAILURE: - return (EACCES); + return EACCES; case KERN_INVALID_ADDRESS: /* UNIX SPEC: for an invalid address range, return ENOMEM */ return ENOMEM; } - return (EINVAL); + return EINVAL; } @@ -899,8 +929,8 @@ minherit(__unused proc_t p, struct minherit_args *uap, __unused int32_t *retval) mach_vm_offset_t addr; mach_vm_size_t size; vm_inherit_t inherit; - vm_map_t user_map; - kern_return_t result; + vm_map_t user_map; + kern_return_t result; AUDIT_ARG(addr, uap->addr); AUDIT_ARG(len, uap->len); @@ -912,14 +942,14 @@ minherit(__unused proc_t p, struct minherit_args *uap, __unused int32_t *retval) user_map = current_map(); result = mach_vm_inherit(user_map, addr, size, - inherit); + inherit); switch (result) { case KERN_SUCCESS: - return (0); + return 0; case KERN_PROTECTION_FAILURE: - return (EACCES); + return EACCES; } - return (EINVAL); + return EINVAL; } int @@ -929,72 +959,72 @@ madvise(__unused proc_t p, struct madvise_args *uap, __unused int32_t *retval) mach_vm_offset_t start; mach_vm_size_t size; vm_behavior_t new_behavior; - kern_return_t result; + kern_return_t result; /* * Since this routine is only advisory, we default to conservative * behavior. 
*/ switch (uap->behav) { - case MADV_RANDOM: - new_behavior = VM_BEHAVIOR_RANDOM; - break; - case MADV_SEQUENTIAL: - new_behavior = VM_BEHAVIOR_SEQUENTIAL; - break; - case MADV_NORMAL: - new_behavior = VM_BEHAVIOR_DEFAULT; - break; - case MADV_WILLNEED: - new_behavior = VM_BEHAVIOR_WILLNEED; - break; - case MADV_DONTNEED: - new_behavior = VM_BEHAVIOR_DONTNEED; - break; - case MADV_FREE: - new_behavior = VM_BEHAVIOR_FREE; - break; - case MADV_ZERO_WIRED_PAGES: - new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES; - break; - case MADV_FREE_REUSABLE: - new_behavior = VM_BEHAVIOR_REUSABLE; - break; - case MADV_FREE_REUSE: - new_behavior = VM_BEHAVIOR_REUSE; - break; - case MADV_CAN_REUSE: - new_behavior = VM_BEHAVIOR_CAN_REUSE; - break; - case MADV_PAGEOUT: + case MADV_RANDOM: + new_behavior = VM_BEHAVIOR_RANDOM; + break; + case MADV_SEQUENTIAL: + new_behavior = VM_BEHAVIOR_SEQUENTIAL; + break; + case MADV_NORMAL: + new_behavior = VM_BEHAVIOR_DEFAULT; + break; + case MADV_WILLNEED: + new_behavior = VM_BEHAVIOR_WILLNEED; + break; + case MADV_DONTNEED: + new_behavior = VM_BEHAVIOR_DONTNEED; + break; + case MADV_FREE: + new_behavior = VM_BEHAVIOR_FREE; + break; + case MADV_ZERO_WIRED_PAGES: + new_behavior = VM_BEHAVIOR_ZERO_WIRED_PAGES; + break; + case MADV_FREE_REUSABLE: + new_behavior = VM_BEHAVIOR_REUSABLE; + break; + case MADV_FREE_REUSE: + new_behavior = VM_BEHAVIOR_REUSE; + break; + case MADV_CAN_REUSE: + new_behavior = VM_BEHAVIOR_CAN_REUSE; + break; + case MADV_PAGEOUT: #if MACH_ASSERT - new_behavior = VM_BEHAVIOR_PAGEOUT; - break; + new_behavior = VM_BEHAVIOR_PAGEOUT; + break; #else /* MACH_ASSERT */ - return ENOTSUP; + return ENOTSUP; #endif /* MACH_ASSERT */ - default: - return(EINVAL); + default: + return EINVAL; } start = (mach_vm_offset_t) uap->addr; size = (mach_vm_size_t) uap->len; - + #if __arm64__ if (start == 0 && size != 0 && (uap->behav == MADV_FREE || - uap->behav == MADV_FREE_REUSABLE)) { + uap->behav == MADV_FREE_REUSABLE)) { printf("** FOURK_COMPAT: %d[%s] " - "failing madvise(0x%llx,0x%llx,%s)\n", - p->p_pid, p->p_comm, start, size, - ((uap->behav == MADV_FREE_REUSABLE) - ? "MADV_FREE_REUSABLE" - : "MADV_FREE")); + "failing madvise(0x%llx,0x%llx,%s)\n", + p->p_pid, p->p_comm, start, size, + ((uap->behav == MADV_FREE_REUSABLE) + ? "MADV_FREE_REUSABLE" + : "MADV_FREE")); DTRACE_VM3(fourk_compat_madvise, - uint64_t, start, - uint64_t, size, - int, uap->behav); + uint64_t, start, + uint64_t, size, + int, uap->behav); return EINVAL; } #endif /* __arm64__ */ @@ -1007,7 +1037,7 @@ madvise(__unused proc_t p, struct madvise_args *uap, __unused int32_t *retval) return 0; case KERN_INVALID_ADDRESS: return EINVAL; - case KERN_NO_SPACE: + case KERN_NO_SPACE: return ENOMEM; } @@ -1022,13 +1052,13 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) user_addr_t vec = 0; int error = 0; int lastvecindex = 0; - int mincoreinfo=0; + int mincoreinfo = 0; int pqueryinfo = 0; unsigned int pqueryinfo_vec_size = 0; vm_page_info_basic_t info = NULL; mach_msg_type_number_t count = 0; char *kernel_vec = NULL; - unsigned int req_vec_size_pages = 0, cur_vec_size_pages = 0, vecindex = 0; + uint64_t req_vec_size_pages = 0, cur_vec_size_pages = 0, vecindex = 0; kern_return_t kr = KERN_SUCCESS; map = current_map(); @@ -1038,15 +1068,17 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) * mode. 
*/ first_addr = addr = vm_map_trunc_page(uap->addr, - vm_map_page_mask(map)); + vm_map_page_mask(map)); end = vm_map_round_page(uap->addr + uap->len, - vm_map_page_mask(map)); + vm_map_page_mask(map)); - if (end < addr) - return (EINVAL); + if (end < addr) { + return EINVAL; + } - if (end == addr) - return (0); + if (end == addr) { + return 0; + } /* * We are going to loop through the whole 'req_vec_size' pages @@ -1059,7 +1091,7 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) kernel_vec = (void*) _MALLOC(cur_vec_size_pages * sizeof(char), M_TEMP, M_WAITOK | M_ZERO); if (kernel_vec == NULL) { - return (ENOMEM); + return ENOMEM; } /* @@ -1072,20 +1104,19 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) if (info == NULL) { FREE(kernel_vec, M_TEMP); - return (ENOMEM); + return ENOMEM; } while (addr < end) { - cur_end = addr + (cur_vec_size_pages * PAGE_SIZE_64); count = VM_PAGE_INFO_BASIC_COUNT; kr = vm_map_page_range_info_internal(map, - addr, - cur_end, - VM_PAGE_INFO_BASIC, - (vm_page_info_t) info, - &count); + addr, + cur_end, + VM_PAGE_INFO_BASIC, + (vm_page_info_t) info, + &count); assert(kr == KERN_SUCCESS); @@ -1095,28 +1126,33 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) * up the pages elsewhere. */ lastvecindex = -1; - for( ; addr < cur_end; addr += PAGE_SIZE ) { - + for (; addr < cur_end; addr += PAGE_SIZE) { pqueryinfo = info[lastvecindex + 1].disposition; mincoreinfo = 0; - if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT) + if (pqueryinfo & VM_PAGE_QUERY_PAGE_PRESENT) { mincoreinfo |= MINCORE_INCORE; - if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF) + } + if (pqueryinfo & VM_PAGE_QUERY_PAGE_REF) { mincoreinfo |= MINCORE_REFERENCED; - if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY) + } + if (pqueryinfo & VM_PAGE_QUERY_PAGE_DIRTY) { mincoreinfo |= MINCORE_MODIFIED; - if (pqueryinfo & VM_PAGE_QUERY_PAGE_PAGED_OUT) + } + if (pqueryinfo & VM_PAGE_QUERY_PAGE_PAGED_OUT) { mincoreinfo |= MINCORE_PAGED_OUT; - if (pqueryinfo & VM_PAGE_QUERY_PAGE_COPIED) + } + if (pqueryinfo & VM_PAGE_QUERY_PAGE_COPIED) { mincoreinfo |= MINCORE_COPIED; - if ((pqueryinfo & VM_PAGE_QUERY_PAGE_EXTERNAL) == 0) + } + if ((pqueryinfo & VM_PAGE_QUERY_PAGE_EXTERNAL) == 0) { mincoreinfo |= MINCORE_ANONYMOUS; + } /* * calculate index into user supplied byte vector */ - vecindex = (addr - first_addr)>> PAGE_SHIFT; + vecindex = (addr - first_addr) >> PAGE_SHIFT; kernel_vec[vecindex] = (char)mincoreinfo; lastvecindex = vecindex; } @@ -1147,10 +1183,10 @@ mincore(__unused proc_t p, struct mincore_args *uap, __unused int32_t *retval) FREE(info, M_TEMP); if (error) { - return (EFAULT); + return EFAULT; } - return (0); + return 0; } int @@ -1159,7 +1195,7 @@ mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retvalval) vm_map_t user_map; vm_map_offset_t addr; vm_map_size_t size, pageoff; - kern_return_t result; + kern_return_t result; AUDIT_ARG(addr, uap->addr); AUDIT_ARG(len, uap->len); @@ -1168,28 +1204,31 @@ mlock(__unused proc_t p, struct mlock_args *uap, __unused int32_t *retvalval) size = (vm_map_size_t)uap->len; /* disable wrap around */ - if (addr + size < addr) - return (EINVAL); + if (addr + size < addr) { + return EINVAL; + } - if (size == 0) - return (0); + if (size == 0) { + return 0; + } user_map = current_map(); pageoff = (addr & vm_map_page_mask(user_map)); addr -= pageoff; - size = vm_map_round_page(size+pageoff, vm_map_page_mask(user_map)); + size = vm_map_round_page(size + pageoff, 
vm_map_page_mask(user_map)); /* have to call vm_map_wire directly to pass "I don't know" protections */ - result = vm_map_wire_kernel(user_map, addr, addr+size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK, TRUE); + result = vm_map_wire_kernel(user_map, addr, addr + size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK, TRUE); - if (result == KERN_RESOURCE_SHORTAGE) + if (result == KERN_RESOURCE_SHORTAGE) { return EAGAIN; - else if (result == KERN_PROTECTION_FAILURE) + } else if (result == KERN_PROTECTION_FAILURE) { return EACCES; - else if (result != KERN_SUCCESS) + } else if (result != KERN_SUCCESS) { return ENOMEM; + } - return 0; /* KERN_SUCCESS */ + return 0; /* KERN_SUCCESS */ } int @@ -1198,7 +1237,7 @@ munlock(__unused proc_t p, struct munlock_args *uap, __unused int32_t *retval) mach_vm_offset_t addr; mach_vm_size_t size; vm_map_t user_map; - kern_return_t result; + kern_return_t result; AUDIT_ARG(addr, uap->addr); AUDIT_ARG(addr, uap->len); @@ -1209,152 +1248,155 @@ munlock(__unused proc_t p, struct munlock_args *uap, __unused int32_t *retval) /* JMM - need to remove all wirings by spec - this just removes one */ result = mach_vm_wire_kernel(host_priv_self(), user_map, addr, size, VM_PROT_NONE, VM_KERN_MEMORY_MLOCK); - return (result == KERN_SUCCESS ? 0 : ENOMEM); + return result == KERN_SUCCESS ? 0 : ENOMEM; } int mlockall(__unused proc_t p, __unused struct mlockall_args *uap, __unused int32_t *retval) { - return (ENOSYS); + return ENOSYS; } int munlockall(__unused proc_t p, __unused struct munlockall_args *uap, __unused int32_t *retval) { - return(ENOSYS); + return ENOSYS; } #if CONFIG_CODE_DECRYPTION int mremap_encrypted(__unused struct proc *p, struct mremap_encrypted_args *uap, __unused int32_t *retval) { - mach_vm_offset_t user_addr; - mach_vm_size_t user_size; - kern_return_t result; - vm_map_t user_map; - uint32_t cryptid; - cpu_type_t cputype; - cpu_subtype_t cpusubtype; - pager_crypt_info_t crypt_info; - const char * cryptname = 0; - char *vpath; - int len, ret; - struct proc_regioninfo_internal pinfo; - vnode_t vp; - uintptr_t vnodeaddr; - uint32_t vid; - - AUDIT_ARG(addr, uap->addr); - AUDIT_ARG(len, uap->len); - - user_map = current_map(); - user_addr = (mach_vm_offset_t) uap->addr; - user_size = (mach_vm_size_t) uap->len; - - cryptid = uap->cryptid; - cputype = uap->cputype; - cpusubtype = uap->cpusubtype; - - if (user_addr & vm_map_page_mask(user_map)) { - /* UNIX SPEC: user address is not page-aligned, return EINVAL */ - return EINVAL; - } - - switch(cryptid) { - case 0: - /* not encrypted, just an empty load command */ - return 0; - case 1: - cryptname="com.apple.unfree"; - break; - case 0x10: - /* some random cryptid that you could manually put into - * your binary if you want NULL */ - cryptname="com.apple.null"; - break; - default: - return EINVAL; - } - - if (NULL == text_crypter_create) return ENOTSUP; - - ret = fill_procregioninfo_onlymappedvnodes( proc_task(p), user_addr, &pinfo, &vnodeaddr, &vid); - if (ret == 0 || !vnodeaddr) { - /* No really, this returns 0 if the memory address is not backed by a file */ - return (EINVAL); - } - - vp = (vnode_t)vnodeaddr; - if ((vnode_getwithvid(vp, vid)) == 0) { - MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if(vpath == NULL) { - vnode_put(vp); - return (ENOMEM); - } - - len = MAXPATHLEN; - ret = vn_getpath(vp, vpath, &len); - if(ret) { - FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); - vnode_put(vp); - return (ret); - } - - vnode_put(vp); - } else { - return (EINVAL); - } + mach_vm_offset_t user_addr; + mach_vm_size_t user_size; + 
kern_return_t result; + vm_map_t user_map; + uint32_t cryptid; + cpu_type_t cputype; + cpu_subtype_t cpusubtype; + pager_crypt_info_t crypt_info; + const char * cryptname = 0; + char *vpath; + int len, ret; + struct proc_regioninfo_internal pinfo; + vnode_t vp; + uintptr_t vnodeaddr; + uint32_t vid; + + AUDIT_ARG(addr, uap->addr); + AUDIT_ARG(len, uap->len); + + user_map = current_map(); + user_addr = (mach_vm_offset_t) uap->addr; + user_size = (mach_vm_size_t) uap->len; + + cryptid = uap->cryptid; + cputype = uap->cputype; + cpusubtype = uap->cpusubtype; + + if (user_addr & vm_map_page_mask(user_map)) { + /* UNIX SPEC: user address is not page-aligned, return EINVAL */ + return EINVAL; + } + + switch (cryptid) { + case 0: + /* not encrypted, just an empty load command */ + return 0; + case 1: + cryptname = "com.apple.unfree"; + break; + case 0x10: + /* some random cryptid that you could manually put into + * your binary if you want NULL */ + cryptname = "com.apple.null"; + break; + default: + return EINVAL; + } + + if (NULL == text_crypter_create) { + return ENOTSUP; + } + + ret = fill_procregioninfo_onlymappedvnodes( proc_task(p), user_addr, &pinfo, &vnodeaddr, &vid); + if (ret == 0 || !vnodeaddr) { + /* No really, this returns 0 if the memory address is not backed by a file */ + return EINVAL; + } + + vp = (vnode_t)vnodeaddr; + if ((vnode_getwithvid(vp, vid)) == 0) { + MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + if (vpath == NULL) { + vnode_put(vp); + return ENOMEM; + } + + len = MAXPATHLEN; + ret = vn_getpath(vp, vpath, &len); + if (ret) { + FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); + vnode_put(vp); + return ret; + } + + vnode_put(vp); + } else { + return EINVAL; + } #if 0 - kprintf("%s vpath %s cryptid 0x%08x cputype 0x%08x cpusubtype 0x%08x range 0x%016llx size 0x%016llx\n", - __FUNCTION__, vpath, cryptid, cputype, cpusubtype, (uint64_t)user_addr, (uint64_t)user_size); + kprintf("%s vpath %s cryptid 0x%08x cputype 0x%08x cpusubtype 0x%08x range 0x%016llx size 0x%016llx\n", + __FUNCTION__, vpath, cryptid, cputype, cpusubtype, (uint64_t)user_addr, (uint64_t)user_size); #endif - /* set up decrypter first */ - crypt_file_data_t crypt_data = { - .filename = vpath, - .cputype = cputype, - .cpusubtype = cpusubtype }; - result = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data); + /* set up decrypter first */ + crypt_file_data_t crypt_data = { + .filename = vpath, + .cputype = cputype, + .cpusubtype = cpusubtype + }; + result = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data); #if VM_MAP_DEBUG_APPLE_PROTECT - if (vm_map_debug_apple_protect) { - printf("APPLE_PROTECT: %d[%s] map %p [0x%llx:0x%llx] %s(%s) -> 0x%x\n", - p->p_pid, p->p_comm, - user_map, - (uint64_t) user_addr, - (uint64_t) (user_addr + user_size), - __FUNCTION__, vpath, result); - } + if (vm_map_debug_apple_protect) { + printf("APPLE_PROTECT: %d[%s] map %p [0x%llx:0x%llx] %s(%s) -> 0x%x\n", + p->p_pid, p->p_comm, + user_map, + (uint64_t) user_addr, + (uint64_t) (user_addr + user_size), + __FUNCTION__, vpath, result); + } #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ - FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); - - if(result) { - printf("%s: unable to create decrypter %s, kr=%d\n", - __FUNCTION__, cryptname, result); - if (result == kIOReturnNotPrivileged) { - /* text encryption returned decryption failure */ - return (EPERM); - } else { - return (ENOMEM); - } - } - - /* now remap using the decrypter */ - vm_object_offset_t crypto_backing_offset; - crypto_backing_offset = -1; /* i.e. 
use map entry's offset */ - result = vm_map_apple_protected(user_map, - user_addr, - user_addr+user_size, - crypto_backing_offset, - &crypt_info); - if (result) { - printf("%s: mapping failed with %d\n", __FUNCTION__, result); - } - - if (result) { - return (EPERM); - } - return 0; + FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); + + if (result) { + printf("%s: unable to create decrypter %s, kr=%d\n", + __FUNCTION__, cryptname, result); + if (result == kIOReturnNotPrivileged) { + /* text encryption returned decryption failure */ + return EPERM; + } else { + return ENOMEM; + } + } + + /* now remap using the decrypter */ + vm_object_offset_t crypto_backing_offset; + crypto_backing_offset = -1; /* i.e. use map entry's offset */ + result = vm_map_apple_protected(user_map, + user_addr, + user_addr + user_size, + crypto_backing_offset, + &crypt_info); + if (result) { + printf("%s: mapping failed with %d\n", __FUNCTION__, result); + } + + if (result) { + return EPERM; + } + return 0; } #endif /* CONFIG_CODE_DECRYPTION */ diff --git a/bsd/kern/kern_newsysctl.c b/bsd/kern/kern_newsysctl.c index 0381325a9..746752d34 100644 --- a/bsd/kern/kern_newsysctl.c +++ b/bsd/kern/kern_newsysctl.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ * * @@ -96,7 +96,7 @@ lck_mtx_t * sysctl_unlocked_node_lock = NULL; #undef STATIC #endif #if 0 -#define STATIC +#define STATIC #else #define STATIC static #endif @@ -104,32 +104,32 @@ lck_mtx_t * sysctl_unlocked_node_lock = NULL; /* forward declarations of static functions */ STATIC void sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i); STATIC int sysctl_sysctl_debug(struct sysctl_oid *oidp, void *arg1, - int arg2, struct sysctl_req *req); + int arg2, struct sysctl_req *req); STATIC int sysctl_sysctl_name(struct sysctl_oid *oidp, void *arg1, - int arg2, struct sysctl_req *req); -STATIC int sysctl_sysctl_next_ls (struct sysctl_oid_list *lsp, - int *name, u_int namelen, int *next, int *len, int level, - struct sysctl_oid **oidpp); + int arg2, struct sysctl_req *req); +STATIC int sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, + int *name, u_int namelen, int *next, int *len, int level, + struct sysctl_oid **oidpp); STATIC int sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l); STATIC int sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l); -STATIC int name2oid (char *name, int *oid, u_int *len); +STATIC int name2oid(char *name, int *oid, u_int *len); STATIC int sysctl_sysctl_name2oid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_sysctl_next(struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req); + struct sysctl_req *req); STATIC int sysctl_sysctl_oidfmt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_old_user(struct sysctl_req *req, const void *p, size_t l); STATIC int sysctl_new_user(struct sysctl_req *req, void *p, size_t l); STATIC void sysctl_create_user_req(struct sysctl_req *req, struct proc *p, user_addr_t oldp, - size_t oldlen, user_addr_t newp, size_t newlen); + size_t oldlen, user_addr_t newp, size_t newlen); STATIC int sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestring, size_t namestringlen, int *name, u_int namelen, struct sysctl_req *req); -int kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); -int kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -int userland_sysctl(boolean_t string_is_canonical, - char *namestring, size_t namestringlen, - int *name, u_int namelen, struct sysctl_req *req, - size_t *retval); +int kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldlenp, void *new, size_t newlen); +int kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +int userland_sysctl(boolean_t string_is_canonical, + char *namestring, size_t namestringlen, + int *name, u_int namelen, struct sysctl_req *req, + size_t *retval); struct sysctl_oid_list sysctl__children; /* root list */ @@ -160,9 +160,9 @@ sysctl_register_oid(struct sysctl_oid *new_oidp) * XXX: will subject us to use-after-free by other consumers. */ MALLOC(oidp, struct sysctl_oid *, sizeof(*oidp), M_TEMP, M_WAITOK | M_ZERO); - if (oidp == NULL) - return; /* reject: no memory */ - + if (oidp == NULL) { + return; /* reject: no memory */ + } /* * Copy the structure only through the oid_fmt field, which * is the last field in a non-OID2 OID structure. 
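sysctl_register_oid() above copies pre-OID2 registrations and auto-assigns numbers past OID_AUTO_START under the geometry lock; OIDs marked CTLFLAG_NOAUTO (like cputhreadtype earlier in this patch) are skipped by the linker-set walk and must be registered by hand. A kernel-side sketch of that manual path; my_tunable and the start/stop hooks are hypothetical, not part of this patch:

    #include <sys/sysctl.h>

    static int my_tunable = 0;

    /* CTLFLAG_NOAUTO keeps sysctl_register_set() from picking this OID up */
    SYSCTL_INT(_debug, OID_AUTO, my_tunable,
        CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_LOCKED,
        &my_tunable, 0, "hypothetical example tunable");

    /* SYSCTL_INT names the generated struct sysctl_oid sysctl__debug_my_tunable */
    void
    my_start(void)
    {
            sysctl_register_oid(&sysctl__debug_my_tunable);
    }

    void
    my_stop(void)
    {
            sysctl_unregister_oid(&sysctl__debug_my_tunable);
    }
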
@@ -179,7 +179,7 @@ sysctl_register_oid(struct sysctl_oid *new_oidp) oidp = new_oidp; break; default: - return; /* rejects unknown version */ + return; /* rejects unknown version */ } } @@ -195,16 +195,18 @@ sysctl_register_oid(struct sysctl_oid *new_oidp) /* First, find the highest oid in the parent list >OID_AUTO_START-1 */ n = OID_AUTO_START; SLIST_FOREACH(p, parent, oid_link) { - if (p->oid_number > n) + if (p->oid_number > n) { n = p->oid_number; + } } oidp->oid_number = n + 1; /* * Reflect the number in an llocated OID into the template * of the caller for sysctl_unregister_oid() compares. */ - if (oidp != new_oidp) + if (oidp != new_oidp) { new_oidp->oid_number = oidp->oid_number; + } } @@ -213,14 +215,16 @@ sysctl_register_oid(struct sysctl_oid *new_oidp) */ q = NULL; SLIST_FOREACH(p, parent, oid_link) { - if (oidp->oid_number < p->oid_number) + if (oidp->oid_number < p->oid_number) { break; + } q = p; } - if (q) + if (q) { SLIST_INSERT_AFTER(q, oidp, oid_link); - else + } else { SLIST_INSERT_HEAD(parent, oidp, oid_link); + } /* Release the write lock */ lck_rw_unlock_exclusive(sysctl_geometry_lock); @@ -229,8 +233,8 @@ sysctl_register_oid(struct sysctl_oid *new_oidp) void sysctl_unregister_oid(struct sysctl_oid *oidp) { - struct sysctl_oid *removed_oidp = NULL; /* OID removed from tree */ - struct sysctl_oid *old_oidp = NULL; /* OID compatibility copy */ + struct sysctl_oid *removed_oidp = NULL; /* OID removed from tree */ + struct sysctl_oid *old_oidp = NULL; /* OID compatibility copy */ /* Get the write lock to modify the geometry */ lck_rw_lock_exclusive(sysctl_geometry_lock); @@ -243,9 +247,9 @@ sysctl_unregister_oid(struct sysctl_oid *oidp) * normally and free the memory. */ SLIST_FOREACH(old_oidp, oidp->oid_parent, oid_link) { - if (!memcmp(&oidp->oid_number, &old_oidp->oid_number, (offsetof(struct sysctl_oid, oid_descr)-offsetof(struct sysctl_oid, oid_number)))) { - break; - } + if (!memcmp(&oidp->oid_number, &old_oidp->oid_number, (offsetof(struct sysctl_oid, oid_descr) - offsetof(struct sysctl_oid, oid_number)))) { + break; + } } if (old_oidp != NULL) { SLIST_REMOVE(old_oidp->oid_parent, old_oidp, sysctl_oid, oid_link); @@ -260,8 +264,8 @@ sysctl_unregister_oid(struct sysctl_oid *oidp) removed_oidp = oidp; break; default: - /* XXX: Can't happen; probably tree coruption.*/ - break; /* rejects unknown version */ + /* XXX: Can't happen; probably tree coruption.*/ + break; /* rejects unknown version */ } } @@ -274,7 +278,7 @@ sysctl_unregister_oid(struct sysctl_oid *oidp) * * Note: oidp could be NULL if it wasn't found. 
*/ - while(removed_oidp && removed_oidp->oid_refcnt) { + while (removed_oidp && removed_oidp->oid_refcnt) { lck_rw_sleep(sysctl_geometry_lock, LCK_SLEEP_EXCLUSIVE, &removed_oidp->oid_refcnt, THREAD_UNINT); } @@ -298,7 +302,7 @@ sysctl_register_set(const char *set) LINKER_SET_FOREACH(oidpp, struct sysctl_oid **, set) { oidp = *oidpp; if (!(oidp->oid_kind & CTLFLAG_NOAUTO)) { - sysctl_register_oid(oidp); + sysctl_register_oid(oidp); } } } @@ -311,7 +315,7 @@ sysctl_unregister_set(const char *set) LINKER_SET_FOREACH(oidpp, struct sysctl_oid **, set) { oidp = *oidpp; if (!(oidp->oid_kind & CTLFLAG_NOAUTO)) { - sysctl_unregister_oid(oidp); + sysctl_unregister_oid(oidp); } } } @@ -356,11 +360,14 @@ sysctl_early_init(void) */ int -sysctl_io_number(struct sysctl_req *req, long long bigValue, size_t valueSize, void *pValue, int *changed) { - int smallValue; - int error; +sysctl_io_number(struct sysctl_req *req, long long bigValue, size_t valueSize, void *pValue, int *changed) +{ + int smallValue; + int error; - if (changed) *changed = 0; + if (changed) { + *changed = 0; + } /* * Handle the various combinations of caller buffer size and @@ -372,45 +379,51 @@ sysctl_io_number(struct sysctl_req *req, long long bigValue, size_t valueSize, v /* 32 bit value expected or 32 bit buffer offered */ if (((valueSize == sizeof(int)) || ((req->oldlen == sizeof(int)) && (valueSize == sizeof(long long)))) - && (req->oldptr)) { + && (req->oldptr)) { smallValue = (int)bigValue; - if ((long long)smallValue != bigValue) - return(ERANGE); + if ((long long)smallValue != bigValue) { + return ERANGE; + } error = SYSCTL_OUT(req, &smallValue, sizeof(smallValue)); } else { /* any other case is either size-equal or a bug */ error = SYSCTL_OUT(req, &bigValue, valueSize); } /* error or nothing to set */ - if (error || !req->newptr) - return(error); + if (error || !req->newptr) { + return error; + } /* set request for constant */ - if (pValue == NULL) - return(EPERM); + if (pValue == NULL) { + return EPERM; + } /* set request needs to convert? */ if ((req->newlen == sizeof(int)) && (valueSize == sizeof(long long))) { /* new value is 32 bits, upconvert to 64 bits */ error = SYSCTL_IN(req, &smallValue, sizeof(smallValue)); - if (!error) + if (!error) { *(long long *)pValue = (long long)smallValue; + } } else if ((req->newlen == sizeof(long long)) && (valueSize == sizeof(int))) { /* new value is 64 bits, downconvert to 32 bits and range check */ error = SYSCTL_IN(req, &bigValue, sizeof(bigValue)); if (!error) { smallValue = (int)bigValue; - if ((long long)smallValue != bigValue) - return(ERANGE); + if ((long long)smallValue != bigValue) { + return ERANGE; + } *(int *)pValue = smallValue; } } else { /* sizes match, just copy in */ error = SYSCTL_IN(req, pValue, valueSize); } - if (!error && changed) + if (!error && changed) { *changed = 1; - return(error); + } + return error; } int @@ -418,16 +431,18 @@ sysctl_io_string(struct sysctl_req *req, char *pValue, size_t valueSize, int tru { int error; - if (changed) *changed = 0; + if (changed) { + *changed = 0; + } - if (trunc && req->oldptr && req->oldlen && (req->oldlen<strlen(pValue) + 1)) { + if (trunc && req->oldptr && req->oldlen && (req->oldlen < strlen(pValue) + 1)) { /* If trunc != 0, if you give it a too small (but larger than * 0 bytes) buffer, instead of returning ENOMEM, it truncates the * returned string to the buffer size. This preserves the semantics * of some library routines implemented via sysctl, which truncate * their returned data, rather than simply returning an error.
The * returned string is always NUL terminated. */ - error = SYSCTL_OUT(req, pValue, req->oldlen-1); + error = SYSCTL_OUT(req, pValue, req->oldlen - 1); if (!error) { char c = 0; error = SYSCTL_OUT(req, &c, 1); @@ -438,50 +453,59 @@ sysctl_io_string(struct sysctl_req *req, char *pValue, size_t valueSize, int tru } /* error or no new value */ - if (error || !req->newptr) - return(error); + if (error || !req->newptr) { + return error; + } /* attempt to set read-only value */ - if (valueSize == 0) - return(EPERM); + if (valueSize == 0) { + return EPERM; + } /* make sure there's room for the new string */ - if (req->newlen >= valueSize) - return(EINVAL); + if (req->newlen >= valueSize) { + return EINVAL; + } /* copy the string in and force NUL termination */ error = SYSCTL_IN(req, pValue, req->newlen); pValue[req->newlen] = '\0'; - if (!error && changed) + if (!error && changed) { *changed = 1; - return(error); + } + return error; } -int sysctl_io_opaque(struct sysctl_req *req,void *pValue, size_t valueSize, int *changed) +int +sysctl_io_opaque(struct sysctl_req *req, void *pValue, size_t valueSize, int *changed) { int error; - if (changed) *changed = 0; + if (changed) { + *changed = 0; + } /* Copy blob out */ error = SYSCTL_OUT(req, pValue, valueSize); /* error or nothing to set */ - if (error || !req->newptr) - return(error); + if (error || !req->newptr) { + return error; + } error = SYSCTL_IN(req, pValue, valueSize); - if (!error && changed) + if (!error && changed) { *changed = 1; - return(error); + } + return error; } /* * "Staff-functions" * - * These functions implement a presently undocumented interface + * These functions implement a presently undocumented interface * used by the sysctl program to walk the tree, and get the type * so it can print the value. * This interface is under work and consideration, and should probably @@ -528,35 +552,35 @@ sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i) struct sysctl_oid *oidp; SLIST_FOREACH(oidp, l, oid_link) { - - for (k=0; k<i; k++) - printf(" "); + for (k = 0; k < i; k++) { + printf(" "); + } printf("%d %s ", oidp->oid_number, oidp->oid_name); printf("%c%c%c", - oidp->oid_kind & CTLFLAG_LOCKED ? 'L':' ', - oidp->oid_kind & CTLFLAG_RD ? 'R':' ', - oidp->oid_kind & CTLFLAG_WR ? 'W':' '); + oidp->oid_kind & CTLFLAG_LOCKED ? 'L':' ', + oidp->oid_kind & CTLFLAG_RD ? 'R':' ', + oidp->oid_kind & CTLFLAG_WR ?
'W':' '); - if (oidp->oid_handler) + if (oidp->oid_handler) { printf(" *Handler"); + } switch (oidp->oid_kind & CTLTYPE) { - case CTLTYPE_NODE: - printf(" Node\n"); - if (!oidp->oid_handler) { - sysctl_sysctl_debug_dump_node( - oidp->oid_arg1, i+2); - } - break; - case CTLTYPE_INT: printf(" Int\n"); break; - case CTLTYPE_STRING: printf(" String\n"); break; - case CTLTYPE_QUAD: printf(" Quad\n"); break; - case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break; - default: printf("\n"); + case CTLTYPE_NODE: + printf(" Node\n"); + if (!oidp->oid_handler) { + sysctl_sysctl_debug_dump_node( + oidp->oid_arg1, i + 2); + } + break; + case CTLTYPE_INT: printf(" Int\n"); break; + case CTLTYPE_STRING: printf(" String\n"); break; + case CTLTYPE_QUAD: printf(" Quad\n"); break; + case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break; + default: printf("\n"); } - } } @@ -579,7 +603,7 @@ sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i) */ STATIC int sysctl_sysctl_debug(__unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, __unused struct sysctl_req *req) + __unused int arg2, __unused struct sysctl_req *req) { lck_rw_lock_shared(sysctl_geometry_lock); sysctl_sysctl_debug_dump_node(&sysctl__children, 0); @@ -587,8 +611,8 @@ sysctl_sysctl_debug(__unused struct sysctl_oid *oidp, __unused void *arg1, return ENOENT; } -SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_sysctl_debug, "-", ""); +SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_sysctl_debug, "-", ""); /* * sysctl_sysctl_name @@ -638,7 +662,7 @@ SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD | CTLFLAG_LOCKED, */ STATIC int sysctl_sysctl_name(__unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { int *name = (int *) arg1; u_int namelen = arg2; @@ -650,14 +674,16 @@ sysctl_sysctl_name(__unused struct sysctl_oid *oidp, void *arg1, int arg2, lck_rw_lock_shared(sysctl_geometry_lock); while (namelen) { if (!lsp) { - snprintf(tempbuf,sizeof(tempbuf),"%d",*name); - if (req->oldidx) + snprintf(tempbuf, sizeof(tempbuf), "%d", *name); + if (req->oldidx) { error = SYSCTL_OUT(req, ".", 1); - if (!error) + } + if (!error) { error = SYSCTL_OUT(req, tempbuf, strlen(tempbuf)); + } if (error) { lck_rw_done(sysctl_geometry_lock); - return (error); + return error; } namelen--; name++; @@ -665,27 +691,32 @@ sysctl_sysctl_name(__unused struct sysctl_oid *oidp, void *arg1, int arg2, } lsp2 = 0; SLIST_FOREACH(oid, lsp, oid_link) { - if (oid->oid_number != *name) + if (oid->oid_number != *name) { continue; + } - if (req->oldidx) + if (req->oldidx) { error = SYSCTL_OUT(req, ".", 1); - if (!error) + } + if (!error) { error = SYSCTL_OUT(req, oid->oid_name, - strlen(oid->oid_name)); + strlen(oid->oid_name)); + } if (error) { lck_rw_done(sysctl_geometry_lock); - return (error); + return error; } namelen--; name++; - if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE) + if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE) { break; + } - if (oid->oid_handler) + if (oid->oid_handler) { break; + } lsp2 = (struct sysctl_oid_list *)oid->oid_arg1; break; @@ -693,7 +724,7 @@ sysctl_sysctl_name(__unused struct sysctl_oid *oidp, void *arg1, int arg2, lsp = lsp2; } lck_rw_done(sysctl_geometry_lock); - return (SYSCTL_OUT(req, "", 1)); + return SYSCTL_OUT(req, "", 1); } SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_sysctl_name, ""); @@ -731,8 +762,8 @@ SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | 
CTLFLAG_LOCKED, sysctl_sysctl_name, " * we STRONGLY discourage these types of handlers */ STATIC int -sysctl_sysctl_next_ls (struct sysctl_oid_list *lsp, int *name, u_int namelen, - int *next, int *len, int level, struct sysctl_oid **oidpp) +sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen, + int *next, int *len, int level, struct sysctl_oid **oidpp) { struct sysctl_oid *oidp; @@ -742,48 +773,59 @@ sysctl_sysctl_next_ls (struct sysctl_oid_list *lsp, int *name, u_int namelen, *oidpp = oidp; if (!namelen) { - if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) + if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) { return 0; - if (oidp->oid_handler) + } + if (oidp->oid_handler) { /* We really should call the handler here...*/ return 0; + } lsp = (struct sysctl_oid_list *)oidp->oid_arg1; - if (!SLIST_FIRST(lsp)) + if (!SLIST_FIRST(lsp)) { /* This node had no children - skip it! */ continue; + } - if (!sysctl_sysctl_next_ls (lsp, 0, 0, next+1, - len, level+1, oidpp)) + if (!sysctl_sysctl_next_ls(lsp, 0, 0, next + 1, + len, level + 1, oidpp)) { return 0; + } goto next; } - if (oidp->oid_number < *name) + if (oidp->oid_number < *name) { continue; + } if (oidp->oid_number > *name) { - if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) + if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) { return 0; - if (oidp->oid_handler) + } + if (oidp->oid_handler) { return 0; + } lsp = (struct sysctl_oid_list *)oidp->oid_arg1; - if (!sysctl_sysctl_next_ls (lsp, name+1, namelen-1, - next+1, len, level+1, oidpp)) - return (0); + if (!sysctl_sysctl_next_ls(lsp, name + 1, namelen - 1, + next + 1, len, level + 1, oidpp)) { + return 0; + } goto next; } - if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) + if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) { continue; + } - if (oidp->oid_handler) + if (oidp->oid_handler) { continue; + } lsp = (struct sysctl_oid_list *)oidp->oid_arg1; - if (!sysctl_sysctl_next_ls (lsp, name+1, namelen-1, next+1, - len, level+1, oidpp)) - return (0); - next: + if (!sysctl_sysctl_next_ls(lsp, name + 1, namelen - 1, next + 1, + len, level + 1, oidpp)) { + return 0; + } +next: namelen = 1; *len = level; } @@ -830,7 +872,7 @@ sysctl_sysctl_next_ls (struct sysctl_oid_list *lsp, int *name, u_int namelen, */ STATIC int sysctl_sysctl_next(__unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { int *name = (int *) arg1; u_int namelen = arg2; @@ -840,12 +882,13 @@ sysctl_sysctl_next(__unused struct sysctl_oid *oidp, void *arg1, int arg2, int newoid[CTL_MAXNAME] = {}; lck_rw_lock_shared(sysctl_geometry_lock); - i = sysctl_sysctl_next_ls (lsp, name, namelen, newoid, &j, 1, &oid); + i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid); lck_rw_done(sysctl_geometry_lock); - if (i) + if (i) { return ENOENT; - error = SYSCTL_OUT(req, newoid, j * sizeof (int)); - return (error); + } + error = SYSCTL_OUT(req, newoid, j * sizeof(int)); + return error; } SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_sysctl_next, ""); @@ -870,27 +913,31 @@ SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_sysctl_next, " * Locks: Assumes sysctl_geometry_lock is held prior to calling */ STATIC int -name2oid (char *name, int *oid, u_int *len) +name2oid(char *name, int *oid, u_int *len) { int i; struct sysctl_oid *oidp; struct sysctl_oid_list *lsp = &sysctl__children; char *p; - if (!*name) + if (!*name) { return ENOENT; + } - p = name + strlen(name) - 1 ; - if (*p == '.') + p = name + strlen(name) - 1; + if (*p == 
'.') { *p = '\0'; + } *len = 0; - for (p = name; *p && *p != '.'; p++) + for (p = name; *p && *p != '.'; p++) { ; + } i = *p; - if (i == '.') + if (i == '.') { *p = '\0'; + } oidp = SLIST_FIRST(lsp); @@ -903,24 +950,28 @@ name2oid (char *name, int *oid, u_int *len) (*len)++; if (!i) { - return (0); + return 0; } - if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) + if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE) { break; + } - if (oidp->oid_handler) + if (oidp->oid_handler) { break; + } lsp = (struct sysctl_oid_list *)oidp->oid_arg1; oidp = SLIST_FIRST(lsp); *p = i; /* restore */ - name = p+1; - for (p = name; *p && *p != '.'; p++) - ; + name = p + 1; + for (p = name; *p && *p != '.'; p++) { + ; + } i = *p; - if (i == '.') + if (i == '.') { *p = '\0'; + } } return ENOENT; } @@ -966,28 +1017,31 @@ name2oid (char *name, int *oid, u_int *len) */ STATIC int sysctl_sysctl_name2oid(__unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { char *p; int error, oid[CTL_MAXNAME] = {}; - u_int len = 0; /* set by name2oid() */ + u_int len = 0; /* set by name2oid() */ - if (req->newlen < 1) + if (req->newlen < 1) { return ENOENT; - if (req->newlen >= MAXPATHLEN) /* XXX arbitrary, undocumented */ - return (ENAMETOOLONG); + } + if (req->newlen >= MAXPATHLEN) { /* XXX arbitrary, undocumented */ + return ENAMETOOLONG; + } - MALLOC(p, char *,req->newlen+1, M_TEMP, M_WAITOK); - if (!p) - return ENOMEM; + MALLOC(p, char *, req->newlen + 1, M_TEMP, M_WAITOK); + if (!p) { + return ENOMEM; + } error = SYSCTL_IN(req, p, req->newlen); if (error) { FREE(p, M_TEMP); - return (error); + return error; } - p [req->newlen] = '\0'; + p[req->newlen] = '\0'; /* * Note: We acquire and release the geometry lock here to @@ -999,15 +1053,16 @@ sysctl_sysctl_name2oid(__unused struct sysctl_oid *oidp, __unused void *arg1, FREE(p, M_TEMP); - if (error) - return (error); + if (error) { + return error; + } error = SYSCTL_OUT(req, oid, len * sizeof *oid); - return (error); + return error; } -SYSCTL_PROC(_sysctl, 3, name2oid, CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, - sysctl_sysctl_name2oid, "I", ""); +SYSCTL_PROC(_sysctl, 3, name2oid, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, 0, 0, + sysctl_sysctl_name2oid, "I", ""); /* * sysctl_sysctl_oidfmt @@ -1048,10 +1103,10 @@ SYSCTL_PROC(_sysctl, 3, name2oid, CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_KERN | CTLF */ STATIC int sysctl_sysctl_oidfmt(__unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { int *name = (int *) arg1; - int error = ENOENT; /* default error: not found */ + int error = ENOENT; /* default error: not found */ u_int namelen = arg2; u_int indx; struct sysctl_oid *oid; @@ -1065,10 +1120,12 @@ sysctl_sysctl_oidfmt(__unused struct sysctl_oid *oidp, void *arg1, int arg2, if (oid->oid_number == name[indx]) { indx++; if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) { - if (oid->oid_handler) + if (oid->oid_handler) { goto found; - if (indx == namelen) + } + if (indx == namelen) { goto found; + } lsp = (struct sysctl_oid_list *)oid->oid_arg1; oid = SLIST_FIRST(lsp); } else { @@ -1086,16 +1143,18 @@ sysctl_sysctl_oidfmt(__unused struct sysctl_oid *oidp, void *arg1, int arg2, goto err; found: - if (!oid->oid_fmt) + if (!oid->oid_fmt) { goto err; - error = SYSCTL_OUT(req, - &oid->oid_kind, sizeof(oid->oid_kind)); - if (!error) - error = SYSCTL_OUT(req, oid->oid_fmt, - strlen(oid->oid_fmt)+1); + } + error = 
SYSCTL_OUT(req, + &oid->oid_kind, sizeof(oid->oid_kind)); + if (!error) { + error = SYSCTL_OUT(req, oid->oid_fmt, + strlen(oid->oid_fmt) + 1); + } err: lck_rw_done(sysctl_geometry_lock); - return (error); + return error; } SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_sysctl_oidfmt, ""); @@ -1114,7 +1173,7 @@ SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_sysctl_oidfm int sysctl_handle_int(__unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { return sysctl_io_number(req, arg1? *(int*)arg1: arg2, sizeof(int), arg1, NULL); } @@ -1125,10 +1184,11 @@ sysctl_handle_int(__unused struct sysctl_oid *oidp, void *arg1, int arg2, int sysctl_handle_long(__unused struct sysctl_oid *oidp, void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { - if (!arg1) - return (EINVAL); + if (!arg1) { + return EINVAL; + } return sysctl_io_number(req, *(long*)arg1, sizeof(long), arg1, NULL); } @@ -1138,10 +1198,11 @@ sysctl_handle_long(__unused struct sysctl_oid *oidp, void *arg1, int sysctl_handle_quad(__unused struct sysctl_oid *oidp, void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { - if (!arg1) - return (EINVAL); + if (!arg1) { + return EINVAL; + } return sysctl_io_number(req, *(long long*)arg1, sizeof(long long), arg1, NULL); } @@ -1154,19 +1215,21 @@ sysctl_handle_quad(__unused struct sysctl_oid *oidp, void *arg1, */ int sysctl_handle_int2quad(__unused struct sysctl_oid *oidp, void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { int error = 0; long long val; int newval; - if (!arg1) - return (EINVAL); + if (!arg1) { + return EINVAL; + } val = (long long)*(int *)arg1; error = SYSCTL_OUT(req, &val, sizeof(long long)); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } error = SYSCTL_IN(req, &val, sizeof(long long)); if (!error) { @@ -1181,19 +1244,19 @@ sysctl_handle_int2quad(__unused struct sysctl_oid *oidp, void *arg1, *(int *)arg1 = newval; } } - return (error); + return error; } /* * Handle our generic '\0' terminated 'C' string. * Two cases: - * a variable string: point arg1 at it, arg2 is max length. - * a constant string: point arg1 at it, arg2 is zero. + * a variable string: point arg1 at it, arg2 is max length. + * a constant string: point arg1 at it, arg2 is zero. 
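[Editor's note: the four "staff" handlers cleaned up above, {0,1} name, {0,2} next, {0,3} name2oid, and {0,4} oidfmt, are what let sysctl(8) enumerate the tree without a compiled-in name table. A userspace sketch of the walk, assuming the usual libc headers plus <sys/sysctl.h>; error handling is abbreviated and the CTL_KERN starting point is arbitrary.]

int qoid[CTL_MAXNAME + 2], oid[CTL_MAXNAME];
u_int n = 1;
size_t len;
char name[BUFSIZ];

qoid[2] = CTL_KERN;                       /* walk everything under kern */
for (;;) {
	qoid[0] = 0; qoid[1] = 2;         /* sysctl.next: step to the next leaf */
	len = sizeof(oid);
	if (sysctl(qoid, n + 2, oid, &len, NULL, 0) != 0) {
		break;                    /* ENOENT: walked off the end */
	}
	n = (u_int)(len / sizeof(int));
	memcpy(qoid + 2, oid, len);

	qoid[1] = 1;                      /* sysctl.name: render the dotted name */
	len = sizeof(name);
	if (sysctl(qoid, n + 2, name, &len, NULL, 0) == 0) {
		printf("%s\n", name);
	}
}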
*/ int sysctl_handle_string( __unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { return sysctl_io_string(req, arg1, arg2, 0, NULL); } @@ -1205,7 +1268,7 @@ sysctl_handle_string( __unused struct sysctl_oid *oidp, void *arg1, int arg2, int sysctl_handle_opaque(__unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { return sysctl_io_opaque(req, arg1, arg2, NULL); } @@ -1220,27 +1283,32 @@ sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l) if (req->oldptr) { i = l; - if (i > req->oldlen - req->oldidx) + if (i > req->oldlen - req->oldidx) { i = req->oldlen - req->oldidx; - if (i > 0) + } + if (i > 0) { bcopy((const void*)p, CAST_DOWN(char *, (req->oldptr + req->oldidx)), i); + } } req->oldidx += l; - if (req->oldptr && i != l) - return (ENOMEM); - return (0); + if (req->oldptr && i != l) { + return ENOMEM; + } + return 0; } STATIC int sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l) { - if (!req->newptr) + if (!req->newptr) { return 0; - if (req->newlen - req->newidx < l) - return (EINVAL); + } + if (req->newlen - req->newidx < l) { + return EINVAL; + } bcopy(CAST_DOWN(char *, (req->newptr + req->newidx)), p, l); req->newidx += l; - return (0); + return 0; } int @@ -1254,10 +1322,12 @@ kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldle */ bzero(&req, sizeof req); req.p = p; - if (oldlenp) + if (oldlenp) { req.oldlen = *oldlenp; - if (old) + } + if (old) { req.oldptr = CAST_USER_ADDR_T(old); + } if (newlen) { req.newlen = newlen; req.newptr = CAST_USER_ADDR_T(new); @@ -1269,13 +1339,15 @@ kernel_sysctl(struct proc *p, int *name, u_int namelen, void *old, size_t *oldle /* make the request */ error = sysctl_root(TRUE, FALSE, NULL, 0, name, namelen, &req); - if (error && error != ENOMEM) - return (error); + if (error && error != ENOMEM) { + return error; + } - if (oldlenp) + if (oldlenp) { *oldlenp = req.oldidx; + } - return (error); + return error; } /* @@ -1288,20 +1360,25 @@ sysctl_old_user(struct sysctl_req *req, const void *p, size_t l) size_t i = 0; if (req->oldptr) { - if (req->oldlen - req->oldidx < l) - return (ENOMEM); + if (req->oldlen - req->oldidx < l) { + return ENOMEM; + } i = l; - if (i > req->oldlen - req->oldidx) + if (i > req->oldlen - req->oldidx) { i = req->oldlen - req->oldidx; - if (i > 0) + } + if (i > 0) { error = copyout((const void*)p, (req->oldptr + req->oldidx), i); + } } req->oldidx += l; - if (error) - return (error); - if (req->oldptr && i < l) - return (ENOMEM); - return (0); + if (error) { + return error; + } + if (req->oldptr && i < l) { + return ENOMEM; + } + return 0; } STATIC int @@ -1309,13 +1386,15 @@ sysctl_new_user(struct sysctl_req *req, void *p, size_t l) { int error; - if (!req->newptr) + if (!req->newptr) { return 0; - if (req->newlen - req->newidx < l) - return (EINVAL); + } + if (req->newlen - req->newidx < l) { + return EINVAL; + } error = copyin((req->newptr + req->newidx), p, l); req->newidx += l; - return (error); + return error; } /* @@ -1345,13 +1424,12 @@ sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestri goto err; } } - + oid = SLIST_FIRST(lsp); indx = 0; while (oid && indx < CTL_MAXNAME) { if (oid->oid_number == name[indx]) { - if (!from_kernel && !string_is_canonical) { if (namestring_started) { if (strlcat(namestring, ".", namestringlen) >= namestringlen) { @@ -1366,14 +1444,14 @@ sysctl_root(boolean_t from_kernel, boolean_t 
string_is_canonical, char *namestri } namestring_started = TRUE; } - + indx++; - if (!(oid->oid_kind & CTLFLAG_LOCKED)) - { + if (!(oid->oid_kind & CTLFLAG_LOCKED)) { unlocked_node_found = TRUE; } - if (oid->oid_kind & CTLFLAG_NOLOCK) + if (oid->oid_kind & CTLFLAG_NOLOCK) { req->lock = 0; + } /* * For SYSCTL_PROC() functions which are for sysctl's * which have parameters at the end of their OID @@ -1387,10 +1465,10 @@ sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestri * will become unsupported. */ if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) { - if (oid->oid_handler) + if (oid->oid_handler) { goto found; - if (indx == namelen) - { + } + if (indx == namelen) { error = ENOENT; goto err; } @@ -1398,8 +1476,7 @@ sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestri lsp = (struct sysctl_oid_list *)oid->oid_arg1; oid = SLIST_FIRST(lsp); } else { - if (indx != namelen) - { + if (indx != namelen) { error = EISDIR; goto err; } @@ -1412,7 +1489,7 @@ sysctl_root(boolean_t from_kernel, boolean_t string_is_canonical, char *namestri error = ENOENT; goto err; found: - + /* * indx is the index of the first remaining OID name, * for sysctls that take them as arguments @@ -1420,17 +1497,17 @@ found: if (!from_kernel && !string_is_canonical && (indx < namelen)) { char tempbuf[10]; u_int indx2; - + for (indx2 = indx; indx2 < namelen; indx2++) { - snprintf(tempbuf, sizeof(tempbuf), "%d",name[indx2]); - + snprintf(tempbuf, sizeof(tempbuf), "%d", name[indx2]); + if (namestring_started) { if (strlcat(namestring, ".", namestringlen) >= namestringlen) { error = ENAMETOOLONG; goto err; } } - + if (strlcat(namestring, tempbuf, namestringlen) >= namestringlen) { error = ENAMETOOLONG; goto err; @@ -1438,10 +1515,10 @@ found: namestring_started = TRUE; } } - + /* If writing isn't allowed */ if (req->newptr && (!(oid->oid_kind & CTLFLAG_WR) || - ((oid->oid_kind & CTLFLAG_SECURE) && securelevel > 0))) { + ((oid->oid_kind & CTLFLAG_SECURE) && securelevel > 0))) { error = (EPERM); goto err; } @@ -1449,8 +1526,7 @@ found: /* * If we're inside the kernel, the OID must be marked as kernel-valid. */ - if (from_kernel && !(oid->oid_kind & CTLFLAG_KERN)) - { + if (from_kernel && !(oid->oid_kind & CTLFLAG_KERN)) { error = (EPERM); goto err; } @@ -1465,8 +1541,9 @@ found: */ if (!(oid->oid_kind & CTLFLAG_ANYBODY) && req->newptr && req->p && - (error = proc_suser(req->p))) + (error = proc_suser(req->p))) { goto err; + } /* * sysctl_unregister_oid() may change the handler value, so grab it @@ -1474,7 +1551,7 @@ found: */ oid_handler = oid->oid_handler; if (!oid_handler) { - error = EINVAL; + error = EINVAL; goto err; } @@ -1491,24 +1568,24 @@ found: #if CONFIG_MACF if (!from_kernel) { error = mac_system_check_sysctlbyname(kauth_cred_get(), - namestring, - name, - namelen, - req->oldptr, - req->oldlen, - req->newptr, - req->newlen); - if (error) + namestring, + name, + namelen, + req->oldptr, + req->oldlen, + req->newptr, + req->newlen); + if (error) { goto dropref; + } } #endif - + /* * ...however, we still have to grab the mutex for those calls which * may be into code whose reentrancy is protected by it. */ - if (unlocked_node_found) - { + if (unlocked_node_found) { lck_mtx_lock(sysctl_unlocked_node_lock); } @@ -1520,8 +1597,7 @@ found: } error = i; - if (unlocked_node_found) - { + if (unlocked_node_found) { lck_mtx_unlock(sysctl_unlocked_node_lock); } @@ -1545,29 +1621,31 @@ dropref: * OIDs. 
*/ lck_rw_lock_shared(sysctl_geometry_lock); - if (OSAddAtomic(-1, &oid->oid_refcnt) == 1) + if (OSAddAtomic(-1, &oid->oid_refcnt) == 1) { wakeup(&oid->oid_refcnt); + } err: lck_rw_done(sysctl_geometry_lock); - return (error); + return error; } -void sysctl_create_user_req(struct sysctl_req *req, struct proc *p, user_addr_t oldp, - size_t oldlen, user_addr_t newp, size_t newlen) +void +sysctl_create_user_req(struct sysctl_req *req, struct proc *p, user_addr_t oldp, + size_t oldlen, user_addr_t newp, size_t newlen) { bzero(req, sizeof(*req)); - + req->p = p; - + req->oldlen = oldlen; req->oldptr = oldp; - + if (newlen) { req->newlen = newlen; req->newptr = newp; } - + req->oldfunc = sysctl_old_user; req->newfunc = sysctl_new_user; req->lock = 1; @@ -1584,34 +1662,38 @@ sysctl(proc_t p, struct sysctl_args *uap, __unused int32_t *retval) struct sysctl_req req; char *namestring; size_t namestringlen = MAXPATHLEN; - + /* * all top-level sysctl names are non-terminal */ - if (uap->namelen > CTL_MAXNAME || uap->namelen < 2) - return (EINVAL); + if (uap->namelen > CTL_MAXNAME || uap->namelen < 2) { + return EINVAL; + } error = copyin(uap->name, &name[0], uap->namelen * sizeof(int)); - if (error) - return (error); - + if (error) { + return error; + } + AUDIT_ARG(ctlname, name, uap->namelen); - - if (uap->newlen > SIZE_T_MAX) - return (EINVAL); + + if (uap->newlen > SIZE_T_MAX) { + return EINVAL; + } newlen = (size_t)uap->newlen; - + if (uap->oldlenp != USER_ADDR_NULL) { - uint64_t oldlen64 = fuulong(uap->oldlenp); + uint64_t oldlen64 = fuulong(uap->oldlenp); /* * If more than 4G, clamp to 4G */ - if (oldlen64 > SIZE_T_MAX) + if (oldlen64 > SIZE_T_MAX) { oldlen = SIZE_T_MAX; - else + } else { oldlen = (size_t)oldlen64; + } } - + sysctl_create_user_req(&req, p, uap->old, oldlen, uap->new, newlen); /* Guess that longest length for the passed-in MIB, if we can be more aggressive than MAXPATHLEN */ @@ -1621,26 +1703,28 @@ sysctl(proc_t p, struct sysctl_args *uap, __unused int32_t *retval) } else if (name[0] == CTL_HW && name[1] < HW_MAXID) { namestringlen = 32; /* "hw.cachelinesize_compat" */ } - } + } MALLOC(namestring, char *, namestringlen, M_TEMP, M_WAITOK); if (!namestring) { - oldlen = 0; - goto err; + oldlen = 0; + goto err; } error = userland_sysctl(FALSE, namestring, namestringlen, name, uap->namelen, &req, &oldlen); - + FREE(namestring, M_TEMP); - - if ((error) && (error != ENOMEM)) - return (error); - + + if ((error) && (error != ENOMEM)) { + return error; + } + err: - if (uap->oldlenp != USER_ADDR_NULL) + if (uap->oldlenp != USER_ADDR_NULL) { error = suulong(uap->oldlenp, oldlen); - - return (error); + } + + return error; } int @@ -1653,56 +1737,61 @@ sysctlbyname(proc_t p, struct sysctlbyname_args *uap, __unused int32_t *retval) struct sysctl_req req; int oid[CTL_MAXNAME]; - if (uap->namelen >= MAXPATHLEN) /* XXX arbitrary, undocumented */ - return (ENAMETOOLONG); + if (uap->namelen >= MAXPATHLEN) { /* XXX arbitrary, undocumented */ + return ENAMETOOLONG; + } namelen = (size_t)uap->namelen; - - MALLOC(name, char *, namelen+1, M_TEMP, M_WAITOK); - if (!name) - return ENOMEM; + + MALLOC(name, char *, namelen + 1, M_TEMP, M_WAITOK); + if (!name) { + return ENOMEM; + } error = copyin(uap->name, name, namelen); if (error) { FREE(name, M_TEMP); - return (error); + return error; } name[namelen] = '\0'; /* XXX * AUDIT_ARG(ctlname, name, uap->namelen); */ - + if (uap->newlen > SIZE_T_MAX) { FREE(name, M_TEMP); - return (EINVAL); + return EINVAL; } newlen = (size_t)uap->newlen; - + if (uap->oldlenp 
!= USER_ADDR_NULL) { - uint64_t oldlen64 = fuulong(uap->oldlenp); - + uint64_t oldlen64 = fuulong(uap->oldlenp); + /* * If more than 4G, clamp to 4G */ - if (oldlen64 > SIZE_T_MAX) + if (oldlen64 > SIZE_T_MAX) { oldlen = SIZE_T_MAX; - else + } else { oldlen = (size_t)oldlen64; + } } - + sysctl_create_user_req(&req, p, uap->old, oldlen, uap->new, newlen); - error = userland_sysctl(TRUE, name, namelen+1, oid, CTL_MAXNAME, &req, &oldlen); - + error = userland_sysctl(TRUE, name, namelen + 1, oid, CTL_MAXNAME, &req, &oldlen); + FREE(name, M_TEMP); - if ((error) && (error != ENOMEM)) - return (error); - - if (uap->oldlenp != USER_ADDR_NULL) + if ((error) && (error != ENOMEM)) { + return error; + } + + if (uap->oldlenp != USER_ADDR_NULL) { error = suulong(uap->oldlenp, oldlen); - - return (error); + } + + return error; } /* @@ -1711,32 +1800,35 @@ sysctlbyname(proc_t p, struct sysctlbyname_args *uap, __unused int32_t *retval) */ int userland_sysctl(boolean_t string_is_canonical, - char *namestring, size_t namestringlen, - int *name, u_int namelen, struct sysctl_req *req, - size_t *retval) + char *namestring, size_t namestringlen, + int *name, u_int namelen, struct sysctl_req *req, + size_t *retval) { int error = 0; struct sysctl_req req2; do { - /* if EAGAIN, reset output cursor */ - req2 = *req; - if (!string_is_canonical) - namestring[0] = '\0'; + /* if EAGAIN, reset output cursor */ + req2 = *req; + if (!string_is_canonical) { + namestring[0] = '\0'; + } - error = sysctl_root(FALSE, string_is_canonical, namestring, namestringlen, name, namelen, &req2); + error = sysctl_root(FALSE, string_is_canonical, namestring, namestringlen, name, namelen, &req2); } while (error == EAGAIN); - if (error && error != ENOMEM) - return (error); + if (error && error != ENOMEM) { + return error; + } if (retval) { - if (req2.oldptr && req2.oldidx > req2.oldlen) + if (req2.oldptr && req2.oldidx > req2.oldlen) { *retval = req2.oldlen; - else + } else { *retval = req2.oldidx; + } } - return (error); + return error; } /* @@ -1766,10 +1858,10 @@ kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, s oidlen = sizeof(oid); error = kernel_sysctl(current_proc(), name2mib_oid, 2, oid, &oidlen, __DECONST(void *, name), strlen(name)); oidlen /= sizeof(int); - + /* now use the OID */ - if (error == 0) + if (error == 0) { error = kernel_sysctl(current_proc(), oid, oidlen, oldp, oldlenp, newp, newlen); - return(error); + } + return error; } - diff --git a/bsd/kern/kern_ntptime.c b/bsd/kern/kern_ntptime.c index 937f12d5d..589ff9d97 100644 --- a/bsd/kern/kern_ntptime.c +++ b/bsd/kern/kern_ntptime.c @@ -83,28 +83,28 @@ #include typedef int64_t l_fp; -#define L_ADD(v, u) ((v) += (u)) -#define L_SUB(v, u) ((v) -= (u)) -#define L_ADDHI(v, a) ((v) += (int64_t)(a) << 32) -#define L_NEG(v) ((v) = -(v)) +#define L_ADD(v, u) ((v) += (u)) +#define L_SUB(v, u) ((v) -= (u)) +#define L_ADDHI(v, a) ((v) += (int64_t)(a) << 32) +#define L_NEG(v) ((v) = -(v)) #define L_RSHIFT(v, n) \ do { \ - if ((v) < 0) \ - (v) = -(-(v) >> (n)); \ - else \ - (v) = (v) >> (n); \ + if ((v) < 0) \ + (v) = -(-(v) >> (n)); \ + else \ + (v) = (v) >> (n); \ } while (0) -#define L_MPY(v, a) ((v) *= (a)) -#define L_CLR(v) ((v) = 0) -#define L_ISNEG(v) ((v) < 0) +#define L_MPY(v, a) ((v) *= (a)) +#define L_CLR(v) ((v) = 0) +#define L_ISNEG(v) ((v) < 0) #define L_LINT(v, a) \ do { \ - if ((a) > 0) \ - ((v) = (int64_t)(a) << 32); \ - else \ - ((v) = -((int64_t)(-(a)) << 32)); \ + if ((a) > 0) \ + ((v) = (int64_t)(a) << 32); \ + else \ + ((v) = 
-((int64_t)(-(a)) << 32)); \ } while (0) -#define L_GINT(v) ((v) < 0 ? -(-(v) >> 32) : (v) >> 32) +#define L_GINT(v) ((v) < 0 ? -(-(v) >> 32) : (v) >> 32) /* * Generic NTP kernel interface @@ -170,8 +170,8 @@ typedef int64_t l_fp; * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -#define SHIFT_PLL 4 -#define SHIFT_FLL 2 +#define SHIFT_PLL 4 +#define SHIFT_FLL 2 static int time_state = TIME_OK; int time_status = STA_UNSYNC; @@ -190,17 +190,17 @@ static int updated; static lck_spin_t * ntp_lock; static lck_grp_t * ntp_lock_grp; static lck_attr_t * ntp_lock_attr; -static lck_grp_attr_t *ntp_lock_grp_attr; +static lck_grp_attr_t *ntp_lock_grp_attr; -#define NTP_LOCK(enable) \ - enable = ml_set_interrupts_enabled(FALSE); \ - lck_spin_lock(ntp_lock); +#define NTP_LOCK(enable) \ + enable = ml_set_interrupts_enabled(FALSE); \ + lck_spin_lock(ntp_lock); -#define NTP_UNLOCK(enable) \ - lck_spin_unlock(ntp_lock);\ - ml_set_interrupts_enabled(enable); +#define NTP_UNLOCK(enable) \ + lck_spin_unlock(ntp_lock);\ + ml_set_interrupts_enabled(enable); -#define NTP_ASSERT_LOCKED() LCK_SPIN_ASSERT(ntp_lock, LCK_ASSERT_OWNED) +#define NTP_ASSERT_LOCKED() LCK_SPIN_ASSERT(ntp_lock, LCK_ASSERT_OWNED) static timer_call_data_t ntp_loop_update; static uint64_t ntp_loop_deadline; @@ -225,11 +225,11 @@ SYSCTL_INT(_kern, OID_AUTO, log_clock_adjustments, CTLFLAG_RW | CTLFLAG_LOCKED, static bool ntp_is_time_error(int tsl) { + if (tsl & (STA_UNSYNC | STA_CLOCKERR)) { + return true; + } - if (tsl & (STA_UNSYNC | STA_CLOCKERR)) - return (true); - - return (false); + return false; } static void @@ -243,7 +243,7 @@ ntp_gettime1(struct ntptimeval *ntvp) ntvp->time.tv_sec = atv.tv_sec; ntvp->time.tv_nsec = atv.tv_nsec; if ((unsigned long)atv.tv_sec > last_time_maxerror_update) { - time_maxerror += (MAXFREQ / 1000)*(atv.tv_sec-last_time_maxerror_update); + time_maxerror += (MAXFREQ / 1000) * (atv.tv_sec - last_time_maxerror_update); last_time_maxerror_update = atv.tv_sec; } ntvp->maxerror = time_maxerror; @@ -251,8 +251,9 @@ ntp_gettime1(struct ntptimeval *ntvp) ntvp->tai = time_tai; ntvp->time_state = time_state; - if (ntp_is_time_error(time_status)) + if (ntp_is_time_error(time_status)) { ntvp->time_state = TIME_ERROR; + } } int @@ -286,8 +287,9 @@ ntp_gettime(struct proc *p, struct ntp_gettime_args *uap, __unused int32_t *retv error = copyout(&user_ntv, uap->ntvp, sizeof(user_ntv)); } - if (error) + if (error) { return error; + } return ntv.time_state; } @@ -315,7 +317,6 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) ntv.constant = user_ntv.constant; ntv.precision = user_ntv.precision; ntv.tolerance = user_ntv.tolerance; - } else { struct user32_timex user_ntv; error = copyin(uap->tp, &user_ntv, sizeof(user_ntv)); @@ -329,13 +330,14 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) ntv.precision = user_ntv.precision; ntv.tolerance = user_ntv.tolerance; } - if (error) - return (error); + if (error) { + return error; + } #if DEVELOPEMNT || DEBUG if (g_should_log_clock_adjustments) { os_log(OS_LOG_DEFAULT, "%s: BEFORE modes %u offset %ld freq %ld status %d constant %ld time_adjtime %lld\n", - __func__, ntv.modes, ntv.offset, ntv.freq, ntv.status, ntv.constant, time_adjtime); + __func__, ntv.modes, ntv.offset, ntv.freq, ntv.status, ntv.constant, time_adjtime); } #endif /* @@ -353,12 +355,13 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) { #if CONFIG_MACF 
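[Editor's note: the l_fp macros re-tabbed above implement 64-bit 32.32 fixed point: the integer part lives in the high word, the fraction in the low word, and L_RSHIFT/L_LINT negate around the shift so negative values round toward zero instead of toward minus infinity. A worked sketch, assuming the typedef and macros above; the resulting values are given in the comments.]

l_fp v;

L_LINT(v, 3);        /* v = 3.0   (raw 0x0000000300000000)            */
L_ADDHI(v, 2);       /* v = 5.0: adds 2 to the integer (high) word    */
L_RSHIFT(v, 1);      /* v = 2.5   (raw 0x0000000280000000)            */
/* L_GINT(v) == 2: the integer part, truncated toward zero            */

L_NEG(v);            /* v = -2.5                                      */
L_RSHIFT(v, 2);      /* v = -0.625: negate-shift-negate keeps the     */
                     /* rounding toward zero for negative values      */
/* L_GINT(v) == 0                                                     */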
error = mac_system_check_settime(kauth_cred_get()); - if (error) - return (error); + if (error) { + return error; + } #endif - if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0))) - return (error); - + if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0))) { + return error; + } } } @@ -369,8 +372,9 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) time_maxerror = ntv.maxerror; last_time_maxerror_update = sec; } - if (modes & MOD_ESTERROR) + if (modes & MOD_ESTERROR) { time_esterror = ntv.esterror; + } if (modes & MOD_STATUS) { if (time_status & STA_PLL && !(ntv.status & STA_PLL)) { time_state = TIME_OK; @@ -385,32 +389,38 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) time_status &= STA_SUPPORTED; } if (modes & MOD_TIMECONST) { - if (ntv.constant < 0) + if (ntv.constant < 0) { time_constant = 0; - else if (ntv.constant > MAXTC) + } else if (ntv.constant > MAXTC) { time_constant = MAXTC; - else + } else { time_constant = ntv.constant; + } } if (modes & MOD_TAI) { - if (ntv.constant > 0) + if (ntv.constant > 0) { time_tai = ntv.constant; + } } - if (modes & MOD_NANO) + if (modes & MOD_NANO) { time_status |= STA_NANO; - if (modes & MOD_MICRO) + } + if (modes & MOD_MICRO) { time_status &= ~STA_NANO; - if (modes & MOD_CLKB) + } + if (modes & MOD_CLKB) { time_status |= STA_CLK; - if (modes & MOD_CLKA) + } + if (modes & MOD_CLKA) { time_status &= ~STA_CLK; + } if (modes & MOD_FREQUENCY) { freq = (ntv.freq * 1000LL) >> 16; - if (freq > MAXFREQ) + if (freq > MAXFREQ) { L_LINT(time_freq, MAXFREQ); - else if (freq < -MAXFREQ) + } else if (freq < -MAXFREQ) { L_LINT(time_freq, -MAXFREQ); - else { + } else { /* * ntv.freq is [PPM * 2^16] = [us/s * 2^16] * time_freq is [ns/s * 2^32] @@ -419,10 +429,11 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) } } if (modes & MOD_OFFSET) { - if (time_status & STA_NANO) + if (time_status & STA_NANO) { hardupdate(ntv.offset); - else + } else { hardupdate(ntv.offset * 1000); + } } ret = ntp_is_time_error(time_status) ? 
TIME_ERROR : time_state; @@ -430,7 +441,7 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) #if DEVELOPEMNT || DEBUG if (g_should_log_clock_adjustments) { os_log(OS_LOG_DEFAULT, "%s: AFTER modes %u offset %lld freq %lld status %d constant %ld time_adjtime %lld\n", - __func__, modes, time_offset, time_freq, time_status, time_constant, time_adjtime); + __func__, modes, time_offset, time_freq, time_status, time_constant, time_adjtime); } #endif @@ -442,44 +453,46 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) struct user64_timex user_ntv = {}; user_ntv.modes = modes; - if (time_status & STA_NANO) + if (time_status & STA_NANO) { user_ntv.offset = L_GINT(time_offset); - else + } else { user_ntv.offset = L_GINT(time_offset) / 1000; + } user_ntv.freq = L_GINT((time_freq / 1000LL) << 16); user_ntv.maxerror = time_maxerror; user_ntv.esterror = time_esterror; user_ntv.status = time_status; user_ntv.constant = time_constant; - if (time_status & STA_NANO) + if (time_status & STA_NANO) { user_ntv.precision = time_precision; - else + } else { user_ntv.precision = time_precision / 1000; + } user_ntv.tolerance = MAXFREQ * SCALE_PPM; /* unlock before copyout */ NTP_UNLOCK(enable); error = copyout(&user_ntv, uap->tp, sizeof(user_ntv)); - - } - else{ + } else { struct user32_timex user_ntv = {}; user_ntv.modes = modes; - if (time_status & STA_NANO) + if (time_status & STA_NANO) { user_ntv.offset = L_GINT(time_offset); - else + } else { user_ntv.offset = L_GINT(time_offset) / 1000; + } user_ntv.freq = L_GINT((time_freq / 1000LL) << 16); user_ntv.maxerror = time_maxerror; user_ntv.esterror = time_esterror; user_ntv.status = time_status; user_ntv.constant = time_constant; - if (time_status & STA_NANO) + if (time_status & STA_NANO) { user_ntv.precision = time_precision; - else + } else { user_ntv.precision = time_precision / 1000; + } user_ntv.tolerance = MAXFREQ * SCALE_PPM; /* unlock before copyout */ @@ -488,17 +501,20 @@ ntp_adjtime(struct proc *p, struct ntp_adjtime_args *uap, int32_t *retval) error = copyout(&user_ntv, uap->tp, sizeof(user_ntv)); } - if (modes) + if (modes) { start_ntp_loop(); + } - if (error == 0) + if (error == 0) { *retval = ret; + } - return (error); + return error; } int64_t -ntp_get_freq(void){ +ntp_get_freq(void) +{ return time_freq; } @@ -515,7 +531,7 @@ ntp_update_second(int64_t *adjustment, clock_sec_t secs) NTP_ASSERT_LOCKED(); if (secs > last_time_maxerror_update) { - time_maxerror += (MAXFREQ / 1000)*(secs-last_time_maxerror_update); + time_maxerror += (MAXFREQ / 1000) * (secs - last_time_maxerror_update); last_time_maxerror_update = secs; } @@ -534,16 +550,17 @@ ntp_update_second(int64_t *adjustment, clock_sec_t secs) * until the last second is slewed the final < 500 usecs. 
*/ if (time_adjtime != 0) { - if (time_adjtime > 1000000) + if (time_adjtime > 1000000) { tickrate = 5000; - else if (time_adjtime < -1000000) + } else if (time_adjtime < -1000000) { tickrate = -5000; - else if (time_adjtime > 500) + } else if (time_adjtime > 500) { tickrate = 500; - else if (time_adjtime < -500) + } else if (time_adjtime < -500) { tickrate = -500; - else + } else { tickrate = time_adjtime; + } time_adjtime -= tickrate; L_LINT(ftemp, tickrate * 1000); L_ADD(time_adj, ftemp); @@ -551,20 +568,19 @@ ntp_update_second(int64_t *adjustment, clock_sec_t secs) if (old_time_adjtime || ((time_offset || old_offset) && (time_offset != old_offset))) { updated = 1; - } - else{ + } else { updated = 0; } #if DEVELOPEMNT || DEBUG if (g_should_log_clock_adjustments) { - int64_t nano = (time_adj > 0)? time_adj >> 32 : -((-time_adj) >> 32); - int64_t frac = (time_adj > 0)? ((uint32_t) time_adj) : -((uint32_t) (-time_adj)); + int64_t nano = (time_adj > 0)? time_adj >> 32 : -((-time_adj) >> 32); + int64_t frac = (time_adj > 0)? ((uint32_t) time_adj) : -((uint32_t) (-time_adj)); os_log(OS_LOG_DEFAULT, "%s:AFTER offset %lld (%lld) freq %lld status %d " - "constant %ld time_adjtime %lld nano %lld frac %lld adj %lld\n", - __func__, time_offset, (time_offset > 0)? time_offset >> 32 : -((-time_offset) >> 32), - time_freq, time_status, time_constant, time_adjtime, nano, frac, time_adj); + "constant %ld time_adjtime %lld nano %lld frac %lld adj %lld\n", + __func__, time_offset, (time_offset > 0)? time_offset >> 32 : -((-time_offset) >> 32), + time_freq, time_status, time_constant, time_adjtime, nano, frac, time_adj); } #endif @@ -592,7 +608,7 @@ ntp_update_second(int64_t *adjustment, clock_sec_t secs) */ static void hardupdate(offset) - long offset; +long offset; { long mtemp = 0; long time_monitor; @@ -601,15 +617,17 @@ hardupdate(offset) NTP_ASSERT_LOCKED(); - if (!(time_status & STA_PLL)) + if (!(time_status & STA_PLL)) { return; + } - if (offset > MAXPHASE) + if (offset > MAXPHASE) { time_monitor = MAXPHASE; - else if (offset < -MAXPHASE) + } else if (offset < -MAXPHASE) { time_monitor = -MAXPHASE; - else + } else { time_monitor = offset; + } L_LINT(time_offset, time_monitor); clock_get_calendar_uptime(&time_uptime); @@ -633,10 +651,11 @@ hardupdate(offset) } time_reftime = time_uptime; - if (L_GINT(time_freq) > MAXFREQ) + if (L_GINT(time_freq) > MAXFREQ) { L_LINT(time_freq, MAXFREQ); - else if (L_GINT(time_freq) < -MAXFREQ) + } else if (L_GINT(time_freq) < -MAXFREQ) { L_LINT(time_freq, -MAXFREQ); + } } @@ -647,8 +666,9 @@ kern_adjtime(struct timeval *delta) int64_t ltr, ltw; boolean_t enable; - if (delta == NULL) - return (EINVAL); + if (delta == NULL) { + return EINVAL; + } ltw = (int64_t)delta->tv_sec * (int64_t)USEC_PER_SEC + delta->tv_usec; @@ -658,7 +678,7 @@ kern_adjtime(struct timeval *delta) #if DEVELOPEMNT || DEBUG if (g_should_log_clock_adjustments) { os_log(OS_LOG_DEFAULT, "%s:AFTER offset %lld freq %lld status %d constant %ld time_adjtime %lld\n", - __func__, time_offset, time_freq, time_status, time_constant, time_adjtime); + __func__, time_offset, time_freq, time_status, time_constant, time_adjtime); } #endif NTP_UNLOCK(enable); @@ -674,26 +694,26 @@ kern_adjtime(struct timeval *delta) start_ntp_loop(); - return (0); + return 0; } int adjtime(struct proc *p, struct adjtime_args *uap, __unused int32_t *retval) { - struct timeval atv; int error; /* Check that this task is entitled to set the time or it is root */ if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) { - #if 
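[Editor's note: ntp_update_second() above slews an outstanding adjtime(2) delta at 5000 PPM while more than a second remains, at 500 PPM below that, and absorbs the final sub-500us remainder in a single step. The selection logic, extracted as a standalone sketch with time in microseconds.]

static long
slew_step(long long adjtime_us)
{
	if (adjtime_us > 1000000) {
		return 5000;              /* 5 ms/s == 5000 PPM while over 1 s remains */
	} else if (adjtime_us < -1000000) {
		return -5000;
	} else if (adjtime_us > 500) {
		return 500;               /* 500 us/s == 500 PPM below 1 s */
	} else if (adjtime_us < -500) {
		return -500;
	}
	return (long)adjtime_us;          /* final partial step */
}

Slewing a +2 s delta therefore takes about 200 seconds at 5000 PPM plus 2000 seconds at 500 PPM, roughly 37 minutes in total.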
CONFIG_MACF error = mac_system_check_settime(kauth_cred_get()); - if (error) - return (error); + if (error) { + return error; + } #endif - if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0))) - return (error); + if ((error = priv_check_cred(kauth_cred_get(), PRIV_ADJTIME, 0))) { + return error; + } } if (IS_64BIT_PROCESS(p)) { @@ -707,8 +727,9 @@ adjtime(struct proc *p, struct adjtime_args *uap, __unused int32_t *retval) atv.tv_sec = user_atv.tv_sec; atv.tv_usec = user_atv.tv_usec; } - if (error) - return (error); + if (error) { + return error; + } kern_adjtime(&atv); @@ -726,8 +747,7 @@ adjtime(struct proc *p, struct adjtime_args *uap, __unused int32_t *retval) } } - return (error); - + return error; } static void @@ -751,7 +771,6 @@ ntp_loop_update_call(void) static void refresh_ntp_loop(void) { - NTP_ASSERT_LOCKED(); if (--ntp_loop_active == 0) { /* @@ -761,11 +780,11 @@ refresh_ntp_loop(void) if (updated) { clock_deadline_for_periodic_event(ntp_loop_period, mach_absolute_time(), &ntp_loop_deadline); - if (!timer_call_enter(&ntp_loop_update, ntp_loop_deadline, TIMER_CALL_SYS_CRITICAL)) - ntp_loop_active++; + if (!timer_call_enter(&ntp_loop_update, ntp_loop_deadline, TIMER_CALL_SYS_CRITICAL)) { + ntp_loop_active++; + } } } - } /* @@ -783,7 +802,7 @@ start_ntp_loop(void) ntp_loop_deadline = mach_absolute_time() + ntp_loop_period; if (!timer_call_enter(&ntp_loop_update, ntp_loop_deadline, TIMER_CALL_SYS_CRITICAL)) { - ntp_loop_active++; + ntp_loop_active++; } NTP_UNLOCK(enable); @@ -793,7 +812,7 @@ start_ntp_loop(void) static void init_ntp_loop(void) { - uint64_t abstime; + uint64_t abstime; ntp_loop_active = 0; nanoseconds_to_absolutetime(NTP_LOOP_PERIOD_INTERVAL, &abstime); @@ -804,7 +823,6 @@ init_ntp_loop(void) void ntp_init(void) { - L_CLR(time_offset); L_CLR(time_freq); diff --git a/bsd/kern/kern_overrides.c b/bsd/kern/kern_overrides.c index 0d7ece73d..04c70d47a 100644 --- a/bsd/kern/kern_overrides.c +++ b/bsd/kern/kern_overrides.c @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -51,44 +51,44 @@ #include /* Mutex for global system override state */ -static lck_mtx_t sys_override_lock; +static lck_mtx_t sys_override_lock; static lck_grp_t *sys_override_mtx_grp; static lck_attr_t *sys_override_mtx_attr; static lck_grp_attr_t *sys_override_mtx_grp_attr; -/* +/* * Assertion counts for system properties (add new ones for each new mechanism) * * The assertion count management for system overrides is as follows: * * - All assertion counts are protected by the sys_override_lock. * - * - Each caller of system_override() increments the assertion count for the - * mechanism it specified in the flags. The caller then blocks for the - * timeout specified in the system call. + * - Each caller of system_override() increments the assertion count for the + * mechanism it specified in the flags. The caller then blocks for the + * timeout specified in the system call. * - * - At the end of the timeout, the caller thread wakes up and decrements the + * - At the end of the timeout, the caller thread wakes up and decrements the * assertion count for the mechanism it originally took an assertion on. * - * - If another caller calls the system_override() to disable the override - * for a mechanism, it simply disables the mechanism without changing any - * assertion counts. That way, the assertion counts are properly balanced. + * - If another caller calls the system_override() to disable the override + * for a mechanism, it simply disables the mechanism without changing any + * assertion counts. That way, the assertion counts are properly balanced. * - * One thing to note is that a SYS_OVERRIDE_DISABLE disables the overrides + * One thing to note is that a SYS_OVERRIDE_DISABLE disables the overrides * for a mechanism irrespective of how many clients requested that override. - * That makes the implementation simpler and avoids keeping a lot of process + * That makes the implementation simpler and avoids keeping a lot of process * specific state in the kernel. * */ -static int64_t io_throttle_assert_cnt; -static int64_t cpu_throttle_assert_cnt; -static int64_t fast_jetsam_assert_cnt; +static int64_t io_throttle_assert_cnt; +static int64_t cpu_throttle_assert_cnt; +static int64_t fast_jetsam_assert_cnt; /* Wait Channel for system override */ -static uint64_t sys_override_wait; +static uint64_t sys_override_wait; /* Global variable to indicate if system_override is enabled */ -int sys_override_enabled; +int sys_override_enabled; /* Helper routines */ static void system_override_begin(uint64_t flags); @@ -119,7 +119,7 @@ system_override(__unused struct proc *p, struct system_override_args * uap, __un /* Check credentials for caller. Only entitled processes are allowed to make this call. */ if ((error = priv_check_cred(kauth_cred_get(), PRIV_SYSTEM_OVERRIDE, 0))) { goto out; - } + } /* Check to see if sane flags are specified. */ if ((flags & ~SYS_OVERRIDE_FLAGS_MASK) != 0) { @@ -152,20 +152,20 @@ out: /* * Helper routines for enabling/disabling system overrides for various mechanisms. - * These routines should be called with the sys_override_lock held. Each subsystem + * These routines should be called with the sys_override_lock held. Each subsystem * which is hooked into the override service provides two routines: - * + * * - void sys_override_foo_init(void); * Routine to initialize the subsystem or the data needed for the override to work. 
- * This routine is optional and if a subsystem needs it, it should be invoked from + * This routine is optional and if a subsystem needs it, it should be invoked from * init_system_override(). - * + * * - void sys_override_foo(boolean_t enable_override); - * Routine to enable/disable the override mechanism for that subsystem. A value of - * true indicates that the mechanism should be overridden and the special behavior - * should begin. A false value indicates that the subsystem should return to default - * behavior. This routine is mandatory and should be invoked as part of the helper - * routines if the flags passed in the syscall match the subsystem. Also, this + * Routine to enable/disable the override mechanism for that subsystem. A value of + * true indicates that the mechanism should be overridden and the special behavior + * should begin. A false value indicates that the subsystem should return to default + * behavior. This routine is mandatory and should be invoked as part of the helper + * routines if the flags passed in the syscall match the subsystem. Also, this * routine should preferably be idempotent. */ @@ -173,92 +173,90 @@ static void system_override_callouts(uint64_t flags, boolean_t enable_override) { switch (flags) { - case SYS_OVERRIDE_IO_THROTTLE: - if (enable_override) { - KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_THROTTLE, IO_THROTTLE_DISABLE) | DBG_FUNC_START, - current_proc()->p_pid, 0, 0, 0, 0); - } else { - KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_THROTTLE, IO_THROTTLE_DISABLE) | DBG_FUNC_END, - current_proc()->p_pid, 0, 0, 0, 0); - } - sys_override_io_throttle(enable_override); - break; - - case SYS_OVERRIDE_CPU_THROTTLE: - if (enable_override) { - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CPU_THROTTLE_DISABLE) | DBG_FUNC_START, - current_proc()->p_pid, 0, 0, 0, 0); - } else { - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CPU_THROTTLE_DISABLE) | DBG_FUNC_END, - current_proc()->p_pid, 0, 0, 0, 0); - } - sys_override_cpu_throttle(enable_override); - break; - - case SYS_OVERRIDE_FAST_JETSAM: - if (enable_override) { - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FAST_JETSAM) | DBG_FUNC_START, - current_proc()->p_pid, 0, 0, 0, 0); - } else { - KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FAST_JETSAM) | DBG_FUNC_END, - current_proc()->p_pid, 0, 0, 0, 0); - } + case SYS_OVERRIDE_IO_THROTTLE: + if (enable_override) { + KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_THROTTLE, IO_THROTTLE_DISABLE) | DBG_FUNC_START, + current_proc()->p_pid, 0, 0, 0, 0); + } else { + KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_THROTTLE, IO_THROTTLE_DISABLE) | DBG_FUNC_END, + current_proc()->p_pid, 0, 0, 0, 0); + } + sys_override_io_throttle(enable_override); + break; + + case SYS_OVERRIDE_CPU_THROTTLE: + if (enable_override) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CPU_THROTTLE_DISABLE) | DBG_FUNC_START, + current_proc()->p_pid, 0, 0, 0, 0); + } else { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CPU_THROTTLE_DISABLE) | DBG_FUNC_END, + current_proc()->p_pid, 0, 0, 0, 0); + } + sys_override_cpu_throttle(enable_override); + break; + + case SYS_OVERRIDE_FAST_JETSAM: + if (enable_override) { + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FAST_JETSAM) | DBG_FUNC_START, + current_proc()->p_pid, 0, 0, 0, 0); + } else { + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FAST_JETSAM) | DBG_FUNC_END, + current_proc()->p_pid, 0, 0, 0, 0); + } #if CONFIG_JETSAM - 
memorystatus_fast_jetsam_override(enable_override); + memorystatus_fast_jetsam_override(enable_override); #endif /* CONFIG_JETSAM */ - break; + break; - default: - panic("Unknown option to system_override_callouts(): %llu\n", flags); + default: + panic("Unknown option to system_override_callouts(): %llu\n", flags); } } /* * system_override_begin(uint64_t flags) * - * Routine to start a system override if the assertion count + * Routine to start a system override if the assertion count * transitions from 0->1 for a specified mechanism. */ static void system_override_begin(uint64_t flags) { lck_mtx_assert(&sys_override_lock, LCK_MTX_ASSERT_OWNED); - + if (flags & SYS_OVERRIDE_IO_THROTTLE) { if (io_throttle_assert_cnt == 0) { system_override_callouts(SYS_OVERRIDE_IO_THROTTLE, true); } io_throttle_assert_cnt++; } - + if (flags & SYS_OVERRIDE_CPU_THROTTLE) { if (cpu_throttle_assert_cnt == 0) { system_override_callouts(SYS_OVERRIDE_CPU_THROTTLE, true); } cpu_throttle_assert_cnt++; } - + if (flags & SYS_OVERRIDE_FAST_JETSAM) { if (fast_jetsam_assert_cnt == 0) { system_override_callouts(SYS_OVERRIDE_FAST_JETSAM, true); } fast_jetsam_assert_cnt++; } - } /* * system_override_end(uint64_t flags) * - * Routine to end a system override if the assertion count + * Routine to end a system override if the assertion count * transitions from 1->0 for a specified mechanism. */ static void system_override_end(uint64_t flags) { - lck_mtx_assert(&sys_override_lock, LCK_MTX_ASSERT_OWNED); - + if (flags & SYS_OVERRIDE_IO_THROTTLE) { assert(io_throttle_assert_cnt > 0); io_throttle_assert_cnt--; @@ -282,31 +280,29 @@ system_override_end(uint64_t flags) system_override_callouts(SYS_OVERRIDE_FAST_JETSAM, false); } } - } /* * system_override_abort(uint64_t flags) * - * Routine to abort a system override (if one was active) - * irrespective of the assertion counts and number of blocked + * Routine to abort a system override (if one was active) + * irrespective of the assertion counts and number of blocked * requestors. */ static void system_override_abort(uint64_t flags) { - lck_mtx_assert(&sys_override_lock, LCK_MTX_ASSERT_OWNED); - + if ((flags & SYS_OVERRIDE_IO_THROTTLE) && (io_throttle_assert_cnt > 0)) { system_override_callouts(SYS_OVERRIDE_IO_THROTTLE, false); } - if ((flags & SYS_OVERRIDE_CPU_THROTTLE) && (cpu_throttle_assert_cnt > 0)) { + if ((flags & SYS_OVERRIDE_CPU_THROTTLE) && (cpu_throttle_assert_cnt > 0)) { system_override_callouts(SYS_OVERRIDE_CPU_THROTTLE, false); } - if ((flags & SYS_OVERRIDE_FAST_JETSAM) && (fast_jetsam_assert_cnt > 0)) { + if ((flags & SYS_OVERRIDE_FAST_JETSAM) && (fast_jetsam_assert_cnt > 0)) { system_override_callouts(SYS_OVERRIDE_FAST_JETSAM, false); } } @@ -319,4 +315,3 @@ PROCESS_OVERRIDING_SYSTEM_DEFAULTS(uint64_t timeout) ts.tv_nsec = timeout - ((long)ts.tv_sec * NSEC_PER_SEC); msleep((caddr_t)&sys_override_wait, &sys_override_lock, PRIBIO | PCATCH, "system_override", &ts); } - diff --git a/bsd/kern/kern_pcsamples.c b/bsd/kern/kern_pcsamples.c index eaedcc705..69694bc5b 100644 --- a/bsd/kern/kern_pcsamples.c +++ b/bsd/kern/kern_pcsamples.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
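[Editor's note: the reindented begin/end/abort helpers encode the counting contract from the comment block above: only the 0->1 and 1->0 transitions reach system_override_callouts(), while abort drops an active override regardless of the count. A hypothetical call sequence; each caller holds sys_override_lock via system_override().]

system_override_begin(SYS_OVERRIDE_IO_THROTTLE);  /* 0 -> 1: callout fires, I/O throttling overridden */
system_override_begin(SYS_OVERRIDE_IO_THROTTLE);  /* 1 -> 2: no callout */
system_override_end(SYS_OVERRIDE_IO_THROTTLE);    /* 2 -> 1: no callout */
system_override_end(SYS_OVERRIDE_IO_THROTTLE);    /* 1 -> 0: callout fires, default behavior restored */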
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,9 +37,9 @@ #include vm_offset_t pc_buftomem = 0; -unsigned int * pc_buffer = 0; /* buffer that holds each pc */ -unsigned int * pc_bufptr = 0; -unsigned int * pc_buflast = 0; +unsigned int * pc_buffer = 0; /* buffer that holds each pc */ +unsigned int * pc_bufptr = 0; +unsigned int * pc_buflast = 0; unsigned int npcbufs = 8192; /* number of pc entries in buffer */ unsigned int pc_bufsize = 0; unsigned int pcsample_flags = 0; @@ -72,42 +72,40 @@ int pcsamples_reinit(void); int enable_branch_tracing(void) { - struct proc *p; - if (-1 != pc_sample_pid) { - p = proc_find(pc_sample_pid); - if (p) { - p->p_btrace = 1; - proc_rele(p); - } - } - else { - pc_trace_frameworks = TRUE; - } - - return 1; + struct proc *p; + if (-1 != pc_sample_pid) { + p = proc_find(pc_sample_pid); + if (p) { + p->p_btrace = 1; + proc_rele(p); + } + } else { + pc_trace_frameworks = TRUE; + } + return 1; } int disable_branch_tracing(void) { - struct proc *p; - switch (pc_sample_pid) { - case -1: - pc_trace_frameworks = FALSE; - break; - case 0: - break; - default: - p = proc_find(pc_sample_pid); - if (p) { - p->p_btrace = 0; + struct proc *p; + switch (pc_sample_pid) { + case -1: + pc_trace_frameworks = FALSE; + break; + case 0: + break; + default: + p = proc_find(pc_sample_pid); + if (p) { + p->p_btrace = 0; proc_rele(p); - } - break; - } - clr_be_bit(); - return 1; + } + break; + } + clr_be_bit(); + return 1; } /* @@ -117,12 +115,14 @@ disable_branch_tracing(void) int branch_tracing_enabled(void) { - struct proc *p = current_proc(); - if (TRUE == pc_trace_frameworks) return TRUE; - if (p) { - return (p->p_btrace); - } - return 0; + struct proc *p = current_proc(); + if (TRUE == pc_trace_frameworks) { + return TRUE; + } + if (p) { + return p->p_btrace; + } + return 0; } @@ -130,92 +130,93 @@ void add_pcbuffer(void) { int i; - unsigned int pc; - - if (!pcsample_enable) - return; - - for (i=0; i < pc_trace_cnt; i++) - { - pc = pc_trace_buf[i]; - - if ((pcsample_beg <= pc) && (pc < pcsample_end)) - { - if (pc_bufptr > pc_buffer) - { - if ( (*(pc_bufptr-1)) == pc ) - continue; /* Ignore, probably spinning */ - } - - /* Then the sample is in our range */ - *pc_bufptr = pc; - pc_bufptr++; - } - } + unsigned int pc; + + if (!pcsample_enable) { + return; + } + + for (i = 0; i < pc_trace_cnt; i++) { + pc = pc_trace_buf[i]; + + if ((pcsample_beg <= pc) && (pc < pcsample_end)) { + if (pc_bufptr > pc_buffer) { + if ((*(pc_bufptr - 1)) == pc) { + continue; /* Ignore, probably spinning */ + } + } + + /* Then the sample is in our range */ + *pc_bufptr = pc; + pc_bufptr++; + } + } /* We never wrap the buffer */ - if ((pc_bufptr + pc_trace_cnt) >= pc_buflast) - { - pcsample_enable = 0; - 
(void)disable_branch_tracing(); - wakeup(&pcsample_enable); - } + if ((pc_bufptr + pc_trace_cnt) >= pc_buflast) { + pcsample_enable = 0; + (void)disable_branch_tracing(); + wakeup(&pcsample_enable); + } return; } int pcsamples_bootstrap(void) { - if (!disable_branch_tracing()) - return(ENOTSUP); + if (!disable_branch_tracing()) { + return ENOTSUP; + } - pc_bufsize = npcbufs * sizeof(* pc_buffer); + pc_bufsize = npcbufs * sizeof(*pc_buffer); if (kmem_alloc(kernel_map, &pc_buftomem, - (vm_size_t)pc_bufsize) == KERN_SUCCESS) - pc_buffer = (unsigned int *) pc_buftomem; - else - pc_buffer = NULL; + (vm_size_t)pc_bufsize) == KERN_SUCCESS) { + pc_buffer = (unsigned int *) pc_buftomem; + } else { + pc_buffer = NULL; + } if (pc_buffer) { pc_bufptr = pc_buffer; pc_buflast = &pc_bufptr[npcbufs]; pcsample_enable = 0; - return(0); + return 0; } else { - pc_bufsize=0; - return(EINVAL); + pc_bufsize = 0; + return EINVAL; } - } int pcsamples_reinit(void) { - int ret=0; + int ret = 0; - pcsample_enable = 0; + pcsample_enable = 0; - if (pc_bufsize && pc_buffer) + if (pc_bufsize && pc_buffer) { kmem_free(kernel_map, (vm_offset_t)pc_buffer, pc_bufsize); + } - ret= pcsamples_bootstrap(); - return(ret); + ret = pcsamples_bootstrap(); + return ret; } void pcsamples_clear(void) { - /* Clean up the sample buffer, set defaults */ - global_state_pid = -1; + /* Clean up the sample buffer, set defaults */ + global_state_pid = -1; pcsample_enable = 0; - if(pc_bufsize && pc_buffer) - kmem_free(kernel_map, (vm_offset_t)pc_buffer, pc_bufsize); + if (pc_bufsize && pc_buffer) { + kmem_free(kernel_map, (vm_offset_t)pc_buffer, pc_bufsize); + } pc_buffer = NULL; pc_bufptr = NULL; pc_buflast = NULL; pc_bufsize = 0; - pcsample_beg= 0; - pcsample_end= 0; + pcsample_beg = 0; + pcsample_end = 0; bzero((void *)pcsample_comm, sizeof(pcsample_comm)); (void)disable_branch_tracing(); pc_sample_pid = 0; @@ -225,226 +226,199 @@ pcsamples_clear(void) int pcsamples_control(int *name, __unused u_int namelen, user_addr_t where, size_t *sizep) { - int ret=0; - size_t size=*sizep; - int value = name[1]; - pcinfo_t pc_bufinfo = {}; - pid_t *pidcheck; - - pid_t curpid; - struct proc *p, *curproc; - - if (name[0] != PCSAMPLE_GETNUMBUF) - { - curproc = current_proc(); - if (curproc) - curpid = curproc->p_pid; - else - return (ESRCH); - - if (global_state_pid == -1) - global_state_pid = curpid; - else if (global_state_pid != curpid) - { - if((p = proc_find(global_state_pid)) == NULL) - { - /* The global pid no longer exists */ - global_state_pid = curpid; - } - else - { - proc_rele(p); - /* The global pid exists, deny this request */ - return(EBUSY); - } - } - } - - - switch(name[0]) { - case PCSAMPLE_DISABLE: /* used to disable */ - pcsample_enable=0; - break; - case PCSAMPLE_SETNUMBUF: - /* The buffer size is bounded by a min and max number of samples */ - if (value < pc_trace_cnt) { - ret=EINVAL; - break; + int ret = 0; + size_t size = *sizep; + int value = name[1]; + pcinfo_t pc_bufinfo = {}; + pid_t *pidcheck; + + pid_t curpid; + struct proc *p, *curproc; + + if (name[0] != PCSAMPLE_GETNUMBUF) { + curproc = current_proc(); + if (curproc) { + curpid = curproc->p_pid; + } else { + return ESRCH; + } + + if (global_state_pid == -1) { + global_state_pid = curpid; + } else if (global_state_pid != curpid) { + if ((p = proc_find(global_state_pid)) == NULL) { + /* The global pid no longer exists */ + global_state_pid = curpid; + } else { + proc_rele(p); + /* The global pid exists, deny this request */ + return EBUSY; } - if (value <= MAX_PCSAMPLES) - /* 
npcbufs = value & ~(PC_TRACE_CNT-1); */ - npcbufs = value; - else - npcbufs = MAX_PCSAMPLES; + } + } + + + switch (name[0]) { + case PCSAMPLE_DISABLE: /* used to disable */ + pcsample_enable = 0; + break; + case PCSAMPLE_SETNUMBUF: + /* The buffer size is bounded by a min and max number of samples */ + if (value < pc_trace_cnt) { + ret = EINVAL; break; - case PCSAMPLE_GETNUMBUF: - if (size < sizeof(pc_bufinfo)) { - ret=EINVAL; - break; - } - pc_bufinfo.npcbufs = npcbufs; - pc_bufinfo.bufsize = pc_bufsize; - pc_bufinfo.enable = pcsample_enable; - pc_bufinfo.pcsample_beg = pcsample_beg; - pc_bufinfo.pcsample_end = pcsample_end; - if(copyout (&pc_bufinfo, where, sizeof(pc_bufinfo))) - { - ret=EINVAL; - } + } + if (value <= MAX_PCSAMPLES) { + /* npcbufs = value & ~(PC_TRACE_CNT-1); */ + npcbufs = value; + } else { + npcbufs = MAX_PCSAMPLES; + } + break; + case PCSAMPLE_GETNUMBUF: + if (size < sizeof(pc_bufinfo)) { + ret = EINVAL; break; - case PCSAMPLE_SETUP: - ret=pcsamples_reinit(); + } + pc_bufinfo.npcbufs = npcbufs; + pc_bufinfo.bufsize = pc_bufsize; + pc_bufinfo.enable = pcsample_enable; + pc_bufinfo.pcsample_beg = pcsample_beg; + pc_bufinfo.pcsample_end = pcsample_end; + if (copyout(&pc_bufinfo, where, sizeof(pc_bufinfo))) { + ret = EINVAL; + } + break; + case PCSAMPLE_SETUP: + ret = pcsamples_reinit(); + break; + case PCSAMPLE_REMOVE: + pcsamples_clear(); + break; + case PCSAMPLE_READBUF: + /* A nonzero value says enable and wait on the buffer */ + /* A zero value says read up the buffer immediately */ + if (value == 0) { + /* Do not wait on the buffer */ + pcsample_enable = 0; + (void)disable_branch_tracing(); + ret = pcsamples_read(where, sizep); break; - case PCSAMPLE_REMOVE: - pcsamples_clear(); + } else if ((pc_bufsize <= 0) || (!pc_buffer)) { + /* enable only if buffer is initialized */ + ret = EINVAL; break; - case PCSAMPLE_READBUF: - /* A nonzero value says enable and wait on the buffer */ - /* A zero value says read up the buffer immediately */ - if (value == 0) - { - /* Do not wait on the buffer */ - pcsample_enable = 0; - (void)disable_branch_tracing(); - ret = pcsamples_read(where, sizep); - break; - } - else if ((pc_bufsize <= 0) || (!pc_buffer)) - { - /* enable only if buffer is initialized */ - ret=EINVAL; - break; - } + } - /* Turn on branch tracing */ - if (!enable_branch_tracing()) - { - ret = ENOTSUP; - break; - } + /* Turn on branch tracing */ + if (!enable_branch_tracing()) { + ret = ENOTSUP; + break; + } - /* Enable sampling */ - pcsample_enable = 1; + /* Enable sampling */ + pcsample_enable = 1; - ret = tsleep(&pcsample_enable, PRIBIO | PCATCH, "pcsample", 0); - pcsample_enable = 0; - (void)disable_branch_tracing(); + ret = tsleep(&pcsample_enable, PRIBIO | PCATCH, "pcsample", 0); + pcsample_enable = 0; + (void)disable_branch_tracing(); - if (ret) - { - /* Eventually fix this... if (ret != EINTR) */ - if (ret) - { + if (ret) { + /* Eventually fix this... 
if (ret != EINTR) */ + if (ret) { /* On errors, except EINTR, we want to cleanup buffer ptrs */ /* pc_bufptr = pc_buffer; */ *sizep = 0; - } - } - else - { - /* The only way to get here is if the buffer is full */ - ret = pcsamples_read(where, sizep); - } + } + } else { + /* The only way to get here is if the buffer is full */ + ret = pcsamples_read(where, sizep); + } + break; + case PCSAMPLE_SETREG: + if (size < sizeof(pc_bufinfo)) { + ret = EINVAL; break; - case PCSAMPLE_SETREG: - if (size < sizeof(pc_bufinfo)) - { - ret = EINVAL; - break; - } - if (copyin(where, &pc_bufinfo, sizeof(pc_bufinfo))) - { - ret = EINVAL; - break; - } - - pcsample_beg = pc_bufinfo.pcsample_beg; - pcsample_end = pc_bufinfo.pcsample_end; + } + if (copyin(where, &pc_bufinfo, sizeof(pc_bufinfo))) { + ret = EINVAL; + break; + } + + pcsample_beg = pc_bufinfo.pcsample_beg; + pcsample_end = pc_bufinfo.pcsample_end; + break; + case PCSAMPLE_COMM: + if (!(sizeof(pcsample_comm) > size)) { + ret = EINVAL; break; - case PCSAMPLE_COMM: - if (!(sizeof(pcsample_comm) > size)) - { - ret = EINVAL; - break; - } - bzero((void *)pcsample_comm, sizeof(pcsample_comm)); - if (copyin(where, pcsample_comm, size)) - { - ret = EINVAL; - break; - } - - /* Check for command name or pid */ - if (pcsample_comm[0] != '\0') - { - ret= ENOTSUP; - break; - } - else - { - if (size != (2 * sizeof(pid_t))) - { - ret = EINVAL; - break; - } - else - { + } + bzero((void *)pcsample_comm, sizeof(pcsample_comm)); + if (copyin(where, pcsample_comm, size)) { + ret = EINVAL; + break; + } + + /* Check for command name or pid */ + if (pcsample_comm[0] != '\0') { + ret = ENOTSUP; + break; + } else { + if (size != (2 * sizeof(pid_t))) { + ret = EINVAL; + break; + } else { pidcheck = (pid_t *)pcsample_comm; pc_sample_pid = pidcheck[1]; - } - } - break; - default: - ret= ENOTSUP; - break; + } + } + break; + default: + ret = ENOTSUP; + break; } - return(ret); + return ret; } -/* - This buffer must be read up in one call. - If the buffer isn't big enough to hold - all the samples, it will copy up enough - to fill the buffer and throw the rest away. - This buffer never wraps. -*/ +/* + * This buffer must be read up in one call. + * If the buffer isn't big enough to hold + * all the samples, it will copy up enough + * to fill the buffer and throw the rest away. + * This buffer never wraps. 
+ */ int pcsamples_read(user_addr_t buffer, size_t *number) { - size_t count=0; - size_t copycount; - - count = (*number)/sizeof(* pc_buffer); - - if (count && pc_bufsize && pc_buffer) - { - copycount = pc_bufptr - pc_buffer; - - if (copycount <= 0) - { - *number = 0; - return(0); + size_t count = 0; + size_t copycount; + + count = (*number) / sizeof(*pc_buffer); + + if (count && pc_bufsize && pc_buffer) { + copycount = pc_bufptr - pc_buffer; + + if (copycount <= 0) { + *number = 0; + return 0; } - if (copycount > count) - copycount = count; + if (copycount > count) { + copycount = count; + } - /* We actually have data to send up */ - if(copyout(pc_buffer, buffer, copycount * sizeof(* pc_buffer))) - { - *number = 0; - return(EINVAL); + /* We actually have data to send up */ + if (copyout(pc_buffer, buffer, copycount * sizeof(*pc_buffer))) { + *number = 0; + return EINVAL; } - *number = copycount; - pc_bufptr = pc_buffer; - return(0); - } - else - { - *number = 0; - return(0); - } + *number = copycount; + pc_bufptr = pc_buffer; + return 0; + } else { + *number = 0; + return 0; + } } - diff --git a/bsd/kern/kern_persona.c b/bsd/kern/kern_persona.c index e05e8d424..c9c846717 100644 --- a/bsd/kern/kern_persona.c +++ b/bsd/kern/kern_persona.c @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -87,7 +87,8 @@ kauth_cred_t g_default_persona_cred; extern void mach_kauth_cred_uthread_update(void); -void personas_bootstrap(void) +void +personas_bootstrap(void) { struct posix_cred pcred; @@ -98,7 +99,6 @@ void personas_bootstrap(void) g_next_persona_id = FIRST_PERSONA_ID; persona_lck_grp_attr = lck_grp_attr_alloc_init(); - lck_grp_attr_setstat(persona_lck_grp_attr); persona_lck_grp = lck_grp_alloc_init("personas", persona_lck_grp_attr); persona_lck_attr = lck_attr_alloc_init(); @@ -106,8 +106,8 @@ void personas_bootstrap(void) lck_mtx_init(&all_personas_lock, persona_lck_grp, persona_lck_attr); persona_zone = zinit(sizeof(struct persona), - MAX_PERSONAS * sizeof(struct persona), - MAX_PERSONAS, "personas"); + MAX_PERSONAS * sizeof(struct persona), + MAX_PERSONAS, "personas"); assert(persona_zone != NULL); /* @@ -123,12 +123,13 @@ void personas_bootstrap(void) pcred.cr_gmuid = KAUTH_UID_NONE; g_default_persona_cred = posix_cred_create(&pcred); - if (!g_default_persona_cred) + if (!g_default_persona_cred) { panic("couldn't create default persona credentials!"); + } g_system_persona = persona_alloc(PERSONA_SYSTEM_UID, - PERSONA_SYSTEM_LOGIN, - PERSONA_SYSTEM, NULL); + PERSONA_SYSTEM_LOGIN, + PERSONA_SYSTEM, NULL); int err = persona_init_begin(g_system_persona); assert(err == 0); @@ -137,29 +138,33 @@ void personas_bootstrap(void) assert(g_system_persona != NULL); } -struct persona *persona_alloc(uid_t id, const char *login, int type, int *error) +struct persona * +persona_alloc(uid_t id, const char *login, int type, int *error) { struct persona *persona; int err = 0; if (!login) { pna_err("Must provide a login name for a new persona!"); - if (error) + if (error) { *error = EINVAL; + } return NULL; } if (type <= PERSONA_INVALID || type > PERSONA_TYPE_MAX) { pna_err("Invalid type: %d", type); - if (error) + if (error) { *error = EINVAL; + } return NULL; } persona = (struct persona *)zalloc(persona_zone); if (!persona) { - if (error) + if (error) { *error = ENOMEM; + } return NULL; } @@ -172,7 +177,7 @@ struct persona *persona_alloc(uid_t id, const char *login, int type, int *error) goto out_error; } - strncpy(persona->pna_login, login, sizeof(persona->pna_login)-1); + strncpy(persona->pna_login, login, sizeof(persona->pna_login) - 1); persona_dbg("Starting persona allocation for: '%s'", persona->pna_login); LIST_INIT(&persona->pna_members); @@ -224,13 +229,14 @@ out_error: * structure as valid * * Conditions: - * persona has been allocated via persona_alloc() - * nothing locked + * persona has been allocated via persona_alloc() + * nothing locked * * Returns: - * global persona list is locked (even on error) + * global persona list is locked (even on error) */ -int persona_init_begin(struct persona *persona) +int +persona_init_begin(struct persona *persona) { struct persona *tmp; int err = 0; @@ -246,8 +252,9 @@ int persona_init_begin(struct persona *persona) lock_personas(); try_again: - if (id == PERSONA_ID_NONE) + if (id == PERSONA_ID_NONE) { persona->pna_id = g_next_persona_id; + } persona_dbg("Beginning Initialization of %d:%d (%s)...", id, persona->pna_id, persona->pna_login); @@ -275,17 +282,19 @@ try_again: } persona_unlock(tmp); } - if (err) + if (err) { goto out; + } /* ensure the cred has proper UID/GID defaults */ kauth_cred_ref(persona->pna_cred); tmp_cred = kauth_cred_setuidgid(persona->pna_cred, - persona->pna_id, - persona->pna_id); + persona->pna_id, + persona->pna_id); kauth_cred_unref(&persona->pna_cred); - 
if (tmp_cred != persona->pna_cred) + if (tmp_cred != persona->pna_cred) { persona->pna_cred = tmp_cred; + } if (!persona->pna_cred) { err = EACCES; @@ -298,10 +307,11 @@ try_again: kauth_cred_ref(persona->pna_cred); /* opt _out_ of memberd as a default */ tmp_cred = kauth_cred_setgroups(persona->pna_cred, - &new_group, 1, KAUTH_UID_NONE); + &new_group, 1, KAUTH_UID_NONE); kauth_cred_unref(&persona->pna_cred); - if (tmp_cred != persona->pna_cred) + if (tmp_cred != persona->pna_cred) { persona->pna_cred = tmp_cred; + } if (!persona->pna_cred) { err = EACCES; @@ -309,8 +319,9 @@ try_again: } /* if the kernel supplied the persona ID, increment for next time */ - if (id == PERSONA_ID_NONE) + if (id == PERSONA_ID_NONE) { g_next_persona_id += PERSONA_ID_STEP; + } persona->pna_valid = PERSONA_INIT_TOKEN; @@ -340,13 +351,14 @@ out: * only mark the persona valid if the input parameter 'error' is 0. * * Conditions: - * persona is initialized via persona_init_begin() - * global persona list is locked via lock_personas() + * persona is initialized via persona_init_begin() + * global persona list is locked via lock_personas() * * Returns: - * global persona list is unlocked + * global persona list is unlocked */ -void persona_init_end(struct persona *persona, int error) +void +persona_init_end(struct persona *persona, int error) { if (persona == NULL) { return; @@ -365,7 +377,7 @@ void persona_init_end(struct persona *persona, int error) /* remove this persona from the global count */ (void)hw_atomic_add(&g_total_personas, -1); } else if (error == 0 && - persona->pna_valid == PERSONA_INIT_TOKEN) { + persona->pna_valid == PERSONA_INIT_TOKEN) { persona->pna_valid = PERSONA_MAGIC; LIST_INSERT_HEAD(&all_personas, persona, pna_list); persona_dbg("Initialization of %d (%s) Complete.", persona->pna_id, persona->pna_login); @@ -374,17 +386,20 @@ void persona_init_end(struct persona *persona, int error) unlock_personas(); } -static struct persona *persona_get_locked(struct persona *persona) +static struct persona * +persona_get_locked(struct persona *persona) { os_ref_retain_locked(&persona->pna_refcount); return persona; } -struct persona *persona_get(struct persona *persona) +struct persona * +persona_get(struct persona *persona) { struct persona *ret; - if (!persona) + if (!persona) { return NULL; + } persona_lock(persona); ret = persona_get_locked(persona); persona_unlock(persona); @@ -392,12 +407,14 @@ struct persona *persona_get(struct persona *persona) return ret; } -void persona_put(struct persona *persona) +void +persona_put(struct persona *persona) { int destroy = 0; - if (!persona) + if (!persona) { return; + } persona_lock(persona); if (os_ref_release_locked(&persona->pna_refcount) == 0) { @@ -405,22 +422,25 @@ void persona_put(struct persona *persona) } persona_unlock(persona); - if (!destroy) + if (!destroy) { return; + } persona_dbg("Destroying persona %s", persona_desc(persona, 0)); /* release our credential reference */ - if (persona->pna_cred) + if (persona->pna_cred) { kauth_cred_unref(&persona->pna_cred); + } /* remove it from the global list and decrement the count */ lock_personas(); persona_lock(persona); if (persona_valid(persona)) { LIST_REMOVE(persona, pna_list); - if (hw_atomic_add(&g_total_personas, -1) == UINT_MAX) + if (hw_atomic_add(&g_total_personas, -1) == UINT_MAX) { panic("persona count underflow!\n"); + } persona_mkinvalid(persona); } persona_unlock(persona); @@ -431,14 +451,17 @@ void persona_put(struct persona *persona) zfree(persona_zone, persona); } -uid_t 
persona_get_id(struct persona *persona) +uid_t +persona_get_id(struct persona *persona) { - if (persona) + if (persona) { return persona->pna_id; + } return PERSONA_ID_NONE; } -struct persona *persona_lookup(uid_t id) +struct persona * +persona_lookup(uid_t id) { struct persona *persona, *tmp; @@ -463,7 +486,8 @@ struct persona *persona_lookup(uid_t id) return persona; } -struct persona *persona_lookup_and_invalidate(uid_t id) +struct persona * +persona_lookup_and_invalidate(uid_t id) { struct persona *persona, *entry, *tmp; @@ -477,8 +501,9 @@ struct persona *persona_lookup_and_invalidate(uid_t id) persona = persona_get_locked(entry); assert(persona != NULL); LIST_REMOVE(persona, pna_list); - if (hw_atomic_add(&g_total_personas, -1) == UINT_MAX) + if (hw_atomic_add(&g_total_personas, -1) == UINT_MAX) { panic("persona ref count underflow!\n"); + } persona_mkinvalid(persona); } persona_unlock(entry); @@ -491,59 +516,70 @@ struct persona *persona_lookup_and_invalidate(uid_t id) return persona; } -int persona_find(const char *login, uid_t uid, - struct persona **persona, size_t *plen) +int +persona_find(const char *login, uid_t uid, + struct persona **persona, size_t *plen) { struct persona *tmp; int match = 0; size_t found = 0; - if (login) + if (login) { match++; - if (uid != PERSONA_ID_NONE) + } + if (uid != PERSONA_ID_NONE) { match++; + } - if (match == 0) + if (match == 0) { return EINVAL; + } persona_dbg("Searching with %d parameters (l:\"%s\", u:%d)", - match, login, uid); + match, login, uid); lock_personas(); LIST_FOREACH(tmp, &all_personas, pna_list) { int m = 0; persona_lock(tmp); - if (login && strncmp(tmp->pna_login, login, sizeof(tmp->pna_login)) == 0) + if (login && strncmp(tmp->pna_login, login, sizeof(tmp->pna_login)) == 0) { m++; - if (uid != PERSONA_ID_NONE && uid == tmp->pna_id) + } + if (uid != PERSONA_ID_NONE && uid == tmp->pna_id) { m++; + } if (m == match) { - if (persona && *plen > found) + if (persona && *plen > found) { persona[found] = persona_get_locked(tmp); + } found++; } #ifdef PERSONA_DEBUG - if (m > 0) + if (m > 0) { persona_dbg("ID:%d Matched %d/%d, found:%d, *plen:%d", - tmp->pna_id, m, match, (int)found, (int)*plen); + tmp->pna_id, m, match, (int)found, (int)*plen); + } #endif persona_unlock(tmp); } unlock_personas(); *plen = found; - if (!found) + if (!found) { return ESRCH; + } return 0; } -struct persona *persona_proc_get(pid_t pid) +struct persona * +persona_proc_get(pid_t pid) { struct persona *persona; proc_t p = proc_find(pid); - if (!p) + if (!p) { return NULL; + } proc_lock(p); persona = persona_get(p->p_persona); @@ -554,7 +590,8 @@ struct persona *persona_proc_get(pid_t pid) return persona; } -struct persona *current_persona_get(void) +struct persona * +current_persona_get(void) { proc_t p = current_proc(); struct persona *persona; @@ -569,29 +606,33 @@ struct persona *current_persona_get(void) /** * inherit a persona from parent to child */ -int persona_proc_inherit(proc_t child, proc_t parent) +int +persona_proc_inherit(proc_t child, proc_t parent) { if (child->p_persona != NULL) { persona_dbg("proc_inherit: child already in persona: %s", - persona_desc(child->p_persona, 0)); + persona_desc(child->p_persona, 0)); return -1; } /* no persona to inherit */ - if (parent->p_persona == NULL) + if (parent->p_persona == NULL) { return 0; + } return persona_proc_adopt(child, parent->p_persona, parent->p_ucred); } -int persona_proc_adopt_id(proc_t p, uid_t id, kauth_cred_t auth_override) +int +persona_proc_adopt_id(proc_t p, uid_t id, kauth_cred_t 
auth_override) { int ret; struct persona *persona; persona = persona_lookup(id); - if (!persona) + if (!persona) { return ESRCH; + } ret = persona_proc_adopt(p, persona, auth_override); @@ -611,9 +652,10 @@ typedef enum e_persona_reset_op { * internal cleanup routine for proc_set_cred_internal * */ -static struct persona *proc_reset_persona_internal(proc_t p, persona_reset_op_t op, - struct persona *old_persona, - struct persona *new_persona) +static struct persona * +proc_reset_persona_internal(proc_t p, persona_reset_op_t op, + struct persona *old_persona, + struct persona *new_persona) { #if (DEVELOPMENT || DEBUG) persona_lock_assert_held(new_persona); @@ -622,7 +664,7 @@ static struct persona *proc_reset_persona_internal(proc_t p, persona_reset_op_t switch (op) { case PROC_REMOVE_PERSONA: old_persona = p->p_persona; - /* fall through */ + /* fall through */ case PROC_RESET_OLD_PERSONA: break; default: @@ -661,8 +703,9 @@ static struct persona *proc_reset_persona_internal(proc_t p, persona_reset_op_t * previous persona the process had adopted. The caller is * responsible to release the reference. */ -static struct persona *proc_set_cred_internal(proc_t p, struct persona *persona, - kauth_cred_t auth_override, int *rlim_error) +static struct persona * +proc_set_cred_internal(proc_t p, struct persona *persona, + kauth_cred_t auth_override, int *rlim_error) { struct persona *old_persona = NULL; kauth_cred_t my_cred, my_new_cred; @@ -674,12 +717,13 @@ static struct persona *proc_set_cred_internal(proc_t p, struct persona *persona, * by the thread which took the trans lock! */ assert(((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) && - p->p_transholder == current_thread()); + p->p_transholder == current_thread()); assert(persona != NULL); /* no work to do if we "re-adopt" the same persona */ - if (p->p_persona == persona) + if (p->p_persona == persona) { return NULL; + } /* * If p is in a persona, then we need to remove 'p' from the list of @@ -688,16 +732,18 @@ static struct persona *proc_set_cred_internal(proc_t p, struct persona *persona, */ if (p->p_persona) { old_persona = proc_reset_persona_internal(p, PROC_REMOVE_PERSONA, - NULL, persona); + NULL, persona); } - if (auth_override) + if (auth_override) { my_new_cred = auth_override; - else + } else { my_new_cred = persona->pna_cred; + } - if (!my_new_cred) + if (!my_new_cred) { panic("NULL credentials (persona:%p)", persona); + } *rlim_error = 0; @@ -713,10 +759,10 @@ static struct persona *proc_set_cred_internal(proc_t p, struct persona *persona, if (new_uid != 0 && (rlim_t)chgproccnt(new_uid, 0) > p->p_rlimit[RLIMIT_NPROC].rlim_cur) { pna_err("PID:%d hit proc rlimit in new persona(%d): %s", - p->p_pid, new_uid, persona_desc(persona, 1)); + p->p_pid, new_uid, persona_desc(persona, 1)); *rlim_error = EACCES; (void)proc_reset_persona_internal(p, PROC_RESET_OLD_PERSONA, - old_persona, persona); + old_persona, persona); kauth_cred_unref(&my_new_cred); return NULL; } @@ -727,9 +773,9 @@ static struct persona *proc_set_cred_internal(proc_t p, struct persona *persona, set_proc_cred: my_cred = kauth_cred_proc_ref(p); persona_dbg("proc_adopt PID:%d, %s -> %s", - p->p_pid, - persona_desc(old_persona, 1), - persona_desc(persona, 1)); + p->p_pid, + persona_desc(old_persona, 1), + persona_desc(persona, 1)); old_uid = kauth_cred_getruid(my_cred); @@ -770,13 +816,14 @@ set_proc_cred: * Update the proc count. * If the UIDs are the same, then there is no work to do. 
*/ - if (old_persona) + if (old_persona) { old_uid = old_persona->pna_id; + } if (new_uid != old_uid) { count = chgproccnt(old_uid, -1); persona_dbg("Decrement %s:%d proc_count to: %d", - old_persona ? "Persona" : "UID", old_uid, count); + old_persona ? "Persona" : "UID", old_uid, count); /* * Increment the proc count on the UID associated with @@ -785,7 +832,7 @@ set_proc_cred: */ count = chgproccnt(new_uid, 1); persona_dbg("Increment Persona:%d (UID:%d) proc_count to: %d", - new_uid, kauth_cred_getuid(my_new_cred), count); + new_uid, kauth_cred_getuid(my_new_cred), count); } OSBitOrAtomic(P_ADOPTPERSONA, &p->p_flag); @@ -800,17 +847,19 @@ set_proc_cred: return old_persona; } -int persona_proc_adopt(proc_t p, struct persona *persona, kauth_cred_t auth_override) +int +persona_proc_adopt(proc_t p, struct persona *persona, kauth_cred_t auth_override) { int error; struct persona *old_persona; struct session * sessp; - if (!persona) + if (!persona) { return EINVAL; + } persona_dbg("%d adopting Persona %d (%s)", proc_pid(p), - persona->pna_id, persona_desc(persona, 0)); + persona->pna_id, persona_desc(persona, 0)); persona_lock(persona); if (!persona->pna_cred || !persona_valid(persona)) { @@ -832,8 +881,8 @@ int persona_proc_adopt(proc_t p, struct persona *persona, kauth_cred_t auth_over if (persona->pna_pgid) { uid_t uid = kauth_cred_getuid(persona->pna_cred); persona_dbg(" PID:%d, pgid:%d%s", - p->p_pid, persona->pna_pgid, - persona->pna_pgid == uid ? ", new_session" : "."); + p->p_pid, persona->pna_pgid, + persona->pna_pgid == uid ? ", new_session" : "."); enterpgrp(p, persona->pna_pgid, persona->pna_pgid == uid); } @@ -853,14 +902,16 @@ int persona_proc_adopt(proc_t p, struct persona *persona, kauth_cred_t auth_over /* * Drop the reference to the old persona. */ - if (old_persona) + if (old_persona) { persona_put(old_persona); + } persona_dbg("%s", error == 0 ? "SUCCESS" : "FAILED"); return error; } -int persona_proc_drop(proc_t p) +int +persona_proc_drop(proc_t p) { struct persona *persona = NULL; @@ -911,12 +962,14 @@ try_again: return 0; } -int persona_get_type(struct persona *persona) +int +persona_get_type(struct persona *persona) { int type; - if (!persona) + if (!persona) { return PERSONA_INVALID; + } persona_lock(persona); if (!persona_valid(persona)) { @@ -929,12 +982,14 @@ int persona_get_type(struct persona *persona) return type; } -int persona_set_cred(struct persona *persona, kauth_cred_t cred) +int +persona_set_cred(struct persona *persona, kauth_cred_t cred) { int ret = 0; kauth_cred_t my_cred; - if (!persona || !cred) + if (!persona || !cred) { return EINVAL; + } persona_lock(persona); if (!persona_initialized(persona)) { @@ -951,14 +1006,15 @@ int persona_set_cred(struct persona *persona, kauth_cred_t cred) /* ensure that the UID matches the persona ID */ my_cred = kauth_cred_setresuid(my_cred, persona->pna_id, - persona->pna_id, persona->pna_id, - KAUTH_UID_NONE); + persona->pna_id, persona->pna_id, + KAUTH_UID_NONE); /* TODO: clear the saved GID?! 
*/ /* replace the persona's cred with the new one */ - if (persona->pna_cred) + if (persona->pna_cred) { kauth_cred_unref(&persona->pna_cred); + } persona->pna_cred = my_cred; out_unlock: @@ -966,12 +1022,14 @@ out_unlock: return ret; } -int persona_set_cred_from_proc(struct persona *persona, proc_t proc) +int +persona_set_cred_from_proc(struct persona *persona, proc_t proc) { int ret = 0; kauth_cred_t parent_cred, my_cred; - if (!persona || !proc) + if (!persona || !proc) { return EINVAL; + } persona_lock(persona); if (!persona_initialized(persona)) { @@ -992,12 +1050,13 @@ int persona_set_cred_from_proc(struct persona *persona, proc_t proc) /* ensure that the UID matches the persona ID */ my_cred = kauth_cred_setresuid(my_cred, persona->pna_id, - persona->pna_id, persona->pna_id, - KAUTH_UID_NONE); + persona->pna_id, persona->pna_id, + KAUTH_UID_NONE); /* replace the persona's cred with the new one */ - if (persona->pna_cred) + if (persona->pna_cred) { kauth_cred_unref(&persona->pna_cred); + } persona->pna_cred = my_cred; kauth_cred_unref(&parent_cred); @@ -1007,16 +1066,19 @@ out_unlock: return ret; } -kauth_cred_t persona_get_cred(struct persona *persona) +kauth_cred_t +persona_get_cred(struct persona *persona) { kauth_cred_t cred = NULL; - if (!persona) + if (!persona) { return NULL; + } persona_lock(persona); - if (!persona_valid(persona)) + if (!persona_valid(persona)) { goto out_unlock; + } if (persona->pna_cred) { kauth_cred_ref(persona->pna_cred); @@ -1029,12 +1091,14 @@ out_unlock: return cred; } -uid_t persona_get_uid(struct persona *persona) +uid_t +persona_get_uid(struct persona *persona) { uid_t uid = UID_MAX; - if (!persona || !persona->pna_cred) + if (!persona || !persona->pna_cred) { return UID_MAX; + } persona_lock(persona); if (persona_valid(persona)) { @@ -1046,13 +1110,15 @@ uid_t persona_get_uid(struct persona *persona) return uid; } -int persona_set_gid(struct persona *persona, gid_t gid) +int +persona_set_gid(struct persona *persona, gid_t gid) { int ret = 0; kauth_cred_t my_cred, new_cred; - if (!persona || !persona->pna_cred) + if (!persona || !persona->pna_cred) { return EINVAL; + } persona_lock(persona); if (!persona_initialized(persona)) { @@ -1067,8 +1133,9 @@ int persona_set_gid(struct persona *persona, gid_t gid) my_cred = persona->pna_cred; kauth_cred_ref(my_cred); new_cred = kauth_cred_setresgid(my_cred, gid, gid, gid); - if (new_cred != my_cred) + if (new_cred != my_cred) { persona->pna_cred = new_cred; + } kauth_cred_unref(&my_cred); out_unlock: @@ -1076,30 +1143,36 @@ out_unlock: return ret; } -gid_t persona_get_gid(struct persona *persona) +gid_t +persona_get_gid(struct persona *persona) { gid_t gid = GID_MAX; - if (!persona || !persona->pna_cred) + if (!persona || !persona->pna_cred) { return GID_MAX; + } persona_lock(persona); - if (persona_valid(persona)) + if (persona_valid(persona)) { gid = kauth_cred_getgid(persona->pna_cred); + } persona_unlock(persona); return gid; } -int persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, uid_t gmuid) +int +persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, uid_t gmuid) { int ret = 0; kauth_cred_t my_cred, new_cred; - if (!persona || !persona->pna_cred) + if (!persona || !persona->pna_cred) { return EINVAL; - if (ngroups > NGROUPS_MAX) + } + if (ngroups > NGROUPS_MAX) { return EINVAL; + } persona_lock(persona); if (!persona_initialized(persona)) { @@ -1114,8 +1187,9 @@ int persona_set_groups(struct persona *persona, gid_t *groups, unsigned ngroups, my_cred = 
persona->pna_cred; kauth_cred_ref(my_cred); new_cred = kauth_cred_setgroups(my_cred, groups, (int)ngroups, gmuid); - if (new_cred != my_cred) + if (new_cred != my_cred) { persona->pna_cred = new_cred; + } kauth_cred_unref(&my_cred); out_unlock: @@ -1123,11 +1197,13 @@ out_unlock: return ret; } -int persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups, unsigned groups_sz) +int +persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups, unsigned groups_sz) { int ret = EINVAL; - if (!persona || !persona->pna_cred || !groups || !ngroups || groups_sz > NGROUPS) + if (!persona || !persona->pna_cred || !groups || !ngroups || groups_sz > NGROUPS) { return EINVAL; + } *ngroups = groups_sz; @@ -1143,16 +1219,19 @@ int persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups return ret; } -uid_t persona_get_gmuid(struct persona *persona) +uid_t +persona_get_gmuid(struct persona *persona) { uid_t gmuid = KAUTH_UID_NONE; - if (!persona || !persona->pna_cred) + if (!persona || !persona->pna_cred) { return gmuid; + } persona_lock(persona); - if (!persona_valid(persona)) + if (!persona_valid(persona)) { goto out_unlock; + } posix_cred_t pcred = posix_cred_get(persona->pna_cred); gmuid = pcred->cr_gmuid; @@ -1162,15 +1241,18 @@ out_unlock: return gmuid; } -int persona_get_login(struct persona *persona, char login[MAXLOGNAME+1]) +int +persona_get_login(struct persona *persona, char login[MAXLOGNAME + 1]) { int ret = EINVAL; - if (!persona || !persona->pna_cred) + if (!persona || !persona->pna_cred) { return EINVAL; + } persona_lock(persona); - if (!persona_valid(persona)) + if (!persona_valid(persona)) { goto out_unlock; + } strlcpy(login, persona->pna_login, MAXLOGNAME); ret = 0; @@ -1188,45 +1270,53 @@ out_unlock: * symbol exports for kext compatibility */ -uid_t persona_get_id(__unused struct persona *persona) +uid_t +persona_get_id(__unused struct persona *persona) { return PERSONA_ID_NONE; } -int persona_get_type(__unused struct persona *persona) +int +persona_get_type(__unused struct persona *persona) { return PERSONA_INVALID; } -kauth_cred_t persona_get_cred(__unused struct persona *persona) +kauth_cred_t +persona_get_cred(__unused struct persona *persona) { return NULL; } -struct persona *persona_lookup(__unused uid_t id) +struct persona * +persona_lookup(__unused uid_t id) { return NULL; } -int persona_find(__unused const char *login, - __unused uid_t uid, - __unused struct persona **persona, - __unused size_t *plen) +int +persona_find(__unused const char *login, + __unused uid_t uid, + __unused struct persona **persona, + __unused size_t *plen) { return ENOTSUP; } -struct persona *current_persona_get(void) +struct persona * +current_persona_get(void) { return NULL; } -struct persona *persona_get(struct persona *persona) +struct persona * +persona_get(struct persona *persona) { return persona; } -void persona_put(__unused struct persona *persona) +void +persona_put(__unused struct persona *persona) { return; } diff --git a/bsd/kern/kern_physio.c b/bsd/kern/kern_physio.c index 6d9580dcf..ccab3bbc3 100644 --- a/bsd/kern/kern_physio.c +++ b/bsd/kern/kern_physio.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -79,13 +79,13 @@ #include int -physio( void (*f_strategy)(buf_t), - buf_t bp, - dev_t dev, - int flags, - u_int (*f_minphys)(buf_t), - struct uio *uio, - int blocksize) +physio( void (*f_strategy)(buf_t), + buf_t bp, + dev_t dev, + int flags, + u_int (*f_minphys)(buf_t), + struct uio *uio, + int blocksize) { struct proc *p = current_proc(); int error, i, buf_allocated, todo, iosize; @@ -107,12 +107,13 @@ physio( void (*f_strategy)(buf_t), if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { user_addr_t base; user_size_t len; - + if (uio_getiov(uio, i, &base, &len) || - !useracc(base, - len, - (flags == B_READ) ? B_WRITE : B_READ)) - return (EFAULT); + !useracc(base, + len, + (flags == B_READ) ? B_WRITE : B_READ)) { + return EFAULT; + } } } /* @@ -121,11 +122,12 @@ physio( void (*f_strategy)(buf_t), if (bp == NULL) { bp = buf_alloc((vnode_t)0); buf_allocated = 1; - } else - orig_bflags = buf_flags(bp); + } else { + orig_bflags = buf_flags(bp); + } /* * at this point we should have a buffer - * that is marked BL_BUSY... we either + * that is marked BL_BUSY... we either * acquired it via buf_alloc, or it was * passed into us... if it was passed * in, it needs to already be owned by @@ -146,7 +148,7 @@ physio( void (*f_strategy)(buf_t), * "Set by physio for raw transfers.", in addition * to the read/write flag.) */ - buf_setflags(bp, B_PHYS | B_RAW); + buf_setflags(bp, B_PHYS | B_RAW); /* * [while there is data to transfer and no I/O error] @@ -154,85 +156,89 @@ physio( void (*f_strategy)(buf_t), * of the 'while' loop. */ while (uio_resid(uio) > 0) { - - if ( (iosize = uio_curriovlen(uio)) > MAXPHYSIO_WIRED) - iosize = MAXPHYSIO_WIRED; - /* - * make sure we're set to issue a fresh I/O - * in the right direction - */ - buf_reset(bp, flags); - - /* [set up the buffer for a maximum-sized transfer] */ - buf_setblkno(bp, uio_offset(uio) / blocksize); - buf_setcount(bp, iosize); - buf_setdataptr(bp, (uintptr_t)CAST_DOWN(caddr_t, uio_curriovbase(uio))); - - /* - * [call f_minphys to bound the tranfer size] - * and remember the amount of data to transfer, - * for later comparison. 
- */ - (*f_minphys)(bp); - todo = buf_count(bp); - - /* - * [lock the part of the user address space involved - * in the transfer] - */ - - if(UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { - error = vslock(CAST_USER_ADDR_T(buf_dataptr(bp)), - (user_size_t)todo); - if (error) - goto done; - } - - /* [call f_strategy to start the transfer] */ - (*f_strategy)(bp); - - - /* [wait for the transfer to complete] */ - error = (int)buf_biowait(bp); - - /* - * [unlock the part of the address space previously - * locked] - */ - if(UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) - vsunlock(CAST_USER_ADDR_T(buf_dataptr(bp)), - (user_size_t)todo, - (flags & B_READ)); - - /* - * [deduct the transfer size from the total number - * of data to transfer] - */ - done = buf_count(bp) - buf_resid(bp); - uio_update(uio, done); - - /* - * Now, check for an error. - * Also, handle weird end-of-disk semantics. - */ - if (error || done < todo) + if ((iosize = uio_curriovlen(uio)) > MAXPHYSIO_WIRED) { + iosize = MAXPHYSIO_WIRED; + } + /* + * make sure we're set to issue a fresh I/O + * in the right direction + */ + buf_reset(bp, flags); + + /* [set up the buffer for a maximum-sized transfer] */ + buf_setblkno(bp, uio_offset(uio) / blocksize); + buf_setcount(bp, iosize); + buf_setdataptr(bp, (uintptr_t)CAST_DOWN(caddr_t, uio_curriovbase(uio))); + + /* + * [call f_minphys to bound the tranfer size] + * and remember the amount of data to transfer, + * for later comparison. + */ + (*f_minphys)(bp); + todo = buf_count(bp); + + /* + * [lock the part of the user address space involved + * in the transfer] + */ + + if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { + error = vslock(CAST_USER_ADDR_T(buf_dataptr(bp)), + (user_size_t)todo); + if (error) { goto done; + } + } + + /* [call f_strategy to start the transfer] */ + (*f_strategy)(bp); + + + /* [wait for the transfer to complete] */ + error = (int)buf_biowait(bp); + + /* + * [unlock the part of the address space previously + * locked] + */ + if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { + vsunlock(CAST_USER_ADDR_T(buf_dataptr(bp)), + (user_size_t)todo, + (flags & B_READ)); + } + + /* + * [deduct the transfer size from the total number + * of data to transfer] + */ + done = buf_count(bp) - buf_resid(bp); + uio_update(uio, done); + + /* + * Now, check for an error. + * Also, handle weird end-of-disk semantics. + */ + if (error || done < todo) { + goto done; + } } done: - if (buf_allocated) - buf_free(bp); - else + if (buf_allocated) { + buf_free(bp); + } else { buf_setflags(bp, orig_bflags); + } - return (error); + return error; } /* * Leffler, et al., says on p. 231: * "The minphys() routine is called by physio() to adjust the * size of each I/O transfer before the latter is passed to - * the strategy routine..." + * the strategy routine..." * * so, just adjust the buffer's count accounting to MAXPHYS here, * and return the new count; @@ -240,7 +246,6 @@ done: u_int minphys(struct buf *bp) { - buf_setcount(bp, min(MAXPHYS, buf_count(bp))); - return buf_count(bp); + return buf_count(bp); } diff --git a/bsd/kern/kern_priv.c b/bsd/kern/kern_priv.c index 462c3fbd2..88adf2dda 100644 --- a/bsd/kern/kern_priv.c +++ b/bsd/kern/kern_priv.c @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -88,18 +88,19 @@ priv_check_cred(kauth_cred_t cred, int priv, int flags) */ #if CONFIG_MACF error = mac_priv_check(cred, priv); - if (error) + if (error) { goto out; + } #endif /* Only grant all privileges to root if DEFAULT_UNPRIVELEGED flag is NOT set. */ if (!(flags & PRIVCHECK_DEFAULT_UNPRIVILEGED_FLAG)) { /* - * Having determined if privilege is restricted by various policies, - * now determine if privilege is granted. At this point, any policy - * may grant privilege. For now, we allow short-circuit boolean - * evaluation, so may not call all policies. Perhaps we should. - */ + * Having determined if privilege is restricted by various policies, + * now determine if privilege is granted. At this point, any policy + * may grant privilege. For now, we allow short-circuit boolean + * evaluation, so may not call all policies. Perhaps we should. + */ if (kauth_cred_getuid(cred) == 0) { error = 0; goto out; @@ -123,11 +124,11 @@ priv_check_cred(kauth_cred_t cred, int priv, int flags) */ error = EPERM; out: - return (error); + return error; } int -proc_check_footprint_priv(void) +proc_check_footprint_priv(void) { - return (priv_check_cred(kauth_cred_get(), PRIV_VM_FOOTPRINT_LIMIT, 0)); + return priv_check_cred(kauth_cred_get(), PRIV_VM_FOOTPRINT_LIMIT, 0); } diff --git a/bsd/kern/kern_proc.c b/bsd/kern/kern_proc.c index 3107ae6d0..1d7689232 100644 --- a/bsd/kern/kern_proc.c +++ b/bsd/kern/kern_proc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -69,7 +69,7 @@ /* HISTORY * 04-Aug-97 Umesh Vaishampayan (umeshv@apple.com) * Added current_proc_EXTERNAL() function for the use of kernel - * lodable modules. + * lodable modules. * * 05-Jun-95 Mac Gillon (mgillon) at NeXT * New version based on 3.3NS and 4.4 @@ -103,7 +103,7 @@ #include #include #include -#include /* vm_map_switch_protect() */ +#include /* vm_map_switch_protect() */ #include #include #include @@ -139,12 +139,12 @@ */ struct uidinfo { LIST_ENTRY(uidinfo) ui_hash; - uid_t ui_uid; - long ui_proccnt; + uid_t ui_uid; + long ui_proccnt; }; -#define UIHASH(uid) (&uihashtbl[(uid) & uihash]) -LIST_HEAD(uihashhead, uidinfo) *uihashtbl; -u_long uihash; /* size of hash table - 1 */ +#define UIHASH(uid) (&uihashtbl[(uid) & uihash]) +LIST_HEAD(uihashhead, uidinfo) * uihashtbl; +u_long uihash; /* size of hash table - 1 */ /* * Other process lists @@ -168,11 +168,11 @@ extern int cs_debug; #if CONFIG_COREDUMP /* Name to give to core files */ #if defined(XNU_TARGET_OS_BRIDGE) -__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/internal/%N.core"}; +__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/internal/%N.core"}; #elif CONFIG_EMBEDDED -__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/private/var/cores/%N.core"}; +__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/private/var/cores/%N.core"}; #else -__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN+1] = {"/cores/core.%P"}; +__XNU_PRIVATE_EXTERN char corefilename[MAXPATHLEN + 1] = {"/cores/core.%P"}; #endif #endif @@ -207,12 +207,13 @@ int fixjob_callback(proc_t, void *); uint64_t get_current_unique_pid(void) { - proc_t p = current_proc(); + proc_t p = current_proc(); - if (p) + if (p) { return p->p_uniqueid; - else + } else { return 0; + } } /* @@ -248,9 +249,11 @@ chgproccnt(uid_t uid, int diff) again: proc_list_lock(); uipp = UIHASH(uid); - for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) - if (uip->ui_uid == uid) + for (uip = uipp->lh_first; uip != 0; uip = uip->ui_hash.le_next) { + if (uip->ui_uid == uid) { break; + } + } if (uip) { uip->ui_proccnt += diff; if (uip->ui_proccnt > 0) { @@ -258,8 +261,9 @@ again: proc_list_unlock(); goto out; } - if (uip->ui_proccnt < 0) + if (uip->ui_proccnt < 0) { panic("chgproccnt: procs < 0"); + } LIST_REMOVE(uip, ui_hash); retval = 0; proc_list_unlock(); @@ -286,13 +290,15 @@ again: } proc_list_unlock(); MALLOC_ZONE(newuip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK); - if (newuip == NULL) + if (newuip == NULL) { panic("chgproccnt: M_PROC zone depleted"); + } goto again; out: - if (newuip != NULL) + if (newuip != NULL) { FREE_ZONE(newuip, sizeof(*uip), M_PROC); - return(retval); + } + return retval; } /* @@ -304,13 +310,15 @@ inferior(proc_t p) int retval = 0; proc_list_lock(); - for (; p != current_proc(); p = p->p_pptr) - if (p->p_pid == 0) + for (; p != current_proc(); p = p->p_pptr) { + if (p->p_pid == 0) { goto out; + } + } retval = 1; out: proc_list_unlock(); - return(retval); + return retval; } /* @@ -324,21 +332,23 @@ isinferior(proc_t p, proc_t t) proc_t start = p; /* if p==t they are not inferior */ - if (p == t) - return(0); + if (p == t) { + return 0; + } proc_list_lock(); for (; p != t; p = p->p_pptr) { nchecked++; /* Detect here if we're in a cycle */ - if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) + if ((p->p_pid == 0) || (p->p_pptr == start) || (nchecked >= nprocs)) { goto out; + } } retval = 1; out: proc_list_unlock(); - return(retval); + return 
retval; } int @@ -347,22 +357,25 @@ proc_isinferior(int pid1, int pid2) proc_t p = PROC_NULL; proc_t t = PROC_NULL; int retval = 0; - - if (((p = proc_find(pid1)) != (proc_t)0 ) && ((t = proc_find(pid2)) != (proc_t)0)) + + if (((p = proc_find(pid1)) != (proc_t)0) && ((t = proc_find(pid2)) != (proc_t)0)) { retval = isinferior(p, t); + } - if (p != PROC_NULL) + if (p != PROC_NULL) { proc_rele(p); - if (t != PROC_NULL) + } + if (t != PROC_NULL) { proc_rele(t); + } - return(retval); + return retval; } proc_t proc_find(int pid) { - return(proc_findinternal(pid, 0)); + return proc_findinternal(pid, 0); } proc_t @@ -375,14 +388,15 @@ proc_findinternal(int pid, int locked) } p = pfind_locked(pid); - if ((p == PROC_NULL) || (p != proc_ref_locked(p))) + if ((p == PROC_NULL) || (p != proc_ref_locked(p))) { p = PROC_NULL; + } if (locked == 0) { proc_list_unlock(); } - return(p); + return p; } proc_t @@ -393,17 +407,19 @@ proc_findthread(thread_t thread) proc_list_lock(); uth = get_bsdthread_info(thread); - if (uth && (uth->uu_flag & UT_VFORK)) + if (uth && (uth->uu_flag & UT_VFORK)) { p = uth->uu_proc; - else + } else { p = (proc_t)(get_bsdthreadtask_info(thread)); + } p = proc_ref_locked(p); proc_list_unlock(); - return(p); + return p; } void -uthread_reset_proc_refcount(void *uthread) { +uthread_reset_proc_refcount(void *uthread) +{ uthread_t uth; uth = (uthread_t) uthread; @@ -420,7 +436,8 @@ uthread_reset_proc_refcount(void *uthread) { #if PROC_REF_DEBUG int -uthread_get_proc_refcount(void *uthread) { +uthread_get_proc_refcount(void *uthread) +{ uthread_t uth; if (proc_ref_tracking_disabled) { @@ -434,7 +451,8 @@ uthread_get_proc_refcount(void *uthread) { #endif static void -record_procref(proc_t p __unused, int count) { +record_procref(proc_t p __unused, int count) +{ uthread_t uth; uth = current_uthread(); @@ -457,7 +475,8 @@ record_procref(proc_t p __unused, int count) { } static boolean_t -uthread_needs_to_wait_in_proc_refwait(void) { +uthread_needs_to_wait_in_proc_refwait(void) +{ uthread_t uth = current_uthread(); /* @@ -466,20 +485,21 @@ uthread_needs_to_wait_in_proc_refwait(void) { * proc refs to wait in proc_refwait causes * deadlocks and makes proc_find non-reentrant. */ - if (uth->uu_proc_refcount == 0) + if (uth->uu_proc_refcount == 0) { return TRUE; + } return FALSE; } -int +int proc_rele(proc_t p) { proc_list_lock(); proc_rele_locked(p); proc_list_unlock(); - return(0); + return 0; } proc_t @@ -490,10 +510,11 @@ proc_self(void) p = current_proc(); proc_list_lock(); - if (p != proc_ref_locked(p)) + if (p != proc_ref_locked(p)) { p = PROC_NULL; + } proc_list_unlock(); - return(p); + return p; } @@ -502,14 +523,15 @@ proc_ref_locked(proc_t p) { proc_t p1 = p; int pid = proc_pid(p); - + retry: /* * if process still in creation or proc got recycled * during msleep then return failure. 
*/ - if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) - return (PROC_NULL); + if ((p == PROC_NULL) || (p1 != p) || ((p->p_listflag & P_LIST_INCREATE) != 0)) { + return PROC_NULL; + } /* * Do not return process marked for termination @@ -523,9 +545,9 @@ retry: ((p->p_listflag & P_LIST_EXITED) == 0) && ((p->p_listflag & P_LIST_DEAD) == 0) && (((p->p_listflag & (P_LIST_DRAIN | P_LIST_DRAINWAIT)) == 0) || - ((p->p_listflag & P_LIST_REFWAIT) != 0))) { + ((p->p_listflag & P_LIST_REFWAIT) != 0))) { if ((p->p_listflag & P_LIST_REFWAIT) != 0 && uthread_needs_to_wait_in_proc_refwait()) { - msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0) ; + msleep(&p->p_listflag, proc_list_mlock, 0, "proc_refwait", 0); /* * the proc might have been recycled since we dropped * the proc list lock, get the proc again. @@ -535,17 +557,16 @@ retry: } p->p_refcount++; record_procref(p, 1); - } - else + } else { p1 = PROC_NULL; + } - return(p1); + return p1; } void proc_rele_locked(proc_t p) { - if (p->p_refcount > 0) { p->p_refcount--; record_procref(p, -1); @@ -553,9 +574,9 @@ proc_rele_locked(proc_t p) p->p_listflag &= ~P_LIST_DRAINWAIT; wakeup(&p->p_refcount); } - } else + } else { panic("proc_rele_locked -ve ref\n"); - + } } proc_t @@ -565,16 +586,15 @@ proc_find_zombref(int pid) proc_list_lock(); - again: +again: p = pfind_locked(pid); /* should we bail? */ - if ((p == PROC_NULL) /* not found */ - || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */ - || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */ - + if ((p == PROC_NULL) /* not found */ + || ((p->p_listflag & P_LIST_INCREATE) != 0) /* not created yet */ + || ((p->p_listflag & P_LIST_EXITED) == 0)) { /* not started exit */ proc_list_unlock(); - return (PROC_NULL); + return PROC_NULL; } /* If someone else is controlling the (unreaped) zombie - wait */ @@ -586,14 +606,14 @@ proc_find_zombref(int pid) proc_list_unlock(); - return(p); + return p; } void proc_drop_zombref(proc_t p) { proc_list_lock(); - if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { + if ((p->p_listflag & P_LIST_WAITING) == P_LIST_WAITING) { p->p_listflag &= ~P_LIST_WAITING; wakeup(&p->p_stat); } @@ -630,7 +650,7 @@ proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait) /* Do not wait in ref drain for launchd exec */ while (p->p_refcount && !initexec) { p->p_listflag |= P_LIST_DRAINWAIT; - msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0) ; + msleep(&p->p_refcount, proc_list_mlock, 0, "proc_refdrain", 0); } p->p_listflag &= ~P_LIST_DRAIN; @@ -645,7 +665,7 @@ proc_refdrain_with_refwait(proc_t p, boolean_t get_ref_and_allow_wait) proc_list_unlock(); if (get_ref_and_allow_wait) { - return (p); + return p; } return NULL; } @@ -659,7 +679,7 @@ proc_refwake(proc_t p) proc_list_unlock(); } -proc_t +proc_t proc_parentholdref(proc_t p) { proc_t parent = PROC_NULL; @@ -674,7 +694,7 @@ loop: parent = PROC_NULL; goto out; } - + if ((pp->p_listflag & (P_LIST_CHILDDRSTART | P_LIST_CHILDDRAINED)) == P_LIST_CHILDDRSTART) { pp->p_listflag |= P_LIST_CHILDDRWAIT; msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0); @@ -691,16 +711,17 @@ loop: parent = pp; goto out; } - + out: proc_list_unlock(); - return(parent); + return parent; } -int +int proc_parentdropref(proc_t p, int listlocked) { - if (listlocked == 0) + if (listlocked == 0) { proc_list_lock(); + } if (p->p_parentref > 0) { p->p_parentref--; @@ -708,26 +729,29 @@ proc_parentdropref(proc_t p, int listlocked) p->p_listflag &= 
~P_LIST_PARENTREFWAIT; wakeup(&p->p_parentref); } - } else + } else { panic("proc_parentdropref -ve ref\n"); - if (listlocked == 0) + } + if (listlocked == 0) { proc_list_unlock(); + } - return(0); + return 0; } void proc_childdrainstart(proc_t p) { #if __PROC_INTERNAL_DEBUG - if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) + if ((p->p_listflag & P_LIST_CHILDDRSTART) == P_LIST_CHILDDRSTART) { panic("proc_childdrainstart: childdrain already started\n"); + } #endif p->p_listflag |= P_LIST_CHILDDRSTART; /* wait for all that hold parentrefs to drop */ while (p->p_parentref > 0) { p->p_listflag |= P_LIST_PARENTREFWAIT; - msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0) ; + msleep(&p->p_parentref, proc_list_mlock, 0, "proc_childdrainstart", 0); } } @@ -736,12 +760,13 @@ void proc_childdrainend(proc_t p) { #if __PROC_INTERNAL_DEBUG - if (p->p_childrencnt > 0) + if (p->p_childrencnt > 0) { panic("exiting: children stil hanging around\n"); + } #endif p->p_listflag |= P_LIST_CHILDDRAINED; - if ((p->p_listflag & (P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT)) != 0) { - p->p_listflag &= ~(P_LIST_CHILDLKWAIT |P_LIST_CHILDDRWAIT); + if ((p->p_listflag & (P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT)) != 0) { + p->p_listflag &= ~(P_LIST_CHILDLKWAIT | P_LIST_CHILDDRWAIT); wakeup(&p->p_childrencnt); } } @@ -750,49 +775,55 @@ void proc_checkdeadrefs(__unused proc_t p) { #if __PROC_INTERNAL_DEBUG - if ((p->p_listflag & P_LIST_INHASH) != 0) + if ((p->p_listflag & P_LIST_INHASH) != 0) { panic("proc being freed and still in hash %p: %u\n", p, p->p_listflag); - if (p->p_childrencnt != 0) + } + if (p->p_childrencnt != 0) { panic("proc being freed and pending children cnt %p:%d\n", p, p->p_childrencnt); - if (p->p_refcount != 0) + } + if (p->p_refcount != 0) { panic("proc being freed and pending refcount %p:%d\n", p, p->p_refcount); - if (p->p_parentref != 0) + } + if (p->p_parentref != 0) { panic("proc being freed and pending parentrefs %p:%d\n", p, p->p_parentref); + } #endif } int proc_pid(proc_t p) { - if (p != NULL) - return (p->p_pid); + if (p != NULL) { + return p->p_pid; + } return -1; } int proc_ppid(proc_t p) { - if (p != NULL) - return (p->p_ppid); + if (p != NULL) { + return p->p_ppid; + } return -1; } int proc_selfpid(void) { - return (current_proc()->p_pid); + return current_proc()->p_pid; } int proc_selfppid(void) { - return (current_proc()->p_ppid); + return current_proc()->p_ppid; } int proc_selfcsflags(void) { - return (current_proc()->p_csflags); + return current_proc()->p_csflags; } #if CONFIG_DTRACE @@ -803,37 +834,37 @@ dtrace_current_proc_vforking(void) struct uthread *ut = get_bsdthread_info(th); if (ut && - ((ut->uu_flag & (UT_VFORK|UT_VFORKING)) == (UT_VFORK|UT_VFORKING))) { + ((ut->uu_flag & (UT_VFORK | UT_VFORKING)) == (UT_VFORK | UT_VFORKING))) { /* * Handle the narrow window where we're in the vfork syscall, * but we're not quite ready to claim (in particular, to DTrace) * that we're running as the child. 
*/ - return (get_bsdtask_info(get_threadtask(th))); + return get_bsdtask_info(get_threadtask(th)); } - return (current_proc()); + return current_proc(); } int dtrace_proc_selfpid(void) { - return (dtrace_current_proc_vforking()->p_pid); + return dtrace_current_proc_vforking()->p_pid; } -int +int dtrace_proc_selfppid(void) { - return (dtrace_current_proc_vforking()->p_ppid); + return dtrace_current_proc_vforking()->p_ppid; } uid_t dtrace_proc_selfruid(void) { - return (dtrace_current_proc_vforking()->p_ruid); + return dtrace_current_proc_vforking()->p_ruid; } #endif /* CONFIG_DTRACE */ -proc_t +proc_t proc_parent(proc_t p) { proc_t parent; @@ -843,23 +874,24 @@ proc_parent(proc_t p) loop: pp = p->p_pptr; parent = proc_ref_locked(pp); - if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED)== 0)){ + if ((parent == PROC_NULL) && (pp != PROC_NULL) && (pp->p_stat != SZOMB) && ((pp->p_listflag & P_LIST_EXITED) != 0) && ((pp->p_listflag & P_LIST_CHILDDRAINED) == 0)) { pp->p_listflag |= P_LIST_CHILDLKWAIT; msleep(&pp->p_childrencnt, proc_list_mlock, 0, "proc_parent", 0); goto loop; } proc_list_unlock(); - return(parent); + return parent; } static boolean_t proc_parent_is_currentproc(proc_t p) { boolean_t ret = FALSE; - + proc_list_lock(); - if (p->p_pptr == current_proc()) + if (p->p_pptr == current_proc()) { ret = TRUE; + } proc_list_unlock(); return ret; @@ -880,13 +912,15 @@ void proc_name_kdp(task_t t, char * buf, int size) { proc_t p = get_bsdtask_info(t); - if (p == PROC_NULL) + if (p == PROC_NULL) { return; + } - if ((size_t)size > sizeof(p->p_comm)) + if ((size_t)size > sizeof(p->p_comm)) { strlcpy(buf, &p->p_name[0], MIN((int)sizeof(p->p_name), size)); - else + } else { strlcpy(buf, &p->p_comm[0], MIN((int)sizeof(p->p_comm), size)); + } } int @@ -916,15 +950,18 @@ proc_starttime_kdp(void *p, unaligned_u64 *tv_sec, unaligned_u64 *tv_usec, unali { proc_t pp = (proc_t)p; if (pp != PROC_NULL) { - if (tv_sec != NULL) + if (tv_sec != NULL) { *tv_sec = pp->p_start.tv_sec; - if (tv_usec != NULL) + } + if (tv_usec != NULL) { *tv_usec = pp->p_start.tv_usec; + } if (abstime != NULL) { - if (pp->p_stats != NULL) + if (pp->p_stats != NULL) { *abstime = pp->p_stats->ps_start; - else + } else { *abstime = 0; + } } } } @@ -938,9 +975,10 @@ proc_name_address(void *p) char * proc_best_name(proc_t p) { - if (p->p_name[0] != 0) - return (&p->p_name[0]); - return (&p->p_comm[0]); + if (p->p_name[0] != 0) { + return &p->p_name[0]; + } + return &p->p_comm[0]; } void @@ -948,7 +986,7 @@ proc_selfname(char * buf, int size) { proc_t p; - if ((p = current_proc())!= (proc_t)0) { + if ((p = current_proc()) != (proc_t)0) { strlcpy(buf, &p->p_comm[0], size); } } @@ -959,23 +997,23 @@ proc_signal(int pid, int signum) proc_t p; if ((p = proc_find(pid)) != PROC_NULL) { - psignal(p, signum); - proc_rele(p); - } + psignal(p, signum); + proc_rele(p); + } } int proc_issignal(int pid, sigset_t mask) { proc_t p; - int error=0; + int error = 0; if ((p = proc_find(pid)) != PROC_NULL) { error = proc_pendingsignals(p, mask); proc_rele(p); - } + } - return(error); + return error; } int @@ -983,10 +1021,10 @@ proc_noremotehang(proc_t p) { int retval = 0; - if (p) + if (p) { retval = p->p_flag & P_NOREMOTEHANG; - return(retval? 1: 0); - + } + return retval? 1: 0; } int @@ -994,9 +1032,10 @@ proc_exiting(proc_t p) { int retval = 0; - if (p) + if (p) { retval = p->p_lflag & P_LEXIT; - return(retval? 1: 0); + } + return retval? 
1: 0; } int @@ -1004,10 +1043,10 @@ proc_in_teardown(proc_t p) { int retval = 0; - if (p) + if (p) { retval = p->p_lflag & P_LPEXIT; - return(retval? 1: 0); - + } + return retval? 1: 0; } int @@ -1015,10 +1054,10 @@ proc_forcequota(proc_t p) { int retval = 0; - if (p) + if (p) { retval = p->p_flag & P_FORCEQUOTA; - return(retval? 1: 0); - + } + return retval? 1: 0; } int @@ -1030,7 +1069,7 @@ proc_suser(proc_t p) my_cred = kauth_cred_proc_ref(p); error = suser(my_cred, &p->p_acflag); kauth_cred_unref(&my_cred); - return(error); + return error; } task_t @@ -1039,7 +1078,7 @@ proc_task(proc_t proc) return (task_t)proc->task; } -/* +/* * Obtain the first thread in a process * * XXX This is a bad thing to do; it exists predominantly to support the @@ -1048,20 +1087,21 @@ proc_task(proc_t proc) * XXX needs an audit of the context (proxy vs. not) to clean up. */ thread_t -proc_thread(proc_t proc) -{ - uthread_t uth = TAILQ_FIRST(&proc->p_uthlist); +proc_thread(proc_t proc) +{ + uthread_t uth = TAILQ_FIRST(&proc->p_uthlist); - if (uth != NULL) - return(uth->uu_context.vc_thread); + if (uth != NULL) { + return uth->uu_context.vc_thread; + } - return(NULL); -} + return NULL; +} kauth_cred_t proc_ucred(proc_t p) { - return(p->p_ucred); + return p->p_ucred; } struct uthread * @@ -1069,14 +1109,14 @@ current_uthread() { thread_t th = current_thread(); - return((struct uthread *)get_bsdthread_info(th)); + return (struct uthread *)get_bsdthread_info(th); } int proc_is64bit(proc_t p) { - return(IS_64BIT_PROCESS(p)); + return IS_64BIT_PROCESS(p); } int @@ -1089,7 +1129,7 @@ proc_is64bit_data(proc_t p) int proc_pidversion(proc_t p) { - return(p->p_idversion); + return p->p_idversion; } uint32_t @@ -1101,25 +1141,25 @@ proc_persona_id(proc_t p) uint32_t proc_getuid(proc_t p) { - return(p->p_uid); + return p->p_uid; } uint32_t proc_getgid(proc_t p) { - return(p->p_gid); + return p->p_gid; } uint64_t proc_uniqueid(proc_t p) { - return(p->p_uniqueid); + return p->p_uniqueid; } uint64_t proc_puniqueid(proc_t p) { - return(p->p_puniqueid); + return p->p_puniqueid; } void @@ -1128,7 +1168,7 @@ proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES]) #if CONFIG_COALITIONS task_coalition_ids(p->task, ids); #else - memset(ids, 0, sizeof(uint64_t [COALITION_NUM_TYPES])); + memset(ids, 0, sizeof(uint64_t[COALITION_NUM_TYPES])); #endif return; } @@ -1136,13 +1176,13 @@ proc_coalitionids(__unused proc_t p, __unused uint64_t ids[COALITION_NUM_TYPES]) uint64_t proc_was_throttled(proc_t p) { - return (p->was_throttled); + return p->was_throttled; } uint64_t proc_did_throttle(proc_t p) { - return (p->did_throttle); + return p->did_throttle; } int @@ -1165,7 +1205,7 @@ proc_getexecutablevnode(proc_t p) { vnode_t tvp = p->p_textvp; - if ( tvp != NULLVP) { + if (tvp != NULLVP) { if (vnode_getwithref(tvp) == 0) { return tvp; } @@ -1178,22 +1218,23 @@ proc_getexecutablevnode(proc_t p) void bsd_set_dependency_capable(task_t task) { - proc_t p = get_bsdtask_info(task); + proc_t p = get_bsdtask_info(task); - if (p) { - OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag); - } + if (p) { + OSBitOrAtomic(P_DEPENDENCY_CAPABLE, &p->p_flag); + } } -#ifndef __arm__ +#ifndef __arm__ int IS_64BIT_PROCESS(proc_t p) { - if (p && (p->p_flag & P_LP64)) - return(1); - else - return(0); + if (p && (p->p_flag & P_LP64)) { + return 1; + } else { + return 0; + } } #endif @@ -1208,21 +1249,23 @@ pfind_locked(pid_t pid) proc_t q; #endif - if (!pid) - return (kernproc); + if (!pid) { + return kernproc; + } for (p = 
PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next) { if (p->p_pid == pid) { #if DEBUG for (q = p->p_hash.le_next; q != 0; q = q->p_hash.le_next) { - if ((p !=q) && (q->p_pid == pid)) + if ((p != q) && (q->p_pid == pid)) { panic("two procs with same pid %p:%p:%d:%d\n", p, q, p->p_pid, q->p_pid); + } } #endif - return (p); + return p; } } - return (NULL); + return NULL; } /* @@ -1236,13 +1279,15 @@ pzfind(pid_t pid) proc_list_lock(); - for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) - if (p->p_pid == pid) + for (p = zombproc.lh_first; p != 0; p = p->p_list.le_next) { + if (p->p_pid == pid) { break; + } + } proc_list_unlock(); - return (p); + return p; } /* @@ -1256,12 +1301,13 @@ pgfind(pid_t pgid) proc_list_lock(); pgrp = pgfind_internal(pgid); - if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) + if ((pgrp == NULL) || ((pgrp->pg_listflags & PGRP_FLAG_TERMINATE) != 0)) { pgrp = PGRP_NULL; - else + } else { pgrp->pg_refcount++; + } proc_list_unlock(); - return(pgrp); + return pgrp; } @@ -1271,17 +1317,20 @@ pgfind_internal(pid_t pgid) { struct pgrp *pgrp; - for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) - if (pgrp->pg_id == pgid) - return (pgrp); - return (NULL); + for (pgrp = PGRPHASH(pgid)->lh_first; pgrp != 0; pgrp = pgrp->pg_hash.le_next) { + if (pgrp->pg_id == pgid) { + return pgrp; + } + } + return NULL; } void pg_rele(struct pgrp * pgrp) { - if(pgrp == PGRP_NULL) + if (pgrp == PGRP_NULL) { return; + } pg_rele_dropref(pgrp); } @@ -1304,10 +1353,12 @@ session_find_internal(pid_t sessid) { struct session *sess; - for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) - if (sess->s_sid == sessid) - return (sess); - return (NULL); + for (sess = SESSHASH(sessid)->lh_first; sess != 0; sess = sess->s_hash.le_next) { + if (sess->s_sid == sessid) { + return sess; + } + } + return NULL; } @@ -1344,11 +1395,11 @@ pinsertchild(proc_t parent, proc_t child) pg_rele(pg); proc_list_lock(); - + #if CONFIG_MEMORYSTATUS memorystatus_add(child, TRUE); #endif - + parent->p_childrencnt++; LIST_INSERT_HEAD(&parent->p_children, child, p_sibling); @@ -1377,10 +1428,12 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) procsp = proc_session(p); #if DIAGNOSTIC - if (pgrp != NULL && mksess) /* firewalls */ + if (pgrp != NULL && mksess) { /* firewalls */ panic("enterpgrp: setsid into non-empty pgrp"); - if (SESS_LEADER(p, procsp)) + } + if (SESS_LEADER(p, procsp)) { panic("enterpgrp: session leader attempted setpgrp"); + } #endif if (pgrp == PGRP_NULL) { pid_t savepid = p->p_pid; @@ -1389,22 +1442,27 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) * new process group */ #if DIAGNOSTIC - if (p->p_pid != pgid) + if (p->p_pid != pgid) { panic("enterpgrp: new pgrp and pid != pgid"); + } #endif MALLOC_ZONE(pgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP, M_WAITOK); - if (pgrp == NULL) + if (pgrp == NULL) { panic("enterpgrp: M_PGRP zone depleted"); + } if ((np = proc_find(savepid)) == NULL || np != p) { - if (np != PROC_NULL) + if (np != PROC_NULL) { proc_rele(np); - if (mypgrp != PGRP_NULL) + } + if (mypgrp != PGRP_NULL) { pg_rele(mypgrp); - if (procsp != SESSION_NULL) + } + if (procsp != SESSION_NULL) { session_rele(procsp); + } FREE_ZONE(pgrp, sizeof(struct pgrp), M_PGRP); - return (ESRCH); + return ESRCH; } proc_rele(np); if (mksess) { @@ -1414,9 +1472,10 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) * new session */ MALLOC_ZONE(sess, struct session *, - sizeof(struct session), M_SESSION, M_WAITOK); - if (sess == NULL) + 
sizeof(struct session), M_SESSION, M_WAITOK); + if (sess == NULL) { panic("enterpgrp: M_SESSION zone depleted"); + } sess->s_leader = p; sess->s_sid = p->p_pid; sess->s_count = 1; @@ -1425,11 +1484,9 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) sess->s_flags = 0; sess->s_listflags = 0; sess->s_ttypgrpid = NO_PID; -#if CONFIG_FINE_LOCK_GROUPS + lck_mtx_init(&sess->s_mlock, proc_mlock_grp, proc_lck_attr); -#else - lck_mtx_init(&sess->s_mlock, proc_lck_grp, proc_lck_attr); -#endif + bcopy(procsp->s_login, sess->s_login, sizeof(sess->s_login)); OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag); @@ -1438,24 +1495,24 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) proc_list_unlock(); pgrp->pg_session = sess; #if DIAGNOSTIC - if (p != current_proc()) + if (p != current_proc()) { panic("enterpgrp: mksession and p != curproc"); + } #endif } else { proc_list_lock(); pgrp->pg_session = procsp; - - if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) - panic("enterpgrp: providing ref to terminating session "); + + if ((pgrp->pg_session->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) { + panic("enterpgrp: providing ref to terminating session "); + } pgrp->pg_session->s_count++; proc_list_unlock(); } pgrp->pg_id = pgid; -#if CONFIG_FINE_LOCK_GROUPS + lck_mtx_init(&pgrp->pg_mlock, proc_mlock_grp, proc_lck_attr); -#else - lck_mtx_init(&pgrp->pg_mlock, proc_lck_grp, proc_lck_attr); -#endif + LIST_INIT(&pgrp->pg_members); pgrp->pg_membercnt = 0; pgrp->pg_jobc = 0; @@ -1466,15 +1523,18 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) proc_list_unlock(); } else if (pgrp == mypgrp) { pg_rele(pgrp); - if (mypgrp != NULL) + if (mypgrp != NULL) { pg_rele(mypgrp); - if (procsp != SESSION_NULL) + } + if (procsp != SESSION_NULL) { session_rele(procsp); - return (0); + } + return 0; } - if (procsp != SESSION_NULL) + if (procsp != SESSION_NULL) { session_rele(procsp); + } /* * Adjust eligibility of affected pgrps to participate in job control. 
* Increment eligibility counts before decrementing, otherwise we @@ -1483,12 +1543,13 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) fixjobc(p, pgrp, 1); fixjobc(p, mypgrp, 0); - if(mypgrp != PGRP_NULL) + if (mypgrp != PGRP_NULL) { pg_rele(mypgrp); + } pgrp_replace(p, pgrp); pg_rele(pgrp); - return(0); + return 0; } /* @@ -1497,9 +1558,8 @@ enterpgrp(proc_t p, pid_t pgid, int mksess) int leavepgrp(proc_t p) { - pgrp_remove(p); - return (0); + return 0; } /* @@ -1527,7 +1587,7 @@ pgdelete_dropref(struct pgrp *pgrp) } pgrp->pg_listflags |= PGRP_FLAG_TERMINATE; - + if (pgrp->pg_refcount > 0) { proc_list_unlock(); return; @@ -1537,7 +1597,7 @@ pgdelete_dropref(struct pgrp *pgrp) LIST_REMOVE(pgrp, pg_hash); proc_list_unlock(); - + ttyp = SESSION_TP(pgrp->pg_session); if (ttyp != TTY_NULL) { if (ttyp->t_pgrp == pgrp) { @@ -1554,39 +1614,37 @@ pgdelete_dropref(struct pgrp *pgrp) proc_list_lock(); sessp = pgrp->pg_session; - if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) - panic("pg_deleteref: manipulating refs of already terminating session"); + if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) { + panic("pg_deleteref: manipulating refs of already terminating session"); + } if (--sessp->s_count == 0) { - if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) + if ((sessp->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) { panic("pg_deleteref: terminating already terminated session"); + } sessp->s_listflags |= S_LIST_TERM; ttyp = SESSION_TP(sessp); LIST_REMOVE(sessp, s_hash); proc_list_unlock(); if (ttyp != TTY_NULL) { tty_lock(ttyp); - if (ttyp->t_session == sessp) + if (ttyp->t_session == sessp) { ttyp->t_session = NULL; + } tty_unlock(ttyp); } proc_list_lock(); sessp->s_listflags |= S_LIST_DEAD; - if (sessp->s_count != 0) - panic("pg_deleteref: freeing session in use"); + if (sessp->s_count != 0) { + panic("pg_deleteref: freeing session in use"); + } proc_list_unlock(); -#if CONFIG_FINE_LOCK_GROUPS lck_mtx_destroy(&sessp->s_mlock, proc_mlock_grp); -#else - lck_mtx_destroy(&sessp->s_mlock, proc_lck_grp); -#endif + FREE_ZONE(sessp, sizeof(struct session), M_SESSION); - } else + } else { proc_list_unlock(); -#if CONFIG_FINE_LOCK_GROUPS + } lck_mtx_destroy(&pgrp->pg_mlock, proc_mlock_grp); -#else - lck_mtx_destroy(&pgrp->pg_mlock, proc_lck_grp); -#endif FREE_ZONE(pgrp, sizeof(*pgrp), M_PGRP); } @@ -1617,7 +1675,7 @@ fixjob_callback(proc_t p, void * arg) hispg = proc_pgrp(p); hissess = proc_session(p); - if ((hispg != pg) && + if ((hispg != pg) && (hissess == mysession)) { pgrp_lock(hispg); if (entering) { @@ -1626,15 +1684,18 @@ fixjob_callback(proc_t p, void * arg) } else if (--hispg->pg_jobc == 0) { pgrp_unlock(hispg); orphanpg(hispg); - } else + } else { pgrp_unlock(hispg); + } } - if (hissess != SESSION_NULL) + if (hissess != SESSION_NULL) { session_rele(hissess); - if (hispg != PGRP_NULL) + } + if (hispg != PGRP_NULL) { pg_rele(hispg); + } - return(PROC_RETURNED); + return PROC_RETURNED; } void @@ -1648,21 +1709,23 @@ fixjobc(proc_t p, struct pgrp *pgrp, int entering) boolean_t proc_parent_self; /* - * Check if p's parent is current proc, if yes then no need to take - * a ref; calling proc_parent with current proc as parent may + * Check if p's parent is current proc, if yes then no need to take + * a ref; calling proc_parent with current proc as parent may * deadlock if current proc is exiting. 
*/ proc_parent_self = proc_parent_is_currentproc(p); - if (proc_parent_self) + if (proc_parent_self) { parent = current_proc(); - else + } else { parent = proc_parent(p); + } if (parent != PROC_NULL) { - hispgrp = proc_pgrp(parent); + hispgrp = proc_pgrp(parent); hissess = proc_session(parent); - if (!proc_parent_self) + if (!proc_parent_self) { proc_rele(parent); + } } @@ -1676,17 +1739,20 @@ fixjobc(proc_t p, struct pgrp *pgrp, int entering) if (entering) { pgrp->pg_jobc++; pgrp_unlock(pgrp); - }else if (--pgrp->pg_jobc == 0) { + } else if (--pgrp->pg_jobc == 0) { pgrp_unlock(pgrp); orphanpg(pgrp); - } else + } else { pgrp_unlock(pgrp); + } } - if (hissess != SESSION_NULL) + if (hissess != SESSION_NULL) { session_rele(hissess); - if (hispgrp != PGRP_NULL) + } + if (hispgrp != PGRP_NULL) { pg_rele(hispgrp); + } /* * Check this process' children to see whether they qualify @@ -1794,14 +1860,14 @@ out: int proc_is_classic(proc_t p __unused) { - return (0); + return 0; } /* XXX Why does this function exist? Need to kill it off... */ proc_t current_proc_EXTERNAL(void) { - return (current_proc()); + return current_proc(); } int @@ -1824,32 +1890,33 @@ proc_is_forcing_hfs_case_sensitivity(proc_t p) */ __private_extern__ int proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name, - size_t cf_name_len) + size_t cf_name_len) { const char *format, *appendstr; - char id_buf[11]; /* Buffer for pid/uid -- max 4B */ + char id_buf[11]; /* Buffer for pid/uid -- max 4B */ size_t i, l, n; - if (cf_name == NULL) + if (cf_name == NULL) { goto toolong; + } format = corefilename; for (i = 0, n = 0; n < cf_name_len && format[i]; i++) { switch (format[i]) { - case '%': /* Format character */ + case '%': /* Format character */ i++; switch (format[i]) { case '%': appendstr = "%"; break; - case 'N': /* process name */ + case 'N': /* process name */ appendstr = name; break; - case 'P': /* process id */ + case 'P': /* process id */ snprintf(id_buf, sizeof(id_buf), "%u", pid); appendstr = id_buf; break; - case 'U': /* user id */ + case 'U': /* user id */ snprintf(id_buf, sizeof(id_buf), "%u", uid); appendstr = id_buf; break; @@ -1857,13 +1924,14 @@ proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name, goto endofstring; default: appendstr = ""; - log(LOG_ERR, + log(LOG_ERR, "Unknown format character %c in `%s'\n", format[i], format); } l = strlen(appendstr); - if ((n + l) >= cf_name_len) + if ((n + l) >= cf_name_len) { goto toolong; + } bcopy(appendstr, cf_name + n, l); n += l; break; @@ -1871,36 +1939,38 @@ proc_core_name(const char *name, uid_t uid, pid_t pid, char *cf_name, cf_name[n++] = format[i]; } } - if (format[i] != '\0') + if (format[i] != '\0') { goto toolong; - return (0); + } + return 0; toolong: log(LOG_ERR, "pid %ld (%s), uid (%u): corename is too long\n", (long)pid, name, (uint32_t)uid); - return (1); + return 1; endofstring: log(LOG_ERR, "pid %ld (%s), uid (%u): unexpected end of string after %% token\n", (long)pid, name, (uint32_t)uid); - return (1); + return 1; } #endif /* CONFIG_COREDUMP */ /* Code Signing related routines */ -int +int csops(__unused proc_t p, struct csops_args *uap, __unused int32_t *retval) { - return(csops_internal(uap->pid, uap->ops, uap->useraddr, - uap->usersize, USER_ADDR_NULL)); + return csops_internal(uap->pid, uap->ops, uap->useraddr, + uap->usersize, USER_ADDR_NULL); } -int +int csops_audittoken(__unused proc_t p, struct csops_audittoken_args *uap, __unused int32_t *retval) { - if (uap->uaudittoken == USER_ADDR_NULL) - return(EINVAL); - 
return(csops_internal(uap->pid, uap->ops, uap->useraddr, - uap->usersize, uap->uaudittoken)); + if (uap->uaudittoken == USER_ADDR_NULL) { + return EINVAL; + } + return csops_internal(uap->pid, uap->ops, uap->useraddr, + uap->usersize, uap->uaudittoken); } static int @@ -1909,8 +1979,9 @@ csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uadd char fakeheader[8] = { 0 }; int error; - if (usize < sizeof(fakeheader)) + if (usize < sizeof(fakeheader)) { return ERANGE; + } /* if no blob, fill in zero header */ if (NULL == start) { @@ -1920,10 +1991,11 @@ csops_copy_token(void *start, size_t length, user_size_t usize, user_addr_t uadd /* ... if input too short, copy out length of entitlement */ uint32_t length32 = htonl((uint32_t)length); memcpy(&fakeheader[4], &length32, sizeof(length32)); - + error = copyout(fakeheader, uaddr, sizeof(fakeheader)); - if (error == 0) + if (error == 0) { return ERANGE; /* input buffer to short, ERANGE signals that */ + } return error; } return copyout(start, uaddr, length); @@ -1940,42 +2012,46 @@ csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user off_t toff; unsigned char cdhash[SHA1_RESULTLEN]; audit_token_t token; - unsigned int upid=0, uidversion = 0; - + unsigned int upid = 0, uidversion = 0; + forself = error = 0; - if (pid == 0) + if (pid == 0) { pid = proc_selfpid(); - if (pid == proc_selfpid()) + } + if (pid == proc_selfpid()) { forself = 1; + } switch (ops) { - case CS_OPS_STATUS: - case CS_OPS_CDHASH: - case CS_OPS_PIDOFFSET: - case CS_OPS_ENTITLEMENTS_BLOB: - case CS_OPS_IDENTITY: - case CS_OPS_BLOB: - case CS_OPS_TEAMID: - break; /* not restricted to root */ - default: - if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) - return(EPERM); - break; + case CS_OPS_STATUS: + case CS_OPS_CDHASH: + case CS_OPS_PIDOFFSET: + case CS_OPS_ENTITLEMENTS_BLOB: + case CS_OPS_IDENTITY: + case CS_OPS_BLOB: + case CS_OPS_TEAMID: + break; /* not restricted to root */ + default: + if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE) { + return EPERM; + } + break; } pt = proc_find(pid); - if (pt == PROC_NULL) - return(ESRCH); + if (pt == PROC_NULL) { + return ESRCH; + } upid = pt->p_pid; uidversion = pt->p_idversion; if (uaudittoken != USER_ADDR_NULL) { - error = copyin(uaudittoken, &token, sizeof(audit_token_t)); - if (error != 0) + if (error != 0) { goto out; + } /* verify the audit token pid/idversion matches with proc */ if ((token.val[5] != upid) || (token.val[7] != uidversion)) { error = ESRCH; @@ -1985,272 +2061,287 @@ csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user #if CONFIG_MACF switch (ops) { - case CS_OPS_MARKINVALID: - case CS_OPS_MARKHARD: - case CS_OPS_MARKKILL: - case CS_OPS_MARKRESTRICT: - case CS_OPS_SET_STATUS: - case CS_OPS_CLEARINSTALLER: - case CS_OPS_CLEARPLATFORM: - if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) - goto out; - break; - default: - if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) - goto out; + case CS_OPS_MARKINVALID: + case CS_OPS_MARKHARD: + case CS_OPS_MARKKILL: + case CS_OPS_MARKRESTRICT: + case CS_OPS_SET_STATUS: + case CS_OPS_CLEARINSTALLER: + case CS_OPS_CLEARPLATFORM: + if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops))) { + goto out; + } + break; + default: + if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops))) { + goto out; + } } #endif switch (ops) { + case CS_OPS_STATUS: { + uint32_t retflags; - case CS_OPS_STATUS: { - uint32_t retflags; - - 
proc_lock(pt); - retflags = pt->p_csflags; - if (cs_process_enforcement(pt)) - retflags |= CS_ENFORCEMENT; - if (csproc_get_platform_binary(pt)) - retflags |= CS_PLATFORM_BINARY; - if (csproc_get_platform_path(pt)) - retflags |= CS_PLATFORM_PATH; - //Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV but still report CS_FORCED_LV - if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) { - retflags &= (~CS_REQUIRE_LV); - } - proc_unlock(pt); - - if (uaddr != USER_ADDR_NULL) - error = copyout(&retflags, uaddr, sizeof(uint32_t)); - break; + proc_lock(pt); + retflags = pt->p_csflags; + if (cs_process_enforcement(pt)) { + retflags |= CS_ENFORCEMENT; } - case CS_OPS_MARKINVALID: - proc_lock(pt); - if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */ - pt->p_csflags &= ~CS_VALID; /* set invalid */ - if ((pt->p_csflags & CS_KILL) == CS_KILL) { - pt->p_csflags |= CS_KILLED; - proc_unlock(pt); - if (cs_debug) { - printf("CODE SIGNING: marked invalid by pid %d: " - "p=%d[%s] honoring CS_KILL, final status 0x%x\n", - proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags); - } - psignal(pt, SIGKILL); - } else - proc_unlock(pt); - } else - proc_unlock(pt); - - break; - - case CS_OPS_MARKHARD: - proc_lock(pt); - pt->p_csflags |= CS_HARD; - if ((pt->p_csflags & CS_VALID) == 0) { - /* @@@ allow? reject? kill? @@@ */ - proc_unlock(pt); - error = EINVAL; - goto out; - } else - proc_unlock(pt); - break; + if (csproc_get_platform_binary(pt)) { + retflags |= CS_PLATFORM_BINARY; + } + if (csproc_get_platform_path(pt)) { + retflags |= CS_PLATFORM_PATH; + } + //Don't return CS_REQUIRE_LV if we turned it on with CS_FORCED_LV but still report CS_FORCED_LV + if ((pt->p_csflags & CS_FORCED_LV) == CS_FORCED_LV) { + retflags &= (~CS_REQUIRE_LV); + } + proc_unlock(pt); - case CS_OPS_MARKKILL: - proc_lock(pt); - pt->p_csflags |= CS_KILL; - if ((pt->p_csflags & CS_VALID) == 0) { + if (uaddr != USER_ADDR_NULL) { + error = copyout(&retflags, uaddr, sizeof(uint32_t)); + } + break; + } + case CS_OPS_MARKINVALID: + proc_lock(pt); + if ((pt->p_csflags & CS_VALID) == CS_VALID) { /* is currently valid */ + pt->p_csflags &= ~CS_VALID; /* set invalid */ + if ((pt->p_csflags & CS_KILL) == CS_KILL) { + pt->p_csflags |= CS_KILLED; proc_unlock(pt); + if (cs_debug) { + printf("CODE SIGNING: marked invalid by pid %d: " + "p=%d[%s] honoring CS_KILL, final status 0x%x\n", + proc_selfpid(), pt->p_pid, pt->p_comm, pt->p_csflags); + } psignal(pt, SIGKILL); - } else + } else { proc_unlock(pt); - break; + } + } else { + proc_unlock(pt); + } - case CS_OPS_PIDOFFSET: - toff = pt->p_textoff; - proc_rele(pt); - error = copyout(&toff, uaddr, sizeof(toff)); - return(error); + break; - case CS_OPS_CDHASH: + case CS_OPS_MARKHARD: + proc_lock(pt); + pt->p_csflags |= CS_HARD; + if ((pt->p_csflags & CS_VALID) == 0) { + /* @@@ allow? reject? kill? 
@@@ */ + proc_unlock(pt); + error = EINVAL; + goto out; + } else { + proc_unlock(pt); + } + break; - /* pt already holds a reference on its p_textvp */ - tvp = pt->p_textvp; - toff = pt->p_textoff; + case CS_OPS_MARKKILL: + proc_lock(pt); + pt->p_csflags |= CS_KILL; + if ((pt->p_csflags & CS_VALID) == 0) { + proc_unlock(pt); + psignal(pt, SIGKILL); + } else { + proc_unlock(pt); + } + break; - if (tvp == NULLVP || usize != SHA1_RESULTLEN) { - proc_rele(pt); - return EINVAL; - } + case CS_OPS_PIDOFFSET: + toff = pt->p_textoff; + proc_rele(pt); + error = copyout(&toff, uaddr, sizeof(toff)); + return error; - error = vn_getcdhash(tvp, toff, cdhash); + case CS_OPS_CDHASH: + + /* pt already holds a reference on its p_textvp */ + tvp = pt->p_textvp; + toff = pt->p_textoff; + + if (tvp == NULLVP || usize != SHA1_RESULTLEN) { proc_rele(pt); + return EINVAL; + } - if (error == 0) { - error = copyout(cdhash, uaddr, sizeof (cdhash)); - } + error = vn_getcdhash(tvp, toff, cdhash); + proc_rele(pt); - return error; + if (error == 0) { + error = copyout(cdhash, uaddr, sizeof(cdhash)); + } - case CS_OPS_ENTITLEMENTS_BLOB: { - void *start; - size_t length; + return error; - proc_lock(pt); + case CS_OPS_ENTITLEMENTS_BLOB: { + void *start; + size_t length; - if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { - proc_unlock(pt); - error = EINVAL; - break; - } + proc_lock(pt); - error = cs_entitlements_blob_get(pt, &start, &length); + if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { proc_unlock(pt); - if (error) - break; - - error = csops_copy_token(start, length, usize, uaddr); + error = EINVAL; break; } - case CS_OPS_MARKRESTRICT: - proc_lock(pt); - pt->p_csflags |= CS_RESTRICT; - proc_unlock(pt); - break; - case CS_OPS_SET_STATUS: { - uint32_t flags; + error = cs_entitlements_blob_get(pt, &start, &length); + proc_unlock(pt); + if (error) { + break; + } - if (usize < sizeof(flags)) { - error = ERANGE; - break; - } + error = csops_copy_token(start, length, usize, uaddr); + break; + } + case CS_OPS_MARKRESTRICT: + proc_lock(pt); + pt->p_csflags |= CS_RESTRICT; + proc_unlock(pt); + break; - error = copyin(uaddr, &flags, sizeof(flags)); - if (error) - break; + case CS_OPS_SET_STATUS: { + uint32_t flags; - /* only allow setting a subset of all code sign flags */ - flags &= - CS_HARD | CS_EXEC_SET_HARD | - CS_KILL | CS_EXEC_SET_KILL | - CS_RESTRICT | - CS_REQUIRE_LV | - CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT; - - proc_lock(pt); - if (pt->p_csflags & CS_VALID) - pt->p_csflags |= flags; - else - error = EINVAL; - proc_unlock(pt); + if (usize < sizeof(flags)) { + error = ERANGE; + break; + } + error = copyin(uaddr, &flags, sizeof(flags)); + if (error) { break; } - case CS_OPS_BLOB: { - void *start; - size_t length; - proc_lock(pt); - if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { - proc_unlock(pt); - error = EINVAL; - break; - } + /* only allow setting a subset of all code sign flags */ + flags &= + CS_HARD | CS_EXEC_SET_HARD | + CS_KILL | CS_EXEC_SET_KILL | + CS_RESTRICT | + CS_REQUIRE_LV | + CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT; + + proc_lock(pt); + if (pt->p_csflags & CS_VALID) { + pt->p_csflags |= flags; + } else { + error = EINVAL; + } + proc_unlock(pt); + + break; + } + case CS_OPS_BLOB: { + void *start; + size_t length; - error = cs_blob_get(pt, &start, &length); + proc_lock(pt); + if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { proc_unlock(pt); - if (error) - break; + error = EINVAL; + break; + } - error = csops_copy_token(start, length, usize, uaddr); + error = cs_blob_get(pt, 
&start, &length); + proc_unlock(pt); + if (error) { break; } - case CS_OPS_IDENTITY: - case CS_OPS_TEAMID: { - const char *identity; - uint8_t fakeheader[8]; - uint32_t idlen; - size_t length; - /* - * Make identity have a blob header to make it - * easier on userland to guess the identity - * length. - */ - if (usize < sizeof(fakeheader)) { - error = ERANGE; - break; - } - memset(fakeheader, 0, sizeof(fakeheader)); + error = csops_copy_token(start, length, usize, uaddr); + break; + } + case CS_OPS_IDENTITY: + case CS_OPS_TEAMID: { + const char *identity; + uint8_t fakeheader[8]; + uint32_t idlen; + size_t length; - proc_lock(pt); - if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { - proc_unlock(pt); - error = EINVAL; - break; - } + /* + * Make identity have a blob header to make it + * easier on userland to guess the identity + * length. + */ + if (usize < sizeof(fakeheader)) { + error = ERANGE; + break; + } + memset(fakeheader, 0, sizeof(fakeheader)); - identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt); + proc_lock(pt); + if ((pt->p_csflags & (CS_VALID | CS_DEBUGGED)) == 0) { proc_unlock(pt); - if (identity == NULL) { - error = ENOENT; - break; - } - - length = strlen(identity) + 1; /* include NUL */ - idlen = htonl(length + sizeof(fakeheader)); - memcpy(&fakeheader[4], &idlen, sizeof(idlen)); + error = EINVAL; + break; + } - error = copyout(fakeheader, uaddr, sizeof(fakeheader)); - if (error) - break; + identity = ops == CS_OPS_TEAMID ? csproc_get_teamid(pt) : cs_identity_get(pt); + proc_unlock(pt); + if (identity == NULL) { + error = ENOENT; + break; + } - if (usize < sizeof(fakeheader) + length) - error = ERANGE; - else if (usize > sizeof(fakeheader)) - error = copyout(identity, uaddr + sizeof(fakeheader), length); + length = strlen(identity) + 1; /* include NUL */ + idlen = htonl(length + sizeof(fakeheader)); + memcpy(&fakeheader[4], &idlen, sizeof(idlen)); + error = copyout(fakeheader, uaddr, sizeof(fakeheader)); + if (error) { break; } - case CS_OPS_CLEARINSTALLER: - proc_lock(pt); - pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP); - proc_unlock(pt); - break; + if (usize < sizeof(fakeheader) + length) { + error = ERANGE; + } else if (usize > sizeof(fakeheader)) { + error = copyout(identity, uaddr + sizeof(fakeheader), length); + } - case CS_OPS_CLEARPLATFORM: + break; + } + + case CS_OPS_CLEARINSTALLER: + proc_lock(pt); + pt->p_csflags &= ~(CS_INSTALLER | CS_DATAVAULT_CONTROLLER | CS_EXEC_INHERIT_SIP); + proc_unlock(pt); + break; + + case CS_OPS_CLEARPLATFORM: #if DEVELOPMENT || DEBUG - if (cs_process_global_enforcement()) { - error = ENOTSUP; - break; - } + if (cs_process_global_enforcement()) { + error = ENOTSUP; + break; + } #if CONFIG_CSR - if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) { - error = ENOTSUP; - break; - } + if (csr_check(CSR_ALLOW_APPLE_INTERNAL) != 0) { + error = ENOTSUP; + break; + } #endif - proc_lock(pt); - pt->p_csflags &= ~(CS_PLATFORM_BINARY|CS_PLATFORM_PATH); - csproc_clear_platform_binary(pt); - proc_unlock(pt); - break; + proc_lock(pt); + pt->p_csflags &= ~(CS_PLATFORM_BINARY | CS_PLATFORM_PATH); + csproc_clear_platform_binary(pt); + proc_unlock(pt); + break; #else - error = ENOTSUP; - break; + error = ENOTSUP; + break; #endif /* !DEVELOPMENT || DEBUG */ - default: - error = EINVAL; - break; + default: + error = EINVAL; + break; } out: proc_rele(pt); - return(error); + return error; } int @@ -2314,8 +2405,7 @@ proc_iterate( } if ((pid_count < pid_count_available) && - (flags & 
PROC_ZOMBPROCLIST)) - { + (flags & PROC_ZOMBPROCLIST)) { proc_t p; ZOMBPROC_FOREACH(p) { if ((filterfn != NULL) && (filterfn(p, filterarg) == 0)) { @@ -2344,19 +2434,19 @@ proc_iterate( switch (callout_ret) { case PROC_RETURNED_DONE: proc_rele(p); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case PROC_CLAIMED_DONE: goto out; case PROC_RETURNED: proc_rele(p); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case PROC_CLAIMED: break; default: panic("proc_iterate: callout returned %d for pid %d", - callout_ret, pid_list[i]); + callout_ret, pid_list[i]); break; } } else if (flags & PROC_ZOMBPROCLIST) { @@ -2369,19 +2459,19 @@ proc_iterate( switch (callout_ret) { case PROC_RETURNED_DONE: proc_drop_zombref(p); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case PROC_CLAIMED_DONE: goto out; case PROC_RETURNED: proc_drop_zombref(p); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case PROC_CLAIMED: break; default: panic("proc_iterate: callout returned %d for zombie pid %d", - callout_ret, pid_list[i]); + callout_ret, pid_list[i]); break; } } @@ -2390,7 +2480,6 @@ proc_iterate( out: kfree(pid_list, pid_list_size); return 0; - } void @@ -2416,7 +2505,7 @@ restart_foreach: } p = proc_ref_locked(p); if (!p) { - continue; + continue; } proc_list_unlock(); @@ -2496,18 +2585,18 @@ proc_childrenwalk( switch (callout_ret) { case PROC_RETURNED_DONE: proc_rele(p); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case PROC_CLAIMED_DONE: goto out; case PROC_RETURNED: proc_rele(p); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case PROC_CLAIMED: break; default: panic("proc_childrenwalk: callout returned %d for pid %d", - callout_ret, pid_list[i]); + callout_ret, pid_list[i]); break; } } @@ -2581,7 +2670,7 @@ pgrp_iterate( pg_rele(pgrp); } - for (int i = 0; i< pid_count; i++) { + for (int i = 0; i < pid_count; i++) { /* do not handle kernproc */ if (pid_list[i] == 0) { continue; @@ -2600,19 +2689,19 @@ pgrp_iterate( switch (callout_ret) { case PROC_RETURNED: proc_rele(p); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case PROC_CLAIMED: break; case PROC_RETURNED_DONE: proc_rele(p); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case PROC_CLAIMED_DONE: goto out; default: panic("pgrp_iterate: callout returned %d for pid %d", - callout_ret, pid_list[i]); + callout_ret, pid_list[i]); } } @@ -2629,31 +2718,32 @@ pgrp_add(struct pgrp * pgrp, struct proc * parent, struct proc * child) child->p_pgrpid = pgrp->pg_id; child->p_listflag |= P_LIST_INPGRP; /* - * When pgrp is being freed , a process can still - * request addition using setpgid from bash when - * login is terminated (login cycler) return ESRCH - * Safe to hold lock due to refcount on pgrp + * When pgrp is being freed , a process can still + * request addition using setpgid from bash when + * login is terminated (login cycler) return ESRCH + * Safe to hold lock due to refcount on pgrp */ if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) { - pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE; + pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE; } - if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) + if ((pgrp->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) { panic("pgrp_add : pgrp is dead adding process"); + } proc_list_unlock(); pgrp_lock(pgrp); pgrp->pg_membercnt++; - if ( parent != PROC_NULL) { + if (parent != PROC_NULL) { LIST_INSERT_AFTER(parent, child, p_pglist); - }else { + } else { LIST_INSERT_HEAD(&pgrp->pg_members, child, p_pglist); } pgrp_unlock(pgrp); proc_list_lock(); if (((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && 
(pgrp->pg_membercnt != 0)) { - pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE; + pgrp->pg_listflags &= ~PGRP_FLAG_TERMINATE; } proc_list_unlock(); } @@ -2667,21 +2757,24 @@ pgrp_remove(struct proc * p) proc_list_lock(); #if __PROC_INTERNAL_DEBUG - if ((p->p_listflag & P_LIST_INPGRP) == 0) + if ((p->p_listflag & P_LIST_INPGRP) == 0) { panic("removing from pglist but no named ref\n"); + } #endif p->p_pgrpid = PGRPID_DEAD; p->p_listflag &= ~P_LIST_INPGRP; p->p_pgrp = NULL; proc_list_unlock(); - if (pg == PGRP_NULL) + if (pg == PGRP_NULL) { panic("pgrp_remove: pg is NULL"); + } pgrp_lock(pg); pg->pg_membercnt--; - if (pg->pg_membercnt < 0) - panic("pgprp: -ve membercnt pgprp:%p p:%p\n",pg, p); + if (pg->pg_membercnt < 0) { + panic("pgprp: -ve membercnt pgprp:%p p:%p\n", pg, p); + } LIST_REMOVE(p, p_pglist); if (pg->pg_members.lh_first == 0) { @@ -2698,11 +2791,11 @@ pgrp_remove(struct proc * p) static void pgrp_replace(struct proc * p, struct pgrp * newpg) { - struct pgrp * oldpg; + struct pgrp * oldpg; - proc_list_lock(); + proc_list_lock(); while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) { p->p_listflag |= P_LIST_PGRPTRWAIT; @@ -2712,67 +2805,70 @@ pgrp_replace(struct proc * p, struct pgrp * newpg) p->p_listflag |= P_LIST_PGRPTRANS; oldpg = p->p_pgrp; - if (oldpg == PGRP_NULL) + if (oldpg == PGRP_NULL) { panic("pgrp_replace: oldpg NULL"); + } oldpg->pg_refcount++; #if __PROC_INTERNAL_DEBUG - if ((p->p_listflag & P_LIST_INPGRP) == 0) - panic("removing from pglist but no named ref\n"); + if ((p->p_listflag & P_LIST_INPGRP) == 0) { + panic("removing from pglist but no named ref\n"); + } #endif - p->p_pgrpid = PGRPID_DEAD; - p->p_listflag &= ~P_LIST_INPGRP; - p->p_pgrp = NULL; - - proc_list_unlock(); - - pgrp_lock(oldpg); - oldpg->pg_membercnt--; - if (oldpg->pg_membercnt < 0) - panic("pgprp: -ve membercnt pgprp:%p p:%p\n",oldpg, p); - LIST_REMOVE(p, p_pglist); - if (oldpg->pg_members.lh_first == 0) { - pgrp_unlock(oldpg); - pgdelete_dropref(oldpg); - } else { - pgrp_unlock(oldpg); - pg_rele(oldpg); - } - - proc_list_lock(); - p->p_pgrp = newpg; - p->p_pgrpid = newpg->pg_id; - p->p_listflag |= P_LIST_INPGRP; - /* - * When pgrp is being freed , a process can still - * request addition using setpgid from bash when - * login is terminated (login cycler) return ESRCH - * Safe to hold lock due to refcount on pgrp - */ - if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) { - newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE; - } - - if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) - panic("pgrp_add : pgrp is dead adding process"); - proc_list_unlock(); - - pgrp_lock(newpg); - newpg->pg_membercnt++; + p->p_pgrpid = PGRPID_DEAD; + p->p_listflag &= ~P_LIST_INPGRP; + p->p_pgrp = NULL; + + proc_list_unlock(); + + pgrp_lock(oldpg); + oldpg->pg_membercnt--; + if (oldpg->pg_membercnt < 0) { + panic("pgprp: -ve membercnt pgprp:%p p:%p\n", oldpg, p); + } + LIST_REMOVE(p, p_pglist); + if (oldpg->pg_members.lh_first == 0) { + pgrp_unlock(oldpg); + pgdelete_dropref(oldpg); + } else { + pgrp_unlock(oldpg); + pg_rele(oldpg); + } + + proc_list_lock(); + p->p_pgrp = newpg; + p->p_pgrpid = newpg->pg_id; + p->p_listflag |= P_LIST_INPGRP; + /* + * When pgrp is being freed , a process can still + * request addition using setpgid from bash when + * login is terminated (login cycler) return ESRCH + * Safe to hold lock due to refcount on pgrp + */ + if ((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) { + newpg->pg_listflags &= 
~PGRP_FLAG_TERMINATE; + } + + if ((newpg->pg_listflags & PGRP_FLAG_DEAD) == PGRP_FLAG_DEAD) { + panic("pgrp_add : pgrp is dead adding process"); + } + proc_list_unlock(); + + pgrp_lock(newpg); + newpg->pg_membercnt++; LIST_INSERT_HEAD(&newpg->pg_members, p, p_pglist); - pgrp_unlock(newpg); + pgrp_unlock(newpg); - proc_list_lock(); - if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) { - newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE; - } + proc_list_lock(); + if (((newpg->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) == PGRP_FLAG_TERMINATE) && (newpg->pg_membercnt != 0)) { + newpg->pg_listflags &= ~PGRP_FLAG_TERMINATE; + } p->p_listflag &= ~P_LIST_PGRPTRANS; if ((p->p_listflag & P_LIST_PGRPTRWAIT) == P_LIST_PGRPTRWAIT) { p->p_listflag &= ~P_LIST_PGRPTRWAIT; wakeup(&p->p_pgrpid); - } - proc_list_unlock(); + proc_list_unlock(); } void @@ -2805,28 +2901,30 @@ proc_pgrp(proc_t p) { struct pgrp * pgrp; - if (p == PROC_NULL) - return(PGRP_NULL); + if (p == PROC_NULL) { + return PGRP_NULL; + } proc_list_lock(); while ((p->p_listflag & P_LIST_PGRPTRANS) == P_LIST_PGRPTRANS) { p->p_listflag |= P_LIST_PGRPTRWAIT; (void)msleep(&p->p_pgrpid, proc_list_mlock, 0, "proc_pgrp", 0); } - + pgrp = p->p_pgrp; assert(pgrp != NULL); if (pgrp != PGRP_NULL) { pgrp->pg_refcount++; - if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) + if ((pgrp->pg_listflags & (PGRP_FLAG_TERMINATE | PGRP_FLAG_DEAD)) != 0) { panic("proc_pgrp: ref being povided for dead pgrp"); + } } - + proc_list_unlock(); - - return(pgrp); + + return pgrp; } struct pgrp * @@ -2838,22 +2936,24 @@ tty_pgrp(struct tty * tp) pg = tp->t_pgrp; if (pg != PGRP_NULL) { - if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) + if ((pg->pg_listflags & PGRP_FLAG_DEAD) != 0) { panic("tty_pgrp: ref being povided for dead pgrp"); + } pg->pg_refcount++; } proc_list_unlock(); - return(pg); + return pg; } struct session * proc_session(proc_t p) { struct session * sess = SESSION_NULL; - - if (p == PROC_NULL) - return(SESSION_NULL); + + if (p == PROC_NULL) { + return SESSION_NULL; + } proc_list_lock(); @@ -2864,12 +2964,13 @@ proc_session(proc_t p) } if ((p->p_pgrp != PGRP_NULL) && ((sess = p->p_pgrp->pg_session) != SESSION_NULL)) { - if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) + if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) { panic("proc_session:returning sesssion ref on terminating session"); + } sess->s_count++; } proc_list_unlock(); - return(sess); + return sess; } void @@ -2877,33 +2978,34 @@ session_rele(struct session *sess) { proc_list_lock(); if (--sess->s_count == 0) { - if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) + if ((sess->s_listflags & (S_LIST_TERM | S_LIST_DEAD)) != 0) { panic("session_rele: terminating already terminated session"); + } sess->s_listflags |= S_LIST_TERM; LIST_REMOVE(sess, s_hash); sess->s_listflags |= S_LIST_DEAD; - if (sess->s_count != 0) - panic("session_rele: freeing session in use"); + if (sess->s_count != 0) { + panic("session_rele: freeing session in use"); + } proc_list_unlock(); -#if CONFIG_FINE_LOCK_GROUPS lck_mtx_destroy(&sess->s_mlock, proc_mlock_grp); -#else - lck_mtx_destroy(&sess->s_mlock, proc_lck_grp); -#endif FREE_ZONE(sess, sizeof(struct session), M_SESSION); - } else + } else { proc_list_unlock(); + } } int proc_transstart(proc_t p, int locked, int non_blocking) { - if (locked == 0) + if (locked == 0) { proc_lock(p); + } while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) { if 
(((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT) || non_blocking) { - if (locked == 0) + if (locked == 0) { proc_unlock(p); + } return EDEADLK; } p->p_lflag |= P_LTRANSWAIT; @@ -2911,62 +3013,70 @@ proc_transstart(proc_t p, int locked, int non_blocking) } p->p_lflag |= P_LINTRANSIT; p->p_transholder = current_thread(); - if (locked == 0) + if (locked == 0) { proc_unlock(p); + } return 0; } void proc_transcommit(proc_t p, int locked) { - if (locked == 0) + if (locked == 0) { proc_lock(p); + } - assert ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT); - assert (p->p_transholder == current_thread()); + assert((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT); + assert(p->p_transholder == current_thread()); p->p_lflag |= P_LTRANSCOMMIT; if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) { p->p_lflag &= ~P_LTRANSWAIT; wakeup(&p->p_lflag); } - if (locked == 0) + if (locked == 0) { proc_unlock(p); + } } void proc_transend(proc_t p, int locked) { - if (locked == 0) + if (locked == 0) { proc_lock(p); + } - p->p_lflag &= ~( P_LINTRANSIT | P_LTRANSCOMMIT); + p->p_lflag &= ~(P_LINTRANSIT | P_LTRANSCOMMIT); p->p_transholder = NULL; if ((p->p_lflag & P_LTRANSWAIT) == P_LTRANSWAIT) { p->p_lflag &= ~P_LTRANSWAIT; wakeup(&p->p_lflag); } - if (locked == 0) + if (locked == 0) { proc_unlock(p); + } } int proc_transwait(proc_t p, int locked) { - if (locked == 0) + if (locked == 0) { proc_lock(p); + } while ((p->p_lflag & P_LINTRANSIT) == P_LINTRANSIT) { if ((p->p_lflag & P_LTRANSCOMMIT) == P_LTRANSCOMMIT && current_proc() == p) { - if (locked == 0) + if (locked == 0) { proc_unlock(p); + } return EDEADLK; } p->p_lflag |= P_LTRANSWAIT; msleep(&p->p_lflag, &p->p_mlock, 0, "proc_signstart", NULL); } - if (locked == 0) + if (locked == 0) { proc_unlock(p); + } return 0; } @@ -3006,7 +3116,7 @@ proc_knote_drain(struct proc *p) proc_klist_unlock(); } -void +void proc_setregister(proc_t p) { proc_lock(p); @@ -3014,7 +3124,7 @@ proc_setregister(proc_t p) proc_unlock(p); } -void +void proc_resetregister(proc_t p) { proc_lock(p); @@ -3042,13 +3152,15 @@ proc_getpcontrol(int pid, int * pcontrolp) proc_t p; p = proc_find(pid); - if (p == PROC_NULL) - return(ESRCH); - if (pcontrolp != NULL) + if (p == PROC_NULL) { + return ESRCH; + } + if (pcontrolp != NULL) { *pcontrolp = p->p_pcaction; + } proc_rele(p); - return(0); + return 0; } int @@ -3061,41 +3173,41 @@ proc_dopcontrol(proc_t p) pcontrol = PROC_CONTROL_STATE(p); if (PROC_ACTION_STATE(p) == 0) { - switch(pcontrol) { - case P_PCTHROTTLE: - PROC_SETACTION_STATE(p); - proc_unlock(p); - printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm); - break; + switch (pcontrol) { + case P_PCTHROTTLE: + PROC_SETACTION_STATE(p); + proc_unlock(p); + printf("low swap: throttling pid %d (%s)\n", p->p_pid, p->p_comm); + break; - case P_PCSUSP: - PROC_SETACTION_STATE(p); - proc_unlock(p); - printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm); - task_suspend(p->task); - break; + case P_PCSUSP: + PROC_SETACTION_STATE(p); + proc_unlock(p); + printf("low swap: suspending pid %d (%s)\n", p->p_pid, p->p_comm); + task_suspend(p->task); + break; - case P_PCKILL: - PROC_SETACTION_STATE(p); - proc_unlock(p); - printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm); - psignal(p, SIGKILL); - break; + case P_PCKILL: + PROC_SETACTION_STATE(p); + proc_unlock(p); + printf("low swap: killing pid %d (%s)\n", p->p_pid, p->p_comm); + psignal(p, SIGKILL); + break; - default: - proc_unlock(p); + default: + proc_unlock(p); } - - } else + } else { proc_unlock(p); + } - 
return(PROC_RETURNED); + return PROC_RETURNED; } /* * Resume a throttled or suspended process. This is an internal interface that's only - * used by the user level code that presents the GUI when we run out of swap space and + * used by the user level code that presents the GUI when we run out of swap space and * hence is restricted to processes with superuser privileges. */ @@ -3108,68 +3220,69 @@ proc_resetpcontrol(int pid) proc_t self = current_proc(); /* if the process has been validated to handle resource control or root is valid one */ - if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) + if (((self->p_lflag & P_LVMRSRCOWNER) == 0) && (error = suser(kauth_cred_get(), 0))) { return error; + } p = proc_find(pid); - if (p == PROC_NULL) - return(ESRCH); - + if (p == PROC_NULL) { + return ESRCH; + } + proc_lock(p); pcontrol = PROC_CONTROL_STATE(p); - if(PROC_ACTION_STATE(p) !=0) { - switch(pcontrol) { - case P_PCTHROTTLE: - PROC_RESETACTION_STATE(p); - proc_unlock(p); - printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm); - break; + if (PROC_ACTION_STATE(p) != 0) { + switch (pcontrol) { + case P_PCTHROTTLE: + PROC_RESETACTION_STATE(p); + proc_unlock(p); + printf("low swap: unthrottling pid %d (%s)\n", p->p_pid, p->p_comm); + break; - case P_PCSUSP: - PROC_RESETACTION_STATE(p); - proc_unlock(p); - printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm); - task_resume(p->task); - break; + case P_PCSUSP: + PROC_RESETACTION_STATE(p); + proc_unlock(p); + printf("low swap: resuming pid %d (%s)\n", p->p_pid, p->p_comm); + task_resume(p->task); + break; - case P_PCKILL: - /* Huh? */ - PROC_SETACTION_STATE(p); - proc_unlock(p); - printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm); - break; + case P_PCKILL: + /* Huh? */ + PROC_SETACTION_STATE(p); + proc_unlock(p); + printf("low swap: attempt to unkill pid %d (%s) ignored\n", p->p_pid, p->p_comm); + break; - default: - proc_unlock(p); + default: + proc_unlock(p); } - - } else + } else { proc_unlock(p); + } proc_rele(p); - return(0); + return 0; } -struct no_paging_space -{ - uint64_t pcs_max_size; - uint64_t pcs_uniqueid; - int pcs_pid; - int pcs_proc_count; - uint64_t pcs_total_size; +struct no_paging_space { + uint64_t pcs_max_size; + uint64_t pcs_uniqueid; + int pcs_pid; + int pcs_proc_count; + uint64_t pcs_total_size; - uint64_t npcs_max_size; - uint64_t npcs_uniqueid; - int npcs_pid; - int npcs_proc_count; - uint64_t npcs_total_size; + uint64_t npcs_max_size; + uint64_t npcs_uniqueid; + int npcs_pid; + int npcs_proc_count; + uint64_t npcs_total_size; - int apcs_proc_count; - uint64_t apcs_total_size; + int apcs_proc_count; + uint64_t apcs_total_size; }; @@ -3177,7 +3290,7 @@ static int proc_pcontrol_filter(proc_t p, void *arg) { struct no_paging_space *nps; - uint64_t compressed; + uint64_t compressed; nps = (struct no_paging_space *)arg; @@ -3204,16 +3317,15 @@ proc_pcontrol_filter(proc_t p, void *arg) } nps->npcs_total_size += compressed; nps->npcs_proc_count++; - } - return (0); + return 0; } static int proc_pcontrol_null(__unused proc_t p, __unused void *arg) { - return(PROC_RETURNED); + return PROC_RETURNED; } @@ -3224,7 +3336,7 @@ proc_pcontrol_null(__unused proc_t p, __unused void *arg) * Since this eventually creates a memory deadlock situtation, we need to take action to free up * memory resources (both compressed and uncompressed) in order to prevent the system from hanging completely. * There are 2 categories of processes to deal with. 
Those that have an action - * associated with them by the task itself and those that do not. Actionable + * associated with them by the task itself and those that do not. Actionable * tasks can have one of three categories specified: ones that * can be killed immediately, ones that should be suspended, and ones that should * be throttled. Processes that do not have an action associated with them are normally @@ -3232,39 +3344,40 @@ proc_pcontrol_null(__unused proc_t p, __unused void *arg) * that only by killing them can we hope to put the system back into a usable state. */ -#define NO_PAGING_SPACE_DEBUG 0 +#define NO_PAGING_SPACE_DEBUG 0 -extern uint64_t vm_compressor_pages_compressed(void); +extern uint64_t vm_compressor_pages_compressed(void); -struct timeval last_no_space_action = {0, 0}; +struct timeval last_no_space_action = {0, 0}; #if DEVELOPMENT || DEBUG extern boolean_t kill_on_no_paging_space; #endif /* DEVELOPMENT || DEBUG */ -#define MB_SIZE (1024 * 1024ULL) -boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t); +#define MB_SIZE (1024 * 1024ULL) +boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t); -extern int32_t max_kill_priority; -extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index); +extern int32_t max_kill_priority; +extern int memorystatus_get_proccnt_upto_priority(int32_t max_bucket_index); int no_paging_space_action() { - proc_t p; + proc_t p; struct no_paging_space nps; - struct timeval now; + struct timeval now; /* * Throttle how often we come through here. Once every 5 seconds should be plenty. */ microtime(&now); - if (now.tv_sec <= last_no_space_action.tv_sec + 5) - return (0); + if (now.tv_sec <= last_no_space_action.tv_sec + 5) { + return 0; + } /* - * Examine all processes and find the biggest (biggest is based on the number of pages this + * Examine all processes and find the biggest (biggest is based on the number of pages this * task has in the compressor pool) that has been marked to have some action * taken when swap space runs out... we also find the biggest that hasn't been marked for * action. 
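/*
 * Illustrative sketch, not from the xnu sources: the no_paging_space_action()
 * hunk above rate-limits itself by comparing the current time against the
 * saved `last_no_space_action` timestamp before doing any work ("Once every
 * 5 seconds should be plenty"). Below is a minimal user-space rendition of
 * that throttle pattern; the names try_take_action and ACTION_INTERVAL_SECS
 * are hypothetical, and the kernel records the timestamp only when it
 * actually kills or throttles something.
 */
#include <stdio.h>
#include <time.h>

#define ACTION_INTERVAL_SECS 5

static time_t last_action;      /* 0 until the first action fires */

/* Returns 1 and records the time if the window has elapsed, else 0. */
static int
try_take_action(void)
{
	time_t now = time(NULL);

	if (last_action != 0 && now <= last_action + ACTION_INTERVAL_SECS) {
		return 0;       /* throttled: too soon since the last action */
	}
	last_action = now;
	return 1;
}

int
main(void)
{
	for (int i = 0; i < 3; i++) {
		printf("attempt %d: %s\n", i, try_take_action() ? "acted" : "throttled");
	}
	return 0;
}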
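/*
 * Illustrative sketch, not from the xnu sources: the proc_transstart()/
 * proc_transend()/proc_transwait() hunks earlier in this file implement a
 * single-owner "in transition" latch — waiters loop on the P_LINTRANSIT bit,
 * set P_LTRANSWAIT, and msleep() until the owner clears the bit and calls
 * wakeup(). A minimal pthreads rendition of that wait loop follows, with
 * hypothetical names (trans_start, trans_end); the kernel versions also
 * return EDEADLK on self-deadlock and support a separate commit phase.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t trans_cv = PTHREAD_COND_INITIALIZER;
static bool in_transit;                 /* P_LINTRANSIT analogue */

static void
trans_start(void)
{
	pthread_mutex_lock(&trans_lock);
	while (in_transit) {
		/* like msleep(&p->p_lflag, ...): atomically unlock and sleep */
		pthread_cond_wait(&trans_cv, &trans_lock);
	}
	in_transit = true;              /* we are now the single owner */
	pthread_mutex_unlock(&trans_lock);
}

static void
trans_end(void)
{
	pthread_mutex_lock(&trans_lock);
	in_transit = false;
	pthread_cond_broadcast(&trans_cv);      /* like wakeup(&p->p_lflag) */
	pthread_mutex_unlock(&trans_lock);
}

int
main(void)
{
	trans_start();          /* enter the transition... */
	trans_end();            /* ...and leave it, waking any waiters */
	return 0;
}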
@@ -3279,11 +3392,11 @@ no_paging_space_action() #if NO_PAGING_SPACE_DEBUG printf("low swap: npcs_proc_count = %d, npcs_total_size = %qd, npcs_max_size = %qd\n", - nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size); + nps.npcs_proc_count, nps.npcs_total_size, nps.npcs_max_size); printf("low swap: pcs_proc_count = %d, pcs_total_size = %qd, pcs_max_size = %qd\n", - nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size); + nps.pcs_proc_count, nps.pcs_total_size, nps.pcs_max_size); printf("low swap: apcs_proc_count = %d, apcs_total_size = %qd\n", - nps.apcs_proc_count, nps.apcs_total_size); + nps.apcs_proc_count, nps.apcs_total_size); #endif if (nps.npcs_max_size > (vm_compressor_pages_compressed() * 50) / 100) { /* @@ -3291,7 +3404,6 @@ no_paging_space_action() * held by the compressor */ if ((p = proc_find(nps.npcs_pid)) != PROC_NULL) { - if (nps.npcs_uniqueid == p->p_uniqueid) { /* * verify this is still the same process @@ -3300,14 +3412,14 @@ no_paging_space_action() */ last_no_space_action = now; - printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.pcs_max_size/MB_SIZE)); + printf("low swap: killing largest compressed process with pid %d (%s) and size %llu MB\n", p->p_pid, p->p_comm, (nps.pcs_max_size / MB_SIZE)); psignal(p, SIGKILL); - + proc_rele(p); - return (0); + return 0; } - + proc_rele(p); } } @@ -3317,10 +3429,9 @@ no_paging_space_action() * So we will invoke the memorystatus thread to go ahead and kill something. */ if (memorystatus_get_proccnt_upto_priority(max_kill_priority) > 0) { - last_no_space_action = now; memorystatus_kill_on_VM_compressor_space_shortage(TRUE /* async */); - return (1); + return 1; } /* @@ -3330,7 +3441,6 @@ no_paging_space_action() if (nps.pcs_max_size > 0) { if ((p = proc_find(nps.pcs_pid)) != PROC_NULL) { - if (nps.pcs_uniqueid == p->p_uniqueid) { /* * verify this is still the same process @@ -3338,14 +3448,14 @@ no_paging_space_action() * we were finishing the proc_iterate and getting to this point */ last_no_space_action = now; - + proc_dopcontrol(p); - + proc_rele(p); - - return (1); + + return 1; } - + proc_rele(p); } } @@ -3353,11 +3463,11 @@ no_paging_space_action() printf("low swap: unable to find any eligible processes to take action on\n"); - return (0); + return 0; } -int -proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval) +int +proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int *retval) { int ret = 0; proc_t target_proc = PROC_NULL; @@ -3381,30 +3491,34 @@ proc_trace_log(__unused proc_t p, struct proc_trace_log_args *uap, __unused int ret = EINVAL; goto out; } - } else + } else { ret = ENOENT; + } out: - if (target_proc != PROC_NULL) + if (target_proc != PROC_NULL) { proc_rele(target_proc); - return (ret); + } + return ret; } #if VM_SCAN_FOR_SHADOW_CHAIN extern int vm_map_shadow_max(vm_map_t map); int proc_shadow_max(void); -int proc_shadow_max(void) +int +proc_shadow_max(void) { - int retval, max; - proc_t p; - task_t task; - vm_map_t map; + int retval, max; + proc_t p; + task_t task; + vm_map_t map; max = 0; proc_list_lock(); for (p = allproc.lh_first; (p != 0); p = p->p_list.le_next) { - if (p->p_stat == SIDL) + if (p->p_stat == SIDL) { continue; + } task = p->task; if (task == NULL) { continue; @@ -3424,7 +3538,8 @@ int proc_shadow_max(void) #endif /* VM_SCAN_FOR_SHADOW_CHAIN */ void proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid); -void proc_set_responsible_pid(proc_t 
target_proc, pid_t responsible_pid) +void +proc_set_responsible_pid(proc_t target_proc, pid_t responsible_pid) { if (target_proc != NULL) { target_proc->p_responsible_pid = responsible_pid; @@ -3449,8 +3564,9 @@ proc_chrooted(proc_t p) boolean_t proc_send_synchronous_EXC_RESOURCE(proc_t p) { - if (p == PROC_NULL) + if (p == PROC_NULL) { return FALSE; + } /* Send sync EXC_RESOURCE if the process is traced */ if (ISSET(p->p_lflag, P_LTRACED)) { @@ -3480,9 +3596,9 @@ proc_log_32bit_telemetry(proc_t p) * garbled name. */ bytes_printed = snprintf(signature_cur_end, - signature_buf_end - signature_cur_end, - "%s,%s,", p->p_name, - (p->p_pptr ? p->p_pptr->p_name : "")); + signature_buf_end - signature_cur_end, + "%s,%s,", p->p_name, + (p->p_pptr ? p->p_pptr->p_name : "")); if (bytes_printed > 0) { signature_cur_end += bytes_printed; @@ -3511,8 +3627,8 @@ proc_log_32bit_telemetry(proc_t p) } bytes_printed = snprintf(signature_cur_end, - signature_buf_end - signature_cur_end, - "%s,%s", teamid, identity); + signature_buf_end - signature_cur_end, + "%s,%s", teamid, identity); if (bytes_printed > 0) { signature_cur_end += bytes_printed; @@ -3529,9 +3645,9 @@ proc_log_32bit_telemetry(proc_t p) /* Emit log */ kern_asl_msg(LOG_DEBUG, "messagetracer", 3, - /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec", - /* 1 */ "com.apple.message.signature", signature_buf, - /* 2 */ "com.apple.message.summarize", "YES", - NULL); + /* 0 */ "com.apple.message.domain", "com.apple.kernel.32bit_exec", + /* 1 */ "com.apple.message.signature", signature_buf, + /* 2 */ "com.apple.message.summarize", "YES", + NULL); } #endif /* CONFIG_32BIT_TELEMETRY */ diff --git a/bsd/kern/kern_prot.c b/bsd/kern/kern_prot.c index 36beb2737..7840d8a4b 100644 --- a/bsd/kern/kern_prot.c +++ b/bsd/kern/kern_prot.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ * * * Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved - * + * * * Copyright (c) 1982, 1986, 1989, 1990, 1991, 1993 * The Regents of the University of California. All rights reserved. @@ -66,7 +66,7 @@ * SUCH DAMAGE. * * @(#)kern_prot.c 8.9 (Berkeley) 2/14/95 - * + * * * NOTICE: This file was modified by McAfee Research in 2004 to introduce * support for mandatory and extensible security protections. 
This notice @@ -109,7 +109,7 @@ #include #include -#include /* for current_task() */ +#include /* for current_task() */ #include @@ -124,13 +124,13 @@ * can be used if needed when debugging is active. */ #if DEBUG_CRED -#define DEBUG_CRED_ENTER printf -#define DEBUG_CRED_CHANGE printf +#define DEBUG_CRED_ENTER printf +#define DEBUG_CRED_CHANGE printf extern void kauth_cred_print(kauth_cred_t cred); -#else /* !DEBUG_CRED */ -#define DEBUG_CRED_ENTER(fmt, ...) do {} while (0) -#define DEBUG_CRED_CHANGE(fmt, ...) do {} while (0) -#endif /* !DEBUG_CRED */ +#else /* !DEBUG_CRED */ +#define DEBUG_CRED_ENTER(fmt, ...) do {} while (0) +#define DEBUG_CRED_CHANGE(fmt, ...) do {} while (0) +#endif /* !DEBUG_CRED */ #if DEVELOPMENT || DEBUG extern void task_importance_update_owner_info(task_t); @@ -155,7 +155,7 @@ setprivexec(proc_t p, struct setprivexec_args *uap, int32_t *retval) AUDIT_ARG(value32, uap->flag); *retval = p->p_debugger; p->p_debugger = (uap->flag != 0); - return(0); + return 0; } @@ -173,9 +173,8 @@ setprivexec(proc_t p, struct setprivexec_args *uap, int32_t *retval) int getpid(proc_t p, __unused struct getpid_args *uap, int32_t *retval) { - *retval = p->p_pid; - return (0); + return 0; } @@ -193,9 +192,8 @@ getpid(proc_t p, __unused struct getpid_args *uap, int32_t *retval) int getppid(proc_t p, __unused struct getppid_args *uap, int32_t *retval) { - *retval = p->p_ppid; - return (0); + return 0; } @@ -213,9 +211,8 @@ getppid(proc_t p, __unused struct getppid_args *uap, int32_t *retval) int getpgrp(proc_t p, __unused struct getpgrp_args *uap, int32_t *retval) { - *retval = p->p_pgrpid; - return (0); + return 0; } @@ -242,17 +239,20 @@ getpgid(proc_t p, struct getpgid_args *uap, int32_t *retval) int refheld = 0; pt = p; - if (uap->pid == 0) + if (uap->pid == 0) { goto found; + } - if ((pt = proc_find(uap->pid)) == 0) - return (ESRCH); + if ((pt = proc_find(uap->pid)) == 0) { + return ESRCH; + } refheld = 1; found: *retval = pt->p_pgrpid; - if (refheld != 0) + if (refheld != 0) { proc_rele(pt); - return (0); + } + return 0; } @@ -280,20 +280,23 @@ getsid(proc_t p, struct getsid_args *uap, int32_t *retval) struct session * sessp; pt = p; - if (uap->pid == 0) + if (uap->pid == 0) { goto found; + } - if ((pt = proc_find(uap->pid)) == 0) - return (ESRCH); + if ((pt = proc_find(uap->pid)) == 0) { + return ESRCH; + } refheld = 1; found: sessp = proc_session(pt); *retval = sessp->s_sid; session_rele(sessp); - if (refheld != 0) + if (refheld != 0) { proc_rele(pt); - return (0); + } + return 0; } @@ -309,9 +312,8 @@ found: int getuid(__unused proc_t p, __unused struct getuid_args *uap, int32_t *retval) { - - *retval = kauth_getruid(); - return (0); + *retval = kauth_getruid(); + return 0; } @@ -327,9 +329,8 @@ getuid(__unused proc_t p, __unused struct getuid_args *uap, int32_t *retval) int geteuid(__unused proc_t p, __unused struct geteuid_args *uap, int32_t *retval) { - - *retval = kauth_getuid(); - return (0); + *retval = kauth_getuid(); + return 0; } @@ -348,22 +349,25 @@ int gettid(__unused proc_t p, struct gettid_args *uap, int32_t *retval) { struct uthread *uthread = get_bsdthread_info(current_thread()); - int error; + int error; /* * If this thread is not running with an override identity, we can't * return one to the caller, so return an error instead. 
*/ - if (!(uthread->uu_flag & UT_SETUID)) - return (ESRCH); + if (!(uthread->uu_flag & UT_SETUID)) { + return ESRCH; + } - if ((error = suword(uap->uidp, kauth_cred_getruid(uthread->uu_ucred)))) - return (error); - if ((error = suword(uap->gidp, kauth_cred_getrgid(uthread->uu_ucred)))) - return (error); + if ((error = suword(uap->uidp, kauth_cred_getruid(uthread->uu_ucred)))) { + return error; + } + if ((error = suword(uap->gidp, kauth_cred_getrgid(uthread->uu_ucred)))) { + return error; + } *retval = 0; - return (0); + return 0; } @@ -379,9 +383,8 @@ gettid(__unused proc_t p, struct gettid_args *uap, int32_t *retval) int getgid(__unused proc_t p, __unused struct getgid_args *uap, int32_t *retval) { - *retval = kauth_getrgid(); - return (0); + return 0; } @@ -403,9 +406,8 @@ getgid(__unused proc_t p, __unused struct getgid_args *uap, int32_t *retval) int getegid(__unused proc_t p, __unused struct getegid_args *uap, int32_t *retval) { - *retval = kauth_getgid(); - return (0); + return 0; } @@ -454,22 +456,22 @@ getgroups(__unused proc_t p, struct getgroups_args *uap, int32_t *retval) if ((ngrp = uap->gidsetsize) == 0) { *retval = pcred->cr_ngroups; kauth_cred_unref(&cred); - return (0); + return 0; } if (ngrp < pcred->cr_ngroups) { kauth_cred_unref(&cred); - return (EINVAL); + return EINVAL; } ngrp = pcred->cr_ngroups; if ((error = copyout((caddr_t)pcred->cr_groups, - uap->gidset, - ngrp * sizeof(gid_t)))) { + uap->gidset, + ngrp * sizeof(gid_t)))) { kauth_cred_unref(&cred); - return (error); + return error; } kauth_cred_unref(&cred); *retval = ngrp; - return (0); + return 0; } @@ -483,12 +485,12 @@ getgroups(__unused proc_t p, struct getgroups_args *uap, int32_t *retval) int getsgroups(__unused proc_t p, __unused struct getsgroups_args *uap, __unused int32_t *retval) { - return(ENOTSUP); + return ENOTSUP; } /* * Return the per-thread/per-process whiteout groups list. - * + * * XXX implement getwgroups * */ @@ -496,7 +498,7 @@ getsgroups(__unused proc_t p, __unused struct getsgroups_args *uap, __unused int int getwgroups(__unused proc_t p, __unused struct getwgroups_args *uap, __unused int32_t *retval) { - return(ENOTSUP); + return ENOTSUP; } @@ -530,14 +532,15 @@ setsid(proc_t p, __unused struct setsid_args *uap, int32_t *retval) struct pgrp * pg = PGRP_NULL; if (p->p_pgrpid == p->p_pid || (pg = pgfind(p->p_pid)) || p->p_lflag & P_LINVFORK) { - if (pg != PGRP_NULL) + if (pg != PGRP_NULL) { pg_rele(pg); - return (EPERM); + } + return EPERM; } else { /* enter pgrp works with its own pgrp refcount */ (void)enterpgrp(p, p->p_pid, 1); *retval = p->p_pid; - return (0); + return 0; } } @@ -575,15 +578,15 @@ setsid(proc_t p, __unused struct setsid_args *uap, int32_t *retval) * is used as the target process group ID. 
* * Legacy: This system call entry point is also used to implement the - * legacy library routine setpgrp(), which under POSIX + * legacy library routine setpgrp(), which under POSIX * * XXX: Belongs in kern_proc.c */ int setpgid(proc_t curp, struct setpgid_args *uap, __unused int32_t *retval) { - proc_t targp = PROC_NULL; /* target process */ - struct pgrp *pg = PGRP_NULL; /* target pgrp */ + proc_t targp = PROC_NULL; /* target process */ + struct pgrp *pg = PGRP_NULL; /* target pgrp */ int error = 0; int refheld = 0; int samesess = 0; @@ -594,8 +597,9 @@ setpgid(proc_t curp, struct setpgid_args *uap, __unused int32_t *retval) if (uap->pid != 0 && uap->pid != curp->p_pid) { if ((targp = proc_find(uap->pid)) == 0 || !inferior(targp)) { - if (targp != PROC_NULL) + if (targp != PROC_NULL) { refheld = 1; + } error = ESRCH; goto out; } @@ -627,14 +631,14 @@ setpgid(proc_t curp, struct setpgid_args *uap, __unused int32_t *retval) error = EINVAL; goto out; } - if (uap->pgid == 0) + if (uap->pgid == 0) { uap->pgid = targp->p_pid; - else if (uap->pgid != targp->p_pid) { - if ((pg = pgfind(uap->pgid)) == 0){ + } else if (uap->pgid != targp->p_pid) { + if ((pg = pgfind(uap->pgid)) == 0) { error = EPERM; goto out; } - samesess = (pg->pg_session != curp_sessp); + samesess = (pg->pg_session != curp_sessp); pg_rele(pg); if (samesess != 0) { error = EPERM; @@ -643,13 +647,16 @@ setpgid(proc_t curp, struct setpgid_args *uap, __unused int32_t *retval) } error = enterpgrp(targp, uap->pgid, 0); out: - if (targp_sessp != SESSION_NULL) + if (targp_sessp != SESSION_NULL) { session_rele(targp_sessp); - if (curp_sessp != SESSION_NULL) + } + if (curp_sessp != SESSION_NULL) { session_rele(curp_sessp); - if (refheld != 0) + } + if (refheld != 0) { proc_rele(targp); - return(error); + } + return error; } @@ -670,7 +677,7 @@ out: * execution. */ int -proc_issetugid (proc_t p) +proc_issetugid(proc_t p) { return (p->p_flag & P_SUGID) ? 1 : 0; } @@ -688,7 +695,7 @@ issetugid(proc_t p, __unused struct issetugid_args *uap, int32_t *retval) */ *retval = proc_issetugid(p); - return (0); + return 0; } @@ -733,11 +740,11 @@ setuid(proc_t p, struct setuid_args *uap, __unused int32_t *retval) AUDIT_ARG(uid, uid); for (;;) { - if (uid != my_pcred->cr_ruid && /* allow setuid(getuid()) */ - uid != my_pcred->cr_svuid && /* allow setuid(saved uid) */ + if (uid != my_pcred->cr_ruid && /* allow setuid(getuid()) */ + uid != my_pcred->cr_svuid && /* allow setuid(saved uid) */ (error = suser(my_cred, &p->p_acflag))) { kauth_cred_unref(&my_cred); - return (error); + return error; } /* @@ -760,19 +767,19 @@ setuid(proc_t p, struct setuid_args *uap, __unused int32_t *retval) * to something other than the default list for the user, as * in entering a group or leaving an exclusion group). */ - if (!(my_pcred->cr_flags & CRF_NOMEMBERD)) + if (!(my_pcred->cr_flags & CRF_NOMEMBERD)) { gmuid = uid; + } - /* + /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is * a change, we drop the reference on the credential we * passed in. The subsequent compare is safe, because it is * a pointer compare rather than a contents compare. 
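 *
 * (Editor's note, not part of the original patch: condensed, the
 * copy-on-change retry pattern used here and in the other set*id()
 * handlers below is, with permission checks, P_SUGID handling and
 * auditing elided:
 *
 *	my_cred = kauth_cred_proc_ref(p);
 *	for (;;) {
 *		my_new_cred = kauth_cred_setresuid(my_cred, ruid, uid, svuid, gmuid);
 *		if (my_cred == my_new_cred)
 *			break;				// nothing changed
 *		proc_ucred_lock(p);
 *		if (p->p_ucred != my_cred) {		// raced with another setter
 *			proc_ucred_unlock(p);
 *			kauth_cred_unref(&my_new_cred);
 *			my_cred = kauth_cred_proc_ref(p);
 *			continue;			// retry against the new cred
 *		}
 *		p->p_ucred = my_new_cred;		// install the updated credential
 *		proc_ucred_unlock(p);
 *		break;
 *	}
 *	kauth_cred_unref(&my_cred);	// old proc reference or our extra reference
 *
 * The break-on-equal works because kauth credentials are uniqued, so
 * the pointer compare described above is sufficient.)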
- */ + */ my_new_cred = kauth_cred_setresuid(my_cred, ruid, uid, svuid, gmuid); if (my_cred != my_new_cred) { - DEBUG_CRED_CHANGE("setuid CH(%d): %p/0x%08x -> %p/0x%08x\n", p->p_pid, my_cred, my_pcred->cr_flags, my_new_cred, posix_cred_get(my_new_cred)->cr_flags); /* @@ -829,9 +836,9 @@ setuid(proc_t p, struct setuid_args *uap, __unused int32_t *retval) } /* Drop old proc reference or our extra reference */ kauth_cred_unref(&my_cred); - + set_security_token(p); - return (0); + return 0; } @@ -870,24 +877,22 @@ seteuid(proc_t p, struct seteuid_args *uap, __unused int32_t *retval) my_pcred = posix_cred_get(my_cred); for (;;) { - if (euid != my_pcred->cr_ruid && euid != my_pcred->cr_svuid && - (error = suser(my_cred, &p->p_acflag))) { + (error = suser(my_cred, &p->p_acflag))) { kauth_cred_unref(&my_cred); - return (error); + return error; } - /* + /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is * a change, we drop the reference on the credential we * passed in. The subsequent compare is safe, because it is * a pointer compare rather than a contents compare. - */ + */ my_new_cred = kauth_cred_setresuid(my_cred, KAUTH_UID_NONE, euid, KAUTH_UID_NONE, my_pcred->cr_gmuid); - - if (my_cred != my_new_cred) { + if (my_cred != my_new_cred) { DEBUG_CRED_CHANGE("seteuid CH(%d): %p/0x%08x -> %p/0x%08x\n", p->p_pid, my_cred, my_pcred->cr_flags, my_new_cred, posix_cred_get(my_new_cred)->cr_flags); proc_ucred_lock(p); @@ -917,7 +922,7 @@ seteuid(proc_t p, struct seteuid_args *uap, __unused int32_t *retval) kauth_cred_unref(&my_cred); set_security_token(p); - return (0); + return 0; } @@ -964,10 +969,12 @@ setreuid(proc_t p, struct setreuid_args *uap, __unused int32_t *retval) ruid = uap->ruid; euid = uap->euid; - if (ruid == (uid_t)-1) + if (ruid == (uid_t)-1) { ruid = KAUTH_UID_NONE; - if (euid == (uid_t)-1) + } + if (euid == (uid_t)-1) { euid = KAUTH_UID_NONE; + } AUDIT_ARG(euid, euid); AUDIT_ARG(ruid, ruid); @@ -975,31 +982,30 @@ setreuid(proc_t p, struct setreuid_args *uap, __unused int32_t *retval) my_pcred = posix_cred_get(my_cred); for (;;) { - - if (((ruid != KAUTH_UID_NONE && /* allow no change of ruid */ - ruid != my_pcred->cr_ruid && /* allow ruid = ruid */ - ruid != my_pcred->cr_uid && /* allow ruid = euid */ - ruid != my_pcred->cr_svuid) || /* allow ruid = svuid */ - (euid != KAUTH_UID_NONE && /* allow no change of euid */ - euid != my_pcred->cr_uid && /* allow euid = euid */ - euid != my_pcred->cr_ruid && /* allow euid = ruid */ - euid != my_pcred->cr_svuid)) && /* allow euid = svuid */ + if (((ruid != KAUTH_UID_NONE && /* allow no change of ruid */ + ruid != my_pcred->cr_ruid && /* allow ruid = ruid */ + ruid != my_pcred->cr_uid && /* allow ruid = euid */ + ruid != my_pcred->cr_svuid) || /* allow ruid = svuid */ + (euid != KAUTH_UID_NONE && /* allow no change of euid */ + euid != my_pcred->cr_uid && /* allow euid = euid */ + euid != my_pcred->cr_ruid && /* allow euid = ruid */ + euid != my_pcred->cr_svuid)) && /* allow euid = svuid */ (error = suser(my_cred, &p->p_acflag))) { /* allow root user any */ kauth_cred_unref(&my_cred); - return (error); + return error; } uid_t new_euid; uid_t svuid = KAUTH_UID_NONE; new_euid = my_pcred->cr_uid; - /* + /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is * a change, we drop the reference on the credential we * passed in. 
The subsequent compare is safe, because it is * a pointer compare rather than a contents compare. - */ + */ if (euid != KAUTH_UID_NONE && my_pcred->cr_uid != euid) { /* changing the effective UID */ new_euid = euid; @@ -1013,14 +1019,13 @@ setreuid(proc_t p, struct setreuid_args *uap, __unused int32_t *retval) */ if (my_pcred->cr_svuid != uap->ruid && my_pcred->cr_svuid != uap->euid) { - svuid = new_euid; + svuid = new_euid; OSBitOrAtomic(P_SUGID, &p->p_flag); } my_new_cred = kauth_cred_setresuid(my_cred, ruid, euid, svuid, my_pcred->cr_gmuid); - - if (my_cred != my_new_cred) { + if (my_cred != my_new_cred) { DEBUG_CRED_CHANGE("setreuid CH(%d): %p/0x%08x -> %p/0x%08x\n", p->p_pid, my_cred, my_pcred->cr_flags, my_new_cred, posix_cred_get(my_new_cred)->cr_flags); /* @@ -1080,7 +1085,7 @@ setreuid(proc_t p, struct setreuid_args *uap, __unused int32_t *retval) kauth_cred_unref(&my_cred); set_security_token(p); - return (0); + return 0; } @@ -1129,18 +1134,18 @@ setgid(proc_t p, struct setgid_args *uap, __unused int32_t *retval) my_pcred = posix_cred_get(my_cred); for (;;) { - if (gid != my_pcred->cr_rgid && /* allow setgid(getgid()) */ - gid != my_pcred->cr_svgid && /* allow setgid(saved gid) */ + if (gid != my_pcred->cr_rgid && /* allow setgid(getgid()) */ + gid != my_pcred->cr_svgid && /* allow setgid(saved gid) */ (error = suser(my_cred, &p->p_acflag))) { kauth_cred_unref(&my_cred); - return (error); + return error; } /* * If we are privileged, then set the saved and real GID too; * otherwise, just set the effective GID */ - if (suser(my_cred, &p->p_acflag) == 0) { + if (suser(my_cred, &p->p_acflag) == 0) { svgid = gid; rgid = gid; } else { @@ -1148,16 +1153,15 @@ setgid(proc_t p, struct setgid_args *uap, __unused int32_t *retval) rgid = KAUTH_GID_NONE; } - /* + /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is * a change, we drop the reference on the credential we * passed in. The subsequent compare is safe, because it is * a pointer compare rather than a contents compare. - */ + */ my_new_cred = kauth_cred_setresgid(my_cred, rgid, gid, svgid); if (my_cred != my_new_cred) { - DEBUG_CRED_CHANGE("setgid(CH)%d: %p/0x%08x->%p/0x%08x\n", p->p_pid, my_cred, my_cred->cr_flags, my_new_cred, my_new_cred->cr_flags); proc_ucred_lock(p); @@ -1185,9 +1189,9 @@ setgid(proc_t p, struct setgid_args *uap, __unused int32_t *retval) } /* Drop old proc reference or our extra reference */ kauth_cred_unref(&my_cred); - + set_security_token(p); - return (0); + return 0; } @@ -1237,18 +1241,17 @@ setegid(proc_t p, struct setegid_args *uap, __unused int32_t *retval) egid != my_pcred->cr_svgid && (error = suser(my_cred, &p->p_acflag))) { kauth_cred_unref(&my_cred); - return (error); + return error; } - /* + /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is * a change, we drop the reference on the credential we * passed in. The subsequent compare is safe, because it is * a pointer compare rather than a contents compare. 
- */ + */ my_new_cred = kauth_cred_setresgid(my_cred, KAUTH_GID_NONE, egid, KAUTH_GID_NONE); if (my_cred != my_new_cred) { - DEBUG_CRED_CHANGE("setegid(CH)%d: %p/0x%08x->%p/0x%08x\n", p->p_pid, my_cred, my_pcred->cr_flags, my_new_cred, posix_cred_get(my_new_cred)->cr_flags); proc_ucred_lock(p); @@ -1279,7 +1282,7 @@ setegid(proc_t p, struct setegid_args *uap, __unused int32_t *retval) kauth_cred_unref(&my_cred); set_security_token(p); - return (0); + return 0; } /* @@ -1332,10 +1335,12 @@ setregid(proc_t p, struct setregid_args *uap, __unused int32_t *retval) rgid = uap->rgid; egid = uap->egid; - if (rgid == (uid_t)-1) + if (rgid == (uid_t)-1) { rgid = KAUTH_GID_NONE; - if (egid == (uid_t)-1) + } + if (egid == (uid_t)-1) { egid = KAUTH_GID_NONE; + } AUDIT_ARG(egid, egid); AUDIT_ARG(rgid, rgid); @@ -1344,33 +1349,32 @@ setregid(proc_t p, struct setregid_args *uap, __unused int32_t *retval) my_pcred = posix_cred_get(my_cred); for (;;) { - - if (((rgid != KAUTH_UID_NONE && /* allow no change of rgid */ - rgid != my_pcred->cr_rgid && /* allow rgid = rgid */ - rgid != my_pcred->cr_gid && /* allow rgid = egid */ - rgid != my_pcred->cr_svgid) || /* allow rgid = svgid */ - (egid != KAUTH_UID_NONE && /* allow no change of egid */ - egid != my_pcred->cr_groups[0] && /* allow no change of egid */ - egid != my_pcred->cr_gid && /* allow egid = egid */ - egid != my_pcred->cr_rgid && /* allow egid = rgid */ - egid != my_pcred->cr_svgid)) && /* allow egid = svgid */ + if (((rgid != KAUTH_UID_NONE && /* allow no change of rgid */ + rgid != my_pcred->cr_rgid && /* allow rgid = rgid */ + rgid != my_pcred->cr_gid && /* allow rgid = egid */ + rgid != my_pcred->cr_svgid) || /* allow rgid = svgid */ + (egid != KAUTH_UID_NONE && /* allow no change of egid */ + egid != my_pcred->cr_groups[0] && /* allow no change of egid */ + egid != my_pcred->cr_gid && /* allow egid = egid */ + egid != my_pcred->cr_rgid && /* allow egid = rgid */ + egid != my_pcred->cr_svgid)) && /* allow egid = svgid */ (error = suser(my_cred, &p->p_acflag))) { /* allow root user any */ kauth_cred_unref(&my_cred); - return (error); + return error; } uid_t new_egid = my_pcred->cr_gid; uid_t new_rgid = my_pcred->cr_rgid; uid_t svgid = KAUTH_UID_NONE; - - /* + + /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is * a change, we drop the reference on the credential we * passed in. The subsequent compare is safe, because it is * a pointer compare rather than a contents compare. 
- */ + */ if (egid != KAUTH_UID_NONE && my_pcred->cr_gid != egid) { /* changing the effective GID */ new_egid = egid; @@ -1389,13 +1393,12 @@ setregid(proc_t p, struct setregid_args *uap, __unused int32_t *retval) */ if (my_pcred->cr_svgid != uap->rgid && my_pcred->cr_svgid != uap->egid) { - svgid = new_egid; + svgid = new_egid; OSBitOrAtomic(P_SUGID, &p->p_flag); } my_new_cred = kauth_cred_setresgid(my_cred, rgid, egid, svgid); if (my_cred != my_new_cred) { - DEBUG_CRED_CHANGE("setregid(CH)%d: %p/0x%08x->%p/0x%08x\n", p->p_pid, my_cred, my_pcred->cr_flags, my_new_cred, posix_cred_get(my_new_cred)->cr_flags); proc_ucred_lock(p); @@ -1424,7 +1427,7 @@ setregid(proc_t p, struct setregid_args *uap, __unused int32_t *retval) kauth_cred_unref(&my_cred); set_security_token(p); - return (0); + return 0; } @@ -1449,14 +1452,15 @@ settid(proc_t p, struct settid_args *uap, __unused int32_t *retval) AUDIT_ARG(uid, uid); AUDIT_ARG(gid, gid); - if (proc_suser(p) != 0) - return (EPERM); - - if (uid == KAUTH_UID_NONE) { + if (proc_suser(p) != 0) { + return EPERM; + } + if (uid == KAUTH_UID_NONE) { /* must already be assuming another identity in order to revert back */ - if ((uthread->uu_flag & UT_SETUID) == 0) - return (EPERM); + if ((uthread->uu_flag & UT_SETUID) == 0) { + return EPERM; + } /* revert to delayed binding of process credential */ uc = kauth_cred_proc_ref(p); @@ -1468,7 +1472,7 @@ settid(proc_t p, struct settid_args *uap, __unused int32_t *retval) /* cannot already be assuming another identity */ if ((uthread->uu_flag & UT_SETUID) != 0) { - return (EPERM); + return EPERM; } /* @@ -1478,11 +1482,12 @@ settid(proc_t p, struct settid_args *uap, __unused int32_t *retval) * current credential while we muck with it, so we can do * the post-compare for changes by pointer. */ - kauth_cred_ref(uthread->uu_ucred); + kauth_cred_ref(uthread->uu_ucred); my_cred = uthread->uu_ucred; my_new_cred = kauth_cred_setuidgid(my_cred, uid, gid); - if (my_cred != my_new_cred) + if (my_cred != my_new_cred) { uthread->uu_ucred = my_new_cred; + } uthread->uu_flag |= UT_SETUID; /* Drop old uthread reference or our extra reference */ @@ -1494,7 +1499,7 @@ settid(proc_t p, struct settid_args *uap, __unused int32_t *retval) * XXX it is unclear whether P_SUGID should be st at this point; * XXX in theory, it is being deprecated. */ - return (0); + return 0; } @@ -1520,7 +1525,7 @@ settid_with_pid(proc_t p, struct settid_with_pid_args *uap, __unused int32_t *re AUDIT_ARG(value32, uap->assume); if (proc_suser(p) != 0) { - return (EPERM); + return EPERM; } /* @@ -1536,17 +1541,19 @@ settid_with_pid(proc_t p, struct settid_with_pid_args *uap, __unused int32_t *re */ if (uap->assume != 0) { /* can't do this if we have already assumed an identity */ - if ((uthread->uu_flag & UT_SETUID) != 0) - return (EPERM); - + if ((uthread->uu_flag & UT_SETUID) != 0) { + return EPERM; + } + target_proc = proc_find(uap->pid); /* can't assume the identity of the kernel process */ if (target_proc == NULL || target_proc == kernproc) { - if (target_proc!= NULL) + if (target_proc != NULL) { proc_rele(target_proc); - return (ESRCH); + } + return ESRCH; } - + /* * Take a reference on the credential used in our target * process then use it as the identity for our current @@ -1559,39 +1566,41 @@ settid_with_pid(proc_t p, struct settid_with_pid_args *uap, __unused int32_t *re * credential following our assumption of a per-thread one, * since the credential cache will maintain a unique instance. 
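 *
 * (Editor's illustration, not part of the original patch — hypothetical
 * use of this private interface, assuming a user-space wrapper named
 * settid_with_pid() exists:
 *
 *	settid_with_pid(target_pid, 1);	// assume target's uid/gid on this thread
 *	...work under the assumed identity...
 *	settid_with_pid(0, 0);		// revert to delayed process-cred binding
 *
 * Per the UT_SETUID checks below, assuming while already assuming, or
 * reverting while not assuming, fails with EPERM.)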
*/ - kauth_cred_ref(uthread->uu_ucred); + kauth_cred_ref(uthread->uu_ucred); my_cred = uthread->uu_ucred; my_target_cred = kauth_cred_proc_ref(target_proc); my_target_pcred = posix_cred_get(my_target_cred); my_new_cred = kauth_cred_setuidgid(my_cred, my_target_pcred->cr_uid, my_target_pcred->cr_gid); - if (my_cred != my_new_cred) + if (my_cred != my_new_cred) { uthread->uu_ucred = my_new_cred; - + } + uthread->uu_flag |= UT_SETUID; - + /* Drop old uthread reference or our extra reference */ proc_rele(target_proc); kauth_cred_unref(&my_cred); kauth_cred_unref(&my_target_cred); - return (0); + return 0; } - + /* * Otherwise, we are reverting back to normal mode of operation where * delayed binding of the process credential sets the credential in * the thread (uu_ucred) */ - if ((uthread->uu_flag & UT_SETUID) == 0) - return (EPERM); + if ((uthread->uu_flag & UT_SETUID) == 0) { + return EPERM; + } /* revert to delayed binding of process credential */ my_new_cred = kauth_cred_proc_ref(p); kauth_cred_unref(&uthread->uu_ucred); uthread->uu_ucred = my_new_cred; uthread->uu_flag &= ~UT_SETUID; - - return (0); + + return 0; } @@ -1634,37 +1643,38 @@ static int setgroups1(proc_t p, u_int gidsetsize, user_addr_t gidset, uid_t gmuid, __unused int32_t *retval) { u_int ngrp; - gid_t newgroups[NGROUPS] = { 0 }; - int error; + gid_t newgroups[NGROUPS] = { 0 }; + int error; kauth_cred_t my_cred, my_new_cred; struct uthread *uthread = get_bsdthread_info(current_thread()); DEBUG_CRED_ENTER("setgroups1 (%d/%d): %d 0x%016x %d\n", p->p_pid, (p->p_pptr ? p->p_pptr->p_pid : 0), gidsetsize, gidset, gmuid); ngrp = gidsetsize; - if (ngrp > NGROUPS) - return (EINVAL); + if (ngrp > NGROUPS) { + return EINVAL; + } - if ( ngrp < 1 ) { + if (ngrp < 1) { ngrp = 1; } else { error = copyin(gidset, - (caddr_t)newgroups, ngrp * sizeof(gid_t)); + (caddr_t)newgroups, ngrp * sizeof(gid_t)); if (error) { - return (error); + return error; } } my_cred = kauth_cred_proc_ref(p); if ((error = suser(my_cred, &p->p_acflag))) { kauth_cred_unref(&my_cred); - return (error); + return error; } if ((uthread->uu_flag & UT_SETUID) != 0) { #if DEBUG_CRED int my_cred_flags = uthread->uu_ucred->cr_flags; -#endif /* DEBUG_CRED */ +#endif /* DEBUG_CRED */ kauth_cred_unref(&my_cred); /* @@ -1680,17 +1690,16 @@ setgroups1(proc_t p, u_int gidsetsize, user_addr_t gidset, uid_t gmuid, __unused uthread->uu_ucred = kauth_cred_setgroups(my_cred, &newgroups[0], ngrp, gmuid); #if DEBUG_CRED if (my_cred != uthread->uu_ucred) { - DEBUG_CRED_CHANGE("setgroups1(CH)%d: %p/0x%08x->%p/0x%08x\n", p->p_pid, my_cred, my_cred_flags, uthread->uu_ucred , uthread->uu_ucred ->cr_flags); + DEBUG_CRED_CHANGE("setgroups1(CH)%d: %p/0x%08x->%p/0x%08x\n", p->p_pid, my_cred, my_cred_flags, uthread->uu_ucred, uthread->uu_ucred->cr_flags); } -#endif /* DEBUG_CRED */ +#endif /* DEBUG_CRED */ } else { - /* * get current credential and take a reference while we muck * with it */ for (;;) { - /* + /* * Set the credential with new info. 
If there is no * change, we get back the same credential we passed * in; if there is a change, we drop the reference on @@ -1700,14 +1709,13 @@ setgroups1(proc_t p, u_int gidsetsize, user_addr_t gidset, uid_t gmuid, __unused */ my_new_cred = kauth_cred_setgroups(my_cred, &newgroups[0], ngrp, gmuid); if (my_cred != my_new_cred) { - DEBUG_CRED_CHANGE("setgroups1(CH)%d: %p/0x%08x->%p/0x%08x\n", p->p_pid, my_cred, my_cred->cr_flags, my_new_cred, my_new_cred->cr_flags); proc_ucred_lock(p); /* * We need to protect for a race where another * thread also changed the credential after we - * took our reference. If p_ucred has + * took our reference. If p_ucred has * changed then we should restart this again * with the new cred. */ @@ -1734,7 +1742,7 @@ setgroups1(proc_t p, u_int gidsetsize, user_addr_t gidset, uid_t gmuid, __unused set_security_token(p); } - return (0); + return 0; } @@ -1772,7 +1780,7 @@ initgroups(proc_t p, struct initgroups_args *uap, __unused int32_t *retval) { DEBUG_CRED_ENTER("initgroups\n"); - return(setgroups1(p, uap->gidsetsize, uap->gidset, uap->gmuid, retval)); + return setgroups1(p, uap->gidsetsize, uap->gidset, uap->gmuid, retval); } @@ -1806,13 +1814,13 @@ setgroups(proc_t p, struct setgroups_args *uap, __unused int32_t *retval) { DEBUG_CRED_ENTER("setgroups\n"); - return(setgroups1(p, uap->gidsetsize, uap->gidset, KAUTH_UID_NONE, retval)); + return setgroups1(p, uap->gidsetsize, uap->gidset, KAUTH_UID_NONE, retval); } /* * Set the per-thread/per-process supplementary groups list. - * + * * XXX implement setsgroups * */ @@ -1820,12 +1828,12 @@ setgroups(proc_t p, struct setgroups_args *uap, __unused int32_t *retval) int setsgroups(__unused proc_t p, __unused struct setsgroups_args *uap, __unused int32_t *retval) { - return(ENOTSUP); + return ENOTSUP; } /* * Set the per-thread/per-process whiteout groups list. 
- * + * * XXX implement setwgroups * */ @@ -1833,7 +1841,7 @@ setsgroups(__unused proc_t p, __unused struct setsgroups_args *uap, __unused int int setwgroups(__unused proc_t p, __unused struct setwgroups_args *uap, __unused int32_t *retval) { - return(ENOTSUP); + return ENOTSUP; } @@ -1848,9 +1856,10 @@ groupmember(gid_t gid, kauth_cred_t cred) { int is_member; - if (kauth_cred_ismember_gid(cred, gid, &is_member) == 0 && is_member) - return (1); - return (0); + if (kauth_cred_ismember_gid(cred, gid, &is_member) == 0 && is_member) { + return 1; + } + return 0; } @@ -1872,15 +1881,17 @@ int suser(kauth_cred_t cred, u_short *acflag) { #if DIAGNOSTIC - if (!IS_VALID_CRED(cred)) + if (!IS_VALID_CRED(cred)) { panic("suser"); + } #endif if (kauth_cred_getuid(cred) == 0) { - if (acflag) + if (acflag) { *acflag |= ASU; - return (0); + } + return 0; } - return (EPERM); + return EPERM; } @@ -1910,24 +1921,25 @@ suser(kauth_cred_t cred, u_short *acflag) int getlogin(proc_t p, struct getlogin_args *uap, __unused int32_t *retval) { - char buffer[MAXLOGNAME+1]; + char buffer[MAXLOGNAME + 1]; struct session * sessp; - bzero(buffer, MAXLOGNAME+1); + bzero(buffer, MAXLOGNAME + 1); sessp = proc_session(p); - if (uap->namelen > MAXLOGNAME) + if (uap->namelen > MAXLOGNAME) { uap->namelen = MAXLOGNAME; + } - if(sessp != SESSION_NULL) { + if (sessp != SESSION_NULL) { session_lock(sessp); bcopy( sessp->s_login, buffer, uap->namelen); session_unlock(sessp); } session_rele(sessp); - return (copyout((caddr_t)buffer, uap->namebuf, uap->namelen)); + return copyout((caddr_t)buffer, uap->namebuf, uap->namelen); } @@ -1951,14 +1963,15 @@ int setlogin(proc_t p, struct setlogin_args *uap, __unused int32_t *retval) { int error; - size_t dummy=0; - char buffer[MAXLOGNAME+1]; + size_t dummy = 0; + char buffer[MAXLOGNAME + 1]; struct session * sessp; - if ((error = proc_suser(p))) - return (error); + if ((error = proc_suser(p))) { + return error; + } - bzero(&buffer[0], MAXLOGNAME+1); + bzero(&buffer[0], MAXLOGNAME + 1); error = copyinstr(uap->namebuf, @@ -1977,9 +1990,10 @@ setlogin(proc_t p, struct setlogin_args *uap, __unused int32_t *retval) if (!error) { AUDIT_ARG(text, buffer); - } else if (error == ENAMETOOLONG) + } else if (error == ENAMETOOLONG) { error = EINVAL; - return (error); + } + return error; } @@ -2017,12 +2031,13 @@ set_security_token_task_internal(proc_t p, void *t) * undefined behavior anyway, right? */ if (task == current_task()) { - uthread_t uthread; + uthread_t uthread; uthread = (uthread_t)get_bsdthread_info(current_thread()); - if (uthread->uu_flag & UT_VFORK) - return (1); + if (uthread->uu_flag & UT_VFORK) { + return 1; + } } - + my_cred = kauth_cred_proc_ref(p); my_pcred = posix_cred_get(my_cred); @@ -2055,23 +2070,24 @@ set_security_token_task_internal(proc_t p, void *t) host_priv = (sec_token.val[0]) ? 
HOST_PRIV_NULL : host_priv_self(); #if CONFIG_MACF - if (host_priv != HOST_PRIV_NULL && mac_system_check_host_priv(my_cred)) + if (host_priv != HOST_PRIV_NULL && mac_system_check_host_priv(my_cred)) { host_priv = HOST_PRIV_NULL; + } #endif kauth_cred_unref(&my_cred); #if DEVELOPMENT || DEBUG - /* + /* * Update the pid an proc name for importance base if any */ task_importance_update_owner_info(task); #endif - return (host_security_set_task_token(host_security_self(), - task, - sec_token, - audit_token, - host_priv) != KERN_SUCCESS); + return host_security_set_task_token(host_security_self(), + task, + sec_token, + audit_token, + host_priv) != KERN_SUCCESS; } @@ -2081,8 +2097,9 @@ int get_audit_token_pid(audit_token_t *audit_token) { /* keep in-sync with set_security_token (above) */ - if (audit_token) + if (audit_token) { return (int)audit_token->val[5]; + } return -1; } diff --git a/bsd/kern/kern_resource.c b/bsd/kern/kern_resource.c index cf55f42d5..839a190b5 100644 --- a/bsd/kern/kern_resource.c +++ b/bsd/kern/kern_resource.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. 
All Rights Reserved */ @@ -99,9 +99,9 @@ #include #include -#include <kern/clock.h> /* for absolutetime_to_microtime() */ -#include <netinet/in.h> /* for TRAFFIC_MGT_SO_* */ -#include <sys/socketvar.h> /* for struct socket */ +#include <kern/clock.h> /* for absolutetime_to_microtime() */ +#include <netinet/in.h> /* for TRAFFIC_MGT_SO_* */ +#include <sys/socketvar.h> /* for struct socket */ #if NECP #include <net/necp.h> #endif /* NECP */ @@ -117,9 +117,9 @@ #include #endif -int donice(struct proc *curp, struct proc *chgp, int n); -int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp); -int uthread_get_background_state(uthread_t); +int donice(struct proc *curp, struct proc *chgp, int n); +int dosetrlimit(struct proc *p, u_int which, struct rlimit *limp); +int uthread_get_background_state(uthread_t); static void do_background_socket(struct proc *p, thread_t thread); static int do_background_thread(thread_t thread, int priority); static int do_background_proc(struct proc *curp, struct proc *targetp, int priority); @@ -138,8 +138,8 @@ void fill_task_monotonic_rusage(task_t task, rusage_info_current *ri); int proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie); -rlim_t maxdmap = MAXDSIZ; /* XXX */ -rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE; /* XXX */ +rlim_t maxdmap = MAXDSIZ; /* XXX */ +rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE; /* XXX */ /* * Limits on the number of open files per process, and the number @@ -147,21 +147,21 @@ rlim_t maxsmap = MAXSSIZ - PAGE_MAX_SIZE; /* XXX */ * * Note: would be in kern/subr_param.c in FreeBSD. */ -__private_extern__ int maxfilesperproc = OPEN_MAX; /* per-proc open files limit */ +__private_extern__ int maxfilesperproc = OPEN_MAX; /* per-proc open files limit */ SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED, - &maxprocperuid, 0, "Maximum processes allowed per userid" ); + &maxprocperuid, 0, "Maximum processes allowed per userid" ); SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED, - &maxfilesperproc, 0, "Maximum files allowed open per process" ); + &maxfilesperproc, 0, "Maximum files allowed open per process" ); /* Args and fn for proc_iteration callback used in setpriority */ struct puser_nice_args { proc_t curp; - int prio; - id_t who; - int * foundp; - int * errorp; + int prio; + id_t who; + int * foundp; + int * errorp; }; static int puser_donice_callback(proc_t p, void * arg); @@ -169,9 +169,9 @@ static int puser_donice_callback(proc_t p, void * arg); /* Args and fn for proc_iteration callback used in setpriority */ struct ppgrp_nice_args { proc_t curp; - int prio; - int * foundp; - int * errorp; + int prio; + int * foundp; + int * errorp; }; static int ppgrp_donice_callback(proc_t p, void * arg); @@ -188,22 +188,22 @@ getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval) int error = 0; /* would also test (uap->who < 0), but id_t is unsigned */ - if (uap->who > 0x7fffffff) - return (EINVAL); + if (uap->who > 0x7fffffff) { + return EINVAL; + } switch (uap->which) { - case PRIO_PROCESS: if (uap->who == 0) { p = curp; low = p->p_nice; } else { p = proc_find(uap->who); - if (p == 0) + if (p == 0) { break; + } low = p->p_nice; proc_rele(p); - } break; @@ -213,14 +213,15 @@ getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval) if (uap->who == 0) { /* returns the pgrp to ref */ pg = proc_pgrp(curp); - } else if ((pg = pgfind(uap->who)) == PGRP_NULL) { + } else if ((pg = pgfind(uap->who)) == PGRP_NULL) { break; } /* No need for iteration as it is a simple scan */ pgrp_lock(pg); PGMEMBERS_FOREACH(pg, p) { - if
(p->p_nice < low) + if (p->p_nice < low) { low = p->p_nice; + } } pgrp_unlock(pg); pg_rele(pg); @@ -228,16 +229,18 @@ getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval) } case PRIO_USER: - if (uap->who == 0) + if (uap->who == 0) { uap->who = kauth_cred_getuid(kauth_cred_get()); + } proc_list_lock(); for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { my_cred = kauth_cred_proc_ref(p); if (kauth_cred_getuid(my_cred) == uap->who && - p->p_nice < low) + p->p_nice < low) { low = p->p_nice; + } kauth_cred_unref(&my_cred); } @@ -247,8 +250,9 @@ getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval) case PRIO_DARWIN_THREAD: /* we currently only support the current thread */ - if (uap->who != 0) - return (EINVAL); + if (uap->who != 0) { + return EINVAL; + } low = proc_get_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG); @@ -259,17 +263,20 @@ getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval) p = curp; } else { p = proc_find(uap->who); - if (p == PROC_NULL) + if (p == PROC_NULL) { break; + } refheld = 1; } error = get_background_proc(curp, p, &low); - if (refheld) + if (refheld) { proc_rele(p); - if (error) - return (error); + } + if (error) { + return error; + } break; case PRIO_DARWIN_ROLE: @@ -277,26 +284,30 @@ getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval) p = curp; } else { p = proc_find(uap->who); - if (p == PROC_NULL) + if (p == PROC_NULL) { break; + } refheld = 1; } error = proc_get_darwin_role(curp, p, &low); - if (refheld) + if (refheld) { proc_rele(p); - if (error) - return (error); + } + if (error) { + return error; + } break; default: - return (EINVAL); + return EINVAL; + } + if (low == PRIO_MAX + 1) { + return ESRCH; } - if (low == PRIO_MAX + 1) - return (ESRCH); *retval = low; - return (0); + return 0; } /* call back function used for proc iteration in PRIO_USER */ @@ -310,16 +321,17 @@ puser_donice_callback(proc_t p, void * arg) my_cred = kauth_cred_proc_ref(p); if (kauth_cred_getuid(my_cred) == pun->who) { error = donice(pun->curp, p, pun->prio); - if (pun->errorp != NULL) + if (pun->errorp != NULL) { *pun->errorp = error; + } if (pun->foundp != NULL) { n = *pun->foundp; - *pun->foundp = n+1; + *pun->foundp = n + 1; } } kauth_cred_unref(&my_cred); - return(PROC_RETURNED); + return PROC_RETURNED; } /* call back function used for proc iteration in PRIO_PGRP */ @@ -331,14 +343,15 @@ ppgrp_donice_callback(proc_t p, void * arg) int n; error = donice(pun->curp, p, pun->prio); - if (pun->errorp != NULL) + if (pun->errorp != NULL) { *pun->errorp = error; - if (pun->foundp!= NULL) { + } + if (pun->foundp != NULL) { n = *pun->foundp; - *pun->foundp = n+1; + *pun->foundp = n + 1; } - return(PROC_RETURNED); + return PROC_RETURNED; } /* @@ -361,40 +374,43 @@ setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval) AUDIT_ARG(value32, uap->prio); /* would also test (uap->who < 0), but id_t is unsigned */ - if (uap->who > 0x7fffffff) - return (EINVAL); + if (uap->who > 0x7fffffff) { + return EINVAL; + } switch (uap->which) { - case PRIO_PROCESS: - if (uap->who == 0) + if (uap->who == 0) { p = curp; - else { + } else { p = proc_find(uap->who); - if (p == 0) + if (p == 0) { break; + } refheld = 1; } error = donice(curp, p, uap->prio); found++; - if (refheld != 0) + if (refheld != 0) { proc_rele(p); + } break; case PRIO_PGRP: { struct pgrp *pg = PGRP_NULL; struct ppgrp_nice_args ppgrp; - + if (uap->who == 0) { pg = proc_pgrp(curp); - } 
else if ((pg = pgfind(uap->who)) == PGRP_NULL) + } else if ((pg = pgfind(uap->who)) == PGRP_NULL) { break; + } ppgrp.curp = curp; ppgrp.prio = uap->prio; ppgrp.foundp = &found; ppgrp.errorp = &error; - + /* PGRP_DROPREF drops the reference on process group */ pgrp_iterate(pg, PGRP_DROPREF, ppgrp_donice_callback, (void *)&ppgrp, NULL, NULL); @@ -404,8 +420,9 @@ setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval) case PRIO_USER: { struct puser_nice_args punice; - if (uap->who == 0) + if (uap->who == 0) { uap->who = kauth_cred_getuid(kauth_cred_get()); + } punice.curp = curp; punice.prio = uap->prio; @@ -420,8 +437,9 @@ setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval) case PRIO_DARWIN_THREAD: { /* we currently only support the current thread */ - if (uap->who != 0) - return (EINVAL); + if (uap->who != 0) { + return EINVAL; + } error = do_background_thread(current_thread(), uap->prio); found++; @@ -429,30 +447,34 @@ setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval) } case PRIO_DARWIN_PROCESS: { - if (uap->who == 0) + if (uap->who == 0) { p = curp; - else { + } else { p = proc_find(uap->who); - if (p == 0) + if (p == 0) { break; + } refheld = 1; } error = do_background_proc(curp, p, uap->prio); found++; - if (refheld != 0) + if (refheld != 0) { proc_rele(p); + } break; } case PRIO_DARWIN_GPU: { - if (uap->who == 0) - return (EINVAL); + if (uap->who == 0) { + return EINVAL; + } p = proc_find(uap->who); - if (p == PROC_NULL) + if (p == PROC_NULL) { break; + } error = set_gpudeny_proc(curp, p, uap->prio); @@ -466,29 +488,32 @@ setpriority(struct proc *curp, struct setpriority_args *uap, int32_t *retval) p = curp; } else { p = proc_find(uap->who); - if (p == PROC_NULL) + if (p == PROC_NULL) { break; + } refheld = 1; } error = proc_set_darwin_role(curp, p, uap->prio); found++; - if (refheld != 0) + if (refheld != 0) { proc_rele(p); + } break; } default: - return (EINVAL); + return EINVAL; + } + if (found == 0) { + return ESRCH; } - if (found == 0) - return (ESRCH); if (error == EIDRM) { *retval = -2; error = 0; } - return (error); + return error; } @@ -514,18 +539,21 @@ donice(struct proc *curp, struct proc *chgp, int n) error = EPERM; goto out; } - if (n > PRIO_MAX) + if (n > PRIO_MAX) { n = PRIO_MAX; - if (n < PRIO_MIN) + } + if (n < PRIO_MIN) { n = PRIO_MIN; + } if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) { error = EACCES; goto out; } #if CONFIG_MACF error = mac_proc_check_sched(curp, chgp); - if (error) + if (error) { goto out; + } #endif proc_lock(chgp); chgp->p_nice = n; @@ -534,7 +562,7 @@ donice(struct proc *curp, struct proc *chgp, int n) out: kauth_cred_unref(&ucred); kauth_cred_unref(&my_cred); - return (error); + return error; } static int @@ -550,8 +578,8 @@ set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority) /* TODO: Entitlement instead of uid check */ if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) && - kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && - kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) { + kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && + kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) { error = EPERM; goto out; } @@ -563,26 +591,26 @@ set_gpudeny_proc(struct proc *curp, struct proc *targetp, int priority) #if CONFIG_MACF error = mac_proc_check_sched(curp, targetp); - if (error) + if (error) { goto out; + } #endif switch (priority) { - case PRIO_DARWIN_GPU_DENY: - 
task_set_gpu_denied(proc_task(targetp), TRUE); - break; - case PRIO_DARWIN_GPU_ALLOW: - task_set_gpu_denied(proc_task(targetp), FALSE); - break; - default: - error = EINVAL; - goto out; + case PRIO_DARWIN_GPU_DENY: + task_set_gpu_denied(proc_task(targetp), TRUE); + break; + case PRIO_DARWIN_GPU_ALLOW: + task_set_gpu_denied(proc_task(targetp), FALSE); + break; + default: + error = EINVAL; + goto out; } out: kauth_cred_unref(&target_cred); - return (error); - + return error; } static int @@ -597,7 +625,7 @@ proc_set_darwin_role(proc_t curp, proc_t targetp, int priority) target_cred = kauth_cred_proc_ref(targetp); if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) && - kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && + kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) { if (priv_check_cred(ucred, PRIV_SETPRIORITY_DARWIN_ROLE, 0) != 0) { error = EPERM; @@ -607,8 +635,9 @@ proc_set_darwin_role(proc_t curp, proc_t targetp, int priority) if (curp != targetp) { #if CONFIG_MACF - if ((error = mac_proc_check_sched(curp, targetp))) + if ((error = mac_proc_check_sched(curp, targetp))) { goto out; + } #endif } @@ -620,15 +649,16 @@ proc_set_darwin_role(proc_t curp, proc_t targetp, int priority) integer_t role = 0; - if ((error = proc_darwin_role_to_task_role(priority, &role))) + if ((error = proc_darwin_role_to_task_role(priority, &role))) { goto out; + } proc_set_task_policy(proc_task(targetp), TASK_POLICY_ATTRIBUTE, - TASK_POLICY_ROLE, role); + TASK_POLICY_ROLE, role); out: kauth_cred_unref(&target_cred); - return (error); + return error; } static int @@ -643,7 +673,7 @@ proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority) target_cred = kauth_cred_proc_ref(targetp); if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) && - kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && + kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) { error = EPERM; goto out; @@ -651,8 +681,9 @@ proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority) if (curp != targetp) { #if CONFIG_MACF - if ((error = mac_proc_check_sched(curp, targetp))) + if ((error = mac_proc_check_sched(curp, targetp))) { goto out; + } #endif } @@ -662,7 +693,7 @@ proc_get_darwin_role(proc_t curp, proc_t targetp, int *priority) out: kauth_cred_unref(&target_cred); - return (error); + return error; } @@ -689,7 +720,7 @@ get_background_proc(struct proc *curp, struct proc *targetp, int *priority) out: kauth_cred_unref(&target_cred); - return (error); + return error; } static int @@ -708,42 +739,42 @@ do_background_proc(struct proc *curp, struct proc *targetp, int priority) target_cred = kauth_cred_proc_ref(targetp); if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) && - kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && - kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) - { + kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) && + kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) { error = EPERM; goto out; } #if CONFIG_MACF error = mac_proc_check_sched(curp, targetp); - if (error) + if (error) { goto out; + } #endif external = (curp == targetp) ? 
TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL; switch (priority) { - case PRIO_DARWIN_BG: - enable = TASK_POLICY_ENABLE; - break; - case PRIO_DARWIN_NONUI: - /* ignored for compatibility */ - goto out; - default: - /* TODO: EINVAL if priority != 0 */ - enable = TASK_POLICY_DISABLE; - break; + case PRIO_DARWIN_BG: + enable = TASK_POLICY_ENABLE; + break; + case PRIO_DARWIN_NONUI: + /* ignored for compatibility */ + goto out; + default: + /* TODO: EINVAL if priority != 0 */ + enable = TASK_POLICY_DISABLE; + break; } proc_set_task_policy(proc_task(targetp), external, TASK_POLICY_DARWIN_BG, enable); out: kauth_cred_unref(&target_cred); - return (error); + return error; } -static void +static void do_background_socket(struct proc *p, thread_t thread) { #if SOCKETS @@ -753,10 +784,11 @@ do_background_socket(struct proc *p, thread_t thread) proc_fdlock(p); - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { background = proc_get_effective_thread_policy(thread, TASK_POLICY_ALL_SOCKETS_BG); - else + } else { background = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ALL_SOCKETS_BG); + } if (background) { /* @@ -786,16 +818,16 @@ do_background_socket(struct proc *p, thread_t thread) } } else { /* disable networking IO throttle. - * NOTE - It is a known limitation of the current design that we - * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for - * sockets created by other threads within this process. + * NOTE - It is a known limitation of the current design that we + * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for + * sockets created by other threads within this process. */ fdp = p->p_fd; - for ( i = 0; i < fdp->fd_nfiles; i++ ) { + for (i = 0; i < fdp->fd_nfiles; i++) { struct socket *sockp; - fp = fdp->fd_ofiles[ i ]; - if (fp == NULL || (fdp->fd_ofileflags[ i ] & UF_RESERVED) != 0) { + fp = fdp->fd_ofiles[i]; + if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) { continue; } if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) { @@ -841,12 +873,13 @@ do_background_thread(thread_t thread, int priority) ut = get_bsdthread_info(thread); /* Backgrounding is unsupported for threads in vfork */ - if ((ut->uu_flag & UT_VFORK) != 0) - return(EPERM); + if ((ut->uu_flag & UT_VFORK) != 0) { + return EPERM; + } /* Backgrounding is unsupported for workq threads */ if (thread_is_static_param(thread)) { - return(EPERM); + return EPERM; } /* Not allowed to combine QoS and DARWIN_BG, doing so strips the QoS */ @@ -878,10 +911,11 @@ setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval) int error; if ((error = copyin(uap->rlp, (caddr_t)&alim, - sizeof (struct rlimit)))) - return (error); + sizeof(struct rlimit)))) { + return error; + } - return (dosetrlimit(p, uap->which, &alim)); + return dosetrlimit(p, uap->which, &alim); } /* @@ -899,53 +933,54 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) { struct rlimit *alimp; int error; - kern_return_t kr; + kern_return_t kr; int posix = (which & _RLIMIT_POSIX_FLAG) ? 
1 : 0; /* Mask out POSIX flag, saved above */ which &= ~_RLIMIT_POSIX_FLAG; - if (which >= RLIM_NLIMITS) - return (EINVAL); + if (which >= RLIM_NLIMITS) { + return EINVAL; + } alimp = &p->p_rlimit[which]; - if (limp->rlim_cur > limp->rlim_max) + if (limp->rlim_cur > limp->rlim_max) { return EINVAL; + } - if (limp->rlim_cur > alimp->rlim_max || - limp->rlim_max > alimp->rlim_max) + if (limp->rlim_cur > alimp->rlim_max || + limp->rlim_max > alimp->rlim_max) { if ((error = suser(kauth_cred_get(), &p->p_acflag))) { - return (error); + return error; + } } proc_limitblock(p); if ((error = proc_limitreplace(p)) != 0) { proc_limitunblock(p); - return(error); + return error; } alimp = &p->p_rlimit[which]; - - switch (which) { + switch (which) { case RLIMIT_CPU: if (limp->rlim_cur == RLIM_INFINITY) { task_vtimer_clear(p->task, TASK_VTIMER_RLIM); timerclear(&p->p_rlim_cpu); - } - else { - task_absolutetime_info_data_t tinfo; - mach_msg_type_number_t count; - struct timeval ttv, tv; - clock_sec_t tv_sec; - clock_usec_t tv_usec; + } else { + task_absolutetime_info_data_t tinfo; + mach_msg_type_number_t count; + struct timeval ttv, tv; + clock_sec_t tv_sec; + clock_usec_t tv_usec; count = TASK_ABSOLUTETIME_INFO_COUNT; task_info(p->task, TASK_ABSOLUTETIME_INFO, - (task_info_t)&tinfo, &count); + (task_info_t)&tinfo, &count); absolutetime_to_microtime(tinfo.total_user + tinfo.total_system, - &tv_sec, &tv_usec); + &tv_sec, &tv_usec); ttv.tv_sec = tv_sec; ttv.tv_usec = tv_usec; @@ -954,9 +989,9 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) timersub(&tv, &ttv, &p->p_rlim_cpu); timerclear(&tv); - if (timercmp(&p->p_rlim_cpu, &tv, >)) + if (timercmp(&p->p_rlim_cpu, &tv, >)) { task_vtimer_set(p->task, TASK_VTIMER_RLIM); - else { + } else { task_vtimer_clear(p->task, TASK_VTIMER_RLIM); timerclear(&p->p_rlim_cpu); @@ -967,30 +1002,39 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) break; case RLIMIT_DATA: - if (limp->rlim_cur > maxdmap) + if (limp->rlim_cur > maxdmap) { limp->rlim_cur = maxdmap; - if (limp->rlim_max > maxdmap) + } + if (limp->rlim_max > maxdmap) { limp->rlim_max = maxdmap; + } break; case RLIMIT_STACK: + if (p->p_lflag & P_LCUSTOM_STACK) { + /* Process has a custom stack set - rlimit cannot be used to change it */ + error = EINVAL; + goto out; + } + /* Disallow illegal stack size instead of clipping */ if (limp->rlim_cur > maxsmap || limp->rlim_max > maxsmap) { if (posix) { error = EINVAL; goto out; - } - else { - /* - * 4797860 - workaround poorly written installers by - * doing previous implementation (< 10.5) when caller + } else { + /* + * 4797860 - workaround poorly written installers by + * doing previous implementation (< 10.5) when caller * is non-POSIX conforming. 
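 *
 * (Editor's example, not part of the original patch: with the POSIX
 * flag set,
 *
 *	struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
 *	setrlimit(RLIMIT_STACK, &rl);
 *
 * fails with EINVAL, whereas this legacy path silently clamps both
 * rlim_cur and rlim_max to maxsmap.)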
*/ - if (limp->rlim_cur > maxsmap) + if (limp->rlim_cur > maxsmap) { limp->rlim_cur = maxsmap; - if (limp->rlim_max > maxsmap) + } + if (limp->rlim_max > maxsmap) { limp->rlim_max = maxsmap; + } } } @@ -1002,15 +1046,15 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) if (limp->rlim_cur > alimp->rlim_cur) { user_addr_t addr; user_size_t size; - - /* grow stack */ - size = round_page_64(limp->rlim_cur); - size -= round_page_64(alimp->rlim_cur); + + /* grow stack */ + size = round_page_64(limp->rlim_cur); + size -= round_page_64(alimp->rlim_cur); addr = p->user_stack - round_page_64(limp->rlim_cur); - kr = mach_vm_protect(current_map(), - addr, size, - FALSE, VM_PROT_DEFAULT); + kr = mach_vm_protect(current_map(), + addr, size, + FALSE, VM_PROT_DEFAULT); if (kr != KERN_SUCCESS) { error = EINVAL; goto out; @@ -1020,7 +1064,7 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) user_size_t size; user_addr_t cur_sp; - /* shrink stack */ + /* shrink stack */ /* * First check if new stack limit would agree @@ -1028,13 +1072,13 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) * Get the current thread's stack pointer... */ cur_sp = thread_adjuserstack(current_thread(), - 0); + 0); if (cur_sp <= p->user_stack && cur_sp > (p->user_stack - - round_page_64(alimp->rlim_cur))) { + round_page_64(alimp->rlim_cur))) { /* stack pointer is in main stack */ if (cur_sp <= (p->user_stack - - round_page_64(limp->rlim_cur))) { + round_page_64(limp->rlim_cur))) { /* * New limit would cause * current usage to be invalid: @@ -1048,15 +1092,15 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) error = EINVAL; goto out; } - + size = round_page_64(alimp->rlim_cur); size -= round_page_64(limp->rlim_cur); addr = p->user_stack - round_page_64(alimp->rlim_cur); kr = mach_vm_protect(current_map(), - addr, size, - FALSE, VM_PROT_NONE); + addr, size, + FALSE, VM_PROT_NONE); if (kr != KERN_SUCCESS) { error = EINVAL; goto out; @@ -1067,58 +1111,62 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) break; case RLIMIT_NOFILE: - /* + /* * Only root can set the maxfiles limits, as it is * systemwide resource. If we are expecting POSIX behavior, * instead of clamping the value, return EINVAL. We do this * because historically, people have been able to attempt to * set RLIM_INFINITY to get "whatever the maximum is". - */ - if ( kauth_cred_issuser(kauth_cred_get()) ) { + */ + if (kauth_cred_issuser(kauth_cred_get())) { if (limp->rlim_cur != alimp->rlim_cur && limp->rlim_cur > (rlim_t)maxfiles) { - if (posix) { + if (posix) { error = EINVAL; goto out; } limp->rlim_cur = maxfiles; } if (limp->rlim_max != alimp->rlim_max && - limp->rlim_max > (rlim_t)maxfiles) + limp->rlim_max > (rlim_t)maxfiles) { limp->rlim_max = maxfiles; - } - else { + } + } else { if (limp->rlim_cur != alimp->rlim_cur && limp->rlim_cur > (rlim_t)maxfilesperproc) { - if (posix) { + if (posix) { error = EINVAL; goto out; } limp->rlim_cur = maxfilesperproc; } if (limp->rlim_max != alimp->rlim_max && - limp->rlim_max > (rlim_t)maxfilesperproc) + limp->rlim_max > (rlim_t)maxfilesperproc) { limp->rlim_max = maxfilesperproc; + } } break; case RLIMIT_NPROC: - /* + /* * Only root can set to the maxproc limits, as it is * systemwide resource; all others are limited to * maxprocperuid (presumably less than maxproc). 
*/ - if ( kauth_cred_issuser(kauth_cred_get()) ) { - if (limp->rlim_cur > (rlim_t)maxproc) + if (kauth_cred_issuser(kauth_cred_get())) { + if (limp->rlim_cur > (rlim_t)maxproc) { limp->rlim_cur = maxproc; - if (limp->rlim_max > (rlim_t)maxproc) + } + if (limp->rlim_max > (rlim_t)maxproc) { limp->rlim_max = maxproc; - } - else { - if (limp->rlim_cur > (rlim_t)maxprocperuid) + } + } else { + if (limp->rlim_cur > (rlim_t)maxprocperuid) { limp->rlim_cur = maxprocperuid; - if (limp->rlim_max > (rlim_t)maxprocperuid) + } + if (limp->rlim_max > (rlim_t)maxprocperuid) { limp->rlim_max = maxprocperuid; + } } break; @@ -1129,7 +1177,6 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) vm_map_set_user_wire_limit(current_map(), limp->rlim_cur); break; - } /* switch... */ proc_lock(p); *alimp = *limp; @@ -1137,7 +1184,7 @@ dosetrlimit(struct proc *p, u_int which, struct rlimit *limp) error = 0; out: proc_limitunblock(p); - return (error); + return error; } /* ARGSUSED */ @@ -1152,11 +1199,12 @@ getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval) */ uap->which &= ~_RLIMIT_POSIX_FLAG; - if (uap->which >= RLIM_NLIMITS) - return (EINVAL); + if (uap->which >= RLIM_NLIMITS) { + return EINVAL; + } proc_limitget(p, uap->which, &lim); - return (copyout((caddr_t)&lim, - uap->rlp, sizeof (struct rlimit))); + return copyout((caddr_t)&lim, + uap->rlp, sizeof(struct rlimit)); } /* @@ -1167,12 +1215,13 @@ getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval) void calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip) { - task_t task; + task_t task; timerclear(up); timerclear(sp); - if (ip != NULL) + if (ip != NULL) { timerclear(ip); + } task = p->task; if (task) { @@ -1181,11 +1230,11 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *i task_events_info_data_t teventsinfo; mach_msg_type_number_t task_info_count, task_ttimes_count; mach_msg_type_number_t task_events_count; - struct timeval ut,st; + struct timeval ut, st; - task_info_count = MACH_TASK_BASIC_INFO_COUNT; + task_info_count = MACH_TASK_BASIC_INFO_COUNT; task_info(task, MACH_TASK_BASIC_INFO, - (task_info_t)&tinfo, &task_info_count); + (task_info_t)&tinfo, &task_info_count); ut.tv_sec = tinfo.user_time.seconds; ut.tv_usec = tinfo.user_time.microseconds; st.tv_sec = tinfo.system_time.seconds; @@ -1195,7 +1244,7 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *i task_ttimes_count = TASK_THREAD_TIMES_INFO_COUNT; task_info(task, TASK_THREAD_TIMES_INFO, - (task_info_t)&ttimesinfo, &task_ttimes_count); + (task_info_t)&ttimesinfo, &task_ttimes_count); ut.tv_sec = ttimesinfo.user_time.seconds; ut.tv_usec = ttimesinfo.user_time.microseconds; @@ -1206,19 +1255,20 @@ calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *i task_events_count = TASK_EVENTS_INFO_COUNT; task_info(task, TASK_EVENTS_INFO, - (task_info_t)&teventsinfo, &task_events_count); + (task_info_t)&teventsinfo, &task_events_count); /* * No need to lock "p": this does not need to be * completely consistent, right ? 
*/ p->p_stats->p_ru.ru_minflt = (teventsinfo.faults - - teventsinfo.pageins); + teventsinfo.pageins); p->p_stats->p_ru.ru_majflt = teventsinfo.pageins; p->p_stats->p_ru.ru_nivcsw = (teventsinfo.csw - - p->p_stats->p_ru.ru_nvcsw); - if (p->p_stats->p_ru.ru_nivcsw < 0) + p->p_stats->p_ru.ru_nvcsw); + if (p->p_stats->p_ru.ru_nivcsw < 0) { p->p_stats->p_ru.ru_nivcsw = 0; + } p->p_stats->p_ru.ru_maxrss = tinfo.resident_size_max; } @@ -1234,8 +1284,8 @@ getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval) struct rusage *rup, rubuf; struct user64_rusage rubuf64 = {}; struct user32_rusage rubuf32 = {}; - size_t retsize = sizeof(rubuf); /* default: 32 bits */ - caddr_t retbuf = (caddr_t)&rubuf; /* default: 32 bits */ + size_t retsize = sizeof(rubuf); /* default: 32 bits */ + caddr_t retbuf = (caddr_t)&rubuf; /* default: 32 bits */ struct timeval utime; struct timeval stime; @@ -1261,7 +1311,7 @@ getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval) break; default: - return (EINVAL); + return EINVAL; } if (IS_64BIT_PROCESS(p)) { retsize = sizeof(rubuf64); @@ -1273,7 +1323,7 @@ getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval) munge_user32_rusage(&rubuf, &rubuf32); } - return (copyout(retbuf, uap->rusage, retsize)); + return copyout(retbuf, uap->rusage, retsize); } void @@ -1284,16 +1334,18 @@ ruadd(struct rusage *ru, struct rusage *ru2) timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime); timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime); - if (ru->ru_maxrss < ru2->ru_maxrss) + if (ru->ru_maxrss < ru2->ru_maxrss) { ru->ru_maxrss = ru2->ru_maxrss; + } ip = &ru->ru_first; ip2 = &ru2->ru_first; - for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) + for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--) { *ip++ += *ip2++; + } } /* * Add the rusage stats of child in parent. - * + * * It adds rusage statistics of child process and statistics of all its * children to its parent. 
* @@ -1303,17 +1355,17 @@ void update_rusage_info_child(struct rusage_info_child *ri, rusage_info_current *ri_current) { ri->ri_child_user_time += (ri_current->ri_user_time + - ri_current->ri_child_user_time); + ri_current->ri_child_user_time); ri->ri_child_system_time += (ri_current->ri_system_time + - ri_current->ri_child_system_time); + ri_current->ri_child_system_time); ri->ri_child_pkg_idle_wkups += (ri_current->ri_pkg_idle_wkups + - ri_current->ri_child_pkg_idle_wkups); + ri_current->ri_child_pkg_idle_wkups); ri->ri_child_interrupt_wkups += (ri_current->ri_interrupt_wkups + - ri_current->ri_child_interrupt_wkups); + ri_current->ri_child_interrupt_wkups); ri->ri_child_pageins += (ri_current->ri_pageins + - ri_current->ri_child_pageins); + ri_current->ri_child_pageins); ri->ri_child_elapsed_abstime += ((ri_current->ri_proc_exit_abstime - - ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime); + ri_current->ri_proc_start_abstime) + ri_current->ri_child_elapsed_abstime); } void @@ -1334,7 +1386,7 @@ proc_limitdrop(proc_t p, int exiting) proc_list_lock(); - if (--p->p_limit->pl_refcnt == 0) { + if (--p->p_limit->pl_refcnt == 0) { freelim = p->p_limit; p->p_limit = NULL; } @@ -1344,10 +1396,12 @@ proc_limitdrop(proc_t p, int exiting) } proc_list_unlock(); - if (freelim != NULL) + if (freelim != NULL) { FREE_ZONE(freelim, sizeof *p->p_limit, M_PLIMIT); - if (freeoldlim != NULL) + } + if (freeoldlim != NULL) { FREE_ZONE(freeoldlim, sizeof *p->p_olimit, M_PLIMIT); + } } @@ -1371,7 +1425,6 @@ proc_limitblock(proc_t p) } p->p_lflag |= P_LLIMCHANGE; proc_unlock(p); - } @@ -1398,15 +1451,15 @@ proc_limitreplace(proc_t p) if (p->p_limit->pl_refcnt == 1) { proc_list_unlock(); - return(0); + return 0; } - + proc_list_unlock(); MALLOC_ZONE(copy, struct plimit *, - sizeof(struct plimit), M_PLIMIT, M_WAITOK); + sizeof(struct plimit), M_PLIMIT, M_WAITOK); if (copy == NULL) { - return(ENOMEM); + return ENOMEM; } proc_list_lock(); @@ -1418,7 +1471,7 @@ proc_limitreplace(proc_t p) p->p_limit = copy; proc_list_unlock(); - return(0); + return 0; } static int @@ -1446,238 +1499,245 @@ iopolicysys(struct proc *p, struct iopolicysys_args *uap, int32_t *retval) int error = 0; struct _iopol_param_t iop_param; - if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0) + if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0) { goto out; + } switch (iop_param.iop_iotype) { - case IOPOL_TYPE_DISK: - error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); - if (error == EIDRM) { - *retval = -2; - error = 0; - } - if (error) - goto out; - break; - case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY: - error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); - if (error) - goto out; - break; - case IOPOL_TYPE_VFS_ATIME_UPDATES: - error = iopolicysys_vfs_atime_updates(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); - if (error) - goto out; - break; - default: - error = EINVAL; + case IOPOL_TYPE_DISK: + error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); + if (error == EIDRM) { + *retval = -2; + error = 0; + } + if (error) { + goto out; + } + break; + case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY: + error = iopolicysys_vfs_hfs_case_sensitivity(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); + if (error) { + goto out; + } + break; + case IOPOL_TYPE_VFS_ATIME_UPDATES: + error = iopolicysys_vfs_atime_updates(p, 
uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param); + if (error) { goto out; + } + break; + default: + error = EINVAL; + goto out; } /* Individual iotype handlers are expected to update iop_param, if requested with a GET command */ if (uap->cmd == IOPOL_CMD_GET) { error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param)); - if (error) + if (error) { goto out; + } } out: - return (error); + return error; } static int iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) { - int error = 0; - thread_t thread; - int policy_flavor; + int error = 0; + thread_t thread; + int policy_flavor; /* Validate scope */ switch (scope) { - case IOPOL_SCOPE_PROCESS: - thread = THREAD_NULL; - policy_flavor = TASK_POLICY_IOPOL; - break; + case IOPOL_SCOPE_PROCESS: + thread = THREAD_NULL; + policy_flavor = TASK_POLICY_IOPOL; + break; + + case IOPOL_SCOPE_THREAD: + thread = current_thread(); + policy_flavor = TASK_POLICY_IOPOL; - case IOPOL_SCOPE_THREAD: - thread = current_thread(); - policy_flavor = TASK_POLICY_IOPOL; - - /* Not allowed to combine QoS and (non-PASSIVE) IO policy, doing so strips the QoS */ - if (cmd == IOPOL_CMD_SET && thread_has_qos_policy(thread)) { - switch (policy) { - case IOPOL_DEFAULT: - case IOPOL_PASSIVE: - break; - case IOPOL_UTILITY: - case IOPOL_THROTTLE: - case IOPOL_IMPORTANT: - case IOPOL_STANDARD: - if (!thread_is_static_param(thread)) { - thread_remove_qos_policy(thread); - /* - * This is not an error case, this is to return a marker to user-space that - * we stripped the thread of its QoS class. - */ - error = EIDRM; - break; - } - /* otherwise, fall through to the error case. */ - default: - error = EINVAL; - goto out; + /* Not allowed to combine QoS and (non-PASSIVE) IO policy, doing so strips the QoS */ + if (cmd == IOPOL_CMD_SET && thread_has_qos_policy(thread)) { + switch (policy) { + case IOPOL_DEFAULT: + case IOPOL_PASSIVE: + break; + case IOPOL_UTILITY: + case IOPOL_THROTTLE: + case IOPOL_IMPORTANT: + case IOPOL_STANDARD: + if (!thread_is_static_param(thread)) { + thread_remove_qos_policy(thread); + /* + * This is not an error case, this is to return a marker to user-space that + * we stripped the thread of its QoS class. + */ + error = EIDRM; + break; } + /* otherwise, fall through to the error case. 
*/ + default: + error = EINVAL; + goto out; } - break; + } + break; - case IOPOL_SCOPE_DARWIN_BG: + case IOPOL_SCOPE_DARWIN_BG: #if CONFIG_EMBEDDED - /* Embedded doesn't want this as BG is always IOPOL_THROTTLE */ - error = ENOTSUP; - goto out; + /* Embedded doesn't want this as BG is always IOPOL_THROTTLE */ + error = ENOTSUP; + goto out; #else /* CONFIG_EMBEDDED */ - thread = THREAD_NULL; - policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL; - break; + thread = THREAD_NULL; + policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL; + break; #endif /* CONFIG_EMBEDDED */ - default: - error = EINVAL; - goto out; + default: + error = EINVAL; + goto out; } /* Validate policy */ if (cmd == IOPOL_CMD_SET) { switch (policy) { - case IOPOL_DEFAULT: - if (scope == IOPOL_SCOPE_DARWIN_BG) { - /* the current default BG throttle level is UTILITY */ - policy = IOPOL_UTILITY; - } else { - policy = IOPOL_IMPORTANT; - } - break; - case IOPOL_UTILITY: - /* fall-through */ - case IOPOL_THROTTLE: - /* These levels are OK */ - break; - case IOPOL_IMPORTANT: - /* fall-through */ - case IOPOL_STANDARD: - /* fall-through */ - case IOPOL_PASSIVE: - if (scope == IOPOL_SCOPE_DARWIN_BG) { - /* These levels are invalid for BG */ - error = EINVAL; - goto out; - } else { - /* OK for other scopes */ - } - break; - default: + case IOPOL_DEFAULT: + if (scope == IOPOL_SCOPE_DARWIN_BG) { + /* the current default BG throttle level is UTILITY */ + policy = IOPOL_UTILITY; + } else { + policy = IOPOL_IMPORTANT; + } + break; + case IOPOL_UTILITY: + /* fall-through */ + case IOPOL_THROTTLE: + /* These levels are OK */ + break; + case IOPOL_IMPORTANT: + /* fall-through */ + case IOPOL_STANDARD: + /* fall-through */ + case IOPOL_PASSIVE: + if (scope == IOPOL_SCOPE_DARWIN_BG) { + /* These levels are invalid for BG */ error = EINVAL; goto out; + } else { + /* OK for other scopes */ + } + break; + default: + error = EINVAL; + goto out; } } /* Perform command */ - switch(cmd) { - case IOPOL_CMD_SET: - if (thread != THREAD_NULL) - proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy); - else - proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy); - break; - case IOPOL_CMD_GET: - if (thread != THREAD_NULL) - policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor); - else - policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor); - iop_param->iop_policy = policy; - break; - default: - error = EINVAL; /* unknown command */ - break; + switch (cmd) { + case IOPOL_CMD_SET: + if (thread != THREAD_NULL) { + proc_set_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor, policy); + } else { + proc_set_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor, policy); + } + break; + case IOPOL_CMD_GET: + if (thread != THREAD_NULL) { + policy = proc_get_thread_policy(thread, TASK_POLICY_INTERNAL, policy_flavor); + } else { + policy = proc_get_task_policy(current_task(), TASK_POLICY_INTERNAL, policy_flavor); + } + iop_param->iop_policy = policy; + break; + default: + error = EINVAL; /* unknown command */ + break; } out: - return (error); + return error; } static int iopolicysys_vfs_hfs_case_sensitivity(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) { - int error = 0; + int error = 0; /* Validate scope */ switch (scope) { - case IOPOL_SCOPE_PROCESS: - /* Only process OK */ - break; - default: - error = EINVAL; - goto out; + case IOPOL_SCOPE_PROCESS: + /* Only process OK */ + break; + default: + error = EINVAL; + goto out; } 
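/*
 * Illustrative sketch, not part of this patch: user space reaches these
 * per-iotype handlers through the setiopolicy_np(3)/getiopolicy_np(3)
 * wrappers declared in <sys/resource.h>, e.g. for the IOPOL_TYPE_DISK
 * handler above:
 *
 *     #include <stdio.h>
 *     #include <sys/resource.h>
 *
 *     // Mark all disk I/O issued by this process as throttled.
 *     if (setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS,
 *             IOPOL_THROTTLE) != 0)
 *         perror("setiopolicy_np");
 *
 *     // Read the policy back (the IOPOL_CMD_GET path); -1 on error.
 *     int pol = getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS);
 *     printf("disk policy: %d\n", pol);
 */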
/* Validate policy */ if (cmd == IOPOL_CMD_SET) { switch (policy) { - case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT: - /* fall-through */ - case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE: - /* These policies are OK */ - break; - default: - error = EINVAL; - goto out; + case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT: + /* fall-through */ + case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE: + /* These policies are OK */ + break; + default: + error = EINVAL; + goto out; } } /* Perform command */ - switch(cmd) { - case IOPOL_CMD_SET: - if (0 == kauth_cred_issuser(kauth_cred_get())) { - /* If it's a non-root process, it needs to have the entitlement to set the policy */ - boolean_t entitled = FALSE; - entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.iopol.case_sensitivity"); - if (!entitled) { - error = EPERM; - goto out; - } + switch (cmd) { + case IOPOL_CMD_SET: + if (0 == kauth_cred_issuser(kauth_cred_get())) { + /* If it's a non-root process, it needs to have the entitlement to set the policy */ + boolean_t entitled = FALSE; + entitled = IOTaskHasEntitlement(current_task(), "com.apple.private.iopol.case_sensitivity"); + if (!entitled) { + error = EPERM; + goto out; } + } - switch (policy) { - case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT: - OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy); - break; - case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE: - OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy); - break; - default: - error = EINVAL; - goto out; - } - + switch (policy) { + case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT: + OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy); break; - case IOPOL_CMD_GET: - iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) - ? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE - : IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT; + case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE: + OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy); break; default: - error = EINVAL; /* unknown command */ - break; + error = EINVAL; + goto out; + } + + break; + case IOPOL_CMD_GET: + iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) + ? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE + : IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT; + break; + default: + error = EINVAL; /* unknown command */ + break; } out: - return (error); + return error; } static inline int get_thread_atime_policy(struct uthread *ut) { - return (ut->uu_flag & UT_ATIME_UPDATE)? IOPOL_ATIME_UPDATES_OFF: IOPOL_ATIME_UPDATES_DEFAULT; + return (ut->uu_flag & UT_ATIME_UPDATE) ? IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT; } static inline void @@ -1703,62 +1763,64 @@ set_task_atime_policy(struct proc *p, int policy) static inline int get_task_atime_policy(struct proc *p) { - return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES)? IOPOL_ATIME_UPDATES_OFF: IOPOL_ATIME_UPDATES_DEFAULT; + return (p->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) ? 
IOPOL_ATIME_UPDATES_OFF : IOPOL_ATIME_UPDATES_DEFAULT; } static int iopolicysys_vfs_atime_updates(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param) { - int error = 0; - thread_t thread; + int error = 0; + thread_t thread; /* Validate scope */ switch (scope) { - case IOPOL_SCOPE_THREAD: - thread = current_thread(); - break; - case IOPOL_SCOPE_PROCESS: - thread = THREAD_NULL; - break; - default: - error = EINVAL; - goto out; + case IOPOL_SCOPE_THREAD: + thread = current_thread(); + break; + case IOPOL_SCOPE_PROCESS: + thread = THREAD_NULL; + break; + default: + error = EINVAL; + goto out; } /* Validate policy */ if (cmd == IOPOL_CMD_SET) { switch (policy) { - case IOPOL_ATIME_UPDATES_DEFAULT: - case IOPOL_ATIME_UPDATES_OFF: - break; - default: - error = EINVAL; - goto out; + case IOPOL_ATIME_UPDATES_DEFAULT: + case IOPOL_ATIME_UPDATES_OFF: + break; + default: + error = EINVAL; + goto out; } } /* Perform command */ - switch(cmd) { - case IOPOL_CMD_SET: - if (thread != THREAD_NULL) - set_thread_atime_policy(get_bsdthread_info(thread), policy); - else - set_task_atime_policy(p, policy); - break; - case IOPOL_CMD_GET: - if (thread != THREAD_NULL) - policy = get_thread_atime_policy(get_bsdthread_info(thread)); - else - policy = get_task_atime_policy(p); - iop_param->iop_policy = policy; - break; - default: - error = EINVAL; /* unknown command */ - break; + switch (cmd) { + case IOPOL_CMD_SET: + if (thread != THREAD_NULL) { + set_thread_atime_policy(get_bsdthread_info(thread), policy); + } else { + set_task_atime_policy(p, policy); + } + break; + case IOPOL_CMD_GET: + if (thread != THREAD_NULL) { + policy = get_thread_atime_policy(get_bsdthread_info(thread)); + } else { + policy = get_task_atime_policy(p); + } + iop_param->iop_policy = policy; + break; + default: + error = EINVAL; /* unknown command */ + break; } out: - return (error); + return error; } /* BSD call back function for task_policy networking changes */ @@ -1786,7 +1848,7 @@ gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor) assert(p->p_stats != NULL); memset(ru, 0, sizeof(*ru)); - switch(flavor) { + switch (flavor) { case RUSAGE_INFO_V4: ru->ri_logical_writes = get_task_logical_writes(p->task); ru->ri_lifetime_max_phys_footprint = get_task_phys_footprint_lifetime_max(p->task); @@ -1799,18 +1861,18 @@ gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor) case RUSAGE_INFO_V3: fill_task_qos_rusage(p->task, ru); fill_task_billed_usage(p->task, ru); - /* fall through */ + /* fall through */ case RUSAGE_INFO_V2: fill_task_io_rusage(p->task, ru); - /* fall through */ + /* fall through */ case RUSAGE_INFO_V1: /* * p->p_stats->ri_child statistics are protected under proc lock. 
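 *
 * Illustrative sketch, not part of this patch: user space can fetch the
 * flavors assembled here via the libproc wrapper proc_pid_rusage(3),
 * assuming a process the caller is allowed to inspect:
 *
 *     #include <stdio.h>
 *     #include <unistd.h>
 *     #include <libproc.h>
 *
 *     rusage_info_v4 ri;
 *     if (proc_pid_rusage(getpid(), RUSAGE_INFO_V4,
 *             (rusage_info_t *)&ri) == 0) {
 *         // fields populated by the fall-through chain below
 *         printf("phys footprint: %llu\n", ri.ri_phys_footprint);
 *     }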
*/ proc_lock(p); - + ri_child = &(p->p_stats->ri_child); ru->ri_child_user_time = ri_child->ri_child_user_time; ru->ri_child_system_time = ri_child->ri_child_system_time; @@ -1820,10 +1882,10 @@ gather_rusage_info(proc_t p, rusage_info_current *ru, int flavor) ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime; proc_unlock(p); - /* fall through */ + /* fall through */ case RUSAGE_INFO_V0: - proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof (ru->ri_uuid)); + proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof(ru->ri_uuid)); fill_task_rusage(p->task, ru); ru->ri_proc_start_abstime = p->p_stats->ps_start; } @@ -1862,11 +1924,11 @@ proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie return EINVAL; } - if(size == 0) { + if (size == 0) { return EINVAL; } - /* + /* * If task is still alive, collect info from the live task itself. * Otherwise, look to the cached info in the zombie proc. */ @@ -1879,7 +1941,7 @@ proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie error = copyout(&p->p_ru->ri, buffer, size); } - return (error); + return error; } static int @@ -1914,14 +1976,14 @@ mach_to_bsd_rv(int mach_rv) int proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, __unused int32_t *retval) { - proc_t targetp; - int error = 0; - struct proc_rlimit_control_wakeupmon wakeupmon_args; + proc_t targetp; + int error = 0; + struct proc_rlimit_control_wakeupmon wakeupmon_args; uint32_t cpumon_flags; uint32_t cpulimits_flags; kauth_cred_t my_cred, target_cred; #if CONFIG_LEDGER_INTERVAL_MAX - uint32_t footprint_interval_flags; + uint32_t footprint_interval_flags; uint64_t interval_max_footprint; #endif /* CONFIG_LEDGER_INTERVAL_MAX */ @@ -1934,7 +1996,7 @@ proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *ua /* proc_self() can return NULL for an exiting process */ if (targetp == PROC_NULL) { - return (ESRCH); + return ESRCH; } my_cred = kauth_cred_get(); @@ -1945,19 +2007,19 @@ proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *ua kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) { proc_rele(targetp); kauth_cred_unref(&target_cred); - return (EACCES); + return EACCES; } switch (uap->flavor) { case RLIMIT_WAKEUPS_MONITOR: - if ((error = copyin(uap->arg, &wakeupmon_args, sizeof (wakeupmon_args))) != 0) { + if ((error = copyin(uap->arg, &wakeupmon_args, sizeof(wakeupmon_args))) != 0) { break; } if ((error = mach_to_bsd_rv(task_wakeups_monitor_ctl(targetp->task, &wakeupmon_args.wm_flags, - &wakeupmon_args.wm_rate))) != 0) { + &wakeupmon_args.wm_rate))) != 0) { break; } - error = copyout(&wakeupmon_args, uap->arg, sizeof (wakeupmon_args)); + error = copyout(&wakeupmon_args, uap->arg, sizeof(wakeupmon_args)); break; case RLIMIT_CPU_USAGE_MONITOR: cpumon_flags = uap->arg; // XXX temporarily stashing flags in argp (12592127) @@ -1975,8 +2037,8 @@ proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *ua uint32_t ms_refill = 0; uint64_t ns_refill; - percent = (uint8_t)(cpulimits_flags & 0xffU); /* low 8 bits for percent */ - ms_refill = (cpulimits_flags >> 8) & 0xffffff; /* next 24 bits represent ms refill value */ + percent = (uint8_t)(cpulimits_flags & 0xffU); /* low 8 bits for percent */ + ms_refill = (cpulimits_flags >> 8) & 0xffffff; /* next 24 bits represent ms refill value */ if (percent >= 100) { error = EINVAL; break; @@ -2011,24 +2073,26 @@ proc_rlimit_control(__unused struct proc *p, struct 
proc_rlimit_control_args *ua /* * Return value from this function becomes errno to userland caller. */ - return (error); + return error; } /* * Return the current amount of CPU consumed by this thread (in either user or kernel mode) */ -int thread_selfusage(struct proc *p __unused, struct thread_selfusage_args *uap __unused, uint64_t *retval) +int +thread_selfusage(struct proc *p __unused, struct thread_selfusage_args *uap __unused, uint64_t *retval) { uint64_t runtime; runtime = thread_get_runtime_self(); *retval = runtime; - return (0); + return 0; } #if !MONOTONIC -int thread_selfcounts(__unused struct proc *p, __unused struct thread_selfcounts_args *uap, __unused int *ret_out) +int +thread_selfcounts(__unused struct proc *p, __unused struct thread_selfcounts_args *uap, __unused int *ret_out) { return ENOTSUP; } diff --git a/bsd/kern/kern_sfi.c b/bsd/kern/kern_sfi.c index f42fd4b1f..55e421bf1 100644 --- a/bsd/kern/kern_sfi.c +++ b/bsd/kern/kern_sfi.c @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ * */ @@ -61,111 +61,113 @@ static int proc_apply_sfi_managed(proc_t p, void * arg); -int sfi_ctl(struct proc *p __unused, struct sfi_ctl_args *uap, int32_t *retval __unused) +int +sfi_ctl(struct proc *p __unused, struct sfi_ctl_args *uap, int32_t *retval __unused) { - uint32_t operation = uap->operation; - int error = 0; - kern_return_t kret = KERN_SUCCESS; - uint64_t out_time = 0; + uint32_t operation = uap->operation; + int error = 0; + kern_return_t kret = KERN_SUCCESS; + uint64_t out_time = 0; switch (operation) { - case SFI_CTL_OPERATION_SFI_SET_WINDOW: - if (uap->out_time != USER_ADDR_NULL) { - return EINVAL; - } - if (uap->sfi_class != SFI_CLASS_UNSPECIFIED) { - return EINVAL; - } - - error = priv_check_cred(kauth_cred_get(), PRIV_SELECTIVE_FORCED_IDLE, 0); - if (error) { - dprintf("%s failed privilege check for sfi_ctl: %d\n", p->p_comm, error); - return (error); - } else { - dprintf("%s succeeded privilege check for sfi_ctl\n", p->p_comm); - } - - if (uap->time == 0) { - /* actually a cancel */ - kret = sfi_window_cancel(); - } else { - kret = sfi_set_window(uap->time); - } - - if (kret) { - error = EINVAL; - } - - break; - case SFI_CTL_OPERATION_SFI_GET_WINDOW: - if (uap->time != 0) { - return EINVAL; - } - if (uap->sfi_class != SFI_CLASS_UNSPECIFIED) { - return EINVAL; - } - - kret = sfi_get_window(&out_time); - if (kret == KERN_SUCCESS) { - error = copyout(&out_time, uap->out_time, sizeof(out_time)); - } else { - error = EINVAL; - } - - break; - case SFI_CTL_OPERATION_SET_CLASS_OFFTIME: - if (uap->out_time != USER_ADDR_NULL) { - return EINVAL; - } - - error = priv_check_cred(kauth_cred_get(), PRIV_SELECTIVE_FORCED_IDLE, 0); - if (error) { - dprintf("%s failed privilege check for sfi_ctl: %d\n", p->p_comm, error); - return (error); - } else { - dprintf("%s succeeded privilege check for sfi_ctl\n", p->p_comm); - } - - if (uap->time == 0) { - /* actually a cancel */ - kret = sfi_class_offtime_cancel(uap->sfi_class); - } else { - kret = sfi_set_class_offtime(uap->sfi_class, uap->time); - } - - if (kret) { - error = EINVAL; - } - - break; - case SFI_CTL_OPERATION_GET_CLASS_OFFTIME: - if (uap->time != 0) { - return EINVAL; - } - - kret = sfi_get_class_offtime(uap->sfi_class, &out_time); - if (kret == KERN_SUCCESS) { - error = copyout(&out_time, uap->out_time, sizeof(out_time)); - } else { - error = EINVAL; - } - - break; - default: - error = ENOTSUP; - break; - } + case SFI_CTL_OPERATION_SFI_SET_WINDOW: + if (uap->out_time != USER_ADDR_NULL) { + return EINVAL; + } + if (uap->sfi_class != SFI_CLASS_UNSPECIFIED) { + return EINVAL; + } + + error = priv_check_cred(kauth_cred_get(), PRIV_SELECTIVE_FORCED_IDLE, 0); + if (error) { + dprintf("%s failed privilege check for sfi_ctl: %d\n", p->p_comm, error); + return error; + } else { + dprintf("%s succeeded privilege check for sfi_ctl\n", p->p_comm); + } + + if (uap->time == 0) { + /* actually a cancel */ + kret = sfi_window_cancel(); + } else { + kret = sfi_set_window(uap->time); + } + + if (kret) { + error = EINVAL; + } + + break; + case SFI_CTL_OPERATION_SFI_GET_WINDOW: + if (uap->time != 0) { + return EINVAL; + } + if (uap->sfi_class != SFI_CLASS_UNSPECIFIED) { + return EINVAL; + } + + kret = sfi_get_window(&out_time); + if (kret == KERN_SUCCESS) { + error = copyout(&out_time, uap->out_time, sizeof(out_time)); + } else { + error = EINVAL; + } + + break; + case SFI_CTL_OPERATION_SET_CLASS_OFFTIME: + if (uap->out_time != USER_ADDR_NULL) { + return EINVAL; + } + + error = 
priv_check_cred(kauth_cred_get(), PRIV_SELECTIVE_FORCED_IDLE, 0); + if (error) { + dprintf("%s failed privilege check for sfi_ctl: %d\n", p->p_comm, error); + return error; + } else { + dprintf("%s succeeded privilege check for sfi_ctl\n", p->p_comm); + } + + if (uap->time == 0) { + /* actually a cancel */ + kret = sfi_class_offtime_cancel(uap->sfi_class); + } else { + kret = sfi_set_class_offtime(uap->sfi_class, uap->time); + } + + if (kret) { + error = EINVAL; + } + + break; + case SFI_CTL_OPERATION_GET_CLASS_OFFTIME: + if (uap->time != 0) { + return EINVAL; + } + + kret = sfi_get_class_offtime(uap->sfi_class, &out_time); + if (kret == KERN_SUCCESS) { + error = copyout(&out_time, uap->out_time, sizeof(out_time)); + } else { + error = EINVAL; + } + + break; + default: + error = ENOTSUP; + break; + } return error; } -static int proc_apply_sfi_managed(proc_t p, void * arg) +static int +proc_apply_sfi_managed(proc_t p, void * arg) { uint32_t flags = *(uint32_t *)arg; pid_t pid = p->p_pid; boolean_t managed_enabled = (flags == SFI_PROCESS_SET_MANAGED)? TRUE : FALSE; - - if (pid == 0) { /* ignore setting on kernproc */ + + if (pid == 0) { /* ignore setting on kernproc */ return PROC_RETURNED; } @@ -176,85 +178,86 @@ static int proc_apply_sfi_managed(proc_t p, void * arg) } proc_set_task_policy(p->task, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_SFI_MANAGED, - managed_enabled ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE); + TASK_POLICY_ATTRIBUTE, TASK_POLICY_SFI_MANAGED, + managed_enabled ? TASK_POLICY_ENABLE : TASK_POLICY_DISABLE); return PROC_RETURNED; } -int sfi_pidctl(struct proc *p __unused, struct sfi_pidctl_args *uap, int32_t *retval __unused) +int +sfi_pidctl(struct proc *p __unused, struct sfi_pidctl_args *uap, int32_t *retval __unused) { - uint32_t operation = uap->operation; - pid_t pid = uap->pid; - int error = 0; - uint32_t out_flags = 0; - boolean_t managed_enabled; - proc_t targetp; + uint32_t operation = uap->operation; + pid_t pid = uap->pid; + int error = 0; + uint32_t out_flags = 0; + boolean_t managed_enabled; + proc_t targetp; switch (operation) { - case SFI_PIDCTL_OPERATION_PID_SET_FLAGS: - if (uap->out_sfi_flags != USER_ADDR_NULL - || !(uap->sfi_flags & SFI_PROCESS_SET_MANAGED_MASK) - || uap->sfi_flags == SFI_PROCESS_SET_MANAGED_MASK) { + case SFI_PIDCTL_OPERATION_PID_SET_FLAGS: + if (uap->out_sfi_flags != USER_ADDR_NULL + || !(uap->sfi_flags & SFI_PROCESS_SET_MANAGED_MASK) + || uap->sfi_flags == SFI_PROCESS_SET_MANAGED_MASK) { + return EINVAL; + } + + error = priv_check_cred(kauth_cred_get(), PRIV_SELECTIVE_FORCED_IDLE, 0); + if (error) { + dprintf("%s failed privilege check for sfi_pidctl: %d\n", p->p_comm, error); + return error; + } else { + dprintf("%s succeeded privilege check for sfi_pidctl\n", p->p_comm); + } + + if (uap->pid == 0) { + /* only allow SFI_PROCESS_SET_UNMANAGED for pid 0 */ + if (uap->sfi_flags != SFI_PROCESS_SET_UNMANAGED) { return EINVAL; } - error = priv_check_cred(kauth_cred_get(), PRIV_SELECTIVE_FORCED_IDLE, 0); - if (error) { - dprintf("%s failed privilege check for sfi_pidctl: %d\n", p->p_comm, error); - return (error); - } else { - dprintf("%s succeeded privilege check for sfi_pidctl\n", p->p_comm); - } + proc_iterate(PROC_ALLPROCLIST, proc_apply_sfi_managed, (void *)&uap->sfi_flags, NULL, NULL); + break; + } - if (uap->pid == 0) { - /* only allow SFI_PROCESS_SET_UNMANAGED for pid 0 */ - if (uap->sfi_flags != SFI_PROCESS_SET_UNMANAGED) { - return EINVAL; - } + targetp = proc_find(pid); + if (!targetp) { + error = ESRCH; + break; + } - 
proc_iterate(PROC_ALLPROCLIST, proc_apply_sfi_managed, (void *)&uap->sfi_flags, NULL, NULL); - break; - } + proc_apply_sfi_managed(targetp, (void *)&uap->sfi_flags); - targetp = proc_find(pid); - if (!targetp) { - error = ESRCH; - break; - } - - proc_apply_sfi_managed(targetp, (void *)&uap->sfi_flags); - - proc_rele(targetp); + proc_rele(targetp); - break; - case SFI_PIDCTL_OPERATION_PID_GET_FLAGS: - if (uap->sfi_flags != 0) { - return EINVAL; - } - if (uap->pid == 0) { - return EINVAL; - } + break; + case SFI_PIDCTL_OPERATION_PID_GET_FLAGS: + if (uap->sfi_flags != 0) { + return EINVAL; + } + if (uap->pid == 0) { + return EINVAL; + } - targetp = proc_find(pid); - if (!targetp) { - error = ESRCH; - break; - } + targetp = proc_find(pid); + if (!targetp) { + error = ESRCH; + break; + } - managed_enabled = proc_get_task_policy(targetp->task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_SFI_MANAGED); + managed_enabled = proc_get_task_policy(targetp->task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_SFI_MANAGED); - proc_rele(targetp); + proc_rele(targetp); - out_flags = managed_enabled ? SFI_PROCESS_SET_MANAGED : SFI_PROCESS_SET_UNMANAGED; + out_flags = managed_enabled ? SFI_PROCESS_SET_MANAGED : SFI_PROCESS_SET_UNMANAGED; - error = copyout(&out_flags, uap->out_sfi_flags, sizeof(out_flags)); + error = copyout(&out_flags, uap->out_sfi_flags, sizeof(out_flags)); - break; - default: - error = ENOTSUP; - break; - } + break; + default: + error = ENOTSUP; + break; + } return error; } diff --git a/bsd/kern/kern_shutdown.c b/bsd/kern/kern_shutdown.c index 936fd61e2..c9b334c8d 100644 --- a/bsd/kern/kern_shutdown.c +++ b/bsd/kern/kern_shutdown.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -59,14 +59,14 @@ #include -#include /* for thread_block() */ -#include /* for host_priv_self() */ -#include /* for if_down_all() */ -#include /* for count_busy_buffers() */ -#include /* for vfs_unmountall() */ -#include /* for task_suspend() */ -#include /* abused for sync() */ -#include /* for delay_for_interval() */ +#include /* for thread_block() */ +#include /* for host_priv_self() */ +#include /* for if_down_all() */ +#include /* for count_busy_buffers() */ +#include /* for vfs_unmountall() */ +#include /* for task_suspend() */ +#include /* abused for sync() */ +#include /* for delay_for_interval() */ #include #include @@ -90,17 +90,17 @@ extern void halt_log_enter(const char * what, const void * pc, uint64_t time); extern boolean_t kdp_has_polled_corefile(void); #endif /* DEVELOPMENT || DEBUG */ -struct sd_filterargs{ +struct sd_filterargs { int delayterm; int shutdownstate; }; struct sd_iterargs { - int signo; /* the signal to be posted */ - int setsdstate; /* shutdown state to be set */ - int countproc; /* count processes on action */ - int activecount; /* number of processes on which action was done */ + int signo; /* the signal to be posted */ + int setsdstate; /* shutdown state to be set */ + int countproc; /* count processes on action */ + int activecount; /* number of processes on which action was done */ }; static vnode_t sd_logvp = NULLVP; @@ -142,7 +142,7 @@ zprint_panic_info(void) int get_system_inshutdown() { - return (system_inshutdown); + return system_inshutdown; } static void @@ -157,7 +157,7 @@ panic_kernel(int howto, char *message) int reboot_kernel(int howto, char *message) { - int hostboot_option=0; + int hostboot_option = 0; uint64_t startTime; if ((howto & (RB_PANIC | RB_QUICK)) == (RB_PANIC | RB_QUICK)) { @@ -165,22 +165,22 @@ reboot_kernel(int howto, char *message) } if (!OSCompareAndSwap(0, 1, &system_inshutdown)) { - if ( (howto&RB_QUICK) == RB_QUICK) + if ((howto & RB_QUICK) == RB_QUICK) { goto force_reboot; - return (EBUSY); + } + return EBUSY; } /* * Notify the power management root domain that the system will shut down. */ IOSystemShutdownNotification(kIOSystemShutdownNotificationStageProcessExit); - if ((howto&RB_QUICK)==RB_QUICK) { + if ((howto & RB_QUICK) == RB_QUICK) { printf("Quick reboot...\n"); - if ((howto&RB_NOSYNC)==0) { + if ((howto & RB_NOSYNC) == 0) { sync((proc_t)NULL, (void *)NULL, (int *)NULL); } - } - else if ((howto&RB_NOSYNC)==0) { + } else if ((howto & RB_NOSYNC) == 0) { int iter, nbusy; printf("syncing disks... 
"); @@ -201,8 +201,9 @@ reboot_kernel(int howto, char *message) halt_log_enter("audit_shutdown", 0, mach_absolute_time() - startTime); #endif - if (unmountroot_pre_hook != NULL) + if (unmountroot_pre_hook != NULL) { unmountroot_pre_hook(); + } startTime = mach_absolute_time(); sync((proc_t)NULL, (void *)NULL, (int *)NULL); @@ -232,15 +233,17 @@ reboot_kernel(int howto, char *message) startTime = mach_absolute_time(); for (iter = 0; iter < 100; iter++) { nbusy = count_busy_buffers(); - if (nbusy == 0) + if (nbusy == 0) { break; + } printf("%d ", nbusy); delay_for_interval( 1 * nbusy, 1000 * 1000); } - if (nbusy) + if (nbusy) { printf("giving up\n"); - else + } else { printf("done\n"); + } halt_log_enter("bufferclean", 0, mach_absolute_time() - startTime); } #if NETWORKING @@ -260,10 +263,12 @@ force_reboot: panic_kernel(howto, message); } - if (howto & RB_POWERDOWN) + if (howto & RB_POWERDOWN) { hostboot_option = HOST_REBOOT_HALT; - if (howto & RB_HALT) + } + if (howto & RB_HALT) { hostboot_option = HOST_REBOOT_HALT; + } if (howto & RB_UPSDELAY) { hostboot_option = HOST_REBOOT_UPSDELAY; @@ -273,7 +278,7 @@ force_reboot: /* * should not be reached */ - return (0); + return 0; } static int @@ -281,7 +286,7 @@ sd_openlog(vfs_context_t ctx) { int error = 0; struct timeval tv; - + /* Open shutdown log */ if ((error = vnode_open(PROC_SHUTDOWN_LOG, (O_CREAT | FWRITE | O_NOFOLLOW), 0644, 0, &sd_logvp, ctx))) { printf("Failed to open %s: error %d\n", PROC_SHUTDOWN_LOG, error); @@ -311,7 +316,7 @@ sd_closelog(vfs_context_t ctx) } static void -sd_log(vfs_context_t ctx, const char *fmt, ...) +sd_log(vfs_context_t ctx, const char *fmt, ...) { int resid, log_error, len; char logbuf[100]; @@ -328,13 +333,12 @@ sd_log(vfs_context_t ctx, const char *fmt, ...) va_start(arglist, fmt); len = vsnprintf(logbuf, sizeof(logbuf), fmt, arglist); log_error = vn_rdwr(UIO_WRITE, sd_logvp, (caddr_t)logbuf, len, sd_log_offset, - UIO_SYSSPACE, IO_UNIT | IO_NOAUTH, vfs_context_ucred(ctx), &resid, vfs_context_proc(ctx)); + UIO_SYSSPACE, IO_UNIT | IO_NOAUTH, vfs_context_ucred(ctx), &resid, vfs_context_proc(ctx)); if (log_error == EIO || log_error == 0) { sd_log_offset += (len - resid); } va_end(arglist); - } static int @@ -342,18 +346,18 @@ sd_filt1(proc_t p, void * args) { proc_t self = current_proc(); struct sd_filterargs * sf = (struct sd_filterargs *)args; - int delayterm = sf-> delayterm; + int delayterm = sf->delayterm; int shutdownstate = sf->shutdownstate; - if (((p->p_flag&P_SYSTEM) != 0) || (p->p_ppid == 0) - ||(p == self) || (p->p_stat == SZOMB) - || (p->p_shutdownstate != shutdownstate) - ||((delayterm == 0) && ((p->p_lflag& P_LDELAYTERM) == P_LDELAYTERM)) - || ((p->p_sigcatch & sigmask(SIGTERM))== 0)) { - return(0); - } - else - return(1); + if (((p->p_flag & P_SYSTEM) != 0) || (p->p_ppid == 0) + || (p == self) || (p->p_stat == SZOMB) + || (p->p_shutdownstate != shutdownstate) + || ((delayterm == 0) && ((p->p_lflag & P_LDELAYTERM) == P_LDELAYTERM)) + || ((p->p_sigcatch & sigmask(SIGTERM)) == 0)) { + return 0; + } else { + return 1; + } } @@ -377,8 +381,9 @@ sd_callback1(proc_t p, void * args) } psignal(p, signo); - if (countproc != 0) + if (countproc != 0) { sd->activecount++; + } } else { proc_unlock(p); } @@ -391,17 +396,17 @@ sd_filt2(proc_t p, void * args) { proc_t self = current_proc(); struct sd_filterargs * sf = (struct sd_filterargs *)args; - int delayterm = sf-> delayterm; + int delayterm = sf->delayterm; int shutdownstate = sf->shutdownstate; - if (((p->p_flag&P_SYSTEM) != 0) || (p->p_ppid == 0) - ||(p 
== self) || (p->p_stat == SZOMB) - || (p->p_shutdownstate == shutdownstate) - ||((delayterm == 0) && ((p->p_lflag& P_LDELAYTERM) == P_LDELAYTERM))) { - return(0); - } - else - return(1); + if (((p->p_flag & P_SYSTEM) != 0) || (p->p_ppid == 0) + || (p == self) || (p->p_stat == SZOMB) + || (p->p_shutdownstate == shutdownstate) + || ((delayterm == 0) && ((p->p_lflag & P_LDELAYTERM) == P_LDELAYTERM))) { + return 0; + } else { + return 1; + } } static int @@ -416,15 +421,16 @@ sd_callback2(proc_t p, void * args) p->p_shutdownstate = setsdstate; if (p->p_stat != SZOMB) { proc_unlock(p); - if (countproc != 0) { + if (countproc != 0) { proc_list_lock(); p->p_listflag |= P_LIST_EXITCOUNT; proc_shutdown_exitcount++; proc_list_unlock(); } psignal(p, signo); - if (countproc != 0) + if (countproc != 0) { sd->activecount++; + } } else { proc_unlock(p); } @@ -443,14 +449,14 @@ sd_callback3(proc_t p, void * args) proc_lock(p); p->p_shutdownstate = setsdstate; if (p->p_stat != SZOMB) { - /* - * NOTE: following code ignores sig_lock and plays - * with exit_thread correctly. This is OK unless we - * are a multiprocessor, in which case I do not - * understand the sig_lock. This needs to be fixed. - * XXX - */ - if (p->exit_thread) { /* someone already doing it */ + /* + * NOTE: following code ignores sig_lock and plays + * with exit_thread correctly. This is OK unless we + * are a multiprocessor, in which case I do not + * understand the sig_lock. This needs to be fixed. + * XXX + */ + if (p->exit_thread) { /* someone already doing it */ proc_unlock(p); /* give him a chance */ thread_block(THREAD_CONTINUE_NULL); @@ -462,7 +468,7 @@ sd_callback3(proc_t p, void * args) proc_unlock(p); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE, - p->p_pid, 0, 1, 0, 0); + p->p_pid, 0, 1, 0, 0); sd->activecount++; exit1(p, 1, (int *)NULL); } @@ -529,7 +535,7 @@ sigterm_loop: /* post a SIGTERM to all that catch SIGTERM and not marked for delay */ proc_rebootscan(sd_callback1, (void *)&sdargs, sd_filt1, (void *)&sfargs); - if (sdargs.activecount != 0 && proc_shutdown_exitcount!= 0) { + if (sdargs.activecount != 0 && proc_shutdown_exitcount != 0) { proc_list_lock(); if (proc_shutdown_exitcount != 0) { /* @@ -542,12 +548,14 @@ sigterm_loop: error = msleep(&proc_shutdown_exitcount, proc_list_mlock, PWAIT, "shutdownwait", &ts); if (error != 0) { for (p = allproc.lh_first; p; p = p->p_list.le_next) { - if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) + if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) { p->p_listflag &= ~P_LIST_EXITCOUNT; + } } for (p = zombproc.lh_first; p; p = p->p_list.le_next) { - if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) + if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) { p->p_listflag &= ~P_LIST_EXITCOUNT; + } } } } @@ -585,26 +593,28 @@ sigterm_loop: error = 0; - if (sdargs.activecount != 0 && proc_shutdown_exitcount!= 0) { + if (sdargs.activecount != 0 && proc_shutdown_exitcount != 0) { proc_list_lock(); if (proc_shutdown_exitcount != 0) { /* - * wait for up to 60 seconds to allow these procs to exit normally - * - * History: The delay interval was changed from 100 to 200 - * for NFS requests in particular. - */ + * wait for up to 60 seconds to allow these procs to exit normally + * + * History: The delay interval was changed from 100 to 200 + * for NFS requests in particular. 
+ */ ts.tv_sec = 10; ts.tv_nsec = 0; error = msleep(&proc_shutdown_exitcount, proc_list_mlock, PWAIT, "shutdownwait", &ts); if (error != 0) { for (p = allproc.lh_first; p; p = p->p_list.le_next) { - if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) + if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) { p->p_listflag &= ~P_LIST_EXITCOUNT; + } } for (p = zombproc.lh_first; p; p = p->p_list.le_next) { - if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) + if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT) { p->p_listflag &= ~P_LIST_EXITCOUNT; + } } } } @@ -661,4 +671,3 @@ sigterm_loop: proc_rele(initproc); printf("continuing\n"); } - diff --git a/bsd/kern/kern_sig.c b/bsd/kern/kern_sig.c index c444668bd..254f60066 100644 --- a/bsd/kern/kern_sig.c +++ b/bsd/kern/kern_sig.c @@ -2,7 +2,7 @@ * Copyright (c) 1995-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -71,7 +71,7 @@ * Version 2.0. 
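 *
 * Illustrative sketch, not part of this patch: the sigaction(2) paths in
 * this file service registrations of the usual POSIX form, where the
 * sa_flags below end up as the ps_siginfo/ps_sigintr bits managed by
 * setsigvec():
 *
 *     #include <signal.h>
 *
 *     static void on_term(int sig, siginfo_t *si, void *uctx) {
 *         (void)sig; (void)si; (void)uctx;  // handler body elided
 *     }
 *
 *     struct sigaction sa = {0};
 *     sa.sa_sigaction = on_term;
 *     sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *     sigemptyset(&sa.sa_mask);
 *     sigaction(SIGTERM, &sa, NULL);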
*/ -#define SIGPROP /* include signal properties table */ +#define SIGPROP /* include signal properties table */ #include #include #include @@ -98,10 +98,10 @@ #include #include -#include /* for coredump */ -#include /* for APC support */ +#include /* for coredump */ +#include /* for APC support */ #include -#include /* extern void *get_bsdtask_info(task_t); */ +#include /* extern void *get_bsdtask_info(task_t); */ #include #include #include @@ -127,8 +127,8 @@ * +++ */ extern int thread_enable_fpe(thread_t act, int onoff); -extern thread_t port_name_to_thread(mach_port_name_t port_name); -extern kern_return_t get_signalact(task_t , thread_t *, int); +extern thread_t port_name_to_thread(mach_port_name_t port_name); +extern kern_return_t get_signalact(task_t, thread_t *, int); extern unsigned int get_useraddr(void); extern boolean_t task_did_exec(task_t task); extern boolean_t task_is_exec_copy(task_t task); @@ -137,8 +137,8 @@ extern boolean_t task_is_exec_copy(task_t task); * --- */ -extern void doexception(int exc, mach_exception_code_t code, - mach_exception_subcode_t sub); +extern void doexception(int exc, mach_exception_code_t code, + mach_exception_subcode_t sub); static void stop(proc_t, proc_t); static int cansignal_nomac(proc_t, kauth_cred_t, proc_t, int); @@ -154,18 +154,18 @@ kern_return_t semaphore_timedwait_trap_internal(mach_port_name_t, unsigned int, kern_return_t semaphore_wait_signal_trap_internal(mach_port_name_t, mach_port_name_t, void (*)(kern_return_t)); kern_return_t semaphore_wait_trap_internal(mach_port_name_t, void (*)(kern_return_t)); -static int filt_sigattach(struct knote *kn, struct kevent_internal_s *kev); -static void filt_sigdetach(struct knote *kn); -static int filt_signal(struct knote *kn, long hint); -static int filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev); -static int filt_signalprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); +static int filt_sigattach(struct knote *kn, struct kevent_internal_s *kev); +static void filt_sigdetach(struct knote *kn); +static int filt_signal(struct knote *kn, long hint); +static int filt_signaltouch(struct knote *kn, struct kevent_internal_s *kev); +static int filt_signalprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); SECURITY_READ_ONLY_EARLY(struct filterops) sig_filtops = { - .f_attach = filt_sigattach, - .f_detach = filt_sigdetach, - .f_event = filt_signal, - .f_touch = filt_signaltouch, - .f_process = filt_signalprocess, + .f_attach = filt_sigattach, + .f_detach = filt_sigdetach, + .f_event = filt_signal, + .f_touch = filt_signaltouch, + .f_process = filt_signalprocess, }; /* structures and fns for killpg1 iterartion callback and filters */ @@ -205,17 +205,17 @@ static void psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, static void sigaltstack_kern_to_user32(struct kern_sigaltstack *in, struct user32_sigaltstack *out) { - out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp); - out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size); - out->ss_flags = in->ss_flags; + out->ss_sp = CAST_DOWN_EXPLICIT(user32_addr_t, in->ss_sp); + out->ss_size = CAST_DOWN_EXPLICIT(user32_size_t, in->ss_size); + out->ss_flags = in->ss_flags; } static void sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstack *out) { - out->ss_sp = in->ss_sp; - out->ss_size = in->ss_size; - out->ss_flags = in->ss_flags; + out->ss_sp = in->ss_sp; + out->ss_size = in->ss_size; + out->ss_flags = 
in->ss_flags; } /* @@ -226,23 +226,23 @@ sigaltstack_kern_to_user64(struct kern_sigaltstack *in, struct user64_sigaltstac static void sigaltstack_user32_to_kern(struct user32_sigaltstack *in, struct kern_sigaltstack *out) { - out->ss_flags = in->ss_flags; - out->ss_size = in->ss_size; - out->ss_sp = CAST_USER_ADDR_T(in->ss_sp); + out->ss_flags = in->ss_flags; + out->ss_size = in->ss_size; + out->ss_sp = CAST_USER_ADDR_T(in->ss_sp); } static void sigaltstack_user64_to_kern(struct user64_sigaltstack *in, struct kern_sigaltstack *out) { - out->ss_flags = in->ss_flags; - out->ss_size = in->ss_size; - out->ss_sp = in->ss_sp; + out->ss_flags = in->ss_flags; + out->ss_size = in->ss_size; + out->ss_sp = in->ss_sp; } static void sigaction_kern_to_user32(struct kern_sigaction *in, struct user32_sigaction *out) { /* This assumes 32 bit __sa_handler is of type sig_t */ - out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t,in->__sigaction_u.__sa_handler); + out->__sigaction_u.__sa_handler = CAST_DOWN_EXPLICIT(user32_addr_t, in->__sigaction_u.__sa_handler); out->sa_mask = in->sa_mask; out->sa_flags = in->sa_flags; } @@ -265,7 +265,7 @@ __sigaction_user32_to_kern(struct __user32_sigaction *in, struct __kern_sigactio kern_return_t kr; kr = machine_thread_function_pointers_convert_from_user(current_thread(), - &out->sa_tramp, 1); + &out->sa_tramp, 1); assert(kr == KERN_SUCCESS); } @@ -279,19 +279,18 @@ __sigaction_user64_to_kern(struct __user64_sigaction *in, struct __kern_sigactio kern_return_t kr; kr = machine_thread_function_pointers_convert_from_user(current_thread(), - &out->sa_tramp, 1); + &out->sa_tramp, 1); assert(kr == KERN_SUCCESS); } #if SIGNAL_DEBUG void ram_printf(int); -int ram_debug=0; -unsigned int rdebug_proc=0; +int ram_debug = 0; +unsigned int rdebug_proc = 0; void ram_printf(int x) { - printf("x is %d",x); - + printf("x is %d", x); } #endif /* SIGNAL_DEBUG */ @@ -315,7 +314,7 @@ cansignal_nomac(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum) return 0; } - /* otherwise, root can always signal */ + /* otherwise, root can always signal */ if (kauth_cred_issuser(uc_src)) { return 1; } @@ -353,9 +352,9 @@ cansignal_nomac(proc_t src, kauth_cred_t uc_src, proc_t dst, int signum) * UID of the target, allow the signal to be sent. 
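 *
 * Illustrative sketch, not part of this patch: this check is why an
 * unprivileged kill(2) across users fails with EPERM (target_pid here is
 * a hypothetical pid obtained elsewhere):
 *
 *     #include <errno.h>
 *     #include <signal.h>
 *     #include <stdio.h>
 *
 *     if (kill(target_pid, SIGTERM) == -1 && errno == EPERM) {
 *         // no real/effective-vs-real/saved UID pair matched
 *         fprintf(stderr, "cannot signal %d\n", target_pid);
 *     }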
*/ if (kauth_cred_getruid(uc_src) == kauth_cred_getruid(uc_dst) || - kauth_cred_getruid(uc_src) == kauth_cred_getsvuid(uc_dst) || - kauth_cred_getuid(uc_src) == kauth_cred_getruid(uc_dst) || - kauth_cred_getuid(uc_src) == kauth_cred_getsvuid(uc_dst)) { + kauth_cred_getruid(uc_src) == kauth_cred_getsvuid(uc_dst) || + kauth_cred_getuid(uc_src) == kauth_cred_getruid(uc_dst) || + kauth_cred_getuid(uc_src) == kauth_cred_getsvuid(uc_dst)) { allowed = 1; } @@ -413,7 +412,7 @@ signal_is_restricted(proc_t p, int signum) { if (sigmask(signum) & sigrestrictmask()) { if (sigrestrict_arg == 0 && - task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) { + task_get_apptype(p->task) == TASK_APPTYPE_APP_DEFAULT) { return ENOTSUP; } else { return EINVAL; @@ -454,36 +453,38 @@ sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval) struct sigacts *ps = p->p_sigacts; int signum; - int bit, error=0; + int bit, error = 0; uint32_t sigreturn_validation = PS_SIGRETURN_VALIDATION_DEFAULT; signum = uap->signum; if (signum <= 0 || signum >= NSIG || - signum == SIGKILL || signum == SIGSTOP) - return (EINVAL); + signum == SIGKILL || signum == SIGSTOP) { + return EINVAL; + } if (uap->nsa) { if (IS_64BIT_PROCESS(p)) { - struct __user64_sigaction __vec64; + struct __user64_sigaction __vec64; error = copyin(uap->nsa, &__vec64, sizeof(__vec64)); __sigaction_user64_to_kern(&__vec64, &__vec); } else { - struct __user32_sigaction __vec32; + struct __user32_sigaction __vec32; error = copyin(uap->nsa, &__vec32, sizeof(__vec32)); __sigaction_user32_to_kern(&__vec32, &__vec); } - if (error) - return (error); + if (error) { + return error; + } sigreturn_validation = (__vec.sa_flags & SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP) ? - PS_SIGRETURN_VALIDATION_ENABLED : PS_SIGRETURN_VALIDATION_DISABLED; + PS_SIGRETURN_VALIDATION_ENABLED : PS_SIGRETURN_VALIDATION_DISABLED; __vec.sa_flags &= SA_USERSPACE_MASK; /* Only pass on valid sa_flags */ if ((__vec.sa_flags & SA_SIGINFO) || __vec.sa_handler != SIG_DFL) { if ((error = signal_is_restricted(p, signum))) { if (error == ENOTSUP) { printf("%s(%d): denied attempt to register action for signal %d\n", - proc_name_address(p), proc_pid(p), signum); + proc_name_address(p), proc_pid(p), signum); } return error; } @@ -495,83 +496,93 @@ sigaction(proc_t p, struct sigaction_args *uap, __unused int32_t *retval) sa->sa_mask = ps->ps_catchmask[signum]; bit = sigmask(signum); sa->sa_flags = 0; - if ((ps->ps_sigonstack & bit) != 0) + if ((ps->ps_sigonstack & bit) != 0) { sa->sa_flags |= SA_ONSTACK; - if ((ps->ps_sigintr & bit) == 0) + } + if ((ps->ps_sigintr & bit) == 0) { sa->sa_flags |= SA_RESTART; - if (ps->ps_siginfo & bit) + } + if (ps->ps_siginfo & bit) { sa->sa_flags |= SA_SIGINFO; - if (ps->ps_signodefer & bit) + } + if (ps->ps_signodefer & bit) { sa->sa_flags |= SA_NODEFER; - if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP)) + } + if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDSTOP)) { sa->sa_flags |= SA_NOCLDSTOP; - if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT)) + } + if ((signum == SIGCHLD) && (p->p_flag & P_NOCLDWAIT)) { sa->sa_flags |= SA_NOCLDWAIT; + } if (IS_64BIT_PROCESS(p)) { - struct user64_sigaction vec64 = {}; + struct user64_sigaction vec64 = {}; sigaction_kern_to_user64(sa, &vec64); error = copyout(&vec64, uap->osa, sizeof(vec64)); } else { - struct user32_sigaction vec32 = {}; + struct user32_sigaction vec32 = {}; sigaction_kern_to_user32(sa, &vec32); error = copyout(&vec32, uap->osa, sizeof(vec32)); } - if (error) - return (error); + if (error) { + return 
error; + } } if (uap->nsa) { uint32_t old_sigreturn_validation = atomic_load_explicit( - &ps->ps_sigreturn_validation, memory_order_relaxed); + &ps->ps_sigreturn_validation, memory_order_relaxed); if (old_sigreturn_validation == PS_SIGRETURN_VALIDATION_DEFAULT) { atomic_compare_exchange_strong_explicit(&ps->ps_sigreturn_validation, - &old_sigreturn_validation, sigreturn_validation, - memory_order_relaxed, memory_order_relaxed); + &old_sigreturn_validation, sigreturn_validation, + memory_order_relaxed, memory_order_relaxed); } error = setsigvec(p, current_thread(), signum, &__vec, FALSE); } - return (error); + return error; } /* Routines to manipulate bits on all threads */ int -clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart) +clear_procsiglist(proc_t p, int bit, boolean_t in_signalstart) { struct uthread * uth; thread_t thact; proc_lock(p); - if (!in_signalstart) + if (!in_signalstart) { proc_signalstart(p, 1); + } if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { - thact = p->p_vforkact; + thact = p->p_vforkact; uth = (struct uthread *)get_bsdthread_info(thact); if (uth) { uth->uu_siglist &= ~bit; } - if (!in_signalstart) + if (!in_signalstart) { proc_signalend(p, 1); + } proc_unlock(p); - return(0); - } + return 0; + } TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { uth->uu_siglist &= ~bit; } p->p_siglist &= ~bit; - if (!in_signalstart) + if (!in_signalstart) { proc_signalend(p, 1); + } proc_unlock(p); - return(0); + return 0; } static int -unblock_procsigmask(proc_t p, int bit) +unblock_procsigmask(proc_t p, int bit) { struct uthread * uth; thread_t thact; @@ -580,7 +591,7 @@ unblock_procsigmask(proc_t p, int bit) proc_signalstart(p, 1); if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { - thact = p->p_vforkact; + thact = p->p_vforkact; uth = (struct uthread *)get_bsdthread_info(thact); if (uth) { uth->uu_sigmask &= ~bit; @@ -588,8 +599,8 @@ unblock_procsigmask(proc_t p, int bit) p->p_sigmask &= ~bit; proc_signalend(p, 1); proc_unlock(p); - return(0); - } + return 0; + } TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { uth->uu_sigmask &= ~bit; } @@ -597,11 +608,11 @@ unblock_procsigmask(proc_t p, int bit) proc_signalend(p, 1); proc_unlock(p); - return(0); + return 0; } static int -block_procsigmask(proc_t p, int bit) +block_procsigmask(proc_t p, int bit) { struct uthread * uth; thread_t thact; @@ -610,7 +621,7 @@ block_procsigmask(proc_t p, int bit) proc_signalstart(p, 1); if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { - thact = p->p_vforkact; + thact = p->p_vforkact; uth = (struct uthread *)get_bsdthread_info(thact); if (uth) { uth->uu_sigmask |= bit; @@ -618,8 +629,8 @@ block_procsigmask(proc_t p, int bit) p->p_sigmask |= bit; proc_signalend(p, 1); proc_unlock(p); - return(0); - } + return 0; + } TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { uth->uu_sigmask |= bit; } @@ -627,11 +638,11 @@ block_procsigmask(proc_t p, int bit) proc_signalend(p, 1); proc_unlock(p); - return(0); + return 0; } int -set_procsigmask(proc_t p, int bit) +set_procsigmask(proc_t p, int bit) { struct uthread * uth; thread_t thact; @@ -640,7 +651,7 @@ set_procsigmask(proc_t p, int bit) proc_signalstart(p, 1); if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { - thact = p->p_vforkact; + thact = p->p_vforkact; uth = (struct uthread *)get_bsdthread_info(thact); if (uth) { uth->uu_sigmask = bit; @@ -648,8 +659,8 @@ set_procsigmask(proc_t p, int bit) p->p_sigmask = bit; proc_signalend(p, 1); proc_unlock(p); - return(0); - } + return 0; + } TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { uth->uu_sigmask = bit; } @@ 
-657,7 +668,7 @@ set_procsigmask(proc_t p, int bit) proc_signalend(p, 1); proc_unlock(p); - return(0); + return 0; } /* XXX should be static? */ @@ -680,44 +691,52 @@ setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigactio assert(signum < NSIG); if ((signum == SIGKILL || signum == SIGSTOP) && - sa->sa_handler != SIG_DFL) - return(EINVAL); + sa->sa_handler != SIG_DFL) { + return EINVAL; + } bit = sigmask(signum); /* * Change setting atomically. */ ps->ps_sigact[signum] = sa->sa_handler; ps->ps_trampact[signum] = sa->sa_tramp; - ps->ps_catchmask[signum] = sa->sa_mask &~ sigcantmask; - if (sa->sa_flags & SA_SIGINFO) + ps->ps_catchmask[signum] = sa->sa_mask & ~sigcantmask; + if (sa->sa_flags & SA_SIGINFO) { ps->ps_siginfo |= bit; - else + } else { ps->ps_siginfo &= ~bit; - if ((sa->sa_flags & SA_RESTART) == 0) + } + if ((sa->sa_flags & SA_RESTART) == 0) { ps->ps_sigintr |= bit; - else + } else { ps->ps_sigintr &= ~bit; - if (sa->sa_flags & SA_ONSTACK) + } + if (sa->sa_flags & SA_ONSTACK) { ps->ps_sigonstack |= bit; - else + } else { ps->ps_sigonstack &= ~bit; - if (sa->sa_flags & SA_RESETHAND) + } + if (sa->sa_flags & SA_RESETHAND) { ps->ps_sigreset |= bit; - else + } else { ps->ps_sigreset &= ~bit; - if (sa->sa_flags & SA_NODEFER) + } + if (sa->sa_flags & SA_NODEFER) { ps->ps_signodefer |= bit; - else + } else { ps->ps_signodefer &= ~bit; + } if (signum == SIGCHLD) { - if (sa->sa_flags & SA_NOCLDSTOP) + if (sa->sa_flags & SA_NOCLDSTOP) { OSBitOrAtomic(P_NOCLDSTOP, &p->p_flag); - else + } else { OSBitAndAtomic(~((uint32_t)P_NOCLDSTOP), &p->p_flag); - if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN)) + } + if ((sa->sa_flags & SA_NOCLDWAIT) || (sa->sa_handler == SIG_IGN)) { OSBitOrAtomic(P_NOCLDWAIT, &p->p_flag); - else + } else { OSBitAndAtomic(~((uint32_t)P_NOCLDWAIT), &p->p_flag); + } } /* @@ -728,19 +747,20 @@ setsigvec(proc_t p, __unused thread_t thread, int signum, struct __kern_sigactio */ if (sa->sa_handler == SIG_IGN || (sigprop[signum] & SA_IGNORE && sa->sa_handler == SIG_DFL)) { - clear_procsiglist(p, bit, in_sigstart); - if (signum != SIGCONT) - p->p_sigignore |= bit; /* easier in psignal */ + if (signum != SIGCONT) { + p->p_sigignore |= bit; /* easier in psignal */ + } p->p_sigcatch &= ~bit; } else { p->p_sigignore &= ~bit; - if (sa->sa_handler == SIG_DFL) + if (sa->sa_handler == SIG_DFL) { p->p_sigcatch &= ~bit; - else + } else { p->p_sigcatch |= bit; + } } - return(0); + return 0; } /* @@ -752,9 +772,11 @@ siginit(proc_t p) { int i; - for (i = 1; i < NSIG; i++) - if (sigprop[i] & SA_IGNORE && i != SIGCONT) + for (i = 1; i < NSIG; i++) { + if (sigprop[i] & SA_IGNORE && i != SIGCONT) { p->p_sigignore |= sigmask(i); + } + } } /* @@ -791,15 +813,16 @@ execsigs(proc_t p, thread_t thread) mask = sigmask(nc); p->p_sigcatch &= ~mask; if (sigprop[nc] & SA_IGNORE) { - if (nc != SIGCONT) + if (nc != SIGCONT) { p->p_sigignore |= mask; + } ut->uu_siglist &= ~mask; } ps->ps_sigact[nc] = SIG_DFL; } atomic_store_explicit(&ps->ps_sigreturn_validation, - PS_SIGRETURN_VALIDATION_DEFAULT, memory_order_relaxed); + PS_SIGRETURN_VALIDATION_DEFAULT, memory_order_relaxed); /* Generate random token value used to validate sigreturn arguments */ read_random(&ps->ps_sigreturn_token, sizeof(ps->ps_sigreturn_token)); @@ -838,8 +861,9 @@ sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval) goto out; } error = copyin(uap->mask, &nmask, sizeof(sigset_t)); - if (error) + if (error) { goto out; + } switch (uap->how) { case SIG_BLOCK: @@ -856,15 
+880,16 @@ sigprocmask(proc_t p, struct sigprocmask_args *uap, __unused int32_t *retval) set_procsigmask(p, (nmask & ~sigcantmask)); signal_setast(current_thread()); break; - + default: error = EINVAL; break; } out: - if (!error && omask != USER_ADDR_NULL) + if (!error && omask != USER_ADDR_NULL) { copyout(&oldmask, omask, sizeof(sigset_t)); - return (error); + } + return error; } int @@ -876,9 +901,10 @@ sigpending(__unused proc_t p, struct sigpending_args *uap, __unused int32_t *ret ut = (struct uthread *)get_bsdthread_info(current_thread()); pendlist = ut->uu_siglist; - if (uap->osv) + if (uap->osv) { copyout(&pendlist, uap->osv, sizeof(sigset_t)); - return(0); + } + return 0; } /* @@ -898,7 +924,7 @@ int sigsuspend(proc_t p, struct sigsuspend_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval)); + return sigsuspend_nocancel(p, (struct sigsuspend_nocancel_args *)uap, retval); } int @@ -918,16 +944,16 @@ sigsuspend_nocancel(proc_t p, struct sigsuspend_nocancel_args *uap, __unused int ut->uu_oldmask = ut->uu_sigmask; ut->uu_flag |= UT_SAS_OLDMASK; ut->uu_sigmask = (uap->mask & ~sigcantmask); - (void) tsleep0((caddr_t) p, PPAUSE|PCATCH, "pause", 0, sigcontinue); + (void) tsleep0((caddr_t) p, PPAUSE | PCATCH, "pause", 0, sigcontinue); /* always return EINTR rather than ERESTART... */ - return (EINTR); + return EINTR; } int __disable_threadsignal(__unused proc_t p, - __unused struct __disable_threadsignal_args *uap, - __unused int32_t *retval) + __unused struct __disable_threadsignal_args *uap, + __unused int32_t *retval) { struct uthread *uth; @@ -936,28 +962,27 @@ __disable_threadsignal(__unused proc_t p, /* No longer valid to have any signal delivered */ uth->uu_flag |= (UT_NO_SIGMASK | UT_CANCELDISABLE); - return(0); - + return 0; } void __pthread_testcancel(int presyscall) { - thread_t self = current_thread(); struct uthread * uthread; uthread = (struct uthread *)get_bsdthread_info(self); - + uthread->uu_flag &= ~UT_NOTCANCELPT; if ((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) { - if(presyscall != 0) { + if (presyscall != 0) { unix_syscall_return(EINTR); /* NOTREACHED */ - } else + } else { thread_abort_safely(self); + } } } @@ -965,7 +990,7 @@ __pthread_testcancel(int presyscall) int __pthread_markcancel(__unused proc_t p, - struct __pthread_markcancel_args *uap, __unused int32_t *retval) + struct __pthread_markcancel_args *uap, __unused int32_t *retval) { thread_act_t target_act; int error = 0; @@ -973,31 +998,33 @@ __pthread_markcancel(__unused proc_t p, target_act = (thread_act_t)port_name_to_thread(uap->thread_port); - if (target_act == THR_ACT_NULL) - return (ESRCH); + if (target_act == THR_ACT_NULL) { + return ESRCH; + } uth = (struct uthread *)get_bsdthread_info(target_act); /* if the thread is in vfork do not cancel */ - if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED )) == 0) { + if ((uth->uu_flag & (UT_VFORK | UT_CANCEL | UT_CANCELED)) == 0) { uth->uu_flag |= (UT_CANCEL | UT_NO_SIGMASK); - if (((uth->uu_flag & UT_NOTCANCELPT) == 0) - && ((uth->uu_flag & UT_CANCELDISABLE) == 0)) - thread_abort_safely(target_act); + if (((uth->uu_flag & UT_NOTCANCELPT) == 0) + && ((uth->uu_flag & UT_CANCELDISABLE) == 0)) { + thread_abort_safely(target_act); + } } thread_deallocate(target_act); - return (error); + return error; } -/* if action =0 ; return the cancellation state , +/* if action =0 ; return the cancellation state , * if marked for cancellation, make the 
thread canceled * if action = 1 ; Enable the cancel handling * if action = 2; Disable the cancel handling */ int __pthread_canceled(__unused proc_t p, - struct __pthread_canceled_args *uap, __unused int32_t *retval) + struct __pthread_canceled_args *uap, __unused int32_t *retval) { thread_act_t thread; struct uthread *uth; @@ -1007,39 +1034,40 @@ __pthread_canceled(__unused proc_t p, uth = (struct uthread *)get_bsdthread_info(thread); switch (action) { - case 1: - uth->uu_flag &= ~UT_CANCELDISABLE; - return(0); - case 2: - uth->uu_flag |= UT_CANCELDISABLE; - return(0); - case 0: - default: - /* if the thread is in vfork do not cancel */ - if((uth->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) { - uth->uu_flag &= ~UT_CANCEL; - uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK); - return(0); - } - return(EINVAL); - } - return(EINVAL); + case 1: + uth->uu_flag &= ~UT_CANCELDISABLE; + return 0; + case 2: + uth->uu_flag |= UT_CANCELDISABLE; + return 0; + case 0: + default: + /* if the thread is in vfork do not cancel */ + if ((uth->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) { + uth->uu_flag &= ~UT_CANCEL; + uth->uu_flag |= (UT_CANCELED | UT_NO_SIGMASK); + return 0; + } + return EINVAL; + } + return EINVAL; } __attribute__((noreturn)) void -__posix_sem_syscall_return(kern_return_t kern_result) +__posix_sem_syscall_return(kern_return_t kern_result) { int error = 0; - if (kern_result == KERN_SUCCESS) + if (kern_result == KERN_SUCCESS) { error = 0; - else if (kern_result == KERN_ABORTED) + } else if (kern_result == KERN_ABORTED) { error = EINTR; - else if (kern_result == KERN_OPERATION_TIMED_OUT) + } else if (kern_result == KERN_OPERATION_TIMED_OUT) { error = ETIMEDOUT; - else + } else { error = EINVAL; + } unix_syscall_return(error); /* does not return */ } @@ -1054,26 +1082,24 @@ __posix_sem_syscall_return(kern_return_t kern_result) */ int __old_semwait_signal(proc_t p, struct __old_semwait_signal_args *uap, - int32_t *retval) + int32_t *retval) { __pthread_testcancel(0); - return(__old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval)); + return __old_semwait_signal_nocancel(p, (struct __old_semwait_signal_nocancel_args *)uap, retval); } int __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_args *uap, - __unused int32_t *retval) + __unused int32_t *retval) { - kern_return_t kern_result; int error; mach_timespec_t then; struct timespec now; struct user_timespec ts; boolean_t truncated_timeout = FALSE; - - if(uap->timeout) { - + + if (uap->timeout) { if (IS_64BIT_PROCESS(p)) { struct user64_timespec ts64; error = copyin(uap->ts, &ts64, sizeof(ts64)); @@ -1085,27 +1111,27 @@ __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_arg ts.tv_sec = ts32.tv_sec; ts.tv_nsec = ts32.tv_nsec; } - + if (error) { return error; } - + if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) { ts.tv_sec = 0xFFFFFFFF; ts.tv_nsec = 0; truncated_timeout = TRUE; } - + if (uap->relative) { then.tv_sec = ts.tv_sec; then.tv_nsec = ts.tv_nsec; } else { nanotime(&now); - + /* if time has elapsed, set time to null timepsec to bailout rightaway */ if (now.tv_sec == ts.tv_sec ? 
- now.tv_nsec > ts.tv_nsec : - now.tv_sec > ts.tv_sec) { + now.tv_nsec > ts.tv_nsec : + now.tv_sec > ts.tv_sec) { then.tv_sec = 0; then.tv_nsec = 0; } else { @@ -1113,35 +1139,35 @@ __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_arg then.tv_nsec = ts.tv_nsec - now.tv_nsec; if (then.tv_nsec < 0) { then.tv_nsec += NSEC_PER_SEC; - then.tv_sec--; + then.tv_sec--; } } } - - if (uap->mutex_sem == 0) + + if (uap->mutex_sem == 0) { kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return); - else + } else { kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return); - + } } else { - - if (uap->mutex_sem == 0) + if (uap->mutex_sem == 0) { kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return); - else - + } else { kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return); + } + } + + if (kern_result == KERN_SUCCESS && !truncated_timeout) { + return 0; + } else if (kern_result == KERN_SUCCESS && truncated_timeout) { + return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */ + } else if (kern_result == KERN_ABORTED) { + return EINTR; + } else if (kern_result == KERN_OPERATION_TIMED_OUT) { + return ETIMEDOUT; + } else { + return EINVAL; } - - if (kern_result == KERN_SUCCESS && !truncated_timeout) - return(0); - else if (kern_result == KERN_SUCCESS && truncated_timeout) - return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */ - else if (kern_result == KERN_ABORTED) - return(EINTR); - else if (kern_result == KERN_OPERATION_TIMED_OUT) - return(ETIMEDOUT); - else - return(EINVAL); } #endif /* OLD_SEMWAIT_SIGNAL*/ @@ -1154,86 +1180,84 @@ __old_semwait_signal_nocancel(proc_t p, struct __old_semwait_signal_nocancel_arg */ int __semwait_signal(proc_t p, struct __semwait_signal_args *uap, - int32_t *retval) + int32_t *retval) { __pthread_testcancel(0); - return(__semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval)); + return __semwait_signal_nocancel(p, (struct __semwait_signal_nocancel_args *)uap, retval); } int __semwait_signal_nocancel(__unused proc_t p, struct __semwait_signal_nocancel_args *uap, - __unused int32_t *retval) + __unused int32_t *retval) { - kern_return_t kern_result; mach_timespec_t then; struct timespec now; struct user_timespec ts; - boolean_t truncated_timeout = FALSE; - - if(uap->timeout) { - + boolean_t truncated_timeout = FALSE; + + if (uap->timeout) { ts.tv_sec = uap->tv_sec; - ts.tv_nsec = uap->tv_nsec; - - if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) { - ts.tv_sec = 0xFFFFFFFF; - ts.tv_nsec = 0; - truncated_timeout = TRUE; - } - + ts.tv_nsec = uap->tv_nsec; + + if ((ts.tv_sec & 0xFFFFFFFF00000000ULL) != 0) { + ts.tv_sec = 0xFFFFFFFF; + ts.tv_nsec = 0; + truncated_timeout = TRUE; + } + if (uap->relative) { then.tv_sec = ts.tv_sec; then.tv_nsec = ts.tv_nsec; } else { nanotime(&now); - /* if time has elapsed, set time to null timepsec to bailout rightaway */ - if (now.tv_sec == ts.tv_sec ? 
- now.tv_nsec > ts.tv_nsec : - now.tv_sec > ts.tv_sec) { - then.tv_sec = 0; - then.tv_nsec = 0; - } else { - then.tv_sec = ts.tv_sec - now.tv_sec; - then.tv_nsec = ts.tv_nsec - now.tv_nsec; - if (then.tv_nsec < 0) { - then.tv_nsec += NSEC_PER_SEC; - then.tv_sec--; - } - } - } - - if (uap->mutex_sem == 0) + /* if time has elapsed, set time to null timepsec to bailout rightaway */ + if (now.tv_sec == ts.tv_sec ? + now.tv_nsec > ts.tv_nsec : + now.tv_sec > ts.tv_sec) { + then.tv_sec = 0; + then.tv_nsec = 0; + } else { + then.tv_sec = ts.tv_sec - now.tv_sec; + then.tv_nsec = ts.tv_nsec - now.tv_nsec; + if (then.tv_nsec < 0) { + then.tv_nsec += NSEC_PER_SEC; + then.tv_sec--; + } + } + } + + if (uap->mutex_sem == 0) { kern_result = semaphore_timedwait_trap_internal((mach_port_name_t)uap->cond_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return); - else + } else { kern_result = semaphore_timedwait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, then.tv_sec, then.tv_nsec, __posix_sem_syscall_return); - + } } else { - - if (uap->mutex_sem == 0) + if (uap->mutex_sem == 0) { kern_result = semaphore_wait_trap_internal(uap->cond_sem, __posix_sem_syscall_return); - else - + } else { kern_result = semaphore_wait_signal_trap_internal(uap->cond_sem, uap->mutex_sem, __posix_sem_syscall_return); + } + } + + if (kern_result == KERN_SUCCESS && !truncated_timeout) { + return 0; + } else if (kern_result == KERN_SUCCESS && truncated_timeout) { + return EINTR; /* simulate an exceptional condition because Mach doesn't support a longer timeout */ + } else if (kern_result == KERN_ABORTED) { + return EINTR; + } else if (kern_result == KERN_OPERATION_TIMED_OUT) { + return ETIMEDOUT; + } else { + return EINVAL; } - - if (kern_result == KERN_SUCCESS && !truncated_timeout) - return(0); - else if (kern_result == KERN_SUCCESS && truncated_timeout) - return(EINTR); /* simulate an exceptional condition because Mach doesn't support a longer timeout */ - else if (kern_result == KERN_ABORTED) - return(EINTR); - else if (kern_result == KERN_OPERATION_TIMED_OUT) - return(ETIMEDOUT); - else - return(EINVAL); } -int +int __pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap, - __unused int32_t *retval) + __unused int32_t *retval) { thread_t target_act; int error = 0; @@ -1242,8 +1266,9 @@ __pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap, target_act = (thread_t)port_name_to_thread(uap->thread_port); - if (target_act == THREAD_NULL) - return (ESRCH); + if (target_act == THREAD_NULL) { + return ESRCH; + } if ((u_int)signum >= NSIG) { error = EINVAL; goto out; @@ -1256,17 +1281,18 @@ __pthread_kill(__unused proc_t p, struct __pthread_kill_args *uap, goto out; } - if (signum) + if (signum) { psignal_uthread(target_act, signum); + } out: thread_deallocate(target_act); - return (error); + return error; } -int +int __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap, - __unused int32_t *retval) + __unused int32_t *retval) { user_addr_t set = uap->set; user_addr_t oset = uap->oset; @@ -1284,8 +1310,9 @@ __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap, } error = copyin(set, &nset, sizeof(sigset_t)); - if (error) + if (error) { goto out; + } switch (uap->how) { case SIG_BLOCK: @@ -1301,16 +1328,16 @@ __pthread_sigmask(__unused proc_t p, struct __pthread_sigmask_args *uap, ut->uu_sigmask = (nset & ~sigcantmask); signal_setast(current_thread()); break; - + default: error = EINVAL; - } out: - if (!error && oset != USER_ADDR_NULL) + if (!error && oset != 
USER_ADDR_NULL) { copyout(&oldset, oset, sizeof(sigset_t)); + } - return(error); + return error; } /* @@ -1319,14 +1346,14 @@ out: * copyin:EFAULT * copyout:EFAULT */ -int +int __sigwait(proc_t p, struct __sigwait_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(__sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval)); + return __sigwait_nocancel(p, (struct __sigwait_nocancel_args *)uap, retval); } -int +int __sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32_t *retval) { struct uthread *ut; @@ -1334,31 +1361,34 @@ __sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32 int error = 0; sigset_t mask; sigset_t siglist; - sigset_t sigw=0; + sigset_t sigw = 0; int signum; ut = (struct uthread *)get_bsdthread_info(current_thread()); - if (uap->set == USER_ADDR_NULL) - return(EINVAL); + if (uap->set == USER_ADDR_NULL) { + return EINVAL; + } error = copyin(uap->set, &mask, sizeof(sigset_t)); - if (error) - return(error); + if (error) { + return error; + } siglist = (mask & ~sigcantmask); - if (siglist == 0) - return(EINVAL); + if (siglist == 0) { + return EINVAL; + } proc_lock(p); if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { proc_unlock(p); - return(EINVAL); + return EINVAL; } else { proc_signalstart(p, 1); TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { - if ( (sigw = uth->uu_siglist & siglist) ) { + if ((sigw = uth->uu_siglist & siglist)) { break; } } @@ -1376,22 +1406,23 @@ __sigwait_nocancel(proc_t p, struct __sigwait_nocancel_args *uap, __unused int32 * save it here and mark the sigacts structure * to indicate this. */ - uth = ut; /* wait for it to be delivered to us */ + uth = ut; /* wait for it to be delivered to us */ ut->uu_oldmask = ut->uu_sigmask; ut->uu_flag |= UT_SAS_OLDMASK; if (siglist == (sigset_t)0) { proc_unlock(p); - return(EINVAL); + return EINVAL; } /* SIGKILL and SIGSTOP are not maskable as well */ - ut->uu_sigmask = ~(siglist|sigcantmask); - ut->uu_sigwait = siglist; + ut->uu_sigmask = ~(siglist | sigcantmask); + ut->uu_sigwait = siglist; /* No Continuations for now */ - error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE|PCATCH, "pause", 0); + error = msleep((caddr_t)&ut->uu_sigwait, &p->p_mlock, PPAUSE | PCATCH, "pause", 0); - if (error == ERESTART) + if (error == ERESTART) { error = 0; + } sigw = (ut->uu_sigwait & siglist); ut->uu_sigmask = ut->uu_oldmask; @@ -1401,8 +1432,9 @@ sigwait1: ut->uu_sigwait = 0; if (!error) { signum = ffs((unsigned int)sigw); - if (!signum) + if (!signum) { panic("sigwait with no signal wakeup"); + } /* Clear the pending signal in the thread it was delivered */ uth->uu_siglist &= ~(sigmask(signum)); @@ -1411,13 +1443,14 @@ sigwait1: #endif proc_unlock(p); - if (uap->sig != USER_ADDR_NULL) - error = copyout(&signum, uap->sig, sizeof(int)); - } else + if (uap->sig != USER_ADDR_NULL) { + error = copyout(&signum, uap->sig, sizeof(int)); + } + } else { proc_unlock(p); + } - return(error); - + return error; } int @@ -1432,24 +1465,27 @@ sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *r uth = (struct uthread *)get_bsdthread_info(current_thread()); pstk = &uth->uu_sigstk; - if ((uth->uu_flag & UT_ALTSTACK) == 0) + if ((uth->uu_flag & UT_ALTSTACK) == 0) { uth->uu_sigstk.ss_flags |= SA_DISABLE; + } onstack = pstk->ss_flags & SA_ONSTACK; if (uap->oss) { if (IS_64BIT_PROCESS(p)) { struct user64_sigaltstack ss64 = {}; - sigaltstack_kern_to_user64(pstk, &ss64); + sigaltstack_kern_to_user64(pstk, &ss64); error = copyout(&ss64, 
uap->oss, sizeof(ss64)); } else { struct user32_sigaltstack ss32 = {}; - sigaltstack_kern_to_user32(pstk, &ss32); + sigaltstack_kern_to_user32(pstk, &ss32); error = copyout(&ss32, uap->oss, sizeof(ss32)); } - if (error) - return (error); + if (error) { + return error; + } + } + if (uap->nss == USER_ADDR_NULL) { + return 0; } - if (uap->nss == USER_ADDR_NULL) - return (0); if (IS_64BIT_PROCESS(p)) { struct user64_sigaltstack ss64; error = copyin(uap->nss, &ss64, sizeof(ss64)); @@ -1459,29 +1495,33 @@ sigaltstack(__unused proc_t p, struct sigaltstack_args *uap, __unused int32_t *r error = copyin(uap->nss, &ss32, sizeof(ss32)); sigaltstack_user32_to_kern(&ss32, &ss); } - if (error) - return (error); - if ((ss.ss_flags & ~SA_DISABLE) != 0) { - return(EINVAL); + if (error) { + return error; + } + if ((ss.ss_flags & ~SA_DISABLE) != 0) { + return EINVAL; } if (ss.ss_flags & SA_DISABLE) { /* if we are here we are not in the signal handler ;so no need to check */ - if (uth->uu_sigstk.ss_flags & SA_ONSTACK) - return (EINVAL); + if (uth->uu_sigstk.ss_flags & SA_ONSTACK) { + return EINVAL; + } uth->uu_flag &= ~UT_ALTSTACK; uth->uu_sigstk.ss_flags = ss.ss_flags; - return (0); + return 0; + } + if (onstack) { + return EPERM; } - if (onstack) - return (EPERM); /* The older stacksize was 8K, enforce that one so no compat problems */ #define OLDMINSIGSTKSZ 8*1024 - if (ss.ss_size < OLDMINSIGSTKSZ) - return (ENOMEM); + if (ss.ss_size < OLDMINSIGSTKSZ) { + return ENOMEM; + } uth->uu_flag |= UT_ALTSTACK; - uth->uu_sigstk= ss; - return (0); + uth->uu_sigstk = ss; + return 0; } int @@ -1489,13 +1529,14 @@ kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval) { proc_t p; kauth_cred_t uc = kauth_cred_get(); - int posix = uap->posix; /* !0 if posix behaviour desired */ + int posix = uap->posix; /* !0 if posix behaviour desired */ AUDIT_ARG(pid, uap->pid); AUDIT_ARG(signum, uap->signum); - if ((u_int)uap->signum >= NSIG) - return (EINVAL); + if ((u_int)uap->signum >= NSIG) { + return EINVAL; + } if (uap->pid > 0) { /* kill single process */ if ((p = proc_find(uap->pid)) == NULL) { @@ -1504,34 +1545,35 @@ kill(proc_t cp, struct kill_args *uap, __unused int32_t *retval) * POSIX 1003.1-2001 requires returning success when killing a * zombie; see Rationale for kill(2). 
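The comment this hunk carries cites POSIX 1003.1-2001: kill() must report success when the target has exited but has not yet been reaped. A minimal user-space sketch of the same rule (illustration only, not part of the patch; error handling trimmed):

/* Illustration only -- POSIX says kill() on a zombie (exited but
 * unreaped) process succeeds. */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	pid_t child = fork();

	if (child == 0) {
		_exit(0);                  /* child exits at once */
	}
	sleep(1);                          /* child is now a zombie */
	if (kill(child, SIGTERM) == 0) {
		printf("kill() on zombie %d succeeded, as POSIX requires\n",
		    (int)child);
	}
	(void)waitpid(child, NULL, 0);     /* reap it */
	return 0;
}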
*/ - return (0); + return 0; } - return (ESRCH); + return ESRCH; } AUDIT_ARG(process, p); if (!cansignal(cp, uc, p, uap->signum)) { proc_rele(p); - return(EPERM); + return EPERM; } - if (uap->signum) + if (uap->signum) { psignal(p, uap->signum); + } proc_rele(p); - return (0); + return 0; } switch (uap->pid) { case -1: /* broadcast signal */ - return (killpg1(cp, uap->signum, 0, 1, posix)); + return killpg1(cp, uap->signum, 0, 1, posix); case 0: /* signal own process group */ - return (killpg1(cp, uap->signum, 0, 0, posix)); + return killpg1(cp, uap->signum, 0, 0, posix); default: /* negative explicit process group */ - return (killpg1(cp, uap->signum, -(uap->pid), 0, posix)); + return killpg1(cp, uap->signum, -(uap->pid), 0, posix); } /* NOTREACHED */ } os_reason_t build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size, - user_addr_t reason_string, uint64_t reason_flags) + user_addr_t reason_string, uint64_t reason_flags) { os_reason_t exit_reason = OS_REASON_NULL; @@ -1555,7 +1597,7 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use exit_reason->osr_flags |= (reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER); if ((reason_flags & OS_REASON_FLAG_MASK_ALLOWED_FROM_USER) != reason_flags) { printf("build_userspace_exit_reason: illegal flags passed from userspace (some masked off) 0x%llx, ns: %u, code 0x%llx\n", - reason_flags, reason_namespace, reason_code); + reason_flags, reason_namespace, reason_code); } if (!(exit_reason->osr_flags & OS_REASON_FLAG_NO_CRASH_REPORT)) { @@ -1565,7 +1607,7 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use if (payload != USER_ADDR_NULL) { if (payload_size == 0) { printf("build_userspace_exit_reason: exit reason with namespace %u, nonzero payload but zero length\n", - reason_namespace); + reason_namespace); exit_reason->osr_flags |= OS_REASON_FLAG_BAD_PARAMS; payload = USER_ADDR_NULL; } else { @@ -1585,7 +1627,7 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use if (reason_user_desc != NULL) { error = copyinstr(reason_string, (void *) reason_user_desc, - EXIT_REASON_USER_DESC_MAX_LEN, &reason_user_desc_len); + EXIT_REASON_USER_DESC_MAX_LEN, &reason_user_desc_len); if (error == 0) { num_items_to_copy++; @@ -1617,12 +1659,11 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use if (reason_user_desc != NULL && reason_user_desc_len != 0) { if (KERN_SUCCESS == kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor, - EXIT_REASON_USER_DESC, - reason_user_desc_len, - &data_addr)) { - + EXIT_REASON_USER_DESC, + reason_user_desc_len, + &data_addr)) { kcdata_memcpy(&exit_reason->osr_kcd_descriptor, (mach_vm_address_t) data_addr, - reason_user_desc, reason_user_desc_len); + reason_user_desc, reason_user_desc_len); } else { printf("build_userspace_exit_reason: failed to allocate space for reason string\n"); goto out_failed_copyin; @@ -1631,10 +1672,10 @@ build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, use if (payload != USER_ADDR_NULL) { if (KERN_SUCCESS == - kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor, - EXIT_REASON_USER_PAYLOAD, - payload_size, - &data_addr)) { + kcdata_get_memory_addr(&exit_reason->osr_kcd_descriptor, + EXIT_REASON_USER_PAYLOAD, + payload_size, + &data_addr)) { error = copyin(payload, (void *) data_addr, payload_size); if (error) { printf("build_userspace_exit_reason: failed to copy in payload data with error %d\n", 
error); @@ -1670,8 +1711,8 @@ out_failed_copyin: static int terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t reason_namespace, - uint64_t reason_code, user_addr_t payload, uint32_t payload_size, - user_addr_t reason_string, uint64_t reason_flags) + uint64_t reason_code, user_addr_t payload, uint32_t payload_size, + user_addr_t reason_string, uint64_t reason_flags) { proc_t target_proc = PROC_NULL; kauth_cred_t cur_cred = kauth_cred_get(); @@ -1705,11 +1746,11 @@ terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t } KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - target_proc->p_pid, reason_namespace, - reason_code, 0, 0); + target_proc->p_pid, reason_namespace, + reason_code, 0, 0); signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size, - reason_string, (reason_flags | OS_REASON_FLAG_NO_CRASHED_TID)); + reason_string, (reason_flags | OS_REASON_FLAG_NO_CRASHED_TID)); if (target_pid == cur_proc->p_pid) { /* @@ -1729,10 +1770,10 @@ terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t int terminate_with_payload(struct proc *cur_proc, struct terminate_with_payload_args *args, - __unused int32_t *retval) + __unused int32_t *retval) { return terminate_with_payload_internal(cur_proc, args->pid, args->reason_namespace, args->reason_code, args->payload, - args->payload_size, args->reason_string, args->reason_flags); + args->payload_size, args->reason_string, args->reason_flags); } static int @@ -1744,15 +1785,15 @@ killpg1_allfilt(proc_t p, void * arg) * Don't signal initproc, a system process, or the current process if POSIX * isn't specified. */ - return (p->p_pid > 1 && !(p->p_flag & P_SYSTEM) && - (kfargp->posix ? true : p != kfargp->curproc)); + return p->p_pid > 1 && !(p->p_flag & P_SYSTEM) && + (kfargp->posix ? true : p != kfargp->curproc); } static int killpg1_pgrpfilt(proc_t p, __unused void * arg) { /* XXX shouldn't this allow signalling zombies? */ - return (p->p_pid > 1 && !(p->p_flag & P_SYSTEM) && p->p_stat != SZOMB); + return p->p_pid > 1 && !(p->p_flag & P_SYSTEM) && p->p_stat != SZOMB; } static int @@ -1808,14 +1849,14 @@ killpg1(proc_t curproc, int signum, int pgid, int all, int posix) .posix = posix, .curproc = curproc }; proc_iterate(PROC_ALLPROCLIST | PROC_ZOMBPROCLIST, killpg1_callback, - &karg, killpg1_allfilt, &kfarg); + &karg, killpg1_allfilt, &kfarg); } else { if (pgid == 0) { /* * Send to current the current process' process group. */ pgrp = proc_pgrp(curproc); - } else { + } else { pgrp = pgfind(pgid); if (pgrp == NULL) { error = ESRCH; @@ -1825,12 +1866,12 @@ killpg1(proc_t curproc, int signum, int pgid, int all, int posix) /* PGRP_DROPREF drops the pgrp refernce */ pgrp_iterate(pgrp, PGRP_DROPREF, killpg1_callback, &karg, - killpg1_pgrpfilt, NULL); + killpg1_pgrpfilt, NULL); } error = (karg.nfound > 0 ? 0 : (posix ? 
EPERM : ESRCH)); out: kauth_cred_unref(&uc); - return (error); + return error; } /* @@ -1857,20 +1898,21 @@ pgsignal_filt(proc_t p, void * arg) { int checkctty = *(int*)arg; - if ((checkctty == 0) || p->p_flag & P_CONTROLT) - return(1); - else - return(0); + if ((checkctty == 0) || p->p_flag & P_CONTROLT) { + return 1; + } else { + return 0; + } } static int pgsignal_callback(proc_t p, void * arg) { - int signum = *(int*)arg; + int signum = *(int*)arg; psignal(p, signum); - return(PROC_RETURNED); + return PROC_RETURNED; } @@ -1905,18 +1947,21 @@ threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boo proc_t p; int mask; - if ((u_int)signum >= NSIG || signum == 0) + if ((u_int)signum >= NSIG || signum == 0) { return; + } mask = sigmask(signum); - if ((mask & threadmask) == 0) + if ((mask & threadmask) == 0) { return; + } sig_task = get_threadtask(sig_actthread); p = (proc_t)(get_bsdtask_info(sig_task)); uth = get_bsdthread_info(sig_actthread); - if (uth->uu_flag & UT_VFORK) + if (uth->uu_flag & UT_VFORK) { p = uth->uu_proc; + } proc_lock(p); if (!(p->p_lflag & P_LTRACED) && (p->p_sigignore & mask)) { @@ -1929,12 +1974,11 @@ threadsignal(thread_t sig_actthread, int signum, mach_exception_code_t code, boo /* Attempt to establish whether the signal will be fatal (mirrors logic in psignal_internal()) */ if (set_exitreason && ((p->p_lflag & P_LTRACED) || (!(uth->uu_sigwait & mask) - && !(uth->uu_sigmask & mask) && !(p->p_sigcatch & mask))) && - !(mask & stopsigmask) && !(mask & contsigmask)) { - + && !(uth->uu_sigmask & mask) && !(p->p_sigcatch & mask))) && + !(mask & stopsigmask) && !(mask & contsigmask)) { if (uth->uu_exit_reason == OS_REASON_NULL) { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_SIGNAL, signum, 0, 0); + p->p_pid, OS_REASON_SIGNAL, signum, 0, 0); os_reason_t signal_reason = build_signal_reason(signum, "exc handler"); @@ -1960,8 +2004,9 @@ set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked) os_reason_t exit_reason = (os_reason_t)reason; - if (exit_reason == OS_REASON_NULL) + if (exit_reason == OS_REASON_NULL) { return; + } if (!proc_locked) { targ_task = get_threadtask(th); @@ -2007,29 +2052,30 @@ get_signalthread(proc_t p, int signum, thread_t * thr) *thr = THREAD_NULL; if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { - sig_thread = p->p_vforkact; + sig_thread = p->p_vforkact; kret = check_actforsig(sig_task, sig_thread, 1); - if (kret == KERN_SUCCESS) { + if (kret == KERN_SUCCESS) { *thr = sig_thread; - return(KERN_SUCCESS); - }else - return(KERN_FAILURE); + return KERN_SUCCESS; + } else { + return KERN_FAILURE; + } } TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { - if(((uth->uu_flag & UT_NO_SIGMASK)== 0) && - (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) { + if (((uth->uu_flag & UT_NO_SIGMASK) == 0) && + (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask))) { if (check_actforsig(p->task, uth->uu_context.vc_thread, 1) == KERN_SUCCESS) { *thr = uth->uu_context.vc_thread; - return(KERN_SUCCESS); + return KERN_SUCCESS; } } } if (get_signalact(p->task, thr, 1) == KERN_SUCCESS) { - return(KERN_SUCCESS); + return KERN_SUCCESS; } - return(KERN_FAILURE); + return KERN_FAILURE; } static os_reason_t @@ -2049,7 +2095,7 @@ build_signal_reason(int signum, const char *procname) } reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) + - sizeof(sender_proc->p_pid)); + sizeof(sender_proc->p_pid)); ret = 
os_reason_alloc_buffer_noblock(signal_reason, reason_buffer_size_estimate); if (ret != 0) { @@ -2058,29 +2104,29 @@ build_signal_reason(int signum, const char *procname) } if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PID, - sizeof(sender_proc->p_pid), &data_addr)) { + sizeof(sender_proc->p_pid), &data_addr)) { kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_pid, - sizeof(sender_proc->p_pid)); + sizeof(sender_proc->p_pid)); } else { printf("build_signal_reason: exceeded space in signal reason buf, unable to log PID\n"); } proc_name_length = sizeof(sender_proc->p_name); if (KERN_SUCCESS == kcdata_get_memory_addr(&signal_reason->osr_kcd_descriptor, KCDATA_TYPE_PROCNAME, - proc_name_length, &data_addr)) { + proc_name_length, &data_addr)) { if (procname) { char truncated_procname[proc_name_length]; strncpy((char *) &truncated_procname, procname, proc_name_length); truncated_procname[proc_name_length - 1] = '\0'; kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, truncated_procname, - strlen((char *) &truncated_procname)); + strlen((char *) &truncated_procname)); } else if (*sender_proc->p_name) { kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &sender_proc->p_name, - sizeof(sender_proc->p_name)); + sizeof(sender_proc->p_name)); } else { kcdata_memcpy(&signal_reason->osr_kcd_descriptor, data_addr, &default_sender_procname, - strlen(default_sender_procname) + 1); + strlen(default_sender_procname) + 1); } } else { printf("build_signal_reason: exceeded space in signal reason buf, unable to log procname\n"); @@ -2110,41 +2156,42 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, { int prop; user_addr_t action = USER_ADDR_NULL; - proc_t sig_proc; - thread_t sig_thread; - task_t sig_task; - int mask; - struct uthread *uth; - kern_return_t kret; - uid_t r_uid; - proc_t pp; - kauth_cred_t my_cred; - char *launchd_exit_reason_desc = NULL; - boolean_t update_thread_policy = FALSE; - - if ((u_int)signum >= NSIG || signum == 0) + proc_t sig_proc; + thread_t sig_thread; + task_t sig_task; + int mask; + struct uthread *uth; + kern_return_t kret; + uid_t r_uid; + proc_t pp; + kauth_cred_t my_cred; + char *launchd_exit_reason_desc = NULL; + boolean_t update_thread_policy = FALSE; + + if ((u_int)signum >= NSIG || signum == 0) { panic("psignal: bad signal number %d", signum); + } mask = sigmask(signum); prop = sigprop[signum]; #if SIGNAL_DEBUG - if(rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) { - ram_printf(3); - } + if (rdebug_proc && (p != PROC_NULL) && (p == rdebug_proc)) { + ram_printf(3); + } #endif /* SIGNAL_DEBUG */ /* catch unexpected initproc kills early for easier debuggging */ if (signum == SIGKILL && p == initproc) { if (signal_reason == NULL) { panic_plain("unexpected SIGKILL of %s %s (no reason provided)", - (p->p_name[0] != '\0' ? p->p_name : "initproc"), - ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : "")); + (p->p_name[0] != '\0' ? p->p_name : "initproc"), + ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : "")); } else { launchd_exit_reason_desc = launchd_exit_reason_get_string_desc(signal_reason); panic_plain("unexpected SIGKILL of %s %s with reason -- namespace %d code 0x%llx description %." LAUNCHD_PANIC_REASON_STRING_MAXLEN "s", - (p->p_name[0] != '\0' ? p->p_name : "initproc"), - ((p->p_csflags & CS_KILLED) ? "(CS_KILLED)" : ""), + (p->p_name[0] != '\0' ? p->p_name : "initproc"), + ((p->p_csflags & CS_KILLED) ? 
"(CS_KILLED)" : ""), signal_reason->osr_namespace, signal_reason->osr_code, launchd_exit_reason_desc ? launchd_exit_reason_desc : "none"); } @@ -2192,12 +2239,13 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, return; } - if( (flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) { + if ((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) { proc_knote(sig_proc, NOTE_SIGNAL | signum); } - if ((flavor & PSIG_LOCKED)== 0) + if ((flavor & PSIG_LOCKED) == 0) { proc_signalstart(sig_proc, 0); + } /* Don't send signals to a process that has ignored them. */ if (((flavor & PSIG_VFORK) == 0) && ((sig_proc->p_lflag & P_LTRACED) == 0) && (sig_proc->p_sigignore & mask)) { @@ -2222,8 +2270,8 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, } else if (flavor & PSIG_TRY_THREAD) { uth = get_bsdthread_info(sig_thread); if (((uth->uu_flag & UT_NO_SIGMASK) == 0) && - (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) && - ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) { + (((uth->uu_sigmask & mask) == 0) || (uth->uu_sigwait & mask)) && + ((kret = check_actforsig(sig_proc->task, sig_thread, 1)) == KERN_SUCCESS)) { /* deliver to specified thread */ } else { /* deliver to any willing thread */ @@ -2250,9 +2298,9 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, */ if ((flavor & PSIG_VFORK) == 0) { - if (sig_proc->p_lflag & P_LTRACED) + if (sig_proc->p_lflag & P_LTRACED) { action = SIG_DFL; - else { + } else { /* * If the signal is being ignored, * then we forget about it immediately. @@ -2260,27 +2308,31 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, * and if it is set to SIG_IGN, * action will be SIG_DFL here.) */ - if (sig_proc->p_sigignore & mask) + if (sig_proc->p_sigignore & mask) { goto sigout_locked; + } - if (uth->uu_sigwait & mask) + if (uth->uu_sigwait & mask) { action = KERN_SIG_WAIT; - else if (uth->uu_sigmask & mask) + } else if (uth->uu_sigmask & mask) { action = KERN_SIG_HOLD; - else if (sig_proc->p_sigcatch & mask) + } else if (sig_proc->p_sigcatch & mask) { action = KERN_SIG_CATCH; - else + } else { action = SIG_DFL; + } } } /* TODO: p_nice isn't hooked up to the scheduler... */ if (sig_proc->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) && - (sig_proc->p_lflag & P_LTRACED) == 0) - sig_proc->p_nice = NZERO; + (sig_proc->p_lflag & P_LTRACED) == 0) { + sig_proc->p_nice = NZERO; + } - if (prop & SA_CONT) + if (prop & SA_CONT) { uth->uu_siglist &= ~stopsigmask; + } if (prop & SA_STOP) { struct pgrp *pg; @@ -2292,7 +2344,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, */ pg = proc_pgrp(sig_proc); if (prop & SA_TTYSTOP && pg->pg_jobc == 0 && - action == SIG_DFL) { + action == SIG_DFL) { pg_rele(pg); goto sigout_locked; } @@ -2307,8 +2359,9 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, * except that stopped processes must be continued by SIGCONT. */ /* vfork will not go thru as action is SIG_DFL */ - if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) + if ((action == KERN_SIG_HOLD) && ((prop & SA_CONT) == 0 || sig_proc->p_stat != SSTOP)) { goto sigout_locked; + } /* * SIGKILL priority twiddling moved here from above because @@ -2328,14 +2381,16 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, * issig() and stop for the parent. 
*/ if (sig_proc->p_lflag & P_LTRACED) { - if (sig_proc->p_stat != SSTOP) + if (sig_proc->p_stat != SSTOP) { goto runlocked; - else + } else { goto sigout_locked; + } } - if ((flavor & PSIG_VFORK) != 0) + if ((flavor & PSIG_VFORK) != 0) { goto runlocked; + } if (action == KERN_SIG_WAIT) { #if CONFIG_DTRACE @@ -2374,7 +2429,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, OSBitOrAtomic(P_CONTINUED, &sig_proc->p_flag); (void) task_resume_internal(sig_task); sig_proc->p_stat = SRUN; - } else if (sig_proc->p_stat == SSTOP) { + } else if (sig_proc->p_stat == SSTOP) { goto sigout_locked; } /* @@ -2433,8 +2488,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, pp = proc_parentholdref(sig_proc); stop(sig_proc, pp); - if (( pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) { - + if ((pp != PROC_NULL) && ((pp->p_flag & P_NOCLDSTOP) == 0)) { my_cred = kauth_cred_proc_ref(sig_proc); r_uid = kauth_cred_getruid(my_cred); kauth_cred_unref(&my_cred); @@ -2446,10 +2500,11 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, * when sent to the parent must set the * child's signal number into si_status. */ - if (signum != SIGSTOP) + if (signum != SIGSTOP) { pp->si_status = WEXITSTATUS(sig_proc->p_xstat); - else + } else { pp->si_status = W_EXITCODE(signum, signum); + } pp->si_code = CLD_STOPPED; pp->si_uid = r_uid; proc_unlock(sig_proc); @@ -2469,11 +2524,11 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, DTRACE_PROC3(signal__send, thread_t, sig_thread, proc_t, p, int, signum); switch (signum) { - /* - * Signals ignored by default have been dealt - * with already, since their bits are on in - * p_sigignore. - */ + /* + * Signals ignored by default have been dealt + * with already, since their bits are on in + * p_sigignore. + */ case SIGKILL: /* @@ -2498,7 +2553,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, if (uth->uu_exit_reason == OS_REASON_NULL) { if (signal_reason == OS_REASON_NULL) { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - sig_proc->p_pid, OS_REASON_SIGNAL, signum, 0, 0); + sig_proc->p_pid, OS_REASON_SIGNAL, signum, 0, 0); signal_reason = build_signal_reason(signum, NULL); } @@ -2531,7 +2586,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, * cause their handlers to fire. If it's only * the SIGCONT, then don't wake up. 
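The si_status updates in the stop path above reuse the classic BSD wait-status layout: W_EXITCODE() packs an exit code into the high byte and a signal number into the low byte, and WEXITSTATUS() unpacks it. A self-contained sketch with local stand-in macros (the kernel's own definitions live in sys/wait.h):

/* Illustration only: the BSD wait-status layout behind W_EXITCODE and
 * WEXITSTATUS. Macro names here are local stand-ins. */
#include <stdio.h>

#define PACK_STATUS(ret, sig)  (((ret) << 8) | (sig))  /* cf. W_EXITCODE */
#define EXIT_PART(x)           (((x) >> 8) & 0xff)     /* cf. WEXITSTATUS */
#define SIG_PART(x)            ((x) & 0x7f)            /* cf. WTERMSIG */

int
main(void)
{
	/* For a SIGSTOP (17 on BSD/macOS) the hunk stores the signal in
	 * both halves: W_EXITCODE(signum, signum). */
	int status = PACK_STATUS(17, 17);

	printf("raw=0x%04x exit=%d sig=%d\n",
	    status, EXIT_PART(status), SIG_PART(status));
	return 0;
}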
*/ - if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) { + if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (((uth->uu_siglist & ~uth->uu_sigmask) & ~sig_proc->p_sigignore) & ~mask)) { uth->uu_siglist &= ~mask; sig_proc->p_stat = SRUN; goto runlocked; @@ -2547,7 +2602,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, * the process, and for which there is no handler, * needs to act like SIGKILL */ - if (((flavor & (PSIG_VFORK|PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) { + if (((flavor & (PSIG_VFORK | PSIG_THREAD)) == 0) && (action == SIG_DFL) && (prop & SA_KILL)) { sig_proc->p_stat = SRUN; kret = thread_abort(sig_thread); update_thread_policy = (kret == KERN_SUCCESS); @@ -2555,7 +2610,7 @@ psignal_internal(proc_t p, task_t task, thread_t thread, int flavor, int signum, if (uth->uu_exit_reason == OS_REASON_NULL) { if (signal_reason == OS_REASON_NULL) { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - sig_proc->p_pid, OS_REASON_SIGNAL, signum, 0, 0); + sig_proc->p_pid, OS_REASON_SIGNAL, signum, 0, 0); signal_reason = build_signal_reason(signum, NULL); } @@ -2585,20 +2640,22 @@ runlocked: * while we were stopped), check for a signal from the debugger. */ if (sig_proc->p_stat == SSTOP) { - if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0) + if ((sig_proc->p_lflag & P_LTRACED) != 0 && sig_proc->p_xstat != 0) { uth->uu_siglist |= sigmask(sig_proc->p_xstat); + } if ((flavor & PSIG_VFORK) != 0) { sig_proc->p_stat = SRUN; } } else { /* - * setrunnable(p) in BSD and - * Wake up the thread if it is interruptible. - */ + * setrunnable(p) in BSD and + * Wake up the thread if it is interruptible. + */ sig_proc->p_stat = SRUN; - if ((flavor & PSIG_VFORK) == 0) + if ((flavor & PSIG_VFORK) == 0) { thread_abort_safely(sig_thread); + } } sigout_locked: @@ -2615,7 +2672,7 @@ sigout_locked: sigout_unlocked: os_reason_free(signal_reason); - if ((flavor & PSIG_LOCKED)== 0) { + if ((flavor & PSIG_LOCKED) == 0) { proc_signalend(sig_proc, 0); } } @@ -2702,7 +2759,7 @@ issignal_locked(proc_t p) cur_act = current_thread(); #if SIGNAL_DEBUG - if(rdebug_proc && (p == rdebug_proc)) { + if (rdebug_proc && (p == rdebug_proc)) { ram_printf(3); } #endif /* SIGNAL_DEBUG */ @@ -2720,9 +2777,10 @@ issignal_locked(proc_t p) for (;;) { sigbits = ut->uu_siglist & ~ut->uu_sigmask; - if (p->p_lflag & P_LPPWAIT) + if (p->p_lflag & P_LPPWAIT) { sigbits &= ~stopsigmask; - if (sigbits == 0) { /* no signal to send */ + } + if (sigbits == 0) { /* no signal to send */ retval = 0; goto out; } @@ -2740,12 +2798,12 @@ issignal_locked(proc_t p) continue; } - if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) { + if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) { /* * If traced, deliver the signal to the debugger, and wait to be * released. */ - task_t task; + task_t task; p->p_xstat = signum; if (p->p_lflag & P_LSIGEXC) { @@ -2781,9 +2839,9 @@ issignal_locked(proc_t p) } /* - * XXX Have to really stop for debuggers; - * XXX stop() doesn't do the right thing. - */ + * XXX Have to really stop for debuggers; + * XXX stop() doesn't do the right thing. + */ task = p->task; task_suspend_internal(task); @@ -2838,8 +2896,9 @@ issignal_locked(proc_t p) * otherwise we just look for signals again. */ signum = p->p_xstat; - if (signum == 0) + if (signum == 0) { continue; + } /* * Put the new signal into p_siglist. 
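The SIGCONT test at the top of this hunk reduces to a single mask expression: wake the stopped process only if something other than the continue signal itself is pending, unmasked, and not ignored. Restated as a stand-alone predicate (illustration only; signal numbers are stand-ins):

/* Illustration only: wake a stopped process for SIGCONT only when some
 * other deliverable signal rides along with it. */
#include <stdio.h>

#define SIGBIT(n) (1u << ((n) - 1))

static int
other_deliverable_pending(unsigned siglist, unsigned sigmask,
    unsigned sigignore, unsigned contmask)
{
	return (((siglist & ~sigmask) & ~sigignore) & ~contmask) != 0;
}

int
main(void)
{
	unsigned cont = SIGBIT(19);     /* stand-in for sigmask(SIGCONT) */

	/* only the continue signal itself pending: stay asleep */
	printf("%d\n", other_deliverable_pending(cont, 0, 0, cont));
	/* an unmasked SIGTERM-like signal pending too: wake up */
	printf("%d\n", other_deliverable_pending(cont | SIGBIT(15), 0, 0, cont));
	/* the extra signal is blocked: stay asleep again */
	printf("%d\n",
	    other_deliverable_pending(cont | SIGBIT(15), SIGBIT(15), 0, cont));
	return 0;
}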
If the @@ -2847,8 +2906,9 @@ issignal_locked(proc_t p) */ mask = sigmask(signum); ut->uu_siglist |= mask; - if (ut->uu_sigmask & mask) + if (ut->uu_sigmask & mask) { continue; + } } /* @@ -2858,7 +2918,6 @@ issignal_locked(proc_t p) */ switch ((long)p->p_sigacts->ps_sigact[signum]) { - case (long)SIG_DFL: /* * If there is a pending stop signal to process @@ -2873,8 +2932,8 @@ issignal_locked(proc_t p) proc_unlock(p); pg = proc_pgrp(p); if (p->p_lflag & P_LTRACED || - (pg->pg_jobc == 0 && - prop & SA_TTYSTOP)) { + (pg->pg_jobc == 0 && + prop & SA_TTYSTOP)) { proc_lock(p); pg_rele(pg); break; /* ignore signal */ @@ -2903,8 +2962,9 @@ issignal_locked(proc_t p) psignal(pp, SIGCHLD); } - if (pp != PROC_NULL) + if (pp != PROC_NULL) { proc_parentdropref(pp, 0); + } } proc_lock(p); break; @@ -2925,8 +2985,9 @@ issignal_locked(proc_t p) * than SIGCONT, unless process is traced. */ if ((prop & SA_CONT) == 0 && - (p->p_lflag & P_LTRACED) == 0) + (p->p_lflag & P_LTRACED) == 0) { printf("issignal\n"); + } break; /* ignore signal */ default: @@ -2936,7 +2997,6 @@ issignal_locked(proc_t p) /* If we dropped through, the signal was ignored - remove it from pending list. */ ut->uu_siglist &= ~mask; - } /* for(;;) */ /* NOTREACHED */ @@ -2958,31 +3018,34 @@ CURSIG(proc_t p) thread_t cur_act; struct uthread * ut; int retnum = 0; - + cur_act = current_thread(); ut = get_bsdthread_info(cur_act); - if (ut->uu_siglist == 0) - return (0); + if (ut->uu_siglist == 0) { + return 0; + } - if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0)) - return (0); + if (((ut->uu_siglist & ~ut->uu_sigmask) == 0) && ((p->p_lflag & P_LTRACED) == 0)) { + return 0; + } sigbits = ut->uu_siglist & ~ut->uu_sigmask; - for(;;) { - if (p->p_lflag & P_LPPWAIT) + for (;;) { + if (p->p_lflag & P_LPPWAIT) { sigbits &= ~stopsigmask; - if (sigbits == 0) { /* no signal to send */ - return (retnum); + } + if (sigbits == 0) { /* no signal to send */ + return retnum; } signum = ffs((long)sigbits); mask = sigmask(signum); prop = sigprop[signum]; - sigbits &= ~mask; /* take the signal out */ + sigbits &= ~mask; /* take the signal out */ /* * We should see pending but ignored signals @@ -2993,7 +3056,7 @@ CURSIG(proc_t p) } if (p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0) { - return(signum); + return signum; } /* @@ -3003,7 +3066,6 @@ CURSIG(proc_t p) */ switch ((long)p->p_sigacts->ps_sigact[signum]) { - case (long)SIG_DFL: /* * If there is a pending stop signal to process @@ -3018,10 +3080,10 @@ CURSIG(proc_t p) pg = proc_pgrp(p); if (p->p_lflag & P_LTRACED || - (pg->pg_jobc == 0 && - prop & SA_TTYSTOP)) { + (pg->pg_jobc == 0 && + prop & SA_TTYSTOP)) { pg_rele(pg); - break; /* == ignore */ + break; /* == ignore */ } pg_rele(pg); retnum = signum; @@ -3031,11 +3093,11 @@ CURSIG(proc_t p) * Except for SIGCONT, shouldn't get here. * Default action is to ignore; drop it. */ - break; /* == ignore */ + break; /* == ignore */ } else { - return (signum); + return signum; } - /*NOTREACHED*/ + /*NOTREACHED*/ case (long)SIG_IGN: /* @@ -3044,16 +3106,17 @@ CURSIG(proc_t p) * than SIGCONT, unless process is traced. */ if ((prop & SA_CONT) == 0 && - (p->p_lflag & P_LTRACED) == 0) + (p->p_lflag & P_LTRACED) == 0) { printf("issignal\n"); - break; /* == ignore */ + } + break; /* == ignore */ default: /* * This signal has an action, let * postsig() process it. 
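Both issignal_locked() and the CURSIG() loop above drain pending signals the same way: ffs() picks the lowest-numbered pending signal, whose bit is then removed from the working set before the next iteration. A minimal runnable version of that scan (illustration only):

/* Illustration only: the lowest-signal-first scan shared by
 * issignal_locked() and CURSIG(). */
#include <stdio.h>
#include <strings.h>            /* ffs() */

#define SIGBIT(n) (1u << ((n) - 1))

int
main(void)
{
	unsigned sigbits = SIGBIT(2) | SIGBIT(15) | SIGBIT(30);

	while (sigbits != 0) {
		int signum = ffs((int)sigbits); /* lowest set bit, 1-based */

		sigbits &= ~SIGBIT(signum);     /* take the signal out */
		printf("deliver signal %d\n", signum);
	}
	return 0;
}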
*/ - return (signum); + return signum; } } /* NOTREACHED */ @@ -3092,13 +3155,15 @@ postsig_locked(int signum) os_reason_t ut_exit_reason = OS_REASON_NULL; #if DIAGNOSTIC - if (signum == 0) + if (signum == 0) { panic("postsig"); + } /* * This must be called on master cpu */ - if (cpu_number() != master_cpu) + if (cpu_number() != master_cpu) { panic("psig not on master"); + } #endif /* @@ -3135,14 +3200,15 @@ postsig_locked(int signum) proc_signalend(p, 1); proc_unlock(p); #if CONFIG_COREDUMP - if (coredump(p, 0, 0) == 0) + if (coredump(p, 0, 0) == 0) { signum |= WCOREFLAG; + } #endif - } else { + } else { proc_signalend(p, 1); proc_unlock(p); } - + #if CONFIG_DTRACE bzero((caddr_t)&(ut->t_dtrace_siginfo), sizeof(ut->t_dtrace_siginfo)); @@ -3159,14 +3225,14 @@ postsig_locked(int signum) default: break; } - + DTRACE_PROC3(signal__handle, int, signum, siginfo_t *, &(ut->t_dtrace_siginfo), - void (*)(void), SIG_DFL); + void (*)(void), SIG_DFL); #endif KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE, - p->p_pid, W_EXITCODE(0, signum), 3, 0, 0); + p->p_pid, W_EXITCODE(0, signum), 3, 0, 0); exit_with_reason(p, W_EXITCODE(0, signum), (int *)NULL, TRUE, TRUE, 0, ut_exit_reason); @@ -3177,9 +3243,10 @@ postsig_locked(int signum) * If we get here, the signal must be caught. */ #if DIAGNOSTIC - if (catcher == SIG_IGN || (ut->uu_sigmask & mask)) + if (catcher == SIG_IGN || (ut->uu_sigmask & mask)) { log(LOG_WARNING, - "postsig: processing masked or ignored signal\n"); + "postsig: processing masked or ignored signal\n"); + } #endif /* @@ -3195,14 +3262,17 @@ postsig_locked(int signum) returnmask = ut->uu_oldmask; ut->uu_flag &= ~UT_SAS_OLDMASK; ut->uu_oldmask = 0; - } else + } else { returnmask = ut->uu_sigmask; + } ut->uu_sigmask |= ps->ps_catchmask[signum]; - if ((ps->ps_signodefer & mask) == 0) + if ((ps->ps_signodefer & mask) == 0) { ut->uu_sigmask |= mask; + } if ((signum != SIGILL) && (signum != SIGTRAP) && (ps->ps_sigreset & mask)) { - if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE)) + if ((signum != SIGCONT) && (sigprop[signum] & SA_IGNORE)) { p->p_sigignore |= mask; + } ps->ps_sigact[signum] = SIG_DFL; ps->ps_siginfo &= ~mask; ps->ps_signodefer &= ~mask; @@ -3242,14 +3312,14 @@ filt_sigattach(struct knote *kn, __unused struct kevent_internal_s *kev) proc_klist_unlock(); /* edge-triggered events can't have fired before we attached */ - return (0); + return 0; } /* * remove the knote from the process list, if it hasn't already - * been removed by exit processing. + * been removed by exit processing. 
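The ps_sigreset block in postsig_locked() above is the kernel half of SA_RESETHAND: after one delivery the disposition reverts to SIG_DFL. The effect is observable from user space (sketch only; SIGUSR1 is chosen because its handler can safely do nothing):

/* Illustration only: SA_RESETHAND observed from user space -- one
 * delivery, then the disposition is back to SIG_DFL. */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void
handler(int sig)
{
	(void)sig;                      /* nothing to do; just get delivered */
}

int
main(void)
{
	struct sigaction sa, cur;

	memset(&sa, 0, sizeof(sa));
	sigemptyset(&sa.sa_mask);
	sa.sa_handler = handler;
	sa.sa_flags = SA_RESETHAND;     /* the kernel tracks this in ps_sigreset */
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);                 /* first delivery runs the handler... */

	sigaction(SIGUSR1, NULL, &cur); /* ...and resets the disposition */
	printf("after one delivery: %s\n",
	    cur.sa_handler == SIG_DFL ? "SIG_DFL (reset)" : "still caught");
	return 0;
}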
*/ - + static void filt_sigdetach(struct knote *kn) { @@ -3273,17 +3343,17 @@ filt_sigdetach(struct knote *kn) static int filt_signal(struct knote *kn, long hint) { - if (hint & NOTE_SIGNAL) { hint &= ~NOTE_SIGNAL; - if (kn->kn_id == (unsigned int)hint) + if (kn->kn_id == (unsigned int)hint) { kn->kn_data++; + } } else if (hint & NOTE_EXIT) { panic("filt_signal: detected NOTE_EXIT event"); } - return (kn->kn_data != 0); + return kn->kn_data != 0; } static int @@ -3338,12 +3408,13 @@ bsd_ast(thread_t thread) { proc_t p = current_proc(); struct uthread *ut = get_bsdthread_info(thread); - int signum; + int signum; user_addr_t pc; static int bsd_init_done = 0; - if (p == NULL) + if (p == NULL) { return; + } /* don't run bsd ast on exec copy or exec'ed tasks */ if (task_did_exec(current_task()) || task_is_exec_copy(current_task())) { @@ -3357,37 +3428,39 @@ bsd_ast(thread_t thread) } if (timerisset(&p->p_vtimer_user.it_value)) { - uint32_t microsecs; + uint32_t microsecs; task_vtimer_update(p->task, TASK_VTIMER_USER, &microsecs); if (!itimerdecr(p, &p->p_vtimer_user, microsecs)) { - if (timerisset(&p->p_vtimer_user.it_value)) + if (timerisset(&p->p_vtimer_user.it_value)) { task_vtimer_set(p->task, TASK_VTIMER_USER); - else + } else { task_vtimer_clear(p->task, TASK_VTIMER_USER); + } psignal_try_thread(p, thread, SIGVTALRM); } } if (timerisset(&p->p_vtimer_prof.it_value)) { - uint32_t microsecs; + uint32_t microsecs; task_vtimer_update(p->task, TASK_VTIMER_PROF, &microsecs); if (!itimerdecr(p, &p->p_vtimer_prof, microsecs)) { - if (timerisset(&p->p_vtimer_prof.it_value)) + if (timerisset(&p->p_vtimer_prof.it_value)) { task_vtimer_set(p->task, TASK_VTIMER_PROF); - else + } else { task_vtimer_clear(p->task, TASK_VTIMER_PROF); + } psignal_try_thread(p, thread, SIGPROF); } } if (timerisset(&p->p_rlim_cpu)) { - struct timeval tv; + struct timeval tv; task_vtimer_update(p->task, TASK_VTIMER_RLIM, (uint32_t *) &tv.tv_usec); @@ -3397,7 +3470,6 @@ bsd_ast(thread_t thread) timersub(&p->p_rlim_cpu, &tv, &p->p_rlim_cpu); proc_spinunlock(p); } else { - timerclear(&p->p_rlim_cpu); proc_spinunlock(p); @@ -3409,9 +3481,9 @@ bsd_ast(thread_t thread) #if CONFIG_DTRACE if (ut->t_dtrace_sig) { - uint8_t dt_action_sig = ut->t_dtrace_sig; - ut->t_dtrace_sig = 0; - psignal(p, dt_action_sig); + uint8_t dt_action_sig = ut->t_dtrace_sig; + ut->t_dtrace_sig = 0; + psignal(p, dt_action_sig); } if (ut->t_dtrace_stop) { @@ -3432,20 +3504,20 @@ bsd_ast(thread_t thread) resumeproc->p_dtrace_stop = 0; proc_unlock(resumeproc); task_resume_internal(resumeproc->task); - } - else { + } else { proc_unlock(resumeproc); } proc_rele(resumeproc); } } - + #endif /* CONFIG_DTRACE */ proc_lock(p); if (CHECK_SIGNALS(p, current_thread(), ut)) { - while ( (signum = issignal_locked(p)) ) + while ((signum = issignal_locked(p))) { postsig_locked(signum); + } } proc_unlock(p); @@ -3475,7 +3547,7 @@ pt_setrunnable(proc_t p) proc_unlock(p); if (p->sigwait) { wakeup((caddr_t)&(p->sigwait)); - if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479 + if ((p->p_lflag & P_LSIGEXC) == 0) { // 5878479 task_release(task); } } @@ -3484,15 +3556,15 @@ kern_return_t do_bsdexception( - int exc, - int code, - int sub) + int exc, + int code, + int sub) { mach_exception_data_type_t codes[EXCEPTION_CODE_MAX]; - codes[0] = code; + codes[0] = code; codes[1] = sub; - return(bsd_exception(exc, codes, 2)); + return bsd_exception(exc, codes, 2); } int @@ -3504,18 +3576,18 @@ proc_pendingsignals(proc_t p, sigset_t mask) proc_lock(p); /* If the process is in proc
exit return no signal info */ - if (p->p_lflag & P_LPEXIT) { + if (p->p_lflag & P_LPEXIT) { goto out; } if ((p->p_lflag & P_LINVFORK) && p->p_vforkact) { - th = p->p_vforkact; + th = p->p_vforkact; uth = (struct uthread *)get_bsdthread_info(th); if (uth) { bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask); } goto out; - } + } bits = 0; TAILQ_FOREACH(uth, &p->p_uthlist, uu_list) { @@ -3523,14 +3595,14 @@ proc_pendingsignals(proc_t p, sigset_t mask) } out: proc_unlock(p); - return(bits); + return bits; } int thread_issignal(proc_t p, thread_t th, sigset_t mask) { struct uthread * uth; - sigset_t bits=0; + sigset_t bits = 0; proc_lock(p); uth = (struct uthread *)get_bsdthread_info(th); @@ -3538,7 +3610,7 @@ thread_issignal(proc_t p, thread_t th, sigset_t mask) bits = (((uth->uu_siglist & ~uth->uu_sigmask) & ~p->p_sigignore) & mask); } proc_unlock(p); - return(bits); + return bits; } /* @@ -3547,62 +3619,70 @@ thread_issignal(proc_t p, thread_t th, sigset_t mask) int hassigprop(int sig, int prop) { - return (sigprop[sig] & prop); + return sigprop[sig] & prop; } void pgsigio(pid_t pgid, int sig) -{ +{ proc_t p = PROC_NULL; - if (pgid < 0) + if (pgid < 0) { gsignal(-(pgid), sig); - - else if (pgid > 0 && (p = proc_find(pgid)) != 0) + } else if (pgid > 0 && (p = proc_find(pgid)) != 0) { psignal(p, sig); - if (p != PROC_NULL) + } + if (p != PROC_NULL) { proc_rele(p); + } } void proc_signalstart(proc_t p, int locked) { - if (!locked) + if (!locked) { proc_lock(p); - - if(p->p_signalholder == current_thread()) - panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock"); - + } + + if (p->p_signalholder == current_thread()) { + panic("proc_signalstart: thread attempting to signal a process for which it holds the signal lock"); + } + p->p_sigwaitcnt++; - while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) + while ((p->p_lflag & P_LINSIGNAL) == P_LINSIGNAL) { msleep(&p->p_sigmask, &p->p_mlock, 0, "proc_signstart", NULL); + } p->p_sigwaitcnt--; p->p_lflag |= P_LINSIGNAL; p->p_signalholder = current_thread(); - if (!locked) + if (!locked) { proc_unlock(p); + } } void proc_signalend(proc_t p, int locked) { - if (!locked) + if (!locked) { proc_lock(p); + } p->p_lflag &= ~P_LINSIGNAL; - if (p->p_sigwaitcnt > 0) + if (p->p_sigwaitcnt > 0) { wakeup(&p->p_sigmask); + } p->p_signalholder = NULL; - if (!locked) + if (!locked) { proc_unlock(p); + } } void sig_lock_to_exit(proc_t p) { - thread_t self = current_thread(); + thread_t self = current_thread(); p->exit_thread = self; proc_unlock(p); @@ -3616,11 +3696,11 @@ sig_lock_to_exit(proc_t p) int sig_try_locked(proc_t p) { - thread_t self = current_thread(); + thread_t self = current_thread(); while (p->sigwait || p->exit_thread) { if (p->exit_thread) { - return(0); + return 0; } msleep((caddr_t)&p->sigwait_thread, &p->p_mlock, PCATCH | PDROP, 0, 0); if (thread_should_abort(self)) { diff --git a/bsd/kern/kern_subr.c b/bsd/kern/kern_subr.c index 610c94936..62c599072 100644 --- a/bsd/kern/kern_subr.c +++ b/bsd/kern/kern_subr.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
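proc_signalstart()/proc_signalend() in the hunks above build a one-signaller-at-a-time gate out of the P_LINSIGNAL flag plus msleep()/wakeup(). The same handshake restated with POSIX primitives (illustration only, not the kernel's implementation; compile with -lpthread):

/* Illustration only: the proc_signalstart()/proc_signalend() gate
 * restated with a mutex and condition variable. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int in_signal = 0;               /* stands in for P_LINSIGNAL */

static void
signalstart(void)
{
	pthread_mutex_lock(&lock);
	while (in_signal) {             /* mirrors the msleep() loop */
		pthread_cond_wait(&cond, &lock);
	}
	in_signal = 1;                  /* claim the signal path */
	pthread_mutex_unlock(&lock);
}

static void
signalend(void)
{
	pthread_mutex_lock(&lock);
	in_signal = 0;
	pthread_cond_broadcast(&cond);  /* mirrors wakeup(&p->p_sigmask) */
	pthread_mutex_unlock(&lock);
}

static void *
worker(void *arg)
{
	signalstart();
	printf("thread %d owns the signal path\n", *(int *)arg);
	signalend();
	return NULL;
}

int
main(void)
{
	pthread_t t[3];
	int id[3] = { 0, 1, 2 };

	for (int i = 0; i < 3; i++) {
		pthread_create(&t[i], NULL, worker, &id[i]);
	}
	for (int i = 0; i < 3; i++) {
		pthread_join(t[i], NULL);
	}
	return 0;
}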
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -84,7 +84,7 @@ #if DEBUG #include -static uint32_t uio_t_count = 0; +static uint32_t uio_t_count = 0; #endif /* DEBUG */ #define IS_VALID_UIO_SEGFLG(segflg) \ @@ -130,13 +130,14 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) int error = 0; #if DIAGNOSTIC - if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) + if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE) { panic("uiomove: mode"); + } #endif #if LP64_DEBUG if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) { - panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ @@ -146,11 +147,11 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) if (acnt == 0) { continue; } - if (n > 0 && acnt > (uint64_t)n) + if (n > 0 && acnt > (uint64_t)n) { acnt = n; + } switch ((int) uio->uio_segflg) { - case UIO_USERSPACE64: case UIO_USERISPACE64: case UIO_USERSPACE32: @@ -158,98 +159,97 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) case UIO_USERSPACE: case UIO_USERISPACE: // LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit - if (uio->uio_rw == UIO_READ) - { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, - (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0,0); - - error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.uiovp->iov_base, acnt ); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, - (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0,0); - } - else - { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, - (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0,0); - - error = copyin(uio->uio_iovs.uiovp->iov_base, CAST_DOWN(caddr_t, cp), acnt); - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, - (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0,0); - } - if (error) - return (error); + if (uio->uio_rw == UIO_READ) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, + (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0, 0); + + error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.uiovp->iov_base, acnt ); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, + (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 0, 0); + } else { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, + (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0, 0); + + error = copyin(uio->uio_iovs.uiovp->iov_base, CAST_DOWN(caddr_t, cp), acnt); + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, + 
(uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 0, 0); + } + if (error) { + return error; + } break; case UIO_SYSSPACE32: case UIO_SYSSPACE: - if (uio->uio_rw == UIO_READ) + if (uio->uio_rw == UIO_READ) { error = copywithin(CAST_DOWN(caddr_t, cp), CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base), - acnt); - else + acnt); + } else { error = copywithin(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base), CAST_DOWN(caddr_t, cp), - acnt); + acnt); + } break; case UIO_PHYS_USERSPACE64: case UIO_PHYS_USERSPACE32: case UIO_PHYS_USERSPACE: - if (uio->uio_rw == UIO_READ) - { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, - (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1,0); + if (uio->uio_rw == UIO_READ) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, + (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1, 0); error = copypv((addr64_t)cp, uio->uio_iovs.uiovp->iov_base, acnt, cppvPsrc | cppvNoRefSrc); - if (error) /* Copy physical to virtual */ - error = EFAULT; + if (error) { /* Copy physical to virtual */ + error = EFAULT; + } - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, - (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1,0); - } - else - { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, - (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1,0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, + (int)cp, (uintptr_t)uio->uio_iovs.uiovp->iov_base, acnt, 1, 0); + } else { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, + (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1, 0); error = copypv(uio->uio_iovs.uiovp->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk); - if (error) /* Copy virtual to physical */ - error = EFAULT; - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, - (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1,0); - } - if (error) - return (error); + if (error) { /* Copy virtual to physical */ + error = EFAULT; + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, + (uintptr_t)uio->uio_iovs.uiovp->iov_base, (int)cp, acnt, 1, 0); + } + if (error) { + return error; + } break; case UIO_PHYS_SYSSPACE: - if (uio->uio_rw == UIO_READ) - { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, - (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2,0); - - error = copypv((addr64_t)cp, uio->uio_iovs.kiovp->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc); - if (error) /* Copy physical to virtual */ - error = EFAULT; - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, - (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2,0); - } - else - { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, - (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2,0); - - error = copypv(uio->uio_iovs.kiovp->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk); - if (error) /* Copy virtual to physical */ - error = EFAULT; - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, - (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2,0); - } - if (error) - return (error); + if (uio->uio_rw == UIO_READ) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START, + (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2, 0); + + error = copypv((addr64_t)cp, uio->uio_iovs.kiovp->iov_base, acnt, cppvKmap | cppvPsrc 
| cppvNoRefSrc); + if (error) { /* Copy physical to virtual */ + error = EFAULT; + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END, + (int)cp, (uintptr_t)uio->uio_iovs.kiovp->iov_base, acnt, 2, 0); + } else { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START, + (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2, 0); + + error = copypv(uio->uio_iovs.kiovp->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk); + if (error) { /* Copy virtual to physical */ + error = EFAULT; + } + + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END, + (uintptr_t)uio->uio_iovs.kiovp->iov_base, (int)cp, acnt, 2, 0); + } + if (error) { + return error; + } break; default: @@ -259,7 +259,7 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) cp += acnt; n -= acnt; } - return (error); + return error; } /* @@ -268,24 +268,27 @@ uiomove64(const addr64_t c_cp, int n, struct uio *uio) int ureadc(int c, struct uio *uio) { - if (uio_resid(uio) <= 0) + if (uio_resid(uio) <= 0) { panic("ureadc: non-positive resid"); + } uio_update(uio, 0); - if (uio->uio_iovcnt == 0) + if (uio->uio_iovcnt == 0) { panic("ureadc: non-positive iovcnt"); - if (uio_curriovlen(uio) <= 0) + } + if (uio_curriovlen(uio) <= 0) { panic("ureadc: non-positive iovlen"); + } switch ((int) uio->uio_segflg) { - case UIO_USERSPACE32: case UIO_USERSPACE: case UIO_USERISPACE32: case UIO_USERISPACE: case UIO_USERSPACE64: case UIO_USERISPACE64: - if (subyte((user_addr_t)uio->uio_iovs.uiovp->iov_base, c) < 0) - return (EFAULT); + if (subyte((user_addr_t)uio->uio_iovs.uiovp->iov_base, c) < 0) { + return EFAULT; + } break; case UIO_SYSSPACE32: @@ -297,7 +300,7 @@ ureadc(int c, struct uio *uio) break; } uio_update(uio, 1); - return (0); + return 0; } /* @@ -307,58 +310,63 @@ void * hashinit(int elements, int type, u_long *hashmask) { long hashsize; - LIST_HEAD(generic, generic) *hashtbl; + LIST_HEAD(generic, generic) * hashtbl; int i; - if (elements <= 0) + if (elements <= 0) { panic("hashinit: bad cnt"); - for (hashsize = 1; hashsize <= elements; hashsize <<= 1) + } + for (hashsize = 1; hashsize <= elements; hashsize <<= 1) { continue; + } hashsize >>= 1; - MALLOC(hashtbl, struct generic *, - hashsize * sizeof(*hashtbl), type, M_WAITOK|M_ZERO); + MALLOC(hashtbl, struct generic *, + hashsize * sizeof(*hashtbl), type, M_WAITOK | M_ZERO); if (hashtbl != NULL) { - for (i = 0; i < hashsize; i++) + for (i = 0; i < hashsize; i++) { LIST_INIT(&hashtbl[i]); + } *hashmask = hashsize - 1; } - return (hashtbl); + return hashtbl; } /* * uio_resid - return the residual IO value for the given uio_t */ -user_ssize_t uio_resid( uio_t a_uio ) +user_ssize_t +uio_resid( uio_t a_uio ) { #if DEBUG if (a_uio == NULL) { - printf("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + printf("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } -/* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */ -/* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */ -/* } */ +/* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */ +/* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */ +/* } */ #endif /* DEBUG */ /* return 0 if there are no active iovecs */ if (a_uio == NULL) { - return( 0 ); + return 0; } - return( a_uio->uio_resid_64 ); + return a_uio->uio_resid_64; } /* * uio_setresid - set the residual IO value for the given uio_t */ -void uio_setresid( uio_t a_uio, user_ssize_t a_value ) +void +uio_setresid( uio_t a_uio, user_ssize_t a_value ) { #if DEBUG if (a_uio == NULL) { 
- panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } -/* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */ -/* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */ -/* } */ +/* if (IS_VALID_UIO_SEGFLG(a_uio->uio_segflg) == 0) { */ +/* panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__); */ +/* } */ #endif /* DEBUG */ if (a_uio == NULL) { @@ -370,73 +378,74 @@ void uio_setresid( uio_t a_uio, user_ssize_t a_value ) } /* - * uio_curriovbase - return the base address of the current iovec associated + * uio_curriovbase - return the base address of the current iovec associated * with the given uio_t. May return 0. */ -user_addr_t uio_curriovbase( uio_t a_uio ) +user_addr_t +uio_curriovbase( uio_t a_uio ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ if (a_uio == NULL || a_uio->uio_iovcnt < 1) { - return(0); + return 0; } - + if (UIO_IS_USER_SPACE(a_uio)) { - return(a_uio->uio_iovs.uiovp->iov_base); + return a_uio->uio_iovs.uiovp->iov_base; } - return((user_addr_t)a_uio->uio_iovs.kiovp->iov_base); - + return (user_addr_t)a_uio->uio_iovs.kiovp->iov_base; } /* - * uio_curriovlen - return the length value of the current iovec associated + * uio_curriovlen - return the length value of the current iovec associated * with the given uio_t. */ -user_size_t uio_curriovlen( uio_t a_uio ) +user_size_t +uio_curriovlen( uio_t a_uio ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ if (a_uio == NULL || a_uio->uio_iovcnt < 1) { - return(0); + return 0; } - + if (UIO_IS_USER_SPACE(a_uio)) { - return(a_uio->uio_iovs.uiovp->iov_len); + return a_uio->uio_iovs.uiovp->iov_len; } - return((user_size_t)a_uio->uio_iovs.kiovp->iov_len); + return (user_size_t)a_uio->uio_iovs.kiovp->iov_len; } /* - * uio_setcurriovlen - set the length value of the current iovec associated + * uio_setcurriovlen - set the length value of the current iovec associated * with the given uio_t. 
*/ -__private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value ) +__private_extern__ void +uio_setcurriovlen( uio_t a_uio, user_size_t a_value ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ if (a_uio == NULL) { - return; + return; } if (UIO_IS_USER_SPACE(a_uio)) { a_uio->uio_iovs.uiovp->iov_len = a_value; - } - else { + } else { #if LP64_DEBUG if (a_value > 0xFFFFFFFFull) { - panic("%s :%d - invalid a_value\n", __FILE__, __LINE__); + panic("%s :%d - invalid a_value\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ a_uio->uio_iovs.kiovp->iov_len = (size_t)a_value; @@ -447,51 +456,54 @@ __private_extern__ void uio_setcurriovlen( uio_t a_uio, user_size_t a_value ) /* * uio_iovcnt - return count of active iovecs for the given uio_t */ -int uio_iovcnt( uio_t a_uio ) +int +uio_iovcnt( uio_t a_uio ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ if (a_uio == NULL) { - return(0); + return 0; } - return( a_uio->uio_iovcnt ); + return a_uio->uio_iovcnt; } /* * uio_offset - return the current offset value for the given uio_t */ -off_t uio_offset( uio_t a_uio ) +off_t +uio_offset( uio_t a_uio ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ if (a_uio == NULL) { - return(0); + return 0; } - return( a_uio->uio_offset ); + return a_uio->uio_offset; } /* * uio_setoffset - set the current offset value for the given uio_t */ -void uio_setoffset( uio_t a_uio, off_t a_offset ) +void +uio_setoffset( uio_t a_uio, off_t a_offset ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ if (a_uio == NULL) { - return; + return; } a_uio->uio_offset = a_offset; return; @@ -500,35 +512,37 @@ void uio_setoffset( uio_t a_uio, off_t a_offset ) /* * uio_rw - return the read / write flag for the given uio_t */ -int uio_rw( uio_t a_uio ) +int +uio_rw( uio_t a_uio ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ if (a_uio == NULL) { - return(-1); + return -1; } - return( a_uio->uio_rw ); + return a_uio->uio_rw; } /* * uio_setrw - set the read / write flag for the given uio_t */ -void uio_setrw( uio_t a_uio, int a_value ) +void +uio_setrw( uio_t a_uio, int a_value ) { if (a_uio == NULL) { #if LP64_DEBUG - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); #endif /* LP64_DEBUG */ return; } #if LP64_DEBUG if (!(a_value == UIO_READ || a_value == UIO_WRITE)) { - panic("%s :%d - invalid a_value\n", __FILE__, __LINE__); + panic("%s :%d - invalid a_value\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ @@ -539,48 +553,50 @@ void uio_setrw( uio_t a_uio, int a_value ) } /* - * uio_isuserspace - return non zero value if the address space + * uio_isuserspace - return non zero value if the address space * flag is for a user address space (could be 32 or 64 bit). 
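 *
 * Illustrative sketch only, not part of this change: callers typically
 * branch on this predicate to pick a copy routine, e.g.
 *
 *	if (uio_isuserspace(auio)) {
 *		// user address space: move data with copyin()/copyout()
 *	} else {
 *		// kernel address space: move data with copywithin()/bcopy()
 *	}
 *
 * where "auio" is a hypothetical uio_t.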
*/ -int uio_isuserspace( uio_t a_uio ) +int +uio_isuserspace( uio_t a_uio ) { if (a_uio == NULL) { #if LP64_DEBUG - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); #endif /* LP64_DEBUG */ - return(0); + return 0; } if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) { - return( 1 ); + return 1; } - return( 0 ); + return 0; } /* * uio_create - create an uio_t. - * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t + * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t * is not fully initialized until all iovecs are added using uio_addiov calls. * a_iovcount is the maximum number of iovecs you may add. */ -uio_t uio_create( int a_iovcount, /* number of iovecs */ - off_t a_offset, /* current offset */ - int a_spacetype, /* type of address space */ - int a_iodirection ) /* read or write flag */ +uio_t +uio_create( int a_iovcount, /* number of iovecs */ + off_t a_offset, /* current offset */ + int a_spacetype, /* type of address space */ + int a_iodirection ) /* read or write flag */ { - void * my_buf_p; - size_t my_size; - uio_t my_uio; - + void * my_buf_p; + size_t my_size; + uio_t my_uio; + my_size = UIO_SIZEOF(a_iovcount); my_buf_p = kalloc(my_size); - my_uio = uio_createwithbuffer( a_iovcount, - a_offset, - a_spacetype, - a_iodirection, - my_buf_p, - my_size ); + my_uio = uio_createwithbuffer( a_iovcount, + a_offset, + a_spacetype, + a_iodirection, + my_buf_p, + my_size ); if (my_uio != 0) { /* leave a note that we allocated this uio_t */ my_uio->uio_flags |= UIO_FLAGS_WE_ALLOCED; @@ -588,50 +604,50 @@ uio_t uio_create( int a_iovcount, /* number of iovecs */ (void)hw_atomic_add(&uio_t_count, 1); #endif } - - return( my_uio ); + + return my_uio; } /* * uio_createwithbuffer - create an uio_t. - * Create a uio_t using the given buffer. The uio_t + * Create a uio_t using the given buffer. The uio_t * is not fully initialized until all iovecs are added using uio_addiov calls. * a_iovcount is the maximum number of iovecs you may add. * This call may fail if the given buffer is not large enough. 
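 *
 * Illustrative sketch only, not part of this change: callers commonly
 * avoid a separate kalloc by sizing the backing store with UIO_SIZEOF(),
 * e.g. for a single iovec:
 *
 *	char uio_buf[UIO_SIZEOF(1)];
 *	uio_t auio = uio_createwithbuffer(1, file_offset, UIO_SYSSPACE,
 *	    UIO_READ, &uio_buf[0], sizeof(uio_buf));
 *
 * where "file_offset" is a hypothetical caller-supplied offset.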
*/ -__private_extern__ uio_t - uio_createwithbuffer( int a_iovcount, /* number of iovecs */ - off_t a_offset, /* current offset */ - int a_spacetype, /* type of address space */ - int a_iodirection, /* read or write flag */ - void *a_buf_p, /* pointer to a uio_t buffer */ - size_t a_buffer_size ) /* size of uio_t buffer */ +__private_extern__ uio_t +uio_createwithbuffer( int a_iovcount, /* number of iovecs */ + off_t a_offset, /* current offset */ + int a_spacetype, /* type of address space */ + int a_iodirection, /* read or write flag */ + void *a_buf_p, /* pointer to a uio_t buffer */ + size_t a_buffer_size ) /* size of uio_t buffer */ { - uio_t my_uio = (uio_t) a_buf_p; - size_t my_size; - + uio_t my_uio = (uio_t) a_buf_p; + size_t my_size; + my_size = UIO_SIZEOF(a_iovcount); if (a_buffer_size < my_size) { #if DEBUG - panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__); + panic("%s :%d - a_buffer_size is too small\n", __FILE__, __LINE__); #endif /* DEBUG */ - return( NULL ); + return NULL; } my_size = a_buffer_size; - + #if DEBUG if (my_uio == 0) { - panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__); + panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__); } if (!IS_VALID_UIO_SEGFLG(a_spacetype)) { - panic("%s :%d - invalid address space type\n", __FILE__, __LINE__); + panic("%s :%d - invalid address space type\n", __FILE__, __LINE__); } if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) { - panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__); + panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__); } if (a_iovcount > UIO_MAXIOV) { - panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__); + panic("%s :%d - invalid a_iovcount\n", __FILE__, __LINE__); } #endif /* DEBUG */ @@ -662,9 +678,8 @@ __private_extern__ uio_t if (a_iovcount > 0) { my_uio->uio_iovs.uiovp = (struct user_iovec *) - (((uint8_t *)my_uio) + sizeof(struct uio)); - } - else { + (((uint8_t *)my_uio) + sizeof(struct uio)); + } else { my_uio->uio_iovs.uiovp = NULL; } @@ -673,22 +688,23 @@ __private_extern__ uio_t my_uio->uio_rw = a_iodirection; my_uio->uio_flags = UIO_FLAGS_INITED; - return( my_uio ); + return my_uio; } /* * uio_spacetype - return the address space type for the given uio_t */ -__private_extern__ int uio_spacetype( uio_t a_uio ) +__private_extern__ int +uio_spacetype( uio_t a_uio ) { if (a_uio == NULL) { #if LP64_DEBUG - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); #endif /* LP64_DEBUG */ - return(-1); + return -1; } - return( a_uio->uio_segflg ); + return a_uio->uio_segflg; } /* @@ -698,52 +714,53 @@ __private_extern__ int uio_spacetype( uio_t a_uio ) * which will increase as the IO is completed and is NOT embedded within the * uio, it is a seperate array of one or more iovecs. */ -__private_extern__ struct user_iovec * uio_iovsaddr( uio_t a_uio ) +__private_extern__ struct user_iovec * +uio_iovsaddr( uio_t a_uio ) { - struct user_iovec * my_addr; - + struct user_iovec * my_addr; + if (a_uio == NULL) { - return(NULL); + return NULL; } - + if (UIO_SEG_IS_USER_SPACE(a_uio->uio_segflg)) { /* we need this for compatibility mode. */ my_addr = (struct user_iovec *) a_uio->uio_iovs.uiovp; - } - else { + } else { #if DEBUG panic("uio_iovsaddr called for UIO_SYSSPACE request"); #endif my_addr = 0; } - return(my_addr); + return my_addr; } /* * uio_reset - reset an uio_t. - * Reset the given uio_t to initial values. 
The uio_t is not fully initialized - * until all iovecs are added using uio_addiov calls. - * The a_iovcount value passed in the uio_create is the maximum number of + * Reset the given uio_t to initial values. The uio_t is not fully initialized + * until all iovecs are added using uio_addiov calls. + * The a_iovcount value passed in the uio_create is the maximum number of * iovecs you may add. */ -void uio_reset( uio_t a_uio, - off_t a_offset, /* current offset */ - int a_spacetype, /* type of address space */ - int a_iodirection ) /* read or write flag */ +void +uio_reset( uio_t a_uio, + off_t a_offset, /* current offset */ + int a_spacetype, /* type of address space */ + int a_iodirection ) /* read or write flag */ { - vm_size_t my_size; - int my_max_iovs; - u_int32_t my_old_flags; - + vm_size_t my_size; + int my_max_iovs; + u_int32_t my_old_flags; + #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__); + panic("%s :%d - could not allocate uio_t\n", __FILE__, __LINE__); } if (!IS_VALID_UIO_SEGFLG(a_spacetype)) { - panic("%s :%d - invalid address space type\n", __FILE__, __LINE__); + panic("%s :%d - invalid address space type\n", __FILE__, __LINE__); } if (!(a_iodirection == UIO_READ || a_iodirection == UIO_WRITE)) { - panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__); + panic("%s :%d - invalid IO direction flag\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ @@ -781,9 +798,8 @@ void uio_reset( uio_t a_uio, if (my_max_iovs > 0) { a_uio->uio_iovs.uiovp = (struct user_iovec *) - (((uint8_t *)a_uio) + sizeof(struct uio)); - } - else { + (((uint8_t *)a_uio) + sizeof(struct uio)); + } else { a_uio->uio_iovs.uiovp = NULL; } @@ -797,124 +813,125 @@ void uio_reset( uio_t a_uio, /* * uio_free - free a uio_t allocated via uio_init. this also frees all - * associated iovecs. + * associated iovecs. */ -void uio_free( uio_t a_uio ) +void +uio_free( uio_t a_uio ) { #if DEBUG if (a_uio == NULL) { - panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__); + panic("%s :%d - passing NULL uio_t\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ if (a_uio != NULL && (a_uio->uio_flags & UIO_FLAGS_WE_ALLOCED) != 0) { #if DEBUG - if (hw_atomic_sub(&uio_t_count, 1) == UINT_MAX) - panic("%s :%d - uio_t_count underflow\n", __FILE__, __LINE__); + if (hw_atomic_sub(&uio_t_count, 1) == UINT_MAX) { + panic("%s :%d - uio_t_count underflow\n", __FILE__, __LINE__); + } #endif kfree(a_uio, a_uio->uio_size); } - - } /* * uio_addiov - add an iovec to the given uio_t. You may call this up to - * the a_iovcount number that was passed to uio_create. This call will - * increment the residual IO count as iovecs are added to the uio_t. + * the a_iovcount number that was passed to uio_create. This call will + * increment the residual IO count as iovecs are added to the uio_t. * returns 0 if add was successful else non zero. 
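 *
 * A minimal usage sketch, illustrative only and not part of this change,
 * assuming a hypothetical kernel buffer "buf" of "buflen" bytes:
 *
 *	uio_t auio = uio_create(1, 0, UIO_SYSSPACE, UIO_WRITE);
 *	if (auio != NULL) {
 *		if (uio_addiov(auio, CAST_USER_ADDR_T(buf), buflen) == 0) {
 *			// auio now holds one iovec; uio_resid(auio) == buflen
 *		}
 *		uio_free(auio);
 *	}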
*/ -int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length ) +int +uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length ) { - int i; - + int i; + if (a_uio == NULL) { #if DEBUG - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); #endif /* LP64_DEBUG */ - return(-1); + return -1; } if (UIO_IS_USER_SPACE(a_uio)) { - for ( i = 0; i < a_uio->uio_max_iovs; i++ ) { + for (i = 0; i < a_uio->uio_max_iovs; i++) { if (a_uio->uio_iovs.uiovp[i].iov_len == 0 && a_uio->uio_iovs.uiovp[i].iov_base == 0) { a_uio->uio_iovs.uiovp[i].iov_len = a_length; a_uio->uio_iovs.uiovp[i].iov_base = a_baseaddr; a_uio->uio_iovcnt++; a_uio->uio_resid_64 += a_length; - return( 0 ); + return 0; } } - } - else { - for ( i = 0; i < a_uio->uio_max_iovs; i++ ) { + } else { + for (i = 0; i < a_uio->uio_max_iovs; i++) { if (a_uio->uio_iovs.kiovp[i].iov_len == 0 && a_uio->uio_iovs.kiovp[i].iov_base == 0) { a_uio->uio_iovs.kiovp[i].iov_len = (u_int64_t)a_length; a_uio->uio_iovs.kiovp[i].iov_base = (u_int64_t)a_baseaddr; a_uio->uio_iovcnt++; a_uio->uio_resid_64 += a_length; - return( 0 ); + return 0; } } } - return( -1 ); + return -1; } /* * uio_getiov - get iovec data associated with the given uio_t. Use * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)). * a_baseaddr_p and a_length_p may be NULL. - * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t. + * returns -1 when a_index is >= uio_t.uio_iovcnt or invalid uio_t. * returns 0 when data is returned. */ -int uio_getiov( uio_t a_uio, - int a_index, - user_addr_t * a_baseaddr_p, - user_size_t * a_length_p ) +int +uio_getiov( uio_t a_uio, + int a_index, + user_addr_t * a_baseaddr_p, + user_size_t * a_length_p ) { if (a_uio == NULL) { #if DEBUG - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); #endif /* DEBUG */ - return(-1); + return -1; + } + if (a_index < 0 || a_index >= a_uio->uio_iovcnt) { + return -1; } - if ( a_index < 0 || a_index >= a_uio->uio_iovcnt) { - return(-1); - } if (UIO_IS_USER_SPACE(a_uio)) { - if (a_baseaddr_p != NULL) { - *a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base; - } - if (a_length_p != NULL) { - *a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len; - } - } - else { - if (a_baseaddr_p != NULL) { - *a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base; - } - if (a_length_p != NULL) { - *a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len; - } - } - - return( 0 ); + if (a_baseaddr_p != NULL) { + *a_baseaddr_p = a_uio->uio_iovs.uiovp[a_index].iov_base; + } + if (a_length_p != NULL) { + *a_length_p = a_uio->uio_iovs.uiovp[a_index].iov_len; + } + } else { + if (a_baseaddr_p != NULL) { + *a_baseaddr_p = a_uio->uio_iovs.kiovp[a_index].iov_base; + } + if (a_length_p != NULL) { + *a_length_p = a_uio->uio_iovs.kiovp[a_index].iov_len; + } + } + + return 0; } /* * uio_calculateresid - runs through all iovecs associated with this * uio_t and calculates (and sets) the residual IO count. 
*/ -__private_extern__ int uio_calculateresid( uio_t a_uio ) +__private_extern__ int +uio_calculateresid( uio_t a_uio ) { - int i; - u_int64_t resid = 0; - + int i; + u_int64_t resid = 0; + if (a_uio == NULL) { #if LP64_DEBUG - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); #endif /* LP64_DEBUG */ return EINVAL; } @@ -922,13 +939,15 @@ __private_extern__ int uio_calculateresid( uio_t a_uio ) a_uio->uio_iovcnt = a_uio->uio_max_iovs; if (UIO_IS_USER_SPACE(a_uio)) { a_uio->uio_resid_64 = 0; - for ( i = 0; i < a_uio->uio_max_iovs; i++ ) { + for (i = 0; i < a_uio->uio_max_iovs; i++) { if (a_uio->uio_iovs.uiovp[i].iov_len != 0 && a_uio->uio_iovs.uiovp[i].iov_base != 0) { - if (a_uio->uio_iovs.uiovp[i].iov_len > LONG_MAX) - return EINVAL; + if (a_uio->uio_iovs.uiovp[i].iov_len > LONG_MAX) { + return EINVAL; + } resid += a_uio->uio_iovs.uiovp[i].iov_len; - if (resid > LONG_MAX) + if (resid > LONG_MAX) { return EINVAL; + } } } a_uio->uio_resid_64 = resid; @@ -940,16 +959,17 @@ __private_extern__ int uio_calculateresid( uio_t a_uio ) a_uio->uio_iovs.uiovp++; } } - } - else { + } else { a_uio->uio_resid_64 = 0; - for ( i = 0; i < a_uio->uio_max_iovs; i++ ) { + for (i = 0; i < a_uio->uio_max_iovs; i++) { if (a_uio->uio_iovs.kiovp[i].iov_len != 0 && a_uio->uio_iovs.kiovp[i].iov_base != 0) { - if (a_uio->uio_iovs.kiovp[i].iov_len > LONG_MAX) + if (a_uio->uio_iovs.kiovp[i].iov_len > LONG_MAX) { return EINVAL; + } resid += a_uio->uio_iovs.kiovp[i].iov_len; - if (resid > LONG_MAX) + if (resid > LONG_MAX) { return EINVAL; + } } } a_uio->uio_resid_64 = resid; @@ -969,20 +989,21 @@ __private_extern__ int uio_calculateresid( uio_t a_uio ) /* * uio_update - update the given uio_t for a_count of completed IO. * This call decrements the current iovec length and residual IO value - * and increments the current iovec base address and offset value. + * and increments the current iovec base address and offset value. * If the current iovec length is 0 then advance to the next * iovec (if any). - * If the a_count passed in is 0, than only do the advancement + * If the a_count passed in is 0, then only do the advancement * over any 0 length iovec's. 
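 *
 * Illustrative calling pattern, not part of this change ("auio" and
 * "xfer" are hypothetical):
 *
 *	while (uio_resid(auio) > 0) {
 *		user_size_t xfer = uio_curriovlen(auio);
 *		// ... complete xfer bytes of IO at uio_curriovbase(auio) ...
 *		uio_update(auio, xfer);
 *	}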
*/ -void uio_update( uio_t a_uio, user_size_t a_count ) +void +uio_update( uio_t a_uio, user_size_t a_count ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) { - panic("%s :%d - invalid count value \n", __FILE__, __LINE__); + panic("%s :%d - invalid count value \n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ @@ -991,24 +1012,22 @@ void uio_update( uio_t a_uio, user_size_t a_count ) } if (UIO_IS_USER_SPACE(a_uio)) { - /* + /* * if a_count == 0, then we are asking to skip over * any empty iovs */ - if (a_count) { - if (a_count > a_uio->uio_iovs.uiovp->iov_len) { - a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len; + if (a_count) { + if (a_count > a_uio->uio_iovs.uiovp->iov_len) { + a_uio->uio_iovs.uiovp->iov_base += a_uio->uio_iovs.uiovp->iov_len; a_uio->uio_iovs.uiovp->iov_len = 0; - } - else { + } else { a_uio->uio_iovs.uiovp->iov_base += a_count; a_uio->uio_iovs.uiovp->iov_len -= a_count; } if (a_count > (user_size_t)a_uio->uio_resid_64) { a_uio->uio_offset += a_uio->uio_resid_64; a_uio->uio_resid_64 = 0; - } - else { + } else { a_uio->uio_offset += a_count; a_uio->uio_resid_64 -= a_count; } @@ -1022,26 +1041,23 @@ void uio_update( uio_t a_uio, user_size_t a_count ) a_uio->uio_iovs.uiovp++; } } - } - else { - /* + } else { + /* * if a_count == 0, then we are asking to skip over * any empty iovs */ - if (a_count) { - if (a_count > a_uio->uio_iovs.kiovp->iov_len) { - a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len; + if (a_count) { + if (a_count > a_uio->uio_iovs.kiovp->iov_len) { + a_uio->uio_iovs.kiovp->iov_base += a_uio->uio_iovs.kiovp->iov_len; a_uio->uio_iovs.kiovp->iov_len = 0; - } - else { - a_uio->uio_iovs.kiovp->iov_base += a_count; + } else { + a_uio->uio_iovs.kiovp->iov_base += a_count; a_uio->uio_iovs.kiovp->iov_len -= a_count; } if (a_count > (user_size_t)a_uio->uio_resid_64) { a_uio->uio_offset += a_uio->uio_resid_64; a_uio->uio_resid_64 = 0; - } - else { + } else { a_uio->uio_offset += a_count; a_uio->uio_resid_64 -= a_count; } @@ -1065,14 +1081,15 @@ void uio_update( uio_t a_uio, user_size_t a_count ) * IO. If the UIO was previously exhausted, this call will panic. * New code should not use this functionality. */ -__private_extern__ void uio_pushback( uio_t a_uio, user_size_t a_count ) +__private_extern__ void +uio_pushback( uio_t a_uio, user_size_t a_count ) { #if LP64_DEBUG if (a_uio == NULL) { - panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); + panic("%s :%d - invalid uio_t\n", __FILE__, __LINE__); } if (UIO_IS_32_BIT_SPACE(a_uio) && a_count > 0xFFFFFFFFull) { - panic("%s :%d - invalid count value \n", __FILE__, __LINE__); + panic("%s :%d - invalid count value \n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ @@ -1087,8 +1104,7 @@ __private_extern__ void uio_pushback( uio_t a_uio, user_size_t a_count ) if (UIO_IS_USER_SPACE(a_uio)) { a_uio->uio_iovs.uiovp->iov_base -= a_count; a_uio->uio_iovs.uiovp->iov_len += a_count; - } - else { + } else { a_uio->uio_iovs.kiovp->iov_base -= a_count; a_uio->uio_iovs.kiovp->iov_len += a_count; } @@ -1104,36 +1120,36 @@ __private_extern__ void uio_pushback( uio_t a_uio, user_size_t a_count ) * uio_duplicate - allocate a new uio and make a copy of the given uio_t. * may return NULL. 
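 *
 * Illustrative sketch only, not part of this change: the copy is created
 * with UIO_FLAGS_WE_ALLOCED set, so it is released with uio_free():
 *
 *	uio_t ucopy = uio_duplicate(auio);
 *	if (ucopy != NULL) {
 *		// ... consume "ucopy" without disturbing the original ...
 *		uio_free(ucopy);
 *	}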
*/ -uio_t uio_duplicate( uio_t a_uio ) +uio_t +uio_duplicate( uio_t a_uio ) { - uio_t my_uio; - int i; + uio_t my_uio; + int i; if (a_uio == NULL) { - return(NULL); + return NULL; } - + my_uio = (uio_t) kalloc(a_uio->uio_size); if (my_uio == 0) { - panic("%s :%d - allocation failed\n", __FILE__, __LINE__); + panic("%s :%d - allocation failed\n", __FILE__, __LINE__); } - + bcopy((void *)a_uio, (void *)my_uio, a_uio->uio_size); /* need to set our iovec pointer to point to first active iovec */ if (my_uio->uio_max_iovs > 0) { my_uio->uio_iovs.uiovp = (struct user_iovec *) - (((uint8_t *)my_uio) + sizeof(struct uio)); + (((uint8_t *)my_uio) + sizeof(struct uio)); /* advance to first nonzero iovec */ if (my_uio->uio_iovcnt > 0) { - for ( i = 0; i < my_uio->uio_max_iovs; i++ ) { + for (i = 0; i < my_uio->uio_max_iovs; i++) { if (UIO_IS_USER_SPACE(a_uio)) { if (my_uio->uio_iovs.uiovp->iov_len != 0) { break; } my_uio->uio_iovs.uiovp++; - } - else { + } else { if (my_uio->uio_iovs.kiovp->iov_len != 0) { break; } @@ -1145,23 +1161,25 @@ uio_t uio_duplicate( uio_t a_uio ) my_uio->uio_flags = UIO_FLAGS_WE_ALLOCED | UIO_FLAGS_INITED; #if DEBUG - (void)hw_atomic_add(&uio_t_count, 1); + (void)hw_atomic_add(&uio_t_count, 1); #endif - return(my_uio); + return my_uio; } -int copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user_iovec *dst) +int +copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct user_iovec *dst) { - size_t size_of_iovec = ( spacetype == UIO_USERSPACE64 ? sizeof(struct user64_iovec) : sizeof(struct user32_iovec)); + size_t size_of_iovec = (spacetype == UIO_USERSPACE64 ? sizeof(struct user64_iovec) : sizeof(struct user32_iovec)); int error; int i; // copyin to the front of "dst", without regard for putting records in the right places error = copyin(uaddr, dst, count * size_of_iovec); - if (error) - return (error); + if (error) { + return error; + } // now, unpack the entries in reverse order, so we don't overwrite anything for (i = count - 1; i >= 0; i--) { @@ -1172,9 +1190,9 @@ int copyin_user_iovec_array(user_addr_t uaddr, int spacetype, int count, struct } else { struct user32_iovec iovec = ((struct user32_iovec *)dst)[i]; dst[i].iov_base = iovec.iov_base; - dst[i].iov_len = iovec.iov_len; + dst[i].iov_len = iovec.iov_len; } } - return (0); + return 0; } diff --git a/bsd/kern/kern_symfile.c b/bsd/kern/kern_symfile.c index a88a51ca8..2ec92f29a 100644 --- a/bsd/kern/kern_symfile.c +++ b/bsd/kern/kern_symfile.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1998 Apple Computer, Inc. All rights reserved. @@ -74,595 +74,603 @@ get_kernel_symfile(__unused proc_t p, __unused char const **symfile); int get_kernel_symfile(__unused proc_t p, __unused char const **symfile) { - return KERN_FAILURE; + return KERN_FAILURE; } -struct kern_direct_file_io_ref_t -{ - vfs_context_t ctx; - struct vnode * vp; - dev_t device; - uint32_t blksize; - off_t filelength; - char cf; - char pinned; - char frozen; - char wbcranged; +struct kern_direct_file_io_ref_t { + vfs_context_t ctx; + struct vnode * vp; + dev_t device; + uint32_t blksize; + off_t filelength; + char cf; + char pinned; + char frozen; + char wbcranged; }; -static int file_ioctl(void * p1, void * p2, u_long theIoctl, caddr_t result) +static int +file_ioctl(void * p1, void * p2, u_long theIoctl, caddr_t result) { - dev_t device = *(dev_t*) p1; + dev_t device = *(dev_t*) p1; - return ((*bdevsw[major(device)].d_ioctl) - (device, theIoctl, result, S_IFBLK, p2)); + return (*bdevsw[major(device)].d_ioctl) + (device, theIoctl, result, S_IFBLK, p2); } -static int device_ioctl(void * p1, __unused void * p2, u_long theIoctl, caddr_t result) +static int +device_ioctl(void * p1, __unused void * p2, u_long theIoctl, caddr_t result) { - return (VNOP_IOCTL(p1, theIoctl, result, 0, p2)); + return VNOP_IOCTL(p1, theIoctl, result, 0, p2); } static int kern_ioctl_file_extents(struct kern_direct_file_io_ref_t * ref, u_long theIoctl, off_t offset, off_t end) { - int error = 0; - int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); - void * p1; - void * p2; - uint64_t fileblk; - size_t filechunk; - dk_extent_t extent; - dk_unmap_t unmap; - _dk_cs_pin_t pin; - - bzero(&extent, sizeof(dk_extent_t)); - bzero(&unmap, sizeof(dk_unmap_t)); - bzero(&pin, sizeof(pin)); - if (ref->vp->v_type == VREG) - { - p1 = &ref->device; - p2 = kernproc; - do_ioctl = &file_ioctl; - } - else - { - /* Partition. 
*/ - p1 = ref->vp; - p2 = ref->ctx; - do_ioctl = &device_ioctl; - } - - if (_DKIOCCSPINEXTENT == theIoctl) { - /* Tell CS the image size, so it knows whether to place the subsequent pins SSD/HDD */ - pin.cp_extent.length = end; - pin.cp_flags = _DKIOCCSHIBERNATEIMGSIZE; - (void) do_ioctl(p1, p2, _DKIOCCSPINEXTENT, (caddr_t)&pin); - } else if (_DKIOCCSUNPINEXTENT == theIoctl) { - /* Tell CS hibernation is done, so it can stop blocking overlapping writes */ - pin.cp_flags = _DKIOCCSPINDISCARDBLACKLIST; - (void) do_ioctl(p1, p2, _DKIOCCSUNPINEXTENT, (caddr_t)&pin); - } - - for (; offset < end; offset += filechunk) - { - if (ref->vp->v_type == VREG) - { - daddr64_t blkno; - filechunk = 1*1024*1024*1024; - if (filechunk > (size_t)(end - offset)) - filechunk = (size_t)(end - offset); - error = VNOP_BLOCKMAP(ref->vp, offset, filechunk, &blkno, - &filechunk, NULL, VNODE_WRITE | VNODE_BLOCKMAP_NO_TRACK, NULL); - if (error) break; - if (-1LL == blkno) continue; - fileblk = blkno * ref->blksize; - } - else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) - { - fileblk = offset; - filechunk = ref->filelength; - } - - if (DKIOCUNMAP == theIoctl) - { - extent.offset = fileblk; - extent.length = filechunk; - unmap.extents = &extent; - unmap.extentsCount = 1; - error = do_ioctl(p1, p2, theIoctl, (caddr_t)&unmap); -// printf("DKIOCUNMAP(%d) 0x%qx, 0x%qx\n", error, extent.offset, extent.length); - } - else if (_DKIOCCSPINEXTENT == theIoctl) - { - pin.cp_extent.offset = fileblk; - pin.cp_extent.length = filechunk; - pin.cp_flags = _DKIOCCSPINFORHIBERNATION; - error = do_ioctl(p1, p2, theIoctl, (caddr_t)&pin); - if (error && (ENOTTY != error)) - { - printf("_DKIOCCSPINEXTENT(%d) 0x%qx, 0x%qx\n", error, pin.cp_extent.offset, pin.cp_extent.length); - } - } - else if (_DKIOCCSUNPINEXTENT == theIoctl) - { - pin.cp_extent.offset = fileblk; - pin.cp_extent.length = filechunk; - pin.cp_flags = _DKIOCCSPINFORHIBERNATION; - error = do_ioctl(p1, p2, theIoctl, (caddr_t)&pin); - if (error && (ENOTTY != error)) - { - printf("_DKIOCCSUNPINEXTENT(%d) 0x%qx, 0x%qx\n", error, pin.cp_extent.offset, pin.cp_extent.length); - } - } - else error = EINVAL; - - if (error) break; - } - return (error); + int error = 0; + int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); + void * p1; + void * p2; + uint64_t fileblk; + size_t filechunk; + dk_extent_t extent; + dk_unmap_t unmap; + _dk_cs_pin_t pin; + + bzero(&extent, sizeof(dk_extent_t)); + bzero(&unmap, sizeof(dk_unmap_t)); + bzero(&pin, sizeof(pin)); + if (ref->vp->v_type == VREG) { + p1 = &ref->device; + p2 = kernproc; + do_ioctl = &file_ioctl; + } else { + /* Partition. 
*/ + p1 = ref->vp; + p2 = ref->ctx; + do_ioctl = &device_ioctl; + } + + if (_DKIOCCSPINEXTENT == theIoctl) { + /* Tell CS the image size, so it knows whether to place the subsequent pins SSD/HDD */ + pin.cp_extent.length = end; + pin.cp_flags = _DKIOCCSHIBERNATEIMGSIZE; + (void) do_ioctl(p1, p2, _DKIOCCSPINEXTENT, (caddr_t)&pin); + } else if (_DKIOCCSUNPINEXTENT == theIoctl) { + /* Tell CS hibernation is done, so it can stop blocking overlapping writes */ + pin.cp_flags = _DKIOCCSPINDISCARDBLACKLIST; + (void) do_ioctl(p1, p2, _DKIOCCSUNPINEXTENT, (caddr_t)&pin); + } + + for (; offset < end; offset += filechunk) { + if (ref->vp->v_type == VREG) { + daddr64_t blkno; + filechunk = 1 * 1024 * 1024 * 1024; + if (filechunk > (size_t)(end - offset)) { + filechunk = (size_t)(end - offset); + } + error = VNOP_BLOCKMAP(ref->vp, offset, filechunk, &blkno, + &filechunk, NULL, VNODE_WRITE | VNODE_BLOCKMAP_NO_TRACK, NULL); + if (error) { + break; + } + if (-1LL == blkno) { + continue; + } + fileblk = blkno * ref->blksize; + } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { + fileblk = offset; + filechunk = ref->filelength; + } + + if (DKIOCUNMAP == theIoctl) { + extent.offset = fileblk; + extent.length = filechunk; + unmap.extents = &extent; + unmap.extentsCount = 1; + error = do_ioctl(p1, p2, theIoctl, (caddr_t)&unmap); +// printf("DKIOCUNMAP(%d) 0x%qx, 0x%qx\n", error, extent.offset, extent.length); + } else if (_DKIOCCSPINEXTENT == theIoctl) { + pin.cp_extent.offset = fileblk; + pin.cp_extent.length = filechunk; + pin.cp_flags = _DKIOCCSPINFORHIBERNATION; + error = do_ioctl(p1, p2, theIoctl, (caddr_t)&pin); + if (error && (ENOTTY != error)) { + printf("_DKIOCCSPINEXTENT(%d) 0x%qx, 0x%qx\n", error, pin.cp_extent.offset, pin.cp_extent.length); + } + } else if (_DKIOCCSUNPINEXTENT == theIoctl) { + pin.cp_extent.offset = fileblk; + pin.cp_extent.length = filechunk; + pin.cp_flags = _DKIOCCSPINFORHIBERNATION; + error = do_ioctl(p1, p2, theIoctl, (caddr_t)&pin); + if (error && (ENOTTY != error)) { + printf("_DKIOCCSUNPINEXTENT(%d) 0x%qx, 0x%qx\n", error, pin.cp_extent.offset, pin.cp_extent.length); + } + } else { + error = EINVAL; + } + + if (error) { + break; + } + } + return error; } extern uint32_t freespace_mb(vnode_t vp); struct kern_direct_file_io_ref_t * -kern_open_file_for_direct_io(const char * name, - uint32_t iflags, - kern_get_file_extents_callback_t callback, - void * callback_ref, - off_t set_file_size, - off_t fs_free_size, - off_t write_file_offset, - void * write_file_addr, - size_t write_file_len, - dev_t * partition_device_result, - dev_t * image_device_result, - uint64_t * partitionbase_result, - uint64_t * maxiocount_result, - uint32_t * oflags) +kern_open_file_for_direct_io(const char * name, + uint32_t iflags, + kern_get_file_extents_callback_t callback, + void * callback_ref, + off_t set_file_size, + off_t fs_free_size, + off_t write_file_offset, + void * write_file_addr, + size_t write_file_len, + dev_t * partition_device_result, + dev_t * image_device_result, + uint64_t * partitionbase_result, + uint64_t * maxiocount_result, + uint32_t * oflags) { - struct kern_direct_file_io_ref_t * ref; - - proc_t p; - struct vnode_attr va; - dk_apfs_wbc_range_t wbc_range; - int error; - off_t f_offset; - uint64_t fileblk; - size_t filechunk; - uint64_t physoffset, minoffset; - dev_t device; - dev_t target = 0; - int isssd = 0; - uint32_t flags = 0; - uint32_t blksize; - off_t maxiocount, count, segcount, wbctotal; - boolean_t locked = FALSE; - int fmode, cmode; - struct 
nameidata nd; - u_int32_t ndflags; - off_t mpFree; - - int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); - void * p1 = NULL; - void * p2 = NULL; - - error = EFAULT; - - ref = (struct kern_direct_file_io_ref_t *) kalloc(sizeof(struct kern_direct_file_io_ref_t)); - if (!ref) - { + struct kern_direct_file_io_ref_t * ref; + + proc_t p; + struct vnode_attr va; + dk_apfs_wbc_range_t wbc_range; + int error; + off_t f_offset; + uint64_t fileblk; + size_t filechunk; + uint64_t physoffset, minoffset; + dev_t device; + dev_t target = 0; + int isssd = 0; + uint32_t flags = 0; + uint32_t blksize; + off_t maxiocount, count, segcount, wbctotal; + boolean_t locked = FALSE; + int fmode, cmode; + struct nameidata nd; + u_int32_t ndflags; + off_t mpFree; + + int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); + void * p1 = NULL; + void * p2 = NULL; + error = EFAULT; - goto out; - } - - bzero(ref, sizeof(*ref)); - p = kernproc; - ref->ctx = vfs_context_kernel(); - - fmode = (kIOPolledFileCreate & iflags) ? (O_CREAT | FWRITE) : FWRITE; - cmode = S_IRUSR | S_IWUSR; - ndflags = NOFOLLOW; - NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(name), ref->ctx); - VATTR_INIT(&va); - VATTR_SET(&va, va_mode, cmode); - VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED); - VATTR_SET(&va, va_dataprotect_class, PROTECTION_CLASS_D); - if ((error = vn_open_auth(&nd, &fmode, &va))) { - kprintf("vn_open_auth(fmode: %d, cmode: %d) failed with error: %d\n", fmode, cmode, error); - goto out; - } - - ref->vp = nd.ni_vp; - if (ref->vp->v_type == VREG) - { - vnode_lock_spin(ref->vp); - SET(ref->vp->v_flag, VSWAP); - vnode_unlock(ref->vp); - } - - if (write_file_addr && write_file_len) - { - if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, IO_SKIP_ENCRYPTION))) { - kprintf("kern_write_file() failed with error: %d\n", error); - goto out; - } - } - - VATTR_INIT(&va); - VATTR_WANTED(&va, va_rdev); - VATTR_WANTED(&va, va_fsid); - VATTR_WANTED(&va, va_devid); - VATTR_WANTED(&va, va_data_size); - VATTR_WANTED(&va, va_data_alloc); - VATTR_WANTED(&va, va_nlink); - error = EFAULT; - if (vnode_getattr(ref->vp, &va, ref->ctx)) goto out; - - wbctotal = 0; - mpFree = freespace_mb(ref->vp); - mpFree <<= 20; - kprintf("kern_direct_file(%s): vp size %qd, alloc %qd, mp free %qd, keep free %qd\n", - name, va.va_data_size, va.va_data_alloc, mpFree, fs_free_size); - - if (ref->vp->v_type == VREG) - { - /* Don't dump files with links. */ - if (va.va_nlink != 1) goto out; - - device = (VATTR_IS_SUPPORTED(&va, va_devid)) ? 
va.va_devid : va.va_fsid; - ref->filelength = va.va_data_size; - - p1 = &device; - p2 = p; - do_ioctl = &file_ioctl; - - if (kIOPolledFileHibernate & iflags) - { - error = do_ioctl(p1, p2, DKIOCAPFSGETWBCRANGE, (caddr_t) &wbc_range); - ref->wbcranged = (error == 0); - } - if (ref->wbcranged) - { - uint32_t idx; - assert(wbc_range.count <= (sizeof(wbc_range.extents) / sizeof(wbc_range.extents[0]))); - for (idx = 0; idx < wbc_range.count; idx++) wbctotal += wbc_range.extents[idx].length; - kprintf("kern_direct_file(%s): wbc %qd\n", name, wbctotal); - if (wbctotal) target = wbc_range.dev; - } - - if (set_file_size) - { - if (wbctotal) - { - if (wbctotal >= set_file_size) set_file_size = HIBERNATE_MIN_FILE_SIZE; - else - { - set_file_size -= wbctotal; - if (set_file_size < HIBERNATE_MIN_FILE_SIZE) set_file_size = HIBERNATE_MIN_FILE_SIZE; - } - } - if (fs_free_size) - { - mpFree += va.va_data_alloc; - if ((mpFree < set_file_size) || ((mpFree - set_file_size) < fs_free_size)) - { - error = ENOSPC; - goto out; + + ref = (struct kern_direct_file_io_ref_t *) kalloc(sizeof(struct kern_direct_file_io_ref_t)); + if (!ref) { + error = EFAULT; + goto out; + } + + bzero(ref, sizeof(*ref)); + p = kernproc; + ref->ctx = vfs_context_kernel(); + + fmode = (kIOPolledFileCreate & iflags) ? (O_CREAT | FWRITE) : FWRITE; + cmode = S_IRUSR | S_IWUSR; + ndflags = NOFOLLOW; + NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(name), ref->ctx); + VATTR_INIT(&va); + VATTR_SET(&va, va_mode, cmode); + VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED); + VATTR_SET(&va, va_dataprotect_class, PROTECTION_CLASS_D); + if ((error = vn_open_auth(&nd, &fmode, &va))) { + kprintf("vn_open_auth(fmode: %d, cmode: %d) failed with error: %d\n", fmode, cmode, error); + goto out; + } + + ref->vp = nd.ni_vp; + if (ref->vp->v_type == VREG) { + vnode_lock_spin(ref->vp); + SET(ref->vp->v_flag, VSWAP); + vnode_unlock(ref->vp); + } + + if (write_file_addr && write_file_len) { + if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, IO_SKIP_ENCRYPTION))) { + kprintf("kern_write_file() failed with error: %d\n", error); + goto out; + } + } + + VATTR_INIT(&va); + VATTR_WANTED(&va, va_rdev); + VATTR_WANTED(&va, va_fsid); + VATTR_WANTED(&va, va_devid); + VATTR_WANTED(&va, va_data_size); + VATTR_WANTED(&va, va_data_alloc); + VATTR_WANTED(&va, va_nlink); + error = EFAULT; + if (vnode_getattr(ref->vp, &va, ref->ctx)) { + goto out; + } + + wbctotal = 0; + mpFree = freespace_mb(ref->vp); + mpFree <<= 20; + kprintf("kern_direct_file(%s): vp size %qd, alloc %qd, mp free %qd, keep free %qd\n", + name, va.va_data_size, va.va_data_alloc, mpFree, fs_free_size); + + if (ref->vp->v_type == VREG) { + /* Don't dump files with links. */ + if (va.va_nlink != 1) { + goto out; + } + + device = (VATTR_IS_SUPPORTED(&va, va_devid)) ? 
va.va_devid : va.va_fsid; + ref->filelength = va.va_data_size; + + p1 = &device; + p2 = p; + do_ioctl = &file_ioctl; + + if (kIOPolledFileHibernate & iflags) { + error = do_ioctl(p1, p2, DKIOCAPFSGETWBCRANGE, (caddr_t) &wbc_range); + ref->wbcranged = (error == 0); + } + if (ref->wbcranged) { + uint32_t idx; + assert(wbc_range.count <= (sizeof(wbc_range.extents) / sizeof(wbc_range.extents[0]))); + for (idx = 0; idx < wbc_range.count; idx++) { + wbctotal += wbc_range.extents[idx].length; + } + kprintf("kern_direct_file(%s): wbc %qd\n", name, wbctotal); + if (wbctotal) { + target = wbc_range.dev; + } + } + + if (set_file_size) { + if (wbctotal) { + if (wbctotal >= set_file_size) { + set_file_size = HIBERNATE_MIN_FILE_SIZE; + } else { + set_file_size -= wbctotal; + if (set_file_size < HIBERNATE_MIN_FILE_SIZE) { + set_file_size = HIBERNATE_MIN_FILE_SIZE; + } + } + } + if (fs_free_size) { + mpFree += va.va_data_alloc; + if ((mpFree < set_file_size) || ((mpFree - set_file_size) < fs_free_size)) { + error = ENOSPC; + goto out; + } + } + error = vnode_setsize(ref->vp, set_file_size, IO_NOZEROFILL | IO_NOAUTH, ref->ctx); + if (error) { + goto out; + } + ref->filelength = set_file_size; + } + } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { + /* Partition. */ + device = va.va_rdev; + + p1 = ref->vp; + p2 = ref->ctx; + do_ioctl = &device_ioctl; + } else { + /* Don't dump to non-regular files. */ + error = EFAULT; + goto out; + } + ref->device = device; + + // probe for CF + dk_corestorage_info_t cs_info; + memset(&cs_info, 0, sizeof(dk_corestorage_info_t)); + error = do_ioctl(p1, p2, DKIOCCORESTORAGE, (caddr_t)&cs_info); + ref->cf = (error == 0) && (cs_info.flags & DK_CORESTORAGE_ENABLE_HOTFILES); + + // get block size + + error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &ref->blksize); + if (error) { + goto out; + } + + minoffset = HIBERNATE_MIN_PHYSICAL_LBA * ref->blksize; + + if (ref->vp->v_type != VREG) { + error = do_ioctl(p1, p2, DKIOCGETBLOCKCOUNT, (caddr_t) &fileblk); + if (error) { + goto out; } - } - error = vnode_setsize(ref->vp, set_file_size, IO_NOZEROFILL | IO_NOAUTH, ref->ctx); - if (error) goto out; - ref->filelength = set_file_size; - } - } - else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) - { - /* Partition. */ - device = va.va_rdev; - - p1 = ref->vp; - p2 = ref->ctx; - do_ioctl = &device_ioctl; - } - else - { - /* Don't dump to non-regular files. 
*/ - error = EFAULT; - goto out; - } - ref->device = device; - - // probe for CF - dk_corestorage_info_t cs_info; - memset(&cs_info, 0, sizeof(dk_corestorage_info_t)); - error = do_ioctl(p1, p2, DKIOCCORESTORAGE, (caddr_t)&cs_info); - ref->cf = (error == 0) && (cs_info.flags & DK_CORESTORAGE_ENABLE_HOTFILES); - - // get block size - - error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &ref->blksize); - if (error) - goto out; - - minoffset = HIBERNATE_MIN_PHYSICAL_LBA * ref->blksize; - - if (ref->vp->v_type != VREG) - { - error = do_ioctl(p1, p2, DKIOCGETBLOCKCOUNT, (caddr_t) &fileblk); - if (error) goto out; - ref->filelength = fileblk * ref->blksize; - } - - // pin logical extents, CS version - - error = kern_ioctl_file_extents(ref, _DKIOCCSPINEXTENT, 0, ref->filelength); - if (error && (ENOTTY != error)) goto out; - ref->pinned = (error == 0); - - // pin logical extents, apfs version - - error = VNOP_IOCTL(ref->vp, FSCTL_FREEZE_EXTENTS, NULL, 0, ref->ctx); - if (error && (ENOTTY != error)) goto out; - ref->frozen = (error == 0); - - // generate the block list - - error = do_ioctl(p1, p2, DKIOCLOCKPHYSICALEXTENTS, NULL); - if (error) goto out; - locked = TRUE; - - f_offset = 0; - for (; f_offset < ref->filelength; f_offset += filechunk) - { - if (ref->vp->v_type == VREG) - { - filechunk = 1*1024*1024*1024; - daddr64_t blkno; - - error = VNOP_BLOCKMAP(ref->vp, f_offset, filechunk, &blkno, - &filechunk, NULL, VNODE_WRITE | VNODE_BLOCKMAP_NO_TRACK, NULL); - if (error) goto out; - if (-1LL == blkno) continue; - fileblk = blkno * ref->blksize; - } - else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) - { - fileblk = f_offset; - filechunk = f_offset ? 0 : ref->filelength; - } - - physoffset = 0; - while (physoffset < filechunk) - { - dk_physical_extent_t getphysreq; - bzero(&getphysreq, sizeof(getphysreq)); - - getphysreq.offset = fileblk + physoffset; - getphysreq.length = (filechunk - physoffset); - error = do_ioctl(p1, p2, DKIOCGETPHYSICALEXTENT, (caddr_t) &getphysreq); - if (error) goto out; - if (!target) - { - target = getphysreq.dev; - } - else if (target != getphysreq.dev) - { - error = ENOTSUP; - goto out; - } - - assert(getphysreq.offset >= minoffset); + ref->filelength = fileblk * ref->blksize; + } + + // pin logical extents, CS version + + error = kern_ioctl_file_extents(ref, _DKIOCCSPINEXTENT, 0, ref->filelength); + if (error && (ENOTTY != error)) { + goto out; + } + ref->pinned = (error == 0); + + // pin logical extents, apfs version + + error = VNOP_IOCTL(ref->vp, FSCTL_FREEZE_EXTENTS, NULL, 0, ref->ctx); + if (error && (ENOTTY != error)) { + goto out; + } + ref->frozen = (error == 0); + + // generate the block list + + error = do_ioctl(p1, p2, DKIOCLOCKPHYSICALEXTENTS, NULL); + if (error) { + goto out; + } + locked = TRUE; + + f_offset = 0; + for (; f_offset < ref->filelength; f_offset += filechunk) { + if (ref->vp->v_type == VREG) { + filechunk = 1 * 1024 * 1024 * 1024; + daddr64_t blkno; + + error = VNOP_BLOCKMAP(ref->vp, f_offset, filechunk, &blkno, + &filechunk, NULL, VNODE_WRITE | VNODE_BLOCKMAP_NO_TRACK, NULL); + if (error) { + goto out; + } + if (-1LL == blkno) { + continue; + } + fileblk = blkno * ref->blksize; + } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { + fileblk = f_offset; + filechunk = f_offset ? 
0 : ref->filelength; + } + + physoffset = 0; + while (physoffset < filechunk) { + dk_physical_extent_t getphysreq; + bzero(&getphysreq, sizeof(getphysreq)); + + getphysreq.offset = fileblk + physoffset; + getphysreq.length = (filechunk - physoffset); + error = do_ioctl(p1, p2, DKIOCGETPHYSICALEXTENT, (caddr_t) &getphysreq); + if (error) { + goto out; + } + if (!target) { + target = getphysreq.dev; + } else if (target != getphysreq.dev) { + error = ENOTSUP; + goto out; + } + + assert(getphysreq.offset >= minoffset); #if HIBFRAGMENT - uint64_t rev; - for (rev = 4096; rev <= getphysreq.length; rev += 4096) - { - callback(callback_ref, getphysreq.offset + getphysreq.length - rev, 4096); - } + uint64_t rev; + for (rev = 4096; rev <= getphysreq.length; rev += 4096) { + callback(callback_ref, getphysreq.offset + getphysreq.length - rev, 4096); + } #else - callback(callback_ref, getphysreq.offset, getphysreq.length); + callback(callback_ref, getphysreq.offset, getphysreq.length); #endif - physoffset += getphysreq.length; - } - } - if (ref->wbcranged) - { - uint32_t idx; - for (idx = 0; idx < wbc_range.count; idx++) - { - assert(wbc_range.extents[idx].offset >= minoffset); - callback(callback_ref, wbc_range.extents[idx].offset, wbc_range.extents[idx].length); - } - } - callback(callback_ref, 0ULL, 0ULL); - - if (ref->vp->v_type == VREG) p1 = ⌖ - else - { - p1 = ⌖ - p2 = p; - do_ioctl = &file_ioctl; - } - - // get partition base - - if (partitionbase_result) - { - error = do_ioctl(p1, p2, DKIOCGETBASE, (caddr_t) partitionbase_result); - if (error) - goto out; - } - - // get block size & constraints - - error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &blksize); - if (error) - goto out; - - maxiocount = 1*1024*1024*1024; - - error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t) &count); - if (error) - count = 0; - count *= blksize; - if (count && (count < maxiocount)) - maxiocount = count; - - error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t) &count); - if (error) - count = 0; - count *= blksize; - if (count && (count < maxiocount)) - maxiocount = count; - - error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTREAD, (caddr_t) &count); - if (error) - count = 0; - if (count && (count < maxiocount)) - maxiocount = count; - - error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t) &count); - if (error) - count = 0; - if (count && (count < maxiocount)) - maxiocount = count; - - error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTREAD, (caddr_t) &count); - if (!error) - error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t) &segcount); - if (error) - count = segcount = 0; - count *= segcount; - if (count && (count < maxiocount)) - maxiocount = count; - - error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, (caddr_t) &count); - if (!error) - error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t) &segcount); - if (error) - count = segcount = 0; - count *= segcount; - if (count && (count < maxiocount)) - maxiocount = count; - - kprintf("max io 0x%qx bytes\n", maxiocount); - if (maxiocount_result) - *maxiocount_result = maxiocount; - - error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd); - if (!error && isssd) - flags |= kIOPolledFileSSD; - - if (partition_device_result) - *partition_device_result = device; - if (image_device_result) - *image_device_result = target; - if (oflags) - *oflags = flags; - - if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) - { - vnode_close(ref->vp, FWRITE, ref->ctx); - ref->vp = NULLVP; - ref->ctx = NULL; - } + 
physoffset += getphysreq.length; + } + } + if (ref->wbcranged) { + uint32_t idx; + for (idx = 0; idx < wbc_range.count; idx++) { + assert(wbc_range.extents[idx].offset >= minoffset); + callback(callback_ref, wbc_range.extents[idx].offset, wbc_range.extents[idx].length); + } + } + callback(callback_ref, 0ULL, 0ULL); + + if (ref->vp->v_type == VREG) { + p1 = ⌖ + } else { + p1 = ⌖ + p2 = p; + do_ioctl = &file_ioctl; + } + + // get partition base + + if (partitionbase_result) { + error = do_ioctl(p1, p2, DKIOCGETBASE, (caddr_t) partitionbase_result); + if (error) { + goto out; + } + } + + // get block size & constraints + + error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &blksize); + if (error) { + goto out; + } + + maxiocount = 1 * 1024 * 1024 * 1024; + + error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t) &count); + if (error) { + count = 0; + } + count *= blksize; + if (count && (count < maxiocount)) { + maxiocount = count; + } + + error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t) &count); + if (error) { + count = 0; + } + count *= blksize; + if (count && (count < maxiocount)) { + maxiocount = count; + } + + error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTREAD, (caddr_t) &count); + if (error) { + count = 0; + } + if (count && (count < maxiocount)) { + maxiocount = count; + } + + error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t) &count); + if (error) { + count = 0; + } + if (count && (count < maxiocount)) { + maxiocount = count; + } + + error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTREAD, (caddr_t) &count); + if (!error) { + error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t) &segcount); + } + if (error) { + count = segcount = 0; + } + count *= segcount; + if (count && (count < maxiocount)) { + maxiocount = count; + } + + error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, (caddr_t) &count); + if (!error) { + error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t) &segcount); + } + if (error) { + count = segcount = 0; + } + count *= segcount; + if (count && (count < maxiocount)) { + maxiocount = count; + } + + kprintf("max io 0x%qx bytes\n", maxiocount); + if (maxiocount_result) { + *maxiocount_result = maxiocount; + } + + error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd); + if (!error && isssd) { + flags |= kIOPolledFileSSD; + } + + if (partition_device_result) { + *partition_device_result = device; + } + if (image_device_result) { + *image_device_result = target; + } + if (oflags) { + *oflags = flags; + } + + if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { + vnode_close(ref->vp, FWRITE, ref->ctx); + ref->vp = NULLVP; + ref->ctx = NULL; + } out: - printf("kern_open_file_for_direct_io(%p, %d)\n", ref, error); - - - if (error && locked) - { - p1 = &device; - (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL); - } - - if (error && ref) - { - if (ref->vp) - { - (void) kern_ioctl_file_extents(ref, _DKIOCCSUNPINEXTENT, 0, (ref->pinned && ref->cf) ? 
ref->filelength : 0); - - if (ref->frozen) - { - (void) VNOP_IOCTL(ref->vp, FSCTL_THAW_EXTENTS, NULL, 0, ref->ctx); - } - if (ref->wbcranged) - { - (void) do_ioctl(p1, p2, DKIOCAPFSRELEASEWBCRANGE, (caddr_t) NULL); - } - vnode_close(ref->vp, FWRITE, ref->ctx); - ref->vp = NULLVP; - } - ref->ctx = NULL; - kfree(ref, sizeof(struct kern_direct_file_io_ref_t)); - ref = NULL; - } - - return(ref); + printf("kern_open_file_for_direct_io(%p, %d)\n", ref, error); + + + if (error && locked) { + p1 = &device; + (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL); + } + + if (error && ref) { + if (ref->vp) { + (void) kern_ioctl_file_extents(ref, _DKIOCCSUNPINEXTENT, 0, (ref->pinned && ref->cf) ? ref->filelength : 0); + + if (ref->frozen) { + (void) VNOP_IOCTL(ref->vp, FSCTL_THAW_EXTENTS, NULL, 0, ref->ctx); + } + if (ref->wbcranged) { + (void) do_ioctl(p1, p2, DKIOCAPFSRELEASEWBCRANGE, (caddr_t) NULL); + } + vnode_close(ref->vp, FWRITE, ref->ctx); + ref->vp = NULLVP; + } + ref->ctx = NULL; + kfree(ref, sizeof(struct kern_direct_file_io_ref_t)); + ref = NULL; + } + + return ref; } int kern_write_file(struct kern_direct_file_io_ref_t * ref, off_t offset, void * addr, size_t len, int ioflag) { - return (vn_rdwr(UIO_WRITE, ref->vp, - addr, len, offset, - UIO_SYSSPACE, ioflag|IO_SYNC|IO_NODELOCKED|IO_UNIT, - vfs_context_ucred(ref->ctx), (int *) 0, - vfs_context_proc(ref->ctx))); + return vn_rdwr(UIO_WRITE, ref->vp, + addr, len, offset, + UIO_SYSSPACE, ioflag | IO_SYNC | IO_NODELOCKED | IO_UNIT, + vfs_context_ucred(ref->ctx), (int *) 0, + vfs_context_proc(ref->ctx)); } int kern_read_file(struct kern_direct_file_io_ref_t * ref, off_t offset, void * addr, size_t len, int ioflag) { - return (vn_rdwr(UIO_READ, ref->vp, - addr, len, offset, - UIO_SYSSPACE, ioflag|IO_SYNC|IO_NODELOCKED|IO_UNIT, - vfs_context_ucred(ref->ctx), (int *) 0, - vfs_context_proc(ref->ctx))); + return vn_rdwr(UIO_READ, ref->vp, + addr, len, offset, + UIO_SYSSPACE, ioflag | IO_SYNC | IO_NODELOCKED | IO_UNIT, + vfs_context_ucred(ref->ctx), (int *) 0, + vfs_context_proc(ref->ctx)); } struct mount * kern_file_mount(struct kern_direct_file_io_ref_t * ref) { - return (ref->vp->v_mount); + return ref->vp->v_mount; } void kern_close_file_for_direct_io(struct kern_direct_file_io_ref_t * ref, - off_t write_offset, void * addr, size_t write_length, - off_t discard_offset, off_t discard_end) + off_t write_offset, void * addr, size_t write_length, + off_t discard_offset, off_t discard_end) { - int error; - printf("kern_close_file_for_direct_io(%p)\n", ref); - - if (!ref) return; - - if (ref->vp) - { - int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); - void * p1; - void * p2; - - discard_offset = ((discard_offset + ref->blksize - 1) & ~(((off_t) ref->blksize) - 1)); - discard_end = ((discard_end) & ~(((off_t) ref->blksize) - 1)); - - if (ref->vp->v_type == VREG) - { - p1 = &ref->device; - p2 = kernproc; - do_ioctl = &file_ioctl; - } - else - { - /* Partition. 
*/ - p1 = ref->vp; - p2 = ref->ctx; - do_ioctl = &device_ioctl; - } - (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL); + int error; + printf("kern_close_file_for_direct_io(%p)\n", ref); + + if (!ref) { + return; + } + + if (ref->vp) { + int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); + void * p1; + void * p2; + + discard_offset = ((discard_offset + ref->blksize - 1) & ~(((off_t) ref->blksize) - 1)); + discard_end = ((discard_end) & ~(((off_t) ref->blksize) - 1)); + + if (ref->vp->v_type == VREG) { + p1 = &ref->device; + p2 = kernproc; + do_ioctl = &file_ioctl; + } else { + /* Partition. */ + p1 = ref->vp; + p2 = ref->ctx; + do_ioctl = &device_ioctl; + } + (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL); //XXX If unmapping extents then don't also need to unpin; except ... //XXX if file unaligned (HFS 4k / Fusion 128k) then pin is superset and @@ -674,33 +682,29 @@ kern_close_file_for_direct_io(struct kern_direct_file_io_ref_t * ref, (void) kern_ioctl_file_extents(ref, _DKIOCCSUNPINEXTENT, 0, (will_unpin) ? ref->filelength : 0); - if (will_unmap) - { - (void) kern_ioctl_file_extents(ref, DKIOCUNMAP, discard_offset, (ref->cf) ? ref->filelength : discard_end); - } + if (will_unmap) { + (void) kern_ioctl_file_extents(ref, DKIOCUNMAP, discard_offset, (ref->cf) ? ref->filelength : discard_end); + } - if (ref->frozen) - { - (void) VNOP_IOCTL(ref->vp, FSCTL_THAW_EXTENTS, NULL, 0, ref->ctx); - } - if (ref->wbcranged) - { - (void) do_ioctl(p1, p2, DKIOCAPFSRELEASEWBCRANGE, (caddr_t) NULL); - } + if (ref->frozen) { + (void) VNOP_IOCTL(ref->vp, FSCTL_THAW_EXTENTS, NULL, 0, ref->ctx); + } + if (ref->wbcranged) { + (void) do_ioctl(p1, p2, DKIOCAPFSRELEASEWBCRANGE, (caddr_t) NULL); + } - if (addr && write_length) - { - (void) kern_write_file(ref, write_offset, addr, write_length, IO_SKIP_ENCRYPTION); - } + if (addr && write_length) { + (void) kern_write_file(ref, write_offset, addr, write_length, IO_SKIP_ENCRYPTION); + } - error = vnode_close(ref->vp, FWRITE, ref->ctx); + error = vnode_close(ref->vp, FWRITE, ref->ctx); - ref->vp = NULLVP; - kprintf("vnode_close(%d)\n", error); + ref->vp = NULLVP; + kprintf("vnode_close(%d)\n", error); - } + } - ref->ctx = NULL; + ref->ctx = NULL; - kfree(ref, sizeof(struct kern_direct_file_io_ref_t)); + kfree(ref, sizeof(struct kern_direct_file_io_ref_t)); } diff --git a/bsd/kern/kern_synch.c b/bsd/kern/kern_synch.c index 841cdeba9..d8afd780c 100644 --- a/bsd/kern/kern_synch.c +++ b/bsd/kern/kern_synch.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies @@ -54,10 +54,10 @@ #include #include -#include /* for unix_syscall_return() */ +#include /* for unix_syscall_return() */ #include -extern void compute_averunnable(void *); /* XXX */ +extern void compute_averunnable(void *); /* XXX */ __attribute__((noreturn)) static void @@ -76,49 +76,54 @@ _sleep_continue( __unused void *parameter, wait_result_t wresult) spinmutex = ut->uu_pri & PSPIN; switch (wresult) { - case THREAD_TIMED_OUT: - error = EWOULDBLOCK; + case THREAD_TIMED_OUT: + error = EWOULDBLOCK; + break; + case THREAD_AWAKENED: + /* + * Posix implies any signal should be delivered + * first, regardless of whether awakened due + * to receiving event. + */ + if (!catch) { break; - case THREAD_AWAKENED: - /* - * Posix implies any signal should be delivered - * first, regardless of whether awakened due - * to receiving event. - */ - if (!catch) - break; - /* else fall through */ - case THREAD_INTERRUPTED: - if (catch) { - if (thread_should_abort(self)) { - error = EINTR; - } else if (SHOULDissignal(p,ut)) { - if ((sig = CURSIG(p)) != 0) { - if (p->p_sigacts->ps_sigintr & sigmask(sig)) - error = EINTR; - else - error = ERESTART; - } - if (thread_should_abort(self)) { + } + /* else fall through */ + case THREAD_INTERRUPTED: + if (catch) { + if (thread_should_abort(self)) { + error = EINTR; + } else if (SHOULDissignal(p, ut)) { + if ((sig = CURSIG(p)) != 0) { + if (p->p_sigacts->ps_sigintr & sigmask(sig)) { error = EINTR; + } else { + error = ERESTART; } - } else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) { - /* due to thread cancel */ - error = EINTR; - } - } else + } + if (thread_should_abort(self)) { + error = EINTR; + } + } else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) { + /* due to thread cancel */ error = EINTR; - break; + } + } else { + error = EINTR; + } + break; } - if (error == EINTR || error == ERESTART) + if (error == EINTR || error == ERESTART) { act_set_astbsd(self); + } if (ut->uu_mtx && !dropmutex) { - if (spinmutex) + if (spinmutex) { lck_mtx_lock_spin(ut->uu_mtx); - else + } else { lck_mtx_lock(ut->uu_mtx); + } } ut->uu_wchan = NULL; ut->uu_wmesg = NULL; @@ -149,12 +154,12 @@ _sleep_continue( __unused void *parameter, wait_result_t wresult) static int _sleep( - caddr_t chan, - int pri, - const char *wmsg, - u_int64_t abstime, - int (*continuation)(int), - lck_mtx_t *mtx) + caddr_t chan, + int pri, + const char *wmsg, + u_int64_t abstime, + int (*continuation)(int), + lck_mtx_t *mtx) { struct proc *p; thread_t self = current_thread(); @@ -170,176 +175,193 @@ _sleep( p = current_proc(); p->p_priority = pri & PRIMASK; /* It can still block in proc_exit() after the teardown. */ - if (p->p_stats != NULL) + if (p->p_stats != NULL) { OSIncrementAtomicLong(&p->p_stats->p_ru.ru_nvcsw); - - if (pri & PCATCH) + } + + if (pri & PCATCH) { catch = THREAD_ABORTSAFE; - else + } else { catch = THREAD_UNINT; + } /* set wait message & channel */ ut->uu_wchan = chan; ut->uu_wmesg = wmsg ? 
wmsg : "unknown"; if (mtx != NULL && chan != NULL && (thread_continue_t)continuation == THREAD_CONTINUE_NULL) { - int flags; + int flags; - if (dropmutex) + if (dropmutex) { flags = LCK_SLEEP_UNLOCK; - else + } else { flags = LCK_SLEEP_DEFAULT; + } - if (spinmutex) + if (spinmutex) { flags |= LCK_SLEEP_SPIN; + } - if (abstime) + if (abstime) { wait_result = lck_mtx_sleep_deadline(mtx, flags, chan, catch, abstime); - else + } else { wait_result = lck_mtx_sleep(mtx, flags, chan, catch); - } - else { - if (chan != NULL) + } + } else { + if (chan != NULL) { assert_wait_deadline(chan, catch, abstime); - if (mtx) + } + if (mtx) { lck_mtx_unlock(mtx); + } if (catch == THREAD_ABORTSAFE) { - if (SHOULDissignal(p,ut)) { + if (SHOULDissignal(p, ut)) { if ((sig = CURSIG(p)) != 0) { - if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) + if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) { goto block; - if (p->p_sigacts->ps_sigintr & sigmask(sig)) + } + if (p->p_sigacts->ps_sigintr & sigmask(sig)) { error = EINTR; - else + } else { error = ERESTART; + } if (mtx && !dropmutex) { - if (spinmutex) + if (spinmutex) { lck_mtx_lock_spin(mtx); - else + } else { lck_mtx_lock(mtx); + } } goto out; } } if (thread_should_abort(self)) { - if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) + if (clear_wait(self, THREAD_INTERRUPTED) == KERN_FAILURE) { goto block; + } error = EINTR; if (mtx && !dropmutex) { - if (spinmutex) + if (spinmutex) { lck_mtx_lock_spin(mtx); - else + } else { lck_mtx_lock(mtx); + } } goto out; } - } + } block: if ((thread_continue_t)continuation != THREAD_CONTINUE_NULL) { - ut->uu_continuation = continuation; + ut->uu_continuation = continuation; ut->uu_pri = pri; ut->uu_timo = abstime? 1: 0; ut->uu_mtx = mtx; (void) thread_block(_sleep_continue); /* NOTREACHED */ } - + wait_result = thread_block(THREAD_CONTINUE_NULL); if (mtx && !dropmutex) { - if (spinmutex) + if (spinmutex) { lck_mtx_lock_spin(mtx); - else + } else { lck_mtx_lock(mtx); + } } } switch (wait_result) { - case THREAD_TIMED_OUT: - error = EWOULDBLOCK; + case THREAD_TIMED_OUT: + error = EWOULDBLOCK; + break; + case THREAD_AWAKENED: + case THREAD_RESTART: + /* + * Posix implies any signal should be delivered + * first, regardless of whether awakened due + * to receiving event. + */ + if (catch != THREAD_ABORTSAFE) { break; - case THREAD_AWAKENED: - case THREAD_RESTART: - /* - * Posix implies any signal should be delivered - * first, regardless of whether awakened due - * to receiving event. 
- */ - if (catch != THREAD_ABORTSAFE) - break; - /* else fall through */ - case THREAD_INTERRUPTED: - if (catch == THREAD_ABORTSAFE) { - if (thread_should_abort(self)) { - error = EINTR; - } else if (SHOULDissignal(p, ut)) { - if ((sig = CURSIG(p)) != 0) { - if (p->p_sigacts->ps_sigintr & sigmask(sig)) - error = EINTR; - else - error = ERESTART; - } - if (thread_should_abort(self)) { + } + /* else fall through */ + case THREAD_INTERRUPTED: + if (catch == THREAD_ABORTSAFE) { + if (thread_should_abort(self)) { + error = EINTR; + } else if (SHOULDissignal(p, ut)) { + if ((sig = CURSIG(p)) != 0) { + if (p->p_sigacts->ps_sigintr & sigmask(sig)) { error = EINTR; + } else { + error = ERESTART; } - } else if( (ut->uu_flag & ( UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) { - /* due to thread cancel */ - error = EINTR; - } - } else + } + if (thread_should_abort(self)) { + error = EINTR; + } + } else if ((ut->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) { + /* due to thread cancel */ error = EINTR; - break; + } + } else { + error = EINTR; + } + break; } out: - if (error == EINTR || error == ERESTART) + if (error == EINTR || error == ERESTART) { act_set_astbsd(self); + } ut->uu_wchan = NULL; ut->uu_wmesg = NULL; - return (error); + return error; } int sleep( - void *chan, - int pri) + void *chan, + int pri) { return _sleep((caddr_t)chan, pri, (char *)NULL, 0, (int (*)(int))0, (lck_mtx_t *)0); } int msleep0( - void *chan, - lck_mtx_t *mtx, - int pri, - const char *wmsg, - int timo, - int (*continuation)(int)) + void *chan, + lck_mtx_t *mtx, + int pri, + const char *wmsg, + int timo, + int (*continuation)(int)) { - u_int64_t abstime = 0; + u_int64_t abstime = 0; - if (timo) + if (timo) { clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime); + } return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, mtx); } int msleep( - void *chan, - lck_mtx_t *mtx, - int pri, - const char *wmsg, - struct timespec *ts) + void *chan, + lck_mtx_t *mtx, + int pri, + const char *wmsg, + struct timespec *ts) { - u_int64_t abstime = 0; + u_int64_t abstime = 0; if (ts && (ts->tv_sec || ts->tv_nsec)) { - nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime ); + nanoseconds_to_absolutetime((uint64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec, &abstime ); clock_absolutetime_interval_to_deadline( abstime, &abstime ); } @@ -348,52 +370,54 @@ msleep( int msleep1( - void *chan, - lck_mtx_t *mtx, - int pri, - const char *wmsg, - u_int64_t abstime) + void *chan, + lck_mtx_t *mtx, + int pri, + const char *wmsg, + u_int64_t abstime) { return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, mtx); } int tsleep( - void *chan, - int pri, - const char *wmsg, - int timo) + void *chan, + int pri, + const char *wmsg, + int timo) { - u_int64_t abstime = 0; + u_int64_t abstime = 0; - if (timo) + if (timo) { clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime); + } return _sleep((caddr_t)chan, pri, wmsg, abstime, (int (*)(int))0, (lck_mtx_t *)0); } int tsleep0( - void *chan, - int pri, - const char *wmsg, - int timo, - int (*continuation)(int)) -{ - u_int64_t abstime = 0; - - if (timo) + void *chan, + int pri, + const char *wmsg, + int timo, + int (*continuation)(int)) +{ + u_int64_t abstime = 0; + + if (timo) { clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime); + } return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0); } int tsleep1( - void *chan, - int pri, - const char *wmsg, - u_int64_t abstime, - int 
(*continuation)(int)) -{ + void *chan, + int pri, + const char *wmsg, + u_int64_t abstime, + int (*continuation)(int)) +{ return _sleep((caddr_t)chan, pri, wmsg, abstime, continuation, (lck_mtx_t *)0); } @@ -430,25 +454,26 @@ resetpriority(struct proc *p) } struct loadavg averunnable = - { {0, 0, 0}, FSCALE }; /* load average, of runnable procs */ +{ {0, 0, 0}, FSCALE }; /* load average, of runnable procs */ /* * Constants for averages over 1, 5, and 15 minutes * when sampling at 5 second intervals. */ static fixpt_t cexp[3] = { - (fixpt_t)(0.9200444146293232 * FSCALE), /* exp(-1/12) */ - (fixpt_t)(0.9834714538216174 * FSCALE), /* exp(-1/60) */ - (fixpt_t)(0.9944598480048967 * FSCALE), /* exp(-1/180) */ + (fixpt_t)(0.9200444146293232 * FSCALE), /* exp(-1/12) */ + (fixpt_t)(0.9834714538216174 * FSCALE), /* exp(-1/60) */ + (fixpt_t)(0.9944598480048967 * FSCALE), /* exp(-1/180) */ }; void compute_averunnable(void *arg) { - unsigned int nrun = *(unsigned int *)arg; - struct loadavg *avg = &averunnable; - int i; + unsigned int nrun = *(unsigned int *)arg; + struct loadavg *avg = &averunnable; + int i; - for (i = 0; i < 3; i++) - avg->ldavg[i] = (cexp[i] * avg->ldavg[i] + - nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT; + for (i = 0; i < 3; i++) { + avg->ldavg[i] = (cexp[i] * avg->ldavg[i] + + nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT; + } } diff --git a/bsd/kern/kern_sysctl.c b/bsd/kern/kern_sysctl.c index d937e9e4f..566ab2606 100644 --- a/bsd/kern/kern_sysctl.c +++ b/bsd/kern/kern_sysctl.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -71,7 +71,7 @@ */ /* -* DEPRECATED sysctl system call code + * DEPRECATED sysctl system call code * * Everything in this file is deprecated. Sysctls should be handled * by the code in kern_newsysctl.c. 
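[Editorial aside, not part of the patch.] For context on the deprecation note above: user space can reach the same OID either through the legacy numeric-MIB sysctl(2) interface that this file services, or through the name-based interface backed by kern_newsysctl.c. A minimal user-space sketch, using kern.osrelease purely as an example OID:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	char buf[256];
	size_t len;
	int mib[2] = { CTL_KERN, KERN_OSRELEASE };

	/* Legacy path: numeric MIB vector, the style of lookup kern_sysctl.c serves. */
	len = sizeof(buf);
	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0) {
		printf("mib lookup:  %s\n", buf);
	}

	/* Preferred path: name-based lookup, resolved by the newer sysctl code. */
	len = sizeof(buf);
	if (sysctlbyname("kern.osrelease", buf, &len, NULL, 0) == 0) {
		printf("name lookup: %s\n", buf);
	}
	return 0;
}

Both calls return the same string; the numeric form survives for binary compatibility, which is why the handlers below keep their KERN_* subcommand plumbing.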
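[A second editorial aside before the kern_sysctl.c hunks continue.] The compute_averunnable() hunk in the kern_synch.c diff above only re-braces the loop, but the computation it preserves is worth spelling out: every 5-second sample decays each average toward the current runnable count using the fixed-point cexp[] constants. A self-contained user-space rendering of the same arithmetic, with the cexp[] values copied from the hunk and FSHIFT = 11 / FSCALE = 2048 assumed to match the kernel's fixed-point scale:

#include <stdio.h>

#define FSHIFT  11                      /* bits of fixed-point fraction (assumed) */
#define FSCALE  (1 << FSHIFT)           /* 1.0 in fixed point */

typedef unsigned int fixpt_t;

/* decay factors from the diff: exp(-1/12), exp(-1/60), exp(-1/180),
 * i.e. 1-, 5-, and 15-minute windows sampled every 5 seconds */
static const fixpt_t cexp[3] = {
	(fixpt_t)(0.9200444146293232 * FSCALE),
	(fixpt_t)(0.9834714538216174 * FSCALE),
	(fixpt_t)(0.9944598480048967 * FSCALE),
};

int
main(void)
{
	fixpt_t ldavg[3] = { 0, 0, 0 };
	unsigned int nrun = 3;              /* pretend 3 threads stay runnable */

	/* simulate five minutes of 5-second ticks, mirroring compute_averunnable() */
	for (int tick = 0; tick < 60; tick++) {
		for (int i = 0; i < 3; i++) {
			ldavg[i] = (cexp[i] * ldavg[i] +
			    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		}
	}
	printf("1m/5m/15m: %.2f %.2f %.2f\n",
	    (double)ldavg[0] / FSCALE,
	    (double)ldavg[1] / FSCALE,
	    (double)ldavg[2] / FSCALE);
	return 0;
}

After 60 simulated ticks the 1-minute average has essentially converged to 3.00 while the 15-minute average still lags well below it, which is exactly the smoothing the three decay constants are chosen to provide.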
@@ -110,6 +110,7 @@ #include #include +#include #include #include #include @@ -123,7 +124,6 @@ #include #include #include -#include #include #include #include @@ -166,18 +166,21 @@ */ #define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX) -extern int aio_max_requests; -extern int aio_max_requests_per_process; -extern int aio_worker_threads; +extern int aio_max_requests; +extern int aio_max_requests_per_process; +extern int aio_worker_threads; extern int lowpri_IO_window_msecs; extern int lowpri_IO_delay_msecs; +#if DEVELOPMENT || DEBUG extern int nx_enabled; +#endif extern int speculative_reads_disabled; extern unsigned int speculative_prefetch_max; extern unsigned int speculative_prefetch_max_iosize; extern unsigned int preheat_max_bytes; extern unsigned int preheat_min_bytes; extern long numvnodes; +extern long num_recycledvnodes; extern uuid_string_t bootsessionuuid_string; @@ -189,8 +192,8 @@ extern unsigned int vm_page_free_target; extern unsigned int vm_page_free_reserved; #if (DEVELOPMENT || DEBUG) -extern uint32_t vm_page_creation_throttled_hard; -extern uint32_t vm_page_creation_throttled_soft; +extern uint32_t vm_page_creation_throttled_hard; +extern uint32_t vm_page_creation_throttled_soft; #endif /* DEVELOPMENT || DEBUG */ /* @@ -222,24 +225,24 @@ fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict); STATIC void fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict); -extern int +extern int kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep); #if NFSCLIENT -extern int +extern int netboot_root(void); #endif int -pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, - proc_t p); +pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep, + proc_t p); int -sysctl_procargs(int *name, u_int namelen, user_addr_t where, - size_t *sizep, proc_t cur_proc); +sysctl_procargs(int *name, u_int namelen, user_addr_t where, + size_t *sizep, proc_t cur_proc); STATIC int -sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep, - proc_t cur_proc, int argc_yes); +sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep, + proc_t cur_proc, int argc_yes); int -sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, - size_t newlen, void *sp, int len); +sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp, + size_t newlen, void *sp, int len); STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg); STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg); @@ -258,10 +261,10 @@ STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int ar STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS; #if COUNT_SYSCALLS STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS; -#endif /* COUNT_SYSCALLS */ +#endif /* COUNT_SYSCALLS */ #if !CONFIG_EMBEDDED STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS; -#endif /* !CONFIG_EMBEDDED */ +#endif /* !CONFIG_EMBEDDED */ STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS; STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS; STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); @@ -292,7 +295,9 @@ STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, s STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct 
sysctl_req *req); +#if DEVELOPMENT || DEBUG STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); +#endif STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); @@ -312,31 +317,31 @@ STATIC int sysctl_debug_test_stackshot_mutex_owner(struct sysctl_oid *oidp, void STATIC int sysctl_debug_test_stackshot_rwlck_owner(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); #endif -extern void IORegistrySetOSBuildVersion(char * build_version); +extern void IORegistrySetOSBuildVersion(char * build_version); STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64) { - la64->ldavg[0] = la->ldavg[0]; - la64->ldavg[1] = la->ldavg[1]; - la64->ldavg[2] = la->ldavg[2]; - la64->fscale = (user64_long_t)la->fscale; + la64->ldavg[0] = la->ldavg[0]; + la64->ldavg[1] = la->ldavg[1]; + la64->ldavg[2] = la->ldavg[2]; + la64->fscale = (user64_long_t)la->fscale; } STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32) { - la32->ldavg[0] = la->ldavg[0]; - la32->ldavg[1] = la->ldavg[1]; - la32->ldavg[2] = la->ldavg[2]; - la32->fscale = (user32_long_t)la->fscale; + la32->ldavg[0] = la->ldavg[0]; + la32->ldavg[1] = la->ldavg[1]; + la32->ldavg[2] = la->ldavg[2]; + la32->fscale = (user32_long_t)la->fscale; } #if CONFIG_COREDUMP /* * Attributes stored in the kernel. */ -extern char corefilename[MAXPATHLEN+1]; +extern char corefilename[MAXPATHLEN + 1]; extern int do_coredump; extern int sugid_coredump; #endif @@ -352,14 +357,14 @@ int securelevel; #endif STATIC int -sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req) +sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void *arg1, + __unused int arg2, struct sysctl_req *req) { int error; struct uthread *ut = get_bsdthread_info(current_thread()); - user_addr_t oldp=0, newp=0; - size_t *oldlenp=NULL; - size_t newlen=0; + user_addr_t oldp = 0, newp = 0; + size_t *oldlenp = NULL; + size_t newlen = 0; oldp = req->oldptr; oldlenp = &(req->oldlen); @@ -367,37 +372,40 @@ sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void * newlen = req->newlen; /* We want the current length, and maybe the string itself */ - if(oldlenp) { + if (oldlenp) { /* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */ size_t currlen = MAXTHREADNAMESIZE - 1; - - if(ut->pth_name) + + if (ut->pth_name) { /* use length of current thread name */ currlen = strlen(ut->pth_name); - if(oldp) { - if(*oldlenp < currlen) + } + if (oldp) { + if (*oldlenp < currlen) { return ENOMEM; + } /* NOTE - we do not copy the NULL terminator */ - if(ut->pth_name) { - error = copyout(ut->pth_name,oldp,currlen); - if(error) + if (ut->pth_name) { + error = copyout(ut->pth_name, oldp, currlen); + if (error) { return error; + } } - } + } /* return length of thread name minus NULL terminator (just like strlen) */ req->oldidx = currlen; } /* We want to set the name to something */ - if(newp) - { - if(newlen > (MAXTHREADNAMESIZE - 1)) + if (newp) { + if (newlen > (MAXTHREADNAMESIZE - 1)) { return ENAMETOOLONG; - if(!ut->pth_name) - { + } + if (!ut->pth_name) { ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE ); - if(!ut->pth_name) + if 
(!ut->pth_name) { return ENOMEM; + } } else { kernel_debug_string_simple(TRACE_STRING_THREADNAME_PREV, ut->pth_name); } @@ -409,11 +417,11 @@ sysctl_handle_kern_threadname( __unused struct sysctl_oid *oidp, __unused void * kernel_debug_string_simple(TRACE_STRING_THREADNAME, ut->pth_name); } - + return 0; } -SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A",""); +SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname, "A", ""); #define BSD_HOST 1 STATIC int @@ -433,13 +441,13 @@ sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unus } size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */ - + if (req->oldlen < size) { return EINVAL; } MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK); - + kret = get_sched_statistics(buf, &size); if (kret != KERN_SUCCESS) { error = EINVAL; @@ -494,20 +502,20 @@ extern int get_kernel_symfile(proc_t, char **); #if COUNT_SYSCALLS #define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000) -extern unsigned int nsysent; +extern unsigned int nsysent; extern int syscalls_log[]; extern const char *syscallnames[]; STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS { - __unused int cmd = oidp->oid_arg2; /* subcommand*/ - __unused int *name = arg1; /* oid element argument vector */ - __unused int namelen = arg2; /* number of oid element arguments */ - user_addr_t oldp = req->oldptr; /* user buffer copy out address */ - size_t *oldlenp = &req->oldlen; /* user buffer copy out size */ - user_addr_t newp = req->newptr; /* user buffer copy in address */ - size_t newlen = req->newlen; /* user buffer copy in size */ + __unused int cmd = oidp->oid_arg2; /* subcommand*/ + __unused int *name = arg1; /* oid element argument vector */ + __unused int namelen = arg2; /* number of oid element arguments */ + user_addr_t oldp = req->oldptr; /* user buffer copy out address */ + size_t *oldlenp = &req->oldlen; /* user buffer copy out size */ + user_addr_t newp = req->newptr; /* user buffer copy in address */ + size_t newlen = req->newlen; /* user buffer copy in size */ int error; int tmp; @@ -517,47 +525,46 @@ sysctl_docountsyscalls SYSCTL_HANDLER_ARGS * > 0 means keep called counts for each bsd syscall * = 2 means dump current counts to the system log * = 3 means reset all counts - * for example, to dump current counts: + * for example, to dump current counts: * sysctl -w kern.count_calls=2 */ error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp); - if ( error != 0 ) { - return (error); + if (error != 0) { + return error; } - - if ( tmp == 1 ) { + + if (tmp == 1) { do_count_syscalls = 1; - } - else if ( tmp == 0 || tmp == 2 || tmp == 3 ) { - int i; - for ( i = 0; i < nsysent; i++ ) { - if ( syscalls_log[i] != 0 ) { - if ( tmp == 2 ) { + } else if (tmp == 0 || tmp == 2 || tmp == 3) { + int i; + for (i = 0; i < nsysent; i++) { + if (syscalls_log[i] != 0) { + if (tmp == 2) { printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]); - } - else { + } else { syscalls_log[i] = 0; } } } - if ( tmp != 0 ) { + if (tmp != 0) { do_count_syscalls = 1; } } /* adjust index so we return the right required/consumed amount */ - if (!error) + if (!error) { req->oldidx += req->oldlen; + } - return (error); + return error; } -SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, 
CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - 0, /* Integer argument (arg2) */ - sysctl_docountsyscalls, /* Handler function */ - NULL, /* Data pointer */ - ""); -#endif /* COUNT_SYSCALLS */ +SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + 0, /* Integer argument (arg2) */ + sysctl_docountsyscalls, /* Handler function */ + NULL, /* Data pointer */ + ""); +#endif /* COUNT_SYSCALLS */ /* * The following sysctl_* functions should not be used @@ -574,25 +581,29 @@ SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD * for an integer-valued sysctl function. */ int -sysctl_int(user_addr_t oldp, size_t *oldlenp, - user_addr_t newp, size_t newlen, int *valp) +sysctl_int(user_addr_t oldp, size_t *oldlenp, + user_addr_t newp, size_t newlen, int *valp) { int error = 0; - if (oldp != USER_ADDR_NULL && oldlenp == NULL) - return (EFAULT); - if (oldp && *oldlenp < sizeof(int)) - return (ENOMEM); - if (newp && newlen != sizeof(int)) - return (EINVAL); + if (oldp != USER_ADDR_NULL && oldlenp == NULL) { + return EFAULT; + } + if (oldp && *oldlenp < sizeof(int)) { + return ENOMEM; + } + if (newp && newlen != sizeof(int)) { + return EINVAL; + } *oldlenp = sizeof(int); - if (oldp) + if (oldp) { error = copyout(valp, oldp, sizeof(int)); + } if (error == 0 && newp) { error = copyin(newp, valp, sizeof(int)); AUDIT_ARG(value32, *valp); } - return (error); + return error; } /* @@ -600,41 +611,48 @@ sysctl_int(user_addr_t oldp, size_t *oldlenp, * for an quad(64bit)-valued sysctl function. */ int -sysctl_quad(user_addr_t oldp, size_t *oldlenp, - user_addr_t newp, size_t newlen, quad_t *valp) +sysctl_quad(user_addr_t oldp, size_t *oldlenp, + user_addr_t newp, size_t newlen, quad_t *valp) { int error = 0; - if (oldp != USER_ADDR_NULL && oldlenp == NULL) - return (EFAULT); - if (oldp && *oldlenp < sizeof(quad_t)) - return (ENOMEM); - if (newp && newlen != sizeof(quad_t)) - return (EINVAL); + if (oldp != USER_ADDR_NULL && oldlenp == NULL) { + return EFAULT; + } + if (oldp && *oldlenp < sizeof(quad_t)) { + return ENOMEM; + } + if (newp && newlen != sizeof(quad_t)) { + return EINVAL; + } *oldlenp = sizeof(quad_t); - if (oldp) + if (oldp) { error = copyout(valp, oldp, sizeof(quad_t)); - if (error == 0 && newp) + } + if (error == 0 && newp) { error = copyin(newp, valp, sizeof(quad_t)); - return (error); + } + return error; } STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg) { - if (p->p_pid != (pid_t)*(int*)arg) - return(0); - else - return(1); + if (p->p_pid != (pid_t)*(int*)arg) { + return 0; + } else { + return 1; + } } STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg) { - if (p->p_pgrpid != (pid_t)*(int*)arg) - return(0); - else - return(1); + if (p->p_pgrpid != (pid_t)*(int*)arg) { + return 0; + } else { + return 1; + } } STATIC int @@ -645,14 +663,15 @@ sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg) /* This is very racy but list lock is held.. Hmmm. 
*/ if ((p->p_flag & P_CONTROLT) == 0 || - (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) || - (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL || - tp->t_dev != (dev_t)*(int*)arg) - retval = 0; - else + (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) || + (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL || + tp->t_dev != (dev_t)*(int*)arg) { + retval = 0; + } else { retval = 1; + } - return(retval); + return retval; } STATIC int @@ -661,16 +680,18 @@ sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg) kauth_cred_t my_cred; uid_t uid; - if (p->p_ucred == NULL) - return(0); + if (p->p_ucred == NULL) { + return 0; + } my_cred = kauth_cred_proc_ref(p); uid = kauth_cred_getuid(my_cred); kauth_cred_unref(&my_cred); - if (uid != (uid_t)*(int*)arg) - return(0); - else - return(1); + if (uid != (uid_t)*(int*)arg) { + return 0; + } else { + return 1; + } } @@ -680,27 +701,29 @@ sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg) kauth_cred_t my_cred; uid_t ruid; - if (p->p_ucred == NULL) - return(0); + if (p->p_ucred == NULL) { + return 0; + } my_cred = kauth_cred_proc_ref(p); ruid = kauth_cred_getruid(my_cred); kauth_cred_unref(&my_cred); - if (ruid != (uid_t)*(int*)arg) - return(0); - else - return(1); + if (ruid != (uid_t)*(int*)arg) { + return 0; + } else { + return 1; + } } /* * try over estimating by 5 procs */ -#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc)) +#define KERN_PROCSLOP (5 * sizeof (struct kinfo_proc)) struct sysdoproc_args { - int buflen; - void *kprocp; + int buflen; + void *kprocp; boolean_t is_64_bit; - user_addr_t dp; + user_addr_t dp; size_t needed; int sizeof_kproc; int *errorp; @@ -716,37 +739,41 @@ sysdoproc_callback(proc_t p, void *arg) struct sysdoproc_args *args = arg; if (args->buflen >= args->sizeof_kproc) { - if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0)) - return (PROC_RETURNED); - if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0)) - return (PROC_RETURNED); - if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0)) - return (PROC_RETURNED); + if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0)) { + return PROC_RETURNED; + } + if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0)) { + return PROC_RETURNED; + } + if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0)) { + return PROC_RETURNED; + } bzero(args->kprocp, args->sizeof_kproc); - if (args->is_64_bit) + if (args->is_64_bit) { fill_user64_proc(p, args->kprocp); - else + } else { fill_user32_proc(p, args->kprocp); + } int error = copyout(args->kprocp, args->dp, args->sizeof_kproc); if (error) { *args->errorp = error; - return (PROC_RETURNED_DONE); + return PROC_RETURNED_DONE; } args->dp += args->sizeof_kproc; args->buflen -= args->sizeof_kproc; } args->needed += args->sizeof_kproc; - return (PROC_RETURNED); + return PROC_RETURNED; } SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, ""); STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS { - int cmd = oidp->oid_arg2; /* subcommand for multiple nodes */ - int *name = arg1; /* oid element argument vector */ - int namelen = arg2; /* number of oid element arguments */ + int cmd = oidp->oid_arg2; /* subcommand for multiple nodes */ + int *name = arg1; /* oid element argument vector */ + int namelen = arg2; /* number of oid element arguments */ user_addr_t where = req->oldptr;/* user buffer copy out address */ user_addr_t dp = where; @@ 
-765,8 +792,9 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS int ttycheck = 0; int success = 0; - if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL)) - return (EINVAL); + if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL)) { + return EINVAL; + } if (is_64_bit) { sizeof_kproc = sizeof(user_kproc); @@ -777,33 +805,32 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS } switch (cmd) { + case KERN_PROC_PID: + filterfn = sysdoproc_filt_KERN_PROC_PID; + break; - case KERN_PROC_PID: - filterfn = sysdoproc_filt_KERN_PROC_PID; - break; + case KERN_PROC_PGRP: + filterfn = sysdoproc_filt_KERN_PROC_PGRP; + break; - case KERN_PROC_PGRP: - filterfn = sysdoproc_filt_KERN_PROC_PGRP; - break; - - case KERN_PROC_TTY: - ttycheck = 1; - break; + case KERN_PROC_TTY: + ttycheck = 1; + break; - case KERN_PROC_UID: - uidcheck = 1; - break; + case KERN_PROC_UID: + uidcheck = 1; + break; - case KERN_PROC_RUID: - ruidcheck = 1; - break; + case KERN_PROC_RUID: + ruidcheck = 1; + break; - case KERN_PROC_ALL: - break; + case KERN_PROC_ALL: + break; - default: - /* must be kern.proc. */ - return (ENOTSUP); + default: + /* must be kern.proc. */ + return ENOTSUP; } error = 0; @@ -817,37 +844,41 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS args.ruidcheck = ruidcheck; args.ttycheck = ttycheck; args.sizeof_kproc = sizeof_kproc; - if (namelen) + if (namelen) { args.uidval = name[0]; + } success = proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST), - sysdoproc_callback, &args, filterfn, name); + sysdoproc_callback, &args, filterfn, name); /* * rdar://problem/28433391: if we can't iterate over the processes, * make sure to return an error. */ - if (success != 0) - return (ENOMEM); + if (success != 0) { + return ENOMEM; + } - if (error) - return (error); + if (error) { + return error; + } dp = args.dp; needed = args.needed; - + if (where != USER_ADDR_NULL) { req->oldlen = dp - where; - if (needed > req->oldlen) - return (ENOMEM); + if (needed > req->oldlen) { + return ENOMEM; + } } else { needed += KERN_PROCSLOP; req->oldlen = needed; } /* adjust index so we return the right required/consumed amount */ req->oldidx += req->oldlen; - return (0); + return 0; } /* @@ -872,48 +903,48 @@ sysctl_prochandle SYSCTL_HANDLER_ARGS * which are not themselves system tools or libraries, some applications * have erroneously used them. 
*/ -SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - KERN_PROC_ALL, /* Integer argument (arg2) */ - sysctl_prochandle, /* Handler function */ - NULL, /* Data is size variant on ILP32/LP64 */ - ""); -SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - KERN_PROC_PID, /* Integer argument (arg2) */ - sysctl_prochandle, /* Handler function */ - NULL, /* Data is size variant on ILP32/LP64 */ - ""); -SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - KERN_PROC_TTY, /* Integer argument (arg2) */ - sysctl_prochandle, /* Handler function */ - NULL, /* Data is size variant on ILP32/LP64 */ - ""); -SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - KERN_PROC_PGRP, /* Integer argument (arg2) */ - sysctl_prochandle, /* Handler function */ - NULL, /* Data is size variant on ILP32/LP64 */ - ""); -SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - KERN_PROC_UID, /* Integer argument (arg2) */ - sysctl_prochandle, /* Handler function */ - NULL, /* Data is size variant on ILP32/LP64 */ - ""); -SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - KERN_PROC_RUID, /* Integer argument (arg2) */ - sysctl_prochandle, /* Handler function */ - NULL, /* Data is size variant on ILP32/LP64 */ - ""); -SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - KERN_PROC_LCID, /* Integer argument (arg2) */ - sysctl_prochandle, /* Handler function */ - NULL, /* Data is size variant on ILP32/LP64 */ - ""); +SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + KERN_PROC_ALL, /* Integer argument (arg2) */ + sysctl_prochandle, /* Handler function */ + NULL, /* Data is size variant on ILP32/LP64 */ + ""); +SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + KERN_PROC_PID, /* Integer argument (arg2) */ + sysctl_prochandle, /* Handler function */ + NULL, /* Data is size variant on ILP32/LP64 */ + ""); +SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + KERN_PROC_TTY, /* Integer argument (arg2) */ + sysctl_prochandle, /* Handler function */ + NULL, /* Data is size variant on ILP32/LP64 */ + ""); +SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + KERN_PROC_PGRP, /* Integer argument (arg2) */ + sysctl_prochandle, /* Handler function */ + NULL, /* Data is size variant on ILP32/LP64 */ + ""); +SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + KERN_PROC_UID, /* Integer argument (arg2) */ + sysctl_prochandle, /* Handler function */ + NULL, /* Data is size variant on ILP32/LP64 */ + ""); +SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + KERN_PROC_RUID, /* Integer argument (arg2) */ + sysctl_prochandle, /* Handler function */ + NULL, /* Data is size variant on ILP32/LP64 */ + ""); +SYSCTL_PROC(_kern_proc, 
KERN_PROC_LCID, lcid, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + KERN_PROC_LCID, /* Integer argument (arg2) */ + sysctl_prochandle, /* Handler function */ + NULL, /* Data is size variant on ILP32/LP64 */ + ""); /* @@ -933,8 +964,9 @@ fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep) if (pg != PGRP_NULL) { ep->e_pgid = p->p_pgrpid; ep->e_jobc = pg->pg_jobc; - if (sessp != SESSION_NULL && sessp->s_ttyvp) + if (sessp != SESSION_NULL && sessp->s_ttyvp) { ep->e_flag = EPROC_CTTY; + } } ep->e_ppid = p->p_ppid; if (p->p_ucred) { @@ -947,29 +979,32 @@ fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep) ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred); /* A fake historical *kauth_cred_t */ - ep->e_ucred.cr_ref = my_cred->cr_ref; + ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed); ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred); ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups; bcopy(posix_cred_get(my_cred)->cr_groups, - ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t)); + ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t)); kauth_cred_unref(&my_cred); } if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) && - (tp = SESSION_TP(sessp))) { + (tp = SESSION_TP(sessp))) { ep->e_tdev = tp->t_dev; ep->e_tpgid = sessp->s_ttypgrpid; - } else + } else { ep->e_tdev = NODEV; + } if (sessp != SESSION_NULL) { - if (SESS_LEADER(p, sessp)) + if (SESS_LEADER(p, sessp)) { ep->e_flag |= EPROC_SLEADER; + } session_rele(sessp); } - if (pg != PGRP_NULL) + if (pg != PGRP_NULL) { pg_rele(pg); + } } /* @@ -982,15 +1017,16 @@ fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep) struct pgrp *pg; struct session *sessp; kauth_cred_t my_cred; - + pg = proc_pgrp(p); sessp = proc_session(p); if (pg != PGRP_NULL) { ep->e_pgid = p->p_pgrpid; ep->e_jobc = pg->pg_jobc; - if (sessp != SESSION_NULL && sessp->s_ttyvp) + if (sessp != SESSION_NULL && sessp->s_ttyvp) { ep->e_flag = EPROC_CTTY; + } } ep->e_ppid = p->p_ppid; if (p->p_ucred) { @@ -1003,29 +1039,32 @@ fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep) ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred); /* A fake historical *kauth_cred_t */ - ep->e_ucred.cr_ref = my_cred->cr_ref; + ep->e_ucred.cr_ref = os_atomic_load(&my_cred->cr_ref, relaxed); ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred); ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups; bcopy(posix_cred_get(my_cred)->cr_groups, - ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t)); + ep->e_ucred.cr_groups, NGROUPS * sizeof(gid_t)); kauth_cred_unref(&my_cred); } if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) && - (tp = SESSION_TP(sessp))) { + (tp = SESSION_TP(sessp))) { ep->e_tdev = tp->t_dev; ep->e_tpgid = sessp->s_ttypgrpid; - } else + } else { ep->e_tdev = NODEV; + } if (sessp != SESSION_NULL) { - if (SESS_LEADER(p, sessp)) + if (SESS_LEADER(p, sessp)) { ep->e_flag |= EPROC_SLEADER; + } session_rele(sessp); } - if (pg != PGRP_NULL) + if (pg != PGRP_NULL) { pg_rele(pg); + } } /* @@ -1038,12 +1077,15 @@ fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp) exp->p_starttime.tv_sec = p->p_start.tv_sec; exp->p_starttime.tv_usec = p->p_start.tv_usec; exp->p_flag = p->p_flag; - if (p->p_lflag & P_LTRACED) + if (p->p_lflag & P_LTRACED) { exp->p_flag |= P_TRACED; - if (p->p_lflag & P_LPPWAIT) + } + if (p->p_lflag & P_LPPWAIT) { exp->p_flag |= P_PPWAIT; - if (p->p_lflag & P_LEXIT) + } + if (p->p_lflag & P_LEXIT) { exp->p_flag |= P_WEXIT; + } exp->p_stat = p->p_stat; exp->p_pid = 
p->p_pid; exp->p_oppid = p->p_oppid; @@ -1058,14 +1100,14 @@ fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp) exp->p_slptime = p->p_slptime; #endif exp->p_realtimer.it_interval.tv_sec = - (user32_time_t)p->p_realtimer.it_interval.tv_sec; + (user32_time_t)p->p_realtimer.it_interval.tv_sec; exp->p_realtimer.it_interval.tv_usec = - (__int32_t)p->p_realtimer.it_interval.tv_usec; + (__int32_t)p->p_realtimer.it_interval.tv_usec; exp->p_realtimer.it_value.tv_sec = - (user32_time_t)p->p_realtimer.it_value.tv_sec; + (user32_time_t)p->p_realtimer.it_value.tv_sec; exp->p_realtimer.it_value.tv_usec = - (__int32_t)p->p_realtimer.it_value.tv_usec; + (__int32_t)p->p_realtimer.it_value.tv_usec; exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec; exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec; @@ -1088,12 +1130,15 @@ fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp) exp->p_starttime.tv_sec = p->p_start.tv_sec; exp->p_starttime.tv_usec = p->p_start.tv_usec; exp->p_flag = p->p_flag; - if (p->p_lflag & P_LTRACED) + if (p->p_lflag & P_LTRACED) { exp->p_flag |= P_TRACED; - if (p->p_lflag & P_LPPWAIT) + } + if (p->p_lflag & P_LPPWAIT) { exp->p_flag |= P_PPWAIT; - if (p->p_lflag & P_LEXIT) + } + if (p->p_lflag & P_LEXIT) { exp->p_flag |= P_WEXIT; + } exp->p_stat = p->p_stat; exp->p_pid = p->p_pid; exp->p_oppid = p->p_oppid; @@ -1143,20 +1188,21 @@ fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp) STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS { - __unused int cmd = oidp->oid_arg2; /* subcommand*/ - int *name = arg1; /* oid element argument vector */ - int namelen = arg2; /* number of oid element arguments */ - user_addr_t oldp = req->oldptr; /* user buffer copy out address */ - size_t *oldlenp = &req->oldlen; /* user buffer copy out size */ + __unused int cmd = oidp->oid_arg2; /* subcommand*/ + int *name = arg1; /* oid element argument vector */ + int namelen = arg2; /* number of oid element arguments */ + user_addr_t oldp = req->oldptr; /* user buffer copy out address */ + size_t *oldlenp = &req->oldlen; /* user buffer copy out size */ // user_addr_t newp = req->newptr; /* user buffer copy in address */ // size_t newlen = req->newlen; /* user buffer copy in size */ - int ret=0; + int ret = 0; - if (namelen == 0) - return(ENOTSUP); + if (namelen == 0) { + return ENOTSUP; + } - switch(name[0]) { + switch (name[0]) { case KERN_KDEFLAGS: case KERN_KDDFLAGS: case KERN_KDENABLE: @@ -1182,22 +1228,23 @@ sysctl_kdebug_ops SYSCTL_HANDLER_ARGS ret = kdbg_control(name, namelen, oldp, oldlenp); break; default: - ret= ENOTSUP; + ret = ENOTSUP; break; } /* adjust index so we return the right required/consumed amount */ - if (!ret) + if (!ret) { req->oldidx += req->oldlen; + } - return (ret); + return ret; } -SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - 0, /* Integer argument (arg2) */ - sysctl_kdebug_ops, /* Handler function */ - NULL, /* Data pointer */ - ""); +SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + 0, /* Integer argument (arg2) */ + sysctl_kdebug_ops, /* Handler function */ + NULL, /* Data pointer */ + ""); #if !CONFIG_EMBEDDED @@ -1208,11 +1255,11 @@ SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS { - __unused int cmd = oidp->oid_arg2; /* subcommand*/ - int *name = arg1; /* oid element argument vector */ - 
int namelen = arg2; /* number of oid element arguments */ - user_addr_t oldp = req->oldptr; /* user buffer copy out address */ - size_t *oldlenp = &req->oldlen; /* user buffer copy out size */ + __unused int cmd = oidp->oid_arg2; /* subcommand*/ + int *name = arg1; /* oid element argument vector */ + int namelen = arg2; /* number of oid element arguments */ + user_addr_t oldp = req->oldptr; /* user buffer copy out address */ + size_t *oldlenp = &req->oldlen; /* user buffer copy out size */ // user_addr_t newp = req->newptr; /* user buffer copy in address */ // size_t newlen = req->newlen; /* user buffer copy in size */ int error; @@ -1220,27 +1267,28 @@ sysctl_doprocargs SYSCTL_HANDLER_ARGS error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0); /* adjust index so we return the right required/consumed amount */ - if (!error) + if (!error) { req->oldidx += req->oldlen; + } - return (error); + return error; } -SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - 0, /* Integer argument (arg2) */ - sysctl_doprocargs, /* Handler function */ - NULL, /* Data pointer */ - ""); -#endif /* !CONFIG_EMBEDDED */ +SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + 0, /* Integer argument (arg2) */ + sysctl_doprocargs, /* Handler function */ + NULL, /* Data pointer */ + ""); +#endif /* !CONFIG_EMBEDDED */ STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS { - __unused int cmd = oidp->oid_arg2; /* subcommand*/ - int *name = arg1; /* oid element argument vector */ - int namelen = arg2; /* number of oid element arguments */ - user_addr_t oldp = req->oldptr; /* user buffer copy out address */ - size_t *oldlenp = &req->oldlen; /* user buffer copy out size */ + __unused int cmd = oidp->oid_arg2; /* subcommand*/ + int *name = arg1; /* oid element argument vector */ + int namelen = arg2; /* number of oid element arguments */ + user_addr_t oldp = req->oldptr; /* user buffer copy out address */ + size_t *oldlenp = &req->oldlen; /* user buffer copy out size */ // user_addr_t newp = req->newptr; /* user buffer copy in address */ // size_t newlen = req->newlen; /* user buffer copy in size */ int error; @@ -1248,53 +1296,55 @@ sysctl_doprocargs2 SYSCTL_HANDLER_ARGS error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1); /* adjust index so we return the right required/consumed amount */ - if (!error) + if (!error) { req->oldidx += req->oldlen; + } - return (error); + return error; } -SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - 0, /* Integer argument (arg2) */ - sysctl_doprocargs2, /* Handler function */ - NULL, /* Data pointer */ - ""); +SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + 0, /* Integer argument (arg2) */ + sysctl_doprocargs2, /* Handler function */ + NULL, /* Data pointer */ + ""); STATIC int -sysctl_procargsx(int *name, u_int namelen, user_addr_t where, - size_t *sizep, proc_t cur_proc, int argc_yes) +sysctl_procargsx(int *name, u_int namelen, user_addr_t where, + size_t *sizep, proc_t cur_proc, int argc_yes) { proc_t p; int buflen = where != USER_ADDR_NULL ? 
*sizep : 0; int error = 0; struct _vm_map *proc_map; struct task * task; - vm_map_copy_t tmp; - user_addr_t arg_addr; - size_t arg_size; + vm_map_copy_t tmp; + user_addr_t arg_addr; + size_t arg_size; caddr_t data; - size_t argslen=0; + size_t argslen = 0; int size; vm_size_t alloc_size = 0; - vm_offset_t copy_start, copy_end; + vm_offset_t copy_start, copy_end; kern_return_t ret; int pid; kauth_cred_t my_cred; uid_t uid; int argc = -1; - if ( namelen < 1 ) - return(EINVAL); - - if (argc_yes) - buflen -= sizeof(int); /* reserve first word to return argc */ + if (namelen < 1) { + return EINVAL; + } + if (argc_yes) { + buflen -= sizeof(int); /* reserve first word to return argc */ + } /* we only care about buflen when where (oldp from sysctl) is not NULL. */ /* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */ /* is not NULL then the caller wants us to return the length needed to */ - /* hold the data we would return */ + /* hold the data we would return */ if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) { - return(EINVAL); + return EINVAL; } arg_size = buflen; @@ -1304,7 +1354,7 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, pid = name[0]; p = proc_find(pid); if (p == NULL) { - return(EINVAL); + return EINVAL; } /* @@ -1319,14 +1369,14 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, if (!p->user_stack) { proc_rele(p); - return(EINVAL); + return EINVAL; } if (where == USER_ADDR_NULL) { /* caller only wants to know length of proc args data */ if (sizep == NULL) { proc_rele(p); - return(EFAULT); + return EFAULT; } size = p->p_argslen; @@ -1342,21 +1392,22 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, } size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0; *sizep = size; - return (0); + return 0; } my_cred = kauth_cred_proc_ref(p); uid = kauth_cred_getuid(my_cred); kauth_cred_unref(&my_cred); - if ((uid != kauth_cred_getuid(kauth_cred_get())) - && suser(kauth_cred_get(), &cur_proc->p_acflag)) { + if ((uid != kauth_cred_getuid(kauth_cred_get())) + && suser(kauth_cred_get(), &cur_proc->p_acflag)) { proc_rele(p); - return (EINVAL); + return EINVAL; } - if ((u_int)arg_size > p->p_argslen) - arg_size = round_page(p->p_argslen); + if ((u_int)arg_size > p->p_argslen) { + arg_size = round_page(p->p_argslen); + } arg_addr = p->user_stack - arg_size; @@ -1368,7 +1419,7 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, task = p->task; if (task == NULL) { proc_rele(p); - return(EINVAL); + return EINVAL; } /* save off argc before releasing the proc */ @@ -1386,26 +1437,27 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, proc_rele(p); proc_map = get_task_map_reference(task); task_deallocate(task); - - if (proc_map == NULL) - return(EINVAL); + + if (proc_map == NULL) { + return EINVAL; + } alloc_size = round_page(arg_size); ret = kmem_alloc(kernel_map, &copy_start, alloc_size, VM_KERN_MEMORY_BSD); if (ret != KERN_SUCCESS) { vm_map_deallocate(proc_map); - return(ENOMEM); + return ENOMEM; } bzero((void *)copy_start, alloc_size); copy_end = round_page(copy_start + arg_size); - if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr, - (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) { - vm_map_deallocate(proc_map); - kmem_free(kernel_map, copy_start, - round_page(arg_size)); - return (EIO); + if (vm_map_copyin(proc_map, (vm_map_address_t)arg_addr, + (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) { + vm_map_deallocate(proc_map); +
kmem_free(kernel_map, copy_start, + round_page(arg_size)); + return EIO; } /* @@ -1414,13 +1466,13 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, */ vm_map_deallocate(proc_map); - if( vm_map_copy_overwrite(kernel_map, - (vm_map_address_t)copy_start, - tmp, FALSE) != KERN_SUCCESS) { - kmem_free(kernel_map, copy_start, - round_page(arg_size)); - vm_map_copy_discard(tmp); - return (EIO); + if (vm_map_copy_overwrite(kernel_map, + (vm_map_address_t)copy_start, + tmp, FALSE) != KERN_SUCCESS) { + kmem_free(kernel_map, copy_start, + round_page(arg_size)); + vm_map_copy_discard(tmp); + return EIO; } if (arg_size > argslen) { @@ -1440,7 +1492,7 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, * (rdar://problem/13746466) */ #define EXECUTABLE_KEY "executable_path=" - if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0){ + if (strncmp(EXECUTABLE_KEY, data, strlen(EXECUTABLE_KEY)) == 0) { data += strlen(EXECUTABLE_KEY); size -= strlen(EXECUTABLE_KEY); } @@ -1457,12 +1509,11 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, * Make the old PROCARGS work to return the executable's path * But, only if there is enough space in the provided buffer * - * on entry: data [possibly] points to the beginning of the path - * + * on entry: data [possibly] points to the beginning of the path + * * Note: we keep all pointers & sizes aligned to word boundaries */ - if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) ) - { + if ((!error) && (buflen > 0 && (u_int)buflen > argslen)) { int binPath_sz, alignedBinPath_sz = 0; int extraSpaceNeeded, addThis; user_addr_t placeHere; @@ -1470,37 +1521,41 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, int max_len = size; /* Some apps are really bad about messing up their stacks - So, we have to be extra careful about getting the length - of the executing binary. If we encounter an error, we bail. - */ + * So, we have to be extra careful about getting the length + * of the executing binary. If we encounter an error, we bail. + */ /* Limit ourselves to PATH_MAX paths */ - if ( max_len > PATH_MAX ) max_len = PATH_MAX; + if (max_len > PATH_MAX) { + max_len = PATH_MAX; + } binPath_sz = 0; - while ( (binPath_sz < max_len-1) && (*str++ != 0) ) + while ((binPath_sz < max_len - 1) && (*str++ != 0)) { binPath_sz++; + } /* If we have a NUL terminator, copy it, too */ - if (binPath_sz < max_len-1) binPath_sz += 1; + if (binPath_sz < max_len - 1) { + binPath_sz += 1; + } /* Pre-Flight the space requirements */ /* Account for the padding that fills out binPath to the next word */ - alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0; + alignedBinPath_sz += (binPath_sz & (sizeof(int) - 1)) ? (sizeof(int) - (binPath_sz & (sizeof(int) - 1))) : 0; placeHere = where + size; - /* Account for the bytes needed to keep placeHere word aligned */ - addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0; + /* Account for the bytes needed to keep placeHere word aligned */ + addThis = (placeHere & (sizeof(int) - 1)) ? (sizeof(int) - (placeHere & (sizeof(int) - 1))) : 0; /* Add up all the space that is needed */ extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int)); /* is there room to tack on argv[0]?
*/ - if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded )) - { + if ((buflen & ~(sizeof(int) - 1)) >= (argslen + extraSpaceNeeded)) { placeHere += addThis; suword(placeHere, 0); placeHere += sizeof(int); @@ -1509,8 +1564,7 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, suword(placeHere, 0); placeHere += sizeof(int); error = copyout(data, placeHere, binPath_sz); - if ( ! error ) - { + if (!error) { placeHere += binPath_sz; suword(placeHere, 0); size += extraSpaceNeeded; @@ -1523,12 +1577,13 @@ sysctl_procargsx(int *name, u_int namelen, user_addr_t where, kmem_free(kernel_map, copy_start, copy_end - copy_start); } if (error) { - return(error); + return error; } - if (where != USER_ADDR_NULL) + if (where != USER_ADDR_NULL) { *sizep = size; - return (0); + } + return 0; } @@ -1542,13 +1597,14 @@ sysctl_aiomax int new_value, changed; int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed); if (changed) { - /* make sure the system-wide limit is greater than the per process limit */ - if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS) + /* make sure the system-wide limit is greater than the per process limit */ + if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS) { aio_max_requests = new_value; - else + } else { error = EINVAL; + } } - return(error); + return error; } @@ -1563,12 +1619,13 @@ sysctl_aioprocmax int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed); if (changed) { /* make sure per process limit is less than the system-wide limit */ - if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX) + if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX) { aio_max_requests_per_process = new_value; - else + } else { error = EINVAL; + } } - return(error); + return error; } @@ -1583,14 +1640,14 @@ sysctl_aiothreads int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed); if (changed) { /* we only allow an increase in the number of worker threads */ - if (new_value > aio_worker_threads ) { - _aio_create_worker_threads((new_value - aio_worker_threads)); + if (new_value > aio_worker_threads) { + _aio_create_worker_threads((new_value - aio_worker_threads)); aio_worker_threads = new_value; + } else { + error = EINVAL; } - else - error = EINVAL; } - return(error); + return error; } @@ -1606,34 +1663,75 @@ sysctl_maxproc if (changed) { AUDIT_ARG(value32, new_value); /* make sure the system-wide limit is less than the configured hard - limit set at kernel compilation */ - if (new_value <= hard_maxproc && new_value > 0) + * limit set at kernel compilation */ + if (new_value <= hard_maxproc && new_value > 0) { maxproc = new_value; - else + } else { error = EINVAL; + } + } + return error; +} + +extern int sched_enable_smt; +STATIC int +sysctl_sched_enable_smt +(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int new_value, changed; + int error = sysctl_io_number(req, sched_enable_smt, sizeof(int), &new_value, &changed); + if (error) { + return error; + } + kern_return_t kret = KERN_SUCCESS; + if (changed) { + AUDIT_ARG(value32, new_value); + if (new_value == 0) { + sched_enable_smt = 0; + kret = enable_smt_processors(false); + } else { + sched_enable_smt = 1; + kret = enable_smt_processors(true); + } + } + switch (kret) { + case KERN_SUCCESS: + error = 0; + break; + case KERN_INVALID_ARGUMENT: + error = EINVAL; + break; + case 
KERN_FAILURE: + error = EBUSY; + break; + default: + error = ENOENT; + break; } - return(error); + + return error; } -SYSCTL_STRING(_kern, KERN_OSTYPE, ostype, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - ostype, 0, ""); -SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - osrelease, 0, ""); -SYSCTL_INT(_kern, KERN_OSREV, osrevision, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - (int *)NULL, BSD, ""); -SYSCTL_STRING(_kern, KERN_VERSION, version, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - version, 0, ""); -SYSCTL_STRING(_kern, OID_AUTO, uuid, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - &kernel_uuid_string[0], 0, ""); +SYSCTL_STRING(_kern, KERN_OSTYPE, ostype, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + ostype, 0, ""); +SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + osrelease, 0, ""); +SYSCTL_INT(_kern, KERN_OSREV, osrevision, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + (int *)NULL, BSD, ""); +SYSCTL_STRING(_kern, KERN_VERSION, version, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + version, 0, ""); +SYSCTL_STRING(_kern, OID_AUTO, uuid, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + &kernel_uuid_string[0], 0, ""); SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED, - &osbuild_config[0], 0, ""); + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_MASKED, + &osbuild_config[0], 0, ""); + #if DEBUG #ifndef DKPR @@ -1643,16 +1741,17 @@ SYSCTL_STRING(_kern, OID_AUTO, osbuildconfig, #if DKPR int debug_kprint_syscall = 0; -char debug_kprint_syscall_process[MAXCOMLEN+1]; +char debug_kprint_syscall_process[MAXCOMLEN + 1]; /* Thread safe: bits and string value are not used to reclaim state */ -SYSCTL_INT (_debug, OID_AUTO, kprint_syscall, - CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing"); -SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process, - CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process), - "name of process for kprintf syscall tracing"); +SYSCTL_INT(_debug, OID_AUTO, kprint_syscall, + CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing"); +SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process, + CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process), + "name of process for kprintf syscall tracing"); -int debug_kprint_current_process(const char **namep) +int +debug_kprint_current_process(const char **namep) { struct proc *p = current_proc(); @@ -1662,10 +1761,12 @@ int debug_kprint_current_process(const char **namep) if (debug_kprint_syscall_process[0]) { /* user asked to scope tracing to a particular process name */ - if(0 == strncmp(debug_kprint_syscall_process, - p->p_comm, sizeof(debug_kprint_syscall_process))) { + if (0 == strncmp(debug_kprint_syscall_process, + p->p_comm, sizeof(debug_kprint_syscall_process))) { /* no value in telling the user that we traced what they asked */ - if(namep) *namep = NULL; + if (namep) { + *namep = NULL; + } return 1; } else { @@ -1688,23 +1789,23 @@ int debug_kprint_current_process(const char **namep) STATIC int sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) { - int rval = 0; + int rval = 0; - rval = sysctl_handle_string(oidp, arg1, arg2, req); + rval = sysctl_handle_string(oidp, arg1, arg2, req); - if (req->newptr) { - IORegistrySetOSBuildVersion((char *)arg1); - } + if (req->newptr) { + 
IORegistrySetOSBuildVersion((char *)arg1); + } - return rval; + return rval; } SYSCTL_PROC(_kern, KERN_OSVERSION, osversion, - CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, - osversion, 256 /* OSVERSIZE*/, - sysctl_osversion, "A", ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, + osversion, 256 /* OSVERSIZE*/, + sysctl_osversion, "A", ""); -static uint64_t osproductversion_string[48]; +char osproductversion[48] = { '\0' }; STATIC int sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) @@ -1713,18 +1814,18 @@ sysctl_osproductversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, /* * Can only ever be set by launchd, and only once at boot. */ - if (req->p->p_pid != 1 || osproductversion_string[0] != '\0') { + if (req->p->p_pid != 1 || osproductversion[0] != '\0') { return EPERM; } } - return sysctl_handle_string(oidp, arg1, arg2, req); + return sysctl_handle_string(oidp, arg1, arg2, req); } SYSCTL_PROC(_kern, OID_AUTO, osproductversion, - CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, - osproductversion_string, sizeof(osproductversion_string), - sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist"); + CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, + osproductversion, sizeof(osproductversion), + sysctl_osproductversion, "A", "The ProductVersion from SystemVersion.plist"); static uint64_t osvariant_status = 0; @@ -1740,13 +1841,27 @@ sysctl_osvariant_status(__unused struct sysctl_oid *oidp, void *arg1, int arg2, } } - return sysctl_handle_quad(oidp, arg1, arg2, req); + return sysctl_handle_quad(oidp, arg1, arg2, req); } SYSCTL_PROC(_kern, OID_AUTO, osvariant_status, - CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED, - &osvariant_status, sizeof(osvariant_status), - sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information"); + CTLFLAG_RW | CTLTYPE_QUAD | CTLFLAG_LOCKED | CTLFLAG_MASKED, + &osvariant_status, sizeof(osvariant_status), + sysctl_osvariant_status, "Q", "Opaque flags used to cache OS variant information"); + +#if defined(XNU_TARGET_OS_BRIDGE) +char macosproductversion[MACOS_VERS_LEN] = { '\0' }; + +SYSCTL_STRING(_kern, OID_AUTO, macosproductversion, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &macosproductversion[0], MACOS_VERS_LEN, "The currently running macOS ProductVersion (from SystemVersion.plist on macOS)"); + +char macosversion[MACOS_VERS_LEN] = { '\0' }; + +SYSCTL_STRING(_kern, OID_AUTO, macosversion, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &macosversion[0], MACOS_VERS_LEN, "The currently running macOS build version"); +#endif STATIC int sysctl_sysctl_bootargs @@ -1763,71 +1878,74 @@ sysctl_sysctl_bootargs strlcpy(buf, PE_boot_args(), boot_args_len); error = sysctl_io_string(req, buf, boot_args_len, 0, NULL); - return(error); + return error; } SYSCTL_PROC(_kern, OID_AUTO, bootargs, - CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING, - NULL, 0, - sysctl_sysctl_bootargs, "A", "bootargs"); + CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING, + NULL, 0, + sysctl_sysctl_bootargs, "A", "bootargs"); STATIC int sysctl_kernelcacheuuid(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) { - int rval = ENOENT; - if (kernelcache_uuid_valid) { - rval = sysctl_handle_string(oidp, arg1, arg2, req); - } - return rval; + int rval = ENOENT; + if (kernelcache_uuid_valid) { + rval = sysctl_handle_string(oidp, arg1, arg2, req); + } + return rval; } SYSCTL_PROC(_kern, 
OID_AUTO, kernelcacheuuid, - CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, - kernelcache_uuid_string, sizeof(kernelcache_uuid_string), - sysctl_kernelcacheuuid, "A", ""); - -SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &maxfiles, 0, ""); -SYSCTL_INT(_kern, KERN_ARGMAX, argmax, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - (int *)NULL, ARG_MAX, ""); -SYSCTL_INT(_kern, KERN_POSIX1, posix1version, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - (int *)NULL, _POSIX_VERSION, ""); -SYSCTL_INT(_kern, KERN_NGROUPS, ngroups, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - (int *)NULL, NGROUPS_MAX, ""); -SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - (int *)NULL, 1, ""); -#if 1 /* _POSIX_SAVED_IDS from */ -SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - (int *)NULL, 1, ""); + CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED, + kernelcache_uuid_string, sizeof(kernelcache_uuid_string), + sysctl_kernelcacheuuid, "A", ""); + +SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &maxfiles, 0, ""); +SYSCTL_INT(_kern, KERN_ARGMAX, argmax, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + (int *)NULL, ARG_MAX, ""); +SYSCTL_INT(_kern, KERN_POSIX1, posix1version, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + (int *)NULL, _POSIX_VERSION, ""); +SYSCTL_INT(_kern, KERN_NGROUPS, ngroups, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + (int *)NULL, NGROUPS_MAX, ""); +SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + (int *)NULL, 1, ""); +#if 1 /* _POSIX_SAVED_IDS from */ +SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + (int *)NULL, 1, ""); #else -SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - NULL, 0, ""); +SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids, + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + NULL, 0, ""); #endif -SYSCTL_INT(_kern, OID_AUTO, num_files, - CTLFLAG_RD | CTLFLAG_LOCKED, - &nfiles, 0, ""); -SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes, - CTLFLAG_RD | CTLFLAG_LOCKED, - &numvnodes, 0, ""); -SYSCTL_INT(_kern, OID_AUTO, num_tasks, - CTLFLAG_RD | CTLFLAG_LOCKED, - &task_max, 0, ""); -SYSCTL_INT(_kern, OID_AUTO, num_threads, - CTLFLAG_RD | CTLFLAG_LOCKED, - &thread_max, 0, ""); -SYSCTL_INT(_kern, OID_AUTO, num_taskthreads, - CTLFLAG_RD | CTLFLAG_LOCKED, - &task_threadmax, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, num_files, + CTLFLAG_RD | CTLFLAG_LOCKED, + &nfiles, 0, ""); +SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes, + CTLFLAG_RD | CTLFLAG_LOCKED, + &numvnodes, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, num_tasks, + CTLFLAG_RD | CTLFLAG_LOCKED, + &task_max, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, num_threads, + CTLFLAG_RD | CTLFLAG_LOCKED, + &thread_max, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, num_taskthreads, + CTLFLAG_RD | CTLFLAG_LOCKED, + &task_threadmax, 0, ""); +SYSCTL_LONG(_kern, OID_AUTO, num_recycledvnodes, + CTLFLAG_RD | CTLFLAG_LOCKED, + &num_recycledvnodes, ""); STATIC int -sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +sysctl_maxvnodes(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { int oldval = desiredvnodes; int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL); @@ -1836,60 +1954,73 @@ sysctl_maxvnodes (__unused 
struct sysctl_oid *oidp, __unused void *arg1, __unuse resize_namecache(desiredvnodes); } - return(error); + return error; } -SYSCTL_INT(_kern, OID_AUTO, namecache_disabled, - CTLFLAG_RW | CTLFLAG_LOCKED, - &nc_disabled, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, namecache_disabled, + CTLFLAG_RW | CTLFLAG_LOCKED, + &nc_disabled, 0, ""); SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_maxvnodes, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_maxvnodes, "I", ""); SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_maxproc, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_maxproc, "I", ""); SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_aiomax, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_aiomax, "I", ""); SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_aioprocmax, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_aioprocmax, "I", ""); SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_aiothreads, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_aiothreads, "I", ""); + +SYSCTL_PROC(_kern, OID_AUTO, sched_enable_smt, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN, + 0, 0, sysctl_sched_enable_smt, "I", ""); + +extern int sched_allow_NO_SMT_threads; +SYSCTL_INT(_kern, OID_AUTO, sched_allow_NO_SMT_threads, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &sched_allow_NO_SMT_threads, 0, ""); #if (DEVELOPMENT || DEBUG) extern int sched_smt_balance; -SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance, - CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, - &sched_smt_balance, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &sched_smt_balance, 0, ""); extern int sched_allow_rt_smt; -SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt, - CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, - &sched_allow_rt_smt, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, sched_allow_rt_smt, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &sched_allow_rt_smt, 0, ""); +extern int sched_avoid_cpu0; +SYSCTL_INT(_kern, OID_AUTO, sched_avoid_cpu0, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &sched_avoid_cpu0, 0, ""); #if __arm__ || __arm64__ extern uint32_t perfcontrol_requested_recommended_cores; SYSCTL_UINT(_kern, OID_AUTO, sched_recommended_cores, - CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, - &perfcontrol_requested_recommended_cores, 0, ""); + CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, + &perfcontrol_requested_recommended_cores, 0, ""); /* Scheduler perfcontrol callouts sysctls */ SYSCTL_DECL(_kern_perfcontrol_callout); SYSCTL_NODE(_kern, OID_AUTO, perfcontrol_callout, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "scheduler perfcontrol callouts"); + "scheduler perfcontrol callouts"); extern int perfcontrol_callout_stats_enabled; -SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled, - CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED, - &perfcontrol_callout_stats_enabled, 0, ""); +SYSCTL_INT(_kern_perfcontrol_callout, OID_AUTO, stats_enabled, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &perfcontrol_callout_stats_enabled, 0, ""); extern uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type, - perfcontrol_callout_stat_t stat); + perfcontrol_callout_stat_t stat); /* On-Core Callout */ STATIC int @@ -1899,41 +2030,41 @@ 
sysctl_perfcontrol_callout_stat perfcontrol_callout_stat_t stat = (perfcontrol_callout_stat_t)arg1; perfcontrol_callout_type_t type = (perfcontrol_callout_type_t)arg2; return sysctl_io_number(req, (int)perfcontrol_callout_stat_avg(type, stat), - sizeof(int), NULL, NULL); + sizeof(int), NULL, NULL); } SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_instr, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE, - sysctl_perfcontrol_callout_stat, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_ON_CORE, + sysctl_perfcontrol_callout_stat, "I", ""); SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, oncore_cycles, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE, - sysctl_perfcontrol_callout_stat, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_ON_CORE, + sysctl_perfcontrol_callout_stat, "I", ""); SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_instr, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE, - sysctl_perfcontrol_callout_stat, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_OFF_CORE, + sysctl_perfcontrol_callout_stat, "I", ""); SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, offcore_cycles, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE, - sysctl_perfcontrol_callout_stat, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_OFF_CORE, + sysctl_perfcontrol_callout_stat, "I", ""); SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_instr, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT, - sysctl_perfcontrol_callout_stat, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_CONTEXT, + sysctl_perfcontrol_callout_stat, "I", ""); SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, context_cycles, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT, - sysctl_perfcontrol_callout_stat, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_CONTEXT, + sysctl_perfcontrol_callout_stat, "I", ""); SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_instr, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE, - sysctl_perfcontrol_callout_stat, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)PERFCONTROL_STAT_INSTRS, PERFCONTROL_CALLOUT_STATE_UPDATE, + sysctl_perfcontrol_callout_stat, "I", ""); SYSCTL_PROC(_kern_perfcontrol_callout, OID_AUTO, update_cycles, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE, - sysctl_perfcontrol_callout_stat, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *)PERFCONTROL_STAT_CYCLES, PERFCONTROL_CALLOUT_STATE_UPDATE, + sysctl_perfcontrol_callout_stat, "I", ""); #endif /* __arm__ || __arm64__ */ #endif /* (DEVELOPMENT || DEBUG) */ @@ -1953,12 +2084,12 @@ sysctl_securelvl error = EPERM; } } - return(error); + return error; } SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel, - CTLTYPE_INT | CTLFLAG_RW | 
CTLFLAG_LOCKED, - 0, 0, sysctl_securelvl, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_securelvl, "I", ""); STATIC int @@ -1970,16 +2101,16 @@ sysctl_domainname if (changed) { domainnamelen = strlen(domainname); } - return(error); + return error; } SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname, - CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_domainname, "A", ""); + CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_domainname, "A", ""); -SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &hostid, 0, ""); +SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &hostid, 0, ""); STATIC int sysctl_hostname @@ -1990,78 +2121,78 @@ sysctl_hostname if (changed) { hostnamelen = req->newlen; } - return(error); + return error; } SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname, - CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_hostname, "A", ""); + CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_hostname, "A", ""); STATIC int sysctl_procname (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { /* Original code allowed writing, I'm copying this, although this all makes - no sense to me. Besides, this sysctl is never used. */ - return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL); + * no sense to me. Besides, this sysctl is never used. */ + return sysctl_io_string(req, &req->p->p_name[0], (2 * MAXCOMLEN + 1), 1, NULL); } SYSCTL_PROC(_kern, KERN_PROCNAME, procname, - CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - 0, 0, sysctl_procname, "A", ""); + CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, + 0, 0, sysctl_procname, "A", ""); -SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &speculative_reads_disabled, 0, ""); +SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &speculative_reads_disabled, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &preheat_max_bytes, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &preheat_max_bytes, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &preheat_min_bytes, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &preheat_min_bytes, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &speculative_prefetch_max, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &speculative_prefetch_max, 0, ""); -SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &speculative_prefetch_max_iosize, 0, ""); +SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &speculative_prefetch_max_iosize, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_page_free_target, 0, ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vm_page_free_target, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_page_free_min, 0, ""); + CTLFLAG_RW | CTLFLAG_KERN 
| CTLFLAG_LOCKED, + &vm_page_free_min, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_page_free_reserved, 0, ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vm_page_free_reserved, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_pageout_state.vm_page_speculative_percentage, 0, ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vm_pageout_state.vm_page_speculative_percentage, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_pageout_state.vm_page_speculative_q_age_ms, 0, ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vm_pageout_state.vm_page_speculative_q_age_ms, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_max_delayed_work_limit, 0, ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vm_max_delayed_work_limit, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_max_batch, 0, ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vm_max_batch, 0, ""); SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid, - CTLFLAG_RD | CTLFLAG_LOCKED, - &bootsessionuuid_string, sizeof(bootsessionuuid_string) , ""); + CTLFLAG_RD | CTLFLAG_LOCKED, + &bootsessionuuid_string, sizeof(bootsessionuuid_string), ""); STATIC int sysctl_boottime @@ -2085,8 +2216,8 @@ sysctl_boottime } SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, - CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_boottime, "S,timeval", ""); + CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_boottime, "S,timeval", ""); STATIC int sysctl_symfile @@ -2094,15 +2225,16 @@ sysctl_symfile { char *str; int error = get_kernel_symfile(req->p, &str); - if (error) - return (error); + if (error) { + return error; + } return sysctl_io_string(req, str, 0, 0, NULL); } SYSCTL_PROC(_kern, KERN_SYMFILE, symfile, - CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_symfile, "A", ""); + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_symfile, "A", ""); #if NFSCLIENT STATIC int @@ -2113,8 +2245,8 @@ sysctl_netboot } SYSCTL_PROC(_kern, KERN_NETBOOT, netboot, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_netboot, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_netboot, "I", ""); #endif #ifdef CONFIG_IMGSRC_ACCESS @@ -2122,7 +2254,7 @@ SYSCTL_PROC(_kern, KERN_NETBOOT, netboot, * Legacy--act as if only one layer of nesting is possible. 
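 * The read-only handler below looks up the device vnode backing imgsrc_rootvnodes[0],
 * and the vfs_context_issuser() check restricts it to the superuser.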
*/ STATIC int -sysctl_imgsrcdev +sysctl_imgsrcdev (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { vfs_context_t ctx = vfs_context_current(); @@ -2131,17 +2263,17 @@ sysctl_imgsrcdev if (!vfs_context_issuser(ctx)) { return EPERM; - } + } if (imgsrc_rootvnodes[0] == NULL) { return ENOENT; - } + } result = vnode_getwithref(imgsrc_rootvnodes[0]); if (result != 0) { return result; } - + devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp; result = vnode_getwithref(devvp); if (result != 0) { @@ -2157,15 +2289,15 @@ out: } SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_imgsrcdev, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_imgsrcdev, "I", ""); STATIC int sysctl_imgsrcinfo (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { int error; - struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {}; /* 2 for now, no problem */ + struct imgsrc_info info[MAX_IMAGEBOOT_NESTING] = {}; /* 2 for now, no problem */ uint32_t i; vnode_t rvp, devvp; @@ -2187,13 +2319,13 @@ sysctl_imgsrcinfo return error; } - /* + /* * For now, no getting at a non-local volume. */ devvp = vnode_mount(rvp)->mnt_devvp; if (devvp == NULL) { vnode_put(rvp); - return EINVAL; + return EINVAL; } error = vnode_getwithref(devvp); @@ -2218,8 +2350,8 @@ sysctl_imgsrcinfo } SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo, - CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_imgsrcinfo, "I", ""); + CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_imgsrcinfo, "I", ""); #endif /* CONFIG_IMGSRC_ACCESS */ @@ -2228,16 +2360,16 @@ SYSCTL_DECL(_kern_timer); SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer"); -SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled, - CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, - &mach_timer_coalescing_enabled, 0, ""); +SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled, + CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, + &mach_timer_coalescing_enabled, 0, ""); SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1, - CTLFLAG_RW | CTLFLAG_LOCKED, - &timer_deadline_tracking_bin_1, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, + &timer_deadline_tracking_bin_1, ""); SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2, - CTLFLAG_RW | CTLFLAG_LOCKED, - &timer_deadline_tracking_bin_2, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, + &timer_deadline_tracking_bin_2, ""); SYSCTL_DECL(_kern_timer_longterm); SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm"); @@ -2249,68 +2381,69 @@ enum { ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS, LATENCY, LATENCY_MIN, LATENCY_MAX, SCAN_LIMIT, SCAN_INTERVAL, PAUSES }; -extern uint64_t timer_sysctl_get(int); +extern uint64_t timer_sysctl_get(int); extern int timer_sysctl_set(int, uint64_t); STATIC int sysctl_timer (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - int oid = (int)arg1; - uint64_t value = timer_sysctl_get(oid); - uint64_t new_value; - int error; - int changed; + int oid = (int)arg1; + uint64_t value = timer_sysctl_get(oid); + uint64_t new_value; + int error; + int changed; error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed); - if (changed) + if (changed) { error = timer_sysctl_set(oid, new_value); + } return error; } SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold, - CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - (void *) THRESHOLD, 0, sysctl_timer, "Q", ""); + 
CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + (void *) THRESHOLD, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_limit, - CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + (void *) SCAN_LIMIT, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_interval, - CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + (void *) SCAN_INTERVAL, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) QCOUNT, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) QCOUNT, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scan_pauses, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) PAUSES, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) PAUSES, 0, sysctl_timer, "Q", ""); #if DEBUG SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) ENQUEUES, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) ENQUEUES, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) DEQUEUES, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) DEQUEUES, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) ESCALATES, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) ESCALATES, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) SCANS, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) SCANS, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) PREEMPTS, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) PREEMPTS, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) LATENCY, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) LATENCY, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) LATENCY_MIN, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) LATENCY_MIN, 0, sysctl_timer, "Q", ""); SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - (void *) LATENCY_MAX, 0, sysctl_timer, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, + (void *) LATENCY_MAX, 0, sysctl_timer, "Q", ""); #endif /* DEBUG */ STATIC int @@ -2321,8 +2454,8 @@ sysctl_usrstack } SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_usrstack, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_usrstack, "I", ""); STATIC int sysctl_usrstack64 @@ -2332,14 +2465,14 @@ sysctl_usrstack64 } SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_usrstack64, "Q", ""); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, 
+ 0, 0, sysctl_usrstack64, "Q", ""); #if CONFIG_COREDUMP -SYSCTL_STRING(_kern, KERN_COREFILE, corefile, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - corefilename, sizeof(corefilename), ""); +SYSCTL_STRING(_kern, KERN_COREFILE, corefile, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + corefilename, sizeof(corefilename), ""); STATIC int sysctl_coredump @@ -2347,23 +2480,24 @@ sysctl_coredump { #ifdef SECURE_KERNEL (void)req; - return (ENOTSUP); + return ENOTSUP; #else int new_value, changed; int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed); if (changed) { - if ((new_value == 0) || (new_value == 1)) + if ((new_value == 0) || (new_value == 1)) { do_coredump = new_value; - else + } else { error = EINVAL; + } } - return(error); + return error; #endif } SYSCTL_PROC(_kern, KERN_COREDUMP, coredump, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_coredump, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_coredump, "I", ""); STATIC int sysctl_suid_coredump @@ -2371,23 +2505,24 @@ sysctl_suid_coredump { #ifdef SECURE_KERNEL (void)req; - return (ENOTSUP); + return ENOTSUP; #else int new_value, changed; int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed); if (changed) { - if ((new_value == 0) || (new_value == 1)) + if ((new_value == 0) || (new_value == 1)) { sugid_coredump = new_value; - else + } else { error = EINVAL; + } } - return(error); + return error; #endif } SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_suid_coredump, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_suid_coredump, "I", ""); #endif /* CONFIG_COREDUMP */ @@ -2400,18 +2535,19 @@ sysctl_delayterm int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 
1: 0, sizeof(int), &new_value, &changed); if (changed) { proc_lock(p); - if (new_value) + if (new_value) { req->p->p_lflag |= P_LDELAYTERM; - else + } else { req->p->p_lflag &= ~P_LDELAYTERM; + } proc_unlock(p); } - return(error); + return error; } SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_delayterm, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_delayterm, "I", ""); STATIC int @@ -2419,30 +2555,31 @@ sysctl_rage_vnode (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { struct proc *p = req->p; - struct uthread *ut; + struct uthread *ut; int new_value, old_value, changed; int error; ut = get_bsdthread_info(current_thread()); - if (ut->uu_flag & UT_RAGE_VNODES) - old_value = KERN_RAGE_THREAD; - else if (p->p_lflag & P_LRAGE_VNODES) - old_value = KERN_RAGE_PROC; - else - old_value = 0; + if (ut->uu_flag & UT_RAGE_VNODES) { + old_value = KERN_RAGE_THREAD; + } else if (p->p_lflag & P_LRAGE_VNODES) { + old_value = KERN_RAGE_PROC; + } else { + old_value = 0; + } error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed); if (error == 0) { - switch (new_value) { + switch (new_value) { case KERN_RAGE_PROC: - proc_lock(p); + proc_lock(p); p->p_lflag |= P_LRAGE_VNODES; proc_unlock(p); break; case KERN_UNRAGE_PROC: - proc_lock(p); + proc_lock(p); p->p_lflag &= ~P_LRAGE_VNODES; proc_unlock(p); break; @@ -2451,17 +2588,17 @@ sysctl_rage_vnode ut->uu_flag |= UT_RAGE_VNODES; break; case KERN_UNRAGE_THREAD: - ut = get_bsdthread_info(current_thread()); + ut = get_bsdthread_info(current_thread()); ut->uu_flag &= ~UT_RAGE_VNODES; break; } } - return(error); + return error; } SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - 0, 0, sysctl_rage_vnode, "I", ""); /* XXX move this interface into libproc and remove this sysctl */ STATIC int @@ -2474,31 +2611,35 @@ sysctl_setthread_cpupercent uint8_t percent = 0; int ms_refill = 0; - if (!req->newptr) - return (0); + if (!req->newptr) { + return 0; + } old_value = 0; - if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0) - return (error); + if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0) { + return error; + } - percent = new_value & 0xff; /* low 8 bytes for perent */ - ms_refill = (new_value >> 8) & 0xffffff; /* upper 24bytes represent ms refill value */ - if (percent > 100) - return (EINVAL); + percent = new_value & 0xff; /* low 8 bits for percent */ + ms_refill = (new_value >> 8) & 0xffffff; /* upper 24 bits carry the ms refill value */ + if (percent > 100) { + return EINVAL; + } /* * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present. 
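 * e.g. new_value = (10000 << 8) | 25 (0x00271019) requests a 25% limit with a 10000ms refill period, per the decode above.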
*/ - if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0) - return (EIO); - - return (0); + if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0) { + return EIO; + } + + return 0; } SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent, - CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY, - 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit"); + CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY, + 0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit"); STATIC int @@ -2512,13 +2653,13 @@ sysctl_kern_check_openevt if (p->p_flag & P_CHECKOPENEVT) { old_value = KERN_OPENEVT_PROC; } else { - old_value = 0; + old_value = 0; } error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed); if (error == 0) { - switch (new_value) { + switch (new_value) { case KERN_OPENEVT_PROC: OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag); break; @@ -2531,14 +2672,14 @@ sysctl_kern_check_openevt error = EINVAL; } } - return(error); + return error; } SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag"); - + 0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag"); +#if DEVELOPMENT || DEBUG STATIC int sysctl_nx (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) @@ -2551,67 +2692,71 @@ sysctl_nx int error; error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed); - if (error) + if (error) { return error; + } if (changed) { -#if defined(__i386__) || defined(__x86_64__) +#if defined(__x86_64__) /* * Only allow setting if NX is supported on the chip */ - if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) + if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) { return ENOTSUP; + } #endif nx_enabled = new_value; } - return(error); + return error; #endif /* SECURE_KERNEL */ } +#endif - - -SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_nx, "I", ""); +#if DEVELOPMENT || DEBUG +SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_nx, "I", ""); +#endif STATIC int sysctl_loadavg (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - if (proc_is64bit(req->p)) { - struct user64_loadavg loadinfo64 = {}; - fill_loadavg64(&averunnable, &loadinfo64); - return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL); - } else { - struct user32_loadavg loadinfo32 = {}; - fill_loadavg32(&averunnable, &loadinfo32); - return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL); - } + if (proc_is64bit(req->p)) { + struct user64_loadavg loadinfo64 = {}; + fill_loadavg64(&averunnable, &loadinfo64); + return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL); + } else { + struct user32_loadavg loadinfo32 = {}; + fill_loadavg32(&averunnable, &loadinfo32); + return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL); + } } SYSCTL_PROC(_vm, VM_LOADAVG, loadavg, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_loadavg, "S,loadavg", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_loadavg, "S,loadavg", ""); /* * Note: Thread safe; vm_map_lock protects in vm_toggle_entry_reuse() */ STATIC int sysctl_vm_toggle_address_reuse(__unused struct 
sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { - int old_value=0, new_value=0, error=0; - - if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value )) - return(error); + int old_value = 0, new_value = 0, error = 0; + + if (vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value )) { + return error; + } error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL); if (!error) { - return (vm_toggle_entry_reuse(new_value, NULL)); + return vm_toggle_entry_reuse(new_value, NULL); } - return(error); + return error; } -SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I",""); +SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse, "I", ""); #ifdef CONFIG_XNUPOST @@ -2636,8 +2781,9 @@ sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS oldp = req->oldptr; newp = req->newptr; - if (newp) + if (newp) { return ENOTSUP; + } if ((void *)oldp == NULL) { /* return estimated size for second call where info can be placed */ @@ -2651,14 +2797,14 @@ sysctl_handle_xnupost_get_tests SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_debug, - OID_AUTO, - xnupost_get_tests, - CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, - 0, - sysctl_handle_xnupost_get_tests, - "-", - "read xnupost test data in kernel"); + OID_AUTO, + xnupost_get_tests, + CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, + 0, + sysctl_handle_xnupost_get_tests, + "-", + "read xnupost test data in kernel"); STATIC int sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS @@ -2686,8 +2832,9 @@ sysctl_debug_xnupost_ctl SYSCTL_HANDLER_ARGS /* pull in provided value from userspace */ error = SYSCTL_IN(req, &input[0], in_size); - if (error) + if (error) { return error; + } if (input[0] == XTCTL_RESET_TESTDATA) { outval[0] = xnupost_reset_all_tests(); @@ -2700,14 +2847,14 @@ out: } SYSCTL_PROC(_debug, - OID_AUTO, - xnupost_testctl, - CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, - 0, - sysctl_debug_xnupost_ctl, - "I", - "xnupost control for kernel testing"); + OID_AUTO, + xnupost_testctl, + CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, + 0, + sysctl_debug_xnupost_ctl, + "I", + "xnupost control for kernel testing"); extern void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t arraycount); @@ -2729,8 +2876,9 @@ sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * a /* pull in provided value from userspace */ error = SYSCTL_IN(req, &input[0], size_inval); - if (error) + if (error) { return error; + } test_oslog_handleOSLogCtl(input, outval, ARRCOUNT); @@ -2740,14 +2888,14 @@ sysctl_debug_test_oslog_ctl(__unused struct sysctl_oid * oidp, __unused void * a } SYSCTL_PROC(_debug, - OID_AUTO, - test_OSLogCtl, - CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, - 0, - sysctl_debug_test_oslog_ctl, - "I", - "testing oslog in kernel"); + OID_AUTO, + test_OSLogCtl, + CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, + 0, + sysctl_debug_test_oslog_ctl, + "I", + "testing oslog in kernel"); #include #include @@ -2783,45 +2931,45 @@ sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unus 
lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); if (!sysctl_debug_test_stackshot_mtx_inited) { lck_mtx_init(&sysctl_debug_test_stackshot_owner_lck, - sysctl_debug_test_stackshot_owner_grp, - LCK_ATTR_NULL); + sysctl_debug_test_stackshot_owner_grp, + LCK_ATTR_NULL); semaphore_create(kernel_task, - &sysctl_debug_test_stackshot_mutex_sem, - SYNC_POLICY_FIFO, 0); + &sysctl_debug_test_stackshot_mutex_sem, + SYNC_POLICY_FIFO, 0); sysctl_debug_test_stackshot_mtx_inited = 1; } lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); if (!error) { - switch(option) { - case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT: - lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck); - lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck); - break; - case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT: - lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck); - semaphore_wait(sysctl_debug_test_stackshot_mutex_sem); - lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck); - break; - case SYSCTL_DEBUG_MTX_SIGNAL: - semaphore_signal(sysctl_debug_test_stackshot_mutex_sem); - break; - case SYSCTL_DEBUG_MTX_TEARDOWN: - lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); - - lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck, - sysctl_debug_test_stackshot_owner_grp); - semaphore_destroy(kernel_task, - sysctl_debug_test_stackshot_mutex_sem); - sysctl_debug_test_stackshot_mtx_inited = 0; - - lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); - break; - case -1: /* user just wanted to read the value, so do nothing */ - break; - default: - error = EINVAL; - break; + switch (option) { + case SYSCTL_DEBUG_MTX_ACQUIRE_NOWAIT: + lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck); + lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck); + break; + case SYSCTL_DEBUG_MTX_ACQUIRE_WAIT: + lck_mtx_lock(&sysctl_debug_test_stackshot_owner_lck); + semaphore_wait(sysctl_debug_test_stackshot_mutex_sem); + lck_mtx_unlock(&sysctl_debug_test_stackshot_owner_lck); + break; + case SYSCTL_DEBUG_MTX_SIGNAL: + semaphore_signal(sysctl_debug_test_stackshot_mutex_sem); + break; + case SYSCTL_DEBUG_MTX_TEARDOWN: + lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); + + lck_mtx_destroy(&sysctl_debug_test_stackshot_owner_lck, + sysctl_debug_test_stackshot_owner_grp); + semaphore_destroy(kernel_task, + sysctl_debug_test_stackshot_mutex_sem); + sysctl_debug_test_stackshot_mtx_inited = 0; + + lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); + break; + case -1: /* user just wanted to read the value, so do nothing */ + break; + default: + error = EINVAL; + break; } } return error; @@ -2832,14 +2980,14 @@ sysctl_debug_test_stackshot_mutex_owner(__unused struct sysctl_oid *oidp, __unus * thread is hanging / taking a long time to do something. 
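 * A test harness writes one of the SYSCTL_DEBUG_MTX_* commands handled above; a plain read
 * returns the unslid address of the lock so it can be compared against stackshot's output.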
*/ SYSCTL_PROC(_debug, - OID_AUTO, - test_MutexOwnerCtl, - CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, - 0, - sysctl_debug_test_stackshot_mutex_owner, - "-", - "Testing mutex owner in kernel"); + OID_AUTO, + test_MutexOwnerCtl, + CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, + 0, + sysctl_debug_test_stackshot_mutex_owner, + "-", + "Testing mutex owner in kernel"); volatile char sysctl_debug_test_stackshot_rwlck_inited = 0; lck_rw_t sysctl_debug_test_stackshot_owner_rwlck; @@ -2856,7 +3004,7 @@ STATIC int sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { long long option = -1; - /* if the user tries to read the sysctl, we tell them what the address of the lock is + /* if the user tries to read the sysctl, we tell them what the address of the lock is * (to test against stackshot's output) */ long long rwlck_unslid_addr = (long long)VM_KERNEL_UNSLIDE_OR_PERM(&sysctl_debug_test_stackshot_owner_rwlck); int error = sysctl_io_number(req, rwlck_unslid_addr, sizeof(long long), (void*)&option, NULL); @@ -2864,55 +3012,55 @@ sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unus lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); if (!sysctl_debug_test_stackshot_rwlck_inited) { lck_rw_init(&sysctl_debug_test_stackshot_owner_rwlck, - sysctl_debug_test_stackshot_owner_grp, - LCK_ATTR_NULL); + sysctl_debug_test_stackshot_owner_grp, + LCK_ATTR_NULL); semaphore_create(kernel_task, - &sysctl_debug_test_stackshot_rwlck_sem, - SYNC_POLICY_FIFO, - 0); + &sysctl_debug_test_stackshot_rwlck_sem, + SYNC_POLICY_FIFO, + 0); sysctl_debug_test_stackshot_rwlck_inited = 1; } lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); if (!error) { - switch(option) { - case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT: - lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); - lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); - break; - case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT: - lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); - semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem); - lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); - break; - case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT: - lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); - lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); - break; - case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT: - lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); - semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem); - lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); - break; - case SYSCTL_DEBUG_KRWLCK_SIGNAL: - semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem); - break; - case SYSCTL_DEBUG_KRWLCK_TEARDOWN: - lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); - - lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck, - sysctl_debug_test_stackshot_owner_grp); - semaphore_destroy(kernel_task, - sysctl_debug_test_stackshot_rwlck_sem); - sysctl_debug_test_stackshot_rwlck_inited = 0; - - lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); - break; - case -1: /* user just wanted to read the value, so do nothing */ - break; - default: - error = EINVAL; - break; + switch (option) { + case SYSCTL_DEBUG_KRWLCK_RACQUIRE_NOWAIT: + 
lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); + lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); + break; + case SYSCTL_DEBUG_KRWLCK_RACQUIRE_WAIT: + lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); + semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem); + lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_SHARED); + break; + case SYSCTL_DEBUG_KRWLCK_WACQUIRE_NOWAIT: + lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); + lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); + break; + case SYSCTL_DEBUG_KRWLCK_WACQUIRE_WAIT: + lck_rw_lock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); + semaphore_wait(sysctl_debug_test_stackshot_rwlck_sem); + lck_rw_unlock(&sysctl_debug_test_stackshot_owner_rwlck, LCK_RW_TYPE_EXCLUSIVE); + break; + case SYSCTL_DEBUG_KRWLCK_SIGNAL: + semaphore_signal(sysctl_debug_test_stackshot_rwlck_sem); + break; + case SYSCTL_DEBUG_KRWLCK_TEARDOWN: + lck_mtx_lock(sysctl_debug_test_stackshot_owner_init_mtx); + + lck_rw_destroy(&sysctl_debug_test_stackshot_owner_rwlck, + sysctl_debug_test_stackshot_owner_grp); + semaphore_destroy(kernel_task, + sysctl_debug_test_stackshot_rwlck_sem); + sysctl_debug_test_stackshot_rwlck_inited = 0; + + lck_mtx_unlock(sysctl_debug_test_stackshot_owner_init_mtx); + break; + case -1: /* user just wanted to read the value, so do nothing */ + break; + default: + error = EINVAL; + break; } } return error; @@ -2920,47 +3068,48 @@ sysctl_debug_test_stackshot_rwlck_owner(__unused struct sysctl_oid *oidp, __unus SYSCTL_PROC(_debug, - OID_AUTO, - test_RWLockOwnerCtl, - CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, - 0, - sysctl_debug_test_stackshot_rwlck_owner, - "-", - "Testing rwlock owner in kernel"); + OID_AUTO, + test_RWLockOwnerCtl, + CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, + 0, + sysctl_debug_test_stackshot_rwlck_owner, + "-", + "Testing rwlock owner in kernel"); #endif /* !CONFIG_XNUPOST */ STATIC int sysctl_swapusage (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - int error; - uint64_t swap_total; - uint64_t swap_avail; - vm_size_t swap_pagesize; - boolean_t swap_encrypted; - struct xsw_usage xsu = {}; - - error = macx_swapinfo(&swap_total, - &swap_avail, - &swap_pagesize, - &swap_encrypted); - if (error) - return error; + int error; + uint64_t swap_total; + uint64_t swap_avail; + vm_size_t swap_pagesize; + boolean_t swap_encrypted; + struct xsw_usage xsu = {}; + + error = macx_swapinfo(&swap_total, + &swap_avail, + &swap_pagesize, + &swap_encrypted); + if (error) { + return error; + } - xsu.xsu_total = swap_total; - xsu.xsu_avail = swap_avail; - xsu.xsu_used = swap_total - swap_avail; - xsu.xsu_pagesize = swap_pagesize; - xsu.xsu_encrypted = swap_encrypted; - return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL); + xsu.xsu_total = swap_total; + xsu.xsu_avail = swap_avail; + xsu.xsu_used = swap_total - swap_avail; + xsu.xsu_pagesize = swap_pagesize; + xsu.xsu_encrypted = swap_encrypted; + return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL); } SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_swapusage, "S,xsw_usage", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_swapusage, "S,xsw_usage", ""); #if CONFIG_FREEZE extern void vm_page_reactivate_all_throttled(void); 
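Everything the swapusage handler above exports is reachable from userspace through sysctlbyname(3). The following is a minimal illustrative sketch (ordinary user code, not part of this patch) that reads back the same struct xsw_usage the handler fills in via sysctl_io_opaque():

#include <stdio.h>
#include <sys/sysctl.h>

int
main(void)
{
	struct xsw_usage xsu;
	size_t len = sizeof(xsu);

	/* "vm.swapusage" is the OID registered by SYSCTL_PROC(_vm, VM_SWAPUSAGE, ...) above */
	if (sysctlbyname("vm.swapusage", &xsu, &len, NULL, 0) != 0) {
		perror("sysctlbyname");
		return 1;
	}
	printf("swap: total=%llu used=%llu avail=%llu encrypted=%d\n",
	    (unsigned long long)xsu.xsu_total,
	    (unsigned long long)xsu.xsu_used,
	    (unsigned long long)xsu.xsu_avail,
	    (int)xsu.xsu_encrypted);
	return 0;
}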
@@ -2974,38 +3123,39 @@ sysctl_freeze_enabled SYSCTL_HANDLER_ARGS boolean_t disabled; error = sysctl_handle_int(oidp, &val, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } - if (! VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + if (!VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { //assert(req->newptr); printf("Failed attempt to set vm.freeze_enabled sysctl\n"); return EINVAL; } - /* - * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue. + /* + * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue. */ disabled = (!val && memorystatus_freeze_enabled); - + memorystatus_freeze_enabled = val ? TRUE : FALSE; - + if (disabled) { vm_page_reactivate_all_throttled(); memorystatus_disable_freeze(); } - - return (0); + + return 0; } -SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", ""); +SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", ""); #endif /* CONFIG_FREEZE */ #if DEVELOPMENT || DEBUG extern int vm_num_swap_files_config; extern int vm_num_swap_files; extern lck_mtx_t vm_swap_data_lock; -#define VM_MAX_SWAP_FILE_NUM 100 +#define VM_MAX_SWAP_FILE_NUM 100 static int sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS @@ -3015,7 +3165,7 @@ sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) { - goto out; + goto out; } if (!VM_CONFIG_SWAP_IS_ACTIVE && !VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { @@ -3042,16 +3192,16 @@ sysctl_vm_config_num_swap_files SYSCTL_HANDLER_ARGS lck_mtx_unlock(&vm_swap_data_lock); out: - return (0); + return 0; } SYSCTL_PROC(_debug, OID_AUTO, num_swap_files_configured, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_config_num_swap_files, "I", ""); #endif /* DEVELOPMENT || DEBUG */ /* this kernel does NOT implement shared_region_make_private_np() */ -SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private, - CTLFLAG_RD | CTLFLAG_LOCKED, - (int *)NULL, 0, ""); +SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private, + CTLFLAG_RD | CTLFLAG_LOCKED, + (int *)NULL, 0, ""); STATIC int fetch_process_cputype( @@ -3064,13 +3214,14 @@ fetch_process_cputype( int refheld = 0; cpu_type_t ret = 0; int error = 0; - - if (namelen == 0) + + if (namelen == 0) { p = cur_proc; - else if (namelen == 1) { + } else if (namelen == 1) { p = proc_find(name[0]); - if (p == NULL) - return (EINVAL); + if (p == NULL) { + return EINVAL; + } refheld = 1; } else { error = EINVAL; @@ -3083,39 +3234,43 @@ fetch_process_cputype( } *cputype = ret; - - if (refheld != 0) + + if (refheld != 0) { proc_rele(p); + } out: - return (error); + return error; } STATIC int sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { int error; cpu_type_t proc_cputype = 0; - if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) + if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) { return error; + } int res = 1; - if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) + if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) { res = 0; + } return SYSCTL_OUT(req, &res, sizeof(res)); -} -SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, 
CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native"); +} +SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native, "I", "proc_native"); STATIC int sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { int error; cpu_type_t proc_cputype = 0; - if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) + if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0) { return error; + } return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype)); } -SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype"); +SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype, "I", "proc_cputype"); STATIC int sysctl_safeboot @@ -3125,8 +3280,8 @@ sysctl_safeboot } SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_safeboot, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_safeboot, "I", ""); STATIC int sysctl_singleuser @@ -3136,29 +3291,30 @@ sysctl_singleuser } SYSCTL_PROC(_kern, OID_AUTO, singleuser, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_singleuser, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_singleuser, "I", ""); -STATIC int sysctl_minimalboot +STATIC int +sysctl_minimalboot (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { return sysctl_io_number(req, minimalboot, sizeof(int), NULL, NULL); } SYSCTL_PROC(_kern, OID_AUTO, minimalboot, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_minimalboot, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_minimalboot, "I", ""); /* * Controls for debugging affinity sets - see osfmk/kern/affinity.c */ -extern boolean_t affinity_sets_enabled; -extern int affinity_sets_mapping; +extern boolean_t affinity_sets_enabled; +extern int affinity_sets_mapping; -SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled, - CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled"); -SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping, - CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy"); +SYSCTL_INT(_kern, OID_AUTO, affinity_sets_enabled, + CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled"); +SYSCTL_INT(_kern, OID_AUTO, affinity_sets_mapping, + CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy"); /* * Boolean indicating if KASLR is active. @@ -3167,7 +3323,7 @@ STATIC int sysctl_slide (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - uint32_t slide; + uint32_t slide; slide = vm_kernel_slide ? 1 : 0; @@ -3175,13 +3331,13 @@ sysctl_slide } SYSCTL_PROC(_kern, OID_AUTO, slide, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_slide, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_slide, "I", ""); /* * Limit on total memory users can wire. * - * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined. + * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined. * * vm_user_wire_limit - per address space limit on wired memory. This puts a cap on the process's rlimit value. 
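 * vm_global_no_user_wire_amount - system wide amount of memory that user wiring must always leave unwired (declared below).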
* @@ -3191,9 +3347,9 @@ SYSCTL_PROC(_kern, OID_AUTO, slide, * All values are in bytes. */ -vm_map_size_t vm_global_no_user_wire_amount; -vm_map_size_t vm_global_user_wire_limit; -vm_map_size_t vm_user_wire_limit; +vm_map_size_t vm_global_no_user_wire_amount; +vm_map_size_t vm_global_user_wire_limit; +vm_map_size_t vm_user_wire_limit; /* * There needs to be a more automatic/elegant way to do this @@ -3216,7 +3372,7 @@ SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, ""); -extern uint32_t vm_page_external_count; +extern uint32_t vm_page_external_count; SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, ""); @@ -3228,31 +3384,31 @@ SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min_divisor, CTLFLAG_RW | CTLFLAG_LO SYSCTL_INT(_vm, OID_AUTO, vm_page_xpmapped_min_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_pageout_state.vm_page_xpmapped_min_divisor, 0, ""); #endif -extern int vm_compressor_mode; -extern int vm_compressor_is_active; -extern int vm_compressor_available; -extern uint32_t vm_ripe_target_age; -extern uint32_t swapout_target_age; +extern int vm_compressor_mode; +extern int vm_compressor_is_active; +extern int vm_compressor_available; +extern uint32_t vm_ripe_target_age; +extern uint32_t swapout_target_age; extern int64_t compressor_bytes_used; extern int64_t c_segment_input_bytes; extern int64_t c_segment_compressed_bytes; -extern uint32_t compressor_eval_period_in_msecs; -extern uint32_t compressor_sample_min_in_msecs; -extern uint32_t compressor_sample_max_in_msecs; -extern uint32_t compressor_thrashing_threshold_per_10msecs; -extern uint32_t compressor_thrashing_min_per_10msecs; +extern uint32_t compressor_eval_period_in_msecs; +extern uint32_t compressor_sample_min_in_msecs; +extern uint32_t compressor_sample_max_in_msecs; +extern uint32_t compressor_thrashing_threshold_per_10msecs; +extern uint32_t compressor_thrashing_min_per_10msecs; extern uint32_t vm_compressor_time_thread; #if DEVELOPMENT || DEBUG -extern uint32_t vm_compressor_minorcompact_threshold_divisor; -extern uint32_t vm_compressor_majorcompact_threshold_divisor; -extern uint32_t vm_compressor_unthrottle_threshold_divisor; -extern uint32_t vm_compressor_catchup_threshold_divisor; +extern uint32_t vm_compressor_minorcompact_threshold_divisor; +extern uint32_t vm_compressor_majorcompact_threshold_divisor; +extern uint32_t vm_compressor_unthrottle_threshold_divisor; +extern uint32_t vm_compressor_catchup_threshold_divisor; -extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden; -extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden; -extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden; -extern uint32_t vm_compressor_catchup_threshold_divisor_overridden; +extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden; +extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden; +extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden; +extern uint32_t vm_compressor_catchup_threshold_divisor_overridden; extern vmct_stats_t vmct_stats; @@ -3264,15 +3420,15 @@ sysctl_minorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused int error = sysctl_io_number(req, vm_compressor_minorcompact_threshold_divisor, sizeof(int), &new_value, &changed); if (changed) { - vm_compressor_minorcompact_threshold_divisor = new_value; - 
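
/*
 * [Editor's illustration -- not part of the patch.] The compressor divisor
 * handlers that follow use the read/write form of sysctl_io_number(): a
 * pointer for the incoming value plus a 'changed' flag, so the override is
 * committed only when a write actually happened. Condensed, with a
 * hypothetical variable:
 */

static int example_divisor = 10;    /* hypothetical */

STATIC int
sysctl_example_divisor(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
    int new_value, changed;
    int error = sysctl_io_number(req, example_divisor, sizeof(int), &new_value, &changed);

    if (changed) {
        example_divisor = new_value;
    }
    return error;
}
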
vm_compressor_minorcompact_threshold_divisor_overridden = 1; + vm_compressor_minorcompact_threshold_divisor = new_value; + vm_compressor_minorcompact_threshold_divisor_overridden = 1; } - return(error); + return error; } SYSCTL_PROC(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, - CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - 0, 0, sysctl_minorcompact_threshold_divisor, "I", ""); + CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, + 0, 0, sysctl_minorcompact_threshold_divisor, "I", ""); STATIC int @@ -3282,15 +3438,15 @@ sysctl_majorcompact_threshold_divisor(__unused struct sysctl_oid *oidp, __unused int error = sysctl_io_number(req, vm_compressor_majorcompact_threshold_divisor, sizeof(int), &new_value, &changed); if (changed) { - vm_compressor_majorcompact_threshold_divisor = new_value; - vm_compressor_majorcompact_threshold_divisor_overridden = 1; + vm_compressor_majorcompact_threshold_divisor = new_value; + vm_compressor_majorcompact_threshold_divisor_overridden = 1; } - return(error); + return error; } SYSCTL_PROC(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, - CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - 0, 0, sysctl_majorcompact_threshold_divisor, "I", ""); + CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, + 0, 0, sysctl_majorcompact_threshold_divisor, "I", ""); STATIC int @@ -3300,15 +3456,15 @@ sysctl_unthrottle_threshold_divisor(__unused struct sysctl_oid *oidp, __unused v int error = sysctl_io_number(req, vm_compressor_unthrottle_threshold_divisor, sizeof(int), &new_value, &changed); if (changed) { - vm_compressor_unthrottle_threshold_divisor = new_value; - vm_compressor_unthrottle_threshold_divisor_overridden = 1; + vm_compressor_unthrottle_threshold_divisor = new_value; + vm_compressor_unthrottle_threshold_divisor_overridden = 1; } - return(error); + return error; } SYSCTL_PROC(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, - CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - 0, 0, sysctl_unthrottle_threshold_divisor, "I", ""); + CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, + 0, 0, sysctl_unthrottle_threshold_divisor, "I", ""); STATIC int @@ -3318,15 +3474,15 @@ sysctl_catchup_threshold_divisor(__unused struct sysctl_oid *oidp, __unused void int error = sysctl_io_number(req, vm_compressor_catchup_threshold_divisor, sizeof(int), &new_value, &changed); if (changed) { - vm_compressor_catchup_threshold_divisor = new_value; - vm_compressor_catchup_threshold_divisor_overridden = 1; + vm_compressor_catchup_threshold_divisor = new_value; + vm_compressor_catchup_threshold_divisor_overridden = 1; } - return(error); + return error; } SYSCTL_PROC(_vm, OID_AUTO, compressor_catchup_threshold_divisor, - CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - 0, 0, sysctl_catchup_threshold_divisor, "I", ""); + CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, + 0, 0, sysctl_catchup_threshold_divisor, "I", ""); #endif @@ -3454,13 +3610,13 @@ SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CT #if CONFIG_BACKGROUND_QUEUE -extern uint32_t vm_page_background_count; -extern uint32_t vm_page_background_target; -extern uint32_t vm_page_background_internal_count; -extern uint32_t vm_page_background_external_count; -extern uint32_t vm_page_background_mode; -extern uint32_t vm_page_background_exclude_external; -extern uint64_t vm_page_background_promoted_count; +extern uint32_t vm_page_background_count; +extern uint32_t vm_page_background_target; +extern uint32_t vm_page_background_internal_count; +extern uint32_t vm_page_background_external_count; +extern uint32_t 
vm_page_background_mode; +extern uint32_t vm_page_background_exclude_external; +extern uint64_t vm_page_background_promoted_count; extern uint64_t vm_pageout_rejected_bq_internal; extern uint64_t vm_pageout_rejected_bq_external; @@ -3488,8 +3644,7 @@ sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg int new_value, changed; int error = sysctl_io_number(req, vm_darkwake_mode, sizeof(int), &new_value, &changed); - if ( !error && changed) { - + if (!error && changed) { if (new_value != 0 && new_value != 1) { printf("Error: Invalid value passed to darkwake sysctl. Acceptable: 0 or 1.\n"); error = EINVAL; @@ -3498,22 +3653,22 @@ sysctl_toggle_darkwake_mode(__unused struct sysctl_oid *oidp, __unused void *arg } } - return(error); + return error; } SYSCTL_PROC(_vm, OID_AUTO, darkwake_mode, - CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - 0, 0, sysctl_toggle_darkwake_mode, "I", ""); + CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, + 0, 0, sysctl_toggle_darkwake_mode, "I", ""); #if (DEVELOPMENT || DEBUG) SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_hard, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_page_creation_throttled_hard, 0, ""); + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vm_page_creation_throttled_hard, 0, ""); SYSCTL_UINT(_vm, OID_AUTO, vm_page_creation_throttled_soft, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vm_page_creation_throttled_soft, 0, ""); + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vm_page_creation_throttled_soft, 0, ""); extern uint32_t vm_pageout_memorystatus_fb_factor_nr; extern uint32_t vm_pageout_memorystatus_fb_factor_dr; @@ -3562,38 +3717,38 @@ SYSCTL_UINT(_debug, OID_AUTO, oslog_s_dropped_msgcount, CTLFLAG_ANYBODY | CTLFLA */ extern uint32_t ipc_voucher_trace_contents; -SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents, - CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents"); +SYSCTL_INT(_kern, OID_AUTO, ipc_voucher_trace_contents, + CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents"); /* * Kernel stack size and depth */ -SYSCTL_INT (_kern, OID_AUTO, stack_size, - CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size"); -SYSCTL_INT (_kern, OID_AUTO, stack_depth_max, - CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch"); +SYSCTL_INT(_kern, OID_AUTO, stack_size, + CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size"); +SYSCTL_INT(_kern, OID_AUTO, stack_depth_max, + CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch"); extern unsigned int kern_feature_overrides; -SYSCTL_INT (_kern, OID_AUTO, kern_feature_overrides, - CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask"); +SYSCTL_INT(_kern, OID_AUTO, kern_feature_overrides, + CTLFLAG_RD | CTLFLAG_LOCKED, &kern_feature_overrides, 0, "Kernel feature override mask"); /* * enable back trace for port allocations */ extern int ipc_portbt; -SYSCTL_INT(_kern, OID_AUTO, ipc_portbt, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &ipc_portbt, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, ipc_portbt, + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &ipc_portbt, 0, ""); /* * Scheduler sysctls */ SYSCTL_STRING(_kern, OID_AUTO, sched, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - sched_string, sizeof(sched_string), - "Timeshare scheduler implementation"); + CTLFLAG_RD | 
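
/*
 * [Editor's illustration -- not part of the patch.] vm.darkwake_mode above
 * accepts only 0 or 1 and fails any other write with EINVAL after logging.
 * Toggling it from user space (privileged; sketch only):
 */

#include <sys/sysctl.h>

static int
set_darkwake_mode(int enable)
{
    int val = enable ? 1 : 0;
    return sysctlbyname("vm.darkwake_mode", NULL, NULL, &val, sizeof(val));
}
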
CTLFLAG_KERN | CTLFLAG_LOCKED, + sched_string, sizeof(sched_string), + "Timeshare scheduler implementation"); #if CONFIG_QUIESCE_COUNTER static int @@ -3602,8 +3757,9 @@ sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS #pragma unused(arg1, arg2) int error = sysctl_handle_int(oidp, &cpu_checkin_min_interval_us, 0, req); - if (error || !req->newptr) + if (error || !req->newptr) { return error; + } cpu_quiescent_counter_set_min_interval_us(cpu_checkin_min_interval_us); @@ -3611,10 +3767,10 @@ sysctl_cpu_quiescent_counter_interval SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, - sysctl_cpu_quiescent_counter_interval, "I", - "Quiescent CPU checkin interval (microseconds)"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, + sysctl_cpu_quiescent_counter_interval, "I", + "Quiescent CPU checkin interval (microseconds)"); #endif /* CONFIG_QUIESCE_COUNTER */ @@ -3625,9 +3781,9 @@ SYSCTL_PROC(_kern, OID_AUTO, cpu_checkin_interval, #if CONFIG_EMBEDDED #if !SECURE_KERNEL extern int precise_user_kernel_time; -SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time, - CTLFLAG_RW | CTLFLAG_LOCKED, - &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time"); +SYSCTL_INT(_kern, OID_AUTO, precise_user_kernel_time, + CTLFLAG_RW | CTLFLAG_LOCKED, + &precise_user_kernel_time, 0, "Precise accounting of kernel vs. user time"); #endif #endif @@ -3641,28 +3797,32 @@ STATIC int sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS { #pragma unused(oidp) - int size = arg2; /* subcommand*/ + int size = arg2; /* subcommand*/ int error; int changed = 0; uint64_t old_value_ns; uint64_t new_value_ns; uint64_t value_abstime; - if (size == sizeof(uint32_t)) + if (size == sizeof(uint32_t)) { value_abstime = *((uint32_t *)arg1); - else if (size == sizeof(uint64_t)) + } else if (size == sizeof(uint64_t)) { value_abstime = *((uint64_t *)arg1); - else return ENOTSUP; + } else { + return ENOTSUP; + } absolutetime_to_nanoseconds(value_abstime, &old_value_ns); error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed); - if ((error) || (!changed)) + if ((error) || (!changed)) { return error; + } nanoseconds_to_absolutetime(new_value_ns, &value_abstime); - if (size == sizeof(uint32_t)) + if (size == sizeof(uint32_t)) { *((uint32_t *)arg1) = (uint32_t)value_abstime; - else + } else { *((uint64_t *)arg1) = value_abstime; + } return error; } @@ -3700,8 +3860,8 @@ SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale, SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max, CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, &tcoal_prio_params.timer_coalesce_fp_abstime_max, - sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max), - sysctl_timer_user_us_kernel_abstime, + sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max), + sysctl_timer_user_us_kernel_abstime, "Q", ""); SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale, @@ -3786,7 +3946,8 @@ SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max, */ static int -timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { +timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ int new_value = 0, old_value = 0, changed = 0, error; old_value = timer_get_user_idle_level(); @@ -3794,8 +3955,9 @@ timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __u error = sysctl_io_number(req, 
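
/*
 * [Editor's illustration -- not part of the patch.]
 * sysctl_timer_user_us_kernel_abstime above stores the tunable in mach
 * absolute time but presents nanoseconds to user space, round-tripping
 * through the clock KPIs. The two conversions it leans on:
 */

#include <kern/clock.h>

static uint64_t
example_ns_to_abstime(uint64_t ns)
{
    uint64_t abstime;
    nanoseconds_to_absolutetime(ns, &abstime);
    return abstime;
}

static uint64_t
example_abstime_to_ns(uint64_t abstime)
{
    uint64_t ns;
    absolutetime_to_nanoseconds(abstime, &ns);
    return ns;
}
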
old_value, sizeof(int), &new_value, &changed); if (error == 0 && changed) { - if (timer_set_user_idle_level(new_value) != KERN_SUCCESS) + if (timer_set_user_idle_level(new_value) != KERN_SUCCESS) { error = ERANGE; + } } return error; @@ -3807,9 +3969,9 @@ SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level, timer_user_idle_level, "I", "User idle level heuristic, 0-128"); #if HYPERVISOR -SYSCTL_INT(_kern, OID_AUTO, hv_support, - CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, - &hv_support_available, 0, ""); +SYSCTL_INT(_kern, OID_AUTO, hv_support, + CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, + &hv_support_available, 0, ""); #endif #if CONFIG_EMBEDDED @@ -3880,8 +4042,8 @@ exit: } SYSCTL_PROC(_kern, OID_AUTO, darkboot, - CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, - 0, 0, sysctl_darkboot, "I", ""); + CTLFLAG_KERN | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, + 0, 0, sysctl_darkboot, "I", ""); #endif #if DEVELOPMENT || DEBUG @@ -3890,7 +4052,8 @@ SYSCTL_PROC(_kern, OID_AUTO, darkboot, * write-protected. */ static int -kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { +kern_sysent_write(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ uint64_t new_value = 0, old_value = 0; int changed = 0, error; @@ -3923,48 +4086,48 @@ static int sysctl_panic_test SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2) - int rval = 0; - char str[32] = "entry prelog postlog postcore"; - - rval = sysctl_handle_string(oidp, str, sizeof(str), req); - - if (rval == 0 && req->newptr) { - if (strncmp("entry", str, strlen("entry")) == 0) { - panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry"); - } else if (strncmp("prelog", str, strlen("prelog")) == 0) { - panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog"); - } else if (strncmp("postlog", str, strlen("postlog")) == 0) { - panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog"); - } else if (strncmp("postcore", str, strlen("postcore")) == 0) { - panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core"); - } - } - - return rval; + int rval = 0; + char str[32] = "entry prelog postlog postcore"; + + rval = sysctl_handle_string(oidp, str, sizeof(str), req); + + if (rval == 0 && req->newptr) { + if (strncmp("entry", str, strlen("entry")) == 0) { + panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_ENTRY, "test recursive panic at entry"); + } else if (strncmp("prelog", str, strlen("prelog")) == 0) { + panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_PRELOG, "test recursive panic prior to writing a paniclog"); + } else if (strncmp("postlog", str, strlen("postlog")) == 0) { + panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTLOG, "test recursive panic subsequent to paniclog"); + } else if (strncmp("postcore", str, strlen("postcore")) == 0) { + panic_with_options(0, NULL, DEBUGGER_OPTION_RECURPANIC_POSTCORE, "test recursive panic subsequent to on-device core"); + } + } + + return rval; } static int sysctl_debugger_test SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2) - int rval = 0; - char str[32] = "entry prelog postlog postcore"; - - rval = sysctl_handle_string(oidp, str, sizeof(str), req); - - if (rval == 0 && req->newptr) { - if (strncmp("entry", str, strlen("entry")) == 0) { - 
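
/*
 * [Editor's illustration -- not part of the patch.] debug.panic_test and
 * debug.debugger_test (DEVELOPMENT || DEBUG kernels only) dispatch on the
 * string written to them: "entry", "prelog", "postlog" or "postcore" pick
 * the stage at which the recursive panic is injected. Driving it from user
 * space -- note this intentionally panics the machine:
 */

#include <sys/sysctl.h>
#include <string.h>

static int
trigger_panic_test(char *stage)     /* e.g. "entry" */
{
    return sysctlbyname("debug.panic_test", NULL, NULL, stage, strlen(stage));
}
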
DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY); - } else if (strncmp("prelog", str, strlen("prelog")) == 0) { - DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG); - } else if (strncmp("postlog", str, strlen("postlog")) == 0) { - DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG); - } else if (strncmp("postcore", str, strlen("postcore")) == 0) { - DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE); - } - } - - return rval; + int rval = 0; + char str[32] = "entry prelog postlog postcore"; + + rval = sysctl_handle_string(oidp, str, sizeof(str), req); + + if (rval == 0 && req->newptr) { + if (strncmp("entry", str, strlen("entry")) == 0) { + DebuggerWithContext(0, NULL, "test recursive panic via debugger at entry", DEBUGGER_OPTION_RECURPANIC_ENTRY); + } else if (strncmp("prelog", str, strlen("prelog")) == 0) { + DebuggerWithContext(0, NULL, "test recursive panic via debugger prior to writing a paniclog", DEBUGGER_OPTION_RECURPANIC_PRELOG); + } else if (strncmp("postlog", str, strlen("postlog")) == 0) { + DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to paniclog", DEBUGGER_OPTION_RECURPANIC_POSTLOG); + } else if (strncmp("postcore", str, strlen("postcore")) == 0) { + DebuggerWithContext(0, NULL, "test recursive panic via debugger subsequent to on-device core", DEBUGGER_OPTION_RECURPANIC_POSTCORE); + } + } + + return rval; } decl_lck_spin_data(, spinlock_panic_test_lock) @@ -3974,15 +4137,18 @@ static void spinlock_panic_test_acquire_spinlock(void * arg __unused, wait_result_t wres __unused) { lck_spin_lock(&spinlock_panic_test_lock); - while (1) { ; } + while (1) { + ; + } } static int sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->newlen == 0) + if (req->newlen == 0) { return EINVAL; + } thread_t panic_spinlock_thread; /* Initialize panic spinlock */ @@ -3991,7 +4157,7 @@ sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS lck_attr_t * panic_spinlock_attr; panic_spinlock_grp_attr = lck_grp_attr_alloc_init(); - panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr); + panic_spinlock_grp = lck_grp_alloc_init("panic_spinlock", panic_spinlock_grp_attr); panic_spinlock_attr = lck_attr_alloc_init(); lck_spin_init(&spinlock_panic_test_lock, panic_spinlock_grp, panic_spinlock_attr); @@ -4004,7 +4170,9 @@ sysctl_spinlock_panic_test SYSCTL_HANDLER_ARGS /* Try to acquire spinlock -- should panic eventually */ lck_spin_lock(&spinlock_panic_test_lock); - while(1) { ; } + while (1) { + ; + } } __attribute__((noreturn)) @@ -4014,7 +4182,9 @@ simultaneous_panic_worker { atomic_int *start_panic = (atomic_int *)arg; - while (!atomic_load(start_panic)) { ; } + while (!atomic_load(start_panic)) { + ; + } panic("SIMULTANEOUS PANIC TEST: INITIATING PANIC FROM CPU %d", cpu_number()); __builtin_unreachable(); } @@ -4023,8 +4193,9 @@ static int sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->newlen == 0) + if (req->newlen == 0) { return EINVAL; + } int i = 0, threads_to_create = 2 * processor_count; atomic_int start_panic = 0; @@ -4040,11 +4211,13 @@ sysctl_simultaneous_panic_test SYSCTL_HANDLER_ARGS /* FAIL if we couldn't create at least processor_count threads */ if (threads_created 
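
/*
 * [Editor's illustration -- not part of the patch.]
 * sysctl_spinlock_panic_test above shows the full lck_* setup sequence: a
 * group attribute, a named group, a lock attribute, then the lock itself.
 * In miniature:
 */

static void
example_spinlock_setup(void)
{
    lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
    lck_grp_t *grp = lck_grp_alloc_init("example_grp", grp_attr);
    lck_attr_t *attr = lck_attr_alloc_init();
    static lck_spin_t example_lock;

    lck_spin_init(&example_lock, grp, attr);
    lck_spin_lock(&example_lock);
    /* holders must not block; the panic test spins forever deliberately */
    lck_spin_unlock(&example_lock);
}
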
< processor_count) { panic("SIMULTANEOUS PANIC TEST: FAILED TO CREATE ENOUGH THREADS, ONLY CREATED %d (of %d)", - threads_created, threads_to_create); + threads_created, threads_to_create); } atomic_exchange(&start_panic, 1); - while (1) { ; } + while (1) { + ; + } } SYSCTL_PROC(_debug, OID_AUTO, panic_test, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_MASKED, 0, 0, sysctl_panic_test, "A", "panic test"); @@ -4062,10 +4235,10 @@ SYSCTL_INT(_kern, OID_AUTO, exc_resource_threads_enabled, CTLFLAG_RD | CTLFLAG_L const uint32_t thread_groups_supported = 0; STATIC int -sysctl_thread_groups_supported (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +sysctl_thread_groups_supported(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { int value = thread_groups_supported; - return sysctl_io_number(req, value, sizeof(value), NULL, NULL); + return sysctl_io_number(req, value, sizeof(value), NULL, NULL); } SYSCTL_PROC(_kern, OID_AUTO, thread_groups_supported, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_KERN, @@ -4097,9 +4270,9 @@ sysctl_grade_cputype SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, grade_cputype, - CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_MASKED|CTLFLAG_LOCKED|CTLTYPE_OPAQUE, - 0, 0, &sysctl_grade_cputype, "S", - "grade value of cpu_type_t+cpu_sub_type_t"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED | CTLTYPE_OPAQUE, + 0, 0, &sysctl_grade_cputype, "S", + "grade value of cpu_type_t+cpu_sub_type_t"); #if DEVELOPMENT || DEBUG @@ -4124,32 +4297,32 @@ SYSCTL_PROC(_kern, OID_AUTO, unwedge_thread, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLF extern uintptr_t phys_carveout_pa; SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_pa, CTLFLAG_RD | CTLFLAG_LOCKED, - &phys_carveout_pa, - "base physical address of the phys_carveout_mb boot-arg region"); + &phys_carveout_pa, + "base physical address of the phys_carveout_mb boot-arg region"); extern size_t phys_carveout_size; SYSCTL_LONG(_kern, OID_AUTO, phys_carveout_size, CTLFLAG_RD | CTLFLAG_LOCKED, - &phys_carveout_size, - "size in bytes of the phys_carveout_mb boot-arg region"); + &phys_carveout_size, + "size in bytes of the phys_carveout_mb boot-arg region"); static int wedge_thread SYSCTL_HANDLER_ARGS { -#pragma unused(arg1, arg2) - - int error, val = 0; +#pragma unused(arg1, arg2) + + int error, val = 0; error = sysctl_handle_int(oidp, &val, 0, req); if (error || val == 0) { - return error; + return error; } - + uint64_t interval = 1; nanoseconds_to_absolutetime(1000 * 1000 * 50, &interval); atomic_store(&wedge_thread_should_wake, 0); while (!atomic_load(&wedge_thread_should_wake)) { - tsleep1(NULL, 0, "wedge_thread", mach_absolute_time()+interval, NULL); + tsleep1(NULL, 0, "wedge_thread", mach_absolute_time() + interval, NULL); } - + return 0; } @@ -4194,10 +4367,10 @@ sysctl_turnstile_test_prim_unlock SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_lock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock"); + 0, 0, sysctl_turnstile_test_prim_lock, "I", "turnstiles test lock"); SYSCTL_PROC(_kern, OID_AUTO, turnstiles_test_unlock, CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock"); + 0, 0, sysctl_turnstile_test_prim_unlock, "I", "turnstiles test unlock"); int turnstile_get_boost_stats_sysctl(void *req); @@ -4225,15 +4398,15 @@ sysctl_turnstile_unboost_stats 
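
/*
 * [Editor's illustration -- not part of the patch.] wedge_thread above naps
 * in 50 ms tsleep1() slices until debug.unwedge_thread flips
 * wedge_thread_should_wake. The deadline arithmetic it repeats each lap:
 */

static uint64_t
example_deadline_in_50ms(void)
{
    uint64_t interval;

    /* 50 ms expressed in mach absolute time units */
    nanoseconds_to_absolutetime(50 * NSEC_PER_MSEC, &interval);
    return mach_absolute_time() + interval;
}
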
SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, turnstile_boost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT, - 0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats"); + 0, 0, sysctl_turnstile_boost_stats, "S", "turnstiles boost stats"); SYSCTL_PROC(_kern, OID_AUTO, turnstile_unboost_stats, CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLTYPE_STRUCT, - 0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats"); + 0, 0, sysctl_turnstile_unboost_stats, "S", "turnstiles unboost stats"); SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_turnstile, - CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, - &thread_block_on_turnstile_count, "thread blocked on turnstile count"); + CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, + &thread_block_on_turnstile_count, "thread blocked on turnstile count"); SYSCTL_QUAD(_kern, OID_AUTO, thread_block_count_on_reg_waitq, - CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, - &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count"); + CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, + &thread_block_on_regular_waitq_count, "thread blocked on regular waitq count"); static int sysctl_lck_mtx_test_lock SYSCTL_HANDLER_ARGS @@ -4298,8 +4471,9 @@ sysctl_get_test_mtx_stats SYSCTL_HANDLER_ARGS buffer_size = 1000; buffer = kalloc(buffer_size); - if (!buffer) + if (!buffer) { panic("Impossible to allocate memory for %s\n", __func__); + } lck_mtx_test_init(); @@ -4350,8 +4524,9 @@ sysctl_test_mtx_uncontended SYSCTL_HANDLER_ARGS buffer_size = 2000; offset = 0; buffer = kalloc(buffer_size); - if (!buffer) + if (!buffer) { panic("Impossible to allocate memory for %s\n", __func__); + } memset(buffer, 0, buffer_size); printf("%s starting uncontended mutex test with %d iterations\n", __func__, iter); @@ -4410,8 +4585,9 @@ sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS buffer_size = 1000; offset = 0; buffer = kalloc(buffer_size); - if (!buffer) + if (!buffer) { panic("Impossible to allocate memory for %s\n", __func__); + } memset(buffer, 0, buffer_size); printf("%s starting contended mutex test with %d iterations\n", __func__, iter); @@ -4432,22 +4608,27 @@ sysctl_test_mtx_contended SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_lock, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_lck_mtx_test_lock, "I", "lck mtx test lock"); + 0, 0, sysctl_lck_mtx_test_lock, "I", "lck mtx test lock"); -SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_unlock, CTLFLAG_WR | CTLFLAG_MASKED |CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_lck_mtx_test_unlock, "I", "lck mtx test unlock"); +SYSCTL_PROC(_kern, OID_AUTO, lck_mtx_test_unlock, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_lck_mtx_test_unlock, "I", "lck mtx test unlock"); -SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED |CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics"); +SYSCTL_PROC(_kern, OID_AUTO, erase_all_test_mtx_stats, CTLFLAG_WR | CTLFLAG_MASKED | CTLFLAG_ANYBODY | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_erase_all_test_mtx_stats, "I", "erase test_mtx statistics"); -SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED| CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx 
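
/*
 * [Editor's illustration -- not part of the patch.] The mtx-test handlers
 * above stage their report text in a kalloc'd scratch buffer; the matching
 * release (elsewhere in this file) is kfree() with the same size. The
 * pairing, in miniature:
 */

static void
example_scratch_buffer(void)
{
    size_t buffer_size = 1000;
    char *buffer = kalloc(buffer_size);

    if (buffer == NULL) {
        return;
    }
    memset(buffer, 0, buffer_size);
    /* ... snprintf into buffer, SYSCTL_OUT it ... */
    kfree(buffer, buffer_size);
}
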
statistics"); +SYSCTL_PROC(_kern, OID_AUTO, get_test_mtx_stats, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_get_test_mtx_stats, "A", "get test_mtx statistics"); SYSCTL_PROC(_kern, OID_AUTO, test_mtx_contended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test"); + 0, 0, sysctl_test_mtx_contended, "A", "get statistics for contended mtx test"); SYSCTL_PROC(_kern, OID_AUTO, test_mtx_uncontended, CTLTYPE_STRING | CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test"); + 0, 0, sysctl_test_mtx_uncontended, "A", "get statistics for uncontended mtx test"); + +extern uint64_t MutexSpin; + +SYSCTL_QUAD(_kern, OID_AUTO, mutex_spin_us, CTLFLAG_RW, &MutexSpin, + "Spin time for acquiring a kernel mutex"); #if defined (__x86_64__) @@ -4467,7 +4648,9 @@ panic_thread_test_child_spin(void * arg, wait_result_t wres) } semaphore_signal(sysctl_test_panic_with_thread_sem); - while (1) { ; } + while (1) { + ; + } } #pragma clang diagnostic pop @@ -4521,6 +4704,6 @@ sysctl_test_panic_with_thread SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, test_panic_with_thread, CTLFLAG_MASKED | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_WR | CTLTYPE_STRING, - 0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread"); + 0, 0, sysctl_test_panic_with_thread, "A", "test panic flow for backtracing a different thread"); #endif /* defined (__x86_64__) */ #endif /* DEVELOPMENT || DEBUG */ diff --git a/bsd/kern/kern_time.c b/bsd/kern/kern_time.c index ba2e6c990..d1d3c498e 100644 --- a/bsd/kern/kern_time.c +++ b/bsd/kern/kern_time.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -90,17 +90,18 @@ #endif #include #include +#include -#define HZ 100 /* XXX */ +#define HZ 100 /* XXX */ /* simple lock used to access timezone, tz structure */ lck_spin_t * tz_slock; lck_grp_t * tz_slock_grp; lck_attr_t * tz_slock_attr; -lck_grp_attr_t *tz_slock_grp_attr; +lck_grp_attr_t *tz_slock_grp_attr; -static void setthetime( - struct timeval *tv); +static void setthetime( + struct timeval *tv); void time_zone_slock_init(void); static boolean_t timeval_fixusec(struct timeval *t1); @@ -117,9 +118,9 @@ static boolean_t timeval_fixusec(struct timeval *t1); /* ARGSUSED */ int gettimeofday( - struct proc *p, - struct gettimeofday_args *uap, - __unused int32_t *retval) + struct proc *p, + struct gettimeofday_args *uap, + __unused int32_t *retval) { int error = 0; struct timezone ltz; /* local copy */ @@ -179,15 +180,16 @@ settimeofday(__unused struct proc *p, struct settimeofday_args *uap, __unused i /* Check that this task is entitled to set the time or it is root */ if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) { - #if CONFIG_MACF error = mac_system_check_settime(kauth_cred_get()); - if (error) - return (error); + if (error) { + return error; + } #endif #ifndef CONFIG_EMBEDDED - if ((error = suser(kauth_cred_get(), &p->p_acflag))) - return (error); + if ((error = suser(kauth_cred_get(), &p->p_acflag))) { + return error; + } #endif } @@ -204,17 +206,21 @@ settimeofday(__unused struct proc *p, struct settimeofday_args *uap, __unused i atv.tv_sec = user_atv.tv_sec; atv.tv_usec = user_atv.tv_usec; } - if (error) - return (error); + if (error) { + return error; + } + } + if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz)))) { + return error; } - if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz)))) - return (error); if (uap->tv) { /* only positive values of sec/usec are accepted */ - if (atv.tv_sec < 0 || atv.tv_usec < 0) - return (EPERM); - if (!timeval_fixusec(&atv)) - return (EPERM); + if (atv.tv_sec < 0 || atv.tv_usec < 0) { + return EPERM; + } + if (!timeval_fixusec(&atv)) { + return EPERM; + } setthetime(&atv); } if (uap->tzp) { @@ -222,12 +228,12 @@ settimeofday(__unused struct proc *p, struct settimeofday_args *uap, __unused i tz = atz; lck_spin_unlock(tz_slock); } - return (0); + return 0; } static void setthetime( - struct timeval *tv) + struct timeval *tv) { clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec); } @@ -238,9 +244,9 @@ setthetime( */ void inittodr( - __unused time_t base) + __unused time_t base) { - struct timeval tv; + struct timeval tv; /* * Assertion: @@ -253,8 +259,8 @@ inittodr( microtime(&tv); if (tv.tv_sec < 0 || tv.tv_usec < 0) { - printf ("WARNING: preposterous time in Real Time Clock"); - tv.tv_sec = 0; /* the UNIX epoch */ + printf("WARNING: preposterous time in Real Time Clock"); + tv.tv_sec = 0; /* the UNIX epoch */ tv.tv_usec = 0; setthetime(&tv); printf(" -- CHECK AND RESET THE DATE!\n"); @@ -264,18 +270,18 @@ inittodr( time_t boottime_sec(void) { - clock_sec_t secs; - clock_nsec_t nanosecs; + clock_sec_t secs; + clock_nsec_t nanosecs; clock_get_boottime_nanotime(&secs, &nanosecs); - return (secs); + return secs; } void boottime_timeval(struct timeval *tv) { - clock_sec_t secs; - clock_usec_t microsecs; + clock_sec_t secs; + clock_usec_t microsecs; clock_get_boottime_microtime(&secs, µsecs); @@ -311,14 +317,14 @@ getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval) { struct itimerval aitv; - if (uap->which > ITIMER_PROF) - return(EINVAL); + if 
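
/*
 * [Editor's illustration -- not part of the patch.] settimeofday() above
 * accepts only non-negative seconds and microseconds, then normalizes an
 * oversized tv_usec via timeval_fixusec() (defined later in this file),
 * which rejects the value if folding whole seconds out of tv_usec would
 * overflow tv_sec. The combined check, condensed:
 */

#include <os/overflow.h>

static int
example_valid_new_time(struct timeval *tv)
{
    if (tv->tv_sec < 0 || tv->tv_usec < 0) {
        return 0;   /* settimeofday() returns EPERM for these */
    }
    if (tv->tv_usec >= 1000000) {
        if (os_add_overflow(tv->tv_sec, tv->tv_usec / 1000000, &tv->tv_sec)) {
            return 0;
        }
        tv->tv_usec %= 1000000;
    }
    return 1;
}
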
(uap->which > ITIMER_PROF) { + return EINVAL; + } bzero(&aitv, sizeof(aitv)); proc_spinlock(p); switch (uap->which) { - case ITIMER_REAL: /* * If time for real time timer has passed return 0, @@ -327,18 +333,18 @@ getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval) */ aitv = p->p_realtimer; if (timerisset(&p->p_rtime)) { - struct timeval now; + struct timeval now; microuptime(&now); - if (timercmp(&p->p_rtime, &now, <)) + if (timercmp(&p->p_rtime, &now, <)) { timerclear(&aitv.it_value); - else { + } else { aitv.it_value = p->p_rtime; timevalsub(&aitv.it_value, &now); } - } - else + } else { timerclear(&aitv.it_value); + } break; case ITIMER_VIRTUAL: @@ -354,20 +360,20 @@ getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval) if (IS_64BIT_PROCESS(p)) { struct user64_itimerval user_itv; - bzero(&user_itv, sizeof (user_itv)); + bzero(&user_itv, sizeof(user_itv)); user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec; user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec; user_itv.it_value.tv_sec = aitv.it_value.tv_sec; user_itv.it_value.tv_usec = aitv.it_value.tv_usec; - return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv))); + return copyout((caddr_t)&user_itv, uap->itv, sizeof(user_itv)); } else { struct user32_itimerval user_itv; - bzero(&user_itv, sizeof (user_itv)); + bzero(&user_itv, sizeof(user_itv)); user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec; user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec; user_itv.it_value.tv_sec = aitv.it_value.tv_sec; user_itv.it_value.tv_usec = aitv.it_value.tv_usec; - return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv))); + return copyout((caddr_t)&user_itv, uap->itv, sizeof(user_itv)); } } @@ -388,36 +394,41 @@ setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval) bzero(&aitv, sizeof(aitv)); - if (uap->which > ITIMER_PROF) - return (EINVAL); + if (uap->which > ITIMER_PROF) { + return EINVAL; + } if ((itvp = uap->itv)) { if (IS_64BIT_PROCESS(p)) { struct user64_itimerval user_itv; - if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (user_itv)))) - return (error); + if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof(user_itv)))) { + return error; + } aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec; aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec; aitv.it_value.tv_sec = user_itv.it_value.tv_sec; aitv.it_value.tv_usec = user_itv.it_value.tv_usec; - } else { + } else { struct user32_itimerval user_itv; - if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (user_itv)))) - return (error); + if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof(user_itv)))) { + return error; + } aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec; aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec; aitv.it_value.tv_sec = user_itv.it_value.tv_sec; aitv.it_value.tv_usec = user_itv.it_value.tv_usec; } } - if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval))) - return (error); - if (itvp == 0) - return (0); - if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval)) - return (EINVAL); + if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval))) { + return error; + } + if (itvp == 0) { + return 0; + } + if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval)) { + return EINVAL; + } switch (uap->which) { - case ITIMER_REAL: proc_spinlock(p); if (timerisset(&aitv.it_value)) { @@ -425,13 +436,15 @@ setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval) 
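
/*
 * [Editor's illustration -- not part of the patch.] setitimer() above
 * bounds its inputs with itimerfix() (just below): both timevals must have
 * 0 <= tv_sec <= 100000000 and 0 <= tv_usec < 1000000. A user space call
 * that stays within those limits:
 */

#include <sys/time.h>

static int
arm_profiling_timer(void)
{
    struct itimerval itv;

    itv.it_value.tv_sec = 0;
    itv.it_value.tv_usec = 10000;   /* first fire in 10 ms */
    itv.it_interval = itv.it_value; /* then every 10 ms */
    return setitimer(ITIMER_PROF, &itv, NULL);
}
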
timevaladd(&p->p_rtime, &aitv.it_value); p->p_realtimer = aitv; if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL, - tvtoabstime(&p->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL)) + tvtoabstime(&p->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL)) { p->p_ractive++; - } else { + } + } else { timerclear(&p->p_rtime); p->p_realtimer = aitv; - if (thread_call_cancel(p->p_rcall)) + if (thread_call_cancel(p->p_rcall)) { p->p_ractive--; + } } proc_spinunlock(p); @@ -439,10 +452,11 @@ setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval) case ITIMER_VIRTUAL: - if (timerisset(&aitv.it_value)) + if (timerisset(&aitv.it_value)) { task_vtimer_set(p->task, TASK_VTIMER_USER); - else + } else { task_vtimer_clear(p->task, TASK_VTIMER_USER); + } proc_spinlock(p); p->p_vtimer_user = aitv; @@ -450,10 +464,11 @@ setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval) break; case ITIMER_PROF: - if (timerisset(&aitv.it_value)) + if (timerisset(&aitv.it_value)) { task_vtimer_set(p->task, TASK_VTIMER_PROF); - else + } else { task_vtimer_clear(p->task, TASK_VTIMER_PROF); + } proc_spinlock(p); p->p_vtimer_prof = aitv; @@ -461,7 +476,7 @@ setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval) break; } - return (0); + return 0; } /* @@ -492,8 +507,9 @@ realitexpire( */ proc_spinunlock(p); - if (r != NULL) + if (r != NULL) { proc_rele(r); + } return; } @@ -537,8 +553,9 @@ realitexpire( if ((p->p_rtime.tv_sec + 2) >= t.tv_sec) { for (;;) { timevaladd(&p->p_rtime, &p->p_realtimer.it_interval); - if (timercmp(&p->p_rtime, &t, >)) + if (timercmp(&p->p_rtime, &t, >)) { break; + } } } else { p->p_rtime = p->p_realtimer.it_interval; @@ -549,7 +566,7 @@ realitexpire( assert(p->p_rcall != NULL); if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL, tvtoabstime(&p->p_rtime), 0, - THREAD_CALL_DELAY_USER_NORMAL)) { + THREAD_CALL_DELAY_USER_NORMAL)) { p->p_ractive++; } @@ -604,11 +621,11 @@ int itimerfix( struct timeval *tv) { - if (tv->tv_sec < 0 || tv->tv_sec > 100000000 || - tv->tv_usec < 0 || tv->tv_usec >= 1000000) - return (EINVAL); - return (0); + tv->tv_usec < 0 || tv->tv_usec >= 1000000) { + return EINVAL; + } + return 0; } int @@ -617,7 +634,7 @@ timespec_is_valid(const struct timespec *ts) /* The INT32_MAX limit ensures the timespec is safe for clock_*() functions * which accept 32-bit ints. 
*/ if (ts->tv_sec < 0 || ts->tv_sec > INT32_MAX || - ts->tv_nsec < 0 || (unsigned long long)ts->tv_nsec > NSEC_PER_SEC) { + ts->tv_nsec < 0 || (unsigned long long)ts->tv_nsec > NSEC_PER_SEC) { return 0; } return 1; @@ -635,11 +652,10 @@ timespec_is_valid(const struct timespec *ts) */ int itimerdecr(proc_t p, - struct itimerval *itp, int usec) + struct itimerval *itp, int usec) { - proc_spinlock(p); - + if (itp->it_value.tv_usec < usec) { if (itp->it_value.tv_sec == 0) { /* expired, and already in next interval */ @@ -653,23 +669,24 @@ itimerdecr(proc_t p, usec = 0; if (timerisset(&itp->it_value)) { proc_spinunlock(p); - return (1); + return 1; } /* expired, exactly at end of interval */ expire: if (timerisset(&itp->it_interval)) { itp->it_value = itp->it_interval; if (itp->it_value.tv_sec > 0) { - itp->it_value.tv_usec -= usec; - if (itp->it_value.tv_usec < 0) { - itp->it_value.tv_usec += 1000000; - itp->it_value.tv_sec--; + itp->it_value.tv_usec -= usec; + if (itp->it_value.tv_usec < 0) { + itp->it_value.tv_usec += 1000000; + itp->it_value.tv_sec--; } } - } else - itp->it_value.tv_usec = 0; /* sec is already 0 */ + } else { + itp->it_value.tv_usec = 0; /* sec is already 0 */ + } proc_spinunlock(p); - return (0); + return 0; } /* @@ -684,7 +701,6 @@ timevaladd( struct timeval *t1, struct timeval *t2) { - t1->tv_sec += t2->tv_sec; t1->tv_usec += t2->tv_usec; timevalfix(t1); @@ -694,7 +710,6 @@ timevalsub( struct timeval *t1, struct timeval *t2) { - t1->tv_sec -= t2->tv_sec; t1->tv_usec -= t2->tv_usec; timevalfix(t1); @@ -703,7 +718,6 @@ void timevalfix( struct timeval *t1) { - if (t1->tv_usec < 0) { t1->tv_sec--; t1->tv_usec += 1000000; @@ -722,8 +736,9 @@ timeval_fixusec( assert(t1->tv_sec >= 0); if (t1->tv_usec >= 1000000) { - if (os_add_overflow(t1->tv_sec, t1->tv_usec / 1000000, &t1->tv_sec)) + if (os_add_overflow(t1->tv_sec, t1->tv_usec / 1000000, &t1->tv_sec)) { return FALSE; + } t1->tv_usec = t1->tv_usec % 1000000; } @@ -736,10 +751,10 @@ timeval_fixusec( */ void microtime( - struct timeval *tvp) + struct timeval *tvp) { - clock_sec_t tv_sec; - clock_usec_t tv_usec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; clock_get_calendar_microtime(&tv_sec, &tv_usec); @@ -749,10 +764,10 @@ microtime( void microtime_with_abstime( - struct timeval *tvp, uint64_t *abstime) + struct timeval *tvp, uint64_t *abstime) { - clock_sec_t tv_sec; - clock_usec_t tv_usec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; clock_get_calendar_absolute_and_microtime(&tv_sec, &tv_usec, abstime); @@ -762,10 +777,10 @@ microtime_with_abstime( void microuptime( - struct timeval *tvp) + struct timeval *tvp) { - clock_sec_t tv_sec; - clock_usec_t tv_usec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; clock_get_system_microtime(&tv_sec, &tv_usec); @@ -780,8 +795,8 @@ void nanotime( struct timespec *tsp) { - clock_sec_t tv_sec; - clock_nsec_t tv_nsec; + clock_sec_t tv_sec; + clock_nsec_t tv_nsec; clock_get_calendar_nanotime(&tv_sec, &tv_nsec); @@ -793,8 +808,8 @@ void nanouptime( struct timespec *tsp) { - clock_sec_t tv_sec; - clock_nsec_t tv_nsec; + clock_sec_t tv_sec; + clock_nsec_t tv_nsec; clock_get_system_nanotime(&tv_sec, &tv_nsec); @@ -804,16 +819,16 @@ nanouptime( uint64_t tvtoabstime( - struct timeval *tvp) + struct timeval *tvp) { - uint64_t result, usresult; + uint64_t result, usresult; clock_interval_to_absolutetime_interval( - tvp->tv_sec, NSEC_PER_SEC, &result); + tvp->tv_sec, NSEC_PER_SEC, &result); clock_interval_to_absolutetime_interval( - tvp->tv_usec, NSEC_PER_USEC, &usresult); + tvp->tv_usec, 
NSEC_PER_USEC, &usresult); - return (result + usresult); + return result + usresult; } uint64_t @@ -849,7 +864,7 @@ ratecheck(struct timeval *lasttime, const struct timeval *mininterval) rv = 1; } - return (rv); + return rv; } /* @@ -878,17 +893,19 @@ ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps) *lasttime = tv; *curpps = 0; rv = 1; - } else if (maxpps < 0) + } else if (maxpps < 0) { rv = 1; - else if (*curpps < maxpps) + } else if (*curpps < maxpps) { rv = 1; - else + } else { rv = 0; + } #if 1 /* DIAGNOSTIC? */ /* be careful about wrap-around */ - if (*curpps + 1 > 0) + if (*curpps + 1 > 0) { *curpps = *curpps + 1; + } #else /* * assume that there's not too many calls to this function. @@ -900,7 +917,7 @@ ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps) *curpps = *curpps + 1; #endif - return (rv); + return rv; } #endif /* NETWORKING */ @@ -919,3 +936,9 @@ time_zone_slock_init(void) tz_slock = lck_spin_alloc_init(tz_slock_grp, tz_slock_attr); } +int +__mach_bridge_remote_time(__unused struct proc *p, struct __mach_bridge_remote_time_args *mbrt_args, uint64_t *retval) +{ + *retval = mach_bridge_remote_time(mbrt_args->local_timestamp); + return 0; +} diff --git a/bsd/kern/kern_xxx.c b/bsd/kern/kern_xxx.c index 889ccd6c7..386d06971 100644 --- a/bsd/kern/kern_xxx.c +++ b/bsd/kern/kern_xxx.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
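
/*
 * [Editor's illustration -- not part of the patch.] ppsratecheck() above is
 * the events-per-second throttle used to rate-limit things like log
 * messages: it returns nonzero while fewer than maxpps events have occurred
 * in the current one-second window. Typical use, with hypothetical static
 * state:
 */

static struct timeval example_lasttime; /* hypothetical */
static int example_curpps;              /* hypothetical */

static void
example_rate_limited_log(void)
{
    if (ppsratecheck(&example_lasttime, &example_curpps, 10)) {
        printf("noteworthy event\n");   /* at most ~10 lines per second */
    }
}
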
All Rights Reserved */ @@ -92,8 +92,8 @@ int reboot(struct proc *p, struct reboot_args *uap, __unused int32_t *retval) { char message[256]; - int error=0; - size_t dummy=0; + int error = 0; + size_t dummy = 0; #if CONFIG_MACF kauth_cred_t my_cred; #endif @@ -105,31 +105,34 @@ reboot(struct proc *p, struct reboot_args *uap, __unused int32_t *retval) if ((error = suser(kauth_cred_get(), &p->p_acflag))) { #if (DEVELOPMENT || DEBUG) /* allow non-root user to call panic on dev/debug kernels */ - if (!(uap->opt & RB_PANIC)) + if (!(uap->opt & RB_PANIC)) { return error; + } #else return error; #endif } - if (uap->opt & RB_COMMAND) - return ENOSYS; + if (uap->opt & RB_COMMAND) { + return ENOSYS; + } - if (uap->opt & RB_PANIC && uap->msg != USER_ADDR_NULL) { + if (uap->opt & RB_PANIC && uap->msg != USER_ADDR_NULL) { if (copyinstr(uap->msg, (void *)message, sizeof(message), (size_t *)&dummy)) { - strncpy(message, "user space RB_PANIC message copyin failed", sizeof(message)-1); + strncpy(message, "user space RB_PANIC message copyin failed", sizeof(message) - 1); } - } + } #if CONFIG_MACF #if (DEVELOPMENT || DEBUG) - if (uap->opt & RB_PANIC) { + if (uap->opt & RB_PANIC) { /* on dev/debug kernels: allow anyone to call panic */ goto skip_cred_check; } #endif - if (error) - return (error); + if (error) { + return error; + } my_cred = kauth_cred_proc_ref(p); error = mac_system_check_reboot(my_cred, uap->opt); kauth_cred_unref(&my_cred); @@ -153,8 +156,9 @@ usrctl(struct proc *p, __unused struct usrctl_args *uap, __unused int32_t *retva int error = 0; error = pshm_cache_purge_all(p); - if (error) + if (error) { return error; + } error = psem_cache_purge_all(p); return error; diff --git a/bsd/kern/kpi_mbuf.c b/bsd/kern/kpi_mbuf.c index 2d8e97be2..7fb3d23d1 100644 --- a/bsd/kern/kpi_mbuf.c +++ b/bsd/kern/kpi_mbuf.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2004-2018 Apple Inc. All rights reserved. + * Copyright (c) 2004-2019 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#define __KPI__ +#define __KPI__ #include #include @@ -50,9 +50,9 @@ static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR | /* Unalterable mbuf flags */ static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT); -#define MAX_MBUF_TX_COMPL_FUNC 32 +#define MAX_MBUF_TX_COMPL_FUNC 32 mbuf_tx_compl_func -mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC]; + mbuf_tx_compl_table[MAX_MBUF_TX_COMPL_FUNC]; extern lck_rw_t *mbuf_tx_compl_tbl_lock; u_int32_t mbuf_tx_compl_index = 0; @@ -77,43 +77,47 @@ SYSCTL_QUAD(_kern_ipc_mbtxcf, OID_AUTO, aborted, void * mbuf_data(mbuf_t mbuf) { - return (mbuf->m_data); + return mbuf->m_data; } void * mbuf_datastart(mbuf_t mbuf) { - if (mbuf->m_flags & M_EXT) - return (mbuf->m_ext.ext_buf); - if (mbuf->m_flags & M_PKTHDR) - return (mbuf->m_pktdat); - return (mbuf->m_dat); + if (mbuf->m_flags & M_EXT) { + return mbuf->m_ext.ext_buf; + } + if (mbuf->m_flags & M_PKTHDR) { + return mbuf->m_pktdat; + } + return mbuf->m_dat; } errno_t mbuf_setdata(mbuf_t mbuf, void *data, size_t len) { - size_t start = (size_t)((char *)mbuf_datastart(mbuf)); - size_t maxlen = mbuf_maxlen(mbuf); + size_t start = (size_t)((char *)mbuf_datastart(mbuf)); + size_t maxlen = mbuf_maxlen(mbuf); - if ((size_t)data < start || ((size_t)data) + len > start + maxlen) - return (EINVAL); + if ((size_t)data < start || ((size_t)data) + len > start + maxlen) { + return EINVAL; + } mbuf->m_data = data; mbuf->m_len = len; - return (0); + return 0; } errno_t mbuf_align_32(mbuf_t mbuf, size_t len) { - if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) - return (ENOTSUP); + if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf)) { + return ENOTSUP; + } mbuf->m_data = mbuf_datastart(mbuf); mbuf->m_data += - ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1)); + ((mbuf_trailingspace(mbuf) - len) & ~(sizeof(u_int32_t) - 1)); - return (0); + return 0; } /* @@ -124,7 +128,7 @@ mbuf_align_32(mbuf_t mbuf, size_t len) addr64_t mbuf_data_to_physical(void *ptr) { - return ((addr64_t)mcl_to_paddr(ptr)); + return (addr64_t)mcl_to_paddr(ptr); } errno_t @@ -133,7 +137,7 @@ mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf) /* Must set *mbuf to NULL in failure case */ *mbuf = m_get(how, type); - return (*mbuf == NULL ? ENOMEM : 0); + return *mbuf == NULL ? ENOMEM : 0; } errno_t @@ -142,7 +146,7 @@ mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf) /* Must set *mbuf to NULL in failure case */ *mbuf = m_gethdr(how, type); - return (*mbuf == NULL ? ENOMEM : 0); + return *mbuf == NULL ? 
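
/*
 * [Editor's illustration -- not part of the patch.] The KPI wrappers above
 * return an errno_t and hand the mbuf back through an out-parameter that is
 * NULL on failure. A minimal allocate/inspect/free cycle (mbuf_freem() is
 * defined further down in this file):
 */

#include <sys/kpi_mbuf.h>

static errno_t
example_mbuf_roundtrip(void)
{
    mbuf_t m = NULL;
    errno_t err = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &m);

    if (err != 0) {
        return err;     /* m is NULL here */
    }
    /* the active data pointer starts somewhere inside the buffer */
    void *start = mbuf_datastart(m);
    void *data = mbuf_data(m);
    (void)start;
    (void)data;

    mbuf_freem(m);
    return 0;
}
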
ENOMEM : 0; } errno_t @@ -150,14 +154,16 @@ mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf, caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t), size_t extsize, caddr_t extarg) { - if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) - return (EINVAL); + if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0) { + return EINVAL; + } if ((*mbuf = m_clattach(*mbuf, type, extbuf, - extfree, extsize, extarg, how, 0)) == NULL) - return (ENOMEM); + extfree, extsize, extarg, how, 0)) == NULL) { + return ENOMEM; + } - return (0); + return 0; } errno_t @@ -167,115 +173,128 @@ mbuf_ring_cluster_alloc(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf, caddr_t extbuf = NULL; errno_t err; - if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) - return (EINVAL); + if (mbuf == NULL || extfree == NULL || size == NULL || *size == 0) { + return EINVAL; + } - if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) - return (err); + if ((err = mbuf_alloccluster(how, size, &extbuf)) != 0) { + return err; + } if ((*mbuf = m_clattach(*mbuf, type, extbuf, extfree, *size, NULL, how, 1)) == NULL) { mbuf_freecluster(extbuf, *size); - return (ENOMEM); + return ENOMEM; } - return (0); + return 0; } int mbuf_ring_cluster_is_active(mbuf_t mbuf) { - return (m_ext_paired_is_active(mbuf)); + return m_ext_paired_is_active(mbuf); } errno_t mbuf_ring_cluster_activate(mbuf_t mbuf) { - if (mbuf_ring_cluster_is_active(mbuf)) - return (EBUSY); + if (mbuf_ring_cluster_is_active(mbuf)) { + return EBUSY; + } m_ext_paired_activate(mbuf); - return (0); + return 0; } errno_t mbuf_cluster_set_prop(mbuf_t mbuf, u_int32_t oldprop, u_int32_t newprop) { - if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) - return (EINVAL); + if (mbuf == NULL || !(mbuf->m_flags & M_EXT)) { + return EINVAL; + } - return (m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY); + return m_ext_set_prop(mbuf, oldprop, newprop) ? 0 : EBUSY; } errno_t mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop) { - if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) - return (EINVAL); + if (mbuf == NULL || prop == NULL || !(mbuf->m_flags & M_EXT)) { + return EINVAL; + } *prop = m_ext_get_prop(mbuf); - return (0); + return 0; } errno_t mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr) { - if (size == NULL || *size == 0 || addr == NULL) - return (EINVAL); + if (size == NULL || *size == 0 || addr == NULL) { + return EINVAL; + } *addr = NULL; /* Jumbo cluster pool not available? 
*/ - if (*size > MBIGCLBYTES && njcl == 0) - return (ENOTSUP); + if (*size > MBIGCLBYTES && njcl == 0) { + return ENOTSUP; + } - if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) + if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL) { *size = MCLBYTES; - else if (*size > MCLBYTES && *size <= MBIGCLBYTES && - (*addr = m_bigalloc(how)) != NULL) + } else if (*size > MCLBYTES && *size <= MBIGCLBYTES && + (*addr = m_bigalloc(how)) != NULL) { *size = MBIGCLBYTES; - else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES && - (*addr = m_16kalloc(how)) != NULL) + } else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES && + (*addr = m_16kalloc(how)) != NULL) { *size = M16KCLBYTES; - else + } else { *size = 0; + } - if (*addr == NULL) - return (ENOMEM); + if (*addr == NULL) { + return ENOMEM; + } - return (0); + return 0; } void mbuf_freecluster(caddr_t addr, size_t size) { - if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) + if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES) { panic("%s: invalid size (%ld) for cluster %p", __func__, size, (void *)addr); + } - if (size == MCLBYTES) + if (size == MCLBYTES) { m_mclfree(addr); - else if (size == MBIGCLBYTES) + } else if (size == MBIGCLBYTES) { m_bigfree(addr, MBIGCLBYTES, NULL); - else if (njcl > 0) + } else if (njcl > 0) { m_16kfree(addr, M16KCLBYTES, NULL); - else + } else { panic("%s: freeing jumbo cluster to an empty pool", __func__); + } } errno_t mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf) { /* Must set *mbuf to NULL in failure case */ - errno_t error = 0; - int created = 0; + errno_t error = 0; + int created = 0; - if (mbuf == NULL) - return (EINVAL); + if (mbuf == NULL) { + return EINVAL; + } if (*mbuf == NULL) { *mbuf = m_get(how, type); - if (*mbuf == NULL) - return (ENOMEM); + if (*mbuf == NULL) { + return ENOMEM; + } created = 1; } /* @@ -298,28 +317,31 @@ mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf) error = EINVAL; goto out; } - if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) + if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) { error = ENOMEM; + } out: if (created && error != 0) { mbuf_free(*mbuf); *mbuf = NULL; } - return (error); + return error; } errno_t mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf) { /* Must set *mbuf to NULL in failure case */ - errno_t error = 0; - int created = 0; - if (mbuf == NULL) - return (EINVAL); + errno_t error = 0; + int created = 0; + if (mbuf == NULL) { + return EINVAL; + } if (*mbuf == NULL) { error = mbuf_get(how, type, mbuf); - if (error) - return (error); + if (error) { + return error; + } created = 1; } @@ -333,9 +355,10 @@ mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf) mbuf_free(*mbuf); *mbuf = NULL; } - if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) + if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0) { error = ENOMEM; - return (error); + } + return error; } @@ -343,18 +366,19 @@ errno_t mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf) { /* Must set *mbuf to NULL in failure case */ - errno_t error = 0; + errno_t error = 0; *mbuf = m_getpacket_how(how); if (*mbuf == NULL) { - if (how == MBUF_WAITOK) + if (how == MBUF_WAITOK) { error = ENOMEM; - else + } else { error = EWOULDBLOCK; + } } - return (error); + return error; } /* @@ -364,7 +388,7 @@ mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf) mbuf_t mbuf_free(mbuf_t mbuf) { - return (m_free(mbuf)); + return m_free(mbuf); } /* @@ -380,13 +404,13 @@ mbuf_freem(mbuf_t mbuf) int mbuf_freem_list(mbuf_t 
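
/*
 * [Editor's illustration -- not part of the patch.] mbuf_alloccluster()
 * above rounds a request up to one of three cluster classes -- MCLBYTES
 * (2 KB), MBIGCLBYTES (4 KB), or M16KCLBYTES (16 KB, only when a jumbo pool
 * exists) -- and reports the class actually granted through *size.
 * mbuf_freecluster() must then be called with that same size:
 */

static void
example_cluster_cycle(void)
{
    size_t size = 3000;     /* rounds up to MBIGCLBYTES */
    caddr_t buf = NULL;

    if (mbuf_alloccluster(MBUF_WAITOK, &size, &buf) == 0) {
        /* ... use buf[0 .. size) ... */
        mbuf_freecluster(buf, size);
    }
}
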
mbuf) { - return (m_freem_list(mbuf)); + return m_freem_list(mbuf); } size_t mbuf_leadingspace(const mbuf_t mbuf) { - return (M_LEADINGSPACE(mbuf)); + return M_LEADINGSPACE(mbuf); } /* @@ -397,7 +421,7 @@ mbuf_leadingspace(const mbuf_t mbuf) size_t mbuf_trailingspace(const mbuf_t mbuf) { - return (M_TRAILINGSPACE(mbuf)); + return M_TRAILINGSPACE(mbuf); } /* Manipulation */ @@ -408,7 +432,7 @@ mbuf_copym(const mbuf_t src, size_t offset, size_t len, /* Must set *mbuf to NULL in failure case */ *new_mbuf = m_copym(src, offset, len, how); - return (*new_mbuf == NULL ? ENOMEM : 0); + return *new_mbuf == NULL ? ENOMEM : 0; } errno_t @@ -417,7 +441,7 @@ mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf) /* Must set *new_mbuf to NULL in failure case */ *new_mbuf = m_dup(src, how); - return (*new_mbuf == NULL ? ENOMEM : 0); + return *new_mbuf == NULL ? ENOMEM : 0; } errno_t @@ -426,17 +450,17 @@ mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how) /* Must set *orig to NULL in failure case */ *orig = m_prepend_2(*orig, len, how, 0); - return (*orig == NULL ? ENOMEM : 0); + return *orig == NULL ? ENOMEM : 0; } errno_t mbuf_split(mbuf_t src, size_t offset, - mbuf_how_t how, mbuf_t *new_mbuf) + mbuf_how_t how, mbuf_t *new_mbuf) { /* Must set *new_mbuf to NULL in failure case */ *new_mbuf = m_split(src, offset, how); - return (*new_mbuf == NULL ? ENOMEM : 0); + return *new_mbuf == NULL ? ENOMEM : 0; } errno_t @@ -445,7 +469,7 @@ mbuf_pullup(mbuf_t *mbuf, size_t len) /* Must set *mbuf to NULL in failure case */ *mbuf = m_pullup(*mbuf, len); - return (*mbuf == NULL ? ENOMEM : 0); + return *mbuf == NULL ? ENOMEM : 0; } errno_t @@ -456,7 +480,7 @@ mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location) *location = m_pulldown(src, *offset, len, &new_offset); *offset = new_offset; - return (*location == NULL ? ENOMEM : 0); + return *location == NULL ? ENOMEM : 0; } /* @@ -477,45 +501,50 @@ mbuf_adjustlen(mbuf_t m, int amount) int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) + m->m_len; - if ((size_t)(amount + used) > mbuf_maxlen(m)) - return (EINVAL); + if ((size_t)(amount + used) > mbuf_maxlen(m)) { + return EINVAL; + } } else if (-amount > m->m_len) { - return (EINVAL); + return EINVAL; } m->m_len += amount; - return (0); + return 0; } mbuf_t mbuf_concatenate(mbuf_t dst, mbuf_t src) { - if (dst == NULL) - return (NULL); + if (dst == NULL) { + return NULL; + } m_cat(dst, src); /* return dst as is in the current implementation */ - return (dst); + return dst; } errno_t mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data) { /* Copied m_copydata, added error handling (don't just panic) */ int count; - mbuf_t m = m0; + mbuf_t m = m0; while (off > 0) { - if (m == 0) - return (EINVAL); - if (off < (size_t)m->m_len) + if (m == 0) { + return EINVAL; + } + if (off < (size_t)m->m_len) { break; + } off -= m->m_len; m = m->m_next; } while (len > 0) { - if (m == 0) - return (EINVAL); + if (m == 0) { + return EINVAL; + } count = m->m_len - off > len ? 
len : m->m_len - off; bcopy(mtod(m, caddr_t) + off, out_data, count); len -= count; @@ -524,16 +553,17 @@ mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void *out_data) m = m->m_next; } - return (0); + return 0; } int mbuf_mclhasreference(mbuf_t mbuf) { - if ((mbuf->m_flags & M_EXT)) - return (m_mclhasreference(mbuf)); - else - return (0); + if ((mbuf->m_flags & M_EXT)) { + return m_mclhasreference(mbuf); + } else { + return 0; + } } @@ -541,24 +571,25 @@ mbuf_mclhasreference(mbuf_t mbuf) mbuf_t mbuf_next(const mbuf_t mbuf) { - return (mbuf->m_next); + return mbuf->m_next; } errno_t mbuf_setnext(mbuf_t mbuf, mbuf_t next) { if (next && ((next)->m_nextpkt != NULL || - (next)->m_type == MT_FREE)) - return (EINVAL); + (next)->m_type == MT_FREE)) { + return EINVAL; + } mbuf->m_next = next; - return (0); + return 0; } mbuf_t mbuf_nextpkt(const mbuf_t mbuf) { - return (mbuf->m_nextpkt); + return mbuf->m_nextpkt; } void @@ -570,7 +601,7 @@ mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt) size_t mbuf_len(const mbuf_t mbuf) { - return (mbuf->m_len); + return mbuf->m_len; } void @@ -582,32 +613,34 @@ mbuf_setlen(mbuf_t mbuf, size_t len) size_t mbuf_maxlen(const mbuf_t mbuf) { - if (mbuf->m_flags & M_EXT) - return (mbuf->m_ext.ext_size); - return (&mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf))); + if (mbuf->m_flags & M_EXT) { + return mbuf->m_ext.ext_size; + } + return &mbuf->m_dat[MLEN] - ((char *)mbuf_datastart(mbuf)); } mbuf_type_t mbuf_type(const mbuf_t mbuf) { - return (mbuf->m_type); + return mbuf->m_type; } errno_t mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type) { - if (new_type == MBUF_TYPE_FREE) - return (EINVAL); + if (new_type == MBUF_TYPE_FREE) { + return EINVAL; + } m_mchtype(mbuf, new_type); - return (0); + return 0; } mbuf_flags_t mbuf_flags(const mbuf_t mbuf) { - return (mbuf->m_flags & mbuf_flags_mask); + return mbuf->m_flags & mbuf_flags_mask; } errno_t @@ -637,13 +670,13 @@ mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags) * bit, as well as the rest of bookkeeping. */ if ((oflags ^ mbuf->m_flags) & M_PKTHDR) { - mbuf->m_flags ^= M_PKTHDR; /* restore */ + mbuf->m_flags ^= M_PKTHDR; /* restore */ ret = m_reinit(mbuf, (mbuf->m_flags & M_PKTHDR) ? 0 : 1); } } - return (ret); + return ret; } errno_t @@ -662,30 +695,43 @@ mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask) * bit, as well as the rest of bookkeeping. */ if ((oflags ^ mbuf->m_flags) & M_PKTHDR) { - mbuf->m_flags ^= M_PKTHDR; /* restore */ + mbuf->m_flags ^= M_PKTHDR; /* restore */ ret = m_reinit(mbuf, (mbuf->m_flags & M_PKTHDR) ? 0 : 1); } } - return (ret); + return ret; } errno_t mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src) { - if (((src)->m_flags & M_PKTHDR) == 0) - return (EINVAL); + if (((src)->m_flags & M_PKTHDR) == 0) { + return EINVAL; + } m_copy_pkthdr(dest, src); - return (0); + return 0; } size_t mbuf_pkthdr_len(const mbuf_t mbuf) { - return (mbuf->m_pkthdr.len); + if (((mbuf)->m_flags & M_PKTHDR) == 0) { + return 0; + } + /* + * While we ASSERT() on development and debug builds, + * also make sure we never return a negative length + * on release builds. 
+ */ + ASSERT(mbuf->m_pkthdr.len >= 0); + if (mbuf->m_pkthdr.len < 0) { + return 0; + } + return mbuf->m_pkthdr.len; } __private_extern__ size_t @@ -698,12 +744,16 @@ mbuf_pkthdr_maxlen(mbuf_t m) maxlen += mbuf_maxlen(n); n = mbuf_next(n); } - return (maxlen); + return maxlen; } void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len) { + if (len > INT32_MAX) { + len = INT32_MAX; + } + mbuf->m_pkthdr.len = len; } @@ -720,7 +770,7 @@ mbuf_pkthdr_rcvif(const mbuf_t mbuf) * If we reference count ifnets, we should take a reference here * before returning */ - return (mbuf->m_pkthdr.rcvif); + return mbuf->m_pkthdr.rcvif; } errno_t @@ -728,13 +778,13 @@ mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet) { /* May want to walk ifnet list to determine if interface is valid */ mbuf->m_pkthdr.rcvif = (struct ifnet *)ifnet; - return (0); + return 0; } void* mbuf_pkthdr_header(const mbuf_t mbuf) { - return (mbuf->m_pkthdr.pkt_hdr); + return mbuf->m_pkthdr.pkt_hdr; } void @@ -783,7 +833,7 @@ mbuf_set_vlan_tag( mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID; mbuf->m_pkthdr.vlan_tag = vlan; - return (0); + return 0; } errno_t @@ -791,12 +841,12 @@ mbuf_get_vlan_tag( mbuf_t mbuf, u_int16_t *vlan) { - if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) - return (ENXIO); // No vlan tag set - + if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) { + return ENXIO; // No vlan tag set + } *vlan = mbuf->m_pkthdr.vlan_tag; - return (0); + return 0; } errno_t @@ -806,7 +856,7 @@ mbuf_clear_vlan_tag( mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID; mbuf->m_pkthdr.vlan_tag = 0; - return (0); + return 0; } static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags = @@ -824,11 +874,11 @@ mbuf_set_csum_requested( (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request; mbuf->m_pkthdr.csum_data = value; - return (0); + return 0; } static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags = - MBUF_TSO_IPV4 | MBUF_TSO_IPV6; + MBUF_TSO_IPV4 | MBUF_TSO_IPV6; errno_t mbuf_get_tso_requested( @@ -837,15 +887,17 @@ mbuf_get_tso_requested( u_int32_t *value) { if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || - request == NULL || value == NULL) - return (EINVAL); + request == NULL || value == NULL) { + return EINVAL; + } *request = mbuf->m_pkthdr.csum_flags; *request &= mbuf_valid_tso_request_flags; - if (*request && value != NULL) + if (*request && value != NULL) { *value = mbuf->m_pkthdr.tso_segsz; + } - return (0); + return 0; } errno_t @@ -860,7 +912,7 @@ mbuf_get_csum_requested( *value = mbuf->m_pkthdr.csum_data; } - return (0); + return 0; } errno_t @@ -870,12 +922,12 @@ mbuf_clear_csum_requested( mbuf->m_pkthdr.csum_flags &= 0xffff0000; mbuf->m_pkthdr.csum_data = 0; - return (0); + return 0; } static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags = - MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA | - MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL; + MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA | + MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL; errno_t mbuf_set_csum_performed( @@ -888,7 +940,7 @@ mbuf_set_csum_performed( (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed; mbuf->m_pkthdr.csum_data = value; - return (0); + return 0; } errno_t @@ -901,7 +953,7 @@ mbuf_get_csum_performed( mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags; *value = mbuf->m_pkthdr.csum_data; - return (0); + return 0; } errno_t @@ -911,7 +963,7 @@ mbuf_clear_csum_performed( mbuf->m_pkthdr.csum_flags &= 0xffff0000; mbuf->m_pkthdr.csum_data = 0; - return (0); + return 0; } 
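For context on the checksum KPIs reworked in the hunks above, a minimal sketch of a consumer, assuming the <kpi_mbuf.h> interfaces shown here; the helper name rx_mark_csum_verified() is hypothetical and not part of this patch:

/*
 * Hypothetical driver helper (illustration only, not from this commit):
 * marks an inbound packet as already checksum-verified by hardware so
 * the stack can skip recomputing the IP and payload checksums.
 */
static void
rx_mark_csum_verified(mbuf_t m)
{
	mbuf_set_csum_performed(m,
	    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD |
	    MBUF_CSUM_DID_DATA | MBUF_CSUM_PSEUDO_HDR,
	    0xffff);	/* pseudo-header checksum result meaning "passed" */
}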
errno_t @@ -919,11 +971,12 @@ mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length, u_int16_t *csum) { if (mbuf == NULL || length == 0 || csum == NULL || - (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) - return (EINVAL); + (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) { + return EINVAL; + } *csum = inet_cksum(mbuf, protocol, offset, length); - return (0); + return 0; } #if INET6 @@ -932,28 +985,29 @@ mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length, u_int16_t *csum) { if (mbuf == NULL || length == 0 || csum == NULL || - (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) - return (EINVAL); + (u_int32_t)mbuf->m_pkthdr.len < (offset + length)) { + return EINVAL; + } *csum = inet6_cksum(mbuf, protocol, offset, length); - return (0); + return 0; } #else /* INET6 */ errno_t mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol, - __unused u_int32_t offset, __unused u_int32_t length, - __unused u_int16_t *csum) + __unused u_int32_t offset, __unused u_int32_t length, + __unused u_int16_t *csum) { panic("mbuf_inet6_cksum() doesn't exist on this platform\n"); - return (0); + return 0; } u_int16_t inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt, - __unused unsigned int off, __unused unsigned int len) + __unused unsigned int off, __unused unsigned int len) { panic("inet6_cksum() doesn't exist on this platform\n"); - return (0); + return 0; } void nd6_lookup_ipv6(void); @@ -967,7 +1021,7 @@ int in6addr_local(__unused struct in6_addr *a) { panic("in6addr_local() doesn't exist on this platform\n"); - return (0); + return 0; } void nd6_storelladdr(void); @@ -982,30 +1036,31 @@ nd6_storelladdr(void) * Mbuf tag KPIs */ -#define MTAG_FIRST_ID FIRST_KPI_STR_ID +#define MTAG_FIRST_ID FIRST_KPI_STR_ID errno_t mbuf_tag_id_find( - const char *string, - mbuf_tag_id_t *out_id) + const char *string, + mbuf_tag_id_t *out_id) { - return (net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1)); + return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1); } errno_t mbuf_tag_allocate( - mbuf_t mbuf, - mbuf_tag_id_t id, - mbuf_tag_type_t type, - size_t length, - mbuf_how_t how, - void** data_p) + mbuf_t mbuf, + mbuf_tag_id_t id, + mbuf_tag_type_t type, + size_t length, + mbuf_how_t how, + void** data_p) { struct m_tag *tag; u_int32_t mtag_id_first, mtag_id_last; - if (data_p != NULL) + if (data_p != NULL) { *data_p = NULL; + } /* Sanity check parameters */ (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, @@ -1013,26 +1068,26 @@ mbuf_tag_allocate( if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first || id > mtag_id_last || length < 1 || (length & 0xffff0000) != 0 || data_p == NULL) { - return (EINVAL); + return EINVAL; } /* Make sure this mtag hasn't already been allocated */ tag = m_tag_locate(mbuf, id, type, NULL); if (tag != NULL) { - return (EEXIST); + return EEXIST; } /* Allocate an mtag */ tag = m_tag_create(id, type, length, how, mbuf); if (tag == NULL) { - return (how == M_WAITOK ? ENOMEM : EWOULDBLOCK); + return how == M_WAITOK ? 
ENOMEM : EWOULDBLOCK; } /* Attach the mtag and set *data_p */ m_tag_prepend(mbuf, tag); *data_p = tag + 1; - return (0); + return 0; } errno_t @@ -1046,10 +1101,12 @@ mbuf_tag_find( struct m_tag *tag; u_int32_t mtag_id_first, mtag_id_last; - if (length != NULL) + if (length != NULL) { *length = 0; - if (data_p != NULL) + } + if (data_p != NULL) { *data_p = NULL; + } /* Sanity check parameters */ (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, @@ -1057,27 +1114,27 @@ mbuf_tag_find( if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first || id > mtag_id_last || length == NULL || data_p == NULL) { - return (EINVAL); + return EINVAL; } /* Locate an mtag */ tag = m_tag_locate(mbuf, id, type, NULL); if (tag == NULL) { - return (ENOENT); + return ENOENT; } /* Copy out the pointer to the data and the length value */ *length = tag->m_tag_len; *data_p = tag + 1; - return (0); + return 0; } void mbuf_tag_free( - mbuf_t mbuf, - mbuf_tag_id_t id, - mbuf_tag_type_t type) + mbuf_t mbuf, + mbuf_tag_id_t id, + mbuf_tag_type_t type) { struct m_tag *tag; u_int32_t mtag_id_first, mtag_id_last; @@ -1086,8 +1143,9 @@ mbuf_tag_free( (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG); if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || - id < mtag_id_first || id > mtag_id_last) + id < mtag_id_first || id > mtag_id_last) { return; + } tag = m_tag_locate(mbuf, id, type, NULL); if (tag == NULL) { @@ -1103,8 +1161,8 @@ mbuf_tag_free( * the nearest 64-bit boundary. This takes into account mbuf * tag-related (m_taghdr + m_tag) as well as m_drvaux_tag structs. */ -#define MBUF_DRVAUX_MAXLEN \ - P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \ +#define MBUF_DRVAUX_MAXLEN \ + P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \ M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t)) errno_t @@ -1115,21 +1173,25 @@ mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family, struct m_tag *tag; if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || - length == 0 || length > MBUF_DRVAUX_MAXLEN) - return (EINVAL); + length == 0 || length > MBUF_DRVAUX_MAXLEN) { + return EINVAL; + } - if (data_p != NULL) + if (data_p != NULL) { *data_p = NULL; + } /* Check if one is already associated */ if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) - return (EEXIST); + KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) { + return EEXIST; + } /* Tag is (m_drvaux_tag + module specific data) */ if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX, - sizeof (*p) + length, how, mbuf)) == NULL) - return ((how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK); + sizeof(*p) + length, how, mbuf)) == NULL) { + return (how == MBUF_WAITOK) ? 
ENOMEM : EWOULDBLOCK; + } p = (struct m_drvaux_tag *)(tag + 1); p->da_family = family; @@ -1139,10 +1201,11 @@ mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family, /* Associate the tag */ m_tag_prepend(mbuf, tag); - if (data_p != NULL) + if (data_p != NULL) { *data_p = (p + 1); + } - return (0); + return 0; } errno_t @@ -1152,31 +1215,36 @@ mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p, struct m_drvaux_tag *p; struct m_tag *tag; - if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) - return (EINVAL); + if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL) { + return EINVAL; + } *data_p = NULL; if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL) - return (ENOENT); + KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL) { + return ENOENT; + } /* Must be at least size of m_drvaux_tag */ - VERIFY(tag->m_tag_len >= sizeof (*p)); + VERIFY(tag->m_tag_len >= sizeof(*p)); p = (struct m_drvaux_tag *)(tag + 1); VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN); - if (family_p != NULL) + if (family_p != NULL) { *family_p = p->da_family; - if (subfamily_p != NULL) + } + if (subfamily_p != NULL) { *subfamily_p = p->da_subfamily; - if (length_p != NULL) + } + if (length_p != NULL) { *length_p = p->da_length; + } *data_p = (p + 1); - return (0); + return 0; } void @@ -1184,12 +1252,14 @@ mbuf_del_drvaux(mbuf_t mbuf) { struct m_tag *tag; - if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) + if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR)) { return; + } if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) + KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL) { m_tag_delete(mbuf, tag); + } } /* mbuf stats */ @@ -1231,18 +1301,20 @@ mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0); if (m == 0) { - if (maxchunks && *maxchunks && numchunks > *maxchunks) + if (maxchunks && *maxchunks && numchunks > *maxchunks) { error = ENOBUFS; - else + } else { error = ENOMEM; + } } else { - if (maxchunks) + if (maxchunks) { *maxchunks = numchunks; + } error = 0; *mbuf = m; } out: - return (error); + return error; } errno_t @@ -1264,18 +1336,20 @@ mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen, m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? 
&numchunks : NULL, how, 1, 0); if (m == 0) { - if (maxchunks && *maxchunks && numchunks > *maxchunks) + if (maxchunks && *maxchunks && numchunks > *maxchunks) { error = ENOBUFS; - else + } else { error = ENOMEM; + } } else { - if (maxchunks) + if (maxchunks) { *maxchunks = numchunks; + } error = 0; *mbuf = m; } out: - return (error); + return error; } __private_extern__ size_t @@ -1288,7 +1362,7 @@ mbuf_pkt_list_len(mbuf_t m) len += mbuf_pkthdr_len(n); n = mbuf_nextpkt(n); } - return (len); + return len; } __private_extern__ size_t @@ -1301,7 +1375,7 @@ mbuf_pkt_list_maxlen(mbuf_t m) maxlen += mbuf_pkthdr_maxlen(n); n = mbuf_nextpkt(n); } - return (maxlen); + return maxlen; } /* @@ -1313,21 +1387,22 @@ mbuf_pkt_list_maxlen(mbuf_t m) */ errno_t mbuf_copyback( - mbuf_t m, - size_t off, - size_t len, - const void *data, - mbuf_how_t how) -{ - size_t mlen; - mbuf_t m_start = m; - mbuf_t n; - int totlen = 0; - errno_t result = 0; - const char *cp = data; - - if (m == NULL || len == 0 || data == NULL) - return (EINVAL); + mbuf_t m, + size_t off, + size_t len, + const void *data, + mbuf_how_t how) +{ + size_t mlen; + mbuf_t m_start = m; + mbuf_t n; + int totlen = 0; + errno_t result = 0; + const char *cp = data; + + if (m == NULL || len == 0 || data == NULL) { + return EINVAL; + } while (off > (mlen = m->m_len)) { off -= mlen; @@ -1348,7 +1423,7 @@ mbuf_copyback( mlen = MIN(m->m_len - off, len); if (mlen < len && m->m_next == NULL && mbuf_trailingspace(m) > 0) { - size_t grow = MIN(mbuf_trailingspace(m), len - mlen); + size_t grow = MIN(mbuf_trailingspace(m), len - mlen); mlen += grow; m->m_len += grow; } @@ -1358,8 +1433,9 @@ mbuf_copyback( mlen += off; off = 0; totlen += mlen; - if (len == 0) + if (len == 0) { break; + } if (m->m_next == 0) { n = m_get(how, m->m_type); if (n == NULL) { @@ -1380,107 +1456,115 @@ mbuf_copyback( } out: - if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) + if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen)) { m_start->m_pkthdr.len = totlen; + } - return (result); + return result; } u_int32_t mbuf_get_mlen(void) { - return (_MLEN); + return _MLEN; } u_int32_t mbuf_get_mhlen(void) { - return (_MHLEN); + return _MHLEN; } u_int32_t mbuf_get_minclsize(void) { - return (MHLEN + MLEN); + return MHLEN + MLEN; } u_int32_t mbuf_get_traffic_class_max_count(void) { - return (MBUF_TC_MAX); + return MBUF_TC_MAX; } errno_t mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index) { - if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) - return (EINVAL); + if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX) { + return EINVAL; + } *index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc))); - return (0); + return 0; } mbuf_traffic_class_t mbuf_get_traffic_class(mbuf_t m) { - if (m == NULL || !(m->m_flags & M_PKTHDR)) - return (MBUF_TC_BE); + if (m == NULL || !(m->m_flags & M_PKTHDR)) { + return MBUF_TC_BE; + } - return (m_get_traffic_class(m)); + return m_get_traffic_class(m); } errno_t mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc) { if (m == NULL || !(m->m_flags & M_PKTHDR) || - ((u_int32_t)tc >= MBUF_TC_MAX)) - return (EINVAL); + ((u_int32_t)tc >= MBUF_TC_MAX)) { + return EINVAL; + } - return (m_set_traffic_class(m, tc)); + return m_set_traffic_class(m, tc); } int mbuf_is_traffic_class_privileged(mbuf_t m) { if (m == NULL || !(m->m_flags & M_PKTHDR) || - !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) - return (0); + !MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) { + return 0; + } - return ((m->m_pkthdr.pkt_flags & 
PKTF_PRIO_PRIVILEGED) ? 1 : 0); + return (m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0; } u_int32_t mbuf_get_service_class_max_count(void) { - return (MBUF_SC_MAX_CLASSES); + return MBUF_SC_MAX_CLASSES; } errno_t mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index) { - if (index == NULL || !MBUF_VALID_SC(sc)) - return (EINVAL); + if (index == NULL || !MBUF_VALID_SC(sc)) { + return EINVAL; + } *index = MBUF_SCIDX(sc); - return (0); + return 0; } mbuf_svc_class_t mbuf_get_service_class(mbuf_t m) { - if (m == NULL || !(m->m_flags & M_PKTHDR)) - return (MBUF_SC_BE); + if (m == NULL || !(m->m_flags & M_PKTHDR)) { + return MBUF_SC_BE; + } - return (m_get_service_class(m)); + return m_get_service_class(m); } errno_t mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc) { - if (m == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } - return (m_set_service_class(m, sc)); + return m_set_service_class(m, sc); } errno_t @@ -1488,110 +1572,125 @@ mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp) { u_int32_t flags; - if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL) { + return EINVAL; + } *flagsp = 0; flags = m->m_pkthdr.pkt_flags; - if ((flags & (PKTF_INET_RESOLVE|PKTF_RESOLVE_RTR)) == - (PKTF_INET_RESOLVE|PKTF_RESOLVE_RTR)) + if ((flags & (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) == + (PKTF_INET_RESOLVE | PKTF_RESOLVE_RTR)) { *flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR; - if ((flags & (PKTF_INET6_RESOLVE|PKTF_RESOLVE_RTR)) == - (PKTF_INET6_RESOLVE|PKTF_RESOLVE_RTR)) + } + if ((flags & (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) == + (PKTF_INET6_RESOLVE | PKTF_RESOLVE_RTR)) { *flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR; + } /* These 2 flags are mutually exclusive */ VERIFY((*flagsp & (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) != (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)); - return (0); + return 0; } errno_t mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len) { if (m == NULL || area == NULL || area_len == NULL || - !(m->m_flags & M_PKTHDR)) - return (EINVAL); + !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } *area_len = m_scratch_get(m, area); - return (0); + return 0; } errno_t mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data) { - if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); + if (m == NULL || unsent_data == NULL || !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } - if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) - return (EINVAL); + if (!(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) { + return EINVAL; + } *unsent_data = m->m_pkthdr.bufstatus_if + m->m_pkthdr.bufstatus_sndbuf; - return (0); + return 0; } errno_t mbuf_get_buffer_status(const mbuf_t m, mbuf_buffer_status_t *buf_status) { if (m == NULL || buf_status == NULL || !(m->m_flags & M_PKTHDR) || - !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) - return (EINVAL); + !(m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA)) { + return EINVAL; + } buf_status->buf_interface = m->m_pkthdr.bufstatus_if; buf_status->buf_sndbuf = m->m_pkthdr.bufstatus_sndbuf; - return (0); + return 0; } errno_t mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval) { - if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); - if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) + if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) { + 
return EINVAL; + } + if (m->m_pkthdr.pkt_flags & PKTF_NEW_FLOW) { *retval = 1; - else + } else { *retval = 0; - return (0); + } + return 0; } errno_t mbuf_last_pkt(const mbuf_t m, u_int32_t *retval) { - if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); - if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) + if (m == NULL || retval == NULL || !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } + if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) { *retval = 1; - else + } else { *retval = 0; - return (0); + } + return 0; } errno_t mbuf_get_timestamp(mbuf_t m, u_int64_t *ts, boolean_t *valid) { - if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR) || ts == NULL) { + return EINVAL; + } if ((m->m_pkthdr.pkt_flags & PKTF_TS_VALID) == 0) { - if (valid != NULL) + if (valid != NULL) { *valid = FALSE; + } *ts = 0; } else { - if (valid != NULL) + if (valid != NULL) { *valid = TRUE; + } *ts = m->m_pkthdr.pkt_timestamp; } - return (0); + return 0; } errno_t mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid) { - if (m == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } if (valid == FALSE) { m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID; @@ -1600,21 +1699,22 @@ mbuf_set_timestamp(mbuf_t m, u_int64_t ts, boolean_t valid) m->m_pkthdr.pkt_flags |= PKTF_TS_VALID; m->m_pkthdr.pkt_timestamp = ts; } - return (0); + return 0; } errno_t mbuf_get_status(mbuf_t m, kern_return_t *status) { - if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR) || status == NULL) { + return EINVAL; + } if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) { *status = 0; } else { *status = m->m_pkthdr.drv_tx_status; } - return (0); + return 0; } static void @@ -1630,49 +1730,53 @@ driver_mtag_init(mbuf_t m) errno_t mbuf_set_status(mbuf_t m, kern_return_t status) { - if (m == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } driver_mtag_init(m); m->m_pkthdr.drv_tx_status = status; - return (0); + return 0; } errno_t mbuf_get_flowid(mbuf_t m, u_int16_t *flowid) { - if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR) || flowid == NULL) { + return EINVAL; + } if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) { *flowid = 0; } else { *flowid = m->m_pkthdr.drv_flowid; } - return (0); + return 0; } errno_t mbuf_set_flowid(mbuf_t m, u_int16_t flowid) { - if (m == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } driver_mtag_init(m); m->m_pkthdr.drv_flowid = flowid; - return (0); + return 0; } errno_t mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data) { if (m == NULL || !(m->m_flags & M_PKTHDR) || arg == NULL || - data == NULL) - return (EINVAL); + data == NULL) { + return EINVAL; + } if ((m->m_pkthdr.pkt_flags & PKTF_DRIVER_MTAG) == 0) { *arg = 0; @@ -1681,21 +1785,22 @@ mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg, uintptr_t *data) *arg = m->m_pkthdr.drv_tx_compl_arg; *data = m->m_pkthdr.drv_tx_compl_data; } - return (0); + return 0; } errno_t mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg, uintptr_t data) { - if (m == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } driver_mtag_init(m); m->m_pkthdr.drv_tx_compl_arg = arg; 
m->m_pkthdr.drv_tx_compl_data = data; - return (0); + return 0; } static u_int32_t @@ -1705,10 +1810,10 @@ get_tx_compl_callback_index_locked(mbuf_tx_compl_func callback) for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) { if (mbuf_tx_compl_table[i] == callback) { - return (i); + return i; } } - return (UINT32_MAX); + return UINT32_MAX; } static u_int32_t @@ -1722,7 +1827,7 @@ get_tx_compl_callback_index(mbuf_tx_compl_func callback) lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock); - return (i); + return i; } mbuf_tx_compl_func @@ -1732,12 +1837,12 @@ m_get_tx_compl_callback(u_int32_t idx) if (idx >= MAX_MBUF_TX_COMPL_FUNC) { ASSERT(0); - return (NULL); + return NULL; } lck_rw_lock_shared(mbuf_tx_compl_tbl_lock); cb = mbuf_tx_compl_table[idx]; lck_rw_unlock_shared(mbuf_tx_compl_tbl_lock); - return (cb); + return cb; } errno_t @@ -1746,8 +1851,9 @@ mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback) int i; errno_t error; - if (callback == NULL) - return (EINVAL); + if (callback == NULL) { + return EINVAL; + } lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock); @@ -1769,7 +1875,7 @@ mbuf_register_tx_compl_callback(mbuf_tx_compl_func callback) unlock: lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock); - return (error); + return error; } errno_t @@ -1778,8 +1884,9 @@ mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback) int i; errno_t error; - if (callback == NULL) - return (EINVAL); + if (callback == NULL) { + return EINVAL; + } lck_rw_lock_exclusive(mbuf_tx_compl_tbl_lock); @@ -1795,21 +1902,22 @@ mbuf_unregister_tx_compl_callback(mbuf_tx_compl_func callback) unlock: lck_rw_unlock_exclusive(mbuf_tx_compl_tbl_lock); - return (error); + return error; } errno_t mbuf_get_timestamp_requested(mbuf_t m, boolean_t *requested) { - if (m == NULL || !(m->m_flags & M_PKTHDR)) - return (EINVAL); + if (m == NULL || !(m->m_flags & M_PKTHDR)) { + return EINVAL; + } if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) { *requested = FALSE; } else { *requested = TRUE; } - return (0); + return 0; } errno_t @@ -1819,12 +1927,14 @@ mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid, size_t i; if (m == NULL || !(m->m_flags & M_PKTHDR) || callback == NULL || - pktid == NULL) - return (EINVAL); + pktid == NULL) { + return EINVAL; + } i = get_tx_compl_callback_index(callback); - if (i == UINT32_MAX) - return (ENOENT); + if (i == UINT32_MAX) { + return ENOENT; + } #if (DEBUG || DEVELOPMENT) VERIFY(i < sizeof(m->m_pkthdr.pkt_compl_callbacks)); @@ -1845,7 +1955,7 @@ mbuf_set_timestamp_requested(mbuf_t m, uintptr_t *pktid, m->m_pkthdr.pkt_compl_callbacks |= (1 << i); *pktid = m->m_pkthdr.pkt_compl_context; - return (0); + return 0; } void @@ -1853,11 +1963,13 @@ m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp) { int i; - if (m == NULL) + if (m == NULL) { return; + } - if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) + if ((m->m_pkthdr.pkt_flags & PKTF_TX_COMPL_TS_REQ) == 0) { return; + } #if (DEBUG || DEVELOPMENT) if (mbuf_tx_compl_debug != 0 && ifp != NULL && @@ -1873,8 +1985,9 @@ m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp) for (i = 0; i < MAX_MBUF_TX_COMPL_FUNC; i++) { mbuf_tx_compl_func callback; - if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) + if ((m->m_pkthdr.pkt_compl_callbacks & (1 << i)) == 0) { continue; + } lck_rw_lock_shared(mbuf_tx_compl_tbl_lock); callback = mbuf_tx_compl_table[i]; @@ -1895,8 +2008,9 @@ m_do_tx_compl_callback(struct mbuf *m, struct ifnet *ifp) #if (DEBUG || DEVELOPMENT) if (mbuf_tx_compl_debug != 0) { 
OSDecrementAtomic64(&mbuf_tx_compl_outstanding); - if (ifp == NULL) + if (ifp == NULL) { atomic_add_64(&mbuf_tx_compl_aborted, 1); + } } #endif /* (DEBUG || DEVELOPMENT) */ } diff --git a/bsd/kern/kpi_mbuf_internal.h b/bsd/kern/kpi_mbuf_internal.h index 81d9077d5..6fce5935b 100644 --- a/bsd/kern/kpi_mbuf_internal.h +++ b/bsd/kern/kpi_mbuf_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef __KPI_MBUF_INTERNAL_ diff --git a/bsd/kern/kpi_socket.c b/bsd/kern/kpi_socket.c index a7b17264d..5feba8769 100644 --- a/bsd/kern/kpi_socket.c +++ b/bsd/kern/kpi_socket.c @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#define __KPI__ +#define __KPI__ #include #include #include @@ -49,8 +49,8 @@ #include #include -static errno_t sock_send_internal(socket_t, const struct msghdr *, - mbuf_t, int, size_t *); +static errno_t sock_send_internal(socket_t, const struct msghdr *, + mbuf_t, int, size_t *); #undef sock_accept #undef sock_socket @@ -73,28 +73,29 @@ sock_accept_common(socket_t sock, struct sockaddr *from, int fromlen, int flags, struct socket *new_so; lck_mtx_t *mutex_held; int dosocklock; - errno_t error = 0; + errno_t error = 0; - if (sock == NULL || new_sock == NULL) - return (EINVAL); + if (sock == NULL || new_sock == NULL) { + return EINVAL; + } socket_lock(sock, 1); if ((sock->so_options & SO_ACCEPTCONN) == 0) { socket_unlock(sock, 1); - return (EINVAL); + return EINVAL; } if ((flags & ~(MSG_DONTWAIT)) != 0) { socket_unlock(sock, 1); - return (ENOTSUP); + return ENOTSUP; } check_again: if (((flags & MSG_DONTWAIT) != 0 || (sock->so_state & SS_NBIO) != 0) && sock->so_comp.tqh_first == NULL) { socket_unlock(sock, 1); - return (EWOULDBLOCK); + return EWOULDBLOCK; } - if (sock->so_proto->pr_getlock != NULL) { + if (sock->so_proto->pr_getlock != NULL) { mutex_held = (*sock->so_proto->pr_getlock)(sock, PR_F_WILLUNLOCK); dosocklock = 1; } else { @@ -111,14 +112,14 @@ check_again: PSOCK | PCATCH, "sock_accept", NULL); if (error != 0) { socket_unlock(sock, 1); - return (error); + return error; } } if (sock->so_error != 0) { error = sock->so_error; sock->so_error = 0; socket_unlock(sock, 1); - return (error); + return error; } so_acquire_accept_list(sock, NULL); @@ -158,12 +159,12 @@ check_again: if ((error = soacceptfilter(new_so, sock)) != 0) { /* Drop reference on listening socket */ sodereference(sock); - return (error); + return error; } socket_lock(sock, 0); } - if 
(dosocklock) { + if (dosocklock) { LCK_MTX_ASSERT(new_so->so_proto->pr_getlock(new_so, 0), LCK_MTX_ASSERT_NOTOWNED); socket_lock(new_so, 1); @@ -171,7 +172,7 @@ check_again: (void) soacceptlock(new_so, &sa, 0); - socket_unlock(sock, 1); /* release the head */ + socket_unlock(sock, 1); /* release the head */ /* see comments in sock_setupcall() */ if (callback != NULL) { @@ -183,12 +184,14 @@ check_again: } if (sa != NULL && from != NULL) { - if (fromlen > sa->sa_len) + if (fromlen > sa->sa_len) { fromlen = sa->sa_len; + } memcpy(from, sa, fromlen); } - if (sa != NULL) + if (sa != NULL) { FREE(sa, M_SONAME); + } /* * If the socket has been marked as inactive by sosetdefunct(), @@ -199,25 +202,26 @@ check_again: SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL); } *new_sock = new_so; - if (dosocklock) + if (dosocklock) { socket_unlock(new_so, 1); - return (error); + } + return error; } errno_t sock_accept(socket_t sock, struct sockaddr *from, int fromlen, int flags, sock_upcall callback, void *cookie, socket_t *new_sock) { - return (sock_accept_common(sock, from, fromlen, flags, - callback, cookie, new_sock, false)); + return sock_accept_common(sock, from, fromlen, flags, + callback, cookie, new_sock, false); } errno_t sock_accept_internal(socket_t sock, struct sockaddr *from, int fromlen, int flags, sock_upcall callback, void *cookie, socket_t *new_sock) { - return (sock_accept_common(sock, from, fromlen, flags, - callback, cookie, new_sock, true)); + return sock_accept_common(sock, from, fromlen, flags, + callback, cookie, new_sock, true); } errno_t @@ -228,25 +232,28 @@ sock_bind(socket_t sock, const struct sockaddr *to) struct sockaddr_storage ss; boolean_t want_free = TRUE; - if (sock == NULL || to == NULL) - return (EINVAL); + if (sock == NULL || to == NULL) { + return EINVAL; + } - if (to->sa_len > sizeof (ss)) { + if (to->sa_len > sizeof(ss)) { MALLOC(sa, struct sockaddr *, to->sa_len, M_SONAME, M_WAITOK); - if (sa == NULL) - return (ENOBUFS); + if (sa == NULL) { + return ENOBUFS; + } } else { sa = (struct sockaddr *)&ss; want_free = FALSE; } memcpy(sa, to, to->sa_len); - error = sobindlock(sock, sa, 1); /* will lock socket */ + error = sobindlock(sock, sa, 1); /* will lock socket */ - if (sa != NULL && want_free == TRUE) + if (sa != NULL && want_free == TRUE) { FREE(sa, M_SONAME); + } - return (error); + return error; } errno_t @@ -258,14 +265,16 @@ sock_connect(socket_t sock, const struct sockaddr *to, int flags) struct sockaddr_storage ss; boolean_t want_free = TRUE; - if (sock == NULL || to == NULL) - return (EINVAL); + if (sock == NULL || to == NULL) { + return EINVAL; + } - if (to->sa_len > sizeof (ss)) { + if (to->sa_len > sizeof(ss)) { MALLOC(sa, struct sockaddr *, to->sa_len, M_SONAME, (flags & MSG_DONTWAIT) ? 
M_NOWAIT : M_WAITOK); - if (sa == NULL) - return (ENOBUFS); + if (sa == NULL) { + return ENOBUFS; + } } else { sa = (struct sockaddr *)&ss; want_free = FALSE; @@ -288,17 +297,19 @@ sock_connect(socket_t sock, const struct sockaddr *to, int flags) goto out; } - if (sock->so_proto->pr_getlock != NULL) + if (sock->so_proto->pr_getlock != NULL) { mutex_held = (*sock->so_proto->pr_getlock)(sock, PR_F_WILLUNLOCK); - else + } else { mutex_held = sock->so_proto->pr_domain->dom_mtx; + } while ((sock->so_state & SS_ISCONNECTING) && sock->so_error == 0) { error = msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK | PCATCH, "sock_connect", NULL); - if (error != 0) + if (error != 0) { break; + } } if (error == 0) { @@ -311,17 +322,18 @@ sock_connect(socket_t sock, const struct sockaddr *to, int flags) out: socket_unlock(sock, 1); - if (sa != NULL && want_free == TRUE) + if (sa != NULL && want_free == TRUE) { FREE(sa, M_SONAME); + } - return (error); + return error; } errno_t sock_connectwait(socket_t sock, const struct timeval *tv) { lck_mtx_t *mutex_held; - errno_t retval = 0; + errno_t retval = 0; struct timespec ts; socket_lock(sock, 1); @@ -332,10 +344,11 @@ sock_connectwait(socket_t sock, const struct timeval *tv) retval = sock->so_error; sock->so_error = 0; } else { - if ((sock->so_state & SS_ISCONNECTED) != 0) + if ((sock->so_state & SS_ISCONNECTED) != 0) { retval = 0; - else + } else { retval = EINVAL; + } } goto done; } @@ -349,15 +362,16 @@ sock_connectwait(socket_t sock, const struct timeval *tv) ts.tv_sec = tv->tv_sec; ts.tv_nsec = (tv->tv_usec * (integer_t)NSEC_PER_USEC); - if ((ts.tv_sec + (ts.tv_nsec/(long)NSEC_PER_SEC))/100 > SHRT_MAX) { + if ((ts.tv_sec + (ts.tv_nsec / (long)NSEC_PER_SEC)) / 100 > SHRT_MAX) { retval = EDOM; goto done; } - if (sock->so_proto->pr_getlock != NULL) + if (sock->so_proto->pr_getlock != NULL) { mutex_held = (*sock->so_proto->pr_getlock)(sock, PR_F_WILLUNLOCK); - else + } else { mutex_held = sock->so_proto->pr_domain->dom_mtx; + } msleep((caddr_t)&sock->so_timeo, mutex_held, PSOCK, "sock_connectwait", &ts); @@ -375,7 +389,7 @@ sock_connectwait(socket_t sock, const struct timeval *tv) done: socket_unlock(sock, 1); - return (retval); + return retval; } errno_t @@ -384,62 +398,66 @@ sock_nointerrupt(socket_t sock, int on) socket_lock(sock, 1); if (on) { - sock->so_rcv.sb_flags |= SB_NOINTR; /* This isn't safe */ - sock->so_snd.sb_flags |= SB_NOINTR; /* This isn't safe */ + sock->so_rcv.sb_flags |= SB_NOINTR; /* This isn't safe */ + sock->so_snd.sb_flags |= SB_NOINTR; /* This isn't safe */ } else { - sock->so_rcv.sb_flags &= ~SB_NOINTR; /* This isn't safe */ - sock->so_snd.sb_flags &= ~SB_NOINTR; /* This isn't safe */ + sock->so_rcv.sb_flags &= ~SB_NOINTR; /* This isn't safe */ + sock->so_snd.sb_flags &= ~SB_NOINTR; /* This isn't safe */ } socket_unlock(sock, 1); - return (0); + return 0; } errno_t -sock_getpeername(socket_t sock, struct sockaddr *peername, int peernamelen) +sock_getpeername(socket_t sock, struct sockaddr *peername, int peernamelen) { int error; - struct sockaddr *sa = NULL; + struct sockaddr *sa = NULL; - if (sock == NULL || peername == NULL || peernamelen < 0) - return (EINVAL); + if (sock == NULL || peername == NULL || peernamelen < 0) { + return EINVAL; + } socket_lock(sock, 1); - if (!(sock->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING))) { + if (!(sock->so_state & (SS_ISCONNECTED | SS_ISCONFIRMING))) { socket_unlock(sock, 1); - return (ENOTCONN); + return ENOTCONN; } error = sogetaddr_locked(sock, &sa, 1); socket_unlock(sock, 1); if (error == 
0) { - if (peernamelen > sa->sa_len) + if (peernamelen > sa->sa_len) { peernamelen = sa->sa_len; + } memcpy(peername, sa, peernamelen); FREE(sa, M_SONAME); } - return (error); + return error; } errno_t -sock_getsockname(socket_t sock, struct sockaddr *sockname, int socknamelen) +sock_getsockname(socket_t sock, struct sockaddr *sockname, int socknamelen) { int error; - struct sockaddr *sa = NULL; + struct sockaddr *sa = NULL; - if (sock == NULL || sockname == NULL || socknamelen < 0) - return (EINVAL); + if (sock == NULL || sockname == NULL || socknamelen < 0) { + return EINVAL; + } socket_lock(sock, 1); error = sogetaddr_locked(sock, &sa, 0); socket_unlock(sock, 1); if (error == 0) { - if (socknamelen > sa->sa_len) + if (socknamelen > sa->sa_len) { socknamelen = sa->sa_len; + } memcpy(sockname, sa, socknamelen); FREE(sa, M_SONAME); } - return (error); + return error; } __private_extern__ int @@ -447,8 +465,9 @@ sogetaddr_locked(struct socket *so, struct sockaddr **psa, int peer) { int error; - if (so == NULL || psa == NULL) - return (EINVAL); + if (so == NULL || psa == NULL) { + return EINVAL; + } *psa = NULL; error = peer ? so->so_proto->pr_usrreqs->pru_peeraddr(so, psa) : @@ -460,7 +479,7 @@ sogetaddr_locked(struct socket *so, struct sockaddr **psa, int peer) FREE(*psa, M_SONAME); *psa = NULL; } - return (error); + return error; } errno_t @@ -468,32 +487,35 @@ sock_getaddr(socket_t sock, struct sockaddr **psa, int peer) { int error; - if (sock == NULL || psa == NULL) - return (EINVAL); + if (sock == NULL || psa == NULL) { + return EINVAL; + } socket_lock(sock, 1); error = sogetaddr_locked(sock, psa, peer); socket_unlock(sock, 1); - return (error); + return error; } void sock_freeaddr(struct sockaddr *sa) { - if (sa != NULL) + if (sa != NULL) { FREE(sa, M_SONAME); + } } errno_t sock_getsockopt(socket_t sock, int level, int optname, void *optval, - int *optlen) + int *optlen) { int error = 0; - struct sockopt sopt; + struct sockopt sopt; - if (sock == NULL || optval == NULL || optlen == NULL) - return (EINVAL); + if (sock == NULL || optval == NULL || optlen == NULL) { + return EINVAL; + } sopt.sopt_dir = SOPT_GET; sopt.sopt_level = level; @@ -501,26 +523,28 @@ sock_getsockopt(socket_t sock, int level, int optname, void *optval, sopt.sopt_val = CAST_USER_ADDR_T(optval); sopt.sopt_valsize = *optlen; sopt.sopt_p = kernproc; - error = sogetoptlock(sock, &sopt, 1); /* will lock socket */ - if (error == 0) + error = sogetoptlock(sock, &sopt, 1); /* will lock socket */ + if (error == 0) { *optlen = sopt.sopt_valsize; - return (error); + } + return error; } errno_t sock_ioctl(socket_t sock, unsigned long request, void *argp) { - return (soioctl(sock, request, argp, kernproc)); /* will lock socket */ + return soioctl(sock, request, argp, kernproc); /* will lock socket */ } errno_t sock_setsockopt(socket_t sock, int level, int optname, const void *optval, - int optlen) + int optlen) { - struct sockopt sopt; + struct sockopt sopt; - if (sock == NULL || optval == NULL) - return (EINVAL); + if (sock == NULL || optval == NULL) { + return EINVAL; + } sopt.sopt_dir = SOPT_SET; sopt.sopt_level = level; @@ -528,7 +552,7 @@ sock_setsockopt(socket_t sock, int level, int optname, const void *optval, sopt.sopt_val = CAST_USER_ADDR_T(optval); sopt.sopt_valsize = optlen; sopt.sopt_p = kernproc; - return (sosetoptlock(sock, &sopt, 1)); /* will lock socket */ + return sosetoptlock(sock, &sopt, 1); /* will lock socket */ } /* @@ -541,16 +565,17 @@ so_tc_from_dscp(u_int8_t dscp) { u_int32_t tc; - if (dscp >= 0x30 && 
dscp <= 0x3f) + if (dscp >= 0x30 && dscp <= 0x3f) { tc = SO_TC_VO; - else if (dscp >= 0x20 && dscp <= 0x2f) + } else if (dscp >= 0x20 && dscp <= 0x2f) { tc = SO_TC_VI; - else if (dscp >= 0x08 && dscp <= 0x17) + } else if (dscp >= 0x08 && dscp <= 0x17) { tc = SO_TC_BK_SYS; - else + } else { tc = SO_TC_BE; + } - return (tc); + return tc; } errno_t @@ -560,8 +585,9 @@ sock_settclassopt(socket_t sock, const void *optval, size_t optlen) struct sockopt sopt; int sotc; - if (sock == NULL || optval == NULL || optlen != sizeof (int)) - return (EINVAL); + if (sock == NULL || optval == NULL || optlen != sizeof(int)) { + return EINVAL; + } socket_lock(sock, 1); if (!(sock->so_state & SS_ISCONNECTED)) { @@ -588,12 +614,12 @@ sock_settclassopt(socket_t sock, const void *optval, size_t optlen) sopt.sopt_dir = SOPT_SET; sopt.sopt_val = CAST_USER_ADDR_T(&sotc); - sopt.sopt_valsize = sizeof (sotc); + sopt.sopt_valsize = sizeof(sotc); sopt.sopt_p = kernproc; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_TRAFFIC_CLASS; - error = sosetoptlock(sock, &sopt, 0); /* already locked */ + error = sosetoptlock(sock, &sopt, 0); /* already locked */ if (error != 0) { printf("%s: sosetopt SO_TRAFFIC_CLASS failed %d\n", @@ -606,8 +632,9 @@ sock_settclassopt(socket_t sock, const void *optval, size_t optlen) * We do not want to set traffic class bits if the destination * is not local. */ - if (!so_isdstlocal(sock)) + if (!so_isdstlocal(sock)) { goto out; + } sopt.sopt_dir = SOPT_SET; sopt.sopt_val = CAST_USER_ADDR_T(optval); @@ -628,12 +655,12 @@ sock_settclassopt(socket_t sock, const void *optval, size_t optlen) goto out; } - error = sosetoptlock(sock, &sopt, 0); /* already locked */ + error = sosetoptlock(sock, &sopt, 0); /* already locked */ socket_unlock(sock, 1); - return (error); + return error; out: socket_unlock(sock, 1); - return (error); + return error; } errno_t @@ -642,8 +669,9 @@ sock_gettclassopt(socket_t sock, void *optval, size_t *optlen) errno_t error = 0; struct sockopt sopt; - if (sock == NULL || optval == NULL || optlen == NULL) - return (EINVAL); + if (sock == NULL || optval == NULL || optlen == NULL) { + return EINVAL; + } sopt.sopt_dir = SOPT_GET; sopt.sopt_val = CAST_USER_ADDR_T(optval); @@ -653,7 +681,7 @@ sock_gettclassopt(socket_t sock, void *optval, size_t *optlen) socket_lock(sock, 1); if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL) { socket_unlock(sock, 1); - return (EINVAL); + return EINVAL; } switch (SOCK_DOM(sock)) { @@ -667,23 +695,24 @@ sock_gettclassopt(socket_t sock, void *optval, size_t *optlen) break; default: socket_unlock(sock, 1); - return (EINVAL); - + return EINVAL; } - error = sogetoptlock(sock, &sopt, 0); /* already locked */ + error = sogetoptlock(sock, &sopt, 0); /* already locked */ socket_unlock(sock, 1); - if (error == 0) + if (error == 0) { *optlen = sopt.sopt_valsize; - return (error); + } + return error; } errno_t sock_listen(socket_t sock, int backlog) { - if (sock == NULL) - return (EINVAL); + if (sock == NULL) { + return EINVAL; + } - return (solisten(sock, backlog)); /* will lock socket */ + return solisten(sock, backlog); /* will lock socket */ } errno_t @@ -694,14 +723,15 @@ sock_receive_internal(socket_t sock, struct msghdr *msg, mbuf_t *data, struct mbuf *control = NULL; int error = 0; int length = 0; - struct sockaddr *fromsa = NULL; - char uio_buf[ UIO_SIZEOF((msg != NULL) ? msg->msg_iovlen : 0) ]; + struct sockaddr *fromsa = NULL; + char uio_buf[UIO_SIZEOF((msg != NULL) ? 
msg->msg_iovlen : 0)]; - if (sock == NULL) - return (EINVAL); + if (sock == NULL) { + return EINVAL; + } auio = uio_createwithbuffer(((msg != NULL) ? msg->msg_iovlen : 0), - 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof (uio_buf)); + 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); if (msg != NULL && data == NULL) { int i; struct iovec *tempp = msg->msg_iov; @@ -711,24 +741,28 @@ sock_receive_internal(socket_t sock, struct msghdr *msg, mbuf_t *data, CAST_USER_ADDR_T((tempp + i)->iov_base), (tempp + i)->iov_len); } - if (uio_resid(auio) < 0) - return (EINVAL); + if (uio_resid(auio) < 0) { + return EINVAL; + } } else if (recvdlen != NULL) { uio_setresid(auio, (uio_resid(auio) + *recvdlen)); } length = uio_resid(auio); - if (recvdlen != NULL) + if (recvdlen != NULL) { *recvdlen = 0; + } /* let pru_soreceive handle the socket locking */ error = sock->so_proto->pr_usrreqs->pru_soreceive(sock, &fromsa, auio, data, (msg && msg->msg_control) ? &control : NULL, &flags); - if (error != 0) + if (error != 0) { goto cleanup; + } - if (recvdlen != NULL) + if (recvdlen != NULL) { *recvdlen = length - uio_resid(auio); + } if (msg != NULL) { msg->msg_flags = flags; @@ -770,11 +804,13 @@ sock_receive_internal(socket_t sock, struct msghdr *msg, mbuf_t *data, } cleanup: - if (control != NULL) + if (control != NULL) { m_freem(control); - if (fromsa != NULL) + } + if (fromsa != NULL) { FREE(fromsa, M_SONAME); - return (error); + } + return error; } errno_t @@ -782,10 +818,11 @@ sock_receive(socket_t sock, struct msghdr *msg, int flags, size_t *recvdlen) { if ((msg == NULL) || (msg->msg_iovlen < 1) || (msg->msg_iov[0].iov_len == 0) || - (msg->msg_iov[0].iov_base == NULL)) - return (EINVAL); + (msg->msg_iov[0].iov_base == NULL)) { + return EINVAL; + } - return (sock_receive_internal(sock, msg, NULL, flags, recvdlen)); + return sock_receive_internal(sock, msg, NULL, flags, recvdlen); } errno_t @@ -793,10 +830,11 @@ sock_receivembuf(socket_t sock, struct msghdr *msg, mbuf_t *data, int flags, size_t *recvlen) { if (data == NULL || recvlen == 0 || *recvlen <= 0 || (msg != NULL && - (msg->msg_iov != NULL || msg->msg_iovlen != 0))) - return (EINVAL); + (msg->msg_iov != NULL || msg->msg_iovlen != 0))) { + return EINVAL; + } - return (sock_receive_internal(sock, msg, data, flags, recvlen)); + return sock_receive_internal(sock, msg, data, flags, recvlen); } errno_t @@ -807,7 +845,7 @@ sock_send_internal(socket_t sock, const struct msghdr *msg, mbuf_t data, struct mbuf *control = NULL; int error = 0; int datalen = 0; - char uio_buf[ UIO_SIZEOF((msg != NULL ? msg->msg_iovlen : 1)) ]; + char uio_buf[UIO_SIZEOF((msg != NULL ? 
msg->msg_iovlen : 1))]; if (sock == NULL) { error = EINVAL; @@ -818,7 +856,7 @@ sock_send_internal(socket_t sock, const struct msghdr *msg, mbuf_t data, struct iovec *tempp = msg->msg_iov; auio = uio_createwithbuffer(msg->msg_iovlen, 0, - UIO_SYSSPACE, UIO_WRITE, &uio_buf[0], sizeof (uio_buf)); + UIO_SYSSPACE, UIO_WRITE, &uio_buf[0], sizeof(uio_buf)); if (tempp != NULL) { int i; @@ -835,16 +873,18 @@ sock_send_internal(socket_t sock, const struct msghdr *msg, mbuf_t data, } } - if (sentlen != NULL) + if (sentlen != NULL) { *sentlen = 0; + } - if (auio != NULL) + if (auio != NULL) { datalen = uio_resid(auio); - else + } else { datalen = data->m_pkthdr.len; + } if (msg != NULL && msg->msg_control) { - if ((size_t)msg->msg_controllen < sizeof (struct cmsghdr)) { + if ((size_t)msg->msg_controllen < sizeof(struct cmsghdr)) { error = EINVAL; goto errorout; } @@ -876,17 +916,19 @@ sock_send_internal(socket_t sock, const struct msghdr *msg, mbuf_t data, * is consistent with sendit() behavior. */ if (auio != NULL && uio_resid(auio) != datalen && - (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) + (error == ERESTART || error == EINTR || error == EWOULDBLOCK)) { error = 0; + } if (error == 0 && sentlen != NULL) { - if (auio != NULL) + if (auio != NULL) { *sentlen = datalen - uio_resid(auio); - else + } else { *sentlen = datalen; + } } - return (error); + return error; /* * In cases where we detect an error before returning, we need to @@ -894,44 +936,50 @@ sock_send_internal(socket_t sock, const struct msghdr *msg, mbuf_t data, * free the mbuf chain if they encounter an error. */ errorout: - if (control) + if (control) { m_freem(control); - if (data) + } + if (data) { m_freem(data); - if (sentlen) + } + if (sentlen) { *sentlen = 0; - return (error); + } + return error; } errno_t sock_send(socket_t sock, const struct msghdr *msg, int flags, size_t *sentlen) { - if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1) - return (EINVAL); + if (msg == NULL || msg->msg_iov == NULL || msg->msg_iovlen < 1) { + return EINVAL; + } - return (sock_send_internal(sock, msg, NULL, flags, sentlen)); + return sock_send_internal(sock, msg, NULL, flags, sentlen); } errno_t sock_sendmbuf(socket_t sock, const struct msghdr *msg, mbuf_t data, - int flags, size_t *sentlen) + int flags, size_t *sentlen) { if (data == NULL || (msg != NULL && (msg->msg_iov != NULL || msg->msg_iovlen != 0))) { - if (data != NULL) + if (data != NULL) { m_freem(data); - return (EINVAL); + } + return EINVAL; } - return (sock_send_internal(sock, msg, data, flags, sentlen)); + return sock_send_internal(sock, msg, data, flags, sentlen); } errno_t sock_shutdown(socket_t sock, int how) { - if (sock == NULL) - return (EINVAL); + if (sock == NULL) { + return EINVAL; + } - return (soshutdown(sock, how)); + return soshutdown(sock, how); } errno_t @@ -940,8 +988,9 @@ sock_socket_common(int domain, int type, int protocol, sock_upcall callback, { int error = 0; - if (new_so == NULL) - return (EINVAL); + if (new_so == NULL) { + return EINVAL; + } /* socreate will create an initial so_count */ error = socreate(domain, new_so, type, protocol); @@ -959,37 +1008,38 @@ sock_socket_common(int domain, int type, int protocol, sock_upcall callback, if (callback != NULL) { sock_setupcall(*new_so, callback, context); } - /* + /* * last_pid and last_upid should be zero for sockets * created using sock_socket */ (*new_so)->last_pid = 0; (*new_so)->last_upid = 0; } - return (error); + return error; } errno_t sock_socket_internal(int domain, int type, 
int protocol, sock_upcall callback, void *context, socket_t *new_so) { - return (sock_socket_common(domain, type, protocol, callback, - context, new_so, true)); + return sock_socket_common(domain, type, protocol, callback, + context, new_so, true); } errno_t sock_socket(int domain, int type, int protocol, sock_upcall callback, void *context, socket_t *new_so) { - return (sock_socket_common(domain, type, protocol, callback, - context, new_so, false)); + return sock_socket_common(domain, type, protocol, callback, + context, new_so, false); } void sock_close(socket_t sock) { - if (sock == NULL) + if (sock == NULL) { return; + } soclose(sock); } @@ -998,12 +1048,13 @@ sock_close(socket_t sock) void sock_retain(socket_t sock) { - if (sock == NULL) + if (sock == NULL) { return; + } socket_lock(sock, 1); sock->so_retaincnt++; - sock->so_usecount++; /* add extra reference for holding the socket */ + sock->so_usecount++; /* add extra reference for holding the socket */ socket_unlock(sock, 1); } @@ -1011,12 +1062,14 @@ sock_retain(socket_t sock) void sock_release(socket_t sock) { - if (sock == NULL) + if (sock == NULL) { return; + } socket_lock(sock, 1); - if (sock->so_upcallusecount > 0) + if (sock->so_upcallusecount > 0) { soclose_wait_locked(sock); + } sock->so_retaincnt--; if (sock->so_retaincnt < 0) { @@ -1044,16 +1097,18 @@ sock_release(socket_t sock) errno_t sock_setpriv(socket_t sock, int on) { - if (sock == NULL) - return (EINVAL); + if (sock == NULL) { + return EINVAL; + } socket_lock(sock, 1); - if (on) + if (on) { sock->so_state |= SS_PRIV; - else + } else { sock->so_state &= ~SS_PRIV; + } socket_unlock(sock, 1); - return (0); + return 0; } int @@ -1064,7 +1119,7 @@ sock_isconnected(socket_t sock) socket_lock(sock, 1); retval = ((sock->so_state & SS_ISCONNECTED) ? 1 : 0); socket_unlock(sock, 1); - return (retval); + return retval; } int @@ -1075,21 +1130,24 @@ sock_isnonblocking(socket_t sock) socket_lock(sock, 1); retval = ((sock->so_state & SS_NBIO) ? 
1 : 0); socket_unlock(sock, 1); - return (retval); + return retval; } errno_t sock_gettype(socket_t sock, int *outDomain, int *outType, int *outProtocol) { socket_lock(sock, 1); - if (outDomain != NULL) + if (outDomain != NULL) { *outDomain = SOCK_DOM(sock); - if (outType != NULL) + } + if (outType != NULL) { *outType = sock->so_type; - if (outProtocol != NULL) + } + if (outProtocol != NULL) { *outProtocol = SOCK_PROTO(sock); + } socket_unlock(sock, 1); - return (0); + return 0; } /* @@ -1103,7 +1161,7 @@ sock_gettype(socket_t sock, int *outDomain, int *outType, int *outProtocol) socket_t sock_getlistener(socket_t sock) { - return (sock->so_head); + return sock->so_head; } static inline void @@ -1122,12 +1180,14 @@ void socket_set_traffic_mgt_flags_locked(socket_t sock, u_int8_t flags) { u_int32_t soflags1 = 0; - - if ((flags & TRAFFIC_MGT_SO_BACKGROUND)) + + if ((flags & TRAFFIC_MGT_SO_BACKGROUND)) { soflags1 |= SOF1_TRAFFIC_MGT_SO_BACKGROUND; - if ((flags & TRAFFIC_MGT_TCP_RECVBG)) + } + if ((flags & TRAFFIC_MGT_TCP_RECVBG)) { soflags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG; - + } + (void) OSBitOrAtomic(soflags1, &sock->so_flags1); sock_set_tcp_stream_priority(sock); @@ -1149,11 +1209,13 @@ socket_clear_traffic_mgt_flags_locked(socket_t sock, u_int8_t flags) { u_int32_t soflags1 = 0; - if ((flags & TRAFFIC_MGT_SO_BACKGROUND)) + if ((flags & TRAFFIC_MGT_SO_BACKGROUND)) { soflags1 |= SOF1_TRAFFIC_MGT_SO_BACKGROUND; - if ((flags & TRAFFIC_MGT_TCP_RECVBG)) + } + if ((flags & TRAFFIC_MGT_TCP_RECVBG)) { soflags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG; - + } + (void) OSBitAndAtomic(~soflags1, &sock->so_flags1); sock_set_tcp_stream_priority(sock); @@ -1177,8 +1239,9 @@ socket_defunct(struct proc *p, socket_t so, int level) errno_t retval; if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC && - level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL) - return (EINVAL); + level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL) { + return EINVAL; + } socket_lock(so, 1); /* @@ -1191,13 +1254,14 @@ socket_defunct(struct proc *p, socket_t so, int level) if (level == SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC && (so->so_rcv.sb_flags & so->so_snd.sb_flags & SB_UNIX) != SB_UNIX) { socket_unlock(so, 1); - return (EOPNOTSUPP); + return EOPNOTSUPP; } retval = sosetdefunct(p, so, level, TRUE); - if (retval == 0) + if (retval == 0) { retval = sodefunct(p, so, level); + } socket_unlock(so, 1); - return (retval); + return retval; } void @@ -1206,8 +1270,9 @@ sock_setupcalls_locked(socket_t sock, sock_upcall rcallback, void *rcontext, { if (rcallback != NULL) { sock->so_rcv.sb_flags |= SB_UPCALL; - if (locked) + if (locked) { sock->so_rcv.sb_flags |= SB_UPCALL_LOCK; + } sock->so_rcv.sb_upcall = rcallback; sock->so_rcv.sb_upcallarg = rcontext; } else { @@ -1218,8 +1283,9 @@ sock_setupcalls_locked(socket_t sock, sock_upcall rcallback, void *rcontext, if (wcallback != NULL) { sock->so_snd.sb_flags |= SB_UPCALL; - if (locked) + if (locked) { sock->so_snd.sb_flags |= SB_UPCALL_LOCK; + } sock->so_snd.sb_upcall = wcallback; sock->so_snd.sb_upcallarg = wcontext; } else { @@ -1232,8 +1298,9 @@ sock_setupcalls_locked(socket_t sock, sock_upcall rcallback, void *rcontext, errno_t sock_setupcall(socket_t sock, sock_upcall callback, void *context) { - if (sock == NULL) - return (EINVAL); + if (sock == NULL) { + return EINVAL; + } /* * Note that we don't wait for any in progress upcall to complete. 
@@ -1252,15 +1319,16 @@ sock_setupcall(socket_t sock, sock_upcall callback, void *context) #endif /* !CONFIG_EMBEDDED */ socket_unlock(sock, 1); - return (0); + return 0; } errno_t sock_setupcalls(socket_t sock, sock_upcall rcallback, void *rcontext, sock_upcall wcallback, void *wcontext) { - if (sock == NULL) - return (EINVAL); + if (sock == NULL) { + return EINVAL; + } /* * Note that we don't wait for any in progress upcall to complete. @@ -1269,7 +1337,7 @@ sock_setupcalls(socket_t sock, sock_upcall rcallback, void *rcontext, sock_setupcalls_locked(sock, rcallback, rcontext, wcallback, wcontext, 0); socket_unlock(sock, 1); - return (0); + return 0; } void @@ -1296,14 +1364,15 @@ errno_t sock_catchevents(socket_t sock, sock_evupcall ecallback, void *econtext, u_int32_t emask) { - if (sock == NULL) - return (EINVAL); + if (sock == NULL) { + return EINVAL; + } socket_lock(sock, 1); sock_catchevents_locked(sock, ecallback, econtext, emask); socket_unlock(sock, 1); - return (0); + return 0; } /* @@ -1312,5 +1381,5 @@ sock_catchevents(socket_t sock, sock_evupcall ecallback, void *econtext, int sock_iskernel(socket_t so) { - return (so && so->last_pid == 0); + return so && so->last_pid == 0; } diff --git a/bsd/kern/kpi_socketfilter.c b/bsd/kern/kpi_socketfilter.c index 138c6b299..597045a88 100644 --- a/bsd/kern/kpi_socketfilter.c +++ b/bsd/kern/kpi_socketfilter.c @@ -51,44 +51,45 @@ #include #include +#include #include #include -#define SFEF_ATTACHED 0x1 /* SFE is on socket list */ -#define SFEF_NODETACH 0x2 /* Detach should not be called */ -#define SFEF_NOSOCKET 0x4 /* Socket is gone */ +#define SFEF_ATTACHED 0x1 /* SFE is on socket list */ +#define SFEF_NODETACH 0x2 /* Detach should not be called */ +#define SFEF_NOSOCKET 0x4 /* Socket is gone */ struct socket_filter_entry { - struct socket_filter_entry *sfe_next_onsocket; - struct socket_filter_entry *sfe_next_onfilter; - struct socket_filter_entry *sfe_next_oncleanup; + struct socket_filter_entry *sfe_next_onsocket; + struct socket_filter_entry *sfe_next_onfilter; + struct socket_filter_entry *sfe_next_oncleanup; - struct socket_filter *sfe_filter; - struct socket *sfe_socket; - void *sfe_cookie; + struct socket_filter *sfe_filter; + struct socket *sfe_socket; + void *sfe_cookie; - uint32_t sfe_flags; - int32_t sfe_refcount; + uint32_t sfe_flags; + int32_t sfe_refcount; }; struct socket_filter { - TAILQ_ENTRY(socket_filter) sf_protosw_next; - TAILQ_ENTRY(socket_filter) sf_global_next; - struct socket_filter_entry *sf_entry_head; + TAILQ_ENTRY(socket_filter) sf_protosw_next; + TAILQ_ENTRY(socket_filter) sf_global_next; + struct socket_filter_entry *sf_entry_head; - struct protosw *sf_proto; - struct sflt_filter sf_filter; - u_int32_t sf_refcount; + struct protosw *sf_proto; + struct sflt_filter sf_filter; + struct os_refcnt sf_refcount; }; TAILQ_HEAD(socket_filter_list, socket_filter); -static struct socket_filter_list sock_filter_head; -static lck_rw_t *sock_filter_lock = NULL; -static lck_mtx_t *sock_filter_cleanup_lock = NULL; -static struct socket_filter_entry *sock_filter_cleanup_entries = NULL; -static thread_t sock_filter_cleanup_thread = NULL; +static struct socket_filter_list sock_filter_head; +static lck_rw_t *sock_filter_lock = NULL; +static lck_mtx_t *sock_filter_cleanup_lock = NULL; +static struct socket_filter_entry *sock_filter_cleanup_entries = NULL; +static thread_t sock_filter_cleanup_thread = NULL; static void sflt_cleanup_thread(void *, wait_result_t); static void sflt_detach_locked(struct socket_filter_entry *entry); 
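/*
 * [Editor's sketch] The substantive change in kpi_socketfilter.c is that
 * sf_refcount moves from a bare u_int32_t to struct os_refcnt, so retains and
 * releases go through os_ref_retain_locked()/os_ref_release_locked(), which
 * can assert against over-release and use-after-zero instead of silently
 * wrapping. A minimal userland model of that pattern, assuming an external
 * lock protects the count; the toy_* names are invented.
 */
#include <assert.h>
#include <stdint.h>

struct toy_refcnt {
	uint32_t count;         /* protected by an external lock */
};

static void
toy_ref_init(struct toy_refcnt *r)
{
	r->count = 1;           /* mirrors os_ref_init() in the hunk below */
}

static void
toy_ref_retain_locked(struct toy_refcnt *r)
{
	assert(r->count != 0);  /* retaining a dead object is a bug */
	r->count++;
}

/* Returns the new count; 0 tells the caller to tear the object down. */
static uint32_t
toy_ref_release_locked(struct toy_refcnt *r)
{
	assert(r->count != 0);  /* over-release is a bug */
	return --r->count;
}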
@@ -105,31 +106,30 @@ errno_t sflt_register(const struct sflt_filter *filter, int domain, __private_extern__ int sflt_permission_check(struct inpcb *inp) { - /* * All these permissions only apply to the co-processor interface, * so ignore IPv4. */ if (!(inp->inp_vflag & INP_IPV6)) { - return (0); + return 0; } /* Sockets that have this entitlement bypass socket filters. */ if (INP_INTCOPROC_ALLOWED(inp)) { - return (1); + return 1; } if ((inp->inp_flags & INP_BOUND_IF) && IFNET_IS_INTCOPROC(inp->inp_boundifp)) { - return (1); + return 1; } - return (0); + return 0; } __private_extern__ void sflt_init(void) { - lck_grp_attr_t *grp_attrib = NULL; - lck_attr_t *lck_attrib = NULL; - lck_grp_t *lck_group = NULL; + lck_grp_attr_t *grp_attrib = NULL; + lck_attr_t *lck_attrib = NULL; + lck_grp_t *lck_group = NULL; TAILQ_INIT(&sock_filter_head); @@ -145,21 +145,20 @@ sflt_init(void) } static void -sflt_retain_locked(struct socket_filter *filter) +sflt_retain_locked(struct socket_filter *filter) { - filter->sf_refcount++; + os_ref_retain_locked(&filter->sf_refcount); } static void sflt_release_locked(struct socket_filter *filter) { - filter->sf_refcount--; - if (filter->sf_refcount == 0) { + if (os_ref_release_locked(&filter->sf_refcount) == 0) { /* Call the unregistered function */ if (filter->sf_filter.sf_unregistered) { lck_rw_unlock_exclusive(sock_filter_lock); filter->sf_filter.sf_unregistered( - filter->sf_filter.sf_handle); + filter->sf_filter.sf_handle); lck_rw_lock_exclusive(sock_filter_lock); } @@ -237,9 +236,9 @@ sflt_cleanup_thread(void *blah, wait_result_t blah2) lck_rw_lock_exclusive(sock_filter_lock); /* Cleanup every dead item */ - struct socket_filter_entry *entry; + struct socket_filter_entry *entry; for (entry = dead; entry; entry = dead) { - struct socket_filter_entry **nextpp; + struct socket_filter_entry **nextpp; dead = entry->sfe_next_oncleanup; @@ -253,8 +252,8 @@ sflt_cleanup_thread(void *blah, wait_result_t blah2) * Warning - passing a potentially * dead socket may be bad */ - entry->sfe_filter->sf_filter. 
sf_detach( - entry->sfe_cookie, entry->sfe_socket); + entry->sfe_filter->sf_filter.sf_detach( + entry->sfe_cookie, entry->sfe_socket); lck_rw_lock_exclusive(sock_filter_lock); } @@ -306,22 +305,26 @@ sflt_attach_locked(struct socket *so, struct socket_filter *filter, int error = 0; struct socket_filter_entry *entry = NULL; - if (sflt_permission_check(sotoinpcb(so))) - return (0); + if (sflt_permission_check(sotoinpcb(so))) { + return 0; + } - if (filter == NULL) - return (ENOENT); + if (filter == NULL) { + return ENOENT; + } for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) { if (entry->sfe_filter->sf_filter.sf_handle == - filter->sf_filter.sf_handle) - return (EEXIST); + filter->sf_filter.sf_handle) { + return EEXIST; + } } /* allocate the socket filter entry */ - MALLOC(entry, struct socket_filter_entry *, sizeof (*entry), M_IFADDR, + MALLOC(entry, struct socket_filter_entry *, sizeof(*entry), M_IFADDR, M_WAITOK); - if (entry == NULL) - return (ENOMEM); + if (entry == NULL) { + return ENOMEM; + } /* Initialize the socket filter entry */ entry->sfe_cookie = NULL; @@ -350,16 +353,18 @@ sflt_attach_locked(struct socket *so, struct socket_filter *filter, lck_rw_unlock_exclusive(sock_filter_lock); /* Unlock the socket */ - if (socklocked) + if (socklocked) { socket_unlock(so, 0); + } /* It's finally safe to call the filter function */ error = entry->sfe_filter->sf_filter.sf_attach( - &entry->sfe_cookie, so); + &entry->sfe_cookie, so); /* Lock the socket again */ - if (socklocked) + if (socklocked) { socket_lock(so, 0); + } /* Lock the filters again */ lck_rw_lock_exclusive(sock_filter_lock); @@ -378,14 +383,15 @@ sflt_attach_locked(struct socket *so, struct socket_filter *filter, sflt_entry_release(entry); } - return (error); + return error; } errno_t sflt_attach_internal(socket_t socket, sflt_handle handle) { - if (socket == NULL || handle == 0) - return (EINVAL); + if (socket == NULL || handle == 0) { + return EINVAL; + } int result = EINVAL; @@ -393,7 +399,9 @@ sflt_attach_internal(socket_t socket, sflt_handle handle) struct socket_filter *filter = NULL; TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) { - if (filter->sf_filter.sf_handle == handle) break; + if (filter->sf_filter.sf_handle == handle) { + break; + } } if (filter) { @@ -402,7 +410,7 @@ sflt_attach_internal(socket_t socket, sflt_handle handle) lck_rw_unlock_exclusive(sock_filter_lock); - return (result); + return result; } static void @@ -428,8 +436,9 @@ sflt_initsock(struct socket *so) lck_rw_lock_shared(sock_filter_lock); if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) { /* Promote lock to exclusive */ - if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock)) + if (!lck_rw_lock_shared_to_exclusive(sock_filter_lock)) { lck_rw_lock_exclusive(sock_filter_lock); + } /* * Warning: A filter unregistering will be pulled out of @@ -455,8 +464,9 @@ sflt_initsock(struct socket *so) sflt_attach_locked(so, filter, 0); filter_next = TAILQ_NEXT(filter, sf_protosw_next); - if (filter_next) + if (filter_next) { sflt_retain_locked(filter_next); + } /* * Warning: filt_release_locked may remove @@ -525,8 +535,9 @@ static void sflt_notify_internal(struct socket *so, sflt_event_t event, void *param, sflt_handle handle) { - if (so->so_filt == NULL) + if (so->so_filt == NULL) { return; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -552,7 +563,7 @@ sflt_notify_internal(struct socket *so, sflt_event_t event, void *param, /* Finally call the filter */ entry->sfe_filter->sf_filter.sf_notify( - entry->sfe_cookie, 
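/*
 * [Editor's sketch] sflt_attach_locked() above pins the entry with a
 * reference, drops the socket lock, and only then calls the filter's
 * sf_attach, because the callback may block or re-enter socket code. The
 * shape of that pattern, with a pthread mutex standing in for
 * socket_lock()/socket_unlock(); the toy_* names are invented.
 */
#include <pthread.h>

struct toy_filter_entry {
	int refcount;           /* guarded by the same lock */
	int (*attach)(void **cookie, void *so);
	void *cookie;
};

static int
toy_call_attach(pthread_mutex_t *so_lock, struct toy_filter_entry *e, void *so)
{
	int error;

	e->refcount++;                  /* keep the entry alive while unlocked */
	pthread_mutex_unlock(so_lock);  /* it's finally safe to call out */
	error = e->attach(&e->cookie, so);
	pthread_mutex_lock(so_lock);    /* re-lock before touching shared lists */
	e->refcount--;
	return error;
}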
so, event, param); + entry->sfe_cookie, so, event, param); /* * Take the socket filter lock again @@ -570,7 +581,7 @@ sflt_notify_internal(struct socket *so, sflt_event_t event, void *param, } __private_extern__ void -sflt_notify(struct socket *so, sflt_event_t event, void *param) +sflt_notify(struct socket *so, sflt_event_t event, void *param) { sflt_notify_internal(so, event, param, 0); } @@ -585,8 +596,9 @@ sflt_notify_after_register(struct socket *so, sflt_event_t event, __private_extern__ int sflt_ioctl(struct socket *so, u_long cmd, caddr_t data) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -612,7 +624,7 @@ sflt_ioctl(struct socket *so, u_long cmd, caddr_t data) /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_ioctl( - entry->sfe_cookie, so, cmd, data); + entry->sfe_cookie, so, cmd, data); /* * Take the socket filter lock again @@ -628,14 +640,15 @@ sflt_ioctl(struct socket *so, u_long cmd, caddr_t data) socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int sflt_bind(struct socket *so, const struct sockaddr *nam) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -661,7 +674,7 @@ sflt_bind(struct socket *so, const struct sockaddr *nam) /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_bind( - entry->sfe_cookie, so, nam); + entry->sfe_cookie, so, nam); /* * Take the socket filter lock again and @@ -677,14 +690,15 @@ sflt_bind(struct socket *so, const struct sockaddr *nam) socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int sflt_listen(struct socket *so) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -710,7 +724,7 @@ sflt_listen(struct socket *so) /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_listen( - entry->sfe_cookie, so); + entry->sfe_cookie, so); /* * Take the socket filter lock again @@ -726,15 +740,16 @@ sflt_listen(struct socket *so) socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int sflt_accept(struct socket *head, struct socket *so, const struct sockaddr *local, const struct sockaddr *remote) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -760,7 +775,7 @@ sflt_accept(struct socket *head, struct socket *so, /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_accept( - entry->sfe_cookie, head, so, local, remote); + entry->sfe_cookie, head, so, local, remote); /* * Take the socket filter lock again @@ -776,14 +791,15 @@ sflt_accept(struct socket *head, struct socket *so, socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int sflt_getsockname(struct socket *so, struct sockaddr **local) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -809,7 +825,7 @@ 
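/*
 * [Editor's sketch] Every sflt_* hook in these hunks (ioctl, bind, listen,
 * accept, getsockname, and so on) walks the socket's filter list the same
 * way: iterate while error == 0, so the first filter that objects vetoes the
 * operation. That shared loop, reduced to its core; toy_* names invented.
 */
struct toy_entry {
	struct toy_entry *next_onsocket;
	int (*hook)(void *cookie);
	void *cookie;
};

static int
toy_run_hooks(struct toy_entry *head)
{
	int error = 0;

	for (struct toy_entry *e = head; e != NULL && error == 0;
	    e = e->next_onsocket) {
		if (e->hook != NULL) {          /* filters may omit a hook */
			error = e->hook(e->cookie); /* first error stops the walk */
		}
	}
	return error;                           /* 0 lets the operation proceed */
}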
sflt_getsockname(struct socket *so, struct sockaddr **local) /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_getsockname( - entry->sfe_cookie, so, local); + entry->sfe_cookie, so, local); /* * Take the socket filter lock again @@ -825,14 +841,15 @@ sflt_getsockname(struct socket *so, struct sockaddr **local) socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int sflt_getpeername(struct socket *so, struct sockaddr **remote) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -858,7 +875,7 @@ sflt_getpeername(struct socket *so, struct sockaddr **remote) /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_getpeername( - entry->sfe_cookie, so, remote); + entry->sfe_cookie, so, remote); /* * Take the socket filter lock again @@ -874,14 +891,15 @@ sflt_getpeername(struct socket *so, struct sockaddr **remote) socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int -sflt_connectin(struct socket *so, const struct sockaddr *remote) +sflt_connectin(struct socket *so, const struct sockaddr *remote) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -907,7 +925,7 @@ sflt_connectin(struct socket *so, const struct sockaddr *remote) /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_connect_in( - entry->sfe_cookie, so, remote); + entry->sfe_cookie, so, remote); /* * Take the socket filter lock again @@ -923,7 +941,7 @@ sflt_connectin(struct socket *so, const struct sockaddr *remote) socket_lock(so, 0); } - return (error); + return error; } static int @@ -953,7 +971,7 @@ sflt_connectout_common(struct socket *so, const struct sockaddr *nam) /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_connect_out( - entry->sfe_cookie, so, nam); + entry->sfe_cookie, so, nam); /* * Take the socket filter lock again @@ -969,7 +987,7 @@ sflt_connectout_common(struct socket *so, const struct sockaddr *nam) socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int @@ -979,20 +997,22 @@ sflt_connectout(struct socket *so, const struct sockaddr *nam) struct sockaddr *sa; int error; - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } /* * Workaround for rdar://23362120 * Always pass a buffer that can hold an IPv6 socket address */ - bzero(buf, sizeof (buf)); + bzero(buf, sizeof(buf)); bcopy(nam, buf, nam->sa_len); sa = (struct sockaddr *)buf; error = sflt_connectout_common(so, sa); - if (error != 0) - return (error); + if (error != 0) { + return error; + } /* * If the address was modified, copy it back @@ -1001,14 +1021,15 @@ sflt_connectout(struct socket *so, const struct sockaddr *nam) bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len); } - return (0); + return 0; } __private_extern__ int sflt_setsockopt(struct socket *so, struct sockopt *sopt) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -1034,7 +1055,7 @@ sflt_setsockopt(struct socket *so, struct sockopt *sopt) /* Call the 
filter */ error = entry->sfe_filter->sf_filter.sf_setoption( - entry->sfe_cookie, so, sopt); + entry->sfe_cookie, so, sopt); /* * Take the socket filter lock again @@ -1050,14 +1071,15 @@ sflt_setsockopt(struct socket *so, struct sockopt *sopt) socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int sflt_getsockopt(struct socket *so, struct sockopt *sopt) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -1083,7 +1105,7 @@ sflt_getsockopt(struct socket *so, struct sockopt *sopt) /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_getoption( - entry->sfe_cookie, so, sopt); + entry->sfe_cookie, so, sopt); /* * Take the socket filter lock again @@ -1099,15 +1121,16 @@ sflt_getsockopt(struct socket *so, struct sockopt *sopt) socket_lock(so, 0); } - return (error); + return error; } __private_extern__ int sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int unlocked = 0; @@ -1118,8 +1141,9 @@ sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data, for (entry = so->so_filt; entry && error == 0; entry = entry->sfe_next_onsocket) { /* skip if this is a subflow socket */ - if (so->so_flags & SOF_MP_SUBFLOW) + if (so->so_flags & SOF_MP_SUBFLOW) { continue; + } if ((entry->sfe_flags & SFEF_ATTACHED) && entry->sfe_filter->sf_filter.sf_data_out) { /* @@ -1142,7 +1166,7 @@ sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data, /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_data_out( - entry->sfe_cookie, so, to, data, control, flags); + entry->sfe_cookie, so, to, data, control, flags); /* * Take the socket filter lock again @@ -1156,19 +1180,21 @@ sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data, if (unlocked) { socket_lock(so, 0); - if (setsendthread) + if (setsendthread) { so->so_send_filt_thread = NULL; + } } - return (error); + return error; } __private_extern__ int sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags) { - if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) - return (0); + if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { + return 0; + } struct socket_filter_entry *entry; int error = 0; @@ -1179,8 +1205,9 @@ sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data, for (entry = so->so_filt; entry && (error == 0); entry = entry->sfe_next_onsocket) { /* skip if this is a subflow socket */ - if (so->so_flags & SOF_MP_SUBFLOW) + if (so->so_flags & SOF_MP_SUBFLOW) { continue; + } if ((entry->sfe_flags & SFEF_ATTACHED) && entry->sfe_filter->sf_filter.sf_data_in) { /* @@ -1198,7 +1225,7 @@ sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data, /* Call the filter */ error = entry->sfe_filter->sf_filter.sf_data_in( - entry->sfe_cookie, so, from, data, control, flags); + entry->sfe_cookie, so, from, data, control, flags); /* * Take the socket filter lock again @@ -1214,7 +1241,7 @@ sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data, socket_lock(so, 0); } - return (error); + return error; } #pragma 
mark -- KPI -- @@ -1225,17 +1252,18 @@ sflt_attach(socket_t socket, sflt_handle handle) socket_lock(socket, 1); errno_t result = sflt_attach_internal(socket, handle); socket_unlock(socket, 1); - return (result); + return result; } errno_t sflt_detach(socket_t socket, sflt_handle handle) { struct socket_filter_entry *entry; - errno_t result = 0; + errno_t result = 0; - if (socket == NULL || handle == 0) - return (EINVAL); + if (socket == NULL || handle == 0) { + return EINVAL; + } lck_rw_lock_exclusive(sock_filter_lock); for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) { @@ -1250,7 +1278,7 @@ sflt_detach(socket_t socket, sflt_handle handle) } lck_rw_unlock_exclusive(sock_filter_lock); - return (result); + return result; } struct solist { @@ -1260,7 +1288,7 @@ struct solist { static errno_t sflt_register_common(const struct sflt_filter *filter, int domain, int type, - int protocol, bool is_internal) + int protocol, bool is_internal) { struct socket_filter *sock_filt = NULL; struct socket_filter *match = NULL; @@ -1271,28 +1299,31 @@ sflt_register_common(const struct sflt_filter *filter, int domain, int type, struct inpcb *inp; struct solist *solisthead = NULL, *solist = NULL; - if ((domain != PF_INET) && (domain != PF_INET6)) - return (ENOTSUP); + if ((domain != PF_INET) && (domain != PF_INET6)) { + return ENOTSUP; + } pr = pffindproto(domain, protocol, type); - if (pr == NULL) - return (ENOENT); + if (pr == NULL) { + return ENOENT; + } if (filter->sf_attach == NULL || filter->sf_detach == NULL || - filter->sf_handle == 0 || filter->sf_name == NULL) - return (EINVAL); + filter->sf_handle == 0 || filter->sf_name == NULL) { + return EINVAL; + } /* Allocate the socket filter */ - MALLOC(sock_filt, struct socket_filter *, sizeof (*sock_filt), + MALLOC(sock_filt, struct socket_filter *, sizeof(*sock_filt), M_IFADDR, M_WAITOK); if (sock_filt == NULL) { - return (ENOBUFS); + return ENOBUFS; } - bzero(sock_filt, sizeof (*sock_filt)); + bzero(sock_filt, sizeof(*sock_filt)); /* Legacy sflt_filter length; current structure minus extended */ - len = sizeof (*filter) - sizeof (struct sflt_filter_ext); + len = sizeof(*filter) - sizeof(struct sflt_filter_ext); /* * Include extended fields if filter defines SFLT_EXTENDED. * We've zeroed out our internal sflt_filter placeholder, @@ -1301,8 +1332,9 @@ sflt_register_common(const struct sflt_filter *filter, int domain, int type, if (filter->sf_flags & SFLT_EXTENDED) { unsigned int ext_len = filter->sf_len; - if (ext_len > sizeof (struct sflt_filter_ext)) - ext_len = sizeof (struct sflt_filter_ext); + if (ext_len > sizeof(struct sflt_filter_ext)) { + ext_len = sizeof(struct sflt_filter_ext); + } len += ext_len; } @@ -1325,7 +1357,7 @@ sflt_register_common(const struct sflt_filter *filter, int domain, int type, sf_protosw_next); sock_filt->sf_proto = pr; } - sflt_retain_locked(sock_filt); + os_ref_init(&sock_filt->sf_refcount, NULL); OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_count); INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_total); @@ -1337,20 +1369,21 @@ sflt_register_common(const struct sflt_filter *filter, int domain, int type, if (match != NULL) { FREE(sock_filt, M_IFADDR); - return (EEXIST); + return EEXIST; } - if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY)) - return (error); + if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY)) { + return error; + } /* * Setup the filter on the TCP and UDP sockets already created. 
*/ -#define SOLIST_ADD(_so) do { \ - solist->next = solisthead; \ - sock_retain((_so)); \ - solist->so = (_so); \ - solisthead = solist; \ +#define SOLIST_ADD(_so) do { \ + solist->next = solisthead; \ + sock_retain((_so)); \ + solist->so = (_so); \ + solisthead = solist; \ } while (0) if (protocol == IPPROTO_TCP) { lck_rw_lock_shared(tcbinfo.ipi_lock); @@ -1360,12 +1393,14 @@ sflt_register_common(const struct sflt_filter *filter, int domain, int type, (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) || !SOCK_CHECK_DOM(so, domain) || - !SOCK_CHECK_TYPE(so, type)) + !SOCK_CHECK_TYPE(so, type)) { continue; - MALLOC(solist, struct solist *, sizeof (*solist), + } + MALLOC(solist, struct solist *, sizeof(*solist), M_IFADDR, M_NOWAIT); - if (!solist) + if (!solist) { continue; + } SOLIST_ADD(so); } lck_rw_done(tcbinfo.ipi_lock); @@ -1377,12 +1412,14 @@ sflt_register_common(const struct sflt_filter *filter, int domain, int type, (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) || !SOCK_CHECK_DOM(so, domain) || - !SOCK_CHECK_TYPE(so, type)) + !SOCK_CHECK_TYPE(so, type)) { continue; - MALLOC(solist, struct solist *, sizeof (*solist), + } + MALLOC(solist, struct solist *, sizeof(*solist), M_IFADDR, M_NOWAIT); - if (!solist) + if (!solist) { continue; + } SOLIST_ADD(so); } lck_rw_done(udbinfo.ipi_lock); @@ -1396,28 +1433,29 @@ sflt_register_common(const struct sflt_filter *filter, int domain, int type, so = solisthead->so; socket_lock(so, 0); sflt_initsock(so); - if (so->so_state & SS_ISCONNECTING) + if (so->so_state & SS_ISCONNECTING) { sflt_notify_after_register(so, sock_evt_connecting, handle); - else if (so->so_state & SS_ISCONNECTED) + } else if (so->so_state & SS_ISCONNECTED) { sflt_notify_after_register(so, sock_evt_connected, handle); - else if ((so->so_state & - (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) == - (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE)) + } else if ((so->so_state & + (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) == + (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) { sflt_notify_after_register(so, sock_evt_disconnecting, handle); - else if ((so->so_state & - (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) == - (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED)) + } else if ((so->so_state & + (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) == + (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) { sflt_notify_after_register(so, sock_evt_disconnected, handle); - else if (so->so_state & SS_CANTSENDMORE) + } else if (so->so_state & SS_CANTSENDMORE) { sflt_notify_after_register(so, sock_evt_cantsendmore, handle); - else if (so->so_state & SS_CANTRCVMORE) + } else if (so->so_state & SS_CANTRCVMORE) { sflt_notify_after_register(so, sock_evt_cantrecvmore, handle); + } socket_unlock(so, 0); /* XXX no easy way to post the sock_evt_closing event */ sock_release(so); @@ -1426,21 +1464,21 @@ sflt_register_common(const struct sflt_filter *filter, int domain, int type, FREE(solist, M_IFADDR); } - return (error); + return error; } errno_t sflt_register_internal(const struct sflt_filter *filter, int domain, int type, - int protocol) + int protocol) { - return (sflt_register_common(filter, domain, type, protocol, true)); + return sflt_register_common(filter, domain, type, protocol, true); } errno_t sflt_register(const struct sflt_filter *filter, int domain, int type, - int protocol) + int protocol) { - return (sflt_register_common(filter, domain, type, protocol, false)); + return 
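/*
 * [Editor's sketch] SOLIST_ADD above is wrapped in do { ... } while (0) so a
 * multi-statement macro behaves as a single statement after an unbraced
 * if/else and demands the usual trailing semicolon. The same pattern in
 * miniature, including the hunk's "allocation failure skips the item"
 * behavior; the toy_* names are invented.
 */
#include <stdlib.h>

struct toy_node {
	struct toy_node *next;
	int value;
};

#define TOY_LIST_ADD(head, n, v) do {           \
	        (n)->value = (v);               \
	        (n)->next = (head);             \
	        (head) = (n);                   \
} while (0)

static struct toy_node *
toy_build_list(void)
{
	struct toy_node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct toy_node *n = malloc(sizeof(*n));
		if (n == NULL) {
			continue;       /* as above: failed alloc skips the socket */
		}
		TOY_LIST_ADD(head, n, i);
	}
	return head;
}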
sflt_register_common(filter, domain, type, protocol, false); } errno_t @@ -1451,8 +1489,9 @@ sflt_unregister(sflt_handle handle) /* Find the entry by the handle */ TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) { - if (filter->sf_filter.sf_handle == handle) + if (filter->sf_filter.sf_handle == handle) { break; + } } if (filter) { @@ -1481,10 +1520,11 @@ sflt_unregister(sflt_handle handle) lck_rw_unlock_exclusive(sock_filter_lock); - if (filter == NULL) - return (ENOENT); + if (filter == NULL) { + return ENOENT; + } - return (0); + return 0; } errno_t @@ -1493,11 +1533,12 @@ sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data, { int error = 0; - if (so == NULL || data == NULL) - return (EINVAL); + if (so == NULL || data == NULL) { + return EINVAL; + } if (flags & sock_data_filt_flag_oob) { - return (ENOTSUP); + return ENOTSUP; } socket_lock(so, 1); @@ -1510,14 +1551,16 @@ sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data, if (from) { if (sbappendaddr(&so->so_rcv, - (struct sockaddr *)(uintptr_t)from, data, control, NULL)) + (struct sockaddr *)(uintptr_t)from, data, control, NULL)) { sorwakeup(so); + } goto done; } if (control) { - if (sbappendcontrol(&so->so_rcv, data, control, NULL)) + if (sbappendcontrol(&so->so_rcv, data, control, NULL)) { sorwakeup(so); + } goto done; } @@ -1526,16 +1569,18 @@ sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data, error = EINVAL; goto done; } - if (sbappendrecord(&so->so_rcv, (struct mbuf *)data)) + if (sbappendrecord(&so->so_rcv, (struct mbuf *)data)) { sorwakeup(so); + } goto done; } - if (sbappend(&so->so_rcv, data)) + if (sbappend(&so->so_rcv, data)) { sorwakeup(so); + } done: socket_unlock(so, 1); - return (error); + return error; } errno_t @@ -1545,47 +1590,49 @@ sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data, int sosendflags = 0; /* reject if this is a subflow socket */ - if (so->so_flags & SOF_MP_SUBFLOW) - return (ENOTSUP); + if (so->so_flags & SOF_MP_SUBFLOW) { + return ENOTSUP; + } - if (flags & sock_data_filt_flag_oob) + if (flags & sock_data_filt_flag_oob) { sosendflags = MSG_OOB; - return (sosend(so, (struct sockaddr *)(uintptr_t)to, NULL, - data, control, sosendflags)); + } + return sosend(so, (struct sockaddr *)(uintptr_t)to, NULL, + data, control, sosendflags); } sockopt_dir sockopt_direction(sockopt_t sopt) { - return ((sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set); + return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set; } int sockopt_level(sockopt_t sopt) { - return (sopt->sopt_level); + return sopt->sopt_level; } int sockopt_name(sockopt_t sopt) { - return (sopt->sopt_name); + return sopt->sopt_name; } size_t sockopt_valsize(sockopt_t sopt) { - return (sopt->sopt_valsize); + return sopt->sopt_valsize; } errno_t sockopt_copyin(sockopt_t sopt, void *data, size_t len) { - return (sooptcopyin(sopt, data, len, len)); + return sooptcopyin(sopt, data, len, len); } errno_t sockopt_copyout(sockopt_t sopt, void *data, size_t len) { - return (sooptcopyout(sopt, data, len)); + return sooptcopyout(sopt, data, len); } diff --git a/bsd/kern/mach_fat.c b/bsd/kern/mach_fat.c index 7af7c6580..ffb26e9bf 100644 --- a/bsd/kern/mach_fat.c +++ b/bsd/kern/mach_fat.c @@ -2,7 +2,7 @@ * Copyright (c) 1991-2015 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -40,42 +40,42 @@ #include /********************************************************************** - * Routine: fatfile_getarch() - * - * Function: Locate the architecture-dependant contents of a fat - * file that match this CPU. - * - * Args: header: A pointer to the fat file header. - * size: How large the fat file header is (including fat_arch array) - * req_cpu_type: The required cpu type. - * mask_bits: Bits to mask from the sub-image type when - * grading it vs. the req_cpu_type - * archret (out): Pointer to fat_arch structure to hold - * the results. - * - * Returns: KERN_SUCCESS: Valid architecture found. - * KERN_FAILURE: No valid architecture found. - **********************************************************************/ +* Routine: fatfile_getarch() +* +* Function: Locate the architecture-dependant contents of a fat +* file that match this CPU. +* +* Args: header: A pointer to the fat file header. +* size: How large the fat file header is (including fat_arch array) +* req_cpu_type: The required cpu type. +* mask_bits: Bits to mask from the sub-image type when +* grading it vs. the req_cpu_type +* archret (out): Pointer to fat_arch structure to hold +* the results. +* +* Returns: KERN_SUCCESS: Valid architecture found. +* KERN_FAILURE: No valid architecture found. 
+**********************************************************************/ static load_return_t fatfile_getarch( - vm_offset_t data_ptr, - vm_size_t data_size, - cpu_type_t req_cpu_type, - cpu_type_t mask_bits, - struct fat_arch *archret) + vm_offset_t data_ptr, + vm_size_t data_size, + cpu_type_t req_cpu_type, + cpu_type_t mask_bits, + struct fat_arch *archret) { - load_return_t lret; - struct fat_arch *arch; - struct fat_arch *best_arch; - int grade; - int best_grade; - uint32_t nfat_arch, max_nfat_arch; - cpu_type_t testtype; - cpu_type_t testsubtype; - struct fat_header *header; + load_return_t lret; + struct fat_arch *arch; + struct fat_arch *best_arch; + int grade; + int best_grade; + uint32_t nfat_arch, max_nfat_arch; + cpu_type_t testtype; + cpu_type_t testsubtype; + struct fat_header *header; if (sizeof(struct fat_header) > data_size) { - return (LOAD_FAILURE); + return LOAD_FAILURE; } header = (struct fat_header *)data_ptr; @@ -84,7 +84,7 @@ fatfile_getarch( max_nfat_arch = (data_size - sizeof(struct fat_header)) / sizeof(struct fat_arch); if (nfat_arch > max_nfat_arch) { /* nfat_arch would cause us to read off end of buffer */ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } /* @@ -93,20 +93,20 @@ fatfile_getarch( best_grade = 0; arch = (struct fat_arch *) (data_ptr + sizeof(struct fat_header)); for (; nfat_arch-- > 0; arch++) { - testtype = OSSwapBigToHostInt32(arch->cputype); - testsubtype = OSSwapBigToHostInt32(arch->cpusubtype) & ~CPU_SUBTYPE_MASK; + testtype = OSSwapBigToHostInt32(arch->cputype); + testsubtype = OSSwapBigToHostInt32(arch->cpusubtype) & ~CPU_SUBTYPE_MASK; /* * Check to see if right cpu type. */ - if((testtype & ~mask_bits) != (req_cpu_type & ~mask_bits)) { + if ((testtype & ~mask_bits) != (req_cpu_type & ~mask_bits)) { continue; } /* - * Get the grade of the cpu subtype (without feature flags) + * Get the grade of the cpu subtype (without feature flags) */ - grade = grade_binary(testtype, testsubtype); + grade = grade_binary(testtype, testsubtype); /* * Remember it if it's the best we've seen. @@ -123,16 +123,16 @@ fatfile_getarch( if (best_arch == NULL) { lret = LOAD_BADARCH; } else { - archret->cputype = - OSSwapBigToHostInt32(best_arch->cputype); - archret->cpusubtype = - OSSwapBigToHostInt32(best_arch->cpusubtype); - archret->offset = - OSSwapBigToHostInt32(best_arch->offset); - archret->size = - OSSwapBigToHostInt32(best_arch->size); - archret->align = - OSSwapBigToHostInt32(best_arch->align); + archret->cputype = + OSSwapBigToHostInt32(best_arch->cputype); + archret->cpusubtype = + OSSwapBigToHostInt32(best_arch->cpusubtype); + archret->offset = + OSSwapBigToHostInt32(best_arch->offset); + archret->size = + OSSwapBigToHostInt32(best_arch->size); + archret->align = + OSSwapBigToHostInt32(best_arch->align); lret = LOAD_SUCCESS; } @@ -140,14 +140,14 @@ fatfile_getarch( /* * Free the memory we allocated and return. */ - return(lret); + return lret; } load_return_t fatfile_getbestarch( - vm_offset_t data_ptr, - vm_size_t data_size, - struct fat_arch *archret) + vm_offset_t data_ptr, + vm_size_t data_size, + struct fat_arch *archret) { /* * Ignore all architectural bits when determining if an image @@ -170,26 +170,26 @@ fatfile_getbestarch_for_cputype( } /********************************************************************** - * Routine: fatfile_getarch_with_bits() - * - * Function: Locate the architecture-dependant contents of a fat - * file that match this CPU. - * - * Args: vp: The vnode for the fat file. 
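/*
 * [Editor's sketch] fatfile_getarch() above is a best-match scan: byte-swap
 * each fat_arch entry out of big-endian, skip wrong CPU types, and keep the
 * slice with the highest grade. A compilable model of that loop; toy_grade()
 * is a trivial placeholder for the CPU-specific grade_binary(), and ntohl()
 * stands in for OSSwapBigToHostInt32().
 */
#include <stdint.h>
#include <arpa/inet.h>

struct toy_fat_arch {
	uint32_t cputype;       /* stored big-endian on disk */
	uint32_t cpusubtype;
};

static int
toy_grade(uint32_t cputype, uint32_t cpusubtype)
{
	(void)cputype;
	(void)cpusubtype;
	return 1;               /* real grading is architecture-specific */
}

static const struct toy_fat_arch *
toy_best_arch(const struct toy_fat_arch *arches, uint32_t nfat_arch,
    uint32_t req_cpu_type)
{
	const struct toy_fat_arch *best = NULL;
	int best_grade = 0;

	for (uint32_t i = 0; i < nfat_arch; i++) {
		uint32_t t = ntohl(arches[i].cputype);
		uint32_t s = ntohl(arches[i].cpusubtype);
		if (t != req_cpu_type) {
			continue;       /* wrong CPU type entirely */
		}
		int g = toy_grade(t, s);
		if (g > best_grade) {   /* remember the best we've seen */
			best_grade = g;
			best = &arches[i];
		}
	}
	return best;                    /* NULL maps to LOAD_BADARCH */
}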
- * archbits: Architecture specific feature bits - * header: A pointer to the fat file header. - * archret (out): Pointer to fat_arch structure to hold - * the results. - * - * Returns: KERN_SUCCESS: Valid architecture found. - * KERN_FAILURE: No valid architecture found. - **********************************************************************/ +* Routine: fatfile_getarch_with_bits() +* +* Function: Locate the architecture-dependant contents of a fat +* file that match this CPU. +* +* Args: vp: The vnode for the fat file. +* archbits: Architecture specific feature bits +* header: A pointer to the fat file header. +* archret (out): Pointer to fat_arch structure to hold +* the results. +* +* Returns: KERN_SUCCESS: Valid architecture found. +* KERN_FAILURE: No valid architecture found. +**********************************************************************/ load_return_t fatfile_getarch_with_bits( - integer_t archbits, - vm_offset_t data_ptr, - vm_size_t data_size, - struct fat_arch *archret) + integer_t archbits, + vm_offset_t data_ptr, + vm_size_t data_size, + struct fat_arch *archret) { /* * Scan the fat_arch array for matches with the requested @@ -214,11 +214,11 @@ fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size) uint32_t max_nfat_arch, i, j; uint32_t fat_header_size; - struct fat_arch *arches; - struct fat_header *header; + struct fat_arch *arches; + struct fat_header *header; if (sizeof(struct fat_header) > data_size) { - return (LOAD_FAILURE); + return LOAD_FAILURE; } header = (struct fat_header *)data_ptr; @@ -227,20 +227,20 @@ fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size) if (magic != FAT_MAGIC) { /* must be FAT_MAGIC big endian */ - return (LOAD_FAILURE); + return LOAD_FAILURE; } max_nfat_arch = (data_size - sizeof(struct fat_header)) / sizeof(struct fat_arch); if (nfat_arch > max_nfat_arch) { /* nfat_arch would cause us to read off end of buffer */ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } /* now that we know the fat_arch list fits in the buffer, how much does it use? 
*/ fat_header_size = sizeof(struct fat_header) + nfat_arch * sizeof(struct fat_arch); arches = (struct fat_arch *)(data_ptr + sizeof(struct fat_header)); - for (i=0; i < nfat_arch; i++) { + for (i = 0; i < nfat_arch; i++) { uint32_t i_begin = OSSwapBigToHostInt32(arches[i].offset); uint32_t i_size = OSSwapBigToHostInt32(arches[i].size); uint32_t i_cputype = OSSwapBigToHostInt32(arches[i].cputype); @@ -248,16 +248,16 @@ fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size) if (i_begin < fat_header_size) { /* slice is trying to claim part of the file used by fat headers themselves */ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } if ((UINT32_MAX - i_size) < i_begin) { /* start + size would overflow */ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } uint32_t i_end = i_begin + i_size; - for (j=i+1; j < nfat_arch; j++) { + for (j = i + 1; j < nfat_arch; j++) { uint32_t j_begin = OSSwapBigToHostInt32(arches[j].offset); uint32_t j_size = OSSwapBigToHostInt32(arches[j].size); uint32_t j_cputype = OSSwapBigToHostInt32(arches[j].cputype); @@ -265,12 +265,12 @@ fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size) if ((i_cputype == j_cputype) && (i_cpusubtype == j_cpusubtype)) { /* duplicate cputype/cpusubtype, results in ambiguous references */ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } if ((UINT32_MAX - j_size) < j_begin) { /* start + size would overflow */ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } uint32_t j_end = j_begin + j_size; @@ -279,18 +279,18 @@ fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size) /* I completely precedes J */ } else { /* I started before J, but ends somewhere in or after J */ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } } else { if (i_begin >= j_end) { /* I started after J started but also after J ended */ } else { /* I started after J started but before it ended, so there is overlap */ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } } } } - return (LOAD_SUCCESS); + return LOAD_SUCCESS; } diff --git a/bsd/kern/mach_fat.h b/bsd/kern/mach_fat.h index def48fffd..6d108d1ec 100644 --- a/bsd/kern/mach_fat.h +++ b/bsd/kern/mach_fat.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
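/*
 * [Editor's sketch] The fatfile_validate_fatarches() hunk above rejects
 * slices that bite into the header area, that overflow 32-bit arithmetic,
 * or that overlap each other. Its two core predicates, extracted into
 * standalone checks; the toy_* names are invented.
 */
#include <stdbool.h>
#include <stdint.h>

/* A slice [begin, begin + size) is sane if it starts past the fat headers
 * and begin + size cannot wrap around. */
static bool
toy_slice_ok(uint32_t begin, uint32_t size, uint32_t fat_header_size)
{
	if (begin < fat_header_size) {
		return false;   /* claims bytes used by the fat headers */
	}
	if ((UINT32_MAX - size) < begin) {
		return false;   /* begin + size would overflow */
	}
	return true;
}

/* Two sane slices overlap unless one ends at or before the other begins. */
static bool
toy_slices_overlap(uint32_t a_begin, uint32_t a_end,
    uint32_t b_begin, uint32_t b_end)
{
	return !(a_end <= b_begin || b_end <= a_begin);
}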
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,8 +38,8 @@ load_return_t fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_si load_return_t fatfile_getbestarch(vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); load_return_t fatfile_getbestarch_for_cputype(cpu_type_t cputype, - vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); + vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); load_return_t fatfile_getarch_with_bits(integer_t archbits, - vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); + vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); #endif /* _BSD_KERN_MACH_FAT_H_ */ diff --git a/bsd/kern/mach_loader.c b/bsd/kern/mach_loader.c index 18d2239ba..82ee10f0f 100644 --- a/bsd/kern/mach_loader.c +++ b/bsd/kern/mach_loader.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -55,8 +55,8 @@ #include #include -#include /* vm_allocate() */ -#include /* mach_vm_allocate() */ +#include /* vm_allocate() */ +#include /* mach_vm_allocate() */ #include #include #include @@ -84,7 +84,7 @@ #include #include #include -#include /* for kIOReturnNotPrivileged */ +#include /* for kIOReturnNotPrivileged */ #include @@ -96,14 +96,14 @@ extern int bootarg_no32exec; /* bsd_init.c */ * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE * when KERNEL is defined. 
*/ -extern pmap_t pmap_create(ledger_t ledger, vm_map_size_t size, - boolean_t is_64bit); +extern pmap_t pmap_create(ledger_t ledger, vm_map_size_t size, + boolean_t is_64bit); /* XXX should have prototypes in a shared header file */ -extern int get_map_nentries(vm_map_t); +extern int get_map_nentries(vm_map_t); -extern kern_return_t memory_object_signed(memory_object_control_t control, - boolean_t is_signed); +extern kern_return_t memory_object_signed(memory_object_control_t control, + boolean_t is_signed); /* An empty load_result_t */ static const load_result_t load_result_null = { @@ -123,6 +123,7 @@ static const load_result_t load_result_null = { .using_lcmain = 0, .is_64bit_addr = 0, .is_64bit_data = 0, + .custom_stack = 0, .csflags = 0, .has_pagezero = 0, .uuid = { 0 }, @@ -138,130 +139,130 @@ static const load_result_t load_result_null = { */ static load_return_t parse_machfile( - struct vnode *vp, - vm_map_t map, - thread_t thread, - struct mach_header *header, - off_t file_offset, - off_t macho_size, - int depth, - int64_t slide, - int64_t dyld_slide, - load_result_t *result, - load_result_t *binresult, - struct image_params *imgp -); + struct vnode *vp, + vm_map_t map, + thread_t thread, + struct mach_header *header, + off_t file_offset, + off_t macho_size, + int depth, + int64_t slide, + int64_t dyld_slide, + load_result_t *result, + load_result_t *binresult, + struct image_params *imgp + ); static load_return_t load_segment( - struct load_command *lcp, - uint32_t filetype, - void *control, - off_t pager_offset, - off_t macho_size, - struct vnode *vp, - vm_map_t map, - int64_t slide, - load_result_t *result -); + struct load_command *lcp, + uint32_t filetype, + void *control, + off_t pager_offset, + off_t macho_size, + struct vnode *vp, + vm_map_t map, + int64_t slide, + load_result_t *result + ); static load_return_t load_uuid( - struct uuid_command *uulp, - char *command_end, - load_result_t *result -); + struct uuid_command *uulp, + char *command_end, + load_result_t *result + ); static load_return_t load_code_signature( - struct linkedit_data_command *lcp, - struct vnode *vp, - off_t macho_offset, - off_t macho_size, - cpu_type_t cputype, - load_result_t *result, - struct image_params *imgp); - + struct linkedit_data_command *lcp, + struct vnode *vp, + off_t macho_offset, + off_t macho_size, + cpu_type_t cputype, + load_result_t *result, + struct image_params *imgp); + #if CONFIG_CODE_DECRYPTION static load_return_t set_code_unprotect( - struct encryption_info_command *lcp, - caddr_t addr, - vm_map_t map, - int64_t slide, - struct vnode *vp, - off_t macho_offset, - cpu_type_t cputype, - cpu_subtype_t cpusubtype); + struct encryption_info_command *lcp, + caddr_t addr, + vm_map_t map, + int64_t slide, + struct vnode *vp, + off_t macho_offset, + cpu_type_t cputype, + cpu_subtype_t cpusubtype); #endif static load_return_t load_main( - struct entry_point_command *epc, - thread_t thread, - int64_t slide, - load_result_t *result -); + struct entry_point_command *epc, + thread_t thread, + int64_t slide, + load_result_t *result + ); static load_return_t load_unixthread( - struct thread_command *tcp, - thread_t thread, - int64_t slide, - load_result_t *result -); + struct thread_command *tcp, + thread_t thread, + int64_t slide, + load_result_t *result + ); static load_return_t load_threadstate( - thread_t thread, - uint32_t *ts, - uint32_t total_size, + thread_t thread, + uint32_t *ts, + uint32_t total_size, load_result_t * -); + ); static load_return_t load_threadstack( - thread_t 
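/*
 * [Editor's sketch] load_result_null above gains a .custom_stack member.
 * The designated-initializer "null object" idiom means every load path
 * starts from identical defaults and a new field is added in exactly one
 * place. In miniature; the field names here are invented, not XNU's real
 * load_result_t layout.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_load_result {
	uint64_t entry_point;
	bool needs_dynlinker;
	bool custom_stack;      /* the newly tracked property */
};

static const struct toy_load_result toy_load_result_null = {
	.entry_point = 0,
	.needs_dynlinker = false,
	.custom_stack = false,  /* unnamed fields would also zero-initialize */
};

static void
toy_reset(struct toy_load_result *result)
{
	*result = toy_load_result_null; /* mirrors *result = load_result_null */
}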
thread, - uint32_t *ts, - uint32_t total_size, - mach_vm_offset_t *user_stack, - int *customstack, - load_result_t *result -); + thread_t thread, + uint32_t *ts, + uint32_t total_size, + mach_vm_offset_t *user_stack, + int *customstack, + load_result_t *result + ); static load_return_t load_threadentry( - thread_t thread, - uint32_t *ts, - uint32_t total_size, - mach_vm_offset_t *entry_point -); + thread_t thread, + uint32_t *ts, + uint32_t total_size, + mach_vm_offset_t *entry_point + ); static load_return_t load_dylinker( - struct dylinker_command *lcp, - integer_t archbits, - vm_map_t map, - thread_t thread, - int depth, - int64_t slide, - load_result_t *result, - struct image_params *imgp -); + struct dylinker_command *lcp, + integer_t archbits, + vm_map_t map, + thread_t thread, + int depth, + int64_t slide, + load_result_t *result, + struct image_params *imgp + ); struct macho_data; static load_return_t get_macho_vnode( - const char *path, - integer_t archbits, - struct mach_header *mach_header, - off_t *file_offset, - off_t *macho_size, - struct macho_data *macho_data, - struct vnode **vpp -); + const char *path, + integer_t archbits, + struct mach_header *mach_header, + off_t *file_offset, + off_t *macho_size, + struct macho_data *macho_data, + struct vnode **vpp + ); static inline void widen_segment_command(const struct segment_command *scp32, @@ -291,9 +292,10 @@ note_all_image_info_section(const struct segment_command_64 *scp, } *sectionp; unsigned int i; - - if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) + + if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) { return; + } for (i = 0; i < scp->nsects; ++i) { sectionp = (const void *) ((const char *)sections + section_size * i); @@ -321,33 +323,33 @@ const int fourk_binary_compatibility_allow_wx = FALSE; load_return_t load_machfile( - struct image_params *imgp, - struct mach_header *header, - thread_t thread, - vm_map_t *mapp, - load_result_t *result -) + struct image_params *imgp, + struct mach_header *header, + thread_t thread, + vm_map_t *mapp, + load_result_t *result + ) { - struct vnode *vp = imgp->ip_vp; - off_t file_offset = imgp->ip_arch_offset; - off_t macho_size = imgp->ip_arch_size; - off_t file_size = imgp->ip_vattr->va_data_size; - pmap_t pmap = 0; /* protected by create_map */ - vm_map_t map; - load_result_t myresult; - load_return_t lret; + struct vnode *vp = imgp->ip_vp; + off_t file_offset = imgp->ip_arch_offset; + off_t macho_size = imgp->ip_arch_size; + off_t file_size = imgp->ip_vattr->va_data_size; + pmap_t pmap = 0; /* protected by create_map */ + vm_map_t map; + load_result_t myresult; + load_return_t lret; boolean_t enforce_hard_pagezero = TRUE; int in_exec = (imgp->ip_flags & IMGPF_EXEC); task_t task = current_task(); proc_t p = current_proc(); - int64_t aslr_page_offset = 0; - int64_t dyld_aslr_page_offset = 0; - int64_t aslr_section_size = 0; - int64_t aslr_section_offset = 0; - kern_return_t kret; + int64_t aslr_page_offset = 0; + int64_t dyld_aslr_page_offset = 0; + int64_t aslr_section_size = 0; + int64_t aslr_section_offset = 0; + kern_return_t kret; if (macho_size > file_size) { - return(LOAD_BADMACHO); + return LOAD_BADMACHO; } result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR); @@ -360,12 +362,12 @@ load_machfile( ledger_task = task; } pmap = pmap_create(get_task_ledger(ledger_task), - (vm_map_size_t) 0, - result->is_64bit_addr); + (vm_map_size_t) 0, + result->is_64bit_addr); map = vm_map_create(pmap, - 0, - 
vm_compute_max_offset(result->is_64bit_addr), - TRUE); + 0, + vm_compute_max_offset(result->is_64bit_addr), + TRUE); #if defined(__arm64__) if (result->is_64bit_addr) { @@ -379,22 +381,23 @@ load_machfile( vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT); #endif /* __arm64__ */ -#ifndef CONFIG_ENFORCE_SIGNED_CODE +#ifndef CONFIG_ENFORCE_SIGNED_CODE /* This turns off faulting for executable pages, which allows * to circumvent Code Signing Enforcement. The per process * flag (CS_ENFORCEMENT) is not set yet, but we can use the * global flag. */ - if ( !cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION) ) { - vm_map_disable_NX(map); - // TODO: Message Trace or log that this is happening + if (!cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION)) { + vm_map_disable_NX(map); + // TODO: Message Trace or log that this is happening } #endif /* Forcibly disallow execution from data pages on even if the arch * normally permits it. */ - if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) + if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) { vm_map_disallow_data_exec(map); + } /* * Compute a random offset for ASLR, and an independent random offset for dyld. @@ -414,8 +417,9 @@ load_machfile( aslr_page_offset += aslr_section_offset; } - if (!result) + if (!result) { result = &myresult; + } *result = load_result_null; @@ -426,12 +430,12 @@ load_machfile( result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA); lret = parse_machfile(vp, map, thread, header, file_offset, macho_size, - 0, aslr_page_offset, dyld_aslr_page_offset, result, - NULL, imgp); + 0, aslr_page_offset, dyld_aslr_page_offset, result, + NULL, imgp); if (lret != LOAD_SUCCESS) { - vm_map_deallocate(map); /* will lose pmap reference too */ - return(lret); + vm_map_deallocate(map); /* will lose pmap reference too */ + return lret; } #if __x86_64__ @@ -454,24 +458,24 @@ load_machfile( vm_map_offset_t high_start; random_bits = random(); - random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT)-1; + random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT) - 1; high_start = (((vm_map_offset_t)random_bits) - << VM_MAP_HIGH_START_BITS_SHIFT); + << VM_MAP_HIGH_START_BITS_SHIFT); vm_map_set_high_start(map, high_start); } #endif /* __x86_64__ */ /* * Check to see if the page zero is enforced by the map->min_offset. 
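/*
 * [Editor's sketch] load_machfile() draws two independent random offsets,
 * one for the main image and one for dyld, so learning one slide reveals
 * nothing about the other. A minimal model: the page size and slide budget
 * below are invented, and random() is only a stand-in for the kernel's
 * entropy source.
 */
#include <stdint.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE           4096u
#define TOY_MAX_SLIDE_PAGES     256u

static uint64_t
toy_aslr_offset(void)
{
	/* Slide by whole pages so every mapping stays page-aligned. */
	return ((uint64_t)random() % TOY_MAX_SLIDE_PAGES) * TOY_PAGE_SIZE;
}

static void
toy_pick_slides(uint64_t *image_slide, uint64_t *dyld_slide)
{
	*image_slide = toy_aslr_offset();   /* for the executable itself */
	*dyld_slide = toy_aslr_offset();    /* drawn independently for dyld */
}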
- */ + */ if (enforce_hard_pagezero && (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) { #if __arm64__ if (!result->is_64bit_addr && /* not 64-bit address space */ - !(header->flags & MH_PIE) && /* not PIE */ + !(header->flags & MH_PIE) && /* not PIE */ (vm_map_page_shift(map) != FOURK_PAGE_SHIFT || - PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */ - result->has_pagezero && /* has a "soft" page zero */ + PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */ + result->has_pagezero && /* has a "soft" page zero */ fourk_binary_compatibility_unsafe) { /* * For backwards compatibility of "4K" apps on @@ -480,8 +484,8 @@ load_machfile( } else #endif /* __arm64__ */ { - vm_map_deallocate(map); /* will lose pmap reference too */ - return (LOAD_BADMACHO); + vm_map_deallocate(map); /* will lose pmap reference too */ + return LOAD_BADMACHO; } } @@ -514,8 +518,8 @@ load_machfile( */ kret = task_start_halt(task); if (kret != KERN_SUCCESS) { - vm_map_deallocate(map); /* will lose pmap reference too */ - return (LOAD_FAILURE); + vm_map_deallocate(map); /* will lose pmap reference too */ + return LOAD_FAILURE; } proc_transcommit(p, 0); workq_mark_exiting(p); @@ -543,15 +547,15 @@ load_machfile( } #endif /* CONFIG_32BIT_TELEMETRY */ - return(LOAD_SUCCESS); + return LOAD_SUCCESS; } int macho_printf = 0; -#define MACHO_PRINTF(args) \ - do { \ - if (macho_printf) { \ - printf args; \ - } \ +#define MACHO_PRINTF(args) \ + do { \ + if (macho_printf) { \ + printf args; \ + } \ } while (0) /* @@ -568,62 +572,62 @@ int macho_printf = 0; static load_return_t parse_machfile( - struct vnode *vp, - vm_map_t map, - thread_t thread, - struct mach_header *header, - off_t file_offset, - off_t macho_size, - int depth, - int64_t aslr_offset, - int64_t dyld_aslr_offset, - load_result_t *result, - load_result_t *binresult, - struct image_params *imgp -) + struct vnode *vp, + vm_map_t map, + thread_t thread, + struct mach_header *header, + off_t file_offset, + off_t macho_size, + int depth, + int64_t aslr_offset, + int64_t dyld_aslr_offset, + load_result_t *result, + load_result_t *binresult, + struct image_params *imgp + ) { - uint32_t ncmds; - struct load_command *lcp; - struct dylinker_command *dlp = 0; - integer_t dlarchbits = 0; - void * control; - load_return_t ret = LOAD_SUCCESS; - void * addr; - vm_size_t alloc_size, cmds_size; - size_t offset; - size_t oldoffset; /* for overflow check */ - int pass; - proc_t p = current_proc(); /* XXXX */ - int error; - int resid = 0; - size_t mach_header_sz = sizeof(struct mach_header); - boolean_t abi64; - boolean_t got_code_signatures = FALSE; - boolean_t found_header_segment = FALSE; - boolean_t found_xhdr = FALSE; - int64_t slide = 0; - boolean_t dyld_no_load_addr = FALSE; - boolean_t is_dyld = FALSE; - vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map)); + uint32_t ncmds; + struct load_command *lcp; + struct dylinker_command *dlp = 0; + integer_t dlarchbits = 0; + void * control; + load_return_t ret = LOAD_SUCCESS; + void * addr; + vm_size_t alloc_size, cmds_size; + size_t offset; + size_t oldoffset; /* for overflow check */ + int pass; + proc_t p = current_proc(); /* XXXX */ + int error; + int resid = 0; + size_t mach_header_sz = sizeof(struct mach_header); + boolean_t abi64; + boolean_t got_code_signatures = FALSE; + boolean_t found_header_segment = FALSE; + boolean_t found_xhdr = FALSE; + int64_t slide = 0; + boolean_t dyld_no_load_addr = FALSE; + boolean_t is_dyld = FALSE; + vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, 
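/*
 * [Editor's sketch] The __x86_64__ hunk above keeps
 * VM_MAP_HIGH_START_BITS_COUNT random bits and shifts them by
 * VM_MAP_HIGH_START_BITS_SHIFT to randomize where the map's high allocations
 * begin. The bit manipulation in isolation; the COUNT/SHIFT values below are
 * made up for illustration.
 */
#include <stdint.h>
#include <stdlib.h>

#define TOY_HIGH_BITS_COUNT     8
#define TOY_HIGH_BITS_SHIFT     27

static uint64_t
toy_random_high_start(void)
{
	uint32_t bits = (uint32_t)random();

	bits &= (1u << TOY_HIGH_BITS_COUNT) - 1;        /* keep COUNT low bits */
	return (uint64_t)bits << TOY_HIGH_BITS_SHIFT;   /* align to 1 << SHIFT */
}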
vm_map_page_mask(map)); #if __arm64__ - uint32_t pagezero_end = 0; - uint32_t executable_end = 0; - uint32_t writable_start = 0; - vm_map_size_t effective_page_size; + uint32_t pagezero_end = 0; + uint32_t executable_end = 0; + uint32_t writable_start = 0; + vm_map_size_t effective_page_size; effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map)); #endif /* __arm64__ */ if (header->magic == MH_MAGIC_64 || header->magic == MH_CIGAM_64) { - mach_header_sz = sizeof(struct mach_header_64); + mach_header_sz = sizeof(struct mach_header_64); } /* * Break infinite recursion */ if (depth > 1) { - return(LOAD_FAILURE); + return LOAD_FAILURE; } depth++; @@ -633,34 +637,34 @@ parse_machfile( */ if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) || !grade_binary(header->cputype, - header->cpusubtype & ~CPU_SUBTYPE_MASK)) - return(LOAD_BADARCH); + header->cpusubtype & ~CPU_SUBTYPE_MASK)) { + return LOAD_BADARCH; + } #if __x86_64__ if (bootarg_no32exec && (header->cputype == CPU_TYPE_X86)) { - return(LOAD_BADARCH_X86); + return LOAD_BADARCH_X86; } #endif abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64); switch (header->filetype) { - case MH_EXECUTE: if (depth != 1) { - return (LOAD_FAILURE); + return LOAD_FAILURE; } #if CONFIG_EMBEDDED if (header->flags & MH_DYLDLINK) { /* Check properties of dynamic executables */ if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) { - return (LOAD_FAILURE); + return LOAD_FAILURE; } result->needs_dynlinker = TRUE; } else { /* Check properties of static executables (disallowed except for development) */ #if !(DEVELOPMENT || DEBUG) - return (LOAD_FAILURE); + return LOAD_FAILURE; #endif } #endif /* CONFIG_EMBEDDED */ @@ -668,13 +672,13 @@ parse_machfile( break; case MH_DYLINKER: if (depth != 2) { - return (LOAD_FAILURE); + return LOAD_FAILURE; } is_dyld = TRUE; break; - + default: - return (LOAD_FAILURE); + return LOAD_FAILURE; } /* @@ -684,8 +688,8 @@ parse_machfile( /* ensure header + sizeofcmds falls within the file */ if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) || - (off_t)cmds_size > macho_size || - round_page_overflow(cmds_size, &alloc_size)) { + (off_t)cmds_size > macho_size || + round_page_overflow(cmds_size, &alloc_size)) { return LOAD_BADMACHO; } @@ -734,15 +738,14 @@ parse_machfile( #endif for (pass = 0; pass <= 3; pass++) { - if (pass == 0 && !slide_realign && !is_dyld) { /* if we dont need to realign the slide or determine dyld's load * address, pass 0 can be skipped */ continue; } else if (pass == 1) { #if __arm64__ - boolean_t is_pie; - int64_t adjust; + boolean_t is_pie; + int64_t adjust; is_pie = ((header->flags & MH_PIE) != 0); if (pagezero_end != 0 && @@ -750,14 +753,14 @@ parse_machfile( /* need at least 1 page for PAGEZERO */ adjust = effective_page_size; MACHO_PRINTF(("pagezero boundary at " - "0x%llx; adjust slide from " - "0x%llx to 0x%llx%s\n", - (uint64_t) pagezero_end, - slide, - slide + adjust, - (is_pie - ? "" - : " BUT NO PIE ****** :-("))); + "0x%llx; adjust slide from " + "0x%llx to 0x%llx%s\n", + (uint64_t) pagezero_end, + slide, + slide + adjust, + (is_pie + ? 
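
The sizeofcmds validation reindented above is one of the load path's key hardening checks: the header size plus sizeofcmds must neither wrap nor exceed the Mach-O itself. A stand-alone sketch of the same pattern; XNU's os_add_overflow() wraps the GCC/Clang builtin used here, and the byte counts in main() are made up for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool cmds_fit(size_t header_sz, uint32_t sizeofcmds, int64_t file_size)
{
    size_t cmds_size;
    if (__builtin_add_overflow(header_sz, sizeofcmds, &cmds_size)) {
        return false;               /* header_sz + sizeofcmds wrapped */
    }
    if ((int64_t)cmds_size > file_size) {
        return false;               /* commands claim more bytes than exist */
    }
    return true;
}

int main(void)
{
    printf("%d\n", cmds_fit(32, 4096, 16384));        /* 1: fits */
    printf("%d\n", cmds_fit(32, 0xffffffffu, 16384)); /* 0: exceeds the file */
    return 0;
}
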
"" + : " BUT NO PIE ****** :-("))); if (is_pie) { slide += adjust; pagezero_end += adjust; @@ -768,10 +771,9 @@ parse_machfile( if (pagezero_end != 0) { result->has_pagezero = TRUE; } - if (executable_end == writable_start && + if (executable_end == writable_start && (executable_end & effective_page_mask) != 0 && (executable_end & FOURK_PAGE_MASK) == 0) { - /* * The TEXT/DATA boundary is 4K-aligned but * not page-aligned. Adjust the slide to make @@ -779,19 +781,20 @@ parse_machfile( * with both write and execute permissions. */ adjust = - (effective_page_size - - (executable_end & effective_page_mask)); + (effective_page_size - + (executable_end & effective_page_mask)); MACHO_PRINTF(("page-unaligned X-W boundary at " - "0x%llx; adjust slide from " - "0x%llx to 0x%llx%s\n", - (uint64_t) executable_end, - slide, - slide + adjust, - (is_pie - ? "" - : " BUT NO PIE ****** :-("))); - if (is_pie) + "0x%llx; adjust slide from " + "0x%llx to 0x%llx%s\n", + (uint64_t) executable_end, + slide, + slide + adjust, + (is_pie + ? "" + : " BUT NO PIE ****** :-("))); + if (is_pie) { slide += adjust; + } } #endif /* __arm64__ */ @@ -807,7 +810,7 @@ parse_machfile( /* * Check that the entry point is contained in an executable segments - */ + */ if ((pass == 3) && (!result->using_lcmain && result->validentry == 0)) { thread_state_initialize(thread); ret = LOAD_FAILURE; @@ -833,7 +836,6 @@ parse_machfile( ncmds = header->ncmds; while (ncmds--) { - /* ensure enough space for a minimal load command */ if (offset + sizeof(struct load_command) > cmds_size) { ret = LOAD_BADMACHO; @@ -855,8 +857,8 @@ parse_machfile( * start of the image. */ if (os_add_overflow(offset, lcp->cmdsize, &offset) || - lcp->cmdsize < sizeof(struct load_command) || - offset > cmds_size) { + lcp->cmdsize < sizeof(struct load_command) || + offset > cmds_size) { ret = LOAD_BADMACHO; break; } @@ -865,7 +867,7 @@ parse_machfile( * Act on struct load_command's for which kernel * intervention is required. */ - switch(lcp->cmd) { + switch (lcp->cmd) { case LC_SEGMENT: { struct segment_command *scp = (struct segment_command *) lcp; if (pass == 0) { @@ -909,8 +911,9 @@ parse_machfile( found_xhdr = TRUE; } - if (pass != 2) + if (pass != 2) { break; + } if (abi64) { /* @@ -922,19 +925,19 @@ parse_machfile( } ret = load_segment(lcp, - header->filetype, - control, - file_offset, - macho_size, - vp, - map, - slide, - result); + header->filetype, + control, + file_offset, + macho_size, + vp, + map, + slide, + result); if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) { /* Enforce a single segment mapping offset zero, with R+X * protection. */ if (found_header_segment || - ((scp->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) != (VM_PROT_READ|VM_PROT_EXECUTE))) { + ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) { ret = LOAD_BADMACHO; break; } @@ -960,8 +963,9 @@ parse_machfile( found_xhdr = TRUE; } - if (pass != 2) + if (pass != 2) { break; + } if (!abi64) { /* @@ -973,20 +977,20 @@ parse_machfile( } ret = load_segment(lcp, - header->filetype, - control, - file_offset, - macho_size, - vp, - map, - slide, - result); + header->filetype, + control, + file_offset, + macho_size, + vp, + map, + slide, + result); if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) { /* Enforce a single segment mapping offset zero, with R+X * protection. 
*/ if (found_header_segment || - ((scp64->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) != (VM_PROT_READ|VM_PROT_EXECUTE))) { + ((scp64->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) { ret = LOAD_BADMACHO; break; } @@ -996,28 +1000,32 @@ parse_machfile( break; } case LC_UNIXTHREAD: - if (pass != 1) + if (pass != 1) { break; + } ret = load_unixthread( - (struct thread_command *) lcp, - thread, - slide, - result); + (struct thread_command *) lcp, + thread, + slide, + result); break; case LC_MAIN: - if (pass != 1) + if (pass != 1) { break; - if (depth != 1) + } + if (depth != 1) { break; + } ret = load_main( - (struct entry_point_command *) lcp, - thread, - slide, - result); + (struct entry_point_command *) lcp, + thread, + slide, + result); break; case LC_LOAD_DYLINKER: - if (pass != 3) + if (pass != 3) { break; + } if ((depth == 1) && (dlp == 0)) { dlp = (struct dylinker_command *)lcp; dlarchbits = (header->cputype & CPU_ARCH_MASK); @@ -1028,18 +1036,19 @@ parse_machfile( case LC_UUID: if (pass == 1 && depth == 1) { ret = load_uuid((struct uuid_command *) lcp, - (char *)addr + cmds_size, - result); + (char *)addr + cmds_size, + result); } break; case LC_CODE_SIGNATURE: /* CODE SIGNING */ - if (pass != 1) + if (pass != 1) { break; + } /* pager -> uip -> - load signatures & store in uip - set VM object "signed_pages" - */ + * load signatures & store in uip + * set VM object "signed_pages" + */ ret = load_code_signature( (struct linkedit_data_command *) lcp, vp, @@ -1050,14 +1059,14 @@ parse_machfile( imgp); if (ret != LOAD_SUCCESS) { printf("proc %d: load code signature error %d " - "for file \"%s\"\n", - p->p_pid, ret, vp->v_name); + "for file \"%s\"\n", + p->p_pid, ret, vp->v_name); /* * Allow injections to be ignored on devices w/o enforcement enabled */ - if (!cs_process_global_enforcement()) - ret = LOAD_SUCCESS; /* ignore error */ - + if (!cs_process_global_enforcement()) { + ret = LOAD_SUCCESS; /* ignore error */ + } } else { got_code_signatures = TRUE; } @@ -1068,29 +1077,31 @@ parse_machfile( vm_size_t off = 0; - if (cs_debug > 10) + if (cs_debug > 10) { printf("validating initial pages of %s\n", vp->v_name); - + } + while (off < alloc_size && ret == LOAD_SUCCESS) { - tainted = CS_VALIDATE_TAINTED; - - valid = cs_validate_range(vp, - NULL, - file_offset + off, - addr + off, - PAGE_SIZE, - &tainted); - if (!valid || (tainted & CS_VALIDATE_TAINTED)) { - if (cs_debug) - printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n", + tainted = CS_VALIDATE_TAINTED; + + valid = cs_validate_range(vp, + NULL, + file_offset + off, + addr + off, + PAGE_SIZE, + &tainted); + if (!valid || (tainted & CS_VALIDATE_TAINTED)) { + if (cs_debug) { + printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n", vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags); - if (cs_process_global_enforcement() || - (result->csflags & (CS_HARD|CS_KILL|CS_ENFORCEMENT))) { - ret = LOAD_FAILURE; - } - result->csflags &= ~CS_VALID; - } - off += PAGE_SIZE; + } + if (cs_process_global_enforcement() || + (result->csflags & (CS_HARD | CS_KILL | CS_ENFORCEMENT))) { + ret = LOAD_FAILURE; + } + result->csflags &= ~CS_VALID; + } + off += PAGE_SIZE; } } @@ -1098,8 +1109,9 @@ parse_machfile( #if CONFIG_CODE_DECRYPTION case LC_ENCRYPTION_INFO: case LC_ENCRYPTION_INFO_64: - if (pass != 3) + if (pass != 3) { break; + } ret = set_code_unprotect( (struct 
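
The reindented block above checks the already-mapped first pages of the image one PAGE_SIZE chunk at a time via cs_validate_range(), and fails closed when enforcement applies. The shape of that loop, sketched stand-alone; validate_page is a stand-in for cs_validate_range() and trivially succeeds here.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SZ 4096    /* illustrative; the kernel uses PAGE_SIZE */

static bool validate_page(const uint8_t *p, size_t len) { (void)p; (void)len; return true; }

static bool validate_initial_pages(const uint8_t *addr, size_t alloc_size)
{
    for (size_t off = 0; off < alloc_size; off += PAGE_SZ) {
        size_t len = alloc_size - off < PAGE_SZ ? alloc_size - off : PAGE_SZ;
        if (!validate_page(addr + off, len)) {
            return false;   /* invalid or tainted page: refuse to load */
        }
    }
    return true;
}

int main(void)
{
    static uint8_t image[3 * PAGE_SZ + 100];
    return validate_initial_pages(image, sizeof(image)) ? 0 : 1;
}
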
encryption_info_command *) lcp, addr, map, slide, vp, file_offset, @@ -1107,32 +1119,31 @@ parse_machfile( if (ret != LOAD_SUCCESS) { os_reason_t load_failure_reason = OS_REASON_NULL; printf("proc %d: set_code_unprotect() error %d " - "for file \"%s\"\n", - p->p_pid, ret, vp->v_name); - /* - * Don't let the app run if it's + "for file \"%s\"\n", + p->p_pid, ret, vp->v_name); + /* + * Don't let the app run if it's * encrypted but we failed to set up the * decrypter. If the keys are missing it will * return LOAD_DECRYPTFAIL. */ - if (ret == LOAD_DECRYPTFAIL) { + if (ret == LOAD_DECRYPTFAIL) { /* failed to load due to missing FP keys */ proc_lock(p); p->p_lflag |= P_LTERM_DECRYPTFAIL; proc_unlock(p); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0); load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT); - } else { - + } else { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0); + p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0); load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT); - } + } - assert(load_failure_reason != OS_REASON_NULL); - psignal_with_reason(p, SIGKILL, load_failure_reason); + assert(load_failure_reason != OS_REASON_NULL); + psignal_with_reason(p, SIGKILL, load_failure_reason); } break; #endif @@ -1157,15 +1168,17 @@ parse_machfile( ret = LOAD_SUCCESS; break; } - if (ret != LOAD_SUCCESS) + if (ret != LOAD_SUCCESS) { break; + } } - if (ret != LOAD_SUCCESS) + if (ret != LOAD_SUCCESS) { break; + } } if (ret == LOAD_SUCCESS) { - if(!got_code_signatures && cs_process_global_enforcement()) { + if (!got_code_signatures && cs_process_global_enforcement()) { ret = LOAD_FAILURE; } @@ -1180,7 +1193,7 @@ parse_machfile( * offset regardless of the PIE-ness of the main binary. */ ret = load_dylinker(dlp, dlarchbits, map, thread, depth, - dyld_aslr_offset, result, imgp); + dyld_aslr_offset, result, imgp); } if ((ret == LOAD_SUCCESS) && (depth == 1)) { @@ -1192,7 +1205,7 @@ parse_machfile( ret = LOAD_FAILURE; } #endif - } + } } if (ret == LOAD_BADMACHO && found_xhdr) { @@ -1206,19 +1219,19 @@ parse_machfile( #if CONFIG_CODE_DECRYPTION -#define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096) +#define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096) static load_return_t unprotect_dsmos_segment( - uint64_t file_off, - uint64_t file_size, - struct vnode *vp, - off_t macho_offset, - vm_map_t map, - vm_map_offset_t map_addr, - vm_map_size_t map_size) + uint64_t file_off, + uint64_t file_size, + struct vnode *vp, + off_t macho_offset, + vm_map_t map, + vm_map_offset_t map_addr, + vm_map_size_t map_size) { - kern_return_t kr; + kern_return_t kr; /* * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of @@ -1235,7 +1248,7 @@ unprotect_dsmos_segment( * We start mapping in the unprotected area. * Skip the unprotected part... 
*/ - vm_map_offset_t delta; + vm_map_offset_t delta; delta = APPLE_UNPROTECTED_HEADER_SIZE; delta -= file_off; @@ -1256,26 +1269,25 @@ unprotect_dsmos_segment( struct proc *p; p = current_proc(); printf("APPLE_PROTECT: %d[%s] map %p " - "[0x%llx:0x%llx] %s(%s)\n", - p->p_pid, p->p_comm, map, - (uint64_t) map_addr, - (uint64_t) (map_addr + map_size), - __FUNCTION__, vp->v_name); + "[0x%llx:0x%llx] %s(%s)\n", + p->p_pid, p->p_comm, map, + (uint64_t) map_addr, + (uint64_t) (map_addr + map_size), + __FUNCTION__, vp->v_name); } #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ /* The DSMOS pager can only be used by apple signed code */ struct cs_blob * blob = csvnode_get_blob(vp, file_off); - if( blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) - { + if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) { return LOAD_FAILURE; } kr = vm_map_apple_protected(map, - map_addr, - map_addr + map_size, - crypto_backing_offset, - &crypt_info); + map_addr, + map_addr + map_size, + crypto_backing_offset, + &crypt_info); } if (kr != KERN_SUCCESS) { @@ -1283,32 +1295,32 @@ unprotect_dsmos_segment( } return LOAD_SUCCESS; } -#else /* CONFIG_CODE_DECRYPTION */ +#else /* CONFIG_CODE_DECRYPTION */ static load_return_t unprotect_dsmos_segment( - __unused uint64_t file_off, - __unused uint64_t file_size, - __unused struct vnode *vp, - __unused off_t macho_offset, - __unused vm_map_t map, - __unused vm_map_offset_t map_addr, - __unused vm_map_size_t map_size) + __unused uint64_t file_off, + __unused uint64_t file_size, + __unused struct vnode *vp, + __unused off_t macho_offset, + __unused vm_map_t map, + __unused vm_map_offset_t map_addr, + __unused vm_map_size_t map_size) { return LOAD_SUCCESS; } -#endif /* CONFIG_CODE_DECRYPTION */ +#endif /* CONFIG_CODE_DECRYPTION */ /* * map_segment: * Maps a Mach-O segment, taking care of mis-alignment (wrt the system * page size) issues. - * + * * The mapping might result in 1, 2 or 3 map entries: - * 1. for the first page, which could be overlap with the previous - * mapping, - * 2. for the center (if applicable), - * 3. for the last page, which could overlap with the next mapping. + * 1. for the first page, which could be overlap with the previous + * mapping, + * 2. for the center (if applicable), + * 3. for the last page, which could overlap with the next mapping. 
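
The delta arithmetic retabbed above implements "the first APPLE_UNPROTECTED_HEADER_SIZE bytes stay in the clear": when a mapping starts inside that window, its start and size are clipped so only the protected tail gets remapped through the DSMOS pager. A pure-arithmetic sketch, with UNPROTECTED_SZ mirroring the (3 * 4096) constant:

#include <stdint.h>
#include <stdio.h>

#define UNPROTECTED_SZ (3 * 4096)

static void clip_to_protected(uint64_t file_off, uint64_t *map_addr, uint64_t *map_size)
{
    if (file_off < UNPROTECTED_SZ) {
        uint64_t delta = UNPROTECTED_SZ - file_off;  /* bytes still unprotected */
        if (delta > *map_size) {
            delta = *map_size;                       /* mapping is entirely clear */
        }
        *map_addr += delta;
        *map_size -= delta;
    }
}

int main(void)
{
    uint64_t addr = 0x100000000ULL, size = 0x8000;
    clip_to_protected(0x1000, &addr, &size);         /* starts one page into the header */
    printf("protected range: 0x%llx + 0x%llx\n",
        (unsigned long long)addr, (unsigned long long)size);
    return 0;
}
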
* * For each of those map entries, we might have to interpose a * "fourk_pager" to deal with mis-alignment wrt the system page size, @@ -1320,21 +1332,21 @@ unprotect_dsmos_segment( */ static kern_return_t map_segment( - vm_map_t map, - vm_map_offset_t vm_start, - vm_map_offset_t vm_end, - memory_object_control_t control, - vm_map_offset_t file_start, - vm_map_offset_t file_end, - vm_prot_t initprot, - vm_prot_t maxprot, - load_result_t *result) + vm_map_t map, + vm_map_offset_t vm_start, + vm_map_offset_t vm_end, + memory_object_control_t control, + vm_map_offset_t file_start, + vm_map_offset_t file_end, + vm_prot_t initprot, + vm_prot_t maxprot, + load_result_t *result) { - vm_map_offset_t cur_offset, cur_start, cur_end; - kern_return_t ret; - vm_map_offset_t effective_page_mask; + vm_map_offset_t cur_offset, cur_start, cur_end; + kern_return_t ret; + vm_map_offset_t effective_page_mask; vm_map_kernel_flags_t vmk_flags, cur_vmk_flags; - + if (vm_end < vm_start || file_end < file_start) { return LOAD_BADMACHO; @@ -1359,11 +1371,11 @@ map_segment( vmk_flags.vmkf_fourk = TRUE; #else /* __arm64__ */ panic("map_segment: unexpected mis-alignment " - "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n", - (uint64_t) vm_start, - (uint64_t) vm_end, - (uint64_t) file_start, - (uint64_t) file_end); + "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n", + (uint64_t) vm_start, + (uint64_t) vm_end, + (uint64_t) file_start, + (uint64_t) file_end); #endif /* __arm64__ */ } @@ -1407,7 +1419,7 @@ map_segment( VM_INHERIT_DEFAULT); } if (ret != KERN_SUCCESS) { - return (LOAD_NOSPACE); + return LOAD_NOSPACE; } cur_offset += cur_end - cur_start; } @@ -1418,7 +1430,7 @@ map_segment( } if (vm_map_round_page(cur_end, effective_page_mask) >= vm_map_trunc_page(vm_start + (file_end - file_start), - effective_page_mask)) { + effective_page_mask)) { /* no middle */ } else { cur_start = cur_end; @@ -1448,8 +1460,8 @@ map_segment( #endif /* CONFIG_EMBEDDED */ cur_end = vm_map_trunc_page(vm_start + (file_end - - file_start), - effective_page_mask); + file_start), + effective_page_mask); if (control != MEMORY_OBJECT_CONTROL_NULL) { ret = vm_map_enter_mem_object_control( map, @@ -1480,7 +1492,7 @@ map_segment( VM_INHERIT_DEFAULT); } if (ret != KERN_SUCCESS) { - return (LOAD_NOSPACE); + return LOAD_NOSPACE; } cur_offset += cur_end - cur_start; } @@ -1491,7 +1503,7 @@ map_segment( cur_start = cur_end; #if __arm64__ if (!vm_map_page_aligned(vm_start + (file_end - file_start), - effective_page_mask)) { + effective_page_mask)) { /* one 4K pager for the last page */ cur_end = vm_start + (file_end - file_start); if (control != MEMORY_OBJECT_CONTROL_NULL) { @@ -1524,7 +1536,7 @@ map_segment( VM_INHERIT_DEFAULT); } if (ret != KERN_SUCCESS) { - return (LOAD_NOSPACE); + return LOAD_NOSPACE; } cur_offset += cur_end - cur_start; } @@ -1537,34 +1549,34 @@ done: static load_return_t load_segment( - struct load_command *lcp, - uint32_t filetype, - void * control, - off_t pager_offset, - off_t macho_size, - struct vnode *vp, - vm_map_t map, - int64_t slide, - load_result_t *result) + struct load_command *lcp, + uint32_t filetype, + void * control, + off_t pager_offset, + off_t macho_size, + struct vnode *vp, + vm_map_t map, + int64_t slide, + load_result_t *result) { struct segment_command_64 segment_command, *scp; - kern_return_t ret; - vm_map_size_t delta_size; - vm_prot_t initprot; - vm_prot_t maxprot; - size_t segment_command_size, total_section_size, - single_section_size; - vm_map_offset_t file_offset, file_size; - vm_map_offset_t vm_offset, vm_size; - 
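
The map_segment() comment above describes splitting one segment mapping into up to three entries when the binary's 4K alignment does not match the system page size. A sketch of just the split arithmetic (no VM calls); segments smaller than one system page need the head/tail overlap handled, which is elided here:

#include <stdint.h>
#include <stdio.h>

#define SYS_PAGE 16384ULL   /* pretend 16K system pages over 4K file alignment */

static uint64_t trunc_page(uint64_t a) { return a & ~(SYS_PAGE - 1); }
static uint64_t round_page(uint64_t a) { return (a + SYS_PAGE - 1) & ~(SYS_PAGE - 1); }

int main(void)
{
    uint64_t vm_start = 0x5000, vm_end = 0x2b000;  /* 4K-aligned, not 16K-aligned */
    uint64_t head_end = round_page(vm_start);      /* end of the partial first page */
    uint64_t tail_start = trunc_page(vm_end);      /* start of the partial last page */

    if (head_end > vm_start) {
        printf("head:   0x%llx..0x%llx\n",
            (unsigned long long)vm_start, (unsigned long long)head_end);
    }
    if (tail_start > head_end) {
        printf("middle: 0x%llx..0x%llx\n",
            (unsigned long long)head_end, (unsigned long long)tail_start);
    }
    if (vm_end > tail_start) {
        printf("tail:   0x%llx..0x%llx\n",
            (unsigned long long)tail_start, (unsigned long long)vm_end);
    }
    return 0;
}
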
vm_map_offset_t vm_start, vm_end, vm_end_aligned; - vm_map_offset_t file_start, file_end; - kern_return_t kr; - boolean_t verbose; - vm_map_size_t effective_page_size; - vm_map_offset_t effective_page_mask; + kern_return_t ret; + vm_map_size_t delta_size; + vm_prot_t initprot; + vm_prot_t maxprot; + size_t segment_command_size, total_section_size, + single_section_size; + vm_map_offset_t file_offset, file_size; + vm_map_offset_t vm_offset, vm_size; + vm_map_offset_t vm_start, vm_end, vm_end_aligned; + vm_map_offset_t file_start, file_end; + kern_return_t kr; + boolean_t verbose; + vm_map_size_t effective_page_size; + vm_map_offset_t effective_page_mask; #if __arm64__ - vm_map_kernel_flags_t vmk_flags; - boolean_t fourk_align; + vm_map_kernel_flags_t vmk_flags; + boolean_t fourk_align; #endif /* __arm64__ */ effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map)); @@ -1593,8 +1605,9 @@ load_segment( } #endif /* __arm64__ */ } - if (lcp->cmdsize < segment_command_size) - return (LOAD_BADMACHO); + if (lcp->cmdsize < segment_command_size) { + return LOAD_BADMACHO; + } total_section_size = lcp->cmdsize - segment_command_size; if (LC_SEGMENT_64 == lcp->cmd) { @@ -1606,16 +1619,16 @@ load_segment( if (verbose) { MACHO_PRINTF(("+++ load_segment %s " - "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] " - "prot %d/%d flags 0x%x\n", - scp->segname, - (uint64_t)(slide + scp->vmaddr), - (uint64_t)(slide + scp->vmaddr + scp->vmsize), - pager_offset + scp->fileoff, - pager_offset + scp->fileoff + scp->filesize, - scp->initprot, - scp->maxprot, - scp->flags)); + "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] " + "prot %d/%d flags 0x%x\n", + scp->segname, + (uint64_t)(slide + scp->vmaddr), + (uint64_t)(slide + scp->vmaddr + scp->vmsize), + pager_offset + scp->fileoff, + pager_offset + scp->fileoff + scp->filesize, + scp->initprot, + scp->maxprot, + scp->flags)); } /* @@ -1624,19 +1637,19 @@ load_segment( */ if (scp->fileoff + scp->filesize < scp->fileoff || scp->fileoff + scp->filesize > (uint64_t)macho_size) { - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } /* * Ensure that the number of sections specified would fit * within the load command size. */ if (total_section_size / single_section_size < scp->nsects) { - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } /* * Make sure the segment is page-aligned in the file. */ - file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */ + file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */ file_size = scp->filesize; #if __arm64__ if (fourk_align) { @@ -1650,14 +1663,14 @@ load_segment( } else #endif /* __arm64__ */ if ((file_offset & PAGE_MASK_64) != 0 || - /* we can't mmap() it if it's not page-aligned in the file */ + /* we can't mmap() it if it's not page-aligned in the file */ (file_offset & vm_map_page_mask(map)) != 0) { /* * The 1st test would have failed if the system's page size * was what this process believe is the page size, so let's * fail here too for the sake of consistency. 
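
Two of the load_segment() sanity checks restyled above are worth spelling out: the segment's file range must not wrap or extend past the Mach-O, and the declared section count must fit in the bytes left after the segment command itself. A stand-alone sketch; it assumes cmdsize >= seg_cmd_sz and sect_sz != 0, both of which the real code establishes earlier, and the 72/80-byte sizes in main() are the usual 64-bit command/section sizes.

#include <stdbool.h>
#include <stdint.h>

static bool segment_ok(uint64_t fileoff, uint64_t filesize, uint64_t macho_size,
    uint32_t cmdsize, uint32_t seg_cmd_sz, uint32_t sect_sz, uint32_t nsects)
{
    if (fileoff + filesize < fileoff ||       /* wraparound */
        fileoff + filesize > macho_size) {    /* past end of file */
        return false;
    }
    uint32_t total_section_size = cmdsize - seg_cmd_sz;
    if (total_section_size / sect_sz < nsects) {
        return false;                         /* nsects cannot fit in cmdsize */
    }
    return true;
}

int main(void)
{
    /* a segment_command_64 is 72 bytes, a section_64 is 80 bytes */
    return segment_ok(0, 4096, 16384, 72 + 2 * 80, 72, 80, 2) ? 0 : 1;
}
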
*/ - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } /* @@ -1667,18 +1680,19 @@ load_segment( */ if (result->cs_end_offset && result->cs_end_offset < (off_t)scp->fileoff && - result->cs_end_offset - scp->fileoff < scp->filesize) - { - if (cs_debug) + result->cs_end_offset - scp->fileoff < scp->filesize) { + if (cs_debug) { printf("section outside code signature\n"); + } return LOAD_BADMACHO; } vm_offset = scp->vmaddr + slide; vm_size = scp->vmsize; - if (vm_size == 0) - return (LOAD_SUCCESS); + if (vm_size == 0) { + return LOAD_SUCCESS; + } if (scp->vmaddr == 0 && file_size == 0 && vm_size != 0 && @@ -1699,27 +1713,27 @@ load_segment( */ vm_end = vm_offset + vm_size; if (vm_end < vm_offset) { - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } if (verbose) { MACHO_PRINTF(("++++++ load_segment: " - "page_zero up to 0x%llx\n", - (uint64_t) vm_end)); + "page_zero up to 0x%llx\n", + (uint64_t) vm_end)); } #if __arm64__ if (fourk_align) { /* raise min_offset as much as page-alignment allows */ vm_end_aligned = vm_map_trunc_page(vm_end, - effective_page_mask); + effective_page_mask); } else #endif /* __arm64__ */ { vm_end = vm_map_round_page(vm_end, - PAGE_MASK_64); + PAGE_MASK_64); vm_end_aligned = vm_end; } ret = vm_map_raise_min_offset(map, - vm_end_aligned); + vm_end_aligned); #if __arm64__ if (ret == 0 && vm_end > vm_end_aligned) { @@ -1731,23 +1745,23 @@ load_segment( map, &vm_end_aligned, vm_end - vm_end_aligned, - (mach_vm_offset_t) 0, /* mask */ + (mach_vm_offset_t) 0, /* mask */ VM_FLAGS_FIXED, vmk_flags, VM_KERN_MEMORY_NONE, IPC_PORT_NULL, 0, - FALSE, /* copy */ + FALSE, /* copy */ (scp->initprot & VM_PROT_ALL), (scp->maxprot & VM_PROT_ALL), VM_INHERIT_DEFAULT); } #endif /* __arm64__ */ - + if (ret != KERN_SUCCESS) { - return (LOAD_FAILURE); + return LOAD_FAILURE; } - return (LOAD_SUCCESS); + return LOAD_SUCCESS; } else { #if CONFIG_EMBEDDED /* not PAGEZERO: should not be mapped at address 0 */ @@ -1761,13 +1775,13 @@ load_segment( if (fourk_align) { /* 4K-align */ file_start = vm_map_trunc_page(file_offset, - FOURK_PAGE_MASK); + FOURK_PAGE_MASK); file_end = vm_map_round_page(file_offset + file_size, - FOURK_PAGE_MASK); + FOURK_PAGE_MASK); vm_start = vm_map_trunc_page(vm_offset, - FOURK_PAGE_MASK); + FOURK_PAGE_MASK); vm_end = vm_map_round_page(vm_offset + vm_size, - FOURK_PAGE_MASK); + FOURK_PAGE_MASK); if (!strncmp(scp->segname, "__LINKEDIT", 11) && page_aligned(file_start) && vm_map_page_aligned(file_start, vm_map_page_mask(map)) && @@ -1775,30 +1789,33 @@ load_segment( vm_map_page_aligned(vm_start, vm_map_page_mask(map))) { /* XXX last segment: ignore mis-aligned tail */ file_end = vm_map_round_page(file_end, - effective_page_mask); + effective_page_mask); vm_end = vm_map_round_page(vm_end, - effective_page_mask); + effective_page_mask); } } else #endif /* __arm64__ */ { file_start = vm_map_trunc_page(file_offset, - effective_page_mask); + effective_page_mask); file_end = vm_map_round_page(file_offset + file_size, - effective_page_mask); + effective_page_mask); vm_start = vm_map_trunc_page(vm_offset, - effective_page_mask); + effective_page_mask); vm_end = vm_map_round_page(vm_offset + vm_size, - effective_page_mask); + effective_page_mask); } - if (vm_start < result->min_vm_addr) + if (vm_start < result->min_vm_addr) { result->min_vm_addr = vm_start; - if (vm_end > result->max_vm_addr) + } + if (vm_end > result->max_vm_addr) { result->max_vm_addr = vm_end; + } - if (map == VM_MAP_NULL) - return (LOAD_SUCCESS); + if (map == VM_MAP_NULL) { + return LOAD_SUCCESS; + } if (vm_size > 
0) { initprot = (scp->initprot) & VM_PROT_ALL; @@ -1808,22 +1825,22 @@ load_segment( */ if (verbose) { MACHO_PRINTF(("++++++ load_segment: " - "mapping at vm [0x%llx:0x%llx] of " - "file [0x%llx:0x%llx]\n", - (uint64_t) vm_start, - (uint64_t) vm_end, - (uint64_t) file_start, - (uint64_t) file_end)); + "mapping at vm [0x%llx:0x%llx] of " + "file [0x%llx:0x%llx]\n", + (uint64_t) vm_start, + (uint64_t) vm_end, + (uint64_t) file_start, + (uint64_t) file_end)); } ret = map_segment(map, - vm_start, - vm_end, - control, - file_start, - file_end, - initprot, - maxprot, - result); + vm_start, + vm_end, + control, + file_start, + file_end, + initprot, + maxprot, + result); if (ret) { return LOAD_NOSPACE; } @@ -1835,20 +1852,20 @@ load_segment( */ delta_size = map_size - scp->filesize; if (delta_size > 0) { - mach_vm_offset_t tmp; - + mach_vm_offset_t tmp; + ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD); if (ret != KERN_SUCCESS) { - return(LOAD_RESOURCE); + return LOAD_RESOURCE; } - + if (copyout(tmp, map_addr + scp->filesize, - delta_size)) { + delta_size)) { (void) mach_vm_deallocate( - kernel_map, tmp, delta_size); - return (LOAD_FAILURE); + kernel_map, tmp, delta_size); + return LOAD_FAILURE; } - + (void) mach_vm_deallocate(kernel_map, tmp, delta_size); } #endif /* FIXME */ @@ -1870,35 +1887,36 @@ load_segment( tmp = vm_start + (file_end - file_start); if (verbose) { MACHO_PRINTF(("++++++ load_segment: " - "delta mapping vm [0x%llx:0x%llx]\n", - (uint64_t) tmp, - (uint64_t) (tmp + delta_size))); + "delta mapping vm [0x%llx:0x%llx]\n", + (uint64_t) tmp, + (uint64_t) (tmp + delta_size))); } kr = map_segment(map, - tmp, - tmp + delta_size, - MEMORY_OBJECT_CONTROL_NULL, - 0, - delta_size, - scp->initprot, - scp->maxprot, - result); + tmp, + tmp + delta_size, + MEMORY_OBJECT_CONTROL_NULL, + 0, + delta_size, + scp->initprot, + scp->maxprot, + result); if (kr != KERN_SUCCESS) { - return(LOAD_NOSPACE); + return LOAD_NOSPACE; } } - if ( (scp->fileoff == 0) && (scp->filesize != 0) ) + if ((scp->fileoff == 0) && (scp->filesize != 0)) { result->mach_header = vm_offset; + } if (scp->flags & SG_PROTECTED_VERSION_1) { ret = unprotect_dsmos_segment(file_start, - file_end - file_start, - vp, - pager_offset, - map, - vm_start, - vm_end - vm_start); + file_end - file_start, + vp, + pager_offset, + map, + vm_start, + vm_end - vm_start); if (ret != LOAD_SUCCESS) { return ret; } @@ -1910,17 +1928,17 @@ load_segment( filetype == MH_DYLINKER && result->all_image_info_addr == MACH_VM_MIN_ADDRESS) { note_all_image_info_section(scp, - LC_SEGMENT_64 == lcp->cmd, - single_section_size, - ((const char *)lcp + - segment_command_size), - slide, - result); + LC_SEGMENT_64 == lcp->cmd, + single_section_size, + ((const char *)lcp + + segment_command_size), + slide, + result); } if (result->entry_point != MACH_VM_MIN_ADDRESS) { if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) { - if ((scp->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) == (VM_PROT_READ|VM_PROT_EXECUTE)) { + if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) { result->validentry = 1; } else { /* right range but wrong protections, unset if previously validated */ @@ -1935,53 +1953,55 @@ load_segment( static load_return_t load_uuid( - struct uuid_command *uulp, - char *command_end, - load_result_t *result -) + struct uuid_command *uulp, + char *command_end, + load_result_t *result + ) { - /* - * We need to check the following for this 
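
The entry-point check near the end of load_segment() above accepts the entry PC only if it lands in a segment mapped readable and executable, and clears a previously recorded entry if the protections are wrong. A sketch of that decision; PROT_R/PROT_X are illustrative stand-ins for the VM_PROT_* bits.

#include <stdint.h>

#define PROT_R 0x1
#define PROT_X 0x4   /* illustrative VM_PROT_* stand-ins */

static int check_entry(uint64_t entry, uint64_t seg_start, uint64_t seg_size,
    int initprot, int validentry)
{
    if (entry >= seg_start && entry < seg_start + seg_size) {
        if ((initprot & (PROT_R | PROT_X)) == (PROT_R | PROT_X)) {
            return 1;        /* right range, right protections */
        }
        return 0;            /* right range, wrong protections: unset */
    }
    return validentry;       /* not this segment: leave as-is */
}

int main(void)
{
    /* entry inside an R+X __TEXT-like segment */
    return check_entry(0x100001000ULL, 0x100000000ULL, 0x4000,
               PROT_R | PROT_X, 0) == 1 ? 0 : 1;
}
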
command: - * - The command size should be atleast the size of struct uuid_command - * - The UUID part of the command should be completely within the mach-o header - */ + /* + * We need to check the following for this command: + * - The command size should be atleast the size of struct uuid_command + * - The UUID part of the command should be completely within the mach-o header + */ - if ((uulp->cmdsize < sizeof(struct uuid_command)) || - (((char *)uulp + sizeof(struct uuid_command)) > command_end)) { - return (LOAD_BADMACHO); - } - - memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid)); - return (LOAD_SUCCESS); + if ((uulp->cmdsize < sizeof(struct uuid_command)) || + (((char *)uulp + sizeof(struct uuid_command)) > command_end)) { + return LOAD_BADMACHO; + } + + memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid)); + return LOAD_SUCCESS; } static load_return_t load_main( - struct entry_point_command *epc, - thread_t thread, - int64_t slide, - load_result_t *result -) + struct entry_point_command *epc, + thread_t thread, + int64_t slide, + load_result_t *result + ) { mach_vm_offset_t addr; - kern_return_t ret; - - if (epc->cmdsize < sizeof(*epc)) - return (LOAD_BADMACHO); + kern_return_t ret; + + if (epc->cmdsize < sizeof(*epc)) { + return LOAD_BADMACHO; + } if (result->thread_count != 0) { - return (LOAD_FAILURE); + return LOAD_FAILURE; + } + + if (thread == THREAD_NULL) { + return LOAD_SUCCESS; } - if (thread == THREAD_NULL) - return (LOAD_SUCCESS); - /* * LC_MAIN specifies stack size but not location. * Add guard page to allocation size (MAXSSIZ includes guard page). */ if (epc->stacksize) { - if (os_add_overflow(epc->stacksize, 4*PAGE_SIZE, &result->user_stack_size)) { + if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) { /* * We are going to immediately throw away this result, but we want * to make sure we aren't loading a dangerously close to @@ -1994,14 +2014,16 @@ load_main( if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) { return LOAD_BADMACHO; } + result->custom_stack = TRUE; } else { result->user_stack_alloc_size = MAXSSIZ; } /* use default location for stack */ ret = thread_userstackdefault(&addr, result->is_64bit_addr); - if (ret != KERN_SUCCESS) - return(LOAD_FAILURE); + if (ret != KERN_SUCCESS) { + return LOAD_FAILURE; + } /* The stack slides down from the default location */ result->user_stack = addr; @@ -2009,7 +2031,7 @@ load_main( if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { /* Already processed LC_MAIN or LC_UNIXTHREAD */ - return (LOAD_FAILURE); + return LOAD_FAILURE; } /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. 
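
The LC_MAIN stack sizing above adds a guard allowance to the binary-requested stack size with an overflow check before trusting the sum. A minimal sketch of that pattern; GUARD stands in for the kernel's 4 * PAGE_SIZE allowance, and the MAXSSIZ fallback path is elided.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GUARD (4ULL * 4096)

static bool size_stack(uint64_t requested, uint64_t *alloc_size)
{
    if (__builtin_add_overflow(requested, GUARD, alloc_size)) {
        return false;            /* dangerously close to wrap: reject the binary */
    }
    return true;
}

int main(void)
{
    uint64_t sz;
    if (size_stack(8ULL << 20, &sz)) {       /* 8 MiB request */
        printf("allocate 0x%llx bytes\n", (unsigned long long)sz);
    }
    return 0;
}
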
*/ @@ -2018,48 +2040,53 @@ load_main( ret = thread_state_initialize( thread ); if (ret != KERN_SUCCESS) { - return(LOAD_FAILURE); + return LOAD_FAILURE; } result->unixproc = TRUE; result->thread_count++; - return(LOAD_SUCCESS); + return LOAD_SUCCESS; } static load_return_t load_unixthread( - struct thread_command *tcp, - thread_t thread, - int64_t slide, - load_result_t *result -) + struct thread_command *tcp, + thread_t thread, + int64_t slide, + load_result_t *result + ) { - load_return_t ret; - int customstack =0; + load_return_t ret; + int customstack = 0; mach_vm_offset_t addr; - if (tcp->cmdsize < sizeof(*tcp)) - return (LOAD_BADMACHO); + if (tcp->cmdsize < sizeof(*tcp)) { + return LOAD_BADMACHO; + } if (result->thread_count != 0) { - return (LOAD_FAILURE); + return LOAD_FAILURE; + } + + if (thread == THREAD_NULL) { + return LOAD_SUCCESS; } - if (thread == THREAD_NULL) - return (LOAD_SUCCESS); - ret = load_threadstack(thread, - (uint32_t *)(((vm_offset_t)tcp) + - sizeof(struct thread_command)), - tcp->cmdsize - sizeof(struct thread_command), - &addr, &customstack, result); - if (ret != LOAD_SUCCESS) - return(ret); + (uint32_t *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + &addr, &customstack, result); + if (ret != LOAD_SUCCESS) { + return ret; + } /* LC_UNIXTHREAD optionally specifies stack size and location */ - if (!customstack) { + if (customstack) { + result->custom_stack = TRUE; + } else { result->user_stack_alloc_size = MAXSSIZ; } @@ -2068,49 +2095,51 @@ load_unixthread( result->user_stack -= slide; ret = load_threadentry(thread, - (uint32_t *)(((vm_offset_t)tcp) + - sizeof(struct thread_command)), - tcp->cmdsize - sizeof(struct thread_command), - &addr); - if (ret != LOAD_SUCCESS) - return(ret); + (uint32_t *)(((vm_offset_t)tcp) + + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + &addr); + if (ret != LOAD_SUCCESS) { + return ret; + } if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { /* Already processed LC_MAIN or LC_UNIXTHREAD */ - return (LOAD_FAILURE); + return LOAD_FAILURE; } result->entry_point = addr; result->entry_point += slide; ret = load_threadstate(thread, - (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), - tcp->cmdsize - sizeof(struct thread_command), - result); - if (ret != LOAD_SUCCESS) - return (ret); + (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), + tcp->cmdsize - sizeof(struct thread_command), + result); + if (ret != LOAD_SUCCESS) { + return ret; + } result->unixproc = TRUE; result->thread_count++; - return(LOAD_SUCCESS); + return LOAD_SUCCESS; } static load_return_t load_threadstate( - thread_t thread, - uint32_t *ts, - uint32_t total_size, - load_result_t *result -) + thread_t thread, + uint32_t *ts, + uint32_t total_size, + load_result_t *result + ) { - uint32_t size; - int flavor; - uint32_t thread_size; + uint32_t size; + int flavor; + uint32_t thread_size; uint32_t *local_ts = NULL; uint32_t local_ts_size = 0; - int ret; + int ret; (void)thread; @@ -2140,7 +2169,7 @@ load_threadstate( goto bad; } - ts += size; /* ts is a (uint32_t *) */ + ts += size; /* ts is a (uint32_t *) */ } result->threadstate = local_ts; @@ -2157,28 +2186,30 @@ bad: static load_return_t load_threadstack( - thread_t thread, - uint32_t *ts, - uint32_t total_size, - mach_vm_offset_t *user_stack, - int *customstack, - load_result_t *result -) + thread_t thread, + uint32_t *ts, + uint32_t total_size, + mach_vm_offset_t *user_stack, 
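
One genuinely new piece of state in this patch is the custom_stack bit set above (and in load_main()): it records whether the binary declared its own stack size rather than taking the kernel default. A compressed sketch of the flag's shape and use; the struct and the 64 MiB stand-in for MAXSSIZ are abridged, not the real load_result_t layout.

#include <stdio.h>

struct load_result_sketch {
    unsigned int unixproc     : 1;
    unsigned int custom_stack : 1;   /* new in this patch */
    unsigned long long user_stack_alloc_size;
};

int main(void)
{
    struct load_result_sketch r = {0};
    unsigned long long requested = 1ULL << 20;   /* from LC_MAIN / LC_UNIXTHREAD */

    if (requested != 0) {
        r.user_stack_alloc_size = requested;
        r.custom_stack = 1;                      /* binary chose its own stack */
    } else {
        r.user_stack_alloc_size = 64ULL << 20;   /* stand-in for MAXSSIZ default */
    }
    printf("custom=%u size=0x%llx\n", r.custom_stack, r.user_stack_alloc_size);
    return 0;
}
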
+ int *customstack, + load_result_t *result + ) { - kern_return_t ret; - uint32_t size; - int flavor; - uint32_t stack_size; + kern_return_t ret; + uint32_t size; + int flavor; + uint32_t stack_size; while (total_size > 0) { flavor = *ts++; size = *ts++; - if (UINT32_MAX-2 < size || - UINT32_MAX/sizeof(uint32_t) < size+2) - return (LOAD_BADMACHO); - stack_size = (size+2)*sizeof(uint32_t); - if (stack_size > total_size) - return(LOAD_BADMACHO); + if (UINT32_MAX - 2 < size || + UINT32_MAX / sizeof(uint32_t) < size + 2) { + return LOAD_BADMACHO; + } + stack_size = (size + 2) * sizeof(uint32_t); + if (stack_size > total_size) { + return LOAD_BADMACHO; + } total_size -= stack_size; /* @@ -2188,26 +2219,26 @@ load_threadstack( */ ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data); if (ret != KERN_SUCCESS) { - return(LOAD_FAILURE); + return LOAD_FAILURE; } - ts += size; /* ts is a (uint32_t *) */ + ts += size; /* ts is a (uint32_t *) */ } - return(LOAD_SUCCESS); + return LOAD_SUCCESS; } static load_return_t load_threadentry( - thread_t thread, - uint32_t *ts, - uint32_t total_size, - mach_vm_offset_t *entry_point -) + thread_t thread, + uint32_t *ts, + uint32_t total_size, + mach_vm_offset_t *entry_point + ) { - kern_return_t ret; - uint32_t size; - int flavor; - uint32_t entry_size; + kern_return_t ret; + uint32_t size; + int flavor; + uint32_t entry_size; /* * Set the thread state. @@ -2216,12 +2247,14 @@ load_threadentry( while (total_size > 0) { flavor = *ts++; size = *ts++; - if (UINT32_MAX-2 < size || - UINT32_MAX/sizeof(uint32_t) < size+2) - return (LOAD_BADMACHO); - entry_size = (size+2)*sizeof(uint32_t); - if (entry_size > total_size) - return(LOAD_BADMACHO); + if (UINT32_MAX - 2 < size || + UINT32_MAX / sizeof(uint32_t) < size + 2) { + return LOAD_BADMACHO; + } + entry_size = (size + 2) * sizeof(uint32_t); + if (entry_size > total_size) { + return LOAD_BADMACHO; + } total_size -= entry_size; /* * Third argument is a kernel space pointer; it gets cast @@ -2230,19 +2263,19 @@ load_threadentry( */ ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point); if (ret != KERN_SUCCESS) { - return(LOAD_FAILURE); + return LOAD_FAILURE; } - ts += size; /* ts is a (uint32_t *) */ + ts += size; /* ts is a (uint32_t *) */ } - return(LOAD_SUCCESS); + return LOAD_SUCCESS; } struct macho_data { - struct nameidata __nid; + struct nameidata __nid; union macho_vnode_header { - struct mach_header mach_header; - struct fat_header fat_header; - char __pad[512]; + struct mach_header mach_header; + struct fat_header fat_header; + char __pad[512]; } __header; }; @@ -2253,43 +2286,35 @@ extern char dyld_alt_path[]; extern int use_alt_dyld; #endif -static uint64_t get_va_fsid(struct vnode_attr *vap) -{ - if (VATTR_IS_SUPPORTED(vap, va_fsid64)) { - return *(uint64_t *)&vap->va_fsid64; - } else { - return vap->va_fsid; - } -} - static load_return_t load_dylinker( - struct dylinker_command *lcp, - integer_t archbits, - vm_map_t map, - thread_t thread, - int depth, - int64_t slide, - load_result_t *result, - struct image_params *imgp -) + struct dylinker_command *lcp, + integer_t archbits, + vm_map_t map, + thread_t thread, + int depth, + int64_t slide, + load_result_t *result, + struct image_params *imgp + ) { - const char *name; - struct vnode *vp = NULLVP; /* set by get_macho_vnode() */ - struct mach_header *header; - off_t file_offset = 0; /* set by get_macho_vnode() */ - off_t macho_size = 0; /* set by get_macho_vnode() */ - 
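
load_threadstack() and load_threadentry() share the record walk restyled above: each record is [flavor, size, then size words of state], and size is range-checked before (size + 2) * sizeof(uint32_t) is trusted. A stand-alone sketch of that walk; the leading truncation guard is a small addition for self-containment (the kernel's bound comes from cmdsize), and the per-arch consumer is elided.

#include <stdbool.h>
#include <stdint.h>

static bool walk_thread_state(const uint32_t *ts, uint32_t total_size)
{
    while (total_size > 0) {
        if (total_size < 2 * sizeof(uint32_t)) {
            return false;                       /* truncated record header */
        }
        uint32_t flavor = *ts++;
        uint32_t size = *ts++;
        (void)flavor;
        if (UINT32_MAX - 2 < size ||
            UINT32_MAX / sizeof(uint32_t) < size + 2) {
            return false;                       /* (size + 2) * 4 would wrap */
        }
        uint32_t rec = (size + 2) * sizeof(uint32_t);
        if (rec > total_size) {
            return false;                       /* record overruns the command */
        }
        total_size -= rec;
        /* ... hand (flavor, ts, size) to the per-arch consumer here ... */
        ts += size;
    }
    return true;
}

int main(void)
{
    uint32_t ts[4] = { 1 /* flavor */, 2 /* size */, 0xaa, 0xbb };
    return walk_thread_state(ts, sizeof(ts)) ? 0 : 1;
}
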
load_result_t *myresult; - kern_return_t ret; - struct macho_data *macho_data; + const char *name; + struct vnode *vp = NULLVP; /* set by get_macho_vnode() */ + struct mach_header *header; + off_t file_offset = 0; /* set by get_macho_vnode() */ + off_t macho_size = 0; /* set by get_macho_vnode() */ + load_result_t *myresult; + kern_return_t ret; + struct macho_data *macho_data; struct { - struct mach_header __header; - load_result_t __myresult; - struct macho_data __macho_data; + struct mach_header __header; + load_result_t __myresult; + struct macho_data __macho_data; } *dyld_data; - if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) + if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) { return LOAD_BADMACHO; + } name = (const char *)lcp + lcp->name.offset; @@ -2302,51 +2327,52 @@ load_dylinker( #if (DEVELOPMENT || DEBUG) - /* - * rdar://23680808 - * If an alternate dyld has been specified via boot args, check - * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this - * executable and redirect the kernel to load that linker. - */ - - if (use_alt_dyld) { - int policy_error; - uint32_t policy_flags = 0; - int32_t policy_gencount = 0; - - policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount); - if (policy_error == 0) { - if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) { - name = dyld_alt_path; - } - } - } + /* + * rdar://23680808 + * If an alternate dyld has been specified via boot args, check + * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this + * executable and redirect the kernel to load that linker. + */ + + if (use_alt_dyld) { + int policy_error; + uint32_t policy_flags = 0; + int32_t policy_gencount = 0; + + policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount); + if (policy_error == 0) { + if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) { + name = dyld_alt_path; + } + } + } #endif #if !(DEVELOPMENT || DEBUG) if (0 != strcmp(name, DEFAULT_DYLD_PATH)) { - return (LOAD_BADMACHO); + return LOAD_BADMACHO; } #endif /* Allocate wad-of-data from heap to reduce excessively deep stacks */ - MALLOC(dyld_data, void *, sizeof (*dyld_data), M_TEMP, M_WAITOK); + MALLOC(dyld_data, void *, sizeof(*dyld_data), M_TEMP, M_WAITOK); header = &dyld_data->__header; myresult = &dyld_data->__myresult; macho_data = &dyld_data->__macho_data; ret = get_macho_vnode(name, archbits, header, &file_offset, &macho_size, macho_data, &vp); - if (ret) + if (ret) { goto novp_out; + } *myresult = load_result_null; myresult->is_64bit_addr = result->is_64bit_addr; myresult->is_64bit_data = result->is_64bit_data; ret = parse_machfile(vp, map, thread, header, file_offset, - macho_size, depth, slide, 0, myresult, result, imgp); + macho_size, depth, slide, 0, myresult, result, imgp); if (ret == LOAD_SUCCESS) { if (result->threadstate) { @@ -2373,39 +2399,38 @@ load_dylinker( VATTR_WANTED(&va, va_fileid); int error = vnode_getattr(vp, &va, imgp->ip_vfs_context); if (error == 0) { - imgp->ip_dyld_fsid = get_va_fsid(&va); + imgp->ip_dyld_fsid = vnode_get_va_fsid(&va); imgp->ip_dyld_fsobjid = va.va_fileid; } vnode_put(vp); novp_out: FREE(dyld_data, M_TEMP); - return (ret); - + return ret; } static load_return_t load_code_signature( - struct linkedit_data_command *lcp, - struct vnode *vp, - off_t macho_offset, - off_t macho_size, - cpu_type_t cputype, - load_result_t *result, - struct image_params *imgp) + struct linkedit_data_command *lcp, + struct vnode *vp, + off_t macho_offset, + off_t macho_size, + cpu_type_t cputype, + 
load_result_t *result, + struct image_params *imgp) { - int ret; - kern_return_t kr; - vm_offset_t addr; - int resid; - struct cs_blob *blob; - int error; - vm_size_t blob_size; + int ret; + kern_return_t kr; + vm_offset_t addr; + int resid; + struct cs_blob *blob; + int error; + vm_size_t blob_size; addr = 0; blob = NULL; - if (lcp->cmdsize != sizeof (struct linkedit_data_command) || + if (lcp->cmdsize != sizeof(struct linkedit_data_command) || lcp->dataoff + lcp->datasize > macho_size) { ret = LOAD_BADMACHO; goto out; @@ -2458,31 +2483,31 @@ load_code_signature( ret = LOAD_NOSPACE; goto out; } - + resid = 0; error = vn_rdwr(UIO_READ, - vp, - (caddr_t) addr, - lcp->datasize, - macho_offset + lcp->dataoff, - UIO_SYSSPACE, - 0, - kauth_cred_get(), - &resid, - current_proc()); + vp, + (caddr_t) addr, + lcp->datasize, + macho_offset + lcp->dataoff, + UIO_SYSSPACE, + 0, + kauth_cred_get(), + &resid, + current_proc()); if (error || resid != 0) { ret = LOAD_IOERROR; goto out; } if (ubc_cs_blob_add(vp, - cputype, - macho_offset, - &addr, - lcp->datasize, - imgp, - 0, - &blob)) { + cputype, + macho_offset, + &addr, + lcp->datasize, + imgp, + 0, + &blob)) { if (addr) { ubc_cs_blob_deallocate(addr, blob_size); } @@ -2496,12 +2521,13 @@ load_code_signature( #if CHECK_CS_VALIDATION_BITMAP ubc_cs_validation_bitmap_allocate( vp ); #endif - + ret = LOAD_SUCCESS; out: if (ret == LOAD_SUCCESS) { - if (blob == NULL) + if (blob == NULL) { panic("success, but no blob!"); + } result->csflags |= blob->csb_flags; result->platform_binary = blob->csb_platform_binary; @@ -2521,7 +2547,7 @@ out: static load_return_t set_code_unprotect( struct encryption_info_command *eip, - caddr_t addr, + caddr_t addr, vm_map_t map, int64_t slide, struct vnode *vp, @@ -2533,7 +2559,7 @@ set_code_unprotect( pager_crypt_info_t crypt_info; const char * cryptname = 0; char *vpath; - + size_t offset; struct segment_command_64 *seg64; struct segment_command *seg32; @@ -2541,63 +2567,73 @@ set_code_unprotect( vm_object_offset_t crypto_backing_offset; kern_return_t kr; - if (eip->cmdsize < sizeof(*eip)) return LOAD_BADMACHO; - - switch(eip->cryptid) { - case 0: - /* not encrypted, just an empty load command */ - return LOAD_SUCCESS; - case 1: - cryptname="com.apple.unfree"; - break; - case 0x10: - /* some random cryptid that you could manually put into - * your binary if you want NULL */ - cryptname="com.apple.null"; - break; - default: - return LOAD_BADMACHO; + if (eip->cmdsize < sizeof(*eip)) { + return LOAD_BADMACHO; + } + + switch (eip->cryptid) { + case 0: + /* not encrypted, just an empty load command */ + return LOAD_SUCCESS; + case 1: + cryptname = "com.apple.unfree"; + break; + case 0x10: + /* some random cryptid that you could manually put into + * your binary if you want NULL */ + cryptname = "com.apple.null"; + break; + default: + return LOAD_BADMACHO; + } + + if (map == VM_MAP_NULL) { + return LOAD_SUCCESS; + } + if (NULL == text_crypter_create) { + return LOAD_FAILURE; } - - if (map == VM_MAP_NULL) return (LOAD_SUCCESS); - if (NULL == text_crypter_create) return LOAD_FAILURE; MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if(vpath == NULL) return LOAD_FAILURE; - + if (vpath == NULL) { + return LOAD_FAILURE; + } + len = MAXPATHLEN; error = vn_getpath(vp, vpath, &len); if (error) { FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); return LOAD_FAILURE; } - + /* set up decrypter first */ crypt_file_data_t crypt_data = { .filename = vpath, .cputype = cputype, - .cpusubtype = cpusubtype}; - kr=text_crypter_create(&crypt_info, 
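
The rescan that follows in set_code_unprotect() locates the one segment whose file extent fully contains the encrypted byte range, then computes its slid virtual address. A sketch of just that containment test; the earlier code is what guarantees the segment fields cannot wrap, so that check is omitted here.

#include <stdbool.h>
#include <stdint.h>

static bool covers(uint64_t seg_fileoff, uint64_t seg_filesize,
    uint64_t cryptoff, uint64_t cryptsize,
    uint64_t seg_vmaddr, int64_t slide, uint64_t *va_out)
{
    if (seg_fileoff <= cryptoff &&
        seg_fileoff + seg_filesize >= cryptoff + cryptsize) {
        /* VA of the encrypted bytes = segment VA + offset within segment + slide */
        *va_out = seg_vmaddr + (cryptoff - seg_fileoff) + (uint64_t)slide;
        return true;
    }
    return false;
}

int main(void)
{
    uint64_t va;
    if (covers(0x1000, 0x4000, 0x2000, 0x1000, 0x100000000ULL, 0x5000, &va)) {
        /* expect 0x100000000 + 0x1000 + 0x5000 */
        return va == 0x100006000ULL ? 0 : 1;
    }
    return 1;
}
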
cryptname, (void*)&crypt_data); + .cpusubtype = cpusubtype + }; + kr = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data); #if VM_MAP_DEBUG_APPLE_PROTECT if (vm_map_debug_apple_protect) { struct proc *p; p = current_proc(); printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n", - p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr); + p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr); } #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI); - - if(kr) { + + if (kr) { printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n", - cryptname, kr); + cryptname, kr); if (kr == kIOReturnNotPrivileged) { /* text encryption returned decryption failure */ - return(LOAD_DECRYPTFAIL); - }else + return LOAD_DECRYPTFAIL; + } else { return LOAD_RESOURCE; + } } - + /* this is terrible, but we have to rescan the load commands to find the * virtual address of this encrypted stuff. This code is gonna look like * the dyld source one day... */ @@ -2605,7 +2641,7 @@ set_code_unprotect( size_t mach_header_sz = sizeof(struct mach_header); if (header->magic == MH_MAGIC_64 || header->magic == MH_CIGAM_64) { - mach_header_sz = sizeof(struct mach_header_64); + mach_header_sz = sizeof(struct mach_header_64); } offset = mach_header_sz; uint32_t ncmds = header->ncmds; @@ -2615,49 +2651,49 @@ set_code_unprotect( */ struct load_command *lcp = (struct load_command *)(addr + offset); offset += lcp->cmdsize; - - switch(lcp->cmd) { - case LC_SEGMENT_64: - seg64 = (struct segment_command_64 *)lcp; - if ((seg64->fileoff <= eip->cryptoff) && - (seg64->fileoff+seg64->filesize >= - eip->cryptoff+eip->cryptsize)) { - map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide; - map_size = eip->cryptsize; - crypto_backing_offset = macho_offset + eip->cryptoff; - goto remap_now; - } - case LC_SEGMENT: - seg32 = (struct segment_command *)lcp; - if ((seg32->fileoff <= eip->cryptoff) && - (seg32->fileoff+seg32->filesize >= - eip->cryptoff+eip->cryptsize)) { - map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide; - map_size = eip->cryptsize; - crypto_backing_offset = macho_offset + eip->cryptoff; - goto remap_now; - } + + switch (lcp->cmd) { + case LC_SEGMENT_64: + seg64 = (struct segment_command_64 *)lcp; + if ((seg64->fileoff <= eip->cryptoff) && + (seg64->fileoff + seg64->filesize >= + eip->cryptoff + eip->cryptsize)) { + map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide; + map_size = eip->cryptsize; + crypto_backing_offset = macho_offset + eip->cryptoff; + goto remap_now; + } + case LC_SEGMENT: + seg32 = (struct segment_command *)lcp; + if ((seg32->fileoff <= eip->cryptoff) && + (seg32->fileoff + seg32->filesize >= + eip->cryptoff + eip->cryptsize)) { + map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide; + map_size = eip->cryptsize; + crypto_backing_offset = macho_offset + eip->cryptoff; + goto remap_now; + } } } - + /* if we get here, did not find anything */ return LOAD_BADMACHO; - + remap_now: /* now remap using the decrypter */ MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n", - (uint64_t) map_offset, - (uint64_t) (map_offset+map_size))); + (uint64_t) map_offset, + (uint64_t) (map_offset + map_size))); kr = vm_map_apple_protected(map, - map_offset, - map_offset+map_size, - crypto_backing_offset, - &crypt_info); + map_offset, + map_offset + map_size, + crypto_backing_offset, + &crypt_info); if (kr) { printf("set_code_unprotect(): mapping failed with %x\n", kr); return LOAD_PROTECT; } - + return LOAD_SUCCESS; } @@ 
-2672,23 +2708,23 @@ remap_now: static load_return_t get_macho_vnode( - const char *path, - integer_t archbits, - struct mach_header *mach_header, - off_t *file_offset, - off_t *macho_size, - struct macho_data *data, - struct vnode **vpp -) + const char *path, + integer_t archbits, + struct mach_header *mach_header, + off_t *file_offset, + off_t *macho_size, + struct macho_data *data, + struct vnode **vpp + ) { - struct vnode *vp; - vfs_context_t ctx = vfs_context_current(); - proc_t p = vfs_context_proc(ctx); - kauth_cred_t kerncred; - struct nameidata *ndp = &data->__nid; - boolean_t is_fat; - struct fat_arch fat_arch; - int error; + struct vnode *vp; + vfs_context_t ctx = vfs_context_current(); + proc_t p = vfs_context_proc(ctx); + kauth_cred_t kerncred; + struct nameidata *ndp = &data->__nid; + boolean_t is_fat; + struct fat_arch fat_arch; + int error; int resid; union macho_vnode_header *header = &data->__header; off_t fsize = (off_t)0; @@ -2712,7 +2748,7 @@ get_macho_vnode( } else { error = LOAD_FAILURE; } - return(error); + return error; } nameidone(ndp); vp = ndp->ni_vp; @@ -2747,7 +2783,7 @@ get_macho_vnode( goto bad1; } - if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof (*header), 0, + if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof(*header), 0, UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) { error = LOAD_IOERROR; goto bad2; @@ -2762,29 +2798,29 @@ get_macho_vnode( header->mach_header.magic == MH_MAGIC_64) { is_fat = FALSE; } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) { - is_fat = TRUE; + is_fat = TRUE; } else { error = LOAD_BADMACHO; goto bad2; } if (is_fat) { - error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header), - sizeof(*header)); + sizeof(*header)); if (error != LOAD_SUCCESS) { goto bad2; } /* Look up our architecture in the fat file. */ error = fatfile_getarch_with_bits(archbits, - (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch); - if (error != LOAD_SUCCESS) + (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch); + if (error != LOAD_SUCCESS) { goto bad2; + } /* Read the Mach-O header out of it */ error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header, - sizeof (header->mach_header), fat_arch.offset, + sizeof(header->mach_header), fat_arch.offset, UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p); if (error) { error = LOAD_IOERROR; @@ -2829,11 +2865,11 @@ get_macho_vnode( *vpp = vp; ubc_setsize(vp, fsize); - return (error); + return error; bad2: (void) VNOP_CLOSE(vp, FREAD, ctx); bad1: vnode_put(vp); - return(error); + return error; } diff --git a/bsd/kern/mach_loader.h b/bsd/kern/mach_loader.h index 7870e8e84..5a0f66ceb 100644 --- a/bsd/kern/mach_loader.h +++ b/bsd/kern/mach_loader.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,8 +36,8 @@ * 24-Aug-92 Doug Mitchell at NeXT * Created. */ - -#ifndef _BSD_KERN_MACH_LOADER_H_ + +#ifndef _BSD_KERN_MACH_LOADER_H_ #define _BSD_KERN_MACH_LOADER_H_ #include @@ -50,64 +50,64 @@ typedef int load_return_t; * function returns LOAD_SUCCESS. */ typedef struct _load_result { - user_addr_t mach_header; - user_addr_t entry_point; + user_addr_t mach_header; + user_addr_t entry_point; // The user stack pointer and addressable user stack size. - user_addr_t user_stack; - mach_vm_size_t user_stack_size; + user_addr_t user_stack; + mach_vm_size_t user_stack_size; // The allocation containing the stack and guard area. - user_addr_t user_stack_alloc; - mach_vm_size_t user_stack_alloc_size; + user_addr_t user_stack_alloc; + mach_vm_size_t user_stack_alloc_size; - mach_vm_address_t all_image_info_addr; - mach_vm_size_t all_image_info_size; + mach_vm_address_t all_image_info_addr; + mach_vm_size_t all_image_info_size; - int thread_count; + int thread_count; unsigned int - /* boolean_t */ unixproc :1, - needs_dynlinker :1, - dynlinker :1, - validentry :1, - has_pagezero :1, - using_lcmain :1, + unixproc : 1, + needs_dynlinker : 1, + dynlinker : 1, + validentry : 1, + has_pagezero : 1, + using_lcmain : 1, #if __arm64__ - legacy_footprint :1, + legacy_footprint : 1, #endif /* __arm64__ */ - is_64bit_addr :1, - is_64bit_data :1; - unsigned int csflags; - unsigned char uuid[16]; - mach_vm_address_t min_vm_addr; - mach_vm_address_t max_vm_addr; - unsigned int platform_binary; - off_t cs_end_offset; - void *threadstate; - size_t threadstate_sz; + is_64bit_addr : 1, + is_64bit_data : 1, + custom_stack : 1; + unsigned int csflags; + unsigned char uuid[16]; + mach_vm_address_t min_vm_addr; + mach_vm_address_t max_vm_addr; + unsigned int platform_binary; + off_t cs_end_offset; + void *threadstate; + size_t threadstate_sz; } load_result_t; struct image_params; load_return_t load_machfile( - struct image_params *imgp, - struct mach_header *header, - thread_t thread, - vm_map_t *mapp, - load_result_t *result); + struct image_params *imgp, + struct mach_header *header, + thread_t thread, + vm_map_t *mapp, + load_result_t *result); -#define LOAD_SUCCESS 0 -#define LOAD_BADARCH 1 /* CPU type/subtype not found */ -#define LOAD_BADMACHO 2 /* malformed mach-o file */ -#define LOAD_SHLIB 3 /* shlib version mismatch */ -#define LOAD_FAILURE 4 /* Miscellaneous error */ -#define LOAD_NOSPACE 5 /* No VM available */ -#define LOAD_PROTECT 6 /* protection violation */ -#define LOAD_RESOURCE 7 /* resource allocation failure */ -#define LOAD_ENOENT 8 /* resource not found */ -#define LOAD_IOERROR 9 /* IO error */ -#define LOAD_DECRYPTFAIL 10 /* FP decrypt failure */ -#define LOAD_BADMACHO_UPX 11 /* malformed mach-o file */ -#define LOAD_BADARCH_X86 12 /* -no32exec boot-arg + attempted load - of 32bit x86 binary */ +#define LOAD_SUCCESS 0 +#define LOAD_BADARCH 1 /* CPU type/subtype not found */ +#define LOAD_BADMACHO 2 /* malformed mach-o file */ +#define LOAD_SHLIB 3 /* shlib version mismatch */ +#define LOAD_FAILURE 4 /* Miscellaneous error */ 
+#define LOAD_NOSPACE 5 /* No VM available */ +#define LOAD_PROTECT 6 /* protection violation */ +#define LOAD_RESOURCE 7 /* resource allocation failure */ +#define LOAD_ENOENT 8 /* resource not found */ +#define LOAD_IOERROR 9 /* IO error */ +#define LOAD_DECRYPTFAIL 10 /* FP decrypt failure */ +#define LOAD_BADMACHO_UPX 11 /* malformed mach-o file */ +#define LOAD_BADARCH_X86 12 /* -no32exec boot-arg + attempted load of 32bit x86 binary */ -#endif /* _BSD_KERN_MACH_LOADER_H_ */ +#endif /* _BSD_KERN_MACH_LOADER_H_ */ diff --git a/bsd/kern/mach_process.c b/bsd/kern/mach_process.c index 72d262def..bce1b9784 100644 --- a/bsd/kern/mach_process.c +++ b/bsd/kern/mach_process.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -83,15 +83,15 @@ #include #include #include -#include /* cs_allow_invalid() */ +#include /* cs_allow_invalid() */ #include #include #include -#include /* for task_resume() */ -#include /* for thread_exception_return() */ +#include /* for task_resume() */ +#include /* for thread_exception_return() */ #include @@ -103,11 +103,11 @@ int get_task_userstop(task_t); /* Macros to clear/set/test flags. 
*/ -#define SET(t, f) (t) |= (f) -#define CLR(t, f) (t) &= ~(f) -#define ISSET(t, f) ((t) & (f)) +#define SET(t, f) (t) |= (f) +#define CLR(t, f) (t) &= ~(f) +#define ISSET(t, f) ((t) & (f)) -extern thread_t port_name_to_thread(mach_port_name_t port_name); +extern thread_t port_name_to_thread(mach_port_name_t port_name); extern thread_t get_firstthread(task_t); @@ -118,10 +118,10 @@ extern thread_t get_firstthread(task_t); int ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval) { - struct proc *t = current_proc(); /* target process */ - task_t task; - thread_t th_act; - struct uthread *ut; + struct proc *t = current_proc(); /* target process */ + task_t task; + thread_t th_act; + struct uthread *ut; int tr_sigexc = 0; int error = 0; int stopped = 0; @@ -133,14 +133,15 @@ ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval) if (uap->req == PT_DENY_ATTACH) { #if (DEVELOPMENT || DEBUG) && CONFIG_EMBEDDED - if (PE_i_can_has_debugger(NULL)) - return(0); + if (PE_i_can_has_debugger(NULL)) { + return 0; + } #endif proc_lock(p); if (ISSET(p->p_lflag, P_LTRACED)) { proc_unlock(p); KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE, - p->p_pid, W_EXITCODE(ENOTSUP, 0), 4, 0, 0); + p->p_pid, W_EXITCODE(ENOTSUP, 0), 4, 0, 0); exit1(p, W_EXITCODE(ENOTSUP, 0), retval); thread_exception_return(); @@ -149,25 +150,27 @@ ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval) SET(p->p_lflag, P_LNOATTACH); proc_unlock(p); - return(0); + return 0; } if (uap->req == PT_FORCEQUOTA) { if (kauth_cred_issuser(kauth_cred_get())) { OSBitOrAtomic(P_FORCEQUOTA, &t->p_flag); - return (0); - } else - return (EPERM); + return 0; + } else { + return EPERM; + } } /* * Intercept and deal with "please trace me" request. - */ + */ if (uap->req == PT_TRACE_ME) { -retry_trace_me:; +retry_trace_me: ; proc_t pproc = proc_parent(p); - if (pproc == NULL) - return (EINVAL); + if (pproc == NULL) { + return EINVAL; + } #if CONFIG_MACF /* * NB: Cannot call kauth_authorize_process(..., KAUTH_PROCESS_CANTRACE, ...) @@ -177,50 +180,51 @@ retry_trace_me:; */ if ((error = mac_proc_check_debug(pproc, p)) == 0) { #endif - proc_lock(p); - /* Make sure the process wasn't re-parented. */ - if (p->p_ppid != pproc->p_pid) { - proc_unlock(p); - proc_rele(pproc); - goto retry_trace_me; - } - SET(p->p_lflag, P_LTRACED); - /* Non-attached case, our tracer is our parent. */ - p->p_oppid = p->p_ppid; + proc_lock(p); + /* Make sure the process wasn't re-parented. */ + if (p->p_ppid != pproc->p_pid) { proc_unlock(p); - /* Child and parent will have to be able to run modified code. */ - cs_allow_invalid(p); - cs_allow_invalid(pproc); -#if CONFIG_MACF + proc_rele(pproc); + goto retry_trace_me; } + SET(p->p_lflag, P_LTRACED); + /* Non-attached case, our tracer is our parent. */ + p->p_oppid = p->p_ppid; + proc_unlock(p); + /* Child and parent will have to be able to run modified code. */ + cs_allow_invalid(p); + cs_allow_invalid(pproc); +#if CONFIG_MACF + } #endif proc_rele(pproc); - return (error); + return error; } if (uap->req == PT_SIGEXC) { proc_lock(p); if (ISSET(p->p_lflag, P_LTRACED)) { SET(p->p_lflag, P_LSIGEXC); proc_unlock(p); - return(0); + return 0; } else { proc_unlock(p); - return(EINVAL); + return EINVAL; } } - /* - * We do not want ptrace to do anything with kernel or launchd + /* + * We do not want ptrace to do anything with kernel or launchd */ if (uap->pid < 2) { - return(EPERM); + return EPERM; } /* * Locate victim, and make sure it is traceable. 
*/ - if ((t = proc_find(uap->pid)) == NULL) - return (ESRCH); + if ((t = proc_find(uap->pid)) == NULL) { + return ESRCH; + } AUDIT_ARG(process, t); @@ -233,7 +237,7 @@ retry_trace_me:; } if (uap->req == PT_ATTACH) { #pragma clang diagnostic pop - int err; + int err; #if CONFIG_EMBEDDED if (tr_sigexc == 0) { @@ -242,25 +246,27 @@ retry_trace_me:; } #endif - if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE, - t, (uintptr_t)&err, 0, 0) == 0 ) { + if (kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE, + t, (uintptr_t)&err, 0, 0) == 0) { /* it's OK to attach */ proc_lock(t); SET(t->p_lflag, P_LTRACED); - if (tr_sigexc) + if (tr_sigexc) { SET(t->p_lflag, P_LSIGEXC); - + } + t->p_oppid = t->p_ppid; /* Check whether child and parent are allowed to run modified * code (they'll have to) */ proc_unlock(t); cs_allow_invalid(t); cs_allow_invalid(p); - if (t->p_pptr != p) + if (t->p_pptr != p) { proc_reparentlocked(t, p, 1, 0); - + } + proc_lock(t); - if (get_task_userstop(task) > 0 ) { + if (get_task_userstop(task) > 0) { stopped = 1; } t->p_xstat = 0; @@ -271,17 +277,17 @@ retry_trace_me:; * issignal() again to properly connect to the tracing * process. */ - if (stopped) - task_resume(task); + if (stopped) { + task_resume(task); + } error = 0; goto out; - } - else { + } else { /* not allowed to attach, proper error code returned by kauth_authorize_process */ if (ISSET(t->p_lflag, P_LNOATTACH)) { psignal(p, SIGSEGV); } - + error = err; goto out; } @@ -322,7 +328,6 @@ retry_trace_me:; */ /* proc lock is held here */ switch (uap->req) { - case PT_DETACH: if (t->p_oppid != t->p_ppid) { struct proc *pp; @@ -347,7 +352,7 @@ retry_trace_me:; CLR(t->p_lflag, P_LSIGEXC); proc_unlock(t); goto resume; - + case PT_KILL: /* * Tell child process to kill itself after it @@ -356,14 +361,15 @@ retry_trace_me:; proc_unlock(t); #if CONFIG_MACF error = mac_proc_check_signal(p, t, SIGKILL); - if (0 != error) + if (0 != error) { goto resume; + } #endif psignal(t, SIGKILL); goto resume; - case PT_STEP: /* single step the child */ - case PT_CONTINUE: /* continue the child */ + case PT_STEP: /* single step the child */ + case PT_CONTINUE: /* continue the child */ proc_unlock(t); th_act = (thread_t)get_firstthread(task); if (th_act == THREAD_NULL) { @@ -385,15 +391,16 @@ retry_trace_me:; if (uap->data != 0) { #if CONFIG_MACF error = mac_proc_check_signal(p, t, uap->data); - if (0 != error) + if (0 != error) { goto out; + } #endif psignal(t, uap->data); } if (uap->req == PT_STEP) { - /* - * set trace bit + /* + * set trace bit * we use sending SIGSTOP as a comparable security check. */ #if CONFIG_MACF @@ -407,7 +414,7 @@ retry_trace_me:; goto out; } } else { - /* + /* * clear trace bit if on * we use sending SIGCONT as a comparable security check. 
*/ @@ -421,8 +428,8 @@ retry_trace_me:; error = ENOTSUP; goto out; } - } - resume: + } +resume: proc_lock(t); t->p_xstat = uap->data; t->p_stat = SRUN; @@ -432,11 +439,12 @@ retry_trace_me:; if ((t->p_lflag & P_LSIGEXC) == 0) { task_resume(task); } - } else + } else { proc_unlock(t); - + } + break; - + case PT_THUPDATE: { proc_unlock(t); if ((unsigned)uap->data >= NSIG) { @@ -449,16 +457,17 @@ retry_trace_me:; goto out; } ut = (uthread_t)get_bsdthread_info(th_act); - if (uap->data) + if (uap->data) { ut->uu_siglist |= sigmask(uap->data); + } proc_lock(t); t->p_xstat = uap->data; t->p_stat = SRUN; proc_unlock(t); thread_deallocate(th_act); error = 0; - } - break; + } + break; default: proc_unlock(t); error = EINVAL; @@ -468,7 +477,7 @@ retry_trace_me:; error = 0; out: proc_rele(t); - return(error); + return error; } @@ -479,14 +488,14 @@ out: int cantrace(proc_t cur_procp, kauth_cred_t creds, proc_t traced_procp, int *errp) { - int my_err; + int my_err; /* * You can't trace a process if: * (1) it's the process that's doing the tracing, */ if (traced_procp->p_pid == cur_procp->p_pid) { *errp = EINVAL; - return (0); + return 0; } /* @@ -494,7 +503,7 @@ cantrace(proc_t cur_procp, kauth_cred_t creds, proc_t traced_procp, int *errp) */ if (ISSET(traced_procp->p_lflag, P_LTRACED)) { *errp = EBUSY; - return (0); + return 0; } /* @@ -502,28 +511,28 @@ cantrace(proc_t cur_procp, kauth_cred_t creds, proc_t traced_procp, int *errp) * (unless you're root). */ if ((kauth_cred_getruid(creds) != kauth_cred_getruid(proc_ucred(traced_procp)) || - ISSET(traced_procp->p_flag, P_SUGID)) && - (my_err = suser(creds, &cur_procp->p_acflag)) != 0) { + ISSET(traced_procp->p_flag, P_SUGID)) && + (my_err = suser(creds, &cur_procp->p_acflag)) != 0) { *errp = my_err; - return (0); + return 0; } if ((cur_procp->p_lflag & P_LTRACED) && isinferior(cur_procp, traced_procp)) { *errp = EPERM; - return (0); + return 0; } if (ISSET(traced_procp->p_lflag, P_LNOATTACH)) { *errp = EBUSY; - return (0); + return 0; } #if CONFIG_MACF if ((my_err = mac_proc_check_debug(cur_procp, traced_procp)) != 0) { *errp = my_err; - return (0); + return 0; } #endif - return(1); + return 1; } diff --git a/bsd/kern/mcache.c b/bsd/kern/mcache.c index 0794dc1db..a2263417d 100644 --- a/bsd/kern/mcache.c +++ b/bsd/kern/mcache.c @@ -68,14 +68,14 @@ #include -#define MCACHE_SIZE(n) \ +#define MCACHE_SIZE(n) \ __builtin_offsetof(mcache_t, mc_cpu[n]) /* Allocate extra in case we need to manually align the pointer */ -#define MCACHE_ALLOC_SIZE \ +#define MCACHE_ALLOC_SIZE \ (sizeof (void *) + MCACHE_SIZE(ncpu) + CPU_CACHE_LINE_SIZE) -#define MCACHE_CPU(c) \ +#define MCACHE_CPU(c) \ (mcache_cpu_t *)((void *)((char *)(c) + MCACHE_SIZE(cpu_number()))) /* @@ -85,19 +85,19 @@ * section, so that we can avoid recursive requests to reap the * caches when memory runs low. 
*/ -#define MCACHE_LIST_LOCK() { \ - lck_mtx_lock(mcache_llock); \ - mcache_llock_owner = current_thread(); \ +#define MCACHE_LIST_LOCK() { \ + lck_mtx_lock(mcache_llock); \ + mcache_llock_owner = current_thread(); \ } -#define MCACHE_LIST_UNLOCK() { \ - mcache_llock_owner = NULL; \ - lck_mtx_unlock(mcache_llock); \ +#define MCACHE_LIST_UNLOCK() { \ + mcache_llock_owner = NULL; \ + lck_mtx_unlock(mcache_llock); \ } -#define MCACHE_LOCK(l) lck_mtx_lock(l) -#define MCACHE_UNLOCK(l) lck_mtx_unlock(l) -#define MCACHE_LOCK_TRY(l) lck_mtx_try_lock(l) +#define MCACHE_LOCK(l) lck_mtx_lock(l) +#define MCACHE_UNLOCK(l) lck_mtx_unlock(l) +#define MCACHE_LOCK_TRY(l) lck_mtx_try_lock(l) static int ncpu; static unsigned int cache_line_size; @@ -122,20 +122,20 @@ static unsigned int mcache_flags = 0; int mca_trn_max = MCA_TRN_MAX; -#define DUMP_MCA_BUF_SIZE 512 +#define DUMP_MCA_BUF_SIZE 512 static char *mca_dump_buf; static mcache_bkttype_t mcache_bkttype[] = { - { 1, 4096, 32768, NULL }, - { 3, 2048, 16384, NULL }, - { 7, 1024, 12288, NULL }, - { 15, 256, 8192, NULL }, - { 31, 64, 4096, NULL }, - { 47, 0, 2048, NULL }, - { 63, 0, 1024, NULL }, - { 95, 0, 512, NULL }, - { 143, 0, 256, NULL }, - { 165, 0, 0, NULL }, + { 1, 4096, 32768, NULL }, + { 3, 2048, 16384, NULL }, + { 7, 1024, 12288, NULL }, + { 15, 256, 8192, NULL }, + { 31, 64, 4096, NULL }, + { 47, 0, 2048, NULL }, + { 63, 0, 1024, NULL }, + { 95, 0, 512, NULL }, + { 143, 0, 256, NULL }, + { 165, 0, 0, NULL }, }; static mcache_t *mcache_create_common(const char *, size_t, size_t, @@ -189,7 +189,7 @@ mcache_init(void) VERIFY(mca_trn_max >= 2); ncpu = ml_get_max_cpus(); - (void) mcache_cache_line_size(); /* prime it */ + (void) mcache_cache_line_size(); /* prime it */ mcache_llock_grp_attr = lck_grp_attr_alloc_init(); mcache_llock_grp = lck_grp_alloc_init("mcache.list", @@ -199,29 +199,31 @@ mcache_init(void) mcache_reap_tcall = thread_call_allocate(mcache_reap_timeout, NULL); mcache_update_tcall = thread_call_allocate(mcache_update, NULL); - if (mcache_reap_tcall == NULL || mcache_update_tcall == NULL) + if (mcache_reap_tcall == NULL || mcache_update_tcall == NULL) { panic("mcache_init: thread_call_allocate failed"); + } mcache_zone = zinit(MCACHE_ALLOC_SIZE, 256 * MCACHE_ALLOC_SIZE, PAGE_SIZE, "mcache"); - if (mcache_zone == NULL) + if (mcache_zone == NULL) { panic("mcache_init: failed to allocate mcache zone\n"); + } zone_change(mcache_zone, Z_CALLERACCT, FALSE); LIST_INIT(&mcache_head); - for (i = 0; i < sizeof (mcache_bkttype) / sizeof (*btp); i++) { + for (i = 0; i < sizeof(mcache_bkttype) / sizeof(*btp); i++) { btp = &mcache_bkttype[i]; - (void) snprintf(name, sizeof (name), "bkt_%d", + (void) snprintf(name, sizeof(name), "bkt_%d", btp->bt_bktsize); btp->bt_cache = mcache_create(name, - (btp->bt_bktsize + 1) * sizeof (void *), 0, 0, MCR_SLEEP); + (btp->bt_bktsize + 1) * sizeof(void *), 0, 0, MCR_SLEEP); } PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof(mcache_flags)); mcache_flags &= MCF_FLAGS_MASK; - mcache_audit_cache = mcache_create("audit", sizeof (mcache_audit_t), + mcache_audit_cache = mcache_create("audit", sizeof(mcache_audit_t), 0, 0, MCR_SLEEP); mcache_applyall(mcache_cache_bkt_enable); @@ -237,7 +239,7 @@ mcache_init(void) __private_extern__ unsigned int mcache_getflags(void) { - return (mcache_flags); + return mcache_flags; } /* @@ -251,7 +253,7 @@ mcache_cache_line_size(void) ml_cpu_get_info(&cpu_info); cache_line_size = cpu_info.cache_line_size; } - return (cache_line_size); + return cache_line_size; } /* @@ -263,9 
+265,9 @@ __private_extern__ mcache_t * mcache_create(const char *name, size_t bufsize, size_t align, u_int32_t flags, int wait) { - return (mcache_create_common(name, bufsize, align, mcache_slab_alloc, - mcache_slab_free, mcache_slab_audit, NULL, NULL, NULL, flags, 1, - wait)); + return mcache_create_common(name, bufsize, align, mcache_slab_alloc, + mcache_slab_free, mcache_slab_audit, NULL, NULL, NULL, flags, 1, + wait); } /* @@ -279,8 +281,8 @@ mcache_create_ext(const char *name, size_t bufsize, mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg, u_int32_t flags, int wait) { - return (mcache_create_common(name, bufsize, 0, allocfn, - freefn, auditfn, logfn, notifyfn, arg, flags, 0, wait)); + return mcache_create_common(name, bufsize, 0, allocfn, + freefn, auditfn, logfn, notifyfn, arg, flags, 0, wait); } /* @@ -304,13 +306,15 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, int malloc_wait = (wait & MCR_NOSLEEP) ? M_NOWAIT : M_WAITOK; MALLOC(mca_dump_buf, char *, DUMP_MCA_BUF_SIZE, M_TEMP, malloc_wait | M_ZERO); - if (mca_dump_buf == NULL) - return (NULL); + if (mca_dump_buf == NULL) { + return NULL; + } } buf = zalloc(mcache_zone); - if (buf == NULL) + if (buf == NULL) { goto fail; + } bzero(buf, MCACHE_ALLOC_SIZE); @@ -322,8 +326,8 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, * is okay since we've allocated extra space for this. */ cp = (mcache_t *) - P2ROUNDUP((intptr_t)buf + sizeof (void *), CPU_CACHE_LINE_SIZE); - pbuf = (void **)((intptr_t)cp - sizeof (void *)); + P2ROUNDUP((intptr_t)buf + sizeof(void *), CPU_CACHE_LINE_SIZE); + pbuf = (void **)((intptr_t)cp - sizeof(void *)); *pbuf = buf; /* @@ -334,13 +338,15 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, align = 1; } else { /* Enforce 64-bit minimum alignment for zone-based buffers */ - if (align == 0) + if (align == 0) { align = MCACHE_ALIGN; + } align = P2ROUNDUP(align, MCACHE_ALIGN); } - if ((align & (align - 1)) != 0) + if ((align & (align - 1)) != 0) { panic("mcache_create: bad alignment %lu", align); + } cp->mc_align = align; cp->mc_slab_alloc = allocfn; @@ -352,9 +358,9 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, cp->mc_bufsize = bufsize; cp->mc_flags = (flags & MCF_FLAGS_MASK) | mcache_flags; - (void) snprintf(cp->mc_name, sizeof (cp->mc_name), "mcache.%s", name); + (void) snprintf(cp->mc_name, sizeof(cp->mc_name), "mcache.%s", name); - (void) snprintf(lck_name, sizeof (lck_name), "%s.cpu", cp->mc_name); + (void) snprintf(lck_name, sizeof(lck_name), "%s.cpu", cp->mc_name); cp->mc_cpu_lock_grp_attr = lck_grp_attr_alloc_init(); cp->mc_cpu_lock_grp = lck_grp_alloc_init(lck_name, cp->mc_cpu_lock_grp_attr); @@ -367,14 +373,15 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, * handle multiple-element allocation requests, where the elements * returned are linked together in a list. 
*/ - chunksize = MAX(bufsize, sizeof (u_int64_t)); + chunksize = MAX(bufsize, sizeof(u_int64_t)); if (need_zone) { VERIFY(align != 0 && (align % MCACHE_ALIGN) == 0); - chunksize += sizeof (uint64_t) + align; + chunksize += sizeof(uint64_t) + align; chunksize = P2ROUNDUP(chunksize, align); if ((cp->mc_slab_zone = zinit(chunksize, 64 * 1024 * ncpu, - PAGE_SIZE, cp->mc_name)) == NULL) + PAGE_SIZE, cp->mc_name)) == NULL) { goto fail; + } zone_change(cp->mc_slab_zone, Z_EXPAND, TRUE); } cp->mc_chunksize = chunksize; @@ -382,7 +389,7 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, /* * Initialize the bucket layer. */ - (void) snprintf(lck_name, sizeof (lck_name), "%s.bkt", cp->mc_name); + (void) snprintf(lck_name, sizeof(lck_name), "%s.bkt", cp->mc_name); cp->mc_bkt_lock_grp_attr = lck_grp_attr_alloc_init(); cp->mc_bkt_lock_grp = lck_grp_alloc_init(lck_name, cp->mc_bkt_lock_grp_attr); @@ -390,7 +397,7 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, lck_mtx_init(&cp->mc_bkt_lock, cp->mc_bkt_lock_grp, cp->mc_bkt_lock_attr); - (void) snprintf(lck_name, sizeof (lck_name), "%s.sync", cp->mc_name); + (void) snprintf(lck_name, sizeof(lck_name), "%s.sync", cp->mc_name); cp->mc_sync_lock_grp_attr = lck_grp_attr_alloc_init(); cp->mc_sync_lock_grp = lck_grp_alloc_init(lck_name, cp->mc_sync_lock_grp_attr); @@ -398,8 +405,9 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, lck_mtx_init(&cp->mc_sync_lock, cp->mc_sync_lock_grp, cp->mc_sync_lock_attr); - for (btp = mcache_bkttype; chunksize <= btp->bt_minbuf; btp++) + for (btp = mcache_bkttype; chunksize <= btp->bt_minbuf; btp++) { continue; + } cp->cache_bkttype = btp; @@ -417,8 +425,9 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, ccp->cc_pobjs = -1; } - if (mcache_ready) + if (mcache_ready) { mcache_cache_bkt_enable(cp); + } /* TODO: dynamically create sysctl for stats */ @@ -439,12 +448,13 @@ mcache_create_common(const char *name, size_t bufsize, size_t align, "chunksize %lu bktsize %d\n", name, need_zone ? "i" : "e", arg, bufsize, cp->mc_align, chunksize, btp->bt_bktsize); } - return (cp); + return cp; fail: - if (buf != NULL) + if (buf != NULL) { zfree(mcache_zone, buf); - return (NULL); + } + return NULL; } /* @@ -460,13 +470,14 @@ mcache_alloc_ext(mcache_t *cp, mcache_obj_t **list, unsigned int num, int wait) boolean_t nwretry = FALSE; /* MCR_NOSLEEP and MCR_FAILOK are mutually exclusive */ - VERIFY((wait & (MCR_NOSLEEP|MCR_FAILOK)) != (MCR_NOSLEEP|MCR_FAILOK)); + VERIFY((wait & (MCR_NOSLEEP | MCR_FAILOK)) != (MCR_NOSLEEP | MCR_FAILOK)); ASSERT(list != NULL); *list = NULL; - if (num == 0) - return (0); + if (num == 0) { + return 0; + } retry_alloc: /* We may not always be running in the same CPU in case of retries */ @@ -502,13 +513,15 @@ retry_alloc: MCACHE_UNLOCK(&ccp->cc_lock); if (!(cp->mc_flags & MCF_NOLEAKLOG) && - cp->mc_slab_log != NULL) + cp->mc_slab_log != NULL) { (*cp->mc_slab_log)(num, *top, TRUE); + } - if (cp->mc_flags & MCF_DEBUG) + if (cp->mc_flags & MCF_DEBUG) { goto debug_alloc; + } - return (num); + return num; } } @@ -526,8 +539,9 @@ retry_alloc: * can happen either because MCF_NOCPUCACHE is set, or because * the bucket layer is currently being resized. 
*/ - if (ccp->cc_bktsize == 0) + if (ccp->cc_bktsize == 0) { break; + } /* * Both of the CPU's buckets are empty; try to get a full @@ -536,9 +550,10 @@ retry_alloc: */ bkt = mcache_bkt_alloc(cp, &cp->mc_full, NULL); if (bkt != NULL) { - if (ccp->cc_pfilled != NULL) + if (ccp->cc_pfilled != NULL) { mcache_bkt_free(cp, &cp->mc_empty, ccp->cc_pfilled); + } mcache_cpu_refill(ccp, bkt, ccp->cc_bktsize); continue; } @@ -563,8 +578,9 @@ retry_alloc: goto retry_alloc; } else if ((wait & (MCR_NOSLEEP | MCR_TRYHARD)) && !mcache_bkt_isempty(cp)) { - if (!nwretry) + if (!nwretry) { nwretry = TRUE; + } atomic_add_32(&cp->mc_nwretry_cnt, 1); goto retry_alloc; } else if (nwretry) { @@ -572,11 +588,13 @@ retry_alloc: } } - if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) + if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) { (*cp->mc_slab_log)((num - need), *top, TRUE); + } - if (!(cp->mc_flags & MCF_DEBUG)) - return (num - need); + if (!(cp->mc_flags & MCF_DEBUG)) { + return num - need; + } debug_alloc: if (cp->mc_flags & MCF_DEBUG) { @@ -602,10 +620,11 @@ debug_alloc: } /* Invoke the slab layer audit callback if auditing is enabled */ - if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) + if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) { (*cp->mc_slab_audit)(cp->mc_private, *top, TRUE); + } - return (num - need); + return num - need; } /* @@ -617,7 +636,7 @@ mcache_alloc(mcache_t *cp, int wait) mcache_obj_t *buf; (void) mcache_alloc_ext(cp, &buf, 1, wait); - return (buf); + return buf; } __private_extern__ void @@ -640,7 +659,7 @@ mcache_bkt_isempty(mcache_t *cp) * any full buckets in the cache; it is simply a way to * obtain "hints" about the state of the cache. */ - return (cp->mc_full.bl_total == 0); + return cp->mc_full.bl_total == 0; } /* @@ -649,8 +668,9 @@ mcache_bkt_isempty(mcache_t *cp) static void mcache_notify(mcache_t *cp, u_int32_t event) { - if (cp->mc_slab_notify != NULL) + if (cp->mc_slab_notify != NULL) { (*cp->mc_slab_notify)(cp->mc_private, event); + } } /* @@ -681,23 +701,25 @@ mcache_purge_cache(mcache_t *cp, boolean_t async) * Purging a cache that has no per-CPU caches or is already * in the process of being purged is rather pointless. 
*/ - if (cp->mc_flags & MCF_NOCPUCACHE) - return (FALSE); + if (cp->mc_flags & MCF_NOCPUCACHE) { + return FALSE; + } lck_mtx_lock_spin(&cp->mc_sync_lock); if (cp->mc_purge_cnt > 0) { lck_mtx_unlock(&cp->mc_sync_lock); - return (FALSE); + return FALSE; } cp->mc_purge_cnt++; lck_mtx_unlock(&cp->mc_sync_lock); - if (async) + if (async) { mcache_dispatch(mcache_purge, cp); - else + } else { mcache_purge(cp); + } - return (TRUE); + return TRUE; } /* @@ -721,12 +743,14 @@ mcache_free_ext(mcache_t *cp, mcache_obj_t *list) mcache_obj_t *nlist; mcache_bkt_t *bkt; - if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) + if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) { (*cp->mc_slab_log)(0, list, FALSE); + } /* Invoke the slab layer audit callback if auditing is enabled */ - if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) + if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) { (*cp->mc_slab_audit)(cp->mc_private, list, FALSE); + } MCACHE_LOCK(&ccp->cc_lock); for (;;) { @@ -749,15 +773,17 @@ mcache_free_ext(mcache_t *cp, mcache_obj_t *list) ccp->cc_filled->bkt_obj[ccp->cc_objs++] = list; ccp->cc_free++; - if ((list = nlist) != NULL) + if ((list = nlist) != NULL) { continue; + } /* We are done; return to caller */ MCACHE_UNLOCK(&ccp->cc_lock); /* If there is a waiter below, notify it */ - if (cp->mc_waiter_cnt > 0) + if (cp->mc_waiter_cnt > 0) { mcache_notify(cp, MCN_RETRYALLOC); + } return; } @@ -775,8 +801,9 @@ mcache_free_ext(mcache_t *cp, mcache_obj_t *list) * happen either because MCF_NOCPUCACHE is set, or because * the bucket layer is currently being resized. */ - if (ccp->cc_bktsize == 0) + if (ccp->cc_bktsize == 0) { break; + } /* * Both of the CPU's buckets are full; try to get an empty @@ -785,9 +812,10 @@ mcache_free_ext(mcache_t *cp, mcache_obj_t *list) */ bkt = mcache_bkt_alloc(cp, &cp->mc_empty, &btp); if (bkt != NULL) { - if (ccp->cc_pfilled != NULL) + if (ccp->cc_pfilled != NULL) { mcache_bkt_free(cp, &cp->mc_full, ccp->cc_pfilled); + } mcache_cpu_refill(ccp, bkt, 0); continue; } @@ -833,8 +861,9 @@ mcache_free_ext(mcache_t *cp, mcache_obj_t *list) MCACHE_UNLOCK(&ccp->cc_lock); /* If there is a waiter below, notify it */ - if (cp->mc_waiter_cnt > 0) + if (cp->mc_waiter_cnt > 0) { mcache_notify(cp, MCN_RETRYALLOC); + } /* Advise the slab layer to purge the object(s) */ (*cp->mc_slab_free)(cp->mc_private, list, @@ -888,7 +917,7 @@ mcache_destroy(mcache_t *cp) */ /* Get the original address since we're about to free it */ - pbuf = (void **)((intptr_t)cp - sizeof (void *)); + pbuf = (void **)((intptr_t)cp - sizeof(void *)); zfree(mcache_zone, *pbuf); } @@ -904,7 +933,7 @@ mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, #pragma unused(wait) mcache_t *cp = arg; unsigned int need = num; - size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t)); + size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t)); u_int32_t flags = cp->mc_flags; void *buf, *base, **pbuf; mcache_obj_t **list = *plist; @@ -913,21 +942,22 @@ mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, for (;;) { buf = zalloc(cp->mc_slab_zone); - if (buf == NULL) + if (buf == NULL) { break; + } /* Get the aligned base address for this object */ - base = (void *)P2ROUNDUP((intptr_t)buf + sizeof (u_int64_t), + base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t), cp->mc_align); /* * Wind back a pointer size from the aligned base and * save the original address so we can free it later. 
*/ - pbuf = (void **)((intptr_t)base - sizeof (void *)); + pbuf = (void **)((intptr_t)base - sizeof(void *)); *pbuf = buf; - VERIFY (((intptr_t)base + cp->mc_bufsize) <= + VERIFY(((intptr_t)base + cp->mc_bufsize) <= ((intptr_t)buf + cp->mc_chunksize)); /* @@ -950,11 +980,12 @@ mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, list = *plist = &(*list)->obj_next; /* If we got them all, return to mcache */ - if (--need == 0) + if (--need == 0) { break; + } } - return (num - need); + return num - need; } /* @@ -965,7 +996,7 @@ mcache_slab_free(void *arg, mcache_obj_t *list, __unused boolean_t purged) { mcache_t *cp = arg; mcache_obj_t *nlist; - size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t)); + size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t)); u_int32_t flags = cp->mc_flags; void *base; void **pbuf; @@ -978,7 +1009,7 @@ mcache_slab_free(void *arg, mcache_obj_t *list, __unused boolean_t purged) VERIFY(IS_P2ALIGNED(base, cp->mc_align)); /* Get the original address since we're about to free it */ - pbuf = (void **)((intptr_t)base - sizeof (void *)); + pbuf = (void **)((intptr_t)base - sizeof(void *)); VERIFY(((intptr_t)base + cp->mc_bufsize) <= ((intptr_t)*pbuf + cp->mc_chunksize)); @@ -993,8 +1024,9 @@ mcache_slab_free(void *arg, mcache_obj_t *list, __unused boolean_t purged) zfree(cp->mc_slab_zone, *pbuf); /* No more objects to free; return to mcache */ - if ((list = nlist) == NULL) + if ((list = nlist) == NULL) { break; + } } } @@ -1005,7 +1037,7 @@ static void mcache_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) { mcache_t *cp = arg; - size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof (u_int64_t)); + size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t)); void *base, **pbuf; while (list != NULL) { @@ -1015,15 +1047,16 @@ mcache_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) VERIFY(IS_P2ALIGNED(base, cp->mc_align)); /* Get the original address */ - pbuf = (void **)((intptr_t)base - sizeof (void *)); + pbuf = (void **)((intptr_t)base - sizeof(void *)); VERIFY(((intptr_t)base + rsize) <= ((intptr_t)*pbuf + cp->mc_chunksize)); - if (!alloc) + if (!alloc) { mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize); - else + } else { mcache_audit_free_verify_set(NULL, base, 0, rsize); + } list = list->obj_next = next; } @@ -1065,17 +1098,19 @@ mcache_bkt_alloc(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkttype_t **btp) if ((bkt = blp->bl_list) != NULL) { blp->bl_list = bkt->bkt_next; - if (--blp->bl_total < blp->bl_min) + if (--blp->bl_total < blp->bl_min) { blp->bl_min = blp->bl_total; + } blp->bl_alloc++; } - if (btp != NULL) + if (btp != NULL) { *btp = cp->cache_bkttype; + } MCACHE_UNLOCK(&cp->mc_bkt_lock); - return (bkt); + return bkt; } /* @@ -1102,8 +1137,9 @@ mcache_cache_bkt_enable(mcache_t *cp) mcache_cpu_t *ccp; int cpu; - if (cp->mc_flags & MCF_NOCPUCACHE) + if (cp->mc_flags & MCF_NOCPUCACHE) { return; + } for (cpu = 0; cpu < ncpu; cpu++) { ccp = &cp->mc_cpu[cpu]; @@ -1142,10 +1178,12 @@ mcache_bkt_purge(mcache_t *cp) MCACHE_UNLOCK(&ccp->cc_lock); - if (bp != NULL) + if (bp != NULL) { mcache_bkt_destroy(cp, btp, bp, objs); - if (pbp != NULL) + } + if (pbp != NULL) { mcache_bkt_destroy(cp, btp, pbp, pobjs); + } } mcache_bkt_ws_zero(cp); @@ -1235,13 +1273,15 @@ mcache_bkt_ws_reap(mcache_t *cp) reap = MIN(cp->mc_full.bl_reaplimit, cp->mc_full.bl_min); while (reap-- && - (bkt = mcache_bkt_alloc(cp, &cp->mc_full, &btp)) != NULL) + (bkt = mcache_bkt_alloc(cp, &cp->mc_full, &btp)) != NULL) { mcache_bkt_destroy(cp, btp, bkt, 
btp->bt_bktsize); + } reap = MIN(cp->mc_empty.bl_reaplimit, cp->mc_empty.bl_min); while (reap-- && - (bkt = mcache_bkt_alloc(cp, &cp->mc_empty, &btp)) != NULL) + (bkt = mcache_bkt_alloc(cp, &cp->mc_empty, &btp)) != NULL) { mcache_bkt_destroy(cp, btp, bkt, 0); + } } static void @@ -1285,8 +1325,9 @@ mcache_reap(void) UInt32 *flag = &mcache_reaping; if (mcache_llock_owner == current_thread() || - !OSCompareAndSwap(0, 1, flag)) + !OSCompareAndSwap(0, 1, flag)) { return; + } mcache_dispatch(mcache_reap_start, flag); } @@ -1329,8 +1370,9 @@ mcache_cache_update(mcache_t *cp) * memory pressure on the system. */ lck_mtx_lock_spin(&cp->mc_sync_lock); - if (!(cp->mc_flags & MCF_NOCPUCACHE) && cp->mc_enable_cnt) + if (!(cp->mc_flags & MCF_NOCPUCACHE) && cp->mc_enable_cnt) { need_bkt_reenable = 1; + } lck_mtx_unlock(&cp->mc_sync_lock); MCACHE_LOCK(&cp->mc_bkt_lock); @@ -1342,16 +1384,18 @@ mcache_cache_update(mcache_t *cp) */ if ((unsigned int)cp->mc_chunksize < cp->cache_bkttype->bt_maxbuf && (int)(cp->mc_bkt_contention - cp->mc_bkt_contention_prev) > - mcache_bkt_contention && !need_bkt_reenable) + mcache_bkt_contention && !need_bkt_reenable) { need_bkt_resize = 1; + } - cp ->mc_bkt_contention_prev = cp->mc_bkt_contention; + cp->mc_bkt_contention_prev = cp->mc_bkt_contention; MCACHE_UNLOCK(&cp->mc_bkt_lock); - if (need_bkt_resize) + if (need_bkt_resize) { mcache_dispatch(mcache_cache_bkt_resize, cp); - else if (need_bkt_reenable) + } else if (need_bkt_reenable) { mcache_dispatch(mcache_cache_enable, cp); + } } /* @@ -1376,7 +1420,7 @@ mcache_cache_bkt_resize(void *arg) */ MCACHE_LOCK(&cp->mc_bkt_lock); cp->cache_bkttype = ++btp; - cp ->mc_bkt_contention_prev = cp->mc_bkt_contention + INT_MAX; + cp->mc_bkt_contention_prev = cp->mc_bkt_contention + INT_MAX; MCACHE_UNLOCK(&cp->mc_bkt_lock); mcache_cache_enable(cp); @@ -1436,7 +1480,7 @@ static void mcache_dispatch(void (*func)(void *), void *arg) { ASSERT(func != NULL); - timeout(func, arg, hz/1000); + timeout(func, arg, hz / 1000); } __private_extern__ void @@ -1454,21 +1498,23 @@ mcache_buffer_log(mcache_audit_t *mca, void *addr, mcache_t *cp, transaction->mca_thread = current_thread(); - bzero(stack, sizeof (stack)); + bzero(stack, sizeof(stack)); transaction->mca_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1; bcopy(&stack[1], transaction->mca_stack, - sizeof (transaction->mca_stack)); + sizeof(transaction->mca_stack)); microuptime(&now); - if (base_ts != NULL) + if (base_ts != NULL) { base = *base_ts; + } /* tstamp is in ms relative to base_ts */ transaction->mca_tstamp = ((now.tv_usec - base.tv_usec) / 1000); - if ((now.tv_sec - base.tv_sec) > 0) + if ((now.tv_sec - base.tv_sec) > 0) { transaction->mca_tstamp += ((now.tv_sec - base.tv_sec) * 1000); + } mca->mca_next_trn = - (mca->mca_next_trn + 1) % mca_trn_max; + (mca->mca_next_trn + 1) % mca_trn_max; } __private_extern__ void @@ -1477,11 +1523,12 @@ mcache_set_pattern(u_int64_t pattern, void *buf_arg, size_t size) u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size)); u_int64_t *buf = (u_int64_t *)buf_arg; - VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t))); - VERIFY(IS_P2ALIGNED(size, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t))); + VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t))); - while (buf < buf_end) + while (buf < buf_end) { *buf++ = pattern; + } } __private_extern__ void * @@ -1490,14 +1537,15 @@ mcache_verify_pattern(u_int64_t pattern, void *buf_arg, size_t size) u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size)); 
u_int64_t *buf; - VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t))); - VERIFY(IS_P2ALIGNED(size, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t))); + VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t))); for (buf = buf_arg; buf < buf_end; buf++) { - if (*buf != pattern) - return (buf); + if (*buf != pattern) { + return buf; + } } - return (NULL); + return NULL; } __private_extern__ void * @@ -1507,18 +1555,18 @@ mcache_verify_set_pattern(u_int64_t old, u_int64_t new, void *buf_arg, u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size)); u_int64_t *buf; - VERIFY(IS_P2ALIGNED(buf_arg, sizeof (u_int64_t))); - VERIFY(IS_P2ALIGNED(size, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t))); + VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t))); for (buf = buf_arg; buf < buf_end; buf++) { if (*buf != old) { mcache_set_pattern(old, buf_arg, (uintptr_t)buf - (uintptr_t)buf_arg); - return (buf); + return buf; } *buf = new; } - return (NULL); + return NULL; } __private_extern__ void @@ -1533,7 +1581,7 @@ mcache_audit_free_verify(mcache_audit_t *mca, void *base, size_t offset, next = ((mcache_obj_t *)addr)->obj_next; /* For the "obj_next" pointer in the buffer */ - oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof (u_int64_t)); + oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof(u_int64_t)); *oaddr64 = MCACHE_FREE_PATTERN; if ((oaddr64 = mcache_verify_pattern(MCACHE_FREE_PATTERN, @@ -1557,7 +1605,7 @@ mcache_audit_free_verify_set(mcache_audit_t *mca, void *base, size_t offset, next = ((mcache_obj_t *)addr)->obj_next; /* For the "obj_next" pointer in the buffer */ - oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof (u_int64_t)); + oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof(u_int64_t)); *oaddr64 = MCACHE_FREE_PATTERN; if ((oaddr64 = mcache_verify_set_pattern(MCACHE_FREE_PATTERN, @@ -1571,12 +1619,12 @@ mcache_audit_free_verify_set(mcache_audit_t *mca, void *base, size_t offset, #undef panic -#define DUMP_TRN_FMT() \ +#define DUMP_TRN_FMT() \ "%s transaction thread %p saved PC stack (%d deep):\n" \ "\t%p, %p, %p, %p, %p, %p, %p, %p\n" \ "\t%p, %p, %p, %p, %p, %p, %p, %p\n" -#define DUMP_TRN_FIELDS(s, x) \ +#define DUMP_TRN_FIELDS(s, x) \ s, \ mca->mca_trns[x].mca_thread, mca->mca_trns[x].mca_depth, \ mca->mca_trns[x].mca_stack[0], mca->mca_trns[x].mca_stack[1], \ @@ -1588,14 +1636,15 @@ mcache_audit_free_verify_set(mcache_audit_t *mca, void *base, size_t offset, mca->mca_trns[x].mca_stack[12], mca->mca_trns[x].mca_stack[13], \ mca->mca_trns[x].mca_stack[14], mca->mca_trns[x].mca_stack[15] -#define MCA_TRN_LAST ((mca->mca_next_trn + mca_trn_max) % mca_trn_max) -#define MCA_TRN_PREV ((mca->mca_next_trn + mca_trn_max - 1) % mca_trn_max) +#define MCA_TRN_LAST ((mca->mca_next_trn + mca_trn_max) % mca_trn_max) +#define MCA_TRN_PREV ((mca->mca_next_trn + mca_trn_max - 1) % mca_trn_max) __private_extern__ char * mcache_dump_mca(mcache_audit_t *mca) { - if (mca_dump_buf == NULL) - return (NULL); + if (mca_dump_buf == NULL) { + return NULL; + } snprintf(mca_dump_buf, DUMP_MCA_BUF_SIZE, "mca %p: addr %p, cache %p (%s) nxttrn %d\n" @@ -1609,7 +1658,7 @@ mcache_dump_mca(mcache_audit_t *mca) DUMP_TRN_FIELDS("last", MCA_TRN_LAST), DUMP_TRN_FIELDS("previous", MCA_TRN_PREV)); - return (mca_dump_buf); + return mca_dump_buf; } __private_extern__ void @@ -1633,5 +1682,5 @@ __private_extern__ int assfail(const char *a, const char *f, int l) { panic("assertion failed: %s, file: %s, line: %d", a, f, l); - return (0); + return 0; } diff --git a/bsd/kern/netboot.c b/bsd/kern/netboot.c 
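A recurring idiom in the mcache hunks above deserves a note: both mcache_create_common() and mcache_slab_alloc() over-allocate, round the base pointer up to a power-of-two boundary with P2ROUNDUP(), and stash the original allocation address in the word just below the aligned base so the free path can recover it. A self-contained userspace sketch of that technique, with malloc/free standing in for zalloc/zfree and a locally defined P2ROUNDUP — the names aligned_alloc_stash and aligned_free_stash are invented for illustration:

	#include <stdint.h>
	#include <stdlib.h>

	/* Round x up to the next multiple of a, where a is a power of two. */
	#define P2ROUNDUP(x, a) \
		(((uintptr_t)(x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

	static void *
	aligned_alloc_stash(size_t size, size_t align)  /* align: power of two */
	{
		/* Mirror mcache's minimum alignment so the stash slot is aligned. */
		if (align < sizeof(void *)) {
			align = sizeof(void *);
		}
		/* Extra room for the stashed pointer plus worst-case rounding. */
		void *buf = malloc(sizeof(void *) + size + align);
		if (buf == NULL) {
			return NULL;
		}
		void *base = (void *)P2ROUNDUP((uintptr_t)buf + sizeof(void *), align);
		((void **)base)[-1] = buf;      /* original pointer, one word below */
		return base;
	}

	static void
	aligned_free_stash(void *base)
	{
		if (base != NULL) {
			free(((void **)base)[-1]);  /* recover and free the original */
		}
	}

The same recovery step appears in the hunks above in mcache_destroy() and mcache_slab_free(), as pbuf = (void **)((intptr_t)base - sizeof(void *)).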
index e8bdddb34..71362c2f2 100644 --- a/bsd/kern/netboot.c +++ b/bsd/kern/netboot.c @@ -2,7 +2,7 @@ * Copyright (c) 2001-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -61,15 +61,15 @@ #include //#include -extern struct filedesc filedesc0; +extern struct filedesc filedesc0; -extern int nfs_mountroot(void); /* nfs_vfsops.c */ +extern int nfs_mountroot(void); /* nfs_vfsops.c */ extern int (*mountroot)(void); -extern unsigned char rootdevice[]; +extern unsigned char rootdevice[]; -static int S_netboot = 0; -static struct netboot_info * S_netboot_info_p; +static int S_netboot = 0; +static struct netboot_info * S_netboot_info_p; void * IOBSDRegistryEntryForDeviceTree(const char * path); @@ -78,38 +78,38 @@ void IOBSDRegistryEntryRelease(void * entry); const void * -IOBSDRegistryEntryGetData(void * entry, const char * property_name, - int * packet_length); +IOBSDRegistryEntryGetData(void * entry, const char * property_name, + int * packet_length); -#define BOOTP_RESPONSE "bootp-response" -#define BSDP_RESPONSE "bsdp-response" -#define DHCP_RESPONSE "dhcp-response" +#define BOOTP_RESPONSE "bootp-response" +#define BSDP_RESPONSE "bsdp-response" +#define DHCP_RESPONSE "dhcp-response" -#define IP_FORMAT "%d.%d.%d.%d" -#define IP_CH(ip) ((u_char *)ip) -#define IP_LIST(ip) IP_CH(ip)[0],IP_CH(ip)[1],IP_CH(ip)[2],IP_CH(ip)[3] +#define IP_FORMAT "%d.%d.%d.%d" +#define IP_CH(ip) ((u_char *)ip) +#define IP_LIST(ip) IP_CH(ip)[0],IP_CH(ip)[1],IP_CH(ip)[2],IP_CH(ip)[3] -#define kNetBootRootPathPrefixNFS "nfs:" -#define kNetBootRootPathPrefixHTTP "http:" +#define kNetBootRootPathPrefixNFS "nfs:" +#define kNetBootRootPathPrefixHTTP "http:" typedef enum { - kNetBootImageTypeUnknown = 0, - kNetBootImageTypeNFS = 1, - kNetBootImageTypeHTTP = 2, + kNetBootImageTypeUnknown = 0, + kNetBootImageTypeNFS = 1, + kNetBootImageTypeHTTP = 2, } NetBootImageType; struct netboot_info { - struct in_addr client_ip; - struct in_addr server_ip; - char * server_name; - int server_name_length; - char * mount_point; - int mount_point_length; - char * image_path; - int image_path_length; - NetBootImageType image_type; - char * second_image_path; - int second_image_path_length; + struct in_addr client_ip; + struct in_addr server_ip; + char * server_name; + int server_name_length; + char * mount_point; + int mount_point_length; + char * image_path; + int image_path_length; + NetBootImageType image_type; + char * second_image_path; + int second_image_path_length; }; /* @@ -121,50 
+121,49 @@ struct netboot_info { * * Note: * The passed in string is modified i.e. ':' is replaced by '\0'. - * Example: + * Example: * "17.202.16.17:seaport:/release/.images/Image9/CurrentHera" */ static __inline__ boolean_t parse_booter_path(char * path, struct in_addr * iaddr_p, char const * * host, - char * * mount_dir, char * * image_path) + char * * mount_dir, char * * image_path) { - char * start; - char * colon; - - /* IP address */ - start = path; - colon = strchr(start, ':'); - if (colon == NULL) { - return (FALSE); - } - *colon = '\0'; - if (inet_aton(start, iaddr_p) != 1) { - return (FALSE); - } - - /* host */ - start = colon + 1; - colon = strchr(start, ':'); - if (colon == NULL) { - return (FALSE); - } - *colon = '\0'; - *host = start; - - /* mount */ - start = colon + 1; - colon = strchr(start, ':'); - *mount_dir = start; - if (colon == NULL) { - *image_path = NULL; - } - else { - /* image path */ + char * start; + char * colon; + + /* IP address */ + start = path; + colon = strchr(start, ':'); + if (colon == NULL) { + return FALSE; + } *colon = '\0'; + if (inet_aton(start, iaddr_p) != 1) { + return FALSE; + } + + /* host */ start = colon + 1; - *image_path = start; - } - return (TRUE); + colon = strchr(start, ':'); + if (colon == NULL) { + return FALSE; + } + *colon = '\0'; + *host = start; + + /* mount */ + start = colon + 1; + colon = strchr(start, ':'); + *mount_dir = start; + if (colon == NULL) { + *image_path = NULL; + } else { + /* image path */ + *colon = '\0'; + start = colon + 1; + *image_path = start; + } + return TRUE; } /* @@ -177,24 +176,25 @@ parse_booter_path(char * path, struct in_addr * iaddr_p, char const * * host, static __inline__ char * find_colon(char * str) { - char * start = str; - char * colon; - - while ((colon = strchr(start, ':')) != NULL) { - char * dst; - char * src; - - if (colon == start) { - break; - } - if (colon[-1] != '\\') - break; - for (dst = colon - 1, src = colon; *dst != '\0'; dst++, src++) { - *dst = *src; - } - start = colon; - } - return (colon); + char * start = str; + char * colon; + + while ((colon = strchr(start, ':')) != NULL) { + char * dst; + char * src; + + if (colon == start) { + break; + } + if (colon[-1] != '\\') { + break; + } + for (dst = colon - 1, src = colon; *dst != '\0'; dst++, src++) { + *dst = *src; + } + start = colon; + } + return colon; } /* @@ -213,586 +213,580 @@ find_colon(char * str) */ static __inline__ boolean_t parse_netboot_path(char * path, struct in_addr * iaddr_p, char const * * host, - char * * mount_dir, char * * image_path) + char * * mount_dir, char * * image_path) { - static char tmp[MAX_IPv4_STR_LEN]; /* Danger - not thread safe */ - char * start; - char * colon; - - if (strncmp(path, kNetBootRootPathPrefixNFS, - strlen(kNetBootRootPathPrefixNFS)) != 0) { - return (FALSE); - } - - /* IP address */ - start = path + strlen(kNetBootRootPathPrefixNFS); - colon = strchr(start, ':'); - if (colon == NULL) { - return (FALSE); - } - *colon = '\0'; - if (inet_aton(start, iaddr_p) != 1) { - return (FALSE); - } - - /* mount point */ - start = colon + 1; - colon = find_colon(start); - *mount_dir = start; - if (colon == NULL) { - *image_path = NULL; - } - else { - /* image path */ + static char tmp[MAX_IPv4_STR_LEN]; /* Danger - not thread safe */ + char * start; + char * colon; + + if (strncmp(path, kNetBootRootPathPrefixNFS, + strlen(kNetBootRootPathPrefixNFS)) != 0) { + return FALSE; + } + + /* IP address */ + start = path + strlen(kNetBootRootPathPrefixNFS); + colon = strchr(start, ':'); + if (colon 
== NULL) { + return FALSE; + } *colon = '\0'; + if (inet_aton(start, iaddr_p) != 1) { + return FALSE; + } + + /* mount point */ start = colon + 1; - (void)find_colon(start); - *image_path = start; - } - *host = inet_ntop(AF_INET, iaddr_p, tmp, sizeof(tmp)); - return (TRUE); + colon = find_colon(start); + *mount_dir = start; + if (colon == NULL) { + *image_path = NULL; + } else { + /* image path */ + *colon = '\0'; + start = colon + 1; + (void)find_colon(start); + *image_path = start; + } + *host = inet_ntop(AF_INET, iaddr_p, tmp, sizeof(tmp)); + return TRUE; } static boolean_t parse_image_path(char * path, struct in_addr * iaddr_p, char const * * host, - char * * mount_dir, char * * image_path) + char * * mount_dir, char * * image_path) { - if (path[0] >= '0' && path[0] <= '9') { - return (parse_booter_path(path, iaddr_p, host, mount_dir, - image_path)); - } - return (parse_netboot_path(path, iaddr_p, host, mount_dir, - image_path)); + if (path[0] >= '0' && path[0] <= '9') { + return parse_booter_path(path, iaddr_p, host, mount_dir, + image_path); + } + return parse_netboot_path(path, iaddr_p, host, mount_dir, + image_path); } static boolean_t get_root_path(char * root_path) { - void * entry; - boolean_t found = FALSE; - const void * pkt; - int pkt_len; - - entry = IOBSDRegistryEntryForDeviceTree("/chosen"); - if (entry == NULL) { - return (FALSE); - } - pkt = IOBSDRegistryEntryGetData(entry, BSDP_RESPONSE, &pkt_len); - if (pkt != NULL && pkt_len >= (int)sizeof(struct dhcp)) { - printf("netboot: retrieving root path from BSDP response\n"); - } - else { - pkt = IOBSDRegistryEntryGetData(entry, BOOTP_RESPONSE, - &pkt_len); + void * entry; + boolean_t found = FALSE; + const void * pkt; + int pkt_len; + + entry = IOBSDRegistryEntryForDeviceTree("/chosen"); + if (entry == NULL) { + return FALSE; + } + pkt = IOBSDRegistryEntryGetData(entry, BSDP_RESPONSE, &pkt_len); if (pkt != NULL && pkt_len >= (int)sizeof(struct dhcp)) { - printf("netboot: retrieving root path from BOOTP response\n"); + printf("netboot: retrieving root path from BSDP response\n"); + } else { + pkt = IOBSDRegistryEntryGetData(entry, BOOTP_RESPONSE, + &pkt_len); + if (pkt != NULL && pkt_len >= (int)sizeof(struct dhcp)) { + printf("netboot: retrieving root path from BOOTP response\n"); + } } - } - if (pkt != NULL) { - int len; - dhcpol_t options; - const char * path; - const struct dhcp * reply; - - reply = (const struct dhcp *)pkt; - (void)dhcpol_parse_packet(&options, reply, pkt_len); - - path = (const char *)dhcpol_find(&options, - dhcptag_root_path_e, &len, NULL); - if (path) { - memcpy(root_path, path, len); - root_path[len] = '\0'; - found = TRUE; + if (pkt != NULL) { + int len; + dhcpol_t options; + const char * path; + const struct dhcp * reply; + + reply = (const struct dhcp *)pkt; + (void)dhcpol_parse_packet(&options, reply, pkt_len); + + path = (const char *)dhcpol_find(&options, + dhcptag_root_path_e, &len, NULL); + if (path) { + memcpy(root_path, path, len); + root_path[len] = '\0'; + found = TRUE; + } } - } - IOBSDRegistryEntryRelease(entry); - return (found); - + IOBSDRegistryEntryRelease(entry); + return found; } static void save_path(char * * str_p, int * length_p, char * path) { - *length_p = strlen(path) + 1; - *str_p = (char *)kalloc(*length_p); - strlcpy(*str_p, path, *length_p); - return; + *length_p = strlen(path) + 1; + *str_p = (char *)kalloc(*length_p); + strlcpy(*str_p, path, *length_p); + return; } static struct netboot_info * netboot_info_init(struct in_addr iaddr) { - boolean_t have_root_path = 
FALSE; - struct netboot_info * info = NULL; - char * root_path = NULL; - - info = (struct netboot_info *)kalloc(sizeof(*info)); - bzero(info, sizeof(*info)); - info->client_ip = iaddr; - info->image_type = kNetBootImageTypeUnknown; - - /* check for a booter-specified path then a NetBoot path */ - MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (root_path == NULL) - panic("netboot_info_init: M_NAMEI zone exhausted"); - if (PE_parse_boot_argn("rp0", root_path, MAXPATHLEN) == TRUE - || PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == TRUE - || PE_parse_boot_argn("rootpath", root_path, MAXPATHLEN) == TRUE) { - if (imageboot_format_is_valid(root_path)) { - printf("netboot_info_init: rp0='%s' isn't a network path," - " ignoring\n", root_path); - } - else { - have_root_path = TRUE; - } - } - if (have_root_path == FALSE) { - have_root_path = get_root_path(root_path); - } - if (have_root_path) { - const char * server_name = NULL; - char * mount_point = NULL; - char * image_path = NULL; - struct in_addr server_ip; - - if (parse_image_path(root_path, &server_ip, &server_name, - &mount_point, &image_path)) { - info->image_type = kNetBootImageTypeNFS; - info->server_ip = server_ip; - info->server_name_length = strlen(server_name) + 1; - info->server_name = (char *)kalloc(info->server_name_length); - info->mount_point_length = strlen(mount_point) + 1; - info->mount_point = (char *)kalloc(info->mount_point_length); - strlcpy(info->server_name, server_name, info->server_name_length); - strlcpy(info->mount_point, mount_point, info->mount_point_length); - - printf("netboot: NFS Server %s Mount %s", - server_name, info->mount_point); - if (image_path != NULL) { - boolean_t needs_slash = FALSE; - - info->image_path_length = strlen(image_path) + 1; - if (image_path[0] != '/') { - needs_slash = TRUE; - info->image_path_length++; + boolean_t have_root_path = FALSE; + struct netboot_info * info = NULL; + char * root_path = NULL; + + info = (struct netboot_info *)kalloc(sizeof(*info)); + bzero(info, sizeof(*info)); + info->client_ip = iaddr; + info->image_type = kNetBootImageTypeUnknown; + + /* check for a booter-specified path then a NetBoot path */ + MALLOC_ZONE(root_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); + if (root_path == NULL) { + panic("netboot_info_init: M_NAMEI zone exhausted"); + } + if (PE_parse_boot_argn("rp0", root_path, MAXPATHLEN) == TRUE + || PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == TRUE + || PE_parse_boot_argn("rootpath", root_path, MAXPATHLEN) == TRUE) { + if (imageboot_format_is_valid(root_path)) { + printf("netboot_info_init: rp0='%s' isn't a network path," + " ignoring\n", root_path); + } else { + have_root_path = TRUE; } - info->image_path = (char *)kalloc(info->image_path_length); - if (needs_slash) { - info->image_path[0] = '/'; - strlcpy(info->image_path + 1, image_path, - info->image_path_length - 1); + } + if (have_root_path == FALSE) { + have_root_path = get_root_path(root_path); + } + if (have_root_path) { + const char * server_name = NULL; + char * mount_point = NULL; + char * image_path = NULL; + struct in_addr server_ip; + + if (parse_image_path(root_path, &server_ip, &server_name, + &mount_point, &image_path)) { + info->image_type = kNetBootImageTypeNFS; + info->server_ip = server_ip; + info->server_name_length = strlen(server_name) + 1; + info->server_name = (char *)kalloc(info->server_name_length); + info->mount_point_length = strlen(mount_point) + 1; + info->mount_point = (char *)kalloc(info->mount_point_length); + 
strlcpy(info->server_name, server_name, info->server_name_length); + strlcpy(info->mount_point, mount_point, info->mount_point_length); + + printf("netboot: NFS Server %s Mount %s", + server_name, info->mount_point); + if (image_path != NULL) { + boolean_t needs_slash = FALSE; + + info->image_path_length = strlen(image_path) + 1; + if (image_path[0] != '/') { + needs_slash = TRUE; + info->image_path_length++; + } + info->image_path = (char *)kalloc(info->image_path_length); + if (needs_slash) { + info->image_path[0] = '/'; + strlcpy(info->image_path + 1, image_path, + info->image_path_length - 1); + } else { + strlcpy(info->image_path, image_path, + info->image_path_length); + } + printf(" Image %s", info->image_path); + } + printf("\n"); + } else if (strncmp(root_path, kNetBootRootPathPrefixHTTP, + strlen(kNetBootRootPathPrefixHTTP)) == 0) { + info->image_type = kNetBootImageTypeHTTP; + save_path(&info->image_path, &info->image_path_length, + root_path); + printf("netboot: HTTP URL %s\n", info->image_path); } else { - strlcpy(info->image_path, image_path, - info->image_path_length); + printf("netboot: root path uses unrecognized format\n"); } - printf(" Image %s", info->image_path); - } - printf("\n"); - } - else if (strncmp(root_path, kNetBootRootPathPrefixHTTP, - strlen(kNetBootRootPathPrefixHTTP)) == 0) { - info->image_type = kNetBootImageTypeHTTP; - save_path(&info->image_path, &info->image_path_length, - root_path); - printf("netboot: HTTP URL %s\n", info->image_path); - } - else { - printf("netboot: root path uses unrecognized format\n"); - } - - /* check for image-within-image */ - if (info->image_path != NULL) { - if (PE_parse_boot_argn(IMAGEBOOT_ROOT_ARG, root_path, MAXPATHLEN) - || PE_parse_boot_argn("rp1", root_path, MAXPATHLEN)) { - /* rp1/root-dmg is the second-level image */ - save_path(&info->second_image_path, &info->second_image_path_length, - root_path); + + /* check for image-within-image */ + if (info->image_path != NULL) { + if (PE_parse_boot_argn(IMAGEBOOT_ROOT_ARG, root_path, MAXPATHLEN) + || PE_parse_boot_argn("rp1", root_path, MAXPATHLEN)) { + /* rp1/root-dmg is the second-level image */ + save_path(&info->second_image_path, &info->second_image_path_length, + root_path); + } + } + if (info->second_image_path != NULL) { + printf("netboot: nested image %s\n", info->second_image_path); } } - if (info->second_image_path != NULL) { - printf("netboot: nested image %s\n", info->second_image_path); - } - } - FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); - return (info); + FREE_ZONE(root_path, MAXPATHLEN, M_NAMEI); + return info; } -static void +static void netboot_info_free(struct netboot_info * * info_p) { - struct netboot_info * info = *info_p; + struct netboot_info * info = *info_p; - if (info) { - if (info->mount_point) { - kfree(info->mount_point, info->mount_point_length); - } - if (info->server_name) { - kfree(info->server_name, info->server_name_length); - } - if (info->image_path) { - kfree(info->image_path, info->image_path_length); - } - if (info->second_image_path) { - kfree(info->second_image_path, info->second_image_path_length); + if (info) { + if (info->mount_point) { + kfree(info->mount_point, info->mount_point_length); + } + if (info->server_name) { + kfree(info->server_name, info->server_name_length); + } + if (info->image_path) { + kfree(info->image_path, info->image_path_length); + } + if (info->second_image_path) { + kfree(info->second_image_path, info->second_image_path_length); + } + kfree(info, sizeof(*info)); } - kfree(info, sizeof(*info)); - } - 
*info_p = NULL; - return; + *info_p = NULL; + return; } boolean_t netboot_iaddr(struct in_addr * iaddr_p) { - if (S_netboot_info_p == NULL) - return (FALSE); + if (S_netboot_info_p == NULL) { + return FALSE; + } - *iaddr_p = S_netboot_info_p->client_ip; - return (TRUE); + *iaddr_p = S_netboot_info_p->client_ip; + return TRUE; } boolean_t netboot_rootpath(struct in_addr * server_ip, - char * name, int name_len, - char * path, int path_len) + char * name, int name_len, + char * path, int path_len) { - if (S_netboot_info_p == NULL) - return (FALSE); - - name[0] = '\0'; - path[0] = '\0'; - - if (S_netboot_info_p->mount_point_length == 0) { - return (FALSE); - } - if (path_len < S_netboot_info_p->mount_point_length) { - printf("netboot: path too small %d < %d\n", - path_len, S_netboot_info_p->mount_point_length); - return (FALSE); - } - strlcpy(path, S_netboot_info_p->mount_point, path_len); - strlcpy(name, S_netboot_info_p->server_name, name_len); - *server_ip = S_netboot_info_p->server_ip; - return (TRUE); + if (S_netboot_info_p == NULL) { + return FALSE; + } + + name[0] = '\0'; + path[0] = '\0'; + + if (S_netboot_info_p->mount_point_length == 0) { + return FALSE; + } + if (path_len < S_netboot_info_p->mount_point_length) { + printf("netboot: path too small %d < %d\n", + path_len, S_netboot_info_p->mount_point_length); + return FALSE; + } + strlcpy(path, S_netboot_info_p->mount_point, path_len); + strlcpy(name, S_netboot_info_p->server_name, name_len); + *server_ip = S_netboot_info_p->server_ip; + return TRUE; } static boolean_t -get_ip_parameters(struct in_addr * iaddr_p, struct in_addr * netmask_p, - struct in_addr * router_p) +get_ip_parameters(struct in_addr * iaddr_p, struct in_addr * netmask_p, + struct in_addr * router_p) { - void * entry; - const void * pkt; - int pkt_len; - - - entry = IOBSDRegistryEntryForDeviceTree("/chosen"); - if (entry == NULL) { - return (FALSE); - } - pkt = IOBSDRegistryEntryGetData(entry, DHCP_RESPONSE, &pkt_len); - if (pkt != NULL && pkt_len >= (int)sizeof(struct dhcp)) { - printf("netboot: retrieving IP information from DHCP response\n"); - } - else { - pkt = IOBSDRegistryEntryGetData(entry, BOOTP_RESPONSE, &pkt_len); + void * entry; + const void * pkt; + int pkt_len; + + + entry = IOBSDRegistryEntryForDeviceTree("/chosen"); + if (entry == NULL) { + return FALSE; + } + pkt = IOBSDRegistryEntryGetData(entry, DHCP_RESPONSE, &pkt_len); if (pkt != NULL && pkt_len >= (int)sizeof(struct dhcp)) { - printf("netboot: retrieving IP information from BOOTP response\n"); - } - } - if (pkt != NULL) { - const struct in_addr * ip; - int len; - dhcpol_t options; - const struct dhcp * reply; - - reply = (const struct dhcp *)pkt; - (void)dhcpol_parse_packet(&options, reply, pkt_len); - *iaddr_p = reply->dp_yiaddr; - ip = (const struct in_addr *) - dhcpol_find(&options, - dhcptag_subnet_mask_e, &len, NULL); - if (ip) { - *netmask_p = *ip; - } - ip = (const struct in_addr *) - dhcpol_find(&options, dhcptag_router_e, &len, NULL); - if (ip) { - *router_p = *ip; - } - } - IOBSDRegistryEntryRelease(entry); - return (pkt != NULL); + printf("netboot: retrieving IP information from DHCP response\n"); + } else { + pkt = IOBSDRegistryEntryGetData(entry, BOOTP_RESPONSE, &pkt_len); + if (pkt != NULL && pkt_len >= (int)sizeof(struct dhcp)) { + printf("netboot: retrieving IP information from BOOTP response\n"); + } + } + if (pkt != NULL) { + const struct in_addr * ip; + int len; + dhcpol_t options; + const struct dhcp * reply; + + reply = (const struct dhcp *)pkt; + 
(void)dhcpol_parse_packet(&options, reply, pkt_len); + *iaddr_p = reply->dp_yiaddr; + ip = (const struct in_addr *) + dhcpol_find(&options, + dhcptag_subnet_mask_e, &len, NULL); + if (ip) { + *netmask_p = *ip; + } + ip = (const struct in_addr *) + dhcpol_find(&options, dhcptag_router_e, &len, NULL); + if (ip) { + *router_p = *ip; + } + } + IOBSDRegistryEntryRelease(entry); + return pkt != NULL; } static int -route_cmd(int cmd, struct in_addr d, struct in_addr g, - struct in_addr m, uint32_t more_flags, unsigned int ifscope) +route_cmd(int cmd, struct in_addr d, struct in_addr g, + struct in_addr m, uint32_t more_flags, unsigned int ifscope) { - struct sockaddr_in dst; - int error; - uint32_t flags = RTF_UP | RTF_STATIC; - struct sockaddr_in gw; - struct sockaddr_in mask; - - flags |= more_flags; - - /* destination */ - bzero((caddr_t)&dst, sizeof(dst)); - dst.sin_len = sizeof(dst); - dst.sin_family = AF_INET; - dst.sin_addr = d; - - /* gateway */ - bzero((caddr_t)&gw, sizeof(gw)); - gw.sin_len = sizeof(gw); - gw.sin_family = AF_INET; - gw.sin_addr = g; - - /* mask */ - bzero(&mask, sizeof(mask)); - mask.sin_len = sizeof(mask); - mask.sin_family = AF_INET; - mask.sin_addr = m; - - error = rtrequest_scoped(cmd, (struct sockaddr *)&dst, - (struct sockaddr *)&gw, (struct sockaddr *)&mask, flags, NULL, ifscope); - - return (error); - + struct sockaddr_in dst; + int error; + uint32_t flags = RTF_UP | RTF_STATIC; + struct sockaddr_in gw; + struct sockaddr_in mask; + + flags |= more_flags; + + /* destination */ + bzero((caddr_t)&dst, sizeof(dst)); + dst.sin_len = sizeof(dst); + dst.sin_family = AF_INET; + dst.sin_addr = d; + + /* gateway */ + bzero((caddr_t)&gw, sizeof(gw)); + gw.sin_len = sizeof(gw); + gw.sin_family = AF_INET; + gw.sin_addr = g; + + /* mask */ + bzero(&mask, sizeof(mask)); + mask.sin_len = sizeof(mask); + mask.sin_family = AF_INET; + mask.sin_addr = m; + + error = rtrequest_scoped(cmd, (struct sockaddr *)&dst, + (struct sockaddr *)&gw, (struct sockaddr *)&mask, flags, NULL, ifscope); + + return error; } static int default_route_add(struct in_addr router, boolean_t proxy_arp) { - uint32_t flags = 0; - struct in_addr zeroes = { 0 }; - - if (proxy_arp == FALSE) { - flags |= RTF_GATEWAY; - } - return (route_cmd(RTM_ADD, zeroes, router, zeroes, flags, IFSCOPE_NONE)); + uint32_t flags = 0; + struct in_addr zeroes = { 0 }; + + if (proxy_arp == FALSE) { + flags |= RTF_GATEWAY; + } + return route_cmd(RTM_ADD, zeroes, router, zeroes, flags, IFSCOPE_NONE); } static int host_route_delete(struct in_addr host, unsigned int ifscope) { - struct in_addr zeroes = { 0 }; - - return (route_cmd(RTM_DELETE, host, zeroes, zeroes, RTF_HOST, ifscope)); + struct in_addr zeroes = { 0 }; + + return route_cmd(RTM_DELETE, host, zeroes, zeroes, RTF_HOST, ifscope); } static struct ifnet * find_interface(void) { - struct ifnet * ifp = NULL; + struct ifnet * ifp = NULL; - dlil_if_lock(); - if (rootdevice[0]) { + dlil_if_lock(); + if (rootdevice[0]) { ifp = ifunit((char *)rootdevice); - } - if (ifp == NULL) { + } + if (ifp == NULL) { ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) - if ((ifp->if_flags & (IFF_LOOPBACK|IFF_POINTOPOINT)) == 0) - break; + if ((ifp->if_flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) == 0) { + break; + } ifnet_head_done(); - } - dlil_if_unlock(); - return (ifp); + } + dlil_if_unlock(); + return ifp; } static const struct sockaddr_in blank_sin = { - sizeof(struct sockaddr_in), - AF_INET, - 0, - { 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0 } + sizeof(struct sockaddr_in), + AF_INET, + 0, 
+ { 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 } }; static int inet_aifaddr(struct socket * so, const char * name, - const struct in_addr * addr, - const struct in_addr * mask, - const struct in_addr * broadcast) + const struct in_addr * addr, + const struct in_addr * mask, + const struct in_addr * broadcast) { - struct ifaliasreq ifra; - - bzero(&ifra, sizeof(ifra)); - strlcpy(ifra.ifra_name, name, sizeof(ifra.ifra_name)); - if (addr) { - *((struct sockaddr_in *)(void *)&ifra.ifra_addr) = blank_sin; - ((struct sockaddr_in *)(void *)&ifra.ifra_addr)->sin_addr = *addr; - } - if (mask) { - *((struct sockaddr_in *)(void *)&ifra.ifra_mask) = blank_sin; - ((struct sockaddr_in *)(void *)&ifra.ifra_mask)->sin_addr = *mask; - } - if (broadcast) { - *((struct sockaddr_in *)(void *)&ifra.ifra_broadaddr) = blank_sin; - ((struct sockaddr_in *)(void *)&ifra.ifra_broadaddr)->sin_addr = *broadcast; - } - return (ifioctl(so, SIOCAIFADDR, (caddr_t)&ifra, current_proc())); + struct ifaliasreq ifra; + + bzero(&ifra, sizeof(ifra)); + strlcpy(ifra.ifra_name, name, sizeof(ifra.ifra_name)); + if (addr) { + *((struct sockaddr_in *)(void *)&ifra.ifra_addr) = blank_sin; + ((struct sockaddr_in *)(void *)&ifra.ifra_addr)->sin_addr = *addr; + } + if (mask) { + *((struct sockaddr_in *)(void *)&ifra.ifra_mask) = blank_sin; + ((struct sockaddr_in *)(void *)&ifra.ifra_mask)->sin_addr = *mask; + } + if (broadcast) { + *((struct sockaddr_in *)(void *)&ifra.ifra_broadaddr) = blank_sin; + ((struct sockaddr_in *)(void *)&ifra.ifra_broadaddr)->sin_addr = *broadcast; + } + return ifioctl(so, SIOCAIFADDR, (caddr_t)&ifra, current_proc()); } int netboot_mountroot(void) { - int error = 0; - struct in_addr iaddr = { 0 }; - struct ifreq ifr; - struct ifnet * ifp; - struct in_addr netmask = { 0 }; - proc_t procp = current_proc(); - struct in_addr router = { 0 }; - struct socket * so = NULL; - unsigned int try; - - bzero(&ifr, sizeof(ifr)); - - /* find the interface */ - ifp = find_interface(); - if (ifp == NULL) { - printf("netboot: no suitable interface\n"); - error = ENXIO; - goto failed; - } - snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", if_name(ifp)); - printf("netboot: using network interface '%s'\n", ifr.ifr_name); - - /* bring it up */ - if ((error = socreate(AF_INET, &so, SOCK_DGRAM, 0)) != 0) { - printf("netboot: socreate, error=%d\n", error); - goto failed; - } - ifr.ifr_flags = ifp->if_flags | IFF_UP; - error = ifioctl(so, SIOCSIFFLAGS, (caddr_t)&ifr, procp); - if (error) { - printf("netboot: SIFFLAGS, error=%d\n", error); - goto failed; - } - - /* grab information from the registry */ - if (get_ip_parameters(&iaddr, &netmask, &router) == FALSE) { - printf("netboot: can't retrieve IP parameters\n"); - goto failed; - } - printf("netboot: IP address " IP_FORMAT, IP_LIST(&iaddr)); - if (netmask.s_addr) { - printf(" netmask " IP_FORMAT, IP_LIST(&netmask)); - } - if (router.s_addr) { - printf(" router " IP_FORMAT, IP_LIST(&router)); - } - printf("\n"); - error = inet_aifaddr(so, ifr.ifr_name, &iaddr, &netmask, NULL); - if (error) { - printf("netboot: inet_aifaddr failed, %d\n", error); - goto failed; - } - if (router.s_addr == 0) { - /* enable proxy arp if we don't have a router */ - router.s_addr = iaddr.s_addr; - } - printf("netboot: adding default route " IP_FORMAT "\n", - IP_LIST(&router)); - error = default_route_add(router, router.s_addr == iaddr.s_addr); - if (error) { - printf("netboot: default_route_add failed %d\n", error); - } - - soclose(so); - - S_netboot_info_p = netboot_info_init(iaddr); - switch 
(S_netboot_info_p->image_type) { - default: - case kNetBootImageTypeNFS: - for (try = 1; TRUE; try++) { - error = nfs_mountroot(); - if (error == 0) { - break; - } - printf("netboot: nfs_mountroot() attempt %u failed; " - "clearing ARP entry and trying again\n", try); - /* - * error is either EHOSTDOWN or EHOSTUNREACH, which likely means - * that the port we're plugged into has spanning tree enabled, - * and either the router or the server can't answer our ARP - * requests. Clear the incomplete ARP entry by removing the - * appropriate route, depending on the error code: - * EHOSTDOWN NFS server's route - * EHOSTUNREACH router's route - */ - switch (error) { - default: - /* NOT REACHED */ - case EHOSTDOWN: - /* remove the server's arp entry */ - error = host_route_delete(S_netboot_info_p->server_ip, - ifp->if_index); - if (error) { - printf("netboot: host_route_delete(" IP_FORMAT - ") failed %d\n", - IP_LIST(&S_netboot_info_p->server_ip), error); + int error = 0; + struct in_addr iaddr = { 0 }; + struct ifreq ifr; + struct ifnet * ifp; + struct in_addr netmask = { 0 }; + proc_t procp = current_proc(); + struct in_addr router = { 0 }; + struct socket * so = NULL; + unsigned int try; + + bzero(&ifr, sizeof(ifr)); + + /* find the interface */ + ifp = find_interface(); + if (ifp == NULL) { + printf("netboot: no suitable interface\n"); + error = ENXIO; + goto failed; + } + snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", if_name(ifp)); + printf("netboot: using network interface '%s'\n", ifr.ifr_name); + + /* bring it up */ + if ((error = socreate(AF_INET, &so, SOCK_DGRAM, 0)) != 0) { + printf("netboot: socreate, error=%d\n", error); + goto failed; + } + ifr.ifr_flags = ifp->if_flags | IFF_UP; + error = ifioctl(so, SIOCSIFFLAGS, (caddr_t)&ifr, procp); + if (error) { + printf("netboot: SIFFLAGS, error=%d\n", error); + goto failed; + } + + /* grab information from the registry */ + if (get_ip_parameters(&iaddr, &netmask, &router) == FALSE) { + printf("netboot: can't retrieve IP parameters\n"); + goto failed; + } + printf("netboot: IP address " IP_FORMAT, IP_LIST(&iaddr)); + if (netmask.s_addr) { + printf(" netmask " IP_FORMAT, IP_LIST(&netmask)); + } + if (router.s_addr) { + printf(" router " IP_FORMAT, IP_LIST(&router)); + } + printf("\n"); + error = inet_aifaddr(so, ifr.ifr_name, &iaddr, &netmask, NULL); + if (error) { + printf("netboot: inet_aifaddr failed, %d\n", error); + goto failed; + } + if (router.s_addr == 0) { + /* enable proxy arp if we don't have a router */ + router.s_addr = iaddr.s_addr; + } + printf("netboot: adding default route " IP_FORMAT "\n", + IP_LIST(&router)); + error = default_route_add(router, router.s_addr == iaddr.s_addr); + if (error) { + printf("netboot: default_route_add failed %d\n", error); + } + + soclose(so); + + S_netboot_info_p = netboot_info_init(iaddr); + switch (S_netboot_info_p->image_type) { + default: + case kNetBootImageTypeNFS: + for (try = 1; TRUE; try++) { + error = nfs_mountroot(); + if (error == 0) { + break; + } + printf("netboot: nfs_mountroot() attempt %u failed; " + "clearing ARP entry and trying again\n", try); + /* + * error is either EHOSTDOWN or EHOSTUNREACH, which likely means + * that the port we're plugged into has spanning tree enabled, + * and either the router or the server can't answer our ARP + * requests. 
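+ * (A port running classic spanning tree spends roughly 30 seconds in its listening/learning states after link-up, dropping frames the whole time, so the first ARP exchanges can easily go unanswered.)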
Clear the incomplete ARP entry by removing the + * appropriate route, depending on the error code: + * EHOSTDOWN NFS server's route + * EHOSTUNREACH router's route + */ + switch (error) { + default: + /* NOT REACHED */ + case EHOSTDOWN: + /* remove the server's arp entry */ + error = host_route_delete(S_netboot_info_p->server_ip, + ifp->if_index); + if (error) { + printf("netboot: host_route_delete(" IP_FORMAT + ") failed %d\n", + IP_LIST(&S_netboot_info_p->server_ip), error); + } + break; + case EHOSTUNREACH: + error = host_route_delete(router, ifp->if_index); + if (error) { + printf("netboot: host_route_delete(" IP_FORMAT + ") failed %d\n", IP_LIST(&router), error); + } + break; + } } break; - case EHOSTUNREACH: - error = host_route_delete(router, ifp->if_index); - if (error) { - printf("netboot: host_route_delete(" IP_FORMAT - ") failed %d\n", IP_LIST(&router), error); - } + case kNetBootImageTypeHTTP: + error = netboot_setup(); break; - } - } - break; - case kNetBootImageTypeHTTP: - error = netboot_setup(); - break; - } - if (error == 0) { - S_netboot = 1; - } - else { - S_netboot = 0; - } - return (error); + } + if (error == 0) { + S_netboot = 1; + } else { + S_netboot = 0; + } + return error; failed: - if (so != NULL) { - soclose(so); - } - return (error); + if (so != NULL) { + soclose(so); + } + return error; } int netboot_setup() { - int error = 0; - - if (S_netboot_info_p == NULL - || S_netboot_info_p->image_path == NULL) { - goto done; - } - printf("netboot_setup: calling imageboot_mount_image\n"); - error = imageboot_mount_image(S_netboot_info_p->image_path, -1); - if (error != 0) { - printf("netboot: failed to mount root image, %d\n", error); - } - else if (S_netboot_info_p->second_image_path != NULL) { - error = imageboot_mount_image(S_netboot_info_p->second_image_path, 0); + int error = 0; + + if (S_netboot_info_p == NULL + || S_netboot_info_p->image_path == NULL) { + goto done; + } + printf("netboot_setup: calling imageboot_mount_image\n"); + error = imageboot_mount_image(S_netboot_info_p->image_path, -1); if (error != 0) { - printf("netboot: failed to mount second root image, %d\n", error); + printf("netboot: failed to mount root image, %d\n", error); + } else if (S_netboot_info_p->second_image_path != NULL) { + error = imageboot_mount_image(S_netboot_info_p->second_image_path, 0); + if (error != 0) { + printf("netboot: failed to mount second root image, %d\n", error); + } } - } - done: - netboot_info_free(&S_netboot_info_p); - return (error); +done: + netboot_info_free(&S_netboot_info_p); + return error; } int netboot_root(void) { - return (S_netboot); + return S_netboot; } diff --git a/bsd/kern/policy_check.c b/bsd/kern/policy_check.c index 527c89c16..06ea2dcfc 100644 --- a/bsd/kern/policy_check.c +++ b/bsd/kern/policy_check.c @@ -1,5 +1,5 @@ #include -#include <sys/systm.h> /* XXX printf() */ +#include <sys/systm.h> /* XXX printf() */ #include #include @@ -14,7 +14,7 @@ #include #include -#include <libkern/OSDebug.h> /* OSBPrintBacktrace */ +#include <libkern/OSDebug.h> /* OSBPrintBacktrace */ /* forward declaration; see bsd_init.c */ @@ -27,16 +27,16 @@ int get_thread_lock_count(thread_t th); /* forced forward */ * Note: CHECK_POLICY_CHECK is probably not very useful unless you * are kernel debugging and set a breakpoint.
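 * For example, a flags value of 0x6 (CHECK_POLICY_FAIL | CHECK_POLICY_BACKTRACE) makes common_hook() return EPERM and log a backtrace whenever a MACF hook is invoked while the thread holds a mutex, without panicking.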
*/ -#define CHECK_POLICY_CHECK 0x00000001 /* Check on calls */ -#define CHECK_POLICY_FAIL 0x00000002 /* EPERM on fails */ -#define CHECK_POLICY_BACKTRACE 0x00000004 /* Show call stack on fails */ -#define CHECK_POLICY_PANIC 0x00000008 /* Panic on fails */ -#define CHECK_POLICY_PERIODIC 0x00000010 /* Show fails periodically */ +#define CHECK_POLICY_CHECK 0x00000001 /* Check on calls */ +#define CHECK_POLICY_FAIL 0x00000002 /* EPERM on fails */ +#define CHECK_POLICY_BACKTRACE 0x00000004 /* Show call stack on fails */ +#define CHECK_POLICY_PANIC 0x00000008 /* Panic on fails */ +#define CHECK_POLICY_PERIODIC 0x00000010 /* Show fails periodically */ static int policy_flags = 0; -#define CHECK_SET_HOOK(x) .mpo_##x = (mpo_##x##_t *)common_hook, +#define CHECK_SET_HOOK(x) .mpo_##x = (mpo_##x##_t *)common_hook, /* * Init; currently, we only print our arrival notice. @@ -56,8 +56,8 @@ hook_policy_initbsd(struct mac_policy_conf *mpc) /* Implementation */ -#define CLASS_PERIOD_LIMIT 10000 -#define CLASS_PERIOD_MULT 20 +#define CLASS_PERIOD_LIMIT 10000 +#define CLASS_PERIOD_MULT 20 static int policy_check_event = 1; static int policy_check_period = 1; @@ -67,16 +67,17 @@ static int policy_check_next = CLASS_PERIOD_MULT; static int common_hook(void) { - int i; - int rv = 0; + int i; + int rv = 0; if ((i = get_thread_lock_count(current_thread())) != 0) { /* * fail the MACF check if we hold a lock; this assumes * a non-void (authorization) MACF hook. */ - if (policy_flags & CHECK_POLICY_FAIL) + if (policy_flags & CHECK_POLICY_FAIL) { rv = EPERM; + } /* * display a backtrace if we hold a lock and we are not @@ -84,24 +85,24 @@ common_hook(void) */ if ((policy_flags & (CHECK_POLICY_BACKTRACE | CHECK_POLICY_PANIC)) == CHECK_POLICY_BACKTRACE) { if (policy_flags & CHECK_POLICY_PERIODIC) { - /* at exponentially increasing intervals */ - if (!(policy_check_event % policy_check_period)) { - if (policy_check_event <= policy_check_next || policy_check_period == CLASS_PERIOD_LIMIT) { - /* - * According to Derek, we could - * technically get a symbolicated name - * here, if we refactered some code - * and set the "keepsyms=1" boot - * argument... - */ - OSReportWithBacktrace("calling MACF hook with mutex count %d (event %d) ", i, policy_check_event); - } - } else { - if (policy_check_period < CLASS_PERIOD_LIMIT) { - policy_check_next *= CLASS_PERIOD_MULT; - policy_check_period *= CLASS_PERIOD_MULT; + /* at exponentially increasing intervals */ + if (!(policy_check_event % policy_check_period)) { + if (policy_check_event <= policy_check_next || policy_check_period == CLASS_PERIOD_LIMIT) { + /* + * According to Derek, we could + * technically get a symbolicated name + * here, if we refactored some code + * and set the "keepsyms=1" boot + * argument...
+ */ + OSReportWithBacktrace("calling MACF hook with mutex count %d (event %d) ", i, policy_check_event); + } + } else { + if (policy_check_period < CLASS_PERIOD_LIMIT) { + policy_check_next *= CLASS_PERIOD_MULT; + policy_check_period *= CLASS_PERIOD_MULT; + } } - } } else { /* always */ OSReportWithBacktrace("calling MACF hook with mutex count %d (event %d) ", i, policy_check_event); @@ -109,8 +110,9 @@ common_hook(void) } /* Panic */ - if (policy_flags & CHECK_POLICY_PANIC) + if (policy_flags & CHECK_POLICY_PANIC) { panic("calling MACF hook with mutex count %d\n", i); + } /* count for non-fatal tracing */ policy_check_event++; @@ -521,10 +523,10 @@ const static struct mac_policy_ops policy_ops = { static SECURITY_READ_ONLY_LATE(struct mac_policy_conf) policy_conf = { .mpc_name = "CHECK", .mpc_fullname = "Check Assumptions Policy", - .mpc_field_off = NULL, /* no label slot */ - .mpc_labelnames = NULL, /* no policy label names */ - .mpc_labelname_count = 0, /* count of label names is 0 */ - .mpc_ops = &policy_ops, /* policy operations */ + .mpc_field_off = NULL, /* no label slot */ + .mpc_labelnames = NULL, /* no policy label names */ + .mpc_labelname_count = 0, /* count of label names is 0 */ + .mpc_ops = &policy_ops, /* policy operations */ .mpc_loadtime_flags = 0, .mpc_runtime_flags = 0, }; @@ -540,8 +542,9 @@ errno_t check_policy_init(int flags) { /* Only instantiate the module if we have been asked to do checking */ - if (!flags) + if (!flags) { return 0; + } policy_flags = flags; diff --git a/bsd/kern/posix_sem.c b/bsd/kern/posix_sem.c index 08a9a0c04..5aa96d0f1 100644 --- a/bsd/kern/posix_sem.c +++ b/bsd/kern/posix_sem.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -91,36 +91,36 @@ #define f_ops f_fglob->fg_ops #define f_offset f_fglob->fg_offset #define f_data f_fglob->fg_data -#define PSEMNAMLEN 31 /* maximum name segment length we bother with */ +#define PSEMNAMLEN 31 /* maximum name segment length we bother with */ struct pseminfo { - unsigned int psem_flags; - unsigned int psem_usecount; - mode_t psem_mode; - uid_t psem_uid; - gid_t psem_gid; - char psem_name[PSEMNAMLEN + 1]; /* segment name */ - semaphore_t psem_semobject; - struct label * psem_label; - pid_t psem_creator_pid; - uint64_t psem_creator_uniqueid; + unsigned int psem_flags; + unsigned int psem_usecount; + mode_t psem_mode; + uid_t psem_uid; + gid_t psem_gid; + char psem_name[PSEMNAMLEN + 1]; /* segment name */ + semaphore_t psem_semobject; + struct label * psem_label; + pid_t psem_creator_pid; + uint64_t psem_creator_uniqueid; }; #define PSEMINFO_NULL (struct pseminfo *)0 -#define PSEM_NONE 1 -#define PSEM_DEFINED 2 -#define PSEM_ALLOCATED 4 -#define PSEM_MAPPED 8 -#define PSEM_INUSE 0x10 -#define PSEM_REMOVED 0x20 -#define PSEM_INCREATE 0x40 -#define PSEM_INDELETE 0x80 - -struct psemcache { - LIST_ENTRY(psemcache) psem_hash; /* hash chain */ - struct pseminfo *pseminfo; /* vnode the name refers to */ - int psem_nlen; /* length of name */ - char psem_name[PSEMNAMLEN + 1]; /* segment name */ +#define PSEM_NONE 1 +#define PSEM_DEFINED 2 +#define PSEM_ALLOCATED 4 +#define PSEM_MAPPED 8 +#define PSEM_INUSE 0x10 +#define PSEM_REMOVED 0x20 +#define PSEM_INCREATE 0x40 +#define PSEM_INDELETE 0x80 + +struct psemcache { + LIST_ENTRY(psemcache) psem_hash; /* hash chain */ + struct pseminfo *pseminfo; /* vnode the name refers to */ + int psem_nlen; /* length of name */ + char psem_name[PSEMNAMLEN + 1]; /* segment name */ }; #define PSEMCACHE_NULL (struct psemcache *)0 @@ -128,19 +128,19 @@ struct psemcache { #define PSEMCACHE_FOUND (-1) #define PSEMCACHE_NEGATIVE (ENOENT) -struct psemstats { - long goodhits; /* hits that we can really use */ - long neghits; /* negative hits that we can use */ - long badhits; /* hits we must drop */ - long falsehits; /* hits with id mismatch */ - long miss; /* misses */ - long longnames; /* long names that ignore cache */ +struct psemstats { + long goodhits; /* hits that we can really use */ + long neghits; /* negative hits that we can use */ + long badhits; /* hits we must drop */ + long falsehits; /* hits with id mismatch */ + long miss; /* misses */ + long longnames; /* long names that ignore cache */ }; struct psemname { - char *psem_nameptr; /* pointer to looked up name */ - long psem_namelen; /* length of looked up component */ - u_int32_t psem_hash; /* hash value of looked up name */ + char *psem_nameptr; /* pointer to looked up name */ + long psem_namelen; /* length of looked up component */ + u_int32_t psem_hash; /* hash value of looked up name */ }; struct psemnode { @@ -155,34 +155,34 @@ struct psemnode { #define PSEMHASH(pnp) \ (&psemhashtbl[(pnp)->psem_hash & psemhash]) -LIST_HEAD(psemhashhead, psemcache) *psemhashtbl; /* Hash Table */ -u_long psemhash; /* size of hash table - 1 */ -long psemnument; /* number of cache entries allocated */ -long posix_sem_max = 10000; /* tunable for max POSIX semaphores */ - /* 10000 limits to ~1M of memory */ +LIST_HEAD(psemhashhead, psemcache) * psemhashtbl; /* Hash Table */ +u_long psemhash; /* size of hash table - 1 */ +long psemnument; /* number of cache entries allocated */ +long posix_sem_max = 10000; /* tunable for max POSIX semaphores */ + /* 10000 
limits to ~1M of memory */ SYSCTL_NODE(_kern, KERN_POSIX, posix, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Posix"); SYSCTL_NODE(_kern_posix, OID_AUTO, sem, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Semaphores"); -SYSCTL_LONG (_kern_posix_sem, OID_AUTO, max, CTLFLAG_RW | CTLFLAG_LOCKED, &posix_sem_max, "max"); +SYSCTL_LONG(_kern_posix_sem, OID_AUTO, max, CTLFLAG_RW | CTLFLAG_LOCKED, &posix_sem_max, "max"); -struct psemstats psemstats; /* cache effectiveness statistics */ +struct psemstats psemstats; /* cache effectiveness statistics */ static int psem_access(struct pseminfo *pinfo, int mode, kauth_cred_t cred); static int psem_cache_search(struct pseminfo **, - struct psemname *, struct psemcache **); + struct psemname *, struct psemcache **); static int psem_delete(struct pseminfo * pinfo); -static int psem_read (struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); -static int psem_write (struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); -static int psem_ioctl (struct fileproc *fp, u_long com, - caddr_t data, vfs_context_t ctx); -static int psem_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx); -static int psem_closefile (struct fileglob *fp, vfs_context_t ctx); +static int psem_read(struct fileproc *fp, struct uio *uio, + int flags, vfs_context_t ctx); +static int psem_write(struct fileproc *fp, struct uio *uio, + int flags, vfs_context_t ctx); +static int psem_ioctl(struct fileproc *fp, u_long com, + caddr_t data, vfs_context_t ctx); +static int psem_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx); +static int psem_closefile(struct fileglob *fp, vfs_context_t ctx); static int psem_unlink_internal(struct pseminfo *pinfo, struct psemcache *pcache); -static int psem_kqfilter (struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx); +static int psem_kqfilter(struct fileproc *fp, struct knote *kn, + struct kevent_internal_s *kev, vfs_context_t ctx); static const struct fileops psemops = { .fo_type = DTYPE_PSXSEM, @@ -214,19 +214,18 @@ int psem_cache_purge_all(proc_t); __private_extern__ void psem_lock_init( void ) { + psx_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); - psx_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); - - psx_sem_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_sem_subsys_lck_grp_attr); + psx_sem_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_sem_subsys_lck_grp_attr); - psx_sem_subsys_lck_attr = lck_attr_alloc_init(); - lck_mtx_init(& psx_sem_subsys_mutex, psx_sem_subsys_lck_grp, psx_sem_subsys_lck_attr); + psx_sem_subsys_lck_attr = lck_attr_alloc_init(); + lck_mtx_init(&psx_sem_subsys_mutex, psx_sem_subsys_lck_grp, psx_sem_subsys_lck_attr); } /* - * Lookup an entry in the cache - * - * + * Lookup an entry in the cache + * + * * a status of -1 is returned if a match is found * If the lookup determines that the name does not exist * (negative caching), a status of ENOENT is returned.
If the lookup @@ -235,7 +234,7 @@ psem_lock_init( void ) static int psem_cache_search(struct pseminfo **psemp, struct psemname *pnp, - struct psemcache **pcache) + struct psemcache **pcache) { struct psemcache *pcp, *nnp; struct psemhashhead *pcpp; @@ -249,8 +248,9 @@ psem_cache_search(struct pseminfo **psemp, struct psemname *pnp, for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) { nnp = pcp->psem_hash.le_next; if (pcp->psem_nlen == pnp->psem_namelen && - !bcmp(pcp->psem_name, pnp->psem_nameptr, (u_int)pcp-> psem_nlen)) + !bcmp(pcp->psem_name, pnp->psem_nameptr, (u_int)pcp->psem_nlen)) { break; + } } if (pcp == 0) { @@ -259,7 +259,7 @@ psem_cache_search(struct pseminfo **psemp, struct psemname *pnp, } /* We found a "positive" match, return the vnode */ - if (pcp->pseminfo) { + if (pcp->pseminfo) { psemstats.goodhits++; /* TOUCH(ncp); */ *psemp = pcp->pseminfo; @@ -286,8 +286,9 @@ psem_cache_add(struct pseminfo *psemp, struct psemname *pnp, struct psemcache *p struct psemcache *dpcp; #if DIAGNOSTIC - if (pnp->psem_namelen > PSEMNAMLEN) + if (pnp->psem_namelen > PSEMNAMLEN) { panic("cache_enter: name too long"); + } #endif @@ -295,8 +296,9 @@ psem_cache_add(struct pseminfo *psemp, struct psemname *pnp, struct psemcache *p if (psem_cache_search(&dpinfo, pnp, &dpcp) == PSEMCACHE_FOUND) { return EEXIST; } - if (psemnument >= posix_sem_max) + if (psemnument >= posix_sem_max) { return ENOSPC; + } psemnument++; /* * Fill in cache info, if vp is NULL this is a "negative" cache entry. @@ -312,9 +314,11 @@ psem_cache_add(struct pseminfo *psemp, struct psemname *pnp, struct psemcache *p { struct psemcache *p; - for (p = pcpp->lh_first; p != 0; p = p->psem_hash.le_next) - if (p == pcp) + for (p = pcpp->lh_first; p != 0; p = p->psem_hash.le_next) { + if (p == pcp) { panic("psem:cache_enter duplicate"); + } + } } #endif LIST_INSERT_HEAD(pcpp, pcp, psem_hash); @@ -334,13 +338,15 @@ static void psem_cache_delete(struct psemcache *pcp) { #if DIAGNOSTIC - if (pcp->psem_hash.le_prev == 0) + if (pcp->psem_hash.le_prev == 0) { panic("psem namecache purge le_prev"); - if (pcp->psem_hash.le_next == pcp) + } + if (pcp->psem_hash.le_next == pcp) { panic("namecache purge le_next"); + } #endif /* DIAGNOSTIC */ LIST_REMOVE(pcp, psem_hash); - pcp->psem_hash.le_prev = NULL; + pcp->psem_hash.le_prev = NULL; psemnument--; } @@ -356,8 +362,9 @@ psem_cache_purge_all(__unused proc_t p) struct psemhashhead *pcpp; int error = 0; - if (kauth_cred_issuser(kauth_cred_get()) == 0) + if (kauth_cred_issuser(kauth_cred_get()) == 0) { return EPERM; + } PSEM_SUBSYS_LOCK(); for (pcpp = &psemhashtbl[psemhash]; pcpp >= psemhashtbl; pcpp--) { @@ -367,8 +374,9 @@ psem_cache_purge_all(__unused proc_t p) * unconditionally unlink the cache entry */ error = psem_unlink_internal(pcp->pseminfo, pcp); - if (error) + if (error) { goto out; + } } } assert(psemnument == 0); @@ -376,9 +384,10 @@ psem_cache_purge_all(__unused proc_t p) out: PSEM_SUBSYS_UNLOCK(); - if (error) + if (error) { printf("%s: Error %d removing all semaphores: %ld remain!\n", - __func__, error, psemnument); + __func__, error, psemnument); + } return error; } @@ -397,13 +406,13 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) char * nameptr; char * cp; size_t pathlen, plen; - int fmode ; + int fmode; int cmode = uap->mode; int value = uap->value; int incache = 0; struct psemcache *pcp = PSEMCACHE_NULL; - kern_return_t kret = KERN_INVALID_ADDRESS; /* default fail */ - + kern_return_t kret = KERN_INVALID_ADDRESS; /* default fail */ + AUDIT_ARG(fflags, 
uap->oflag); AUDIT_ARG(mode, uap->mode); AUDIT_ARG(value32, uap->value); @@ -426,7 +435,7 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) goto bad; } AUDIT_ARG(text, pnbuf); - if ( (pathlen > PSEMNAMLEN) ) { + if ((pathlen > PSEMNAMLEN)) { error = ENAMETOOLONG; goto bad; } @@ -439,7 +448,7 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) error = EINVAL; goto bad; } - } else { + } else { error = EINVAL; goto bad; } @@ -451,8 +460,8 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) nd.psem_namelen = plen; nd.psem_hash = 0; - for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) { - nd.psem_hash += (unsigned char)*cp * i; + for (cp = nameptr, i = 1; *cp != 0 && i <= plen; i++, cp++) { + nd.psem_hash += (unsigned char)*cp * i; } /* @@ -460,21 +469,22 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) * left unmodified (NULL). */ error = falloc(p, &fp, &indx, vfs_context_current()); - if (error) + if (error) { goto bad; + } /* * We allocate a new entry if we are less than the maximum * allowed and the one at the front of the LRU list is in use. * Otherwise we use the one at the front of the LRU list. */ - MALLOC(pcp, struct psemcache *, sizeof(struct psemcache), M_SHM, M_WAITOK|M_ZERO); + MALLOC(pcp, struct psemcache *, sizeof(struct psemcache), M_SHM, M_WAITOK | M_ZERO); if (pcp == PSEMCACHE_NULL) { error = ENOMEM; goto bad; } - MALLOC(new_pinfo, struct pseminfo *, sizeof(struct pseminfo), M_SHM, M_WAITOK|M_ZERO); + MALLOC(new_pinfo, struct pseminfo *, sizeof(struct pseminfo), M_SHM, M_WAITOK | M_ZERO); if (new_pinfo == NULL) { error = ENOSPC; goto bad; @@ -489,34 +499,33 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) * signal success or failure, which is why we set its default value * to KERN_INVALID_ADDRESS, above. 
*/ - + fmode = FFLAGS(uap->oflag); - - if((fmode & O_CREAT)) { - - if((value < 0) || (value > SEM_VALUE_MAX)) { + + if ((fmode & O_CREAT)) { + if ((value < 0) || (value > SEM_VALUE_MAX)) { error = EINVAL; goto bad; } - + kret = semaphore_create(kernel_task, &new_pinfo->psem_semobject, SYNC_POLICY_FIFO, value); - + if (kret != KERN_SUCCESS) { switch (kret) { - case KERN_RESOURCE_SHORTAGE: - error = ENOMEM; - break; - case KERN_PROTECTION_FAILURE: - error = EACCES; - break; - default: - error = EINVAL; + case KERN_RESOURCE_SHORTAGE: + error = ENOMEM; + break; + case KERN_PROTECTION_FAILURE: + error = EACCES; + break; + default: + error = EINVAL; } goto bad; } } - - MALLOC(new_pnode, struct psemnode *, sizeof(struct psemnode), M_SHM, M_WAITOK|M_ZERO); + + MALLOC(new_pnode, struct psemnode *, sizeof(struct psemnode), M_SHM, M_WAITOK | M_ZERO); if (new_pnode == NULL) { error = ENOSPC; goto bad; @@ -530,30 +539,31 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) goto bad_locked; } - if (error == PSEMCACHE_FOUND) + if (error == PSEMCACHE_FOUND) { incache = 1; - else + } else { incache = 0; + } cmode &= ALLPERMS; - if (((fmode & (O_CREAT | O_EXCL))==(O_CREAT | O_EXCL)) && incache) { + if (((fmode & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) && incache) { /* sem exists and opened O_EXCL */ #if notyet if (pinfo->psem_flags & PSEM_INDELETE) { } -#endif +#endif AUDIT_ARG(posix_ipc_perm, pinfo->psem_uid, - pinfo->psem_gid, pinfo->psem_mode); + pinfo->psem_gid, pinfo->psem_mode); error = EEXIST; goto bad_locked; } - if (((fmode & (O_CREAT | O_EXCL))== O_CREAT) && incache) { + if (((fmode & (O_CREAT | O_EXCL)) == O_CREAT) && incache) { /* As per POSIX, O_CREAT has no effect */ fmode &= ~O_CREAT; } - if ( (fmode & O_CREAT) ) { + if ((fmode & O_CREAT)) { /* create a new one (commit the allocation) */ pinfo = new_pinfo; pinfo->psem_flags = PSEM_DEFINED | PSEM_INCREATE; @@ -562,12 +572,12 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) pinfo->psem_uid = kauth_getuid(); pinfo->psem_gid = kauth_getgid(); bcopy(pnbuf, &pinfo->psem_name[0], PSEMNAMLEN); - pinfo->psem_name[PSEMNAMLEN]= 0; + pinfo->psem_name[PSEMNAMLEN] = 0; pinfo->psem_flags &= ~PSEM_DEFINED; pinfo->psem_flags |= PSEM_ALLOCATED; pinfo->psem_creator_pid = p->p_pid; pinfo->psem_creator_uniqueid = p->p_uniqueid; - + #if CONFIG_MACF error = mac_posixsem_check_create(kauth_cred_get(), nameptr); if (error) { @@ -581,26 +591,26 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) error = ENOENT; goto bad_locked; } - if( pinfo->psem_flags & PSEM_INDELETE) { + if (pinfo->psem_flags & PSEM_INDELETE) { error = ENOENT; goto bad_locked; - } + } AUDIT_ARG(posix_ipc_perm, pinfo->psem_uid, - pinfo->psem_gid, pinfo->psem_mode); + pinfo->psem_gid, pinfo->psem_mode); #if CONFIG_MACF error = mac_posixsem_check_open(kauth_cred_get(), pinfo); if (error) { goto bad_locked; } #endif - if ( (error = psem_access(pinfo, fmode, kauth_cred_get())) ) { + if ((error = psem_access(pinfo, fmode, kauth_cred_get()))) { goto bad_locked; } } if (!incache) { /* if successful, this will consume the pcp */ - if ( (error = psem_cache_add(pinfo, &nd, pcp)) ) { + if ((error = psem_cache_add(pinfo, &nd, pcp))) { goto bad_locked; } } @@ -637,19 +647,22 @@ sem_open(proc_t p, struct sem_open_args *uap, user_addr_t *retval) *retval = CAST_USER_ADDR_T(indx); FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); - return (0); + return 0; bad_locked: PSEM_SUBSYS_UNLOCK(); bad: - if (pcp != PSEMCACHE_NULL) + if (pcp != PSEMCACHE_NULL) { FREE(pcp, 
M_SHM); + } - if (new_pnode != PSEMNODE_NULL) + if (new_pnode != PSEMNODE_NULL) { FREE(new_pnode, M_SHM); + } - if (fp != NULL) + if (fp != NULL) { fp_free(p, indx, fp); + } if (new_pinfo != PSEMINFO_NULL) { /* @@ -667,9 +680,10 @@ bad: FREE(new_pinfo, M_SHM); } - if (pnbuf != NULL) + if (pnbuf != NULL) { FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); - return (error); + } + return error; } /* @@ -679,13 +693,14 @@ static int psem_access(struct pseminfo *pinfo, int mode, kauth_cred_t cred) { int mode_req = ((mode & FREAD) ? S_IRUSR : 0) | - ((mode & FWRITE) ? S_IWUSR : 0); + ((mode & FWRITE) ? S_IWUSR : 0); /* Otherwise, user id 0 always gets access. */ - if (!suser(cred, NULL)) - return (0); + if (!suser(cred, NULL)) { + return 0; + } - return(posix_cred_access(cred, pinfo->psem_uid, pinfo->psem_gid, pinfo->psem_mode, mode_req)); + return posix_cred_access(cred, pinfo->psem_uid, pinfo->psem_gid, pinfo->psem_mode, mode_req); } static int @@ -693,24 +708,27 @@ psem_unlink_internal(struct pseminfo *pinfo, struct psemcache *pcache) { PSEM_SUBSYS_ASSERT_HELD(); - if (!pinfo || !pcache) + if (!pinfo || !pcache) { return EINVAL; + } - if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) == 0) + if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) == 0) { return EINVAL; + } - if (pinfo->psem_flags & PSEM_INDELETE) + if (pinfo->psem_flags & PSEM_INDELETE) { return 0; + } AUDIT_ARG(posix_ipc_perm, pinfo->psem_uid, pinfo->psem_gid, - pinfo->psem_mode); + pinfo->psem_mode); pinfo->psem_flags |= PSEM_INDELETE; pinfo->psem_usecount--; if (!pinfo->psem_usecount) { psem_delete(pinfo); - FREE(pinfo,M_SHM); + FREE(pinfo, M_SHM); } else { pinfo->psem_flags |= PSEM_REMOVED; } @@ -725,7 +743,7 @@ int sem_unlink(__unused proc_t p, struct sem_unlink_args *uap, __unused int32_t *retval) { size_t i; - int error=0; + int error = 0; struct psemname nd; struct pseminfo *pinfo; char * nameptr; @@ -738,7 +756,7 @@ sem_unlink(__unused proc_t p, struct sem_unlink_args *uap, __unused int32_t *ret MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); if (pnbuf == NULL) { - return(ENOSPC); /* XXX non-standard */ + return ENOSPC; /* XXX non-standard */ } pathlen = MAXPATHLEN; error = copyinstr(uap->name, pnbuf, MAXPATHLEN, &pathlen); @@ -760,7 +778,7 @@ sem_unlink(__unused proc_t p, struct sem_unlink_args *uap, __unused int32_t *ret error = EINVAL; goto bad; } - } else { + } else { error = EINVAL; goto bad; } @@ -768,10 +786,10 @@ sem_unlink(__unused proc_t p, struct sem_unlink_args *uap, __unused int32_t *ret nd.psem_nameptr = nameptr; nd.psem_namelen = pathlen; - nd. 
psem_hash =0; + nd.psem_hash = 0; - for (cp = nameptr, i=1; *cp != 0 && i <= pathlen; i++, cp++) { - nd.psem_hash += (unsigned char)*cp * i; + for (cp = nameptr, i = 1; *cp != 0 && i <= pathlen; i++, cp++) { + nd.psem_hash += (unsigned char)*cp * i; } PSEM_SUBSYS_LOCK(); @@ -781,7 +799,6 @@ sem_unlink(__unused proc_t p, struct sem_unlink_args *uap, __unused int32_t *ret PSEM_SUBSYS_UNLOCK(); error = EINVAL; goto bad; - } #if CONFIG_MACF @@ -791,7 +808,7 @@ sem_unlink(__unused proc_t p, struct sem_unlink_args *uap, __unused int32_t *ret goto bad; } #endif - if ( (error = psem_access(pinfo, pinfo->psem_mode, kauth_cred_get())) ) { + if ((error = psem_access(pinfo, pinfo->psem_mode, kauth_cred_get()))) { PSEM_SUBSYS_UNLOCK(); goto bad; } @@ -807,21 +824,22 @@ bad: int sem_close(proc_t p, struct sem_close_args *uap, __unused int32_t *retval) { - int fd = CAST_DOWN_EXPLICIT(int,uap->sem); + int fd = CAST_DOWN_EXPLICIT(int, uap->sem); struct fileproc *fp; int error = 0; AUDIT_ARG(fd, fd); /* XXX This seems wrong; uap->sem is a pointer */ proc_fdlock(p); - error = fp_lookup(p,fd, &fp, 1); + error = fp_lookup(p, fd, &fp, 1); if (error) { proc_fdunlock(p); - return(error); + return error; } if (fp->f_type != DTYPE_PSXSEM) { + fp_drop(p, fd, fp, 1); proc_fdunlock(p); - return(EBADF); + return EBADF; } procfdtbl_markclosefd(p, fd); fileproc_drain(p, fp); @@ -829,30 +847,31 @@ sem_close(proc_t p, struct sem_close_args *uap, __unused int32_t *retval) error = closef_locked(fp, fp->f_fglob, p); fileproc_free(fp); proc_fdunlock(p); - return(error); + return error; } int sem_wait(proc_t p, struct sem_wait_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(sem_wait_nocancel(p, (struct sem_wait_nocancel_args *)uap, retval)); + return sem_wait_nocancel(p, (struct sem_wait_nocancel_args *)uap, retval); } int sem_wait_nocancel(proc_t p, struct sem_wait_nocancel_args *uap, __unused int32_t *retval) { - int fd = CAST_DOWN_EXPLICIT(int,uap->sem); + int fd = CAST_DOWN_EXPLICIT(int, uap->sem); struct fileproc *fp; struct pseminfo * pinfo; - struct psemnode * pnode ; + struct psemnode * pnode; kern_return_t kret; int error; error = fp_getfpsem(p, fd, &fp, &pnode); - if (error) - return (error); - if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL ) { + if (error) { + return error; + } + if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL) { error = EINVAL; goto out; } @@ -862,8 +881,8 @@ sem_wait_nocancel(proc_t p, struct sem_wait_nocancel_args *uap, __unused int32_t error = EINVAL; goto out; } - if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) - != PSEM_ALLOCATED) { + if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) + != PSEM_ALLOCATED) { PSEM_SUBSYS_UNLOCK(); error = EINVAL; goto out; @@ -895,25 +914,25 @@ sem_wait_nocancel(proc_t p, struct sem_wait_nocancel_args *uap, __unused int32_t } out: fp_drop(p, fd, fp, 0); - return(error); - + return error; } int sem_trywait(proc_t p, struct sem_trywait_args *uap, __unused int32_t *retval) { - int fd = CAST_DOWN_EXPLICIT(int,uap->sem); + int fd = CAST_DOWN_EXPLICIT(int, uap->sem); struct fileproc *fp; struct pseminfo * pinfo; - struct psemnode * pnode ; + struct psemnode * pnode; kern_return_t kret; mach_timespec_t wait_time; int error; - + error = fp_getfpsem(p, fd, &fp, &pnode); - if (error) - return (error); - if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL ) { + if (error) { + return error; + } + if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL) { error = EINVAL; goto out; } @@ -923,8 
+942,8 @@ sem_trywait(proc_t p, struct sem_trywait_args *uap, __unused int32_t *retval) error = EINVAL; goto out; } - if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) - != PSEM_ALLOCATED) { + if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) + != PSEM_ALLOCATED) { PSEM_SUBSYS_UNLOCK(); error = EINVAL; goto out; @@ -961,23 +980,24 @@ sem_trywait(proc_t p, struct sem_trywait_args *uap, __unused int32_t *retval) } out: fp_drop(p, fd, fp, 0); - return(error); + return error; } int sem_post(proc_t p, struct sem_post_args *uap, __unused int32_t *retval) { - int fd = CAST_DOWN_EXPLICIT(int,uap->sem); + int fd = CAST_DOWN_EXPLICIT(int, uap->sem); struct fileproc *fp; struct pseminfo * pinfo; - struct psemnode * pnode ; + struct psemnode * pnode; kern_return_t kret; int error; error = fp_getfpsem(p, fd, &fp, &pnode); - if (error) - return (error); - if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL ) { + if (error) { + return error; + } + if (((pnode = (struct psemnode *)fp->f_data)) == PSEMNODE_NULL) { error = EINVAL; goto out; } @@ -987,8 +1007,8 @@ sem_post(proc_t p, struct sem_post_args *uap, __unused int32_t *retval) error = EINVAL; goto out; } - if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) - != PSEM_ALLOCATED) { + if ((pinfo->psem_flags & (PSEM_DEFINED | PSEM_ALLOCATED)) + != PSEM_ALLOCATED) { PSEM_SUBSYS_UNLOCK(); error = EINVAL; goto out; @@ -1020,43 +1040,43 @@ sem_post(proc_t p, struct sem_post_args *uap, __unused int32_t *retval) } out: fp_drop(p, fd, fp, 0); - return(error); + return error; } static int psem_close(struct psemnode *pnode, __unused int flags) { - int error=0; + int error = 0; struct pseminfo *pinfo; PSEM_SUBSYS_LOCK(); - if ((pinfo = pnode->pinfo) == PSEMINFO_NULL){ + if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) { PSEM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } if ((pinfo->psem_flags & PSEM_ALLOCATED) != PSEM_ALLOCATED) { PSEM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } #if DIAGNOSTIC - if(!pinfo->psem_usecount) { + if (!pinfo->psem_usecount) { kprintf("negative usecount in psem_close\n"); } #endif /* DIAGNOSTIC */ pinfo->psem_usecount--; - if ((pinfo->psem_flags & PSEM_REMOVED) && !pinfo->psem_usecount) { + if ((pinfo->psem_flags & PSEM_REMOVED) && !pinfo->psem_usecount) { PSEM_SUBSYS_UNLOCK(); /* lock dropped as only semaphore is destroyed here */ error = psem_delete(pinfo); - FREE(pinfo,M_SHM); + FREE(pinfo, M_SHM); } else { PSEM_SUBSYS_UNLOCK(); } /* subsystem lock is dropped when we get here */ FREE(pnode, M_SHM); - return (error); + return error; } static int @@ -1070,10 +1090,10 @@ psem_closefile(struct fileglob *fg, __unused vfs_context_t ctx) */ error = psem_close(((struct psemnode *)fg->fg_data), fg->fg_flag); - return(error); + return error; } -static int +static int psem_delete(struct pseminfo * pinfo) { kern_return_t kret; @@ -1086,48 +1106,48 @@ psem_delete(struct pseminfo * pinfo) switch (kret) { case KERN_INVALID_ADDRESS: case KERN_PROTECTION_FAILURE: - return (EINVAL); + return EINVAL; case KERN_ABORTED: case KERN_OPERATION_TIMED_OUT: - return (EINTR); + return EINTR; case KERN_SUCCESS: - return(0); + return 0; default: - return (EINVAL); + return EINVAL; } } static int psem_read(__unused struct fileproc *fp, __unused struct uio *uio, - __unused int flags, __unused vfs_context_t ctx) + __unused int flags, __unused vfs_context_t ctx) { - return(ENOTSUP); + return ENOTSUP; } static int psem_write(__unused struct fileproc *fp, __unused struct uio *uio, - __unused int flags, __unused 
vfs_context_t ctx) + __unused int flags, __unused vfs_context_t ctx) { - return(ENOTSUP); + return ENOTSUP; } static int psem_ioctl(__unused struct fileproc *fp, __unused u_long com, - __unused caddr_t data, __unused vfs_context_t ctx) + __unused caddr_t data, __unused vfs_context_t ctx) { - return(ENOTSUP); + return ENOTSUP; } static int psem_select(__unused struct fileproc *fp, __unused int which, - __unused void *wql, __unused vfs_context_t ctx) + __unused void *wql, __unused vfs_context_t ctx) { - return(ENOTSUP); + return ENOTSUP; } static int psem_kqfilter(__unused struct fileproc *fp, struct knote *kn, - __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) + __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) { kn->kn_flags = EV_ERROR; kn->kn_data = ENOTSUP; @@ -1141,29 +1161,29 @@ fill_pseminfo(struct psemnode *pnode, struct psem_info * info) struct vinfo_stat *sb; PSEM_SUBSYS_LOCK(); - if ((pinfo = pnode->pinfo) == PSEMINFO_NULL){ + if ((pinfo = pnode->pinfo) == PSEMINFO_NULL) { PSEM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } #if 0 if ((pinfo->psem_flags & PSEM_ALLOCATED) != PSEM_ALLOCATED) { PSEM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } #endif sb = &info->psem_stat; bzero(sb, sizeof(struct vinfo_stat)); - sb->vst_mode = pinfo->psem_mode; - sb->vst_uid = pinfo->psem_uid; - sb->vst_gid = pinfo->psem_gid; - sb->vst_size = pinfo->psem_usecount; - bcopy(&pinfo->psem_name[0], &info->psem_name[0], PSEMNAMLEN+1); + sb->vst_mode = pinfo->psem_mode; + sb->vst_uid = pinfo->psem_uid; + sb->vst_gid = pinfo->psem_gid; + sb->vst_size = pinfo->psem_usecount; + bcopy(&pinfo->psem_name[0], &info->psem_name[0], PSEMNAMLEN + 1); PSEM_SUBSYS_UNLOCK(); - return(0); + return 0; } #if CONFIG_MACF @@ -1177,12 +1197,12 @@ psem_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx) pnode = (struct psemnode *)fp->f_fglob->fg_data; if (pnode != NULL) { psem = pnode->pinfo; - if (psem != NULL) + if (psem != NULL) { mac_posixsem_vnode_label_associate( vfs_context_ucred(ctx), psem, psem->psem_label, vp, vp->v_label); + } } PSEM_SUBSYS_UNLOCK(); } #endif - diff --git a/bsd/kern/posix_shm.c b/bsd/kern/posix_shm.c index d220614db..3cd6aebd1 100644 --- a/bsd/kern/posix_shm.c +++ b/bsd/kern/posix_shm.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -67,7 +67,9 @@ #include #include #include +#include #include +#include #if CONFIG_MACF #include @@ -91,111 +93,113 @@ #define f_ops f_fglob->fg_ops #define f_offset f_fglob->fg_offset #define f_data f_fglob->fg_data -#define PSHMNAMLEN 31 /* maximum name segment length we bother with */ -struct pshmobj { - void * pshmo_memobject; - memory_object_size_t pshmo_size; - struct pshmobj * pshmo_next; -}; +/* + * Used to construct the list of memory objects + * assigned to a populated shared memory segment. + */ +typedef struct pshm_mobj { + void *pshmo_memobject; + memory_object_size_t pshmo_size; + SLIST_ENTRY(pshm_mobj) pshmo_next; +} pshm_mobj_t; -struct pshminfo { - unsigned int pshm_flags; - unsigned int pshm_usecount; - off_t pshm_length; - mode_t pshm_mode; - uid_t pshm_uid; - gid_t pshm_gid; - char pshm_name[PSHMNAMLEN + 1]; /* segment name */ - struct pshmobj *pshm_memobjects; -#if DIAGNOSTIC - unsigned int pshm_readcount; - unsigned int pshm_writecount; - proc_t pshm_proc; -#endif /* DIAGNOSTIC */ - struct label* pshm_label; -}; -#define PSHMINFO_NULL (struct pshminfo *)0 - -#define PSHM_NONE 0x001 -#define PSHM_DEFINED 0x002 -#define PSHM_ALLOCATED 0x004 -#define PSHM_MAPPED 0x008 -#define PSHM_INUSE 0x010 -#define PSHM_REMOVED 0x020 -#define PSHM_INCREATE 0x040 -#define PSHM_INDELETE 0x080 -#define PSHM_ALLOCATING 0x100 - -struct pshmcache { - LIST_ENTRY(pshmcache) pshm_hash; /* hash chain */ - struct pshminfo *pshminfo; /* vnode the name refers to */ - int pshm_nlen; /* length of name */ - char pshm_name[PSHMNAMLEN + 1]; /* segment name */ -}; -#define PSHMCACHE_NULL (struct pshmcache *)0 - -#define PSHMCACHE_NOTFOUND (0) -#define PSHMCACHE_FOUND (-1) -#define PSHMCACHE_NEGATIVE (ENOENT) - -struct pshmstats { - long goodhits; /* hits that we can really use */ - long neghits; /* negative hits that we can use */ - long badhits; /* hits we must drop */ - long falsehits; /* hits with id mismatch */ - long miss; /* misses */ - long longnames; /* long names that ignore cache */ -}; +/* + * This represents an existing Posix shared memory object. + * + * It comes into existence with a shm_open(...O_CREAT...) + * call and goes away only after it has been shm_unlink()ed + * and the last remaining shm_open() file reference is closed. + * + * To keep track of that lifetime, pshm_usecount is used as a reference + * counter. It's incremented for every successful shm_open() and + * one extra time for the shm_unlink() to release. Internally + * you can temporarily use an additional reference whenever the + * subsystem lock has to be dropped for other reasons. 
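+ * + * An illustrative lifetime, with a made-up name: shm_open("/seg", O_CREAT | O_RDWR, 0600) creates the object with pshm_usecount == 2 (one file reference plus the shm_unlink reference); a later shm_unlink("/seg") drops it to 1, and the final close() of the descriptor drops it to 0 and frees the object.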
+ */ +typedef struct internal_pshminfo { + struct pshminfo pshm_hdr; + SLIST_HEAD(pshm_mobjhead, pshm_mobj) pshm_mobjs; + RB_ENTRY(internal_pshminfo) pshm_links; /* links for red/black tree */ +} pshm_info_t; +#define pshm_flags pshm_hdr.pshm_flags +#define pshm_usecount pshm_hdr.pshm_usecount +#define pshm_length pshm_hdr.pshm_length +#define pshm_mode pshm_hdr.pshm_mode +#define pshm_uid pshm_hdr.pshm_uid +#define pshm_gid pshm_hdr.pshm_gid +#define pshm_label pshm_hdr.pshm_label + +/* Values for pshm_flags that are still used */ +#define PSHM_ALLOCATED 0x004 /* backing storage is allocated */ +#define PSHM_MAPPED 0x008 /* mapped at least once */ +#define PSHM_INUSE 0x010 /* mapped at least once */ +#define PSHM_REMOVED 0x020 /* no longer in the name cache due to shm_unlink() */ +#define PSHM_ALLOCATING 0x100 /* storage is being allocated */ -struct pshmname { - char *pshm_nameptr; /* pointer to looked up name */ - long pshm_namelen; /* length of looked up component */ - u_long pshm_hash; /* hash value of looked up name */ -}; +/* + * These handle reference counting pshm_info_t structs using pshm_usecount. + */ +static int pshm_ref(pshm_info_t *pinfo); +static void pshm_deref(pshm_info_t *pinfo); +#define PSHM_MAXCOUNT UINT_MAX + +/* + * For every shm_open, we get a new one of these. + * The only reason we don't just use pshm_info directly is that + * you can query the mapped memory objects via proc_pidinfo to + * get the mapped address. Note that even this is a hack. If + * you mmap() the same fd multiple times, we only save/report + * one address. + */ +typedef struct pshmnode { + off_t mapp_addr; + pshm_info_t *pinfo; +} pshmnode_t; -struct pshmnode { - off_t mapp_addr; - user_size_t map_size; /* XXX unused ? */ - struct pshminfo *pinfo; - unsigned int pshm_usecount; -#if DIAGNOSTIC - unsigned int readcnt; - unsigned int writecnt; -#endif -}; -#define PSHMNODE_NULL (struct pshmnode *)0 +/* compare function for the red black tree */ +static int +pshm_compare(pshm_info_t *a, pshm_info_t *b) +{ + int cmp = strncmp(a->pshm_hdr.pshm_name, b->pshm_hdr.pshm_name, PSHMNAMLEN + 1); -#define PSHMHASH(pnp) \ - (&pshmhashtbl[(pnp)->pshm_hash & pshmhash]) + if (cmp < 0) { + return -1; + } + if (cmp > 0) { + return 1; + } + return 0; +} -LIST_HEAD(pshmhashhead, pshmcache) *pshmhashtbl; /* Hash Table */ -u_long pshmhash; /* size of hash table - 1 */ -long pshmnument; /* number of cache entries allocated */ -struct pshmstats pshmstats; /* cache effectiveness statistics */ -static int pshm_read (struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); -static int pshm_write (struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); -static int pshm_ioctl (struct fileproc *fp, u_long com, - caddr_t data, vfs_context_t ctx); -static int pshm_select (struct fileproc *fp, int which, void *wql, vfs_context_t ctx); -static int pshm_close(struct pshminfo *pinfo, int dropref); -static int pshm_closefile (struct fileglob *fg, vfs_context_t ctx); +/* + * shared memory "paths" are stored in a red black tree for lookup + */ +u_long pshmnument; /* count of entries allocated in the red black tree */ +RB_HEAD(pshmhead, internal_pshminfo) pshm_head; +RB_PROTOTYPE(pshmhead, internal_pshminfo, pshm_links, pshm_compare) +RB_GENERATE(pshmhead, internal_pshminfo, pshm_links, pshm_compare) + +/* lookup, add, remove functions */ +static pshm_info_t *pshm_cache_search(pshm_info_t * look); +static void pshm_cache_add(pshm_info_t *entry); +static void pshm_cache_delete(pshm_info_t *entry); + 
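+ +/* + * The tree is keyed by name alone, so a lookup only needs a scratch + * pshm_info_t with pshm_name filled in; this is the pattern shm_open() + * uses below: + * + * pshm_get_name(new_pinfo, uap->name); // parse the userspace name + * PSHM_SUBSYS_LOCK(); + * pinfo = pshm_cache_search(new_pinfo); // NULL if no such segment + * PSHM_SUBSYS_UNLOCK(); + */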
+static int pshm_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx); +static int pshm_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx); +static int pshm_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx); +static int pshm_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx); +static int pshm_closefile(struct fileglob *fg, vfs_context_t ctx); static int pshm_kqfilter(struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx); + struct kevent_internal_s *kev, vfs_context_t ctx); -int pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, proc_t p); +static int pshm_access(pshm_info_t *pinfo, int mode, kauth_cred_t cred, proc_t p); int pshm_cache_purge_all(proc_t p); -static int pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp); -static void pshm_cache_delete(struct pshmcache *pcp); -static int pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp, - struct pshmcache **pcache, int addref); -static int pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache); +static int pshm_unlink_internal(pshm_info_t *pinfo); static const struct fileops pshmops = { .fo_type = DTYPE_PSXSHM, @@ -208,6 +212,9 @@ static const struct fileops pshmops = { .fo_drain = NULL, }; +/* + * Everything here is protected by a single mutex. + */ static lck_grp_t *psx_shm_subsys_lck_grp; static lck_grp_attr_t *psx_shm_subsys_lck_grp_attr; static lck_attr_t *psx_shm_subsys_lck_attr; @@ -218,150 +225,87 @@ static lck_mtx_t psx_shm_subsys_mutex; #define PSHM_SUBSYS_ASSERT_HELD() LCK_MTX_ASSERT(&psx_shm_subsys_mutex, LCK_MTX_ASSERT_OWNED) -/* Initialize the mutex governing access to the posix shm subsystem */ __private_extern__ void pshm_lock_init( void ) { + psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); - psx_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); - - psx_shm_subsys_lck_grp = lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr); + psx_shm_subsys_lck_grp = + lck_grp_alloc_init("posix shared memory", psx_shm_subsys_lck_grp_attr); - psx_shm_subsys_lck_attr = lck_attr_alloc_init(); - lck_mtx_init(& psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr); + psx_shm_subsys_lck_attr = lck_attr_alloc_init(); + lck_mtx_init(&psx_shm_subsys_mutex, psx_shm_subsys_lck_grp, psx_shm_subsys_lck_attr); } /* - * Lookup an entry in the cache - * - * - * status of -1 is returned if matches - * If the lookup determines that the name does not exist - * (negative cacheing), a status of ENOENT is returned. If the lookup - * fails, a status of zero is returned. + * Lookup an entry in the cache. Only the name is used from "look". 
*/ - -static int -pshm_cache_search(struct pshminfo **pshmp, struct pshmname *pnp, - struct pshmcache **pcache, int addref) +static pshm_info_t * +pshm_cache_search(pshm_info_t *look) { - struct pshmcache *pcp, *nnp; - struct pshmhashhead *pcpp; - - if (pnp->pshm_namelen > PSHMNAMLEN) { - pshmstats.longnames++; - return PSHMCACHE_NOTFOUND; - } - - pcpp = PSHMHASH(pnp); - for (pcp = pcpp->lh_first; pcp != 0; pcp = nnp) { - nnp = pcp->pshm_hash.le_next; - if (pcp->pshm_nlen == pnp->pshm_namelen && - !bcmp(pcp->pshm_name, pnp->pshm_nameptr, (u_int)pcp-> pshm_nlen)) - break; - } - - if (pcp == 0) { - pshmstats.miss++; - return PSHMCACHE_NOTFOUND; - } - - /* We found a "positive" match, return the vnode */ - if (pcp->pshminfo) { - pshmstats.goodhits++; - /* TOUCH(ncp); */ - *pshmp = pcp->pshminfo; - *pcache = pcp; - if (addref) - pcp->pshminfo->pshm_usecount++; - return PSHMCACHE_FOUND; - } - - /* - * We found a "negative" match, ENOENT notifies client of this match. - */ - pshmstats.neghits++; - return PSHMCACHE_NEGATIVE; + PSHM_SUBSYS_ASSERT_HELD(); + return RB_FIND(pshmhead, &pshm_head, look); } /* - * Add an entry to the cache. - * XXX should be static? + * Add a new entry to the cache. */ -static int -pshm_cache_add(struct pshminfo *pshmp, struct pshmname *pnp, struct pshmcache *pcp) +static void +pshm_cache_add(pshm_info_t *entry) { - struct pshmhashhead *pcpp; - struct pshminfo *dpinfo; - struct pshmcache *dpcp; + pshm_info_t *conflict; -#if DIAGNOSTIC - if (pnp->pshm_namelen > PSHMNAMLEN) - panic("cache_enter: name too long"); -#endif - - - /* if the entry has already been added by some one else return */ - if (pshm_cache_search(&dpinfo, pnp, &dpcp, 0) == PSHMCACHE_FOUND) { - return EEXIST; + PSHM_SUBSYS_ASSERT_HELD(); + conflict = RB_INSERT(pshmhead, &pshm_head, entry); + if (conflict != NULL) { + panic("pshm_cache_add() found %p", conflict); } pshmnument++; +} - /* - * Fill in cache info, if vp is NULL this is a "negative" cache entry. - */ - pcp->pshminfo = pshmp; - pcp->pshm_nlen = pnp->pshm_namelen; - bcopy(pnp->pshm_nameptr, pcp->pshm_name, (unsigned)pcp->pshm_nlen); - pcpp = PSHMHASH(pnp); -#if DIAGNOSTIC - { - struct pshmcache *p; - - for (p = pcpp->lh_first; p != 0; p = p->pshm_hash.le_next) - if (p == pcp) - panic("cache_enter: duplicate"); - } -#endif - LIST_INSERT_HEAD(pcpp, pcp, pshm_hash); - return 0; +/* + * Remove the given entry from the red black tree. + */ +static void +pshm_cache_delete(pshm_info_t *entry) +{ + PSHM_SUBSYS_ASSERT_HELD(); + assert(!(entry->pshm_flags & PSHM_REMOVED)); + RB_REMOVE(pshmhead, &pshm_head, entry); + pshmnument--; } /* - * Name cache initialization, from vfs_init() when we are booting + * Initialize the red black tree. */ void pshm_cache_init(void) { - pshmhashtbl = hashinit(desiredvnodes / 8, M_SHM, &pshmhash); + RB_INIT(&pshm_head); } /* - * Invalidate all entries and delete all objects associated with it. Entire - * non Kernel entries are going away. Just dump'em all - * - * We actually just increment the v_id, that will do it. The entries will - * be purged by lookup as they get found. If the v_id wraps around, we - * need to ditch the entire cache, to avoid confusion. No valid vnode will - * ever have (v_id == 0). + * Invalidate all entries and delete all objects associated with them + * XXX - due to the reference counting, this only works if all userland + * references to it via file descriptors are also closed already. Is this + * known to be called after all user processes are killed? 
*/ int -pshm_cache_purge_all(__unused proc_t p) +pshm_cache_purge_all(__unused proc_t proc) { - struct pshmcache *pcp, *tmppcp; - struct pshmhashhead *pcpp; + pshm_info_t *p; + pshm_info_t *tmp; int error = 0; - if (kauth_cred_issuser(kauth_cred_get()) == 0) + if (kauth_cred_issuser(kauth_cred_get()) == 0) { return EPERM; + } PSHM_SUBSYS_LOCK(); - for (pcpp = &pshmhashtbl[pshmhash]; pcpp >= pshmhashtbl; pcpp--) { - LIST_FOREACH_SAFE(pcp, pcpp, pshm_hash, tmppcp) { - assert(pcp->pshm_nlen); - error = pshm_unlink_internal(pcp->pshminfo, pcp); - if (error) - goto out; + RB_FOREACH_SAFE(p, pshmhead, &pshm_head, tmp) { + error = pshm_unlink_internal(p); + if (error) { /* XXX: why give up on failure, should keep going */ + goto out; } } assert(pshmnument == 0); @@ -369,105 +313,86 @@ out: PSHM_SUBSYS_UNLOCK(); - if (error) - printf("%s: Error %d removing shm cache: %ld remain!\n", - __func__, error, pshmnument); + if (error) { + printf("%s: Error %d removing posix shm cache: %ld remain!\n", + __func__, error, pshmnument); + } return error; } -static void -pshm_cache_delete(struct pshmcache *pcp) +/* + * Utility to get the shared memory name from userspace and + * populate a pshm_info_t with it. If there's a problem + * reading the name or it's malformed, an error code is returned. + */ +static int +pshm_get_name(pshm_info_t *pinfo, const user_addr_t user_addr) { -#if DIAGNOSTIC - if (pcp->pshm_hash.le_prev == 0) - panic("namecache purge le_prev"); - if (pcp->pshm_hash.le_next == pcp) - panic("namecache purge le_next"); -#endif /* DIAGNOSTIC */ - LIST_REMOVE(pcp, pshm_hash); - pcp->pshm_hash.le_prev = 0; - pshmnument--; -} + size_t bytes_copied = 0; + int error; + error = copyinstr(user_addr, &pinfo->pshm_hdr.pshm_name[0], PSHMNAMLEN + 1, &bytes_copied); + if (error != 0) { + return error; + } + assert(bytes_copied <= PSHMNAMLEN + 1); + assert(pinfo->pshm_hdr.pshm_name[bytes_copied - 1] == 0); + if (bytes_copied < 2) { /* 2: expect at least one character and terminating zero */ + return EINVAL; + } + AUDIT_ARG(text, &pinfo->pshm_hdr.pshm_name[0]); + return 0; +} + +/* + * Process a shm_open() system call. + */ int shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval) { - size_t i; - int indx, error; - struct pshmname nd; - struct pshminfo *pinfo; + int indx; + int error = 0; + pshm_info_t *pinfo = NULL; + pshm_info_t *new_pinfo = NULL; + pshmnode_t *new_pnode = NULL; struct fileproc *fp = NULL; - char *pnbuf = NULL; - struct pshminfo *new_pinfo = PSHMINFO_NULL; - struct pshmnode *new_pnode = PSHMNODE_NULL; - struct pshmcache *pcache = PSHMCACHE_NULL; /* ignored on return */ - char * nameptr; - char * cp; - size_t pathlen, plen; - int fmode ; - int cmode = uap->mode; - int incache = 0; - struct pshmcache *pcp = NULL; + int fmode; + int cmode = uap->mode; + bool incache = false; + bool have_label = false; AUDIT_ARG(fflags, uap->oflag); AUDIT_ARG(mode, uap->mode); - pinfo = PSHMINFO_NULL; - /* - * Preallocate everything we might need up front to avoid taking - * and dropping the lock, opening us up to race conditions. + * Allocate data structures we need. We parse the userspace name into + * a pshm_info_t, even when we don't need to O_CREAT.
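+ * The parsed name also serves as the red-black tree lookup key for the + * cache search below.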
 */
-	MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
-	if (pnbuf == NULL) {
+	MALLOC(new_pinfo, pshm_info_t *, sizeof(pshm_info_t), M_SHM, M_WAITOK | M_ZERO);
+	if (new_pinfo == NULL) {
 		error = ENOSPC;
 		goto bad;
 	}
-	pathlen = MAXPATHLEN;
-	error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen);
-	if (error) {
-		goto bad;
-	}
-	AUDIT_ARG(text, pnbuf);
-	if (pathlen > PSHMNAMLEN) {
-		error = ENAMETOOLONG;
-		goto bad;
-	}
-#ifdef PSXSHM_NAME_RESTRICT
-	nameptr = pnbuf;
-	if (*nameptr == '/') {
-		while (*(nameptr++) == '/') {
-			plen--;
-			error = EINVAL;
-			goto bad;
-		}
-	} else {
-		error = EINVAL;
+
+	/*
+	 * Get and check the name.
+	 */
+	error = pshm_get_name(new_pinfo, uap->name);
+	if (error != 0) {
 		goto bad;
 	}
-#endif /* PSXSHM_NAME_RESTRICT */
-
-	plen = pathlen;
-	nameptr = pnbuf;
-	nd.pshm_nameptr = nameptr;
-	nd.pshm_namelen = plen;
-	nd. pshm_hash =0;
-
-	for (cp = nameptr, i=1; *cp != 0 && i <= plen; i++, cp++) {
-		nd.pshm_hash += (unsigned char)*cp * i;
-	}
 
 	/*
-	 * attempt to allocate a new fp; if unsuccessful, the fp will be
+	 * Attempt to allocate a new fp. If unsuccessful, the fp will be
 	 * left unmodified (NULL).
 	 */
 	error = falloc(p, &fp, &indx, vfs_context_current());
-	if (error)
+	if (error) {
 		goto bad;
+	}
 
-	cmode &=  ALLPERMS;
+	cmode &= ALLPERMS;
 
 	fmode = FFLAGS(uap->oflag);
 	if ((fmode & (FREAD | FWRITE)) == 0) {
@@ -476,164 +401,107 @@ shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
 	}
 
 	/*
-	 * We allocate a new entry if we are less than the maximum
-	 * allowed and the one at the front of the LRU list is in use.
-	 * Otherwise we use the one at the front of the LRU list.
+	 * Will need a new pnode for the file pointer
 	 */
-	MALLOC(pcp, struct pshmcache *, sizeof(struct pshmcache), M_SHM, M_WAITOK|M_ZERO);
-	if (pcp == NULL) {
+	MALLOC(new_pnode, pshmnode_t *, sizeof(pshmnode_t), M_SHM, M_WAITOK | M_ZERO);
+	if (new_pnode == NULL) {
 		error = ENOSPC;
 		goto bad;
 	}
 
-	MALLOC(new_pinfo, struct pshminfo *, sizeof(struct pshminfo), M_SHM, M_WAITOK|M_ZERO);
-	if (new_pinfo == PSHMINFO_NULL) {
-		error = ENOSPC;
-		goto bad;
-	}
+	/*
+	 * If creating a new segment, fill in its information.
+	 * If we find a pre-existing one in the cache lookup we'll just toss this one later.
	 */
+	if (fmode & O_CREAT) {
+		new_pinfo->pshm_usecount = 2; /* one each for: file pointer, shm_unlink */
+		new_pinfo->pshm_length = 0;
+		new_pinfo->pshm_mode = cmode;
+		new_pinfo->pshm_uid = kauth_getuid();
+		new_pinfo->pshm_gid = kauth_getgid();
+		SLIST_INIT(&new_pinfo->pshm_mobjs);
 #if CONFIG_MACF
-	mac_posixshm_label_init(new_pinfo);
+		mac_posixshm_label_init(&new_pinfo->pshm_hdr);
+		have_label = true;
+		error = mac_posixshm_check_create(kauth_cred_get(), new_pinfo->pshm_hdr.pshm_name);
+		if (error) {
+			goto bad;
+		}
 #endif
-
-	MALLOC(new_pnode, struct pshmnode *, sizeof(struct pshmnode), M_SHM, M_WAITOK|M_ZERO);
-	if (new_pnode == PSHMNODE_NULL) {
-		error = ENOSPC;
-		goto bad;
+	}
 
-	PSHM_SUBSYS_LOCK();
-
 	/*
-	 * If we find the entry in the cache, this will take a reference,
-	 * allowing us to unlock it for the permissions check.
+	 * Look up the named shared memory segment in the cache, possibly adding
+	 * it for O_CREAT.
*/ - error = pshm_cache_search(&pinfo, &nd, &pcache, 1); - - PSHM_SUBSYS_UNLOCK(); + PSHM_SUBSYS_LOCK(); - if (error == PSHMCACHE_NEGATIVE) { - error = EINVAL; - goto bad; - } + pinfo = pshm_cache_search(new_pinfo); + if (pinfo != NULL) { + incache = true; - if (error == PSHMCACHE_NOTFOUND) { - incache = 0; - if (fmode & O_CREAT) { - /* create a new one (commit the allocation) */ - pinfo = new_pinfo; - pinfo->pshm_flags = PSHM_DEFINED | PSHM_INCREATE; - pinfo->pshm_usecount = 1; /* existence reference */ - pinfo->pshm_mode = cmode; - pinfo->pshm_uid = kauth_getuid(); - pinfo->pshm_gid = kauth_getgid(); - bcopy(pnbuf, &pinfo->pshm_name[0], pathlen); - pinfo->pshm_name[pathlen]=0; -#if CONFIG_MACF - error = mac_posixshm_check_create(kauth_cred_get(), nameptr); - if (error) { - goto bad; - } - mac_posixshm_label_associate(kauth_cred_get(), pinfo, nameptr); -#endif - } - } else { - incache = 1; - if (fmode & O_CREAT) { - /* already exists */ - if ((fmode & O_EXCL)) { - AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, - pinfo->pshm_gid, - pinfo->pshm_mode); - - /* shm obj exists and opened O_EXCL */ - error = EEXIST; - goto bad; - } - - if( pinfo->pshm_flags & PSHM_INDELETE) { - error = ENOENT; - goto bad; - } - AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, - pinfo->pshm_gid, pinfo->pshm_mode); -#if CONFIG_MACF - if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) { - goto bad; - } -#endif - if ( (error = pshm_access(pinfo, fmode, kauth_cred_get(), p)) ) { - goto bad; - } - } - } - if (!(fmode & O_CREAT)) { - if (!incache) { - /* O_CREAT is not set and the object does not exist */ - error = ENOENT; - goto bad; - } - if( pinfo->pshm_flags & PSHM_INDELETE) { - error = ENOENT; - goto bad; - } -#if CONFIG_MACF - if ((error = mac_posixshm_check_open(kauth_cred_get(), pinfo, fmode))) { - goto bad; + /* Get a new reference to go with the file pointer.*/ + error = pshm_ref(pinfo); + if (error) { + pinfo = NULL; /* so cleanup code doesn't deref */ + goto bad_locked; } -#endif - if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) { - goto bad; + /* can't have pre-existing if O_EXCL */ + if ((fmode & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) { + error = EEXIST; + goto bad_locked; } - } - if (fmode & O_TRUNC) { - error = EINVAL; - goto bad; - } - - PSHM_SUBSYS_LOCK(); + /* O_TRUNC is only valid while length is not yet set */ + if ((fmode & O_TRUNC) && + (pinfo->pshm_flags & (PSHM_ALLOCATING | PSHM_ALLOCATED))) { + error = EINVAL; + goto bad_locked; + } + } else { + incache = false; -#if DIAGNOSTIC - if (fmode & FWRITE) - pinfo->pshm_writecount++; - if (fmode & FREAD) - pinfo->pshm_readcount++; -#endif - if (!incache) { - /* if successful, this will consume the pcp */ - if ( (error = pshm_cache_add(pinfo, &nd, pcp)) ) { + /* if it wasn't found, must have O_CREAT */ + if (!(fmode & O_CREAT)) { + error = ENOENT; goto bad_locked; } - /* - * add reference for the new entry; otherwise, we obtained - * one from the cache hit earlier. - */ - pinfo->pshm_usecount++; + + /* Add the new region to the cache. 
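The cache-hit branch above is what gives shm_open() its O_EXCL behavior: a pre-existing entry plus O_CREAT | O_EXCL fails with EEXIST. Seen from userspace, that contract behaves as in this small, self-contained example (the segment name is arbitrary):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
    	/* First exclusive create succeeds and inserts the name in the cache. */
    	int fd = shm_open("/demo_shm", O_RDWR | O_CREAT | O_EXCL, 0600);
    	if (fd < 0) {
    		perror("shm_open");
    		return 1;
    	}

    	/* A second exclusive create of the same name hits the cache and fails. */
    	if (shm_open("/demo_shm", O_RDWR | O_CREAT | O_EXCL, 0600) < 0 &&
    	    errno == EEXIST) {
    		printf("second O_EXCL create failed with EEXIST, as expected\n");
    	}

    	close(fd);
    	shm_unlink("/demo_shm");
    	return 0;
    }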
 */
+		pinfo = new_pinfo;
+		pshm_cache_add(pinfo);
+		new_pinfo = NULL;	/* so that it doesn't get free'd */
 	}
 
-	pinfo->pshm_flags &= ~PSHM_INCREATE;
-	new_pnode->pinfo = pinfo;
 
 	PSHM_SUBSYS_UNLOCK();
 
 	/*
-	 * if incache, we did not use the new pcp or new_pinfo and must
-	 * free them
+	 * Check we have permission to access any pre-existing segment.
 	 */
 	if (incache) {
-		FREE(pcp, M_SHM);
-
-		if (new_pinfo != PSHMINFO_NULL) {
+		if (fmode & O_CREAT) {
+			AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid,
+			    pinfo->pshm_gid, pinfo->pshm_mode);
+		}
 #if CONFIG_MACF
-			mac_posixshm_label_destroy(new_pinfo);
+		if ((error = mac_posixshm_check_open(kauth_cred_get(), &pinfo->pshm_hdr, fmode))) {
+			goto bad;
+		}
 #endif
-			FREE(new_pinfo, M_SHM);
+		if ((error = pshm_access(pinfo, fmode, kauth_cred_get(), p))) {
+			goto bad;
 		}
+	} else {
+#if CONFIG_MACF
+		mac_posixshm_label_associate(kauth_cred_get(), &pinfo->pshm_hdr, pinfo->pshm_hdr.pshm_name);
+#endif
 	}
 
 	proc_fdlock(p);
 	fp->f_flag = fmode & FMASK;
 	fp->f_ops = &pshmops;
+	new_pnode->pinfo = pinfo;
 	fp->f_data = (caddr_t)new_pnode;
 	*fdflags(p, indx) |= UF_EXCLOSE;
 	procfdtbl_releasefd(p, indx, NULL);
@@ -641,113 +509,137 @@ shm_open(proc_t p, struct shm_open_args *uap, int32_t *retval)
 	proc_fdunlock(p);
 	*retval = indx;
-	FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
-	return (0);
+	error = 0;
+	goto done;
 
 bad_locked:
 	PSHM_SUBSYS_UNLOCK();
bad:
 	/*
-	 * If we obtained the entry from the cache, we need to drop the
-	 * reference; holding the reference may have prevented unlinking,
-	 * so we need to call pshm_close() to get the full effect.
+	 * Drop any new reference to a pre-existing shared memory region.
 	 */
-	if (incache) {
+	if (incache && pinfo != NULL) {
 		PSHM_SUBSYS_LOCK();
-		pshm_close(pinfo, 1);
+		pshm_deref(pinfo);
 		PSHM_SUBSYS_UNLOCK();
 	}
 
-	if (pcp != NULL)
-		FREE(pcp, M_SHM);
-
-	if (new_pnode != PSHMNODE_NULL)
+	/*
+	 * Delete any allocated unused data structures.
+	 */
+	if (new_pnode != NULL) {
 		FREE(new_pnode, M_SHM);
+	}
 
-	if (fp != NULL)
+	if (fp != NULL) {
 		fp_free(p, indx, fp);
+	}
 
-	if (new_pinfo != PSHMINFO_NULL) {
+done:
+	if (new_pinfo != NULL) {
 #if CONFIG_MACF
-		mac_posixshm_label_destroy(new_pinfo);
+		if (have_label) {
+			mac_posixshm_label_destroy(&new_pinfo->pshm_hdr);
+		}
 #endif
 		FREE(new_pinfo, M_SHM);
 	}
 
-	if (pnbuf != NULL)
-		FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI);
-	return (error);
+	return error;
 }
 
+/*
+ * The truncate call associates memory with a shared memory region. It can
+ * only be successfully done with a non-zero length once per shared memory region.
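A point worth stressing from the comment above: unlike a regular file, a POSIX shm region on this implementation can be given a size exactly once. A hedged userspace illustration of that one-shot behavior (the EINVAL on the second call is this implementation's behavior as described above, not a POSIX guarantee):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
    	int fd = shm_open("/demo_shm", O_RDWR | O_CREAT, 0600);
    	if (fd < 0) {
    		return 1;
    	}

    	/* First non-zero truncate allocates the backing memory objects. */
    	(void)ftruncate(fd, 4096);

    	/* A second truncate is expected to fail with EINVAL here, since
    	 * PSHM_ALLOCATED is already set on the region. */
    	(void)ftruncate(fd, 8192);

    	close(fd);
    	shm_unlink("/demo_shm");
    	return 0;
    }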
+ */ int -pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd, - off_t length, __unused int32_t *retval) +pshm_truncate( + __unused proc_t p, + struct fileproc *fp, + __unused int fd, + off_t length, + __unused int32_t *retval) { - struct pshminfo * pinfo; - struct pshmnode * pnode ; - kern_return_t kret; + pshm_info_t *pinfo; + pshmnode_t *pnode; + kern_return_t kret; mem_entry_name_port_t mem_object; - mach_vm_size_t total_size, alloc_size; - memory_object_size_t mosize; - struct pshmobj *pshmobj, *pshmobj_next, **pshmobj_next_p; - vm_map_t user_map; -#if CONFIG_MACF - int error; -#endif + mach_vm_size_t total_size, alloc_size; + memory_object_size_t mosize; + pshm_mobj_t *pshmobj, *pshmobj_last; + vm_map_t user_map; + int error; user_map = current_map(); if (fp->f_type != DTYPE_PSXSHM) { - return(EINVAL); + return EINVAL; } - - if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL ) - return(EINVAL); +#if 0 + /* + * Can't enforce this yet, some third party tools don't + * specify O_RDWR like they ought to. See radar 48692182 + */ + /* ftruncate() requires write permission */ + if (!(fp->f_flag & FWRITE)) { + return EINVAL; + } +#endif PSHM_SUBSYS_LOCK(); - if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) { + if (((pnode = (pshmnode_t *)fp->f_data)) == NULL) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } - if ((pinfo->pshm_flags & (PSHM_DEFINED|PSHM_ALLOCATING|PSHM_ALLOCATED)) - != PSHM_DEFINED) { + + if ((pinfo = pnode->pinfo) == NULL) { + PSHM_SUBSYS_UNLOCK(); + return EINVAL; + } + + /* We only allow one ftruncate() per lifetime of the shm object. */ + if (pinfo->pshm_flags & (PSHM_ALLOCATING | PSHM_ALLOCATED)) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } + #if CONFIG_MACF - error = mac_posixshm_check_truncate(kauth_cred_get(), pinfo, length); + error = mac_posixshm_check_truncate(kauth_cred_get(), &pinfo->pshm_hdr, length); if (error) { PSHM_SUBSYS_UNLOCK(); - return(error); + return error; } #endif + /* + * Grab an extra reference, so we can drop the lock while allocating and + * ensure the objects don't disappear. 
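The allocation loop in the next hunk carves the rounded-up length into ANON_MAX_SIZE-capped pieces, one Mach memory entry per piece. The arithmetic on its own, with a stand-in value for ANON_MAX_SIZE (the real constant lives in the VM headers):

    #include <inttypes.h>
    #include <stdio.h>

    #define ANON_MAX_SIZE (1ULL << 32)	/* stand-in; not the kernel's value */

    /* Mirror of the loop that follows: carve `total` into ANON_MAX_SIZE-capped
     * chunks, one memory entry per chunk. */
    static void
    plan_chunks(uint64_t total)
    {
    	uint64_t pos, chunk;

    	for (pos = 0; pos < total; pos += chunk) {
    		chunk = total - pos;
    		if (chunk > ANON_MAX_SIZE) {
    			chunk = ANON_MAX_SIZE;
    		}
    		printf("entry at offset %" PRIu64 ", size %" PRIu64 "\n", pos, chunk);
    	}
    }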
+ */ + error = pshm_ref(pinfo); + if (error) { + PSHM_SUBSYS_UNLOCK(); + return error; + } + /* set ALLOCATING, so another truncate can't start */ pinfo->pshm_flags |= PSHM_ALLOCATING; - total_size = vm_map_round_page(length, - vm_map_page_mask(user_map)); - pshmobj_next_p = &pinfo->pshm_memobjects; - - for (alloc_size = 0; - alloc_size < total_size; - alloc_size += mosize) { + total_size = vm_map_round_page(length, vm_map_page_mask(user_map)); + pshmobj_last = NULL; + for (alloc_size = 0; alloc_size < total_size; alloc_size += mosize) { PSHM_SUBSYS_UNLOCK(); + /* get a memory object back some of the shared memory */ mosize = MIN(total_size - alloc_size, ANON_MAX_SIZE); - kret = mach_make_memory_entry_64( - VM_MAP_NULL, - &mosize, - 0, - MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT, - &mem_object, - 0); + kret = mach_make_memory_entry_64(VM_MAP_NULL, &mosize, 0, + MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT, &mem_object, 0); - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { goto out; + } - MALLOC(pshmobj, struct pshmobj *, sizeof (struct pshmobj), - M_SHM, M_WAITOK); + /* get a list entry to track the memory object */ + MALLOC(pshmobj, pshm_mobj_t *, sizeof(pshm_mobj_t), M_SHM, M_WAITOK); if (pshmobj == NULL) { kret = KERN_NO_SPACE; mach_memory_entry_port_release(mem_object); @@ -757,79 +649,86 @@ pshm_truncate(__unused proc_t p, struct fileproc *fp, __unused int fd, PSHM_SUBSYS_LOCK(); - pshmobj->pshmo_memobject = (void *) mem_object; + /* link in the new entry */ + pshmobj->pshmo_memobject = (void *)mem_object; pshmobj->pshmo_size = mosize; - pshmobj->pshmo_next = NULL; - - *pshmobj_next_p = pshmobj; - pshmobj_next_p = &pshmobj->pshmo_next; + SLIST_NEXT(pshmobj, pshmo_next) = NULL; + + if (pshmobj_last == NULL) { + SLIST_FIRST(&pinfo->pshm_mobjs) = pshmobj; + } else { + SLIST_INSERT_AFTER(pshmobj_last, pshmobj, pshmo_next); + } + pshmobj_last = pshmobj; } - + + /* all done, change flags to ALLOCATED and return success */ pinfo->pshm_flags |= PSHM_ALLOCATED; pinfo->pshm_flags &= ~(PSHM_ALLOCATING); pinfo->pshm_length = total_size; + pshm_deref(pinfo); /* drop the "allocating" reference */ PSHM_SUBSYS_UNLOCK(); - return(0); + return 0; out: + /* clean up any partially allocated objects */ PSHM_SUBSYS_LOCK(); - for (pshmobj = pinfo->pshm_memobjects; - pshmobj != NULL; - pshmobj = pshmobj_next) { - pshmobj_next = pshmobj->pshmo_next; + while ((pshmobj = SLIST_FIRST(&pinfo->pshm_mobjs)) != NULL) { + SLIST_REMOVE_HEAD(&pinfo->pshm_mobjs, pshmo_next); + PSHM_SUBSYS_UNLOCK(); mach_memory_entry_port_release(pshmobj->pshmo_memobject); FREE(pshmobj, M_SHM); + PSHM_SUBSYS_LOCK(); } - pinfo->pshm_memobjects = NULL; pinfo->pshm_flags &= ~PSHM_ALLOCATING; + pshm_deref(pinfo); /* drop the "allocating" reference */ PSHM_SUBSYS_UNLOCK(); switch (kret) { case KERN_INVALID_ADDRESS: case KERN_NO_SPACE: - return (ENOMEM); + return ENOMEM; case KERN_PROTECTION_FAILURE: - return (EACCES); + return EACCES; default: - return (EINVAL); - + return EINVAL; } } int -pshm_stat(struct pshmnode *pnode, void *ub, int isstat64) +pshm_stat(pshmnode_t *pnode, void *ub, int isstat64) { - struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */ + struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */ struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */ - struct pshminfo *pinfo; + pshm_info_t *pinfo; #if CONFIG_MACF int error; #endif - + PSHM_SUBSYS_LOCK(); - if ((pinfo = pnode->pinfo) == PSHMINFO_NULL){ + if ((pinfo = pnode->pinfo) == 
NULL) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } #if CONFIG_MACF - error = mac_posixshm_check_stat(kauth_cred_get(), pinfo); + error = mac_posixshm_check_stat(kauth_cred_get(), &pinfo->pshm_hdr); if (error) { PSHM_SUBSYS_UNLOCK(); - return(error); + return error; } #endif if (isstat64 != 0) { sb64 = (struct stat64 *)ub; - bzero(sb64, sizeof(struct stat64)); + bzero(sb64, sizeof(struct stat64)); sb64->st_mode = pinfo->pshm_mode; sb64->st_uid = pinfo->pshm_uid; sb64->st_gid = pinfo->pshm_gid; sb64->st_size = pinfo->pshm_length; } else { sb = (struct stat *)ub; - bzero(sb, sizeof(struct stat)); + bzero(sb, sizeof(struct stat)); sb->st_mode = pinfo->pshm_mode; sb->st_uid = pinfo->pshm_uid; sb->st_gid = pinfo->pshm_gid; @@ -837,58 +736,62 @@ pshm_stat(struct pshmnode *pnode, void *ub, int isstat64) } PSHM_SUBSYS_UNLOCK(); - return(0); + return 0; } /* - * This is called only from shm_open which holds pshm_lock(); - * XXX This code is repeated many times + * Verify access to a shared memory region. */ -int -pshm_access(struct pshminfo *pinfo, int mode, kauth_cred_t cred, __unused proc_t p) +static int +pshm_access(pshm_info_t *pinfo, int mode, kauth_cred_t cred, __unused proc_t p) { int mode_req = ((mode & FREAD) ? S_IRUSR : 0) | - ((mode & FWRITE) ? S_IWUSR : 0); + ((mode & FWRITE) ? S_IWUSR : 0); /* Otherwise, user id 0 always gets access. */ - if (!suser(cred, NULL)) - return (0); + if (!suser(cred, NULL)) { + return 0; + } - return(posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req)); + return posix_cred_access(cred, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode, mode_req); } int -pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct fileproc *fp, off_t pageoff) +pshm_mmap( + __unused proc_t p, + struct mmap_args *uap, + user_addr_t *retval, + struct fileproc *fp, + off_t pageoff) { - vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr; - vm_map_size_t user_size = (vm_map_size_t)uap->len ; - vm_map_offset_t user_start_addr; - vm_map_size_t map_size, mapped_size; - int prot = uap->prot; - int max_prot = VM_PROT_DEFAULT; - int flags = uap->flags; + vm_map_offset_t user_addr = (vm_map_offset_t)uap->addr; + vm_map_size_t user_size = (vm_map_size_t)uap->len; + vm_map_offset_t user_start_addr; + vm_map_size_t map_size, mapped_size; + int prot = uap->prot; + int max_prot = VM_PROT_DEFAULT; + int flags = uap->flags; vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos; vm_object_offset_t map_pos; - vm_map_t user_map; - int alloc_flags; + vm_map_t user_map; + int alloc_flags; vm_map_kernel_flags_t vmk_flags; - boolean_t docow; - kern_return_t kret; - struct pshminfo * pinfo; - struct pshmnode * pnode; - struct pshmobj * pshmobj; -#if CONFIG_MACF - int error; -#endif - - if (user_size == 0) - return(0); - - if ((flags & MAP_SHARED) == 0) - return(EINVAL); + bool docow; + kern_return_t kret = KERN_SUCCESS; + pshm_info_t *pinfo; + pshmnode_t *pnode; + pshm_mobj_t *pshmobj; + int error; + + if (user_size == 0) { + return 0; + } + if (!(flags & MAP_SHARED)) { + return EINVAL; + } - /* Can't allow write permission if the shm_open() didn't */ + /* Can't allow write permission if the shm_open() didn't allow them. 
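pshm_access(), shown above, reduces the open mode to owner permission bits before handing the decision to posix_cred_access(). The mapping is small enough to restate standalone (FREAD/FWRITE are kernel-internal open-mode flags, defined locally here for illustration with their conventional BSD values):

    #include <sys/stat.h>

    #define FREAD	0x0001	/* assumed BSD values, for illustration only */
    #define FWRITE	0x0002

    /* The owner-permission bits pshm_access() asks posix_cred_access() about. */
    static int
    wanted_mode_bits(int mode)
    {
    	return ((mode & FREAD) ? S_IRUSR : 0) |
    	       ((mode & FWRITE) ? S_IWUSR : 0);
    }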
*/ if (!(fp->f_flag & FWRITE)) { if (prot & VM_PROT_WRITE) { return EPERM; @@ -896,56 +799,73 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct max_prot &= ~VM_PROT_WRITE; } - if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL ) - return(EINVAL); - PSHM_SUBSYS_LOCK(); - if ((pinfo = pnode->pinfo) == PSHMINFO_NULL) { + pnode = (pshmnode_t *)fp->f_data; + if (pnode == NULL) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } - if ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED) { + pinfo = pnode->pinfo; + if (pinfo == NULL) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } + + if (!(pinfo->pshm_flags & PSHM_ALLOCATED)) { + PSHM_SUBSYS_UNLOCK(); + return EINVAL; + } + if (user_size > (vm_map_size_t)pinfo->pshm_length) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } + vm_map_size_t end_pos = 0; if (os_add_overflow(user_size, file_pos, &end_pos)) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } if (end_pos > (vm_map_size_t)pinfo->pshm_length) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } - if ((pshmobj = pinfo->pshm_memobjects) == NULL) { + + pshmobj = SLIST_FIRST(&pinfo->pshm_mobjs); + if (pshmobj == NULL) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } #if CONFIG_MACF - error = mac_posixshm_check_mmap(kauth_cred_get(), pinfo, prot, flags); + error = mac_posixshm_check_mmap(kauth_cred_get(), &pinfo->pshm_hdr, prot, flags); if (error) { PSHM_SUBSYS_UNLOCK(); - return(error); + return error; } #endif + /* Grab an extra reference, so we can drop the lock while mapping. */ + error = pshm_ref(pinfo); + if (error) { + PSHM_SUBSYS_UNLOCK(); + return error; + } PSHM_SUBSYS_UNLOCK(); user_map = current_map(); - if ((flags & MAP_FIXED) == 0) { + if (!(flags & MAP_FIXED)) { alloc_flags = VM_FLAGS_ANYWHERE; user_addr = vm_map_round_page(user_addr, - vm_map_page_mask(user_map)); + vm_map_page_mask(user_map)); } else { if (user_addr != vm_map_round_page(user_addr, - vm_map_page_mask(user_map))) - return (EINVAL); + vm_map_page_mask(user_map))) { + error = EINVAL; + goto out_deref; + } + /* * We do not get rid of the existing mappings here because * it wouldn't be atomic (see comment in mmap()). We let @@ -954,36 +874,36 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct */ alloc_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE; } - docow = FALSE; + docow = false; mapped_size = 0; vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; /* reserve the entire space first... */ kret = vm_map_enter_mem_object(user_map, - &user_addr, - user_size, - 0, - alloc_flags, - vmk_flags, - VM_KERN_MEMORY_NONE, - IPC_PORT_NULL, - 0, - FALSE, - VM_PROT_NONE, - VM_PROT_NONE, - VM_INHERIT_NONE); + &user_addr, + user_size, + 0, + alloc_flags, + vmk_flags, + VM_KERN_MEMORY_NONE, + IPC_PORT_NULL, + 0, + false, + VM_PROT_NONE, + VM_PROT_NONE, + VM_INHERIT_NONE); user_start_addr = user_addr; if (kret != KERN_SUCCESS) { - goto out; + goto out_deref; } - /* ... and overwrite with the real mappings */ - for (map_pos = 0, pshmobj = pinfo->pshm_memobjects; - user_size != 0; - map_pos += pshmobj->pshmo_size, pshmobj = pshmobj->pshmo_next) { + /* Now overwrite with the real mappings. */ + for (map_pos = 0, pshmobj = SLIST_FIRST(&pinfo->pshm_mobjs); + user_size != 0; + map_pos += pshmobj->pshmo_size, pshmobj = SLIST_NEXT(pshmobj, pshmo_next)) { if (pshmobj == NULL) { /* nothing there to map !? 
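The mapping path above is deliberately two-phase: a single vm_map_enter_mem_object() call reserves the whole range with VM_PROT_NONE, and the loop then overwrites pieces of that reservation with the real memory-object mappings, so the address range can never be half-claimed by another thread. A userspace analogue of the same reserve-then-overwrite idea using plain mmap(), illustrative rather than the kernel's code:

    #include <stddef.h>
    #include <sys/mman.h>
    #include <sys/types.h>

    /* Reserve `len` bytes with no access rights, then atomically replace the
     * first `piece` bytes of the reservation with a real shared mapping of fd. */
    static void *
    reserve_then_map(int fd, size_t len, size_t piece)
    {
    	/* Phase 1: claim the address range, no access permitted yet. */
    	void *base = mmap(NULL, len, PROT_NONE,
    	    MAP_PRIVATE | MAP_ANON, -1, 0);
    	if (base == MAP_FAILED) {
    		return NULL;
    	}

    	/* Phase 2: overwrite part of the reservation with real pages. */
    	if (mmap(base, piece, PROT_READ | PROT_WRITE,
    	    MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED) {
    		(void)munmap(base, len);
    		return NULL;
    	}
    	return base;
    }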
*/ - goto out; + goto out_deref; } if (file_pos >= map_pos + pshmobj->pshmo_size) { continue; @@ -1007,8 +927,9 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct prot, max_prot, VM_INHERIT_SHARE); - if (kret != KERN_SUCCESS) - goto out; + if (kret != KERN_SUCCESS) { + goto out_deref; + } user_addr += map_size; user_size -= map_size; @@ -1018,78 +939,51 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct PSHM_SUBSYS_LOCK(); pnode->mapp_addr = user_start_addr; - pnode->map_size = mapped_size; pinfo->pshm_flags |= (PSHM_MAPPED | PSHM_INUSE); PSHM_SUBSYS_UNLOCK(); -out: +out_deref: + PSHM_SUBSYS_LOCK(); + pshm_deref(pinfo); /* drop the extra reference we had while mapping. */ + PSHM_SUBSYS_UNLOCK(); if (kret != KERN_SUCCESS) { if (mapped_size != 0) { (void) mach_vm_deallocate(current_map(), - user_start_addr, - mapped_size); + user_start_addr, + mapped_size); } } switch (kret) { case KERN_SUCCESS: *retval = (user_start_addr + pageoff); - return (0); + return 0; case KERN_INVALID_ADDRESS: case KERN_NO_SPACE: - return (ENOMEM); + return ENOMEM; case KERN_PROTECTION_FAILURE: - return (EACCES); + return EACCES; default: - return (EINVAL); + return EINVAL; } - } +/* + * Remove a shared memory region name from the name lookup cache. + */ static int -pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache) +pshm_unlink_internal(pshm_info_t *pinfo) { - struct pshmobj *pshmobj, *pshmobj_next; - PSHM_SUBSYS_ASSERT_HELD(); - if (!pinfo || !pcache) + if (pinfo == NULL) { return EINVAL; + } - if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED)) == 0) - return EINVAL; - - if (pinfo->pshm_flags & PSHM_INDELETE) - return 0; - - pinfo->pshm_flags |= PSHM_INDELETE; - pinfo->pshm_usecount--; - - pshm_cache_delete(pcache); + pshm_cache_delete(pinfo); pinfo->pshm_flags |= PSHM_REMOVED; - /* release the existence reference */ - if (!pinfo->pshm_usecount) { -#if CONFIG_MACF - mac_posixshm_label_destroy(pinfo); -#endif - /* - * If this is the last reference going away on the object, - * then we need to destroy the backing object. The name - * has an implied but uncounted reference on the object, - * once it's created, since it's used as a rendezvous, and - * therefore may be subsequently reopened. - */ - for (pshmobj = pinfo->pshm_memobjects; - pshmobj != NULL; - pshmobj = pshmobj_next) { - mach_memory_entry_port_release(pshmobj->pshmo_memobject); - pshmobj_next = pshmobj->pshmo_next; - FREE(pshmobj, M_SHM); - } - FREE(pinfo,M_SHM); - } - - FREE(pcache, M_SHM); + /* release the "unlink" reference */ + pshm_deref(pinfo); return 0; } @@ -1097,211 +991,174 @@ pshm_unlink_internal(struct pshminfo *pinfo, struct pshmcache *pcache) int shm_unlink(proc_t p, struct shm_unlink_args *uap, __unused int32_t *retval) { - size_t i; - char * pnbuf; - size_t pathlen; - int error = 0; - - struct pshmname nd; - struct pshminfo *pinfo; - char * nameptr; - char * cp; - struct pshmcache *pcache = PSHMCACHE_NULL; + int error = 0; + pshm_info_t *pinfo = NULL; + pshm_info_t *name_pinfo = NULL; - pinfo = PSHMINFO_NULL; - - - MALLOC_ZONE(pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (pnbuf == NULL) { - return(ENOSPC); /* XXX non-standard */ - } - pathlen = MAXPATHLEN; - error = copyinstr(uap->name, (void *)pnbuf, MAXPATHLEN, &pathlen); - if (error) { - goto bad; - } - AUDIT_ARG(text, pnbuf); - if (pathlen > PSHMNAMLEN) { - error = ENAMETOOLONG; + /* + * Get the name from user args. 
+ */ + MALLOC(name_pinfo, pshm_info_t *, sizeof(pshm_info_t), M_SHM, M_WAITOK | M_ZERO); + if (name_pinfo == NULL) { + error = ENOSPC; goto bad; } - - nameptr = pnbuf; - -#ifdef PSXSHM_NAME_RESTRICT - if (*nameptr == '/') { - while (*(nameptr++) == '/') { - pathlen--; - error = EINVAL; - goto bad; - } - } else { + error = pshm_get_name(name_pinfo, uap->name); + if (error != 0) { error = EINVAL; goto bad; } -#endif /* PSXSHM_NAME_RESTRICT */ - - nd.pshm_nameptr = nameptr; - nd.pshm_namelen = pathlen; - nd.pshm_hash = 0; - - for (cp = nameptr, i=1; *cp != 0 && i <= pathlen; i++, cp++) { - nd.pshm_hash += (unsigned char)*cp * i; - } PSHM_SUBSYS_LOCK(); - error = pshm_cache_search(&pinfo, &nd, &pcache, 0); + pinfo = pshm_cache_search(name_pinfo); - /* During unlink lookup failure also implies ENOENT */ - if (error != PSHMCACHE_FOUND) { - PSHM_SUBSYS_UNLOCK(); + if (pinfo == NULL) { error = ENOENT; - goto bad; - - } - - if ((pinfo->pshm_flags & (PSHM_DEFINED | PSHM_ALLOCATED))==0) { - PSHM_SUBSYS_UNLOCK(); - error = EINVAL; - goto bad; - } - - if (pinfo->pshm_flags & PSHM_ALLOCATING) { - /* XXX should we wait for flag to clear and then proceed ? */ - PSHM_SUBSYS_UNLOCK(); - error = EAGAIN; - goto bad; - } - - if (pinfo->pshm_flags & PSHM_INDELETE) { - PSHM_SUBSYS_UNLOCK(); - error = 0; - goto bad; + goto bad_unlock; } #if CONFIG_MACF - error = mac_posixshm_check_unlink(kauth_cred_get(), pinfo, nameptr); + error = mac_posixshm_check_unlink(kauth_cred_get(), &pinfo->pshm_hdr, name_pinfo->pshm_hdr.pshm_name); if (error) { - PSHM_SUBSYS_UNLOCK(); - goto bad; + goto bad_unlock; } #endif - AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid, - pinfo->pshm_mode); + AUDIT_ARG(posix_ipc_perm, pinfo->pshm_uid, pinfo->pshm_gid, pinfo->pshm_mode); - /* - * following file semantics, unlink should be allowed - * for users with write permission only. + /* + * Following file semantics, unlink should normally be allowed + * for users with write permission only. We also allow the creator + * of a segment to be able to delete, even w/o write permission. + * That's because there's no equivalent of write permission for the + * directory containing a file. */ - if ( (error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p)) ) { - PSHM_SUBSYS_UNLOCK(); - goto bad; + error = pshm_access(pinfo, FWRITE, kauth_cred_get(), p); + if (error != 0 && pinfo->pshm_uid != kauth_getuid()) { + goto bad_unlock; } - error = pshm_unlink_internal(pinfo, pcache); + error = pshm_unlink_internal(pinfo); +bad_unlock: PSHM_SUBSYS_UNLOCK(); - bad: - FREE_ZONE(pnbuf, MAXPATHLEN, M_NAMEI); + if (name_pinfo != NULL) { + FREE(name_pinfo, M_SHM); + } return error; } -/* already called locked */ +/* + * Add a new reference to a shared memory region. + * Fails if we will overflow the reference counter. + */ static int -pshm_close(struct pshminfo *pinfo, int dropref) +pshm_ref(pshm_info_t *pinfo) { - int error = 0; - struct pshmobj *pshmobj, *pshmobj_next; + PSHM_SUBSYS_ASSERT_HELD(); - /* - * If we are dropping the reference we took on the cache object, don't - * enforce the allocation requirement. - */ - if ( !dropref && ((pinfo->pshm_flags & PSHM_ALLOCATED) != PSHM_ALLOCATED)) { - return(EINVAL); + if (pinfo->pshm_usecount == PSHM_MAXCOUNT) { + return EMFILE; } -#if DIAGNOSTIC - if(!pinfo->pshm_usecount) { - kprintf("negative usecount in pshm_close\n"); + pinfo->pshm_usecount++; + return 0; +} + +/* + * Dereference a pshm_info_t. Delete the region if + * this was the final reference count. 
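pshm_ref() and pshm_deref() above amount to a plain overflow-checked reference count taken and dropped under the subsystem lock, with teardown folded into the final drop. The lifecycle in miniature (the type, names, and maximum are stand-ins for pshm_info_t and PSHM_MAXCOUNT, not kernel code):

    #include <assert.h>
    #include <errno.h>
    #include <stdlib.h>

    #define REF_MAX 0x7fffffff	/* stand-in for PSHM_MAXCOUNT */

    typedef struct region {
    	int refs;		/* protected by the subsystem lock */
    } region_t;

    /* Take a reference; refuse rather than overflow (lock assumed held). */
    static int
    region_ref(region_t *r)
    {
    	if (r->refs == REF_MAX) {
    		return EMFILE;
    	}
    	r->refs++;
    	return 0;
    }

    /* Drop a reference; the last one out tears the region down. */
    static void
    region_deref(region_t *r)
    {
    	assert(r->refs > 0);
    	if (--r->refs == 0) {
    		free(r);	/* the kernel also releases memory objects here */
    	}
    }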
+ */ +static void +pshm_deref(pshm_info_t *pinfo) +{ + pshm_mobj_t *pshmobj; + + PSHM_SUBSYS_ASSERT_HELD(); + if (pinfo->pshm_usecount == 0) { + panic("negative usecount in pshm_close\n"); } -#endif /* DIAGNOSTIC */ pinfo->pshm_usecount--; /* release this fd's reference */ - if ((pinfo->pshm_flags & PSHM_REMOVED) && !pinfo->pshm_usecount) { + if (pinfo->pshm_usecount == 0) { #if CONFIG_MACF - mac_posixshm_label_destroy(pinfo); + mac_posixshm_label_destroy(&pinfo->pshm_hdr); #endif PSHM_SUBSYS_UNLOCK(); + /* - * If this is the last reference going away on the object, - * then we need to destroy the backing object. + * Release references to any backing objects. */ - for (pshmobj = pinfo->pshm_memobjects; - pshmobj != NULL; - pshmobj = pshmobj_next) { + while ((pshmobj = SLIST_FIRST(&pinfo->pshm_mobjs)) != NULL) { + SLIST_REMOVE_HEAD(&pinfo->pshm_mobjs, pshmo_next); mach_memory_entry_port_release(pshmobj->pshmo_memobject); - pshmobj_next = pshmobj->pshmo_next; FREE(pshmobj, M_SHM); } + + /* free the pinfo itself */ + FREE(pinfo, M_SHM); + PSHM_SUBSYS_LOCK(); - FREE(pinfo,M_SHM); } - return (error); } /* vfs_context_t passed to match prototype for struct fileops */ static int pshm_closefile(struct fileglob *fg, __unused vfs_context_t ctx) { - int error = EINVAL; - struct pshmnode *pnode; + int error = EINVAL; + pshmnode_t *pnode; PSHM_SUBSYS_LOCK(); - if ((pnode = (struct pshmnode *)fg->fg_data) != NULL) { - if (pnode->pinfo != PSHMINFO_NULL) { - error = pshm_close(pnode->pinfo, 0); + pnode = (pshmnode_t *)fg->fg_data; + if (pnode != NULL) { + error = 0; + fg->fg_data = NULL; /* set fg_data to NULL to avoid racing close()es */ + if (pnode->pinfo != NULL) { + pshm_deref(pnode->pinfo); + pnode->pinfo = NULL; } - FREE(pnode, M_SHM); } PSHM_SUBSYS_UNLOCK(); + if (pnode != NULL) { + FREE(pnode, M_SHM); + } - return(error); + return error; } static int pshm_read(__unused struct fileproc *fp, __unused struct uio *uio, - __unused int flags, __unused vfs_context_t ctx) + __unused int flags, __unused vfs_context_t ctx) { - return(ENOTSUP); + return ENOTSUP; } static int pshm_write(__unused struct fileproc *fp, __unused struct uio *uio, - __unused int flags, __unused vfs_context_t ctx) + __unused int flags, __unused vfs_context_t ctx) { - return(ENOTSUP); + return ENOTSUP; } static int pshm_ioctl(__unused struct fileproc *fp, __unused u_long com, - __unused caddr_t data, __unused vfs_context_t ctx) + __unused caddr_t data, __unused vfs_context_t ctx) { - return(ENOTSUP); + return ENOTSUP; } static int pshm_select(__unused struct fileproc *fp, __unused int which, __unused void *wql, - __unused vfs_context_t ctx) + __unused vfs_context_t ctx) { - return(ENOTSUP); + return ENOTSUP; } static int -pshm_kqfilter(__unused struct fileproc *fp, struct knote *kn, - __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) +pshm_kqfilter(__unused struct fileproc *fp, struct knote *kn, + __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) { kn->kn_flags = EV_ERROR; kn->kn_data = ENOTSUP; @@ -1309,47 +1166,48 @@ pshm_kqfilter(__unused struct fileproc *fp, struct knote *kn, } int -fill_pshminfo(struct pshmnode * pshm, struct pshm_info * info) +fill_pshminfo(pshmnode_t * pshm, struct pshm_info * info) { - struct pshminfo *pinfo; + pshm_info_t *pinfo; struct vinfo_stat *sb; - + PSHM_SUBSYS_LOCK(); - if ((pinfo = pshm->pinfo) == PSHMINFO_NULL){ + if ((pinfo = pshm->pinfo) == NULL) { PSHM_SUBSYS_UNLOCK(); - return(EINVAL); + return EINVAL; } sb = &info->pshm_stat; - bzero(sb, sizeof(struct 
vinfo_stat)); + bzero(sb, sizeof(struct vinfo_stat)); sb->vst_mode = pinfo->pshm_mode; sb->vst_uid = pinfo->pshm_uid; sb->vst_gid = pinfo->pshm_gid; sb->vst_size = pinfo->pshm_length; info->pshm_mappaddr = pshm->mapp_addr; - bcopy(&pinfo->pshm_name[0], &info->pshm_name[0], PSHMNAMLEN+1); + bcopy(&pinfo->pshm_hdr.pshm_name[0], &info->pshm_name[0], PSHMNAMLEN + 1); PSHM_SUBSYS_UNLOCK(); - return(0); + return 0; } #if CONFIG_MACF void pshm_label_associate(struct fileproc *fp, struct vnode *vp, vfs_context_t ctx) { - struct pshmnode *pnode; - struct pshminfo *pshm; + pshmnode_t *pnode; + pshm_info_t *pshm; PSHM_SUBSYS_LOCK(); - pnode = (struct pshmnode *)fp->f_fglob->fg_data; + pnode = (pshmnode_t *)fp->f_data; if (pnode != NULL) { pshm = pnode->pinfo; - if (pshm != NULL) + if (pshm != NULL) { mac_posixshm_vnode_label_associate( - vfs_context_ucred(ctx), pshm, pshm->pshm_label, + vfs_context_ucred(ctx), &pshm->pshm_hdr, pshm->pshm_label, vp, vp->v_label); + } } PSHM_SUBSYS_UNLOCK(); } diff --git a/bsd/kern/proc_info.c b/bsd/kern/proc_info.c index 2de42fa30..8d026e5db 100644 --- a/bsd/kern/proc_info.c +++ b/bsd/kern/proc_info.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -139,7 +139,7 @@ int proc_info_internal(int callnum, int pid, int flavor, uint64_t arg, user_addr /* protos for proc_info calls */ int __attribute__ ((noinline)) proc_listpids(uint32_t type, uint32_t tyoneinfo, user_addr_t buffer, uint32_t buffersize, int32_t * retval); int __attribute__ ((noinline)) proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) proc_pidfdinfo(int pid, int flavor,int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +int __attribute__ ((noinline)) proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); int __attribute__ ((noinline)) proc_kernmsgbuf(user_addr_t buffer, uint32_t buffersize, int32_t * retval); int __attribute__ ((noinline)) proc_setcontrol(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval); int __attribute__ ((noinline)) proc_pidfileportinfo(int pid, int flavor, mach_port_name_t name, user_addr_t buffer, uint32_t buffersize, int32_t *retval); @@ -155,21 +155,21 @@ int __attribute__ ((noinline)) proc_pidfdlist(proc_t p, user_addr_t buffer, uint int __attribute__ ((noinline)) proc_pidbsdinfo(proc_t p, struct proc_bsdinfo *pbsd, int zombie); int __attribute__ ((noinline)) proc_pidshortbsdinfo(proc_t p, struct proc_bsdshortinfo *pbsd_shortp, int zombie); int __attribute__ ((noinline)) proc_pidtaskinfo(proc_t p, struct proc_taskinfo *ptinfo); -int __attribute__ ((noinline)) proc_pidthreadinfo(proc_t p, uint64_t arg, bool thuniqueid, struct proc_threadinfo *pthinfo); -int __attribute__ ((noinline)) proc_pidthreadpathinfo(proc_t p, uint64_t arg, struct proc_threadwithpathinfo *pinfo); -int __attribute__ ((noinline)) proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +int __attribute__ ((noinline)) proc_pidthreadinfo(proc_t p, uint64_t arg, bool thuniqueid, struct proc_threadinfo *pthinfo); +int __attribute__ ((noinline)) proc_pidthreadpathinfo(proc_t p, uint64_t arg, struct proc_threadwithpathinfo *pinfo); +int __attribute__ ((noinline)) proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buffersize, int32_t *retval); int __attribute__ ((noinline)) proc_pidregioninfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidregionpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidregionpathinfo2(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidregionpathinfo3(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); -int __attribute__ ((noinline)) proc_pidvnodepathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +int __attribute__ ((noinline)) proc_pidregionpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +int __attribute__ ((noinline)) proc_pidregionpathinfo2(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +int __attribute__ ((noinline)) proc_pidregionpathinfo3(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); +int __attribute__ ((noinline)) proc_pidvnodepathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); int 
__attribute__ ((noinline)) proc_pidpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t *retval); int __attribute__ ((noinline)) proc_pidworkqueueinfo(proc_t p, struct proc_workqueueinfo *pwqinfo); int __attribute__ ((noinline)) proc_pidfileportlist(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval); void __attribute__ ((noinline)) proc_piduniqidentifierinfo(proc_t p, struct proc_uniqidentifierinfo *p_uniqidinfo); void __attribute__ ((noinline)) proc_archinfo(proc_t p, struct proc_archinfo *pai); void __attribute__ ((noinline)) proc_pidcoalitioninfo(proc_t p, struct proc_pidcoalitioninfo *pci); -int __attribute__ ((noinline)) proc_pidnoteexit(proc_t p, uint64_t arg, uint32_t *data); +int __attribute__ ((noinline)) proc_pidnoteexit(proc_t p, uint64_t arg, uint32_t *data); int __attribute__ ((noinline)) proc_pidexitreasoninfo(proc_t p, struct proc_exitreasoninfo *peri, struct proc_exitreasonbasicinfo *pberi); int __attribute__ ((noinline)) proc_pidoriginatorpid_uuid(uuid_t uuid, uint32_t buffersize, pid_t *pid); int __attribute__ ((noinline)) proc_pidlistuptrs(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval); @@ -180,9 +180,9 @@ int __attribute__ ((noinline)) proc_udata_info(pid_t pid, int flavor, user_addr_ #endif /* protos for proc_pidfdinfo calls */ -int __attribute__ ((noinline)) pid_vnodeinfo(vnode_t vp, uint32_t vid, struct fileproc * fp,proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_vnodeinfopath(vnode_t vp, uint32_t vid, struct fileproc * fp,proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); -int __attribute__ ((noinline)) pid_socketinfo(socket_t so, struct fileproc *fp,proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +int __attribute__ ((noinline)) pid_vnodeinfo(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +int __attribute__ ((noinline)) pid_vnodeinfopath(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); +int __attribute__ ((noinline)) pid_socketinfo(socket_t so, struct fileproc *fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); int __attribute__ ((noinline)) pid_pseminfo(struct psemnode * psem, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); int __attribute__ ((noinline)) pid_pshminfo(struct pshmnode * pshm, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); int __attribute__ ((noinline)) pid_pipeinfo(struct pipe * p, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval); @@ -205,31 +205,34 @@ extern int proc_get_rusage(proc_t proc, int flavor, user_addr_t buffer, int is_z #define CHECK_SAME_USER TRUE #define NO_CHECK_SAME_USER FALSE -uint64_t get_dispatchqueue_offset_from_proc(void *p) +uint64_t +get_dispatchqueue_offset_from_proc(void *p) { - if(p != NULL) { + if (p != NULL) { proc_t pself = (proc_t)p; - return (pself->p_dispatchqueue_offset); + return pself->p_dispatchqueue_offset; } else { return (uint64_t)0; } } -uint64_t get_dispatchqueue_serialno_offset_from_proc(void *p) +uint64_t +get_dispatchqueue_serialno_offset_from_proc(void *p) { - if(p != NULL) { + if (p != NULL) { proc_t pself = (proc_t)p; - return 
(pself->p_dispatchqueue_serialno_offset); + return pself->p_dispatchqueue_serialno_offset; } else { return (uint64_t)0; } } -uint64_t get_return_to_kernel_offset_from_proc(void *p) +uint64_t +get_return_to_kernel_offset_from_proc(void *p) { if (p != NULL) { proc_t pself = (proc_t)p; - return (pself->p_return_to_kernel_offset); + return pself->p_return_to_kernel_offset; } else { return (uint64_t)0; } @@ -240,52 +243,51 @@ uint64_t get_return_to_kernel_offset_from_proc(void *p) int proc_info(__unused struct proc *p, struct proc_info_args * uap, int32_t *retval) { - return(proc_info_internal(uap->callnum, uap->pid, uap->flavor, uap->arg, uap->buffer, uap->buffersize, retval)); + return proc_info_internal(uap->callnum, uap->pid, uap->flavor, uap->arg, uap->buffer, uap->buffersize, retval); } -int +int proc_info_internal(int callnum, int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, int32_t * retval) { - - switch(callnum) { - case PROC_INFO_CALL_LISTPIDS: - /* pid contains type and flavor contains typeinfo */ - return(proc_listpids(pid, flavor, buffer, buffersize, retval)); - case PROC_INFO_CALL_PIDINFO: - return(proc_pidinfo(pid, flavor, arg, buffer, buffersize, retval)); - case PROC_INFO_CALL_PIDFDINFO: - return(proc_pidfdinfo(pid, flavor, (int)arg, buffer, buffersize, retval)); - case PROC_INFO_CALL_KERNMSGBUF: - return(proc_kernmsgbuf(buffer, buffersize, retval)); - case PROC_INFO_CALL_SETCONTROL: - return(proc_setcontrol(pid, flavor, arg, buffer, buffersize, retval)); - case PROC_INFO_CALL_PIDFILEPORTINFO: - return(proc_pidfileportinfo(pid, flavor, (mach_port_name_t)arg, buffer, buffersize, retval)); - case PROC_INFO_CALL_TERMINATE: - return(proc_terminate(pid, retval)); - case PROC_INFO_CALL_DIRTYCONTROL: - return(proc_dirtycontrol(pid, flavor, arg, retval)); - case PROC_INFO_CALL_PIDRUSAGE: - return (proc_pid_rusage(pid, flavor, buffer, retval)); - case PROC_INFO_CALL_PIDORIGINATORINFO: - return (proc_pidoriginatorinfo(pid, flavor, buffer, buffersize, retval)); - case PROC_INFO_CALL_LISTCOALITIONS: - return proc_listcoalitions(pid /* flavor */, flavor /* coaltype */, buffer, - buffersize, retval); - case PROC_INFO_CALL_CANUSEFGHW: - return proc_can_use_foreground_hw(pid, buffer, buffersize, retval); - case PROC_INFO_CALL_PIDDYNKQUEUEINFO: - return proc_piddynkqueueinfo(pid, flavor, (kqueue_id_t)arg, buffer, buffersize, retval); + switch (callnum) { + case PROC_INFO_CALL_LISTPIDS: + /* pid contains type and flavor contains typeinfo */ + return proc_listpids(pid, flavor, buffer, buffersize, retval); + case PROC_INFO_CALL_PIDINFO: + return proc_pidinfo(pid, flavor, arg, buffer, buffersize, retval); + case PROC_INFO_CALL_PIDFDINFO: + return proc_pidfdinfo(pid, flavor, (int)arg, buffer, buffersize, retval); + case PROC_INFO_CALL_KERNMSGBUF: + return proc_kernmsgbuf(buffer, buffersize, retval); + case PROC_INFO_CALL_SETCONTROL: + return proc_setcontrol(pid, flavor, arg, buffer, buffersize, retval); + case PROC_INFO_CALL_PIDFILEPORTINFO: + return proc_pidfileportinfo(pid, flavor, (mach_port_name_t)arg, buffer, buffersize, retval); + case PROC_INFO_CALL_TERMINATE: + return proc_terminate(pid, retval); + case PROC_INFO_CALL_DIRTYCONTROL: + return proc_dirtycontrol(pid, flavor, arg, retval); + case PROC_INFO_CALL_PIDRUSAGE: + return proc_pid_rusage(pid, flavor, buffer, retval); + case PROC_INFO_CALL_PIDORIGINATORINFO: + return proc_pidoriginatorinfo(pid, flavor, buffer, buffersize, retval); + case PROC_INFO_CALL_LISTCOALITIONS: + return proc_listcoalitions(pid /* flavor 
*/, flavor /* coaltype */, buffer, + buffersize, retval); + case PROC_INFO_CALL_CANUSEFGHW: + return proc_can_use_foreground_hw(pid, buffer, buffersize, retval); + case PROC_INFO_CALL_PIDDYNKQUEUEINFO: + return proc_piddynkqueueinfo(pid, flavor, (kqueue_id_t)arg, buffer, buffersize, retval); #if !CONFIG_EMBEDDED - case PROC_INFO_CALL_UDATA_INFO: - return proc_udata_info(pid, flavor, buffer, buffersize, retval); + case PROC_INFO_CALL_UDATA_INFO: + return proc_udata_info(pid, flavor, buffer, buffersize, retval); #endif /* !CONFIG_EMBEDDED */ - default: - return EINVAL; + default: + return EINVAL; } - return(EINVAL); + return EINVAL; } /******************* proc_listpids routine ****************/ @@ -304,19 +306,20 @@ proc_listpids(uint32_t type, uint32_t typeinfo, user_addr_t buffer, uint32_t bu struct proclist *current_list; /* Do we have permission to look into this? */ - if ((error = proc_security_policy(PROC_NULL, PROC_INFO_CALL_LISTPIDS, type, NO_CHECK_SAME_USER))) - return (error); + if ((error = proc_security_policy(PROC_NULL, PROC_INFO_CALL_LISTPIDS, type, NO_CHECK_SAME_USER))) { + return error; + } /* if the buffer is null, return num of procs */ if (buffer == (user_addr_t)0) { *retval = ((nprocs + 20) * sizeof(int)); - return(0); + return 0; } if (buffersize < sizeof(int)) { - return(ENOMEM); + return ENOMEM; } - wantpids = buffersize/sizeof(int); + wantpids = buffersize / sizeof(int); if ((nprocs + 20) > 0) { numprocs = (uint32_t)(nprocs + 20); } @@ -326,13 +329,12 @@ proc_listpids(uint32_t type, uint32_t typeinfo, user_addr_t buffer, uint32_t bu kbuf = (char *)kalloc((vm_size_t)(numprocs * sizeof(int))); if (kbuf == NULL) { - return(ENOMEM); + return ENOMEM; } - bzero(kbuf, sizeof(int)); + bzero(kbuf, numprocs * sizeof(int)); proc_list_lock(); - n = 0; ptr = (int *)kbuf; current_list = &allproc; @@ -340,72 +342,79 @@ proc_loop: LIST_FOREACH(p, current_list, p_list) { skip = 0; switch (type) { - case PROC_PGRP_ONLY: - if (p->p_pgrpid != (pid_t)typeinfo) - skip = 1; - break; - case PROC_PPID_ONLY: - if ((p->p_ppid != (pid_t)typeinfo) && (((p->p_lflag & P_LTRACED) == 0) || (p->p_oppid != (pid_t)typeinfo))) - skip = 1; - break; - - case PROC_ALL_PIDS: - skip = 0; - break; - case PROC_TTY_ONLY: - /* racy but list lock is held */ - if ((p->p_flag & P_CONTROLT) == 0 || - (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) || - (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL || - tp->t_dev != (dev_t)typeinfo) - skip = 1; - break; - case PROC_UID_ONLY: - if (p->p_ucred == NULL) - skip = 1; - else { - kauth_cred_t my_cred; - uid_t uid; - - my_cred = kauth_cred_proc_ref(p); - uid = kauth_cred_getuid(my_cred); - kauth_cred_unref(&my_cred); - if (uid != (uid_t)typeinfo) - skip = 1; - } - break; - case PROC_RUID_ONLY: - if (p->p_ucred == NULL) + case PROC_PGRP_ONLY: + if (p->p_pgrpid != (pid_t)typeinfo) { + skip = 1; + } + break; + case PROC_PPID_ONLY: + if ((p->p_ppid != (pid_t)typeinfo) && (((p->p_lflag & P_LTRACED) == 0) || (p->p_oppid != (pid_t)typeinfo))) { + skip = 1; + } + break; + + case PROC_ALL_PIDS: + skip = 0; + break; + case PROC_TTY_ONLY: + /* racy but list lock is held */ + if ((p->p_flag & P_CONTROLT) == 0 || + (p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) || + (tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL || + tp->t_dev != (dev_t)typeinfo) { + skip = 1; + } + break; + case PROC_UID_ONLY: + if (p->p_ucred == NULL) { + skip = 1; + } else { + kauth_cred_t my_cred; + uid_t uid; + + my_cred = kauth_cred_proc_ref(p); + uid = kauth_cred_getuid(my_cred); + 
kauth_cred_unref(&my_cred);
+				if (uid != (uid_t)typeinfo) {
 					skip = 1;
-				else {
-					kauth_cred_t my_cred;
-					uid_t uid;
-
-					my_cred = kauth_cred_proc_ref(p);
-					uid = kauth_cred_getruid(my_cred);
-					kauth_cred_unref(&my_cred);
-					if (uid != (uid_t)typeinfo)
-						skip = 1;
 				}
-				break;
-			case PROC_KDBG_ONLY:
-				if (p->p_kdebug == 0) {
+			}
+			break;
+		case PROC_RUID_ONLY:
+			if (p->p_ucred == NULL) {
+				skip = 1;
+			} else {
+				kauth_cred_t my_cred;
+				uid_t uid;
+
+				my_cred = kauth_cred_proc_ref(p);
+				uid = kauth_cred_getruid(my_cred);
+				kauth_cred_unref(&my_cred);
+				if (uid != (uid_t)typeinfo) {
 					skip = 1;
 				}
-				break;
-			default:
-				skip = 1;
-				break;
-			};
+			}
+			break;
+		case PROC_KDBG_ONLY:
+			if (p->p_kdebug == 0) {
+				skip = 1;
+			}
+			break;
+		default:
+			skip = 1;
+			break;
+		}
+		;
 
-		if(skip == 0) {
+		if (skip == 0) {
 			*ptr++ = p->p_pid;
 			n++;
 		}
 
-		if (n >= numprocs)
+		if (n >= numprocs) {
 			break;
+		}
 	}
-	
+
 	if ((n < numprocs) && (current_list == &allproc)) {
 		current_list = &zombproc;
 		goto proc_loop;
@@ -415,72 +424,75 @@ proc_loop:
 	ptr = (int *)kbuf;
 	error = copyout((caddr_t)ptr, buffer, n * sizeof(int));
-	if (error == 0)
+	if (error == 0) {
 		*retval = (n * sizeof(int));
-	kfree((void *)kbuf, (vm_size_t)(numprocs * sizeof(int)));
+	}
+	kfree(kbuf, (vm_size_t)(numprocs * sizeof(int)));
 
-	return(error);
+	return error;
 }
 
 /********************************** proc_pidfdlist routines ********************************/
 
-int 
+int
 proc_pidfdlist(proc_t p, user_addr_t buffer, uint32_t buffersize, int32_t *retval)
 {
-	uint32_t numfds = 0;
-	uint32_t needfds;
-	char * kbuf;
-	struct proc_fdinfo * pfd;
-	struct fileproc * fp;
-	int n;
-	int count = 0;
-	int error = 0;
-
-	if (p->p_fd->fd_nfiles > 0) {
-		numfds = (uint32_t)p->p_fd->fd_nfiles;
-	}
+	uint32_t numfds = 0;
+	uint32_t needfds;
+	char * kbuf;
+	struct proc_fdinfo * pfd;
+	struct fileproc * fp;
+	int n;
+	int count = 0;
+	int error = 0;
 
-	if (buffer == (user_addr_t) 0) {
-		numfds += 20;
-		*retval = (numfds * sizeof(struct proc_fdinfo));
-		return(0);
-	}
+	if (p->p_fd->fd_nfiles > 0) {
+		numfds = (uint32_t)p->p_fd->fd_nfiles;
+	}
 
-	/* buffersize is big enough atleast for one struct */
-	needfds = buffersize/sizeof(struct proc_fdinfo);
+	if (buffer == (user_addr_t) 0) {
+		numfds += 20;
+		*retval = (numfds * sizeof(struct proc_fdinfo));
+		return 0;
+	}
 
-	if (numfds > needfds) {
-		numfds = needfds;
-	}
+	/* buffersize is big enough at least for one struct */
+	needfds = buffersize / sizeof(struct proc_fdinfo);
+
+	if (numfds > needfds) {
+		numfds = needfds;
+	}
 
-	kbuf = (char *)kalloc((vm_size_t)(numfds * sizeof(struct proc_fdinfo)));
-	if (kbuf == NULL)
-		return(ENOMEM);
-	bzero(kbuf, numfds * sizeof(struct proc_fdinfo));
+	kbuf = (char *)kalloc((vm_size_t)(numfds * sizeof(struct proc_fdinfo)));
+	if (kbuf == NULL) {
+		return ENOMEM;
+	}
+	bzero(kbuf, numfds * sizeof(struct proc_fdinfo));
 
-	proc_fdlock(p);
+	proc_fdlock(p);
 
-	pfd = (struct proc_fdinfo *)kbuf;
+	pfd = (struct proc_fdinfo *)kbuf;
 
-	for (n = 0; ((n < (int)numfds) && (n < p->p_fd->fd_nfiles)); n++) {
-		if (((fp = p->p_fd->fd_ofiles[n]) != 0)
-		    && ((p->p_fd->fd_ofileflags[n] & UF_RESERVED) == 0)) {
-			file_type_t fdtype = FILEGLOB_DTYPE(fp->f_fglob);
-			pfd->proc_fd = n;
-			pfd->proc_fdtype = (fdtype != DTYPE_ATALK) ?
- fdtype : PROX_FDTYPE_ATALK; - count++; - pfd++; - } + for (n = 0; ((n < (int)numfds) && (n < p->p_fd->fd_nfiles)); n++) { + if (((fp = p->p_fd->fd_ofiles[n]) != 0) + && ((p->p_fd->fd_ofileflags[n] & UF_RESERVED) == 0)) { + file_type_t fdtype = FILEGLOB_DTYPE(fp->f_fglob); + pfd->proc_fd = n; + pfd->proc_fdtype = (fdtype != DTYPE_ATALK) ? + fdtype : PROX_FDTYPE_ATALK; + count++; + pfd++; } - proc_fdunlock(p); + } + proc_fdunlock(p); - error = copyout(kbuf, buffer, count * sizeof(struct proc_fdinfo)); - kfree((void *)kbuf, (vm_size_t)(numfds * sizeof(struct proc_fdinfo))); - if (error == 0) - *retval = (count * sizeof(struct proc_fdinfo)); - return(error); + error = copyout(kbuf, buffer, count * sizeof(struct proc_fdinfo)); + kfree(kbuf, (vm_size_t)(numfds * sizeof(struct proc_fdinfo))); + if (error == 0) { + *retval = (count * sizeof(struct proc_fdinfo)); + } + return error; } /* @@ -493,7 +505,7 @@ proc_fileport_count(__unused mach_port_name_t name, uint32_t *counter = arg; *counter += 1; - return (0); + return 0; } struct fileport_fdtype_args { @@ -510,17 +522,18 @@ proc_fileport_fdtype(mach_port_name_t name, struct fileglob *fg, void *arg) file_type_t fdtype = FILEGLOB_DTYPE(fg); ffa->ffa_pfi->proc_fdtype = (fdtype != DTYPE_ATALK) ? - fdtype : PROX_FDTYPE_ATALK; + fdtype : PROX_FDTYPE_ATALK; ffa->ffa_pfi->proc_fileport = name; ffa->ffa_pfi++; - return (0); /* keep walking */ - } else - return (-1); /* stop the walk! */ + return 0; /* keep walking */ + } else { + return -1; /* stop the walk! */ + } } int proc_pidfileportlist(proc_t p, - user_addr_t buffer, uint32_t buffersize, int32_t *retval) + user_addr_t buffer, uint32_t buffersize, int32_t *retval) { void *kbuf; vm_size_t kbufsize; @@ -529,7 +542,7 @@ proc_pidfileportlist(proc_t p, struct fileport_fdtype_args ffa; int error; - needfileports = buffersize / sizeof (*pfi); + needfileports = buffersize / sizeof(*pfi); if ((user_addr_t)0 == buffer || needfileports > (uint32_t)maxfiles) { /* * Either (i) the user is asking for a fileport count, @@ -543,32 +556,34 @@ proc_pidfileportlist(proc_t p, case KERN_SUCCESS: break; case KERN_RESOURCE_SHORTAGE: - return (ENOMEM); + return ENOMEM; case KERN_INVALID_TASK: - return (ESRCH); + return ESRCH; default: - return (EINVAL); + return EINVAL; } if (numfileports == 0) { - *retval = 0; /* none at all, bail */ - return (0); + *retval = 0; /* none at all, bail */ + return 0; } if ((user_addr_t)0 == buffer) { - numfileports += 20; /* accelerate convergence */ - *retval = numfileports * sizeof (*pfi); - return (0); + numfileports += 20; /* accelerate convergence */ + *retval = numfileports * sizeof(*pfi); + return 0; } - if (needfileports > numfileports) + if (needfileports > numfileports) { needfileports = numfileports; + } } assert(buffersize >= PROC_PIDLISTFILEPORTS_SIZE); - kbufsize = (vm_size_t)needfileports * sizeof (*pfi); + kbufsize = (vm_size_t)needfileports * sizeof(*pfi); pfi = kbuf = kalloc(kbufsize); - if (kbuf == NULL) - return (ENOMEM); + if (kbuf == NULL) { + return ENOMEM; + } bzero(kbuf, kbufsize); ffa.ffa_pfi = pfi; @@ -578,11 +593,13 @@ proc_pidfileportlist(proc_t p, case KERN_SUCCESS: error = 0; pfi = ffa.ffa_pfi; - if ((numfileports = pfi - (typeof(pfi))kbuf) == 0) + if ((numfileports = pfi - (typeof(pfi))kbuf) == 0) { break; - if (numfileports > needfileports) + } + if (numfileports > needfileports) { panic("more fileports returned than requested"); - error = copyout(kbuf, buffer, numfileports * sizeof (*pfi)); + } + error = copyout(kbuf, buffer, numfileports * sizeof(*pfi)); 
break; case KERN_RESOURCE_SHORTAGE: error = ENOMEM; @@ -595,12 +612,13 @@ proc_pidfileportlist(proc_t p, break; } kfree(kbuf, kbufsize); - if (error == 0) - *retval = numfileports * sizeof (*pfi); - return (error); + if (error == 0) { + *retval = numfileports * sizeof(*pfi); + } + return error; } -int +int proc_pidbsdinfo(proc_t p, struct proc_bsdinfo * pbsd, int zombie) { struct tty *tp; @@ -618,81 +636,97 @@ proc_pidbsdinfo(proc_t p, struct proc_bsdinfo * pbsd, int zombie) pbsd->pbi_pid = p->p_pid; pbsd->pbi_ppid = p->p_ppid; pbsd->pbi_uid = kauth_cred_getuid(my_cred); - pbsd->pbi_gid = kauth_cred_getgid(my_cred); + pbsd->pbi_gid = kauth_cred_getgid(my_cred); pbsd->pbi_ruid = kauth_cred_getruid(my_cred); pbsd->pbi_rgid = kauth_cred_getrgid(my_cred); pbsd->pbi_svuid = kauth_cred_getsvuid(my_cred); pbsd->pbi_svgid = kauth_cred_getsvgid(my_cred); kauth_cred_unref(&my_cred); - + pbsd->pbi_nice = p->p_nice; pbsd->pbi_start_tvsec = p->p_start.tv_sec; pbsd->pbi_start_tvusec = p->p_start.tv_usec; bcopy(&p->p_comm, &pbsd->pbi_comm[0], MAXCOMLEN); pbsd->pbi_comm[MAXCOMLEN - 1] = '\0'; - bcopy(&p->p_name, &pbsd->pbi_name[0], 2*MAXCOMLEN); - pbsd->pbi_name[(2*MAXCOMLEN) - 1] = '\0'; + bcopy(&p->p_name, &pbsd->pbi_name[0], 2 * MAXCOMLEN); + pbsd->pbi_name[(2 * MAXCOMLEN) - 1] = '\0'; - pbsd->pbi_flags = 0; - if ((p->p_flag & P_SYSTEM) == P_SYSTEM) + pbsd->pbi_flags = 0; + if ((p->p_flag & P_SYSTEM) == P_SYSTEM) { pbsd->pbi_flags |= PROC_FLAG_SYSTEM; - if ((p->p_lflag & P_LTRACED) == P_LTRACED) + } + if ((p->p_lflag & P_LTRACED) == P_LTRACED) { pbsd->pbi_flags |= PROC_FLAG_TRACED; - if ((p->p_lflag & P_LEXIT) == P_LEXIT) + } + if ((p->p_lflag & P_LEXIT) == P_LEXIT) { pbsd->pbi_flags |= PROC_FLAG_INEXIT; - if ((p->p_lflag & P_LPPWAIT) == P_LPPWAIT) + } + if ((p->p_lflag & P_LPPWAIT) == P_LPPWAIT) { pbsd->pbi_flags |= PROC_FLAG_PPWAIT; - if ((p->p_flag & P_LP64) == P_LP64) + } + if ((p->p_flag & P_LP64) == P_LP64) { pbsd->pbi_flags |= PROC_FLAG_LP64; - if ((p->p_flag & P_CONTROLT) == P_CONTROLT) + } + if ((p->p_flag & P_CONTROLT) == P_CONTROLT) { pbsd->pbi_flags |= PROC_FLAG_CONTROLT; - if ((p->p_flag & P_THCWD) == P_THCWD) + } + if ((p->p_flag & P_THCWD) == P_THCWD) { pbsd->pbi_flags |= PROC_FLAG_THCWD; - if ((p->p_flag & P_SUGID) == P_SUGID) + } + if ((p->p_flag & P_SUGID) == P_SUGID) { pbsd->pbi_flags |= PROC_FLAG_PSUGID; - if ((p->p_flag & P_EXEC) == P_EXEC) + } + if ((p->p_flag & P_EXEC) == P_EXEC) { pbsd->pbi_flags |= PROC_FLAG_EXEC; + } if (sessionp != SESSION_NULL) { - if (SESS_LEADER(p, sessionp)) + if (SESS_LEADER(p, sessionp)) { pbsd->pbi_flags |= PROC_FLAG_SLEADER; - if (sessionp->s_ttyvp) + } + if (sessionp->s_ttyvp) { pbsd->pbi_flags |= PROC_FLAG_CTTY; + } } #if !CONFIG_EMBEDDED - if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) + if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) { pbsd->pbi_flags |= PROC_FLAG_DELAYIDLESLEEP; + } #endif /* !CONFIG_EMBEDDED */ - switch(PROC_CONTROL_STATE(p)) { - case P_PCTHROTTLE: - pbsd->pbi_flags |= PROC_FLAG_PC_THROTTLE; - break; - case P_PCSUSP: - pbsd->pbi_flags |= PROC_FLAG_PC_SUSP; - break; - case P_PCKILL: - pbsd->pbi_flags |= PROC_FLAG_PC_KILL; - break; - }; + switch (PROC_CONTROL_STATE(p)) { + case P_PCTHROTTLE: + pbsd->pbi_flags |= PROC_FLAG_PC_THROTTLE; + break; + case P_PCSUSP: + pbsd->pbi_flags |= PROC_FLAG_PC_SUSP; + break; + case P_PCKILL: + pbsd->pbi_flags |= PROC_FLAG_PC_KILL; + break; + } + ; + + switch (PROC_ACTION_STATE(p)) { + case P_PCTHROTTLE: + pbsd->pbi_flags |= PROC_FLAG_PA_THROTTLE; + break; + case P_PCSUSP: + 
pbsd->pbi_flags |= PROC_FLAG_PA_SUSP; + break; + } + ; - switch(PROC_ACTION_STATE(p)) { - case P_PCTHROTTLE: - pbsd->pbi_flags |= PROC_FLAG_PA_THROTTLE; - break; - case P_PCSUSP: - pbsd->pbi_flags |= PROC_FLAG_PA_SUSP; - break; - }; - /* if process is a zombie skip bg state */ - if ((zombie == 0) && (p->p_stat != SZOMB) && (p->task != TASK_NULL)) + if ((zombie == 0) && (p->p_stat != SZOMB) && (p->task != TASK_NULL)) { proc_get_darwinbgstate(p->task, &pbsd->pbi_flags); + } - if (zombie == 0) + if (zombie == 0) { pbsd->pbi_nfiles = p->p_fd->fd_nfiles; - + } + pbsd->e_tdev = NODEV; if (pg != PGRP_NULL) { pbsd->pbi_pgid = p->p_pgrpid; @@ -701,17 +735,19 @@ proc_pidbsdinfo(proc_t p, struct proc_bsdinfo * pbsd, int zombie) pbsd->e_tdev = tp->t_dev; pbsd->e_tpgid = sessionp->s_ttypgrpid; } - } - if (sessionp != SESSION_NULL) + } + if (sessionp != SESSION_NULL) { session_rele(sessionp); - if (pg != PGRP_NULL) + } + if (pg != PGRP_NULL) { pg_rele(pg); + } - return(0); + return 0; } -int +int proc_pidshortbsdinfo(proc_t p, struct proc_bsdshortinfo * pbsd_shortp, int zombie) { bzero(pbsd_shortp, sizeof(struct proc_bsdshortinfo)); @@ -722,81 +758,94 @@ proc_pidshortbsdinfo(proc_t p, struct proc_bsdshortinfo * pbsd_shortp, int zombi bcopy(&p->p_comm, &pbsd_shortp->pbsi_comm[0], MAXCOMLEN); pbsd_shortp->pbsi_comm[MAXCOMLEN - 1] = '\0'; - pbsd_shortp->pbsi_flags = 0; - if ((p->p_flag & P_SYSTEM) == P_SYSTEM) + pbsd_shortp->pbsi_flags = 0; + if ((p->p_flag & P_SYSTEM) == P_SYSTEM) { pbsd_shortp->pbsi_flags |= PROC_FLAG_SYSTEM; - if ((p->p_lflag & P_LTRACED) == P_LTRACED) + } + if ((p->p_lflag & P_LTRACED) == P_LTRACED) { pbsd_shortp->pbsi_flags |= PROC_FLAG_TRACED; - if ((p->p_lflag & P_LEXIT) == P_LEXIT) + } + if ((p->p_lflag & P_LEXIT) == P_LEXIT) { pbsd_shortp->pbsi_flags |= PROC_FLAG_INEXIT; - if ((p->p_lflag & P_LPPWAIT) == P_LPPWAIT) + } + if ((p->p_lflag & P_LPPWAIT) == P_LPPWAIT) { pbsd_shortp->pbsi_flags |= PROC_FLAG_PPWAIT; - if ((p->p_flag & P_LP64) == P_LP64) + } + if ((p->p_flag & P_LP64) == P_LP64) { pbsd_shortp->pbsi_flags |= PROC_FLAG_LP64; - if ((p->p_flag & P_CONTROLT) == P_CONTROLT) + } + if ((p->p_flag & P_CONTROLT) == P_CONTROLT) { pbsd_shortp->pbsi_flags |= PROC_FLAG_CONTROLT; - if ((p->p_flag & P_THCWD) == P_THCWD) + } + if ((p->p_flag & P_THCWD) == P_THCWD) { pbsd_shortp->pbsi_flags |= PROC_FLAG_THCWD; - if ((p->p_flag & P_SUGID) == P_SUGID) + } + if ((p->p_flag & P_SUGID) == P_SUGID) { pbsd_shortp->pbsi_flags |= PROC_FLAG_PSUGID; - if ((p->p_flag & P_EXEC) == P_EXEC) + } + if ((p->p_flag & P_EXEC) == P_EXEC) { pbsd_shortp->pbsi_flags |= PROC_FLAG_EXEC; + } #if !CONFIG_EMBEDDED - if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) + if ((p->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP) { pbsd_shortp->pbsi_flags |= PROC_FLAG_DELAYIDLESLEEP; + } #endif /* !CONFIG_EMBEDDED */ - switch(PROC_CONTROL_STATE(p)) { - case P_PCTHROTTLE: - pbsd_shortp->pbsi_flags |= PROC_FLAG_PC_THROTTLE; - break; - case P_PCSUSP: - pbsd_shortp->pbsi_flags |= PROC_FLAG_PC_SUSP; - break; - case P_PCKILL: - pbsd_shortp->pbsi_flags |= PROC_FLAG_PC_KILL; - break; - }; + switch (PROC_CONTROL_STATE(p)) { + case P_PCTHROTTLE: + pbsd_shortp->pbsi_flags |= PROC_FLAG_PC_THROTTLE; + break; + case P_PCSUSP: + pbsd_shortp->pbsi_flags |= PROC_FLAG_PC_SUSP; + break; + case P_PCKILL: + pbsd_shortp->pbsi_flags |= PROC_FLAG_PC_KILL; + break; + } + ; + + switch (PROC_ACTION_STATE(p)) { + case P_PCTHROTTLE: + pbsd_shortp->pbsi_flags |= PROC_FLAG_PA_THROTTLE; + break; + case P_PCSUSP: + pbsd_shortp->pbsi_flags |= 
PROC_FLAG_PA_SUSP; + break; + } + ; - switch(PROC_ACTION_STATE(p)) { - case P_PCTHROTTLE: - pbsd_shortp->pbsi_flags |= PROC_FLAG_PA_THROTTLE; - break; - case P_PCSUSP: - pbsd_shortp->pbsi_flags |= PROC_FLAG_PA_SUSP; - break; - }; - /* if process is a zombie skip bg state */ - if ((zombie == 0) && (p->p_stat != SZOMB) && (p->task != TASK_NULL)) + if ((zombie == 0) && (p->p_stat != SZOMB) && (p->task != TASK_NULL)) { proc_get_darwinbgstate(p->task, &pbsd_shortp->pbsi_flags); + } pbsd_shortp->pbsi_uid = p->p_uid; - pbsd_shortp->pbsi_gid = p->p_gid; + pbsd_shortp->pbsi_gid = p->p_gid; pbsd_shortp->pbsi_ruid = p->p_ruid; pbsd_shortp->pbsi_rgid = p->p_rgid; pbsd_shortp->pbsi_svuid = p->p_svuid; pbsd_shortp->pbsi_svgid = p->p_svgid; - - return(0); + + return 0; } -int +int proc_pidtaskinfo(proc_t p, struct proc_taskinfo * ptinfo) { task_t task; - + task = p->task; bzero(ptinfo, sizeof(struct proc_taskinfo)); fill_taskprocinfo(task, (struct proc_taskinfo_internal *)ptinfo); - return(0); + return 0; } -int +int proc_pidthreadinfo(proc_t p, uint64_t arg, bool thuniqueid, struct proc_threadinfo *pthinfo) { int error = 0; @@ -805,11 +854,11 @@ proc_pidthreadinfo(proc_t p, uint64_t arg, bool thuniqueid, struct proc_threadin bzero(pthinfo, sizeof(struct proc_threadinfo)); error = fill_taskthreadinfo(p->task, threadaddr, thuniqueid, (struct proc_threadinfo_internal *)pthinfo, NULL, NULL); - if (error) - return(ESRCH); - else - return(0); - + if (error) { + return ESRCH; + } else { + return 0; + } } boolean_t @@ -825,12 +874,13 @@ bsd_hasthreadname(void *uth) } } -void +void bsd_getthreadname(void *uth, char *buffer) { struct uthread *ut = (struct uthread *)uth; - if(ut->pth_name) - bcopy(ut->pth_name,buffer,MAXTHREADNAMESIZE); + if (ut->pth_name) { + bcopy(ut->pth_name, buffer, MAXTHREADNAMESIZE); + } } /* @@ -838,7 +888,8 @@ bsd_getthreadname(void *uth, char *buffer) * callers may result in a garbled name. 
*/ void -bsd_setthreadname(void *uth, const char *name) { +bsd_setthreadname(void *uth, const char *name) +{ struct uthread *ut = (struct uthread *)uth; char * name_buf = NULL; @@ -866,13 +917,15 @@ bsd_copythreadname(void *dst_uth, void *src_uth) struct uthread *dst_ut = (struct uthread *)dst_uth; struct uthread *src_ut = (struct uthread *)src_uth; - if (src_ut->pth_name == NULL) + if (src_ut->pth_name == NULL) { return; + } if (dst_ut->pth_name == NULL) { dst_ut->pth_name = (char *)kalloc(MAXTHREADNAMESIZE); - if (dst_ut->pth_name == NULL) + if (dst_ut->pth_name == NULL) { return; + } } bcopy(src_ut->pth_name, dst_ut->pth_name, MAXTHREADNAMESIZE); @@ -887,18 +940,19 @@ bsd_threadcdir(void * uth, void *vptr, int *vidp) vnode_t *vpp = (vnode_t *)vptr; vp = ut->uu_cdir; - if (vp != NULLVP) { + if (vp != NULLVP) { if (vpp != NULL) { *vpp = vp; - if (vidp != NULL) + if (vidp != NULL) { *vidp = vp->v_id; + } } } } -int -proc_pidthreadpathinfo(proc_t p, uint64_t arg, struct proc_threadwithpathinfo *pinfo) +int +proc_pidthreadpathinfo(proc_t p, uint64_t arg, struct proc_threadwithpathinfo *pinfo) { vnode_t vp = NULLVP; int vid; @@ -909,24 +963,25 @@ proc_pidthreadpathinfo(proc_t p, uint64_t arg, struct proc_threadwithpathinfo * bzero(pinfo, sizeof(struct proc_threadwithpathinfo)); error = fill_taskthreadinfo(p->task, threadaddr, 0, (struct proc_threadinfo_internal *)&pinfo->pt, (void *)&vp, &vid); - if (error) - return(ESRCH); + if (error) { + return ESRCH; + } if ((vp != NULLVP) && ((vnode_getwithvid(vp, vid)) == 0)) { - error = fill_vnodeinfo(vp, &pinfo->pvip.vip_vi) ; + error = fill_vnodeinfo(vp, &pinfo->pvip.vip_vi); if (error == 0) { count = MAXPATHLEN; vn_getpath(vp, &pinfo->pvip.vip_path[0], &count); - pinfo->pvip.vip_path[MAXPATHLEN-1] = 0; + pinfo->pvip.vip_path[MAXPATHLEN - 1] = 0; } vnode_put(vp); - } - return(error); + } + return error; } -int +int proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buffersize, int32_t *retval) { uint32_t count = 0; @@ -940,29 +995,30 @@ proc_pidlistthreads(proc_t p, bool thuniqueid, user_addr_t buffer, uint32_t buf numthreads = (uint32_t)num; } - count = buffersize/(sizeof(uint64_t)); + count = buffersize / (sizeof(uint64_t)); if (numthreads > count) { numthreads = count; } kbuf = (void *)kalloc(numthreads * sizeof(uint64_t)); - if (kbuf == NULL) - return(ENOMEM); + if (kbuf == NULL) { + return ENOMEM; + } bzero(kbuf, numthreads * sizeof(uint64_t)); - + ret = fill_taskthreadlist(p->task, kbuf, numthreads, thuniqueid); - + error = copyout(kbuf, buffer, ret); kfree(kbuf, numthreads * sizeof(uint64_t)); - if (error == 0) + if (error == 0) { *retval = ret; - return(error); - + } + return error; } -int +int proc_pidregioninfo(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval) { struct proc_regioninfo preginfo; @@ -970,30 +1026,33 @@ proc_pidregioninfo(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t bzero(&preginfo, sizeof(struct proc_regioninfo)); ret = fill_procregioninfo( p->task, arg, (struct proc_regioninfo_internal *)&preginfo, (uintptr_t *)0, (uint32_t *)0); - if (ret == 0) - return(EINVAL); + if (ret == 0) { + return EINVAL; + } error = copyout(&preginfo, buffer, sizeof(struct proc_regioninfo)); - if (error == 0) + if (error == 0) { *retval = sizeof(struct proc_regioninfo); - return(error); + } + return error; } -int +int proc_pidregionpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval) { struct proc_regionwithpathinfo 
preginfo; int ret, error = 0; - uintptr_t vnodeaddr= 0; - uint32_t vnodeid= 0; + uintptr_t vnodeaddr = 0; + uint32_t vnodeid = 0; vnode_t vp; int count; bzero(&preginfo, sizeof(struct proc_regionwithpathinfo)); ret = fill_procregioninfo( p->task, arg, (struct proc_regioninfo_internal *)&preginfo.prp_prinfo, (uintptr_t *)&vnodeaddr, (uint32_t *)&vnodeid); - if (ret == 0) - return(EINVAL); + if (ret == 0) { + return EINVAL; + } if (vnodeaddr) { vp = (vnode_t)vnodeaddr; if ((vnode_getwithvid(vp, vnodeid)) == 0) { @@ -1002,33 +1061,36 @@ proc_pidregionpathinfo(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint count = MAXPATHLEN; vn_getpath(vp, &preginfo.prp_vip.vip_path[0], &count); /* Always make sure it is null terminated */ - preginfo.prp_vip.vip_path[MAXPATHLEN-1] = 0; + preginfo.prp_vip.vip_path[MAXPATHLEN - 1] = 0; vnode_put(vp); } } error = copyout(&preginfo, buffer, sizeof(struct proc_regionwithpathinfo)); - if (error == 0) + if (error == 0) { *retval = sizeof(struct proc_regionwithpathinfo); - return(error); + } + return error; } int -proc_pidregionpathinfo2(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval) +proc_pidregionpathinfo2(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval) { struct proc_regionwithpathinfo preginfo; int ret, error = 0; - uintptr_t vnodeaddr= 0; - uint32_t vnodeid= 0; + uintptr_t vnodeaddr = 0; + uint32_t vnodeid = 0; vnode_t vp; int count; bzero(&preginfo, sizeof(struct proc_regionwithpathinfo)); ret = fill_procregioninfo_onlymappedvnodes( p->task, arg, (struct proc_regioninfo_internal *)&preginfo.prp_prinfo, (uintptr_t *)&vnodeaddr, (uint32_t *)&vnodeid); - if (ret == 0) - return(EINVAL); - if (!vnodeaddr) - return(EINVAL); + if (ret == 0) { + return EINVAL; + } + if (!vnodeaddr) { + return EINVAL; + } vp = (vnode_t)vnodeaddr; if ((vnode_getwithvid(vp, vnodeid)) == 0) { @@ -1037,20 +1099,21 @@ proc_pidregionpathinfo2(proc_t p, uint64_t arg, user_addr_t buffer, __unused ui count = MAXPATHLEN; vn_getpath(vp, &preginfo.prp_vip.vip_path[0], &count); /* Always make sure it is null terminated */ - preginfo.prp_vip.vip_path[MAXPATHLEN-1] = 0; + preginfo.prp_vip.vip_path[MAXPATHLEN - 1] = 0; vnode_put(vp); } else { - return(EINVAL); + return EINVAL; } error = copyout(&preginfo, buffer, sizeof(struct proc_regionwithpathinfo)); - if (error == 0) + if (error == 0) { *retval = sizeof(struct proc_regionwithpathinfo); - return(error); + } + return error; } int -proc_pidregionpathinfo3(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval) +proc_pidregionpathinfo3(proc_t p, uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval) { struct proc_regionwithpathinfo preginfo; int ret, error = 0; @@ -1067,63 +1130,67 @@ proc_pidregionpathinfo3(proc_t p, uint64_t arg, user_addr_t buffer, __unused ui vnodeid = 0; ret = fill_procregioninfo_onlymappedvnodes( p->task, addr, (struct proc_regioninfo_internal *)&preginfo.prp_prinfo, (uintptr_t *)&vnodeaddr, (uint32_t *)&vnodeid); - if (ret == 0) - return(EINVAL); - if (!vnodeaddr) - return(EINVAL); + if (ret == 0) { + return EINVAL; + } + if (!vnodeaddr) { + return EINVAL; + } vp = (vnode_t)vnodeaddr; if ((vnode_getwithvid(vp, vnodeid)) == 0) { /* Check if the vnode matches the filter, otherwise loop looking for the next memory region backed by a vnode */ struct vnode_attr va; - + memset(&va, 0, sizeof(va)); VATTR_INIT(&va); VATTR_WANTED(&va, va_fsid); + VATTR_WANTED(&va, va_fsid64); ret = 
vnode_getattr(vp, &va, vfs_context_current()); if (ret) { vnode_put(vp); - return(EINVAL); + return EINVAL; } - if (va.va_fsid == arg) { + if (vnode_get_va_fsid(&va) == arg) { /* FILL THE VNODEINFO */ error = fill_vnodeinfo(vp, &preginfo.prp_vip.vip_vi); count = MAXPATHLEN; vn_getpath(vp, &preginfo.prp_vip.vip_path[0], &count); /* Always make sure it is null terminated */ - preginfo.prp_vip.vip_path[MAXPATHLEN-1] = 0; + preginfo.prp_vip.vip_path[MAXPATHLEN - 1] = 0; vnode_put(vp); break; } vnode_put(vp); } else { - return(EINVAL); + return EINVAL; } addr = preginfo.prp_prinfo.pri_address + preginfo.prp_prinfo.pri_size; } while (1); error = copyout(&preginfo, buffer, sizeof(struct proc_regionwithpathinfo)); - if (error == 0) + if (error == 0) { *retval = sizeof(struct proc_regionwithpathinfo); - return(error); + } + return error; } /* * Path is relative to current process directory; may different from current * thread directory. */ -int +int proc_pidvnodepathinfo(proc_t p, __unused uint64_t arg, user_addr_t buffer, __unused uint32_t buffersize, int32_t *retval) { struct proc_vnodepathinfo pvninfo; int error = 0; vnode_t vncdirvp = NULLVP; - uint32_t vncdirid=0; + uint32_t vncdirid = 0; vnode_t vnrdirvp = NULLVP; - uint32_t vnrdirid=0; + uint32_t vnrdirid = 0; int count; bzero(&pvninfo, sizeof(struct proc_vnodepathinfo)); @@ -1143,11 +1210,11 @@ proc_pidvnodepathinfo(proc_t p, __unused uint64_t arg, user_addr_t buffer, __unu if ((error = vnode_getwithvid(vncdirvp, vncdirid)) == 0) { /* FILL THE VNODEINFO */ error = fill_vnodeinfo(vncdirvp, &pvninfo.pvi_cdir.vip_vi); - if ( error == 0) { + if (error == 0) { count = MAXPATHLEN; vn_getpath(vncdirvp, &pvninfo.pvi_cdir.vip_path[0], &count); - pvninfo.pvi_cdir.vip_path[MAXPATHLEN-1] = 0; - } + pvninfo.pvi_cdir.vip_path[MAXPATHLEN - 1] = 0; + } vnode_put(vncdirvp); } else { goto out; @@ -1158,11 +1225,11 @@ proc_pidvnodepathinfo(proc_t p, __unused uint64_t arg, user_addr_t buffer, __unu if ((error = vnode_getwithvid(vnrdirvp, vnrdirid)) == 0) { /* FILL THE VNODEINFO */ error = fill_vnodeinfo(vnrdirvp, &pvninfo.pvi_rdir.vip_vi); - if ( error == 0) { + if (error == 0) { count = MAXPATHLEN; vn_getpath(vnrdirvp, &pvninfo.pvi_rdir.vip_path[0], &count); - pvninfo.pvi_rdir.vip_path[MAXPATHLEN-1] = 0; - } + pvninfo.pvi_rdir.vip_path[MAXPATHLEN - 1] = 0; + } vnode_put(vnrdirvp); } else { goto out; @@ -1170,29 +1237,32 @@ proc_pidvnodepathinfo(proc_t p, __unused uint64_t arg, user_addr_t buffer, __unu } if (error == 0) { error = copyout(&pvninfo, buffer, sizeof(struct proc_vnodepathinfo)); - if (error == 0) + if (error == 0) { *retval = sizeof(struct proc_vnodepathinfo); + } } out: - return(error); + return error; } -int +int proc_pidpathinfo(proc_t p, __unused uint64_t arg, user_addr_t buffer, uint32_t buffersize, __unused int32_t *retval) { int error; vnode_t tvp; - int len = buffersize; + int len = buffersize; char * buf; tvp = p->p_textvp; - if (tvp == NULLVP) - return(ESRCH); + if (tvp == NULLVP) { + return ESRCH; + } buf = (char *)kalloc(buffersize); - if (buf == NULL) - return(ENOMEM); + if (buf == NULL) { + return ENOMEM; + } bzero(buf, buffersize); @@ -1201,7 +1271,7 @@ proc_pidpathinfo(proc_t p, __unused uint64_t arg, user_addr_t buffer, uint32_t b error = copyout(buf, buffer, len); } kfree(buf, buffersize); - return(error); + return error; } int @@ -1214,8 +1284,9 @@ proc_pidpathinfo_internal(proc_t p, __unused uint64_t arg, char *buf, uint32_t b tvp = p->p_textvp; - if (tvp == NULLVP) - return(ESRCH); + if (tvp == NULLVP) { + return ESRCH; + } 
vid = vnode_vid(tvp); error = vnode_getwithvid(tvp, vid); @@ -1223,16 +1294,17 @@ proc_pidpathinfo_internal(proc_t p, __unused uint64_t arg, char *buf, uint32_t b error = vn_getpath_fsenter(tvp, buf, &len); vnode_put(tvp); if (error == 0) { - error = vnode_lookup(buf, 0, &nvp, vfs_context_current()); - if ((error == 0) && ( nvp != NULLVP)) + error = vnode_lookup(buf, 0, &nvp, vfs_context_current()); + if ((error == 0) && (nvp != NULLVP)) { vnode_put(nvp); + } } } - return(error); + return error; } -int +int proc_pidworkqueueinfo(proc_t p, struct proc_workqueueinfo *pwqinfo) { int error = 0; @@ -1240,11 +1312,11 @@ proc_pidworkqueueinfo(proc_t p, struct proc_workqueueinfo *pwqinfo) bzero(pwqinfo, sizeof(struct proc_workqueueinfo)); error = fill_procworkqueue(p, pwqinfo); - if (error) - return(ESRCH); - else - return(0); - + if (error) { + return ESRCH; + } else { + return 0; + } } @@ -1266,8 +1338,9 @@ proc_piduuidinfo(pid_t pid, uuid_t uuid_buf, uint32_t buffersize) struct proc * p = PROC_NULL; int zombref = 0; - if (buffersize < sizeof(uuid_t)) + if (buffersize < sizeof(uuid_t)) { return EINVAL; + } if ((p = proc_find(pid)) == PROC_NULL) { p = proc_find_zombref(pid); @@ -1279,10 +1352,11 @@ proc_piduuidinfo(pid_t pid, uuid_t uuid_buf, uint32_t buffersize) proc_getexecutableuuid(p, (unsigned char *)uuid_buf, buffersize); - if (zombref) + if (zombref) { proc_drop_zombref(p); - else + } else { proc_rele(p); + } return 0; } @@ -1297,19 +1371,20 @@ proc_pidoriginatorpid_uuid(uuid_t uuid, uint32_t buffersize, pid_t *pid) kern_return_t kr; int error; - /* - * Get the current voucher origin pid. The pid returned here + /* + * Get the current voucher origin pid. The pid returned here * might not be valid or may have been recycled. */ kr = thread_get_current_voucher_origin_pid(&originator_pid); /* If errors, convert errors to appropriate format */ if (kr) { - if (kr == KERN_INVALID_TASK) + if (kr == KERN_INVALID_TASK) { error = ESRCH; - else if (kr == KERN_INVALID_VALUE) + } else if (kr == KERN_INVALID_VALUE) { error = ENOATTR; - else + } else { error = EINVAL; + } return error; } @@ -1325,7 +1400,7 @@ int proc_pidoriginatoruuid(uuid_t uuid, uint32_t buffersize) { pid_t originator_pid; - return (proc_pidoriginatorpid_uuid(uuid, buffersize, &originator_pid)); + return proc_pidoriginatorpid_uuid(uuid, buffersize, &originator_pid); } /***************************** proc_pidoriginatorinfo ***************************/ @@ -1337,76 +1412,85 @@ proc_pidoriginatorinfo(int pid, int flavor, user_addr_t buffer, uint32_t buffer uint32_t size; switch (flavor) { - case PROC_PIDORIGINATOR_UUID: - size = PROC_PIDORIGINATOR_UUID_SIZE; - break; - case PROC_PIDORIGINATOR_BGSTATE: - size = PROC_PIDORIGINATOR_BGSTATE_SIZE; - break; - case PROC_PIDORIGINATOR_PID_UUID: - size = PROC_PIDORIGINATOR_PID_UUID_SIZE; - break; - default: - return(EINVAL); + case PROC_PIDORIGINATOR_UUID: + size = PROC_PIDORIGINATOR_UUID_SIZE; + break; + case PROC_PIDORIGINATOR_BGSTATE: + size = PROC_PIDORIGINATOR_BGSTATE_SIZE; + break; + case PROC_PIDORIGINATOR_PID_UUID: + size = PROC_PIDORIGINATOR_PID_UUID_SIZE; + break; + default: + return EINVAL; } - if (buffersize < size) - return(ENOMEM); + if (buffersize < size) { + return ENOMEM; + } - if (pid != 0 && pid != proc_selfpid()) - return (EINVAL); + if (pid != 0 && pid != proc_selfpid()) { + return EINVAL; + } switch (flavor) { - case PROC_PIDORIGINATOR_UUID: { - uuid_t uuid = {}; - - error = proc_pidoriginatoruuid(uuid, sizeof(uuid)); - if (error != 0) - goto out; + case 
PROC_PIDORIGINATOR_UUID: { + uuid_t uuid = {}; - error = copyout(uuid, buffer, size); - if (error == 0) - *retval = size; + error = proc_pidoriginatoruuid(uuid, sizeof(uuid)); + if (error != 0) { + goto out; } - break; - case PROC_PIDORIGINATOR_PID_UUID: { - struct proc_originatorinfo originator_info; - bzero(&originator_info, sizeof(originator_info)); + error = copyout(uuid, buffer, size); + if (error == 0) { + *retval = size; + } + } + break; - error = proc_pidoriginatorpid_uuid(originator_info.originator_uuid, - sizeof(uuid_t), &originator_info.originator_pid); - if (error != 0) - goto out; + case PROC_PIDORIGINATOR_PID_UUID: { + struct proc_originatorinfo originator_info; + bzero(&originator_info, sizeof(originator_info)); - error = copyout(&originator_info, buffer, size); - if (error == 0) - *retval = size; + error = proc_pidoriginatorpid_uuid(originator_info.originator_uuid, + sizeof(uuid_t), &originator_info.originator_pid); + if (error != 0) { + goto out; } - break; - case PROC_PIDORIGINATOR_BGSTATE: { - uint32_t is_backgrounded = 0; - error = proc_get_originatorbgstate(&is_backgrounded); - if (error) - goto out; + error = copyout(&originator_info, buffer, size); + if (error == 0) { + *retval = size; + } + } + break; - error = copyout(&is_backgrounded, buffer, size); - if (error == 0) - *retval = size; + case PROC_PIDORIGINATOR_BGSTATE: { + uint32_t is_backgrounded = 0; + error = proc_get_originatorbgstate(&is_backgrounded); + if (error) { + goto out; } - break; - default: - error = ENOTSUP; + error = copyout(&is_backgrounded, buffer, size); + if (error == 0) { + *retval = size; + } + } + break; + + default: + error = ENOTSUP; } out: return error; } /***************************** proc_listcoalitions ***************************/ -int proc_listcoalitions(int flavor, int type, user_addr_t buffer, - uint32_t buffersize, int32_t *retval) +int +proc_listcoalitions(int flavor, int type, user_addr_t buffer, + uint32_t buffersize, int32_t *retval) { #if CONFIG_COALITIONS int error = ENOTSUP; @@ -1474,22 +1558,25 @@ int proc_listcoalitions(int flavor, int type, user_addr_t buffer, * Only copy out what we really need. */ copyout_sz = k_buffersize; - if (ncoals_ < ncoals) + if (ncoals_ < ncoals) { copyout_sz = ncoals_ * elem_size; + } /* * copy the list up to user space * (we're guaranteed to have a non-null pointer/size here) */ error = copyout(coalinfo, buffer, - copyout_sz < buffersize ? copyout_sz : buffersize); + copyout_sz < buffersize ? 
copyout_sz : buffersize); - if (error == 0) + if (error == 0) { *retval = (int)copyout_sz; + } out: - if (coalinfo) + if (coalinfo) { kfree(coalinfo, k_buffersize); + } return error; #else @@ -1505,7 +1592,8 @@ out: /*************************** proc_can_use_forgeound_hw **************************/ -int proc_can_use_foreground_hw(int pid, user_addr_t u_reason, uint32_t reasonsize, int32_t *retval) +int +proc_can_use_foreground_hw(int pid, user_addr_t u_reason, uint32_t reasonsize, int32_t *retval) { proc_t p = PROC_NULL; int error = 0; @@ -1662,12 +1750,15 @@ no_leader: } out: - if (task != TASK_NULL) + if (task != TASK_NULL) { task_deallocate(task); - if (p != PROC_NULL) + } + if (p != PROC_NULL) { proc_rele(p); - if (reasonsize >= sizeof(reason) && u_reason != (user_addr_t)0) + } + if (reasonsize >= sizeof(reason) && u_reason != (user_addr_t)0) { (void)copyout(&reason, u_reason, sizeof(reason)); + } return error; } @@ -1690,135 +1781,141 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu boolean_t check_same_user; switch (flavor) { - case PROC_PIDLISTFDS: - size = PROC_PIDLISTFD_SIZE; - if (buffer == USER_ADDR_NULL) - size = 0; - break; - case PROC_PIDTBSDINFO: - size = PROC_PIDTBSDINFO_SIZE; - break; - case PROC_PIDTASKINFO: - size = PROC_PIDTASKINFO_SIZE; - break; - case PROC_PIDTASKALLINFO: - size = PROC_PIDTASKALLINFO_SIZE; - break; - case PROC_PIDTHREADINFO: - size = PROC_PIDTHREADINFO_SIZE; - break; - case PROC_PIDLISTTHREADIDS: - size = PROC_PIDLISTTHREADIDS_SIZE; - break; - case PROC_PIDLISTTHREADS: - size = PROC_PIDLISTTHREADS_SIZE; - break; - case PROC_PIDREGIONINFO: - size = PROC_PIDREGIONINFO_SIZE; - break; - case PROC_PIDREGIONPATHINFO: - size = PROC_PIDREGIONPATHINFO_SIZE; - break; - case PROC_PIDVNODEPATHINFO: - size = PROC_PIDVNODEPATHINFO_SIZE; - break; - case PROC_PIDTHREADPATHINFO: - size = PROC_PIDTHREADPATHINFO_SIZE; - break; - case PROC_PIDPATHINFO: - size = MAXPATHLEN; - break; - case PROC_PIDWORKQUEUEINFO: - /* kernel does not have workq info */ - if (pid == 0) - return(EINVAL); - else - size = PROC_PIDWORKQUEUEINFO_SIZE; - break; - case PROC_PIDT_SHORTBSDINFO: - size = PROC_PIDT_SHORTBSDINFO_SIZE; - break; - case PROC_PIDLISTFILEPORTS: - size = PROC_PIDLISTFILEPORTS_SIZE; - if (buffer == (user_addr_t)0) - size = 0; - break; - case PROC_PIDTHREADID64INFO: - size = PROC_PIDTHREADID64INFO_SIZE; - break; - case PROC_PIDUNIQIDENTIFIERINFO: - size = PROC_PIDUNIQIDENTIFIERINFO_SIZE; - break; - case PROC_PIDT_BSDINFOWITHUNIQID: - size = PROC_PIDT_BSDINFOWITHUNIQID_SIZE; - break; - case PROC_PIDARCHINFO: - size = PROC_PIDARCHINFO_SIZE; - break; - case PROC_PIDCOALITIONINFO: - size = PROC_PIDCOALITIONINFO_SIZE; - break; - case PROC_PIDNOTEEXIT: - /* - * Set findzomb explicitly because arg passed - * in is used as note exit status bits. 
- */ - size = PROC_PIDNOTEEXIT_SIZE; - findzomb = 1; - break; - case PROC_PIDEXITREASONINFO: - size = PROC_PIDEXITREASONINFO_SIZE; - findzomb = 1; - break; - case PROC_PIDEXITREASONBASICINFO: - size = PROC_PIDEXITREASONBASICINFOSIZE; - findzomb = 1; - break; - case PROC_PIDREGIONPATHINFO2: - size = PROC_PIDREGIONPATHINFO2_SIZE; - break; - case PROC_PIDREGIONPATHINFO3: - size = PROC_PIDREGIONPATHINFO3_SIZE; - break; - case PROC_PIDLISTUPTRS: - size = PROC_PIDLISTUPTRS_SIZE; - if (buffer == USER_ADDR_NULL) { - size = 0; - } - break; - case PROC_PIDLISTDYNKQUEUES: - size = PROC_PIDLISTDYNKQUEUES_SIZE; - if (buffer == USER_ADDR_NULL) { - size = 0; - } - break; - case PROC_PIDVMRTFAULTINFO: - size = sizeof(vm_rtfault_record_t); - if (buffer == USER_ADDR_NULL) { - size = 0; - } - break; - default: - return(EINVAL); + case PROC_PIDLISTFDS: + size = PROC_PIDLISTFD_SIZE; + if (buffer == USER_ADDR_NULL) { + size = 0; + } + break; + case PROC_PIDTBSDINFO: + size = PROC_PIDTBSDINFO_SIZE; + break; + case PROC_PIDTASKINFO: + size = PROC_PIDTASKINFO_SIZE; + break; + case PROC_PIDTASKALLINFO: + size = PROC_PIDTASKALLINFO_SIZE; + break; + case PROC_PIDTHREADINFO: + size = PROC_PIDTHREADINFO_SIZE; + break; + case PROC_PIDLISTTHREADIDS: + size = PROC_PIDLISTTHREADIDS_SIZE; + break; + case PROC_PIDLISTTHREADS: + size = PROC_PIDLISTTHREADS_SIZE; + break; + case PROC_PIDREGIONINFO: + size = PROC_PIDREGIONINFO_SIZE; + break; + case PROC_PIDREGIONPATHINFO: + size = PROC_PIDREGIONPATHINFO_SIZE; + break; + case PROC_PIDVNODEPATHINFO: + size = PROC_PIDVNODEPATHINFO_SIZE; + break; + case PROC_PIDTHREADPATHINFO: + size = PROC_PIDTHREADPATHINFO_SIZE; + break; + case PROC_PIDPATHINFO: + size = MAXPATHLEN; + break; + case PROC_PIDWORKQUEUEINFO: + /* kernel does not have workq info */ + if (pid == 0) { + return EINVAL; + } else { + size = PROC_PIDWORKQUEUEINFO_SIZE; + } + break; + case PROC_PIDT_SHORTBSDINFO: + size = PROC_PIDT_SHORTBSDINFO_SIZE; + break; + case PROC_PIDLISTFILEPORTS: + size = PROC_PIDLISTFILEPORTS_SIZE; + if (buffer == (user_addr_t)0) { + size = 0; + } + break; + case PROC_PIDTHREADID64INFO: + size = PROC_PIDTHREADID64INFO_SIZE; + break; + case PROC_PIDUNIQIDENTIFIERINFO: + size = PROC_PIDUNIQIDENTIFIERINFO_SIZE; + break; + case PROC_PIDT_BSDINFOWITHUNIQID: + size = PROC_PIDT_BSDINFOWITHUNIQID_SIZE; + break; + case PROC_PIDARCHINFO: + size = PROC_PIDARCHINFO_SIZE; + break; + case PROC_PIDCOALITIONINFO: + size = PROC_PIDCOALITIONINFO_SIZE; + break; + case PROC_PIDNOTEEXIT: + /* + * Set findzomb explicitly because arg passed + * in is used as note exit status bits. 
+ */ + size = PROC_PIDNOTEEXIT_SIZE; + findzomb = 1; + break; + case PROC_PIDEXITREASONINFO: + size = PROC_PIDEXITREASONINFO_SIZE; + findzomb = 1; + break; + case PROC_PIDEXITREASONBASICINFO: + size = PROC_PIDEXITREASONBASICINFOSIZE; + findzomb = 1; + break; + case PROC_PIDREGIONPATHINFO2: + size = PROC_PIDREGIONPATHINFO2_SIZE; + break; + case PROC_PIDREGIONPATHINFO3: + size = PROC_PIDREGIONPATHINFO3_SIZE; + break; + case PROC_PIDLISTUPTRS: + size = PROC_PIDLISTUPTRS_SIZE; + if (buffer == USER_ADDR_NULL) { + size = 0; + } + break; + case PROC_PIDLISTDYNKQUEUES: + size = PROC_PIDLISTDYNKQUEUES_SIZE; + if (buffer == USER_ADDR_NULL) { + size = 0; + } + break; + case PROC_PIDVMRTFAULTINFO: + size = sizeof(vm_rtfault_record_t); + if (buffer == USER_ADDR_NULL) { + size = 0; + } + break; + default: + return EINVAL; } - if (buffersize < size) - return(ENOMEM); + if (buffersize < size) { + return ENOMEM; + } if ((flavor == PROC_PIDPATHINFO) && (buffersize > PROC_PIDPATHINFO_MAXSIZE)) { - return(EOVERFLOW); + return EOVERFLOW; } /* Check if we need to look for zombies */ - if ((flavor == PROC_PIDTBSDINFO) || (flavor == PROC_PIDT_SHORTBSDINFO) || (flavor == PROC_PIDT_BSDINFOWITHUNIQID) + if ((flavor == PROC_PIDTBSDINFO) || (flavor == PROC_PIDT_SHORTBSDINFO) || (flavor == PROC_PIDT_BSDINFOWITHUNIQID) || (flavor == PROC_PIDUNIQIDENTIFIERINFO)) { - if (arg) + if (arg) { findzomb = 1; + } } if ((p = proc_find(pid)) == PROC_NULL) { - if (findzomb) + if (findzomb) { p = proc_find_zombref(pid); + } if (p == PROC_NULL) { error = ESRCH; goto out; @@ -1830,311 +1927,323 @@ proc_pidinfo(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t bu /* Certain operations don't require privileges */ switch (flavor) { - case PROC_PIDT_SHORTBSDINFO: - case PROC_PIDUNIQIDENTIFIERINFO: - case PROC_PIDPATHINFO: - case PROC_PIDCOALITIONINFO: - check_same_user = NO_CHECK_SAME_USER; - break; - default: - check_same_user = CHECK_SAME_USER; - break; + case PROC_PIDT_SHORTBSDINFO: + case PROC_PIDUNIQIDENTIFIERINFO: + case PROC_PIDPATHINFO: + case PROC_PIDCOALITIONINFO: + check_same_user = NO_CHECK_SAME_USER; + break; + default: + check_same_user = CHECK_SAME_USER; + break; } /* Do we have permission to look into this? 
*/ - if ((error = proc_security_policy(p, PROC_INFO_CALL_PIDINFO, flavor, check_same_user))) + if ((error = proc_security_policy(p, PROC_INFO_CALL_PIDINFO, flavor, check_same_user))) { goto out; + } switch (flavor) { - case PROC_PIDLISTFDS: { - error = proc_pidfdlist(p, buffer, buffersize, retval); - } - break; + case PROC_PIDLISTFDS: { + error = proc_pidfdlist(p, buffer, buffersize, retval); + } + break; - case PROC_PIDUNIQIDENTIFIERINFO: { - struct proc_uniqidentifierinfo p_uniqidinfo; - bzero(&p_uniqidinfo, sizeof(p_uniqidinfo)); - proc_piduniqidentifierinfo(p, &p_uniqidinfo); - error = copyout(&p_uniqidinfo, buffer, sizeof(struct proc_uniqidentifierinfo)); - if (error == 0) - *retval = sizeof(struct proc_uniqidentifierinfo); + case PROC_PIDUNIQIDENTIFIERINFO: { + struct proc_uniqidentifierinfo p_uniqidinfo; + bzero(&p_uniqidinfo, sizeof(p_uniqidinfo)); + proc_piduniqidentifierinfo(p, &p_uniqidinfo); + error = copyout(&p_uniqidinfo, buffer, sizeof(struct proc_uniqidentifierinfo)); + if (error == 0) { + *retval = sizeof(struct proc_uniqidentifierinfo); } - break; + } + break; + + case PROC_PIDT_SHORTBSDINFO: + shortversion = 1; + case PROC_PIDT_BSDINFOWITHUNIQID: + case PROC_PIDTBSDINFO: { + struct proc_bsdinfo pbsd; + struct proc_bsdshortinfo pbsd_short; + struct proc_bsdinfowithuniqid pbsd_uniqid; - case PROC_PIDT_SHORTBSDINFO: - shortversion = 1; - case PROC_PIDT_BSDINFOWITHUNIQID: - case PROC_PIDTBSDINFO: { - struct proc_bsdinfo pbsd; - struct proc_bsdshortinfo pbsd_short; - struct proc_bsdinfowithuniqid pbsd_uniqid; + if (flavor == PROC_PIDT_BSDINFOWITHUNIQID) { + uniqidversion = 1; + } - if (flavor == PROC_PIDT_BSDINFOWITHUNIQID) - uniqidversion = 1; + if (shortversion != 0) { + error = proc_pidshortbsdinfo(p, &pbsd_short, zombie); + } else { + error = proc_pidbsdinfo(p, &pbsd, zombie); + if (uniqidversion != 0) { + bzero(&pbsd_uniqid, sizeof(pbsd_uniqid)); + proc_piduniqidentifierinfo(p, &pbsd_uniqid.p_uniqidentifier); + pbsd_uniqid.pbsd = pbsd; + } + } + if (error == 0) { if (shortversion != 0) { - error = proc_pidshortbsdinfo(p, &pbsd_short, zombie); + error = copyout(&pbsd_short, buffer, sizeof(struct proc_bsdshortinfo)); + if (error == 0) { + *retval = sizeof(struct proc_bsdshortinfo); + } + } else if (uniqidversion != 0) { + error = copyout(&pbsd_uniqid, buffer, sizeof(struct proc_bsdinfowithuniqid)); + if (error == 0) { + *retval = sizeof(struct proc_bsdinfowithuniqid); + } } else { - error = proc_pidbsdinfo(p, &pbsd, zombie); - if (uniqidversion != 0) { - bzero(&pbsd_uniqid, sizeof(pbsd_uniqid)); - proc_piduniqidentifierinfo(p, &pbsd_uniqid.p_uniqidentifier); - pbsd_uniqid.pbsd = pbsd; + error = copyout(&pbsd, buffer, sizeof(struct proc_bsdinfo)); + if (error == 0) { + *retval = sizeof(struct proc_bsdinfo); } } - - if (error == 0) { - if (shortversion != 0) { - error = copyout(&pbsd_short, buffer, sizeof(struct proc_bsdshortinfo)); - if (error == 0) - *retval = sizeof(struct proc_bsdshortinfo); - } else if (uniqidversion != 0) { - error = copyout(&pbsd_uniqid, buffer, sizeof(struct proc_bsdinfowithuniqid)); - if (error == 0) - *retval = sizeof(struct proc_bsdinfowithuniqid); - } else { - error = copyout(&pbsd, buffer, sizeof(struct proc_bsdinfo)); - if (error == 0) - *retval = sizeof(struct proc_bsdinfo); - } - } } - break; + } + break; - case PROC_PIDTASKINFO: { - struct proc_taskinfo ptinfo; + case PROC_PIDTASKINFO: { + struct proc_taskinfo ptinfo; - error = proc_pidtaskinfo(p, &ptinfo); + error = proc_pidtaskinfo(p, &ptinfo); + if (error == 0) { + error = 
copyout(&ptinfo, buffer, sizeof(struct proc_taskinfo)); if (error == 0) { - error = copyout(&ptinfo, buffer, sizeof(struct proc_taskinfo)); - if (error == 0) - *retval = sizeof(struct proc_taskinfo); - } + *retval = sizeof(struct proc_taskinfo); + } } - break; + } + break; - case PROC_PIDTASKALLINFO: { - struct proc_taskallinfo pall; - bzero(&pall, sizeof(pall)); - error = proc_pidbsdinfo(p, &pall.pbsd, 0); - error = proc_pidtaskinfo(p, &pall.ptinfo); + case PROC_PIDTASKALLINFO: { + struct proc_taskallinfo pall; + bzero(&pall, sizeof(pall)); + error = proc_pidbsdinfo(p, &pall.pbsd, 0); + error = proc_pidtaskinfo(p, &pall.ptinfo); + if (error == 0) { + error = copyout(&pall, buffer, sizeof(struct proc_taskallinfo)); if (error == 0) { - error = copyout(&pall, buffer, sizeof(struct proc_taskallinfo)); - if (error == 0) - *retval = sizeof(struct proc_taskallinfo); + *retval = sizeof(struct proc_taskallinfo); } } - break; + } + break; - case PROC_PIDTHREADID64INFO: - thuniqueid = true; - case PROC_PIDTHREADINFO:{ + case PROC_PIDTHREADID64INFO: + thuniqueid = true; + case PROC_PIDTHREADINFO:{ struct proc_threadinfo pthinfo; - error = proc_pidthreadinfo(p, arg, thuniqueid, &pthinfo); + error = proc_pidthreadinfo(p, arg, thuniqueid, &pthinfo); + if (error == 0) { + error = copyout(&pthinfo, buffer, sizeof(struct proc_threadinfo)); if (error == 0) { - error = copyout(&pthinfo, buffer, sizeof(struct proc_threadinfo)); - if (error == 0) - *retval = sizeof(struct proc_threadinfo); - } + *retval = sizeof(struct proc_threadinfo); + } } - break; + } + break; - case PROC_PIDLISTTHREADIDS: - thuniqueid = true; - case PROC_PIDLISTTHREADS:{ - error = proc_pidlistthreads(p, thuniqueid, buffer, buffersize, retval); - } - break; + case PROC_PIDLISTTHREADIDS: + thuniqueid = true; + case PROC_PIDLISTTHREADS:{ + error = proc_pidlistthreads(p, thuniqueid, buffer, buffersize, retval); + } + break; - case PROC_PIDREGIONINFO:{ - error = proc_pidregioninfo(p, arg, buffer, buffersize, retval); - } - break; + case PROC_PIDREGIONINFO:{ + error = proc_pidregioninfo(p, arg, buffer, buffersize, retval); + } + break; - case PROC_PIDREGIONPATHINFO:{ - error = proc_pidregionpathinfo(p, arg, buffer, buffersize, retval); - } - break; + case PROC_PIDREGIONPATHINFO:{ + error = proc_pidregionpathinfo(p, arg, buffer, buffersize, retval); + } + break; - case PROC_PIDREGIONPATHINFO2:{ - error = proc_pidregionpathinfo2(p, arg, buffer, buffersize, retval); - } - break; + case PROC_PIDREGIONPATHINFO2:{ + error = proc_pidregionpathinfo2(p, arg, buffer, buffersize, retval); + } + break; - case PROC_PIDREGIONPATHINFO3:{ - error = proc_pidregionpathinfo3(p, arg, buffer, buffersize, retval); - } - break; + case PROC_PIDREGIONPATHINFO3:{ + error = proc_pidregionpathinfo3(p, arg, buffer, buffersize, retval); + } + break; - case PROC_PIDVNODEPATHINFO:{ - error = proc_pidvnodepathinfo(p, arg, buffer, buffersize, retval); - } - break; + case PROC_PIDVNODEPATHINFO:{ + error = proc_pidvnodepathinfo(p, arg, buffer, buffersize, retval); + } + break; - case PROC_PIDTHREADPATHINFO:{ - struct proc_threadwithpathinfo pinfo; + case PROC_PIDTHREADPATHINFO:{ + struct proc_threadwithpathinfo pinfo; - error = proc_pidthreadpathinfo(p, arg, &pinfo); + error = proc_pidthreadpathinfo(p, arg, &pinfo); + if (error == 0) { + error = copyout((caddr_t)&pinfo, buffer, sizeof(struct proc_threadwithpathinfo)); if (error == 0) { - error = copyout((caddr_t)&pinfo, buffer, sizeof(struct proc_threadwithpathinfo)); - if (error == 0) - *retval = sizeof(struct 
proc_threadwithpathinfo); + *retval = sizeof(struct proc_threadwithpathinfo); } } - break; + } + break; - case PROC_PIDPATHINFO: { - error = proc_pidpathinfo(p, arg, buffer, buffersize, retval); - } - break; + case PROC_PIDPATHINFO: { + error = proc_pidpathinfo(p, arg, buffer, buffersize, retval); + } + break; - case PROC_PIDWORKQUEUEINFO:{ - struct proc_workqueueinfo pwqinfo; + case PROC_PIDWORKQUEUEINFO:{ + struct proc_workqueueinfo pwqinfo; - error = proc_pidworkqueueinfo(p, &pwqinfo); + error = proc_pidworkqueueinfo(p, &pwqinfo); + if (error == 0) { + error = copyout(&pwqinfo, buffer, sizeof(struct proc_workqueueinfo)); if (error == 0) { - error = copyout(&pwqinfo, buffer, sizeof(struct proc_workqueueinfo)); - if (error == 0) - *retval = sizeof(struct proc_workqueueinfo); + *retval = sizeof(struct proc_workqueueinfo); } } - break; + } + break; - case PROC_PIDLISTFILEPORTS: { - error = proc_pidfileportlist(p, buffer, buffersize, retval); - } - break; + case PROC_PIDLISTFILEPORTS: { + error = proc_pidfileportlist(p, buffer, buffersize, retval); + } + break; - case PROC_PIDARCHINFO: { - struct proc_archinfo pai; - bzero(&pai, sizeof(pai)); - proc_archinfo(p, &pai); - error = copyout(&pai, buffer, sizeof(struct proc_archinfo)); - if (error == 0) { - *retval = sizeof(struct proc_archinfo); - } + case PROC_PIDARCHINFO: { + struct proc_archinfo pai; + bzero(&pai, sizeof(pai)); + proc_archinfo(p, &pai); + error = copyout(&pai, buffer, sizeof(struct proc_archinfo)); + if (error == 0) { + *retval = sizeof(struct proc_archinfo); } - break; + } + break; - case PROC_PIDCOALITIONINFO: { - struct proc_pidcoalitioninfo pci; - proc_pidcoalitioninfo(p, &pci); - error = copyout(&pci, buffer, sizeof(struct proc_pidcoalitioninfo)); - if (error == 0) { - *retval = sizeof(struct proc_pidcoalitioninfo); - } + case PROC_PIDCOALITIONINFO: { + struct proc_pidcoalitioninfo pci; + proc_pidcoalitioninfo(p, &pci); + error = copyout(&pci, buffer, sizeof(struct proc_pidcoalitioninfo)); + if (error == 0) { + *retval = sizeof(struct proc_pidcoalitioninfo); } - break; + } + break; - case PROC_PIDNOTEEXIT: { - uint32_t data; - error = proc_pidnoteexit(p, arg, &data); + case PROC_PIDNOTEEXIT: { + uint32_t data; + error = proc_pidnoteexit(p, arg, &data); + if (error == 0) { + error = copyout(&data, buffer, sizeof(data)); if (error == 0) { - error = copyout(&data, buffer, sizeof(data)); - if (error == 0) { - *retval = sizeof(data); - } + *retval = sizeof(data); } } - break; + } + break; - case PROC_PIDEXITREASONINFO: { - struct proc_exitreasoninfo eri; + case PROC_PIDEXITREASONINFO: { + struct proc_exitreasoninfo eri; - error = copyin(buffer, &eri, sizeof(eri)); - if (error != 0) { - break; - } + error = copyin(buffer, &eri, sizeof(eri)); + if (error != 0) { + break; + } - error = proc_pidexitreasoninfo(p, &eri, NULL); + error = proc_pidexitreasoninfo(p, &eri, NULL); + if (error == 0) { + error = copyout(&eri, buffer, sizeof(eri)); if (error == 0) { - error = copyout(&eri, buffer, sizeof(eri)); - if (error == 0) { - *retval = sizeof(eri); - } + *retval = sizeof(eri); } } - break; + } + break; - case PROC_PIDEXITREASONBASICINFO: { - struct proc_exitreasonbasicinfo beri; + case PROC_PIDEXITREASONBASICINFO: { + struct proc_exitreasonbasicinfo beri; - bzero(&beri, sizeof(struct proc_exitreasonbasicinfo)); + bzero(&beri, sizeof(struct proc_exitreasonbasicinfo)); - error = proc_pidexitreasoninfo(p, NULL, &beri); + error = proc_pidexitreasoninfo(p, NULL, &beri); + if (error == 0) { + error = copyout(&beri, buffer, sizeof(beri)); if 
(error == 0) { - error = copyout(&beri, buffer, sizeof(beri)); - if (error == 0) { - *retval = sizeof(beri); - } + *retval = sizeof(beri); } } + } + break; + + case PROC_PIDLISTUPTRS: + error = proc_pidlistuptrs(p, buffer, buffersize, retval); break; - case PROC_PIDLISTUPTRS: - error = proc_pidlistuptrs(p, buffer, buffersize, retval); - break; + case PROC_PIDLISTDYNKQUEUES: + error = kevent_copyout_proc_dynkqids(p, buffer, buffersize, retval); + break; + case PROC_PIDVMRTFAULTINFO: { + /* This interface can only be employed on the current + * process. We will eventually enforce an entitlement. + */ + *retval = 0; - case PROC_PIDLISTDYNKQUEUES: - error = kevent_copyout_proc_dynkqids(p, buffer, buffersize, retval); + if (p != current_proc()) { + error = EINVAL; break; - case PROC_PIDVMRTFAULTINFO: { - /* This interface can only be employed on the current - * process. We will eventually enforce an entitlement. - */ - *retval = 0; - - if (p != current_proc()) { - error = EINVAL; - break; - } + } - size_t kbufsz = MIN(buffersize, vmrtfaultinfo_bufsz()); - void *vmrtfbuf = kalloc(kbufsz); + size_t kbufsz = MIN(buffersize, vmrtfaultinfo_bufsz()); + void *vmrtfbuf = kalloc(kbufsz); - if (vmrtfbuf == NULL) { - error = ENOMEM; - break; - } + if (vmrtfbuf == NULL) { + error = ENOMEM; + break; + } - bzero(vmrtfbuf, kbufsz); + bzero(vmrtfbuf, kbufsz); - uint64_t effpid = get_current_unique_pid(); - /* The VM may choose to provide more comprehensive records - * for root-privileged users on internal configurations. - */ - boolean_t isroot = (suser(kauth_cred_get(), (u_short *)0) == 0); - int vmf_residue = vmrtf_extract(effpid, isroot, kbufsz, vmrtfbuf, retval); - int vmfsz = *retval * sizeof(vm_rtfault_record_t); + uint64_t effpid = get_current_unique_pid(); + /* The VM may choose to provide more comprehensive records + * for root-privileged users on internal configurations. 
+ */ + boolean_t isroot = (suser(kauth_cred_get(), (u_short *)0) == 0); + int vmf_residue = vmrtf_extract(effpid, isroot, kbufsz, vmrtfbuf, retval); + int vmfsz = *retval * sizeof(vm_rtfault_record_t); - error = 0; - if (vmfsz) { - error = copyout(vmrtfbuf, buffer, vmfsz); - } + error = 0; + if (vmfsz) { + error = copyout(vmrtfbuf, buffer, vmfsz); + } - if (error == 0) { - if (vmf_residue) { - error = ENOMEM; - } + if (error == 0) { + if (vmf_residue) { + error = ENOMEM; } - kfree(vmrtfbuf, kbufsz); } - break; - default: - error = ENOTSUP; - break; + kfree(vmrtfbuf, kbufsz); } - + break; + default: + error = ENOTSUP; + break; + } + out: - if (gotref) + if (gotref) { proc_rele(p); - else if (zombie) + } else if (zombie) { proc_drop_zombref(p); - return(error); + } + return error; } @@ -2142,10 +2251,10 @@ int pid_vnodeinfo(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, __unused uint32_t buffersize, int32_t * retval) { struct vnode_fdinfo vfi; - int error= 0; + int error = 0; if ((error = vnode_getwithvid(vp, vid)) != 0) { - return(error); + return error; } bzero(&vfi, sizeof(struct vnode_fdinfo)); fill_fileinfo(fp, proc, fd, &vfi.pfi); @@ -2153,35 +2262,38 @@ pid_vnodeinfo(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int f vnode_put(vp); if (error == 0) { error = copyout((caddr_t)&vfi, buffer, sizeof(struct vnode_fdinfo)); - if (error == 0) + if (error == 0) { *retval = sizeof(struct vnode_fdinfo); + } } - return(error); + return error; } int pid_vnodeinfopath(vnode_t vp, uint32_t vid, struct fileproc * fp, proc_t proc, int fd, user_addr_t buffer, __unused uint32_t buffersize, int32_t * retval) { struct vnode_fdinfowithpath vfip; - int count, error= 0; + int count, error = 0; if ((error = vnode_getwithvid(vp, vid)) != 0) { - return(error); + return error; } bzero(&vfip, sizeof(struct vnode_fdinfowithpath)); fill_fileinfo(fp, proc, fd, &vfip.pfi); - error = fill_vnodeinfo(vp, &vfip.pvip.vip_vi) ; + error = fill_vnodeinfo(vp, &vfip.pvip.vip_vi); if (error == 0) { count = MAXPATHLEN; vn_getpath(vp, &vfip.pvip.vip_path[0], &count); - vfip.pvip.vip_path[MAXPATHLEN-1] = 0; + vfip.pvip.vip_path[MAXPATHLEN - 1] = 0; vnode_put(vp); error = copyout((caddr_t)&vfip, buffer, sizeof(struct vnode_fdinfowithpath)); - if (error == 0) + if (error == 0) { *retval = sizeof(struct vnode_fdinfowithpath); - } else + } + } else { vnode_put(vp); - return(error); + } + return error; } void @@ -2191,25 +2303,32 @@ fill_fileinfo(struct fileproc * fp, proc_t proc, int fd, struct proc_fileinfo * fproc->fi_status = 0; fproc->fi_offset = fp->f_fglob->fg_offset; fproc->fi_type = FILEGLOB_DTYPE(fp->f_fglob); - if (fp->f_fglob->fg_count > 1) + if (fp->f_fglob->fg_count > 1) { fproc->fi_status |= PROC_FP_SHARED; + } if (proc != PROC_NULL) { - if ((FDFLAGS_GET(proc, fd) & UF_EXCLOSE) != 0) + if ((FDFLAGS_GET(proc, fd) & UF_EXCLOSE) != 0) { fproc->fi_status |= PROC_FP_CLEXEC; - if ((FDFLAGS_GET(proc, fd) & UF_FORKCLOSE) != 0) + } + if ((FDFLAGS_GET(proc, fd) & UF_FORKCLOSE) != 0) { fproc->fi_status |= PROC_FP_CLFORK; + } } if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) { fproc->fi_status |= PROC_FP_GUARDED; fproc->fi_guardflags = 0; - if (fp_isguarded(fp, GUARD_CLOSE)) + if (fp_isguarded(fp, GUARD_CLOSE)) { fproc->fi_guardflags |= PROC_FI_GUARD_CLOSE; - if (fp_isguarded(fp, GUARD_DUP)) + } + if (fp_isguarded(fp, GUARD_DUP)) { fproc->fi_guardflags |= PROC_FI_GUARD_DUP; - if (fp_isguarded(fp, GUARD_SOCKET_IPC)) + } + if (fp_isguarded(fp, GUARD_SOCKET_IPC)) { fproc->fi_guardflags 
|= PROC_FI_GUARD_SOCKET_IPC; - if (fp_isguarded(fp, GUARD_FILEPORT)) + } + if (fp_isguarded(fp, GUARD_FILEPORT)) { fproc->fi_guardflags |= PROC_FI_GUARD_FILEPORT; + } } } @@ -2218,29 +2337,30 @@ fill_fileinfo(struct fileproc * fp, proc_t proc, int fd, struct proc_fileinfo * int fill_vnodeinfo(vnode_t vp, struct vnode_info *vinfo) { - vfs_context_t context; - struct stat64 sb; - int error = 0; + vfs_context_t context; + struct stat64 sb; + int error = 0; - bzero(&sb, sizeof(struct stat64)); - context = vfs_context_create((vfs_context_t)0); - error = vn_stat(vp, &sb, NULL, 1, context); - (void)vfs_context_rele(context); + bzero(&sb, sizeof(struct stat64)); + context = vfs_context_create((vfs_context_t)0); + error = vn_stat(vp, &sb, NULL, 1, context); + (void)vfs_context_rele(context); - munge_vinfo_stat(&sb, &vinfo->vi_stat); + munge_vinfo_stat(&sb, &vinfo->vi_stat); - if (error != 0) - goto out; + if (error != 0) { + goto out; + } - if (vp->v_mount != dead_mountp) { - vinfo->vi_fsid = vp->v_mount->mnt_vfsstat.f_fsid; - } else { - vinfo->vi_fsid.val[0] = 0; - vinfo->vi_fsid.val[1] = 0; - } - vinfo->vi_type = vp->v_type; + if (vp->v_mount != dead_mountp) { + vinfo->vi_fsid = vp->v_mount->mnt_vfsstat.f_fsid; + } else { + vinfo->vi_fsid.val[0] = 0; + vinfo->vi_fsid.val[1] = 0; + } + vinfo->vi_type = vp->v_type; out: - return(error); + return error; } int @@ -2253,14 +2373,15 @@ pid_socketinfo(socket_t so, struct fileproc *fp, proc_t proc, int fd, user_addr_ bzero(&s, sizeof(struct socket_fdinfo)); fill_fileinfo(fp, proc, fd, &s.pfi); if ((error = fill_socketinfo(so, &s.psi)) == 0) { - if ((error = copyout(&s, buffer, sizeof(struct socket_fdinfo))) == 0) - *retval = sizeof(struct socket_fdinfo); + if ((error = copyout(&s, buffer, sizeof(struct socket_fdinfo))) == 0) { + *retval = sizeof(struct socket_fdinfo); + } } - return (error); + return error; #else #pragma unused(so, fp, proc, fd, buffer) *retval = 0; - return (ENOTSUP); + return ENOTSUP; #endif } @@ -2274,11 +2395,12 @@ pid_pseminfo(struct psemnode *psem, struct fileproc *fp, proc_t proc, int fd, us fill_fileinfo(fp, proc, fd, &pseminfo.pfi); if ((error = fill_pseminfo(psem, &pseminfo.pseminfo)) == 0) { - if ((error = copyout(&pseminfo, buffer, sizeof(struct psem_fdinfo))) == 0) + if ((error = copyout(&pseminfo, buffer, sizeof(struct psem_fdinfo))) == 0) { *retval = sizeof(struct psem_fdinfo); + } } - return(error); + return error; } int @@ -2291,11 +2413,12 @@ pid_pshminfo(struct pshmnode *pshm, struct fileproc *fp, proc_t proc, int fd, us fill_fileinfo(fp, proc, fd, &pshminfo.pfi); if ((error = fill_pshminfo(pshm, &pshminfo.pshminfo)) == 0) { - if ((error = copyout(&pshminfo, buffer, sizeof(struct pshm_fdinfo))) == 0) + if ((error = copyout(&pshminfo, buffer, sizeof(struct pshm_fdinfo))) == 0) { *retval = sizeof(struct pshm_fdinfo); + } } - return(error); + return error; } int @@ -2307,11 +2430,12 @@ pid_pipeinfo(struct pipe * p, struct fileproc *fp, proc_t proc, int fd, user_ad bzero(&pipeinfo, sizeof(struct pipe_fdinfo)); fill_fileinfo(fp, proc, fd, &pipeinfo.pfi); if ((error = fill_pipeinfo(p, &pipeinfo.pipeinfo)) == 0) { - if ((error = copyout(&pipeinfo, buffer, sizeof(struct pipe_fdinfo))) == 0) + if ((error = copyout(&pipeinfo, buffer, sizeof(struct pipe_fdinfo))) == 0) { *retval = sizeof(struct pipe_fdinfo); + } } - return(error); + return error; } int @@ -2329,15 +2453,16 @@ pid_kqueueinfo(struct kqueue * kq, struct fileproc *fp, proc_t proc, int fd, use } if ((error = fill_kqueueinfo(kq, &kqinfo.kqueueinfo)) == 0) { - if 
((error = copyout(&kqinfo, buffer, sizeof(struct kqueue_fdinfo))) == 0) + if ((error = copyout(&kqinfo, buffer, sizeof(struct kqueue_fdinfo))) == 0) { *retval = sizeof(struct kqueue_fdinfo); + } } - return(error); + return error; } int -pid_atalkinfo(__unused struct atalk * at, __unused struct fileproc *fp, __unused proc_t proc, __unused int fd, __unused user_addr_t buffer, __unused uint32_t buffersize, __unused int32_t * retval) +pid_atalkinfo(__unused struct atalk * at, __unused struct fileproc *fp, __unused proc_t proc, __unused int fd, __unused user_addr_t buffer, __unused uint32_t buffersize, __unused int32_t * retval) { return ENOTSUP; } @@ -2345,7 +2470,7 @@ pid_atalkinfo(__unused struct atalk * at, __unused struct fileproc *fp, __unuse /************************** proc_pidfdinfo routine ***************************/ int -proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval) +proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffersize, int32_t * retval) { proc_t p; int error = ENOTSUP; @@ -2353,43 +2478,44 @@ proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffer uint32_t size; switch (flavor) { - case PROC_PIDFDVNODEINFO: - size = PROC_PIDFDVNODEINFO_SIZE; - break; - case PROC_PIDFDVNODEPATHINFO: - size = PROC_PIDFDVNODEPATHINFO_SIZE; - break; - case PROC_PIDFDSOCKETINFO: - size = PROC_PIDFDSOCKETINFO_SIZE; - break; - case PROC_PIDFDPSEMINFO: - size = PROC_PIDFDPSEMINFO_SIZE; - break; - case PROC_PIDFDPSHMINFO: - size = PROC_PIDFDPSHMINFO_SIZE; - break; - case PROC_PIDFDPIPEINFO: - size = PROC_PIDFDPIPEINFO_SIZE; - break; - case PROC_PIDFDKQUEUEINFO: - size = PROC_PIDFDKQUEUEINFO_SIZE; - break; - case PROC_PIDFDKQUEUE_EXTINFO: - size = PROC_PIDFDKQUEUE_EXTINFO_SIZE; - if (buffer == (user_addr_t)0) - size = 0; - break; - case PROC_PIDFDATALKINFO: - size = PROC_PIDFDATALKINFO_SIZE; - break; - - default: - return(EINVAL); + case PROC_PIDFDVNODEINFO: + size = PROC_PIDFDVNODEINFO_SIZE; + break; + case PROC_PIDFDVNODEPATHINFO: + size = PROC_PIDFDVNODEPATHINFO_SIZE; + break; + case PROC_PIDFDSOCKETINFO: + size = PROC_PIDFDSOCKETINFO_SIZE; + break; + case PROC_PIDFDPSEMINFO: + size = PROC_PIDFDPSEMINFO_SIZE; + break; + case PROC_PIDFDPSHMINFO: + size = PROC_PIDFDPSHMINFO_SIZE; + break; + case PROC_PIDFDPIPEINFO: + size = PROC_PIDFDPIPEINFO_SIZE; + break; + case PROC_PIDFDKQUEUEINFO: + size = PROC_PIDFDKQUEUEINFO_SIZE; + break; + case PROC_PIDFDKQUEUE_EXTINFO: + size = PROC_PIDFDKQUEUE_EXTINFO_SIZE; + if (buffer == (user_addr_t)0) { + size = 0; + } + break; + case PROC_PIDFDATALKINFO: + size = PROC_PIDFDATALKINFO_SIZE; + break; + default: + return EINVAL; } - if (buffersize < size) - return(ENOMEM); + if (buffersize < size) { + return ENOMEM; + } if ((p = proc_find(pid)) == PROC_NULL) { error = ESRCH; @@ -2397,126 +2523,127 @@ proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffer } /* Do we have permission to look into this? 
*/ - if ((error = proc_security_policy(p, PROC_INFO_CALL_PIDFDINFO, flavor, CHECK_SAME_USER))) + if ((error = proc_security_policy(p, PROC_INFO_CALL_PIDFDINFO, flavor, CHECK_SAME_USER))) { goto out1; + } switch (flavor) { - case PROC_PIDFDVNODEINFO: { - vnode_t vp; - uint32_t vid=0; + case PROC_PIDFDVNODEINFO: { + vnode_t vp; + uint32_t vid = 0; - if ((error = fp_getfvpandvid(p, fd, &fp, &vp, &vid)) !=0) { - goto out1; - } - /* no need to be under the fdlock */ - error = pid_vnodeinfo(vp, vid, fp, p, fd, buffer, buffersize, retval); + if ((error = fp_getfvpandvid(p, fd, &fp, &vp, &vid)) != 0) { + goto out1; } - break; - - case PROC_PIDFDVNODEPATHINFO: { - vnode_t vp; - uint32_t vid=0; + /* no need to be under the fdlock */ + error = pid_vnodeinfo(vp, vid, fp, p, fd, buffer, buffersize, retval); + } + break; - if ((error = fp_getfvpandvid(p, fd, &fp, &vp, &vid)) !=0) { - goto out1; - } + case PROC_PIDFDVNODEPATHINFO: { + vnode_t vp; + uint32_t vid = 0; - /* no need to be under the fdlock */ - error = pid_vnodeinfopath(vp, vid, fp, p, fd, buffer, buffersize, retval); + if ((error = fp_getfvpandvid(p, fd, &fp, &vp, &vid)) != 0) { + goto out1; } - break; - case PROC_PIDFDSOCKETINFO: { - socket_t so; + /* no need to be under the fdlock */ + error = pid_vnodeinfopath(vp, vid, fp, p, fd, buffer, buffersize, retval); + } + break; - if ((error = fp_getfsock(p, fd, &fp, &so)) !=0) { - goto out1; - } - /* no need to be under the fdlock */ - error = pid_socketinfo(so, fp, p, fd, buffer, buffersize, retval); + case PROC_PIDFDSOCKETINFO: { + socket_t so; + + if ((error = fp_getfsock(p, fd, &fp, &so)) != 0) { + goto out1; } - break; + /* no need to be under the fdlock */ + error = pid_socketinfo(so, fp, p, fd, buffer, buffersize, retval); + } + break; - case PROC_PIDFDPSEMINFO: { - struct psemnode * psem; + case PROC_PIDFDPSEMINFO: { + struct psemnode * psem; - if ((error = fp_getfpsem(p, fd, &fp, &psem)) !=0) { - goto out1; - } - /* no need to be under the fdlock */ - error = pid_pseminfo(psem, fp, p, fd, buffer, buffersize, retval); + if ((error = fp_getfpsem(p, fd, &fp, &psem)) != 0) { + goto out1; } - break; + /* no need to be under the fdlock */ + error = pid_pseminfo(psem, fp, p, fd, buffer, buffersize, retval); + } + break; - case PROC_PIDFDPSHMINFO: { - struct pshmnode * pshm; + case PROC_PIDFDPSHMINFO: { + struct pshmnode * pshm; - if ((error = fp_getfpshm(p, fd, &fp, &pshm)) !=0) { - goto out1; - } - /* no need to be under the fdlock */ - error = pid_pshminfo(pshm, fp, p, fd, buffer, buffersize, retval); + if ((error = fp_getfpshm(p, fd, &fp, &pshm)) != 0) { + goto out1; } - break; + /* no need to be under the fdlock */ + error = pid_pshminfo(pshm, fp, p, fd, buffer, buffersize, retval); + } + break; - case PROC_PIDFDPIPEINFO: { - struct pipe * cpipe; + case PROC_PIDFDPIPEINFO: { + struct pipe * cpipe; - if ((error = fp_getfpipe(p, fd, &fp, &cpipe)) !=0) { - goto out1; - } - /* no need to be under the fdlock */ - error = pid_pipeinfo(cpipe, fp, p, fd, buffer, buffersize, retval); + if ((error = fp_getfpipe(p, fd, &fp, &cpipe)) != 0) { + goto out1; } - break; + /* no need to be under the fdlock */ + error = pid_pipeinfo(cpipe, fp, p, fd, buffer, buffersize, retval); + } + break; - case PROC_PIDFDKQUEUEINFO: { - struct kqueue * kq; + case PROC_PIDFDKQUEUEINFO: { + struct kqueue * kq; - if (fd == -1) { - if ((kq = p->p_fd->fd_wqkqueue) == NULL) { - /* wqkqueue is initialized on-demand */ - error = 0; - break; - } - } else if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0) { - goto out1; + if (fd == 
-1) { + if ((kq = p->p_fd->fd_wqkqueue) == NULL) { + /* wqkqueue is initialized on-demand */ + error = 0; + break; } - - /* no need to be under the fdlock */ - error = pid_kqueueinfo(kq, fp, p, fd, buffer, buffersize, retval); + } else if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0) { + goto out1; } - break; - case PROC_PIDFDKQUEUE_EXTINFO: { - struct kqueue * kq; + /* no need to be under the fdlock */ + error = pid_kqueueinfo(kq, fp, p, fd, buffer, buffersize, retval); + } + break; - if (fd == -1) { - if ((kq = p->p_fd->fd_wqkqueue) == NULL) { - /* wqkqueue is initialized on-demand */ - error = 0; - break; - } - } else if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0) { - goto out1; - } - error = pid_kqueue_extinfo(p, kq, buffer, buffersize, retval); - } - break; + case PROC_PIDFDKQUEUE_EXTINFO: { + struct kqueue * kq; - default: { - error = EINVAL; + if (fd == -1) { + if ((kq = p->p_fd->fd_wqkqueue) == NULL) { + /* wqkqueue is initialized on-demand */ + error = 0; + break; + } + } else if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0) { goto out1; } + error = pid_kqueue_extinfo(p, kq, buffer, buffersize, retval); + } + break; + + default: { + error = EINVAL; + goto out1; + } } if (fp) { - fp_drop(p, fd, fp , 0); + fp_drop(p, fd, fp, 0); } -out1 : +out1: proc_rele(p); out: - return(error); + return error; } #define MAX_UPTRS 16392 @@ -2576,21 +2703,21 @@ out: */ struct fileport_info_args { - int fia_flavor; - user_addr_t fia_buffer; - uint32_t fia_buffersize; - int32_t *fia_retval; + int fia_flavor; + user_addr_t fia_buffer; + uint32_t fia_buffersize; + int32_t *fia_retval; }; static kern_return_t proc_fileport_info(__unused mach_port_name_t name, - struct fileglob *fg, void *arg) + struct fileglob *fg, void *arg) { struct fileport_info_args *fia = arg; struct fileproc __fileproc, *fp = &__fileproc; int error; - bzero(fp, sizeof (*fp)); + bzero(fp, sizeof(*fp)); fp->f_fglob = fg; switch (fia->fia_flavor) { @@ -2604,7 +2731,7 @@ proc_fileport_info(__unused mach_port_name_t name, vp = (struct vnode *)fg->fg_data; error = pid_vnodeinfopath(vp, vnode_vid(vp), fp, PROC_NULL, 0, fia->fia_buffer, fia->fia_buffersize, fia->fia_retval); - } break; + } break; case PROC_PIDFILEPORTSOCKETINFO: { socket_t so; @@ -2616,44 +2743,44 @@ proc_fileport_info(__unused mach_port_name_t name, so = (socket_t)fg->fg_data; error = pid_socketinfo(so, fp, PROC_NULL, 0, fia->fia_buffer, fia->fia_buffersize, fia->fia_retval); - } break; + } break; case PROC_PIDFILEPORTPSHMINFO: { struct pshmnode *pshm; if (FILEGLOB_DTYPE(fg) != DTYPE_PSXSHM) { - error = EBADF; /* ick - mirror fp_getfpshm */ + error = EBADF; /* ick - mirror fp_getfpshm */ break; } pshm = (struct pshmnode *)fg->fg_data; error = pid_pshminfo(pshm, fp, PROC_NULL, 0, fia->fia_buffer, fia->fia_buffersize, fia->fia_retval); - } break; + } break; case PROC_PIDFILEPORTPIPEINFO: { struct pipe *cpipe; if (FILEGLOB_DTYPE(fg) != DTYPE_PIPE) { - error = EBADF; /* ick - mirror fp_getfpipe */ + error = EBADF; /* ick - mirror fp_getfpipe */ break; } cpipe = (struct pipe *)fg->fg_data; error = pid_pipeinfo(cpipe, fp, PROC_NULL, 0, fia->fia_buffer, fia->fia_buffersize, fia->fia_retval); - } break; + } break; default: error = EINVAL; break; } - return (error); + return error; } /************************* proc_pidfileportinfo routine *********************/ int proc_pidfileportinfo(int pid, int flavor, mach_port_name_t name, - user_addr_t buffer, uint32_t buffersize, int32_t *retval) + user_addr_t buffer, uint32_t buffersize, int32_t *retval) { proc_t p; int error = ENOTSUP; @@ 
-2676,19 +2803,21 @@ proc_pidfileportinfo(int pid, int flavor, mach_port_name_t name, size = PROC_PIDFILEPORTPIPEINFO_SIZE; break; default: - return (EINVAL); + return EINVAL; + } + + if (buffersize < size) { + return ENOMEM; } - - if (buffersize < size) - return (ENOMEM); if ((p = proc_find(pid)) == PROC_NULL) { error = ESRCH; goto out; } /* Do we have permission to look into this? */ - if ((error = proc_security_policy(p, PROC_INFO_CALL_PIDFILEPORTINFO, flavor, CHECK_SAME_USER))) + if ((error = proc_security_policy(p, PROC_INFO_CALL_PIDFILEPORTINFO, flavor, CHECK_SAME_USER))) { goto out1; + } fia.fia_flavor = flavor; fia.fia_buffer = buffer; @@ -2696,12 +2825,13 @@ proc_pidfileportinfo(int pid, int flavor, mach_port_name_t name, fia.fia_retval = retval; if (fileport_invoke(p->task, name, - proc_fileport_info, &fia, &error) != KERN_SUCCESS) + proc_fileport_info, &fia, &error) != KERN_SUCCESS) { error = EINVAL; + } out1: proc_rele(p); out: - return (error); + return error; } int @@ -2710,22 +2840,24 @@ proc_security_policy(proc_t targetp, __unused int callnum, __unused int flavor, #if CONFIG_MACF int error = 0; - if ((error = mac_proc_check_proc_info(current_proc(), targetp, callnum, flavor))) - return (error); + if ((error = mac_proc_check_proc_info(current_proc(), targetp, callnum, flavor))) { + return error; + } #endif /* The 'listpids' call doesn't have a target proc */ if (targetp == PROC_NULL) { assert(callnum == PROC_INFO_CALL_LISTPIDS && check_same_user == NO_CHECK_SAME_USER); - return (0); + return 0; } /* * Check for 'get information for processes owned by other users' privilege * root has this privilege by default */ - if (priv_check_cred(kauth_cred_get(), PRIV_GLOBAL_PROC_INFO, 0) == 0) + if (priv_check_cred(kauth_cred_get(), PRIV_GLOBAL_PROC_INFO, 0) == 0) { check_same_user = FALSE; + } if (check_same_user) { kauth_cred_t target_cred; @@ -2735,24 +2867,26 @@ proc_security_policy(proc_t targetp, __unused int callnum, __unused int flavor, target_uid = kauth_cred_getuid(target_cred); kauth_cred_unref(&target_cred); - if (kauth_getuid() != target_uid) - return(EPERM); + if (kauth_getuid() != target_uid) { + return EPERM; + } } - return(0); + return 0; } -int +int proc_kernmsgbuf(user_addr_t buffer, uint32_t buffersize, int32_t * retval) { if (suser(kauth_cred_get(), (u_short *)0) == 0) { - return(log_dmesg(buffer, buffersize, retval)); - } else - return(EPERM); + return log_dmesg(buffer, buffersize, retval); + } else { + return EPERM; + } } /* ********* process control sets on self only */ -int +int proc_setcontrol(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t buffersize, __unused int32_t * retval) { struct proc * pself = PROC_NULL; @@ -2762,83 +2896,88 @@ proc_setcontrol(int pid, int flavor, uint64_t arg, user_addr_t buffer, uint32_t char name_buf[MAXTHREADNAMESIZE]; pself = current_proc(); - if (pid != pself->p_pid) - return(EINVAL); + if (pid != pself->p_pid) { + return EINVAL; + } /* Do we have permission to look into this? 
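 * The answer comes from proc_security_policy(), shown in full later in this
 * section: a mac_proc_check_proc_info() MACF hook when CONFIG_MACF is built
 * in, then a same-uid comparison against the target that is waived when the
 * caller holds PRIV_GLOBAL_PROC_INFO (root has that privilege by default).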
*/ - if ((error = proc_security_policy(pself, PROC_INFO_CALL_SETCONTROL, flavor, NO_CHECK_SAME_USER))) + if ((error = proc_security_policy(pself, PROC_INFO_CALL_SETCONTROL, flavor, NO_CHECK_SAME_USER))) { goto out; + } switch (flavor) { - case PROC_SELFSET_PCONTROL: { - if (pcontrol > P_PCMAX) - return(EINVAL); - proc_lock(pself); - /* reset existing control setting while retaining action state */ - pself->p_pcaction &= PROC_ACTION_MASK; - /* set new control state */ - pself->p_pcaction |= pcontrol; - proc_unlock(pself); + case PROC_SELFSET_PCONTROL: { + if (pcontrol > P_PCMAX) { + return EINVAL; } - break; + proc_lock(pself); + /* reset existing control setting while retaining action state */ + pself->p_pcaction &= PROC_ACTION_MASK; + /* set new control state */ + pself->p_pcaction |= pcontrol; + proc_unlock(pself); + } + break; - case PROC_SELFSET_THREADNAME: { - /* - * This is a bit ugly, as it copies the name into the kernel, and then - * invokes bsd_setthreadname again to copy it into the uthread name - * buffer. Hopefully this isn't such a hot codepath that an additional - * MAXTHREADNAMESIZE copy is a big issue. - */ - if (buffersize > (MAXTHREADNAMESIZE - 1)) { - return ENAMETOOLONG; - } + case PROC_SELFSET_THREADNAME: { + /* + * This is a bit ugly, as it copies the name into the kernel, and then + * invokes bsd_setthreadname again to copy it into the uthread name + * buffer. Hopefully this isn't such a hot codepath that an additional + * MAXTHREADNAMESIZE copy is a big issue. + */ + if (buffersize > (MAXTHREADNAMESIZE - 1)) { + return ENAMETOOLONG; + } - ut = current_uthread(); + ut = current_uthread(); - bzero(name_buf, MAXTHREADNAMESIZE); - error = copyin(buffer, name_buf, buffersize); + bzero(name_buf, MAXTHREADNAMESIZE); + error = copyin(buffer, name_buf, buffersize); - if (!error) { - bsd_setthreadname(ut, name_buf); - } + if (!error) { + bsd_setthreadname(ut, name_buf); } - break; - - case PROC_SELFSET_VMRSRCOWNER: { - /* need to to be superuser */ - if (suser(kauth_cred_get(), (u_short *)0) != 0) { - error = EPERM; - goto out; - } + } + break; - proc_lock(pself); - /* reset existing control setting while retaining action state */ - pself->p_lflag |= P_LVMRSRCOWNER; - proc_unlock(pself); + case PROC_SELFSET_VMRSRCOWNER: { + /* need to to be superuser */ + if (suser(kauth_cred_get(), (u_short *)0) != 0) { + error = EPERM; + goto out; } - break; - case PROC_SELFSET_DELAYIDLESLEEP: { - /* mark or clear the process property to delay idle sleep disk IO */ - if (pcontrol != 0) - OSBitOrAtomic(P_DELAYIDLESLEEP, &pself->p_flag); - else - OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP), &pself->p_flag); + proc_lock(pself); + /* reset existing control setting while retaining action state */ + pself->p_lflag |= P_LVMRSRCOWNER; + proc_unlock(pself); + } + break; + + case PROC_SELFSET_DELAYIDLESLEEP: { + /* mark or clear the process property to delay idle sleep disk IO */ + if (pcontrol != 0) { + OSBitOrAtomic(P_DELAYIDLESLEEP, &pself->p_flag); + } else { + OSBitAndAtomic(~((uint32_t)P_DELAYIDLESLEEP), &pself->p_flag); } - break; + } + break; - default: - error = ENOTSUP; + default: + error = ENOTSUP; } - + out: - return(error); + return error; } #if CONFIG_MEMORYSTATUS int -proc_dirtycontrol(int pid, int flavor, uint64_t arg, int32_t *retval) { +proc_dirtycontrol(int pid, int flavor, uint64_t arg, int32_t *retval) +{ struct proc *target_p; int error = 0; uint32_t pcontrol = (uint32_t)arg; @@ -2856,17 +2995,18 @@ proc_dirtycontrol(int pid, int flavor, uint64_t arg, int32_t *retval) { 
zombref = 1; } - if (target_p == PROC_NULL) - return(ESRCH); - + if (target_p == PROC_NULL) { + return ESRCH; + } } my_cred = kauth_cred_get(); target_cred = kauth_cred_proc_ref(target_p); /* Do we have permission to look into this? */ - if ((error = proc_security_policy(target_p, PROC_INFO_CALL_DIRTYCONTROL, flavor, NO_CHECK_SAME_USER))) + if ((error = proc_security_policy(target_p, PROC_INFO_CALL_DIRTYCONTROL, flavor, NO_CHECK_SAME_USER))) { goto out; + } selfpid = proc_selfpid(); if (pid == selfpid) { @@ -2874,67 +3014,69 @@ proc_dirtycontrol(int pid, int flavor, uint64_t arg, int32_t *retval) { } else if (target_p->p_ppid == selfpid) { child = TRUE; } - - switch (flavor) { - case PROC_DIRTYCONTROL_TRACK: { - /* Only allow the process itself, its parent, or root */ - if ((self == FALSE) && (child == FALSE) && kauth_cred_issuser(kauth_cred_get()) != TRUE) { - error = EPERM; - goto out; - } - error = memorystatus_dirty_track(target_p, pcontrol); + switch (flavor) { + case PROC_DIRTYCONTROL_TRACK: { + /* Only allow the process itself, its parent, or root */ + if ((self == FALSE) && (child == FALSE) && kauth_cred_issuser(kauth_cred_get()) != TRUE) { + error = EPERM; + goto out; } - break; - case PROC_DIRTYCONTROL_SET: { - /* Check privileges; use cansignal() here since the process could be terminated */ - if (!cansignal(current_proc(), my_cred, target_p, SIGKILL)) { - error = EPERM; - goto out; - } - - error = memorystatus_dirty_set(target_p, self, pcontrol); + error = memorystatus_dirty_track(target_p, pcontrol); + } + break; + + case PROC_DIRTYCONTROL_SET: { + /* Check privileges; use cansignal() here since the process could be terminated */ + if (!cansignal(current_proc(), my_cred, target_p, SIGKILL)) { + error = EPERM; + goto out; } - break; - - case PROC_DIRTYCONTROL_GET: { - /* No permissions check - dirty state is freely available */ - if (retval) { - *retval = memorystatus_dirty_get(target_p); - } else { - error = EINVAL; - } + + error = memorystatus_dirty_set(target_p, self, pcontrol); + } + break; + + case PROC_DIRTYCONTROL_GET: { + /* No permissions check - dirty state is freely available */ + if (retval) { + *retval = memorystatus_dirty_get(target_p); + } else { + error = EINVAL; } - break; - - case PROC_DIRTYCONTROL_CLEAR: { - /* Check privileges; use cansignal() here since the process could be terminated */ - if (!cansignal(current_proc(), my_cred, target_p, SIGKILL)) { - error = EPERM; - goto out; - } - - error = memorystatus_dirty_clear(target_p, pcontrol); + } + break; + + case PROC_DIRTYCONTROL_CLEAR: { + /* Check privileges; use cansignal() here since the process could be terminated */ + if (!cansignal(current_proc(), my_cred, target_p, SIGKILL)) { + error = EPERM; + goto out; } - break; + + error = memorystatus_dirty_clear(target_p, pcontrol); + } + break; } out: - if (zombref) + if (zombref) { proc_drop_zombref(target_p); - else + } else { proc_rele(target_p); + } kauth_cred_unref(&target_cred); - - return(error); + + return error; } #else int -proc_dirtycontrol(__unused int pid, __unused int flavor, __unused uint64_t arg, __unused int32_t *retval) { - return ENOTSUP; +proc_dirtycontrol(__unused int pid, __unused int flavor, __unused uint64_t arg, __unused int32_t *retval) +{ + return ENOTSUP; } #endif /* CONFIG_MEMORYSTATUS */ @@ -2960,11 +3102,11 @@ proc_terminate(int pid, int32_t *retval) #endif if (pid <= 0 || retval == NULL) { - return (EINVAL); + return EINVAL; } if ((p = proc_find(pid)) == NULL) { - return (ESRCH); + return ESRCH; } #if 0 @@ -2992,14 +3134,14 @@ 
proc_terminate(int pid, int32_t *retval) #endif proc_set_task_policy(p->task, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE); + TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE); psignal(p, sig); *retval = sig; out: proc_rele(p); - + return error; } @@ -3009,7 +3151,7 @@ out: static void munge_vinfo_stat(struct stat64 *sbp, struct vinfo_stat *vsbp) { - bzero(vsbp, sizeof(struct vinfo_stat)); + bzero(vsbp, sizeof(struct vinfo_stat)); vsbp->vst_dev = sbp->st_dev; vsbp->vst_mode = sbp->st_mode; @@ -3044,27 +3186,29 @@ proc_pid_rusage(int pid, int flavor, user_addr_t buffer, __unused int32_t *retva if ((p = proc_find(pid)) == PROC_NULL) { if ((p = proc_find_zombref(pid)) == PROC_NULL) { - return (ESRCH); + return ESRCH; } zombie = 1; } /* Do we have permission to look into this? */ - if ((error = proc_security_policy(p, PROC_INFO_CALL_PIDRUSAGE, flavor, CHECK_SAME_USER))) + if ((error = proc_security_policy(p, PROC_INFO_CALL_PIDRUSAGE, flavor, CHECK_SAME_USER))) { goto out; + } error = proc_get_rusage(p, flavor, buffer, zombie); out: - if (zombie) + if (zombie) { proc_drop_zombref(p); - else + } else { proc_rele(p); + } - return (error); + return error; } -void +void proc_archinfo(proc_t p, struct proc_archinfo *pai) { proc_lock(p); @@ -3100,13 +3244,13 @@ proc_pidexitreasoninfo(proc_t p, struct proc_exitreasoninfo *peri, struct proc_e * child or the parent debugger only. */ do { - if (p->p_ppid == selfpid) + if (p->p_ppid == selfpid) { break; /* parent => ok */ - + } if ((p->p_lflag & P_LTRACED) != 0 && - (p->p_oppid == selfpid)) + (p->p_oppid == selfpid)) { break; /* parent-in-waiting => ok */ - + } proc_unlock(p); return EACCES; } while (0); @@ -3146,9 +3290,9 @@ proc_pidexitreasoninfo(proc_t p, struct proc_exitreasoninfo *peri, struct proc_e return error; } -/* +/* * Wrapper to provide NOTE_EXIT_DETAIL and NOTE_EXITSTATUS - * It mimics the data that is typically captured by the + * It mimics the data that is typically captured by the * EVFILT_PROC, NOTE_EXIT event mechanism. * See filt_proc() in kern_event.c. 
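 *
 * For orientation, a minimal userspace sketch of the kqueue mechanism this
 * call mimics (plain <sys/event.h> API; error handling elided and the
 * blocking wait is illustrative):
 *
 *	struct kevent kev, out;
 *	int kq = kqueue();
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT | NOTE_EXITSTATUS, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// register interest in the pid
 *	kevent(kq, NULL, 0, &out, 1, NULL);	// delivers once the process exits
 *	// out.fflags then carries the NOTE_EXIT bits, out.data the exit status,
 *	// the same data this wrapper assembles below.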
*/ @@ -3167,17 +3311,17 @@ proc_pidnoteexit(proc_t p, uint64_t flags, uint32_t *data) do { pid_t selfpid = proc_selfpid(); - if (p->p_ppid == selfpid) + if (p->p_ppid == selfpid) { break; /* parent => ok */ - + } if ((p->p_lflag & P_LTRACED) != 0 && - (p->p_oppid == selfpid)) + (p->p_oppid == selfpid)) { break; /* parent-in-waiting => ok */ - + } proc_unlock(p); - return (EACCES); + return EACCES; } while (0); - + if ((exit_flags & NOTE_EXITSTATUS) != 0) { /* The signal and exit status */ exit_data |= (p->p_xstat & NOTE_PDATAMASK); @@ -3226,12 +3370,12 @@ proc_pidnoteexit(proc_t p, uint64_t flags, uint32_t *data) *data = exit_data; - return (0); + return 0; } int proc_piddynkqueueinfo(int pid, int flavor, kqueue_id_t kq_id, - user_addr_t ubuf, uint32_t bufsize, int32_t *retval) + user_addr_t ubuf, uint32_t bufsize, int32_t *retval) { proc_t p; int err; @@ -3288,17 +3432,17 @@ proc_udata_info(int pid, int flavor, user_addr_t buffer, uint32_t bufsize, int32 goto out; } - if (bufsize != sizeof (p->p_user_data)) { + if (bufsize != sizeof(p->p_user_data)) { err = EINVAL; goto out; } switch (flavor) { case PROC_UDATA_INFO_SET: - err = copyin(buffer, &p->p_user_data, sizeof (p->p_user_data)); + err = copyin(buffer, &p->p_user_data, sizeof(p->p_user_data)); break; case PROC_UDATA_INFO_GET: - err = copyout(&p->p_user_data, buffer, sizeof (p->p_user_data)); + err = copyout(&p->p_user_data, buffer, sizeof(p->p_user_data)); break; default: err = ENOTSUP; diff --git a/bsd/kern/proc_uuid_policy.c b/bsd/kern/proc_uuid_policy.c index b9e96efea..04d1aeda4 100644 --- a/bsd/kern/proc_uuid_policy.c +++ b/bsd/kern/proc_uuid_policy.c @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -62,7 +62,7 @@ u_long proc_uuid_policy_hash_mask; /* Assume first byte of UUIDs are evenly distributed */ #define UUIDHASH(uuid) (&proc_uuid_policy_hashtbl[uuid[0] & proc_uuid_policy_hash_mask]) -static LIST_HEAD(proc_uuid_policy_hashhead, proc_uuid_policy_entry) *proc_uuid_policy_hashtbl; +static LIST_HEAD(proc_uuid_policy_hashhead, proc_uuid_policy_entry) * proc_uuid_policy_hashtbl; /* * On modification, invalidate cached lookups by bumping the generation count. @@ -70,10 +70,10 @@ static LIST_HEAD(proc_uuid_policy_hashhead, proc_uuid_policy_entry) *proc_uuid_p * the subsystem lock. 
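 *
 * Sketch of the caching pattern this enables (the caller-side names are
 * illustrative; the authoritative check is in proc_uuid_policy_lookup()
 * below):
 *
 *	int32_t gen = last_gen_seen;	// from the caller's previous lookup
 *	uint32_t flags = 0;
 *	proc_uuid_policy_lookup(uuid, &flags, &gen);
 *	// If the table's generation still equals last_gen_seen, no entry has
 *	// been added, removed, or cleared in between, so a result cached
 *	// alongside that generation may be reused without rewalking the table.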
*/ static volatile int32_t proc_uuid_policy_table_gencount; -#define BUMP_PROC_UUID_POLICY_GENERATION_COUNT() do { \ - if (OSIncrementAtomic(&proc_uuid_policy_table_gencount) == (INT32_MAX - 1)) { \ - proc_uuid_policy_table_gencount = 1; \ - } \ +#define BUMP_PROC_UUID_POLICY_GENERATION_COUNT() do { \ + if (OSIncrementAtomic(&proc_uuid_policy_table_gencount) == (INT32_MAX - 1)) { \ + proc_uuid_policy_table_gencount = 1; \ + } \ } while (0) #define MAX_PROC_UUID_POLICY_COUNT 10240 @@ -81,8 +81,8 @@ static volatile int32_t proc_uuid_policy_count; struct proc_uuid_policy_entry { LIST_ENTRY(proc_uuid_policy_entry) entries; - uuid_t uuid; /* Mach-O executable UUID */ - uint32_t flags; /* policy flag for that UUID */ + uuid_t uuid; /* Mach-O executable UUID */ + uint32_t flags; /* policy flag for that UUID */ }; static int @@ -124,10 +124,11 @@ proc_uuid_policy_insert(uuid_t uuid, uint32_t flags) uuid_unparse(uuid, uuidstr); #endif - if (uuid_is_null(uuid)) + if (uuid_is_null(uuid)) { return EINVAL; + } - MALLOC(entry, struct proc_uuid_policy_entry *, sizeof(*entry), M_PROC_UUID_POLICY, M_WAITOK|M_ZERO); + MALLOC(entry, struct proc_uuid_policy_entry *, sizeof(*entry), M_PROC_UUID_POLICY, M_WAITOK | M_ZERO); memcpy(entry->uuid, uuid, sizeof(uuid_t)); entry->flags = flags; @@ -173,7 +174,7 @@ proc_uuid_policy_remove_locked(uuid_t uuid, uint32_t flags, int *should_delete) if (should_delete) { *should_delete = 0; } - + foundentry = proc_uuid_policy_lookup_locked(uuid); if (foundentry) { if (foundentry->flags == flags) { @@ -186,7 +187,7 @@ proc_uuid_policy_remove_locked(uuid_t uuid, uint32_t flags, int *should_delete) foundentry->flags &= ~flags; } } - + return foundentry; } @@ -202,8 +203,9 @@ proc_uuid_policy_remove(uuid_t uuid, uint32_t flags) uuid_unparse(uuid, uuidstr); #endif - if (uuid_is_null(uuid)) + if (uuid_is_null(uuid)) { return EINVAL; + } PROC_UUID_POLICY_SUBSYS_LOCK(); @@ -236,14 +238,14 @@ static struct proc_uuid_policy_entry * proc_uuid_policy_lookup_locked(uuid_t uuid) { struct proc_uuid_policy_entry *tmpentry, *searchentry, *foundentry = NULL; - + LIST_FOREACH_SAFE(searchentry, UUIDHASH(uuid), entries, tmpentry) { if (0 == memcmp(searchentry->uuid, uuid, sizeof(uuid_t))) { foundentry = searchentry; break; } } - + return foundentry; } @@ -258,8 +260,9 @@ proc_uuid_policy_lookup(uuid_t uuid, uint32_t *flags, int32_t *gencount) uuid_unparse(uuid, uuidstr); #endif - if (uuid_is_null(uuid) || !flags || !gencount) + if (uuid_is_null(uuid) || !flags || !gencount) { return EINVAL; + } if (*gencount == proc_uuid_policy_table_gencount) { /* @@ -297,7 +300,7 @@ proc_uuid_policy_clear(uint32_t flags) struct proc_uuid_policy_entry *tmpentry, *searchentry; struct proc_uuid_policy_hashhead deletehead = LIST_HEAD_INITIALIZER(deletehead); unsigned long hashslot; - + /* If clear call includes no flags, infer 'No Cellular' flag */ if (flags == PROC_UUID_POLICY_FLAGS_NONE) { flags = PROC_UUID_NO_CELLULAR; @@ -306,10 +309,9 @@ proc_uuid_policy_clear(uint32_t flags) PROC_UUID_POLICY_SUBSYS_LOCK(); if (proc_uuid_policy_count > 0) { - - for (hashslot=0; hashslot <= proc_uuid_policy_hash_mask; hashslot++) { + for (hashslot = 0; hashslot <= proc_uuid_policy_hash_mask; hashslot++) { struct proc_uuid_policy_hashhead *headp = &proc_uuid_policy_hashtbl[hashslot]; - + LIST_FOREACH_SAFE(searchentry, headp, entries, tmpentry) { if ((searchentry->flags & flags) == searchentry->flags) { /* We are clearing all flags for this entry, move entry to our delete list */ @@ -334,36 +336,38 @@ 
proc_uuid_policy_clear(uint32_t flags) } dprintf("Clearing proc uuid policy table\n"); - + return 0; } -int proc_uuid_policy_kernel(uint32_t operation, uuid_t uuid, uint32_t flags) +int +proc_uuid_policy_kernel(uint32_t operation, uuid_t uuid, uint32_t flags) { int error = 0; - + switch (operation) { - case PROC_UUID_POLICY_OPERATION_CLEAR: - error = proc_uuid_policy_clear(flags); - break; - - case PROC_UUID_POLICY_OPERATION_ADD: - error = proc_uuid_policy_insert(uuid, flags); - break; - - case PROC_UUID_POLICY_OPERATION_REMOVE: - error = proc_uuid_policy_remove(uuid, flags); - break; - - default: - error = EINVAL; - break; + case PROC_UUID_POLICY_OPERATION_CLEAR: + error = proc_uuid_policy_clear(flags); + break; + + case PROC_UUID_POLICY_OPERATION_ADD: + error = proc_uuid_policy_insert(uuid, flags); + break; + + case PROC_UUID_POLICY_OPERATION_REMOVE: + error = proc_uuid_policy_remove(uuid, flags); + break; + + default: + error = EINVAL; + break; } - + return error; } -int proc_uuid_policy(struct proc *p __unused, struct proc_uuid_policy_args *uap, int32_t *retval __unused) +int +proc_uuid_policy(struct proc *p __unused, struct proc_uuid_policy_args *uap, int32_t *retval __unused) { int error = 0; uuid_t uuid; @@ -373,19 +377,21 @@ int proc_uuid_policy(struct proc *p __unused, struct proc_uuid_policy_args *uap, error = priv_check_cred(kauth_cred_get(), PRIV_PROC_UUID_POLICY, 0); if (error) { dprintf("%s failed privilege check for proc_uuid_policy: %d\n", p->p_comm, error); - return (error); + return error; } else { dprintf("%s succeeded privilege check for proc_uuid_policy\n", p->p_comm); } - + if (uap->uuid) { - if (uap->uuidlen != sizeof(uuid_t)) + if (uap->uuidlen != sizeof(uuid_t)) { return ERANGE; - + } + error = copyin(uap->uuid, uuid, sizeof(uuid_t)); - if (error) + if (error) { return error; + } } - + return proc_uuid_policy_kernel(uap->operation, uuid, uap->flags); } diff --git a/bsd/kern/process_policy.c b/bsd/kern/process_policy.c index 8c2b5b230..f8f8f1f8b 100644 --- a/bsd/kern/process_policy.c +++ b/bsd/kern/process_policy.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -95,10 +95,10 @@ static int handle_applifecycle(int scope, int action, int policy, int policy_sub /***************************** process_policy ********************/ /* - *int process_policy(int scope, int action, int policy, int policy_subtype, - * proc_policy_attribute_t * attrp, pid_t target_pid, + * int process_policy(int scope, int action, int policy, int policy_subtype, + * proc_policy_attribute_t * attrp, pid_t target_pid, * uint64_t target_threadid) - *{ int process_policy(int scope, int action, int policy, int policy_subtype, + *{ int process_policy(int scope, int action, int policy, int policy_subtype, * user_addr_t attrp, pid_t target_pid, uint64_t target_threadid); } */ @@ -124,16 +124,18 @@ process_policy(__unused struct proc *p, struct process_policy_args * uap, __unus #endif if ((scope != PROC_POLICY_SCOPE_PROCESS) && (scope != PROC_POLICY_SCOPE_THREAD)) { - return(EINVAL); + return EINVAL; } - if (target_pid == 0 || target_pid == proc_selfpid()) + if (target_pid == 0 || target_pid == proc_selfpid()) { target_proc = proc_self(); - else + } else { target_proc = proc_find(target_pid); + } - if (target_proc == PROC_NULL) - return(ESRCH); + if (target_proc == PROC_NULL) { + return ESRCH; + } my_cred = kauth_cred_get(); @@ -144,13 +146,13 @@ process_policy(__unused struct proc *p, struct process_policy_args * uap, __unus kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) && kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) #else - /* + /* * Resoure starvation control can be used by unpriv resource owner but priv at the time of ownership claim. This is * checked in low resource handle routine. So bypass the checks here. */ - if ((policy != PROC_POLICY_RESOURCE_STARVATION) && - (policy != PROC_POLICY_APPTYPE) && - (!kauth_cred_issuser(my_cred) && curp != p)) + if ((policy != PROC_POLICY_RESOURCE_STARVATION) && + (policy != PROC_POLICY_APPTYPE) && + (!kauth_cred_issuser(my_cred) && curp != target_proc)) #endif { error = EPERM; @@ -159,72 +161,74 @@ process_policy(__unused struct proc *p, struct process_policy_args * uap, __unus #if CONFIG_MACF switch (policy) { - case PROC_POLICY_BOOST: - case PROC_POLICY_RESOURCE_USAGE: + case PROC_POLICY_BOOST: + case PROC_POLICY_RESOURCE_USAGE: #if CONFIG_EMBEDDED - case PROC_POLICY_APPTYPE: - case PROC_POLICY_APP_LIFECYCLE: + case PROC_POLICY_APPTYPE: + case PROC_POLICY_APP_LIFECYCLE: #endif - /* These policies do their own appropriate mac checks */ - break; - default: - error = mac_proc_check_sched(curp, target_proc); - if (error) goto out; - break; + /* These policies do their own appropriate mac checks */ + break; + default: + error = mac_proc_check_sched(curp, target_proc); + if (error) { + goto out; + } + break; } #endif /* CONFIG_MACF */ - switch(policy) { - case PROC_POLICY_BACKGROUND: - error = ENOTSUP; - break; - case PROC_POLICY_HARDWARE_ACCESS: + switch (policy) { + case PROC_POLICY_BACKGROUND: + error = ENOTSUP; + break; + case PROC_POLICY_HARDWARE_ACCESS: + error = ENOTSUP; + break; + case PROC_POLICY_RESOURCE_STARVATION: + error = handle_lowresource(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); + break; + case PROC_POLICY_RESOURCE_USAGE: + switch (policy_subtype) { + case PROC_POLICY_RUSAGE_NONE: + case PROC_POLICY_RUSAGE_WIREDMEM: + case PROC_POLICY_RUSAGE_VIRTMEM: + case PROC_POLICY_RUSAGE_DISK: + case PROC_POLICY_RUSAGE_NETWORK: + case PROC_POLICY_RUSAGE_POWER: error = ENOTSUP; - break; - case 
PROC_POLICY_RESOURCE_STARVATION: - error = handle_lowresource(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); - break; - case PROC_POLICY_RESOURCE_USAGE: - switch(policy_subtype) { - case PROC_POLICY_RUSAGE_NONE: - case PROC_POLICY_RUSAGE_WIREDMEM: - case PROC_POLICY_RUSAGE_VIRTMEM: - case PROC_POLICY_RUSAGE_DISK: - case PROC_POLICY_RUSAGE_NETWORK: - case PROC_POLICY_RUSAGE_POWER: - error = ENOTSUP; - goto out; - default: - error = EINVAL; - goto out; - case PROC_POLICY_RUSAGE_CPU: - break; - } - - error = handle_cpuuse(action, attrp, target_proc, target_threadid); - break; -#if CONFIG_EMBEDDED - case PROC_POLICY_APP_LIFECYCLE: - error = handle_applifecycle(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); - break; -#endif /* CONFIG_EMBEDDED */ - case PROC_POLICY_APPTYPE: - error = handle_apptype(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); - break; - case PROC_POLICY_BOOST: - error = handle_boost(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); - break; + goto out; default: error = EINVAL; + goto out; + case PROC_POLICY_RUSAGE_CPU: break; + } + + error = handle_cpuuse(action, attrp, target_proc, target_threadid); + break; +#if CONFIG_EMBEDDED + case PROC_POLICY_APP_LIFECYCLE: + error = handle_applifecycle(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); + break; +#endif /* CONFIG_EMBEDDED */ + case PROC_POLICY_APPTYPE: + error = handle_apptype(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); + break; + case PROC_POLICY_BOOST: + error = handle_boost(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid); + break; + default: + error = EINVAL; + break; } out: proc_rele(target_proc); #if CONFIG_EMBEDDED - kauth_cred_unref(&target_cred); + kauth_cred_unref(&target_cred); #endif - return(error); + return error; } static int @@ -232,41 +236,42 @@ handle_lowresource(__unused int scope, int action, __unused int policy, int poli { int error = 0; - switch(policy_subtype) { - case PROC_POLICY_RS_NONE: - case PROC_POLICY_RS_VIRTUALMEM: - break; - default: - return(EINVAL); + switch (policy_subtype) { + case PROC_POLICY_RS_NONE: + case PROC_POLICY_RS_VIRTUALMEM: + break; + default: + return EINVAL; } - - if (action == PROC_POLICY_ACTION_RESTORE) + + if (action == PROC_POLICY_ACTION_RESTORE) { error = proc_resetpcontrol(proc_pid(proc)); - else + } else { error = EINVAL; + } - return(error); + return error; } -static int +static int handle_cpuuse(int action, user_addr_t attrp, proc_t proc, __unused uint64_t target_threadid) { - proc_policy_cpuusage_attr_t cpuattr = { }; + proc_policy_cpuusage_attr_t cpuattr = { }; #if CONFIG_MACF || !CONFIG_EMBEDDED - proc_t curp = current_proc(); + proc_t curp = current_proc(); #endif - Boolean privileged = FALSE; - Boolean canEnable = FALSE; - uint64_t interval = -1ULL; - int error = 0; - uint8_t percentage; + Boolean privileged = FALSE; + Boolean canEnable = FALSE; + uint64_t interval = -1ULL; + int error = 0; + uint8_t percentage; #if !CONFIG_EMBEDDED /* On macOS, tasks can only set and clear their own CPU limits. */ if ((action == PROC_POLICY_ACTION_APPLY || action == PROC_POLICY_ACTION_RESTORE) - && curp != proc) { - return (EPERM); + && curp != proc) { + return EPERM; } /* No privilege required on macOS. */ privileged = TRUE; @@ -285,292 +290,310 @@ handle_cpuuse(int action, user_addr_t attrp, proc_t proc, __unused uint64_t targ * the target process? 
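 *
 * For context, a hedged userspace sketch of the request this function
 * serves (field names are those of proc_policy_cpuusage_attr_t as read and
 * written below; the RSRCACT constant chosen here is illustrative, and the
 * process_policy() prototype is the one quoted at the top of this file):
 *
 *	proc_policy_cpuusage_attr_t attr = {
 *		.ppattr_cpu_attr = PROC_POLICY_RSRCACT_NOTIFY_EXC,
 *		.ppattr_cpu_percentage = 50,		// percent of CPU
 *		.ppattr_cpu_attr_interval = 300,	// seconds (converted below)
 *		.ppattr_cpu_attr_deadline = 0,
 *	};
 *	process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY,
 *	    PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU,
 *	    (proc_policy_attribute_t *)&attr, getpid(), 0);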
*/ error = mac_proc_check_sched(curp, proc); - if (error) return error; + if (error) { + return error; + } } #endif switch (action) { - case PROC_POLICY_ACTION_GET: - error = proc_get_task_ruse_cpu(proc->task, &cpuattr.ppattr_cpu_attr, - &percentage, - &cpuattr.ppattr_cpu_attr_interval, - &cpuattr.ppattr_cpu_attr_deadline); - if (error == 0) { - cpuattr.ppattr_cpu_percentage = percentage; - cpuattr.ppattr_cpu_attr_interval /= NSEC_PER_SEC; - error = copyout((proc_policy_cpuusage_attr_t *)&cpuattr, (user_addr_t)attrp, sizeof(proc_policy_cpuusage_attr_t)); - } - break; - - case PROC_POLICY_ACTION_APPLY: - case PROC_POLICY_ACTION_SET: - error = copyin((user_addr_t)attrp, (proc_policy_cpuusage_attr_t *)&cpuattr, sizeof(proc_policy_cpuusage_attr_t)); - if (error != 0) { - return (error); - } - - /* - * The process_policy API uses seconds as the units for the interval, - * but the mach task policy SPI uses nanoseconds. Do the conversion, - * but preserve -1 as it has special meaning. - */ - if (cpuattr.ppattr_cpu_attr_interval != -1ULL) { - interval = cpuattr.ppattr_cpu_attr_interval * NSEC_PER_SEC; - } else { - interval = -1ULL; - } - - error = proc_set_task_ruse_cpu(proc->task, cpuattr.ppattr_cpu_attr, - cpuattr.ppattr_cpu_percentage, - interval, - cpuattr.ppattr_cpu_attr_deadline, - privileged); - break; - - /* restore process to prior state */ - case PROC_POLICY_ACTION_RESTORE: - error = proc_clear_task_ruse_cpu(proc->task, privileged); - break; - - /* re-enable suspended monitor */ - case PROC_POLICY_ACTION_ENABLE: - error = task_resume_cpumon(proc->task); - break; - - case PROC_POLICY_ACTION_REMOVE: - - default: - error = EINVAL; - break; + case PROC_POLICY_ACTION_GET: + error = proc_get_task_ruse_cpu(proc->task, &cpuattr.ppattr_cpu_attr, + &percentage, + &cpuattr.ppattr_cpu_attr_interval, + &cpuattr.ppattr_cpu_attr_deadline); + if (error == 0) { + cpuattr.ppattr_cpu_percentage = percentage; + cpuattr.ppattr_cpu_attr_interval /= NSEC_PER_SEC; + error = copyout((proc_policy_cpuusage_attr_t *)&cpuattr, (user_addr_t)attrp, sizeof(proc_policy_cpuusage_attr_t)); + } + break; + + case PROC_POLICY_ACTION_APPLY: + case PROC_POLICY_ACTION_SET: + error = copyin((user_addr_t)attrp, (proc_policy_cpuusage_attr_t *)&cpuattr, sizeof(proc_policy_cpuusage_attr_t)); + if (error != 0) { + return error; + } + /* + * The process_policy API uses seconds as the units for the interval, + * but the mach task policy SPI uses nanoseconds. Do the conversion, + * but preserve -1 as it has special meaning. 
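+ *
+ * Worked example: ppattr_cpu_attr_interval = 300 (five minutes) becomes
+ * interval = 300 * NSEC_PER_SEC = 300,000,000,000 ns for the task-policy
+ * layer, while the special-meaning sentinel -1ULL passes through unchanged.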
+ */ + if (cpuattr.ppattr_cpu_attr_interval != -1ULL) { + interval = cpuattr.ppattr_cpu_attr_interval * NSEC_PER_SEC; + } else { + interval = -1ULL; + } + + error = proc_set_task_ruse_cpu(proc->task, cpuattr.ppattr_cpu_attr, + cpuattr.ppattr_cpu_percentage, + interval, + cpuattr.ppattr_cpu_attr_deadline, + privileged); + break; + + /* restore process to prior state */ + case PROC_POLICY_ACTION_RESTORE: + error = proc_clear_task_ruse_cpu(proc->task, privileged); + break; + + /* re-enable suspended monitor */ + case PROC_POLICY_ACTION_ENABLE: + error = task_resume_cpumon(proc->task); + break; + + case PROC_POLICY_ACTION_REMOVE: + + default: + error = EINVAL; + break; } - - return(error); + + return error; } #if CONFIG_EMBEDDED -static int +static int handle_applifecycle(__unused int scope, - int action, - __unused int policy, - int policy_subtype, - user_addr_t attrp, - proc_t proc, - uint64_t target_threadid) + int action, + __unused int policy, + int policy_subtype, + user_addr_t attrp, + proc_t proc, + uint64_t target_threadid) { int error = 0; int state = 0; - switch(policy_subtype) { - case PROC_POLICY_APPLIFE_NONE: - error = 0; - break; + switch (policy_subtype) { + case PROC_POLICY_APPLIFE_NONE: + error = 0; + break; - case PROC_POLICY_APPLIFE_STATE: - /* appstate is no longer supported */ - error = ENOTSUP; - break; + case PROC_POLICY_APPLIFE_STATE: + /* appstate is no longer supported */ + error = ENOTSUP; + break; - case PROC_POLICY_APPLIFE_DEVSTATUS: + case PROC_POLICY_APPLIFE_DEVSTATUS: #if CONFIG_MACF - /* ToDo - this should be a generic check, since we could potentially hang other behaviours here. */ - error = mac_proc_check_suspend_resume(current_proc(), MAC_PROC_CHECK_HIBERNATE); - if (error) { - error = EPERM; - goto out; - } + /* ToDo - this should be a generic check, since we could potentially hang other behaviours here. 
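+ *
+ * Sketch of the call this subcase serves (attrp is not consulted here, so
+ * NULL is passed; prototype as quoted at the top of this file):
+ *
+ *	process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY,
+ *	    PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_DEVSTATUS,
+ *	    NULL, target_pid, 0);
+ *
+ * With CONFIG_MEMORYSTATUS, APPLY is consumed purely as a freeze hint via
+ * memorystatus_on_inactivity(); every other action returns EINVAL.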
*/ + error = mac_proc_check_suspend_resume(current_proc(), MAC_PROC_CHECK_HIBERNATE); + if (error) { + error = EPERM; + goto out; + } #endif #if CONFIG_MEMORYSTATUS - if (action == PROC_POLICY_ACTION_APPLY) { - /* Used as a freeze hint */ - memorystatus_on_inactivity(proc); - - /* in future use devicestatus for pid_socketshutdown() */ - error = 0; - } else + if (action == PROC_POLICY_ACTION_APPLY) { + /* Used as a freeze hint */ + memorystatus_on_inactivity(proc); + + /* in future use devicestatus for pid_socketshutdown() */ + error = 0; + } else #endif - { - error = EINVAL; - } - break; + { + error = EINVAL; + } + break; - case PROC_POLICY_APPLIFE_PIDBIND: + case PROC_POLICY_APPLIFE_PIDBIND: #if CONFIG_MACF - error = mac_proc_check_suspend_resume(current_proc(), MAC_PROC_CHECK_PIDBIND); - if (error) { - error = EPERM; - goto out; - } + error = mac_proc_check_suspend_resume(current_proc(), MAC_PROC_CHECK_PIDBIND); + if (error) { + error = EPERM; + goto out; + } #endif - error = copyin((user_addr_t)attrp, (int *)&state, sizeof(int)); - if (error != 0) - goto out; - if (action == PROC_POLICY_ACTION_APPLY) { - /* bind the thread in target_thread in current process to target_proc */ - error = proc_lf_pidbind(current_task(), target_threadid, proc->task, state); - } else - error = EINVAL; - break; - default: + error = copyin((user_addr_t)attrp, (int *)&state, sizeof(int)); + if (error != 0) { + goto out; + } + if (action == PROC_POLICY_ACTION_APPLY) { + /* bind the thread in target_thread in current process to target_proc */ + error = proc_lf_pidbind(current_task(), target_threadid, proc->task, state); + } else { error = EINVAL; - break; + } + break; + default: + error = EINVAL; + break; } out: - return(error); + return error; } #endif /* CONFIG_EMBEDDED */ static int handle_apptype( int scope, - int action, - __unused int policy, - int policy_subtype, - __unused user_addr_t attrp, - proc_t target_proc, - __unused uint64_t target_threadid) + int action, + __unused int policy, + int policy_subtype, + __unused user_addr_t attrp, + proc_t target_proc, + __unused uint64_t target_threadid) { int error = 0; - if (scope != PROC_POLICY_SCOPE_PROCESS) - return (EINVAL); + if (scope != PROC_POLICY_SCOPE_PROCESS) { + return EINVAL; + } /* Temporary compatibility with old importance donation interface until libproc is moved to new boost calls */ switch (policy_subtype) { - case PROC_POLICY_IOS_DONATEIMP: - if (action != PROC_POLICY_ACTION_ENABLE) - return (EINVAL); - if (target_proc != current_proc()) - return (EINVAL); - - /* PROCESS ENABLE APPTYPE DONATEIMP */ - task_importance_mark_donor(target_proc->task, TRUE); + case PROC_POLICY_IOS_DONATEIMP: + if (action != PROC_POLICY_ACTION_ENABLE) { + return EINVAL; + } + if (target_proc != current_proc()) { + return EINVAL; + } - return(0); + /* PROCESS ENABLE APPTYPE DONATEIMP */ + task_importance_mark_donor(target_proc->task, TRUE); - case PROC_POLICY_IOS_HOLDIMP: - if (action != PROC_POLICY_ACTION_ENABLE) - return (EINVAL); - if (target_proc != current_proc()) - return (EINVAL); + return 0; - /* PROCESS ENABLE APPTYPE HOLDIMP */ - error = task_importance_hold_legacy_external_assertion(current_task(), 1); + case PROC_POLICY_IOS_HOLDIMP: + if (action != PROC_POLICY_ACTION_ENABLE) { + return EINVAL; + } + if (target_proc != current_proc()) { + return EINVAL; + } - return(error); + /* PROCESS ENABLE APPTYPE HOLDIMP */ + error = task_importance_hold_legacy_external_assertion(current_task(), 1); - case PROC_POLICY_IOS_DROPIMP: - if (action != 
PROC_POLICY_ACTION_ENABLE) - return (EINVAL); - if (target_proc != current_proc()) - return (EINVAL); + return error; - /* PROCESS ENABLE APPTYPE DROPIMP */ - error = task_importance_drop_legacy_external_assertion(current_task(), 1); + case PROC_POLICY_IOS_DROPIMP: + if (action != PROC_POLICY_ACTION_ENABLE) { + return EINVAL; + } + if (target_proc != current_proc()) { + return EINVAL; + } - return(error); + /* PROCESS ENABLE APPTYPE DROPIMP */ + error = task_importance_drop_legacy_external_assertion(current_task(), 1); - default: - /* continue to TAL handling */ - break; + return error; + + default: + /* continue to TAL handling */ + break; } - if (policy_subtype != PROC_POLICY_OSX_APPTYPE_TAL) - return (EINVAL); + if (policy_subtype != PROC_POLICY_OSX_APPTYPE_TAL) { + return EINVAL; + } /* need to be super user to do this */ - if (kauth_cred_issuser(kauth_cred_get()) == 0) - return (EPERM); + if (kauth_cred_issuser(kauth_cred_get()) == 0) { + return EPERM; + } - if (proc_task_is_tal(target_proc->task) == FALSE) - return (EINVAL); + if (proc_task_is_tal(target_proc->task) == FALSE) { + return EINVAL; + } switch (action) { - case PROC_POLICY_ACTION_ENABLE: - /* PROCESS ENABLE APPTYPE TAL */ - proc_set_task_policy(target_proc->task, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_TAL, - TASK_POLICY_ENABLE); - break; - case PROC_POLICY_ACTION_DISABLE: - /* PROCESS DISABLE APPTYPE TAL */ - proc_set_task_policy(target_proc->task, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_TAL, - TASK_POLICY_DISABLE); - break; - default: - return (EINVAL); + case PROC_POLICY_ACTION_ENABLE: + /* PROCESS ENABLE APPTYPE TAL */ + proc_set_task_policy(target_proc->task, + TASK_POLICY_ATTRIBUTE, TASK_POLICY_TAL, + TASK_POLICY_ENABLE); + break; + case PROC_POLICY_ACTION_DISABLE: + /* PROCESS DISABLE APPTYPE TAL */ + proc_set_task_policy(target_proc->task, + TASK_POLICY_ATTRIBUTE, TASK_POLICY_TAL, + TASK_POLICY_DISABLE); + break; + default: + return EINVAL; } - return(0); + return 0; } static int handle_boost(int scope, - int action, + int action, __unused int policy, - int policy_subtype, + int policy_subtype, __unused user_addr_t attrp, - proc_t target_proc, + proc_t target_proc, __unused uint64_t target_threadid) { int error = 0; assert(policy == PROC_POLICY_BOOST); - if (scope != PROC_POLICY_SCOPE_PROCESS) - return (EINVAL); - - if (target_proc != current_proc()) - return (EINVAL); - - switch(policy_subtype) { - case PROC_POLICY_IMP_IMPORTANT: - if (task_is_importance_receiver_type(target_proc->task) == FALSE) - return (EINVAL); - - switch (action) { - case PROC_POLICY_ACTION_HOLD: - /* PROCESS HOLD BOOST IMPORTANT */ - error = task_importance_hold_legacy_external_assertion(current_task(), 1); - break; - case PROC_POLICY_ACTION_DROP: - /* PROCESS DROP BOOST IMPORTANT */ - error = task_importance_drop_legacy_external_assertion(current_task(), 1); - break; - default: - error = (EINVAL); - break; - } + if (scope != PROC_POLICY_SCOPE_PROCESS) { + return EINVAL; + } + + if (target_proc != current_proc()) { + return EINVAL; + } + + switch (policy_subtype) { + case PROC_POLICY_IMP_IMPORTANT: + if (task_is_importance_receiver_type(target_proc->task) == FALSE) { + return EINVAL; + } + + switch (action) { + case PROC_POLICY_ACTION_HOLD: + /* PROCESS HOLD BOOST IMPORTANT */ + error = task_importance_hold_legacy_external_assertion(current_task(), 1); break; + case PROC_POLICY_ACTION_DROP: + /* PROCESS DROP BOOST IMPORTANT */ + error = task_importance_drop_legacy_external_assertion(current_task(), 1); + break; + default: + error = (EINVAL); + 
break; + } + break; - case PROC_POLICY_IMP_DONATION: + case PROC_POLICY_IMP_DONATION: #if CONFIG_MACF - error = mac_proc_check_sched(current_proc(), target_proc); - if (error) return error; + error = mac_proc_check_sched(current_proc(), target_proc); + if (error) { + return error; + } #endif - switch (action) { - case PROC_POLICY_ACTION_SET: - /* PROCESS SET BOOST DONATION */ - task_importance_mark_donor(target_proc->task, TRUE); - break; - default: - error = (EINVAL); - break; - } + switch (action) { + case PROC_POLICY_ACTION_SET: + /* PROCESS SET BOOST DONATION */ + task_importance_mark_donor(target_proc->task, TRUE); break; - default: error = (EINVAL); break; + } + break; + + default: + error = (EINVAL); + break; } - return(error); + return error; } -/* - * KPI to determine if a pid is currently backgrounded. +/* + * KPI to determine if a pid is currently backgrounded. * Returns ESRCH if pid cannot be found or has started exiting. * Returns EINVAL if state is NULL. * Sets *state to 1 if pid is backgrounded, and 0 otherwise. @@ -580,22 +603,24 @@ proc_pidbackgrounded(pid_t pid, uint32_t* state) { proc_t target_proc = PROC_NULL; - if (state == NULL) - return(EINVAL); + if (state == NULL) { + return EINVAL; + } target_proc = proc_find(pid); - if (target_proc == PROC_NULL) - return(ESRCH); + if (target_proc == PROC_NULL) { + return ESRCH; + } - if ( proc_get_effective_task_policy(target_proc->task, TASK_POLICY_DARWIN_BG) ) { + if (proc_get_effective_task_policy(target_proc->task, TASK_POLICY_DARWIN_BG)) { *state = 1; } else { *state = 0; } proc_rele(target_proc); - return (0); + return 0; } /* @@ -616,7 +641,7 @@ proc_get_originatorbgstate(uint32_t *is_backgrounded) thread_t thread = current_thread(); bgstate = proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG); - + /* If current thread or task backgrounded, return background */ if (bgstate) { *is_backgrounded = 1; @@ -636,12 +661,13 @@ proc_get_originatorbgstate(uint32_t *is_backgrounded) */ kr = thread_get_current_voucher_origin_pid(&pid); if (kr != KERN_SUCCESS) { - if (kr == KERN_INVALID_TASK) + if (kr == KERN_INVALID_TASK) { return ESRCH; - else if (kr == KERN_INVALID_VALUE) + } else if (kr == KERN_INVALID_VALUE) { return ENOATTR; - else + } else { return EINVAL; + } } ret = proc_pidbackgrounded(pid, is_backgrounded); @@ -653,29 +679,29 @@ proc_apply_resource_actions(void * bsdinfo, __unused int type, int action) { proc_t p = (proc_t)bsdinfo; - switch(action) { - case PROC_POLICY_RSRCACT_THROTTLE: - /* no need to do anything */ - break; + switch (action) { + case PROC_POLICY_RSRCACT_THROTTLE: + /* no need to do anything */ + break; - case PROC_POLICY_RSRCACT_SUSPEND: - task_suspend(p->task); - break; + case PROC_POLICY_RSRCACT_SUSPEND: + task_suspend(p->task); + break; - case PROC_POLICY_RSRCACT_TERMINATE: - psignal(p, SIGKILL); - break; + case PROC_POLICY_RSRCACT_TERMINATE: + psignal(p, SIGKILL); + break; - case PROC_POLICY_RSRCACT_NOTIFY_KQ: - /* not implemented */ - break; - - case PROC_POLICY_RSRCACT_NOTIFY_EXC: - panic("shouldn't be applying exception notification to process!"); - break; + case PROC_POLICY_RSRCACT_NOTIFY_KQ: + /* not implemented */ + break; + + case PROC_POLICY_RSRCACT_NOTIFY_EXC: + panic("shouldn't be applying exception notification to process!"); + break; } - return(0); + return 0; } int @@ -683,20 +709,18 @@ proc_restore_resource_actions(void * bsdinfo, __unused int type, int action) { proc_t p = (proc_t)bsdinfo; - switch(action) { - case PROC_POLICY_RSRCACT_THROTTLE: - case 
PROC_POLICY_RSRCACT_TERMINATE: - case PROC_POLICY_RSRCACT_NOTIFY_KQ: - case PROC_POLICY_RSRCACT_NOTIFY_EXC: - /* no need to do anything */ - break; - - case PROC_POLICY_RSRCACT_SUSPEND: - task_resume(p->task); - break; - + switch (action) { + case PROC_POLICY_RSRCACT_THROTTLE: + case PROC_POLICY_RSRCACT_TERMINATE: + case PROC_POLICY_RSRCACT_NOTIFY_KQ: + case PROC_POLICY_RSRCACT_NOTIFY_EXC: + /* no need to do anything */ + break; + + case PROC_POLICY_RSRCACT_SUSPEND: + task_resume(p->task); + break; } - return(0); + return 0; } - diff --git a/bsd/kern/socket_info.c b/bsd/kern/socket_info.c index 4713bf260..44ea0477a 100644 --- a/bsd/kern/socket_info.c +++ b/bsd/kern/socket_info.c @@ -62,8 +62,9 @@ fill_sockbuf_info(struct sockbuf *sb, struct sockbuf_info *sbi) sbi->sbi_flags = sb->sb_flags; sbi->sbi_timeo = (u_int32_t)(sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick; - if (sbi->sbi_timeo == 0 && sb->sb_timeo.tv_usec != 0) + if (sbi->sbi_timeo == 0 && sb->sb_timeo.tv_usec != 0) { sbi->sbi_timeo = 1; + } } static void @@ -77,10 +78,11 @@ fill_common_sockinfo(struct socket *so, struct socket_info *si) si->soi_pcb = (u_int64_t)VM_KERNEL_ADDRPERM(so->so_pcb); if (so->so_proto) { si->soi_protocol = SOCK_PROTO(so); - if (so->so_proto->pr_domain) + if (so->so_proto->pr_domain) { si->soi_family = SOCK_DOM(so); - else + } else { si->soi_family = 0; + } } else { si->soi_protocol = si->soi_family = 0; } @@ -109,8 +111,9 @@ fill_socketinfo(struct socket *so, struct socket_info *si) fill_common_sockinfo(so, si); if (so->so_pcb == NULL || so->so_proto == 0 || - so->so_proto->pr_domain == NULL) + so->so_proto->pr_domain == NULL) { goto out; + } /* * The kind of socket is determined by the triplet @@ -172,22 +175,25 @@ fill_socketinfo(struct socket *so, struct socket_info *si) unsi->unsi_conn_pcb = (uint64_t)VM_KERNEL_ADDRPERM(unp->unp_conn); - if (unp->unp_conn) + if (unp->unp_conn) { unsi->unsi_conn_so = (uint64_t) VM_KERNEL_ADDRPERM(unp->unp_conn->unp_socket); + } if (unp->unp_addr) { - size_t addrlen = unp->unp_addr->sun_len; + size_t addrlen = unp->unp_addr->sun_len; - if (addrlen > SOCK_MAXADDRLEN) + if (addrlen > SOCK_MAXADDRLEN) { addrlen = SOCK_MAXADDRLEN; + } bcopy(unp->unp_addr, &unsi->unsi_addr, addrlen); } if (unp->unp_conn && unp->unp_conn->unp_addr) { - size_t addrlen = unp->unp_conn->unp_addr->sun_len; + size_t addrlen = unp->unp_conn->unp_addr->sun_len; - if (addrlen > SOCK_MAXADDRLEN) + if (addrlen > SOCK_MAXADDRLEN) { addrlen = SOCK_MAXADDRLEN; + } bcopy(unp->unp_conn->unp_addr, &unsi->unsi_caddr, addrlen); } @@ -235,5 +241,5 @@ fill_socketinfo(struct socket *so, struct socket_info *si) out: socket_unlock(so, 0); - return (error); + return error; } diff --git a/bsd/kern/stackshot.c b/bsd/kern/stackshot.c index cb3918bae..3ef4acd7b 100644 --- a/bsd/kern/stackshot.c +++ b/bsd/kern/stackshot.c @@ -42,38 +42,38 @@ static int stackshot_kern_return_to_bsd_error(kern_return_t kr) { switch (kr) { - case KERN_SUCCESS: - return 0; - case KERN_RESOURCE_SHORTAGE: - /* could not allocate memory, or stackshot is actually bigger than - * SANE_TRACEBUF_SIZE */ - return ENOMEM; - case KERN_INSUFFICIENT_BUFFER_SIZE: - case KERN_NO_SPACE: - /* ran out of buffer to write the stackshot. Normally this error - * causes a larger buffer to be allocated in-kernel, rather than - * being returned to the user. 
*/ - return ENOSPC; - case KERN_NO_ACCESS: - return EPERM; - case KERN_MEMORY_PRESENT: - return EEXIST; - case KERN_NOT_SUPPORTED: - return ENOTSUP; - case KERN_NOT_IN_SET: - /* requested existing buffer, but there isn't one. */ - return ENOENT; - case KERN_ABORTED: - /* kdp did not report an error, but also did not produce any data */ - return EINTR; - case KERN_FAILURE: - /* stackshot came across inconsistent data and needed to bail out */ - return EBUSY; - case KERN_OPERATION_TIMED_OUT: - /* debugger synchronization timed out */ - return ETIMEDOUT; - default: - return EINVAL; + case KERN_SUCCESS: + return 0; + case KERN_RESOURCE_SHORTAGE: + /* could not allocate memory, or stackshot is actually bigger than + * SANE_TRACEBUF_SIZE */ + return ENOMEM; + case KERN_INSUFFICIENT_BUFFER_SIZE: + case KERN_NO_SPACE: + /* ran out of buffer to write the stackshot. Normally this error + * causes a larger buffer to be allocated in-kernel, rather than + * being returned to the user. */ + return ENOSPC; + case KERN_NO_ACCESS: + return EPERM; + case KERN_MEMORY_PRESENT: + return EEXIST; + case KERN_NOT_SUPPORTED: + return ENOTSUP; + case KERN_NOT_IN_SET: + /* requested existing buffer, but there isn't one. */ + return ENOENT; + case KERN_ABORTED: + /* kdp did not report an error, but also did not produce any data */ + return EINTR; + case KERN_FAILURE: + /* stackshot came across inconsistent data and needed to bail out */ + return EBUSY; + case KERN_OPERATION_TIMED_OUT: + /* debugger synchronization timed out */ + return ETIMEDOUT; + default: + return EINVAL; } } @@ -82,7 +82,7 @@ stackshot_kern_return_to_bsd_error(kern_return_t kr) * tracing both kernel and user stacks where available. Allocates a buffer from the * kernel and maps the buffer into the calling task's address space. 
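+ *
+ * Reading the whole mapping as a caller: ENOSPC and EBUSY are plausibly
+ * retry conditions (grow the buffer, or retry after the inconsistent
+ * kernel-data walk), EINTR means no data was produced at all, and
+ * ETIMEDOUT reports the debugger-synchronization timeout.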
* - * Inputs: uap->stackshot_config_version - version of the stackshot config that is being passed + * Inputs: uap->stackshot_config_version - version of the stackshot config that is being passed * uap->stackshot_config - pointer to the stackshot config * uap->stackshot_config_size- size of the stackshot config being passed * Outputs: EINVAL if there is a problem with the arguments @@ -95,7 +95,7 @@ stackshot_kern_return_to_bsd_error(kern_return_t kr) * ENOMEM if the kernel is unable to allocate enough memory to serve the request * ENOSPC if there isn't enough space in the caller's address space to remap the buffer * ESRCH if the target PID isn't found - * returns KERN_SUCCESS on success + * returns KERN_SUCCESS on success */ int stack_snapshot_with_config(struct proc *p, struct stack_snapshot_with_config_args *uap, __unused int *retval) @@ -103,28 +103,28 @@ stack_snapshot_with_config(struct proc *p, struct stack_snapshot_with_config_arg int error = 0; kern_return_t kr; - if ((error = suser(kauth_cred_get(), &p->p_acflag))) - return(error); + if ((error = suser(kauth_cred_get(), &p->p_acflag))) { + return error; + } - if((void*)uap->stackshot_config == NULL) { + if ((void*)uap->stackshot_config == NULL) { return EINVAL; } switch (uap->stackshot_config_version) { - case STACKSHOT_CONFIG_TYPE: - if (uap->stackshot_config_size != sizeof(stackshot_config_t)) { - return EINVAL; - } - stackshot_config_t config; - error = copyin(uap->stackshot_config, &config, sizeof(stackshot_config_t)); - if (error != KERN_SUCCESS) - { - return EFAULT; - } - kr = kern_stack_snapshot_internal(uap->stackshot_config_version, &config, sizeof(stackshot_config_t), TRUE); - return stackshot_kern_return_to_bsd_error(kr); - default: - return ENOTSUP; + case STACKSHOT_CONFIG_TYPE: + if (uap->stackshot_config_size != sizeof(stackshot_config_t)) { + return EINVAL; + } + stackshot_config_t config; + error = copyin(uap->stackshot_config, &config, sizeof(stackshot_config_t)); + if (error != KERN_SUCCESS) { + return EFAULT; + } + kr = kern_stack_snapshot_internal(uap->stackshot_config_version, &config, sizeof(stackshot_config_t), TRUE); + return stackshot_kern_return_to_bsd_error(kr); + default: + return ENOTSUP; } } @@ -133,7 +133,7 @@ stack_snapshot_with_config(struct proc *p, struct stack_snapshot_with_config_arg * microstackshot: Catch all system call for microstackshot related operations, including * enabling/disabling both global and windowed microstackshots as well * as retrieving windowed or global stackshots and the boot profile. - * Inputs: uap->tracebuf - address of the user space destination + * Inputs: uap->tracebuf - address of the user space destination * buffer * uap->tracebuf_size - size of the user space trace buffer * uap->flags - various flags @@ -149,8 +149,9 @@ microstackshot(struct proc *p, struct microstackshot_args *uap, int32_t *retval) int error = 0; kern_return_t kr; - if ((error = suser(kauth_cred_get(), &p->p_acflag))) - return(error); + if ((error = suser(kauth_cred_get(), &p->p_acflag))) { + return error; + } kr = stack_microstackshot(uap->tracebuf, uap->tracebuf_size, uap->flags, retval); return stackshot_kern_return_to_bsd_error(kr); @@ -162,7 +163,7 @@ microstackshot(struct proc *p, struct microstackshot_args *uap, int32_t *retval) * tracing both kernel and user stacks where available. Allocates a buffer from the * kernel and stores the address of this buffer. 
* - * Inputs: reason - the reason for triggering a stackshot (unused at the moment, but in the + * Inputs: reason - the reason for triggering a stackshot (unused at the moment, but in the * future will be saved in the stackshot) * Outputs: EINVAL/ENOTSUP if there is a problem with the arguments * EPERM if the caller doesn't pass at least one KERNEL stackshot flag @@ -178,8 +179,8 @@ kern_stack_snapshot_with_reason(__unused char *reason) config.sc_pid = -1; config.sc_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_IN_KERNEL_BUFFER | - STACKSHOT_KCDATA_FORMAT | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_THREAD_WAITINFO | - STACKSHOT_NO_IO_STATS); + STACKSHOT_KCDATA_FORMAT | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_THREAD_WAITINFO | + STACKSHOT_NO_IO_STATS); config.sc_delta_timestamp = 0; config.sc_out_buffer_addr = 0; config.sc_out_size_addr = 0; diff --git a/bsd/kern/subr_eventhandler.c b/bsd/kern/subr_eventhandler.c index 276dd4b87..41c57380f 100644 --- a/bsd/kern/subr_eventhandler.c +++ b/bsd/kern/subr_eventhandler.c @@ -80,24 +80,23 @@ static lck_grp_attr_t *eventhandler_mutex_grp_attr; static lck_grp_t *eventhandler_mutex_grp; static lck_attr_t *eventhandler_mutex_attr; -static unsigned int eg_size; /* size of eventhandler_entry_generic */ -static struct mcache *eg_cache; /* mcache for eventhandler_entry_generic */ +static unsigned int eg_size; /* size of eventhandler_entry_generic */ +static struct mcache *eg_cache; /* mcache for eventhandler_entry_generic */ -static unsigned int el_size; /* size of eventhandler_list */ -static struct mcache *el_cache; /* mcache for eventhandler_list */ +static unsigned int el_size; /* size of eventhandler_list */ +static struct mcache *el_cache; /* mcache for eventhandler_list */ static lck_grp_attr_t *el_lock_grp_attr; lck_grp_t *el_lock_grp; lck_attr_t *el_lock_attr; -struct eventhandler_entry_generic -{ - struct eventhandler_entry ee; - void (* func)(void); +struct eventhandler_entry_generic { + struct eventhandler_entry ee; + void (* func)(void); }; static struct eventhandler_list *_eventhandler_find_list( - struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name); + struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name); void eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt) @@ -128,13 +127,13 @@ eventhandler_init(void) eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb); - eg_size = sizeof (struct eventhandler_entry_generic); + eg_size = sizeof(struct eventhandler_entry_generic); eg_cache = mcache_create("eventhdlr_generic", eg_size, - sizeof (uint64_t), 0, MCR_SLEEP); + sizeof(uint64_t), 0, MCR_SLEEP); - el_size = sizeof (struct eventhandler_list); + el_size = sizeof(struct eventhandler_list); el_cache = mcache_create("eventhdlr_list", el_size, - sizeof (uint64_t), 0, MCR_SLEEP); + sizeof(uint64_t), 0, MCR_SLEEP); } void @@ -150,17 +149,18 @@ eventhandler_reap_caches(boolean_t purge) */ static eventhandler_tag eventhandler_register_internal( - struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, - struct eventhandler_list *list, - const char *name, eventhandler_tag epn) + struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, + struct eventhandler_list *list, + const char *name, eventhandler_tag epn) { - struct eventhandler_list *new_list; - struct eventhandler_entry *ep; + struct eventhandler_list *new_list; + struct eventhandler_entry *ep; - VERIFY(strlen(name) <= (sizeof (new_list->el_name) - 1)); + VERIFY(strlen(name) <= (sizeof(new_list->el_name) 
- 1)); - if (evthdlr_lists_ctxt == NULL) + if (evthdlr_lists_ctxt == NULL) { evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb; + } VERIFY(evthdlr_lists_ctxt->eventhandler_lists_initted); /* eventhandler registered too early */ VERIFY(epn != NULL); /* cannot register NULL event */ @@ -183,7 +183,7 @@ eventhandler_register_internal( list->el_flags = 0; list->el_runcount = 0; bzero(&list->el_lock, sizeof(list->el_lock)); - (void) snprintf(list->el_name, sizeof (list->el_name), "%s", name); + (void) snprintf(list->el_name, sizeof(list->el_name), "%s", name); TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt->eventhandler_lists, list, el_link); } } @@ -208,10 +208,11 @@ eventhandler_register_internal( break; } } - if (ep == NULL) + if (ep == NULL) { TAILQ_INSERT_TAIL(&list->el_entries, epn, ee_link); + } EHL_UNLOCK(list); - return(epn); + return epn; } eventhandler_tag @@ -219,7 +220,7 @@ eventhandler_register(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, struct eventhandler_list *list, const char *name, void *func, struct eventhandler_entry_arg arg, int priority) { - struct eventhandler_entry_generic *eg; + struct eventhandler_entry_generic *eg; /* allocate an entry for this handler, populate it */ eg = mcache_alloc(eg_cache, MCR_SLEEP); @@ -228,13 +229,13 @@ eventhandler_register(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, eg->ee.ee_arg = arg; eg->ee.ee_priority = priority; - return (eventhandler_register_internal(evthdlr_lists_ctxt, list, name, &eg->ee)); + return eventhandler_register_internal(evthdlr_lists_ctxt, list, name, &eg->ee); } void eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag) { - struct eventhandler_entry *ep = tag; + struct eventhandler_entry *ep = tag; EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED); if (ep != NULL) { @@ -247,8 +248,9 @@ eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag) * Make sure that is not the case when a specific entry * is being removed. 
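 *
 * Pairing sketch, from the signatures above (a NULL lists context selects
 * the global evthdlr_lists_ctxt_glb, as eventhandler_register_internal()
 * shows; the handler, arg, and priority here are illustrative):
 *
 *	tag = eventhandler_register(NULL, list, "my_event",
 *	    (void *)my_handler, arg, 0);
 *	...
 *	// the list lock must be held here, per the EHL_LOCK_ASSERT() above
 *	eventhandler_deregister(list, tag);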
*/ - if (!TAILQ_EMPTY(&list->el_entries)) + if (!TAILQ_EMPTY(&list->el_entries)) { TAILQ_REMOVE(&list->el_entries, ep, ee_link); + } EHL_LOCK_CONVERT(list); mcache_free(eg_cache, ep); } else { @@ -271,11 +273,12 @@ eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag) evhlog((LOG_DEBUG, "%s: marking all items from \"%s\" as dead", __func__, list->el_name)); TAILQ_FOREACH(ep, &list->el_entries, ee_link) - ep->ee_priority = EHE_DEAD_PRIORITY; + ep->ee_priority = EHE_DEAD_PRIORITY; } } - while (list->el_runcount > 0) + while (list->el_runcount > 0) { msleep((caddr_t)list, &list->el_lock, PSPIN, "evhrm", 0); + } EHL_UNLOCK(list); } @@ -286,16 +289,17 @@ static struct eventhandler_list * _eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name) { - struct eventhandler_list *list; + struct eventhandler_list *list; VERIFY(evthdlr_lists_ctxt != NULL); LCK_MTX_ASSERT(&evthdlr_lists_ctxt->eventhandler_mutex, LCK_MTX_ASSERT_OWNED); TAILQ_FOREACH(list, &evthdlr_lists_ctxt->eventhandler_lists, el_link) { - if (!strcmp(name, list->el_name)) + if (!strcmp(name, list->el_name)) { break; + } } - return (list); + return list; } /* @@ -305,13 +309,15 @@ struct eventhandler_list * eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name) { - struct eventhandler_list *list; + struct eventhandler_list *list; - if (evthdlr_lists_ctxt == NULL) + if (evthdlr_lists_ctxt == NULL) { evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb; + } - if (!evthdlr_lists_ctxt->eventhandler_lists_initted) - return(NULL); + if (!evthdlr_lists_ctxt->eventhandler_lists_initted) { + return NULL; + } /* scan looking for the requested list */ lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex); @@ -322,7 +328,7 @@ eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, } lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex); - return(list); + return list; } /* @@ -343,8 +349,9 @@ eventhandler_prune_list(struct eventhandler_list *list) pruned++; } } - if (pruned > 0) + if (pruned > 0) { wakeup(list); + } } /* @@ -357,7 +364,7 @@ void eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt) { struct eventhandler_list *list = NULL; - struct eventhandler_list *list_next = NULL; + struct eventhandler_list *list_next = NULL; lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex); TAILQ_FOREACH_SAFE(list, &evthdlr_lists_ctxt->eventhandler_lists, diff --git a/bsd/kern/subr_log.c b/bsd/kern/subr_log.c index ff45d5c8a..46475b683 100644 --- a/bsd/kern/subr_log.c +++ b/bsd/kern/subr_log.c @@ -111,23 +111,23 @@ uint32_t oslog_s_streamed_msgcount = 0; uint32_t oslog_s_dropped_msgcount = 0; extern uint32_t oslog_s_error_count; -#define LOG_RDPRI (PZERO + 1) +#define LOG_RDPRI (PZERO + 1) -#define LOG_NBIO 0x02 -#define LOG_ASYNC 0x04 -#define LOG_RDWAIT 0x08 +#define LOG_NBIO 0x02 +#define LOG_ASYNC 0x04 +#define LOG_RDWAIT 0x08 /* All globals should be accessed under LOG_LOCK() */ static char amsg_bufc[1024]; -static struct msgbuf aslbuf = {MSG_MAGIC, sizeof (amsg_bufc), 0, 0, amsg_bufc}; +static struct msgbuf aslbuf = {MSG_MAGIC, sizeof(amsg_bufc), 0, 0, amsg_bufc}; struct msgbuf *aslbufp __attribute__((used)) = &aslbuf; /* logsoftc only valid while log_open=1 */ struct logsoftc { - int sc_state; /* see above for possibilities */ - struct selinfo sc_selp; /* thread waiting for select */ - int sc_pgid; /* process/group for async I/O */ + int sc_state; /* see above for possibilities */ + struct selinfo sc_selp; 
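The eventhandler_register_internal() hunk above keeps each handler list sorted by ascending priority: the registration loop walks the tail queue and inserts the new entry before the first live entry with a strictly higher priority, falling back to TAILQ_INSERT_TAIL when none is found. A minimal userspace sketch of that insertion discipline (the entry type and demo priorities are made up, and the EHE_DEAD_PRIORITY check is omitted for brevity):

    #include <stdio.h>
    #include <sys/queue.h>

    struct entry {
        int priority;                   /* lower values run first */
        TAILQ_ENTRY(entry) link;
    };

    TAILQ_HEAD(entry_list, entry);

    /* Insert epn before the first entry with a strictly higher
     * priority; otherwise append at the tail -- the same shape as
     * the registration loop above. */
    static void
    insert_sorted(struct entry_list *head, struct entry *epn)
    {
        struct entry *ep;

        TAILQ_FOREACH(ep, head, link) {
            if (epn->priority < ep->priority) {
                TAILQ_INSERT_BEFORE(ep, epn, link);
                return;
            }
        }
        TAILQ_INSERT_TAIL(head, epn, link);
    }

    int
    main(void)
    {
        struct entry_list head = TAILQ_HEAD_INITIALIZER(head);
        static struct entry pool[4];
        int prios[] = { 30, 10, 20, 10 };
        struct entry *ep;

        for (int i = 0; i < 4; i++) {
            pool[i].priority = prios[i];
            insert_sorted(&head, &pool[i]);
        }
        TAILQ_FOREACH(ep, &head, link)
            printf("%d\n", ep->priority);   /* 10 10 20 30 */
        return 0;
    }

Using strict less-than keeps registration stable: an entry registered at an existing priority lands after its peers, so earlier registrations at the same priority still run first.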
/* thread waiting for select */ + int sc_pgid; /* process/group for async I/O */ struct msgbuf *sc_mbp; } logsoftc; @@ -152,35 +152,36 @@ struct msgbuf *oslog_streambufp __attribute__((used)) = &oslog_stream_buf; // List entries for keeping track of the streaming buffer static oslog_stream_buf_entry_t oslog_stream_buf_entries; -#define OSLOG_NUM_STREAM_ENTRIES 64 -#define OSLOG_STREAM_BUF_SIZE 4096 +#define OSLOG_NUM_STREAM_ENTRIES 64 +#define OSLOG_STREAM_BUF_SIZE 4096 -int oslog_open = 0; -int os_log_wakeup = 0; -int oslog_stream_open = 0; -int oslog_stream_buf_size = OSLOG_STREAM_BUF_SIZE; -int oslog_stream_num_entries = OSLOG_NUM_STREAM_ENTRIES; +int oslog_open = 0; +int os_log_wakeup = 0; +int oslog_stream_open = 0; +int oslog_stream_buf_bytesavail = 0; +int oslog_stream_buf_size = OSLOG_STREAM_BUF_SIZE; +int oslog_stream_num_entries = OSLOG_NUM_STREAM_ENTRIES; uint8_t __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT; uint8_t __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES; /* oslogsoftc only valid while oslog_open=1 */ struct oslogsoftc { - int sc_state; /* see above for possibilities */ - struct selinfo sc_selp; /* thread waiting for select */ - int sc_pgid; /* process/group for async I/O */ + int sc_state; /* see above for possibilities */ + struct selinfo sc_selp; /* thread waiting for select */ + int sc_pgid; /* process/group for async I/O */ } oslogsoftc; struct oslog_streamsoftc { - int sc_state; /* see above for possibilities */ - struct selinfo sc_selp; /* thread waiting for select */ - int sc_pgid; /* process/group for async I/O */ + int sc_state; /* see above for possibilities */ + struct selinfo sc_selp; /* thread waiting for select */ + int sc_pgid; /* process/group for async I/O */ } oslog_streamsoftc; STAILQ_HEAD(, oslog_stream_buf_entry_s) oslog_stream_free_head = - STAILQ_HEAD_INITIALIZER(oslog_stream_free_head); + STAILQ_HEAD_INITIALIZER(oslog_stream_free_head); STAILQ_HEAD(, oslog_stream_buf_entry_s) oslog_stream_buf_head = - STAILQ_HEAD_INITIALIZER(oslog_stream_buf_head); + STAILQ_HEAD_INITIALIZER(oslog_stream_buf_head); /* defined in osfmk/kern/printf.c */ extern void oslog_lock_init(void); @@ -195,6 +196,8 @@ void bsd_log_init(void); * for lock groups. */ decl_lck_spin_data(extern, oslog_stream_lock) +#define stream_lock() lck_spin_lock(&oslog_stream_lock) +#define stream_unlock() lck_spin_unlock(&oslog_stream_lock) /* XXX wants a linker set so these can be static */ extern d_open_t logopen; @@ -219,7 +222,7 @@ extern d_select_t oslog_streamselect; void oslog_init(void); void oslog_setsize(int size); void oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, - uint64_t stamp, const void *pubdata, size_t publen); + uint64_t stamp, const void *pubdata, size_t publen); void oslog_streamwrite_metadata_locked(oslog_stream_buf_entry_t m_entry); static oslog_stream_buf_entry_t oslog_stream_find_free_buf_entry_locked(void); static void oslog_streamwrite_append_bytes(const char *buffer, int buflen); @@ -230,8 +233,8 @@ static void oslog_streamwrite_append_bytes(const char *buffer, int buflen); * at interrupt level must be guarded with a spin lock. */ -#define LOG_LOCK() bsd_log_lock() -#define LOG_UNLOCK() bsd_log_unlock() +#define LOG_LOCK() bsd_log_lock() +#define LOG_UNLOCK() bsd_log_unlock() #if DEBUG #define LOG_SETSIZE_DEBUG(x...) 
kprintf(x) @@ -240,7 +243,7 @@ static void oslog_streamwrite_append_bytes(const char *buffer, int buflen); #endif static int sysctl_kern_msgbuf(struct sysctl_oid *oidp, - void *arg1, int arg2, struct sysctl_req *req); + void *arg1, int arg2, struct sysctl_req *req); /*ARGSUSED*/ int @@ -249,7 +252,7 @@ logopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc * LOG_LOCK(); if (log_open) { LOG_UNLOCK(); - return (EBUSY); + return EBUSY; } if (atm_get_diagnostic_config() & ATM_ENABLE_LEGACY_LOGGING) { logsoftc.sc_mbp = msgbufp; @@ -261,12 +264,12 @@ logopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc * */ logsoftc.sc_mbp = aslbufp; } - logsoftc.sc_pgid = p->p_pid; /* signal process only */ + logsoftc.sc_pgid = p->p_pid; /* signal process only */ log_open = 1; LOG_UNLOCK(); - return (0); + return 0; } /*ARGSUSED*/ @@ -279,7 +282,7 @@ logclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused s selthreadclear(&logsoftc.sc_selp); log_open = 0; LOG_UNLOCK(); - return (0); + return 0; } @@ -289,13 +292,13 @@ oslogopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc LOG_LOCK(); if (oslog_open) { LOG_UNLOCK(); - return(EBUSY); + return EBUSY; } - oslogsoftc.sc_pgid = p->p_pid; /* signal process only */ + oslogsoftc.sc_pgid = p->p_pid; /* signal process only */ oslog_open = 1; LOG_UNLOCK(); - return (0); + return 0; } int @@ -307,7 +310,7 @@ oslogclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused selthreadclear(&oslogsoftc.sc_selp); oslog_open = 0; LOG_UNLOCK(); - return (0); + return 0; } int @@ -316,12 +319,12 @@ oslog_streamopen(__unused dev_t dev, __unused int flags, __unused int mode, stru char *oslog_stream_msg_bufc = NULL; oslog_stream_buf_entry_t entries = NULL; - lck_spin_lock(&oslog_stream_lock); + stream_lock(); if (oslog_stream_open) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); return EBUSY; } - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); // Allocate the stream buffer oslog_stream_msg_bufc = kalloc(oslog_stream_buf_size); @@ -336,9 +339,9 @@ oslog_streamopen(__unused dev_t dev, __unused int flags, __unused int mode, stru return ENOMEM; } - lck_spin_lock(&oslog_stream_lock); + stream_lock(); if (oslog_stream_open) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); kfree(oslog_stream_msg_bufc, oslog_stream_buf_size); kfree(entries, oslog_stream_num_entries * sizeof(struct oslog_stream_buf_entry_s)); return EBUSY; @@ -370,7 +373,8 @@ oslog_streamopen(__unused dev_t dev, __unused int flags, __unused int mode, stru oslog_streambufp->msg_bufr = 0; oslog_streamsoftc.sc_pgid = p->p_pid; /* signal process only */ oslog_stream_open = 1; - lck_spin_unlock(&oslog_stream_lock); + oslog_stream_buf_bytesavail = oslog_stream_buf_size; + stream_unlock(); return 0; } @@ -382,10 +386,10 @@ oslog_streamclose(__unused dev_t dev, __unused int flag, __unused int devtype, _ char *oslog_stream_msg_bufc = NULL; oslog_stream_buf_entry_t entries = NULL; - lck_spin_lock(&oslog_stream_lock); + stream_lock(); if (oslog_stream_open == 0) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); return EBADF; } @@ -407,7 +411,7 @@ oslog_streamclose(__unused dev_t dev, __unused int flag, __unused int devtype, _ oslog_stream_buf_entries = NULL; oslog_streambufp->msg_size = 0; - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); // Free the stream buffer kfree(oslog_stream_msg_bufc, oslog_stream_buf_size); @@ -438,14 +442,15 @@ logread(__unused dev_t dev, struct uio 
*uio, int flag) logsoftc.sc_state |= LOG_RDWAIT; LOG_UNLOCK(); /* - * If the wakeup is missed - * then wait for 5 sec and reevaluate + * If the wakeup is missed + * then wait for 5 sec and reevaluate */ if ((error = tsleep((caddr_t)mbp, LOG_RDPRI | PCATCH, - "klog", 5 * hz)) != 0) { + "klog", 5 * hz)) != 0) { /* if it times out; ignore */ - if (error != EWOULDBLOCK) - return (error); + if (error != EWOULDBLOCK) { + return error; + } } LOG_LOCK(); } @@ -455,25 +460,29 @@ logread(__unused dev_t dev, struct uio *uio, int flag) int readpos; l = mbp->msg_bufx - mbp->msg_bufr; - if (l < 0) + if (l < 0) { l = mbp->msg_size - mbp->msg_bufr; + } l = min(l, uio_resid(uio)); - if (l == 0) + if (l == 0) { break; + } readpos = mbp->msg_bufr; LOG_UNLOCK(); error = uiomove((caddr_t)&mbp->msg_bufc[readpos], l, uio); LOG_LOCK(); - if (error) + if (error) { break; + } mbp->msg_bufr = readpos + l; - if (mbp->msg_bufr >= mbp->msg_size) + if (mbp->msg_bufr >= mbp->msg_size) { mbp->msg_bufr = 0; + } } out: LOG_UNLOCK(); - return (error); + return error; } /*ARGSUSED*/ @@ -484,40 +493,42 @@ oslog_streamread(__unused dev_t dev, struct uio *uio, int flag) int copy_size = 0; static char logline[FIREHOSE_CHUNK_SIZE]; - lck_spin_lock(&oslog_stream_lock); + stream_lock(); if (!oslog_stream_open) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); return EBADF; } while (STAILQ_EMPTY(&oslog_stream_buf_head)) { + assert(oslog_stream_buf_bytesavail == oslog_stream_buf_size); + if (flag & IO_NDELAY || oslog_streamsoftc.sc_state & LOG_NBIO) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); return EWOULDBLOCK; } oslog_streamsoftc.sc_state |= LOG_RDWAIT; wait_result_t wr = assert_wait((event_t)oslog_streambufp, - THREAD_INTERRUPTIBLE); + THREAD_INTERRUPTIBLE); if (wr == THREAD_WAITING) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); wr = thread_block(THREAD_CONTINUE_NULL); - lck_spin_lock(&oslog_stream_lock); + stream_lock(); } switch (wr) { - case THREAD_AWAKENED: - case THREAD_TIMED_OUT: - break; - default: - lck_spin_unlock(&oslog_stream_lock); - return EINTR; + case THREAD_AWAKENED: + case THREAD_TIMED_OUT: + break; + default: + stream_unlock(); + return EINTR; } } if (!oslog_stream_open) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); return EBADF; } @@ -534,60 +545,64 @@ oslog_streamread(__unused dev_t dev, struct uio *uio, int flag) logpos += sizeof(uint64_t); switch (read_entry->type) { - /* Handle metadata messages */ - case oslog_stream_link_type_metadata: - { - memcpy(logline + logpos, - (read_entry->metadata), read_entry->size); - logpos += read_entry->size; + /* Handle metadata messages */ + case oslog_stream_link_type_metadata: + { + memcpy(logline + logpos, + (read_entry->metadata), read_entry->size); + logpos += read_entry->size; - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); - // Free the list entry - kfree(read_entry, (sizeof(struct oslog_stream_buf_entry_s) + read_entry->size)); - break; - } - /* Handle log messages */ - case oslog_stream_link_type_log: - { - /* ensure that the correct read entry was dequeued */ - assert(read_entry->offset == oslog_streambufp->msg_bufr); - rec_length = read_entry->size; - - // If the next log line is contiguous in the buffer, copy it out. 
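The copy-out being restyled in this hunk is the classic two-phase ring-buffer read: a record that fits before the end of msg_bufc is copied with one memcpy, and one that wraps is copied as a tail piece plus a head piece starting at index 0. A self-contained model of that split, with a deliberately tiny buffer (the real stream buffer defaults to 4096 bytes):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    #define BUF_SIZE 16     /* toy size; OSLOG_STREAM_BUF_SIZE is 4096 */

    /* Copy len bytes starting at offset off out of a circular buffer,
     * splitting the copy in two when the record wraps past the end. */
    static void
    ring_copy_out(const char *buf, int off, int len, char *out)
    {
        assert(len <= BUF_SIZE && off < BUF_SIZE);
        if (off + len <= BUF_SIZE) {
            /* contiguous record: single copy */
            memcpy(out, buf + off, len);
        } else {
            /* wrapped record: copy the tail, then restart at index 0 */
            int tail = BUF_SIZE - off;
            memcpy(out, buf + off, tail);
            memcpy(out + tail, buf, len - tail);
        }
    }

    int
    main(void)
    {
        char buf[BUF_SIZE], out[6 + 1] = { 0 };

        for (int i = 0; i < BUF_SIZE; i++)
            buf[i] = 'A' + i;
        ring_copy_out(buf, 13, 6, out);     /* bytes 13,14,15,0,1,2 */
        printf("%s\n", out);                /* NOPABC */
        return 0;
    }

The read pointer follows the same shape: it advances by the record length and is reset to zero (or to the leftover length) when it reaches msg_size, which is what the assert on msg_bufr below guards.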
- if(read_entry->offset + rec_length <= oslog_streambufp->msg_size) { - memcpy(logline + logpos, - oslog_streambufp->msg_bufc + read_entry->offset, rec_length); - - oslog_streambufp->msg_bufr += rec_length; - if (oslog_streambufp->msg_bufr == oslog_streambufp->msg_size) { - oslog_streambufp->msg_bufr = 0; - } - logpos += rec_length; - } else { - // Otherwise, copy until the end of the buffer, and - // copy the remaining bytes starting at index 0. - int bytes_left = oslog_streambufp->msg_size - read_entry->offset; - memcpy(logline + logpos, - oslog_streambufp->msg_bufc + read_entry->offset, bytes_left); - logpos += bytes_left; - rec_length -= bytes_left; - - memcpy(logline + logpos, (const void *)oslog_streambufp->msg_bufc, - rec_length); - oslog_streambufp->msg_bufr = rec_length; - logpos += rec_length; - } - assert(oslog_streambufp->msg_bufr < oslog_streambufp->msg_size); - STAILQ_INSERT_TAIL(&oslog_stream_free_head, read_entry, buf_entries); + // Free the list entry + kfree(read_entry, (sizeof(struct oslog_stream_buf_entry_s) + read_entry->size)); + break; + } + /* Handle log messages */ + case oslog_stream_link_type_log: + { + /* ensure that the correct read entry was dequeued */ + assert(read_entry->offset == oslog_streambufp->msg_bufr); + rec_length = read_entry->size; + + // If the next log line is contiguous in the buffer, copy it out. + if (read_entry->offset + rec_length <= oslog_streambufp->msg_size) { + memcpy(logline + logpos, + oslog_streambufp->msg_bufc + read_entry->offset, rec_length); - lck_spin_unlock(&oslog_stream_lock); - break; - } - default: - { - panic("Got unexpected log entry type: %hhu\n", read_entry->type); + oslog_streambufp->msg_bufr += rec_length; + if (oslog_streambufp->msg_bufr == oslog_streambufp->msg_size) { + oslog_streambufp->msg_bufr = 0; + } + logpos += rec_length; + } else { + // Otherwise, copy until the end of the buffer, and + // copy the remaining bytes starting at index 0. 
+ int bytes_left = oslog_streambufp->msg_size - read_entry->offset; + memcpy(logline + logpos, + oslog_streambufp->msg_bufc + read_entry->offset, bytes_left); + logpos += bytes_left; + rec_length -= bytes_left; + + memcpy(logline + logpos, (const void *)oslog_streambufp->msg_bufc, + rec_length); + oslog_streambufp->msg_bufr = rec_length; + logpos += rec_length; } + + oslog_stream_buf_bytesavail += read_entry->size; + assert(oslog_stream_buf_bytesavail <= oslog_stream_buf_size); + + assert(oslog_streambufp->msg_bufr < oslog_streambufp->msg_size); + STAILQ_INSERT_TAIL(&oslog_stream_free_head, read_entry, buf_entries); + + stream_unlock(); + break; + } + default: + { + panic("Got unexpected log entry type: %hhu\n", read_entry->type); + } } copy_size = min(logpos, uio_resid(uio)); @@ -606,36 +621,34 @@ logselect(__unused dev_t dev, int rw, void * wql, struct proc *p) const struct msgbuf *mbp = logsoftc.sc_mbp; switch (rw) { - case FREAD: - LOG_LOCK(); + LOG_LOCK(); if (mbp->msg_bufr != mbp->msg_bufx) { LOG_UNLOCK(); - return (1); + return 1; } selrecord(p, &logsoftc.sc_selp, wql); LOG_UNLOCK(); break; } - return (0); + return 0; } int oslogselect(__unused dev_t dev, int rw, void * wql, struct proc *p) { switch (rw) { - case FREAD: LOG_LOCK(); if (os_log_wakeup) { LOG_UNLOCK(); - return (1); + return 1; } selrecord(p, &oslogsoftc.sc_selp, wql); LOG_UNLOCK(); break; } - return (0); + return 0; } int @@ -643,7 +656,7 @@ oslog_streamselect(__unused dev_t dev, int rw, void * wql, struct proc *p) { int ret = 0; - lck_spin_lock(&oslog_stream_lock); + stream_lock(); switch (rw) { case FREAD: @@ -655,7 +668,7 @@ oslog_streamselect(__unused dev_t dev, int rw, void * wql, struct proc *p) break; } - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); return ret; } @@ -667,23 +680,26 @@ logwakeup(struct msgbuf *mbp) return; } - LOG_LOCK(); + LOG_LOCK(); if (!log_open) { LOG_UNLOCK(); return; } - if (NULL == mbp) + if (NULL == mbp) { mbp = logsoftc.sc_mbp; - if (mbp != logsoftc.sc_mbp) + } + if (mbp != logsoftc.sc_mbp) { goto out; + } selwakeup(&logsoftc.sc_selp); if (logsoftc.sc_state & LOG_ASYNC) { int pgid = logsoftc.sc_pgid; LOG_UNLOCK(); - if (pgid < 0) - gsignal(-pgid, SIGIO); - else + if (pgid < 0) { + gsignal(-pgid, SIGIO); + } else { proc_signal(pgid, SIGIO); + } LOG_LOCK(); } if (logsoftc.sc_state & LOG_RDWAIT) { @@ -729,9 +745,9 @@ oslog_streamwakeup(void) return; } - lck_spin_lock(&oslog_stream_lock); + stream_lock(); oslog_streamwakeup_locked(); - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); } /*ARGSUSED*/ @@ -743,27 +759,29 @@ logioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unus LOG_LOCK(); switch (com) { - /* return number of characters immediately available */ case FIONREAD: l = mbp->msg_bufx - mbp->msg_bufr; - if (l < 0) + if (l < 0) { l += mbp->msg_size; + } *(off_t *)data = l; break; case FIONBIO: - if (*(int *)data) + if (*(int *)data) { logsoftc.sc_state |= LOG_NBIO; - else + } else { logsoftc.sc_state &= ~LOG_NBIO; + } break; case FIOASYNC: - if (*(int *)data) + if (*(int *)data) { logsoftc.sc_state |= LOG_ASYNC; - else + } else { logsoftc.sc_state &= ~LOG_ASYNC; + } break; case TIOCSPGRP: @@ -776,10 +794,10 @@ logioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unus default: LOG_UNLOCK(); - return (-1); + return -1; } LOG_UNLOCK(); - return (0); + return 0; } /*ARGSUSED*/ @@ -794,32 +812,31 @@ oslogioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __un mach_port_t mem_entry_ptr = MACH_PORT_NULL; 
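Beyond the restyling, the read-side hunk above returns each consumed record's bytes to the new oslog_stream_buf_bytesavail counter, and the write-side hunk further down consumes from it, replacing the available_space value that oslog_streamwrite_locked() used to recompute from msg_bufr/msg_bufx. A toy model of the invariant the paired asserts enforce (the function names here are illustrative, not kernel API):

    #include <assert.h>
    #include <stdio.h>

    #define STREAM_BUF_SIZE 4096    /* matches OSLOG_STREAM_BUF_SIZE */

    static int bytesavail = STREAM_BUF_SIZE;

    /* Writer path: claim space for a record; the kernel first evicts
     * the oldest records until the claim can succeed. */
    static void
    claim(int len)
    {
        assert(len <= bytesavail);
        bytesavail -= len;
        assert(bytesavail >= 0);
    }

    /* Reader/eviction path: return a consumed record's bytes. */
    static void
    release(int len)
    {
        bytesavail += len;
        assert(bytesavail <= STREAM_BUF_SIZE);
    }

    int
    main(void)
    {
        claim(100);
        claim(200);
        release(100);
        printf("%d of %d bytes free\n", bytesavail, STREAM_BUF_SIZE);
        return 0;
    }

Keeping the counter authoritative avoids re-deriving free space on every write, and the asserts catch any append or free that falls out of step with the entry list.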
switch (com) { - /* return number of characters immediately available */ case LOGBUFFERMAP: kernel_firehose_buffer = (firehose_buffer_t)kernel_firehose_addr; ret = mach_make_memory_entry_64(kernel_map, - &buffer_size, - (mach_vm_offset_t) kernel_firehose_buffer, - ( MAP_MEM_VM_SHARE | VM_PROT_READ ), - &mem_entry_ptr, - MACH_PORT_NULL); + &buffer_size, + (mach_vm_offset_t) kernel_firehose_buffer, + (MAP_MEM_VM_SHARE | VM_PROT_READ), + &mem_entry_ptr, + MACH_PORT_NULL); if (ret == KERN_SUCCESS) { ret = mach_vm_map_kernel(get_task_map(current_task()), - &user_addr, - buffer_size, - 0, /* mask */ - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - mem_entry_ptr, - 0, /* offset */ - FALSE, /* copy */ - VM_PROT_READ, - VM_PROT_READ, - VM_INHERIT_SHARE); + &user_addr, + buffer_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + mem_entry_ptr, + 0, /* offset */ + FALSE, /* copy */ + VM_PROT_READ, + VM_PROT_READ, + VM_INHERIT_SHARE); } if (ret == KERN_SUCCESS) { @@ -835,9 +852,9 @@ oslogioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __un __firehose_merge_updates(*(firehose_push_reply_t *)(data)); break; default: - return (-1); + return -1; } - return (0); + return 0; } /*ARGSUSED*/ @@ -846,27 +863,29 @@ oslog_streamioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int fla { int err = 0; - lck_spin_lock(&oslog_stream_lock); + stream_lock(); switch (com) { case FIONBIO: - if (data && *(int *)data) + if (data && *(int *)data) { oslog_streamsoftc.sc_state |= LOG_NBIO; - else + } else { oslog_streamsoftc.sc_state &= ~LOG_NBIO; + } break; case FIOASYNC: - if (data && *(int *)data) + if (data && *(int *)data) { oslog_streamsoftc.sc_state |= LOG_ASYNC; - else + } else { oslog_streamsoftc.sc_state &= ~LOG_ASYNC; + } break; default: err = -1; break; } - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); return err; } @@ -896,8 +915,8 @@ oslog_init(void) oslog_lock_init(); kr = kmem_alloc_flags(kernel_map, &kernel_firehose_addr, - size + (2 * PAGE_SIZE), VM_KERN_MEMORY_LOG, - KMA_GUARD_FIRST | KMA_GUARD_LAST); + size + (2 * PAGE_SIZE), VM_KERN_MEMORY_LOG, + KMA_GUARD_FIRST | KMA_GUARD_LAST); if (kr != KERN_SUCCESS) { panic("Failed to allocate memory for firehose logging buffer"); } @@ -928,8 +947,9 @@ void log_putc_locked(struct msgbuf *mbp, char c) { mbp->msg_bufc[mbp->msg_bufx++] = c; - if (mbp->msg_bufx >= mbp->msg_size) + if (mbp->msg_bufx >= mbp->msg_size) { mbp->msg_bufx = 0; + } } static oslog_stream_buf_entry_t @@ -945,8 +965,7 @@ oslog_stream_find_free_buf_entry_locked(void) buf_entry = STAILQ_FIRST(&oslog_stream_free_head); if (buf_entry) { STAILQ_REMOVE_HEAD(&oslog_stream_free_head, buf_entries); - } - else { + } else { // If no list elements are available in the free-list, // consume the next log line so we can free up its list element oslog_stream_buf_entry_t prev_entry = NULL; @@ -959,8 +978,7 @@ oslog_stream_find_free_buf_entry_locked(void) if (prev_entry == NULL) { STAILQ_REMOVE_HEAD(&oslog_stream_buf_head, buf_entries); - } - else { + } else { STAILQ_REMOVE_AFTER(&oslog_stream_buf_head, prev_entry, buf_entries); } @@ -990,9 +1008,16 @@ oslog_streamwrite_append_bytes(const char *buffer, int buflen) LCK_SPIN_ASSERT(&oslog_stream_lock, LCK_ASSERT_OWNED); + assert(oslog_stream_buf_bytesavail >= buflen); + oslog_stream_buf_bytesavail -= buflen; + assert(oslog_stream_buf_bytesavail >= 0); + mbp = oslog_streambufp; - // Check if we have enough space in the stream buffer to write the data if 
(mbp->msg_bufx + buflen <= mbp->msg_size) { + /* + * If this will fit without needing to be split across the end + * of the buffer, copy it directly in one go. + */ memcpy((void *)(mbp->msg_bufc + mbp->msg_bufx), buffer, buflen); mbp->msg_bufx += buflen; @@ -1000,7 +1025,10 @@ oslog_streamwrite_append_bytes(const char *buffer, int buflen) mbp->msg_bufx = 0; } } else { - // Copy part of the data until the end of the stream + /* + * Copy up to the end of the stream buffer, and then put what remains + * at the beginning. + */ int bytes_left = mbp->msg_size - mbp->msg_bufx; memcpy((void *)(mbp->msg_bufc + mbp->msg_bufx), buffer, bytes_left); @@ -1014,13 +1042,11 @@ oslog_streamwrite_append_bytes(const char *buffer, int buflen) return; } - void oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, - uint64_t stamp, const void *pubdata, size_t publen) + uint64_t stamp, const void *pubdata, size_t publen) { struct msgbuf *mbp; - int available_space = 0; oslog_stream_buf_entry_t buf_entry = NULL; oslog_stream_buf_entry_t next_entry = NULL; @@ -1040,13 +1066,7 @@ oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, assert(buf_entry != NULL); - // Ensure that we have space in the ring buffer for the current logline - if (mbp->msg_bufr > mbp->msg_bufx) { - available_space = mbp->msg_bufr - mbp->msg_bufx; - } else { - available_space = mbp->msg_size - mbp->msg_bufx + mbp->msg_bufr; - } - while(ft_length > available_space) { + while (ft_length > oslog_stream_buf_bytesavail) { oslog_stream_buf_entry_t prev_entry = NULL; next_entry = STAILQ_FIRST(&oslog_stream_buf_head); @@ -1058,8 +1078,7 @@ oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, if (prev_entry == NULL) { STAILQ_REMOVE_HEAD(&oslog_stream_buf_head, buf_entries); - } - else { + } else { STAILQ_REMOVE_AFTER(&oslog_stream_buf_head, prev_entry, buf_entries); } @@ -1069,12 +1088,13 @@ oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, } oslog_s_dropped_msgcount++; - available_space += next_entry->size; + oslog_stream_buf_bytesavail += next_entry->size; + assert(oslog_stream_buf_bytesavail <= oslog_stream_buf_size); STAILQ_INSERT_TAIL(&oslog_stream_free_head, next_entry, buf_entries); } - assert(ft_length <= available_space); + assert(ft_length <= oslog_stream_buf_bytesavail); // Write the log line and update the list entry for this record buf_entry->offset = mbp->msg_bufx; @@ -1123,10 +1143,12 @@ log_putc(char c) unread_count = msgbufp->msg_bufx - msgbufp->msg_bufr; LOG_UNLOCK(); - if (unread_count < 0) + if (unread_count < 0) { unread_count = 0 - unread_count; - if (c == '\n' || unread_count >= (msgbufp->msg_size / 2)) + } + if (c == '\n' || unread_count >= (msgbufp->msg_size / 2)) { logwakeup(msgbufp); + } } @@ -1149,16 +1171,18 @@ log_setsize(int size) int i, count; char *p, ch; - if (size > MAX_MSG_BSIZE) - return (EINVAL); + if (size > MAX_MSG_BSIZE) { + return EINVAL; + } - if (size <= 0) - return (EINVAL); + if (size <= 0) { + return EINVAL; + } new_logsize = size; if (!(new_logdata = (char*)kalloc(size))) { printf("log_setsize: unable to allocate memory\n"); - return (ENOMEM); + return ENOMEM; } bzero(new_logdata, new_logsize); @@ -1170,7 +1194,7 @@ log_setsize(int size) old_bufx = msgbufp->msg_bufx; LOG_SETSIZE_DEBUG("log_setsize(%d): old_logdata %p old_logsize %d old_bufr %d old_bufx %d\n", - size, old_logdata, old_logsize, old_bufr, old_bufx); + size, old_logdata, old_logsize, old_bufr, old_bufx); /* start "new_logsize" bytes before the write pointer */ if (new_logsize <= old_bufx) { @@ -1185,36 +1209,40 @@ 
log_setsize(int size) p = old_logdata + old_logsize - (count - old_bufx); } for (i = 0; i < count; i++) { - if (p >= old_logdata + old_logsize) + if (p >= old_logdata + old_logsize) { p = old_logdata; + } ch = *p++; new_logdata[i] = ch; } new_bufx = i; - if (new_bufx >= new_logsize) + if (new_bufx >= new_logsize) { new_bufx = 0; + } msgbufp->msg_bufx = new_bufx; new_bufr = old_bufx - old_bufr; /* how much were we trailing bufx by? */ - if (new_bufr < 0) + if (new_bufr < 0) { new_bufr += old_logsize; + } new_bufr = new_bufx - new_bufr; /* now relative to oldest data in new buffer */ - if (new_bufr < 0) + if (new_bufr < 0) { new_bufr += new_logsize; + } msgbufp->msg_bufr = new_bufr; msgbufp->msg_size = new_logsize; msgbufp->msg_bufc = new_logdata; LOG_SETSIZE_DEBUG("log_setsize(%d): new_logdata %p new_logsize %d new_bufr %d new_bufx %d\n", - size, new_logdata, new_logsize, new_bufr, new_bufx); + size, new_logdata, new_logsize, new_bufr, new_bufx); LOG_UNLOCK(); /* this memory is now dead - clear it so that it compresses better - in case of suspend to disk etc. */ + * in case of suspend to disk etc. */ bzero(old_logdata, old_logsize); if (old_logdata != smsg_bufc) { /* dynamic memory that must be freed */ @@ -1226,7 +1254,8 @@ log_setsize(int size) return 0; } -void oslog_setsize(int size) +void +oslog_setsize(int size) { uint16_t scale = 0; // If the size is less than the default stream buffer @@ -1243,12 +1272,12 @@ void oslog_setsize(int size) } SYSCTL_PROC(_kern, OID_AUTO, msgbuf, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, - sysctl_kern_msgbuf, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, + sysctl_kern_msgbuf, "I", ""); static int sysctl_kern_msgbuf(struct sysctl_oid *oidp __unused, - void *arg1 __unused, int arg2 __unused, struct sysctl_req *req) + void *arg1 __unused, int arg2 __unused, struct sysctl_req *req) { int old_bufsize, bufsize; int error; @@ -1258,14 +1287,15 @@ sysctl_kern_msgbuf(struct sysctl_oid *oidp __unused, LOG_UNLOCK(); error = sysctl_io_number(req, bufsize, sizeof(bufsize), &bufsize, NULL); - if (error) - return (error); + if (error) { + return error; + } if (bufsize != old_bufsize) { error = log_setsize(bufsize); } - return (error); + return error; } @@ -1289,7 +1319,7 @@ log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t * retval) /* Allocate a temporary non-circular buffer for copyout */ if (!(localbuff = (char *)kalloc(localbuff_size))) { printf("log_dmesg: unable to allocate memory\n"); - return (ENOMEM); + return ENOMEM; } /* in between here, the log could become bigger, but that's fine */ @@ -1301,21 +1331,24 @@ log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t * retval) */ p = msgbufp->msg_bufc + msgbufp->msg_bufx; for (i = newl = skip = 0; p != msgbufp->msg_bufc + msgbufp->msg_bufx - 1; ++p) { - if (p >= msgbufp->msg_bufc + msgbufp->msg_size) + if (p >= msgbufp->msg_bufc + msgbufp->msg_size) { p = msgbufp->msg_bufc; + } ch = *p; /* Skip "\n<.*>" syslog sequences. */ if (skip) { - if (ch == '>') + if (ch == '>') { newl = skip = 0; + } continue; } if (newl && ch == '<') { skip = 1; continue; } - if (ch == '\0') + if (ch == '\0') { continue; + } newl = (ch == '\n'); localbuff[i++] = ch; /* The original version of this routine contained a buffer @@ -1323,11 +1356,13 @@ log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t * retval) * so the change below to check the buffer bounds was made. * TODO: rewrite this needlessly convoluted routine. 
*/ - if (i == (localbuff_size - 2)) + if (i == (localbuff_size - 2)) { break; + } } - if (!newl) + if (!newl) { localbuff[i++] = '\n'; + } localbuff[i++] = 0; if (buffersize >= i) { @@ -1341,11 +1376,12 @@ log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t * retval) LOG_UNLOCK(); error = copyout(copystart, buffer, copysize); - if (!error) + if (!error) { *retval = copysize; + } kfree(localbuff, localbuff_size); - return (error); + return error; } #ifdef CONFIG_XNUPOST diff --git a/bsd/kern/subr_prf.c b/bsd/kern/subr_prf.c index 840fd1e75..d090a2429 100644 --- a/bsd/kern/subr_prf.c +++ b/bsd/kern/subr_prf.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -74,8 +74,8 @@ * in bsd/dev/XXX/cons.c * * 26-MAR-1997 Umesh Vaishampayan (umeshv@NeXT.com - * Fixed tharshing format in many functions. Cleanup. - * + * Fixed tharshing format in many functions. Cleanup. + * * 17-Jun-1995 Mac Gillon (mgillon) at NeXT * Purged old history * New version based on 4.4 and NS3.3 @@ -97,7 +97,7 @@ #include #include -#include /* for cpu_number() */ +#include /* for cpu_number() */ #include #include @@ -119,26 +119,26 @@ struct snprintf_arg { * debugger_panic_str contains argument to last * call to panic. */ -extern const char *debugger_panic_str; +extern const char *debugger_panic_str; -extern void cnputc(char); /* standard console putc */ -void (*v_putc)(char) = cnputc; /* routine to putc on virtual console */ +extern void cnputc(char); /* standard console putc */ +void (*v_putc)(char) = cnputc; /* routine to putc on virtual console */ -extern struct tty cons; /* standard console tty */ -extern struct tty *constty; /* pointer to console "window" tty */ +extern struct tty cons; /* standard console tty */ +extern struct tty *constty; /* pointer to console "window" tty */ extern int __doprnt(const char *fmt, - va_list argp, - void (*)(int, void *), - void *arg, - int radix, - int is_log); + va_list argp, + void (*)(int, void *), + void *arg, + int radix, + int is_log); /* * Record cpu that panic'd and lock around panic data */ -extern void logwakeup(struct msgbuf *); -extern void halt_cpu(void); +extern void logwakeup(struct msgbuf *); +extern void halt_cpu(void); static void snprintf_func(int ch, void *arg); @@ -162,22 +162,25 @@ uprintf(const char *fmt, ...) 
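Returning to log_setsize() in the subr_log.c hunks above: the resize salvages the newest new_logsize bytes, ending at the old write pointer, into the replacement buffer, then recomputes msg_bufr and msg_bufx relative to the copied region. A simplified standalone model of that salvage copy, using modular indexing in place of the original's wrapped pointer walk (buffer contents and indices below are made up for the demo):

    #include <stdio.h>
    #include <string.h>

    /* Copy the newest want bytes (ending at write index bufx) out of a
     * circular buffer of old_size bytes into the front of a new linear
     * buffer; returns the count copied, which becomes the new write
     * index (modulo the new size). */
    static int
    salvage_newest(const char *old, int old_size, int bufx,
        char *dst, int want)
    {
        int count = want < old_size ? want : old_size;
        int start = (bufx - count + old_size) % old_size;

        for (int i = 0; i < count; i++)
            dst[i] = old[(start + i) % old_size];
        return count;
    }

    int
    main(void)
    {
        char old[8], dst[4 + 1] = { 0 };

        memcpy(old, "ABCDEFGH", 8);
        /* write index 3: the newest 4 bytes are H, A, B, C */
        printf("kept %d: %s\n", salvage_newest(old, 8, 3, dst, 4), dst);
        return 0;
    }

Note that, like the original, this copies byte positions without tracking how much was ever written, so a log that has not yet wrapped can carry over stale bytes.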
struct putchar_args pca; va_list ap; struct session *sessp; - + sessp = proc_session(p); if (p->p_flag & P_CONTROLT && sessp != SESSION_NULL && sessp->s_ttyvp) { pca.flags = TOTTY; pca.tty = SESSION_TP(sessp); - if (pca.tty != NULL) + if (pca.tty != NULL) { tty_lock(pca.tty); + } va_start(ap, fmt); __doprnt(fmt, ap, putchar, &pca, 10, FALSE); va_end(ap); - if (pca.tty != NULL) - tty_unlock(pca.tty); + if (pca.tty != NULL) { + tty_unlock(pca.tty); + } } - if (sessp != SESSION_NULL) + if (sessp != SESSION_NULL) { session_rele(sessp); + } } tpr_t @@ -188,19 +191,21 @@ tprintf_open(struct proc *p) sessp = proc_session(p); if (p->p_flag & P_CONTROLT && sessp->s_ttyvp) { - return ((tpr_t)sessp); + return (tpr_t)sessp; } - if (sessp != SESSION_NULL) + if (sessp != SESSION_NULL) { session_rele(sessp); + } - return ((tpr_t) NULL); + return (tpr_t) NULL; } void tprintf_close(tpr_t sessp) { - if (sessp) + if (sessp) { session_rele((struct session *) sessp); + } } /* @@ -220,7 +225,7 @@ tprintf(tpr_t tpr, const char *fmt, ...) if (sess && (tp = SESSION_TP(sess)) != TTY_NULL) { /* ttycheckoutq(), tputchar() require a locked tp */ tty_lock(tp); - if(ttycheckoutq(tp, 0)) { + if (ttycheckoutq(tp, 0)) { pca.flags = TOTTY; /* going to the tty; leave locked */ pca.tty = tp; @@ -232,7 +237,7 @@ tprintf(tpr_t tpr, const char *fmt, ...) } pca.flags = TOLOG; - pca.tty = TTY_NULL; + pca.tty = TTY_NULL; va_start(ap, fmt); __doprnt(fmt, ap, putchar, &pca, 10, TRUE); va_end(ap); @@ -262,7 +267,7 @@ ttyprintf(struct tty *tp, const char *fmt, ...) struct putchar_args pca; pca.flags = TOTTY; pca.tty = tp; - + va_start(ap, fmt); __doprnt(fmt, ap, putchar, &pca, 10, TRUE); va_end(ap); @@ -280,8 +285,9 @@ putchar_asl(int c, void *arg) { struct putchar_args *pca = arg; - if ((pca->flags & TOLOGLOCKED) && c != '\0' && c != '\r' && c != 0177) + if ((pca->flags & TOLOGLOCKED) && c != '\0' && c != '\r' && c != 0177) { log_putc_locked(aslbufp, c); + } putchar(c, arg); } @@ -301,7 +307,7 @@ vaddlog(const char *fmt, va_list ap) bsd_log_unlock(); logwakeup(NULL); - return (0); + return 0; } void @@ -315,7 +321,7 @@ _printf(int flags, struct tty *ttyp, const char *format, ...) if (ttyp != NULL) { tty_lock(ttyp); - + va_start(ap, format); __doprnt(format, ap, putchar, &pca, 10, TRUE); va_end(ap); @@ -340,7 +346,8 @@ prf(const char *fmt, va_list ap, int flags, struct tty *ttyp) /* * Warn that a system table is full. 
*/ -void tablefull(const char *tab) +void +tablefull(const char *tab) { log(LOG_ERR, "%s: table is full\n", tab); } @@ -360,21 +367,26 @@ putchar(int c, void *arg) struct putchar_args *pca = arg; char **sp = (char**) pca->tty; - if (debugger_panic_str) + if (debugger_panic_str) { constty = 0; + } if ((pca->flags & TOCONS) && pca->tty == NULL && constty) { pca->tty = constty; pca->flags |= TOTTY; } if ((pca->flags & TOTTY) && pca->tty && tputchar(c, pca->tty) < 0 && - (pca->flags & TOCONS) && pca->tty == constty) + (pca->flags & TOCONS) && pca->tty == constty) { constty = 0; - if ((pca->flags & TOLOG) && c != '\0' && c != '\r' && c != 0177) + } + if ((pca->flags & TOLOG) && c != '\0' && c != '\r' && c != 0177) { log_putc(c); - if ((pca->flags & TOLOGLOCKED) && c != '\0' && c != '\r' && c != 0177) + } + if ((pca->flags & TOLOGLOCKED) && c != '\0' && c != '\r' && c != 0177) { log_putc_locked(msgbufp, c); - if ((pca->flags & TOCONS) && constty == 0 && c != '\0') + } + if ((pca->flags & TOCONS) && constty == 0 && c != '\0') { (*v_putc)(c); + } if (pca->flags & TOSTR) { **sp = c; (*sp)++; @@ -398,7 +410,7 @@ vprintf_log_locked(const char *fmt, va_list ap) * Scaled down version of vsprintf(3). * * Deprecation Warning: - * vsprintf() is being deprecated. Please use vsnprintf() instead. + * vsprintf() is being deprecated. Please use vsnprintf() instead. */ int vsprintf(char *buf, const char *cfmt, va_list ap) @@ -415,7 +427,7 @@ vsprintf(char *buf, const char *cfmt, va_list ap) } return 0; } -#endif /* !CONFIG_EMBEDDED */ +#endif /* !CONFIG_EMBEDDED */ /* * Scaled down version of snprintf(3). @@ -429,7 +441,7 @@ snprintf(char *str, size_t size, const char *format, ...) va_start(ap, format); retval = vsnprintf(str, size, format, ap); va_end(ap); - return(retval); + return retval; } /* @@ -444,8 +456,9 @@ vsnprintf(char *str, size_t size, const char *format, va_list ap) info.str = str; info.remain = size; retval = __doprnt(format, ap, snprintf_func, &info, 10, FALSE); - if (info.remain >= 1) + if (info.remain >= 1) { *info.str++ = '\0'; + } return retval; } @@ -466,4 +479,3 @@ kvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_lis __doprnt(fmt, ap, func, arg, radix, TRUE); return 0; } - diff --git a/bsd/kern/subr_prof.c b/bsd/kern/subr_prof.c index 20a0f5be3..a638d8780 100644 --- a/bsd/kern/subr_prof.c +++ b/bsd/kern/subr_prof.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -84,10 +84,10 @@ #include #include -extern int sysctl_doprof(int *, u_int, user_addr_t, size_t *, - user_addr_t, size_t newlen); +extern int sysctl_doprof(int *, u_int, user_addr_t, size_t *, + user_addr_t, size_t newlen); extern int sysctl_struct(user_addr_t, size_t *, - user_addr_t, size_t, void *, int); + user_addr_t, size_t, void *, int); lck_spin_t * mcount_lock; lck_grp_t * mcount_lock_grp; @@ -106,13 +106,13 @@ void kmstartup(void) { tostruct_t *cp; - kernel_segment_command_t *sgp; /* 32 bit mach object file segment */ + kernel_segment_command_t *sgp; /* 32 bit mach object file segment */ struct gmonparam *p = &_gmonparam; - + sgp = getsegbyname("__TEXT"); p->lowpc = (u_int32_t)sgp->vmaddr; p->highpc = (u_int32_t)(sgp->vmaddr + sgp->vmsize); - + /* * Round lowpc and highpc to multiples of the density we're using * so the rest of the scaling (here and in gprof) stays in ints. @@ -121,15 +121,16 @@ kmstartup(void) p->highpc = ROUNDUP(p->highpc, HISTFRACTION * sizeof(HISTCOUNTER)); p->textsize = p->highpc - p->lowpc; printf("Profiling kernel, textsize=%lu [0x%016lx..0x%016lx]\n", - p->textsize, p->lowpc, p->highpc); + p->textsize, p->lowpc, p->highpc); p->kcountsize = p->textsize / HISTFRACTION; p->hashfraction = HASHFRACTION; p->fromssize = p->textsize / HASHFRACTION; p->tolimit = p->textsize * ARCDENSITY / 100; - if (p->tolimit < MINARCS) + if (p->tolimit < MINARCS) { p->tolimit = MINARCS; - else if (p->tolimit > MAXARCS) + } else if (p->tolimit > MAXARCS) { p->tolimit = MAXARCS; + } p->tossize = p->tolimit * sizeof(tostruct_t); /* Why not use MALLOC with M_GPROF ? */ cp = (tostruct_t *)kalloc(p->kcountsize + p->fromssize + p->tossize); @@ -143,11 +144,10 @@ kmstartup(void) p->kcount = (u_short *)cp; cp = (tostruct_t *)((vm_offset_t)cp + p->kcountsize); p->froms = (u_short *)cp; - + mcount_lock_grp = lck_grp_alloc_init("MCOUNT", LCK_GRP_ATTR_NULL); mcount_lock_attr = lck_attr_alloc_init(); mcount_lock = lck_spin_alloc_init(mcount_lock_grp, mcount_lock_attr); - } /* @@ -160,181 +160,185 @@ kmstartup(void) STATIC int sysctl_doprofhandle SYSCTL_HANDLER_ARGS { -sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, - user_addr_t newp, size_t newlen) -{ - __unused int cmd = oidp->oid_arg2; /* subcommand*/ - int *name = arg1; /* oid element argument vector */ - int namelen = arg2; /* number of oid element arguments */ - user_addr_t oldp = req->oldptr; /* user buffer copy out address */ - size_t *oldlenp = req->oldlen; /* user buffer copy out size */ - user_addr_t newp = req->newptr; /* user buffer copy in address */ - size_t newlen = req->newlen; /* user buffer copy in size */ - - struct gmonparam *gp = &_gmonparam; - int error = 0; - - /* all sysctl names at this level are terminal */ - if (namelen != 1) - return (ENOTDIR); /* overloaded */ - - switch (name[0]) { - case GPROF_STATE: - error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state); - if (error) + sysctl_doprof(int *name, u_int namelen, user_addr_t oldp, size_t * oldlenp, + user_addr_t newp, size_t newlen) + { + __unused int cmd = oidp->oid_arg2; /* subcommand*/ + int *name = arg1; /* oid element argument vector */ + int namelen = arg2; /* number of oid element arguments */ + user_addr_t oldp = req->oldptr; /* user buffer copy out address */ + size_t *oldlenp = req->oldlen; /* user buffer copy out size */ + user_addr_t newp = req->newptr; /* user buffer copy in address */ + size_t newlen = 
req->newlen; /* user buffer copy in size */ + + struct gmonparam *gp = &_gmonparam; + int error = 0; + + /* all sysctl names at this level are terminal */ + if (namelen != 1) { + return ENOTDIR; /* overloaded */ + } + switch (name[0]) { + case GPROF_STATE: + error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state); + if (error) { + break; + } + if (gp->state == GMON_PROF_OFF) { + stopprofclock(kernproc); + } else { + startprofclock(kernproc); + } break; - if (gp->state == GMON_PROF_OFF) - stopprofclock(kernproc); - else - startprofclock(kernproc); - break; - case GPROF_COUNT: - error = sysctl_struct(oldp, oldlenp, newp, newlen, - gp->kcount, gp->kcountsize); - break; - case GPROF_FROMS: - error = sysctl_struct(oldp, oldlenp, newp, newlen, - gp->froms, gp->fromssize); - break; - case GPROF_TOS: - error = sysctl_struct(oldp, oldlenp, newp, newlen, - gp->tos, gp->tossize); - break; - case GPROF_GMONPARAM: - error = sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp); - break; - default: - error = ENOTSUP; - break; - } + case GPROF_COUNT: + error = sysctl_struct(oldp, oldlenp, newp, newlen, + gp->kcount, gp->kcountsize); + break; + case GPROF_FROMS: + error = sysctl_struct(oldp, oldlenp, newp, newlen, + gp->froms, gp->fromssize); + break; + case GPROF_TOS: + error = sysctl_struct(oldp, oldlenp, newp, newlen, + gp->tos, gp->tossize); + break; + case GPROF_GMONPARAM: + error = sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp); + break; + default: + error = ENOTSUP; + break; + } - /* adjust index so we return the right required/consumed amount */ - if (!error) - req->oldidx += req->oldlen; + /* adjust index so we return the right required/consumed amount */ + if (!error) { + req->oldidx += req->oldlen; + } - return(error); -} -SYSCTL_PROC(_kern, KERN_PROF, prof, STLFLAG_NODE|CTLFLAG_RW | CTLFLAG_LOCKED, - 0, /* Pointer argument (arg1) */ - 0, /* Integer argument (arg2) */ - sysctl_doprofhandle, /* Handler function */ - NULL, /* No explicit data */ - ""); + return error; + } + SYSCTL_PROC(_kern, KERN_PROF, prof, STLFLAG_NODE | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, /* Pointer argument (arg1) */ + 0, /* Integer argument (arg2) */ + sysctl_doprofhandle, /* Handler function */ + NULL, /* No explicit data */ + ""); /* * mcount() called with interrupts disabled. */ -void -mcount( - uintptr_t frompc, - uintptr_t selfpc -) -{ - unsigned short *frompcindex; - tostruct_t *top, *prevtop; - struct gmonparam *p = &_gmonparam; - long toindex; - - /* - * check that we are profiling - * and that we aren't recursively invoked. - */ - if (p->state != GMON_PROF_ON) - return; - - lck_spin_lock(mcount_lock); - - /* - * check that frompcindex is a reasonable pc value. - * for example: signal catchers get called from the stack, - * not from text space. too bad. - */ - frompc -= p->lowpc; - if (frompc > p->textsize) - goto done; + void + mcount( + uintptr_t frompc, + uintptr_t selfpc + ) + { + unsigned short *frompcindex; + tostruct_t *top, *prevtop; + struct gmonparam *p = &_gmonparam; + long toindex; - frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))]; - toindex = *frompcindex; - if (toindex == 0) { /* - * first time traversing this arc + * check that we are profiling + * and that we aren't recursively invoked. 
*/ - toindex = ++p->tos[0].link; - if (toindex >= p->tolimit) { - /* halt further profiling */ - goto overflow; + if (p->state != GMON_PROF_ON) { + return; } - *frompcindex = toindex; - top = &p->tos[toindex]; - top->selfpc = selfpc; - top->count = 1; - top->link = 0; - goto done; - } - top = &p->tos[toindex]; - if (top->selfpc == selfpc) { + + lck_spin_lock(mcount_lock); + /* - * arc at front of chain; usual case. + * check that frompcindex is a reasonable pc value. + * for example: signal catchers get called from the stack, + * not from text space. too bad. */ - top->count++; - goto done; - } - /* - * have to go looking down chain for it. - * top points to what we are looking at, - * prevtop points to previous top. - * we know it is not at the head of the chain. - */ - for (; /* goto done */; ) { - if (top->link == 0) { + frompc -= p->lowpc; + if (frompc > p->textsize) { + goto done; + } + + frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))]; + toindex = *frompcindex; + if (toindex == 0) { /* - * top is end of the chain and none of the chain - * had top->selfpc == selfpc. - * so we allocate a new tostruct - * and link it to the head of the chain. + * first time traversing this arc */ toindex = ++p->tos[0].link; if (toindex >= p->tolimit) { + /* halt further profiling */ goto overflow; } + *frompcindex = toindex; top = &p->tos[toindex]; top->selfpc = selfpc; top->count = 1; - top->link = *frompcindex; - *frompcindex = toindex; + top->link = 0; goto done; } - /* - * otherwise, check the next arc on the chain. - */ - prevtop = top; - top = &p->tos[top->link]; + top = &p->tos[toindex]; if (top->selfpc == selfpc) { /* - * there it is. - * increment its count - * move it to the head of the chain. + * arc at front of chain; usual case. */ top->count++; - toindex = prevtop->link; - prevtop->link = top->link; - top->link = *frompcindex; - *frompcindex = toindex; goto done; } - - } + /* + * have to go looking down chain for it. + * top points to what we are looking at, + * prevtop points to previous top. + * we know it is not at the head of the chain. + */ + for (; /* goto done */;) { + if (top->link == 0) { + /* + * top is end of the chain and none of the chain + * had top->selfpc == selfpc. + * so we allocate a new tostruct + * and link it to the head of the chain. + */ + toindex = ++p->tos[0].link; + if (toindex >= p->tolimit) { + goto overflow; + } + top = &p->tos[toindex]; + top->selfpc = selfpc; + top->count = 1; + top->link = *frompcindex; + *frompcindex = toindex; + goto done; + } + /* + * otherwise, check the next arc on the chain. + */ + prevtop = top; + top = &p->tos[top->link]; + if (top->selfpc == selfpc) { + /* + * there it is. + * increment its count + * move it to the head of the chain. 
+ */ + top->count++; + toindex = prevtop->link; + prevtop->link = top->link; + top->link = *frompcindex; + *frompcindex = toindex; + goto done; + } + } done: - lck_spin_unlock(mcount_lock); - return; + lck_spin_unlock(mcount_lock); + return; overflow: - p->state = GMON_PROF_ERROR; - lck_spin_unlock(mcount_lock); - printf("mcount: tos overflow\n"); - return; -} + p->state = GMON_PROF_ERROR; + lck_spin_unlock(mcount_lock); + printf("mcount: tos overflow\n"); + return; + } #endif /* GPROF */ @@ -350,7 +354,7 @@ overflow: //K64todo - this doesn't fit into 64 bit any more, it needs 64+16 #define PC_TO_INDEX(pc, prof) \ ((user_addr_t)(((u_quad_t)((pc) - (prof)->pr_off) * \ - (u_quad_t)((prof)->pr_scale)) >> 16) & ~1) + (u_quad_t)((prof)->pr_scale)) >> 16) & ~1) /* * Collect user-level profiling statistics; called on a profiling tick, @@ -373,47 +377,49 @@ addupc_task(struct proc *p, user_addr_t pc, u_int ticks) u_short count; /* Testing P_PROFIL may be unnecessary, but is certainly safe. */ - if ((p->p_flag & P_PROFIL) == 0 || ticks == 0) + if ((p->p_flag & P_PROFIL) == 0 || ticks == 0) { return; + } if (proc_is64bit(p)) { - struct user_uprof *prof; - user_addr_t cell; - - for (prof = &p->p_stats->user_p_prof; prof; prof = prof->pr_next) { - off = PC_TO_INDEX(pc, prof); - cell = (prof->pr_base + off); - if (cell >= prof->pr_base && - cell < (prof->pr_size + prof->pr_base)) { - if (copyin(cell, (caddr_t) &count, sizeof(count)) == 0) { - count += ticks; - if(copyout((caddr_t) &count, cell, sizeof(count)) == 0) - return; - } - p->p_stats->user_p_prof.pr_scale = 0; - stopprofclock(p); - break; - } - } - } - else { - struct uprof *prof; - short *cell; - - for (prof = &p->p_stats->p_prof; prof; prof = prof->pr_next) { - off = PC_TO_INDEX(pc,prof); - cell = (short *)(prof->pr_base + off); - if (cell >= (short *)prof->pr_base && - cell < (short*)(prof->pr_size + prof->pr_base)) { - if (copyin(CAST_USER_ADDR_T(cell), (caddr_t) &count, sizeof(count)) == 0) { - count += ticks; - if(copyout((caddr_t) &count, CAST_USER_ADDR_T(cell), sizeof(count)) == 0) - return; - } - p->p_stats->p_prof.pr_scale = 0; - stopprofclock(p); - break; - } - } + struct user_uprof *prof; + user_addr_t cell; + + for (prof = &p->p_stats->user_p_prof; prof; prof = prof->pr_next) { + off = PC_TO_INDEX(pc, prof); + cell = (prof->pr_base + off); + if (cell >= prof->pr_base && + cell < (prof->pr_size + prof->pr_base)) { + if (copyin(cell, (caddr_t) &count, sizeof(count)) == 0) { + count += ticks; + if (copyout((caddr_t) &count, cell, sizeof(count)) == 0) { + return; + } + } + p->p_stats->user_p_prof.pr_scale = 0; + stopprofclock(p); + break; + } + } + } else { + struct uprof *prof; + short *cell; + + for (prof = &p->p_stats->p_prof; prof; prof = prof->pr_next) { + off = PC_TO_INDEX(pc, prof); + cell = (short *)(prof->pr_base + off); + if (cell >= (short *)prof->pr_base && + cell < (short*)(prof->pr_size + prof->pr_base)) { + if (copyin(CAST_USER_ADDR_T(cell), (caddr_t) &count, sizeof(count)) == 0) { + count += ticks; + if (copyout((caddr_t) &count, CAST_USER_ADDR_T(cell), sizeof(count)) == 0) { + return; + } + } + p->p_stats->p_prof.pr_scale = 0; + stopprofclock(p); + break; + } + } } } diff --git a/bsd/kern/subr_sbuf.c b/bsd/kern/subr_sbuf.c index a3a89c096..e9d175fb4 100644 --- a/bsd/kern/subr_sbuf.c +++ b/bsd/kern/subr_sbuf.c @@ -51,36 +51,36 @@ #ifdef KERNEL /* MALLOC_DEFINE(M_SBUF, "sbuf", "string buffers"); */ -#define SBMALLOC(size) _MALLOC(size, M_SBUF, M_WAITOK) -#define SBFREE(buf) FREE(buf, M_SBUF) +#define SBMALLOC(size) 
_MALLOC(size, M_SBUF, M_WAITOK) +#define SBFREE(buf) FREE(buf, M_SBUF) #else /* KERNEL */ -#define KASSERT(e, m) -#define SBMALLOC(size) malloc(size) -#define SBFREE(buf) free(buf) -#define min(x,y) MIN(x,y) - +#define KASSERT(e, m) +#define SBMALLOC(size) malloc(size) +#define SBFREE(buf) free(buf) +#define min(x, y) MIN(x,y) + #endif /* KERNEL */ /* * Predicates */ -#define SBUF_ISDYNAMIC(s) ((s)->s_flags & SBUF_DYNAMIC) -#define SBUF_ISDYNSTRUCT(s) ((s)->s_flags & SBUF_DYNSTRUCT) -#define SBUF_ISFINISHED(s) ((s)->s_flags & SBUF_FINISHED) -#define SBUF_HASOVERFLOWED(s) ((s)->s_flags & SBUF_OVERFLOWED) -#define SBUF_HASROOM(s) ((s)->s_len < (s)->s_size - 1) -#define SBUF_FREESPACE(s) ((s)->s_size - (s)->s_len - 1) -#define SBUF_CANEXTEND(s) ((s)->s_flags & SBUF_AUTOEXTEND) +#define SBUF_ISDYNAMIC(s) ((s)->s_flags & SBUF_DYNAMIC) +#define SBUF_ISDYNSTRUCT(s) ((s)->s_flags & SBUF_DYNSTRUCT) +#define SBUF_ISFINISHED(s) ((s)->s_flags & SBUF_FINISHED) +#define SBUF_HASOVERFLOWED(s) ((s)->s_flags & SBUF_OVERFLOWED) +#define SBUF_HASROOM(s) ((s)->s_len < (s)->s_size - 1) +#define SBUF_FREESPACE(s) ((s)->s_size - (s)->s_len - 1) +#define SBUF_CANEXTEND(s) ((s)->s_flags & SBUF_AUTOEXTEND) /* * Set / clear flags */ -#define SBUF_SETFLAG(s, f) do { (s)->s_flags |= (f); } while (0) -#define SBUF_CLEARFLAG(s, f) do { (s)->s_flags &= ~(f); } while (0) +#define SBUF_SETFLAG(s, f) do { (s)->s_flags |= (f); } while (0) +#define SBUF_CLEARFLAG(s, f) do { (s)->s_flags &= ~(f); } while (0) -#define SBUF_MINEXTENDSIZE 16 /* Should be power of 2. */ -#define SBUF_MAXEXTENDSIZE PAGE_SIZE -#define SBUF_MAXEXTENDINCR PAGE_SIZE +#define SBUF_MINEXTENDSIZE 16 /* Should be power of 2. */ +#define SBUF_MAXEXTENDSIZE PAGE_SIZE +#define SBUF_MAXEXTENDINCR PAGE_SIZE /* * Debugging support @@ -104,11 +104,11 @@ _assert_sbuf_state(const char *fun, struct sbuf *s, int state) ("%s called with %sfinished or corrupt sbuf", fun, (state ? 
"un" : ""))); } -#define assert_sbuf_integrity(s) _assert_sbuf_integrity(__func__, (s)) -#define assert_sbuf_state(s, i) _assert_sbuf_state(__func__, (s), (i)) +#define assert_sbuf_integrity(s) _assert_sbuf_integrity(__func__, (s)) +#define assert_sbuf_state(s, i) _assert_sbuf_state(__func__, (s), (i)) #else /* KERNEL && INVARIANTS */ -#define assert_sbuf_integrity(s) do { } while (0) -#define assert_sbuf_state(s, i) do { } while (0) +#define assert_sbuf_integrity(s) do { } while (0) +#define assert_sbuf_state(s, i) do { } while (0) #endif /* KERNEL && INVARIANTS */ static int @@ -118,13 +118,14 @@ sbuf_extendsize(int size) newsize = SBUF_MINEXTENDSIZE; while (newsize < size) { - if (newsize < (int)SBUF_MAXEXTENDSIZE) + if (newsize < (int)SBUF_MAXEXTENDSIZE) { newsize *= 2; - else + } else { newsize += SBUF_MAXEXTENDINCR; + } } - return (newsize); + return newsize; } @@ -137,21 +138,24 @@ sbuf_extend(struct sbuf *s, int addlen) char *newbuf; int newsize; - if (!SBUF_CANEXTEND(s)) - return (-1); + if (!SBUF_CANEXTEND(s)) { + return -1; + } newsize = sbuf_extendsize(s->s_size + addlen); newbuf = (char *)SBMALLOC(newsize); - if (newbuf == NULL) - return (-1); + if (newbuf == NULL) { + return -1; + } bcopy(s->s_buf, newbuf, s->s_size); - if (SBUF_ISDYNAMIC(s)) + if (SBUF_ISDYNAMIC(s)) { SBFREE(s->s_buf); - else + } else { SBUF_SETFLAG(s, SBUF_DYNAMIC); + } s->s_buf = newbuf; s->s_size = newsize; - return (0); + return 0; } /* @@ -170,8 +174,9 @@ sbuf_new(struct sbuf *s, char *buf, int length, int flags) flags &= SBUF_USRFLAGMSK; if (s == NULL) { s = (struct sbuf *)SBMALLOC(sizeof *s); - if (s == NULL) - return (NULL); + if (s == NULL) { + return NULL; + } bzero(s, sizeof *s); s->s_flags = flags; SBUF_SETFLAG(s, SBUF_DYNSTRUCT); @@ -182,18 +187,20 @@ sbuf_new(struct sbuf *s, char *buf, int length, int flags) s->s_size = length; if (buf) { s->s_buf = buf; - return (s); + return s; } - if (flags & SBUF_AUTOEXTEND) + if (flags & SBUF_AUTOEXTEND) { s->s_size = sbuf_extendsize(s->s_size); + } s->s_buf = (char *)SBMALLOC(s->s_size); if (s->s_buf == NULL) { - if (SBUF_ISDYNSTRUCT(s)) + if (SBUF_ISDYNSTRUCT(s)) { SBFREE(s); - return (NULL); + } + return NULL; } SBUF_SETFLAG(s, SBUF_DYNAMIC); - return (s); + return s; } #ifdef KERNEL @@ -211,16 +218,16 @@ sbuf_uionew(struct sbuf *s, struct uio *uio, int *error) s = sbuf_new(s, NULL, uio_resid(uio) + 1, 0); if (s == NULL) { *error = ENOMEM; - return (NULL); + return NULL; } *error = uiomove(s->s_buf, uio_resid(uio), uio); if (*error != 0) { sbuf_delete(s); - return (NULL); + return NULL; } s->s_len = s->s_size - 1; *error = 0; - return (s); + return s; } #endif @@ -253,10 +260,11 @@ sbuf_setpos(struct sbuf *s, int pos) KASSERT(pos < s->s_size, ("attempt to seek past end of sbuf (%d >= %d)", pos, s->s_size)); - if (pos < 0 || pos > s->s_len) - return (-1); + if (pos < 0 || pos > s->s_len) { + return -1; + } s->s_len = pos; - return (0); + return 0; } /* @@ -270,19 +278,21 @@ sbuf_bcat(struct sbuf *s, const void *buf, size_t len) assert_sbuf_integrity(s); assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) - return (-1); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } for (; len; len--) { - if (!SBUF_HASROOM(s) && sbuf_extend(s, len) < 0) + if (!SBUF_HASROOM(s) && sbuf_extend(s, len) < 0) { break; + } s->s_buf[s->s_len++] = *str++; } if (len) { SBUF_SETFLAG(s, SBUF_OVERFLOWED); - return (-1); + return -1; } - return (0); + return 0; } #ifdef KERNEL @@ -295,20 +305,23 @@ sbuf_bcopyin(struct sbuf *s, const void *uaddr, size_t len) assert_sbuf_integrity(s); 
assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) - return (-1); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } - if (len == 0) - return (0); + if (len == 0) { + return 0; + } if (len > (unsigned) SBUF_FREESPACE(s)) { sbuf_extend(s, len - SBUF_FREESPACE(s)); len = min(len, SBUF_FREESPACE(s)); } - if (copyin(CAST_USER_ADDR_T(uaddr), s->s_buf + s->s_len, len) != 0) - return (-1); + if (copyin(CAST_USER_ADDR_T(uaddr), s->s_buf + s->s_len, len) != 0) { + return -1; + } s->s_len += len; - return (0); + return 0; } #endif @@ -322,7 +335,7 @@ sbuf_bcpy(struct sbuf *s, const void *buf, size_t len) assert_sbuf_state(s, 0); sbuf_clear(s); - return (sbuf_bcat(s, buf, len)); + return sbuf_bcat(s, buf, len); } /* @@ -334,19 +347,21 @@ sbuf_cat(struct sbuf *s, const char *str) assert_sbuf_integrity(s); assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) - return (-1); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } while (*str) { - if (!SBUF_HASROOM(s) && sbuf_extend(s, strlen(str)) < 0) + if (!SBUF_HASROOM(s) && sbuf_extend(s, strlen(str)) < 0) { break; + } s->s_buf[s->s_len++] = *str++; } if (*str) { SBUF_SETFLAG(s, SBUF_OVERFLOWED); - return (-1); + return -1; } - return (0); + return 0; } #ifdef KERNEL @@ -361,11 +376,13 @@ sbuf_copyin(struct sbuf *s, const void *uaddr, size_t len) assert_sbuf_integrity(s); assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) - return (-1); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } - if (len == 0) - len = SBUF_FREESPACE(s); /* XXX return 0? */ + if (len == 0) { + len = SBUF_FREESPACE(s); /* XXX return 0? */ + } if (len > (unsigned) SBUF_FREESPACE(s)) { sbuf_extend(s, len); len = min(len, SBUF_FREESPACE(s)); @@ -373,15 +390,15 @@ sbuf_copyin(struct sbuf *s, const void *uaddr, size_t len) switch (copyinstr(CAST_USER_ADDR_T(uaddr), s->s_buf + s->s_len, len + 1, &done)) { case ENAMETOOLONG: SBUF_SETFLAG(s, SBUF_OVERFLOWED); - /* fall through */ + /* fall through */ case 0: s->s_len += done - 1; break; default: - return (-1); /* XXX */ + return -1; /* XXX */ } - return (done); + return done; } #endif @@ -395,7 +412,7 @@ sbuf_cpy(struct sbuf *s, const char *str) assert_sbuf_state(s, 0); sbuf_clear(s); - return (sbuf_cat(s, str)); + return sbuf_cat(s, str); } /* @@ -413,8 +430,9 @@ sbuf_vprintf(struct sbuf *s, const char *fmt, va_list ap) KASSERT(fmt != NULL, ("%s called with a NULL format string", __func__)); - if (SBUF_HASOVERFLOWED(s)) - return (-1); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } do { va_copy(ap_copy, ap); @@ -434,15 +452,17 @@ sbuf_vprintf(struct sbuf *s, const char *fmt, va_list ap) * given sufficient space, hence the min() calculation below. */ s->s_len += min(len, SBUF_FREESPACE(s)); - if (!SBUF_HASROOM(s) && !SBUF_CANEXTEND(s)) + if (!SBUF_HASROOM(s) && !SBUF_CANEXTEND(s)) { SBUF_SETFLAG(s, SBUF_OVERFLOWED); + } KASSERT(s->s_len < s->s_size, ("wrote past end of sbuf (%d >= %d)", s->s_len, s->s_size)); - if (SBUF_HASOVERFLOWED(s)) - return (-1); - return (0); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } + return 0; } /* @@ -457,7 +477,7 @@ sbuf_printf(struct sbuf *s, const char *fmt, ...) 
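sbuf_vprintf() above may call vsnprintf() more than once while it extends the buffer, which is why each attempt runs on a va_copy of the caller's va_list: a va_list is consumed by the first formatting pass. A minimal user-space sketch of the same retry pattern (the growbuf names are invented for this example, not kernel APIs):

```c
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct growbuf {
	char  *data;
	size_t len;    /* bytes used, excluding the NUL */
	size_t size;   /* bytes allocated */
};

static int
growbuf_vprintf(struct growbuf *b, const char *fmt, va_list ap)
{
	va_list ap_copy;
	int len;

	for (;;) {
		/* vsnprintf() consumes its va_list, so retry on a copy. */
		va_copy(ap_copy, ap);
		len = vsnprintf(b->data + b->len, b->size - b->len, fmt, ap_copy);
		va_end(ap_copy);
		if (len < 0) {
			return -1;
		}
		if ((size_t)len < b->size - b->len) {
			b->len += (size_t)len;   /* it fit; done */
			return 0;
		}
		/* Truncated: grow the buffer and go around again. */
		size_t newsize = b->size * 2 + (size_t)len + 1;
		char *p = realloc(b->data, newsize);
		if (p == NULL) {
			return -1;
		}
		b->data = p;
		b->size = newsize;
	}
}

static int
growbuf_printf(struct growbuf *b, const char *fmt, ...)
{
	va_list ap;
	int rc;

	va_start(ap, fmt);
	rc = growbuf_vprintf(b, fmt, ap);
	va_end(ap);
	return rc;
}

int
main(void)
{
	struct growbuf b = { .data = calloc(1, 8), .len = 0, .size = 8 };

	growbuf_printf(&b, "hello %s, %d times", "world", 42);
	printf("%s (len %zu, cap %zu)\n", b.data, b.len, b.size);
	free(b.data);
	return 0;
}
```

The kernel version differs in that a fixed-size sbuf cannot grow, so truncation sets SBUF_OVERFLOWED instead of looping, but the va_copy discipline is the same.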
va_start(ap, fmt); result = sbuf_vprintf(s, fmt, ap); va_end(ap); - return(result); + return result; } /* @@ -469,22 +489,24 @@ sbuf_putc(struct sbuf *s, int c) assert_sbuf_integrity(s); assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) - return (-1); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } if (!SBUF_HASROOM(s) && sbuf_extend(s, 1) < 0) { SBUF_SETFLAG(s, SBUF_OVERFLOWED); - return (-1); + return -1; + } + if (c != '\0') { + s->s_buf[s->s_len++] = c; } - if (c != '\0') - s->s_buf[s->s_len++] = c; - return (0); + return 0; } static inline int isspace(char ch) { - return (ch == ' ' || ch == '\n' || ch == '\t'); + return ch == ' ' || ch == '\n' || ch == '\t'; } /* @@ -496,13 +518,15 @@ sbuf_trim(struct sbuf *s) assert_sbuf_integrity(s); assert_sbuf_state(s, 0); - if (SBUF_HASOVERFLOWED(s)) - return (-1); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } - while (s->s_len && isspace(s->s_buf[s->s_len-1])) + while (s->s_len && isspace(s->s_buf[s->s_len - 1])) { --s->s_len; + } - return (0); + return 0; } /* @@ -511,7 +535,7 @@ sbuf_trim(struct sbuf *s) int sbuf_overflowed(struct sbuf *s) { - return SBUF_HASOVERFLOWED(s); + return SBUF_HASOVERFLOWED(s); } /* @@ -549,8 +573,9 @@ sbuf_len(struct sbuf *s) assert_sbuf_integrity(s); /* don't care if it's finished or not */ - if (SBUF_HASOVERFLOWED(s)) - return (-1); + if (SBUF_HASOVERFLOWED(s)) { + return -1; + } return s->s_len; } @@ -565,12 +590,14 @@ sbuf_delete(struct sbuf *s) assert_sbuf_integrity(s); /* don't care if it's finished or not */ - if (SBUF_ISDYNAMIC(s)) + if (SBUF_ISDYNAMIC(s)) { SBFREE(s->s_buf); + } isdyn = SBUF_ISDYNSTRUCT(s); bzero(s, sizeof *s); - if (isdyn) + if (isdyn) { SBFREE(s); + } } /* @@ -579,6 +606,5 @@ sbuf_delete(struct sbuf *s) int sbuf_done(struct sbuf *s) { - - return(SBUF_ISFINISHED(s)); + return SBUF_ISFINISHED(s); } diff --git a/bsd/kern/subr_xxx.c b/bsd/kern/subr_xxx.c index 2c574f295..879963f19 100644 --- a/bsd/kern/subr_xxx.c +++ b/bsd/kern/subr_xxx.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -70,7 +70,7 @@ #include #include -#include /* for psignal() */ +#include /* for psignal() */ #include #ifdef GPROF @@ -89,10 +89,10 @@ bool send_sigsys = true; int enodev(void) { - return (ENODEV); + return ENODEV; } -/* +/* * Unsupported strategy function. 
*/ void @@ -107,7 +107,7 @@ enodev_strat(void) int enxio(void) { - return (ENXIO); + return ENXIO; } /* @@ -116,7 +116,7 @@ enxio(void) int enoioctl(void) { - return (ENOTTY); + return ENOTTY; } @@ -128,7 +128,7 @@ enoioctl(void) int enosys(void) { - return (ENOSYS); + return ENOSYS; } /* @@ -140,7 +140,7 @@ enosys(void) int eopnotsupp(void) { - return (ENOTSUP); + return ENOTSUP; } /* @@ -149,7 +149,7 @@ eopnotsupp(void) int nullop(void) { - return (0); + return 0; } @@ -160,7 +160,7 @@ nullop(void) int nulldev(void) { - return (0); + return 0; } /* @@ -169,7 +169,7 @@ nulldev(void) int errsys(void) { - return(EINVAL); + return EINVAL; } void @@ -189,10 +189,10 @@ nosys(__unused struct proc *p, __unused struct nosys_args *args, __unused int32_ if (send_sigsys) { psignal_uthread(current_thread(), SIGSYS); } - return (ENOSYS); + return ENOSYS; } -#ifdef GPROF +#ifdef GPROF /* * Stub routine in case it is ever possible to free space. */ @@ -216,9 +216,8 @@ rc4_init(struct rc4_state *state __unused, const u_char *key __unused, int keyle void rc4_crypt(struct rc4_state *state __unused, - const u_char *inbuf __unused, u_char *outbuf __unused, int buflen __unused) + const u_char *inbuf __unused, u_char *outbuf __unused, int buflen __unused) { panic("rc4_crypt: unsupported kernel configuration"); } #endif /* !CRYPTO */ - diff --git a/bsd/kern/sys_coalition.c b/bsd/kern/sys_coalition.c index 4741bfc16..bfbd9c9ca 100644 --- a/bsd/kern/sys_coalition.c +++ b/bsd/kern/sys_coalition.c @@ -35,10 +35,12 @@ coalition_create_syscall(user_addr_t cidp, uint32_t flags) int role = COALITION_CREATE_FLAGS_GET_ROLE(flags); boolean_t privileged = !!(flags & COALITION_CREATE_FLAGS_PRIVILEGED); - if ((flags & (~COALITION_CREATE_FLAGS_MASK)) != 0) + if ((flags & (~COALITION_CREATE_FLAGS_MASK)) != 0) { return EINVAL; - if (type < 0 || type > COALITION_TYPE_MAX) + } + if (type < 0 || type > COALITION_TYPE_MAX) { return EINVAL; + } kr = coalition_create_internal(type, role, privileged, &coal); if (kr != KERN_SUCCESS) { @@ -183,7 +185,8 @@ coalition_reap_syscall(user_addr_t cidp, uint32_t flags) /* Syscall demux. * Returns EPERM if the calling process is not privileged to make this call. 
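coalition_create_syscall() above opens with the standard "reject unknown flags" idiom: mask off every bit the kernel understands and fail with EINVAL if anything remains, so that flag bits added in a future release cannot be silently accepted by an older kernel. A compact sketch of the idiom (the MYCALL names are illustrative):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MYCALL_FLAG_A     0x01u
#define MYCALL_FLAG_B     0x02u
#define MYCALL_FLAGS_MASK (MYCALL_FLAG_A | MYCALL_FLAG_B)

static int
mycall_check_flags(uint32_t flags)
{
	/* Any bit outside the known mask is an error, never ignored. */
	if ((flags & ~MYCALL_FLAGS_MASK) != 0) {
		return EINVAL;
	}
	return 0;
}

int
main(void)
{
	printf("flags 0x03 -> %d\n", mycall_check_flags(0x03)); /* 0: both known */
	printf("flags 0x04 -> %d\n", mycall_check_flags(0x04)); /* EINVAL */
	return 0;
}
```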
*/ -int coalition(proc_t p, struct coalition_args *cap, __unused int32_t *retval) +int +coalition(proc_t p, struct coalition_args *cap, __unused int32_t *retval) { uint32_t operation = cap->operation; user_addr_t cidp = cap->cid; @@ -240,21 +243,25 @@ static int coalition_info_efficiency(coalition_t coal, user_addr_t buffer, user_size_t bufsize) { int error = 0; - if (coalition_type(coal) != COALITION_TYPE_JETSAM) + if (coalition_type(coal) != COALITION_TYPE_JETSAM) { return EINVAL; - uint64_t flags = 0; + } + uint64_t flags = 0; error = copyin(buffer, &flags, MIN(bufsize, sizeof(flags))); - if (error) + if (error) { return error; - if ((flags & COALITION_EFFICIENCY_VALID_FLAGS) == 0) + } + if ((flags & COALITION_EFFICIENCY_VALID_FLAGS) == 0) { return EINVAL; + } if (flags & COALITION_FLAGS_EFFICIENT) { - coalition_set_efficient(coal); + coalition_set_efficient(coal); } return error; } -int coalition_info(proc_t p, struct coalition_info_args *uap, __unused int32_t *retval) +int +coalition_info(proc_t p, struct coalition_info_args *uap, __unused int32_t *retval) { user_addr_t cidp = uap->cid; user_addr_t buffer = uap->buffer; @@ -319,12 +326,14 @@ static int sysctl_coalition_get_ids SYSCTL_HANDLER_ARGS error = SYSCTL_IN(req, &value, sizeof(value)); - if (error) + if (error) { return error; - if (!req->newptr) + } + if (!req->newptr) { pid = req->p->p_pid; - else + } else { pid = (int)value; + } coal_dbg("looking up coalitions for pid:%d", pid); tproc = proc_find(pid); @@ -340,7 +349,7 @@ static int sysctl_coalition_get_ids SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, coalitions, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_coalition_get_ids, "Q", "coalition ids of a given process"); + 0, 0, sysctl_coalition_get_ids, "Q", "coalition ids of a given process"); static int sysctl_coalition_get_roles SYSCTL_HANDLER_ARGS @@ -353,12 +362,14 @@ static int sysctl_coalition_get_roles SYSCTL_HANDLER_ARGS error = SYSCTL_IN(req, &value, sizeof(value)); - if (error) + if (error) { return error; - if (!req->newptr) + } + if (!req->newptr) { pid = req->p->p_pid; - else + } else { pid = (int)value; + } coal_dbg("looking up coalitions for pid:%d", pid); tproc = proc_find(pid); @@ -374,7 +385,7 @@ static int sysctl_coalition_get_roles SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, coalition_roles, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_coalition_get_roles, "I", "coalition roles of a given process"); + 0, 0, sysctl_coalition_get_roles, "I", "coalition roles of a given process"); static int sysctl_coalition_get_page_count SYSCTL_HANDLER_ARGS @@ -388,12 +399,14 @@ static int sysctl_coalition_get_page_count SYSCTL_HANDLER_ARGS error = SYSCTL_IN(req, &value, sizeof(value)); - if (error) + if (error) { return error; - if (!req->newptr) + } + if (!req->newptr) { pid = req->p->p_pid; - else + } else { pid = (int)value; + } coal_dbg("looking up coalitions for pid:%d", pid); tproc = proc_find(pid); @@ -411,7 +424,7 @@ static int sysctl_coalition_get_page_count SYSCTL_HANDLER_ARGS int ntasks = 0; pgcount[t] = coalition_get_page_count(coal, &ntasks); coal_dbg("PID:%d, Coalition:%lld, type:%d, pgcount:%lld", - pid, coalition_id(coal), t, pgcount[t]); + pid, coalition_id(coal), t, pgcount[t]); } } @@ -421,7 +434,7 @@ static int sysctl_coalition_get_page_count SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, coalition_page_count, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_coalition_get_page_count, "Q", "coalition page count of a specified process"); + 0, 0, 
sysctl_coalition_get_page_count, "Q", "coalition page count of a specified process"); static int sysctl_coalition_get_pid_list SYSCTL_HANDLER_ARGS @@ -442,8 +455,9 @@ static int sysctl_coalition_get_pid_list SYSCTL_HANDLER_ARGS has_pid = 0; error = SYSCTL_IN(req, &value, sizeof(value) - sizeof(value[0])); } - if (error) + if (error) { return error; + } if (!req->newptr) { type = COALITION_TYPE_RESOURCE; sort_order = COALITION_SORT_DEFAULT; @@ -451,17 +465,19 @@ static int sysctl_coalition_get_pid_list SYSCTL_HANDLER_ARGS } else { type = value[0]; sort_order = value[1]; - if (has_pid) + if (has_pid) { pid = value[2]; - else + } else { pid = req->p->p_pid; + } } - if (type < 0 || type >= COALITION_NUM_TYPES) + if (type < 0 || type >= COALITION_NUM_TYPES) { return EINVAL; + } coal_dbg("getting constituent PIDS for coalition of type %d " - "containing pid:%d (sort:%d)", type, pid, sort_order); + "containing pid:%d (sort:%d)", type, pid, sort_order); tproc = proc_find(pid); if (tproc == NULL) { coal_dbg("ERROR: Couldn't find pid:%d", pid); @@ -474,10 +490,10 @@ static int sysctl_coalition_get_pid_list SYSCTL_HANDLER_ARGS } npids = coalition_get_pid_list(coal, COALITION_ROLEMASK_ALLROLES, sort_order, - pidlist, sizeof(pidlist) / sizeof(pidlist[0])); + pidlist, sizeof(pidlist) / sizeof(pidlist[0])); if (npids > (int)(sizeof(pidlist) / sizeof(pidlist[0]))) { coal_dbg("Too many members in coalition %llu (from pid:%d): %d!", - coalition_id(coal), pid, npids); + coalition_id(coal), pid, npids); npids = sizeof(pidlist) / sizeof(pidlist[0]); } @@ -489,14 +505,15 @@ out: return -npids; } - if (npids == 0) + if (npids == 0) { return ENOENT; + } return SYSCTL_OUT(req, pidlist, sizeof(pidlist[0]) * npids); } SYSCTL_PROC(_kern, OID_AUTO, coalition_pid_list, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_coalition_get_pid_list, "I", "list of PIDS which are members of the coalition of the current process"); + 0, 0, sysctl_coalition_get_pid_list, "I", "list of PIDS which are members of the coalition of the current process"); #if DEVELOPMENT static int sysctl_coalition_notify SYSCTL_HANDLER_ARGS @@ -510,12 +527,14 @@ static int sysctl_coalition_notify SYSCTL_HANDLER_ARGS error = SYSCTL_IN(req, value, sizeof(value)); if (error) { error = SYSCTL_IN(req, value, sizeof(value) - sizeof(value[0])); - if (error) + if (error) { return error; + } should_set = 0; } - if (!req->newptr) + if (!req->newptr) { return error; + } coal = coalition_find_by_id(value[0]); if (coal == COALITION_NULL) { @@ -523,8 +542,9 @@ static int sysctl_coalition_notify SYSCTL_HANDLER_ARGS return ESRCH; } - if (should_set) + if (should_set) { coalition_set_notify(coal, (int)value[1]); + } value[0] = (uint64_t)coalition_should_notify(coal); @@ -534,12 +554,12 @@ static int sysctl_coalition_notify SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, coalition_notify, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_coalition_notify, "Q", "get/set coalition notification flag"); + 0, 0, sysctl_coalition_notify, "Q", "get/set coalition notification flag"); extern int unrestrict_coalition_syscalls; SYSCTL_INT(_kern, OID_AUTO, unrestrict_coalitions, - CTLFLAG_RW, &unrestrict_coalition_syscalls, 0, - "unrestrict the coalition interface"); + CTLFLAG_RW, &unrestrict_coalition_syscalls, 0, + "unrestrict the coalition interface"); #endif /* DEVELOPMENT */ diff --git a/bsd/kern/sys_domain.c b/bsd/kern/sys_domain.c index 76741295c..be5bec4ad 100644 --- a/bsd/kern/sys_domain.c +++ b/bsd/kern/sys_domain.c @@ -2,7 +2,7 @@ * Copyright (c) 
2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ * */ @@ -43,13 +43,13 @@ struct domain *systemdomain = NULL; static void systemdomain_init(struct domain *); struct domain systemdomain_s = { - .dom_family = PF_SYSTEM, - .dom_name = "system", - .dom_init = systemdomain_init, + .dom_family = PF_SYSTEM, + .dom_name = "system", + .dom_init = systemdomain_init, }; SYSCTL_NODE(_net, PF_SYSTEM, systm, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "System domain"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "System domain"); static void diff --git a/bsd/kern/sys_generic.c b/bsd/kern/sys_generic.c index ad3d80a6b..ec07b11fc 100644 --- a/bsd/kern/sys_generic.c +++ b/bsd/kern/sys_generic.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
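The systemdomain_s definition just above relies on C99 designated initializers: only dom_family, dom_name, and dom_init are named, and every other struct domain field is zero-initialized. A stand-alone illustration with a simplified stand-in struct (not the kernel's struct domain; the PF_SYSTEM value of 32 matches Darwin's AF_SYSTEM but is used here only as an example):

```c
#include <stdio.h>

struct domain_like {
	int         dom_family;
	const char *dom_name;
	void      (*dom_init)(struct domain_like *);
	int         dom_refs;   /* not named below, so initialized to 0 */
};

static void
my_init(struct domain_like *d)
{
	printf("initializing %s (family %d)\n", d->dom_name, d->dom_family);
}

static struct domain_like mydomain = {
	.dom_family = 32,        /* PF_SYSTEM on Darwin; illustrative here */
	.dom_name   = "system",
	.dom_init   = my_init,
};

int
main(void)
{
	mydomain.dom_init(&mydomain);
	printf("dom_refs defaults to %d\n", mydomain.dom_refs);
	return 0;
}
```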
All Rights Reserved */ @@ -140,6 +140,8 @@ #include #include #include +/* for remote time api*/ +#include #if CONFIG_MACF #include @@ -155,14 +157,14 @@ extern kern_return_t IOBSDGetPlatformUUID(__darwin_uuid_t uuid, mach_timespec_t int rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval); int wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval); -__private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp, - user_addr_t bufp, user_size_t nbyte, - off_t offset, int flags, user_ssize_t *retval); -__private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp, - user_addr_t bufp, user_size_t nbyte, - off_t offset, int flags, user_ssize_t *retval); -__private_extern__ int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode); -__private_extern__ void donefileread(struct proc *p, struct fileproc *fp_ret, int fd); +__private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp, + user_addr_t bufp, user_size_t nbyte, + off_t offset, int flags, user_ssize_t *retval); +__private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp, + user_addr_t bufp, user_size_t nbyte, + off_t offset, int flags, user_ssize_t *retval); +__private_extern__ int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_vnode); +__private_extern__ void donefileread(struct proc *p, struct fileproc *fp_ret, int fd); /* Conflict wait queue for when selects collide (opaque type) */ struct waitq select_conflict_queue; @@ -199,7 +201,7 @@ int read(struct proc *p, struct read_args *uap, user_ssize_t *retval) { __pthread_testcancel(1); - return(read_nocancel(p, (struct read_nocancel_args *)uap, retval)); + return read_nocancel(p, (struct read_nocancel_args *)uap, retval); } int @@ -210,21 +212,22 @@ read_nocancel(struct proc *p, struct read_nocancel_args *uap, user_ssize_t *retv int fd = uap->fd; struct vfs_context context; - if ( (error = preparefileread(p, &fp, fd, 0)) ) - return (error); + if ((error = preparefileread(p, &fp, fd, 0))) { + return error; + } context = *(vfs_context_current()); context.vc_ucred = fp->f_fglob->fg_cred; error = dofileread(&context, fp, uap->cbuf, uap->nbyte, - (off_t)-1, 0, retval); + (off_t)-1, 0, retval); donefileread(p, fp, fd); - return (error); + return error; } -/* +/* * Pread system call * * Returns: 0 Success @@ -238,33 +241,34 @@ int pread(struct proc *p, struct pread_args *uap, user_ssize_t *retval) { __pthread_testcancel(1); - return(pread_nocancel(p, (struct pread_nocancel_args *)uap, retval)); + return pread_nocancel(p, (struct pread_nocancel_args *)uap, retval); } int pread_nocancel(struct proc *p, struct pread_nocancel_args *uap, user_ssize_t *retval) { - struct fileproc *fp = NULL; /* fp set by preparefileread() */ + struct fileproc *fp = NULL; /* fp set by preparefileread() */ int fd = uap->fd; int error; struct vfs_context context; - if ( (error = preparefileread(p, &fp, fd, 1)) ) + if ((error = preparefileread(p, &fp, fd, 1))) { goto out; + } context = *(vfs_context_current()); context.vc_ucred = fp->f_fglob->fg_cred; error = dofileread(&context, fp, uap->buf, uap->nbyte, - uap->offset, FOF_OFFSET, retval); - + uap->offset, FOF_OFFSET, retval); + donefileread(p, fp, fd); KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pread) | DBG_FUNC_NONE), - uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); + uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); 
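pread_nocancel() above calls preparefileread() with the pread check enabled, and the body that follows shows what that check enforces: a positional read needs a seekable vnode, so fifos are rejected with ESPIPE and ttys with ENXIO. The behavior is visible from user space; a quick sketch:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char buf[8];

	if (pipe(fds) != 0) {
		perror("pipe");
		return 1;
	}
	/* A pipe has no notion of an offset, so pread(2) must fail. */
	if (pread(fds[0], buf, sizeof(buf), 0) == -1 && errno == ESPIPE) {
		printf("pread on a pipe failed with ESPIPE, as expected\n");
	} else {
		printf("unexpected result: %s\n", strerror(errno));
	}
	close(fds[0]);
	close(fds[1]);
	return 0;
}
```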
out: - return (error); + return error; } /* @@ -276,7 +280,7 @@ donefileread(struct proc *p, struct fileproc *fp, int fd) { proc_fdlock_spin(p); fp_drop(p, fd, fp, 1); - proc_fdunlock(p); + proc_fdunlock(p); } /* @@ -291,7 +295,7 @@ int preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_pread) { vnode_t vp; - int error; + int error; struct fileproc *fp; AUDIT_ARG(fd, fd); @@ -301,15 +305,15 @@ preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_ error = fp_lookup(p, fd, &fp, 1); if (error) { - proc_fdunlock(p); - return (error); + proc_fdunlock(p); + return error; } if ((fp->f_flag & FREAD) == 0) { - error = EBADF; + error = EBADF; goto out; } if (check_for_pread && (fp->f_type != DTYPE_VNODE)) { - error = ESPIPE; + error = ESPIPE; goto out; } if (fp->f_type == DTYPE_VNODE) { @@ -318,7 +322,7 @@ preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_ if (check_for_pread && (vnode_isfifo(vp))) { error = ESPIPE; goto out; - } + } if (check_for_pread && (vp->v_flag & VISTTY)) { error = ENXIO; goto out; @@ -327,13 +331,13 @@ preparefileread(struct proc *p, struct fileproc **fp_ret, int fd, int check_for_ *fp_ret = fp; - proc_fdunlock(p); - return (0); + proc_fdunlock(p); + return 0; out: fp_drop(p, fd, fp, 1); proc_fdunlock(p); - return (error); + return error; } @@ -344,23 +348,24 @@ out: */ __private_extern__ int dofileread(vfs_context_t ctx, struct fileproc *fp, - user_addr_t bufp, user_size_t nbyte, off_t offset, int flags, - user_ssize_t *retval) + user_addr_t bufp, user_size_t nbyte, off_t offset, int flags, + user_ssize_t *retval) { uio_t auio; user_ssize_t bytecnt; long error = 0; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; - if (nbyte > INT_MAX) - return (EINVAL); + if (nbyte > INT_MAX) { + return EINVAL; + } if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) { - auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_READ, + &uio_buf[0], sizeof(uio_buf)); } else { - auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_READ, + &uio_buf[0], sizeof(uio_buf)); } uio_addiov(auio, bufp, nbyte); @@ -368,17 +373,18 @@ dofileread(vfs_context_t ctx, struct fileproc *fp, if ((error = fo_read(fp, auio, flags, ctx))) { if (uio_resid(auio) != bytecnt && (error == ERESTART || - error == EINTR || error == EWOULDBLOCK)) + error == EINTR || error == EWOULDBLOCK)) { error = 0; + } } bytecnt -= uio_resid(auio); *retval = bytecnt; - return (error); + return error; } -/* +/* * Scatter read system call. * * Returns: 0 Success @@ -391,7 +397,7 @@ int readv(struct proc *p, struct readv_args *uap, user_ssize_t *retval) { __pthread_testcancel(1); - return(readv_nocancel(p, (struct readv_nocancel_args *)uap, retval)); + return readv_nocancel(p, (struct readv_nocancel_args *)uap, retval); } int @@ -402,14 +408,15 @@ readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *re struct user_iovec *iovp; /* Verify range bedfore calling uio_create() */ - if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) - return (EINVAL); + if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) { + return EINVAL; + } /* allocate a uio large enough to hold the number of iovecs passed */ auio = uio_create(uap->iovcnt, 0, - (IS_64BIT_PROCESS(p) ? 
UIO_USERSPACE64 : UIO_USERSPACE32), - UIO_READ); - + (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), + UIO_READ); + /* get location of iovecs within the uio. then copyin the iovecs from * user space. */ @@ -419,13 +426,13 @@ readv_nocancel(struct proc *p, struct readv_nocancel_args *uap, user_ssize_t *re goto ExitThisRoutine; } error = copyin_user_iovec_array(uap->iovp, - IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, - uap->iovcnt, iovp); + IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, + uap->iovcnt, iovp); if (error) { goto ExitThisRoutine; } - - /* finalize uio_t for use and do the IO + + /* finalize uio_t for use and do the IO */ error = uio_calculateresid(auio); if (error) { @@ -437,7 +444,7 @@ ExitThisRoutine: if (auio != NULL) { uio_free(auio); } - return (error); + return error; } /* @@ -452,23 +459,23 @@ int write(struct proc *p, struct write_args *uap, user_ssize_t *retval) { __pthread_testcancel(1); - return(write_nocancel(p, (struct write_nocancel_args *)uap, retval)); - + return write_nocancel(p, (struct write_nocancel_args *)uap, retval); } int write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *retval) { struct fileproc *fp; - int error; + int error; int fd = uap->fd; bool wrote_some = false; AUDIT_ARG(fd, fd); - error = fp_lookup(p,fd,&fp,0); - if (error) - return(error); + error = fp_lookup(p, fd, &fp, 0); + if (error) { + return error; + } if ((fp->f_flag & FWRITE) == 0) { error = EBADF; } else if (FP_ISGUARDED(fp, GUARD_WRITE)) { @@ -480,18 +487,19 @@ write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *re context.vc_ucred = fp->f_fglob->fg_cred; error = dofilewrite(&context, fp, uap->cbuf, uap->nbyte, - (off_t)-1, 0, retval); + (off_t)-1, 0, retval); wrote_some = *retval > 0; } - if (wrote_some) - fp_drop_written(p, fd, fp); - else - fp_drop(p, fd, fp, 0); - return(error); + if (wrote_some) { + fp_drop_written(p, fd, fp); + } else { + fp_drop(p, fd, fp, 0); + } + return error; } -/* +/* * pwrite system call * * Returns: 0 Success @@ -506,23 +514,24 @@ int pwrite(struct proc *p, struct pwrite_args *uap, user_ssize_t *retval) { __pthread_testcancel(1); - return(pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval)); + return pwrite_nocancel(p, (struct pwrite_nocancel_args *)uap, retval); } int pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *retval) { - struct fileproc *fp; - int error; + struct fileproc *fp; + int error; int fd = uap->fd; vnode_t vp = (vnode_t)0; bool wrote_some = false; AUDIT_ARG(fd, fd); - error = fp_lookup(p,fd,&fp,0); - if (error) - return(error); + error = fp_lookup(p, fd, &fp, 0); + if (error) { + return error; + } if ((fp->f_flag & FWRITE) == 0) { error = EBADF; @@ -542,7 +551,7 @@ pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t * if (vnode_isfifo(vp)) { error = ESPIPE; goto errout; - } + } if ((vp->v_flag & VISTTY)) { error = ENXIO; goto errout; @@ -552,20 +561,21 @@ pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t * goto errout; } - error = dofilewrite(&context, fp, uap->buf, uap->nbyte, - uap->offset, FOF_OFFSET, retval); - wrote_some = *retval > 0; - } + error = dofilewrite(&context, fp, uap->buf, uap->nbyte, + uap->offset, FOF_OFFSET, retval); + wrote_some = *retval > 0; + } errout: - if (wrote_some) - fp_drop_written(p, fd, fp); - else - fp_drop(p, fd, fp, 0); + if (wrote_some) { + fp_drop_written(p, fd, fp); + } else { + fp_drop(p, fd, fp, 0); + } 
KERNEL_DEBUG_CONSTANT((BSDDBG_CODE(DBG_BSD_SC_EXTENDED_INFO, SYS_pwrite) | DBG_FUNC_NONE), - uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); - - return(error); + uap->fd, uap->nbyte, (unsigned int)((uap->offset >> 32)), (unsigned int)(uap->offset), 0); + + return error; } /* @@ -574,35 +584,36 @@ errout: * :EPIPE * :??? [indirect through struct fileops] */ -__private_extern__ int +__private_extern__ int dofilewrite(vfs_context_t ctx, struct fileproc *fp, - user_addr_t bufp, user_size_t nbyte, off_t offset, int flags, - user_ssize_t *retval) -{ + user_addr_t bufp, user_size_t nbyte, off_t offset, int flags, + user_ssize_t *retval) +{ uio_t auio; long error = 0; user_ssize_t bytecnt; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; if (nbyte > INT_MAX) { *retval = 0; - return (EINVAL); + return EINVAL; } if (IS_64BIT_PROCESS(vfs_context_proc(ctx))) { - auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_WRITE, - &uio_buf[0], sizeof(uio_buf)); + auio = uio_createwithbuffer(1, offset, UIO_USERSPACE64, UIO_WRITE, + &uio_buf[0], sizeof(uio_buf)); } else { - auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_WRITE, - &uio_buf[0], sizeof(uio_buf)); + auio = uio_createwithbuffer(1, offset, UIO_USERSPACE32, UIO_WRITE, + &uio_buf[0], sizeof(uio_buf)); } uio_addiov(auio, bufp, nbyte); - bytecnt = nbyte; + bytecnt = nbyte; if ((error = fo_write(fp, auio, flags, ctx))) { if (uio_resid(auio) != bytecnt && (error == ERESTART || - error == EINTR || error == EWOULDBLOCK)) + error == EINTR || error == EWOULDBLOCK)) { error = 0; + } /* The socket layer handles SIGPIPE */ if (error == EPIPE && fp->f_type != DTYPE_SOCKET && (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) { @@ -613,17 +624,17 @@ dofilewrite(vfs_context_t ctx, struct fileproc *fp, bytecnt -= uio_resid(auio); *retval = bytecnt; - return (error); + return error; } - -/* - * Gather write system call - */ + +/* + * Gather write system call + */ int writev(struct proc *p, struct writev_args *uap, user_ssize_t *retval) { __pthread_testcancel(1); - return(writev_nocancel(p, (struct writev_nocancel_args *)uap, retval)); + return writev_nocancel(p, (struct writev_nocancel_args *)uap, retval); } int @@ -638,14 +649,15 @@ writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t * AUDIT_ARG(fd, uap->fd); /* Verify range bedfore calling uio_create() */ - if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) - return (EINVAL); + if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) { + return EINVAL; + } /* allocate a uio large enough to hold the number of iovecs passed */ auio = uio_create(uap->iovcnt, 0, - (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), - UIO_WRITE); - + (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), + UIO_WRITE); + /* get location of iovecs within the uio. then copyin the iovecs from * user space. */ @@ -655,13 +667,13 @@ writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t * goto ExitThisRoutine; } error = copyin_user_iovec_array(uap->iovp, - IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, - uap->iovcnt, iovp); + IS_64BIT_PROCESS(p) ? 
UIO_USERSPACE64 : UIO_USERSPACE32, + uap->iovcnt, iovp); if (error) { goto ExitThisRoutine; } - - /* finalize uio_t for use and do the IO + + /* finalize uio_t for use and do the IO */ error = uio_calculateresid(auio); if (error) { @@ -669,9 +681,10 @@ writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t * } error = fp_lookup(p, uap->fd, &fp, 0); - if (error) + if (error) { goto ExitThisRoutine; - + } + if ((fp->f_flag & FWRITE) == 0) { error = EBADF; } else if (FP_ISGUARDED(fp, GUARD_WRITE)) { @@ -682,17 +695,18 @@ writev_nocancel(struct proc *p, struct writev_nocancel_args *uap, user_ssize_t * error = wr_uio(p, fp, auio, retval); wrote_some = *retval > 0; } - - if (wrote_some) - fp_drop_written(p, uap->fd, fp); - else - fp_drop(p, uap->fd, fp, 0); + + if (wrote_some) { + fp_drop_written(p, uap->fd, fp); + } else { + fp_drop(p, uap->fd, fp, 0); + } ExitThisRoutine: if (auio != NULL) { uio_free(auio); } - return (error); + return error; } @@ -709,16 +723,18 @@ wr_uio(struct proc *p, struct fileproc *fp, uio_t uio, user_ssize_t *retval) error = fo_write(fp, uio, 0, &context); if (error) { if (uio_resid(uio) != count && (error == ERESTART || - error == EINTR || error == EWOULDBLOCK)) - error = 0; + error == EINTR || error == EWOULDBLOCK)) { + error = 0; + } /* The socket layer handles SIGPIPE */ if (error == EPIPE && fp->f_type != DTYPE_SOCKET && - (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) - psignal(p, SIGPIPE); + (fp->f_fglob->fg_lflags & FG_NOSIGPIPE) == 0) { + psignal(p, SIGPIPE); + } } *retval = count - uio_resid(uio); - return(error); + return error; } @@ -730,8 +746,9 @@ rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval) user_ssize_t count; struct vfs_context context = *vfs_context_current(); - if ( (error = preparefileread(p, &fp, fdes, 0)) ) - return (error); + if ((error = preparefileread(p, &fp, fdes, 0))) { + return error; + } count = uio_resid(uio); @@ -740,15 +757,16 @@ rd_uio(struct proc *p, int fdes, uio_t uio, user_ssize_t *retval) error = fo_read(fp, uio, 0, &context); if (error) { - if (uio_resid(uio) != count && (error == ERESTART || - error == EINTR || error == EWOULDBLOCK)) - error = 0; + if (uio_resid(uio) != count && (error == ERESTART || + error == EINTR || error == EWOULDBLOCK)) { + error = 0; + } } *retval = count - uio_resid(uio); donefileread(p, fp, fdes); - return (error); + return error; } /* @@ -773,7 +791,7 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) caddr_t datap = NULL, memp = NULL; boolean_t is64bit = FALSE; int tmp = 0; -#define STK_PARAMS 128 +#define STK_PARAMS 128 char stkbuf[STK_PARAMS] = {}; int fd = uap->fd; u_long com = uap->com; @@ -784,10 +802,11 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) is64bit = proc_is64bit(p); #if CONFIG_AUDIT - if (is64bit) + if (is64bit) { AUDIT_ARG(value64, com); - else + } else { AUDIT_ARG(cmd, CAST_DOWN_EXPLICIT(int, com)); + } #endif /* CONFIG_AUDIT */ /* @@ -795,47 +814,49 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) * copied to/from the user's address space. 
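Both dofilewrite() and wr_uio() above deliver SIGPIPE on EPIPE only when the file is not a socket and the fileglob lacks FG_NOSIGPIPE. User space can set that flag with the Darwin-specific F_SETNOSIGPIPE fcntl; this sketch guards it with an #ifdef and falls back to ignoring the signal elsewhere:

```c
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];

	if (pipe(fds) != 0) {
		return 1;
	}
	close(fds[0]);   /* no reader: writes now hit EPIPE */

#ifdef F_SETNOSIGPIPE
	/* Darwin: sets FG_NOSIGPIPE on the fileglob, per descriptor. */
	(void)fcntl(fds[1], F_SETNOSIGPIPE, 1);
#else
	signal(SIGPIPE, SIG_IGN);   /* portable fallback for the demo */
#endif

	if (write(fds[1], "x", 1) == -1 && errno == EPIPE) {
		printf("got EPIPE without being killed by SIGPIPE\n");
	}
	close(fds[1]);
	return 0;
}
```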
*/ size = IOCPARM_LEN(com); - if (size > IOCPARM_MAX) - return ENOTTY; - if (size > sizeof (stkbuf)) { - if ((memp = (caddr_t)kalloc(size)) == 0) + if (size > IOCPARM_MAX) { + return ENOTTY; + } + if (size > sizeof(stkbuf)) { + if ((memp = (caddr_t)kalloc(size)) == 0) { return ENOMEM; + } datap = memp; - } else + } else { datap = &stkbuf[0]; + } if (com & IOC_IN) { if (size) { error = copyin(uap->data, datap, size); - if (error) + if (error) { goto out_nofp; + } } else { /* XXX - IOC_IN and no size? we should proably return an error here!! */ if (is64bit) { *(user_addr_t *)datap = uap->data; - } - else { + } else { *(uint32_t *)datap = (uint32_t)uap->data; } } - } else if ((com & IOC_OUT) && size) + } else if ((com & IOC_OUT) && size) { /* * Zero the buffer so the user always * gets back something deterministic. */ bzero(datap, size); - else if (com & IOC_VOID) { + } else if (com & IOC_VOID) { /* XXX - this is odd since IOC_VOID means no parameters */ if (is64bit) { *(user_addr_t *)datap = uap->data; - } - else { + } else { *(uint32_t *)datap = (uint32_t)uap->data; } } proc_fdlock(p); - error = fp_lookup(p,fd,&fp,1); - if (error) { + error = fp_lookup(p, fd, &fp, 1); + if (error) { proc_fdunlock(p); goto out_nofp; } @@ -843,16 +864,17 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) AUDIT_ARG(file, p, fp); if ((fp->f_flag & (FREAD | FWRITE)) == 0) { - error = EBADF; - goto out; + error = EBADF; + goto out; } context.vc_ucred = fp->f_fglob->fg_cred; #if CONFIG_MACF error = mac_file_check_ioctl(context.vc_ucred, fp->f_fglob, com); - if (error) + if (error) { goto out; + } #endif switch (com) { @@ -865,18 +887,20 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) break; case FIONBIO: - if ( (tmp = *(int *)datap) ) + if ((tmp = *(int *)datap)) { fp->f_flag |= FNONBLOCK; - else + } else { fp->f_flag &= ~FNONBLOCK; + } error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context); break; case FIOASYNC: - if ( (tmp = *(int *)datap) ) + if ((tmp = *(int *)datap)) { fp->f_flag |= FASYNC; - else + } else { fp->f_flag &= ~FASYNC; + } error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context); break; @@ -887,7 +911,7 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) break; } if (fp->f_type == DTYPE_PIPE) { - error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context); + error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context); break; } if (tmp <= 0) { @@ -919,8 +943,9 @@ ioctl(struct proc *p, struct ioctl_args *uap, __unused int32_t *retval) * Copy any data to user, size was * already set and checked above. 
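The size and direction logic above works because every ioctl command number encodes its own parameter length and transfer direction: IOCPARM_LEN() extracts the byte count the handler must copy, and the IOC_IN/IOC_OUT/IOC_VOID bits say which way. A user-space probe, assuming the BSD <sys/ioccom.h> macros that <sys/ioctl.h> pulls in on macOS and the BSDs:

```c
#include <stdio.h>
#include <sys/ioctl.h>   /* brings in <sys/ioccom.h> and <sys/filio.h> on BSD/macOS */

int
main(void)
{
	unsigned long com = FIONBIO;   /* "set non-blocking": an int is copied in */

	printf("FIONBIO parameter size: %lu bytes\n",
	    (unsigned long)IOCPARM_LEN(com));
	printf("direction:%s%s%s\n",
	    (com & IOC_IN)   ? " in"   : "",
	    (com & IOC_OUT)  ? " out"  : "",
	    (com & IOC_VOID) ? " void" : "");
	return 0;
}
```

This is also why the handler can size-check up front (IOCPARM_MAX) and choose between the 128-byte stack buffer and a kalloc() allocation before ever dispatching the command.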
*/ - if (error == 0 && (com & IOC_OUT) && size) + if (error == 0 && (com & IOC_OUT) && size) { error = copyout(datap, uap->data, (u_int)size); + } break; } out: @@ -928,18 +953,19 @@ out: proc_fdunlock(p); out_nofp: - if (memp) + if (memp) { kfree(memp, size); - return(error); + } + return error; } -int selwait, nselcoll; +int selwait, nselcoll; #define SEL_FIRSTPASS 1 #define SEL_SECONDPASS 2 extern int selcontinue(int error); extern int selprocess(int error, int sel_pass); static int selscan(struct proc *p, struct _select * sel, struct _select_data * seldata, - int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset); + int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset); static int selcount(struct proc *p, u_int32_t *ibits, int nfd, int *count); static int seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wakeup, int fromselcount); static int seldrop(struct proc *p, u_int32_t *ibits, int nfd); @@ -979,8 +1005,9 @@ select_nocancel(struct proc *p, struct select_nocancel_args *uap, int32_t *retva atv.tv_sec = atv32.tv_sec; atv.tv_usec = atv32.tv_usec; } - if (err) + if (err) { return err; + } if (itimerfix(&atv)) { err = EINVAL; @@ -1075,7 +1102,7 @@ select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeo int error = 0; u_int ni, nw; thread_t th_act; - struct uthread *uth; + struct uthread *uth; struct _select *sel; struct _select_data *seldata; int needzerofill = 1; @@ -1094,17 +1121,17 @@ select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeo seldata->count = 0; if (uap->nd < 0) { - return (EINVAL); + return EINVAL; } /* select on thread of process that already called proc_exit() */ if (p->p_fd == NULL) { - return (EBADF); + return EBADF; } - if (uap->nd > p->p_fd->fd_nfiles) + if (uap->nd > p->p_fd->fd_nfiles) { uap->nd = p->p_fd->fd_nfiles; /* forgiving; slightly wrong */ - + } nw = howmany(uap->nd, NFDBITS); ni = nw * sizeof(fd_mask); @@ -1121,8 +1148,9 @@ select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeo int nbytes = 3 * ni; /* Free previous allocation, if any */ - if (sel->ibits != NULL) + if (sel->ibits != NULL) { FREE(sel->ibits, M_TEMP); + } if (sel->obits != NULL) { FREE(sel->obits, M_TEMP); /* NULL out; subsequent ibits allocation may fail */ @@ -1130,13 +1158,14 @@ select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeo } MALLOC(sel->ibits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO); - if (sel->ibits == NULL) - return (EAGAIN); + if (sel->ibits == NULL) { + return EAGAIN; + } MALLOC(sel->obits, u_int32_t *, nbytes, M_TEMP, M_WAITOK | M_ZERO); if (sel->obits == NULL) { FREE(sel->ibits, M_TEMP); sel->ibits = NULL; - return (EAGAIN); + return EAGAIN; } sel->nbytes = nbytes; needzerofill = 0; @@ -1150,22 +1179,22 @@ select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeo /* * get the bits from the user address space */ -#define getbits(name, x) \ +#define getbits(name, x) \ do { \ - if (uap->name && (error = copyin(uap->name, \ - (caddr_t)&sel->ibits[(x) * nw], ni))) \ - goto continuation; \ + if (uap->name && (error = copyin(uap->name, \ + (caddr_t)&sel->ibits[(x) * nw], ni))) \ + goto continuation; \ } while (0) getbits(in, 0); getbits(ou, 1); getbits(ex, 2); -#undef getbits +#undef getbits seldata->abstime = timeout; - if ( (error = selcount(p, sel->ibits, uap->nd, &count)) ) { - goto continuation; + if ((error = selcount(p, sel->ibits, uap->nd, &count))) { + goto continuation; } /* @@ -1194,25 
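The getbits() macro above (and its putbits() counterpart later in the function) is wrapped in do { ... } while (0), the standard trick that makes a multi-statement macro expand to exactly one statement, so it stays safe inside an unbraced if/else. A minimal illustration with an invented macro:

```c
#include <stdio.h>

#define LOG_AND_COUNT(msg, counter) \
	do { \
		printf("%s\n", (msg)); \
		(counter)++; \
	} while (0)

int
main(void)
{
	int n = 0;

	/* Safe in an unbraced if/else precisely because of the wrapper:
	 * the expansion is a single statement ending at the semicolon. */
	if (n == 0)
		LOG_AND_COUNT("first event", n);
	else
		LOG_AND_COUNT("later event", n);

	printf("count = %d\n", n);
	return 0;
}
```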
+1223,29 @@ select_internal(struct proc *p, struct select_nocancel_args *uap, uint64_t timeo if (sz > uth->uu_wqstate_sz) { /* (re)allocate a buffer to hold waitq pointers */ if (uth->uu_wqset) { - if (waitq_set_is_valid(uth->uu_wqset)) + if (waitq_set_is_valid(uth->uu_wqset)) { waitq_set_deinit(uth->uu_wqset); + } FREE(uth->uu_wqset, M_SELECT); - } else if (uth->uu_wqstate_sz && !uth->uu_wqset) + } else if (uth->uu_wqstate_sz && !uth->uu_wqset) { panic("select: thread structure corrupt! " - "uu_wqstate_sz:%ld, wqstate_buf == NULL", - uth->uu_wqstate_sz); + "uu_wqstate_sz:%ld, wqstate_buf == NULL", + uth->uu_wqstate_sz); + } uth->uu_wqstate_sz = sz; MALLOC(uth->uu_wqset, struct waitq_set *, sz, M_SELECT, M_WAITOK); - if (!uth->uu_wqset) + if (!uth->uu_wqset) { panic("can't allocate %ld bytes for wqstate buffer", - uth->uu_wqstate_sz); + uth->uu_wqstate_sz); + } waitq_set_init(uth->uu_wqset, - SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST, NULL, NULL); + SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL, NULL); } - if (!waitq_set_is_valid(uth->uu_wqset)) + if (!waitq_set_is_valid(uth->uu_wqset)) { waitq_set_init(uth->uu_wqset, - SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST, NULL, NULL); + SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL, NULL); + } /* the last chunk of our buffer is an array of waitq pointers */ seldata->wqp = (uint64_t *)((char *)(uth->uu_wqset) + ALIGN(sizeof(struct waitq_set))); @@ -1229,7 +1262,7 @@ continuation: * need to wait_subqueue_unlink_all(), since we haven't set * anything at this point. */ - return (error); + return error; } return selprocess(0, SEL_FIRSTPASS); @@ -1254,7 +1287,7 @@ selprocess(int error, int sel_pass) int ncoll; u_int ni, nw; thread_t th_act; - struct uthread *uth; + struct uthread *uth; struct proc *p; struct select_nocancel_args *uap; int *retval; @@ -1274,13 +1307,16 @@ selprocess(int error, int sel_pass) uap = seldata->args; retval = seldata->retval; - if ((error != 0) && (sel_pass == SEL_FIRSTPASS)) + if ((error != 0) && (sel_pass == SEL_FIRSTPASS)) { unwind = 0; - if (seldata->count == 0) + } + if (seldata->count == 0) { unwind = 0; + } retry: - if (error != 0) + if (error != 0) { goto done; + } ncoll = nselcoll; OSBitOrAtomic(P_SELECT, &p->p_flag); @@ -1304,11 +1340,12 @@ retry: } if (uap->tv) { - uint64_t now; + uint64_t now; clock_get_uptime(&now); - if (now >= seldata->abstime) + if (now >= seldata->abstime) { goto done; + } } if (doretry) { @@ -1327,7 +1364,7 @@ retry: } /* No spurious wakeups due to colls,no need to check for them */ - if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) { + if ((sel_pass == SEL_SECONDPASS) || ((p->p_flag & P_SELECT) == 0)) { sel_pass = SEL_FIRSTPASS; goto retry; } @@ -1335,28 +1372,30 @@ retry: OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag); /* if the select is just for timeout skip check */ - if (seldata->count && (sel_pass == SEL_SECONDPASS)) + if (seldata->count && (sel_pass == SEL_SECONDPASS)) { panic("selprocess: 2nd pass assertwaiting"); + } /* waitq_set has waitqueue as first element */ wait_result = waitq_assert_wait64_leeway((struct waitq *)uth->uu_wqset, - NO_EVENT64, THREAD_ABORTSAFE, - TIMEOUT_URGENCY_USER_NORMAL, - seldata->abstime, - TIMEOUT_NO_LEEWAY); + NO_EVENT64, THREAD_ABORTSAFE, + TIMEOUT_URGENCY_USER_NORMAL, + seldata->abstime, + TIMEOUT_NO_LEEWAY); if (wait_result != THREAD_AWAKENED) { /* there are no preposted events */ error = tsleep1(NULL, PSOCK | PCATCH, - "select", 0, selcontinue); - } else { + "select", 0, selcontinue); + } else { prepost = 1; error = 0; } if (error == 0) { 
sel_pass = SEL_SECONDPASS; - if (!prepost) + if (!prepost) { somewakeup = 1; + } goto retry; } done: @@ -1372,18 +1411,20 @@ done: } OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag); /* select is not restarted after signals... */ - if (error == ERESTART) + if (error == ERESTART) { error = EINTR; - if (error == EWOULDBLOCK) + } + if (error == EWOULDBLOCK) { error = 0; + } nw = howmany(uap->nd, NFDBITS); ni = nw * sizeof(fd_mask); -#define putbits(name, x) \ +#define putbits(name, x) \ do { \ - if (uap->name && (error2 = \ - copyout((caddr_t)&sel->obits[(x) * nw], uap->name, ni))) \ - error = error2; \ + if (uap->name && (error2 = \ + copyout((caddr_t)&sel->obits[(x) * nw], uap->name, ni))) \ + error = error2; \ } while (0) if (error == 0) { @@ -1402,7 +1443,7 @@ done: uth->uu_flag &= ~UT_SAS_OLDMASK; } - return(error); + return error; } @@ -1417,7 +1458,8 @@ done: * Conditions: * proc_fdlock is held */ -static void selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set *wqset) +static void +selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set *wqset) { int valid_set = waitq_set_is_valid(wqset); int valid_q = !!wqp_id; @@ -1429,12 +1471,14 @@ static void selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set * */ /* unlink the underlying waitq from the input set (thread waitq set) */ - if (valid_q && valid_set) + if (valid_q && valid_set) { waitq_unlink_by_prepost_id(wqp_id, wqset); + } /* allow passing a NULL/invalid fp for seldrop unwind */ - if (!fp || !(fp->f_flags & (FP_INSELECT|FP_SELCONFLICT))) + if (!fp || !(fp->f_flags & (FP_INSELECT | FP_SELCONFLICT))) { return; + } /* * We can always remove the conflict queue from our thread's set: this @@ -1443,8 +1487,9 @@ static void selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set * * be linked with the global conflict queue, and the last waiter * on the fp clears the CONFLICT marker. */ - if (valid_set && (fp->f_flags & FP_SELCONFLICT)) + if (valid_set && (fp->f_flags & FP_SELCONFLICT)) { waitq_unlink(&select_conflict_queue, wqset); + } /* jca: TODO: * This isn't quite right - we don't actually know if this @@ -1471,14 +1516,16 @@ static void selunlinkfp(struct fileproc *fp, uint64_t wqp_id, struct waitq_set * * Conditions: * proc_fdlock is held */ -static uint64_t sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set *wqset) +static uint64_t +sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set *wqset) { struct waitq *f_wq = NULL; if ((fp->f_flags & FP_INSELECT) != FP_INSELECT) { - if (wq_data) + if (wq_data) { panic("non-null data:%p on fp:%p not in select?!" 
- "(wqset:%p)", wq_data, fp, wqset); + "(wqset:%p)", wq_data, fp, wqset); + } return 0; } @@ -1497,13 +1544,15 @@ static uint64_t sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set */ if (wq_data) { memcpy(&f_wq, wq_data, sizeof(f_wq)); - if (!waitq_is_valid(f_wq)) + if (!waitq_is_valid(f_wq)) { f_wq = NULL; + } } /* record the first thread's wqset in the fileproc structure */ - if (!fp->f_wset) + if (!fp->f_wset) { fp->f_wset = (void *)wqset; + } /* handles NULL f_wq */ return waitq_get_prepost_id(f_wq); @@ -1528,14 +1577,14 @@ static uint64_t sellinkfp(struct fileproc *fp, void **wq_data, struct waitq_set */ static int selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, - int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset) + int nfd, int32_t *retval, int sel_pass, struct waitq_set *wqset) { struct filedesc *fdp = p->p_fd; int msk, i, j, fd; u_int32_t bits; struct fileproc *fp; - int n = 0; /* count of bits */ - int nc = 0; /* bit vector offset (nc'th bit) */ + int n = 0; /* count of bits */ + int nc = 0; /* bit vector offset (nc'th bit) */ static int flag[3] = { FREAD, FWRITE, 0 }; u_int32_t *iptr, *optr; u_int nw; @@ -1549,8 +1598,8 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, * in Beaker1C ; verify that the p->p_fd is valid */ if (fdp == NULL) { - *retval=0; - return(EIO); + *retval = 0; + return EIO; } ibits = sel->ibits; obits = sel->obits; @@ -1571,15 +1620,16 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, optr = (u_int32_t *)&obits[msk * nw]; for (i = 0; i < nfd; i += NFDBITS) { - bits = iptr[i/NFDBITS]; + bits = iptr[i / NFDBITS]; while ((j = ffs(bits)) && (fd = i + --j) < nfd) { bits &= ~(1 << j); - if (fd < fdp->fd_nfiles) + if (fd < fdp->fd_nfiles) { fp = fdp->fd_ofiles[fd]; - else + } else { fp = NULL; + } if (fp == NULL || (fdp->fd_ofileflags[fd] & UF_RESERVED)) { /* @@ -1587,7 +1637,7 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, * fd, let the caller unwind... */ proc_fdunlock(p); - return(EBADF); + return EBADF; } if (sel_pass == SEL_SECONDPASS) { reserved_link = 0; @@ -1596,11 +1646,12 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, } else { reserved_link = waitq_link_reserve((struct waitq *)wqset); rl_ptr = &reserved_link; - if (fp->f_flags & FP_INSELECT) + if (fp->f_flags & FP_INSELECT) { /* someone is already in select on this fp */ fp->f_flags |= FP_SELCONFLICT; - else + } else { fp->f_flags |= FP_INSELECT; + } waitq_set_lazy_init_link(wqset); } @@ -1615,8 +1666,8 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, /* The select; set the bit, if true */ if (fp->f_ops && fp->f_type - && fo_select(fp, flag[msk], rl_ptr, &context)) { - optr[fd/NFDBITS] |= (1 << (fd % NFDBITS)); + && fo_select(fp, flag[msk], rl_ptr, &context)) { + optr[fd / NFDBITS] |= (1 << (fd % NFDBITS)); n++; } if (sel_pass == SEL_FIRSTPASS) { @@ -1627,8 +1678,9 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, * will have been updated by selrecord to be a pointer * to the selinfo's waitq. 
*/ - if (reserved_link == rsvd) + if (reserved_link == rsvd) { rl_ptr = NULL; /* fo_select never called selrecord() */ + } /* * Hook up the thread's waitq set either to * the fileproc structure, or to the global @@ -1644,7 +1696,7 @@ selscan(struct proc *p, struct _select *sel, struct _select_data * seldata, proc_fdunlock(p); *retval = n; - return (0); + return 0; } int poll_callback(struct kqueue *, struct kevent_internal_s *, void *); @@ -1659,7 +1711,7 @@ int poll(struct proc *p, struct poll_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(poll_nocancel(p, (struct poll_nocancel_args *)uap, retval)); + return poll_nocancel(p, (struct poll_nocancel_args *)uap, retval); } @@ -1684,12 +1736,14 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) * safe, but not overly restrictive. */ if (nfds > OPEN_MAX || - (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && (proc_suser(p) || nfds > FD_SETSIZE))) - return (EINVAL); + (nfds > p->p_rlimit[RLIMIT_NOFILE].rlim_cur && (proc_suser(p) || nfds > FD_SETSIZE))) { + return EINVAL; + } kq = kqueue_alloc(p, 0); - if (kq == NULL) - return (EAGAIN); + if (kq == NULL) { + return EAGAIN; + } ni = nfds * sizeof(struct pollfd) + sizeof(struct poll_continue_args); MALLOC(cont, struct poll_continue_args *, ni, M_TEMP, M_WAITOK); @@ -1697,11 +1751,12 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) error = EAGAIN; goto out; } - + fds = (struct pollfd *)&cont[1]; error = copyin(uap->fds, fds, nfds * sizeof(struct pollfd)); - if (error) + if (error) { goto out; + } if (uap->timeout != -1) { struct timeval rtv; @@ -1737,20 +1792,22 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) struct kevent_internal_s kev = { .ident = fds[i].fd, .flags = EV_ADD | EV_ONESHOT | EV_POLL, - .udata = CAST_USER_ADDR_T(&fds[i]) }; + .udata = CAST_USER_ADDR_T(&fds[i]) + }; /* Handle input events */ - if (events & ( POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP )) { + if (events & (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND | POLLHUP)) { kev.filter = EVFILT_READ; - if (events & ( POLLPRI | POLLRDBAND )) + if (events & (POLLPRI | POLLRDBAND)) { kev.flags |= EV_OOBAND; + } rc = kevent_register(kq, &kev, &knlc); assert((rc & FILTER_REGISTER_WAIT) == 0); } /* Handle output events */ if ((kev.flags & EV_ERROR) == 0 && - (events & ( POLLOUT | POLLWRNORM | POLLWRBAND ))) { + (events & (POLLOUT | POLLWRNORM | POLLWRBAND))) { kev.filter = EVFILT_WRITE; rc = kevent_register(kq, &kev, &knlc); assert((rc & FILTER_REGISTER_WAIT) == 0); @@ -1758,17 +1815,21 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) /* Handle BSD extension vnode events */ if ((kev.flags & EV_ERROR) == 0 && - (events & ( POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE ))) { + (events & (POLLEXTEND | POLLATTRIB | POLLNLINK | POLLWRITE))) { kev.filter = EVFILT_VNODE; kev.fflags = 0; - if (events & POLLEXTEND) + if (events & POLLEXTEND) { kev.fflags |= NOTE_EXTEND; - if (events & POLLATTRIB) + } + if (events & POLLATTRIB) { kev.fflags |= NOTE_ATTRIB; - if (events & POLLNLINK) + } + if (events & POLLNLINK) { kev.fflags |= NOTE_LINK; - if (events & POLLWRITE) + } + if (events & POLLWRITE) { kev.fflags |= NOTE_WRITE; + } rc = kevent_register(kq, &kev, &knlc); assert((rc & FILTER_REGISTER_WAIT) == 0); } @@ -1776,8 +1837,9 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) if (kev.flags & EV_ERROR) { fds[i].revents = POLLNVAL; rfds++; - } else + } else { 
fds[i].revents = 0; + } } /* @@ -1788,16 +1850,18 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) * out. If a subset of the provided FDs failed to register, then we * will still call the kqueue_scan function. */ - if (nfds && (rfds == nfds)) + if (nfds && (rfds == nfds)) { goto done; + } /* * If any events have trouble registering, an event has fired and we * shouldn't wait for events in kqueue_scan -- use the current time as * the deadline. */ - if (rfds) + if (rfds) { getmicrouptime(&atv); + } /* scan for, and possibly wait for, the kevents to trigger */ cont->pca_fds = uap->fds; @@ -1806,24 +1870,27 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) error = kqueue_scan(kq, poll_callback, NULL, cont, NULL, &atv, p); rfds = cont->pca_rfds; - done: +done: OSBitAndAtomic(~((uint32_t)P_SELECT), &p->p_flag); /* poll is not restarted after signals... */ - if (error == ERESTART) + if (error == ERESTART) { error = EINTR; - if (error == EWOULDBLOCK) + } + if (error == EWOULDBLOCK) { error = 0; + } if (error == 0) { error = copyout(fds, uap->fds, nfds * sizeof(struct pollfd)); *retval = rfds; } - out: - if (NULL != cont) +out: + if (NULL != cont) { FREE(cont, M_TEMP); + } kqueue_dealloc(kq); - return (error); + return error; } int @@ -1835,51 +1902,59 @@ poll_callback(__unused struct kqueue *kq, struct kevent_internal_s *kevp, void * short mask = 0; /* convert the results back into revents */ - if (kevp->flags & EV_EOF) + if (kevp->flags & EV_EOF) { fds->revents |= POLLHUP; - if (kevp->flags & EV_ERROR) + } + if (kevp->flags & EV_ERROR) { fds->revents |= POLLERR; + } switch (kevp->filter) { case EVFILT_READ: - if (fds->revents & POLLHUP) - mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND ); - else { + if (fds->revents & POLLHUP) { + mask = (POLLIN | POLLRDNORM | POLLPRI | POLLRDBAND); + } else { mask = (POLLIN | POLLRDNORM); - if (kevp->flags & EV_OOBAND) + if (kevp->flags & EV_OOBAND) { mask |= (POLLPRI | POLLRDBAND); + } } fds->revents |= (fds->events & mask); break; case EVFILT_WRITE: - if (!(fds->revents & POLLHUP)) - fds->revents |= (fds->events & ( POLLOUT | POLLWRNORM | POLLWRBAND )); + if (!(fds->revents & POLLHUP)) { + fds->revents |= (fds->events & (POLLOUT | POLLWRNORM | POLLWRBAND)); + } break; case EVFILT_VNODE: - if (kevp->fflags & NOTE_EXTEND) + if (kevp->fflags & NOTE_EXTEND) { fds->revents |= (fds->events & POLLEXTEND); - if (kevp->fflags & NOTE_ATTRIB) + } + if (kevp->fflags & NOTE_ATTRIB) { fds->revents |= (fds->events & POLLATTRIB); - if (kevp->fflags & NOTE_LINK) + } + if (kevp->fflags & NOTE_LINK) { fds->revents |= (fds->events & POLLNLINK); - if (kevp->fflags & NOTE_WRITE) + } + if (kevp->fflags & NOTE_WRITE) { fds->revents |= (fds->events & POLLWRITE); + } break; } - if (fds->revents != 0 && prev_revents == 0) + if (fds->revents != 0 && prev_revents == 0) { cont->pca_rfds++; + } return 0; } - + int seltrue(__unused dev_t dev, __unused int flag, __unused struct proc *p) { - - return (1); + return 1; } /* @@ -1916,7 +1991,7 @@ selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp) int n = 0; u_int32_t *iptr; u_int nw; - int error=0; + int error = 0; int dropcount; int need_wakeup = 0; @@ -1926,7 +2001,7 @@ selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp) */ if (fdp == NULL) { *countp = 0; - return(EIO); + return EIO; } nw = howmany(nfd, NFDBITS); @@ -1934,20 +2009,21 @@ selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp) for (msk = 0; msk < 3; msk++) { iptr = (u_int32_t 
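poll_callback() above folds the kqueue results back into revents: EV_EOF becomes POLLHUP, EVFILT_READ maps to POLLIN/POLLRDNORM (with EV_OOBAND adding POLLPRI/POLLRDBAND), and so on. The net effect is ordinary poll(2) semantics, observable from user space; note that the exact flag combination reported on hangup can vary a little between platforms:

```c
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fds[2];
	char c;
	struct pollfd pfd;

	if (pipe(fds) != 0) {
		return 1;
	}
	(void)write(fds[1], "x", 1);

	pfd.fd = fds[0];
	pfd.events = POLLIN;
	if (poll(&pfd, 1, 0) == 1 && (pfd.revents & POLLIN)) {
		printf("data buffered: POLLIN\n");
	}

	(void)read(fds[0], &c, 1);   /* drain the pipe */
	close(fds[1]);               /* hang up the write side */
	if (poll(&pfd, 1, 0) == 1 && (pfd.revents & POLLHUP)) {
		printf("writer closed: POLLHUP\n");
	}

	close(fds[0]);
	return 0;
}
```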
*)&ibits[msk * nw]; for (i = 0; i < nfd; i += NFDBITS) { - bits = iptr[i/NFDBITS]; + bits = iptr[i / NFDBITS]; while ((j = ffs(bits)) && (fd = i + --j) < nfd) { bits &= ~(1 << j); - if (fd < fdp->fd_nfiles) + if (fd < fdp->fd_nfiles) { fp = fdp->fd_ofiles[fd]; - else + } else { fp = NULL; + } if (fp == NULL || - (fdp->fd_ofileflags[fd] & UF_RESERVED)) { - *countp = 0; - error = EBADF; - goto bad; + (fdp->fd_ofileflags[fd] & UF_RESERVED)) { + *countp = 0; + error = EBADF; + goto bad; } fp->f_iocount++; n++; @@ -1957,13 +2033,14 @@ selcount(struct proc *p, u_int32_t *ibits, int nfd, int *countp) proc_fdunlock(p); *countp = n; - return (0); + return 0; bad: dropcount = 0; - - if (n == 0) + + if (n == 0) { goto out; + } /* Ignore error return; it's already EBADF */ (void)seldrop_locked(p, ibits, nfd, n, &need_wakeup, 1); @@ -1972,7 +2049,7 @@ out: if (need_wakeup) { wakeup(&p->p_fpdrainwait); } - return(error); + return error; } @@ -2022,7 +2099,7 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak * in Beaker1C ; verify that the p->p_fd is valid */ if (fdp == NULL) { - return(EIO); + return EIO; } nw = howmany(nfd, NFDBITS); @@ -2032,16 +2109,17 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak for (msk = 0; msk < 3; msk++) { iptr = (u_int32_t *)&ibits[msk * nw]; for (i = 0; i < nfd; i += NFDBITS) { - bits = iptr[i/NFDBITS]; + bits = iptr[i / NFDBITS]; while ((j = ffs(bits)) && (fd = i + --j) < nfd) { bits &= ~(1 << j); fp = fdp->fd_ofiles[fd]; /* * If we've already dropped as many as were - * counted/scanned, then we are done. + * counted/scanned, then we are done. */ - if ((fromselcount != 0) && (++dropcount > lim)) + if ((fromselcount != 0) && (++dropcount > lim)) { goto done; + } /* * unlink even potentially NULL fileprocs. @@ -2049,8 +2127,8 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak * still need to cleanup the waitq links! */ selunlinkfp(fp, - seldata->wqp ? seldata->wqp[nc] : 0, - uth->uu_wqset); + seldata->wqp ? seldata->wqp[nc] : 0, + uth->uu_wqset); nc++; @@ -2061,8 +2139,9 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak } fp->f_iocount--; - if (fp->f_iocount < 0) + if (fp->f_iocount < 0) { panic("f_iocount overdecrement!"); + } if (fp->f_iocount == 0) { /* @@ -2071,8 +2150,9 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak * and is also responsible for waking up anyone * waiting on iocounts to drain. 
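	 * (The wakeup itself is deferred: this function only sets
	 * *need_wakeup; its callers, selcount() and seldrop(), issue
	 * the wakeup(&p->p_fpdrainwait) once the proc_fdlock has
	 * been dropped.)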
*/ - if (fp->f_flags & FP_SELCONFLICT) + if (fp->f_flags & FP_SELCONFLICT) { fp->f_flags &= ~FP_SELCONFLICT; + } if (p->p_fpdrainwait) { p->p_fpdrainwait = 0; *need_wakeup = 1; @@ -2082,7 +2162,7 @@ seldrop_locked(struct proc *p, u_int32_t *ibits, int nfd, int lim, int *need_wak } } done: - return (error); + return error; } @@ -2098,7 +2178,7 @@ seldrop(struct proc *p, u_int32_t *ibits, int nfd) if (need_wakeup) { wakeup(&p->p_fpdrainwait); } - return (error); + return error; } /* @@ -2107,7 +2187,7 @@ seldrop(struct proc *p, u_int32_t *ibits, int nfd) void selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data) { - thread_t cur_act = current_thread(); + thread_t cur_act = current_thread(); struct uthread * ut = get_bsdthread_info(cur_act); /* on input, s_data points to the 64-bit ID of a reserved link object */ uint64_t *reserved_link = (uint64_t *)s_data; @@ -2115,8 +2195,9 @@ selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data) /* need to look at collisions */ /*do not record if this is second pass of select */ - if (!s_data) + if (!s_data) { return; + } if ((sip->si_flags & SI_INITED) == 0) { waitq_init(&sip->si_waitq, SYNC_POLICY_FIFO); @@ -2124,15 +2205,16 @@ selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data) sip->si_flags &= ~SI_CLEAR; } - if (sip->si_flags & SI_RECORDED) + if (sip->si_flags & SI_RECORDED) { sip->si_flags |= SI_COLL; - else + } else { sip->si_flags &= ~SI_COLL; + } sip->si_flags |= SI_RECORDED; /* note: this checks for pre-existing linkage */ waitq_link(&sip->si_waitq, ut->uu_wqset, - WAITQ_SHOULD_LOCK, reserved_link); + WAITQ_SHOULD_LOCK, reserved_link); /* * Always consume the reserved link. @@ -2165,7 +2247,6 @@ selrecord(__unused struct proc *selector, struct selinfo *sip, void *s_data) void selwakeup(struct selinfo *sip) { - if ((sip->si_flags & SI_INITED) == 0) { return; } @@ -2181,13 +2262,12 @@ selwakeup(struct selinfo *sip) if (sip->si_flags & SI_RECORDED) { waitq_wakeup64_all(&sip->si_waitq, NO_EVENT64, - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); sip->si_flags &= ~SI_RECORDED; } - } -void +void selthreadclear(struct selinfo *sip) { struct waitq *wq; @@ -2196,8 +2276,8 @@ selthreadclear(struct selinfo *sip) return; } if (sip->si_flags & SI_RECORDED) { - selwakeup(sip); - sip->si_flags &= ~(SI_RECORDED | SI_COLL); + selwakeup(sip); + sip->si_flags &= ~(SI_RECORDED | SI_COLL); } sip->si_flags |= SI_CLEAR; sip->si_flags &= ~SI_INITED; @@ -2217,13 +2297,13 @@ selthreadclear(struct selinfo *sip) -#define DBG_POST 0x10 -#define DBG_WATCH 0x11 -#define DBG_WAIT 0x12 -#define DBG_MOD 0x13 -#define DBG_EWAKEUP 0x14 -#define DBG_ENQUEUE 0x15 -#define DBG_DEQUEUE 0x16 +#define DBG_POST 0x10 +#define DBG_WATCH 0x11 +#define DBG_WAIT 0x12 +#define DBG_MOD 0x13 +#define DBG_EWAKEUP 0x14 +#define DBG_ENQUEUE 0x15 +#define DBG_DEQUEUE 0x16 #define DBG_MISC_POST MISCDBG_CODE(DBG_EVENT,DBG_POST) #define DBG_MISC_WATCH MISCDBG_CODE(DBG_EVENT,DBG_WATCH) @@ -2234,13 +2314,13 @@ selthreadclear(struct selinfo *sip) #define DBG_MISC_DEQUEUE MISCDBG_CODE(DBG_EVENT,DBG_DEQUEUE) -#define EVPROCDEQUE(p, evq) do { \ - proc_lock(p); \ - if (evq->ee_flags & EV_QUEUED) { \ - TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); \ - evq->ee_flags &= ~EV_QUEUED; \ - } \ - proc_unlock(p); \ +#define EVPROCDEQUE(p, evq) do { \ + proc_lock(p); \ + if (evq->ee_flags & EV_QUEUED) { \ + TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); \ + evq->ee_flags &= ~EV_QUEUED; \ + } \ + proc_unlock(p); \ } while 
(0); @@ -2251,18 +2331,19 @@ selthreadclear(struct selinfo *sip) void evsofree(struct socket *sp) { - struct eventqelt *evq, *next; - proc_t p; + struct eventqelt *evq, *next; + proc_t p; - if (sp == NULL) - return; + if (sp == NULL) { + return; + } for (evq = sp->so_evlist.tqh_first; evq != NULL; evq = next) { - next = evq->ee_slist.tqe_next; + next = evq->ee_slist.tqe_next; p = evq->ee_proc; if (evq->ee_flags & EV_QUEUED) { - EVPROCDEQUE(p, evq); + EVPROCDEQUE(p, evq); } TAILQ_REMOVE(&sp->so_evlist, evq, ee_slist); // remove from socket q FREE(evq, M_TEMP); @@ -2277,11 +2358,11 @@ evsofree(struct socket *sp) void evpipefree(struct pipe *cpipe) { - struct eventqelt *evq, *next; - proc_t p; + struct eventqelt *evq, *next; + proc_t p; for (evq = cpipe->pipe_evlist.tqh_first; evq != NULL; evq = next) { - next = evq->ee_slist.tqe_next; + next = evq->ee_slist.tqe_next; p = evq->ee_proc; EVPROCDEQUE(p, evq); @@ -2301,19 +2382,19 @@ evpipefree(struct pipe *cpipe) static void evprocenque(struct eventqelt *evq) { - proc_t p; + proc_t p; assert(evq); p = evq->ee_proc; - KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_START, (uint32_t)evq, evq->ee_flags, evq->ee_eventmask,0,0); + KERNEL_DEBUG(DBG_MISC_ENQUEUE | DBG_FUNC_START, (uint32_t)evq, evq->ee_flags, evq->ee_eventmask, 0, 0); proc_lock(p); if (evq->ee_flags & EV_QUEUED) { - proc_unlock(p); + proc_unlock(p); - KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_MISC_ENQUEUE | DBG_FUNC_END, 0, 0, 0, 0, 0); return; } evq->ee_flags |= EV_QUEUED; @@ -2324,7 +2405,7 @@ evprocenque(struct eventqelt *evq) wakeup(&p->p_evlist); - KERNEL_DEBUG(DBG_MISC_ENQUEUE|DBG_FUNC_END, 0,0,0,0,0); + KERNEL_DEBUG(DBG_MISC_ENQUEUE | DBG_FUNC_END, 0, 0, 0, 0, 0); } @@ -2334,58 +2415,57 @@ evprocenque(struct eventqelt *evq) void postpipeevent(struct pipe *pipep, int event) { - int mask; + int mask; struct eventqelt *evq; - if (pipep == NULL) - return; - KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, event,0,0,1,0); + if (pipep == NULL) { + return; + } + KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_START, event, 0, 0, 1, 0); for (evq = pipep->pipe_evlist.tqh_first; - evq != NULL; evq = evq->ee_slist.tqe_next) { - - if (evq->ee_eventmask == 0) - continue; - mask = 0; + evq != NULL; evq = evq->ee_slist.tqe_next) { + if (evq->ee_eventmask == 0) { + continue; + } + mask = 0; switch (event & (EV_RWBYTES | EV_RCLOSED | EV_WCLOSED)) { - case EV_RWBYTES: - if ((evq->ee_eventmask & EV_RE) && pipep->pipe_buffer.cnt) { - mask |= EV_RE; - evq->ee_req.er_rcnt = pipep->pipe_buffer.cnt; - } - if ((evq->ee_eventmask & EV_WR) && - (MAX(pipep->pipe_buffer.size,PIPE_SIZE) - pipep->pipe_buffer.cnt) >= PIPE_BUF) { - - if (pipep->pipe_state & PIPE_EOF) { - mask |= EV_WR|EV_RESET; - break; - } - mask |= EV_WR; - evq->ee_req.er_wcnt = MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt; - } - break; + if ((evq->ee_eventmask & EV_RE) && pipep->pipe_buffer.cnt) { + mask |= EV_RE; + evq->ee_req.er_rcnt = pipep->pipe_buffer.cnt; + } + if ((evq->ee_eventmask & EV_WR) && + (MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt) >= PIPE_BUF) { + if (pipep->pipe_state & PIPE_EOF) { + mask |= EV_WR | EV_RESET; + break; + } + mask |= EV_WR; + evq->ee_req.er_wcnt = MAX(pipep->pipe_buffer.size, PIPE_SIZE) - pipep->pipe_buffer.cnt; + } + break; case EV_WCLOSED: case EV_RCLOSED: - if ((evq->ee_eventmask & EV_RE)) { - mask |= EV_RE|EV_RCLOSED; - } - if ((evq->ee_eventmask & EV_WR)) { - mask |= EV_WR|EV_WCLOSED; - } - break; + if ((evq->ee_eventmask & EV_RE)) { + mask |= EV_RE | 
EV_RCLOSED; + } + if ((evq->ee_eventmask & EV_WR)) { + mask |= EV_WR | EV_WCLOSED; + } + break; default: - return; + return; } if (mask) { - /* + /* * disarm... postevents are nops until this event is 'read' via * waitevent and then re-armed via modwatch */ - evq->ee_eventmask = 0; + evq->ee_eventmask = 0; /* * since events are disarmed until after the waitevent @@ -2396,14 +2476,14 @@ postpipeevent(struct pipe *pipep, int event) * the pipe lock, and we're updating the event outside * of the proc lock, which it will hold */ - evq->ee_req.er_eventbits |= mask; + evq->ee_req.er_eventbits |= mask; - KERNEL_DEBUG(DBG_MISC_POST, (uint32_t)evq, evq->ee_req.er_eventbits, mask, 1,0); + KERNEL_DEBUG(DBG_MISC_POST, (uint32_t)evq, evq->ee_req.er_eventbits, mask, 1, 0); evprocenque(evq); } } - KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, 0,0,0,1,0); + KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_END, 0, 0, 0, 1, 0); } #if SOCKETS @@ -2415,175 +2495,178 @@ postpipeevent(struct pipe *pipep, int event) void postevent(struct socket *sp, struct sockbuf *sb, int event) { - int mask; - struct eventqelt *evq; - struct tcpcb *tp; + int mask; + struct eventqelt *evq; + struct tcpcb *tp; - if (sb) - sp = sb->sb_so; - if (sp == NULL) - return; + if (sb) { + sp = sb->sb_so; + } + if (sp == NULL) { + return; + } - KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_START, (int)sp, event, 0, 0, 0); + KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_START, (int)sp, event, 0, 0, 0); for (evq = sp->so_evlist.tqh_first; - evq != NULL; evq = evq->ee_slist.tqe_next) { - - if (evq->ee_eventmask == 0) - continue; - mask = 0; + evq != NULL; evq = evq->ee_slist.tqe_next) { + if (evq->ee_eventmask == 0) { + continue; + } + mask = 0; /* ready for reading: - - byte cnt >= receive low water mark - - read-half of conn closed - - conn pending for listening sock - - socket error pending - - ready for writing - - byte cnt avail >= send low water mark - - write half of conn closed - - socket error pending - - non-blocking conn completed successfully - - exception pending - - out of band data - - sock at out of band mark - */ + * - byte cnt >= receive low water mark + * - read-half of conn closed + * - conn pending for listening sock + * - socket error pending + * + * ready for writing + * - byte cnt avail >= send low water mark + * - write half of conn closed + * - socket error pending + * - non-blocking conn completed successfully + * + * exception pending + * - out of band data + * - sock at out of band mark + */ switch (event & EV_DMASK) { - case EV_OOB: - if ((evq->ee_eventmask & EV_EX)) { - if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) - mask |= EV_EX|EV_OOB; - } - break; - - case EV_RWBYTES|EV_OOB: - if ((evq->ee_eventmask & EV_EX)) { - if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) - mask |= EV_EX|EV_OOB; - } - /* - * fall into the next case - */ + if ((evq->ee_eventmask & EV_EX)) { + if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) { + mask |= EV_EX | EV_OOB; + } + } + break; + + case EV_RWBYTES | EV_OOB: + if ((evq->ee_eventmask & EV_EX)) { + if (sp->so_oobmark || ((sp->so_state & SS_RCVATMARK))) { + mask |= EV_EX | EV_OOB; + } + } + /* + * fall into the next case + */ case EV_RWBYTES: - if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) { - /* for AFP/OT purposes; may go away in future */ - if ((SOCK_DOM(sp) == PF_INET || - SOCK_DOM(sp) == PF_INET6) && - SOCK_PROTO(sp) == IPPROTO_TCP && - (sp->so_error == ECONNREFUSED || - sp->so_error == ECONNRESET)) { - if (sp->so_pcb == NULL || - sotoinpcb(sp)->inp_state == - INPCB_STATE_DEAD || - (tp = 
sototcpcb(sp)) == NULL || - tp->t_state == TCPS_CLOSED) { - mask |= EV_RE|EV_RESET; - break; - } - } - mask |= EV_RE; - evq->ee_req.er_rcnt = sp->so_rcv.sb_cc; - - if (sp->so_state & SS_CANTRCVMORE) { - mask |= EV_FIN; - break; - } - } - if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) { - /* for AFP/OT purposes; may go away in future */ - if ((SOCK_DOM(sp) == PF_INET || - SOCK_DOM(sp) == PF_INET6) && - SOCK_PROTO(sp) == IPPROTO_TCP && - (sp->so_error == ECONNREFUSED || - sp->so_error == ECONNRESET)) { - if (sp->so_pcb == NULL || - sotoinpcb(sp)->inp_state == - INPCB_STATE_DEAD || - (tp = sototcpcb(sp)) == NULL || - tp->t_state == TCPS_CLOSED) { - mask |= EV_WR|EV_RESET; - break; - } - } - mask |= EV_WR; - evq->ee_req.er_wcnt = sbspace(&sp->so_snd); - } - break; + if ((evq->ee_eventmask & EV_RE) && soreadable(sp)) { + /* for AFP/OT purposes; may go away in future */ + if ((SOCK_DOM(sp) == PF_INET || + SOCK_DOM(sp) == PF_INET6) && + SOCK_PROTO(sp) == IPPROTO_TCP && + (sp->so_error == ECONNREFUSED || + sp->so_error == ECONNRESET)) { + if (sp->so_pcb == NULL || + sotoinpcb(sp)->inp_state == + INPCB_STATE_DEAD || + (tp = sototcpcb(sp)) == NULL || + tp->t_state == TCPS_CLOSED) { + mask |= EV_RE | EV_RESET; + break; + } + } + mask |= EV_RE; + evq->ee_req.er_rcnt = sp->so_rcv.sb_cc; + + if (sp->so_state & SS_CANTRCVMORE) { + mask |= EV_FIN; + break; + } + } + if ((evq->ee_eventmask & EV_WR) && sowriteable(sp)) { + /* for AFP/OT purposes; may go away in future */ + if ((SOCK_DOM(sp) == PF_INET || + SOCK_DOM(sp) == PF_INET6) && + SOCK_PROTO(sp) == IPPROTO_TCP && + (sp->so_error == ECONNREFUSED || + sp->so_error == ECONNRESET)) { + if (sp->so_pcb == NULL || + sotoinpcb(sp)->inp_state == + INPCB_STATE_DEAD || + (tp = sototcpcb(sp)) == NULL || + tp->t_state == TCPS_CLOSED) { + mask |= EV_WR | EV_RESET; + break; + } + } + mask |= EV_WR; + evq->ee_req.er_wcnt = sbspace(&sp->so_snd); + } + break; case EV_RCONN: - if ((evq->ee_eventmask & EV_RE)) { - mask |= EV_RE|EV_RCONN; - evq->ee_req.er_rcnt = sp->so_qlen + 1; // incl this one - } - break; + if ((evq->ee_eventmask & EV_RE)) { + mask |= EV_RE | EV_RCONN; + evq->ee_req.er_rcnt = sp->so_qlen + 1; // incl this one + } + break; case EV_WCONN: - if ((evq->ee_eventmask & EV_WR)) { - mask |= EV_WR|EV_WCONN; - } - break; + if ((evq->ee_eventmask & EV_WR)) { + mask |= EV_WR | EV_WCONN; + } + break; case EV_RCLOSED: - if ((evq->ee_eventmask & EV_RE)) { - mask |= EV_RE|EV_RCLOSED; - } - break; + if ((evq->ee_eventmask & EV_RE)) { + mask |= EV_RE | EV_RCLOSED; + } + break; case EV_WCLOSED: - if ((evq->ee_eventmask & EV_WR)) { - mask |= EV_WR|EV_WCLOSED; - } - break; + if ((evq->ee_eventmask & EV_WR)) { + mask |= EV_WR | EV_WCLOSED; + } + break; case EV_FIN: - if (evq->ee_eventmask & EV_RE) { - mask |= EV_RE|EV_FIN; - } - break; + if (evq->ee_eventmask & EV_RE) { + mask |= EV_RE | EV_FIN; + } + break; case EV_RESET: case EV_TIMEOUT: - if (evq->ee_eventmask & EV_RE) { - mask |= EV_RE | event; - } - if (evq->ee_eventmask & EV_WR) { - mask |= EV_WR | event; - } - break; + if (evq->ee_eventmask & EV_RE) { + mask |= EV_RE | event; + } + if (evq->ee_eventmask & EV_WR) { + mask |= EV_WR | event; + } + break; default: - KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, -1, 0, 0, 0); - return; + KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_END, (int)sp, -1, 0, 0, 0); + return; } /* switch */ KERNEL_DEBUG(DBG_MISC_POST, (int)evq, evq->ee_eventmask, evq->ee_req.er_eventbits, mask, 0); if (mask) { - /* + /* * disarm... 
postevents are nops until this event is 'read' via * waitevent and then re-armed via modwatch */ - evq->ee_eventmask = 0; + evq->ee_eventmask = 0; /* * since events are disarmed until after the waitevent * the ee_req.er_xxxx fields can't change once we've * inserted this event into the proc queue... - * since waitevent can't see this event until we + * since waitevent can't see this event until we * enqueue it, waitevent will see a 'consistent' * snapshot of the event, even though it won't hold * the socket lock, and we're updating the event outside * of the proc lock, which it will hold */ - evq->ee_req.er_eventbits |= mask; + evq->ee_req.er_eventbits |= mask; evprocenque(evq); } } - KERNEL_DEBUG(DBG_MISC_POST|DBG_FUNC_END, (int)sp, 0, 0, 0, 0); + KERNEL_DEBUG(DBG_MISC_POST | DBG_FUNC_END, (int)sp, 0, 0, 0, 0); } #endif /* SOCKETS */ @@ -2610,42 +2693,43 @@ watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval) struct fileproc *fp = NULL; int error; - KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_START, 0,0,0,0,0); + KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_START, 0, 0, 0, 0, 0); // get a qelt and fill with users req MALLOC(evq, struct eventqelt *, sizeof(struct eventqelt), M_TEMP, M_WAITOK); - if (evq == NULL) - return (ENOMEM); + if (evq == NULL) { + return ENOMEM; + } erp = &evq->ee_req; // get users request pkt if (IS_64BIT_PROCESS(p)) { - error = copyin(uap->u_req, (caddr_t)erp, sizeof(struct eventreq64)); + error = copyin(uap->u_req, (caddr_t)erp, sizeof(struct eventreq64)); } else { - struct eventreq32 er32; + struct eventreq32 er32; - error = copyin(uap->u_req, (caddr_t)&er32, sizeof(struct eventreq32)); + error = copyin(uap->u_req, (caddr_t)&er32, sizeof(struct eventreq32)); if (error == 0) { - /* - * the user only passes in the - * er_type, er_handle and er_data... - * the other fields are initialized - * below, so don't bother to copy - */ - erp->er_type = er32.er_type; - erp->er_handle = er32.er_handle; - erp->er_data = (user_addr_t)er32.er_data; + /* + * the user only passes in the + * er_type, er_handle and er_data... 
+ * the other fields are initialized + * below, so don't bother to copy + */ + erp->er_type = er32.er_type; + erp->er_handle = er32.er_handle; + erp->er_data = (user_addr_t)er32.er_data; } } if (error) { - FREE(evq, M_TEMP); - KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0); + FREE(evq, M_TEMP); + KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, error, 0, 0, 0, 0); - return(error); + return error; } - KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0); + KERNEL_DEBUG(DBG_MISC_WATCH, erp->er_handle, uap->u_eventmask, (uint32_t)evq, 0, 0); // validate, freeing qelt if errors error = 0; @@ -2672,26 +2756,26 @@ watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval) if (error) { FREE(evq, M_TEMP); - KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, error,0,0,0,0); - return(error); + KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, error, 0, 0, 0, 0); + return error; } - + /* * only allow one watch per file per proc */ - for ( ; np != NULL; np = np->ee_slist.tqe_next) { + for (; np != NULL; np = np->ee_slist.tqe_next) { if (np->ee_proc == p) { #if SOCKETS - if (fp->f_type == DTYPE_SOCKET) + if (fp->f_type == DTYPE_SOCKET) { socket_unlock((struct socket *)fp->f_data, 1); - else + } else #endif /* SOCKETS */ - PIPE_UNLOCK((struct pipe *)fp->f_data); + PIPE_UNLOCK((struct pipe *)fp->f_data); fp_drop(p, erp->er_handle, fp, 0); FREE(evq, M_TEMP); - - KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0); - return(EINVAL); + + KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, EINVAL, 0, 0, 0, 0); + return EINVAL; } } erp->er_ecnt = erp->er_rcnt = erp->er_wcnt = erp->er_eventbits = 0; @@ -2715,8 +2799,8 @@ watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval) } fp_drop_event(p, erp->er_handle, fp); - KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, 0,0,0,0,0); - return(0); + KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, 0, 0, 0, 0, 0); + return 0; } @@ -2731,14 +2815,14 @@ watchevent(proc_t p, struct watchevent_args *uap, __unused int *retval) int waitevent(proc_t p, struct waitevent_args *uap, int *retval) { - int error = 0; + int error = 0; struct eventqelt *evq; struct eventreq64 *erp; uint64_t abstime, interval; boolean_t fast_poll = FALSE; union { - struct eventreq64 er64; - struct eventreq32 er32; + struct eventreq64 er64; + struct eventreq32 er32; } uer = {}; interval = 0; @@ -2749,19 +2833,21 @@ waitevent(proc_t p, struct waitevent_args *uap, int *retval) * check for fast poll method */ if (IS_64BIT_PROCESS(p)) { - if (uap->tv == (user_addr_t)-1) - fast_poll = TRUE; - } else if (uap->tv == (user_addr_t)((uint32_t)-1)) - fast_poll = TRUE; + if (uap->tv == (user_addr_t)-1) { + fast_poll = TRUE; + } + } else if (uap->tv == (user_addr_t)((uint32_t)-1)) { + fast_poll = TRUE; + } if (fast_poll == TRUE) { - if (p->p_evlist.tqh_first == NULL) { - KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_NONE, -1,0,0,0,0); + if (p->p_evlist.tqh_first == NULL) { + KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_NONE, -1, 0, 0, 0, 0); /* * poll failed */ - *retval = 1; - return (0); + *retval = 1; + return 0; } proc_lock(p); goto retry; @@ -2779,76 +2865,80 @@ waitevent(proc_t p, struct waitevent_args *uap, int *retval) atv.tv_usec = atv32.tv_usec; } - if (error) - return(error); + if (error) { + return error; + } if (itimerfix(&atv)) { error = EINVAL; - return(error); + return error; } interval = tvtoabstime(&atv); } - KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_START, 0,0,0,0,0); + KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_START, 0, 0, 0, 0, 0); proc_lock(p); retry: if ((evq = p->p_evlist.tqh_first) != 
NULL) { - /* + /* * found one... make a local copy while it's still on the queue * to prevent it from changing while in the midst of copying * don't want to hold the proc lock across a copyout because * it might block on a page fault at the target in user space */ - erp = &evq->ee_req; + erp = &evq->ee_req; - if (IS_64BIT_PROCESS(p)) - bcopy((caddr_t)erp, (caddr_t)&uer.er64, sizeof (struct eventreq64)); - else { - uer.er32.er_type = erp->er_type; - uer.er32.er_handle = erp->er_handle; - uer.er32.er_data = (uint32_t)erp->er_data; - uer.er32.er_ecnt = erp->er_ecnt; - uer.er32.er_rcnt = erp->er_rcnt; - uer.er32.er_wcnt = erp->er_wcnt; - uer.er32.er_eventbits = erp->er_eventbits; + if (IS_64BIT_PROCESS(p)) { + bcopy((caddr_t)erp, (caddr_t)&uer.er64, sizeof(struct eventreq64)); + } else { + uer.er32.er_type = erp->er_type; + uer.er32.er_handle = erp->er_handle; + uer.er32.er_data = (uint32_t)erp->er_data; + uer.er32.er_ecnt = erp->er_ecnt; + uer.er32.er_rcnt = erp->er_rcnt; + uer.er32.er_wcnt = erp->er_wcnt; + uer.er32.er_eventbits = erp->er_eventbits; } - TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); + TAILQ_REMOVE(&p->p_evlist, evq, ee_plist); evq->ee_flags &= ~EV_QUEUED; proc_unlock(p); - if (IS_64BIT_PROCESS(p)) - error = copyout((caddr_t)&uer.er64, uap->u_req, sizeof(struct eventreq64)); - else - error = copyout((caddr_t)&uer.er32, uap->u_req, sizeof(struct eventreq32)); + if (IS_64BIT_PROCESS(p)) { + error = copyout((caddr_t)&uer.er64, uap->u_req, sizeof(struct eventreq64)); + } else { + error = copyout((caddr_t)&uer.er32, uap->u_req, sizeof(struct eventreq32)); + } - KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error, - evq->ee_req.er_handle,evq->ee_req.er_eventbits,(uint32_t)evq,0); - return (error); - } - else { + KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_END, error, + evq->ee_req.er_handle, evq->ee_req.er_eventbits, (uint32_t)evq, 0); + return error; + } else { if (uap->tv && interval == 0) { proc_unlock(p); *retval = 1; // poll failed - KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, error,0,0,0,0); - return (error); + KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_END, error, 0, 0, 0, 0); + return error; } - if (interval != 0) + if (interval != 0) { clock_absolutetime_interval_to_deadline(interval, &abstime); - else - abstime = 0; + } else { + abstime = 0; + } - KERNEL_DEBUG(DBG_MISC_WAIT, 1,(uint32_t)&p->p_evlist,0,0,0); + KERNEL_DEBUG(DBG_MISC_WAIT, 1, (uint32_t)&p->p_evlist, 0, 0, 0); error = msleep1(&p->p_evlist, &p->p_mlock, (PSOCK | PCATCH), "waitevent", abstime); - KERNEL_DEBUG(DBG_MISC_WAIT, 2,(uint32_t)&p->p_evlist,0,0,0); + KERNEL_DEBUG(DBG_MISC_WAIT, 2, (uint32_t)&p->p_evlist, 0, 0, 0); - if (error == 0) + if (error == 0) { goto retry; - if (error == ERESTART) + } + if (error == ERESTART) { error = EINTR; + } if (error == EWOULDBLOCK) { *retval = 1; error = 0; @@ -2856,8 +2946,8 @@ retry: } proc_unlock(p); - KERNEL_DEBUG(DBG_MISC_WAIT|DBG_FUNC_END, 0,0,0,0,0); - return (error); + KERNEL_DEBUG(DBG_MISC_WAIT | DBG_FUNC_END, 0, 0, 0, 0, 0); + return error; } @@ -2871,12 +2961,12 @@ modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval) { struct eventreq64 er; struct eventreq64 *erp = &er; - struct eventqelt *evq = NULL; /* protected by error return */ + struct eventqelt *evq = NULL; /* protected by error return */ int error; struct fileproc *fp; int flag; - KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_START, 0,0,0,0,0); + KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_START, 0, 0, 0, 0, 0); /* * get user's request pkt @@ -2885,8 +2975,8 @@ modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval) * 
those 2 fields */ if ((error = copyin(uap->u_req, (caddr_t)erp, sizeof(er.er_type) + sizeof(er.er_handle)))) { - KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0); - return(error); + KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, error, 0, 0, 0, 0); + return error; } proc_fdlock(p); @@ -2909,8 +2999,8 @@ modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval) if (error) { proc_fdunlock(p); - KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, error,0,0,0,0); - return(error); + KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, error, 0, 0, 0, 0); + return error; } if ((uap->u_eventmask == EV_RM) && (fp->f_flags & FP_WAITEVENT)) { @@ -2919,22 +3009,23 @@ modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval) proc_fdunlock(p); // locate event if possible - for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) { - if (evq->ee_proc == p) - break; + for (; evq != NULL; evq = evq->ee_slist.tqe_next) { + if (evq->ee_proc == p) { + break; + } } if (evq == NULL) { #if SOCKETS - if (fp->f_type == DTYPE_SOCKET) + if (fp->f_type == DTYPE_SOCKET) { socket_unlock((struct socket *)fp->f_data, 1); - else + } else #endif /* SOCKETS */ - PIPE_UNLOCK((struct pipe *)fp->f_data); + PIPE_UNLOCK((struct pipe *)fp->f_data); fp_drop(p, erp->er_handle, fp, 0); - KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, EINVAL,0,0,0,0); - return(EINVAL); + KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, EINVAL, 0, 0, 0, 0); + return EINVAL; } - KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle,uap->u_eventmask,(uint32_t)evq,0,0); + KERNEL_DEBUG(DBG_MISC_MOD, erp->er_handle, uap->u_eventmask, (uint32_t)evq, 0, 0); if (uap->u_eventmask == EV_RM) { EVPROCDEQUE(p, evq); @@ -2951,18 +3042,17 @@ modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval) } fp_drop(p, erp->er_handle, fp, 0); FREE(evq, M_TEMP); - KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, 0,0,0,0,0); - return(0); + KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, 0, 0, 0, 0, 0); + return 0; } switch (uap->u_eventmask & EV_MASK) { - case 0: flag = 0; break; case EV_RE: case EV_WR: - case EV_RE|EV_WR: + case EV_RE | EV_WR: flag = EV_RWBYTES; break; @@ -2970,22 +3060,22 @@ modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval) flag = EV_OOB; break; - case EV_EX|EV_RE: - case EV_EX|EV_WR: - case EV_EX|EV_RE|EV_WR: - flag = EV_OOB|EV_RWBYTES; + case EV_EX | EV_RE: + case EV_EX | EV_WR: + case EV_EX | EV_RE | EV_WR: + flag = EV_OOB | EV_RWBYTES; break; default: #if SOCKETS - if (fp->f_type == DTYPE_SOCKET) + if (fp->f_type == DTYPE_SOCKET) { socket_unlock((struct socket *)fp->f_data, 1); - else + } else #endif /* SOCKETS */ - PIPE_UNLOCK((struct pipe *)fp->f_data); + PIPE_UNLOCK((struct pipe *)fp->f_data); fp_drop(p, erp->er_handle, fp, 0); - KERNEL_DEBUG(DBG_MISC_WATCH|DBG_FUNC_END, EINVAL,0,0,0,0); - return(EINVAL); + KERNEL_DEBUG(DBG_MISC_WATCH | DBG_FUNC_END, EINVAL, 0, 0, 0, 0); + return EINVAL; } /* * since we're holding the socket/pipe lock, the event @@ -3021,8 +3111,8 @@ modwatch(proc_t p, struct modwatch_args *uap, __unused int *retval) PIPE_UNLOCK((struct pipe *)fp->f_data); } fp_drop(p, erp->er_handle, fp, 0); - KERNEL_DEBUG(DBG_MISC_MOD|DBG_FUNC_END, evq->ee_req.er_handle,evq->ee_eventmask,(uint32_t)fp->f_data,flag,0); - return(0); + KERNEL_DEBUG(DBG_MISC_MOD | DBG_FUNC_END, evq->ee_req.er_handle, evq->ee_eventmask, (uint32_t)fp->f_data, flag, 0); + return 0; } /* this routine is called from the close of fd with proc_fdlock held */ @@ -3036,36 +3126,36 @@ waitevent_close(struct proc *p, struct fileproc *fp) #if SOCKETS if (fp->f_type == DTYPE_SOCKET) { - 
socket_lock((struct socket *)fp->f_data, 1); + socket_lock((struct socket *)fp->f_data, 1); evq = ((struct socket *)fp->f_data)->so_evlist.tqh_first; } else #endif /* SOCKETS */ if (fp->f_type == DTYPE_PIPE) { - PIPE_LOCK((struct pipe *)fp->f_data); + PIPE_LOCK((struct pipe *)fp->f_data); evq = ((struct pipe *)fp->f_data)->pipe_evlist.tqh_first; - } - else { - return(EINVAL); + } else { + return EINVAL; } proc_fdunlock(p); // locate event if possible - for ( ; evq != NULL; evq = evq->ee_slist.tqe_next) { - if (evq->ee_proc == p) - break; + for (; evq != NULL; evq = evq->ee_slist.tqe_next) { + if (evq->ee_proc == p) { + break; + } } if (evq == NULL) { #if SOCKETS - if (fp->f_type == DTYPE_SOCKET) - socket_unlock((struct socket *)fp->f_data, 1); - else + if (fp->f_type == DTYPE_SOCKET) { + socket_unlock((struct socket *)fp->f_data, 1); + } else #endif /* SOCKETS */ - PIPE_UNLOCK((struct pipe *)fp->f_data); + PIPE_UNLOCK((struct pipe *)fp->f_data); proc_fdlock(p); - return(EINVAL); + return EINVAL; } EVPROCDEQUE(p, evq); @@ -3083,7 +3173,7 @@ waitevent_close(struct proc *p, struct fileproc *fp) proc_fdlock(p); - return(0); + return 0; } @@ -3109,33 +3199,35 @@ gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retv { kern_return_t kret; int error; - mach_timespec_t mach_ts; /* for IOKit call */ - __darwin_uuid_t uuid_kern = {}; /* for IOKit call */ + mach_timespec_t mach_ts; /* for IOKit call */ + __darwin_uuid_t uuid_kern = {}; /* for IOKit call */ if (!uap->spi) { #if CONFIG_EMBEDDED #if CONFIG_MACF if ((error = mac_system_check_info(kauth_cred_get(), "hw.uuid")) != 0) { /* EPERM invokes userspace upcall if present */ - return (error); + return error; } #endif #endif } /* Convert the 32/64 bit timespec into a mach_timespec_t */ - if ( proc_is64bit(p) ) { + if (proc_is64bit(p)) { struct user64_timespec ts; error = copyin(uap->timeoutp, &ts, sizeof(ts)); - if (error) - return (error); + if (error) { + return error; + } mach_ts.tv_sec = ts.tv_sec; mach_ts.tv_nsec = ts.tv_nsec; } else { struct user32_timespec ts; - error = copyin(uap->timeoutp, &ts, sizeof(ts) ); - if (error) - return (error); + error = copyin(uap->timeoutp, &ts, sizeof(ts)); + if (error) { + return error; + } mach_ts.tv_sec = ts.tv_sec; mach_ts.tv_nsec = ts.tv_nsec; } @@ -3154,7 +3246,7 @@ gethostuuid(struct proc *p, struct gethostuuid_args *uap, __unused int32_t *retv error = EWOULDBLOCK; } - return (error); + return error; } /* @@ -3178,101 +3270,109 @@ ledger(struct proc *p, struct ledger_args *args, __unused int32_t *retval) /* Finish copying in the necessary args before taking the proc lock */ error = 0; len = 0; - if (args->cmd == LEDGER_ENTRY_INFO) - error = copyin(args->arg3, (char *)&len, sizeof (len)); - else if (args->cmd == LEDGER_TEMPLATE_INFO) - error = copyin(args->arg2, (char *)&len, sizeof (len)); - else if (args->cmd == LEDGER_LIMIT) + if (args->cmd == LEDGER_ENTRY_INFO) { + error = copyin(args->arg3, (char *)&len, sizeof(len)); + } else if (args->cmd == LEDGER_TEMPLATE_INFO) { + error = copyin(args->arg2, (char *)&len, sizeof(len)); + } else if (args->cmd == LEDGER_LIMIT) #ifdef LEDGER_DEBUG - error = copyin(args->arg2, (char *)&lla, sizeof (lla)); + { error = copyin(args->arg2, (char *)&lla, sizeof(lla));} #else - return (EINVAL); + { return EINVAL; } #endif - else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD)) - return (EINVAL); + else if ((args->cmd < 0) || (args->cmd > LEDGER_MAX_CMD)) { + return EINVAL; + } - if (error) - return (error); - if (len < 0) - return (EINVAL); + 
if (error) { + return error; + } + if (len < 0) { + return EINVAL; + } rval = 0; if (args->cmd != LEDGER_TEMPLATE_INFO) { pid = args->arg1; proc = proc_find(pid); - if (proc == NULL) - return (ESRCH); + if (proc == NULL) { + return ESRCH; + } #if CONFIG_MACF error = mac_proc_check_ledger(p, proc, args->cmd); if (error) { proc_rele(proc); - return (error); + return error; } #endif task = proc->task; } - + switch (args->cmd) { #ifdef LEDGER_DEBUG - case LEDGER_LIMIT: { - if (!kauth_cred_issuser(kauth_cred_get())) - rval = EPERM; - rval = ledger_limit(task, &lla); - proc_rele(proc); - break; + case LEDGER_LIMIT: { + if (!kauth_cred_issuser(kauth_cred_get())) { + rval = EPERM; } + rval = ledger_limit(task, &lla); + proc_rele(proc); + break; + } #endif - case LEDGER_INFO: { - struct ledger_info info = {}; - - rval = ledger_info(task, &info); - proc_rele(proc); - if (rval == 0) - rval = copyout(&info, args->arg2, - sizeof (info)); - break; + case LEDGER_INFO: { + struct ledger_info info = {}; + + rval = ledger_info(task, &info); + proc_rele(proc); + if (rval == 0) { + rval = copyout(&info, args->arg2, + sizeof(info)); } + break; + } - case LEDGER_ENTRY_INFO: { - void *buf; - int sz; + case LEDGER_ENTRY_INFO: { + void *buf; + int sz; - rval = ledger_get_task_entry_info_multiple(task, &buf, &len); - proc_rele(proc); - if ((rval == 0) && (len >= 0)) { - sz = len * sizeof (struct ledger_entry_info); - rval = copyout(buf, args->arg2, sz); - kfree(buf, sz); - } - if (rval == 0) - rval = copyout(&len, args->arg3, sizeof (len)); - break; + rval = ledger_get_task_entry_info_multiple(task, &buf, &len); + proc_rele(proc); + if ((rval == 0) && (len >= 0)) { + sz = len * sizeof(struct ledger_entry_info); + rval = copyout(buf, args->arg2, sz); + kfree(buf, sz); + } + if (rval == 0) { + rval = copyout(&len, args->arg3, sizeof(len)); } + break; + } - case LEDGER_TEMPLATE_INFO: { - void *buf; - int sz; + case LEDGER_TEMPLATE_INFO: { + void *buf; + int sz; - rval = ledger_template_info(&buf, &len); - if ((rval == 0) && (len >= 0)) { - sz = len * sizeof (struct ledger_template_info); - rval = copyout(buf, args->arg1, sz); - kfree(buf, sz); - } - if (rval == 0) - rval = copyout(&len, args->arg2, sizeof (len)); - break; + rval = ledger_template_info(&buf, &len); + if ((rval == 0) && (len >= 0)) { + sz = len * sizeof(struct ledger_template_info); + rval = copyout(buf, args->arg1, sz); + kfree(buf, sz); } + if (rval == 0) { + rval = copyout(&len, args->arg2, sizeof(len)); + } + break; + } - default: - panic("ledger syscall logic error -- command type %d", args->cmd); - proc_rele(proc); - rval = EINVAL; + default: + panic("ledger syscall logic error -- command type %d", args->cmd); + proc_rele(proc); + rval = EINVAL; } - return (rval); + return rval; } int @@ -3290,8 +3390,9 @@ telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t break; #endif /* CONFIG_TELEMETRY */ case TELEMETRY_CMD_VOUCHER_NAME: - if (thread_set_voucher_name((mach_port_name_t)args->deadline)) + if (thread_set_voucher_name((mach_port_name_t)args->deadline)) { error = EINVAL; + } break; default: @@ -3299,7 +3400,7 @@ telemetry(__unused struct proc *p, struct telemetry_args *args, __unused int32_t break; } - return (error); + return error; } #if DEVELOPMENT || DEBUG @@ -3313,17 +3414,20 @@ struct g_wqset { static queue_head_t g_wqset_list; static struct waitq_set *g_waitq_set = NULL; -static inline struct waitq_set *sysctl_get_wqset(int idx) +static inline struct waitq_set * +sysctl_get_wqset(int idx) { struct g_wqset *gwqs; 
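	/*
	 * Reuse a live test set whose ID's low 32 bits match idx;
	 * otherwise fall through and allocate (and remember) a new one.
	 */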
- if (!g_wqset_num) + if (!g_wqset_num) { queue_init(&g_wqset_list); + } /* don't bother with locks: this is test-only code! */ qe_foreach_element(gwqs, &g_wqset_list, link) { - if ((int)(wqset_id(gwqs->wqset) & 0xffffffff) == idx) + if ((int)(wqset_id(gwqs->wqset) & 0xffffffff) == idx) { return gwqs->wqset; + } } /* allocate a new one */ @@ -3331,7 +3435,7 @@ static inline struct waitq_set *sysctl_get_wqset(int idx) gwqs = (struct g_wqset *)kalloc(sizeof(*gwqs)); assert(gwqs != NULL); - gwqs->wqset = waitq_set_alloc(SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST, NULL); + gwqs->wqset = waitq_set_alloc(SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL); enqueue_tail(&g_wqset_list, &gwqs->link); printf("[WQ]: created new waitq set 0x%llx\n", wqset_id(gwqs->wqset)); @@ -3342,15 +3446,18 @@ static inline struct waitq_set *sysctl_get_wqset(int idx) static int g_wq_init = 0; static struct waitq g_wq[MAX_GLOBAL_TEST_QUEUES]; -static inline struct waitq *global_test_waitq(int idx) +static inline struct waitq * +global_test_waitq(int idx) { - if (idx < 0) + if (idx < 0) { return NULL; + } if (!g_wq_init) { g_wq_init = 1; - for (int i = 0; i < MAX_GLOBAL_TEST_QUEUES; i++) + for (int i = 0; i < MAX_GLOBAL_TEST_QUEUES; i++) { waitq_init(&g_wq[i], SYNC_POLICY_FIFO); + } } return &g_wq[idx % MAX_GLOBAL_TEST_QUEUES]; @@ -3366,11 +3473,13 @@ static int sysctl_waitq_wakeup_one SYSCTL_HANDLER_ARGS int64_t event64 = 0; error = SYSCTL_IN(req, &event64, sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { return SYSCTL_OUT(req, &event64, sizeof(event64)); + } if (event64 < 0) { index = (int)((-event64) & 0xffffffff); @@ -3384,15 +3493,15 @@ static int sysctl_waitq_wakeup_one SYSCTL_HANDLER_ARGS event64 = 0; printf("[WQ]: Waking one thread on waitq [%d] event:0x%llx\n", - index, event64); + index, event64); kr = waitq_wakeup64_one(waitq, (event64_t)event64, THREAD_AWAKENED, - WAITQ_ALL_PRIORITIES); + WAITQ_ALL_PRIORITIES); printf("[WQ]: \tkr=%d\n", kr); return SYSCTL_OUT(req, &kr, sizeof(kr)); } SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_one, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_waitq_wakeup_one, "Q", "wakeup one thread waiting on given event"); + 0, 0, sysctl_waitq_wakeup_one, "Q", "wakeup one thread waiting on given event"); static int sysctl_waitq_wakeup_all SYSCTL_HANDLER_ARGS @@ -3405,11 +3514,13 @@ static int sysctl_waitq_wakeup_all SYSCTL_HANDLER_ARGS int64_t event64 = 0; error = SYSCTL_IN(req, &event64, sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { return SYSCTL_OUT(req, &event64, sizeof(event64)); + } if (event64 < 0) { index = (int)((-event64) & 0xffffffff); @@ -3423,15 +3534,15 @@ static int sysctl_waitq_wakeup_all SYSCTL_HANDLER_ARGS event64 = 0; printf("[WQ]: Waking all threads on waitq [%d] event:0x%llx\n", - index, event64); + index, event64); kr = waitq_wakeup64_all(waitq, (event64_t)event64, - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); printf("[WQ]: \tkr=%d\n", kr); return SYSCTL_OUT(req, &kr, sizeof(kr)); } SYSCTL_PROC(_kern, OID_AUTO, waitq_wakeup_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_waitq_wakeup_all, "Q", "wakeup all threads waiting on given event"); + 0, 0, sysctl_waitq_wakeup_all, "Q", "wakeup all threads waiting on given event"); static int sysctl_waitq_wait SYSCTL_HANDLER_ARGS @@ -3444,11 +3555,13 @@ static int sysctl_waitq_wait SYSCTL_HANDLER_ARGS int64_t event64 = 0; error = SYSCTL_IN(req, &event64, 
sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { return SYSCTL_OUT(req, &event64, sizeof(event64)); + } if (event64 < 0) { index = (int)((-event64) & 0xffffffff); @@ -3462,16 +3575,17 @@ static int sysctl_waitq_wait SYSCTL_HANDLER_ARGS event64 = 0; printf("[WQ]: Current thread waiting on waitq [%d] event:0x%llx\n", - index, event64); + index, event64); kr = waitq_assert_wait64(waitq, (event64_t)event64, THREAD_INTERRUPTIBLE, 0); - if (kr == THREAD_WAITING) + if (kr == THREAD_WAITING) { thread_block(THREAD_CONTINUE_NULL); + } printf("[WQ]: \tWoke Up: kr=%d\n", kr); return SYSCTL_OUT(req, &kr, sizeof(kr)); } SYSCTL_PROC(_kern, OID_AUTO, waitq_wait, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_waitq_wait, "Q", "start waiting on given event"); + 0, 0, sysctl_waitq_wait, "Q", "start waiting on given event"); static int sysctl_wqset_select SYSCTL_HANDLER_ARGS @@ -3482,11 +3596,13 @@ static int sysctl_wqset_select SYSCTL_HANDLER_ARGS uint64_t event64 = 0; error = SYSCTL_IN(req, &event64, sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { goto out; + } wqset = sysctl_get_wqset((int)(event64 & 0xffffffff)); g_waitq_set = wqset; @@ -3495,15 +3611,16 @@ static int sysctl_wqset_select SYSCTL_HANDLER_ARGS printf("[WQ]: selected wqset 0x%llx\n", event64); out: - if (g_waitq_set) + if (g_waitq_set) { event64 = wqset_id(g_waitq_set); - else + } else { event64 = (uint64_t)(-1); + } return SYSCTL_OUT(req, &event64, sizeof(event64)); } SYSCTL_PROC(_kern, OID_AUTO, wqset_select, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_wqset_select, "Q", "select/create a global waitq set"); + 0, 0, sysctl_wqset_select, "Q", "select/create a global waitq set"); static int sysctl_waitq_link SYSCTL_HANDLER_ARGS @@ -3518,22 +3635,26 @@ static int sysctl_waitq_link SYSCTL_HANDLER_ARGS int64_t event64 = 0; error = SYSCTL_IN(req, &event64, sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { return SYSCTL_OUT(req, &event64, sizeof(event64)); + } - if (!g_waitq_set) + if (!g_waitq_set) { g_waitq_set = sysctl_get_wqset(1); + } wqset = g_waitq_set; if (event64 < 0) { struct waitq_set *tmp; index = (int)((-event64) & 0xffffffff); tmp = sysctl_get_wqset(index); - if (tmp == wqset) + if (tmp == wqset) { goto out; + } waitq = wqset_waitq(tmp); index = -index; } else { @@ -3542,7 +3663,7 @@ static int sysctl_waitq_link SYSCTL_HANDLER_ARGS } printf("[WQ]: linking waitq [%d] to global wqset (0x%llx)\n", - index, wqset_id(wqset)); + index, wqset_id(wqset)); reserved_link = waitq_link_reserve(waitq); kr = waitq_link(waitq, wqset, WAITQ_SHOULD_LOCK, &reserved_link); waitq_link_release(reserved_link); @@ -3553,7 +3674,7 @@ out: return SYSCTL_OUT(req, &kr, sizeof(kr)); } SYSCTL_PROC(_kern, OID_AUTO, waitq_link, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_waitq_link, "Q", "link global waitq to test waitq set"); + 0, 0, sysctl_waitq_link, "Q", "link global waitq to test waitq set"); static int sysctl_waitq_unlink SYSCTL_HANDLER_ARGS @@ -3567,21 +3688,24 @@ static int sysctl_waitq_unlink SYSCTL_HANDLER_ARGS uint64_t event64 = 0; error = SYSCTL_IN(req, &event64, sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { return SYSCTL_OUT(req, &event64, sizeof(event64)); + } - if (!g_waitq_set) + if (!g_waitq_set) { g_waitq_set = sysctl_get_wqset(1); + } wqset = g_waitq_set; index = (int)event64; 
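	/*
	 * Unlike sysctl_waitq_link above, there is no negative-index
	 * encoding here: only a global test waitq can be unlinked from
	 * the selected set, so event64 is used as an index directly.
	 */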
waitq = global_test_waitq(index); printf("[WQ]: unlinking waitq [%d] from global wqset (0x%llx)\n", - index, wqset_id(wqset)); + index, wqset_id(wqset)); kr = waitq_unlink(waitq, wqset); printf("[WQ]: \tkr=%d\n", kr); @@ -3589,7 +3713,7 @@ static int sysctl_waitq_unlink SYSCTL_HANDLER_ARGS return SYSCTL_OUT(req, &kr, sizeof(kr)); } SYSCTL_PROC(_kern, OID_AUTO, waitq_unlink, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_waitq_unlink, "Q", "unlink global waitq from test waitq set"); + 0, 0, sysctl_waitq_unlink, "Q", "unlink global waitq from test waitq set"); static int sysctl_waitq_clear_prepost SYSCTL_HANDLER_ARGS @@ -3600,11 +3724,13 @@ static int sysctl_waitq_clear_prepost SYSCTL_HANDLER_ARGS int error, index; error = SYSCTL_IN(req, &event64, sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { return SYSCTL_OUT(req, &event64, sizeof(event64)); + } index = (int)event64; waitq = global_test_waitq(index); @@ -3615,7 +3741,7 @@ static int sysctl_waitq_clear_prepost SYSCTL_HANDLER_ARGS return SYSCTL_OUT(req, &event64, sizeof(event64)); } SYSCTL_PROC(_kern, OID_AUTO, waitq_clear_prepost, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_waitq_clear_prepost, "Q", "clear prepost on given waitq"); + 0, 0, sysctl_waitq_clear_prepost, "Q", "clear prepost on given waitq"); static int sysctl_wqset_unlink_all SYSCTL_HANDLER_ARGS @@ -3627,18 +3753,21 @@ static int sysctl_wqset_unlink_all SYSCTL_HANDLER_ARGS uint64_t event64 = 0; error = SYSCTL_IN(req, &event64, sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { return SYSCTL_OUT(req, &event64, sizeof(event64)); + } - if (!g_waitq_set) + if (!g_waitq_set) { g_waitq_set = sysctl_get_wqset(1); + } wqset = g_waitq_set; printf("[WQ]: unlinking all queues from global wqset (0x%llx)\n", - wqset_id(wqset)); + wqset_id(wqset)); kr = waitq_set_unlink_all(wqset); printf("[WQ]: \tkr=%d\n", kr); @@ -3646,7 +3775,7 @@ static int sysctl_wqset_unlink_all SYSCTL_HANDLER_ARGS return SYSCTL_OUT(req, &kr, sizeof(kr)); } SYSCTL_PROC(_kern, OID_AUTO, wqset_unlink_all, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_wqset_unlink_all, "Q", "unlink all queues from test waitq set"); + 0, 0, sysctl_wqset_unlink_all, "Q", "unlink all queues from test waitq set"); static int sysctl_wqset_clear_preposts SYSCTL_HANDLER_ARGS @@ -3657,11 +3786,13 @@ static int sysctl_wqset_clear_preposts SYSCTL_HANDLER_ARGS int error, index; error = SYSCTL_IN(req, &event64, sizeof(event64)); - if (error) + if (error) { return error; + } - if (!req->newptr) + if (!req->newptr) { goto out; + } index = (int)((event64) & 0xffffffff); wqset = sysctl_get_wqset(index); @@ -3671,15 +3802,16 @@ static int sysctl_wqset_clear_preposts SYSCTL_HANDLER_ARGS waitq_set_clear_preposts(wqset); out: - if (wqset) + if (wqset) { event64 = wqset_id(wqset); - else + } else { event64 = (uint64_t)(-1); + } return SYSCTL_OUT(req, &event64, sizeof(event64)); } SYSCTL_PROC(_kern, OID_AUTO, wqset_clear_preposts, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_wqset_clear_preposts, "Q", "clear preposts on given waitq set"); + 0, 0, sysctl_wqset_clear_preposts, "Q", "clear preposts on given waitq set"); #endif /* CONFIG_WAITQ_DEBUG */ @@ -3687,11 +3819,12 @@ static int sysctl_waitq_set_nelem SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - int nelem; + int nelem; /* Read only */ - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + 
return EPERM; + } nelem = sysctl_helper_waitq_set_nelem(); @@ -3699,9 +3832,194 @@ sysctl_waitq_set_nelem SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kern, OID_AUTO, n_ltable_entries, CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_waitq_set_nelem, "I", "ltable elementis currently used"); + 0, 0, sysctl_waitq_set_nelem, "I", "ltable elementis currently used"); + + +#endif /* DEVELOPMENT || DEBUG */ + +/*Remote Time api*/ +SYSCTL_NODE(_machdep, OID_AUTO, remotetime, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "Remote time api"); +#if DEVELOPMENT || DEBUG +#if CONFIG_MACH_BRIDGE_SEND_TIME +extern _Atomic uint32_t bt_init_flag; +extern uint32_t mach_bridge_timer_enable(uint32_t, int); + +SYSCTL_INT(_machdep_remotetime, OID_AUTO, bridge_timer_init_flag, + CTLFLAG_RD | CTLFLAG_LOCKED, &bt_init_flag, 0, ""); + +static int sysctl_mach_bridge_timer_enable SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + uint32_t value = 0; + int error = 0; + /* User is querying buffer size */ + if (req->oldptr == USER_ADDR_NULL && req->newptr == USER_ADDR_NULL) { + req->oldidx = sizeof(value); + return 0; + } + if (bt_init_flag) { + if (req->newptr) { + int new_value = 0; + error = SYSCTL_IN(req, &new_value, sizeof(new_value)); + if (error) { + return error; + } + if (new_value == 0 || new_value == 1) { + value = mach_bridge_timer_enable(new_value, 1); + } else { + return EPERM; + } + } else { + value = mach_bridge_timer_enable(0, 0); + } + } + error = SYSCTL_OUT(req, &value, sizeof(value)); + return error; +} + +SYSCTL_PROC(_machdep_remotetime, OID_AUTO, bridge_timer_enable, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_mach_bridge_timer_enable, "I", ""); + +#endif /* CONFIG_MACH_BRIDGE_SEND_TIME */ + +static int sysctl_mach_bridge_remote_time SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + uint64_t ltime = 0, rtime = 0; + if (req->oldptr == USER_ADDR_NULL) { + req->oldidx = sizeof(rtime); + return 0; + } + if (req->newptr) { + int error = SYSCTL_IN(req, <ime, sizeof(ltime)); + if (error) { + return error; + } + } + rtime = mach_bridge_remote_time(ltime); + return SYSCTL_OUT(req, &rtime, sizeof(rtime)); +} +SYSCTL_PROC(_machdep_remotetime, OID_AUTO, mach_bridge_remote_time, + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_mach_bridge_remote_time, "Q", ""); #endif /* DEVELOPMENT || DEBUG */ +#if CONFIG_MACH_BRIDGE_RECV_TIME +extern struct bt_params bt_params_get_latest(void); + +static int sysctl_mach_bridge_conversion_params SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + struct bt_params params = {}; + if (req->oldptr == USER_ADDR_NULL) { + req->oldidx = sizeof(struct bt_params); + return 0; + } + if (req->newptr) { + return EPERM; + } + params = bt_params_get_latest(); + return SYSCTL_OUT(req, ¶ms, MIN(sizeof(params), req->oldlen)); +} + +SYSCTL_PROC(_machdep_remotetime, OID_AUTO, conversion_params, + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, + 0, sysctl_mach_bridge_conversion_params, "S,bt_params", ""); + +#endif /* CONFIG_MACH_BRIDGE_RECV_TIME */ + + + +static int +sysctl_kern_tcsm_available SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + uint32_t value = machine_csv(CPUVN_CI) ? 
1 : 0; + + if (req->newptr) { + return EINVAL; + } + + return SYSCTL_OUT(req, &value, sizeof(value)); +} +SYSCTL_PROC(_kern, OID_AUTO, tcsm_available, + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY, + 0, 0, sysctl_kern_tcsm_available, "I", ""); + + +static int +sysctl_kern_tcsm_enable SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + uint32_t soflags = 0; + uint32_t old_value = thread_get_no_smt() ? 1 : 0; + + int error = SYSCTL_IN(req, &soflags, sizeof(soflags)); + if (error) { + return error; + } + + if (soflags && machine_csv(CPUVN_CI)) { + thread_set_no_smt(true); + machine_tecs(current_thread()); + } + + return SYSCTL_OUT(req, &old_value, sizeof(old_value)); +} +SYSCTL_PROC(_kern, OID_AUTO, tcsm_enable, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED | CTLFLAG_ANYBODY, + 0, 0, sysctl_kern_tcsm_enable, "I", ""); + + +#if DEVELOPMENT || DEBUG +extern void sysctl_task_set_no_smt(char no_smt); +extern char sysctl_task_get_no_smt(void); + +static int +sysctl_kern_sched_task_set_no_smt SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, arg1, arg2) + char buff[4]; + + int error = SYSCTL_IN(req, buff, 1); + if (error) { + return error; + } + char no_smt = buff[0]; + + if (!req->newptr) { + goto out; + } + + sysctl_task_set_no_smt(no_smt); +out: + no_smt = sysctl_task_get_no_smt(); + buff[0] = no_smt; + + return SYSCTL_OUT(req, buff, 1); +} + +SYSCTL_PROC(_kern, OID_AUTO, sched_task_set_no_smt, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, + 0, 0, sysctl_kern_sched_task_set_no_smt, "A", ""); + +static int +sysctl_kern_sched_thread_set_no_smt(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + int new_value, changed; + int old_value = thread_get_no_smt() ? 
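	    /* report the previous per-thread no-SMT state; sysctl_io_number()
	     * below also copies in any new value the caller supplied */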
1 : 0; + int error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed); + + if (changed) { + thread_set_no_smt(!!new_value); + } + return error; +} + +SYSCTL_PROC(_kern, OID_AUTO, sched_thread_set_no_smt, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, + 0, 0, sysctl_kern_sched_thread_set_no_smt, "I", ""); +#endif diff --git a/bsd/kern/sys_persona.c b/bsd/kern/sys_persona.c index d00584e97..e2964c118 100644 --- a/bsd/kern/sys_persona.c +++ b/bsd/kern/sys_persona.c @@ -37,18 +37,21 @@ #include -static int kpersona_copyin(user_addr_t infop, struct kpersona_info *kinfo) +static int +kpersona_copyin(user_addr_t infop, struct kpersona_info *kinfo) { uint32_t info_v = 0; int error; error = copyin(infop, &info_v, sizeof(info_v)); - if (error) + if (error) { return error; + } /* only support a single version of the struct for now */ - if (info_v != PERSONA_INFO_V1) + if (info_v != PERSONA_INFO_V1) { return EINVAL; + } error = copyin(infop, kinfo, sizeof(*kinfo)); @@ -58,26 +61,30 @@ static int kpersona_copyin(user_addr_t infop, struct kpersona_info *kinfo) return error; } -static int kpersona_copyout(struct kpersona_info *kinfo, user_addr_t infop) +static int +kpersona_copyout(struct kpersona_info *kinfo, user_addr_t infop) { uint32_t info_v; int error; error = copyin(infop, &info_v, sizeof(info_v)); - if (error) + if (error) { return error; + } /* only support a single version of the struct for now */ /* TODO: in the future compare info_v to kinfo->persona_info_version */ - if (info_v != PERSONA_INFO_V1) + if (info_v != PERSONA_INFO_V1) { return EINVAL; + } error = copyout(kinfo, infop, sizeof(*kinfo)); return error; } -static int kpersona_alloc_syscall(user_addr_t infop, user_addr_t idp) +static int +kpersona_alloc_syscall(user_addr_t infop, user_addr_t idp) { int error; struct kpersona_info kinfo; @@ -89,21 +96,25 @@ static int kpersona_alloc_syscall(user_addr_t infop, user_addr_t idp) * TODO: rdar://problem/19981151 * Add entitlement check! */ - if (!kauth_cred_issuser(kauth_cred_get())) + if (!kauth_cred_issuser(kauth_cred_get())) { return EPERM; + } error = kpersona_copyin(infop, &kinfo); - if (error) + if (error) { return error; + } login = kinfo.persona_name[0] ? 
kinfo.persona_name : NULL; - if (kinfo.persona_id != PERSONA_ID_NONE && kinfo.persona_id != (uid_t)0) + if (kinfo.persona_id != PERSONA_ID_NONE && kinfo.persona_id != (uid_t)0) { id = kinfo.persona_id; + } error = 0; persona = persona_alloc(id, login, kinfo.persona_type, &error); - if (!persona) + if (!persona) { return error; + } error = persona_init_begin(persona); if (error) { @@ -112,20 +123,23 @@ static int kpersona_alloc_syscall(user_addr_t infop, user_addr_t idp) if (kinfo.persona_gid) { error = persona_set_gid(persona, kinfo.persona_gid); - if (error) + if (error) { goto out_persona_err; + } } if (kinfo.persona_ngroups > 0) { /* force gmuid 0 to *opt-out* of memberd */ - if (kinfo.persona_gmuid == 0) + if (kinfo.persona_gmuid == 0) { kinfo.persona_gmuid = KAUTH_UID_NONE; + } error = persona_set_groups(persona, kinfo.persona_groups, - kinfo.persona_ngroups, - kinfo.persona_gmuid); - if (error) + kinfo.persona_ngroups, + kinfo.persona_gmuid); + if (error) { goto out_persona_err; + } } error = copyout(&persona->pna_id, idp, sizeof(persona->pna_id)); @@ -155,29 +169,34 @@ out_persona_err: #if PERSONA_DEBUG printf("%s: ERROR:%d\n", __func__, error); #endif - if (persona) + if (persona) { persona_put(persona); + } return error; } -static int kpersona_dealloc_syscall(user_addr_t idp) +static int +kpersona_dealloc_syscall(user_addr_t idp) { int error = 0; uid_t persona_id; struct persona *persona; - if (!kauth_cred_issuser(kauth_cred_get())) + if (!kauth_cred_issuser(kauth_cred_get())) { return EPERM; + } error = copyin(idp, &persona_id, sizeof(persona_id)); - if (error) + if (error) { return error; + } /* invalidate the persona (deny subsequent spawn/fork) */ persona = persona_lookup_and_invalidate(persona_id); - if (!persona) + if (!persona) { return ESRCH; + } /* one reference from the _lookup() */ persona_put(persona); @@ -188,13 +207,15 @@ static int kpersona_dealloc_syscall(user_addr_t idp) return error; } -static int kpersona_get_syscall(user_addr_t idp) +static int +kpersona_get_syscall(user_addr_t idp) { int error; struct persona *persona = current_persona_get(); - if (!persona) + if (!persona) { return ESRCH; + } error = copyout(&persona->pna_id, idp, sizeof(persona->pna_id)); persona_put(persona); @@ -202,7 +223,8 @@ static int kpersona_get_syscall(user_addr_t idp) return error; } -static int kpersona_info_syscall(user_addr_t idp, user_addr_t infop) +static int +kpersona_info_syscall(user_addr_t idp, user_addr_t infop) { int error; uid_t persona_id; @@ -210,8 +232,9 @@ static int kpersona_info_syscall(user_addr_t idp, user_addr_t infop) struct kpersona_info kinfo; error = copyin(idp, &persona_id, sizeof(persona_id)); - if (error) + if (error) { return error; + } /* * TODO: rdar://problem/19981151 @@ -219,12 +242,13 @@ static int kpersona_info_syscall(user_addr_t idp, user_addr_t infop) */ persona = persona_lookup(persona_id); - if (!persona) + if (!persona) { return ESRCH; + } persona_dbg("FOUND: persona: id:%d, gid:%d, login:\"%s\"", - persona->pna_id, persona_get_gid(persona), - persona->pna_login); + persona->pna_id, persona_get_gid(persona), + persona->pna_login); memset(&kinfo, 0, sizeof(kinfo)); kinfo.persona_info_version = PERSONA_INFO_V1; @@ -249,7 +273,8 @@ static int kpersona_info_syscall(user_addr_t idp, user_addr_t infop) return error; } -static int kpersona_pidinfo_syscall(user_addr_t idp, user_addr_t infop) +static int +kpersona_pidinfo_syscall(user_addr_t idp, user_addr_t infop) { int error; pid_t pid; @@ -257,16 +282,19 @@ static int 
kpersona_pidinfo_syscall(user_addr_t idp, user_addr_t infop) struct kpersona_info kinfo; error = copyin(idp, &pid, sizeof(pid)); - if (error) + if (error) { return error; + } if (!kauth_cred_issuser(kauth_cred_get()) - && (pid != current_proc()->p_pid)) + && (pid != current_proc()->p_pid)) { return EPERM; + } persona = persona_proc_get(pid); - if (!persona) + if (!persona) { return ESRCH; + } memset(&kinfo, 0, sizeof(kinfo)); kinfo.persona_info_version = PERSONA_INFO_V1; @@ -287,7 +315,8 @@ static int kpersona_pidinfo_syscall(user_addr_t idp, user_addr_t infop) return error; } -static int kpersona_find_syscall(user_addr_t infop, user_addr_t idp, user_addr_t idlenp) +static int +kpersona_find_syscall(user_addr_t infop, user_addr_t idp, user_addr_t idlenp) { int error; struct kpersona_info kinfo; @@ -296,21 +325,24 @@ static int kpersona_find_syscall(user_addr_t infop, user_addr_t idp, user_addr_t struct persona **persona = NULL; error = copyin(idlenp, &u_idlen, sizeof(u_idlen)); - if (error) + if (error) { return error; + } - if (u_idlen > g_max_personas) + if (u_idlen > g_max_personas) { u_idlen = g_max_personas; + } error = kpersona_copyin(infop, &kinfo); - if (error) + if (error) { goto out; + } login = kinfo.persona_name[0] ? kinfo.persona_name : NULL; if (u_idlen > 0) { MALLOC(persona, struct persona **, sizeof(*persona) * u_idlen, - M_TEMP, M_WAITOK|M_ZERO); + M_TEMP, M_WAITOK | M_ZERO); if (!persona) { error = ENOMEM; goto out; @@ -319,24 +351,28 @@ static int kpersona_find_syscall(user_addr_t infop, user_addr_t idp, user_addr_t k_idlen = u_idlen; error = persona_find(login, kinfo.persona_id, persona, &k_idlen); - if (error) + if (error) { goto out; + } /* copyout all the IDs of each persona we found */ for (size_t i = 0; i < k_idlen; i++) { - if (i >= u_idlen) + if (i >= u_idlen) { break; + } error = copyout(&persona[i]->pna_id, - idp + (i * sizeof(persona[i]->pna_id)), - sizeof(persona[i]->pna_id)); - if (error) + idp + (i * sizeof(persona[i]->pna_id)), + sizeof(persona[i]->pna_id)); + if (error) { goto out; + } } out: if (persona) { - for (size_t i = 0; i < u_idlen; i++) + for (size_t i = 0; i < u_idlen; i++) { persona_put(persona[i]); + } FREE(persona, M_TEMP); } @@ -349,7 +385,8 @@ out: /* * Syscall entry point / demux. */ -int persona(__unused proc_t p, struct persona_args *pargs, __unused int32_t *retval) +int +persona(__unused proc_t p, struct persona_args *pargs, __unused int32_t *retval) { int error; uint32_t op = pargs->operation; diff --git a/bsd/kern/sys_pipe.c b/bsd/kern/sys_pipe.c index c6725337b..cf0e5f2b0 100644 --- a/bsd/kern/sys_pipe.c +++ b/bsd/kern/sys_pipe.c @@ -20,7 +20,7 @@ * Copyright (c) 2003-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -29,10 +29,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -40,7 +40,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -57,10 +57,10 @@ * do. * * Pipes are implemented as circular buffers. Following are the valid states in pipe operations - * + * * _________________________________ * 1. |_________________________________| r=w, c=0 - * + * * _________________________________ * 2. |__r:::::wc_______________________| r <= w , c > 0 * @@ -75,28 +75,28 @@ * a-z define the steps in a program flow * 1-4 are the states as defined above * Action: the file operation performed on the pipe - * + * * Current:None Action: initialize with size M=200 * a. State 1 ( r=0, w=0, c=0) - * + * * Current: a Action: write(100) (w < M) * b. State 2 (r=0, w=100, c=100) - * + * * Current: b Action: write(100) (w = M-w) * c. State 4 (r=0,w=0,c=200) - * + * * Current: b Action: read(70) ( r < c ) * d. State 2 (r=70,w=100,c=30) - * + * * Current: d Action: write(75) ( w < (m-w)) * e. State 2 (r=70,w=175,c=105) - * + * * Current: d Action: write(110) ( w > (m-w)) * f. State 3 (r=70,w=10,c=140) - * + * * Current: d Action: read(30) (r >= c ) * g. State 1 (r=100,w=100,c=0) - * + * */ /* @@ -105,12 +105,12 @@ * dynamically change to larger sizes based on usage. The buffer size is never * reduced. The total amount of kernel memory used is governed by maxpipekva. * If the dynamic-expansion limit is reached, the output thread is blocked - * until the pipe buffer empties enough to continue. + * until the pipe buffer empties enough to continue. * * In order to limit the resource use of pipes, two sysctls exist: * * kern.ipc.maxpipekva - This is a hard limit on the amount of pageable - * address space available to us in pipe_map. + * address space available to us in pipe_map. * * Memory usage may be monitored through the sysctls * kern.ipc.pipes, kern.ipc.pipekva.
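
[Aside: the r/w/c state machine documented above is easiest to check against a few lines of real code. The sketch below is not part of this patch and uses illustrative names only; it models the same accounting, where c == 0 is state 1, a write index that has wrapped below the read index is state 3, and r == w with c == M is state 4 (full).]

#include <stddef.h>
#include <string.h>

struct ring {
	char   *buf;
	size_t  size;	/* M in the diagram above */
	size_t  r, w;	/* read / write indices */
	size_t  c;	/* bytes currently buffered */
};

/* Write up to len bytes, wrapping at the end of the buffer (state 2 -> 3/4). */
static size_t
ring_write(struct ring *rb, const char *src, size_t len)
{
	size_t done = 0;

	while (done < len && rb->c < rb->size) {
		size_t chunk = rb->size - rb->w;	/* contiguous run to the end */
		if (chunk > rb->size - rb->c) {
			chunk = rb->size - rb->c;	/* clamp to free space */
		}
		if (chunk > len - done) {
			chunk = len - done;		/* clamp to the request */
		}
		memcpy(&rb->buf[rb->w], src + done, chunk);
		rb->w = (rb->w + chunk) % rb->size;
		rb->c += chunk;
		done += chunk;
	}
	return done;
}

/* Read up to len bytes; draining to c == 0 returns the ring to state 1. */
static size_t
ring_read(struct ring *rb, char *dst, size_t len)
{
	size_t done = 0;

	while (done < len && rb->c > 0) {
		size_t chunk = rb->size - rb->r;	/* contiguous run to the end */
		if (chunk > rb->c) {
			chunk = rb->c;			/* clamp to buffered bytes */
		}
		if (chunk > len - done) {
			chunk = len - done;
		}
		memcpy(dst + done, &rb->buf[rb->r], chunk);
		rb->r = (rb->r + chunk) % rb->size;
		rb->c -= chunk;
		done += chunk;
	}
	return done;
}
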
@@ -160,20 +160,20 @@ #define f_data f_fglob->fg_data /* - * interfaces to the outside world exported through file operations + * interfaces to the outside world exported through file operations */ static int pipe_read(struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); + int flags, vfs_context_t ctx); static int pipe_write(struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); + int flags, vfs_context_t ctx); static int pipe_close(struct fileglob *fg, vfs_context_t ctx); static int pipe_select(struct fileproc *fp, int which, void * wql, - vfs_context_t ctx); + vfs_context_t ctx); static int pipe_kqfilter(struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx); + struct kevent_internal_s *kev, vfs_context_t ctx); static int pipe_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, - vfs_context_t ctx); -static int pipe_drain(struct fileproc *fp,vfs_context_t ctx); + vfs_context_t ctx); +static int pipe_drain(struct fileproc *fp, vfs_context_t ctx); static const struct fileops pipeops = { .fo_type = DTYPE_PIPE, @@ -197,17 +197,17 @@ static int filt_pipewritetouch(struct knote *kn, struct kevent_internal_s *kev); static int filt_pipewriteprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); SECURITY_READ_ONLY_EARLY(struct filterops) pipe_rfiltops = { - .f_isfd = 1, - .f_detach = filt_pipedetach, - .f_event = filt_piperead, + .f_isfd = 1, + .f_detach = filt_pipedetach, + .f_event = filt_piperead, .f_touch = filt_pipereadtouch, .f_process = filt_pipereadprocess, }; SECURITY_READ_ONLY_EARLY(struct filterops) pipe_wfiltops = { - .f_isfd = 1, - .f_detach = filt_pipedetach, - .f_event = filt_pipewrite, + .f_isfd = 1, + .f_detach = filt_pipedetach, + .f_event = filt_pipewrite, .f_touch = filt_pipewritetouch, .f_process = filt_pipewriteprocess, }; @@ -221,18 +221,18 @@ int maxpipekva __attribute__((used)) = PIPE_KVAMAX; /* allowing 16MB max. 
*/ #if PIPE_SYSCTLS SYSCTL_DECL(_kern_ipc); -SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RD|CTLFLAG_LOCKED, - &maxpipekva, 0, "Pipe KVA limit"); -SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekvawired, CTLFLAG_RW|CTLFLAG_LOCKED, - &maxpipekvawired, 0, "Pipe KVA wired limit"); -SYSCTL_INT(_kern_ipc, OID_AUTO, pipes, CTLFLAG_RD|CTLFLAG_LOCKED, - &amountpipes, 0, "Current # of pipes"); -SYSCTL_INT(_kern_ipc, OID_AUTO, bigpipes, CTLFLAG_RD|CTLFLAG_LOCKED, - &nbigpipe, 0, "Current # of big pipes"); -SYSCTL_INT(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD|CTLFLAG_LOCKED, - &amountpipekva, 0, "Pipe KVA usage"); -SYSCTL_INT(_kern_ipc, OID_AUTO, pipekvawired, CTLFLAG_RD|CTLFLAG_LOCKED, - &amountpipekvawired, 0, "Pipe wired KVA usage"); +SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekva, CTLFLAG_RD | CTLFLAG_LOCKED, + &maxpipekva, 0, "Pipe KVA limit"); +SYSCTL_INT(_kern_ipc, OID_AUTO, maxpipekvawired, CTLFLAG_RW | CTLFLAG_LOCKED, + &maxpipekvawired, 0, "Pipe KVA wired limit"); +SYSCTL_INT(_kern_ipc, OID_AUTO, pipes, CTLFLAG_RD | CTLFLAG_LOCKED, + &amountpipes, 0, "Current # of pipes"); +SYSCTL_INT(_kern_ipc, OID_AUTO, bigpipes, CTLFLAG_RD | CTLFLAG_LOCKED, + &nbigpipe, 0, "Current # of big pipes"); +SYSCTL_INT(_kern_ipc, OID_AUTO, pipekva, CTLFLAG_RD | CTLFLAG_LOCKED, + &amountpipekva, 0, "Pipe KVA usage"); +SYSCTL_INT(_kern_ipc, OID_AUTO, pipekvawired, CTLFLAG_RD | CTLFLAG_LOCKED, + &amountpipekvawired, 0, "Pipe wired KVA usage"); #endif static void pipeclose(struct pipe *cpipe); @@ -248,21 +248,21 @@ static __inline void pipeio_unlock(struct pipe *cpipe); extern int postpipeevent(struct pipe *, int); extern void evpipefree(struct pipe *cpipe); -static lck_grp_t *pipe_mtx_grp; -static lck_attr_t *pipe_mtx_attr; -static lck_grp_attr_t *pipe_mtx_grp_attr; +static lck_grp_t *pipe_mtx_grp; +static lck_attr_t *pipe_mtx_attr; +static lck_grp_attr_t *pipe_mtx_grp_attr; static zone_t pipe_zone; -#define MAX_PIPESIZE(pipe) ( MAX(PIPE_SIZE, (pipe)->pipe_buffer.size) ) +#define MAX_PIPESIZE(pipe) ( MAX(PIPE_SIZE, (pipe)->pipe_buffer.size) ) -#define PIPE_GARBAGE_AGE_LIMIT 5000 /* In milliseconds */ -#define PIPE_GARBAGE_QUEUE_LIMIT 32000 +#define PIPE_GARBAGE_AGE_LIMIT 5000 /* In milliseconds */ +#define PIPE_GARBAGE_QUEUE_LIMIT 32000 struct pipe_garbage { - struct pipe *pg_pipe; - struct pipe_garbage *pg_next; - uint64_t pg_timestamp; + struct pipe *pg_pipe; + struct pipe_garbage *pg_next; + uint64_t pg_timestamp; }; static zone_t pipe_garbage_zone; @@ -279,11 +279,11 @@ SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, pipeinit, NULL); void pipeinit(void) { - nbigpipe=0; + nbigpipe = 0; vm_size_t zone_size; - + zone_size = 8192 * sizeof(struct pipe); - pipe_zone = zinit(sizeof(struct pipe), zone_size, 4096, "pipe zone"); + pipe_zone = zinit(sizeof(struct pipe), zone_size, 4096, "pipe zone"); /* allocate lock group attribute and group for pipe mutexes */ @@ -298,17 +298,16 @@ pipeinit(void) */ zone_size = (PIPE_GARBAGE_QUEUE_LIMIT + 20) * sizeof(struct pipe_garbage); - pipe_garbage_zone = (zone_t)zinit(sizeof(struct pipe_garbage), + pipe_garbage_zone = (zone_t)zinit(sizeof(struct pipe_garbage), zone_size, 4096, "pipe garbage zone"); pipe_garbage_lock = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr); - } -#ifndef CONFIG_EMBEDDED +#ifndef CONFIG_EMBEDDED /* Bitmap for things to touch in pipe_touch() */ -#define PIPE_ATIME 0x00000001 /* time of last access */ -#define PIPE_MTIME 0x00000002 /* time of last modification */ -#define PIPE_CTIME 0x00000004 /* time of last status change */ +#define PIPE_ATIME 0x00000001 /* time of last 
access */ +#define PIPE_MTIME 0x00000002 /* time of last modification */ +#define PIPE_CTIME 0x00000004 /* time of last status change */ static void pipe_touch(struct pipe *tpipe, int touch) @@ -334,16 +333,16 @@ pipe_touch(struct pipe *tpipe, int touch) } #endif -static const unsigned int pipesize_blocks[] = {512,1024,2048,4096, 4096 * 2, PIPE_SIZE , PIPE_SIZE * 4 }; +static const unsigned int pipesize_blocks[] = {512, 1024, 2048, 4096, 4096 * 2, PIPE_SIZE, PIPE_SIZE * 4 }; -/* - * finds the right size from possible sizes in pipesize_blocks - * returns the size which matches max(current,expected) +/* + * finds the right size from possible sizes in pipesize_blocks + * returns the size which matches max(current,expected) */ -static int +static int choose_pipespace(unsigned long current, unsigned long expected) { - int i = sizeof(pipesize_blocks)/sizeof(unsigned int) -1; + int i = sizeof(pipesize_blocks) / sizeof(unsigned int) - 1; unsigned long target; /* @@ -352,16 +351,16 @@ choose_pipespace(unsigned long current, unsigned long expected) */ assert(PIPE_BUF == pipesize_blocks[0]); - if (expected > current) + if (expected > current) { target = expected; - else + } else { target = current; + } - while ( i >0 && pipesize_blocks[i-1] > target) { - i=i-1; - + while (i > 0 && pipesize_blocks[i - 1] > target) { + i = i - 1; } - + return pipesize_blocks[i]; } @@ -373,27 +372,28 @@ choose_pipespace(unsigned long current, unsigned long expected) * Required: PIPE_LOCK and io lock to be held by caller. * returns 0 on success or no expansion possible */ -static int +static int expand_pipespace(struct pipe *p, int target_size) { struct pipe tmp, oldpipe; int error; tmp.pipe_buffer.buffer = 0; - + if (p->pipe_buffer.size >= (unsigned) target_size) { return 0; /* the existing buffer is max size possible */ } - + /* create enough space in the target */ error = pipespace(&tmp, target_size); - if (error != 0) - return (error); + if (error != 0) { + return error; + } oldpipe.pipe_buffer.buffer = p->pipe_buffer.buffer; oldpipe.pipe_buffer.size = p->pipe_buffer.size; - + memcpy(tmp.pipe_buffer.buffer, p->pipe_buffer.buffer, p->pipe_buffer.size); - if (p->pipe_buffer.cnt > 0 && p->pipe_buffer.in <= p->pipe_buffer.out ){ + if (p->pipe_buffer.cnt > 0 && p->pipe_buffer.in <= p->pipe_buffer.out) { /* we are in State 3 and need extra copying for read to be consistent */ memcpy(&tmp.pipe_buffer.buffer[p->pipe_buffer.size], p->pipe_buffer.buffer, p->pipe_buffer.size); p->pipe_buffer.in += p->pipe_buffer.size; @@ -409,11 +409,11 @@ expand_pipespace(struct pipe *p, int target_size) /* * The pipe system call for the DTYPE_PIPE type of pipes - * + * * returns: - * FREAD | fd0 | -->[struct rpipe] --> |~~buffer~~| \ + * FREAD | fd0 | -->[struct rpipe] --> |~~buffer~~| \ * (pipe_mutex) - * FWRITE | fd1 | -->[struct wpipe] --X / + * FWRITE | fd1 | -->[struct wpipe] --X / */ /* ARGSUSED */ @@ -425,34 +425,36 @@ pipe(proc_t p, __unused struct pipe_args *uap, int32_t *retval) lck_mtx_t *pmtx; int fd, error; - if ((pmtx = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr)) == NULL) - return (ENOMEM); - + if ((pmtx = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr)) == NULL) { + return ENOMEM; + } + rpipe = wpipe = NULL; if (pipe_create(&rpipe) || pipe_create(&wpipe)) { - error = ENFILE; + error = ENFILE; goto freepipes; } - /* + /* * allocate the space for the normal I/O direction up * front... we'll delay the allocation for the other * direction until a write actually occurs (most likely it won't)... 
- */ error = pipespace(rpipe, choose_pipespace(rpipe->pipe_buffer.size, 0)); - if (error) - goto freepipes; + if (error) { + goto freepipes; + } TAILQ_INIT(&rpipe->pipe_evlist); TAILQ_INIT(&wpipe->pipe_evlist); error = falloc(p, &rf, &fd, vfs_context_current()); if (error) { - goto freepipes; + goto freepipes; } retval[0] = fd; /* - * for now we'll create half-duplex pipes(refer returns section above). + * for now we'll create half-duplex pipes (refer to the returns section above). * this is what we've always supported. */ rf->f_flag = FREAD; @@ -462,7 +464,7 @@ pipe(proc_t p, __unused struct pipe_args *uap, int32_t *retval) error = falloc(p, &wf, &fd, vfs_context_current()); if (error) { fp_free(p, retval[0], rf); - goto freepipes; + goto freepipes; } wf->f_flag = FWRITE; wf->f_data = (caddr_t)wpipe; @@ -471,7 +473,7 @@ rpipe->pipe_peer = wpipe; wpipe->pipe_peer = rpipe; /* both structures share the same mutex */ - rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx; + rpipe->pipe_mtxp = wpipe->pipe_mtxp = pmtx; retval[1] = fd; #if CONFIG_MACF @@ -495,73 +497,75 @@ pipe(proc_t p, __unused struct pipe_args *uap, int32_t *retval) proc_fdunlock(p); - return (0); + return 0; freepipes: - pipeclose(rpipe); - pipeclose(wpipe); + pipeclose(rpipe); + pipeclose(wpipe); lck_mtx_free(pmtx, pipe_mtx_grp); - return (error); + return error; } int pipe_stat(struct pipe *cpipe, void *ub, int isstat64) { #if CONFIG_MACF - int error; + int error; #endif - int pipe_size = 0; - int pipe_count; - struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */ + int pipe_size = 0; + int pipe_count; + struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */ struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */ - if (cpipe == NULL) - return (EBADF); + if (cpipe == NULL) { + return EBADF; + } PIPE_LOCK(cpipe); #if CONFIG_MACF error = mac_pipe_check_stat(kauth_cred_get(), cpipe); if (error) { PIPE_UNLOCK(cpipe); - return (error); + return error; } #endif if (cpipe->pipe_buffer.buffer == 0) { - /* must be stat'ing the write fd */ - if (cpipe->pipe_peer) { - /* the peer still exists, use it's info */ - pipe_size = MAX_PIPESIZE(cpipe->pipe_peer); + /* must be stat'ing the write fd */ + if (cpipe->pipe_peer) { + /* the peer still exists, use its info */ + pipe_size = MAX_PIPESIZE(cpipe->pipe_peer); pipe_count = cpipe->pipe_peer->pipe_buffer.cnt; } else { pipe_count = 0; } } else { - pipe_size = MAX_PIPESIZE(cpipe); + pipe_size = MAX_PIPESIZE(cpipe); pipe_count = cpipe->pipe_buffer.cnt; } /* * since the peer's buffer is set up outside of the lock * we might catch it in a transient state */ - if (pipe_size == 0) + if (pipe_size == 0) { pipe_size = MAX(PIPE_SIZE, pipesize_blocks[0]); + } if (isstat64 != 0) { - sb64 = (struct stat64 *)ub; + sb64 = (struct stat64 *)ub; bzero(sb64, sizeof(*sb64)); sb64->st_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; sb64->st_blksize = pipe_size; sb64->st_size = pipe_count; sb64->st_blocks = (sb64->st_size + sb64->st_blksize - 1) / sb64->st_blksize; - + sb64->st_uid = kauth_getuid(); sb64->st_gid = kauth_getgid(); - + sb64->st_atimespec.tv_sec = cpipe->st_atimespec.tv_sec; sb64->st_atimespec.tv_nsec = cpipe->st_atimespec.tv_nsec; - + sb64->st_mtimespec.tv_sec = cpipe->st_mtimespec.tv_sec; sb64->st_mtimespec.tv_nsec = cpipe->st_mtimespec.tv_nsec; @@ -569,26 +573,26 @@ pipe_stat(struct pipe *cpipe, void *ub, int isstat64) sb64->st_ctimespec.tv_nsec =
cpipe->st_ctimespec.tv_nsec; /* - * Return a relatively unique inode number based on the current - * address of this pipe's struct pipe. This number may be recycled - * relatively quickly. - */ + * Return a relatively unique inode number based on the current + * address of this pipe's struct pipe. This number may be recycled + * relatively quickly. + */ sb64->st_ino = (ino64_t)VM_KERNEL_ADDRPERM((uintptr_t)cpipe); } else { - sb = (struct stat *)ub; + sb = (struct stat *)ub; bzero(sb, sizeof(*sb)); sb->st_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; sb->st_blksize = pipe_size; sb->st_size = pipe_count; sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize; - + sb->st_uid = kauth_getuid(); sb->st_gid = kauth_getgid(); - + sb->st_atimespec.tv_sec = cpipe->st_atimespec.tv_sec; sb->st_atimespec.tv_nsec = cpipe->st_atimespec.tv_nsec; - + sb->st_mtimespec.tv_sec = cpipe->st_mtimespec.tv_sec; sb->st_mtimespec.tv_nsec = cpipe->st_mtimespec.tv_nsec; @@ -596,10 +600,10 @@ pipe_stat(struct pipe *cpipe, void *ub, int isstat64) sb->st_ctimespec.tv_nsec = cpipe->st_ctimespec.tv_nsec; /* - * Return a relatively unique inode number based on the current - * address of this pipe's struct pipe. This number may be recycled - * relatively quickly. - */ + * Return a relatively unique inode number based on the current + * address of this pipe's struct pipe. This number may be recycled + * relatively quickly. + */ sb->st_ino = (ino_t)VM_KERNEL_ADDRPERM((uintptr_t)cpipe); } PIPE_UNLOCK(cpipe); @@ -612,7 +616,7 @@ pipe_stat(struct pipe *cpipe, void *ub, int isstat64) * XXX is associated with pipes, since they are implemented via a * XXX struct fileops indirection rather than as FS objects. */ - return (0); + return 0; } @@ -627,11 +631,13 @@ pipespace(struct pipe *cpipe, int size) { vm_offset_t buffer; - if (size <= 0) - return(EINVAL); + if (size <= 0) { + return EINVAL; + } - if ((buffer = (vm_offset_t)kalloc(size)) == 0 ) - return(ENOMEM); + if ((buffer = (vm_offset_t)kalloc(size)) == 0) { + return ENOMEM; + } /* free old resources if we're resizing */ pipe_free_kmem(cpipe); @@ -644,7 +650,7 @@ pipespace(struct pipe *cpipe, int size) OSAddAtomic(1, &amountpipes); OSAddAtomic(cpipe->pipe_buffer.size, &amountpipekva); - return (0); + return 0; } /* @@ -656,8 +662,9 @@ pipe_create(struct pipe **cpipep) struct pipe *cpipe; cpipe = (struct pipe *)zalloc(pipe_zone); - if ((*cpipep = cpipe) == NULL) - return (ENOMEM); + if ((*cpipep = cpipe) == NULL) { + return ENOMEM; + } /* * protect so pipespace or pipeclose don't follow a junk pointer @@ -665,11 +672,11 @@ pipe_create(struct pipe **cpipep) */ bzero(cpipe, sizeof *cpipe); -#ifndef CONFIG_EMBEDDED +#ifndef CONFIG_EMBEDDED /* Initial times are all the time of creation of the pipe */ pipe_touch(cpipe, PIPE_ATIME | PIPE_MTIME | PIPE_CTIME); #endif - return (0); + return 0; } @@ -683,12 +690,13 @@ pipeio_lock(struct pipe *cpipe, int catch) while (cpipe->pipe_state & PIPE_LOCKFL) { cpipe->pipe_state |= PIPE_LWANT; error = msleep(cpipe, PIPE_MTX(cpipe), catch ? 
(PRIBIO | PCATCH) : PRIBIO, - "pipelk", 0); - if (error != 0) - return (error); + "pipelk", 0); + if (error != 0) { + return error; + } } cpipe->pipe_state |= PIPE_LOCKFL; - return (0); + return 0; } /* @@ -714,17 +722,19 @@ pipeselwakeup(struct pipe *cpipe, struct pipe *spipe) cpipe->pipe_state &= ~PIPE_SEL; selwakeup(&cpipe->pipe_sel); } - if (cpipe->pipe_state & PIPE_KNOTE) - KNOTE(&cpipe->pipe_sel.si_note, 1); + if (cpipe->pipe_state & PIPE_KNOTE) { + KNOTE(&cpipe->pipe_sel.si_note, 1); + } postpipeevent(cpipe, EV_RWBYTES); if (spipe && (spipe->pipe_state & PIPE_ASYNC) && spipe->pipe_pgid) { - if (spipe->pipe_pgid < 0) - gsignal(-spipe->pipe_pgid, SIGIO); - else - proc_signal(spipe->pipe_pgid, SIGIO); - } + if (spipe->pipe_pgid < 0) { + gsignal(-spipe->pipe_pgid, SIGIO); + } else { + proc_signal(spipe->pipe_pgid, SIGIO); + } + } } /* @@ -734,7 +744,7 @@ pipeselwakeup(struct pipe *cpipe, struct pipe *spipe) /* ARGSUSED */ static int pipe_read(struct fileproc *fp, struct uio *uio, __unused int flags, - __unused vfs_context_t ctx) + __unused vfs_context_t ctx) { struct pipe *rpipe = (struct pipe *)fp->f_data; int error; @@ -745,13 +755,15 @@ pipe_read(struct fileproc *fp, struct uio *uio, __unused int flags, ++rpipe->pipe_busy; error = pipeio_lock(rpipe, 1); - if (error) + if (error) { goto unlocked_error; + } #if CONFIG_MACF error = mac_pipe_check_read(kauth_cred_get(), rpipe); - if (error) + if (error) { goto locked_error; + } #endif @@ -762,30 +774,34 @@ pipe_read(struct fileproc *fp, struct uio *uio, __unused int flags, if (rpipe->pipe_buffer.cnt > 0) { /* * # bytes to read is min( bytes from read pointer until end of buffer, - * total unread bytes, + * total unread bytes, * user requested byte count) */ size = rpipe->pipe_buffer.size - rpipe->pipe_buffer.out; - if (size > rpipe->pipe_buffer.cnt) + if (size > rpipe->pipe_buffer.cnt) { size = rpipe->pipe_buffer.cnt; + } // LP64todo - fix this! - if (size > (u_int) uio_resid(uio)) + if (size > (u_int) uio_resid(uio)) { size = (u_int) uio_resid(uio); + } PIPE_UNLOCK(rpipe); /* we still hold io lock.*/ error = uiomove( - &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out], - size, uio); + &rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out], + size, uio); PIPE_LOCK(rpipe); - if (error) + if (error) { break; + } rpipe->pipe_buffer.out += size; - if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size) + if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size) { rpipe->pipe_buffer.out = 0; + } rpipe->pipe_buffer.cnt -= size; - + /* * If there is no more to read in the pipe, reset * its pointers to the beginning. This improves @@ -816,11 +832,12 @@ pipe_read(struct fileproc *fp, struct uio *uio, __unused int flags, /* * Break if some data was read in previous iteration. */ - if (nread > 0) + if (nread > 0) { break; + } /* - * Unlock the pipe buffer for our remaining processing. + * Unlock the pipe buffer for our remaining processing. * We will either break out with an error or we will * sleep and relock to loop. */ @@ -835,11 +852,13 @@ pipe_read(struct fileproc *fp, struct uio *uio, __unused int flags, } else { rpipe->pipe_state |= PIPE_WANTR; error = msleep(rpipe, PIPE_MTX(rpipe), PRIBIO | PCATCH, "piperd", 0); - if (error == 0) - error = pipeio_lock(rpipe, 1); + if (error == 0) { + error = pipeio_lock(rpipe, 1); + } } - if (error) + if (error) { goto unlocked_error; + } } } #if CONFIG_MACF @@ -854,7 +873,7 @@ unlocked_error: * PIPE_WANT processing only makes sense if pipe_busy is 0. 
*/ if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) { - rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW); + rpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTW); wakeup(rpipe); } else if (rpipe->pipe_buffer.cnt < rpipe->pipe_buffer.size) { /* @@ -866,26 +885,27 @@ unlocked_error: } } - if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) > 0) + if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) > 0) { pipeselwakeup(rpipe, rpipe->pipe_peer); + } -#ifndef CONFIG_EMBEDDED +#ifndef CONFIG_EMBEDDED /* update last read time */ pipe_touch(rpipe, PIPE_ATIME); #endif PIPE_UNLOCK(rpipe); - return (error); + return error; } /* - * perform a write of n bytes into the read side of buffer. Since + * perform a write of n bytes into the read side of the buffer. Since * pipes are unidirectional, a write is meant to be read by the other side only. */ static int pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, - __unused vfs_context_t ctx) + __unused vfs_context_t ctx) { int error = 0; int orig_resid; @@ -905,13 +925,13 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, */ if (wpipe == NULL || (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF))) { PIPE_UNLOCK(rpipe); - return (EPIPE); + return EPIPE; } #if CONFIG_MACF error = mac_pipe_check_write(kauth_cred_get(), wpipe); if (error) { PIPE_UNLOCK(rpipe); - return (error); + return error; } #endif ++wpipe->pipe_busy; @@ -925,71 +945,71 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, * fd[1], so allocating space for both ends is a waste... */ - if ( wpipe->pipe_buffer.buffer == 0 || ( - (unsigned)orig_resid > wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt && - amountpipekva < maxpipekva ) ) { - - pipe_size = choose_pipespace(wpipe->pipe_buffer.size, wpipe->pipe_buffer.cnt + orig_resid); + if (wpipe->pipe_buffer.buffer == 0 || ( + (unsigned)orig_resid > wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt && + amountpipekva < maxpipekva)) { + pipe_size = choose_pipespace(wpipe->pipe_buffer.size, wpipe->pipe_buffer.cnt + orig_resid); } if (pipe_size) { - /* + /* * need to do initial allocation or resizing of pipe - * holding both structure and io locks. + * holding both structure and io locks. */ if ((error = pipeio_lock(wpipe, 1)) == 0) { - if (wpipe->pipe_buffer.cnt == 0) + if (wpipe->pipe_buffer.cnt == 0) { error = pipespace(wpipe, pipe_size); - else + } else { error = expand_pipespace(wpipe, pipe_size); - + } + pipeio_unlock(wpipe); - + /* allocation failed */ - if (wpipe->pipe_buffer.buffer == 0) - error = ENOMEM; + if (wpipe->pipe_buffer.buffer == 0) { + error = ENOMEM; + } } if (error) { - /* + /* * If an error occurred, unbusy and return, waking up any pending * readers. */ - --wpipe->pipe_busy; - if ((wpipe->pipe_busy == 0) && + --wpipe->pipe_busy; + if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) { - wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR); + wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR); wakeup(wpipe); } PIPE_UNLOCK(rpipe); - return(error); + return error; } } while (uio_resid(uio)) { - - retrywrite: +retrywrite: space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt; /* Writes of size <= PIPE_BUF must be atomic.
*/ - if ((space < uio_resid(uio)) && (orig_resid <= PIPE_BUF)) + if ((space < uio_resid(uio)) && (orig_resid <= PIPE_BUF)) { space = 0; + } if (space > 0) { - - if ((error = pipeio_lock(wpipe,1)) == 0) { - int size; /* Transfer size */ - int segsize; /* first segment to transfer */ + if ((error = pipeio_lock(wpipe, 1)) == 0) { + int size; /* Transfer size */ + int segsize; /* first segment to transfer */ if (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) { pipeio_unlock(wpipe); - error = EPIPE; + error = EPIPE; break; } - /* + /* * If a process blocked in pipeio_lock, our * value for space might be bad... the mutex * is dropped while we're blocked */ - if (space > (int)(wpipe->pipe_buffer.size - + if (space > (int)(wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt)) { pipeio_unlock(wpipe); goto retrywrite; @@ -1000,47 +1020,50 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, * and free space in pipe buffer. */ // LP64todo - fix this! - if (space > uio_resid(uio)) + if (space > uio_resid(uio)) { size = uio_resid(uio); - else + } else { size = space; + } /* - * First segment to transfer is minimum of + * First segment to transfer is minimum of * transfer size and contiguous space in * pipe buffer. If first segment to transfer * is less than the transfer size, we've got * a wraparound in the buffer. */ - segsize = wpipe->pipe_buffer.size - - wpipe->pipe_buffer.in; - if (segsize > size) + segsize = wpipe->pipe_buffer.size - + wpipe->pipe_buffer.in; + if (segsize > size) { segsize = size; - + } + /* Transfer first segment */ PIPE_UNLOCK(rpipe); - error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in], - segsize, uio); + error = uiomove(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in], + segsize, uio); PIPE_LOCK(rpipe); - + if (error == 0 && segsize < size) { - /* + /* * Transfer remaining part now, to * support atomic writes. Wraparound * happened. (State 3) */ - if (wpipe->pipe_buffer.in + segsize != - wpipe->pipe_buffer.size) + if (wpipe->pipe_buffer.in + segsize != + wpipe->pipe_buffer.size) { panic("Expected pipe buffer " "wraparound disappeared"); - + } + PIPE_UNLOCK(rpipe); error = uiomove( - &wpipe->pipe_buffer.buffer[0], - size - segsize, uio); + &wpipe->pipe_buffer.buffer[0], + size - segsize, uio); PIPE_LOCK(rpipe); } - /* + /* * readers never know to read until count is updated. */ if (error == 0) { @@ -1049,24 +1072,25 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, wpipe->pipe_buffer.size) { if (wpipe->pipe_buffer.in != size - segsize + - wpipe->pipe_buffer.size) + wpipe->pipe_buffer.size) { panic("Expected " "wraparound bad"); + } wpipe->pipe_buffer.in = size - segsize; } - + wpipe->pipe_buffer.cnt += size; if (wpipe->pipe_buffer.cnt > - wpipe->pipe_buffer.size) + wpipe->pipe_buffer.size) { panic("Pipe buffer overflow"); - + } } pipeio_unlock(wpipe); } - if (error) + if (error) { break; - + } } else { /* * If the "read-side" has been blocked, wake it up now. 
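
[Aside: the hunks above and below reformat, without changing, pipe_write()'s two-segment copy, which handles a write that wraps past the end of the circular buffer. Below is a condensed, stand-alone illustration of just that arithmetic; the names are hypothetical, and it assumes the caller has already verified that size bytes of free space exist, as pipe_write() does before copying.]

#include <stddef.h>
#include <string.h>

/* pipe_buf/buf_size/in stand in for the wpipe->pipe_buffer fields. */
static void
wraparound_copy(char *pipe_buf, size_t buf_size, size_t in,
    const char *src, size_t size)
{
	/* First segment: from the write index to the end of the buffer. */
	size_t segsize = buf_size - in;

	if (segsize > size) {
		segsize = size;
	}
	memcpy(&pipe_buf[in], src, segsize);

	/* Second segment: the remainder wraps around to index 0 (state 3). */
	if (segsize < size) {
		memcpy(&pipe_buf[0], src + segsize, size - segsize);
	}
}
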
@@ -1091,7 +1115,7 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, if (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) { error = EPIPE; break; - } + } /* * We have no more space and have something to offer, @@ -1103,8 +1127,9 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, error = msleep(wpipe, PIPE_MTX(wpipe), PRIBIO | PCATCH, "pipewr", 0); - if (error != 0) + if (error != 0) { break; + } } } --wpipe->pipe_busy; @@ -1128,14 +1153,14 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, pipeselwakeup(wpipe, wpipe); } -#ifndef CONFIG_EMBEDDED +#ifndef CONFIG_EMBEDDED /* Update modification, status change (# of bytes in pipe) times */ pipe_touch(rpipe, PIPE_MTIME | PIPE_CTIME); pipe_touch(wpipe, PIPE_MTIME | PIPE_CTIME); #endif PIPE_UNLOCK(rpipe); - return (error); + return error; } /* @@ -1144,7 +1169,7 @@ pipe_write(struct fileproc *fp, struct uio *uio, __unused int flags, /* ARGSUSED 3 */ static int pipe_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, - __unused vfs_context_t ctx) + __unused vfs_context_t ctx) { struct pipe *mpipe = (struct pipe *)fp->f_data; #if CONFIG_MACF @@ -1158,15 +1183,14 @@ pipe_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, if (error) { PIPE_UNLOCK(mpipe); - return (error); + return error; } #endif switch (cmd) { - case FIONBIO: PIPE_UNLOCK(mpipe); - return (0); + return 0; case FIOASYNC: if (*(int *)data) { @@ -1175,28 +1199,27 @@ pipe_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, mpipe->pipe_state &= ~PIPE_ASYNC; } PIPE_UNLOCK(mpipe); - return (0); + return 0; case FIONREAD: *(int *)data = mpipe->pipe_buffer.cnt; PIPE_UNLOCK(mpipe); - return (0); + return 0; case TIOCSPGRP: mpipe->pipe_pgid = *(int *)data; PIPE_UNLOCK(mpipe); - return (0); + return 0; case TIOCGPGRP: *(int *)data = mpipe->pipe_pgid; PIPE_UNLOCK(mpipe); - return (0); - + return 0; } PIPE_UNLOCK(mpipe); - return (ENOTTY); + return ENOTTY; } @@ -1207,13 +1230,14 @@ pipe_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) struct pipe *wpipe; int retnum = 0; - if (rpipe == NULL || rpipe == (struct pipe *)-1) - return (retnum); + if (rpipe == NULL || rpipe == (struct pipe *)-1) { + return retnum; + } PIPE_LOCK(rpipe); wpipe = rpipe->pipe_peer; - + #if CONFIG_MACF /* @@ -1223,44 +1247,42 @@ pipe_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) */ if (mac_pipe_check_select(vfs_context_ucred(ctx), rpipe, which)) { PIPE_UNLOCK(rpipe); - return (0); + return 0; } #endif - switch (which) { - - case FREAD: + switch (which) { + case FREAD: if ((rpipe->pipe_state & PIPE_DIRECTW) || (rpipe->pipe_buffer.cnt > 0) || (rpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF))) { - - retnum = 1; + retnum = 1; } else { - rpipe->pipe_state |= PIPE_SEL; - selrecord(vfs_context_proc(ctx), &rpipe->pipe_sel, wql); + rpipe->pipe_state |= PIPE_SEL; + selrecord(vfs_context_proc(ctx), &rpipe->pipe_sel, wql); } break; - case FWRITE: - if (wpipe) + case FWRITE: + if (wpipe) { wpipe->pipe_state |= PIPE_WSELECT; + } if (wpipe == NULL || (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF)) || (((wpipe->pipe_state & PIPE_DIRECTW) == 0) && - (MAX_PIPESIZE(wpipe) - wpipe->pipe_buffer.cnt) >= PIPE_BUF)) { - - retnum = 1; + (MAX_PIPESIZE(wpipe) - wpipe->pipe_buffer.cnt) >= PIPE_BUF)) { + retnum = 1; } else { - wpipe->pipe_state |= PIPE_SEL; + wpipe->pipe_state |= PIPE_SEL; selrecord(vfs_context_proc(ctx), &wpipe->pipe_sel, wql); } break; - case 0: - rpipe->pipe_state |= PIPE_SEL; + case 0: + rpipe->pipe_state |= PIPE_SEL; 
selrecord(vfs_context_proc(ctx), &rpipe->pipe_sel, wql); break; - } + } PIPE_UNLOCK(rpipe); - return (retnum); + return retnum; } @@ -1268,16 +1290,17 @@ pipe_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) static int pipe_close(struct fileglob *fg, __unused vfs_context_t ctx) { - struct pipe *cpipe; + struct pipe *cpipe; proc_fdlock_spin(vfs_context_proc(ctx)); cpipe = (struct pipe *)fg->fg_data; fg->fg_data = NULL; proc_fdunlock(vfs_context_proc(ctx)); - if (cpipe) - pipeclose(cpipe); + if (cpipe) { + pipeclose(cpipe); + } - return (0); + return 0; } static void @@ -1286,8 +1309,8 @@ pipe_free_kmem(struct pipe *cpipe) if (cpipe->pipe_buffer.buffer != NULL) { OSAddAtomic(-(cpipe->pipe_buffer.size), &amountpipekva); OSAddAtomic(-1, &amountpipes); - kfree((void *)cpipe->pipe_buffer.buffer, - cpipe->pipe_buffer.size); + kfree(cpipe->pipe_buffer.buffer, + cpipe->pipe_buffer.size); cpipe->pipe_buffer.buffer = NULL; cpipe->pipe_buffer.size = 0; } @@ -1301,12 +1324,14 @@ pipeclose(struct pipe *cpipe) { struct pipe *ppipe; - if (cpipe == NULL) + if (cpipe == NULL) { return; + } /* partially created pipes won't have a valid mutex. */ - if (PIPE_MTX(cpipe) != NULL) + if (PIPE_MTX(cpipe) != NULL) { PIPE_LOCK(cpipe); - + } + /* * If the other side is blocked, wake it up saying that @@ -1315,35 +1340,36 @@ pipeclose(struct pipe *cpipe) cpipe->pipe_state &= ~PIPE_DRAIN; cpipe->pipe_state |= PIPE_EOF; pipeselwakeup(cpipe, cpipe); - + while (cpipe->pipe_busy) { cpipe->pipe_state |= PIPE_WANT; wakeup(cpipe); - msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0); + msleep(cpipe, PIPE_MTX(cpipe), PRIBIO, "pipecl", 0); } #if CONFIG_MACF /* * Free the shared pipe label only after the two ends are disconnected. */ - if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL) + if (cpipe->pipe_label != NULL && cpipe->pipe_peer == NULL) { mac_pipe_label_destroy(cpipe); + } #endif /* * Disconnect from peer */ if ((ppipe = cpipe->pipe_peer) != NULL) { - ppipe->pipe_state &= ~(PIPE_DRAIN); ppipe->pipe_state |= PIPE_EOF; pipeselwakeup(ppipe, ppipe); wakeup(ppipe); - if (cpipe->pipe_state & PIPE_KNOTE) - KNOTE(&ppipe->pipe_sel.si_note, 1); + if (cpipe->pipe_state & PIPE_KNOTE) { + KNOTE(&ppipe->pipe_sel.si_note, 1); + } postpipeevent(ppipe, EV_RCLOSED); @@ -1377,7 +1403,6 @@ pipeclose(struct pipe *cpipe) zfree(pipe_zone, cpipe); pipe_garbage_collect(NULL); } - } /*ARGSUSED*/ @@ -1401,14 +1426,15 @@ filt_piperead_common(struct knote *kn, struct pipe *rpipe) } else { int64_t lowwat = 1; if (kn->kn_sfflags & NOTE_LOWAT) { - if (rpipe->pipe_buffer.size && kn->kn_sdata > MAX_PIPESIZE(rpipe)) + if (rpipe->pipe_buffer.size && kn->kn_sdata > MAX_PIPESIZE(rpipe)) { lowwat = MAX_PIPESIZE(rpipe); - else if (kn->kn_sdata > lowwat) + } else if (kn->kn_sdata > lowwat) { lowwat = kn->kn_sdata; + } } retval = kn->kn_data >= lowwat; } - return (retval); + return retval; } static int @@ -1458,7 +1484,7 @@ filt_pipereadprocess(struct knote *kn, struct filt_process_s *data, struct keven } PIPE_UNLOCK(rpipe); - return (retval); + return retval; } /*ARGSUSED*/ @@ -1475,20 +1501,21 @@ filt_pipewrite_common(struct knote *kn, struct pipe *rpipe) if ((wpipe == NULL) || (wpipe->pipe_state & (PIPE_DRAIN | PIPE_EOF))) { kn->kn_data = 0; - kn->kn_flags |= EV_EOF; - return (1); + kn->kn_flags |= EV_EOF; + return 1; } kn->kn_data = MAX_PIPESIZE(wpipe) - wpipe->pipe_buffer.cnt; int64_t lowwat = PIPE_BUF; if (kn->kn_sfflags & NOTE_LOWAT) { - if (wpipe->pipe_buffer.size && kn->kn_sdata > MAX_PIPESIZE(wpipe)) + if 
(wpipe->pipe_buffer.size && kn->kn_sdata > MAX_PIPESIZE(wpipe)) { lowwat = MAX_PIPESIZE(wpipe); - else if (kn->kn_sdata > lowwat) + } else if (kn->kn_sdata > lowwat) { lowwat = kn->kn_sdata; + } } - return (kn->kn_data >= lowwat); + return kn->kn_data >= lowwat; } /*ARGSUSED*/ @@ -1546,7 +1573,7 @@ filt_pipewriteprocess(struct knote *kn, struct filt_process_s *data, struct keve /*ARGSUSED*/ static int pipe_kqfilter(__unused struct fileproc *fp, struct knote *kn, - __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) + __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) { struct pipe *cpipe = (struct pipe *)kn->kn_fp->f_data; int res; @@ -1581,26 +1608,28 @@ pipe_kqfilter(__unused struct fileproc *fp, struct knote *kn, /* * other end of pipe has been closed */ - PIPE_UNLOCK(cpipe); + PIPE_UNLOCK(cpipe); kn->kn_flags = EV_ERROR; kn->kn_data = EPIPE; return 0; } - if (cpipe->pipe_peer) - cpipe = cpipe->pipe_peer; + if (cpipe->pipe_peer) { + cpipe = cpipe->pipe_peer; + } /* determine initial state */ res = filt_pipewrite_common(kn, cpipe); break; default: - PIPE_UNLOCK(cpipe); + PIPE_UNLOCK(cpipe); kn->kn_flags = EV_ERROR; kn->kn_data = EINVAL; return 0; } - if (KNOTE_ATTACH(&cpipe->pipe_sel.si_note, kn)) - cpipe->pipe_state |= PIPE_KNOTE; + if (KNOTE_ATTACH(&cpipe->pipe_sel.si_note, kn)) { + cpipe->pipe_state |= PIPE_KNOTE; + } PIPE_UNLOCK(cpipe); return res; @@ -1614,15 +1643,16 @@ filt_pipedetach(struct knote *kn) PIPE_LOCK(cpipe); if (kn->kn_filter == EVFILT_WRITE) { - if (cpipe->pipe_peer == NULL) { - PIPE_UNLOCK(cpipe); + if (cpipe->pipe_peer == NULL) { + PIPE_UNLOCK(cpipe); return; } cpipe = cpipe->pipe_peer; } if (cpipe->pipe_state & PIPE_KNOTE) { - if (KNOTE_DETACH(&cpipe->pipe_sel.si_note, kn)) - cpipe->pipe_state &= ~PIPE_KNOTE; + if (KNOTE_DETACH(&cpipe->pipe_sel.si_note, kn)) { + cpipe->pipe_state &= ~PIPE_KNOTE; + } } PIPE_UNLOCK(cpipe); } @@ -1631,47 +1661,49 @@ int fill_pipeinfo(struct pipe * cpipe, struct pipe_info * pinfo) { #if CONFIG_MACF - int error; + int error; #endif struct timespec now; struct vinfo_stat * ub; int pipe_size = 0; int pipe_count; - if (cpipe == NULL) - return (EBADF); + if (cpipe == NULL) { + return EBADF; + } PIPE_LOCK(cpipe); #if CONFIG_MACF error = mac_pipe_check_stat(kauth_cred_get(), cpipe); if (error) { PIPE_UNLOCK(cpipe); - return (error); + return error; } #endif if (cpipe->pipe_buffer.buffer == 0) { - /* + /* * must be stat'ing the write fd */ - if (cpipe->pipe_peer) { - /* + if (cpipe->pipe_peer) { + /* * the peer still exists, use its info */ - pipe_size = MAX_PIPESIZE(cpipe->pipe_peer); + pipe_size = MAX_PIPESIZE(cpipe->pipe_peer); pipe_count = cpipe->pipe_peer->pipe_buffer.cnt; } else { pipe_count = 0; } } else { - pipe_size = MAX_PIPESIZE(cpipe); + pipe_size = MAX_PIPESIZE(cpipe); pipe_count = cpipe->pipe_buffer.cnt; } /* * since the peer's buffer is set up outside of the lock * we might catch it in a transient state */ - if (pipe_size == 0) + if (pipe_size == 0) { pipe_size = PIPE_SIZE; + } ub = &pinfo->pipe_stat; @@ -1679,8 +1711,9 @@ fill_pipeinfo(struct pipe * cpipe, struct pipe_info * pinfo) ub->vst_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; ub->vst_blksize = pipe_size; ub->vst_size = pipe_count; - if (ub->vst_blksize != 0) + if (ub->vst_blksize != 0) { ub->vst_blocks = (ub->vst_size + ub->vst_blksize - 1) / ub->vst_blksize; + } ub->vst_nlink = 1; ub->vst_uid = kauth_getuid(); @@ -1707,30 +1740,29 @@ fill_pipeinfo(struct pipe * cpipe, struct pipe_info * pinfo) PIPE_UNLOCK(cpipe); - return (0); + return
0; } -static int +static int pipe_drain(struct fileproc *fp, __unused vfs_context_t ctx) { - /* Note: fdlock already held */ struct pipe *ppipe, *cpipe = (struct pipe *)(fp->f_fglob->fg_data); if (cpipe) { PIPE_LOCK(cpipe); - cpipe->pipe_state |= PIPE_DRAIN; + cpipe->pipe_state |= PIPE_DRAIN; cpipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW); wakeup(cpipe); - + /* Must wake up peer: a writer sleeps on the read side */ if ((ppipe = cpipe->pipe_peer)) { ppipe->pipe_state |= PIPE_DRAIN; ppipe->pipe_state &= ~(PIPE_WANTR | PIPE_WANTW); wakeup(ppipe); } - + PIPE_UNLOCK(cpipe); return 0; } @@ -1739,7 +1771,7 @@ pipe_drain(struct fileproc *fp, __unused vfs_context_t ctx) } - /* +/* * When a thread sets a write-select on a pipe, it creates an implicit, * untracked dependency between that thread and the peer of the pipe * on which the select is set. If the peer pipe is closed and freed @@ -1748,7 +1780,7 @@ pipe_drain(struct fileproc *fp, __unused vfs_context_t ctx) * we notice whenever a dangerous select() is set on a pipe, and * defer the final deletion of the pipe until those select()s are all * resolved. Since we can't currently detect exactly when that - * resolution happens, we use a simple garbage collection queue to + * resolution happens, we use a simple garbage collection queue to * reap the at-risk pipes 'later'. */ static void @@ -1768,8 +1800,9 @@ pipe_garbage_collect(struct pipe *cpipe) old = now - old; while ((pgp = pipe_garbage_head) && pgp->pg_timestamp < old) { pipe_garbage_head = pgp->pg_next; - if (pipe_garbage_head == NULL) + if (pipe_garbage_head == NULL) { pipe_garbage_tail = NULL; + } pipe_garbage_count--; zfree(pipe_zone, pgp->pg_pipe); zfree(pipe_garbage_zone, pgp); @@ -1798,16 +1831,18 @@ pipe_garbage_collect(struct pipe *cpipe) pgp->pg_timestamp = now; pgp->pg_next = NULL; - if (pipe_garbage_tail) + if (pipe_garbage_tail) { pipe_garbage_tail->pg_next = pgp; + } pipe_garbage_tail = pgp; - if (pipe_garbage_head == NULL) + if (pipe_garbage_head == NULL) { pipe_garbage_head = pipe_garbage_tail; + } - if (pipe_garbage_count++ >= PIPE_GARBAGE_QUEUE_LIMIT) + if (pipe_garbage_count++ >= PIPE_GARBAGE_QUEUE_LIMIT) { panic("Length of pipe garbage queue exceeded %d", PIPE_GARBAGE_QUEUE_LIMIT); + } } lck_mtx_unlock(pipe_garbage_lock); } - diff --git a/bsd/kern/sys_reason.c b/bsd/kern/sys_reason.c index ea260cdc3..518df53ab 100644 --- a/bsd/kern/sys_reason.c +++ b/bsd/kern/sys_reason.c @@ -46,16 +46,16 @@ extern int maxproc; /* * Lock group attributes for os_reason subsystem */ -lck_grp_attr_t *os_reason_lock_grp_attr; -lck_grp_t *os_reason_lock_grp; -lck_attr_t *os_reason_lock_attr; +lck_grp_attr_t *os_reason_lock_grp_attr; +lck_grp_t *os_reason_lock_grp; +lck_attr_t *os_reason_lock_attr; -#define OS_REASON_RESERVE_COUNT 100 -#define OS_REASON_MAX_COUNT (maxproc + 100) +#define OS_REASON_RESERVE_COUNT 100 +#define OS_REASON_MAX_COUNT (maxproc + 100) static struct zone *os_reason_zone; static int os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize, - boolean_t can_block); + boolean_t can_block); void os_reason_init() @@ -73,7 +73,7 @@ os_reason_init() * Create OS reason zone.
*/ os_reason_zone = zinit(sizeof(struct os_reason), OS_REASON_MAX_COUNT * sizeof(struct os_reason), - OS_REASON_MAX_COUNT, "os reasons"); + OS_REASON_MAX_COUNT, "os reasons"); if (os_reason_zone == NULL) { panic("failed to initialize os_reason_zone"); } @@ -113,10 +113,10 @@ os_reason_create(uint32_t osr_namespace, uint64_t osr_code) */ if (os_reason_debug_disabled) { kprintf("os_reason_create: failed to allocate reason with namespace: %u, code : %llu\n", - osr_namespace, osr_code); + osr_namespace, osr_code); } else { panic("os_reason_create: failed to allocate reason with namespace: %u, code: %llu\n", - osr_namespace, osr_code); + osr_namespace, osr_code); } #endif return new_reason; @@ -164,7 +164,7 @@ os_reason_dealloc_buffer(os_reason_t cur_reason) * Returns: * 0 on success * EINVAL if the passed reason pointer is invalid or the requested size is - * larger than REASON_BUFFER_MAX_SIZE + * larger than REASON_BUFFER_MAX_SIZE * EIO if we fail to initialize the kcdata buffer */ int @@ -183,7 +183,7 @@ os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize) * Returns: * 0 on success * EINVAL if the passed reason pointer is invalid or the requested size is - * larger than REASON_BUFFER_MAX_SIZE + * larger than REASON_BUFFER_MAX_SIZE * ENOMEM if unable to allocate memory for the buffer * EIO if we fail to initialize the kcdata buffer */ @@ -195,7 +195,7 @@ os_reason_alloc_buffer_noblock(os_reason_t cur_reason, uint32_t osr_bufsize) static int os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize, - boolean_t can_block) + boolean_t can_block) { if (cur_reason == OS_REASON_NULL) { return EINVAL; @@ -230,7 +230,7 @@ os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize, cur_reason->osr_bufsize = osr_bufsize; if (kcdata_memory_static_init(&cur_reason->osr_kcd_descriptor, (mach_vm_address_t) cur_reason->osr_kcd_buf, - KCDATA_BUFFER_BEGIN_OS_REASON, osr_bufsize, KCFLAG_USE_MEMCOPY) != KERN_SUCCESS) { + KCDATA_BUFFER_BEGIN_OS_REASON, osr_bufsize, KCFLAG_USE_MEMCOPY) != KERN_SUCCESS) { os_reason_dealloc_buffer(cur_reason); lck_mtx_unlock(&cur_reason->osr_lock); diff --git a/bsd/kern/sys_socket.c b/bsd/kern/sys_socket.c index cc4d778bd..9988ba7c6 100644 --- a/bsd/kern/sys_socket.c +++ b/bsd/kern/sys_socket.c @@ -73,7 +73,7 @@ #include #include #include -#include /* XXX */ +#include /* XXX */ #include #include #include @@ -112,9 +112,9 @@ const struct fileops socketops = { static int soo_read(struct fileproc *fp, struct uio *uio, __unused int flags, #if !CONFIG_MACF_SOCKET - __unused + __unused #endif - vfs_context_t ctx) + vfs_context_t ctx) { struct socket *so; int stat; @@ -128,25 +128,26 @@ soo_read(struct fileproc *fp, struct uio *uio, __unused int flags, if ((so = (struct socket *)fp->f_fglob->fg_data) == NULL) { /* This is not a valid open file descriptor */ - return (EBADF); + return EBADF; } #if CONFIG_MACF_SOCKET error = mac_socket_check_receive(vfs_context_ucred(ctx), so); - if (error) - return (error); + if (error) { + return error; + } #endif /* CONFIG_MACF_SOCKET */ fsoreceive = so->so_proto->pr_usrreqs->pru_soreceive; stat = (*fsoreceive)(so, 0, uio, 0, 0, 0); - return (stat); + return stat; } /* ARGSUSED */ static int soo_write(struct fileproc *fp, struct uio *uio, __unused int flags, - vfs_context_t ctx) + vfs_context_t ctx) { struct socket *so; int stat; @@ -161,14 +162,15 @@ soo_write(struct fileproc *fp, struct uio *uio, __unused int flags, if ((so = (struct socket *)fp->f_fglob->fg_data) == NULL) { /* This is not a valid 
open file descriptor */ - return (EBADF); + return EBADF; } #if CONFIG_MACF_SOCKET /* JMM - have to fetch the socket's remote addr */ error = mac_socket_check_send(vfs_context_ucred(ctx), so, NULL); - if (error) - return (error); + if (error) { + return error; + } #endif /* CONFIG_MACF_SOCKET */ fsosend = so->so_proto->pr_usrreqs->pru_sosend; @@ -177,10 +179,11 @@ soo_write(struct fileproc *fp, struct uio *uio, __unused int flags, /* Generation of SIGPIPE can be controlled per socket */ procp = vfs_context_proc(ctx); - if (stat == EPIPE && !(so->so_flags & SOF_NOSIGPIPE)) + if (stat == EPIPE && !(so->so_flags & SOF_NOSIGPIPE)) { psignal(procp, SIGPIPE); + } - return (stat); + return stat; } __private_extern__ int @@ -191,8 +194,9 @@ soioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) #if CONFIG_MACF_SOCKET_SUBSET error = mac_socket_check_ioctl(kauth_cred_get(), so, cmd); - if (error) - return (error); + if (error) { + return error; + } #endif socket_lock(so, 1); @@ -213,24 +217,26 @@ soioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) default: error = sflt_ioctl(so, cmd, data); - if (error != 0) + if (error != 0) { goto out; + } break; } } switch (cmd) { - case FIONBIO: /* int */ - bcopy(data, &int_arg, sizeof (int_arg)); - if (int_arg) + case FIONBIO: /* int */ + bcopy(data, &int_arg, sizeof(int_arg)); + if (int_arg) { so->so_state |= SS_NBIO; - else + } else { so->so_state &= ~SS_NBIO; + } goto out; - case FIOASYNC: /* int */ - bcopy(data, &int_arg, sizeof (int_arg)); + case FIOASYNC: /* int */ + bcopy(data, &int_arg, sizeof(int_arg)); if (int_arg) { so->so_state |= SS_ASYNC; so->so_rcv.sb_flags |= SB_ASYNC; @@ -242,35 +248,35 @@ soioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) } goto out; - case FIONREAD: /* int */ - bcopy(&so->so_rcv.sb_cc, data, sizeof (u_int32_t)); + case FIONREAD: /* int */ + bcopy(&so->so_rcv.sb_cc, data, sizeof(u_int32_t)); goto out; - case SIOCSPGRP: /* int */ - bcopy(data, &so->so_pgid, sizeof (pid_t)); + case SIOCSPGRP: /* int */ + bcopy(data, &so->so_pgid, sizeof(pid_t)); goto out; - case SIOCGPGRP: /* int */ - bcopy(&so->so_pgid, data, sizeof (pid_t)); + case SIOCGPGRP: /* int */ + bcopy(&so->so_pgid, data, sizeof(pid_t)); goto out; - case SIOCATMARK: /* int */ + case SIOCATMARK: /* int */ int_arg = (so->so_state & SS_RCVATMARK) != 0; - bcopy(&int_arg, data, sizeof (int_arg)); + bcopy(&int_arg, data, sizeof(int_arg)); goto out; - case SIOCSETOT: /* int; deprecated */ + case SIOCSETOT: /* int; deprecated */ error = EOPNOTSUPP; goto out; - case SIOCGASSOCIDS32: /* so_aidreq32 */ - case SIOCGASSOCIDS64: /* so_aidreq64 */ - case SIOCGCONNIDS32: /* so_cidreq32 */ - case SIOCGCONNIDS64: /* so_cidreq64 */ - case SIOCGCONNINFO32: /* so_cinforeq32 */ - case SIOCGCONNINFO64: /* so_cinforeq64 */ - case SIOCSCONNORDER: /* so_cordreq */ - case SIOCGCONNORDER: /* so_cordreq */ + case SIOCGASSOCIDS32: /* so_aidreq32 */ + case SIOCGASSOCIDS64: /* so_aidreq64 */ + case SIOCGCONNIDS32: /* so_cidreq32 */ + case SIOCGCONNIDS64: /* so_cidreq64 */ + case SIOCGCONNINFO32: /* so_cinforeq32 */ + case SIOCGCONNINFO64: /* so_cinforeq64 */ + case SIOCSCONNORDER: /* so_cordreq */ + case SIOCGCONNORDER: /* so_cordreq */ error = (*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, NULL, p); goto out; @@ -284,20 +290,22 @@ soioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) if (IOCGROUP(cmd) == 'i') { error = ifioctllocked(so, cmd, data, p); } else { - if (IOCGROUP(cmd) == 'r') + if (IOCGROUP(cmd) == 'r') { error = 
rtioctl(cmd, data, p); - else + } else { error = (*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data, NULL, p); + } } out: socket_unlock(so, 1); - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } - return (error); + return error; } int @@ -308,10 +316,10 @@ soo_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx) if ((so = (struct socket *)fp->f_fglob->fg_data) == NULL) { /* This is not a valid open file descriptor */ - return (EBADF); + return EBADF; } - return (soioctl(so, cmd, data, procp)); + return soioctl(so, cmd, data, procp); } int @@ -321,20 +329,21 @@ soo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) int retnum = 0; proc_t procp; - if (so == NULL || so == (struct socket *)-1) - return (0); + if (so == NULL || so == (struct socket *)-1) { + return 0; + } procp = vfs_context_proc(ctx); #if CONFIG_MACF_SOCKET - if (mac_socket_check_select(vfs_context_ucred(ctx), so, which) != 0) - return (0); + if (mac_socket_check_select(vfs_context_ucred(ctx), so, which) != 0) { + return 0; + } #endif /* CONFIG_MACF_SOCKET */ socket_lock(so, 1); switch (which) { - case FREAD: so->so_rcv.sb_flags |= SB_SEL; if (soreadable(so)) { @@ -368,7 +377,7 @@ soo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) done: socket_unlock(so, 1); - return (retnum); + return retnum; } int @@ -382,36 +391,41 @@ soo_stat(struct socket *so, void *ub, int isstat64) #if CONFIG_MACF_SOCKET_SUBSET ret = mac_socket_check_stat(kauth_cred_get(), so); - if (ret) - return (ret); + if (ret) { + return ret; + } #endif if (isstat64 != 0) { sb64 = (struct stat64 *)ub; - bzero((caddr_t)sb64, sizeof (*sb64)); + bzero((caddr_t)sb64, sizeof(*sb64)); } else { sb = (struct stat *)ub; - bzero((caddr_t)sb, sizeof (*sb)); + bzero((caddr_t)sb, sizeof(*sb)); } socket_lock(so, 1); if (isstat64 != 0) { sb64->st_mode = S_IFSOCK; if ((so->so_state & SS_CANTRCVMORE) == 0 || - so->so_rcv.sb_cc != 0) + so->so_rcv.sb_cc != 0) { sb64->st_mode |= S_IRUSR | S_IRGRP | S_IROTH; - if ((so->so_state & SS_CANTSENDMORE) == 0) + } + if ((so->so_state & SS_CANTSENDMORE) == 0) { sb64->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH; + } sb64->st_size = so->so_rcv.sb_cc - so->so_rcv.sb_ctl; sb64->st_uid = kauth_cred_getuid(so->so_cred); sb64->st_gid = kauth_cred_getgid(so->so_cred); } else { sb->st_mode = S_IFSOCK; if ((so->so_state & SS_CANTRCVMORE) == 0 || - so->so_rcv.sb_cc != 0) + so->so_rcv.sb_cc != 0) { sb->st_mode |= S_IRUSR | S_IRGRP | S_IROTH; - if ((so->so_state & SS_CANTSENDMORE) == 0) + } + if ((so->so_state & SS_CANTSENDMORE) == 0) { sb->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH; + } sb->st_size = so->so_rcv.sb_cc - so->so_rcv.sb_ctl; sb->st_uid = kauth_cred_getuid(so->so_cred); sb->st_gid = kauth_cred_getgid(so->so_cred); @@ -419,7 +433,7 @@ soo_stat(struct socket *so, void *ub, int isstat64) ret = (*so->so_proto->pr_usrreqs->pru_sense)(so, ub, isstat64); socket_unlock(so, 1); - return (ret); + return ret; } /* ARGSUSED */ @@ -432,10 +446,11 @@ soo_close(struct fileglob *fg, __unused vfs_context_t ctx) sp = (struct socket *)fg->fg_data; fg->fg_data = NULL; - if (sp) + if (sp) { error = soclose(sp); + } - return (error); + return error; } static int @@ -456,7 +471,7 @@ soo_drain(struct fileproc *fp, __unused vfs_context_t ctx) socket_unlock(so, 1); } - return (error); + return error; } /* diff --git a/bsd/kern/sys_ulock.c b/bsd/kern/sys_ulock.c index 0d8664c7e..b8046c66e 100644 --- a/bsd/kern/sys_ulock.c +++ b/bsd/kern/sys_ulock.c @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -62,6 +62,7 @@ #define XNU_TEST_BITMAP #include +#include #include /* @@ -92,7 +93,7 @@ static lck_grp_t *ull_lck_grp; typedef lck_spin_t ull_lock_t; #define ull_lock_init(ull) lck_spin_init(&ull->ull_lock, ull_lck_grp, NULL) #define ull_lock_destroy(ull) lck_spin_destroy(&ull->ull_lock, ull_lck_grp) -#define ull_lock(ull) lck_spin_lock(&ull->ull_lock) +#define ull_lock(ull) lck_spin_lock_grp(&ull->ull_lock, ull_lck_grp) #define ull_unlock(ull) lck_spin_unlock(&ull->ull_lock) #define ull_assert_owned(ull) LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_OWNED) #define ull_assert_notwned(ull) LCK_SPIN_ASSERT(&ull->ull_lock, LCK_ASSERT_NOTOWNED) @@ -101,15 +102,15 @@ typedef lck_spin_t ull_lock_t; #define EVENT_TO_ULOCK(event) ((ull_t *)event) typedef struct __attribute__((packed)) { - user_addr_t ulk_addr; - pid_t ulk_pid; + user_addr_t ulk_addr; + pid_t ulk_pid; } ulk_t; inline static bool ull_key_match(ulk_t *a, ulk_t *b) { - return ((a->ulk_pid == b->ulk_pid) && - (a->ulk_addr == b->ulk_addr)); + return (a->ulk_pid == b->ulk_pid) && + (a->ulk_addr == b->ulk_addr); } typedef struct ull { @@ -118,21 +119,21 @@ typedef struct ull { * i.e. it may be out of date WRT the real value in userspace. 
*/ thread_t ull_owner; /* holds +1 thread reference */ - ulk_t ull_key; - ulk_t ull_saved_key; - ull_lock_t ull_lock; - uint ull_bucket_index; - int32_t ull_nwaiters; - int32_t ull_max_nwaiters; - int32_t ull_refcount; - uint8_t ull_opcode; + ulk_t ull_key; + ulk_t ull_saved_key; + ull_lock_t ull_lock; + uint ull_bucket_index; + int32_t ull_nwaiters; + int32_t ull_max_nwaiters; + int32_t ull_refcount; + uint8_t ull_opcode; struct turnstile *ull_turnstile; - queue_chain_t ull_hash_link; + queue_chain_t ull_hash_link; } ull_t; extern void ulock_initialize(void); -#define ULL_MUST_EXIST 0x0001 +#define ULL_MUST_EXIST 0x0001 static ull_t *ull_get(ulk_t *, uint32_t, ull_t **); static void ull_put(ull_t *); @@ -166,13 +167,13 @@ static ull_bucket_t *ull_bucket; static uint32_t ull_nzalloc = 0; static zone_t ull_zone; -#define ull_bucket_lock(i) lck_spin_lock(&ull_bucket[i].ulb_lock) +#define ull_bucket_lock(i) lck_spin_lock_grp(&ull_bucket[i].ulb_lock, ull_lck_grp) #define ull_bucket_unlock(i) lck_spin_unlock(&ull_bucket[i].ulb_lock) static __inline__ uint32_t -ull_hash_index(char *key, size_t length) +ull_hash_index(const void *key, size_t length) { - uint32_t hash = jenkins_hash(key, length); + uint32_t hash = os_hash_jenkins(key, length); hash &= (ull_hash_buckets - 1); @@ -185,7 +186,7 @@ ull_hash_index(char *key, size_t length) */ static_assert(sizeof(ulk_t) == sizeof(user_addr_t) + sizeof(pid_t)); -#define ULL_INDEX(keyp) ull_hash_index((char *)keyp, sizeof *keyp) +#define ULL_INDEX(keyp) ull_hash_index(keyp, sizeof *keyp) void ulock_initialize(void) @@ -199,7 +200,7 @@ ulock_initialize(void) ull_hash_buckets = (1 << (bit_ceiling(thread_max) - 2)); kprintf("%s>thread_max=%d, ull_hash_buckets=%d\n", __FUNCTION__, thread_max, ull_hash_buckets); - assert(ull_hash_buckets >= thread_max/4); + assert(ull_hash_buckets >= thread_max / 4); ull_bucket = (ull_bucket_t *)kalloc(sizeof(ull_bucket_t) * ull_hash_buckets); assert(ull_bucket != NULL); @@ -210,8 +211,8 @@ ulock_initialize(void) } ull_zone = zinit(sizeof(ull_t), - thread_max * sizeof(ull_t), - 0, "ulocks"); + thread_max * sizeof(ull_t), + 0, "ulocks"); zone_change(ull_zone, Z_NOENCRYPT, TRUE); } @@ -539,7 +540,7 @@ ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval) struct turnstile *ts; ts = turnstile_prepare((uintptr_t)ull, &ull->ull_turnstile, - TURNSTILE_NULL, TURNSTILE_ULOCK); + TURNSTILE_NULL, TURNSTILE_ULOCK); thread_set_pending_block_hint(self, kThreadWaitUserLock); if (flags & ULF_WAIT_WORKQ_DATA_CONTENTION) { @@ -551,10 +552,10 @@ ulock_wait(struct proc *p, struct ulock_wait_args *args, int32_t *retval) } turnstile_update_inheritor(ts, owner_thread, - (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD)); + (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD)); wr = waitq_assert_wait64(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), - interruptible, deadline); + interruptible, deadline); ull_unlock(ull); @@ -762,14 +763,14 @@ ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retva struct turnstile *ts; ts = turnstile_prepare((uintptr_t)ull, &ull->ull_turnstile, - TURNSTILE_NULL, TURNSTILE_ULOCK); + TURNSTILE_NULL, TURNSTILE_ULOCK); if (flags & ULF_WAKE_ALL) { waitq_wakeup64_all(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), - THREAD_AWAKENED, 0); + THREAD_AWAKENED, 0); } else if (flags & ULF_WAKE_THREAD) { kern_return_t kr = waitq_wakeup64_thread(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), - wake_thread, THREAD_AWAKENED); + wake_thread, THREAD_AWAKENED); if (kr 
!= KERN_SUCCESS) { assert(kr == KERN_NOT_WAITING); ret = EALREADY; @@ -783,7 +784,7 @@ ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retva * */ waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(ULOCK_TO_EVENT(ull)), - THREAD_AWAKENED, WAITQ_SELECT_MAX_PRI); + THREAD_AWAKENED, WAITQ_SELECT_MAX_PRI); } /* @@ -798,7 +799,7 @@ ulock_wake(struct proc *p, struct ulock_wake_args *args, __unused int32_t *retva if (ull->ull_owner == current_thread()) { turnstile_update_inheritor(ts, THREAD_NULL, - (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); + (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); old_owner = ull->ull_owner; ull->ull_owner = THREAD_NULL; diff --git a/bsd/kern/sys_work_interval.c b/bsd/kern/sys_work_interval.c index bbfdd5e61..203086a1b 100644 --- a/bsd/kern/sys_work_interval.c +++ b/bsd/kern/sys_work_interval.c @@ -40,7 +40,7 @@ int work_interval_ctl(__unused proc_t p, struct work_interval_ctl_args *uap, - __unused int32_t *retval) + __unused int32_t *retval) { uint32_t operation = uap->operation; int error = 0; @@ -51,117 +51,128 @@ work_interval_ctl(__unused proc_t p, struct work_interval_ctl_args *uap, struct kern_work_interval_create_args create_args; switch (operation) { - case WORK_INTERVAL_OPERATION_CREATE: - return ENOTSUP; - case WORK_INTERVAL_OPERATION_CREATE2: - if (uap->arg == USER_ADDR_NULL || uap->work_interval_id != 0) - return EINVAL; - if (uap->len < sizeof(create_params)) - return EINVAL; - - if ((error = copyin(uap->arg, &create_params, sizeof(create_params)))) - return error; - - if ((error = priv_check_cred(kauth_cred_get(), PRIV_WORK_INTERVAL, 0)) != 0) { - return error; - } - - create_args = (struct kern_work_interval_create_args) { - .wica_id = create_params.wicp_id, - .wica_port = create_params.wicp_port, - .wica_create_flags = create_params.wicp_create_flags, - }; - - kret = kern_work_interval_create(current_thread(), &create_args); - - /* thread already has a work interval */ - if (kret == KERN_FAILURE) - return EALREADY; - - /* port copyout failed */ - if (kret == KERN_RESOURCE_SHORTAGE) - return ENOMEM; - - /* some other failure */ - if (kret != KERN_SUCCESS) - return EINVAL; - - create_params = (struct work_interval_create_params) { - .wicp_id = create_args.wica_id, - .wicp_port = create_args.wica_port, - .wicp_create_flags = create_args.wica_create_flags, - }; - - if ((error = copyout(&create_params, uap->arg, sizeof(create_params)))) { - kern_work_interval_destroy(current_thread(), create_args.wica_id); - return error; - } - break; - case WORK_INTERVAL_OPERATION_DESTROY: - if (uap->arg != USER_ADDR_NULL || uap->work_interval_id == 0) { - return EINVAL; - } - - /* - * No privilege check, we assume a previous WORK_INTERVAL_OPERATION_CREATE - * operation would have allocated a work interval ID for the current - * thread, which the scheduler will validate. - */ - kret = kern_work_interval_destroy(current_thread(), uap->work_interval_id); - if (kret != KERN_SUCCESS) - return EINVAL; - - break; - case WORK_INTERVAL_OPERATION_NOTIFY: - if (uap->arg == USER_ADDR_NULL || uap->work_interval_id == 0) - return EINVAL; - - if (uap->len < sizeof(notification)) - return EINVAL; - - /* - * No privilege check, we assume a previous WORK_INTERVAL_OPERATION_CREATE - * operation would have allocated a work interval ID for the current - * thread, which the scheduler will validate. 
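An aside on the error handling in the work_interval_ctl() hunk that follows: the CREATE2 path translates Mach kern_return_t results from kern_work_interval_create() into BSD errno values inline. A minimal sketch of that mapping, pulled into a standalone helper for illustration only — the helper name is invented and not part of this patch:

#include <mach/kern_return.h>
#include <sys/errno.h>

/* Hypothetical helper mirroring the inline kret -> errno mapping in
 * WORK_INTERVAL_OPERATION_CREATE2 below. */
static int
wi_kret_to_errno(kern_return_t kret)
{
	switch (kret) {
	case KERN_SUCCESS:
		return 0;
	case KERN_FAILURE:              /* thread already has a work interval */
		return EALREADY;
	case KERN_RESOURCE_SHORTAGE:    /* port copyout failed */
		return ENOMEM;
	default:                        /* some other failure */
		return EINVAL;
	}
}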
- */ - if ((error = copyin(uap->arg, &notification, sizeof(notification)))) - return error; - - struct kern_work_interval_args kwi_args = { - .work_interval_id = uap->work_interval_id, - .start = notification.start, - .finish = notification.finish, - .deadline = notification.deadline, - .next_start = notification.next_start, - .notify_flags = notification.notify_flags, - .create_flags = notification.create_flags, - }; - - kret = kern_work_interval_notify(current_thread(), &kwi_args); - if (kret != KERN_SUCCESS) - return EINVAL; - - break; - case WORK_INTERVAL_OPERATION_JOIN: - if (uap->arg != USER_ADDR_NULL) { - return EINVAL; - } - - /* - * No privilege check, because the work interval port - * is a capability. - */ - kret = kern_work_interval_join(current_thread(), - (mach_port_name_t)uap->work_interval_id); - if (kret != KERN_SUCCESS) - return EINVAL; - - break; - - default: - return ENOTSUP; + case WORK_INTERVAL_OPERATION_CREATE: + return ENOTSUP; + case WORK_INTERVAL_OPERATION_CREATE2: + if (uap->arg == USER_ADDR_NULL || uap->work_interval_id != 0) { + return EINVAL; + } + if (uap->len < sizeof(create_params)) { + return EINVAL; + } + + if ((error = copyin(uap->arg, &create_params, sizeof(create_params)))) { + return error; + } + + if ((error = priv_check_cred(kauth_cred_get(), PRIV_WORK_INTERVAL, 0)) != 0) { + return error; + } + + create_args = (struct kern_work_interval_create_args) { + .wica_id = create_params.wicp_id, + .wica_port = create_params.wicp_port, + .wica_create_flags = create_params.wicp_create_flags, + }; + + kret = kern_work_interval_create(current_thread(), &create_args); + + /* thread already has a work interval */ + if (kret == KERN_FAILURE) { + return EALREADY; + } + + /* port copyout failed */ + if (kret == KERN_RESOURCE_SHORTAGE) { + return ENOMEM; + } + + /* some other failure */ + if (kret != KERN_SUCCESS) { + return EINVAL; + } + + create_params = (struct work_interval_create_params) { + .wicp_id = create_args.wica_id, + .wicp_port = create_args.wica_port, + .wicp_create_flags = create_args.wica_create_flags, + }; + + if ((error = copyout(&create_params, uap->arg, sizeof(create_params)))) { + kern_work_interval_destroy(current_thread(), create_args.wica_id); + return error; + } + break; + case WORK_INTERVAL_OPERATION_DESTROY: + if (uap->arg != USER_ADDR_NULL || uap->work_interval_id == 0) { + return EINVAL; + } + + /* + * No privilege check, we assume a previous WORK_INTERVAL_OPERATION_CREATE + * operation would have allocated a work interval ID for the current + * thread, which the scheduler will validate. + */ + kret = kern_work_interval_destroy(current_thread(), uap->work_interval_id); + if (kret != KERN_SUCCESS) { + return EINVAL; + } + + break; + case WORK_INTERVAL_OPERATION_NOTIFY: + if (uap->arg == USER_ADDR_NULL || uap->work_interval_id == 0) { + return EINVAL; + } + + if (uap->len < sizeof(notification)) { + return EINVAL; + } + + /* + * No privilege check, we assume a previous WORK_INTERVAL_OPERATION_CREATE + * operation would have allocated a work interval ID for the current + * thread, which the scheduler will validate.
+ */ + if ((error = copyin(uap->arg, &notification, sizeof(notification)))) { + return error; + } + + struct kern_work_interval_args kwi_args = { + .work_interval_id = uap->work_interval_id, + .start = notification.start, + .finish = notification.finish, + .deadline = notification.deadline, + .next_start = notification.next_start, + .notify_flags = notification.notify_flags, + .create_flags = notification.create_flags, + }; + + kret = kern_work_interval_notify(current_thread(), &kwi_args); + if (kret != KERN_SUCCESS) { + return EINVAL; + } + + break; + case WORK_INTERVAL_OPERATION_JOIN: + if (uap->arg != USER_ADDR_NULL) { + return EINVAL; + } + + /* + * No privilege check, because the work interval port + * is a capability. + */ + kret = kern_work_interval_join(current_thread(), + (mach_port_name_t)uap->work_interval_id); + if (kret != KERN_SUCCESS) { + return EINVAL; + } + + break; + + default: + return ENOTSUP; } - return (error); + return error; } - diff --git a/bsd/kern/syscalls.master b/bsd/kern/syscalls.master index 278dd224f..811d42826 100644 --- a/bsd/kern/syscalls.master +++ b/bsd/kern/syscalls.master @@ -841,4 +841,4 @@ #else 530 AUE_NULL ALL { int enosys(void); } #endif // CONFIG_WORKQUEUE -531 AUE_NULL ALL { int enosys(void); } +531 AUE_NULL ALL { uint64_t __mach_bridge_remote_time(uint64_t local_timestamp); } diff --git a/bsd/kern/sysv_ipc.c b/bsd/kern/sysv_ipc.c index 8f56757c4..926ce9f7e 100644 --- a/bsd/kern/sysv_ipc.c +++ b/bsd/kern/sysv_ipc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License.
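A note on the one-line syscalls.master change above: slot 531 is rewired from the enosys() placeholder to __mach_bridge_remote_time(uint64_t). The kernel-side declarations for such an entry are generated from syscalls.master by the syscall-generation scripts; the sketch below shows only the conventional shape of that generated interface and should not be read as the actual emitted code:

#include <stdint.h>

struct proc;

/* Conventional shape of a generated syscall entry: an args struct plus
 * a handler returning an errno, with the syscall's value delivered
 * through *retval. Illustrative only. */
struct __mach_bridge_remote_time_args {
	uint64_t local_timestamp;
};

int __mach_bridge_remote_time(struct proc *p,
    struct __mach_bridge_remote_time_args *uap, uint64_t *retval);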
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: sysv_ipc.c,v 1.7 1994/06/29 06:33:11 cgd Exp $ */ @@ -60,7 +60,7 @@ #include #include -#include /* mode constants */ +#include /* mode constants */ #include #include @@ -100,27 +100,30 @@ int ipcperm(kauth_cred_t cred, struct ipc_perm *perm, int mode_req) { - uid_t uid = kauth_cred_getuid(cred); /* avoid multiple calls */ - int want_mod_controlinfo = (mode_req & IPC_M); - int is_member; - mode_t mode_owner = (perm->mode & S_IRWXU); - mode_t mode_group = (perm->mode & S_IRWXG) << 3; - mode_t mode_world = (perm->mode & S_IRWXO) << 6; + uid_t uid = kauth_cred_getuid(cred); /* avoid multiple calls */ + int want_mod_controlinfo = (mode_req & IPC_M); + int is_member; + mode_t mode_owner = (perm->mode & S_IRWXU); + mode_t mode_group = (perm->mode & S_IRWXG) << 3; + mode_t mode_world = (perm->mode & S_IRWXO) << 6; /* Grant all rights to super user */ - if (!suser(cred, (u_short *)NULL)) - return (0); + if (!suser(cred, (u_short *)NULL)) { + return 0; + } /* Grant or deny rights based on ownership */ if (uid == perm->cuid || uid == perm->uid) { - if (want_mod_controlinfo) - return (0); + if (want_mod_controlinfo) { + return 0; + } - return ((mode_req & mode_owner) == mode_req ? 0 : EACCES); + return (mode_req & mode_owner) == mode_req ? 0 : EACCES; } else { /* everyone else who wants to modify control info is denied */ - if (want_mod_controlinfo) - return (EPERM); + if (want_mod_controlinfo) { + return EPERM; + } } /* @@ -129,30 +132,30 @@ ipcperm(kauth_cred_t cred, struct ipc_perm *perm, int mode_req) * common case. */ if ((mode_req & mode_group & mode_world) == mode_req) { - return (0); + return 0; } else { if ((mode_req & mode_group) != mode_req) { if ((!kauth_cred_ismember_gid(cred, perm->gid, &is_member) && is_member) && ((perm->gid == perm->cgid) || - (!kauth_cred_ismember_gid(cred, perm->cgid, &is_member) && is_member))) { - return (EACCES); + (!kauth_cred_ismember_gid(cred, perm->cgid, &is_member) && is_member))) { + return EACCES; } else { if ((mode_req & mode_world) != mode_req) { - return (EACCES); + return EACCES; } else { - return (0); + return 0; } } } else { if ((!kauth_cred_ismember_gid(cred, perm->gid, &is_member) && is_member) || ((perm->gid != perm->cgid) && - (!kauth_cred_ismember_gid(cred, perm->cgid, &is_member) && is_member))) { - return (0); + (!kauth_cred_ismember_gid(cred, perm->cgid, &is_member) && is_member))) { + return 0; } else { if ((mode_req & mode_world) != mode_req) { - return (EACCES); + return EACCES; } else { - return (0); + return 0; } } } diff --git a/bsd/kern/sysv_msg.c b/bsd/kern/sysv_msg.c index fad2cfbf7..4e141439d 100644 --- a/bsd/kern/sysv_msg.c +++ b/bsd/kern/sysv_msg.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
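Stepping back to the ipcperm() hunks above: the permission test folds group and world bits into the owner bit positions ((perm->mode & S_IRWXG) << 3 and (perm->mode & S_IRWXO) << 6) so that mode_req, expressed in owner-position constants, can be masked against any class with one comparison. A self-contained sketch of that normalization, with an invented function name:

#include <sys/stat.h>

/* Sketch of ipcperm()'s bit normalization: after the shifts, the same
 * (mode_req & mode_X) == mode_req test works for owner, group, and
 * world alike. */
static int
class_grants(mode_t perm_mode, int mode_req, int is_owner, int is_group)
{
	mode_t mode_owner = perm_mode & S_IRWXU;
	mode_t mode_group = (mode_t)((perm_mode & S_IRWXG) << 3);
	mode_t mode_world = (mode_t)((perm_mode & S_IRWXO) << 6);

	if (is_owner) {
		return (mode_req & mode_owner) == mode_req;
	}
	if (is_group) {
		return (mode_req & mode_group) == mode_req;
	}
	return (mode_req & mode_world) == mode_req;
}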
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -81,9 +81,9 @@ static int msginit(void *); /* Uncomment this line to see MAC debugging output. */ /* #define MAC_DEBUG */ #if CONFIG_MACF_DEBUG -#define MPRINTF(a) printf(a) +#define MPRINTF(a) printf(a) #else -#define MPRINTF(a) +#define MPRINTF(a) #endif static void msg_freehdr(struct msg *msghdr); @@ -95,13 +95,13 @@ static sy_call_t *msgcalls[] = { (sy_call_t *)msgsnd, (sy_call_t *)msgrcv }; -static int nfree_msgmaps; /* # of free map entries */ -static short free_msgmaps; /* free map entries list head */ -static struct msg *free_msghdrs; /* list of free msg headers */ -char *msgpool; /* MSGMAX byte long msg buffer pool */ -struct msgmap *msgmaps; /* MSGSEG msgmap structures */ -struct msg *msghdrs; /* MSGTQL msg headers */ -struct msqid_kernel *msqids; /* MSGMNI msqid_kernel structs (wrapping user_msqid_ds structs) */ +static int nfree_msgmaps; /* # of free map entries */ +static short free_msgmaps; /* free map entries list head */ +static struct msg *free_msghdrs; /* list of free msg headers */ +char *msgpool; /* MSGMAX byte long msg buffer pool */ +struct msgmap *msgmaps; /* MSGSEG msgmap structures */ +struct msg *msghdrs; /* MSGTQL msg headers */ +struct msqid_kernel *msqids; /* MSGMNI msqid_kernel structs (wrapping user_msqid_ds structs) */ static lck_grp_t *sysv_msg_subsys_lck_grp; static lck_grp_attr_t *sysv_msg_subsys_lck_grp_attr; @@ -115,19 +115,19 @@ void sysv_msg_lock_init(void); #ifdef __APPLE_API_PRIVATE - int msgmax, /* max chars in a message */ - msgmni, /* max message queue identifiers */ - msgmnb, /* max chars in a queue */ - msgtql, /* max messages in system */ - msgssz, /* size of a message segment (see notes above) */ - msgseg; /* number of message segments */ +int msgmax, /* max chars in a message */ + msgmni, /* max message queue identifiers */ + msgmnb, /* max chars in a queue */ + msgtql, /* max messages in system */ + msgssz, /* size of a message segment (see notes above) */ + msgseg; /* number of message segments */ struct msginfo msginfo = { - MSGMAX, /* = (MSGSSZ*MSGSEG) : max chars in a message */ - MSGMNI, /* = 40 : max message queue identifiers */ - MSGMNB, /* = 2048 : max chars in a queue */ - MSGTQL, /* = 40 : max messages in system */ - MSGSSZ, /* = 8 : size of a message segment (2^N long) */ - MSGSEG /* = 2048 : number of message segments */ + MSGMAX, /* = (MSGSSZ*MSGSEG) : max chars in a message */ + MSGMNI, /* = 40 : max message queue identifiers */ + MSGMNB, /* = 2048 : max chars in a queue */ + MSGTQL, /* = 40 : max messages in system */ + MSGSSZ, /* = 8 : size of a message segment (2^N long) */ + MSGSEG /* = 2048 : number of message segments */ }; #endif /* __APPLE_API_PRIVATE */ @@ -146,9 +146,9 @@ sysv_msg_lock_init( void ) static __inline__ user_time_t sysv_msgtime(void) { - struct timeval tv; + struct timeval tv; microtime(&tv); - return (tv.tv_sec); + return tv.tv_sec; } /* @@ -157,29 +157,29 @@ sysv_msgtime(void) static void msqid_ds_kerneltouser32(struct user_msqid_ds *in, struct user32_msqid_ds *out) { - out->msg_perm = in->msg_perm; - out->msg_qnum = in->msg_qnum; - 
out->msg_cbytes = in->msg_cbytes; /* for ipcs */ - out->msg_qbytes = in->msg_qbytes; - out->msg_lspid = in->msg_lspid; - out->msg_lrpid = in->msg_lrpid; - out->msg_stime = in->msg_stime; /* XXX loss of range */ - out->msg_rtime = in->msg_rtime; /* XXX loss of range */ - out->msg_ctime = in->msg_ctime; /* XXX loss of range */ + out->msg_perm = in->msg_perm; + out->msg_qnum = in->msg_qnum; + out->msg_cbytes = in->msg_cbytes; /* for ipcs */ + out->msg_qbytes = in->msg_qbytes; + out->msg_lspid = in->msg_lspid; + out->msg_lrpid = in->msg_lrpid; + out->msg_stime = in->msg_stime; /* XXX loss of range */ + out->msg_rtime = in->msg_rtime; /* XXX loss of range */ + out->msg_ctime = in->msg_ctime; /* XXX loss of range */ } static void msqid_ds_kerneltouser64(struct user_msqid_ds *in, struct user64_msqid_ds *out) { - out->msg_perm = in->msg_perm; - out->msg_qnum = in->msg_qnum; - out->msg_cbytes = in->msg_cbytes; /* for ipcs */ - out->msg_qbytes = in->msg_qbytes; - out->msg_lspid = in->msg_lspid; - out->msg_lrpid = in->msg_lrpid; - out->msg_stime = in->msg_stime; /* XXX loss of range */ - out->msg_rtime = in->msg_rtime; /* XXX loss of range */ - out->msg_ctime = in->msg_ctime; /* XXX loss of range */ + out->msg_perm = in->msg_perm; + out->msg_qnum = in->msg_qnum; + out->msg_cbytes = in->msg_cbytes; /* for ipcs */ + out->msg_qbytes = in->msg_qbytes; + out->msg_lspid = in->msg_lspid; + out->msg_lrpid = in->msg_lrpid; + out->msg_stime = in->msg_stime; /* XXX loss of range */ + out->msg_rtime = in->msg_rtime; /* XXX loss of range */ + out->msg_ctime = in->msg_ctime; /* XXX loss of range */ } /* @@ -190,29 +190,29 @@ msqid_ds_kerneltouser64(struct user_msqid_ds *in, struct user64_msqid_ds *out) static void msqid_ds_user32tokernel(struct user32_msqid_ds *in, struct user_msqid_ds *out) { - out->msg_ctime = in->msg_ctime; - out->msg_rtime = in->msg_rtime; - out->msg_stime = in->msg_stime; - out->msg_lrpid = in->msg_lrpid; - out->msg_lspid = in->msg_lspid; - out->msg_qbytes = in->msg_qbytes; - out->msg_cbytes = in->msg_cbytes; /* for ipcs */ - out->msg_qnum = in->msg_qnum; - out->msg_perm = in->msg_perm; + out->msg_ctime = in->msg_ctime; + out->msg_rtime = in->msg_rtime; + out->msg_stime = in->msg_stime; + out->msg_lrpid = in->msg_lrpid; + out->msg_lspid = in->msg_lspid; + out->msg_qbytes = in->msg_qbytes; + out->msg_cbytes = in->msg_cbytes; /* for ipcs */ + out->msg_qnum = in->msg_qnum; + out->msg_perm = in->msg_perm; } static void msqid_ds_user64tokernel(struct user64_msqid_ds *in, struct user_msqid_ds *out) { - out->msg_ctime = in->msg_ctime; - out->msg_rtime = in->msg_rtime; - out->msg_stime = in->msg_stime; - out->msg_lrpid = in->msg_lrpid; - out->msg_lspid = in->msg_lspid; - out->msg_qbytes = in->msg_qbytes; - out->msg_cbytes = in->msg_cbytes; /* for ipcs */ - out->msg_qnum = in->msg_qnum; - out->msg_perm = in->msg_perm; + out->msg_ctime = in->msg_ctime; + out->msg_rtime = in->msg_rtime; + out->msg_stime = in->msg_stime; + out->msg_lrpid = in->msg_lrpid; + out->msg_lspid = in->msg_lspid; + out->msg_qbytes = in->msg_qbytes; + out->msg_cbytes = in->msg_cbytes; /* for ipcs */ + out->msg_qnum = in->msg_qnum; + out->msg_perm = in->msg_perm; } /* This routine assumes the system is locked prior to calling this routine */ @@ -223,8 +223,9 @@ msginit(__unused void *dummy) int i; /* Lazy initialization on first system call; we don't have SYSINIT(). */ - if (initted) - return (initted); + if (initted) { + return initted; + } /* * msginfo.msgssz should be a power of two for efficiency reasons. 
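The comment closing the hunk above (it continues into the next hunk) requires msginfo.msgssz to be a power of two, which msginit() enforces with a doubling loop. A standalone sketch of the same validation, under an invented helper name:

/* Sketch of msginit()'s msgssz check below: only powers of two from 8
 * through 1024 survive the doubling loop. */
static int
msgssz_is_valid(int msgssz)
{
	int i = 8;

	while (i < 1024 && i != msgssz) {
		i <<= 1;
	}
	return i == msgssz;
}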
@@ -232,9 +233,10 @@ msginit(__unused void *dummy) * or greater than about 256 so ... */ i = 8; - while (i < 1024 && i != msginfo.msgssz) + while (i < 1024 && i != msginfo.msgssz) { i <<= 1; - if (i != msginfo.msgssz) { + } + if (i != msginfo.msgssz) { printf("msginfo.msgssz=%d (0x%x) not a small power of 2; resetting to %d\n", msginfo.msgssz, msginfo.msgssz, MSGSSZ); msginfo.msgssz = MSGSSZ; } @@ -256,24 +258,24 @@ msginit(__unused void *dummy) goto bad; } MALLOC(msgmaps, struct msgmap *, - sizeof(struct msgmap) * msginfo.msgseg, - M_SHM, M_WAITOK); + sizeof(struct msgmap) * msginfo.msgseg, + M_SHM, M_WAITOK); if (msgmaps == NULL) { printf("msginit: can't allocate msgmaps"); goto bad; } MALLOC(msghdrs, struct msg *, - sizeof(struct msg) * msginfo.msgtql, - M_SHM, M_WAITOK); + sizeof(struct msg) * msginfo.msgtql, + M_SHM, M_WAITOK); if (msghdrs == NULL) { printf("msginit: can't allocate msghdrs"); goto bad; } MALLOC(msqids, struct msqid_kernel *, - sizeof(struct msqid_kernel) * msginfo.msgmni, - M_SHM, M_WAITOK); + sizeof(struct msqid_kernel) * msginfo.msgmni, + M_SHM, M_WAITOK); if (msqids == NULL) { printf("msginit: can't allocate msqids"); goto bad; @@ -282,9 +284,10 @@ msginit(__unused void *dummy) /* init msgmaps */ for (i = 0; i < msginfo.msgseg; i++) { - if (i > 0) - msgmaps[i-1].next = i; - msgmaps[i].next = -1; /* implies entry is available */ + if (i > 0) { + msgmaps[i - 1].next = i; + } + msgmaps[i].next = -1; /* implies entry is available */ } free_msgmaps = 0; nfree_msgmaps = msginfo.msgseg; @@ -293,19 +296,20 @@ msginit(__unused void *dummy) /* init msghdrs */ for (i = 0; i < msginfo.msgtql; i++) { msghdrs[i].msg_type = 0; - if (i > 0) - msghdrs[i-1].msg_next = &msghdrs[i]; + if (i > 0) { + msghdrs[i - 1].msg_next = &msghdrs[i]; + } msghdrs[i].msg_next = NULL; #if CONFIG_MACF mac_sysvmsg_label_init(&msghdrs[i]); #endif - } + } free_msghdrs = &msghdrs[0]; /* init msqids */ for (i = 0; i < msginfo.msgmni; i++) { - msqids[i].u.msg_qbytes = 0; /* implies entry is available */ - msqids[i].u.msg_perm._seq = 0; /* reset to a known value */ + msqids[i].u.msg_qbytes = 0; /* implies entry is available */ + msqids[i].u.msg_perm._seq = 0; /* reset to a known value */ msqids[i].u.msg_perm.mode = 0; #if CONFIG_MACF mac_sysvmsq_label_init(&msqids[i]); @@ -315,16 +319,20 @@ msginit(__unused void *dummy) initted = 1; bad: if (!initted) { - if (msgpool != NULL) + if (msgpool != NULL) { _FREE(msgpool, M_SHM); - if (msgmaps != NULL) + } + if (msgmaps != NULL) { FREE(msgmaps, M_SHM); - if (msghdrs != NULL) + } + if (msghdrs != NULL) { FREE(msghdrs, M_SHM); - if (msqids != NULL) + } + if (msqids != NULL) { FREE(msqids, M_SHM); + } } - return (initted); + return initted; } /* @@ -333,28 +341,29 @@ bad: * Entry point for all MSG calls: msgctl, msgget, msgsnd, msgrcv * * Parameters: p Process requesting the call - * uap User argument descriptor (see below) - * retval Return value of the selected msg call + * uap User argument descriptor (see below) + * retval Return value of the selected msg call * * Indirect parameters: uap->which msg call to invoke (index in array of msg calls) - * uap->a2 User argument descriptor - * + * uap->a2 User argument descriptor + * * Returns: 0 Success - * !0 Not success + * !0 Not success * * Implicit returns: retval Return value of the selected msg call * * DEPRECATED: This interface should not be used to call the other MSG - * functions (msgctl, msgget, msgsnd, msgrcv). The correct - * usage is to call the other MSG functions directly. 
+ * functions (msgctl, msgget, msgsnd, msgrcv). The correct + * usage is to call the other MSG functions directly. * */ int msgsys(struct proc *p, struct msgsys_args *uap, int32_t *retval) { - if (uap->which >= sizeof(msgcalls)/sizeof(msgcalls[0])) - return (EINVAL); - return ((*msgcalls[uap->which])(p, &uap->a2, retval)); + if (uap->which >= sizeof(msgcalls) / sizeof(msgcalls[0])) { + return EINVAL; + } + return (*msgcalls[uap->which])(p, &uap->a2, retval); } static void @@ -362,20 +371,23 @@ msg_freehdr(struct msg *msghdr) { while (msghdr->msg_ts > 0) { short next; - if (msghdr->msg_spot < 0 || msghdr->msg_spot >= msginfo.msgseg) + if (msghdr->msg_spot < 0 || msghdr->msg_spot >= msginfo.msgseg) { panic("msghdr->msg_spot out of range"); + } next = msgmaps[msghdr->msg_spot].next; msgmaps[msghdr->msg_spot].next = free_msgmaps; free_msgmaps = msghdr->msg_spot; nfree_msgmaps++; msghdr->msg_spot = next; - if (msghdr->msg_ts >= msginfo.msgssz) + if (msghdr->msg_ts >= msginfo.msgssz) { msghdr->msg_ts -= msginfo.msgssz; - else + } else { msghdr->msg_ts = 0; + } } - if (msghdr->msg_spot != -1) + if (msghdr->msg_spot != -1) { panic("msghdr->msg_spot != -1"); + } msghdr->msg_next = free_msghdrs; free_msghdrs = msghdr; #if CONFIG_MACF @@ -440,20 +452,21 @@ msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval) } #if CONFIG_MACF eval = mac_sysvmsq_check_msqctl(kauth_cred_get(), msqptr, cmd); - if (eval) + if (eval) { goto msgctlout; + } #endif eval = 0; rval = 0; switch (cmd) { - case IPC_RMID: { struct msg *msghdr; - if ((eval = ipcperm(cred, &msqptr->u.msg_perm, IPC_M))) + if ((eval = ipcperm(cred, &msqptr->u.msg_perm, IPC_M))) { goto msgctlout; + } #if CONFIG_MACF /* * Check that the thread has MAC access permissions to @@ -467,8 +480,9 @@ msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval) for (msghdr = msqptr->u.msg_first; msghdr != NULL; msghdr = msghdr->msg_next) { eval = mac_sysvmsq_check_msgrmid(kauth_cred_get(), msghdr); - if (eval) + if (eval) { goto msgctlout; + } } #endif /* Free the message headers */ @@ -484,12 +498,14 @@ msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval) msg_freehdr(msghdr_tmp); } - if (msqptr->u.msg_cbytes != 0) + if (msqptr->u.msg_cbytes != 0) { panic("msg_cbytes is messed up"); - if (msqptr->u.msg_qnum != 0) + } + if (msqptr->u.msg_qnum != 0) { panic("msg_qnum is messed up"); + } - msqptr->u.msg_qbytes = 0; /* Mark it as free */ + msqptr->u.msg_qbytes = 0; /* Mark it as free */ #if CONFIG_MACF mac_sysvmsq_label_recycle(msqptr); #endif @@ -497,11 +513,12 @@ msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval) wakeup((caddr_t)msqptr); } - break; + break; case IPC_SET: - if ((eval = ipcperm(cred, &msqptr->u.msg_perm, IPC_M))) + if ((eval = ipcperm(cred, &msqptr->u.msg_perm, IPC_M))) { goto msgctlout; + } SYSV_MSG_SUBSYS_UNLOCK(); @@ -517,15 +534,17 @@ msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval) msqid_ds_user32tokernel(&tmpds, &msqbuf); } - if (eval) - return(eval); + if (eval) { + return eval; + } SYSV_MSG_SUBSYS_LOCK(); if (msqbuf.msg_qbytes > msqptr->u.msg_qbytes) { eval = suser(cred, &p->p_acflag); - if (eval) + if (eval) { goto msgctlout; + } } @@ -535,7 +554,7 @@ msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval) printf("can't increase msg_qbytes beyond %d (truncating)\n", msginfo.msgmnb); #endif - msqbuf.msg_qbytes = msginfo.msgmnb; /* silently restrict qbytes to system limit */ + msqbuf.msg_qbytes = msginfo.msgmnb; /* silently restrict qbytes to system limit */ } if 
(msqbuf.msg_qbytes == 0) { #ifdef MSG_DEBUG_OK @@ -544,8 +563,8 @@ msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval) eval = EINVAL; goto msgctlout; } - msqptr->u.msg_perm.uid = msqbuf.msg_perm.uid; /* change the owner */ - msqptr->u.msg_perm.gid = msqbuf.msg_perm.gid; /* change the owner */ + msqptr->u.msg_perm.uid = msqbuf.msg_perm.uid; /* change the owner */ + msqptr->u.msg_perm.gid = msqbuf.msg_perm.gid; /* change the owner */ msqptr->u.msg_perm.mode = (msqptr->u.msg_perm.mode & ~0777) | (msqbuf.msg_perm.mode & 0777); msqptr->u.msg_qbytes = msqbuf.msg_qbytes; @@ -581,11 +600,12 @@ msgctl(struct proc *p, struct msgctl_args *uap, int32_t *retval) goto msgctlout; } - if (eval == 0) + if (eval == 0) { *retval = rval; + } msgctlout: SYSV_MSG_SUBSYS_UNLOCK(); - return(eval); + return eval; } int @@ -612,8 +632,9 @@ msgget(__unused struct proc *p, struct msgget_args *uap, int32_t *retval) for (msqid = 0; msqid < msginfo.msgmni; msqid++) { msqptr = &msqids[msqid]; if (msqptr->u.msg_qbytes != 0 && - msqptr->u.msg_perm._key == key) + msqptr->u.msg_perm._key == key) { break; + } } if (msqid < msginfo.msgmni) { #ifdef MSG_DEBUG_OK @@ -635,8 +656,9 @@ msgget(__unused struct proc *p, struct msgget_args *uap, int32_t *retval) } #if CONFIG_MACF eval = mac_sysvmsq_check_msqget(cred, msqptr); - if (eval) + if (eval) { goto msggetout; + } #endif goto found; } @@ -655,8 +677,9 @@ msgget(__unused struct proc *p, struct msgget_args *uap, int32_t *retval) */ msqptr = &msqids[msqid]; if (msqptr->u.msg_qbytes == 0 && - (msqptr->u.msg_perm.mode & MSG_LOCKED) == 0) + (msqptr->u.msg_perm.mode & MSG_LOCKED) == 0) { break; + } } if (msqid == msginfo.msgmni) { #ifdef MSG_DEBUG_OK @@ -704,7 +727,7 @@ found: eval = 0; msggetout: SYSV_MSG_SUBSYS_UNLOCK(); - return(eval); + return eval; } @@ -712,7 +735,7 @@ int msgsnd(struct proc *p, struct msgsnd_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(msgsnd_nocancel(p, (struct msgsnd_nocancel_args *)uap, retval)); + return msgsnd_nocancel(p, (struct msgsnd_nocancel_args *)uap, retval); } int @@ -720,7 +743,7 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva { int msqid = uap->msqid; user_addr_t user_msgp = uap->msgp; - size_t msgsz = (size_t)uap->msgsz; /* limit to 4G */ + size_t msgsz = (size_t)uap->msgsz; /* limit to 4G */ int msgflg = uap->msgflg; int segs_needed, eval; struct msqid_kernel *msqptr; @@ -778,8 +801,9 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva #if CONFIG_MACF eval = mac_sysvmsq_check_msqsnd(kauth_cred_get(), msqptr); - if (eval) + if (eval) { goto msgsndout; + } #endif segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz; #ifdef MSG_DEBUG_OK @@ -869,7 +893,7 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva we_own_it = 0; } else { /* Force later arrivals to wait for our - request */ + * request */ #ifdef MSG_DEBUG_OK printf("we own the user_msqid_ds\n"); #endif @@ -884,8 +908,9 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva #ifdef MSG_DEBUG_OK printf("good morning, eval=%d\n", eval); #endif - if (we_own_it) + if (we_own_it) { msqptr->u.msg_perm.mode &= ~MSG_LOCKED; + } if (eval != 0) { #ifdef MSG_DEBUG_OK printf("msgsnd: interrupted system call\n"); @@ -904,9 +929,7 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva #endif eval = EIDRM; goto msgsndout; - } - } else { #ifdef MSG_DEBUG_OK printf("got all the resources that we need\n"); @@ -920,21 
+943,26 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva * Make sure! */ - if (msqptr->u.msg_perm.mode & MSG_LOCKED) + if (msqptr->u.msg_perm.mode & MSG_LOCKED) { panic("msg_perm.mode & MSG_LOCKED"); - if (segs_needed > nfree_msgmaps) + } + if (segs_needed > nfree_msgmaps) { panic("segs_needed > nfree_msgmaps"); - if (msgsz + msqptr->u.msg_cbytes > msqptr->u.msg_qbytes) + } + if (msgsz + msqptr->u.msg_cbytes > msqptr->u.msg_qbytes) { panic("msgsz + msg_cbytes > msg_qbytes"); - if (free_msghdrs == NULL) + } + if (free_msghdrs == NULL) { panic("no more msghdrs"); + } /* * Re-lock the user_msqid_ds in case we page-fault when copying in * the message */ - if ((msqptr->u.msg_perm.mode & MSG_LOCKED) != 0) + if ((msqptr->u.msg_perm.mode & MSG_LOCKED) != 0) { panic("user_msqid_ds is already locked"); + } msqptr->u.msg_perm.mode |= MSG_LOCKED; /* @@ -953,15 +981,19 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva */ while (segs_needed > 0) { - if (nfree_msgmaps <= 0) + if (nfree_msgmaps <= 0) { panic("not enough msgmaps"); - if (free_msgmaps == -1) + } + if (free_msgmaps == -1) { panic("nil free_msgmaps"); + } next = free_msgmaps; - if (next <= -1) + if (next <= -1) { panic("next too low #1"); - if (next >= msginfo.msgseg) + } + if (next >= msginfo.msgseg) { panic("next out of range #1"); + } #ifdef MSG_DEBUG_OK printf("allocating segment %d to message\n", next); #endif @@ -980,15 +1012,15 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva SYSV_MSG_SUBSYS_UNLOCK(); eval = copyin(user_msgp, &msgtype, sizeof(msgtype)); SYSV_MSG_SUBSYS_LOCK(); - msghdr->msg_type = CAST_DOWN(long,msgtype); - user_msgp = user_msgp + sizeof(msgtype); /* ptr math */ + msghdr->msg_type = CAST_DOWN(long, msgtype); + user_msgp = user_msgp + sizeof(msgtype); /* ptr math */ } else { SYSV_MSG_SUBSYS_UNLOCK(); int32_t msg_type32; eval = copyin(user_msgp, &msg_type32, sizeof(msg_type32)); msghdr->msg_type = msg_type32; SYSV_MSG_SUBSYS_LOCK(); - user_msgp = user_msgp + sizeof(msg_type32); /* ptr math */ + user_msgp = user_msgp + sizeof(msg_type32); /* ptr math */ } if (eval != 0) { @@ -1023,14 +1055,17 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva while (msgsz > 0) { size_t tlen; /* compare input (size_t) value against restrict (int) value */ - if (msgsz > (size_t)msginfo.msgssz) + if (msgsz > (size_t)msginfo.msgssz) { tlen = msginfo.msgssz; - else + } else { tlen = msgsz; - if (next <= -1) + } + if (next <= -1) { panic("next too low #2"); - if (next >= msginfo.msgseg) + } + if (next >= msginfo.msgseg) { panic("next out of range #2"); + } SYSV_MSG_SUBSYS_UNLOCK(); eval = copyin(user_msgp, &msgpool[next * msginfo.msgssz], tlen); @@ -1047,11 +1082,12 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva goto msgsndout; } msgsz -= tlen; - user_msgp = user_msgp + tlen; /* ptr math */ + user_msgp = user_msgp + tlen; /* ptr math */ next = msgmaps[next].next; } - if (next != -1) + if (next != -1) { panic("didn't use all the msg segments"); + } /* * We've got the message. Unlock the user_msqid_ds. 
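Between these msgsnd hunks, a brief sketch of the segment arithmetic the copy loops exercise: a message of msgsz bytes occupies ceil(msgsz / msgssz) pool segments, and each copyin()/copyout() step moves at most msgssz bytes (tlen). The function names below are illustrative, not from the patch:

#include <stddef.h>

/* Number of fixed-size segments a message occupies, rounding up, as in
 * segs_needed = (msgsz + msginfo.msgssz - 1) / msginfo.msgssz. */
static size_t
segs_needed_for(size_t msgsz, size_t msgssz)
{
	return (msgsz + msgssz - 1) / msgssz;
}

/* Shape of the per-segment walk: clamp each step to the segment size,
 * copy, then advance; the real code also follows msgmaps[next].next. */
static void
walk_segments(size_t msgsz, size_t msgssz)
{
	while (msgsz > 0) {
		size_t tlen = (msgsz > msgssz) ? msgssz : msgsz;
		/* copyin()/copyout() of tlen bytes happens here */
		msgsz -= tlen;
	}
}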
@@ -1119,7 +1155,7 @@ msgsnd_nocancel(struct proc *p, struct msgsnd_nocancel_args *uap, int32_t *retva msgsndout: SYSV_MSG_SUBSYS_UNLOCK(); - return(eval); + return eval; } @@ -1127,7 +1163,7 @@ int msgrcv(struct proc *p, struct msgrcv_args *uap, user_ssize_t *retval) { __pthread_testcancel(1); - return(msgrcv_nocancel(p, (struct msgrcv_nocancel_args *)uap, retval)); + return msgrcv_nocancel(p, (struct msgrcv_nocancel_args *)uap, retval); } int @@ -1135,8 +1171,8 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * { int msqid = uap->msqid; user_addr_t user_msgp = uap->msgp; - size_t msgsz = (size_t)uap->msgsz; /* limit to 4G */ - long msgtyp = (long)uap->msgtyp; /* limit to 32 bits */ + size_t msgsz = (size_t)uap->msgsz; /* limit to 4G */ + long msgtyp = (long)uap->msgtyp; /* limit to 32 bits */ int msgflg = uap->msgflg; size_t len; struct msqid_kernel *msqptr; @@ -1195,8 +1231,9 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * #if CONFIG_MACF eval = mac_sysvmsq_check_msqrcv(kauth_cred_get(), msqptr); - if (eval) + if (eval) { goto msgrcvout; + } #endif msghdr = NULL; while (msghdr == NULL) { @@ -1215,16 +1252,18 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * #if CONFIG_MACF eval = mac_sysvmsq_check_msgrcv(kauth_cred_get(), msghdr); - if (eval) + if (eval) { goto msgrcvout; + } #endif if (msqptr->u.msg_first == msqptr->u.msg_last) { msqptr->u.msg_first = NULL; msqptr->u.msg_last = NULL; } else { msqptr->u.msg_first = msghdr->msg_next; - if (msqptr->u.msg_first == NULL) + if (msqptr->u.msg_first == NULL) { panic("msg_first/last messed up #1"); + } } } } else { @@ -1260,24 +1299,27 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * } #if CONFIG_MACF eval = mac_sysvmsq_check_msgrcv( - kauth_cred_get(), msghdr); - if (eval) + kauth_cred_get(), msghdr); + if (eval) { goto msgrcvout; + } #endif *prev = msghdr->msg_next; if (msghdr == msqptr->u.msg_last) { if (previous == NULL) { if (prev != - &msqptr->u.msg_first) + &msqptr->u.msg_first) { panic("msg_first/last messed up #2"); + } msqptr->u.msg_first = NULL; msqptr->u.msg_last = NULL; } else { if (prev == - &msqptr->u.msg_first) + &msqptr->u.msg_first) { panic("msg_first/last messed up #3"); + } msqptr->u.msg_last = previous; } @@ -1295,8 +1337,9 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * * If there is one then bail out of this loop. */ - if (msghdr != NULL) + if (msghdr != NULL) { break; + } /* * Hmph! No message found. Does the user want to wait? @@ -1379,8 +1422,9 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * printf("found a message, msgsz=%ld, msg_ts=%d\n", msgsz, msghdr->msg_ts); #endif - if (msgsz > msghdr->msg_ts) + if (msgsz > msghdr->msg_ts) { msgsz = msghdr->msg_ts; + } /* * Return the type to the user. 
@@ -1395,13 +1439,13 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * SYSV_MSG_SUBSYS_UNLOCK(); eval = copyout(&msgtype, user_msgp, sizeof(msgtype)); SYSV_MSG_SUBSYS_LOCK(); - user_msgp = user_msgp + sizeof(msgtype); /* ptr math */ + user_msgp = user_msgp + sizeof(msgtype); /* ptr math */ } else { msg_type32 = msghdr->msg_type; SYSV_MSG_SUBSYS_UNLOCK(); eval = copyout(&msg_type32, user_msgp, sizeof(msg_type32)); SYSV_MSG_SUBSYS_LOCK(); - user_msgp = user_msgp + sizeof(msg_type32); /* ptr math */ + user_msgp = user_msgp + sizeof(msg_type32); /* ptr math */ } if (eval != 0) { @@ -1424,14 +1468,17 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * size_t tlen; /* compare input (size_t) value against restrict (int) value */ - if (msgsz > (size_t)msginfo.msgssz) + if (msgsz > (size_t)msginfo.msgssz) { tlen = msginfo.msgssz; - else + } else { tlen = msgsz; - if (next <= -1) + } + if (next <= -1) { panic("next too low #3"); - if (next >= msginfo.msgseg) + } + if (next >= msginfo.msgseg) { panic("next out of range #3"); + } SYSV_MSG_SUBSYS_UNLOCK(); eval = copyout(&msgpool[next * msginfo.msgssz], user_msgp, tlen); @@ -1445,7 +1492,7 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * wakeup((caddr_t)msqptr); goto msgrcvout; } - user_msgp = user_msgp + tlen; /* ptr math */ + user_msgp = user_msgp + tlen; /* ptr math */ next = msgmaps[next].next; } @@ -1459,12 +1506,12 @@ msgrcv_nocancel(struct proc *p, struct msgrcv_nocancel_args *uap, user_ssize_t * eval = 0; msgrcvout: SYSV_MSG_SUBSYS_UNLOCK(); - return(eval); + return eval; } static int IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { int error; int cursor; @@ -1472,8 +1519,8 @@ IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, struct user32_IPCS_command u32; struct user_IPCS_command u64; } ipcs; - struct user32_msqid_ds msqid_ds32 = {}; /* post conversion, 32 bit version */ - struct user64_msqid_ds msqid_ds64 = {}; /* post conversion, 64 bit version */ + struct user32_msqid_ds msqid_ds32 = {}; /* post conversion, 32 bit version */ + struct user64_msqid_ds msqid_ds64 = {}; /* post conversion, 64 bit version */ void *msqid_dsp; size_t ipcs_sz; size_t msqid_ds_sz; @@ -1489,26 +1536,27 @@ IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, /* Copy in the command structure */ if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) { - return(error); + return error; } - if (!IS_64BIT_PROCESS(p)) /* convert in place */ + if (!IS_64BIT_PROCESS(p)) { /* convert in place */ ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data); + } /* Let us version this interface... */ if (ipcs.u64.ipcs_magic != IPCS_MAGIC) { - return(EINVAL); + return EINVAL; } SYSV_MSG_SUBSYS_LOCK(); - switch(ipcs.u64.ipcs_op) { - case IPCS_MSG_CONF: /* Obtain global configuration data */ + switch (ipcs.u64.ipcs_op) { + case IPCS_MSG_CONF: /* Obtain global configuration data */ if (ipcs.u64.ipcs_datalen != sizeof(struct msginfo)) { error = ERANGE; break; } - if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */ + if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. 
*/ error = EINVAL; break; } @@ -1517,7 +1565,7 @@ IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, SYSV_MSG_SUBSYS_LOCK(); break; - case IPCS_MSG_ITER: /* Iterate over existing segments */ + case IPCS_MSG_ITER: /* Iterate over existing segments */ /* Not done up top so we can set limits via sysctl (later) */ if (!msginit(0)) { error = ENOMEM; @@ -1533,9 +1581,10 @@ IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, error = EINVAL; break; } - for( ; cursor < msginfo.msgmni; cursor++) { - if (msqids[cursor].u.msg_qbytes != 0) /* allocated */ + for (; cursor < msginfo.msgmni; cursor++) { + if (msqids[cursor].u.msg_qbytes != 0) { /* allocated */ break; + } continue; } if (cursor == msginfo.msgmni) { @@ -1543,7 +1592,7 @@ IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, break; } - msqid_dsp = &msqids[cursor]; /* default: 64 bit */ + msqid_dsp = &msqids[cursor]; /* default: 64 bit */ /* * If necessary, convert the 64 bit kernel segment @@ -1563,8 +1612,9 @@ IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, /* update cursor */ ipcs.u64.ipcs_cursor = cursor + 1; - if (!IS_64BIT_PROCESS(p)) /* convert in place */ - ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t,ipcs.u64.ipcs_data); + if (!IS_64BIT_PROCESS(p)) { /* convert in place */ + ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data); + } error = SYSCTL_OUT(req, &ipcs, ipcs_sz); } SYSV_MSG_SUBSYS_LOCK(); @@ -1576,13 +1626,13 @@ IPCS_msg_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, } SYSV_MSG_SUBSYS_UNLOCK(); - return(error); + return error; } SYSCTL_DECL(_kern_sysv_ipcs); SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, msg, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - 0, 0, IPCS_msg_sysctl, - "S,IPCS_msg_command", - "ipcs msg command interface"); + 0, 0, IPCS_msg_sysctl, + "S,IPCS_msg_command", + "ipcs msg command interface"); #endif /* SYSV_MSG */ diff --git a/bsd/kern/sysv_sem.c b/bsd/kern/sysv_sem.c index ecfafb55f..795fd6d02 100644 --- a/bsd/kern/sysv_sem.c +++ b/bsd/kern/sysv_sem.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -73,12 +73,12 @@ /* Uncomment this line to see MAC debugging output. 
*/ /* #define MAC_DEBUG */ #if CONFIG_MACF_DEBUG -#define MPRINTF(a) printf(a) +#define MPRINTF(a) printf(a) #else -#define MPRINTF(a) +#define MPRINTF(a) #endif -#define M_SYSVSEM M_TEMP +#define M_SYSVSEM M_TEMP /* Hard system limits to avoid resource starvation / DOS attacks. @@ -102,22 +102,22 @@ static struct seminfo limitseminfo = { * and not allocate the memory for them up front. */ struct seminfo seminfo = { - SEMMAP, /* Unused, # of entries in semaphore map */ - 0, /* # of semaphore identifiers */ - 0, /* # of semaphores in system */ - 0, /* # of undo entries in system */ - SEMMSL, /* max # of semaphores per id */ - SEMOPM, /* max # of operations per semop call */ - SEMUME, /* max # of undo entries per process */ - SEMUSZ, /* size in bytes of undo structure */ - SEMVMX, /* semaphore maximum value */ - SEMAEM /* adjust on exit max value */ + SEMMAP, /* Unused, # of entries in semaphore map */ + 0, /* # of semaphore identifiers */ + 0, /* # of semaphores in system */ + 0, /* # of undo entries in system */ + SEMMSL, /* max # of semaphores per id */ + SEMOPM, /* max # of operations per semop call */ + SEMUME, /* max # of undo entries per process */ + SEMUSZ, /* size in bytes of undo structure */ + SEMVMX, /* semaphore maximum value */ + SEMAEM /* adjust on exit max value */ }; static int semu_alloc(struct proc *p); -static int semundo_adjust(struct proc *p, int *supidx, - int semid, int semnum, int adjval); +static int semundo_adjust(struct proc *p, int *supidx, + int semid, int semnum, int adjval); static void semundo_clear(int semid, int semnum); /* XXX casting to (sy_call_t *) is bogus, as usual. */ @@ -126,11 +126,11 @@ static sy_call_t *semcalls[] = { (sy_call_t *)semop }; -static int semtot = 0; /* # of used semaphores */ -struct semid_kernel *sema = NULL; /* semaphore id pool */ -struct sem *sem_pool = NULL; /* semaphore pool */ -static int semu_list_idx = -1; /* active undo structures */ -struct sem_undo *semu = NULL; /* semaphore undo pool */ +static int semtot = 0; /* # of used semaphores */ +struct semid_kernel *sema = NULL; /* semaphore id pool */ +struct sem *sem_pool = NULL; /* semaphore pool */ +static int semu_list_idx = -1; /* active undo structures */ +struct sem_undo *semu = NULL; /* semaphore undo pool */ void sysv_sem_lock_init(void); @@ -146,21 +146,20 @@ static lck_mtx_t sysv_sem_subsys_mutex; __private_extern__ void sysv_sem_lock_init( void ) { + sysv_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); - sysv_sem_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); + sysv_sem_subsys_lck_grp = lck_grp_alloc_init("sysv_sem_subsys_lock", sysv_sem_subsys_lck_grp_attr); - sysv_sem_subsys_lck_grp = lck_grp_alloc_init("sysv_sem_subsys_lock", sysv_sem_subsys_lck_grp_attr); - - sysv_sem_subsys_lck_attr = lck_attr_alloc_init(); - lck_mtx_init(&sysv_sem_subsys_mutex, sysv_sem_subsys_lck_grp, sysv_sem_subsys_lck_attr); + sysv_sem_subsys_lck_attr = lck_attr_alloc_init(); + lck_mtx_init(&sysv_sem_subsys_mutex, sysv_sem_subsys_lck_grp, sysv_sem_subsys_lck_attr); } static __inline__ user_time_t sysv_semtime(void) { - struct timeval tv; + struct timeval tv; microtime(&tv); - return (tv.tv_sec); + return tv.tv_sec; } /* @@ -176,20 +175,20 @@ static void semid_ds_kernelto32(struct user_semid_ds *in, struct user32_semid_ds *out) { out->sem_perm = in->sem_perm; - out->sem_base = CAST_DOWN_EXPLICIT(__int32_t,in->sem_base); + out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base); out->sem_nsems = in->sem_nsems; - out->sem_otime = in->sem_otime; /* XXX loses precision */ - 
out->sem_ctime = in->sem_ctime; /* XXX loses precision */ + out->sem_otime = in->sem_otime; /* XXX loses precision */ + out->sem_ctime = in->sem_ctime; /* XXX loses precision */ } static void semid_ds_kernelto64(struct user_semid_ds *in, struct user64_semid_ds *out) { out->sem_perm = in->sem_perm; - out->sem_base = CAST_DOWN_EXPLICIT(__int32_t,in->sem_base); + out->sem_base = CAST_DOWN_EXPLICIT(__int32_t, in->sem_base); out->sem_nsems = in->sem_nsems; - out->sem_otime = in->sem_otime; /* XXX loses precision */ - out->sem_ctime = in->sem_ctime; /* XXX loses precision */ + out->sem_otime = in->sem_otime; /* XXX loses precision */ + out->sem_ctime = in->sem_ctime; /* XXX loses precision */ } /* @@ -229,31 +228,31 @@ semid_ds_64tokernel(struct user64_semid_ds *in, struct user_semid_ds *out) * Entry point for all SEM calls: semctl, semget, semop * * Parameters: p Process requesting the call - * uap User argument descriptor (see below) - * retval Return value of the selected sem call + * uap User argument descriptor (see below) + * retval Return value of the selected sem call * * Indirect parameters: uap->which sem call to invoke (index in array of sem calls) - * uap->a2 User argument descriptor - * + * uap->a2 User argument descriptor + * * Returns: 0 Success * !0 Not success * * Implicit returns: retval Return value of the selected sem call * * DEPRECATED: This interface should not be used to call the other SEM - * functions (semctl, semget, semop). The correct usage is - * to call the other SEM functions directly. + * functions (semctl, semget, semop). The correct usage is + * to call the other SEM functions directly. * */ int semsys(struct proc *p, struct semsys_args *uap, int32_t *retval) { - /* The individual calls handling the locking now */ - if (uap->which >= sizeof(semcalls)/sizeof(semcalls[0])) - return (EINVAL); - return ((*semcalls[uap->which])(p, &uap->a2, retval)); + if (uap->which >= sizeof(semcalls) / sizeof(semcalls[0])) { + return EINVAL; + } + return (*semcalls[uap->which])(p, &uap->a2, retval); } /* @@ -268,35 +267,33 @@ grow_semu_array(int newSize) int i; struct sem_undo *newSemu; - if (newSize <= seminfo.semmnu) + if (newSize <= seminfo.semmnu) { return 1; - if (newSize > limitseminfo.semmnu) /* enforce hard limit */ - { + } + if (newSize > limitseminfo.semmnu) { /* enforce hard limit */ #ifdef SEM_DEBUG printf("undo structure hard limit of %d reached, requested %d\n", - limitseminfo.semmnu, newSize); + limitseminfo.semmnu, newSize); #endif return 0; } - newSize = (newSize/SEMMNU_INC + 1) * SEMMNU_INC; + newSize = (newSize / SEMMNU_INC + 1) * SEMMNU_INC; newSize = newSize > limitseminfo.semmnu ? limitseminfo.semmnu : newSize; #ifdef SEM_DEBUG printf("growing semu[] from %d to %d\n", seminfo.semmnu, newSize); #endif - MALLOC(newSemu, struct sem_undo *, sizeof (struct sem_undo) * newSize, - M_SYSVSEM, M_WAITOK | M_ZERO); - if (NULL == newSemu) - { + MALLOC(newSemu, struct sem_undo *, sizeof(struct sem_undo) * newSize, + M_SYSVSEM, M_WAITOK | M_ZERO); + if (NULL == newSemu) { #ifdef SEM_DEBUG printf("allocation failed. 
no changes made.\n"); #endif return 0; } - /* copy the old data to the new array */ - for (i = 0; i < seminfo.semmnu; i++) - { + /* copy the old data to the new array */ + for (i = 0; i < seminfo.semmnu; i++) { newSemu[i] = semu[i]; } /* @@ -306,8 +303,9 @@ grow_semu_array(int newSize) */ /* Clean up the old array */ - if (semu) + if (semu) { FREE(semu, M_SYSVSEM); + } semu = newSemu; seminfo.semmnu = newSize; @@ -329,27 +327,26 @@ grow_sema_array(int newSize) struct semid_kernel *newSema; int i; - if (newSize <= seminfo.semmni) + if (newSize <= seminfo.semmni) { return 0; - if (newSize > limitseminfo.semmni) /* enforce hard limit */ - { + } + if (newSize > limitseminfo.semmni) { /* enforce hard limit */ #ifdef SEM_DEBUG printf("identifier hard limit of %d reached, requested %d\n", - limitseminfo.semmni, newSize); + limitseminfo.semmni, newSize); #endif return 0; } - newSize = (newSize/SEMMNI_INC + 1) * SEMMNI_INC; + newSize = (newSize / SEMMNI_INC + 1) * SEMMNI_INC; newSize = newSize > limitseminfo.semmni ? limitseminfo.semmni : newSize; #ifdef SEM_DEBUG printf("growing sema[] from %d to %d\n", seminfo.semmni, newSize); #endif MALLOC(newSema, struct semid_kernel *, - sizeof (struct semid_kernel) * newSize, - M_SYSVSEM, M_WAITOK | M_ZERO); - if (NULL == newSema) - { + sizeof(struct semid_kernel) * newSize, + M_SYSVSEM, M_WAITOK | M_ZERO); + if (NULL == newSema) { #ifdef SEM_DEBUG printf("allocation failed. no changes made.\n"); #endif @@ -357,8 +354,7 @@ grow_sema_array(int newSize) } /* copy over the old ids */ - for (i = 0; i < seminfo.semmni; i++) - { + for (i = 0; i < seminfo.semmni; i++) { newSema[i] = sema[i]; /* This is a hack. What we really want to be able to * do is change the value a process is waiting on @@ -368,17 +364,17 @@ grow_sema_array(int newSize) * semaphore set is really not available yet, and then * sleep on the correct, reallocated semid_kernel pointer. */ - if (sema[i].u.sem_perm.mode & SEM_ALLOC) + if (sema[i].u.sem_perm.mode & SEM_ALLOC) { wakeup((caddr_t)&sema[i]); + } } #if CONFIG_MACF - for (i = seminfo.semmni; i < newSize; i++) - { + for (i = seminfo.semmni; i < newSize; i++) { mac_sysvsem_label_init(&newSema[i]); } #endif - + /* * The new elements (from newSema[i] to newSema[newSize-1]) have their * "sem_base" and "sem_perm.mode" set to 0 (i.e. NULL) by the M_ZERO @@ -386,8 +382,9 @@ grow_sema_array(int newSize) */ /* Clean up the old array */ - if (sema) + if (sema) { FREE(sema, M_SYSVSEM); + } sema = newSema; seminfo.semmni = newSize; @@ -410,25 +407,26 @@ grow_sem_pool(int new_pool_size) struct sem *sem_free; int i; - if (new_pool_size < semtot) + if (new_pool_size < semtot) { return 0; + } /* enforce hard limit */ if (new_pool_size > limitseminfo.semmns) { #ifdef SEM_DEBUG printf("semaphore hard limit of %d reached, requested %d\n", - limitseminfo.semmns, new_pool_size); + limitseminfo.semmns, new_pool_size); #endif return 0; } - new_pool_size = (new_pool_size/SEMMNS_INC + 1) * SEMMNS_INC; + new_pool_size = (new_pool_size / SEMMNS_INC + 1) * SEMMNS_INC; new_pool_size = new_pool_size > limitseminfo.semmns ? limitseminfo.semmns : new_pool_size; #ifdef SEM_DEBUG printf("growing sem_pool array from %d to %d\n", seminfo.semmns, new_pool_size); #endif - MALLOC(new_sem_pool, struct sem *, sizeof (struct sem) * new_pool_size, - M_SYSVSEM, M_WAITOK | M_ZERO | M_NULL); + MALLOC(new_sem_pool, struct sem *, sizeof(struct sem) * new_pool_size, + M_SYSVSEM, M_WAITOK | M_ZERO | M_NULL); if (NULL == new_sem_pool) { #ifdef SEM_DEBUG printf("allocation failed. 
no changes made.\n"); @@ -437,23 +435,27 @@ grow_sem_pool(int new_pool_size) } /* We have our new memory, now copy the old contents over */ - if (sem_pool) - for(i = 0; i < seminfo.semmns; i++) + if (sem_pool) { + for (i = 0; i < seminfo.semmns; i++) { new_sem_pool[i] = sem_pool[i]; + } + } /* Update our id structures to point to the new semaphores */ - for(i = 0; i < seminfo.semmni; i++) { - if (sema[i].u.sem_perm.mode & SEM_ALLOC) /* ID in use */ - sema[i].u.sem_base = new_sem_pool + - (sema[i].u.sem_base - sem_pool); + for (i = 0; i < seminfo.semmni; i++) { + if (sema[i].u.sem_perm.mode & SEM_ALLOC) { /* ID in use */ + sema[i].u.sem_base = new_sem_pool + + (sema[i].u.sem_base - sem_pool); + } } sem_free = sem_pool; sem_pool = new_sem_pool; /* clean up the old array */ - if (sem_free != NULL) + if (sem_free != NULL) { FREE(sem_free, M_SYSVSEM); + } seminfo.semmns = new_pool_size; #ifdef SEM_DEBUG @@ -513,12 +515,13 @@ semu_alloc(struct proc *p) supidx = &semu_list_idx; while (*supidx != -1) { suptr = SEMU(*supidx); - if (suptr->un_cnt == 0) { + if (suptr->un_cnt == 0) { suptr->un_proc = NULL; *supidx = suptr->un_next_idx; did_something = 1; - } else + } else { supidx = &(suptr->un_next_idx); + } } /* If we didn't free anything. Try expanding @@ -526,9 +529,11 @@ semu_alloc(struct proc *p) * then fail. We expand last to get the * most reuse out of existing resources. */ - if (!did_something) - if (!grow_semu_array(seminfo.semmnu + 1)) + if (!did_something) { + if (!grow_semu_array(seminfo.semmnu + 1)) { return -1; + } + } } else { /* * The second pass failed even though we freed @@ -548,7 +553,7 @@ semu_alloc(struct proc *p) */ static int semundo_adjust(struct proc *p, int *supidx, int semid, - int semnum, int adjval) + int semnum, int adjval) { struct sem_undo *suptr; int suidx; @@ -570,11 +575,13 @@ semundo_adjust(struct proc *p, int *supidx, int semid, } } if (suidx == -1) { - if (adjval == 0) - return(0); + if (adjval == 0) { + return 0; + } suidx = semu_alloc(p); - if (suidx == -1) - return(ENOSPC); + if (suidx == -1) { + return ENOSPC; + } *supidx = suidx; } } @@ -586,14 +593,16 @@ semundo_adjust(struct proc *p, int *supidx, int semid, suptr = SEMU(suidx); new_sueptr = NULL; for (i = 0, suepptr = &suptr->un_ent, sueptr = suptr->un_ent; - i < suptr->un_cnt; - i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) { - if (sueptr->une_id != semid || sueptr->une_num != semnum) + i < suptr->un_cnt; + i++, suepptr = &sueptr->une_next, sueptr = sueptr->une_next) { + if (sueptr->une_id != semid || sueptr->une_num != semnum) { continue; - if (adjval == 0) + } + if (adjval == 0) { sueptr->une_adjval = 0; - else + } else { sueptr->une_adjval += adjval; + } if (sueptr->une_adjval == 0) { suptr->un_cnt--; *suepptr = sueptr->une_next; @@ -615,8 +624,8 @@ semundo_adjust(struct proc *p, int *supidx, int semid, } /* allocate a new semaphore undo entry */ - MALLOC(new_sueptr, struct undo *, sizeof (struct undo), - M_SYSVSEM, M_WAITOK); + MALLOC(new_sueptr, struct undo *, sizeof(struct undo), + M_SYSVSEM, M_WAITOK); if (new_sueptr == NULL) { return ENOMEM; } @@ -657,8 +666,9 @@ semundo_clear(int semid, int semnum) sueptr = *suepptr; continue; } - if (semnum != -1) + if (semnum != -1) { break; + } } i++; suepptr = &sueptr->une_next; @@ -685,7 +695,7 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) int i, rval, eval; struct user_semid_ds sbuf; struct semid_kernel *semakptr; - + AUDIT_ARG(svipc_cmd, cmd); AUDIT_ARG(svipc_id, semid); @@ -714,8 +724,9 @@ semctl(struct proc *p, 
struct semctl_args *uap, int32_t *retval) } #if CONFIG_MACF eval = mac_sysvsem_check_semctl(cred, semakptr, cmd); - if (eval) + if (eval) { goto semctlout; + } #endif eval = 0; @@ -723,18 +734,21 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) switch (cmd) { case IPC_RMID: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) { goto semctlout; + } semakptr->u.sem_perm.cuid = kauth_cred_getuid(cred); semakptr->u.sem_perm.uid = kauth_cred_getuid(cred); semtot -= semakptr->u.sem_nsems; - for (i = semakptr->u.sem_base - sem_pool; i < semtot; i++) + for (i = semakptr->u.sem_base - sem_pool; i < semtot; i++) { sem_pool[i] = sem_pool[i + semakptr->u.sem_nsems]; + } for (i = 0; i < seminfo.semmni; i++) { if ((sema[i].u.sem_perm.mode & SEM_ALLOC) && - sema[i].u.sem_base > semakptr->u.sem_base) + sema[i].u.sem_base > semakptr->u.sem_base) { sema[i].u.sem_base -= semakptr->u.sem_nsems; + } } semakptr->u.sem_perm.mode = 0; #if CONFIG_MACF @@ -745,8 +759,9 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; case IPC_SET: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) - goto semctlout; + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_M))) { + goto semctlout; + } if (IS_64BIT_PROCESS(p)) { struct user64_semid_ds ds64; @@ -757,7 +772,7 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) eval = copyin(user_arg.buf, &ds32, sizeof(ds32)); semid_ds_32tokernel(&ds32, &sbuf); } - + if (eval != 0) { goto semctlout; } @@ -770,8 +785,9 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; case IPC_STAT: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) - goto semctlout; + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) { + goto semctlout; + } if (IS_64BIT_PROCESS(p)) { struct user64_semid_ds semid_ds64; @@ -787,8 +803,9 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; case GETNCNT: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) - goto semctlout; + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) { + goto semctlout; + } if (semnum < 0 || semnum >= semakptr->u.sem_nsems) { eval = EINVAL; goto semctlout; @@ -797,8 +814,9 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; case GETPID: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) - goto semctlout; + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) { + goto semctlout; + } if (semnum < 0 || semnum >= semakptr->u.sem_nsems) { eval = EINVAL; goto semctlout; @@ -807,8 +825,9 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; case GETVAL: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) - goto semctlout; + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) { + goto semctlout; + } if (semnum < 0 || semnum >= semakptr->u.sem_nsems) { eval = EINVAL; goto semctlout; @@ -817,22 +836,25 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; case GETALL: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) - goto semctlout; + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) { + goto semctlout; + } /* XXXXXXXXXXXXXXXX TBD XXXXXXXXXXXXXXXX */ for (i = 0; i < semakptr->u.sem_nsems; i++) { /* XXX could be done in one go... 
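/*
 * Editor's example (userspace, illustrative, not part of the patch): the
 * command dispatch above is what a call sequence like this exercises --
 * SETVAL stores, GETVAL loads, IPC_RMID tears down. union semun is declared
 * in <sys/sem.h> on Darwin; Linux makes you define it yourself.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
    union semun arg;   /* declared by <sys/sem.h> on Darwin */
    int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

    if (id < 0) {
        perror("semget");
        return 1;
    }
    arg.val = 3;
    if (semctl(id, 0, SETVAL, arg) < 0) {
        perror("SETVAL");
        return 1;
    }
    printf("semval = %d\n", semctl(id, 0, GETVAL));   /* prints 3 */
    semctl(id, 0, IPC_RMID);   /* destroy the set */
    return 0;
}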
*/ eval = copyout((caddr_t)&semakptr->u.sem_base[i].semval, user_arg.array + (i * sizeof(unsigned short)), sizeof(unsigned short)); - if (eval != 0) + if (eval != 0) { break; + } } break; case GETZCNT: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) - goto semctlout; + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_R))) { + goto semctlout; + } if (semnum < 0 || semnum >= semakptr->u.sem_nsems) { eval = EINVAL; goto semctlout; @@ -841,35 +863,32 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; case SETVAL: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) - { + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) { #ifdef SEM_DEBUG printf("Invalid credentials for write\n"); #endif - goto semctlout; + goto semctlout; } - if (semnum < 0 || semnum >= semakptr->u.sem_nsems) - { + if (semnum < 0 || semnum >= semakptr->u.sem_nsems) { #ifdef SEM_DEBUG printf("Invalid number out of range for set\n"); #endif eval = EINVAL; goto semctlout; } - + /* * Cast down a pointer instead of using 'val' member directly * to avoid introducing endieness and a pad field into the * header file. Ugly, but it works. */ u_int newsemval = CAST_DOWN_EXPLICIT(u_int, user_arg.buf); - + /* - * The check is being performed as unsigned values to match + * The check is being performed as unsigned values to match * eventual destination - */ - if (newsemval > (u_int)seminfo.semvmx) - { + */ + if (newsemval > (u_int)seminfo.semvmx) { #ifdef SEM_DEBUG printf("Out of range sem value for set\n"); #endif @@ -884,16 +903,18 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; case SETALL: - if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) - goto semctlout; + if ((eval = ipcperm(cred, &semakptr->u.sem_perm, IPC_W))) { + goto semctlout; + } /*** XXXXXXXXXXXX TBD ********/ for (i = 0; i < semakptr->u.sem_nsems; i++) { /* XXX could be done in one go... */ eval = copyin(user_arg.array + (i * sizeof(unsigned short)), (caddr_t)&semakptr->u.sem_base[i].semval, sizeof(unsigned short)); - if (eval != 0) + if (eval != 0) { break; + } semakptr->u.sem_base[i].sempid = p->p_pid; } /* XXX scottl Should there be a MAC call here? 
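/*
 * Editor's note (illustrative, not part of the patch): the SETVAL range
 * check above deliberately compares as unsigned. A negative value from user
 * space wraps to a huge unsigned number, so one comparison rejects both
 * too-large and negative inputs:
 */
#include <stdio.h>

int main(void)
{
    int semvmx = 32767;   /* a typical SEMVMX-style ceiling */
    int v = -1;

    /* (unsigned)-1 is huge, so a single compare catches both bad cases */
    printf("%s\n", (unsigned)v > (unsigned)semvmx ? "reject" : "accept");
    return 0;
}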
*/ @@ -902,15 +923,16 @@ semctl(struct proc *p, struct semctl_args *uap, int32_t *retval) break; default: - eval = EINVAL; - goto semctlout; + eval = EINVAL; + goto semctlout; } - if (eval == 0) + if (eval == 0) { *retval = rval; + } semctlout: SYSV_SEM_SUBSYS_UNLOCK(); - return(eval); + return eval; } int @@ -923,29 +945,32 @@ semget(__unused struct proc *p, struct semget_args *uap, int32_t *retval) kauth_cred_t cred = kauth_cred_get(); #ifdef SEM_DEBUG - if (key != IPC_PRIVATE) + if (key != IPC_PRIVATE) { printf("semget(0x%x, %d, 0%o)\n", key, nsems, semflg); - else + } else { printf("semget(IPC_PRIVATE, %d, 0%o)\n", nsems, semflg); + } #endif SYSV_SEM_SUBSYS_LOCK(); - + if (key != IPC_PRIVATE) { for (semid = 0; semid < seminfo.semmni; semid++) { if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) && - sema[semid].u.sem_perm._key == key) + sema[semid].u.sem_perm._key == key) { break; + } } if (semid < seminfo.semmni) { #ifdef SEM_DEBUG printf("found public key\n"); #endif if ((eval = ipcperm(cred, &sema[semid].u.sem_perm, - semflg & 0700))) + semflg & 0700))) { goto semgetout; + } if (nsems < 0 || sema[semid].u.sem_nsems < nsems) { #ifdef SEM_DEBUG printf("too small\n"); @@ -962,8 +987,9 @@ semget(__unused struct proc *p, struct semget_args *uap, int32_t *retval) } #if CONFIG_MACF eval = mac_sysvsem_check_semget(cred, &sema[semid]); - if (eval) + if (eval) { goto semgetout; + } #endif goto found; } @@ -995,15 +1021,15 @@ semget(__unused struct proc *p, struct semget_args *uap, int32_t *retval) } } for (semid = 0; semid < seminfo.semmni; semid++) { - if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0) + if ((sema[semid].u.sem_perm.mode & SEM_ALLOC) == 0) { break; + } } if (semid == seminfo.semmni) { #ifdef SEM_DEBUG printf("no more id's available\n"); #endif - if (!grow_sema_array(seminfo.semmni + 1)) - { + if (!grow_sema_array(seminfo.semmni + 1)) { #ifdef SEM_DEBUG printf("failed to grow sema array\n"); #endif @@ -1028,7 +1054,7 @@ semget(__unused struct proc *p, struct semget_args *uap, int32_t *retval) sema[semid].u.sem_base = &sem_pool[semtot]; semtot += nsems; bzero(sema[semid].u.sem_base, - sizeof(sema[semid].u.sem_base[0])*nsems); + sizeof(sema[semid].u.sem_base[0]) * nsems); #if CONFIG_MACF mac_sysvsem_label_associate(cred, &sema[semid]); #endif @@ -1054,7 +1080,7 @@ found: semgetout: SYSV_SEM_SUBSYS_UNLOCK(); - return(eval); + return eval; } int @@ -1064,8 +1090,8 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) int nsops = uap->nsops; struct sembuf sops[seminfo.semopm]; struct semid_kernel *semakptr; - struct sembuf *sopptr = NULL; /* protected by 'semptr' */ - struct sem *semptr = NULL; /* protected by 'if' */ + struct sembuf *sopptr = NULL; /* protected by 'semptr' */ + struct sem *semptr = NULL; /* protected by 'if' */ int supidx = -1; int i, j, eval; int do_wakeup, do_undos; @@ -1078,7 +1104,7 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) printf("call to semop(%d, 0x%x, %d)\n", semid, sops, nsops); #endif - semid = IPCID_TO_IX(semid); /* Convert back to zero origin */ + semid = IPCID_TO_IX(semid); /* Convert back to zero origin */ if (semid < 0 || semid >= seminfo.semmni) { eval = EINVAL; @@ -1110,7 +1136,7 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) eval = E2BIG; goto semopout; } - + /* OK for LP64, since sizeof(struct sembuf) is currently invariant */ if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) { #ifdef SEM_DEBUG @@ -1124,9 +1150,10 @@ semop(struct proc *p, struct semop_args *uap, int32_t 
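/*
 * Editor's example (userspace, illustrative, not part of the patch): the
 * key scan at the top of semget() above is what makes IPC_CREAT | IPC_EXCL
 * fail with EEXIST for a key that is already bound; a common create-or-open
 * idiom. KEY is an arbitrary value chosen for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

#define KEY ((key_t)0x5eba)

int main(void)
{
    int id = semget(KEY, 1, IPC_CREAT | IPC_EXCL | 0600);

    if (id < 0 && errno == EEXIST) {   /* key already bound: just open it */
        id = semget(KEY, 1, 0600);
    }
    printf("semid = %d\n", id);
    return 0;
}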
*retval) /* * Initial pass thru sops to see what permissions are needed. */ - j = 0; /* permission needed */ - for (i = 0; i < nsops; i++) + j = 0; /* permission needed */ + for (i = 0; i < nsops; i++) { j |= (sops[i].sem_op == 0) ? SEM_R : SEM_A; + } /* * The MAC hook checks whether the thread has read (and possibly @@ -1134,8 +1161,9 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) * sopptr->sem_op value. */ eval = mac_sysvsem_check_semop(kauth_cred_get(), semakptr, j); - if (eval) + if (eval) { goto semopout; + } #endif /* @@ -1178,11 +1206,13 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) } else { semptr->semval += sopptr->sem_op; if (semptr->semval == 0 && - semptr->semzcnt > 0) + semptr->semzcnt > 0) { do_wakeup = 1; + } } - if (sopptr->sem_flg & SEM_UNDO) + if (sopptr->sem_flg & SEM_UNDO) { do_undos = 1; + } } else if (sopptr->sem_op == 0) { if (semptr->semval > 0) { #ifdef SEM_DEBUG @@ -1191,29 +1221,33 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) break; } } else { - if (semptr->semncnt > 0) + if (semptr->semncnt > 0) { do_wakeup = 1; + } semptr->semval += sopptr->sem_op; - if (sopptr->sem_flg & SEM_UNDO) + if (sopptr->sem_flg & SEM_UNDO) { do_undos = 1; + } } } /* * Did we get through the entire vector? */ - if (i >= nsops) + if (i >= nsops) { goto done; + } /* * No ... rollback anything that we've already done */ #ifdef SEM_DEBUG - printf("semop: rollback 0 through %d\n", i-1); + printf("semop: rollback 0 through %d\n", i - 1); #endif - for (j = 0; j < i; j++) + for (j = 0; j < i; j++) { semakptr->u.sem_base[sops[j].sem_num].semval -= sops[j].sem_op; + } /* * If the request that we couldn't satisfy has the @@ -1224,10 +1258,11 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) goto semopout; } - if (sopptr->sem_op == 0) + if (sopptr->sem_op == 0) { semptr->semzcnt++; - else + } else { semptr->semncnt++; + } #ifdef SEM_DEBUG printf("semop: good night!\n"); @@ -1237,9 +1272,9 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) * waiting for. We will get the lock back after we * wake up. */ - eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex , (PZERO - 4) | PCATCH, + eval = msleep((caddr_t)semakptr, &sysv_sem_subsys_mutex, (PZERO - 4) | PCATCH, "semwait", 0); - + #ifdef SEM_DEBUG printf("semop: good morning (eval=%d)!\n", eval); #endif @@ -1250,7 +1285,7 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) /* * IMPORTANT: while we were asleep, the semaphore array might * have been reallocated somewhere else (see grow_sema_array()). - * When we wake up, we have to re-lookup the semaphore + * When we wake up, we have to re-lookup the semaphore * structures and re-validate them. */ @@ -1262,7 +1297,7 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) * XXX POSIX: Third test this 'if' and 'EINTR' precedence may * fail testing; if so, we will need to revert this code. 
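/*
 * Editor's example (userspace, illustrative, not part of the patch): the
 * sem_op < 0 sleep path and the SEM_UNDO bookkeeping above are what this
 * classic lock/unlock pair relies on; SEM_UNDO is what lets semexit()
 * release the lock if the holder dies without ever calling the V() side.
 */
#include <sys/ipc.h>
#include <sys/sem.h>

static int
sem_lock(int semid)
{
    struct sembuf op = { 0, -1, SEM_UNDO };   /* P: wait for semval > 0 */
    return semop(semid, &op, 1);
}

static int
sem_unlock(int semid)
{
    struct sembuf op = { 0, 1, SEM_UNDO };    /* V: wake one waiter */
    return semop(semid, &op, 1);
}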
*/ - semakptr = &sema[semid]; /* sema may have been reallocated */ + semakptr = &sema[semid]; /* sema may have been reallocated */ if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0 || semakptr->u.sem_perm._seq != IPCID_TO_SEQ(uap->semid) || sopptr->sem_num >= semakptr->u.sem_nsems) { @@ -1278,7 +1313,7 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) #ifdef EIDRM eval = EIDRM; #else - eval = EINVAL; /* Ancient past */ + eval = EINVAL; /* Ancient past */ #endif } goto semopout; @@ -1291,10 +1326,11 @@ semop(struct proc *p, struct semop_args *uap, int32_t *retval) * we were sleeping, updating our sem_base pointer. */ semptr = &semakptr->u.sem_base[sopptr->sem_num]; - if (sopptr->sem_op == 0) + if (sopptr->sem_op == 0) { semptr->semzcnt--; - else + } else { semptr->semncnt--; + } if (eval != 0) { /* EINTR */ goto semopout; @@ -1313,15 +1349,18 @@ done: */ int adjval; - if ((sops[i].sem_flg & SEM_UNDO) == 0) + if ((sops[i].sem_flg & SEM_UNDO) == 0) { continue; + } adjval = sops[i].sem_op; - if (adjval == 0) + if (adjval == 0) { continue; + } eval = semundo_adjust(p, &supidx, semid, sops[i].sem_num, -adjval); - if (eval == 0) + if (eval == 0) { continue; + } /* * Oh-Oh! We ran out of either sem_undo's or undo's. @@ -1333,19 +1372,23 @@ done: * out of space as we roll things back out. */ for (j = i - 1; j >= 0; j--) { - if ((sops[j].sem_flg & SEM_UNDO) == 0) + if ((sops[j].sem_flg & SEM_UNDO) == 0) { continue; + } adjval = sops[j].sem_op; - if (adjval == 0) + if (adjval == 0) { continue; + } if (semundo_adjust(p, &supidx, semid, - sops[j].sem_num, adjval) != 0) + sops[j].sem_num, adjval) != 0) { panic("semop - can't undo undos"); + } } - for (j = 0; j < nsops; j++) + for (j = 0; j < nsops; j++) { semakptr->u.sem_base[sops[j].sem_num].semval -= sops[j].sem_op; + } #ifdef SEM_DEBUG printf("eval = %d from semundo_adjust\n", eval); @@ -1382,7 +1425,7 @@ done: eval = 0; semopout: SYSV_SEM_SUBSYS_UNLOCK(); - return(eval); + return eval; } /* @@ -1403,8 +1446,7 @@ semexit(struct proc *p) */ SYSV_SEM_SUBSYS_LOCK(); - if (!sem_pool) - { + if (!sem_pool) { SYSV_SEM_SUBSYS_UNLOCK(); return; } @@ -1418,12 +1460,14 @@ semexit(struct proc *p) for (supidx = &semu_list_idx; (suidx = *supidx) != -1; supidx = &suptr->un_next_idx) { suptr = SEMU(suidx); - if (suptr->un_proc == p) + if (suptr->un_proc == p) { break; + } } - if (suidx == -1) + if (suidx == -1) { goto unlock; + } #ifdef SEM_DEBUG printf("proc @%08x has undo structure with %d entries\n", p, @@ -1447,34 +1491,38 @@ semexit(struct proc *p) adjval = sueptr->une_adjval; semakptr = &sema[semid]; - if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) + if ((semakptr->u.sem_perm.mode & SEM_ALLOC) == 0) { panic("semexit - semid not allocated"); - if (semnum >= semakptr->u.sem_nsems) + } + if (semnum >= semakptr->u.sem_nsems) { panic("semexit - semnum out of range"); + } #ifdef SEM_DEBUG printf("semexit: %08x id=%d num=%d(adj=%d) ; sem=%d\n", - suptr->un_proc, - semid, - semnum, - adjval, - semakptr->u.sem_base[semnum].semval); + suptr->un_proc, + semid, + semnum, + adjval, + semakptr->u.sem_base[semnum].semval); #endif if (adjval < 0) { - if (semakptr->u.sem_base[semnum].semval < -adjval) + if (semakptr->u.sem_base[semnum].semval < -adjval) { semakptr->u.sem_base[semnum].semval = 0; - else + } else { semakptr->u.sem_base[semnum].semval += adjval; - } else + } + } else { semakptr->u.sem_base[semnum].semval += adjval; + } - /* Maybe we should build a list of semakptr's to wake - * up, finish all access to data structures, release the - * subsystem 
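/*
 * Editor's note (illustrative, not part of the patch): on process exit,
 * semexit() above applies each saved undo value but clamps at zero rather
 * than driving semval negative. The rule in isolation:
 */
static int
apply_undo(int semval, int adjval)
{
    if (adjval < 0 && semval < -adjval) {
        return 0;   /* would go negative: clamp at zero */
    }
    return semval + adjval;
}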
lock, and wake all the processes. Something - to think about. - */ + /* Maybe we should build a list of semakptr's to wake + * up, finish all access to data structures, release the + * subsystem lock, and wake all the processes. Something + * to think about. + */ #ifdef SEM_WAKEUP sem_wakeup((caddr_t)semakptr); #else @@ -1501,79 +1549,80 @@ semexit(struct proc *p) unlock: /* - * There is a semaphore leak (i.e. memory leak) in this code. - * We should be deleting the IPC_PRIVATE semaphores when they are - * no longer needed, and we dont. We would have to track which processes - * know about which IPC_PRIVATE semaphores, updating the list after - * every fork. We can't just delete them semaphore when the process - * that created it dies, because that process may well have forked - * some children. So we need to wait until all of it's children have - * died, and so on. Maybe we should tag each IPC_PRIVATE sempahore - * with the creating group ID, count the number of processes left in - * that group, and delete the semaphore when the group is gone. - * Until that code gets implemented we will leak IPC_PRIVATE semaphores. - * There is an upper bound on the size of our semaphore array, so - * leaking the semaphores should not work as a DOS attack. - * - * Please note that the original BSD code this file is based on had the - * same leaky semaphore problem. - */ + * There is a semaphore leak (i.e. memory leak) in this code. + * We should be deleting the IPC_PRIVATE semaphores when they are + * no longer needed, and we don't. We would have to track which processes + * know about which IPC_PRIVATE semaphores, updating the list after + * every fork. We can't just delete the semaphore when the process + * that created it dies, because that process may well have forked + * some children. So we need to wait until all of its children have + * died, and so on. Maybe we should tag each IPC_PRIVATE semaphore + * with the creating group ID, count the number of processes left in + * that group, and delete the semaphore when the group is gone. + * Until that code gets implemented we will leak IPC_PRIVATE semaphores. + * There is an upper bound on the size of our semaphore array, so + * leaking the semaphores should not work as a DOS attack. + * + * Please note that the original BSD code this file is based on had the + * same leaky semaphore problem. 
+ */ SYSV_SEM_SUBSYS_UNLOCK(); } /* (struct sysctl_oid *oidp, void *arg1, int arg2, \ - struct sysctl_req *req) */ + * struct sysctl_req *req) */ static int sysctl_seminfo(__unused struct sysctl_oid *oidp, void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { int error = 0; error = SYSCTL_OUT(req, arg1, sizeof(int)); - if (error || req->newptr == USER_ADDR_NULL) - return(error); + if (error || req->newptr == USER_ADDR_NULL) { + return error; + } SYSV_SEM_SUBSYS_LOCK(); /* Set the values only if shared memory is not initialised */ - if ((sem_pool == NULL) && - (sema == NULL) && - (semu == NULL) && - (semu_list_idx == -1)) { - if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) { - goto out; - } - } else + if ((sem_pool == NULL) && + (sema == NULL) && + (semu == NULL) && + (semu_list_idx == -1)) { + if ((error = SYSCTL_IN(req, arg1, sizeof(int)))) { + goto out; + } + } else { error = EINVAL; + } out: SYSV_SEM_SUBSYS_UNLOCK(); - return(error); - + return error; } /* SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW, 0, "SYSV"); */ extern struct sysctl_oid_list sysctl__kern_sysv_children; SYSCTL_PROC(_kern_sysv, OID_AUTO, semmni, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &limitseminfo.semmni, 0, &sysctl_seminfo ,"I","semmni"); + &limitseminfo.semmni, 0, &sysctl_seminfo, "I", "semmni"); SYSCTL_PROC(_kern_sysv, OID_AUTO, semmns, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &limitseminfo.semmns, 0, &sysctl_seminfo ,"I","semmns"); + &limitseminfo.semmns, 0, &sysctl_seminfo, "I", "semmns"); SYSCTL_PROC(_kern_sysv, OID_AUTO, semmnu, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &limitseminfo.semmnu, 0, &sysctl_seminfo ,"I","semmnu"); + &limitseminfo.semmnu, 0, &sysctl_seminfo, "I", "semmnu"); SYSCTL_PROC(_kern_sysv, OID_AUTO, semmsl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &limitseminfo.semmsl, 0, &sysctl_seminfo ,"I","semmsl"); - + &limitseminfo.semmsl, 0, &sysctl_seminfo, "I", "semmsl"); + SYSCTL_PROC(_kern_sysv, OID_AUTO, semume, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &limitseminfo.semume, 0, &sysctl_seminfo ,"I","semume"); + &limitseminfo.semume, 0, &sysctl_seminfo, "I", "semume"); static int IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { int error; int cursor; @@ -1581,8 +1630,8 @@ IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, struct user32_IPCS_command u32; struct user_IPCS_command u64; } ipcs; - struct user32_semid_ds semid_ds32; /* post conversion, 32 bit version */ - struct user64_semid_ds semid_ds64; /* post conversion, 64 bit version */ + struct user32_semid_ds semid_ds32; /* post conversion, 32 bit version */ + struct user64_semid_ds semid_ds64; /* post conversion, 64 bit version */ void *semid_dsp; size_t ipcs_sz; size_t semid_ds_sz; @@ -1598,44 +1647,46 @@ IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, /* Copy in the command structure */ if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) { - return(error); + return error; } - if (!IS_64BIT_PROCESS(p)) /* convert in place */ + if (!IS_64BIT_PROCESS(p)) { /* convert in place */ ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data); + } /* Let us version this interface... 
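/*
 * Editor's example (userspace, illustrative, not part of the patch): the
 * SYSCTL_PROC entries above publish the semaphore limits under kern.sysv.*;
 * from user space they read back with sysctlbyname():
 */
#include <stdio.h>
#include <sys/sysctl.h>

int main(void)
{
    int semmni = 0;
    size_t len = sizeof(semmni);

    if (sysctlbyname("kern.sysv.semmni", &semmni, &len, NULL, 0) == 0) {
        printf("kern.sysv.semmni = %d\n", semmni);
    }
    return 0;
}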
*/ if (ipcs.u64.ipcs_magic != IPCS_MAGIC) { - return(EINVAL); + return EINVAL; } SYSV_SEM_SUBSYS_LOCK(); - switch(ipcs.u64.ipcs_op) { - case IPCS_SEM_CONF: /* Obtain global configuration data */ + switch (ipcs.u64.ipcs_op) { + case IPCS_SEM_CONF: /* Obtain global configuration data */ if (ipcs.u64.ipcs_datalen != sizeof(struct seminfo)) { error = ERANGE; break; } - if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */ + if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */ error = EINVAL; break; } error = copyout(&seminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen); break; - case IPCS_SEM_ITER: /* Iterate over existing segments */ + case IPCS_SEM_ITER: /* Iterate over existing segments */ cursor = ipcs.u64.ipcs_cursor; if (cursor < 0 || cursor >= seminfo.semmni) { error = ERANGE; break; } - if (ipcs.u64.ipcs_datalen != (int)semid_ds_sz ) { + if (ipcs.u64.ipcs_datalen != (int)semid_ds_sz) { error = EINVAL; break; } - for( ; cursor < seminfo.semmni; cursor++) { - if (sema[cursor].u.sem_perm.mode & SEM_ALLOC) + for (; cursor < seminfo.semmni; cursor++) { + if (sema[cursor].u.sem_perm.mode & SEM_ALLOC) { break; + } continue; } if (cursor == seminfo.semmni) { @@ -1643,7 +1694,7 @@ IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, break; } - semid_dsp = &sema[cursor].u; /* default: 64 bit */ + semid_dsp = &sema[cursor].u; /* default: 64 bit */ /* * If necessary, convert the 64 bit kernel segment @@ -1664,8 +1715,9 @@ IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, /* update cursor */ ipcs.u64.ipcs_cursor = cursor + 1; - if (!IS_64BIT_PROCESS(p)) /* convert in place */ - ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t,ipcs.u64.ipcs_data); + if (!IS_64BIT_PROCESS(p)) { /* convert in place */ + ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data); + } error = SYSCTL_OUT(req, &ipcs, ipcs_sz); } @@ -1676,13 +1728,13 @@ IPCS_sem_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, break; } SYSV_SEM_SUBSYS_UNLOCK(); - return(error); + return error; } SYSCTL_DECL(_kern_sysv_ipcs); SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, sem, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - 0, 0, IPCS_sem_sysctl, - "S,IPCS_sem_command", - "ipcs sem command interface"); + 0, 0, IPCS_sem_sysctl, + "S,IPCS_sem_command", + "ipcs sem command interface"); #endif /* SYSV_SEM */ diff --git a/bsd/kern/sysv_shm.c b/bsd/kern/sysv_shm.c index 9a240bbf9..99ad6602e 100644 --- a/bsd/kern/sysv_shm.c +++ b/bsd/kern/sysv_shm.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
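/*
 * Editor's sketch (illustrative, not part of the patch): the "convert in
 * place" steps above widen the 32-bit user pointer inside a union shared
 * with the 64-bit command layout, so the rest of the handler only deals
 * with one format. The trick in miniature; field names here are mine:
 */
#include <stdint.h>
#include <stdio.h>

union cmd {
    struct { uint32_t magic; uint32_t data; } u32;
    struct { uint32_t magic; uint64_t data; } u64;
};

int main(void)
{
    union cmd c = { .u32 = { 0xbeef, 0x1234 } };

    /* read the narrow field before the wide store lands */
    c.u64.data = (uint64_t)c.u32.data;
    printf("0x%llx\n", (unsigned long long)c.u64.data);   /* 0x1234 */
    return 0;
}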
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */ @@ -62,7 +62,7 @@ * is included in support of clause 2.2 (b) of the Apple Public License, * Version 2.0. * Copyright (c) 2005-2006 SPARTA, Inc. -*/ + */ #include @@ -101,9 +101,9 @@ /* Uncomment this line to see MAC debugging output. */ /* #define MAC_DEBUG */ #if CONFIG_MACF_DEBUG -#define MPRINTF(a) printf a +#define MPRINTF(a) printf a #else -#define MPRINTF(a) +#define MPRINTF(a) #endif #if SYSV_SHM @@ -130,13 +130,13 @@ static sy_call_t *shmcalls[] = { (sy_call_t *)shmctl }; -#define SHMSEG_FREE 0x0200 -#define SHMSEG_REMOVED 0x0400 -#define SHMSEG_ALLOCATED 0x0800 -#define SHMSEG_WANTED 0x1000 +#define SHMSEG_FREE 0x0200 +#define SHMSEG_REMOVED 0x0400 +#define SHMSEG_ALLOCATED 0x0800 +#define SHMSEG_WANTED 0x1000 static int shm_last_free, shm_nused, shm_committed; -struct shmid_kernel *shmsegs; /* 64 bit version */ +struct shmid_kernel *shmsegs; /* 64 bit version */ static int shm_inited = 0; /* @@ -147,14 +147,14 @@ static int shm_inited = 0; * of anonymous memory. */ struct shm_handle { - void * shm_object; /* named entry for this chunk*/ - memory_object_size_t shm_handle_size; /* size of this chunk */ - struct shm_handle *shm_handle_next; /* next chunk */ + void * shm_object; /* named entry for this chunk*/ + memory_object_size_t shm_handle_size; /* size of this chunk */ + struct shm_handle *shm_handle_next; /* next chunk */ }; struct shmmap_state { - mach_vm_address_t va; /* user address */ - int shmid; /* segment id */ + mach_vm_address_t va; /* user address */ + int shmid; /* segment id */ }; static void shm_deallocate_segment(struct shmid_kernel *); @@ -163,11 +163,11 @@ static struct shmid_kernel *shm_find_segment_by_shmid(int); static int shm_delete_mapping(struct proc *, struct shmmap_state *, int); #ifdef __APPLE_API_PRIVATE -#define DEFAULT_SHMMAX (4 * 1024 * 1024) -#define DEFAULT_SHMMIN 1 -#define DEFAULT_SHMMNI 32 -#define DEFAULT_SHMSEG 8 -#define DEFAULT_SHMALL 1024 +#define DEFAULT_SHMMAX (4 * 1024 * 1024) +#define DEFAULT_SHMMIN 1 +#define DEFAULT_SHMMNI 32 +#define DEFAULT_SHMSEG 8 +#define DEFAULT_SHMALL 1024 struct shminfo shminfo = { DEFAULT_SHMMAX, @@ -188,9 +188,9 @@ void sysv_shm_lock_init(void); static __inline__ time_t sysv_shmtime(void) { - struct timeval tv; + struct timeval tv; microtime(&tv); - return (tv.tv_sec); + return tv.tv_sec; } /* @@ -210,7 +210,7 @@ shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out) out->shm_atime = in->shm_atime; out->shm_dtime = in->shm_dtime; out->shm_ctime = in->shm_ctime; - out->shm_internal = CAST_DOWN_EXPLICIT(int,in->shm_internal); + out->shm_internal = CAST_DOWN_EXPLICIT(int, in->shm_internal); } /* @@ -238,10 +238,12 @@ shm_find_segment_by_key(key_t key) { int i; - for (i = 0; i < shminfo.shmmni; i++) + for (i = 0; i < shminfo.shmmni; i++) { if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) && - shmsegs[i].u.shm_perm._key == key) + shmsegs[i].u.shm_perm._key == key) { return i; + } + } return -1; } @@ -252,13 +254,15 @@ shm_find_segment_by_shmid(int shmid) struct shmid_kernel *shmseg; segnum = IPCID_TO_IX(shmid); - if (segnum < 0 || segnum >= shminfo.shmmni) + if (segnum < 0 || segnum >= shminfo.shmmni) { return NULL; + } shmseg = &shmsegs[segnum]; if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED)) != SHMSEG_ALLOCATED || - shmseg->u.shm_perm._seq != 
IPCID_TO_SEQ(shmid)) + shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid)) { return NULL; + } return shmseg; } @@ -268,14 +272,14 @@ shm_deallocate_segment(struct shmid_kernel *shmseg) struct shm_handle *shm_handle, *shm_handle_next; mach_vm_size_t size; - for (shm_handle = CAST_DOWN(void *,shmseg->u.shm_internal); /* tunnel */ - shm_handle != NULL; - shm_handle = shm_handle_next) { + for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */ + shm_handle != NULL; + shm_handle = shm_handle_next) { shm_handle_next = shm_handle->shm_handle_next; mach_memory_entry_port_release(shm_handle->shm_object); - FREE((caddr_t) shm_handle, M_SHM); + FREE(shm_handle, M_SHM); } - shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */ + shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */ size = mach_vm_round_page(shmseg->u.shm_segsz); shm_committed -= btoc(size); shm_nused--; @@ -288,7 +292,7 @@ shm_deallocate_segment(struct shmid_kernel *shmseg) static int shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s, - int deallocate) + int deallocate) { struct shmid_kernel *shmseg; int segnum, result; @@ -296,11 +300,12 @@ shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s, segnum = IPCID_TO_IX(shmmap_s->shmid); shmseg = &shmsegs[segnum]; - size = mach_vm_round_page(shmseg->u.shm_segsz); /* XXX done for us? */ + size = mach_vm_round_page(shmseg->u.shm_segsz); /* XXX done for us? */ if (deallocate) { - result = mach_vm_deallocate(current_map(), shmmap_s->va, size); - if (result != KERN_SUCCESS) - return EINVAL; + result = mach_vm_deallocate(current_map(), shmmap_s->va, size); + if (result != KERN_SUCCESS) { + return EINVAL; + } } shmmap_s->shmid = SHMID_UNALLOCATED; shmseg->u.shm_dtime = sysv_shmtime(); @@ -331,7 +336,7 @@ shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval) } shmmap_s = (struct shmmap_state *)p->vm_shm; - if (shmmap_s == NULL) { + if (shmmap_s == NULL) { shmdtret = EINVAL; goto shmdt_out; } @@ -355,13 +360,15 @@ shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval) */ shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)]; shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr); - if (shmdtret) + if (shmdtret) { goto shmdt_out; + } #endif i = shm_delete_mapping(p, shmmap_s, 1); - if (i == 0) + if (i == 0) { *retval = 0; + } shmdtret = i; shmdt_out: SYSV_SHM_SUBSYS_UNLOCK(); @@ -372,17 +379,17 @@ int shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) { int error, i, flags; - struct shmid_kernel *shmseg; - struct shmmap_state *shmmap_s = NULL; - struct shm_handle *shm_handle; - mach_vm_address_t attach_va; /* attach address in/out */ - mach_vm_size_t map_size; /* size of map entry */ - mach_vm_size_t mapped_size; + struct shmid_kernel *shmseg; + struct shmmap_state *shmmap_s = NULL; + struct shm_handle *shm_handle; + mach_vm_address_t attach_va; /* attach address in/out */ + mach_vm_size_t map_size; /* size of map entry */ + mach_vm_size_t mapped_size; vm_prot_t prot; - size_t size; - kern_return_t rv; - int shmat_ret; - int vm_flags; + size_t size; + kern_return_t rv; + int shmat_ret; + int vm_flags; shmat_ret = 0; @@ -407,8 +414,8 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) /* +1 for the sentinel */ if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) { - shmat_ret = ENOMEM; - goto shmat_out; + shmat_ret = ENOMEM; + goto shmat_out; } MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK | M_NULL); @@ -434,7 +441,7 @@ shmat(struct proc *p, struct 
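/*
 * Editor's sketch (illustrative, not part of the patch):
 * shm_find_segment_by_shmid() above depends on the classic SysV id
 * encoding -- a table index in one half of the id and a per-slot sequence
 * (generation) number in the other, so a stale id for a recycled slot is
 * rejected. The usual 16/16 split, assumed here:
 */
static unsigned int ix_of(unsigned int id)  { return id & 0xffff; }
static unsigned int seq_of(unsigned int id) { return (id >> 16) & 0xffff; }
static unsigned int id_of(unsigned int ix, unsigned int seq)
{
    return (seq << 16) | (ix & 0xffff);
}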
shmat_args *uap, user_addr_t *retval) AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm); error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm, - (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W); + (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R | IPC_W); if (error) { shmat_ret = error; goto shmat_out; @@ -460,16 +467,18 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) map_size = mach_vm_round_page(shmseg->u.shm_segsz); prot = VM_PROT_READ; - if ((uap->shmflg & SHM_RDONLY) == 0) + if ((uap->shmflg & SHM_RDONLY) == 0) { prot |= VM_PROT_WRITE; + } flags = MAP_ANON | MAP_SHARED; - if (uap->shmaddr) + if (uap->shmaddr) { flags |= MAP_FIXED; + } attach_va = (mach_vm_address_t)uap->shmaddr; - if (uap->shmflg & SHM_RND) - attach_va &= ~(SHMLBA-1); - else if ((attach_va & (SHMLBA-1)) != 0) { + if (uap->shmflg & SHM_RND) { + attach_va &= ~(SHMLBA - 1); + } else if ((attach_va & (SHMLBA - 1)) != 0) { shmat_ret = EINVAL; goto shmat_out; } @@ -484,18 +493,18 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) /* first reserve enough space... */ rv = mach_vm_map_kernel(current_map(), - &attach_va, - map_size, - 0, - vm_flags, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - IPC_PORT_NULL, - 0, - FALSE, - VM_PROT_NONE, - VM_PROT_NONE, - VM_INHERIT_NONE); + &attach_va, + map_size, + 0, + vm_flags, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + IPC_PORT_NULL, + 0, + FALSE, + VM_PROT_NONE, + VM_PROT_NONE, + VM_INHERIT_NONE); if (rv != KERN_SUCCESS) { goto out; } @@ -504,14 +513,13 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) /* ... then map the shared memory over the reserved space */ for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal);/* tunnel */ - shm_handle != NULL; - shm_handle = shm_handle->shm_handle_next) { - + shm_handle != NULL; + shm_handle = shm_handle->shm_handle_next) { rv = vm_map_enter_mem_object( - current_map(), /* process map */ - &attach_va, /* attach address */ + current_map(), /* process map */ + &attach_va, /* attach address */ shm_handle->shm_handle_size, /* segment size */ - (mach_vm_offset_t)0, /* alignment mask */ + (mach_vm_offset_t)0, /* alignment mask */ VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, @@ -521,8 +529,9 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) prot, prot, VM_INHERIT_SHARE); - if (rv != KERN_SUCCESS) + if (rv != KERN_SUCCESS) { goto out; + } mapped_size += shm_handle->shm_handle_size; attach_va = attach_va + shm_handle->shm_handle_size; @@ -532,14 +541,14 @@ shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval) shmseg->u.shm_lpid = p->p_pid; shmseg->u.shm_atime = sysv_shmtime(); shmseg->u.shm_nattch++; - *retval = shmmap_s->va; /* XXX return -1 on error */ + *retval = shmmap_s->va; /* XXX return -1 on error */ shmat_ret = 0; goto shmat_out; out: if (mapped_size > 0) { (void) mach_vm_deallocate(current_map(), - shmmap_s->va, - mapped_size); + shmmap_s->va, + mapped_size); } switch (rv) { case KERN_INVALID_ADDRESS: @@ -597,7 +606,7 @@ shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval) goto shmctl_out; } - /* XXAUDIT: This is the perms BEFORE any change by this call. This + /* XXAUDIT: This is the perms BEFORE any change by this call. This * may not be what is desired. 
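/*
 * Editor's example (userspace, illustrative, not part of the patch): the
 * attach and detach paths above implement the usual shmget/shmat/shmdt
 * lifecycle; IPC_RMID marks the segment removed and it goes away once the
 * last attach is gone.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    char *p;

    if (id < 0) {
        perror("shmget");
        return 1;
    }
    p = shmat(id, NULL, 0);   /* let the kernel pick the address */
    if (p == (char *)-1) {
        perror("shmat");
        return 1;
    }
    strcpy(p, "hello");
    printf("%s\n", p);
    shmdt(p);                     /* drops shm_nattch */
    shmctl(id, IPC_RMID, NULL);   /* destroy once fully detached */
    return 0;
}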
*/ AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm); @@ -620,18 +629,18 @@ shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval) if (IS_64BIT_PROCESS(p)) { struct user_shmid_ds shmid_ds = {}; memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds)); - + /* Clear kernel reserved pointer before copying to user space */ shmid_ds.shm_internal = USER_ADDR_NULL; - + error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds)); } else { struct user32_shmid_ds shmid_ds32 = {}; shmid_ds_64to32(&shmseg->u, &shmid_ds32); - + /* Clear kernel reserved pointer before copying to user space */ shmid_ds32.shm_internal = (user32_addr_t)0; - + error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32)); } if (error) { @@ -707,8 +716,9 @@ shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval) */ shmseg->u.shm_perm.mode |= SHMSEG_WANTED; error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0); - if (error) + if (error) { return error; + } return EAGAIN; } @@ -720,20 +730,24 @@ shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval) * verify that it matches the requested mode; otherwise, we fail with * EACCES (access denied). */ - if ((shmseg->u.shm_perm.mode & mode) != mode) + if ((shmseg->u.shm_perm.mode & mode) != mode) { return EACCES; + } #if CONFIG_MACF error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg); - if (error) - return (error); + if (error) { + return error; + } #endif - if (uap->size && uap->size > shmseg->u.shm_segsz) + if (uap->size && uap->size > shmseg->u.shm_segsz) { return EINVAL; + } - if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) + if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) { return EEXIST; + } *retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm); return 0; @@ -741,7 +755,7 @@ shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval) static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, - int *retval) + int *retval) { int i, segnum, shmid; kauth_cred_t cred = kauth_cred_get(); @@ -753,25 +767,30 @@ shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, struct shm_handle *shm_handle_next, **shm_handle_next_p; if (uap->size <= 0 || - uap->size < (user_size_t)shminfo.shmmin || - uap->size > (user_size_t)shminfo.shmmax) { + uap->size < (user_size_t)shminfo.shmmin || + uap->size > (user_size_t)shminfo.shmmax) { return EINVAL; } - if (shm_nused >= shminfo.shmmni) /* any shmids left? */ + if (shm_nused >= shminfo.shmmni) { /* any shmids left? 
*/ return ENOSPC; + } if (mach_vm_round_page_overflow(uap->size, &total_size)) { return EINVAL; } - if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall) + if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall) { return ENOMEM; + } if (shm_last_free < 0) { - for (i = 0; i < shminfo.shmmni; i++) - if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) + for (i = 0; i < shminfo.shmmni; i++) { + if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) { break; - if (i == shminfo.shmmni) + } + } + if (i == shminfo.shmmni) { panic("shmseg free count inconsistent"); + } segnum = i; - } else { + } else { segnum = shm_last_free; shm_last_free = -1; } @@ -788,8 +807,8 @@ shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, shm_handle_next_p = NULL; for (alloc_size = 0; - alloc_size < total_size; - alloc_size += size) { + alloc_size < total_size; + alloc_size += size) { size = MIN(total_size - alloc_size, ANON_MAX_SIZE); kret = mach_make_memory_entry_64( VM_MAP_NULL, @@ -797,9 +816,10 @@ shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, (memory_object_offset_t) 0, MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT, (ipc_port_t *) &mem_object, 0); - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { goto out; - + } + MALLOC(shm_handle, struct shm_handle *, sizeof(struct shm_handle), M_SHM, M_WAITOK); if (shm_handle == NULL) { kret = KERN_NO_SPACE; @@ -846,14 +866,14 @@ shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, *retval = shmid; AUDIT_ARG(svipc_id, shmid); return 0; -out: +out: if (kret != KERN_SUCCESS) { - for (shm_handle = CAST_DOWN(void *,shmseg->u.shm_internal); /* tunnel */ - shm_handle != NULL; - shm_handle = shm_handle_next) { + for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */ + shm_handle != NULL; + shm_handle = shm_handle_next) { shm_handle_next = shm_handle->shm_handle_next; mach_memory_entry_port_release(shm_handle->shm_object); - FREE((caddr_t) shm_handle, M_SHM); + FREE(shm_handle, M_SHM); } shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */ } @@ -861,13 +881,12 @@ out: switch (kret) { case KERN_INVALID_ADDRESS: case KERN_NO_SPACE: - return (ENOMEM); + return ENOMEM; case KERN_PROTECTION_FAILURE: - return (EACCES); + return EACCES; default: - return (EINVAL); + return EINVAL; } - } int @@ -886,12 +905,13 @@ shmget(struct proc *p, struct shmget_args *uap, int32_t *retval) mode = uap->shmflg & ACCESSPERMS; if (uap->key != IPC_PRIVATE) { - again: +again: segnum = shm_find_segment_by_key(uap->key); if (segnum >= 0) { error = shmget_existing(uap, mode, segnum, retval); - if (error == EAGAIN) + if (error == EAGAIN) { goto again; + } shmget_ret = error; goto shmget_out; } @@ -912,30 +932,30 @@ shmget_out: * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl * * Parameters: p Process requesting the call - * uap User argument descriptor (see below) - * retval Return value of the selected shm call + * uap User argument descriptor (see below) + * retval Return value of the selected shm call * * Indirect parameters: uap->which msg call to invoke (index in array of shm calls) - * uap->a2 User argument descriptor - * + * uap->a2 User argument descriptor + * * Returns: 0 Success - * !0 Not success + * !0 Not success * * Implicit returns: retval Return value of the selected shm call * - * DEPRECATED: This interface should not be used to call the other SHM - * functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct - * usage is to call the other SHM functions directly. 
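/*
 * Editor's sketch (illustrative, not part of the patch):
 * shmget_allocate_segment() above builds a segment as a chain of memory-
 * entry chunks capped at ANON_MAX_SIZE. The loop shape reduced to plain
 * arithmetic, with CHUNK_MAX standing in for that cap:
 */
#include <stdio.h>

#define CHUNK_MAX (1024UL * 1024UL)   /* stand-in for ANON_MAX_SIZE */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    unsigned long total = 2600UL * 1024UL, done, size;

    for (done = 0; done < total; done += size) {
        size = MIN(total - done, CHUNK_MAX);
        printf("chunk of %lu bytes at offset %lu\n", size, done);
    }
    return 0;
}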
+ * DEPRECATED: This interface should not be used to call the other SHM + * functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct + * usage is to call the other SHM functions directly. */ int shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval) { - /* The routine that we are dispatching already does this */ - if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0])) + if (uap->which >= sizeof(shmcalls) / sizeof(shmcalls[0])) { return EINVAL; - return ((*shmcalls[uap->which])(p, &uap->a2, retval)); + } + return (*shmcalls[uap->which])(p, &uap->a2, retval); } /* @@ -1005,7 +1025,7 @@ shmcleanup(struct proc *p, int deallocate) } } - FREE((caddr_t)p->vm_shm, M_SHM); + FREE(p->vm_shm, M_SHM); p->vm_shm = NULL; SYSV_SHM_SUBSYS_UNLOCK(); } @@ -1070,32 +1090,32 @@ shminit(void) __private_extern__ void sysv_shm_lock_init( void ) { - sysv_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init(); - + sysv_shm_subsys_lck_grp = lck_grp_alloc_init("sysv_shm_subsys_lock", sysv_shm_subsys_lck_grp_attr); - + sysv_shm_subsys_lck_attr = lck_attr_alloc_init(); lck_mtx_init(&sysv_shm_subsys_mutex, sysv_shm_subsys_lck_grp, sysv_shm_subsys_lck_attr); } /* (struct sysctl_oid *oidp, void *arg1, int arg2, \ - struct sysctl_req *req) */ + * struct sysctl_req *req) */ static int sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { int error = 0; int sysctl_shminfo_ret = 0; - int64_t saved_shmmax; - int64_t saved_shmmin; + int64_t saved_shmmax; + int64_t saved_shmmin; int64_t saved_shmseg; int64_t saved_shmmni; int64_t saved_shmall; error = SYSCTL_OUT(req, arg1, sizeof(int64_t)); - if (error || req->newptr == USER_ADDR_NULL) - return(error); + if (error || req->newptr == USER_ADDR_NULL) { + return error; + } SYSV_SHM_SUBSYS_LOCK(); @@ -1104,11 +1124,11 @@ sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1, sysctl_shminfo_ret = EPERM; goto sysctl_shminfo_out; } - saved_shmmax = shminfo.shmmax; - saved_shmmin = shminfo.shmmin; - saved_shmseg = shminfo.shmseg; - saved_shmmni = shminfo.shmmni; - saved_shmall = shminfo.shmall; + saved_shmmax = shminfo.shmmax; + saved_shmmin = shminfo.shmmin; + saved_shmseg = shminfo.shmseg; + saved_shmmni = shminfo.shmmni; + saved_shmall = shminfo.shmall; if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) { sysctl_shminfo_ret = error; @@ -1122,38 +1142,34 @@ sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1, sysctl_shminfo_ret = EINVAL; goto sysctl_shminfo_out; } - } - else if (arg1 == &shminfo.shmmin) { + } else if (arg1 == &shminfo.shmmin) { if (shminfo.shmmin < 0) { shminfo.shmmin = saved_shmmin; sysctl_shminfo_ret = EINVAL; goto sysctl_shminfo_out; } + } else if (arg1 == &shminfo.shmseg) { + /* add a sanity check - 20847256 */ + if (shminfo.shmseg > INT32_MAX || shminfo.shmseg < 0) { + shminfo.shmseg = saved_shmseg; + sysctl_shminfo_ret = EINVAL; + goto sysctl_shminfo_out; + } + } else if (arg1 == &shminfo.shmmni) { + /* add a sanity check - 20847256 */ + if (shminfo.shmmni > INT32_MAX || shminfo.shmmni < 0) { + shminfo.shmmni = saved_shmmni; + sysctl_shminfo_ret = EINVAL; + goto sysctl_shminfo_out; + } + } else if (arg1 == &shminfo.shmall) { + /* add a sanity check - 20847256 */ + if (shminfo.shmall > INT32_MAX || shminfo.shmall < 0) { + shminfo.shmall = saved_shmall; + sysctl_shminfo_ret = EINVAL; + goto sysctl_shminfo_out; + } } - else if (arg1 == &shminfo.shmseg) { - /* add a sanity check - 20847256 */ - if (shminfo.shmseg > INT32_MAX || 
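/*
 * Editor's sketch (illustrative, not part of the patch): shmsys() above is
 * a bounds-checked jump table over the real entry points. The shape in
 * isolation; the handlers here are placeholders:
 */
typedef int (*handler_t)(int);

static int h_get(int a) { return a + 1; }
static int h_ctl(int a) { return a * 2; }

static handler_t calls[] = { h_get, h_ctl };

static int
dispatch(unsigned int which, int arg)
{
    if (which >= sizeof(calls) / sizeof(calls[0])) {
        return -1;   /* the kernel returns EINVAL here */
    }
    return calls[which](arg);
}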
shminfo.shmseg < 0) { - shminfo.shmseg = saved_shmseg; - sysctl_shminfo_ret = EINVAL; - goto sysctl_shminfo_out; - } - } - else if (arg1 == &shminfo.shmmni) { - /* add a sanity check - 20847256 */ - if (shminfo.shmmni > INT32_MAX || shminfo.shmmni < 0) { - shminfo.shmmni = saved_shmmni; - sysctl_shminfo_ret = EINVAL; - goto sysctl_shminfo_out; - } - } - else if (arg1 == &shminfo.shmall) { - /* add a sanity check - 20847256 */ - if (shminfo.shmall > INT32_MAX || shminfo.shmall < 0) { - shminfo.shmall = saved_shmall; - sysctl_shminfo_ret = EINVAL; - goto sysctl_shminfo_out; - } - } sysctl_shminfo_ret = 0; sysctl_shminfo_out: SYSV_SHM_SUBSYS_UNLOCK(); @@ -1162,7 +1178,7 @@ sysctl_shminfo_out: static int IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { int error; int cursor; @@ -1170,8 +1186,8 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, struct user32_IPCS_command u32; struct user_IPCS_command u64; } ipcs; - struct user32_shmid_ds shmid_ds32 = {}; /* post conversion, 32 bit version */ - struct user_shmid_ds shmid_ds; /* 64 bit version */ + struct user32_shmid_ds shmid_ds32 = {}; /* post conversion, 32 bit version */ + struct user_shmid_ds shmid_ds; /* 64 bit version */ void *shmid_dsp; size_t ipcs_sz = sizeof(struct user_IPCS_command); size_t shmid_ds_sz = sizeof(struct user_shmid_ds); @@ -1193,8 +1209,9 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, goto ipcs_shm_sysctl_out; } - if (!IS_64BIT_PROCESS(p)) /* convert in place */ + if (!IS_64BIT_PROCESS(p)) { /* convert in place */ ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data); + } /* Let us version this interface... */ if (ipcs.u64.ipcs_magic != IPCS_MAGIC) { @@ -1202,8 +1219,8 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, goto ipcs_shm_sysctl_out; } - switch(ipcs.u64.ipcs_op) { - case IPCS_SHM_CONF: /* Obtain global configuration data */ + switch (ipcs.u64.ipcs_op) { + case IPCS_SHM_CONF: /* Obtain global configuration data */ if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) { if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. 
*/ error = ENOMEM; @@ -1215,7 +1232,7 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen); break; - case IPCS_SHM_ITER: /* Iterate over existing segments */ + case IPCS_SHM_ITER: /* Iterate over existing segments */ cursor = ipcs.u64.ipcs_cursor; if (cursor < 0 || cursor >= shminfo.shmmni) { error = ERANGE; @@ -1225,9 +1242,10 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, error = EINVAL; break; } - for( ; cursor < shminfo.shmmni; cursor++) { - if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED) + for (; cursor < shminfo.shmmni; cursor++) { + if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED) { break; + } continue; } if (cursor == shminfo.shmmni) { @@ -1235,7 +1253,7 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, break; } - shmid_dsp = &shmsegs[cursor]; /* default: 64 bit */ + shmid_dsp = &shmsegs[cursor]; /* default: 64 bit */ /* * If necessary, convert the 64 bit kernel segment @@ -1243,17 +1261,17 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, */ if (!IS_64BIT_PROCESS(p)) { shmid_ds_64to32(shmid_dsp, &shmid_ds32); - + /* Clear kernel reserved pointer before copying to user space */ shmid_ds32.shm_internal = (user32_addr_t)0; - + shmid_dsp = &shmid_ds32; } else { memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds)); /* Clear kernel reserved pointer before copying to user space */ shmid_ds.shm_internal = USER_ADDR_NULL; - + shmid_dsp = &shmid_ds; } error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen); @@ -1261,10 +1279,11 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, /* update cursor */ ipcs.u64.ipcs_cursor = cursor + 1; - if (!IS_64BIT_PROCESS(p)) /* convert in place */ - ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t,ipcs.u64.ipcs_data); + if (!IS_64BIT_PROCESS(p)) { /* convert in place */ + ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data); + } - error = SYSCTL_OUT(req, &ipcs, ipcs_sz); + error = SYSCTL_OUT(req, &ipcs, ipcs_sz); } break; @@ -1274,32 +1293,32 @@ IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, } ipcs_shm_sysctl_out: SYSV_SHM_SUBSYS_UNLOCK(); - return(error); + return error; } SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV"); SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - &shminfo.shmmax, 0, &sysctl_shminfo ,"Q","shmmax"); + &shminfo.shmmax, 0, &sysctl_shminfo, "Q", "shmmax"); SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - &shminfo.shmmin, 0, &sysctl_shminfo ,"Q","shmmin"); + &shminfo.shmmin, 0, &sysctl_shminfo, "Q", "shmmin"); SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - &shminfo.shmmni, 0, &sysctl_shminfo ,"Q","shmmni"); + &shminfo.shmmni, 0, &sysctl_shminfo, "Q", "shmmni"); SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - &shminfo.shmseg, 0, &sysctl_shminfo ,"Q","shmseg"); + &shminfo.shmseg, 0, &sysctl_shminfo, "Q", "shmseg"); SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, - &shminfo.shmall, 0, &sysctl_shminfo ,"Q","shmall"); + &shminfo.shmall, 0, &sysctl_shminfo, "Q", "shmall"); SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS"); SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | 
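/*
 * Editor's sketch (illustrative, not part of the patch): the IPCS_*_ITER
 * ops above are resumable iteration -- user space passes a cursor in, the
 * kernel copies out the next allocated slot and hands back the cursor to
 * resume from. The scan in isolation:
 */
static int
next_used(const int *used, int nslots, int *cursor)
{
    int i;

    for (i = *cursor; i < nslots; i++) {
        if (used[i]) {
            *cursor = i + 1;   /* where the next call resumes */
            return i;
        }
    }
    return -1;   /* the kernel reports ERANGE when the scan runs out */
}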
CTLFLAG_LOCKED, - 0, 0, IPCS_shm_sysctl, - "S,IPCS_shm_command", - "ipcs shm command interface"); + 0, 0, IPCS_shm_sysctl, + "S,IPCS_shm_command", + "ipcs shm command interface"); #endif /* SYSV_SHM */ /* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */ diff --git a/bsd/kern/trace_codes b/bsd/kern/trace_codes index 2e1361f4f..3cbac2c73 100644 --- a/bsd/kern/trace_codes +++ b/bsd/kern/trace_codes @@ -1211,6 +1211,9 @@ 0x5310294 CPUPM_IDLE_EXIT1 0x5310298 CPUPM_PST_QOS_CONT 0x531029C CPUPM_MID +0x53102A0 CPUPM_PST_LOAD_CAPTURE +0x53102A4 CPUPM_PSTATE_HWP_MODE +0x53102A8 CPUPM_PST_WR_REASON 0x5330000 HIBERNATE 0x5330004 HIBERNATE_WRITE_IMAGE 0x5330008 HIBERNATE_MACHINE_INIT diff --git a/bsd/kern/tty.c b/bsd/kern/tty.c index ed7e79658..417357add 100644 --- a/bsd/kern/tty.c +++ b/bsd/kern/tty.c @@ -2,7 +2,7 @@ * Copyright (c) 1997-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -92,9 +92,9 @@ * only when _all_ openers leave open(). 
*/ #include -#define TTYDEFCHARS 1 +#define TTYDEFCHARS 1 #include -#undef TTYDEFCHARS +#undef TTYDEFCHARS #include #include #include @@ -112,28 +112,28 @@ #include #include -#include /* averunnable */ +#include /* averunnable */ #include #include -static lck_grp_t *tty_lck_grp; -static lck_grp_attr_t *tty_lck_grp_attr; +static lck_grp_t *tty_lck_grp; +static lck_grp_attr_t *tty_lck_grp_attr; static lck_attr_t *tty_lck_attr; __private_extern__ int ttnread(struct tty *tp); -static void ttyecho(int c, struct tty *tp); -static int ttyoutput(int c, struct tty *tp); -static void ttypend(struct tty *tp); -static void ttyretype(struct tty *tp); -static void ttyrub(int c, struct tty *tp); -static void ttyrubo(struct tty *tp, int count); -static void ttystop(struct tty *tp, int rw); -static void ttyunblock(struct tty *tp); -static int ttywflush(struct tty *tp); -static int proc_compare(proc_t p1, proc_t p2); +static void ttyecho(int c, struct tty *tp); +static int ttyoutput(int c, struct tty *tp); +static void ttypend(struct tty *tp); +static void ttyretype(struct tty *tp); +static void ttyrub(int c, struct tty *tp); +static void ttyrubo(struct tty *tp, int count); +static void ttystop(struct tty *tp, int rw); +static void ttyunblock(struct tty *tp); +static int ttywflush(struct tty *tp); +static int proc_compare(proc_t p1, proc_t p2); void ttyhold(struct tty *tp); -static void ttydeallocate(struct tty *tp); +static void ttydeallocate(struct tty *tp); static int isctty(proc_t p, struct tty *tp); static int isctty_sp(proc_t p, struct tty *tp, struct session *sessp); @@ -145,86 +145,86 @@ static int isctty_sp(proc_t p, struct tty *tp, struct session *sessp); * are 0 then the character needs no special processing on output; classes * other than 0 might be translated or (not currently) require delays. */ -#define E 0x00 /* Even parity. */ -#define O 0x80 /* Odd parity. */ -#define PARITY(c) (char_type[c] & O) +#define E 0x00 /* Even parity. */ +#define O 0x80 /* Odd parity. */ +#define PARITY(c) (char_type[c] & O) -#define ALPHA 0x40 /* Alpha or underscore. */ -#define ISALPHA(c) (char_type[(c) & TTY_CHARMASK] & ALPHA) +#define ALPHA 0x40 /* Alpha or underscore. */ +#define ISALPHA(c) (char_type[(c) & TTY_CHARMASK] & ALPHA) -#define CCLASSMASK 0x3f -#define CCLASS(c) (char_type[c] & CCLASSMASK) +#define CCLASSMASK 0x3f +#define CCLASS(c) (char_type[c] & CCLASSMASK) /* 0b10xxxxxx is the mask for UTF-8 continuations */ -#define CCONT(c) ((c & 0xc0) == 0x80) +#define CCONT(c) ((c & 0xc0) == 0x80) -#define BS BACKSPACE -#define CC CONTROL -#define CR RETURN -#define NA ORDINARY | ALPHA -#define NL NEWLINE -#define NO ORDINARY -#define TB TAB -#define VT VTAB +#define BS BACKSPACE +#define CC CONTROL +#define CR RETURN +#define NA ORDINARY | ALPHA +#define NL NEWLINE +#define NO ORDINARY +#define TB TAB +#define VT VTAB static u_char const char_type[] = { - E|CC, O|CC, O|CC, E|CC, O|CC, E|CC, E|CC, O|CC, /* nul - bel */ - O|BS, E|TB, E|NL, O|CC, E|VT, O|CR, O|CC, E|CC, /* bs - si */ - O|CC, E|CC, E|CC, O|CC, E|CC, O|CC, O|CC, E|CC, /* dle - etb */ - E|CC, O|CC, O|CC, E|CC, O|CC, E|CC, E|CC, O|CC, /* can - us */ - O|NO, E|NO, E|NO, O|NO, E|NO, O|NO, O|NO, E|NO, /* sp - ' */ - E|NO, O|NO, O|NO, E|NO, O|NO, E|NO, E|NO, O|NO, /* ( - / */ - E|NA, O|NA, O|NA, E|NA, O|NA, E|NA, E|NA, O|NA, /* 0 - 7 */ - O|NA, E|NA, E|NO, O|NO, E|NO, O|NO, O|NO, E|NO, /* 8 - ? 
*/ - O|NO, E|NA, E|NA, O|NA, E|NA, O|NA, O|NA, E|NA, /* @ - G */ - E|NA, O|NA, O|NA, E|NA, O|NA, E|NA, E|NA, O|NA, /* H - O */ - E|NA, O|NA, O|NA, E|NA, O|NA, E|NA, E|NA, O|NA, /* P - W */ - O|NA, E|NA, E|NA, O|NO, E|NO, O|NO, O|NO, O|NA, /* X - _ */ - E|NO, O|NA, O|NA, E|NA, O|NA, E|NA, E|NA, O|NA, /* ` - g */ - O|NA, E|NA, E|NA, O|NA, E|NA, O|NA, O|NA, E|NA, /* h - o */ - O|NA, E|NA, E|NA, O|NA, E|NA, O|NA, O|NA, E|NA, /* p - w */ - E|NA, O|NA, O|NA, E|NO, O|NO, E|NO, E|NO, O|CC, /* x - del */ + E | CC, O | CC, O | CC, E | CC, O | CC, E | CC, E | CC, O | CC, /* nul - bel */ + O | BS, E | TB, E | NL, O | CC, E | VT, O | CR, O | CC, E | CC, /* bs - si */ + O | CC, E | CC, E | CC, O | CC, E | CC, O | CC, O | CC, E | CC, /* dle - etb */ + E | CC, O | CC, O | CC, E | CC, O | CC, E | CC, E | CC, O | CC, /* can - us */ + O | NO, E | NO, E | NO, O | NO, E | NO, O | NO, O | NO, E | NO, /* sp - ' */ + E | NO, O | NO, O | NO, E | NO, O | NO, E | NO, E | NO, O | NO, /* ( - / */ + E | NA, O | NA, O | NA, E | NA, O | NA, E | NA, E | NA, O | NA, /* 0 - 7 */ + O | NA, E | NA, E | NO, O | NO, E | NO, O | NO, O | NO, E | NO, /* 8 - ? */ + O | NO, E | NA, E | NA, O | NA, E | NA, O | NA, O | NA, E | NA, /* @ - G */ + E | NA, O | NA, O | NA, E | NA, O | NA, E | NA, E | NA, O | NA, /* H - O */ + E | NA, O | NA, O | NA, E | NA, O | NA, E | NA, E | NA, O | NA, /* P - W */ + O | NA, E | NA, E | NA, O | NO, E | NO, O | NO, O | NO, O | NA, /* X - _ */ + E | NO, O | NA, O | NA, E | NA, O | NA, E | NA, E | NA, O | NA, /* ` - g */ + O | NA, E | NA, E | NA, O | NA, E | NA, O | NA, O | NA, E | NA, /* h - o */ + O | NA, E | NA, E | NA, O | NA, E | NA, O | NA, O | NA, E | NA, /* p - w */ + E | NA, O | NA, O | NA, E | NO, O | NO, E | NO, E | NO, O | CC, /* x - del */ /* * Meta chars; should be settable per character set; * for now, treat them all as normal characters. */ - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, - NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, NA, NA, NA, NA, }; -#undef BS -#undef CC -#undef CR -#undef NA -#undef NL -#undef NO -#undef TB -#undef VT +#undef BS +#undef CC +#undef CR +#undef NA +#undef NL +#undef NO +#undef TB +#undef VT /* Macros to clear/set/test flags. */ -#define SET(t, f) (t) |= (f) -#define CLR(t, f) (t) &= ~(f) -#define ISSET(t, f) ((t) & (f)) +#define SET(t, f) (t) |= (f) +#define CLR(t, f) (t) &= ~(f) +#define ISSET(t, f) ((t) & (f)) /* * Input control starts when we would not be able to fit the maximum * contents of the ping-pong buffers and finishes when we would be able * to fit that much plus 1/8 more. 
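/*
 * Editor's note (illustrative, not part of the patch): char_type[] above
 * packs two facts into each byte -- an output class in the low six bits
 * (CCLASSMASK) and the character's parity in the top bit -- so PARITY(c) is
 * a single table lookup. A portable way to compute the bit the table
 * hard-codes:
 */
#include <stdio.h>

static int
odd_parity(unsigned char c)
{
    int bits = 0;

    for (; c != 0; c >>= 1) {
        bits += c & 1;
    }
    return bits & 1;   /* 1 when the byte has an odd number of set bits */
}

int main(void)
{
    printf("'A': %s parity\n", odd_parity('A') ? "odd" : "even");   /* even */
    return 0;
}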
@@ -278,7 +278,7 @@ void
 tty_init(void)
 {
 	tty_lck_grp_attr = lck_grp_attr_alloc_init();
-	tty_lck_grp = lck_grp_alloc_init("tty", tty_lck_grp_attr);
+	tty_lck_grp = lck_grp_alloc_init("tty", tty_lck_grp_attr);
 	tty_lck_attr = lck_attr_alloc_init();
 }
@@ -297,7 +297,7 @@ tty_init(void)
 void
 tty_lock(struct tty *tp)
 {
-	TTY_LOCK_NOTOWNED(tp); /* debug assert */
+	TTY_LOCK_NOTOWNED(tp);  /* debug assert */
 	lck_mtx_lock(&tp->t_lock);
 }
@@ -316,7 +316,7 @@ tty_lock(struct tty *tp)
 void
 tty_unlock(struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */
 	lck_mtx_unlock(&tp->t_lock);
 }
@@ -330,18 +330,19 @@ tty_unlock(struct tty *tp)
 int
 ttyopen(dev_t device, struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	tp->t_dev = device;

 	if (!ISSET(tp->t_state, TS_ISOPEN)) {
 		SET(tp->t_state, TS_ISOPEN);
 		if (ISSET(tp->t_cflag, CLOCAL)) {
-			SET(tp->t_state, TS_CONNECTED); }
+			SET(tp->t_state, TS_CONNECTED);
+		}
 		bzero(&tp->t_winsize, sizeof(tp->t_winsize));
 	}

-	return (0);
+	return 0;
 }

 /*
 *
@@ -363,7 +364,7 @@ ttyclose(struct tty *tp)
 	struct session * oldsessp;
 	struct knote *kn;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	if (constty == tp) {
 		constty = NULL;
@@ -371,10 +372,10 @@ ttyclose(struct tty *tp)

 		/*
 		 * Closing current console tty; disable printing of console
-		 * messages at bottom-level driver.
+		 * messages at bottom-level driver.
 		 */
 		(*cdevsw[major(tp->t_dev)].d_ioctl)
-			(tp->t_dev, KMIOCDISABLCONS, NULL, 0, current_proc());
+		(tp->t_dev, KMIOCDISABLCONS, NULL, 0, current_proc());
 	}

 	ttyflush(tp, FREAD | FWRITE);
@@ -386,16 +387,19 @@ ttyclose(struct tty *tp)
 	oldsessp = tp->t_session;
 	tp->t_pgrp = NULL;
 	tp->t_session = NULL;
-	if (oldsessp != SESSION_NULL)
+	if (oldsessp != SESSION_NULL) {
 		oldsessp->s_ttypgrpid = NO_PID;
+	}
 	proc_list_unlock();
 	/* drop the reference on prev session and pgrp */
 	/* SAFE: All callers drop the lock on return */
 	tty_unlock(tp);
-	if (oldsessp != SESSION_NULL)
+	if (oldsessp != SESSION_NULL) {
 		session_rele(oldsessp);
-	if (oldpg != PGRP_NULL)
+	}
+	if (oldpg != PGRP_NULL) {
 		pg_rele(oldpg);
+	}
 	tty_lock(tp);
 	tp->t_state = 0;
 	SLIST_FOREACH(kn, &tp->t_wsel.si_note, kn_selnext) {
@@ -407,18 +411,18 @@ ttyclose(struct tty *tp)
 	}
 	selthreadclear(&tp->t_rsel);

-	return (0);
+	return 0;
 }

-#define FLUSHQ(q) { \
-	if ((q)->c_cc) \
-		ndflush(q, (q)->c_cc); \
+#define FLUSHQ(q) { \
+	if ((q)->c_cc) \
+	        ndflush(q, (q)->c_cc); \
 }

 /* Is 'c' a line delimiter ("break" character)? */
-#define TTBREAKC(c, lflag) \
-	((c) == '\n' || (((c) == cc[VEOF] || \
-	(c) == cc[VEOL] || ((c) == cc[VEOL2] && lflag & IEXTEN)) && \
+#define TTBREAKC(c, lflag) \
+	((c) == '\n' || (((c) == cc[VEOF] || \
+	(c) == cc[VEOL] || ((c) == cc[VEOL2] && lflag & IEXTEN)) && \
 	(c) != _POSIX_VDISABLE))

 /*
@@ -439,16 +443,17 @@ ttyinput(int c, struct tty *tp)
 	tcflag_t iflag, lflag;
 	cc_t *cc;
 	int i, err;
-	int retval = 0; /* default return value */
+	int retval = 0;                 /* default return value */
+
+	TTY_LOCK_OWNED(tp);             /* debug assert */

-	TTY_LOCK_OWNED(tp); /* debug assert */
-
 	/*
 	 * If input is pending take it first.
 	 */
 	lflag = tp->t_lflag;
-	if (ISSET(lflag, PENDIN))
+	if (ISSET(lflag, PENDIN)) {
 		ttypend(tp);
+	}
 	/*
 	 * Gather stats.
 	 */
@@ -471,8 +476,9 @@ ttyinput(int c, struct tty *tp)
 	if (tp->t_rawq.c_cc + tp->t_canq.c_cc > I_HIGH_WATER - 3 &&
 	    (!ISSET(lflag, ICANON) || tp->t_canq.c_cc != 0) &&
 	    (ISSET(tp->t_cflag, CRTS_IFLOW) || ISSET(iflag, IXOFF)) &&
-	    !ISSET(tp->t_state, TS_TBLOCK))
+	    !ISSET(tp->t_state, TS_TBLOCK)) {
 		ttyblock(tp);
+	}

 	/* Handle exceptional conditions (break, parity, framing). */
 	cc = tp->t_cc;
@@ -482,7 +488,7 @@
 	if (ISSET(err, TTY_BI)) {
 		if (ISSET(iflag, IGNBRK)) {
 			goto out;
-		}
+		}
 		if (ISSET(iflag, BRKINT)) {
 			ttyflush(tp, FREAD | FWRITE);
 			/* SAFE: All callers drop the lock on return */
@@ -491,29 +497,32 @@
 			tty_lock(tp);
 			goto endcase;
 		}
-		if (ISSET(iflag, PARMRK))
+		if (ISSET(iflag, PARMRK)) {
 			goto parmrk;
+		}
 	} else if ((ISSET(err, TTY_PE) && ISSET(iflag, INPCK))
-		|| ISSET(err, TTY_FE)) {
+	    || ISSET(err, TTY_FE)) {
 		if (ISSET(iflag, IGNPAR)) {
 			goto out;
-		}
-		else if (ISSET(iflag, PARMRK)) {
+		} else if (ISSET(iflag, PARMRK)) {
 parmrk:
 			if (tp->t_rawq.c_cc + tp->t_canq.c_cc >
-			    MAX_INPUT - 3)
+			    MAX_INPUT - 3) {
 				goto input_overflow;
+			}
 			(void)putc(0377 | TTY_QUOTE, &tp->t_rawq);
 			(void)putc(0 | TTY_QUOTE, &tp->t_rawq);
 			(void)putc(c | TTY_QUOTE, &tp->t_rawq);
 			goto endcase;
-		} else
+		} else {
 			c = 0;
+		}
 	}
 	}
-	if (!ISSET(tp->t_state, TS_TYPEN) && ISSET(iflag, ISTRIP))
+	if (!ISSET(tp->t_state, TS_TYPEN) && ISSET(iflag, ISTRIP)) {
 		CLR(c, 0x80);
+	}
 	if (!ISSET(lflag, EXTPROC)) {
 		/*
 		 * Check for literal nexting very first
@@ -540,20 +549,22 @@ parmrk:
 			if (ISSET(lflag, ECHOE)) {
 				(void)ttyoutput('^', tp);
 				(void)ttyoutput('\b', tp);
-			} else
+			} else {
 				ttyecho(c, tp);
+			}
 		}
 		SET(tp->t_state, TS_LNCH);
 		goto endcase;
 	}
 	if (CCEQ(cc[VDISCARD], c)) {
-		if (ISSET(lflag, FLUSHO))
+		if (ISSET(lflag, FLUSHO)) {
 			CLR(tp->t_lflag, FLUSHO);
-		else {
+		} else {
 			ttyflush(tp, FWRITE);
 			ttyecho(c, tp);
-			if (tp->t_rawq.c_cc + tp->t_canq.c_cc)
+			if (tp->t_rawq.c_cc + tp->t_canq.c_cc) {
 				ttyretype(tp);
+			}
 			SET(tp->t_lflag, FLUSHO);
 		}
 		goto startoutput;
@@ -564,8 +575,9 @@ parmrk:
 	 */
 	if (ISSET(lflag, ISIG)) {
 		if (CCEQ(cc[VINTR], c) || CCEQ(cc[VQUIT], c)) {
-			if (!ISSET(lflag, NOFLSH))
+			if (!ISSET(lflag, NOFLSH)) {
 				ttyflush(tp, FREAD | FWRITE);
+			}
 			ttyecho(c, tp);
 			/*
 			 * SAFE: All callers drop the lock on return;
@@ -583,8 +595,9 @@ parmrk:
 			goto endcase;
 		}
 		if (CCEQ(cc[VSUSP], c)) {
-			if (!ISSET(lflag, NOFLSH))
+			if (!ISSET(lflag, NOFLSH)) {
 				ttyflush(tp, FREAD);
+			}
 			ttyecho(c, tp);
 			/* SAFE: All callers drop the lock on return */
 			tty_unlock(tp);
@@ -600,19 +613,20 @@ parmrk:
 		if (CCEQ(cc[VSTOP], c)) {
 			if (!ISSET(tp->t_state, TS_TTSTOP)) {
 				SET(tp->t_state, TS_TTSTOP);
-				ttystop(tp, 0);
+				ttystop(tp, 0);
 				goto out;
 			}
 			if (!CCEQ(cc[VSTART], c)) {
 				goto out;
-			}
+			}
 			/*
 			 * if VSTART == VSTOP then toggle
 			 */
 			goto endcase;
 		}
-		if (CCEQ(cc[VSTART], c))
+		if (CCEQ(cc[VSTART], c)) {
 			goto restartoutput;
+		}
 	}
 	/*
 	 * IGNCR, ICRNL, & INLCR
@@ -620,11 +634,12 @@ parmrk:
 	if (c == '\r') {
 		if (ISSET(iflag, IGNCR)) {
 			goto out;
-		}
-		else if (ISSET(iflag, ICRNL))
+		} else if (ISSET(iflag, ICRNL)) {
 			c = '\n';
-	} else if (c == '\n' && ISSET(iflag, INLCR))
+		}
+	} else if (c == '\n' && ISSET(iflag, INLCR)) {
 		c = '\r';
+	}
 	}
 	if (!ISSET(tp->t_lflag, EXTPROC) && ISSET(lflag, ICANON)) {
 		/*
@@ -639,7 +654,7 @@ parmrk:
 			if (ISSET(iflag, IUTF8)) {
 				do {
 					ttyrub((c = unputc(&tp->t_rawq)), tp);
-				} while(tp->t_rawq.c_cc && CCONT(c));
+				} while (tp->t_rawq.c_cc && CCONT(c));
 			} else {
 				ttyrub(unputc(&tp->t_rawq), tp);
 			}
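The IUTF8 erase loop above pops bytes until it removes one that is not a UTF-8 continuation byte, so a single ERASE deletes a whole multi-byte character. A stand-alone sketch of the same CCONT() trick, assuming a plain byte buffer instead of the kernel's clists:

#include <stddef.h>

#define CCONT(c) (((c) & 0xc0) == 0x80)   /* 0b10xxxxxx: UTF-8 continuation */

/* Erase the last full UTF-8 character; returns the new length. */
static size_t erase_last_char_utf8(unsigned char *buf, size_t len)
{
    int c;

    if (len == 0) {
        return 0;
    }
    do {
        c = buf[--len];              /* pop one byte */
    } while (len > 0 && CCONT(c));   /* keep popping continuation bytes */
    return len;
}

For "e\xc3\xa9" (e followed by a two-byte e-acute), one call drops both 0xa9 and 0xc3 and stops at the ASCII 'e', mirroring what the do/while above does with unputc().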
@@ -652,14 +667,16 @@ parmrk:
 		if (CCEQ(cc[VKILL], c)) {
 			if (ISSET(lflag, ECHOKE) &&
 			    tp->t_rawq.c_cc == tp->t_rocount &&
-			    !ISSET(lflag, ECHOPRT))
-				while (tp->t_rawq.c_cc)
+			    !ISSET(lflag, ECHOPRT)) {
+				while (tp->t_rawq.c_cc) {
 					ttyrub(unputc(&tp->t_rawq), tp);
-			else {
+				}
+			} else {
 				ttyecho(c, tp);
 				if (ISSET(lflag, ECHOK) ||
-				    ISSET(lflag, ECHOKE))
+				    ISSET(lflag, ECHOKE)) {
 					ttyecho('\n', tp);
+				}
 				FLUSHQ(&tp->t_rawq);
 				tp->t_rocount = 0;
 			}
@@ -675,18 +692,21 @@ parmrk:
 			/*
 			 * erase whitespace
 			 */
-			while ((c = unputc(&tp->t_rawq)) == ' ' || c == '\t')
+			while ((c = unputc(&tp->t_rawq)) == ' ' || c == '\t') {
 				ttyrub(c, tp);
-			if (c == -1)
+			}
+			if (c == -1) {
 				goto endcase;
+			}
 			/*
 			 * erase last char of word and remember the
 			 * next chars type (for ALTWERASE)
 			 */
 			ttyrub(c, tp);
 			c = unputc(&tp->t_rawq);
-			if (c == -1)
+			if (c == -1) {
 				goto endcase;
+			}
 			if (c == ' ' || c == '\t') {
 				(void)putc(c, &tp->t_rawq);
 				goto endcase;
@@ -698,8 +718,9 @@ parmrk:
 			do {
 				ttyrub(c, tp);
 				c = unputc(&tp->t_rawq);
-				if (c == -1)
+				if (c == -1) {
 					goto endcase;
+				}
 			} while (c != ' ' && c != '\t' &&
 			    (!ISSET(lflag, ALTWERASE) || ISALPHA(c) == ctype));
 			(void)putc(c, &tp->t_rawq);
@@ -722,8 +743,9 @@ parmrk:
 				tty_pgsignal(tp, SIGINFO, 1);
 				tty_lock(tp);
 			}
-			if (!ISSET(lflag, NOKERNINFO))
+			if (!ISSET(lflag, NOKERNINFO)) {
 				ttyinfo_locked(tp);
+			}
 			goto endcase;
 		}
 	}
@@ -733,15 +755,17 @@ parmrk:
 	if (tp->t_rawq.c_cc + tp->t_canq.c_cc >= MAX_INPUT) {
input_overflow:
 		if (ISSET(iflag, IMAXBEL)) {
-			if (tp->t_outq.c_cc < tp->t_hiwat)
+			if (tp->t_outq.c_cc < tp->t_hiwat) {
 				(void)ttyoutput(CTRL('g'), tp);
+			}
 		}
 		goto endcase;
 	}

-	if ( c == 0377 && ISSET(iflag, PARMRK) && !ISSET(iflag, ISTRIP)
-	     && ISSET(iflag, IGNBRK|IGNPAR) != (IGNBRK|IGNPAR))
+	if (c == 0377 && ISSET(iflag, PARMRK) && !ISSET(iflag, ISTRIP)
+	    && ISSET(iflag, IGNBRK | IGNPAR) != (IGNBRK | IGNPAR)) {
 		(void)putc(0377 | TTY_QUOTE, &tp->t_rawq);
+	}

 	/*
 	 * Put data char in q for user and
@@ -757,8 +781,9 @@ input_overflow:
 		tp->t_rocount = 0;
 		catq(&tp->t_rawq, &tp->t_canq);
 		ttwakeup(tp);
-	} else if (tp->t_rocount++ == 0)
+	} else if (tp->t_rocount++ == 0) {
 		tp->t_rocol = tp->t_column;
+	}
 	if (ISSET(tp->t_state, TS_ERASE)) {
 		/*
 		 * end of prterase \.../
@@ -786,8 +811,8 @@ endcase:
 	 */
 	if (ISSET(tp->t_state, TS_TTSTOP) &&
 	    !ISSET(iflag, IXANY) && cc[VSTART] != cc[VSTOP]) {
-	    	goto out;
-	}
+		goto out;
+	}

restartoutput:
 	CLR(tp->t_lflag, FLUSHO);
@@ -798,7 +823,7 @@ startoutput:
 	retval = ttstart(tp);

out:
-	return (retval);
+	return retval;
 }
@@ -824,17 +849,19 @@ ttyoutput(int c, struct tty *tp)
 	tcflag_t oflag;
 	int col;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	oflag = tp->t_oflag;
 	if (!ISSET(oflag, OPOST)) {
-		if (ISSET(tp->t_lflag, FLUSHO))
-			return (-1);
-		if (putc(c, &tp->t_outq))
-			return (c);
+		if (ISSET(tp->t_lflag, FLUSHO)) {
+			return -1;
+		}
+		if (putc(c, &tp->t_outq)) {
+			return c;
+		}
 		tk_nout++;
 		tp->t_outcc++;
-		return (-1);
+		return -1;
 	}
 	/*
 	 * Do tab expansion if OXTABS is set.  Special case if we external
@@ -852,10 +879,11 @@ ttyoutput(int c, struct tty *tp)
 			tp->t_outcc += c;
 		}
 		tp->t_column += c;
-		return (c == col ? -1 : '\t');
+		return c == col ? -1 : '\t';
+	}
+	if (c == CEOT && ISSET(oflag, ONOEOT)) {
+		return -1;
 	}
-	if (c == CEOT && ISSET(oflag, ONOEOT))
-		return (-1);

 	/*
 	 * Newline translation: if ONLCR is set,
@@ -864,25 +892,30 @@ ttyoutput(int c, struct tty *tp)
 	if (c == '\n' && ISSET(tp->t_oflag, ONLCR)) {
 		tk_nout++;
 		tp->t_outcc++;
-		if (putc('\r', &tp->t_outq))
-			return (c);
-	}
-	/* If OCRNL is set, translate "\r" into "\n". */
-	else if (c == '\r' && ISSET(tp->t_oflag, OCRNL))
-		c = '\n';
-	/* If ONOCR is set, don't transmit CRs when on column 0. */
-	else if (c == '\r' && ISSET(tp->t_oflag, ONOCR) && tp->t_column == 0)
-		return (-1);
+		if (putc('\r', &tp->t_outq)) {
+			return c;
+		}
+	}
+	/* If OCRNL is set, translate "\r" into "\n". */
+	else if (c == '\r' && ISSET(tp->t_oflag, OCRNL)) {
+		c = '\n';
+	}
+	/* If ONOCR is set, don't transmit CRs when on column 0. */
+	else if (c == '\r' && ISSET(tp->t_oflag, ONOCR) && tp->t_column == 0) {
+		return -1;
+	}

 	tk_nout++;
 	tp->t_outcc++;
-	if (!ISSET(tp->t_lflag, FLUSHO) && putc(c, &tp->t_outq))
-		return (c);
+	if (!ISSET(tp->t_lflag, FLUSHO) && putc(c, &tp->t_outq)) {
+		return c;
+	}

 	col = tp->t_column;
 	switch (CCLASS(c)) {
 	case BACKSPACE:
-		if (col > 0)
+		if (col > 0) {
 			--col;
+		}
 		break;
 	case CONTROL:
 		break;
@@ -898,7 +931,7 @@ ttyoutput(int c, struct tty *tp)
 		break;
 	}
 	tp->t_column = col;
-	return (-1);
+	return -1;
 }

 /*
@@ -954,18 +987,18 @@ ttyclrpgrphup(struct tty *tp)
 * Notes:	This is supported to ensure the line discipline interfaces
 *		all have the same locking semantics.
 *
-*		This function is called from
+*		This function is called from
 */
 int
 ttioctl(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 {
-	int	retval;
+	int     retval;

 	tty_lock(tp);
 	retval = ttioctl_locked(tp, cmd, data, flag, p);
 	tty_unlock(tp);

-	return (retval);
+	return retval;
 }
@@ -1036,7 +1069,7 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 	struct session *sessp, *oldsessp;
 	struct tty *oldtp;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	ut = (struct uthread *)get_bsdthread_info(current_thread());
 	/* If the ioctl involves modification, signal if in the background. */
@@ -1100,28 +1133,30 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		break;
 	}

-	switch (cmd) { /* Process the ioctl. */
-	case FIOASYNC: /* set/clear async i/o */
-		if (*(int *)data)
+	switch (cmd) {                  /* Process the ioctl. */
+	case FIOASYNC:                  /* set/clear async i/o */
+		if (*(int *)data) {
 			SET(tp->t_state, TS_ASYNC);
-		else
+		} else {
 			CLR(tp->t_state, TS_ASYNC);
+		}
 		break;
-	case FIONBIO: /* set/clear non-blocking i/o */
-		break; /* XXX: delete. */
-	case FIONREAD: /* get # bytes to read */
+	case FIONBIO:                   /* set/clear non-blocking i/o */
+		break;                  /* XXX: delete. */
+	case FIONREAD:                  /* get # bytes to read */
 		*(int *)data = ttnread(tp);
 		break;
-	case TIOCEXCL: /* set exclusive use of tty */
+	case TIOCEXCL:                  /* set exclusive use of tty */
 		SET(tp->t_state, TS_XCLUDE);
 		break;
-	case TIOCFLUSH: { /* flush buffers */
+	case TIOCFLUSH: {               /* flush buffers */
 		int flags = *(int *)data;

-		if (flags == 0)
+		if (flags == 0) {
 			flags = FREAD | FWRITE;
-		else
+		} else {
 			flags &= FREAD | FWRITE;
+		}
 		ttyflush(tp, flags);
 		break;
 	}
@@ -1131,54 +1166,56 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		/* No break - Fall through to BSD code */
 	}
-	case TIOCCONS: { /* become virtual console */
+	case TIOCCONS: {                /* become virtual console */
 		if (*(int *)data) {
 			if (constty && constty != tp &&
 			    ISSET(constty->t_state, TS_CONNECTED)) {
 				error = EBUSY;
 				goto out;
 			}
-			if ( (error = suser(kauth_cred_get(), &p->p_acflag)) )
+			if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
 				goto out;
+			}
 			constty = tp;
 		} else if (tp == constty) {
 			constty = NULL;
 		}
 		if (constty) {
 			(*cdevsw[major(constty->t_dev)].d_ioctl)
-				(constty->t_dev, KMIOCDISABLCONS, NULL, 0, p);
+			(constty->t_dev, KMIOCDISABLCONS, NULL, 0, p);
 		} else {
 			(*cdevsw[major(tp->t_dev)].d_ioctl)
-				(tp->t_dev, KMIOCDISABLCONS, NULL, 0, p);
+			(tp->t_dev, KMIOCDISABLCONS, NULL, 0, p);
 		}
 		break;
 	}
-	case TIOCDRAIN: /* wait till output drained */
+	case TIOCDRAIN:                 /* wait till output drained */
 		error = ttywait(tp);
-		if (error)
+		if (error) {
 			goto out;
+		}
 		break;
-	case TIOCGETA_32: /* get termios struct */
+	case TIOCGETA_32:               /* get termios struct */
 #ifdef __LP64__
 		termios64to32((struct user_termios *)&tp->t_termios, (struct termios32 *)data);
 #else
 		bcopy(&tp->t_termios, data, sizeof(struct termios));
 #endif
 		break;
-	case TIOCGETA_64: /* get termios struct */
+	case TIOCGETA_64:               /* get termios struct */
 #ifdef __LP64__
 		bcopy(&tp->t_termios, data, sizeof(struct termios));
 #else
 		termios32to64((struct termios32 *)&tp->t_termios, (struct user_termios *)data);
#endif
 		break;
-	case TIOCGETD: /* get line discipline */
+	case TIOCGETD:                  /* get line discipline */
 		*(int *)data = tp->t_line;
 		break;
-	case TIOCGWINSZ: /* get window size */
+	case TIOCGWINSZ:                /* get window size */
 		*(struct winsize *)data = tp->t_winsize;
 		break;
-	case TIOCGPGRP: /* get pgrp of tty */
+	case TIOCGPGRP:                 /* get pgrp of tty */
 		if (!isctty(p, tp)) {
 			error = ENOTTY;
 			goto out;
@@ -1186,54 +1223,56 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		*(int *)data = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PID;
 		break;
#ifdef TIOCHPCL
-	case TIOCHPCL: /* hang up on last close */
+	case TIOCHPCL:                  /* hang up on last close */
 		SET(tp->t_cflag, HUPCL);
 		break;
#endif
-	case TIOCNXCL: /* reset exclusive use of tty */
+	case TIOCNXCL:                  /* reset exclusive use of tty */
 		CLR(tp->t_state, TS_XCLUDE);
 		break;
-	case TIOCOUTQ: /* output queue size */
+	case TIOCOUTQ:                  /* output queue size */
 		*(int *)data = tp->t_outq.c_cc;
 		break;
-	case TIOCSETA_32: /* set termios struct */
+	case TIOCSETA_32:               /* set termios struct */
 	case TIOCSETA_64:
-	case TIOCSETAW_32: /* drain output, set */
+	case TIOCSETAW_32:              /* drain output, set */
 	case TIOCSETAW_64:
-	case TIOCSETAF_32: /* drn out, fls in, set */
+	case TIOCSETAF_32:              /* drn out, fls in, set */
 	case TIOCSETAF_64:
-	{ /* drn out, fls in, set */
+	{                               /* drn out, fls in, set */
 		struct termios *t = (struct termios *)data;
 		struct termios lcl_termios;

#ifdef __LP64__
-		if (cmd==TIOCSETA_32 || cmd==TIOCSETAW_32 || cmd==TIOCSETAF_32) {
+		if (cmd == TIOCSETA_32 || cmd == TIOCSETAW_32 || cmd == TIOCSETAF_32) {
 			termios32to64((struct termios32 *)data, (struct user_termios *)&lcl_termios);
 			t = &lcl_termios;
 		}
#else
-		if (cmd==TIOCSETA_64 || cmd==TIOCSETAW_64 || cmd==TIOCSETAF_64) {
+		if (cmd == TIOCSETA_64 || cmd == TIOCSETAW_64 || cmd == TIOCSETAF_64) {
 			termios64to32((struct user_termios *)data, (struct termios32 *)&lcl_termios);
 			t = &lcl_termios;
 		}
#endif
#if 0
-		/* XXX bogus test; always false */
+		/* XXX bogus test; always false */
 		if (t->c_ispeed < 0 || t->c_ospeed < 0) {
 			error = EINVAL;
 			goto out;
 		}
-#endif /* 0 - leave in; may end up being a conformance issue */
+#endif  /* 0 - leave in; may end up being a conformance issue */
-		if (t->c_ispeed == 0)
+		if (t->c_ispeed == 0) {
 			t->c_ispeed = t->c_ospeed;
+		}
 		if (cmd == TIOCSETAW_32 || cmd == TIOCSETAF_32 ||
 		    cmd == TIOCSETAW_64 || cmd == TIOCSETAF_64) {
 			error = ttywait(tp);
 			if (error) {
 				goto out;
 			}
-			if (cmd == TIOCSETAF_32 || cmd == TIOCSETAF_64)
+			if (cmd == TIOCSETAF_32 || cmd == TIOCSETAF_64) {
 				ttyflush(tp, FREAD);
+			}
 		}
 		if (!ISSET(t->c_cflag, CIGNORE)) {
 			/*
@@ -1257,11 +1296,12 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 				ttwwakeup(tp);
 			}
 			if ((ISSET(tp->t_state, TS_CARR_ON) ||
-			    ISSET(t->c_cflag, CLOCAL)) &&
-			    !ISSET(tp->t_state, TS_ZOMBIE))
+			    ISSET(t->c_cflag, CLOCAL)) &&
+			    !ISSET(tp->t_state, TS_ZOMBIE)) {
 				SET(tp->t_state, TS_CONNECTED);
-			else
+			} else {
 				CLR(tp->t_state, TS_CONNECTED);
+			}
 			tp->t_cflag = t->c_cflag;
 			tp->t_ispeed = t->c_ispeed;
 			tp->t_ospeed = t->c_ospeed;
@@ -1269,9 +1309,9 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		}
 		if (ISSET(t->c_lflag, ICANON) != ISSET(tp->t_lflag, ICANON) &&
 		    cmd != TIOCSETAF_32 && cmd != TIOCSETAF_64) {
-			if (ISSET(t->c_lflag, ICANON))
+			if (ISSET(t->c_lflag, ICANON)) {
 				SET(tp->t_lflag, PENDIN);
-			else {
+			} else {
 				/*
 				 * XXX we really shouldn't allow toggling
 				 * ICANON while we're in a non-termios line
@@ -1279,12 +1319,12 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 				 * panicing for a null queue.
 				 */
 				if (tp->t_rawq.c_cs && tp->t_canq.c_cs) {
-					struct clist tq;
+					struct clist tq;

-					catq(&tp->t_rawq, &tp->t_canq);
-					tq = tp->t_rawq;
-					tp->t_rawq = tp->t_canq;
-					tp->t_canq = tq;
+					catq(&tp->t_rawq, &tp->t_canq);
+					tq = tp->t_rawq;
+					tp->t_rawq = tp->t_canq;
+					tp->t_canq = tq;
 				}
 				CLR(tp->t_lflag, PENDIN);
 			}
@@ -1295,18 +1335,20 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		/*
 		 * Make the EXTPROC bit read only.
 		 */
-		if (ISSET(tp->t_lflag, EXTPROC))
+		if (ISSET(tp->t_lflag, EXTPROC)) {
 			SET(t->c_lflag, EXTPROC);
-		else
+		} else {
 			CLR(t->c_lflag, EXTPROC);
+		}
 		tp->t_lflag = t->c_lflag | ISSET(tp->t_lflag, PENDIN);
 		if (t->c_cc[VMIN] != tp->t_cc[VMIN] ||
-		    t->c_cc[VTIME] != tp->t_cc[VTIME])
+		    t->c_cc[VTIME] != tp->t_cc[VTIME]) {
 			ttwakeup(tp);
+		}
 		bcopy(t->c_cc, tp->t_cc, sizeof(t->c_cc));
 		break;
 	}
-	case TIOCSETD: { /* set line discipline */
+	case TIOCSETD: {                /* set line discipline */
 		int t = *(int *)data;
 		dev_t device = tp->t_dev;
@@ -1330,7 +1372,7 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		}
 		break;
 	}
-	case TIOCSTART: /* start output, like ^Q */
+	case TIOCSTART:                 /* start output, like ^Q */
 		if (ISSET(tp->t_state, TS_TTSTOP) ||
 		    ISSET(tp->t_lflag, FLUSHO)) {
 			CLR(tp->t_lflag, FLUSHO);
@@ -1338,7 +1380,7 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 			ttstart(tp);
 		}
 		break;
-	case TIOCSTI: /* simulate terminal input */
+	case TIOCSTI:                   /* simulate terminal input */
 		if (suser(kauth_cred_get(), NULL) && (flag & FREAD) == 0) {
 			error = EPERM;
 			goto out;
@@ -1349,10 +1391,10 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		}
 		(*linesw[tp->t_line].l_rint)(*(u_char *)data, tp);
 		break;
-	case TIOCSTOP: /* stop output, like ^S */
+	case TIOCSTOP:                  /* stop output, like ^S */
 		if (!ISSET(tp->t_state, TS_TTSTOP)) {
 			SET(tp->t_state, TS_TTSTOP);
-			ttystop(tp, 0);
+			ttystop(tp, 0);
 		}
 		break;
 	case TIOCIXON:
@@ -1361,7 +1403,7 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 	case TIOCIXOFF:
 		ttyblock(tp);
 		break;
-	case TIOCSCTTY: /* become controlling tty */
+	case TIOCSCTTY:                 /* become controlling tty */
 		/* Session ctty vnode pointer set in vnode layer. */
 		sessp = proc_session(p);
 		if (sessp == SESSION_NULL) {
@@ -1418,8 +1460,9 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		proc_list_lock();
 		oldsessp = tp->t_session;
 		oldpg = tp->t_pgrp;
-		if (oldsessp != SESSION_NULL)
+		if (oldsessp != SESSION_NULL) {
 			oldsessp->s_ttypgrpid = NO_PID;
+		}
 		/* do not drop refs on sessp and pg as tp holds them */
 		tp->t_session = sessp;
 		tp->t_pgrp = pg;
@@ -1428,35 +1471,40 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		/* SAFE: All callers drop the lock on return */
 		tty_unlock(tp);
 		/* drop the reference on prev session and pgrp */
-		if (oldsessp != SESSION_NULL)
+		if (oldsessp != SESSION_NULL) {
 			session_rele(oldsessp);
-		if (oldpg != PGRP_NULL)
+		}
+		if (oldpg != PGRP_NULL) {
 			pg_rele(oldpg);
-		if (NULL != oldtp)
+		}
+		if (NULL != oldtp) {
 			ttyfree(oldtp);
+		}
 		tty_lock(tp);
 		break;
-	case TIOCSPGRP: { /* set pgrp of tty */
+	case TIOCSPGRP: {               /* set pgrp of tty */
 		struct pgrp *pgrp = PGRP_NULL;

 		sessp = proc_session(p);
 		if (!isctty_sp(p, tp, sessp)) {
-			if (sessp != SESSION_NULL)
+			if (sessp != SESSION_NULL) {
 				session_rele(sessp);
+			}
 			error = ENOTTY;
 			goto out;
-		}
-		else if ((pgrp = pgfind(*(int *)data)) == PGRP_NULL) {
-			if (sessp != SESSION_NULL)
+		} else if ((pgrp = pgfind(*(int *)data)) == PGRP_NULL) {
+			if (sessp != SESSION_NULL) {
 				session_rele(sessp);
+			}
 			error = EINVAL;
 			goto out;
-		} else if (pgrp->pg_session != sessp) {
+		} else if (pgrp->pg_session != sessp) {
 			/* SAFE: All callers drop the lock on return */
 			tty_unlock(tp);
-			if (sessp != SESSION_NULL)
+			if (sessp != SESSION_NULL) {
 				session_rele(sessp);
+			}
 			pg_rele(pgrp);
 			tty_lock(tp);
 			error = EPERM;
@@ -1468,8 +1516,9 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		 * case.
 		 */
 		if (ISSET(tp->t_state, TS_PGRPHUP)) {
-			if (sessp != SESSION_NULL)
+			if (sessp != SESSION_NULL) {
 				session_rele(sessp);
+			}
 			pg_rele(pgrp);
 			error = EPERM;
 			goto out;
@@ -1498,19 +1547,21 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		/* SAFE: All callers drop the lock on return */
 		tty_unlock(tp);
-		if (oldpg != PGRP_NULL)
+		if (oldpg != PGRP_NULL) {
 			pg_rele(oldpg);
-		if (sessp != SESSION_NULL)
+		}
+		if (sessp != SESSION_NULL) {
 			session_rele(sessp);
+		}
 		tty_lock(tp);
 		break;
 	}
-	case TIOCSTAT: /* simulate control-T */
+	case TIOCSTAT:                  /* simulate control-T */
 		ttyinfo_locked(tp);
 		break;
-	case TIOCSWINSZ: /* set window size */
+	case TIOCSWINSZ:                /* set window size */
 		if (bcmp((caddr_t)&tp->t_winsize, data,
-		    sizeof (struct winsize))) {
+		    sizeof(struct winsize))) {
 			tp->t_winsize = *(struct winsize *)data;
 			/* SAFE: All callers drop the lock on return */
 			tty_unlock(tp);
@@ -1529,7 +1580,7 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
 		break;
 	case TIOCGDRAINWAIT:
 		*(int *)data = tp->t_timeout / hz;
-		break;
+		break;
 	default:
 		error = ttcompat(tp, cmd, data, flag, p);
 		goto out;
@@ -1537,7 +1588,7 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)

 	error = 0;
out:
-	return(error);
+	return error;
 }
@@ -1612,14 +1663,14 @@ out:
 int
 ttselect(dev_t dev, int rw, void *wql, proc_t p)
 {
-	int	rv;
+	int     rv;
 	struct tty *tp = cdevsw[major(dev)].d_ttys[minor(dev)];

 	tty_lock(tp);
 	rv = ttyselect(tp, rw, wql, p);
 	tty_unlock(tp);

-	return (rv);
+	return rv;
 }
@@ -1631,17 +1682,19 @@ ttnread(struct tty *tp)
 {
 	int nread;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

-	if (ISSET(tp->t_lflag, PENDIN))
+	if (ISSET(tp->t_lflag, PENDIN)) {
 		ttypend(tp);
+	}
 	nread = tp->t_canq.c_cc;
 	if (!ISSET(tp->t_lflag, ICANON)) {
 		nread += tp->t_rawq.c_cc;
-		if (nread < tp->t_cc[VMIN] && tp->t_cc[VTIME] == 0)
+		if (nread < tp->t_cc[VMIN] && tp->t_cc[VTIME] == 0) {
 			nread = 0;
+		}
 	}
-	return (nread);
+	return nread;
 }
@@ -1668,29 +1721,32 @@ ttywait(struct tty *tp)
 {
 	int error;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	error = 0;
 	while ((tp->t_outq.c_cc || ISSET(tp->t_state, TS_BUSY)) &&
-	       ISSET(tp->t_state, TS_CONNECTED) && tp->t_oproc) {
+	    ISSET(tp->t_state, TS_CONNECTED) && tp->t_oproc) {
 		(*tp->t_oproc)(tp);
 		if ((tp->t_outq.c_cc || ISSET(tp->t_state, TS_BUSY)) &&
 		    ISSET(tp->t_state, TS_CONNECTED)) {
 			SET(tp->t_state, TS_SO_OCOMPLETE);
 			error = ttysleep(tp, TSA_OCOMPLETE(tp),
-					 TTOPRI | PCATCH, "ttywai",
-					 tp->t_timeout);
+			    TTOPRI | PCATCH, "ttywai",
			    tp->t_timeout);
 			if (error) {
-				if (error == EWOULDBLOCK)
+				if (error == EWOULDBLOCK) {
 					error = EIO;
+				}
 				break;
 			}
-		} else
+		} else {
 			break;
+		}
 	}
-	if (!error && (tp->t_outq.c_cc || ISSET(tp->t_state, TS_BUSY)))
+	if (!error && (tp->t_outq.c_cc || ISSET(tp->t_state, TS_BUSY))) {
 		error = EIO;
-	return (error);
+	}
+	return error;
 }

 /*
@@ -1701,7 +1757,7 @@ ttywait(struct tty *tp)
 static void
 ttystop(struct tty *tp, int rw)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	(*cdevsw[major(tp->t_dev)].d_stop)(tp, rw);
 }
@@ -1716,11 +1772,12 @@ ttywflush(struct tty *tp)
 {
 	int error;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

-	if ((error = ttywait(tp)) == 0)
+	if ((error = ttywait(tp)) == 0) {
 		ttyflush(tp, FREAD);
-	return (error);
+	}
+	return error;
 }

 /*
@@ -1731,7 +1788,7 @@
 void
 ttyflush(struct tty *tp, int rw)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

#if 0
again:
@@ -1740,8 +1797,8 @@ again:
 		FLUSHQ(&tp->t_outq);
 		CLR(tp->t_state, TS_TTSTOP);
 	}
-	ttystop(tp, rw);
-	if (rw & FREAD) {
+	ttystop(tp, rw);
+	if (rw & FREAD) {
 		FLUSHQ(&tp->t_canq);
 		FLUSHQ(&tp->t_rawq);
 		CLR(tp->t_lflag, PENDIN);
@@ -1750,8 +1807,9 @@ again:
 		CLR(tp->t_state, TS_LOCAL);
 		ttwakeup(tp);
 		if (ISSET(tp->t_state, TS_TBLOCK)) {
-			if (rw & FWRITE)
+			if (rw & FWRITE) {
 				FLUSHQ(&tp->t_outq);
+			}
 			ttyunblock(tp);

 			/*
@@ -1776,7 +1834,7 @@
 			 */
 			SET(tp->t_state, TS_SO_OCOMPLETE);
 			ttysleep(tp, TSA_OCOMPLETE(tp), TTOPRI,
-				 "ttyfls", hz / 10);
+			    "ttyfls", hz / 10);
 			/*
 			 * Don't try sending the stop character again.
 			 */
@@ -1816,12 +1874,13 @@ termioschars(struct termios *t)
 void
 ttyblock(struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	SET(tp->t_state, TS_TBLOCK);
 	if (ISSET(tp->t_iflag, IXOFF) && tp->t_cc[VSTOP] != _POSIX_VDISABLE &&
-	    putc(tp->t_cc[VSTOP], &tp->t_outq) != 0)
-		CLR(tp->t_state, TS_TBLOCK); /* try again later */
+	    putc(tp->t_cc[VSTOP], &tp->t_outq) != 0) {
+		CLR(tp->t_state, TS_TBLOCK);    /* try again later */
+	}
 	ttstart(tp);
 }
@@ -1836,12 +1895,13 @@ ttyblock(struct tty *tp)
 static void
 ttyunblock(struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	CLR(tp->t_state, TS_TBLOCK);
 	if (ISSET(tp->t_iflag, IXOFF) && tp->t_cc[VSTART] != _POSIX_VDISABLE &&
-	    putc(tp->t_cc[VSTART], &tp->t_outq) != 0)
-		SET(tp->t_state, TS_TBLOCK); /* try again later */
+	    putc(tp->t_cc[VSTART], &tp->t_outq) != 0) {
+		SET(tp->t_state, TS_TBLOCK);    /* try again later */
+	}
 	ttstart(tp);
 }
@@ -1865,12 +1925,13 @@ ttyunblock(struct tty *tp)
 int
 ttstart(struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

-	if (tp->t_oproc != NULL) /* XXX: Kludge for pty. */
+	if (tp->t_oproc != NULL) {      /* XXX: Kludge for pty. */
 		(*tp->t_oproc)(tp);
+	}

-	return (0);
+	return 0;
 }
@@ -1884,12 +1945,13 @@ ttstart(struct tty *tp)
 int
 ttylclose(struct tty *tp, int flag)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

-	if ( (flag & FNONBLOCK) || ttywflush(tp))
+	if ((flag & FNONBLOCK) || ttywflush(tp)) {
 		ttyflush(tp, FREAD | FWRITE);
+	}

-	return (0);
+	return 0;
 }
@@ -1905,9 +1967,9 @@
 int
 ttymodem(struct tty *tp, int flag)
 {
-	int rval = 1; /* default return value */
+	int rval = 1;           /* default return value */

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	if (ISSET(tp->t_state, TS_CARR_ON) && ISSET(tp->t_cflag, MDMBUF)) {
 		/*
@@ -1922,7 +1984,7 @@ ttymodem(struct tty *tp, int flag)
 		} else if (!ISSET(tp->t_state, TS_CAR_OFLOW)) {
 			SET(tp->t_state, TS_CAR_OFLOW);
 			SET(tp->t_state, TS_TTSTOP);
-			ttystop(tp, 0);
+			ttystop(tp, 0);
 		}
 	} else if (flag == 0) {
 		/*
@@ -1933,8 +1995,9 @@ ttymodem(struct tty *tp, int flag)
 		    !ISSET(tp->t_cflag, CLOCAL)) {
 			SET(tp->t_state, TS_ZOMBIE);
 			CLR(tp->t_state, TS_CONNECTED);
-			if (tp->t_session && tp->t_session->s_leader)
+			if (tp->t_session && tp->t_session->s_leader) {
 				psignal(tp->t_session->s_leader, SIGHUP);
+			}
 			ttyflush(tp, FREAD | FWRITE);
 			rval = 0;
 			goto out;
@@ -1944,15 +2007,16 @@ ttymodem(struct tty *tp, int flag)
 		/*
 		 * Carrier now on.
 		 */
 		SET(tp->t_state, TS_CARR_ON);
-		if (!ISSET(tp->t_state, TS_ZOMBIE))
+		if (!ISSET(tp->t_state, TS_ZOMBIE)) {
 			SET(tp->t_state, TS_CONNECTED);
+		}
 		wakeup(TSA_CARR_ON(tp));
 		ttwakeup(tp);
 		ttwwakeup(tp);
 	}

out:
-	return (rval);
+	return rval;
 }
@@ -1968,15 +2032,16 @@ ttypend(struct tty *tp)
 	struct clist tq;
 	int c;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	CLR(tp->t_lflag, PENDIN);
 	SET(tp->t_state, TS_TYPEN);
 	tq = tp->t_rawq;
 	tp->t_rawq.c_cc = 0;
 	tp->t_rawq.c_cf = tp->t_rawq.c_cl = NULL;
-	while ((c = getc(&tq)) >= 0)
+	while ((c = getc(&tq)) >= 0) {
 		ttyinput(c, tp);
+	}
 	CLR(tp->t_state, TS_TYPEN);
 }
@@ -1998,11 +2063,11 @@ ttread(struct tty *tp, struct uio *uio, int flag)
 	proc_t p = current_proc();
 	int first, error = 0;
 	int has_etime = 0, last_cc = 0;
-	long slp = 0; /* XXX this should be renamed `timo'. */
+	long slp = 0;           /* XXX this should be renamed `timo'. */
 	struct uthread *ut;
 	struct pgrp * pg;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	ut = (struct uthread *)get_bsdthread_info(current_thread());

@@ -2013,7 +2078,7 @@ loop:
 	 */
 	if (ISSET(lflag, PENDIN)) {
 		ttypend(tp);
-		lflag = tp->t_lflag; /* XXX ttypend() clobbers it */
+		lflag = tp->t_lflag;    /* XXX ttypend() clobbers it */
 	}

 	/*
@@ -2021,14 +2086,14 @@ loop:
 	 */
 	if (isbackground(p, tp)) {
 		if ((p->p_sigignore & sigmask(SIGTTIN)) ||
-		   (ut->uu_sigmask & sigmask(SIGTTIN)) ||
+		    (ut->uu_sigmask & sigmask(SIGTTIN)) ||
 		    p->p_lflag & P_LPPWAIT) {
-		    	error = EIO;
+			error = EIO;
 			goto err;
 		}
 		pg = proc_pgrp(p);
 		if (pg == PGRP_NULL) {
-			error = EIO;
+			error = EIO;
 			goto err;
 		}
 		if (pg->pg_jobc == 0) {
@@ -2036,7 +2101,7 @@ loop:
 			tty_unlock(tp);
 			pg_rele(pg);
 			tty_lock(tp);
-			error = EIO;
+			error = EIO;
 			goto err;
 		}
 		/* SAFE: All callers drop the lock on return */
@@ -2069,10 +2134,11 @@ loop:
 	qp = ISSET(lflag, ICANON) ? &tp->t_canq : &tp->t_rawq;

 	if (flag & IO_NDELAY) {
-		if (qp->c_cc > 0)
+		if (qp->c_cc > 0) {
 			goto read;
+		}
 		if (ISSET(lflag, ICANON) || cc[VMIN] != 0) {
-			error = EWOULDBLOCK;
+			error = EWOULDBLOCK;
 		}
 		/* else polling - returning 0 */
 		goto err;
@@ -2081,7 +2147,7 @@ loop:
 		int m = cc[VMIN];
 		long t = cc[VTIME];
 		struct timeval timecopy;
-		struct timeval etime = {0, 0}; /* protected by !has_etime */
+		struct timeval etime = {0, 0};  /* protected by !has_etime */

 		/*
 		 * Check each of the four combinations.
@@ -2092,22 +2158,26 @@ loop:
 		 * into slp.
 		 */
 		if (t == 0) {
-			if (qp->c_cc < m)
+			if (qp->c_cc < m) {
 				goto sleep;
-			if (qp->c_cc > 0)
+			}
+			if (qp->c_cc > 0) {
 				goto read;
+			}

 			/* m, t and qp->c_cc are all 0.  0 is enough input. */
 			goto err;
 		}
-		t *= 100000; /* time in us */
+		t *= 100000;            /* time in us */
#define diff(t1, t2) (((t1).tv_sec - (t2).tv_sec) * 1000000 + \
-		     ((t1).tv_usec - (t2).tv_usec))
+	((t1).tv_usec - (t2).tv_usec))
 		if (m > 0) {
-			if (qp->c_cc <= 0)
+			if (qp->c_cc <= 0) {
 				goto sleep;
-			if (qp->c_cc >= m)
+			}
+			if (qp->c_cc >= m) {
 				goto read;
+			}
 			microuptime(&timecopy);
 			if (!has_etime) {
 				/* first character, start timer */
@@ -2116,7 +2186,7 @@ loop:
 				etime.tv_sec = t / 1000000;
 				etime.tv_usec = (t - (etime.tv_sec * 1000000));
 				timeradd(&etime, &timecopy, &etime);
-
+
 				slp = t;
 			} else if (qp->c_cc > last_cc) {
 				/* got a character, restart timer */
@@ -2128,15 +2198,17 @@ loop:
 				slp = t;
 			} else {
 				/* nothing, check expiration */
-				if (timercmp(&etime, &timecopy, <=))
+				if (timercmp(&etime, &timecopy, <=)) {
 					goto read;
+				}
 				slp = diff(etime, timecopy);
 			}
 			last_cc = qp->c_cc;
-		} else { /* m == 0 */
-			if (qp->c_cc > 0)
+		} else {        /* m == 0 */
+			if (qp->c_cc > 0) {
 				goto read;
+			}
 			microuptime(&timecopy);
 			if (!has_etime) {
 				has_etime = 1;
@@ -2147,7 +2219,7 @@ loop:

 				slp = t;
 			} else {
-				if (timercmp(&etime, &timecopy, <=)) {
+				if (timercmp(&etime, &timecopy, <=)) {
 					/* Timed out, but 0 is enough input. */
 					goto err;
 				}
@@ -2173,11 +2245,11 @@ sleep:
 	 * There is no input, or not enough input and we can block.
 	 */
 	error = ttysleep(tp, TSA_HUP_OR_INPUT(tp), TTIPRI | PCATCH,
-			 ISSET(tp->t_state, TS_CONNECTED) ?
-			 "ttyin" : "ttyhup", (int)slp);
-	if (error == EWOULDBLOCK)
+	    ISSET(tp->t_state, TS_CONNECTED) ?
+	    "ttyin" : "ttyhup", (int)slp);
+	if (error == EWOULDBLOCK) {
 		error = 0;
-	else if (error) {
+	} else if (error) {
 		goto err;
 	}
 	/*
@@ -2195,8 +2267,9 @@ read:
 	 */
 	first = 1;
 	if (ISSET(lflag, ICANON)
-	   || (ISSET(lflag, IEXTEN | ISIG) == (IEXTEN | ISIG)) )
+	    || (ISSET(lflag, IEXTEN | ISIG) == (IEXTEN | ISIG))) {
 		goto slowcase;
+	}
 	for (;;) {
 		char ibuf[IBUFSIZ];
 		int icc;
@@ -2204,8 +2277,9 @@ read:
 		icc = MIN(uio_resid(uio), IBUFSIZ);
 		icc = q_to_b(qp, (u_char *)ibuf, icc);
 		if (icc <= 0) {
-			if (first)
+			if (first) {
 				goto loop;
+			}
 			break;
 		}
 		error = uiomove(ibuf, icc, uio);
@@ -2213,10 +2287,12 @@ read:
 		 * XXX if there was an error then we should ungetc() the
 		 * unmoved chars and reduce icc here.
 		 */
-		if (error)
+		if (error) {
 			break;
-		if (uio_resid(uio) == 0)
+		}
+		if (uio_resid(uio) == 0) {
 			break;
+		}
 		first = 0;
 	}
 	goto out;
slowcase:
 	for (;;) {
 		c = getc(qp);
 		if (c < 0) {
-			if (first)
+			if (first) {
 				goto loop;
+			}
 			break;
 		}
 		/*
@@ -2243,9 +2320,10 @@ slowcase:
 			tty_lock(tp);
 			if (first) {
 				error = ttysleep(tp, &ttread, TTIPRI | PCATCH,
-						 "ttybg3", hz);
-				if (error)
+				    "ttybg3", hz);
+				if (error) {
 					break;
+				}
 				goto loop;
 			}
 			break;
@@ -2253,23 +2331,27 @@ slowcase:
 		/*
 		 * Interpret EOF only in canonical mode.
 		 */
-		if (CCEQ(cc[VEOF], c) && ISSET(lflag, ICANON))
+		if (CCEQ(cc[VEOF], c) && ISSET(lflag, ICANON)) {
 			break;
+		}
 		/*
 		 * Give user character.
 		 */
-		error = ureadc(c, uio);
-		if (error)
+		error = ureadc(c, uio);
+		if (error) {
 			/* XXX should ungetc(c, qp). */
 			break;
-		if (uio_resid(uio) == 0)
+		}
+		if (uio_resid(uio) == 0) {
 			break;
+		}
 		/*
 		 * In canonical mode check for a "break character"
 		 * marking the end of a "line of input".
 		 */
-		if (ISSET(lflag, ICANON) && TTBREAKC(c, lflag))
+		if (ISSET(lflag, ICANON) && TTBREAKC(c, lflag)) {
 			break;
+		}
 		first = 0;
 	}

@@ -2279,11 +2361,12 @@ out:
 	 * the input queue has gone down.
 	 */
 	if (ISSET(tp->t_state, TS_TBLOCK) &&
-	    tp->t_rawq.c_cc + tp->t_canq.c_cc <= I_LOW_WATER)
+	    tp->t_rawq.c_cc + tp->t_canq.c_cc <= I_LOW_WATER) {
 		ttyunblock(tp);
+	}

err:
-	return (error);
+	return error;
 }
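The four VMIN/VTIME combinations the loop above distinguishes can be hard to follow in kernel form. A condensed, hedged summary as a stand-alone decision helper; "queued" and "timer_expired" stand in for qp->c_cc and the etime comparison, and this is illustrative only, not the kernel's API:

enum read_action { READ_NOW, SLEEP_MORE, RETURN_EMPTY };

static enum read_action
vmin_vtime_decide(int vmin, int vtime, int queued, int timer_expired)
{
    if (vtime == 0) {                  /* no timer: block for vmin bytes */
        if (queued < vmin) {
            return SLEEP_MORE;
        }
        return queued > 0 ? READ_NOW : RETURN_EMPTY;
    }
    if (vmin > 0) {                    /* inter-character timer, armed by 1st byte */
        if (queued >= vmin) {
            return READ_NOW;
        }
        if (queued > 0 && timer_expired) {
            return READ_NOW;           /* partial data, timer ran out */
        }
        return SLEEP_MORE;             /* note: timer only starts once data arrives */
    }
    /* vmin == 0: overall timer, any data at all satisfies the read */
    if (queued > 0) {
        return READ_NOW;
    }
    return timer_expired ? RETURN_EMPTY : SLEEP_MORE;
}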
@@ -2305,24 +2388,26 @@ ttycheckoutq(struct tty *tp, int wait)
 	sigset_t oldsig;
 	struct uthread *ut;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	ut = (struct uthread *)get_bsdthread_info(current_thread());

 	hiwat = tp->t_hiwat;
 	oldsig = wait ? ut->uu_siglist : 0;
-	if (tp->t_outq.c_cc > hiwat + OBUFSIZ + 100)
+	if (tp->t_outq.c_cc > hiwat + OBUFSIZ + 100) {
 		while (tp->t_outq.c_cc > hiwat) {
 			ttstart(tp);
-			if (tp->t_outq.c_cc <= hiwat)
+			if (tp->t_outq.c_cc <= hiwat) {
 				break;
+			}
 			if (wait == 0 || ut->uu_siglist != oldsig) {
-				return (0);
+				return 0;
 			}
 			SET(tp->t_state, TS_SO_OLOWAT);
 			ttysleep(tp, TSA_OLOWAT(tp), PZERO - 1, "ttoutq", hz);
 		}
-	return (1);
+	}
+	return 1;
 }
@@ -2345,7 +2430,7 @@ ttwrite(struct tty *tp, struct uio *uio, int flag)
 	struct uthread *ut;
 	struct pgrp * pg;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	ut = (struct uthread *)get_bsdthread_info(current_thread());
 	hiwat = tp->t_hiwat;
@@ -2354,8 +2439,9 @@ ttwrite(struct tty *tp, struct uio *uio, int flag)
 	cc = 0;
loop:
 	if (ISSET(tp->t_state, TS_ZOMBIE)) {
-		if (uio_resid(uio) == count)
+		if (uio_resid(uio) == count) {
 			error = EIO;
+		}
 		goto out;
 	}
 	if (!ISSET(tp->t_state, TS_CONNECTED)) {
@@ -2364,9 +2450,10 @@ loop:
 			goto out;
 		}
 		error = ttysleep(tp, TSA_CARR_ON(tp), TTIPRI | PCATCH,
-				 "ttydcd", 0);
+		    "ttydcd", 0);
 		if (error) {
-			goto out; }
+			goto out;
+		}
 		goto loop;
 	}
 	/*
@@ -2377,7 +2464,6 @@ loop:
 	    ISSET(tp->t_lflag, TOSTOP) && (p->p_lflag & P_LPPWAIT) == 0 &&
 	    (p->p_sigignore & sigmask(SIGTTOU)) == 0 &&
 	    (ut->uu_sigmask & sigmask(SIGTTOU)) == 0) {
-
 		pg = proc_pgrp(p);
 		if (pg == PGRP_NULL) {
 			error = EIO;
@@ -2403,7 +2489,7 @@ loop:
 		 * process, that's handled in the signal sending code.
 		 */
 		error = EINTR;
-		goto out;
+		goto out;
 	}
 	/*
 	 * Process the user's data in at most OBUFSIZ chunks.  Perform any
@@ -2413,10 +2499,11 @@ loop:
 	while (uio_resid(uio) > 0 || cc > 0) {
 		if (ISSET(tp->t_lflag, FLUSHO)) {
 			uio_setresid(uio, 0);
-			return (0);
+			return 0;
 		}
-		if (tp->t_outq.c_cc > hiwat)
+		if (tp->t_outq.c_cc > hiwat) {
 			goto ovhiwat;
+		}
 		/*
 		 * Grab a hunk of data from the user, unless we have some
 		 * leftover from last time.
@@ -2440,11 +2527,11 @@ loop:
 		 * immediately.
 		 */
 		while (cc > 0) {
-			if (!ISSET(tp->t_oflag, OPOST))
+			if (!ISSET(tp->t_oflag, OPOST)) {
 				ce = cc;
-			else {
+			} else {
 				ce = cc - scanc((u_int)cc, (u_char *)cp,
-						char_type, CCLASSMASK);
+				    char_type, CCLASSMASK);
 				/*
 				 * If ce is zero, then we're processing
 				 * a special character through ttyoutput.
@@ -2458,8 +2545,9 @@ loop:
 					cp++;
 					cc--;
 					if (ISSET(tp->t_lflag, FLUSHO) ||
-					    tp->t_outq.c_cc > hiwat)
+					    tp->t_outq.c_cc > hiwat) {
 						goto ovhiwat;
+					}
 					continue;
 				}
 			}
@@ -2484,8 +2572,9 @@ loop:
 				goto overfull;
 			}
 			if (ISSET(tp->t_lflag, FLUSHO) ||
-			    tp->t_outq.c_cc > hiwat)
+			    tp->t_outq.c_cc > hiwat) {
 				break;
+			}
 		}
 		ttstart(tp);
 	}
@@ -2496,7 +2585,7 @@ out:
 	 * (the call will either return short or restart with a new uio).
 	 */
 	uio_setresid(uio, (uio_resid(uio) + cc));
-	return (error);
+	return error;

overfull:

@@ -2519,15 +2608,17 @@ ovhiwat:
 	}
 	if (flag & IO_NDELAY) {
 		uio_setresid(uio, (uio_resid(uio) + cc));
-		return (uio_resid(uio) == count ? EWOULDBLOCK : 0);
+		return uio_resid(uio) == count ? EWOULDBLOCK : 0;
 	}
 	SET(tp->t_state, TS_SO_OLOWAT);
 	error = ttysleep(tp, TSA_OLOWAT(tp), TTOPRI | PCATCH, "ttywri",
-			 tp->t_timeout);
-	if (error == EWOULDBLOCK)
+	    tp->t_timeout);
+	if (error == EWOULDBLOCK) {
 		error = EIO;
-	if (error)
+	}
+	if (error) {
 		goto out;
+	}
 	goto loop;
 }
@@ -2545,10 +2636,11 @@ ttyrub(int c, struct tty *tp)
 	int savecol;
 	int tabc;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

-	if (!ISSET(tp->t_lflag, ECHO) || ISSET(tp->t_lflag, EXTPROC))
+	if (!ISSET(tp->t_lflag, ECHO) || ISSET(tp->t_lflag, EXTPROC)) {
 		return;
+	}
 	CLR(tp->t_lflag, FLUSHO);
 	if (ISSET(tp->t_lflag, ECHOE)) {
 		if (tp->t_rocount == 0) {
@@ -2558,13 +2650,13 @@ ttyrub(int c, struct tty *tp)
 			ttyretype(tp);
 			return;
 		}
-		if (c == ('\t' | TTY_QUOTE) || c == ('\n' | TTY_QUOTE))
+		if (c == ('\t' | TTY_QUOTE) || c == ('\n' | TTY_QUOTE)) {
 			ttyrubo(tp, 2);
-		else {
+		} else {
 			CLR(c, ~TTY_CHARMASK);
 			switch (CCLASS(c)) {
 			case ORDINARY:
-				if(!(ISSET(tp->t_iflag, IUTF8) && CCONT(c))) {
+				if (!(ISSET(tp->t_iflag, IUTF8) && CCONT(c))) {
 					ttyrubo(tp, 1);
 				}
 				break;
@@ -2573,8 +2665,9 @@ ttyrub(int c, struct tty *tp)
 			case NEWLINE:
 			case RETURN:
 			case VTAB:
-				if (ISSET(tp->t_lflag, ECHOCTL))
+				if (ISSET(tp->t_lflag, ECHOCTL)) {
 					ttyrubo(tp, 2);
+				}
 				break;
 			case TAB:
 				if (tp->t_rocount < tp->t_rawq.c_cc) {
@@ -2586,21 +2679,24 @@ ttyrub(int c, struct tty *tp)
 				SET(tp->t_lflag, FLUSHO);
 				tp->t_column = tp->t_rocol;
 				for (cp = firstc(&tp->t_rawq, &tabc); cp;
-				    cp = nextc(&tp->t_rawq, cp, &tabc))
+				    cp = nextc(&tp->t_rawq, cp, &tabc)) {
 					ttyecho(tabc, tp);
+				}
 				CLR(tp->t_lflag, FLUSHO);
 				CLR(tp->t_state, TS_CNTTB);

 				/* savecol will now be length of the tab. */
 				savecol -= tp->t_column;
 				tp->t_column += savecol;
-				if (savecol > 8)
-					savecol = 8; /* overflow fixup */
+				if (savecol > 8) {
+					savecol = 8;    /* overflow fixup */
+				}
-				while (--savecol >= 0)
+				while (--savecol >= 0) {
 					(void)ttyoutput('\b', tp);
+				}
 				break;
-			default: /* XXX */
-#define PANICSTR "ttyrub: would panic c = %d, val = %d\n"
+			default:        /* XXX */
+#define PANICSTR "ttyrub: would panic c = %d, val = %d\n"
 				printf(PANICSTR, c, CCLASS(c));
#ifdef notdef
 				panic(PANICSTR, c, CCLASS(c));
@@ -2613,8 +2709,9 @@ ttyrub(int c, struct tty *tp)
 			(void)ttyoutput('\\', tp);
 		}
 		ttyecho(c, tp);
-	} else
+	} else {
 		ttyecho(tp->t_cc[VERASE], tp);
+	}
 	--tp->t_rocount;
 }
@@ -2627,7 +2724,7 @@
 static void
 ttyrubo(struct tty *tp, int count)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	while (count-- > 0) {
 		(void)ttyoutput('\b', tp);
@@ -2650,11 +2747,12 @@ ttyretype(struct tty *tp)
 	u_char *cp;
 	int c;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	/* Echo the reprint character. */
-	if (tp->t_cc[VREPRINT] != _POSIX_VDISABLE)
+	if (tp->t_cc[VREPRINT] != _POSIX_VDISABLE) {
 		ttyecho(tp->t_cc[VREPRINT], tp);
+	}

 	(void)ttyoutput('\n', tp);

 	/*
 	 * FIX: NEXTC IS BROKEN - DOESN'T CHECK QUOTE
 	 * BIT OF FIRST CHAR.
 	 */
-	for (cp = firstc(&tp->t_canq, &c); cp; cp = nextc(&tp->t_canq, cp, &c))
+	for (cp = firstc(&tp->t_canq, &c); cp; cp = nextc(&tp->t_canq, cp, &c)) {
 		ttyecho(c, tp);
-	for (cp = firstc(&tp->t_rawq, &c); cp; cp = nextc(&tp->t_rawq, cp, &c))
+	}
+	for (cp = firstc(&tp->t_rawq, &c); cp; cp = nextc(&tp->t_rawq, cp, &c)) {
 		ttyecho(c, tp);
+	}
 	CLR(tp->t_state, TS_ERASE);

 	tp->t_rocount = tp->t_rawq.c_cc;
@@ -2682,23 +2782,26 @@
 static void
 ttyecho(int c, struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

-	if (!ISSET(tp->t_state, TS_CNTTB))
+	if (!ISSET(tp->t_state, TS_CNTTB)) {
 		CLR(tp->t_lflag, FLUSHO);
+	}
 	if ((!ISSET(tp->t_lflag, ECHO) &&
-	     (c != '\n' || !ISSET(tp->t_lflag, ECHONL))) ||
-	    ISSET(tp->t_lflag, EXTPROC))
+	    (c != '\n' || !ISSET(tp->t_lflag, ECHONL))) ||
+	    ISSET(tp->t_lflag, EXTPROC)) {
 		return;
+	}
 	if (ISSET(tp->t_lflag, ECHOCTL) &&
 	    ((ISSET(c, TTY_CHARMASK) <= 037 && c != '\t' && c != '\n') ||
 	    ISSET(c, TTY_CHARMASK) == 0177)) {
 		(void)ttyoutput('^', tp);
 		CLR(c, ~TTY_CHARMASK);
-		if (c == 0177)
+		if (c == 0177) {
 			c = '?';
-		else
+		} else {
 			c += 'A' - 1;
+		}
 	}
 	(void)ttyoutput(c, tp);
 }
@@ -2712,7 +2815,7 @@
 void
 ttwakeup(struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	selwakeup(&tp->t_rsel);
 	KNOTE(&tp->t_rsel.si_note, 1);
@@ -2743,7 +2846,7 @@ ttwakeup(struct tty *tp)
 void
 ttwwakeup(struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	if (tp->t_outq.c_cc <= tp->t_lowat) {
 		selwakeup(&tp->t_wsel);
@@ -2772,10 +2875,12 @@ ttwwakeup(struct tty *tp)
 int
 ttspeedtab(int speed, struct speedtab *table)
 {
-	for ( ; table->sp_speed != -1; table++)
-		if (table->sp_speed == speed)
-			return (table->sp_code);
-	return (-1);
+	for (; table->sp_speed != -1; table++) {
+		if (table->sp_speed == speed) {
+			return table->sp_code;
+		}
+	}
+	return -1;
 }
@@ -2793,26 +2898,26 @@ ttsetwater(struct tty *tp)
 	int cps;
 	unsigned int x;

-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

-#define CLAMP(x, h, l) ((x) > h ? h : ((x) < l) ? l : (x))
+#define CLAMP(x, h, l)  ((x) > h ? h : ((x) < l) ? l : (x))

 	cps = tp->t_ospeed / 10;
 	tp->t_lowat = x = CLAMP(cps / 2, TTMAXLOWAT, TTMINLOWAT);
 	x += cps;
 	x = CLAMP(x, TTMAXHIWAT, TTMINHIWAT);
 	tp->t_hiwat = roundup(x, CBSIZE);
-#undef CLAMP
+#undef  CLAMP
 }

 /* ttyinfo has been converted to the MACH kernel */
 #include

 /* XXX Should be in Mach header , but doesn't work */
-extern kern_return_t thread_info_internal(thread_t thread,
-			thread_flavor_t flavor,
-			thread_info_t thread_info_out,
-			mach_msg_type_number_t *thread_info_count);
+extern kern_return_t    thread_info_internal(thread_t thread,
    thread_flavor_t flavor,
    thread_info_t thread_info_out,
    mach_msg_type_number_t *thread_info_count);

 /*
@@ -2823,23 +2928,24 @@ extern kern_return_t thread_info_internal(thread_t thread,
 void
 ttyinfo_locked(struct tty *tp)
 {
-	int		load;
-	thread_t	thread;
-	uthread_t	uthread;
-	proc_t		p;
-	proc_t		pick;
+	int             load;
+	thread_t        thread;
+	uthread_t       uthread;
+	proc_t          p;
+	proc_t          pick;
 	pid_t pickpid;
-	const char	*state;
-	struct timeval	utime;
-	struct timeval	stime;
-	thread_basic_info_data_t	basic_info;
-	mach_msg_type_number_t		mmtn = THREAD_BASIC_INFO_COUNT;
+	const char      *state;
+	struct timeval  utime;
+	struct timeval  stime;
+	thread_basic_info_data_t        basic_info;
+	mach_msg_type_number_t          mmtn = THREAD_BASIC_INFO_COUNT;
 	struct pgrp * pg;

-	TTY_LOCK_OWNED(tp);	/* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

-	if (ttycheckoutq(tp,0) == 0)
+	if (ttycheckoutq(tp, 0) == 0) {
 		return;
+	}

 	/* Print load average. */
 	load = (averunnable.ldavg[0] * 100 + FSCALE / 2) >> FSHIFT;
@@ -2853,7 +2959,7 @@ ttyinfo_locked(struct tty *tp)
 		ttyprintf(tp, "not a controlling terminal\n");
 		tp->t_rocount = 0;
 		return;
-}
+	}
 	if (tp->t_pgrp == NULL) {
 		ttyprintf(tp, "no foreground process group\n");
 		tp->t_rocount = 0;
@@ -2889,8 +2995,9 @@ ttyinfo_locked(struct tty *tp)
 	tty_lock(tp);

 	pick = proc_find(pickpid);
-	if (pick == PROC_NULL)
+	if (pick == PROC_NULL) {
 		return;
+	}

 	if (TAILQ_EMPTY(&pick->p_uthlist) ||
 	    (uthread = TAILQ_FIRST(&pick->p_uthlist)) == NULL ||
@@ -2902,7 +3009,7 @@ ttyinfo_locked(struct tty *tp)
 		return;
 	}

-	switch(basic_info.run_state) {
+	switch (basic_info.run_state) {
 	case TH_STATE_RUNNING:
 		state = "running";
 		break;
@@ -2927,11 +3034,11 @@ ttyinfo_locked(struct tty *tp)

 	/* Print command, pid, state, utime, and stime */
 	ttyprintf(tp, " cmd: %s %d %s %ld.%02du %ld.%02ds\n",
-		pick->p_comm,
-		pick->p_pid,
-		state,
-		(long)utime.tv_sec, utime.tv_usec / 10000,
-		(long)stime.tv_sec, stime.tv_usec / 10000);
+	    pick->p_comm,
+	    pick->p_pid,
+	    state,
+	    (long)utime.tv_sec, utime.tv_usec / 10000,
+	    (long)stime.tv_sec, stime.tv_usec / 10000);

 	tp->t_rocount = 0;
 }
@@ -2948,7 +3055,7 @@ ttyinfo_locked(struct tty *tp)
 *	3) The sleeper with the shortest sleep time is next.
 *	4) Further ties are broken by picking the highest pid.
 */
-#define ISRUN(p) (((p)->p_stat == SRUN) || ((p)->p_stat == SIDL))
+#define ISRUN(p)        (((p)->p_stat == SRUN) || ((p)->p_stat == SIDL))
 #define TESTAB(a, b)    ((a)<<1 | (b))
 #define ONLYA   2
 #define ONLYB   1
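The four rules in the comment above define a strict preference order for picking the process that ^T reports on. A condensed sketch of that ordering, using a hypothetical snapshot struct rather than proc_t (the real proc_compare() below also breaks runnable ties on p_estcpu when scheduler info is available):

struct psnap {
    int pid;
    int runnable;   /* ISRUN(): p_stat is SRUN or SIDL */
    int zombie;     /* p_stat == SZOMB */
    int slptime;    /* sleep time, when the kernel tracks it */
};

/* Returns nonzero when p2 is the more interesting process to display. */
static int snap_prefer_p2(const struct psnap *p1, const struct psnap *p2)
{
    if (p1->runnable != p2->runnable) {     /* 1) runnables beat sleepers */
        return p2->runnable;
    }
    if (p1->zombie != p2->zombie) {         /* 2) zombies lose to anything */
        return p1->zombie;
    }
    if (p1->slptime != p2->slptime) {       /* 3) shortest sleep time next */
        return p2->slptime < p1->slptime;
    }
    return p2->pid > p1->pid;               /* 4) highest pid wins ties */
}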
@@ -2964,51 +3071,56 @@ proc_compare(proc_t p1, proc_t p2)
 {
 	/* NOTE THIS FN needs to be NON BLOCKING */

-	if (p1 == NULL)
-		return (1);
+	if (p1 == NULL) {
+		return 1;
+	}
 	/*
 	 * see if at least one of them is runnable
 	 */
 	switch (TESTAB(ISRUN(p1), ISRUN(p2))) {
 	case ONLYA:
-		return (0);
+		return 0;
 	case ONLYB:
-		return (1);
+		return 1;
 	case BOTH:
 		/*
 		 * tie - favor one with highest recent cpu utilization
 		 */
#ifdef _PROC_HAS_SCHEDINFO_
 		/* Without the support the fields are always zero */
-		if (p2->p_estcpu > p1->p_estcpu)
-			return (1);
-		if (p1->p_estcpu > p2->p_estcpu)
-			return (0);
+		if (p2->p_estcpu > p1->p_estcpu) {
+			return 1;
+		}
+		if (p1->p_estcpu > p2->p_estcpu) {
+			return 0;
+		}
#endif /* _PROC_HAS_SCHEDINFO_ */
-		return (p2->p_pid > p1->p_pid); /* tie - return highest pid */
+		return p2->p_pid > p1->p_pid;   /* tie - return highest pid */
 	}
 	/*
-	 * weed out zombies
+	 * weed out zombies
 	 */
 	switch (TESTAB(p1->p_stat == SZOMB, p2->p_stat == SZOMB)) {
 	case ONLYA:
-		return (1);
+		return 1;
 	case ONLYB:
-		return (0);
+		return 0;
 	case BOTH:
-		return (p2->p_pid > p1->p_pid); /* tie - return highest pid */
+		return p2->p_pid > p1->p_pid;   /* tie - return highest pid */
 	}
 	/*
 	 * pick the one with the smallest sleep time
 	 */
#ifdef _PROC_HAS_SCHEDINFO_
 	/* Without the support the fields are always zero */
-	if (p2->p_slptime > p1->p_slptime)
-		return (0);
-	if (p1->p_slptime > p2->p_slptime)
-		return (1);
+	if (p2->p_slptime > p1->p_slptime) {
+		return 0;
+	}
+	if (p1->p_slptime > p2->p_slptime) {
+		return 1;
+	}
#endif /* _PROC_HAS_SCHEDINFO_ */
-	return (p2->p_pid > p1->p_pid); /* tie - return highest pid */
+	return p2->p_pid > p1->p_pid;           /* tie - return highest pid */
 }
@@ -3022,16 +3134,17 @@ proc_compare(proc_t p1, proc_t p2)
 int
 tputchar(int c, struct tty *tp)
 {
-	TTY_LOCK_OWNED(tp); /* debug assert */
+	TTY_LOCK_OWNED(tp);     /* debug assert */

 	if (!ISSET(tp->t_state, TS_CONNECTED)) {
-		return (-1);
+		return -1;
 	}
-	if (c == '\n')
+	if (c == '\n') {
 		(void)ttyoutput('\r', tp);
+	}
 	(void)ttyoutput(c, tp);
 	ttstart(tp);
-	return (0);
+	return 0;
 }
@@ -3075,9 +3188,10 @@ ttysleep(struct tty *tp, void *chan, int pri, const char *wmesg, int timo)
 	gen = tp->t_gen;
 	/* Use of msleep0() avoids conversion timo/timespec/timo */
 	error = msleep0(chan, &tp->t_lock, pri, wmesg, timo, (int (*)(int))0);
-	if (error)
-		return (error);
-	return (tp->t_gen == gen ? 0 : ERESTART);
+	if (error) {
+		return error;
+	}
+	return tp->t_gen == gen ? 0 : ERESTART;
 }
@@ -3097,7 +3211,7 @@ ttymalloc(void)
 {
 	struct tty *tp;

-	MALLOC(tp, struct tty *, sizeof(struct tty), M_TTYS, M_WAITOK|M_ZERO);
+	MALLOC(tp, struct tty *, sizeof(struct tty), M_TTYS, M_WAITOK | M_ZERO);
 	if (tp != NULL) {
 		/* XXX: default to TTYCLSIZE(1024) chars for now */
 		clalloc(&tp->t_rawq, TTYCLSIZE, 1);
@@ -3109,7 +3223,7 @@ ttymalloc(void)
 		klist_init(&tp->t_wsel.si_note);
 		tp->t_refcnt = 1;
 	}
-	return (tp);
+	return tp;
 }

 /*
@@ -3137,8 +3251,9 @@ ttyfree(struct tty *tp)
 		ttydeallocate(tp);
 	} else if (tp->t_refcnt < 0) {
 		panic("%s: freeing free tty %p", __func__, tp);
-	} else
+	} else {
 		tty_unlock(tp);
+	}
 }

 /*
@@ -3150,7 +3265,7 @@
 static void
 ttydeallocate(struct tty *tp)
 {
-	TTY_LOCK_NOTOWNED(tp); /* debug assert */
+	TTY_LOCK_NOTOWNED(tp);  /* debug assert */

#if DEBUG
 	if (!(SLIST_EMPTY(&tp->t_rsel.si_note) && SLIST_EMPTY(&tp->t_wsel.si_note))) {
@@ -3174,10 +3289,10 @@ isbackground(proc_t p, struct tty *tp)
 {
 	TTY_LOCK_OWNED(tp);

-	return (tp->t_session != NULL && p->p_pgrp != NULL && (p->p_pgrp != tp->t_pgrp) && isctty_sp(p, tp, p->p_pgrp->pg_session));
+	return tp->t_session != NULL && p->p_pgrp != NULL && (p->p_pgrp != tp->t_pgrp) && isctty_sp(p, tp, p->p_pgrp->pg_session);
 }

-static int
+static int
 isctty(proc_t p, struct tty *tp)
 {
 	int retval;
@@ -3186,14 +3301,13 @@ isctty(proc_t p, struct tty *tp)
 	sessp = proc_session(p);
 	retval = (sessp == tp->t_session && p->p_flag & P_CONTROLT);
 	session_rele(sessp);
-	return(retval);
+	return retval;
 }

 static int
 isctty_sp(proc_t p, struct tty *tp, struct session *sessp)
 {
-	return(sessp == tp->t_session && p->p_flag & P_CONTROLT);
-
+	return sessp == tp->t_session && p->p_flag & P_CONTROLT;
 }
@@ -3364,7 +3478,7 @@ tty_set_knote_hook(struct knote *kn)
 	 * might happen before we can unlink it.
 	 */
 	kr = waitq_set_init(&tmp_wqs, SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL,
-			NULL);
+	    NULL);
 	assert(kr == KERN_SUCCESS);

 	/*
@@ -3491,7 +3605,7 @@ filt_ttydetach(struct knote *kn)
 	case FWRITE:
 		si = &tp->t_wsel;
 		break;
-	/* knote_get_seltype will panic on default */
+		/* knote_get_seltype will panic on default */
 	}

 	KNOTE_DETACH(&si->si_note, kn);
diff --git a/bsd/kern/tty_compat.c b/bsd/kern/tty_compat.c
index 4348bd133..4bd2d0273 100644
--- a/bsd/kern/tty_compat.c
+++ b/bsd/kern/tty_compat.c
@@ -2,7 +2,7 @@
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- *
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
- *
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */
@@ -86,7 +86,7 @@
 #include

 static int ttcompatgetflags(struct tty *tp);
-static void ttcompatsetflags(struct tty *tp, struct termios *t);
+static void ttcompatsetflags(struct tty *tp, struct termios *t);
 static void ttcompatsetlflags(struct tty *tp, struct termios *t);
 static int ttcompatspeedtab(int speed, struct speedtab *table);
@@ -98,26 +98,26 @@ static int ttcompatspeedtab(int speed, struct speedtab *table);
 * name space.
 */
 static struct speedtab compatspeeds[] = {
-#define MAX_SPEED 17
+#define MAX_SPEED       17
 	{ 115200, 17 },
 	{ 57600, 16 },
 	{ 38400, 15 },
 	{ 19200, 14 },
-	{ 9600, 13 },
-	{ 4800, 12 },
-	{ 2400, 11 },
-	{ 1800, 10 },
-	{ 1200, 9 },
-	{ 600, 8 },
-	{ 300, 7 },
-	{ 200, 6 },
-	{ 150, 5 },
-	{ 134, 4 },
-	{ 110, 3 },
-	{ 75, 2 },
-	{ 50, 1 },
-	{ 0, 0 },
-	{ -1, -1 },
+	{ 9600, 13 },
+	{ 4800, 12 },
+	{ 2400, 11 },
+	{ 1800, 10 },
+	{ 1200, 9 },
+	{ 600, 8 },
+	{ 300, 7 },
+	{ 200, 6 },
+	{ 150, 5 },
+	{ 134, 4 },
+	{ 110, 3 },
+	{ 75, 2 },
+	{ 50, 1 },
+	{ 0, 0 },
+	{ -1, -1 },
 };
 static int compatspcodes[] = {
 	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200,
@@ -149,12 +149,15 @@ static int compatspcodes[] = {
 static int
 ttcompatspeedtab(int speed, struct speedtab *table)
 {
-	if (speed == 0)
-		return (0); /* hangup */
-	for ( ; table->sp_speed > 0; table++)
-		if (table->sp_speed <= speed) /* nearest one, rounded down */
-			return (table->sp_code);
-	return (1); /* 50, min and not hangup */
+	if (speed == 0) {
+		return 0; /* hangup */
+	}
+	for (; table->sp_speed > 0; table++) {
+		if (table->sp_speed <= speed) { /* nearest one, rounded down */
+			return table->sp_code;
+		}
+	}
+	return 1; /* 50, min and not hangup */
 }
@@ -212,36 +215,38 @@ ttsetcompat(struct tty *tp, u_long *com, caddr_t data, struct termios *term)
 {
 	switch (*com) {
 	case TIOCSETP:
-		/*
-		 * Wait for all characters queued for output to drain, then
-		 * Discard all characters queued for input, and then set
-		 * the input and output speeds and device flags, per the
-		 * contents of the struct sgttyb that 'data' points to.
-		 */
+	/*
	 * Wait for all characters queued for output to drain, then
	 * Discard all characters queued for input, and then set
	 * the input and output speeds and device flags, per the
	 * contents of the struct sgttyb that 'data' points to.
	 */
 	case TIOCSETN:
 		/*
 		 * Same as TIOCSETP, but the output is not drained, and any
 		 * pending input is not discarded.
 		 */
-	    {
+	{
 		struct sgttyb *sg = (struct sgttyb *)data;
 		int speed;

-		if ((speed = sg->sg_ispeed) > MAX_SPEED || speed < 0)
-			return(EINVAL);
-		else if (speed != ttcompatspeedtab(tp->t_ispeed, compatspeeds))
+		if ((speed = sg->sg_ispeed) > MAX_SPEED || speed < 0) {
+			return EINVAL;
+		} else if (speed != ttcompatspeedtab(tp->t_ispeed, compatspeeds)) {
 			term->c_ispeed = compatspcodes[speed];
-		else
+		} else {
 			term->c_ispeed = tp->t_ispeed;
+		}
-		if ((speed = sg->sg_ospeed) > MAX_SPEED || speed < 0)
-			return(EINVAL);
-		else if (speed != ttcompatspeedtab(tp->t_ospeed, compatspeeds))
+		if ((speed = sg->sg_ospeed) > MAX_SPEED || speed < 0) {
+			return EINVAL;
+		} else if (speed != ttcompatspeedtab(tp->t_ospeed, compatspeeds)) {
 			term->c_ospeed = compatspcodes[speed];
-		else
+		} else {
 			term->c_ospeed = tp->t_ospeed;
+		}
 		term->c_cc[VERASE] = sg->sg_erase;
 		term->c_cc[VKILL] = sg->sg_kill;
-		tp->t_flags = (tp->t_flags&0xffff0000) | (sg->sg_flags&0xffff);
+		tp->t_flags = (tp->t_flags & 0xffff0000) | (sg->sg_flags & 0xffff);
 		ttcompatsetflags(tp, term);
 		*com = (*com == TIOCSETP) ? TIOCSETAF : TIOCSETA;
 		break;
@@ -251,7 +256,7 @@ ttsetcompat(struct tty *tp, u_long *com, caddr_t data, struct termios *term)
 		 * Set the terminal control characters per the contents of
 		 * the struct tchars that 'data' points to.
 		 */
-	    {
+	{
 		struct tchars *tc = (struct tchars *)data;
 		cc_t *cc;
@@ -262,8 +267,9 @@ ttsetcompat(struct tty *tp, u_long *com, caddr_t data, struct termios *term)
 		cc[VSTOP] = tc->t_stopc;
 		cc[VEOF] = tc->t_eofc;
 		cc[VEOL] = tc->t_brkc;
-		if (tc->t_brkc == -1)
+		if (tc->t_brkc == -1) {
 			cc[VEOL2] = _POSIX_VDISABLE;
+		}
 		*com = TIOCSETA;
 		break;
 	}
@@ -287,32 +293,33 @@ ttsetcompat(struct tty *tp, u_long *com, caddr_t data, struct termios *term)
 		break;
 	}
 	case TIOCLBIS:
-		/*
-		 * Set the bits in the terminal state local flags word
-		 * (16 bits) for the terminal to the current bits OR
-		 * those in the 16 bit value pointed to by 'data'.
-		 */
+	/*
	 * Set the bits in the terminal state local flags word
	 * (16 bits) for the terminal to the current bits OR
	 * those in the 16 bit value pointed to by 'data'.
	 */
 	case TIOCLBIC:
-		/*
-		 * Clear the bits in the terminal state local flags word
-		 * for the terminal to the current bits AND those bits NOT
-		 * in the 16 bit value pointed to by 'data'.
-		 */
+	/*
	 * Clear the bits in the terminal state local flags word
	 * for the terminal to the current bits AND those bits NOT
	 * in the 16 bit value pointed to by 'data'.
	 */
 	case TIOCLSET:
 		/*
 		 * Set the terminal state local flags word to exactly those
 		 * bits that correspond to the 16 bit value pointed to by
 		 * 'data'.
 		 */
-		if (*com == TIOCLSET)
-			tp->t_flags = (tp->t_flags&0xffff) | *(int *)data<<16;
-		else {
+		if (*com == TIOCLSET) {
+			tp->t_flags = (tp->t_flags & 0xffff) | *(int *)data << 16;
+		} else {
 			tp->t_flags =
-			    (ttcompatgetflags(tp)&0xffff0000)|(tp->t_flags&0xffff);
-			if (*com == TIOCLBIS)
-				tp->t_flags |= *(int *)data<<16;
-			else
-				tp->t_flags &= ~(*(int *)data<<16);
+			    (ttcompatgetflags(tp) & 0xffff0000) | (tp->t_flags & 0xffff);
+			if (*com == TIOCLBIS) {
+				tp->t_flags |= *(int *)data << 16;
+			} else {
+				tp->t_flags &= ~(*(int *)data << 16);
+			}
 		}
 		ttcompatsetlflags(tp, term);
 		*com = TIOCSETA;
@@ -377,8 +384,9 @@ ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, struct proc *p)
 		int error;

 		term = tp->t_termios;
-		if ((error = ttsetcompat(tp, &com, data, &term)) != 0)
+		if ((error = ttsetcompat(tp, &com, data, &term)) != 0) {
 			return error;
+		}
 		return ttioctl_locked(tp, com, (caddr_t) &term, flag, p);
 	}
 	case TIOCGETP:
 		/*
 		 * Get the current input and output speeds, and device
 		 * flags, into the structure pointed to by 'data'.
 		 */
-	    {
+	{
 		struct sgttyb *sg = (struct sgttyb *)data;
 		cc_t *cc = tp->t_cc;

 		sg->sg_ospeed = ttcompatspeedtab(tp->t_ospeed, compatspeeds);
-		if (tp->t_ispeed == 0)
+		if (tp->t_ispeed == 0) {
 			sg->sg_ispeed = sg->sg_ospeed;
-		else
+		} else {
 			sg->sg_ispeed = ttcompatspeedtab(tp->t_ispeed, compatspeeds);
+		}
 		sg->sg_erase = cc[VERASE];
 		sg->sg_kill = cc[VKILL];
 		sg->sg_flags = tp->t_flags = ttcompatgetflags(tp);
@@ -405,7 +414,7 @@ ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, struct proc *p)
 		 * Get the terminal control characters into the struct
 		 * tchars that 'data' points to.
 		 */
-	    {
+	{
 		struct tchars *tc = (struct tchars *)data;
 		cc_t *cc = tp->t_cc;
@@ -440,9 +449,9 @@ ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, struct proc *p)
 		 * value pointed to by 'data'.
*/ tp->t_flags = - (ttcompatgetflags(tp) & 0xffff0000UL) - | (tp->t_flags & 0xffff); - *(int *)data = tp->t_flags>>16; + (ttcompatgetflags(tp) & 0xffff0000UL) + | (tp->t_flags & 0xffff); + *(int *)data = tp->t_flags >> 16; break; case OTIOCGETD: @@ -458,29 +467,31 @@ ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, struct proc *p) * Set the current line discipline based on the value of the * int pointed to by 'data'. */ - { + { int ldisczero = 0; - return (ttioctl_locked(tp, TIOCSETD, - *(int *)data == 2 ? (caddr_t)&ldisczero : data, flag, p)); - } + return ttioctl_locked(tp, TIOCSETD, + *(int *)data == 2 ? (caddr_t)&ldisczero : data, flag, p); + } case OTIOCCONS: /* * Become the console device. */ *(int *)data = 1; - return (ttioctl_locked(tp, TIOCCONS, data, flag, p)); + return ttioctl_locked(tp, TIOCCONS, data, flag, p); case TIOCGSID: /* * Get the current session ID (controlling process' PID). */ - if (tp->t_session == NULL) + if (tp->t_session == NULL) { return ENOTTY; + } - if (tp->t_session->s_leader == NULL) + if (tp->t_session->s_leader == NULL) { return ENOTTY; + } *(int *) data = tp->t_session->s_leader->p_pid; break; @@ -489,13 +500,13 @@ ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, struct proc *p) /* * This ioctl is not handled at this layer. */ - return (ENOTTY); + return ENOTTY; } /* * Successful 'get' operation. */ - return (0); + return 0; } /* @@ -517,59 +528,73 @@ ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, struct proc *p) static int ttcompatgetflags(struct tty *tp) { - tcflag_t iflag = tp->t_iflag; - tcflag_t lflag = tp->t_lflag; - tcflag_t oflag = tp->t_oflag; - tcflag_t cflag = tp->t_cflag; + tcflag_t iflag = tp->t_iflag; + tcflag_t lflag = tp->t_lflag; + tcflag_t oflag = tp->t_oflag; + tcflag_t cflag = tp->t_cflag; int flags = 0; - if (iflag&IXOFF) + if (iflag & IXOFF) { flags |= TANDEM; - if (iflag&ICRNL || oflag&ONLCR) + } + if (iflag & ICRNL || oflag & ONLCR) { flags |= CRMOD; - if ((cflag&CSIZE) == CS8) { + } + if ((cflag & CSIZE) == CS8) { flags |= PASS8; - if (iflag&ISTRIP) + if (iflag & ISTRIP) { flags |= ANYP; - } - else if (cflag&PARENB) { - if (iflag&INPCK) { - if (cflag&PARODD) + } + } else if (cflag & PARENB) { + if (iflag & INPCK) { + if (cflag & PARODD) { flags |= ODDP; - else + } else { flags |= EVENP; - } else + } + } else { flags |= EVENP | ODDP; + } } - if ((lflag&ICANON) == 0) { + if ((lflag & ICANON) == 0) { /* fudge */ - if (iflag&(INPCK|ISTRIP|IXON) || lflag&(IEXTEN|ISIG) - || (cflag&(CSIZE|PARENB)) != CS8) + if (iflag & (INPCK | ISTRIP | IXON) || lflag & (IEXTEN | ISIG) + || (cflag & (CSIZE | PARENB)) != CS8) { flags |= CBREAK; - else + } else { flags |= RAW; + } } - if (!(flags&RAW) && !(oflag&OPOST) && (cflag&(CSIZE|PARENB)) == CS8) + if (!(flags & RAW) && !(oflag & OPOST) && (cflag & (CSIZE | PARENB)) == CS8) { flags |= LITOUT; - if (cflag&MDMBUF) + } + if (cflag & MDMBUF) { flags |= MDMBUF; - if ((cflag&HUPCL) == 0) + } + if ((cflag & HUPCL) == 0) { flags |= NOHANG; - if (oflag&OXTABS) + } + if (oflag & OXTABS) { flags |= XTABS; - if (lflag&ECHOE) - flags |= CRTERA|CRTBS; - if (lflag&ECHOKE) - flags |= CRTKIL|CRTBS; - if (lflag&ECHOPRT) + } + if (lflag & ECHOE) { + flags |= CRTERA | CRTBS; + } + if (lflag & ECHOKE) { + flags |= CRTKIL | CRTBS; + } + if (lflag & ECHOPRT) { flags |= PRTERA; - if (lflag&ECHOCTL) + } + if (lflag & ECHOCTL) { flags |= CTLECH; - if ((iflag&IXANY) == 0) + } + if ((iflag & IXANY) == 0) { flags |= DECCTQ; - flags |= lflag&(ECHO|TOSTOP|FLUSHO|PENDIN|NOFLSH); - return 
(flags); + } + flags |= lflag & (ECHO | TOSTOP | FLUSHO | PENDIN | NOFLSH); + return flags; } /* @@ -590,27 +615,29 @@ static void ttcompatsetflags(struct tty *tp, struct termios *t) { int flags = tp->t_flags; - tcflag_t iflag = t->c_iflag; - tcflag_t oflag = t->c_oflag; - tcflag_t lflag = t->c_lflag; - tcflag_t cflag = t->c_cflag; + tcflag_t iflag = t->c_iflag; + tcflag_t oflag = t->c_oflag; + tcflag_t lflag = t->c_lflag; + tcflag_t cflag = t->c_cflag; if (flags & RAW) { iflag = IGNBRK; - lflag &= ~(ECHOCTL|ISIG|ICANON|IEXTEN); + lflag &= ~(ECHOCTL | ISIG | ICANON | IEXTEN); } else { - iflag &= ~(PARMRK|IGNPAR|IGNCR|INLCR); - iflag |= BRKINT|IXON|IMAXBEL; - lflag |= ISIG|IEXTEN|ECHOCTL; /* XXX was echoctl on ? */ - if (flags & XTABS) + iflag &= ~(PARMRK | IGNPAR | IGNCR | INLCR); + iflag |= BRKINT | IXON | IMAXBEL; + lflag |= ISIG | IEXTEN | ECHOCTL; /* XXX was echoctl on ? */ + if (flags & XTABS) { oflag |= OXTABS; - else + } else { oflag &= ~OXTABS; - if (flags & CBREAK) + } + if (flags & CBREAK) { lflag &= ~ICANON; - else + } else { lflag |= ICANON; - if (flags&CRMOD) { + } + if (flags & CRMOD) { iflag |= ICRNL; oflag |= ONLCR; } else { @@ -618,45 +645,51 @@ ttcompatsetflags(struct tty *tp, struct termios *t) oflag &= ~ONLCR; } } - if (flags&ECHO) + if (flags & ECHO) { lflag |= ECHO; - else + } else { lflag &= ~ECHO; + } - cflag &= ~(CSIZE|PARENB); - if (flags&(RAW|LITOUT|PASS8)) { + cflag &= ~(CSIZE | PARENB); + if (flags & (RAW | LITOUT | PASS8)) { cflag |= CS8; - if (!(flags&(RAW|PASS8)) - || (flags&(RAW|PASS8|ANYP)) == (PASS8|ANYP)) + if (!(flags & (RAW | PASS8)) + || (flags & (RAW | PASS8 | ANYP)) == (PASS8 | ANYP)) { iflag |= ISTRIP; - else + } else { iflag &= ~ISTRIP; - if (flags&(RAW|LITOUT)) + } + if (flags & (RAW | LITOUT)) { oflag &= ~OPOST; - else + } else { oflag |= OPOST; + } } else { - cflag |= CS7|PARENB; + cflag |= CS7 | PARENB; iflag |= ISTRIP; oflag |= OPOST; } /* XXX don't set INPCK if RAW or PASS8? 
*/ - if ((flags&(EVENP|ODDP)) == EVENP) { + if ((flags & (EVENP | ODDP)) == EVENP) { iflag |= INPCK; cflag &= ~PARODD; - } else if ((flags&(EVENP|ODDP)) == ODDP) { + } else if ((flags & (EVENP | ODDP)) == ODDP) { iflag |= INPCK; cflag |= PARODD; - } else + } else { iflag &= ~INPCK; - if (flags&TANDEM) + } + if (flags & TANDEM) { iflag |= IXOFF; - else + } else { iflag &= ~IXOFF; - if ((flags&DECCTQ) == 0) + } + if ((flags & DECCTQ) == 0) { iflag |= IXANY; - else + } else { iflag &= ~IXANY; + } t->c_iflag = iflag; t->c_oflag = oflag; t->c_lflag = lflag; @@ -681,46 +714,54 @@ static void ttcompatsetlflags(struct tty *tp, struct termios *t) { int flags = tp->t_flags; - tcflag_t iflag = t->c_iflag; - tcflag_t oflag = t->c_oflag; - tcflag_t lflag = t->c_lflag; - tcflag_t cflag = t->c_cflag; + tcflag_t iflag = t->c_iflag; + tcflag_t oflag = t->c_oflag; + tcflag_t lflag = t->c_lflag; + tcflag_t cflag = t->c_cflag; - iflag &= ~(PARMRK|IGNPAR|IGNCR|INLCR); - if (flags&CRTERA) + iflag &= ~(PARMRK | IGNPAR | IGNCR | INLCR); + if (flags & CRTERA) { lflag |= ECHOE; - else + } else { lflag &= ~ECHOE; - if (flags&CRTKIL) + } + if (flags & CRTKIL) { lflag |= ECHOKE; - else + } else { lflag &= ~ECHOKE; - if (flags&PRTERA) + } + if (flags & PRTERA) { lflag |= ECHOPRT; - else + } else { lflag &= ~ECHOPRT; - if (flags&CTLECH) + } + if (flags & CTLECH) { lflag |= ECHOCTL; - else + } else { lflag &= ~ECHOCTL; - if (flags&TANDEM) + } + if (flags & TANDEM) { iflag |= IXOFF; - else + } else { iflag &= ~IXOFF; - if ((flags&DECCTQ) == 0) + } + if ((flags & DECCTQ) == 0) { iflag |= IXANY; - else + } else { iflag &= ~IXANY; - if (flags & MDMBUF) + } + if (flags & MDMBUF) { cflag |= MDMBUF; - else + } else { cflag &= ~MDMBUF; - if (flags&NOHANG) + } + if (flags & NOHANG) { cflag &= ~HUPCL; - else + } else { cflag |= HUPCL; - lflag &= ~(TOSTOP|FLUSHO|PENDIN|NOFLSH); - lflag |= flags&(TOSTOP|FLUSHO|PENDIN|NOFLSH); + } + lflag &= ~(TOSTOP | FLUSHO | PENDIN | NOFLSH); + lflag |= flags & (TOSTOP | FLUSHO | PENDIN | NOFLSH); /* * The next if-else statement is copied from above so don't bother @@ -730,20 +771,22 @@ ttcompatsetlflags(struct tty *tp, struct termios *t) * the change is not available here and skipping the RAW case would * make the code different from above. */ - cflag &= ~(CSIZE|PARENB); - if (flags&(RAW|LITOUT|PASS8)) { + cflag &= ~(CSIZE | PARENB); + if (flags & (RAW | LITOUT | PASS8)) { cflag |= CS8; - if (!(flags&(RAW|PASS8)) - || (flags&(RAW|PASS8|ANYP)) == (PASS8|ANYP)) + if (!(flags & (RAW | PASS8)) + || (flags & (RAW | PASS8 | ANYP)) == (PASS8 | ANYP)) { iflag |= ISTRIP; - else + } else { iflag &= ~ISTRIP; - if (flags&(RAW|LITOUT)) + } + if (flags & (RAW | LITOUT)) { oflag &= ~OPOST; - else + } else { oflag |= OPOST; + } } else { - cflag |= CS7|PARENB; + cflag |= CS7 | PARENB; iflag |= ISTRIP; oflag |= OPOST; } diff --git a/bsd/kern/tty_conf.c b/bsd/kern/tty_conf.c index b72d464d2..51ddadd8c 100644 --- a/bsd/kern/tty_conf.c +++ b/bsd/kern/tty_conf.c @@ -2,7 +2,7 @@ * Copyright (c) 1997-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -74,14 +74,14 @@ #define MAXLDISC 8 #endif -#define l_noopen ((l_open_t *) &enodev) -#define l_noclose ((l_close_t *) &enodev) -#define l_noread ((l_read_t *) &enodev) -#define l_nowrite ((l_write_t *) &enodev) -#define l_norint ((l_rint_t *) &enodev) +#define l_noopen ((l_open_t *) &enodev) +#define l_noclose ((l_close_t *) &enodev) +#define l_noread ((l_read_t *) &enodev) +#define l_nowrite ((l_write_t *) &enodev) +#define l_norint ((l_rint_t *) &enodev) -static l_ioctl_t l_noioctl; -static l_start_t l_nostart; +static l_ioctl_t l_noioctl; +static l_start_t l_nostart; /* * XXX it probably doesn't matter what the entries other than the l_open @@ -93,23 +93,23 @@ static l_start_t l_nostart; { l_noopen, l_noclose, l_noread, l_nowrite, \ l_noioctl, l_norint, l_nostart, ttymodem } -struct linesw linesw[MAXLDISC] = +struct linesw linesw[MAXLDISC] = { - /* 0- termios */ - { ttyopen, ttylclose, ttread, ttwrite, - l_noioctl, ttyinput, ttwwakeup, ttymodem }, - NODISC(1), /* 1- defunct */ - /* 2- NTTYDISC */ - { ttyopen, ttylclose, ttread, ttwrite, - l_noioctl, ttyinput, ttwwakeup, ttymodem }, - NODISC(3), /* TABLDISC */ - NODISC(4), /* SLIPDISC */ - NODISC(5), /* PPPDISC */ - NODISC(6), /* loadable */ - NODISC(7), /* loadable */ + /* 0- termios */ + { ttyopen, ttylclose, ttread, ttwrite, + l_noioctl, ttyinput, ttwwakeup, ttymodem }, + NODISC(1), /* 1- defunct */ + /* 2- NTTYDISC */ + { ttyopen, ttylclose, ttread, ttwrite, + l_noioctl, ttyinput, ttwwakeup, ttymodem }, + NODISC(3), /* TABLDISC */ + NODISC(4), /* SLIPDISC */ + NODISC(5), /* PPPDISC */ + NODISC(6), /* loadable */ + NODISC(7), /* loadable */ }; -const int nlinesw = sizeof (linesw) / sizeof (linesw[0]); +const int nlinesw = sizeof(linesw) / sizeof(linesw[0]); static struct linesw nodisc = NODISC(0); @@ -129,17 +129,18 @@ ldisc_register(int discipline, struct linesw *linesw_p) if (discipline == LDISC_LOAD) { int i; - for (i = LOADABLE_LDISC; i < MAXLDISC; i++) + for (i = LOADABLE_LDISC; i < MAXLDISC; i++) { if (bcmp(linesw + i, &nodisc, sizeof(nodisc)) == 0) { slot = i; } - } - else if (discipline >= 0 && discipline < MAXLDISC) { + } + } else if (discipline >= 0 && discipline < MAXLDISC) { slot = discipline; } - if (slot != -1 && linesw_p) + if (slot != -1 && linesw_p) { linesw[slot] = *linesw_p; + } return slot; } @@ -164,7 +165,7 @@ ldisc_deregister(int discipline) */ static int l_noioctl(__unused struct tty *tp, __unused u_long cmd, __unused caddr_t data, - __unused int flags, __unused struct proc *p) + __unused int flags, __unused struct proc *p) { return ENOTTY; } diff --git a/bsd/kern/tty_dev.c b/bsd/kern/tty_dev.c index 1e1fe83f4..ccfd752fb 100644 --- a/bsd/kern/tty_dev.c +++ 
b/bsd/kern/tty_dev.c @@ -2,7 +2,7 @@ * Copyright (c) 1997-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -75,13 +75,13 @@ #include #include #include -#include /* _devfs_setattr() */ -#include /* _devfs_setattr() */ +#include /* _devfs_setattr() */ +#include /* _devfs_setattr() */ #include #include #include #include -#include /* DEVFS_LOCK()/DEVFS_UNLOCK() */ +#include /* DEVFS_LOCK()/DEVFS_UNLOCK() */ #if CONFIG_MACF #include @@ -96,21 +96,21 @@ static int _devfs_setattr(void *, unsigned short, uid_t, gid_t); * Forward declarations */ static void ptcwakeup(struct tty *tp, int flag); -__XNU_PRIVATE_EXTERN d_open_t ptsopen; -__XNU_PRIVATE_EXTERN d_close_t ptsclose; -__XNU_PRIVATE_EXTERN d_read_t ptsread; -__XNU_PRIVATE_EXTERN d_write_t ptswrite; -__XNU_PRIVATE_EXTERN d_ioctl_t ptyioctl; /* common ioctl */ -__XNU_PRIVATE_EXTERN d_stop_t ptsstop; -__XNU_PRIVATE_EXTERN d_reset_t ptsreset; -__XNU_PRIVATE_EXTERN d_select_t ptsselect; -__XNU_PRIVATE_EXTERN d_open_t ptcopen; -__XNU_PRIVATE_EXTERN d_close_t ptcclose; -__XNU_PRIVATE_EXTERN d_read_t ptcread; -__XNU_PRIVATE_EXTERN d_write_t ptcwrite; -__XNU_PRIVATE_EXTERN d_stop_t ptcstop; /* NO-OP */ -__XNU_PRIVATE_EXTERN d_reset_t ptcreset; -__XNU_PRIVATE_EXTERN d_select_t ptcselect; +__XNU_PRIVATE_EXTERN d_open_t ptsopen; +__XNU_PRIVATE_EXTERN d_close_t ptsclose; +__XNU_PRIVATE_EXTERN d_read_t ptsread; +__XNU_PRIVATE_EXTERN d_write_t ptswrite; +__XNU_PRIVATE_EXTERN d_ioctl_t ptyioctl; /* common ioctl */ +__XNU_PRIVATE_EXTERN d_stop_t ptsstop; +__XNU_PRIVATE_EXTERN d_reset_t ptsreset; +__XNU_PRIVATE_EXTERN d_select_t ptsselect; +__XNU_PRIVATE_EXTERN d_open_t ptcopen; +__XNU_PRIVATE_EXTERN d_close_t ptcclose; +__XNU_PRIVATE_EXTERN d_read_t ptcread; +__XNU_PRIVATE_EXTERN d_write_t ptcwrite; +__XNU_PRIVATE_EXTERN d_stop_t ptcstop; /* NO-OP */ +__XNU_PRIVATE_EXTERN d_reset_t ptcreset; +__XNU_PRIVATE_EXTERN d_select_t ptcselect; /* * XXX Should be devfs function... 
and use VATTR mechanisms, per @@ -125,11 +125,11 @@ __XNU_PRIVATE_EXTERN d_select_t ptcselect; static int _devfs_setattr(void * handle, unsigned short mode, uid_t uid, gid_t gid) { - devdirent_t *direntp = (devdirent_t *)handle; - devnode_t *devnodep; - int error = EACCES; - vfs_context_t ctx = vfs_context_current();; - struct vnode_attr va; + devdirent_t *direntp = (devdirent_t *)handle; + devnode_t *devnodep; + int error = EACCES; + vfs_context_t ctx = vfs_context_current();; + struct vnode_attr va; VATTR_INIT(&va); VATTR_SET(&va, va_uid, uid); @@ -161,8 +161,9 @@ _devfs_setattr(void * handle, unsigned short mode, uid_t uid, gid_t gid) snprintf(name, sizeof(name), "/dev/%s", direntp->de_name); NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW, UIO_SYSSPACE, CAST_USER_ADDR_T(name), ctx); error = namei(&nd); - if (error) + if (error) { goto out; + } error = vnode_setattr(nd.ni_vp, &va, ctx); vnode_put(nd.ni_vp); nameidone(&nd); @@ -170,10 +171,10 @@ _devfs_setattr(void * handle, unsigned short mode, uid_t uid, gid_t gid) } out: - return(error); + return error; } -#define BUFSIZ 100 /* Chunk size iomoved to/from user */ +#define BUFSIZ 100 /* Chunk size iomoved to/from user */ static struct tty_dev_t *tty_dev_head; @@ -268,37 +269,41 @@ ptsopen(dev_t dev, int flag, __unused int devtype, __unused struct proc *p) tty_lock(tp); if ((tp->t_state & TS_ISOPEN) == 0) { - termioschars(&tp->t_termios); /* Set up default chars */ + termioschars(&tp->t_termios); /* Set up default chars */ tp->t_iflag = TTYDEF_IFLAG; tp->t_oflag = TTYDEF_OFLAG; tp->t_lflag = TTYDEF_LFLAG; tp->t_cflag = TTYDEF_CFLAG; tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; - ttsetwater(tp); /* would be done in xxparam() */ + ttsetwater(tp); /* would be done in xxparam() */ } else if ((tp->t_state & TS_XCLUDE) && kauth_cred_issuser(kauth_cred_get())) { - error = EBUSY; + error = EBUSY; goto out; } - if (tp->t_oproc) /* Ctrlr still around. */ + if (tp->t_oproc) { /* Ctrlr still around. */ (void)(*linesw[tp->t_line].l_modem)(tp, 1); + } while ((tp->t_state & TS_CARR_ON) == 0) { - if (flag&FNONBLOCK) + if (flag & FNONBLOCK) { break; + } error = ttysleep(tp, TSA_CARR_ON(tp), TTIPRI | PCATCH, __FUNCTION__, 0); - if (error) + if (error) { goto out; + } } error = (*linesw[tp->t_line].l_open)(dev, tp); /* Successful open; mark as open by the slave */ pti->pt_flags |= PF_OPEN_S; CLR(tp->t_state, TS_IOCTL_NOT_OK); - if (error == 0) - ptcwakeup(tp, FREAD|FWRITE); + if (error == 0) { + ptcwakeup(tp, FREAD | FWRITE); + } out: tty_unlock(tp); - return (error); + return error; } __private_extern__ int @@ -311,20 +316,21 @@ ptsclose(dev_t dev, int flag, __unused int mode, __unused proc_t p) * are fixed. They are hanging with a deadlock * where close() will not complete without t_timeout set */ -#define FIX_VSX_HANG 1 -#ifdef FIX_VSX_HANG +#define FIX_VSX_HANG 1 +#ifdef FIX_VSX_HANG int save_timeout; #endif struct tty_dev_t *driver; struct ptmx_ioctl *pti = pty_get_ioctl(dev, 0, &driver); struct tty *tp; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } tp = pti->pt_tty; tty_lock(tp); -#ifdef FIX_VSX_HANG +#ifdef FIX_VSX_HANG save_timeout = tp->t_timeout; tp->t_timeout = 60; #endif @@ -338,7 +344,7 @@ ptsclose(dev_t dev, int flag, __unused int mode, __unused proc_t p) * Flush data and notify any waiters on the master side of this PTY. 
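One observable effect of the ptsstop()/ptcwakeup() pairing in this hunk: a master-side reader blocked in select() is woken when the slave writes or hangs up. A hedged user-space sketch; the helper name is illustrative, not part of any API here.

#include <sys/select.h>

/*
 * Block until the master side of a pty becomes readable. On the kernel
 * side it is ptcwakeup(tp, FREAD) that fires this, whether because the
 * slave produced output or because it closed.
 */
static int
wait_for_master_readable(int mfd)
{
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(mfd, &rfds);
	if (select(mfd + 1, &rfds, NULL, NULL, NULL) == -1) {
		return -1;
	}
	return FD_ISSET(mfd, &rfds);
}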
*/ ptsstop(tp, FREAD | FWRITE); -#ifdef FIX_VSX_HANG +#ifdef FIX_VSX_HANG tp->t_timeout = save_timeout; #endif tty_unlock(tp); @@ -349,7 +355,7 @@ ptsclose(dev_t dev, int flag, __unused int mode, __unused proc_t p) /* unconditional, just like ttyclose() */ pty_free_ioctl(dev, PF_OPEN_S); - return (err); + return err; } __private_extern__ int @@ -362,8 +368,9 @@ ptsread(dev_t dev, struct uio *uio, int flag) struct uthread *ut; struct pgrp *pg; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } tp = pti->pt_tty; tty_lock(tp); @@ -400,8 +407,9 @@ again: tty_lock(tp); error = ttysleep(tp, &ptsread, TTIPRI | PCATCH | PTTYBLOCK, __FUNCTION__, hz); - if (error) - goto out; + if (error) { + goto out; + } } if (tp->t_canq.c_cc == 0) { if (flag & IO_NDELAY) { @@ -409,8 +417,9 @@ again: goto out; } error = ttysleep(tp, TSA_PTS_READ(tp), TTIPRI | PCATCH, __FUNCTION__, 0); - if (error) - goto out; + if (error) { + goto out; + } goto again; } while (tp->t_canq.c_cc > 1 && uio_resid(uio) > 0) { @@ -422,20 +431,23 @@ again: cc = MIN(cc, tp->t_canq.c_cc - 1); cc = q_to_b(&tp->t_canq, (u_char *)buf, cc); error = uiomove(buf, cc, uio); - if (error) + if (error) { break; + } } - if (tp->t_canq.c_cc == 1) + if (tp->t_canq.c_cc == 1) { (void) getc(&tp->t_canq); - if (tp->t_canq.c_cc) - goto out; - } else - if (tp->t_oproc) - error = (*linesw[tp->t_line].l_read)(tp, uio, flag); + } + if (tp->t_canq.c_cc) { + goto out; + } + } else if (tp->t_oproc) { + error = (*linesw[tp->t_line].l_read)(tp, uio, flag); + } ptcwakeup(tp, FWRITE); out: tty_unlock(tp); - return (error); + return error; } /* @@ -450,19 +462,21 @@ ptswrite(dev_t dev, struct uio *uio, int flag) struct tty *tp; int error; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } tp = pti->pt_tty; tty_lock(tp); - if (tp->t_oproc == 0) + if (tp->t_oproc == 0) { error = EIO; - else - error = (*linesw[tp->t_line].l_write)(tp, uio, flag); + } else { + error = (*linesw[tp->t_line].l_write)(tp, uio, flag); + } tty_unlock(tp); - return (error); + return error; } /* @@ -477,10 +491,12 @@ static void ptsstart(struct tty *tp) { struct ptmx_ioctl *pti = pty_get_ioctl(tp->t_dev, 0, NULL); - if (pti == NULL) + if (pti == NULL) { goto out; - if (tp->t_state & TS_TTSTOP) - goto out; + } + if (tp->t_state & TS_TTSTOP) { + goto out; + } if (pti->pt_flags & PF_STOPPED) { pti->pt_flags &= ~PF_STOPPED; pti->pt_send = TIOCPKT_START; @@ -497,8 +513,9 @@ static void ptcwakeup(struct tty *tp, int flag) { struct ptmx_ioctl *pti = pty_get_ioctl(tp->t_dev, 0, NULL); - if (pti == NULL) + if (pti == NULL) { return; + } if (flag & FREAD) { selwakeup(&pti->pt_selr); @@ -518,9 +535,9 @@ ptcopen(dev_t dev, __unused int flag, __unused int devtype, __unused proc_t p) struct tty_dev_t *driver; struct ptmx_ioctl *pti = pty_get_ioctl(dev, PF_OPEN_M, &driver); if (pti == NULL) { - return (ENXIO); + return ENXIO; } else if (pti == (struct ptmx_ioctl*)-1) { - return (EREDRIVEOPEN); + return EREDRIVEOPEN; } struct tty *tp = pti->pt_tty; @@ -613,7 +630,7 @@ ptcclose(dev_t dev, __unused int flags, __unused int fmt, __unused proc_t p) } #endif - return (0); + return 0; } __private_extern__ int @@ -624,8 +641,9 @@ ptcread(dev_t dev, struct uio *uio, int flag) char buf[BUFSIZ]; int error = 0, cc; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } tp = pti->pt_tty; tty_lock(tp); @@ -639,43 +657,50 @@ ptcread(dev_t dev, struct uio *uio, int flag) if (tp->t_state & TS_ISOPEN) { if (pti->pt_flags & PF_PKT && pti->pt_send) { error 
= ureadc((int)pti->pt_send, uio); - if (error) + if (error) { goto out; + } if (pti->pt_send & TIOCPKT_IOCTL) { cc = MIN((int)uio_resid(uio), - (int)sizeof(tp->t_termios)); + (int)sizeof(tp->t_termios)); uiomove((caddr_t)&tp->t_termios, cc, - uio); + uio); } pti->pt_send = 0; goto out; } if (pti->pt_flags & PF_UCNTL && pti->pt_ucntl) { error = ureadc((int)pti->pt_ucntl, uio); - if (error) + if (error) { goto out; + } pti->pt_ucntl = 0; goto out; } - if (tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0) + if (tp->t_outq.c_cc && (tp->t_state & TS_TTSTOP) == 0) { break; + } + } + if ((tp->t_state & TS_CONNECTED) == 0) { + goto out; /* EOF */ } - if ((tp->t_state & TS_CONNECTED) == 0) - goto out; /* EOF */ if (flag & IO_NDELAY) { error = EWOULDBLOCK; goto out; } error = ttysleep(tp, TSA_PTC_READ(tp), TTIPRI | PCATCH, __FUNCTION__, 0); - if (error) - goto out; + if (error) { + goto out; + } } - if (pti->pt_flags & (PF_PKT|PF_UCNTL)) + if (pti->pt_flags & (PF_PKT | PF_UCNTL)) { error = ureadc(0, uio); + } while (uio_resid(uio) > 0 && error == 0) { cc = q_to_b(&tp->t_outq, (u_char *)buf, MIN((int)uio_resid(uio), BUFSIZ)); - if (cc <= 0) + if (cc <= 0) { break; + } error = uiomove(buf, cc, uio); } (*linesw[tp->t_line].l_start)(tp); @@ -683,7 +708,7 @@ ptcread(dev_t dev, struct uio *uio, int flag) out: tty_unlock(tp); - return (error); + return error; } /* @@ -697,22 +722,26 @@ ptsstop(struct tty* tp, int flush) struct ptmx_ioctl *pti = pty_get_ioctl(tp->t_dev, 0, NULL); int flag; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } /* note: FLUSHREAD and FLUSHWRITE already ok */ if (flush == 0) { flush = TIOCPKT_STOP; pti->pt_flags |= PF_STOPPED; - } else + } else { pti->pt_flags &= ~PF_STOPPED; + } pti->pt_send |= flush; /* change of perspective */ flag = 0; - if (flush & FREAD) + if (flush & FREAD) { flag |= FWRITE; - if (flush & FWRITE) + } + if (flush & FWRITE) { flag |= FREAD; + } ptcwakeup(tp, flag); return 0; } @@ -720,7 +749,7 @@ ptsstop(struct tty* tp, int flush) __private_extern__ int ptsreset(__unused int uban) { - return (0); + return 0; } int @@ -730,11 +759,13 @@ ptsselect(dev_t dev, int rw, void *wql, proc_t p) struct tty *tp; int retval = 0; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } tp = pti->pt_tty; - if (tp == NULL) - return (ENXIO); + if (tp == NULL) { + return ENXIO; + } tty_lock(tp); @@ -759,7 +790,7 @@ ptsselect(dev_t dev, int rw, void *wql, proc_t p) } if ((tp->t_outq.c_cc <= tp->t_lowat) && - ISSET(tp->t_state, TS_CONNECTED)) { + ISSET(tp->t_state, TS_CONNECTED)) { retval = tp->t_hiwat - tp->t_outq.c_cc; break; } @@ -769,7 +800,7 @@ ptsselect(dev_t dev, int rw, void *wql, proc_t p) } tty_unlock(tp); - return (retval); + return retval; } __private_extern__ int @@ -780,8 +811,9 @@ ptcselect(dev_t dev, int rw, void *wql, proc_t p) struct tty *tp; int retval = 0; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } tp = pti->pt_tty; tty_lock(tp); @@ -790,22 +822,21 @@ ptcselect(dev_t dev, int rw, void *wql, proc_t p) goto out; } switch (rw) { - case FREAD: /* * Need to block timeouts (ttrstart). */ - if ((tp->t_state&TS_ISOPEN) && - tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0) { + if ((tp->t_state & TS_ISOPEN) && + tp->t_outq.c_cc && (tp->t_state & TS_TTSTOP) == 0) { retval = (driver->fix_7828447) ? 
tp->t_outq.c_cc : 1; break; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case 0: /* exceptional */ - if ((tp->t_state&TS_ISOPEN) && - (((pti->pt_flags & PF_PKT) && pti->pt_send) || - ((pti->pt_flags & PF_UCNTL) && pti->pt_ucntl))) { + if ((tp->t_state & TS_ISOPEN) && + (((pti->pt_flags & PF_PKT) && pti->pt_send) || + ((pti->pt_flags & PF_UCNTL) && pti->pt_ucntl))) { retval = 1; break; } @@ -814,19 +845,19 @@ ptcselect(dev_t dev, int rw, void *wql, proc_t p) case FWRITE: - if (tp->t_state&TS_ISOPEN) { + if (tp->t_state & TS_ISOPEN) { if (pti->pt_flags & PF_REMOTE) { if (tp->t_canq.c_cc == 0) { retval = (driver->fix_7828447) ? (TTYHOG - 1) : 1; break; - } + } } else { retval = (TTYHOG - 2) - (tp->t_rawq.c_cc + tp->t_canq.c_cc); if (retval > 0) { retval = (driver->fix_7828447) ? retval : 1; break; } - if (tp->t_canq.c_cc == 0 && (tp->t_lflag&ICANON)) { + if (tp->t_canq.c_cc == 0 && (tp->t_lflag & ICANON)) { retval = 1; break; } @@ -835,24 +866,23 @@ ptcselect(dev_t dev, int rw, void *wql, proc_t p) } selrecord(p, &pti->pt_selw, wql); break; - } out: tty_unlock(tp); - return (retval); + return retval; } __private_extern__ int ptcstop(__unused struct tty *tp, __unused int flush) { - return (0); + return 0; } __private_extern__ int ptcreset(__unused int uban) { - return (0); + return 0; } __private_extern__ int @@ -866,26 +896,30 @@ ptcwrite(dev_t dev, struct uio *uio, int flag) int wcnt = 0; int error = 0; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } tp = pti->pt_tty; tty_lock(tp); again: - if ((tp->t_state & TS_ISOPEN) == 0) + if ((tp->t_state & TS_ISOPEN) == 0) { goto block; + } if (pti->pt_flags & PF_REMOTE) { - if (tp->t_canq.c_cc) + if (tp->t_canq.c_cc) { goto block; + } while ((uio_resid(uio) > 0 || cc > 0) && - tp->t_canq.c_cc < TTYHOG - 1) { + tp->t_canq.c_cc < TTYHOG - 1) { if (cc == 0) { cc = MIN((int)uio_resid(uio), BUFSIZ); cc = MIN(cc, TTYHOG - 1 - tp->t_canq.c_cc); cp = locbuf; error = uiomove((caddr_t)cp, cc, uio); - if (error) + if (error) { goto out; + } /* check again for safety */ if ((tp->t_state & TS_ISOPEN) == 0) { /* adjust as usual */ @@ -904,8 +938,9 @@ again: * we don't fail here since (TTYHOG - 1) is * not a multiple of CBSIZE. 
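The PF_PKT branches in ptcread() above implement packet mode as seen from the master: every read() is prefixed with a status byte, where 0 (TIOCPKT_DATA) means ordinary slave output and nonzero bytes carry the flow-control events queued through pt_send. A hedged user-space sketch; the TIOCPKT_* constants come from sys/ttycom.h via sys/ioctl.h.

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>	/* TIOCPKT and the TIOCPKT_* status bits */

static void
read_one_packet(int mfd)
{
	unsigned char buf[256];
	ssize_t n;
	int on = 1;

	(void)ioctl(mfd, TIOCPKT, &on);		/* sets PF_PKT in ptyioctl() */
	n = read(mfd, buf, sizeof(buf));
	if (n <= 0) {
		return;
	}
	if (buf[0] == TIOCPKT_DATA) {		/* the leading 0 from ureadc(0, uio) */
		fwrite(buf + 1, 1, (size_t)(n - 1), stdout);
	} else if (buf[0] & TIOCPKT_STOP) {
		fprintf(stderr, "slave output stopped (^S)\n");
	} else if (buf[0] & TIOCPKT_START) {
		fprintf(stderr, "slave output restarted (^Q)\n");
	}
}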
*/ - if (cc > 0) + if (cc > 0) { break; + } } } /* adjust for data copied in but not written */ @@ -920,8 +955,9 @@ again: cc = MIN((int)uio_resid(uio), BUFSIZ); cp = locbuf; error = uiomove((caddr_t)cp, cc, uio); - if (error) + if (error) { goto out; + } /* check again for safety */ if ((tp->t_state & TS_ISOPEN) == 0) { /* adjust for data copied in but not written */ @@ -932,7 +968,7 @@ again: } while (cc > 0) { if ((tp->t_rawq.c_cc + tp->t_canq.c_cc) >= TTYHOG - 2 && - (tp->t_canq.c_cc > 0 || !(tp->t_lflag&ICANON))) { + (tp->t_canq.c_cc > 0 || !(tp->t_lflag & ICANON))) { wakeup(TSA_HUP_OR_INPUT(tp)); goto block; } @@ -945,7 +981,7 @@ again: out: tty_unlock(tp); - return (error); + return error; block: /* @@ -961,8 +997,9 @@ block: if (flag & IO_NDELAY) { /* adjust for data copied in but not written */ uio_setresid(uio, (uio_resid(uio) + cc)); - if (wcnt == 0) + if (wcnt == 0) { error = EWOULDBLOCK; + } goto out; } error = ttysleep(tp, TSA_PTC_WRITE(tp), TTOPRI | PCATCH, __FUNCTION__, 0); @@ -983,8 +1020,9 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) int stop, error = 0; int allow_ext_ioctl = 1; - if (pti == NULL) - return (ENXIO); + if (pti == NULL) { + return ENXIO; + } tp = pti->pt_tty; tty_lock(tp); @@ -1025,10 +1063,8 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) tp->t_lflag &= ~EXTPROC; } goto out; - } else - if (cdevsw[major(dev)].d_open == ptcopen) { + } else if (cdevsw[major(dev)].d_open == ptcopen) { switch (cmd) { - case TIOCGPGRP: /* * We aviod calling ttioctl on the controller since, @@ -1039,32 +1075,35 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) case TIOCPKT: if (*(int *)data) { - if (pti->pt_flags & PF_UCNTL) { + if (pti->pt_flags & PF_UCNTL) { error = EINVAL; goto out; } pti->pt_flags |= PF_PKT; - } else + } else { pti->pt_flags &= ~PF_PKT; + } goto out; case TIOCUCNTL: if (*(int *)data) { - if (pti->pt_flags & PF_PKT) { + if (pti->pt_flags & PF_PKT) { error = EINVAL; goto out; } pti->pt_flags |= PF_UCNTL; - } else + } else { pti->pt_flags &= ~PF_UCNTL; + } goto out; case TIOCREMOTE: - if (*(int *)data) + if (*(int *)data) { pti->pt_flags |= PF_REMOTE; - else + } else { pti->pt_flags &= ~PF_REMOTE; - ttyflush(tp, FREAD|FWRITE); + } + ttyflush(tp, FREAD | FWRITE); goto out; case TIOCSETP: @@ -1085,11 +1124,13 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) error = EINVAL; goto out; } - if ((tp->t_lflag&NOFLSH) == 0) - ttyflush(tp, FREAD|FWRITE); + if ((tp->t_lflag & NOFLSH) == 0) { + ttyflush(tp, FREAD | FWRITE); + } if ((*(unsigned int *)data == SIGINFO) && - ((tp->t_lflag&NOKERNINFO) == 0)) + ((tp->t_lflag & NOKERNINFO) == 0)) { ttyinfo_locked(tp); + } /* * SAFE: All callers drop the lock on return and * SAFE: the linesw[] will short circut this call @@ -1101,31 +1142,31 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) tty_lock(tp); goto out; - case TIOCPTYGRANT: /* grantpt(3) */ + case TIOCPTYGRANT: /* grantpt(3) */ /* * Change the uid of the slave to that of the calling * thread, change the gid of the slave to GID_TTY, * change the mode to 0620 (rw--w----). 
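These three ioctls are exactly what the libc pty helpers sit on: grantpt(3) issues TIOCPTYGRANT, unlockpt(3) issues TIOCPTYUNLK, and ptsname(3) issues TIOCPTYGNAME. A hedged end-to-end sketch using only the public API, with header placement per the Darwin man pages:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	char buf[16];
	ssize_t n;

	int mfd = posix_openpt(O_RDWR | O_NOCTTY);	/* clones a ptmx master */
	if (mfd < 0 || grantpt(mfd) != 0 || unlockpt(mfd) != 0) {
		return 1;				/* TIOCPTYGRANT/TIOCPTYUNLK failed */
	}
	const char *name = ptsname(mfd);		/* TIOCPTYGNAME: "/dev/ttysNNN" */
	int sfd = open(name, O_RDWR | O_NOCTTY);	/* ptsopen() runs here */
	if (sfd < 0) {
		return 1;
	}
	(void)write(mfd, "hi\n", 3);			/* master -> line discipline -> slave */
	n = read(sfd, buf, sizeof(buf));
	printf("slave read %zd bytes via %s\n", n, name);
	return 0;
}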
*/ - { - error = _devfs_setattr(pti->pt_devhandle, 0620, kauth_getuid(), GID_TTY); - if (major(dev) == driver->master) { - if (driver->mac_notify) { + { + error = _devfs_setattr(pti->pt_devhandle, 0620, kauth_getuid(), GID_TTY); + if (major(dev) == driver->master) { + if (driver->mac_notify) { #if CONFIG_MACF - if (!error) { - tty_unlock(tp); - mac_pty_notify_grant(p, tp, dev, NULL); - tty_lock(tp); - } -#endif - } else { - error = 0; + if (!error) { + tty_unlock(tp); + mac_pty_notify_grant(p, tp, dev, NULL); + tty_lock(tp); } +#endif + } else { + error = 0; } - goto out; } + goto out; + } - case TIOCPTYGNAME: /* ptsname(3) */ + case TIOCPTYGNAME: /* ptsname(3) */ /* * Report the name of the slave device in *data * (128 bytes max.). Use the same template string @@ -1134,8 +1175,8 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) pty_get_name(dev, data, 128); error = 0; goto out; - - case TIOCPTYUNLK: /* unlockpt(3) */ + + case TIOCPTYUNLK: /* unlockpt(3) */ /* * Unlock the slave device so that it can be opened. */ @@ -1186,8 +1227,8 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) /* * If external processing and packet mode send ioctl packet. */ - if ((tp->t_lflag&EXTPROC) && (pti->pt_flags & PF_PKT)) { - switch(cmd) { + if ((tp->t_lflag & EXTPROC) && (pti->pt_flags & PF_PKT)) { + switch (cmd) { case TIOCSETA_32: case TIOCSETAW_32: case TIOCSETAF_32: @@ -1208,7 +1249,7 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) } } stop = (tp->t_iflag & IXON) && CCEQ(cc[VSTOP], CTRL('s')) - && CCEQ(cc[VSTART], CTRL('q')); + && CCEQ(cc[VSTART], CTRL('q')); if (pti->pt_flags & PF_NOSTOP) { if (stop) { pti->pt_send &= ~TIOCPKT_NOSTOP; @@ -1227,5 +1268,5 @@ ptyioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) out: tty_unlock(tp); - return (error); + return error; } diff --git a/bsd/kern/tty_dev.h b/bsd/kern/tty_dev.h index 49fc715db..19a1edd12 100644 --- a/bsd/kern/tty_dev.h +++ b/bsd/kern/tty_dev.h @@ -35,32 +35,32 @@ * structures themselves pointed to from this list come and go as needed. 
*/ struct ptmx_ioctl { - struct tty *pt_tty; /* pointer to ttymalloc()'ed data */ - int pt_flags; - struct selinfo pt_selr; - struct selinfo pt_selw; - u_char pt_send; - u_char pt_ucntl; - void *pt_devhandle; /* cloned slave device handle */ + struct tty *pt_tty; /* pointer to ttymalloc()'ed data */ + int pt_flags; + struct selinfo pt_selr; + struct selinfo pt_selw; + u_char pt_send; + u_char pt_ucntl; + void *pt_devhandle; /* cloned slave device handle */ }; -#define PF_PKT 0x0008 /* packet mode */ -#define PF_STOPPED 0x0010 /* user told stopped */ -#define PF_REMOTE 0x0020 /* remote and flow controlled input */ -#define PF_NOSTOP 0x0040 -#define PF_UCNTL 0x0080 /* user control mode */ -#define PF_UNLOCKED 0x0100 /* slave unlock (master open resets) */ -#define PF_OPEN_M 0x0200 /* master is open */ -#define PF_OPEN_S 0x0400 /* slave is open */ +#define PF_PKT 0x0008 /* packet mode */ +#define PF_STOPPED 0x0010 /* user told stopped */ +#define PF_REMOTE 0x0020 /* remote and flow controlled input */ +#define PF_NOSTOP 0x0040 +#define PF_UCNTL 0x0080 /* user control mode */ +#define PF_UNLOCKED 0x0100 /* slave unlock (master open resets) */ +#define PF_OPEN_M 0x0200 /* master is open */ +#define PF_OPEN_S 0x0400 /* slave is open */ struct tty_dev_t { - int master; // master major device number - int slave; // slave major device number - unsigned int fix_7828447:1, - fix_7070978:1, - mac_notify:1, - open_reset:1, - _reserved:28; + int master; // master major device number + int slave; // slave major device number + unsigned int fix_7828447:1, + fix_7070978:1, + mac_notify:1, + open_reset:1, + _reserved:28; #if __LP64__ int _pad; #endif diff --git a/bsd/kern/tty_ptmx.c b/bsd/kern/tty_ptmx.c index 893b1d912..6da6e641a 100644 --- a/bsd/kern/tty_ptmx.c +++ b/bsd/kern/tty_ptmx.c @@ -64,7 +64,7 @@ * Pseudo-teletype Driver * (Actually two drivers, requiring two entries in 'cdevsw') */ -#include "pty.h" /* XXX */ +#include "pty.h" /* XXX */ #include #include @@ -81,7 +81,7 @@ #include #include #include -#include /* DEVFS_LOCK()/DEVFS_UNLOCK() */ +#include /* DEVFS_LOCK()/DEVFS_UNLOCK() */ #include #if CONFIG_MACF @@ -99,76 +99,77 @@ static int ptmx_free_ioctl(int minor, int open_flag); static int ptmx_get_name(int minor, char *buffer, size_t size); static void ptsd_revoke_knotes(int minor, struct tty *tp); -extern d_open_t ptsopen; -extern d_close_t ptsclose; -extern d_read_t ptsread; -extern d_write_t ptswrite; -extern d_ioctl_t ptyioctl; -extern d_stop_t ptsstop; -extern d_reset_t ptsreset; -extern d_select_t ptsselect; - -extern d_open_t ptcopen; -extern d_close_t ptcclose; -extern d_read_t ptcread; -extern d_write_t ptcwrite; -extern d_stop_t ptcstop; -extern d_reset_t ptcreset; -extern d_select_t ptcselect; - -static int ptmx_major; /* dynamically assigned major number */ +extern d_open_t ptsopen; +extern d_close_t ptsclose; +extern d_read_t ptsread; +extern d_write_t ptswrite; +extern d_ioctl_t ptyioctl; +extern d_stop_t ptsstop; +extern d_reset_t ptsreset; +extern d_select_t ptsselect; + +extern d_open_t ptcopen; +extern d_close_t ptcclose; +extern d_read_t ptcread; +extern d_write_t ptcwrite; +extern d_stop_t ptcstop; +extern d_reset_t ptcreset; +extern d_select_t ptcselect; + +static int ptmx_major; /* dynamically assigned major number */ static struct cdevsw ptmx_cdev = { - ptcopen, ptcclose, ptcread, ptcwrite, - ptyioctl, ptcstop, ptcreset, 0, - ptcselect, eno_mmap, eno_strat, eno_getc, - eno_putc, D_TTY + ptcopen, ptcclose, ptcread, ptcwrite, + ptyioctl, ptcstop, ptcreset, 0, + ptcselect, 
eno_mmap, eno_strat, eno_getc, + eno_putc, D_TTY }; -static int ptsd_major; /* dynamically assigned major number */ +static int ptsd_major; /* dynamically assigned major number */ static struct cdevsw ptsd_cdev = { - ptsopen, ptsclose, ptsread, ptswrite, - ptyioctl, ptsstop, ptsreset, 0, - ptsselect, eno_mmap, eno_strat, eno_getc, - eno_putc, D_TTY + ptsopen, ptsclose, ptsread, ptswrite, + ptyioctl, ptsstop, ptsreset, 0, + ptsselect, eno_mmap, eno_strat, eno_getc, + eno_putc, D_TTY }; /* * ptmx == /dev/ptmx * ptsd == /dev/pts[0123456789]{3} */ -#define PTMX_TEMPLATE "ptmx" -#define PTSD_TEMPLATE "ttys%03d" +#define PTMX_TEMPLATE "ptmx" +#define PTSD_TEMPLATE "ttys%03d" /* * System-wide limit on the max number of cloned ptys */ -#define PTMX_MAX_DEFAULT 511 /* 512 entries */ -#define PTMX_MAX_HARD 999 /* 1000 entries, due to PTSD_TEMPLATE */ +#define PTMX_MAX_DEFAULT 511 /* 512 entries */ +#define PTMX_MAX_HARD 999 /* 1000 entries, due to PTSD_TEMPLATE */ -static int ptmx_max = PTMX_MAX_DEFAULT; /* default # of clones we allow */ +static int ptmx_max = PTMX_MAX_DEFAULT; /* default # of clones we allow */ /* Range enforcement for the sysctl */ static int sysctl_ptmx_max(__unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { int new_value, changed; int error = sysctl_io_number(req, ptmx_max, sizeof(int), &new_value, &changed); if (changed) { - if (new_value > 0 && new_value <= PTMX_MAX_HARD) + if (new_value > 0 && new_value <= PTMX_MAX_HARD) { ptmx_max = new_value; - else + } else { error = EINVAL; + } } - return(error); + return error; } -SYSCTL_NODE(_kern, KERN_TTY, tty, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "TTY"); +SYSCTL_NODE(_kern, KERN_TTY, tty, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "TTY"); SYSCTL_PROC(_kern_tty, OID_AUTO, ptmx_max, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &ptmx_max, 0, &sysctl_ptmx_max, "I", "ptmx_max"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ptmx_max, 0, &sysctl_ptmx_max, "I", "ptmx_max"); -static int ptmx_clone(dev_t dev, int minor); +static int ptmx_clone(dev_t dev, int minor); static struct tty_dev_t _ptmx_driver; @@ -183,7 +184,7 @@ ptmx_init( __unused int config_count) /* Get a major number for /dev/ptmx */ if ((ptmx_major = cdevsw_add(-15, &ptmx_cdev)) == -1) { printf("ptmx_init: failed to obtain /dev/ptmx major number\n"); - return (ENOENT); + return ENOENT; } if (cdevsw_setkqueueok(ptmx_major, &ptmx_cdev, CDEVSW_IS_PTC) == -1) { @@ -194,7 +195,7 @@ ptmx_init( __unused int config_count) if ((ptsd_major = cdevsw_add(-15, &ptsd_cdev)) == -1) { (void)cdevsw_remove(ptmx_major, &ptmx_cdev); printf("ptmx_init: failed to obtain /dev/ptmx major number\n"); - return (ENOENT); + return ENOENT; } if (cdevsw_setkqueueok(ptsd_major, &ptsd_cdev, CDEVSW_IS_PTS) == -1) { @@ -203,8 +204,8 @@ ptmx_init( __unused int config_count) /* Create the /dev/ptmx device {,0} */ (void)devfs_make_node_clone(makedev(ptmx_major, 0), - DEVFS_CHAR, UID_ROOT, GID_TTY, 0666, - ptmx_clone, PTMX_TEMPLATE); + DEVFS_CHAR, UID_ROOT, GID_TTY, 0666, + ptmx_clone, PTMX_TEMPLATE); _ptmx_driver.master = ptmx_major; _ptmx_driver.slave = ptsd_major; @@ -219,16 +220,16 @@ ptmx_init( __unused int config_count) _ptmx_driver.revoke = &ptsd_revoke_knotes; tty_dev_register(&_ptmx_driver); - return (0); + return 0; } static struct _ptmx_ioctl_state { - struct ptmx_ioctl **pis_ioctl_list; /* pointer vector */ - int pis_total; /* total slots */ - int pis_free; /* free slots */ + struct ptmx_ioctl **pis_ioctl_list; /* pointer 
vector */ + int pis_total; /* total slots */ + int pis_free; /* free slots */ } _state; -#define PTMX_GROW_VECTOR 16 /* Grow by this many slots at a time */ +#define PTMX_GROW_VECTOR 16 /* Grow by this many slots at a time */ /* * Given a minor number, return the corresponding structure for that minor @@ -251,7 +252,6 @@ ptmx_get_ioctl(int minor, int open_flag) struct ptmx_ioctl *new_ptmx_ioctl; if (open_flag & PF_OPEN_M) { - /* * If we are about to allocate more memory, but we have * already hit the administrative limit, then fail the @@ -262,19 +262,19 @@ ptmx_get_ioctl(int minor, int open_flag) * snapping to the nearest PTMX_GROW_VECTOR... */ if ((_state.pis_total - _state.pis_free) >= ptmx_max) { - return (NULL); + return NULL; } - MALLOC(new_ptmx_ioctl, struct ptmx_ioctl *, sizeof(struct ptmx_ioctl), M_TTYS, M_WAITOK|M_ZERO); + MALLOC(new_ptmx_ioctl, struct ptmx_ioctl *, sizeof(struct ptmx_ioctl), M_TTYS, M_WAITOK | M_ZERO); if (new_ptmx_ioctl == NULL) { - return (NULL); + return NULL; } if ((new_ptmx_ioctl->pt_tty = ttymalloc()) == NULL) { FREE(new_ptmx_ioctl, M_TTYS); - return (NULL); + return NULL; } - + /* * Hold the DEVFS_LOCK() over this whole operation; devfs * itself does this over malloc/free as well, so this should @@ -288,12 +288,12 @@ ptmx_get_ioctl(int minor, int open_flag) struct ptmx_ioctl **old_pis_ioctl_list = NULL; /* Yes. */ - MALLOC(new_pis_ioctl_list, struct ptmx_ioctl **, sizeof(struct ptmx_ioctl *) * (_state.pis_total + PTMX_GROW_VECTOR), M_TTYS, M_WAITOK|M_ZERO); + MALLOC(new_pis_ioctl_list, struct ptmx_ioctl **, sizeof(struct ptmx_ioctl *) * (_state.pis_total + PTMX_GROW_VECTOR), M_TTYS, M_WAITOK | M_ZERO); if (new_pis_ioctl_list == NULL) { ttyfree(new_ptmx_ioctl->pt_tty); DEVFS_UNLOCK(); FREE(new_ptmx_ioctl, M_TTYS); - return (NULL); + return NULL; } /* If this is not the first time, copy the old over */ @@ -302,8 +302,9 @@ ptmx_get_ioctl(int minor, int open_flag) _state.pis_ioctl_list = new_pis_ioctl_list; _state.pis_free += PTMX_GROW_VECTOR; _state.pis_total += PTMX_GROW_VECTOR; - if (old_pis_ioctl_list) + if (old_pis_ioctl_list) { FREE(old_pis_ioctl_list, M_TTYS); + } } /* is minor in range now? 
*/ @@ -311,7 +312,7 @@ ptmx_get_ioctl(int minor, int open_flag) ttyfree(new_ptmx_ioctl->pt_tty); DEVFS_UNLOCK(); FREE(new_ptmx_ioctl, M_TTYS); - return (NULL); + return NULL; } if (_state.pis_ioctl_list[minor] != NULL) { @@ -321,7 +322,6 @@ ptmx_get_ioctl(int minor, int open_flag) /* Special error value so we know to redrive the open, we've been raced */ return (struct ptmx_ioctl*)-1; - } /* Vector is large enough; grab a new ptmx_ioctl */ @@ -337,19 +337,19 @@ ptmx_get_ioctl(int minor, int open_flag) /* Create the /dev/ttysXXX device {,XXX} */ _state.pis_ioctl_list[minor]->pt_devhandle = devfs_make_node( - makedev(ptsd_major, minor), - DEVFS_CHAR, UID_ROOT, GID_TTY, 0620, - PTSD_TEMPLATE, minor); + makedev(ptsd_major, minor), + DEVFS_CHAR, UID_ROOT, GID_TTY, 0620, + PTSD_TEMPLATE, minor); if (_state.pis_ioctl_list[minor]->pt_devhandle == NULL) { printf("devfs_make_node() call failed for ptmx_get_ioctl()!!!!\n"); } } if (minor < 0 || minor >= _state.pis_total) { - return (NULL); + return NULL; } - return (_state.pis_ioctl_list[minor]); + return _state.pis_ioctl_list[minor]; } /* @@ -364,7 +364,7 @@ ptmx_free_ioctl(int minor, int open_flag) if (minor < 0 || minor >= _state.pis_total) { DEVFS_UNLOCK(); - return (-1); + return -1; } _state.pis_ioctl_list[minor]->pt_flags &= ~(open_flag); @@ -374,9 +374,9 @@ ptmx_free_ioctl(int minor, int open_flag) * a notification on the last close of a device, and we will have * cleared both the master and the slave open bits in the flags. */ - if (!(_state.pis_ioctl_list[minor]->pt_flags & (PF_OPEN_M|PF_OPEN_S))) { + if (!(_state.pis_ioctl_list[minor]->pt_flags & (PF_OPEN_M | PF_OPEN_S))) { /* Mark as free so it can be reallocated later */ - old_ptmx_ioctl = _state.pis_ioctl_list[ minor]; + old_ptmx_ioctl = _state.pis_ioctl_list[minor]; _state.pis_ioctl_list[minor] = NULL; _state.pis_free++; } @@ -390,13 +390,14 @@ ptmx_free_ioctl(int minor, int open_flag) * XXX Conditional to be removed when/if tty/pty reference * XXX counting and mutex implemented. */ - if (old_ptmx_ioctl->pt_devhandle != NULL) + if (old_ptmx_ioctl->pt_devhandle != NULL) { devfs_remove(old_ptmx_ioctl->pt_devhandle); + } ttyfree(old_ptmx_ioctl->pt_tty); FREE(old_ptmx_ioctl, M_TTYS); } - return (0); /* Success */ + return 0; /* Success */ } static int @@ -428,16 +429,18 @@ ptmx_clone(__unused dev_t dev, int action) if (action == DEVFS_CLONE_ALLOC) { /* First one */ - if (_state.pis_total == 0) - return (0); + if (_state.pis_total == 0) { + return 0; + } /* * Note: We can add hinting on free slots, if this linear search * ends up being a performance bottleneck... */ - for(i = 0; i < _state.pis_total; i++) { - if (_state.pis_ioctl_list[ i] == NULL) + for (i = 0; i < _state.pis_total; i++) { + if (_state.pis_ioctl_list[i] == NULL) { break; + } } /* @@ -452,9 +455,9 @@ ptmx_clone(__unused dev_t dev, int action) * XXX explicit return. 
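The surrounding ptmx_get_ioctl()/ptmx_clone() logic amounts to a pointer vector grown in fixed-size chunks with first-fit slot reuse. A hedged user-space analogue of that allocation pattern; the names are illustrative, not kernel API.

#include <stdlib.h>
#include <string.h>

#define GROW 16	/* mirrors PTMX_GROW_VECTOR */

struct slot_vector {
	void **v;
	int total;	/* like pis_total */
	int nfree;	/* like pis_free */
};

/*
 * First-fit linear scan, growing the vector by GROW slots when full;
 * returns the slot index, or -1 on allocation failure.
 */
static int
slot_alloc(struct slot_vector *sv, void *item)
{
	for (int i = 0; i < sv->total; i++) {
		if (sv->v[i] == NULL) {
			sv->v[i] = item;
			sv->nfree--;
			return i;
		}
	}
	void **nv = calloc((size_t)sv->total + GROW, sizeof(*nv));
	if (nv == NULL) {
		return -1;
	}
	if (sv->v != NULL) {
		memcpy(nv, sv->v, sizeof(*nv) * (size_t)sv->total);	/* copy old, then swap */
		free(sv->v);
	}
	sv->v = nv;
	sv->v[sv->total] = item;
	sv->total += GROW;
	sv->nfree += GROW - 1;
	return sv->total - GROW;
}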
*/ - return (i); /* empty slot or next slot */ + return i; /* empty slot or next slot */ } - return(-1); + return -1; } @@ -538,7 +541,7 @@ ptsd_kqops_common(struct knote *kn, struct tty *tp) case EVFILT_WRITE: if ((tp->t_outq.c_cc <= tp->t_lowat) && - (tp->t_state & TS_CONNECTED)) { + (tp->t_state & TS_CONNECTED)) { kn->kn_data = tp->t_outq.c_cn - tp->t_outq.c_cc; retval = 1; } @@ -546,7 +549,7 @@ ptsd_kqops_common(struct knote *kn, struct tty *tp) default: panic("ptsd kevent: unexpected filter: %d, kn = %p, tty = %p", - kn->kn_filter, kn, tp); + kn->kn_filter, kn, tp); break; } @@ -608,7 +611,7 @@ ptsd_kqops_touch(struct knote *kn, struct kevent_internal_s *kev) static int ptsd_kqops_process(struct knote *kn, __unused struct filt_process_s *data, - struct kevent_internal_s *kev) + struct kevent_internal_s *kev) { struct tty *tp = kn->kn_hook; int ret; @@ -664,7 +667,7 @@ ptsd_kqfilter(dev_t dev, struct knote *kn) break; default: panic("ptsd kevent: unexpected filter: %d, kn = %p, tty = %p", - kn->kn_filter, kn, tp); + kn->kn_filter, kn, tp); break; } @@ -767,7 +770,7 @@ ptmx_kqfilter(dev_t dev, struct knote *kn) break; default: panic("ptmx kevent: unexpected filter: %d, kn = %p, tty = %p", - kn->kn_filter, kn, tp); + kn->kn_filter, kn, tp); break; } @@ -831,7 +834,7 @@ ptmx_kqops_common(struct knote *kn, struct ptmx_ioctl *pti, struct tty *tp) retval = tp->t_outq.c_cc; kn->kn_data = retval; } else if (((pti->pt_flags & PF_PKT) && pti->pt_send) || - ((pti->pt_flags & PF_UCNTL) && pti->pt_ucntl)) { + ((pti->pt_flags & PF_UCNTL) && pti->pt_ucntl)) { retval = 1; } break; @@ -854,7 +857,7 @@ ptmx_kqops_common(struct knote *kn, struct ptmx_ioctl *pti, struct tty *tp) default: panic("ptmx kevent: unexpected filter: %d, kn = %p, tty = %p", - kn->kn_filter, kn, tp); + kn->kn_filter, kn, tp); break; } @@ -916,7 +919,7 @@ ptmx_kqops_touch(struct knote *kn, struct kevent_internal_s *kev) static int ptmx_kqops_process(struct knote *kn, __unused struct filt_process_s *data, - struct kevent_internal_s *kev) + struct kevent_internal_s *kev) { struct ptmx_ioctl *pti = ptmx_knote_ioctl(kn); struct tty *tp = ptmx_knote_tty(kn); diff --git a/bsd/kern/tty_pty.c b/bsd/kern/tty_pty.c index c5f899a46..75628a905 100644 --- a/bsd/kern/tty_pty.c +++ b/bsd/kern/tty_pty.c @@ -2,7 +2,7 @@ * Copyright (c) 1997-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -64,7 +64,7 @@ * Pseudo-teletype Driver * (Actually two drivers, requiring two entries in 'cdevsw') */ -#include "pty.h" /* XXX */ +#include "pty.h" /* XXX */ #include #include @@ -88,8 +88,8 @@ #if NPTY == 1 #undef NPTY -#define NPTY 32 /* crude XXX */ -#warning You have only one pty defined, redefining to 32. +#define NPTY 32 /* crude XXX */ +#warning You have only one pty defined, redefining to 32. #endif /* @@ -108,8 +108,8 @@ pty_init(__unused int n_ptys) } #else // DEVFS #include -#define START_CHAR 'p' -#define HEX_BASE 16 +#define START_CHAR 'p' +#define HEX_BASE 16 static struct tty_dev_t _pty_driver; @@ -120,7 +120,7 @@ pty_get_ioctl(int minor, int open_flag) return NULL; } struct ptmx_ioctl *pti = &pt_ioctl[minor]; - if (open_flag & (PF_OPEN_M|PF_OPEN_S)) { + if (open_flag & (PF_OPEN_M | PF_OPEN_S)) { if (!pti->pt_tty) { pti->pt_tty = ttymalloc(); } @@ -135,8 +135,8 @@ static int pty_get_name(int minor, char *buffer, size_t size) { return snprintf(buffer, size, "/dev/tty%c%x", - START_CHAR + (minor / HEX_BASE), - minor % HEX_BASE); + START_CHAR + (minor / HEX_BASE), + minor % HEX_BASE); } int @@ -155,11 +155,11 @@ pty_init(int n_ptys) goto done; } pt_ioctl[m].pt_devhandle = devfs_make_node(makedev(PTS_MAJOR, m), - DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, - "tty%c%x", j + START_CHAR, i); + DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, + "tty%c%x", j + START_CHAR, i); (void)devfs_make_node(makedev(PTC_MAJOR, m), - DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, - "pty%c%x", j + START_CHAR, i); + DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, + "pty%c%x", j + START_CHAR, i); } } diff --git a/bsd/kern/tty_subr.c b/bsd/kern/tty_subr.c index bfac6579b..c00ccdbe8 100644 --- a/bsd/kern/tty_subr.c +++ b/bsd/kern/tty_subr.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ @@ -72,7 +72,7 @@ * defined we allocate an array of bits -- 1/8th as much memory but * setbit(), clrbit(), and isset() take more cpu. If QBITS is * undefined, we just use an array of bytes. - * + * * If TTY_QUOTE functionality isn't required by a line discipline, * it can free c_cq and set it to NULL. This speeds things up, * and also does not use any extra memory. 
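To make the QBITS trade-off concrete: QMEM(n) rounds n bits up to whole bytes, so a 1024-byte clist carries a 128-byte quote map rather than a second 1024-byte array. A hedged user-space rendering of the same arithmetic behind setbit()/clrbit()/isset(), with CHAR_BIT standing in for NBBY:

#include <limits.h>

#define QMEM(n)	((((n) - 1) / CHAR_BIT) + 1)	/* bits -> bytes, rounded up */

static void
set_bit(unsigned char *q, int i)
{
	q[i / CHAR_BIT] |= (unsigned char)(1u << (i % CHAR_BIT));
}

static void
clr_bit(unsigned char *q, int i)
{
	q[i / CHAR_BIT] &= (unsigned char)~(1u << (i % CHAR_BIT));
}

static int
is_set(const unsigned char *q, int i)
{
	return (q[i / CHAR_BIT] >> (i % CHAR_BIT)) & 1;
}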
This is useful for (say) @@ -82,9 +82,9 @@ #define QBITS #ifdef QBITS -#define QMEM(n) ((((n)-1)/NBBY)+1) +#define QMEM(n) ((((n)-1)/NBBY)+1) #else -#define QMEM(n) (n) +#define QMEM(n) (n) #endif @@ -104,34 +104,38 @@ int clalloc(struct clist *clp, int size, int quot) { MALLOC_ZONE(clp->c_cs, u_char *, size, M_TTYS, M_WAITOK); - if (!clp->c_cs) - return (-1); + if (!clp->c_cs) { + return -1; + } bzero(clp->c_cs, size); - if(quot) { + if (quot) { MALLOC_ZONE(clp->c_cq, u_char *, QMEM(size), M_TTYS, M_WAITOK); if (!clp->c_cq) { FREE_ZONE(clp->c_cs, size, M_TTYS); - return (-1); + return -1; } bzero(clp->c_cs, QMEM(size)); - } else + } else { clp->c_cq = (u_char *)0; + } clp->c_cf = clp->c_cl = (u_char *)0; clp->c_ce = clp->c_cs + size; clp->c_cn = size; clp->c_cc = 0; - return (0); + return 0; } void clfree(struct clist *clp) { - if(clp->c_cs) + if (clp->c_cs) { FREE_ZONE(clp->c_cs, clp->c_cn, M_TTYS); - if(clp->c_cq) + } + if (clp->c_cq) { FREE_ZONE(clp->c_cq, QMEM(clp->c_cn), M_TTYS); + } clp->c_cs = clp->c_cq = (u_char *)0; } @@ -144,23 +148,28 @@ getc(struct clist *clp) { int c = -1; - if (clp->c_cc == 0) + if (clp->c_cc == 0) { goto out; + } c = *clp->c_cf & 0xff; if (clp->c_cq) { #ifdef QBITS - if (isset(clp->c_cq, clp->c_cf - clp->c_cs) ) + if (isset(clp->c_cq, clp->c_cf - clp->c_cs)) { c |= TTY_QUOTE; + } #else - if (*(clp->c_cf - clp->c_cs + clp->c_cq)) + if (*(clp->c_cf - clp->c_cs + clp->c_cq)) { c |= TTY_QUOTE; + } #endif } - if (++clp->c_cf == clp->c_ce) + if (++clp->c_cf == clp->c_ce) { clp->c_cf = clp->c_cs; - if (--clp->c_cc == 0) + } + if (--clp->c_cc == 0) { clp->c_cf = clp->c_cl = (u_char *)0; + } out: return c; } @@ -178,20 +187,24 @@ q_to_b(struct clist *clp, u_char *cp, int count) /* optimize this while loop */ while (count > 0 && clp->c_cc > 0) { cc = clp->c_cl - clp->c_cf; - if (clp->c_cf >= clp->c_cl) + if (clp->c_cf >= clp->c_cl) { cc = clp->c_ce - clp->c_cf; - if (cc > count) + } + if (cc > count) { cc = count; + } bcopy(clp->c_cf, p, cc); count -= cc; p += cc; clp->c_cc -= cc; clp->c_cf += cc; - if (clp->c_cf == clp->c_ce) + if (clp->c_cf == clp->c_ce) { clp->c_cf = clp->c_cs; + } } - if (clp->c_cc == 0) + if (clp->c_cc == 0) { clp->c_cf = clp->c_cl = (u_char *)0; + } return p - cp; } @@ -206,13 +219,15 @@ ndqb(struct clist *clp, int flag) int i; int cc; - if ((cc = clp->c_cc) == 0) + if ((cc = clp->c_cc) == 0) { goto out; + } if (flag == 0) { count = clp->c_cl - clp->c_cf; - if (count <= 0) + if (count <= 0) { count = clp->c_ce - clp->c_cf; + } goto out; } @@ -221,14 +236,16 @@ ndqb(struct clist *clp, int flag) while (cc-- > 0 && !(clp->c_cs[i++] & (flag & ~TTY_QUOTE) || isset(clp->c_cq, i))) { count++; - if (i == clp->c_cn) + if (i == clp->c_cn) { break; + } } } else { while (cc-- > 0 && !(clp->c_cs[i++] & flag)) { count++; - if (i == clp->c_cn) + if (i == clp->c_cn) { break; + } } } out: @@ -251,18 +268,22 @@ ndflush(struct clist *clp, int count) /* optimize this while loop */ while (count > 0 && clp->c_cc > 0) { cc = clp->c_cl - clp->c_cf; - if (clp->c_cf >= clp->c_cl) + if (clp->c_cf >= clp->c_cl) { cc = clp->c_ce - clp->c_cf; - if (cc > count) + } + if (cc > count) { cc = count; + } count -= cc; clp->c_cc -= cc; clp->c_cf += cc; - if (clp->c_cf == clp->c_ce) + if (clp->c_cf == clp->c_ce) { clp->c_cf = clp->c_cs; + } } - if (clp->c_cc == 0) + if (clp->c_cc == 0) { clp->c_cf = clp->c_cl = (u_char *)0; + } } /* @@ -278,7 +299,7 @@ putc(int c, struct clist *clp) #if DIAGNOSTIC //printf("putc: required clalloc\n"); #endif - if(clalloc(clp, 1024, 1)) { + if 
(clalloc(clp, 1024, 1)) { out: return -1; } @@ -286,17 +307,19 @@ out: clp->c_cf = clp->c_cl = clp->c_cs; } - if (clp->c_cc == clp->c_cn) + if (clp->c_cc == clp->c_cn) { goto out; + } *clp->c_cl = c & 0xff; i = clp->c_cl - clp->c_cs; if (clp->c_cq) { #ifdef QBITS - if (c & TTY_QUOTE) - setbit(clp->c_cq, i); - else + if (c & TTY_QUOTE) { + setbit(clp->c_cq, i); + } else { clrbit(clp->c_cq, i); + } #else q = clp->c_cq + i; *q = (c & TTY_QUOTE) ? 1 : 0; @@ -304,8 +327,9 @@ out: } clp->c_cc++; clp->c_cl++; - if (clp->c_cl == clp->c_ce) + if (clp->c_cl == clp->c_ce) { clp->c_cl = clp->c_cs; + } return 0; } @@ -323,29 +347,31 @@ clrbits(u_char *cp, int off, int len) int i; u_char mask; - if(len==1) { + if (len == 1) { clrbit(cp, off); return; } sby = off / NBBY; sbi = off % NBBY; - eby = (off+len) / NBBY; - ebi = (off+len) % NBBY; + eby = (off + len) / NBBY; + ebi = (off + len) % NBBY; if (sby == eby) { mask = ((1 << (ebi - sbi)) - 1) << sbi; cp[sby] &= ~mask; } else { - mask = (1<c_cc == 0) { @@ -369,22 +396,26 @@ b_to_q(const u_char *cp, int count, struct clist *clp) #if DIAGNOSTIC printf("b_to_q: required clalloc\n"); #endif - if(clalloc(clp, 1024, 1)) + if (clalloc(clp, 1024, 1)) { goto out; + } } clp->c_cf = clp->c_cl = clp->c_cs; } - if (clp->c_cc == clp->c_cn) + if (clp->c_cc == clp->c_cn) { goto out; + } /* optimize this while loop */ while (count > 0 && clp->c_cc < clp->c_cn) { cc = clp->c_ce - clp->c_cl; - if (clp->c_cf > clp->c_cl) + if (clp->c_cf > clp->c_cl) { cc = clp->c_cf - clp->c_cl; - if (cc > count) + } + if (cc > count) { cc = count; + } bcopy(p, clp->c_cl, cc); if (clp->c_cq) { #ifdef QBITS @@ -397,8 +428,9 @@ b_to_q(const u_char *cp, int count, struct clist *clp) count -= cc; clp->c_cc += cc; clp->c_cl += cc; - if (clp->c_cl == clp->c_ce) + if (clp->c_cl == clp->c_ce) { clp->c_cl = clp->c_cs; + } } out: return count; @@ -423,20 +455,25 @@ nextc(struct clist *clp, u_char *cp, int *c) */ cc = clp->c_cc; } - if (cc == 0 || cp == NULL) + if (cc == 0 || cp == NULL) { return NULL; - if (--cc == 0) + } + if (--cc == 0) { return NULL; - if (++cp == clp->c_ce) + } + if (++cp == clp->c_ce) { cp = clp->c_cs; + } *c = *cp & 0xff; if (clp->c_cq) { #ifdef QBITS - if (isset(clp->c_cq, cp - clp->c_cs)) + if (isset(clp->c_cq, cp - clp->c_cs)) { *c |= TTY_QUOTE; + } #else - if (*(clp->c_cf - clp->c_cs + clp->c_cq)) + if (*(clp->c_cf - clp->c_cs + clp->c_cq)) { *c |= TTY_QUOTE; + } #endif } return cp; @@ -458,17 +495,20 @@ firstc(struct clist *clp, int *c) u_char *cp; cc = clp->c_cc; - if (cc == 0) + if (cc == 0) { return NULL; + } cp = clp->c_cf; *c = *cp & 0xff; - if(clp->c_cq) { + if (clp->c_cq) { #ifdef QBITS - if (isset(clp->c_cq, cp - clp->c_cs)) + if (isset(clp->c_cq, cp - clp->c_cs)) { *c |= TTY_QUOTE; + } #else - if (*(cp - clp->c_cs + clp->c_cq)) + if (*(cp - clp->c_cs + clp->c_cq)) { *c |= TTY_QUOTE; + } #endif } return clp->c_cf; @@ -482,27 +522,32 @@ unputc(struct clist *clp) { unsigned int c = -1; - if (clp->c_cc == 0) + if (clp->c_cc == 0) { goto out; + } - if (clp->c_cl == clp->c_cs) + if (clp->c_cl == clp->c_cs) { clp->c_cl = clp->c_ce - 1; - else + } else { --clp->c_cl; + } clp->c_cc--; c = *clp->c_cl & 0xff; if (clp->c_cq) { #ifdef QBITS - if (isset(clp->c_cq, clp->c_cl - clp->c_cs)) + if (isset(clp->c_cq, clp->c_cl - clp->c_cs)) { c |= TTY_QUOTE; + } #else - if (*(clp->c_cf - clp->c_cs + clp->c_cq)) + if (*(clp->c_cf - clp->c_cs + clp->c_cq)) { c |= TTY_QUOTE; + } #endif } - if (clp->c_cc == 0) + if (clp->c_cc == 0) { clp->c_cf = clp->c_cl = (u_char *)0; + } out: return c; 
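The getc()/putc()/unputc() hunks above all maintain the same circular-queue invariants: c_cf and c_cl chase each other inside [c_cs, c_ce), and both reset to NULL once c_cc reaches zero. A hedged stand-alone sketch of those invariants, with illustrative names, a fixed-size buffer, and no quote bits:

#include <stddef.h>

struct ring {
	unsigned char buf[1024];
	unsigned char *rd, *wr;	/* play the roles of c_cf and c_cl */
	int count;		/* plays the role of c_cc */
};

static int
ring_put(struct ring *r, unsigned char c)
{
	if (r->count == (int)sizeof(r->buf)) {
		return -1;			/* full: c_cc == c_cn */
	}
	if (r->count == 0) {
		r->rd = r->wr = r->buf;		/* clist re-arms both on empty */
	}
	*r->wr++ = c;
	if (r->wr == r->buf + sizeof(r->buf)) {
		r->wr = r->buf;			/* wrap: c_cl == c_ce */
	}
	r->count++;
	return 0;
}

static int
ring_get(struct ring *r)
{
	if (r->count == 0) {
		return -1;
	}
	int c = *r->rd++;
	if (r->rd == r->buf + sizeof(r->buf)) {
		r->rd = r->buf;
	}
	if (--r->count == 0) {
		r->rd = r->wr = NULL;		/* mirrors the (u_char *)0 reset */
	}
	return c;
}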
} @@ -515,6 +560,7 @@ catq(struct clist *from, struct clist *to) { int c; - while ((c = getc(from)) != -1) + while ((c = getc(from)) != -1) { putc(c, to); + } } diff --git a/bsd/kern/tty_tty.c b/bsd/kern/tty_tty.c index 84960728d..8cbdfaf62 100644 --- a/bsd/kern/tty_tty.c +++ b/bsd/kern/tty_tty.c @@ -2,7 +2,7 @@ * Copyright (c) 1997-2013 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -91,54 +91,57 @@ cttyopen(dev_t dev, int flag, __unused int mode, proc_t p) int cttyflag, doclose = 0; struct session *sessp; - if (ttyvp == NULL) - return (ENXIO); + if (ttyvp == NULL) { + return ENXIO; + } context.vc_thread = current_thread(); context.vc_ucred = kauth_cred_proc_ref(p); sessp = proc_session(p); session_lock(sessp); - cttyflag = sessp->s_flags & S_CTTYREF; + cttyflag = sessp->s_flags & S_CTTYREF; session_unlock(sessp); /* * A little hack--this device, used by many processes, - * happens to do an open on another device, which can - * cause unhappiness if the second-level open blocks indefinitely + * happens to do an open on another device, which can + * cause unhappiness if the second-level open blocks indefinitely * (as could be the case if the master side has hung up). Since * we know that this driver doesn't care about the serializing * opens and closes, we can drop the lock. To avoid opencount leak, - * open the vnode only for the first time. + * open the vnode only for the first time. */ if (cttyflag == 0) { devsw_unlock(dev, S_IFCHR); error = VNOP_OPEN(ttyvp, flag, &context); devsw_lock(dev, S_IFCHR); - if (error) + if (error) { goto out; - + } + /* * If S_CTTYREF is set, some other thread did an open * and was able to set the flag, now perform a close, else * set the flag. */ session_lock(sessp); - if (cttyflag == (sessp->s_flags & S_CTTYREF)) + if (cttyflag == (sessp->s_flags & S_CTTYREF)) { sessp->s_flags |= S_CTTYREF; - else + } else { doclose = 1; + } session_unlock(sessp); /* * We have to take a reference here to make sure a close - * gets called during revoke. Note that once a controlling + * gets called during revoke. Note that once a controlling * tty gets opened by this driver, the only way close will * get called is when the session leader , whose controlling - * tty is ttyvp, exits and vnode is revoked. We cannot + * tty is ttyvp, exits and vnode is revoked. 
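/*
 * Editor's aside on the cttyopen() scheme above: the devsw lock is
 * dropped around the potentially-blocking VNOP_OPEN(), so two threads
 * can both observe S_CTTYREF clear and both open; the loser of the
 * re-check then closes its extra open.  A minimal sketch of that
 * shape (hypothetical names, callback-style locking, not the real
 * session structures):
 */
static int opened;              /* stands in for S_CTTYREF in s_flags */

static void
open_once(void (*do_open)(void), void (*do_close)(void),
    void (*lock)(void), void (*unlock)(void))
{
    int was_open, lost_race = 0;

    lock();
    was_open = opened;
    unlock();

    if (!was_open) {
        do_open();              /* may block; lock not held */

        lock();
        if (!opened) {
            opened = 1;         /* we won: keep our open */
        } else {
            lost_race = 1;      /* someone beat us to the flag */
        }
        unlock();

        if (lost_race) {
            do_close();         /* undo the duplicate open */
        }
    }
}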
We cannot * redirect close from this driver because underlying controlling - * terminal might change and close may get redirected to a + * terminal might change and close may get redirected to a * wrong vnode causing panic. */ if (doclose) { @@ -155,7 +158,7 @@ out: vnode_put(ttyvp); kauth_cred_unref(&context.vc_ucred); - return (error); + return error; } int @@ -165,8 +168,9 @@ cttyread(__unused dev_t dev, struct uio *uio, int flag) struct vfs_context context; int error; - if (ttyvp == NULL) - return (EIO); + if (ttyvp == NULL) { + return EIO; + } context.vc_thread = current_thread(); context.vc_ucred = NOCRED; @@ -174,7 +178,7 @@ cttyread(__unused dev_t dev, struct uio *uio, int flag) error = VNOP_READ(ttyvp, uio, flag, &context); vnode_put(ttyvp); - return (error); + return error; } int @@ -184,8 +188,9 @@ cttywrite(__unused dev_t dev, struct uio *uio, int flag) struct vfs_context context; int error; - if (ttyvp == NULL) - return (EIO); + if (ttyvp == NULL) { + return EIO; + } context.vc_thread = current_thread(); context.vc_ucred = NOCRED; @@ -193,7 +198,7 @@ cttywrite(__unused dev_t dev, struct uio *uio, int flag) error = VNOP_WRITE(ttyvp, uio, flag, &context); vnode_put(ttyvp); - return (error); + return error; } int @@ -204,9 +209,10 @@ cttyioctl(__unused dev_t dev, u_long cmd, caddr_t addr, int flag, proc_t p) struct session *sessp; int error = 0; - if (ttyvp == NULL) - return (EIO); - if (cmd == TIOCSCTTY) { /* don't allow controlling tty to be set */ + if (ttyvp == NULL) { + return EIO; + } + if (cmd == TIOCSCTTY) { /* don't allow controlling tty to be set */ error = EINVAL; /* to controlling tty -- infinite recursion */ goto out; } @@ -214,13 +220,15 @@ cttyioctl(__unused dev_t dev, u_long cmd, caddr_t addr, int flag, proc_t p) sessp = proc_session(p); if (!SESS_LEADER(p, sessp)) { OSBitAndAtomic(~((uint32_t)P_CONTROLT), &p->p_flag); - if (sessp != SESSION_NULL) + if (sessp != SESSION_NULL) { session_rele(sessp); + } error = 0; goto out; } else { - if (sessp != SESSION_NULL) + if (sessp != SESSION_NULL) { session_rele(sessp); + } error = EINVAL; goto out; } @@ -231,7 +239,7 @@ cttyioctl(__unused dev_t dev, u_long cmd, caddr_t addr, int flag, proc_t p) error = VNOP_IOCTL(ttyvp, cmd, addr, flag, &context); out: vnode_put(ttyvp); - return (error); + return error; } int @@ -244,11 +252,12 @@ cttyselect(__unused dev_t dev, int flag, void* wql, __unused proc_t p) context.vc_thread = current_thread(); context.vc_ucred = NOCRED; - if (ttyvp == NULL) - return (1); /* try operation to get EOF/failure */ - error = VNOP_SELECT(ttyvp, flag, FREAD|FWRITE, wql, &context); + if (ttyvp == NULL) { + return 1; /* try operation to get EOF/failure */ + } + error = VNOP_SELECT(ttyvp, flag, FREAD | FWRITE, wql, &context); vnode_put(ttyvp); - return (error); + return error; } /* This returns vnode with ioref */ @@ -263,16 +272,16 @@ cttyvp(proc_t p) session_lock(sessp); vp = (p->p_flag & P_CONTROLT ? sessp->s_ttyvp : NULLVP); - vid = sessp->s_ttyvid; + vid = sessp->s_ttyvid; session_unlock(sessp); session_rele(sessp); if (vp != NULLVP) { /* cannot get an IO reference, return NULLVP */ - if (vnode_getwithvid(vp, vid) != 0) + if (vnode_getwithvid(vp, vid) != 0) { vp = NULLVP; + } } - return(vp); + return vp; } - diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c index abf7e6f53..01c615b28 100644 --- a/bsd/kern/ubc_subr.c +++ b/bsd/kern/ubc_subr.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2014 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * File: ubc_subr.c * Author: Umesh Vaishampayan [umeshv@apple.com] * 05-Aug-1999 umeshv Created. @@ -34,7 +34,7 @@ * * Caller of UBC functions MUST have a valid reference on the vnode. * - */ + */ #include #include @@ -78,10 +78,10 @@ /* XXX These should be in a BSD accessible Mach header, but aren't. */ extern kern_return_t memory_object_pages_resident(memory_object_control_t, - boolean_t *); -extern kern_return_t memory_object_signed(memory_object_control_t control, - boolean_t is_signed); -extern boolean_t memory_object_is_signed(memory_object_control_t); + boolean_t *); +extern kern_return_t memory_object_signed(memory_object_control_t control, + boolean_t is_signed); +extern boolean_t memory_object_is_signed(memory_object_control_t); /* XXX Same for those. */ @@ -90,11 +90,11 @@ extern void Debugger(const char *message); /* XXX no one uses this interface! 
*/ kern_return_t ubc_page_op_with_control( - memory_object_control_t control, - off_t f_offset, - int ops, - ppnum_t *phys_entryp, - int *flagsp); + memory_object_control_t control, + off_t f_offset, + int ops, + ppnum_t *phys_entryp, + int *flagsp); #if DIAGNOSTIC @@ -115,8 +115,8 @@ static void ubc_cs_free(struct ubc_info *uip); static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob); static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob); -struct zone *ubc_info_zone; -static uint32_t cs_blob_generation_count = 1; +struct zone *ubc_info_zone; +static uint32_t cs_blob_generation_count = 1; /* * CODESIGNING @@ -125,7 +125,7 @@ static uint32_t cs_blob_generation_count = 1; extern int cs_debug; -#define PAGE_SHIFT_4K (12) +#define PAGE_SHIFT_4K (12) static boolean_t cs_valid_range( @@ -152,52 +152,53 @@ typedef void (*cs_md_update)(void *ctx, const void *data, size_t size); typedef void (*cs_md_final)(void *hash, void *ctx); struct cs_hash { - uint8_t cs_type; /* type code as per code signing */ - size_t cs_size; /* size of effective hash (may be truncated) */ - size_t cs_digest_size; /* size of native hash */ - cs_md_init cs_init; - cs_md_update cs_update; - cs_md_final cs_final; + uint8_t cs_type; /* type code as per code signing */ + size_t cs_size; /* size of effective hash (may be truncated) */ + size_t cs_digest_size;/* size of native hash */ + cs_md_init cs_init; + cs_md_update cs_update; + cs_md_final cs_final; }; -uint8_t cs_hash_type( - struct cs_hash const * const cs_hash) +uint8_t +cs_hash_type( + struct cs_hash const * const cs_hash) { - return cs_hash->cs_type; + return cs_hash->cs_type; } static const struct cs_hash cs_hash_sha1 = { - .cs_type = CS_HASHTYPE_SHA1, - .cs_size = CS_SHA1_LEN, - .cs_digest_size = SHA_DIGEST_LENGTH, - .cs_init = (cs_md_init)SHA1Init, - .cs_update = (cs_md_update)SHA1Update, - .cs_final = (cs_md_final)SHA1Final, + .cs_type = CS_HASHTYPE_SHA1, + .cs_size = CS_SHA1_LEN, + .cs_digest_size = SHA_DIGEST_LENGTH, + .cs_init = (cs_md_init)SHA1Init, + .cs_update = (cs_md_update)SHA1Update, + .cs_final = (cs_md_final)SHA1Final, }; #if CRYPTO_SHA2 static const struct cs_hash cs_hash_sha256 = { - .cs_type = CS_HASHTYPE_SHA256, - .cs_size = SHA256_DIGEST_LENGTH, - .cs_digest_size = SHA256_DIGEST_LENGTH, - .cs_init = (cs_md_init)SHA256_Init, - .cs_update = (cs_md_update)SHA256_Update, - .cs_final = (cs_md_final)SHA256_Final, + .cs_type = CS_HASHTYPE_SHA256, + .cs_size = SHA256_DIGEST_LENGTH, + .cs_digest_size = SHA256_DIGEST_LENGTH, + .cs_init = (cs_md_init)SHA256_Init, + .cs_update = (cs_md_update)SHA256_Update, + .cs_final = (cs_md_final)SHA256_Final, }; static const struct cs_hash cs_hash_sha256_truncate = { - .cs_type = CS_HASHTYPE_SHA256_TRUNCATED, - .cs_size = CS_SHA256_TRUNCATED_LEN, - .cs_digest_size = SHA256_DIGEST_LENGTH, - .cs_init = (cs_md_init)SHA256_Init, - .cs_update = (cs_md_update)SHA256_Update, - .cs_final = (cs_md_final)SHA256_Final, + .cs_type = CS_HASHTYPE_SHA256_TRUNCATED, + .cs_size = CS_SHA256_TRUNCATED_LEN, + .cs_digest_size = SHA256_DIGEST_LENGTH, + .cs_init = (cs_md_init)SHA256_Init, + .cs_update = (cs_md_update)SHA256_Update, + .cs_final = (cs_md_final)SHA256_Final, }; static const struct cs_hash cs_hash_sha384 = { - .cs_type = CS_HASHTYPE_SHA384, - .cs_size = SHA384_DIGEST_LENGTH, - .cs_digest_size = SHA384_DIGEST_LENGTH, - .cs_init = (cs_md_init)SHA384_Init, - .cs_update = (cs_md_update)SHA384_Update, - .cs_final = (cs_md_final)SHA384_Final, + .cs_type = CS_HASHTYPE_SHA384, + .cs_size = 
SHA384_DIGEST_LENGTH, + .cs_digest_size = SHA384_DIGEST_LENGTH, + .cs_init = (cs_md_init)SHA384_Init, + .cs_update = (cs_md_update)SHA384_Update, + .cs_final = (cs_md_final)SHA384_Final, }; #endif @@ -219,9 +220,9 @@ cs_find_md(uint8_t type) } union cs_hash_union { - SHA1_CTX sha1ctxt; - SHA256_CTX sha256ctx; - SHA384_CTX sha384ctx; + SHA1_CTX sha1ctxt; + SHA256_CTX sha256ctx; + SHA384_CTX sha384ctx; }; @@ -242,10 +243,12 @@ hash_rank(const CS_CodeDirectory *cd) uint32_t type = cd->hashType; unsigned int n; - for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) - if (hashPriorities[n] == type) + for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) { + if (hashPriorities[n] == type) { return n + 1; - return 0; /* not supported */ + } + } + return 0; /* not supported */ } @@ -265,20 +268,20 @@ hashes( assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound)); - if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { + if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { /* Get first scatter struct */ const SC_Scatter *scatter = (const SC_Scatter*) - ((const char*)cd + ntohl(cd->scatterOffset)); - uint32_t hashindex=0, scount, sbase=0; + ((const char*)cd + ntohl(cd->scatterOffset)); + uint32_t hashindex = 0, scount, sbase = 0; /* iterate all scatter structs */ do { - if((const char*)scatter > (const char*)cd + ntohl(cd->length)) { - if(cs_debug) { + if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) { + if (cs_debug) { printf("CODE SIGNING: Scatter extends past Code Directory\n"); } return NULL; } - + scount = ntohl(scatter->count); uint32_t new_base = ntohl(scatter->base); @@ -286,13 +289,13 @@ hashes( if (scount == 0) { return NULL; } - - if((hashindex > 0) && (new_base <= sbase)) { - if(cs_debug) { + + if ((hashindex > 0) && (new_base <= sbase)) { + if (cs_debug) { printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n", - sbase, new_base); + sbase, new_base); } - return NULL; /* unordered scatter array */ + return NULL; /* unordered scatter array */ } sbase = new_base; @@ -300,31 +303,31 @@ hashes( if (sbase > page) { return NULL; } - - if (sbase+scount >= page) { - /* Found the scatter struct that is + + if (sbase + scount >= page) { + /* Found the scatter struct that is * referencing our page */ /* base = address of first hash covered by scatter */ - base = (const unsigned char *)cd + ntohl(cd->hashOffset) + - hashindex * hash_len; + base = (const unsigned char *)cd + ntohl(cd->hashOffset) + + hashindex * hash_len; /* top = address of first hash after this scatter */ top = base + scount * hash_len; - if (!cs_valid_range(base, top, lower_bound, - upper_bound) || + if (!cs_valid_range(base, top, lower_bound, + upper_bound) || hashindex > nCodeSlots) { return NULL; } - + break; } - - /* this scatter struct is before the page we're looking + + /* this scatter struct is before the page we're looking * for. Iterate. 
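/*
 * Editor's aside: the scatter walk in hashes() above is an ordered
 * interval search: each (base, count) entry covers pages
 * [base, base + count), bases strictly increase, and a zero count
 * terminates the list.  A minimal sketch of the lookup under those
 * assumptions (hypothetical types, host byte order for brevity; the
 * kernel additionally bounds-checks each struct and guards overflow):
 */
struct scatter_ent {
    unsigned count;             /* pages covered; 0 terminates */
    unsigned base;              /* first page covered */
};

/* Returns the hash-slot index covering `page', or -1 if uncovered. */
static long
scatter_slot(const struct scatter_ent *sc, unsigned page)
{
    unsigned long slot = 0;

    for (; sc->count != 0; sc++) {
        if (page < sc->base) {
            return -1;          /* page falls in a gap */
        }
        if (page < sc->base + sc->count) {
            return (long)(slot + (page - sc->base));
        }
        slot += sc->count;      /* skip this entry's hashes */
    }
    return -1;
}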
*/ - hashindex+=scount; + hashindex += scount; scatter++; - } while(1); - + } while (1); + hash = base + (page - sbase) * hash_len; } else { base = (const unsigned char *)cd + ntohl(cd->hashOffset); @@ -337,9 +340,9 @@ hashes( hash = base + page * hash_len; } - + if (!cs_valid_range(hash, hash + hash_len, - lower_bound, upper_bound)) { + lower_bound, upper_bound)) { hash = NULL; } @@ -364,37 +367,45 @@ cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length) { struct cs_hash const *hashtype; - if (length < sizeof(*cd)) + if (length < sizeof(*cd)) { return EBADEXEC; - if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) + } + if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) { return EBADEXEC; - if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) + } + if (cd->pageSize < PAGE_SHIFT_4K || cd->pageSize > PAGE_SHIFT) { return EBADEXEC; + } hashtype = cs_find_md(cd->hashType); - if (hashtype == NULL) + if (hashtype == NULL) { return EBADEXEC; + } - if (cd->hashSize != hashtype->cs_size) + if (cd->hashSize != hashtype->cs_size) { return EBADEXEC; + } - if (length < ntohl(cd->hashOffset)) + if (length < ntohl(cd->hashOffset)) { return EBADEXEC; + } /* check that nSpecialSlots fits in the buffer in front of hashOffset */ - if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) + if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) { return EBADEXEC; + } /* check that codeslots fits in the buffer */ - if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) + if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) { return EBADEXEC; - - if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) { + } - if (length < ntohl(cd->scatterOffset)) + if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) { + if (length < ntohl(cd->scatterOffset)) { return EBADEXEC; + } const SC_Scatter *scatter = (const SC_Scatter *) - (((const uint8_t *)cd) + ntohl(cd->scatterOffset)); + (((const uint8_t *)cd) + ntohl(cd->scatterOffset)); uint32_t nPages = 0; /* @@ -402,15 +413,18 @@ cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length) * length of the scatter buffer array, we have to * check each entry. 
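/*
 * Editor's aside: the nSpecialSlots/nCodeSlots checks above deliberately
 * divide rather than multiply, so an attacker-controlled count can never
 * overflow the arithmetic.  The shape of the idiom, as a standalone
 * sketch (hypothetical name; esize must be nonzero):
 */
#include <stddef.h>

/* Does [off, off + n*esize) fit inside a buffer of `len' bytes? */
static int
range_fits(size_t len, size_t off, size_t n, size_t esize)
{
    if (off > len) {
        return 0;               /* offset itself out of bounds */
    }
    /* n * esize <= len - off, written without the multiplication */
    return (len - off) / esize >= n;
}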
*/ - while(1) { + while (1) { /* check that the end of each scatter buffer in within the length */ - if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) + if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) { return EBADEXEC; + } uint32_t scount = ntohl(scatter->count); - if (scount == 0) + if (scount == 0) { break; - if (nPages + scount < nPages) + } + if (nPages + scount < nPages) { return EBADEXEC; + } nPages += scount; scatter++; @@ -418,29 +432,34 @@ cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length) /* XXX check that targetOffset doesn't overlap */ } #if 0 /* rdar://12579439 */ - if (nPages != ntohl(cd->nCodeSlots)) + if (nPages != ntohl(cd->nCodeSlots)) { return EBADEXEC; + } #endif } - if (length < ntohl(cd->identOffset)) + if (length < ntohl(cd->identOffset)) { return EBADEXEC; + } /* identifier is NUL terminated string */ if (cd->identOffset) { const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset); - if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) + if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) { return EBADEXEC; + } } /* team identifier is NULL terminated string */ if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) { - if (length < ntohl(cd->teamOffset)) + if (length < ntohl(cd->teamOffset)) { return EBADEXEC; + } const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset); - if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) + if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) { return EBADEXEC; + } } return 0; @@ -453,8 +472,9 @@ cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length) static int cs_validate_blob(const CS_GenericBlob *blob, size_t length) { - if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) + if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) { return EBADEXEC; + } return 0; } @@ -493,8 +513,9 @@ cs_validate_csblob( length = blob_size; error = cs_validate_blob(blob, length); - if (error) + if (error) { return error; + } length = ntohl(blob->length); if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) { @@ -506,47 +527,54 @@ cs_validate_csblob( const CS_CodeDirectory *sha1_cd = NULL; #endif - if (length < sizeof(CS_SuperBlob)) + if (length < sizeof(CS_SuperBlob)) { return EBADEXEC; + } sb = (const CS_SuperBlob *)blob; count = ntohl(sb->count); /* check that the array of BlobIndex fits in the rest of the data */ - if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) + if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) { return EBADEXEC; + } /* now check each BlobIndex */ for (n = 0; n < count; n++) { const CS_BlobIndex *blobIndex = &sb->index[n]; uint32_t type = ntohl(blobIndex->type); uint32_t offset = ntohl(blobIndex->offset); - if (length < offset) + if (length < offset) { return EBADEXEC; + } const CS_GenericBlob *subBlob = - (const CS_GenericBlob *)(const void *)(addr + offset); + (const CS_GenericBlob *)(const void *)(addr + offset); size_t subLength = length - offset; - if ((error = cs_validate_blob(subBlob, subLength)) != 0) + if ((error = cs_validate_blob(subBlob, subLength)) != 0) { return error; + } subLength = ntohl(subBlob->length); /* extra validation for CDs, that is also returned */ if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) { const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob; - if ((error = 
cs_validate_codedirectory(candidate, subLength)) != 0) + if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) { return error; + } unsigned int rank = hash_rank(candidate); - if (cs_debug > 3) + if (cs_debug > 3) { printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n); + } if (best_cd == NULL || rank > best_rank) { best_cd = candidate; best_rank = rank; - if (cs_debug > 2) + if (cs_debug > 2) { printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank); + } *rcd = best_cd; } else if (best_cd != NULL && rank == best_rank) { /* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */ @@ -596,7 +624,7 @@ cs_validate_csblob( if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) { if (sha1_cd->flags != (*rcd)->flags) { printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n", - (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags); + (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags); *rcd = NULL; return EBADEXEC; } @@ -604,18 +632,18 @@ cs_validate_csblob( *rcd = sha1_cd; } #endif - } else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) { - - if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) + if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) { return error; + } *rcd = (const CS_CodeDirectory *)blob; } else { return EBADEXEC; } - if (*rcd == NULL) + if (*rcd == NULL) { return EBADEXEC; + } return 0; } @@ -626,7 +654,7 @@ cs_validate_csblob( * Find an blob from the superblob/code directory. The blob must have * been been validated by cs_validate_csblob() before calling * this. Use csblob_find_blob() instead. - * + * * Will also find a "raw" code directory if its stored as well as * searching the superblob. 
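/*
 * Editor's aside: cs_validate_csblob() above keeps the highest-ranked
 * CodeDirectory while scanning the superblob, and treats a repeat of
 * the current best rank as suspicious because hash types map 1:1 to
 * ranks.  A minimal sketch of that selection loop (hypothetical type):
 */
#include <stddef.h>

struct cand { unsigned rank; };

/* Returns the best candidate, or NULL on a duplicated best rank. */
static const struct cand *
pick_best(const struct cand *c, unsigned n)
{
    const struct cand *best = NULL;
    unsigned i;

    for (i = 0; i < n; i++, c++) {
        if (best == NULL || c->rank > best->rank) {
            best = c;
        } else if (c->rank == best->rank) {
            return NULL;        /* duplicate hash type: reject */
        }
    }
    return best;
}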
* @@ -649,20 +677,24 @@ csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32 size_t n, count = ntohl(sb->count); for (n = 0; n < count; n++) { - if (ntohl(sb->index[n].type) != type) + if (ntohl(sb->index[n].type) != type) { continue; + } uint32_t offset = ntohl(sb->index[n].offset); - if (length - sizeof(const CS_GenericBlob) < offset) + if (length - sizeof(const CS_GenericBlob) < offset) { return NULL; + } blob = (const CS_GenericBlob *)(const void *)(addr + offset); - if (ntohl(blob->magic) != magic) + if (ntohl(blob->magic) != magic) { continue; + } return blob; } } else if (type == CSSLOT_CODEDIRECTORY - && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY - && magic == CSMAGIC_CODEDIRECTORY) + && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY + && magic == CSMAGIC_CODEDIRECTORY) { return blob; + } return NULL; } @@ -670,8 +702,9 @@ csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32 const CS_GenericBlob * csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic) { - if ((csblob->csb_flags & CS_VALID) == 0) + if ((csblob->csb_flags & CS_VALID) == 0) { return NULL; + } return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic); } @@ -679,10 +712,11 @@ static const uint8_t * find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot) { /* there is no zero special slot since that is the first code slot */ - if (ntohl(cd->nSpecialSlots) < slot || slot == 0) + if (ntohl(cd->nSpecialSlots) < slot || slot == 0) { return NULL; + } - return ((const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot)); + return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot); } static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 }; @@ -699,8 +733,9 @@ csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_le *out_start = NULL; *out_length = 0; - if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) - return EBADEXEC; + if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) { + return EBADEXEC; + } code_dir = csblob->csb_cd; @@ -712,8 +747,9 @@ csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_le embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS); if (embedded_hash == NULL) { - if (entitlements) + if (entitlements) { return EBADEXEC; + } return 0; } else if (entitlements == NULL) { if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) { @@ -727,8 +763,9 @@ csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_le csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length)); csblob->csb_hashtype->cs_final(computed_hash, &context); - if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) + if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) { return EBADEXEC; + } *out_start = __DECONST(void *, entitlements); *out_length = ntohl(entitlements->length); @@ -745,7 +782,7 @@ csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_le /* * ubc_init - * + * * Initialization of the zone for Unified Buffer Cache. 
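/*
 * Editor's aside: csblob_get_entitlements() above trusts the
 * entitlements blob only after re-hashing it with the signature's own
 * digest (the cs_init/cs_update/cs_final triple) and comparing against
 * the hash stored in the entitlements special slot.  A hedged sketch of
 * that check, assuming a caller-supplied digest vtable:
 */
#include <string.h>

struct digest_ops {
    size_t len;                             /* bytes compared */
    void (*init)(void *ctx);
    void (*update)(void *ctx, const void *p, size_t n);
    void (*final)(unsigned char *out, void *ctx);
};

/* Returns 0 if `data' hashes to `expected', nonzero otherwise. */
static int
verify_slot_hash(const struct digest_ops *ops, void *ctx,
    const void *data, size_t n, const unsigned char *expected)
{
    unsigned char computed[64];             /* >= any digest used here */

    ops->init(ctx);
    ops->update(ctx, data, n);
    ops->final(computed, ctx);
    return memcmp(computed, expected, ops->len);
}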
* * Parameters: (void) @@ -758,11 +795,11 @@ csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_le __private_extern__ void ubc_init(void) { - int i; + int i; - i = (vm_size_t) sizeof (struct ubc_info); + i = (vm_size_t) sizeof(struct ubc_info); - ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone"); + ubc_info_zone = zinit(i, 10000 * i, 8192, "ubc_info zone"); zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE); } @@ -783,7 +820,7 @@ ubc_init(void) int ubc_info_init(struct vnode *vp) { - return(ubc_info_init_internal(vp, 0, 0)); + return ubc_info_init_internal(vp, 0, 0); } @@ -802,7 +839,7 @@ ubc_info_init(struct vnode *vp) int ubc_info_init_withsize(struct vnode *vp, off_t filesize) { - return(ubc_info_init_internal(vp, 1, filesize)); + return ubc_info_init_internal(vp, 1, filesize); } @@ -837,7 +874,7 @@ ubc_info_init_withsize(struct vnode *vp, off_t filesize) static int ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize) { - struct ubc_info *uip; + struct ubc_info *uip; void * pager; int error = 0; kern_return_t kret; @@ -850,7 +887,6 @@ ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize) * attach one; otherwise, we will reuse the one that's there. */ if (uip == UBC_INFO_NULL) { - uip = (struct ubc_info *) zalloc(ubc_info_zone); bzero((char *)uip, sizeof(struct ubc_info)); @@ -898,26 +934,28 @@ ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize) * vnode_pager_setup() returned here. */ kret = memory_object_create_named(pager, - (memory_object_size_t)uip->ui_size, &control); - vnode_pager_deallocate(pager); - if (kret != KERN_SUCCESS) + (memory_object_size_t)uip->ui_size, &control); + vnode_pager_deallocate(pager); + if (kret != KERN_SUCCESS) { panic("ubc_info_init: memory_object_create_named returned %d", kret); + } assert(control); - uip->ui_control = control; /* cache the value of the mo control */ - SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */ + uip->ui_control = control; /* cache the value of the mo control */ + SET(uip->ui_flags, UI_HASOBJREF); /* with a named reference */ if (withfsize == 0) { /* initialize the size */ error = vnode_size(vp, &uip->ui_size, vfs_context_current()); - if (error) + if (error) { uip->ui_size = 0; + } } else { uip->ui_size = filesize; } - vp->v_lflag |= VNAMED_UBC; /* vnode has a named ubc reference */ + vp->v_lflag |= VNAMED_UBC; /* vnode has a named ubc reference */ - return (error); + return error; } @@ -944,9 +982,10 @@ ubc_info_free(struct ubc_info *uip) kauth_cred_unref(&uip->ui_ucred); } - if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) + if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) { memory_object_control_deallocate(uip->ui_control); - + } + cluster_release(uip); ubc_cs_free(uip); @@ -958,10 +997,11 @@ ubc_info_free(struct ubc_info *uip) void ubc_info_deallocate(struct ubc_info *uip) { - ubc_info_free(uip); + ubc_info_free(uip); } -errno_t mach_to_bsd_errno(kern_return_t mach_err) +errno_t +mach_to_bsd_errno(kern_return_t mach_err) { switch (mach_err) { case KERN_SUCCESS: @@ -1074,32 +1114,36 @@ errno_t mach_to_bsd_errno(kern_return_t mach_err) * flushed, if the new size is not aligned to a page * boundary. This is usually indicative of an I/O error. 
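/*
 * Editor's aside: the truncation path in ubc_setsize_ex() below is all
 * page arithmetic: find the page containing the new EOF, zero its tail
 * if the EOF lands mid-page, then invalidate every whole page up to the
 * old page-rounded EOF.  A minimal sketch of the offsets involved
 * (hypothetical names; 4 KiB pages assumed):
 */
#include <stdint.h>

#define PGSZ    4096ULL
#define PGMASK  (PGSZ - 1)

struct trunc_plan {
    uint64_t lastpg;            /* first page to invalidate */
    uint64_t lastoff;           /* EOF offset within its page; 0 = aligned */
    uint64_t olastpgend;        /* first byte past old EOF, page-rounded */
};

static struct trunc_plan
plan_truncate(uint64_t osize, uint64_t nsize)
{
    struct trunc_plan p;

    p.lastpg = nsize & ~PGMASK;                 /* trunc_page_64(nsize) */
    p.lastoff = nsize & PGMASK;
    p.olastpgend = (osize + PGMASK) & ~PGMASK;  /* round_page_64(osize) */
    if (p.lastoff != 0) {
        /* tail of this page gets zeroed; invalidate from the next one */
        p.lastpg += PGSZ;
    }
    return p;
}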
*/ -errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts) +errno_t +ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts) { - off_t osize; /* ui_size before change */ + off_t osize; /* ui_size before change */ off_t lastpg, olastpgend, lastoff; struct ubc_info *uip; memory_object_control_t control; kern_return_t kret = KERN_SUCCESS; - if (nsize < (off_t)0) + if (nsize < (off_t)0) { return EINVAL; + } - if (!UBCINFOEXISTS(vp)) + if (!UBCINFOEXISTS(vp)) { return ENOENT; + } uip = vp->v_ubcinfo; osize = uip->ui_size; - if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) + if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) { return EAGAIN; + } /* * Update the size before flushing the VM */ uip->ui_size = nsize; - if (nsize >= osize) { /* Nothing more to do */ + if (nsize >= osize) { /* Nothing more to do */ if (nsize > osize) { lock_vnode_and_post(vp, NOTE_EXTEND); } @@ -1120,8 +1164,8 @@ errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts) lastoff = (nsize & PAGE_MASK_64); if (lastoff) { - upl_t upl; - upl_page_info_t *pl; + upl_t upl; + upl_page_info_t *pl; /* * new EOF ends up in the middle of a page @@ -1130,39 +1174,44 @@ errno_t ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts) */ kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) - panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret); + if (kret != KERN_SUCCESS) { + panic("ubc_setsize: ubc_create_upl (error = %d)\n", kret); + } - if (upl_valid_page(pl, 0)) - cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL); + if (upl_valid_page(pl, 0)) { + cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL); + } ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); lastpg += PAGE_SIZE_64; } if (olastpgend > lastpg) { - int flags; + int flags; - if (lastpg == 0) + if (lastpg == 0) { flags = MEMORY_OBJECT_DATA_FLUSH_ALL; - else + } else { flags = MEMORY_OBJECT_DATA_FLUSH; + } /* * invalidate the pages beyond the new EOF page * */ kret = memory_object_lock_request(control, - (memory_object_offset_t)lastpg, - (memory_object_size_t)(olastpgend - lastpg), NULL, NULL, - MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE); - if (kret != KERN_SUCCESS) - printf("ubc_setsize: invalidate failed (error = %d)\n", kret); + (memory_object_offset_t)lastpg, + (memory_object_size_t)(olastpgend - lastpg), NULL, NULL, + MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE); + if (kret != KERN_SUCCESS) { + printf("ubc_setsize: invalidate failed (error = %d)\n", kret); + } } return mach_to_bsd_errno(kret); } // Returns true for success -int ubc_setsize(vnode_t vp, off_t nsize) +int +ubc_setsize(vnode_t vp, off_t nsize) { return ubc_setsize_ex(vp, nsize, 0) == 0; } @@ -1189,11 +1238,12 @@ off_t ubc_getsize(struct vnode *vp) { /* people depend on the side effect of this working this way - * as they call this for directory + * as they call this for directory */ - if (!UBCINFOEXISTS(vp)) - return ((off_t)0); - return (vp->v_ubcinfo->ui_size); + if (!UBCINFOEXISTS(vp)) { + return (off_t)0; + } + return vp->v_ubcinfo->ui_size; } @@ -1224,7 +1274,7 @@ __private_extern__ int ubc_umount(struct mount *mp) { vnode_iterate(mp, 0, ubc_umcallback, 0); - return(0); + return 0; } @@ -1237,12 +1287,10 @@ ubc_umount(struct mount *mp) static int ubc_umcallback(vnode_t vp, __unused void * args) { - if (UBCINFOEXISTS(vp)) { - (void) 
ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL); } - return (VNODE_RETURNED); + return VNODE_RETURNED; } @@ -1264,10 +1312,11 @@ ubc_umcallback(vnode_t vp, __unused void * args) kauth_cred_t ubc_getcred(struct vnode *vp) { - if (UBCINFOEXISTS(vp)) - return (vp->v_ubcinfo->ui_ucred); + if (UBCINFOEXISTS(vp)) { + return vp->v_ubcinfo->ui_ucred; + } - return (NOCRED); + return NOCRED; } @@ -1333,8 +1382,9 @@ ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread) kauth_cred_t credp; struct uthread *uthread = get_bsdthread_info(thread); - if (!UBCINFOEXISTS(vp)) - return (1); + if (!UBCINFOEXISTS(vp)) { + return 1; + } vnode_lock(vp); @@ -1349,10 +1399,10 @@ ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread) uip->ui_ucred = uthread->uu_ucred; kauth_cred_ref(uip->ui_ucred); } - } + } vnode_unlock(vp); - return (0); + return 0; } @@ -1380,7 +1430,7 @@ ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread) * not be used, as it is incompatible with per-thread credentials; * it exists for legacy KPI reasons. * - * DEPRECATION: ubc_setcred() is being deprecated. Please use + * DEPRECATION: ubc_setcred() is being deprecated. Please use * ubc_setthreadcred() instead. */ int @@ -1390,8 +1440,9 @@ ubc_setcred(struct vnode *vp, proc_t p) kauth_cred_t credp; /* If there is no ubc_info, deny the operation */ - if ( !UBCINFOEXISTS(vp)) - return (0); + if (!UBCINFOEXISTS(vp)) { + return 0; + } /* * Check to see if there is already a credential reference in the @@ -1402,10 +1453,10 @@ ubc_setcred(struct vnode *vp, proc_t p) credp = uip->ui_ucred; if (!IS_VALID_CRED(credp)) { uip->ui_ucred = kauth_cred_proc_ref(p); - } + } vnode_unlock(vp); - return (1); + return 1; } /* @@ -1426,10 +1477,11 @@ ubc_setcred(struct vnode *vp, proc_t p) __private_extern__ memory_object_t ubc_getpager(struct vnode *vp) { - if (UBCINFOEXISTS(vp)) - return (vp->v_ubcinfo->ui_pager); + if (UBCINFOEXISTS(vp)) { + return vp->v_ubcinfo->ui_pager; + } - return (0); + return 0; } @@ -1459,10 +1511,11 @@ ubc_getpager(struct vnode *vp) memory_object_control_t ubc_getobject(struct vnode *vp, __unused int flags) { - if (UBCINFOEXISTS(vp)) - return((vp->v_ubcinfo->ui_control)); + if (UBCINFOEXISTS(vp)) { + return vp->v_ubcinfo->ui_control; + } - return (MEMORY_OBJECT_CONTROL_NULL); + return MEMORY_OBJECT_CONTROL_NULL; } /* @@ -1498,11 +1551,12 @@ ubc_blktooff(vnode_t vp, daddr64_t blkno) if (UBCINFOEXISTS(vp)) { error = VNOP_BLKTOOFF(vp, blkno, &file_offset); - if (error) + if (error) { file_offset = -1; + } } - return (file_offset); + return file_offset; } @@ -1541,11 +1595,12 @@ ubc_offtoblk(vnode_t vp, off_t offset) if (UBCINFOEXISTS(vp)) { error = VNOP_OFFTOBLK(vp, offset, &blkno); - if (error) + if (error) { blkno = -1; + } } - return (blkno); + return blkno; } @@ -1563,12 +1618,13 @@ ubc_offtoblk(vnode_t vp, off_t offset) int ubc_pages_resident(vnode_t vp) { - kern_return_t kret; - boolean_t has_pages_resident; - - if (!UBCINFOEXISTS(vp)) - return (0); - + kern_return_t kret; + boolean_t has_pages_resident; + + if (!UBCINFOEXISTS(vp)) { + return 0; + } + /* * The following call may fail if an invalid ui_control is specified, * or if there is no VM object associated with the control object. In @@ -1576,14 +1632,16 @@ ubc_pages_resident(vnode_t vp) * result in correct behavior. 
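/*
 * Editor's aside: ubc_setthreadcred()/ubc_setcred() above are
 * first-writer-wins caches: take the vnode lock, and only if no valid
 * credential is already cached, install a new reference.  A simplified
 * sketch of the pattern with a generic slot (hypothetical names; the
 * kernel tests IS_VALID_CRED rather than NULL):
 */
struct cred;                                    /* opaque */

/* Returns the cached cred, installing a ref to `fresh' only if empty. */
static struct cred *
cache_cred_once(struct cred **slot, struct cred *fresh,
    struct cred *(*take_ref)(struct cred *),
    void (*lock)(void), void (*unlock)(void))
{
    struct cred *cur;

    lock();
    cur = *slot;
    if (cur == 0) {
        cur = *slot = take_ref(fresh);          /* first caller wins */
    }
    unlock();
    return cur;
}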
*/ kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident); - - if (kret != KERN_SUCCESS) - return (0); - - if (has_pages_resident == TRUE) - return (1); - - return (0); + + if (kret != KERN_SUCCESS) { + return 0; + } + + if (has_pages_resident == TRUE) { + return 1; + } + + return 0; } /* @@ -1622,17 +1680,19 @@ ubc_pages_resident(vnode_t vp) errno_t ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags) { - int retval; + int retval; int io_errno = 0; - - if (resid_off) - *resid_off = beg_off; - retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno); + if (resid_off) { + *resid_off = beg_off; + } + + retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno); - if (retval == 0 && io_errno == 0) - return (EINVAL); - return (io_errno); + if (retval == 0 && io_errno == 0) { + return EINVAL; + } + return io_errno; } @@ -1702,42 +1762,49 @@ ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags) static int ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno) { - memory_object_size_t tsize; - kern_return_t kret; + memory_object_size_t tsize; + kern_return_t kret; int request_flags = 0; int flush_flags = MEMORY_OBJECT_RETURN_NONE; - - if ( !UBCINFOEXISTS(vp)) - return (0); - if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) - return (0); - if (end_off <= beg_off) - return (1); - - if (flags & UBC_INVALIDATE) - /* + + if (!UBCINFOEXISTS(vp)) { + return 0; + } + if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) { + return 0; + } + if (end_off <= beg_off) { + return 1; + } + + if (flags & UBC_INVALIDATE) { + /* * discard the resident pages */ request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE); + } - if (flags & UBC_SYNC) - /* + if (flags & UBC_SYNC) { + /* * wait for all the I/O to complete before returning */ - request_flags |= MEMORY_OBJECT_IO_SYNC; + request_flags |= MEMORY_OBJECT_IO_SYNC; + } - if (flags & UBC_PUSHDIRTY) - /* + if (flags & UBC_PUSHDIRTY) { + /* * we only return the dirty pages in the range */ - flush_flags = MEMORY_OBJECT_RETURN_DIRTY; + flush_flags = MEMORY_OBJECT_RETURN_DIRTY; + } - if (flags & UBC_PUSHALL) - /* + if (flags & UBC_PUSHALL) { + /* * then return all the interesting pages in the range (both * dirty and precious) to the pager */ - flush_flags = MEMORY_OBJECT_RETURN_ALL; + flush_flags = MEMORY_OBJECT_RETURN_ALL; + } beg_off = trunc_page_64(beg_off); end_off = round_page_64(end_off); @@ -1745,12 +1812,12 @@ ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, i /* flush and/or invalidate pages in the range requested */ kret = memory_object_lock_request(vp->v_ubcinfo->ui_control, - beg_off, tsize, - (memory_object_offset_t *)resid_off, - io_errno, flush_flags, request_flags, - VM_PROT_NO_CHANGE); - - return ((kret == KERN_SUCCESS) ? 1 : 0); + beg_off, tsize, + (memory_object_offset_t *)resid_off, + io_errno, flush_flags, request_flags, + VM_PROT_NO_CHANGE); + + return (kret == KERN_SUCCESS) ? 
1 : 0; } @@ -1813,14 +1880,13 @@ ubc_map(vnode_t vp, int flags) int need_wakeup = 0; if (UBCINFOEXISTS(vp)) { - vnode_lock(vp); uip = vp->v_ubcinfo; while (ISSET(uip->ui_flags, UI_MAPBUSY)) { SET(uip->ui_flags, UI_MAPWAITING); (void) msleep(&uip->ui_flags, &vp->v_lock, - PRIBIO, "ubc_map", NULL); + PRIBIO, "ubc_map", NULL); } SET(uip->ui_flags, UI_MAPBUSY); vnode_unlock(vp); @@ -1829,14 +1895,14 @@ ubc_map(vnode_t vp, int flags) /* * rdar://problem/22587101 required that we stop propagating - * EPERM up the stack. Otherwise, we would have to funnel up + * EPERM up the stack. Otherwise, we would have to funnel up * the error at all the call sites for memory_object_map(). - * The risk is in having to undo the map/object/entry state at + * The risk is in having to undo the map/object/entry state at * all these call sites. It would also affect more than just mmap() * e.g. vm_remap(). * * if (error != EPERM) - * error = 0; + * error = 0; */ error = 0; @@ -1844,8 +1910,9 @@ ubc_map(vnode_t vp, int flags) vnode_lock_spin(vp); if (error == 0) { - if ( !ISSET(uip->ui_flags, UI_ISMAPPED)) - need_ref = 1; + if (!ISSET(uip->ui_flags, UI_ISMAPPED)) { + need_ref = 1; + } SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED)); if (flags & PROT_WRITE) { SET(uip->ui_flags, UI_MAPPEDWRITE); @@ -1859,18 +1926,20 @@ ubc_map(vnode_t vp, int flags) } vnode_unlock(vp); - if (need_wakeup) + if (need_wakeup) { wakeup(&uip->ui_flags); + } if (need_ref) { /* * Make sure we get a ref as we can't unwind from here */ - if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) + if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) { panic("%s : VNODE_REF_FORCE failed\n", __FUNCTION__); + } } } - return (error); + return error; } @@ -1906,14 +1975,15 @@ ubc_destroy_named(vnode_t vp) kern_return_t kret; if (UBCINFOEXISTS(vp)) { - uip = vp->v_ubcinfo; + uip = vp->v_ubcinfo; /* Terminate the memory object */ control = ubc_getobject(vp, UBC_HOLDOBJECT); if (control != MEMORY_OBJECT_CONTROL_NULL) { - kret = memory_object_destroy(control, 0); - if (kret != KERN_SUCCESS) - panic("ubc_destroy_named: memory_object_destroy failed"); + kret = memory_object_destroy(control, 0); + if (kret != KERN_SUCCESS) { + panic("ubc_destroy_named: memory_object_destroy failed"); + } } } } @@ -1946,9 +2016,10 @@ ubc_destroy_named(vnode_t vp) int ubc_isinuse(struct vnode *vp, int busycount) { - if ( !UBCINFOEXISTS(vp)) - return (0); - return(ubc_isinuse_locked(vp, busycount, 0)); + if (!UBCINFOEXISTS(vp)) { + return 0; + } + return ubc_isinuse_locked(vp, busycount, 0); } @@ -1986,15 +2057,18 @@ ubc_isinuse_locked(struct vnode *vp, int busycount, int locked) int retval = 0; - if (!locked) + if (!locked) { vnode_lock_spin(vp); + } - if ((vp->v_usecount - vp->v_kusecount) > busycount) + if ((vp->v_usecount - vp->v_kusecount) > busycount) { retval = 1; + } - if (!locked) + if (!locked) { vnode_unlock(vp); - return (retval); + } + return retval; } @@ -2021,11 +2095,12 @@ __private_extern__ void ubc_unmap(struct vnode *vp) { struct ubc_info *uip; - int need_rele = 0; - int need_wakeup = 0; + int need_rele = 0; + int need_wakeup = 0; - if (vnode_getwithref(vp)) - return; + if (vnode_getwithref(vp)) { + return; + } if (UBCINFOEXISTS(vp)) { bool want_fsevent = false; @@ -2036,13 +2111,14 @@ ubc_unmap(struct vnode *vp) while (ISSET(uip->ui_flags, UI_MAPBUSY)) { SET(uip->ui_flags, UI_MAPWAITING); (void) msleep(&uip->ui_flags, &vp->v_lock, - PRIBIO, "ubc_unmap", NULL); + PRIBIO, "ubc_unmap", NULL); } SET(uip->ui_flags, UI_MAPBUSY); if (ISSET(uip->ui_flags, UI_ISMAPPED)) { - if 
(ISSET(uip->ui_flags, UI_MAPPEDWRITE)) + if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) { want_fsevent = true; + } need_rele = 1; @@ -2055,37 +2131,38 @@ ubc_unmap(struct vnode *vp) vnode_unlock(vp); if (need_rele) { - vfs_context_t ctx = vfs_context_current(); + vfs_context_t ctx = vfs_context_current(); - (void)VNOP_MNOMAP(vp, ctx); + (void)VNOP_MNOMAP(vp, ctx); #if CONFIG_FSE - /* - * Why do we want an fsevent here? Normally the - * content modified fsevent is posted when a file is - * closed and only if it's written to via conventional - * means. It's perfectly legal to close a file and - * keep your mappings and we don't currently track - * whether it was written to via a mapping. - * Therefore, we need to post an fsevent here if the - * file was mapped writable. This may result in false - * events, i.e. we post a notification when nothing - * has really changed. - */ - if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) { - add_fsevent(FSE_CONTENT_MODIFIED, ctx, - FSE_ARG_VNODE, vp, - FSE_ARG_DONE); - } + /* + * Why do we want an fsevent here? Normally the + * content modified fsevent is posted when a file is + * closed and only if it's written to via conventional + * means. It's perfectly legal to close a file and + * keep your mappings and we don't currently track + * whether it was written to via a mapping. + * Therefore, we need to post an fsevent here if the + * file was mapped writable. This may result in false + * events, i.e. we post a notification when nothing + * has really changed. + */ + if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) { + add_fsevent(FSE_CONTENT_MODIFIED, ctx, + FSE_ARG_VNODE, vp, + FSE_ARG_DONE); + } #endif - vnode_rele(vp); + vnode_rele(vp); } vnode_lock_spin(vp); - if (need_rele) + if (need_rele) { CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE); + } CLR(uip->ui_flags, UI_MAPBUSY); @@ -2095,9 +2172,9 @@ ubc_unmap(struct vnode *vp) } vnode_unlock(vp); - if (need_wakeup) - wakeup(&uip->ui_flags); - + if (need_wakeup) { + wakeup(&uip->ui_flags); + } } /* * the drop of the vnode ref will cleanup @@ -2183,23 +2260,24 @@ ubc_unmap(struct vnode *vp) */ kern_return_t ubc_page_op( - struct vnode *vp, - off_t f_offset, - int ops, - ppnum_t *phys_entryp, - int *flagsp) + struct vnode *vp, + off_t f_offset, + int ops, + ppnum_t *phys_entryp, + int *flagsp) { - memory_object_control_t control; + memory_object_control_t control; control = ubc_getobject(vp, UBC_FLAGS_NONE); - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } - return (memory_object_page_op(control, - (memory_object_offset_t)f_offset, - ops, - phys_entryp, - flagsp)); + return memory_object_page_op(control, + (memory_object_offset_t)f_offset, + ops, + phys_entryp, + flagsp); } @@ -2263,23 +2341,24 @@ ubc_page_op( */ kern_return_t ubc_range_op( - struct vnode *vp, - off_t f_offset_beg, - off_t f_offset_end, + struct vnode *vp, + off_t f_offset_beg, + off_t f_offset_end, int ops, int *range) { - memory_object_control_t control; + memory_object_control_t control; control = ubc_getobject(vp, UBC_FLAGS_NONE); - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } - return (memory_object_range_op(control, - (memory_object_offset_t)f_offset_beg, - (memory_object_offset_t)f_offset_end, - ops, - range)); + return memory_object_range_op(control, + (memory_object_offset_t)f_offset_beg, + (memory_object_offset_t)f_offset_end, + ops, + range); } @@ 
-2305,12 +2384,12 @@ ubc_range_op( * multiple of the page size * KERN_INVALID_ARGUMENT There is no ubc_info associated with * the vnode, or there is no memory object - * control associated with the ubc_info + * control associated with the ubc_info * memory_object_upl_request:KERN_INVALID_VALUE * The supplied upl_flags argument is * invalid * Implicit Returns: - * *uplp (modified) + * *uplp (modified) * *plp (modified) If non-NULL, the value of *plp will be * modified to point to the internal page * list; this modification may occur even @@ -2323,59 +2402,61 @@ ubc_range_op( */ kern_return_t ubc_create_upl_external( - struct vnode *vp, - off_t f_offset, - int bufsize, - upl_t *uplp, - upl_page_info_t **plp, - int uplflags) + struct vnode *vp, + off_t f_offset, + int bufsize, + upl_t *uplp, + upl_page_info_t **plp, + int uplflags) { - return (ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt())); + return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt()); } kern_return_t ubc_create_upl_kernel( - struct vnode *vp, - off_t f_offset, - int bufsize, - upl_t *uplp, - upl_page_info_t **plp, - int uplflags, + struct vnode *vp, + off_t f_offset, + int bufsize, + upl_t *uplp, + upl_page_info_t **plp, + int uplflags, vm_tag_t tag) { - memory_object_control_t control; - kern_return_t kr; + memory_object_control_t control; + kern_return_t kr; - if (plp != NULL) + if (plp != NULL) { *plp = NULL; + } *uplp = NULL; - - if (bufsize & 0xfff) + + if (bufsize & 0xfff) { return KERN_INVALID_ARGUMENT; + } - if (bufsize > MAX_UPL_SIZE_BYTES) + if (bufsize > MAX_UPL_SIZE_BYTES) { return KERN_INVALID_ARGUMENT; + } if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) { - if (uplflags & UPL_UBC_MSYNC) { uplflags &= UPL_RET_ONLY_DIRTY; uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE | - UPL_SET_INTERNAL | UPL_SET_LITE; - + UPL_SET_INTERNAL | UPL_SET_LITE; } else if (uplflags & UPL_UBC_PAGEOUT) { uplflags &= UPL_RET_ONLY_DIRTY; - if (uplflags & UPL_RET_ONLY_DIRTY) + if (uplflags & UPL_RET_ONLY_DIRTY) { uplflags |= UPL_NOBLOCK; + } uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE | - UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE; + UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE; } else { uplflags |= UPL_RET_ONLY_ABSENT | - UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | - UPL_SET_INTERNAL | UPL_SET_LITE; + UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | + UPL_SET_INTERNAL | UPL_SET_LITE; /* * if the requested size == PAGE_SIZE, we don't want to set @@ -2385,29 +2466,33 @@ ubc_create_upl_kernel( * since we're only asking for a single page, we can block w/o fear * of tying up pages while waiting for more to become available */ - if (bufsize > PAGE_SIZE) + if (bufsize > PAGE_SIZE) { uplflags |= UPL_NOBLOCK; + } } } else { uplflags &= ~UPL_FOR_PAGEOUT; if (uplflags & UPL_WILL_BE_DUMPED) { uplflags &= ~UPL_WILL_BE_DUMPED; - uplflags |= (UPL_NO_SYNC|UPL_SET_INTERNAL); - } else - uplflags |= (UPL_NO_SYNC|UPL_CLEAN_IN_PLACE|UPL_SET_INTERNAL); + uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL); + } else { + uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL); + } } control = ubc_getobject(vp, UBC_FLAGS_NONE); - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag); - if (kr == KERN_SUCCESS && plp != NULL) + if (kr == KERN_SUCCESS && plp != NULL) { *plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp); + } return kr; } - 
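/*
 * Editor's aside: a typical caller of the UPL KPIs patched above pairs
 * every ubc_create_upl*() with exactly one commit or abort, as the
 * ubc_setsize_ex() hunk earlier does.  A hedged sketch of that
 * lifecycle (hypothetical caller; error handling trimmed; pg_off must
 * be page-aligned per the bufsize & 0xfff check above):
 */
static kern_return_t
touch_first_page(vnode_t vp, off_t pg_off)
{
    upl_t upl;
    upl_page_info_t *pl;
    kern_return_t kr;

    kr = ubc_create_upl_kernel(vp, pg_off, PAGE_SIZE, &upl, &pl,
        UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    if (upl_valid_page(pl, 0)) {
        /* ... operate on the resident page ... */
        return ubc_upl_commit_range(upl, 0, PAGE_SIZE,
            UPL_COMMIT_FREE_ON_EMPTY);
    }
    /* nothing resident: release the pages and the UPL itself */
    return ubc_upl_abort_range(upl, 0, PAGE_SIZE,
        UPL_ABORT_FREE_ON_EMPTY);
}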
- + + /* * ubc_upl_maxbufsize * @@ -2417,11 +2502,11 @@ ubc_create_upl_kernel( * * Returns: maximum size buffer (in bytes) ubc_create_upl( ) will take. */ -upl_size_t +upl_size_t ubc_upl_maxbufsize( void) { - return(MAX_UPL_SIZE_BYTES); + return MAX_UPL_SIZE_BYTES; } /* @@ -2443,10 +2528,10 @@ ubc_upl_maxbufsize( */ kern_return_t ubc_upl_map( - upl_t upl, - vm_offset_t *dst_addr) + upl_t upl, + vm_offset_t *dst_addr) { - return (vm_upl_map(kernel_map, upl, dst_addr)); + return vm_upl_map(kernel_map, upl, dst_addr); } @@ -2464,9 +2549,9 @@ ubc_upl_map( */ kern_return_t ubc_upl_unmap( - upl_t upl) + upl_t upl) { - return(vm_upl_unmap(kernel_map, upl)); + return vm_upl_unmap(kernel_map, upl); } @@ -2495,10 +2580,10 @@ ubc_upl_unmap( */ kern_return_t ubc_upl_commit( - upl_t upl) + upl_t upl) { - upl_page_info_t *pl; - kern_return_t kr; + upl_page_info_t *pl; + kern_return_t kr; pl = UPL_GET_INTERNAL_PAGE_LIST(upl); kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT); @@ -2561,17 +2646,18 @@ ubc_upl_commit( */ kern_return_t ubc_upl_commit_range( - upl_t upl, - upl_offset_t offset, - upl_size_t size, - int flags) + upl_t upl, + upl_offset_t offset, + upl_size_t size, + int flags) { - upl_page_info_t *pl; - boolean_t empty; - kern_return_t kr; + upl_page_info_t *pl; + boolean_t empty; + kern_return_t kr; - if (flags & UPL_COMMIT_FREE_ON_EMPTY) + if (flags & UPL_COMMIT_FREE_ON_EMPTY) { flags |= UPL_COMMIT_NOTIFY_EMPTY; + } if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) { return KERN_INVALID_ARGUMENT; @@ -2580,10 +2666,11 @@ ubc_upl_commit_range( pl = UPL_GET_INTERNAL_PAGE_LIST(upl); kr = upl_commit_range(upl, offset, size, flags, - pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty); + pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty); - if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) + if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) { upl_deallocate(upl); + } return kr; } @@ -2636,21 +2723,23 @@ ubc_upl_commit_range( */ kern_return_t ubc_upl_abort_range( - upl_t upl, - upl_offset_t offset, - upl_size_t size, - int abort_flags) + upl_t upl, + upl_offset_t offset, + upl_size_t size, + int abort_flags) { - kern_return_t kr; - boolean_t empty = FALSE; + kern_return_t kr; + boolean_t empty = FALSE; - if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) + if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) { abort_flags |= UPL_ABORT_NOTIFY_EMPTY; + } kr = upl_abort_range(upl, offset, size, abort_flags, &empty); - if((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) + if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) { upl_deallocate(upl); + } return kr; } @@ -2700,10 +2789,10 @@ ubc_upl_abort_range( */ kern_return_t ubc_upl_abort( - upl_t upl, - int abort_type) + upl_t upl, + int abort_type) { - kern_return_t kr; + kern_return_t kr; kr = upl_abort(upl, abort_type); upl_deallocate(upl); @@ -2731,38 +2820,42 @@ ubc_upl_abort( */ upl_page_info_t * ubc_upl_pageinfo( - upl_t upl) -{ - return (UPL_GET_INTERNAL_PAGE_LIST(upl)); + upl_t upl) +{ + return UPL_GET_INTERNAL_PAGE_LIST(upl); } -int +int UBCINFOEXISTS(const struct vnode * vp) { - return((vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL)); + return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL); } void ubc_upl_range_needed( - upl_t upl, - int index, - int count) + upl_t upl, + int index, + int count) { upl_range_needed(upl, index, count); } -boolean_t ubc_is_mapped(const struct vnode *vp, boolean_t *writable) +boolean_t +ubc_is_mapped(const struct vnode *vp, boolean_t *writable) { - if (!UBCINFOEXISTS(vp) || 
!ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) + if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) { return FALSE; - if (writable) + } + if (writable) { *writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE); + } return TRUE; } -boolean_t ubc_is_mapped_writable(const struct vnode *vp) +boolean_t +ubc_is_mapped_writable(const struct vnode *vp) { boolean_t writable; return ubc_is_mapped(vp, &writable) && writable; @@ -2788,13 +2881,13 @@ SYSCTL_INT(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blo * Function: csblob_parse_teamid * * Description: This function returns a pointer to the team id - stored within the codedirectory of the csblob. - If the codedirectory predates team-ids, it returns - NULL. - This does not copy the name but returns a pointer to - it within the CD. Subsequently, the CD must be - available when this is used. -*/ + * stored within the codedirectory of the csblob. + * If the codedirectory predates team-ids, it returns + * NULL. + * This does not copy the name but returns a pointer to + * it within the CD. Subsequently, the CD must be + * available when this is used. + */ static const char * csblob_parse_teamid(struct cs_blob *csblob) @@ -2803,15 +2896,18 @@ csblob_parse_teamid(struct cs_blob *csblob) cd = csblob->csb_cd; - if (ntohl(cd->version) < CS_SUPPORTSTEAMID) + if (ntohl(cd->version) < CS_SUPPORTSTEAMID) { return NULL; + } - if (cd->teamOffset == 0) + if (cd->teamOffset == 0) { return NULL; + } const char *name = ((const char *)cd) + ntohl(cd->teamOffset); - if (cs_debug > 1) + if (cs_debug > 1) { printf("found team-id %s in cdblob\n", name); + } return name; } @@ -2819,10 +2915,10 @@ csblob_parse_teamid(struct cs_blob *csblob) kern_return_t ubc_cs_blob_allocate( - vm_offset_t *blob_addr_p, - vm_size_t *blob_size_p) + vm_offset_t *blob_addr_p, + vm_size_t *blob_size_p) { - kern_return_t kr = KERN_FAILURE; + kern_return_t kr = KERN_FAILURE; { *blob_addr_p = (vm_offset_t) kalloc_tag(*blob_size_p, VM_KERN_MEMORY_SECURITY); @@ -2839,8 +2935,8 @@ ubc_cs_blob_allocate( void ubc_cs_blob_deallocate( - vm_offset_t blob_addr, - vm_size_t blob_size) + vm_offset_t blob_addr, + vm_size_t blob_size) { #if PMAP_CS if (blob_size > pmap_cs_blob_limit) { @@ -2848,7 +2944,7 @@ ubc_cs_blob_deallocate( } else #endif { - kfree((void *) blob_addr, blob_size); + kfree(blob_addr, blob_size); } } @@ -2869,7 +2965,7 @@ ubc_cs_supports_multilevel_hash(struct cs_blob *blob) { const CS_CodeDirectory *cd; - + /* * Only applies to binaries that ship as part of the OS, * primarily the shared cache. 
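/*
 * Editor's aside: every on-disk field in the blobs above is big-endian
 * and attacker-controlled, hence the ntohl() on each load and the
 * memchr() bound on every embedded string (see the identifier and
 * team-id checks earlier).  A hedged sketch of safely pulling such a
 * string out of a blob (hypothetical helper, host-side headers; the
 * kernel uses its own byte-order routines):
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohl */

/* Returns the NUL-terminated string at big-endian `be_off', or NULL. */
static const char *
blob_string(const unsigned char *blob, size_t len, uint32_t be_off)
{
    uint32_t off = ntohl(be_off);

    if (off == 0 || off >= len) {
        return NULL;
    }
    /* require a NUL before the end of the blob */
    if (memchr(blob + off, 0, len - off) == NULL) {
        return NULL;
    }
    return (const char *)(blob + off);
}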
@@ -2899,9 +2995,8 @@ ubc_cs_supports_multilevel_hash(struct cs_blob *blob) * Scatter lists must also have ranges that have an integral number of hashes */ if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { - const SC_Scatter *scatter = (const SC_Scatter*) - ((const char*)cd + ntohl(cd->scatterOffset)); + ((const char*)cd + ntohl(cd->scatterOffset)); /* iterate all scatter structs to make sure they are all aligned */ do { uint32_t sbase = ntohl(scatter->base); @@ -2921,7 +3016,7 @@ ubc_cs_supports_multilevel_hash(struct cs_blob *blob) } scatter++; - } while(1); + } while (1); } /* Covered range must be a multiple of the new page size */ @@ -2951,17 +3046,17 @@ ubc_cs_supports_multilevel_hash(struct cs_blob *blob) */ static int ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optional_new_cd_size, - vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p, - CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p) + vm_address_t *new_blob_addr_p, vm_size_t *new_blob_size_p, + CS_CodeDirectory **new_cd_p, CS_GenericBlob const **new_entitlements_p) { - const CS_CodeDirectory *old_cd, *cd; - CS_CodeDirectory *new_cd; + const CS_CodeDirectory *old_cd, *cd; + CS_CodeDirectory *new_cd; const CS_GenericBlob *entitlements; vm_offset_t new_blob_addr; vm_size_t new_blob_size; vm_size_t new_cdsize; - kern_return_t kr; - int error; + kern_return_t kr; + int error; old_cd = blob->csb_cd; @@ -2981,18 +3076,18 @@ ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optiona if (kr != KERN_SUCCESS) { if (cs_debug > 1) { printf("CODE SIGNING: Failed to allocate memory for new Code Signing Blob: %d\n", - kr); + kr); } return ENOMEM; } - CS_SuperBlob *new_superblob; + CS_SuperBlob *new_superblob; new_superblob = (CS_SuperBlob *)new_blob_addr; new_superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE); new_superblob->length = htonl((uint32_t)new_blob_size); if (blob->csb_entitlements_blob) { - vm_size_t ent_offset, cd_offset; + vm_size_t ent_offset, cd_offset; cd_offset = sizeof(CS_SuperBlob) + 2 * sizeof(CS_BlobIndex); ent_offset = cd_offset + new_cdsize; @@ -3021,7 +3116,7 @@ ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optiona if (error) { printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n", - error); + error); ubc_cs_blob_deallocate(new_blob_addr, new_blob_size); return error; @@ -3043,19 +3138,19 @@ ubc_cs_reconstitute_code_signature(struct cs_blob const *blob, vm_size_t optiona static int ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) { - const CS_CodeDirectory *old_cd, *cd; - CS_CodeDirectory *new_cd; + const CS_CodeDirectory *old_cd, *cd; + CS_CodeDirectory *new_cd; const CS_GenericBlob *entitlements; vm_offset_t new_blob_addr; vm_size_t new_blob_size; vm_size_t new_cdsize; - int error; + int error; - uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift); + uint32_t hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift); if (cs_debug > 1) { printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n", - (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT); + (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT); } old_cd = blob->csb_cd; @@ -3065,8 +3160,8 @@ ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize; error = ubc_cs_reconstitute_code_signature(blob, 
new_cdsize, - &new_blob_addr, &new_blob_size, &new_cd, - &entitlements); + &new_blob_addr, &new_blob_size, &new_cd, + &entitlements); if (error != 0) { printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error); return error; @@ -3085,7 +3180,7 @@ ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) { SC_Scatter *scatter = (SC_Scatter*) - ((char *)new_cd + ntohl(new_cd->scatterOffset)); + ((char *)new_cd + ntohl(new_cd->scatterOffset)); /* iterate all scatter structs to scale their counts */ do { uint32_t scount = ntohl(scatter->count); @@ -3103,7 +3198,7 @@ ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) scatter->base = htonl(sbase); scatter++; - } while(1); + } while (1); } /* For each group of hashes, hash them together */ @@ -3112,7 +3207,7 @@ ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) uint32_t hash_index; for (hash_index = 0; hash_index < nCodeSlots; hash_index++) { - union cs_hash_union mdctx; + union cs_hash_union mdctx; uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift; const unsigned char *src = src_base + hash_index * source_hash_len; @@ -3125,15 +3220,14 @@ ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements); if (error != 0) { - printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n", - error); + error); ubc_cs_blob_deallocate(new_blob_addr, new_blob_size); return error; } - /* New Code Directory is ready for use, swap it out in the blob structure */ + /* New Code Directory is ready for use, swap it out in the blob structure */ ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size); blob->csb_mem_size = new_blob_size; @@ -3149,9 +3243,9 @@ ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob) blob->csb_hash_pagemask = PAGE_MASK; blob->csb_hash_pageshift = PAGE_SHIFT; blob->csb_end_offset = ntohl(cd->codeLimit); - if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { + if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { const SC_Scatter *scatter = (const SC_Scatter*) - ((const char*)cd + ntohl(cd->scatterOffset)); + ((const char*)cd + ntohl(cd->scatterOffset)); blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE; } else { blob->csb_start_offset = 0; @@ -3174,19 +3268,20 @@ cs_blob_create_validated( vm_address_t * const addr, vm_size_t size, struct cs_blob ** const ret_blob, - CS_CodeDirectory const ** const ret_cd) + CS_CodeDirectory const ** const ret_cd) { - struct cs_blob *blob; - int error = EINVAL; + struct cs_blob *blob; + int error = EINVAL; const CS_CodeDirectory *cd; const CS_GenericBlob *entitlements; - union cs_hash_union mdctx; - size_t length; + union cs_hash_union mdctx; + size_t length; - if (ret_blob) - *ret_blob = NULL; + if (ret_blob) { + *ret_blob = NULL; + } - blob = (struct cs_blob *) kalloc(sizeof (struct cs_blob)); + blob = (struct cs_blob *) kalloc(sizeof(struct cs_blob)); if (blob == NULL) { return ENOMEM; } @@ -3212,16 +3307,15 @@ cs_blob_create_validated( */ length = (size_t) size; error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, - length, &cd, &entitlements); + length, &cd, &entitlements); if (error) { - - if (cs_debug) + if (cs_debug) { printf("CODESIGNING: csblob invalid: %d\n", error); + } /* * The vnode checker can't make the rest of this function * succeed if csblob validation failed, so bail */ 
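 /* the common exit path below frees the partially constructed blob */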
goto out; - } else { const unsigned char *md_base; uint8_t hash[CS_HASH_MAX_SIZE]; @@ -3230,8 +3324,9 @@ cs_blob_create_validated( blob->csb_cd = cd; blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */ blob->csb_hashtype = cs_find_md(cd->hashType); - if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) + if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) { panic("validated CodeDirectory but unsupported type"); + } blob->csb_hash_pageshift = cd->pageSize; blob->csb_hash_pagesize = (1U << cd->pageSize); @@ -3239,9 +3334,9 @@ cs_blob_create_validated( blob->csb_hash_firstlevel_pagesize = 0; blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID; blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + blob->csb_hash_pagemask) & ~((vm_offset_t)blob->csb_hash_pagemask)); - if((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { + if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) { const SC_Scatter *scatter = (const SC_Scatter*) - ((const char*)cd + ntohl(cd->scatterOffset)); + ((const char*)cd + ntohl(cd->scatterOffset)); blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * blob->csb_hash_pagesize; } else { blob->csb_start_offset = 0; @@ -3257,23 +3352,23 @@ cs_blob_create_validated( memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN); } - error = 0; + error = 0; out: - if (error != 0) { - cs_blob_free(blob); - blob = NULL; - cd = NULL; - } - - if (ret_blob != NULL) { - *ret_blob = blob; - } - if (ret_cd != NULL) { - *ret_cd = cd; - } - - return error; + if (error != 0) { + cs_blob_free(blob); + blob = NULL; + cd = NULL; + } + + if (ret_blob != NULL) { + *ret_blob = blob; + } + if (ret_cd != NULL) { + *ret_cd = cd; + } + + return error; } /* @@ -3281,74 +3376,77 @@ out: */ void cs_blob_free( - struct cs_blob * const blob) + struct cs_blob * const blob) { - if (blob != NULL) { - if (blob->csb_mem_kaddr) { - ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size); - blob->csb_mem_kaddr = 0; - } - if (blob->csb_entitlements != NULL) { - osobject_release(blob->csb_entitlements); - blob->csb_entitlements = NULL; - } - kfree(blob, sizeof (*blob)); - } + if (blob != NULL) { + if (blob->csb_mem_kaddr) { + ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size); + blob->csb_mem_kaddr = 0; + } + if (blob->csb_entitlements != NULL) { + osobject_release(blob->csb_entitlements); + blob->csb_entitlements = NULL; + } + (kfree)(blob, sizeof(*blob)); + } } int ubc_cs_blob_add( - struct vnode *vp, - cpu_type_t cputype, - off_t base_offset, - vm_address_t *addr, - vm_size_t size, + struct vnode *vp, + cpu_type_t cputype, + off_t base_offset, + vm_address_t *addr, + vm_size_t size, struct image_params *imgp, - __unused int flags, - struct cs_blob **ret_blob) + __unused int flags, + struct cs_blob **ret_blob) { - kern_return_t kr; - struct ubc_info *uip; - struct cs_blob *blob, *oblob; - int error; + kern_return_t kr; + struct ubc_info *uip; + struct cs_blob *blob, *oblob; + int error; CS_CodeDirectory const *cd; - off_t blob_start_offset, blob_end_offset; - boolean_t record_mtime; + off_t blob_start_offset, blob_end_offset; + boolean_t record_mtime; record_mtime = FALSE; - if (ret_blob) - *ret_blob = NULL; - - /* Create the struct cs_blob wrapper that will be attached to the vnode. - * Validates the passed in blob in the process. 
*/ - error = cs_blob_create_validated(addr, size, &blob, &cd); - - if (error != 0) { + if (ret_blob) { + *ret_blob = NULL; + } + + /* Create the struct cs_blob wrapper that will be attached to the vnode. + * Validates the passed in blob in the process. */ + error = cs_blob_create_validated(addr, size, &blob, &cd); + + if (error != 0) { printf("malform code signature blob: %d\n", error); - return error; - } + return error; + } - blob->csb_cpu_type = cputype; + blob->csb_cpu_type = cputype; blob->csb_base_offset = base_offset; /* * Let policy module check whether the blob's signature is accepted. */ #if CONFIG_MACF - unsigned int cs_flags = blob->csb_flags; + unsigned int cs_flags = blob->csb_flags; unsigned int signer_type = blob->csb_signer_type; error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags); - blob->csb_flags = cs_flags; + blob->csb_flags = cs_flags; blob->csb_signer_type = signer_type; if (error) { - if (cs_debug) + if (cs_debug) { printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); + } goto out; } if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !(blob->csb_flags & CS_PLATFORM_BINARY)) { - if (cs_debug) + if (cs_debug) { printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid); + } error = EPERM; goto out; } @@ -3366,8 +3464,8 @@ ubc_cs_blob_add( CS_GenericBlob const *new_entitlements = NULL; error = ubc_cs_reconstitute_code_signature(blob, 0, - &new_mem_kaddr, &new_mem_size, - &new_cd, &new_entitlements); + &new_mem_kaddr, &new_mem_size, + &new_cd, &new_entitlements); if (error != 0) { printf("failed code signature reconstitution: %d\n", error); @@ -3387,8 +3485,9 @@ ubc_cs_blob_add( if (blob->csb_flags & CS_PLATFORM_BINARY) { - if (cs_debug > 1) + if (cs_debug > 1) { printf("check_signature[pid: %d]: platform binary\n", current_proc()->p_pid); + } blob->csb_platform_binary = 1; blob->csb_platform_path = !!(blob->csb_flags & CS_PLATFORM_PATH); } else { @@ -3396,10 +3495,11 @@ ubc_cs_blob_add( blob->csb_platform_path = 0; blob->csb_teamid = csblob_parse_teamid(blob); if (cs_debug > 1) { - if (blob->csb_teamid) + if (blob->csb_teamid) { printf("check_signature[pid: %d]: team-id is %s\n", current_proc()->p_pid, blob->csb_teamid); - else + } else { printf("check_signature[pid: %d]: no team-id\n", current_proc()->p_pid); + } } } @@ -3427,7 +3527,7 @@ ubc_cs_blob_add( } vnode_lock(vp); - if (! 
UBCINFOEXISTS(vp)) { + if (!UBCINFOEXISTS(vp)) { vnode_unlock(vp); error = ENOENT; goto out; @@ -3436,88 +3536,88 @@ ubc_cs_blob_add( /* check if this new blob overlaps with an existing blob */ for (oblob = uip->cs_blobs; - oblob != NULL; - oblob = oblob->csb_next) { - off_t oblob_start_offset, oblob_end_offset; - - if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices - vnode_unlock(vp); - error = EALREADY; - goto out; - } else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices - if (!oblob->csb_platform_binary) { - vnode_unlock(vp); - error = EALREADY; - goto out; - } - } else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices - if (oblob->csb_platform_binary || + oblob != NULL; + oblob = oblob->csb_next) { + off_t oblob_start_offset, oblob_end_offset; + + if (blob->csb_signer_type != oblob->csb_signer_type) { // signer type needs to be the same for slices + vnode_unlock(vp); + error = EALREADY; + goto out; + } else if (blob->csb_platform_binary) { //platform binary needs to be the same for app slices + if (!oblob->csb_platform_binary) { + vnode_unlock(vp); + error = EALREADY; + goto out; + } + } else if (blob->csb_teamid) { //teamid binary needs to be the same for app slices + if (oblob->csb_platform_binary || oblob->csb_teamid == NULL || strcmp(oblob->csb_teamid, blob->csb_teamid) != 0) { vnode_unlock(vp); error = EALREADY; goto out; } - } else { // non teamid binary needs to be the same for app slices - if (oblob->csb_platform_binary || - oblob->csb_teamid != NULL) { + } else { // non teamid binary needs to be the same for app slices + if (oblob->csb_platform_binary || + oblob->csb_teamid != NULL) { vnode_unlock(vp); error = EALREADY; goto out; } - } - - oblob_start_offset = (oblob->csb_base_offset + - oblob->csb_start_offset); - oblob_end_offset = (oblob->csb_base_offset + - oblob->csb_end_offset); - if (blob_start_offset >= oblob_end_offset || - blob_end_offset <= oblob_start_offset) { - /* no conflict with this existing blob */ - } else { - /* conflict ! */ - if (blob_start_offset == oblob_start_offset && - blob_end_offset == oblob_end_offset && - blob->csb_mem_size == oblob->csb_mem_size && - blob->csb_flags == oblob->csb_flags && - (blob->csb_cpu_type == CPU_TYPE_ANY || - oblob->csb_cpu_type == CPU_TYPE_ANY || - blob->csb_cpu_type == oblob->csb_cpu_type) && - !bcmp(blob->csb_cdhash, - oblob->csb_cdhash, - CS_CDHASH_LEN)) { - /* - * We already have this blob: - * we'll return success but - * throw away the new blob. - */ - if (oblob->csb_cpu_type == CPU_TYPE_ANY) { - /* - * The old blob matches this one - * but doesn't have any CPU type. - * Update it with whatever the caller - * provided this time. - */ - oblob->csb_cpu_type = cputype; - } - - /* The signature is still accepted, so update the - * generation count. */ - uip->cs_add_gen = cs_blob_generation_count; - - vnode_unlock(vp); - if (ret_blob) - *ret_blob = oblob; - error = EAGAIN; - goto out; - } else { - /* different blob: reject the new one */ - vnode_unlock(vp); - error = EALREADY; - goto out; - } - } + } + oblob_start_offset = (oblob->csb_base_offset + + oblob->csb_start_offset); + oblob_end_offset = (oblob->csb_base_offset + + oblob->csb_end_offset); + if (blob_start_offset >= oblob_end_offset || + blob_end_offset <= oblob_start_offset) { + /* no conflict with this existing blob */ + } else { + /* conflict ! 
*/ + if (blob_start_offset == oblob_start_offset && + blob_end_offset == oblob_end_offset && + blob->csb_mem_size == oblob->csb_mem_size && + blob->csb_flags == oblob->csb_flags && + (blob->csb_cpu_type == CPU_TYPE_ANY || + oblob->csb_cpu_type == CPU_TYPE_ANY || + blob->csb_cpu_type == oblob->csb_cpu_type) && + !bcmp(blob->csb_cdhash, + oblob->csb_cdhash, + CS_CDHASH_LEN)) { + /* + * We already have this blob: + * we'll return success but + * throw away the new blob. + */ + if (oblob->csb_cpu_type == CPU_TYPE_ANY) { + /* + * The old blob matches this one + * but doesn't have any CPU type. + * Update it with whatever the caller + * provided this time. + */ + oblob->csb_cpu_type = cputype; + } + + /* The signature is still accepted, so update the + * generation count. */ + uip->cs_add_gen = cs_blob_generation_count; + + vnode_unlock(vp); + if (ret_blob) { + *ret_blob = oblob; + } + error = EAGAIN; + goto out; + } else { + /* different blob: reject the new one */ + vnode_unlock(vp); + error = EALREADY; + goto out; + } + } } @@ -3551,7 +3651,7 @@ ubc_cs_blob_add( if (cs_blob_count > cs_blob_count_peak) { cs_blob_count_peak = cs_blob_count; /* XXX atomic ? */ } - OSAddAtomic((SInt32) +blob->csb_mem_size, &cs_blob_size); + OSAddAtomic((SInt32) + blob->csb_mem_size, &cs_blob_size); if ((SInt32) cs_blob_size > cs_blob_size_peak) { cs_blob_size_peak = (SInt32) cs_blob_size; /* XXX atomic ? */ } @@ -3564,14 +3664,14 @@ ubc_cs_blob_add( const char *name = vnode_getname_printable(vp); p = current_proc(); printf("CODE SIGNING: proc %d(%s) " - "loaded %s signatures for file (%s) " - "range 0x%llx:0x%llx flags 0x%x\n", - p->p_pid, p->p_comm, - blob->csb_cpu_type == -1 ? "detached" : "embedded", - name, - blob->csb_base_offset + blob->csb_start_offset, - blob->csb_base_offset + blob->csb_end_offset, - blob->csb_flags); + "loaded %s signatures for file (%s) " + "range 0x%llx:0x%llx flags 0x%x\n", + p->p_pid, p->p_comm, + blob->csb_cpu_type == -1 ? "detached" : "embedded", + name, + blob->csb_base_offset + blob->csb_start_offset, + blob->csb_base_offset + blob->csb_end_offset, + blob->csb_flags); vnode_putname_printable(name); } @@ -3581,22 +3681,24 @@ ubc_cs_blob_add( vnode_mtime(vp, &uip->cs_mtime, vfs_context_current()); } - if (ret_blob) + if (ret_blob) { *ret_blob = blob; + } - error = 0; /* success ! */ + error = 0; /* success ! */ out: if (error) { - if (cs_debug) + if (cs_debug) { printf("check_signature[pid: %d]: error = %d\n", current_proc()->p_pid, error); + } - cs_blob_free(blob); + cs_blob_free(blob); } if (error == EAGAIN) { /* - * See above: error is EAGAIN if we were asked + * See above: error is EAGAIN if we were asked * to add an existing blob again. We cleaned the new * blob and we want to return success. */ @@ -3609,8 +3711,8 @@ out: void csvnode_print_debug(struct vnode *vp) { - const char *name = NULL; - struct ubc_info *uip; + const char *name = NULL; + struct ubc_info *uip; struct cs_blob *blob; name = vnode_getname_printable(vp); @@ -3621,7 +3723,7 @@ csvnode_print_debug(struct vnode *vp) vnode_lock_spin(vp); - if (! UBCINFOEXISTS(vp)) { + if (!UBCINFOEXISTS(vp)) { blob = NULL; goto out; } @@ -3629,40 +3731,39 @@ csvnode_print_debug(struct vnode *vp) uip = vp->v_ubcinfo; for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) { printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n", - (unsigned long)blob->csb_start_offset, - (unsigned long)blob->csb_end_offset, - blob->csb_flags, - blob->csb_platform_binary ? 
"yes" : "no", - blob->csb_platform_path ? "yes" : "no", - blob->csb_teamid ? blob->csb_teamid : ""); + (unsigned long)blob->csb_start_offset, + (unsigned long)blob->csb_end_offset, + blob->csb_flags, + blob->csb_platform_binary ? "yes" : "no", + blob->csb_platform_path ? "yes" : "no", + blob->csb_teamid ? blob->csb_teamid : ""); } out: vnode_unlock(vp); - } struct cs_blob * ubc_cs_blob_get( - struct vnode *vp, - cpu_type_t cputype, - off_t offset) + struct vnode *vp, + cpu_type_t cputype, + off_t offset) { - struct ubc_info *uip; - struct cs_blob *blob; + struct ubc_info *uip; + struct cs_blob *blob; off_t offset_in_blob; vnode_lock_spin(vp); - if (! UBCINFOEXISTS(vp)) { + if (!UBCINFOEXISTS(vp)) { blob = NULL; goto out; } uip = vp->v_ubcinfo; for (blob = uip->cs_blobs; - blob != NULL; - blob = blob->csb_next) { + blob != NULL; + blob = blob->csb_next) { if (cputype != -1 && blob->csb_cpu_type == cputype) { break; } @@ -3684,16 +3785,16 @@ out: static void ubc_cs_free( - struct ubc_info *uip) + struct ubc_info *uip) { - struct cs_blob *blob, *next_blob; + struct cs_blob *blob, *next_blob; for (blob = uip->cs_blobs; - blob != NULL; - blob = next_blob) { + blob != NULL; + blob = next_blob) { next_blob = blob->csb_next; OSAddAtomic(-1, &cs_blob_count); - OSAddAtomic((SInt32) -blob->csb_mem_size, &cs_blob_size); + OSAddAtomic((SInt32) - blob->csb_mem_size, &cs_blob_size); cs_blob_free(blob); } #if CHECK_CS_VALIDATION_BITMAP @@ -3709,7 +3810,7 @@ ubc_cs_free( */ int ubc_cs_generation_check( - struct vnode *vp) + struct vnode *vp) { int retval = ENEEDAUTH; @@ -3725,7 +3826,7 @@ ubc_cs_generation_check( int ubc_cs_blob_revalidate( - struct vnode *vp, + struct vnode *vp, struct cs_blob *blob, struct image_params *imgp, int flags @@ -3740,7 +3841,7 @@ ubc_cs_blob_revalidate( size = blob->csb_mem_size; error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr, - size, &cd, &entitlements); + size, &cd, &entitlements); if (error) { if (cs_debug) { printf("CODESIGNING: csblob invalid: %d\n", error); @@ -3748,8 +3849,8 @@ ubc_cs_blob_revalidate( goto out; } - unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID; - unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN; + unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID; + unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN; if (blob->csb_reconstituted) { /* @@ -3783,7 +3884,7 @@ ubc_cs_blob_revalidate( #if CONFIG_MACF error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags); if (cs_debug && error) { - printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); + printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); } #else (void)flags; @@ -3792,13 +3893,14 @@ ubc_cs_blob_revalidate( /* update generation number if success */ vnode_lock_spin(vp); - blob->csb_flags = cs_flags; + blob->csb_flags = cs_flags; blob->csb_signer_type = signer_type; if (UBCINFOEXISTS(vp)) { - if (error == 0) + if (error == 0) { vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count; - else + } else { vp->v_ubcinfo->cs_add_gen = 0; + } } vnode_unlock(vp); @@ -3817,10 +3919,10 @@ cs_blob_reset_cache() struct cs_blob * ubc_get_cs_blobs( - struct vnode *vp) + struct vnode *vp) { - struct ubc_info *uip; - struct cs_blob *blobs; + struct ubc_info *uip; + struct cs_blob *blobs; /* * No need to take the vnode lock here. The caller must be holding @@ -3837,7 +3939,7 @@ ubc_get_cs_blobs( * vnode lock, for example. */ - if (! 
UBCINFOEXISTS(vp)) { + if (!UBCINFOEXISTS(vp)) { blobs = NULL; goto out; } @@ -3851,12 +3953,12 @@ out: void ubc_get_cs_mtime( - struct vnode *vp, - struct timespec *cs_mtime) + struct vnode *vp, + struct timespec *cs_mtime) { - struct ubc_info *uip; + struct ubc_info *uip; - if (! UBCINFOEXISTS(vp)) { + if (!UBCINFOEXISTS(vp)) { cs_mtime->tv_sec = 0; cs_mtime->tv_nsec = 0; return; @@ -3871,34 +3973,34 @@ unsigned long cs_validate_page_no_hash = 0; unsigned long cs_validate_page_bad_hash = 0; static boolean_t cs_validate_hash( - struct cs_blob *blobs, - memory_object_t pager, - memory_object_offset_t page_offset, - const void *data, - vm_size_t *bytes_processed, - unsigned *tainted) + struct cs_blob *blobs, + memory_object_t pager, + memory_object_offset_t page_offset, + const void *data, + vm_size_t *bytes_processed, + unsigned *tainted) { - union cs_hash_union mdctx; - struct cs_hash const *hashtype = NULL; - unsigned char actual_hash[CS_HASH_MAX_SIZE]; - unsigned char expected_hash[CS_HASH_MAX_SIZE]; - boolean_t found_hash; - struct cs_blob *blob; - const CS_CodeDirectory *cd; - const unsigned char *hash; - boolean_t validated; - off_t offset; /* page offset in the file */ - size_t size; - off_t codeLimit = 0; - const char *lower_bound, *upper_bound; - vm_offset_t kaddr, blob_addr; + union cs_hash_union mdctx; + struct cs_hash const *hashtype = NULL; + unsigned char actual_hash[CS_HASH_MAX_SIZE]; + unsigned char expected_hash[CS_HASH_MAX_SIZE]; + boolean_t found_hash; + struct cs_blob *blob; + const CS_CodeDirectory *cd; + const unsigned char *hash; + boolean_t validated; + off_t offset; /* page offset in the file */ + size_t size; + off_t codeLimit = 0; + const char *lower_bound, *upper_bound; + vm_offset_t kaddr, blob_addr; /* retrieve the expected hash */ found_hash = FALSE; for (blob = blobs; - blob != NULL; - blob = blob->csb_next) { + blob != NULL; + blob = blob->csb_next) { offset = page_offset - blob->csb_base_offset; if (offset < blob->csb_start_offset || offset >= blob->csb_end_offset) { @@ -3915,24 +4017,27 @@ cs_validate_hash( blob_addr = kaddr + blob->csb_mem_offset; lower_bound = CAST_DOWN(char *, blob_addr); upper_bound = lower_bound + blob->csb_mem_size; - + cd = blob->csb_cd; if (cd != NULL) { /* all CD's that have been injected is already validated */ hashtype = blob->csb_hashtype; - if (hashtype == NULL) + if (hashtype == NULL) { panic("unknown hash type ?"); - if (hashtype->cs_digest_size > sizeof(actual_hash)) + } + if (hashtype->cs_digest_size > sizeof(actual_hash)) { panic("hash size too large"); - if (offset & blob->csb_hash_pagemask) + } + if (offset & blob->csb_hash_pagemask) { panic("offset not aligned to cshash boundary"); + } codeLimit = ntohl(cd->codeLimit); - hash = hashes(cd, (uint32_t)(offset>>blob->csb_hash_pageshift), - hashtype->cs_size, - lower_bound, upper_bound); + hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift), + hashtype->cs_size, + lower_bound, upper_bound); if (hash != NULL) { bcopy(hash, expected_hash, hashtype->cs_size); found_hash = TRUE; @@ -3954,13 +4059,12 @@ cs_validate_hash( cs_validate_page_no_hash++; if (cs_debug > 1) { printf("CODE SIGNING: cs_validate_page: " - "mobj %p off 0x%llx: no hash to validate !?\n", - pager, page_offset); + "mobj %p off 0x%llx: no hash to validate !?\n", + pager, page_offset); } validated = FALSE; *tainted = 0; } else { - *tainted = 0; size = blob->csb_hash_pagesize; @@ -3979,10 +4083,10 @@ cs_validate_hash( if (blob->csb_hash_firstlevel_pagesize) { const unsigned char *partial_data = 
(const unsigned char *)data; size_t i; - for (i=0; i < size;) { - union cs_hash_union partialctx; + for (i = 0; i < size;) { + union cs_hash_union partialctx; unsigned char partial_digest[CS_HASH_MAX_SIZE]; - size_t partial_size = MIN(size-i, blob->csb_hash_firstlevel_pagesize); + size_t partial_size = MIN(size - i, blob->csb_hash_firstlevel_pagesize); hashtype->cs_init(&partialctx); hashtype->cs_update(&partialctx, partial_data, partial_size); @@ -4004,39 +4108,39 @@ cs_validate_hash( if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) { if (cs_debug) { printf("CODE SIGNING: cs_validate_page: " - "mobj %p off 0x%llx size 0x%lx: " - "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != " - "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n", - pager, page_offset, size, - asha1[0], asha1[1], asha1[2], - asha1[3], asha1[4], - esha1[0], esha1[1], esha1[2], - esha1[3], esha1[4]); + "mobj %p off 0x%llx size 0x%lx: " + "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != " + "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n", + pager, page_offset, size, + asha1[0], asha1[1], asha1[2], + asha1[3], asha1[4], + esha1[0], esha1[1], esha1[2], + esha1[3], esha1[4]); } cs_validate_page_bad_hash++; *tainted |= CS_VALIDATE_TAINTED; } else { if (cs_debug > 10) { printf("CODE SIGNING: cs_validate_page: " - "mobj %p off 0x%llx size 0x%lx: " - "SHA1 OK\n", - pager, page_offset, size); + "mobj %p off 0x%llx size 0x%lx: " + "SHA1 OK\n", + pager, page_offset, size); } } validated = TRUE; } - + return validated; } boolean_t cs_validate_range( - struct vnode *vp, - memory_object_t pager, - memory_object_offset_t page_offset, - const void *data, - vm_size_t dsize, - unsigned *tainted) + struct vnode *vp, + memory_object_t pager, + memory_object_offset_t page_offset, + const void *data, + vm_size_t dsize, + unsigned *tainted) { vm_size_t offset_in_range; boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */ @@ -4046,18 +4150,18 @@ cs_validate_range( *tainted = 0; for (offset_in_range = 0; - offset_in_range < dsize; - /* offset_in_range updated based on bytes processed */) { + offset_in_range < dsize; + /* offset_in_range updated based on bytes processed */) { unsigned subrange_tainted = 0; boolean_t subrange_validated; vm_size_t bytes_processed = 0; subrange_validated = cs_validate_hash(blobs, - pager, - page_offset + offset_in_range, - (const void *)((const char *)data + offset_in_range), - &bytes_processed, - &subrange_tainted); + pager, + page_offset + offset_in_range, + (const void *)((const char *)data + offset_in_range), + &bytes_processed, + &subrange_tainted); *tainted |= subrange_tainted; @@ -4078,20 +4182,20 @@ cs_validate_range( int ubc_cs_getcdhash( - vnode_t vp, - off_t offset, - unsigned char *cdhash) + vnode_t vp, + off_t offset, + unsigned char *cdhash) { - struct cs_blob *blobs, *blob; - off_t rel_offset; - int ret; + struct cs_blob *blobs, *blob; + off_t rel_offset; + int ret; vnode_lock(vp); blobs = ubc_get_cs_blobs(vp); for (blob = blobs; - blob != NULL; - blob = blob->csb_next) { + blob != NULL; + blob = blob->csb_next) { /* compute offset relative to this blob */ rel_offset = offset - blob->csb_base_offset; if (rel_offset >= blob->csb_start_offset && @@ -4106,7 +4210,7 @@ ubc_cs_getcdhash( ret = EBADEXEC; /* XXX any better error ? 
*/ } else { /* get the SHA1 hash of that blob */ - bcopy(blob->csb_cdhash, cdhash, sizeof (blob->csb_cdhash)); + bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash)); ret = 0; } @@ -4117,13 +4221,13 @@ ubc_cs_getcdhash( boolean_t ubc_cs_is_range_codesigned( - vnode_t vp, - mach_vm_offset_t start, - mach_vm_size_t size) + vnode_t vp, + mach_vm_offset_t start, + mach_vm_size_t size) { - struct cs_blob *csblob; - mach_vm_offset_t blob_start; - mach_vm_offset_t blob_end; + struct cs_blob *csblob; + mach_vm_offset_t blob_start; + mach_vm_offset_t blob_end; if (vp == NULL) { /* no file: no code signature */ @@ -4150,9 +4254,9 @@ ubc_cs_is_range_codesigned( * would have to iterate if the blob does not cover the full range. */ blob_start = (mach_vm_offset_t) (csblob->csb_base_offset + - csblob->csb_start_offset); + csblob->csb_start_offset); blob_end = (mach_vm_offset_t) (csblob->csb_base_offset + - csblob->csb_end_offset); + csblob->csb_end_offset); if (blob_start > start || blob_end < (start + size)) { /* range not fully covered by this code-signing blob */ return FALSE; @@ -4162,8 +4266,8 @@ ubc_cs_is_range_codesigned( } #if CHECK_CS_VALIDATION_BITMAP -#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3) -extern boolean_t root_fs_upgrade_try; +#define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3) +extern boolean_t root_fs_upgrade_try; /* * Should we use the code-sign bitmap to avoid repeated code-sign validation? @@ -4172,30 +4276,30 @@ extern boolean_t root_fs_upgrade_try; * b) Has someone tried to mount the root filesystem read-write? * If answers are (a) yes AND (b) no, then we can use the bitmap. */ -#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try) +#define USE_CODE_SIGN_BITMAP(vp) ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try) kern_return_t ubc_cs_validation_bitmap_allocate( - vnode_t vp) + vnode_t vp) { - kern_return_t kr = KERN_SUCCESS; + kern_return_t kr = KERN_SUCCESS; struct ubc_info *uip; - char *target_bitmap; - vm_object_size_t bitmap_size; + char *target_bitmap; + vm_object_size_t bitmap_size; - if ( ! USE_CODE_SIGN_BITMAP(vp) || (! UBCINFOEXISTS(vp))) { + if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) { kr = KERN_INVALID_ARGUMENT; } else { uip = vp->v_ubcinfo; - if ( uip->cs_valid_bitmap == NULL ) { + if (uip->cs_valid_bitmap == NULL) { bitmap_size = stob(uip->ui_size); - target_bitmap = (char*) kalloc( (vm_size_t)bitmap_size ); + target_bitmap = (char*) kalloc((vm_size_t)bitmap_size ); if (target_bitmap == 0) { kr = KERN_NO_SPACE; } else { kr = KERN_SUCCESS; } - if( kr == KERN_SUCCESS ) { + if (kr == KERN_SUCCESS) { memset( target_bitmap, 0, (size_t)bitmap_size); uip->cs_valid_bitmap = (void*)target_bitmap; uip->cs_valid_bitmap_size = bitmap_size; @@ -4206,30 +4310,29 @@ ubc_cs_validation_bitmap_allocate( } kern_return_t -ubc_cs_check_validation_bitmap ( - vnode_t vp, - memory_object_offset_t offset, - int optype) +ubc_cs_check_validation_bitmap( + vnode_t vp, + memory_object_offset_t offset, + int optype) { - kern_return_t kr = KERN_SUCCESS; + kern_return_t kr = KERN_SUCCESS; - if ( ! USE_CODE_SIGN_BITMAP(vp) || ! 
UBCINFOEXISTS(vp)) { + if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) { kr = KERN_INVALID_ARGUMENT; } else { struct ubc_info *uip = vp->v_ubcinfo; - char *target_bitmap = uip->cs_valid_bitmap; + char *target_bitmap = uip->cs_valid_bitmap; - if ( target_bitmap == NULL ) { - kr = KERN_INVALID_ARGUMENT; + if (target_bitmap == NULL) { + kr = KERN_INVALID_ARGUMENT; } else { - uint64_t bit, byte; + uint64_t bit, byte; bit = atop_64( offset ); byte = bit >> 3; - if ( byte > uip->cs_valid_bitmap_size ) { - kr = KERN_INVALID_ARGUMENT; + if (byte > uip->cs_valid_bitmap_size) { + kr = KERN_INVALID_ARGUMENT; } else { - if (optype == CS_BITMAP_SET) { target_bitmap[byte] |= (1 << (bit & 07)); kr = KERN_SUCCESS; @@ -4237,7 +4340,7 @@ ubc_cs_check_validation_bitmap ( target_bitmap[byte] &= ~(1 << (bit & 07)); kr = KERN_SUCCESS; } else if (optype == CS_BITMAP_CHECK) { - if ( target_bitmap[byte] & (1 << (bit & 07))) { + if (target_bitmap[byte] & (1 << (bit & 07))) { kr = KERN_SUCCESS; } else { kr = KERN_FAILURE; @@ -4251,16 +4354,16 @@ ubc_cs_check_validation_bitmap ( void ubc_cs_validation_bitmap_deallocate( - vnode_t vp) + vnode_t vp) { struct ubc_info *uip; - void *target_bitmap; - vm_object_size_t bitmap_size; + void *target_bitmap; + vm_object_size_t bitmap_size; - if ( UBCINFOEXISTS(vp)) { + if (UBCINFOEXISTS(vp)) { uip = vp->v_ubcinfo; - if ( (target_bitmap = uip->cs_valid_bitmap) != NULL ) { + if ((target_bitmap = uip->cs_valid_bitmap) != NULL) { bitmap_size = uip->cs_valid_bitmap_size; kfree( target_bitmap, (vm_size_t) bitmap_size ); uip->cs_valid_bitmap = NULL; @@ -4268,19 +4371,24 @@ ubc_cs_validation_bitmap_deallocate( } } #else -kern_return_t ubc_cs_validation_bitmap_allocate(__unused vnode_t vp){ +kern_return_t +ubc_cs_validation_bitmap_allocate(__unused vnode_t vp) +{ return KERN_INVALID_ARGUMENT; } -kern_return_t ubc_cs_check_validation_bitmap( - __unused struct vnode *vp, +kern_return_t +ubc_cs_check_validation_bitmap( + __unused struct vnode *vp, __unused memory_object_offset_t offset, - __unused int optype){ - + __unused int optype) +{ return KERN_INVALID_ARGUMENT; } -void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){ +void +ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp) +{ return; } #endif /* CHECK_CS_VALIDATION_BITMAP */ @@ -4288,31 +4396,31 @@ void ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp){ #if PMAP_CS kern_return_t cs_associate_blob_with_mapping( - void *pmap, - vm_map_offset_t start, - vm_map_size_t size, - vm_object_offset_t offset, - void *blobs_p) + void *pmap, + vm_map_offset_t start, + vm_map_size_t size, + vm_object_offset_t offset, + void *blobs_p) { - off_t blob_start_offset, blob_end_offset; - kern_return_t kr; - struct cs_blob *blobs, *blob; - vm_offset_t kaddr; + off_t blob_start_offset, blob_end_offset; + kern_return_t kr; + struct cs_blob *blobs, *blob; + vm_offset_t kaddr; struct pmap_cs_code_directory *cd_entry = NULL; if (!pmap_cs) { return KERN_NOT_SUPPORTED; } - + blobs = (struct cs_blob *)blobs_p; for (blob = blobs; - blob != NULL; - blob = blob->csb_next) { + blob != NULL; + blob = blob->csb_next) { blob_start_offset = (blob->csb_base_offset + - blob->csb_start_offset); + blob->csb_start_offset); blob_end_offset = (blob->csb_base_offset + - blob->csb_end_offset); + blob->csb_end_offset); if ((off_t) offset < blob_start_offset || (off_t) offset >= blob_end_offset || (off_t) (offset + size) <= blob_start_offset || @@ -4334,9 +4442,9 @@ cs_associate_blob_with_mapping( if (cd_entry != NULL) { kr = pmap_cs_associate(pmap, - 
cd_entry, - start, - size); + cd_entry, + start, + size); } else { kr = KERN_CODESIGN_ERROR; } diff --git a/bsd/kern/uipc_domain.c b/bsd/kern/uipc_domain.c index 4433e81bd..6b798e832 100644 --- a/bsd/kern/uipc_domain.c +++ b/bsd/kern/uipc_domain.c @@ -95,14 +95,14 @@ static void detach_domain(struct domain *); static struct protosw *pffindprotonotype_locked(int, int, int); static struct domain *pffinddomain_locked(int); -static boolean_t domain_timeout_run; /* domain timer is scheduled to run */ +static boolean_t domain_timeout_run; /* domain timer is scheduled to run */ static boolean_t domain_draining; static void domain_sched_timeout(void); static void domain_timeout(void *); -lck_grp_t *domain_proto_mtx_grp; -lck_attr_t *domain_proto_mtx_attr; -static lck_grp_attr_t *domain_proto_mtx_grp_attr; +lck_grp_t *domain_proto_mtx_grp; +lck_attr_t *domain_proto_mtx_attr; +static lck_grp_attr_t *domain_proto_mtx_grp_attr; decl_lck_mtx_data(static, domain_proto_mtx); decl_lck_mtx_data(static, domain_timeout_mtx); @@ -115,9 +115,9 @@ SYSCTL_DECL(_kern_ipc); static int sysctl_do_drain_domains SYSCTL_HANDLER_ARGS; SYSCTL_PROC(_kern_ipc, OID_AUTO, do_drain_domains, - CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, - 0, 0, - sysctl_do_drain_domains, "I", "force manual drain domains"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, + sysctl_do_drain_domains, "I", "force manual drain domains"); #endif /* DEVELOPMENT || DEBUG */ @@ -128,8 +128,9 @@ pr_init_old(struct protosw *pp, struct domain *dp) VERIFY(pp->pr_flags & PR_OLD); VERIFY(pp->pr_old != NULL); - if (pp->pr_old->pr_init != NULL) + if (pp->pr_old->pr_init != NULL) { pp->pr_old->pr_init(); + } } static void @@ -139,8 +140,9 @@ init_proto(struct protosw *pp, struct domain *dp) if (!(pp->pr_flags & PR_INITIALIZED)) { TAILQ_INIT(&pp->pr_filter_head); - if (pp->pr_init != NULL) + if (pp->pr_init != NULL) { pp->pr_init(pp, dp); + } pp->pr_flags |= PR_INITIALIZED; } } @@ -182,8 +184,9 @@ dom_init_old(struct domain *dp) VERIFY(dp->dom_flags & DOM_OLD); VERIFY(dp->dom_old != NULL); - if (dp->dom_old->dom_init != NULL) + if (dp->dom_old->dom_init != NULL) { dp->dom_old->dom_init(); + } } static void @@ -196,19 +199,22 @@ init_domain(struct domain *dp) domain_proto_mtx_attr); dp->dom_mtx = &dp->dom_mtx_s; TAILQ_INIT(&dp->dom_protosw); - if (dp->dom_init != NULL) + if (dp->dom_init != NULL) { dp->dom_init(dp); + } dp->dom_flags |= DOM_INITIALIZED; } /* Recompute for new protocol */ - if (_max_linkhdr < 16) /* XXX - Sheesh; everything's ether? */ + if (_max_linkhdr < 16) { /* XXX - Sheesh; everything's ether? 
*/ _max_linkhdr = 16; - _max_linkhdr = max_linkhdr; /* round it up */ + } + _max_linkhdr = max_linkhdr; /* round it up */ - if (dp->dom_protohdrlen > _max_protohdr) + if (dp->dom_protohdrlen > _max_protohdr) { _max_protohdr = dp->dom_protohdrlen; - _max_protohdr = max_protohdr; /* round it up */ + } + _max_protohdr = max_protohdr; /* round it up */ max_hdr = max_linkhdr + max_protohdr; max_datalen = MHLEN - max_hdr; @@ -283,7 +289,7 @@ net_add_domain_old(struct domain_old *odp) /* NOTREACHED */ } - dp = _MALLOC(sizeof (*dp), M_TEMP, M_WAITOK | M_ZERO); + dp = _MALLOC(sizeof(*dp), M_TEMP, M_WAITOK | M_ZERO); if (dp == NULL) { /* * There is really nothing better than to panic here, @@ -296,23 +302,23 @@ net_add_domain_old(struct domain_old *odp) } /* Copy everything but dom_init, dom_mtx, dom_next and dom_refs */ - dp->dom_family = odp->dom_family; - dp->dom_flags = (odp->dom_flags & DOMF_USERFLAGS) | DOM_OLD; - dp->dom_name = odp->dom_name; - dp->dom_init = dom_init_old; - dp->dom_externalize = odp->dom_externalize; - dp->dom_dispose = odp->dom_dispose; - dp->dom_rtattach = odp->dom_rtattach; - dp->dom_rtoffset = odp->dom_rtoffset; - dp->dom_maxrtkey = odp->dom_maxrtkey; - dp->dom_protohdrlen = odp->dom_protohdrlen; - dp->dom_old = odp; + dp->dom_family = odp->dom_family; + dp->dom_flags = (odp->dom_flags & DOMF_USERFLAGS) | DOM_OLD; + dp->dom_name = odp->dom_name; + dp->dom_init = dom_init_old; + dp->dom_externalize = odp->dom_externalize; + dp->dom_dispose = odp->dom_dispose; + dp->dom_rtattach = odp->dom_rtattach; + dp->dom_rtoffset = odp->dom_rtoffset; + dp->dom_maxrtkey = odp->dom_maxrtkey; + dp->dom_protohdrlen = odp->dom_protohdrlen; + dp->dom_old = odp; attach_domain(dp); init_domain(dp); /* Point the mutex back to the internal structure's */ - odp->dom_mtx = dp->dom_mtx; + odp->dom_mtx = dp->dom_mtx; domain_guard_release(guard); } @@ -335,11 +341,13 @@ net_del_domain_old(struct domain_old *odp) } TAILQ_FOREACH_SAFE(dp1, &domains, dom_entry, dp2) { - if (!(dp1->dom_flags & DOM_OLD)) + if (!(dp1->dom_flags & DOM_OLD)) { continue; + } VERIFY(dp1->dom_old != NULL); - if (odp == dp1->dom_old) + if (odp == dp1->dom_old) { break; + } } if (dp1 != NULL) { struct protosw *pp1, *pp2; @@ -350,10 +358,12 @@ net_del_domain_old(struct domain_old *odp) /* Remove all protocols attached to this domain */ TAILQ_FOREACH_SAFE(pp1, &dp1->dom_protosw, pr_entry, pp2) { detach_proto(pp1, dp1); - if (pp1->pr_usrreqs->pru_flags & PRUF_OLD) + if (pp1->pr_usrreqs->pru_flags & PRUF_OLD) { FREE(pp1->pr_usrreqs, M_TEMP); - if (pp1->pr_flags & PR_OLD) + } + if (pp1->pr_flags & PR_OLD) { FREE(pp1, M_TEMP); + } } detach_domain(dp1); @@ -363,7 +373,7 @@ net_del_domain_old(struct domain_old *odp) } done: domain_guard_release(guard); - return (error); + return error; } /* @@ -400,15 +410,17 @@ net_add_proto(struct protosw *pp, struct domain *dp, int doinit) TAILQ_FOREACH(pp1, &dp->dom_protosw, pr_entry) { if (pp1->pr_type == pp->pr_type && - pp1->pr_protocol == pp->pr_protocol) - return (EEXIST); + pp1->pr_protocol == pp->pr_protocol) { + return EEXIST; + } } attach_proto(pp, dp); - if (doinit) + if (doinit) { net_init_proto(pp, dp); + } - return (0); + return 0; } void @@ -446,10 +458,12 @@ net_add_proto_old(struct protosw_old *opp, struct domain_old *odp) /* Make sure the domain has been added via net_add_domain */ TAILQ_FOREACH(dp, &domains, dom_entry) { - if (!(dp->dom_flags & DOM_OLD)) + if (!(dp->dom_flags & DOM_OLD)) { continue; - if (dp->dom_old == odp) + } + if (dp->dom_old == odp) { break; + } } if (dp == 
NULL) { error = EINVAL; @@ -470,35 +484,35 @@ net_add_proto_old(struct protosw_old *opp, struct domain_old *odp) /* NOTREACHED */ } - pru = _MALLOC(sizeof (*pru), M_TEMP, M_WAITOK | M_ZERO); + pru = _MALLOC(sizeof(*pru), M_TEMP, M_WAITOK | M_ZERO); if (pru == NULL) { error = ENOMEM; goto done; } - pru->pru_flags = PRUF_OLD; - pru->pru_abort = opru->pru_abort; - pru->pru_accept = opru->pru_accept; - pru->pru_attach = opru->pru_attach; - pru->pru_bind = opru->pru_bind; - pru->pru_connect = opru->pru_connect; - pru->pru_connect2 = opru->pru_connect2; - pru->pru_control = opru->pru_control; - pru->pru_detach = opru->pru_detach; - pru->pru_disconnect = opru->pru_disconnect; - pru->pru_listen = opru->pru_listen; - pru->pru_peeraddr = opru->pru_peeraddr; - pru->pru_rcvd = opru->pru_rcvd; - pru->pru_rcvoob = opru->pru_rcvoob; - pru->pru_send = opru->pru_send; - pru->pru_sense = opru->pru_sense; - pru->pru_shutdown = opru->pru_shutdown; - pru->pru_sockaddr = opru->pru_sockaddr; - pru->pru_sosend = opru->pru_sosend; - pru->pru_soreceive = opru->pru_soreceive; - pru->pru_sopoll = opru->pru_sopoll; - - pp = _MALLOC(sizeof (*pp), M_TEMP, M_WAITOK | M_ZERO); + pru->pru_flags = PRUF_OLD; + pru->pru_abort = opru->pru_abort; + pru->pru_accept = opru->pru_accept; + pru->pru_attach = opru->pru_attach; + pru->pru_bind = opru->pru_bind; + pru->pru_connect = opru->pru_connect; + pru->pru_connect2 = opru->pru_connect2; + pru->pru_control = opru->pru_control; + pru->pru_detach = opru->pru_detach; + pru->pru_disconnect = opru->pru_disconnect; + pru->pru_listen = opru->pru_listen; + pru->pru_peeraddr = opru->pru_peeraddr; + pru->pru_rcvd = opru->pru_rcvd; + pru->pru_rcvoob = opru->pru_rcvoob; + pru->pru_send = opru->pru_send; + pru->pru_sense = opru->pru_sense; + pru->pru_shutdown = opru->pru_shutdown; + pru->pru_sockaddr = opru->pru_sockaddr; + pru->pru_sosend = opru->pru_sosend; + pru->pru_soreceive = opru->pru_soreceive; + pru->pru_sopoll = opru->pru_sopoll; + + pp = _MALLOC(sizeof(*pp), M_TEMP, M_WAITOK | M_ZERO); if (pp == NULL) { error = ENOMEM; goto done; @@ -519,21 +533,21 @@ net_add_proto_old(struct protosw_old *opp, struct domain_old *odp) } /* Copy everything but pr_init, pr_next, pr_domain, pr_protosw */ - pp->pr_type = opp->pr_type; - pp->pr_protocol = opp->pr_protocol; - pp->pr_flags = (opp->pr_flags & PRF_USERFLAGS) | PR_OLD; - pp->pr_input = opp->pr_input; - pp->pr_output = opp->pr_output; - pp->pr_ctlinput = opp->pr_ctlinput; - pp->pr_ctloutput = opp->pr_ctloutput; - pp->pr_usrreqs = pru; - pp->pr_init = pr_init_old; - pp->pr_drain = opp->pr_drain; - pp->pr_sysctl = opp->pr_sysctl; - pp->pr_lock = opp->pr_lock; - pp->pr_unlock = opp->pr_unlock; - pp->pr_getlock = opp->pr_getlock; - pp->pr_old = opp; + pp->pr_type = opp->pr_type; + pp->pr_protocol = opp->pr_protocol; + pp->pr_flags = (opp->pr_flags & PRF_USERFLAGS) | PR_OLD; + pp->pr_input = opp->pr_input; + pp->pr_output = opp->pr_output; + pp->pr_ctlinput = opp->pr_ctlinput; + pp->pr_ctloutput = opp->pr_ctloutput; + pp->pr_usrreqs = pru; + pp->pr_init = pr_init_old; + pp->pr_drain = opp->pr_drain; + pp->pr_sysctl = opp->pr_sysctl; + pp->pr_lock = opp->pr_lock; + pp->pr_unlock = opp->pr_unlock; + pp->pr_getlock = opp->pr_getlock; + pp->pr_old = opp; /* attach as well as initialize */ attach_proto(pp, dp); @@ -544,14 +558,16 @@ done: "error %d\n", __func__, odp->dom_family, odp->dom_name, opp->pr_protocol, error); - if (pru != NULL) + if (pru != NULL) { FREE(pru, M_TEMP); - if (pp != NULL) + } + if (pp != NULL) { FREE(pp, M_TEMP); + } } 
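 /* success or failure, drop the domain guard taken at entry */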
domain_guard_release(guard); - return (error); + return error; } /* @@ -575,19 +591,23 @@ net_del_proto(int type, int protocol, struct domain *dp) domain_proto_mtx_lock_assert_held(); TAILQ_FOREACH(pp, &dp->dom_protosw, pr_entry) { - if (pp->pr_type == type && pp->pr_protocol == protocol) + if (pp->pr_type == type && pp->pr_protocol == protocol) { break; + } + } + if (pp == NULL) { + return ENXIO; } - if (pp == NULL) - return (ENXIO); detach_proto(pp, dp); - if (pp->pr_usrreqs->pru_flags & PRUF_OLD) + if (pp->pr_usrreqs->pru_flags & PRUF_OLD) { FREE(pp->pr_usrreqs, M_TEMP); - if (pp->pr_flags & PR_OLD) + } + if (pp->pr_flags & PR_OLD) { FREE(pp, M_TEMP); + } - return (0); + return 0; } /* @@ -609,10 +629,12 @@ net_del_proto_old(int type, int protocol, struct domain_old *odp) /* Make sure the domain has been added via net_add_domain */ TAILQ_FOREACH(dp, &domains, dom_entry) { - if (!(dp->dom_flags & DOM_OLD)) + if (!(dp->dom_flags & DOM_OLD)) { continue; - if (dp->dom_old == odp) + } + if (dp->dom_old == odp) { break; + } } if (dp == NULL) { error = ENXIO; @@ -620,22 +642,25 @@ net_del_proto_old(int type, int protocol, struct domain_old *odp) } TAILQ_FOREACH(pp, &dp->dom_protosw, pr_entry) { - if (pp->pr_type == type && pp->pr_protocol == protocol) + if (pp->pr_type == type && pp->pr_protocol == protocol) { break; + } } if (pp == NULL) { error = ENXIO; goto done; } detach_proto(pp, dp); - if (pp->pr_usrreqs->pru_flags & PRUF_OLD) + if (pp->pr_usrreqs->pru_flags & PRUF_OLD) { FREE(pp->pr_usrreqs, M_TEMP); - if (pp->pr_flags & PR_OLD) + } + if (pp->pr_flags & PR_OLD) { FREE(pp, M_TEMP); + } done: domain_guard_release(guard); - return (error); + return error; } static void @@ -688,8 +713,9 @@ domain_timeout(void *arg) guard = domain_guard_deploy(); TAILQ_FOREACH(dp, &domains, dom_entry) { TAILQ_FOREACH(pp, &dp->dom_protosw, pr_entry) { - if (pp->pr_drain != NULL) + if (pp->pr_drain != NULL) { (*pp->pr_drain)(); + } } } domain_guard_release(guard); @@ -749,14 +775,14 @@ domaininit(void) attach_domain(&keydomain_s); #endif /* IPSEC */ attach_domain(&ndrvdomain_s); - attach_domain(&routedomain_s); /* must be last domain */ + attach_domain(&routedomain_s); /* must be last domain */ /* * Now ask them all to init (XXX including the routing domain, * see above) */ TAILQ_FOREACH(dp, &domains, dom_entry) - init_domain(dp); + init_domain(dp); domain_guard_release(guard); } @@ -769,10 +795,11 @@ pffinddomain_locked(int pf) domain_proto_mtx_lock_assert_held(); TAILQ_FOREACH(dp, &domains, dom_entry) { - if (dp->dom_family == pf) + if (dp->dom_family == pf) { break; + } } - return (dp); + return dp; } struct protosw * @@ -783,16 +810,18 @@ pffindtype(int family, int type) domain_guard_t guard; guard = domain_guard_deploy(); - if ((dp = pffinddomain_locked(family)) == NULL) + if ((dp = pffinddomain_locked(family)) == NULL) { goto done; + } TAILQ_FOREACH(pp, &dp->dom_protosw, pr_entry) { - if (pp->pr_type != 0 && pp->pr_type == type) + if (pp->pr_type != 0 && pp->pr_type == type) { goto done; + } } done: domain_guard_release(guard); - return (pp); + return pp; } /* @@ -807,7 +836,7 @@ pffinddomain(int pf) guard = domain_guard_deploy(); dp = pffinddomain_locked(pf); domain_guard_release(guard); - return (dp); + return dp; } /* @@ -821,10 +850,11 @@ pffinddomain_old(int pf) domain_guard_t guard; guard = domain_guard_deploy(); - if ((dp = pffinddomain_locked(pf)) != NULL && (dp->dom_flags & DOM_OLD)) + if ((dp = pffinddomain_locked(pf)) != NULL && (dp->dom_flags & DOM_OLD)) { odp = dp->dom_old; + } 
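 /* dom_old is only populated for domains registered via the legacy net_add_domain_old() path */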
domain_guard_release(guard); - return (odp); + return odp; } /* @@ -839,7 +869,7 @@ pffindproto(int family, int protocol, int type) guard = domain_guard_deploy(); pp = pffindproto_locked(family, protocol, type); domain_guard_release(guard); - return (pp); + return pp; } struct protosw * @@ -851,22 +881,26 @@ pffindproto_locked(int family, int protocol, int type) domain_proto_mtx_lock_assert_held(); - if (family == 0) - return (0); + if (family == 0) { + return 0; + } dp = pffinddomain_locked(family); - if (dp == NULL) - return (NULL); + if (dp == NULL) { + return NULL; + } TAILQ_FOREACH(pp, &dp->dom_protosw, pr_entry) { - if ((pp->pr_protocol == protocol) && (pp->pr_type == type)) - return (pp); + if ((pp->pr_protocol == protocol) && (pp->pr_type == type)) { + return pp; + } if (type == SOCK_RAW && pp->pr_type == SOCK_RAW && - pp->pr_protocol == 0 && maybe == NULL) + pp->pr_protocol == 0 && maybe == NULL) { maybe = pp; + } } - return (maybe); + return maybe; } /* @@ -881,10 +915,11 @@ pffindproto_old(int family, int protocol, int type) guard = domain_guard_deploy(); if ((pp = pffindproto_locked(family, protocol, type)) != NULL && - (pp->pr_flags & PR_OLD)) + (pp->pr_flags & PR_OLD)) { opr = pp->pr_old; + } domain_guard_release(guard); - return (opr); + return opr; } static struct protosw * @@ -896,18 +931,21 @@ pffindprotonotype_locked(int family, int protocol, int type) domain_proto_mtx_lock_assert_held(); - if (family == 0) - return (0); + if (family == 0) { + return 0; + } dp = pffinddomain_locked(family); - if (dp == NULL) - return (NULL); + if (dp == NULL) { + return NULL; + } TAILQ_FOREACH(pp, &dp->dom_protosw, pr_entry) { - if (pp->pr_protocol == protocol) - return (pp); + if (pp->pr_protocol == protocol) { + return pp; + } } - return (NULL); + return NULL; } struct protosw * @@ -916,13 +954,14 @@ pffindprotonotype(int family, int protocol) struct protosw *pp; domain_guard_t guard; - if (protocol == 0) - return (NULL); + if (protocol == 0) { + return NULL; + } guard = domain_guard_deploy(); pp = pffindprotonotype_locked(family, protocol, 0); domain_guard_release(guard); - return (pp); + return pp; } void @@ -938,14 +977,16 @@ pfctlinput2(int cmd, struct sockaddr *sa, void *ctlparam) struct protosw *pp; domain_guard_t guard; - if (sa == NULL) + if (sa == NULL) { return; + } guard = domain_guard_deploy(); TAILQ_FOREACH(dp, &domains, dom_entry) { TAILQ_FOREACH(pp, &dp->dom_protosw, pr_entry) { - if (pp->pr_ctlinput != NULL) + if (pp->pr_ctlinput != NULL) { (*pp->pr_ctlinput)(cmd, sa, ctlparam, NULL); + } } } domain_guard_release(guard); @@ -959,8 +1000,9 @@ net_update_uptime_with_time(const struct timeval *tvp) * Round up the timer to the nearest integer value because otherwise * we might setup networking timers that are off by almost 1 second. 
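 * For example, a reading with tv_usec == 900000 rounds _net_uptime up
 * by a full second rather than letting it lag the true uptime by 0.9s.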
*/ - if (tvp->tv_usec > 500000) + if (tvp->tv_usec > 500000) { _net_uptime++; + } } void @@ -979,8 +1021,9 @@ net_update_uptime(void) void net_uptime2timeval(struct timeval *tv) { - if (tv == NULL) + if (tv == NULL) { return; + } tv->tv_usec = 0; tv->tv_sec = net_uptime(); @@ -994,10 +1037,11 @@ net_uptime2timeval(struct timeval *tv) u_int64_t net_uptime(void) { - if (_net_uptime == 0) + if (_net_uptime == 0) { net_update_uptime(); + } - return (_net_uptime); + return _net_uptime; } void @@ -1021,11 +1065,11 @@ domain_guard_deploy(void) if (marks != net_thread_marks_none) { LCK_MTX_ASSERT(&domain_proto_mtx, LCK_MTX_ASSERT_NOTOWNED); lck_mtx_lock(&domain_proto_mtx); - } - else + } else { LCK_MTX_ASSERT(&domain_proto_mtx, LCK_MTX_ASSERT_OWNED); + } - return ((domain_guard_t)(const void*)marks); + return (domain_guard_t)(const void*)marks; } void @@ -1037,9 +1081,9 @@ domain_guard_release(domain_guard_t guard) LCK_MTX_ASSERT(&domain_proto_mtx, LCK_MTX_ASSERT_OWNED); lck_mtx_unlock(&domain_proto_mtx); net_thread_marks_pop(marks); - } - else + } else { LCK_MTX_ASSERT(&domain_proto_mtx, LCK_MTX_ASSERT_NOTOWNED); + } } domain_unguard_t @@ -1051,11 +1095,11 @@ domain_unguard_deploy(void) if (marks != net_thread_marks_none) { LCK_MTX_ASSERT(&domain_proto_mtx, LCK_MTX_ASSERT_OWNED); lck_mtx_unlock(&domain_proto_mtx); - } - else + } else { LCK_MTX_ASSERT(&domain_proto_mtx, LCK_MTX_ASSERT_NOTOWNED); + } - return ((domain_unguard_t)(const void*)marks); + return (domain_unguard_t)(const void*)marks; } void @@ -1067,14 +1111,14 @@ domain_unguard_release(domain_unguard_t unguard) LCK_MTX_ASSERT(&domain_proto_mtx, LCK_MTX_ASSERT_NOTOWNED); lck_mtx_lock(&domain_proto_mtx); net_thread_unmarks_pop(marks); - } - else + } else { LCK_MTX_ASSERT(&domain_proto_mtx, LCK_MTX_ASSERT_OWNED); + } } #if (DEVELOPMENT || DEBUG) - + static int sysctl_do_drain_domains SYSCTL_HANDLER_ARGS { @@ -1082,13 +1126,14 @@ sysctl_do_drain_domains SYSCTL_HANDLER_ARGS int error; int dummy = 0; - error = sysctl_handle_int(oidp, &dummy, 0, req); - if (error || req->newptr == USER_ADDR_NULL) - return (error); + error = sysctl_handle_int(oidp, &dummy, 0, req); + if (error || req->newptr == USER_ADDR_NULL) { + return error; + } net_drain_domains(); - return (0); + return 0; } #endif /* DEVELOPMENT || DEBUG */ diff --git a/bsd/kern/uipc_mbuf.c b/bsd/kern/uipc_mbuf.c index f33335a38..c5b86b7b8 100644 --- a/bsd/kern/uipc_mbuf.c +++ b/bsd/kern/uipc_mbuf.c @@ -307,7 +307,7 @@ /* kernel translater */ extern vm_offset_t kmem_mb_alloc(vm_map_t, int, int, kern_return_t *); extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); -extern vm_map_t mb_map; /* special map */ +extern vm_map_t mb_map; /* special map */ static uint32_t mb_kmem_contig_failed; static uint32_t mb_kmem_failed; @@ -320,11 +320,11 @@ static uint64_t mb_kmem_contig_failed_size; static uint64_t mb_kmem_failed_size; static uint32_t mb_kmem_stats[6]; static const char *mb_kmem_stats_labels[] = { "INVALID_ARGUMENT", - "INVALID_ADDRESS", - "RESOURCE_SHORTAGE", - "NO_SPACE", - "KERN_FAILURE", - "OTHERS" }; + "INVALID_ADDRESS", + "RESOURCE_SHORTAGE", + "NO_SPACE", + "KERN_FAILURE", + "OTHERS" }; /* Global lock */ decl_lck_mtx_data(static, mbuf_mlock_data); @@ -345,42 +345,42 @@ static boolean_t mbuf_worker_needs_wakeup; /* wait channel for mbuf worker */ static uint32_t mbuf_worker_run_cnt; static uint64_t mbuf_worker_last_runtime; static uint64_t mbuf_drain_last_runtime; -static int mbuf_worker_ready; /* worker thread is runnable */ -static int ncpu; /* number of CPUs */ -static 
ppnum_t *mcl_paddr; /* Array of cluster physical addresses */ -static ppnum_t mcl_pages; /* Size of array (# physical pages) */ -static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */ -static mcache_t *ref_cache; /* Cache of cluster reference & flags */ +static int mbuf_worker_ready; /* worker thread is runnable */ +static int ncpu; /* number of CPUs */ +static ppnum_t *mcl_paddr; /* Array of cluster physical addresses */ +static ppnum_t mcl_pages; /* Size of array (# physical pages) */ +static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */ +static mcache_t *ref_cache; /* Cache of cluster reference & flags */ static mcache_t *mcl_audit_con_cache; /* Audit contents cache */ -static unsigned int mbuf_debug; /* patchable mbuf mcache flags */ +static unsigned int mbuf_debug; /* patchable mbuf mcache flags */ static unsigned int mb_normalized; /* number of packets "normalized" */ -#define MB_GROWTH_AGGRESSIVE 1 /* Threshold: 1/2 of total */ -#define MB_GROWTH_NORMAL 2 /* Threshold: 3/4 of total */ +#define MB_GROWTH_AGGRESSIVE 1 /* Threshold: 1/2 of total */ +#define MB_GROWTH_NORMAL 2 /* Threshold: 3/4 of total */ typedef enum { - MC_MBUF = 0, /* Regular mbuf */ - MC_CL, /* Cluster */ - MC_BIGCL, /* Large (4KB) cluster */ - MC_16KCL, /* Jumbo (16KB) cluster */ - MC_MBUF_CL, /* mbuf + cluster */ - MC_MBUF_BIGCL, /* mbuf + large (4KB) cluster */ - MC_MBUF_16KCL /* mbuf + jumbo (16KB) cluster */ + MC_MBUF = 0, /* Regular mbuf */ + MC_CL, /* Cluster */ + MC_BIGCL, /* Large (4KB) cluster */ + MC_16KCL, /* Jumbo (16KB) cluster */ + MC_MBUF_CL, /* mbuf + cluster */ + MC_MBUF_BIGCL, /* mbuf + large (4KB) cluster */ + MC_MBUF_16KCL /* mbuf + jumbo (16KB) cluster */ } mbuf_class_t; -#define MBUF_CLASS_MIN MC_MBUF -#define MBUF_CLASS_MAX MC_MBUF_16KCL -#define MBUF_CLASS_LAST MC_16KCL -#define MBUF_CLASS_VALID(c) \ +#define MBUF_CLASS_MIN MC_MBUF +#define MBUF_CLASS_MAX MC_MBUF_16KCL +#define MBUF_CLASS_LAST MC_16KCL +#define MBUF_CLASS_VALID(c) \ ((int)(c) >= MBUF_CLASS_MIN && (int)(c) <= MBUF_CLASS_MAX) -#define MBUF_CLASS_COMPOSITE(c) \ +#define MBUF_CLASS_COMPOSITE(c) \ ((int)(c) > MBUF_CLASS_LAST) /* * mbuf specific mcache allocation request flags. */ -#define MCR_COMP MCR_USR1 /* for MC_MBUF_{CL,BIGCL,16KCL} caches */ +#define MCR_COMP MCR_USR1 /* for MC_MBUF_{CL,BIGCL,16KCL} caches */ /* * Per-cluster slab structure. @@ -408,20 +408,20 @@ typedef enum { * Each slab controls a page of memory. 
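 * Objects larger than a page, such as 16KB jumbo clusters, are covered
 * by runs of neighboring slabs; with 4KB kernel pages, NSLABSP16KB below
 * works out to 16KB / 4KB = 4 slabs per jumbo cluster.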
 */
 typedef struct mcl_slab {
- struct mcl_slab *sl_next; /* neighboring slab */
- u_int8_t sl_class; /* controlling mbuf class */
- int8_t sl_refcnt; /* outstanding allocations */
- int8_t sl_chunks; /* chunks (bufs) in this slab */
- u_int16_t sl_flags; /* slab flags (see below) */
- u_int16_t sl_len; /* slab length */
- void *sl_base; /* base of allocated memory */
- void *sl_head; /* first free buffer */
- TAILQ_ENTRY(mcl_slab) sl_link; /* next/prev slab on freelist */
+ struct mcl_slab *sl_next; /* neighboring slab */
+ u_int8_t sl_class; /* controlling mbuf class */
+ int8_t sl_refcnt; /* outstanding allocations */
+ int8_t sl_chunks; /* chunks (bufs) in this slab */
+ u_int16_t sl_flags; /* slab flags (see below) */
+ u_int16_t sl_len; /* slab length */
+ void *sl_base; /* base of allocated memory */
+ void *sl_head; /* first free buffer */
+ TAILQ_ENTRY(mcl_slab) sl_link; /* next/prev slab on freelist */
 } mcl_slab_t;

-#define SLF_MAPPED 0x0001 /* backed by a mapped page */
-#define SLF_PARTIAL 0x0002 /* part of another slab */
-#define SLF_DETACHED 0x0004 /* not in slab freelist */
+#define SLF_MAPPED 0x0001 /* backed by a mapped page */
+#define SLF_PARTIAL 0x0002 /* part of another slab */
+#define SLF_DETACHED 0x0004 /* not in slab freelist */

 /*
 * The array of slabs are broken into groups of arrays per 1MB of kernel
@@ -429,33 +429,33 @@ typedef struct mcl_slab {
 * whenever a new piece of memory mapped in from the VM crosses the 1MB
 * boundary.
 */
-#define NSLABSPMB ((1 << MBSHIFT) >> PAGE_SHIFT)
+#define NSLABSPMB ((1 << MBSHIFT) >> PAGE_SHIFT)

 typedef struct mcl_slabg {
- mcl_slab_t *slg_slab; /* group of slabs */
+ mcl_slab_t *slg_slab; /* group of slabs */
 } mcl_slabg_t;

 /*
 * Number of slabs needed to control a 16KB cluster object.
 */
-#define NSLABSP16KB (M16KCLBYTES >> PAGE_SHIFT)
+#define NSLABSP16KB (M16KCLBYTES >> PAGE_SHIFT)

 /*
 * Per-cluster audit structure.
 */
 typedef struct {
- mcache_audit_t **cl_audit; /* array of audits */
+ mcache_audit_t **cl_audit; /* array of audits */
 } mcl_audit_t;

 typedef struct {
- struct thread *msa_thread; /* thread doing transaction */
- struct thread *msa_pthread; /* previous transaction thread */
- uint32_t msa_tstamp; /* transaction timestamp (ms) */
- uint32_t msa_ptstamp; /* prev transaction timestamp (ms) */
- uint16_t msa_depth; /* pc stack depth */
- uint16_t msa_pdepth; /* previous transaction pc stack */
- void *msa_stack[MCACHE_STACK_DEPTH];
- void *msa_pstack[MCACHE_STACK_DEPTH];
+ struct thread *msa_thread; /* thread doing transaction */
+ struct thread *msa_pthread; /* previous transaction thread */
+ uint32_t msa_tstamp; /* transaction timestamp (ms) */
+ uint32_t msa_ptstamp; /* prev transaction timestamp (ms) */
+ uint16_t msa_depth; /* pc stack depth */
+ uint16_t msa_pdepth; /* previous transaction pc stack */
+ void *msa_stack[MCACHE_STACK_DEPTH];
+ void *msa_pstack[MCACHE_STACK_DEPTH];
 } mcl_scratch_audit_t;

 typedef struct {
@@ -469,103 +469,103 @@ typedef struct {
 * cluster cache case). Note that we don't save the contents of
 * clusters when they are freed; we simply pattern-fill them.
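 * The sc_scratch area below records the current and previous transaction
 * on the object (thread, timestamp and pc stack for each) to help triage
 * leaks and use-after-free bugs.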
*/ - u_int8_t sc_mbuf[(MSIZE - _MHLEN) + sizeof (_m_ext_t)]; - mcl_scratch_audit_t sc_scratch __attribute__((aligned(8))); + u_int8_t sc_mbuf[(MSIZE - _MHLEN) + sizeof(_m_ext_t)]; + mcl_scratch_audit_t sc_scratch __attribute__((aligned(8))); } mcl_saved_contents_t; -#define AUDIT_CONTENTS_SIZE (sizeof (mcl_saved_contents_t)) +#define AUDIT_CONTENTS_SIZE (sizeof (mcl_saved_contents_t)) -#define MCA_SAVED_MBUF_PTR(_mca) \ - ((struct mbuf *)(void *)((mcl_saved_contents_t *) \ +#define MCA_SAVED_MBUF_PTR(_mca) \ + ((struct mbuf *)(void *)((mcl_saved_contents_t *) \ (_mca)->mca_contents)->sc_mbuf) -#define MCA_SAVED_MBUF_SIZE \ +#define MCA_SAVED_MBUF_SIZE \ (sizeof (((mcl_saved_contents_t *)0)->sc_mbuf)) -#define MCA_SAVED_SCRATCH_PTR(_mca) \ +#define MCA_SAVED_SCRATCH_PTR(_mca) \ (&((mcl_saved_contents_t *)(_mca)->mca_contents)->sc_scratch) /* * mbuf specific mcache audit flags */ -#define MB_INUSE 0x01 /* object has not been returned to slab */ -#define MB_COMP_INUSE 0x02 /* object has not been returned to cslab */ -#define MB_SCVALID 0x04 /* object has valid saved contents */ +#define MB_INUSE 0x01 /* object has not been returned to slab */ +#define MB_COMP_INUSE 0x02 /* object has not been returned to cslab */ +#define MB_SCVALID 0x04 /* object has valid saved contents */ /* * Each of the following two arrays holds up to nmbclusters elements. */ -static mcl_audit_t *mclaudit; /* array of cluster audit information */ -static unsigned int maxclaudit; /* max # of entries in audit table */ -static mcl_slabg_t **slabstbl; /* cluster slabs table */ -static unsigned int maxslabgrp; /* max # of entries in slabs table */ -static unsigned int slabgrp; /* # of entries in slabs table */ +static mcl_audit_t *mclaudit; /* array of cluster audit information */ +static unsigned int maxclaudit; /* max # of entries in audit table */ +static mcl_slabg_t **slabstbl; /* cluster slabs table */ +static unsigned int maxslabgrp; /* max # of entries in slabs table */ +static unsigned int slabgrp; /* # of entries in slabs table */ /* Globals */ -int nclusters; /* # of clusters for non-jumbo (legacy) sizes */ -int njcl; /* # of clusters for jumbo sizes */ -int njclbytes; /* size of a jumbo cluster */ -unsigned char *mbutl; /* first mapped cluster address */ -unsigned char *embutl; /* ending virtual address of mclusters */ -int _max_linkhdr; /* largest link-level header */ -int _max_protohdr; /* largest protocol header */ -int max_hdr; /* largest link+protocol header */ -int max_datalen; /* MHLEN - max_hdr */ - -static boolean_t mclverify; /* debug: pattern-checking */ -static boolean_t mcltrace; /* debug: stack tracing */ -static boolean_t mclfindleak; /* debug: leak detection */ -static boolean_t mclexpleak; /* debug: expose leak info to user space */ - -static struct timeval mb_start; /* beginning of time */ +int nclusters; /* # of clusters for non-jumbo (legacy) sizes */ +int njcl; /* # of clusters for jumbo sizes */ +int njclbytes; /* size of a jumbo cluster */ +unsigned char *mbutl; /* first mapped cluster address */ +unsigned char *embutl; /* ending virtual address of mclusters */ +int _max_linkhdr; /* largest link-level header */ +int _max_protohdr; /* largest protocol header */ +int max_hdr; /* largest link+protocol header */ +int max_datalen; /* MHLEN - max_hdr */ + +static boolean_t mclverify; /* debug: pattern-checking */ +static boolean_t mcltrace; /* debug: stack tracing */ +static boolean_t mclfindleak; /* debug: leak detection */ +static boolean_t mclexpleak; /* debug: expose leak info to user space */ 
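An aside on the MCA_SAVED_MBUF_SIZE definition above: it takes sizeof() of a struct member through a null pointer, which is safe because sizeof() never evaluates its operand. The following minimal, standalone sketch shows the same idiom; the struct and the MEMBER_SIZE macro are invented for illustration and are not xnu symbols.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for mcl_saved_contents_t; the fields are made up. */
struct saved_contents {
	char	sc_mbuf[256];
	long	sc_scratch;
};

/*
 * sizeof() is an unevaluated context, so the null-pointer member access
 * below only supplies a type and is never executed.
 */
#define MEMBER_SIZE(type, member)	(sizeof(((type *)0)->member))

int
main(void)
{
	/* Size of sc_mbuf (256 here) without ever declaring an instance. */
	printf("%zu\n", MEMBER_SIZE(struct saved_contents, sc_mbuf));
	/* offsetof() is the standardized relative of the same trick. */
	printf("%zu\n", offsetof(struct saved_contents, sc_scratch));
	return 0;
}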
+ +static struct timeval mb_start; /* beginning of time */ /* mbuf leak detection variables */ static struct mleak_table mleak_table; static mleak_stat_t *mleak_stat; -#define MLEAK_STAT_SIZE(n) \ +#define MLEAK_STAT_SIZE(n) \ __builtin_offsetof(mleak_stat_t, ml_trace[n]) struct mallocation { - mcache_obj_t *element; /* the alloc'ed element, NULL if unused */ - u_int32_t trace_index; /* mtrace index for corresponding backtrace */ - u_int32_t count; /* How many objects were requested */ - u_int64_t hitcount; /* for determining hash effectiveness */ + mcache_obj_t *element; /* the alloc'ed element, NULL if unused */ + u_int32_t trace_index; /* mtrace index for corresponding backtrace */ + u_int32_t count; /* How many objects were requested */ + u_int64_t hitcount; /* for determining hash effectiveness */ }; struct mtrace { - u_int64_t collisions; - u_int64_t hitcount; - u_int64_t allocs; - u_int64_t depth; - uintptr_t addr[MLEAK_STACK_DEPTH]; + u_int64_t collisions; + u_int64_t hitcount; + u_int64_t allocs; + u_int64_t depth; + uintptr_t addr[MLEAK_STACK_DEPTH]; }; /* Size must be a power of two for the zhash to be able to just mask off bits */ -#define MLEAK_ALLOCATION_MAP_NUM 512 -#define MLEAK_TRACE_MAP_NUM 256 +#define MLEAK_ALLOCATION_MAP_NUM 512 +#define MLEAK_TRACE_MAP_NUM 256 /* * Sample factor for how often to record a trace. This is overwritable * by the boot-arg mleak_sample_factor. */ -#define MLEAK_SAMPLE_FACTOR 500 +#define MLEAK_SAMPLE_FACTOR 500 /* * Number of top leakers recorded. */ -#define MLEAK_NUM_TRACES 5 +#define MLEAK_NUM_TRACES 5 -#define MB_LEAK_SPACING_64 " " +#define MB_LEAK_SPACING_64 " " #define MB_LEAK_SPACING_32 " " -#define MB_LEAK_HDR_32 "\n\ +#define MB_LEAK_HDR_32 "\n\ trace [1] trace [2] trace [3] trace [4] trace [5] \n\ ---------- ---------- ---------- ---------- ---------- \n\ " -#define MB_LEAK_HDR_64 "\n\ +#define MB_LEAK_HDR_64 "\n\ trace [1] trace [2] trace [3] \ - trace [4] trace [5] \n\ + trace [4] trace [5] \n\ ------------------ ------------------ ------------------ \ ------------------ ------------------ \n\ " @@ -587,12 +587,12 @@ static lck_grp_attr_t *mleak_lock_grp_attr; /* *Failed* large allocations. */ struct mtracelarge { - uint64_t size; - uint64_t depth; - uintptr_t addr[MLEAK_STACK_DEPTH]; + uint64_t size; + uint64_t depth; + uintptr_t addr[MLEAK_STACK_DEPTH]; }; -#define MTRACELARGE_NUM_TRACES 5 +#define MTRACELARGE_NUM_TRACES 5 static struct mtracelarge mtracelarge_table[MTRACELARGE_NUM_TRACES]; static void mtracelarge_register(size_t size); @@ -607,51 +607,51 @@ lck_rw_t *mbuf_tx_compl_tbl_lock = &mbuf_tx_compl_tbl_lck_rw_data; extern u_int32_t high_sb_max; /* The minimum number of objects that are allocated, to start. 
*/ -#define MINCL 32 -#define MINBIGCL (MINCL >> 1) -#define MIN16KCL (MINCL >> 2) +#define MINCL 32 +#define MINBIGCL (MINCL >> 1) +#define MIN16KCL (MINCL >> 2) /* Low watermarks (only map in pages once free counts go below) */ -#define MBIGCL_LOWAT MINBIGCL -#define M16KCL_LOWAT MIN16KCL +#define MBIGCL_LOWAT MINBIGCL +#define M16KCL_LOWAT MIN16KCL typedef struct { - mbuf_class_t mtbl_class; /* class type */ - mcache_t *mtbl_cache; /* mcache for this buffer class */ + mbuf_class_t mtbl_class; /* class type */ + mcache_t *mtbl_cache; /* mcache for this buffer class */ TAILQ_HEAD(mcl_slhead, mcl_slab) mtbl_slablist; /* slab list */ - mcache_obj_t *mtbl_cobjlist; /* composite objects freelist */ - mb_class_stat_t *mtbl_stats; /* statistics fetchable via sysctl */ - u_int32_t mtbl_maxsize; /* maximum buffer size */ - int mtbl_minlimit; /* minimum allowed */ - int mtbl_maxlimit; /* maximum allowed */ - u_int32_t mtbl_wantpurge; /* purge during next reclaim */ - uint32_t mtbl_avgtotal; /* average total on iOS */ - u_int32_t mtbl_expand; /* worker should expand the class */ + mcache_obj_t *mtbl_cobjlist; /* composite objects freelist */ + mb_class_stat_t *mtbl_stats; /* statistics fetchable via sysctl */ + u_int32_t mtbl_maxsize; /* maximum buffer size */ + int mtbl_minlimit; /* minimum allowed */ + int mtbl_maxlimit; /* maximum allowed */ + u_int32_t mtbl_wantpurge; /* purge during next reclaim */ + uint32_t mtbl_avgtotal; /* average total on iOS */ + u_int32_t mtbl_expand; /* worker should expand the class */ } mbuf_table_t; -#define m_class(c) mbuf_table[c].mtbl_class -#define m_cache(c) mbuf_table[c].mtbl_cache -#define m_slablist(c) mbuf_table[c].mtbl_slablist -#define m_cobjlist(c) mbuf_table[c].mtbl_cobjlist -#define m_maxsize(c) mbuf_table[c].mtbl_maxsize -#define m_minlimit(c) mbuf_table[c].mtbl_minlimit -#define m_maxlimit(c) mbuf_table[c].mtbl_maxlimit -#define m_wantpurge(c) mbuf_table[c].mtbl_wantpurge -#define m_cname(c) mbuf_table[c].mtbl_stats->mbcl_cname -#define m_size(c) mbuf_table[c].mtbl_stats->mbcl_size -#define m_total(c) mbuf_table[c].mtbl_stats->mbcl_total -#define m_active(c) mbuf_table[c].mtbl_stats->mbcl_active -#define m_infree(c) mbuf_table[c].mtbl_stats->mbcl_infree -#define m_slab_cnt(c) mbuf_table[c].mtbl_stats->mbcl_slab_cnt -#define m_alloc_cnt(c) mbuf_table[c].mtbl_stats->mbcl_alloc_cnt -#define m_free_cnt(c) mbuf_table[c].mtbl_stats->mbcl_free_cnt -#define m_notified(c) mbuf_table[c].mtbl_stats->mbcl_notified -#define m_purge_cnt(c) mbuf_table[c].mtbl_stats->mbcl_purge_cnt -#define m_fail_cnt(c) mbuf_table[c].mtbl_stats->mbcl_fail_cnt -#define m_ctotal(c) mbuf_table[c].mtbl_stats->mbcl_ctotal -#define m_peak(c) mbuf_table[c].mtbl_stats->mbcl_peak_reported -#define m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt -#define m_region_expand(c) mbuf_table[c].mtbl_expand +#define m_class(c) mbuf_table[c].mtbl_class +#define m_cache(c) mbuf_table[c].mtbl_cache +#define m_slablist(c) mbuf_table[c].mtbl_slablist +#define m_cobjlist(c) mbuf_table[c].mtbl_cobjlist +#define m_maxsize(c) mbuf_table[c].mtbl_maxsize +#define m_minlimit(c) mbuf_table[c].mtbl_minlimit +#define m_maxlimit(c) mbuf_table[c].mtbl_maxlimit +#define m_wantpurge(c) mbuf_table[c].mtbl_wantpurge +#define m_cname(c) mbuf_table[c].mtbl_stats->mbcl_cname +#define m_size(c) mbuf_table[c].mtbl_stats->mbcl_size +#define m_total(c) mbuf_table[c].mtbl_stats->mbcl_total +#define m_active(c) mbuf_table[c].mtbl_stats->mbcl_active +#define m_infree(c) mbuf_table[c].mtbl_stats->mbcl_infree +#define 
m_slab_cnt(c) mbuf_table[c].mtbl_stats->mbcl_slab_cnt +#define m_alloc_cnt(c) mbuf_table[c].mtbl_stats->mbcl_alloc_cnt +#define m_free_cnt(c) mbuf_table[c].mtbl_stats->mbcl_free_cnt +#define m_notified(c) mbuf_table[c].mtbl_stats->mbcl_notified +#define m_purge_cnt(c) mbuf_table[c].mtbl_stats->mbcl_purge_cnt +#define m_fail_cnt(c) mbuf_table[c].mtbl_stats->mbcl_fail_cnt +#define m_ctotal(c) mbuf_table[c].mtbl_stats->mbcl_ctotal +#define m_peak(c) mbuf_table[c].mtbl_stats->mbcl_peak_reported +#define m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt +#define m_region_expand(c) mbuf_table[c].mtbl_expand static mbuf_table_t mbuf_table[] = { /* @@ -660,13 +660,13 @@ static mbuf_table_t mbuf_table[] = { * usage patterns on iOS. */ { MC_MBUF, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF)), - NULL, NULL, 0, 0, 0, 0, 3000, 0 }, + NULL, NULL, 0, 0, 0, 0, 3000, 0 }, { MC_CL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL)), - NULL, NULL, 0, 0, 0, 0, 2000, 0 }, + NULL, NULL, 0, 0, 0, 0, 2000, 0 }, { MC_BIGCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL)), - NULL, NULL, 0, 0, 0, 0, 1000, 0 }, + NULL, NULL, 0, 0, 0, 0, 1000, 0 }, { MC_16KCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL)), - NULL, NULL, 0, 0, 0, 0, 200, 0 }, + NULL, NULL, 0, 0, 0, 0, 200, 0 }, /* * The following are special caches; they serve as intermediate * caches backed by the above rudimentary caches. Each object @@ -680,42 +680,45 @@ static mbuf_table_t mbuf_table[] = { { MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 200, 0 }, }; -#define NELEM(a) (sizeof (a) / sizeof ((a)[0])) +#define NELEM(a) (sizeof (a) / sizeof ((a)[0])) static uint32_t m_avgtotal(mbuf_class_t c) { - return (mbuf_table[c].mtbl_avgtotal); + return mbuf_table[c].mtbl_avgtotal; } -static void *mb_waitchan = &mbuf_table; /* wait channel for all caches */ -static int mb_waiters; /* number of waiters */ +static void *mb_waitchan = &mbuf_table; /* wait channel for all caches */ +static int mb_waiters; /* number of waiters */ boolean_t mb_peak_newreport = FALSE; boolean_t mb_peak_firstreport = FALSE; /* generate a report by default after 1 week of uptime */ -#define MBUF_PEAK_FIRST_REPORT_THRESHOLD 604800 +#define MBUF_PEAK_FIRST_REPORT_THRESHOLD 604800 -#define MB_WDT_MAXTIME 10 /* # of secs before watchdog panic */ -static struct timeval mb_wdtstart; /* watchdog start timestamp */ +#define MB_WDT_MAXTIME 10 /* # of secs before watchdog panic */ +static struct timeval mb_wdtstart; /* watchdog start timestamp */ static char *mbuf_dump_buf; -#define MBUF_DUMP_BUF_SIZE 4096 +#define MBUF_DUMP_BUF_SIZE 4096 /* - * mbuf watchdog is enabled by default on embedded platforms. It is - * also toggeable via the kern.ipc.mb_watchdog sysctl. - * Garbage collection is also enabled by default on embedded platforms. + * mbuf watchdog is enabled by default. It is also toggleable via the + * kern.ipc.mb_watchdog sysctl. + * Garbage collection is enabled by default on embedded platforms. * mb_drain_maxint controls the amount of time to wait (in seconds) before * consecutive calls to mbuf_drain(). 
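 * As a hedged illustration of the interval gate this describes (only
 * net_uptime() and mb_drain_maxint are taken from this file; the other
 * names are hypothetical):
 *
 *	uint64_t now = net_uptime();
 *	if (now - last_drain_time < mb_drain_maxint)
 *		return;				// drained too recently
 *	last_drain_time = now;
 *	// ... proceed with the actual mbuf_drain() work ...
 *
 * where last_drain_time would persist across calls.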
*/ -#if CONFIG_EMBEDDED +#if CONFIG_EMBEDDED || DEVELOPMENT || DEBUG static unsigned int mb_watchdog = 1; -static unsigned int mb_drain_maxint = 60; #else static unsigned int mb_watchdog = 0; +#endif +#if CONFIG_EMBEDDED +static unsigned int mb_drain_maxint = 60; +#else static unsigned int mb_drain_maxint = 0; #endif /* CONFIG_EMBEDDED */ @@ -814,10 +817,10 @@ static size_t mbwdog_logging_used; static void mbuf_drain_locked(boolean_t); /* flags for m_copyback0 */ -#define M_COPYBACK0_COPYBACK 0x0001 /* copyback from cp */ -#define M_COPYBACK0_PRESERVE 0x0002 /* preserve original data */ -#define M_COPYBACK0_COW 0x0004 /* do copy-on-write */ -#define M_COPYBACK0_EXTEND 0x0008 /* extend chain */ +#define M_COPYBACK0_COPYBACK 0x0001 /* copyback from cp */ +#define M_COPYBACK0_PRESERVE 0x0002 /* preserve original data */ +#define M_COPYBACK0_COW 0x0004 /* do copy-on-write */ +#define M_COPYBACK0_EXTEND 0x0008 /* extend chain */ /* * This flag is set for all mbufs that come out of and into the composite @@ -830,34 +833,34 @@ static void mbuf_drain_locked(boolean_t); * such a time, this flag will be cleared from the mbufs and the objects * will be freed into their own separate freelists. */ -#define EXTF_COMPOSITE 0x1 +#define EXTF_COMPOSITE 0x1 /* * This flag indicates that the external cluster is read-only, i.e. it is * or was referred to by more than one mbufs. Once set, this flag is never * cleared. */ -#define EXTF_READONLY 0x2 +#define EXTF_READONLY 0x2 /* * This flag indicates that the external cluster is paired with the mbuf. * Pairing implies an external free routine defined which will be invoked * when the reference count drops to the minimum at m_free time. This * flag is never cleared. */ -#define EXTF_PAIRED 0x4 +#define EXTF_PAIRED 0x4 -#define EXTF_MASK \ +#define EXTF_MASK \ (EXTF_COMPOSITE | EXTF_READONLY | EXTF_PAIRED) -#define MEXT_MINREF(m) ((m_get_rfa(m))->minref) -#define MEXT_REF(m) ((m_get_rfa(m))->refcnt) -#define MEXT_PREF(m) ((m_get_rfa(m))->prefcnt) -#define MEXT_FLAGS(m) ((m_get_rfa(m))->flags) -#define MEXT_PRIV(m) ((m_get_rfa(m))->priv) -#define MEXT_PMBUF(m) ((m_get_rfa(m))->paired) -#define MEXT_TOKEN(m) ((m_get_rfa(m))->ext_token) -#define MBUF_IS_COMPOSITE(m) \ - (MEXT_REF(m) == MEXT_MINREF(m) && \ +#define MEXT_MINREF(m) ((m_get_rfa(m))->minref) +#define MEXT_REF(m) ((m_get_rfa(m))->refcnt) +#define MEXT_PREF(m) ((m_get_rfa(m))->prefcnt) +#define MEXT_FLAGS(m) ((m_get_rfa(m))->flags) +#define MEXT_PRIV(m) ((m_get_rfa(m))->priv) +#define MEXT_PMBUF(m) ((m_get_rfa(m))->paired) +#define MEXT_TOKEN(m) ((m_get_rfa(m))->ext_token) +#define MBUF_IS_COMPOSITE(m) \ + (MEXT_REF(m) == MEXT_MINREF(m) && \ (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_COMPOSITE) /* * This macro can be used to test if the mbuf is paired to an external @@ -865,54 +868,54 @@ static void mbuf_drain_locked(boolean_t); * is important, as EXTF_PAIRED alone is insufficient since it is immutable, * and thus survives calls to m_free_paired. */ -#define MBUF_IS_PAIRED(m) \ - (((m)->m_flags & M_EXT) && \ - (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED && \ +#define MBUF_IS_PAIRED(m) \ + (((m)->m_flags & M_EXT) && \ + (MEXT_FLAGS(m) & EXTF_MASK) == EXTF_PAIRED && \ MEXT_PMBUF(m) == (m)) /* * Macros used to verify the integrity of the mbuf. 
*/ -#define _MCHECK(m) { \ - if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \ - if (mclaudit == NULL) \ - panic("MCHECK: m_type=%d m=%p", \ - (u_int16_t)(m)->m_type, m); \ - else \ - mcl_audit_mcheck_panic(m); \ - } \ -} - -#define MBUF_IN_MAP(addr) \ - ((unsigned char *)(addr) >= mbutl && \ +#define _MCHECK(m) { \ + if ((m)->m_type != MT_FREE && !MBUF_IS_PAIRED(m)) { \ + if (mclaudit == NULL) \ + panic("MCHECK: m_type=%d m=%p", \ + (u_int16_t)(m)->m_type, m); \ + else \ + mcl_audit_mcheck_panic(m); \ + } \ +} + +#define MBUF_IN_MAP(addr) \ + ((unsigned char *)(addr) >= mbutl && \ (unsigned char *)(addr) < embutl) -#define MRANGE(addr) { \ - if (!MBUF_IN_MAP(addr)) \ - panic("MRANGE: address out of range 0x%p", addr); \ +#define MRANGE(addr) { \ + if (!MBUF_IN_MAP(addr)) \ + panic("MRANGE: address out of range 0x%p", addr); \ } /* * Macro version of mtod. */ -#define MTOD(m, t) ((t)((m)->m_data)) +#define MTOD(m, t) ((t)((m)->m_data)) /* * Macros to obtain page index given a base cluster address */ -#define MTOPG(x) (((unsigned char *)x - mbutl) >> PAGE_SHIFT) -#define PGTOM(x) (mbutl + (x << PAGE_SHIFT)) +#define MTOPG(x) (((unsigned char *)x - mbutl) >> PAGE_SHIFT) +#define PGTOM(x) (mbutl + (x << PAGE_SHIFT)) /* * Macro to find the mbuf index relative to a base. */ -#define MBPAGEIDX(c, m) \ +#define MBPAGEIDX(c, m) \ (((unsigned char *)(m) - (unsigned char *)(c)) >> MSIZESHIFT) /* * Same thing for 2KB cluster index. */ -#define CLPAGEIDX(c, m) \ +#define CLPAGEIDX(c, m) \ (((unsigned char *)(m) - (unsigned char *)(c)) >> MCLSHIFT) /* @@ -924,64 +927,64 @@ static void mbuf_drain_locked(boolean_t); /* * Macros used during mbuf and cluster initialization. */ -#define MBUF_INIT_PKTHDR(m) { \ - (m)->m_pkthdr.rcvif = NULL; \ - (m)->m_pkthdr.pkt_hdr = NULL; \ - (m)->m_pkthdr.len = 0; \ - (m)->m_pkthdr.csum_flags = 0; \ - (m)->m_pkthdr.csum_data = 0; \ - (m)->m_pkthdr.vlan_tag = 0; \ - m_classifier_init(m, 0); \ - m_tag_init(m, 1); \ - m_scratch_init(m); \ - m_redzone_init(m); \ -} - -#define MBUF_INIT(m, pkthdr, type) { \ - _MCHECK(m); \ - (m)->m_next = (m)->m_nextpkt = NULL; \ - (m)->m_len = 0; \ - (m)->m_type = type; \ - if ((pkthdr) == 0) { \ - (m)->m_data = (m)->m_dat; \ - (m)->m_flags = 0; \ - } else { \ - (m)->m_data = (m)->m_pktdat; \ - (m)->m_flags = M_PKTHDR; \ - MBUF_INIT_PKTHDR(m); \ - } \ -} - -#define MEXT_INIT(m, buf, size, free, arg, rfa, min, ref, pref, flag, \ - priv, pm) { \ - (m)->m_data = (m)->m_ext.ext_buf = (buf); \ - (m)->m_flags |= M_EXT; \ - m_set_ext((m), (rfa), (free), (arg)); \ - (m)->m_ext.ext_size = (size); \ - MEXT_MINREF(m) = (min); \ - MEXT_REF(m) = (ref); \ - MEXT_PREF(m) = (pref); \ - MEXT_FLAGS(m) = (flag); \ - MEXT_PRIV(m) = (priv); \ - MEXT_PMBUF(m) = (pm); \ -} - -#define MBUF_CL_INIT(m, buf, rfa, ref, flag) \ - MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0, \ +#define MBUF_INIT_PKTHDR(m) { \ + (m)->m_pkthdr.rcvif = NULL; \ + (m)->m_pkthdr.pkt_hdr = NULL; \ + (m)->m_pkthdr.len = 0; \ + (m)->m_pkthdr.csum_flags = 0; \ + (m)->m_pkthdr.csum_data = 0; \ + (m)->m_pkthdr.vlan_tag = 0; \ + m_classifier_init(m, 0); \ + m_tag_init(m, 1); \ + m_scratch_init(m); \ + m_redzone_init(m); \ +} + +#define MBUF_INIT(m, pkthdr, type) { \ + _MCHECK(m); \ + (m)->m_next = (m)->m_nextpkt = NULL; \ + (m)->m_len = 0; \ + (m)->m_type = type; \ + if ((pkthdr) == 0) { \ + (m)->m_data = (m)->m_dat; \ + (m)->m_flags = 0; \ + } else { \ + (m)->m_data = (m)->m_pktdat; \ + (m)->m_flags = M_PKTHDR; \ + MBUF_INIT_PKTHDR(m); \ + } \ +} + +#define MEXT_INIT(m, buf, size, 
free, arg, rfa, min, ref, pref, flag, \ + priv, pm) { \ + (m)->m_data = (m)->m_ext.ext_buf = (buf); \ + (m)->m_flags |= M_EXT; \ + m_set_ext((m), (rfa), (free), (arg)); \ + (m)->m_ext.ext_size = (size); \ + MEXT_MINREF(m) = (min); \ + MEXT_REF(m) = (ref); \ + MEXT_PREF(m) = (pref); \ + MEXT_FLAGS(m) = (flag); \ + MEXT_PRIV(m) = (priv); \ + MEXT_PMBUF(m) = (pm); \ +} + +#define MBUF_CL_INIT(m, buf, rfa, ref, flag) \ + MEXT_INIT(m, buf, m_maxsize(MC_CL), NULL, NULL, rfa, 0, \ ref, 0, flag, 0, NULL) -#define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \ - MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0, \ +#define MBUF_BIGCL_INIT(m, buf, rfa, ref, flag) \ + MEXT_INIT(m, buf, m_maxsize(MC_BIGCL), m_bigfree, NULL, rfa, 0, \ ref, 0, flag, 0, NULL) -#define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \ - MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0, \ +#define MBUF_16KCL_INIT(m, buf, rfa, ref, flag) \ + MEXT_INIT(m, buf, m_maxsize(MC_16KCL), m_16kfree, NULL, rfa, 0, \ ref, 0, flag, 0, NULL) /* * Macro to convert BSD malloc sleep flag to mcache's */ -#define MSLEEPF(f) ((!((f) & M_DONTWAIT)) ? MCR_SLEEP : MCR_NOSLEEP) +#define MSLEEPF(f) ((!((f) & M_DONTWAIT)) ? MCR_SLEEP : MCR_NOSLEEP) /* * The structure that holds all mbuf class statistics exportable via sysctl. @@ -990,11 +993,11 @@ static void mbuf_drain_locked(boolean_t); * that allows for a more accurate view of the state of the allocator. */ struct mb_stat *mb_stat; -struct omb_stat *omb_stat; /* For backwards compatibility */ +struct omb_stat *omb_stat; /* For backwards compatibility */ -#define MB_STAT_SIZE(n) \ +#define MB_STAT_SIZE(n) \ __builtin_offsetof(mb_stat_t, mbs_class[n]) -#define OMB_STAT_SIZE(n) \ +#define OMB_STAT_SIZE(n) \ ((size_t)(&((struct omb_stat *)0)->mbs_class[n])) /* @@ -1010,7 +1013,7 @@ struct omb_stat *omb_stat; /* For backwards compatibility */ */ struct mbstat mbstat; -#define MBSTAT_MTYPES_MAX \ +#define MBSTAT_MTYPES_MAX \ (sizeof (mbstat.m_mtypes) / sizeof (mbstat.m_mtypes[0])) /* @@ -1025,33 +1028,33 @@ struct mbstat mbstat; * anything beyond that (up to type 255) is considered a corner case. 
*/ typedef struct { - unsigned int cpu_mtypes[MT_MAX]; + unsigned int cpu_mtypes[MT_MAX]; } __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE), packed)) mtypes_cpu_t; typedef struct { - mtypes_cpu_t mbs_cpu[1]; + mtypes_cpu_t mbs_cpu[1]; } mbuf_mtypes_t; -static mbuf_mtypes_t *mbuf_mtypes; /* per-CPU statistics */ +static mbuf_mtypes_t *mbuf_mtypes; /* per-CPU statistics */ -#define MBUF_MTYPES_SIZE(n) \ +#define MBUF_MTYPES_SIZE(n) \ ((size_t)(&((mbuf_mtypes_t *)0)->mbs_cpu[n])) -#define MTYPES_CPU(p) \ +#define MTYPES_CPU(p) \ ((mtypes_cpu_t *)(void *)((char *)(p) + MBUF_MTYPES_SIZE(cpu_number()))) -#define mtype_stat_add(type, n) { \ - if ((unsigned)(type) < MT_MAX) { \ - mtypes_cpu_t *mbs = MTYPES_CPU(mbuf_mtypes); \ - atomic_add_32(&mbs->cpu_mtypes[type], n); \ - } else if ((unsigned)(type) < (unsigned)MBSTAT_MTYPES_MAX) { \ - atomic_add_16((int16_t *)&mbstat.m_mtypes[type], n); \ - } \ +#define mtype_stat_add(type, n) { \ + if ((unsigned)(type) < MT_MAX) { \ + mtypes_cpu_t *mbs = MTYPES_CPU(mbuf_mtypes); \ + atomic_add_32(&mbs->cpu_mtypes[type], n); \ + } else if ((unsigned)(type) < (unsigned)MBSTAT_MTYPES_MAX) { \ + atomic_add_16((int16_t *)&mbstat.m_mtypes[type], n); \ + } \ } -#define mtype_stat_sub(t, n) mtype_stat_add(t, -(n)) -#define mtype_stat_inc(t) mtype_stat_add(t, 1) -#define mtype_stat_dec(t) mtype_stat_sub(t, 1) +#define mtype_stat_sub(t, n) mtype_stat_add(t, -(n)) +#define mtype_stat_inc(t) mtype_stat_add(t, 1) +#define mtype_stat_dec(t) mtype_stat_sub(t, 1) static void mbuf_mtypes_sync(boolean_t locked) @@ -1059,26 +1062,31 @@ mbuf_mtypes_sync(boolean_t locked) int m, n; mtypes_cpu_t mtc; - if (locked) + if (locked) { LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED); + } - bzero(&mtc, sizeof (mtc)); + bzero(&mtc, sizeof(mtc)); for (m = 0; m < ncpu; m++) { mtypes_cpu_t *scp = &mbuf_mtypes->mbs_cpu[m]; mtypes_cpu_t temp; bcopy(&scp->cpu_mtypes, &temp.cpu_mtypes, - sizeof (temp.cpu_mtypes)); + sizeof(temp.cpu_mtypes)); - for (n = 0; n < MT_MAX; n++) + for (n = 0; n < MT_MAX; n++) { mtc.cpu_mtypes[n] += temp.cpu_mtypes[n]; + } } - if (!locked) + if (!locked) { lck_mtx_lock(mbuf_mlock); - for (n = 0; n < MT_MAX; n++) + } + for (n = 0; n < MT_MAX; n++) { mbstat.m_mtypes[n] = mtc.cpu_mtypes[n]; - if (!locked) + } + if (!locked) { lck_mtx_unlock(mbuf_mlock); + } } static int @@ -1087,7 +1095,7 @@ mbstat_sysctl SYSCTL_HANDLER_ARGS #pragma unused(oidp, arg1, arg2) mbuf_mtypes_sync(FALSE); - return (SYSCTL_OUT(req, &mbstat, sizeof (mbstat))); + return SYSCTL_OUT(req, &mbstat, sizeof(mbstat)); } static void @@ -1106,22 +1114,25 @@ mbuf_stat_sync(void) bktsize = ccp->cc_bktsize; sp = mbuf_table[k].mtbl_stats; - if (cp->mc_flags & MCF_NOCPUCACHE) + if (cp->mc_flags & MCF_NOCPUCACHE) { sp->mbcl_mc_state = MCS_DISABLED; - else if (cp->mc_purge_cnt > 0) + } else if (cp->mc_purge_cnt > 0) { sp->mbcl_mc_state = MCS_PURGING; - else if (bktsize == 0) + } else if (bktsize == 0) { sp->mbcl_mc_state = MCS_OFFLINE; - else + } else { sp->mbcl_mc_state = MCS_ONLINE; + } sp->mbcl_mc_cached = 0; for (m = 0; m < ncpu; m++) { ccp = &cp->mc_cpu[m]; - if (ccp->cc_objs > 0) + if (ccp->cc_objs > 0) { sp->mbcl_mc_cached += ccp->cc_objs; - if (ccp->cc_pobjs > 0) + } + if (ccp->cc_pobjs > 0) { sp->mbcl_mc_cached += ccp->cc_pobjs; + } } sp->mbcl_mc_cached += (cp->mc_full.bl_total * bktsize); sp->mbcl_active = sp->mbcl_total - sp->mbcl_mc_cached - @@ -1179,7 +1190,7 @@ mb_stat_sysctl SYSCTL_HANDLER_ARGS oc = &omb_stat->mbs_class[0]; c = &mb_stat->mbs_class[0]; for (k = 0; k < omb_stat->mbs_cnt; k++, oc++, c++) 
{ - (void) snprintf(oc->mbcl_cname, sizeof (oc->mbcl_cname), + (void) snprintf(oc->mbcl_cname, sizeof(oc->mbcl_cname), "%s", c->mbcl_cname); oc->mbcl_size = c->mbcl_size; oc->mbcl_total = c->mbcl_total; @@ -1208,7 +1219,7 @@ mb_stat_sysctl SYSCTL_HANDLER_ARGS lck_mtx_unlock(mbuf_mlock); - return (SYSCTL_OUT(req, statp, statsz)); + return SYSCTL_OUT(req, statp, statsz); } static int @@ -1218,15 +1229,16 @@ mleak_top_trace_sysctl SYSCTL_HANDLER_ARGS int i; /* Ensure leak tracing turned on */ - if (!mclfindleak || !mclexpleak) - return (ENXIO); + if (!mclfindleak || !mclexpleak) { + return ENXIO; + } lck_mtx_lock(mleak_lock); mleak_update_stats(); i = SYSCTL_OUT(req, mleak_stat, MLEAK_STAT_SIZE(MLEAK_NUM_TRACES)); lck_mtx_unlock(mleak_lock); - return (i); + return i; } static int @@ -1236,14 +1248,15 @@ mleak_table_sysctl SYSCTL_HANDLER_ARGS int i = 0; /* Ensure leak tracing turned on */ - if (!mclfindleak || !mclexpleak) - return (ENXIO); + if (!mclfindleak || !mclexpleak) { + return ENXIO; + } lck_mtx_lock(mleak_lock); - i = SYSCTL_OUT(req, &mleak_table, sizeof (mleak_table)); + i = SYSCTL_OUT(req, &mleak_table, sizeof(mleak_table)); lck_mtx_unlock(mleak_lock); - return (i); + return i; } static inline void @@ -1263,8 +1276,9 @@ m_incref(struct mbuf *m) * we don't clear the flag when the refcount goes back to the * minimum, to simplify code calling m_mclhasreference(). */ - if (new > (MEXT_MINREF(m) + 1) && !(MEXT_FLAGS(m) & EXTF_READONLY)) + if (new > (MEXT_MINREF(m) + 1) && !(MEXT_FLAGS(m) & EXTF_READONLY)) { (void) OSBitOrAtomic16(EXTF_READONLY, &MEXT_FLAGS(m)); + } } static inline u_int16_t @@ -1279,7 +1293,7 @@ m_decref(struct mbuf *m) ASSERT(old != 0); } while (!OSCompareAndSwap16(old, new, addr)); - return (new); + return new; } static void @@ -1297,8 +1311,9 @@ mbuf_table_init(void) VERIFY(mb_stat != NULL); mb_stat->mbs_cnt = NELEM(mbuf_table); - for (m = 0; m < NELEM(mbuf_table); m++) + for (m = 0; m < NELEM(mbuf_table); m++) { mbuf_table[m].mtbl_stats = &mb_stat->mbs_class[m]; + } #if CONFIG_MBUF_JUMBO config_mbuf_jumbo = 1; @@ -1347,15 +1362,15 @@ mbuf_table_init(void) * 1/32th of the shared region is reserved for pure 2KB and 4KB * clusters (1/64th each.) */ - c = P2ROUNDDOWN((nclusters >> 6), NCLPG); /* in 2KB unit */ + c = P2ROUNDDOWN((nclusters >> 6), NCLPG); /* in 2KB unit */ b = P2ROUNDDOWN((nclusters >> (6 + NCLPBGSHIFT)), NBCLPG); /* in 4KB unit */ - s = nclusters - (c + (b << NCLPBGSHIFT)); /* in 2KB unit */ + s = nclusters - (c + (b << NCLPBGSHIFT)); /* in 2KB unit */ /* * 1/64th (c) is reserved for 2KB clusters. */ m_minlimit(MC_CL) = c; - m_maxlimit(MC_CL) = s + c; /* in 2KB unit */ + m_maxlimit(MC_CL) = s + c; /* in 2KB unit */ m_maxsize(MC_CL) = m_size(MC_CL) = MCLBYTES; (void) snprintf(m_cname(MC_CL), MAX_MBUF_CNAME, "cl"); @@ -1364,7 +1379,7 @@ mbuf_table_init(void) * It cannot be turned into 2KB clusters or mbufs. 
*/ m_minlimit(MC_BIGCL) = b; - m_maxlimit(MC_BIGCL) = (s >> NCLPBGSHIFT) + b; /* in 4KB unit */ + m_maxlimit(MC_BIGCL) = (s >> NCLPBGSHIFT) + b; /* in 4KB unit */ m_maxsize(MC_BIGCL) = m_size(MC_BIGCL) = MBIGCLBYTES; (void) snprintf(m_cname(MC_BIGCL), MAX_MBUF_CNAME, "bigcl"); @@ -1372,7 +1387,7 @@ mbuf_table_init(void) * The remaining 31/32ths (s) are all-purpose (mbufs, 2KB, or 4KB) */ m_minlimit(MC_MBUF) = 0; - m_maxlimit(MC_MBUF) = (s << NMBPCLSHIFT); /* in mbuf unit */ + m_maxlimit(MC_MBUF) = (s << NMBPCLSHIFT); /* in mbuf unit */ m_maxsize(MC_MBUF) = m_size(MC_MBUF) = MSIZE; (void) snprintf(m_cname(MC_MBUF), MAX_MBUF_CNAME, "mbuf"); @@ -1395,7 +1410,7 @@ mbuf_table_init(void) * And for jumbo classes. */ m_minlimit(MC_16KCL) = 0; - m_maxlimit(MC_16KCL) = (njcl >> NCLPJCLSHIFT); /* in 16KB unit */ + m_maxlimit(MC_16KCL) = (njcl >> NCLPJCLSHIFT); /* in 16KB unit */ m_maxsize(MC_16KCL) = m_size(MC_16KCL) = M16KCLBYTES; (void) snprintf(m_cname(MC_16KCL), MAX_MBUF_CNAME, "16kcl"); @@ -1408,7 +1423,7 @@ mbuf_table_init(void) /* * Initialize the legacy mbstat structure. */ - bzero(&mbstat, sizeof (mbstat)); + bzero(&mbstat, sizeof(mbstat)); mbstat.m_msize = m_maxsize(MC_MBUF); mbstat.m_mclbytes = m_maxsize(MC_CL); mbstat.m_minclsize = MINCLSIZE; @@ -1419,26 +1434,26 @@ mbuf_table_init(void) #if defined(__LP64__) typedef struct ncl_tbl { - uint64_t nt_maxmem; /* memory (sane) size */ - uint32_t nt_mbpool; /* mbuf pool size */ + uint64_t nt_maxmem; /* memory (sane) size */ + uint32_t nt_mbpool; /* mbuf pool size */ } ncl_tbl_t; /* Non-server */ static ncl_tbl_t ncl_table[] = { - { (1ULL << GBSHIFT) /* 1 GB */, (64 << MBSHIFT) /* 64 MB */ }, - { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (96 << MBSHIFT) /* 96 MB */ }, - { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (128 << MBSHIFT) /* 128 MB */ }, + { (1ULL << GBSHIFT) /* 1 GB */, (64 << MBSHIFT) /* 64 MB */ }, + { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (96 << MBSHIFT) /* 96 MB */ }, + { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (128 << MBSHIFT) /* 128 MB */ }, { 0, 0 } }; /* Server */ static ncl_tbl_t ncl_table_srv[] = { - { (1ULL << GBSHIFT) /* 1 GB */, (96 << MBSHIFT) /* 96 MB */ }, - { (1ULL << (GBSHIFT + 2)) /* 4 GB */, (128 << MBSHIFT) /* 128 MB */ }, - { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (160 << MBSHIFT) /* 160 MB */ }, - { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (192 << MBSHIFT) /* 192 MB */ }, - { (1ULL << (GBSHIFT + 5)) /* 32 GB */, (256 << MBSHIFT) /* 256 MB */ }, - { (1ULL << (GBSHIFT + 6)) /* 64 GB */, (384 << MBSHIFT) /* 384 MB */ }, + { (1ULL << GBSHIFT) /* 1 GB */, (96 << MBSHIFT) /* 96 MB */ }, + { (1ULL << (GBSHIFT + 2)) /* 4 GB */, (128 << MBSHIFT) /* 128 MB */ }, + { (1ULL << (GBSHIFT + 3)) /* 8 GB */, (160 << MBSHIFT) /* 160 MB */ }, + { (1ULL << (GBSHIFT + 4)) /* 16 GB */, (192 << MBSHIFT) /* 192 MB */ }, + { (1ULL << (GBSHIFT + 5)) /* 32 GB */, (256 << MBSHIFT) /* 256 MB */ }, + { (1ULL << (GBSHIFT + 6)) /* 64 GB */, (384 << MBSHIFT) /* 384 MB */ }, { 0, 0 } }; #endif /* __LP64__ */ @@ -1452,8 +1467,9 @@ mbuf_default_ncl(int server, uint64_t mem) /* * 32-bit kernel (default to 64MB of mbuf pool for >= 1GB RAM). */ - if ((n = ((mem / 16) / MCLBYTES)) > 32768) + if ((n = ((mem / 16) / MCLBYTES)) > 32768) { n = 32768; + } #else unsigned int n, i; ncl_tbl_t *tbl = (server ? 
ncl_table_srv : ncl_table); @@ -1462,13 +1478,14 @@ mbuf_default_ncl(int server, uint64_t mem) */ n = tbl[0].nt_mbpool; for (i = 0; tbl[i].nt_mbpool != 0; i++) { - if (mem < tbl[i].nt_maxmem) + if (mem < tbl[i].nt_maxmem) { break; + } n = tbl[i].nt_mbpool; } n >>= MCLSHIFT; #endif /* !__LP64__ */ - return (n); + return n; } __private_extern__ void @@ -1550,22 +1567,23 @@ mbinit(void) /* Module specific scratch space (32-bit alignment requirement) */ _CASSERT(!(offsetof(struct mbuf, m_pkthdr.pkt_mpriv) % - sizeof (uint32_t))); + sizeof(uint32_t))); /* Initialize random red zone cookie value */ - _CASSERT(sizeof (mb_redzone_cookie) == - sizeof (((struct pkthdr *)0)->redzone)); - read_random(&mb_redzone_cookie, sizeof (mb_redzone_cookie)); - read_random(&mb_obscure_extref, sizeof (mb_obscure_extref)); - read_random(&mb_obscure_extfree, sizeof (mb_obscure_extfree)); + _CASSERT(sizeof(mb_redzone_cookie) == + sizeof(((struct pkthdr *)0)->redzone)); + read_random(&mb_redzone_cookie, sizeof(mb_redzone_cookie)); + read_random(&mb_obscure_extref, sizeof(mb_obscure_extref)); + read_random(&mb_obscure_extfree, sizeof(mb_obscure_extfree)); mb_obscure_extref |= 0x3; mb_obscure_extfree |= 0x3; /* Make sure we don't save more than we should */ - _CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof (struct mbuf)); + _CASSERT(MCA_SAVED_MBUF_SIZE <= sizeof(struct mbuf)); - if (nmbclusters == 0) + if (nmbclusters == 0) { nmbclusters = NMBCLUSTERS; + } /* This should be a sane (at least even) value by now */ VERIFY(nmbclusters != 0 && !(nmbclusters & 0x1)); @@ -1589,7 +1607,7 @@ mbinit(void) */ maxslabgrp = (P2ROUNDUP(nmbclusters, (MBSIZE >> MCLSHIFT)) << MCLSHIFT) >> MBSHIFT; - MALLOC(slabstbl, mcl_slabg_t **, maxslabgrp * sizeof (mcl_slabg_t *), + MALLOC(slabstbl, mcl_slabg_t * *, maxslabgrp * sizeof(mcl_slabg_t *), M_TEMP, M_WAITOK | M_ZERO); VERIFY(slabstbl != NULL); @@ -1600,24 +1618,24 @@ mbinit(void) * * This yields mcl_audit_t units, each one representing a page. 
*/ - PE_parse_boot_argn("mbuf_debug", &mbuf_debug, sizeof (mbuf_debug)); + PE_parse_boot_argn("mbuf_debug", &mbuf_debug, sizeof(mbuf_debug)); mbuf_debug |= mcache_getflags(); if (mbuf_debug & MCF_DEBUG) { int l; mcl_audit_t *mclad; maxclaudit = ((maxslabgrp << MBSHIFT) >> PAGE_SHIFT); - MALLOC(mclaudit, mcl_audit_t *, maxclaudit * sizeof (*mclaudit), + MALLOC(mclaudit, mcl_audit_t *, maxclaudit * sizeof(*mclaudit), M_TEMP, M_WAITOK | M_ZERO); VERIFY(mclaudit != NULL); for (l = 0, mclad = mclaudit; l < maxclaudit; l++) { - MALLOC(mclad[l].cl_audit, mcache_audit_t **, + MALLOC(mclad[l].cl_audit, mcache_audit_t * *, NMBPG * sizeof(mcache_audit_t *), M_TEMP, M_WAITOK | M_ZERO); VERIFY(mclad[l].cl_audit != NULL); } mcl_audit_con_cache = mcache_create("mcl_audit_contents", - AUDIT_CONTENTS_SIZE, sizeof (u_int64_t), 0, MCR_SLEEP); + AUDIT_CONTENTS_SIZE, sizeof(u_int64_t), 0, MCR_SLEEP); VERIFY(mcl_audit_con_cache != NULL); } mclverify = (mbuf_debug & MCF_VERIFY); @@ -1651,26 +1669,28 @@ mbinit(void) /* Calculate the number of pages assigned to the cluster pool */ mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE; - MALLOC(mcl_paddr, ppnum_t *, mcl_pages * sizeof (ppnum_t), + MALLOC(mcl_paddr, ppnum_t *, mcl_pages * sizeof(ppnum_t), M_TEMP, M_WAITOK); VERIFY(mcl_paddr != NULL); /* Register with the I/O Bus mapper */ mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages); - bzero((char *)mcl_paddr, mcl_pages * sizeof (ppnum_t)); + bzero((char *)mcl_paddr, mcl_pages * sizeof(ppnum_t)); embutl = (mbutl + (nmbclusters * MCLBYTES)); VERIFY(((embutl - mbutl) % MBIGCLBYTES) == 0); /* Prime up the freelist */ - PE_parse_boot_argn("initmcl", &initmcl, sizeof (initmcl)); + PE_parse_boot_argn("initmcl", &initmcl, sizeof(initmcl)); if (initmcl != 0) { - initmcl >>= NCLPBGSHIFT; /* become a 4K unit */ - if (initmcl > m_maxlimit(MC_BIGCL)) + initmcl >>= NCLPBGSHIFT; /* become a 4K unit */ + if (initmcl > m_maxlimit(MC_BIGCL)) { initmcl = m_maxlimit(MC_BIGCL); + } } - if (initmcl < m_minlimit(MC_BIGCL)) + if (initmcl < m_minlimit(MC_BIGCL)) { initmcl = m_minlimit(MC_BIGCL); + } lck_mtx_lock(mbuf_mlock); @@ -1699,7 +1719,7 @@ mbinit(void) NULL, &thread); thread_deallocate(thread); - ref_cache = mcache_create("mext_ref", sizeof (struct ext_ref), + ref_cache = mcache_create("mext_ref", sizeof(struct ext_ref), 0, 0, MCR_SLEEP); /* Create the cache for each class */ @@ -1728,11 +1748,13 @@ mbinit(void) * be populated) since it simplifies the code. */ if ((m_class(m) == MC_MBUF_16KCL || m_class(m) == MC_16KCL) && - njcl == 0) + njcl == 0) { flags |= MCF_NOCPUCACHE; + } - if (!mclfindleak) + if (!mclfindleak) { flags |= MCF_NOLEAKLOG; + } m_cache(m) = mcache_create_ext(m_cname(m), m_maxsize(m), allocfunc, freefunc, auditfunc, logfunc, mbuf_slab_notify, @@ -1793,7 +1815,6 @@ mbinit(void) } lck_rw_init(mbuf_tx_compl_tbl_lock, mbuf_tx_compl_tbl_lck_grp, mbuf_tx_compl_tbl_lck_attr); - } /* @@ -1818,15 +1839,16 @@ slab_alloc(mbuf_class_t class, int wait) * slabs, this probably doesn't make much of a difference. 
*/ if ((class == MC_MBUF || class == MC_CL || class == MC_BIGCL) - && (wait & MCR_COMP)) + && (wait & MCR_COMP)) { sp = (mcl_slab_t *)TAILQ_LAST(&m_slablist(class), mcl_slhead); - else + } else { sp = (mcl_slab_t *)TAILQ_FIRST(&m_slablist(class)); + } if (sp == NULL) { VERIFY(m_infree(class) == 0 && m_slab_cnt(class) == 0); /* The slab list for this class is empty */ - return (NULL); + return NULL; } VERIFY(m_infree(class) > 0); @@ -1852,8 +1874,9 @@ slab_alloc(mbuf_class_t class, int wait) mcache_audit_t *mca = mcl_audit_buf2mca(class, buf); mca->mca_uflags = 0; /* Save contents on mbuf objects only */ - if (class == MC_MBUF) + if (class == MC_MBUF) { mca->mca_uflags |= MB_SCVALID; + } } if (class == MC_CL) { @@ -1904,8 +1927,9 @@ slab_alloc(mbuf_class_t class, int wait) * If auditing is turned on, this check is * deferred until later in mbuf_slab_audit(). */ - if (mclaudit == NULL) + if (mclaudit == NULL) { _MCHECK((struct mbuf *)buf); + } /* * Since we have incremented the reference count above, * an mbuf slab (formerly a 4KB cluster slab that was cut @@ -1926,7 +1950,7 @@ slab_alloc(mbuf_class_t class, int wait) slab_remove(sp, class); } - return (buf); + return buf; } /* @@ -1951,7 +1975,7 @@ slab_free(mbuf_class_t class, mcache_obj_t *buf) while (mb_clalloc_busy) { mb_clalloc_waiters++; (void) msleep(mb_clalloc_waitchan, mbuf_mlock, - (PZERO-1), "m_clalloc", NULL); + (PZERO - 1), "m_clalloc", NULL); LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED); } @@ -2079,7 +2103,7 @@ slab_free(mbuf_class_t class, mcache_obj_t *buf) } reinit_supercl = true; } else if (class == MC_CL && sp->sl_refcnt == 0 && - m_total(class) >= (m_minlimit(class) + NCLPG) && + m_total(class) >= (m_minlimit(class) + NCLPG) && m_total(super_class) < m_maxlimit(super_class)) { int i = NCLPG; @@ -2127,9 +2151,10 @@ slab_free(mbuf_class_t class, mcache_obj_t *buf) slab_init(sp, super_class, SLF_MAPPED, sp->sl_base, sp->sl_base, PAGE_SIZE, 0, 1); - if (mclverify) + if (mclverify) { mcache_set_pattern(MCACHE_FREE_PATTERN, (caddr_t)sp->sl_base, sp->sl_len); + } ((mcache_obj_t *)(sp->sl_base))->obj_next = NULL; if (super_class == MC_BIGCL) { @@ -2146,8 +2171,9 @@ slab_free(mbuf_class_t class, mcache_obj_t *buf) } /* Reinsert the slab to the class's slab list */ - if (slab_is_detached(sp)) + if (slab_is_detached(sp)) { slab_insert(sp, class); + } /* We're done; let others enter */ mb_clalloc_busy = FALSE; @@ -2201,12 +2227,14 @@ mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait) (void) freelist_populate(class, 1, (wait & MCR_NOSLEEP) ? M_DONTWAIT : M_WAIT); - if (m_infree(class) > 0) + if (m_infree(class) > 0) { continue; + } /* Check if there's anything at the cache layer */ - if (mbuf_cached_above(class, wait)) + if (mbuf_cached_above(class, wait)) { break; + } /* watchdog checkpoint */ mbuf_watchdog(); @@ -2228,8 +2256,9 @@ mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait) * request without having to go to sleep. 
*/ if (mbuf_worker_ready && - mbuf_sleep(class, need, wait)) + mbuf_sleep(class, need, wait)) { break; + } LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED); } @@ -2238,7 +2267,7 @@ mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait) m_alloc_cnt(class) += num - need; lck_mtx_unlock(mbuf_mlock); - return (num - need); + return num - need; } /* @@ -2263,20 +2292,23 @@ mbuf_slab_free(void *arg, mcache_obj_t *list, __unused int purged) list->obj_next = NULL; slab_free(class, list); ++num; - if ((list = nlist) == NULL) + if ((list = nlist) == NULL) { break; + } } m_free_cnt(class) += num; - if ((w = mb_waiters) > 0) + if ((w = mb_waiters) > 0) { mb_waiters = 0; + } if (w) { mbwdog_logger("waking up all threads"); } lck_mtx_unlock(mbuf_mlock); - if (w != 0) + if (w != 0) { wakeup(mb_waitchan); + } } /* @@ -2309,13 +2341,15 @@ mbuf_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) ASSERT(!(mca->mca_uflags & MB_SCVALID)); } /* Record this transaction */ - if (mcltrace) + if (mcltrace) { mcache_buffer_log(mca, list, m_cache(class), &mb_start); + } - if (alloc) + if (alloc) { mca->mca_uflags |= MB_INUSE; - else + } else { mca->mca_uflags &= ~MB_INUSE; + } /* Unpair the object (unconditionally) */ mca->mca_uptr = NULL; lck_mtx_unlock(mbuf_mlock); @@ -2338,8 +2372,9 @@ mbuf_slab_notify(void *arg, u_int32_t reason) ASSERT(MBUF_CLASS_VALID(class)); - if (reason != MCN_RETRYALLOC) + if (reason != MCN_RETRYALLOC) { return; + } lck_mtx_lock(mbuf_mlock); if ((w = mb_waiters) > 0) { @@ -2351,8 +2386,9 @@ mbuf_slab_notify(void *arg, u_int32_t reason) } lck_mtx_unlock(mbuf_mlock); - if (w != 0) + if (w != 0) { wakeup(mb_waitchan); + } } /* @@ -2408,12 +2444,13 @@ cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num) (*list)->obj_next = NULL; list = *plist = &(*list)->obj_next; - if (--need == 0) + if (--need == 0) { break; + } } m_infree(class) -= (num - need); - return (num - need); + return num - need; } /* @@ -2495,8 +2532,9 @@ cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged) */ if (purged) { /* Restore constructed mbuf fields */ - if (mclaudit != NULL) + if (mclaudit != NULL) { mcl_audit_restore_mbuf(m, mca, TRUE); + } MEXT_MINREF(m) = 0; MEXT_REF(m) = 0; @@ -2516,8 +2554,9 @@ cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged) m->m_next = m->m_nextpkt = NULL; /* Save mbuf fields and make auditing happy */ - if (mclaudit != NULL) + if (mclaudit != NULL) { mcl_audit_mbuf(mca, o, FALSE, FALSE); + } VERIFY(m_total(class) > 0); m_total(class)--; @@ -2528,12 +2567,13 @@ cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged) /* And free the cluster */ ((mcache_obj_t *)cl)->obj_next = NULL; - if (class == MC_MBUF_CL) + if (class == MC_MBUF_CL) { slab_free(MC_CL, cl); - else if (class == MC_MBUF_BIGCL) + } else if (class == MC_MBUF_BIGCL) { slab_free(MC_BIGCL, cl); - else + } else { slab_free(MC_16KCL, cl); + } } ++num; @@ -2549,7 +2589,7 @@ cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged) mcache_free_ext(ref_cache, ref_list); } - return (num); + return num; } /* @@ -2593,7 +2633,7 @@ mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed, if (num == needed) { m_alloc_cnt(class) += num; lck_mtx_unlock(mbuf_mlock); - return (needed); + return needed; } lck_mtx_unlock(mbuf_mlock); @@ -2613,8 +2653,9 @@ mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed, * of the already-constructed composite objects are available. 
*/ wait |= MCR_COMP; - if (!(wait & MCR_NOSLEEP)) + if (!(wait & MCR_NOSLEEP)) { wait |= MCR_FAILOK; + } /* allocate mbufs */ needed = mcache_alloc_ext(m_cache(MC_MBUF), &mp_list, needed, wait); @@ -2699,12 +2740,13 @@ mbuf_cslab_alloc(void *arg, mcache_obj_t ***plist, unsigned int needed, mcache_set_pattern(MCACHE_FREE_PATTERN, m, m_maxsize(MC_MBUF)); - if (class == MC_MBUF_CL) + if (class == MC_MBUF_CL) { size = m_maxsize(MC_CL); - else if (class == MC_MBUF_BIGCL) + } else if (class == MC_MBUF_BIGCL) { size = m_maxsize(MC_BIGCL); - else + } else { size = m_maxsize(MC_16KCL); + } mcache_set_pattern(MCACHE_FREE_PATTERN, cl, size); @@ -2731,12 +2773,15 @@ fail: /* * Free up what's left of the above. */ - if (mp_list != NULL) + if (mp_list != NULL) { mcache_free_ext(m_cache(MC_MBUF), mp_list); - if (clp_list != NULL) + } + if (clp_list != NULL) { mcache_free_ext(m_cache(cl_class), clp_list); - if (ref_list != NULL) + } + if (ref_list != NULL) { mcache_free_ext(ref_cache, ref_list); + } lck_mtx_lock(mbuf_mlock); if (num > 0 || cnum > 0) { @@ -2744,11 +2789,12 @@ fail: VERIFY(m_total(class) <= m_maxlimit(class)); m_alloc_cnt(class) += num + cnum; } - if ((num + cnum) < want) + if ((num + cnum) < want) { m_fail_cnt(class) += (want - (num + cnum)); + } lck_mtx_unlock(mbuf_mlock); - return (num + cnum); + return num + cnum; } /* @@ -2770,16 +2816,18 @@ mbuf_cslab_free(void *arg, mcache_obj_t *list, int purged) num = cslab_free(class, list, purged); m_free_cnt(class) += num; - if ((w = mb_waiters) > 0) + if ((w = mb_waiters) > 0) { mb_waiters = 0; + } if (w) { mbwdog_logger("waking up all threads"); } lck_mtx_unlock(mbuf_mlock); - if (w != 0) + if (w != 0) { wakeup(mb_waitchan); + } } /* @@ -2801,12 +2849,13 @@ mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) void *cl; ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class)); - if (class == MC_MBUF_CL) + if (class == MC_MBUF_CL) { cl_class = MC_CL; - else if (class == MC_MBUF_BIGCL) + } else if (class == MC_MBUF_BIGCL) { cl_class = MC_BIGCL; - else + } else { cl_class = MC_16KCL; + } cl_size = m_maxsize(cl_class); while ((m = ms = (struct mbuf *)list) != NULL) { @@ -2814,33 +2863,37 @@ mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) /* Do the mbuf sanity checks and record its transaction */ mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); mcl_audit_mbuf(mca, m, TRUE, alloc); - if (mcltrace) + if (mcltrace) { mcache_buffer_log(mca, m, m_cache(class), &mb_start); + } - if (alloc) + if (alloc) { mca->mca_uflags |= MB_COMP_INUSE; - else + } else { mca->mca_uflags &= ~MB_COMP_INUSE; + } /* * Use the shadow mbuf in the audit structure if we are * freeing, since the contents of the actual mbuf has been * pattern-filled by the above call to mcl_audit_mbuf(). 
*/ - if (!alloc && mclverify) + if (!alloc && mclverify) { ms = MCA_SAVED_MBUF_PTR(mca); + } /* Do the cluster sanity checks and record its transaction */ cl = ms->m_ext.ext_buf; clsp = slab_get(cl); VERIFY(ms->m_flags == M_EXT && cl != NULL); VERIFY(m_get_rfa(ms) != NULL && MBUF_IS_COMPOSITE(ms)); - if (class == MC_MBUF_CL) + if (class == MC_MBUF_CL) { VERIFY(clsp->sl_refcnt >= 1 && clsp->sl_refcnt <= NCLPG); - else + } else { VERIFY(clsp->sl_refcnt >= 1 && clsp->sl_refcnt <= NBCLPG); + } if (class == MC_MBUF_16KCL) { int k; @@ -2855,13 +2908,15 @@ mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) mca = mcl_audit_buf2mca(cl_class, cl); mcl_audit_cluster(mca, cl, cl_size, alloc, FALSE); - if (mcltrace) + if (mcltrace) { mcache_buffer_log(mca, cl, m_cache(class), &mb_start); + } - if (alloc) + if (alloc) { mca->mca_uflags |= MB_COMP_INUSE; - else + } else { mca->mca_uflags &= ~MB_COMP_INUSE; + } lck_mtx_unlock(mbuf_mlock); list = list->obj_next; @@ -2870,9 +2925,8 @@ mbuf_cslab_audit(void *arg, mcache_obj_t *list, boolean_t alloc) static void m_vm_error_stats(uint32_t *cnt, uint64_t *ts, uint64_t *size, - uint64_t alloc_size, kern_return_t error) + uint64_t alloc_size, kern_return_t error) { - *cnt = *cnt + 1; *ts = net_uptime(); if (size) { @@ -2922,17 +2976,18 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize) /* Set if a buffer allocation needs allocation of multiple pages */ large_buffer = ((bufsize == m_maxsize(MC_16KCL)) && - PAGE_SIZE < M16KCLBYTES); + PAGE_SIZE < M16KCLBYTES); VERIFY(bufsize == m_maxsize(MC_BIGCL) || bufsize == m_maxsize(MC_16KCL)); VERIFY((bufsize == PAGE_SIZE) || (bufsize > PAGE_SIZE && bufsize == m_maxsize(MC_16KCL))); - if (bufsize == m_size(MC_BIGCL)) + if (bufsize == m_size(MC_BIGCL)) { class = MC_BIGCL; - else + } else { class = MC_16KCL; + } LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED); @@ -2947,7 +3002,7 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize) while (mb_clalloc_busy) { mb_clalloc_waiters++; (void) msleep(mb_clalloc_waitchan, mbuf_mlock, - (PZERO-1), "m_clalloc", NULL); + (PZERO - 1), "m_clalloc", NULL); LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED); } @@ -2959,8 +3014,9 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize) * to grow the pool asynchronously using the mbuf worker thread. 
*/ i = m_howmany(num, bufsize); - if (i <= 0 || (wait & M_DONTWAIT)) + if (i <= 0 || (wait & M_DONTWAIT)) { goto out; + } lck_mtx_unlock(mbuf_mlock); @@ -3104,8 +3160,9 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize) /* One for the entire 16KB */ sp = slab_get(m16kcl); - if (mclaudit != NULL) + if (mclaudit != NULL) { mcl_audit_init(m16kcl, &mca_list, NULL, 0, 1); + } VERIFY(sp->sl_refcnt == 0 && sp->sl_flags == 0); slab_init(sp, MC_16KCL, SLF_MAPPED, @@ -3136,8 +3193,9 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize) } VERIFY(mca_list == NULL && con_list == NULL); - if (!mb_peak_newreport && mbuf_report_usage(class)) + if (!mb_peak_newreport && mbuf_report_usage(class)) { mb_peak_newreport = TRUE; + } /* We're done; let others enter */ mb_clalloc_busy = FALSE; @@ -3146,7 +3204,7 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize) wakeup(mb_clalloc_waitchan); } - return (count); + return count; out: LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED); @@ -3180,8 +3238,9 @@ out: m_region_expand(MC_BIGCL) = i; } } - if (m_infree(MC_BIGCL) >= num) - return (1); + if (m_infree(MC_BIGCL) >= num) { + return 1; + } } else { if (i > 0) { /* @@ -3193,10 +3252,11 @@ out: m_region_expand(MC_16KCL) = i; } } - if (m_infree(MC_16KCL) >= num) - return (1); + if (m_infree(MC_16KCL) >= num) { + return 1; + } } - return (0); + return 0; } /* @@ -3217,8 +3277,9 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait) VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) || PAGE_SIZE == m_maxsize(MC_16KCL)); - if (m_maxsize(class) >= PAGE_SIZE) - return(m_clalloc(num, wait, m_maxsize(class)) != 0); + if (m_maxsize(class) >= PAGE_SIZE) { + return m_clalloc(num, wait, m_maxsize(class)) != 0; + } /* * The rest of the function will allocate pages and will slice @@ -3228,10 +3289,11 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait) numpages = (num * m_size(class) + PAGE_SIZE - 1) / PAGE_SIZE; /* Currently assume that pages are 4K or 16K */ - if (PAGE_SIZE == m_maxsize(MC_BIGCL)) + if (PAGE_SIZE == m_maxsize(MC_BIGCL)) { super_class = MC_BIGCL; - else + } else { super_class = MC_16KCL; + } i = m_clalloc(numpages, wait, m_maxsize(super_class)); @@ -3241,11 +3303,13 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait) for (count = 0; count < numpages; count++) { /* respect totals, minlimit, maxlimit */ if (m_total(super_class) <= m_minlimit(super_class) || - m_total(class) >= m_maxlimit(class)) + m_total(class) >= m_maxlimit(class)) { break; + } - if ((o = slab_alloc(super_class, wait)) == NULL) + if ((o = slab_alloc(super_class, wait)) == NULL) { break; + } struct mbuf *m = (struct mbuf *)o; union mcluster *c = (union mcluster *)o; @@ -3280,15 +3344,17 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait) VERIFY(m_total(super_class) >= 1); m_total(super_class)--; - if (super_class == MC_BIGCL) + if (super_class == MC_BIGCL) { mbstat.m_bigclusters = m_total(MC_BIGCL); + } m_total(class) += numobj; VERIFY(m_total(class) <= m_maxlimit(class)); m_infree(class) += numobj; - if (!mb_peak_newreport && mbuf_report_usage(class)) + if (!mb_peak_newreport && mbuf_report_usage(class)) { mb_peak_newreport = TRUE; + } i = numobj; if (class == MC_MBUF) { @@ -3337,14 +3403,15 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait) /* Insert into the mbuf or 2k or 4k slab list */ slab_insert(sp, class); - if ((i = mb_waiters) > 0) + if ((i = mb_waiters) > 0) { mb_waiters = 0; + } if (i != 0) { 
mbwdog_logger("waking up all threads"); wakeup(mb_waitchan); } } - return (count != 0); + return count != 0; } /* @@ -3359,8 +3426,9 @@ freelist_init(mbuf_class_t class) VERIFY(m_total(class) == 0); VERIFY(m_minlimit(class) > 0); - while (m_total(class) < m_minlimit(class)) + while (m_total(class) < m_minlimit(class)) { (void) freelist_populate(class, m_minlimit(class), M_WAIT); + } VERIFY(m_total(class) >= m_minlimit(class)); } @@ -3375,24 +3443,28 @@ mbuf_cached_above(mbuf_class_t class, int wait) { switch (class) { case MC_MBUF: - if (wait & MCR_COMP) - return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL)) || - !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL))); + if (wait & MCR_COMP) { + return !mcache_bkt_isempty(m_cache(MC_MBUF_CL)) || + !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL)); + } break; case MC_CL: - if (wait & MCR_COMP) - return (!mcache_bkt_isempty(m_cache(MC_MBUF_CL))); + if (wait & MCR_COMP) { + return !mcache_bkt_isempty(m_cache(MC_MBUF_CL)); + } break; case MC_BIGCL: - if (wait & MCR_COMP) - return (!mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL))); + if (wait & MCR_COMP) { + return !mcache_bkt_isempty(m_cache(MC_MBUF_BIGCL)); + } break; case MC_16KCL: - if (wait & MCR_COMP) - return (!mcache_bkt_isempty(m_cache(MC_MBUF_16KCL))); + if (wait & MCR_COMP) { + return !mcache_bkt_isempty(m_cache(MC_MBUF_16KCL)); + } break; case MC_MBUF_CL: @@ -3405,7 +3477,7 @@ mbuf_cached_above(mbuf_class_t class, int wait) /* NOTREACHED */ } - return (!mcache_bkt_isempty(m_cache(class))); + return !mcache_bkt_isempty(m_cache(class)); } /* @@ -3425,7 +3497,7 @@ mbuf_steal(mbuf_class_t class, unsigned int num) case MC_CL: case MC_BIGCL: case MC_16KCL: - return (FALSE); + return FALSE; case MC_MBUF_CL: case MC_MBUF_BIGCL: @@ -3437,8 +3509,9 @@ mbuf_steal(mbuf_class_t class, unsigned int num) } /* And destroy them to get back the raw objects */ - if (top != NULL) + if (top != NULL) { (void) cslab_free(class, top, 1); + } break; default: @@ -3446,7 +3519,7 @@ mbuf_steal(mbuf_class_t class, unsigned int num) /* NOTREACHED */ } - return (tot == num); + return tot == num; } static void @@ -3476,21 +3549,24 @@ m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp) m_wantpurge(MC_MBUF)++; m_wantpurge(MC_BIGCL)++; m_wantpurge(MC_MBUF_BIGCL)++; - if (!comp) + if (!comp) { m_wantpurge(MC_MBUF_CL)++; + } break; case MC_BIGCL: m_wantpurge(MC_MBUF)++; m_wantpurge(MC_CL)++; m_wantpurge(MC_MBUF_CL)++; - if (!comp) + if (!comp) { m_wantpurge(MC_MBUF_BIGCL)++; + } break; case MC_16KCL: - if (!comp) + if (!comp) { m_wantpurge(MC_MBUF_16KCL)++; + } break; default: @@ -3513,8 +3589,9 @@ m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp) * purge and disable the per-CPU caches layer when * we don't have enough; it's the last resort. */ - if (!mbuf_steal(m, num)) + if (!mbuf_steal(m, num)) { bmap |= (1 << m); + } } } @@ -3552,8 +3629,9 @@ m_get_common(int wait, short type, int hdr) int mcflags = MSLEEPF(wait); /* Is this due to a non-blocking retry? If so, then try harder */ - if (mcflags & MCR_NOSLEEP) + if (mcflags & MCR_NOSLEEP) { mcflags |= MCR_TRYHARD; + } m = mcache_alloc(m_cache(MC_MBUF), mcflags); if (m != NULL) { @@ -3563,46 +3641,46 @@ m_get_common(int wait, short type, int hdr) #if CONFIG_MACF_NET if (hdr && mac_init_mbuf(m, wait) != 0) { m_free(m); - return (NULL); + return NULL; } #endif /* MAC_NET */ } - return (m); + return m; } /* * Space allocation routines; these are also available as macros * for critical paths. 
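 * A typical call site, as a hedged illustration (M_DONTWAIT and MT_DATA
 * are the usual mbuf constants; the surrounding error handling is
 * invented):
 *
 *	struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
 *	if (m == NULL)
 *		return ENOBUFS;			// non-blocking allocation failed
 *
 * The _M_GET*()/_MGET*() macro forms defined just below expand to the
 * same m_get_common() calls.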
*/ -#define _M_GET(wait, type) m_get_common(wait, type, 0) -#define _M_GETHDR(wait, type) m_get_common(wait, type, 1) -#define _M_RETRY(wait, type) _M_GET(wait, type) -#define _M_RETRYHDR(wait, type) _M_GETHDR(wait, type) -#define _MGET(m, how, type) ((m) = _M_GET(how, type)) -#define _MGETHDR(m, how, type) ((m) = _M_GETHDR(how, type)) +#define _M_GET(wait, type) m_get_common(wait, type, 0) +#define _M_GETHDR(wait, type) m_get_common(wait, type, 1) +#define _M_RETRY(wait, type) _M_GET(wait, type) +#define _M_RETRYHDR(wait, type) _M_GETHDR(wait, type) +#define _MGET(m, how, type) ((m) = _M_GET(how, type)) +#define _MGETHDR(m, how, type) ((m) = _M_GETHDR(how, type)) struct mbuf * m_get(int wait, int type) { - return (_M_GET(wait, type)); + return _M_GET(wait, type); } struct mbuf * m_gethdr(int wait, int type) { - return (_M_GETHDR(wait, type)); + return _M_GETHDR(wait, type); } struct mbuf * m_retry(int wait, int type) { - return (_M_RETRY(wait, type)); + return _M_RETRY(wait, type); } struct mbuf * m_retryhdr(int wait, int type) { - return (_M_RETRYHDR(wait, type)); + return _M_RETRYHDR(wait, type); } struct mbuf * @@ -3611,9 +3689,10 @@ m_getclr(int wait, int type) struct mbuf *m; _MGET(m, wait, type); - if (m != NULL) + if (m != NULL) { bzero(MTOD(m, caddr_t), MLEN); - return (m); + } + return m; } static int @@ -3638,11 +3717,11 @@ m_free_paired(struct mbuf *m) } while (!OSCompareAndSwap16(oprefcnt, prefcnt, addr)); if (prefcnt > 1) { - return (1); + return 1; } else if (prefcnt == 1) { (*(m_get_ext_free(m)))(m->m_ext.ext_buf, m->m_ext.ext_size, m_get_ext_arg(m)); - return (1); + return 1; } else if (prefcnt == 0) { VERIFY(MBUF_IS_PAIRED(m)); @@ -3685,7 +3764,7 @@ m_free_paired(struct mbuf *m) * count on the external cluster held for the paired mbuf should * now be dropped. */ - return (0); + return 0; } struct mbuf * @@ -3693,8 +3772,9 @@ m_free(struct mbuf *m) { struct mbuf *n = m->m_next; - if (m->m_type == MT_FREE) + if (m->m_type == MT_FREE) { panic("m_free: freeing an already freed mbuf"); + } if (m->m_flags & M_PKTHDR) { /* Check for scratch area overflow */ @@ -3710,8 +3790,9 @@ m_free(struct mbuf *m) u_int32_t composite; m_ext_free_func_t m_free_func; - if (MBUF_IS_PAIRED(m) && m_free_paired(m)) - return (n); + if (MBUF_IS_PAIRED(m) && m_free_paired(m)) { + return n; + } refcnt = m_decref(m); composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE); @@ -3755,7 +3836,7 @@ m_free(struct mbuf *m) VERIFY(m_free_func == m_16kfree); mcache_free(m_cache(MC_MBUF_16KCL), m); } - return (n); + return n; } } @@ -3770,7 +3851,7 @@ m_free(struct mbuf *m) mcache_free(m_cache(MC_MBUF), m); - return (n); + return n; } __private_extern__ struct mbuf * @@ -3786,8 +3867,9 @@ m_clattach(struct mbuf *m, int type, caddr_t extbuf, * allocate a new one or free any existing below. */ if ((m != NULL && MBUF_IS_PAIRED(m)) || - (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)) - return (NULL); + (m == NULL && (m = _M_GETHDR(wait, type)) == NULL)) { + return NULL; + } if (m->m_flags & M_EXT) { u_int16_t refcnt; @@ -3839,15 +3921,16 @@ m_clattach(struct mbuf *m, int type, caddr_t extbuf, * Allocate a new mbuf, since we didn't divorce * the composite mbuf + cluster pair above. 
*/ - if ((m = _M_GETHDR(wait, type)) == NULL) - return (NULL); + if ((m = _M_GETHDR(wait, type)) == NULL) { + return NULL; + } } } if (rfa == NULL && (rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) { m_free(m); - return (NULL); + return NULL; } if (!pair) { @@ -3858,7 +3941,7 @@ m_clattach(struct mbuf *m, int type, caddr_t extbuf, 1, 1, 1, EXTF_PAIRED, 0, m); } - return (m); + return m; } /* @@ -3873,8 +3956,9 @@ m_getcl(int wait, int type, int flags) int hdr = (flags & M_PKTHDR); /* Is this due to a non-blocking retry? If so, then try harder */ - if (mcflags & MCR_NOSLEEP) + if (mcflags & MCR_NOSLEEP) { mcflags |= MCR_TRYHARD; + } m = mcache_alloc(m_cache(MC_MBUF_CL), mcflags); if (m != NULL) { @@ -3899,11 +3983,11 @@ m_getcl(int wait, int type, int flags) #if CONFIG_MACF_NET if (hdr && mac_init_mbuf(m, wait) != 0) { m_freem(m); - return (NULL); + return NULL; } #endif /* MAC_NET */ } - return (m); + return m; } /* m_mclget() add an mbuf cluster to a normal mbuf */ @@ -3912,8 +3996,9 @@ m_mclget(struct mbuf *m, int wait) { struct ext_ref *rfa; - if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) - return (m); + if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) { + return m; + } m->m_ext.ext_buf = m_mclalloc(wait); if (m->m_ext.ext_buf != NULL) { @@ -3921,7 +4006,7 @@ m_mclget(struct mbuf *m, int wait) } else { mcache_free(ref_cache, rfa); } - return (m); + return m; } /* Allocate an mbuf cluster */ @@ -3931,10 +4016,11 @@ m_mclalloc(int wait) int mcflags = MSLEEPF(wait); /* Is this due to a non-blocking retry? If so, then try harder */ - if (mcflags & MCR_NOSLEEP) + if (mcflags & MCR_NOSLEEP) { mcflags |= MCR_TRYHARD; + } - return (mcache_alloc(m_cache(MC_CL), mcflags)); + return mcache_alloc(m_cache(MC_CL), mcflags); } /* Free an mbuf cluster */ @@ -3951,12 +4037,13 @@ m_mclfree(caddr_t p) int m_mclhasreference(struct mbuf *m) { - if (!(m->m_flags & M_EXT)) - return (0); + if (!(m->m_flags & M_EXT)) { + return 0; + } ASSERT(m_get_rfa(m) != NULL); - return ((MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0); + return (MEXT_FLAGS(m) & EXTF_READONLY) ? 1 : 0; } __private_extern__ caddr_t @@ -3965,10 +4052,11 @@ m_bigalloc(int wait) int mcflags = MSLEEPF(wait); /* Is this due to a non-blocking retry? If so, then try harder */ - if (mcflags & MCR_NOSLEEP) + if (mcflags & MCR_NOSLEEP) { mcflags |= MCR_TRYHARD; + } - return (mcache_alloc(m_cache(MC_BIGCL), mcflags)); + return mcache_alloc(m_cache(MC_BIGCL), mcflags); } __private_extern__ void @@ -3983,8 +4071,9 @@ m_mbigget(struct mbuf *m, int wait) { struct ext_ref *rfa; - if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) - return (m); + if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) { + return m; + } m->m_ext.ext_buf = m_bigalloc(wait); if (m->m_ext.ext_buf != NULL) { @@ -3992,7 +4081,7 @@ m_mbigget(struct mbuf *m, int wait) } else { mcache_free(ref_cache, rfa); } - return (m); + return m; } __private_extern__ caddr_t @@ -4001,10 +4090,11 @@ m_16kalloc(int wait) int mcflags = MSLEEPF(wait); /* Is this due to a non-blocking retry? 
If so, then try harder */ - if (mcflags & MCR_NOSLEEP) + if (mcflags & MCR_NOSLEEP) { mcflags |= MCR_TRYHARD; + } - return (mcache_alloc(m_cache(MC_16KCL), mcflags)); + return mcache_alloc(m_cache(MC_16KCL), mcflags); } __private_extern__ void @@ -4019,8 +4109,9 @@ m_m16kget(struct mbuf *m, int wait) { struct ext_ref *rfa; - if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) - return (m); + if ((rfa = mcache_alloc(ref_cache, MSLEEPF(wait))) == NULL) { + return m; + } m->m_ext.ext_buf = m_16kalloc(wait); if (m->m_ext.ext_buf != NULL) { @@ -4028,7 +4119,7 @@ m_m16kget(struct mbuf *m, int wait) } else { mcache_free(ref_cache, rfa); } - return (m); + return m; } /* @@ -4049,14 +4140,15 @@ m_copy_pkthdr(struct mbuf *to, struct mbuf *from) /* We will be taking over the tags of 'to' */ m_tag_delete_chain(to, NULL); } - to->m_pkthdr = from->m_pkthdr; /* especially tags */ - m_classifier_init(from, 0); /* purge classifier info */ - m_tag_init(from, 1); /* purge all tags from src */ - m_scratch_init(from); /* clear src scratch area */ + to->m_pkthdr = from->m_pkthdr; /* especially tags */ + m_classifier_init(from, 0); /* purge classifier info */ + m_tag_init(from, 1); /* purge all tags from src */ + m_scratch_init(from); /* clear src scratch area */ to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT); - if ((to->m_flags & M_EXT) == 0) + if ((to->m_flags & M_EXT) == 0) { to->m_data = to->m_pktdat; - m_redzone_init(to); /* setup red zone on dst */ + } + m_redzone_init(to); /* setup red zone on dst */ } /* @@ -4079,12 +4171,13 @@ m_dup_pkthdr(struct mbuf *to, struct mbuf *from, int how) m_tag_delete_chain(to, NULL); } to->m_flags = (from->m_flags & M_COPYFLAGS) | (to->m_flags & M_EXT); - if ((to->m_flags & M_EXT) == 0) + if ((to->m_flags & M_EXT) == 0) { to->m_data = to->m_pktdat; + } to->m_pkthdr = from->m_pkthdr; - m_redzone_init(to); /* setup red zone on dst */ - m_tag_init(to, 0); /* preserve dst static tags */ - return (m_tag_copy_chain(to, from, how)); + m_redzone_init(to); /* setup red zone on dst */ + m_tag_init(to, 0); /* preserve dst static tags */ + return m_tag_copy_chain(to, from, how); } void @@ -4093,7 +4186,7 @@ m_copy_pftag(struct mbuf *to, struct mbuf *from) memcpy(m_pftag(to), m_pftag(from), sizeof(struct pf_mtag)); #if PF_ECN m_pftag(to)->pftag_hdr = NULL; - m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET|PF_TAG_HDR_INET6); + m_pftag(to)->pftag_flags &= ~(PF_TAG_HDR_INET | PF_TAG_HDR_INET6); #endif /* PF_ECN */ } @@ -4105,17 +4198,20 @@ m_classifier_init(struct mbuf *m, uint32_t pktf_mask) m->m_pkthdr.pkt_proto = 0; m->m_pkthdr.pkt_flowsrc = 0; m->m_pkthdr.pkt_flowid = 0; - m->m_pkthdr.pkt_flags &= pktf_mask; /* caller-defined mask */ + m->m_pkthdr.pkt_flags &= pktf_mask; /* caller-defined mask */ /* preserve service class and interface info for loopback packets */ - if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP)) + if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP)) { (void) m_set_service_class(m, MBUF_SC_BE); - if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) + } + if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) { m->m_pkthdr.pkt_ifainfo = 0; + } /* * Preserve timestamp if requested */ - if (!(m->m_pkthdr.pkt_flags & PKTF_TS_VALID)) + if (!(m->m_pkthdr.pkt_flags & PKTF_TS_VALID)) { m->m_pkthdr.pkt_timestamp = 0; + } } void @@ -4174,16 +4270,18 @@ m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, * overrides MCR_SLEEP, since this thread will not go to sleep * if we can't get all the buffers. 
*/ - if (!wantall || (mcflags & MCR_NOSLEEP)) + if (!wantall || (mcflags & MCR_NOSLEEP)) { mcflags |= MCR_TRYHARD; + } /* Allocate the composite mbuf + cluster elements from the cache */ - if (bufsize == m_maxsize(MC_CL)) + if (bufsize == m_maxsize(MC_CL)) { cp = m_cache(MC_MBUF_CL); - else if (bufsize == m_maxsize(MC_BIGCL)) + } else if (bufsize == m_maxsize(MC_BIGCL)) { cp = m_cache(MC_MBUF_BIGCL); - else + } else { cp = m_cache(MC_MBUF_16KCL); + } needed = mcache_alloc_ext(cp, &mp_list, needed, mcflags); for (pnum = 0; pnum < needed; pnum++) { @@ -4219,14 +4317,16 @@ m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, } *np = m; - if (num_with_pkthdrs > 0) + if (num_with_pkthdrs > 0) { np = &m->m_nextpkt; - else + } else { np = &m->m_next; + } } ASSERT(pnum != *num_needed || mp_list == NULL); - if (mp_list != NULL) + if (mp_list != NULL) { mcache_free_ext(cp, mp_list); + } if (pnum > 0) { mtype_stat_add(MT_DATA, pnum); @@ -4234,19 +4334,20 @@ m_getpackets_internal(unsigned int *num_needed, int num_with_pkthdrs, } if (wantall && (pnum != *num_needed)) { - if (top != NULL) + if (top != NULL) { m_freem_list(top); - return (NULL); + } + return NULL; } if (pnum > *num_needed) { printf("%s: File a radar related to . \ needed = %u, pnum = %u, num_needed = %u \n", - __func__, needed, pnum, *num_needed); + __func__, needed, pnum, *num_needed); } *num_needed = pnum; - return (top); + return top; } /* @@ -4274,8 +4375,9 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, mcache_obj_t *mp_list = NULL, *rmp_list = NULL; mcache_t *cp = NULL, *rcp = NULL; - if (*numlist == 0) - return (NULL); + if (*numlist == 0) { + return NULL; + } top = NULL; np = &top; @@ -4285,10 +4387,11 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, bufsize = packetlen; } else if (packetlen > m_maxsize(MC_CL)) { /* Use 4KB if jumbo cluster pool isn't available */ - if (packetlen <= m_maxsize(MC_BIGCL) || njcl == 0) + if (packetlen <= m_maxsize(MC_BIGCL) || njcl == 0) { bufsize = m_maxsize(MC_BIGCL); - else + } else { bufsize = m_maxsize(MC_16KCL); + } } else { bufsize = m_maxsize(MC_CL); } @@ -4297,7 +4400,7 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, (wantsize == m_maxsize(MC_16KCL) && njcl > 0)) { bufsize = wantsize; } else { - return (NULL); + return NULL; } if (bufsize <= MHLEN) { @@ -4320,7 +4423,7 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, if (maxsegments != NULL) { if (*maxsegments && nsegs > *maxsegments) { *maxsegments = nsegs; - return (NULL); + return NULL; } *maxsegments = nsegs; } @@ -4331,8 +4434,9 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, * overrides MCR_SLEEP, since this thread will not go to sleep * if we can't get all the buffers. */ - if (!wantall || (mcflags & MCR_NOSLEEP)) + if (!wantall || (mcflags & MCR_NOSLEEP)) { mcflags |= MCR_TRYHARD; + } /* * Simple case where all elements in the lists/chains are mbufs. @@ -4355,8 +4459,9 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, * trim the number down (if it's odd) in order to avoid * creating a partial segment chain.
*/ - if (bufsize > MHLEN && (needed & 0x1)) + if (bufsize > MHLEN && (needed & 0x1)) { needed--; + } while (num < needed) { struct mbuf *m; @@ -4394,8 +4499,9 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, num /= nsegs; /* We've got them all; return to caller */ - if (num == *numlist) - return (top); + if (num == *numlist) { + return top; + } goto fail; } @@ -4426,12 +4532,13 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, * in the chain use the same cluster size; use the * smaller of the cluster sizes. */ - if (njcl > 0 && resid > m_maxsize(MC_BIGCL)) + if (njcl > 0 && resid > m_maxsize(MC_BIGCL)) { r_bufsize = m_maxsize(MC_16KCL); - else if (resid > m_maxsize(MC_CL)) + } else if (resid > m_maxsize(MC_CL)) { r_bufsize = m_maxsize(MC_BIGCL); - else + } else { r_bufsize = m_maxsize(MC_CL); + } } else { /* Use the same cluster size as the other segments */ resid = 0; @@ -4446,16 +4553,18 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, * elements that can be allocated so that we know how many * segment chains we can afford to create. */ - if (r_bufsize <= m_maxsize(MC_CL)) + if (r_bufsize <= m_maxsize(MC_CL)) { rcp = m_cache(MC_MBUF_CL); - else if (r_bufsize <= m_maxsize(MC_BIGCL)) + } else if (r_bufsize <= m_maxsize(MC_BIGCL)) { rcp = m_cache(MC_MBUF_BIGCL); - else + } else { rcp = m_cache(MC_MBUF_16KCL); + } needed = mcache_alloc_ext(rcp, &rmp_list, *numlist, mcflags); - if (needed == 0) + if (needed == 0) { goto fail; + } /* This is temporarily reduced for calculation */ ASSERT(nsegs > 1); @@ -4466,18 +4575,20 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, * Attempt to allocate the rest of the composite mbuf + cluster * elements for the number of segment chains that we need. 
*/ - if (bufsize <= m_maxsize(MC_CL)) + if (bufsize <= m_maxsize(MC_CL)) { cp = m_cache(MC_MBUF_CL); - else if (bufsize <= m_maxsize(MC_BIGCL)) + } else if (bufsize <= m_maxsize(MC_BIGCL)) { cp = m_cache(MC_MBUF_BIGCL); - else + } else { cp = m_cache(MC_MBUF_16KCL); + } needed = mcache_alloc_ext(cp, &mp_list, needed * nsegs, mcflags); /* Round it down to avoid creating a partial segment chain */ needed = (needed / nsegs) * nsegs; - if (needed == 0) + if (needed == 0) { goto fail; + } if (resid > 0) { /* @@ -4522,8 +4633,9 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, flag = MEXT_FLAGS(m); pkthdr = (nsegs == 1 || (num % nsegs) == 1); - if (pkthdr) + if (pkthdr) { first = m; + } MBUF_INIT(m, pkthdr, MT_DATA); if (m_free_func == m_16kfree) { MBUF_16KCL_INIT(m, cl, rfa, 1, flag); @@ -4541,13 +4653,15 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, #endif /* MAC_NET */ *np = m; - if ((num % nsegs) == 0) + if ((num % nsegs) == 0) { np = &first->m_nextpkt; - else + } else { np = &m->m_next; + } - if (num == needed) + if (num == needed) { break; + } } if (num > 0) { @@ -4560,21 +4674,23 @@ m_allocpacket_internal(unsigned int *numlist, size_t packetlen, /* We've got them all; return to caller */ if (num == *numlist) { ASSERT(mp_list == NULL && rmp_list == NULL); - return (top); + return top; } fail: /* Free up what's left of the above */ - if (mp_list != NULL) + if (mp_list != NULL) { mcache_free_ext(cp, mp_list); - if (rmp_list != NULL) + } + if (rmp_list != NULL) { mcache_free_ext(rcp, rmp_list); + } if (wantall && top != NULL) { m_freem(top); - return (NULL); + return NULL; } *numlist = num; - return (top); + return top; } /* @@ -4586,8 +4702,8 @@ m_getpacket_how(int wait) { unsigned int num_needed = 1; - return (m_getpackets_internal(&num_needed, 1, wait, 1, - m_maxsize(MC_CL))); + return m_getpackets_internal(&num_needed, 1, wait, 1, + m_maxsize(MC_CL)); } /* @@ -4599,8 +4715,8 @@ m_getpacket(void) { unsigned int num_needed = 1; - return (m_getpackets_internal(&num_needed, 1, M_WAIT, 1, - m_maxsize(MC_CL))); + return m_getpackets_internal(&num_needed, 1, M_WAIT, 1, + m_maxsize(MC_CL)); } /* @@ -4615,8 +4731,8 @@ m_getpackets(int num_needed, int num_with_pkthdrs, int how) { unsigned int n = num_needed; - return (m_getpackets_internal(&n, num_with_pkthdrs, how, 0, - m_maxsize(MC_CL))); + return m_getpackets_internal(&n, num_with_pkthdrs, how, 0, + m_maxsize(MC_CL)); } /* @@ -4634,14 +4750,15 @@ m_getpackethdrs(int num_needed, int how) while (num_needed--) { m = _M_RETRYHDR(how, MT_DATA); - if (m == NULL) + if (m == NULL) { break; + } *np = m; np = &m->m_nextpkt; } - return (top); + return top; } /* @@ -4676,8 +4793,9 @@ m_freem_list(struct mbuf *m) u_int16_t refcnt; m_ext_free_func_t m_free_func; - if (m->m_type == MT_FREE) + if (m->m_type == MT_FREE) { panic("m_free: freeing an already freed mbuf"); + } if (m->m_flags & M_PKTHDR) { /* Check for scratch area overflow */ @@ -4728,16 +4846,17 @@ m_freem_list(struct mbuf *m) * Amortize the costs of atomic operations * by doing them at the end, if possible. 
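*/
/*
 * [Illustrative aside, not part of the original patch: the batching
 * pattern described above, reduced to its core.  Counts are gathered
 * in cheap locals during the walk and folded into the shared type
 * statistics with one atomic update per type; the helper name and the
 * two-type restriction are hypothetical simplifications.]
 */
#if 0 /* standalone sketch only */
static void
example_batched_stat_sub(struct mbuf *chain)
{
	int mt_data = 0, mt_header = 0;
	struct mbuf *m;

	for (m = chain; m != NULL; m = m->m_next) {
		if (m->m_type == MT_DATA) {
			mt_data++;		/* local, no atomics */
		} else if (m->m_type == MT_HEADER) {
			mt_header++;
		}
	}
	/* one atomic update per type instead of one per mbuf */
	if (mt_data > 0) {
		mtype_stat_sub(MT_DATA, mt_data);
	}
	if (mt_header > 0) {
		mtype_stat_sub(MT_HEADER, mt_header);
	}
}
#endif
/*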
*/ - if (m->m_type == MT_DATA) + if (m->m_type == MT_DATA) { mt_data++; - else if (m->m_type == MT_HEADER) + } else if (m->m_type == MT_HEADER) { mt_header++; - else if (m->m_type == MT_SONAME) + } else if (m->m_type == MT_SONAME) { mt_soname++; - else if (m->m_type == MT_TAG) + } else if (m->m_type == MT_TAG) { mt_tag++; - else + } else { mtype_stat_dec(m->m_type); + } m->m_type = MT_FREE; m->m_flags = M_EXT; @@ -4767,16 +4886,17 @@ simple_free: * Amortize the costs of atomic operations * by doing them at the end, if possible. */ - if (m->m_type == MT_DATA) + if (m->m_type == MT_DATA) { mt_data++; - else if (m->m_type == MT_HEADER) + } else if (m->m_type == MT_HEADER) { mt_header++; - else if (m->m_type == MT_SONAME) + } else if (m->m_type == MT_SONAME) { mt_soname++; - else if (m->m_type == MT_TAG) + } else if (m->m_type == MT_TAG) { mt_tag++; - else if (m->m_type != MT_FREE) + } else if (m->m_type != MT_FREE) { mtype_stat_dec(m->m_type); + } m->m_type = MT_FREE; m->m_flags = m->m_len = 0; @@ -4791,42 +4911,56 @@ simple_free: m = nextpkt; } - if (mt_free > 0) + if (mt_free > 0) { mtype_stat_add(MT_FREE, mt_free); - if (mt_data > 0) + } + if (mt_data > 0) { mtype_stat_sub(MT_DATA, mt_data); - if (mt_header > 0) + } + if (mt_header > 0) { mtype_stat_sub(MT_HEADER, mt_header); - if (mt_soname > 0) + } + if (mt_soname > 0) { mtype_stat_sub(MT_SONAME, mt_soname); - if (mt_tag > 0) + } + if (mt_tag > 0) { mtype_stat_sub(MT_TAG, mt_tag); + } - if (mp_list != NULL) + if (mp_list != NULL) { mcache_free_ext(m_cache(MC_MBUF), mp_list); - if (mcl_list != NULL) + } + if (mcl_list != NULL) { mcache_free_ext(m_cache(MC_CL), mcl_list); - if (mbc_list != NULL) + } + if (mbc_list != NULL) { mcache_free_ext(m_cache(MC_BIGCL), mbc_list); - if (m16k_list != NULL) + } + if (m16k_list != NULL) { mcache_free_ext(m_cache(MC_16KCL), m16k_list); - if (m_mcl_list != NULL) + } + if (m_mcl_list != NULL) { mcache_free_ext(m_cache(MC_MBUF_CL), m_mcl_list); - if (m_mbc_list != NULL) + } + if (m_mbc_list != NULL) { mcache_free_ext(m_cache(MC_MBUF_BIGCL), m_mbc_list); - if (m_m16k_list != NULL) + } + if (m_m16k_list != NULL) { mcache_free_ext(m_cache(MC_MBUF_16KCL), m_m16k_list); - if (ref_list != NULL) + } + if (ref_list != NULL) { mcache_free_ext(ref_cache, ref_list); + } - return (pktcount); + return pktcount; } void m_freem(struct mbuf *m) { - while (m != NULL) + while (m != NULL) { m = m_free(m); + } } /* @@ -4852,7 +4986,7 @@ m_align(struct mbuf *m, int len) VERIFY(len >= 0); VERIFY(len <= M_SIZE(m)); adjust = M_SIZE(m) - len; - m->m_data += adjust &~ (sizeof(long) - 1); + m->m_data += adjust & ~(sizeof(long) - 1); } /* @@ -4867,7 +5001,7 @@ m_prepend(struct mbuf *m, int len, int how) _MGET(mn, how, m->m_type); if (mn == NULL) { m_freem(m); - return (NULL); + return NULL; } if (m->m_flags & M_PKTHDR) { M_COPY_PKTHDR(mn, m); @@ -4883,7 +5017,7 @@ m_prepend(struct mbuf *m, int len, int how) M_ALIGN(m, len); } m->m_len = len; - return (m); + return m; } /* @@ -4900,9 +5034,10 @@ m_prepend_2(struct mbuf *m, int len, int how, int align) } else { m = m_prepend(m, len, how); } - if ((m) && (m->m_flags & M_PKTHDR)) + if ((m) && (m->m_flags & M_PKTHDR)) { m->m_pkthdr.len += len; - return (m); + } + return m; } /* @@ -4920,8 +5055,9 @@ m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode) struct mbuf *top; int copyhdr = 0; - if (off < 0 || len < 0) + if (off < 0 || len < 0) { panic("m_copym: invalid offset %d or len %d", off, len); + } VERIFY((mode != M_COPYM_MUST_COPY_HDR && mode != M_COPYM_MUST_MOVE_HDR) 
|| (m->m_flags & M_PKTHDR)); @@ -4933,8 +5069,9 @@ m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode) } while (off >= m->m_len) { - if (m->m_next == NULL) + if (m->m_next == NULL) { panic("m_copym: invalid mbuf chain"); + } off -= m->m_len; m = m->m_next; } @@ -4943,19 +5080,22 @@ m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode) while (len > 0) { if (m == NULL) { - if (len != M_COPYALL) + if (len != M_COPYALL) { panic("m_copym: len != M_COPYALL"); + } break; } - if (copyhdr) + if (copyhdr) { n = _M_RETRYHDR(wait, m->m_type); - else + } else { n = _M_RETRY(wait, m->m_type); + } *np = n; - if (n == NULL) + if (n == NULL) { goto nospace; + } if (copyhdr != 0) { if ((mode == M_COPYM_MOVE_HDR) || @@ -4963,13 +5103,15 @@ m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode) M_COPY_PKTHDR(n, mhdr); } else if ((mode == M_COPYM_COPY_HDR) || (mode == M_COPYM_MUST_COPY_HDR)) { - if (m_dup_pkthdr(n, mhdr, wait) == 0) + if (m_dup_pkthdr(n, mhdr, wait) == 0) { goto nospace; + } } - if (len == M_COPYALL) + if (len == M_COPYALL) { n->m_pkthdr.len -= off0; - else + } else { n->m_pkthdr.len = len; + } copyhdr = 0; /* * There is data to copy from the packet header mbuf @@ -4990,41 +5132,45 @@ m_copym_mode(struct mbuf *m, int off0, int len, int wait, uint32_t mode) /* * Limit to the capacity of the destination */ - if (n->m_flags & M_PKTHDR) + if (n->m_flags & M_PKTHDR) { n->m_len = MIN(n->m_len, MHLEN); - else + } else { n->m_len = MIN(n->m_len, MLEN); + } - if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) + if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) { panic("%s n %p copy overflow", - __func__, n); + __func__, n); + } - bcopy(MTOD(m, caddr_t)+off, MTOD(n, caddr_t), + bcopy(MTOD(m, caddr_t) + off, MTOD(n, caddr_t), (unsigned)n->m_len); } - if (len != M_COPYALL) + if (len != M_COPYALL) { len -= n->m_len; + } off = 0; m = m->m_next; np = &n->m_next; } - if (top == NULL) + if (top == NULL) { MCFail++; + } - return (top); + return top; nospace: m_freem(top); MCFail++; - return (NULL); + return NULL; } struct mbuf * m_copym(struct mbuf *m, int off0, int len, int wait) { - return (m_copym_mode(m, off0, len, wait, M_COPYM_MOVE_HDR)); + return m_copym_mode(m, off0, len, wait, M_COPYM_MOVE_HDR); } /* @@ -5046,8 +5192,9 @@ m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait, mcache_obj_t *list = NULL; int needed = 0; - if (off == 0 && (m->m_flags & M_PKTHDR)) + if (off == 0 && (m->m_flags & M_PKTHDR)) { copyhdr = 1; + } if (m_lastm != NULL && *m_lastm != NULL) { m = *m_lastm; @@ -5074,12 +5221,14 @@ m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait, * MCR_TRYHARD so that we may reclaim buffers from other places * before giving up. 
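*/
/*
 * [Illustrative aside, not part of the original patch: the
 * NOSLEEP-implies-TRYHARD escalation recurs throughout this file.  A
 * caller that cannot block gets a single aggressive pass over the
 * caches instead of a sleep; sketch below, helper name hypothetical.]
 */
#if 0 /* standalone sketch only */
static int
example_mcflags(int wait)
{
	int mcflags = MSLEEPF(wait);	/* MCR_NOSLEEP iff caller can't block */

	if (mcflags & MCR_NOSLEEP) {
		mcflags |= MCR_TRYHARD;	/* reclaim harder before failing */
	}
	return mcflags;
}
#endif
/*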
*/ - if (mcflags & MCR_NOSLEEP) + if (mcflags & MCR_NOSLEEP) { mcflags |= MCR_TRYHARD; + } if (mcache_alloc_ext(m_cache(MC_MBUF), &list, needed, - mcflags) != needed) + mcflags) != needed) { goto nospace; + } needed = 0; while (len > 0) { @@ -5113,8 +5262,9 @@ m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait, M_COPY_PKTHDR(n, m); } else if ((mode == M_COPYM_COPY_HDR) || (mode == M_COPYM_MUST_COPY_HDR)) { - if (m_dup_pkthdr(n, m, wait) == 0) + if (m_dup_pkthdr(n, m, wait) == 0) { goto nospace; + } } n->m_pkthdr.len = len; copyhdr = 0; @@ -5127,11 +5277,12 @@ m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait, n->m_data = m->m_data + off; n->m_flags |= M_EXT; } else { - if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) + if (MTOD(n, char *) + n->m_len > ((char *)n) + MSIZE) { panic("%s n %p copy overflow", - __func__, n); + __func__, n); + } - bcopy(MTOD(m, caddr_t)+off, MTOD(n, caddr_t), + bcopy(MTOD(m, caddr_t) + off, MTOD(n, caddr_t), (unsigned)n->m_len); } len -= n->m_len; @@ -5158,15 +5309,17 @@ m_copym_with_hdrs(struct mbuf *m0, int off0, int len0, int wait, mtype_stat_sub(MT_FREE, needed + 1); ASSERT(list == NULL); - return (top); + return top; nospace: - if (list != NULL) + if (list != NULL) { mcache_free_ext(m_cache(MC_MBUF), list); - if (top != NULL) + } + if (top != NULL) { m_freem(top); + } MCFail++; - return (NULL); + return NULL; } /* @@ -5192,8 +5345,9 @@ m_copydata(struct mbuf *m, int off, int len, void *vp) __func__, m0, off0, len0); /* NOTREACHED */ } - if (off < m->m_len) + if (off < m->m_len) { break; + } off -= m->m_len; m = m->m_next; } @@ -5219,8 +5373,9 @@ m_copydata(struct mbuf *m, int off, int len, void *vp) void m_cat(struct mbuf *m, struct mbuf *n) { - while (m->m_next) + while (m->m_next) { m = m->m_next; + } while (n) { if ((m->m_flags & M_EXT) || m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) { @@ -5243,8 +5398,9 @@ m_adj(struct mbuf *mp, int req_len) struct mbuf *m; int count; - if ((m = mp) == NULL) + if ((m = mp) == NULL) { return; + } if (len >= 0) { /* * Trim from head. @@ -5261,8 +5417,9 @@ m_adj(struct mbuf *mp, int req_len) } } m = mp; - if (m->m_flags & M_PKTHDR) + if (m->m_flags & M_PKTHDR) { m->m_pkthdr.len -= (req_len - len); + } } else { /* * Trim from tail. Scan the mbuf chain, @@ -5275,28 +5432,32 @@ m_adj(struct mbuf *mp, int req_len) count = 0; for (;;) { count += m->m_len; - if (m->m_next == (struct mbuf *)0) + if (m->m_next == (struct mbuf *)0) { break; + } m = m->m_next; } if (m->m_len >= len) { m->m_len -= len; m = mp; - if (m->m_flags & M_PKTHDR) + if (m->m_flags & M_PKTHDR) { m->m_pkthdr.len -= len; + } return; } count -= len; - if (count < 0) + if (count < 0) { count = 0; + } /* * Correct length for chain is "count". * Find the mbuf with last data, adjust its length, * and toss data from remaining mbufs on chain. 
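 *
 * [Illustrative aside, not part of the original patch: a worked example
 * of the tail trim above.  For a chain with lengths {10, 20, 30} and a
 * request to trim 35 bytes from the tail, count accumulates to 60 and
 * then becomes 60 - 35 = 25; the walk below leaves the first mbuf at
 * 10 bytes, cuts the second to 15, and zeroes the third.]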
*/ m = mp; - if (m->m_flags & M_PKTHDR) + if (m->m_flags & M_PKTHDR) { m->m_pkthdr.len = count; + } for (; m; m = m->m_next) { if (m->m_len >= count) { m->m_len = count; @@ -5304,8 +5465,9 @@ m_adj(struct mbuf *mp, int req_len) } count -= m->m_len; } - while ((m = m->m_next)) + while ((m = m->m_next)) { m->m_len = 0; + } } } @@ -5328,7 +5490,7 @@ m_pullup(struct mbuf *n, int len) /* check invalid arguments */ if (n == NULL) { - panic("%s: n == NULL", __func__); + panic("%s: n == NULL", __func__); } if (len < 0) { os_log_info(OS_LOG_DEFAULT, "%s: failed negative len %d", @@ -5354,17 +5516,20 @@ m_pullup(struct mbuf *n, int len) */ if ((n->m_flags & M_EXT) == 0 && len < &n->m_dat[MLEN] - n->m_data && n->m_next != NULL) { - if (n->m_len >= len) - return (n); + if (n->m_len >= len) { + return n; + } m = n; n = n->m_next; len -= m->m_len; } else { - if (len > MHLEN) + if (len > MHLEN) { goto bad; + } _MGET(m, M_DONTWAIT, n->m_type); - if (m == 0) + if (m == 0) { goto bad; + } m->m_len = 0; if (n->m_flags & M_PKTHDR) { M_COPY_PKTHDR(m, n); @@ -5380,21 +5545,22 @@ m_pullup(struct mbuf *n, int len) m->m_len += count; n->m_len -= count; space -= count; - if (n->m_len != 0) + if (n->m_len != 0) { n->m_data += count; - else + } else { n = m_free(n); + } } while (len > 0 && n != NULL); if (len > 0) { (void) m_free(m); goto bad; } m->m_next = n; - return (m); + return m; bad: m_freem(n); MPFail++; - return (0); + return 0; } /* @@ -5410,11 +5576,13 @@ m_copyup(struct mbuf *n, int len, int dstoff) struct mbuf *m; int count, space; - if (len > (MHLEN - dstoff)) + if (len > (MHLEN - dstoff)) { goto bad; + } MGET(m, M_DONTWAIT, n->m_type); - if (m == NULL) + if (m == NULL) { goto bad; + } m->m_len = 0; if (n->m_flags & M_PKTHDR) { m_copy_pkthdr(m, n); @@ -5430,21 +5598,22 @@ m_copyup(struct mbuf *n, int len, int dstoff) m->m_len += count; n->m_len -= count; space -= count; - if (n->m_len) + if (n->m_len) { n->m_data += count; - else + } else { n = m_free(n); + } } while (len > 0 && n); if (len > 0) { (void) m_free(m); goto bad; } m->m_next = n; - return (m); + return m; bad: m_freem(n); MSFail++; - return (NULL); + return NULL; } /* @@ -5455,7 +5624,7 @@ bad: struct mbuf * m_split(struct mbuf *m0, int len0, int wait) { - return (m_split0(m0, len0, wait, 1)); + return m_split0(m0, len0, wait, 1); } static struct mbuf * @@ -5468,10 +5637,12 @@ m_split0(struct mbuf *m0, int len0, int wait, int copyhdr) * First iterate to the mbuf which contains the first byte of * data at offset len0 */ - for (m = m0; m && len > m->m_len; m = m->m_next) + for (m = m0; m && len > m->m_len; m = m->m_next) { len -= m->m_len; - if (m == NULL) - return (NULL); + } + if (m == NULL) { + return NULL; + } /* * len effectively is now the offset in the current * mbuf where we have to perform split. 
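 *
 * [Illustrative aside, not part of the original patch: for a chain with
 * lengths {10, 20, 30} and len0 = 25, the walk above stops at the second
 * mbuf with len = 15, so the split falls 15 bytes into that mbuf and
 * remain = 20 - 15 = 5 bytes go to the head of the new chain.]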
@@ -5488,18 +5659,21 @@ m_split0(struct mbuf *m0, int len0, int wait, int copyhdr) */ if (copyhdr && (m0->m_flags & M_PKTHDR) && remain == 0) { _MGETHDR(n, wait, m0->m_type); - if (n == NULL) - return (NULL); + if (n == NULL) { + return NULL; + } n->m_next = m->m_next; m->m_next = NULL; n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; n->m_pkthdr.len = m0->m_pkthdr.len - len0; m0->m_pkthdr.len = len0; - return (n); - } if (copyhdr && (m0->m_flags & M_PKTHDR)) { + return n; + } + if (copyhdr && (m0->m_flags & M_PKTHDR)) { _MGETHDR(n, wait, m0->m_type); - if (n == NULL) - return (NULL); + if (n == NULL) { + return NULL; + } n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif; n->m_pkthdr.len = m0->m_pkthdr.len - len0; m0->m_pkthdr.len = len0; @@ -5510,27 +5684,31 @@ m_split0(struct mbuf *m0, int len0, int wait, int copyhdr) * of head chain and first mbuf of current chain * pointing to different data offsets */ - if (m->m_flags & M_EXT) + if (m->m_flags & M_EXT) { goto extpacket; + } if (remain > MHLEN) { /* m can't be the lead packet */ MH_ALIGN(n, 0); n->m_next = m_split(m, len, wait); if (n->m_next == NULL) { (void) m_free(n); - return (NULL); - } else - return (n); - } else + return NULL; + } else { + return n; + } + } else { MH_ALIGN(n, remain); + } } else if (remain == 0) { n = m->m_next; m->m_next = NULL; - return (n); + return n; } else { _MGET(n, wait, m->m_type); - if (n == NULL) - return (NULL); + if (n == NULL) { + return NULL; + } if ((m->m_flags & M_EXT) == 0) { VERIFY(remain <= MLEN); @@ -5550,7 +5728,7 @@ extpacket: m->m_len = len; n->m_next = m->m_next; m->m_next = NULL; - return (n); + return n; } /* @@ -5573,12 +5751,13 @@ m_devget(char *buf, int totlen, int off0, struct ifnet *ifp, * If 'off' is non-zero, packet is trailer-encapsulated, * so we have to skip the type and length fields. 
*/ - cp += off + 2 * sizeof (u_int16_t); - totlen -= 2 * sizeof (u_int16_t); + cp += off + 2 * sizeof(u_int16_t); + totlen -= 2 * sizeof(u_int16_t); } _MGETHDR(m, M_DONTWAIT, MT_DATA); - if (m == NULL) - return (NULL); + if (m == NULL) { + return NULL; + } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = totlen; m->m_len = MHLEN; @@ -5588,7 +5767,7 @@ m_devget(char *buf, int totlen, int off0, struct ifnet *ifp, _MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { m_freem(top); - return (NULL); + return NULL; } m->m_len = MLEN; } @@ -5599,10 +5778,11 @@ m_devget(char *buf, int totlen, int off0, struct ifnet *ifp, m->m_len = len = MIN(len, m_maxsize(MC_CL)); } else { /* give up when it's out of cluster mbufs */ - if (top != NULL) + if (top != NULL) { m_freem(top); + } m_freem(m); - return (NULL); + return NULL; } } else { /* @@ -5610,29 +5790,32 @@ m_devget(char *buf, int totlen, int off0, struct ifnet *ifp, */ if (len < m->m_len) { if (top == NULL && - len + max_linkhdr <= m->m_len) + len + max_linkhdr <= m->m_len) { m->m_data += max_linkhdr; + } m->m_len = len; } else { len = m->m_len; } } - if (copy) + if (copy) { copy(cp, MTOD(m, caddr_t), (unsigned)len); - else + } else { bcopy(cp, MTOD(m, caddr_t), (unsigned)len); + } cp += len; *mp = m; mp = &m->m_next; totlen -= len; - if (cp == epkt) + if (cp == epkt) { cp = buf; + } } - return (top); + return top; } #ifndef MBUF_GROWTH_NORMAL_THRESH -#define MBUF_GROWTH_NORMAL_THRESH 25 +#define MBUF_GROWTH_NORMAL_THRESH 25 #endif /* @@ -5673,13 +5856,14 @@ m_howmany(int num, size_t bufsize) mbwdog_logger("maxed out nclusters (%u >= %u) or njcl (%u >= %u)", sumclusters, nclusters, (m_16kclusters << NCLPJCLSHIFT), njcl); - return (0); + return 0; } if (bufsize == m_maxsize(MC_BIGCL)) { /* Under minimum */ - if (m_bigclusters < m_minlimit(MC_BIGCL)) - return (m_minlimit(MC_BIGCL) - m_bigclusters); + if (m_bigclusters < m_minlimit(MC_BIGCL)) { + return m_minlimit(MC_BIGCL) - m_bigclusters; + } percent_pool = ((sumclusters - freeclusters) * 100) / sumclusters; @@ -5689,10 +5873,11 @@ m_howmany(int num, size_t bufsize) * If a light/normal user, grow conservatively (75%) * If a heavy user, grow aggressively (50%) */ - if (percent_kmem < MBUF_GROWTH_NORMAL_THRESH) + if (percent_kmem < MBUF_GROWTH_NORMAL_THRESH) { mb_growth = MB_GROWTH_NORMAL; - else + } else { mb_growth = MB_GROWTH_AGGRESSIVE; + } if (percent_kmem < 5) { /* For initial allocations */ @@ -5701,15 +5886,18 @@ m_howmany(int num, size_t bufsize) /* Return if >= MBIGCL_LOWAT clusters available */ if (m_infree(MC_BIGCL) >= MBIGCL_LOWAT && m_total(MC_BIGCL) >= - MBIGCL_LOWAT + m_minlimit(MC_BIGCL)) - return (0); + MBIGCL_LOWAT + m_minlimit(MC_BIGCL)) { + return 0; + } /* Ensure at least num clusters are accessible */ - if (num >= m_infree(MC_BIGCL)) + if (num >= m_infree(MC_BIGCL)) { i = num - m_infree(MC_BIGCL); - if (num > m_total(MC_BIGCL) - m_minlimit(MC_BIGCL)) + } + if (num > m_total(MC_BIGCL) - m_minlimit(MC_BIGCL)) { j = num - (m_total(MC_BIGCL) - m_minlimit(MC_BIGCL)); + } i = MAX(i, j); @@ -5718,36 +5906,41 @@ m_howmany(int num, size_t bufsize) * or percent_pool > 50 (aggressive growth). 
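 *
 * [Illustrative aside, not part of the original patch: with the values
 * this file gives the growth modes (MB_GROWTH_NORMAL == 2 and
 * MB_GROWTH_AGGRESSIVE == 1, defined earlier, outside this hunk), the
 * formula below yields 100 - 100/4 = 75 and 100 - 100/2 = 50, matching
 * the 75%/50% thresholds quoted above.]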
*/ mb_growth_thresh = 100 - (100 / (1 << mb_growth)); - if (percent_pool > mb_growth_thresh) + if (percent_pool > mb_growth_thresh) { j = ((sumclusters + num) >> mb_growth) - freeclusters; + } i = MAX(i, j); } /* Check to ensure we didn't go over limits */ - if (i + m_bigclusters >= m_maxlimit(MC_BIGCL)) + if (i + m_bigclusters >= m_maxlimit(MC_BIGCL)) { i = m_maxlimit(MC_BIGCL) - m_bigclusters; - if ((i << 1) + sumclusters >= nclusters) + } + if ((i << 1) + sumclusters >= nclusters) { i = (nclusters - sumclusters) >> 1; + } VERIFY((m_total(MC_BIGCL) + i) <= m_maxlimit(MC_BIGCL)); VERIFY(sumclusters + (i << 1) <= nclusters); - } else { /* 16K CL */ VERIFY(njcl > 0); /* Ensure at least num clusters are available */ - if (num >= m_16kclfree) + if (num >= m_16kclfree) { i = num - m_16kclfree; + } /* Always grow 16KCL pool aggressively */ - if (((m_16kclusters + num) >> 1) > m_16kclfree) + if (((m_16kclusters + num) >> 1) > m_16kclfree) { j = ((m_16kclusters + num) >> 1) - m_16kclfree; + } i = MAX(i, j); /* Check to ensure we don't go over limit */ - if ((i + m_total(MC_16KCL)) >= m_maxlimit(MC_16KCL)) + if ((i + m_total(MC_16KCL)) >= m_maxlimit(MC_16KCL)) { i = m_maxlimit(MC_16KCL) - m_total(MC_16KCL); + } } - return (i); + return i; } /* * Return the number of bytes in the mbuf chain, m. @@ -5758,13 +5951,15 @@ m_length(struct mbuf *m) struct mbuf *m0; unsigned int pktlen; - if (m->m_flags & M_PKTHDR) - return (m->m_pkthdr.len); + if (m->m_flags & M_PKTHDR) { + return m->m_pkthdr.len; + } pktlen = 0; - for (m0 = m; m0 != NULL; m0 = m0->m_next) + for (m0 = m; m0 != NULL; m0 = m0->m_next) { pktlen += m0->m_len; - return (pktlen); + } + return pktlen; } /* @@ -5780,8 +5975,9 @@ m_copyback(struct mbuf *m0, int off, int len, const void *cp) int error; #endif /* DEBUG */ - if (m0 == NULL) + if (m0 == NULL) { return; + } #if DEBUG error = @@ -5790,8 +5986,9 @@ m_copyback(struct mbuf *m0, int off, int len, const void *cp) M_COPYBACK0_COPYBACK | M_COPYBACK0_EXTEND, M_DONTWAIT); #if DEBUG - if (error != 0 || (m0 != NULL && origm != m0)) + if (error != 0 || (m0 != NULL && origm != m0)) { panic("m_copyback"); + } #endif /* DEBUG */ } @@ -5811,9 +6008,9 @@ m_copyback_cow(struct mbuf *m0, int off, int len, const void *cp, int how) * just free the chain. */ m_freem(m0); - return (NULL); + return NULL; } - return (m0); + return m0; } /* @@ -5831,8 +6028,9 @@ m_makewritable(struct mbuf **mp, int off, int len, int how) #endif /* DEBUG */ #if 0 /* M_COPYALL is large enough */ - if (len == M_COPYALL) + if (len == M_COPYALL) { len = m_length(*mp) - off; /* XXX */ + } #endif error = m_copyback0(mp, off, len, NULL, @@ -5840,15 +6038,18 @@ m_makewritable(struct mbuf **mp, int off, int len, int how) #if DEBUG reslen = 0; - for (n = *mp; n; n = n->m_next) + for (n = *mp; n; n = n->m_next) { reslen += n->m_len; - if (origlen != reslen) + } + if (origlen != reslen) { panic("m_makewritable: length changed"); - if (((*mp)->m_flags & M_PKTHDR) && reslen != (*mp)->m_pkthdr.len) + } + if (((*mp)->m_flags & M_PKTHDR) && reslen != (*mp)->m_pkthdr.len) { panic("m_makewritable: inconsist"); + } #endif /* DEBUG */ - return (error); + return error; } static int @@ -5871,7 +6072,7 @@ m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags, * assuming that M_COPYBACK0_EXTEND and M_COPYBACK0_COW are exclusive. 
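 *
 * [Illustrative aside, not part of the original patch: the VERIFY below
 * fails exactly when both bits are set, since then ~flags clears the
 * whole pair and the AND yields 0; if at most one of the two is set,
 * the other bit survives in ~flags and the expression is non-zero.]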
*/ - VERIFY((~flags & (M_COPYBACK0_EXTEND|M_COPYBACK0_COW)) != 0); + VERIFY((~flags & (M_COPYBACK0_EXTEND | M_COPYBACK0_COW)) != 0); mp = mp0; m = *mp; @@ -5881,8 +6082,9 @@ m_copyback0(struct mbuf **mp0, int off, int len, const void *vp, int flags, if (m->m_next == NULL) { int tspace; extend: - if (!(flags & M_COPYBACK0_EXTEND)) + if (!(flags & M_COPYBACK0_EXTEND)) { goto out; + } /* * try to make some space at the end of "m". @@ -5937,8 +6139,9 @@ extend: */ #if DIAGNOSTIC - if (!(flags & M_COPYBACK0_COW)) + if (!(flags & M_COPYBACK0_COW)) { panic("m_copyback0: read-only"); + } #endif /* DIAGNOSTIC */ /* @@ -5947,8 +6150,9 @@ extend: */ if (off > 0 && len < mlen) { n = m_split0(m, off, how, 0); - if (n == NULL) + if (n == NULL) { goto enobufs; + } m->m_next = n; mp = &m->m_next; m = n; @@ -5965,28 +6169,32 @@ extend: * allocate a new mbuf. copy packet header if needed. */ n = _M_GET(how, m->m_type); - if (n == NULL) + if (n == NULL) { goto enobufs; + } if (off == 0 && (m->m_flags & M_PKTHDR)) { M_COPY_PKTHDR(n, m); n->m_len = MHLEN; } else { - if (len >= MINCLSIZE) + if (len >= MINCLSIZE) { MCLGET(n, M_DONTWAIT); + } n->m_len = (n->m_flags & M_EXT) ? MCLBYTES : MLEN; } - if (n->m_len > len) + if (n->m_len > len) { n->m_len = len; + } /* * free the region which has been overwritten. * copying data from old mbufs if requested. */ - if (flags & M_COPYBACK0_PRESERVE) + if (flags & M_COPYBACK0_PRESERVE) { datap = mtod(n, char *); - else + } else { datap = NULL; + } eatlen = n->m_len; VERIFY(off == 0 || eatlen >= mlen); if (off > 0) { @@ -6011,11 +6219,13 @@ extend: m->m_data += mlen; m->m_len -= mlen; eatlen -= mlen; - if (m->m_len == 0) + if (m->m_len == 0) { *mp = m = m_free(m); + } } - if (eatlen > 0) + if (eatlen > 0) { n->m_len -= eatlen; + } n->m_next = m; *mp = m = n; continue; @@ -6029,8 +6239,9 @@ extend: mlen += off; off = 0; totlen += mlen; - if (len == 0) + if (len == 0) { break; + } if (m->m_next == NULL) { goto extend; } @@ -6043,10 +6254,10 @@ out: m->m_pkthdr.len = totlen; } - return (0); + return 0; enobufs: - return (ENOBUFS); + return ENOBUFS; } uint64_t @@ -6054,13 +6265,15 @@ mcl_to_paddr(char *addr) { vm_offset_t base_phys; - if (!MBUF_IN_MAP(addr)) - return (0); + if (!MBUF_IN_MAP(addr)) { + return 0; + } base_phys = mcl_paddr[atop_64(addr - (char *)mbutl)]; - if (base_phys == 0) - return (0); - return ((uint64_t)(ptoa_64(base_phys) | ((uint64_t)addr & PAGE_MASK))); + if (base_phys == 0) { + return 0; + } + return (uint64_t)(ptoa_64(base_phys) | ((uint64_t)addr & PAGE_MASK)); } /* @@ -6081,8 +6294,9 @@ m_dup(struct mbuf *m, int how) np = &top; top = NULL; - if (m->m_flags & M_PKTHDR) + if (m->m_flags & M_PKTHDR) { copyhdr = 1; + } /* * Quick check: if we have one mbuf and its data fits in an
*/ if (copyhdr) { if (m->m_pkthdr.len <= MHLEN && m->m_len <= MHLEN) { - if ((n = _M_GETHDR(how, m->m_type)) == NULL) - return (NULL); + if ((n = _M_GETHDR(how, m->m_type)) == NULL) { + return NULL; + } n->m_len = m->m_len; m_dup_pkthdr(n, m, how); bcopy(m->m_data, n->m_data, m->m_len); - return (n); + return n; } } else if (m->m_len <= MLEN) { - if ((n = _M_GET(how, m->m_type)) == NULL) - return (NULL); + if ((n = _M_GET(how, m->m_type)) == NULL) { + return NULL; + } bcopy(m->m_data, n->m_data, m->m_len); n->m_len = m->m_len; - return (n); + return n; } } while (m != NULL) { @@ -6112,19 +6328,22 @@ m_dup(struct mbuf *m, int how) printf("<%x: %x, %x, %x\n", m, m->m_flags, m->m_len, m->m_data); #endif - if (copyhdr) + if (copyhdr) { n = _M_GETHDR(how, m->m_type); - else + } else { n = _M_GET(how, m->m_type); - if (n == NULL) + } + if (n == NULL) { goto nospace; + } if (m->m_flags & M_EXT) { - if (m->m_len <= m_maxsize(MC_CL)) + if (m->m_len <= m_maxsize(MC_CL)) { MCLGET(n, how); - else if (m->m_len <= m_maxsize(MC_BIGCL)) + } else if (m->m_len <= m_maxsize(MC_BIGCL)) { n = m_mbigget(n, how); - else if (m->m_len <= m_maxsize(MC_16KCL) && njcl > 0) + } else if (m->m_len <= m_maxsize(MC_16KCL) && njcl > 0) { n = m_m16kget(n, how); + } if (!(n->m_flags & M_EXT)) { (void) m_free(n); goto nospace; @@ -6135,8 +6354,9 @@ m_dup(struct mbuf *m, int how) /* Don't use M_COPY_PKTHDR: preserve m_data */ m_dup_pkthdr(n, m, how); copyhdr = 0; - if (!(n->m_flags & M_EXT)) + if (!(n->m_flags & M_EXT)) { n->m_data = n->m_pktdat; + } } n->m_len = m->m_len; /* @@ -6153,21 +6373,22 @@ m_dup(struct mbuf *m, int how) #endif } - if (top == NULL) + if (top == NULL) { MDFail++; - return (top); + } + return top; nospace: m_freem(top); MDFail++; - return (NULL); + return NULL; } -#define MBUF_MULTIPAGES(m) \ - (((m)->m_flags & M_EXT) && \ - ((IS_P2ALIGNED((m)->m_data, PAGE_SIZE) \ - && (m)->m_len > PAGE_SIZE) || \ - (!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) && \ +#define MBUF_MULTIPAGES(m) \ + (((m)->m_flags & M_EXT) && \ + ((IS_P2ALIGNED((m)->m_data, PAGE_SIZE) \ + && (m)->m_len > PAGE_SIZE) || \ + (!IS_P2ALIGNED((m)->m_data, PAGE_SIZE) && \ P2ROUNDUP((m)->m_data, PAGE_SIZE) < ((uintptr_t)(m)->m_data + (m)->m_len)))) static struct mbuf * @@ -6188,13 +6409,14 @@ m_expand(struct mbuf *m, struct mbuf **last) struct mbuf *n; data = data0; - if (IS_P2ALIGNED(data, PAGE_SIZE) && len0 > PAGE_SIZE) + if (IS_P2ALIGNED(data, PAGE_SIZE) && len0 > PAGE_SIZE) { len = PAGE_SIZE; - else if (!IS_P2ALIGNED(data, PAGE_SIZE) && - P2ROUNDUP(data, PAGE_SIZE) < (data + len0)) + } else if (!IS_P2ALIGNED(data, PAGE_SIZE) && + P2ROUNDUP(data, PAGE_SIZE) < (data + len0)) { len = P2ROUNDUP(data, PAGE_SIZE) - data; - else + } else { len = len0; + } VERIFY(len > 0); VERIFY(m->m_flags & M_EXT); @@ -6207,8 +6429,9 @@ m_expand(struct mbuf *m, struct mbuf **last) data0 += len; len0 -= len; - if (len0 == 0) + if (len0 == 0) { break; + } n = _M_RETRY(M_DONTWAIT, MT_DATA); if (n == NULL) { @@ -6222,7 +6445,7 @@ m_expand(struct mbuf *m, struct mbuf **last) n->m_flags |= M_EXT; m = n; } - return (top); + return top; } struct mbuf * @@ -6256,9 +6479,10 @@ m_normalize(struct mbuf *m) } m = n; } - if (expanded) + if (expanded) { atomic_add_32(&mb_normalized, 1); - return (top); + } + return top; } /* @@ -6274,16 +6498,18 @@ m_append(struct mbuf *m0, int len, caddr_t cp) struct mbuf *m, *n; int remainder, space; - for (m = m0; m->m_next != NULL; m = m->m_next) + for (m = m0; m->m_next != NULL; m = m->m_next) { ; + } remainder = len; space = M_TRAILINGSPACE(m); 
if (space > 0) { /* * Copy into available space. */ - if (space > remainder) + if (space > remainder) { space = remainder; + } bcopy(cp, mtod(m, caddr_t) + m->m_len, space); m->m_len += space; cp += space; @@ -6295,8 +6521,9 @@ m_append(struct mbuf *m0, int len, caddr_t cp) * and allocate a cluster instead. */ n = m_get(M_WAITOK, m->m_type); - if (n == NULL) + if (n == NULL) { break; + } n->m_len = min(MLEN, remainder); bcopy(cp, mtod(n, caddr_t), n->m_len); cp += n->m_len; @@ -6304,17 +6531,19 @@ m_append(struct mbuf *m0, int len, caddr_t cp) m->m_next = n; m = n; } - if (m0->m_flags & M_PKTHDR) + if (m0->m_flags & M_PKTHDR) { m0->m_pkthdr.len += len - remainder; - return (remainder == 0); + } + return remainder == 0; } struct mbuf * m_last(struct mbuf *m) { - while (m->m_next != NULL) + while (m->m_next != NULL) { m = m->m_next; - return (m); + } + return m; } unsigned int @@ -6326,7 +6555,7 @@ m_fixhdr(struct mbuf *m0) len = m_length2(m0, NULL); m0->m_pkthdr.len = len; - return (len); + return len; } unsigned int @@ -6338,12 +6567,14 @@ m_length2(struct mbuf *m0, struct mbuf **last) len = 0; for (m = m0; m != NULL; m = m->m_next) { len += m->m_len; - if (m->m_next == NULL) + if (m->m_next == NULL) { break; + } } - if (last != NULL) + if (last != NULL) { *last = m; - return (len); + } + return len; } /* @@ -6367,20 +6598,23 @@ m_defrag_offset(struct mbuf *m0, u_int32_t off, int how) struct mbuf *m_new = NULL, *m_final = NULL; int progress = 0, length, pktlen; - if (!(m0->m_flags & M_PKTHDR)) - return (m0); + if (!(m0->m_flags & M_PKTHDR)) { + return m0; + } VERIFY(off < MHLEN); m_fixhdr(m0); /* Needed sanity check */ pktlen = m0->m_pkthdr.len + off; - if (pktlen > MHLEN) + if (pktlen > MHLEN) { m_final = m_getcl(how, MT_DATA, M_PKTHDR); - else + } else { m_final = m_gethdr(how, MT_DATA); + } - if (m_final == NULL) + if (m_final == NULL) { goto nospace; + } if (off > 0) { pktlen -= off; @@ -6394,48 +6628,55 @@ m_defrag_offset(struct mbuf *m0, u_int32_t off, int how) */ VERIFY(m0->m_pkthdr.pkt_hdr == NULL); - if (m_dup_pkthdr(m_final, m0, how) == 0) + if (m_dup_pkthdr(m_final, m0, how) == 0) { goto nospace; + } m_new = m_final; while (progress < pktlen) { length = pktlen - progress; - if (length > MCLBYTES) + if (length > MCLBYTES) { length = MCLBYTES; + } length -= ((m_new == m_final) ? off : 0); - if (length < 0) + if (length < 0) { goto nospace; + } if (m_new == NULL) { - if (length > MLEN) + if (length > MLEN) { m_new = m_getcl(how, MT_DATA, 0); - else + } else { m_new = m_get(how, MT_DATA); - if (m_new == NULL) + } + if (m_new == NULL) { goto nospace; + } } m_copydata(m0, progress, length, mtod(m_new, caddr_t)); progress += length; m_new->m_len = length; - if (m_new != m_final) + if (m_new != m_final) { m_cat(m_final, m_new); + } m_new = NULL; } m_freem(m0); m0 = m_final; - return (m0); + return m0; nospace: - if (m_final) + if (m_final) { m_freem(m_final); - return (NULL); + } + return NULL; } struct mbuf * m_defrag(struct mbuf *m0, int how) { - return (m_defrag_offset(m0, 0, how)); + return m_defrag_offset(m0, 0, how); } void @@ -6449,13 +6690,13 @@ m_mchtype(struct mbuf *m, int t) void * m_mtod(struct mbuf *m) { - return (MTOD(m, void *)); + return MTOD(m, void *); } struct mbuf * m_dtom(void *x) { - return ((struct mbuf *)((uintptr_t)(x) & ~(MSIZE-1))); + return (struct mbuf *)((uintptr_t)(x) & ~(MSIZE - 1)); } void @@ -6470,26 +6711,25 @@ m_mcheck(struct mbuf *m) struct mbuf * m_getptr(struct mbuf *m, int loc, int *off) { - while (loc >= 0) { /* Normal end of search. 
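 * [Illustrative aside, not part of the original patch: m_getptr()
 * returns the mbuf holding byte offset loc and stores the offset
 * within that mbuf in *off; when loc lands exactly on the end of the
 * chain it returns the last mbuf with *off == m_len, and it returns
 * NULL when the chain is shorter than loc.]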
*/ if (m->m_len > loc) { *off = loc; - return (m); + return m; } else { loc -= m->m_len; if (m->m_next == NULL) { if (loc == 0) { /* Point at the end of valid data. */ *off = m->m_len; - return (m); + return m; } - return (NULL); + return NULL; } m = m->m_next; } } - return (NULL); + return NULL; } /* @@ -6545,8 +6785,9 @@ mbuf_watchdog(void) struct timeval now; unsigned int since; - if (mb_waiters == 0 || !mb_watchdog) + if (mb_waiters == 0 || !mb_watchdog) { return; + } microuptime(&now); since = now.tv_sec - mb_wdtstart.tv_sec; @@ -6603,10 +6844,11 @@ mbuf_sleep(mbuf_class_t class, unsigned int num, int wait) * If this is the first waiter, arm the watchdog timer. Otherwise * check if we need to panic the system due to watchdog timeout. */ - if (mb_waiters == 0) + if (mb_waiters == 0) { microuptime(&mb_wdtstart); - else + } else { mbuf_watchdog(); + } mb_waiters++; m_region_expand(class) += m_total(class) + num; @@ -6617,7 +6859,7 @@ mbuf_sleep(mbuf_class_t class, unsigned int num, int wait) mbuf_worker_needs_wakeup = FALSE; } mbwdog_logger("waiting (%d mbufs in class %s)", num, m_cname(class)); - (void) msleep(mb_waitchan, mbuf_mlock, (PZERO-1), m_cname(class), NULL); + (void) msleep(mb_waitchan, mbuf_mlock, (PZERO - 1), m_cname(class), NULL); mbwdog_logger("woke up (%d mbufs in class %s) ", num, m_cname(class)); /* We are now up; stop getting notified until next round */ @@ -6632,7 +6874,7 @@ mbuf_sleep(mbuf_class_t class, unsigned int num, int wait) mcache_retry = TRUE; } done: - return (mcache_retry); + return mcache_retry; } __attribute__((noreturn)) @@ -6674,8 +6916,9 @@ mbuf_worker_thread(void) /* Adjust to current number of cluster in use */ n = m_region_expand(MC_CL) - (m_total(MC_CL) - m_infree(MC_CL)); - if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL)) + if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL)) { n = m_maxlimit(MC_CL) - m_total(MC_CL); + } if (n > 0) { mb_expand_cl_total += n; } @@ -6692,8 +6935,9 @@ mbuf_worker_thread(void) /* Adjust to current number of 4 KB cluster in use */ n = m_region_expand(MC_BIGCL) - (m_total(MC_BIGCL) - m_infree(MC_BIGCL)); - if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL)) + if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL)) { n = m_maxlimit(MC_BIGCL) - m_total(MC_BIGCL); + } if (n > 0) { mb_expand_bigcl_total += n; } @@ -6710,8 +6954,9 @@ mbuf_worker_thread(void) /* Adjust to current number of 16 KB cluster in use */ n = m_region_expand(MC_16KCL) - (m_total(MC_16KCL) - m_infree(MC_16KCL)); - if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL)) + if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL)) { n = m_maxlimit(MC_16KCL) - m_total(MC_16KCL); + } if (n > 0) { mb_expand_16kcl_total += n; } @@ -6737,12 +6982,13 @@ mbuf_worker_thread(void) m_total(MC_16KCL); if (total_mbufs < total_clusters) { mbwdog_logger("expanding MC_MBUF by %d", - total_clusters - total_mbufs); + total_clusters - total_mbufs); } while (total_mbufs < total_clusters) { mb_expand_cnt++; - if (freelist_populate(MC_MBUF, 1, M_WAIT) == 0) + if (freelist_populate(MC_MBUF, 1, M_WAIT) == 0) { break; + } total_mbufs = m_total(MC_MBUF); total_clusters = m_total(MC_BIGCL) + m_total(MC_CL) + m_total(MC_16KCL); @@ -6802,7 +7048,7 @@ slab_get(void *buf) lck_mtx_unlock(mbuf_mlock); /* This is a new buffer; create the slabs group for it */ - MALLOC(slg, mcl_slabg_t *, sizeof (*slg), M_TEMP, + MALLOC(slg, mcl_slabg_t *, sizeof(*slg), M_TEMP, M_WAITOK | M_ZERO); MALLOC(slg->slg_slab, mcl_slab_t *, sizeof(mcl_slab_t) * NSLABSPMB, M_TEMP, M_WAITOK | M_ZERO); @@ -6818,8 +7064,9 @@ slab_get(void 
*buf) slabstbl[ix] = slg; /* Chain each slab in the group to its forward neighbor */ - for (k = 1; k < NSLABSPMB; k++) + for (k = 1; k < NSLABSPMB; k++) { slg->slg_slab[k - 1].sl_next = &slg->slg_slab[k]; + } VERIFY(slg->slg_slab[NSLABSPMB - 1].sl_next == NULL); /* And chain the last slab in the previous group to this */ @@ -6834,7 +7081,7 @@ slab_get(void *buf) ix = MTOPG(buf) % NSLABSPMB; VERIFY(ix < NSLABSPMB); - return (&slg->slg_slab[ix]); + return &slg->slg_slab[ix]; } static void @@ -6897,8 +7144,8 @@ slab_remove(mcl_slab_t *sp, mbuf_class_t class) static boolean_t slab_inrange(mcl_slab_t *sp, void *buf) { - return ((uintptr_t)buf >= (uintptr_t)sp->sl_base && - (uintptr_t)buf < ((uintptr_t)sp->sl_base + sp->sl_len)); + return (uintptr_t)buf >= (uintptr_t)sp->sl_base && + (uintptr_t)buf < ((uintptr_t)sp->sl_base + sp->sl_len); } #undef panic @@ -6912,8 +7159,9 @@ slab_nextptr_panic(mcl_slab_t *sp, void *addr) for (i = 0; i < sp->sl_chunks; i++, buf += chunk_len) { void *next = ((mcache_obj_t *)buf)->obj_next; - if (next != addr) + if (next != addr) { continue; + } if (!mclverify) { if (next != NULL && !MBUF_IN_MAP(next)) { mcache_t *cp = m_cache(sp->sl_class); @@ -6942,9 +7190,9 @@ slab_detach(mcl_slab_t *sp) static boolean_t slab_is_detached(mcl_slab_t *sp) { - return ((intptr_t)sp->sl_link.tqe_next == -1 && - (intptr_t)sp->sl_link.tqe_prev == -1 && - (sp->sl_flags & SLF_DETACHED)); + return (intptr_t)sp->sl_link.tqe_next == -1 && + (intptr_t)sp->sl_link.tqe_prev == -1 && + (sp->sl_flags & SLF_DETACHED); } static void @@ -6963,18 +7211,20 @@ mcl_audit_init(void *buf, mcache_audit_t **mca_list, VERIFY(ix < maxclaudit); /* Make sure we haven't been here before */ - for (i = 0; i < num; i++) + for (i = 0; i < num; i++) { VERIFY(mclaudit[ix].cl_audit[i] == NULL); + } mca = mca_tail = *mca_list; - if (save_contents) + if (save_contents) { con = *con_list; + } for (i = 0; i < num; i++) { mcache_audit_t *next; next = mca->mca_next; - bzero(mca, sizeof (*mca)); + bzero(mca, sizeof(*mca)); mca->mca_next = next; mclaudit[ix].cl_audit[i] = mca; @@ -6984,8 +7234,8 @@ mcl_audit_init(void *buf, mcache_audit_t **mca_list, (mcl_saved_contents_t *)(void *)con; VERIFY(msc != NULL); - VERIFY(IS_P2ALIGNED(msc, sizeof (u_int64_t))); - VERIFY(con_size == sizeof (*msc)); + VERIFY(IS_P2ALIGNED(msc, sizeof(u_int64_t))); + VERIFY(con_size == sizeof(*msc)); mca->mca_contents_size = con_size; mca->mca_contents = msc; con = con->obj_next; @@ -6996,8 +7246,9 @@ mcl_audit_init(void *buf, mcache_audit_t **mca_list, mca = mca->mca_next; } - if (save_contents) + if (save_contents) { *con_list = con; + } *mca_list = mca_tail->mca_next; mca_tail->mca_next = NULL; @@ -7017,9 +7268,10 @@ mcl_audit_free(void *buf, unsigned int num) for (i = 0; i < num; i++) { mca = mclaudit[ix].cl_audit[i]; mclaudit[ix].cl_audit[i] = NULL; - if (mca->mca_contents) + if (mca->mca_contents) { mcache_free(mcl_audit_con_cache, mca->mca_contents); + } } mcache_free_ext(mcache_audit_cache, (mcache_obj_t *)mca_list); @@ -7082,7 +7334,7 @@ mcl_audit_buf2mca(mbuf_class_t class, mcache_obj_t *mobj) /* NOTREACHED */ } - return (mca); + return mca; } static void @@ -7095,8 +7347,9 @@ mcl_audit_mbuf(mcache_audit_t *mca, void *addr, boolean_t composite, VERIFY(mca->mca_contents != NULL && mca->mca_contents_size == AUDIT_CONTENTS_SIZE); - if (mclverify) + if (mclverify) { mcl_audit_verify_nextptr(next, mca); + } if (!alloc) { /* Save constructed mbuf fields */ @@ -7187,18 +7440,19 @@ mcl_audit_scratch(mcache_audit_t *mca) msa->msa_pthread = 
msa->msa_thread; msa->msa_thread = current_thread(); - bcopy(msa->msa_stack, msa->msa_pstack, sizeof (msa->msa_pstack)); + bcopy(msa->msa_stack, msa->msa_pstack, sizeof(msa->msa_pstack)); msa->msa_pdepth = msa->msa_depth; - bzero(stack, sizeof (stack)); + bzero(stack, sizeof(stack)); msa->msa_depth = OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1; - bcopy(&stack[1], msa->msa_stack, sizeof (msa->msa_stack)); + bcopy(&stack[1], msa->msa_stack, sizeof(msa->msa_stack)); msa->msa_ptstamp = msa->msa_tstamp; microuptime(&now); /* tstamp is in ms relative to base_ts */ msa->msa_tstamp = ((now.tv_usec - mb_start.tv_usec) / 1000); - if ((now.tv_sec - mb_start.tv_sec) > 0) + if ((now.tv_sec - mb_start.tv_sec) > 0) { msa->msa_tstamp += ((now.tv_sec - mb_start.tv_sec) * 1000); + } } static void @@ -7233,17 +7487,19 @@ mleak_activate(void) mleak_table.mleak_sample_factor = MLEAK_SAMPLE_FACTOR; PE_parse_boot_argn("mleak_sample_factor", &mleak_table.mleak_sample_factor, - sizeof (mleak_table.mleak_sample_factor)); + sizeof(mleak_table.mleak_sample_factor)); - if (mleak_table.mleak_sample_factor == 0) + if (mleak_table.mleak_sample_factor == 0) { mclfindleak = 0; + } - if (mclfindleak == 0) + if (mclfindleak == 0) { return; + } vm_size_t alloc_size = - mleak_alloc_buckets * sizeof (struct mallocation); - vm_size_t trace_size = mleak_trace_buckets * sizeof (struct mtrace); + mleak_alloc_buckets * sizeof(struct mallocation); + vm_size_t trace_size = mleak_trace_buckets * sizeof(struct mtrace); MALLOC(mleak_allocations, struct mallocation *, alloc_size, M_TEMP, M_WAITOK | M_ZERO); @@ -7267,11 +7523,13 @@ mleak_logger(u_int32_t num, mcache_obj_t *addr, boolean_t alloc) { int temp; - if (mclfindleak == 0) + if (mclfindleak == 0) { return; + } - if (!alloc) - return (mleak_free(addr)); + if (!alloc) { + return mleak_free(addr); + } temp = atomic_add_32_ov(&mleak_table.mleak_capture, 1); @@ -7298,7 +7556,7 @@ mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num) /* Quit if someone else modifying the tables */ if (!lck_mtx_try_lock_spin(mleak_lock)) { mleak_table.total_conflicts++; - return (FALSE); + return FALSE; } allocation = &mleak_allocations[hashaddr((uintptr_t)addr, @@ -7320,7 +7578,7 @@ mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num) trace_index == allocation->trace_index) { mleak_table.alloc_collisions++; lck_mtx_unlock(mleak_lock); - return (TRUE); + return TRUE; } /* @@ -7328,12 +7586,12 @@ mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num) * Size of zero = trace bucket is free. */ if (trace->allocs > 0 && - bcmp(trace->addr, bt, (depth * sizeof (uintptr_t))) != 0) { + bcmp(trace->addr, bt, (depth * sizeof(uintptr_t))) != 0) { /* Different, unique trace, but the same hash! Bail out. 
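*/
/*
 * [Illustrative aside, not part of the original patch: the trace table
 * is a best-effort hash table with no chaining, so a backtrace that
 * hashes onto a bucket owned by a different trace is dropped and only
 * counted.  Minimal sketch of that policy; the function name is
 * hypothetical.]
 */
#if 0 /* standalone sketch only */
static boolean_t
example_trace_insert(struct mtrace *bucket, uintptr_t *bt, uint32_t depth)
{
	if (bucket->allocs > 0 &&
	    bcmp(bucket->addr, bt, depth * sizeof(uintptr_t)) != 0) {
		bucket->collisions++;	/* occupied by a different trace */
		return FALSE;
	}
	if (bucket->allocs == 0) {	/* free bucket; record the trace */
		memcpy(bucket->addr, bt, depth * sizeof(uintptr_t));
		bucket->depth = depth;
	}
	bucket->allocs++;
	return TRUE;
}
#endif
/*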
*/ trace->collisions++; mleak_table.trace_collisions++; lck_mtx_unlock(mleak_lock); - return (TRUE); + return TRUE; } else if (trace->allocs > 0) { /* Same trace, already added, so increment refcount */ trace->allocs++; @@ -7345,7 +7603,7 @@ mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num) } mleak_table.trace_recorded++; trace->allocs = 1; - memcpy(trace->addr, bt, (depth * sizeof (uintptr_t))); + memcpy(trace->addr, bt, (depth * sizeof(uintptr_t))); trace->depth = depth; trace->collisions = 0; } @@ -7368,7 +7626,7 @@ mleak_log(uintptr_t *bt, mcache_obj_t *addr, uint32_t depth, int num) mleak_table.outstanding_allocs++; lck_mtx_unlock(mleak_lock); - return (TRUE); + return TRUE; } static void @@ -7386,10 +7644,12 @@ mleak_free(mcache_obj_t *addr) struct mtrace *trace; trace = &mleak_traces[allocation->trace_index]; /* allocs = 0 means trace bucket is unused */ - if (trace->allocs > 0) + if (trace->allocs > 0) { trace->allocs--; - if (trace->allocs == 0) + } + if (trace->allocs == 0) { trace->depth = 0; + } /* NULL element means alloc bucket is unused */ allocation->element = NULL; mleak_table.outstanding_allocs--; @@ -7406,41 +7666,45 @@ mleak_sort_traces() int i, j, k; struct mtrace *swap; - for(i = 0; i < MLEAK_NUM_TRACES; i++) + for (i = 0; i < MLEAK_NUM_TRACES; i++) { mleak_top_trace[i] = NULL; + } - for(i = 0, j = 0; j < MLEAK_NUM_TRACES && i < mleak_trace_buckets; i++) - { - if (mleak_traces[i].allocs <= 0) + for (i = 0, j = 0; j < MLEAK_NUM_TRACES && i < mleak_trace_buckets; i++) { + if (mleak_traces[i].allocs <= 0) { continue; + } mleak_top_trace[j] = &mleak_traces[i]; for (k = j; k > 0; k--) { if (mleak_top_trace[k]->allocs <= - mleak_top_trace[k-1]->allocs) + mleak_top_trace[k - 1]->allocs) { break; + } - swap = mleak_top_trace[k-1]; - mleak_top_trace[k-1] = mleak_top_trace[k]; + swap = mleak_top_trace[k - 1]; + mleak_top_trace[k - 1] = mleak_top_trace[k]; mleak_top_trace[k] = swap; } j++; } j--; - for(; i < mleak_trace_buckets; i++) { - if (mleak_traces[i].allocs <= mleak_top_trace[j]->allocs) + for (; i < mleak_trace_buckets; i++) { + if (mleak_traces[i].allocs <= mleak_top_trace[j]->allocs) { continue; + } mleak_top_trace[j] = &mleak_traces[i]; for (k = j; k > 0; k--) { if (mleak_top_trace[k]->allocs <= - mleak_top_trace[k-1]->allocs) + mleak_top_trace[k - 1]->allocs) { break; + } - swap = mleak_top_trace[k-1]; - mleak_top_trace[k-1] = mleak_top_trace[k]; + swap = mleak_top_trace[k - 1]; + mleak_top_trace[k - 1] = mleak_top_trace[k]; mleak_top_trace[k] = swap; } } @@ -7463,54 +7727,56 @@ mleak_update_stats() mleak_sort_traces(); mltr = &mleak_stat->ml_trace[0]; - bzero(mltr, sizeof (*mltr) * MLEAK_NUM_TRACES); + bzero(mltr, sizeof(*mltr) * MLEAK_NUM_TRACES); for (i = 0; i < MLEAK_NUM_TRACES; i++) { int j; if (mleak_top_trace[i] == NULL || - mleak_top_trace[i]->allocs == 0) + mleak_top_trace[i]->allocs == 0) { continue; + } - mltr->mltr_collisions = mleak_top_trace[i]->collisions; - mltr->mltr_hitcount = mleak_top_trace[i]->hitcount; - mltr->mltr_allocs = mleak_top_trace[i]->allocs; - mltr->mltr_depth = mleak_top_trace[i]->depth; + mltr->mltr_collisions = mleak_top_trace[i]->collisions; + mltr->mltr_hitcount = mleak_top_trace[i]->hitcount; + mltr->mltr_allocs = mleak_top_trace[i]->allocs; + mltr->mltr_depth = mleak_top_trace[i]->depth; VERIFY(mltr->mltr_depth <= MLEAK_STACK_DEPTH); - for (j = 0; j < mltr->mltr_depth; j++) + for (j = 0; j < mltr->mltr_depth; j++) { mltr->mltr_addr[j] = mleak_top_trace[i]->addr[j]; + } mltr++; } } static struct mbtypes { - 
int mt_type; - const char *mt_name; + int mt_type; + const char *mt_name; } mbtypes[] = { - { MT_DATA, "data" }, - { MT_OOBDATA, "oob data" }, - { MT_CONTROL, "ancillary data" }, - { MT_HEADER, "packet headers" }, - { MT_SOCKET, "socket structures" }, - { MT_PCB, "protocol control blocks" }, - { MT_RTABLE, "routing table entries" }, - { MT_HTABLE, "IMP host table entries" }, - { MT_ATABLE, "address resolution tables" }, - { MT_FTABLE, "fragment reassembly queue headers" }, - { MT_SONAME, "socket names and addresses" }, - { MT_SOOPTS, "socket options" }, - { MT_RIGHTS, "access rights" }, - { MT_IFADDR, "interface addresses" }, - { MT_TAG, "packet tags" }, - { 0, NULL } + { MT_DATA, "data" }, + { MT_OOBDATA, "oob data" }, + { MT_CONTROL, "ancillary data" }, + { MT_HEADER, "packet headers" }, + { MT_SOCKET, "socket structures" }, + { MT_PCB, "protocol control blocks" }, + { MT_RTABLE, "routing table entries" }, + { MT_HTABLE, "IMP host table entries" }, + { MT_ATABLE, "address resolution tables" }, + { MT_FTABLE, "fragment reassembly queue headers" }, + { MT_SONAME, "socket names and addresses" }, + { MT_SOOPTS, "socket options" }, + { MT_RIGHTS, "access rights" }, + { MT_IFADDR, "interface addresses" }, + { MT_TAG, "packet tags" }, + { 0, NULL } }; -#define MBUF_DUMP_BUF_CHK() { \ - clen -= k; \ - if (clen < 1) \ - goto done; \ - c += k; \ +#define MBUF_DUMP_BUF_CHK() { \ + clen -= k; \ + if (clen < 1) \ + goto done; \ + c += k; \ } static char * @@ -7521,7 +7787,7 @@ mbuf_dump(void) u_int32_t m_mbufs = 0, m_clfree = 0, m_bigclfree = 0; u_int32_t m_mbufclfree = 0, m_mbufbigclfree = 0; u_int32_t m_16kclusters = 0, m_16kclfree = 0, m_mbuf16kclfree = 0; - int nmbtypes = sizeof (mbstat.m_mtypes) / sizeof (short); + int nmbtypes = sizeof(mbstat.m_mtypes) / sizeof(short); uint8_t seen[256]; struct mbtypes *mp; mb_class_stat_t *sp; @@ -7562,7 +7828,6 @@ mbuf_dump(void) totfree += (sp->mbcl_mc_cached + sp->mbcl_infree) * sp->mbcl_size; totreturned += sp->mbcl_release_cnt; - } /* adjust free counts to include composite caches */ @@ -7571,14 +7836,16 @@ mbuf_dump(void) m_16kclfree += m_mbuf16kclfree; totmbufs = 0; - for (mp = mbtypes; mp->mt_name != NULL; mp++) + for (mp = mbtypes; mp->mt_name != NULL; mp++) { totmbufs += mbstat.m_mtypes[mp->mt_type]; - if (totmbufs > m_mbufs) + } + if (totmbufs > m_mbufs) { totmbufs = m_mbufs; + } k = snprintf(c, clen, "%lu/%u mbufs in use:\n", totmbufs, m_mbufs); MBUF_DUMP_BUF_CHK(); - bzero(&seen, sizeof (seen)); + bzero(&seen, sizeof(seen)); for (mp = mbtypes; mp->mt_name != NULL; mp++) { if (mbstat.m_mtypes[mp->mt_type] != 0) { seen[mp->mt_type] = 1; @@ -7588,12 +7855,13 @@ mbuf_dump(void) } } seen[MT_FREE] = 1; - for (i = 0; i < nmbtypes; i++) + for (i = 0; i < nmbtypes; i++) { if (!seen[i] && mbstat.m_mtypes[i] != 0) { k = snprintf(c, clen, "\t%u mbufs allocated to " "\n", mbstat.m_mtypes[i], i); MBUF_DUMP_BUF_CHK(); } + } if ((m_mbufs - totmbufs) > 0) { k = snprintf(c, clen, "\t%lu mbufs allocated to caches\n", m_mbufs - totmbufs); @@ -7649,8 +7917,8 @@ mbuf_dump(void) "VM return codes: "); MBUF_DUMP_BUF_CHK(); for (i = 0; - i < sizeof(mb_kmem_stats) / sizeof(mb_kmem_stats[0]); - i++) { + i < sizeof(mb_kmem_stats) / sizeof(mb_kmem_stats[0]); + i++) { k = snprintf(c, clen, "%s: %u ", mb_kmem_stats_labels[i], mb_kmem_stats[i]); MBUF_DUMP_BUF_CHK(); @@ -7687,8 +7955,9 @@ mbuf_dump(void) for (j = 0; j < MTRACELARGE_NUM_TRACES; j++) { struct mtracelarge *trace = &mtracelarge_table[j]; - if (trace->size == 0 || trace->depth == 0) + if (trace->size == 0 || 
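MBUF_DUMP_BUF_CHK() above is the usual snprintf() budget idiom: subtract what each call reports, advance the cursor, and bail out once the budget is spent. A stand-alone sketch with an invented report (the macro is wrapped in do/while here purely for statement hygiene); note that a truncated snprintf() returns the would-be length, so clen goes nonpositive and the very next check exits cleanly.

#include <stdio.h>

#define BUF_CHK() do {                          \
        clen -= k;                              \
        if (clen < 1)                           \
                goto done;                      \
        c += k;                                 \
} while (0)

int
main(void)
{
        static char buf[128];
        char *c = buf;
        int clen = (int)sizeof(buf);
        int k;

        k = snprintf(c, clen, "%u mbufs in use:\n", 512u);
        BUF_CHK();
        k = snprintf(c, clen, "\t%u clusters\n", 64u);
        BUF_CHK();
done:
        fputs(buf, stdout);
        return 0;
}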
trace->depth == 0) { continue; + } if (printed_banner == false) { k = snprintf(c, clen, "\nlargest allocation failure backtraces:\n"); @@ -7751,10 +8020,11 @@ mbuf_dump(void) MBUF_DUMP_BUF_CHK(); } - if (mleak_stat->ml_isaddr64) + if (mleak_stat->ml_isaddr64) { k = snprintf(c, clen, MB_LEAK_HDR_64); - else + } else { k = snprintf(c, clen, MB_LEAK_HDR_32); + } MBUF_DUMP_BUF_CHK(); for (i = 0; i < MLEAK_STACK_DEPTH; i++) { @@ -7766,20 +8036,21 @@ mbuf_dump(void) if (mleak_stat->ml_isaddr64) { k = snprintf(c, clen, "0x%0llx ", (uint64_t)VM_KERNEL_UNSLIDE( - mltr->mltr_addr[i])); + mltr->mltr_addr[i])); } else { k = snprintf(c, clen, "0x%08x ", (uint32_t)VM_KERNEL_UNSLIDE( - mltr->mltr_addr[i])); + mltr->mltr_addr[i])); } } else { - if (mleak_stat->ml_isaddr64) + if (mleak_stat->ml_isaddr64) { k = snprintf(c, clen, MB_LEAK_SPACING_64); - else + } else { k = snprintf(c, clen, MB_LEAK_SPACING_32); + } } MBUF_DUMP_BUF_CHK(); } @@ -7787,7 +8058,7 @@ mbuf_dump(void) MBUF_DUMP_BUF_CHK(); } done: - return (mbuf_dump_buf); + return mbuf_dump_buf; } #undef MBUF_DUMP_BUF_CHK @@ -7833,27 +8104,27 @@ m_reinit(struct mbuf *m, int hdr) m->m_flags &= ~M_PKTHDR; } - return (ret); + return ret; } int m_ext_set_prop(struct mbuf *m, uint32_t o, uint32_t n) { ASSERT(m->m_flags & M_EXT); - return (atomic_test_set_32(&MEXT_PRIV(m), o, n)); + return atomic_test_set_32(&MEXT_PRIV(m), o, n); } uint32_t m_ext_get_prop(struct mbuf *m) { ASSERT(m->m_flags & M_EXT); - return (MEXT_PRIV(m)); + return MEXT_PRIV(m); } int m_ext_paired_is_active(struct mbuf *m) { - return (MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1); + return MBUF_IS_PAIRED(m) ? (MEXT_PREF(m) > MEXT_MINREF(m)) : 1; } void @@ -7903,7 +8174,7 @@ m_scratch_init(struct mbuf *m) /* NOTREACHED */ } - bzero(&pkt->pkt_mpriv, sizeof (pkt->pkt_mpriv)); + bzero(&pkt->pkt_mpriv, sizeof(pkt->pkt_mpriv)); } /* @@ -7932,13 +8203,14 @@ m_scratch_get(struct mbuf *m, u_int8_t **p) lck_mtx_lock(mbuf_mlock); mca = mcl_audit_buf2mca(MC_MBUF, (mcache_obj_t *)m); - if (mca->mca_uflags & MB_SCVALID) + if (mca->mca_uflags & MB_SCVALID) { mcl_audit_scratch(mca); + } lck_mtx_unlock(mbuf_mlock); } *p = (u_int8_t *)&pkt->pkt_mpriv; - return (sizeof (pkt->pkt_mpriv)); + return sizeof(pkt->pkt_mpriv); } static void @@ -8020,40 +8292,44 @@ m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free, __private_extern__ inline struct ext_ref * m_get_rfa(struct mbuf *m) { - if (m->m_ext.ext_refflags == NULL) - return (NULL); - else - return ((struct ext_ref *)(((uintptr_t)m->m_ext.ext_refflags) ^ mb_obscure_extref)); + if (m->m_ext.ext_refflags == NULL) { + return NULL; + } else { + return (struct ext_ref *)(((uintptr_t)m->m_ext.ext_refflags) ^ mb_obscure_extref); + } } __private_extern__ inline m_ext_free_func_t m_get_ext_free(struct mbuf *m) { struct ext_ref *rfa; - if (m->m_ext.ext_free == NULL) - return (NULL); + if (m->m_ext.ext_free == NULL) { + return NULL; + } rfa = m_get_rfa(m); - if (rfa == NULL) - return ((m_ext_free_func_t)((uintptr_t)m->m_ext.ext_free ^ mb_obscure_extfree)); - else - return ((m_ext_free_func_t)(((uintptr_t)m->m_ext.ext_free) - ^ rfa->ext_token)); + if (rfa == NULL) { + return (m_ext_free_func_t)((uintptr_t)m->m_ext.ext_free ^ mb_obscure_extfree); + } else { + return (m_ext_free_func_t)(((uintptr_t)m->m_ext.ext_free) + ^ rfa->ext_token); + } } __private_extern__ inline caddr_t m_get_ext_arg(struct mbuf *m) { struct ext_ref *rfa; - if (m->m_ext.ext_arg == NULL) - return (NULL); + if (m->m_ext.ext_arg == NULL) { + return NULL; + } rfa = 
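m_get_rfa() and m_get_ext_free() above read pointers that were stored XORed with a secret token (mb_obscure_extref, or the per-buffer ext_token), so a stray read or write of the field does not yield a usable kernel address, while NULL is left alone. The idiom with a fixed demo token (xnu derives its tokens at runtime):

#include <stdint.h>
#include <stdio.h>

static const uintptr_t obscure_token = (uintptr_t)0x5a5a17c3a9e4b6d1ull;

static void *
ptr_hide(void *p)
{
        return p == NULL ? NULL : (void *)((uintptr_t)p ^ obscure_token);
}

static void *
ptr_reveal(void *stored)
{
        return stored == NULL ? NULL :
            (void *)((uintptr_t)stored ^ obscure_token);
}

int
main(void)
{
        int x = 42;
        void *stored = ptr_hide(&x);
        int *back = ptr_reveal(stored);

        printf("raw %p, stored %p, back reads %d\n", (void *)&x, stored, *back);
        return 0;
}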
m_get_rfa(m); if (rfa == NULL) { - return ((caddr_t)((uintptr_t)m->m_ext.ext_arg ^ mb_obscure_extfree)); + return (caddr_t)((uintptr_t)m->m_ext.ext_arg ^ mb_obscure_extfree); } else { - return ((caddr_t)(((uintptr_t)m->m_ext.ext_arg) ^ - rfa->ext_token)); + return (caddr_t)(((uintptr_t)m->m_ext.ext_arg) ^ + rfa->ext_token); } } @@ -8068,14 +8344,16 @@ static boolean_t mbuf_report_usage(mbuf_class_t cl) { /* if a report is already in progress, nothing to do */ - if (mb_peak_newreport) - return (TRUE); + if (mb_peak_newreport) { + return TRUE; + } if (m_total(cl) > m_peak(cl) && m_total(cl) >= (m_maxlimit(cl) >> 4) && - (m_total(cl) - m_peak(cl)) >= (m_peak(cl) >> 5)) - return (TRUE); - return (FALSE); + (m_total(cl) - m_peak(cl)) >= (m_peak(cl) >> 5)) { + return TRUE; + } + return FALSE; } __private_extern__ void @@ -8106,8 +8384,9 @@ mbuf_report_peak_usage(void) * Since a report is being generated before 1 week, * we do not need to force another one later */ - if (uptime < MBUF_PEAK_FIRST_REPORT_THRESHOLD) + if (uptime < MBUF_PEAK_FIRST_REPORT_THRESHOLD) { mb_peak_firstreport = TRUE; + } for (i = 0; i < NELEM(mbuf_table); i++) { m_peak(m_class(i)) = m_total(m_class(i)); @@ -8148,11 +8427,12 @@ mbuf_report_peak_usage(void) static int mbuf_drain_checks(boolean_t ignore_waiters) { - - if (mb_drain_maxint == 0) + if (mb_drain_maxint == 0) { return 0; - if (!ignore_waiters && mb_waiters != 0) + } + if (!ignore_waiters && mb_waiters != 0) { return 0; + } return 1; } @@ -8176,8 +8456,9 @@ mbuf_drain_locked(boolean_t ignore_waiters) static ppnum_t scratch_pa = 0; LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED); - if (!mbuf_drain_checks(ignore_waiters)) + if (!mbuf_drain_checks(ignore_waiters)) { return; + } if (scratch_pa == 0) { bzero(scratch, sizeof(scratch)); scratch_pa = pmap_find_phys(kernel_pmap, (addr64_t)scratch); @@ -8186,9 +8467,11 @@ mbuf_drain_locked(boolean_t ignore_waiters) /* * Panic if a driver wrote to our scratch memory. */ - for (k = 0; k < sizeof(scratch); k++) - if (scratch[k]) + for (k = 0; k < sizeof(scratch); k++) { + if (scratch[k]) { panic("suspect DMA to freed address"); + } + } } /* * Don't free memory too often as that could cause excessive @@ -8200,8 +8483,9 @@ mbuf_drain_locked(boolean_t ignore_waiters) if (interval <= mb_drain_maxint) { return; } - if (interval <= mb_drain_maxint * 5) + if (interval <= mb_drain_maxint * 5) { purge_caches = TRUE; + } } mbuf_drain_last_runtime = net_uptime(); /* @@ -8220,16 +8504,19 @@ mbuf_drain_locked(boolean_t ignore_waiters) * caching for a few seconds, but the mbuf worker thread will * re-enable them again. */ - if (purge_caches == TRUE) + if (purge_caches == TRUE) { for (mc = 0; mc < NELEM(mbuf_table); mc++) { - if (m_total(mc) < m_avgtotal(mc)) + if (m_total(mc) < m_avgtotal(mc)) { continue; + } lck_mtx_unlock(mbuf_mlock); ret = mcache_purge_cache(m_cache(mc), FALSE); lck_mtx_lock(mbuf_mlock); - if (ret == TRUE) + if (ret == TRUE) { m_purge_cnt(mc)++; + } } + } /* * Move the objects from the composite class freelist to * the rudimentary slabs list, but keep at least 10% of the average @@ -8261,31 +8548,36 @@ mbuf_drain_locked(boolean_t ignore_waiters) * Process only unused slabs occupying memory. 
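mbuf_report_usage() above gates peak reports on three conditions: a new high-water mark, usage of at least 1/16 of the class limit, and growth of at least 1/32 over the old peak, which keeps small or slow-moving classes quiet. The predicate in isolation (the figures in main() are made up):

#include <stdbool.h>
#include <stdio.h>

static bool
peak_report_needed(unsigned total, unsigned peak, unsigned maxlimit)
{
        return total > peak &&
            total >= (maxlimit >> 4) &&         /* at least 1/16 of limit */
            (total - peak) >= (peak >> 5);      /* grew by at least 1/32 */
}

int
main(void)
{
        printf("%d\n", peak_report_needed(1000, 990, 8192));    /* 0: growth too small */
        printf("%d\n", peak_report_needed(1100, 990, 8192));    /* 1: report */
        printf("%d\n", peak_report_needed(1100, 990, 65536));   /* 0: usage too low */
        return 0;
}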
*/ if (sp->sl_refcnt != 0 || sp->sl_len == 0 || - sp->sl_base == NULL) + sp->sl_base == NULL) { continue; + } if (m_total(mc) < m_avgtotal(mc) || - m_infree(mc) < 0.1 * m_avgtotal(mc) + m_minlimit(mc)) + m_infree(mc) < 0.1 * m_avgtotal(mc) + m_minlimit(mc)) { break; + } slab_remove(sp, mc); switch (mc) { case MC_MBUF: m_infree(mc) -= NMBPG; m_total(mc) -= NMBPG; - if (mclaudit != NULL) + if (mclaudit != NULL) { mcl_audit_free(sp->sl_base, NMBPG); + } break; case MC_CL: m_infree(mc) -= NCLPG; m_total(mc) -= NCLPG; - if (mclaudit != NULL) + if (mclaudit != NULL) { mcl_audit_free(sp->sl_base, NMBPG); + } break; case MC_BIGCL: { m_infree(mc) -= NBCLPG; m_total(mc) -= NBCLPG; - if (mclaudit != NULL) + if (mclaudit != NULL) { mcl_audit_free(sp->sl_base, NMBPG); + } break; } case MC_16KCL: @@ -8347,8 +8639,9 @@ __private_extern__ void mbuf_drain(boolean_t ignore_waiters) { LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_NOTOWNED); - if (!mbuf_drain_checks(ignore_waiters)) + if (!mbuf_drain_checks(ignore_waiters)) { return; + } lck_mtx_lock(mbuf_mlock); mbuf_drain_locked(ignore_waiters); lck_mtx_unlock(mbuf_mlock); @@ -8362,13 +8655,14 @@ m_drain_force_sysctl SYSCTL_HANDLER_ARGS int val = 0, err; err = sysctl_handle_int(oidp, &val, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } if (val) { mbuf_drain(TRUE); } - return (err); + return err; } #if DEBUG || DEVELOPMENT @@ -8383,9 +8677,10 @@ _mbwdog_logger(const char *func, const int line, const char *fmt, ...) LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED); if (mbwdog_logging == NULL) { mbwdog_logging = _MALLOC(mbwdog_logging_size, - M_TEMP, M_ZERO|M_NOWAIT); - if (mbwdog_logging == NULL) + M_TEMP, M_ZERO | M_NOWAIT); + if (mbwdog_logging == NULL) { return; + } } va_start(ap, fmt); vsnprintf(p, sizeof(p), fmt, ap); @@ -8397,8 +8692,9 @@ _mbwdog_logger(const char *func, const int line, const char *fmt, ...) 
current_proc()->p_pid, (uint64_t)VM_KERNEL_ADDRPERM(current_thread()), func, line, p); - if (len < 0) + if (len < 0) { return; + } if (mbwdog_logging_used + len > mbwdog_logging_size) { mbwdog_logging_used = mbwdog_logging_used / 2; memmove(mbwdog_logging, mbwdog_logging + mbwdog_logging_used, @@ -8413,7 +8709,7 @@ static int sysctl_mbwdog_log SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - return SYSCTL_OUT(req, mbwdog_logging, mbwdog_logging_used); + return SYSCTL_OUT(req, mbwdog_logging, mbwdog_logging_used); } SYSCTL_DECL(_kern_ipc); SYSCTL_PROC(_kern_ipc, OID_AUTO, mbwdog_log, @@ -8423,7 +8719,8 @@ SYSCTL_PROC(_kern_ipc, OID_AUTO, mbwdog_log, static int mbtest_val; static int mbtest_running; -static void mbtest_thread(__unused void *arg) +static void +mbtest_thread(__unused void *arg) { int i; int scale_down = 1; @@ -8456,7 +8753,8 @@ static void mbtest_thread(__unused void *arg) wakeup_one((caddr_t)&mbtest_running); } -static void sysctl_mbtest(void) +static void +sysctl_mbtest(void) { /* We launch three threads - wait for all of them */ OSIncrementAtomic(&mbtest_running); @@ -8480,15 +8778,17 @@ mbtest SYSCTL_HANDLER_ARGS val = oldval; error = sysctl_handle_int(oidp, &val, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } - if (val != oldval) + if (val != oldval) { sysctl_mbtest(); + } mbtest_val = val; - return (error); + return error; } #endif // DEBUG || DEVELOPMENT @@ -8508,7 +8808,6 @@ mtracelarge_register(size_t size) memcmp(bt, trace->addr, depth * sizeof(uintptr_t)) == 0) { return; } - } for (i = 0; i < MTRACELARGE_NUM_TRACES; i++) { trace = &mtracelarge_table[i]; diff --git a/bsd/kern/uipc_mbuf2.c b/bsd/kern/uipc_mbuf2.c index 31edbc0ce..90bef57a0 100644 --- a/bsd/kern/uipc_mbuf2.c +++ b/bsd/kern/uipc_mbuf2.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: uipc_mbuf.c,v 1.40 1999/04/01 00:23:25 thorpej Exp $ */ @@ -30,7 +30,7 @@ /* * Copyright (C) 1999 WIDE Project. * All rights reserved. - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: @@ -42,7 +42,7 @@ * 3. Neither the name of the project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
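The overflow handling in _mbwdog_logger() above halves the used count and slides the remaining bytes to the front of the buffer. The sketch below assumes the newer half is the half kept (the memmove length is cut off in the hunk above, so that is a reading, not a certainty); compaction may split an entry mid-line, which the sketch does not try to repair.

#include <stdio.h>
#include <string.h>

static char logbuf[32];
static size_t used;

static void
log_append(const char *s)
{
        size_t len = strlen(s);

        while (used + len > sizeof(logbuf) && used > 0) {
                used /= 2;              /* keep (roughly) the newer half */
                memmove(logbuf, logbuf + used, used);
        }
        if (used + len > sizeof(logbuf))
                return;                 /* a single entry that cannot fit */
        memcpy(logbuf + used, s, len);
        used += len;
}

int
main(void)
{
        for (int i = 0; i < 8; i++)
                log_append("entry\n");
        fwrite(logbuf, 1, used, stdout);
        return 0;
}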
- * + * * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE @@ -139,11 +139,12 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) #endif /* check invalid arguments. */ - if (m == NULL) + if (m == NULL) { panic("m == NULL in m_pulldown()"); + } if (len > MCLBYTES) { m_freem(m); - return NULL; /* impossible */ + return NULL; /* impossible */ } #if defined(PULLDOWN_STAT) && INET6 @@ -153,23 +154,23 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) #if defined(PULLDOWN_STAT) && INET6 /* statistics for m_pullup */ ip6stat.ip6s_pullup++; - if (off + len > MHLEN) + if (off + len > MHLEN) { ip6stat.ip6s_pullup_fail++; - else { + } else { int dlen, mlen; dlen = (prev == m) ? prevlen : m->m_len; mlen = (prev == m) ? prevmlen : m->m_len + M_TRAILINGSPACE(m); - if (dlen >= off + len) + if (dlen >= off + len) { ip6stat.ip6s_pullup--; /* call will not be made! */ - else if ((m->m_flags & M_EXT) != 0) { + } else if ((m->m_flags & M_EXT) != 0) { ip6stat.ip6s_pullup_alloc++; ip6stat.ip6s_pullup_copy++; } else { - if (mlen >= off + len) + if (mlen >= off + len) { ip6stat.ip6s_pullup_copy++; - else { + } else { ip6stat.ip6s_pullup_alloc++; ip6stat.ip6s_pullup_copy++; } @@ -181,9 +182,9 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) /* statistics for m_pullup2 */ ip6stat.ip6s_pullup2++; - if (off + len > MCLBYTES) + if (off + len > MCLBYTES) { ip6stat.ip6s_pullup2_fail++; - else { + } else { int dlen, mlen; dlen = (prev == m) ? prevlen : m->m_len; @@ -191,20 +192,20 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) prevlen = off + len; prevmlen = mlen; - if (dlen >= off + len) + if (dlen >= off + len) { ip6stat.ip6s_pullup2--; /* call will not be made! */ - else if ((m->m_flags & M_EXT) != 0) { + } else if ((m->m_flags & M_EXT) != 0) { ip6stat.ip6s_pullup2_alloc++; ip6stat.ip6s_pullup2_copy++; prevmlen = (off + len > MHLEN) ? MCLBYTES : MHLEN; } else { - if (mlen >= off + len) + if (mlen >= off + len) { ip6stat.ip6s_pullup2_copy++; - else { + } else { ip6stat.ip6s_pullup2_alloc++; ip6stat.ip6s_pullup2_copy++; prevmlen = (off + len > MHLEN) ? MCLBYTES - : MHLEN; + : MHLEN; } } } @@ -213,13 +214,14 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) #endif #ifdef PULLDOWN_DEBUG - { - struct mbuf *t; - printf("before:"); - for (t = m; t; t = t->m_next) - printf(" %d", t->m_len); - printf("\n"); - } + { + struct mbuf *t; + printf("before:"); + for (t = m; t; t = t->m_next) { + printf(" %d", t->m_len); + } + printf("\n"); + } #endif n = m; @@ -230,19 +232,21 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) * mbuf chain. */ while (n != NULL && off > 0) { - if (n->m_len > off) + if (n->m_len > off) { break; + } off -= n->m_len; n = n->m_next; } /* be sure to point non-empty mbuf */ - while (n != NULL && n->m_len == 0) + while (n != NULL && n->m_len == 0) { n = n->m_next; + } if (!n) { m_freem(m); - return NULL; /* mbuf chain too short */ + return NULL; /* mbuf chain too short */ } /* @@ -259,8 +263,9 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) * to get offset, we should split the mbuf even when the length * is contained in current mbuf. 
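The first loop of m_pulldown() above walks the chain to the node holding byte off, leaving off as the residual offset within that node, then skips any empty nodes. On a toy three-node chain (the struct is a stand-in, not the kernel mbuf):

#include <stdio.h>

struct node {
        struct node *next;
        int len;
};

static struct node *
seek(struct node *n, int *offp)
{
        int off = *offp;

        while (n != NULL && off > 0) {
                if (n->len > off)
                        break;          /* target byte lives in this node */
                off -= n->len;
                n = n->next;
        }
        while (n != NULL && n->len == 0)
                n = n->next;            /* land on a non-empty node */
        *offp = off;
        return n;                       /* NULL means the chain is too short */
}

int
main(void)
{
        struct node c = { NULL, 10 }, b = { &c, 0 }, a = { &b, 4 };
        int off = 6;
        struct node *n = seek(&a, &off);

        printf("node len %d, residual offset %d\n", n ? n->len : -1, off);
        return 0;
}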
*/ - if ((off == 0 || offp) && len <= n->m_len - off) + if ((off == 0 || offp) && len <= n->m_len - off) { goto ok; + } #if defined(PULLDOWN_STAT) && INET6 ip6stat.ip6s_pulldown_copy++; @@ -276,7 +281,7 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) o = m_copym(n, off, n->m_len - off, M_DONTWAIT); if (o == NULL) { m_freem(m); - return NULL; /* ENOBUFS */ + return NULL; /* ENOBUFS */ } n->m_len = off; o->m_next = n->m_next; @@ -301,26 +306,28 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) * if not, we can do nothing about the chain. */ olen = 0; - for (o = n->m_next; o != NULL; o = o->m_next) + for (o = n->m_next; o != NULL; o = o->m_next) { olen += o->m_len; + } if (hlen + olen < len) { m_freem(m); - return NULL; /* mbuf chain too short */ + return NULL; /* mbuf chain too short */ } /* * easy cases first. * we need to use m_copydata() to get data from m_next, 0>. */ - if ((n->m_flags & M_EXT) == 0) + if ((n->m_flags & M_EXT) == 0) { sharedcluster = 0; - else { - if (m_get_ext_free(n) != NULL) + } else { + if (m_get_ext_free(n) != NULL) { sharedcluster = 1; - else if (m_mclhasreference(n)) + } else if (m_mclhasreference(n)) { sharedcluster = 1; - else + } else { sharedcluster = 0; + } } /* @@ -329,7 +336,7 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) * and adjust the length of next one accordingly. */ if ((off == 0 || offp) && M_TRAILINGSPACE(n) >= tlen - && !sharedcluster) { + && !sharedcluster) { m_copydata(n->m_next, 0, tlen, mtod(n, caddr_t) + n->m_len); n->m_len += tlen; m_adj(n->m_next, tlen); @@ -363,14 +370,14 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) MGET(o, M_DONTWAIT, m->m_type); if (o == NULL) { m_freem(m); - return NULL; /* ENOBUFS */ + return NULL; /* ENOBUFS */ } - if (len > MHLEN) { /* use MHLEN just for safety */ + if (len > MHLEN) { /* use MHLEN just for safety */ MCLGET(o, M_DONTWAIT); if ((o->m_flags & M_EXT) == 0) { m_freem(m); m_free(o); - return NULL; /* ENOBUFS */ + return NULL; /* ENOBUFS */ } } /* get hlen from into */ @@ -388,16 +395,18 @@ m_pulldown(struct mbuf *m, int off, int len, int *offp) ok: #ifdef PULLDOWN_DEBUG - { - struct mbuf *t; - printf("after:"); - for (t = m; t; t = t->m_next) - printf("%c%d", t == n ? '*' : ' ', t->m_len); - printf(" (off=%d)\n", off); - } + { + struct mbuf *t; + printf("after:"); + for (t = m; t; t = t->m_next) { + printf("%c%d", t == n ? '*' : ' ', t->m_len); + } + printf(" (off=%d)\n", off); + } #endif - if (offp) + if (offp) { *offp = off; + } return n; } @@ -411,11 +420,13 @@ m_tag_create(u_int32_t id, u_int16_t type, int len, int wait, struct mbuf *buf) struct m_tag *t = NULL; struct m_tag *p; - if (len < 0) - return (NULL); + if (len < 0) { + return NULL; + } - if (len + sizeof (struct m_tag) + sizeof (struct m_taghdr) > MLEN) - return (m_tag_alloc(id, type, len, wait)); + if (len + sizeof(struct m_tag) + sizeof(struct m_taghdr) > MLEN) { + return m_tag_alloc(id, type, len, wait); + } /* * We've exhausted all external cases. Now, go through the m_tag @@ -423,10 +434,10 @@ m_tag_create(u_int32_t id, u_int16_t type, int len, int wait, struct mbuf *buf) * If not (t == NULL), call m_tag_alloc to store it in a new mbuf. 
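m_pulldown()'s cheap case above copies the missing tlen bytes into the current mbuf's trailing space and trims them from the next one; in the kernel the trim is m_adj(), which mostly just advances a data pointer, while the flat-buffer sketch below memmoves instead. Fixed capacities stand in for M_TRAILINGSPACE().

#include <stdio.h>
#include <string.h>

#define CAP 16

struct node {
        char data[CAP];
        int len;
};

static void
pull_forward(struct node *n, struct node *next, int tlen)
{
        /* caller guarantees CAP - n->len >= tlen and next->len >= tlen */
        memcpy(n->data + n->len, next->data, tlen);
        n->len += tlen;
        memmove(next->data, next->data + tlen, next->len - tlen);
        next->len -= tlen;
}

int
main(void)
{
        struct node a = { "HEAD", 4 }, b = { "ERTAIL", 6 };

        pull_forward(&a, &b, 2);        /* "HEAD" gains "ER", "TAIL" remains */
        printf("a=%.*s b=%.*s\n", a.len, a.data, b.len, b.data);
        return 0;
}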
*/ p = SLIST_FIRST(&buf->m_pkthdr.tags); - while(p != NULL) { + while (p != NULL) { /* 2KCL m_tag */ if (M_TAG_ALIGN(p->m_tag_len) + - sizeof (struct m_taghdr) > MLEN) { + sizeof(struct m_taghdr) > MLEN) { p = SLIST_NEXT(p, m_tag_link); continue; } @@ -436,13 +447,13 @@ m_tag_create(u_int32_t id, u_int16_t type, int len, int wait, struct mbuf *buf) struct mbuf *m = m_dtom(p); struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data; - VERIFY(IS_P2ALIGNED(hdr + 1, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t))); VERIFY(m->m_flags & M_TAGHDR && !(m->m_flags & M_EXT)); /* The mbuf can store this m_tag */ if (M_TAG_ALIGN(len) <= MLEN - m->m_len) { t = (struct m_tag *)(void *)(m->m_data + m->m_len); - VERIFY(IS_P2ALIGNED(t, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t))); hdr->refcnt++; m->m_len += M_TAG_ALIGN(len); VERIFY(m->m_len <= MLEN); @@ -452,16 +463,18 @@ m_tag_create(u_int32_t id, u_int16_t type, int len, int wait, struct mbuf *buf) p = SLIST_NEXT(p, m_tag_link); } - if (t == NULL) - return (m_tag_alloc(id, type, len, wait)); + if (t == NULL) { + return m_tag_alloc(id, type, len, wait); + } t->m_tag_cookie = M_TAG_VALID_PATTERN; t->m_tag_type = type; t->m_tag_len = len; t->m_tag_id = id; - if (len > 0) + if (len > 0) { bzero(t + 1, len); - return (t); + } + return t; } /* Get a packet tag structure along with specified data following. */ @@ -470,43 +483,47 @@ m_tag_alloc(u_int32_t id, u_int16_t type, int len, int wait) { struct m_tag *t; - if (len < 0) - return (NULL); + if (len < 0) { + return NULL; + } - if (M_TAG_ALIGN(len) + sizeof (struct m_taghdr) <= MLEN) { + if (M_TAG_ALIGN(len) + sizeof(struct m_taghdr) <= MLEN) { struct mbuf *m = m_get(wait, MT_TAG); struct m_taghdr *hdr; - if (m == NULL) - return (NULL); + if (m == NULL) { + return NULL; + } m->m_flags |= M_TAGHDR; hdr = (struct m_taghdr *)(void *)m->m_data; - VERIFY(IS_P2ALIGNED(hdr + 1, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t))); hdr->refcnt = 1; - m->m_len += sizeof (struct m_taghdr); + m->m_len += sizeof(struct m_taghdr); t = (struct m_tag *)(void *)(m->m_data + m->m_len); - VERIFY(IS_P2ALIGNED(t, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t))); m->m_len += M_TAG_ALIGN(len); VERIFY(m->m_len <= MLEN); - } else if (len + sizeof (struct m_tag) <= MCLBYTES) { + } else if (len + sizeof(struct m_tag) <= MCLBYTES) { t = (struct m_tag *)(void *)m_mclalloc(wait); - } else { - t = NULL; + } else { + t = NULL; } - if (t == NULL) - return (NULL); + if (t == NULL) { + return NULL; + } - VERIFY(IS_P2ALIGNED(t, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(t, sizeof(u_int64_t))); t->m_tag_cookie = M_TAG_VALID_PATTERN; t->m_tag_type = type; t->m_tag_len = len; t->m_tag_id = id; - if (len > 0) + if (len > 0) { bzero(t + 1, len); - return (t); + } + return t; } @@ -515,25 +532,27 @@ void m_tag_free(struct m_tag *t) { #if CONFIG_MACF_NET - if (t != NULL && - t->m_tag_id == KERNEL_MODULE_TAG_ID && - t->m_tag_type == KERNEL_TAG_TYPE_MACLABEL) + if (t != NULL && + t->m_tag_id == KERNEL_MODULE_TAG_ID && + t->m_tag_type == KERNEL_TAG_TYPE_MACLABEL) { mac_mbuf_tag_destroy(t); + } #endif - if (t == NULL) + if (t == NULL) { return; + } VERIFY(t->m_tag_cookie == M_TAG_VALID_PATTERN); - if (M_TAG_ALIGN(t->m_tag_len) + sizeof (struct m_taghdr) <= MLEN) { + if (M_TAG_ALIGN(t->m_tag_len) + sizeof(struct m_taghdr) <= MLEN) { struct mbuf * m = m_dtom(t); VERIFY(m->m_flags & M_TAGHDR); struct m_taghdr *hdr = (struct m_taghdr *)(void *)m->m_data; - 
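m_tag_create() and m_tag_alloc() above keep every tag record on an 8-byte boundary by rounding the payload length up, which is essentially all M_TAG_ALIGN() does. The arithmetic in isolation (ALIGN8 and the 4-byte record header are illustrative, not the kernel's exact layout):

#include <stdio.h>
#include <stddef.h>

#define ALIGN8(x)       (((x) + 7) & ~(size_t)7)

struct tag_hdr {
        unsigned short type;
        unsigned short len;             /* payload bytes, unpadded */
};

int
main(void)
{
        size_t lens[] = { 1, 8, 13 };
        size_t off = 0;

        for (int i = 0; i < 3; i++) {
                size_t padded = ALIGN8(sizeof(struct tag_hdr) + lens[i]);

                printf("tag %d at offset %zu (payload %zu, padded %zu)\n",
                    i, off, lens[i], padded);
                off += padded;
        }
        return 0;
}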
VERIFY(IS_P2ALIGNED(hdr + 1, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(hdr + 1, sizeof(u_int64_t))); /* No other tags in this mbuf */ - if(--hdr->refcnt == 0) { + if (--hdr->refcnt == 0) { m_free(m); return; } @@ -590,8 +609,9 @@ m_tag_delete_chain(struct mbuf *m, struct m_tag *t) } else { p = SLIST_FIRST(&m->m_pkthdr.tags); } - if (p == NULL) + if (p == NULL) { return; + } VERIFY(p->m_tag_cookie == M_TAG_VALID_PATTERN); while ((q = SLIST_NEXT(p, m_tag_link)) != NULL) { @@ -617,11 +637,12 @@ m_tag_locate(struct mbuf *m, u_int32_t id, u_int16_t type, struct m_tag *t) } while (p != NULL) { VERIFY(p->m_tag_cookie == M_TAG_VALID_PATTERN); - if (p->m_tag_id == id && p->m_tag_type == type) - return (p); + if (p->m_tag_id == id && p->m_tag_type == type) { + return p; + } p = SLIST_NEXT(p, m_tag_link); } - return (NULL); + return NULL; } /* Copy a single tag. */ @@ -633,8 +654,9 @@ m_tag_copy(struct m_tag *t, int how) VERIFY(t != NULL); p = m_tag_alloc(t->m_tag_id, t->m_tag_type, t->m_tag_len, how); - if (p == NULL) - return (NULL); + if (p == NULL) { + return NULL; + } #if CONFIG_MACF_NET /* * XXXMAC: we should probably pass off the initialization, and @@ -642,17 +664,17 @@ m_tag_copy(struct m_tag *t, int how) * special from the mbuf code? */ if (t != NULL && - t->m_tag_id == KERNEL_MODULE_TAG_ID && + t->m_tag_id == KERNEL_MODULE_TAG_ID && t->m_tag_type == KERNEL_TAG_TYPE_MACLABEL) { if (mac_mbuf_tag_init(p, how) != 0) { m_tag_free(p); - return (NULL); + return NULL; } mac_mbuf_tag_copy(t, p); } else #endif bcopy(t + 1, p + 1, t->m_tag_len); /* Copy the data */ - return (p); + return p; } /* @@ -674,7 +696,7 @@ m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how) t = m_tag_copy(p, how); if (t == NULL) { m_tag_delete_chain(to, NULL); - return (0); + return 0; } if (tprev == NULL) { SLIST_INSERT_HEAD(&to->m_pkthdr.tags, t, m_tag_link); @@ -683,7 +705,7 @@ m_tag_copy_chain(struct mbuf *to, struct mbuf *from, int how) tprev = t; } } - return (1); + return 1; } /* Initialize dynamic and static tags on an mbuf. */ @@ -699,7 +721,7 @@ m_tag_init(struct mbuf *m, int all) */ if (all) { bzero(&m->m_pkthdr.builtin_mtag._net_mtag, - sizeof (m->m_pkthdr.builtin_mtag._net_mtag)); + sizeof(m->m_pkthdr.builtin_mtag._net_mtag)); } } @@ -709,7 +731,7 @@ m_tag_first(struct mbuf *m) { VERIFY(m->m_flags & M_PKTHDR); - return (SLIST_FIRST(&m->m_pkthdr.tags)); + return SLIST_FIRST(&m->m_pkthdr.tags); } /* Get next tag in chain. 
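m_tag_locate() above is a linear scan of the packet-header tag list for the first (id, type) match. The same shape with a hand-rolled next pointer standing in for the <sys/queue.h> SLIST:

#include <stdint.h>
#include <stdio.h>

struct tag {
        struct tag *next;
        uint32_t id;
        uint16_t type;
};

static struct tag *
tag_locate(struct tag *head, uint32_t id, uint16_t type)
{
        for (struct tag *p = head; p != NULL; p = p->next) {
                if (p->id == id && p->type == type)
                        return p;       /* first match wins */
        }
        return NULL;
}

int
main(void)
{
        struct tag c = { NULL, 7, 2 }, b = { &c, 7, 1 }, a = { &b, 3, 1 };
        struct tag *t = tag_locate(&a, 7, 2);

        printf("found: id=%u type=%u\n", t->id, t->type);
        return 0;
}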
*/ @@ -720,21 +742,21 @@ m_tag_next(struct mbuf *m, struct m_tag *t) VERIFY(t != NULL); VERIFY(t->m_tag_cookie == M_TAG_VALID_PATTERN); - return (SLIST_NEXT(t, m_tag_link)); + return SLIST_NEXT(t, m_tag_link); } int m_set_traffic_class(struct mbuf *m, mbuf_traffic_class_t tc) { - u_int32_t val = MBUF_TC2SCVAL(tc); /* just the val portion */ + u_int32_t val = MBUF_TC2SCVAL(tc); /* just the val portion */ - return (m_set_service_class(m, m_service_class_from_val(val))); + return m_set_service_class(m, m_service_class_from_val(val)); } mbuf_traffic_class_t m_get_traffic_class(struct mbuf *m) { - return (MBUF_SC2TC(m_get_service_class(m))); + return MBUF_SC2TC(m_get_service_class(m)); } int @@ -744,12 +766,13 @@ m_set_service_class(struct mbuf *m, mbuf_svc_class_t sc) VERIFY(m->m_flags & M_PKTHDR); - if (MBUF_VALID_SC(sc)) + if (MBUF_VALID_SC(sc)) { m->m_pkthdr.pkt_svc = sc; - else + } else { error = EINVAL; + } - return (error); + return error; } mbuf_svc_class_t @@ -759,12 +782,13 @@ m_get_service_class(struct mbuf *m) VERIFY(m->m_flags & M_PKTHDR); - if (MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) + if (MBUF_VALID_SC(m->m_pkthdr.pkt_svc)) { sc = m->m_pkthdr.pkt_svc; - else + } else { sc = MBUF_SC_BE; + } - return (sc); + return sc; } mbuf_svc_class_t @@ -774,34 +798,34 @@ m_service_class_from_idx(u_int32_t i) switch (i) { case SCIDX_BK_SYS: - return (MBUF_SC_BK_SYS); + return MBUF_SC_BK_SYS; case SCIDX_BK: - return (MBUF_SC_BK); + return MBUF_SC_BK; case SCIDX_BE: - return (MBUF_SC_BE); + return MBUF_SC_BE; case SCIDX_RD: - return (MBUF_SC_RD); + return MBUF_SC_RD; case SCIDX_OAM: - return (MBUF_SC_OAM); + return MBUF_SC_OAM; case SCIDX_AV: - return (MBUF_SC_AV); + return MBUF_SC_AV; case SCIDX_RV: - return (MBUF_SC_RV); + return MBUF_SC_RV; case SCIDX_VI: - return (MBUF_SC_VI); + return MBUF_SC_VI; case SCIDX_VO: - return (MBUF_SC_VO); + return MBUF_SC_VO; case SCIDX_CTL: - return (MBUF_SC_CTL); + return MBUF_SC_CTL; default: break; @@ -809,7 +833,7 @@ m_service_class_from_idx(u_int32_t i) VERIFY(0); /* NOTREACHED */ - return (sc); + return sc; } mbuf_svc_class_t @@ -819,34 +843,34 @@ m_service_class_from_val(u_int32_t v) switch (v) { case SCVAL_BK_SYS: - return (MBUF_SC_BK_SYS); + return MBUF_SC_BK_SYS; case SCVAL_BK: - return (MBUF_SC_BK); + return MBUF_SC_BK; case SCVAL_BE: - return (MBUF_SC_BE); + return MBUF_SC_BE; case SCVAL_RD: - return (MBUF_SC_RD); + return MBUF_SC_RD; case SCVAL_OAM: - return (MBUF_SC_OAM); + return MBUF_SC_OAM; case SCVAL_AV: - return (MBUF_SC_AV); + return MBUF_SC_AV; case SCVAL_RV: - return (MBUF_SC_RV); + return MBUF_SC_RV; case SCVAL_VI: - return (MBUF_SC_VI); + return MBUF_SC_VI; case SCVAL_VO: - return (MBUF_SC_VO); + return MBUF_SC_VO; case SCVAL_CTL: - return (MBUF_SC_CTL); + return MBUF_SC_CTL; default: break; @@ -854,16 +878,16 @@ m_service_class_from_val(u_int32_t v) VERIFY(0); /* NOTREACHED */ - return (sc); + return sc; } uint16_t m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff, uint32_t datalen, uint32_t sum) { - uint32_t total_sub = 0; /* total to subtract */ - uint32_t mlen = m_pktlen(m); /* frame length */ - uint32_t bytes = (dataoff + datalen); /* bytes covered by sum */ + uint32_t total_sub = 0; /* total to subtract */ + uint32_t mlen = m_pktlen(m); /* frame length */ + uint32_t bytes = (dataoff + datalen); /* bytes covered by sum */ int len; ASSERT(bytes <= mlen); @@ -874,10 +898,11 @@ m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff, * into account the start offset. 
*/ len = (dataoff - start); - if (len > 0) + if (len > 0) { total_sub = m_sum16(m, start, len); - else if (len < 0) + } else if (len < 0) { sum += m_sum16(m, dataoff, -len); + } /* * Take care of excluding any postpended extraneous octets. @@ -894,17 +919,19 @@ m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff, "len %u]", __func__, m0, off0, len); /* NOTREACHED */ } - if (off < m->m_len) + if (off < m->m_len) { break; + } off -= m->m_len; m = m->m_next; } /* if we started on odd-alignment, swap the value */ - if ((uintptr_t)(mtod(m, uint8_t *) + off) & 1) + if ((uintptr_t)(mtod(m, uint8_t *) + off) & 1) { total_sub += ((extra << 8) & 0xffff) | (extra >> 8); - else + } else { total_sub += extra; + } total_sub = (total_sub >> 16) + (total_sub & 0xffff); } @@ -913,18 +940,19 @@ m_adj_sum16(struct mbuf *m, uint32_t start, uint32_t dataoff, * 1's complement subtract any extraneous octets. */ if (total_sub != 0) { - if (total_sub >= sum) + if (total_sub >= sum) { sum = ~(total_sub - sum) & 0xffff; - else + } else { sum -= total_sub; + } } /* fold 32-bit to 16-bit */ - sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */ - sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */ - sum = (sum >> 16) + (sum & 0xffff); /* final carry */ + sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */ + sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */ + sum = (sum >> 16) + (sum & 0xffff); /* final carry */ - return (sum & 0xffff); + return sum & 0xffff; } uint16_t @@ -945,5 +973,5 @@ m_sum16(struct mbuf *m, uint32_t off, uint32_t len) /* NOTREACHED */ } - return (os_cpu_in_cksum_mbuf(m, len, off, 0)); + return os_cpu_in_cksum_mbuf(m, len, off, 0); } diff --git a/bsd/kern/uipc_proto.c b/bsd/kern/uipc_proto.c index f8eb5e418..71d3c60d6 100644 --- a/bsd/kern/uipc_proto.c +++ b/bsd/kern/uipc_proto.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1998, 1999 Apple Computer, Inc. 
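The three folds at the end of m_adj_sum16() above reduce a 32-bit one's-complement accumulator to 16 bits by adding the high half into the low half. Two folds already suffice (0xffff + 0xffff = 0x1fffe, and folding that 17-bit value yields at most 0xffff); the third absorbs any last carry, matching the kernel's own comments.

#include <stdint.h>
#include <stdio.h>

static uint16_t
fold16(uint32_t sum)
{
        sum = (sum >> 16) + (sum & 0xffff);     /* at most 17 bits now */
        sum = (sum >> 16) + (sum & 0xffff);     /* 16 bits + carry */
        sum = (sum >> 16) + (sum & 0xffff);     /* final carry */
        return (uint16_t)sum;
}

int
main(void)
{
        printf("%#x\n", fold16(0xffffffffu));   /* worst case -> 0xffff */
        printf("%#x\n", fold16(0x1fffeu));      /* 17-bit case -> 0xffff */
        printf("%#x\n", fold16(0x12345u));      /* -> 0x2346 */
        return 0;
}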
All Rights Reserved */ @@ -81,31 +81,31 @@ static void pre_unp_init(struct domain *); extern struct domain localdomain_s; static struct protosw localsw[] = { -{ - .pr_type = SOCK_STREAM, - .pr_flags = PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS|PR_PCBLOCK, - .pr_ctloutput = uipc_ctloutput, - .pr_usrreqs = &uipc_usrreqs, - .pr_lock = unp_lock, - .pr_unlock = unp_unlock, - .pr_getlock = unp_getlock -}, -{ - .pr_type = SOCK_DGRAM, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_RIGHTS, - .pr_ctloutput = uipc_ctloutput, - .pr_usrreqs = &uipc_usrreqs, - .pr_lock = unp_lock, - .pr_unlock = unp_unlock, - .pr_getlock = unp_getlock -}, -{ - .pr_ctlinput = raw_ctlinput, - .pr_usrreqs = &raw_usrreqs, -}, + { + .pr_type = SOCK_STREAM, + .pr_flags = PR_CONNREQUIRED | PR_WANTRCVD | PR_RIGHTS | PR_PCBLOCK, + .pr_ctloutput = uipc_ctloutput, + .pr_usrreqs = &uipc_usrreqs, + .pr_lock = unp_lock, + .pr_unlock = unp_unlock, + .pr_getlock = unp_getlock + }, + { + .pr_type = SOCK_DGRAM, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_RIGHTS, + .pr_ctloutput = uipc_ctloutput, + .pr_usrreqs = &uipc_usrreqs, + .pr_lock = unp_lock, + .pr_unlock = unp_unlock, + .pr_getlock = unp_getlock + }, + { + .pr_ctlinput = raw_ctlinput, + .pr_usrreqs = &raw_usrreqs, + }, }; -static int local_proto_count = (sizeof (localsw) / sizeof (struct protosw)); +static int local_proto_count = (sizeof(localsw) / sizeof(struct protosw)); static void pre_unp_init(struct domain *dp) @@ -118,23 +118,24 @@ pre_unp_init(struct domain *dp) localdomain = dp; - for (i = 0, pr = &localsw[0]; i < local_proto_count; i++, pr++) + for (i = 0, pr = &localsw[0]; i < local_proto_count; i++, pr++) { net_add_proto(pr, dp, 1); + } unp_init(); } struct domain localdomain_s = { - .dom_family = PF_LOCAL, - .dom_name = "unix", - .dom_init = pre_unp_init, - .dom_externalize = unp_externalize, - .dom_dispose = unp_dispose, + .dom_family = PF_LOCAL, + .dom_name = "unix", + .dom_init = pre_unp_init, + .dom_externalize = unp_externalize, + .dom_dispose = unp_dispose, }; -SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW|CTLFLAG_LOCKED, - NULL, "Local domain"); -SYSCTL_NODE(_net_local, SOCK_STREAM, stream, CTLFLAG_RW|CTLFLAG_LOCKED, - NULL, "SOCK_STREAM"); -SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, CTLFLAG_RW|CTLFLAG_LOCKED, - NULL, "SOCK_DGRAM"); +SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_LOCKED, + NULL, "Local domain"); +SYSCTL_NODE(_net_local, SOCK_STREAM, stream, CTLFLAG_RW | CTLFLAG_LOCKED, + NULL, "SOCK_STREAM"); +SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, CTLFLAG_RW | CTLFLAG_LOCKED, + NULL, "SOCK_DGRAM"); diff --git a/bsd/kern/uipc_socket.c b/bsd/kern/uipc_socket.c index 348afd442..4d6e12a70 100644 --- a/bsd/kern/uipc_socket.c +++ b/bsd/kern/uipc_socket.c @@ -134,48 +134,48 @@ #define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1))) #if DEBUG || DEVELOPMENT -#define DEBUG_KERNEL_ADDRPERM(_v) (_v) +#define DEBUG_KERNEL_ADDRPERM(_v) (_v) #else -#define DEBUG_KERNEL_ADDRPERM(_v) VM_KERNEL_ADDRPERM(_v) +#define DEBUG_KERNEL_ADDRPERM(_v) VM_KERNEL_ADDRPERM(_v) #endif /* TODO: this should be in a header file somewhere */ extern char *proc_name_address(void *p); -static u_int32_t so_cache_hw; /* High water mark for socache */ -static u_int32_t so_cache_timeouts; /* number of timeouts */ -static u_int32_t so_cache_max_freed; /* max freed per timeout */ -static u_int32_t cached_sock_count = 0; -STAILQ_HEAD(, socket) so_cache_head; -int max_cached_sock_count = MAX_CACHED_SOCKETS; -static u_int32_t so_cache_time; -static int socketinit_done; -static struct zone *so_cache_zone; - -static 
lck_grp_t *so_cache_mtx_grp; -static lck_attr_t *so_cache_mtx_attr; -static lck_grp_attr_t *so_cache_mtx_grp_attr; -static lck_mtx_t *so_cache_mtx; +static u_int32_t so_cache_hw; /* High water mark for socache */ +static u_int32_t so_cache_timeouts; /* number of timeouts */ +static u_int32_t so_cache_max_freed; /* max freed per timeout */ +static u_int32_t cached_sock_count = 0; +STAILQ_HEAD(, socket) so_cache_head; +int max_cached_sock_count = MAX_CACHED_SOCKETS; +static u_int32_t so_cache_time; +static int socketinit_done; +static struct zone *so_cache_zone; + +static lck_grp_t *so_cache_mtx_grp; +static lck_attr_t *so_cache_mtx_attr; +static lck_grp_attr_t *so_cache_mtx_grp_attr; +static lck_mtx_t *so_cache_mtx; #include -static int filt_sorattach(struct knote *kn, struct kevent_internal_s *kev); -static void filt_sordetach(struct knote *kn); -static int filt_soread(struct knote *kn, long hint); -static int filt_sortouch(struct knote *kn, struct kevent_internal_s *kev); -static int filt_sorprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); +static int filt_sorattach(struct knote *kn, struct kevent_internal_s *kev); +static void filt_sordetach(struct knote *kn); +static int filt_soread(struct knote *kn, long hint); +static int filt_sortouch(struct knote *kn, struct kevent_internal_s *kev); +static int filt_sorprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); -static int filt_sowattach(struct knote *kn, struct kevent_internal_s *kev); -static void filt_sowdetach(struct knote *kn); -static int filt_sowrite(struct knote *kn, long hint); -static int filt_sowtouch(struct knote *kn, struct kevent_internal_s *kev); -static int filt_sowprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); +static int filt_sowattach(struct knote *kn, struct kevent_internal_s *kev); +static void filt_sowdetach(struct knote *kn); +static int filt_sowrite(struct knote *kn, long hint); +static int filt_sowtouch(struct knote *kn, struct kevent_internal_s *kev); +static int filt_sowprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); -static int filt_sockattach(struct knote *kn, struct kevent_internal_s *kev); -static void filt_sockdetach(struct knote *kn); -static int filt_sockev(struct knote *kn, long hint); -static int filt_socktouch(struct knote *kn, struct kevent_internal_s *kev); -static int filt_sockprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); +static int filt_sockattach(struct knote *kn, struct kevent_internal_s *kev); +static void filt_sockdetach(struct knote *kn); +static int filt_sockev(struct knote *kn, long hint); +static int filt_socktouch(struct knote *kn, struct kevent_internal_s *kev); +static int filt_sockprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); static int sooptcopyin_timeval(struct sockopt *, struct timeval *); static int sooptcopyout_timeval(struct sockopt *, const struct timeval *); @@ -218,46 +218,46 @@ SECURITY_READ_ONLY_EARLY(struct filterops) soexcept_filtops = { SYSCTL_DECL(_kern_ipc); -#define EVEN_MORE_LOCKING_DEBUG 0 +#define EVEN_MORE_LOCKING_DEBUG 0 int socket_debug = 0; SYSCTL_INT(_kern_ipc, OID_AUTO, socket_debug, - CTLFLAG_RW | CTLFLAG_LOCKED, &socket_debug, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &socket_debug, 0, ""); static unsigned long sodefunct_calls = 0; SYSCTL_LONG(_kern_ipc, OID_AUTO, sodefunct_calls, CTLFLAG_LOCKED, &sodefunct_calls, ""); static int 
socket_zone = M_SOCKET; -so_gen_t so_gencnt; /* generation count for sockets */ +so_gen_t so_gencnt; /* generation count for sockets */ MALLOC_DEFINE(M_SONAME, "soname", "socket name"); MALLOC_DEFINE(M_PCB, "pcb", "protocol control block"); -#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0) -#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2) -#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETSOCK, 1) -#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETSOCK, 3) -#define DBG_FNC_SOSEND NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1) -#define DBG_FNC_SOSEND_LIST NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 3) -#define DBG_FNC_SORECEIVE NETDBG_CODE(DBG_NETSOCK, (8 << 8)) -#define DBG_FNC_SORECEIVE_LIST NETDBG_CODE(DBG_NETSOCK, (8 << 8) | 3) -#define DBG_FNC_SOSHUTDOWN NETDBG_CODE(DBG_NETSOCK, (9 << 8)) +#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0) +#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2) +#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETSOCK, 1) +#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETSOCK, 3) +#define DBG_FNC_SOSEND NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 1) +#define DBG_FNC_SOSEND_LIST NETDBG_CODE(DBG_NETSOCK, (4 << 8) | 3) +#define DBG_FNC_SORECEIVE NETDBG_CODE(DBG_NETSOCK, (8 << 8)) +#define DBG_FNC_SORECEIVE_LIST NETDBG_CODE(DBG_NETSOCK, (8 << 8) | 3) +#define DBG_FNC_SOSHUTDOWN NETDBG_CODE(DBG_NETSOCK, (9 << 8)) -#define MAX_SOOPTGETM_SIZE (128 * MCLBYTES) +#define MAX_SOOPTGETM_SIZE (128 * MCLBYTES) int somaxconn = SOMAXCONN; SYSCTL_INT(_kern_ipc, KIPC_SOMAXCONN, somaxconn, - CTLFLAG_RW | CTLFLAG_LOCKED, &somaxconn, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &somaxconn, 0, ""); /* Should we get a maximum also ??? */ static int sosendmaxchain = 65536; static int sosendminchain = 16384; static int sorecvmincopy = 16384; SYSCTL_INT(_kern_ipc, OID_AUTO, sosendminchain, - CTLFLAG_RW | CTLFLAG_LOCKED, &sosendminchain, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &sosendminchain, 0, ""); SYSCTL_INT(_kern_ipc, OID_AUTO, sorecvmincopy, - CTLFLAG_RW | CTLFLAG_LOCKED, &sorecvmincopy, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &sorecvmincopy, 0, ""); /* * Set to enable jumbo clusters (if available) for large writes when @@ -265,7 +265,7 @@ SYSCTL_INT(_kern_ipc, OID_AUTO, sorecvmincopy, */ int sosendjcl = 1; SYSCTL_INT(_kern_ipc, OID_AUTO, sosendjcl, - CTLFLAG_RW | CTLFLAG_LOCKED, &sosendjcl, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &sosendjcl, 0, ""); /* * Set this to ignore SOF_MULTIPAGES and use jumbo clusters for large @@ -280,7 +280,7 @@ SYSCTL_INT(_kern_ipc, OID_AUTO, sosendjcl, */ int sosendjcl_ignore_capab = 0; SYSCTL_INT(_kern_ipc, OID_AUTO, sosendjcl_ignore_capab, - CTLFLAG_RW | CTLFLAG_LOCKED, &sosendjcl_ignore_capab, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &sosendjcl_ignore_capab, 0, ""); /* * Set this to ignore SOF1_IF_2KCL and use big clusters for large @@ -293,37 +293,37 @@ SYSCTL_INT(_kern_ipc, OID_AUTO, sosendjcl_ignore_capab, */ int sosendbigcl_ignore_capab = 0; SYSCTL_INT(_kern_ipc, OID_AUTO, sosendbigcl_ignore_capab, - CTLFLAG_RW | CTLFLAG_LOCKED, &sosendbigcl_ignore_capab, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &sosendbigcl_ignore_capab, 0, ""); int sodefunctlog = 0; SYSCTL_INT(_kern_ipc, OID_AUTO, sodefunctlog, CTLFLAG_RW | CTLFLAG_LOCKED, - &sodefunctlog, 0, ""); + &sodefunctlog, 0, ""); int sothrottlelog = 0; SYSCTL_INT(_kern_ipc, OID_AUTO, sothrottlelog, CTLFLAG_RW | CTLFLAG_LOCKED, - &sothrottlelog, 0, ""); + &sothrottlelog, 0, ""); int sorestrictrecv = 1; SYSCTL_INT(_kern_ipc, OID_AUTO, sorestrictrecv, CTLFLAG_RW | CTLFLAG_LOCKED, - &sorestrictrecv, 0, "Enable inbound interface restrictions"); 
+ &sorestrictrecv, 0, "Enable inbound interface restrictions"); int sorestrictsend = 1; SYSCTL_INT(_kern_ipc, OID_AUTO, sorestrictsend, CTLFLAG_RW | CTLFLAG_LOCKED, - &sorestrictsend, 0, "Enable outbound interface restrictions"); + &sorestrictsend, 0, "Enable outbound interface restrictions"); int soreserveheadroom = 1; SYSCTL_INT(_kern_ipc, OID_AUTO, soreserveheadroom, CTLFLAG_RW | CTLFLAG_LOCKED, - &soreserveheadroom, 0, "To allocate contiguous datagram buffers"); + &soreserveheadroom, 0, "To allocate contiguous datagram buffers"); #if (DEBUG || DEVELOPMENT) int so_notsent_lowat_check = 1; -SYSCTL_INT(_kern_ipc, OID_AUTO, notsent_lowat, CTLFLAG_RW|CTLFLAG_LOCKED, +SYSCTL_INT(_kern_ipc, OID_AUTO, notsent_lowat, CTLFLAG_RW | CTLFLAG_LOCKED, &so_notsent_lowat_check, 0, "enable/disable notsnet lowat check"); #endif /* DEBUG || DEVELOPMENT */ int so_accept_list_waits = 0; #if (DEBUG || DEVELOPMENT) -SYSCTL_INT(_kern_ipc, OID_AUTO, accept_list_waits, CTLFLAG_RW|CTLFLAG_LOCKED, +SYSCTL_INT(_kern_ipc, OID_AUTO, accept_list_waits, CTLFLAG_RW | CTLFLAG_LOCKED, &so_accept_list_waits, 0, "number of waits for listener incomp list"); #endif /* DEBUG || DEVELOPMENT */ @@ -333,7 +333,7 @@ extern struct inpcbinfo tcbinfo; extern int get_inpcb_str_size(void); extern int get_tcp_str_size(void); -vm_size_t so_cache_zone_element_size; +vm_size_t so_cache_zone_element_size; static int sodelayed_copy(struct socket *, struct uio *, struct mbuf **, user_ssize_t *); @@ -345,26 +345,26 @@ static void cached_sock_free(struct socket *); * Set to zero to disable further setting of the option */ -#define SO_IDLE_BK_IDLE_MAX_PER_PROC 1 -#define SO_IDLE_BK_IDLE_TIME 600 -#define SO_IDLE_BK_IDLE_RCV_HIWAT 131072 +#define SO_IDLE_BK_IDLE_MAX_PER_PROC 1 +#define SO_IDLE_BK_IDLE_TIME 600 +#define SO_IDLE_BK_IDLE_RCV_HIWAT 131072 struct soextbkidlestat soextbkidlestat; SYSCTL_UINT(_kern_ipc, OID_AUTO, maxextbkidleperproc, - CTLFLAG_RW | CTLFLAG_LOCKED, &soextbkidlestat.so_xbkidle_maxperproc, 0, - "Maximum of extended background idle sockets per process"); + CTLFLAG_RW | CTLFLAG_LOCKED, &soextbkidlestat.so_xbkidle_maxperproc, 0, + "Maximum of extended background idle sockets per process"); SYSCTL_UINT(_kern_ipc, OID_AUTO, extbkidletime, CTLFLAG_RW | CTLFLAG_LOCKED, - &soextbkidlestat.so_xbkidle_time, 0, - "Time in seconds to keep extended background idle sockets"); + &soextbkidlestat.so_xbkidle_time, 0, + "Time in seconds to keep extended background idle sockets"); SYSCTL_UINT(_kern_ipc, OID_AUTO, extbkidlercvhiwat, CTLFLAG_RW | CTLFLAG_LOCKED, - &soextbkidlestat.so_xbkidle_rcvhiwat, 0, - "High water mark for extended background idle sockets"); + &soextbkidlestat.so_xbkidle_rcvhiwat, 0, + "High water mark for extended background idle sockets"); SYSCTL_STRUCT(_kern_ipc, OID_AUTO, extbkidlestat, CTLFLAG_RD | CTLFLAG_LOCKED, - &soextbkidlestat, soextbkidlestat, ""); + &soextbkidlestat, soextbkidlestat, ""); int so_set_extended_bk_idle(struct socket *, int); @@ -376,7 +376,7 @@ int so_set_extended_bk_idle(struct socket *, int); */ __private_extern__ u_int32_t sotcdb = 0; SYSCTL_INT(_kern_ipc, OID_AUTO, sotcdb, CTLFLAG_RW | CTLFLAG_LOCKED, - &sotcdb, 0, ""); + &sotcdb, 0, ""); void socketinit(void) @@ -407,7 +407,7 @@ socketinit(void) socketinit_done = 1; PE_parse_boot_argn("socket_debug", &socket_debug, - sizeof (socket_debug)); + sizeof(socket_debug)); /* * allocate lock group attribute and group for socket cache mutex @@ -429,7 +429,7 @@ socketinit(void) } STAILQ_INIT(&so_cache_head); - so_cache_zone_element_size = 
(vm_size_t)(sizeof (struct socket) + 4 + so_cache_zone_element_size = (vm_size_t)(sizeof(struct socket) + 4 + get_inpcb_str_size() + 4 + get_tcp_str_size()); so_cache_zone = zinit(so_cache_zone_element_size, @@ -453,7 +453,7 @@ socketinit(void) static void cached_sock_alloc(struct socket **so, int waitok) { - caddr_t temp; + caddr_t temp; uintptr_t offset; lck_mtx_lock(so_cache_mtx); @@ -469,22 +469,23 @@ cached_sock_alloc(struct socket **so, int waitok) lck_mtx_unlock(so_cache_mtx); temp = (*so)->so_saved_pcb; - bzero((caddr_t)*so, sizeof (struct socket)); + bzero((caddr_t)*so, sizeof(struct socket)); (*so)->so_saved_pcb = temp; } else { - lck_mtx_unlock(so_cache_mtx); - if (waitok) + if (waitok) { *so = (struct socket *)zalloc(so_cache_zone); - else + } else { *so = (struct socket *)zalloc_noblock(so_cache_zone); + } - if (*so == NULL) + if (*so == NULL) { return; + } - bzero((caddr_t)*so, sizeof (struct socket)); + bzero((caddr_t)*so, sizeof(struct socket)); /* * Define offsets for extra structures into our @@ -493,7 +494,7 @@ cached_sock_alloc(struct socket **so, int waitok) */ offset = (uintptr_t)*so; - offset += sizeof (struct socket); + offset += sizeof(struct socket); offset = ALIGN(offset); @@ -512,7 +513,6 @@ cached_sock_alloc(struct socket **so, int waitok) static void cached_sock_free(struct socket *so) { - lck_mtx_lock(so_cache_mtx); so_cache_time = net_uptime(); @@ -521,8 +521,9 @@ cached_sock_free(struct socket *so) lck_mtx_unlock(so_cache_mtx); zfree(so_cache_zone, so); } else { - if (so_cache_hw < cached_sock_count) + if (so_cache_hw < cached_sock_count) { so_cache_hw = cached_sock_count; + } STAILQ_INSERT_TAIL(&so_cache_head, so, so_cache_ent); @@ -539,15 +540,16 @@ so_update_last_owner_locked(struct socket *so, proc_t self) * last_pid and last_upid should remain zero for sockets * created using sock_socket. 
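cached_sock_alloc() above carves the socket and both protocol control blocks out of a single cached zone element, bumping an offset past each structure and re-aligning before placing the next. A user-space sketch with invented structure sizes and an 8-byte ALIGN():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN(o)        (((o) + 7u) & ~(uintptr_t)7u)

struct fake_socket { char body[44]; };
struct fake_inpcb  { char body[30]; };
struct fake_tcpcb  { char body[52]; };

int
main(void)
{
        size_t total = sizeof(struct fake_socket) + 8 +
            sizeof(struct fake_inpcb) + 8 + sizeof(struct fake_tcpcb);
        char *base = malloc(total);
        uintptr_t off;

        if (base == NULL)
                return 1;
        off = (uintptr_t)base;
        struct fake_socket *so = (struct fake_socket *)off;
        off = ALIGN(off + sizeof(*so));         /* next block starts aligned */
        struct fake_inpcb *inp = (struct fake_inpcb *)off;
        off = ALIGN(off + sizeof(*inp));
        struct fake_tcpcb *tp = (struct fake_tcpcb *)off;

        printf("so=%p inp=%p tp=%p\n", (void *)so, (void *)inp, (void *)tp);
        printf("padding: %td and %td bytes\n",
            (char *)inp - ((char *)so + sizeof(*so)),
            (char *)tp - ((char *)inp + sizeof(*inp)));
        free(base);
        return 0;
}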
The check above achieves that */ - if (self == PROC_NULL) + if (self == PROC_NULL) { self = current_proc(); + } if (so->last_upid != proc_uniqueid(self) || so->last_pid != proc_pid(self)) { so->last_upid = proc_uniqueid(self); so->last_pid = proc_pid(self); proc_getexecutableuuid(self, so->last_uuid, - sizeof (so->last_uuid)); + sizeof(so->last_uuid)); } proc_pidoriginatoruuid(so->so_vuuid, sizeof(so->so_vuuid)); } @@ -556,8 +558,9 @@ so_update_last_owner_locked(struct socket *so, proc_t self) void so_update_policy(struct socket *so) { - if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) + if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) { (void) inp_update_policy(sotoinpcb(so)); + } } #if NECP @@ -565,17 +568,18 @@ static void so_update_necp_policy(struct socket *so, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr) { - if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) + if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) { inp_update_necp_policy(sotoinpcb(so), override_local_addr, override_remote_addr, 0); + } } #endif /* NECP */ boolean_t so_cache_timer(void) { - struct socket *p; - int n_freed = 0; + struct socket *p; + int n_freed = 0; boolean_t rc = FALSE; lck_mtx_lock(so_cache_mtx); @@ -586,8 +590,9 @@ so_cache_timer(void) VERIFY(cached_sock_count > 0); p = STAILQ_FIRST(&so_cache_head); if ((so_cache_time - p->cache_timestamp) < - SO_CACHE_TIME_LIMIT) + SO_CACHE_TIME_LIMIT) { break; + } STAILQ_REMOVE_HEAD(&so_cache_head, so_cache_ent); --cached_sock_count; @@ -601,11 +606,12 @@ so_cache_timer(void) } /* Schedule again if there is more to cleanup */ - if (!STAILQ_EMPTY(&so_cache_head)) + if (!STAILQ_EMPTY(&so_cache_head)) { rc = TRUE; + } lck_mtx_unlock(so_cache_mtx); - return (rc); + return rc; } /* @@ -623,10 +629,11 @@ soalloc(int waitok, int dom, int type) if ((dom == PF_INET) && (type == SOCK_STREAM)) { cached_sock_alloc(&so, waitok); } else { - MALLOC_ZONE(so, struct socket *, sizeof (*so), socket_zone, + MALLOC_ZONE(so, struct socket *, sizeof(*so), socket_zone, M_WAITOK); - if (so != NULL) - bzero(so, sizeof (*so)); + if (so != NULL) { + bzero(so, sizeof(*so)); + } } if (so != NULL) { so->so_gencnt = OSIncrementAtomic64((SInt64 *)&so_gencnt); @@ -641,12 +648,12 @@ soalloc(int waitok, int dom, int type) /* Convert waitok to M_WAITOK/M_NOWAIT for MAC Framework. 
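so_cache_timer() above evicts from the head of the FIFO, which is always the oldest entry, until it reaches one cached less than SO_CACHE_TIME_LIMIT ago; insertion at the tail guarantees everything behind that entry is younger still. The policy in miniature (clock and limit are stand-ins):

#include <stdio.h>

#define SO_CACHE_TIME_LIMIT 120         /* seconds, illustrative */

struct entry {
        struct entry *next;
        unsigned stamp;                 /* uptime when parked in the cache */
};

static struct entry *head;

static void
cache_timer(unsigned now)
{
        while (head != NULL) {
                if (now - head->stamp < SO_CACHE_TIME_LIMIT)
                        break;          /* everything behind is younger */
                printf("evicting entry stamped %u\n", head->stamp);
                head = head->next;      /* the kernel frees it here */
        }
}

int
main(void)
{
        struct entry c = { NULL, 400 }, b = { &c, 250 }, a = { &b, 100 };

        head = &a;
        cache_timer(410);               /* drops 100 and 250, keeps 400 */
        printf("head now stamped %u\n", head ? head->stamp : 0u);
        return 0;
}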
*/ if (mac_socket_label_init(so, !waitok) != 0) { sodealloc(so); - return (NULL); + return NULL; } #endif /* MAC_SOCKET */ } - return (so); + return so; } int @@ -664,87 +671,94 @@ socreate_internal(int dom, struct socket **aso, int type, int proto, VERIFY(aso != NULL); *aso = NULL; - if (proto != 0) + if (proto != 0) { prp = pffindproto(dom, proto, type); - else + } else { prp = pffindtype(dom, type); + } if (prp == NULL || prp->pr_usrreqs->pru_attach == NULL) { - if (pffinddomain(dom) == NULL) - return (EAFNOSUPPORT); + if (pffinddomain(dom) == NULL) { + return EAFNOSUPPORT; + } if (proto != 0) { - if (pffindprotonotype(dom, proto) != NULL) - return (EPROTOTYPE); + if (pffindprotonotype(dom, proto) != NULL) { + return EPROTOTYPE; + } } - return (EPROTONOSUPPORT); + return EPROTONOSUPPORT; + } + if (prp->pr_type != type) { + return EPROTOTYPE; } - if (prp->pr_type != type) - return (EPROTOTYPE); so = soalloc(1, dom, type); - if (so == NULL) - return (ENOBUFS); + if (so == NULL) { + return ENOBUFS; + } switch (dom) { - case PF_LOCAL: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_local_total); - break; - case PF_INET: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_inet_total); - if (type == SOCK_STREAM) { - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_stream_total); - } else { - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_total); - } - break; - case PF_ROUTE: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_route_total); - break; - case PF_NDRV: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_ndrv_total); - break; - case PF_KEY: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_key_total); - break; - case PF_INET6: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_inet6_total); - if (type == SOCK_STREAM) { - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet6_stream_total); - } else { - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet6_dgram_total); - } - break; - case PF_SYSTEM: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_system_total); - break; - case PF_MULTIPATH: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_multipath_total); - break; - default: - INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_other_total); - break; + case PF_LOCAL: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_local_total); + break; + case PF_INET: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_inet_total); + if (type == SOCK_STREAM) { + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_stream_total); + } else { + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_total); + } + break; + case PF_ROUTE: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_route_total); + break; + case PF_NDRV: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_ndrv_total); + break; + case PF_KEY: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_key_total); + break; + case PF_INET6: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_inet6_total); + if (type == SOCK_STREAM) { + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet6_stream_total); + } else { + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet6_dgram_total); + } + break; + case PF_SYSTEM: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_system_total); + break; + case PF_MULTIPATH: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_multipath_total); + break; + default: + INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_domain_other_total); + break; } - if (flags & SOCF_ASYNC) + if (flags & SOCF_ASYNC) { so->so_state |= SS_NBIO; + } 
TAILQ_INIT(&so->so_incomp); TAILQ_INIT(&so->so_comp); so->so_type = type; so->last_upid = proc_uniqueid(p); so->last_pid = proc_pid(p); - proc_getexecutableuuid(p, so->last_uuid, sizeof (so->last_uuid)); + proc_getexecutableuuid(p, so->last_uuid, sizeof(so->last_uuid)); proc_pidoriginatoruuid(so->so_vuuid, sizeof(so->so_vuuid)); if (ep != PROC_NULL && ep != p) { so->e_upid = proc_uniqueid(ep); so->e_pid = proc_pid(ep); - proc_getexecutableuuid(ep, so->e_uuid, sizeof (so->e_uuid)); + proc_getexecutableuuid(ep, so->e_uuid, sizeof(so->e_uuid)); so->so_flags |= SOF_DELEGATED; } so->so_cred = kauth_cred_proc_ref(p); - if (!suser(kauth_cred_get(), NULL)) + if (!suser(kauth_cred_get(), NULL)) { so->so_state |= SS_PRIV; + } so->so_proto = prp; so->so_rcv.sb_flags |= SB_RECV; @@ -773,8 +787,8 @@ socreate_internal(int dom, struct socket **aso, int type, int proto, so->so_state |= SS_NOFDREF; VERIFY(so->so_usecount > 0); so->so_usecount--; - sofreelastref(so, 1); /* will deallocate the socket */ - return (error); + sofreelastref(so, 1); /* will deallocate the socket */ + return error; } atomic_add_32(&prp->pr_domain->dom_refs, 1); @@ -783,8 +797,9 @@ socreate_internal(int dom, struct socket **aso, int type, int proto, /* Attach socket filters for this protocol */ sflt_initsock(so); #if TCPDEBUG - if (tcpconsdebug == 2) + if (tcpconsdebug == 2) { so->so_options |= SO_DEBUG; + } #endif so_set_default_traffic_class(so); @@ -825,7 +840,7 @@ socreate_internal(int dom, struct socket **aso, int type, int proto, *aso = so; - return (0); + return 0; } /* @@ -842,8 +857,8 @@ socreate_internal(int dom, struct socket **aso, int type, int proto, int socreate(int dom, struct socket **aso, int type, int proto) { - return (socreate_internal(dom, aso, type, proto, current_proc(), 0, - PROC_NULL)); + return socreate_internal(dom, aso, type, proto, current_proc(), 0, + PROC_NULL); } int @@ -864,10 +879,11 @@ socreate_delegate(int dom, struct socket **aso, int type, int proto, pid_t epid) * socreate_internal since it calls soalloc with M_WAITOK */ done: - if (ep != PROC_NULL) + if (ep != PROC_NULL) { proc_rele(ep); + } - return (error); + return error; } /* @@ -898,8 +914,9 @@ sobindlock(struct socket *so, struct sockaddr *nam, int dolock) struct proc *p = current_proc(); int error = 0; - if (dolock) + if (dolock) { socket_lock(so, 1); + } so_update_last_owner_locked(so, p); so_update_policy(so); @@ -924,16 +941,19 @@ sobindlock(struct socket *so, struct sockaddr *nam, int dolock) /* Socket filter */ error = sflt_bind(so, nam); - if (error == 0) + if (error == 0) { error = (*so->so_proto->pr_usrreqs->pru_bind)(so, nam, p); + } out: - if (dolock) + if (dolock) { socket_unlock(so, 1); + } - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } - return (error); + return error; } void @@ -964,7 +984,7 @@ sodealloc(struct socket *so) if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) { cached_sock_free(so); } else { - FREE_ZONE(so, sizeof (*so), so->so_zone); + FREE_ZONE(so, sizeof(*so), so->so_zone); } } @@ -1017,7 +1037,7 @@ solisten(struct socket *so, int backlog) * reject the request now. 
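sobindlock() above shows the socket-filter EJUSTRETURN convention: a filter that returns the sentinel suppresses the protocol's pru_bind, yet the caller still sees success. A sketch of that control flow (the -2 value mirrors the kernel-private errno; the filter policy and the fake pru_bind are invented):

#include <errno.h>
#include <stdio.h>

#define EJUSTRETURN (-2)        /* mirrors the kernel-private value */

static int
filter_bind(int port)
{
        /* invented policy: pretend a filter intercepts port 80 itself */
        return port == 80 ? EJUSTRETURN : 0;
}

static int
do_bind(int port)
{
        int error = filter_bind(port);

        if (error == 0)
                error = (port < 1024) ? EACCES : 0;     /* fake pru_bind */
        if (error == EJUSTRETURN)
                error = 0;      /* filter consumed it: report success */
        return error;
}

int
main(void)
{
        printf("bind 80   -> %d\n", do_bind(80));       /* 0, intercepted */
        printf("bind 700  -> %d\n", do_bind(700));      /* EACCES */
        printf("bind 8080 -> %d\n", do_bind(8080));     /* 0 */
        return 0;
}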
*/ if ((so->so_state & - (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) || + (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) || (so->so_flags & SOF_DEFUNCT)) { error = EINVAL; if (so->so_flags & SOF_DEFUNCT) { @@ -1036,17 +1056,20 @@ solisten(struct socket *so, int backlog) } error = sflt_listen(so); - if (error == 0) + if (error == 0) { error = (*so->so_proto->pr_usrreqs->pru_listen)(so, p); + } if (error) { - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } goto out; } - if (TAILQ_EMPTY(&so->so_comp)) + if (TAILQ_EMPTY(&so->so_comp)) { so->so_options |= SO_ACCEPTCONN; + } /* * POSIX: The implementation may have an upper limit on the length of * the listen queue-either global or per accepting socket. If backlog @@ -1061,13 +1084,14 @@ solisten(struct socket *so, int backlog) * in which case the length of the listen queue may be set to an * implementation-defined minimum value. */ - if (backlog <= 0 || backlog > somaxconn) + if (backlog <= 0 || backlog > somaxconn) { backlog = somaxconn; + } so->so_qlimit = backlog; out: socket_unlock(so, 1); - return (error); + return error; } /* @@ -1149,8 +1173,8 @@ sofreelastref(struct socket *so, int dealloc) if (!(so->so_flags & SOF_PCBCLEARING) || !(so->so_state & SS_NOFDREF)) { selthreadclear(&so->so_snd.sb_sel); selthreadclear(&so->so_rcv.sb_sel); - so->so_rcv.sb_flags &= ~(SB_SEL|SB_UPCALL); - so->so_snd.sb_flags &= ~(SB_SEL|SB_UPCALL); + so->so_rcv.sb_flags &= ~(SB_SEL | SB_UPCALL); + so->so_snd.sb_flags &= ~(SB_SEL | SB_UPCALL); so->so_event = sonullevent; return; } @@ -1187,15 +1211,15 @@ sofreelastref(struct socket *so, int dealloc) */ selthreadclear(&so->so_snd.sb_sel); selthreadclear(&so->so_rcv.sb_sel); - so->so_rcv.sb_flags &= ~(SB_SEL|SB_UPCALL); - so->so_snd.sb_flags &= ~(SB_SEL|SB_UPCALL); + so->so_rcv.sb_flags &= ~(SB_SEL | SB_UPCALL); + so->so_snd.sb_flags &= ~(SB_SEL | SB_UPCALL); so->so_event = sonullevent; return; } else { if (head->so_proto->pr_getlock != NULL) { so_release_accept_list(head); - socket_unlock(head, 1); - } + socket_unlock(head, 1); + } printf("sofree: not queued\n"); } } @@ -1206,15 +1230,16 @@ sofreelastref(struct socket *so, int dealloc) if (so->so_flags & SOF_FLOW_DIVERT) { flow_divert_detach(so); } -#endif /* FLOW_DIVERT */ +#endif /* FLOW_DIVERT */ /* 3932268: disable upcall */ so->so_rcv.sb_flags &= ~SB_UPCALL; - so->so_snd.sb_flags &= ~(SB_UPCALL|SB_SNDBYTE_CNT); + so->so_snd.sb_flags &= ~(SB_UPCALL | SB_SNDBYTE_CNT); so->so_event = sonullevent; - if (dealloc) + if (dealloc) { sodealloc(so); + } } void @@ -1222,18 +1247,20 @@ soclose_wait_locked(struct socket *so) { lck_mtx_t *mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); /* * Double check here and return if there's no outstanding upcall; * otherwise proceed further only if SOF_UPCALLCLOSEWAIT is set. 
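The clamp at the end of solisten() is blunter than the POSIX wording quoted in the comment: any backlog outside the range (0, somaxconn], including zero and negative values, silently becomes somaxconn, the system-wide ceiling. A quick way to read that ceiling from user space (a sketch; assumes the Darwin sysctl name kern.ipc.somaxconn):

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    /* Read the backlog ceiling that solisten() clamps to. */
    int
    main(void)
    {
        int somaxconn = 0;
        size_t len = sizeof(somaxconn);

        if (sysctlbyname("kern.ipc.somaxconn", &somaxconn, &len,
            NULL, 0) == 0) {
            printf("listen(fd, 0) and listen(fd, 1 << 30) both get "
                "a queue limit of %d\n", somaxconn);
        }
        return 0;
    }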
*/ - if (!so->so_upcallusecount || !(so->so_flags & SOF_UPCALLCLOSEWAIT)) + if (!so->so_upcallusecount || !(so->so_flags & SOF_UPCALLCLOSEWAIT)) { return; + } so->so_rcv.sb_flags &= ~SB_UPCALL; so->so_snd.sb_flags &= ~SB_UPCALL; so->so_flags |= SOF_CLOSEWAIT; @@ -1262,8 +1289,9 @@ soclose_locked(struct socket *so) sflt_notify(so, sock_evt_closing, NULL); - if (so->so_upcallusecount) + if (so->so_upcallusecount) { soclose_wait_locked(so); + } #if CONTENT_FILTER /* @@ -1312,11 +1340,13 @@ again: * otherwise, remove the incomp socket from the queue * and let soabort trigger the appropriate cleanup. */ - if (sp->so_flags & SOF_OVERFLOW) + if (sp->so_flags & SOF_OVERFLOW) { continue; + } - if (persocklock != 0) + if (persocklock != 0) { socket_lock(sp, 1); + } /* * Radar 27945981 @@ -1337,14 +1367,16 @@ again: __func__, sp); } - if (persocklock != 0) + if (persocklock != 0) { socket_unlock(sp, 1); + } } TAILQ_FOREACH_SAFE(sp, &so->so_comp, so_list, sonext) { /* Dequeue from so_comp since sofree() won't do it */ - if (persocklock != 0) + if (persocklock != 0) { socket_lock(sp, 1); + } if (sp->so_state & SS_COMP) { sp->so_state &= ~SS_COMP; @@ -1358,12 +1390,13 @@ again: __func__, sp); } - if (persocklock) + if (persocklock) { socket_unlock(sp, 1); } + } if (incomp_overflow_only == 0 && !TAILQ_EMPTY(&so->so_incomp)) { -#if (DEBUG|DEVELOPMENT) +#if (DEBUG | DEVELOPMENT) panic("%s head %p so_comp not empty\n", __func__, so); #endif /* (DEVELOPMENT || DEBUG) */ @@ -1371,7 +1404,7 @@ again: } if (!TAILQ_EMPTY(&so->so_comp)) { -#if (DEBUG|DEVELOPMENT) +#if (DEBUG | DEVELOPMENT) panic("%s head %p so_comp not empty\n", __func__, so); #endif /* (DEVELOPMENT || DEBUG) */ @@ -1391,21 +1424,24 @@ again: if (so->so_state & SS_ISCONNECTED) { if ((so->so_state & SS_ISDISCONNECTING) == 0) { error = sodisconnectlocked(so); - if (error) + if (error) { goto drop; + } } if (so->so_options & SO_LINGER) { lck_mtx_t *mutex_held; if ((so->so_state & SS_ISDISCONNECTING) && - (so->so_state & SS_NBIO)) + (so->so_state & SS_NBIO)) { goto drop; - if (so->so_proto->pr_getlock != NULL) + } + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } while (so->so_state & SS_ISCONNECTED) { - ts.tv_sec = (so->so_linger/100); + ts.tv_sec = (so->so_linger / 100); ts.tv_nsec = (so->so_linger % 100) * NSEC_PER_USEC * 1000 * 10; error = msleep((caddr_t)&so->so_timeo, @@ -1415,8 +1451,9 @@ again: * It's OK when the time fires, * don't report an error */ - if (error == EWOULDBLOCK) + if (error == EWOULDBLOCK) { error = 0; + } break; } } @@ -1429,8 +1466,9 @@ drop: } if (so->so_pcb != NULL && !(so->so_flags & SOF_PCBCLEARING)) { int error2 = (*so->so_proto->pr_usrreqs->pru_detach)(so); - if (error == 0) + if (error == 0) { error = error2; + } } if (so->so_usecount <= 0) { panic("soclose: usecount is zero so=%p\n", so); @@ -1444,8 +1482,9 @@ discard: } so->so_state |= SS_NOFDREF; - if ((so->so_flags & SOF_KNOTE) != 0) + if ((so->so_flags & SOF_KNOTE) != 0) { KNOTE(&so->so_klist, SO_FILT_HINT_LOCKED); + } atomic_add_32(&so->so_proto->pr_domain->dom_refs, -1); evsofree(so); @@ -1453,7 +1492,7 @@ discard: VERIFY(so->so_usecount > 0); so->so_usecount--; sofree(so); - return (error); + return error; } int @@ -1470,12 +1509,13 @@ soclose(struct socket *so) * retained in kernel remove its reference */ so->so_usecount--; - if (so->so_usecount < 2) + if (so->so_usecount < 2) { panic("soclose: retaincnt non null and so=%p " 
"usecount=%d\n", so, so->so_usecount); + } } socket_unlock(so, 1); - return (error); + return error; } /* @@ -1490,10 +1530,11 @@ soabort(struct socket *so) #ifdef MORE_LOCKING_DEBUG lck_mtx_t *mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); #endif @@ -1502,10 +1543,10 @@ soabort(struct socket *so) error = (*so->so_proto->pr_usrreqs->pru_abort)(so); if (error) { sofree(so); - return (error); + return error; } } - return (0); + return 0; } int @@ -1513,8 +1554,9 @@ soacceptlock(struct socket *so, struct sockaddr **nam, int dolock) { int error; - if (dolock) + if (dolock) { socket_lock(so, 1); + } so_update_last_owner_locked(so, PROC_NULL); so_update_policy(so); @@ -1522,20 +1564,22 @@ soacceptlock(struct socket *so, struct sockaddr **nam, int dolock) so_update_necp_policy(so, NULL, NULL); #endif /* NECP */ - if ((so->so_state & SS_NOFDREF) == 0) + if ((so->so_state & SS_NOFDREF) == 0) { panic("soaccept: !NOFDREF"); + } so->so_state &= ~SS_NOFDREF; error = (*so->so_proto->pr_usrreqs->pru_accept)(so, nam); - if (dolock) + if (dolock) { socket_unlock(so, 1); - return (error); + } + return error; } int soaccept(struct socket *so, struct sockaddr **nam) { - return (soacceptlock(so, nam, 1)); + return soacceptlock(so, nam, 1); } int @@ -1591,7 +1635,7 @@ done: /* Callee checks for NULL pointer */ sock_freeaddr(remote); sock_freeaddr(local); - return (error); + return error; } /* @@ -1613,8 +1657,9 @@ soconnectlock(struct socket *so, struct sockaddr *nam, int dolock) int error; struct proc *p = current_proc(); - if (dolock) + if (dolock) { socket_lock(so, 1); + } so_update_last_owner_locked(so, p); so_update_policy(so); @@ -1636,15 +1681,17 @@ soconnectlock(struct socket *so, struct sockaddr *nam, int dolock) (uint64_t)DEBUG_KERNEL_ADDRPERM(so), SOCK_DOM(so), SOCK_TYPE(so), error); } - if (dolock) + if (dolock) { socket_unlock(so, 1); - return (error); + } + return error; } if ((so->so_restrictions & SO_RESTRICT_DENY_OUT) != 0) { - if (dolock) + if (dolock) { socket_unlock(so, 1); - return (EPERM); + } + return EPERM; } /* @@ -1653,7 +1700,7 @@ soconnectlock(struct socket *so, struct sockaddr *nam, int dolock) * This allows user to disconnect by connecting to, e.g., * a null address. 
*/ - if (so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING) && + if (so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING) && ((so->so_proto->pr_flags & PR_CONNREQUIRED) || (error = sodisconnectlocked(so)))) { error = EISCONN; @@ -1664,22 +1711,24 @@ soconnectlock(struct socket *so, struct sockaddr *nam, int dolock) */ error = sflt_connectout(so, nam); if (error != 0) { - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } } else { error = (*so->so_proto->pr_usrreqs->pru_connect) (so, nam, p); } } - if (dolock) + if (dolock) { socket_unlock(so, 1); - return (error); + } + return error; } int soconnect(struct socket *so, struct sockaddr *nam) { - return (soconnectlock(so, nam, 1)); + return soconnectlock(so, nam, 1); } /* @@ -1696,15 +1745,17 @@ soconnect2(struct socket *so1, struct socket *so2) int error; socket_lock(so1, 1); - if (so2->so_proto->pr_lock) + if (so2->so_proto->pr_lock) { socket_lock(so2, 1); + } error = (*so1->so_proto->pr_usrreqs->pru_connect2)(so1, so2); socket_unlock(so1, 1); - if (so2->so_proto->pr_lock) + if (so2->so_proto->pr_lock) { socket_unlock(so2, 1); - return (error); + } + return error; } int @@ -1731,11 +1782,12 @@ soconnectxlocked(struct socket *so, struct sockaddr *src, (uint64_t)DEBUG_KERNEL_ADDRPERM(so), SOCK_DOM(so), SOCK_TYPE(so), error); } - return (error); + return error; } - if ((so->so_restrictions & SO_RESTRICT_DENY_OUT) != 0) - return (EPERM); + if ((so->so_restrictions & SO_RESTRICT_DENY_OUT) != 0) { + return EPERM; + } /* * If protocol is connection-based, can only connect once @@ -1743,7 +1795,7 @@ soconnectxlocked(struct socket *so, struct sockaddr *src, * try to disconnect first. This allows user to disconnect * by connecting to, e.g., a null address. */ - if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) && + if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) && !(so->so_proto->pr_flags & PR_MULTICONN) && ((so->so_proto->pr_flags & PR_CONNREQUIRED) || (error = sodisconnectlocked(so)) != 0)) { @@ -1757,8 +1809,9 @@ soconnectxlocked(struct socket *so, struct sockaddr *src, if (error != 0) { /* Disable PRECONNECT_DATA, as we don't need to send a SYN anymore. */ so->so_flags1 &= ~SOF1_PRECONNECT_DATA; - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } } else { error = (*so->so_proto->pr_usrreqs->pru_connectx) (so, src, dst, p, ifscope, aid, pcid, @@ -1766,7 +1819,7 @@ soconnectxlocked(struct socket *so, struct sockaddr *src, } } - return (error); + return error; } int @@ -1784,11 +1837,12 @@ sodisconnectlocked(struct socket *so) } error = (*so->so_proto->pr_usrreqs->pru_disconnect)(so); - if (error == 0) + if (error == 0) { sflt_notify(so, sock_evt_disconnected, NULL); + } bad: - return (error); + return error; } /* Locking version */ @@ -1800,7 +1854,7 @@ sodisconnect(struct socket *so) socket_lock(so, 1); error = sodisconnectlocked(so); socket_unlock(so, 1); - return (error); + return error; } int @@ -1818,10 +1872,11 @@ sodisconnectxlocked(struct socket *so, sae_associd_t aid, sae_connid_t cid) * The event applies only for the session, not for * the disconnection of individual subflows. 
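The "disconnect by connecting to a null address" idiom mentioned twice above has a concrete user-space form: on a datagram socket, connect(2) with an AF_UNSPEC address dissolves the peer association, which is what drives the sodisconnectlocked() call ahead of a re-connect. (soconnectxlocked() and sodisconnectxlocked() are the same paths reached through Darwin's extended connectx/disconnectx interface.) A sketch:

    #include <string.h>
    #include <sys/socket.h>

    /*
     * Dissolve a UDP socket's peer association so it can be
     * re-connect(2)ed elsewhere; assumes a BSD-style sockaddr
     * that carries an sa_len field.
     */
    static int
    udp_dissolve(int fd)
    {
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        sa.sa_len = sizeof(sa);
        return connect(fd, &sa, sizeof(sa));
    }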
*/ - if (so->so_state & (SS_ISDISCONNECTING|SS_ISDISCONNECTED)) + if (so->so_state & (SS_ISDISCONNECTING | SS_ISDISCONNECTED)) { sflt_notify(so, sock_evt_disconnected, NULL); + } } - return (error); + return error; } int @@ -1832,10 +1887,10 @@ sodisconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid) socket_lock(so, 1); error = sodisconnectxlocked(so, aid, cid); socket_unlock(so, 1); - return (error); + return error; } -#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT) +#define SBLOCKWAIT(f) (((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT) /* * sosendcheck will lock the socket buffer if it isn't locked and @@ -1854,9 +1909,9 @@ sosendcheck(struct socket *so, struct sockaddr *addr, user_ssize_t resid, int32_t clen, int32_t atomic, int flags, int *sblocked, struct mbuf *control) { - int error = 0; + int error = 0; int32_t space; - int assumelock = 0; + int assumelock = 0; restart: if (*sblocked == 0) { @@ -1873,9 +1928,10 @@ restart: } else { error = sblock(&so->so_snd, SBLOCKWAIT(flags)); if (error) { - if (so->so_flags & SOF_DEFUNCT) + if (so->so_flags & SOF_DEFUNCT) { goto defunct; - return (error); + } + return error; } *sblocked = 1; } @@ -1892,7 +1948,7 @@ defunct: __func__, proc_selfpid(), proc_best_name(current_proc()), (uint64_t)DEBUG_KERNEL_ADDRPERM(so), SOCK_DOM(so), SOCK_TYPE(so), error); - return (error); + return error; } if (so->so_state & SS_CANTSENDMORE) { @@ -1901,44 +1957,47 @@ defunct: * Can re-inject data of half closed connections */ if ((so->so_state & SS_ISDISCONNECTED) == 0 && - so->so_snd.sb_cfil_thread == current_thread() && - cfil_sock_data_pending(&so->so_snd) != 0) + so->so_snd.sb_cfil_thread == current_thread() && + cfil_sock_data_pending(&so->so_snd) != 0) { CFIL_LOG(LOG_INFO, - "so %llx ignore SS_CANTSENDMORE", - (uint64_t)DEBUG_KERNEL_ADDRPERM(so)); - else + "so %llx ignore SS_CANTSENDMORE", + (uint64_t)DEBUG_KERNEL_ADDRPERM(so)); + } else #endif /* CONTENT_FILTER */ - return (EPIPE); + return EPIPE; } if (so->so_error) { error = so->so_error; so->so_error = 0; - return (error); + return error; } if ((so->so_state & SS_ISCONNECTED) == 0) { if ((so->so_proto->pr_flags & PR_CONNREQUIRED) != 0) { if (((so->so_state & SS_ISCONFIRMING) == 0) && (resid != 0 || clen == 0) && - !(so->so_flags1 & SOF1_PRECONNECT_DATA)) - return (ENOTCONN); - - } else if (addr == 0 && !(flags&MSG_HOLD)) { - return ((so->so_proto->pr_flags & PR_CONNREQUIRED) ? - ENOTCONN : EDESTADDRREQ); + !(so->so_flags1 & SOF1_PRECONNECT_DATA)) { + return ENOTCONN; + } + } else if (addr == 0 && !(flags & MSG_HOLD)) { + return (so->so_proto->pr_flags & PR_CONNREQUIRED) ? 
+ ENOTCONN : EDESTADDRREQ; } } - if (so->so_flags & SOF_ENABLE_MSGS) + if (so->so_flags & SOF_ENABLE_MSGS) { space = msgq_sbspace(so, control); - else + } else { space = sbspace(&so->so_snd); + } - if (flags & MSG_OOB) + if (flags & MSG_OOB) { space += 1024; + } if ((atomic && resid > so->so_snd.sb_hiwat) || - clen > so->so_snd.sb_hiwat) - return (EMSGSIZE); + clen > so->so_snd.sb_hiwat) { + return EMSGSIZE; + } if ((space < resid + clen && (atomic || (space < (int32_t)so->so_snd.sb_lowat) || @@ -1950,27 +2009,28 @@ defunct: */ if (so->so_flags1 & SOF1_PRECONNECT_DATA) { if (space == 0) { - return (EWOULDBLOCK); + return EWOULDBLOCK; } if (space < (int32_t)so->so_snd.sb_lowat) { - return (0); + return 0; } } if ((so->so_state & SS_NBIO) || (flags & MSG_NBIO) || assumelock) { - return (EWOULDBLOCK); + return EWOULDBLOCK; } - sbunlock(&so->so_snd, TRUE); /* keep socket locked */ + sbunlock(&so->so_snd, TRUE); /* keep socket locked */ *sblocked = 0; error = sbwait(&so->so_snd); if (error) { - if (so->so_flags & SOF_DEFUNCT) + if (so->so_flags & SOF_DEFUNCT) { goto defunct; - return (error); + } + return error; } goto restart; } - return (0); + return 0; } /* @@ -2048,10 +2108,11 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, uint16_t headroom = 0; boolean_t en_tracing = FALSE; - if (uio != NULL) + if (uio != NULL) { resid = uio_resid(uio); - else + } else { resid = top->m_pkthdr.len; + } KERNEL_DEBUG((DBG_FNC_SOSEND | DBG_FUNC_START), so, resid, so->so_snd.sb_cc, so->so_snd.sb_lowat, so->so_snd.sb_hiwat); @@ -2117,23 +2178,27 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, (so->so_proto->pr_flags & PR_ATOMIC); OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd); - if (control != NULL) + if (control != NULL) { clen = control->m_len; + } - if (soreserveheadroom != 0) + if (soreserveheadroom != 0) { headroom = so->so_pktheadroom; + } do { error = sosendcheck(so, addr, resid, clen, atomic, flags, &sblocked, control); - if (error) + if (error) { goto out_locked; + } mp = &top; - if (so->so_flags & SOF_ENABLE_MSGS) + if (so->so_flags & SOF_ENABLE_MSGS) { space = msgq_sbspace(so, control); - else + } else { space = sbspace(&so->so_snd) - clen; + } space += ((flags & MSG_OOB) ? 1024 : 0); do { @@ -2142,8 +2207,9 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, * Data is prepackaged in "top".
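Restating sosendcheck()'s contract, since the diff obscures it: with the send buffer locked, it either reports room for the write, fails fast with EWOULDBLOCK when the socket is non-blocking, or sleeps in sbwait() and re-runs the whole check from the restart: label. A user-space analogue of that loop, with a condition variable standing in for sbwait() (struct sendq and the helper are hypothetical):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct sendq {
        pthread_mutex_t lock;
        pthread_cond_t  space_avail;
        size_t          space;      /* free bytes, like sbspace() */
    };

    static int
    sendq_wait_for_space(struct sendq *q, size_t want, bool nonblocking)
    {
        int error = 0;

        pthread_mutex_lock(&q->lock);
        /* like: space < resid + clen (low-water details omitted) */
        while (q->space < want) {
            if (nonblocking) {
                error = EWOULDBLOCK;    /* the SS_NBIO / MSG_NBIO path */
                break;
            }
            /* like sbwait(): sleep, then re-check from the top */
            pthread_cond_wait(&q->space_avail, &q->lock);
        }
        if (error == 0) {
            q->space -= want;           /* caller may now enqueue */
        }
        pthread_mutex_unlock(&q->lock);
        return error;
    }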
*/ resid = 0; - if (flags & MSG_EOR) + if (flags & MSG_EOR) { top->m_flags |= M_EOR; + } } else { int chainlength; int bytes_to_copy; @@ -2154,13 +2220,15 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, bytes_to_copy = imin(resid, space); bytes_to_alloc = bytes_to_copy; - if (top == NULL) + if (top == NULL) { bytes_to_alloc += headroom; + } - if (sosendminchain > 0) + if (sosendminchain > 0) { chainlength = 0; - else + } else { chainlength = sosendmaxchain; + } /* * Use big 4 KB cluster when the outgoing interface @@ -2209,14 +2277,15 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, if ((bytes_to_alloc - (num_needed * M16KCLBYTES)) - >= MINCLSIZE) + >= MINCLSIZE) { num_needed++; + } freelist = m_getpackets_internal( - (unsigned int *)&num_needed, - hdrs_needed, M_WAIT, 0, - M16KCLBYTES); + (unsigned int *)&num_needed, + hdrs_needed, M_WAIT, 0, + M16KCLBYTES); /* * Fall back to 4K cluster size * if allocation failed @@ -2231,14 +2300,15 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, if ((bytes_to_alloc - (num_needed * MBIGCLBYTES)) >= - MINCLSIZE) + MINCLSIZE) { num_needed++; + } freelist = m_getpackets_internal( - (unsigned int *)&num_needed, - hdrs_needed, M_WAIT, 0, - MBIGCLBYTES); + (unsigned int *)&num_needed, + hdrs_needed, M_WAIT, 0, + MBIGCLBYTES); /* * Fall back to cluster size * if allocation failed @@ -2260,9 +2330,9 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, MCLBYTES; freelist = m_getpackets_internal( - (unsigned int *)&num_needed, - hdrs_needed, M_WAIT, 0, - MCLBYTES); + (unsigned int *)&num_needed, + hdrs_needed, M_WAIT, 0, + MCLBYTES); /* * Fall back to a single mbuf * if allocation failed @@ -2274,14 +2344,15 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, if ((bytes_to_alloc - (num_needed * MCLBYTES)) >= - MINCLSIZE) + MINCLSIZE) { num_needed++; + } freelist = m_getpackets_internal( - (unsigned int *)&num_needed, - hdrs_needed, M_WAIT, 0, - MCLBYTES); + (unsigned int *)&num_needed, + hdrs_needed, M_WAIT, 0, + MCLBYTES); /* * Fall back to a single mbuf * if allocation failed @@ -2302,12 +2373,13 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, * reserving the socket headroom */ if (freelist == NULL) { - if (top == NULL) + if (top == NULL) { MGETHDR(freelist, M_WAIT, MT_DATA); - else + } else { MGET(freelist, M_WAIT, MT_DATA); + } if (freelist == NULL) { error = ENOBUFS; @@ -2329,14 +2401,15 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, freelist = m->m_next; m->m_next = NULL; - if ((m->m_flags & M_EXT)) + if ((m->m_flags & M_EXT)) { mlen = m->m_ext.ext_size - M_LEADINGSPACE(m); - else if ((m->m_flags & M_PKTHDR)) + } else if ((m->m_flags & M_PKTHDR)) { mlen = MHLEN - M_LEADINGSPACE(m); - else + } else { mlen = MLEN - M_LEADINGSPACE(m); + } len = imin(mlen, bytes_to_copy); chainlength += len; @@ -2351,40 +2424,44 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, m->m_len = len; *mp = m; top->m_pkthdr.len += len; - if (error) + if (error) { break; + } mp = &m->m_next; if (resid <= 0) { - if (flags & MSG_EOR) + if (flags & MSG_EOR) { top->m_flags |= M_EOR; + } break; } bytes_to_copy = min(resid, space); - } while (space > 0 && (chainlength < sosendmaxchain || atomic || resid < MINCLSIZE)); socket_lock(so, 0); - if (error) + if (error) { goto out_locked; + } } - if (flags & (MSG_HOLD|MSG_SEND)) { + if (flags & (MSG_HOLD | MSG_SEND)) { /* Enqueue for later, go away if HOLD */ struct mbuf *mb1; if (so->so_temp && 
(flags & MSG_FLUSH)) { m_freem(so->so_temp); so->so_temp = NULL; } - if (so->so_temp) + if (so->so_temp) { so->so_tail->m_next = top; - else + } else { so->so_temp = top; + } mb1 = top; - while (mb1->m_next) + while (mb1->m_next) { mb1 = mb1->m_next; + } so->so_tail = mb1; if (flags & MSG_HOLD) { top = NULL; @@ -2392,8 +2469,9 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, } top = so->so_temp; } - if (dontroute) + if (dontroute) { so->so_options |= SO_DONTROUTE; + } /* * Compute flags here, for pru_send and NKEs @@ -2437,7 +2515,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, clen = 0; control = NULL; top = NULL; - } + } goto out_locked; } #endif /* CONTENT_FILTER */ @@ -2453,35 +2531,43 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, error = (*so->so_proto->pr_usrreqs->pru_send) (so, sendflags, top, addr, control, p); - if (flags & MSG_SEND) + if (flags & MSG_SEND) { so->so_temp = NULL; + } - if (dontroute) + if (dontroute) { so->so_options &= ~SO_DONTROUTE; + } clen = 0; control = control_copy; control_copy = NULL; top = NULL; mp = &top; - if (error) + if (error) { goto out_locked; + } } while (resid && space > 0); } while (resid); out_locked: - if (sblocked) - sbunlock(&so->so_snd, FALSE); /* will unlock socket */ - else + if (sblocked) { + sbunlock(&so->so_snd, FALSE); /* will unlock socket */ + } else { socket_unlock(so, 1); - if (top != NULL) + } + if (top != NULL) { m_freem(top); - if (control != NULL) + } + if (control != NULL) { m_freem(control); - if (freelist != NULL) + } + if (freelist != NULL) { m_freem_list(freelist); - if (control_copy != NULL) + } + if (control_copy != NULL) { m_freem(control_copy); + } soclearfastopen(so); @@ -2495,7 +2581,7 @@ out_locked: KERNEL_DEBUG(DBG_FNC_SOSEND | DBG_FUNC_END, so, resid, so->so_snd.sb_cc, space, error); - return (error); + return error; } int @@ -2510,7 +2596,7 @@ sosend_reinject(struct socket *so, struct sockaddr *addr, struct mbuf *top, stru * If control is not NULL, top must be packet header */ VERIFY(top != NULL && - (control == NULL || top->m_flags & M_PKTHDR)); + (control == NULL || top->m_flags & M_PKTHDR)); /* * If control is not passed in, see if we can get it @@ -2533,12 +2619,13 @@ sosend_reinject(struct socket *so, struct sockaddr *addr, struct mbuf *top, stru } } } - if (control_end != NULL) + if (control_end != NULL) { control_end->m_next = NULL; + } } int error = (*so->so_proto->pr_usrreqs->pru_send) - (so, sendflags, top, addr, control, current_proc()); + (so, sendflags, top, addr, control, current_proc()); return error; } @@ -2613,8 +2700,9 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) error = sosendcheck(so, NULL, resid, 0, atomic, flags, &sblocked, NULL); - if (error) + if (error) { goto release; + } /* * Use big 4 KB clusters when the outgoing interface does not prefer @@ -2622,8 +2710,9 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) */ bigcl = !(so->so_flags1 & SOF1_IF_2KCL) || sosendbigcl_ignore_capab; - if (soreserveheadroom != 0) + if (soreserveheadroom != 0) { headroom = so->so_pktheadroom; + } do { int i; @@ -2632,10 +2721,11 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) size_t maxpktlen = 0; int bytes_to_alloc; - if (sosendminchain > 0) + if (sosendminchain > 0) { chainlength = 0; - else + } else { chainlength = sosendmaxchain; + } socket_unlock(so, 0); @@ -2649,18 +2739,21 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int
uiocnt, int flags) len = uio_resid(auio); /* Do nothing for empty messages */ - if (len == 0) + if (len == 0) { continue; + } num_needed += 1; uiolast += 1; - if (len > maxpktlen) + if (len > maxpktlen) { maxpktlen = len; + } chainlength += len; - if (chainlength > sosendmaxchain) + if (chainlength > sosendmaxchain) { break; + } } /* * Nothing left to send @@ -2683,19 +2776,19 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) if (bytes_to_alloc > MCLBYTES && bytes_to_alloc <= MBIGCLBYTES && bigcl) { freelist = m_getpackets_internal( - (unsigned int *)&num_needed, - num_needed, M_WAIT, 1, - MBIGCLBYTES); + (unsigned int *)&num_needed, + num_needed, M_WAIT, 1, + MBIGCLBYTES); } else if (bytes_to_alloc > _MHLEN && bytes_to_alloc <= MCLBYTES) { freelist = m_getpackets_internal( - (unsigned int *)&num_needed, - num_needed, M_WAIT, 1, - MCLBYTES); + (unsigned int *)&num_needed, + num_needed, M_WAIT, 1, + MCLBYTES); } else { freelist = m_allocpacket_internal( - (unsigned int *)&num_needed, - bytes_to_alloc, NULL, M_WAIT, 1, 0); + (unsigned int *)&num_needed, + bytes_to_alloc, NULL, M_WAIT, 1, 0); } if (freelist == NULL) { @@ -2716,8 +2809,9 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) bytes_to_copy = uio_resid(auio); /* Do nothing for empty messages */ - if (bytes_to_copy == 0) + if (bytes_to_copy == 0) { continue; + } /* * Leave headroom for protocol headers * in the first mbuf of the chain @@ -2725,14 +2819,15 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) m->m_data += headroom; for (n = m; n != NULL; n = n->m_next) { - if ((m->m_flags & M_EXT)) + if ((m->m_flags & M_EXT)) { mlen = m->m_ext.ext_size - M_LEADINGSPACE(m); - else if ((m->m_flags & M_PKTHDR)) + } else if ((m->m_flags & M_PKTHDR)) { mlen = MHLEN - M_LEADINGSPACE(m); - else + } else { mlen = MLEN - M_LEADINGSPACE(m); + } len = imin(mlen, bytes_to_copy); /* @@ -2741,8 +2836,9 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) */ error = uiomove(mtod(n, caddr_t), len, auio); - if (error != 0) + if (error != 0) { break; + } n->m_len = len; m->m_pkthdr.len += len; @@ -2753,26 +2849,29 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) } if (m->m_pkthdr.len == 0) { printf( - "%s:%d so %llx pkt %llx type %u len null\n", - __func__, __LINE__, - (uint64_t)DEBUG_KERNEL_ADDRPERM(so), - (uint64_t)DEBUG_KERNEL_ADDRPERM(m), - m->m_type); + "%s:%d so %llx pkt %llx type %u len null\n", + __func__, __LINE__, + (uint64_t)DEBUG_KERNEL_ADDRPERM(so), + (uint64_t)DEBUG_KERNEL_ADDRPERM(m), + m->m_type); } - if (error != 0) + if (error != 0) { break; + } m = m->m_nextpkt; } socket_lock(so, 0); - if (error) + if (error) { goto release; + } top = freelist; freelist = NULL; - if (dontroute) + if (dontroute) { so->so_options |= SO_DONTROUTE; + } if ((flags & MSG_SKIPCFIL) == 0) { struct mbuf **prevnextp = NULL; @@ -2787,8 +2886,9 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) */ error = sflt_data_out(so, NULL, &m, NULL, 0); - if (error != 0 && error != EJUSTRETURN) + if (error != 0 && error != EJUSTRETURN) { goto release; + } #if CONTENT_FILTER if (error == 0) { @@ -2797,8 +2897,9 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) */ error = cfil_sock_data_out(so, NULL, m, NULL, 0); - if (error != 0 && error != EJUSTRETURN) + if (error != 0 && error != EJUSTRETURN) { goto release; + } } #endif /* CONTENT_FILTER */ /* @@ -2807,42 
+2908,49 @@ sosend_list(struct socket *so, struct uio **uioarray, u_int uiocnt, int flags) */ if (error == EJUSTRETURN) { error = 0; - if (prevnextp != NULL) + if (prevnextp != NULL) { *prevnextp = nextpkt; - else + } else { top = nextpkt; + } } m = nextpkt; - if (m != NULL) + if (m != NULL) { prevnextp = &m->m_nextpkt; + } } } - if (top != NULL) + if (top != NULL) { error = (*so->so_proto->pr_usrreqs->pru_send_list) (so, 0, top, NULL, NULL, p); + } - if (dontroute) + if (dontroute) { so->so_options &= ~SO_DONTROUTE; + } top = NULL; uiofirst = uiolast; } while (resid > 0 && error == 0); release: - if (sblocked) - sbunlock(&so->so_snd, FALSE); /* will unlock socket */ - else + if (sblocked) { + sbunlock(&so->so_snd, FALSE); /* will unlock socket */ + } else { socket_unlock(so, 1); + } out: - if (top != NULL) + if (top != NULL) { m_freem(top); - if (freelist != NULL) + } + if (freelist != NULL) { m_freem_list(freelist); + } KERNEL_DEBUG(DBG_FNC_SOSEND_LIST | DBG_FUNC_END, so, resid, so->so_snd.sb_cc, 0, error); - return (error); + return error; } /* @@ -2918,8 +3026,9 @@ soreceive_addr(struct proc *p, struct socket *so, struct sockaddr **psa, * the record in front of any packets which may have * been appended while we dropped the lock. */ - for (m = m0; m->m_next != NULL; m = m->m_next) + for (m = m0; m->m_next != NULL; m = m->m_next) { sballoc(&so->so_rcv, m); + } sballoc(&so->so_rcv, m); if (so->so_rcv.sb_mb == NULL) { so->so_rcv.sb_lastrecord = m0; @@ -2961,7 +3070,7 @@ done: *mp = m; *nextrecordp = nextrecord; - return (error); + return error; } /* @@ -3033,8 +3142,9 @@ soreceive_ctl(struct socket *so, struct mbuf **controlp, int flags, sb_rcv->sb_mb = nextrecord; SB_EMPTY_FIXUP(sb_rcv); } - if (nextrecord == NULL) + if (nextrecord == NULL) { sb_rcv->sb_lastrecord = m; + } } SBLASTRECORDCHK(&so->so_rcv, "soreceive ctl"); @@ -3083,16 +3193,17 @@ soreceive_ctl(struct socket *so, struct mbuf **controlp, int flags, * records when the socket was unlocked above for * externalizing SCM_RIGHTS. */ - if (m != NULL) + if (m != NULL) { nextrecord = sb_rcv->sb_mb->m_nextpkt; - else + } else { nextrecord = sb_rcv->sb_mb; + } done: *mp = m; *nextrecordp = nextrecord; - return (error); + return error; } /* @@ -3153,8 +3264,9 @@ soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, * Sanity check on the length passed by caller as we are making 'int' * comparisons */ - if (orig_resid < 0 || orig_resid > INT_MAX) - return (EINVAL); + if (orig_resid < 0 || orig_resid > INT_MAX) { + return EINVAL; + } KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_START, so, uio_resid(uio), so->so_rcv.sb_cc, so->so_rcv.sb_lowat, @@ -3171,14 +3283,17 @@ soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, } #endif mp = mp0; - if (psa != NULL) + if (psa != NULL) { *psa = NULL; - if (controlp != NULL) + } + if (controlp != NULL) { *controlp = NULL; - if (flagsp != NULL) - flags = *flagsp &~ MSG_EOR; - else + } + if (flagsp != NULL) { + flags = *flagsp & ~MSG_EOR; + } else { flags = 0; + } /* * If a recv attempt is made on a previously-accepted socket @@ -3198,10 +3313,11 @@ soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, * prior to being returned from sodefunct(); there should * be no data on its receive list, so panic otherwise. 
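The allocation ladder in the sosend()/sosend_list() hunks above is easy to lose in the noise: pick the largest mbuf cluster class the request justifies, then fall back one class at a time when a pool is exhausted (16 KB jumbo, then 4 KB big, then 2 KB regular, then a plain mbuf). A sketch of the size-class choice, with the customary Darwin values assumed for the constants:

    #include <stddef.h>

    #define MCLBYTES_SK     2048    /* assumed MCLBYTES */
    #define MBIGCLBYTES_SK  4096    /* assumed MBIGCLBYTES */
    #define M16KCLBYTES_SK  16384   /* assumed M16KCLBYTES */

    /* First-choice cluster class; callers fall back on failure. */
    static size_t
    pick_cluster_size(size_t bytes_to_alloc, int jumbo_ok, int bigcl_ok)
    {
        if (jumbo_ok && bytes_to_alloc > MBIGCLBYTES_SK) {
            return M16KCLBYTES_SK;
        }
        if (bigcl_ok && bytes_to_alloc > MCLBYTES_SK) {
            return MBIGCLBYTES_SK;
        }
        if (bytes_to_alloc > 256) { /* roughly: larger than MHLEN */
            return MCLBYTES_SK;
        }
        return 0;                   /* fits in a plain mbuf */
    }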
*/ - if (so->so_state & SS_DEFUNCT) + if (so->so_state & SS_DEFUNCT) { sb_empty_assert(sb, __func__); + } socket_unlock(so, 1); - return (error); + return error; } if ((so->so_flags1 & SOF1_PRECONNECT_DATA) && @@ -3216,7 +3332,7 @@ soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, if (error) { socket_unlock(so, 1); - return (error); + return error; } } @@ -3252,11 +3368,12 @@ soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, socket_unlock(so, 1); KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, ENOBUFS, 0, 0, 0, 0); - return (ENOBUFS); + return ENOBUFS; } error = (*pr->pr_usrreqs->pru_rcvoob)(so, m, flags & MSG_PEEK); - if (error) + if (error) { goto bad; + } socket_unlock(so, 0); do { error = uiomove(mtod(m, caddr_t), @@ -3265,8 +3382,9 @@ soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, } while (uio_resid(uio) && error == 0 && m != NULL); socket_lock(so, 0); bad: - if (m != NULL) + if (m != NULL) { m_freem(m); + } if ((so->so_options & SO_WANTOOBFLAG) != 0) { if (error == EWOULDBLOCK || error == EINVAL) { @@ -3291,11 +3409,12 @@ bad: KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } nooob: - if (mp != NULL) + if (mp != NULL) { *mp = NULL; + } if (so->so_state & SS_ISCONFIRMING && uio_resid(uio)) { (*pr->pr_usrreqs->pru_rcvd)(so, 0); @@ -3305,9 +3424,10 @@ nooob: delayed_copy_len = 0; restart: #ifdef MORE_LOCKING_DEBUG - if (so->so_usecount <= 1) + if (so->so_usecount <= 1) { printf("soreceive: sblock so=0x%llx ref=%d on socket\n", (uint64_t)DEBUG_KERNEL_ADDRPERM(so), so->so_usecount); + } #endif /* * See if the socket has been closed (SS_NOFDREF|SS_CANTRCVMORE) @@ -3328,7 +3448,7 @@ restart: if ((so->so_state & (SS_NOFDREF | SS_CANTRCVMORE)) == (SS_NOFDREF | SS_CANTRCVMORE) && !(so->so_flags & SOF_MP_SUBFLOW)) { socket_unlock(so, 1); - return (0); + return 0; } error = sblock(&so->so_rcv, SBLOCKWAIT(flags)); @@ -3341,7 +3461,7 @@ restart: VM_KERNEL_ADDRPERM(so), 0, (int64_t)(orig_resid - uio_resid(uio))); } - return (error); + return error; } m = so->so_rcv.sb_mb; @@ -3371,11 +3491,13 @@ restart: SB_MB_CHECK(&so->so_rcv); if (so->so_error) { - if (m != NULL) + if (m != NULL) { goto dontblock; + } error = so->so_error; - if ((flags & MSG_PEEK) == 0) + if ((flags & MSG_PEEK) == 0) { so->so_error = 0; + } goto release; } if (so->so_state & SS_CANTRCVMORE) { @@ -3384,47 +3506,52 @@ restart: * Deal with half closed connections */ if ((so->so_state & SS_ISDISCONNECTED) == 0 && - cfil_sock_data_pending(&so->so_rcv) != 0) + cfil_sock_data_pending(&so->so_rcv) != 0) { CFIL_LOG(LOG_INFO, - "so %llx ignore SS_CANTRCVMORE", - (uint64_t)DEBUG_KERNEL_ADDRPERM(so)); - else + "so %llx ignore SS_CANTRCVMORE", + (uint64_t)DEBUG_KERNEL_ADDRPERM(so)); + } else #endif /* CONTENT_FILTER */ - if (m != NULL) + if (m != NULL) { goto dontblock; - else + } else { goto release; + } } - for (; m != NULL; m = m->m_next) + for (; m != NULL; m = m->m_next) { if (m->m_type == MT_OOBDATA || (m->m_flags & M_EOR)) { m = so->so_rcv.sb_mb; goto dontblock; } - if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && + } + if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0 && (so->so_proto->pr_flags & PR_CONNREQUIRED)) { error = ENOTCONN; goto release; } - if (uio_resid(uio) == 0) + if (uio_resid(uio) == 0) { goto release; + } if ((so->so_state & SS_NBIO) || - (flags & (MSG_DONTWAIT|MSG_NBIO))) { + (flags & (MSG_DONTWAIT | MSG_NBIO))) { error = EWOULDBLOCK; goto release; } SBLASTRECORDCHK(&so->so_rcv, 
"soreceive sbwait 1"); SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); - sbunlock(&so->so_rcv, TRUE); /* keep socket locked */ + sbunlock(&so->so_rcv, TRUE); /* keep socket locked */ #if EVEN_MORE_LOCKING_DEBUG - if (socket_debug) + if (socket_debug) { printf("Waiting for socket data\n"); + } #endif error = sbwait(&so->so_rcv); #if EVEN_MORE_LOCKING_DEBUG - if (socket_debug) + if (socket_debug) { printf("SORECEIVE - sbwait returned %d\n", error); + } #endif if (so->so_usecount < 1) { panic("%s: after 2nd sblock so=%p ref=%d on socket\n", @@ -3440,7 +3567,7 @@ restart: VM_KERNEL_ADDRPERM(so), 0, (int64_t)(orig_resid - uio_resid(uio))); } - return (error); + return error; } goto restart; } @@ -3453,10 +3580,11 @@ dontblock: if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) { error = soreceive_addr(p, so, psa, flags, &m, &nextrecord, mp0 == NULL); - if (error == ERESTART) + if (error == ERESTART) { goto restart; - else if (error != 0) + } else if (error != 0) { goto release; + } orig_resid = 0; } @@ -3468,8 +3596,9 @@ dontblock: */ if (m != NULL && m->m_type == MT_CONTROL) { error = soreceive_ctl(so, controlp, flags, &m, &nextrecord); - if (error != 0) + if (error != 0) { goto release; + } orig_resid = 0; } @@ -3485,7 +3614,7 @@ dontblock: struct mbuf *seq_cm; seq_cm = sbcreatecontrol((caddr_t)&m->m_pkthdr.msg_seq, - sizeof (uint32_t), SCM_SEQNUM, SOL_SOCKET); + sizeof(uint32_t), SCM_SEQNUM, SOL_SOCKET); if (seq_cm == NULL) { /* unable to allocate a control mbuf */ error = ENOBUFS; @@ -3512,12 +3641,14 @@ dontblock: nextrecord); /* NOTREACHED */ } - if (nextrecord == NULL) + if (nextrecord == NULL) { so->so_rcv.sb_lastrecord = m; + } } type = m->m_type; - if (type == MT_OOBDATA) + if (type == MT_OOBDATA) { flags |= MSG_OOB; + } } else { if (!(flags & MSG_PEEK)) { SB_EMPTY_FIXUP(&so->so_rcv); @@ -3529,18 +3660,20 @@ dontblock: moff = 0; offset = 0; - if (!(flags & MSG_PEEK) && uio_resid(uio) > sorecvmincopy) + if (!(flags & MSG_PEEK) && uio_resid(uio) > sorecvmincopy) { can_delay = 1; - else + } else { can_delay = 0; + } need_event = 0; while (m != NULL && (uio_resid(uio) - delayed_copy_len) > 0 && error == 0) { if (m->m_type == MT_OOBDATA) { - if (type != MT_OOBDATA) + if (type != MT_OOBDATA) { break; + } } else if (type == MT_OOBDATA) { break; } @@ -3555,10 +3688,12 @@ dontblock: } so->so_state &= ~SS_RCVATMARK; len = uio_resid(uio) - delayed_copy_len; - if (so->so_oobmark && len > so->so_oobmark - offset) + if (so->so_oobmark && len > so->so_oobmark - offset) { len = so->so_oobmark - offset; - if (len > m->m_len - moff) + } + if (len > m->m_len - moff) { len = m->m_len - moff; + } /* * If mp is set, just pass back the mbufs. * Otherwise copy them out via the uio, then free. 
@@ -3611,15 +3746,17 @@ dontblock: (int)len, uio); socket_lock(so, 0); - if (error) + if (error) { goto release; + } } } else { uio_setresid(uio, (uio_resid(uio) - len)); } if (len == m->m_len - moff) { - if (m->m_flags & M_EOR) + if (m->m_flags & M_EOR) { flags |= MSG_EOR; + } if (flags & MSG_PEEK) { m = m->m_next; moff = 0; @@ -3642,10 +3779,10 @@ dontblock: if (so->so_msg_state->msg_uno_bytes > m->m_len) { so->so_msg_state-> - msg_uno_bytes -= m->m_len; + msg_uno_bytes -= m->m_len; } else { so->so_msg_state-> - msg_uno_bytes = 0; + msg_uno_bytes = 0; } m->m_flags &= ~M_UNORDERED_DATA; } @@ -3656,18 +3793,20 @@ dontblock: so->so_rcv.sb_mb = m = m->m_next; *mp = NULL; } else { - if (free_list == NULL) + if (free_list == NULL) { free_list = m; - else + } else { ml->m_next = m; + } ml = m; so->so_rcv.sb_mb = m = m->m_next; ml->m_next = NULL; } if (m != NULL) { m->m_nextpkt = nextrecord; - if (nextrecord == NULL) + if (nextrecord == NULL) { so->so_rcv.sb_lastrecord = m; + } } else { so->so_rcv.sb_mb = nextrecord; SB_EMPTY_FIXUP(&so->so_rcv); @@ -3682,10 +3821,11 @@ dontblock: if (mp != NULL) { int copy_flag; - if (flags & MSG_DONTWAIT) + if (flags & MSG_DONTWAIT) { copy_flag = M_DONTWAIT; - else + } else { copy_flag = M_WAIT; + } *mp = m_copym(m, 0, len, copy_flag); /* * Failed to allocate an mbuf? @@ -3719,12 +3859,14 @@ dontblock: } } else { offset += len; - if (offset == so->so_oobmark) + if (offset == so->so_oobmark) { break; + } } } - if (flags & MSG_EOR) + if (flags & MSG_EOR) { break; + } /* * If the MSG_WAITALL or MSG_WAITSTREAM flag is set * (for non-atomic socket), we must not quit until @@ -3733,15 +3875,16 @@ dontblock: * count but without error. Keep sockbuf locked * against other readers. */ - while (flags & (MSG_WAITALL|MSG_WAITSTREAM) && m == NULL && + while (flags & (MSG_WAITALL | MSG_WAITSTREAM) && m == NULL && (uio_resid(uio) - delayed_copy_len) > 0 && !sosendallatonce(so) && !nextrecord) { if (so->so_error || ((so->so_state & SS_CANTRCVMORE) #if CONTENT_FILTER && cfil_sock_data_pending(&so->so_rcv) == 0 #endif /* CONTENT_FILTER */ - )) + )) { goto release; + } /* * Depending on the protocol (e.g. TCP), the following @@ -3754,8 +3897,9 @@ dontblock: */ if (pr->pr_flags & PR_WANTRCVD && so->so_pcb && (((struct inpcb *)so->so_pcb)->inp_state != - INPCB_STATE_DEAD)) + INPCB_STATE_DEAD)) { (*pr->pr_usrreqs->pru_rcvd)(so, flags); + } SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 2"); SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 2"); @@ -3787,8 +3931,9 @@ dontblock: error = sodelayed_copy(so, uio, &free_list, &delayed_copy_len); - if (error) + if (error) { goto release; + } } m = so->so_rcv.sb_mb; if (m != NULL) { @@ -3810,8 +3955,9 @@ dontblock: flags |= MSG_RCVMORE; } else { flags |= MSG_TRUNC; - if ((flags & MSG_PEEK) == 0) + if ((flags & MSG_PEEK) == 0) { (void) sbdroprecord(&so->so_rcv); + } } } @@ -3823,8 +3969,9 @@ dontblock: * is set), and so we set the flag now based on what we know * prior to calling pru_rcvd. 
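The delayed-copy scheme running through this function deserves a plain statement: when the caller is consuming (not MSG_PEEK) and more than sorecvmincopy bytes are pending, mbufs are unlinked onto free_list under the socket lock, and the uiomove() to user space happens later in one batch via sodelayed_copy() with the lock dropped. A user-space analogue of unlink-under-lock, copy-after-unlock (struct buf and the helper are hypothetical):

    #include <pthread.h>
    #include <stddef.h>
    #include <string.h>

    struct buf { struct buf *next; const char *data; size_t len; };

    /* Unlink ready buffers under the lock; copy them out afterwards. */
    static size_t
    drain_batched(pthread_mutex_t *lock, struct buf **queue,
        char *dst, size_t cap)
    {
        struct buf *batch, *b;
        size_t copied = 0;

        pthread_mutex_lock(lock);
        batch = *queue;             /* like moving sb_mb onto free_list */
        *queue = NULL;
        pthread_mutex_unlock(lock);

        for (b = batch; b != NULL && copied < cap; b = b->next) {
            size_t n = b->len < cap - copied ? b->len : cap - copied;
            memcpy(dst + copied, b->data, n);   /* like uiomove() */
            copied += n;
        }
        return copied;
    }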
*/ - if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) + if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) { flags |= MSG_HAVEMORE; + } if ((flags & MSG_PEEK) == 0) { if (m == NULL) { @@ -3844,30 +3991,34 @@ dontblock: } SBLASTRECORDCHK(&so->so_rcv, "soreceive 4"); SBLASTMBUFCHK(&so->so_rcv, "soreceive 4"); - if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) + if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) { (*pr->pr_usrreqs->pru_rcvd)(so, flags); + } } if (delayed_copy_len) { error = sodelayed_copy(so, uio, &free_list, &delayed_copy_len); - if (error) + if (error) { goto release; + } } if (free_list != NULL) { m_freem_list(free_list); free_list = NULL; } - if (need_event) + if (need_event) { postevent(so, 0, EV_OOB); + } if (orig_resid == uio_resid(uio) && orig_resid && (flags & MSG_EOR) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { - sbunlock(&so->so_rcv, TRUE); /* keep socket locked */ + sbunlock(&so->so_rcv, TRUE); /* keep socket locked */ goto restart; } - if (flagsp != NULL) + if (flagsp != NULL) { *flagsp |= flags; + } release: #ifdef MORE_LOCKING_DEBUG if (so->so_usecount <= 1) { @@ -3876,13 +4027,15 @@ release: /* NOTREACHED */ } #endif - if (delayed_copy_len) + if (delayed_copy_len) { error = sodelayed_copy(so, uio, &free_list, &delayed_copy_len); + } - if (free_list != NULL) + if (free_list != NULL) { m_freem_list(free_list); + } - sbunlock(&so->so_rcv, FALSE); /* will unlock socket */ + sbunlock(&so->so_rcv, FALSE); /* will unlock socket */ if (en_tracing) { KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END, @@ -3893,7 +4046,7 @@ release: KERNEL_DEBUG(DBG_FNC_SORECEIVE | DBG_FUNC_END, so, uio_resid(uio), so->so_rcv.sb_cc, 0, error); - return (error); + return error; } /* @@ -3922,7 +4075,7 @@ sodelayed_copy(struct socket *so, struct uio *uio, struct mbuf **free_list, socket_lock(so, 0); - return (error); + return error; } static int @@ -3940,8 +4093,9 @@ sodelayed_copy_list(struct socket *so, struct recv_msg_elem *msgarray, auio = msgarray[i].uio; for (m = ml; m != NULL; m = m->m_next) { error = uiomove(mtod(m, caddr_t), m->m_len, auio); - if (error != 0) + if (error != 0) { goto out; + } } } out: @@ -3950,7 +4104,7 @@ out: *free_list = NULL; *resid = 0; - return (error); + return error; } int @@ -3986,10 +4140,11 @@ soreceive_list(struct socket *so, struct recv_msg_elem *msgarray, u_int uiocnt, * - Protocol must support packet chains * - The uio array is NULL (should we panic?) */ - if (flagsp != NULL) + if (flagsp != NULL) { flags = *flagsp; - else + } else { flags = 0; + } if (flags & ~(MSG_PEEK | MSG_WAITALL | MSG_DONTWAIT | MSG_NEEDSA | MSG_NBIO)) { printf("%s invalid flags 0x%x\n", __func__, flags); @@ -4028,10 +4183,11 @@ soreceive_list(struct socket *so, struct recv_msg_elem *msgarray, u_int uiocnt, goto out; } - if (!(flags & MSG_PEEK) && sorecvmincopy > 0) + if (!(flags & MSG_PEEK) && sorecvmincopy > 0) { can_delay = 1; - else + } else { can_delay = 0; + } socket_lock(so, 1); so_update_last_owner_locked(so, p); @@ -4059,8 +4215,9 @@ soreceive_list(struct socket *so, struct recv_msg_elem *msgarray, u_int uiocnt, * prior to being returned from sodefunct(); there should * be no data on its receive list, so panic otherwise. 
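The MSG_WAITALL/MSG_WAITSTREAM loop in soreceive() above is what lets a single recv() sleep, holding the receive-buffer lock against other readers between wakeups, until the full request is satisfied, the peer closes, or an error arrives. Without the flag, user code has to iterate; the two forms below are roughly equivalent:

    #include <sys/types.h>
    #include <sys/socket.h>

    /* Kernel does the looping: */
    static ssize_t
    recv_exact(int fd, void *buf, size_t len)
    {
        return recv(fd, buf, len, MSG_WAITALL);
    }

    /* Caller does the looping: */
    static ssize_t
    recv_exact_manual(int fd, char *buf, size_t len)
    {
        size_t got = 0;

        while (got < len) {
            ssize_t n = recv(fd, buf + got, len - got, 0);
            if (n <= 0) {
                return n;   /* error, or peer closed the connection */
            }
            got += (size_t)n;
        }
        return (ssize_t)got;
    }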
*/ - if (so->so_state & SS_DEFUNCT) + if (so->so_state & SS_DEFUNCT) { sb_empty_assert(sb, __func__); + } goto release; } @@ -4115,20 +4272,21 @@ restart: if (so->so_error) { error = so->so_error; - if ((flags & MSG_PEEK) == 0) + if ((flags & MSG_PEEK) == 0) { so->so_error = 0; + } goto release; } if (so->so_state & SS_CANTRCVMORE) { goto release; } - if ((so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0 && + if ((so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0 && (so->so_proto->pr_flags & PR_CONNREQUIRED)) { error = ENOTCONN; goto release; } if ((so->so_state & SS_NBIO) || - (flags & (MSG_DONTWAIT|MSG_NBIO))) { + (flags & (MSG_DONTWAIT | MSG_NBIO))) { error = EWOULDBLOCK; goto release; } @@ -4143,7 +4301,7 @@ restart: SBLASTRECORDCHK(&so->so_rcv, "soreceive sbwait 1"); SBLASTMBUFCHK(&so->so_rcv, "soreceive sbwait 1"); - sbunlock(&so->so_rcv, TRUE); /* keep socket locked */ + sbunlock(&so->so_rcv, TRUE); /* keep socket locked */ sblocked = 0; error = sbwait(&so->so_rcv); @@ -4172,16 +4330,18 @@ restart: if ((pr->pr_flags & PR_ADDR) && m->m_type == MT_SONAME) { error = soreceive_addr(p, so, psa, flags, &m, &nextrecord, 1); - if (error == ERESTART) + if (error == ERESTART) { goto restart; - else if (error != 0) + } else if (error != 0) { goto release; + } } if (m != NULL && m->m_type == MT_CONTROL) { error = soreceive_ctl(so, controlp, flags, &m, &nextrecord); - if (error != 0) + if (error != 0) { goto release; + } } if (m->m_pkthdr.len == 0) { @@ -4199,15 +4359,18 @@ restart: ml = NULL; pktlen = 0; while (m != NULL && (len = resid - pktlen) >= 0 && error == 0) { - if (m->m_len == 0) + if (m->m_len == 0) { panic("%p m_len zero", m); - if (m->m_type == 0) + } + if (m->m_type == 0) { panic("%p m_type zero", m); + } /* * Clip to the residual length */ - if (len > m->m_len) + if (len > m->m_len) { len = m->m_len; + } pktlen += len; /* * Copy the mbufs via the uio or delay the copy @@ -4220,8 +4383,9 @@ restart: socket_unlock(so, 0); error = uiomove(mtod(m, caddr_t), (int)len, auio); socket_lock(so, 0); - if (error) + if (error) { goto release; + } } else { delayed_copy_len += len; } @@ -4237,21 +4401,24 @@ restart: /* * Set the first packet to the head of the free list */ - if (free_list == NULL) + if (free_list == NULL) { free_list = m; + } /* * Link current packet to tail of free list */ if (ml == NULL) { - if (free_tail != NULL) + if (free_tail != NULL) { free_tail->m_nextpkt = m; + } free_tail = m; } /* * Link current mbuf to last mbuf of current packet */ - if (ml != NULL) + if (ml != NULL) { ml->m_next = m; + } ml = m; /* @@ -4262,8 +4429,9 @@ restart: if (m != NULL) { m->m_nextpkt = nextrecord; - if (nextrecord == NULL) + if (nextrecord == NULL) { so->so_rcv.sb_lastrecord = m; + } } else { so->so_rcv.sb_mb = nextrecord; SB_EMPTY_FIXUP(&so->so_rcv); @@ -4294,17 +4462,19 @@ restart: * Copyout first the freelist then the partial mbuf */ socket_unlock(so, 0); - if (delayed_copy_len) + if (delayed_copy_len) { error = sodelayed_copy_list(so, msgarray, uiocnt, &free_list, &delayed_copy_len); + } if (error == 0) { error = uiomove(mtod(m, caddr_t), (int)len, auio); } socket_lock(so, 0); - if (error) + if (error) { goto release; + } m->m_data += len; m->m_len -= len; @@ -4346,13 +4516,14 @@ restart: if (npkts < uiocnt && error == 0 && (flags & (MSG_RCVMORE | MSG_TRUNC)) == 0 && (so->so_state & SS_CANTRCVMORE) == 0) { - sbunlock(&so->so_rcv, TRUE); /* keep socket locked */ + sbunlock(&so->so_rcv, TRUE); /* keep socket locked */ sblocked = 0; goto next; } - if (flagsp != NULL) + if 
(flagsp != NULL) { *flagsp |= flags; + } release: /* @@ -4361,32 +4532,38 @@ release: * That way the caller won't be surprised if it receives less data * than requested. */ - if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) + if ((so->so_options & SO_WANTMORE) && so->so_rcv.sb_cc > 0) { flags |= MSG_HAVEMORE; + } - if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) + if (pr->pr_flags & PR_WANTRCVD && so->so_pcb) { (*pr->pr_usrreqs->pru_rcvd)(so, flags); + } - if (sblocked) - sbunlock(&so->so_rcv, FALSE); /* will unlock socket */ - else + if (sblocked) { + sbunlock(&so->so_rcv, FALSE); /* will unlock socket */ + } else { socket_unlock(so, 1); + } - if (delayed_copy_len) + if (delayed_copy_len) { error = sodelayed_copy_list(so, msgarray, uiocnt, &free_list, &delayed_copy_len); + } out: /* * Amortize the cost of freeing the mbufs */ - if (free_list != NULL) + if (free_list != NULL) { m_freem_list(free_list); - if (free_others != NULL) + } + if (free_others != NULL) { m_freem_list(free_others); + } KERNEL_DEBUG(DBG_FNC_SORECEIVE_LIST | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } /* @@ -4420,7 +4597,7 @@ soshutdown(struct socket *so, int how) case SHUT_RDWR: socket_lock(so, 1); if ((so->so_state & - (SS_ISCONNECTED|SS_ISCONNECTING|SS_ISDISCONNECTING)) == 0) { + (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) { error = ENOTCONN; } else { error = soshutdownlock(so, how); @@ -4434,7 +4611,7 @@ soshutdown(struct socket *so, int how) KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN | DBG_FUNC_END, how, error, 0, 0, 0); - return (error); + return error; } int @@ -4465,7 +4642,7 @@ soshutdownlock_final(struct socket *so, int how) } done: KERNEL_DEBUG(DBG_FNC_SOSHUTDOWN, how, 1, 0, 0, 0); - return (error); + return error; } int @@ -4492,7 +4669,7 @@ soshutdownlock(struct socket *so, int how) error = soshutdownlock_final(so, how); done: - return (error); + return error; } void @@ -4510,12 +4687,12 @@ sowflush(struct socket *so) (void) sblock(sb, SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT); VERIFY(sb->sb_flags & SB_LOCK); - sb->sb_flags &= ~(SB_SEL|SB_UPCALL); - sb->sb_flags |= SB_DROP; - sb->sb_upcall = NULL; - sb->sb_upcallarg = NULL; + sb->sb_flags &= ~(SB_SEL | SB_UPCALL); + sb->sb_flags |= SB_DROP; + sb->sb_upcall = NULL; + sb->sb_upcallarg = NULL; - sbunlock(sb, TRUE); /* keep socket locked */ + sbunlock(sb, TRUE); /* keep socket locked */ selthreadclear(&sb->sb_sel); sbrelease(sb); @@ -4534,10 +4711,11 @@ sorflush(struct socket *so) * as part of sofreelastref(), and at that time, pr_getlock() may no * longer be able to return us the lock; this will be fixed in future. */ - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); #endif /* notyet */ @@ -4564,20 +4742,20 @@ sorflush(struct socket *so) * Make sure to not carry over SB_LOCK in "asb", as we need * to acquire it later as part of sbrelease(). 
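soshutdown() above maps the three how values onto soshutdownlock(), and, as the SHUT_RDWR arm shows, insists on a connected socket, failing with ENOTCONN otherwise. The classic use is a write-side half close, which the peer observes as EOF while this end keeps reading:

    #include <errno.h>
    #include <sys/socket.h>

    /* Half-close the write side; reading remains possible. */
    static int
    half_close_write(int fd)
    {
        if (shutdown(fd, SHUT_WR) == -1) {
            /* ENOTCONN: never connected, or the peer already gone */
            return -1;
        }
        return 0;   /* peer sees EOF; we can still read replies */
    }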
*/ - bzero(&asb, sizeof (asb)); - asb.sb_cc = sb->sb_cc; - asb.sb_hiwat = sb->sb_hiwat; - asb.sb_mbcnt = sb->sb_mbcnt; - asb.sb_mbmax = sb->sb_mbmax; - asb.sb_ctl = sb->sb_ctl; - asb.sb_lowat = sb->sb_lowat; - asb.sb_mb = sb->sb_mb; - asb.sb_mbtail = sb->sb_mbtail; - asb.sb_lastrecord = sb->sb_lastrecord; - asb.sb_so = sb->sb_so; - asb.sb_flags = sb->sb_flags; - asb.sb_flags &= ~(SB_LOCK|SB_SEL|SB_KNOTE|SB_UPCALL); - asb.sb_flags |= SB_DROP; + bzero(&asb, sizeof(asb)); + asb.sb_cc = sb->sb_cc; + asb.sb_hiwat = sb->sb_hiwat; + asb.sb_mbcnt = sb->sb_mbcnt; + asb.sb_mbmax = sb->sb_mbmax; + asb.sb_ctl = sb->sb_ctl; + asb.sb_lowat = sb->sb_lowat; + asb.sb_mb = sb->sb_mb; + asb.sb_mbtail = sb->sb_mbtail; + asb.sb_lastrecord = sb->sb_lastrecord; + asb.sb_so = sb->sb_so; + asb.sb_flags = sb->sb_flags; + asb.sb_flags &= ~(SB_LOCK | SB_SEL | SB_KNOTE | SB_UPCALL); + asb.sb_flags |= SB_DROP; /* * Ideally we'd bzero() these and preserve the ones we need; @@ -4588,23 +4766,23 @@ sorflush(struct socket *so) * Setting SB_DROP acts as a barrier to prevent further appends. * Clearing SB_SEL is done for selthreadclear() below. */ - sb->sb_cc = 0; - sb->sb_hiwat = 0; - sb->sb_mbcnt = 0; - sb->sb_mbmax = 0; - sb->sb_ctl = 0; - sb->sb_lowat = 0; - sb->sb_mb = NULL; - sb->sb_mbtail = NULL; - sb->sb_lastrecord = NULL; - sb->sb_timeo.tv_sec = 0; - sb->sb_timeo.tv_usec = 0; - sb->sb_upcall = NULL; - sb->sb_upcallarg = NULL; - sb->sb_flags &= ~(SB_SEL|SB_UPCALL); - sb->sb_flags |= SB_DROP; - - sbunlock(sb, TRUE); /* keep socket locked */ + sb->sb_cc = 0; + sb->sb_hiwat = 0; + sb->sb_mbcnt = 0; + sb->sb_mbmax = 0; + sb->sb_ctl = 0; + sb->sb_lowat = 0; + sb->sb_mb = NULL; + sb->sb_mbtail = NULL; + sb->sb_lastrecord = NULL; + sb->sb_timeo.tv_sec = 0; + sb->sb_timeo.tv_usec = 0; + sb->sb_upcall = NULL; + sb->sb_upcallarg = NULL; + sb->sb_flags &= ~(SB_SEL | SB_UPCALL); + sb->sb_flags |= SB_DROP; + + sbunlock(sb, TRUE); /* keep socket locked */ /* * Note that selthreadclear() is called on the original "sb" and @@ -4614,8 +4792,9 @@ sorflush(struct socket *so) */ selthreadclear(&sb->sb_sel); - if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) + if ((pr->pr_flags & PR_RIGHTS) && pr->pr_domain->dom_dispose) { (*pr->pr_domain->dom_dispose)(asb.sb_mb); + } sbrelease(&asb); } @@ -4634,7 +4813,7 @@ sorflush(struct socket *so) int sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) { - size_t valsize; + size_t valsize; /* * If the user gives us more than we wanted, we ignore it, @@ -4642,16 +4821,19 @@ sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) * wants, we return EINVAL. On success, sopt->sopt_valsize * is set to however much we actually retrieved. 
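sooptcopyin()'s contract, which every case in sosetoptlock() below leans on: a user buffer shorter than minlen is rejected with EINVAL, a longer one is silently truncated to len, and sopt_valsize is rewritten to the size actually consumed. A stand-alone user-space model of the same rules (optcopyin_model is hypothetical, for illustration):

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    /* Model of sooptcopyin(): reject short buffers, trim long ones. */
    static int
    optcopyin_model(const void *uval, size_t *uvalsize,
        void *buf, size_t len, size_t minlen)
    {
        size_t valsize = *uvalsize;

        if (valsize < minlen) {
            return EINVAL;              /* less than the option needs */
        }
        if (valsize > len) {
            *uvalsize = valsize = len;  /* ignore the excess */
        }
        memcpy(buf, uval, valsize);     /* stands in for copyin() */
        return 0;
    }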
*/ - if ((valsize = sopt->sopt_valsize) < minlen) - return (EINVAL); - if (valsize > len) + if ((valsize = sopt->sopt_valsize) < minlen) { + return EINVAL; + } + if (valsize > len) { sopt->sopt_valsize = valsize = len; + } - if (sopt->sopt_p != kernproc) - return (copyin(sopt->sopt_val, buf, valsize)); + if (sopt->sopt_p != kernproc) { + return copyin(sopt->sopt_val, buf, valsize); + } bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), buf, valsize); - return (0); + return 0; } /* @@ -4664,44 +4846,48 @@ sooptcopyin(struct sockopt *sopt, void *buf, size_t len, size_t minlen) static int sooptcopyin_timeval(struct sockopt *sopt, struct timeval *tv_p) { - int error; + int error; if (proc_is64bit(sopt->sopt_p)) { - struct user64_timeval tv64; + struct user64_timeval tv64; - if (sopt->sopt_valsize < sizeof (tv64)) - return (EINVAL); + if (sopt->sopt_valsize < sizeof(tv64)) { + return EINVAL; + } - sopt->sopt_valsize = sizeof (tv64); + sopt->sopt_valsize = sizeof(tv64); if (sopt->sopt_p != kernproc) { - error = copyin(sopt->sopt_val, &tv64, sizeof (tv64)); - if (error != 0) - return (error); + error = copyin(sopt->sopt_val, &tv64, sizeof(tv64)); + if (error != 0) { + return error; + } } else { bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), &tv64, - sizeof (tv64)); + sizeof(tv64)); } if (tv64.tv_sec < 0 || tv64.tv_sec > LONG_MAX || - tv64.tv_usec < 0 || tv64.tv_usec >= 1000000) - return (EDOM); + tv64.tv_usec < 0 || tv64.tv_usec >= 1000000) { + return EDOM; + } tv_p->tv_sec = tv64.tv_sec; tv_p->tv_usec = tv64.tv_usec; } else { - struct user32_timeval tv32; + struct user32_timeval tv32; - if (sopt->sopt_valsize < sizeof (tv32)) - return (EINVAL); + if (sopt->sopt_valsize < sizeof(tv32)) { + return EINVAL; + } - sopt->sopt_valsize = sizeof (tv32); + sopt->sopt_valsize = sizeof(tv32); if (sopt->sopt_p != kernproc) { - error = copyin(sopt->sopt_val, &tv32, sizeof (tv32)); + error = copyin(sopt->sopt_val, &tv32, sizeof(tv32)); if (error != 0) { - return (error); + return error; } } else { bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), &tv32, - sizeof (tv32)); + sizeof(tv32)); } #ifndef __LP64__ /* @@ -4709,13 +4895,14 @@ sooptcopyin_timeval(struct sockopt *sopt, struct timeval *tv_p) * limited range of data type" */ if (tv32.tv_sec < 0 || tv32.tv_sec > LONG_MAX || - tv32.tv_usec < 0 || tv32.tv_usec >= 1000000) - return (EDOM); + tv32.tv_usec < 0 || tv32.tv_usec >= 1000000) { + return EDOM; + } #endif tv_p->tv_sec = tv32.tv_sec; tv_p->tv_usec = tv32.tv_usec; } - return (0); + return 0; } int @@ -4728,21 +4915,25 @@ soopt_cred_check(struct socket *so, int priv, boolean_t allow_root) if (so->so_flags & SOF_DELEGATED) { ep = proc_find(so->e_pid); - if (ep) + if (ep) { cred = kauth_cred_proc_ref(ep); + } } uid = kauth_cred_getuid(cred ? cred : so->so_cred); /* uid is 0 for root */ - if (uid != 0 || !allow_root) + if (uid != 0 || !allow_root) { error = priv_check_cred(cred ? 
cred : so->so_cred, priv, 0); - if (cred) + } + if (cred) { kauth_cred_unref(&cred); - if (ep != PROC_NULL) + } + if (ep != PROC_NULL) { proc_rele(ep); + } - return (error); + return error; } /* @@ -4768,18 +4959,20 @@ soopt_cred_check(struct socket *so, int priv, boolean_t allow_root) int sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) { - int error, optval; - struct linger l; - struct timeval tv; + int error, optval; + struct linger l; + struct timeval tv; #if CONFIG_MACF_SOCKET struct mac extmac; #endif /* MAC_SOCKET */ - if (sopt->sopt_dir != SOPT_SET) + if (sopt->sopt_dir != SOPT_SET) { sopt->sopt_dir = SOPT_SET; + } - if (dolock) + if (dolock) { socket_lock(so, 1); + } if ((so->so_state & (SS_CANTRCVMORE | SS_CANTSENDMORE)) == (SS_CANTRCVMORE | SS_CANTSENDMORE) && @@ -4791,8 +4984,9 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) error = sflt_setsockopt(so, sopt); if (error != 0) { - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } goto out; } @@ -4812,23 +5006,26 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) * return value indicates that the option is unsupported. */ if (so->so_proto != NULL && (error = so->so_proto->pr_usrreqs-> - pru_socheckopt(so, sopt)) != 0) + pru_socheckopt(so, sopt)) != 0) { goto out; + } error = 0; switch (sopt->sopt_name) { case SO_LINGER: case SO_LINGER_SEC: - error = sooptcopyin(sopt, &l, sizeof (l), sizeof (l)); - if (error != 0) + error = sooptcopyin(sopt, &l, sizeof(l), sizeof(l)); + if (error != 0) { goto out; + } so->so_linger = (sopt->sopt_name == SO_LINGER) ? l.l_linger : l.l_linger * hz; - if (l.l_onoff != 0) + if (l.l_onoff != 0) { so->so_options |= SO_LINGER; - else + } else { so->so_options &= ~SO_LINGER; + } break; case SO_DEBUG: @@ -4847,24 +5044,27 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_WANTOOBFLAG: case SO_NOWAKEFROMSLEEP: case SO_NOAPNFALLBK: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval) + } + if (optval) { so->so_options |= sopt->sopt_name; - else + } else { so->so_options &= ~sopt->sopt_name; + } break; case SO_SNDBUF: case SO_RCVBUF: case SO_SNDLOWAT: case SO_RCVLOWAT: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; + } /* * Values < 1 make no sense for any of these @@ -4923,8 +5123,9 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) so->so_rcv.sb_hiwat : optval; data_len = so->so_rcv.sb_cc - so->so_rcv.sb_ctl; - if (data_len >= so->so_rcv.sb_lowat) - sorwakeup(so); + if (data_len >= so->so_rcv.sb_lowat) { + sorwakeup(so); + } break; } } @@ -4933,8 +5134,9 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_SNDTIMEO: case SO_RCVTIMEO: error = sooptcopyin_timeval(sopt, &tv); - if (error != 0) + if (error != 0) { goto out; + } switch (sopt->sopt_name) { case SO_SNDTIMEO: @@ -4949,46 +5151,53 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_NKE: { struct so_nke nke; - error = sooptcopyin(sopt, &nke, sizeof (nke), - sizeof (nke)); - if (error != 0) + error = sooptcopyin(sopt, &nke, sizeof(nke), + sizeof(nke)); + if (error != 0) { goto out; + } error = sflt_attach_internal(so, nke.nke_handle); break; } case SO_NOSIGPIPE: - error = sooptcopyin(sopt, 
&optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval != 0) + } + if (optval != 0) { so->so_flags |= SOF_NOSIGPIPE; - else + } else { so->so_flags &= ~SOF_NOSIGPIPE; + } break; case SO_NOADDRERR: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval != 0) + } + if (optval != 0) { so->so_flags |= SOF_NOADDRAVAIL; - else + } else { so->so_flags &= ~SOF_NOADDRAVAIL; + } break; case SO_REUSESHAREUID: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval != 0) + } + if (optval != 0) { so->so_flags |= SOF_REUSESHAREUID; - else + } else { so->so_flags &= ~SOF_REUSESHAREUID; + } break; case SO_NOTIFYCONFLICT: @@ -4996,21 +5205,24 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) error = EPERM; goto out; } - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval != 0) + } + if (optval != 0) { so->so_flags |= SOF_NOTIFYCONFLICT; - else + } else { so->so_flags &= ~SOF_NOTIFYCONFLICT; + } break; case SO_RESTRICTIONS: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; + } error = so_set_restrictions(so, optval); break; @@ -5023,16 +5235,19 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) } error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error != 0) + if (error != 0) { goto out; + } if (optval != 0) { error = soopt_cred_check(so, PRIV_NET_RESTRICTED_AWDL, false); - if (error == 0) + if (error == 0) { inp_set_awdl_unrestricted( - sotoinpcb(so)); - } else + sotoinpcb(so)); + } + } else { inp_clear_awdl_unrestricted(sotoinpcb(so)); + } break; case SO_INTCOPROC_ALLOW: if (SOCK_DOM(so) != PF_INET6) { @@ -5041,24 +5256,28 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) } error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error != 0) + if (error != 0) { goto out; + } if (optval != 0 && - inp_get_intcoproc_allowed(sotoinpcb(so)) == FALSE) { + inp_get_intcoproc_allowed(sotoinpcb(so)) == FALSE) { error = soopt_cred_check(so, PRIV_NET_RESTRICTED_INTCOPROC, false); - if (error == 0) + if (error == 0) { inp_set_intcoproc_allowed( - sotoinpcb(so)); - } else if (optval == 0) + sotoinpcb(so)); + } + } else if (optval == 0) { inp_clear_intcoproc_allowed(sotoinpcb(so)); + } break; case SO_LABEL: #if CONFIG_MACF_SOCKET - if ((error = sooptcopyin(sopt, &extmac, sizeof (extmac), - sizeof (extmac))) != 0) + if ((error = sooptcopyin(sopt, &extmac, sizeof(extmac), + sizeof(extmac))) != 0) { goto out; + } error = mac_setsockopt_label(proc_ucred(sopt->sopt_p), so, &extmac); @@ -5068,34 +5287,39 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) break; case SO_UPCALLCLOSEWAIT: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval != 0) + } + if 
(optval != 0) { so->so_flags |= SOF_UPCALLCLOSEWAIT; - else + } else { so->so_flags &= ~SOF_UPCALLCLOSEWAIT; + } break; case SO_RANDOMPORT: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval != 0) + } + if (optval != 0) { so->so_flags |= SOF_BINDRANDOMPORT; - else + } else { so->so_flags &= ~SOF_BINDRANDOMPORT; + } break; case SO_NP_EXTENSIONS: { struct so_np_extensions sonpx; - error = sooptcopyin(sopt, &sonpx, sizeof (sonpx), - sizeof (sonpx)); - if (error != 0) + error = sooptcopyin(sopt, &sonpx, sizeof(sonpx), + sizeof(sonpx)); + if (error != 0) { goto out; + } if (sonpx.npx_mask & ~SONPX_MASK_VALID) { error = EINVAL; goto out; @@ -5104,41 +5328,46 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) * Only one bit defined for now */ if ((sonpx.npx_mask & SONPX_SETOPTSHUT)) { - if ((sonpx.npx_flags & SONPX_SETOPTSHUT)) + if ((sonpx.npx_flags & SONPX_SETOPTSHUT)) { so->so_flags |= SOF_NPX_SETOPTSHUT; - else + } else { so->so_flags &= ~SOF_NPX_SETOPTSHUT; + } } break; } case SO_TRAFFIC_CLASS: { - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; + } if (optval >= SO_TC_NET_SERVICE_OFFSET) { int netsvc = optval - SO_TC_NET_SERVICE_OFFSET; error = so_set_net_service_type(so, netsvc); goto out; } error = so_set_traffic_class(so, optval); - if (error != 0) + if (error != 0) { goto out; + } so->so_flags1 &= ~SOF1_TC_NET_SERV_TYPE; so->so_netsvctype = _NET_SERVICE_TYPE_UNSPEC; break; } case SO_RECV_TRAFFIC_CLASS: { - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval == 0) + } + if (optval == 0) { so->so_flags &= ~SOF_RECV_TRAFFIC_CLASS; - else + } else { so->so_flags |= SOF_RECV_TRAFFIC_CLASS; + } break; } @@ -5147,12 +5376,14 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) struct so_tcdbg so_tcdbg; error = sooptcopyin(sopt, &so_tcdbg, - sizeof (struct so_tcdbg), sizeof (struct so_tcdbg)); - if (error != 0) + sizeof(struct so_tcdbg), sizeof(struct so_tcdbg)); + if (error != 0) { goto out; + } error = so_set_tcdbg(so, &so_tcdbg); - if (error != 0) + if (error != 0) { goto out; + } break; } #endif /* (DEVELOPMENT || DEBUG) */ @@ -5160,33 +5391,38 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_PRIVILEGED_TRAFFIC_CLASS: error = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_TRAFFIC_CLASS, 0); - if (error != 0) + if (error != 0) { goto out; - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error != 0) + } + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error != 0) { goto out; - if (optval == 0) + } + if (optval == 0) { so->so_flags &= ~SOF_PRIVILEGED_TRAFFIC_CLASS; - else + } else { so->so_flags |= SOF_PRIVILEGED_TRAFFIC_CLASS; + } break; #if (DEVELOPMENT || DEBUG) case SO_DEFUNCTIT: error = sosetdefunct(current_proc(), so, 0, FALSE); - if (error == 0) + if (error == 0) { error = sodefunct(current_proc(), so, 0); + } break; #endif /* (DEVELOPMENT || DEBUG) */ case SO_DEFUNCTOK: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); 
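
Every boolean case in sosetoptlock() above follows the same shape: sooptcopyin() pulls a fixed-size int from the caller, a nonzero error short-circuits to out, and the option sets or clears a bit in so_options or so_flags. From user space the pattern is an ordinary setsockopt(2) call. The sketch below is illustrative only, not part of the patch; fd is a hypothetical connected socket, and it exercises two of the Darwin-specific options handled above. Note the unit difference visible in the kernel code: SO_LINGER_SEC takes seconds (the kernel scales by hz), while classic SO_LINGER is interpreted in clock ticks, per the l.l_linger * hz conversion.

#include <sys/socket.h>

/*
 * Illustrative sketch, not part of the patch: exercises two options
 * handled by sosetoptlock() above on a hypothetical socket fd.
 */
static int
set_example_opts(int fd)
{
	struct linger l = { .l_onoff = 1, .l_linger = 5 };	/* 5 seconds */
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_LINGER_SEC, &l, sizeof(l)) == -1) {
		return -1;
	}
	/* boolean options take the sooptcopyin()-then-set/clear path above */
	return setsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(one));
}
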
if (error != 0 || (so->so_flags & SOF_DEFUNCT)) { - if (error == 0) + if (error == 0) { error = EBADF; + } goto out; } /* @@ -5199,10 +5435,11 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) error = EPERM; goto out; } - if (optval) + if (optval) { so->so_flags &= ~SOF_NODEFUNCT; - else + } else { so->so_flags |= SOF_NODEFUNCT; + } if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) { @@ -5220,12 +5457,12 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) "TCP" : "UDP", inet_ntop(SOCK_DOM(so), ((SOCK_DOM(so) == PF_INET) ? (void *)&inp->inp_laddr.s_addr : - (void *)&inp->in6p_laddr), s, sizeof (s)), + (void *)&inp->in6p_laddr), s, sizeof(s)), ntohs(inp->in6p_lport), inet_ntop(SOCK_DOM(so), (SOCK_DOM(so) == PF_INET) ? (void *)&inp->inp_faddr.s_addr : - (void *)&inp->in6p_faddr, d, sizeof (d)), + (void *)&inp->in6p_faddr, d, sizeof(d)), ntohs(inp->in6p_fport), (so->so_flags & SOF_NODEFUNCT) ? "not " : ""); @@ -5248,10 +5485,11 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) break; case SO_OPPORTUNISTIC: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error == 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error == 0) { error = so_set_opportunistic(so, optval); + } break; case SO_FLUSH: @@ -5260,10 +5498,11 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) break; case SO_RECV_ANYIF: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error == 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error == 0) { error = so_set_recv_anyif(so, optval); + } break; case SO_TRAFFIC_MGT_BACKGROUND: { @@ -5276,13 +5515,14 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_FLOW_DIVERT_TOKEN: error = flow_divert_token_set(so, sopt); break; -#endif /* FLOW_DIVERT */ +#endif /* FLOW_DIVERT */ case SO_DELEGATED: - if ((error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval))) != 0) + if ((error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval))) != 0) { break; + } error = so_set_effective_pid(so, optval, sopt->sopt_p); break; @@ -5290,9 +5530,10 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_DELEGATED_UUID: { uuid_t euuid; - if ((error = sooptcopyin(sopt, &euuid, sizeof (euuid), - sizeof (euuid))) != 0) + if ((error = sooptcopyin(sopt, &euuid, sizeof(euuid), + sizeof(euuid))) != 0) { break; + } error = so_set_effective_uuid(so, euuid, sopt->sopt_p); break; @@ -5321,7 +5562,7 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) } error = sooptcopyin(sopt, &inp->necp_client_uuid, - sizeof(uuid_t), sizeof(uuid_t)); + sizeof(uuid_t), sizeof(uuid_t)); if (error != 0) { goto out; } @@ -5348,32 +5589,36 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) #endif /* NECP */ case SO_EXTENDED_BK_IDLE: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error == 0) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error == 0) { error = so_set_extended_bk_idle(so, optval); + } break; case SO_MARK_CELLFALLBACK: error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error != 0) + if (error != 0) { goto out; + } if (optval < 0) { error = EINVAL; goto out; } - if (optval == 0) + if (optval == 0) { so->so_flags1 &= ~SOF1_CELLFALLBACK; - else + } else { so->so_flags1 |= SOF1_CELLFALLBACK; + } break; case SO_NET_SERVICE_TYPE: { error = 
sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error != 0) + if (error != 0) { goto out; + } error = so_set_net_service_type(so, optval); break; } @@ -5381,16 +5626,19 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_QOSMARKING_POLICY_OVERRIDE: error = priv_check_cred(kauth_cred_get(), PRIV_NET_QOSMARKING_POLICY_OVERRIDE, 0); - if (error != 0) + if (error != 0) { goto out; + } error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error != 0) + if (error != 0) { goto out; - if (optval == 0) + } + if (optval == 0) { so->so_flags1 &= ~SOF1_QOSMARKING_POLICY_OVERRIDE; - else + } else { so->so_flags1 |= SOF1_QOSMARKING_POLICY_OVERRIDE; + } break; default: @@ -5403,17 +5651,18 @@ sosetoptlock(struct socket *so, struct sockopt *sopt, int dolock) } } out: - if (dolock) + if (dolock) { socket_unlock(so, 1); - return (error); + } + return error; } /* Helper routines for getsockopt */ int sooptcopyout(struct sockopt *sopt, void *buf, size_t len) { - int error; - size_t valsize; + int error; + size_t valsize; error = 0; @@ -5429,32 +5678,33 @@ sooptcopyout(struct sockopt *sopt, void *buf, size_t len) valsize = min(len, sopt->sopt_valsize); sopt->sopt_valsize = valsize; if (sopt->sopt_val != USER_ADDR_NULL) { - if (sopt->sopt_p != kernproc) + if (sopt->sopt_p != kernproc) { error = copyout(buf, sopt->sopt_val, valsize); - else + } else { bcopy(buf, CAST_DOWN(caddr_t, sopt->sopt_val), valsize); + } } - return (error); + return error; } static int sooptcopyout_timeval(struct sockopt *sopt, const struct timeval *tv_p) { - int error; - size_t len; - struct user64_timeval tv64 = {}; - struct user32_timeval tv32 = {}; - const void * val; - size_t valsize; + int error; + size_t len; + struct user64_timeval tv64 = {}; + struct user32_timeval tv32 = {}; + const void * val; + size_t valsize; error = 0; if (proc_is64bit(sopt->sopt_p)) { - len = sizeof (tv64); + len = sizeof(tv64); tv64.tv_sec = tv_p->tv_sec; tv64.tv_usec = tv_p->tv_usec; val = &tv64; } else { - len = sizeof (tv32); + len = sizeof(tv32); tv32.tv_sec = tv_p->tv_sec; tv32.tv_usec = tv_p->tv_usec; val = &tv32; @@ -5462,12 +5712,13 @@ sooptcopyout_timeval(struct sockopt *sopt, const struct timeval *tv_p) valsize = min(len, sopt->sopt_valsize); sopt->sopt_valsize = valsize; if (sopt->sopt_val != USER_ADDR_NULL) { - if (sopt->sopt_p != kernproc) + if (sopt->sopt_p != kernproc) { error = copyout(val, sopt->sopt_val, valsize); - else + } else { bcopy(val, CAST_DOWN(caddr_t, sopt->sopt_val), valsize); + } } - return (error); + return error; } /* @@ -5480,23 +5731,26 @@ sooptcopyout_timeval(struct sockopt *sopt, const struct timeval *tv_p) int sogetoptlock(struct socket *so, struct sockopt *sopt, int dolock) { - int error, optval; - struct linger l; - struct timeval tv; + int error, optval; + struct linger l; + struct timeval tv; #if CONFIG_MACF_SOCKET struct mac extmac; #endif /* MAC_SOCKET */ - if (sopt->sopt_dir != SOPT_GET) + if (sopt->sopt_dir != SOPT_GET) { sopt->sopt_dir = SOPT_GET; + } - if (dolock) + if (dolock) { socket_lock(so, 1); + } error = sflt_getsockopt(so, sopt); if (error != 0) { - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } goto out; } @@ -5516,8 +5770,9 @@ sogetoptlock(struct socket *so, struct sockopt *sopt, int dolock) * return value indicates that the option is unsupported. 
*/ if (so->so_proto != NULL && (error = so->so_proto->pr_usrreqs-> - pru_socheckopt(so, sopt)) != 0) + pru_socheckopt(so, sopt)) != 0) { goto out; + } error = 0; switch (sopt->sopt_name) { @@ -5526,7 +5781,7 @@ sogetoptlock(struct socket *so, struct sockopt *sopt, int dolock) l.l_onoff = ((so->so_options & SO_LINGER) ? 1 : 0); l.l_linger = (sopt->sopt_name == SO_LINGER) ? so->so_linger : so->so_linger / hz; - error = sooptcopyout(sopt, &l, sizeof (l)); + error = sooptcopyout(sopt, &l, sizeof(l)); break; case SO_USELOOPBACK: @@ -5547,7 +5802,7 @@ sogetoptlock(struct socket *so, struct sockopt *sopt, int dolock) case SO_NOAPNFALLBK: optval = so->so_options & sopt->sopt_name; integer: - error = sooptcopyout(sopt, &optval, sizeof (optval)); + error = sooptcopyout(sopt, &optval, sizeof(optval)); break; case SO_TYPE: @@ -5564,8 +5819,9 @@ integer: while (m1 != NULL) { if (m1->m_type == MT_DATA || m1->m_type == MT_HEADER || - m1->m_type == MT_OOBDATA) + m1->m_type == MT_OOBDATA) { pkt_total += m1->m_len; + } m1 = m1->m_next; } optval = pkt_total; @@ -5583,8 +5839,9 @@ integer: while (m1 != NULL) { if (m1->m_type == MT_DATA || m1->m_type == MT_HEADER || - m1->m_type == MT_OOBDATA) + m1->m_type == MT_OOBDATA) { cnt += 1; + } m1 = m1->m_nextpkt; } optval = cnt; @@ -5662,30 +5919,33 @@ integer: if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) { optval = inp_get_awdl_unrestricted( - sotoinpcb(so)); + sotoinpcb(so)); goto integer; - } else + } else { error = EOPNOTSUPP; + } break; case SO_INTCOPROC_ALLOW: if (SOCK_DOM(so) == PF_INET6) { optval = inp_get_intcoproc_allowed( - sotoinpcb(so)); + sotoinpcb(so)); goto integer; - } else + } else { error = EOPNOTSUPP; + } break; case SO_LABEL: #if CONFIG_MACF_SOCKET - if ((error = sooptcopyin(sopt, &extmac, sizeof (extmac), - sizeof (extmac))) != 0 || + if ((error = sooptcopyin(sopt, &extmac, sizeof(extmac), + sizeof(extmac))) != 0 || (error = mac_socket_label_get(proc_ucred( - sopt->sopt_p), so, &extmac)) != 0) + sopt->sopt_p), so, &extmac)) != 0) { break; + } - error = sooptcopyout(sopt, &extmac, sizeof (extmac)); + error = sooptcopyout(sopt, &extmac, sizeof(extmac)); #else error = EOPNOTSUPP; #endif /* MAC_SOCKET */ @@ -5693,13 +5953,14 @@ integer: case SO_PEERLABEL: #if CONFIG_MACF_SOCKET - if ((error = sooptcopyin(sopt, &extmac, sizeof (extmac), - sizeof (extmac))) != 0 || + if ((error = sooptcopyin(sopt, &extmac, sizeof(extmac), + sizeof(extmac))) != 0 || (error = mac_socketpeer_label_get(proc_ucred( - sopt->sopt_p), so, &extmac)) != 0) + sopt->sopt_p), so, &extmac)) != 0) { break; + } - error = sooptcopyout(sopt, &extmac, sizeof (extmac)); + error = sooptcopyout(sopt, &extmac, sizeof(extmac)); #else error = EOPNOTSUPP; #endif /* MAC_SOCKET */ @@ -5722,7 +5983,7 @@ integer: sonpx.npx_mask = SONPX_MASK_VALID; error = sooptcopyout(sopt, &sonpx, - sizeof (struct so_np_extensions)); + sizeof(struct so_np_extensions)); break; } @@ -5736,7 +5997,7 @@ integer: case SO_TRAFFIC_CLASS_STATS: error = sooptcopyout(sopt, &so->so_tc_stats, - sizeof (so->so_tc_stats)); + sizeof(so->so_tc_stats)); break; #if (DEVELOPMENT || DEBUG) @@ -5782,7 +6043,7 @@ integer: case SO_FLOW_DIVERT_TOKEN: error = flow_divert_token_get(so, sopt); break; -#endif /* FLOW_DIVERT */ +#endif /* FLOW_DIVERT */ #if NECP case SO_NECP_ATTRIBUTES: @@ -5814,10 +6075,10 @@ integer: sock_id = cfil_sock_id_from_socket(so); error = sooptcopyout(sopt, &sock_id, - sizeof(cfil_sock_id_t)); + sizeof(cfil_sock_id_t)); break; } -#endif /* CONTENT_FILTER */ +#endif /* CONTENT_FILTER */ case 
SO_EXTENDED_BK_IDLE: optval = (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED); @@ -5827,10 +6088,11 @@ integer: ? 1 : 0; goto integer; case SO_NET_SERVICE_TYPE: { - if ((so->so_flags1 & SOF1_TC_NET_SERV_TYPE)) + if ((so->so_flags1 & SOF1_TC_NET_SERV_TYPE)) { optval = so->so_netsvctype; - else + } else { optval = NET_SERVICE_TYPE_BE; + } goto integer; } case SO_NETSVC_MARKING_LEVEL: @@ -5843,9 +6105,10 @@ integer: } } out: - if (dolock) + if (dolock) { socket_unlock(so, 1); - return (error); + } + return error; } /* @@ -5860,18 +6123,20 @@ soopt_getm(struct sockopt *sopt, struct mbuf **mp) int sopt_size = sopt->sopt_valsize; int how; - if (sopt_size <= 0 || sopt_size > MCLBYTES) - return (EMSGSIZE); + if (sopt_size <= 0 || sopt_size > MCLBYTES) { + return EMSGSIZE; + } how = sopt->sopt_p != kernproc ? M_WAIT : M_DONTWAIT; MGET(m, how, MT_DATA); - if (m == NULL) - return (ENOBUFS); + if (m == NULL) { + return ENOBUFS; + } if (sopt_size > MLEN) { MCLGET(m, how); if ((m->m_flags & M_EXT) == 0) { m_free(m); - return (ENOBUFS); + return ENOBUFS; } m->m_len = min(MCLBYTES, sopt_size); } else { @@ -5885,14 +6150,14 @@ soopt_getm(struct sockopt *sopt, struct mbuf **mp) MGET(m, how, MT_DATA); if (m == NULL) { m_freem(*mp); - return (ENOBUFS); + return ENOBUFS; } if (sopt_size > MLEN) { MCLGET(m, how); if ((m->m_flags & M_EXT) == 0) { m_freem(*mp); m_freem(m); - return (ENOBUFS); + return ENOBUFS; } m->m_len = min(MCLBYTES, sopt_size); } else { @@ -5902,7 +6167,7 @@ soopt_getm(struct sockopt *sopt, struct mbuf **mp) m_prev->m_next = m; m_prev = m; } - return (0); + return 0; } /* copyin sopt data into mbuf chain */ @@ -5911,8 +6176,9 @@ soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) { struct mbuf *m0 = m; - if (sopt->sopt_val == USER_ADDR_NULL) - return (0); + if (sopt->sopt_val == USER_ADDR_NULL) { + return 0; + } while (m != NULL && sopt->sopt_valsize >= m->m_len) { if (sopt->sopt_p != kernproc) { int error; @@ -5921,7 +6187,7 @@ soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) m->m_len); if (error != 0) { m_freem(m0); - return (error); + return error; } } else { bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), @@ -5936,7 +6202,7 @@ soopt_mcopyin(struct sockopt *sopt, struct mbuf *m) panic("soopt_mcopyin"); /* NOTREACHED */ } - return (0); + return 0; } /* copyout mbuf chain data into soopt */ @@ -5946,8 +6212,9 @@ soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) struct mbuf *m0 = m; size_t valsize = 0; - if (sopt->sopt_val == USER_ADDR_NULL) - return (0); + if (sopt->sopt_val == USER_ADDR_NULL) { + return 0; + } while (m != NULL && sopt->sopt_valsize >= m->m_len) { if (sopt->sopt_p != kernproc) { int error; @@ -5956,7 +6223,7 @@ soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) m->m_len); if (error != 0) { m_freem(m0); - return (error); + return error; } } else { bcopy(mtod(m, char *), @@ -5970,19 +6237,20 @@ soopt_mcopyout(struct sockopt *sopt, struct mbuf *m) if (m != NULL) { /* enough soopt buffer should be given from user-land */ m_freem(m0); - return (EINVAL); + return EINVAL; } sopt->sopt_valsize = valsize; - return (0); + return 0; } void sohasoutofband(struct socket *so) { - if (so->so_pgid < 0) + if (so->so_pgid < 0) { gsignal(-so->so_pgid, SIGURG); - else if (so->so_pgid > 0) + } else if (so->so_pgid > 0) { proc_signal(so->so_pgid, SIGURG); + } selwakeup(&so->so_rcv.sb_sel); if (so->so_rcv.sb_flags & SB_KNOTE) { KNOTE(&so->so_rcv.sb_sel.si_note, @@ -6001,17 +6269,23 @@ sopoll(struct socket *so, int events, kauth_cred_t cred, void * wql) so_update_last_owner_locked(so, PROC_NULL); 
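
The byte-count case shown above (SO_NREAD, the pkt_total loop) walks the mbufs queued on the receive buffer, totals m_len over MT_DATA, MT_HEADER, and MT_OOBDATA mbufs, and returns the sum through sooptcopyout(). User space retrieves the same count with a plain getsockopt(2) call. A minimal sketch, illustrative only and not part of the patch, with fd a hypothetical socket:

#include <sys/socket.h>

/*
 * Illustrative sketch, not part of the patch: reads the pending byte
 * count that the SO_NREAD case above computes from the receive queue.
 */
static int
bytes_readable(int fd)
{
	int nread = 0;
	socklen_t len = sizeof(nread);

	if (getsockopt(fd, SOL_SOCKET, SO_NREAD, &nread, &len) == -1) {
		return -1;
	}
	return nread;
}
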
so_update_policy(so); - if (events & (POLLIN | POLLRDNORM)) - if (soreadable(so)) + if (events & (POLLIN | POLLRDNORM)) { + if (soreadable(so)) { revents |= events & (POLLIN | POLLRDNORM); + } + } - if (events & (POLLOUT | POLLWRNORM)) - if (sowriteable(so)) + if (events & (POLLOUT | POLLWRNORM)) { + if (sowriteable(so)) { revents |= events & (POLLOUT | POLLWRNORM); + } + } - if (events & (POLLPRI | POLLRDBAND)) - if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) + if (events & (POLLPRI | POLLRDBAND)) { + if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) { revents |= events & (POLLPRI | POLLRDBAND); + } + } if (revents == 0) { if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) { @@ -6034,12 +6308,12 @@ sopoll(struct socket *so, int events, kauth_cred_t cred, void * wql) } socket_unlock(so, 1); - return (revents); + return revents; } int soo_kqfilter(struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx) + struct kevent_internal_s *kev, vfs_context_t ctx) { #pragma unused(fp) #if !CONFIG_MACF_SOCKET @@ -6106,9 +6380,9 @@ filt_soread_common(struct knote *kn, struct socket *so) */ kn->kn_data = so->so_qlen; - is_not_empty = ! TAILQ_EMPTY(&so->so_comp); + is_not_empty = !TAILQ_EMPTY(&so->so_comp); - return (is_not_empty); + return is_not_empty; } /* socket isn't a listener */ @@ -6123,7 +6397,7 @@ filt_soread_common(struct knote *kn, struct socket *so) if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) { kn->kn_fflags |= NOTE_OOB; kn->kn_data -= so->so_oobmark; - return (1); + return 1; } } @@ -6131,27 +6405,28 @@ filt_soread_common(struct knote *kn, struct socket *so) #if CONTENT_FILTER && cfil_sock_data_pending(&so->so_rcv) == 0 #endif /* CONTENT_FILTER */ - ) { + ) { kn->kn_flags |= EV_EOF; kn->kn_fflags = so->so_error; - return (1); + return 1; } - if (so->so_error) { /* temporary udp error */ - return (1); + if (so->so_error) { /* temporary udp error */ + return 1; } - int64_t lowwat = so->so_rcv.sb_lowat; + int64_t lowwat = so->so_rcv.sb_lowat; /* * Ensure that when NOTE_LOWAT is used, the derived * low water mark is bounded by socket's rcv buf's * high and low water mark values. */ if (kn->kn_sfflags & NOTE_LOWAT) { - if (kn->kn_sdata > so->so_rcv.sb_hiwat) + if (kn->kn_sdata > so->so_rcv.sb_hiwat) { lowwat = so->so_rcv.sb_hiwat; - else if (kn->kn_sdata > lowwat) + } else if (kn->kn_sdata > lowwat) { lowwat = kn->kn_sdata; + } } /* @@ -6159,10 +6434,11 @@ filt_soread_common(struct knote *kn, struct socket *so) * overrides sb_lowat, check for NOTE_LOWAT case * first. 
*/ - if (kn->kn_sfflags & NOTE_LOWAT) - return (kn->kn_data >= lowwat); + if (kn->kn_sfflags & NOTE_LOWAT) { + return kn->kn_data >= lowwat; + } - return (so->so_rcv.sb_cc >= lowwat); + return so->so_rcv.sb_cc >= lowwat; } static int @@ -6184,8 +6460,9 @@ filt_sorattach(struct knote *kn, __unused struct kevent_internal_s *kev) } else { kn->kn_hookid = 0; } - if (KNOTE_ATTACH(&so->so_rcv.sb_sel.si_note, kn)) + if (KNOTE_ATTACH(&so->so_rcv.sb_sel.si_note, kn)) { so->so_rcv.sb_flags |= SB_KNOTE; + } /* indicate if event is already fired */ return filt_soread_common(kn, so); @@ -6197,9 +6474,11 @@ filt_sordetach(struct knote *kn) struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; socket_lock(so, 1); - if (so->so_rcv.sb_flags & SB_KNOTE) - if (KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn)) + if (so->so_rcv.sb_flags & SB_KNOTE) { + if (KNOTE_DETACH(&so->so_rcv.sb_sel.si_note, kn)) { so->so_rcv.sb_flags &= ~SB_KNOTE; + } + } socket_unlock(so, 1); } @@ -6210,13 +6489,15 @@ filt_soread(struct knote *kn, long hint) struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; int retval; - if ((hint & SO_FILT_HINT_LOCKED) == 0) + if ((hint & SO_FILT_HINT_LOCKED) == 0) { socket_lock(so, 1); + } retval = filt_soread_common(kn, so); - if ((hint & SO_FILT_HINT_LOCKED) == 0) + if ((hint & SO_FILT_HINT_LOCKED) == 0) { socket_unlock(so, 1); + } return retval; } @@ -6268,10 +6549,11 @@ so_wait_for_if_feedback(struct socket *so) if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) && (so->so_state & SS_ISCONNECTED)) { struct inpcb *inp = sotoinpcb(so); - if (INP_WAIT_FOR_IF_FEEDBACK(inp)) - return (1); + if (INP_WAIT_FOR_IF_FEEDBACK(inp)) { + return 1; + } } - return (0); + return 0; } static int @@ -6285,7 +6567,7 @@ filt_sowrite_common(struct knote *kn, struct socket *so) kn->kn_fflags = so->so_error; return 1; } - if (so->so_error) { /* temporary udp error */ + if (so->so_error) { /* temporary udp error */ return 1; } if (!socanwrite(so)) { @@ -6294,12 +6576,13 @@ filt_sowrite_common(struct knote *kn, struct socket *so) if (so->so_flags1 & SOF1_PRECONNECT_DATA) { return 1; } - int64_t lowwat = so->so_snd.sb_lowat; + int64_t lowwat = so->so_snd.sb_lowat; if (kn->kn_sfflags & NOTE_LOWAT) { - if (kn->kn_sdata > so->so_snd.sb_hiwat) + if (kn->kn_sdata > so->so_snd.sb_hiwat) { lowwat = so->so_snd.sb_hiwat; - else if (kn->kn_sdata > lowwat) + } else if (kn->kn_sdata > lowwat) { lowwat = kn->kn_sdata; + } } if (kn->kn_data >= lowwat) { if ((so->so_flags & SOF_NOTSENT_LOWAT) @@ -6325,9 +6608,10 @@ filt_sowrite_common(struct knote *kn, struct socket *so) ret = 1; } } - if (so_wait_for_if_feedback(so)) + if (so_wait_for_if_feedback(so)) { ret = 0; - return (ret); + } + return ret; } static int @@ -6336,8 +6620,9 @@ filt_sowattach(struct knote *kn, __unused struct kevent_internal_s *kev) struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; /* socket locked */ - if (KNOTE_ATTACH(&so->so_snd.sb_sel.si_note, kn)) + if (KNOTE_ATTACH(&so->so_snd.sb_sel.si_note, kn)) { so->so_snd.sb_flags |= SB_KNOTE; + } /* determine if its already fired */ return filt_sowrite_common(kn, so); @@ -6349,9 +6634,11 @@ filt_sowdetach(struct knote *kn) struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; socket_lock(so, 1); - if (so->so_snd.sb_flags & SB_KNOTE) - if (KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn)) + if (so->so_snd.sb_flags & SB_KNOTE) { + if (KNOTE_DETACH(&so->so_snd.sb_sel.si_note, kn)) { so->so_snd.sb_flags &= ~SB_KNOTE; + } + } socket_unlock(so, 1); } @@ -6362,13 +6649,15 @@ 
filt_sowrite(struct knote *kn, long hint) struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; int ret; - if ((hint & SO_FILT_HINT_LOCKED) == 0) + if ((hint & SO_FILT_HINT_LOCKED) == 0) { socket_lock(so, 1); + } ret = filt_sowrite_common(kn, so); - if ((hint & SO_FILT_HINT_LOCKED) == 0) + if ((hint & SO_FILT_HINT_LOCKED) == 0) { socket_unlock(so, 1); + } return ret; } @@ -6452,8 +6741,9 @@ filt_sockev_common(struct knote *kn, struct socket *so, long ev_hint) } if (ev_hint & SO_FILT_HINT_CONNINFO_UPDATED) { if (so->so_proto != NULL && - (so->so_proto->pr_flags & PR_EVCONNINFO)) + (so->so_proto->pr_flags & PR_EVCONNINFO)) { kn->kn_fflags |= NOTE_CONNINFO_UPDATED; + } } if ((ev_hint & SO_FILT_HINT_NOTIFY_ACK) || @@ -6514,10 +6804,11 @@ filt_sockev_common(struct knote *kn, struct socket *so, long ev_hint) level_trigger &= EVFILT_SOCK_LEVEL_TRIGGER_MASK; /* Do not deliver level triggerred events more than once */ - if ((kn->kn_fflags & ~level_trigger) != 0) + if ((kn->kn_fflags & ~level_trigger) != 0) { ret = 1; + } - return (ret); + return ret; } static int @@ -6527,8 +6818,9 @@ filt_sockattach(struct knote *kn, __unused struct kevent_internal_s *kev) /* socket locked */ kn->kn_hookid = 0; - if (KNOTE_ATTACH(&so->so_klist, kn)) + if (KNOTE_ATTACH(&so->so_klist, kn)) { so->so_flags |= SOF_KNOTE; + } /* determine if event already fired */ return filt_sockev_common(kn, so, 0); @@ -6540,9 +6832,11 @@ filt_sockdetach(struct knote *kn) struct socket *so = (struct socket *)kn->kn_fp->f_fglob->fg_data; socket_lock(so, 1); - if ((so->so_flags & SOF_KNOTE) != 0) - if (KNOTE_DETACH(&so->so_klist, kn)) + if ((so->so_flags & SOF_KNOTE) != 0) { + if (KNOTE_DETACH(&so->so_klist, kn)) { so->so_flags &= ~SOF_KNOTE; + } + } socket_unlock(so, 1); } @@ -6560,8 +6854,9 @@ filt_sockev(struct knote *kn, long hint) ret = filt_sockev_common(kn, so, ev_hint); - if (locked) + if (locked) { socket_unlock(so, 1); + } return ret; } @@ -6638,19 +6933,22 @@ filt_sockprocess( * ateast once and still avoid waking up the application * multiple times as long as the event is active. */ - if (kn->kn_fflags != 0) + if (kn->kn_fflags != 0) { kn->kn_hookid |= (kn->kn_fflags & - EVFILT_SOCK_LEVEL_TRIGGER_MASK); + EVFILT_SOCK_LEVEL_TRIGGER_MASK); + } /* * NOTE_RESUME and NOTE_SUSPEND are an exception, deliver * only one of them and remember the last one that was * delivered last */ - if (kn->kn_fflags & NOTE_SUSPEND) + if (kn->kn_fflags & NOTE_SUSPEND) { kn->kn_hookid &= ~NOTE_RESUME; - if (kn->kn_fflags & NOTE_RESUME) + } + if (kn->kn_fflags & NOTE_RESUME) { kn->kn_hookid &= ~NOTE_SUSPEND; + } if (kn->kn_flags & EV_CLEAR) { kn->kn_data = 0; @@ -6672,18 +6970,20 @@ get_sockev_state(struct socket *so, u_int32_t *statep) * If the state variable is already used by a previous event, * reset it. */ - if (state != 0) + if (state != 0) { return; + } - if (so->so_state & SS_ISCONNECTED) + if (so->so_state & SS_ISCONNECTED) { state |= SOCKEV_CONNECTED; - else + } else { state &= ~(SOCKEV_CONNECTED); + } state |= ((so->so_state & SS_ISDISCONNECTED) ? 
SOCKEV_DISCONNECTED : 0); *(statep) = state; } -#define SO_LOCK_HISTORY_STR_LEN \ +#define SO_LOCK_HISTORY_STR_LEN \ (2 * SO_LCKDBG_MAX * (2 + (2 * sizeof (void *)) + 1) + 1) __private_extern__ const char * @@ -6693,14 +6993,14 @@ solockhistory_nr(struct socket *so) int i; static char lock_history_str[SO_LOCK_HISTORY_STR_LEN]; - bzero(lock_history_str, sizeof (lock_history_str)); + bzero(lock_history_str, sizeof(lock_history_str)); for (i = SO_LCKDBG_MAX - 1; i >= 0; i--) { n += snprintf(lock_history_str + n, SO_LOCK_HISTORY_STR_LEN - n, "%p:%p ", so->lock_lr[(so->next_lock_lr + i) % SO_LCKDBG_MAX], so->unlock_lr[(so->next_unlock_lr + i) % SO_LCKDBG_MAX]); } - return (lock_history_str); + return lock_history_str; } void @@ -6718,10 +7018,11 @@ socket_lock(struct socket *so, int refcount) LCK_MTX_ASSERT_NOTOWNED); #endif lck_mtx_lock(so->so_proto->pr_domain->dom_mtx); - if (refcount) + if (refcount) { so->so_usecount++; + } so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; + so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; } } @@ -6730,10 +7031,11 @@ socket_lock_assert_owned(struct socket *so) { lck_mtx_t *mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); } @@ -6743,12 +7045,13 @@ socket_try_lock(struct socket *so) { lck_mtx_t *mtx; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mtx = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mtx = so->so_proto->pr_domain->dom_mtx; + } - return (lck_mtx_try_lock(mtx)); + return lck_mtx_try_lock(mtx); } void @@ -6772,7 +7075,7 @@ socket_unlock(struct socket *so, int refcount) LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); #endif so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; if (refcount) { if (so->so_usecount <= 0) { @@ -6784,8 +7087,9 @@ socket_unlock(struct socket *so, int refcount) } so->so_usecount--; - if (so->so_usecount == 0) + if (so->so_usecount == 0) { sofreelastref(so, 1); + } } lck_mtx_unlock(mutex_held); } @@ -6797,10 +7101,11 @@ sofree(struct socket *so) { lck_mtx_t *mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); sofreelastref(so, 0); @@ -6809,8 +7114,8 @@ sofree(struct socket *so) void soreference(struct socket *so) { - socket_lock(so, 1); /* locks & take one reference on socket */ - socket_unlock(so, 0); /* unlock only */ + socket_lock(so, 1); /* locks & take one reference on socket */ + socket_unlock(so, 0); /* unlock only */ } void @@ -6828,32 +7133,35 @@ sodereference(struct socket *so) void somultipages(struct socket *so, boolean_t set) { - if (set) + if (set) { so->so_flags |= SOF_MULTIPAGES; - else + } else { so->so_flags &= ~SOF_MULTIPAGES; + } } void soif2kcl(struct socket *so, boolean_t set) { - if (set) + if (set) { so->so_flags1 |= SOF1_IF_2KCL; - else + } else { so->so_flags1 &= ~SOF1_IF_2KCL; + } } int -so_isdstlocal(struct socket *so) { - +so_isdstlocal(struct socket *so) +{ struct inpcb *inp = (struct inpcb *)so->so_pcb; - if (SOCK_DOM(so) == PF_INET) - return 
(inaddr_local(inp->inp_faddr)); - else if (SOCK_DOM(so) == PF_INET6) - return (in6addr_local(&inp->in6p_faddr)); + if (SOCK_DOM(so) == PF_INET) { + return inaddr_local(inp->inp_faddr); + } else if (SOCK_DOM(so) == PF_INET6) { + return in6addr_local(&inp->in6p_faddr); + } - return (0); + return 0; } int @@ -6887,7 +7195,7 @@ sosetdefunct(struct proc *p, struct socket *so, int level, boolean_t noforce) (uint64_t)DEBUG_KERNEL_ADDRPERM(so), SOCK_DOM(so), SOCK_TYPE(so), err); } - return (err); + return err; } so->so_flags &= ~SOF_NODEFUNCT; if (p != PROC_NULL) { @@ -6928,7 +7236,7 @@ sosetdefunct(struct proc *p, struct socket *so, int level, boolean_t noforce) proc_best_name(p), level, (uint64_t)DEBUG_KERNEL_ADDRPERM(so), SOCK_DOM(so), SOCK_TYPE(so), err); - return (err); + return err; } else { OSIncrementAtomic(&soextbkidlestat.so_xbkidle_forced); } @@ -6963,7 +7271,7 @@ done: (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) ? " extbkidle" : ""); } - return (err); + return err; } int @@ -6975,8 +7283,9 @@ sodefunct(struct proc *p, struct socket *so, int level) panic("%s improperly called", __func__); /* NOTREACHED */ } - if (so->so_state & SS_DEFUNCT) + if (so->so_state & SS_DEFUNCT) { goto done; + } rcv = &so->so_rcv; snd = &so->so_snd; @@ -6988,27 +7297,27 @@ sodefunct(struct proc *p, struct socket *so, int level) if (p != PROC_NULL) { SODEFUNCTLOG( - "%s[%d, %s]: (target pid %d name %s level %d) " - "so 0x%llx [%s %s:%d -> %s:%d] is now defunct " - "[rcv_si 0x%x, snd_si 0x%x, rcv_fl 0x%x, " - " snd_fl 0x%x]\n", __func__, - proc_selfpid(), proc_best_name(current_proc()), - proc_pid(p), proc_best_name(p), level, - (uint64_t)DEBUG_KERNEL_ADDRPERM(so), - (SOCK_TYPE(so) == SOCK_STREAM) ? "TCP" : "UDP", - inet_ntop(SOCK_DOM(so), ((SOCK_DOM(so) == PF_INET) ? - (void *)&inp->inp_laddr.s_addr : - (void *)&inp->in6p_laddr), - s, sizeof (s)), ntohs(inp->in6p_lport), - inet_ntop(SOCK_DOM(so), (SOCK_DOM(so) == PF_INET) ? - (void *)&inp->inp_faddr.s_addr : - (void *)&inp->in6p_faddr, - d, sizeof (d)), ntohs(inp->in6p_fport), - (uint32_t)rcv->sb_sel.si_flags, - (uint32_t)snd->sb_sel.si_flags, - rcv->sb_flags, snd->sb_flags); - } - } else if (p != PROC_NULL) { + "%s[%d, %s]: (target pid %d name %s level %d) " + "so 0x%llx [%s %s:%d -> %s:%d] is now defunct " + "[rcv_si 0x%x, snd_si 0x%x, rcv_fl 0x%x, " + " snd_fl 0x%x]\n", __func__, + proc_selfpid(), proc_best_name(current_proc()), + proc_pid(p), proc_best_name(p), level, + (uint64_t)DEBUG_KERNEL_ADDRPERM(so), + (SOCK_TYPE(so) == SOCK_STREAM) ? "TCP" : "UDP", + inet_ntop(SOCK_DOM(so), ((SOCK_DOM(so) == PF_INET) ? + (void *)&inp->inp_laddr.s_addr : + (void *)&inp->in6p_laddr), + s, sizeof(s)), ntohs(inp->in6p_lport), + inet_ntop(SOCK_DOM(so), (SOCK_DOM(so) == PF_INET) ? 
+ (void *)&inp->inp_faddr.s_addr : + (void *)&inp->in6p_faddr, + d, sizeof(d)), ntohs(inp->in6p_fport), + (uint32_t)rcv->sb_sel.si_flags, + (uint32_t)snd->sb_sel.si_flags, + rcv->sb_flags, snd->sb_flags); + } + } else if (p != PROC_NULL) { SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s level %d) " "so 0x%llx [%d,%d] is now defunct [rcv_si 0x%x, " "snd_si 0x%x, rcv_fl 0x%x, snd_fl 0x%x]\n", __func__, @@ -7028,11 +7337,12 @@ sodefunct(struct proc *p, struct socket *so, int level) sbwakeup(snd); so->so_flags1 |= SOF1_DEFUNCTINPROG; - if (rcv->sb_flags & SB_LOCK) - sbunlock(rcv, TRUE); /* keep socket locked */ - if (snd->sb_flags & SB_LOCK) - sbunlock(snd, TRUE); /* keep socket locked */ - + if (rcv->sb_flags & SB_LOCK) { + sbunlock(rcv, TRUE); /* keep socket locked */ + } + if (snd->sb_flags & SB_LOCK) { + sbunlock(snd, TRUE); /* keep socket locked */ + } /* * Flush the buffers and disconnect. We explicitly call shutdown * on both data directions to ensure that SS_CANT{RCV,SEND}MORE @@ -7047,11 +7357,13 @@ sodefunct(struct proc *p, struct socket *so, int level) * Explicitly handle connectionless-protocol disconnection * and release any remaining data in the socket buffers. */ - if (!(so->so_state & SS_ISDISCONNECTED)) + if (!(so->so_state & SS_ISDISCONNECTED)) { (void) soisdisconnected(so); + } - if (so->so_error == 0) + if (so->so_error == 0) { so->so_error = EBADF; + } if (rcv->sb_cc != 0) { rcv->sb_flags &= ~SB_SEL; @@ -7067,14 +7379,15 @@ sodefunct(struct proc *p, struct socket *so, int level) OSIncrementAtomicLong((volatile long *)&sodefunct_calls); done: - return (0); + return 0; } int soresume(struct proc *p, struct socket *so, int locked) { - if (locked == 0) + if (locked == 0) { socket_lock(so, 1); + } if (so->so_flags1 & SOF1_EXTEND_BK_IDLE_INPROG) { SODEFUNCTLOG("%s[%d, %s]: (target pid %d name %s) so 0x%llx " @@ -7092,10 +7405,11 @@ soresume(struct proc *p, struct socket *so, int locked) OSDecrementAtomic(&soextbkidlestat.so_xbkidle_active); VERIFY(soextbkidlestat.so_xbkidle_active >= 0); } - if (locked == 0) + if (locked == 0) { socket_unlock(so, 1); + } - return (0); + return 0; } /* @@ -7124,7 +7438,7 @@ so_set_extended_bk_idle(struct socket *so, int optval) /* * Unlock socket to avoid lock ordering issue with * the proc fd table lock - */ + */ socket_unlock(so, 0); proc_fdlock(p); @@ -7136,15 +7450,18 @@ so_set_extended_bk_idle(struct socket *so, int optval) if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || - FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) + FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) { continue; + } so2 = (struct socket *)fp->f_fglob->fg_data; if (so != so2 && - so2->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) + so2->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) { count++; - if (count >= soextbkidlestat.so_xbkidle_maxperproc) + } + if (count >= soextbkidlestat.so_xbkidle_maxperproc) { break; + } } proc_fdunlock(p); @@ -7169,7 +7486,7 @@ so_set_extended_bk_idle(struct socket *so, int optval) "is" : "not"); } - return (error); + return error; } static void @@ -7234,27 +7551,28 @@ so_check_extended_bk_idle_time(struct socket *so) } } - return (ret); + return ret; } void resume_proc_sockets(proc_t p) { if (p->p_ladvflag & P_LXBKIDLEINPROG) { - struct filedesc *fdp; + struct filedesc *fdp; int i; proc_fdlock(p); fdp = p->p_fd; for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp; + struct fileproc *fp; struct socket *so; fp = fdp->fd_ofiles[i]; if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || - FILEGLOB_DTYPE(fp->f_fglob) != 
DTYPE_SOCKET) + FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) { continue; + } so = (struct socket *)fp->f_fglob->fg_data; (void) soresume(p, so, 0); @@ -7275,14 +7593,15 @@ so_set_recv_anyif(struct socket *so, int optval) #else if (SOCK_DOM(so) == PF_INET) { #endif /* !INET6 */ - if (optval) + if (optval) { sotoinpcb(so)->inp_flags |= INP_RECV_ANYIF; - else + } else { sotoinpcb(so)->inp_flags &= ~INP_RECV_ANYIF; + } } - return (ret); + return ret; } __private_extern__ int @@ -7298,7 +7617,7 @@ so_get_recv_anyif(struct socket *so) ret = (sotoinpcb(so)->inp_flags & INP_RECV_ANYIF) ? 1 : 0; } - return (ret); + return ret; } int @@ -7330,8 +7649,9 @@ so_set_restrictions(struct socket *so, uint32_t vals) /* we can only set, not clear restrictions */ if ((nocell_new - nocell_old) == 0 && - (noexpensive_new - noexpensive_old) == 0) - return (0); + (noexpensive_new - noexpensive_old) == 0) { + return 0; + } #if INET6 if (SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) { #else @@ -7349,18 +7669,19 @@ so_set_restrictions(struct socket *so, uint32_t vals) } } - if (SOCK_DOM(so) == PF_MULTIPATH) + if (SOCK_DOM(so) == PF_MULTIPATH) { mptcp_set_restrictions(so); + } - return (0); + return 0; } uint32_t so_get_restrictions(struct socket *so) { - return (so->so_restrictions & (SO_RESTRICT_DENY_IN | - SO_RESTRICT_DENY_OUT | - SO_RESTRICT_DENY_CELLULAR | SO_RESTRICT_DENY_EXPENSIVE)); + return so->so_restrictions & (SO_RESTRICT_DENY_IN | + SO_RESTRICT_DENY_OUT | + SO_RESTRICT_DENY_CELLULAR | SO_RESTRICT_DENY_EXPENSIVE); } int @@ -7424,7 +7745,7 @@ so_set_effective_pid(struct socket *so, int epid, struct proc *p) so->so_flags |= SOF_DELEGATED; so->e_upid = proc_uniqueid(ep); so->e_pid = proc_pid(ep); - proc_getexecutableuuid(ep, so->e_uuid, sizeof (so->e_uuid)); + proc_getexecutableuuid(ep, so->e_uuid, sizeof(so->e_uuid)); } done: if (error == 0 && net_io_policy_log) { @@ -7455,10 +7776,11 @@ done: #endif /* NECP */ } - if (ep != PROC_NULL) + if (ep != PROC_NULL) { proc_rele(ep); + } - return (error); + return error; } int @@ -7485,7 +7807,7 @@ so_set_effective_uuid(struct socket *so, uuid_t euuid, struct proc *p) } /* Get the UUID of the issuing process */ - proc_getexecutableuuid(p, uuid, sizeof (uuid)); + proc_getexecutableuuid(p, uuid, sizeof(uuid)); /* * If this is issued by a process that's recorded as the @@ -7555,7 +7877,7 @@ done: #endif /* NECP */ } - return (error); + return error; } void @@ -7569,15 +7891,15 @@ netpolicy_post_msg(uint32_t ev_code, struct netpolicy_event_data *ev_data, * structure, but the caller can provide for a longer event * structure to post, depending on the event code. */ - VERIFY(ev_data != NULL && ev_datalen >= sizeof (*ev_data)); + VERIFY(ev_data != NULL && ev_datalen >= sizeof(*ev_data)); - bzero(&ev_msg, sizeof (ev_msg)); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_NETPOLICY_SUBCLASS; - ev_msg.event_code = ev_code; + bzero(&ev_msg, sizeof(ev_msg)); + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_NETPOLICY_SUBCLASS; + ev_msg.event_code = ev_code; - ev_msg.dv[0].data_ptr = ev_data; + ev_msg.dv[0].data_ptr = ev_data; ev_msg.dv[0].data_length = ev_datalen; kev_post_msg(&ev_msg); @@ -7597,7 +7919,7 @@ socket_post_kev_msg(uint32_t ev_code, ev_msg.event_code = ev_code; ev_msg.dv[0].data_ptr = ev_data; - ev_msg.dv[0]. 
data_length = ev_datalen; + ev_msg.dv[0].data_length = ev_datalen; kev_post_msg(&ev_msg); } @@ -7616,16 +7938,18 @@ socket_post_kev_msg_closed(struct socket *so) if (err == 0) { memcpy(&ev.ev_data.kev_sockname, socksa, min(socksa->sa_len, - sizeof (ev.ev_data.kev_sockname))); + sizeof(ev.ev_data.kev_sockname))); memcpy(&ev.ev_data.kev_peername, peersa, min(peersa->sa_len, - sizeof (ev.ev_data.kev_peername))); + sizeof(ev.ev_data.kev_peername))); socket_post_kev_msg(KEV_SOCKET_CLOSED, - &ev.ev_data, sizeof (ev)); + &ev.ev_data, sizeof(ev)); } } - if (socksa != NULL) + if (socksa != NULL) { FREE(socksa, M_SONAME); - if (peersa != NULL) + } + if (peersa != NULL) { FREE(peersa, M_SONAME); + } } diff --git a/bsd/kern/uipc_socket2.c b/bsd/kern/uipc_socket2.c index 264819a7c..34b295590 100644 --- a/bsd/kern/uipc_socket2.c +++ b/bsd/kern/uipc_socket2.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998-2015 Apple Inc. All rights reserved. + * Copyright (c) 1998-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -103,8 +103,8 @@ #include #endif -#define DBG_FNC_SBDROP NETDBG_CODE(DBG_NETSOCK, 4) -#define DBG_FNC_SBAPPEND NETDBG_CODE(DBG_NETSOCK, 5) +#define DBG_FNC_SBDROP NETDBG_CODE(DBG_NETSOCK, 4) +#define DBG_FNC_SBAPPEND NETDBG_CODE(DBG_NETSOCK, 5) SYSCTL_DECL(_kern_ipc); @@ -130,18 +130,18 @@ static int soqlencomp = 0; * higher limit on sb_max that is checked when sb_max gets set through sysctl. */ -u_int32_t sb_max = SB_MAX; /* XXX should be static */ -u_int32_t high_sb_max = SB_MAX; +u_int32_t sb_max = SB_MAX; /* XXX should be static */ +u_int32_t high_sb_max = SB_MAX; -static u_int32_t sb_efficiency = 8; /* parameter for sbreserve() */ +static u_int32_t sb_efficiency = 8; /* parameter for sbreserve() */ int32_t total_sbmb_cnt __attribute__((aligned(8))) = 0; int32_t total_sbmb_cnt_floor __attribute__((aligned(8))) = 0; int32_t total_sbmb_cnt_peak __attribute__((aligned(8))) = 0; int64_t sbmb_limreached __attribute__((aligned(8))) = 0; -u_int32_t net_io_policy_log = 0; /* log socket policy changes */ +u_int32_t net_io_policy_log = 0; /* log socket policy changes */ #if CONFIG_PROC_UUID_POLICY -u_int32_t net_io_policy_uuid = 1; /* enable UUID socket policy */ +u_int32_t net_io_policy_uuid = 1; /* enable UUID socket policy */ #endif /* CONFIG_PROC_UUID_POLICY */ /* @@ -176,7 +176,7 @@ u_int32_t net_io_policy_uuid = 1; /* enable UUID socket policy */ void soisconnecting(struct socket *so) { - so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING); + so->so_state &= ~(SS_ISCONNECTED | SS_ISDISCONNECTING); so->so_state |= SS_ISCONNECTING; sflt_notify(so, sock_evt_connecting, NULL); @@ -185,7 +185,7 @@ soisconnecting(struct socket *so) void soisconnected(struct socket *so) { - so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); + so->so_state &= ~(SS_ISCONNECTING | SS_ISDISCONNECTING | SS_ISCONFIRMING); so->so_state |= SS_ISCONNECTED; soreserve_preconnect(so, 0); @@ -195,7 +195,7 @@ soisconnected(struct socket *so) if (so->so_head != NULL && (so->so_state & SS_INCOMP)) { struct socket *head = so->so_head; int locked = 0; - + /* * Enforce lock order when the protocol has per socket locks */ @@ -244,16 +244,16 @@ soisconnected(struct socket *so) boolean_t socanwrite(struct socket *so) { - return ((so->so_state & SS_ISCONNECTED) || + return (so->so_state & SS_ISCONNECTED) || !(so->so_proto->pr_flags & PR_CONNREQUIRED) || - (so->so_flags1 & SOF1_PRECONNECT_DATA)); + (so->so_flags1 & SOF1_PRECONNECT_DATA); } void soisdisconnecting(struct socket *so) { 
so->so_state &= ~SS_ISCONNECTING; - so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE); + so->so_state |= (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE); soevent(so, SO_FILT_HINT_LOCKED); sflt_notify(so, sock_evt_disconnecting, NULL); wakeup((caddr_t)&so->so_timeo); @@ -264,8 +264,8 @@ soisdisconnecting(struct socket *so) void soisdisconnected(struct socket *so) { - so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); - so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED); + so->so_state &= ~(SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING); + so->so_state |= (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED); soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED | SO_FILT_HINT_CONNINFO_UPDATED); sflt_notify(so, sock_evt_disconnected, NULL); @@ -287,8 +287,8 @@ soisdisconnected(struct socket *so) void sodisconnectwakeup(struct socket *so) { - so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); - so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED); + so->so_state &= ~(SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING); + so->so_state |= (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED); soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_DISCONNECTED | SO_FILT_HINT_CONNINFO_UPDATED); wakeup((caddr_t)&so->so_timeo); @@ -316,10 +316,11 @@ sonewconn_internal(struct socket *head, int connstatus) struct socket *so; lck_mtx_t *mutex_held; - if (head->so_proto->pr_getlock != NULL) + if (head->so_proto->pr_getlock != NULL) { mutex_held = (*head->so_proto->pr_getlock)(head, 0); - else + } else { mutex_held = head->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); if (!soqlencomp) { @@ -338,24 +339,27 @@ sonewconn_internal(struct socket *head, int connstatus) * as so_qlen so that we fail immediately below. */ so_qlen = head->so_qlen - head->so_incqlen; - if (head->so_incqlen > somaxconn) + if (head->so_incqlen > somaxconn) { so_qlen = somaxconn; + } } if (so_qlen >= - (soqlimitcompat ? head->so_qlimit : (3 * head->so_qlimit / 2))) - return ((struct socket *)0); + (soqlimitcompat ? 
head->so_qlimit : (3 * head->so_qlimit / 2))) { + return (struct socket *)0; + } so = soalloc(1, SOCK_DOM(head), head->so_type); - if (so == NULL) - return ((struct socket *)0); + if (so == NULL) { + return (struct socket *)0; + } /* check if head was closed during the soalloc */ if (head->so_proto == NULL) { sodealloc(so); - return ((struct socket *)0); + return (struct socket *)0; } so->so_type = head->so_type; - so->so_options = head->so_options &~ SO_ACCEPTCONN; + so->so_options = head->so_options & ~SO_ACCEPTCONN; so->so_linger = head->so_linger; so->so_state = head->so_state | SS_NOFDREF; so->so_proto = head->so_proto; @@ -365,23 +369,23 @@ sonewconn_internal(struct socket *head, int connstatus) so->so_cred = head->so_cred; so->last_pid = head->last_pid; so->last_upid = head->last_upid; - memcpy(so->last_uuid, head->last_uuid, sizeof (so->last_uuid)); + memcpy(so->last_uuid, head->last_uuid, sizeof(so->last_uuid)); if (head->so_flags & SOF_DELEGATED) { so->e_pid = head->e_pid; so->e_upid = head->e_upid; - memcpy(so->e_uuid, head->e_uuid, sizeof (so->e_uuid)); + memcpy(so->e_uuid, head->e_uuid, sizeof(so->e_uuid)); } /* inherit socket options stored in so_flags */ so->so_flags = head->so_flags & (SOF_NOSIGPIPE | SOF_NOADDRAVAIL | SOF_REUSESHAREUID | SOF_NOTIFYCONFLICT | SOF_BINDRANDOMPORT | SOF_NPX_SETOPTSHUT | - SOF_NODEFUNCT | SOF_PRIVILEGED_TRAFFIC_CLASS| SOF_NOTSENT_LOWAT | + SOF_NODEFUNCT | SOF_PRIVILEGED_TRAFFIC_CLASS | SOF_NOTSENT_LOWAT | SOF_USELRO | SOF_DELEGATED); so->so_usecount = 1; so->next_lock_lr = 0; so->next_unlock_lr = 0; - so->so_rcv.sb_flags |= SB_RECV; /* XXX */ + so->so_rcv.sb_flags |= SB_RECV; /* XXX */ so->so_rcv.sb_so = so->so_snd.sb_so = so; TAILQ_INIT(&so->so_evlist); @@ -397,7 +401,7 @@ sonewconn_internal(struct socket *head, int connstatus) if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) { sodealloc(so); - return ((struct socket *)0); + return (struct socket *)0; } so->so_rcv.sb_flags |= (head->so_rcv.sb_flags & SB_USRSIZE); so->so_snd.sb_flags |= (head->so_snd.sb_flags & SB_USRSIZE); @@ -406,14 +410,16 @@ sonewconn_internal(struct socket *head, int connstatus) * Must be done with head unlocked to avoid deadlock * for protocol with per socket mutexes. 
 */
-	if (head->so_proto->pr_unlock)
+	if (head->so_proto->pr_unlock) {
 		socket_unlock(head, 0);
+	}
 	if (((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL) != 0) ||
 	    error) {
 		sodealloc(so);
-		if (head->so_proto->pr_unlock)
+		if (head->so_proto->pr_unlock) {
 			socket_lock(head, 0);
-		return ((struct socket *)0);
+		}
+		return (struct socket *)0;
 	}
 	if (head->so_proto->pr_unlock) {
 		socket_lock(head, 0);
@@ -424,7 +430,7 @@
 		if ((head->so_options & SO_ACCEPTCONN) == 0) {
 			so->so_state &= ~SS_NOFDREF;
 			soclose(so);
-			return ((struct socket *)0);
+			return (struct socket *)0;
 		}
 	}

@@ -464,7 +470,7 @@
 		sorwakeup(head);
 		wakeup((caddr_t)&head->so_timeo);
 	}
-	return (so);
+	return so;
 }


@@ -473,10 +479,10 @@
 sonewconn(struct socket *head, int connstatus, const struct sockaddr *from)
 {
 	int error = sflt_connectin(head, from);
 	if (error) {
-		return (NULL);
+		return NULL;
 	}

-	return (sonewconn_internal(head, connstatus));
+	return sonewconn_internal(head, connstatus);
 }

 /*
@@ -540,13 +546,14 @@ sbwait(struct sockbuf *sb)
 			    (uint64_t)VM_KERNEL_ADDRPERM(so),
 			    SOCK_DOM(so), SOCK_TYPE(so), error);
 		}
-		return (error);
+		return error;
 	}
-	if (so->so_proto->pr_getlock != NULL)
+	if (so->so_proto->pr_getlock != NULL) {
 		mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK);
-	else
+	} else {
 		mutex_held = so->so_proto->pr_domain->dom_mtx;
+	}

 	LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
@@ -581,14 +588,15 @@ sbwait(struct sockbuf *sb)
 		}
 	}

-	return (error);
+	return error;
 }

 void
 sbwakeup(struct sockbuf *sb)
 {
-	if (sb->sb_waiters > 0)
+	if (sb->sb_waiters > 0) {
 		wakeup((caddr_t)&sb->sb_cc);
+	}
 }

 /*
@@ -612,10 +620,11 @@ sowakeup(struct socket *so, struct sockbuf *sb)
 	selwakeup(&sb->sb_sel);
 	sbwakeup(sb);
 	if (so->so_state & SS_ASYNC) {
-		if (so->so_pgid < 0)
+		if (so->so_pgid < 0) {
 			gsignal(-so->so_pgid, SIGIO);
-		else if (so->so_pgid > 0)
+		} else if (so->so_pgid > 0) {
 			proc_signal(so->so_pgid, SIGIO);
+		}
 	}
 	if (sb->sb_flags & SB_KNOTE) {
 		KNOTE(&sb->sb_sel.si_note, SO_FILT_HINT_LOCKED);
@@ -630,17 +639,20 @@ sowakeup(struct socket *so, struct sockbuf *sb)
 		/* Let close know that we're about to do an upcall */
 		so->so_upcallusecount++;

-		if (lock)
+		if (lock) {
 			socket_unlock(so, 0);
+		}
 		(*sb_upcall)(so, sb_upcallarg, M_DONTWAIT);
-		if (lock)
+		if (lock) {
 			socket_lock(so, 0);
+		}

 		so->so_upcallusecount--;
 		/* Tell close that it's safe to proceed */
 		if ((so->so_flags & SOF_CLOSEWAIT) &&
-		    so->so_upcallusecount == 0)
+		    so->so_upcallusecount == 0) {
 			wakeup((caddr_t)&so->so_upcallusecount);
+		}
 	}
 #if CONTENT_FILTER
 	/*
@@ -648,11 +660,13 @@ sowakeup(struct socket *so, struct sockbuf *sb)
 	 */
 	if ((so->so_flags & SOF_CONTENT_FILTER) != 0) {
 		if ((sb->sb_flags & SB_RECV)) {
-			if (so->so_state & (SS_CANTRCVMORE))
+			if (so->so_state & (SS_CANTRCVMORE)) {
 				cfil_sock_notify_shutdown(so, SHUT_RD);
+			}
 		} else {
-			if (so->so_state & (SS_CANTSENDMORE))
+			if (so->so_state & (SS_CANTSENDMORE)) {
 				cfil_sock_notify_shutdown(so, SHUT_WR);
+			}
 		}
 	}
 #endif /* CONTENT_FILTER */
@@ -697,29 +711,48 @@ sowakeup(struct socket *so, struct sockbuf *sb)
 int
 soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc)
 {
-	if (sbreserve(&so->so_snd, sndcc) == 0)
+	/*
+	 * We do not want to fail the creation of a socket
+	 * when kern.ipc.maxsockbuf is less than the
+	 * default socket buffer socket size of the protocol
+	 * so force the buffer sizes to be at most the
+	 * limit enforced by sbreserve()
+	 */
+	uint64_t maxcc = (uint64_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
+	if (sndcc > maxcc) {
+		sndcc = maxcc;
+	}
+	if (rcvcc > maxcc) {
+		rcvcc = maxcc;
+	}
+	if (sbreserve(&so->so_snd, sndcc) == 0) {
 		goto bad;
-	else
+	} else {
 		so->so_snd.sb_idealsize = sndcc;
+	}

-	if (sbreserve(&so->so_rcv, rcvcc) == 0)
+	if (sbreserve(&so->so_rcv, rcvcc) == 0) {
 		goto bad2;
-	else
+	} else {
 		so->so_rcv.sb_idealsize = rcvcc;
+	}

-	if (so->so_rcv.sb_lowat == 0)
+	if (so->so_rcv.sb_lowat == 0) {
 		so->so_rcv.sb_lowat = 1;
-	if (so->so_snd.sb_lowat == 0)
+	}
+	if (so->so_snd.sb_lowat == 0) {
 		so->so_snd.sb_lowat = MCLBYTES;
-	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
+	}
+	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat) {
 		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
-	return (0);
+	}
+	return 0;
 bad2:
 	so->so_snd.sb_flags &= ~SB_SEL;
 	selthreadclear(&so->so_snd.sb_sel);
 	sbrelease(&so->so_snd);
 bad:
-	return (ENOBUFS);
+	return ENOBUFS;
 }

 void
@@ -738,13 +771,15 @@ soreserve_preconnect(struct socket *so, unsigned int pre_cc)
 int
 sbreserve(struct sockbuf *sb, u_int32_t cc)
 {
-	if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES))
-		return (0);
+	if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES)) {
+		return 0;
+	}
 	sb->sb_hiwat = cc;
 	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
-	if (sb->sb_lowat > sb->sb_hiwat)
+	if (sb->sb_lowat > sb->sb_hiwat) {
 		sb->sb_lowat = sb->sb_hiwat;
-	return (1);
+	}
+	return 1;
 }

 /*
@@ -796,41 +831,46 @@ sbappend(struct sockbuf *sb, struct mbuf *m)
 	struct socket *so = sb->sb_so;

 	if (m == NULL || (sb->sb_flags & SB_DROP)) {
-		if (m != NULL)
+		if (m != NULL) {
 			m_freem(m);
-		return (0);
+		}
+		return 0;
 	}

 	SBLASTRECORDCHK(sb, "sbappend 1");

-	if (sb->sb_lastrecord != NULL && (sb->sb_mbtail->m_flags & M_EOR))
-		return (sbappendrecord(sb, m));
+	if (sb->sb_lastrecord != NULL && (sb->sb_mbtail->m_flags & M_EOR)) {
+		return sbappendrecord(sb, m);
+	}

 	if (sb->sb_flags & SB_RECV && !(m && m->m_flags & M_SKIPCFIL)) {
 		int error = sflt_data_in(so, NULL, &m, NULL, 0);
 		SBLASTRECORDCHK(sb, "sbappend 2");

 #if CONTENT_FILTER
-		if (error == 0)
+		if (error == 0) {
 			error = cfil_sock_data_in(so, NULL, m, NULL, 0);
+		}
 #endif /* CONTENT_FILTER */

 		if (error != 0) {
-			if (error != EJUSTRETURN)
+			if (error != EJUSTRETURN) {
 				m_freem(m);
-			return (0);
+			}
+			return 0;
 		}
 	} else if (m) {
 		m->m_flags &= ~M_SKIPCFIL;
 	}

 	/* If this is the first record, it's also the last record */
-	if (sb->sb_lastrecord == NULL)
+	if (sb->sb_lastrecord == NULL) {
 		sb->sb_lastrecord = m;
+	}

 	sbcompress(sb, m, sb->sb_mbtail);
 	SBLASTRECORDCHK(sb, "sbappend 3");
-	return (1);
+	return 1;
 }

 /*
@@ -842,9 +882,10 @@ sbappendstream(struct sockbuf *sb, struct mbuf *m)
 	struct socket *so = sb->sb_so;

 	if (m == NULL || (sb->sb_flags & SB_DROP)) {
-		if (m != NULL)
+		if (m != NULL) {
 			m_freem(m);
-		return (0);
+		}
+		return 0;
 	}

 	if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) {
@@ -860,14 +901,16 @@ sbappendstream(struct sockbuf *sb, struct mbuf *m)
 		SBLASTRECORDCHK(sb, "sbappendstream 1");

 #if CONTENT_FILTER
-		if (error == 0)
+		if (error == 0) {
 			error = cfil_sock_data_in(so, NULL, m, NULL, 0);
+		}
 #endif /* CONTENT_FILTER */

 		if (error != 0) {
-			if (error != EJUSTRETURN)
+			if (error != EJUSTRETURN) {
 				m_freem(m);
-			return (0);
+			}
+			return 0;
 		}
 	} else if (m) {
 		m->m_flags &= ~M_SKIPCFIL;
@@ -876,7 +919,7 @@ sbappendstream(struct sockbuf *sb, struct mbuf *m)
 	sbcompress(sb, m, sb->sb_mbtail);
 	sb->sb_lastrecord = sb->sb_mb;
 	SBLASTRECORDCHK(sb, "sbappendstream 2");
-	return (1);
+	return 1;
 }

 #ifdef SOCKBUF_DEBUG
@@ -888,15 +931,17 @@ sbcheck(struct 
sockbuf *sb) u_int32_t len = 0, mbcnt = 0; lck_mtx_t *mutex_held; - if (sb->sb_so->so_proto->pr_getlock != NULL) + if (sb->sb_so->so_proto->pr_getlock != NULL) { mutex_held = (*sb->sb_so->so_proto->pr_getlock)(sb->sb_so, 0); - else + } else { mutex_held = sb->sb_so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); - if (sbchecking == 0) + if (sbchecking == 0) { return; + } for (m = sb->sb_mb; m; m = n) { n = m->m_nextpkt; @@ -904,8 +949,9 @@ sbcheck(struct sockbuf *sb) len += m->m_len; mbcnt += MSIZE; /* XXX pretty sure this is bogus */ - if (m->m_flags & M_EXT) + if (m->m_flags & M_EXT) { mbcnt += m->m_ext.ext_size; + } } } if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) { @@ -920,8 +966,9 @@ sblastrecordchk(struct sockbuf *sb, const char *where) { struct mbuf *m = sb->sb_mb; - while (m && m->m_nextpkt) + while (m && m->m_nextpkt) { m = m->m_nextpkt; + } if (m != sb->sb_lastrecord) { printf("sblastrecordchk: mb 0x%llx lastrecord 0x%llx " @@ -930,8 +977,9 @@ sblastrecordchk(struct sockbuf *sb, const char *where) (uint64_t)VM_KERNEL_ADDRPERM(sb->sb_lastrecord), (uint64_t)VM_KERNEL_ADDRPERM(m)); printf("packet chain:\n"); - for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) + for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) { printf("\t0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(m)); + } panic("sblastrecordchk from %s", where); } } @@ -942,11 +990,13 @@ sblastmbufchk(struct sockbuf *sb, const char *where) struct mbuf *m = sb->sb_mb; struct mbuf *n; - while (m && m->m_nextpkt) + while (m && m->m_nextpkt) { m = m->m_nextpkt; + } - while (m && m->m_next) + while (m && m->m_next) { m = m->m_next; + } if (m != sb->sb_mbtail) { printf("sblastmbufchk: mb 0x%llx mbtail 0x%llx last 0x%llx\n", @@ -956,9 +1006,10 @@ sblastmbufchk(struct sockbuf *sb, const char *where) printf("packet tree:\n"); for (m = sb->sb_mb; m != NULL; m = m->m_nextpkt) { printf("\t"); - for (n = m; n != NULL; n = n->m_next) + for (n = m; n != NULL; n = n->m_next) { printf("0x%llx ", (uint64_t)VM_KERNEL_ADDRPERM(n)); + } printf("\n"); } panic("sblastmbufchk from %s", where); @@ -975,17 +1026,19 @@ sbappendrecord(struct sockbuf *sb, struct mbuf *m0) int space = 0; if (m0 == NULL || (sb->sb_flags & SB_DROP)) { - if (m0 != NULL) + if (m0 != NULL) { m_freem(m0); - return (0); + } + return 0; } - for (m = m0; m != NULL; m = m->m_next) + for (m = m0; m != NULL; m = m->m_next) { space += m->m_len; + } if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX)) { m_freem(m0); - return (0); + return 0; } if (sb->sb_flags & SB_RECV && !(m0 && m0->m_flags & M_SKIPCFIL)) { @@ -993,15 +1046,17 @@ sbappendrecord(struct sockbuf *sb, struct mbuf *m0) sock_data_filt_flag_record); #if CONTENT_FILTER - if (error == 0) + if (error == 0) { error = cfil_sock_data_in(sb->sb_so, NULL, m0, NULL, 0); + } #endif /* CONTENT_FILTER */ if (error != 0) { SBLASTRECORDCHK(sb, "sbappendrecord 1"); - if (error != EJUSTRETURN) + if (error != EJUSTRETURN) { m_freem(m0); - return (0); + } + return 0; } } else if (m0) { m0->m_flags &= ~M_SKIPCFIL; @@ -1028,7 +1083,7 @@ sbappendrecord(struct sockbuf *sb, struct mbuf *m0) } sbcompress(sb, m, m0); SBLASTRECORDCHK(sb, "sbappendrecord 3"); - return (1); + return 1; } /* @@ -1042,8 +1097,9 @@ sbinsertoob(struct sockbuf *sb, struct mbuf *m0) struct mbuf *m; struct mbuf **mp; - if (m0 == 0) - return (0); + if (m0 == 0) { + return 0; + } SBLASTRECORDCHK(sb, "sbinsertoob 1"); @@ -1054,15 +1110,16 @@ sbinsertoob(struct sockbuf *sb, struct mbuf *m0) SBLASTRECORDCHK(sb, "sbinsertoob 2"); #if CONTENT_FILTER - 
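The SBLASTRECORDCHK/SBLASTMBUFCHK walkers in this hunk re-derive, by walking the chain, the two tail pointers the socket buffer caches: sb_lastrecord (follow m_nextpkt across records) and sb_mbtail (then follow m_next within the last record). A toy user-space model of that two-dimensional walk; the reduced struct mb is not the kernel's struct mbuf.

/* Toy model of the record/mbuf chain the debug walkers verify. */
#include <assert.h>
#include <stddef.h>

struct mb {
	struct mb *m_next;      /* next mbuf within the record */
	struct mb *m_nextpkt;   /* first mbuf of the next record */
};

static struct mb *last_record(struct mb *m)
{
	while (m && m->m_nextpkt) {
		m = m->m_nextpkt;
	}
	return m;               /* what sb_lastrecord caches */
}

static struct mb *last_mbuf(struct mb *m)
{
	m = last_record(m);
	while (m && m->m_next) {
		m = m->m_next;
	}
	return m;               /* what sb_mbtail caches */
}

int main(void)
{
	struct mb a = { NULL, NULL }, b = { NULL, NULL }, c = { NULL, NULL };

	a.m_nextpkt = &b;       /* two records */
	b.m_next = &c;          /* second record has two mbufs */
	assert(last_record(&a) == &b);
	assert(last_mbuf(&a) == &c);
	return 0;
}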
if (error == 0) + if (error == 0) { error = cfil_sock_data_in(sb->sb_so, NULL, m0, NULL, 0); + } #endif /* CONTENT_FILTER */ if (error) { if (error != EJUSTRETURN) { m_freem(m0); } - return (0); + return 0; } } else if (m0) { m0->m_flags &= ~M_SKIPCFIL; @@ -1072,14 +1129,14 @@ sbinsertoob(struct sockbuf *sb, struct mbuf *m0) m = *mp; again: switch (m->m_type) { - case MT_OOBDATA: - continue; /* WANT next train */ + continue; /* WANT next train */ case MT_CONTROL: m = m->m_next; - if (m) - goto again; /* inspect THIS train further */ + if (m) { + goto again; /* inspect THIS train further */ + } } break; } @@ -1102,7 +1159,7 @@ again: } sbcompress(sb, m, m0); SBLASTRECORDCHK(sb, "sbinsertoob 3"); - return (1); + return 1; } /* @@ -1118,30 +1175,36 @@ sbconcat_mbufs(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, struct struct mbuf *m = NULL, *n = NULL; int space = 0; - if (m0 && (m0->m_flags & M_PKTHDR) == 0) + if (m0 && (m0->m_flags & M_PKTHDR) == 0) { panic("sbconcat_mbufs"); + } - if (m0) + if (m0) { space += m0->m_pkthdr.len; + } for (n = control; n; n = n->m_next) { space += n->m_len; - if (n->m_next == 0) /* keep pointer to last control buf */ + if (n->m_next == 0) { /* keep pointer to last control buf */ break; + } } if (asa != NULL) { - if (asa->sa_len > MLEN) - return (NULL); + if (asa->sa_len > MLEN) { + return NULL; + } space += asa->sa_len; } - if (sb != NULL && space > sbspace(sb)) - return (NULL); + if (sb != NULL && space > sbspace(sb)) { + return NULL; + } - if (n) - n->m_next = m0; /* concatenate data to control */ - else + if (n) { + n->m_next = m0; /* concatenate data to control */ + } else { control = m0; + } if (asa != NULL) { MGET(m, M_DONTWAIT, MT_SONAME); @@ -1150,7 +1213,7 @@ sbconcat_mbufs(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, struct /* unchain control and data if necessary */ n->m_next = NULL; } - return (NULL); + return NULL; } m->m_len = asa->sa_len; bcopy((caddr_t)asa, mtod(m, caddr_t), asa->sa_len); @@ -1160,7 +1223,7 @@ sbconcat_mbufs(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, struct m = control; } - return (m); + return m; } /* @@ -1176,14 +1239,17 @@ sbappendchain(struct sockbuf *sb, struct mbuf *m, int space) { struct mbuf *n, *nlast; - if (m == NULL) - return (0); + if (m == NULL) { + return 0; + } - if (space != 0 && space > sbspace(sb)) - return (0); + if (space != 0 && space > sbspace(sb)) { + return 0; + } - for (n = m; n->m_next != NULL; n = n->m_next) + for (n = m; n->m_next != NULL; n = n->m_next) { sballoc(sb, n); + } sballoc(sb, n); nlast = n; @@ -1199,7 +1265,7 @@ sbappendchain(struct sockbuf *sb, struct mbuf *m, int space) SBLASTRECORDCHK(sb, "sbappendadddr 2"); postevent(0, sb, EV_RWBYTES); - return (1); + return 1; } /* @@ -1218,20 +1284,25 @@ sbappendaddr(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, boolean_t sb_unix = (sb->sb_flags & SB_UNIX); struct mbuf *mbuf_chain = NULL; - if (error_out) + if (error_out) { *error_out = 0; + } - if (m0 && (m0->m_flags & M_PKTHDR) == 0) + if (m0 && (m0->m_flags & M_PKTHDR) == 0) { panic("sbappendaddrorfree"); + } if (sb->sb_flags & SB_DROP) { - if (m0 != NULL) + if (m0 != NULL) { m_freem(m0); - if (control != NULL && !sb_unix) + } + if (control != NULL && !sb_unix) { m_freem(control); - if (error_out != NULL) + } + if (error_out != NULL) { *error_out = EINVAL; - return (0); + } + return 0; } /* Call socket data in filters */ @@ -1241,21 +1312,25 @@ sbappendaddr(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, SBLASTRECORDCHK(sb, 
__func__); #if CONTENT_FILTER - if (error == 0) + if (error == 0) { error = cfil_sock_data_in(sb->sb_so, asa, m0, control, 0); + } #endif /* CONTENT_FILTER */ if (error) { if (error != EJUSTRETURN) { - if (m0) + if (m0) { m_freem(m0); - if (control != NULL && !sb_unix) + } + if (control != NULL && !sb_unix) { m_freem(control); - if (error_out) + } + if (error_out) { *error_out = error; + } } - return (0); + return 0; } } else if (m0) { m0->m_flags &= ~M_SKIPCFIL; @@ -1265,15 +1340,41 @@ sbappendaddr(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, SBLASTRECORDCHK(sb, "sbappendadddr 1"); result = sbappendchain(sb, mbuf_chain, 0); if (result == 0) { - if (m0) + if (m0) { m_freem(m0); - if (control != NULL && !sb_unix) + } + if (control != NULL && !sb_unix) { m_freem(control); - if (error_out) + } + if (error_out) { *error_out = ENOBUFS; + } } - return (result); + return result; +} + +inline boolean_t +is_cmsg_valid(struct mbuf *control, struct cmsghdr *cmsg) +{ + if (cmsg == NULL) { + return FALSE; + } + + if (cmsg->cmsg_len < sizeof(struct cmsghdr)) { + return FALSE; + } + + if ((uint8_t *)control->m_data >= (uint8_t *)cmsg + cmsg->cmsg_len) { + return FALSE; + } + + if ((uint8_t *)control->m_data + control->m_len < + (uint8_t *)cmsg + cmsg->cmsg_len) { + return FALSE; + } + + return TRUE; } static int @@ -1283,24 +1384,29 @@ sbappendcontrol_internal(struct sockbuf *sb, struct mbuf *m0, struct mbuf *m, *mlast, *n; int space = 0; - if (control == 0) + if (control == 0) { panic("sbappendcontrol"); + } - for (m = control; ; m = m->m_next) { + for (m = control;; m = m->m_next) { space += m->m_len; - if (m->m_next == 0) + if (m->m_next == 0) { break; + } } - n = m; /* save pointer to last control buffer */ - for (m = m0; m; m = m->m_next) + n = m; /* save pointer to last control buffer */ + for (m = m0; m; m = m->m_next) { space += m->m_len; - if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX)) - return (0); - n->m_next = m0; /* concatenate data to control */ + } + if (space > sbspace(sb) && !(sb->sb_flags & SB_UNIX)) { + return 0; + } + n->m_next = m0; /* concatenate data to control */ SBLASTRECORDCHK(sb, "sbappendcontrol 1"); - for (m = control; m->m_next != NULL; m = m->m_next) + for (m = control; m->m_next != NULL; m = m->m_next) { sballoc(sb, m); + } sballoc(sb, m); mlast = m; @@ -1316,27 +1422,31 @@ sbappendcontrol_internal(struct sockbuf *sb, struct mbuf *m0, SBLASTRECORDCHK(sb, "sbappendcontrol 2"); postevent(0, sb, EV_RWBYTES); - return (1); + return 1; } int -sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control, +sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control, int *error_out) { int result = 0; boolean_t sb_unix = (sb->sb_flags & SB_UNIX); - if (error_out) + if (error_out) { *error_out = 0; + } if (sb->sb_flags & SB_DROP) { - if (m0 != NULL) + if (m0 != NULL) { m_freem(m0); - if (control != NULL && !sb_unix) + } + if (control != NULL && !sb_unix) { m_freem(control); - if (error_out != NULL) + } + if (error_out != NULL) { *error_out = EINVAL; - return (0); + } + return 0; } if (sb->sb_flags & SB_RECV && !(m0 && m0->m_flags & M_SKIPCFIL)) { @@ -1346,21 +1456,25 @@ sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control, SBLASTRECORDCHK(sb, __func__); #if CONTENT_FILTER - if (error == 0) + if (error == 0) { error = cfil_sock_data_in(sb->sb_so, NULL, m0, control, 0); + } #endif /* CONTENT_FILTER */ if (error) { if (error != EJUSTRETURN) { - if (m0) + if (m0) { m_freem(m0); - if (control != NULL && !sb_unix) + } + 
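is_cmsg_valid() above is new code rather than reformatting: it rejects a cmsghdr whose declared cmsg_len would run past the control mbuf. The same bounds discipline applies when user space walks ancillary data; a compilable sketch using the standard CMSG_* macros (SCM_RIGHTS is just an illustrative message type):

/*
 * User-space analogue of the new is_cmsg_valid(): a cmsghdr must
 * declare at least a header's worth of length and lie entirely
 * inside the control buffer.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static int cmsg_in_bounds(const struct msghdr *mh, const struct cmsghdr *cm)
{
	const unsigned char *base = mh->msg_control;
	const unsigned char *end = base + mh->msg_controllen;
	const unsigned char *p = (const unsigned char *)cm;

	if (cm == NULL || cm->cmsg_len < sizeof(struct cmsghdr)) {
		return 0;
	}
	return p >= base && p + cm->cmsg_len <= end;
}

int main(void)
{
	union {
		unsigned char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;   /* keeps buf suitably aligned */
	} u;
	struct msghdr mh;
	struct cmsghdr *cm;
	int fd = 0;

	memset(&mh, 0, sizeof(mh));
	mh.msg_control = u.buf;
	mh.msg_controllen = sizeof(u.buf);

	cm = CMSG_FIRSTHDR(&mh);
	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_RIGHTS;
	cm->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cm), &fd, sizeof(fd));

	printf("in bounds: %d\n", cmsg_in_bounds(&mh, cm));
	return 0;
}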
if (control != NULL && !sb_unix) { m_freem(control); - if (error_out) + } + if (error_out) { *error_out = error; + } } - return (0); + return 0; } } else if (m0) { m0->m_flags &= ~M_SKIPCFIL; @@ -1368,15 +1482,18 @@ sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control, result = sbappendcontrol_internal(sb, m0, control); if (result == 0) { - if (m0) + if (m0) { m_freem(m0); - if (control != NULL && !sb_unix) + } + if (control != NULL && !sb_unix) { m_freem(control); - if (error_out) + } + if (error_out) { *error_out = ENOBUFS; + } } - return (result); + return result; } /* @@ -1392,8 +1509,9 @@ sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum, int ret = 0; struct socket *so = sb->sb_so; - if (m == NULL) - return (0); + if (m == NULL) { + return 0; + } VERIFY((m->m_flags & M_PKTHDR) && m_pktlen(m) > 0); VERIFY(so->so_msg_state != NULL); @@ -1403,7 +1521,7 @@ sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum, m->m_pkthdr.msg_seq = seqnum; /* find last mbuf and set M_EOR */ - for (m_eor = m; ; m_eor = m_eor->m_next) { + for (m_eor = m;; m_eor = m_eor->m_next) { /* * If the msg is unordered, we need to account for * these bytes in receive socket buffer size. Otherwise, @@ -1418,8 +1536,9 @@ sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum, } else { m_eor->m_flags &= ~M_UNORDERED_DATA; } - if (m_eor->m_next == NULL) + if (m_eor->m_next == NULL) { break; + } } /* set EOR flag at end of byte blob */ @@ -1436,7 +1555,7 @@ sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum, } if (!unordered && (sb->sb_mbtail != NULL) && - !(sb->sb_mbtail->m_flags & M_UNORDERED_DATA)) { + !(sb->sb_mbtail->m_flags & M_UNORDERED_DATA)) { sb->sb_mbtail->m_flags &= ~M_EOR; sbcompress(sb, m, sb->sb_mbtail); ret = 1; @@ -1444,7 +1563,7 @@ sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum, ret = sbappendrecord(sb, m); } VERIFY(sb->sb_mbtail->m_flags & M_EOR); - return (ret); + return ret; } /* @@ -1453,17 +1572,17 @@ sbappendmsgstream_rcv(struct sockbuf *sb, struct mbuf *m, uint32_t seqnum, */ int sbappendstream_rcvdemux(struct socket *so, struct mbuf *m, uint32_t seqnum, - int unordered) + int unordered) { int ret = 0; if ((m != NULL) && m_pktlen(m) <= 0 && !((so->so_flags & SOF_MP_SUBFLOW) && - (m->m_flags & M_PKTHDR) && - (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN))) { + (m->m_flags & M_PKTHDR) && + (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN))) { m_freem(m); - return (ret); + return ret; } if (so->so_flags & SOF_ENABLE_MSGS) { @@ -1477,7 +1596,7 @@ sbappendstream_rcvdemux(struct socket *so, struct mbuf *m, uint32_t seqnum, else { ret = sbappendstream(&so->so_rcv, m); } - return (ret); + return ret; } #if MPTCP @@ -1488,8 +1607,8 @@ sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m) VERIFY(m == NULL || (m->m_flags & M_PKTHDR)); /* SB_NOCOMPRESS must be set prevent loss of M_PKTHDR data */ - VERIFY((sb->sb_flags & (SB_RECV|SB_NOCOMPRESS)) == - (SB_RECV|SB_NOCOMPRESS)); + VERIFY((sb->sb_flags & (SB_RECV | SB_NOCOMPRESS)) == + (SB_RECV | SB_NOCOMPRESS)); if (m == NULL || m_pktlen(m) == 0 || (sb->sb_flags & SB_DROP) || (so->so_state & SS_CANTRCVMORE)) { @@ -1497,11 +1616,11 @@ sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m) m_pktlen(m) == 0 && (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN)) { mptcp_input(tptomptp(sototcpcb(so))->mpt_mpte, m); - return (1); + return 1; } else if (m != NULL) { m_freem(m); } - return (0); + return 0; } /* the socket is not closed, so 
SOF_MP_SUBFLOW must be set */ VERIFY(so->so_flags & SOF_MP_SUBFLOW); @@ -1519,7 +1638,7 @@ sbappendmptcpstream_rcv(struct sockbuf *sb, struct mbuf *m) sbcompress(sb, m, sb->sb_mbtail); sb->sb_lastrecord = sb->sb_mb; SBLASTRECORDCHK(sb, __func__); - return (1); + return 1; } #endif /* MPTCP */ @@ -1535,16 +1654,18 @@ sbappendmsg_snd(struct sockbuf *sb, struct mbuf *m) VERIFY(so->so_msg_state != NULL); - if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) + if (m->m_nextpkt != NULL || (sb->sb_mb != sb->sb_lastrecord)) { panic("sbappendstream: nexpkt %p || mb %p != lastrecord %p\n", m->m_nextpkt, sb->sb_mb, sb->sb_lastrecord); + } SBLASTMBUFCHK(sb, __func__); if (m == NULL || (sb->sb_flags & SB_DROP) || so->so_msg_state == NULL) { - if (m != NULL) + if (m != NULL) { m_freem(m); - return (0); + } + return 0; } priq = &so->so_msg_state->msg_priq[m->m_pkthdr.msg_pri]; @@ -1601,7 +1722,7 @@ sbappendmsg_snd(struct sockbuf *sb, struct mbuf *m) SBLASTRECORDCHK(sb, "sbappendstream 2"); postevent(0, sb, EV_RWBYTES); - return (1); + return 1; } /* @@ -1687,8 +1808,9 @@ sbpull_unordered_data(struct socket *so, int32_t off, int32_t len) * the mbuf that is being dequeued, update * it to point to the new head. */ - if (priq->msgq_lastmsg == m) + if (priq->msgq_lastmsg == m) { priq->msgq_lastmsg = priq->msgq_head; + } m->m_nextpkt = NULL; mend->m_next = NULL; @@ -1717,8 +1839,9 @@ sbpull_unordered_data(struct socket *so, int32_t off, int32_t len) topull = (off + len) - so->so_msg_state->msg_serial_bytes; - if (priq->msgq_flags & MSGQ_MSG_NOTDONE) + if (priq->msgq_flags & MSGQ_MSG_NOTDONE) { break; + } } else { --i; } @@ -1740,8 +1863,9 @@ sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n) if (m == NULL) { /* There is nothing to compress; just update the tail */ - for (; n->m_next != NULL; n = n->m_next) + for (; n->m_next != NULL; n = n->m_next) { ; + } sb->sb_mbtail = n; goto done; } @@ -1750,8 +1874,9 @@ sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n) eor |= m->m_flags & M_EOR; if (compress && m->m_len == 0 && (eor == 0 || (((o = m->m_next) || (o = n)) && o->m_type == m->m_type))) { - if (sb->sb_lastrecord == m) + if (sb->sb_lastrecord == m) { sb->sb_lastrecord = m->m_next; + } m = m_free(m); continue; } @@ -1782,10 +1907,11 @@ sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n) m = m_free(m); continue; } - if (n != NULL) + if (n != NULL) { n->m_next = m; - else + } else { sb->sb_mb = m; + } sb->sb_mbtail = m; sballoc(sb, m); n = m; @@ -1794,10 +1920,11 @@ sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n) n->m_next = NULL; } if (eor != 0) { - if (n != NULL) + if (n != NULL) { n->m_flags |= eor; - else + } else { printf("semi-panic: sbcompress\n"); + } } done: SBLASTMBUFCHK(sb, __func__); @@ -1822,8 +1949,9 @@ sbflush_priq(struct msg_priq *priq) { struct mbuf *m; m = priq->msgq_head; - if (m != NULL) + if (m != NULL) { m_freem_list(m); + } priq->msgq_head = priq->msgq_tail = priq->msgq_lastmsg = NULL; priq->msgq_bytes = priq->msgq_flags = 0; } @@ -1866,8 +1994,9 @@ sbflush(struct sockbuf *sb) * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty: * we would loop forever. Panic instead. 
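The sbcompress() pass in this region unlinks zero-length mbufs as it appends; the kernel additionally copies short tails into the preceding mbuf, which is elided from the toy below. A dummy head node stands in for the sockbuf, and unlinking stands in for m_free().

/* Toy of sbcompress()'s empty-mbuf pruning only. */
#include <assert.h>
#include <stddef.h>

struct mb { struct mb *m_next; int m_len; };

static struct mb *drop_empties(struct mb *m)
{
	struct mb head = { m, 0 };
	struct mb *prev = &head;

	while (prev->m_next) {
		if (prev->m_next->m_len == 0) {
			prev->m_next = prev->m_next->m_next;
		} else {
			prev = prev->m_next;
		}
	}
	return head.m_next;
}

int main(void)
{
	struct mb c = { NULL, 3 }, b = { &c, 0 }, a = { &b, 2 };

	assert(drop_empties(&a) == &a && a.m_next == &c);
	return 0;
}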
*/ - if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len)) + if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len)) { break; + } sbdrop(sb, (int)sb->sb_cc); } @@ -1883,7 +2012,7 @@ sbflush(struct sockbuf *sb) sb_empty_assert(sb, __func__); postevent(0, sb, EV_RWBYTES); - sbunlock(sb, TRUE); /* keep socket locked */ + sbunlock(sb, TRUE); /* keep socket locked */ } /* @@ -1907,8 +2036,8 @@ sbdrop(struct sockbuf *sb, int len) #if MPTCP if (m != NULL && len > 0 && !(sb->sb_flags & SB_RECV) && ((sb->sb_so->so_flags & SOF_MP_SUBFLOW) || - (SOCK_CHECK_DOM(sb->sb_so, PF_MULTIPATH) && - SOCK_CHECK_PROTO(sb->sb_so, IPPROTO_TCP))) && + (SOCK_CHECK_DOM(sb->sb_so, PF_MULTIPATH) && + SOCK_CHECK_PROTO(sb->sb_so, IPPROTO_TCP))) && !(sb->sb_so->so_flags1 & SOF1_POST_FALLBACK_SYNC)) { mptcp_preproc_sbdrop(sb->sb_so, m, (unsigned int)len); } @@ -1945,7 +2074,7 @@ sbdrop(struct sockbuf *sb, int len) if (!(sb->sb_flags & SB_RECV) && (sb->sb_so->so_flags & SOF_ENABLE_MSGS)) { sb->sb_so->so_msg_state-> - msg_serial_bytes = 0; + msg_serial_bytes = 0; } break; } @@ -1958,11 +2087,13 @@ sbdrop(struct sockbuf *sb, int len) m->m_data += len; sb->sb_cc -= len; /* update the send byte count */ - if (sb->sb_flags & SB_SNDBYTE_CNT) - inp_decr_sndbytes_total(sb->sb_so, len); + if (sb->sb_flags & SB_SNDBYTE_CNT) { + inp_decr_sndbytes_total(sb->sb_so, len); + } if (m->m_type != MT_DATA && m->m_type != MT_HEADER && - m->m_type != MT_OOBDATA) + m->m_type != MT_OOBDATA) { sb->sb_ctl -= len; + } break; } len -= m->m_len; @@ -2043,19 +2174,21 @@ sbcreatecontrol(caddr_t p, int size, int type, int level) struct cmsghdr *cp; struct mbuf *m; - if (CMSG_SPACE((u_int)size) > MLEN) - return ((struct mbuf *)NULL); - if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL) - return ((struct mbuf *)NULL); + if (CMSG_SPACE((u_int)size) > MLEN) { + return (struct mbuf *)NULL; + } + if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL) { + return (struct mbuf *)NULL; + } cp = mtod(m, struct cmsghdr *); - VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t))); + VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t))); /* XXX check size? */ (void) memcpy(CMSG_DATA(cp), p, size); m->m_len = CMSG_SPACE(size); cp->cmsg_len = CMSG_LEN(size); cp->cmsg_level = level; cp->cmsg_type = type; - return (m); + return m; } struct mbuf ** @@ -2066,20 +2199,20 @@ sbcreatecontrol_mbuf(caddr_t p, int size, int type, int level, struct mbuf **mp) if (*mp == NULL) { *mp = sbcreatecontrol(p, size, type, level); - return (mp); + return mp; } if (CMSG_SPACE((u_int)size) + (*mp)->m_len > MLEN) { mp = &(*mp)->m_next; *mp = sbcreatecontrol(p, size, type, level); - return (mp); + return mp; } m = *mp; cp = (struct cmsghdr *)(void *)(mtod(m, char *) + m->m_len); /* CMSG_SPACE ensures 32-bit alignment */ - VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t))); + VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t))); m->m_len += CMSG_SPACE(size); /* XXX check size? 
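sbdrop()'s trim loop, visible above, consumes whole mbufs until fewer than m_len bytes remain to drop, then advances the data pointer inside the first surviving mbuf. Toy sketch with an m_off field standing in for the kernel's m_data pointer bump:

/* Toy of sbdrop()'s front trim. */
#include <assert.h>
#include <stddef.h>

struct mb { struct mb *m_next; int m_len; int m_off; };

static struct mb *drop_bytes(struct mb *m, int len)
{
	while (m && len > 0) {
		if (len < m->m_len) {
			m->m_len -= len;    /* partial: trim in place */
			m->m_off += len;
			break;
		}
		len -= m->m_len;            /* whole mbuf consumed */
		m = m->m_next;              /* kernel would m_free() it */
	}
	return m;
}

int main(void)
{
	struct mb b = { NULL, 10, 0 }, a = { &b, 4, 0 };
	struct mb *h = drop_bytes(&a, 6);

	assert(h == &b && b.m_len == 8 && b.m_off == 2);
	return 0;
}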
*/ @@ -2088,7 +2221,7 @@ sbcreatecontrol_mbuf(caddr_t p, int size, int type, int level, struct mbuf **mp) cp->cmsg_level = level; cp->cmsg_type = type; - return (mp); + return mp; } @@ -2100,42 +2233,42 @@ int pru_abort_notsupp(struct socket *so) { #pragma unused(so) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_accept_notsupp(struct socket *so, struct sockaddr **nam) { #pragma unused(so, nam) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_attach_notsupp(struct socket *so, int proto, struct proc *p) { #pragma unused(so, proto, p) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p) { #pragma unused(so, nam, p) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct proc *p) { #pragma unused(so, nam, p) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_connect2_notsupp(struct socket *so1, struct socket *so2) { #pragma unused(so1, so2) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int @@ -2145,7 +2278,7 @@ pru_connectx_notsupp(struct socket *so, struct sockaddr *src, uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written) { #pragma unused(so, src, dst, p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int @@ -2153,56 +2286,56 @@ pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, struct proc *p) { #pragma unused(so, cmd, data, ifp, p) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_detach_notsupp(struct socket *so) { #pragma unused(so) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_disconnect_notsupp(struct socket *so) { #pragma unused(so) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_disconnectx_notsupp(struct socket *so, sae_associd_t aid, sae_connid_t cid) { #pragma unused(so, aid, cid) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_listen_notsupp(struct socket *so, struct proc *p) { #pragma unused(so, p) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam) { #pragma unused(so, nam) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_rcvd_notsupp(struct socket *so, int flags) { #pragma unused(so, flags) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags) { #pragma unused(so, m, flags) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int @@ -2210,7 +2343,7 @@ pru_send_notsupp(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct proc *p) { #pragma unused(so, flags, m, addr, control, p) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int @@ -2218,7 +2351,7 @@ pru_send_list_notsupp(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, struct mbuf *control, struct proc *p) { #pragma unused(so, flags, m, addr, control, p) - return (EOPNOTSUPP); + return EOPNOTSUPP; } /* @@ -2240,7 +2373,7 @@ pru_sense_null(struct socket *so, void *ub, int isstat64) sb->st_blksize = so->so_snd.sb_hiwat; } - return (0); + return 0; } @@ -2249,7 +2382,7 @@ pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags) { #pragma unused(so, addr, uio, top, control, flags) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int @@ -2257,7 +2390,7 @@ pru_sosend_list_notsupp(struct socket *so, struct uio **uio, u_int uiocnt, int flags) { #pragma unused(so, uio, uiocnt, flags) - return (EOPNOTSUPP); + return 
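The pru_*_notsupp stubs that follow, together with the DEFAULT() macro in pru_sanitize(), implement a simple pattern: any user-request hook a protocol leaves NULL is pointed at a stub returning EOPNOTSUPP, so call sites never need NULL checks. Sketch with a reduced, made-up ops table:

/* The pru_sanitize() idea with two illustrative hooks. */
#include <assert.h>
#include <errno.h>
#include <stddef.h>

struct ops {
	int (*op_listen)(void);
	int (*op_shutdown)(void);
};

static int op_notsupp(void)
{
	return EOPNOTSUPP;
}

#define DEFAULT(foo, bar) if ((foo) == NULL) (foo) = (bar)

static void sanitize(struct ops *o)
{
	DEFAULT(o->op_listen, op_notsupp);
	DEFAULT(o->op_shutdown, op_notsupp);
}

int main(void)
{
	struct ops o = { NULL, NULL };

	sanitize(&o);                   /* no hook may stay NULL */
	assert(o.op_listen() == EOPNOTSUPP);
	assert(o.op_shutdown() == EOPNOTSUPP);
	return 0;
}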
EOPNOTSUPP; } int @@ -2265,7 +2398,7 @@ pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { #pragma unused(so, paddr, uio, mp0, controlp, flagsp) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int @@ -2273,28 +2406,28 @@ pru_soreceive_list_notsupp(struct socket *so, struct recv_msg_elem *recv_msg_array, u_int uiocnt, int *flagsp) { #pragma unused(so, recv_msg_array, uiocnt, flagsp) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_shutdown_notsupp(struct socket *so) { #pragma unused(so) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam) { #pragma unused(so, nam) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int pru_sopoll_notsupp(struct socket *so, int events, kauth_cred_t cred, void *wql) { #pragma unused(so, events, cred, wql) - return (EOPNOTSUPP); + return EOPNOTSUPP; } int @@ -2304,20 +2437,20 @@ pru_socheckopt_null(struct socket *so, struct sockopt *sopt) /* * Allow all options for set/get by default. */ - return (0); + return 0; } static int pru_preconnect_null(struct socket *so) { #pragma unused(so) - return (0); + return 0; } void pru_sanitize(struct pr_usrreqs *pru) { -#define DEFAULT(foo, bar) if ((foo) == NULL) (foo) = (bar) +#define DEFAULT(foo, bar) if ((foo) == NULL) (foo) = (bar) DEFAULT(pru->pru_abort, pru_abort_notsupp); DEFAULT(pru->pru_accept, pru_accept_notsupp); DEFAULT(pru->pru_attach, pru_attach_notsupp); @@ -2359,8 +2492,8 @@ pru_sanitize(struct pr_usrreqs *pru) int sb_notify(struct sockbuf *sb) { - return (sb->sb_waiters > 0 || - (sb->sb_flags & (SB_SEL|SB_ASYNC|SB_UPCALL|SB_KNOTE))); + return sb->sb_waiters > 0 || + (sb->sb_flags & (SB_SEL | SB_ASYNC | SB_UPCALL | SB_KNOTE)); } /* @@ -2376,22 +2509,25 @@ sbspace(struct sockbuf *sb) int space = imin((int)(sb->sb_hiwat - sb->sb_cc), (int)(sb->sb_mbmax - sb->sb_mbcnt)); - if (sb->sb_preconn_hiwat != 0) + if (sb->sb_preconn_hiwat != 0) { space = imin((int)(sb->sb_preconn_hiwat - sb->sb_cc), space); + } - if (space < 0) + if (space < 0) { space = 0; + } /* Compensate for data being processed by content filters */ #if CONTENT_FILTER pending = cfil_sock_data_space(sb); #endif /* CONTENT_FILTER */ - if (pending > space) + if (pending > space) { space = 0; - else + } else { space -= pending; + } - return (space); + return space; } /* @@ -2404,39 +2540,41 @@ msgq_sbspace(struct socket *so, struct mbuf *control) int space = 0, error; u_int32_t msgpri = 0; VERIFY(so->so_type == SOCK_STREAM && - SOCK_PROTO(so) == IPPROTO_TCP); + SOCK_PROTO(so) == IPPROTO_TCP); if (control != NULL) { error = tcp_get_msg_priority(control, &msgpri); - if (error) - return (0); + if (error) { + return 0; + } } else { msgpri = MSG_PRI_0; } space = (so->so_snd.sb_idealsize / MSG_PRI_COUNT) - so->so_msg_state->msg_priq[msgpri].msgq_bytes; - if (space < 0) + if (space < 0) { space = 0; - return (space); + } + return space; } /* do we have to send all at once on a socket? */ int sosendallatonce(struct socket *so) { - return (so->so_proto->pr_flags & PR_ATOMIC); + return so->so_proto->pr_flags & PR_ATOMIC; } /* can we read something from so? 
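sbspace(), reindented further down, reports free space as the smaller of the byte headroom (sb_hiwat - sb_cc) and the mbuf-accounting headroom (sb_mbmax - sb_mbcnt), clamped at zero and reduced by bytes a content filter is still holding. The arithmetic in isolation:

/* The sbspace() computation as a pure function. */
#include <assert.h>

static int imin_(int a, int b)
{
	return a < b ? a : b;
}

static int space(int hiwat, int cc, int mbmax, int mbcnt, int pending)
{
	int s = imin_(hiwat - cc, mbmax - mbcnt);

	if (s < 0) {
		s = 0;                  /* already over a limit */
	}
	return pending > s ? 0 : s - pending;
}

int main(void)
{
	assert(space(8192, 1024, 65536, 4096, 0) == 7168);
	assert(space(8192, 9000, 65536, 4096, 0) == 0);     /* over hiwat */
	assert(space(8192, 1024, 65536, 4096, 8000) == 0);  /* filter-held */
	return 0;
}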
*/ int soreadable(struct socket *so) { - return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat || - ((so->so_state & SS_CANTRCVMORE) + return so->so_rcv.sb_cc >= so->so_rcv.sb_lowat || + ((so->so_state & SS_CANTRCVMORE) #if CONTENT_FILTER - && cfil_sock_data_pending(&so->so_rcv) == 0 + && cfil_sock_data_pending(&so->so_rcv) == 0 #endif /* CONTENT_FILTER */ - ) || - so->so_comp.tqh_first || so->so_error); + ) || + so->so_comp.tqh_first || so->so_error; } /* can we write something to so? */ @@ -2445,34 +2583,37 @@ int sowriteable(struct socket *so) { if ((so->so_state & SS_CANTSENDMORE) || - so->so_error > 0) - return (1); - if (so_wait_for_if_feedback(so) || !socanwrite(so)) - return (0); - if (so->so_flags1 & SOF1_PRECONNECT_DATA) - return(1); + so->so_error > 0) { + return 1; + } + if (so_wait_for_if_feedback(so) || !socanwrite(so)) { + return 0; + } + if (so->so_flags1 & SOF1_PRECONNECT_DATA) { + return 1; + } if (sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat) { if (so->so_flags & SOF_NOTSENT_LOWAT) { if ((SOCK_DOM(so) == PF_INET6 || SOCK_DOM(so) == PF_INET) && so->so_type == SOCK_STREAM) { - return (tcp_notsent_lowat_check(so)); + return tcp_notsent_lowat_check(so); } #if MPTCP else if ((SOCK_DOM(so) == PF_MULTIPATH) && (SOCK_PROTO(so) == IPPROTO_TCP)) { - return (mptcp_notsent_lowat_check(so)); + return mptcp_notsent_lowat_check(so); } #endif else { - return (1); + return 1; } } else { - return (1); + return 1; } } - return (0); + return 0; } /* adjust counters in sb reflecting allocation of m */ @@ -2483,8 +2624,9 @@ sballoc(struct sockbuf *sb, struct mbuf *m) u_int32_t cnt = 1; sb->sb_cc += m->m_len; if (m->m_type != MT_DATA && m->m_type != MT_HEADER && - m->m_type != MT_OOBDATA) + m->m_type != MT_OOBDATA) { sb->sb_ctl += m->m_len; + } sb->sb_mbcnt += MSIZE; if (m->m_flags & M_EXT) { @@ -2493,8 +2635,9 @@ sballoc(struct sockbuf *sb, struct mbuf *m) } OSAddAtomic(cnt, &total_sbmb_cnt); VERIFY(total_sbmb_cnt > 0); - if (total_sbmb_cnt > total_sbmb_cnt_peak) + if (total_sbmb_cnt > total_sbmb_cnt_peak) { total_sbmb_cnt_peak = total_sbmb_cnt; + } /* * If data is being added to the send socket buffer, @@ -2514,8 +2657,9 @@ sbfree(struct sockbuf *sb, struct mbuf *m) sb->sb_cc -= m->m_len; if (m->m_type != MT_DATA && m->m_type != MT_HEADER && - m->m_type != MT_OOBDATA) + m->m_type != MT_OOBDATA) { sb->sb_ctl -= m->m_len; + } sb->sb_mbcnt -= MSIZE; if (m->m_flags & M_EXT) { sb->sb_mbcnt -= m->m_ext.ext_size; @@ -2523,15 +2667,17 @@ sbfree(struct sockbuf *sb, struct mbuf *m) } OSAddAtomic(cnt, &total_sbmb_cnt); VERIFY(total_sbmb_cnt >= 0); - if (total_sbmb_cnt < total_sbmb_cnt_floor) + if (total_sbmb_cnt < total_sbmb_cnt_floor) { total_sbmb_cnt_floor = total_sbmb_cnt; + } /* * If data is being removed from the send socket buffer, * update the send byte count */ - if (sb->sb_flags & SB_SNDBYTE_CNT) + if (sb->sb_flags & SB_SNDBYTE_CNT) { inp_decr_sndbytes_total(sb->sb_so, m->m_len); + } } /* @@ -2571,16 +2717,18 @@ sblock(struct sockbuf *sb, uint32_t flags) * Don't panic if we are defunct because SB_LOCK has * been cleared by sodefunct() */ - if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK)) + if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK)) { panic("%s: SB_LOCK not held for %p\n", __func__, sb); + } /* Keep the sockbuf locked */ - return (0); + return 0; } - if ((sb->sb_flags & SB_LOCK) && !(flags & SBL_WAIT)) - return (EWOULDBLOCK); + if ((sb->sb_flags & SB_LOCK) && !(flags & SBL_WAIT)) { + return EWOULDBLOCK; + } /* * We may get here from sorflush(), in which case 
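soreadable()/sowriteable() here are the predicates poll(2) and select(2) ultimately consult, and the low-water marks they compare against are user-settable. A sketch raising SO_RCVLOWAT so readability is not reported until 128 bytes are queued; support for changing low-water marks varies by platform, so the setsockopt() may legitimately fail.

/* Raising the receive low-water mark from user space. */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_STREAM, 0);
	int lowat = 128;

	if (s < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(s, SOL_SOCKET, SO_RCVLOWAT, &lowat,
	    sizeof(lowat)) < 0) {
		perror("setsockopt(SO_RCVLOWAT)");  /* may be unsupported */
	}
	return 0;
}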
"sb" may not * point to the real socket buffer. Use the actual socket buffer @@ -2594,8 +2742,8 @@ sblock(struct sockbuf *sb, uint32_t flags) * until it clears the */ while ((sb->sb_flags & SB_LOCK) || - ((so->so_flags & SOF_CONTENT_FILTER) && - sb->sb_cfil_thread != NULL)) { + ((so->so_flags & SOF_CONTENT_FILTER) && + sb->sb_cfil_thread != NULL)) { lck_mtx_t *mutex_held; /* @@ -2604,10 +2752,11 @@ sblock(struct sockbuf *sb, uint32_t flags) * at that time pr_getlock() may no longer be able to return * us the lock. This will be fixed in future. */ - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); @@ -2631,11 +2780,12 @@ sblock(struct sockbuf *sb, uint32_t flags) SOCK_DOM(so), SOCK_TYPE(so), error); } - if (error != 0) - return (error); + if (error != 0) { + return error; + } } sb->sb_flags |= SB_LOCK; - return (0); + return 0; } /* @@ -2694,13 +2844,14 @@ sbunlock(struct sockbuf *sb, boolean_t keeplocked) } } - if (!keeplocked) { /* unlock on exit */ + if (!keeplocked) { /* unlock on exit */ lck_mtx_t *mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); @@ -2715,22 +2866,25 @@ sbunlock(struct sockbuf *sb, boolean_t keeplocked) void sorwakeup(struct socket *so) { - if (sb_notify(&so->so_rcv)) + if (sb_notify(&so->so_rcv)) { sowakeup(so, &so->so_rcv); + } } void sowwakeup(struct socket *so) { - if (sb_notify(&so->so_snd)) + if (sb_notify(&so->so_snd)) { sowakeup(so, &so->so_snd); + } } void soevent(struct socket *so, long hint) { - if (so->so_flags & SOF_KNOTE) + if (so->so_flags & SOF_KNOTE) { KNOTE(&so->so_klist, hint); + } soevupcall(so, hint); @@ -2741,8 +2895,9 @@ soevent(struct socket *so, long hint) if ((hint & SO_FILT_HINT_IFDENIED) && !(so->so_flags & SOF_MP_SUBFLOW) && !(so->so_restrictions & SO_RESTRICT_DENY_CELLULAR) && - !(so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) + !(so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) { soevent_ifdenied(so); + } } void @@ -2752,8 +2907,9 @@ soevupcall(struct socket *so, u_int32_t hint) caddr_t so_eventarg = so->so_eventarg; hint &= so->so_eventmask; - if (hint != 0) + if (hint != 0) { so->so_event(so, so_eventarg, hint); + } } } @@ -2762,7 +2918,7 @@ soevent_ifdenied(struct socket *so) { struct kev_netpolicy_ifdenied ev_ifdenied; - bzero(&ev_ifdenied, sizeof (ev_ifdenied)); + bzero(&ev_ifdenied, sizeof(ev_ifdenied)); /* * The event consumer is interested about the effective {upid,pid,uuid} * info which can be different than the those related to the process @@ -2811,7 +2967,7 @@ soevent_ifdenied(struct socket *so) " [delegated]" : "")); } netpolicy_post_msg(KEV_NETPOLICY_IFDENIED, &ev_ifdenied.ev_data, - sizeof (ev_ifdenied)); + sizeof(ev_ifdenied)); } } @@ -2825,9 +2981,10 @@ dup_sockaddr(struct sockaddr *sa, int canwait) MALLOC(sa2, struct sockaddr *, sa->sa_len, M_SONAME, canwait ? 
M_WAITOK : M_NOWAIT); - if (sa2) + if (sa2) { bcopy(sa, sa2, sa->sa_len); - return (sa2); + } + return sa2; } /* @@ -2841,7 +2998,7 @@ dup_sockaddr(struct sockaddr *sa, int canwait) void sotoxsocket(struct socket *so, struct xsocket *xso) { - xso->xso_len = sizeof (*xso); + xso->xso_len = sizeof(*xso); xso->xso_so = (_XSOCKET_PTR(struct socket *))VM_KERNEL_ADDRPERM(so); xso->so_type = so->so_type; xso->so_options = (short)(so->so_options & 0xffff); @@ -2872,7 +3029,7 @@ sotoxsocket(struct socket *so, struct xsocket *xso) void sotoxsocket64(struct socket *so, struct xsocket64 *xso) { - xso->xso_len = sizeof (*xso); + xso->xso_len = sizeof(*xso); xso->xso_so = (u_int64_t)VM_KERNEL_ADDRPERM(so); xso->so_type = so->so_type; xso->so_options = (short)(so->so_options & 0xffff); @@ -2916,8 +3073,9 @@ sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb) xsb->sb_flags = sb->sb_flags; xsb->sb_timeo = (short) (sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick; - if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0) + if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0) { xsb->sb_timeo = 1; + } } /* @@ -2927,45 +3085,47 @@ sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb) inline int soisthrottled(struct socket *so) { - return (so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND); + return so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND; } inline int soisprivilegedtraffic(struct socket *so) { - return ((so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS) ? 1 : 0); + return (so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS) ? 1 : 0; } inline int soissrcbackground(struct socket *so) { - return ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND) || - IS_SO_TC_BACKGROUND(so->so_traffic_class)); + return (so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND) || + IS_SO_TC_BACKGROUND(so->so_traffic_class); } inline int soissrcrealtime(struct socket *so) { - return (so->so_traffic_class >= SO_TC_AV && - so->so_traffic_class <= SO_TC_VO); + return so->so_traffic_class >= SO_TC_AV && + so->so_traffic_class <= SO_TC_VO; } inline int soissrcbesteffort(struct socket *so) { - return (so->so_traffic_class == SO_TC_BE || - so->so_traffic_class == SO_TC_RD || - so->so_traffic_class == SO_TC_OAM); + return so->so_traffic_class == SO_TC_BE || + so->so_traffic_class == SO_TC_RD || + so->so_traffic_class == SO_TC_OAM; } void soclearfastopen(struct socket *so) { - if (so->so_flags1 & SOF1_PRECONNECT_DATA) + if (so->so_flags1 & SOF1_PRECONNECT_DATA) { so->so_flags1 &= ~SOF1_PRECONNECT_DATA; + } - if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) + if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) { so->so_flags1 &= ~SOF1_DATA_IDEMPOTENT; + } } void @@ -2979,7 +3139,7 @@ sonullevent(struct socket *so, void *arg, uint32_t hint) * branch of the MIB. 
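sbtoxsockbuf() below exports sb_timeo by converting a timeval to scheduler ticks and rounding any nonzero sub-tick timeout up to one tick, so a tiny timeout cannot be mistaken for "no timeout". Sketch with assumed hz/tick values; the real ones are kernel globals.

/* The timeval-to-ticks conversion, with assumed constants. */
#include <assert.h>

#define HZ_   100       /* assumed scheduler ticks per second */
#define TICK_ 10000     /* assumed microseconds per tick */

static short to_ticks(long sec, long usec)
{
	short t = (short)(sec * HZ_ + usec / TICK_);

	if (t == 0 && usec != 0) {
		t = 1;          /* a tiny timeout must not become "none" */
	}
	return t;
}

int main(void)
{
	assert(to_ticks(1, 0) == 100);
	assert(to_ticks(0, 5) == 1);    /* sub-tick rounds up */
	return 0;
}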
*/ SYSCTL_NODE(_kern, KERN_IPC, ipc, - CTLFLAG_RW|CTLFLAG_LOCKED|CTLFLAG_ANYBODY, 0, "IPC"); + CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "IPC"); /* Check that the maximum socket buffer size is within a range */ @@ -2989,7 +3149,7 @@ sysctl_sb_max SYSCTL_HANDLER_ARGS #pragma unused(oidp, arg1, arg2) u_int32_t new_value; int changed = 0; - int error = sysctl_io_number(req, sb_max, sizeof (u_int32_t), + int error = sysctl_io_number(req, sb_max, sizeof(u_int32_t), &new_value, &changed); if (!error && changed) { if (new_value > LOW_SB_MAX && new_value <= high_sb_max) { @@ -2998,28 +3158,28 @@ sysctl_sb_max SYSCTL_HANDLER_ARGS error = ERANGE; } } - return (error); + return error; } SYSCTL_PROC(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &sb_max, 0, &sysctl_sb_max, "IU", "Maximum socket buffer size"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &sb_max, 0, &sysctl_sb_max, "IU", "Maximum socket buffer size"); SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, - CTLFLAG_RW | CTLFLAG_LOCKED, &sb_efficiency, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &sb_efficiency, 0, ""); SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, - CTLFLAG_RD | CTLFLAG_LOCKED, &nmbclusters, 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &nmbclusters, 0, ""); SYSCTL_INT(_kern_ipc, OID_AUTO, njcl, - CTLFLAG_RD | CTLFLAG_LOCKED, &njcl, 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &njcl, 0, ""); SYSCTL_INT(_kern_ipc, OID_AUTO, njclbytes, - CTLFLAG_RD | CTLFLAG_LOCKED, &njclbytes, 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &njclbytes, 0, ""); SYSCTL_INT(_kern_ipc, KIPC_SOQLIMITCOMPAT, soqlimitcompat, - CTLFLAG_RW | CTLFLAG_LOCKED, &soqlimitcompat, 1, - "Enable socket queue limit compatibility"); + CTLFLAG_RW | CTLFLAG_LOCKED, &soqlimitcompat, 1, + "Enable socket queue limit compatibility"); /* * Hack alert -- rdar://33572856 @@ -3040,7 +3200,7 @@ sysctl_soqlencomp SYSCTL_HANDLER_ARGS #pragma unused(oidp, arg1, arg2) u_int32_t new_value; int changed = 0; - int error = sysctl_io_number(req, soqlencomp, sizeof (u_int32_t), + int error = sysctl_io_number(req, soqlencomp, sizeof(u_int32_t), &new_value, &changed); if (!error && changed) { soqlencomp = new_value; @@ -3049,28 +3209,28 @@ sysctl_soqlencomp SYSCTL_HANDLER_ARGS tcptv_persmin_val = 6 * TCP_RETRANSHZ; } } - return (error); + return error; } SYSCTL_PROC(_kern_ipc, OID_AUTO, soqlencomp, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &soqlencomp, 0, &sysctl_soqlencomp, "IU", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &soqlencomp, 0, &sysctl_soqlencomp, "IU", ""); SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt, CTLFLAG_RD | CTLFLAG_LOCKED, - &total_sbmb_cnt, 0, ""); + &total_sbmb_cnt, 0, ""); SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt_peak, CTLFLAG_RD | CTLFLAG_LOCKED, - &total_sbmb_cnt_peak, 0, ""); + &total_sbmb_cnt_peak, 0, ""); SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt_floor, CTLFLAG_RD | CTLFLAG_LOCKED, - &total_sbmb_cnt_floor, 0, ""); + &total_sbmb_cnt_floor, 0, ""); SYSCTL_QUAD(_kern_ipc, OID_AUTO, sbmb_limreached, CTLFLAG_RD | CTLFLAG_LOCKED, - &sbmb_limreached, ""); + &sbmb_limreached, ""); SYSCTL_NODE(_kern_ipc, OID_AUTO, io_policy, CTLFLAG_RW, 0, "network IO policy"); SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED, - &net_io_policy_log, 0, ""); + &net_io_policy_log, 0, ""); #if CONFIG_PROC_UUID_POLICY SYSCTL_INT(_kern_ipc_io_policy, OID_AUTO, uuid, CTLFLAG_RW | CTLFLAG_LOCKED, - &net_io_policy_uuid, 0, ""); + &net_io_policy_uuid, 0, ""); #endif /* CONFIG_PROC_UUID_POLICY */ diff --git 
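The sysctl_sb_max handler registered in this region range-checks writes (values must sit between LOW_SB_MAX and high_sb_max, else ERANGE). The knob itself is reachable from user space on macOS/BSD with sysctlbyname(3):

/* Reading kern.ipc.maxsockbuf from user space. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
	unsigned int sbmax = 0;
	size_t len = sizeof(sbmax);

	if (sysctlbyname("kern.ipc.maxsockbuf", &sbmax, &len, NULL, 0) != 0) {
		perror("sysctlbyname");
		return 1;
	}
	printf("kern.ipc.maxsockbuf = %u\n", sbmax);
	return 0;
}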
a/bsd/kern/uipc_syscalls.c b/bsd/kern/uipc_syscalls.c index eb2c9fa6a..b89e2cc49 100644 --- a/bsd/kern/uipc_syscalls.c +++ b/bsd/kern/uipc_syscalls.c @@ -104,37 +104,37 @@ #include #endif /* MAC_SOCKET_SUBSET */ -#define f_flag f_fglob->fg_flag -#define f_type f_fglob->fg_ops->fo_type -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data - -#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0) -#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2) -#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETSOCK, 1) -#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETSOCK, 3) -#define DBG_FNC_SENDMSG NETDBG_CODE(DBG_NETSOCK, (1 << 8) | 1) -#define DBG_FNC_SENDTO NETDBG_CODE(DBG_NETSOCK, (2 << 8) | 1) -#define DBG_FNC_SENDIT NETDBG_CODE(DBG_NETSOCK, (3 << 8) | 1) -#define DBG_FNC_RECVFROM NETDBG_CODE(DBG_NETSOCK, (5 << 8)) -#define DBG_FNC_RECVMSG NETDBG_CODE(DBG_NETSOCK, (6 << 8)) -#define DBG_FNC_RECVIT NETDBG_CODE(DBG_NETSOCK, (7 << 8)) -#define DBG_FNC_SENDFILE NETDBG_CODE(DBG_NETSOCK, (10 << 8)) -#define DBG_FNC_SENDFILE_WAIT NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 1)) -#define DBG_FNC_SENDFILE_READ NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 2)) -#define DBG_FNC_SENDFILE_SEND NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 3)) -#define DBG_FNC_SENDMSG_X NETDBG_CODE(DBG_NETSOCK, (11 << 8)) -#define DBG_FNC_RECVMSG_X NETDBG_CODE(DBG_NETSOCK, (12 << 8)) +#define f_flag f_fglob->fg_flag +#define f_type f_fglob->fg_ops->fo_type +#define f_msgcount f_fglob->fg_msgcount +#define f_cred f_fglob->fg_cred +#define f_ops f_fglob->fg_ops +#define f_offset f_fglob->fg_offset +#define f_data f_fglob->fg_data + +#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETSOCK, 0) +#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETSOCK, 2) +#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETSOCK, 1) +#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETSOCK, 3) +#define DBG_FNC_SENDMSG NETDBG_CODE(DBG_NETSOCK, (1 << 8) | 1) +#define DBG_FNC_SENDTO NETDBG_CODE(DBG_NETSOCK, (2 << 8) | 1) +#define DBG_FNC_SENDIT NETDBG_CODE(DBG_NETSOCK, (3 << 8) | 1) +#define DBG_FNC_RECVFROM NETDBG_CODE(DBG_NETSOCK, (5 << 8)) +#define DBG_FNC_RECVMSG NETDBG_CODE(DBG_NETSOCK, (6 << 8)) +#define DBG_FNC_RECVIT NETDBG_CODE(DBG_NETSOCK, (7 << 8)) +#define DBG_FNC_SENDFILE NETDBG_CODE(DBG_NETSOCK, (10 << 8)) +#define DBG_FNC_SENDFILE_WAIT NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 1)) +#define DBG_FNC_SENDFILE_READ NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 2)) +#define DBG_FNC_SENDFILE_SEND NETDBG_CODE(DBG_NETSOCK, ((10 << 8) | 3)) +#define DBG_FNC_SENDMSG_X NETDBG_CODE(DBG_NETSOCK, (11 << 8)) +#define DBG_FNC_RECVMSG_X NETDBG_CODE(DBG_NETSOCK, (12 << 8)) #if DEBUG || DEVELOPMENT -#define DEBUG_KERNEL_ADDRPERM(_v) (_v) -#define DBG_PRINTF(...) printf(__VA_ARGS__) +#define DEBUG_KERNEL_ADDRPERM(_v) (_v) +#define DBG_PRINTF(...) printf(__VA_ARGS__) #else -#define DEBUG_KERNEL_ADDRPERM(_v) VM_KERNEL_ADDRPERM(_v) -#define DBG_PRINTF(...) do { } while (0) +#define DEBUG_KERNEL_ADDRPERM(_v) VM_KERNEL_ADDRPERM(_v) +#define DBG_PRINTF(...) 
do { } while (0) #endif /* TODO: should be in header file */ @@ -180,10 +180,10 @@ SYSCTL_DECL(_kern_ipc); static u_int somaxsendmsgx = 100; SYSCTL_UINT(_kern_ipc, OID_AUTO, maxsendmsgx, - CTLFLAG_RW | CTLFLAG_LOCKED, &somaxsendmsgx, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &somaxsendmsgx, 0, ""); static u_int somaxrecvmsgx = 100; SYSCTL_UINT(_kern_ipc, OID_AUTO, maxrecvmsgx, - CTLFLAG_RW | CTLFLAG_LOCKED, &somaxrecvmsgx, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &somaxrecvmsgx, 0, ""); /* * System call interface to the socket abstraction. @@ -206,30 +206,30 @@ extern const struct fileops socketops; */ int socket(struct proc *p, - struct socket_args *uap, - int32_t *retval) + struct socket_args *uap, + int32_t *retval) { - return (socket_common(p, uap->domain, uap->type, uap->protocol, - proc_selfpid(), retval, 0)); + return socket_common(p, uap->domain, uap->type, uap->protocol, + proc_selfpid(), retval, 0); } int socket_delegate(struct proc *p, - struct socket_delegate_args *uap, - int32_t *retval) + struct socket_delegate_args *uap, + int32_t *retval) { return socket_common(p, uap->domain, uap->type, uap->protocol, - uap->epid, retval, 1); + uap->epid, retval, 1); } static int socket_common(struct proc *p, - int domain, - int type, - int protocol, - pid_t epid, - int32_t *retval, - int delegate) + int domain, + int type, + int protocol, + pid_t epid, + int32_t *retval, + int delegate) { struct socket *so; struct fileproc *fp; @@ -238,28 +238,31 @@ socket_common(struct proc *p, AUDIT_ARG(socket, domain, type, protocol); #if CONFIG_MACF_SOCKET_SUBSET if ((error = mac_socket_check_create(kauth_cred_get(), domain, - type, protocol)) != 0) - return (error); + type, protocol)) != 0) { + return error; + } #endif /* MAC_SOCKET_SUBSET */ if (delegate) { error = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0); - if (error) - return (EACCES); + if (error) { + return EACCES; + } } error = falloc(p, &fp, &fd, vfs_context_current()); if (error) { - return (error); + return error; } - fp->f_flag = FREAD|FWRITE; + fp->f_flag = FREAD | FWRITE; fp->f_ops = &socketops; - if (delegate) + if (delegate) { error = socreate_delegate(domain, &so, type, protocol, epid); - else + } else { error = socreate(domain, &so, type, protocol); + } if (error) { fp_free(p, fd, fp); @@ -278,7 +281,7 @@ socket_common(struct proc *p, fd, 0, (int64_t)VM_KERNEL_ADDRPERM(so)); } } - return (error); + return error; } /* @@ -306,8 +309,9 @@ bind(__unused proc_t p, struct bind_args *uap, __unused int32_t *retval) AUDIT_ARG(fd, uap->s); error = file_socket(uap->s, &so); - if (error != 0) - return (error); + if (error != 0) { + return error; + } if (so == NULL) { error = EBADF; goto out; @@ -316,7 +320,7 @@ bind(__unused proc_t p, struct bind_args *uap, __unused int32_t *retval) error = EDESTADDRREQ; goto out; } - if (uap->namelen > sizeof (ss)) { + if (uap->namelen > sizeof(ss)) { error = getsockaddr(so, &sa, uap->name, uap->namelen, TRUE); } else { error = getsockaddr_s(so, &ss, uap->name, uap->namelen, TRUE); @@ -325,21 +329,24 @@ bind(__unused proc_t p, struct bind_args *uap, __unused int32_t *retval) want_free = FALSE; } } - if (error != 0) + if (error != 0) { goto out; + } AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), sa); #if CONFIG_MACF_SOCKET_SUBSET if ((sa != NULL && sa->sa_family == AF_SYSTEM) || - (error = mac_socket_check_bind(kauth_cred_get(), so, sa)) == 0) - error = sobindlock(so, sa, 1); /* will lock socket */ + (error = mac_socket_check_bind(kauth_cred_get(), so, sa)) == 0) { + 
error = sobindlock(so, sa, 1); /* will lock socket */ + } #else - error = sobindlock(so, sa, 1); /* will lock socket */ + error = sobindlock(so, sa, 1); /* will lock socket */ #endif /* MAC_SOCKET_SUBSET */ - if (want_free) + if (want_free) { FREE(sa, M_SONAME); + } out: file_drop(uap->s); - return (error); + return error; } /* @@ -361,23 +368,26 @@ listen(__unused struct proc *p, struct listen_args *uap, AUDIT_ARG(fd, uap->s); error = file_socket(uap->s, &so); - if (error) - return (error); + if (error) { + return error; + } if (so != NULL) #if CONFIG_MACF_SOCKET_SUBSET { error = mac_socket_check_listen(kauth_cred_get(), so); - if (error == 0) + if (error == 0) { error = solisten(so, uap->backlog); + } } #else - error = solisten(so, uap->backlog); + { error = solisten(so, uap->backlog);} #endif /* MAC_SOCKET_SUBSET */ - else + else { error = EBADF; + } file_drop(uap->s); - return (error); + return error; } /* @@ -409,7 +419,7 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, lck_mtx_t *mutex_held; int fd = uap->s; int newfd; - short fflag; /* type must match fp->f_flag */ + short fflag; /* type must match fp->f_flag */ int dosocklock = 0; *retval = -1; @@ -418,28 +428,31 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, if (uap->name) { error = copyin(uap->anamelen, (caddr_t)&namelen, - sizeof (socklen_t)); - if (error) - return (error); + sizeof(socklen_t)); + if (error) { + return error; + } } error = fp_getfsock(p, fd, &fp, &head); if (error) { - if (error == EOPNOTSUPP) + if (error == EOPNOTSUPP) { error = ENOTSOCK; - return (error); + } + return error; } if (head == NULL) { error = EBADF; goto out; } #if CONFIG_MACF_SOCKET_SUBSET - if ((error = mac_socket_check_accept(kauth_cred_get(), head)) != 0) + if ((error = mac_socket_check_accept(kauth_cred_get(), head)) != 0) { goto out; + } #endif /* MAC_SOCKET_SUBSET */ socket_lock(head, 1); - if (head->so_proto->pr_getlock != NULL) { + if (head->so_proto->pr_getlock != NULL) { mutex_held = (*head->so_proto->pr_getlock)(head, PR_F_WILLUNLOCK); dosocklock = 1; } else { @@ -468,14 +481,16 @@ check_again: head->so_error = ECONNABORTED; break; } - if (head->so_usecount < 1) + if (head->so_usecount < 1) { panic("accept: head=%p refcount=%d\n", head, head->so_usecount); + } error = msleep((caddr_t)&head->so_timeo, mutex_held, PSOCK | PCATCH, "accept", 0); - if (head->so_usecount < 1) + if (head->so_usecount < 1) { panic("accept: 2 head=%p refcount=%d\n", head, head->so_usecount); + } if ((head->so_state & SS_DRAINING)) { error = ECONNABORTED; } @@ -568,8 +583,9 @@ check_again: fp->f_data = (caddr_t)so; socket_lock(head, 0); - if (dosocklock) + if (dosocklock) { socket_lock(so, 1); + } /* Sync socket non-blocking/async state with file flags */ if (fp->f_flag & FNONBLOCK) { @@ -592,26 +608,28 @@ check_again: socket_unlock(head, 1); if (sa == NULL) { namelen = 0; - if (uap->name) + if (uap->name) { goto gotnoname; + } error = 0; goto releasefd; } AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), sa); if (uap->name) { - socklen_t sa_len; + socklen_t sa_len; /* save sa_len before it is destroyed */ sa_len = sa->sa_len; namelen = MIN(namelen, sa_len); error = copyout(sa, uap->name, namelen); - if (!error) + if (!error) { /* return the actual, untruncated address length */ namelen = sa_len; + } gotnoname: error = copyout((caddr_t)&namelen, uap->anamelen, - sizeof (socklen_t)); + sizeof(socklen_t)); } FREE(sa, M_SONAME); @@ -625,8 +643,9 @@ releasefd: SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL); } - if 
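In the accept() path below, copyout of the peer address is capped at the caller's namelen, but the untruncated sa_len is what gets written back through anamelen. Caller-side sketch of detecting that truncation; bind()/listen() setup is elided, so the accept() here shows the calling convention only.

/* Detecting a truncated peer address after accept(). */
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int lst = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr sa;             /* deliberately small buffer */
	socklen_t alen = sizeof(sa);
	int conn;

	conn = accept(lst, &sa, &alen);
	if (conn >= 0 && alen > sizeof(sa)) {
		fprintf(stderr, "peer address truncated: need %u bytes\n",
		    (unsigned)alen);
	}
	return 0;
}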
(dosocklock) + if (dosocklock) { socket_unlock(so, 1); + } proc_fdlock(p); procfdtbl_releasefd(p, newfd, NULL); @@ -640,15 +659,15 @@ out: KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_START, newfd, 0, (int64_t)VM_KERNEL_ADDRPERM(so)); } - return (error); + return error; } int accept(struct proc *p, struct accept_args *uap, int32_t *retval) { __pthread_testcancel(1); - return (accept_nocancel(p, (struct accept_nocancel_args *)uap, - retval)); + return accept_nocancel(p, (struct accept_nocancel_args *)uap, + retval); } /* @@ -678,8 +697,8 @@ int connect(struct proc *p, struct connect_args *uap, int32_t *retval) { __pthread_testcancel(1); - return (connect_nocancel(p, (struct connect_nocancel_args *)uap, - retval)); + return connect_nocancel(p, (struct connect_nocancel_args *)uap, + retval); } int @@ -695,8 +714,9 @@ connect_nocancel(proc_t p, struct connect_nocancel_args *uap, int32_t *retval) AUDIT_ARG(fd, uap->s); error = file_socket(fd, &so); - if (error != 0) - return (error); + if (error != 0) { + return error; + } if (so == NULL) { error = EBADF; goto out; @@ -709,25 +729,29 @@ connect_nocancel(proc_t p, struct connect_nocancel_args *uap, int32_t *retval) dgram = (so->so_type == SOCK_DGRAM); /* Get socket address now before we obtain socket lock */ - if (uap->namelen > sizeof (ss)) { + if (uap->namelen > sizeof(ss)) { error = getsockaddr(so, &sa, uap->name, uap->namelen, !dgram); } else { error = getsockaddr_s(so, &ss, uap->name, uap->namelen, !dgram); - if (error == 0) + if (error == 0) { sa = (struct sockaddr *)&ss; + } } - if (error != 0) + if (error != 0) { goto out; + } error = connectit(so, sa); - if (sa != NULL && sa != SA(&ss)) + if (sa != NULL && sa != SA(&ss)) { FREE(sa, M_SONAME); - if (error == ERESTART) + } + if (error == ERESTART) { error = EINTR; + } out: file_drop(fd); - return (error); + return error; } static int @@ -749,8 +773,9 @@ connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval) AUDIT_ARG(fd, uap->socket); error = file_socket(fd, &so); - if (error != 0) - return (error); + if (error != 0) { + return error; + } if (so == NULL) { error = EBADF; goto out; @@ -763,8 +788,9 @@ connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval) if (IS_64BIT_PROCESS(p)) { error = copyin(uap->endpoints, (caddr_t)&ep64, sizeof(ep64)); - if (error != 0) + if (error != 0) { goto out; + } ep.sae_srcif = ep64.sae_srcif; ep.sae_srcaddr = ep64.sae_srcaddr; @@ -773,8 +799,9 @@ connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval) ep.sae_dstaddrlen = ep64.sae_dstaddrlen; } else { error = copyin(uap->endpoints, (caddr_t)&ep32, sizeof(ep32)); - if (error != 0) + if (error != 0) { goto out; + } ep.sae_srcif = ep32.sae_srcif; ep.sae_srcaddr = ep32.sae_srcaddr; @@ -791,16 +818,18 @@ connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval) /* Get socket address now before we obtain socket lock */ if (ep.sae_srcaddr != USER_ADDR_NULL) { - if (ep.sae_srcaddrlen > sizeof (ss)) { + if (ep.sae_srcaddrlen > sizeof(ss)) { error = getsockaddr(so, &src, ep.sae_srcaddr, ep.sae_srcaddrlen, dgram); } else { error = getsockaddr_s(so, &ss, ep.sae_srcaddr, ep.sae_srcaddrlen, dgram); - if (error == 0) + if (error == 0) { src = (struct sockaddr *)&ss; + } } - if (error) + if (error) { goto out; + } } if (ep.sae_dstaddr == USER_ADDR_NULL) { @@ -809,27 +838,29 @@ connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval) } /* Get socket address now before we obtain socket lock */ - if (ep.sae_dstaddrlen > sizeof (sd)) { + 
if (ep.sae_dstaddrlen > sizeof(sd)) { error = getsockaddr(so, &dst, ep.sae_dstaddr, ep.sae_dstaddrlen, dgram); } else { error = getsockaddr_s(so, &sd, ep.sae_dstaddr, ep.sae_dstaddrlen, dgram); - if (error == 0) + if (error == 0) { dst = (struct sockaddr *)&sd; + } } - if (error) + if (error) { goto out; + } VERIFY(dst != NULL); if (uap->iov != USER_ADDR_NULL) { /* Verify range before calling uio_create() */ - if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV){ + if (uap->iovcnt <= 0 || uap->iovcnt > UIO_MAXIOV) { error = EINVAL; goto out; } - if (uap->len == USER_ADDR_NULL){ + if (uap->len == USER_ADDR_NULL) { error = EINVAL; goto out; } @@ -854,10 +885,11 @@ connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval) goto out; } error = copyin_user_iovec_array(uap->iov, - IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, - uap->iovcnt, iovp); - if (error != 0) + IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, + uap->iovcnt, iovp); + if (error != 0) { goto out; + } /* finish setup of uio_t */ error = uio_calculateresid(auio); @@ -868,32 +900,37 @@ connectx_nocancel(struct proc *p, struct connectx_args *uap, int *retval) error = connectitx(so, src, dst, p, ep.sae_srcif, uap->associd, &cid, auio, uap->flags, &bytes_written); - if (error == ERESTART) + if (error == ERESTART) { error = EINTR; + } if (uap->len != USER_ADDR_NULL) { - error1 = copyout(&bytes_written, uap->len, sizeof (uap->len)); + error1 = copyout(&bytes_written, uap->len, sizeof(uap->len)); /* give precedence to connectitx errors */ - if ((error1 != 0) && (error == 0)) + if ((error1 != 0) && (error == 0)) { error = error1; + } } if (uap->connid != USER_ADDR_NULL) { - error1 = copyout(&cid, uap->connid, sizeof (cid)); + error1 = copyout(&cid, uap->connid, sizeof(cid)); /* give precedence to connectitx errors */ - if ((error1 != 0) && (error == 0)) + if ((error1 != 0) && (error == 0)) { error = error1; + } } out: file_drop(fd); if (auio != NULL) { uio_free(auio); } - if (src != NULL && src != SA(&ss)) + if (src != NULL && src != SA(&ss)) { FREE(src, M_SONAME); - if (dst != NULL && dst != SA(&sd)) + } + if (dst != NULL && dst != SA(&sd)) { FREE(dst, M_SONAME); - return (error); + } + return error; } int @@ -904,7 +941,7 @@ connectx(struct proc *p, struct connectx_args *uap, int *retval) * an unofficial cancellation point. 
*/ __pthread_testcancel(1); - return (connectx_nocancel(p, uap, retval)); + return connectx_nocancel(p, uap, retval); } static int @@ -914,8 +951,9 @@ connectit(struct socket *so, struct sockaddr *sa) AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), sa); #if CONFIG_MACF_SOCKET_SUBSET - if ((error = mac_socket_check_connect(kauth_cred_get(), so, sa)) != 0) - return (error); + if ((error = mac_socket_check_connect(kauth_cred_get(), so, sa)) != 0) { + return error; + } #endif /* MAC_SOCKET_SUBSET */ socket_lock(so, 1); @@ -935,17 +973,19 @@ connectit(struct socket *so, struct sockaddr *sa) while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { lck_mtx_t *mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } error = msleep((caddr_t)&so->so_timeo, mutex_held, PSOCK | PCATCH, __func__, 0); if (so->so_state & SS_DRAINING) { error = ECONNABORTED; } - if (error != 0) + if (error != 0) { break; + } } if (error == 0) { error = so->so_error; @@ -953,7 +993,7 @@ connectit(struct socket *so, struct sockaddr *sa) } out: socket_unlock(so, 1); - return (error); + return error; } static int @@ -969,8 +1009,9 @@ connectitx(struct socket *so, struct sockaddr *src, AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), dst); #if CONFIG_MACF_SOCKET_SUBSET - if ((error = mac_socket_check_connect(kauth_cred_get(), so, dst)) != 0) - return (error); + if ((error = mac_socket_check_connect(kauth_cred_get(), so, dst)) != 0) { + return error; + } #endif /* MAC_SOCKET_SUBSET */ socket_lock(so, 1); @@ -983,8 +1024,9 @@ connectitx(struct socket *so, struct sockaddr *src, (flags & CONNECT_DATA_IDEMPOTENT)) { so->so_flags1 |= SOF1_DATA_IDEMPOTENT; - if (flags & CONNECT_DATA_AUTHENTICATED) + if (flags & CONNECT_DATA_AUTHENTICATED) { so->so_flags1 |= SOF1_DATA_AUTHENTICATED; + } } /* @@ -996,8 +1038,9 @@ connectitx(struct socket *so, struct sockaddr *src, * Case 4: CONNECT_RESUME_ON_READ_WRITE not set, no data (regular case) */ if ((so->so_proto->pr_flags & PR_PRECONN_WRITE) && - ((flags & CONNECT_RESUME_ON_READ_WRITE) || auio)) + ((flags & CONNECT_RESUME_ON_READ_WRITE) || auio)) { so->so_flags1 |= SOF1_PRECONNECT_DATA; + } /* * If a user sets data idempotent and does not pass an uio, or @@ -1033,17 +1076,19 @@ connectitx(struct socket *so, struct sockaddr *src, while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) { lck_mtx_t *mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } error = msleep((caddr_t)&so->so_timeo, mutex_held, PSOCK | PCATCH, __func__, 0); if (so->so_state & SS_DRAINING) { error = ECONNABORTED; } - if (error != 0) + if (error != 0) { break; + } } if (error == 0) { error = so->so_error; @@ -1051,7 +1096,7 @@ connectitx(struct socket *so, struct sockaddr *src, } out: socket_unlock(so, 1); - return (error); + return error; } int @@ -1063,7 +1108,7 @@ peeloff(struct proc *p, struct peeloff_args *uap, int *retval) * an unofficial cancellation point. */ __pthread_testcancel(1); - return (0); + return 0; } int @@ -1074,7 +1119,7 @@ disconnectx(struct proc *p, struct disconnectx_args *uap, int *retval) * an unofficial cancellation point. 
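connectit()/connectitx() above implement the blocking wait and the SOF1_PRECONNECT_DATA / SOF1_DATA_IDEMPOTENT setup behind the connectx(2) syscall. A hedged user-space sketch of driving that path with idempotent data riding the handshake (TCP Fast Open style); it assumes the sa_endpoints_t and connectx() declarations from macOS's <sys/socket.h>, and 127.0.0.1:80 is purely a placeholder destination.

/* Sketch: connectx(2) with data supplied at connect time. */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	int s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	struct sockaddr_in dst;
	sa_endpoints_t ep;
	struct iovec iov;
	size_t sent = 0;
	char req[] = "ping";

	memset(&dst, 0, sizeof(dst));
	dst.sin_len = sizeof(dst);
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	memset(&ep, 0, sizeof(ep));
	ep.sae_dstaddr = (struct sockaddr *)&dst;
	ep.sae_dstaddrlen = sizeof(dst);

	iov.iov_base = req;
	iov.iov_len = sizeof(req) - 1;

	if (connectx(s, &ep, SAE_ASSOCID_ANY, CONNECT_DATA_IDEMPOTENT,
	    &iov, 1, &sent, NULL) < 0) {
		perror("connectx");
	} else {
		printf("sent %zu bytes with the handshake\n", sent);
	}
	return 0;
}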
*/ __pthread_testcancel(1); - return (disconnectx_nocancel(p, uap, retval)); + return disconnectx_nocancel(p, uap, retval); } static int @@ -1086,8 +1131,9 @@ disconnectx_nocancel(struct proc *p, struct disconnectx_args *uap, int *retval) int error; error = file_socket(fd, &so); - if (error != 0) - return (error); + if (error != 0) { + return error; + } if (so == NULL) { error = EBADF; goto out; @@ -1096,7 +1142,7 @@ disconnectx_nocancel(struct proc *p, struct disconnectx_args *uap, int *retval) error = sodisconnectx(so, uap->aid, uap->cid); out: file_drop(fd); - return (error); + return error; } /* @@ -1126,17 +1172,19 @@ socketpair(struct proc *p, struct socketpair_args *uap, AUDIT_ARG(socket, uap->domain, uap->type, uap->protocol); error = socreate(uap->domain, &so1, uap->type, uap->protocol); - if (error) - return (error); + if (error) { + return error; + } error = socreate(uap->domain, &so2, uap->type, uap->protocol); - if (error) + if (error) { goto free1; + } error = falloc(p, &fp1, &fd, vfs_context_current()); if (error) { goto free2; } - fp1->f_flag = FREAD|FWRITE; + fp1->f_flag = FREAD | FWRITE; fp1->f_ops = &socketops; fp1->f_data = (caddr_t)so1; sv[0] = fd; @@ -1145,7 +1193,7 @@ socketpair(struct proc *p, struct socketpair_args *uap, if (error) { goto free3; } - fp2->f_flag = FREAD|FWRITE; + fp2->f_flag = FREAD | FWRITE; fp2->f_ops = &socketops; fp2->f_data = (caddr_t)so2; sv[1] = fd; @@ -1164,8 +1212,9 @@ socketpair(struct proc *p, struct socketpair_args *uap, } } - if ((error = copyout(sv, uap->rsv, 2 * sizeof (int))) != 0) + if ((error = copyout(sv, uap->rsv, 2 * sizeof(int))) != 0) { goto free4; + } proc_fdlock(p); procfdtbl_releasefd(p, sv[0], NULL); @@ -1174,7 +1223,7 @@ socketpair(struct proc *p, struct socketpair_args *uap, fp_drop(p, sv[1], fp2, 1); proc_fdunlock(p); - return (0); + return 0; free4: fp_free(p, sv[1], fp2); free3: @@ -1183,7 +1232,7 @@ free2: (void) soclose(so2); free1: (void) soclose(so1); - return (error); + return error; } /* @@ -1240,7 +1289,7 @@ sendit(struct proc *p, struct socket *so, struct user_msghdr *mp, uio_t uiop, KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_START, 0, 0, 0, 0, 0); if (mp->msg_name != USER_ADDR_NULL) { - if (mp->msg_namelen > sizeof (ss)) { + if (mp->msg_namelen > sizeof(ss)) { error = getsockaddr(so, &to, mp->msg_name, mp->msg_namelen, TRUE); } else { @@ -1251,19 +1300,21 @@ sendit(struct proc *p, struct socket *so, struct user_msghdr *mp, uio_t uiop, want_free = FALSE; } } - if (error != 0) + if (error != 0) { goto out; + } AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), to); } if (mp->msg_control != USER_ADDR_NULL) { - if (mp->msg_controllen < sizeof (struct cmsghdr)) { + if (mp->msg_controllen < sizeof(struct cmsghdr)) { error = EINVAL; goto bad; } error = sockargs(&control, mp->msg_control, mp->msg_controllen, MT_CONTROL); - if (error != 0) + if (error != 0) { goto bad; + } } #if CONFIG_MACF_SOCKET_SUBSET @@ -1274,30 +1325,35 @@ sendit(struct proc *p, struct socket *so, struct user_msghdr *mp, uio_t uiop, */ if (to != NULL && !(so->so_state & SS_DEFUNCT) && - (error = mac_socket_check_send(kauth_cred_get(), so, to)) != 0) + (error = mac_socket_check_send(kauth_cred_get(), so, to)) != 0) { goto bad; + } #endif /* MAC_SOCKET_SUBSET */ len = uio_resid(uiop); error = so->so_proto->pr_usrreqs->pru_sosend(so, to, uiop, 0, - control, flags); + control, flags); if (error != 0) { if (uio_resid(uiop) != len && (error == ERESTART || - error == EINTR || error == EWOULDBLOCK)) + error == EINTR || error == EWOULDBLOCK)) { error 
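Editor's note: the caller-visible contract of the socketpair() implementation above is two symmetric, already-connected descriptors, with the free1..free4 goto ladder unwinding any half-built state on failure. A minimal userspace check, assuming AF_UNIX/SOCK_STREAM:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int
main(void)
{
    int sv[2];
    char buf[6] = { 0 };

    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        perror("socketpair");
        return 1;
    }
    /* the two fds are symmetric, pre-connected endpoints */
    (void)write(sv[0], "hello", 5);
    (void)read(sv[1], buf, sizeof(buf) - 1);
    printf("peer read: %s\n", buf);
    close(sv[0]);
    close(sv[1]);
    return 0;
}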
= 0; + } /* Generation of SIGPIPE can be controlled per socket */ - if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE)) + if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE)) { psignal(p, SIGPIPE); + } } - if (error == 0) + if (error == 0) { *retval = (int)(len - uio_resid(uiop)); + } bad: - if (to != NULL && want_free) + if (to != NULL && want_free) { FREE(to, M_SONAME); + } out: KERNEL_DEBUG(DBG_FNC_SENDIT | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } /* @@ -1310,13 +1366,13 @@ int sendto(struct proc *p, struct sendto_args *uap, int32_t *retval) { __pthread_testcancel(1); - return (sendto_nocancel(p, (struct sendto_nocancel_args *)uap, retval)); + return sendto_nocancel(p, (struct sendto_nocancel_args *)uap, retval); } int sendto_nocancel(struct proc *p, - struct sendto_nocancel_args *uap, - int32_t *retval) + struct sendto_nocancel_args *uap, + int32_t *retval) { struct user_msghdr msg; int error; @@ -1344,8 +1400,9 @@ sendto_nocancel(struct proc *p, msg.msg_flags = 0; error = file_socket(uap->s, &so); - if (error) + if (error) { goto done; + } if (so == NULL) { error = EBADF; @@ -1355,12 +1412,13 @@ sendto_nocancel(struct proc *p, file_drop(uap->s); done: - if (auio != NULL) + if (auio != NULL) { uio_free(auio); + } KERNEL_DEBUG(DBG_FNC_SENDTO | DBG_FUNC_END, error, *retval, 0, 0, 0); - return (error); + return error; } /* @@ -1373,8 +1431,8 @@ int sendmsg(struct proc *p, struct sendmsg_args *uap, int32_t *retval) { __pthread_testcancel(1); - return (sendmsg_nocancel(p, (struct sendmsg_nocancel_args *)uap, - retval)); + return sendmsg_nocancel(p, (struct sendmsg_nocancel_args *)uap, + retval); } int @@ -1385,7 +1443,7 @@ sendmsg_nocancel(struct proc *p, struct sendmsg_nocancel_args *uap, struct user64_msghdr msg64; struct user_msghdr user_msg; caddr_t msghdrp; - int size_of_msghdr; + int size_of_msghdr; int error; uio_t auio = NULL; struct user_iovec *iovp; @@ -1395,15 +1453,15 @@ sendmsg_nocancel(struct proc *p, struct sendmsg_nocancel_args *uap, AUDIT_ARG(fd, uap->s); if (IS_64BIT_PROCESS(p)) { msghdrp = (caddr_t)&msg64; - size_of_msghdr = sizeof (msg64); + size_of_msghdr = sizeof(msg64); } else { msghdrp = (caddr_t)&msg32; - size_of_msghdr = sizeof (msg32); + size_of_msghdr = sizeof(msg32); } error = copyin(uap->msg, msghdrp, size_of_msghdr); if (error) { KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } if (IS_64BIT_PROCESS(p)) { @@ -1427,7 +1485,7 @@ sendmsg_nocancel(struct proc *p, struct sendmsg_nocancel_args *uap, if (user_msg.msg_iovlen <= 0 || user_msg.msg_iovlen > UIO_MAXIOV) { KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, EMSGSIZE, 0, 0, 0, 0); - return (EMSGSIZE); + return EMSGSIZE; } /* allocate a uio large enough to hold the number of iovecs passed */ @@ -1450,10 +1508,11 @@ sendmsg_nocancel(struct proc *p, struct sendmsg_nocancel_args *uap, goto done; } error = copyin_user_iovec_array(user_msg.msg_iov, - IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, - user_msg.msg_iovlen, iovp); - if (error) + IS_64BIT_PROCESS(p) ? 
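Editor's note: the SOF_NOSIGPIPE test above is the kernel half of Darwin's SO_NOSIGPIPE socket option; with it set, a write to a broken connection comes back as a plain EPIPE return instead of delivering SIGPIPE:

#include <sys/socket.h>

static int
disable_sigpipe(int fd)
{
    int on = 1;
    /* after this, send() on a broken pipe fails with EPIPE
     * rather than raising SIGPIPE in the process */
    return setsockopt(fd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
}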
UIO_USERSPACE64 : UIO_USERSPACE32, + user_msg.msg_iovlen, iovp); + if (error) { goto done; + } user_msg.msg_iov = CAST_USER_ADDR_T(iovp); /* finish setup of uio_t */ @@ -1484,7 +1543,7 @@ done: } KERNEL_DEBUG(DBG_FNC_SENDMSG | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } int @@ -1525,18 +1584,19 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) /* * Clip to max currently allowed */ - if (uap->cnt > somaxsendmsgx) + if (uap->cnt > somaxsendmsgx) { uap->cnt = somaxsendmsgx; + } user_msg_x = _MALLOC(uap->cnt * sizeof(struct user_msghdr_x), - M_TEMP, M_WAITOK | M_ZERO); + M_TEMP, M_WAITOK | M_ZERO); if (user_msg_x == NULL) { DBG_PRINTF("%s _MALLOC() user_msg_x failed\n", __func__); error = ENOMEM; goto out; } uiop = _MALLOC(uap->cnt * sizeof(struct uio *), - M_TEMP, M_WAITOK | M_ZERO); + M_TEMP, M_WAITOK | M_ZERO); if (uiop == NULL) { DBG_PRINTF("%s _MALLOC() uiop failed\n", __func__); error = ENOMEM; @@ -1544,10 +1604,10 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) } size_of_msghdr = IS_64BIT_PROCESS(p) ? - sizeof(struct user64_msghdr_x) : sizeof(struct user32_msghdr_x); + sizeof(struct user64_msghdr_x) : sizeof(struct user32_msghdr_x); umsgp = _MALLOC(uap->cnt * size_of_msghdr, - M_TEMP, M_WAITOK | M_ZERO); + M_TEMP, M_WAITOK | M_ZERO); if (umsgp == NULL) { printf("%s _MALLOC() user_msg_x failed\n", __func__); error = ENOMEM; @@ -1559,8 +1619,8 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) goto out; } error = internalize_user_msghdr_array(umsgp, - IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, - UIO_WRITE, uap->cnt, user_msg_x, uiop); + IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, + UIO_WRITE, uap->cnt, user_msg_x, uiop); if (error) { DBG_PRINTF("%s copyin_user_msghdr_array() failed\n", __func__); goto out; @@ -1590,12 +1650,14 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) /* * No support for address or ancillary data (yet) */ - if (mp->msg_name != USER_ADDR_NULL || mp->msg_namelen != 0) + if (mp->msg_name != USER_ADDR_NULL || mp->msg_namelen != 0) { has_addr_or_ctl = 1; + } if (mp->msg_control != USER_ADDR_NULL || - mp->msg_controllen != 0) + mp->msg_controllen != 0) { has_addr_or_ctl = 1; + } #if CONFIG_MACF_SOCKET_SUBSET /* @@ -1611,8 +1673,9 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) if (to != NULL && !(so->so_state & SS_DEFUNCT) && (error = mac_socket_check_send(kauth_cred_get(), so, to)) - != 0) + != 0) { goto out; + } #endif /* MAC_SOCKET_SUBSET */ } @@ -1644,8 +1707,9 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) error = sendit(p, so, &user_msg, auio, uap->flags, &tmpval); - if (error != 0) + if (error != 0) { break; + } } } len_after = uio_array_resid(uiop, uap->cnt); @@ -1655,11 +1719,13 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) if (error != 0) { if (len_after != len_before && (error == ERESTART || error == EINTR || error == EWOULDBLOCK || - error == ENOBUFS)) + error == ENOBUFS)) { error = 0; + } /* Generation of SIGPIPE can be controlled per socket */ - if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE)) + if (error == EPIPE && !(so->so_flags & SOF_NOSIGPIPE)) { psignal(p, SIGPIPE); + } } if (error == 0) { uiocnt = externalize_user_msghdr_array(umsgp, @@ -1669,20 +1735,23 @@ sendmsg_x(struct proc *p, struct sendmsg_x_args *uap, user_ssize_t *retval) *retval = (int)(uiocnt); } out: - if (need_drop) + 
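Editor's note: sendmsg_x() above is a private batching syscall; the count is clipped to somaxsendmsgx, and any message carrying an address or control data drops the whole batch back to per-message sendit(). A portable userspace equivalent of that fallback, mirroring the partial-progress rule; this is a sketch, not the syscall's wrapper:

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t
send_batch(int fd, struct msghdr *msgs, unsigned int cnt)
{
    unsigned int i;

    for (i = 0; i < cnt; i++) {
        /* any msg_name/msg_control forces the kernel batch path
         * above into exactly this one-at-a-time loop anyway */
        if (sendmsg(fd, &msgs[i], 0) == -1) {
            /* partial progress plus a retryable error still
             * counts as success, as in the hunk above */
            if (i > 0 && (errno == EINTR ||
                errno == EWOULDBLOCK || errno == ENOBUFS)) {
                break;
            }
            return -1;
        }
    }
    return (ssize_t)i;
}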
if (need_drop) { file_drop(uap->s); - if (umsgp != NULL) + } + if (umsgp != NULL) { _FREE(umsgp, M_TEMP); + } if (uiop != NULL) { free_uio_array(uiop, uap->cnt); _FREE(uiop, M_TEMP); } - if (user_msg_x != NULL) + if (user_msg_x != NULL) { _FREE(user_msg_x, M_TEMP); + } KERNEL_DEBUG(DBG_FNC_SENDMSG_X | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } @@ -1698,17 +1767,18 @@ copyout_sa(struct sockaddr *fromsa, user_addr_t name, socklen_t *namelen) len = 0; } else { #ifndef MIN -#define MIN(a, b) ((a) > (b) ? (b) : (a)) +#define MIN(a, b) ((a) > (b) ? (b) : (a)) #endif sa_len = fromsa->sa_len; len = MIN((unsigned int)len, sa_len); error = copyout(fromsa, name, (unsigned)len); - if (error) + if (error) { goto out; + } } *namelen = sa_len; out: - return (0); + return 0; } static int @@ -1767,8 +1837,9 @@ copyout_control(struct proc *p, struct mbuf *m, user_addr_t control, tocopy = len; } error = copyout(tmp_buffer, ctlbuf, tocopy); - if (error) + if (error) { goto out; + } } else { if (cp_size > buflen) { panic("cp_size > buflen, something" @@ -1781,8 +1852,9 @@ copyout_control(struct proc *p, struct mbuf *m, user_addr_t control, tocopy = len; } error = copyout((caddr_t) cp, ctlbuf, tocopy); - if (error) + if (error) { goto out; + } } ctlbuf += tocopy; @@ -1798,7 +1870,7 @@ copyout_control(struct proc *p, struct mbuf *m, user_addr_t control, } *controllen = ctlbuf - control; out: - return (error); + return error; } /* @@ -1839,19 +1911,19 @@ recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop, if ((error = fp_lookup(p, s, &fp, 1))) { KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, error, 0, 0, 0, 0); proc_fdunlock(p); - return (error); + return error; } if (fp->f_type != DTYPE_SOCKET) { fp_drop(p, s, fp, 1); proc_fdunlock(p); - return (ENOTSOCK); + return ENOTSOCK; } so = (struct socket *)fp->f_data; if (so == NULL) { fp_drop(p, s, fp, 1); proc_fdunlock(p); - return (EBADF); + return EBADF; } proc_fdunlock(p); @@ -1865,8 +1937,9 @@ recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop, if (!(so->so_state & SS_DEFUNCT) && !(so->so_state & SS_ISCONNECTED) && !(so->so_proto->pr_flags & PR_CONNREQUIRED) && - (error = mac_socket_check_receive(kauth_cred_get(), so)) != 0) + (error = mac_socket_check_receive(kauth_cred_get(), so)) != 0) { goto out1; + } #endif /* MAC_SOCKET_SUBSET */ if (uio_resid(uiop) < 0) { KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, EINVAL, 0, 0, 0, 0); @@ -1878,27 +1951,31 @@ recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop, error = so->so_proto->pr_usrreqs->pru_soreceive(so, &fromsa, uiop, (struct mbuf **)0, mp->msg_control ? 
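Editor's note: copyout_sa() above implements the usual BSD truncation contract: copy MIN(caller's length, sa_len) bytes out, but report the full, untruncated sa_len back. That makes truncation detectable from userspace:

#include <sys/socket.h>

static int
addr_was_truncated(int fd)
{
    struct sockaddr_storage ss;
    socklen_t len = sizeof(ss);    /* in: buffer size */

    if (getpeername(fd, (struct sockaddr *)&ss, &len) == -1) {
        return -1;
    }
    /* out: len is the peer's full address length; larger than
     * the buffer means the copy was truncated */
    return len > sizeof(ss);
}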
&control : (struct mbuf **)0, &mp->msg_flags); - if (fromsa) + if (fromsa) { AUDIT_ARG(sockaddr, vfs_context_cwd(vfs_context_current()), fromsa); + } if (error) { if (uio_resid(uiop) != len && (error == ERESTART || - error == EINTR || error == EWOULDBLOCK)) + error == EINTR || error == EWOULDBLOCK)) { error = 0; + } } - if (error) + if (error) { goto out; + } *retval = len - uio_resid(uiop); if (mp->msg_name) { error = copyout_sa(fromsa, mp->msg_name, &mp->msg_namelen); - if (error) + if (error) { goto out; + } /* return the actual, untruncated address length */ if (namelenp && (error = copyout((caddr_t)&mp->msg_namelen, namelenp, - sizeof (int)))) { + sizeof(int)))) { goto out; } } @@ -1908,14 +1985,16 @@ recvit(struct proc *p, int s, struct user_msghdr *mp, uio_t uiop, &mp->msg_controllen, &mp->msg_flags); } out: - if (fromsa) + if (fromsa) { FREE(fromsa, M_SONAME); - if (control) + } + if (control) { m_freem(control); + } KERNEL_DEBUG(DBG_FNC_RECVIT | DBG_FUNC_END, error, 0, 0, 0, 0); out1: fp_drop(p, s, fp, 0); - return (error); + return error; } /* @@ -1937,8 +2016,8 @@ int recvfrom(struct proc *p, struct recvfrom_args *uap, int32_t *retval) { __pthread_testcancel(1); - return (recvfrom_nocancel(p, (struct recvfrom_nocancel_args *)uap, - retval)); + return recvfrom_nocancel(p, (struct recvfrom_nocancel_args *)uap, + retval); } int @@ -1954,9 +2033,10 @@ recvfrom_nocancel(struct proc *p, struct recvfrom_nocancel_args *uap, if (uap->fromlenaddr) { error = copyin(uap->fromlenaddr, - (caddr_t)&msg.msg_namelen, sizeof (msg.msg_namelen)); - if (error) - return (error); + (caddr_t)&msg.msg_namelen, sizeof(msg.msg_namelen)); + if (error) { + return error; + } } else { msg.msg_namelen = 0; } @@ -1965,7 +2045,7 @@ recvfrom_nocancel(struct proc *p, struct recvfrom_nocancel_args *uap, (IS_64BIT_PROCESS(p) ? 
UIO_USERSPACE64 : UIO_USERSPACE32), UIO_READ); if (auio == NULL) { - return (ENOMEM); + return ENOMEM; } uio_addiov(auio, uap->buf, uap->len); @@ -1982,7 +2062,7 @@ recvfrom_nocancel(struct proc *p, struct recvfrom_nocancel_args *uap, KERNEL_DEBUG(DBG_FNC_RECVFROM | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } /* @@ -2000,8 +2080,8 @@ int recvmsg(struct proc *p, struct recvmsg_args *uap, int32_t *retval) { __pthread_testcancel(1); - return (recvmsg_nocancel(p, (struct recvmsg_nocancel_args *)uap, - retval)); + return recvmsg_nocancel(p, (struct recvmsg_nocancel_args *)uap, + retval); } int @@ -2012,7 +2092,7 @@ recvmsg_nocancel(struct proc *p, struct recvmsg_nocancel_args *uap, struct user64_msghdr msg64; struct user_msghdr user_msg; caddr_t msghdrp; - int size_of_msghdr; + int size_of_msghdr; user_addr_t uiov; int error; uio_t auio = NULL; @@ -2022,15 +2102,15 @@ recvmsg_nocancel(struct proc *p, struct recvmsg_nocancel_args *uap, AUDIT_ARG(fd, uap->s); if (IS_64BIT_PROCESS(p)) { msghdrp = (caddr_t)&msg64; - size_of_msghdr = sizeof (msg64); + size_of_msghdr = sizeof(msg64); } else { msghdrp = (caddr_t)&msg32; - size_of_msghdr = sizeof (msg32); + size_of_msghdr = sizeof(msg32); } error = copyin(uap->msg, msghdrp, size_of_msghdr); if (error) { KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } /* only need to copy if user process is not 64-bit */ @@ -2055,7 +2135,7 @@ recvmsg_nocancel(struct proc *p, struct recvmsg_nocancel_args *uap, if (user_msg.msg_iovlen <= 0 || user_msg.msg_iovlen > UIO_MAXIOV) { KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, EMSGSIZE, 0, 0, 0, 0); - return (EMSGSIZE); + return EMSGSIZE; } user_msg.msg_flags = uap->flags; @@ -2081,10 +2161,11 @@ recvmsg_nocancel(struct proc *p, struct recvmsg_nocancel_args *uap, uiov = user_msg.msg_iov; user_msg.msg_iov = CAST_USER_ADDR_T(iovp); error = copyin_user_iovec_array(uiov, - IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, - user_msg.msg_iovlen, iovp); - if (error) + IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32, + user_msg.msg_iovlen, iovp); + if (error) { goto done; + } /* finish setup of uio_t */ error = uio_calculateresid(auio); @@ -2119,7 +2200,7 @@ done: uio_free(auio); } KERNEL_DEBUG(DBG_FNC_RECVMSG | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } int @@ -2154,8 +2235,9 @@ recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval) error = EINVAL; goto out; } - if (uap->cnt > somaxrecvmsgx) + if (uap->cnt > somaxrecvmsgx) { uap->cnt = somaxrecvmsgx; + } user_msg_x = _MALLOC(uap->cnt * sizeof(struct user_msghdr_x), M_TEMP, M_WAITOK | M_ZERO); @@ -2219,8 +2301,9 @@ recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval) if (!(so->so_state & SS_DEFUNCT) && !(so->so_state & SS_ISCONNECTED) && !(so->so_proto->pr_flags & PR_CONNREQUIRED) && - (error = mac_socket_check_receive(kauth_cred_get(), so)) != 0) + (error = mac_socket_check_receive(kauth_cred_get(), so)) != 0) { goto out; + } #endif /* MAC_SOCKET_SUBSET */ len_before = recv_msg_array_resid(recv_msg_array, uap->cnt); @@ -2245,8 +2328,9 @@ recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval) /* * Do not block if we got at least one packet */ - if (i > 0) + if (i > 0) { flags |= MSG_DONTWAIT; + } psa = (recv_msg_elem->which & SOCK_MSG_SA) ? 
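Editor's note: the flags handling in the recvmsg_x() loop above, setting MSG_DONTWAIT once i > 0 and stripping it again afterwards unless the caller asked for it, means a batch blocks for at most the first datagram. A portable analogue of the same policy:

#include <errno.h>
#include <sys/socket.h>

static int
recv_batch(int fd, struct msghdr *msgs, unsigned int cnt)
{
    unsigned int i;
    int flags = 0;

    for (i = 0; i < cnt; i++) {
        if (recvmsg(fd, &msgs[i], flags) == -1) {
            if (i > 0 && (errno == EWOULDBLOCK || errno == EINTR)) {
                break;          /* partial batch is success */
            }
            return -1;
        }
        flags = MSG_DONTWAIT;   /* got one; never block again */
    }
    return (int)i;
}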
&recv_msg_elem->psa : NULL; @@ -2255,8 +2339,9 @@ recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval) error = so->so_proto->pr_usrreqs->pru_soreceive(so, psa, auio, (struct mbuf **)0, controlp, &flags); - if (error) + if (error) { break; + } /* * We have some data */ @@ -2264,11 +2349,13 @@ recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval) /* * Stop on partial copy */ - if (flags & (MSG_RCVMORE | MSG_TRUNC)) + if (flags & (MSG_RCVMORE | MSG_TRUNC)) { break; + } } - if ((uap->flags & MSG_DONTWAIT) == 0) + if ((uap->flags & MSG_DONTWAIT) == 0) { flags &= ~MSG_DONTWAIT; + } uap->flags = flags; } @@ -2276,10 +2363,11 @@ recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval) if (error) { if (len_after != len_before && (error == ERESTART || - error == EINTR || error == EWOULDBLOCK)) + error == EINTR || error == EWOULDBLOCK)) { error = 0; - else + } else { goto out; + } } uiocnt = externalize_recv_msghdr_array(umsgp, @@ -2301,30 +2389,36 @@ recvmsg_x(struct proc *p, struct recvmsg_x_args *uap, user_ssize_t *retval) if (mp->msg_name) { error = copyout_sa(fromsa, mp->msg_name, &mp->msg_namelen); - if (error) + if (error) { goto out; + } } if (mp->msg_control) { error = copyout_control(p, recv_msg_elem->controlp, mp->msg_control, &mp->msg_controllen, &mp->msg_flags); - if (error) + if (error) { goto out; + } } } out: - if (need_drop) + if (need_drop) { file_drop(uap->s); - if (umsgp != NULL) + } + if (umsgp != NULL) { _FREE(umsgp, M_TEMP); - if (recv_msg_array != NULL) + } + if (recv_msg_array != NULL) { free_recv_msg_array(recv_msg_array, uap->cnt); - if (user_msg_x != NULL) + } + if (user_msg_x != NULL) { _FREE(user_msg_x, M_TEMP); + } KERNEL_DEBUG(DBG_FNC_RECVMSG_X | DBG_FUNC_END, error, 0, 0, 0, 0); - return (error); + return error; } /* @@ -2357,8 +2451,9 @@ shutdown(__unused struct proc *p, struct shutdown_args *uap, AUDIT_ARG(fd, uap->s); error = file_socket(uap->s, &so); - if (error) - return (error); + if (error) { + return error; + } if (so == NULL) { error = EBADF; goto out; @@ -2366,7 +2461,7 @@ shutdown(__unused struct proc *p, struct shutdown_args *uap, error = soshutdown((struct socket *)so, uap->how); out: file_drop(uap->s); - return (error); + return error; } /* @@ -2394,13 +2489,15 @@ setsockopt(struct proc *p, struct setsockopt_args *uap, int error; AUDIT_ARG(fd, uap->s); - if (uap->val == 0 && uap->valsize != 0) - return (EFAULT); + if (uap->val == 0 && uap->valsize != 0) { + return EFAULT; + } /* No bounds checking on size (it's unsigned) */ error = file_socket(uap->s, &so); - if (error) - return (error); + if (error) { + return error; + } sopt.sopt_dir = SOPT_SET; sopt.sopt_level = uap->level; @@ -2415,13 +2512,14 @@ setsockopt(struct proc *p, struct setsockopt_args *uap, } #if CONFIG_MACF_SOCKET_SUBSET if ((error = mac_socket_check_setsockopt(kauth_cred_get(), so, - &sopt)) != 0) + &sopt)) != 0) { goto out; + } #endif /* MAC_SOCKET_SUBSET */ - error = sosetoptlock(so, &sopt, 1); /* will lock socket */ + error = sosetoptlock(so, &sopt, 1); /* will lock socket */ out: file_drop(uap->s); - return (error); + return error; } @@ -2441,19 +2539,21 @@ int getsockopt(struct proc *p, struct getsockopt_args *uap, __unused int32_t *retval) { - int error; - socklen_t valsize; - struct sockopt sopt; + int error; + socklen_t valsize; + struct sockopt sopt; struct socket *so; error = file_socket(uap->s, &so); - if (error) - return (error); + if (error) { + return error; + } if (uap->val) { error = copyin(uap->avalsize, 
(caddr_t)&valsize, - sizeof (valsize)); - if (error) + sizeof(valsize)); + if (error) { goto out; + } /* No bounds checking on size (it's unsigned) */ } else { valsize = 0; @@ -2471,18 +2571,19 @@ getsockopt(struct proc *p, struct getsockopt_args *uap, } #if CONFIG_MACF_SOCKET_SUBSET if ((error = mac_socket_check_getsockopt(kauth_cred_get(), so, - &sopt)) != 0) + &sopt)) != 0) { goto out; + } #endif /* MAC_SOCKET_SUBSET */ - error = sogetoptlock((struct socket *)so, &sopt, 1); /* will lock */ + error = sogetoptlock((struct socket *)so, &sopt, 1); /* will lock */ if (error == 0) { valsize = sopt.sopt_valsize; error = copyout((caddr_t)&valsize, uap->avalsize, - sizeof (valsize)); + sizeof(valsize)); } out: file_drop(uap->s); - return (error); + return error; } @@ -2512,11 +2613,13 @@ getsockname(__unused struct proc *p, struct getsockname_args *uap, int error; error = file_socket(uap->fdes, &so); - if (error) - return (error); - error = copyin(uap->alen, (caddr_t)&len, sizeof (socklen_t)); - if (error) + if (error) { + return error; + } + error = copyin(uap->alen, (caddr_t)&len, sizeof(socklen_t)); + if (error) { goto out; + } if (so == NULL) { error = EBADF; goto out; @@ -2526,12 +2629,14 @@ getsockname(__unused struct proc *p, struct getsockname_args *uap, error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, &sa); if (error == 0) { error = sflt_getsockname(so, &sa); - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } } socket_unlock(so, 1); - if (error) + if (error) { goto bad; + } if (sa == 0) { len = 0; goto gotnothing; @@ -2540,18 +2645,20 @@ getsockname(__unused struct proc *p, struct getsockname_args *uap, sa_len = sa->sa_len; len = MIN(len, sa_len); error = copyout((caddr_t)sa, uap->asa, len); - if (error) + if (error) { goto bad; + } /* return the actual, untruncated address length */ len = sa_len; gotnothing: - error = copyout((caddr_t)&len, uap->alen, sizeof (socklen_t)); + error = copyout((caddr_t)&len, uap->alen, sizeof(socklen_t)); bad: - if (sa) + if (sa) { FREE(sa, M_SONAME); + } out: file_drop(uap->fdes); - return (error); + return error; } /* @@ -2580,8 +2687,9 @@ getpeername(__unused struct proc *p, struct getpeername_args *uap, int error; error = file_socket(uap->fdes, &so); - if (error) - return (error); + if (error) { + return error; + } if (so == NULL) { error = EBADF; goto out; @@ -2597,12 +2705,12 @@ getpeername(__unused struct proc *p, struct getpeername_args *uap, goto out; } - if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) { + if ((so->so_state & (SS_ISCONNECTED | SS_ISCONFIRMING)) == 0) { socket_unlock(so, 1); error = ENOTCONN; goto out; } - error = copyin(uap->alen, (caddr_t)&len, sizeof (socklen_t)); + error = copyin(uap->alen, (caddr_t)&len, sizeof(socklen_t)); if (error) { socket_unlock(so, 1); goto out; @@ -2611,12 +2719,14 @@ getpeername(__unused struct proc *p, struct getpeername_args *uap, error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, &sa); if (error == 0) { error = sflt_getpeername(so, &sa); - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { error = 0; + } } socket_unlock(so, 1); - if (error) + if (error) { goto bad; + } if (sa == 0) { len = 0; goto gotnothing; @@ -2624,17 +2734,20 @@ getpeername(__unused struct proc *p, struct getpeername_args *uap, sa_len = sa->sa_len; len = MIN(len, sa_len); error = copyout(sa, uap->asa, len); - if (error) + if (error) { goto bad; + } /* return the actual, untruncated address length */ len = sa_len; gotnothing: - error = copyout((caddr_t)&len, uap->alen, sizeof 
(socklen_t)); + error = copyout((caddr_t)&len, uap->alen, sizeof(socklen_t)); bad: - if (sa) FREE(sa, M_SONAME); + if (sa) { + FREE(sa, M_SONAME); + } out: file_drop(uap->fdes); - return (error); + return error; } int @@ -2646,31 +2759,35 @@ sockargs(struct mbuf **mp, user_addr_t data, int buflen, int type) size_t alloc_buflen = (size_t)buflen; - if (alloc_buflen > INT_MAX/2) - return (EINVAL); + if (alloc_buflen > INT_MAX / 2) { + return EINVAL; + } #ifdef __LP64__ /* * The fd's in the buffer must expand to be pointers, thus we need twice * as much space */ - if (type == MT_CONTROL) - alloc_buflen = ((buflen - sizeof(struct cmsghdr))*2) + + if (type == MT_CONTROL) { + alloc_buflen = ((buflen - sizeof(struct cmsghdr)) * 2) + sizeof(struct cmsghdr); + } #endif if (alloc_buflen > MLEN) { - if (type == MT_SONAME && alloc_buflen <= 112) - alloc_buflen = MLEN; /* unix domain compat. hack */ - else if (alloc_buflen > MCLBYTES) - return (EINVAL); + if (type == MT_SONAME && alloc_buflen <= 112) { + alloc_buflen = MLEN; /* unix domain compat. hack */ + } else if (alloc_buflen > MCLBYTES) { + return EINVAL; + } } m = m_get(M_WAIT, type); - if (m == NULL) - return (ENOBUFS); + if (m == NULL) { + return ENOBUFS; + } if (alloc_buflen > MLEN) { MCLGET(m, M_WAIT); if ((m->m_flags & M_EXT) == 0) { m_free(m); - return (ENOBUFS); + return ENOBUFS; } } /* @@ -2689,7 +2806,7 @@ sockargs(struct mbuf **mp, user_addr_t data, int buflen, int type) sa->sa_len = buflen; } } - return (error); + return error; } /* @@ -2708,15 +2825,17 @@ getsockaddr(struct socket *so, struct sockaddr **namp, user_addr_t uaddr, struct sockaddr *sa; int error; - if (len > SOCK_MAXADDRLEN) - return (ENAMETOOLONG); + if (len > SOCK_MAXADDRLEN) { + return ENAMETOOLONG; + } - if (len < offsetof(struct sockaddr, sa_data[0])) - return (EINVAL); + if (len < offsetof(struct sockaddr, sa_data[0])) { + return EINVAL; + } MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK | M_ZERO); if (sa == NULL) { - return (ENOMEM); + return ENOMEM; } error = copyin(uaddr, (caddr_t)sa, len); if (error) { @@ -2730,13 +2849,14 @@ getsockaddr(struct socket *so, struct sockaddr **namp, user_addr_t uaddr, */ if (translate_unspec && sa->sa_family == AF_UNSPEC && SOCK_CHECK_DOM(so, PF_INET) && - len == sizeof (struct sockaddr_in)) + len == sizeof(struct sockaddr_in)) { sa->sa_family = AF_INET; + } sa->sa_len = len; *namp = sa; } - return (error); + return error; } static int @@ -2746,17 +2866,19 @@ getsockaddr_s(struct socket *so, struct sockaddr_storage *ss, int error; if (ss == NULL || uaddr == USER_ADDR_NULL || - len < offsetof(struct sockaddr, sa_data[0])) - return (EINVAL); + len < offsetof(struct sockaddr, sa_data[0])) { + return EINVAL; + } /* * sockaddr_storage size is less than SOCK_MAXADDRLEN, * so the check here is inclusive. 
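Editor's note: the LP64 branch of sockargs() above sizes control mbufs for the worst case: SCM_RIGHTS payloads get rewritten from 32-bit descriptors to pointers during internalization, doubling everything past the header. The arithmetic, isolated (callers have already verified buflen >= sizeof(struct cmsghdr)):

#include <stddef.h>
#include <sys/socket.h>

static size_t
lp64_control_alloc_size(size_t buflen)
{
    size_t payload = buflen - sizeof(struct cmsghdr);

    /* each 4-byte fd may become an 8-byte pointer internally */
    return payload * 2 + sizeof(struct cmsghdr);
}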
*/ - if (len > sizeof (*ss)) - return (ENAMETOOLONG); + if (len > sizeof(*ss)) { + return ENAMETOOLONG; + } - bzero(ss, sizeof (*ss)); + bzero(ss, sizeof(*ss)); error = copyin(uaddr, (caddr_t)ss, len); if (error == 0) { /* @@ -2767,12 +2889,13 @@ getsockaddr_s(struct socket *so, struct sockaddr_storage *ss, */ if (translate_unspec && ss->ss_family == AF_UNSPEC && SOCK_CHECK_DOM(so, PF_INET) && - len == sizeof (struct sockaddr_in)) + len == sizeof(struct sockaddr_in)) { ss->ss_family = AF_INET; + } ss->ss_len = len; } - return (error); + return error; } int @@ -2836,24 +2959,28 @@ internalize_user_msghdr_array(const void *src, int spacetype, int direction, goto done; } error = copyin_user_iovec_array(user_msg->msg_iov, - spacetype, user_msg->msg_iovlen, iovp); - if (error) + spacetype, user_msg->msg_iovlen, iovp); + if (error) { goto done; + } user_msg->msg_iov = CAST_USER_ADDR_T(iovp); error = uio_calculateresid(auio); - if (error) + if (error) { goto done; + } user_msg->msg_datalen = uio_resid(auio); - if (user_msg->msg_name && user_msg->msg_namelen) + if (user_msg->msg_name && user_msg->msg_namelen) { namecnt++; - if (user_msg->msg_control && user_msg->msg_controllen) + } + if (user_msg->msg_control && user_msg->msg_controllen) { ctlcnt++; + } } done: - return (error); + return error; } int @@ -2915,24 +3042,28 @@ internalize_recv_msghdr_array(const void *src, int spacetype, int direction, goto done; } error = copyin_user_iovec_array(user_msg->msg_iov, - spacetype, user_msg->msg_iovlen, iovp); - if (error) + spacetype, user_msg->msg_iovlen, iovp); + if (error) { goto done; + } user_msg->msg_iov = CAST_USER_ADDR_T(iovp); error = uio_calculateresid(recv_msg_elem->uio); - if (error) + if (error) { goto done; + } user_msg->msg_datalen = uio_resid(recv_msg_elem->uio); - if (user_msg->msg_name && user_msg->msg_namelen) + if (user_msg->msg_name && user_msg->msg_namelen) { recv_msg_elem->which |= SOCK_MSG_SA; - if (user_msg->msg_control && user_msg->msg_controllen) + } + if (user_msg->msg_control && user_msg->msg_controllen) { recv_msg_elem->which |= SOCK_MSG_CONTROL; + } } done: - return (error); + return error; } u_int @@ -2949,11 +3080,13 @@ externalize_user_msghdr_array(void *dst, int spacetype, int direction, uio_t auio = uiop[i]; user_ssize_t len = user_msg->msg_datalen - uio_resid(auio); - if (user_msg->msg_datalen != 0 && len == 0) + if (user_msg->msg_datalen != 0 && len == 0) { seenlast = 1; + } - if (seenlast == 0) - retcnt ++; + if (seenlast == 0) { + retcnt++; + } if (spacetype == UIO_USERSPACE64) { struct user64_msghdr_x *msghdr64; @@ -2962,7 +3095,6 @@ externalize_user_msghdr_array(void *dst, int spacetype, int direction, msghdr64->msg_flags = user_msg->msg_flags; msghdr64->msg_datalen = len; - } else { struct user32_msghdr_x *msghdr32; @@ -2972,7 +3104,7 @@ externalize_user_msghdr_array(void *dst, int spacetype, int direction, msghdr32->msg_datalen = len; } } - return (retcnt); + return retcnt; } u_int @@ -2992,15 +3124,18 @@ externalize_recv_msghdr_array(void *dst, int spacetype, int direction, len = user_msg->msg_datalen - uio_resid(recv_msg_elem->uio); if (direction == UIO_READ) { - if ((recv_msg_elem->which & SOCK_MSG_DATA) == 0) + if ((recv_msg_elem->which & SOCK_MSG_DATA) == 0) { seenlast = 1; + } } else { - if (user_msg->msg_datalen != 0 && len == 0) + if (user_msg->msg_datalen != 0 && len == 0) { seenlast = 1; + } } - if (seenlast == 0) - retcnt ++; + if (seenlast == 0) { + retcnt++; + } if (spacetype == UIO_USERSPACE64) { struct user64_msghdr_x *msghdr64; @@ -3009,7 
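Editor's note: both getsockaddr() and getsockaddr_s() above preserve the 4.3BSD compatibility quirk: a sockaddr_in-sized AF_UNSPEC address on a PF_INET socket is silently treated as AF_INET. The condition, pulled out for readability:

#include <netinet/in.h>
#include <sys/socket.h>

static int
should_translate_unspec(int sock_family, const struct sockaddr *sa,
    socklen_t len)
{
    return sa->sa_family == AF_UNSPEC &&
        sock_family == PF_INET &&
        len == sizeof(struct sockaddr_in);
}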
+3144,6 @@ externalize_recv_msghdr_array(void *dst, int spacetype, int direction, msghdr64->msg_flags = user_msg->msg_flags; msghdr64->msg_datalen = len; - } else { struct user32_msghdr_x *msghdr32; @@ -3019,7 +3153,7 @@ externalize_recv_msghdr_array(void *dst, int spacetype, int direction, msghdr32->msg_datalen = len; } } - return (retcnt); + return retcnt; } void @@ -3028,8 +3162,9 @@ free_uio_array(struct uio **uiop, u_int count) u_int i; for (i = 0; i < count; i++) { - if (uiop[i] != NULL) + if (uiop[i] != NULL) { uio_free(uiop[i]); + } } } @@ -3042,10 +3177,11 @@ uio_array_resid(struct uio **uiop, u_int count) for (i = 0; i < count; i++) { struct uio *auio = uiop[i]; - if (auio != NULL) + if (auio != NULL) { len += uio_resid(auio); + } } - return (len); + return len; } int @@ -3064,15 +3200,17 @@ uio_array_is_valid(struct uio **uiop, u_int count) * Sanity check on the validity of the iovec: * no point of going over sb_max */ - if (resid < 0 || (u_int32_t)resid > sb_max) - return (0); + if (resid < 0 || (u_int32_t)resid > sb_max) { + return 0; + } len += resid; - if (len < 0 || (u_int32_t)len > sb_max) - return (0); + if (len < 0 || (u_int32_t)len > sb_max) { + return 0; + } } } - return (1); + return 1; } @@ -3084,7 +3222,7 @@ alloc_recv_msg_array(u_int count) recv_msg_array = _MALLOC(count * sizeof(struct recv_msg_elem), M_TEMP, M_WAITOK | M_ZERO); - return (recv_msg_array); + return recv_msg_array; } void @@ -3095,12 +3233,15 @@ free_recv_msg_array(struct recv_msg_elem *recv_msg_array, u_int count) for (i = 0; i < count; i++) { struct recv_msg_elem *recv_msg_elem = recv_msg_array + i; - if (recv_msg_elem->uio != NULL) + if (recv_msg_elem->uio != NULL) { uio_free(recv_msg_elem->uio); - if (recv_msg_elem->psa != NULL) + } + if (recv_msg_elem->psa != NULL) { _FREE(recv_msg_elem->psa, M_TEMP); - if (recv_msg_elem->controlp != NULL) + } + if (recv_msg_elem->controlp != NULL) { m_freem(recv_msg_elem->controlp); + } } _FREE(recv_msg_array, M_TEMP); } @@ -3115,10 +3256,11 @@ recv_msg_array_resid(struct recv_msg_elem *recv_msg_array, u_int count) for (i = 0; i < count; i++) { struct recv_msg_elem *recv_msg_elem = recv_msg_array + i; - if (recv_msg_elem->uio != NULL) + if (recv_msg_elem->uio != NULL) { len += uio_resid(recv_msg_elem->uio); + } } - return (len); + return len; } int @@ -3137,31 +3279,33 @@ recv_msg_array_is_valid(struct recv_msg_elem *recv_msg_array, u_int count) * Sanity check on the validity of the iovec: * no point of going over sb_max */ - if (resid < 0 || (u_int32_t)resid > sb_max) - return (0); + if (resid < 0 || (u_int32_t)resid > sb_max) { + return 0; + } len += resid; - if (len < 0 || (u_int32_t)len > sb_max) - return (0); + if (len < 0 || (u_int32_t)len > sb_max) { + return 0; + } } } - return (1); + return 1; } #if SENDFILE -#define SFUIOBUFS 64 +#define SFUIOBUFS 64 /* Macros to compute the number of mbufs needed depending on cluster size */ -#define HOWMANY_16K(n) ((((unsigned int)(n) - 1) >> M16KCLSHIFT) + 1) -#define HOWMANY_4K(n) ((((unsigned int)(n) - 1) >> MBIGCLSHIFT) + 1) +#define HOWMANY_16K(n) ((((unsigned int)(n) - 1) >> M16KCLSHIFT) + 1) +#define HOWMANY_4K(n) ((((unsigned int)(n) - 1) >> MBIGCLSHIFT) + 1) /* Upper send limit in bytes (SFUIOBUFS * PAGESIZE) */ -#define SENDFILE_MAX_BYTES (SFUIOBUFS << PGSHIFT) +#define SENDFILE_MAX_BYTES (SFUIOBUFS << PGSHIFT) /* Upper send limit in the number of mbuf clusters */ -#define SENDFILE_MAX_16K HOWMANY_16K(SENDFILE_MAX_BYTES) -#define SENDFILE_MAX_4K HOWMANY_4K(SENDFILE_MAX_BYTES) +#define SENDFILE_MAX_16K 
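Editor's note: uio_array_is_valid() and recv_msg_array_is_valid() above apply the same guard at two levels: every individual resid and the running total are both bounded by sb_max, so neither one oversized iovec nor an overflowing sum can sneak through. A reduced form, with sb_max as a parameter:

#include <stdint.h>

static int
resid_array_is_valid(const int64_t *resid, unsigned int count,
    uint32_t sb_max)
{
    int64_t len = 0;
    unsigned int i;

    for (i = 0; i < count; i++) {
        if (resid[i] < 0 || (uint32_t)resid[i] > sb_max) {
            return 0;           /* one iovec alone is too big */
        }
        len += resid[i];
        if (len < 0 || (uint32_t)len > sb_max) {
            return 0;           /* running total overflowed or too big */
        }
    }
    return 1;
}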
HOWMANY_16K(SENDFILE_MAX_BYTES) +#define SENDFILE_MAX_4K HOWMANY_4K(SENDFILE_MAX_BYTES) static void alloc_sendpkt(int how, size_t pktlen, unsigned int *maxchunks, @@ -3169,8 +3313,9 @@ alloc_sendpkt(int how, size_t pktlen, unsigned int *maxchunks, { unsigned int needed; - if (pktlen == 0) + if (pktlen == 0) { panic("%s: pktlen (%ld) must be non-zero\n", __func__, pktlen); + } /* * Try to allocate for the whole thing. Since we want full control @@ -3197,8 +3342,9 @@ alloc_sendpkt(int how, size_t pktlen, unsigned int *maxchunks, needed = 1; *m = m_getpackets_internal(&needed, 1, M_WAIT, 1, MBIGCLBYTES); } - if (*m == NULL) + if (*m == NULL) { panic("%s: blocking allocation returned NULL\n", __func__); + } *maxchunks = needed; } @@ -3286,8 +3432,9 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) #if CONFIG_MACF_SOCKET_SUBSET /* JMM - fetch connected sockaddr? */ error = mac_socket_check_send(context.vc_ucred, so, NULL); - if (error) + if (error) { goto done2; + } #endif /* @@ -3295,7 +3442,7 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) * Should it applies to size of header and trailer? * JMM - error handling? */ - copyin(uap->nbytes, &nbytes, sizeof (off_t)); + copyin(uap->nbytes, &nbytes, sizeof(off_t)); /* * If specified, get the pointer to the sf_hdtr struct for @@ -3304,17 +3451,18 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) if (uap->hdtr != USER_ADDR_NULL) { caddr_t hdtrp; - bzero(&user_hdtr, sizeof (user_hdtr)); + bzero(&user_hdtr, sizeof(user_hdtr)); if (IS_64BIT_PROCESS(p)) { hdtrp = (caddr_t)&user64_hdtr; - sizeof_hdtr = sizeof (user64_hdtr); + sizeof_hdtr = sizeof(user64_hdtr); } else { hdtrp = (caddr_t)&user32_hdtr; - sizeof_hdtr = sizeof (user32_hdtr); + sizeof_hdtr = sizeof(user32_hdtr); } error = copyin(uap->hdtr, hdtrp, sizeof_hdtr); - if (error) + if (error) { goto done2; + } if (IS_64BIT_PROCESS(p)) { user_hdtr.headers = user64_hdtr.headers; user_hdtr.hdr_cnt = user64_hdtr.hdr_cnt; @@ -3331,7 +3479,7 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) * Send any headers. Wimp out and use writev(2). */ if (user_hdtr.headers != USER_ADDR_NULL) { - bzero(&nuap, sizeof (struct writev_args)); + bzero(&nuap, sizeof(struct writev_args)); nuap.fd = uap->s; nuap.iovp = user_hdtr.headers; nuap.iovcnt = user_hdtr.hdr_cnt; @@ -3363,15 +3511,15 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) socket_unlock(so, 1); goto done2; } - for (off = uap->offset; ; off += xfsize, sbytes += xfsize) { - mbuf_t m0 = NULL, m; - unsigned int nbufs = SFUIOBUFS, i; - uio_t auio; - char uio_buf[UIO_SIZEOF(SFUIOBUFS)]; /* 1 KB !!! */ - size_t uiolen; - user_ssize_t rlen; - off_t pgoff; - size_t pktlen; + for (off = uap->offset;; off += xfsize, sbytes += xfsize) { + mbuf_t m0 = NULL, m; + unsigned int nbufs = SFUIOBUFS, i; + uio_t auio; + char uio_buf[UIO_SIZEOF(SFUIOBUFS)]; /* 1 KB !!! 
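Editor's note: evaluating those limits under the common 4 KB page assumption (PGSHIFT = 12): SENDFILE_MAX_BYTES is 64 << 12 = 256 KB per pass, i.e. 16 sixteen-KB clusters or 64 four-KB clusters. A self-checking form of the arithmetic (run with assertions enabled):

#include <assert.h>

static void
check_sendfile_limits(void)
{
    /* SFUIOBUFS = 64 and PGSHIFT = 12 assumed, as in the macros above */
    unsigned int max_bytes = 64u << 12;             /* 256 KB per pass */

    assert(((max_bytes - 1) >> 14) + 1 == 16);      /* 16 KB clusters */
    assert(((max_bytes - 1) >> 12) + 1 == 64);      /* 4 KB clusters */
}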
*/ + size_t uiolen; + user_ssize_t rlen; + off_t pgoff; + size_t pktlen; boolean_t jumbocl; /* @@ -3394,21 +3542,27 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) } } - if (xfsize > SENDFILE_MAX_BYTES) + if (xfsize > SENDFILE_MAX_BYTES) { xfsize = SENDFILE_MAX_BYTES; - else if (xfsize > PAGE_SIZE) + } else if (xfsize > PAGE_SIZE) { xfsize = trunc_page(xfsize); + } pgoff = off & PAGE_MASK_64; - if (pgoff > 0 && PAGE_SIZE - pgoff < xfsize) + if (pgoff > 0 && PAGE_SIZE - pgoff < xfsize) { xfsize = PAGE_SIZE_64 - pgoff; - if (nbytes && xfsize > (nbytes - sbytes)) + } + if (nbytes && xfsize > (nbytes - sbytes)) { xfsize = nbytes - sbytes; - if (xfsize <= 0) + } + if (xfsize <= 0) { break; - if (off + xfsize > file_size) + } + if (off + xfsize > file_size) { xfsize = file_size - off; - if (xfsize <= 0) + } + if (xfsize <= 0) { break; + } /* * Attempt to use larger than system page-size clusters for @@ -3421,14 +3575,15 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) socket_unlock(so, 0); alloc_sendpkt(M_WAIT, xfsize, &nbufs, &m0, jumbocl); pktlen = mbuf_pkthdr_maxlen(m0); - if (pktlen < (size_t)xfsize) + if (pktlen < (size_t)xfsize) { xfsize = pktlen; + } auio = uio_createwithbuffer(nbufs, off, UIO_SYSSPACE, - UIO_READ, &uio_buf[0], sizeof (uio_buf)); + UIO_READ, &uio_buf[0], sizeof(uio_buf)); if (auio == NULL) { printf("sendfile failed. nbufs = %d. %s", nbufs, - "File a radar related to rdar://10146739.\n"); + "File a radar related to rdar://10146739.\n"); mbuf_freem(m0); error = ENXIO; socket_lock(so, 0); @@ -3440,17 +3595,19 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) i++, m = mbuf_next(m)) { size_t mlen = mbuf_maxlen(m); - if (mlen + uiolen > (size_t)xfsize) + if (mlen + uiolen > (size_t)xfsize) { mlen = xfsize - uiolen; + } mbuf_setlen(m, mlen); uio_addiov(auio, CAST_USER_ADDR_T(mbuf_datastart(m)), mlen); uiolen += mlen; } - if (xfsize != uio_resid(auio)) + if (xfsize != uio_resid(auio)) { printf("sendfile: xfsize: %lld != uio_resid(auio): " - "%lld\n", xfsize, (long long)uio_resid(auio)); + "%lld\n", xfsize, (long long)uio_resid(auio)); + } KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_READ | DBG_FUNC_START), uap->s, (unsigned int)((xfsize >> 32) & 0x0ffffffff), @@ -3475,16 +3632,18 @@ sendfile(struct proc *p, struct sendfile_args *uap, __unused int *retval) // printf("sendfile: fo_read 0 bytes, EOF\n"); break; } - if (xfsize + off > file_size) + if (xfsize + off > file_size) { printf("sendfile: xfsize: %lld + off: %lld > file_size:" "%lld\n", xfsize, off, file_size); + } for (i = 0, m = m0, rlen = 0; i < nbufs && m != NULL && rlen < xfsize; i++, m = mbuf_next(m)) { size_t mlen = mbuf_maxlen(m); - if (rlen + mlen > (size_t)xfsize) + if (rlen + mlen > (size_t)xfsize) { mlen = xfsize - rlen; + } mbuf_setlen(m, mlen); rlen += mlen; @@ -3527,7 +3686,7 @@ retry_space: KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_WAIT | DBG_FUNC_START), uap->s, 0, 0, 0, 0); error = sbwait(&so->so_snd); - KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_WAIT| + KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE_WAIT | DBG_FUNC_END), uap->s, 0, 0, 0, 0); /* * An error from sbwait usually indicates that we've @@ -3569,13 +3728,13 @@ retry_space: goto done3; } } - sbunlock(&so->so_snd, FALSE); /* will unlock socket */ + sbunlock(&so->so_snd, FALSE); /* will unlock socket */ /* * Send trailers. Wimp out and use writev(2). 
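Editor's note: the per-pass transfer size in the loop above is clamped in sequence: overall cap, page truncation, finishing a partial leading page, the caller's nbytes budget, then EOF. Isolated with simplified types; the constants are illustrative stand-ins for PAGE_SIZE and SENDFILE_MAX_BYTES:

#include <sys/types.h>

#define EX_PAGE_SIZE   4096
#define EX_MAX_BYTES   (64 * EX_PAGE_SIZE)

static off_t
sendfile_chunk(off_t off, off_t want, off_t nbytes, off_t sbytes,
    off_t file_size)
{
    off_t xfsize = want;
    off_t pgoff;

    if (xfsize > EX_MAX_BYTES) {
        xfsize = EX_MAX_BYTES;
    } else if (xfsize > EX_PAGE_SIZE) {
        xfsize -= xfsize % EX_PAGE_SIZE;    /* trunc_page() */
    }
    pgoff = off % EX_PAGE_SIZE;
    if (pgoff > 0 && EX_PAGE_SIZE - pgoff < xfsize) {
        xfsize = EX_PAGE_SIZE - pgoff;      /* finish the partial page */
    }
    if (nbytes && xfsize > nbytes - sbytes) {
        xfsize = nbytes - sbytes;           /* caller's byte budget */
    }
    if (off + xfsize > file_size) {
        xfsize = file_size - off;           /* don't read past EOF */
    }
    return xfsize;                          /* <= 0 means the loop is done */
}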
*/ if (uap->hdtr != USER_ADDR_NULL && user_hdtr.trailers != USER_ADDR_NULL) { - bzero(&nuap, sizeof (struct writev_args)); + bzero(&nuap, sizeof(struct writev_args)); nuap.fd = uap->s; nuap.iovp = user_hdtr.trailers; nuap.iovcnt = user_hdtr.trl_cnt; @@ -3592,14 +3751,14 @@ done1: done: if (uap->nbytes != USER_ADDR_NULL) { /* XXX this appears bogus for some early failure conditions */ - copyout(&sbytes, uap->nbytes, sizeof (off_t)); + copyout(&sbytes, uap->nbytes, sizeof(off_t)); } KERNEL_DEBUG_CONSTANT((DBG_FNC_SENDFILE | DBG_FUNC_END), uap->s, (unsigned int)((sbytes >> 32) & 0x0ffffffff), (unsigned int)(sbytes & 0x0ffffffff), error, 0); - return (error); + return error; done3: - sbunlock(&so->so_snd, FALSE); /* will unlock socket */ + sbunlock(&so->so_snd, FALSE); /* will unlock socket */ goto done2; } diff --git a/bsd/kern/uipc_usrreq.c b/bsd/kern/uipc_usrreq.c index 16a044f7f..2925a6fee 100644 --- a/bsd/kern/uipc_usrreq.c +++ b/bsd/kern/uipc_usrreq.c @@ -71,7 +71,7 @@ #include #include #include -#include /* XXX must be before */ +#include /* XXX must be before */ #include #include #include @@ -103,28 +103,28 @@ /* * Maximum number of FDs that can be passed in an mbuf */ -#define UIPC_MAX_CMSG_FD 512 - -#define f_msgcount f_fglob->fg_msgcount -#define f_cred f_fglob->fg_cred -#define f_ops f_fglob->fg_ops -#define f_offset f_fglob->fg_offset -#define f_data f_fglob->fg_data -struct zone *unp_zone; -static unp_gen_t unp_gencnt; -static u_int unp_count; - -static lck_attr_t *unp_mtx_attr; -static lck_grp_t *unp_mtx_grp; -static lck_grp_attr_t *unp_mtx_grp_attr; -static lck_rw_t *unp_list_mtx; - -static lck_mtx_t *unp_disconnect_lock; -static lck_mtx_t *unp_connect_lock; +#define UIPC_MAX_CMSG_FD 512 + +#define f_msgcount f_fglob->fg_msgcount +#define f_cred f_fglob->fg_cred +#define f_ops f_fglob->fg_ops +#define f_offset f_fglob->fg_offset +#define f_data f_fglob->fg_data +struct zone *unp_zone; +static unp_gen_t unp_gencnt; +static u_int unp_count; + +static lck_attr_t *unp_mtx_attr; +static lck_grp_t *unp_mtx_grp; +static lck_grp_attr_t *unp_mtx_grp_attr; +static lck_rw_t *unp_list_mtx; + +static lck_mtx_t *unp_disconnect_lock; +static lck_mtx_t *unp_connect_lock; static u_int disconnect_in_progress; extern lck_mtx_t *uipc_lock; -static struct unp_head unp_shead, unp_dhead; +static struct unp_head unp_shead, unp_dhead; /* * mDNSResponder tracing. When enabled, endpoints connected to @@ -134,11 +134,11 @@ static struct unp_head unp_shead, unp_dhead; * to the data itself; this assumes ipc_msg_hdr in dnssd_ipc.h * of mDNSResponder stays the same. 
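Editor's note: UIPC_MAX_CMSG_FD above caps how many descriptors a single control mbuf may carry; the userspace feature it backs is SCM_RIGHTS over AF_UNIX. A minimal single-descriptor sender:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int
send_fd(int sock, int fd_to_pass)
{
    char data = 'x';
    struct iovec iov = { .iov_base = &data, .iov_len = 1 };
    union {
        struct cmsghdr hdr;
        char buf[CMSG_SPACE(sizeof(int))];
    } cmsg = { 0 };
    struct msghdr msg = { 0 };
    struct cmsghdr *cm;

    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;
    msg.msg_control = cmsg.buf;
    msg.msg_controllen = CMSG_SPACE(sizeof(int));
    cm = CMSG_FIRSTHDR(&msg);
    cm->cmsg_level = SOL_SOCKET;
    cm->cmsg_type = SCM_RIGHTS;
    cm->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
    return sendmsg(sock, &msg, 0) == -1 ? -1 : 0;
}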
*/ -#define MDNSRESPONDER_PATH "/var/run/mDNSResponder" +#define MDNSRESPONDER_PATH "/var/run/mDNSResponder" -static int unpst_tracemdns; /* enable tracing */ +static int unpst_tracemdns; /* enable tracing */ -#define MDNS_IPC_MSG_HDR_VERSION_1 1 +#define MDNS_IPC_MSG_HDR_VERSION_1 1 struct mdns_ipc_msg_hdr { uint32_t version; @@ -161,23 +161,23 @@ struct mdns_ipc_msg_hdr { * need a proper out-of-band * lock pushdown */ -static struct sockaddr sun_noname = { sizeof (sun_noname), AF_LOCAL, { 0 } }; -static ino_t unp_ino; /* prototype for fake inode numbers */ - -static int unp_attach(struct socket *); -static void unp_detach(struct unpcb *); -static int unp_bind(struct unpcb *, struct sockaddr *, proc_t); -static int unp_connect(struct socket *, struct sockaddr *, proc_t); -static void unp_disconnect(struct unpcb *); -static void unp_shutdown(struct unpcb *); -static void unp_drop(struct unpcb *, int); -__private_extern__ void unp_gc(void); -static void unp_scan(struct mbuf *, void (*)(struct fileglob *, void *arg), void *arg); -static void unp_mark(struct fileglob *, __unused void *); -static void unp_discard(struct fileglob *, void *); -static int unp_internalize(struct mbuf *, proc_t); -static int unp_listen(struct unpcb *, proc_t); -static void unpcb_to_compat(struct unpcb *, struct unpcb_compat *); +static struct sockaddr sun_noname = { sizeof(sun_noname), AF_LOCAL, { 0 } }; +static ino_t unp_ino; /* prototype for fake inode numbers */ + +static int unp_attach(struct socket *); +static void unp_detach(struct unpcb *); +static int unp_bind(struct unpcb *, struct sockaddr *, proc_t); +static int unp_connect(struct socket *, struct sockaddr *, proc_t); +static void unp_disconnect(struct unpcb *); +static void unp_shutdown(struct unpcb *); +static void unp_drop(struct unpcb *, int); +__private_extern__ void unp_gc(void); +static void unp_scan(struct mbuf *, void (*)(struct fileglob *, void *arg), void *arg); +static void unp_mark(struct fileglob *, __unused void *); +static void unp_discard(struct fileglob *, void *); +static int unp_internalize(struct mbuf *, proc_t); +static int unp_listen(struct unpcb *, proc_t); +static void unpcb_to_compat(struct unpcb *, struct unpcb_compat *); static void unp_get_locks_in_order(struct socket *so, struct socket *conn_so); static void @@ -207,12 +207,13 @@ uipc_abort(struct socket *so) { struct unpcb *unp = sotounpcb(so); - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } unp_drop(unp, ECONNABORTED); unp_detach(unp); sofree(so); - return (0); + return 0; } static int @@ -220,8 +221,9 @@ uipc_accept(struct socket *so, struct sockaddr **nam) { struct unpcb *unp = sotounpcb(so); - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } /* * Pass back name of connected socket, @@ -234,7 +236,7 @@ uipc_accept(struct socket *so, struct sockaddr **nam) } else { *nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1); } - return (0); + return 0; } /* @@ -247,9 +249,10 @@ uipc_attach(struct socket *so, __unused int proto, __unused proc_t p) { struct unpcb *unp = sotounpcb(so); - if (unp != 0) - return (EISCONN); - return (unp_attach(so)); + if (unp != 0) { + return EISCONN; + } + return unp_attach(so); } static int @@ -257,10 +260,11 @@ uipc_bind(struct socket *so, struct sockaddr *nam, proc_t p) { struct unpcb *unp = sotounpcb(so); - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } - return (unp_bind(unp, nam, p)); + return unp_bind(unp, nam, p); } /* @@ -273,9 +277,10 @@ uipc_connect(struct socket 
*so, struct sockaddr *nam, proc_t p) { struct unpcb *unp = sotounpcb(so); - if (unp == 0) - return (EINVAL); - return (unp_connect(so, nam, p)); + if (unp == 0) { + return EINVAL; + } + return unp_connect(so, nam, p); } /* @@ -289,10 +294,11 @@ uipc_connect2(struct socket *so1, struct socket *so2) { struct unpcb *unp = sotounpcb(so1); - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } - return (unp_connect2(so1, so2)); + return unp_connect2(so1, so2); } /* control is EOPNOTSUPP */ @@ -302,12 +308,13 @@ uipc_detach(struct socket *so) { struct unpcb *unp = sotounpcb(so); - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } LCK_MTX_ASSERT(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED); unp_detach(unp); - return (0); + return 0; } static int @@ -315,10 +322,11 @@ uipc_disconnect(struct socket *so) { struct unpcb *unp = sotounpcb(so); - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } unp_disconnect(unp); - return (0); + return 0; } /* @@ -330,9 +338,10 @@ uipc_listen(struct socket *so, __unused proc_t p) { struct unpcb *unp = sotounpcb(so); - if (unp == 0 || unp->unp_vnode == 0) - return (EINVAL); - return (unp_listen(unp, p)); + if (unp == 0 || unp->unp_vnode == 0) { + return EINVAL; + } + return unp_listen(unp, p); } static int @@ -340,15 +349,16 @@ uipc_peeraddr(struct socket *so, struct sockaddr **nam) { struct unpcb *unp = sotounpcb(so); - if (unp == NULL) - return (EINVAL); + if (unp == NULL) { + return EINVAL; + } if (unp->unp_conn != NULL && unp->unp_conn->unp_addr != NULL) { *nam = dup_sockaddr((struct sockaddr *) unp->unp_conn->unp_addr, 1); } else { *nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1); } - return (0); + return 0; } static int @@ -357,18 +367,20 @@ uipc_rcvd(struct socket *so, __unused int flags) struct unpcb *unp = sotounpcb(so); struct socket *so2; - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } switch (so->so_type) { case SOCK_DGRAM: panic("uipc_rcvd DGRAM?"); - /*NOTREACHED*/ + /*NOTREACHED*/ case SOCK_STREAM: -#define rcv (&so->so_rcv) -#define snd (&so2->so_snd) - if (unp->unp_conn == 0) +#define rcv (&so->so_rcv) +#define snd (&so2->so_snd) + if (unp->unp_conn == 0) { break; + } so2 = unp->unp_conn->unp_socket; unp_get_locks_in_order(so, so2); @@ -391,7 +403,7 @@ uipc_rcvd(struct socket *so, __unused int flags) default: panic("uipc_rcvd unknown socktype"); } - return (0); + return 0; } /* pru_rcvoob is EOPNOTSUPP */ @@ -437,8 +449,9 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, socket_unlock(so, 0); error = unp_internalize(control, p); socket_lock(so, 0); - if (error) + if (error) { goto release; + } } switch (so->so_type) { @@ -452,8 +465,9 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, break; } error = unp_connect(so, nam, p); - if (error) + if (error) { break; + } } else { if (unp->unp_conn == 0) { error = ENOTCONN; @@ -462,13 +476,15 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, } so2 = unp->unp_conn->unp_socket; - if (so != so2) + if (so != so2) { unp_get_locks_in_order(so, so2); + } - if (unp->unp_addr) + if (unp->unp_addr) { from = (struct sockaddr *)unp->unp_addr; - else + } else { from = &sun_noname; + } /* * sbappendaddr() will fail when the receiver runs out of * space; in contrast to SOCK_STREAM, we will lose messages @@ -485,19 +501,21 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, control = NULL; } - if (so != so2) + if (so != 
so2) { socket_unlock(so2, 1); + } m = NULL; - if (nam) + if (nam) { unp_disconnect(unp); + } break; } case SOCK_STREAM: { int didreceive = 0; -#define rcv (&so2->so_rcv) -#define snd (&so->so_snd) +#define rcv (&so2->so_rcv) +#define snd (&so->so_snd) /* Connect if not connected yet. */ /* * Note: A better implementation would complain @@ -506,8 +524,9 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, if ((so->so_state & SS_ISCONNECTED) == 0) { if (nam) { error = unp_connect(so, nam, p); - if (error) - break; /* XXX */ + if (error) { + break; /* XXX */ + } } else { error = ENOTCONN; break; @@ -518,8 +537,9 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, error = EPIPE; break; } - if (unp->unp_conn == 0) + if (unp->unp_conn == 0) { panic("uipc_send connected but no connection?"); + } so2 = unp->unp_conn->unp_socket; unp_get_locks_in_order(so, so2); @@ -537,8 +557,8 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, if (unp->unp_flags & UNP_TRACE_MDNS) { struct mdns_ipc_msg_hdr hdr; - if (mbuf_copydata(m, 0, sizeof (hdr), &hdr) == 0 && - hdr.version == ntohl(MDNS_IPC_MSG_HDR_VERSION_1)) { + if (mbuf_copydata(m, 0, sizeof(hdr), &hdr) == 0 && + hdr.version == ntohl(MDNS_IPC_MSG_HDR_VERSION_1)) { printf("%s[mDNSResponder] pid=%d (%s): op=0x%x\n", __func__, p->p_pid, p->p_comm, ntohl(hdr.op)); } @@ -577,8 +597,8 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, m = NULL; #undef snd #undef rcv - } - break; + } + break; default: panic("uipc_send unknown socktype"); @@ -600,11 +620,13 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, } release: - if (control) + if (control) { m_freem(control); - if (m) + } + if (m) { m_freem(m); - return (error); + } + return error; } static int @@ -614,16 +636,18 @@ uipc_sense(struct socket *so, void *ub, int isstat64) struct socket *so2; blksize_t blksize; - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } blksize = so->so_snd.sb_hiwat; if (so->so_type == SOCK_STREAM && unp->unp_conn != 0) { so2 = unp->unp_conn->unp_socket; blksize += so2->so_rcv.sb_cc; } - if (unp->unp_ino == 0) + if (unp->unp_ino == 0) { unp->unp_ino = unp_ino++; + } if (isstat64 != 0) { struct stat64 *sb64; @@ -641,7 +665,7 @@ uipc_sense(struct socket *so, void *ub, int isstat64) sb->st_ino = (ino_t)(uintptr_t)unp->unp_ino; } - return (0); + return 0; } /* @@ -658,11 +682,12 @@ uipc_shutdown(struct socket *so) { struct unpcb *unp = sotounpcb(so); - if (unp == 0) - return (EINVAL); + if (unp == 0) { + return EINVAL; + } socantsendmore(so); unp_shutdown(unp); - return (0); + return 0; } /* @@ -674,34 +699,35 @@ uipc_sockaddr(struct socket *so, struct sockaddr **nam) { struct unpcb *unp = sotounpcb(so); - if (unp == NULL) - return (EINVAL); + if (unp == NULL) { + return EINVAL; + } if (unp->unp_addr != NULL) { *nam = dup_sockaddr((struct sockaddr *)unp->unp_addr, 1); } else { *nam = dup_sockaddr((struct sockaddr *)&sun_noname, 1); } - return (0); + return 0; } struct pr_usrreqs uipc_usrreqs = { - .pru_abort = uipc_abort, - .pru_accept = uipc_accept, - .pru_attach = uipc_attach, - .pru_bind = uipc_bind, - .pru_connect = uipc_connect, - .pru_connect2 = uipc_connect2, - .pru_detach = uipc_detach, - .pru_disconnect = uipc_disconnect, - .pru_listen = uipc_listen, - .pru_peeraddr = uipc_peeraddr, - .pru_rcvd = uipc_rcvd, - .pru_send = uipc_send, - .pru_sense = uipc_sense, - .pru_shutdown = uipc_shutdown, - .pru_sockaddr = 
uipc_sockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = uipc_abort, + .pru_accept = uipc_accept, + .pru_attach = uipc_attach, + .pru_bind = uipc_bind, + .pru_connect = uipc_connect, + .pru_connect2 = uipc_connect2, + .pru_detach = uipc_detach, + .pru_disconnect = uipc_disconnect, + .pru_listen = uipc_listen, + .pru_peeraddr = uipc_peeraddr, + .pru_rcvd = uipc_rcvd, + .pru_send = uipc_send, + .pru_sense = uipc_sense, + .pru_shutdown = uipc_shutdown, + .pru_sockaddr = uipc_sockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; int @@ -718,12 +744,13 @@ uipc_ctloutput(struct socket *so, struct sockopt *sopt) case LOCAL_PEERCRED: if (unp->unp_flags & UNP_HAVEPC) { error = sooptcopyout(sopt, &unp->unp_peercred, - sizeof (unp->unp_peercred)); + sizeof(unp->unp_peercred)); } else { - if (so->so_type == SOCK_STREAM) + if (so->so_type == SOCK_STREAM) { error = ENOTCONN; - else + } else { error = EINVAL; + } } break; case LOCAL_PEERPID: @@ -733,16 +760,18 @@ uipc_ctloutput(struct socket *so, struct sockopt *sopt) break; } peerso = unp->unp_conn->unp_socket; - if (peerso == NULL) + if (peerso == NULL) { panic("peer is connected but has no socket?"); + } unp_get_locks_in_order(so, peerso); if (sopt->sopt_name == LOCAL_PEEREPID && - peerso->so_flags & SOF_DELEGATED) + peerso->so_flags & SOF_DELEGATED) { peerpid = peerso->e_pid; - else + } else { peerpid = peerso->last_pid; + } socket_unlock(peerso, 1); - error = sooptcopyout(sopt, &peerpid, sizeof (peerpid)); + error = sooptcopyout(sopt, &peerpid, sizeof(peerpid)); break; case LOCAL_PEERUUID: case LOCAL_PEEREUUID: @@ -751,16 +780,18 @@ uipc_ctloutput(struct socket *so, struct sockopt *sopt) break; } peerso = unp->unp_conn->unp_socket; - if (peerso == NULL) + if (peerso == NULL) { panic("peer is connected but has no socket?"); + } unp_get_locks_in_order(so, peerso); if (sopt->sopt_name == LOCAL_PEEREUUID && - peerso->so_flags & SOF_DELEGATED) + peerso->so_flags & SOF_DELEGATED) { error = sooptcopyout(sopt, &peerso->e_uuid, - sizeof (peerso->e_uuid)); - else + sizeof(peerso->e_uuid)); + } else { error = sooptcopyout(sopt, &peerso->last_uuid, - sizeof (peerso->last_uuid)); + sizeof(peerso->last_uuid)); + } socket_unlock(peerso, 1); break; default: @@ -774,7 +805,7 @@ uipc_ctloutput(struct socket *so, struct sockopt *sopt) break; } - return (error); + return error; } /* @@ -786,28 +817,28 @@ uipc_ctloutput(struct socket *so, struct sockopt *sopt) * be large enough for at least one max-size datagram plus address. 
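Editor's note: the LOCAL_PEERPID / LOCAL_PEEREPID branches above (e_pid for a delegated peer, last_pid otherwise) back Darwin's peer-identification sockopts on Unix-domain sockets. Caller side:

#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>

static pid_t
peer_pid(int fd)
{
    pid_t pid = -1;
    socklen_t len = sizeof(pid);

    if (getsockopt(fd, SOL_LOCAL, LOCAL_PEERPID, &pid, &len) == -1) {
        return -1;
    }
    return pid;    /* pid of the process on the other end */
}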
*/ #ifndef PIPSIZ -#define PIPSIZ 8192 +#define PIPSIZ 8192 #endif -static u_int32_t unpst_sendspace = PIPSIZ; -static u_int32_t unpst_recvspace = PIPSIZ; -static u_int32_t unpdg_sendspace = 2*1024; /* really max datagram size */ -static u_int32_t unpdg_recvspace = 4*1024; +static u_int32_t unpst_sendspace = PIPSIZ; +static u_int32_t unpst_recvspace = PIPSIZ; +static u_int32_t unpdg_sendspace = 2 * 1024; /* really max datagram size */ +static u_int32_t unpdg_recvspace = 4 * 1024; -static int unp_rights; /* file descriptors in flight */ -static int unp_disposed; /* discarded file descriptors */ +static int unp_rights; /* file descriptors in flight */ +static int unp_disposed; /* discarded file descriptors */ SYSCTL_DECL(_net_local_stream); SYSCTL_INT(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED, - &unpst_sendspace, 0, ""); + &unpst_sendspace, 0, ""); SYSCTL_INT(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED, - &unpst_recvspace, 0, ""); + &unpst_recvspace, 0, ""); SYSCTL_INT(_net_local_stream, OID_AUTO, tracemdns, CTLFLAG_RW | CTLFLAG_LOCKED, - &unpst_tracemdns, 0, ""); + &unpst_tracemdns, 0, ""); SYSCTL_DECL(_net_local_dgram); SYSCTL_INT(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED, - &unpdg_sendspace, 0, ""); + &unpdg_sendspace, 0, ""); SYSCTL_INT(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED, - &unpdg_recvspace, 0, ""); + &unpdg_recvspace, 0, ""); SYSCTL_DECL(_net_local); SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD | CTLFLAG_LOCKED, &unp_rights, 0, ""); @@ -824,7 +855,6 @@ unp_attach(struct socket *so) if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { switch (so->so_type) { - case SOCK_STREAM: error = soreserve(so, unpst_sendspace, unpst_recvspace); break; @@ -836,16 +866,18 @@ unp_attach(struct socket *so) default: panic("unp_attach"); } - if (error) - return (error); + if (error) { + return error; + } } unp = (struct unpcb *)zalloc(unp_zone); - if (unp == NULL) - return (ENOBUFS); - bzero(unp, sizeof (*unp)); + if (unp == NULL) { + return ENOBUFS; + } + bzero(unp, sizeof(*unp)); lck_mtx_init(&unp->unp_mtx, - unp_mtx_grp, unp_mtx_attr); + unp_mtx_grp, unp_mtx_attr); lck_rw_lock_exclusive(unp_list_mtx); LIST_INIT(&unp->unp_refs); @@ -876,7 +908,7 @@ unp_attach(struct socket *so) */ so->so_rcv.sb_flags |= SB_UNIX; so->so_snd.sb_flags |= SB_UNIX; - return (0); + return 0; } static void @@ -905,11 +937,13 @@ unp_detach(struct unpcb *unp) unp->unp_vnode = NULL; } lck_mtx_unlock(unp_connect_lock); - if (tvp != NULL) - vnode_rele(tvp); /* drop the usecount */ + if (tvp != NULL) { + vnode_rele(tvp); /* drop the usecount */ + } } - if (unp->unp_conn) + if (unp->unp_conn) { unp_disconnect(unp); + } while (unp->unp_refs.lh_first) { struct unpcb *unp2 = NULL; @@ -925,15 +959,15 @@ unp_detach(struct unpcb *unp) lck_mtx_lock(unp_disconnect_lock); while (disconnect_in_progress != 0) { (void)msleep((caddr_t)&disconnect_in_progress, unp_disconnect_lock, - PSOCK, "disconnect", NULL); + PSOCK, "disconnect", NULL); } disconnect_in_progress = 1; lck_mtx_unlock(unp_disconnect_lock); /* Now we are sure that any unpcb socket disconnect is not happening */ if (unp->unp_refs.lh_first != NULL) { - unp2 = unp->unp_refs.lh_first; - socket_lock(unp2->unp_socket, 1); + unp2 = unp->unp_refs.lh_first; + socket_lock(unp2->unp_socket, 1); } lck_mtx_lock(unp_disconnect_lock); @@ -943,8 +977,8 @@ unp_detach(struct unpcb *unp) if (unp2 != NULL) { /* We already locked this socket and have a reference on it */ - 
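[Editor's note on the hunk above] The SYSCTL_INT registrations expose the buffer-size tunables under net.local.*; reading one from userspace takes a single sysctlbyname() call:

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    uint32_t maxdgram;
    size_t len = sizeof(maxdgram);

    /* OID name follows from SYSCTL_INT(_net_local_dgram, ...,
     * maxdgram, ...); readable without privileges. */
    if (sysctlbyname("net.local.dgram.maxdgram", &maxdgram, &len,
        NULL, 0) == 0)
        printf("max AF_UNIX datagram: %u bytes\n", maxdgram);
    return 0;
}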
unp_drop(unp2, ECONNRESET); - socket_unlock(unp2->unp_socket, 1); + unp_drop(unp2, ECONNRESET); + socket_unlock(unp2->unp_socket, 1); } } @@ -984,29 +1018,33 @@ unp_bind( char buf[SOCK_MAXADDRLEN]; if (nam->sa_family != 0 && nam->sa_family != AF_UNIX) { - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } /* * Check if the socket is already bound to an address */ - if (unp->unp_vnode != NULL) - return (EINVAL); + if (unp->unp_vnode != NULL) { + return EINVAL; + } /* * Check if the socket may have been shut down */ if ((so->so_state & (SS_CANTRCVMORE | SS_CANTSENDMORE)) == - (SS_CANTRCVMORE | SS_CANTSENDMORE)) - return (EINVAL); + (SS_CANTRCVMORE | SS_CANTSENDMORE)) { + return EINVAL; + } namelen = soun->sun_len - offsetof(struct sockaddr_un, sun_path); - if (namelen <= 0) - return (EINVAL); + if (namelen <= 0) { + return EINVAL; + } /* * Note: sun_path is not a zero terminated "C" string */ - if (namelen >= SOCK_MAXADDRLEN) - return (EINVAL); + if (namelen >= SOCK_MAXADDRLEN) { + return EINVAL; + } bcopy(soun->sun_path, buf, namelen); buf[namelen] = 0; @@ -1018,7 +1056,7 @@ unp_bind( error = namei(&nd); if (error) { socket_lock(so, 0); - return (error); + return error; } dvp = nd.ni_dvp; vp = nd.ni_vp; @@ -1034,7 +1072,7 @@ unp_bind( vnode_put(vp); socket_lock(so, 0); - return (EADDRINUSE); + return EADDRINUSE; } VATTR_INIT(&va); @@ -1066,16 +1104,28 @@ unp_bind( if (error) { socket_lock(so, 0); - return (error); + return error; } - vnode_ref(vp); /* gain a longterm reference */ + socket_lock(so, 0); + + if (unp->unp_vnode != NULL) { + vnode_put(vp); /* drop the iocount */ + return EINVAL; + } + + error = vnode_ref(vp); /* gain a longterm reference */ + if (error) { + vnode_put(vp); /* drop the iocount */ + return error; + } + vp->v_socket = unp->unp_socket; unp->unp_vnode = vp; unp->unp_addr = (struct sockaddr_un *)dup_sockaddr(nam, 1); - vnode_put(vp); /* drop the iocount */ + vnode_put(vp); /* drop the iocount */ - return (0); + return 0; } @@ -1100,7 +1150,7 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) { struct sockaddr_un *soun = (struct sockaddr_un *)nam; struct vnode *vp; - struct socket *so2, *so3, *list_so=NULL; + struct socket *so2, *so3, *list_so = NULL; struct unpcb *unp, *unp2, *unp3; vfs_context_t ctx = vfs_context_current(); int error, len; @@ -1108,20 +1158,22 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) char buf[SOCK_MAXADDRLEN]; if (nam->sa_family != 0 && nam->sa_family != AF_UNIX) { - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } unp = sotounpcb(so); so2 = so3 = NULL; len = nam->sa_len - offsetof(struct sockaddr_un, sun_path); - if (len <= 0) - return (EINVAL); + if (len <= 0) { + return EINVAL; + } /* * Note: sun_path is not a zero terminated "C" string */ - if (len >= SOCK_MAXADDRLEN) - return (EINVAL); + if (len >= SOCK_MAXADDRLEN) { + return EINVAL; + } bcopy(soun->sun_path, buf, len); buf[len] = 0; @@ -1132,7 +1184,7 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) error = namei(&nd); if (error) { socket_lock(so, 0); - return (error); + return error; } nameidone(&nd); vp = nd.ni_vp; @@ -1227,9 +1279,10 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) } unp2 = sotounpcb(so2); unp3 = sotounpcb(so3); - if (unp2->unp_addr) + if (unp2->unp_addr) { unp3->unp_addr = (struct sockaddr_un *) dup_sockaddr((struct sockaddr *)unp2->unp_addr, 1); + } /* * unp_peercred management: @@ -1267,10 +1320,10 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t 
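[Editor's note on the hunk above] The rewritten tail of unp_bind() is the substantive change in this file: after the namei()/vnode-creation window, the socket lock is retaken, unp_vnode is re-checked so two racing binds cannot both succeed, and vnode_ref()'s return value is finally checked instead of being ignored. The user-visible invariant is simply that rebinding a bound socket fails; a small demo, assuming /tmp is writable and the path is arbitrary:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int
main(void)
{
    struct sockaddr_un sun = { .sun_family = AF_UNIX };
    int s = socket(AF_UNIX, SOCK_STREAM, 0);

    strlcpy(sun.sun_path, "/tmp/unp_bind_demo", sizeof(sun.sun_path));
    unlink(sun.sun_path);
    if (bind(s, (struct sockaddr *)&sun, sizeof(sun)) != 0)
        perror("first bind");
    /* Rebinding an already-bound socket must fail: unp_bind()
     * re-checks unp_vnode after retaking the socket lock. */
    if (bind(s, (struct sockaddr *)&sun, sizeof(sun)) != 0)
        printf("second bind: %s (EINVAL expected)\n", strerror(errno));
    close(s);
    unlink(sun.sun_path);
    return 0;
}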
p) socket_unlock(so2, 1); socket_lock(so3, 0); sofreelastref(so3, 1); - goto out; + goto out; } memcpy(&unp->unp_peercred, &unp2->unp_peercred, - sizeof (unp->unp_peercred)); + sizeof(unp->unp_peercred)); unp->unp_flags |= UNP_HAVEPC; #if CONFIG_MACF_SOCKET @@ -1295,7 +1348,7 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) */ if (unpst_tracemdns && !strncmp(soun->sun_path, MDNSRESPONDER_PATH, - sizeof (MDNSRESPONDER_PATH))) { + sizeof(MDNSRESPONDER_PATH))) { unp->unp_flags |= UNP_TRACE_MDNS; unp2->unp_flags |= UNP_TRACE_MDNS; } @@ -1325,7 +1378,7 @@ decref_out: out: LCK_MTX_ASSERT(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED); vnode_put(vp); - return (error); + return error; } /* @@ -1339,8 +1392,9 @@ unp_connect2(struct socket *so, struct socket *so2) struct unpcb *unp = sotounpcb(so); struct unpcb *unp2; - if (so2->so_type != so->so_type) - return (EPROTOTYPE); + if (so2->so_type != so->so_type) { + return EPROTOTYPE; + } unp2 = sotounpcb(so2); @@ -1348,20 +1402,20 @@ unp_connect2(struct socket *so, struct socket *so2) LCK_MTX_ASSERT(&unp2->unp_mtx, LCK_MTX_ASSERT_OWNED); /* Verify both sockets are still opened */ - if (unp == 0 || unp2 == 0) - return (EINVAL); + if (unp == 0 || unp2 == 0) { + return EINVAL; + } unp->unp_conn = unp2; so2->so_usecount++; switch (so->so_type) { - case SOCK_DGRAM: LIST_INSERT_HEAD(&unp2->unp_refs, unp, unp_reflink); if (so != so2) { /* Avoid lock order reversals due to drop/acquire in soisconnected. */ - /* Keep an extra reference on so2 that will be dropped + /* Keep an extra reference on so2 that will be dropped * soon after getting the locks in order */ socket_unlock(so2, 0); @@ -1411,7 +1465,7 @@ unp_connect2(struct socket *so, struct socket *so2) } LCK_MTX_ASSERT(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED); LCK_MTX_ASSERT(&unp2->unp_mtx, LCK_MTX_ASSERT_OWNED); - return (0); + return 0; } static void @@ -1433,7 +1487,7 @@ unp_disconnect(struct unpcb *unp) so_locked = 0; } (void)msleep((caddr_t)&disconnect_in_progress, unp_disconnect_lock, - PSOCK, "disconnect", NULL); + PSOCK, "disconnect", NULL); } disconnect_in_progress = 1; lck_mtx_unlock(unp_disconnect_lock); @@ -1486,7 +1540,7 @@ try_again: so_locked = 0; (void)msleep(waitso->so_pcb, &unp->unp_mtx, - PSOCK | PDROP, "unpdisconnect", NULL); + PSOCK | PDROP, "unpdisconnect", NULL); goto try_again; } @@ -1498,16 +1552,17 @@ try_again: VERIFY(so2->so_usecount > 0); so2->so_usecount--; - if (unp->unp_flags & UNP_TRACE_MDNS) + if (unp->unp_flags & UNP_TRACE_MDNS) { unp->unp_flags &= ~UNP_TRACE_MDNS; + } switch (unp->unp_socket->so_type) { - case SOCK_DGRAM: LIST_REMOVE(unp, unp_reflink); unp->unp_socket->so_state &= ~SS_ISCONNECTED; - if (so != so2) + if (so != so2) { socket_unlock(so2, 1); + } break; case SOCK_STREAM: @@ -1519,14 +1574,15 @@ try_again: * we release all locks except the socket lock, this will avoid * a deadlock. 
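[Editor's note on the hunk above] unp_disconnect() opens with the same disconnect_in_progress gate seen earlier in unp_detach(): sleep until no other disconnect is running, claim the flag, and wake all waiters when done. A self-contained pthread rendering of that msleep()/wakeup() pattern — the helper names here are illustrative, not from the source:

#include <pthread.h>

static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gate_cv   = PTHREAD_COND_INITIALIZER;
static int disconnect_in_progress;

static void
disconnect_enter(void)
{
    pthread_mutex_lock(&gate_lock);
    while (disconnect_in_progress)               /* kernel: msleep() */
        pthread_cond_wait(&gate_cv, &gate_lock);
    disconnect_in_progress = 1;
    pthread_mutex_unlock(&gate_lock);
}

static void
disconnect_exit(void)
{
    pthread_mutex_lock(&gate_lock);
    disconnect_in_progress = 0;
    pthread_cond_broadcast(&gate_cv);            /* kernel: wakeup() */
    pthread_mutex_unlock(&gate_lock);
}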
*/ - unp->unp_socket->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); - unp->unp_socket->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED); + unp->unp_socket->so_state &= ~(SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING); + unp->unp_socket->so_state |= (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED); - unp2->unp_socket->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING); - unp->unp_socket->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED); + unp2->unp_socket->so_state &= ~(SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING); + unp->unp_socket->so_state |= (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED); - if (unp2->unp_flags & UNP_TRACE_MDNS) + if (unp2->unp_flags & UNP_TRACE_MDNS) { unp2->unp_flags &= ~UNP_TRACE_MDNS; + } strdisconn = 1; break; @@ -1544,7 +1600,7 @@ out: soisdisconnected(so2); socket_unlock(so2, 1); - socket_lock(so,0); + socket_lock(so, 0); soisdisconnected(so); } LCK_MTX_ASSERT(&unp->unp_mtx, LCK_MTX_ASSERT_OWNED); @@ -1614,15 +1670,15 @@ unp_pcblist SYSCTL_HANDLER_ARGS */ if (req->oldptr == USER_ADDR_NULL) { n = unp_count; - req->oldidx = 2 * sizeof (xug) + (n + n / 8) * - sizeof (struct xunpcb); + req->oldidx = 2 * sizeof(xug) + (n + n / 8) * + sizeof(struct xunpcb); lck_rw_done(unp_list_mtx); - return (0); + return 0; } if (req->newptr != USER_ADDR_NULL) { lck_rw_done(unp_list_mtx); - return (EPERM); + return EPERM; } /* @@ -1631,38 +1687,39 @@ unp_pcblist SYSCTL_HANDLER_ARGS gencnt = unp_gencnt; n = unp_count; - bzero(&xug, sizeof (xug)); - xug.xug_len = sizeof (xug); + bzero(&xug, sizeof(xug)); + xug.xug_len = sizeof(xug); xug.xug_count = n; xug.xug_gen = gencnt; xug.xug_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xug, sizeof (xug)); + error = SYSCTL_OUT(req, &xug, sizeof(xug)); if (error) { lck_rw_done(unp_list_mtx); - return (error); + return error; } /* * We are done if there is no pcb */ - if (n == 0) { + if (n == 0) { lck_rw_done(unp_list_mtx); - return (0); + return 0; } - MALLOC(unp_list, struct unpcb **, n * sizeof (*unp_list), + MALLOC(unp_list, struct unpcb **, n * sizeof(*unp_list), M_TEMP, M_WAITOK); if (unp_list == 0) { lck_rw_done(unp_list_mtx); - return (ENOMEM); + return ENOMEM; } for (unp = head->lh_first, i = 0; unp && i < n; unp = unp->unp_link.le_next) { - if (unp->unp_gencnt <= gencnt) + if (unp->unp_gencnt <= gencnt) { unp_list[i++] = unp; + } } - n = i; /* in case we lost some during malloc */ + n = i; /* in case we lost some during malloc */ error = 0; for (i = 0; i < n; i++) { @@ -1670,24 +1727,26 @@ unp_pcblist SYSCTL_HANDLER_ARGS if (unp->unp_gencnt <= gencnt) { struct xunpcb xu; - bzero(&xu, sizeof (xu)); - xu.xu_len = sizeof (xu); + bzero(&xu, sizeof(xu)); + xu.xu_len = sizeof(xu); xu.xu_unpp = (_UNPCB_PTR(struct unpcb_compat *)) VM_KERNEL_ADDRPERM(unp); /* * XXX - need more locking here to protect against * connect/disconnect races for SMP. */ - if (unp->unp_addr) + if (unp->unp_addr) { bcopy(unp->unp_addr, &xu.xu_addr, unp->unp_addr->sun_len); - if (unp->unp_conn && unp->unp_conn->unp_addr) + } + if (unp->unp_conn && unp->unp_conn->unp_addr) { bcopy(unp->unp_conn->unp_addr, &xu.xu_caddr, unp->unp_conn->unp_addr->sun_len); + } unpcb_to_compat(unp, &xu.xu_unp); sotoxsocket(unp->unp_socket, &xu.xu_socket); - error = SYSCTL_OUT(req, &xu, sizeof (xu)); + error = SYSCTL_OUT(req, &xu, sizeof(xu)); } } if (!error) { @@ -1698,26 +1757,26 @@ unp_pcblist SYSCTL_HANDLER_ARGS * while we were processing this request, and it * might be necessary to retry. 
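[Editor's note on the hunk above] unp_pcblist() follows the standard two-call sysctl protocol: a probe with oldptr == NULL yields a size estimate padded by n/8 so that sockets created between the two calls still fit. The matching userspace sequence:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    size_t len = 0;
    void *buf;

    /* First call: NULL buffer; the kernel reports the padded size. */
    if (sysctlbyname("net.local.stream.pcblist", NULL, &len,
        NULL, 0) != 0) {
        perror("size probe");
        return 1;
    }
    buf = malloc(len);
    /* Second call: fetch the xunpgen/xunpcb records themselves. */
    if (buf != NULL && sysctlbyname("net.local.stream.pcblist",
        buf, &len, NULL, 0) == 0)
        printf("%zu bytes of local-socket records\n", len);
    free(buf);
    return 0;
}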
*/ - bzero(&xug, sizeof (xug)); - xug.xug_len = sizeof (xug); + bzero(&xug, sizeof(xug)); + xug.xug_len = sizeof(xug); xug.xug_gen = unp_gencnt; xug.xug_sogen = so_gencnt; xug.xug_count = unp_count; - error = SYSCTL_OUT(req, &xug, sizeof (xug)); + error = SYSCTL_OUT(req, &xug, sizeof(xug)); } FREE(unp_list, M_TEMP); lck_rw_done(unp_list_mtx); - return (error); + return error; } SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb", - "List of active local datagram sockets"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist, "S,xunpcb", + "List of active local datagram sockets"); SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", - "List of active local stream sockets"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist, "S,xunpcb", + "List of active local stream sockets"); #if !CONFIG_EMBEDDED @@ -1740,15 +1799,15 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS */ if (req->oldptr == USER_ADDR_NULL) { n = unp_count; - req->oldidx = 2 * sizeof (xug) + (n + n / 8) * - (sizeof (struct xunpcb64)); + req->oldidx = 2 * sizeof(xug) + (n + n / 8) * + (sizeof(struct xunpcb64)); lck_rw_done(unp_list_mtx); - return (0); + return 0; } if (req->newptr != USER_ADDR_NULL) { lck_rw_done(unp_list_mtx); - return (EPERM); + return EPERM; } /* @@ -1757,45 +1816,46 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS gencnt = unp_gencnt; n = unp_count; - bzero(&xug, sizeof (xug)); - xug.xug_len = sizeof (xug); + bzero(&xug, sizeof(xug)); + xug.xug_len = sizeof(xug); xug.xug_count = n; xug.xug_gen = gencnt; xug.xug_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xug, sizeof (xug)); + error = SYSCTL_OUT(req, &xug, sizeof(xug)); if (error) { lck_rw_done(unp_list_mtx); - return (error); + return error; } /* * We are done if there is no pcb */ - if (n == 0) { + if (n == 0) { lck_rw_done(unp_list_mtx); - return (0); + return 0; } - MALLOC(unp_list, struct unpcb **, n * sizeof (*unp_list), + MALLOC(unp_list, struct unpcb **, n * sizeof(*unp_list), M_TEMP, M_WAITOK); if (unp_list == 0) { lck_rw_done(unp_list_mtx); - return (ENOMEM); + return ENOMEM; } for (unp = head->lh_first, i = 0; unp && i < n; unp = unp->unp_link.le_next) { - if (unp->unp_gencnt <= gencnt) + if (unp->unp_gencnt <= gencnt) { unp_list[i++] = unp; + } } - n = i; /* in case we lost some during malloc */ + n = i; /* in case we lost some during malloc */ error = 0; for (i = 0; i < n; i++) { unp = unp_list[i]; if (unp->unp_gencnt <= gencnt) { struct xunpcb64 xu; - size_t xu_len = sizeof(struct xunpcb64); + size_t xu_len = sizeof(struct xunpcb64); bzero(&xu, xu_len); xu.xu_len = xu_len; @@ -1821,20 +1881,23 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS xu.xunp_mbcnt = unp->unp_mbcnt; xu.xunp_gencnt = unp->unp_gencnt; - if (unp->unp_socket) + if (unp->unp_socket) { sotoxsocket64(unp->unp_socket, &xu.xu_socket); + } /* * XXX - need more locking here to protect against * connect/disconnect races for SMP. 
*/ - if (unp->unp_addr) - bcopy(unp->unp_addr, &xu.xunp_addr, - unp->unp_addr->sun_len); - if (unp->unp_conn && unp->unp_conn->unp_addr) - bcopy(unp->unp_conn->unp_addr, - &xu.xunp_caddr, - unp->unp_conn->unp_addr->sun_len); + if (unp->unp_addr) { + bcopy(unp->unp_addr, &xu.xunp_addr, + unp->unp_addr->sun_len); + } + if (unp->unp_conn && unp->unp_conn->unp_addr) { + bcopy(unp->unp_conn->unp_addr, + &xu.xunp_caddr, + unp->unp_conn->unp_addr->sun_len); + } error = SYSCTL_OUT(req, &xu, xu_len); } @@ -1847,26 +1910,26 @@ unp_pcblist64 SYSCTL_HANDLER_ARGS * while we were processing this request, and it * might be necessary to retry. */ - bzero(&xug, sizeof (xug)); - xug.xug_len = sizeof (xug); + bzero(&xug, sizeof(xug)); + xug.xug_len = sizeof(xug); xug.xug_gen = unp_gencnt; xug.xug_sogen = so_gencnt; xug.xug_count = unp_count; - error = SYSCTL_OUT(req, &xug, sizeof (xug)); + error = SYSCTL_OUT(req, &xug, sizeof(xug)); } FREE(unp_list, M_TEMP); lck_rw_done(unp_list_mtx); - return (error); + return error; } SYSCTL_PROC(_net_local_dgram, OID_AUTO, pcblist64, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist64, "S,xunpcb64", - "List of active local datagram sockets 64 bit"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + (caddr_t)(long)SOCK_DGRAM, 0, unp_pcblist64, "S,xunpcb64", + "List of active local datagram sockets 64 bit"); SYSCTL_PROC(_net_local_stream, OID_AUTO, pcblist64, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist64, "S,xunpcb64", - "List of active local stream sockets 64 bit"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + (caddr_t)(long)SOCK_STREAM, 0, unp_pcblist64, "S,xunpcb64", + "List of active local stream sockets 64 bit"); #endif /* !CONFIG_EMBEDDED */ @@ -1900,18 +1963,18 @@ unp_drop(struct unpcb *unp, int errno) int unp_externalize(struct mbuf *rights) { - proc_t p = current_proc(); /* XXX */ + proc_t p = current_proc(); /* XXX */ int i; struct cmsghdr *cm = mtod(rights, struct cmsghdr *); struct fileglob **rp = (struct fileglob **)(cm + 1); int *fds = (int *)(cm + 1); struct fileproc *fp; struct fileproc **fileproc_l; - int newfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int); + int newfds = (cm->cmsg_len - sizeof(*cm)) / sizeof(int); int f, error = 0; MALLOC(fileproc_l, struct fileproc **, - newfds * sizeof (struct fileproc *), M_TEMP, M_WAITOK); + newfds * sizeof(struct fileproc *), M_TEMP, M_WAITOK); if (fileproc_l == NULL) { error = ENOMEM; goto discard; @@ -1949,15 +2012,16 @@ unp_externalize(struct mbuf *rights) continue; } #endif - if (fdalloc(p, 0, &f)) + if (fdalloc(p, 0, &f)) { panic("unp_externalize:fdalloc"); + } fp = fileproc_alloc_init(NULL); - if (fp == NULL) + if (fp == NULL) { panic("unp_externalize: MALLOC_ZONE"); + } fp->f_iocount = 0; fp->f_fglob = rp[i]; if (fg_removeuipc_mark(rp[i])) { - /* * Take an iocount on the fp for completing the * removal from the global msg queue @@ -1983,31 +2047,34 @@ unp_externalize(struct mbuf *rights) fp_drop(p, fds[i], fileproc_l[i], 0); fileproc_l[i] = NULL; } - if (fds[i] != 0) + if (fds[i] != 0) { (void) OSAddAtomic(-1, &unp_rights); + } } discard: - if (fileproc_l != NULL) + if (fileproc_l != NULL) { FREE(fileproc_l, M_TEMP); + } if (error) { for (i = 0; i < newfds; i++) { unp_discard(*rp, p); *rp++ = NULL; } } - return (error); + return error; } void unp_init(void) { _CASSERT(UIPC_MAX_CMSG_FD >= (MCLBYTES / sizeof(int))); - unp_zone = zinit(sizeof (struct unpcb), - (nmbclusters * sizeof (struct unpcb)), 4096, "unpzone"); + 
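[Editor's note on the hunk above] unp_externalize() is the receiving half of descriptor passing: it rewrites the fileglob pointers queued in the control mbuf into freshly allocated fds in the receiver's table. Userspace sees this as a plain SCM_RIGHTS control message:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Receive a single fd over an AF_UNIX socket; returns -1 on error. */
static int
recv_fd(int sock)
{
    char byte;
    struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
    union {
        struct cmsghdr hdr;
        char buf[CMSG_SPACE(sizeof(int))];
    } u;
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
    };
    struct cmsghdr *cm;
    int fd = -1;

    if (recvmsg(sock, &msg, 0) < 0)
        return -1;
    cm = CMSG_FIRSTHDR(&msg);
    if (cm != NULL && cm->cmsg_level == SOL_SOCKET &&
        cm->cmsg_type == SCM_RIGHTS)
        memcpy(&fd, CMSG_DATA(cm), sizeof(fd));
    return fd;
}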
unp_zone = zinit(sizeof(struct unpcb), + (nmbclusters * sizeof(struct unpcb)), 4096, "unpzone"); - if (unp_zone == 0) + if (unp_zone == 0) { panic("unp_init"); + } LIST_INIT(&unp_dhead); LIST_INIT(&unp_shead); @@ -2021,20 +2088,22 @@ unp_init(void) unp_mtx_attr = lck_attr_alloc_init(); if ((unp_list_mtx = lck_rw_alloc_init(unp_mtx_grp, - unp_mtx_attr)) == NULL) - return; /* pretty much dead if this fails... */ - + unp_mtx_attr)) == NULL) { + return; /* pretty much dead if this fails... */ + } if ((unp_disconnect_lock = lck_mtx_alloc_init(unp_mtx_grp, - unp_mtx_attr)) == NULL) + unp_mtx_attr)) == NULL) { return; + } if ((unp_connect_lock = lck_mtx_alloc_init(unp_mtx_grp, - unp_mtx_attr)) == NULL) + unp_mtx_attr)) == NULL) { return; + } } #ifndef MIN -#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif /* @@ -2056,9 +2125,9 @@ unp_internalize(struct mbuf *control, proc_t p) /* 64bit: cmsg_len is 'uint32_t', m_len is 'long' */ if (cm->cmsg_type != SCM_RIGHTS || cm->cmsg_level != SOL_SOCKET || (socklen_t)cm->cmsg_len != (socklen_t)control->m_len) { - return (EINVAL); + return EINVAL; } - oldfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int); + oldfds = (cm->cmsg_len - sizeof(*cm)) / sizeof(int); bzero(fg_ins, sizeof(fg_ins)); proc_fdlock(p); @@ -2068,15 +2137,15 @@ unp_internalize(struct mbuf *control, proc_t p) struct fileproc *tmpfp; if (((error = fdgetf_noref(p, fds[i], &tmpfp)) != 0)) { proc_fdunlock(p); - return (error); + return error; } else if (!file_issendable(p, tmpfp)) { proc_fdunlock(p); - return (EINVAL); + return EINVAL; } else if (FP_ISGUARDED(tmpfp, GUARD_SOCKET_IPC)) { error = fp_guard_exception(p, - fds[i], tmpfp, kGUARD_EXC_SOCKET_IPC); + fds[i], tmpfp, kGUARD_EXC_SOCKET_IPC); proc_fdunlock(p); - return (error); + return error; } } rp = (struct fileglob **)(cm + 1); @@ -2086,8 +2155,9 @@ unp_internalize(struct mbuf *control, proc_t p) */ for (i = (oldfds - 1); i >= 0; i--) { (void) fdgetf_noref(p, fds[i], &fp); - if (fg_insertuipc_mark(fp->f_fglob)) + if (fg_insertuipc_mark(fp->f_fglob)) { fg_ins[i / 8] |= 0x80 >> (i % 8); + } rp[i] = fp->f_fglob; } proc_fdunlock(p); @@ -2100,22 +2170,23 @@ unp_internalize(struct mbuf *control, proc_t p) (void) OSAddAtomic(1, &unp_rights); } - return (0); + return 0; } -static int unp_defer, unp_gcing, unp_gcwait; +static int unp_defer, unp_gcing, unp_gcwait; static thread_t unp_gcthread = NULL; /* always called under uipc_lock */ void unp_gc_wait(void) { - if (unp_gcthread == current_thread()) + if (unp_gcthread == current_thread()) { return; + } while (unp_gcing != 0) { unp_gcwait = 1; - msleep(&unp_gcing, uipc_lock, 0 , "unp_gc_wait", NULL); + msleep(&unp_gcing, uipc_lock, 0, "unp_gc_wait", NULL); } } @@ -2145,7 +2216,7 @@ unp_gc(void) */ for (fg = fmsghead.lh_first; fg != 0; fg = fg->f_msglist.le_next) { lck_mtx_lock(&fg->fg_lock); - fg->fg_flag &= ~(FMARK|FDEFER); + fg->fg_flag &= ~(FMARK | FDEFER); lck_mtx_unlock(&fg->fg_lock); } do { @@ -2202,7 +2273,7 @@ unp_gc(void) continue; } if (so->so_proto->pr_domain != localdomain || - (so->so_proto->pr_flags&PR_RIGHTS) == 0) { + (so->so_proto->pr_flags & PR_RIGHTS) == 0) { lck_mtx_unlock(&fg->fg_lock); continue; } @@ -2276,10 +2347,11 @@ unp_gc(void) * * 91/09/19, bsy@cs.cmu.edu */ - extra_ref = _MALLOC(nfiles * sizeof (struct fileglob *), + extra_ref = _MALLOC(nfiles * sizeof(struct fileglob *), M_FILEGLOB, M_WAITOK); - if (extra_ref == NULL) + if (extra_ref == NULL) { goto bail; + } for (nunref = 0, fg = fmsghead.lh_first, fpp = extra_ref; fg != 
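[Editor's note on the hunk above] unp_internalize() is the sending half: each fd is validated (sendable, not guarded), then the int array in the cmsg payload is replaced in place by fileglob pointers. Its userspace trigger is sendmsg() with SCM_RIGHTS:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send fd_to_pass over an AF_UNIX socket; the kernel side of this
 * call is unp_internalize(). Returns 0 on success. */
static int
send_fd(int sock, int fd_to_pass)
{
    char byte = 0;
    struct iovec iov = { .iov_base = &byte, .iov_len = 1 };
    union {
        struct cmsghdr hdr;
        char buf[CMSG_SPACE(sizeof(int))];
    } u;
    struct msghdr msg = {
        .msg_iov = &iov, .msg_iovlen = 1,
        .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
    };
    struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

    cm->cmsg_level = SOL_SOCKET;
    cm->cmsg_type = SCM_RIGHTS;
    cm->cmsg_len = CMSG_LEN(sizeof(int));
    memcpy(CMSG_DATA(cm), &fd_to_pass, sizeof(int));
    return sendmsg(sock, &msg, 0) < 0 ? -1 : 0;
}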
0; fg = nextfg) { lck_mtx_lock(&fg->fg_lock); @@ -2324,12 +2396,13 @@ unp_gc(void) socket_unlock(so, 0); } } - for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) + for (i = nunref, fpp = extra_ref; --i >= 0; ++fpp) { closef_locked((struct fileproc *)0, *fpp, (proc_t)NULL); + } - FREE((caddr_t)extra_ref, M_FILEGLOB); + FREE(extra_ref, M_FILEGLOB); bail: - lck_mtx_lock(uipc_lock); + lck_mtx_lock(uipc_lock); unp_gcing = 0; unp_gcthread = NULL; @@ -2339,8 +2412,9 @@ bail: } lck_mtx_unlock(uipc_lock); - if (need_gcwakeup != 0) + if (need_gcwakeup != 0) { wakeup(&unp_gcing); + } } void @@ -2361,7 +2435,7 @@ unp_listen(struct unpcb *unp, proc_t p) cru2x(safecred, &unp->unp_peercred); kauth_cred_unref(&safecred); unp->unp_flags |= UNP_HAVEPCCACHED; - return (0); + return 0; } static void @@ -2374,20 +2448,23 @@ unp_scan(struct mbuf *m0, void (*op)(struct fileglob *, void *arg), void *arg) int qfds; while (m0) { - for (m = m0; m; m = m->m_next) + for (m = m0; m; m = m->m_next) { if (m->m_type == MT_CONTROL && - (size_t)m->m_len >= sizeof (*cm)) { + (size_t)m->m_len >= sizeof(*cm)) { cm = mtod(m, struct cmsghdr *); if (cm->cmsg_level != SOL_SOCKET || - cm->cmsg_type != SCM_RIGHTS) + cm->cmsg_type != SCM_RIGHTS) { continue; - qfds = (cm->cmsg_len - sizeof (*cm)) / - sizeof (int); + } + qfds = (cm->cmsg_len - sizeof(*cm)) / + sizeof(int); rp = (struct fileglob **)(cm + 1); - for (i = 0; i < qfds; i++) + for (i = 0; i < qfds; i++) { (*op)(*rp++, arg); - break; /* XXX, but saves time */ + } + break; /* XXX, but saves time */ } + } m0 = m0->m_act; } } @@ -2401,7 +2478,7 @@ unp_mark(struct fileglob *fg, __unused void *arg) lck_mtx_unlock(&fg->fg_lock); return; } - fg->fg_flag |= (FMARK|FDEFER); + fg->fg_flag |= (FMARK | FDEFER); lck_mtx_unlock(&fg->fg_lock); @@ -2411,9 +2488,9 @@ unp_mark(struct fileglob *fg, __unused void *arg) static void unp_discard(struct fileglob *fg, void *p) { - if (p == NULL) - p = current_proc(); /* XXX */ - + if (p == NULL) { + p = current_proc(); /* XXX */ + } (void) OSAddAtomic(1, &unp_disposed); if (fg_removeuipc_mark(fg)) { VERIFY(fg->fg_lflags & FG_RMMSGQ); @@ -2428,62 +2505,70 @@ unp_discard(struct fileglob *fg, void *p) int unp_lock(struct socket *so, int refcount, void * lr) - { - void * lr_saved; - if (lr == 0) - lr_saved = (void *) __builtin_return_address(0); - else lr_saved = lr; - - if (so->so_pcb) { - lck_mtx_lock(&((struct unpcb *)so->so_pcb)->unp_mtx); - } else { - panic("unp_lock: so=%p NO PCB! lr=%p ref=0x%x\n", - so, lr_saved, so->so_usecount); - } - - if (so->so_usecount < 0) - panic("unp_lock: so=%p so_pcb=%p lr=%p ref=0x%x\n", - so, so->so_pcb, lr_saved, so->so_usecount); - - if (refcount) { +{ + void * lr_saved; + if (lr == 0) { + lr_saved = (void *) __builtin_return_address(0); + } else { + lr_saved = lr; + } + + if (so->so_pcb) { + lck_mtx_lock(&((struct unpcb *)so->so_pcb)->unp_mtx); + } else { + panic("unp_lock: so=%p NO PCB! 
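[Editor's note on the hunk above] The mark-and-sweep pass of unp_gc() exists because descriptor passing can form reference cycles that no close() will ever break. Using the send_fd() sketch from the previous hunk, the canonical leak it reaps looks like this:

#include <sys/socket.h>
#include <unistd.h>

/* Each end of the pair is queued inside the other, then both user
 * references are closed: the fileglobs stay referenced but become
 * unreachable, and only unp_gc()'s sweep can detect and close them. */
static void
make_fd_cycle(void)
{
    int sv[2];

    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv) != 0)
        return;
    send_fd(sv[0], sv[1]);   /* sv[1] now lives only in sv[0]'s queue */
    send_fd(sv[1], sv[0]);   /* and vice versa */
    close(sv[0]);
    close(sv[1]);            /* cycle persists until unp_gc() runs */
}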
lr=%p ref=0x%x\n", + so, lr_saved, so->so_usecount); + } + + if (so->so_usecount < 0) { + panic("unp_lock: so=%p so_pcb=%p lr=%p ref=0x%x\n", + so, so->so_pcb, lr_saved, so->so_usecount); + } + + if (refcount) { VERIFY(so->so_usecount > 0); so->so_usecount++; } - so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; - return (0); + so->lock_lr[so->next_lock_lr] = lr_saved; + so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; + return 0; } int unp_unlock(struct socket *so, int refcount, void * lr) { - void * lr_saved; - lck_mtx_t * mutex_held = NULL; + void * lr_saved; + lck_mtx_t * mutex_held = NULL; struct unpcb *unp = sotounpcb(so); - if (lr == 0) - lr_saved = (void *) __builtin_return_address(0); - else lr_saved = lr; - - if (refcount) - so->so_usecount--; - - if (so->so_usecount < 0) - panic("unp_unlock: so=%p usecount=%x\n", so, so->so_usecount); - if (so->so_pcb == NULL) { - panic("unp_unlock: so=%p NO PCB usecount=%x\n", so, so->so_usecount); - } else { - mutex_held = &((struct unpcb *)so->so_pcb)->unp_mtx; - } - LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); - so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; - - if (so->so_usecount == 0 && (so->so_flags & SOF_PCBCLEARING)) { + if (lr == 0) { + lr_saved = (void *) __builtin_return_address(0); + } else { + lr_saved = lr; + } + + if (refcount) { + so->so_usecount--; + } + + if (so->so_usecount < 0) { + panic("unp_unlock: so=%p usecount=%x\n", so, so->so_usecount); + } + if (so->so_pcb == NULL) { + panic("unp_unlock: so=%p NO PCB usecount=%x\n", so, so->so_usecount); + } else { + mutex_held = &((struct unpcb *)so->so_pcb)->unp_mtx; + } + LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); + so->unlock_lr[so->next_unlock_lr] = lr_saved; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; + + if (so->so_usecount == 0 && (so->so_flags & SOF_PCBCLEARING)) { sofreelastref(so, 1); - if (unp->unp_addr) + if (unp->unp_addr) { FREE(unp->unp_addr, M_SONAME); + } lck_mtx_unlock(mutex_held); @@ -2495,21 +2580,22 @@ unp_unlock(struct socket *so, int refcount, void * lr) lck_mtx_unlock(mutex_held); } - return (0); + return 0; } lck_mtx_t * unp_getlock(struct socket *so, __unused int flags) { - struct unpcb *unp = (struct unpcb *)so->so_pcb; + struct unpcb *unp = (struct unpcb *)so->so_pcb; - if (so->so_pcb) { - if (so->so_usecount < 0) - panic("unp_getlock: so=%p usecount=%x\n", so, so->so_usecount); - return(&unp->unp_mtx); - } else { - panic("unp_getlock: so=%p NULL so_pcb\n", so); - return (so->so_proto->pr_domain->dom_mtx); - } + if (so->so_pcb) { + if (so->so_usecount < 0) { + panic("unp_getlock: so=%p usecount=%x\n", so, so->so_usecount); + } + return &unp->unp_mtx; + } else { + panic("unp_getlock: so=%p NULL so_pcb\n", so); + return so->so_proto->pr_domain->dom_mtx; + } } diff --git a/bsd/libkern/bcd.c b/bsd/libkern/bcd.c index a8cbec8ee..1ef895eb9 100644 --- a/bsd/libkern/bcd.c +++ b/bsd/libkern/bcd.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
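[Editor's note on the hunk above] unp_lock()/unp_unlock() record each caller's return address into small per-socket rings (lock_lr/unlock_lr, SO_LCKDBG_MAX entries) so a panic can reconstruct the last few lock transitions. A stripped-down sketch of that ring, with illustrative names:

#define LCKDBG_MAX 4   /* illustrative; the kernel's is SO_LCKDBG_MAX */

static void *lock_lr[LCKDBG_MAX];
static unsigned next_lock_lr;

/* Record who took the lock; pass NULL to capture our own caller,
 * mirroring the lr == 0 case in unp_lock(). */
static void
note_lock_caller(void *lr)
{
    if (lr == NULL)
        lr = __builtin_return_address(0);
    lock_lr[next_lock_lr] = lr;
    next_lock_lr = (next_lock_lr + 1) % LCKDBG_MAX;
}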
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,7 +33,7 @@ #include "libkern.h" u_char const bcd2bin_data[] = { - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 0, 0, 0, 0, 0, 0, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 0, 0, 0, 0, 0, 0, diff --git a/bsd/libkern/copyio.h b/bsd/libkern/copyio.h index 1bec805e4..8162ded60 100644 --- a/bsd/libkern/copyio.h +++ b/bsd/libkern/copyio.h @@ -32,8 +32,8 @@ __BEGIN_DECLS -int copyin(const user_addr_t uaddr, void *kaddr, size_t len); -int copyout(const void *kaddr, user_addr_t udaddr, size_t len); +int copyin(const user_addr_t uaddr, void *kaddr, size_t len); +int copyout(const void *kaddr, user_addr_t udaddr, size_t len); #if defined (_FORTIFY_SOURCE) && _FORTIFY_SOURCE == 0 /* FORTIFY_SOURCE disabled */ @@ -50,7 +50,7 @@ __copyin_chk(const user_addr_t uaddr, void *kaddr, size_t len, size_t chk_size) __attribute__((always_inline)) static inline int __copyout_chk(const void *kaddr, user_addr_t uaddr, size_t len, size_t chk_size) { - if (chk_size < len) { + if (chk_size < len) { panic("__copyout_chk object size check failed: uaddr %p, kaddr %p, (%zu < %zu)", (void*)uaddr, kaddr, len, chk_size); } return copyout(kaddr, uaddr, len); diff --git a/bsd/libkern/crc16.c b/bsd/libkern/crc16.c index 9424dee87..416767db5 100644 --- a/bsd/libkern/crc16.c +++ b/bsd/libkern/crc16.c @@ -71,10 +71,11 @@ crc16(uint16_t crc, const void *buf, size_t size) p = buf; - while (size--) + while (size--) { crc = crc16_tab[(crc ^ (*p++)) & 0xFF] ^ (crc >> 8); + } - return crc; + return crc; } #if KASAN diff --git a/bsd/libkern/crc32.c b/bsd/libkern/crc32.c index d8f5e345d..a503508b3 100644 --- a/bsd/libkern/crc32.c +++ b/bsd/libkern/crc32.c @@ -45,16 +45,16 @@ static uint32_t crc32_tab[] = { 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, - 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, - 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, - 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, - 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, - 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, + 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 
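[Editor's note on the copyio.h hunk above] The __copyin_chk/__copyout_chk wrappers are _FORTIFY_SOURCE hooks: the compiler substitutes the static size of the kernel buffer, and the wrapper panics when a copy would overflow it. A userspace analogue of the mechanism, built on a GCC/clang builtin:

#include <stdlib.h>
#include <string.h>

/* Fortified copy: dst_size comes from __builtin_object_size, which
 * yields the static size of dst, or (size_t)-1 when it is unknown
 * (so unknown sizes never trip the check). Kernel panics; we abort. */
static inline void
copy_chk(void *dst, const void *src, size_t len, size_t dst_size)
{
    if (dst_size < len)
        abort();
    memcpy(dst, src, len);
}

#define COPY(dst, src, len) \
    copy_chk((dst), (src), (len), __builtin_object_size((dst), 0))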
0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, + 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, @@ -97,8 +97,9 @@ crc32(uint32_t crc, const void *buf, size_t size) p = buf; crc = crc ^ ~0U; - while (size--) + while (size--) { crc = crc32_tab[(crc ^ *p++) & 0xFF] ^ (crc >> 8); + } return crc ^ ~0U; } diff --git a/bsd/libkern/libkern.h b/bsd/libkern/libkern.h index 73545298d..9a4ecb489 100644 --- a/bsd/libkern/libkern.h +++ b/bsd/libkern/libkern.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -71,7 +71,7 @@ #include #include -#include /* for platform-specific va_list */ +#include /* for platform-specific va_list */ #include #include #include @@ -86,88 +86,88 @@ #ifdef __APPLE_API_OBSOLETE /* BCD conversions. */ -extern u_char const bcd2bin_data[]; -extern u_char const bin2bcd_data[]; +extern u_char const bcd2bin_data[]; +extern u_char const bin2bcd_data[]; -#define bcd2bin(bcd) (bcd2bin_data[bcd]) -#define bin2bcd(bin) (bin2bcd_data[bin]) +#define bcd2bin(bcd) (bcd2bin_data[bcd]) +#define bin2bcd(bin) (bin2bcd_data[bin]) #endif /* __APPLE_API_OBSOLETE */ #ifdef __APPLE_API_PRIVATE -extern char const hex2ascii_data[]; +extern char const hex2ascii_data[]; -#define hex2ascii(hex) (hex2ascii_data[hex]) +#define hex2ascii(hex) (hex2ascii_data[hex]) #endif /* __APPLE_API_PRIVATE */ __BEGIN_DECLS static inline int imax(int a, int b) { - return (a > b ? a : b); + return a > b ? a : b; } static inline int imin(int a, int b) { - return (a < b ? a : b); + return a < b ? a : b; } static inline long lmax(long a, long b) { - return (a > b ? a : b); + return a > b ? a : b; } static inline long lmin(long a, long b) { - return (a < b ? a : b); + return a < b ? a : b; } static inline u_int max(u_int a, u_int b) { - return (a > b ? a : b); + return a > b ? a : b; } static inline u_int min(u_int a, u_int b) { - return (a < b ? 
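[Editor's note on the hunk above] crc32() is the standard reflected (zlib-compatible) CRC-32: seed it with 0, and the function applies the ~0 pre- and post-conditioning itself. A quick self-check against the well-known test vector:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* "123456789" is the conventional CRC check input; its CRC-32 under
 * this (reflected, IEEE 802.3) polynomial is 0xCBF43926. */
static void
crc32_selftest(void)
{
    const char *msg = "123456789";

    assert(crc32(0, msg, strlen(msg)) == 0xCBF43926);
}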
a : b); + return a < b ? a : b; } static inline u_int32_t ulmax(u_int32_t a, u_int32_t b) { - return (a > b ? a : b); + return a > b ? a : b; } static inline u_int32_t ulmin(u_int32_t a, u_int32_t b) { - return (a < b ? a : b); + return a < b ? a : b; } /* Prototypes for non-quad routines. */ -extern int ffs(int); -extern int ffsll(unsigned long long); -extern int fls(int); -extern int flsll(unsigned long long); -extern u_int32_t random(void); -extern int scanc(u_int, u_char *, const u_char *, int); -extern int skpc(int, int, char *); -extern long strtol(const char*, char **, int); -extern u_long strtoul(const char *, char **, int); -extern quad_t strtoq(const char *, char **, int); +extern int ffs(int); +extern int ffsll(unsigned long long); +extern int fls(int); +extern int flsll(unsigned long long); +extern u_int32_t random(void); +extern int scanc(u_int, u_char *, const u_char *, int); +extern int skpc(int, int, char *); +extern long strtol(const char*, char **, int); +extern u_long strtoul(const char *, char **, int); +extern quad_t strtoq(const char *, char **, int); extern u_quad_t strtouq(const char *, char **, int); -extern char *strsep(char **, const char *); -extern void *memchr(const void *, int, size_t); -extern void url_decode(char *str); +extern char *strsep(char **, const char *); +extern void *memchr(const void *, int, size_t); +extern void url_decode(char *str); -int snprintf(char *, size_t, const char *, ...) __printflike(3,4); +int snprintf(char *, size_t, const char *, ...) __printflike(3, 4); /* sprintf() is being deprecated. Please use snprintf() instead. */ -int sprintf(char *bufp, const char *, ...) __deprecated __printflike(2,3); -int sscanf(const char *, char const *, ...) __scanflike(2,3); -int printf(const char *, ...) __printflike(1,2); +int sprintf(char *bufp, const char *, ...) __deprecated __printflike(2, 3); +int sscanf(const char *, char const *, ...) __scanflike(2, 3); +int printf(const char *, ...) 
__printflike(1, 2); #if KERNEL_PRIVATE -int _consume_printf_args(int, ...); +int _consume_printf_args(int, ...); #endif #if CONFIG_NO_PRINTF_STRINGS @@ -178,38 +178,41 @@ int _consume_printf_args(int, ...); #endif #endif -uint16_t crc16(uint16_t crc, const void *bufp, size_t len); -uint32_t crc32(uint32_t crc, const void *bufp, size_t len); +uint16_t crc16(uint16_t crc, const void *bufp, size_t len); +uint32_t crc32(uint32_t crc, const void *bufp, size_t len); #if XNU_KERNEL_PRIVATE #if KASAN uint16_t __nosan_crc16(uint16_t crc, const void *bufp, size_t len); #else static inline uint16_t -__nosan_crc16(uint16_t crc, const void *bufp, size_t len) { return crc16(crc, bufp, len); } +__nosan_crc16(uint16_t crc, const void *bufp, size_t len) +{ + return crc16(crc, bufp, len); +} #endif #endif -int copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done); -int copyinstr(const user_addr_t uaddr, void *kaddr, size_t len, size_t *done); -int copyoutstr(const void *kaddr, user_addr_t udaddr, size_t len, size_t *done); +int copystr(const void *kfaddr, void *kdaddr, size_t len, size_t *done); +int copyinstr(const user_addr_t uaddr, void *kaddr, size_t len, size_t *done); +int copyoutstr(const void *kaddr, user_addr_t udaddr, size_t len, size_t *done); #if XNU_KERNEL_PRIVATE extern int copyin_word(const user_addr_t user_addr, uint64_t *kernel_addr, vm_size_t nbytes); #endif int vsscanf(const char *, char const *, va_list); -extern int vprintf(const char *, va_list) __printflike(1,0); -extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3,0); +extern int vprintf(const char *, va_list) __printflike(1, 0); +extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0); #if XNU_KERNEL_PRIVATE -extern int vprintf_log_locked(const char *, va_list) __printflike(1,0); -extern void osobject_retain(void * object); -extern void osobject_release(void * object); +extern int vprintf_log_locked(const char *, va_list) __printflike(1, 0); +extern void osobject_retain(void * object); +extern void osobject_release(void * object); #endif /* vsprintf() is being deprecated. Please use vsnprintf() instead. */ -extern int vsprintf(char *bufp, const char *, va_list) __deprecated __printflike(2,0); +extern int vsprintf(char *bufp, const char *, va_list) __deprecated __printflike(2, 0); #ifdef KERNEL_PRIVATE #ifdef __arm__ diff --git a/bsd/libkern/memchr.c b/bsd/libkern/memchr.c index 28b335d46..a3eb09e49 100644 --- a/bsd/libkern/memchr.c +++ b/bsd/libkern/memchr.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,9 +34,10 @@ memchr(const void *bigptr, int ch, size_t length) { const char *big = (const char *)bigptr; size_t n; - for (n = 0; n < length; n++) - if (big[n] == ch) + for (n = 0; n < length; n++) { + if (big[n] == ch) { return __DECONST(void *, &big[n]); + } + } return NULL; } - diff --git a/bsd/libkern/random.c b/bsd/libkern/random.c index 070080f90..8b5521a35 100644 --- a/bsd/libkern/random.c +++ b/bsd/libkern/random.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -75,6 +75,5 @@ random(void) { /* Zero all but bottom 31 bits, also works for 64-bit longs */ u_int32_t mask = (u_int32_t)-1 >> ((sizeof(u_int32_t) * 8) - 31); - return (mask & RandomULong()); + return mask & RandomULong(); } - diff --git a/bsd/libkern/scanc.c b/bsd/libkern/scanc.c index 4d2ec8f8b..d8d1c51cd 100644 --- a/bsd/libkern/scanc.c +++ b/bsd/libkern/scanc.c @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -69,6 +69,8 @@ scanc(u_int size, u_char *cp, const u_char table[], int mask0) u_char mask; mask = mask0; - for (end = &cp[size]; cp != end && (table[*cp] & mask) == 0; ++cp); - return (end - cp); + for (end = &cp[size]; cp != end && (table[*cp] & mask) == 0; ++cp) { + ; + } + return end - cp; } diff --git a/bsd/libkern/skpc.c b/bsd/libkern/skpc.c index cfe1de92d..16cba0997 100644 --- a/bsd/libkern/skpc.c +++ b/bsd/libkern/skpc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -69,6 +69,8 @@ skpc(int mask0, int size, char *cp0) mask = mask0; cp = (u_char *)cp0; - for (end = &cp[size]; cp < end && *cp == mask; ++cp); - return (end - cp); + for (end = &cp[size]; cp < end && *cp == mask; ++cp) { + ; + } + return end - cp; } diff --git a/bsd/libkern/strsep.c b/bsd/libkern/strsep.c index 6e0942254..3c4c13c7e 100644 --- a/bsd/libkern/strsep.c +++ b/bsd/libkern/strsep.c @@ -83,19 +83,21 @@ strsep(char **stringp, const char *delim) int c, sc; char *tok; - if ((s = *stringp) == NULL) - return (NULL); + if ((s = *stringp) == NULL) { + return NULL; + } for (tok = s;;) { c = *s++; spanp = delim; do { if ((sc = *spanp++) == c) { - if (c == 0) + if (c == 0) { s = NULL; - else + } else { s[-1] = 0; + } *stringp = s; - return (tok); + return tok; } } while (sc != 0); } diff --git a/bsd/libkern/url_encode.c b/bsd/libkern/url_encode.c index 0e3cf1562..353dbbdce 100644 --- a/bsd/libkern/url_encode.c +++ b/bsd/libkern/url_encode.c @@ -73,7 +73,7 @@ url_decode(char *str) /* overwrite the '%' with the new char, and bump the rest of the * string down a few characters */ *esc++ = c; - str = memmove(esc, str, strlen(str)+1); + str = memmove(esc, str, strlen(str) + 1); } } else { str++; diff --git a/bsd/machine/_limits.h b/bsd/machine/_limits.h index 736a5886b..f037c1019 100644 --- a/bsd/machine/_limits.h +++ b/bsd/machine/_limits.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
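[Editor's note on the strsep.c hunk above] strsep() differs from strtok() in two useful ways: the cursor lives in the caller's pointer (no hidden static state), and adjacent delimiters yield empty strings rather than being skipped. For example:

#include <stdio.h>
#include <string.h>

int
main(void)
{
    char line[] = "a,b,,c";
    char *cursor = line;
    char *tok;

    /* Prints [a] [b] [] [c] -- the empty field is preserved. */
    while ((tok = strsep(&cursor, ",")) != NULL)
        printf("[%s] ", tok);
    printf("\n");
    return 0;
}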
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE__LIMITS_H_ diff --git a/bsd/machine/_mcontext.h b/bsd/machine/_mcontext.h index e22043742..dc98841eb 100644 --- a/bsd/machine/_mcontext.h +++ b/bsd/machine/_mcontext.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if defined (__i386__) || defined (__x86_64__) diff --git a/bsd/machine/_param.h b/bsd/machine/_param.h index 96b0c2fef..1a6ffc066 100644 --- a/bsd/machine/_param.h +++ b/bsd/machine/_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if defined (__i386__) || defined (__x86_64__) diff --git a/bsd/machine/_types.h b/bsd/machine/_types.h index be86a2368..47dbe8c18 100644 --- a/bsd/machine/_types.h +++ b/bsd/machine/_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE__TYPES_H_ diff --git a/bsd/machine/byte_order.h b/bsd/machine/byte_order.h index 4008142fe..394320c35 100644 --- a/bsd/machine/byte_order.h +++ b/bsd/machine/byte_order.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright (c) 1995 NeXT Computer, Inc. */ -#ifndef _BSD_MACHINE_BYTE_ORDER_H_ +#ifndef _BSD_MACHINE_BYTE_ORDER_H_ #define _BSD_MACHINE_BYTE_ORDER_H_ - + #include -#endif /* _BSD_MACHINE_BYTE_ORDER_H_ */ +#endif /* _BSD_MACHINE_BYTE_ORDER_H_ */ diff --git a/bsd/machine/cons.h b/bsd/machine/cons.h index 43d364ef0..58b85fa75 100644 --- a/bsd/machine/cons.h +++ b/bsd/machine/cons.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,43 +22,43 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Copyright (c) 1987 NeXT, Inc. */ - + struct consdev { - char *cn_name; /* name of device in dev_name_list */ - int (*cn_probe)(void); /* probe and fill in consdev info */ - int (*cn_init)(void); /* turn on as console */ - int (*cn_getc)(void); /* kernel getchar interface */ - int (*cn_putc)(void); /* kernel putchar interface */ - struct tty *cn_tp; /* tty structure for console device */ - dev_t cn_dev; /* major/minor of device */ - short cn_pri; /* pecking order; the higher the better */ + char *cn_name; /* name of device in dev_name_list */ + int (*cn_probe)(void); /* probe and fill in consdev info */ + int (*cn_init)(void); /* turn on as console */ + int (*cn_getc)(void); /* kernel getchar interface */ + int (*cn_putc)(void); /* kernel putchar interface */ + struct tty *cn_tp; /* tty structure for console device */ + dev_t cn_dev; /* major/minor of device */ + short cn_pri; /* pecking order; the higher the better */ }; /* values for cn_pri - reflect our policy for console selection */ -#define CN_DEAD 0 /* device doesn't exist */ -#define CN_NORMAL 1 /* device exists but is nothing special */ -#define CN_INTERNAL 2 /* "internal" bit-mapped display */ -#define CN_REMOTE 3 /* serial interface with remote bit set */ +#define CN_DEAD 0 /* device doesn't exist */ +#define CN_NORMAL 1 /* device exists but is nothing special */ +#define CN_INTERNAL 2 /* "internal" bit-mapped display */ +#define CN_REMOTE 3 /* serial interface with remote bit set */ /* XXX */ -#define CONSMAJOR 0 +#define CONSMAJOR 0 #ifdef KERNEL #include #include -extern struct consdev constab[]; -extern struct consdev *cn_tab; -extern struct tty *cn_tty; +extern struct consdev constab[]; +extern struct consdev *cn_tab; +extern struct tty *cn_tty; -extern struct tty *constty; /* current console device */ +extern struct tty *constty; /* current console device */ int consopen(dev_t, int, int, struct proc *); int consclose(dev_t, int, int, struct proc *); @@ -80,4 +80,3 @@ int kmioctl(dev_t, u_long, caddr_t, int, struct proc *); int kmputc(dev_t, char); #endif - diff --git a/bsd/machine/dis_tables.h b/bsd/machine/dis_tables.h index 7ac37dd7e..012cecdcc 100644 --- a/bsd/machine/dis_tables.h +++ b/bsd/machine/dis_tables.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
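[Editor's note on the cons.h hunk above] The consdev table drives console selection by cn_pri: probing fills in each entry's priority, and the highest live entry wins cn_tab. A sketch of the classic BSD-style selection loop — the probe protocol and NULL-probe sentinel are assumptions here, not taken from this source:

/* Assumed protocol: each cn_probe() fills in its entry's cn_pri,
 * and constab[] ends with an entry whose cn_probe is NULL. */
static void
pick_console(void)
{
    struct consdev *cp, *best = NULL;

    for (cp = constab; cp->cn_probe != NULL; cp++) {
        (*cp->cn_probe)();
        if (cp->cn_pri > CN_DEAD &&
            (best == NULL || cp->cn_pri > best->cn_pri))
            best = cp;
    }
    cn_tab = best;   /* NULL: no console found */
}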
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_DIS_TABLES_H_ diff --git a/bsd/machine/disklabel.h b/bsd/machine/disklabel.h index a29df81d3..c3c3d665b 100644 --- a/bsd/machine/disklabel.h +++ b/bsd/machine/disklabel.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_CPU_H_ diff --git a/bsd/machine/endian.h b/bsd/machine/endian.h index 9cefbf79a..682a15155 100644 --- a/bsd/machine/endian.h +++ b/bsd/machine/endian.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/bsd/machine/exec.h b/bsd/machine/exec.h index d4bc6a86a..f93f56bcf 100644 --- a/bsd/machine/exec.h +++ b/bsd/machine/exec.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,11 +34,11 @@ #include struct exec_info { - char path[MAXPATHLEN]; - int ac; - int ec; - char **av; - char **ev; + char path[MAXPATHLEN]; + int ac; + int ec; + char **av; + char **ev; }; int grade_binary(cpu_type_t, cpu_subtype_t); diff --git a/bsd/machine/fasttrap_isa.h b/bsd/machine/fasttrap_isa.h index 7f31b4eec..ae834bcb6 100644 --- a/bsd/machine/fasttrap_isa.h +++ b/bsd/machine/fasttrap_isa.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_FASTTRAP_ISA_H_ diff --git a/bsd/machine/limits.h b/bsd/machine/limits.h index 39b348a7b..7ae09a12e 100644 --- a/bsd/machine/limits.h +++ b/bsd/machine/limits.h @@ -1,7 +1,7 @@ /* This is the `system' limits.h, independent of any particular - compiler. GCC provides its own limits.h which can be found in - /usr/lib/gcc, although it is not very informative. - This file is public domain. */ + * compiler. GCC provides its own limits.h which can be found in + * /usr/lib/gcc, although it is not very informative. + * This file is public domain. 
*/ #if defined (__i386__) || defined(__x86_64__) #include #elif defined (__arm__) || defined (__arm64__) diff --git a/bsd/machine/param.h b/bsd/machine/param.h index 74b280059..6f9f03e70 100644 --- a/bsd/machine/param.h +++ b/bsd/machine/param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/bsd/machine/profile.h b/bsd/machine/profile.h index 14f2977ee..b538fdf31 100644 --- a/bsd/machine/profile.h +++ b/bsd/machine/profile.h @@ -2,7 +2,7 @@ * Copyright (c) 1997-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/bsd/machine/psl.h b/bsd/machine/psl.h index 6c260a01b..26177b30f 100644 --- a/bsd/machine/psl.h +++ b/bsd/machine/psl.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_PSL_H_ diff --git a/bsd/machine/ptrace.h b/bsd/machine/ptrace.h index cb5ecd990..3da4abf9f 100644 --- a/bsd/machine/ptrace.h +++ b/bsd/machine/ptrace.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/bsd/machine/reboot.h b/bsd/machine/reboot.h index 0a00f2ec2..ae3b8bfb0 100644 --- a/bsd/machine/reboot.h +++ b/bsd/machine/reboot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_REBOOT_H_ diff --git a/bsd/machine/reg.h b/bsd/machine/reg.h index 8f4128740..1188bfd94 100644 --- a/bsd/machine/reg.h +++ b/bsd/machine/reg.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_REG_H_ diff --git a/bsd/machine/signal.h b/bsd/machine/signal.h index 46b23f231..d8a6a2433 100644 --- a/bsd/machine/signal.h +++ b/bsd/machine/signal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_SIGNAL_H_ diff --git a/bsd/machine/smp.h b/bsd/machine/smp.h index f97a38fe8..54bb6e01e 100644 --- a/bsd/machine/smp.h +++ b/bsd/machine/smp.h @@ -2,7 +2,7 @@ * Copyright (c) 2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_SMP_H_ diff --git a/bsd/machine/types.h b/bsd/machine/types.h index c14795279..e2241c06c 100644 --- a/bsd/machine/types.h +++ b/bsd/machine/types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/bsd/machine/vmparam.h b/bsd/machine/vmparam.h index 3817b5a67..2acc4208c 100644 --- a/bsd/machine/vmparam.h +++ b/bsd/machine/vmparam.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _BSD_MACHINE_VMPARAM_H_ diff --git a/bsd/man/man2/chflags.2 b/bsd/man/man2/chflags.2 index 973a27817..3463d7180 100644 --- a/bsd/man/man2/chflags.2 +++ b/bsd/man/man2/chflags.2 @@ -93,11 +93,6 @@ The and .Dq SF_APPEND flags may only be set or unset by the super-user. -They may be set at any time, but normally may only be unset when -the system is in single-user mode. -(See -.Xr launchd 8 -for details.) 
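A minimal userland sketch of the SF_APPEND semantics the chflags.2 page describes above (illustrative, not part of the patch); the new flag is ORed into st_flags so existing flags survive:

#include <sys/stat.h>
#include <unistd.h>

/* Mark a file append-only; only the super-user may set or clear SF_APPEND. */
int
make_append_only(const char *path)
{
	struct stat st;

	if (stat(path, &st) == -1) {
		return -1;
	}
	return chflags(path, st.st_flags | SF_APPEND);
}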
.Sh RETURN VALUES Upon successful completion, a value of 0 is returned. Otherwise, -1 is returned and the global variable diff --git a/bsd/man/man2/fcntl.2 b/bsd/man/man2/fcntl.2 index 0ea25179c..abafe4017 100644 --- a/bsd/man/man2/fcntl.2 +++ b/bsd/man/man2/fcntl.2 @@ -56,7 +56,7 @@ .\" .\" @(#)fcntl.2 8.2 (Berkeley) 1/12/94 .\" -.Dd August 24, 2017 +.Dd August 8, 2018 .Dt FCNTL 2 .Os BSD 4.2 .Sh NAME @@ -494,6 +494,9 @@ The modes are as follows: .Bl -tag -width F_PEOFPOSMODEX -offset indent .It Dv F_PEOFPOSMODE Allocate from the physical end of file. +In this case, +.Fa fst_length +indicates the number of newly allocated bytes desired. .It Dv F_VOLPOSMODE Allocate from the volume offset. .El diff --git a/bsd/miscfs/deadfs/dead_vnops.c b/bsd/miscfs/deadfs/dead_vnops.c index 4af10dd02..18eade2fa 100644 --- a/bsd/miscfs/deadfs/dead_vnops.c +++ b/bsd/miscfs/deadfs/dead_vnops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -70,25 +70,25 @@ #include #include -int chkvnlock(vnode_t vp); +int chkvnlock(vnode_t vp); /* * Prototypes for dead operations on vnodes. 
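Returning to the fcntl.2 hunk above: a short userland sketch of F_PREALLOCATE with F_PEOFPOSMODE, where fst_length now explicitly means the number of newly allocated bytes desired (the function name and fallback policy are illustrative):

#include <fcntl.h>
#include <stdio.h>

int
preallocate_at_eof(int fd, off_t nbytes)
{
	fstore_t fst = {
		.fst_flags   = F_ALLOCATECONTIG,  /* prefer contiguous space */
		.fst_posmode = F_PEOFPOSMODE,     /* allocate from the physical EOF */
		.fst_offset  = 0,
		.fst_length  = nbytes,            /* newly allocated bytes desired */
	};

	if (fcntl(fd, F_PREALLOCATE, &fst) == -1) {
		/* contiguous allocation failed; retry allowing fragmentation */
		fst.fst_flags = F_ALLOCATEALL;
		if (fcntl(fd, F_PREALLOCATE, &fst) == -1) {
			return -1;
		}
	}
	printf("allocated %lld bytes\n", (long long)fst.fst_bytesalloc);
	return 0;
}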
*/ -int dead_badop(void *); -int dead_ebadf(void *); -int dead_lookup(struct vnop_lookup_args *); +int dead_badop(void *); +int dead_ebadf(void *); +int dead_lookup(struct vnop_lookup_args *); #define dead_create (int (*)(struct vnop_create_args *))dead_badop #define dead_mknod (int (*)(struct vnop_mknod_args *))dead_badop -int dead_open(struct vnop_open_args *); +int dead_open(struct vnop_open_args *); #define dead_close (int (*)(struct vnop_close_args *))nullop #define dead_access (int (*)(struct vnop_access_args *))dead_ebadf #define dead_getattr (int (*)(struct vnop_getattr_args *))dead_ebadf #define dead_setattr (int (*)(struct vnop_setattr_args *))dead_ebadf -int dead_read(struct vnop_read_args *); -int dead_write(struct vnop_write_args *); -int dead_ioctl(struct vnop_ioctl_args *); -int dead_select(struct vnop_select_args *); +int dead_read(struct vnop_read_args *); +int dead_write(struct vnop_write_args *); +int dead_ioctl(struct vnop_ioctl_args *); +int dead_select(struct vnop_select_args *); #define dead_mmap (int (*)(struct vnop_mmap_args *))dead_badop #define dead_fsync (int (*)(struct vnop_fsync_args *))nullop #define dead_remove (int (*)(struct vnop_remove_args ))dead_badop @@ -101,58 +101,58 @@ int dead_select(struct vnop_select_args *); #define dead_readlink (int (*)(struct vnop_readlink_args *))dead_ebadf #define dead_inactive (int (*)(struct vnop_inactive_args *))nullop #define dead_reclaim (int (*)(struct vnop_reclaim_args *))nullop -int dead_strategy(struct vnop_strategy_args *); +int dead_strategy(struct vnop_strategy_args *); #define dead_pathconf (int (*)(struct vnop_pathconf_args *))dead_ebadf #define dead_advlock (int (*)(struct vnop_advlock_args *))dead_ebadf #define dead_bwrite (int (*)(struct vnop_bwrite_args *))nullop -int dead_pagein(struct vnop_pagein_args *); -int dead_pageout(struct vnop_pageout_args *); +int dead_pagein(struct vnop_pagein_args *); +int dead_pageout(struct vnop_pageout_args *); int dead_blktooff(struct vnop_blktooff_args *); int dead_offtoblk(struct vnop_offtoblk_args *); int dead_blockmap(struct vnop_blockmap_args *); #define VOPFUNC int (*)(void *) -int (**dead_vnodeop_p)(void *); +int(**dead_vnodeop_p)(void *); struct vnodeopv_entry_desc dead_vnodeop_entries[] = { { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)dead_lookup }, /* lookup */ - { &vnop_create_desc, (VOPFUNC)dead_create }, /* create */ - { &vnop_open_desc, (VOPFUNC)dead_open }, /* open */ - { &vnop_mknod_desc, (VOPFUNC)dead_mknod }, /* mknod */ - { &vnop_close_desc, (VOPFUNC)dead_close }, /* close */ - { &vnop_access_desc, (VOPFUNC)dead_access }, /* access */ - { &vnop_getattr_desc, (VOPFUNC)dead_getattr }, /* getattr */ - { &vnop_setattr_desc, (VOPFUNC)dead_setattr }, /* setattr */ - { &vnop_read_desc, (VOPFUNC)dead_read }, /* read */ - { &vnop_write_desc, (VOPFUNC)dead_write }, /* write */ - { &vnop_ioctl_desc, (VOPFUNC)dead_ioctl }, /* ioctl */ - { &vnop_select_desc, (VOPFUNC)dead_select }, /* select */ - { &vnop_mmap_desc, (VOPFUNC)dead_mmap }, /* mmap */ - { &vnop_fsync_desc, (VOPFUNC)dead_fsync }, /* fsync */ - { &vnop_remove_desc, (VOPFUNC)dead_remove }, /* remove */ - { &vnop_link_desc, (VOPFUNC)dead_link }, /* link */ - { &vnop_rename_desc, (VOPFUNC)dead_rename }, /* rename */ - { &vnop_mkdir_desc, (VOPFUNC)dead_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (VOPFUNC)dead_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (VOPFUNC)dead_symlink }, /* symlink */ - { &vnop_readdir_desc, (VOPFUNC)dead_readdir }, /* readdir */ - { 
&vnop_readlink_desc, (VOPFUNC)dead_readlink }, /* readlink */ - { &vnop_inactive_desc, (VOPFUNC)dead_inactive }, /* inactive */ - { &vnop_reclaim_desc, (VOPFUNC)dead_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (VOPFUNC)dead_strategy }, /* strategy */ - { &vnop_pathconf_desc, (VOPFUNC)dead_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (VOPFUNC)dead_advlock }, /* advlock */ - { &vnop_bwrite_desc, (VOPFUNC)dead_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ - { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (VOPFUNC)dead_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (VOPFUNC)dead_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (VOPFUNC)dead_blockmap }, /* blockmap */ + { &vnop_lookup_desc, (VOPFUNC)dead_lookup }, /* lookup */ + { &vnop_create_desc, (VOPFUNC)dead_create }, /* create */ + { &vnop_open_desc, (VOPFUNC)dead_open }, /* open */ + { &vnop_mknod_desc, (VOPFUNC)dead_mknod }, /* mknod */ + { &vnop_close_desc, (VOPFUNC)dead_close }, /* close */ + { &vnop_access_desc, (VOPFUNC)dead_access }, /* access */ + { &vnop_getattr_desc, (VOPFUNC)dead_getattr }, /* getattr */ + { &vnop_setattr_desc, (VOPFUNC)dead_setattr }, /* setattr */ + { &vnop_read_desc, (VOPFUNC)dead_read }, /* read */ + { &vnop_write_desc, (VOPFUNC)dead_write }, /* write */ + { &vnop_ioctl_desc, (VOPFUNC)dead_ioctl }, /* ioctl */ + { &vnop_select_desc, (VOPFUNC)dead_select }, /* select */ + { &vnop_mmap_desc, (VOPFUNC)dead_mmap }, /* mmap */ + { &vnop_fsync_desc, (VOPFUNC)dead_fsync }, /* fsync */ + { &vnop_remove_desc, (VOPFUNC)dead_remove }, /* remove */ + { &vnop_link_desc, (VOPFUNC)dead_link }, /* link */ + { &vnop_rename_desc, (VOPFUNC)dead_rename }, /* rename */ + { &vnop_mkdir_desc, (VOPFUNC)dead_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (VOPFUNC)dead_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (VOPFUNC)dead_symlink }, /* symlink */ + { &vnop_readdir_desc, (VOPFUNC)dead_readdir }, /* readdir */ + { &vnop_readlink_desc, (VOPFUNC)dead_readlink }, /* readlink */ + { &vnop_inactive_desc, (VOPFUNC)dead_inactive }, /* inactive */ + { &vnop_reclaim_desc, (VOPFUNC)dead_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (VOPFUNC)dead_strategy }, /* strategy */ + { &vnop_pathconf_desc, (VOPFUNC)dead_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (VOPFUNC)dead_advlock }, /* advlock */ + { &vnop_bwrite_desc, (VOPFUNC)dead_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (VOPFUNC)dead_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (VOPFUNC)dead_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (VOPFUNC)dead_blockmap }, /* blockmap */ { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL } }; struct vnodeopv_desc dead_vnodeop_opv_desc = - { &dead_vnodeop_p, dead_vnodeop_entries }; +{ &dead_vnodeop_p, dead_vnodeop_entries }; /* * Trivial lookup routine that always fails. 
@@ -161,9 +161,8 @@ struct vnodeopv_desc dead_vnodeop_opv_desc = int dead_lookup(struct vnop_lookup_args *ap) { - *ap->a_vpp = NULL; - return (ENOTDIR); + return ENOTDIR; } /* @@ -173,7 +172,7 @@ dead_lookup(struct vnop_lookup_args *ap) int dead_open(__unused struct vnop_open_args *ap) { - return (ENXIO); + return ENXIO; } /* @@ -183,15 +182,16 @@ dead_open(__unused struct vnop_open_args *ap) int dead_read(struct vnop_read_args *ap) { - - if (chkvnlock(ap->a_vp)) + if (chkvnlock(ap->a_vp)) { panic("dead_read: lock"); + } /* * Return EOF for character devices, EIO for others */ - if (ap->a_vp->v_type != VCHR) - return (EIO); - return (0); + if (ap->a_vp->v_type != VCHR) { + return EIO; + } + return 0; } /* @@ -201,10 +201,10 @@ dead_read(struct vnop_read_args *ap) int dead_write(struct vnop_write_args *ap) { - - if (chkvnlock(ap->a_vp)) + if (chkvnlock(ap->a_vp)) { panic("dead_write: lock"); - return (EIO); + } + return EIO; } /* @@ -214,21 +214,20 @@ dead_write(struct vnop_write_args *ap) int dead_ioctl(struct vnop_ioctl_args *ap) { - - if (!chkvnlock(ap->a_vp)) - return (EBADF); - return (VCALL(ap->a_vp, VOFFSET(vnop_ioctl), ap)); + if (!chkvnlock(ap->a_vp)) { + return EBADF; + } + return VCALL(ap->a_vp, VOFFSET(vnop_ioctl), ap); } /* ARGSUSED */ int dead_select(__unused struct vnop_select_args *ap) { - /* * Let the user find out that the descriptor is gone. */ - return (1); + return 1; } /* @@ -237,13 +236,12 @@ dead_select(__unused struct vnop_select_args *ap) int dead_strategy(struct vnop_strategy_args *ap) { - if (buf_vnode(ap->a_bp) == NULL || !chkvnlock(buf_vnode(ap->a_bp))) { - buf_seterror(ap->a_bp, EIO); + buf_seterror(ap->a_bp, EIO); buf_biodone(ap->a_bp); - return (EIO); + return EIO; } - return (VNOP_STRATEGY(ap->a_bp)); + return VNOP_STRATEGY(ap->a_bp); } /* @@ -252,11 +250,11 @@ dead_strategy(struct vnop_strategy_args *ap) int dead_blockmap(struct vnop_blockmap_args *ap) { - - if (!chkvnlock(ap->a_vp)) - return (EIO); - return (VNOP_BLOCKMAP(ap->a_vp, ap->a_foffset, ap->a_size, ap->a_bpn, - ap->a_run, ap->a_poff, ap->a_flags, ap->a_context)); + if (!chkvnlock(ap->a_vp)) { + return EIO; + } + return VNOP_BLOCKMAP(ap->a_vp, ap->a_foffset, ap->a_size, ap->a_bpn, + ap->a_run, ap->a_poff, ap->a_flags, ap->a_context); } /* @@ -266,8 +264,7 @@ dead_blockmap(struct vnop_blockmap_args *ap) int dead_ebadf(__unused void *dummy) { - - return (EBADF); + return EBADF; } /* @@ -277,10 +274,9 @@ dead_ebadf(__unused void *dummy) int dead_badop(__unused void *dummy) { - panic("dead_badop called"); /* NOTREACHED */ - return (-1); + return -1; } /* @@ -290,7 +286,7 @@ dead_badop(__unused void *dummy) int chkvnlock(__unused vnode_t vp) { - return (0); + return 0; } @@ -298,19 +294,21 @@ chkvnlock(__unused vnode_t vp) int dead_blktooff(struct vnop_blktooff_args *ap) { - if (!chkvnlock(ap->a_vp)) - return (EIO); + if (!chkvnlock(ap->a_vp)) { + return EIO; + } - *ap->a_offset = (off_t)-1; /* failure */ - return (0); + *ap->a_offset = (off_t)-1; /* failure */ + return 0; } /* Blktooff */ int dead_offtoblk(struct vnop_offtoblk_args *ap) { - if (!chkvnlock(ap->a_vp)) - return (EIO); + if (!chkvnlock(ap->a_vp)) { + return EIO; + } - *ap->a_lblkno = (daddr64_t)-1; /* failure */ - return (0); + *ap->a_lblkno = (daddr64_t)-1; /* failure */ + return 0; } diff --git a/bsd/miscfs/devfs/devfs.h b/bsd/miscfs/devfs/devfs.h index c2cc577c5..966926d4f 100644 --- a/bsd/miscfs/devfs/devfs.h +++ b/bsd/miscfs/devfs/devfs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright 1997,1998 Julian Elischer. All rights reserved. * julian@freebsd.org - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: @@ -37,7 +37,7 @@ * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -49,24 +49,24 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * + * * miscfs/devfs/devfs.h */ #ifndef _MISCFS_DEVFS_DEVFS_H_ -#define _MISCFS_DEVFS_DEVFS_H_ +#define _MISCFS_DEVFS_DEVFS_H_ #include #include -#define DEVFS_CHAR 0 -#define DEVFS_BLOCK 1 +#define DEVFS_CHAR 0 +#define DEVFS_BLOCK 1 /* * Argument to clone callback after dev */ -#define DEVFS_CLONE_ALLOC 1 /* Allocate minor number slot */ -#define DEVFS_CLONE_FREE 0 /* Free minor number slot */ +#define DEVFS_CLONE_ALLOC 1 /* Allocate minor number slot */ +#define DEVFS_CLONE_FREE 0 /* Free minor number slot */ __BEGIN_DECLS @@ -80,7 +80,7 @@ __BEGIN_DECLS * the supplied dev.. * * Parameters: - * dev - the dev_t value to associate + * dev - the dev_t value to associate * chrblk - block or character device (DEVFS_CHAR or DEVFS_BLOCK) * uid, gid - ownership * perms - permissions @@ -89,9 +89,9 @@ __BEGIN_DECLS * Returns: * A handle to a device node if successful, NULL otherwise. */ -void * devfs_make_node_clone(dev_t dev, int chrblk, uid_t uid, gid_t gid, - int perms, int (*clone)(dev_t dev, int action), - const char *fmt, ...); +void * devfs_make_node_clone(dev_t dev, int chrblk, uid_t uid, gid_t gid, + int perms, int (*clone)(dev_t dev, int action), + const char *fmt, ...); /* * Function: devfs_make_node @@ -100,7 +100,7 @@ void * devfs_make_node_clone(dev_t dev, int chrblk, uid_t uid, gid_t gid, * Create a device node with the given pathname in the devfs namespace. 
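The parameter list resumes below; first, a usage sketch with hypothetical names (mydev_major, mydev_node, neither from the patch): a driver typically pairs devfs_make_node() at attach time with devfs_remove() at detach.

#include <sys/conf.h>
#include <sys/errno.h>
#include <miscfs/devfs/devfs.h>

static void *mydev_node;   /* handle returned by devfs_make_node() */

static int
mydev_attach(int mydev_major)
{
	/* creates /dev/mydev: character device, root:wheel, mode 0666 */
	mydev_node = devfs_make_node(makedev(mydev_major, 0), DEVFS_CHAR,
	    UID_ROOT, GID_WHEEL, 0666, "mydev");
	return (mydev_node == NULL) ? ENOMEM : 0;
}

static void
mydev_detach(void)
{
	devfs_remove(mydev_node);   /* also removes any devfs_make_link() links */
	mydev_node = NULL;
}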
* * Parameters: - * dev - the dev_t value to associate + * dev - the dev_t value to associate * chrblk - block or character device (DEVFS_CHAR or DEVFS_BLOCK) * uid, gid - ownership * perms - permissions @@ -108,8 +108,8 @@ void * devfs_make_node_clone(dev_t dev, int chrblk, uid_t uid, gid_t gid, * Returns: * A handle to a device node if successful, NULL otherwise. */ -void * devfs_make_node(dev_t dev, int chrblk, uid_t uid, gid_t gid, - int perms, const char *fmt, ...); +void * devfs_make_node(dev_t dev, int chrblk, uid_t uid, gid_t gid, + int perms, const char *fmt, ...); #ifdef BSD_KERNEL_PRIVATE /* @@ -121,7 +121,7 @@ void * devfs_make_node(dev_t dev, int chrblk, uid_t uid, gid_t gid, * Returns: * 0 if successful, -1 if failed */ -int devfs_make_link(void * handle, char *fmt, ...); +int devfs_make_link(void * handle, char *fmt, ...); #endif /* BSD_KERNEL_PRIVATE */ /* @@ -131,24 +131,24 @@ int devfs_make_link(void * handle, char *fmt, ...); * Remove the device node returned by devfs_make_node() along with * any links created with devfs_make_link(). */ -void devfs_remove(void * handle); +void devfs_remove(void * handle); __END_DECLS #ifdef __APPLE_API_PRIVATE /* XXX */ -#define UID_ROOT 0 -#define UID_BIN 3 -#define UID_UUCP 66 +#define UID_ROOT 0 +#define UID_BIN 3 +#define UID_UUCP 66 /* XXX */ -#define GID_WHEEL 0 -#define GID_KMEM 2 -#define GID_TTY 4 -#define GID_OPERATOR 5 -#define GID_BIN 7 -#define GID_GAMES 13 -#define GID_DIALER 68 +#define GID_WHEEL 0 +#define GID_KMEM 2 +#define GID_TTY 4 +#define GID_OPERATOR 5 +#define GID_BIN 7 +#define GID_GAMES 13 +#define GID_DIALER 68 #define GID_WINDOWSERVER 88 #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/miscfs/devfs/devfs_fdesc_support.c b/bsd/miscfs/devfs/devfs_fdesc_support.c index 5d9355efc..28806babe 100644 --- a/bsd/miscfs/devfs/devfs_fdesc_support.c +++ b/bsd/miscfs/devfs/devfs_fdesc_support.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -74,7 +74,7 @@ #include #include #include -#include /* boottime */ +#include /* boottime */ #include #include #include @@ -95,20 +95,20 @@ #include #include -#define FDL_WANT 0x01 -#define FDL_LOCKED 0x02 +#define FDL_WANT 0x01 +#define FDL_LOCKED 0x02 static int fdcache_lock; -#if (FD_STDIN != FD_STDOUT-1) || (FD_STDOUT != FD_STDERR-1) -FD_STDIN, FD_STDOUT, FD_STDERR must be a sequence n, n+1, n+2 +#if (FD_STDIN != FD_STDOUT - 1) || (FD_STDOUT != FD_STDERR - 1) +FD_STDIN, FD_STDOUT, FD_STDERR must be a sequence n, n + 1, n + 2 #endif -#define NFDCACHE 3 +#define NFDCACHE 3 #define FD_NHASH(ix) \ (&fdhashtbl[(ix) & fdhash]) -LIST_HEAD(fdhashhead, fdescnode) *fdhashtbl; +LIST_HEAD(fdhashhead, fdescnode) * fdhashtbl; u_long fdhash; static int fdesc_attr(int fd, struct vnode_attr *vap, vfs_context_t a_context); @@ -116,13 +116,13 @@ static int fdesc_attr(int fd, struct vnode_attr *vap, vfs_context_t a_context); lck_mtx_t fdesc_mtx; lck_grp_t *fdesc_lckgrp; -static void +static void fdesc_lock(void) { lck_mtx_lock(&fdesc_mtx); } -static void +static void fdesc_unlock(void) { lck_mtx_unlock(&fdesc_mtx); @@ -138,7 +138,7 @@ devfs_fdesc_init() int error = 0; devnode_t *rootdir = dev_root->de_dnp; devdirent_t *direntp; - + /* XXX Make sure you have the right path... */ fdhashtbl = hashinit(NFDCACHE, M_CACHE, &fdhash); fdesc_lckgrp = lck_grp_alloc_init("fdesc", NULL); @@ -149,7 +149,7 @@ devfs_fdesc_init() devfs_fdesc_makelinks(); DEVFS_UNLOCK(); - return(error); + return error; } /* @@ -184,7 +184,7 @@ devfs_fdesc_makelinks() printf("Couldn't make stderr, err %d.\n", error); goto bad; } - + return 0; bad: @@ -216,7 +216,7 @@ fdesc_allocvp(fdntype ftype, int ix, struct mount *mp, struct vnode **vpp, enum loop: for (fd = fc->lh_first; fd != 0; fd = fd->fd_hash.le_next) { if (fd->fd_ix == ix && vnode_mount(fd->fd_vnode) == mp) { - vid = vnode_vid(fd->fd_vnode); + vid = vnode_vid(fd->fd_vnode); fdesc_unlock(); if (vnode_getwithvid(fd->fd_vnode, vid)) { @@ -227,7 +227,7 @@ loop: *vpp = fd->fd_vnode; (*vpp)->v_type = vtype; - return (error); + return error; } } @@ -262,7 +262,7 @@ loop: fdesc_lock(); goto out; } - + (*vpp)->v_tag = VT_FDESC; fd->fd_vnode = *vpp; fd->fd_type = ftype; @@ -270,7 +270,7 @@ loop: fd->fd_link = NULL; fd->fd_ix = ix; fd->fd_fd = fdno; - + fdesc_lock(); LIST_INSERT_HEAD(fc, fd, fd_hash); @@ -282,10 +282,10 @@ out: fdcache_lock &= ~FDL_WANT; wakeup((caddr_t) &fdcache_lock); } - + fdesc_unlock(); - return (error); + return error; } /* @@ -309,18 +309,19 @@ devfs_devfd_lookup(struct vnop_lookup_args *ap) if (cnp->cn_namelen == 1 && *pname == '.') { *vpp = dvp; - - if ( (error = vnode_get(dvp)) ) { - return(error); + + if ((error = vnode_get(dvp))) { + return error; } - return (0); + return 0; } fd = 0; while (*pname >= '0' && *pname <= '9') { fd = 10 * fd + *pname++ - '0'; - if (fd >= numfiles) + if (fd >= numfiles) { break; + } } if (*pname != '\0') { @@ -329,21 +330,22 @@ devfs_devfd_lookup(struct vnop_lookup_args *ap) } if (fd < 0 || fd >= numfiles || - *fdfile(p, fd) == NULL || - (*fdflags(p, fd) & UF_RESERVED)) { + *fdfile(p, fd) == NULL || + (*fdflags(p, fd) & UF_RESERVED)) { error = EBADF; goto bad; } - error = fdesc_allocvp(Fdesc, FD_DESC+fd, dvp->v_mount, &fvp, VNON, fd); - if (error) + error = fdesc_allocvp(Fdesc, FD_DESC + fd, dvp->v_mount, &fvp, VNON, fd); + if (error) { goto bad; + } *vpp = fvp; - return (0); + return 0; bad: *vpp = NULL; - return (error); + return error; } int @@ -354,8 +356,9 @@ fdesc_open(struct vnop_open_args *ap) 
uthread_t uu; int error = 0; - if (thr == NULL) - return (EINVAL); + if (thr == NULL) { + return EINVAL; + } uu = get_bsdthread_info(thr); @@ -363,21 +366,21 @@ fdesc_open(struct vnop_open_args *ap) case Fdesc: /* * XXX Kludge: set uu->uu_dupfd to contain the value of the - * the file descriptor being sought for duplication. The error + * the file descriptor being sought for duplication. The error * return ensures that the vnode for this device will be * released by vn_open. Open will detect this special error and * take the actions in dupfdopen. Other callers of vn_open or * vnop_open will simply report the error. */ - uu->uu_dupfd = VTOFDESC(vp)->fd_fd; /* XXX */ + uu->uu_dupfd = VTOFDESC(vp)->fd_fd; /* XXX */ error = ENODEV; break; - default: + default: panic("Invalid type for fdesc node!"); break; } - return (error); + return error; } static int @@ -388,18 +391,20 @@ fdesc_attr(int fd, struct vnode_attr *vap, vfs_context_t a_context) struct stat stb; int error; - if ((error = fp_lookup(p, fd, &fp, 0))) - return (error); + if ((error = fp_lookup(p, fd, &fp, 0))) { + return error; + } switch (FILEGLOB_DTYPE(fp->f_fglob)) { case DTYPE_VNODE: - if((error = vnode_getwithref((struct vnode *) fp->f_fglob->fg_data)) != 0) { + if ((error = vnode_getwithref((struct vnode *) fp->f_fglob->fg_data)) != 0) { break; } if ((error = vnode_authorize((struct vnode *)fp->f_fglob->fg_data, - NULL, - KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, - a_context)) == 0) + NULL, + KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, + a_context)) == 0) { error = vnode_getattr((struct vnode *)fp->f_fglob->fg_data, vap, a_context); + } if (error == 0 && vap->va_type == VDIR) { /* * directories can cause loops in the namespace, @@ -407,7 +412,7 @@ fdesc_attr(int fd, struct vnode_attr *vap, vfs_context_t a_context) * * XXX ACLs break this, of course */ - vap->va_mode &= ~((VEXEC)|(VEXEC>>3)|(VEXEC>>6)); + vap->va_mode &= ~((VEXEC) | (VEXEC >> 3) | (VEXEC >> 6)); } (void)vnode_put((struct vnode *) fp->f_fglob->fg_data); break; @@ -415,17 +420,18 @@ fdesc_attr(int fd, struct vnode_attr *vap, vfs_context_t a_context) case DTYPE_SOCKET: case DTYPE_PIPE: #if SOCKETS - if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) + if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) { error = soo_stat((struct socket *)fp->f_fglob->fg_data, (void *)&stb, 0); - else + } else #endif /* SOCKETS */ - error = pipe_stat((struct pipe *)fp->f_fglob->fg_data, (void *)&stb, 0); + error = pipe_stat((struct pipe *)fp->f_fglob->fg_data, (void *)&stb, 0); if (error == 0) { - if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) - VATTR_RETURN(vap, va_type, VSOCK); - else - VATTR_RETURN(vap, va_type, VFIFO); + if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) { + VATTR_RETURN(vap, va_type, VSOCK); + } else { + VATTR_RETURN(vap, va_type, VFIFO); + } VATTR_RETURN(vap, va_mode, stb.st_mode); VATTR_RETURN(vap, va_nlink, stb.st_nlink); @@ -450,7 +456,7 @@ fdesc_attr(int fd, struct vnode_attr *vap, vfs_context_t a_context) } fp_drop(p, fd, fp, 0); - return (error); + return error; } int @@ -469,21 +475,21 @@ fdesc_getattr(struct vnop_getattr_args *ap) default: panic("Invalid type for an fdesc node!\n"); - break; + break; } - - /* + + /* * Yes, we do this without locking, but this value is always just * a snapshot. 
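The uu_dupfd kludge in fdesc_open() above is what makes an open(2) of /dev/fd/N behave as a duplicate of descriptor N. A userland sketch (illustrative, not part of the patch):

#include <fcntl.h>
#include <stdio.h>

int
reopen_descriptor(int fd)
{
	char path[32];

	snprintf(path, sizeof(path), "/dev/fd/%d", fd);
	return open(path, O_RDONLY);   /* equivalent in effect to dup(fd) here */
}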
*/ if (error == 0) { vp->v_type = vap->va_type; - + /* We need an inactive to reset type to VNON */ vnode_setneedinactive(vp); } - return (error); + return error; } int @@ -502,12 +508,13 @@ fdesc_setattr(struct vnop_setattr_args *ap) break; default: panic("Invalid type for an fdesc node!\n"); - return (EACCES); + return EACCES; } fd = VTOFDESC(ap->a_vp)->fd_fd; - if ((error = fp_lookup(vfs_context_proc(ap->a_context), fd, &fp, 0))) - return (error); + if ((error = fp_lookup(vfs_context_proc(ap->a_context), fd, &fp, 0))) { + return error; + } /* * Can setattr the underlying vnode, but not sockets! @@ -515,8 +522,9 @@ fdesc_setattr(struct vnop_setattr_args *ap) switch (FILEGLOB_DTYPE(fp->f_fglob)) { case DTYPE_VNODE: { - if ((error = vnode_getwithref((struct vnode *) fp->f_fglob->fg_data)) != 0) + if ((error = vnode_getwithref((struct vnode *) fp->f_fglob->fg_data)) != 0) { break; + } error = vnode_setattr((struct vnode *) fp->f_fglob->fg_data, ap->a_vap, ap->a_context); (void)vnode_put((struct vnode *) fp->f_fglob->fg_data); break; @@ -533,25 +541,25 @@ fdesc_setattr(struct vnop_setattr_args *ap) } fp_drop(p, fd, fp, 0); - return (error); + return error; } #define UIO_MX 16 /* -static struct dirtmp { - u_int32_t d_fileno; - u_short d_reclen; - u_short d_namlen; - char d_name[8]; -} rootent[] = { - { FD_DEVFD, UIO_MX, 2, "fd" }, - { FD_STDIN, UIO_MX, 5, "stdin" }, - { FD_STDOUT, UIO_MX, 6, "stdout" }, - { FD_STDERR, UIO_MX, 6, "stderr" }, - { 0, 0, 0, "" } -}; -*/ + * static struct dirtmp { + * u_int32_t d_fileno; + * u_short d_reclen; + * u_short d_namlen; + * char d_name[8]; + * } rootent[] = { + * { FD_DEVFD, UIO_MX, 2, "fd" }, + * { FD_STDIN, UIO_MX, 5, "stdin" }, + * { FD_STDOUT, UIO_MX, 6, "stdout" }, + * { FD_STDERR, UIO_MX, 6, "stderr" }, + * { 0, 0, 0, "" } + * }; + */ /* Only called on /dev/fd */ int @@ -565,20 +573,23 @@ devfs_devfd_readdir(struct vnop_readdir_args *ap) * We don't allow exporting fdesc mounts, and currently local * requests do not need cookies. */ - if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) - return (EINVAL); + if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) { + return EINVAL; + } /* * There needs to be space for at least one entry. 
*/ - if (uio_resid(uio) < UIO_MX) - return (EINVAL); + if (uio_resid(uio) < UIO_MX) { + return EINVAL; + } i = uio->uio_offset / UIO_MX; error = 0; while (uio_resid(uio) >= UIO_MX) { - if (i >= p->p_fd->fd_nfiles) + if (i >= p->p_fd->fd_nfiles) { break; + } if (*fdfile(p, i) != NULL && !(*fdflags(p, i) & UF_RESERVED)) { struct dirent d; @@ -587,7 +598,7 @@ devfs_devfd_readdir(struct vnop_readdir_args *ap) bzero((caddr_t) dp, UIO_MX); dp->d_namlen = snprintf(dp->d_name, sizeof(dp->d_name), - "%d", i); + "%d", i); dp->d_reclen = UIO_MX; dp->d_type = DT_UNKNOWN; dp->d_fileno = i + FD_STDIN; @@ -595,38 +606,39 @@ devfs_devfd_readdir(struct vnop_readdir_args *ap) * And ship to userland */ error = uiomove((caddr_t) dp, UIO_MX, uio); - if (error) + if (error) { break; + } } i++; } uio->uio_offset = i * UIO_MX; - return (error); + return error; } int fdesc_read(__unused struct vnop_read_args *ap) { - return (ENOTSUP); + return ENOTSUP; } int fdesc_write(__unused struct vnop_write_args *ap) { - return (ENOTSUP); + return ENOTSUP; } int fdesc_ioctl(__unused struct vnop_ioctl_args *ap) { - return (ENOTSUP); + return ENOTSUP; } int fdesc_select(__unused struct vnop_select_args *ap) { - return (ENOTSUP); + return ENOTSUP; } int @@ -640,7 +652,7 @@ fdesc_inactive(struct vnop_inactive_args *ap) */ vp->v_type = VNON; - return (0); + return 0; } int @@ -654,10 +666,10 @@ fdesc_reclaim(struct vnop_reclaim_args *ap) LIST_REMOVE(fd, fd_hash); FREE(vp->v_data, M_TEMP); vp->v_data = NULL; - + fdesc_unlock(); - return (0); + return 0; } /* @@ -666,28 +678,27 @@ fdesc_reclaim(struct vnop_reclaim_args *ap) int fdesc_pathconf(struct vnop_pathconf_args *ap) { - switch (ap->a_name) { case _PC_LINK_MAX: *ap->a_retval = LINK_MAX; - return (0); + return 0; case _PC_MAX_CANON: *ap->a_retval = MAX_CANON; - return (0); + return 0; case _PC_MAX_INPUT: *ap->a_retval = MAX_INPUT; - return (0); + return 0; case _PC_PIPE_BUF: *ap->a_retval = PIPE_BUF; - return (0); + return 0; case _PC_CHOWN_RESTRICTED: - *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ - return (0); + *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ + return 0; case _PC_VDISABLE: *ap->a_retval = _POSIX_VDISABLE; - return (0); + return 0; default: - return (EINVAL); + return EINVAL; } /* NOTREACHED */ } @@ -698,8 +709,7 @@ fdesc_pathconf(struct vnop_pathconf_args *ap) int fdesc_badop(void) { - - return (ENOTSUP); + return ENOTSUP; /* NOTREACHED */ } @@ -710,7 +720,7 @@ fdesc_badop(void) #define fdesc_close (int (*) (struct vnop_close_args *))nullop #define fdesc_access (int (*) (struct vnop_access_args *))nullop #define fdesc_mmap (int (*) (struct vnop_mmap_args *))eopnotsupp -#define fdesc_revoke nop_revoke +#define fdesc_revoke nop_revoke #define fdesc_fsync (int (*) (struct vnop_fsync_args *))nullop #define fdesc_remove (int (*) (struct vnop_remove_args *))eopnotsupp #define fdesc_link (int (*) (struct vnop_link_args *))eopnotsupp @@ -725,47 +735,46 @@ fdesc_badop(void) #define fdesc_offtoblk (int (*) (struct vnop_offtoblk_args *))eopnotsupp #define fdesc_blockmap (int (*) (struct vnop_blockmap_args *))eopnotsupp -int (**fdesc_vnodeop_p)(void *); +int(**fdesc_vnodeop_p)(void *); struct vnodeopv_entry_desc devfs_fdesc_vnodeop_entries[] = { { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)vn_default_error}, /* lookup */ - { &vnop_create_desc, (VOPFUNC)fdesc_create }, /* create */ - { &vnop_mknod_desc, (VOPFUNC)fdesc_mknod }, /* mknod */ - { &vnop_open_desc, (VOPFUNC)fdesc_open }, /* open */ - { &vnop_close_desc, 
(VOPFUNC)fdesc_close }, /* close */
- { &vnop_access_desc, (VOPFUNC)fdesc_access }, /* access */
- { &vnop_getattr_desc, (VOPFUNC)fdesc_getattr }, /* getattr */
- { &vnop_setattr_desc, (VOPFUNC)fdesc_setattr }, /* setattr */
- { &vnop_read_desc, (VOPFUNC)fdesc_read }, /* read */
- { &vnop_write_desc, (VOPFUNC)fdesc_write }, /* write */
- { &vnop_ioctl_desc, (VOPFUNC)fdesc_ioctl }, /* ioctl */
- { &vnop_select_desc, (VOPFUNC)fdesc_select }, /* select */
- { &vnop_revoke_desc, (VOPFUNC)fdesc_revoke }, /* revoke */
- { &vnop_mmap_desc, (VOPFUNC)fdesc_mmap }, /* mmap */
- { &vnop_fsync_desc, (VOPFUNC)fdesc_fsync }, /* fsync */
- { &vnop_remove_desc, (VOPFUNC)fdesc_remove }, /* remove */
- { &vnop_link_desc, (VOPFUNC)fdesc_link }, /* link */
- { &vnop_rename_desc, (VOPFUNC)fdesc_rename }, /* rename */
- { &vnop_mkdir_desc, (VOPFUNC)fdesc_mkdir }, /* mkdir */
- { &vnop_rmdir_desc, (VOPFUNC)fdesc_rmdir }, /* rmdir */
- { &vnop_symlink_desc, (VOPFUNC)fdesc_symlink }, /* symlink */
+ { &vnop_lookup_desc, (VOPFUNC)vn_default_error}, /* lookup */
+ { &vnop_create_desc, (VOPFUNC)fdesc_create }, /* create */
+ { &vnop_mknod_desc, (VOPFUNC)fdesc_mknod }, /* mknod */
+ { &vnop_open_desc, (VOPFUNC)fdesc_open }, /* open */
+ { &vnop_close_desc, (VOPFUNC)fdesc_close }, /* close */
+ { &vnop_access_desc, (VOPFUNC)fdesc_access }, /* access */
+ { &vnop_getattr_desc, (VOPFUNC)fdesc_getattr }, /* getattr */
+ { &vnop_setattr_desc, (VOPFUNC)fdesc_setattr }, /* setattr */
+ { &vnop_read_desc, (VOPFUNC)fdesc_read }, /* read */
+ { &vnop_write_desc, (VOPFUNC)fdesc_write }, /* write */
+ { &vnop_ioctl_desc, (VOPFUNC)fdesc_ioctl }, /* ioctl */
+ { &vnop_select_desc, (VOPFUNC)fdesc_select }, /* select */
+ { &vnop_revoke_desc, (VOPFUNC)fdesc_revoke }, /* revoke */
+ { &vnop_mmap_desc, (VOPFUNC)fdesc_mmap }, /* mmap */
+ { &vnop_fsync_desc, (VOPFUNC)fdesc_fsync }, /* fsync */
+ { &vnop_remove_desc, (VOPFUNC)fdesc_remove }, /* remove */
+ { &vnop_link_desc, (VOPFUNC)fdesc_link }, /* link */
+ { &vnop_rename_desc, (VOPFUNC)fdesc_rename }, /* rename */
+ { &vnop_mkdir_desc, (VOPFUNC)fdesc_mkdir }, /* mkdir */
+ { &vnop_rmdir_desc, (VOPFUNC)fdesc_rmdir }, /* rmdir */
+ { &vnop_symlink_desc, (VOPFUNC)fdesc_symlink }, /* symlink */
  { &vnop_readdir_desc, (VOPFUNC)vn_default_error},/* readdir */
  { &vnop_readlink_desc, (VOPFUNC)err_readlink}, /* readlink */
  { &vnop_inactive_desc, (VOPFUNC)fdesc_inactive },/* inactive */
- { &vnop_reclaim_desc, (VOPFUNC)fdesc_reclaim }, /* reclaim */
- { &vnop_strategy_desc, (VOPFUNC)fdesc_strategy }, /* strategy */
- { &vnop_pathconf_desc, (VOPFUNC)fdesc_pathconf }, /* pathconf */
- { &vnop_advlock_desc, (VOPFUNC)fdesc_advlock }, /* advlock */
- { &vnop_bwrite_desc, (VOPFUNC)fdesc_bwrite }, /* bwrite */
- { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* pagein */
- { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* pageout */
- { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */
- { &vnop_blktooff_desc, (VOPFUNC)fdesc_blktooff }, /* blktooff */
- { &vnop_blktooff_desc, (VOPFUNC)fdesc_offtoblk }, /* offtoblk */
- { &vnop_blockmap_desc, (VOPFUNC)fdesc_blockmap }, /* blockmap */
+ { &vnop_reclaim_desc, (VOPFUNC)fdesc_reclaim }, /* reclaim */
+ { &vnop_strategy_desc, (VOPFUNC)fdesc_strategy }, /* strategy */
+ { &vnop_pathconf_desc, (VOPFUNC)fdesc_pathconf }, /* pathconf */
+ { &vnop_advlock_desc, (VOPFUNC)fdesc_advlock }, /* advlock */
+ { &vnop_bwrite_desc, (VOPFUNC)fdesc_bwrite }, /* bwrite */
+ { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* pagein */
+ { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* pageout */
+ { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */
+ { &vnop_blktooff_desc, (VOPFUNC)fdesc_blktooff }, /* blktooff */
+ { &vnop_blktooff_desc, (VOPFUNC)fdesc_offtoblk }, /* offtoblk */
+ { &vnop_blockmap_desc, (VOPFUNC)fdesc_blockmap }, /* blockmap */
  { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
 };
 struct vnodeopv_desc devfs_fdesc_vnodeop_opv_desc =
- { &fdesc_vnodeop_p, devfs_fdesc_vnodeop_entries };
-
+{ &fdesc_vnodeop_p, devfs_fdesc_vnodeop_entries };
diff --git a/bsd/miscfs/devfs/devfs_proto.h b/bsd/miscfs/devfs/devfs_proto.h
index 0485fb52d..4a4d5d277 100644
--- a/bsd/miscfs/devfs/devfs_proto.h
+++ b/bsd/miscfs/devfs/devfs_proto.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* THIS FILE HAS BEEN PRODUCED AUTOMATICALLY */
@@ -33,22 +33,22 @@
 __BEGIN_DECLS
 #ifdef __APPLE_API_PRIVATE
-int devfs_sinit(void);
-devdirent_t * dev_findname(devnode_t * dir, const char *name);
-int dev_add_name(const char * name, devnode_t * dirnode, devdirent_t * back,
+int devfs_sinit(void);
+devdirent_t * dev_findname(devnode_t * dir, const char *name);
+int dev_add_name(const char * name, devnode_t * dirnode, devdirent_t * back,
 devnode_t * dnp, devdirent_t * *dirent_pp);
-int dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto,
- devnode_t * *dn_pp, struct devfsmount *dvm);
-void devnode_free(devnode_t * dnp);
-int dev_dup_plane(struct devfsmount *devfs_mp_p);
-void devfs_free_plane(struct devfsmount *devfs_mp_p);
-int dev_free_name(devdirent_t * dirent_p);
-int devfs_dntovn(devnode_t * dnp, struct vnode **vn_pp, struct proc * p);
-int dev_add_entry(const char *name, devnode_t * parent, int type, devnode_type_t * typeinfo,
- devnode_t * proto, struct devfsmount *dvm, devdirent_t * *nm_pp);
-int devfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data,
- vfs_context_t context);
-int devfs_kernel_mount(char * mntname);
+int dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto,
+ devnode_t * *dn_pp, struct devfsmount *dvm);
+void devnode_free(devnode_t * dnp);
+int dev_dup_plane(struct devfsmount *devfs_mp_p);
+void devfs_free_plane(struct devfsmount *devfs_mp_p);
+int dev_free_name(devdirent_t * dirent_p);
+int devfs_dntovn(devnode_t * dnp, struct vnode **vn_pp, struct proc * p);
+int dev_add_entry(const char *name, devnode_t * parent, int type, devnode_type_t * typeinfo,
+ devnode_t * proto, struct devfsmount *dvm, devdirent_t * *nm_pp);
+int devfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data,
+ vfs_context_t context);
+int devfs_kernel_mount(char * mntname);
 #endif /* __APPLE_API_PRIVATE */
 __END_DECLS
diff --git a/bsd/miscfs/devfs/devfs_tree.c b/bsd/miscfs/devfs/devfs_tree.c
index 6358c5c6b..fe32a3c68 100644
--- a/bsd/miscfs/devfs/devfs_tree.c
+++ b/bsd/miscfs/devfs/devfs_tree.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,14 +22,14 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * Copyright 1997,1998 Julian Elischer. All rights reserved.
* julian@freebsd.org - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: @@ -38,7 +38,7 @@ * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -50,7 +50,7 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * + * * devfs_tree.c */ /* @@ -76,7 +76,7 @@ * vnode instead of the existing one that has the mounted_here * field filled in; the net effect was that the filesystem mounted * on top of us would never show up - * - added devfs_stats to store how many data structures are actually + * - added devfs_stats to store how many data structures are actually * allocated */ @@ -99,7 +99,7 @@ #include #include #include -#define BSD_KERNEL_PRIVATE 1 /* devfs_make_link() prototype */ +#define BSD_KERNEL_PRIVATE 1 /* devfs_make_link() prototype */ #include "devfs.h" #include "devfsdefs.h" @@ -112,48 +112,48 @@ #endif typedef struct devfs_vnode_event { - vnode_t dve_vp; - uint32_t dve_vid; - uint32_t dve_events; + vnode_t dve_vp; + uint32_t dve_vid; + uint32_t dve_events; } *devfs_vnode_event_t; -/* - * Size of stack buffer (fast path) for notifications. If +/* + * Size of stack buffer (fast path) for notifications. If * the number of mounts is small, no need to malloc a buffer. 
*/ -#define NUM_STACK_ENTRIES 5 +#define NUM_STACK_ENTRIES 5 typedef struct devfs_event_log { - size_t del_max; - size_t del_used; - devfs_vnode_event_t del_entries; + size_t del_max; + size_t del_used; + devfs_vnode_event_t del_entries; } *devfs_event_log_t; - -static void dev_free_hier(devdirent_t *); -static int devfs_propogate(devdirent_t *, devdirent_t *, devfs_event_log_t); -static int dev_finddir(const char *, devnode_t *, int, devnode_t **, devfs_event_log_t); -static int dev_dup_entry(devnode_t *, devdirent_t *, devdirent_t **, struct devfsmount *); -void devfs_ref_node(devnode_t *); -void devfs_rele_node(devnode_t *); -static void devfs_record_event(devfs_event_log_t, devnode_t*, uint32_t); -static int devfs_init_event_log(devfs_event_log_t, uint32_t, devfs_vnode_event_t); -static void devfs_release_event_log(devfs_event_log_t, int); -static void devfs_bulk_notify(devfs_event_log_t); -static devdirent_t *devfs_make_node_internal(dev_t, devfstype_t type, uid_t, gid_t, int, - int (*clone)(dev_t dev, int action), const char *fmt, va_list ap); +static void dev_free_hier(devdirent_t *); +static int devfs_propogate(devdirent_t *, devdirent_t *, devfs_event_log_t); +static int dev_finddir(const char *, devnode_t *, int, devnode_t **, devfs_event_log_t); +static int dev_dup_entry(devnode_t *, devdirent_t *, devdirent_t **, struct devfsmount *); +void devfs_ref_node(devnode_t *); +void devfs_rele_node(devnode_t *); +static void devfs_record_event(devfs_event_log_t, devnode_t*, uint32_t); +static int devfs_init_event_log(devfs_event_log_t, uint32_t, devfs_vnode_event_t); +static void devfs_release_event_log(devfs_event_log_t, int); +static void devfs_bulk_notify(devfs_event_log_t); +static devdirent_t *devfs_make_node_internal(dev_t, devfstype_t type, uid_t, gid_t, int, + int (*clone)(dev_t dev, int action), const char *fmt, va_list ap); -lck_grp_t * devfs_lck_grp; -lck_grp_attr_t * devfs_lck_grp_attr; -lck_attr_t * devfs_lck_attr; -lck_mtx_t devfs_mutex; -lck_mtx_t devfs_attr_mutex; -devdirent_t * dev_root = NULL; /* root of backing tree */ -struct devfs_stats devfs_stats; /* hold stats */ +lck_grp_t * devfs_lck_grp; +lck_grp_attr_t * devfs_lck_grp_attr; +lck_attr_t * devfs_lck_attr; +lck_mtx_t devfs_mutex; +lck_mtx_t devfs_attr_mutex; -static ino_t devfs_unique_fileno = 0; +devdirent_t * dev_root = NULL; /* root of backing tree */ +struct devfs_stats devfs_stats; /* hold stats */ + +static ino_t devfs_unique_fileno = 0; #ifdef HIDDEN_MOUNTPOINT static struct mount *devfs_hidden_mount; @@ -162,8 +162,8 @@ static struct mount *devfs_hidden_mount; static int devfs_ready = 0; static uint32_t devfs_nmountplanes = 0; /* The first plane is not used for a mount */ -#define DEVFS_NOCREATE FALSE -#define DEVFS_CREATE TRUE +#define DEVFS_NOCREATE FALSE +#define DEVFS_CREATE TRUE /* * Set up the root directory node in the backing plane @@ -178,9 +178,9 @@ static uint32_t devfs_nmountplanes = 0; /* The first plane is not used for a mou int devfs_sinit(void) { - int error; + int error; - devfs_lck_grp_attr = lck_grp_attr_alloc_init(); + devfs_lck_grp_attr = lck_grp_attr_alloc_init(); devfs_lck_grp = lck_grp_alloc_init("devfs_lock", devfs_lck_grp_attr); devfs_lck_attr = lck_attr_alloc_init(); @@ -189,17 +189,17 @@ devfs_sinit(void) lck_mtx_init(&devfs_attr_mutex, devfs_lck_grp, devfs_lck_attr); DEVFS_LOCK(); - error = dev_add_entry("root", NULL, DEV_DIR, NULL, NULL, NULL, &dev_root); + error = dev_add_entry("root", NULL, DEV_DIR, NULL, NULL, NULL, &dev_root); DEVFS_UNLOCK(); if (error) { - 
printf("devfs_sinit: dev_add_entry failed "); - return (ENOTSUP); + printf("devfs_sinit: dev_add_entry failed "); + return ENOTSUP; } #ifdef HIDDEN_MOUNTPOINT MALLOC(devfs_hidden_mount, struct mount *, sizeof(struct mount), - M_MOUNT, M_WAITOK); - bzero(devfs_hidden_mount,sizeof(struct mount)); + M_MOUNT, M_WAITOK); + bzero(devfs_hidden_mount, sizeof(struct mount)); mount_lock_init(devfs_hidden_mount); TAILQ_INIT(&devfs_hidden_mount->mnt_vnodelist); TAILQ_INIT(&devfs_hidden_mount->mnt_workerqueue); @@ -216,16 +216,16 @@ devfs_sinit(void) mp->mnt_realrootvp = NULLVP; mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL; - devfs_mount(devfs_hidden_mount,"dummy",NULL,NULL,NULL); - dev_root->de_dnp->dn_dvm - = (struct devfsmount *)devfs_hidden_mount->mnt_data; + devfs_mount(devfs_hidden_mount, "dummy", NULL, NULL, NULL); + dev_root->de_dnp->dn_dvm + = (struct devfsmount *)devfs_hidden_mount->mnt_data; #endif /* HIDDEN_MOUNTPOINT */ #if CONFIG_MACF mac_devfs_label_associate_directory("/", strlen("/"), dev_root->de_dnp, "/"); #endif devfs_ready = 1; - return (0); + return 0; } /***********************************************************************\ @@ -237,174 +237,182 @@ devfs_sinit(void) /*************************************************************** - * Search down the linked list off a dir to find "name" - * return the devnode_t * for that node. - * - * called with DEVFS_LOCK held - ***************************************************************/ +* Search down the linked list off a dir to find "name" +* return the devnode_t * for that node. +* +* called with DEVFS_LOCK held +***************************************************************/ devdirent_t * dev_findname(devnode_t * dir, const char *name) { devdirent_t * newfp; - if (dir->dn_type != DEV_DIR) return 0;/*XXX*/ /* printf?*/ - - if (name[0] == '.') - { - if(name[1] == 0) - { + if (dir->dn_type != DEV_DIR) { + return 0; /*XXX*/ /* printf?*/ + } + if (name[0] == '.') { + if (name[1] == 0) { return dir->dn_typeinfo.Dir.myname; } - if((name[1] == '.') && (name[2] == 0)) - { + if ((name[1] == '.') && (name[2] == 0)) { /* for root, .. == . */ return dir->dn_typeinfo.Dir.parent->dn_typeinfo.Dir.myname; } } newfp = dir->dn_typeinfo.Dir.dirlist; - while(newfp) - { - if(!(strncmp(name, newfp->de_name, sizeof(newfp->de_name)))) + while (newfp) { + if (!(strncmp(name, newfp->de_name, sizeof(newfp->de_name)))) { return newfp; + } newfp = newfp->de_next; } return NULL; } /*********************************************************************** - * Given a starting node (0 for root) and a pathname, return the node - * for the end item on the path. It MUST BE A DIRECTORY. If the 'DEVFS_CREATE' - * option is true, then create any missing nodes in the path and create - * and return the final node as well. - * This is used to set up a directory, before making nodes in it.. - * - * called with DEVFS_LOCK held - ***********************************************************************/ +* Given a starting node (0 for root) and a pathname, return the node +* for the end item on the path. It MUST BE A DIRECTORY. If the 'DEVFS_CREATE' +* option is true, then create any missing nodes in the path and create +* and return the final node as well. +* This is used to set up a directory, before making nodes in it.. 
+* +* called with DEVFS_LOCK held +***********************************************************************/ static int -dev_finddir(const char * path, - devnode_t * dirnode, - int create, - devnode_t * * dn_pp, - devfs_event_log_t delp) +dev_finddir(const char * path, + devnode_t * dirnode, + int create, + devnode_t * * dn_pp, + devfs_event_log_t delp) { - devnode_t * dnp = NULL; - int error = 0; - const char * scan; + devnode_t * dnp = NULL; + int error = 0; + const char * scan; #if CONFIG_MACF char fullpath[DEVMAXPATHSIZE]; #endif - if (!dirnode) /* dirnode == NULL means start at root */ - dirnode = dev_root->de_dnp; + if (!dirnode) { /* dirnode == NULL means start at root */ + dirnode = dev_root->de_dnp; + } - if (dirnode->dn_type != DEV_DIR) - return ENOTDIR; + if (dirnode->dn_type != DEV_DIR) { + return ENOTDIR; + } - if (strlen(path) > (DEVMAXPATHSIZE - 1)) - return ENAMETOOLONG; + if (strlen(path) > (DEVMAXPATHSIZE - 1)) { + return ENAMETOOLONG; + } #if CONFIG_MACF - strlcpy (fullpath, path, DEVMAXPATHSIZE); + strlcpy(fullpath, path, DEVMAXPATHSIZE); #endif scan = path; - while (*scan == '/') - scan++; + while (*scan == '/') { + scan++; + } *dn_pp = NULL; while (1) { - char component[DEVMAXPATHSIZE]; - devdirent_t * dirent_p; - const char * start; - - if (*scan == 0) { - /* we hit the end of the string, we're done */ - *dn_pp = dirnode; - break; - } - start = scan; - while (*scan != '/' && *scan) - scan++; + char component[DEVMAXPATHSIZE]; + devdirent_t * dirent_p; + const char * start; - strlcpy(component, start, (scan - start) + 1); - if (*scan == '/') - scan++; - - dirent_p = dev_findname(dirnode, component); - if (dirent_p) { - dnp = dirent_p->de_dnp; - if (dnp->dn_type != DEV_DIR) { - error = ENOTDIR; - break; + if (*scan == 0) { + /* we hit the end of the string, we're done */ + *dn_pp = dirnode; + break; } - } - else { - if (!create) { - error = ENOENT; - break; + start = scan; + while (*scan != '/' && *scan) { + scan++; } - error = dev_add_entry(component, dirnode, - DEV_DIR, NULL, NULL, NULL, &dirent_p); - if (error) - break; - dnp = dirent_p->de_dnp; + + strlcpy(component, start, (scan - start) + 1); + if (*scan == '/') { + scan++; + } + + dirent_p = dev_findname(dirnode, component); + if (dirent_p) { + dnp = dirent_p->de_dnp; + if (dnp->dn_type != DEV_DIR) { + error = ENOTDIR; + break; + } + } else { + if (!create) { + error = ENOENT; + break; + } + error = dev_add_entry(component, dirnode, + DEV_DIR, NULL, NULL, NULL, &dirent_p); + if (error) { + break; + } + dnp = dirent_p->de_dnp; #if CONFIG_MACF - mac_devfs_label_associate_directory( - dirnode->dn_typeinfo.Dir.myname->de_name, - strlen(dirnode->dn_typeinfo.Dir.myname->de_name), - dnp, fullpath); + mac_devfs_label_associate_directory( + dirnode->dn_typeinfo.Dir.myname->de_name, + strlen(dirnode->dn_typeinfo.Dir.myname->de_name), + dnp, fullpath); #endif - devfs_propogate(dirnode->dn_typeinfo.Dir.myname, dirent_p, delp); - } - dirnode = dnp; /* continue relative to this directory */ + devfs_propogate(dirnode->dn_typeinfo.Dir.myname, dirent_p, delp); + } + dirnode = dnp; /* continue relative to this directory */ } - return (error); + return error; } /*********************************************************************** - * Add a new NAME element to the devfs - * If we're creating a root node, then dirname is NULL - * Basically this creates a new namespace entry for the device node - * - * Creates a name node, and links it to the supplied node - * - * called with DEVFS_LOCK held - 
***********************************************************************/ +* Add a new NAME element to the devfs +* If we're creating a root node, then dirname is NULL +* Basically this creates a new namespace entry for the device node +* +* Creates a name node, and links it to the supplied node +* +* called with DEVFS_LOCK held +***********************************************************************/ int -dev_add_name(const char * name, devnode_t * dirnode, __unused devdirent_t * back, +dev_add_name(const char * name, devnode_t * dirnode, __unused devdirent_t * back, devnode_t * dnp, devdirent_t * *dirent_pp) { - devdirent_t * dirent_p = NULL; + devdirent_t * dirent_p = NULL; - if(dirnode != NULL ) { - if(dirnode->dn_type != DEV_DIR) return(ENOTDIR); - - if( dev_findname(dirnode,name)) - return(EEXIST); + if (dirnode != NULL) { + if (dirnode->dn_type != DEV_DIR) { + return ENOTDIR; + } + + if (dev_findname(dirnode, name)) { + return EEXIST; + } } /* * make sure the name is legal * slightly misleading in the case of NULL */ - if (!name || (strlen(name) > (DEVMAXNAMESIZE - 1))) - return (ENAMETOOLONG); + if (!name || (strlen(name) > (DEVMAXNAMESIZE - 1))) { + return ENAMETOOLONG; + } /* - * Allocate and fill out a new directory entry + * Allocate and fill out a new directory entry */ - MALLOC(dirent_p, devdirent_t *, sizeof(devdirent_t), - M_DEVFSNAME, M_WAITOK); + MALLOC(dirent_p, devdirent_t *, sizeof(devdirent_t), + M_DEVFSNAME, M_WAITOK); if (!dirent_p) { - return ENOMEM; + return ENOMEM; } - bzero(dirent_p,sizeof(devdirent_t)); + bzero(dirent_p, sizeof(devdirent_t)); /* inherrit our parent's mount info */ /*XXX*/ /* a kludge but.... */ - if(dirnode && ( dnp->dn_dvm == NULL)) { + if (dirnode && (dnp->dn_dvm == NULL)) { dnp->dn_dvm = dirnode->dn_dvm; /* if(!dnp->dn_dvm) printf("parent had null dvm "); */ } @@ -415,13 +423,13 @@ dev_add_name(const char * name, devnode_t * dirnode, __unused devdirent_t * back * this stops it from being accidentally freed later. */ dirent_p->de_dnp = dnp; - dnp->dn_links++ ; /* implicit from our own name-node */ + dnp->dn_links++; /* implicit from our own name-node */ - /* + /* * Make sure that we can find all the links that reference a node * so that we can get them all if we need to zap the node. */ - if(dnp->dn_linklist) { + if (dnp->dn_linklist) { dirent_p->de_nextlink = dnp->dn_linklist; dirent_p->de_prevlinkp = dirent_p->de_nextlink->de_prevlinkp; dirent_p->de_nextlink->de_prevlinkp = &(dirent_p->de_nextlink); @@ -433,22 +441,22 @@ dev_add_name(const char * name, devnode_t * dirnode, __unused devdirent_t * back dnp->dn_linklist = dirent_p; /* - * If the node is a directory, then we need to handle the + * If the node is a directory, then we need to handle the * creation of the .. link. * A NULL dirnode indicates a root node, so point to ourself. */ - if(dnp->dn_type == DEV_DIR) { + if (dnp->dn_type == DEV_DIR) { dnp->dn_typeinfo.Dir.myname = dirent_p; /* * If we are unlinking from an old dir, decrement its links * as we point our '..' 
elsewhere - * Note: it's up to the calling code to remove the + * Note: it's up to the calling code to remove the * us from the original directory's list */ - if(dnp->dn_typeinfo.Dir.parent) { + if (dnp->dn_typeinfo.Dir.parent) { dnp->dn_typeinfo.Dir.parent->dn_links--; } - if(dirnode) { + if (dirnode) { dnp->dn_typeinfo.Dir.parent = dirnode; } else { dnp->dn_typeinfo.Dir.parent = dnp; @@ -466,14 +474,14 @@ dev_add_name(const char * name, devnode_t * dirnode, __unused devdirent_t * back * Check if we are not making a root node.. * (i.e. have parent) */ - if(dirnode) { + if (dirnode) { /* - * Put it on the END of the linked list of directory entries - */ + * Put it on the END of the linked list of directory entries + */ dirent_p->de_parent = dirnode; /* null for root */ dirent_p->de_prevp = dirnode->dn_typeinfo.Dir.dirlast; - dirent_p->de_next = *(dirent_p->de_prevp); /* should be NULL */ - /*right?*/ + dirent_p->de_next = *(dirent_p->de_prevp); /* should be NULL */ + /*right?*/ *(dirent_p->de_prevp) = dirent_p; dirnode->dn_typeinfo.Dir.dirlast = &(dirent_p->de_next); dirnode->dn_typeinfo.Dir.entrycount++; @@ -482,31 +490,31 @@ dev_add_name(const char * name, devnode_t * dirnode, __unused devdirent_t * back *dirent_pp = dirent_p; DEVFS_INCR_ENTRIES(); - return 0 ; + return 0; } /*********************************************************************** - * Add a new element to the devfs plane. - * - * Creates a new dev_node to go with it if the prototype should not be - * reused. (Is a DIR, or we select SPLIT_DEVS at compile time) - * typeinfo gives us info to make our node if we don't have a prototype. - * If typeinfo is null and proto exists, then the typeinfo field of - * the proto is used intead in the DEVFS_CREATE case. - * note the 'links' count is 0 (except if a dir) - * but it is only cleared on a transition - * so this is ok till we link it to something - * Even in SPLIT_DEVS mode, - * if the node already exists on the wanted plane, just return it - * - * called with DEVFS_LOCK held +* Add a new element to the devfs plane. +* +* Creates a new dev_node to go with it if the prototype should not be +* reused. (Is a DIR, or we select SPLIT_DEVS at compile time) +* typeinfo gives us info to make our node if we don't have a prototype. +* If typeinfo is null and proto exists, then the typeinfo field of +* the proto is used intead in the DEVFS_CREATE case. 
+* note the 'links' count is 0 (except if a dir) +* but it is only cleared on a transition +* so this is ok till we link it to something +* Even in SPLIT_DEVS mode, +* if the node already exists on the wanted plane, just return it +* +* called with DEVFS_LOCK held ***********************************************************************/ int dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, - devnode_t * *dn_pp, struct devfsmount *dvm) + devnode_t * *dn_pp, struct devfsmount *dvm) { - devnode_t * dnp = NULL; + devnode_t * dnp = NULL; #if defined SPLIT_DEVS /* @@ -515,29 +523,30 @@ dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, */ if (proto) { dnp = proto->dn_nextsibling; - while( dnp != proto) { + while (dnp != proto) { if (dnp->dn_dvm == dvm) { *dn_pp = dnp; - return (0); + return 0; } dnp = dnp->dn_nextsibling; } - if (typeinfo == NULL) + if (typeinfo == NULL) { typeinfo = &(proto->dn_typeinfo); + } } -#else /* SPLIT_DEVS */ - if ( proto ) { +#else /* SPLIT_DEVS */ + if (proto) { switch (proto->type) { - case DEV_BDEV: - case DEV_CDEV: - *dn_pp = proto; - return 0; + case DEV_BDEV: + case DEV_CDEV: + *dn_pp = proto; + return 0; } } -#endif /* SPLIT_DEVS */ +#endif /* SPLIT_DEVS */ MALLOC(dnp, devnode_t *, sizeof(devnode_t), M_DEVFSNODE, M_WAITOK); if (!dnp) { - return ENOMEM; + return ENOMEM; } /* @@ -560,9 +569,9 @@ dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, mac_devfs_label_copy(proto->dn_label, dnp->dn_label); #endif } else { - struct timeval tv; + struct timeval tv; - /* + /* * We have no prototype, so start off with a clean slate */ microtime(&tv); @@ -585,7 +594,7 @@ dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, /* * fill out the dev node according to type */ - switch(entrytype) { + switch (entrytype) { case DEV_DIR: /* * As it's a directory, make sure @@ -603,30 +612,30 @@ dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, * that we use (by default) for directories */ dnp->dn_ops = &devfs_vnodeop_p; - dnp->dn_mode |= 0555; /* default perms */ + dnp->dn_mode |= 0555; /* default perms */ break; case DEV_SLNK: /* * As it's a symlink allocate and store the link info * Symlinks should only ever be created by the user, - * so they are not on the back plane and should not be + * so they are not on the back plane and should not be * propogated forward.. a bit like directories in that way.. * A symlink only exists on one plane and has its own * node.. therefore we might be on any random plane. 
*/ - MALLOC(dnp->dn_typeinfo.Slnk.name, char *, - typeinfo->Slnk.namelen+1, - M_DEVFSNODE, M_WAITOK); + MALLOC(dnp->dn_typeinfo.Slnk.name, char *, + typeinfo->Slnk.namelen + 1, + M_DEVFSNODE, M_WAITOK); if (!dnp->dn_typeinfo.Slnk.name) { - FREE(dnp,M_DEVFSNODE); + FREE(dnp, M_DEVFSNODE); return ENOMEM; } strlcpy(dnp->dn_typeinfo.Slnk.name, typeinfo->Slnk.name, - typeinfo->Slnk.namelen + 1); + typeinfo->Slnk.namelen + 1); dnp->dn_typeinfo.Slnk.namelen = typeinfo->Slnk.namelen; DEVFS_INCR_STRINGSPACE(dnp->dn_typeinfo.Slnk.namelen + 1); dnp->dn_ops = &devfs_vnodeop_p; - dnp->dn_mode |= 0555; /* default perms */ + dnp->dn_mode |= 0555; /* default perms */ break; case DEV_CDEV: case DEV_BDEV: @@ -642,7 +651,7 @@ dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, /* /dev/fd is special */ case DEV_DEVFD: dnp->dn_ops = &devfs_devfd_vnodeop_p; - dnp->dn_mode |= 0555; /* default perms */ + dnp->dn_mode |= 0555; /* default perms */ break; #endif /* FDESC */ @@ -652,7 +661,7 @@ dev_add_node(int entrytype, devnode_type_t * typeinfo, devnode_t * proto, *dn_pp = dnp; DEVFS_INCR_NODES(); - return 0 ; + return 0; } @@ -665,12 +674,12 @@ devnode_free(devnode_t * dnp) #if CONFIG_MACF mac_devfs_label_destroy(dnp); #endif - if (dnp->dn_type == DEV_SLNK) { - DEVFS_DECR_STRINGSPACE(dnp->dn_typeinfo.Slnk.namelen + 1); - FREE(dnp->dn_typeinfo.Slnk.name, M_DEVFSNODE); - } - DEVFS_DECR_NODES(); - FREE(dnp, M_DEVFSNODE); + if (dnp->dn_type == DEV_SLNK) { + DEVFS_DECR_STRINGSPACE(dnp->dn_typeinfo.Slnk.namelen + 1); + FREE(dnp->dn_typeinfo.Slnk.name, M_DEVFSNODE); + } + DEVFS_DECR_NODES(); + FREE(dnp, M_DEVFSNODE); } @@ -680,87 +689,83 @@ devnode_free(devnode_t * dnp) static void devfs_dn_free(devnode_t * dnp) { - if(--dnp->dn_links <= 0 ) /* can be -1 for initial free, on error */ - { + if (--dnp->dn_links <= 0) { /* can be -1 for initial free, on error */ /*probably need to do other cleanups XXX */ if (dnp->dn_nextsibling != dnp) { - devnode_t * * prevp = dnp->dn_prevsiblingp; + devnode_t * * prevp = dnp->dn_prevsiblingp; *prevp = dnp->dn_nextsibling; dnp->dn_nextsibling->dn_prevsiblingp = prevp; - } /* Can only free if there are no references; otherwise, wait for last vnode to be reclaimed */ if (dnp->dn_refcount == 0) { - devnode_free(dnp); - } - else { - dnp->dn_lflags |= DN_DELETE; + devnode_free(dnp); + } else { + dnp->dn_lflags |= DN_DELETE; } } } /***********************************************************************\ -* Front Node Operations * +* Front Node Operations * * Add or delete a chain of front nodes * \***********************************************************************/ /*********************************************************************** - * Given a directory backing node, and a child backing node, add the - * appropriate front nodes to the front nodes of the directory to - * represent the child node to the user - * - * on failure, front nodes will either be correct or not exist for each - * front dir, however dirs completed will not be stripped of completed - * frontnodes on failure of a later frontnode - * - * This allows a new node to be propogated through all mounted planes - * - * called with DEVFS_LOCK held - ***********************************************************************/ +* Given a directory backing node, and a child backing node, add the +* appropriate front nodes to the front nodes of the directory to +* represent the child node to the user +* +* on failure, front nodes will either be correct or not exist for each +* front dir, however dirs completed 
will not be stripped of completed +* frontnodes on failure of a later frontnode +* +* This allows a new node to be propogated through all mounted planes +* +* called with DEVFS_LOCK held +***********************************************************************/ static int -devfs_propogate(devdirent_t * parent,devdirent_t * child, devfs_event_log_t delp) +devfs_propogate(devdirent_t * parent, devdirent_t * child, devfs_event_log_t delp) { - int error; + int error; devdirent_t * newnmp; - devnode_t * dnp = child->de_dnp; - devnode_t * pdnp = parent->de_dnp; - devnode_t * adnp = parent->de_dnp; + devnode_t * dnp = child->de_dnp; + devnode_t * pdnp = parent->de_dnp; + devnode_t * adnp = parent->de_dnp; int type = child->de_dnp->dn_type; uint32_t events; - + events = (dnp->dn_type == DEV_DIR ? VNODE_EVENT_DIR_CREATED : VNODE_EVENT_FILE_CREATED); if (delp != NULL) { devfs_record_event(delp, pdnp, events); } /*********************************************** - * Find the other instances of the parent node - ***********************************************/ + * Find the other instances of the parent node + ***********************************************/ for (adnp = pdnp->dn_nextsibling; - adnp != pdnp; - adnp = adnp->dn_nextsibling) - { + adnp != pdnp; + adnp = adnp->dn_nextsibling) { /* * Make the node, using the original as a prototype) * if the node already exists on that plane it won't be * re-made.. */ if ((error = dev_add_entry(child->de_name, adnp, type, - NULL, dnp, adnp->dn_dvm, - &newnmp)) != 0) { - printf("duplicating %s failed\n",child->de_name); + NULL, dnp, adnp->dn_dvm, + &newnmp)) != 0) { + printf("duplicating %s failed\n", child->de_name); } else { if (delp != NULL) { devfs_record_event(delp, adnp, events); - /* + /* * Slightly subtle. We're guaranteed that there will * only be a vnode hooked into this devnode if we're creating * a new link to an existing node; otherwise, the devnode is new * and no one can have looked it up yet. If we're making a link, - * then the buffer is large enough for two nodes in each + * then the buffer is large enough for two nodes in each * plane; otherwise, there's no vnode and this call will * do nothing. */ @@ -768,7 +773,7 @@ devfs_propogate(devdirent_t * parent,devdirent_t * child, devfs_event_log_t delp } } } - return 0; /* for now always succeed */ + return 0; /* for now always succeed */ } static uint32_t @@ -777,30 +782,29 @@ remove_notify_count(devnode_t *dnp) uint32_t notify_count = 0; devnode_t *dnp2; - /* - * Could need to notify for one removed node on each mount and + /* + * Could need to notify for one removed node on each mount and * one parent for each such node. */ notify_count = devfs_nmountplanes; - notify_count += dnp->dn_links; + notify_count += dnp->dn_links; for (dnp2 = dnp->dn_nextsibling; dnp2 != dnp; dnp2 = dnp2->dn_nextsibling) { - notify_count += dnp2->dn_links; + notify_count += dnp2->dn_links; } return notify_count; - } /*********************************************************************** - * remove all instances of this devicename [for backing nodes..] - * note.. if there is another link to the node (non dir nodes only) - * then the devfs_node will still exist as the ref count will be non-0 - * removing a directory node will remove all sup-nodes on all planes (ZAP) - * - * Used by device drivers to remove nodes that are no longer relevant - * The argument is the 'cookie' they were given when they created the node - * this function is exported.. 
see devfs.h - ***********************************************************************/ +* remove all instances of this devicename [for backing nodes..] +* note.. if there is another link to the node (non dir nodes only) +* then the devfs_node will still exist as the ref count will be non-0 +* removing a directory node will remove all sup-nodes on all planes (ZAP) +* +* Used by device drivers to remove nodes that are no longer relevant +* The argument is the 'cookie' they were given when they created the node +* this function is exported.. see devfs.h +***********************************************************************/ void devfs_remove(void *dirent_p) { @@ -809,10 +813,10 @@ devfs_remove(void *dirent_p) boolean_t lastlink; struct devfs_event_log event_log; uint32_t log_count = 0; - int do_notify = 0; - int need_free = 0; + int do_notify = 0; + int need_free = 0; struct devfs_vnode_event stackbuf[NUM_STACK_ENTRIES]; - + DEVFS_LOCK(); if (!devfs_ready) { @@ -829,7 +833,7 @@ wrongsize: if (devfs_init_event_log(&event_log, log_count, NULL) == 0) { do_notify = 1; need_free = 1; - } + } DEVFS_LOCK(); new_count = remove_notify_count(dnp); @@ -853,15 +857,14 @@ wrongsize: /* keep removing the next sibling till only we exist. */ while ((dnp2 = dnp->dn_nextsibling) != dnp) { - /* * Keep removing the next front node till no more exist */ - dnp->dn_nextsibling = dnp2->dn_nextsibling; + dnp->dn_nextsibling = dnp2->dn_nextsibling; dnp->dn_nextsibling->dn_prevsiblingp = &(dnp->dn_nextsibling); dnp2->dn_nextsibling = dnp2; dnp2->dn_prevsiblingp = &(dnp2->dn_nextsibling); - + /* This file has been deleted in this plane */ if (do_notify != 0) { devfs_record_event(&event_log, dnp2, VNODE_EVENT_DELETE); @@ -901,7 +904,7 @@ out: devfs_release_event_log(&event_log, need_free); } - return ; + return; } @@ -917,11 +920,12 @@ out: int dev_dup_plane(struct devfsmount *devfs_mp_p) { - devdirent_t * new; - int error = 0; + devdirent_t * new; + int error = 0; - if ((error = dev_dup_entry(NULL, dev_root, &new, devfs_mp_p))) - return error; + if ((error = dev_dup_entry(NULL, dev_root, &new, devfs_mp_p))) { + return error; + } devfs_mp_p->plane_root = new; devfs_nmountplanes++; return error; @@ -930,10 +934,10 @@ dev_dup_plane(struct devfsmount *devfs_mp_p) /*************************************************************** - * Free a whole plane - * - * called with DEVFS_LOCK held - ***************************************************************/ +* Free a whole plane +* +* called with DEVFS_LOCK held +***************************************************************/ void devfs_free_plane(struct devfsmount *devfs_mp_p) { @@ -947,52 +951,52 @@ devfs_free_plane(struct devfsmount *devfs_mp_p) devfs_mp_p->plane_root = NULL; devfs_nmountplanes--; - if (devfs_nmountplanes > (devfs_nmountplanes+1)) { + if (devfs_nmountplanes > (devfs_nmountplanes + 1)) { panic("plane count wrapped around.\n"); } } /*************************************************************** - * Create and link in a new front element.. - * Parent can be 0 for a root node - * Not presently usable to make a symlink XXX - * (Ok, symlinks don't propogate) - * recursively will create subnodes corresponding to equivalent - * child nodes in the base level - * - * called with DEVFS_LOCK held - ***************************************************************/ +* Create and link in a new front element.. 
+* Parent can be 0 for a root node +* Not presently usable to make a symlink XXX +* (Ok, symlinks don't propogate) +* recursively will create subnodes corresponding to equivalent +* child nodes in the base level +* +* called with DEVFS_LOCK held +***************************************************************/ static int dev_dup_entry(devnode_t * parent, devdirent_t * back, devdirent_t * *dnm_pp, - struct devfsmount *dvm) + struct devfsmount *dvm) { - devdirent_t * entry_p = NULL; - devdirent_t * newback; - devdirent_t * newfront; - int error; - devnode_t * dnp = back->de_dnp; + devdirent_t * entry_p = NULL; + devdirent_t * newback; + devdirent_t * newfront; + int error; + devnode_t * dnp = back->de_dnp; int type = dnp->dn_type; /* * go get the node made (if we need to) * use the back one as a prototype */ - error = dev_add_entry(back->de_name, parent, type, NULL, dnp, - parent?parent->dn_dvm:dvm, &entry_p); - if (!error && (entry_p == NULL)) { - error = ENOMEM; /* Really can't happen, but make static analyzer happy */ - } + error = dev_add_entry(back->de_name, parent, type, NULL, dnp, + parent?parent->dn_dvm:dvm, &entry_p); + if (!error && (entry_p == NULL)) { + error = ENOMEM; /* Really can't happen, but make static analyzer happy */ + } if (error != 0) { - printf("duplicating %s failed\n",back->de_name); - goto out; + printf("duplicating %s failed\n", back->de_name); + goto out; } /* * If we have just made the root, then insert the pointer to the * mount information */ - if(dvm) { + if (dvm) { entry_p->de_dnp->dn_dvm = dvm; } @@ -1001,14 +1005,11 @@ dev_dup_entry(devnode_t * parent, devdirent_t * back, devdirent_t * *dnm_pp, * subnodes in it.... * note that this time we don't pass on the mount info.. */ - if (type == DEV_DIR) - { - for(newback = back->de_dnp->dn_typeinfo.Dir.dirlist; - newback; newback = newback->de_next) - { - if((error = dev_dup_entry(entry_p->de_dnp, - newback, &newfront, NULL)) != 0) - { + if (type == DEV_DIR) { + for (newback = back->de_dnp->dn_typeinfo.Dir.dirlist; + newback; newback = newback->de_next) { + if ((error = dev_dup_entry(entry_p->de_dnp, + newback, &newfront, NULL)) != 0) { break; /* back out with an error */ } } @@ -1020,63 +1021,59 @@ out: /*************************************************************** - * Free a name node - * remember that if there are other names pointing to the - * dev_node then it may not get freed yet - * can handle if there is no dnp - * - * called with DEVFS_LOCK held - ***************************************************************/ +* Free a name node +* remember that if there are other names pointing to the +* dev_node then it may not get freed yet +* can handle if there is no dnp +* +* called with DEVFS_LOCK held +***************************************************************/ int dev_free_name(devdirent_t * dirent_p) { - devnode_t * parent = dirent_p->de_parent; - devnode_t * dnp = dirent_p->de_dnp; + devnode_t * parent = dirent_p->de_parent; + devnode_t * dnp = dirent_p->de_dnp; - if(dnp) { - if(dnp->dn_type == DEV_DIR) - { - devnode_t * p; + if (dnp) { + if (dnp->dn_type == DEV_DIR) { + devnode_t * p; - if(dnp->dn_typeinfo.Dir.dirlist) - return (ENOTEMPTY); + if (dnp->dn_typeinfo.Dir.dirlist) { + return ENOTEMPTY; + } p = dnp->dn_typeinfo.Dir.parent; - devfs_dn_free(dnp); /* account for '.' */ - devfs_dn_free(p); /* '..' */ + devfs_dn_free(dnp); /* account for '.' */ + devfs_dn_free(p); /* '..' */ } /* * unlink us from the list of links for this node * If we are the only link, it's easy! 
* if we are a DIR of course there should not be any * other links. - */ - if(dirent_p->de_nextlink == dirent_p) { - dnp->dn_linklist = NULL; + */ + if (dirent_p->de_nextlink == dirent_p) { + dnp->dn_linklist = NULL; } else { - if(dnp->dn_linklist == dirent_p) { + if (dnp->dn_linklist == dirent_p) { dnp->dn_linklist = dirent_p->de_nextlink; } } devfs_dn_free(dnp); } - + dirent_p->de_nextlink->de_prevlinkp = dirent_p->de_prevlinkp; *(dirent_p->de_prevlinkp) = dirent_p->de_nextlink; /* * unlink ourselves from the directory on this plane */ - if(parent) /* if not fs root */ - { - if( (*dirent_p->de_prevp = dirent_p->de_next) )/* yes, assign */ - { + if (parent) { /* if not fs root */ + if ((*dirent_p->de_prevp = dirent_p->de_next)) {/* yes, assign */ dirent_p->de_next->de_prevp = dirent_p->de_prevp; - } - else - { + } else { parent->dn_typeinfo.Dir.dirlast - = dirent_p->de_prevp; + = dirent_p->de_prevp; } parent->dn_typeinfo.Dir.entrycount--; parent->dn_len -= strlen(dirent_p->de_name) + 8; @@ -1089,25 +1086,23 @@ dev_free_name(devdirent_t * dirent_p) /*************************************************************** - * Free a hierarchy starting at a directory node name - * remember that if there are other names pointing to the - * dev_node then it may not get freed yet - * can handle if there is no dnp - * leave the node itself allocated. - * - * called with DEVFS_LOCK held - ***************************************************************/ +* Free a hierarchy starting at a directory node name +* remember that if there are other names pointing to the +* dev_node then it may not get freed yet +* can handle if there is no dnp +* leave the node itself allocated. +* +* called with DEVFS_LOCK held +***************************************************************/ static void dev_free_hier(devdirent_t * dirent_p) { - devnode_t * dnp = dirent_p->de_dnp; + devnode_t * dnp = dirent_p->de_dnp; - if(dnp) { - if(dnp->dn_type == DEV_DIR) - { - while(dnp->dn_typeinfo.Dir.dirlist) - { + if (dnp) { + if (dnp->dn_type == DEV_DIR) { + while (dnp->dn_typeinfo.Dir.dirlist) { dev_free_hier(dnp->dn_typeinfo.Dir.dirlist); dev_free_name(dnp->dn_typeinfo.Dir.dirlist); } @@ -1136,7 +1131,7 @@ devfs_dntovn(devnode_t * dnp, struct vnode **vn_pp, __unused struct proc * p) int markroot = 0; int nretries = 0; int n_minor = DEVFS_CLONE_ALLOC; /* new minor number for clone device */ - + /* * We should never come in and find that our devnode has been marked for delete. * The lookup should have held the lock from entry until now; it should not have @@ -1155,8 +1150,8 @@ retry: vn_p = dnp->dn_vn; if (vn_p) { /* already has a vnode */ - uint32_t vid; - + uint32_t vid; + vid = vnode_vid(vn_p); DEVFS_UNLOCK(); @@ -1170,26 +1165,26 @@ retry: * can be quite frequently reclaimed by revoke(2) or by the * exit of a controlling process. */ - error = vnode_getwithvid_drainok(vn_p, vid); + error = vnode_getwithvid_drainok(vn_p, vid); - DEVFS_LOCK(); + DEVFS_LOCK(); if (dnp->dn_lflags & DN_DELETE) { - /* + /* * our BUSY node got marked for * deletion while the DEVFS lock * was dropped... */ - if (error == 0) { - /* + if (error == 0) { + /* * vnode_getwithvid returned a valid ref * which we need to drop */ - vnode_put(vn_p); + vnode_put(vn_p); } - - /* - * This entry is no longer in the namespace. This is only + + /* + * This entry is no longer in the namespace. This is only * possible for lookup: no other path would not find an existing * vnode. Therefore, ENOENT is a valid result. 
*/ @@ -1218,44 +1213,45 @@ retry: nretries++; goto retry; } - if ( !error) - *vn_pp = vn_p; + if (!error) { + *vn_pp = vn_p; + } goto out; } - /* - * If we get here, then we've beaten any deletes; + /* + * If we get here, then we've beaten any deletes; * if someone sets DN_DELETE during a subsequent drop * of the devfs lock, we'll still vend a vnode. */ if (dnp->dn_lflags & DN_CREATE) { dnp->dn_lflags |= DN_CREATEWAIT; - msleep(&dnp->dn_lflags, &devfs_mutex, PRIBIO, 0 , 0); + msleep(&dnp->dn_lflags, &devfs_mutex, PRIBIO, 0, 0); goto retry; } dnp->dn_lflags |= DN_CREATE; switch (dnp->dn_type) { - case DEV_SLNK: - vtype = VLNK; - break; - case DEV_DIR: - if (dnp->dn_typeinfo.Dir.parent == dnp) { - markroot = 1; - } - vtype = VDIR; - break; - case DEV_BDEV: - case DEV_CDEV: - vtype = (dnp->dn_type == DEV_BDEV) ? VBLK : VCHR; - break; + case DEV_SLNK: + vtype = VLNK; + break; + case DEV_DIR: + if (dnp->dn_typeinfo.Dir.parent == dnp) { + markroot = 1; + } + vtype = VDIR; + break; + case DEV_BDEV: + case DEV_CDEV: + vtype = (dnp->dn_type == DEV_BDEV) ? VBLK : VCHR; + break; #if FDESC - case DEV_DEVFD: - vtype = VDIR; - break; + case DEV_DEVFD: + vtype = VDIR; + break; #endif /* FDESC */ } vfsp.vnfs_mp = dnp->dn_dvm->mount; @@ -1265,7 +1261,7 @@ retry: vfsp.vnfs_fsnode = dnp; vfsp.vnfs_cnp = 0; vfsp.vnfs_vops = *(dnp->dn_ops); - + if (vtype == VBLK || vtype == VCHR) { /* * Ask the clone minor number function for a new minor number @@ -1273,7 +1269,7 @@ retry: * limit has been reached, this function will return -1. */ if (dnp->dn_clone != NULL) { - int n_major = major(dnp->dn_typeinfo.dev); + int n_major = major(dnp->dn_typeinfo.dev); n_minor = (*dnp->dn_clone)(dnp->dn_typeinfo.dev, DEVFS_CLONE_ALLOC); if (n_minor == -1) { @@ -1283,7 +1279,7 @@ retry: vfsp.vnfs_rdev = makedev(n_major, n_minor);; } else { - vfsp.vnfs_rdev = dnp->dn_typeinfo.dev; + vfsp.vnfs_rdev = dnp->dn_typeinfo.dev; } } else { vfsp.vnfs_rdev = 0; @@ -1297,7 +1293,7 @@ retry: DEVFS_UNLOCK(); error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vn_p); - + /* Do this before grabbing the lock */ if (error == 0) { vnode_setneedinactive(vn_p); @@ -1306,28 +1302,29 @@ retry: DEVFS_LOCK(); if (error == 0) { - vnode_settag(vn_p, VT_DEVFS); + vnode_settag(vn_p, VT_DEVFS); - if ((dnp->dn_clone != NULL) && (dnp->dn_vn != NULLVP) ) - panic("devfs_dntovn: cloning device with a vnode?\n"); + if ((dnp->dn_clone != NULL) && (dnp->dn_vn != NULLVP)) { + panic("devfs_dntovn: cloning device with a vnode?\n"); + } - *vn_pp = vn_p; + *vn_pp = vn_p; - /* - * Another vnode that has this devnode as its v_data. - * This reference, unlike the one taken at the start - * of the function, persists until a VNOP_RECLAIM - * comes through for this vnode. - */ - devfs_ref_node(dnp); + /* + * Another vnode that has this devnode as its v_data. + * This reference, unlike the one taken at the start + * of the function, persists until a VNOP_RECLAIM + * comes through for this vnode. + */ + devfs_ref_node(dnp); - /* - * A cloned vnode is not hooked into the devnode; every lookup - * gets a new vnode. - */ - if (dnp->dn_clone == NULL) { - dnp->dn_vn = vn_p; - } + /* + * A cloned vnode is not hooked into the devnode; every lookup + * gets a new vnode. + */ + if (dnp->dn_clone == NULL) { + dnp->dn_vn = vn_p; + } } else if (n_minor != DEVFS_CLONE_ALLOC) { /* * If we failed the create, we need to release the cloned minor @@ -1338,7 +1335,7 @@ retry: * assumed that any state to be released will be released when * the vnode is dropped, instead. 
*/ - (void)(*dnp->dn_clone)(dnp->dn_typeinfo.dev, DEVFS_CLONE_FREE); + (void)(*dnp->dn_clone)(dnp->dn_typeinfo.dev, DEVFS_CLONE_FREE); } dnp->dn_lflags &= ~DN_CREATE; @@ -1348,7 +1345,7 @@ retry: } out: - /* + /* * Release the reference we took to prevent deletion while we weren't holding the lock. * If not returning success, then dropping this reference could delete the devnode; * no one should access a devnode after a call to devfs_dntovn fails. @@ -1363,59 +1360,55 @@ out: * while the devfs lock is not held. */ void -devfs_ref_node(devnode_t *dnp) +devfs_ref_node(devnode_t *dnp) { dnp->dn_refcount++; } /* - * Release a reference on a devnode. If the devnode is marked for + * Release a reference on a devnode. If the devnode is marked for * free and the refcount is dropped to zero, do the free. */ -void +void devfs_rele_node(devnode_t *dnp) { dnp->dn_refcount--; if (dnp->dn_refcount < 0) { panic("devfs_rele_node: devnode with a negative refcount!\n"); - } else if ((dnp->dn_refcount == 0) && (dnp->dn_lflags & DN_DELETE)) { + } else if ((dnp->dn_refcount == 0) && (dnp->dn_lflags & DN_DELETE)) { devnode_free(dnp); } - } /*********************************************************************** - * add a whole device, with no prototype.. make name element and node - * Used for adding the original device entries - * - * called with DEVFS_LOCK held - ***********************************************************************/ +* add a whole device, with no prototype.. make name element and node +* Used for adding the original device entries +* +* called with DEVFS_LOCK held +***********************************************************************/ int dev_add_entry(const char *name, devnode_t * parent, int type, devnode_type_t * typeinfo, - devnode_t * proto, struct devfsmount *dvm, devdirent_t * *nm_pp) + devnode_t * proto, struct devfsmount *dvm, devdirent_t * *nm_pp) { - devnode_t * dnp; - int error = 0; + devnode_t * dnp; + int error = 0; - if ((error = dev_add_node(type, typeinfo, proto, &dnp, - (parent?parent->dn_dvm:dvm))) != 0) - { + if ((error = dev_add_node(type, typeinfo, proto, &dnp, + (parent?parent->dn_dvm:dvm))) != 0) { printf("devfs: %s: base node allocation failed (Errno=%d)\n", - name,error); + name, error); return error; } - if ((error = dev_add_name(name ,parent ,NULL, dnp, nm_pp)) != 0) - { + if ((error = dev_add_name(name, parent, NULL, dnp, nm_pp)) != 0) { devfs_dn_free(dnp); /* 1->0 for dir, 0->(-1) for other */ printf("devfs: %s: name slot allocation failed (Errno=%d)\n", - name,error); - + name, error); } return error; } static void -devfs_bulk_notify(devfs_event_log_t delp) +devfs_bulk_notify(devfs_event_log_t delp) { uint32_t i; for (i = 0; i < delp->del_used; i++) { @@ -1427,7 +1420,7 @@ devfs_bulk_notify(devfs_event_log_t delp) } } -static void +static void devfs_record_event(devfs_event_log_t delp, devnode_t *dnp, uint32_t events) { if (delp->del_used >= delp->del_max) { @@ -1445,11 +1438,11 @@ devfs_record_event(devfs_event_log_t delp, devnode_t *dnp, uint32_t events) } static int -devfs_init_event_log(devfs_event_log_t delp, uint32_t count, devfs_vnode_event_t buf) +devfs_init_event_log(devfs_event_log_t delp, uint32_t count, devfs_vnode_event_t buf) { devfs_vnode_event_t dvearr; - if (buf == NULL) { + if (buf == NULL) { MALLOC(dvearr, devfs_vnode_event_t, count * sizeof(struct devfs_vnode_event), M_TEMP, M_WAITOK | M_ZERO); if (dvearr == NULL) { return ENOMEM; @@ -1485,7 +1478,7 @@ devfs_release_event_log(devfs_event_log_t delp, int need_free) * Create a device node 
with the given pathname in the devfs namespace. * * Parameters: - * dev - the dev_t value to associate + * dev - the dev_t value to associate * chrblk - block or character device (DEVFS_CHAR or DEVFS_BLOCK) * uid, gid - ownership * perms - permissions @@ -1496,22 +1489,22 @@ devfs_release_event_log(devfs_event_log_t delp, int need_free) */ void * devfs_make_node_clone(dev_t dev, int chrblk, uid_t uid, - gid_t gid, int perms, int (*clone)(dev_t dev, int action), - const char *fmt, ...) + gid_t gid, int perms, int (*clone)(dev_t dev, int action), + const char *fmt, ...) { - devdirent_t * new_dev = NULL; - devfstype_t type; + devdirent_t * new_dev = NULL; + devfstype_t type; va_list ap; switch (chrblk) { - case DEVFS_CHAR: - type = DEV_CDEV; - break; - case DEVFS_BLOCK: - type = DEV_BDEV; - break; - default: - goto out; + case DEVFS_CHAR: + type = DEV_CDEV; + break; + case DEVFS_BLOCK: + type = DEV_BDEV; + break; + default: + goto out; } va_start(ap, fmt); @@ -1529,7 +1522,7 @@ out: * Create a device node with the given pathname in the devfs namespace. * * Parameters: - * dev - the dev_t value to associate + * dev - the dev_t value to associate * chrblk - block or character device (DEVFS_CHAR or DEVFS_BLOCK) * uid, gid - ownership * perms - permissions @@ -1539,58 +1532,60 @@ out: */ void * devfs_make_node(dev_t dev, int chrblk, uid_t uid, - gid_t gid, int perms, const char *fmt, ...) + gid_t gid, int perms, const char *fmt, ...) { - devdirent_t * new_dev = NULL; + devdirent_t * new_dev = NULL; devfstype_t type; va_list ap; - if (chrblk != DEVFS_CHAR && chrblk != DEVFS_BLOCK) + if (chrblk != DEVFS_CHAR && chrblk != DEVFS_BLOCK) { goto out; + } type = (chrblk == DEVFS_BLOCK ? DEV_BDEV : DEV_CDEV); va_start(ap, fmt); new_dev = devfs_make_node_internal(dev, type, uid, gid, perms, NULL, fmt, ap); va_end(ap); - + out: return new_dev; } static devdirent_t * -devfs_make_node_internal(dev_t dev, devfstype_t type, uid_t uid, - gid_t gid, int perms, int (*clone)(dev_t dev, int action), const char *fmt, va_list ap) +devfs_make_node_internal(dev_t dev, devfstype_t type, uid_t uid, + gid_t gid, int perms, int (*clone)(dev_t dev, int action), const char *fmt, va_list ap) { - devdirent_t * new_dev = NULL; + devdirent_t * new_dev = NULL; devnode_t * dnp; - devnode_type_t typeinfo; + devnode_type_t typeinfo; - char *name, buf[256]; /* XXX */ - const char *path; + char *name, buf[256]; /* XXX */ + const char *path; #if CONFIG_MACF char buff[sizeof(buf)]; #endif - int i; - uint32_t log_count; + int i; + uint32_t log_count; struct devfs_event_log event_log; struct devfs_vnode_event stackbuf[NUM_STACK_ENTRIES]; - int need_free = 0; + int need_free = 0; vsnprintf(buf, sizeof(buf), fmt, ap); #if CONFIG_MACF bcopy(buf, buff, sizeof(buff)); - buff[sizeof(buff)-1] = 0; + buff[sizeof(buff) - 1] = 0; #endif name = NULL; - for(i=strlen(buf); i>0; i--) - if(buf[i] == '/') { - name=&buf[i]; - buf[i]=0; + for (i = strlen(buf); i > 0; i--) { + if (buf[i] == '/') { + name = &buf[i]; + buf[i] = 0; break; } + } if (name) { *name++ = '\0'; @@ -1622,7 +1617,7 @@ wrongsize: log_count = log_count * 2; goto wrongsize; } - + if (!devfs_ready) { printf("devfs_make_node: not ready for devices!\n"); goto out; @@ -1630,17 +1625,17 @@ wrongsize: /* find/create directory path ie. 
mkdir -p */ if (dev_finddir(path, NULL, DEVFS_CREATE, &dnp, &event_log) == 0) { - typeinfo.dev = dev; - if (dev_add_entry(name, dnp, type, &typeinfo, NULL, NULL, &new_dev) == 0) { - new_dev->de_dnp->dn_gid = gid; - new_dev->de_dnp->dn_uid = uid; - new_dev->de_dnp->dn_mode |= perms; - new_dev->de_dnp->dn_clone = clone; + typeinfo.dev = dev; + if (dev_add_entry(name, dnp, type, &typeinfo, NULL, NULL, &new_dev) == 0) { + new_dev->de_dnp->dn_gid = gid; + new_dev->de_dnp->dn_uid = uid; + new_dev->de_dnp->dn_mode |= perms; + new_dev->de_dnp->dn_clone = clone; #if CONFIG_MACF - mac_devfs_label_associate_device(dev, new_dev->de_dnp, buff); + mac_devfs_label_associate_device(dev, new_dev->de_dnp, buff); #endif - devfs_propogate(dnp->dn_typeinfo.Dir.myname, new_dev, &event_log); - } + devfs_propogate(dnp->dn_typeinfo.Dir.myname, new_dev, &event_log); + } } out: @@ -1663,11 +1658,11 @@ out: int devfs_make_link(void *original, char *fmt, ...) { - devdirent_t * new_dev = NULL; - devdirent_t * orig = (devdirent_t *) original; - devnode_t * dirnode; /* devnode for parent directory */ + devdirent_t * new_dev = NULL; + devdirent_t * orig = (devdirent_t *) original; + devnode_t * dirnode; /* devnode for parent directory */ struct devfs_event_log event_log; - uint32_t log_count; + uint32_t log_count; va_list ap; char *p, buf[256]; /* XXX */ @@ -1688,17 +1683,17 @@ devfs_make_link(void *original, char *fmt, ...) p = NULL; - for(i=strlen(buf); i>0; i--) { - if(buf[i] == '/') { - p=&buf[i]; - buf[i]=0; - break; + for (i = strlen(buf); i > 0; i--) { + if (buf[i] == '/') { + p = &buf[i]; + buf[i] = 0; + break; } } - - /* - * One slot for each directory, one for each devnode - * whose link count changes + + /* + * One slot for each directory, one for each devnode + * whose link count changes */ log_count = devfs_nmountplanes * 2; wrongsize: @@ -1717,15 +1712,17 @@ wrongsize: } if (p) { - *p++ = '\0'; + *p++ = '\0'; if (dev_finddir(buf, NULL, DEVFS_CREATE, &dirnode, &event_log) - || dev_add_name(p, dirnode, NULL, orig->de_dnp, &new_dev)) - goto fail; + || dev_add_name(p, dirnode, NULL, orig->de_dnp, &new_dev)) { + goto fail; + } } else { - if (dev_finddir("", NULL, DEVFS_CREATE, &dirnode, &event_log) - || dev_add_name(buf, dirnode, NULL, orig->de_dnp, &new_dev)) - goto fail; + if (dev_finddir("", NULL, DEVFS_CREATE, &dirnode, &event_log) + || dev_add_name(buf, dirnode, NULL, orig->de_dnp, &new_dev)) { + goto fail; + } } devfs_propogate(dirnode->dn_typeinfo.Dir.myname, new_dev, &event_log); fail: @@ -1733,5 +1730,5 @@ fail: devfs_bulk_notify(&event_log); devfs_release_event_log(&event_log, 1); - return ((new_dev != NULL) ? 0 : -1); + return (new_dev != NULL) ? 0 : -1; } diff --git a/bsd/miscfs/devfs/devfs_vfsops.c b/bsd/miscfs/devfs/devfs_vfsops.c index db3e249e9..ab3986460 100644 --- a/bsd/miscfs/devfs/devfs_vfsops.c +++ b/bsd/miscfs/devfs/devfs_vfsops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- * Copyright 1997,1998 Julian Elischer. All rights reserved. * julian@freebsd.org - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: @@ -37,7 +37,7 @@ * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -49,7 +49,7 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * + * * devfs_vfsops.c * */ @@ -101,7 +101,7 @@ extern boolean_t dev_kmem_enabled; /*- * Called from the generic VFS startups. * This is the second stage of DEVFS initialisation. - * The probed devices have already been loaded and the + * The probed devices have already been loaded and the * basic structure of the DEVFS created. * We take the oportunity to mount the hidden DEVFS layer, so that * devices from devfs get sync'd. 
@@ -109,39 +109,40 @@ extern boolean_t dev_kmem_enabled; static int devfs_init(__unused struct vfsconf *vfsp) { - if (devfs_sinit()) - return (ENOTSUP); - devfs_make_node(makedev(0, 0), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0622, "console"); - devfs_make_node(makedev(2, 0), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0666, "tty"); + if (devfs_sinit()) { + return ENOTSUP; + } + devfs_make_node(makedev(0, 0), DEVFS_CHAR, + UID_ROOT, GID_WHEEL, 0622, "console"); + devfs_make_node(makedev(2, 0), DEVFS_CHAR, + UID_ROOT, GID_WHEEL, 0666, "tty"); #if CONFIG_DEV_KMEM if (dev_kmem_enabled) { /* (3,0) reserved for /dev/mem physical memory */ - devfs_make_node(makedev(3, 1), DEVFS_CHAR, - UID_ROOT, GID_KMEM, 0640, "kmem"); + devfs_make_node(makedev(3, 1), DEVFS_CHAR, + UID_ROOT, GID_KMEM, 0640, "kmem"); } #endif - devfs_make_node(makedev(3, 2), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0666, "null"); - devfs_make_node(makedev(3, 3), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0666, "zero"); + devfs_make_node(makedev(3, 2), DEVFS_CHAR, + UID_ROOT, GID_WHEEL, 0666, "null"); + devfs_make_node(makedev(3, 3), DEVFS_CHAR, + UID_ROOT, GID_WHEEL, 0666, "zero"); uint32_t logging_config = atm_get_diagnostic_config(); devfs_make_node(makedev(6, 0), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0600, "klog"); + UID_ROOT, GID_WHEEL, 0600, "klog"); - if ( !(logging_config & ATM_TRACE_DISABLE) ) { + if (!(logging_config & ATM_TRACE_DISABLE)) { devfs_make_node(makedev(7, 0), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0600, "oslog"); + UID_ROOT, GID_WHEEL, 0600, "oslog"); if (cdevsw_setkqueueok(7, (&(cdevsw[7])), 0) == -1) { - return (ENOTSUP); + return ENOTSUP; } devfs_make_node(makedev(8, 0), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0600, "oslog_stream"); + UID_ROOT, GID_WHEEL, 0600, "oslog_stream"); if (cdevsw_setkqueueok(8, (&(cdevsw[8])), 0) == -1) { - return (ENOTSUP); + return ENOTSUP; } } @@ -150,7 +151,7 @@ devfs_init(__unused struct vfsconf *vfsp) devfs_fdesc_init(); #endif - return 0; + return 0; } /*- @@ -170,14 +171,13 @@ devfs_init(__unused struct vfsconf *vfsp) int devfs_mount(struct mount *mp, __unused vnode_t devvp, __unused user_addr_t data, vfs_context_t ctx) { - struct devfsmount *devfs_mp_p; /* devfs specific mount info */ + struct devfsmount *devfs_mp_p; /* devfs specific mount info */ int error; /*- * If they just want to update, we don't need to do anything. */ - if (mp->mnt_flag & MNT_UPDATE) - { + if (mp->mnt_flag & MNT_UPDATE) { return 0; } @@ -191,10 +191,11 @@ devfs_mount(struct mount *mp, __unused vnode_t devvp, __unused user_addr_t data, */ MALLOC(devfs_mp_p, struct devfsmount *, sizeof(struct devfsmount), - M_DEVFSMNT, M_WAITOK); - if (devfs_mp_p == NULL) - return (ENOMEM); - bzero(devfs_mp_p,sizeof(*devfs_mp_p)); + M_DEVFSMNT, M_WAITOK); + if (devfs_mp_p == NULL) { + return ENOMEM; + } + bzero(devfs_mp_p, sizeof(*devfs_mp_p)); devfs_mp_p->mount = mp; /*- @@ -211,10 +212,11 @@ devfs_mount(struct mount *mp, __unused vnode_t devvp, __unused user_addr_t data, if (error) { mp->mnt_data = (qaddr_t)0; - FREE((caddr_t)devfs_mp_p, M_DEVFSMNT); - return (error); - } else - DEVFS_INCR_MOUNTS(); + FREE(devfs_mp_p, M_DEVFSMNT); + return error; + } else { + DEVFS_INCR_MOUNTS(); + } /*- * Copy in the name of the directory the filesystem @@ -222,9 +224,9 @@ devfs_mount(struct mount *mp, __unused vnode_t devvp, __unused user_addr_t data, * And we clear the remainder of the character strings * to be tidy. 
*/ - + bzero(mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN); - bcopy("devfs",mp->mnt_vfsstat.f_mntfromname, 5); + bcopy("devfs", mp->mnt_vfsstat.f_mntfromname, 5); (void)devfs_statfs(mp, &mp->mnt_vfsstat, ctx); return 0; @@ -247,14 +249,15 @@ devfs_unmount( struct mount *mp, int mntflags, __unused vfs_context_t ctx) int flags = 0; int force = 0; int error; - + if (mntflags & MNT_FORCE) { flags |= FORCECLOSE; force = 1; } error = vflush(mp, NULLVP, flags); - if (error && !force) + if (error && !force) { return error; + } DEVFS_LOCK(); devfs_free_plane(devfs_mp_p); @@ -262,7 +265,7 @@ devfs_unmount( struct mount *mp, int mntflags, __unused vfs_context_t ctx) DEVFS_DECR_MOUNTS(); - FREE((caddr_t)devfs_mp_p, M_DEVFSMNT); + FREE(devfs_mp_p, M_DEVFSMNT); mp->mnt_data = (qaddr_t)0; mp->mnt_flag &= ~MNT_LOCAL; @@ -293,14 +296,14 @@ devfs_statfs( struct mount *mp, struct vfsstatfs *sbp, __unused vfs_context_t ct * Fill in the stat block. */ //sbp->f_type = mp->mnt_vfsstat.f_type; - sbp->f_flags = 0; /* XXX */ + sbp->f_flags = 0; /* XXX */ sbp->f_bsize = 512; sbp->f_iosize = 512; sbp->f_blocks = (devfs_stats.mounts * sizeof(struct devfsmount) - + devfs_stats.nodes * sizeof(devnode_t) - + devfs_stats.entries * sizeof(devdirent_t) - + devfs_stats.stringspace - ) / sbp->f_bsize; + + devfs_stats.nodes * sizeof(devnode_t) + + devfs_stats.entries * sizeof(devdirent_t) + + devfs_stats.stringspace + ) / sbp->f_bsize; sbp->f_bfree = 0; sbp->f_bavail = 0; sbp->f_files = devfs_stats.nodes; @@ -320,10 +323,10 @@ devfs_vfs_getattr(__unused mount_t mp, struct vfs_attr *fsap, __unused vfs_conte VFSATTR_RETURN(fsap, f_iosize, 512); if (VFSATTR_IS_ACTIVE(fsap, f_blocks) || VFSATTR_IS_ACTIVE(fsap, f_bused)) { fsap->f_blocks = (devfs_stats.mounts * sizeof(struct devfsmount) - + devfs_stats.nodes * sizeof(devnode_t) - + devfs_stats.entries * sizeof(devdirent_t) - + devfs_stats.stringspace - ) / fsap->f_bsize; + + devfs_stats.nodes * sizeof(devnode_t) + + devfs_stats.entries * sizeof(devdirent_t) + + devfs_stats.stringspace + ) / fsap->f_bsize; fsap->f_bused = fsap->f_blocks; VFSATTR_SET_SUPPORTED(fsap, f_blocks); VFSATTR_SET_SUPPORTED(fsap, f_bused); @@ -333,116 +336,116 @@ devfs_vfs_getattr(__unused mount_t mp, struct vfs_attr *fsap, __unused vfs_conte VFSATTR_RETURN(fsap, f_files, devfs_stats.nodes); VFSATTR_RETURN(fsap, f_ffree, 0); VFSATTR_RETURN(fsap, f_fssubtype, 0); - + if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) { fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] = - VOL_CAP_FMT_SYMBOLICLINKS | - VOL_CAP_FMT_HARDLINKS | - VOL_CAP_FMT_NO_ROOT_TIMES | - VOL_CAP_FMT_CASE_SENSITIVE | - VOL_CAP_FMT_CASE_PRESERVING | - VOL_CAP_FMT_FAST_STATFS | - VOL_CAP_FMT_2TB_FILESIZE | - VOL_CAP_FMT_HIDDEN_FILES; + VOL_CAP_FMT_SYMBOLICLINKS | + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_NO_ROOT_TIMES | + VOL_CAP_FMT_CASE_SENSITIVE | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS | + VOL_CAP_FMT_2TB_FILESIZE | + VOL_CAP_FMT_HIDDEN_FILES; fsap->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] = - VOL_CAP_INT_ATTRLIST ; + VOL_CAP_INT_ATTRLIST; fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1] = 0; fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED2] = 0; - + fsap->f_capabilities.valid[VOL_CAPABILITIES_FORMAT] = - VOL_CAP_FMT_PERSISTENTOBJECTIDS | - VOL_CAP_FMT_SYMBOLICLINKS | - VOL_CAP_FMT_HARDLINKS | - VOL_CAP_FMT_JOURNAL | - VOL_CAP_FMT_JOURNAL_ACTIVE | - VOL_CAP_FMT_NO_ROOT_TIMES | - VOL_CAP_FMT_SPARSE_FILES | - VOL_CAP_FMT_ZERO_RUNS | - VOL_CAP_FMT_CASE_SENSITIVE | - 
VOL_CAP_FMT_CASE_PRESERVING | - VOL_CAP_FMT_FAST_STATFS | - VOL_CAP_FMT_2TB_FILESIZE | - VOL_CAP_FMT_OPENDENYMODES | - VOL_CAP_FMT_HIDDEN_FILES | - VOL_CAP_FMT_PATH_FROM_ID | - VOL_CAP_FMT_NO_VOLUME_SIZES; + VOL_CAP_FMT_PERSISTENTOBJECTIDS | + VOL_CAP_FMT_SYMBOLICLINKS | + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_JOURNAL | + VOL_CAP_FMT_JOURNAL_ACTIVE | + VOL_CAP_FMT_NO_ROOT_TIMES | + VOL_CAP_FMT_SPARSE_FILES | + VOL_CAP_FMT_ZERO_RUNS | + VOL_CAP_FMT_CASE_SENSITIVE | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS | + VOL_CAP_FMT_2TB_FILESIZE | + VOL_CAP_FMT_OPENDENYMODES | + VOL_CAP_FMT_HIDDEN_FILES | + VOL_CAP_FMT_PATH_FROM_ID | + VOL_CAP_FMT_NO_VOLUME_SIZES; fsap->f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] = - VOL_CAP_INT_SEARCHFS | - VOL_CAP_INT_ATTRLIST | - VOL_CAP_INT_NFSEXPORT | - VOL_CAP_INT_READDIRATTR | - VOL_CAP_INT_EXCHANGEDATA | - VOL_CAP_INT_COPYFILE | - VOL_CAP_INT_ALLOCATE | - VOL_CAP_INT_VOL_RENAME | - VOL_CAP_INT_ADVLOCK | - VOL_CAP_INT_FLOCK | - VOL_CAP_INT_EXTENDED_SECURITY | - VOL_CAP_INT_USERACCESS | - VOL_CAP_INT_MANLOCK | - VOL_CAP_INT_EXTENDED_ATTR | - VOL_CAP_INT_NAMEDSTREAMS; + VOL_CAP_INT_SEARCHFS | + VOL_CAP_INT_ATTRLIST | + VOL_CAP_INT_NFSEXPORT | + VOL_CAP_INT_READDIRATTR | + VOL_CAP_INT_EXCHANGEDATA | + VOL_CAP_INT_COPYFILE | + VOL_CAP_INT_ALLOCATE | + VOL_CAP_INT_VOL_RENAME | + VOL_CAP_INT_ADVLOCK | + VOL_CAP_INT_FLOCK | + VOL_CAP_INT_EXTENDED_SECURITY | + VOL_CAP_INT_USERACCESS | + VOL_CAP_INT_MANLOCK | + VOL_CAP_INT_EXTENDED_ATTR | + VOL_CAP_INT_NAMEDSTREAMS; fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0; fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED2] = 0; - + VFSATTR_SET_SUPPORTED(fsap, f_capabilities); } - + if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) { fsap->f_attributes.validattr.commonattr = - ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | - ATTR_CMN_OBJTYPE | ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | - ATTR_CMN_PAROBJID | - ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | - ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | - ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | ATTR_CMN_FILEID; + ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | + ATTR_CMN_OBJTYPE | ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | + ATTR_CMN_PAROBJID | + ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | + ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | + ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | ATTR_CMN_FILEID; fsap->f_attributes.validattr.volattr = - ATTR_VOL_FSTYPE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | - ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | - ATTR_VOL_OBJCOUNT | ATTR_VOL_MAXOBJCOUNT | - ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | - ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | - ATTR_VOL_ATTRIBUTES; + ATTR_VOL_FSTYPE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | + ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | + ATTR_VOL_OBJCOUNT | ATTR_VOL_MAXOBJCOUNT | + ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | + ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | + ATTR_VOL_ATTRIBUTES; fsap->f_attributes.validattr.dirattr = - ATTR_DIR_LINKCOUNT | ATTR_DIR_MOUNTSTATUS; + ATTR_DIR_LINKCOUNT | ATTR_DIR_MOUNTSTATUS; fsap->f_attributes.validattr.fileattr = - ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | - ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE | - ATTR_FILE_DATALENGTH; + ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | + ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE | + ATTR_FILE_DATALENGTH; fsap->f_attributes.validattr.forkattr = 0; - + fsap->f_attributes.nativeattr.commonattr = - ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | - ATTR_CMN_OBJTYPE | 
ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | - ATTR_CMN_PAROBJID | - ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | - ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | - ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | ATTR_CMN_FILEID; + ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | + ATTR_CMN_OBJTYPE | ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | + ATTR_CMN_PAROBJID | + ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | + ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | + ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | ATTR_CMN_FILEID; fsap->f_attributes.nativeattr.volattr = - ATTR_VOL_FSTYPE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | - ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | - ATTR_VOL_OBJCOUNT | ATTR_VOL_MAXOBJCOUNT | - ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | - ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | - ATTR_VOL_ATTRIBUTES; + ATTR_VOL_FSTYPE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | + ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | + ATTR_VOL_OBJCOUNT | ATTR_VOL_MAXOBJCOUNT | + ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | + ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | + ATTR_VOL_ATTRIBUTES; fsap->f_attributes.nativeattr.dirattr = - ATTR_DIR_MOUNTSTATUS; + ATTR_DIR_MOUNTSTATUS; fsap->f_attributes.nativeattr.fileattr = - ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | - ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE | - ATTR_FILE_DATALENGTH; + ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | + ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE | + ATTR_FILE_DATALENGTH; fsap->f_attributes.nativeattr.forkattr = 0; VFSATTR_SET_SUPPORTED(fsap, f_attributes); } - + return 0; } static int devfs_sync(__unused struct mount *mp, __unused int waitfor, __unused vfs_context_t ctx) { - return (0); + return 0; } @@ -458,24 +461,24 @@ devfs_vget(__unused struct mount *mp, __unused ino64_t ino, __unused struct vnod */ static int -devfs_fhtovp (__unused struct mount *mp, __unused int fhlen, __unused unsigned char *fhp, __unused struct vnode **vpp, __unused vfs_context_t ctx) +devfs_fhtovp(__unused struct mount *mp, __unused int fhlen, __unused unsigned char *fhp, __unused struct vnode **vpp, __unused vfs_context_t ctx) { - return (EINVAL); + return EINVAL; } static int -devfs_vptofh (__unused struct vnode *vp, __unused int *fhlenp, __unused unsigned char *fhp, __unused vfs_context_t ctx) +devfs_vptofh(__unused struct vnode *vp, __unused int *fhlenp, __unused unsigned char *fhp, __unused vfs_context_t ctx) { - return (EINVAL); + return EINVAL; } static int -devfs_sysctl(__unused int *name, __unused u_int namelen, __unused user_addr_t oldp, - __unused size_t *oldlenp, __unused user_addr_t newp, - __unused size_t newlen, __unused vfs_context_t ctx) +devfs_sysctl(__unused int *name, __unused u_int namelen, __unused user_addr_t oldp, + __unused size_t *oldlenp, __unused user_addr_t newp, + __unused size_t newlen, __unused vfs_context_t ctx) { - return (ENOTSUP); + return ENOTSUP; } #include @@ -495,10 +498,10 @@ devfs_kernel_mount(char * mntname) error = kernel_mount(fsname, NULLVP, NULLVP, mntname, NULL, 0, MNT_DONTBROWSE, KERNEL_MOUNT_NOAUTH, ctx); if (error) { printf("devfs_kernel_mount: kernel_mount failed: %d\n", error); - return (error); + return error; } - return (0); + return 0; } struct vfsops devfs_vfsops = { diff --git a/bsd/miscfs/devfs/devfs_vnops.c b/bsd/miscfs/devfs/devfs_vnops.c index 41029dd7c..3377d3a35 100644 --- a/bsd/miscfs/devfs/devfs_vnops.c +++ b/bsd/miscfs/devfs/devfs_vnops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright 1997,1998 Julian Elischer. All rights reserved. * julian@freebsd.org - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: @@ -37,7 +37,7 @@ * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -49,7 +49,7 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
- * + * * devfs_vnops.c */ @@ -110,20 +110,19 @@ #include "fdesc.h" #endif /* FDESC */ -static int devfs_update(struct vnode *vp, struct timeval *access, - struct timeval *modify); -void devfs_rele_node(devnode_t *); -static void devfs_consider_time_update(devnode_t *dnp, uint32_t just_changed_flags); -static boolean_t devfs_update_needed(long now_s, long last_s); -static boolean_t devfs_is_name_protected(struct vnode *dvp, const char *name); -void dn_times_locked(devnode_t * dnp, struct timeval *t1, struct timeval *t2, struct timeval *t3, uint32_t just_changed_flags); -void dn_times_now(devnode_t *dnp, uint32_t just_changed_flags); -void dn_mark_for_delayed_times_update(devnode_t *dnp, uint32_t just_changed_flags); - -void +static int devfs_update(struct vnode *vp, struct timeval *access, + struct timeval *modify); +void devfs_rele_node(devnode_t *); +static void devfs_consider_time_update(devnode_t *dnp, uint32_t just_changed_flags); +static boolean_t devfs_update_needed(long now_s, long last_s); +static boolean_t devfs_is_name_protected(struct vnode *dvp, const char *name); +void dn_times_locked(devnode_t * dnp, struct timeval *t1, struct timeval *t2, struct timeval *t3, uint32_t just_changed_flags); +void dn_times_now(devnode_t *dnp, uint32_t just_changed_flags); +void dn_mark_for_delayed_times_update(devnode_t *dnp, uint32_t just_changed_flags); + +void dn_times_locked(devnode_t * dnp, struct timeval *t1, struct timeval *t2, struct timeval *t3, uint32_t just_changed_flags) { - lck_mtx_assert(&devfs_attr_mutex, LCK_MTX_ASSERT_OWNED); if (just_changed_flags & DEVFS_UPDATE_ACCESS) { @@ -193,23 +192,23 @@ dn_times_now(devnode_t * dnp, uint32_t just_changed_flags) static boolean_t devfs_is_name_protected(struct vnode *dvp, const char *name) { - /* - * Only names in root are protected. E.g. /dev/null is protected, - * but /dev/foo/null isn't. - */ - if (!vnode_isvroot(dvp)) - return FALSE; - - if ((strcmp("console", name) == 0) || - (strcmp("tty", name) == 0) || - (strcmp("null", name) == 0) || - (strcmp("zero", name) == 0) || - (strcmp("klog", name) == 0)) { - - return TRUE; - } - - return FALSE; + /* + * Only names in root are protected. E.g. /dev/null is protected, + * but /dev/foo/null isn't. 
+ */ + if (!vnode_isvroot(dvp)) { + return FALSE; + } + + if ((strcmp("console", name) == 0) || + (strcmp("tty", name) == 0) || + (strcmp("null", name) == 0) || + (strcmp("zero", name) == 0) || + (strcmp("klog", name) == 0)) { + return TRUE; + } + + return FALSE; } @@ -252,12 +251,12 @@ devfs_is_name_protected(struct vnode *dvp, const char *name) */ static int devfs_lookup(struct vnop_lookup_args *ap) - /*struct vnop_lookup_args { - struct vnode * a_dvp; directory vnode ptr - struct vnode ** a_vpp; where to put the result - struct componentname * a_cnp; the name we want - vfs_context_t a_context; - };*/ +/*struct vnop_lookup_args { + * struct vnode * a_dvp; directory vnode ptr + * struct vnode ** a_vpp; where to put the result + * struct componentname * a_cnp; the name we want + * vfs_context_t a_context; + * };*/ { struct componentname *cnp = ap->a_cnp; vfs_context_t ctx = cnp->cn_context; @@ -269,9 +268,9 @@ devfs_lookup(struct vnop_lookup_args *ap) devdirent_t * nodename; int flags = cnp->cn_flags; int op = cnp->cn_nameiop; /* LOOKUP, CREATE, RENAME, or DELETE */ - int wantparent = flags & (LOCKPARENT|WANTPARENT); + int wantparent = flags & (LOCKPARENT | WANTPARENT); int error = 0; - char heldchar; /* the char at the end of the name componet */ + char heldchar; /* the char at the end of the name componet */ retry: @@ -284,7 +283,7 @@ retry: * Make sure that our node is a directory as well. */ if (dir_node->dn_type != DEV_DIR) { - return (ENOTDIR); + return ENOTDIR; } DEVFS_LOCK(); @@ -301,10 +300,10 @@ retry: cnp->cn_nameptr[cnp->cn_namelen] = heldchar; if (nodename) { - /* entry exists */ - node = nodename->de_dnp; + /* entry exists */ + node = nodename->de_dnp; - /* Do potential vnode allocation here inside the lock + /* Do potential vnode allocation here inside the lock * to make sure that our device node has a non-NULL dn_vn * associated with it. The device node might otherwise * get deleted out from under us (see devfs_dn_free()). @@ -314,8 +313,9 @@ retry: DEVFS_UNLOCK(); if (error) { - if (error == EAGAIN) - goto retry; + if (error == EAGAIN) { + goto retry; + } return error; } if (!nodename) { @@ -328,7 +328,7 @@ retry: * or we're at the last component, but we're not creating * or renaming, return ENOENT. */ - if (!(flags & ISLASTCN) || !(op == CREATE || op == RENAME)) { + if (!(flags & ISLASTCN) || !(op == CREATE || op == RENAME)) { return ENOENT; } /* @@ -342,7 +342,7 @@ retry: * NB - if the directory is unlocked, then this * information cannot be used. */ - return (EJUSTRETURN); + return EJUSTRETURN; } /* * from this point forward, we need to vnode_put the reference @@ -357,21 +357,20 @@ retry: * on and lock the node, being careful with ".". */ if (op == DELETE && (flags & ISLASTCN)) { - /* * we are trying to delete '.'. What does this mean? XXX */ if (dir_node == node) { - if (*result_vnode) { - vnode_put(*result_vnode); - *result_vnode = NULL; - } - if ( ((error = vnode_get(dir_vnode)) == 0) ) { - *result_vnode = dir_vnode; + if (*result_vnode) { + vnode_put(*result_vnode); + *result_vnode = NULL; + } + if (((error = vnode_get(dir_vnode)) == 0)) { + *result_vnode = dir_vnode; } - return (error); + return error; } - return (0); + return 0; } /* @@ -381,16 +380,15 @@ retry: * regular file, or empty directory. */ if (op == RENAME && wantparent && (flags & ISLASTCN)) { - /* * Careful about locking second node. * This can only occur if the target is ".". 
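The retry: loop in devfs_lookup() above exists because devfs_dntovn() can fail with EAGAIN when the devnode is reclaimed between the name lookup and the vnode attach; the only safe recovery is to redo the lookup from scratch. A distilled user-space model of that pattern (stand-in types and helpers, not the real devfs structures):

#include <errno.h>
#include <stddef.h>

struct node {
	int reclaiming;		/* models a devnode racing with reclaim */
};

/* Models devfs_dntovn(): EAGAIN tells the caller to re-run the lookup. */
static int
node_to_vnode(struct node *np)
{
	return np->reclaiming ? EAGAIN : 0;
}

static int
lookup_with_retry(struct node *(*find)(void))
{
	struct node *np;
	int error;

retry:
	np = find();		/* done under DEVFS_LOCK() in the real code */
	if (np == NULL) {
		return ENOENT;
	}
	error = node_to_vnode(np);
	if (error == EAGAIN) {
		goto retry;	/* node vanished under us; look it up again */
	}
	return error;
}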
*/ if (dir_node == node) { - error = EISDIR; + error = EISDIR; goto drop_ref; } - return (0); + return 0; } /* @@ -414,37 +412,37 @@ retry: * that point backwards in the directory structure. */ if ((flags & ISDOTDOT) == 0 && dir_node == node) { - if (*result_vnode) { - vnode_put(*result_vnode); - *result_vnode = NULL; + if (*result_vnode) { + vnode_put(*result_vnode); + *result_vnode = NULL; } - if ( (error = vnode_get(dir_vnode)) ) { - return (error); + if ((error = vnode_get(dir_vnode))) { + return error; } *result_vnode = dir_vnode; } - return (0); + return 0; drop_ref: if (*result_vnode) { - vnode_put(*result_vnode); + vnode_put(*result_vnode); *result_vnode = NULL; } - return (error); + return error; } static int devfs_getattr(struct vnop_getattr_args *ap) - /*struct vnop_getattr_args { - struct vnode *a_vp; - struct vnode_attr *a_vap; - kauth_cred_t a_cred; - struct proc *a_p; - } */ +/*struct vnop_getattr_args { + * struct vnode *a_vp; + * struct vnode_attr *a_vap; + * kauth_cred_t a_cred; + * struct proc *a_p; + * } */ { struct vnode *vp = ap->a_vp; struct vnode_attr *vap = ap->a_vap; - devnode_t * file_node; + devnode_t * file_node; struct timeval now; @@ -458,29 +456,28 @@ devfs_getattr(struct vnop_getattr_args *ap) * the vp, not the file_node; if we getting information on a * cloning device, we want the cloned information, not the template. */ - switch (file_node->dn_type) - { - case DEV_DIR: + switch (file_node->dn_type) { + case DEV_DIR: #if FDESC - case DEV_DEVFD: /* Like a directory */ + case DEV_DEVFD: /* Like a directory */ #endif /* FDESC */ - VATTR_RETURN(vap, va_rdev, 0); + VATTR_RETURN(vap, va_rdev, 0); vap->va_mode |= (S_IFDIR); break; - case DEV_CDEV: + case DEV_CDEV: VATTR_RETURN(vap, va_rdev, vp->v_rdev); vap->va_mode |= (S_IFCHR); break; - case DEV_BDEV: + case DEV_BDEV: VATTR_RETURN(vap, va_rdev, vp->v_rdev); vap->va_mode |= (S_IFBLK); break; - case DEV_SLNK: + case DEV_SLNK: VATTR_RETURN(vap, va_rdev, 0); vap->va_mode |= (S_IFLNK); break; default: - VATTR_RETURN(vap, va_rdev, 0); /* default value only */ + VATTR_RETURN(vap, va_rdev, 0); /* default value only */ } VATTR_RETURN(vap, va_type, vp->v_type); VATTR_RETURN(vap, va_nlink, file_node->dn_links); @@ -491,12 +488,13 @@ devfs_getattr(struct vnop_getattr_args *ap) VATTR_RETURN(vap, va_data_size, file_node->dn_len); /* return an override block size (advisory) */ - if (vp->v_type == VBLK) + if (vp->v_type == VBLK) { VATTR_RETURN(vap, va_iosize, BLKDEV_IOSIZE); - else if (vp->v_type == VCHR) + } else if (vp->v_type == VCHR) { VATTR_RETURN(vap, va_iosize, MAXPHYSIO); - else + } else { VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize); + } DEVFS_ATTR_LOCK_SPIN(); @@ -509,10 +507,12 @@ devfs_getattr(struct vnop_getattr_args *ap) file_node->dn_ctime.tv_sec = boottime_sec(); file_node->dn_ctime.tv_nsec = 0; } - if (file_node->dn_mtime.tv_sec == 0) - file_node->dn_mtime = file_node->dn_ctime; - if (file_node->dn_atime.tv_sec == 0) - file_node->dn_atime = file_node->dn_ctime; + if (file_node->dn_mtime.tv_sec == 0) { + file_node->dn_mtime = file_node->dn_ctime; + } + if (file_node->dn_atime.tv_sec == 0) { + file_node->dn_atime = file_node->dn_ctime; + } VATTR_RETURN(vap, va_change_time, file_node->dn_ctime); VATTR_RETURN(vap, va_modify_time, file_node->dn_mtime); VATTR_RETURN(vap, va_access_time, file_node->dn_atime); @@ -537,29 +537,28 @@ devfs_getattr(struct vnop_getattr_args *ap) static int devfs_setattr(struct vnop_setattr_args *ap) - /*struct vnop_setattr_args { - struct vnode *a_vp; - struct 
vnode_attr *a_vap; - vfs_context_t a_context; - } */ +/*struct vnop_setattr_args { + * struct vnode *a_vp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */ { - struct vnode *vp = ap->a_vp; - struct vnode_attr *vap = ap->a_vap; - int error = 0; - devnode_t * file_node; - struct timeval atimeval, mtimeval; - - DEVFS_LOCK(); - - file_node = VTODN(vp); - /* - * Go through the fields and update if set. - */ - if (VATTR_IS_ACTIVE(vap, va_access_time) || VATTR_IS_ACTIVE(vap, va_modify_time)) { - - - if (VATTR_IS_ACTIVE(vap, va_access_time)) + struct vnode *vp = ap->a_vp; + struct vnode_attr *vap = ap->a_vap; + int error = 0; + devnode_t * file_node; + struct timeval atimeval, mtimeval; + + DEVFS_LOCK(); + + file_node = VTODN(vp); + /* + * Go through the fields and update if set. + */ + if (VATTR_IS_ACTIVE(vap, va_access_time) || VATTR_IS_ACTIVE(vap, va_modify_time)) { + if (VATTR_IS_ACTIVE(vap, va_access_time)) { file_node->dn_access = 1; + } if (VATTR_IS_ACTIVE(vap, va_modify_time)) { file_node->dn_change = 1; file_node->dn_update = 1; @@ -568,36 +567,39 @@ devfs_setattr(struct vnop_setattr_args *ap) atimeval.tv_usec = vap->va_access_time.tv_nsec / 1000; mtimeval.tv_sec = vap->va_modify_time.tv_sec; mtimeval.tv_usec = vap->va_modify_time.tv_nsec / 1000; - - if ( (error = devfs_update(vp, &atimeval, &mtimeval)) ) + + if ((error = devfs_update(vp, &atimeval, &mtimeval))) { goto exit; - } - VATTR_SET_SUPPORTED(vap, va_access_time); - VATTR_SET_SUPPORTED(vap, va_change_time); - - /* - * Change the permissions. - */ - if (VATTR_IS_ACTIVE(vap, va_mode)) { - file_node->dn_mode &= ~07777; - file_node->dn_mode |= vap->va_mode & 07777; - } - VATTR_SET_SUPPORTED(vap, va_mode); - - /* - * Change the owner. - */ - if (VATTR_IS_ACTIVE(vap, va_uid)) - file_node->dn_uid = vap->va_uid; - VATTR_SET_SUPPORTED(vap, va_uid); - - /* - * Change the group. - */ - if (VATTR_IS_ACTIVE(vap, va_gid)) - file_node->dn_gid = vap->va_gid; - VATTR_SET_SUPPORTED(vap, va_gid); - exit: + } + } + VATTR_SET_SUPPORTED(vap, va_access_time); + VATTR_SET_SUPPORTED(vap, va_change_time); + + /* + * Change the permissions. + */ + if (VATTR_IS_ACTIVE(vap, va_mode)) { + file_node->dn_mode &= ~07777; + file_node->dn_mode |= vap->va_mode & 07777; + } + VATTR_SET_SUPPORTED(vap, va_mode); + + /* + * Change the owner. + */ + if (VATTR_IS_ACTIVE(vap, va_uid)) { + file_node->dn_uid = vap->va_uid; + } + VATTR_SET_SUPPORTED(vap, va_uid); + + /* + * Change the group. 
+ */ + if (VATTR_IS_ACTIVE(vap, va_gid)) { + file_node->dn_gid = vap->va_gid; + } + VATTR_SET_SUPPORTED(vap, va_gid); +exit: DEVFS_UNLOCK(); return error; @@ -606,12 +608,12 @@ devfs_setattr(struct vnop_setattr_args *ap) #if CONFIG_MACF static int devfs_setlabel(struct vnop_setlabel_args *ap) - /* struct vnop_setlabel_args { - struct vnodeop_desc *a_desc; - struct vnode *a_vp; - struct label *a_vl; - vfs_context_t a_context; - } */ +/* struct vnop_setlabel_args { + * struct vnodeop_desc *a_desc; + * struct vnode *a_vp; + * struct label *a_vl; + * vfs_context_t a_context; + * } */ { struct vnode *vp; struct devnode *de; @@ -622,75 +624,77 @@ devfs_setlabel(struct vnop_setlabel_args *ap) mac_vnode_label_update(ap->a_context, vp, ap->a_vl); mac_devfs_label_update(vp->v_mount, de, vp); - return (0); + return 0; } #endif static int devfs_read(struct vnop_read_args *ap) - /* struct vnop_read_args { - struct vnode *a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; - } */ +/* struct vnop_read_args { + * struct vnode *a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */ { - devnode_t * dn_p = VTODN(ap->a_vp); + devnode_t * dn_p = VTODN(ap->a_vp); switch (ap->a_vp->v_type) { - case VDIR: { - dn_p->dn_access = 1; + case VDIR: { + dn_p->dn_access = 1; - return VNOP_READDIR(ap->a_vp, ap->a_uio, 0, NULL, NULL, ap->a_context); - } - default: { - printf("devfs_read(): bad file type %d", ap->a_vp->v_type); - return(EINVAL); - } + return VNOP_READDIR(ap->a_vp, ap->a_uio, 0, NULL, NULL, ap->a_context); + } + default: { + printf("devfs_read(): bad file type %d", ap->a_vp->v_type); + return EINVAL; + } } } static int devfs_close(struct vnop_close_args *ap) - /* struct vnop_close_args { - struct vnode *a_vp; - int a_fflag; - vfs_context_t a_context; - } */ +/* struct vnop_close_args { + * struct vnode *a_vp; + * int a_fflag; + * vfs_context_t a_context; + * } */ { - struct vnode * vp = ap->a_vp; - devnode_t * dnp; + struct vnode * vp = ap->a_vp; + devnode_t * dnp; if (vnode_isinuse(vp, 1)) { - DEVFS_LOCK(); - dnp = VTODN(vp); - if (dnp) - dn_times_now(dnp, 0); - DEVFS_UNLOCK(); + DEVFS_LOCK(); + dnp = VTODN(vp); + if (dnp) { + dn_times_now(dnp, 0); + } + DEVFS_UNLOCK(); } - return (0); + return 0; } static int devfsspec_close(struct vnop_close_args *ap) - /* struct vnop_close_args { - struct vnode *a_vp; - int a_fflag; - vfs_context_t a_context; - } */ +/* struct vnop_close_args { + * struct vnode *a_vp; + * int a_fflag; + * vfs_context_t a_context; + * } */ { - struct vnode * vp = ap->a_vp; - devnode_t * dnp; + struct vnode * vp = ap->a_vp; + devnode_t * dnp; if (vnode_isinuse(vp, 0)) { - DEVFS_LOCK(); - dnp = VTODN(vp); - if (dnp) - dn_times_now(dnp, 0); - DEVFS_UNLOCK(); + DEVFS_LOCK(); + dnp = VTODN(vp); + if (dnp) { + dn_times_now(dnp, 0); + } + DEVFS_UNLOCK(); } - return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap)); + return VOCALL(spec_vnodeop_p, VOFFSET(vnop_close), ap); } static boolean_t @@ -713,7 +717,7 @@ devfs_update_needed(long now_s, long last_s) static void devfs_consider_time_update(devnode_t *dnp, uint32_t just_changed_flags) { - struct timeval now; + struct timeval now; long now_s; microtime(&now); @@ -746,34 +750,34 @@ devfs_consider_time_update(devnode_t *dnp, uint32_t just_changed_flags) static int devfsspec_read(struct vnop_read_args *ap) - /* struct vnop_read_args { - struct vnode *a_vp; - struct uio *a_uio; - int a_ioflag; - kauth_cred_t a_cred; - } */ +/* struct vnop_read_args { + * struct vnode *a_vp; + * struct uio *a_uio; + * 
int a_ioflag; + * kauth_cred_t a_cred; + * } */ { - devnode_t * dnp = VTODN(ap->a_vp); + devnode_t * dnp = VTODN(ap->a_vp); devfs_consider_time_update(dnp, DEVFS_UPDATE_ACCESS); - return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap)); + return VOCALL(spec_vnodeop_p, VOFFSET(vnop_read), ap); } static int devfsspec_write(struct vnop_write_args *ap) - /* struct vnop_write_args { - struct vnode *a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; - } */ +/* struct vnop_write_args { + * struct vnode *a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */ { - devnode_t * dnp = VTODN(ap->a_vp); + devnode_t * dnp = VTODN(ap->a_vp); devfs_consider_time_update(dnp, DEVFS_UPDATE_CHANGE | DEVFS_UPDATE_MOD); - return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap)); + return VOCALL(spec_vnodeop_p, VOFFSET(vnop_write), ap); } /* @@ -781,33 +785,33 @@ devfsspec_write(struct vnop_write_args *ap) */ static int devfs_write(struct vnop_write_args *ap) - /* struct vnop_write_args { - struct vnode *a_vp; - struct uio *a_uio; - int a_ioflag; - kauth_cred_t a_cred; - } */ +/* struct vnop_write_args { + * struct vnode *a_vp; + * struct uio *a_uio; + * int a_ioflag; + * kauth_cred_t a_cred; + * } */ { switch (ap->a_vp->v_type) { case VDIR: - return(EISDIR); + return EISDIR; default: printf("devfs_write(): bad file type %d", ap->a_vp->v_type); - return (EINVAL); + return EINVAL; } } -/* +/* * Deviates from UFS naming convention because there is a KPI function * called devfs_remove(). */ static int devfs_vnop_remove(struct vnop_remove_args *ap) - /* struct vnop_remove_args { - struct vnode *a_dvp; - struct vnode *a_vp; - struct componentname *a_cnp; - } */ +/* struct vnop_remove_args { + * struct vnode *a_dvp; + * struct vnode *a_vp; + * struct componentname *a_cnp; + * } */ { struct vnode *vp = ap->a_vp; struct vnode *dvp = ap->a_dvp; @@ -833,7 +837,7 @@ devfs_vnop_remove(struct vnop_remove_args *ap) tnp = dev_findname(tdp, cnp->cn_nameptr); if (tnp == NULL) { - error = ENOENT; + error = ENOENT; goto abort; } @@ -843,7 +847,7 @@ devfs_vnop_remove(struct vnop_remove_args *ap) if (devfs_is_name_protected(dvp, cnp->cn_nameptr)) { error = EINVAL; goto abort; -} + } /* * Make sure that we don't try do something stupid @@ -852,8 +856,8 @@ devfs_vnop_remove(struct vnop_remove_args *ap) /* * Avoid ".", "..", and aliases of "." for obvious reasons. */ - if ( (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') - || (cnp->cn_flags&ISDOTDOT) ) { + if ((cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') + || (cnp->cn_flags & ISDOTDOT)) { error = EINVAL; goto abort; } @@ -861,8 +865,8 @@ devfs_vnop_remove(struct vnop_remove_args *ap) } /*********************************** - * Start actually doing things.... * - ***********************************/ + * Start actually doing things.... * + ***********************************/ devfs_consider_time_update(tdp, DEVFS_UPDATE_CHANGE | DEVFS_UPDATE_MOD); /* @@ -870,27 +874,27 @@ devfs_vnop_remove(struct vnop_remove_args *ap) * to it. Also, ensure source and target are compatible * (both directories, or both not directories). 
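devfsspec_read() and devfsspec_write() above only consider a timestamp update: devfs_consider_time_update() batches atime/mtime maintenance and skips the work when the existing stamp is recent enough (devfs_update_needed()). A user-space model of that throttle, with an illustrative threshold rather than the kernel's exact constant:

#include <stdbool.h>
#include <time.h>

static time_t last_update_s;

/* Models devfs_update_needed(): only bother if the stamp is stale. */
static bool
update_needed(time_t now_s)
{
	return now_s > last_update_s + 1;	/* illustrative 1s threshold */
}

/* Models devfs_consider_time_update(): cheap staleness check first,
 * actual timestamp write second. */
static void
consider_time_update(void)
{
	time_t now_s = time(NULL);

	if (update_needed(now_s)) {
		last_update_s = now_s;	/* real code records atime/mtime too */
	}
}

The point of the two-step check is that hot nodes (a busy tty, /dev/null) do not dirty their attributes on every single I/O.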
*/ - if (( doingdirectory) && (tp->dn_links > 2)) { - error = ENOTEMPTY; - goto abort; + if ((doingdirectory) && (tp->dn_links > 2)) { + error = ENOTEMPTY; + goto abort; } dev_free_name(tnp); abort: DEVFS_UNLOCK(); - return (error); + return error; } /* */ static int devfs_link(struct vnop_link_args *ap) - /*struct vnop_link_args { - struct vnode *a_tdvp; - struct vnode *a_vp; - struct componentname *a_cnp; - vfs_context_t a_context; - } */ +/*struct vnop_link_args { + * struct vnode *a_tdvp; + * struct vnode *a_vp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */ { struct vnode *vp = ap->a_vp; struct vnode *tdvp = ap->a_tdvp; @@ -916,31 +920,31 @@ devfs_link(struct vnop_link_args *ap) */ /* can lookup dnode safely for tdvp outside of devfs lock as it is not aliased */ tdp = VTODN(tdvp); - + if (tdvp->v_mount != vp->v_mount) { - return (EXDEV); + return EXDEV; } DEVFS_LOCK(); fp = VTODN(vp); /*********************************** - * Start actually doing things.... * - ***********************************/ + * Start actually doing things.... * + ***********************************/ dn_times_now(fp, DEVFS_UPDATE_CHANGE); if (!error) { - error = dev_add_name(cnp->cn_nameptr, tdp, NULL, fp, &tnp); + error = dev_add_name(cnp->cn_nameptr, tdp, NULL, fp, &tnp); } out1: DEVFS_UNLOCK(); - return (error); + return error; } /* * Rename system call. Seems overly complicated to me... - * rename("foo", "bar"); + * rename("foo", "bar"); * is essentially * unlink("bar"); * link("foo", "bar"); @@ -967,15 +971,15 @@ out1: */ static int devfs_rename(struct vnop_rename_args *ap) - /*struct vnop_rename_args { - struct vnode *a_fdvp; - struct vnode *a_fvp; - struct componentname *a_fcnp; - struct vnode *a_tdvp; - struct vnode *a_tvp; - struct componentname *a_tcnp; - vfs_context_t a_context; - } */ +/*struct vnop_rename_args { + * struct vnode *a_fdvp; + * struct vnode *a_fvp; + * struct componentname *a_fcnp; + * struct vnode *a_tdvp; + * struct vnode *a_tvp; + * struct componentname *a_tcnp; + * vfs_context_t a_context; + * } */ { struct vnode *tvp = ap->a_tvp; struct vnode *tdvp = ap->a_tdvp; @@ -984,7 +988,7 @@ devfs_rename(struct vnop_rename_args *ap) struct componentname *tcnp = ap->a_tcnp; struct componentname *fcnp = ap->a_fcnp; devnode_t *fp, *fdp, *tp, *tdp; - devdirent_t *fnp,*tnp; + devdirent_t *fnp, *tnp; int doingdirectory = 0; int error = 0; @@ -1009,7 +1013,7 @@ devfs_rename(struct vnop_rename_args *ap) fnp = dev_findname(fdp, fcnp->cn_nameptr); if (fnp == NULL) { - error = ENOENT; + error = ENOENT; goto out; } tp = NULL; @@ -1019,12 +1023,12 @@ devfs_rename(struct vnop_rename_args *ap) tnp = dev_findname(tdp, tcnp->cn_nameptr); if (tnp == NULL) { - error = ENOENT; + error = ENOENT; goto out; } tp = VTODN(tvp); } - + /* * Make sure that we don't try do something stupid */ @@ -1032,11 +1036,11 @@ devfs_rename(struct vnop_rename_args *ap) /* * Avoid ".", "..", and aliases of "." for obvious reasons. 
*/ - if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') - || (fcnp->cn_flags&ISDOTDOT) - || (tcnp->cn_namelen == 1 && tcnp->cn_nameptr[0] == '.') - || (tcnp->cn_flags&ISDOTDOT) - || (tdp == fp )) { + if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') + || (fcnp->cn_flags & ISDOTDOT) + || (tcnp->cn_namelen == 1 && tcnp->cn_nameptr[0] == '.') + || (tcnp->cn_flags & ISDOTDOT) + || (tdp == fp)) { error = EINVAL; goto out; } @@ -1058,13 +1062,13 @@ devfs_rename(struct vnop_rename_args *ap) * directory hierarchy above the target, as this would * orphan everything below the source directory. Also * the user must have write permission in the source so - * as to be able to change "..". + * as to be able to change "..". */ if (doingdirectory && (tdp != fdp)) { devnode_t * tmp, *ntmp; tmp = tdp; do { - if(tmp == fp) { + if (tmp == fp) { /* XXX unlock stuff here probably */ error = EINVAL; goto out; @@ -1074,8 +1078,8 @@ devfs_rename(struct vnop_rename_args *ap) } /*********************************** - * Start actually doing things.... * - ***********************************/ + * Start actually doing things.... * + ***********************************/ dn_times_now(fp, DEVFS_UPDATE_CHANGE); /* @@ -1101,21 +1105,21 @@ devfs_rename(struct vnop_rename_args *ap) /* * If the target exists zap it (unless it's a non-empty directory) * We could do that as well but won't - */ + */ if (tp) { /* * Target must be empty if a directory and have no links * to it. Also, ensure source and target are compatible * (both directories, or both not directories). */ - if (( doingdirectory) && (tp->dn_links > 2)) { - error = ENOTEMPTY; + if ((doingdirectory) && (tp->dn_links > 2)) { + error = ENOTEMPTY; goto bad; } dev_free_name(tnp); tp = NULL; } - dev_add_name(tcnp->cn_nameptr,tdp,NULL,fp,&tnp); + dev_add_name(tcnp->cn_nameptr, tdp, NULL, fp, &tnp); fnp->de_dnp = NULL; fp->dn_links--; /* one less link to it.. 
*/ @@ -1124,18 +1128,18 @@ bad: fp->dn_links--; /* we added one earlier*/ out: DEVFS_UNLOCK(); - return (error); + return error; } static int devfs_mkdir(struct vnop_mkdir_args *ap) - /*struct vnop_mkdir_args { - struct vnode *a_dvp; - struct vnode **a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ +/*struct vnop_mkdir_args { + * struct vnode *a_dvp; + * struct vnode **a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */ { struct componentname * cnp = ap->a_cnp; vfs_context_t ctx = cnp->cn_context; @@ -1144,16 +1148,16 @@ devfs_mkdir(struct vnop_mkdir_args *ap) devnode_t * dir_p; devdirent_t * nm_p; devnode_t * dev_p; - struct vnode_attr * vap = ap->a_vap; + struct vnode_attr * vap = ap->a_vap; struct vnode * * vpp = ap->a_vpp; DEVFS_LOCK(); dir_p = VTODN(ap->a_dvp); - error = dev_add_entry(cnp->cn_nameptr, dir_p, DEV_DIR, - NULL, NULL, NULL, &nm_p); + error = dev_add_entry(cnp->cn_nameptr, dir_p, DEV_DIR, + NULL, NULL, NULL, &nm_p); if (error) { - goto failure; + goto failure; } dev_p = nm_p->de_dnp; dev_p->dn_uid = dir_p->dn_uid; @@ -1175,19 +1179,19 @@ failure: */ static int devfs_rmdir(struct vnop_rmdir_args *ap) - /* struct vnop_rmdir_args { - struct vnode *a_dvp; - struct vnode *a_vp; - struct componentname *a_cnp; - vfs_context_t a_context; - } */ +/* struct vnop_rmdir_args { + * struct vnode *a_dvp; + * struct vnode *a_vp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */ { struct vnop_remove_args ra; ra.a_dvp = ap->a_dvp; ra.a_vp = ap->a_vp; ra.a_cnp = ap->a_cnp; - ra.a_flags = 0; /* XXX */ + ra.a_flags = 0; /* XXX */ ra.a_context = ap->a_context; return devfs_vnop_remove(&ra); @@ -1196,21 +1200,21 @@ devfs_rmdir(struct vnop_rmdir_args *ap) static int devfs_symlink(struct vnop_symlink_args *ap) - /*struct vnop_symlink_args { - struct vnode *a_dvp; - struct vnode **a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - char *a_target; - vfs_context_t a_context; - } */ +/*struct vnop_symlink_args { + * struct vnode *a_dvp; + * struct vnode **a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * char *a_target; + * vfs_context_t a_context; + * } */ { int error; devdirent_t *newent; DEVFS_LOCK(); error = devfs_make_symlink(VTODN(ap->a_dvp), ap->a_cnp->cn_nameptr, ap->a_vap->va_mode, ap->a_target, &newent); - + if (error == 0) { error = devfs_dntovn(newent->de_dnp, ap->a_vpp, vfs_context_proc(ap->a_context)); } @@ -1218,7 +1222,6 @@ devfs_symlink(struct vnop_symlink_args *ap) DEVFS_UNLOCK(); return error; - } /* Called with devfs locked */ @@ -1233,10 +1236,10 @@ devfs_make_symlink(devnode_t *dir_p, char *name, int mode, char *target, devdire typeinfo.Slnk.name = target; typeinfo.Slnk.namelen = strlen(target); - error = dev_add_entry(name, dir_p, DEV_SLNK, - &typeinfo, NULL, NULL, &nm_p); + error = dev_add_entry(name, dir_p, DEV_SLNK, + &typeinfo, NULL, NULL, &nm_p); if (error) { - goto failure; + goto failure; } dev_p = nm_p->de_dnp; dev_p->dn_uid = dir_p->dn_uid; @@ -1258,29 +1261,29 @@ failure: */ static int devfs_mknod(struct vnop_mknod_args *ap) - /* struct vnop_mknod_args { - struct vnode *a_dvp; - struct vnode **a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ +/* struct vnop_mknod_args { + * struct vnode *a_dvp; + * struct vnode **a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */ { - struct componentname * cnp = 
ap->a_cnp; + struct componentname * cnp = ap->a_cnp; vfs_context_t ctx = cnp->cn_context; struct proc *p = vfs_context_proc(ctx); - devnode_t * dev_p; - devdirent_t * devent; - devnode_t * dir_p; /* devnode for parent directory */ - struct vnode * dvp = ap->a_dvp; - int error = 0; - devnode_type_t typeinfo; - struct vnode_attr * vap = ap->a_vap; + devnode_t * dev_p; + devdirent_t * devent; + devnode_t * dir_p; /* devnode for parent directory */ + struct vnode * dvp = ap->a_dvp; + int error = 0; + devnode_type_t typeinfo; + struct vnode_attr * vap = ap->a_vap; struct vnode ** vpp = ap->a_vpp; *vpp = NULL; if (!(vap->va_type == VBLK) && !(vap->va_type == VCHR)) { - return (EINVAL); /* only support mknod of special files */ + return EINVAL; /* only support mknod of special files */ } typeinfo.dev = vap->va_rdev; @@ -1288,16 +1291,17 @@ devfs_mknod(struct vnop_mknod_args *ap) dir_p = VTODN(dvp); - error = dev_add_entry(cnp->cn_nameptr, dir_p, - (vap->va_type == VBLK) ? DEV_BDEV : DEV_CDEV, - &typeinfo, NULL, NULL, &devent); + error = dev_add_entry(cnp->cn_nameptr, dir_p, + (vap->va_type == VBLK) ? DEV_BDEV : DEV_CDEV, + &typeinfo, NULL, NULL, &devent); if (error) { - goto failure; + goto failure; } dev_p = devent->de_dnp; error = devfs_dntovn(dev_p, vpp, p); - if (error) - goto failure; + if (error) { + goto failure; + } dev_p->dn_uid = vap->va_uid; dev_p->dn_gid = vap->va_gid; dev_p->dn_mode = vap->va_mode; @@ -1307,7 +1311,7 @@ devfs_mknod(struct vnop_mknod_args *ap) failure: DEVFS_UNLOCK(); - return (error); + return error; } /* @@ -1315,33 +1319,35 @@ failure: */ static int devfs_readdir(struct vnop_readdir_args *ap) - /*struct vnop_readdir_args { - struct vnode *a_vp; - struct uio *a_uio; - int a_flags; - int *a_eofflag; - int *a_numdirent; - vfs_context_t a_context; - } */ +/*struct vnop_readdir_args { + * struct vnode *a_vp; + * struct uio *a_uio; + * int a_flags; + * int *a_eofflag; + * int *a_numdirent; + * vfs_context_t a_context; + * } */ { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; struct dirent dirent; devnode_t * dir_node; - devdirent_t * name_node; + devdirent_t * name_node; const char *name; int error = 0; int reclen; int nodenumber; - int startpos,pos; + int startpos, pos; - if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) - return (EINVAL); + if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) { + return EINVAL; + } /* set up refs to dir */ dir_node = VTODN(vp); - if (dir_node->dn_type != DEV_DIR) - return(ENOTDIR); + if (dir_node->dn_type != DEV_DIR) { + return ENOTDIR; + } pos = 0; startpos = uio->uio_offset; @@ -1350,21 +1356,20 @@ devfs_readdir(struct vnop_readdir_args *ap) name_node = dir_node->dn_typeinfo.Dir.dirlist; nodenumber = 0; - while ((name_node || (nodenumber < 2)) && (uio_resid(uio) > 0)) - { - switch(nodenumber) - { - case 0: + while ((name_node || (nodenumber < 2)) && (uio_resid(uio) > 0)) { + switch (nodenumber) { + case 0: dirent.d_fileno = dir_node->dn_ino; name = "."; dirent.d_namlen = 1; dirent.d_type = DT_DIR; break; - case 1: - if(dir_node->dn_typeinfo.Dir.parent) + case 1: + if (dir_node->dn_typeinfo.Dir.parent) { dirent.d_fileno = dir_node->dn_typeinfo.Dir.parent->dn_ino; - else + } else { dirent.d_fileno = dir_node->dn_ino; + } name = ".."; dirent.d_namlen = 2; dirent.d_type = DT_DIR; @@ -1373,7 +1378,7 @@ devfs_readdir(struct vnop_readdir_args *ap) dirent.d_fileno = name_node->de_dnp->dn_ino; dirent.d_namlen = strlen(name_node->de_name); name = name_node->de_name; - 
switch(name_node->de_dnp->dn_type) { + switch (name_node->de_dnp->dn_type) { case DEV_BDEV: dirent.d_type = DT_BLK; break; @@ -1390,23 +1395,25 @@ devfs_readdir(struct vnop_readdir_args *ap) dirent.d_type = DT_UNKNOWN; } } -#define GENERIC_DIRSIZ(dp) \ +#define GENERIC_DIRSIZ(dp) \ ((sizeof (struct dirent) - (MAXNAMLEN+1)) + (((dp)->d_namlen+1 + 3) &~ 3)) reclen = dirent.d_reclen = GENERIC_DIRSIZ(&dirent); - if(pos >= startpos) /* made it to the offset yet? */ - { - if (uio_resid(uio) < reclen) /* will it fit? */ + if (pos >= startpos) { /* made it to the offset yet? */ + if (uio_resid(uio) < reclen) { /* will it fit? */ break; + } strlcpy(dirent.d_name, name, DEVMAXNAMESIZE); - if ((error = uiomove ((caddr_t)&dirent, - dirent.d_reclen, uio)) != 0) + if ((error = uiomove((caddr_t)&dirent, + dirent.d_reclen, uio)) != 0) { break; + } } pos += reclen; - if((nodenumber >1) && name_node) + if ((nodenumber > 1) && name_node) { name_node = name_node->de_next; + } nodenumber++; } DEVFS_UNLOCK(); @@ -1414,7 +1421,7 @@ devfs_readdir(struct vnop_readdir_args *ap) devfs_consider_time_update(dir_node, DEVFS_UPDATE_ACCESS); - return (error); + return error; } @@ -1422,11 +1429,11 @@ devfs_readdir(struct vnop_readdir_args *ap) */ static int devfs_readlink(struct vnop_readlink_args *ap) - /*struct vnop_readlink_args { - struct vnode *a_vp; - struct uio *a_uio; - vfs_context_t a_context; - } */ +/*struct vnop_readlink_args { + * struct vnode *a_vp; + * struct uio *a_uio; + * vfs_context_t a_context; + * } */ { struct vnode *vp = ap->a_vp; struct uio *uio = ap->a_uio; @@ -1437,39 +1444,39 @@ devfs_readlink(struct vnop_readlink_args *ap) lnk_node = VTODN(vp); if (lnk_node->dn_type != DEV_SLNK) { - error = EINVAL; + error = EINVAL; goto out; } - error = uiomove(lnk_node->dn_typeinfo.Slnk.name, - lnk_node->dn_typeinfo.Slnk.namelen, uio); -out: + error = uiomove(lnk_node->dn_typeinfo.Slnk.name, + lnk_node->dn_typeinfo.Slnk.namelen, uio); +out: return error; } static int devfs_reclaim(struct vnop_reclaim_args *ap) - /*struct vnop_reclaim_args { - struct vnode *a_vp; - } */ +/*struct vnop_reclaim_args { + * struct vnode *a_vp; + * } */ { - struct vnode * vp = ap->a_vp; - devnode_t * dnp; - - DEVFS_LOCK(); + struct vnode * vp = ap->a_vp; + devnode_t * dnp; - dnp = VTODN(vp); + DEVFS_LOCK(); + + dnp = VTODN(vp); - if (dnp) { - /* If this is a cloning device, it didn't have a dn_vn anyway */ - dnp->dn_vn = NULL; - vnode_clearfsnode(vp); + if (dnp) { + /* If this is a cloning device, it didn't have a dn_vn anyway */ + dnp->dn_vn = NULL; + vnode_clearfsnode(vp); - /* This could delete the node, if we are the last vnode */ - devfs_rele_node(dnp); - } - DEVFS_UNLOCK(); + /* This could delete the node, if we are the last vnode */ + devfs_rele_node(dnp); + } + DEVFS_UNLOCK(); - return(0); + return 0; } @@ -1479,11 +1486,11 @@ devfs_reclaim(struct vnop_reclaim_args *ap) static int devs_vnop_pathconf( struct vnop_pathconf_args /* { - struct vnode *a_vp; - int a_name; - int *a_retval; - vfs_context_t a_context; - } */ *ap) + * struct vnode *a_vp; + * int a_name; + * int *a_retval; + * vfs_context_t a_context; + * } */*ap) { switch (ap->a_name) { case _PC_LINK_MAX: @@ -1491,13 +1498,13 @@ devs_vnop_pathconf( *ap->a_retval = 32767; break; case _PC_NAME_MAX: - *ap->a_retval = DEVMAXNAMESIZE - 1; /* includes NUL */ + *ap->a_retval = DEVMAXNAMESIZE - 1; /* includes NUL */ break; case _PC_PATH_MAX: - *ap->a_retval = DEVMAXPATHSIZE - 1; /* XXX nonconformant */ + *ap->a_retval = DEVMAXPATHSIZE - 1; /* XXX nonconformant */ break; 
case _PC_CHOWN_RESTRICTED: - *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ + *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ break; case _PC_NO_TRUNC: *ap->a_retval = 0; @@ -1509,10 +1516,10 @@ devs_vnop_pathconf( *ap->a_retval = 1; break; default: - return (EINVAL); + return EINVAL; } - return (0); + return 0; } @@ -1526,24 +1533,24 @@ devs_vnop_pathconf( * struct vnop_inactive_args { * struct vnode *a_vp; * vfs_context_t a_context; - * } + * } */ static int devfs_inactive(__unused struct vnop_inactive_args *ap) { - vnode_t vp = ap->a_vp; + vnode_t vp = ap->a_vp; devnode_t *dnp = VTODN(vp); - /* + /* * Cloned vnodes are not linked in anywhere, so they - * can just be recycled. + * can just be recycled. */ if (dnp->dn_clone != NULL) { vnode_recycle(vp); } - return (0); + return 0; } /* @@ -1557,11 +1564,11 @@ devfs_update(struct vnode *vp, struct timeval *access, struct timeval *modify) ip = VTODN(vp); if (vp->v_mount->mnt_flag & MNT_RDONLY) { - ip->dn_access = 0; - ip->dn_change = 0; - ip->dn_update = 0; + ip->dn_access = 0; + ip->dn_change = 0; + ip->dn_update = 0; - return (0); + return 0; } DEVFS_ATTR_LOCK_SPIN(); @@ -1569,127 +1576,125 @@ devfs_update(struct vnode *vp, struct timeval *access, struct timeval *modify) dn_times_locked(ip, access, modify, &now, DEVFS_UPDATE_ACCESS | DEVFS_UPDATE_MOD); DEVFS_ATTR_UNLOCK(); - return (0); + return 0; } #define VOPFUNC int (*)(void *) /* The following ops are used by directories and symlinks */ -int (**devfs_vnodeop_p)(void *); +int(**devfs_vnodeop_p)(void *); static struct vnodeopv_entry_desc devfs_vnodeop_entries[] = { { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)devfs_lookup }, /* lookup */ - { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ - { &vnop_whiteout_desc, (VOPFUNC)err_whiteout }, /* whiteout */ - { &vnop_mknod_desc, (VOPFUNC)devfs_mknod }, /* mknod */ - { &vnop_open_desc, (VOPFUNC)nop_open }, /* open */ - { &vnop_close_desc, (VOPFUNC)devfs_close }, /* close */ - { &vnop_getattr_desc, (VOPFUNC)devfs_getattr }, /* getattr */ - { &vnop_setattr_desc, (VOPFUNC)devfs_setattr }, /* setattr */ - { &vnop_read_desc, (VOPFUNC)devfs_read }, /* read */ - { &vnop_write_desc, (VOPFUNC)devfs_write }, /* write */ - { &vnop_ioctl_desc, (VOPFUNC)err_ioctl }, /* ioctl */ - { &vnop_select_desc, (VOPFUNC)err_select }, /* select */ - { &vnop_revoke_desc, (VOPFUNC)err_revoke }, /* revoke */ - { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ - { &vnop_fsync_desc, (VOPFUNC)nop_fsync }, /* fsync */ - { &vnop_remove_desc, (VOPFUNC)devfs_vnop_remove }, /* remove */ - { &vnop_link_desc, (VOPFUNC)devfs_link }, /* link */ - { &vnop_rename_desc, (VOPFUNC)devfs_rename }, /* rename */ - { &vnop_mkdir_desc, (VOPFUNC)devfs_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (VOPFUNC)devfs_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (VOPFUNC)devfs_symlink }, /* symlink */ - { &vnop_readdir_desc, (VOPFUNC)devfs_readdir }, /* readdir */ - { &vnop_readlink_desc, (VOPFUNC)devfs_readlink }, /* readlink */ - { &vnop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ - { &vnop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (VOPFUNC)err_strategy }, /* strategy */ - { &vnop_pathconf_desc, (VOPFUNC)devs_vnop_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ + { &vnop_lookup_desc, (VOPFUNC)devfs_lookup }, /* lookup */ + { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ + { &vnop_whiteout_desc, (VOPFUNC)err_whiteout }, /* whiteout */ + { 
&vnop_mknod_desc, (VOPFUNC)devfs_mknod }, /* mknod */ + { &vnop_open_desc, (VOPFUNC)nop_open }, /* open */ + { &vnop_close_desc, (VOPFUNC)devfs_close }, /* close */ + { &vnop_getattr_desc, (VOPFUNC)devfs_getattr }, /* getattr */ + { &vnop_setattr_desc, (VOPFUNC)devfs_setattr }, /* setattr */ + { &vnop_read_desc, (VOPFUNC)devfs_read }, /* read */ + { &vnop_write_desc, (VOPFUNC)devfs_write }, /* write */ + { &vnop_ioctl_desc, (VOPFUNC)err_ioctl }, /* ioctl */ + { &vnop_select_desc, (VOPFUNC)err_select }, /* select */ + { &vnop_revoke_desc, (VOPFUNC)err_revoke }, /* revoke */ + { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ + { &vnop_fsync_desc, (VOPFUNC)nop_fsync }, /* fsync */ + { &vnop_remove_desc, (VOPFUNC)devfs_vnop_remove }, /* remove */ + { &vnop_link_desc, (VOPFUNC)devfs_link }, /* link */ + { &vnop_rename_desc, (VOPFUNC)devfs_rename }, /* rename */ + { &vnop_mkdir_desc, (VOPFUNC)devfs_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (VOPFUNC)devfs_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (VOPFUNC)devfs_symlink }, /* symlink */ + { &vnop_readdir_desc, (VOPFUNC)devfs_readdir }, /* readdir */ + { &vnop_readlink_desc, (VOPFUNC)devfs_readlink }, /* readlink */ + { &vnop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ + { &vnop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (VOPFUNC)err_strategy }, /* strategy */ + { &vnop_pathconf_desc, (VOPFUNC)devs_vnop_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ { &vnop_bwrite_desc, (VOPFUNC)err_bwrite }, - { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ - { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (VOPFUNC)err_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (VOPFUNC)err_blockmap }, /* blockmap */ + { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (VOPFUNC)err_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (VOPFUNC)err_blockmap }, /* blockmap */ #if CONFIG_MACF { &vnop_setlabel_desc, (VOPFUNC)devfs_setlabel }, /* setlabel */ #endif - { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } + { (struct vnodeop_desc*)NULL, (int (*)(void *))NULL } }; struct vnodeopv_desc devfs_vnodeop_opv_desc = - { &devfs_vnodeop_p, devfs_vnodeop_entries }; +{ &devfs_vnodeop_p, devfs_vnodeop_entries }; /* The following ops are used by the device nodes */ -int (**devfs_spec_vnodeop_p)(void *); +int(**devfs_spec_vnodeop_p)(void *); static struct vnodeopv_entry_desc devfs_spec_vnodeop_entries[] = { { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ - { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */ - { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */ - { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */ - { &vnop_close_desc, (VOPFUNC)devfsspec_close }, /* close */ - { &vnop_getattr_desc, (VOPFUNC)devfs_getattr }, /* getattr */ - { &vnop_setattr_desc, (VOPFUNC)devfs_setattr }, /* setattr */ - { &vnop_read_desc, (VOPFUNC)devfsspec_read }, /* read */ - { &vnop_write_desc, (VOPFUNC)devfsspec_write }, /* write */ - { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ - { &vnop_select_desc, (VOPFUNC)spec_select }, /* 
select */ - { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */ - { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */ - { &vnop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ - { &vnop_remove_desc, (VOPFUNC)devfs_vnop_remove }, /* remove */ - { &vnop_link_desc, (VOPFUNC)devfs_link }, /* link */ - { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */ - { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */ - { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */ - { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */ - { &vnop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ - { &vnop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ - { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (VOPFUNC)spec_advlock }, /* advlock */ + { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ + { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */ + { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */ + { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */ + { &vnop_close_desc, (VOPFUNC)devfsspec_close }, /* close */ + { &vnop_getattr_desc, (VOPFUNC)devfs_getattr }, /* getattr */ + { &vnop_setattr_desc, (VOPFUNC)devfs_setattr }, /* setattr */ + { &vnop_read_desc, (VOPFUNC)devfsspec_read }, /* read */ + { &vnop_write_desc, (VOPFUNC)devfsspec_write }, /* write */ + { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ + { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */ + { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */ + { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */ + { &vnop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ + { &vnop_remove_desc, (VOPFUNC)devfs_vnop_remove }, /* remove */ + { &vnop_link_desc, (VOPFUNC)devfs_link }, /* link */ + { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */ + { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */ + { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */ + { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */ + { &vnop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ + { &vnop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ + { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (VOPFUNC)spec_advlock }, /* advlock */ { &vnop_bwrite_desc, (VOPFUNC)vn_bwrite }, - { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ - { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (VOPFUNC)spec_blktooff }, /* blktooff */ - { &vnop_blktooff_desc, (VOPFUNC)spec_offtoblk }, /* blkofftoblk */ - { &vnop_blockmap_desc, (VOPFUNC)spec_blockmap }, /* blockmap */ + { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (VOPFUNC)spec_blktooff }, /* blktooff */ + { &vnop_blktooff_desc, (VOPFUNC)spec_offtoblk }, /* blkofftoblk */ + { &vnop_blockmap_desc, (VOPFUNC)spec_blockmap }, /* blockmap */ #if CONFIG_MACF - { &vnop_setlabel_desc, (VOPFUNC)devfs_setlabel }, /* setlabel */ + { 
&vnop_setlabel_desc, (VOPFUNC)devfs_setlabel }, /* setlabel */ #endif - { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } + { (struct vnodeop_desc*)NULL, (int (*)(void *))NULL } }; struct vnodeopv_desc devfs_spec_vnodeop_opv_desc = - { &devfs_spec_vnodeop_p, devfs_spec_vnodeop_entries }; +{ &devfs_spec_vnodeop_p, devfs_spec_vnodeop_entries }; #if FDESC -int (**devfs_devfd_vnodeop_p)(void*); +int(**devfs_devfd_vnodeop_p)(void*); static struct vnodeopv_entry_desc devfs_devfd_vnodeop_entries[] = { { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)devfs_devfd_lookup}, /* lookup */ - { &vnop_open_desc, (VOPFUNC)nop_open }, /* open */ - { &vnop_close_desc, (VOPFUNC)devfs_close }, /* close */ - { &vnop_getattr_desc, (VOPFUNC)devfs_getattr }, /* getattr */ - { &vnop_setattr_desc, (VOPFUNC)devfs_setattr }, /* setattr */ - { &vnop_revoke_desc, (VOPFUNC)err_revoke }, /* revoke */ - { &vnop_fsync_desc, (VOPFUNC)nop_fsync }, /* fsync */ - { &vnop_readdir_desc, (VOPFUNC)devfs_devfd_readdir}, /* readdir */ - { &vnop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ - { &vnop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ - { &vnop_pathconf_desc, (VOPFUNC)devs_vnop_pathconf }, /* pathconf */ + { &vnop_lookup_desc, (VOPFUNC)devfs_devfd_lookup}, /* lookup */ + { &vnop_open_desc, (VOPFUNC)nop_open }, /* open */ + { &vnop_close_desc, (VOPFUNC)devfs_close }, /* close */ + { &vnop_getattr_desc, (VOPFUNC)devfs_getattr }, /* getattr */ + { &vnop_setattr_desc, (VOPFUNC)devfs_setattr }, /* setattr */ + { &vnop_revoke_desc, (VOPFUNC)err_revoke }, /* revoke */ + { &vnop_fsync_desc, (VOPFUNC)nop_fsync }, /* fsync */ + { &vnop_readdir_desc, (VOPFUNC)devfs_devfd_readdir}, /* readdir */ + { &vnop_inactive_desc, (VOPFUNC)devfs_inactive }, /* inactive */ + { &vnop_reclaim_desc, (VOPFUNC)devfs_reclaim }, /* reclaim */ + { &vnop_pathconf_desc, (VOPFUNC)devs_vnop_pathconf }, /* pathconf */ #if CONFIG_MACF { &vnop_setlabel_desc, (VOPFUNC)devfs_setlabel }, /* setlabel */ #endif - { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } + { (struct vnodeop_desc*)NULL, (int (*)(void *))NULL } }; struct vnodeopv_desc devfs_devfd_vnodeop_opv_desc = - { &devfs_devfd_vnodeop_p, devfs_devfd_vnodeop_entries}; +{ &devfs_devfd_vnodeop_p, devfs_devfd_vnodeop_entries}; #endif /* FDESC */ - - diff --git a/bsd/miscfs/devfs/devfsdefs.h b/bsd/miscfs/devfs/devfsdefs.h index 6fdac8849..502e4daa5 100644 --- a/bsd/miscfs/devfs/devfsdefs.h +++ b/bsd/miscfs/devfs/devfsdefs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright 1997,1998 Julian Elischer. All rights reserved. * julian@freebsd.org - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: @@ -37,7 +37,7 @@ * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ``AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -49,7 +49,7 @@ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * + * * devfsdefs.h */ /* @@ -74,111 +74,109 @@ __BEGIN_DECLS #ifdef __APPLE_API_PRIVATE -#define DEVMAXNAMESIZE 32 /* XXX */ -#define DEVMAXPATHSIZE 128 /* XXX */ +#define DEVMAXNAMESIZE 32 /* XXX */ +#define DEVMAXPATHSIZE 128 /* XXX */ typedef enum { - DEV_DIR, - DEV_BDEV, - DEV_CDEV, - DEV_SLNK, + DEV_DIR, + DEV_BDEV, + DEV_CDEV, + DEV_SLNK, #if FDESC - DEV_DEVFD + DEV_DEVFD #endif /* FDESC */ } devfstype_t; -extern int (**devfs_vnodeop_p)(void *); /* our own vector array for dirs */ -extern int (**devfs_spec_vnodeop_p)(void *); /* our own vector array for devs */ +extern int(**devfs_vnodeop_p)(void *); /* our own vector array for dirs */ +extern int(**devfs_spec_vnodeop_p)(void *); /* our own vector array for devs */ extern struct vfsops devfs_vfsops; -typedef struct devnode devnode_t; -typedef struct devdirent devdirent_t; -typedef union devnode_type devnode_type_t; +typedef struct devnode devnode_t; +typedef struct devdirent devdirent_t; +typedef union devnode_type devnode_type_t; struct devfs_stats { - int nodes; - int entries; - int mounts; - int stringspace; + int nodes; + int entries; + int mounts; + int stringspace; }; union devnode_type { - dev_t dev; - struct { - devdirent_t * dirlist; - devdirent_t * * dirlast; - devnode_t * parent; - devdirent_t * myname; /* my entry in .. */ - int entrycount; - }Dir; - struct { - char * name; /* must be allocated separately */ - int namelen; - }Slnk; + dev_t dev; + struct { + devdirent_t * dirlist; + devdirent_t * * dirlast; + devnode_t * parent; + devdirent_t * myname; /* my entry in .. */ + int entrycount; + }Dir; + struct { + char * name;/* must be allocated separately */ + int namelen; + }Slnk; }; #define DEV_MAX_VNODE_RETRY 8 /* Max number of retries when we try to - get a vnode for the devnode */ -struct devnode -{ - devfstype_t dn_type; - /* - * Number of vnodes that point to this devnode. Note, we do not - * add another reference for a lookup which finds an existing - * vnode; a reference is added when a vnode is created and removed - * when a vnode is reclaimed. A devnode will not be freed while - * there are outstanding references. A refcount can be added to - * prevent the free of a devnode in situations where there is not - * guaranteed to be a vnode holding a ref, but it is important to + * get a vnode for the devnode */ +struct devnode { + devfstype_t dn_type; + /* + * Number of vnodes that point to this devnode. 
Note, we do not + * add another reference for a lookup which finds an existing + * vnode; a reference is added when a vnode is created and removed + * when a vnode is reclaimed. A devnode will not be freed while + * there are outstanding references. A refcount can be added to + * prevent the free of a devnode in situations where there is not + * guaranteed to be a vnode holding a ref, but it is important to * make sure that a deferred delete eventually happens if it is - * blocked behind that reference. - */ - int dn_refcount; - u_short dn_mode; - uid_t dn_uid; - gid_t dn_gid; - struct timespec dn_atime;/* time of last access */ - struct timespec dn_mtime;/* time of last modification */ - struct timespec dn_ctime;/* time file changed */ - int (***dn_ops)(void *);/* yuk... pointer to pointer(s) to funcs */ - int dn_links;/* how many file links does this node have? */ - struct devfsmount * dn_dvm; /* the mount structure for this 'plane' */ - struct vnode * dn_vn; /* address of last vnode that represented us */ - int dn_len; /* of any associated info (e.g. dir data) */ - devdirent_t * dn_linklist;/* circular list of hardlinks to this node */ - devnode_t * dn_nextsibling; /* the list of equivalent nodes */ - devnode_t * * dn_prevsiblingp;/* backpointer for the above */ - devnode_type_t dn_typeinfo; - int dn_change; - int dn_update; - int dn_access; - int dn_lflags; - ino_t dn_ino; - int (*dn_clone)(dev_t dev, int action); /* get minor # */ - struct label * dn_label; /* security label */ + * blocked behind that reference. + */ + int dn_refcount; + u_short dn_mode; + uid_t dn_uid; + gid_t dn_gid; + struct timespec dn_atime;/* time of last access */ + struct timespec dn_mtime;/* time of last modification */ + struct timespec dn_ctime;/* time file changed */ + int(***dn_ops)(void *);/* yuk... pointer to pointer(s) to funcs */ + int dn_links;/* how many file links does this node have? */ + struct devfsmount * dn_dvm; /* the mount structure for this 'plane' */ + struct vnode * dn_vn;/* address of last vnode that represented us */ + int dn_len;/* of any associated info (e.g. 
dir data) */ + devdirent_t * dn_linklist;/* circular list of hardlinks to this node */ + devnode_t * dn_nextsibling;/* the list of equivalent nodes */ + devnode_t * * dn_prevsiblingp;/* backpointer for the above */ + devnode_type_t dn_typeinfo; + int dn_change; + int dn_update; + int dn_access; + int dn_lflags; + ino_t dn_ino; + int (*dn_clone)(dev_t dev, int action);/* get minor # */ + struct label * dn_label; /* security label */ }; -#define DN_DELETE 0x02 -#define DN_CREATE 0x04 -#define DN_CREATEWAIT 0x08 +#define DN_DELETE 0x02 +#define DN_CREATE 0x04 +#define DN_CREATEWAIT 0x08 -struct devdirent -{ - /*-----------------------directory entry fields-------------*/ - char de_name[DEVMAXNAMESIZE]; - devnode_t * de_dnp; /* the "inode" (devnode) pointer */ - devnode_t * de_parent; /* backpointer to the directory itself */ - devdirent_t * de_next; /* next object in this directory */ - devdirent_t * *de_prevp; /* previous pointer in directory linked list */ - devdirent_t * de_nextlink; /* next hardlink to this node */ - devdirent_t * *de_prevlinkp; /* previous hardlink pointer for this node */ +struct devdirent { + /*-----------------------directory entry fields-------------*/ + char de_name[DEVMAXNAMESIZE]; + devnode_t * de_dnp; /* the "inode" (devnode) pointer */ + devnode_t * de_parent; /* backpointer to the directory itself */ + devdirent_t * de_next; /* next object in this directory */ + devdirent_t * *de_prevp; /* previous pointer in directory linked list */ + devdirent_t * de_nextlink;/* next hardlink to this node */ + devdirent_t * *de_prevlinkp;/* previous hardlink pointer for this node */ }; -extern devdirent_t * dev_root; -extern struct devfs_stats devfs_stats; -extern lck_mtx_t devfs_mutex; -extern lck_mtx_t devfs_attr_mutex; +extern devdirent_t * dev_root; +extern struct devfs_stats devfs_stats; +extern lck_mtx_t devfs_mutex; +extern lck_mtx_t devfs_attr_mutex; /* * Rules for front nodes: @@ -192,10 +190,9 @@ extern lck_mtx_t devfs_attr_mutex; * DEVFS specific per/mount information, used to link a mounted fs to a * particular 'plane' of front nodes.
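*
* A hypothetical two-mount picture, for illustration only (not part of
* the original header text): the per-plane devnodes for one device are
* chained through dn_nextsibling/dn_prevsiblingp, so a device
* registered once is visible in every mounted plane:
*
*	mount A plane:  devdirent "console" -> devnode A ---+
*	                                                    | dn_nextsibling
*	mount B plane:  devdirent "console" -> devnode B ---+ (same dev_t)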
*/ -struct devfsmount -{ - struct mount * mount; /* vfs mount struct for this fs */ - devdirent_t * plane_root;/* the root of this 'plane' */ +struct devfsmount { + struct mount * mount;/* vfs mount struct for this fs */ + devdirent_t * plane_root;/* the root of this 'plane' */ }; /* @@ -203,22 +200,22 @@ */ #include #include -#include <libkern/OSAtomic.h> /* required for OSAddAtomic() */ +#include <libkern/OSAtomic.h> /* required for OSAddAtomic() */ //#define HIDDEN_MOUNTPOINT 1 /* misc */ -#define M_DEVFSNAME M_DEVFS -#define M_DEVFSNODE M_DEVFS -#define M_DEVFSMNT M_DEVFS +#define M_DEVFSNAME M_DEVFS +#define M_DEVFSNODE M_DEVFS +#define M_DEVFSMNT M_DEVFS -#define VTODN(vp) ((devnode_t *)(vp)->v_data) +#define VTODN(vp) ((devnode_t *)(vp)->v_data) -#define DEVFS_LOCK() lck_mtx_lock(&devfs_mutex) -#define DEVFS_UNLOCK() lck_mtx_unlock(&devfs_mutex) +#define DEVFS_LOCK() lck_mtx_lock(&devfs_mutex) +#define DEVFS_UNLOCK() lck_mtx_unlock(&devfs_mutex) -#define DEVFS_ATTR_LOCK_SPIN() lck_mtx_lock_spin(&devfs_attr_mutex); -#define DEVFS_ATTR_UNLOCK() lck_mtx_unlock(&devfs_attr_mutex); +#define DEVFS_ATTR_LOCK_SPIN() lck_mtx_lock_spin(&devfs_attr_mutex); +#define DEVFS_ATTR_UNLOCK() lck_mtx_unlock(&devfs_attr_mutex); /* * XXX all the (SInt32 *) casts below assume sizeof(int) == sizeof(long) @@ -226,82 +223,82 @@ struct devfsmount static __inline__ void DEVFS_INCR_ENTRIES(void) { - OSAddAtomic(1, &devfs_stats.entries); + OSAddAtomic(1, &devfs_stats.entries); } static __inline__ void DEVFS_DECR_ENTRIES(void) { - OSAddAtomic(-1, &devfs_stats.entries); + OSAddAtomic(-1, &devfs_stats.entries); } static __inline__ void DEVFS_INCR_NODES(void) { - OSAddAtomic(1, &devfs_stats.nodes); + OSAddAtomic(1, &devfs_stats.nodes); } static __inline__ void DEVFS_DECR_NODES(void) { - OSAddAtomic(-1, &devfs_stats.nodes); + OSAddAtomic(-1, &devfs_stats.nodes); } static __inline__ void DEVFS_INCR_MOUNTS(void) { - OSAddAtomic(1, &devfs_stats.mounts); + OSAddAtomic(1, &devfs_stats.mounts); } static __inline__ void DEVFS_DECR_MOUNTS(void) { - OSAddAtomic(-1, &devfs_stats.mounts); + OSAddAtomic(-1, &devfs_stats.mounts); } static __inline__ void DEVFS_INCR_STRINGSPACE(int space) { - OSAddAtomic(space, &devfs_stats.stringspace); + OSAddAtomic(space, &devfs_stats.stringspace); } static __inline__ void DEVFS_DECR_STRINGSPACE(int space) { - OSAddAtomic(-space, &devfs_stats.stringspace); + OSAddAtomic(-space, &devfs_stats.stringspace); } -/* +/* * Access, change, and modify times are protected by a separate lock, * which allows tty times to be updated (no more than once per second) * in the I/O path without too much fear of contention. * - * For getattr, update times to current time if the last update was recent; - * preserve legacy behavior that frequent stats can yield sub-second resolutions. + * For getattr, update times to current time if the last update was recent; + * preserve legacy behavior that frequent stats can yield sub-second resolutions. * If the last time is old, however, we know that the event that triggered * the need for an update was no more than 1s after the last update. In that case, * use (last update + 1s) as the time, avoiding the illusion that last update happened * much later than it really did.
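*
* As a sketch (illustrative pseudo-code only, assuming "now" holds the
* current time as a struct timespec and dnp points at the devnode
* being stat'ed; this is not code the patch touches):
*
*	if (now.tv_sec - dnp->dn_mtime.tv_sec <= DEVFS_LAZY_UPDATE_SECONDS)
*		dnp->dn_mtime = now;                    recent: report "now"
*	else
*		dnp->dn_mtime.tv_sec += DEVFS_LAZY_UPDATE_SECONDS;  old: last + 1s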
*/ -#define DEVFS_LAZY_UPDATE_SECONDS 1 +#define DEVFS_LAZY_UPDATE_SECONDS 1 -#define DEVFS_UPDATE_CHANGE 0x1 -#define DEVFS_UPDATE_MOD 0x2 -#define DEVFS_UPDATE_ACCESS 0x4 +#define DEVFS_UPDATE_CHANGE 0x1 +#define DEVFS_UPDATE_MOD 0x2 +#define DEVFS_UPDATE_ACCESS 0x4 static __inline__ void dn_copy_times(devnode_t * target, devnode_t * source) { - DEVFS_ATTR_LOCK_SPIN(); - target->dn_atime = source->dn_atime; - target->dn_mtime = source->dn_mtime; - target->dn_ctime = source->dn_ctime; - DEVFS_ATTR_UNLOCK(); - return; + DEVFS_ATTR_LOCK_SPIN(); + target->dn_atime = source->dn_atime; + target->dn_mtime = source->dn_mtime; + target->dn_ctime = source->dn_ctime; + DEVFS_ATTR_UNLOCK(); + return; } #ifdef BSD_KERNEL_PRIVATE -int devfs_make_symlink(devnode_t *dir_p, char *name, int mode, char *target, devdirent_t **newent); +int devfs_make_symlink(devnode_t *dir_p, char *name, int mode, char *target, devdirent_t **newent); #endif /* BSD_KERNEL_PRIVATE */ #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/miscfs/devfs/fdesc.h b/bsd/miscfs/devfs/fdesc.h index f4ff749ef..f024c55e3 100644 --- a/bsd/miscfs/devfs/fdesc.h +++ b/bsd/miscfs/devfs/fdesc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -73,29 +73,29 @@ #ifdef __APPLE_API_PRIVATE #ifdef KERNEL -#define FD_ROOT 2 -#define FD_DEVFD 3 -#define FD_STDIN 4 -#define FD_STDOUT 5 -#define FD_STDERR 6 -#define FD_DESC 8 -#define FD_MAX 12 +#define FD_ROOT 2 +#define FD_DEVFD 3 +#define FD_STDIN 4 +#define FD_STDOUT 5 +#define FD_STDERR 6 +#define FD_DESC 8 +#define FD_MAX 12 typedef enum { Fdesc, } fdntype; struct fdescnode { - LIST_ENTRY(fdescnode) fd_hash; /* Hash list */ - struct vnode *fd_vnode; /* Back ptr to vnode */ - fdntype fd_type; /* Type of this node */ - long fd_fd; /* Fd to be dup'ed */ - const char *fd_link; /* Link to fd/n */ - int fd_ix; /* filesystem index */ + LIST_ENTRY(fdescnode) fd_hash; /* Hash list */ + struct vnode *fd_vnode; /* Back ptr to vnode */ + fdntype fd_type; /* Type of this node */ + long fd_fd; /* Fd to be dup'ed */ + const char *fd_link; /* Link to fd/n */ + int fd_ix; /* filesystem index */ }; -#define VFSTOFDESC(mp) ((struct fdescmount *)((mp)->mnt_data)) -#define VTOFDESC(vp) ((struct fdescnode *)(vp)->v_data) +#define VFSTOFDESC(mp) ((struct fdescmount *)((mp)->mnt_data)) +#define VTOFDESC(vp) ((struct fdescnode *)(vp)->v_data) __BEGIN_DECLS extern int fdesc_allocvp(fdntype, int, struct mount *, struct vnode **, enum vtype, int); @@ -118,8 +118,8 @@ extern int fdesc_select(struct vnop_select_args *ap); extern int fdesc_setattr(struct vnop_setattr_args *ap); extern int fdesc_write(struct vnop_write_args *ap); -extern int (**fdesc_vnodeop_p)(void *); -extern int (**devfs_devfd_vnodeop_p)(void*); +extern int(**fdesc_vnodeop_p)(void *); +extern int(**devfs_devfd_vnodeop_p)(void*); extern struct vfsops fdesc_vfsops; __END_DECLS diff --git a/bsd/miscfs/fifofs/fifo.h b/bsd/miscfs/fifofs/fifo.h index d60c05874..0bcb4c0c2 100644 --- a/bsd/miscfs/fifofs/fifo.h +++ b/bsd/miscfs/fifofs/fifo.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -73,19 +73,19 @@ __BEGIN_DECLS * the state associated with the FIFO. 
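*
* Lifecycle sketch (for illustration, derived from fifo_open() and
* fifo_close_internal() in fifo_vnops.c later in this patch):
*
*	open for read:   fi_readers++; wakeup any blocked writers
*	open for write:  fi_writers++; wakeup any blocked readers
*	close:           the matching count drops; when both reach zero,
*	                 fi_readsock/fi_writesock are soclose()'d and
*	                 FIFO_CREATED is cleared for the next open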
*/ struct fifoinfo { - unsigned int fi_flags; - struct socket *fi_readsock; - struct socket *fi_writesock; - long fi_readers; - long fi_writers; - unsigned int fi_count; + unsigned int fi_flags; + struct socket *fi_readsock; + struct socket *fi_writesock; + long fi_readers; + long fi_writers; + unsigned int fi_count; }; -#define FIFO_INCREATE 1 -#define FIFO_CREATEWAIT 2 -#define FIFO_CREATED 4 +#define FIFO_INCREATE 1 +#define FIFO_CREATEWAIT 2 +#define FIFO_CREATED 4 -int fifo_close_internal (vnode_t, int, vfs_context_t, int); +int fifo_close_internal(vnode_t, int, vfs_context_t, int); int fifo_freespace(struct vnode *vp, long *count); int fifo_charcount(struct vnode *vp, int *count); @@ -94,14 +94,14 @@ int fifo_charcount(struct vnode *vp, int *count); /* * Prototypes for fifo operations on vnodes. */ -int fifo_ebadf(void *); +int fifo_ebadf(void *); #define fifo_create (int (*) (struct vnop_create_args *))err_create #define fifo_mknod (int (*) (struct vnop_mknod_args *))err_mknod #define fifo_access (int (*) (struct vnop_access_args *))fifo_ebadf #define fifo_getattr (int (*) (struct vnop_getattr_args *))fifo_ebadf #define fifo_setattr (int (*) (struct vnop_setattr_args *))fifo_ebadf -#define fifo_revoke nop_revoke +#define fifo_revoke nop_revoke #define fifo_mmap (int (*) (struct vnop_mmap_args *))err_mmap #define fifo_fsync (int (*) (struct vnop_fsync_args *))nullop #define fifo_remove (int (*) (struct vnop_remove_args *))err_remove @@ -119,16 +119,16 @@ int fifo_ebadf(void *); #define fifo_bwrite (int (*) (struct vnop_bwrite_args *))nullop #define fifo_blktooff (int (*) (struct vnop_blktooff_args *))err_blktooff -int fifo_lookup (struct vnop_lookup_args *); -int fifo_open (struct vnop_open_args *); -int fifo_close (struct vnop_close_args *); -int fifo_read (struct vnop_read_args *); -int fifo_write (struct vnop_write_args *); -int fifo_ioctl (struct vnop_ioctl_args *); -int fifo_select (struct vnop_select_args *); -int fifo_inactive (struct vnop_inactive_args *); -int fifo_pathconf (struct vnop_pathconf_args *); -int fifo_advlock (struct vnop_advlock_args *); +int fifo_lookup(struct vnop_lookup_args *); +int fifo_open(struct vnop_open_args *); +int fifo_close(struct vnop_close_args *); +int fifo_read(struct vnop_read_args *); +int fifo_write(struct vnop_write_args *); +int fifo_ioctl(struct vnop_ioctl_args *); +int fifo_select(struct vnop_select_args *); +int fifo_inactive(struct vnop_inactive_args *); +int fifo_pathconf(struct vnop_pathconf_args *); +int fifo_advlock(struct vnop_advlock_args *); #endif /* KERNEL */ diff --git a/bsd/miscfs/fifofs/fifo_vnops.c b/bsd/miscfs/fifofs/fifo_vnops.c index 6146bcbfa..956824de4 100644 --- a/bsd/miscfs/fifofs/fifo_vnops.c +++ b/bsd/miscfs/fifofs/fifo_vnops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -79,48 +79,48 @@ #define VOPFUNC int (*)(void *) -int (**fifo_vnodeop_p)(void *); +int(**fifo_vnodeop_p)(void *); struct vnodeopv_entry_desc fifo_vnodeop_entries[] = { { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */ - { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ - { &vnop_mknod_desc, (VOPFUNC)err_mknod }, /* mknod */ - { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */ - { &vnop_close_desc, (VOPFUNC)fifo_close }, /* close */ - { &vnop_access_desc, (VOPFUNC)fifo_access }, /* access */ - { &vnop_getattr_desc, (VOPFUNC)fifo_getattr }, /* getattr */ - { &vnop_setattr_desc, (VOPFUNC)fifo_setattr }, /* setattr */ - { &vnop_read_desc, (VOPFUNC)fifo_read }, /* read */ - { &vnop_write_desc, (VOPFUNC)fifo_write }, /* write */ - { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */ - { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */ - { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */ - { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ - { &vnop_fsync_desc, (VOPFUNC)fifo_fsync }, /* fsync */ - { &vnop_remove_desc, (VOPFUNC)err_remove }, /* remove */ - { &vnop_link_desc, (VOPFUNC)err_link }, /* link */ - { &vnop_rename_desc, (VOPFUNC)err_rename }, /* rename */ - { &vnop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (VOPFUNC)err_symlink }, /* symlink */ - { &vnop_readdir_desc, (VOPFUNC)err_readdir }, /* readdir */ - { &vnop_readlink_desc, (VOPFUNC)err_readlink }, /* readlink */ - { &vnop_inactive_desc, (VOPFUNC)fifo_inactive }, /* inactive */ - { &vnop_reclaim_desc, (VOPFUNC)fifo_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (VOPFUNC)err_strategy }, /* strategy */ - { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (VOPFUNC)fifo_advlock }, /* advlock */ - { &vnop_bwrite_desc, (VOPFUNC)fifo_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ - { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (VOPFUNC)err_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (VOPFUNC)err_blockmap }, /* blockmap */ - { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } + { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */ + { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ + { &vnop_mknod_desc, (VOPFUNC)err_mknod }, /* mknod */ + { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */ + { &vnop_close_desc, (VOPFUNC)fifo_close }, /* close */ + { &vnop_access_desc, (VOPFUNC)fifo_access }, /* access */ + { &vnop_getattr_desc, (VOPFUNC)fifo_getattr }, /* getattr */ + { &vnop_setattr_desc, (VOPFUNC)fifo_setattr }, /* setattr */ + { &vnop_read_desc, (VOPFUNC)fifo_read }, /* read */ + { &vnop_write_desc, (VOPFUNC)fifo_write }, /* write */ + { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl 
*/ + { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */ + { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */ + { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ + { &vnop_fsync_desc, (VOPFUNC)fifo_fsync }, /* fsync */ + { &vnop_remove_desc, (VOPFUNC)err_remove }, /* remove */ + { &vnop_link_desc, (VOPFUNC)err_link }, /* link */ + { &vnop_rename_desc, (VOPFUNC)err_rename }, /* rename */ + { &vnop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (VOPFUNC)err_symlink }, /* symlink */ + { &vnop_readdir_desc, (VOPFUNC)err_readdir }, /* readdir */ + { &vnop_readlink_desc, (VOPFUNC)err_readlink }, /* readlink */ + { &vnop_inactive_desc, (VOPFUNC)fifo_inactive }, /* inactive */ + { &vnop_reclaim_desc, (VOPFUNC)fifo_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (VOPFUNC)err_strategy }, /* strategy */ + { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (VOPFUNC)fifo_advlock }, /* advlock */ + { &vnop_bwrite_desc, (VOPFUNC)fifo_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (VOPFUNC)err_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (VOPFUNC)err_blockmap }, /* blockmap */ + { (struct vnodeop_desc*)NULL, (int (*)(void *))NULL } }; struct vnodeopv_desc fifo_vnodeop_opv_desc = - { &fifo_vnodeop_p, fifo_vnodeop_entries }; +{ &fifo_vnodeop_p, fifo_vnodeop_entries }; /* * Trivial lookup routine that always fails. @@ -129,9 +129,8 @@ struct vnodeopv_desc fifo_vnodeop_opv_desc = int fifo_lookup(struct vnop_lookup_args *ap) { - *ap->a_vpp = NULL; - return (ENOTDIR); + return ENOTDIR; } /* @@ -153,48 +152,49 @@ retry: fip = vp->v_fifoinfo; - if (fip == (struct fifoinfo *)0) + if (fip == (struct fifoinfo *)0) { panic("fifo_open with no fifoinfo"); + } if ((fip->fi_flags & FIFO_CREATED) == 0) { if (fip->fi_flags & FIFO_INCREATE) { - fip->fi_flags |= FIFO_CREATEWAIT; + fip->fi_flags |= FIFO_CREATEWAIT; error = msleep(&fip->fi_flags, &vp->v_lock, PRIBIO | PCATCH, "fifocreatewait", NULL); if (error) { vnode_unlock(vp); - return(error); + return error; } goto retry; } else { - fip->fi_flags |= FIFO_INCREATE; + fip->fi_flags |= FIFO_INCREATE; vnode_unlock(vp); - if ( (error = socreate(AF_LOCAL, &rso, SOCK_STREAM, 0)) ) { - goto bad1; + if ((error = socreate(AF_LOCAL, &rso, SOCK_STREAM, 0))) { + goto bad1; } - if ( (error = socreate(AF_LOCAL, &wso, SOCK_STREAM, 0)) ) { + if ((error = socreate(AF_LOCAL, &wso, SOCK_STREAM, 0))) { (void)soclose(rso); - goto bad1; + goto bad1; } - if ( (error = soconnect2(wso, rso)) ) { + if ((error = soconnect2(wso, rso))) { (void)soclose(wso); (void)soclose(rso); - goto bad1; + goto bad1; } fip->fi_readers = fip->fi_writers = 0; - /* Lock ordering between wso and rso does not matter here - * because they are just created and no one has a reference to them - */ - socket_lock(wso, 1); + /* Lock ordering between wso and rso does not matter here + * because they are just created and no one has a reference to them + */ + socket_lock(wso, 1); wso->so_state |= SS_CANTRCVMORE; wso->so_snd.sb_lowat = PIPE_BUF; - socket_unlock(wso, 1); + socket_unlock(wso, 1); - socket_lock(rso, 1); + socket_lock(rso, 1); rso->so_state |= SS_CANTSENDMORE; - socket_unlock(rso, 1); + socket_unlock(rso, 1); vnode_lock(vp); 
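/*
 * Plumbing sketch (illustration only, not part of this change): the
 * FIFO is two AF_LOCAL stream sockets joined by soconnect2() and used
 * one-way each:
 *
 *	fifo_write() -> sosend(wso)  ==>  soreceive(rso) -> fifo_read()
 *
 * wso was marked SS_CANTRCVMORE and rso SS_CANTSENDMORE above, so each
 * socket carries data in a single direction only.
 */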
fip->fi_readsock = rso; @@ -202,7 +202,7 @@ retry: fip->fi_flags |= FIFO_CREATED; fip->fi_flags &= ~FIFO_INCREATE; - + if ((fip->fi_flags & FIFO_CREATEWAIT)) { fip->fi_flags &= ~FIFO_CREATEWAIT; wakeup(&fip->fi_flags); @@ -220,8 +220,9 @@ retry: fip->fi_writesock->so_state &= ~SS_CANTSENDMORE; socket_unlock(fip->fi_writesock, 1); - if (fip->fi_writers > 0) + if (fip->fi_writers > 0) { wakeup((caddr_t)&fip->fi_writers); + } } } if (ap->a_mode & FWRITE) { @@ -230,62 +231,67 @@ retry: socket_lock(fip->fi_readsock, 1); fip->fi_readsock->so_state &= ~SS_CANTRCVMORE; socket_unlock(fip->fi_readsock, 1); - - if (fip->fi_readers > 0) + + if (fip->fi_readers > 0) { wakeup((caddr_t)&fip->fi_readers); + } } } if ((ap->a_mode & FREAD) && (ap->a_mode & O_NONBLOCK) == 0) { if (fip->fi_writers == 0) { error = msleep((caddr_t)&fip->fi_readers, &vp->v_lock, - PCATCH | PSOCK, "fifoor", NULL); - if (error) + PCATCH | PSOCK, "fifoor", NULL); + if (error) { goto bad; + } if (fip->fi_readers == 1) { - if (fip->fi_writers > 0) + if (fip->fi_writers > 0) { wakeup((caddr_t)&fip->fi_writers); + } } } } if (ap->a_mode & FWRITE) { if (ap->a_mode & O_NONBLOCK) { if (fip->fi_readers == 0) { - error = ENXIO; - goto bad; + error = ENXIO; + goto bad; } } else { if (fip->fi_readers == 0) { - error = msleep((caddr_t)&fip->fi_writers,&vp->v_lock, - PCATCH | PSOCK, "fifoow", NULL); - if (error) + error = msleep((caddr_t)&fip->fi_writers, &vp->v_lock, + PCATCH | PSOCK, "fifoow", NULL); + if (error) { goto bad; + } if (fip->fi_writers == 1) { - if (fip->fi_readers > 0) + if (fip->fi_readers > 0) { wakeup((caddr_t)&fip->fi_readers); + } } } } } vnode_unlock(vp); - return (0); + return 0; bad: fifo_close_internal(vp, ap->a_mode, ap->a_context, 1); vnode_unlock(vp); - return (error); + return error; bad1: vnode_lock(vp); fip->fi_flags &= ~FIFO_INCREATE; - + if ((fip->fi_flags & FIFO_CREATEWAIT)) { fip->fi_flags &= ~FIFO_CREATEWAIT; wakeup(&fip->fi_flags); } vnode_unlock(vp); - return (error); + return error; } /* @@ -301,18 +307,20 @@ fifo_read(struct vnop_read_args *ap) int rflags; #if DIAGNOSTIC - if (uio->uio_rw != UIO_READ) + if (uio->uio_rw != UIO_READ) { panic("fifo_read mode"); + } #endif - if (uio_resid(uio) == 0) - return (0); + if (uio_resid(uio) == 0) { + return 0; + } rflags = (ap->a_ioflag & IO_NDELAY) ? MSG_NBIO : 0; startresid = uio_resid(uio); - /* fifo conformance - if we have a reader open on the fifo but no - * writers then we need to make sure we do not block. We do that by + /* fifo conformance - if we have a reader open on the fifo but no + * writers then we need to make sure we do not block. We do that by * checking the receive buffer and if empty set error to EWOULDBLOCK. 
* If error is set to EWOULDBLOCK we skip the call into soreceive */ @@ -326,11 +334,11 @@ fifo_read(struct vnop_read_args *ap) /* skip soreceive to avoid blocking when we have no writers */ if (error != EWOULDBLOCK) { error = soreceive(rso, (struct sockaddr **)0, uio, (struct mbuf **)0, - (struct mbuf **)0, &rflags); - if (error == 0) + (struct mbuf **)0, &rflags); + if (error == 0) { lock_vnode_and_post(ap->a_vp, 0); - } - else { + } + } else { /* clear EWOULDBLOCK and return EOF (zero) */ error = 0; } @@ -342,7 +350,7 @@ fifo_read(struct vnop_read_args *ap) rso->so_state &= ~SS_CANTRCVMORE; socket_unlock(rso, 1); } - return (error); + return error; } /* @@ -355,15 +363,17 @@ fifo_write(struct vnop_write_args *ap) int error; #if DIAGNOSTIC - if (ap->a_uio->uio_rw != UIO_WRITE) + if (ap->a_uio->uio_rw != UIO_WRITE) { panic("fifo_write mode"); + } #endif error = sosend(wso, (struct sockaddr *)0, ap->a_uio, NULL, - (struct mbuf *)0, (ap->a_ioflag & IO_NDELAY) ? MSG_NBIO : 0); - if (error == 0) + (struct mbuf *)0, (ap->a_ioflag & IO_NDELAY) ? MSG_NBIO : 0); + if (error == 0) { lock_vnode_and_post(ap->a_vp, 0); + } - return (error); + return error; } /* @@ -376,23 +386,26 @@ fifo_ioctl(struct vnop_ioctl_args *ap) struct fileglob filefg; int error; - if (ap->a_command == FIONBIO) - return (0); + if (ap->a_command == FIONBIO) { + return 0; + } bzero(&filetmp, sizeof(struct fileproc)); filetmp.f_fglob = &filefg; if (ap->a_fflag & FREAD) { filetmp.f_fglob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock; error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_context); - if (error) - return (error); + if (error) { + return error; + } } if (ap->a_fflag & FWRITE) { filetmp.f_fglob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock; error = soo_ioctl(&filetmp, ap->a_command, ap->a_data, ap->a_context); - if (error) - return (error); + if (error) { + return error; + } } - return (0); + return 0; } int @@ -407,22 +420,24 @@ fifo_select(struct vnop_select_args *ap) if (ap->a_which & FREAD) { filetmp.f_fglob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_readsock; ready = soo_select(&filetmp, ap->a_which, ap->a_wql, ap->a_context); - if (ready) - return (ready); + if (ready) { + return ready; + } } if (ap->a_which & FWRITE) { filetmp.f_fglob->fg_data = (caddr_t)ap->a_vp->v_fifoinfo->fi_writesock; ready = soo_select(&filetmp, ap->a_which, ap->a_wql, ap->a_context); - if (ready) - return (ready); + if (ready) { + return ready; + } } - return (0); + return 0; } int fifo_inactive(__unused struct vnop_inactive_args *ap) { - return (0); + return 0; } @@ -443,19 +458,20 @@ fifo_close_internal(vnode_t vp, int fflag, __unused vfs_context_t context, int l struct socket *rso; struct socket *wso; - if (!locked) + if (!locked) { vnode_lock(vp); + } if ((fip->fi_flags & FIFO_CREATED) == 0) { - if (!locked) + if (!locked) { vnode_unlock(vp); - return(0); - + } + return 0; } - + if (fflag & FREAD) { fip->fi_readers--; - if (fip->fi_readers == 0){ + if (fip->fi_readers == 0) { socket_lock(fip->fi_writesock, 1); socantsendmore(fip->fi_writesock); socket_unlock(fip->fi_writesock, 1); @@ -472,16 +488,18 @@ fifo_close_internal(vnode_t vp, int fflag, __unused vfs_context_t context, int l } #if 0 if (vnode_isinuse_locked(vp, 0, 1)) { - if (!locked) + if (!locked) { vnode_unlock(vp); - return (0); + } + return 0; } #endif if (fip->fi_writers || fip->fi_readers) { - if (!locked) + if (!locked) { vnode_unlock(vp); - return (0); + } + return 0; } wso = fip->fi_writesock; @@ -489,14 +507,16 @@ 
fifo_close_internal(vnode_t vp, int fflag, __unused vfs_context_t context, int l fip->fi_readsock = NULL; fip->fi_writesock = NULL; fip->fi_flags &= ~FIFO_CREATED; - if (!locked) + if (!locked) { vnode_unlock(vp); + } error1 = soclose(rso); error2 = soclose(wso); - if (error1) - return (error1); - return (error2); + if (error1) { + return error1; + } + return error2; } /* @@ -508,7 +528,7 @@ fifo_printinfo(struct vnode *vp) struct fifoinfo *fip = vp->v_fifoinfo; printf(", fifo with %ld readers and %ld writers", - fip->fi_readers, fip->fi_writers); + fip->fi_readers, fip->fi_writers); } /* @@ -520,15 +540,15 @@ fifo_pathconf(struct vnop_pathconf_args *ap) switch (ap->a_name) { case _PC_LINK_MAX: *ap->a_retval = LINK_MAX; - return (0); + return 0; case _PC_PIPE_BUF: *ap->a_retval = PIPE_BUF; - return (0); + return 0; case _PC_CHOWN_RESTRICTED: - *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ - return (0); + *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ + return 0; default: - return (EINVAL); + return EINVAL; } /* NOTREACHED */ } @@ -539,8 +559,7 @@ fifo_pathconf(struct vnop_pathconf_args *ap) int fifo_ebadf(__unused void *dummy) { - - return (EBADF); + return EBADF; } /* @@ -549,14 +568,13 @@ fifo_ebadf(__unused void *dummy) int fifo_advlock(__unused struct vnop_advlock_args *ap) { - - return (ENOTSUP); + return ENOTSUP; } /* You'd certainly better have an iocount on the vnode! */ int -fifo_freespace(struct vnode *vp, long *count) +fifo_freespace(struct vnode *vp, long *count) { struct socket *rsock; rsock = vp->v_fifoinfo->fi_readsock; @@ -567,7 +585,7 @@ fifo_freespace(struct vnode *vp, long *count) } int -fifo_charcount(struct vnode *vp, int *count) +fifo_charcount(struct vnode *vp, int *count) { int mcount; int err = sock_ioctl(vp->v_fifoinfo->fi_readsock, FIONREAD, (void*)&mcount); diff --git a/bsd/miscfs/mockfs/mockfs.h b/bsd/miscfs/mockfs/mockfs.h index 3662af4bf..dab2af734 100644 --- a/bsd/miscfs/mockfs/mockfs.h +++ b/bsd/miscfs/mockfs/mockfs.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -72,4 +72,3 @@ typedef struct mockfs_mount * mockfs_mount_t; #endif /* MOCKFS */ #endif /* MOCKFS_H */ - diff --git a/bsd/miscfs/mockfs/mockfs_fsnode.c b/bsd/miscfs/mockfs/mockfs_fsnode.c index c6a2582f7..8771bd43c 100644 --- a/bsd/miscfs/mockfs/mockfs_fsnode.c +++ b/bsd/miscfs/mockfs/mockfs_fsnode.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -54,7 +54,8 @@ * * Returns 0 on success, or an error. */ -int mockfs_fsnode_create(mount_t mp, uint8_t type, mockfs_fsnode_t * fsnpp) +int +mockfs_fsnode_create(mount_t mp, uint8_t type, mockfs_fsnode_t * fsnpp) { int rvalue; uint64_t new_size; @@ -68,20 +69,20 @@ int mockfs_fsnode_create(mount_t mp, uint8_t type, mockfs_fsnode_t * fsnpp) } switch (type) { - case MOCKFS_ROOT: - break; - case MOCKFS_DEV: - break; - case MOCKFS_FILE: - /* - * For a regular file, size is meaningful, but it will always be equal to the - * size of the backing device. - */ - new_size = mp->mnt_devvp->v_specinfo->si_devsize; - break; - default: - rvalue = EINVAL; - goto done; + case MOCKFS_ROOT: + break; + case MOCKFS_DEV: + break; + case MOCKFS_FILE: + /* + * For a regular file, size is meaningful, but it will always be equal to the + * size of the backing device. + */ + new_size = mp->mnt_devvp->v_specinfo->si_devsize; + break; + default: + rvalue = EINVAL; + goto done; } MALLOC(*fsnpp, typeof(*fsnpp), sizeof(**fsnpp), M_TEMP, M_WAITOK | M_ZERO); @@ -106,7 +107,8 @@ done: * * Returns 0 on success, or an error. */ -int mockfs_fsnode_destroy(mockfs_fsnode_t fsnp) +int +mockfs_fsnode_destroy(mockfs_fsnode_t fsnp) { int rvalue; @@ -125,8 +127,9 @@ int mockfs_fsnode_destroy(mockfs_fsnode_t fsnp) * For now, panic in this case; I don't expect anyone to ask us to destroy a node with a live * vfs reference, but this will tell me if that assumption is untrue. */ - if (fsnp->vp) + if (fsnp->vp) { panic("mockfs_fsnode_destroy called on node with live vnode; fsnp = %p (in case gdb is screwing with you)", fsnp); + } /* * If this node has children, we need to destroy them. @@ -136,20 +139,26 @@ int mockfs_fsnode_destroy(mockfs_fsnode_t fsnp) * we've failed to destroy the subtree, which means someone called destroy when they should * not have done so). 
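*
* In outline (a sketch of the body that follows):
*
*	mockfs_fsnode_destroy(fsnp):
*		destroy(child_a), then destroy(child_b)   panic if either fails
*		orphan fsnp if it still has a parent      panic on failure
*		FREE(fsnp, M_TEMP)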
*/ - if (fsnp->child_a) - if ((rvalue = mockfs_fsnode_destroy(fsnp->child_a))) + if (fsnp->child_a) { + if ((rvalue = mockfs_fsnode_destroy(fsnp->child_a))) { panic("mockfs_fsnode_destroy failed on child_a; fsnp = %p (in case gdb is screwing with you), rvalue = %d", fsnp, rvalue); + } + } - if (fsnp->child_b) - if ((rvalue = mockfs_fsnode_destroy(fsnp->child_b))) + if (fsnp->child_b) { + if ((rvalue = mockfs_fsnode_destroy(fsnp->child_b))) { panic("mockfs_fsnode_destroy failed on child_b; fsnp = %p (in case gdb is screwing with you), rvalue = %d", fsnp, rvalue); + } + } /* * We need to orphan this node before we destroy it. */ - if (fsnp->parent) - if ((rvalue = mockfs_fsnode_orphan(fsnp))) + if (fsnp->parent) { + if ((rvalue = mockfs_fsnode_orphan(fsnp))) { panic("mockfs_fsnode_orphan failed during destroy; fsnp = %p (in case gdb is screwing with you), rvalue = %d", fsnp, rvalue); + } + } FREE(fsnp, M_TEMP); done: @@ -162,7 +171,8 @@ done: * * Returns 0 on success, or an error. */ -int mockfs_fsnode_adopt(mockfs_fsnode_t parent, mockfs_fsnode_t child) +int +mockfs_fsnode_adopt(mockfs_fsnode_t parent, mockfs_fsnode_t child) { int rvalue; @@ -189,18 +199,16 @@ int mockfs_fsnode_adopt(mockfs_fsnode_t parent, mockfs_fsnode_t child) * TODO: Enforce that the parent cannot have two children of the same type (for the moment, this is * implicit in the structure of the tree constructed by mockfs_mountroot, so we don't need to * worry about it). - * + * * Can the parent support another child (food, shelter, unused pointers)? */ if (!parent->child_a) { parent->child_a = child; child->parent = parent; - } - else if (!parent->child_b) { + } else if (!parent->child_b) { parent->child_b = child; child->parent = parent; - } - else { + } else { rvalue = ENOMEM; } @@ -213,7 +221,8 @@ done: * * Returns 0 on success, or an error. */ -int mockfs_fsnode_orphan(mockfs_fsnode_t fsnp) +int +mockfs_fsnode_orphan(mockfs_fsnode_t fsnp) { int rvalue; mockfs_fsnode_t parent; @@ -228,21 +237,21 @@ int mockfs_fsnode_orphan(mockfs_fsnode_t fsnp) /* * Disallow orphaning a node with a live vnode for now. */ - if (fsnp->vp) + if (fsnp->vp) { panic("mockfs_fsnode_orphan called on node with live vnode; fsnp = %p (in case gdb is screwing with you)", fsnp); + } parent = fsnp->parent; if (parent->child_a == fsnp) { parent->child_a = NULL; fsnp->parent = NULL; - } - else if (parent->child_b == fsnp) { + } else if (parent->child_b == fsnp) { parent->child_b = NULL; fsnp->parent = NULL; - } - else + } else { panic("mockfs_fsnode_orphan insanity, fsnp->parent != parent->child; fsnp = %p (in case gdb is screwing with you)", fsnp); + } done: return rvalue; @@ -254,28 +263,30 @@ done: * requested type. This method exists to support lookup (which is responsible for mapping names, which * we have no conception of currently, onto vnodes). * - * This should be safe, as we are walking the read-only parts of the filesystem structure (not touching + * This should be safe, as we are walking the read-only parts of the filesystem structure (not touching * the vnode). * * Returns 0 on success, or an error. 
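*
* Typical use, mirroring mockfs_lookup() later in this patch (sketch):
*
*	mockfs_fsnode_t target_fsnode = NULL;
*	mockfs_fsnode_child_by_type(fsnode, MOCKFS_DEV, &target_fsnode);
*	if (target_fsnode)
*		rvalue = mockfs_fsnode_vnode(target_fsnode, vpp);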
*/ -int mockfs_fsnode_child_by_type(mockfs_fsnode_t parent, uint8_t type, mockfs_fsnode_t * child) +int +mockfs_fsnode_child_by_type(mockfs_fsnode_t parent, uint8_t type, mockfs_fsnode_t * child) { int rvalue; - + rvalue = 0; - + if (!parent || !child) { rvalue = EINVAL; goto done; } - if ((parent->child_a) && (parent->child_a->type == type)) + if ((parent->child_a) && (parent->child_a->type == type)) { *child = parent->child_a; - else if ((parent->child_b) && (parent->child_b->type == type)) + } else if ((parent->child_b) && (parent->child_b->type == type)) { *child = parent->child_b; - else + } else { rvalue = ENOENT; + } done: return rvalue; @@ -288,7 +299,8 @@ done: * * Returns 0 on success, or an error. */ -int mockfs_fsnode_vnode(mockfs_fsnode_t fsnp, vnode_t * vpp) +int +mockfs_fsnode_vnode(mockfs_fsnode_t fsnp, vnode_t * vpp) { int rvalue; memory_object_control_t ubc_mem_object; @@ -311,8 +323,7 @@ int mockfs_fsnode_vnode(mockfs_fsnode_t fsnp, vnode_t * vpp) if (!rvalue) { *vpp = fsnp->vp; } - } - else { + } else { /* * We need to create the vnode; this will be unpleasant. */ @@ -336,17 +347,20 @@ int mockfs_fsnode_vnode(mockfs_fsnode_t fsnp, vnode_t * vpp) */ ubc_mem_object = ubc_getobject(fsnp->vp, 0); - if (!ubc_mem_object) + if (!ubc_mem_object) { panic("mockfs_fsvnode failed to get ubc_mem_object for a new vnode"); + } rvalue = pager_map_to_phys_contiguous(ubc_mem_object, 0, (mockfs_mnt->mockfs_memdev_base << PAGE_SHIFT), fsnp->size); - if (rvalue) + if (rvalue) { panic("mockfs_fsnode_vnode failed to create fictitious pages for a memory-backed device; rvalue = %d", rvalue); + } } - if (!rvalue) + if (!rvalue) { *vpp = fsnp->vp; + } } lck_mtx_unlock(&mockfs_mnt->mockfs_mnt_mtx); @@ -363,7 +377,8 @@ done: * * Returns 0 on success, or an error. */ -int mockfs_fsnode_drop_vnode(mockfs_fsnode_t fsnp) +int +mockfs_fsnode_drop_vnode(mockfs_fsnode_t fsnp) { int rvalue; mockfs_mount_t mockfs_mnt; @@ -391,4 +406,3 @@ int mockfs_fsnode_drop_vnode(mockfs_fsnode_t fsnp) done: return rvalue; } - diff --git a/bsd/miscfs/mockfs/mockfs_fsnode.h b/bsd/miscfs/mockfs/mockfs_fsnode.h index 0d9a2b9f9..f2620bfbc 100644 --- a/bsd/miscfs/mockfs/mockfs_fsnode.h +++ b/bsd/miscfs/mockfs/mockfs_fsnode.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -84,4 +84,3 @@ int mockfs_fsnode_drop_vnode(mockfs_fsnode_t fsnp); #endif /* MOCKFS */ #endif /* MOCKFS_FSNODE_H */ - diff --git a/bsd/miscfs/mockfs/mockfs_vfsops.c b/bsd/miscfs/mockfs/mockfs_vfsops.c index 6116e53aa..4a524a054 100644 --- a/bsd/miscfs/mockfs/mockfs_vfsops.c +++ b/bsd/miscfs/mockfs/mockfs_vfsops.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -63,7 +63,8 @@ int mockfs_init(__unused struct vfsconf * vfsc); * * Returns 0 on success, or an error. */ -int mockfs_mountroot(mount_t mp, vnode_t rvp, __unused vfs_context_t ctx) +int +mockfs_mountroot(mount_t mp, vnode_t rvp, __unused vfs_context_t ctx) { int rvalue = 0; mockfs_fsnode_t root_fsnode = NULL; @@ -116,23 +117,28 @@ int mockfs_mountroot(mount_t mp, vnode_t rvp, __unused vfs_context_t ctx) * All of the needed nodes/structures have been set up; now we just need to establish the relationships * between the various mockfs nodes. */ - if ((rvalue = mockfs_fsnode_adopt(root_fsnode, dev_fsnode))) + if ((rvalue = mockfs_fsnode_adopt(root_fsnode, dev_fsnode))) { goto done; + } - if ((rvalue = mockfs_fsnode_adopt(root_fsnode, file_fsnode))) + if ((rvalue = mockfs_fsnode_adopt(root_fsnode, file_fsnode))) { goto done; + } mockfs_mount_data->mockfs_root = root_fsnode; - mp->mnt_data = (typeof(mp->mnt_data)) mockfs_mount_data; + mp->mnt_data = (typeof(mp->mnt_data))mockfs_mount_data; done: if (rvalue) { - if (file_fsnode) + if (file_fsnode) { mockfs_fsnode_destroy(file_fsnode); - if (dev_fsnode) + } + if (dev_fsnode) { mockfs_fsnode_destroy(dev_fsnode); - if (root_fsnode) + } + if (root_fsnode) { mockfs_fsnode_destroy(root_fsnode); + } if (mockfs_mount_data) { lck_mtx_destroy(&mockfs_mount_data->mockfs_mnt_mtx, mockfs_mtx_grp); FREE(mockfs_mount_data, M_TEMP); @@ -148,7 +154,8 @@ done: * * Returns 0 on success, or an error. 
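*
* Teardown order, sketched from the body below:
*
*	vflush(mp, NULL, flags)         reclaim vnodes (FORCECLOSE under MNT_FORCE)
*	mockfs_fsnode_destroy(root)     free the fsnode tree (panics on failure)
*	lck_mtx_destroy + FREE          release the per-mount state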
*/ -int mockfs_unmount(struct mount *mp, int mntflags, __unused vfs_context_t ctx) +int +mockfs_unmount(struct mount *mp, int mntflags, __unused vfs_context_t ctx) { int rvalue; int vflush_flags; @@ -160,7 +167,7 @@ int mockfs_unmount(struct mount *mp, int mntflags, __unused vfs_context_t ctx) /* * Reclaim the vnodes for the mount (forcibly, if requested; given that mockfs only support mountroot - * at the moment, this should ALWAYS be forced), + * at the moment, this should ALWAYS be forced), */ if (mntflags & MNT_FORCE) { vflush_flags |= FORCECLOSE; @@ -168,8 +175,9 @@ int mockfs_unmount(struct mount *mp, int mntflags, __unused vfs_context_t ctx) rvalue = vflush(mp, NULL, vflush_flags); - if (rvalue) + if (rvalue) { return rvalue; + } /* * Past this point, errors are likely to be unrecoverable, so panic if we're given any excuse; we @@ -181,8 +189,9 @@ int mockfs_unmount(struct mount *mp, int mntflags, __unused vfs_context_t ctx) mockfs_mnt->mockfs_root = NULL; rvalue = mockfs_fsnode_destroy(root_fsnode); - if (rvalue) + if (rvalue) { panic("mockfs_unmount: Failed to destroy the fsnode tree"); + } lck_mtx_destroy(&mockfs_mnt->mockfs_mnt_mtx, mockfs_mtx_grp); FREE(mockfs_mnt, M_TEMP); @@ -197,7 +206,8 @@ int mockfs_unmount(struct mount *mp, int mntflags, __unused vfs_context_t ctx) * * Returns 0 on success, or an error. */ -int mockfs_root(mount_t mp, vnode_t * vpp, __unused vfs_context_t ctx) +int +mockfs_root(mount_t mp, vnode_t * vpp, __unused vfs_context_t ctx) { int rvalue; @@ -211,9 +221,10 @@ int mockfs_root(mount_t mp, vnode_t * vpp, __unused vfs_context_t ctx) * * Returns 0. */ -int mockfs_sync(__unused struct mount *mp, __unused int waitfor, __unused vfs_context_t ctx) +int +mockfs_sync(__unused struct mount *mp, __unused int waitfor, __unused vfs_context_t ctx) { - return (0); + return 0; } /* @@ -223,7 +234,8 @@ int mockfs_sync(__unused struct mount *mp, __unused int waitfor, __unused vfs_co * * Returns 0 on success, or an error. */ -int mockfs_init(__unused struct vfsconf * vfsc) +int +mockfs_init(__unused struct vfsconf * vfsc) { mockfs_mtx_attr = lck_attr_alloc_init(); mockfs_grp_attr = lck_grp_attr_alloc_init(); @@ -237,7 +249,7 @@ int mockfs_init(__unused struct vfsconf * vfsc) panic("mockfs_init failed to allocate lock information"); } - return (0); + return 0; } struct vfsops mockfs_vfsops = { diff --git a/bsd/miscfs/mockfs/mockfs_vnops.c b/bsd/miscfs/mockfs/mockfs_vnops.c index 7df942594..406eddfc2 100644 --- a/bsd/miscfs/mockfs/mockfs_vnops.c +++ b/bsd/miscfs/mockfs/mockfs_vnops.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -81,7 +81,8 @@ int mockfs_blockmap(struct vnop_blockmap_args * ap); * * Returns 0 on success, or an error. */ -int mockfs_lookup(struct vnop_lookup_args * ap) +int +mockfs_lookup(struct vnop_lookup_args * ap) { char held_char; int rvalue; @@ -116,21 +117,22 @@ int mockfs_lookup(struct vnop_lookup_args * ap) * accidentally commit a change to the init_process pathname. We map from name to node type * here, as mockfs doesn't current use names; just unique types. */ - if (!strncmp(cnp->cn_nameptr, "sbin", 5)) + if (!strncmp(cnp->cn_nameptr, "sbin", 5)) { target_fsnode = fsnode; - else if (!strncmp(cnp->cn_nameptr, "dev", 4)) + } else if (!strncmp(cnp->cn_nameptr, "dev", 4)) { mockfs_fsnode_child_by_type(fsnode, MOCKFS_DEV, &target_fsnode); - else if (!strncmp(cnp->cn_nameptr, "launchd", 8)) + } else if (!strncmp(cnp->cn_nameptr, "launchd", 8)) { mockfs_fsnode_child_by_type(fsnode, MOCKFS_FILE, &target_fsnode); - else + } else { rvalue = ENOENT; + } cnp->cn_nameptr[cnp->cn_namelen] = held_char; - if (target_fsnode) + if (target_fsnode) { rvalue = mockfs_fsnode_vnode(target_fsnode, vpp); - } - else { + } + } else { /* * We aren't looking in root; the query may actually be reasonable, but we're not * going to support it. @@ -138,7 +140,7 @@ int mockfs_lookup(struct vnop_lookup_args * ap) rvalue = ENOENT; } - return rvalue; + return rvalue; } /* @@ -157,7 +159,8 @@ int mockfs_lookup(struct vnop_lookup_args * ap) * * Returns 0 on success, or an error. */ -int mockfs_getattr(struct vnop_getattr_args * ap) +int +mockfs_getattr(struct vnop_getattr_args * ap) { /* * For the moment, we don't actually care about most attributes. We'll @@ -179,7 +182,7 @@ int mockfs_getattr(struct vnop_getattr_args * ap) VATTR_RETURN(vap, va_data_size, fsnode->size); VATTR_RETURN(vap, va_data_alloc, fsnode->size); - return (0); + return 0; } /* @@ -201,7 +204,8 @@ int mockfs_getattr(struct vnop_getattr_args * ap) * * Returns 0 on success, or an error. */ -int mockfs_read(struct vnop_read_args * ap) +int +mockfs_read(struct vnop_read_args * ap) { int rvalue; vnode_t vp; @@ -216,8 +220,7 @@ int mockfs_read(struct vnop_read_args * ap) */ if (vp->v_type == VREG) { rvalue = cluster_read(vp, ap->a_uio, fsnode->size, ap->a_ioflag); - } - else { + } else { /* * You've tried to read from a nonregular file; I hate you. */ @@ -239,7 +242,8 @@ int mockfs_read(struct vnop_read_args * ap) * is always in memory, we have very little to do as part of reclaim, so we'll just zero a few pointers and let * VFS reclaim the vnode. */ -int mockfs_reclaim(struct vnop_reclaim_args * ap) +int +mockfs_reclaim(struct vnop_reclaim_args * ap) { int rvalue; vnode_t vp; @@ -265,7 +269,8 @@ int mockfs_reclaim(struct vnop_reclaim_args * ap) * * Returns 0 on success, or an error. */ -int mockfs_strategy(struct vnop_strategy_args * ap) +int +mockfs_strategy(struct vnop_strategy_args * ap) { int rvalue; vnode_t dvp; @@ -280,8 +285,7 @@ int mockfs_strategy(struct vnop_strategy_args * ap) if (dvp) { rvalue = buf_strategy(dvp, ap); vnode_put(dvp); - } - else { + } else { /* * I'm not certain this is the BEST error to return for this case. */ @@ -310,7 +314,8 @@ int mockfs_strategy(struct vnop_strategy_args * ap) * * Returns 0 on success, or an error. 
*/ -int mockfs_pagein(struct vnop_pagein_args * ap) +int +mockfs_pagein(struct vnop_pagein_args * ap) { mockfs_fsnode_t fsnode; mockfs_mount_t mockfs_mnt; @@ -319,16 +324,17 @@ * Nothing special needed from us; just nab the filesize and kick the work over to cluster_pagein. */ fsnode = (mockfs_fsnode_t) ap->a_vp->v_data; - mockfs_mnt = ((mockfs_mount_t) fsnode->mnt->mnt_data); + mockfs_mnt = ((mockfs_mount_t) fsnode->mnt->mnt_data); /* * If we represent a memory backed device, we should be pointing directly to the backing store; we should never * see a pagein in this case. */ - if (mockfs_mnt->mockfs_memory_backed) + if (mockfs_mnt->mockfs_memory_backed) { panic("mockfs_pagein called for a memory-backed device"); + } - return cluster_pagein(ap->a_vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, ap->a_size, fsnode->size, ap->a_flags); + return cluster_pagein(ap->a_vp, ap->a_pl, ap->a_pl_offset, ap->a_f_offset, ap->a_size, fsnode->size, ap->a_flags); } /* @@ -338,7 +344,7 @@ int mockfs_pagein(struct vnop_pagein_args * ap) * off_t a_foffset; // File offset we are interested in * size_t a_size; // Size of the region we are interested in * daddr64_t *a_bpn; // Return parameter: physical block number the region we are interested in starts at - * size_t *a_run; // Return parameter: number of contiguous bytes of data + * size_t *a_run; // Return parameter: number of contiguous bytes of data * void *a_poff; // Unused, as far as I know * int a_flags; // Used to distinguish reads and writes; we don't care * vfs_context_t a_context; // We don't care about this (for now) @@ -353,7 +359,8 @@ int mockfs_pagein(struct vnop_pagein_args * ap) * * Returns 0 on success, or an error. */ -int mockfs_blockmap(struct vnop_blockmap_args * ap) +int +mockfs_blockmap(struct vnop_blockmap_args * ap) { int rvalue; off_t foffset; @@ -376,8 +383,9 @@ int mockfs_blockmap(struct vnop_blockmap_args * ap) * be satisfied from the UBC, and any call to blockmap (indicating an attempted IO to the backing store) * is therefore disallowed. */ - if (((mockfs_mount_t) fsnode->mnt->mnt_data)->mockfs_memory_backed) + if (((mockfs_mount_t) fsnode->mnt->mnt_data)->mockfs_memory_backed) { printf("mockfs_blockmap called for a memory-backed device\n"); + } /* * This will ultimately be simple; the vnode must be VREG (init), and the mapping will be 1 to 1. @@ -391,15 +399,14 @@ int mockfs_blockmap(struct vnop_blockmap_args * ap) /* We've been asked for more data than the backing device can provide; we're done. */ panic("mockfs_blockmap was asked for a region that extended past the end of the backing device"); } - } - else { + } else { rvalue = ENOTSUP; } return rvalue; } -int (**mockfs_vnodeop_p)(void *); +int(**mockfs_vnodeop_p)(void *); struct vnodeopv_entry_desc mockfs_vnodeop_entries[] = { { &vnop_default_desc, (VOPFUNC) vn_default_error }, /* default */ { &vnop_lookup_desc, (VOPFUNC) mockfs_lookup }, /* lookup */ @@ -443,4 +450,3 @@ struct vnodeopv_desc mockfs_vnodeop_opv_desc = { &mockfs_vnodeop_p, mockfs_vnodeop_entries }; - diff --git a/bsd/miscfs/mockfs/mockfs_vnops.h b/bsd/miscfs/mockfs/mockfs_vnops.h index 4a5314f53..ca7827e88 100644 --- a/bsd/miscfs/mockfs/mockfs_vnops.h +++ b/bsd/miscfs/mockfs/mockfs_vnops.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved.
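
The blockmap comment above already contains the whole design: the only interesting vnode is VREG (init), the logical-to-physical mapping is 1 to 1, and the returned run is clamped by what the backing device can provide. Below is a minimal user-space sketch of just that arithmetic; the toy_* names and the fixed block-size field are assumptions for illustration, since the kernel takes the real file size and block size from the mount and the device.

#include <stdint.h>
#include <stddef.h>

struct toy_backing {
	uint64_t file_size;   /* size of the lone regular file */
	uint32_t block_size;  /* e.g. 512; mockfs asks the device */
};

/*
 * 1:1 mapping: byte f_offset of the file lives in device block
 * f_offset / block_size, and the contiguous run is whatever remains
 * of the file. mockfs panics when asked past EOF; we return -1.
 */
static int
toy_blockmap(const struct toy_backing *b, uint64_t f_offset, size_t size,
    uint64_t *bpn, size_t *run)
{
	if (f_offset >= b->file_size) {
		return -1;
	}
	*bpn = f_offset / b->block_size;
	uint64_t avail = b->file_size - f_offset;   /* bytes left on device */
	*run = ((uint64_t)size < avail) ? size : (size_t)avail;
	return 0;
}
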
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,9 +31,8 @@ #if MOCKFS -extern int (**mockfs_vnodeop_p)(void *); +extern int(**mockfs_vnodeop_p)(void *); #endif /* MOCKFS */ #endif /* MOCKFS_VNOPS_H */ - diff --git a/bsd/miscfs/nullfs/null_subr.c b/bsd/miscfs/nullfs/null_subr.c index 79e9d208d..3743d52b2 100644 --- a/bsd/miscfs/nullfs/null_subr.c +++ b/bsd/miscfs/nullfs/null_subr.c @@ -89,10 +89,10 @@ static lck_grp_attr_t * null_hashlck_grp_attr; static u_long null_hash_mask; /* os x doesn't have hashes built into vnode. gonna try doing what freebsd does - anyway - Don't want to create a dependency on vnode_internal.h and the real struct - vnode. - 9 is an eyeball of the log 2 size of vnode */ + * anyway + * Don't want to create a dependency on vnode_internal.h and the real struct + * vnode. 
+ * 9 is an eyeball of the log 2 size of vnode */ static int vnsz2log = 9; static int null_hashins(struct mount *, struct null_node *, struct vnode **); @@ -145,7 +145,7 @@ nullfs_init(__unused struct vfsconf * vfsp) lck_mtx_init(&null_hashmtx, null_hashlck_grp, null_hashlck_attr); null_node_hashtbl = hashinit(NULL_HASH_SIZE, M_TEMP, &null_hash_mask); NULLFSDEBUG("%s finished\n", __FUNCTION__); - return (0); + return 0; error: printf("NULLFS: failed to get lock element\n"); if (null_hashlck_grp_attr) { @@ -182,7 +182,7 @@ nullfs_uninit() lck_attr_free(null_hashlck_attr); null_hashlck_attr = NULL; } - return (0); + return 0; } /* @@ -250,10 +250,10 @@ null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp) ovp = NULLTOV(oxp); if (oxp->null_lowervid != vnode_vid(oxp->null_lowervp)) { /*vp doesn't exist so return null (not sure we are actually gonna catch - recycle right now - This is an exceptional case right now, it suggests the vnode we are - trying to add has been recycled - don't add it.*/ + * recycle right now + * This is an exceptional case right now, it suggests the vnode we are + * trying to add has been recycled + * don't add it.*/ error = EIO; goto end; } @@ -303,7 +303,7 @@ null_nodecreate(struct vnode * lowervp) /* assumption is that vnode has iocount on it after vnode create */ int null_getnewvnode( - struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root) + struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root) { struct vnode_fsparam vnfs_param; int error = 0; @@ -350,7 +350,7 @@ null_getnewvnode( */ int null_nodeget( - struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root) + struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root) { struct vnode * vp; int error; @@ -363,7 +363,7 @@ null_nodeget( /* null_hashget checked the vid, so if we got something here its legit to * the best of our knowledge*/ /* if we found something then there is an iocount on vpp, - if we didn't find something then vpp shouldn't be used by the caller */ + * if we didn't find something then vpp shouldn't be used by the caller */ return error; } @@ -372,8 +372,7 @@ null_nodeget( * duplicates later, when adding new vnode to hash. */ error = vnode_ref(lowervp); // take a ref on lowervp so we let the system know we care about it - if(error) - { + if (error) { // Failed to get a reference on the lower vp so bail. Lowervp may be gone already. 
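
The comments in null_nodeget() describe a reference discipline worth spelling out: take a usecount on the lower vnode up front so it cannot disappear while the shadow node is built, and drop that count on every failure path before returning. A self-contained sketch of that shape follows; all toy_* names are stand-ins for vnode_ref()/vnode_rele()/null_getnewvnode(), not the kernel API.

#include <errno.h>
#include <stdlib.h>

struct toy_vnode {
	int usecount;
	int dead;        /* set when the vnode has been recycled */
};

static int
toy_ref(struct toy_vnode *lvp)          /* like vnode_ref() */
{
	if (lvp->dead) {
		return EIO;             /* lower vnode already gone */
	}
	lvp->usecount++;
	return 0;
}

static void
toy_rele(struct toy_vnode *lvp)         /* like vnode_rele() */
{
	lvp->usecount--;
}

/*
 * Shape of null_nodeget(): hold a reference across the create and
 * undo it on every failure path, so the caller never inherits a
 * half-taken reference.
 */
static int
toy_nodeget(struct toy_vnode *lvp, struct toy_vnode **vpp)
{
	int error = toy_ref(lvp);
	if (error) {
		return error;           /* nothing held, nothing to undo */
	}
	*vpp = calloc(1, sizeof(**vpp));  /* stands in for null_getnewvnode() */
	if (*vpp == NULL) {
		toy_rele(lvp);          /* creation failed: drop the ref */
		return ENOMEM;
	}
	return 0;                       /* *vpp is live, ref is kept */
}
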
return error; } @@ -382,7 +381,7 @@ null_nodeget( if (error) { vnode_rele(lowervp); - return (error); + return error; } /* @@ -401,5 +400,5 @@ null_nodeget( /* vp has an iocount from null_getnewvnode */ *vpp = vp; - return (0); + return 0; } diff --git a/bsd/miscfs/nullfs/null_vfsops.c b/bsd/miscfs/nullfs/null_vfsops.c index 5a191d2e1..0e8330da7 100644 --- a/bsd/miscfs/nullfs/null_vfsops.c +++ b/bsd/miscfs/nullfs/null_vfsops.c @@ -127,8 +127,9 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v NULLFSDEBUG("nullfs_mount(mp = %p) %llx\n", (void *)mp, vfs_flags(mp)); - if (vfs_flags(mp) & MNT_ROOTFS) - return (EOPNOTSUPP); + if (vfs_flags(mp) & MNT_ROOTFS) { + return EOPNOTSUPP; + } /* * Update is a no-op @@ -166,12 +167,11 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v } /* lowervrootvp has an iocount after vnode_lookup, drop that for a usecount. - Keep this to signal what we want to keep around the thing we are mirroring. - Drop it in unmount.*/ + * Keep this to signal what we want to keep around the thing we are mirroring. + * Drop it in unmount.*/ error = vnode_ref(lowerrootvp); vnode_put(lowerrootvp); - if (error) - { + if (error) { // If vnode_ref failed, then null it out so it can't be used anymore in cleanup. lowerrootvp = NULL; goto error; @@ -211,7 +211,7 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v xmp->nullm_rootvp = vp; /* read the flags the user set, but then ignore some of them, we will only - allow them if they are set on the lower file system */ + * allow them if they are set on the lower file system */ uint64_t flags = vfs_flags(mp) & (~(MNT_IGNORE_OWNERSHIP | MNT_LOCAL)); uint64_t lowerflags = vfs_flags(vnode_mount(lowerrootvp)) & (MNT_LOCAL | MNT_QUARANTINE | MNT_IGNORE_OWNERSHIP | MNT_NOEXEC); @@ -285,7 +285,7 @@ nullfs_mount(struct mount * mp, __unused vnode_t devvp, user_addr_t user_data, v MAC_PERFORM(mount_label_associate, cred, vnode_mount(lowerrootvp), vfs_mntlabel(mp)); NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n", sp->f_mntfromname, sp->f_mntonname); - return (0); + return 0; error: if (xmp) { @@ -321,7 +321,7 @@ nullfs_unmount(struct mount * mp, int mntflags, __unused vfs_context_t ctx) /* check entitlement or superuser*/ if (!IOTaskHasEntitlement(current_task(), NULLFS_ENTITLEMENT) && - vfs_context_suser(ctx) != 0) { + vfs_context_suser(ctx) != 0) { return EPERM; } @@ -339,14 +339,12 @@ nullfs_unmount(struct mount * mp, int mntflags, __unused vfs_context_t ctx) vnode_getalways(vp); error = vflush(mp, vp, flags); - if (error) - { + if (error) { vnode_put(vp); - return (error); + return error; } - if (vnode_isinuse(vp,1) && flags == 0) - { + if (vnode_isinuse(vp, 1) && flags == 0) { vnode_put(vp); return EBUSY; } @@ -376,7 +374,7 @@ nullfs_unmount(struct mount * mp, int mntflags, __unused vfs_context_t ctx) uint64_t vflags = vfs_flags(mp); vfs_setflags(mp, vflags & ~MNT_LOCAL); - return (0); + return 0; } static int @@ -393,8 +391,9 @@ nullfs_root(struct mount * mp, struct vnode ** vpp, __unused vfs_context_t ctx) vp = MOUNTTONULLMOUNT(mp)->nullm_rootvp; error = vnode_get(vp); - if (error) + if (error) { return error; + } *vpp = vp; return 0; @@ -436,48 +435,61 @@ nullfs_vfs_getattr(struct mount * mp, struct vfs_attr * vfap, vfs_context_t ctx) capabilities.valid[VOL_CAPABILITIES_INTERFACES] &= ~(VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_ATTRLIST | VOL_CAP_INT_READDIRATTR | VOL_CAP_INT_EXCHANGEDATA | - VOL_CAP_INT_COPYFILE | VOL_CAP_INT_ALLOCATE | 
VOL_CAP_INT_VOL_RENAME | VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK); + VOL_CAP_INT_COPYFILE | VOL_CAP_INT_ALLOCATE | VOL_CAP_INT_VOL_RENAME | VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK); } } - if (VFSATTR_IS_ACTIVE(vfap, f_create_time)) + if (VFSATTR_IS_ACTIVE(vfap, f_create_time)) { VFSATTR_RETURN(vfap, f_create_time, tzero); + } - if (VFSATTR_IS_ACTIVE(vfap, f_modify_time)) + if (VFSATTR_IS_ACTIVE(vfap, f_modify_time)) { VFSATTR_RETURN(vfap, f_modify_time, tzero); + } - if (VFSATTR_IS_ACTIVE(vfap, f_access_time)) + if (VFSATTR_IS_ACTIVE(vfap, f_access_time)) { VFSATTR_RETURN(vfap, f_access_time, tzero); + } - if (VFSATTR_IS_ACTIVE(vfap, f_bsize)) + if (VFSATTR_IS_ACTIVE(vfap, f_bsize)) { VFSATTR_RETURN(vfap, f_bsize, sp->f_bsize); + } - if (VFSATTR_IS_ACTIVE(vfap, f_iosize)) + if (VFSATTR_IS_ACTIVE(vfap, f_iosize)) { VFSATTR_RETURN(vfap, f_iosize, sp->f_iosize); + } - if (VFSATTR_IS_ACTIVE(vfap, f_owner)) + if (VFSATTR_IS_ACTIVE(vfap, f_owner)) { VFSATTR_RETURN(vfap, f_owner, 0); + } - if (VFSATTR_IS_ACTIVE(vfap, f_blocks)) + if (VFSATTR_IS_ACTIVE(vfap, f_blocks)) { VFSATTR_RETURN(vfap, f_blocks, sp->f_blocks); + } - if (VFSATTR_IS_ACTIVE(vfap, f_bfree)) + if (VFSATTR_IS_ACTIVE(vfap, f_bfree)) { VFSATTR_RETURN(vfap, f_bfree, sp->f_bfree); + } - if (VFSATTR_IS_ACTIVE(vfap, f_bavail)) + if (VFSATTR_IS_ACTIVE(vfap, f_bavail)) { VFSATTR_RETURN(vfap, f_bavail, sp->f_bavail); + } - if (VFSATTR_IS_ACTIVE(vfap, f_bused)) + if (VFSATTR_IS_ACTIVE(vfap, f_bused)) { VFSATTR_RETURN(vfap, f_bused, sp->f_bused); + } - if (VFSATTR_IS_ACTIVE(vfap, f_files)) + if (VFSATTR_IS_ACTIVE(vfap, f_files)) { VFSATTR_RETURN(vfap, f_files, sp->f_files); + } - if (VFSATTR_IS_ACTIVE(vfap, f_ffree)) + if (VFSATTR_IS_ACTIVE(vfap, f_ffree)) { VFSATTR_RETURN(vfap, f_ffree, sp->f_ffree); + } - if (VFSATTR_IS_ACTIVE(vfap, f_fssubtype)) + if (VFSATTR_IS_ACTIVE(vfap, f_fssubtype)) { VFSATTR_RETURN(vfap, f_fssubtype, 0); + } if (VFSATTR_IS_ACTIVE(vfap, f_capabilities)) { memcpy(&vfap->f_capabilities, &capabilities, sizeof(vol_capabilities_attr_t)); @@ -525,7 +537,7 @@ nullfs_sync(__unused struct mount * mp, __unused int waitfor, __unused vfs_conte /* * XXX - Assumes no data cached at null layer. */ - return (0); + return 0; } @@ -540,18 +552,17 @@ nullfs_vfs_start(__unused struct mount * mp, __unused int flags, __unused vfs_co extern struct vnodeopv_desc nullfs_vnodeop_opv_desc; struct vnodeopv_desc * nullfs_vnodeopv_descs[] = { - &nullfs_vnodeop_opv_desc, + &nullfs_vnodeop_opv_desc, }; struct vfsops nullfs_vfsops = { - .vfs_mount = nullfs_mount, - .vfs_unmount = nullfs_unmount, - .vfs_start = nullfs_vfs_start, - .vfs_root = nullfs_root, - .vfs_getattr = nullfs_vfs_getattr, - .vfs_sync = nullfs_sync, - .vfs_init = nullfs_init, - .vfs_sysctl = NULL, - .vfs_setattr = NULL, + .vfs_mount = nullfs_mount, + .vfs_unmount = nullfs_unmount, + .vfs_start = nullfs_vfs_start, + .vfs_root = nullfs_root, + .vfs_getattr = nullfs_vfs_getattr, + .vfs_sync = nullfs_sync, + .vfs_init = nullfs_init, + .vfs_sysctl = NULL, + .vfs_setattr = NULL, }; - diff --git a/bsd/miscfs/nullfs/null_vnops.c b/bsd/miscfs/nullfs/null_vnops.c index 05e28abc1..ebfe2e7e1 100644 --- a/bsd/miscfs/nullfs/null_vnops.c +++ b/bsd/miscfs/nullfs/null_vnops.c @@ -179,14 +179,14 @@ nullfs_special_getattr(struct vnop_getattr_args * args) VATTR_RETURN(args->a_vap, va_filerev, 0); VATTR_RETURN(args->a_vap, va_gen, 0); VATTR_RETURN(args->a_vap, va_flags, UF_HIDDEN); /* mark our fake directories as hidden. 
People - shouldn't be enocouraged to poke around in them */ + * shouldn't be enocouraged to poke around in them */ if (ino == NULL_SECOND_INO) { VATTR_RETURN(args->a_vap, va_parentid, NULL_ROOT_INO); /* no parent at the root, so - the only other vnode that - goes through this path is - second and its parent is - 1.*/ + * the only other vnode that + * goes through this path is + * second and its parent is + * 1.*/ } if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) { @@ -298,10 +298,10 @@ nullfs_close(struct vnop_close_args * args) } /* get lvp's parent, if possible, even if it isn't set. - - lvp is expected to have an iocount before and after this call. - - if a dvpp is populated the returned vnode has an iocount. */ + * + * lvp is expected to have an iocount before and after this call. + * + * if a dvpp is populated the returned vnode has an iocount. */ static int null_get_lowerparent(vnode_t lvp, vnode_t * dvpp, vfs_context_t ctx) { @@ -379,7 +379,6 @@ null_special_lookup(struct vnop_lookup_args * ap) error = vnode_get(vp); } } - } else if (dvp == null_mp->nullm_secondvp) { /* handle . and .. */ if (cnp->cn_nameptr[0] == '.') { @@ -397,21 +396,21 @@ null_special_lookup(struct vnop_lookup_args * ap) /* nullmp->nullm_lowerrootvp was set at mount time so don't need to lock to * access it */ /* v_name should be null terminated but cn_nameptr is not necessarily. - cn_namelen is the number of characters before the null in either case */ + * cn_namelen is the number of characters before the null in either case */ error = vnode_getwithvid(null_mp->nullm_lowerrootvp, null_mp->nullm_lowerrootvid); if (error) { goto end; } /* We don't want to mess with case insensitivity and unicode, so the plan to - check here is - 1. try to get the lower root's parent - 2. If we get a parent, then perform a lookup on the lower file system - using the parent and the passed in cnp - 3. If that worked and we got a vp, then see if the vp is lowerrootvp. If - so we got a match - 4. Anything else results in ENOENT. - */ + * check here is + * 1. try to get the lower root's parent + * 2. If we get a parent, then perform a lookup on the lower file system + * using the parent and the passed in cnp + * 3. If that worked and we got a vp, then see if the vp is lowerrootvp. If + * so we got a match + * 4. Anything else results in ENOENT. + */ error = null_get_lowerparent(null_mp->nullm_lowerrootvp, &ldvp, ap->a_context); if (error == 0) { @@ -464,7 +463,7 @@ null_lookup(struct vnop_lookup_args * ap) mp = vnode_mount(dvp); /* rename and delete are not allowed. this is a read only file system */ if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME || cnp->cn_nameiop == CREATE) { - return (EROFS); + return EROFS; } null_mp = MOUNTTONULLMOUNT(mp); @@ -530,7 +529,7 @@ notdot: vnode_put(lvp); } - return (error); + return error; } /* @@ -541,7 +540,7 @@ null_inactive(__unused struct vnop_inactive_args * ap) { NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp); - return (0); + return 0; } static int @@ -568,9 +567,9 @@ null_reclaim(struct vnop_reclaim_args * ap) * got hashed */ if (xp->null_flags & NULL_FLAG_HASHED) { /* only call this if we actually made it into the hash list. 
reclaim gets - called also to - clean up a vnode that got created when it didn't need to under race - conditions */ + * called also to + * clean up a vnode that got created when it didn't need to under race + * conditions */ null_hashrem(xp); } vnode_getwithref(lowervp); @@ -635,15 +634,15 @@ nullfs_special_readdir(struct vnop_readdir_args * ap) ino_t ino = 0; const char * name = NULL; - if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) - return (EINVAL); + if (ap->a_flags & (VNODE_READDIR_EXTENDED | VNODE_READDIR_REQSEEKOFF)) { + return EINVAL; + } if (offset == 0) { /* . case */ if (vp == null_mp->nullm_rootvp) { ino = NULL_ROOT_INO; - } else /* only get here if vp matches nullm_rootvp or nullm_secondvp */ - { + } else { /* only get here if vp matches nullm_rootvp or nullm_secondvp */ ino = NULL_SECOND_INO; } error = store_entry_special(ino, ".", uio); @@ -670,13 +669,12 @@ nullfs_special_readdir(struct vnop_readdir_args * ap) if (vp == null_mp->nullm_rootvp) { ino = NULL_SECOND_INO; name = "d"; - } else /* only get here if vp matches nullm_rootvp or nullm_secondvp */ - { + } else { /* only get here if vp matches nullm_rootvp or nullm_secondvp */ ino = NULL_THIRD_INO; if (vnode_getwithvid(null_mp->nullm_lowerrootvp, null_mp->nullm_lowerrootvid)) { /* In this case the lower file system has been ripped out from under us, - but we don't want to error out - Instead we just want d to look empty. */ + * but we don't want to error out + * Instead we just want d to look empty. */ error = 0; goto out; } @@ -699,7 +697,7 @@ nullfs_special_readdir(struct vnop_readdir_args * ap) out: if (error == EMSGSIZE) { error = 0; /* return success if we ran out of space, but we wanted to make - sure that we didn't update offset and items incorrectly */ + * sure that we didn't update offset and items incorrectly */ } uio_setoffset(uio, offset); if (ap->a_numdirent) { @@ -925,13 +923,12 @@ nullfs_pagein(struct vnop_pagein_args * ap) error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? 
IO_SYNC : 0), ap->a_context); bytes_remaining = uio_resid(auio); - if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) - { + if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) { /* zero bytes that weren't read in to the upl */ bzero((void*)((uintptr_t)(ioaddr + ap->a_size - bytes_remaining)), (size_t) bytes_remaining); } - exit: +exit: kret = ubc_upl_unmap(upl); if (KERN_SUCCESS != kret) { panic("nullfs_pagein: ubc_upl_unmap() failed with (%d)", kret); @@ -941,16 +938,14 @@ nullfs_pagein(struct vnop_pagein_args * ap) uio_free(auio); } - exit_no_unmap: +exit_no_unmap: if ((ap->a_flags & UPL_NOCOMMIT) == 0) { if (!error && (bytes_remaining >= 0) && (bytes_remaining <= (user_ssize_t)ap->a_size)) { /* only commit what was read in (page aligned)*/ bytes_to_commit = ap->a_size - bytes_remaining; - if (bytes_to_commit) - { + if (bytes_to_commit) { /* need to make sure bytes_to_commit and byte_remaining are page aligned before calling ubc_upl_commit_range*/ - if (bytes_to_commit & PAGE_MASK) - { + if (bytes_to_commit & PAGE_MASK) { bytes_to_commit = (bytes_to_commit & (~PAGE_MASK)) + (PAGE_MASK + 1); assert(bytes_to_commit <= (off_t)ap->a_size); @@ -958,7 +953,7 @@ nullfs_pagein(struct vnop_pagein_args * ap) } ubc_upl_commit_range(upl, ap->a_pl_offset, (upl_size_t)bytes_to_commit, UPL_COMMIT_FREE_ON_EMPTY); } - + /* abort anything thats left */ if (bytes_remaining) { ubc_upl_abort_range(upl, ap->a_pl_offset + bytes_to_commit, (upl_size_t)bytes_remaining, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); @@ -968,7 +963,7 @@ nullfs_pagein(struct vnop_pagein_args * ap) } } vnode_put(lvp); - } else if((ap->a_flags & UPL_NOCOMMIT) == 0) { + } else if ((ap->a_flags & UPL_NOCOMMIT) == 0) { ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); } return error; @@ -1012,7 +1007,7 @@ nullfs_read(struct vnop_read_args * ap) if (error) { NULLFSDEBUG("VNOP_READ failed: %d\n", error); } - end: +end: vnode_put(lvp); } return error; @@ -1023,15 +1018,15 @@ nullfs_read(struct vnop_read_args * ap) */ static struct vnodeopv_entry_desc nullfs_vnodeop_entries[] = { - {&vnop_default_desc, (vop_t)nullfs_default}, {&vnop_getattr_desc, (vop_t)nullfs_getattr}, - {&vnop_open_desc, (vop_t)nullfs_open}, {&vnop_close_desc, (vop_t)nullfs_close}, - {&vnop_inactive_desc, (vop_t)null_inactive}, {&vnop_reclaim_desc, (vop_t)null_reclaim}, - {&vnop_lookup_desc, (vop_t)null_lookup}, {&vnop_readdir_desc, (vop_t)nullfs_readdir}, - {&vnop_readlink_desc, (vop_t)nullfs_readlink}, {&vnop_pathconf_desc, (vop_t)nullfs_pathconf}, - {&vnop_fsync_desc, (vop_t)nullfs_fsync}, {&vnop_mmap_desc, (vop_t)nullfs_mmap}, - {&vnop_mnomap_desc, (vop_t)nullfs_mnomap}, {&vnop_getxattr_desc, (vop_t)nullfs_getxattr}, - {&vnop_pagein_desc, (vop_t)nullfs_pagein}, {&vnop_read_desc, (vop_t)nullfs_read}, - {&vnop_listxattr_desc, (vop_t)nullfs_listxattr}, {NULL, NULL}, + {&vnop_default_desc, (vop_t)nullfs_default}, {&vnop_getattr_desc, (vop_t)nullfs_getattr}, + {&vnop_open_desc, (vop_t)nullfs_open}, {&vnop_close_desc, (vop_t)nullfs_close}, + {&vnop_inactive_desc, (vop_t)null_inactive}, {&vnop_reclaim_desc, (vop_t)null_reclaim}, + {&vnop_lookup_desc, (vop_t)null_lookup}, {&vnop_readdir_desc, (vop_t)nullfs_readdir}, + {&vnop_readlink_desc, (vop_t)nullfs_readlink}, {&vnop_pathconf_desc, (vop_t)nullfs_pathconf}, + {&vnop_fsync_desc, (vop_t)nullfs_fsync}, {&vnop_mmap_desc, (vop_t)nullfs_mmap}, + {&vnop_mnomap_desc, (vop_t)nullfs_mnomap}, {&vnop_getxattr_desc, 
(vop_t)nullfs_getxattr}, + {&vnop_pagein_desc, (vop_t)nullfs_pagein}, {&vnop_read_desc, (vop_t)nullfs_read}, + {&vnop_listxattr_desc, (vop_t)nullfs_listxattr}, {NULL, NULL}, }; struct vnodeopv_desc nullfs_vnodeop_opv_desc = {&nullfs_vnodeop_p, nullfs_vnodeop_entries}; diff --git a/bsd/miscfs/nullfs/nullfs.h b/bsd/miscfs/nullfs/nullfs.h index 80d8f174c..766194f6d 100644 --- a/bsd/miscfs/nullfs/nullfs.h +++ b/bsd/miscfs/nullfs/nullfs.h @@ -88,13 +88,13 @@ typedef int (*vop_t)(void *); struct null_mount { struct vnode * nullm_rootvp; /* Reference to root null_node (inode 1) */ struct vnode * nullm_secondvp; /* Reference to virtual directory vnode to wrap app - bundles (inode 2) */ + * bundles (inode 2) */ struct vnode * nullm_thirdcovervp; /* Reference to vnode that covers - lowerrootvp (inode 3) */ + * lowerrootvp (inode 3) */ struct vnode * nullm_lowerrootvp; /* reference to the root of the tree we are - relocating (in the other file system) */ + * relocating (in the other file system) */ uint32_t nullm_lowerrootvid; /* store the lower root vid so we can check - before we build the shadow vnode lazily*/ + * before we build the shadow vnode lazily*/ lck_mtx_t nullm_lock; /* lock to protect vps above */ uint64_t nullm_flags; }; @@ -111,7 +111,7 @@ struct null_node { struct vnode * null_lowervp; /* VREFed once */ struct vnode * null_vnode; /* Back pointer */ uint32_t null_lowervid; /* vid for lowervp to detect lowervp getting recycled out - from under us */ + * from under us */ uint32_t null_myvid; uint32_t null_flags; }; @@ -136,10 +136,10 @@ int nullfs_init_lck(lck_mtx_t * lck); int nullfs_destroy_lck(lck_mtx_t * lck); int nullfs_uninit(void); int null_nodeget( - struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root); + struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root); int null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp); int null_getnewvnode( - struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root); + struct mount * mp, struct vnode * lowervp, struct vnode * dvp, struct vnode ** vpp, struct componentname * cnp, int root); void null_hashrem(struct null_node * xp); int nullfs_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp); diff --git a/bsd/miscfs/routefs/routefs.h b/bsd/miscfs/routefs/routefs.h index b6ba01974..61abd5fcc 100644 --- a/bsd/miscfs/routefs/routefs.h +++ b/bsd/miscfs/routefs/routefs.h @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
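
The null_mount and null_node fields above cache vnode ids (vids) precisely so later code can detect a lower vnode being recycled out from under the shadow layer: the cached vid is compared against the live vnode before use, typically via vnode_getwithvid(). A sketch of the check those fields enable, using a toy vnode type rather than the kernel's:

#include <errno.h>
#include <stdint.h>

struct toy_vnode {
	uint32_t vid;      /* generation; bumps when the vnode is recycled */
	int      iocount;
};

/*
 * like vnode_getwithvid(): take an iocount only if the vnode is still
 * the incarnation whose vid we cached; a recycled vnode has moved on
 * to a different vid, so the stale reference is refused.
 */
static int
toy_getwithvid(struct toy_vnode *vp, uint32_t cached_vid)
{
	if (vp->vid != cached_vid) {
		return ENOENT;      /* recycled out from under us */
	}
	vp->iocount++;
	return 0;
}
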
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MISCFS_ROUTEFS_DEVFS_H_ -#define _MISCFS_ROUTEFS_DEVFS_H_ +#define _MISCFS_ROUTEFS_DEVFS_H_ #include @@ -37,16 +37,15 @@ __BEGIN_DECLS #ifdef BSD_KERNEL_PRIVATE struct routefs_args { - char route_path[MAXPATHLEN]; /* path name of the target route */ - vnode_t route_rvp; /* vnode of the target of route */ - + char route_path[MAXPATHLEN];/* path name of the target route */ + vnode_t route_rvp; /* vnode of the target of route */ }; struct routefs_mount { - char route_path[MAXPATHLEN]; /* path name of the target route */ - mount_t route_mount; - vnode_t route_rvp; /* vnode of the target of route */ - int route_vpvid; /* vnode of the target of route */ + char route_path[MAXPATHLEN];/* path name of the target route */ + mount_t route_mount; + vnode_t route_rvp; /* vnode of the target of route */ + int route_vpvid; /* vnode of the target of route */ }; diff --git a/bsd/miscfs/routefs/routefs_ops.c b/bsd/miscfs/routefs/routefs_ops.c index db6db101d..d066326e1 100644 --- a/bsd/miscfs/routefs/routefs_ops.c +++ b/bsd/miscfs/routefs/routefs_ops.c @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -55,11 +55,11 @@ static int routefs_statfs( struct mount *mp, struct vfsstatfs *sbp, __unused vfs static int routefs_vfs_getattr(__unused mount_t mp, struct vfs_attr *fsap, __unused vfs_context_t ctx); static int routefs_sync(__unused struct mount *mp, __unused int waitfor, __unused vfs_context_t ctx); static int routefs_vget(__unused struct mount *mp, __unused ino64_t ino, __unused struct vnode **vpp, __unused vfs_context_t ctx); -static int routefs_fhtovp (__unused struct mount *mp, __unused int fhlen, __unused unsigned char *fhp, __unused struct vnode **vpp, __unused vfs_context_t ctx); -static int routefs_vptofh (__unused struct vnode *vp, __unused int *fhlenp, __unused unsigned char *fhp, __unused vfs_context_t ctx); +static int routefs_fhtovp(__unused struct mount *mp, __unused int fhlen, __unused unsigned char *fhp, __unused struct vnode **vpp, __unused vfs_context_t ctx); +static int routefs_vptofh(__unused struct vnode *vp, __unused int *fhlenp, __unused unsigned char *fhp, __unused vfs_context_t ctx); static int routefs_sysctl(__unused int *name, __unused u_int namelen, __unused user_addr_t oldp, - __unused size_t *oldlenp, __unused user_addr_t newp, - __unused size_t newlen, __unused vfs_context_t ctx); + __unused size_t *oldlenp, __unused user_addr_t newp, + __unused size_t newlen, __unused vfs_context_t ctx); static int routefserr_lookup(__unused struct vnop_lookup_args * args); static int routefserr_setlabel(__unused struct vnop_setlabel_args * args); @@ -78,47 +78,46 @@ static boolean_t _fs_alreadyMounted = FALSE; /* atleast a mount of this filesys static int routefs_init(__unused struct vfsconf *vfsp) { - routefs_lck_grp_attr = lck_grp_attr_alloc_init(); - routefs_lck_grp = lck_grp_alloc_init("routefs_lock", routefs_lck_grp_attr); - routefs_lck_attr = lck_attr_alloc_init(); - lck_mtx_init(&routefs_mutex, routefs_lck_grp, routefs_lck_attr); - _lock_inited = 1; - - return 0; + routefs_lck_grp_attr = lck_grp_attr_alloc_init(); + routefs_lck_grp = lck_grp_alloc_init("routefs_lock", routefs_lck_grp_attr); + routefs_lck_attr = lck_attr_alloc_init(); + lck_mtx_init(&routefs_mutex, routefs_lck_grp, routefs_lck_attr); + _lock_inited = 1; + + return 0; } static int -routefs_mount(struct mount *mp, __unused vnode_t devvp, user_addr_t data, vfs_context_t ctx) +routefs_mount(struct mount *mp, __unused vnode_t devvp, user_addr_t data, vfs_context_t ctx) { - struct routefs_mount *routefs_mp_p = NULL; /* routefs specific mount info */ - int error=EINVAL; - struct routefs_args * rargs = (struct routefs_args *)data; - + struct routefs_mount *routefs_mp_p = NULL; /* routefs specific mount info */ + int error = EINVAL; + struct routefs_args * rargs = (struct routefs_args *)data; + /*- * If they just want to update, we don't need to do anything. 
*/ - if (mp->mnt_flag & MNT_UPDATE) - { + if (mp->mnt_flag & MNT_UPDATE) { return 0; } - - /* check for root mount only */ - if ((error = proc_suser(current_proc()))!= 0) { - goto out; - } - - if (vfs_iskernelmount(mp) == FALSE) { - error = EPERM; - goto out; - } - - if (_fs_alreadyMounted == TRUE) { - /* if a filesystem is mounted, it needs to be unmounted prior to mount again */ - error = EPERM; - goto out; - } - + + /* check for root mount only */ + if ((error = proc_suser(current_proc())) != 0) { + goto out; + } + + if (vfs_iskernelmount(mp) == FALSE) { + error = EPERM; + goto out; + } + + if (_fs_alreadyMounted == TRUE) { + /* if a filesystem is mounted, it needs to be unmounted prior to mount again */ + error = EPERM; + goto out; + } + /* Advisory locking should be handled at the VFS layer */ vfs_setlocklocal(mp); @@ -129,28 +128,29 @@ routefs_mount(struct mount *mp, __unused vnode_t devvp, user_addr_t data, vfs_c */ MALLOC(routefs_mp_p, struct routefs_mount *, sizeof(struct routefs_mount), - M_TEMP, M_WAITOK); - if (routefs_mp_p == NULL) - return (ENOMEM); + M_TEMP, M_WAITOK); + if (routefs_mp_p == NULL) { + return ENOMEM; + } bzero(routefs_mp_p, sizeof(*routefs_mp_p)); - + routefs_mp_p->route_mount = mp; - if (rargs->route_rvp == NULLVP) { - error = EACCES; - goto out; - } - - strlcpy(routefs_mp_p->route_path,rargs->route_path, MAXPATHLEN); - routefs_mp_p->route_rvp = rargs->route_rvp; - routefs_mp_p->route_vpvid = vnode_vid(rargs->route_rvp); - - if (vnode_ref(routefs_mp_p->route_rvp) != 0) { - error = EACCES; - goto out; - } - - /* + if (rargs->route_rvp == NULLVP) { + error = EACCES; + goto out; + } + + strlcpy(routefs_mp_p->route_path, rargs->route_path, MAXPATHLEN); + routefs_mp_p->route_rvp = rargs->route_rvp; + routefs_mp_p->route_vpvid = vnode_vid(rargs->route_rvp); + + if (vnode_ref(routefs_mp_p->route_rvp) != 0) { + error = EACCES; + goto out; + } + + /* * Fill out some fields */ __IGNORE_WCASTALIGN(mp->mnt_data = (qaddr_t)routefs_mp_p); @@ -164,17 +164,18 @@ routefs_mount(struct mount *mp, __unused vnode_t devvp, user_addr_t data, vfs_c * And we clear the remainder of the character strings * to be tidy. 
*/ - + bzero(mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN); - bcopy("routefs",mp->mnt_vfsstat.f_mntfromname, 5); + bcopy("routefs", mp->mnt_vfsstat.f_mntfromname, 5); (void)routefs_statfs(mp, &mp->mnt_vfsstat, ctx); - _fs_alreadyMounted = TRUE; /* yep, fs is in play now */ + _fs_alreadyMounted = TRUE; /* yep, fs is in play now */ error = 0; out: - if (error != 0) { - if (routefs_mp_p != NULL) - FREE((caddr_t)routefs_mp_p, M_TEMP); - } + if (error != 0) { + if (routefs_mp_p != NULL) { + FREE(routefs_mp_p, M_TEMP); + } + } return error; } @@ -195,30 +196,30 @@ routefs_unmount( struct mount *mp, int mntflags, __unused vfs_context_t ctx) int flags = 0; int force = 0; int error; - - /* check for root unmount only */ - if ((error = proc_suser(current_proc()))!= 0) { - return(error); - } + + /* check for root unmount only */ + if ((error = proc_suser(current_proc())) != 0) { + return error; + } if (mntflags & MNT_FORCE) { flags |= FORCECLOSE; force = 1; } - /* giveup the ioref of vnode, no longer need it */ - if (routefs_mp_p->route_rvp != NULLVP) { - if (vnode_getwithref(routefs_mp_p->route_rvp) == 0) { - vnode_rele(routefs_mp_p->route_rvp); - vnode_put(routefs_mp_p->route_rvp); - routefs_mp_p->route_rvp = NULLVP; - } - } - /* no vnodes, ignore any errors */ - (void)vflush(mp, NULLVP, flags); - FREE((caddr_t)routefs_mp_p, M_TEMP); + /* giveup the ioref of vnode, no longer need it */ + if (routefs_mp_p->route_rvp != NULLVP) { + if (vnode_getwithref(routefs_mp_p->route_rvp) == 0) { + vnode_rele(routefs_mp_p->route_rvp); + vnode_put(routefs_mp_p->route_rvp); + routefs_mp_p->route_rvp = NULLVP; + } + } + /* no vnodes, ignore any errors */ + (void)vflush(mp, NULLVP, flags); + FREE(routefs_mp_p, M_TEMP); mp->mnt_data = (qaddr_t)0; mp->mnt_flag &= ~MNT_LOCAL; - _fs_alreadyMounted = FALSE; /* unmounted the fs, only one allowed at a time */ + _fs_alreadyMounted = FALSE; /* unmounted the fs, only one allowed at a time */ return 0; } @@ -227,38 +228,40 @@ static int routefs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t ctx) { struct routefs_mount *routefs_mp_p = (struct routefs_mount *)(mp->mnt_data); - int error=0; - - /* check for nullvp incase its being rolled */ - if (routefs_mp_p->route_rvp == NULLVP) { - ROUTEFS_LOCK(); - if (routefs_mp_p->route_rvp == NULLVP) { - ROUTEFS_UNLOCK(); - error = EACCES; - goto out; - } - ROUTEFS_UNLOCK(); - } - if (vnode_getwithvid(routefs_mp_p->route_rvp, routefs_mp_p->route_vpvid) != 0) { - /* only one in the path., since no vnodes with this, you can hold across this call */ - ROUTEFS_LOCK(); - if (vnode_getwithref(routefs_mp_p->route_rvp) == 0) { - vnode_rele(routefs_mp_p->route_rvp); - vnode_put(routefs_mp_p->route_rvp); - routefs_mp_p->route_rvp = NULLVP; - routefs_mp_p->route_vpvid = -1; - error = vnode_lookup(routefs_mp_p->route_path, FREAD|O_DIRECTORY, &routefs_mp_p->route_rvp, ctx); - if (error == 0) - routefs_mp_p->route_vpvid = vnode_vid(routefs_mp_p->route_rvp); - } else { - error = EACCES; - } - ROUTEFS_UNLOCK(); - - if (error != 0) - goto out; - } - *vpp = routefs_mp_p->route_rvp; + int error = 0; + + /* check for nullvp incase its being rolled */ + if (routefs_mp_p->route_rvp == NULLVP) { + ROUTEFS_LOCK(); + if (routefs_mp_p->route_rvp == NULLVP) { + ROUTEFS_UNLOCK(); + error = EACCES; + goto out; + } + ROUTEFS_UNLOCK(); + } + if (vnode_getwithvid(routefs_mp_p->route_rvp, routefs_mp_p->route_vpvid) != 0) { + /* only one in the path., since no vnodes with this, you can hold across this call */ + ROUTEFS_LOCK(); + if 
(vnode_getwithref(routefs_mp_p->route_rvp) == 0) { + vnode_rele(routefs_mp_p->route_rvp); + vnode_put(routefs_mp_p->route_rvp); + routefs_mp_p->route_rvp = NULLVP; + routefs_mp_p->route_vpvid = -1; + error = vnode_lookup(routefs_mp_p->route_path, FREAD | O_DIRECTORY, &routefs_mp_p->route_rvp, ctx); + if (error == 0) { + routefs_mp_p->route_vpvid = vnode_vid(routefs_mp_p->route_rvp); + } + } else { + error = EACCES; + } + ROUTEFS_UNLOCK(); + + if (error != 0) { + goto out; + } + } + *vpp = routefs_mp_p->route_rvp; out: return error; } @@ -272,10 +275,10 @@ routefs_statfs( struct mount *mp, struct vfsstatfs *sbp, __unused vfs_context_t * Fill in the stat block. */ //sbp->f_type = mp->mnt_vfsstat.f_type; - sbp->f_flags = 0; /* XXX */ + sbp->f_flags = 0; /* XXX */ sbp->f_bsize = 512; sbp->f_iosize = 512; - sbp->f_blocks = (sizeof(struct routefs_mount)+ sbp->f_bsize) / sbp->f_bsize; + sbp->f_blocks = (sizeof(struct routefs_mount) + sbp->f_bsize) / sbp->f_bsize; sbp->f_bfree = 0; sbp->f_bavail = 0; sbp->f_files = 0; @@ -294,7 +297,7 @@ routefs_vfs_getattr(__unused mount_t mp, struct vfs_attr *fsap, __unused vfs_con VFSATTR_RETURN(fsap, f_bsize, 512); VFSATTR_RETURN(fsap, f_iosize, 512); if (VFSATTR_IS_ACTIVE(fsap, f_blocks) || VFSATTR_IS_ACTIVE(fsap, f_bused)) { - fsap->f_blocks = (sizeof(struct routefs_mount)+ fsap->f_bsize) / fsap->f_bsize; + fsap->f_blocks = (sizeof(struct routefs_mount) + fsap->f_bsize) / fsap->f_bsize; fsap->f_bused = fsap->f_blocks; VFSATTR_SET_SUPPORTED(fsap, f_blocks); VFSATTR_SET_SUPPORTED(fsap, f_bused); @@ -304,116 +307,116 @@ routefs_vfs_getattr(__unused mount_t mp, struct vfs_attr *fsap, __unused vfs_con VFSATTR_RETURN(fsap, f_files, 0); VFSATTR_RETURN(fsap, f_ffree, 0); VFSATTR_RETURN(fsap, f_fssubtype, 0); - + if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) { fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] = - VOL_CAP_FMT_SYMBOLICLINKS | - VOL_CAP_FMT_HARDLINKS | - VOL_CAP_FMT_NO_ROOT_TIMES | - VOL_CAP_FMT_CASE_SENSITIVE | - VOL_CAP_FMT_CASE_PRESERVING | - VOL_CAP_FMT_FAST_STATFS | - VOL_CAP_FMT_2TB_FILESIZE | - VOL_CAP_FMT_HIDDEN_FILES; + VOL_CAP_FMT_SYMBOLICLINKS | + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_NO_ROOT_TIMES | + VOL_CAP_FMT_CASE_SENSITIVE | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS | + VOL_CAP_FMT_2TB_FILESIZE | + VOL_CAP_FMT_HIDDEN_FILES; fsap->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] = - VOL_CAP_INT_ATTRLIST ; + VOL_CAP_INT_ATTRLIST; fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1] = 0; fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED2] = 0; - + fsap->f_capabilities.valid[VOL_CAPABILITIES_FORMAT] = - VOL_CAP_FMT_PERSISTENTOBJECTIDS | - VOL_CAP_FMT_SYMBOLICLINKS | - VOL_CAP_FMT_HARDLINKS | - VOL_CAP_FMT_JOURNAL | - VOL_CAP_FMT_JOURNAL_ACTIVE | - VOL_CAP_FMT_NO_ROOT_TIMES | - VOL_CAP_FMT_SPARSE_FILES | - VOL_CAP_FMT_ZERO_RUNS | - VOL_CAP_FMT_CASE_SENSITIVE | - VOL_CAP_FMT_CASE_PRESERVING | - VOL_CAP_FMT_FAST_STATFS | - VOL_CAP_FMT_2TB_FILESIZE | - VOL_CAP_FMT_OPENDENYMODES | - VOL_CAP_FMT_HIDDEN_FILES | - VOL_CAP_FMT_PATH_FROM_ID | - VOL_CAP_FMT_NO_VOLUME_SIZES; + VOL_CAP_FMT_PERSISTENTOBJECTIDS | + VOL_CAP_FMT_SYMBOLICLINKS | + VOL_CAP_FMT_HARDLINKS | + VOL_CAP_FMT_JOURNAL | + VOL_CAP_FMT_JOURNAL_ACTIVE | + VOL_CAP_FMT_NO_ROOT_TIMES | + VOL_CAP_FMT_SPARSE_FILES | + VOL_CAP_FMT_ZERO_RUNS | + VOL_CAP_FMT_CASE_SENSITIVE | + VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS | + VOL_CAP_FMT_2TB_FILESIZE | + VOL_CAP_FMT_OPENDENYMODES | + VOL_CAP_FMT_HIDDEN_FILES | + 
VOL_CAP_FMT_PATH_FROM_ID | + VOL_CAP_FMT_NO_VOLUME_SIZES; fsap->f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] = - VOL_CAP_INT_SEARCHFS | - VOL_CAP_INT_ATTRLIST | - VOL_CAP_INT_NFSEXPORT | - VOL_CAP_INT_READDIRATTR | - VOL_CAP_INT_EXCHANGEDATA | - VOL_CAP_INT_COPYFILE | - VOL_CAP_INT_ALLOCATE | - VOL_CAP_INT_VOL_RENAME | - VOL_CAP_INT_ADVLOCK | - VOL_CAP_INT_FLOCK | - VOL_CAP_INT_EXTENDED_SECURITY | - VOL_CAP_INT_USERACCESS | - VOL_CAP_INT_MANLOCK | - VOL_CAP_INT_EXTENDED_ATTR | - VOL_CAP_INT_NAMEDSTREAMS; + VOL_CAP_INT_SEARCHFS | + VOL_CAP_INT_ATTRLIST | + VOL_CAP_INT_NFSEXPORT | + VOL_CAP_INT_READDIRATTR | + VOL_CAP_INT_EXCHANGEDATA | + VOL_CAP_INT_COPYFILE | + VOL_CAP_INT_ALLOCATE | + VOL_CAP_INT_VOL_RENAME | + VOL_CAP_INT_ADVLOCK | + VOL_CAP_INT_FLOCK | + VOL_CAP_INT_EXTENDED_SECURITY | + VOL_CAP_INT_USERACCESS | + VOL_CAP_INT_MANLOCK | + VOL_CAP_INT_EXTENDED_ATTR | + VOL_CAP_INT_NAMEDSTREAMS; fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0; fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED2] = 0; - + VFSATTR_SET_SUPPORTED(fsap, f_capabilities); } - + if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) { fsap->f_attributes.validattr.commonattr = - ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | - ATTR_CMN_OBJTYPE | ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | - ATTR_CMN_PAROBJID | - ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | - ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | - ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | ATTR_CMN_FILEID; + ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | + ATTR_CMN_OBJTYPE | ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | + ATTR_CMN_PAROBJID | + ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | + ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | + ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | ATTR_CMN_FILEID; fsap->f_attributes.validattr.volattr = - ATTR_VOL_FSTYPE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | - ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | - ATTR_VOL_OBJCOUNT | ATTR_VOL_MAXOBJCOUNT | - ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | - ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | - ATTR_VOL_ATTRIBUTES; + ATTR_VOL_FSTYPE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | + ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | + ATTR_VOL_OBJCOUNT | ATTR_VOL_MAXOBJCOUNT | + ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | + ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | + ATTR_VOL_ATTRIBUTES; fsap->f_attributes.validattr.dirattr = - ATTR_DIR_LINKCOUNT | ATTR_DIR_MOUNTSTATUS; + ATTR_DIR_LINKCOUNT | ATTR_DIR_MOUNTSTATUS; fsap->f_attributes.validattr.fileattr = - ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | - ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE | - ATTR_FILE_DATALENGTH; + ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | + ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE | + ATTR_FILE_DATALENGTH; fsap->f_attributes.validattr.forkattr = 0; - + fsap->f_attributes.nativeattr.commonattr = - ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | - ATTR_CMN_OBJTYPE | ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | - ATTR_CMN_PAROBJID | - ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | - ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | - ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | ATTR_CMN_FILEID; + ATTR_CMN_NAME | ATTR_CMN_DEVID | ATTR_CMN_FSID | + ATTR_CMN_OBJTYPE | ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | + ATTR_CMN_PAROBJID | + ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | + ATTR_CMN_OWNERID | ATTR_CMN_GRPID | ATTR_CMN_ACCESSMASK | + ATTR_CMN_FLAGS | ATTR_CMN_USERACCESS | ATTR_CMN_FILEID; fsap->f_attributes.nativeattr.volattr = - ATTR_VOL_FSTYPE | ATTR_VOL_SIZE | 
ATTR_VOL_SPACEFREE | - ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | - ATTR_VOL_OBJCOUNT | ATTR_VOL_MAXOBJCOUNT | - ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | - ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | - ATTR_VOL_ATTRIBUTES; + ATTR_VOL_FSTYPE | ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | + ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | + ATTR_VOL_OBJCOUNT | ATTR_VOL_MAXOBJCOUNT | + ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | + ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | + ATTR_VOL_ATTRIBUTES; fsap->f_attributes.nativeattr.dirattr = - ATTR_DIR_MOUNTSTATUS; + ATTR_DIR_MOUNTSTATUS; fsap->f_attributes.nativeattr.fileattr = - ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | - ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE | - ATTR_FILE_DATALENGTH; + ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | + ATTR_FILE_IOBLOCKSIZE | ATTR_FILE_DEVTYPE | + ATTR_FILE_DATALENGTH; fsap->f_attributes.nativeattr.forkattr = 0; VFSATTR_SET_SUPPORTED(fsap, f_attributes); } - + return 0; } static int routefs_sync(__unused struct mount *mp, __unused int waitfor, __unused vfs_context_t ctx) { - return (0); + return 0; } @@ -424,24 +427,24 @@ routefs_vget(__unused struct mount *mp, __unused ino64_t ino, __unused struct vn } static int -routefs_fhtovp (__unused struct mount *mp, __unused int fhlen, __unused unsigned char *fhp, __unused struct vnode **vpp, __unused vfs_context_t ctx) +routefs_fhtovp(__unused struct mount *mp, __unused int fhlen, __unused unsigned char *fhp, __unused struct vnode **vpp, __unused vfs_context_t ctx) { - return (EINVAL); + return EINVAL; } static int -routefs_vptofh (__unused struct vnode *vp, __unused int *fhlenp, __unused unsigned char *fhp, __unused vfs_context_t ctx) +routefs_vptofh(__unused struct vnode *vp, __unused int *fhlenp, __unused unsigned char *fhp, __unused vfs_context_t ctx) { - return (EINVAL); + return EINVAL; } static int routefs_sysctl(__unused int *name, __unused u_int namelen, __unused user_addr_t oldp, - __unused size_t *oldlenp, __unused user_addr_t newp, - __unused size_t newlen, __unused vfs_context_t ctx) + __unused size_t *oldlenp, __unused user_addr_t newp, + __unused size_t newlen, __unused vfs_context_t ctx) { - return (ENOTSUP); + return ENOTSUP; } #include @@ -454,33 +457,34 @@ routefs_sysctl(__unused int *name, __unused u_int namelen, __unused user_addr_t int routefs_kernel_mount(char * routepath) { - int error = EINVAL; + int error = EINVAL; vfs_context_t ctx = vfs_context_kernel(); char fsname[] = "routefs"; - struct routefs_args args; - char mounthere[] = MOBILE_DIR_PATH; /* !const because of internal casting */ - - bzero(&args, sizeof(struct routefs_args)); - strlcpy(args.route_path, routepath, MAXPATHLEN); - error = vnode_lookup(args.route_path, FREAD|O_DIRECTORY, &args.route_rvp, ctx); - if (error) { - goto out; + struct routefs_args args; + char mounthere[] = MOBILE_DIR_PATH; /* !const because of internal casting */ + + bzero(&args, sizeof(struct routefs_args)); + strlcpy(args.route_path, routepath, MAXPATHLEN); + error = vnode_lookup(args.route_path, FREAD | O_DIRECTORY, &args.route_rvp, ctx); + if (error) { + goto out; } - if (!vnode_isdir(args.route_rvp)) { - error = EACCES; - goto out; - } + if (!vnode_isdir(args.route_rvp)) { + error = EACCES; + goto out; + } - error = kernel_mount(fsname, NULLVP, NULLVP, mounthere, &args, 0, MNT_DONTBROWSE, KERNEL_MOUNT_NOAUTH, ctx); + error = kernel_mount(fsname, NULLVP, NULLVP, mounthere, &args, 0, MNT_DONTBROWSE, KERNEL_MOUNT_NOAUTH, ctx); if (error) { goto out; } out: - if(args.route_rvp != NULLVP) - (void) 
vnode_put(args.route_rvp); - return (error); + if (args.route_rvp != NULLVP) { + (void) vnode_put(args.route_rvp); + } + return error; } struct vfsops routefs_vfsops = { @@ -498,65 +502,63 @@ struct vfsops routefs_vfsops = { // There are other VFS ops that we do not support }; -static int routefserr_lookup(__unused struct vnop_lookup_args * args) +static int +routefserr_lookup(__unused struct vnop_lookup_args * args) { - return (ENOTSUP); + return ENOTSUP; } -static int routefserr_setlabel(__unused struct vnop_setlabel_args * args) +static int +routefserr_setlabel(__unused struct vnop_setlabel_args * args) { - return (ENOTSUP); - + return ENOTSUP; } #define VOPFUNC int (*)(void *) /* The following ops are used by directories and symlinks */ -int (**routefs_vnodeop_p)(void *); +int(**routefs_vnodeop_p)(void *); static struct vnodeopv_entry_desc routefs_vnodeop_entries[] = { - { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)routefserr_lookup }, /* lookup */ - { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ - { &vnop_whiteout_desc, (VOPFUNC)err_whiteout }, /* whiteout */ - { &vnop_mknod_desc, (VOPFUNC)err_mknod }, /* mknod */ - { &vnop_open_desc, (VOPFUNC)err_open }, /* open */ - { &vnop_close_desc, (VOPFUNC)err_close }, /* close */ - { &vnop_getattr_desc, (VOPFUNC)err_getattr }, /* getattr */ - { &vnop_setattr_desc, (VOPFUNC)err_setattr }, /* setattr */ - { &vnop_read_desc, (VOPFUNC)err_read }, /* read */ - { &vnop_write_desc, (VOPFUNC)err_write }, /* write */ - { &vnop_ioctl_desc, (VOPFUNC)err_ioctl }, /* ioctl */ - { &vnop_select_desc, (VOPFUNC)err_select }, /* select */ - { &vnop_revoke_desc, (VOPFUNC)err_revoke }, /* revoke */ - { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ - { &vnop_fsync_desc, (VOPFUNC)nop_fsync }, /* fsync */ - { &vnop_remove_desc, (VOPFUNC)err_remove }, /* remove */ - { &vnop_link_desc, (VOPFUNC)err_link }, /* link */ - { &vnop_rename_desc, (VOPFUNC)err_rename }, /* rename */ - { &vnop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (VOPFUNC)err_symlink }, /* symlink */ - { &vnop_readdir_desc, (VOPFUNC)err_readdir }, /* readdir */ - { &vnop_readlink_desc, (VOPFUNC)err_readlink }, /* readlink */ - { &vnop_inactive_desc, (VOPFUNC)err_inactive }, /* inactive */ - { &vnop_reclaim_desc, (VOPFUNC)err_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (VOPFUNC)err_strategy }, /* strategy */ - { &vnop_pathconf_desc, (VOPFUNC)err_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ - { &vnop_bwrite_desc, (VOPFUNC)err_bwrite }, - { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ - { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (VOPFUNC)err_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (VOPFUNC)err_blockmap }, /* blockmap */ + { &vnop_default_desc, (VOPFUNC)vn_default_error }, + { &vnop_lookup_desc, (VOPFUNC)routefserr_lookup }, /* lookup */ + { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ + { &vnop_whiteout_desc, (VOPFUNC)err_whiteout }, /* whiteout */ + { &vnop_mknod_desc, (VOPFUNC)err_mknod }, /* mknod */ + { &vnop_open_desc, (VOPFUNC)err_open }, /* open */ + { &vnop_close_desc, (VOPFUNC)err_close }, /* close */ + { &vnop_getattr_desc, (VOPFUNC)err_getattr }, /* getattr */ + { &vnop_setattr_desc, (VOPFUNC)err_setattr }, /* 
setattr */ + { &vnop_read_desc, (VOPFUNC)err_read }, /* read */ + { &vnop_write_desc, (VOPFUNC)err_write }, /* write */ + { &vnop_ioctl_desc, (VOPFUNC)err_ioctl }, /* ioctl */ + { &vnop_select_desc, (VOPFUNC)err_select }, /* select */ + { &vnop_revoke_desc, (VOPFUNC)err_revoke }, /* revoke */ + { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ + { &vnop_fsync_desc, (VOPFUNC)nop_fsync }, /* fsync */ + { &vnop_remove_desc, (VOPFUNC)err_remove }, /* remove */ + { &vnop_link_desc, (VOPFUNC)err_link }, /* link */ + { &vnop_rename_desc, (VOPFUNC)err_rename }, /* rename */ + { &vnop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (VOPFUNC)err_symlink }, /* symlink */ + { &vnop_readdir_desc, (VOPFUNC)err_readdir }, /* readdir */ + { &vnop_readlink_desc, (VOPFUNC)err_readlink }, /* readlink */ + { &vnop_inactive_desc, (VOPFUNC)err_inactive }, /* inactive */ + { &vnop_reclaim_desc, (VOPFUNC)err_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (VOPFUNC)err_strategy }, /* strategy */ + { &vnop_pathconf_desc, (VOPFUNC)err_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ + { &vnop_bwrite_desc, (VOPFUNC)err_bwrite }, + { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (VOPFUNC)err_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (VOPFUNC)err_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (VOPFUNC)err_blockmap }, /* blockmap */ #if CONFIG_MACF - { &vnop_setlabel_desc, (VOPFUNC)routefserr_setlabel }, /* setlabel */ + { &vnop_setlabel_desc, (VOPFUNC)routefserr_setlabel }, /* setlabel */ #endif - { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } + { (struct vnodeop_desc*)NULL, (int (*)(void *))NULL } }; struct vnodeopv_desc routefs_vnodeop_opv_desc = { &routefs_vnodeop_p, routefs_vnodeop_entries }; - - - diff --git a/bsd/miscfs/specfs/spec_vnops.c b/bsd/miscfs/specfs/spec_vnops.c index 702787a41..7b6f18d9c 100644 --- a/bsd/miscfs/specfs/spec_vnops.c +++ b/bsd/miscfs/specfs/spec_vnops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -100,99 +100,99 @@ #include /* XXX following three prototypes should be in a header file somewhere */ -extern dev_t chrtoblk(dev_t dev); -extern boolean_t iskmemdev(dev_t dev); -extern int bpfkqfilter(dev_t dev, struct knote *kn); +extern dev_t chrtoblk(dev_t dev); +extern boolean_t iskmemdev(dev_t dev); +extern int bpfkqfilter(dev_t dev, struct knote *kn); extern int ptsd_kqfilter(dev_t, struct knote *); extern int ptmx_kqfilter(dev_t, struct knote *); struct vnode *speclisth[SPECHSZ]; /* symbolic sleep message strings for devices */ -char devopn[] = "devopn"; -char devio[] = "devio"; -char devwait[] = "devwait"; -char devin[] = "devin"; -char devout[] = "devout"; -char devioc[] = "devioc"; -char devcls[] = "devcls"; +char devopn[] = "devopn"; +char devio[] = "devio"; +char devwait[] = "devwait"; +char devin[] = "devin"; +char devout[] = "devout"; +char devioc[] = "devioc"; +char devcls[] = "devcls"; #define VOPFUNC int (*)(void *) -int (**spec_vnodeop_p)(void *); +int(**spec_vnodeop_p)(void *); struct vnodeopv_entry_desc spec_vnodeop_entries[] = { { &vnop_default_desc, (VOPFUNC)vn_default_error }, - { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ - { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ - { &vnop_mknod_desc, (VOPFUNC)err_mknod }, /* mknod */ - { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */ - { &vnop_close_desc, (VOPFUNC)spec_close }, /* close */ - { &vnop_access_desc, (VOPFUNC)spec_access }, /* access */ - { &vnop_getattr_desc, (VOPFUNC)spec_getattr }, /* getattr */ - { &vnop_setattr_desc, (VOPFUNC)spec_setattr }, /* setattr */ - { &vnop_read_desc, (VOPFUNC)spec_read }, /* read */ - { &vnop_write_desc, (VOPFUNC)spec_write }, /* write */ - { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ - { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */ - { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */ - { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ - { &vnop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ - { &vnop_remove_desc, (VOPFUNC)err_remove }, /* remove */ - { &vnop_link_desc, (VOPFUNC)err_link }, /* link */ - { &vnop_rename_desc, (VOPFUNC)err_rename }, /* rename */ - { &vnop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (VOPFUNC)err_symlink }, /* symlink */ - { &vnop_readdir_desc, (VOPFUNC)err_readdir }, /* readdir */ - { &vnop_readlink_desc, (VOPFUNC)err_readlink }, /* readlink */ - { &vnop_inactive_desc, (VOPFUNC)nop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (VOPFUNC)nop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ - { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ - { &vnop_bwrite_desc, (VOPFUNC)spec_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ - { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (VOPFUNC)spec_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (VOPFUNC)spec_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (VOPFUNC)spec_blockmap }, /* blockmap */ - { (struct vnodeop_desc*)NULL, (int(*)(void *))NULL } + { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */ + { &vnop_create_desc, (VOPFUNC)err_create }, /* create */ + { &vnop_mknod_desc, (VOPFUNC)err_mknod }, /* mknod */ + { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */ + { &vnop_close_desc, 
(VOPFUNC)spec_close }, /* close */ + { &vnop_access_desc, (VOPFUNC)spec_access }, /* access */ + { &vnop_getattr_desc, (VOPFUNC)spec_getattr }, /* getattr */ + { &vnop_setattr_desc, (VOPFUNC)spec_setattr }, /* setattr */ + { &vnop_read_desc, (VOPFUNC)spec_read }, /* read */ + { &vnop_write_desc, (VOPFUNC)spec_write }, /* write */ + { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */ + { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */ + { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */ + { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */ + { &vnop_fsync_desc, (VOPFUNC)spec_fsync }, /* fsync */ + { &vnop_remove_desc, (VOPFUNC)err_remove }, /* remove */ + { &vnop_link_desc, (VOPFUNC)err_link }, /* link */ + { &vnop_rename_desc, (VOPFUNC)err_rename }, /* rename */ + { &vnop_mkdir_desc, (VOPFUNC)err_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (VOPFUNC)err_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (VOPFUNC)err_symlink }, /* symlink */ + { &vnop_readdir_desc, (VOPFUNC)err_readdir }, /* readdir */ + { &vnop_readlink_desc, (VOPFUNC)err_readlink }, /* readlink */ + { &vnop_inactive_desc, (VOPFUNC)nop_inactive }, /* inactive */ + { &vnop_reclaim_desc, (VOPFUNC)nop_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */ + { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */ + { &vnop_bwrite_desc, (VOPFUNC)spec_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (VOPFUNC)err_pagein }, /* Pagein */ + { &vnop_pageout_desc, (VOPFUNC)err_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (VOPFUNC)spec_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (VOPFUNC)spec_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (VOPFUNC)spec_blockmap }, /* blockmap */ + { (struct vnodeop_desc*)NULL, (int (*)(void *))NULL } }; struct vnodeopv_desc spec_vnodeop_opv_desc = - { &spec_vnodeop_p, spec_vnodeop_entries }; +{ &spec_vnodeop_p, spec_vnodeop_entries }; static void set_blocksize(vnode_t, dev_t); -#define LOWPRI_TIER1_WINDOW_MSECS 25 -#define LOWPRI_TIER2_WINDOW_MSECS 100 -#define LOWPRI_TIER3_WINDOW_MSECS 500 +#define LOWPRI_TIER1_WINDOW_MSECS 25 +#define LOWPRI_TIER2_WINDOW_MSECS 100 +#define LOWPRI_TIER3_WINDOW_MSECS 500 -#define LOWPRI_TIER1_IO_PERIOD_MSECS 40 -#define LOWPRI_TIER2_IO_PERIOD_MSECS 85 -#define LOWPRI_TIER3_IO_PERIOD_MSECS 200 +#define LOWPRI_TIER1_IO_PERIOD_MSECS 40 +#define LOWPRI_TIER2_IO_PERIOD_MSECS 85 +#define LOWPRI_TIER3_IO_PERIOD_MSECS 200 #define LOWPRI_TIER1_IO_PERIOD_SSD_MSECS 5 #define LOWPRI_TIER2_IO_PERIOD_SSD_MSECS 15 #define LOWPRI_TIER3_IO_PERIOD_SSD_MSECS 25 -int throttle_windows_msecs[THROTTLE_LEVEL_END + 1] = { +int throttle_windows_msecs[THROTTLE_LEVEL_END + 1] = { 0, LOWPRI_TIER1_WINDOW_MSECS, LOWPRI_TIER2_WINDOW_MSECS, LOWPRI_TIER3_WINDOW_MSECS, }; -int throttle_io_period_msecs[THROTTLE_LEVEL_END + 1] = { +int throttle_io_period_msecs[THROTTLE_LEVEL_END + 1] = { 0, LOWPRI_TIER1_IO_PERIOD_MSECS, LOWPRI_TIER2_IO_PERIOD_MSECS, LOWPRI_TIER3_IO_PERIOD_MSECS, }; -int throttle_io_period_ssd_msecs[THROTTLE_LEVEL_END + 1] = { +int throttle_io_period_ssd_msecs[THROTTLE_LEVEL_END + 1] = { 0, LOWPRI_TIER1_IO_PERIOD_SSD_MSECS, LOWPRI_TIER2_IO_PERIOD_SSD_MSECS, @@ -200,29 +200,29 @@ int throttle_io_period_ssd_msecs[THROTTLE_LEVEL_END + 1] = { }; -int throttled_count[THROTTLE_LEVEL_END + 1]; +int throttled_count[THROTTLE_LEVEL_END + 1]; struct _throttle_io_info_t { - lck_mtx_t throttle_lock; - - struct 
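[Editor's sketch, not part of the patch: the throttle_windows_msecs / throttle_io_period_msecs tables reflowed above leave slot 0 zeroed so a throttle level (tier 1..3) indexes them directly, with SSD media getting much shorter I/O periods. A compilable model of the window lookup, reusing the tier window values from the table; the X-prefixed names are invented stand-ins.]

    #include <stdio.h>
    #include <stdint.h>

    /* Slot 0 is a placeholder so a throttle level indexes directly. */
    enum { XTIER0, XTIER1, XTIER2, XTIER3, XLEVEL_END = XTIER3 };

    static const int window_msecs[XLEVEL_END + 1] = { 0, 25, 100, 500 };

    /* An I/O at `level` is still inside its throttle window while the
     * elapsed time since the window opened is below the tier's limit. */
    static int window_still_open(int level, uint64_t elapsed_ms)
    {
        return elapsed_ms < (uint64_t)window_msecs[level];
    }

    int main(void)
    {
        printf("tier3 at 200ms elapsed: %s\n",
            window_still_open(XTIER3, 200) ? "throttled" : "clear");
        return 0;
    }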
timeval throttle_last_write_timestamp; - struct timeval throttle_min_timer_deadline; - struct timeval throttle_window_start_timestamp[THROTTLE_LEVEL_END + 1]; /* window starts at both the beginning and completion of an I/O */ - struct timeval throttle_last_IO_timestamp[THROTTLE_LEVEL_END + 1]; - pid_t throttle_last_IO_pid[THROTTLE_LEVEL_END + 1]; - struct timeval throttle_start_IO_period_timestamp[THROTTLE_LEVEL_END + 1]; + lck_mtx_t throttle_lock; + + struct timeval throttle_last_write_timestamp; + struct timeval throttle_min_timer_deadline; + struct timeval throttle_window_start_timestamp[THROTTLE_LEVEL_END + 1]; /* window starts at both the beginning and completion of an I/O */ + struct timeval throttle_last_IO_timestamp[THROTTLE_LEVEL_END + 1]; + pid_t throttle_last_IO_pid[THROTTLE_LEVEL_END + 1]; + struct timeval throttle_start_IO_period_timestamp[THROTTLE_LEVEL_END + 1]; int32_t throttle_inflight_count[THROTTLE_LEVEL_END + 1]; - TAILQ_HEAD( , uthread) throttle_uthlist[THROTTLE_LEVEL_END + 1]; /* Lists of throttled uthreads */ - int throttle_next_wake_level; + TAILQ_HEAD(, uthread) throttle_uthlist[THROTTLE_LEVEL_END + 1]; /* Lists of throttled uthreads */ + int throttle_next_wake_level; - thread_call_t throttle_timer_call; - int32_t throttle_timer_ref; - int32_t throttle_timer_active; + thread_call_t throttle_timer_call; + int32_t throttle_timer_ref; + int32_t throttle_timer_active; - int32_t throttle_io_count; - int32_t throttle_io_count_begin; - int *throttle_io_periods; + int32_t throttle_io_count; + int32_t throttle_io_count_begin; + int *throttle_io_periods; uint32_t throttle_io_period_num; int32_t throttle_refcnt; @@ -234,7 +234,7 @@ struct _throttle_io_info_t { struct _throttle_io_info_t _throttle_io_info[LOWPRI_MAX_NUM_DEV]; -int lowpri_throttle_enabled = 1; +int lowpri_throttle_enabled = 1; static void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level); @@ -249,44 +249,43 @@ void throttle_info_mount_reset_period(mount_t mp, int isssd); int spec_lookup(struct vnop_lookup_args *ap) { - *ap->a_vpp = NULL; - return (ENOTDIR); + return ENOTDIR; } static void set_blocksize(struct vnode *vp, dev_t dev) { - int (*size)(dev_t); - int rsize; + int (*size)(dev_t); + int rsize; - if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) { - rsize = (*size)(dev); - if (rsize <= 0) /* did size fail? */ - vp->v_specsize = DEV_BSIZE; - else - vp->v_specsize = rsize; - } - else - vp->v_specsize = DEV_BSIZE; + if ((major(dev) < nblkdev) && (size = bdevsw[major(dev)].d_psize)) { + rsize = (*size)(dev); + if (rsize <= 0) { /* did size fail? */ + vp->v_specsize = DEV_BSIZE; + } else { + vp->v_specsize = rsize; + } + } else { + vp->v_specsize = DEV_BSIZE; + } } void set_fsblocksize(struct vnode *vp) { - if (vp->v_type == VBLK) { dev_t dev = (dev_t)vp->v_rdev; int maj = major(dev); - if ((u_int)maj >= (u_int)nblkdev) + if ((u_int)maj >= (u_int)nblkdev) { return; + } vnode_lock(vp); set_blocksize(vp, dev); vnode_unlock(vp); } - } @@ -306,33 +305,37 @@ spec_open(struct vnop_open_args *ap) /* * Don't allow open if fs is mounted -nodev. 
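[Editor's sketch, not part of the patch: set_blocksize() above, after a bounds check on the major number, asks the block driver's d_psize probe for the device's native block size and falls back to DEV_BSIZE when the probe is missing or fails. A user-space analog of that fallback, with a hypothetical probe function.]

    #include <stdio.h>
    #include <stddef.h>

    #define XDEV_BSIZE 512   /* stand-in for the DEV_BSIZE fallback */

    /* Hypothetical d_psize-style probe: returns the device's native block
     * size, or a value <= 0 when the size cannot be determined. */
    typedef int (*psize_fn)(int dev);

    /* Mirrors set_blocksize(): use the driver-reported size when a probe
     * exists and succeeds, otherwise fall back to the 512-byte default. */
    static int pick_blocksize(psize_fn probe, int dev)
    {
        int rsize;

        if (probe != NULL && (rsize = probe(dev)) > 0) {
            return rsize;
        }
        return XDEV_BSIZE;
    }

    static int fake_psize(int dev) { (void)dev; return 4096; }

    int main(void)
    {
        printf("probed: %d, fallback: %d\n",
            pick_blocksize(fake_psize, 0), pick_blocksize(NULL, 0));
        return 0;
    }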
*/ - if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV)) - return (ENXIO); + if (vp->v_mount && (vp->v_mount->mnt_flag & MNT_NODEV)) { + return ENXIO; + } switch (vp->v_type) { - case VCHR: - if ((u_int)maj >= (u_int)nchrdev) - return (ENXIO); + if ((u_int)maj >= (u_int)nchrdev) { + return ENXIO; + } if (cred != FSCRED && (ap->a_mode & FWRITE)) { /* * When running in very secure mode, do not allow * opens for writing of any disk character devices. */ - if (securelevel >= 2 && isdisk(dev, VCHR)) - return (EPERM); + if (securelevel >= 2 && isdisk(dev, VCHR)) { + return EPERM; + } /* Never allow writing to /dev/mem or /dev/kmem */ - if (iskmemdev(dev)) - return (EPERM); + if (iskmemdev(dev)) { + return EPERM; + } /* * When running in secure mode, do not allow opens for * writing of character devices whose corresponding block * devices are currently mounted. */ if (securelevel >= 1) { - if ((bdev = chrtoblk(dev)) != NODEV && check_mountedon(bdev, VBLK, &error)) - return (error); + if ((bdev = chrtoblk(dev)) != NODEV && check_mountedon(bdev, VBLK, &error)) { + return error; + } } } @@ -346,12 +349,11 @@ spec_open(struct vnop_open_args *ap) devsw_unlock(dev, S_IFCHR); if (error == 0 && cdevsw[maj].d_type == D_DISK && !vp->v_un.vu_specinfo->si_initted) { - int isssd = 0; + int isssd = 0; uint64_t throttle_mask = 0; uint32_t devbsdunit = 0; if (VNOP_IOCTL(vp, DKIOCGETTHROTTLEMASK, (caddr_t)&throttle_mask, 0, NULL) == 0) { - if (throttle_mask != 0 && VNOP_IOCTL(vp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ap->a_context) == 0) { /* @@ -361,7 +363,7 @@ spec_open(struct vnop_open_args *ap) devbsdunit = num_trailing_0(throttle_mask); vnode_lock(vp); - + vp->v_un.vu_specinfo->si_isssd = isssd; vp->v_un.vu_specinfo->si_devbsdunit = devbsdunit; vp->v_un.vu_specinfo->si_throttle_mask = throttle_mask; @@ -377,24 +379,27 @@ spec_open(struct vnop_open_args *ap) vnode_unlock(vp); } } - return (error); + return error; case VBLK: - if ((u_int)maj >= (u_int)nblkdev) - return (ENXIO); + if ((u_int)maj >= (u_int)nblkdev) { + return ENXIO; + } /* * When running in very secure mode, do not allow * opens for writing of any disk block devices. */ if (securelevel >= 2 && cred != FSCRED && - (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK) - return (EPERM); + (ap->a_mode & FWRITE) && bdevsw[maj].d_type == D_DISK) { + return EPERM; + } /* * Do not allow opens of block devices that are * currently mounted. */ - if ( (error = vfs_mountedon(vp)) ) - return (error); + if ((error = vfs_mountedon(vp))) { + return error; + } devsw_lock(dev, S_IFBLK); error = (*bdevsw[maj].d_open)(dev, ap->a_mode, S_IFBLK, p); @@ -404,47 +409,47 @@ spec_open(struct vnop_open_args *ap) devsw_unlock(dev, S_IFBLK); if (!error) { - u_int64_t blkcnt; - u_int32_t blksize; + u_int64_t blkcnt; + u_int32_t blksize; int setsize = 0; u_int32_t size512 = 512; - if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) { + if (!VNOP_IOCTL(vp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0, ap->a_context)) { /* Switch to 512 byte sectors (temporarily) */ if (!VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, ap->a_context)) { - /* Get the number of 512 byte physical blocks. */ - if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) { + /* Get the number of 512 byte physical blocks. 
*/ + if (!VNOP_IOCTL(vp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0, ap->a_context)) { setsize = 1; - } + } } /* If it doesn't set back, we can't recover */ - if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context)) - error = ENXIO; - } + if (VNOP_IOCTL(vp, DKIOCSETBLOCKSIZE, (caddr_t)&blksize, FWRITE, ap->a_context)) { + error = ENXIO; + } + } vnode_lock(vp); - set_blocksize(vp, dev); + set_blocksize(vp, dev); - /* - * Cache the size in bytes of the block device for later - * use by spec_write(). - */ - if (setsize) + /* + * Cache the size in bytes of the block device for later + * use by spec_write(). + */ + if (setsize) { vp->v_specdevsize = blkcnt * (u_int64_t)size512; - else - vp->v_specdevsize = (u_int64_t)0; /* Default: Can't get */ - + } else { + vp->v_specdevsize = (u_int64_t)0; /* Default: Can't get */ + } vnode_unlock(vp); - } - return(error); + return error; default: - panic("spec_open type"); + panic("spec_open type"); } - return (0); + return 0; } /* @@ -458,65 +463,70 @@ spec_read(struct vnop_read_args *ap) struct buf *bp; daddr64_t bn, nextbn; long bsize, bscale; - int devBlockSize=0; + int devBlockSize = 0; int n, on; int error = 0; dev_t dev; #if DIAGNOSTIC - if (uio->uio_rw != UIO_READ) + if (uio->uio_rw != UIO_READ) { panic("spec_read mode"); - if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) + } + if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { panic("spec_read proc"); + } #endif - if (uio_resid(uio) == 0) - return (0); + if (uio_resid(uio) == 0) { + return 0; + } switch (vp->v_type) { - case VCHR: - { - struct _throttle_io_info_t *throttle_info = NULL; - int thread_throttle_level; - if (cdevsw[major(vp->v_rdev)].d_type == D_DISK && vp->v_un.vu_specinfo->si_throttleable) { + { + struct _throttle_io_info_t *throttle_info = NULL; + int thread_throttle_level; + if (cdevsw[major(vp->v_rdev)].d_type == D_DISK && vp->v_un.vu_specinfo->si_throttleable) { throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit]; - thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL); - } + thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL); + } error = (*cdevsw[major(vp->v_rdev)].d_read) - (vp->v_rdev, uio, ap->a_ioflag); + (vp->v_rdev, uio, ap->a_ioflag); - if (throttle_info) { - throttle_info_end_io_internal(throttle_info, thread_throttle_level); - } - - return (error); + if (throttle_info) { + throttle_info_end_io_internal(throttle_info, thread_throttle_level); } + return error; + } + case VBLK: - if (uio->uio_offset < 0) - return (EINVAL); + if (uio->uio_offset < 0) { + return EINVAL; + } dev = vp->v_rdev; devBlockSize = vp->v_specsize; - if (devBlockSize > PAGE_SIZE) - return (EINVAL); + if (devBlockSize > PAGE_SIZE) { + return EINVAL; + } - bscale = PAGE_SIZE / devBlockSize; + bscale = PAGE_SIZE / devBlockSize; bsize = bscale * devBlockSize; do { on = uio->uio_offset % bsize; - bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ (bscale - 1)); - + bn = (daddr64_t)((uio->uio_offset / devBlockSize) & ~(bscale - 1)); + if (vp->v_speclastr + bscale == bn) { - nextbn = bn + bscale; + nextbn = bn + bscale; error = buf_breadn(vp, bn, (int)bsize, &nextbn, - (int *)&bsize, 1, NOCRED, &bp); - } else - error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp); + (int *)&bsize, 1, NOCRED, &bp); + } else { + error = buf_bread(vp, bn, (int)bsize, NOCRED, &bp); + } vnode_lock(vp); vp->v_speclastr = bn; @@ -524,26 +534,28 @@ spec_read(struct 
vnop_read_args *ap) n = bsize - buf_resid(bp); if ((on > n) || error) { - if (!error) - error = EINVAL; + if (!error) { + error = EINVAL; + } buf_brelse(bp); - return (error); + return error; } n = min((unsigned)(n - on), uio_resid(uio)); error = uiomove((char *)buf_dataptr(bp) + on, n, uio); - if (n + on == bsize) + if (n + on == bsize) { buf_markaged(bp); + } buf_brelse(bp); } while (error == 0 && uio_resid(uio) > 0 && n != 0); - return (error); + return error; default: panic("spec_read type"); } /* NOTREACHED */ - return (0); + return 0; } /* @@ -558,62 +570,66 @@ spec_write(struct vnop_write_args *ap) daddr64_t bn; int bsize, blkmask, bscale; int io_sync; - int devBlockSize=0; + int devBlockSize = 0; int n, on; int error = 0; dev_t dev; #if DIAGNOSTIC - if (uio->uio_rw != UIO_WRITE) + if (uio->uio_rw != UIO_WRITE) { panic("spec_write mode"); - if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) + } + if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { panic("spec_write proc"); + } #endif switch (vp->v_type) { - case VCHR: - { - struct _throttle_io_info_t *throttle_info = NULL; - int thread_throttle_level; - if (cdevsw[major(vp->v_rdev)].d_type == D_DISK && vp->v_un.vu_specinfo->si_throttleable) { + { + struct _throttle_io_info_t *throttle_info = NULL; + int thread_throttle_level; + if (cdevsw[major(vp->v_rdev)].d_type == D_DISK && vp->v_un.vu_specinfo->si_throttleable) { throttle_info = &_throttle_io_info[vp->v_un.vu_specinfo->si_devbsdunit]; - thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL); + thread_throttle_level = throttle_info_update_internal(throttle_info, NULL, 0, vp->v_un.vu_specinfo->si_isssd, TRUE, NULL); microuptime(&throttle_info->throttle_last_write_timestamp); - } + } error = (*cdevsw[major(vp->v_rdev)].d_write) - (vp->v_rdev, uio, ap->a_ioflag); - - if (throttle_info) { - throttle_info_end_io_internal(throttle_info, thread_throttle_level); - } + (vp->v_rdev, uio, ap->a_ioflag); - return (error); + if (throttle_info) { + throttle_info_end_io_internal(throttle_info, thread_throttle_level); } + return error; + } + case VBLK: - if (uio_resid(uio) == 0) - return (0); - if (uio->uio_offset < 0) - return (EINVAL); + if (uio_resid(uio) == 0) { + return 0; + } + if (uio->uio_offset < 0) { + return EINVAL; + } io_sync = (ap->a_ioflag & IO_SYNC); dev = (vp->v_rdev); devBlockSize = vp->v_specsize; - if (devBlockSize > PAGE_SIZE) - return(EINVAL); + if (devBlockSize > PAGE_SIZE) { + return EINVAL; + } - bscale = PAGE_SIZE / devBlockSize; + bscale = PAGE_SIZE / devBlockSize; blkmask = bscale - 1; bsize = bscale * devBlockSize; - + do { - bn = (daddr64_t)((uio->uio_offset / devBlockSize) &~ blkmask); + bn = (daddr64_t)((uio->uio_offset / devBlockSize) & ~blkmask); on = uio->uio_offset % bsize; n = min((unsigned)(bsize - on), uio_resid(uio)); @@ -631,48 +647,51 @@ spec_write(struct vnop_write_args *ap) if (n == bsize && vp->v_specdevsize != (u_int64_t)0 && (uio->uio_offset + (u_int64_t)n) > vp->v_specdevsize) { - /* reduce the size of the read to what is there */ - n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize; + /* reduce the size of the read to what is there */ + n = (uio->uio_offset + (u_int64_t)n) - vp->v_specdevsize; } - if (n == bsize) - bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE); - else - error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp); + if (n == bsize) { + bp = buf_getblk(vp, bn, bsize, 0, 0, BLK_WRITE); + } else { + error = (int)buf_bread(vp, bn, bsize, NOCRED, &bp); + } /* Translate downstream 
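[Editor's sketch, not part of the patch: spec_read() and spec_write() above transfer through page-sized buffers, deriving bscale = PAGE_SIZE / devBlockSize and rounding the device block number down with & ~(bscale - 1); this is why both reject devBlockSize > PAGE_SIZE up front, keeping bscale a power-of-two count of blocks per page. A compilable model of that arithmetic, assuming a 4 KiB page.]

    #include <stdio.h>
    #include <stdint.h>

    #define XPAGE_SIZE 4096u   /* assumed page size for the example */

    /* `bn` is the device-block number of the page-sized buffer containing
     * `offset`, and `on` is the byte offset inside that buffer.  The
     * & ~(bscale - 1) round-down requires bscale to be a power of two,
     * which holds for the power-of-two block sizes the kernel accepts. */
    static void map_offset(uint64_t offset, uint32_t dev_blocksize,
        uint64_t *bn, uint32_t *on)
    {
        uint32_t bscale = XPAGE_SIZE / dev_blocksize;  /* blocks per page */
        uint32_t bsize  = bscale * dev_blocksize;      /* bytes per buffer */

        *bn = (offset / dev_blocksize) & ~(uint64_t)(bscale - 1);
        *on = (uint32_t)(offset % bsize);
    }

    int main(void)
    {
        uint64_t bn;
        uint32_t on;

        map_offset(10000, 512, &bn, &on);   /* -> bn 16, on 1808 */
        printf("bn=%llu on=%u\n", (unsigned long long)bn, on);
        return 0;
    }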
error for upstream, if needed */ - if (!error) + if (!error) { error = (int)buf_error(bp); + } if (error) { buf_brelse(bp); - return (error); + return error; } n = min(n, bsize - buf_resid(bp)); error = uiomove((char *)buf_dataptr(bp) + on, n, uio); if (error) { buf_brelse(bp); - return (error); + return error; } buf_markaged(bp); - if (io_sync) - error = buf_bwrite(bp); - else { - if ((n + on) == bsize) - error = buf_bawrite(bp); - else - error = buf_bdwrite(bp); + if (io_sync) { + error = buf_bwrite(bp); + } else { + if ((n + on) == bsize) { + error = buf_bawrite(bp); + } else { + error = buf_bdwrite(bp); + } } } while (error == 0 && uio_resid(uio) > 0 && n != 0); - return (error); + return error; default: panic("spec_write type"); } /* NOTREACHED */ - return (0); + return 0; } /* @@ -683,22 +702,22 @@ spec_ioctl(struct vnop_ioctl_args *ap) { proc_t p = vfs_context_proc(ap->a_context); dev_t dev = ap->a_vp->v_rdev; - int retval = 0; + int retval = 0; KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_START, - dev, ap->a_command, ap->a_fflag, ap->a_vp->v_type, 0); + dev, ap->a_command, ap->a_fflag, ap->a_vp->v_type, 0); switch (ap->a_vp->v_type) { - case VCHR: retval = (*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, - ap->a_fflag, p); + ap->a_fflag, p); break; case VBLK: retval = (*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, ap->a_fflag, p); - if (!retval && ap->a_command == DKIOCSETBLOCKSIZE) + if (!retval && ap->a_command == DKIOCSETBLOCKSIZE) { ap->a_vp->v_specsize = *(uint32_t *)ap->a_data; + } break; default: @@ -706,9 +725,9 @@ spec_ioctl(struct vnop_ioctl_args *ap) /* NOTREACHED */ } KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_END, - dev, ap->a_command, ap->a_fflag, retval, 0); + dev, ap->a_command, ap->a_fflag, retval, 0); - return (retval); + return retval; } int @@ -718,9 +737,8 @@ spec_select(struct vnop_select_args *ap) dev_t dev; switch (ap->a_vp->v_type) { - default: - return (1); /* XXX */ + return 1; /* XXX */ case VCHR: dev = ap->a_vp->v_rdev; @@ -792,14 +810,15 @@ spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_internal_s *kev) int spec_fsync_internal(vnode_t vp, int waitfor, __unused vfs_context_t context) { - if (vp->v_type == VCHR) - return (0); + if (vp->v_type == VCHR) { + return 0; + } /* * Flush all dirty buffers associated with a block device. */ buf_flushdirtyblks(vp, (waitfor == MNT_WAIT || waitfor == MNT_DWAIT), 0, "spec_fsync"); - return (0); + return 0; } int @@ -815,14 +834,14 @@ spec_fsync(struct vnop_fsync_args *ap) void throttle_init(void); -#if 0 -#define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...) \ - do { \ - if ((debug_info)->alloc) \ - printf("%s: "format, __FUNCTION__, ## args); \ +#if 0 +#define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...) \ + do { \ + if ((debug_info)->alloc) \ + printf("%s: "format, __FUNCTION__, ## args); \ } while(0) -#else +#else #define DEBUG_ALLOC_THROTTLE_INFO(format, debug_info, args...) 
#endif @@ -858,8 +877,9 @@ num_trailing_0(uint64_t n) * since in most cases the number of trailing 0s is very small, * we simply counting sequentially from the lowest bit */ - if (n == 0) + if (n == 0) { return sizeof(n) * 8; + } int count = 0; while (!ISSET(n, 1)) { n >>= 1; @@ -880,22 +900,23 @@ throttle_info_rel(struct _throttle_io_info_t *info) { SInt32 oldValue = OSDecrementAtomic(&info->throttle_refcnt); - DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", - info, (int)(oldValue -1), info ); + DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", + info, (int)(oldValue - 1), info ); /* The reference count just went negative, very bad */ - if (oldValue == 0) + if (oldValue == 0) { panic("throttle info ref cnt went negative!"); + } - /* - * Once reference count is zero, no one else should be able to take a - * reference + /* + * Once reference count is zero, no one else should be able to take a + * reference */ if ((info->throttle_refcnt == 0) && (info->throttle_alloc)) { DEBUG_ALLOC_THROTTLE_INFO("Freeing info = %p\n", info); - + lck_mtx_destroy(&info->throttle_lock, throttle_lock_grp); - FREE(info, M_TEMP); + FREE(info, M_TEMP); } return oldValue; } @@ -911,11 +932,12 @@ throttle_info_ref(struct _throttle_io_info_t *info) { SInt32 oldValue = OSIncrementAtomic(&info->throttle_refcnt); - DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", - info, (int)(oldValue -1), info ); + DEBUG_ALLOC_THROTTLE_INFO("refcnt = %d info = %p\n", + info, (int)(oldValue - 1), info ); /* Allocated items should never have a reference of zero */ - if (info->throttle_alloc && (oldValue == 0)) + if (info->throttle_alloc && (oldValue == 0)) { panic("Taking a reference without calling create throttle info!\n"); + } return oldValue; } @@ -932,16 +954,16 @@ throttle_info_ref(struct _throttle_io_info_t *info) */ static uint32_t throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count, int wakelevel) -{ +{ struct timeval elapsed; struct timeval now; struct timeval period; - uint64_t elapsed_msecs; - int throttle_level; - int level; - int msecs; - boolean_t throttled = FALSE; - boolean_t need_timer = FALSE; + uint64_t elapsed_msecs; + int throttle_level; + int level; + int msecs; + boolean_t throttled = FALSE; + boolean_t need_timer = FALSE; microuptime(&now); @@ -949,8 +971,9 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count info->throttle_io_count_begin = info->throttle_io_count; info->throttle_io_period_num++; - while (wakelevel >= THROTTLE_LEVEL_THROTTLED) + while (wakelevel >= THROTTLE_LEVEL_THROTTLED) { info->throttle_start_IO_period_timestamp[wakelevel--] = now; + } info->throttle_min_timer_deadline = now; @@ -961,15 +984,12 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count timevaladd(&info->throttle_min_timer_deadline, &period); } for (throttle_level = THROTTLE_LEVEL_START; throttle_level < THROTTLE_LEVEL_END; throttle_level++) { - elapsed = now; timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]); elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); for (level = throttle_level + 1; level <= THROTTLE_LEVEL_END; level++) { - if (!TAILQ_EMPTY(&info->throttle_uthlist[level])) { - if (elapsed_msecs < (uint64_t)throttle_windows_msecs[level] || info->throttle_inflight_count[throttle_level]) { /* * we had an I/O occur at a higher priority tier within @@ -987,23 +1007,24 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count break; } } - 
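[Editor's sketch, not part of the patch: num_trailing_0() above converts a device's throttle_mask into its devbsdunit by counting trailing zero bits, i.e. finding the position of the lowest set bit. A standalone version of the same sequential count; on GCC and Clang, __builtin_ctzll(n) is the usual shortcut for non-zero n.]

    #include <stdio.h>
    #include <stdint.h>

    /* Counts trailing zero bits sequentially, returning 64 for an
     * all-zero input, matching the kernel routine's behavior. */
    static unsigned trailing_zeros(uint64_t n)
    {
        if (n == 0) {
            return sizeof(n) * 8;
        }
        unsigned count = 0;
        while ((n & 1) == 0) {
            n >>= 1;
            count++;
        }
        return count;
    }

    int main(void)
    {
        /* A mask with only bit 6 set maps to device unit 6. */
        printf("%u\n", trailing_zeros(UINT64_C(1) << 6));
        return 0;
    }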
if (throttled == TRUE) + if (throttled == TRUE) { break; + } } if (throttled == TRUE) { - uint64_t deadline = 0; + uint64_t deadline = 0; struct timeval target; struct timeval min_target; - /* + /* * we've got at least one tier still in a throttled window * so we need a timer running... compute the next deadline * and schedule it */ - for (level = throttle_level+1; level <= THROTTLE_LEVEL_END; level++) { - - if (TAILQ_EMPTY(&info->throttle_uthlist[level])) + for (level = throttle_level + 1; level <= THROTTLE_LEVEL_END; level++) { + if (TAILQ_EMPTY(&info->throttle_uthlist[level])) { continue; + } target = info->throttle_start_IO_period_timestamp[level]; @@ -1012,15 +1033,16 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count period.tv_usec = (msecs % 1000) * 1000; timevaladd(&target, &period); - + if (need_timer == FALSE || timevalcmp(&target, &min_target, <)) { min_target = target; need_timer = TRUE; } } if (timevalcmp(&info->throttle_min_timer_deadline, &now, >)) { - if (timevalcmp(&info->throttle_min_timer_deadline, &min_target, >)) - min_target = info->throttle_min_timer_deadline; + if (timevalcmp(&info->throttle_min_timer_deadline, &min_target, >)) { + min_target = info->throttle_min_timer_deadline; + } } if (info->throttle_timer_active) { @@ -1032,8 +1054,9 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count * proceed and eventually re-run this function */ need_timer = FALSE; - } else + } else { info->throttle_timer_active = 0; + } } if (need_timer == TRUE) { /* @@ -1043,7 +1066,7 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count * 32-bit which allows us to use the clock_interval_to_deadline() * routine. */ - int target_msecs; + int target_msecs; if (info->throttle_timer_ref == 0) { /* @@ -1071,7 +1094,7 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count info->throttle_timer_active = 1; } } - return (throttle_level); + return throttle_level; } @@ -1079,19 +1102,19 @@ static void throttle_timer(struct _throttle_io_info_t *info) { uthread_t ut, utlist; - struct timeval elapsed; - struct timeval now; - uint64_t elapsed_msecs; - int throttle_level; - int level; - int wake_level; - caddr_t wake_address = NULL; - boolean_t update_io_count = FALSE; - boolean_t need_wakeup = FALSE; - boolean_t need_release = FALSE; + struct timeval elapsed; + struct timeval now; + uint64_t elapsed_msecs; + int throttle_level; + int level; + int wake_level; + caddr_t wake_address = NULL; + boolean_t update_io_count = FALSE; + boolean_t need_wakeup = FALSE; + boolean_t need_release = FALSE; ut = NULL; - lck_mtx_lock(&info->throttle_lock); + lck_mtx_lock(&info->throttle_lock); info->throttle_timer_active = 0; microuptime(&now); @@ -1101,11 +1124,9 @@ throttle_timer(struct _throttle_io_info_t *info) elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); if (elapsed_msecs >= (uint64_t)info->throttle_io_periods[THROTTLE_LEVEL_THROTTLED]) { - wake_level = info->throttle_next_wake_level; for (level = THROTTLE_LEVEL_START; level < THROTTLE_LEVEL_END; level++) { - elapsed = now; timevalsub(&elapsed, &info->throttle_start_IO_period_timestamp[wake_level]); elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); @@ -1121,20 +1142,21 @@ throttle_timer(struct _throttle_io_info_t *info) info->throttle_next_wake_level = wake_level - 1; - if (info->throttle_next_wake_level == THROTTLE_LEVEL_START) + if (info->throttle_next_wake_level == 
THROTTLE_LEVEL_START) { info->throttle_next_wake_level = THROTTLE_LEVEL_END; + } break; } wake_level--; - if (wake_level == THROTTLE_LEVEL_START) + if (wake_level == THROTTLE_LEVEL_START) { wake_level = THROTTLE_LEVEL_END; + } } } if (need_wakeup == TRUE) { if (!TAILQ_EMPTY(&info->throttle_uthlist[wake_level])) { - ut = (uthread_t)TAILQ_FIRST(&info->throttle_uthlist[wake_level]); TAILQ_REMOVE(&info->throttle_uthlist[wake_level], ut, uu_throttlelist); ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE; @@ -1142,18 +1164,18 @@ throttle_timer(struct _throttle_io_info_t *info) wake_address = (caddr_t)&ut->uu_on_throttlelist; } - } else + } else { wake_level = THROTTLE_LEVEL_START; + } - throttle_level = throttle_timer_start(info, update_io_count, wake_level); + throttle_level = throttle_timer_start(info, update_io_count, wake_level); - if (wake_address != NULL) + if (wake_address != NULL) { wakeup(wake_address); + } for (level = THROTTLE_LEVEL_THROTTLED; level <= throttle_level; level++) { - TAILQ_FOREACH_SAFE(ut, &info->throttle_uthlist[level], uu_throttlelist, utlist) { - TAILQ_REMOVE(&info->throttle_uthlist[level], ut, uu_throttlelist); ut->uu_on_throttlelist = THROTTLE_LEVEL_NONE; ut->uu_is_throttled = false; @@ -1165,10 +1187,11 @@ throttle_timer(struct _throttle_io_info_t *info) info->throttle_timer_ref = 0; need_release = TRUE; } - lck_mtx_unlock(&info->throttle_lock); + lck_mtx_unlock(&info->throttle_lock); - if (need_release == TRUE) + if (need_release == TRUE) { throttle_info_rel(info); + } } @@ -1183,10 +1206,11 @@ throttle_add_to_list(struct _throttle_io_info_t *info, uthread_t ut, int mylevel start_timer = TRUE; } - if (insert_tail == TRUE) + if (insert_tail == TRUE) { TAILQ_INSERT_TAIL(&info->throttle_uthlist[mylevel], ut, uu_throttlelist); - else + } else { TAILQ_INSERT_HEAD(&info->throttle_uthlist[mylevel], ut, uu_throttlelist); + } ut->uu_on_throttlelist = mylevel; @@ -1202,7 +1226,7 @@ throttle_add_to_list(struct _throttle_io_info_t *info, uthread_t ut, int mylevel } } } - return (level); + return level; } static void @@ -1219,24 +1243,30 @@ throttle_init_throttle_window(void) */ /* Override global values with device-tree properties */ - if (PE_get_default("kern.io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_get_default("kern.io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER1] = throttle_window_size; + } - if (PE_get_default("kern.io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_get_default("kern.io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER2] = throttle_window_size; + } - if (PE_get_default("kern.io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_get_default("kern.io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER3] = throttle_window_size; - + } + /* Override with boot-args */ - if (PE_parse_boot_argn("io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_parse_boot_argn("io_throttle_window_tier1", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER1] = throttle_window_size; + } - if (PE_parse_boot_argn("io_throttle_window_tier2", &throttle_window_size, sizeof(throttle_window_size))) + if (PE_parse_boot_argn("io_throttle_window_tier2", 
&throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER2] = throttle_window_size; - - if (PE_parse_boot_argn("io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) + } + + if (PE_parse_boot_argn("io_throttle_window_tier3", &throttle_window_size, sizeof(throttle_window_size))) { throttle_windows_msecs[THROTTLE_LEVEL_TIER3] = throttle_window_size; + } } static void @@ -1253,65 +1283,71 @@ throttle_init_throttle_period(struct _throttle_io_info_t *info, boolean_t isssd) */ /* Assign global defaults */ - if ((isssd == TRUE) && (info->throttle_is_fusion_with_priority == 0)) + if ((isssd == TRUE) && (info->throttle_is_fusion_with_priority == 0)) { info->throttle_io_periods = &throttle_io_period_ssd_msecs[0]; - else + } else { info->throttle_io_periods = &throttle_io_period_msecs[0]; + } /* Override global values with device-tree properties */ - if (PE_get_default("kern.io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) + if (PE_get_default("kern.io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER1] = throttle_period_size; - - if (PE_get_default("kern.io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) + } + + if (PE_get_default("kern.io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER2] = throttle_period_size; + } - if (PE_get_default("kern.io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) + if (PE_get_default("kern.io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER3] = throttle_period_size; - + } + /* Override with boot-args */ - if (PE_parse_boot_argn("io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) + if (PE_parse_boot_argn("io_throttle_period_tier1", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER1] = throttle_period_size; - - if (PE_parse_boot_argn("io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) + } + + if (PE_parse_boot_argn("io_throttle_period_tier2", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER2] = throttle_period_size; + } - if (PE_parse_boot_argn("io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) + if (PE_parse_boot_argn("io_throttle_period_tier3", &throttle_period_size, sizeof(throttle_period_size))) { info->throttle_io_periods[THROTTLE_LEVEL_TIER3] = throttle_period_size; - + } } #if CONFIG_IOSCHED -extern void vm_io_reprioritize_init(void); -int iosched_enabled = 1; +extern void vm_io_reprioritize_init(void); +int iosched_enabled = 1; #endif void throttle_init(void) { - struct _throttle_io_info_t *info; - int i; - int level; + struct _throttle_io_info_t *info; + int i; + int level; #if CONFIG_IOSCHED - int iosched; + int iosched; #endif - /* - * allocate lock group attribute and group - */ - throttle_lock_grp_attr = lck_grp_attr_alloc_init(); - throttle_lock_grp = lck_grp_alloc_init("throttle I/O", throttle_lock_grp_attr); + /* + * allocate lock group attribute and group + */ + throttle_lock_grp_attr = lck_grp_attr_alloc_init(); + throttle_lock_grp = lck_grp_alloc_init("throttle I/O", throttle_lock_grp_attr); /* Update throttle parameters based on device tree configuration */ 
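[Editor's sketch, not part of the patch: throttle_init_throttle_window() and throttle_init_throttle_period() above layer their tunables, starting from compiled-in defaults, then device-tree properties via PE_get_default(), then boot-args via PE_parse_boot_argn(), so the layer consulted last wins. A user-space analog that uses an environment variable as the single override source.]

    #include <stdio.h>
    #include <stdlib.h>

    /* Start from the compiled default, then let an environment variable
     * (standing in for a device-tree property or boot-arg) override it.
     * Checking overrides last is what gives them precedence. */
    static int tunable(const char *name, int compiled_default)
    {
        const char *override = getenv(name);

        return override != NULL ? atoi(override) : compiled_default;
    }

    int main(void)
    {
        printf("tier1 window = %d ms\n",
            tunable("IO_THROTTLE_WINDOW_TIER1", 25));
        return 0;
    }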
throttle_init_throttle_window(); - /* - * allocate the lock attribute - */ - throttle_lock_attr = lck_attr_alloc_init(); + /* + * allocate the lock attribute + */ + throttle_lock_attr = lck_attr_alloc_init(); for (i = 0; i < LOWPRI_MAX_NUM_DEV; i++) { - info = &_throttle_io_info[i]; - - lck_mtx_init(&info->throttle_lock, throttle_lock_grp, throttle_lock_attr); + info = &_throttle_io_info[i]; + + lck_mtx_init(&info->throttle_lock, throttle_lock_grp, throttle_lock_attr); info->throttle_timer_call = thread_call_allocate((thread_call_func_t)throttle_timer, (thread_call_param_t)info); for (level = 0; level <= THROTTLE_LEVEL_END; level++) { @@ -1337,10 +1373,11 @@ throttle_init(void) void sys_override_io_throttle(boolean_t enable_override) { - if (enable_override) + if (enable_override) { lowpri_throttle_enabled = 0; - else + } else { lowpri_throttle_enabled = 1; + } } int rethrottle_wakeups = 0; @@ -1375,15 +1412,16 @@ rethrottle_thread(uthread_t ut) * If uthread doesn't have throttle state, then there's no chance * of it needing a rethrottle. */ - if (ut->uu_throttle_info == NULL) + if (ut->uu_throttle_info == NULL) { return; + } boolean_t s = ml_set_interrupts_enabled(FALSE); lck_spin_lock(&ut->uu_rethrottle_lock); - if (!ut->uu_is_throttled) + if (!ut->uu_is_throttled) { ut->uu_was_rethrottled = true; - else { + } else { int my_new_level = throttle_get_thread_throttle_level(ut); if (my_new_level != ut->uu_on_throttlelist) { @@ -1415,13 +1453,14 @@ rethrottle_thread(uthread_t ut) void * throttle_info_create(void) { - struct _throttle_io_info_t *info; - int level; + struct _throttle_io_info_t *info; + int level; MALLOC(info, struct _throttle_io_info_t *, sizeof(*info), M_TEMP, M_ZERO | M_WAITOK); /* Should never happen but just in case */ - if (info == NULL) + if (info == NULL) { return NULL; + } /* Mark that this one was allocated and needs to be freed */ DEBUG_ALLOC_THROTTLE_INFO("Creating info = %p\n", info, info ); info->throttle_alloc = TRUE; @@ -1442,17 +1481,18 @@ throttle_info_create(void) /* * KPI routine * - * Release the throttle info pointer if all the reference are gone. Should be - * called to release reference taken by throttle_info_create - */ + * Release the throttle info pointer if all the reference are gone. Should be + * called to release reference taken by throttle_info_create + */ void throttle_info_release(void *throttle_info) { DEBUG_ALLOC_THROTTLE_INFO("Releaseing info = %p\n", - (struct _throttle_io_info_t *)throttle_info, - (struct _throttle_io_info_t *)throttle_info); - if (throttle_info) /* Just to be careful */ + (struct _throttle_io_info_t *)throttle_info, + (struct _throttle_io_info_t *)throttle_info); + if (throttle_info) { /* Just to be careful */ throttle_info_rel(throttle_info); + } } /* @@ -1461,20 +1501,22 @@ throttle_info_release(void *throttle_info) * File Systems that create an info structure, need to call this routine in * their mount routine (used by cluster code). File Systems that call this in * their mount routines must call throttle_info_mount_rel in their unmount - * routines. + * routines. 
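[Editor's sketch, not part of the patch: throttle_info_mount_ref() below pins the incoming info with throttle_info_ref() before releasing whatever the mount already holds, so the reference count can never transiently reach zero during the swap. A C11-atomics model of that hand-off; names are invented and malloc error handling is omitted for brevity.]

    #include <stdatomic.h>
    #include <stdlib.h>

    struct xinfo { atomic_int refcnt; };

    static struct xinfo *xinfo_create(void)
    {
        struct xinfo *i = malloc(sizeof(*i));
        atomic_init(&i->refcnt, 1);   /* creator holds the first reference */
        return i;
    }

    static void xinfo_ref(struct xinfo *i) { atomic_fetch_add(&i->refcnt, 1); }

    static void xinfo_rel(struct xinfo *i)
    {
        if (atomic_fetch_sub(&i->refcnt, 1) == 1) {
            free(i);                  /* last reference dropped */
        }
    }

    /* Mirrors the mount_ref hand-off: pin the new info before releasing
     * anything the slot already holds. */
    static void mount_set_info(struct xinfo **slot, struct xinfo *new_info)
    {
        xinfo_ref(new_info);
        if (*slot != NULL) {
            xinfo_rel(*slot);
        }
        *slot = new_info;
    }

    int main(void)
    {
        struct xinfo *a = xinfo_create();
        struct xinfo *slot = NULL;

        mount_set_info(&slot, a);     /* a: refcnt 2 */
        xinfo_rel(a);                 /* creator drops its reference */
        xinfo_rel(slot);              /* mount drops the last one; freed */
        return 0;
    }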
*/ -void +void throttle_info_mount_ref(mount_t mp, void *throttle_info) { - if ((throttle_info == NULL) || (mp == NULL)) + if ((throttle_info == NULL) || (mp == NULL)) { return; + } throttle_info_ref(throttle_info); /* * We already have a reference release it before adding the new one */ - if (mp->mnt_throttle_info) + if (mp->mnt_throttle_info) { throttle_info_rel(mp->mnt_throttle_info); + } mp->mnt_throttle_info = throttle_info; } @@ -1487,12 +1529,13 @@ throttle_info_mount_ref(mount_t mp, void *throttle_info) int throttle_info_ref_by_mask(uint64_t throttle_mask, throttle_info_handle_t *throttle_info_handle) { - int dev_index; + int dev_index; struct _throttle_io_info_t *info; - if (throttle_info_handle == NULL) + if (throttle_info_handle == NULL) { return EINVAL; - + } + dev_index = num_trailing_0(throttle_mask); info = &_throttle_io_info[dev_index]; throttle_info_ref(info); @@ -1520,12 +1563,13 @@ throttle_info_rel_by_mask(throttle_info_handle_t throttle_info_handle) * * File Systems that throttle_info_mount_ref, must call this routine in their * umount routine. - */ + */ void throttle_info_mount_rel(mount_t mp) { - if (mp->mnt_throttle_info) + if (mp->mnt_throttle_info) { throttle_info_rel(mp->mnt_throttle_info); + } mp->mnt_throttle_info = NULL; } @@ -1540,12 +1584,13 @@ throttle_info_mount_reset_period(mount_t mp, int isssd) { struct _throttle_io_info_t *info; - if (mp == NULL) + if (mp == NULL) { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; - else if (mp->mnt_throttle_info == NULL) + } else if (mp->mnt_throttle_info == NULL) { info = &_throttle_io_info[mp->mnt_devbsdunit]; - else + } else { info = mp->mnt_throttle_info; + } throttle_init_throttle_period(info, isssd); } @@ -1553,14 +1598,15 @@ throttle_info_mount_reset_period(mount_t mp, int isssd) void throttle_info_get_last_io_time(mount_t mp, struct timeval *tv) { - struct _throttle_io_info_t *info; + struct _throttle_io_info_t *info; - if (mp == NULL) + if (mp == NULL) { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; - else if (mp->mnt_throttle_info == NULL) + } else if (mp->mnt_throttle_info == NULL) { info = &_throttle_io_info[mp->mnt_devbsdunit]; - else + } else { info = mp->mnt_throttle_info; + } *tv = info->throttle_last_write_timestamp; } @@ -1568,36 +1614,40 @@ throttle_info_get_last_io_time(mount_t mp, struct timeval *tv) void update_last_io_time(mount_t mp) { - struct _throttle_io_info_t *info; - - if (mp == NULL) + struct _throttle_io_info_t *info; + + if (mp == NULL) { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; - else if (mp->mnt_throttle_info == NULL) + } else if (mp->mnt_throttle_info == NULL) { info = &_throttle_io_info[mp->mnt_devbsdunit]; - else + } else { info = mp->mnt_throttle_info; + } microuptime(&info->throttle_last_write_timestamp); - if (mp != NULL) + if (mp != NULL) { mp->mnt_last_write_completed_timestamp = info->throttle_last_write_timestamp; + } } int throttle_get_io_policy(uthread_t *ut) { - if (ut != NULL) + if (ut != NULL) { *ut = get_bsdthread_info(current_thread()); + } - return (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO)); + return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); } int throttle_get_passive_io_policy(uthread_t *ut) { - if (ut != NULL) + if (ut != NULL) { *ut = get_bsdthread_info(current_thread()); + } - return (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_PASSIVE_IO)); + return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_PASSIVE_IO); } @@ -1614,15 +1664,17 @@ 
throttle_get_thread_throttle_level(uthread_t ut) * Return a throttle level given an existing I/O tier (such as returned by throttle_get_io_policy) */ static int -throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) { +throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) +{ int thread_throttle_level = io_tier; int user_idle_level; assert(ut != NULL); /* Bootcache misses should always be throttled */ - if (ut->uu_throttle_bc) + if (ut->uu_throttle_bc) { thread_throttle_level = THROTTLE_LEVEL_TIER3; + } /* * Issue tier3 I/O as tier2 when the user is idle @@ -1638,7 +1690,7 @@ throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) { } } - return (thread_throttle_level); + return thread_throttle_level; } /* @@ -1651,15 +1703,16 @@ throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) { static int throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int * throttling_level) { - struct _throttle_io_info_t *info = throttle_info; + struct _throttle_io_info_t *info = throttle_info; struct timeval elapsed; struct timeval now; uint64_t elapsed_msecs; - int thread_throttle_level; - int throttle_level; + int thread_throttle_level; + int throttle_level; - if ((thread_throttle_level = throttle_get_thread_throttle_level(NULL)) < THROTTLE_LEVEL_THROTTLED) - return (THROTTLE_DISENGAGED); + if ((thread_throttle_level = throttle_get_thread_throttle_level(NULL)) < THROTTLE_LEVEL_THROTTLED) { + return THROTTLE_DISENGAGED; + } microuptime(&now); @@ -1671,8 +1724,9 @@ throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]); elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); - if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) + if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) { break; + } } if (throttle_level >= thread_throttle_level) { /* @@ -1680,28 +1734,30 @@ throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int * that affect the throttle level of this thread, * so go ahead and treat as normal I/O */ - return (THROTTLE_DISENGAGED); + return THROTTLE_DISENGAGED; } - if (mylevel) + if (mylevel) { *mylevel = thread_throttle_level; - if (throttling_level) + } + if (throttling_level) { *throttling_level = throttle_level; + } if (info->throttle_io_count != info->throttle_io_count_begin) { /* * we've already issued at least one throttleable I/O * in the current I/O window, so avoid issuing another one */ - return (THROTTLE_NOW); + return THROTTLE_NOW; } /* * we're in the throttle window, so * cut the I/O size back */ - return (THROTTLE_ENGAGED); + return THROTTLE_ENGAGED; } -/* +/* * If we have a mount point and it has a throttle info pointer then * use it to do the check, otherwise use the device unit number to find * the correct throttle info array element. 
@@ -1709,35 +1765,38 @@ throttle_io_will_be_throttled_internal(void * throttle_info, int * mylevel, int int throttle_io_will_be_throttled(__unused int lowpri_window_msecs, mount_t mp) { - struct _throttle_io_info_t *info; + struct _throttle_io_info_t *info; /* * Should we just return zero if no mount point */ - if (mp == NULL) - info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; - else if (mp->mnt_throttle_info == NULL) - info = &_throttle_io_info[mp->mnt_devbsdunit]; - else - info = mp->mnt_throttle_info; + if (mp == NULL) { + info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; + } else if (mp->mnt_throttle_info == NULL) { + info = &_throttle_io_info[mp->mnt_devbsdunit]; + } else { + info = mp->mnt_throttle_info; + } if (info->throttle_is_fusion_with_priority) { uthread_t ut = get_bsdthread_info(current_thread()); - if (ut->uu_lowpri_window == 0) - return (THROTTLE_DISENGAGED); + if (ut->uu_lowpri_window == 0) { + return THROTTLE_DISENGAGED; + } } - if (info->throttle_disabled) - return (THROTTLE_DISENGAGED); - else + if (info->throttle_disabled) { + return THROTTLE_DISENGAGED; + } else { return throttle_io_will_be_throttled_internal(info, NULL, NULL); + } } -/* +/* * Routine to increment I/O throttling counters maintained in the proc */ -static void +static void throttle_update_proc_stats(pid_t throttling_pid, int count) { proc_t throttling_proc; @@ -1745,7 +1804,7 @@ throttle_update_proc_stats(pid_t throttling_pid, int count) /* The throttled_proc is always the current proc; so we are not concerned with refs */ OSAddAtomic64(count, &(throttled_proc->was_throttled)); - + /* The throttling pid might have exited by now */ throttling_proc = proc_find(throttling_pid); if (throttling_proc != PROC_NULL) { @@ -1764,48 +1823,53 @@ throttle_lowpri_io(int sleep_amount) { uthread_t ut; struct _throttle_io_info_t *info; - int throttle_type = 0; - int mylevel = 0; - int throttling_level = THROTTLE_LEVEL_NONE; - int sleep_cnt = 0; + int throttle_type = 0; + int mylevel = 0; + int throttling_level = THROTTLE_LEVEL_NONE; + int sleep_cnt = 0; uint32_t throttle_io_period_num = 0; boolean_t insert_tail = TRUE; boolean_t s; ut = get_bsdthread_info(current_thread()); - if (ut->uu_lowpri_window == 0) - return (0); + if (ut->uu_lowpri_window == 0) { + return 0; + } info = ut->uu_throttle_info; if (info == NULL) { ut->uu_throttle_bc = false; ut->uu_lowpri_window = 0; - return (0); + return 0; } lck_mtx_lock(&info->throttle_lock); assert(ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED); - if (sleep_amount == 0) + if (sleep_amount == 0) { goto done; + } - if (sleep_amount == 1 && !ut->uu_throttle_bc) + if (sleep_amount == 1 && !ut->uu_throttle_bc) { sleep_amount = 0; + } throttle_io_period_num = info->throttle_io_period_num; ut->uu_was_rethrottled = false; - while ( (throttle_type = throttle_io_will_be_throttled_internal(info, &mylevel, &throttling_level)) ) { - + while ((throttle_type = throttle_io_will_be_throttled_internal(info, &mylevel, &throttling_level))) { if (throttle_type == THROTTLE_ENGAGED) { - if (sleep_amount == 0) - break; - if (info->throttle_io_period_num < throttle_io_period_num) + if (sleep_amount == 0) { break; - if ((info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) + } + if (info->throttle_io_period_num < throttle_io_period_num) { break; + } + if ((info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) { + break; + } } /* * keep the same position in the list if "rethrottle_thread" changes our throttle level and @@ -1823,8 +1887,9 @@ 
throttle_lowpri_io(int sleep_amount) insert_tail = TRUE; } if (ut->uu_on_throttlelist < THROTTLE_LEVEL_THROTTLED) { - if (throttle_add_to_list(info, ut, mylevel, insert_tail) == THROTTLE_LEVEL_END) + if (throttle_add_to_list(info, ut, mylevel, insert_tail) == THROTTLE_LEVEL_END) { goto done; + } } assert(throttling_level >= THROTTLE_LEVEL_START && throttling_level <= THROTTLE_LEVEL_END); @@ -1836,7 +1901,6 @@ throttle_lowpri_io(int sleep_amount) * with "rethrottle_thread" */ if (ut->uu_was_rethrottled) { - lck_spin_unlock(&ut->uu_rethrottle_lock); ml_set_interrupts_enabled(s); lck_mtx_yield(&info->throttle_lock); @@ -1847,11 +1911,11 @@ throttle_lowpri_io(int sleep_amount) continue; } KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, PROCESS_THROTTLED)) | DBG_FUNC_NONE, - info->throttle_last_IO_pid[throttling_level], throttling_level, proc_selfpid(), mylevel, 0); - + info->throttle_last_IO_pid[throttling_level], throttling_level, proc_selfpid(), mylevel, 0); + if (sleep_cnt == 0) { KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START, - throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0); + throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0); throttled_count[mylevel]++; } ut->uu_wmesg = "throttle_lowpri_io"; @@ -1874,11 +1938,11 @@ throttle_lowpri_io(int sleep_amount) lck_mtx_lock(&info->throttle_lock); sleep_cnt++; - - if (sleep_amount == 0) + + if (sleep_amount == 0) { insert_tail = FALSE; - else if (info->throttle_io_period_num < throttle_io_period_num || - (info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) { + } else if (info->throttle_io_period_num < throttle_io_period_num || + (info->throttle_io_period_num - throttle_io_period_num) >= (uint32_t)sleep_amount) { insert_tail = FALSE; sleep_amount = 0; } @@ -1892,11 +1956,11 @@ done: if (sleep_cnt) { KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END, - throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0); + throttle_windows_msecs[mylevel], info->throttle_io_periods[mylevel], info->throttle_io_count, 0, 0); /* * We update the stats for the last pid which opened a throttle window for the throttled thread. * This might not be completely accurate since the multiple throttles seen by the lower tier pid - * might have been caused by various higher prio pids. However, updating these stats accurately + * might have been caused by various higher prio pids. However, updating these stats accurately * means doing a proc_find while holding the throttle lock which leads to deadlock. 
*/ throttle_update_proc_stats(info->throttle_last_IO_pid[throttling_level], sleep_cnt); @@ -1908,7 +1972,7 @@ done: throttle_info_rel(info); - return (sleep_cnt); + return sleep_cnt; } /* @@ -1919,24 +1983,28 @@ done: * * explanations about these policies are in the man page of setiopolicy_np */ -void throttle_set_thread_io_policy(int policy) +void +throttle_set_thread_io_policy(int policy) { proc_set_thread_policy(current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_IOPOL, policy); } -int throttle_get_thread_effective_io_policy() +int +throttle_get_thread_effective_io_policy() { return proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); } -void throttle_info_reset_window(uthread_t ut) +void +throttle_info_reset_window(uthread_t ut) { struct _throttle_io_info_t *info; - if (ut == NULL) + if (ut == NULL) { ut = get_bsdthread_info(current_thread()); + } - if ( (info = ut->uu_throttle_info) ) { + if ((info = ut->uu_throttle_info)) { throttle_info_rel(info); ut->uu_throttle_info = NULL; @@ -1946,16 +2014,17 @@ void throttle_info_reset_window(uthread_t ut) } static -void throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t *info, boolean_t BC_throttle, boolean_t isssd) +void +throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t *info, boolean_t BC_throttle, boolean_t isssd) { - if (lowpri_throttle_enabled == 0 || info->throttle_disabled) + if (lowpri_throttle_enabled == 0 || info->throttle_disabled) { return; + } if (info->throttle_io_periods == 0) { throttle_init_throttle_period(info, isssd); } if (ut->uu_throttle_info == NULL) { - ut->uu_throttle_info = info; throttle_info_ref(info); DEBUG_ALLOC_THROTTLE_INFO("updating info = %p\n", info, info ); @@ -1971,7 +2040,9 @@ void throttle_info_set_initial_window(uthread_t ut, struct _throttle_io_info_t * * * Only affects IO that was sent through spec_strategy */ -void throttle_info_end_io(buf_t bp) { +void +throttle_info_end_io(buf_t bp) +{ mount_t mp; struct bufattr *bap; struct _throttle_io_info_t *info; @@ -2002,7 +2073,9 @@ void throttle_info_end_io(buf_t bp) { * Decrement inflight count initially incremented by throttle_info_update_internal */ static -void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level) { +void +throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level) +{ if (throttle_level == THROTTLE_LEVEL_NONE) { return; } @@ -2017,15 +2090,18 @@ void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttl * throttle_info_end_io_internal to avoid leaking in-flight I/O. 
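[Editor's sketch, not part of the patch: the comment above states the invariant between throttle_info_update_internal() and throttle_info_end_io_internal(): every per-tier inflight increment at issue time must be matched by exactly one decrement at completion, or the tier's count never drains and lower tiers stay throttled behind it. A minimal model of that pairing, with invented names.]

    #include <stdatomic.h>
    #include <stdio.h>

    enum { XLEVEL_NONE = -1, XNUM_TIERS = 4 };

    static atomic_int inflight[XNUM_TIERS];   /* one counter per tier */

    /* Issue side: bump the tier's in-flight count and hand the caller a
     * token to pass back at completion (the kernel stashes the level in
     * the buffer's bufattr for the same purpose). */
    static int io_begin(int level)
    {
        if (level != XLEVEL_NONE) {
            atomic_fetch_add(&inflight[level], 1);
        }
        return level;
    }

    /* Completion side: must run exactly once per io_begin(). */
    static void io_end(int token)
    {
        if (token != XLEVEL_NONE) {
            atomic_fetch_sub(&inflight[token], 1);
        }
    }

    int main(void)
    {
        int token = io_begin(2);
        io_end(token);
        printf("tier2 inflight: %d\n", atomic_load(&inflight[2]));
        return 0;
    }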
*/ static -int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap) +int +throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap) { - int thread_throttle_level; + int thread_throttle_level; - if (lowpri_throttle_enabled == 0 || info->throttle_disabled) + if (lowpri_throttle_enabled == 0 || info->throttle_disabled) { return THROTTLE_LEVEL_NONE; + } - if (ut == NULL) + if (ut == NULL) { ut = get_bsdthread_info(current_thread()); + } if (bap && inflight && !ut->uu_throttle_bc) { thread_throttle_level = GET_BUFATTR_IO_TIER(bap); @@ -2037,7 +2113,7 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut } if (thread_throttle_level != THROTTLE_LEVEL_NONE) { - if(!ISSET(flags, B_PASSIVE)) { + if (!ISSET(flags, B_PASSIVE)) { info->throttle_last_IO_pid[thread_throttle_level] = proc_selfpid(); if (inflight && !ut->uu_throttle_bc) { if (NULL != bap) { @@ -2048,7 +2124,7 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut microuptime(&info->throttle_window_start_timestamp[thread_throttle_level]); } KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, OPEN_THROTTLE_WINDOW)) | DBG_FUNC_NONE, - current_proc()->p_pid, thread_throttle_level, 0, 0, 0); + current_proc()->p_pid, thread_throttle_level, 0, 0, 0); } microuptime(&info->throttle_last_IO_timestamp[thread_throttle_level]); } @@ -2065,7 +2141,7 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut * do the delay just before we return from the system * call that triggered this I/O or from vnode_pagein */ - OSAddAtomic(1, &info->throttle_io_count); + OSAddAtomic(1, &info->throttle_io_count); throttle_info_set_initial_window(ut, info, FALSE, isssd); } @@ -2073,7 +2149,8 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut return thread_throttle_level; } -void *throttle_info_update_by_mount(mount_t mp) +void * +throttle_info_update_by_mount(mount_t mp) { struct _throttle_io_info_t *info; uthread_t ut; @@ -2082,14 +2159,17 @@ void *throttle_info_update_by_mount(mount_t mp) ut = get_bsdthread_info(current_thread()); if (mp != NULL) { - if (disk_conditioner_mount_is_ssd(mp)) + if (disk_conditioner_mount_is_ssd(mp)) { isssd = TRUE; + } info = &_throttle_io_info[mp->mnt_devbsdunit]; - } else + } else { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; + } - if (!ut->uu_lowpri_window) + if (!ut->uu_lowpri_window) { throttle_info_set_initial_window(ut, info, FALSE, isssd); + } return info; } @@ -2101,10 +2181,12 @@ void *throttle_info_update_by_mount(mount_t mp) * this is usually called before every I/O, used for throttled I/O * book keeping. This routine has low overhead and does not sleep */ -void throttle_info_update(void *throttle_info, int flags) +void +throttle_info_update(void *throttle_info, int flags) { - if (throttle_info) + if (throttle_info) { throttle_info_update_internal(throttle_info, NULL, flags, FALSE, FALSE, NULL); + } } /* @@ -2113,7 +2195,8 @@ void throttle_info_update(void *throttle_info, int flags) * this is usually called before every I/O, used for throttled I/O * book keeping. 
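[Editor's sketch, not part of the patch: the throttling checks in this area repeatedly turn "now minus window start" into whole milliseconds with timevalsub() followed by sec * 1000 + usec / 1000. A self-contained version of the same arithmetic.]

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/time.h>

    /* Signed per-field subtraction keeps the result correct even when the
     * microsecond field borrows (C division truncates toward zero). */
    static uint64_t elapsed_msecs(struct timeval start, struct timeval now)
    {
        int64_t sec  = (int64_t)now.tv_sec  - (int64_t)start.tv_sec;
        int64_t usec = (int64_t)now.tv_usec - (int64_t)start.tv_usec;

        return (uint64_t)(sec * 1000 + usec / 1000);
    }

    int main(void)
    {
        struct timeval t0 = { 10, 900000 }, t1 = { 12, 100000 };

        printf("%llu ms\n", (unsigned long long)elapsed_msecs(t0, t1));
        return 0;
    }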
This routine has low overhead and does not sleep */ -void throttle_info_update_by_mask(void *throttle_info_handle, int flags) +void +throttle_info_update_by_mask(void *throttle_info_handle, int flags) { void *throttle_info = throttle_info_handle; @@ -2127,17 +2210,19 @@ void throttle_info_update_by_mask(void *throttle_info_handle, int flags) } /* * KPI routine - * - * This routine marks the throttle info as disabled. Used for mount points which + * + * This routine marks the throttle info as disabled. Used for mount points which * support I/O scheduling. */ -void throttle_info_disable_throttle(int devno, boolean_t isfusion) +void +throttle_info_disable_throttle(int devno, boolean_t isfusion) { struct _throttle_io_info_t *info; - if (devno < 0 || devno >= LOWPRI_MAX_NUM_DEV) + if (devno < 0 || devno >= LOWPRI_MAX_NUM_DEV) { panic("Illegal devno (%d) passed into throttle_info_disable_throttle()", devno); + } info = &_throttle_io_info[devno]; // don't disable software throttling on devices that are part of a fusion device @@ -2148,34 +2233,34 @@ void throttle_info_disable_throttle(int devno, boolean_t isfusion) } info->throttle_disabled = !info->throttle_is_fusion_with_priority; return; -} +} /* * KPI routine (private) * Called to determine if this IO is being throttled to this level so that it can be treated specially */ -int throttle_info_io_will_be_throttled(void * throttle_info, int policy) +int +throttle_info_io_will_be_throttled(void * throttle_info, int policy) { - struct _throttle_io_info_t *info = throttle_info; + struct _throttle_io_info_t *info = throttle_info; struct timeval elapsed; uint64_t elapsed_msecs; - int throttle_level; - int thread_throttle_level; - - switch (policy) { - - case IOPOL_THROTTLE: - thread_throttle_level = THROTTLE_LEVEL_TIER3; - break; - case IOPOL_UTILITY: - thread_throttle_level = THROTTLE_LEVEL_TIER2; - break; - case IOPOL_STANDARD: - thread_throttle_level = THROTTLE_LEVEL_TIER1; - break; - default: - thread_throttle_level = THROTTLE_LEVEL_TIER0; + int throttle_level; + int thread_throttle_level; + + switch (policy) { + case IOPOL_THROTTLE: + thread_throttle_level = THROTTLE_LEVEL_TIER3; + break; + case IOPOL_UTILITY: + thread_throttle_level = THROTTLE_LEVEL_TIER2; + break; + case IOPOL_STANDARD: + thread_throttle_level = THROTTLE_LEVEL_TIER1; + break; + default: + thread_throttle_level = THROTTLE_LEVEL_TIER0; break; } for (throttle_level = THROTTLE_LEVEL_START; throttle_level < thread_throttle_level; throttle_level++) { @@ -2187,23 +2272,25 @@ int throttle_info_io_will_be_throttled(void * throttle_info, int policy) timevalsub(&elapsed, &info->throttle_window_start_timestamp[throttle_level]); elapsed_msecs = (uint64_t)elapsed.tv_sec * (uint64_t)1000 + (elapsed.tv_usec / 1000); - if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) + if (elapsed_msecs < (uint64_t)throttle_windows_msecs[thread_throttle_level]) { break; + } } if (throttle_level >= thread_throttle_level) { /* * we're beyond all of the throttle windows * so go ahead and treat as normal I/O */ - return (THROTTLE_DISENGAGED); + return THROTTLE_DISENGAGED; } /* * we're in the throttle window */ - return (THROTTLE_ENGAGED); + return THROTTLE_ENGAGED; } -int throttle_lowpri_window(void) +int +throttle_lowpri_window(void) { struct uthread *ut = get_bsdthread_info(current_thread()); return ut->uu_lowpri_window; @@ -2217,15 +2304,15 @@ int upl_get_cached_tier(void *); int spec_strategy(struct vnop_strategy_args *ap) { - buf_t bp; - int bflags; - int io_tier; - int passive; - 
dev_t bdev; + buf_t bp; + int bflags; + int io_tier; + int passive; + dev_t bdev; uthread_t ut; mount_t mp; - struct bufattr *bap; - int strategy_ret; + struct bufattr *bap; + int strategy_ret; struct _throttle_io_info_t *throttle_info; boolean_t isssd = FALSE; boolean_t inflight = FALSE; @@ -2236,28 +2323,30 @@ spec_strategy(struct vnop_strategy_args *ap) proc_t curproc = current_proc(); #endif /* !CONFIG_EMBEDDED */ - bp = ap->a_bp; + bp = ap->a_bp; bdev = buf_device(bp); mp = buf_vnode(bp)->v_mount; bap = &bp->b_attr; #if CONFIG_IOSCHED - if (bp->b_flags & B_CLUSTER) { - - io_tier = upl_get_cached_tier(bp->b_upl); + if (bp->b_flags & B_CLUSTER) { + io_tier = upl_get_cached_tier(bp->b_upl); - if (io_tier == -1) - io_tier = throttle_get_io_policy(&ut); + if (io_tier == -1) { + io_tier = throttle_get_io_policy(&ut); + } #if DEVELOPMENT || DEBUG - else { - int my_io_tier = throttle_get_io_policy(&ut); + else { + int my_io_tier = throttle_get_io_policy(&ut); - if (io_tier != my_io_tier) - KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, IO_TIER_UPL_MISMATCH)) | DBG_FUNC_NONE, buf_kernel_addrperm_addr(bp), my_io_tier, io_tier, 0, 0); - } + if (io_tier != my_io_tier) { + KERNEL_DEBUG_CONSTANT((FSDBG_CODE(DBG_THROTTLE, IO_TIER_UPL_MISMATCH)) | DBG_FUNC_NONE, buf_kernel_addrperm_addr(bp), my_io_tier, io_tier, 0, 0); + } + } #endif - } else - io_tier = throttle_get_io_policy(&ut); + } else { + io_tier = throttle_get_io_policy(&ut); + } #else io_tier = throttle_get_io_policy(&ut); #endif @@ -2278,11 +2367,12 @@ spec_strategy(struct vnop_strategy_args *ap) #endif /* CONFIG_IOSCHED */ } - if (bp->b_flags & B_META) + if (bp->b_flags & B_META) { bap->ba_flags |= BA_META; + } #if CONFIG_IOSCHED - /* + /* * For I/O Scheduling, we currently do not have a way to track and expedite metadata I/Os. 
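The throttle_info_io_will_be_throttled logic above reduces to two steps: map the I/O policy to a tier, then ask whether any lower tier has an open window younger than this tier's window length. A standalone sketch of that decision follows; the policy identifiers and window lengths here are placeholders, not the kernel's IOPOL_* values or tunables.

#include <stdio.h>

enum { TIER0, TIER1, TIER2, TIER3, NUM_TIERS };

/* Placeholder policy identifiers for illustration only. */
enum { POL_DEFAULT, POL_STANDARD, POL_UTILITY, POL_THROTTLE };

static int policy_to_tier(int policy)
{
    switch (policy) {
    case POL_THROTTLE: return TIER3;
    case POL_UTILITY:  return TIER2;
    case POL_STANDARD: return TIER1;
    default:           return TIER0;
    }
}

/* Engaged if any lower tier issued I/O within this tier's window. */
static int will_be_throttled(int policy,
    const long elapsed_ms[NUM_TIERS], const long window_ms[NUM_TIERS])
{
    int my_tier = policy_to_tier(policy);
    for (int t = TIER0; t < my_tier; t++) {
        if (elapsed_ms[t] < window_ms[my_tier]) {
            return 1;           /* THROTTLE_ENGAGED */
        }
    }
    return 0;                   /* THROTTLE_DISENGAGED */
}

int main(void)
{
    long elapsed[NUM_TIERS] = { 10, 9999, 9999, 9999 };
    long window[NUM_TIERS]  = { 0, 100, 200, 500 };
    printf("engaged: %d\n", will_be_throttled(POL_THROTTLE, elapsed, window));
    return 0;
}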
* To ensure we dont get into priority inversions due to metadata I/Os, we use the following rules: * For metadata reads, ceil all I/Os to IOSCHED_METADATA_TIER & mark them passive if the I/O tier was upgraded @@ -2302,7 +2392,7 @@ spec_strategy(struct vnop_strategy_args *ap) } } #endif /* CONFIG_IOSCHED */ - + SET_BUFATTR_IO_TIER(bap, io_tier); if (passive) { @@ -2311,35 +2401,43 @@ spec_strategy(struct vnop_strategy_args *ap) } #if !CONFIG_EMBEDDED - if ((curproc != NULL) && ((curproc->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP)) + if ((curproc != NULL) && ((curproc->p_flag & P_DELAYIDLESLEEP) == P_DELAYIDLESLEEP)) { bap->ba_flags |= BA_DELAYIDLESLEEP; + } #endif /* !CONFIG_EMBEDDED */ - + bflags = bp->b_flags; - if (((bflags & B_READ) == 0) && ((bflags & B_ASYNC) == 0)) + if (((bflags & B_READ) == 0) && ((bflags & B_ASYNC) == 0)) { bufattr_markquickcomplete(bap); + } - if (bflags & B_READ) - code |= DKIO_READ; - if (bflags & B_ASYNC) - code |= DKIO_ASYNC; + if (bflags & B_READ) { + code |= DKIO_READ; + } + if (bflags & B_ASYNC) { + code |= DKIO_ASYNC; + } - if (bap->ba_flags & BA_META) - code |= DKIO_META; - else if (bflags & B_PAGEIO) - code |= DKIO_PAGING; + if (bap->ba_flags & BA_META) { + code |= DKIO_META; + } else if (bflags & B_PAGEIO) { + code |= DKIO_PAGING; + } - if (io_tier != 0) + if (io_tier != 0) { code |= DKIO_THROTTLE; + } code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK); - if (bflags & B_PASSIVE) + if (bflags & B_PASSIVE) { code |= DKIO_PASSIVE; + } - if (bap->ba_flags & BA_NOCACHE) + if (bap->ba_flags & BA_NOCACHE) { code |= DKIO_NOCACHE; + } if (upgrade) { code |= DKIO_TIER_UPGRADE; @@ -2348,14 +2446,15 @@ spec_strategy(struct vnop_strategy_args *ap) if (kdebug_enable) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, - buf_kernel_addrperm_addr(bp), bdev, buf_blkno(bp), buf_count(bp), 0); - } + buf_kernel_addrperm_addr(bp), bdev, buf_blkno(bp), buf_count(bp), 0); + } thread_update_io_stats(current_thread(), buf_count(bp), code); if (mp != NULL) { - if (disk_conditioner_mount_is_ssd(mp)) + if (disk_conditioner_mount_is_ssd(mp)) { isssd = TRUE; + } /* * Partially initialized mounts don't have a final devbsdunit and should not be tracked. * Verify that devbsdunit is initialized (non-zero) or that 0 is the correct initialized value @@ -2365,9 +2464,9 @@ spec_strategy(struct vnop_strategy_args *ap) inflight = TRUE; } throttle_info = &_throttle_io_info[mp->mnt_devbsdunit]; - - } else + } else { throttle_info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; + } throttle_info_update_internal(throttle_info, ut, bflags, isssd, inflight, bap); @@ -2396,29 +2495,28 @@ spec_strategy(struct vnop_strategy_args *ap) * the boot cache too often. * * Note that typical strategy routines are defined with - * a void return so we'll get garbage here. In the + * a void return so we'll get garbage here. In the * unlikely case the garbage matches our special return * value, it's not a big deal since we're only adjusting * the throttling delay. 
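The DKIO trace code assembled earlier in spec_strategy is a plain bitmask: direction, async, metadata/paging, throttle tier, passive, and no-cache bits OR'd into one value for the kdebug tracepoint. A sketch with made-up bit positions; the real DKIO_* values live in the kdebug headers and are not reproduced here.

#include <stdio.h>

/* Illustrative bit assignments only; see <sys/kdebug.h> for the real ones. */
#define DKIO_READ        0x01
#define DKIO_ASYNC       0x02
#define DKIO_META        0x04
#define DKIO_PAGING      0x08
#define DKIO_THROTTLE    0x10
#define DKIO_PASSIVE     0x20
#define DKIO_NOCACHE     0x40
#define DKIO_TIER_SHIFT  8
#define DKIO_TIER_MASK   0x0F00

static int make_dkio_code(int is_read, int is_async, int is_meta, int io_tier)
{
    int code = 0;
    if (is_read)  code |= DKIO_READ;
    if (is_async) code |= DKIO_ASYNC;
    if (is_meta)  code |= DKIO_META;
    if (io_tier != 0) code |= DKIO_THROTTLE;
    code |= (io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK;
    return code;
}

int main(void)
{
    printf("code = 0x%x\n", make_dkio_code(1, 0, 1, 2));
    return 0;
}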
- */ + */ #define IO_SATISFIED_BY_CACHE ((int)0xcafefeed) #define IO_SHOULD_BE_THROTTLED ((int)0xcafebeef) - typedef int strategy_fcn_ret_t(struct buf *bp); - + typedef int strategy_fcn_ret_t(struct buf *bp); + strategy_ret = (*(strategy_fcn_ret_t*)bdevsw[major(bdev)].d_strategy)(bp); // disk conditioner needs to track when this I/O actually starts // which means track it after `strategy` which may include delays // from inflight I/Os microuptime(&bp->b_timestamp_tv); - + if (IO_SATISFIED_BY_CACHE == strategy_ret) { /* * If this was a throttled IO satisfied by the boot cache, * don't delay the thread. */ throttle_info_reset_window(ut); - } else if (IO_SHOULD_BE_THROTTLED == strategy_ret) { /* * If the boot cache indicates this IO should be throttled, @@ -2426,7 +2524,7 @@ spec_strategy(struct vnop_strategy_args *ap) */ throttle_info_set_initial_window(ut, throttle_info, TRUE, isssd); } - return (0); + return 0; } @@ -2436,7 +2534,7 @@ spec_strategy(struct vnop_strategy_args *ap) int spec_blockmap(__unused struct vnop_blockmap_args *ap) { - return (ENOTSUP); + return ENOTSUP; } @@ -2454,7 +2552,6 @@ spec_close(struct vnop_close_args *ap) struct session *sessp; switch (vp->v_type) { - case VCHR: /* * Hack: a tty device that is a controlling terminal @@ -2479,7 +2576,7 @@ spec_close(struct vnop_close_args *ap) sessp->s_ttyvid = 0; sessp->s_ttyp = TTY_NULL; sessp->s_ttypgrpid = NO_PID; - } + } session_unlock(sessp); if (tp != TTY_NULL) { @@ -2499,14 +2596,16 @@ spec_close(struct vnop_close_args *ap) session_rele(sessp); } - if (--vp->v_specinfo->si_opencount < 0) + if (--vp->v_specinfo->si_opencount < 0) { panic("negative open count (c, %u, %u)", major(dev), minor(dev)); + } /* * close on last reference or on vnode revoke call */ - if (vcount(vp) == 0 || (flags & IO_REVOKE) != 0) + if (vcount(vp) == 0 || (flags & IO_REVOKE) != 0) { error = cdevsw[major(dev)].d_close(dev, flags, S_IFCHR, p); + } devsw_unlock(dev, S_IFCHR); break; @@ -2520,7 +2619,7 @@ spec_close(struct vnop_close_args *ap) if (vcount(vp) > 1) { vp->v_specinfo->si_opencount--; devsw_unlock(dev, S_IFBLK); - return (0); + return 0; } devsw_unlock(dev, S_IFBLK); @@ -2529,27 +2628,31 @@ spec_close(struct vnop_close_args *ap) * we must invalidate any in core blocks, so that * we can, for instance, change floppy disks. 
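The boot-cache handshake above relies on a convention rather than an API: a strategy routine that is nominally void can smuggle back one of two magic values, and the caller reacts only when the return exactly matches. A standalone sketch of the pattern using the same sentinel constants; the mock strategy routine is hypothetical.

#include <stdio.h>

#define IO_SATISFIED_BY_CACHE  ((int)0xcafefeed)
#define IO_SHOULD_BE_THROTTLED ((int)0xcafebeef)

struct buf { int satisfied_by_cache; };

/* Stand-in for a driver strategy routine that participates in the
 * convention; real strategy routines are typically declared void. */
static int mock_strategy(struct buf *bp)
{
    return bp->satisfied_by_cache ? IO_SATISFIED_BY_CACHE : 0;
}

int main(void)
{
    struct buf bp = { .satisfied_by_cache = 1 };
    int ret = mock_strategy(&bp);

    if (ret == IO_SATISFIED_BY_CACHE) {
        printf("cache hit: skip the throttle delay\n");
    } else if (ret == IO_SHOULD_BE_THROTTLED) {
        printf("cache says throttle: open a window\n");
    } else {
        printf("garbage/normal return: leave throttling as-is\n");
    }
    return 0;
}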
*/ - if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context))) - return (error); + if ((error = spec_fsync_internal(vp, MNT_WAIT, ap->a_context))) { + return error; + } error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0); - if (error) - return (error); + if (error) { + return error; + } devsw_lock(dev, S_IFBLK); - if (--vp->v_specinfo->si_opencount < 0) + if (--vp->v_specinfo->si_opencount < 0) { panic("negative open count (b, %u, %u)", major(dev), minor(dev)); + } - if (vcount(vp) == 0) + if (vcount(vp) == 0) { error = bdevsw[major(dev)].d_close(dev, flags, S_IFBLK, p); + } devsw_unlock(dev, S_IFBLK); break; default: panic("spec_close: not special"); - return(EBADF); + return EBADF; } return error; @@ -2561,28 +2664,27 @@ spec_close(struct vnop_close_args *ap) int spec_pathconf(struct vnop_pathconf_args *ap) { - switch (ap->a_name) { case _PC_LINK_MAX: *ap->a_retval = LINK_MAX; - return (0); + return 0; case _PC_MAX_CANON: *ap->a_retval = MAX_CANON; - return (0); + return 0; case _PC_MAX_INPUT: *ap->a_retval = MAX_INPUT; - return (0); + return 0; case _PC_PIPE_BUF: *ap->a_retval = PIPE_BUF; - return (0); + return 0; case _PC_CHOWN_RESTRICTED: - *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ - return (0); + *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */ + return 0; case _PC_VDISABLE: *ap->a_retval = _POSIX_VDISABLE; - return (0); + return 0; default: - return (EINVAL); + return EINVAL; } /* NOTREACHED */ } @@ -2593,8 +2695,7 @@ spec_pathconf(struct vnop_pathconf_args *ap) int spec_ebadf(__unused void *dummy) { - - return (EBADF); + return EBADF; } /* Blktooff derives file offset from logical block number */ @@ -2606,19 +2707,19 @@ spec_blktooff(struct vnop_blktooff_args *ap) switch (vp->v_type) { case VCHR: *ap->a_offset = (off_t)-1; /* failure */ - return (ENOTSUP); + return ENOTSUP; case VBLK: printf("spec_blktooff: not implemented for VBLK\n"); *ap->a_offset = (off_t)-1; /* failure */ - return (ENOTSUP); + return ENOTSUP; default: panic("spec_blktooff type"); } /* NOTREACHED */ - return (0); + return 0; } /* Offtoblk derives logical block number from file offset */ @@ -2630,19 +2731,19 @@ spec_offtoblk(struct vnop_offtoblk_args *ap) switch (vp->v_type) { case VCHR: *ap->a_lblkno = (daddr64_t)-1; /* failure */ - return (ENOTSUP); + return ENOTSUP; case VBLK: printf("spec_offtoblk: not implemented for VBLK\n"); *ap->a_lblkno = (daddr64_t)-1; /* failure */ - return (ENOTSUP); + return ENOTSUP; default: panic("spec_offtoblk type"); } /* NOTREACHED */ - return (0); + return 0; } static void filt_specdetach(struct knote *kn); @@ -2789,7 +2890,8 @@ spec_knote_select_and_link(struct knote *kn) return selres; } -static void filt_spec_common(struct knote *kn, int selres) +static void +filt_spec_common(struct knote *kn, int selres) { if (kn->kn_vnode_use_ofst) { if (kn->kn_fp->f_fglob->fg_offset >= (uint32_t)selres) { @@ -2920,7 +3022,7 @@ filt_specprocess(struct knote *kn, struct filt_process_s *data, struct kevent_in vnode_put(vp); res = ((kn->kn_sfflags & NOTE_LOWAT) != 0) ? - (kn->kn_data >= kn->kn_sdata) : kn->kn_data; + (kn->kn_data >= kn->kn_sdata) : kn->kn_data; if (res) { *kev = kn->kn_kevent; @@ -2943,4 +3045,3 @@ filt_specpeek(struct knote *kn) return kn->kn_data != 0; } - diff --git a/bsd/miscfs/specfs/specdev.h b/bsd/miscfs/specfs/specdev.h index 3d6d0258f..ae4e79ae0 100644 --- a/bsd/miscfs/specfs/specdev.h +++ b/bsd/miscfs/specfs/specdev.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -75,20 +75,20 @@ * in vgone. */ struct specinfo { - struct vnode **si_hashchain; - struct vnode *si_specnext; - long si_flags; - dev_t si_rdev; + struct vnode **si_hashchain; + struct vnode *si_specnext; + long si_flags; + dev_t si_rdev; int32_t si_opencount; - daddr_t si_size; /* device block size in bytes */ - daddr64_t si_lastr; /* last read blkno (read-ahead) */ - u_int64_t si_devsize; /* actual device size in bytes */ + daddr_t si_size; /* device block size in bytes */ + daddr64_t si_lastr; /* last read blkno (read-ahead) */ + u_int64_t si_devsize; /* actual device size in bytes */ - u_int8_t si_initted; - u_int8_t si_throttleable; - u_int16_t si_isssd; - u_int32_t si_devbsdunit; - u_int64_t si_throttle_mask; + u_int8_t si_initted; + u_int8_t si_throttleable; + u_int16_t si_isssd; + u_int32_t si_devbsdunit; + u_int64_t si_throttle_mask; }; /* * Exported shorthand @@ -104,17 +104,17 @@ struct specinfo { /* * Flags for specinfo */ -#define SI_MOUNTEDON 0x0001 /* block special device is mounted on */ -#define SI_ALIASED 0x0002 /* multiple active vnodes refer to this device */ +#define SI_MOUNTEDON 0x0001 /* block special device is mounted on */ +#define SI_ALIASED 0x0002 /* multiple active vnodes refer to this device */ /* * Special device management */ -#define SPECHSZ 64 -#if ((SPECHSZ&(SPECHSZ-1)) == 0) -#define SPECHASH(rdev) (((rdev>>21)+(rdev))&(SPECHSZ-1)) +#define SPECHSZ 64 +#if ((SPECHSZ & (SPECHSZ - 1)) == 0) +#define SPECHASH(rdev) (((rdev>>21)+(rdev))&(SPECHSZ-1)) #else -#define SPECHASH(rdev) (((unsigned)((rdev>>21)+(rdev)))%SPECHSZ) +#define SPECHASH(rdev) (((unsigned)((rdev>>21)+(rdev)))%SPECHSZ) #endif extern struct vnode *speclisth[SPECHSZ]; @@ -122,39 +122,39 @@ extern struct vnode *speclisth[SPECHSZ]; /* * Prototypes for special file operations on vnodes. 
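The SPECHASH macro above picks between a mask and a modulo at preprocessing time: because SPECHSZ is 64, a power of two, the & form is selected, and folding in rdev>>21 mixes major-number bits into the bucket index. A quick standalone check of the macro as written:

#include <stdio.h>

#define SPECHSZ 64
#if ((SPECHSZ & (SPECHSZ - 1)) == 0)
#define SPECHASH(rdev) (((rdev >> 21) + (rdev)) & (SPECHSZ - 1))
#else
#define SPECHASH(rdev) (((unsigned)((rdev >> 21) + (rdev))) % SPECHSZ)
#endif

int main(void)
{
    /* A few sample dev_t-style values, chosen arbitrarily. */
    unsigned devs[] = { 0x01000002u, 0x01000003u, 0x0e000002u };
    for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
        printf("rdev 0x%08x -> bucket %u\n", devs[i], SPECHASH(devs[i]));
    }
    return 0;
}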
*/ -extern int (**spec_vnodeop_p)(void *); -struct nameidata; -struct componentname; -struct flock; -struct buf; -struct uio; +extern int(**spec_vnodeop_p)(void *); +struct nameidata; +struct componentname; +struct flock; +struct buf; +struct uio; __BEGIN_DECLS #ifdef BSD_KERNEL_PRIVATE -int spec_blktooff (struct vnop_blktooff_args *); -int spec_offtoblk (struct vnop_offtoblk_args *); -int spec_fsync_internal (vnode_t, int, vfs_context_t); -int spec_blockmap (struct vnop_blockmap_args *); -int spec_kqfilter (vnode_t vp, struct knote *kn, struct kevent_internal_s *kev); +int spec_blktooff(struct vnop_blktooff_args *); +int spec_offtoblk(struct vnop_offtoblk_args *); +int spec_fsync_internal(vnode_t, int, vfs_context_t); +int spec_blockmap(struct vnop_blockmap_args *); +int spec_kqfilter(vnode_t vp, struct knote *kn, struct kevent_internal_s *kev); #endif /* BSD_KERNEL_PRIVATE */ -int spec_ebadf(void *); +int spec_ebadf(void *); -int spec_lookup (struct vnop_lookup_args *); +int spec_lookup(struct vnop_lookup_args *); #define spec_create (int (*) (struct vnop_access_args *))err_create #define spec_mknod (int (*) (struct vnop_access_args *))err_mknod -int spec_open (struct vnop_open_args *); -int spec_close (struct vnop_close_args *); +int spec_open(struct vnop_open_args *); +int spec_close(struct vnop_close_args *); #define spec_access (int (*) (struct vnop_access_args *))spec_ebadf #define spec_getattr (int (*) (struct vnop_getattr_args *))spec_ebadf #define spec_setattr (int (*) (struct vnop_setattr_args *))spec_ebadf -int spec_read (struct vnop_read_args *); -int spec_write (struct vnop_write_args *); -int spec_ioctl (struct vnop_ioctl_args *); -int spec_select (struct vnop_select_args *); +int spec_read(struct vnop_read_args *); +int spec_write(struct vnop_write_args *); +int spec_ioctl(struct vnop_ioctl_args *); +int spec_select(struct vnop_select_args *); #define spec_revoke (int (*) (struct vnop_access_args *))nop_revoke #define spec_mmap (int (*) (struct vnop_access_args *))err_mmap -int spec_fsync (struct vnop_fsync_args *); +int spec_fsync(struct vnop_fsync_args *); #define spec_remove (int (*) (struct vnop_access_args *))err_remove #define spec_link (int (*) (struct vnop_access_args *))err_link #define spec_rename (int (*) (struct vnop_access_args *))err_rename @@ -167,9 +167,9 @@ int spec_fsync (struct vnop_fsync_args *); #define spec_reclaim (int (*) (struct vnop_access_args *))nop_reclaim #define spec_lock (int (*) (struct vnop_access_args *))nop_lock #define spec_unlock (int (*)(struct vnop_access_args *))nop_unlock -int spec_strategy (struct vnop_strategy_args *); +int spec_strategy(struct vnop_strategy_args *); #define spec_islocked (int (*) (struct vnop_access_args *))nop_islocked -int spec_pathconf (struct vnop_pathconf_args *); +int spec_pathconf(struct vnop_pathconf_args *); #define spec_advlock (int (*) (struct vnop_access_args *))err_advlock #define spec_blkatoff (int (*) (struct vnop_access_args *))err_blkatoff #define spec_valloc (int (*) (struct vnop_access_args *))err_valloc diff --git a/bsd/miscfs/union/union.h b/bsd/miscfs/union/union.h index eee3e5a87..8b29ce7f3 100644 --- a/bsd/miscfs/union/union.h +++ b/bsd/miscfs/union/union.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ diff --git a/bsd/net/altq/altq.h b/bsd/net/altq/altq.h index a3b18841f..c58ffaa81 100644 --- a/bsd/net/altq/altq.h +++ b/bsd/net/altq/altq.h @@ -55,18 +55,18 @@ * SUCH DAMAGE. */ #ifndef _ALTQ_ALTQ_H_ -#define _ALTQ_ALTQ_H_ +#define _ALTQ_ALTQ_H_ #ifdef PRIVATE #include /* altq discipline type */ -#define ALTQT_NONE PKTSCHEDT_NONE /* reserved */ -#define ALTQT_CBQ PKTSCHEDT_CBQ /* cbq */ -#define ALTQT_HFSC PKTSCHEDT_HFSC /* hfsc */ -#define ALTQT_PRIQ PKTSCHEDT_PRIQ /* priority queue */ -#define ALTQT_FAIRQ PKTSCHEDT_FAIRQ /* fairq */ -#define ALTQT_QFQ PKTSCHEDT_QFQ /* quick fair queueing */ -#define ALTQT_MAX PKTSCHEDT_MAX /* should be max disc type + 1 */ +#define ALTQT_NONE PKTSCHEDT_NONE /* reserved */ +#define ALTQT_CBQ PKTSCHEDT_CBQ /* cbq */ +#define ALTQT_HFSC PKTSCHEDT_HFSC /* hfsc */ +#define ALTQT_PRIQ PKTSCHEDT_PRIQ /* priority queue */ +#define ALTQT_FAIRQ PKTSCHEDT_FAIRQ /* fairq */ +#define ALTQT_QFQ PKTSCHEDT_QFQ /* quick fair queueing */ +#define ALTQT_MAX PKTSCHEDT_MAX /* should be max disc type + 1 */ #endif /* PRIVATE */ #endif /* _ALTQ_ALTQ_H_ */ diff --git a/bsd/net/altq/altq_cbq.h b/bsd/net/altq/altq_cbq.h index 3a47e8df0..572416059 100644 --- a/bsd/net/altq/altq_cbq.h +++ b/bsd/net/altq/altq_cbq.h @@ -60,7 +60,7 @@ */ #ifndef _NET_ALTQ_ALTQ_CBQ_H_ -#define _NET_ALTQ_ALTQ_CBQ_H_ +#define _NET_ALTQ_ALTQ_CBQ_H_ #include #include diff --git a/bsd/net/altq/altq_fairq.h b/bsd/net/altq/altq_fairq.h index f9c20940a..976242cf2 100644 --- a/bsd/net/altq/altq_fairq.h +++ b/bsd/net/altq/altq_fairq.h @@ -28,14 +28,14 @@ /* * Copyright (c) 2008 The DragonFly Project. All rights reserved. - * + * * This code is derived from software contributed to The DragonFly Project * by Matthew Dillon - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright @@ -45,7 +45,7 @@ * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. 
- * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS @@ -58,12 +58,12 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. - * + * * $DragonFly: src/sys/net/altq/altq_fairq.h,v 1.1 2008/04/06 18:58:15 dillon Exp $ */ #ifndef _NET_ALTQ_ALTQ_FAIRQ_H_ -#define _NET_ALTQ_ALTQ_FAIRQ_H_ +#define _NET_ALTQ_ALTQ_FAIRQ_H_ #include #include diff --git a/bsd/net/altq/altq_hfsc.h b/bsd/net/altq/altq_hfsc.h index 0addc4fea..80eef1d64 100644 --- a/bsd/net/altq/altq_hfsc.h +++ b/bsd/net/altq/altq_hfsc.h @@ -59,7 +59,7 @@ * changes without encumbrance. */ #ifndef _NET_ALTQ_ALTQ_HFSC_H_ -#define _NET_ALTQ_ALTQ_HFSC_H_ +#define _NET_ALTQ_ALTQ_HFSC_H_ #include #include diff --git a/bsd/net/altq/altq_priq.h b/bsd/net/altq/altq_priq.h index f1f92e939..76126897c 100644 --- a/bsd/net/altq/altq_priq.h +++ b/bsd/net/altq/altq_priq.h @@ -55,7 +55,7 @@ */ #ifndef _NET_ALTQ_ALTQ_PRIQ_H_ -#define _NET_ALTQ_ALTQ_PRIQ_H_ +#define _NET_ALTQ_ALTQ_PRIQ_H_ #include #include diff --git a/bsd/net/altq/altq_qfq.h b/bsd/net/altq/altq_qfq.h index 942fca369..9bc5de99a 100644 --- a/bsd/net/altq/altq_qfq.h +++ b/bsd/net/altq/altq_qfq.h @@ -27,7 +27,7 @@ */ #ifndef _NET_ALTQ_ALTQ_QFQ_H_ -#define _NET_ALTQ_ALTQ_QFQ_H_ +#define _NET_ALTQ_ALTQ_QFQ_H_ #include #include diff --git a/bsd/net/bpf.c b/bsd/net/bpf.c index 860b73848..52047280c 100644 --- a/bsd/net/bpf.c +++ b/bsd/net/bpf.c @@ -76,9 +76,9 @@ #include "bpf.h" #ifndef __GNUC__ -#define inline +#define inline #else -#define inline __inline +#define inline __inline #endif #include @@ -140,10 +140,10 @@ extern int tvtohz(struct timeval *); -#define BPF_BUFSIZE 4096 -#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) +#define BPF_BUFSIZE 4096 +#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) -#define PRINET 26 /* interruptible */ +#define PRINET 26 /* interruptible */ #define ISAKMP_HDR_SIZE (sizeof(struct isakmp) + sizeof(struct isakmp_gen)) #define ESP_HDR_SIZE sizeof(struct newesp) @@ -155,13 +155,13 @@ typedef void (*pktcopyfunc_t)(const void *, void *, size_t); */ static unsigned int bpf_bufsize = BPF_BUFSIZE; SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW | CTLFLAG_LOCKED, - &bpf_bufsize, 0, ""); + &bpf_bufsize, 0, ""); __private_extern__ unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE; SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW | CTLFLAG_LOCKED, - &bpf_maxbufsize, 0, ""); + &bpf_maxbufsize, 0, ""); static unsigned int bpf_maxdevices = 256; SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RW | CTLFLAG_LOCKED, - &bpf_maxdevices, 0, ""); + &bpf_maxdevices, 0, ""); /* * bpf_wantpktap controls the defaul visibility of DLT_PKTAP * For OS X is off by default so process need to use the ioctl BPF_WANT_PKTAP @@ -173,17 +173,17 @@ static unsigned int bpf_wantpktap = 1; static unsigned int bpf_wantpktap = 0; #endif SYSCTL_UINT(_debug, OID_AUTO, bpf_wantpktap, CTLFLAG_RW | CTLFLAG_LOCKED, - &bpf_wantpktap, 0, ""); + &bpf_wantpktap, 0, ""); static int bpf_debug = 0; SYSCTL_INT(_debug, OID_AUTO, bpf_debug, CTLFLAG_RW | CTLFLAG_LOCKED, - &bpf_debug, 0, ""); + &bpf_debug, 0, ""); /* * bpf_iflist is the list of interfaces; each corresponds to an ifnet * bpf_dtab holds pointer to the descriptors, indexed by minor device # */ -static struct bpf_if *bpf_iflist; +static struct bpf_if *bpf_iflist; 
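The SYSCTL_INT/SYSCTL_UINT registrations above surface the BPF tunables under the debug node, so from userland they are reachable by name. A small reader, assuming the debug.bpf_maxbufsize name these registrations create:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
    unsigned int val = 0;
    size_t len = sizeof(val);

    if (sysctlbyname("debug.bpf_maxbufsize", &val, &len, NULL, 0) == -1) {
        perror("sysctlbyname");
        return 1;
    }
    printf("debug.bpf_maxbufsize = %u\n", val);
    return 0;
}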
#ifdef __APPLE__ /* * BSD now stores the bpf_d in the dev_t which is a struct @@ -195,41 +195,41 @@ static struct bpf_if *bpf_iflist; * BPF_DEV_RESERVED: device opening or closing * other: device opened with pointer to storage */ -#define BPF_DEV_RESERVED ((struct bpf_d *)(uintptr_t)1) -static struct bpf_d **bpf_dtab = NULL; +#define BPF_DEV_RESERVED ((struct bpf_d *)(uintptr_t)1) +static struct bpf_d **bpf_dtab = NULL; static unsigned int bpf_dtab_size = 0; -static unsigned int nbpfilter = 0; +static unsigned int nbpfilter = 0; decl_lck_mtx_data(static, bpf_mlock_data); -static lck_mtx_t *bpf_mlock = &bpf_mlock_data; -static lck_grp_t *bpf_mlock_grp; -static lck_grp_attr_t *bpf_mlock_grp_attr; -static lck_attr_t *bpf_mlock_attr; +static lck_mtx_t *bpf_mlock = &bpf_mlock_data; +static lck_grp_t *bpf_mlock_grp; +static lck_grp_attr_t *bpf_mlock_grp_attr; +static lck_attr_t *bpf_mlock_attr; #endif /* __APPLE__ */ -static int bpf_allocbufs(struct bpf_d *); -static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp); -static int bpf_detachd(struct bpf_d *d, int); -static void bpf_freed(struct bpf_d *); -static int bpf_movein(struct uio *, int, - struct mbuf **, struct sockaddr *, int *); -static int bpf_setif(struct bpf_d *, ifnet_t ifp, bool, bool); -static void bpf_timed_out(void *, void *); -static void bpf_wakeup(struct bpf_d *); -static u_int get_pkt_trunc_len(u_char *, u_int); -static void catchpacket(struct bpf_d *, struct bpf_packet *, u_int, int); -static void reset_d(struct bpf_d *); -static int bpf_setf(struct bpf_d *, u_int, user_addr_t, u_long); -static int bpf_getdltlist(struct bpf_d *, caddr_t, struct proc *); -static int bpf_setdlt(struct bpf_d *, u_int); -static int bpf_set_traffic_class(struct bpf_d *, int); -static void bpf_set_packet_service_class(struct mbuf *, int); - -static void bpf_acquire_d(struct bpf_d *); -static void bpf_release_d(struct bpf_d *); - -static int bpf_devsw_installed; +static int bpf_allocbufs(struct bpf_d *); +static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp); +static int bpf_detachd(struct bpf_d *d, int); +static void bpf_freed(struct bpf_d *); +static int bpf_movein(struct uio *, int, + struct mbuf **, struct sockaddr *, int *); +static int bpf_setif(struct bpf_d *, ifnet_t ifp, bool, bool); +static void bpf_timed_out(void *, void *); +static void bpf_wakeup(struct bpf_d *); +static u_int get_pkt_trunc_len(u_char *, u_int); +static void catchpacket(struct bpf_d *, struct bpf_packet *, u_int, int); +static void reset_d(struct bpf_d *); +static int bpf_setf(struct bpf_d *, u_int, user_addr_t, u_long); +static int bpf_getdltlist(struct bpf_d *, caddr_t, struct proc *); +static int bpf_setdlt(struct bpf_d *, u_int); +static int bpf_set_traffic_class(struct bpf_d *, int); +static void bpf_set_packet_service_class(struct mbuf *, int); + +static void bpf_acquire_d(struct bpf_d *); +static void bpf_release_d(struct bpf_d *); + +static int bpf_devsw_installed; void bpf_init(void *unused); static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m); @@ -238,33 +238,33 @@ static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m); * Darwin differs from BSD here, the following are static * on BSD and not static on Darwin. 
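The three-state bpf_dtab slot protocol described above (NULL = free, BPF_DEV_RESERVED = opening or closing, pointer = open) is what lets bpfopen fail fast with EBUSY while a slot is in transition. A minimal sketch of the reserve-then-publish sequence, with a plain mutex standing in for bpf_mlock:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct bpf_d { int minor; };

#define BPF_DEV_RESERVED ((struct bpf_d *)(uintptr_t)1)
#define NBPF 4

static struct bpf_d *dtab[NBPF];                 /* NULL = free slot */
static pthread_mutex_t mlock = PTHREAD_MUTEX_INITIALIZER;

static int open_minor(int minor)
{
    pthread_mutex_lock(&mlock);
    if (dtab[minor] != NULL) {                   /* open or in transition */
        pthread_mutex_unlock(&mlock);
        return EBUSY;
    }
    dtab[minor] = BPF_DEV_RESERVED;              /* reserve while initializing */
    pthread_mutex_unlock(&mlock);

    struct bpf_d *d = calloc(1, sizeof(*d));
    if (d == NULL) {
        pthread_mutex_lock(&mlock);
        dtab[minor] = NULL;                      /* roll back the reservation */
        pthread_mutex_unlock(&mlock);
        return ENOMEM;
    }
    d->minor = minor;

    pthread_mutex_lock(&mlock);
    dtab[minor] = d;                             /* publish: mark opened */
    pthread_mutex_unlock(&mlock);
    return 0;
}

int main(void)
{
    printf("first open: %d\n", open_minor(1));
    printf("second open: %d (EBUSY=%d)\n", open_minor(1), EBUSY);
    return 0;
}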
*/ - d_open_t bpfopen; - d_close_t bpfclose; - d_read_t bpfread; - d_write_t bpfwrite; - ioctl_fcn_t bpfioctl; - select_fcn_t bpfselect; +d_open_t bpfopen; +d_close_t bpfclose; +d_read_t bpfread; +d_write_t bpfwrite; +ioctl_fcn_t bpfioctl; +select_fcn_t bpfselect; /* Darwin's cdevsw struct differs slightly from BSDs */ -#define CDEV_MAJOR 23 +#define CDEV_MAJOR 23 static struct cdevsw bpf_cdevsw = { - /* open */ bpfopen, - /* close */ bpfclose, - /* read */ bpfread, - /* write */ bpfwrite, - /* ioctl */ bpfioctl, - /* stop */ eno_stop, - /* reset */ eno_reset, - /* tty */ NULL, - /* select */ bpfselect, - /* mmap */ eno_mmap, - /* strategy */ eno_strat, - /* getc */ eno_getc, - /* putc */ eno_putc, - /* type */ 0 + /* open */ bpfopen, + /* close */ bpfclose, + /* read */ bpfread, + /* write */ bpfwrite, + /* ioctl */ bpfioctl, + /* stop */ eno_stop, + /* reset */ eno_reset, + /* tty */ NULL, + /* select */ bpfselect, + /* mmap */ eno_mmap, + /* strategy */ eno_strat, + /* getc */ eno_getc, + /* putc */ eno_putc, + /* type */ 0 }; -#define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data) +#define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data) static int bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, @@ -277,7 +277,6 @@ bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, int hlen; switch (linktype) { - #if SLIP case DLT_SLIP: sa_family = AF_INET; @@ -318,13 +317,13 @@ bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, * specified anyway. */ sa_family = AF_UNSPEC; - hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ + hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ break; #endif case DLT_PPP: sa_family = AF_UNSPEC; - hlen = 4; /* This should match PPP_HDRLEN */ + hlen = 4; /* This should match PPP_HDRLEN */ break; case DLT_APPLE_IP_OVER_IEEE1394: @@ -332,7 +331,7 @@ bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, hlen = sizeof(struct firewire_header); break; - case DLT_IEEE802_11: /* IEEE 802.11 wireless */ + case DLT_IEEE802_11: /* IEEE 802.11 wireless */ sa_family = AF_IEEE80211; hlen = 0; break; @@ -343,14 +342,15 @@ bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, break; default: - return (EIO); + return EIO; } // LP64todo - fix this! len = uio_resid(uio); *datlen = len - hlen; - if ((unsigned)len > MCLBYTES) - return (EIO); + if ((unsigned)len > MCLBYTES) { + return EIO; + } if (sockp) { /* @@ -363,7 +363,7 @@ bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, * for the link level header. 
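Each DLT case in bpf_movein above fixes two things before the copy-in: the sockaddr family for the outgoing packet and how many leading bytes are link-level header rather than payload. A table-driven sketch covering cases visible in the hunk; the FireWire size is a stand-in for sizeof(struct firewire_header), which is not shown here.

#include <stdio.h>

/* Illustrative subset of the bpf_movein switch: DLT -> link header bytes
 * that precede payload in a write. */
struct dlt_hdr { const char *name; int hlen; };

static struct dlt_hdr dlt_table[] = {
    { "DLT_PPP",                    4 },  /* matches PPP_HDRLEN */
    { "DLT_APPLE_IP_OVER_IEEE1394", 18 }, /* placeholder header size */
    { "DLT_IEEE802_11",             0 },  /* no prepended link header */
};

int main(void)
{
    int len = 1500; /* bytes queued by the writer */
    for (unsigned i = 0; i < sizeof(dlt_table) / sizeof(dlt_table[0]); i++) {
        printf("%-28s hlen=%2d datlen=%d\n",
            dlt_table[i].name, dlt_table[i].hlen, len - dlt_table[i].hlen);
    }
    return 0;
}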
*/ if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) { - return (EIO); + return EIO; } sockp->sa_family = sa_family; } else { @@ -377,8 +377,9 @@ bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, } MGETHDR(m, M_WAIT, MT_DATA); - if (m == 0) - return (ENOBUFS); + if (m == 0) { + return ENOBUFS; + } if ((unsigned)len > MHLEN) { MCLGET(m, M_WAIT); if ((m->m_flags & M_EXT) == 0) { @@ -398,35 +399,37 @@ bpf_movein(struct uio *uio, int linktype, struct mbuf **mp, m->m_len -= hlen; m->m_data += hlen; /* XXX */ error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); - if (error) + if (error) { goto bad; + } } error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); - if (error) + if (error) { goto bad; + } /* Check for multicast destination */ switch (linktype) { - case DLT_EN10MB: { - struct ether_header *eh; - - eh = mtod(m, struct ether_header *); - if (ETHER_IS_MULTICAST(eh->ether_dhost)) { - if (_ether_cmp(etherbroadcastaddr, - eh->ether_dhost) == 0) { - m->m_flags |= M_BCAST; - } else { - m->m_flags |= M_MCAST; - } + case DLT_EN10MB: { + struct ether_header *eh; + + eh = mtod(m, struct ether_header *); + if (ETHER_IS_MULTICAST(eh->ether_dhost)) { + if (_ether_cmp(etherbroadcastaddr, + eh->ether_dhost) == 0) { + m->m_flags |= M_BCAST; + } else { + m->m_flags |= M_MCAST; } - break; } + break; + } } - return (0); + return 0; bad: m_freem(m); - return (error); + return error; } #ifdef __APPLE__ @@ -439,11 +442,12 @@ bad: static void bpf_make_dev_t(int maj) { - static int bpf_growing = 0; - unsigned int cur_size = nbpfilter, i; + static int bpf_growing = 0; + unsigned int cur_size = nbpfilter, i; - if (nbpfilter >= bpf_maxdevices) + if (nbpfilter >= bpf_maxdevices) { return; + } while (bpf_growing) { /* Wait until new device has been created */ @@ -463,7 +467,7 @@ bpf_make_dev_t(int maj) new_dtab_size = bpf_dtab_size + NBPFILTER; new_dtab = (struct bpf_d **)_MALLOC( - sizeof(struct bpf_d *) * new_dtab_size, M_DEVBUF, M_WAIT); + sizeof(struct bpf_d *) * new_dtab_size, M_DEVBUF, M_WAIT); if (new_dtab == 0) { printf("bpf_make_dev_t: malloc bpf_dtab failed\n"); goto done; @@ -477,13 +481,14 @@ bpf_make_dev_t(int maj) old_dtab = bpf_dtab; bpf_dtab = new_dtab; bpf_dtab_size = new_dtab_size; - if (old_dtab != NULL) + if (old_dtab != NULL) { _FREE(old_dtab, M_DEVBUF); + } } i = nbpfilter++; (void) devfs_make_node(makedev(maj, i), - DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, - "bpf%d", i); + DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, + "bpf%d", i); done: bpf_growing = 0; wakeup((caddr_t)&bpf_growing); @@ -498,7 +503,7 @@ static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp) { int first = bp->bif_dlist == NULL; - int error = 0; + int error = 0; /* * Point d at bp, and add d to the interface's list of listeners. 
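The DLT_EN10MB branch above classifies an outgoing frame by its destination MAC so the mbuf gets M_BCAST or M_MCAST before transmit. The same test in standalone form: the group bit is the low-order bit of the first octet, and broadcast is the all-ones address. The mbuf flag values below are illustrative, not the kernel's.

#include <stdio.h>
#include <string.h>

#define M_BCAST 0x1 /* illustrative flag values */
#define M_MCAST 0x2

static const unsigned char etherbroadcastaddr[6] =
    { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/* Same test ETHER_IS_MULTICAST performs: group bit of the first octet. */
#define ETHER_IS_MULTICAST(a) ((a)[0] & 0x01)

static int classify(const unsigned char dhost[6])
{
    int flags = 0;
    if (ETHER_IS_MULTICAST(dhost)) {
        if (memcmp(etherbroadcastaddr, dhost, 6) == 0) {
            flags |= M_BCAST;
        } else {
            flags |= M_MCAST;
        }
    }
    return flags;
}

int main(void)
{
    unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    printf("bcast flags=%d\n", classify(etherbroadcastaddr));
    printf("mcast flags=%d\n", classify(mcast));
    return 0;
}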
@@ -518,7 +523,7 @@ bpf_attachd(struct bpf_d *d, struct bpf_if *bp) if (first) { /* Find the default bpf entry for this ifp */ if (bp->bif_ifp->if_bpf == NULL) { - struct bpf_if *tmp, *primary = NULL; + struct bpf_if *tmp, *primary = NULL; for (tmp = bpf_iflist; tmp; tmp = tmp->bif_next) { if (tmp->bif_ifp == bp->bif_ifp) { @@ -529,13 +534,15 @@ bpf_attachd(struct bpf_d *d, struct bpf_if *bp) bp->bif_ifp->if_bpf = primary; } /* Only call dlil_set_bpf_tap for primary dlt */ - if (bp->bif_ifp->if_bpf == bp) + if (bp->bif_ifp->if_bpf == bp) { dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback); + } - if (bp->bif_tap != NULL) + if (bp->bif_tap != NULL) { error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, BPF_TAP_INPUT_OUTPUT); + } } /* @@ -548,7 +555,7 @@ bpf_attachd(struct bpf_d *d, struct bpf_if *bp) } else { d->bd_flags &= ~BPF_FINALIZE_PKTAP; } - return (error); + return error; } /* @@ -567,8 +574,9 @@ bpf_detachd(struct bpf_d *d, int closing) /* * Some other thread already detached */ - if ((d->bd_flags & (BPF_DETACHED | BPF_DETACHING)) != 0) + if ((d->bd_flags & (BPF_DETACHED | BPF_DETACHING)) != 0) { goto done; + } /* * This thread is doing the detach */ @@ -577,17 +585,19 @@ bpf_detachd(struct bpf_d *d, int closing) ifp = d->bd_bif->bif_ifp; bp = d->bd_bif; - if (bpf_debug != 0) + if (bpf_debug != 0) { printf("%s: %llx %s%s\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(d), if_name(ifp), closing ? " closing" : ""); + } /* Remove d from the interface's descriptor list. */ p = &bp->bif_dlist; while (*p != d) { p = &(*p)->bd_next; - if (*p == 0) + if (*p == 0) { panic("bpf_detachd: descriptor not in list"); + } } *p = (*p)->bd_next; if (bp->bif_dlist == 0) { @@ -595,16 +605,21 @@ bpf_detachd(struct bpf_d *d, int closing) * Let the driver know that there are no more listeners. */ /* Only call dlil_set_bpf_tap for primary dlt */ - if (bp->bif_ifp->if_bpf == bp) + if (bp->bif_ifp->if_bpf == bp) { dlil_set_bpf_tap(ifp, BPF_TAP_DISABLE, NULL); - if (bp->bif_tap) + } + if (bp->bif_tap) { bp->bif_tap(ifp, bp->bif_dlt, BPF_TAP_DISABLE); + } - for (bp = bpf_iflist; bp; bp = bp->bif_next) - if (bp->bif_ifp == ifp && bp->bif_dlist != 0) + for (bp = bpf_iflist; bp; bp = bp->bif_next) { + if (bp->bif_ifp == ifp && bp->bif_dlist != 0) { break; - if (bp == NULL) + } + } + if (bp == NULL) { ifp->if_bpf = NULL; + } } d->bd_bif = NULL; /* @@ -645,16 +660,18 @@ done: /* * When closing makes sure no other thread refer to the bpf_d */ - if (bpf_debug != 0) + if (bpf_debug != 0) { printf("%s: %llx done\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(d)); + } /* * Let the caller know the bpf_d is closed */ - if (bpf_closed) - return (1); - else - return (0); + if (bpf_closed) { + return 1; + } else { + return 0; + } } /* @@ -672,8 +689,8 @@ bpf_start_timer(struct bpf_d *d) tv.tv_usec = (d->bd_rtout % hz) * tick; clock_interval_to_deadline( - (uint64_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec, - NSEC_PER_USEC, &deadline); + (uint64_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec, + NSEC_PER_USEC, &deadline); /* * The state is BPF_IDLE, so the timer hasn't * been started yet, and hasn't gone off yet; @@ -705,7 +722,7 @@ bpf_stop_timer(struct bpf_d *d) * find the state is BPF_IDLE, and just release the * lock and return. 
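bpf_start_timer above converts the descriptor's read timeout from scheduler ticks back to a timeval (whole seconds plus the hz remainder times tick microseconds) before turning it into an absolute deadline. The conversion in isolation, with hz and tick as placeholders for the kernel globals:

#include <stdio.h>

/* Stand-ins for the kernel globals: hz ticks per second,
 * tick microseconds per tick (hz * tick == 1000000). */
static const int hz = 100;
static const int tick = 10000;

int main(void)
{
    int bd_rtout = 250;                    /* timeout in ticks */
    long tv_sec = bd_rtout / hz;
    long tv_usec = (long)(bd_rtout % hz) * tick;
    unsigned long long total_usec =
        (unsigned long long)tv_sec * 1000000ULL + (unsigned long long)tv_usec;

    printf("%d ticks -> %ld s + %ld us (deadline offset %llu us)\n",
        bd_rtout, tv_sec, tv_usec, total_usec);
    return 0;
}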
*/ - return (thread_call_cancel(d->bd_thread_call)); + return thread_call_cancel(d->bd_thread_call); } void @@ -728,8 +745,9 @@ bpf_release_d(struct bpf_d *d) LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED); - if (d->bd_refcnt <= 0) + if (d->bd_refcnt <= 0) { panic("%s: %p refcnt <= 0", __func__, d); + } d->bd_refcnt -= 1; @@ -738,8 +756,9 @@ bpf_release_d(struct bpf_d *d) if (d->bd_refcnt == 0) { /* Assert the device is detached */ - if ((d->bd_flags & BPF_DETACHED) == 0) + if ((d->bd_flags & BPF_DETACHED) == 0) { panic("%s: %p BPF_DETACHED not set", __func__, d); + } _FREE(d, M_DEVBUF); } @@ -752,14 +771,14 @@ bpf_release_d(struct bpf_d *d) /* ARGSUSED */ int bpfopen(dev_t dev, int flags, __unused int fmt, - struct proc *p) + struct proc *p) { struct bpf_d *d; lck_mtx_lock(bpf_mlock); if ((unsigned int) minor(dev) >= nbpfilter) { lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } /* * New device nodes are created on demand when opening the last one. @@ -770,8 +789,9 @@ bpfopen(dev_t dev, int flags, __unused int fmt, * last node. If not all processes are blocked, they could unexpectedly * get ENOENT and abort their opening loop. */ - if ((unsigned int) minor(dev) == (nbpfilter - 1)) + if ((unsigned int) minor(dev) == (nbpfilter - 1)) { bpf_make_dev_t(major(dev)); + } /* * Each minor can be opened by only one process. If the requested @@ -787,7 +807,7 @@ bpfopen(dev_t dev, int flags, __unused int fmt, bpf_dtab[minor(dev)] = BPF_DEV_RESERVED; } else { lck_mtx_unlock(bpf_mlock); - return (EBUSY); + return EBUSY; } d = (struct bpf_d *)_MALLOC(sizeof(struct bpf_d), M_DEVBUF, M_WAIT | M_ZERO); @@ -796,7 +816,7 @@ bpfopen(dev_t dev, int flags, __unused int fmt, printf("bpfopen: malloc bpf_d failed\n"); bpf_dtab[minor(dev)] = NULL; lck_mtx_unlock(bpf_mlock); - return (ENOMEM); + return ENOMEM; } /* Mark "in use" and do most initialization. */ @@ -808,10 +828,11 @@ bpfopen(dev_t dev, int flags, __unused int fmt, d->bd_state = BPF_IDLE; d->bd_traffic_class = SO_TC_BE; d->bd_flags |= BPF_DETACHED; - if (bpf_wantpktap) + if (bpf_wantpktap) { d->bd_flags |= BPF_WANT_PKTAP; - else + } else { d->bd_flags &= ~BPF_WANT_PKTAP; + } d->bd_thread_call = thread_call_allocate(bpf_timed_out, d); if (d->bd_thread_call == NULL) { printf("bpfopen: malloc thread call failed\n"); @@ -819,7 +840,7 @@ bpfopen(dev_t dev, int flags, __unused int fmt, bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENOMEM); + return ENOMEM; } d->bd_opened_by = p; uuid_generate(d->bd_uuid); @@ -831,7 +852,7 @@ bpfopen(dev_t dev, int flags, __unused int fmt, bpf_dtab[minor(dev)] = d; /* Mark opened */ lck_mtx_unlock(bpf_mlock); - return (0); + return 0; } /* @@ -851,7 +872,7 @@ bpfclose(dev_t dev, __unused int flags, __unused int fmt, d = bpf_dtab[minor(dev)]; if (d == NULL || d == BPF_DEV_RESERVED) { lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } /* @@ -859,9 +880,10 @@ bpfclose(dev_t dev, __unused int flags, __unused int fmt, */ d->bd_flags |= BPF_CLOSING; - if (bpf_debug != 0) + if (bpf_debug != 0) { printf("%s: %llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(d)); + } bpf_dtab[minor(dev)] = BPF_DEV_RESERVED; /* Reserve while closing */ @@ -869,90 +891,94 @@ bpfclose(dev_t dev, __unused int flags, __unused int fmt, * Deal with any in-progress timeouts. */ switch (d->bd_state) { - case BPF_IDLE: - /* - * Not waiting for a timeout, and no timeout happened. - */ - break; + case BPF_IDLE: + /* + * Not waiting for a timeout, and no timeout happened. 
+ */ + break; - case BPF_WAITING: + case BPF_WAITING: + /* + * Waiting for a timeout. + * Cancel any timer that has yet to go off, + * and mark the state as "closing". + * Then drop the lock to allow any timers that + * *have* gone off to run to completion, and wait + * for them to finish. + */ + if (!bpf_stop_timer(d)) { /* - * Waiting for a timeout. - * Cancel any timer that has yet to go off, - * and mark the state as "closing". - * Then drop the lock to allow any timers that - * *have* gone off to run to completion, and wait - * for them to finish. + * There was no pending call, so the call must + * have been in progress. Wait for the call to + * complete; we have to drop the lock while + * waiting. to let the in-progrss call complete */ - if (!bpf_stop_timer(d)) { - /* - * There was no pending call, so the call must - * have been in progress. Wait for the call to - * complete; we have to drop the lock while - * waiting. to let the in-progrss call complete - */ - d->bd_state = BPF_DRAINING; - while (d->bd_state == BPF_DRAINING) - msleep((caddr_t)d, bpf_mlock, PRINET, - "bpfdraining", NULL); + d->bd_state = BPF_DRAINING; + while (d->bd_state == BPF_DRAINING) { + msleep((caddr_t)d, bpf_mlock, PRINET, + "bpfdraining", NULL); } - d->bd_state = BPF_IDLE; - break; + } + d->bd_state = BPF_IDLE; + break; - case BPF_TIMED_OUT: - /* - * Timer went off, and the timeout routine finished. - */ - d->bd_state = BPF_IDLE; - break; + case BPF_TIMED_OUT: + /* + * Timer went off, and the timeout routine finished. + */ + d->bd_state = BPF_IDLE; + break; - case BPF_DRAINING: - /* - * Another thread is blocked on a close waiting for - * a timeout to finish. - * This "shouldn't happen", as the first thread to enter - * bpfclose() will set bpf_dtab[minor(dev)] to 1, and - * all subsequent threads should see that and fail with - * ENXIO. - */ - panic("Two threads blocked in a BPF close"); - break; + case BPF_DRAINING: + /* + * Another thread is blocked on a close waiting for + * a timeout to finish. + * This "shouldn't happen", as the first thread to enter + * bpfclose() will set bpf_dtab[minor(dev)] to 1, and + * all subsequent threads should see that and fail with + * ENXIO. 
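The close path here is a four-state machine: IDLE needs nothing; WAITING must cancel the pending call or, if the callout is already running, park in DRAINING until it finishes; TIMED_OUT just resets; and entering close in DRAINING is impossible because the reserved dtab slot makes a second closer fail with ENXIO. A compact sketch of those transitions:

#include <stdio.h>

enum bpf_state { BPF_IDLE, BPF_WAITING, BPF_TIMED_OUT, BPF_DRAINING };

/* Model of the switch in bpfclose: returns 1 if the closer must sleep
 * until an in-flight timeout callout finishes (the DRAINING wait). */
static int close_timer_step(enum bpf_state *state, int cancel_succeeded)
{
    switch (*state) {
    case BPF_IDLE:
        return 0;                          /* nothing pending */
    case BPF_WAITING:
        if (!cancel_succeeded) {
            *state = BPF_DRAINING;         /* callout already running */
            return 1;                      /* sleep until it completes */
        }
        *state = BPF_IDLE;
        return 0;
    case BPF_TIMED_OUT:
        *state = BPF_IDLE;                 /* callout already finished */
        return 0;
    case BPF_DRAINING:
        /* unreachable: the reserved dtab slot blocks a second closer */
        return -1;
    }
    return -1;
}

int main(void)
{
    enum bpf_state s = BPF_WAITING;
    printf("must drain: %d, state now %d\n", close_timer_step(&s, 0), s);
    return 0;
}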
+ */ + panic("Two threads blocked in a BPF close"); + break; } - if (d->bd_bif) + if (d->bd_bif) { bpf_detachd(d, 1); + } selthreadclear(&d->bd_sel); #if CONFIG_MACF_NET mac_bpfdesc_label_destroy(d); #endif thread_call_free(d->bd_thread_call); - while (d->bd_hbuf_read != 0) + while (d->bd_hbuf_read != 0) { msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + } bpf_freed(d); /* Mark free in same context as bpfopen comes to check */ - bpf_dtab[minor(dev)] = NULL; /* Mark closed */ + bpf_dtab[minor(dev)] = NULL; /* Mark closed */ bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (0); + return 0; } -#define BPF_SLEEP bpf_sleep +#define BPF_SLEEP bpf_sleep static int bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo) { u_int64_t abstime = 0; - if (timo != 0) + if (timo != 0) { clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime); + } - return (msleep1((caddr_t)d, bpf_mlock, pri, wmesg, abstime)); + return msleep1((caddr_t)d, bpf_mlock, pri, wmesg, abstime); } static void @@ -963,11 +989,13 @@ bpf_finalize_pktap(struct bpf_hdr *hp, struct pktap_header *pktaphdr) pktap_v2_hdr = (struct pktap_v2_hdr *)pktaphdr; - if (pktap_v2_hdr->pth_flags & PTH_FLAG_DELAY_PKTAP) + if (pktap_v2_hdr->pth_flags & PTH_FLAG_DELAY_PKTAP) { pktap_v2_finalize_proc_info(pktap_v2_hdr); + } } else { - if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP) + if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP) { pktap_finalize_proc_info(pktaphdr); + } if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) { hp->bh_tstamp.tv_sec = pktaphdr->pth_tstamp.tv_sec; @@ -981,9 +1009,9 @@ bpf_finalize_pktap(struct bpf_hdr *hp, struct pktap_header *pktaphdr) * into the hold slot, and the free buffer into the store slot. * Zero the length of the new store buffer. */ -#define ROTATE_BUFFERS(d) \ +#define ROTATE_BUFFERS(d) \ if (d->bd_hbuf_read != 0) \ - panic("rotating bpf buffers during read"); \ + panic("rotating bpf buffers during read"); \ (d)->bd_hbuf = (d)->bd_sbuf; \ (d)->bd_hlen = (d)->bd_slen; \ (d)->bd_hcnt = (d)->bd_scnt; \ @@ -1009,7 +1037,7 @@ bpfread(dev_t dev, struct uio *uio, int ioflag) if (d == NULL || d == BPF_DEV_RESERVED || (d->bd_flags & BPF_CLOSING) != 0) { lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } bpf_acquire_d(d); @@ -1021,22 +1049,24 @@ bpfread(dev_t dev, struct uio *uio, int ioflag) if (uio_resid(uio) != d->bd_bufsize) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (EINVAL); + return EINVAL; } - if (d->bd_state == BPF_WAITING) + if (d->bd_state == BPF_WAITING) { bpf_stop_timer(d); + } timed_out = (d->bd_state == BPF_TIMED_OUT); d->bd_state = BPF_IDLE; - while (d->bd_hbuf_read != 0) + while (d->bd_hbuf_read != 0) { msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + } if ((d->bd_flags & BPF_CLOSING) != 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } /* * If the hold buffer is empty, then do a timed sleep, which @@ -1068,31 +1098,32 @@ bpfread(dev_t dev, struct uio *uio, int ioflag) if (d->bd_bif == NULL) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } if (ioflag & IO_NDELAY) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (EWOULDBLOCK); + return EWOULDBLOCK; } - error = BPF_SLEEP(d, PRINET|PCATCH, "bpf", d->bd_rtout); + error = BPF_SLEEP(d, PRINET | PCATCH, "bpf", d->bd_rtout); /* * Make sure device is still opened */ if ((d->bd_flags & BPF_CLOSING) != 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } - while (d->bd_hbuf_read != 0) + while 
(d->bd_hbuf_read != 0) { msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + } if ((d->bd_flags & BPF_CLOSING) != 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } if (error == EINTR || error == ERESTART) { @@ -1122,7 +1153,7 @@ bpfread(dev_t dev, struct uio *uio, int ioflag) __func__, (uint64_t)VM_KERNEL_ADDRPERM(d)); error = EINTR; } - return (error); + return error; } if (error == EWOULDBLOCK) { /* @@ -1130,18 +1161,19 @@ bpfread(dev_t dev, struct uio *uio, int ioflag) * which may be nothing. If there is something * in the store buffer, we can rotate the buffers. */ - if (d->bd_hbuf) + if (d->bd_hbuf) { /* * We filled up the buffer in between * getting the timeout and arriving * here, so we don't need to rotate. */ break; + } if (d->bd_slen == 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (0); + return 0; } ROTATE_BUFFERS(d); break; @@ -1180,12 +1212,13 @@ bpfread(dev_t dev, struct uio *uio, int ioflag) ehp = (struct bpf_hdr_ext *)(void *)p; if ((flowid = ehp->bh_flowid) != 0) { - if (ehp->bh_proto == IPPROTO_TCP) + if (ehp->bh_proto == IPPROTO_TCP) { found = inp_findinpcb_procinfo(&tcbinfo, flowid, &soprocinfo); - else if (ehp->bh_proto == IPPROTO_UDP) + } else if (ehp->bh_proto == IPPROTO_UDP) { found = inp_findinpcb_procinfo(&udbinfo, flowid, &soprocinfo); + } if (found == 1) { ehp->bh_pid = soprocinfo.spi_pid; proc_name(ehp->bh_pid, ehp->bh_comm, @@ -1238,7 +1271,7 @@ bpfread(dev_t dev, struct uio *uio, int ioflag) if ((d->bd_flags & BPF_CLOSING) != 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } d->bd_hbuf_read = 0; @@ -1250,8 +1283,7 @@ bpfread(dev_t dev, struct uio *uio, int ioflag) bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (error); - + return error; } /* @@ -1265,12 +1297,14 @@ bpf_wakeup(struct bpf_d *d) d->bd_state = BPF_IDLE; } wakeup((caddr_t)d); - if (d->bd_async && d->bd_sig && d->bd_sigio) + if (d->bd_async && d->bd_sig && d->bd_sigio) { pgsigio(d->bd_sigio, d->bd_sig); + } selwakeup(&d->bd_sel); - if ((d->bd_flags & BPF_KNOTE)) + if ((d->bd_flags & BPF_KNOTE)) { KNOTE(&d->bd_sel.si_note, 1); + } } static void @@ -1285,8 +1319,9 @@ bpf_timed_out(void *arg, __unused void *dummy) * now stuff to read, wake it up. */ d->bd_state = BPF_TIMED_OUT; - if (d->bd_slen != 0) + if (d->bd_slen != 0) { bpf_wakeup(d); + } } else if (d->bd_state == BPF_DRAINING) { /* * A close is waiting for this to finish. 
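ROTATE_BUFFERS, used in the read path above, is the heart of BPF's double buffering: the filled store buffer becomes the hold buffer handed to the reader, the free buffer becomes the new store buffer, and the store length resets, all after asserting no read is mid-flight. The same rotation in a standalone model with simplified field names:

#include <assert.h>
#include <stdio.h>

struct bpf_desc {
    char *hbuf, *sbuf, *fbuf;  /* hold, store, free buffers */
    int hlen, slen;            /* bytes captured in hold/store */
    int hbuf_read;             /* reader currently owns hbuf */
};

/* Mirrors ROTATE_BUFFERS: store -> hold, free -> store, store resets.
 * Rotation only runs when the hold slot is empty. */
static void rotate_buffers(struct bpf_desc *d)
{
    assert(d->hbuf_read == 0); /* the kernel panics instead */
    d->hbuf = d->sbuf;
    d->hlen = d->slen;
    d->sbuf = d->fbuf;
    d->slen = 0;
    d->fbuf = NULL;            /* refilled when the reader releases hbuf */
}

int main(void)
{
    char a[64], b[64];
    struct bpf_desc d = { NULL, a, b, 0, 48, 0 };

    rotate_buffers(&d);
    printf("hold=%p (%d bytes), store=%p (%d bytes)\n",
        (void *)d.hbuf, d.hlen, (void *)d.sbuf, d.slen);
    return 0;
}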
@@ -1299,7 +1334,7 @@ bpf_timed_out(void *arg, __unused void *dummy) } /* keep in sync with bpf_movein above: */ -#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header)) +#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header)) int bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) @@ -1308,7 +1343,7 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) struct ifnet *ifp; struct mbuf *m = NULL; int error; - char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN]; + char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN]; int datlen = 0; int bif_dlt; int bd_hdrcmplt; @@ -1319,7 +1354,7 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) if (d == NULL || d == BPF_DEV_RESERVED || (d->bd_flags & BPF_CLOSING) != 0) { lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } bpf_acquire_d(d); @@ -1327,7 +1362,7 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) if (d->bd_bif == 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } ifp = d->bd_bif->bif_ifp; @@ -1335,12 +1370,12 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) if ((ifp->if_flags & IFF_UP) == 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENETDOWN); + return ENETDOWN; } if (uio_resid(uio) == 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (0); + return 0; } ((struct sockaddr *)dst_buf)->sa_len = sizeof(dst_buf); @@ -1355,15 +1390,15 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) lck_mtx_unlock(bpf_mlock); error = bpf_movein(uio, bif_dlt, &m, - bd_hdrcmplt ? NULL : (struct sockaddr *)dst_buf, - &datlen); + bd_hdrcmplt ? NULL : (struct sockaddr *)dst_buf, + &datlen); /* take the lock again */ lck_mtx_lock(bpf_mlock); if (error) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (error); + return error; } /* verify the device is still open */ @@ -1371,21 +1406,21 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) bpf_release_d(d); lck_mtx_unlock(bpf_mlock); m_freem(m); - return (ENXIO); + return ENXIO; } if (d->bd_bif == NULL) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); m_free(m); - return (ENXIO); + return ENXIO; } if ((unsigned)datlen > ifp->if_mtu) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); m_freem(m); - return (EMSGSIZE); + return EMSGSIZE; } #if CONFIG_MACF_NET @@ -1400,10 +1435,11 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) * The driver frees the mbuf. */ if (d->bd_hdrcmplt) { - if (d->bd_bif->bif_send) + if (d->bd_bif->bif_send) { error = d->bd_bif->bif_send(ifp, d->bd_bif->bif_dlt, m); - else + } else { error = dlil_output(ifp, 0, m, NULL, NULL, 1, NULL); + } } else { error = dlil_output(ifp, PF_INET, m, NULL, (struct sockaddr *)dst_buf, 0, NULL); @@ -1413,7 +1449,7 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (error); + return error; } /* @@ -1423,8 +1459,9 @@ bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag) static void reset_d(struct bpf_d *d) { - if (d->bd_hbuf_read != 0) + if (d->bd_hbuf_read != 0) { panic("resetting buffers during read"); + } if (d->bd_hbuf) { /* Free the hold buffer. 
*/ @@ -1448,13 +1485,15 @@ bpf_get_device_from_uuid(uuid_t uuid) struct bpf_d *d = bpf_dtab[i]; if (d == NULL || d == BPF_DEV_RESERVED || - (d->bd_flags & BPF_CLOSING) != 0) + (d->bd_flags & BPF_CLOSING) != 0) { continue; - if (uuid_compare(uuid, d->bd_uuid) == 0) - return (d); + } + if (uuid_compare(uuid, d->bd_uuid) == 0) { + return d; + } } - return (NULL); + return NULL; } /* @@ -1480,25 +1519,27 @@ bpf_setup(struct bpf_d *d_to, uuid_t uuid_from, ifnet_t ifp) os_log_info(OS_LOG_DEFAULT, "%s: uuids not found error %d", __func__, error); - return (error); + return error; } if (d_from->bd_opened_by != d_to->bd_opened_by) { error = EACCES; os_log_info(OS_LOG_DEFAULT, "%s: processes not matching error %d", __func__, error); - return (error); + return error; } /* * Prevent any read while copying */ - while (d_to->bd_hbuf_read != 0) + while (d_to->bd_hbuf_read != 0) { msleep((caddr_t)d_to, bpf_mlock, PRINET, __func__, NULL); + } d_to->bd_hbuf_read = 1; - while (d_from->bd_hbuf_read != 0) + while (d_from->bd_hbuf_read != 0) { msleep((caddr_t)d_from, bpf_mlock, PRINET, __func__, NULL); + } d_from->bd_hbuf_read = 1; /* @@ -1576,7 +1617,7 @@ done: d_to->bd_hbuf_read = 0; wakeup((caddr_t)d_to); - return (error); + return error; } /* @@ -1620,17 +1661,17 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, if (d == NULL || d == BPF_DEV_RESERVED || (d->bd_flags & BPF_CLOSING) != 0) { lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } bpf_acquire_d(d); - if (d->bd_state == BPF_WAITING) + if (d->bd_state == BPF_WAITING) { bpf_stop_timer(d); + } d->bd_state = BPF_IDLE; switch (cmd) { - default: error = EINVAL; break; @@ -1638,42 +1679,43 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, /* * Check for read packet available. */ - case FIONREAD: /* int */ - { - int n; + case FIONREAD: /* int */ + { + int n; - n = d->bd_slen; - if (d->bd_hbuf && d->bd_hbuf_read == 0) - n += d->bd_hlen; - - bcopy(&n, addr, sizeof (n)); - break; + n = d->bd_slen; + if (d->bd_hbuf && d->bd_hbuf_read == 0) { + n += d->bd_hlen; } - case SIOCGIFADDR: /* struct ifreq */ - { - struct ifnet *ifp; + bcopy(&n, addr, sizeof(n)); + break; + } - if (d->bd_bif == 0) - error = EINVAL; - else { - ifp = d->bd_bif->bif_ifp; - error = ifnet_ioctl(ifp, 0, cmd, addr); - } - break; + case SIOCGIFADDR: /* struct ifreq */ + { + struct ifnet *ifp; + + if (d->bd_bif == 0) { + error = EINVAL; + } else { + ifp = d->bd_bif->bif_ifp; + error = ifnet_ioctl(ifp, 0, cmd, addr); } + break; + } /* * Get buffer len [for read()]. */ - case BIOCGBLEN: /* u_int */ - bcopy(&d->bd_bufsize, addr, sizeof (u_int)); + case BIOCGBLEN: /* u_int */ + bcopy(&d->bd_bufsize, addr, sizeof(u_int)); break; /* * Set buffer length. */ - case BIOCSBLEN: { /* u_int */ + case BIOCSBLEN: { /* u_int */ u_int size; unsigned int maxbufsize = bpf_maxbufsize; @@ -1693,7 +1735,7 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, error = EINVAL; break; } - bcopy(addr, &size, sizeof (size)); + bcopy(addr, &size, sizeof(size)); if (size > maxbufsize) { d->bd_bufsize = maxbufsize; @@ -1712,27 +1754,27 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, } /* It's a read/write ioctl */ - bcopy(&d->bd_bufsize, addr, sizeof (u_int)); + bcopy(&d->bd_bufsize, addr, sizeof(u_int)); break; } /* * Set link layer read filter. 
*/ case BIOCSETF32: - case BIOCSETFNR32: { /* struct bpf_program32 */ + case BIOCSETFNR32: { /* struct bpf_program32 */ struct bpf_program32 prg32; - bcopy(addr, &prg32, sizeof (prg32)); + bcopy(addr, &prg32, sizeof(prg32)); error = bpf_setf(d, prg32.bf_len, CAST_USER_ADDR_T(prg32.bf_insns), cmd); break; } case BIOCSETF64: - case BIOCSETFNR64: { /* struct bpf_program64 */ + case BIOCSETFNR64: { /* struct bpf_program64 */ struct bpf_program64 prg64; - bcopy(addr, &prg64, sizeof (prg64)); + bcopy(addr, &prg64, sizeof(prg64)); error = bpf_setf(d, prg64.bf_len, prg64.bf_insns, cmd); break; } @@ -1767,25 +1809,27 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, lck_mtx_unlock(bpf_mlock); error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1); lck_mtx_lock(bpf_mlock); - if (error == 0) + if (error == 0) { d->bd_promisc = 1; + } } break; /* * Get device parameters. */ - case BIOCGDLT: /* u_int */ - if (d->bd_bif == 0) + case BIOCGDLT: /* u_int */ + if (d->bd_bif == 0) { error = EINVAL; - else - bcopy(&d->bd_bif->bif_dlt, addr, sizeof (u_int)); + } else { + bcopy(&d->bd_bif->bif_dlt, addr, sizeof(u_int)); + } break; /* * Get a list of supported data link types. */ - case BIOCGDLTLIST: /* struct bpf_dltlist */ + case BIOCGDLTLIST: /* struct bpf_dltlist */ if (d->bd_bif == NULL) { error = EINVAL; } else { @@ -1796,13 +1840,13 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, /* * Set data link type. */ - case BIOCSDLT: /* u_int */ + case BIOCSDLT: /* u_int */ if (d->bd_bif == NULL) { error = EINVAL; } else { u_int dlt; - bcopy(addr, &dlt, sizeof (dlt)); + bcopy(addr, &dlt, sizeof(dlt)); if (dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) { @@ -1815,41 +1859,42 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, /* * Get interface name. */ - case BIOCGETIF: /* struct ifreq */ - if (d->bd_bif == 0) + case BIOCGETIF: /* struct ifreq */ + if (d->bd_bif == 0) { error = EINVAL; - else { + } else { struct ifnet *const ifp = d->bd_bif->bif_ifp; snprintf(((struct ifreq *)(void *)addr)->ifr_name, - sizeof (ifr.ifr_name), "%s", if_name(ifp)); + sizeof(ifr.ifr_name), "%s", if_name(ifp)); } break; /* * Set interface. */ - case BIOCSETIF: { /* struct ifreq */ - ifnet_t ifp; + case BIOCSETIF: { /* struct ifreq */ + ifnet_t ifp; - bcopy(addr, &ifr, sizeof (ifr)); + bcopy(addr, &ifr, sizeof(ifr)); ifr.ifr_name[IFNAMSIZ - 1] = '\0'; ifp = ifunit(ifr.ifr_name); - if (ifp == NULL) + if (ifp == NULL) { error = ENXIO; - else + } else { error = bpf_setif(d, ifp, true, false); + } break; } /* * Set read timeout. */ - case BIOCSRTIMEOUT32: { /* struct user32_timeval */ + case BIOCSRTIMEOUT32: { /* struct user32_timeval */ struct user32_timeval _tv; struct timeval tv; - bcopy(addr, &_tv, sizeof (_tv)); + bcopy(addr, &_tv, sizeof(_tv)); tv.tv_sec = _tv.tv_sec; tv.tv_usec = _tv.tv_usec; @@ -1857,16 +1902,17 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, * Subtract 1 tick from tvtohz() since this isn't * a one-shot timer. 
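BIOCSBLEN above is a read/write ioctl: the caller proposes a size, the kernel clamps it to the (possibly sysctl-raised) maximum, and writes the size actually granted back into the same argument. The clamp in isolation, with placeholder bounds and the lower clamp assumed from the usual BSD behavior, since that branch is truncated in the hunk:

#include <stdio.h>

/* Placeholder bounds; the kernel uses BPF_MINBUFSIZE and bpf_maxbufsize. */
#define MINBUF 32
#define MAXBUF (512 * 1024)

/* Returns the buffer size actually granted, as BIOCSBLEN writes back. */
static unsigned int clamp_blen(unsigned int requested)
{
    if (requested > MAXBUF) {
        return MAXBUF;
    }
    if (requested < MINBUF) {
        return MINBUF;
    }
    return requested;
}

int main(void)
{
    printf("%u -> %u\n", 8u, clamp_blen(8));
    printf("%u -> %u\n", 4096u, clamp_blen(4096));
    printf("%u -> %u\n", 4u << 20, clamp_blen(4u << 20));
    return 0;
}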
*/ - if ((error = itimerfix(&tv)) == 0) + if ((error = itimerfix(&tv)) == 0) { d->bd_rtout = tvtohz(&tv) - 1; + } break; } - case BIOCSRTIMEOUT64: { /* struct user64_timeval */ + case BIOCSRTIMEOUT64: { /* struct user64_timeval */ struct user64_timeval _tv; struct timeval tv; - bcopy(addr, &_tv, sizeof (_tv)); + bcopy(addr, &_tv, sizeof(_tv)); tv.tv_sec = _tv.tv_sec; tv.tv_usec = _tv.tv_usec; @@ -1874,100 +1920,101 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, * Subtract 1 tick from tvtohz() since this isn't * a one-shot timer. */ - if ((error = itimerfix(&tv)) == 0) + if ((error = itimerfix(&tv)) == 0) { d->bd_rtout = tvtohz(&tv) - 1; + } break; } /* * Get read timeout. */ - case BIOCGRTIMEOUT32: { /* struct user32_timeval */ + case BIOCGRTIMEOUT32: { /* struct user32_timeval */ struct user32_timeval tv; - bzero(&tv, sizeof (tv)); + bzero(&tv, sizeof(tv)); tv.tv_sec = d->bd_rtout / hz; tv.tv_usec = (d->bd_rtout % hz) * tick; - bcopy(&tv, addr, sizeof (tv)); + bcopy(&tv, addr, sizeof(tv)); break; } - case BIOCGRTIMEOUT64: { /* struct user64_timeval */ + case BIOCGRTIMEOUT64: { /* struct user64_timeval */ struct user64_timeval tv; - bzero(&tv, sizeof (tv)); + bzero(&tv, sizeof(tv)); tv.tv_sec = d->bd_rtout / hz; tv.tv_usec = (d->bd_rtout % hz) * tick; - bcopy(&tv, addr, sizeof (tv)); + bcopy(&tv, addr, sizeof(tv)); break; } /* * Get packet stats. */ - case BIOCGSTATS: { /* struct bpf_stat */ + case BIOCGSTATS: { /* struct bpf_stat */ struct bpf_stat bs; - bzero(&bs, sizeof (bs)); + bzero(&bs, sizeof(bs)); bs.bs_recv = d->bd_rcount; bs.bs_drop = d->bd_dcount; - bcopy(&bs, addr, sizeof (bs)); + bcopy(&bs, addr, sizeof(bs)); break; } /* * Set immediate mode. */ - case BIOCIMMEDIATE: /* u_int */ + case BIOCIMMEDIATE: /* u_int */ d->bd_immediate = *(u_int *)(void *)addr; break; - case BIOCVERSION: { /* struct bpf_version */ + case BIOCVERSION: { /* struct bpf_version */ struct bpf_version bv; - bzero(&bv, sizeof (bv)); + bzero(&bv, sizeof(bv)); bv.bv_major = BPF_MAJOR_VERSION; bv.bv_minor = BPF_MINOR_VERSION; - bcopy(&bv, addr, sizeof (bv)); + bcopy(&bv, addr, sizeof(bv)); break; } /* * Get "header already complete" flag */ - case BIOCGHDRCMPLT: /* u_int */ - bcopy(&d->bd_hdrcmplt, addr, sizeof (u_int)); + case BIOCGHDRCMPLT: /* u_int */ + bcopy(&d->bd_hdrcmplt, addr, sizeof(u_int)); break; /* * Set "header already complete" flag */ - case BIOCSHDRCMPLT: /* u_int */ - bcopy(addr, &int_arg, sizeof (int_arg)); + case BIOCSHDRCMPLT: /* u_int */ + bcopy(addr, &int_arg, sizeof(int_arg)); d->bd_hdrcmplt = int_arg ? 
1 : 0; break; /* * Get "see sent packets" flag */ - case BIOCGSEESENT: /* u_int */ - bcopy(&d->bd_seesent, addr, sizeof (u_int)); + case BIOCGSEESENT: /* u_int */ + bcopy(&d->bd_seesent, addr, sizeof(u_int)); break; /* * Set "see sent packets" flag */ - case BIOCSSEESENT: /* u_int */ - bcopy(addr, &d->bd_seesent, sizeof (u_int)); + case BIOCSSEESENT: /* u_int */ + bcopy(addr, &d->bd_seesent, sizeof(u_int)); break; /* * Set traffic service class */ - case BIOCSETTC: { /* int */ + case BIOCSETTC: { /* int */ int tc; - bcopy(addr, &tc, sizeof (int)); + bcopy(addr, &tc, sizeof(int)); error = bpf_set_traffic_class(d, tc); break; } @@ -1975,15 +2022,15 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, /* * Get traffic service class */ - case BIOCGETTC: /* int */ - bcopy(&d->bd_traffic_class, addr, sizeof (int)); + case BIOCGETTC: /* int */ + bcopy(&d->bd_traffic_class, addr, sizeof(int)); break; - case FIONBIO: /* Non-blocking I/O; int */ + case FIONBIO: /* Non-blocking I/O; int */ break; - case FIOASYNC: /* Send signal on receive packets; int */ - bcopy(addr, &d->bd_async, sizeof (int)); + case FIOASYNC: /* Send signal on receive packets; int */ + bcopy(addr, &d->bd_async, sizeof(int)); break; #ifndef __APPLE__ case FIOSETOWN: @@ -2004,34 +2051,36 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, *(int *)addr = -fgetown(d->bd_sigio); break; #endif - case BIOCSRSIG: { /* Set receive signal; u_int */ + case BIOCSRSIG: { /* Set receive signal; u_int */ u_int sig; - bcopy(addr, &sig, sizeof (u_int)); + bcopy(addr, &sig, sizeof(u_int)); - if (sig >= NSIG) + if (sig >= NSIG) { error = EINVAL; - else + } else { d->bd_sig = sig; + } break; } - case BIOCGRSIG: /* u_int */ - bcopy(&d->bd_sig, addr, sizeof (u_int)); + case BIOCGRSIG: /* u_int */ + bcopy(&d->bd_sig, addr, sizeof(u_int)); break; #ifdef __APPLE__ - case BIOCSEXTHDR: /* u_int */ - bcopy(addr, &int_arg, sizeof (int_arg)); - if (int_arg) + case BIOCSEXTHDR: /* u_int */ + bcopy(addr, &int_arg, sizeof(int_arg)); + if (int_arg) { d->bd_flags |= BPF_EXTENDED_HDR; - else + } else { d->bd_flags &= ~BPF_EXTENDED_HDR; + } break; - case BIOCGIFATTACHCOUNT: { /* struct ifreq */ - ifnet_t ifp; + case BIOCGIFATTACHCOUNT: { /* struct ifreq */ + ifnet_t ifp; struct bpf_if *bp; - bcopy(addr, &ifr, sizeof (ifr)); + bcopy(addr, &ifr, sizeof(ifr)); ifr.ifr_name[IFNAMSIZ - 1] = '\0'; ifp = ifunit(ifr.ifr_name); if (ifp == NULL) { @@ -2042,56 +2091,59 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { struct bpf_d *bpf_d; - if (bp->bif_ifp == NULL || bp->bif_ifp != ifp) + if (bp->bif_ifp == NULL || bp->bif_ifp != ifp) { continue; + } for (bpf_d = bp->bif_dlist; bpf_d; bpf_d = bpf_d->bd_next) { ifr.ifr_intval += 1; } } - bcopy(&ifr, addr, sizeof (ifr)); + bcopy(&ifr, addr, sizeof(ifr)); break; } - case BIOCGWANTPKTAP: /* u_int */ + case BIOCGWANTPKTAP: /* u_int */ int_arg = d->bd_flags & BPF_WANT_PKTAP ? 1 : 0; - bcopy(&int_arg, addr, sizeof (int_arg)); + bcopy(&int_arg, addr, sizeof(int_arg)); break; - case BIOCSWANTPKTAP: /* u_int */ - bcopy(addr, &int_arg, sizeof (int_arg)); - if (int_arg) + case BIOCSWANTPKTAP: /* u_int */ + bcopy(addr, &int_arg, sizeof(int_arg)); + if (int_arg) { d->bd_flags |= BPF_WANT_PKTAP; - else + } else { d->bd_flags &= ~BPF_WANT_PKTAP; + } break; #endif case BIOCSHEADDROP: - bcopy(addr, &int_arg, sizeof (int_arg)); + bcopy(addr, &int_arg, sizeof(int_arg)); d->bd_headdrop = int_arg ? 
1 : 0; break; case BIOCGHEADDROP: - bcopy(&d->bd_headdrop, addr, sizeof (int)); + bcopy(&d->bd_headdrop, addr, sizeof(int)); break; case BIOCSTRUNCATE: bcopy(addr, &int_arg, sizeof(int_arg)); - if (int_arg) - d->bd_flags |= BPF_TRUNCATE; - else + if (int_arg) { + d->bd_flags |= BPF_TRUNCATE; + } else { d->bd_flags &= ~BPF_TRUNCATE; + } break; case BIOCGETUUID: - bcopy(&d->bd_uuid, addr, sizeof (uuid_t)); + bcopy(&d->bd_uuid, addr, sizeof(uuid_t)); break; case BIOCSETUP: { struct bpf_setup_args bsa; - ifnet_t ifp; + ifnet_t ifp; - bcopy(addr, &bsa, sizeof (struct bpf_setup_args)); + bcopy(addr, &bsa, sizeof(struct bpf_setup_args)); bsa.bsa_ifname[IFNAMSIZ - 1] = 0; ifp = ifunit(bsa.bsa_ifname); if (ifp == NULL) { @@ -2100,29 +2152,30 @@ bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags, "%s: ifnet not found for %s error %d", __func__, bsa.bsa_ifname, error); break; - } + } error = bpf_setup(d, bsa.bsa_uuid, ifp); break; } case BIOCSPKTHDRV2: bcopy(addr, &int_arg, sizeof(int_arg)); - if (int_arg != 0) + if (int_arg != 0) { d->bd_flags |= BPF_PKTHDRV2; - else + } else { d->bd_flags &= ~BPF_PKTHDRV2; + } break; case BIOCGPKTHDRV2: int_arg = d->bd_flags & BPF_PKTHDRV2 ? 1 : 0; - bcopy(&int_arg, addr, sizeof (int)); + bcopy(&int_arg, addr, sizeof(int)); break; } bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (error); + return error; } /* @@ -2136,46 +2189,54 @@ bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns, struct bpf_insn *fcode, *old; u_int flen, size; - while (d->bd_hbuf_read != 0) + while (d->bd_hbuf_read != 0) { msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + } - if ((d->bd_flags & BPF_CLOSING) != 0) - return (ENXIO); + if ((d->bd_flags & BPF_CLOSING) != 0) { + return ENXIO; + } old = d->bd_filter; if (bf_insns == USER_ADDR_NULL) { - if (bf_len != 0) - return (EINVAL); + if (bf_len != 0) { + return EINVAL; + } d->bd_filter = NULL; reset_d(d); - if (old != 0) - FREE((caddr_t)old, M_DEVBUF); - return (0); + if (old != 0) { + FREE(old, M_DEVBUF); + } + return 0; } flen = bf_len; - if (flen > BPF_MAXINSNS) - return (EINVAL); + if (flen > BPF_MAXINSNS) { + return EINVAL; + } size = flen * sizeof(struct bpf_insn); fcode = (struct bpf_insn *) _MALLOC(size, M_DEVBUF, M_WAIT); #ifdef __APPLE__ - if (fcode == NULL) - return (ENOBUFS); + if (fcode == NULL) { + return ENOBUFS; + } #endif if (copyin(bf_insns, (caddr_t)fcode, size) == 0 && bpf_validate(fcode, (int)flen)) { d->bd_filter = fcode; - if (cmd == BIOCSETF32 || cmd == BIOCSETF64) + if (cmd == BIOCSETF32 || cmd == BIOCSETF64) { reset_d(d); + } - if (old != 0) - FREE((caddr_t)old, M_DEVBUF); + if (old != 0) { + FREE(old, M_DEVBUF); + } - return (0); + return 0; } - FREE((caddr_t)fcode, M_DEVBUF); - return (EINVAL); + FREE(fcode, M_DEVBUF); + return EINVAL; } /* @@ -2189,11 +2250,13 @@ bpf_setif(struct bpf_d *d, ifnet_t theywant, bool do_reset, bool has_hbuf_read) struct bpf_if *bp; int error; - while (d->bd_hbuf_read != 0 && !has_hbuf_read) + while (d->bd_hbuf_read != 0 && !has_hbuf_read) { msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + } - if ((d->bd_flags & BPF_CLOSING) != 0) - return (ENXIO); + if ((d->bd_flags & BPF_CLOSING) != 0) { + return ENXIO; + } /* * Look through attached interfaces for the named one. 
@@ -2201,43 +2264,49 @@ bpf_setif(struct bpf_d *d, ifnet_t theywant, bool do_reset, bool has_hbuf_read) for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { struct ifnet *ifp = bp->bif_ifp; - if (ifp == 0 || ifp != theywant) + if (ifp == 0 || ifp != theywant) { continue; + } /* * Do not use DLT_PKTAP, unless requested explicitly */ - if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) + if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) { continue; + } /* * Skip the coprocessor interface */ - if (!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) + if (!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) { continue; + } /* * We found the requested interface. * Allocate the packet buffers. */ error = bpf_allocbufs(d); - if (error != 0) - return (error); + if (error != 0) { + return error; + } /* * Detach if attached to something else. */ if (bp != d->bd_bif) { if (d->bd_bif != NULL) { - if (bpf_detachd(d, 0) != 0) - return (ENXIO); + if (bpf_detachd(d, 0) != 0) { + return ENXIO; + } + } + if (bpf_attachd(d, bp) != 0) { + return ENXIO; } - if (bpf_attachd(d, bp) != 0) - return (ENXIO); } if (do_reset) { - reset_d(d); + reset_d(d); } - return (0); + return 0; } /* Not found. */ - return (ENXIO); + return ENXIO; } /* @@ -2246,14 +2315,14 @@ bpf_setif(struct bpf_d *d, ifnet_t theywant, bool do_reset, bool has_hbuf_read) static int bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p) { - u_int n; - int error; - struct ifnet *ifp; - struct bpf_if *bp; - user_addr_t dlist; + u_int n; + int error; + struct ifnet *ifp; + struct bpf_if *bp; + user_addr_t dlist; struct bpf_dltlist bfl; - bcopy(addr, &bfl, sizeof (bfl)); + bcopy(addr, &bfl, sizeof(bfl)); if (proc_is64bit(p)) { dlist = (user_addr_t)bfl.bfl_u.bflu_pad; } else { @@ -2265,29 +2334,32 @@ bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p) error = 0; for (bp = bpf_iflist; bp; bp = bp->bif_next) { - if (bp->bif_ifp != ifp) + if (bp->bif_ifp != ifp) { continue; + } /* * Do not use DLT_PKTAP, unless requested explicitly */ - if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) + if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) { continue; + } if (dlist != USER_ADDR_NULL) { if (n >= bfl.bfl_len) { - return (ENOMEM); + return ENOMEM; } error = copyout(&bp->bif_dlt, dlist, - sizeof (bp->bif_dlt)); - if (error != 0) + sizeof(bp->bif_dlt)); + if (error != 0) { break; - dlist += sizeof (bp->bif_dlt); + } + dlist += sizeof(bp->bif_dlt); } n++; } bfl.bfl_len = n; - bcopy(&bfl, addr, sizeof (bfl)); + bcopy(&bfl, addr, sizeof(bfl)); - return (error); + return error; } /* @@ -2300,14 +2372,17 @@ bpf_setdlt(struct bpf_d *d, uint32_t dlt) struct ifnet *ifp; struct bpf_if *bp; - if (d->bd_bif->bif_dlt == dlt) - return (0); + if (d->bd_bif->bif_dlt == dlt) { + return 0; + } - while (d->bd_hbuf_read != 0) + while (d->bd_hbuf_read != 0) { msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + } - if ((d->bd_flags & BPF_CLOSING) != 0) - return (ENXIO); + if ((d->bd_flags & BPF_CLOSING) != 0) { + return ENXIO; + } ifp = d->bd_bif->bif_ifp; for (bp = bpf_iflist; bp; bp = bp->bif_next) { @@ -2324,14 +2399,15 @@ bpf_setdlt(struct bpf_d *d, uint32_t dlt) } if (bp != NULL) { opromisc = d->bd_promisc; - if (bpf_detachd(d, 0) != 0) - return (ENXIO); + if (bpf_detachd(d, 0) != 0) { + return ENXIO; + } error = bpf_attachd(d, bp); if (error) { printf("bpf_setdlt: bpf_attachd %s%d failed (%d)\n", ifnet_name(bp->bif_ifp), ifnet_unit(bp->bif_ifp), error); - return (error); + return error; } 
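	/*
	 * Reattached on the new DLT: reset the capture state and, if the
	 * descriptor had put the old attachment into promiscuous mode
	 * (opromisc), turn promiscuous mode back on for the interface.
	 */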
reset_d(d); if (opromisc) { @@ -2347,7 +2423,7 @@ bpf_setdlt(struct bpf_d *d, uint32_t dlt) } } } - return (bp == NULL ? EINVAL : 0); + return bp == NULL ? EINVAL : 0; } static int @@ -2355,19 +2431,21 @@ bpf_set_traffic_class(struct bpf_d *d, int tc) { int error = 0; - if (!SO_VALID_TC(tc)) + if (!SO_VALID_TC(tc)) { error = EINVAL; - else + } else { d->bd_traffic_class = tc; + } - return (error); + return error; } static void bpf_set_packet_service_class(struct mbuf *m, int tc) { - if (!(m->m_flags & M_PKTHDR)) + if (!(m->m_flags & M_PKTHDR)) { return; + } VERIFY(SO_VALID_TC(tc)); (void) m_set_service_class(m, so_tc2msc(tc)); @@ -2391,7 +2469,7 @@ bpfselect(dev_t dev, int which, void * wql, struct proc *p) if (d == NULL || d == BPF_DEV_RESERVED || (d->bd_flags & BPF_CLOSING) != 0) { lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } bpf_acquire_d(d); @@ -2399,45 +2477,46 @@ bpfselect(dev_t dev, int which, void * wql, struct proc *p) if (d->bd_bif == NULL) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } - while (d->bd_hbuf_read != 0) + while (d->bd_hbuf_read != 0) { msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_reading", NULL); + } if ((d->bd_flags & BPF_CLOSING) != 0) { bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ENXIO); + return ENXIO; } switch (which) { - case FREAD: - if (d->bd_hlen != 0 || - ((d->bd_immediate || - d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) - ret = 1; /* read has data to return */ - else { - /* - * Read has no data to return. - * Make the select wait, and start a timer if - * necessary. - */ - selrecord(p, &d->bd_sel, wql); - bpf_start_timer(d); - } - break; + case FREAD: + if (d->bd_hlen != 0 || + ((d->bd_immediate || + d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) { + ret = 1; /* read has data to return */ + } else { + /* + * Read has no data to return. + * Make the select wait, and start a timer if + * necessary. + */ + selrecord(p, &d->bd_sel, wql); + bpf_start_timer(d); + } + break; - case FWRITE: - /* can't determine whether a write would block */ - ret = 1; - break; + case FWRITE: + /* can't determine whether a write would block */ + ret = 1; + break; } bpf_release_d(d); lck_mtx_unlock(bpf_mlock); - return (ret); + return ret; } /* @@ -2483,10 +2562,11 @@ filt_bpfread_common(struct knote *kn, struct bpf_d *d) d->bd_slen : d->bd_hlen); int64_t lowwat = 1; if (kn->kn_sfflags & NOTE_LOWAT) { - if (kn->kn_sdata > d->bd_bufsize) + if (kn->kn_sdata > d->bd_bufsize) { lowwat = d->bd_bufsize; - else if (kn->kn_sdata > lowwat) + } else if (kn->kn_sdata > lowwat) { lowwat = kn->kn_sdata; + } } ready = (kn->kn_data >= lowwat); } else { @@ -2509,10 +2589,11 @@ filt_bpfread_common(struct knote *kn, struct bpf_d *d) d->bd_state == BPF_TIMED_OUT ? 
d->bd_slen : d->bd_hlen); ready = (kn->kn_data > 0); } - if (!ready) + if (!ready) { bpf_start_timer(d); + } - return (ready); + return ready; } int @@ -2528,7 +2609,7 @@ bpfkqfilter(dev_t dev, struct knote *kn) kn->kn_filter != EVFILT_READ) { kn->kn_flags = EV_ERROR; kn->kn_data = EINVAL; - return (0); + return 0; } lck_mtx_lock(bpf_mlock); @@ -2541,7 +2622,7 @@ bpfkqfilter(dev_t dev, struct knote *kn) lck_mtx_unlock(bpf_mlock); kn->kn_flags = EV_ERROR; kn->kn_data = ENXIO; - return (0); + return 0; } kn->kn_hook = d; @@ -2554,7 +2635,7 @@ bpfkqfilter(dev_t dev, struct knote *kn) lck_mtx_unlock(bpf_mlock); - return (res); + return res; } static void @@ -2576,7 +2657,7 @@ filt_bpfread(struct knote *kn, long hint) #pragma unused(hint) struct bpf_d *d = (struct bpf_d *)kn->kn_hook; - return (filt_bpfread_common(kn, d)); + return filt_bpfread_common(kn, d); } static int @@ -2596,7 +2677,7 @@ filt_bpftouch(struct knote *kn, struct kevent_internal_s *kev) lck_mtx_unlock(bpf_mlock); - return (res); + return res; } static int @@ -2614,7 +2695,7 @@ filt_bpfprocess(struct knote *kn, struct filt_process_s *data, } lck_mtx_unlock(bpf_mlock); - return (res); + return res; } /* @@ -2629,8 +2710,9 @@ bpf_mcopy(struct mbuf * m, void *dst_arg, size_t len) dst = dst_arg; while (len > 0) { - if (m == 0) + if (m == 0) { panic("bpf_mcopy"); + } count = min(m->m_len, len); bcopy(mbuf_data(m), dst, count); m = m->m_next; @@ -2641,12 +2723,12 @@ bpf_mcopy(struct mbuf * m, void *dst_arg, size_t len) static inline void bpf_tap_imp( - ifnet_t ifp, - u_int32_t dlt, + ifnet_t ifp, + u_int32_t dlt, struct bpf_packet *bpf_pkt, - int outbound) + int outbound) { - struct bpf_d *d; + struct bpf_d *d; u_int slen; struct bpf_if *bp; @@ -2686,8 +2768,9 @@ bpf_tap_imp( struct bpf_packet bpf_pkt_tmp; struct pktap_header_buffer bpfp_header_tmp; - if (outbound && !d->bd_seesent) + if (outbound && !d->bd_seesent) { continue; + } ++d->bd_rcount; slen = bpf_filter(d->bd_filter, (u_char *)bpf_pkt, @@ -2714,16 +2797,17 @@ bpf_tap_imp( !!(d->bd_flags & BPF_TRUNCATE)); } - if (d->bd_flags & BPF_TRUNCATE) { + if (d->bd_flags & BPF_TRUNCATE) { slen = min(slen, get_pkt_trunc_len((u_char *)bpf_pkt, - bpf_pkt->bpfp_total_length)); - } + bpf_pkt->bpfp_total_length)); + } } if (slen != 0) { #if CONFIG_MACF_NET - if (mac_bpfdesc_check_receive(d, bp->bif_ifp) != 0) + if (mac_bpfdesc_check_receive(d, bp->bif_ifp) != 0) { continue; + } #endif catchpacket(d, bpf_pkt, slen, outbound); } @@ -2736,12 +2820,12 @@ done: static inline void bpf_tap_mbuf( - ifnet_t ifp, - u_int32_t dlt, - mbuf_t m, - void* hdr, - size_t hlen, - int outbound) + ifnet_t ifp, + u_int32_t dlt, + mbuf_t m, + void* hdr, + size_t hlen, + int outbound) { struct bpf_packet bpf_pkt; struct mbuf *m0; @@ -2753,8 +2837,9 @@ bpf_tap_mbuf( bpf_pkt.bpfp_type = BPF_PACKET_TYPE_MBUF; bpf_pkt.bpfp_mbuf = m; bpf_pkt.bpfp_total_length = 0; - for (m0 = m; m0 != NULL; m0 = m0->m_next) + for (m0 = m; m0 != NULL; m0 = m0->m_next) { bpf_pkt.bpfp_total_length += m0->m_len; + } bpf_pkt.bpfp_header = hdr; if (hdr != NULL) { bpf_pkt.bpfp_total_length += hlen; @@ -2767,32 +2852,33 @@ bpf_tap_mbuf( void bpf_tap_out( - ifnet_t ifp, - u_int32_t dlt, - mbuf_t m, - void* hdr, - size_t hlen) + ifnet_t ifp, + u_int32_t dlt, + mbuf_t m, + void* hdr, + size_t hlen) { bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 1); } void bpf_tap_in( - ifnet_t ifp, - u_int32_t dlt, - mbuf_t m, - void* hdr, - size_t hlen) + ifnet_t ifp, + u_int32_t dlt, + mbuf_t m, + void* hdr, + size_t hlen) { bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 0); } 
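/*
 * bpf_tap_out() and bpf_tap_in() are the whole driver-facing capture
 * surface: a driver registers once with bpf_attach() and from then on
 * mirrors frames through these two calls on its transmit and receive
 * paths.  A minimal sketch of that contract (the call placement and the
 * NULL send/tap callbacks are illustrative assumptions, not spelled out
 * by this patch):
 *
 *	err = bpf_attach(ifp, DLT_EN10MB,		once, at attach
 *	    sizeof(struct ether_header), NULL, NULL);
 *
 *	bpf_tap_in(ifp, DLT_EN10MB, m, NULL, 0);	per frame received
 *	bpf_tap_out(ifp, DLT_EN10MB, m, NULL, 0);	per frame sent
 */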
/* Callback registered with Ethernet driver. */ -static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) +static int +bpf_tap_callback(struct ifnet *ifp, struct mbuf *m) { bpf_tap_mbuf(ifp, 0, m, NULL, 0, mbuf_pkthdr_rcvif(m) == NULL); - return (0); + return 0; } @@ -2806,7 +2892,7 @@ bpf_copydata(struct bpf_packet *pkt, size_t off, size_t len, void* out_data) err = EINVAL; } - return (err); + return err; } static void @@ -2814,7 +2900,7 @@ copy_bpf_packet(struct bpf_packet * pkt, void * dst, size_t len) { /* copy the optional header */ if (pkt->bpfp_header_length != 0) { - size_t count = min(len, pkt->bpfp_header_length); + size_t count = min(len, pkt->bpfp_header_length); bcopy(pkt->bpfp_header, dst, count); len -= count; dst += count; @@ -2842,10 +2928,11 @@ get_esp_trunc_len(__unused struct bpf_packet *pkt, __unused uint16_t off, */ uint16_t trunc_len = ESP_HDR_SIZE + 1; - if (trunc_len > remaining_caplen) - return (remaining_caplen); + if (trunc_len > remaining_caplen) { + return remaining_caplen; + } - return (trunc_len); + return trunc_len; } static uint16_t @@ -2857,10 +2944,11 @@ get_isakmp_trunc_len(__unused struct bpf_packet *pkt, __unused uint16_t off, */ uint16_t trunc_len = ISAKMP_HDR_SIZE; - if (trunc_len > remaining_caplen) - return (remaining_caplen); + if (trunc_len > remaining_caplen) { + return remaining_caplen; + } - return (trunc_len); + return trunc_len; } static uint16_t @@ -2872,8 +2960,9 @@ get_isakmp_natt_trunc_len(struct bpf_packet *pkt, uint16_t off, char payload[remaining_caplen]; err = bpf_copydata(pkt, off, remaining_caplen, payload); - if (err != 0) - return (remaining_caplen); + if (err != 0) { + return remaining_caplen; + } /* * They are three cases: * - IKE: payload start with 4 bytes header set to zero before ISAKMP header @@ -2881,8 +2970,8 @@ get_isakmp_natt_trunc_len(struct bpf_packet *pkt, uint16_t off, * - otherwise it's ESP */ if (remaining_caplen >= 4 && - payload[0] == 0 && payload[1] == 0 && - payload[2] == 0 && payload[3] == 0) { + payload[0] == 0 && payload[1] == 0 && + payload[2] == 0 && payload[3] == 0) { trunc_len = 4 + get_isakmp_trunc_len(pkt, off + 4, remaining_caplen - 4); } else if (remaining_caplen == 1) { trunc_len = 1; @@ -2890,11 +2979,11 @@ get_isakmp_natt_trunc_len(struct bpf_packet *pkt, uint16_t off, trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen); } - if (trunc_len > remaining_caplen) - return (remaining_caplen); - - return (trunc_len); + if (trunc_len > remaining_caplen) { + return remaining_caplen; + } + return trunc_len; } static uint16_t @@ -2903,13 +2992,15 @@ get_udp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining int err = 0; uint16_t trunc_len = sizeof(struct udphdr); /* By default no UDP payload */ - if (trunc_len >= remaining_caplen) - return (remaining_caplen); + if (trunc_len >= remaining_caplen) { + return remaining_caplen; + } struct udphdr udphdr; err = bpf_copydata(pkt, off, sizeof(struct udphdr), &udphdr); - if (err != 0) - return (remaining_caplen); + if (err != 0) { + return remaining_caplen; + } u_short sport, dport; @@ -2922,7 +3013,7 @@ get_udp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining */ trunc_len = remaining_caplen; } else if ((sport == PORT_BOOTPS && dport == PORT_BOOTPC) || - (sport == PORT_BOOTPC && dport == PORT_BOOTPS)) { + (sport == PORT_BOOTPC && dport == PORT_BOOTPS)) { /* * Full UDP payload for BOOTP and DHCP */ @@ -2937,10 +3028,11 @@ get_udp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining 
trunc_len += get_isakmp_natt_trunc_len(pkt, off + sizeof(struct udphdr), remaining_caplen - sizeof(struct udphdr)); } - if (trunc_len >= remaining_caplen) - return (remaining_caplen); + if (trunc_len >= remaining_caplen) { + return remaining_caplen; + } - return (trunc_len); + return trunc_len; } static uint16_t @@ -2948,13 +3040,15 @@ get_tcp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining { int err = 0; uint16_t trunc_len = sizeof(struct tcphdr); /* By default no TCP payload */ - if (trunc_len >= remaining_caplen) - return (remaining_caplen); + if (trunc_len >= remaining_caplen) { + return remaining_caplen; + } struct tcphdr tcphdr; err = bpf_copydata(pkt, off, sizeof(struct tcphdr), &tcphdr); - if (err != 0) - return (remaining_caplen); + if (err != 0) { + return remaining_caplen; + } u_short sport, dport; sport = EXTRACT_SHORT(&tcphdr.th_sport); @@ -2968,10 +3062,11 @@ get_tcp_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining } else { trunc_len = tcphdr.th_off << 2; } - if (trunc_len >= remaining_caplen) - return (remaining_caplen); + if (trunc_len >= remaining_caplen) { + return remaining_caplen; + } - return (trunc_len); + return trunc_len; } static uint16_t @@ -3021,10 +3116,11 @@ get_proto_trunc_len(uint8_t proto, struct bpf_packet *pkt, uint16_t off, const u break; } } - if (trunc_len >= remaining_caplen) - return (remaining_caplen); + if (trunc_len >= remaining_caplen) { + return remaining_caplen; + } - return (trunc_len); + return trunc_len; } static uint16_t @@ -3032,27 +3128,31 @@ get_ip_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining_ { int err = 0; uint16_t iplen = sizeof(struct ip); - if (iplen >= remaining_caplen) - return (remaining_caplen); + if (iplen >= remaining_caplen) { + return remaining_caplen; + } struct ip iphdr; err = bpf_copydata(pkt, off, sizeof(struct ip), &iphdr); - if (err != 0) - return (remaining_caplen); + if (err != 0) { + return remaining_caplen; + } uint8_t proto = 0; iplen = iphdr.ip_hl << 2; - if (iplen >= remaining_caplen) - return (remaining_caplen); + if (iplen >= remaining_caplen) { + return remaining_caplen; + } proto = iphdr.ip_p; iplen += get_proto_trunc_len(proto, pkt, off + iplen, remaining_caplen - iplen); - if (iplen >= remaining_caplen) - return (remaining_caplen); + if (iplen >= remaining_caplen) { + return remaining_caplen; + } - return (iplen); + return iplen; } static uint16_t @@ -3060,13 +3160,15 @@ get_ip6_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining { int err = 0; uint16_t iplen = sizeof(struct ip6_hdr); - if (iplen >= remaining_caplen) - return (remaining_caplen); + if (iplen >= remaining_caplen) { + return remaining_caplen; + } struct ip6_hdr ip6hdr; err = bpf_copydata(pkt, off, sizeof(struct ip6_hdr), &ip6hdr); - if (err != 0) - return (remaining_caplen); + if (err != 0) { + return remaining_caplen; + } uint8_t proto = 0; @@ -3076,10 +3178,11 @@ get_ip6_trunc_len(struct bpf_packet *pkt, uint16_t off, const uint16_t remaining proto = ip6hdr.ip6_nxt; iplen += get_proto_trunc_len(proto, pkt, off + iplen, remaining_caplen - iplen); - if (iplen >= remaining_caplen) - return (remaining_caplen); + if (iplen >= remaining_caplen) { + return remaining_caplen; + } - return (iplen); + return iplen; } static uint16_t @@ -3087,14 +3190,16 @@ get_ether_trunc_len(struct bpf_packet *pkt, int off, const uint16_t remaining_ca { int err = 0; uint16_t ethlen = sizeof(struct ether_header); - if (ethlen >= remaining_caplen) - return 
(remaining_caplen); + if (ethlen >= remaining_caplen) { + return remaining_caplen; + } struct ether_header eh; u_short type; err = bpf_copydata(pkt, off, sizeof(struct ether_header), &eh); - if (err != 0) - return (remaining_caplen); + if (err != 0) { + return remaining_caplen; + } type = EXTRACT_SHORT(&eh.ether_type); /* Include full ARP */ @@ -3108,10 +3213,10 @@ get_ether_trunc_len(struct bpf_packet *pkt, int off, const uint16_t remaining_ca remaining_caplen); } else if (type == ETHERTYPE_IPV6) { ethlen += get_ip6_trunc_len(pkt, sizeof(struct ether_header), - remaining_caplen); + remaining_caplen); } } - return (ethlen); + return ethlen; } static uint32_t @@ -3127,7 +3232,7 @@ get_pkt_trunc_len(u_char *p, u_int len) * pkt->bpfp_header_length is (pktap->pth_length + pre_adjust) * pre is the offset to the L3 header after the bpfp_header, or length * of L2 header after bpfp_header, if present. - */ + */ uint32_t pre = pktap->pth_frame_pre_length - (pkt->bpfp_header_length - pktap->pth_length); @@ -3137,8 +3242,9 @@ get_pkt_trunc_len(u_char *p, u_int len) pktap->pth_protocol_family == AF_INET6) { /* Contains L2 header */ if (pre > 0) { - if (pre < sizeof(struct ether_header)) + if (pre < sizeof(struct ether_header)) { goto too_short; + } out_pkt_len = get_ether_trunc_len(pkt, 0, in_pkt_len); } else if (pre == 0) { @@ -3168,7 +3274,7 @@ get_pkt_trunc_len(u_char *p, u_int len) } done: tlen = pkt->bpfp_header_length + out_pkt_len + pre; - return (tlen); + return tlen; too_short: out_pkt_len = in_pkt_len; goto done; @@ -3181,7 +3287,7 @@ too_short: */ static void catchpacket(struct bpf_d *d, struct bpf_packet * pkt, - u_int snaplen, int outbound) + u_int snaplen, int outbound) { struct bpf_hdr *hp; struct bpf_hdr_ext *ehp; @@ -3200,11 +3306,13 @@ catchpacket(struct bpf_d *d, struct bpf_packet * pkt, * we hit the buffer size limit). */ totlen = hdrlen + min(snaplen, pkt->bpfp_total_length); - if (totlen > d->bd_bufsize) + if (totlen > d->bd_bufsize) { totlen = d->bd_bufsize; + } - if (hdrlen > totlen) + if (hdrlen > totlen) { return; + } /* * Round up the end of the previous packet to the next longword. @@ -3244,13 +3352,14 @@ catchpacket(struct bpf_d *d, struct bpf_packet * pkt, } do_wakeup = 1; curlen = 0; - } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) + } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) { /* * Immediate mode is set, or the read timeout has * already expired during a select call. A packet * arrived, so the reader should be woken up. */ do_wakeup = 1; + } /* * Append the bpf header. @@ -3260,7 +3369,7 @@ catchpacket(struct bpf_d *d, struct bpf_packet * pkt, struct mbuf *m; m = (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF) - ? pkt->bpfp_mbuf : NULL; + ? 
pkt->bpfp_mbuf : NULL; ehp = (struct bpf_hdr_ext *)(void *)(d->bd_sbuf + curlen); memset(ehp, 0, sizeof(*ehp)); ehp->bh_tstamp.tv_sec = tv.tv_sec; @@ -3279,28 +3388,32 @@ catchpacket(struct bpf_d *d, struct bpf_packet * pkt, ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT; /* only do lookups on non-raw INPCB */ - if ((m->m_pkthdr.pkt_flags & (PKTF_FLOW_ID| - PKTF_FLOW_LOCALSRC|PKTF_FLOW_RAWSOCK)) == - (PKTF_FLOW_ID|PKTF_FLOW_LOCALSRC) && + if ((m->m_pkthdr.pkt_flags & (PKTF_FLOW_ID | + PKTF_FLOW_LOCALSRC | PKTF_FLOW_RAWSOCK)) == + (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC) && m->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) { ehp->bh_flowid = m->m_pkthdr.pkt_flowid; ehp->bh_proto = m->m_pkthdr.pkt_proto; } ehp->bh_svc = so_svc2tc(m->m_pkthdr.pkt_svc); - if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT) + if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT) { ehp->bh_pktflags |= BPF_PKTFLAGS_TCP_REXMT; - if (m->m_pkthdr.pkt_flags & PKTF_START_SEQ) + } + if (m->m_pkthdr.pkt_flags & PKTF_START_SEQ) { ehp->bh_pktflags |= BPF_PKTFLAGS_START_SEQ; - if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) + } + if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) { ehp->bh_pktflags |= BPF_PKTFLAGS_LAST_PKT; + } if (m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA) { ehp->bh_unsent_bytes = m->m_pkthdr.bufstatus_if; ehp->bh_unsent_snd = m->m_pkthdr.bufstatus_sndbuf; } - } else + } else { ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN; + } payload = (u_char *)ehp + hdrlen; } else { hp = (struct bpf_hdr *)(void *)(d->bd_sbuf + curlen); @@ -3318,8 +3431,9 @@ catchpacket(struct bpf_d *d, struct bpf_packet * pkt, d->bd_slen = curlen + totlen; d->bd_scnt += 1; - if (do_wakeup) + if (do_wakeup) { bpf_wakeup(d); + } } /* @@ -3342,20 +3456,21 @@ bpf_allocbufs(struct bpf_d *d) } d->bd_fbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); - if (d->bd_fbuf == NULL) - return (ENOBUFS); + if (d->bd_fbuf == NULL) { + return ENOBUFS; + } d->bd_sbuf = (caddr_t) _MALLOC(d->bd_bufsize, M_DEVBUF, M_WAIT); if (d->bd_sbuf == NULL) { FREE(d->bd_fbuf, M_DEVBUF); d->bd_fbuf = NULL; - return (ENOBUFS); + return ENOBUFS; } d->bd_slen = 0; d->bd_hlen = 0; d->bd_scnt = 0; d->bd_hcnt = 0; - return (0); + return 0; } /* @@ -3370,18 +3485,22 @@ bpf_freed(struct bpf_d *d) * been detached from its interface and it yet hasn't been marked * free. 
*/ - if (d->bd_hbuf_read != 0) + if (d->bd_hbuf_read != 0) { panic("bpf buffer freed during read"); + } if (d->bd_sbuf != 0) { FREE(d->bd_sbuf, M_DEVBUF); - if (d->bd_hbuf != 0) + if (d->bd_hbuf != 0) { FREE(d->bd_hbuf, M_DEVBUF); - if (d->bd_fbuf != 0) + } + if (d->bd_fbuf != 0) { FREE(d->bd_fbuf, M_DEVBUF); + } + } + if (d->bd_filter) { + FREE(d->bd_filter, M_DEVBUF); } - if (d->bd_filter) - FREE((caddr_t)d->bd_filter, M_DEVBUF); } /* @@ -3397,11 +3516,11 @@ bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) errno_t bpf_attach( - ifnet_t ifp, - u_int32_t dlt, - u_int32_t hdrlen, - bpf_send_func send, - bpf_tap_func tap) + ifnet_t ifp, + u_int32_t dlt, + u_int32_t hdrlen, + bpf_send_func send, + bpf_tap_func tap) { struct bpf_if *bp; struct bpf_if *bp_new; @@ -3412,8 +3531,9 @@ bpf_attach( bp_new = (struct bpf_if *) _MALLOC(sizeof(*bp_new), M_DEVBUF, M_WAIT | M_ZERO); - if (bp_new == 0) + if (bp_new == 0) { panic("bpfattach"); + } lck_mtx_lock(bpf_mlock); @@ -3444,9 +3564,9 @@ bpf_attach( if (found) { lck_mtx_unlock(bpf_mlock); printf("bpfattach - %s with dlt %d is already attached\n", - if_name(ifp), dlt); + if_name(ifp), dlt); FREE(bp_new, M_DEVBUF); - return (EEXIST); + return EEXIST; } bp_new->bif_ifp = ifp; @@ -3492,11 +3612,12 @@ bpf_attach( lck_mtx_unlock(bpf_mlock); #ifndef __APPLE__ - if (bootverbose) + if (bootverbose) { printf("bpf: %s attached\n", if_name(ifp)); + } #endif - return (0); + return 0; } /* @@ -3508,11 +3629,12 @@ bpf_attach( void bpfdetach(struct ifnet *ifp) { - struct bpf_if *bp, *bp_prev, *bp_next; - struct bpf_d *d; + struct bpf_if *bp, *bp_prev, *bp_next; + struct bpf_d *d; - if (bpf_debug != 0) + if (bpf_debug != 0) { printf("%s: %s\n", __func__, if_name(ifp)); + } lck_mtx_lock(bpf_mlock); @@ -3530,10 +3652,11 @@ bpfdetach(struct ifnet *ifp) continue; } /* Unlink from the interface list */ - if (bp_prev) + if (bp_prev) { bp_prev->bif_next = bp->bif_next; - else + } else { bpf_iflist = bp->bif_next; + } /* Detach the devices attached to the interface */ while ((d = bp->bif_dlist) != NULL) { @@ -3557,8 +3680,8 @@ void bpf_init(__unused void *unused) { #ifdef __APPLE__ - int i; - int maj; + int i; + int maj; if (bpf_devsw_installed == 0) { bpf_devsw_installed = 1; @@ -3568,12 +3691,15 @@ bpf_init(__unused void *unused) lck_mtx_init(bpf_mlock, bpf_mlock_grp, bpf_mlock_attr); maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw); if (maj == -1) { - if (bpf_mlock_attr) + if (bpf_mlock_attr) { lck_attr_free(bpf_mlock_attr); - if (bpf_mlock_grp) + } + if (bpf_mlock_grp) { lck_grp_free(bpf_mlock_grp); - if (bpf_mlock_grp_attr) + } + if (bpf_mlock_grp_attr) { lck_grp_attr_free(bpf_mlock_grp_attr); + } bpf_mlock = NULL; bpf_mlock_attr = NULL; @@ -3584,8 +3710,9 @@ bpf_init(__unused void *unused) return; } - for (i = 0; i < NBPFILTER; i++) + for (i = 0; i < NBPFILTER; i++) { bpf_make_dev_t(maj); + } } #else cdevsw_add(&bpf_cdevsw); @@ -3593,21 +3720,19 @@ bpf_init(__unused void *unused) } #ifndef __APPLE__ -SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE+CDEV_MAJOR, bpf_drvinit, NULL) +SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, bpf_drvinit, NULL) #endif #if CONFIG_MACF_NET struct label * mac_bpfdesc_label_get(struct bpf_d *d) { - - return (d->bd_label); + return d->bd_label; } void mac_bpfdesc_label_set(struct bpf_d *d, struct label *label) { - d->bd_label = label; } #endif diff --git a/bsd/net/bpf.h b/bsd/net/bpf.h index 0457a93ab..f7af8e9e4 100644 --- a/bsd/net/bpf.h +++ b/bsd/net/bpf.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -88,7 +88,7 @@ #include struct bpf_setup_args { - uuid_t bsa_uuid; + uuid_t bsa_uuid; char bsa_ifname[IFNAMSIZ]; }; #endif /* PRIVATE */ @@ -106,24 +106,24 @@ struct bpf_setup_args { #define EXTRACT_SHORT(p) ((u_int16_t)ntohs(*(u_int16_t *)(void *)p)) #define EXTRACT_LONG(p) (ntohl(*(u_int32_t *)(void *)p)) #else -#define EXTRACT_SHORT(p)\ - ((u_int16_t)\ - ((u_int16_t)*((u_char *)p+0)<<8|\ - (u_int16_t)*((u_char *)p+1)<<0)) -#define EXTRACT_LONG(p)\ - ((u_int32_t)*((u_char *)p+0)<<24|\ - (u_int32_t)*((u_char *)p+1)<<16|\ - (u_int32_t)*((u_char *)p+2)<<8|\ - (u_int32_t)*((u_char *)p+3)<<0) +#define EXTRACT_SHORT(p) \ + ((u_int16_t)\ + ((u_int16_t)*((u_char *)p+0)<<8|\ + (u_int16_t)*((u_char *)p+1)<<0)) +#define EXTRACT_LONG(p) \ + ((u_int32_t)*((u_char *)p+0)<<24|\ + (u_int32_t)*((u_char *)p+1)<<16|\ + (u_int32_t)*((u_char *)p+2)<<8|\ + (u_int32_t)*((u_char *)p+3)<<0) #endif #endif /* KERNEL */ /* BSD style release date */ -#define BPF_RELEASE 199606 +#define BPF_RELEASE 199606 -typedef int32_t bpf_int32; -typedef u_int32_t bpf_u_int32; +typedef int32_t bpf_int32; +typedef u_int32_t bpf_u_int32; /* * Alignment macros. BPF_WORDALIGN rounds up to the next @@ -151,13 +151,13 @@ struct bpf_program { * WARNING - keep in sync with bpf_program */ struct bpf_program64 { - u_int bf_len; - user64_addr_t bf_insns __attribute__((aligned(8))); + u_int bf_len; + user64_addr_t bf_insns __attribute__((aligned(8))); }; struct bpf_program32 { - u_int bf_len; - user32_addr_t bf_insns; + u_int bf_len; + user32_addr_t bf_insns; }; #endif /* KERNEL_PRIVATE */ @@ -165,8 +165,8 @@ struct bpf_program32 { * Struct returned by BIOCGSTATS. 
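 * bs_recv counts packets seen by the descriptor's tap; bs_drop counts
 * packets lost when the capture buffers were full.  A monitor can poll
 * the deltas; a minimal sketch, with fd an open /dev/bpf descriptor:
 *
 *	struct bpf_stat bs;
 *	if (ioctl(fd, BIOCGSTATS, &bs) == 0)
 *		printf("recv %u drop %u\n", bs.bs_recv, bs.bs_drop);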
*/ struct bpf_stat { - u_int bs_recv; /* number of packets received */ - u_int bs_drop; /* number of packets dropped */ + u_int bs_recv; /* number of packets received */ + u_int bs_drop; /* number of packets dropped */ }; /* @@ -195,70 +195,70 @@ struct bpf_version { #define BPF_MAJOR_VERSION 1 #define BPF_MINOR_VERSION 1 -#define BIOCGBLEN _IOR('B',102, u_int) -#define BIOCSBLEN _IOWR('B',102, u_int) -#define BIOCSETF _IOW('B',103, struct bpf_program) +#define BIOCGBLEN _IOR('B',102, u_int) +#define BIOCSBLEN _IOWR('B',102, u_int) +#define BIOCSETF _IOW('B',103, struct bpf_program) #ifdef KERNEL_PRIVATE -#define BIOCSETF64 _IOW('B',103, struct bpf_program64) -#define BIOCSETF32 _IOW('B',103, struct bpf_program32) +#define BIOCSETF64 _IOW('B',103, struct bpf_program64) +#define BIOCSETF32 _IOW('B',103, struct bpf_program32) #endif /* KERNEL_PRIVATE */ -#define BIOCFLUSH _IO('B',104) -#define BIOCPROMISC _IO('B',105) -#define BIOCGDLT _IOR('B',106, u_int) -#define BIOCGETIF _IOR('B',107, struct ifreq) -#define BIOCSETIF _IOW('B',108, struct ifreq) -#define BIOCSRTIMEOUT _IOW('B',109, struct timeval) +#define BIOCFLUSH _IO('B',104) +#define BIOCPROMISC _IO('B',105) +#define BIOCGDLT _IOR('B',106, u_int) +#define BIOCGETIF _IOR('B',107, struct ifreq) +#define BIOCSETIF _IOW('B',108, struct ifreq) +#define BIOCSRTIMEOUT _IOW('B',109, struct timeval) #ifdef KERNEL_PRIVATE -#define BIOCSRTIMEOUT64 _IOW('B',109, struct user64_timeval) -#define BIOCSRTIMEOUT32 _IOW('B',109, struct user32_timeval) +#define BIOCSRTIMEOUT64 _IOW('B',109, struct user64_timeval) +#define BIOCSRTIMEOUT32 _IOW('B',109, struct user32_timeval) #endif /* KERNEL_PRIVATE */ -#define BIOCGRTIMEOUT _IOR('B',110, struct timeval) +#define BIOCGRTIMEOUT _IOR('B',110, struct timeval) #ifdef KERNEL_PRIVATE -#define BIOCGRTIMEOUT64 _IOR('B',110, struct user64_timeval) -#define BIOCGRTIMEOUT32 _IOR('B',110, struct user32_timeval) +#define BIOCGRTIMEOUT64 _IOR('B',110, struct user64_timeval) +#define BIOCGRTIMEOUT32 _IOR('B',110, struct user32_timeval) #endif /* KERNEL_PRIVATE */ -#define BIOCGSTATS _IOR('B',111, struct bpf_stat) -#define BIOCIMMEDIATE _IOW('B',112, u_int) -#define BIOCVERSION _IOR('B',113, struct bpf_version) -#define BIOCGRSIG _IOR('B',114, u_int) -#define BIOCSRSIG _IOW('B',115, u_int) -#define BIOCGHDRCMPLT _IOR('B',116, u_int) -#define BIOCSHDRCMPLT _IOW('B',117, u_int) -#define BIOCGSEESENT _IOR('B',118, u_int) -#define BIOCSSEESENT _IOW('B',119, u_int) +#define BIOCGSTATS _IOR('B',111, struct bpf_stat) +#define BIOCIMMEDIATE _IOW('B',112, u_int) +#define BIOCVERSION _IOR('B',113, struct bpf_version) +#define BIOCGRSIG _IOR('B',114, u_int) +#define BIOCSRSIG _IOW('B',115, u_int) +#define BIOCGHDRCMPLT _IOR('B',116, u_int) +#define BIOCSHDRCMPLT _IOW('B',117, u_int) +#define BIOCGSEESENT _IOR('B',118, u_int) +#define BIOCSSEESENT _IOW('B',119, u_int) #define BIOCSDLT _IOW('B',120, u_int) #define BIOCGDLTLIST _IOWR('B',121, struct bpf_dltlist) #ifdef PRIVATE -#define BIOCGETTC _IOR('B', 122, int) -#define BIOCSETTC _IOW('B', 123, int) -#define BIOCSEXTHDR _IOW('B', 124, u_int) -#define BIOCGIFATTACHCOUNT _IOWR('B', 125, struct ifreq) +#define BIOCGETTC _IOR('B', 122, int) +#define BIOCSETTC _IOW('B', 123, int) +#define BIOCSEXTHDR _IOW('B', 124, u_int) +#define BIOCGIFATTACHCOUNT _IOWR('B', 125, struct ifreq) #endif /* PRIVATE */ #define BIOCSETFNR _IOW('B', 126, struct bpf_program) #ifdef KERNEL_PRIVATE -#define BIOCSETFNR64 _IOW('B',126, struct bpf_program64) -#define BIOCSETFNR32 _IOW('B',126, struct 
bpf_program32) +#define BIOCSETFNR64 _IOW('B',126, struct bpf_program64) +#define BIOCSETFNR32 _IOW('B',126, struct bpf_program32) #endif /* KERNEL_PRIVATE */ #ifdef PRIVATE -#define BIOCGWANTPKTAP _IOR('B', 127, u_int) -#define BIOCSWANTPKTAP _IOWR('B', 127, u_int) +#define BIOCGWANTPKTAP _IOR('B', 127, u_int) +#define BIOCSWANTPKTAP _IOWR('B', 127, u_int) #define BIOCSHEADDROP _IOW('B', 128, int) #define BIOCGHEADDROP _IOR('B', 128, int) -#define BIOCSTRUNCATE _IOW('B', 129, u_int) -#define BIOCGETUUID _IOR('B', 130, uuid_t) -#define BIOCSETUP _IOW('B', 131, struct bpf_setup_args) -#define BIOCSPKTHDRV2 _IOW('B', 132, int) -#define BIOCGPKTHDRV2 _IOW('B', 133, int) +#define BIOCSTRUNCATE _IOW('B', 129, u_int) +#define BIOCGETUUID _IOR('B', 130, uuid_t) +#define BIOCSETUP _IOW('B', 131, struct bpf_setup_args) +#define BIOCSPKTHDRV2 _IOW('B', 132, int) +#define BIOCGPKTHDRV2 _IOW('B', 133, int) #endif /* PRIVATE */ /* * Structure prepended to each packet. */ struct bpf_hdr { - struct BPF_TIMEVAL bh_tstamp; /* time stamp */ - bpf_u_int32 bh_caplen; /* length of captured portion */ - bpf_u_int32 bh_datalen; /* original length of packet */ - u_short bh_hdrlen; /* length of bpf header (this struct - plus alignment padding) */ + struct BPF_TIMEVAL bh_tstamp; /* time stamp */ + bpf_u_int32 bh_caplen; /* length of captured portion */ + bpf_u_int32 bh_datalen; /* original length of packet */ + u_short bh_hdrlen; /* length of bpf header (this struct + * plus alignment padding) */ }; #ifdef KERNEL /* @@ -266,7 +266,7 @@ struct bpf_hdr { * will insist on inserting padding; hence, sizeof(struct bpf_hdr) won't work. * Only the kernel needs to know about it; applications use bh_hdrlen. */ -#define SIZEOF_BPF_HDR (sizeof(struct bpf_hdr) <= 20 ? 18 : \ +#define SIZEOF_BPF_HDR (sizeof(struct bpf_hdr) <= 20 ? 18 : \ sizeof(struct bpf_hdr)) #endif #ifdef PRIVATE @@ -275,36 +275,36 @@ struct bpf_hdr { * It includes padding and spare fields that we can use later if desired. 
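 * A descriptor opts in with BIOCSEXTHDR (which sets BPF_EXTENDED_HDR in
 * the handler above); its reads then carry one bpf_hdr_ext per captured
 * packet in place of the plain bpf_hdr:
 *
 *	int on = 1;
 *	ioctl(fd, BIOCSEXTHDR, &on);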
*/ struct bpf_hdr_ext { - struct BPF_TIMEVAL bh_tstamp; /* time stamp */ - bpf_u_int32 bh_caplen; /* length of captured portion */ - bpf_u_int32 bh_datalen; /* original length of packet */ - u_short bh_hdrlen; /* length of bpf header */ - u_short bh_flags; -#define BPF_HDR_EXT_FLAGS_DIR_IN 0x0000 -#define BPF_HDR_EXT_FLAGS_DIR_OUT 0x0001 - pid_t bh_pid; /* process PID */ - char bh_comm[MAXCOMLEN+1]; /* process command */ - u_char _bh_pad2[1]; - u_char bh_pktflags; -#define BPF_PKTFLAGS_TCP_REXMT 0x0001 -#define BPF_PKTFLAGS_START_SEQ 0x0002 -#define BPF_PKTFLAGS_LAST_PKT 0x0004 - u_char bh_proto; /* kernel reserved; 0 in userland */ - bpf_u_int32 bh_svc; /* service class */ - bpf_u_int32 bh_flowid; /* kernel reserved; 0 in userland */ - bpf_u_int32 bh_unsent_bytes; /* unsent bytes at interface */ - bpf_u_int32 bh_unsent_snd; /* unsent bytes at socket buffer */ + struct BPF_TIMEVAL bh_tstamp; /* time stamp */ + bpf_u_int32 bh_caplen; /* length of captured portion */ + bpf_u_int32 bh_datalen; /* original length of packet */ + u_short bh_hdrlen; /* length of bpf header */ + u_short bh_flags; +#define BPF_HDR_EXT_FLAGS_DIR_IN 0x0000 +#define BPF_HDR_EXT_FLAGS_DIR_OUT 0x0001 + pid_t bh_pid; /* process PID */ + char bh_comm[MAXCOMLEN + 1]; /* process command */ + u_char _bh_pad2[1]; + u_char bh_pktflags; +#define BPF_PKTFLAGS_TCP_REXMT 0x0001 +#define BPF_PKTFLAGS_START_SEQ 0x0002 +#define BPF_PKTFLAGS_LAST_PKT 0x0004 + u_char bh_proto; /* kernel reserved; 0 in userland */ + bpf_u_int32 bh_svc; /* service class */ + bpf_u_int32 bh_flowid; /* kernel reserved; 0 in userland */ + bpf_u_int32 bh_unsent_bytes; /* unsent bytes at interface */ + bpf_u_int32 bh_unsent_snd; /* unsent bytes at socket buffer */ }; -#define BPF_CONTROL_NAME "com.apple.net.bpf" +#define BPF_CONTROL_NAME "com.apple.net.bpf" struct bpf_mtag { - char bt_comm[MAXCOMLEN]; - pid_t bt_pid; - bpf_u_int32 bt_svc; - unsigned char bt_direction; -#define BPF_MTAG_DIR_IN 0 -#define BPF_MTAG_DIR_OUT 1 + char bt_comm[MAXCOMLEN]; + pid_t bt_pid; + bpf_u_int32 bt_svc; + unsigned char bt_direction; +#define BPF_MTAG_DIR_IN 0 +#define BPF_MTAG_DIR_OUT 1 }; #endif /* PRIVATE */ @@ -312,19 +312,19 @@ struct bpf_mtag { /* * Data-link level type codes. */ -#define DLT_NULL 0 /* no link-layer encapsulation */ -#define DLT_EN10MB 1 /* Ethernet (10Mb) */ -#define DLT_EN3MB 2 /* Experimental Ethernet (3Mb) */ -#define DLT_AX25 3 /* Amateur Radio AX.25 */ -#define DLT_PRONET 4 /* Proteon ProNET Token Ring */ -#define DLT_CHAOS 5 /* Chaos */ -#define DLT_IEEE802 6 /* IEEE 802 Networks */ -#define DLT_ARCNET 7 /* ARCNET */ -#define DLT_SLIP 8 /* Serial Line IP */ -#define DLT_PPP 9 /* Point-to-point Protocol */ -#define DLT_FDDI 10 /* FDDI */ -#define DLT_ATM_RFC1483 11 /* LLC/SNAP encapsulated atm */ -#define DLT_RAW 12 /* raw IP */ +#define DLT_NULL 0 /* no link-layer encapsulation */ +#define DLT_EN10MB 1 /* Ethernet (10Mb) */ +#define DLT_EN3MB 2 /* Experimental Ethernet (3Mb) */ +#define DLT_AX25 3 /* Amateur Radio AX.25 */ +#define DLT_PRONET 4 /* Proteon ProNET Token Ring */ +#define DLT_CHAOS 5 /* Chaos */ +#define DLT_IEEE802 6 /* IEEE 802 Networks */ +#define DLT_ARCNET 7 /* ARCNET */ +#define DLT_SLIP 8 /* Serial Line IP */ +#define DLT_PPP 9 /* Point-to-point Protocol */ +#define DLT_FDDI 10 /* FDDI */ +#define DLT_ATM_RFC1483 11 /* LLC/SNAP encapsulated atm */ +#define DLT_RAW 12 /* raw IP */ /* * These are values from BSD/OS's "bpf.h". 
@@ -339,8 +339,8 @@ struct bpf_mtag { * continue to compile - even though they won't correctly read * files of these types. */ -#define DLT_SLIP_BSDOS 15 /* BSD/OS Serial Line IP */ -#define DLT_PPP_BSDOS 16 /* BSD/OS Point-to-point Protocol */ +#define DLT_SLIP_BSDOS 15 /* BSD/OS Serial Line IP */ +#define DLT_PPP_BSDOS 16 /* BSD/OS Point-to-point Protocol */ /* * 17 was used for DLT_PFLOG in OpenBSD; it no longer is. @@ -370,17 +370,17 @@ struct bpf_mtag { * have the relevant header files, so it's not that useful on * other platforms. */ -#define DLT_PFSYNC 18 /* Packet filter state syncing */ +#define DLT_PFSYNC 18 /* Packet filter state syncing */ -#define DLT_ATM_CLIP 19 /* Linux Classical-IP over ATM */ +#define DLT_ATM_CLIP 19 /* Linux Classical-IP over ATM */ /* * These values are defined by NetBSD; other platforms should refrain from * using them for other purposes, so that NetBSD savefiles with link * types of 50 or 51 can be read as this type on all platforms. */ -#define DLT_PPP_SERIAL 50 /* PPP over serial with HDLC encapsulation */ -#define DLT_PPP_ETHER 51 /* PPP over Ethernet */ +#define DLT_PPP_SERIAL 50 /* PPP over serial with HDLC encapsulation */ +#define DLT_PPP_ETHER 51 /* PPP over Ethernet */ /* * The Axent Raptor firewall - now the Symantec Enterprise Firewall - uses @@ -389,7 +389,7 @@ struct bpf_mtag { * Ethernet type, and 36 bytes that appear to be 0 in at least one capture * I've seen. */ -#define DLT_SYMANTEC_FIREWALL 99 +#define DLT_SYMANTEC_FIREWALL 99 /* * Values between 100 and 103 are used in capture file headers as @@ -408,7 +408,7 @@ struct bpf_mtag { * DLT_MATCHING_MIN is the lowest such value; DLT_MATCHING_MAX is * the highest such value. */ -#define DLT_MATCHING_MIN 104 +#define DLT_MATCHING_MIN 104 /* * This value was defined by libpcap 0.5; platforms that have defined @@ -424,10 +424,10 @@ struct bpf_mtag { * libpcap 0.5 defined it as DLT_CHDLC; we define DLT_CHDLC as well, * for source compatibility with programs written for libpcap 0.5. */ -#define DLT_C_HDLC 104 /* Cisco HDLC */ -#define DLT_CHDLC DLT_C_HDLC +#define DLT_C_HDLC 104 /* Cisco HDLC */ +#define DLT_CHDLC DLT_C_HDLC -#define DLT_IEEE802_11 105 /* IEEE 802.11 wireless */ +#define DLT_IEEE802_11 105 /* IEEE 802.11 wireless */ /* * Values between 106 and 107 are used in capture file headers as @@ -441,7 +441,7 @@ struct bpf_mtag { * DLT_FR and DLT_FRELAY packets start with the Q.922 Frame Relay header * (DLCI, etc.). */ -#define DLT_FRELAY 107 +#define DLT_FRELAY 107 /* * OpenBSD DLT_LOOP, for loopback devices; it's like DLT_NULL, except @@ -452,7 +452,7 @@ struct bpf_mtag { * define DLT_LOOP as 12 in its version, as per the comment above - * and should not use 108 for any purpose. */ -#define DLT_LOOP 108 +#define DLT_LOOP 108 /* * Values between 109 and 112 are used in capture file headers as @@ -465,104 +465,104 @@ struct bpf_mtag { * DLT_SLIP_BSDOS in NetBSD, so we don't use 13 for it in OSes other * than OpenBSD. */ -#define DLT_ENC 109 +#define DLT_ENC 109 /* * This is for Linux cooked sockets. */ -#define DLT_LINUX_SLL 113 +#define DLT_LINUX_SLL 113 /* * Apple LocalTalk hardware. */ -#define DLT_LTALK 114 +#define DLT_LTALK 114 /* * Acorn Econet. */ -#define DLT_ECONET 115 +#define DLT_ECONET 115 /* * Reserved for use with OpenBSD ipfilter. */ -#define DLT_IPFILTER 116 +#define DLT_IPFILTER 116 /* * For use in capture-file headers as a link-layer type corresponding * to OpenBSD PF (Packet Filter) log. 
*/ -#define DLT_PFLOG 117 +#define DLT_PFLOG 117 /* * Registered for Cisco-internal use. */ -#define DLT_CISCO_IOS 118 +#define DLT_CISCO_IOS 118 /* * Reserved for 802.11 cards using the Prism II chips, with a link-layer * header including Prism monitor mode information plus an 802.11 * header. */ -#define DLT_PRISM_HEADER 119 +#define DLT_PRISM_HEADER 119 /* * Reserved for Aironet 802.11 cards, with an Aironet link-layer header * (see Doug Ambrisko's FreeBSD patches). */ -#define DLT_AIRONET_HEADER 120 +#define DLT_AIRONET_HEADER 120 /* * Reserved for Siemens HiPath HDLC. XXX */ -#define DLT_HHDLC 121 +#define DLT_HHDLC 121 /* * Reserved for RFC 2625 IP-over-Fibre Channel. */ -#define DLT_IP_OVER_FC 122 +#define DLT_IP_OVER_FC 122 /* * Reserved for Full Frontal ATM on Solaris. */ -#define DLT_SUNATM 123 +#define DLT_SUNATM 123 /* * Reserved as per request from Kent Dahlgren * for private use. */ -#define DLT_RIO 124 /* RapidIO */ -#define DLT_PCI_EXP 125 /* PCI Express */ -#define DLT_AURORA 126 /* Xilinx Aurora link layer */ +#define DLT_RIO 124 /* RapidIO */ +#define DLT_PCI_EXP 125 /* PCI Express */ +#define DLT_AURORA 126 /* Xilinx Aurora link layer */ /* * BSD header for 802.11 plus a number of bits of link-layer information * including radio information. */ #ifndef DLT_IEEE802_11_RADIO -#define DLT_IEEE802_11_RADIO 127 +#define DLT_IEEE802_11_RADIO 127 #endif /* * Reserved for TZSP encapsulation. */ -#define DLT_TZSP 128 /* Tazmen Sniffer Protocol */ +#define DLT_TZSP 128 /* Tazmen Sniffer Protocol */ /* * Reserved for Linux ARCNET. */ -#define DLT_ARCNET_LINUX 129 +#define DLT_ARCNET_LINUX 129 /* * Juniper-private data link types. */ -#define DLT_JUNIPER_MLPPP 130 -#define DLT_JUNIPER_MLFR 131 -#define DLT_JUNIPER_ES 132 -#define DLT_JUNIPER_GGSN 133 -#define DLT_JUNIPER_MFR 134 -#define DLT_JUNIPER_ATM2 135 -#define DLT_JUNIPER_SERVICES 136 -#define DLT_JUNIPER_ATM1 137 +#define DLT_JUNIPER_MLPPP 130 +#define DLT_JUNIPER_MLFR 131 +#define DLT_JUNIPER_ES 132 +#define DLT_JUNIPER_GGSN 133 +#define DLT_JUNIPER_MFR 134 +#define DLT_JUNIPER_ATM2 135 +#define DLT_JUNIPER_SERVICES 136 +#define DLT_JUNIPER_ATM1 137 /* * Apple IP-over-IEEE 1394, as per a request from Dieter Siegmund @@ -579,32 +579,32 @@ struct bpf_mtag { * with "firewire_type" being an Ethernet type value, rather than, * for example, raw GASP frames being handed up. */ -#define DLT_APPLE_IP_OVER_IEEE1394 138 +#define DLT_APPLE_IP_OVER_IEEE1394 138 /* * Various SS7 encapsulations, as per a request from Jeff Morriss * and subsequent discussions. */ -#define DLT_MTP2_WITH_PHDR 139 /* pseudo-header with various info, followed by MTP2 */ -#define DLT_MTP2 140 /* MTP2, without pseudo-header */ -#define DLT_MTP3 141 /* MTP3, without pseudo-header or MTP2 */ -#define DLT_SCCP 142 /* SCCP, without pseudo-header or MTP2 or MTP3 */ +#define DLT_MTP2_WITH_PHDR 139 /* pseudo-header with various info, followed by MTP2 */ +#define DLT_MTP2 140 /* MTP2, without pseudo-header */ +#define DLT_MTP3 141 /* MTP3, without pseudo-header or MTP2 */ +#define DLT_SCCP 142 /* SCCP, without pseudo-header or MTP2 or MTP3 */ /* * Reserved for DOCSIS. */ -#define DLT_DOCSIS 143 +#define DLT_DOCSIS 143 /* * Reserved for Linux IrDA. */ -#define DLT_LINUX_IRDA 144 +#define DLT_LINUX_IRDA 144 /* * Reserved for IBM SP switch and IBM Next Federation switch. */ -#define DLT_IBM_SP 145 -#define DLT_IBM_SN 146 +#define DLT_IBM_SP 145 +#define DLT_IBM_SN 146 /* * Reserved for private use. 
If you have some link-layer header type @@ -631,22 +631,22 @@ struct bpf_mtag { * Instead, ask "tcpdump-workers@tcpdump.org" for a new DLT_ value, * as per the comment above, and use the type you're given. */ -#define DLT_USER0 147 -#define DLT_USER1 148 -#define DLT_USER2 149 -#define DLT_USER3 150 -#define DLT_USER4 151 -#define DLT_USER5 152 -#define DLT_USER6 153 -#define DLT_USER7 154 -#define DLT_USER8 155 -#define DLT_USER9 156 -#define DLT_USER10 157 -#define DLT_USER11 158 -#define DLT_USER12 159 -#define DLT_USER13 160 -#define DLT_USER14 161 -#define DLT_USER15 162 +#define DLT_USER0 147 +#define DLT_USER1 148 +#define DLT_USER2 149 +#define DLT_USER3 150 +#define DLT_USER4 151 +#define DLT_USER5 152 +#define DLT_USER6 153 +#define DLT_USER7 154 +#define DLT_USER8 155 +#define DLT_USER9 156 +#define DLT_USER10 157 +#define DLT_USER11 158 +#define DLT_USER12 159 +#define DLT_USER13 160 +#define DLT_USER14 161 +#define DLT_USER15 162 #ifdef PRIVATE /* @@ -654,9 +654,9 @@ struct bpf_mtag { */ #define DLT_USER0_APPLE_INTERNAL DLT_USER0 /* rdar://12019509 */ #define DLT_USER1_APPLE_INTERNAL DLT_USER1 /* rdar://12019509 */ -#define DLT_PKTAP DLT_USER2 /* rdar://11779467 */ +#define DLT_PKTAP DLT_USER2 /* rdar://11779467 */ #define DLT_USER3_APPLE_INTERNAL DLT_USER3 /* rdar://19614531 */ -#define DLT_USER4_APPLE_INTERNAL DLT_USER4 /* rdar://19614531 */ +#define DLT_USER4_APPLE_INTERNAL DLT_USER4 /* rdar://19614531 */ #endif /* PRIVATE */ /* @@ -669,7 +669,7 @@ struct bpf_mtag { * but it might be used by some non-AVS drivers now or in the * future. */ -#define DLT_IEEE802_11_RADIO_AVS 163 /* 802.11 plus AVS radio header */ +#define DLT_IEEE802_11_RADIO_AVS 163 /* 802.11 plus AVS radio header */ /* * Juniper-private data link type, as per request from @@ -682,7 +682,7 @@ struct bpf_mtag { /* * Reserved for BACnet MS/TP. */ -#define DLT_BACNET_MS_TP 165 +#define DLT_BACNET_MS_TP 165 /* * Another PPP variant as per request from Karsten Keil . @@ -698,14 +698,14 @@ struct bpf_mtag { * The first byte of the PPP header (0xff03) is modified to accomodate * the direction - 0x00 = IN, 0x01 = OUT. */ -#define DLT_PPP_PPPD 166 +#define DLT_PPP_PPPD 166 /* * Names for backwards compatibility with older versions of some PPP * software; new software should use DLT_PPP_PPPD. */ -#define DLT_PPP_WITH_DIRECTION DLT_PPP_PPPD -#define DLT_LINUX_PPP_WITHDIRECTION DLT_PPP_PPPD +#define DLT_PPP_WITH_DIRECTION DLT_PPP_PPPD +#define DLT_LINUX_PPP_WITHDIRECTION DLT_PPP_PPPD /* * Juniper-private data link type, as per request from @@ -716,16 +716,16 @@ struct bpf_mtag { #define DLT_JUNIPER_PPPOE 167 #define DLT_JUNIPER_PPPOE_ATM 168 -#define DLT_GPRS_LLC 169 /* GPRS LLC */ -#define DLT_GPF_T 170 /* GPF-T (ITU-T G.7041/Y.1303) */ -#define DLT_GPF_F 171 /* GPF-F (ITU-T G.7041/Y.1303) */ +#define DLT_GPRS_LLC 169 /* GPRS LLC */ +#define DLT_GPF_T 170 /* GPF-T (ITU-T G.7041/Y.1303) */ +#define DLT_GPF_F 171 /* GPF-F (ITU-T G.7041/Y.1303) */ /* * Requested by Oolan Zimmer for use in Gcom's T1/E1 line * monitoring equipment. */ -#define DLT_GCOM_T1E1 172 -#define DLT_GCOM_SERIAL 173 +#define DLT_GCOM_T1E1 172 +#define DLT_GCOM_SERIAL 173 /* * Juniper-private data link type, as per request from @@ -740,8 +740,8 @@ struct bpf_mtag { * http://www.endace.com/support/EndaceRecordFormat.pdf) in front of * the link-layer header. 
*/ -#define DLT_ERF_ETH 175 /* Ethernet */ -#define DLT_ERF_POS 176 /* Packet-over-SONET */ +#define DLT_ERF_ETH 175 /* Ethernet */ +#define DLT_ERF_POS 176 /* Packet-over-SONET */ /* * Requested by Daniele Orlandi for raw LAPD @@ -749,7 +749,7 @@ struct bpf_mtag { * includes additional information before the LAPD header, so it's * not necessarily a generic LAPD header. */ -#define DLT_LINUX_LAPD 177 +#define DLT_LINUX_LAPD 177 /* * Juniper-private data link type, as per request from @@ -796,25 +796,25 @@ struct bpf_mtag { * USB packets, beginning with a USB setup header; requested by * Paolo Abeni . */ -#define DLT_USB 186 +#define DLT_USB 186 /* * Bluetooth HCI UART transport layer (part H:4); requested by * Paolo Abeni. */ -#define DLT_BLUETOOTH_HCI_H4 187 +#define DLT_BLUETOOTH_HCI_H4 187 /* * IEEE 802.16 MAC Common Part Sublayer; requested by Maria Cruz * . */ -#define DLT_IEEE802_16_MAC_CPS 188 +#define DLT_IEEE802_16_MAC_CPS 188 /* * USB packets, beginning with a Linux USB header; requested by * Paolo Abeni . */ -#define DLT_USB_LINUX 189 +#define DLT_USB_LINUX 189 /* * Controller Area Network (CAN) v. 2.0B packets. @@ -829,19 +829,19 @@ struct bpf_mtag { * IEEE 802.15.4, with address fields padded, as is done by Linux * drivers; requested by Juergen Schimmer. */ -#define DLT_IEEE802_15_4_LINUX 191 +#define DLT_IEEE802_15_4_LINUX 191 /* * Per Packet Information encapsulated packets. * DLT_ requested by Gianluca Varenni . */ -#define DLT_PPI 192 +#define DLT_PPI 192 /* * Header for 802.16 MAC Common Part Sublayer plus a radiotap radio header; * requested by Charles Clancy. */ -#define DLT_IEEE802_16_MAC_CPS_RADIO 193 +#define DLT_IEEE802_16_MAC_CPS_RADIO 193 /* * Juniper-private data link type, as per request from @@ -855,34 +855,34 @@ struct bpf_mtag { * IEEE 802.15.4, exactly as it appears in the spec (no padding, no * nothing); requested by Mikko Saarnivala . */ -#define DLT_IEEE802_15_4 195 +#define DLT_IEEE802_15_4 195 /* * Various link-layer types, with a pseudo-header, for SITA * (http://www.sita.aero/); requested by Fulko Hew (fulko.hew@gmail.com). */ -#define DLT_SITA 196 +#define DLT_SITA 196 /* * Various link-layer types, with a pseudo-header, for Endace DAG cards; * encapsulates Endace ERF records. Requested by Stephen Donnelly * . */ -#define DLT_ERF 197 +#define DLT_ERF 197 /* * Special header prepended to Ethernet packets when capturing from a * u10 Networks board. Requested by Phil Mulholland * . */ -#define DLT_RAIF1 198 +#define DLT_RAIF1 198 /* * IPMB packet for IPMI, beginning with the I2C slave address, followed * by the netFn and LUN, etc.. Requested by Chanthy Toeung * . */ -#define DLT_IPMB 199 +#define DLT_IPMB 199 /* * Juniper-private data link type, as per request from @@ -895,7 +895,7 @@ struct bpf_mtag { * Bluetooth HCI UART transport layer (part H:4), with pseudo-header * that includes direction information; requested by Paolo Abeni. */ -#define DLT_BLUETOOTH_HCI_H4_WITH_PHDR 201 +#define DLT_BLUETOOTH_HCI_H4_WITH_PHDR 201 /* * AX.25 packet with a 1-byte KISS header; see @@ -978,40 +978,40 @@ struct bpf_mtag { */ #define DLT_IEEE802_15_4_NONASK_PHY 215 -/* +/* * David Gibson requested this for * captures from the Linux kernel /dev/input/eventN devices. This * is used to communicate keystrokes and mouse movements from the - * Linux kernel to display systems, such as Xorg. + * Linux kernel to display systems, such as Xorg. */ -#define DLT_LINUX_EVDEV 216 +#define DLT_LINUX_EVDEV 216 /* * GSM Um and Abis interfaces, preceded by a "gsmtap" header. 
* * Requested by Harald Welte . */ -#define DLT_GSMTAP_UM 217 -#define DLT_GSMTAP_ABIS 218 +#define DLT_GSMTAP_UM 217 +#define DLT_GSMTAP_ABIS 218 /* * MPLS, with an MPLS label as the link-layer header. * Requested by Michele Marchetto on behalf * of OpenBSD. */ -#define DLT_MPLS 219 +#define DLT_MPLS 219 /* * USB packets, beginning with a Linux USB header, with the USB header * padded to 64 bytes; required for memory-mapped access. */ -#define DLT_USB_LINUX_MMAPPED 220 +#define DLT_USB_LINUX_MMAPPED 220 /* * DECT packets, with a pseudo-header; requested by * Matthias Wenzel . */ -#define DLT_DECT 221 +#define DLT_DECT 221 /* * From: "Lidwa, Eric (GSFC-582.0)[SGT INC]" @@ -1031,13 +1031,13 @@ struct bpf_mtag { * * Requested by Sam Roberts . */ -#define DLT_WIHART 223 +#define DLT_WIHART 223 /* * Fibre Channel FC-2 frames, beginning with a Frame_Header. * Requested by Kahou Lei . */ -#define DLT_FC_2 224 +#define DLT_FC_2 224 /* * Fibre Channel FC-2 frames, beginning with an encoding of the @@ -1051,7 +1051,7 @@ struct bpf_mtag { * * Requested by Kahou Lei . */ -#define DLT_FC_2_WITH_FRAME_DELIMS 225 +#define DLT_FC_2_WITH_FRAME_DELIMS 225 /* * Solaris ipnet pseudo-header; requested by Darren Reed . @@ -1099,7 +1099,7 @@ struct bpf_mtag { * An IPv4 or IPv6 datagram follows the pseudo-header; dli_family indicates * which of those it is. */ -#define DLT_IPNET 226 +#define DLT_IPNET 226 /* * CAN (Controller Area Network) frames, with a pseudo-header as supplied @@ -1108,21 +1108,21 @@ struct bpf_mtag { * * Requested by Felix Obenhuber . */ -#define DLT_CAN_SOCKETCAN 227 +#define DLT_CAN_SOCKETCAN 227 /* * Raw IPv4/IPv6; different from DLT_RAW in that the DLT_ value specifies * whether it's v4 or v6. Requested by Darren Reed . */ -#define DLT_IPV4 228 -#define DLT_IPV6 229 +#define DLT_IPV4 228 +#define DLT_IPV6 229 /* * IEEE 802.15.4, exactly as it appears in the spec (no padding, no * nothing), and with no FCS at the end of the frame; requested by * Jon Smirl . */ -#define DLT_IEEE802_15_4_NOFCS 230 +#define DLT_IEEE802_15_4_NOFCS 230 /* * Raw D-Bus: @@ -1140,15 +1140,15 @@ struct bpf_mtag { * * Requested by Martin Vidner . */ -#define DLT_DBUS 231 +#define DLT_DBUS 231 /* * Juniper-private data link type, as per request from * Hannes Gredler . */ -#define DLT_JUNIPER_VS 232 -#define DLT_JUNIPER_SRX_E2E 233 -#define DLT_JUNIPER_FIBRECHANNEL 234 +#define DLT_JUNIPER_VS 232 +#define DLT_JUNIPER_SRX_E2E 233 +#define DLT_JUNIPER_FIBRECHANNEL 234 /* * DVB-CI (DVB Common Interface for communication between a PC Card @@ -1160,34 +1160,34 @@ struct bpf_mtag { * * Requested by Martin Kaiser . */ -#define DLT_DVB_CI 235 +#define DLT_DVB_CI 235 /* * Variant of 3GPP TS 27.010 multiplexing protocol (similar to, but * *not* the same as, 27.010). Requested by Hans-Christoph Schemmel * . */ -#define DLT_MUX27010 236 +#define DLT_MUX27010 236 /* * STANAG 5066 D_PDUs. Requested by M. Baris Demiray * . */ -#define DLT_STANAG_5066_D_PDU 237 +#define DLT_STANAG_5066_D_PDU 237 /* * Juniper-private data link type, as per request from * Hannes Gredler . */ -#define DLT_JUNIPER_ATM_CEMIC 238 +#define DLT_JUNIPER_ATM_CEMIC 238 /* - * NetFilter LOG messages + * NetFilter LOG messages * (payload of netlink NFNL_SUBSYS_ULOG/NFULNL_MSG_PACKET packets) * * Requested by Jakub Zawadzki */ -#define DLT_NFLOG 239 +#define DLT_NFLOG 239 /* * Hilscher Gesellschaft fuer Systemautomation mbH link-layer type @@ -1197,7 +1197,7 @@ struct bpf_mtag { * * Requested by Holger P. 
Frommer */ -#define DLT_NETANALYZER 240 +#define DLT_NETANALYZER 240 /* * Hilscher Gesellschaft fuer Systemautomation mbH link-layer type @@ -1208,21 +1208,21 @@ struct bpf_mtag { * * Requested by Holger P. Frommer */ -#define DLT_NETANALYZER_TRANSPARENT 241 +#define DLT_NETANALYZER_TRANSPARENT 241 /* * IP-over-Infiniband, as specified by RFC 4391. * * Requested by Petr Sumbera . */ -#define DLT_IPOIB 242 +#define DLT_IPOIB 242 /* * MPEG-2 transport stream (ISO 13818-1/ITU-T H.222.0). * * Requested by Guy Martin . */ -#define DLT_MPEG_2_TS 243 +#define DLT_MPEG_2_TS 243 /* * ng4T GmbH's UMTS Iub/Iur-over-ATM and Iub/Iur-over-IP format as @@ -1230,7 +1230,7 @@ struct bpf_mtag { * * Requested by Jens Grimmer . */ -#define DLT_NG40 244 +#define DLT_NG40 244 /* * Pseudo-header giving adapter number and flags, followed by an NFC @@ -1240,79 +1240,79 @@ struct bpf_mtag { * * Requested by Mike Wakerly . */ -#define DLT_NFC_LLCP 245 +#define DLT_NFC_LLCP 245 /* * USB packets, beginning with a Darwin (macOS, etc.) USB header. */ -#define DLT_USB_DARWIN 266 +#define DLT_USB_DARWIN 266 -#define DLT_MATCHING_MAX 266 /* highest value in the "matching" range */ +#define DLT_MATCHING_MAX 266 /* highest value in the "matching" range */ /* * The instruction encodings. */ /* instruction classes */ #define BPF_CLASS(code) ((code) & 0x07) -#define BPF_LD 0x00 -#define BPF_LDX 0x01 -#define BPF_ST 0x02 -#define BPF_STX 0x03 -#define BPF_ALU 0x04 -#define BPF_JMP 0x05 -#define BPF_RET 0x06 -#define BPF_MISC 0x07 +#define BPF_LD 0x00 +#define BPF_LDX 0x01 +#define BPF_ST 0x02 +#define BPF_STX 0x03 +#define BPF_ALU 0x04 +#define BPF_JMP 0x05 +#define BPF_RET 0x06 +#define BPF_MISC 0x07 /* ld/ldx fields */ -#define BPF_SIZE(code) ((code) & 0x18) -#define BPF_W 0x00 -#define BPF_H 0x08 -#define BPF_B 0x10 -#define BPF_MODE(code) ((code) & 0xe0) -#define BPF_IMM 0x00 -#define BPF_ABS 0x20 -#define BPF_IND 0x40 -#define BPF_MEM 0x60 -#define BPF_LEN 0x80 -#define BPF_MSH 0xa0 +#define BPF_SIZE(code) ((code) & 0x18) +#define BPF_W 0x00 +#define BPF_H 0x08 +#define BPF_B 0x10 +#define BPF_MODE(code) ((code) & 0xe0) +#define BPF_IMM 0x00 +#define BPF_ABS 0x20 +#define BPF_IND 0x40 +#define BPF_MEM 0x60 +#define BPF_LEN 0x80 +#define BPF_MSH 0xa0 /* alu/jmp fields */ -#define BPF_OP(code) ((code) & 0xf0) -#define BPF_ADD 0x00 -#define BPF_SUB 0x10 -#define BPF_MUL 0x20 -#define BPF_DIV 0x30 -#define BPF_OR 0x40 -#define BPF_AND 0x50 -#define BPF_LSH 0x60 -#define BPF_RSH 0x70 -#define BPF_NEG 0x80 -#define BPF_JA 0x00 -#define BPF_JEQ 0x10 -#define BPF_JGT 0x20 -#define BPF_JGE 0x30 -#define BPF_JSET 0x40 -#define BPF_SRC(code) ((code) & 0x08) -#define BPF_K 0x00 -#define BPF_X 0x08 +#define BPF_OP(code) ((code) & 0xf0) +#define BPF_ADD 0x00 +#define BPF_SUB 0x10 +#define BPF_MUL 0x20 +#define BPF_DIV 0x30 +#define BPF_OR 0x40 +#define BPF_AND 0x50 +#define BPF_LSH 0x60 +#define BPF_RSH 0x70 +#define BPF_NEG 0x80 +#define BPF_JA 0x00 +#define BPF_JEQ 0x10 +#define BPF_JGT 0x20 +#define BPF_JGE 0x30 +#define BPF_JSET 0x40 +#define BPF_SRC(code) ((code) & 0x08) +#define BPF_K 0x00 +#define BPF_X 0x08 /* ret - BPF_K and BPF_X also apply */ -#define BPF_RVAL(code) ((code) & 0x18) -#define BPF_A 0x10 +#define BPF_RVAL(code) ((code) & 0x18) +#define BPF_A 0x10 /* misc */ #define BPF_MISCOP(code) ((code) & 0xf8) -#define BPF_TAX 0x00 -#define BPF_TXA 0x80 +#define BPF_TAX 0x00 +#define BPF_TXA 0x80 /* * The instruction data structure. 
*/ struct bpf_insn { - u_short code; - u_char jt; - u_char jf; - bpf_u_int32 k; + u_short code; + u_char jt; + u_char jf; + bpf_u_int32 k; }; /* @@ -1327,10 +1327,10 @@ struct bpf_insn { * Structure to retrieve available DLTs for the interface. */ struct bpf_dltlist { - u_int32_t bfl_len; /* number of entries in bfl_list array */ + u_int32_t bfl_len; /* number of entries in bfl_list array */ union { - u_int32_t *bflu_list; /* array of DLTs */ - u_int64_t bflu_pad; + u_int32_t *bflu_list; /* array of DLTs */ + u_int64_t bflu_pad; } bfl_u; }; #define bfl_list bfl_u.bflu_list @@ -1343,148 +1343,148 @@ struct bpf_dltlist { #define PORT_BOOTPS 67 #define PORT_BOOTPC 68 #define PORT_ISAKMP 500 -#define PORT_ISAKMP_NATT 4500 /* rfc3948 */ +#define PORT_ISAKMP_NATT 4500 /* rfc3948 */ /* Forward declarations */ struct ifnet; struct mbuf; -#define BPF_PACKET_TYPE_MBUF 0 +#define BPF_PACKET_TYPE_MBUF 0 struct bpf_packet { - int bpfp_type; - void * bpfp_header; /* optional */ - size_t bpfp_header_length; + int bpfp_type; + void * bpfp_header; /* optional */ + size_t bpfp_header_length; union { - struct mbuf *bpfpu_mbuf; - void * bpfpu_ptr; + struct mbuf *bpfpu_mbuf; + void * bpfpu_ptr; } bpfp_u; -#define bpfp_mbuf bpfp_u.bpfpu_mbuf -#define bpfp_ptr bpfp_u.bpfpu_ptr - size_t bpfp_total_length; /* length including optional header */ +#define bpfp_mbuf bpfp_u.bpfpu_mbuf +#define bpfp_ptr bpfp_u.bpfpu_ptr + size_t bpfp_total_length; /* length including optional header */ }; -extern int bpf_validate(const struct bpf_insn *, int); -extern void bpfdetach(struct ifnet *); -extern void bpfilterattach(int); -extern u_int bpf_filter(const struct bpf_insn *, u_char *, u_int, u_int); +extern int bpf_validate(const struct bpf_insn *, int); +extern void bpfdetach(struct ifnet *); +extern void bpfilterattach(int); +extern u_int bpf_filter(const struct bpf_insn *, u_char *, u_int, u_int); #endif /* KERNEL_PRIVATE */ #ifdef KERNEL #ifndef BPF_TAP_MODE_T #define BPF_TAP_MODE_T /*! - @enum BPF tap mode - @abstract Constants defining BPF tap modes. - @constant BPF_MODE_DISABLED Disable bpf. - @constant BPF_MODE_INPUT Enable input only. - @constant BPF_MODE_OUTPUT Enable output only. - @constant BPF_MODE_INPUT_OUTPUT Enable input and output. -*/ + * @enum BPF tap mode + * @abstract Constants defining BPF tap modes. + * @constant BPF_MODE_DISABLED Disable bpf. + * @constant BPF_MODE_INPUT Enable input only. + * @constant BPF_MODE_OUTPUT Enable output only. + * @constant BPF_MODE_INPUT_OUTPUT Enable input and output. + */ enum { - BPF_MODE_DISABLED = 0, - BPF_MODE_INPUT = 1, - BPF_MODE_OUTPUT = 2, - BPF_MODE_INPUT_OUTPUT = 3 + BPF_MODE_DISABLED = 0, + BPF_MODE_INPUT = 1, + BPF_MODE_OUTPUT = 2, + BPF_MODE_INPUT_OUTPUT = 3 }; /*! - @typedef bpf_tap_mode - @abstract Mode for tapping. BPF_MODE_DISABLED/BPF_MODE_INPUT_OUTPUT etc. -*/ + * @typedef bpf_tap_mode + * @abstract Mode for tapping. BPF_MODE_DISABLED/BPF_MODE_INPUT_OUTPUT etc. + */ typedef u_int32_t bpf_tap_mode; #endif /* !BPF_TAP_MODE_T */ /*! - @typedef bpf_send_func - @discussion bpf_send_func is called when a bpf file descriptor is - used to send a raw packet on the interface. The mbuf and data - link type are specified. The callback is responsible for - releasing the mbuf whether or not it returns an error. - @param interface The interface the packet is being sent on. - @param data_link_type The data link type the bpf device is attached to. - @param packet The packet to be sent.
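As a worked example of the instruction encodings and the bpf_insn layout above, here is the classic hand-assembled program that accepts IPv4-over-Ethernet frames and rejects everything else. This is a sketch, not part of this patch; it assumes DLT_EN10MB framing and the standard BPF_STMT/BPF_JUMP initializer macros from this header.

/* Accept IPv4 over Ethernet, keeping at most 96 bytes; drop everything else. */
static struct bpf_insn ipv4_prog[] = {
    BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),            /* A <- ethertype at offset 12 */
    BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1), /* IPv4? fall through : skip 1 */
    BPF_STMT(BPF_RET | BPF_K, 96),                     /* accept, snapshot 96 bytes */
    BPF_STMT(BPF_RET | BPF_K, 0),                      /* reject */
};

Note that the program already has the shape bpf_validate() (reindented later in this patch) insists on: every jump is forward and the last instruction is a BPF_RET.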
+ * @typedef bpf_send_func + * @discussion bpf_send_func is called when a bpf file descriptor is + * used to send a raw packet on the interface. The mbuf and data + * link type are specified. The callback is responsible for + * releasing the mbuf whether or not it returns an error. + * @param interface The interface the packet is being sent on. + * @param data_link_type The data link type the bpf device is attached to. + * @param packet The packet to be sent. */ typedef errno_t (*bpf_send_func)(ifnet_t interface, u_int32_t data_link_type, mbuf_t packet); /*! - @typedef bpf_tap_func - @discussion bpf_tap_func is called when the tap state of the - interface changes. This happens when a bpf device attaches to an - interface or detaches from an interface. The tap mode will join - together (bitwise OR) the modes of all bpf devices using that - interface for that dlt. If you return an error from this - function, the bpf device attach attempt that triggered the tap - will fail. If this function was called because the tap state was - decreasing (tap in or out is stopping), the error will be - ignored. - @param interface The interface being tapped. - @param data_link_type The data link type being tapped. - @param direction The direction of the tap. + * @typedef bpf_tap_func + * @discussion bpf_tap_func is called when the tap state of the + * interface changes. This happens when a bpf device attaches to an + * interface or detaches from an interface. The tap mode will join + * together (bitwise OR) the modes of all bpf devices using that + * interface for that dlt. If you return an error from this + * function, the bpf device attach attempt that triggered the tap + * will fail. If this function was called because the tap state was + * decreasing (tap in or out is stopping), the error will be + * ignored. + * @param interface The interface being tapped. + * @param data_link_type The data link type being tapped. + * @param direction The direction of the tap. */ typedef errno_t (*bpf_tap_func)(ifnet_t interface, u_int32_t data_link_type, bpf_tap_mode direction); /*! - @function bpfattach - @discussion Registers an interface with BPF. This allows bpf devices - to attach to your interface to capture packets. Your interface - will be unregistered automatically when your interface is - detached. - @param interface The interface to register with BPF. - @param data_link_type The data link type of the interface. See the - DLT_* defines in bpf.h. - @param header_length The length, in bytes, of the data link header. + * @function bpfattach + * @discussion Registers an interface with BPF. This allows bpf devices + * to attach to your interface to capture packets. Your interface + * will be unregistered automatically when your interface is + * detached. + * @param interface The interface to register with BPF. + * @param data_link_type The data link type of the interface. See the + * DLT_* defines in bpf.h. + * @param header_length The length, in bytes, of the data link header. */ extern void bpfattach(ifnet_t interface, u_int data_link_type, u_int header_length); /*! - @function bpf_attach - @discussion Registers an interface with BPF. This allows bpf devices - to attach to your interface to capture and transmit packets. - Your interface will be unregistered automatically when your - interface is detached. You may register multiple times with - different data link types. An 802.11 interface would use this to - allow clients to pick whether they want just an ethernet style - frame or the 802.11 wireless headers as well.
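In practice the two callbacks tend to be small. A hypothetical driver might supply something like the sketch below; my_bpf_send, my_bpf_tap, and the my_tap_mode variable are illustrative names, and ifnet_output_raw() (from the interface KPI, used the same way by bridgestp.c later in this patch) consumes the mbuf on failure as well, which satisfies the ownership rule described above.

static bpf_tap_mode my_tap_mode;    /* hypothetical: which directions have readers */

static errno_t
my_bpf_send(ifnet_t interface, u_int32_t data_link_type, mbuf_t packet)
{
    /* Hand the raw frame to the transmit path; the mbuf is consumed
     * whether or not the send succeeds, as the contract requires. */
    return ifnet_output_raw(interface, 0, packet);
}

static errno_t
my_bpf_tap(ifnet_t interface, u_int32_t data_link_type, bpf_tap_mode direction)
{
    /* Remember whether any bpf reader currently wants input and/or
     * output copies; the data paths check this before tapping. */
    my_tap_mode = direction;
    return 0;
}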
The first dlt you - register will be considered the default. Any bpf device attaches - that do not specify a data link type will use the default. - @param interface The interface to register with BPF. - @param data_link_type The data link type of the interface. See the - DLT_* defines in bpf.h. - @param header_length The length, in bytes, of the data link header. - @param send See the bpf_send_func described above. - @param tap See the bpf_tap_func described above. + * @function bpf_attach + * @discussion Registers an interface with BPF. This allows bpf devices + * to attach to your interface to capture and transmit packets. + * Your interface will be unregistered automatically when your + * interface is detached. You may register multiple times with + * different data link types. An 802.11 interface would use this to + * allow clients to pick whether they want just an ethernet style + * frame or the 802.11 wireless headers as well. The first dlt you + * register will be considered the default. Any bpf device attaches + * that do not specify a data link type will use the default. + * @param interface The interface to register with BPF. + * @param data_link_type The data link type of the interface. See the + * DLT_* defines in bpf.h. + * @param header_length The length, in bytes, of the data link header. + * @param send See the bpf_send_func described above. + * @param tap See the bpf_tap_func described above. */ extern errno_t bpf_attach(ifnet_t interface, u_int32_t data_link_type, u_int32_t header_length, bpf_send_func send, bpf_tap_func tap); /*! - @function bpf_tap_in - @discussion Call this function when your interface receives a - packet. This function will check if any bpf devices need - a copy of the packet. - @param interface The interface the packet was received on. - @param dlt The data link type of the packet. - @param packet The packet received. - @param header An optional pointer to a header that will be prepended. - @param header_len If the header was specified, the length of the header. + * @function bpf_tap_in + * @discussion Call this function when your interface receives a + * packet. This function will check if any bpf devices need + * a copy of the packet. + * @param interface The interface the packet was received on. + * @param dlt The data link type of the packet. + * @param packet The packet received. + * @param header An optional pointer to a header that will be prepended. + * @param header_len If the header was specified, the length of the header. */ extern void bpf_tap_in(ifnet_t interface, u_int32_t dlt, mbuf_t packet, void *header, size_t header_len); /*! - @function bpf_tap_out - @discussion Call this function when your interface transmits a - packet. This function will check if any bpf devices need - a copy of the packet. - @param interface The interface the packet was or will be transmitted on. - @param dlt The data link type of the packet. - @param packet The packet being transmitted. - @param header An optional pointer to a header that will be prepended. - @param header_len If the header was specified, the length of the header. + * @function bpf_tap_out + * @discussion Call this function when your interface transmits a + * packet. This function will check if any bpf devices need + * a copy of the packet. + * @param interface The interface the packet was or will be transmitted on. + * @param dlt The data link type of the packet. + * @param packet The packet being transmitted. + * @param header An optional pointer to a header that will be prepended.
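Wiring the pieces together, the same hypothetical driver registers once at attach time and then feeds each direction through the tap calls, skipping the copy when nobody is listening. A sketch; the DLT and header length assume plain Ethernet (struct ether_header from net/ethernet.h), and ifp and m stand in for the driver's interface handle and the frame at hand.

/* At attach time: register for capture and transmit via bpf. */
errno_t err = bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header),
    my_bpf_send, my_bpf_tap);
if (err != 0) {
    return err;    /* interface attach fails if BPF registration fails */
}

/* Receive path: only copy to bpf when a reader asked for input. */
if (my_tap_mode & BPF_MODE_INPUT) {
    bpf_tap_in(ifp, DLT_EN10MB, m, NULL, 0);
}

/* Transmit path: likewise for output. */
if (my_tap_mode & BPF_MODE_OUTPUT) {
    bpf_tap_out(ifp, DLT_EN10MB, m, NULL, 0);
}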
+ * @param header_len If the header was specified, the length of the header. */ extern void bpf_tap_out(ifnet_t interface, u_int32_t dlt, mbuf_t packet, void *header, size_t header_len); diff --git a/bsd/net/bpf_compat.h b/bsd/net/bpf_compat.h index 560571434..b2577c075 100644 --- a/bsd/net/bpf_compat.h +++ b/bsd/net/bpf_compat.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- diff --git a/bsd/net/bpf_filter.c b/bsd/net/bpf_filter.c index 7fbafb3c0..12d3a6e37 100644 --- a/bsd/net/bpf_filter.c +++ b/bsd/net/bpf_filter.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
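Since bpf_filter() below is built for userland as well as the kernel (the KERNEL-only branches handle the mbuf-backed case), the hand-assembled ipv4_prog from earlier can be evaluated directly against a contiguous capture buffer. A small sketch, assuming the prototype shown above; pkt and pkt_len are illustrative.

/* Returns nonzero if the packet matches: bpf_filter()'s return value is the
 * number of bytes to keep, and 0 means reject. wirelen and buflen are both
 * pkt_len here because the whole packet is present in the buffer. */
static int
packet_matches(const struct bpf_insn *prog, u_char *pkt, u_int pkt_len)
{
    return bpf_filter(prog, pkt, pkt_len, pkt_len) != 0;
}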
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -85,37 +85,37 @@ extern unsigned int bpf_maxbufsize; static inline u_int32_t get_word_from_buffers(u_char * cp, u_char * np, int num_from_cp) { - u_int32_t val; + u_int32_t val; switch (num_from_cp) { case 1: val = ((u_int32_t)cp[0] << 24) | - ((u_int32_t)np[0] << 16) | - ((u_int32_t)np[1] << 8) | - (u_int32_t)np[2]; + ((u_int32_t)np[0] << 16) | + ((u_int32_t)np[1] << 8) | + (u_int32_t)np[2]; break; case 2: val = ((u_int32_t)cp[0] << 24) | - ((u_int32_t)cp[1] << 16) | - ((u_int32_t)np[0] << 8) | - (u_int32_t)np[1]; + ((u_int32_t)cp[1] << 16) | + ((u_int32_t)np[0] << 8) | + (u_int32_t)np[1]; break; default: val = ((u_int32_t)cp[0] << 24) | - ((u_int32_t)cp[1] << 16) | - ((u_int32_t)cp[2] << 8) | - (u_int32_t)np[0]; + ((u_int32_t)cp[1] << 16) | + ((u_int32_t)cp[2] << 8) | + (u_int32_t)np[0]; break; } - return (val); + return val; } static u_char * m_hdr_offset(struct mbuf **m_p, void * hdr, size_t hdrlen, bpf_u_int32 * k_p, size_t * len_p) { - u_char *cp; + u_char *cp; bpf_u_int32 k = *k_p; size_t len; @@ -128,8 +128,9 @@ m_hdr_offset(struct mbuf **m_p, void * hdr, size_t hdrlen, bpf_u_int32 * k_p, while (k >= len) { k -= len; m = m->m_next; - if (m == NULL) - return (NULL); + if (m == NULL) { + return NULL; + } len = m->m_len; } cp = mtod(m, u_char *) + k; @@ -144,7 +145,7 @@ m_hdr_offset(struct mbuf **m_p, void * hdr, size_t hdrlen, bpf_u_int32 * k_p, cp = (u_char *)hdr + k; } *len_p = len; - return (cp); + return cp; } static u_int32_t @@ -154,19 +155,21 @@ m_xword(struct mbuf *m, void * hdr, size_t hdrlen, bpf_u_int32 k, int *err) u_char *cp, *np; cp = m_hdr_offset(&m, hdr, hdrlen, &k, &len); - if (cp == NULL) + if (cp == NULL) { goto bad; + } if (len - k >= 4) { *err = 0; return EXTRACT_LONG(cp); } - if (m == 0 || m->m_len + len - k < 4) + if (m == 0 || m->m_len + len - k < 4) { goto bad; + } *err = 0; np = mtod(m, u_char *); return get_word_from_buffers(cp, np, len - k); - bad: +bad: *err = 1; return 0; } @@ -178,17 +181,19 @@ m_xhalf(struct mbuf *m, void * hdr, size_t hdrlen, bpf_u_int32 k, int *err) u_char *cp; cp = m_hdr_offset(&m, hdr, hdrlen, &k, &len); - if (cp == NULL) + if (cp == NULL) { goto bad; + } if (len - k >= 2) { *err = 0; return EXTRACT_SHORT(cp); } - if (m == 0) + if (m == 0) { goto bad; + } *err = 0; return (cp[0] << 8) | mtod(m, u_char *)[0]; - bad: +bad: *err = 1; return 0; } @@ -200,22 +205,22 @@ m_xbyte(struct mbuf *m, void * hdr, size_t hdrlen, bpf_u_int32 k, int *err) u_char *cp; cp = m_hdr_offset(&m, hdr, hdrlen, &k, &len); - if (cp == NULL) + if (cp == NULL) { goto bad; + } *err = 0; - return (*cp); - bad: + return *cp; +bad: *err = 1; return 0; - } static u_int32_t bp_xword(struct bpf_packet *bp, bpf_u_int32 k, int *err) { - void * hdr = bp->bpfp_header; - size_t hdrlen = bp->bpfp_header_length; + void * hdr = bp->bpfp_header; + size_t hdrlen = bp->bpfp_header_length; switch (bp->bpfp_type) { case BPF_PACKET_TYPE_MBUF: @@ -225,14 +230,13 @@ bp_xword(struct bpf_packet *bp, bpf_u_int32 k, int *err) } *err = 1; return 0; - } static u_int16_t bp_xhalf(struct bpf_packet *bp, bpf_u_int32 k, int *err) { - void * hdr = bp->bpfp_header; - size_t hdrlen = bp->bpfp_header_length; + void * hdr = bp->bpfp_header; + size_t hdrlen = bp->bpfp_header_length; switch (bp->bpfp_type) { case BPF_PACKET_TYPE_MBUF: @@ -242,14 +246,13 @@ bp_xhalf(struct bpf_packet *bp, bpf_u_int32 k, int *err) } *err = 1; return 0; - } static u_int8_t bp_xbyte(struct bpf_packet *bp, bpf_u_int32 k, int *err) { - void * hdr = bp->bpfp_header; 
- size_t hdrlen = bp->bpfp_header_length; + void * hdr = bp->bpfp_header; + size_t hdrlen = bp->bpfp_header_length; switch (bp->bpfp_type) { case BPF_PACKET_TYPE_MBUF: @@ -259,7 +262,6 @@ bp_xbyte(struct bpf_packet *bp, bpf_u_int32 k, int *err) } *err = 1; return 0; - } #endif @@ -282,60 +284,64 @@ bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) bzero(mem, sizeof(mem)); - if (pc == 0) + if (pc == 0) { /* * No filter means accept all. */ - return (u_int)-1; + return (u_int) - 1; + } --pc; while (1) { ++pc; switch (pc->code) { - default: #ifdef KERNEL return 0; #else /* KERNEL */ abort(); #endif /* KERNEL */ - case BPF_RET|BPF_K: + case BPF_RET | BPF_K: return (u_int)pc->k; - case BPF_RET|BPF_A: + case BPF_RET | BPF_A: return (u_int)A; - case BPF_LD|BPF_W|BPF_ABS: + case BPF_LD | BPF_W | BPF_ABS: k = pc->k; if (k > buflen || sizeof(int32_t) > buflen - k) { #ifdef KERNEL - if (buflen != 0) + if (buflen != 0) { return 0; + } A = bp_xword(bp, k, &merr); - if (merr != 0) + if (merr != 0) { return 0; + } continue; #else /* KERNEL */ return 0; #endif /* KERNEL */ } #if BPF_ALIGN - if (((intptr_t)(p + k) & 3) != 0) + if (((intptr_t)(p + k) & 3) != 0) { A = EXTRACT_LONG(&p[k]); - else + } else #endif /* BPF_ALIGN */ - A = ntohl(*(int32_t *)(void *)(p + k)); + A = ntohl(*(int32_t *)(void *)(p + k)); continue; - case BPF_LD|BPF_H|BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: k = pc->k; if (k > buflen || sizeof(int16_t) > buflen - k) { #ifdef KERNEL - if (buflen != 0) + if (buflen != 0) { return 0; + } A = bp_xhalf(bp, k, &merr); - if (merr != 0) + if (merr != 0) { return 0; + } continue; #else /* KERNEL */ return 0; @@ -344,15 +350,17 @@ bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) A = EXTRACT_SHORT(&p[k]); continue; - case BPF_LD|BPF_B|BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: k = pc->k; if (k >= buflen) { #ifdef KERNEL - if (buflen != 0) + if (buflen != 0) { return 0; + } A = bp_xbyte(bp, k, &merr); - if (merr != 0) + if (merr != 0) { return 0; + } continue; #else /* KERNEL */ return 0; @@ -361,47 +369,51 @@ bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) A = p[k]; continue; - case BPF_LD|BPF_W|BPF_LEN: + case BPF_LD | BPF_W | BPF_LEN: A = wirelen; continue; - case BPF_LDX|BPF_W|BPF_LEN: + case BPF_LDX | BPF_W | BPF_LEN: X = wirelen; continue; - case BPF_LD|BPF_W|BPF_IND: + case BPF_LD | BPF_W | BPF_IND: k = X + pc->k; if (pc->k > buflen || X > buflen - pc->k || sizeof(int32_t) > buflen - k) { #ifdef KERNEL - if (buflen != 0) + if (buflen != 0) { return 0; + } A = bp_xword(bp, k, &merr); - if (merr != 0) + if (merr != 0) { return 0; + } continue; #else /* KERNEL */ return 0; #endif /* KERNEL */ } #if BPF_ALIGN - if (((intptr_t)(p + k) & 3) != 0) + if (((intptr_t)(p + k) & 3) != 0) { A = EXTRACT_LONG(&p[k]); - else + } else #endif /* BPF_ALIGN */ - A = ntohl(*(int32_t *)(void *)(p + k)); + A = ntohl(*(int32_t *)(void *)(p + k)); continue; - case BPF_LD|BPF_H|BPF_IND: + case BPF_LD | BPF_H | BPF_IND: k = X + pc->k; if (X > buflen || pc->k > buflen - X || sizeof(int16_t) > buflen - k) { #ifdef KERNEL - if (buflen != 0) + if (buflen != 0) { return 0; + } A = bp_xhalf(bp, k, &merr); - if (merr != 0) + if (merr != 0) { return 0; + } continue; #else /* KERNEL */ return 0; @@ -410,15 +422,17 @@ bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) A = EXTRACT_SHORT(&p[k]); continue; - case BPF_LD|BPF_B|BPF_IND: + case BPF_LD | BPF_B | BPF_IND: k = X + pc->k; if (pc->k >= buflen || X >= buflen - 
pc->k) { #ifdef KERNEL - if (buflen != 0) + if (buflen != 0) { return 0; + } A = bp_xbyte(bp, k, &merr); - if (merr != 0) + if (merr != 0) { return 0; + } continue; #else /* KERNEL */ return 0; @@ -427,15 +441,17 @@ bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) A = p[k]; continue; - case BPF_LDX|BPF_MSH|BPF_B: + case BPF_LDX | BPF_MSH | BPF_B: k = pc->k; if (k >= buflen) { #ifdef KERNEL - if (buflen != 0) + if (buflen != 0) { return 0; + } X = bp_xbyte(bp, k, &merr); - if (merr != 0) + if (merr != 0) { return 0; + } X = (X & 0xf) << 2; continue; #else @@ -445,145 +461,148 @@ bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) X = (p[pc->k] & 0xf) << 2; continue; - case BPF_LD|BPF_IMM: + case BPF_LD | BPF_IMM: A = pc->k; continue; - case BPF_LDX|BPF_IMM: + case BPF_LDX | BPF_IMM: X = pc->k; continue; - case BPF_LD|BPF_MEM: + case BPF_LD | BPF_MEM: A = mem[pc->k]; continue; - case BPF_LDX|BPF_MEM: + case BPF_LDX | BPF_MEM: X = mem[pc->k]; continue; case BPF_ST: - if (pc->k >= BPF_MEMWORDS) + if (pc->k >= BPF_MEMWORDS) { return 0; + } mem[pc->k] = A; continue; case BPF_STX: - if (pc->k >= BPF_MEMWORDS) + if (pc->k >= BPF_MEMWORDS) { return 0; + } mem[pc->k] = X; continue; - case BPF_JMP|BPF_JA: + case BPF_JMP | BPF_JA: pc += pc->k; continue; - case BPF_JMP|BPF_JGT|BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: pc += (A > pc->k) ? pc->jt : pc->jf; continue; - case BPF_JMP|BPF_JGE|BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: pc += (A >= pc->k) ? pc->jt : pc->jf; continue; - case BPF_JMP|BPF_JEQ|BPF_K: + case BPF_JMP | BPF_JEQ | BPF_K: pc += (A == pc->k) ? pc->jt : pc->jf; continue; - case BPF_JMP|BPF_JSET|BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: pc += (A & pc->k) ? pc->jt : pc->jf; continue; - case BPF_JMP|BPF_JGT|BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: pc += (A > X) ? pc->jt : pc->jf; continue; - case BPF_JMP|BPF_JGE|BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: pc += (A >= X) ? pc->jt : pc->jf; continue; - case BPF_JMP|BPF_JEQ|BPF_X: + case BPF_JMP | BPF_JEQ | BPF_X: pc += (A == X) ? pc->jt : pc->jf; continue; - case BPF_JMP|BPF_JSET|BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: pc += (A & X) ? 
pc->jt : pc->jf; continue; - case BPF_ALU|BPF_ADD|BPF_X: + case BPF_ALU | BPF_ADD | BPF_X: A += X; continue; - case BPF_ALU|BPF_SUB|BPF_X: + case BPF_ALU | BPF_SUB | BPF_X: A -= X; continue; - case BPF_ALU|BPF_MUL|BPF_X: + case BPF_ALU | BPF_MUL | BPF_X: A *= X; continue; - case BPF_ALU|BPF_DIV|BPF_X: - if (X == 0) + case BPF_ALU | BPF_DIV | BPF_X: + if (X == 0) { return 0; + } A /= X; continue; - case BPF_ALU|BPF_AND|BPF_X: + case BPF_ALU | BPF_AND | BPF_X: A &= X; continue; - case BPF_ALU|BPF_OR|BPF_X: + case BPF_ALU | BPF_OR | BPF_X: A |= X; continue; - case BPF_ALU|BPF_LSH|BPF_X: + case BPF_ALU | BPF_LSH | BPF_X: A <<= X; continue; - case BPF_ALU|BPF_RSH|BPF_X: + case BPF_ALU | BPF_RSH | BPF_X: A >>= X; continue; - case BPF_ALU|BPF_ADD|BPF_K: + case BPF_ALU | BPF_ADD | BPF_K: A += pc->k; continue; - case BPF_ALU|BPF_SUB|BPF_K: + case BPF_ALU | BPF_SUB | BPF_K: A -= pc->k; continue; - case BPF_ALU|BPF_MUL|BPF_K: + case BPF_ALU | BPF_MUL | BPF_K: A *= pc->k; continue; - case BPF_ALU|BPF_DIV|BPF_K: + case BPF_ALU | BPF_DIV | BPF_K: A /= pc->k; continue; - case BPF_ALU|BPF_AND|BPF_K: + case BPF_ALU | BPF_AND | BPF_K: A &= pc->k; continue; - case BPF_ALU|BPF_OR|BPF_K: + case BPF_ALU | BPF_OR | BPF_K: A |= pc->k; continue; - case BPF_ALU|BPF_LSH|BPF_K: + case BPF_ALU | BPF_LSH | BPF_K: A <<= pc->k; continue; - case BPF_ALU|BPF_RSH|BPF_K: + case BPF_ALU | BPF_RSH | BPF_K: A >>= pc->k; continue; - case BPF_ALU|BPF_NEG: + case BPF_ALU | BPF_NEG: A = -A; continue; - case BPF_MISC|BPF_TAX: + case BPF_MISC | BPF_TAX: X = A; continue; - case BPF_MISC|BPF_TXA: + case BPF_MISC | BPF_TXA: A = X; continue; } @@ -594,7 +613,7 @@ bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) /* * Return true if the 'fcode' is a valid filter program. * The constraints are that each jump be forward and to a valid - * code, that memory accesses are within valid ranges (to the + * code, that memory accesses are within valid ranges (to the * extent that this can be checked statically; loads of packet data * have to be, and are, also checked at run time), and that * the code terminates with either an accept or reject. @@ -608,109 +627,116 @@ bpf_validate(const struct bpf_insn *f, int len) u_int i, from; const struct bpf_insn *p; - if (len < 1 || len > BPF_MAXINSNS) + if (len < 1 || len > BPF_MAXINSNS) { return 0; - + } + for (i = 0; i < ((u_int)len); ++i) { p = &f[i]; switch (BPF_CLASS(p->code)) { - /* - * Check that memory operations use valid addresses - */ - case BPF_LD: - case BPF_LDX: - switch (BPF_MODE(p->code)) { - case BPF_IMM: - break; - case BPF_ABS: - case BPF_IND: - case BPF_MSH: - /* - * More strict check with actual packet length - * is done runtime. - */ - if (p->k >= bpf_maxbufsize) - return 0; - break; - case BPF_MEM: - if (p->k >= BPF_MEMWORDS) - return 0; - break; - case BPF_LEN: - break; - default: - return 0; - } + /* + * Check that memory operations use valid addresses + */ + case BPF_LD: + case BPF_LDX: + switch (BPF_MODE(p->code)) { + case BPF_IMM: break; - case BPF_ST: - case BPF_STX: - if (p->k >= BPF_MEMWORDS) + case BPF_ABS: + case BPF_IND: + case BPF_MSH: + /* + * More strict check with actual packet length + * is done runtime. 
+ */ + if (p->k >= bpf_maxbufsize) { return 0; + } break; - case BPF_ALU: - switch (BPF_OP(p->code)) { - case BPF_ADD: - case BPF_SUB: - case BPF_MUL: - case BPF_OR: - case BPF_AND: - case BPF_LSH: - case BPF_RSH: - case BPF_NEG: - break; - case BPF_DIV: - /* - * Check for constant division by 0 - */ - if(BPF_SRC(p->code) == BPF_K && p->k == 0) - return 0; - break; - default: - return 0; + case BPF_MEM: + if (p->k >= BPF_MEMWORDS) { + return 0; } break; - case BPF_JMP: + case BPF_LEN: + break; + default: + return 0; + } + break; + case BPF_ST: + case BPF_STX: + if (p->k >= BPF_MEMWORDS) { + return 0; + } + break; + case BPF_ALU: + switch (BPF_OP(p->code)) { + case BPF_ADD: + case BPF_SUB: + case BPF_MUL: + case BPF_OR: + case BPF_AND: + case BPF_LSH: + case BPF_RSH: + case BPF_NEG: + break; + case BPF_DIV: /* - * Check that jumps are within the code block, - * and that unconditional branches don't go - * backwards as a result of an overflow. - * Unconditional branches have a 32-bit offset, - * so they could overflow; we check to make - * sure they don't. Conditional branches have - * an 8-bit offset, and the from address is - * less than equal to BPF_MAXINSNS, and we assume that - * BPF_MAXINSNS is sufficiently small that adding 255 - * to it won't overlflow - * - * We know that len is <= BPF_MAXINSNS, and we - * assume that BPF_MAXINSNS is less than the maximum - * size of a u_int, so that i+1 doesn't overflow + * Check for constant division by 0 */ - from = i+1; - switch (BPF_OP(p->code)) { - case BPF_JA: - if (from + p->k < from || from + p->k >= ((u_int)len)) - return 0; - break; - case BPF_JEQ: - case BPF_JGT: - case BPF_JGE: - case BPF_JSET: - if (from + p->jt >= ((u_int)len) || from + p->jf >= ((u_int)len)) - return 0; - break; - default: - return 0; + if (BPF_SRC(p->code) == BPF_K && p->k == 0) { + return 0; } break; - case BPF_RET: + default: + return 0; + } + break; + case BPF_JMP: + /* + * Check that jumps are within the code block, + * and that unconditional branches don't go + * backwards as a result of an overflow. + * Unconditional branches have a 32-bit offset, + * so they could overflow; we check to make + * sure they don't. Conditional branches have + * an 8-bit offset, and the from address is + * less than equal to BPF_MAXINSNS, and we assume that + * BPF_MAXINSNS is sufficiently small that adding 255 + * to it won't overlflow + * + * We know that len is <= BPF_MAXINSNS, and we + * assume that BPF_MAXINSNS is less than the maximum + * size of a u_int, so that i+1 doesn't overflow + */ + from = i + 1; + switch (BPF_OP(p->code)) { + case BPF_JA: + if (from + p->k < from || from + p->k >= ((u_int)len)) { + return 0; + } break; - case BPF_MISC: + case BPF_JEQ: + case BPF_JGT: + case BPF_JGE: + case BPF_JSET: + if (from + p->jt >= ((u_int)len) || from + p->jf >= ((u_int)len)) { + return 0; + } break; default: return 0; + } + break; + case BPF_RET: + break; + case BPF_MISC: + break; + default: + return 0; } } - return BPF_CLASS(f[len - 1].code) == BPF_RET; + return BPF_CLASS(f[len - 1].code) == BPF_RET; } #endif diff --git a/bsd/net/bpfdesc.h b/bsd/net/bpfdesc.h index 8e18cb937..ce9899f0a 100644 --- a/bsd/net/bpfdesc.h +++ b/bsd/net/bpfdesc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -84,7 +84,7 @@ * Descriptor associated with each open bpf file. */ struct bpf_d { - struct bpf_d *bd_next; /* Linked list of descriptors */ + struct bpf_d *bd_next; /* Linked list of descriptors */ /* * Buffer slots: two mbuf clusters buffer the incoming packets. * The model has three slots. Sbuf is always occupied. @@ -94,98 +94,98 @@ struct bpf_d { * fbuf (free) - When read is done, put cluster here. * On receiving, if sbuf is full and fbuf is 0, packet is dropped. */ - caddr_t bd_sbuf; /* store slot */ - caddr_t bd_hbuf; /* hold slot */ - caddr_t bd_fbuf; /* free slot */ - int bd_slen; /* current length of store buffer */ - int bd_hlen; /* current length of hold buffer */ - u_int32_t bd_scnt; /* number of packets in store buffer */ - u_int32_t bd_hcnt; /* number of packets in hold buffer */ + caddr_t bd_sbuf; /* store slot */ + caddr_t bd_hbuf; /* hold slot */ + caddr_t bd_fbuf; /* free slot */ + int bd_slen; /* current length of store buffer */ + int bd_hlen; /* current length of hold buffer */ + u_int32_t bd_scnt; /* number of packets in store buffer */ + u_int32_t bd_hcnt; /* number of packets in hold buffer */ - int bd_bufsize; /* absolute length of buffers */ - int bd_hbuf_read; /* reading from hbuf */ - int bd_headdrop; /* Keep newer packets */ + int bd_bufsize; /* absolute length of buffers */ + int bd_hbuf_read; /* reading from hbuf */ + int bd_headdrop; /* Keep newer packets */ - struct bpf_if *bd_bif; /* interface descriptor */ - u_int32_t bd_rtout; /* Read timeout in 'ticks' */ - struct bpf_insn *bd_filter; /* filter code */ - u_int32_t bd_rcount; /* number of packets received */ - u_int32_t bd_dcount; /* number of packets dropped */ + struct bpf_if *bd_bif; /* interface descriptor */ + u_int32_t bd_rtout; /* Read timeout in 'ticks' */ + struct bpf_insn *bd_filter; /* filter code */ + u_int32_t bd_rcount; /* number of packets received */ + u_int32_t bd_dcount; /* number of packets dropped */ - u_char bd_promisc; /* true if listening promiscuously */ - u_char bd_state; /* idle, waiting, or timed out */ - u_char bd_immediate; /* true to return on packet arrival */ - int bd_async; /* non-zero if packet reception should generate signal */ - int bd_sig; /* signal to send upon packet reception */ + u_char bd_promisc; /* true if listening promiscuously */ + u_char bd_state; /* idle, waiting, or timed out */ + u_char bd_immediate; /* true to return on packet arrival */ + int bd_async; /* non-zero if packet reception should generate signal */ + int bd_sig; /* signal to send upon packet reception */ #ifdef __APPLE__ - pid_t bd_sigio; + pid_t bd_sigio; #else - struct sigio * bd_sigio; /* information for async I/O */ + struct sigio * bd_sigio; 
/* information for async I/O */ #endif #if BSD < 199103 - u_char bd_selcoll; /* true if selects collide */ - int bd_timedout; - struct proc * bd_selproc; /* process that last selected us */ + u_char bd_selcoll; /* true if selects collide */ + int bd_timedout; + struct proc * bd_selproc; /* process that last selected us */ #else - u_char bd_pad; /* explicit alignment */ - struct selinfo bd_sel; /* bsd select info */ + u_char bd_pad; /* explicit alignment */ + struct selinfo bd_sel; /* bsd select info */ #endif - int bd_hdrcmplt; /* false to fill in src lladdr automatically */ - int bd_seesent; /* true if bpf should see sent packets */ - int bd_oflags; /* device open flags */ - thread_call_t bd_thread_call; /* for BPF timeouts with select */ + int bd_hdrcmplt; /* false to fill in src lladdr automatically */ + int bd_seesent; /* true if bpf should see sent packets */ + int bd_oflags; /* device open flags */ + thread_call_t bd_thread_call; /* for BPF timeouts with select */ #if CONFIG_MACF_NET - struct label * bd_label; /* MAC label for descriptor */ + struct label * bd_label; /* MAC label for descriptor */ #endif - int bd_traffic_class; /* traffic service class */ - int bd_flags; /* flags */ + int bd_traffic_class; /* traffic service class */ + int bd_flags; /* flags */ - int bd_refcnt; -#define BPF_REF_HIST 4 /* how many callers to keep around */ - void *bd_ref_lr[BPF_REF_HIST]; - void *bd_unref_lr[BPF_REF_HIST]; - int bd_next_ref_lr; - int bd_next_unref_lr; + int bd_refcnt; +#define BPF_REF_HIST 4 /* how many callers to keep around */ + void *bd_ref_lr[BPF_REF_HIST]; + void *bd_unref_lr[BPF_REF_HIST]; + int bd_next_ref_lr; + int bd_next_unref_lr; - struct proc *bd_opened_by; - uuid_t bd_uuid; + struct proc *bd_opened_by; + uuid_t bd_uuid; }; /* Values for bd_state */ -#define BPF_IDLE 0 /* no select in progress or kqueue pending */ -#define BPF_WAITING 1 /* waiting for read timeout in select/kqueue */ -#define BPF_TIMED_OUT 2 /* read timeout has expired in select/kqueue */ -#define BPF_DRAINING 3 /* waiting for timeout routine to finish during close */ +#define BPF_IDLE 0 /* no select in progress or kqueue pending */ +#define BPF_WAITING 1 /* waiting for read timeout in select/kqueue */ +#define BPF_TIMED_OUT 2 /* read timeout has expired in select/kqueue */ +#define BPF_DRAINING 3 /* waiting for timeout routine to finish during close */ /* Test whether a BPF is ready for read(). */ -#define bpf_ready(bd) ((bd)->bd_hlen != 0 || \ - (((bd)->bd_immediate || (bd)->bd_state == BPF_TIMED_OUT) && \ - (bd)->bd_slen != 0)) +#define bpf_ready(bd) ((bd)->bd_hlen != 0 || \ + (((bd)->bd_immediate || (bd)->bd_state == BPF_TIMED_OUT) && \ + (bd)->bd_slen != 0)) /* Values for bd_flags */ -#define BPF_EXTENDED_HDR 0x0001 /* process req. the extended header */ -#define BPF_WANT_PKTAP 0x0002 /* knows how to handle DLT_PKTAP */ -#define BPF_FINALIZE_PKTAP 0x0004 /* finalize pktap header on read */ -#define BPF_KNOTE 0x0008 /* kernel note attached */ -#define BPF_DETACHING 0x0010 /* bpf_d is being detached */ -#define BPF_DETACHED 0x0020 /* bpf_d is detached */ -#define BPF_CLOSING 0x0040 /* bpf_d is being closed */ -#define BPF_TRUNCATE 0x0080 /* truncate the packet payload */ -#define BPF_PKTHDRV2 0x0100 /* pktap header version 2 */ +#define BPF_EXTENDED_HDR 0x0001 /* process req. 
the extended header */ +#define BPF_WANT_PKTAP 0x0002 /* knows how to handle DLT_PKTAP */ +#define BPF_FINALIZE_PKTAP 0x0004 /* finalize pktap header on read */ +#define BPF_KNOTE 0x0008 /* kernel note attached */ +#define BPF_DETACHING 0x0010 /* bpf_d is being detached */ +#define BPF_DETACHED 0x0020 /* bpf_d is detached */ +#define BPF_CLOSING 0x0040 /* bpf_d is being closed */ +#define BPF_TRUNCATE 0x0080 /* truncate the packet payload */ +#define BPF_PKTHDRV2 0x0100 /* pktap header version 2 */ /* * Descriptor associated with each attached hardware interface. */ struct bpf_if { - struct bpf_if *bif_next; /* list of all interfaces */ - struct bpf_d *bif_dlist; /* descriptor list */ - u_int bif_dlt; /* link layer type */ - u_int bif_hdrlen; /* length of header (with padding) */ - u_int bif_exthdrlen; /* length of ext header */ - struct ifnet *bif_ifp; /* corresponding interface */ - bpf_send_func bif_send; - bpf_tap_func bif_tap; + struct bpf_if *bif_next; /* list of all interfaces */ + struct bpf_d *bif_dlist; /* descriptor list */ + u_int bif_dlt; /* link layer type */ + u_int bif_hdrlen; /* length of header (with padding) */ + u_int bif_exthdrlen; /* length of ext header */ + struct ifnet *bif_ifp; /* corresponding interface */ + bpf_send_func bif_send; + bpf_tap_func bif_tap; }; #endif /* KERNEL_PRIVATE */ diff --git a/bsd/net/bridgestp.c b/bsd/net/bridgestp.c index 972a1ae14..fc64a1624 100644 --- a/bsd/net/bridgestp.c +++ b/bsd/net/bridgestp.c @@ -95,12 +95,12 @@ #include decl_lck_mtx_data(static, bstp_task_mtx_data); -static lck_mtx_t *bstp_task_mtx = &bstp_task_mtx_data; -static lck_grp_t *bstp_task_grp = NULL; -static lck_attr_t *bstp_task_attr = NULL; -static thread_t bstp_task_thread; -static TAILQ_HEAD(bstp_task_queue, bstp_task) - bstp_task_queue = TAILQ_HEAD_INITIALIZER(bstp_task_queue); +static lck_mtx_t *bstp_task_mtx = &bstp_task_mtx_data; +static lck_grp_t *bstp_task_grp = NULL; +static lck_attr_t *bstp_task_attr = NULL; +static thread_t bstp_task_thread; +static TAILQ_HEAD(bstp_task_queue, bstp_task) +bstp_task_queue = TAILQ_HEAD_INITIALIZER(bstp_task_queue); static struct bstp_task *bstp_task_queue_running = NULL; static void bstp_create_task_thread(void); @@ -117,104 +117,105 @@ static void bstp_task_drain(struct bstp_task *); -#define BSTP_LOCK_INIT(_bs) (_bs)->bs_mtx = lck_mtx_alloc_init(bstp_lock_grp, bstp_lock_attr) -#define BSTP_LOCK_DESTROY(_bs) lck_mtx_free((_bs)->bs_mtx, bstp_lock_grp) -#define BSTP_LOCK(_bs) lck_mtx_lock((_bs)->bs_mtx) -#define BSTP_UNLOCK(_bs) lck_mtx_unlock((_bs)->bs_mtx) -#define BSTP_LOCK_ASSERT(_bs) LCK_MTX_ASSERT((_bs)->bs_mtx, LCK_MTX_ASSERT_OWNED) +#define BSTP_LOCK_INIT(_bs) (_bs)->bs_mtx = lck_mtx_alloc_init(bstp_lock_grp, bstp_lock_attr) +#define BSTP_LOCK_DESTROY(_bs) lck_mtx_free((_bs)->bs_mtx, bstp_lock_grp) +#define BSTP_LOCK(_bs) lck_mtx_lock((_bs)->bs_mtx) +#define BSTP_UNLOCK(_bs) lck_mtx_unlock((_bs)->bs_mtx) +#define BSTP_LOCK_ASSERT(_bs) LCK_MTX_ASSERT((_bs)->bs_mtx, LCK_MTX_ASSERT_OWNED) -#ifdef BRIDGESTP_DEBUG -#define DPRINTF(fmt, arg...) printf("bstp: " fmt, ##arg) +#ifdef BRIDGESTP_DEBUG +#define DPRINTF(fmt, arg...) printf("bstp: " fmt, ##arg) #else -#define DPRINTF(fmt, arg...) +#define DPRINTF(fmt, arg...) 
#endif -#define PV2ADDR(pv, eaddr) do { \ - eaddr[0] = pv >> 40; \ - eaddr[1] = pv >> 32; \ - eaddr[2] = pv >> 24; \ - eaddr[3] = pv >> 16; \ - eaddr[4] = pv >> 8; \ - eaddr[5] = pv >> 0; \ +#define PV2ADDR(pv, eaddr) do { \ + eaddr[0] = pv >> 40; \ + eaddr[1] = pv >> 32; \ + eaddr[2] = pv >> 24; \ + eaddr[3] = pv >> 16; \ + eaddr[4] = pv >> 8; \ + eaddr[5] = pv >> 0; \ } while (0) -#define INFO_BETTER 1 -#define INFO_SAME 0 -#define INFO_WORSE -1 +#define INFO_BETTER 1 +#define INFO_SAME 0 +#define INFO_WORSE -1 LIST_HEAD(, bstp_state) bstp_list; decl_lck_mtx_data(static, bstp_list_mtx_data); -static lck_mtx_t *bstp_list_mtx = &bstp_list_mtx_data; -static lck_grp_t *bstp_lock_grp = NULL; -static lck_attr_t *bstp_lock_attr = NULL; - -static void bstp_transmit(struct bstp_state *, struct bstp_port *); -static void bstp_transmit_bpdu(struct bstp_state *, struct bstp_port *); -static void bstp_transmit_tcn(struct bstp_state *, struct bstp_port *); -static void bstp_decode_bpdu(struct bstp_port *, struct bstp_cbpdu *, - struct bstp_config_unit *); -static void bstp_send_bpdu(struct bstp_state *, struct bstp_port *, - struct bstp_cbpdu *); -static void bstp_enqueue(struct ifnet *, struct mbuf *); -static int bstp_pdu_flags(struct bstp_port *); -static void bstp_received_stp(struct bstp_state *, struct bstp_port *, - struct mbuf **, struct bstp_tbpdu *); -static void bstp_received_rstp(struct bstp_state *, struct bstp_port *, - struct mbuf **, struct bstp_tbpdu *); -static void bstp_received_tcn(struct bstp_state *, struct bstp_port *, - struct bstp_tcn_unit *); -static void bstp_received_bpdu(struct bstp_state *, struct bstp_port *, - struct bstp_config_unit *); -static int bstp_pdu_rcvtype(struct bstp_port *, struct bstp_config_unit *); -static int bstp_pdu_bettersame(struct bstp_port *, int); -static int bstp_info_cmp(struct bstp_pri_vector *, - struct bstp_pri_vector *); -static int bstp_info_superior(struct bstp_pri_vector *, - struct bstp_pri_vector *); -static void bstp_assign_roles(struct bstp_state *); -static void bstp_update_roles(struct bstp_state *, struct bstp_port *); -static void bstp_update_state(struct bstp_state *, struct bstp_port *); -static void bstp_update_tc(struct bstp_port *); -static void bstp_update_info(struct bstp_port *); -static void bstp_set_other_tcprop(struct bstp_port *); -static void bstp_set_all_reroot(struct bstp_state *); -static void bstp_set_all_sync(struct bstp_state *); -static void bstp_set_port_state(struct bstp_port *, int); -static void bstp_set_port_role(struct bstp_port *, int); -static void bstp_set_port_proto(struct bstp_port *, int); -static void bstp_set_port_tc(struct bstp_port *, int); -static void bstp_set_timer_tc(struct bstp_port *); -static void bstp_set_timer_msgage(struct bstp_port *); -static int bstp_rerooted(struct bstp_state *, struct bstp_port *); -static uint32_t bstp_calc_path_cost(struct bstp_port *); -static void bstp_notify_state(void *, int); -static void bstp_notify_rtage(void *, int); -static void bstp_ifupdstatus(struct bstp_state *, struct bstp_port *); -static void bstp_enable_port(struct bstp_state *, struct bstp_port *); -static void bstp_disable_port(struct bstp_state *, struct bstp_port *); -static void bstp_tick(void *); -static void bstp_timer_start(struct bstp_timer *, uint16_t); -static void bstp_timer_stop(struct bstp_timer *); -static void bstp_timer_latch(struct bstp_timer *); -static int bstp_timer_expired(struct bstp_timer *); -static void bstp_hello_timer_expiry(struct bstp_state *, - struct bstp_port *); 
-static void bstp_message_age_expiry(struct bstp_state *, - struct bstp_port *); -static void bstp_migrate_delay_expiry(struct bstp_state *, - struct bstp_port *); -static void bstp_edge_delay_expiry(struct bstp_state *, - struct bstp_port *); -static int bstp_addr_cmp(const uint8_t *, const uint8_t *); -static int bstp_same_bridgeid(uint64_t, uint64_t); -static void bstp_reinit(struct bstp_state *); +static lck_mtx_t *bstp_list_mtx = &bstp_list_mtx_data; +static lck_grp_t *bstp_lock_grp = NULL; +static lck_attr_t *bstp_lock_attr = NULL; + +static void bstp_transmit(struct bstp_state *, struct bstp_port *); +static void bstp_transmit_bpdu(struct bstp_state *, struct bstp_port *); +static void bstp_transmit_tcn(struct bstp_state *, struct bstp_port *); +static void bstp_decode_bpdu(struct bstp_port *, struct bstp_cbpdu *, + struct bstp_config_unit *); +static void bstp_send_bpdu(struct bstp_state *, struct bstp_port *, + struct bstp_cbpdu *); +static void bstp_enqueue(struct ifnet *, struct mbuf *); +static int bstp_pdu_flags(struct bstp_port *); +static void bstp_received_stp(struct bstp_state *, struct bstp_port *, + struct mbuf **, struct bstp_tbpdu *); +static void bstp_received_rstp(struct bstp_state *, struct bstp_port *, + struct mbuf **, struct bstp_tbpdu *); +static void bstp_received_tcn(struct bstp_state *, struct bstp_port *, + struct bstp_tcn_unit *); +static void bstp_received_bpdu(struct bstp_state *, struct bstp_port *, + struct bstp_config_unit *); +static int bstp_pdu_rcvtype(struct bstp_port *, struct bstp_config_unit *); +static int bstp_pdu_bettersame(struct bstp_port *, int); +static int bstp_info_cmp(struct bstp_pri_vector *, + struct bstp_pri_vector *); +static int bstp_info_superior(struct bstp_pri_vector *, + struct bstp_pri_vector *); +static void bstp_assign_roles(struct bstp_state *); +static void bstp_update_roles(struct bstp_state *, struct bstp_port *); +static void bstp_update_state(struct bstp_state *, struct bstp_port *); +static void bstp_update_tc(struct bstp_port *); +static void bstp_update_info(struct bstp_port *); +static void bstp_set_other_tcprop(struct bstp_port *); +static void bstp_set_all_reroot(struct bstp_state *); +static void bstp_set_all_sync(struct bstp_state *); +static void bstp_set_port_state(struct bstp_port *, int); +static void bstp_set_port_role(struct bstp_port *, int); +static void bstp_set_port_proto(struct bstp_port *, int); +static void bstp_set_port_tc(struct bstp_port *, int); +static void bstp_set_timer_tc(struct bstp_port *); +static void bstp_set_timer_msgage(struct bstp_port *); +static int bstp_rerooted(struct bstp_state *, struct bstp_port *); +static uint32_t bstp_calc_path_cost(struct bstp_port *); +static void bstp_notify_state(void *, int); +static void bstp_notify_rtage(void *, int); +static void bstp_ifupdstatus(struct bstp_state *, struct bstp_port *); +static void bstp_enable_port(struct bstp_state *, struct bstp_port *); +static void bstp_disable_port(struct bstp_state *, struct bstp_port *); +static void bstp_tick(void *); +static void bstp_timer_start(struct bstp_timer *, uint16_t); +static void bstp_timer_stop(struct bstp_timer *); +static void bstp_timer_latch(struct bstp_timer *); +static int bstp_timer_expired(struct bstp_timer *); +static void bstp_hello_timer_expiry(struct bstp_state *, + struct bstp_port *); +static void bstp_message_age_expiry(struct bstp_state *, + struct bstp_port *); +static void bstp_migrate_delay_expiry(struct bstp_state *, + struct bstp_port *); +static void 
bstp_edge_delay_expiry(struct bstp_state *, + struct bstp_port *); +static int bstp_addr_cmp(const uint8_t *, const uint8_t *); +static int bstp_same_bridgeid(uint64_t, uint64_t); +static void bstp_reinit(struct bstp_state *); static void bstp_transmit(struct bstp_state *bs, struct bstp_port *bp) { - if (bs->bs_running == 0) + if (bs->bs_running == 0) { return; + } /* * a PDU can only be sent if we have tx quota left and the @@ -225,23 +226,24 @@ bstp_transmit(struct bstp_state *bs, struct bstp_port *bp) bstp_hello_timer_expiry(bs, bp); return; } - if (bp->bp_txcount > bs->bs_txholdcount) + if (bp->bp_txcount > bs->bs_txholdcount) { /* Ran out of karma */ return; + } if (bp->bp_protover == BSTP_PROTO_RSTP) { bstp_transmit_bpdu(bs, bp); bp->bp_tc_ack = 0; } else { /* STP */ switch (bp->bp_role) { - case BSTP_ROLE_DESIGNATED: - bstp_transmit_bpdu(bs, bp); - bp->bp_tc_ack = 0; - break; + case BSTP_ROLE_DESIGNATED: + bstp_transmit_bpdu(bs, bp); + bp->bp_tc_ack = 0; + break; - case BSTP_ROLE_ROOT: - bstp_transmit_tcn(bs, bp); - break; + case BSTP_ROLE_ROOT: + bstp_transmit_tcn(bs, bp); + break; } } bstp_timer_start(&bp->bp_hello_timer, bp->bp_desg_htime); @@ -272,13 +274,13 @@ bstp_transmit_bpdu(struct bstp_state *bs, struct bstp_port *bp) bpdu.cbu_flags = bstp_pdu_flags(bp); switch (bp->bp_protover) { - case BSTP_PROTO_STP: - bpdu.cbu_bpdutype = BSTP_MSGTYPE_CFG; - break; + case BSTP_PROTO_STP: + bpdu.cbu_bpdutype = BSTP_MSGTYPE_CFG; + break; - case BSTP_PROTO_RSTP: - bpdu.cbu_bpdutype = BSTP_MSGTYPE_RSTP; - break; + case BSTP_PROTO_RSTP: + bpdu.cbu_bpdutype = BSTP_MSGTYPE_RSTP; + break; } bstp_send_bpdu(bs, bp, &bpdu); @@ -292,17 +294,19 @@ bstp_transmit_tcn(struct bstp_state *bs, struct bstp_port *bp) struct ether_header *eh; struct mbuf *m; int touched = bs ? 1 : 0; - + touched++; KASSERT(bp == bs->bs_root_port, ("%s: bad root port\n", __func__)); - if ((ifp->if_flags & IFF_RUNNING) == 0) + if ((ifp->if_flags & IFF_RUNNING) == 0) { return; + } MGETHDR(m, M_DONTWAIT, MT_DATA); - if (m == NULL) + if (m == NULL) { return; + } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = sizeof(*eh) + sizeof(bpdu); @@ -362,40 +366,40 @@ bstp_decode_bpdu(struct bstp_port *bp, struct bstp_cbpdu *cpdu, /* Strip off unused flags in STP mode */ flags = cpdu->cbu_flags; switch (cpdu->cbu_protover) { - case BSTP_PROTO_STP: - flags &= BSTP_PDU_STPMASK; - /* A STP BPDU explicitly conveys a Designated Port */ - cu->cu_role = BSTP_ROLE_DESIGNATED; - break; + case BSTP_PROTO_STP: + flags &= BSTP_PDU_STPMASK; + /* A STP BPDU explicitly conveys a Designated Port */ + cu->cu_role = BSTP_ROLE_DESIGNATED; + break; - case BSTP_PROTO_RSTP: - flags &= BSTP_PDU_RSTPMASK; - break; + case BSTP_PROTO_RSTP: + flags &= BSTP_PDU_RSTPMASK; + break; } cu->cu_topology_change_ack = - (flags & BSTP_PDU_F_TCA) ? 1 : 0; + (flags & BSTP_PDU_F_TCA) ? 1 : 0; cu->cu_proposal = - (flags & BSTP_PDU_F_P) ? 1 : 0; + (flags & BSTP_PDU_F_P) ? 1 : 0; cu->cu_agree = - (flags & BSTP_PDU_F_A) ? 1 : 0; + (flags & BSTP_PDU_F_A) ? 1 : 0; cu->cu_learning = - (flags & BSTP_PDU_F_L) ? 1 : 0; + (flags & BSTP_PDU_F_L) ? 1 : 0; cu->cu_forwarding = - (flags & BSTP_PDU_F_F) ? 1 : 0; + (flags & BSTP_PDU_F_F) ? 1 : 0; cu->cu_topology_change = - (flags & BSTP_PDU_F_TC) ? 1 : 0; + (flags & BSTP_PDU_F_TC) ? 
1 : 0; switch ((flags & BSTP_PDU_PRMASK) >> BSTP_PDU_PRSHIFT) { - case BSTP_PDU_F_ROOT: - cu->cu_role = BSTP_ROLE_ROOT; - break; - case BSTP_PDU_F_ALT: - cu->cu_role = BSTP_ROLE_ALTERNATE; - break; - case BSTP_PDU_F_DESG: - cu->cu_role = BSTP_ROLE_DESIGNATED; - break; + case BSTP_PDU_F_ROOT: + cu->cu_role = BSTP_ROLE_ROOT; + break; + case BSTP_PDU_F_ALT: + cu->cu_role = BSTP_ROLE_ALTERNATE; + break; + case BSTP_PDU_F_DESG: + cu->cu_role = BSTP_ROLE_DESIGNATED; + break; } } @@ -411,12 +415,14 @@ bstp_send_bpdu(struct bstp_state *bs, struct bstp_port *bp, ifp = bp->bp_ifp; - if ((ifp->if_flags & IFF_RUNNING) == 0) + if ((ifp->if_flags & IFF_RUNNING) == 0) { return; + } MGETHDR(m, M_DONTWAIT, MT_DATA); - if (m == NULL) + if (m == NULL) { return; + } eh = mtod(m, struct ether_header *); @@ -428,25 +434,25 @@ bstp_send_bpdu(struct bstp_state *bs, struct bstp_port *bp, memcpy(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN); switch (bpdu->cbu_bpdutype) { - case BSTP_MSGTYPE_CFG: - bpdu->cbu_protover = BSTP_PROTO_STP; - m->m_pkthdr.len = sizeof(*eh) + BSTP_BPDU_STP_LEN; - eh->ether_type = htons(BSTP_BPDU_STP_LEN); - memcpy(mtod(m, caddr_t) + sizeof(*eh), bpdu, - BSTP_BPDU_STP_LEN); - break; + case BSTP_MSGTYPE_CFG: + bpdu->cbu_protover = BSTP_PROTO_STP; + m->m_pkthdr.len = sizeof(*eh) + BSTP_BPDU_STP_LEN; + eh->ether_type = htons(BSTP_BPDU_STP_LEN); + memcpy(mtod(m, caddr_t) + sizeof(*eh), bpdu, + BSTP_BPDU_STP_LEN); + break; - case BSTP_MSGTYPE_RSTP: - bpdu->cbu_protover = BSTP_PROTO_RSTP; - bpdu->cbu_versionlen = htons(0); - m->m_pkthdr.len = sizeof(*eh) + BSTP_BPDU_RSTP_LEN; - eh->ether_type = htons(BSTP_BPDU_RSTP_LEN); - memcpy(mtod(m, caddr_t) + sizeof(*eh), bpdu, - BSTP_BPDU_RSTP_LEN); - break; + case BSTP_MSGTYPE_RSTP: + bpdu->cbu_protover = BSTP_PROTO_RSTP; + bpdu->cbu_versionlen = htons(0); + m->m_pkthdr.len = sizeof(*eh) + BSTP_BPDU_RSTP_LEN; + eh->ether_type = htons(BSTP_BPDU_RSTP_LEN); + memcpy(mtod(m, caddr_t) + sizeof(*eh), bpdu, + BSTP_BPDU_RSTP_LEN); + break; - default: - panic("not implemented"); + default: + panic("not implemented"); } m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len; @@ -460,9 +466,9 @@ bstp_enqueue(struct ifnet *dst_ifp, struct mbuf *m) { errno_t error = 0; u_int32_t len = m->m_pkthdr.len; - - m->m_flags |= M_PROTO1; //set to avoid loops - + + m->m_flags |= M_PROTO1; //set to avoid loops + error = ifnet_output_raw(dst_ifp, 0, m); if (error == 0) { (void) ifnet_stat_increment_out(dst_ifp, 1, len, 0); @@ -476,56 +482,60 @@ bstp_pdu_flags(struct bstp_port *bp) { int flags = 0; - if (bp->bp_proposing && bp->bp_state != BSTP_IFSTATE_FORWARDING) + if (bp->bp_proposing && bp->bp_state != BSTP_IFSTATE_FORWARDING) { flags |= BSTP_PDU_F_P; + } - if (bp->bp_agree) + if (bp->bp_agree) { flags |= BSTP_PDU_F_A; + } - if (bp->bp_tc_timer.active) + if (bp->bp_tc_timer.active) { flags |= BSTP_PDU_F_TC; + } - if (bp->bp_tc_ack) + if (bp->bp_tc_ack) { flags |= BSTP_PDU_F_TCA; + } switch (bp->bp_state) { - case BSTP_IFSTATE_LEARNING: - flags |= BSTP_PDU_F_L; - break; + case BSTP_IFSTATE_LEARNING: + flags |= BSTP_PDU_F_L; + break; - case BSTP_IFSTATE_FORWARDING: - flags |= (BSTP_PDU_F_L | BSTP_PDU_F_F); - break; + case BSTP_IFSTATE_FORWARDING: + flags |= (BSTP_PDU_F_L | BSTP_PDU_F_F); + break; } switch (bp->bp_role) { - case BSTP_ROLE_ROOT: - flags |= - (BSTP_PDU_F_ROOT << BSTP_PDU_PRSHIFT); - break; + case BSTP_ROLE_ROOT: + flags |= + (BSTP_PDU_F_ROOT << BSTP_PDU_PRSHIFT); + break; - case BSTP_ROLE_ALTERNATE: - case BSTP_ROLE_BACKUP: /* fall through */ - flags |= - 
(BSTP_PDU_F_ALT << BSTP_PDU_PRSHIFT); - break; + case BSTP_ROLE_ALTERNATE: + case BSTP_ROLE_BACKUP: /* fall through */ + flags |= + (BSTP_PDU_F_ALT << BSTP_PDU_PRSHIFT); + break; - case BSTP_ROLE_DESIGNATED: - flags |= - (BSTP_PDU_F_DESG << BSTP_PDU_PRSHIFT); - break; + case BSTP_ROLE_DESIGNATED: + flags |= + (BSTP_PDU_F_DESG << BSTP_PDU_PRSHIFT); + break; } /* Strip off unused flags in either mode */ switch (bp->bp_protover) { - case BSTP_PROTO_STP: - flags &= BSTP_PDU_STPMASK; - break; - case BSTP_PROTO_RSTP: - flags &= BSTP_PDU_RSTPMASK; - break; + case BSTP_PROTO_STP: + flags &= BSTP_PDU_STPMASK; + break; + case BSTP_PROTO_RSTP: + flags &= BSTP_PDU_RSTPMASK; + break; } - return (flags); + return flags; } struct mbuf * @@ -538,7 +548,7 @@ bstp_input(struct bstp_port *bp, __unused struct ifnet *ifp, struct mbuf *m) if (bp->bp_active == 0) { m_freem(m); - return (NULL); + return NULL; } BSTP_LOCK(bs); @@ -546,43 +556,50 @@ bstp_input(struct bstp_port *bp, __unused struct ifnet *ifp, struct mbuf *m) eh = mtod(m, struct ether_header *); len = ntohs(eh->ether_type); - if (len < sizeof(tpdu)) + if (len < sizeof(tpdu)) { goto out; + } m_adj(m, ETHER_HDR_LEN); - if (m->m_pkthdr.len > len) + if (m->m_pkthdr.len > len) { m_adj(m, len - m->m_pkthdr.len); + } if ((unsigned int)m->m_len < sizeof(tpdu) && - (m = m_pullup(m, sizeof(tpdu))) == NULL) + (m = m_pullup(m, sizeof(tpdu))) == NULL) { goto out; + } memcpy(&tpdu, mtod(m, caddr_t), sizeof(tpdu)); /* basic packet checks */ if (tpdu.tbu_dsap != LLC_8021D_LSAP || tpdu.tbu_ssap != LLC_8021D_LSAP || - tpdu.tbu_ctl != LLC_UI) + tpdu.tbu_ctl != LLC_UI) { goto out; - if (tpdu.tbu_protoid != BSTP_PROTO_ID) + } + if (tpdu.tbu_protoid != BSTP_PROTO_ID) { goto out; + } /* * We can treat later versions of the PDU as the same as the maximum * version we implement. All additional parameters/flags are ignored. */ - if (tpdu.tbu_protover > BSTP_PROTO_MAX) + if (tpdu.tbu_protover > BSTP_PROTO_MAX) { tpdu.tbu_protover = BSTP_PROTO_MAX; + } if (tpdu.tbu_protover != bp->bp_protover) { /* * Wait for the migration delay timer to expire before changing * protocol version to avoid flip-flops. 
*/ - if (bp->bp_flags & BSTP_PORT_CANMIGRATE) + if (bp->bp_flags & BSTP_PORT_CANMIGRATE) { bstp_set_port_proto(bp, tpdu.tbu_protover); - else + } else { goto out; + } } /* Clear operedge upon receiving a PDU on the port */ @@ -591,19 +608,20 @@ bstp_input(struct bstp_port *bp, __unused struct ifnet *ifp, struct mbuf *m) BSTP_DEFAULT_MIGRATE_DELAY); switch (tpdu.tbu_protover) { - case BSTP_PROTO_STP: - bstp_received_stp(bs, bp, &m, &tpdu); - break; + case BSTP_PROTO_STP: + bstp_received_stp(bs, bp, &m, &tpdu); + break; - case BSTP_PROTO_RSTP: - bstp_received_rstp(bs, bp, &m, &tpdu); - break; + case BSTP_PROTO_RSTP: + bstp_received_rstp(bs, bp, &m, &tpdu); + break; } out: BSTP_UNLOCK(bs); - if (m) + if (m) { m_freem(m); - return (NULL); + } + return NULL; } static void @@ -621,8 +639,9 @@ bstp_received_stp(struct bstp_state *bs, struct bstp_port *bp, break; case BSTP_MSGTYPE_CFG: if ((*mp)->m_len < BSTP_BPDU_STP_LEN && - (*mp = m_pullup(*mp, BSTP_BPDU_STP_LEN)) == NULL) + (*mp = m_pullup(*mp, BSTP_BPDU_STP_LEN)) == NULL) { return; + } memcpy(&cpdu, mtod(*mp, caddr_t), BSTP_BPDU_STP_LEN); bstp_decode_bpdu(bp, &cpdu, cu); @@ -638,12 +657,14 @@ bstp_received_rstp(struct bstp_state *bs, struct bstp_port *bp, struct bstp_cbpdu cpdu; struct bstp_config_unit *cu = &bp->bp_msg_cu; - if (tpdu->tbu_bpdutype != BSTP_MSGTYPE_RSTP) + if (tpdu->tbu_bpdutype != BSTP_MSGTYPE_RSTP) { return; + } if ((*mp)->m_len < BSTP_BPDU_RSTP_LEN && - (*mp = m_pullup(*mp, BSTP_BPDU_RSTP_LEN)) == NULL) + (*mp = m_pullup(*mp, BSTP_BPDU_RSTP_LEN)) == NULL) { return; + } memcpy(&cpdu, mtod(*mp, caddr_t), BSTP_BPDU_RSTP_LEN); bstp_decode_bpdu(bp, &cpdu, cu); @@ -668,84 +689,94 @@ bstp_received_bpdu(struct bstp_state *bs, struct bstp_port *bp, /* We need to have transitioned to INFO_MINE before proceeding */ switch (bp->bp_infois) { - case BSTP_INFO_DISABLED: - case BSTP_INFO_AGED: - return; + case BSTP_INFO_DISABLED: + case BSTP_INFO_AGED: + return; } type = bstp_pdu_rcvtype(bp, cu); switch (type) { - case BSTP_PDU_SUPERIOR: - bs->bs_allsynced = 0; - bp->bp_agreed = 0; - bp->bp_proposing = 0; + case BSTP_PDU_SUPERIOR: + bs->bs_allsynced = 0; + bp->bp_agreed = 0; + bp->bp_proposing = 0; - if (cu->cu_proposal && cu->cu_forwarding == 0) - bp->bp_proposed = 1; - if (cu->cu_topology_change) - bp->bp_rcvdtc = 1; - if (cu->cu_topology_change_ack) - bp->bp_rcvdtca = 1; - - if (bp->bp_agree && - !bstp_pdu_bettersame(bp, BSTP_INFO_RECEIVED)) - bp->bp_agree = 0; - - /* copy the received priority and timers to the port */ - bp->bp_port_pv = cu->cu_pv; - bp->bp_port_msg_age = cu->cu_message_age; - bp->bp_port_max_age = cu->cu_max_age; - bp->bp_port_fdelay = cu->cu_forward_delay; - bp->bp_port_htime = - (cu->cu_hello_time > BSTP_MIN_HELLO_TIME ? 
- cu->cu_hello_time : BSTP_MIN_HELLO_TIME); - - /* set expiry for the new info */ - bstp_set_timer_msgage(bp); - - bp->bp_infois = BSTP_INFO_RECEIVED; - bstp_assign_roles(bs); - break; + if (cu->cu_proposal && cu->cu_forwarding == 0) { + bp->bp_proposed = 1; + } + if (cu->cu_topology_change) { + bp->bp_rcvdtc = 1; + } + if (cu->cu_topology_change_ack) { + bp->bp_rcvdtca = 1; + } - case BSTP_PDU_REPEATED: - if (cu->cu_proposal && cu->cu_forwarding == 0) - bp->bp_proposed = 1; - if (cu->cu_topology_change) - bp->bp_rcvdtc = 1; - if (cu->cu_topology_change_ack) - bp->bp_rcvdtca = 1; + if (bp->bp_agree && + !bstp_pdu_bettersame(bp, BSTP_INFO_RECEIVED)) { + bp->bp_agree = 0; + } - /* rearm the age timer */ - bstp_set_timer_msgage(bp); - break; + /* copy the received priority and timers to the port */ + bp->bp_port_pv = cu->cu_pv; + bp->bp_port_msg_age = cu->cu_message_age; + bp->bp_port_max_age = cu->cu_max_age; + bp->bp_port_fdelay = cu->cu_forward_delay; + bp->bp_port_htime = + (cu->cu_hello_time > BSTP_MIN_HELLO_TIME ? + cu->cu_hello_time : BSTP_MIN_HELLO_TIME); - case BSTP_PDU_INFERIOR: - if (cu->cu_learning) { - bp->bp_agreed = 1; - bp->bp_proposing = 0; - } - break; + /* set expiry for the new info */ + bstp_set_timer_msgage(bp); - case BSTP_PDU_INFERIORALT: - /* - * only point to point links are allowed fast - * transitions to forwarding. - */ - if (cu->cu_agree && bp->bp_ptp_link) { - bp->bp_agreed = 1; - bp->bp_proposing = 0; - } else - bp->bp_agreed = 0; - - if (cu->cu_topology_change) - bp->bp_rcvdtc = 1; - if (cu->cu_topology_change_ack) - bp->bp_rcvdtca = 1; - break; + bp->bp_infois = BSTP_INFO_RECEIVED; + bstp_assign_roles(bs); + break; + + case BSTP_PDU_REPEATED: + if (cu->cu_proposal && cu->cu_forwarding == 0) { + bp->bp_proposed = 1; + } + if (cu->cu_topology_change) { + bp->bp_rcvdtc = 1; + } + if (cu->cu_topology_change_ack) { + bp->bp_rcvdtca = 1; + } + + /* rearm the age timer */ + bstp_set_timer_msgage(bp); + break; + + case BSTP_PDU_INFERIOR: + if (cu->cu_learning) { + bp->bp_agreed = 1; + bp->bp_proposing = 0; + } + break; + + case BSTP_PDU_INFERIORALT: + /* + * only point to point links are allowed fast + * transitions to forwarding. 
+ */ + if (cu->cu_agree && bp->bp_ptp_link) { + bp->bp_agreed = 1; + bp->bp_proposing = 0; + } else { + bp->bp_agreed = 0; + } + + if (cu->cu_topology_change) { + bp->bp_rcvdtc = 1; + } + if (cu->cu_topology_change_ack) { + bp->bp_rcvdtca = 1; + } + break; - case BSTP_PDU_OTHER: - return; /* do nothing */ + case BSTP_PDU_OTHER: + return; /* do nothing */ } /* update the state machines with the new data */ bstp_update_state(bs, bp); @@ -761,39 +792,42 @@ bstp_pdu_rcvtype(struct bstp_port *bp, struct bstp_config_unit *cu) switch (cu->cu_role) { case BSTP_ROLE_DESIGNATED: - if (bstp_info_superior(&bp->bp_port_pv, &cu->cu_pv)) + if (bstp_info_superior(&bp->bp_port_pv, &cu->cu_pv)) { /* bpdu priority is superior */ type = BSTP_PDU_SUPERIOR; - else if (bstp_info_cmp(&bp->bp_port_pv, &cu->cu_pv) == + } else if (bstp_info_cmp(&bp->bp_port_pv, &cu->cu_pv) == INFO_SAME) { if (bp->bp_port_msg_age != cu->cu_message_age || bp->bp_port_max_age != cu->cu_max_age || bp->bp_port_fdelay != cu->cu_forward_delay || - bp->bp_port_htime != cu->cu_hello_time) + bp->bp_port_htime != cu->cu_hello_time) { /* bpdu priority is equal and timers differ */ type = BSTP_PDU_SUPERIOR; - else + } else { /* bpdu is equal */ type = BSTP_PDU_REPEATED; - } else + } + } else { /* bpdu priority is worse */ type = BSTP_PDU_INFERIOR; + } break; case BSTP_ROLE_ROOT: case BSTP_ROLE_ALTERNATE: case BSTP_ROLE_BACKUP: - if (bstp_info_cmp(&bp->bp_port_pv, &cu->cu_pv) <= INFO_SAME) + if (bstp_info_cmp(&bp->bp_port_pv, &cu->cu_pv) <= INFO_SAME) { /* * not a designated port and priority is the same or * worse */ type = BSTP_PDU_INFERIORALT; + } break; } - return (type); + return type; } static int @@ -801,42 +835,52 @@ bstp_pdu_bettersame(struct bstp_port *bp, int newinfo) { if (newinfo == BSTP_INFO_RECEIVED && bp->bp_infois == BSTP_INFO_RECEIVED && - bstp_info_cmp(&bp->bp_port_pv, &bp->bp_msg_cu.cu_pv) >= INFO_SAME) - return (1); + bstp_info_cmp(&bp->bp_port_pv, &bp->bp_msg_cu.cu_pv) >= INFO_SAME) { + return 1; + } if (newinfo == BSTP_INFO_MINE && bp->bp_infois == BSTP_INFO_MINE && - bstp_info_cmp(&bp->bp_port_pv, &bp->bp_desg_pv) >= INFO_SAME) - return (1); + bstp_info_cmp(&bp->bp_port_pv, &bp->bp_desg_pv) >= INFO_SAME) { + return 1; + } - return (0); + return 0; } static int bstp_info_cmp(struct bstp_pri_vector *pv, struct bstp_pri_vector *cpv) { - if (cpv->pv_root_id < pv->pv_root_id) - return (INFO_BETTER); - if (cpv->pv_root_id > pv->pv_root_id) - return (INFO_WORSE); + if (cpv->pv_root_id < pv->pv_root_id) { + return INFO_BETTER; + } + if (cpv->pv_root_id > pv->pv_root_id) { + return INFO_WORSE; + } - if (cpv->pv_cost < pv->pv_cost) - return (INFO_BETTER); - if (cpv->pv_cost > pv->pv_cost) - return (INFO_WORSE); + if (cpv->pv_cost < pv->pv_cost) { + return INFO_BETTER; + } + if (cpv->pv_cost > pv->pv_cost) { + return INFO_WORSE; + } - if (cpv->pv_dbridge_id < pv->pv_dbridge_id) - return (INFO_BETTER); - if (cpv->pv_dbridge_id > pv->pv_dbridge_id) - return (INFO_WORSE); + if (cpv->pv_dbridge_id < pv->pv_dbridge_id) { + return INFO_BETTER; + } + if (cpv->pv_dbridge_id > pv->pv_dbridge_id) { + return INFO_WORSE; + } - if (cpv->pv_dport_id < pv->pv_dport_id) - return (INFO_BETTER); - if (cpv->pv_dport_id > pv->pv_dport_id) - return (INFO_WORSE); + if (cpv->pv_dport_id < pv->pv_dport_id) { + return INFO_BETTER; + } + if (cpv->pv_dport_id > pv->pv_dport_id) { + return INFO_WORSE; + } - return (INFO_SAME); + return INFO_SAME; } /* @@ -851,9 +895,10 @@ bstp_info_superior(struct bstp_pri_vector *pv, { if (bstp_info_cmp(pv, cpv) == INFO_BETTER 
|| (bstp_same_bridgeid(pv->pv_dbridge_id, cpv->pv_dbridge_id) && - (cpv->pv_dport_id & 0xfff) == (pv->pv_dport_id & 0xfff))) - return (1); - return (0); + (cpv->pv_dport_id & 0xfff) == (pv->pv_dport_id & 0xfff))) { + return 1; + } + return 0; } static void @@ -872,8 +917,9 @@ bstp_assign_roles(struct bstp_state *bs) /* check if any received info supersedes us */ LIST_FOREACH(bp, &bs->bs_bplist, bp_next) { - if (bp->bp_infois != BSTP_INFO_RECEIVED) + if (bp->bp_infois != BSTP_INFO_RECEIVED) { continue; + } pv = bp->bp_port_pv; pv.pv_cost += bp->bp_path_cost; @@ -931,8 +977,9 @@ bstp_assign_roles(struct bstp_state *bs) (bp->bp_port_msg_age != rbp->bp_port_msg_age || bp->bp_port_max_age != rbp->bp_port_max_age || bp->bp_port_fdelay != rbp->bp_port_fdelay || - bp->bp_port_htime != rbp->bp_port_htime))) + bp->bp_port_htime != rbp->bp_port_htime))) { bstp_update_info(bp); + } break; case BSTP_INFO_RECEIVED: @@ -944,7 +991,7 @@ bstp_assign_roles(struct bstp_state *bs) bstp_set_port_role(bp, BSTP_ROLE_ROOT); bs->bs_root_port = bp; } else if (bstp_info_cmp(&bp->bp_port_pv, - &bp->bp_desg_pv) == INFO_BETTER) { + &bp->bp_desg_pv) == INFO_BETTER) { /* * the port priority is lower than the root * port. @@ -953,8 +1000,8 @@ bstp_assign_roles(struct bstp_state *bs) bstp_update_info(bp); } else { if (bstp_same_bridgeid( - bp->bp_port_pv.pv_dbridge_id, - bs->bs_bridge_pv.pv_dbridge_id)) { + bp->bp_port_pv.pv_dbridge_id, + bs->bs_bridge_pv.pv_dbridge_id)) { /* * the designated bridge refers to * another port on this bridge. @@ -988,7 +1035,7 @@ bstp_update_state(struct bstp_state *bs, struct bstp_port *bp) synced = 1; LIST_FOREACH(bp2, &bs->bs_bplist, bp_next) { if (!(bp2->bp_synced || - bp2->bp_role == BSTP_ROLE_ROOT)) { + bp2->bp_role == BSTP_ROLE_ROOT)) { synced = 0; break; } @@ -1108,7 +1155,7 @@ bstp_update_roles(struct bstp_state *bs, struct bstp_port *bp) bp->bp_flags |= BSTP_PORT_NEWINFO; bstp_timer_start(&bp->bp_edge_delay_timer, (bp->bp_ptp_link ? 
BSTP_DEFAULT_MIGRATE_DELAY : - bp->bp_desg_max_age)); + bp->bp_desg_max_age)); DPRINTF("%s -> DESIGNATED_PROPOSE\n", bp->bp_ifp->if_xname); } @@ -1119,8 +1166,9 @@ bstp_update_roles(struct bstp_state *bs, struct bstp_port *bp) (bp->bp_recent_root_timer.active == 0 || !bp->bp_reroot) && !bp->bp_sync) { #ifdef BRIDGESTP_DEBUG - if (bp->bp_agreed) + if (bp->bp_agreed) { DPRINTF("%s -> AGREED\n", bp->bp_ifp->if_xname); + } #endif /* BRIDGESTP_DEBUG */ /* * If agreed|operedge then go straight to forwarding, @@ -1131,8 +1179,9 @@ bstp_update_roles(struct bstp_state *bs, struct bstp_port *bp) bstp_set_port_state(bp, BSTP_IFSTATE_FORWARDING); bp->bp_agreed = bp->bp_protover; - } else if (bp->bp_state == BSTP_IFSTATE_DISCARDING) + } else if (bp->bp_state == BSTP_IFSTATE_DISCARDING) { bstp_set_port_state(bp, BSTP_IFSTATE_LEARNING); + } } if (((bp->bp_sync && !bp->bp_synced) || @@ -1150,65 +1199,73 @@ bstp_update_roles(struct bstp_state *bs, struct bstp_port *bp) break; } - if (bp->bp_flags & BSTP_PORT_NEWINFO) + if (bp->bp_flags & BSTP_PORT_NEWINFO) { bstp_transmit(bs, bp); + } } static void bstp_update_tc(struct bstp_port *bp) { switch (bp->bp_tcstate) { - case BSTP_TCSTATE_ACTIVE: - if ((bp->bp_role != BSTP_ROLE_DESIGNATED && - bp->bp_role != BSTP_ROLE_ROOT) || bp->bp_operedge) - bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING); + case BSTP_TCSTATE_ACTIVE: + if ((bp->bp_role != BSTP_ROLE_DESIGNATED && + bp->bp_role != BSTP_ROLE_ROOT) || bp->bp_operedge) { + bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING); + } - if (bp->bp_rcvdtcn) - bstp_set_port_tc(bp, BSTP_TCSTATE_TCN); - if (bp->bp_rcvdtc) - bstp_set_port_tc(bp, BSTP_TCSTATE_TC); + if (bp->bp_rcvdtcn) { + bstp_set_port_tc(bp, BSTP_TCSTATE_TCN); + } + if (bp->bp_rcvdtc) { + bstp_set_port_tc(bp, BSTP_TCSTATE_TC); + } - if (bp->bp_tc_prop && !bp->bp_operedge) - bstp_set_port_tc(bp, BSTP_TCSTATE_PROPAG); + if (bp->bp_tc_prop && !bp->bp_operedge) { + bstp_set_port_tc(bp, BSTP_TCSTATE_PROPAG); + } - if (bp->bp_rcvdtca) - bstp_set_port_tc(bp, BSTP_TCSTATE_ACK); - break; + if (bp->bp_rcvdtca) { + bstp_set_port_tc(bp, BSTP_TCSTATE_ACK); + } + break; - case BSTP_TCSTATE_INACTIVE: - if ((bp->bp_state == BSTP_IFSTATE_LEARNING || - bp->bp_state == BSTP_IFSTATE_FORWARDING) && - bp->bp_fdbflush == 0) - bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING); - break; + case BSTP_TCSTATE_INACTIVE: + if ((bp->bp_state == BSTP_IFSTATE_LEARNING || + bp->bp_state == BSTP_IFSTATE_FORWARDING) && + bp->bp_fdbflush == 0) { + bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING); + } + break; - case BSTP_TCSTATE_LEARNING: - if (bp->bp_rcvdtc || bp->bp_rcvdtcn || bp->bp_rcvdtca || - bp->bp_tc_prop) - bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING); - else if (bp->bp_role != BSTP_ROLE_DESIGNATED && - bp->bp_role != BSTP_ROLE_ROOT && - bp->bp_state == BSTP_IFSTATE_DISCARDING) - bstp_set_port_tc(bp, BSTP_TCSTATE_INACTIVE); - - if ((bp->bp_role == BSTP_ROLE_DESIGNATED || - bp->bp_role == BSTP_ROLE_ROOT) && - bp->bp_state == BSTP_IFSTATE_FORWARDING && - !bp->bp_operedge) - bstp_set_port_tc(bp, BSTP_TCSTATE_DETECTED); - break; + case BSTP_TCSTATE_LEARNING: + if (bp->bp_rcvdtc || bp->bp_rcvdtcn || bp->bp_rcvdtca || + bp->bp_tc_prop) { + bstp_set_port_tc(bp, BSTP_TCSTATE_LEARNING); + } else if (bp->bp_role != BSTP_ROLE_DESIGNATED && + bp->bp_role != BSTP_ROLE_ROOT && + bp->bp_state == BSTP_IFSTATE_DISCARDING) { + bstp_set_port_tc(bp, BSTP_TCSTATE_INACTIVE); + } - /* these are transient states and go straight back to ACTIVE */ - case BSTP_TCSTATE_DETECTED: - case BSTP_TCSTATE_TCN: - case BSTP_TCSTATE_TC: - case 
BSTP_TCSTATE_PROPAG: - case BSTP_TCSTATE_ACK: - DPRINTF("Invalid TC state for %s\n", - bp->bp_ifp->if_xname); - break; - } + if ((bp->bp_role == BSTP_ROLE_DESIGNATED || + bp->bp_role == BSTP_ROLE_ROOT) && + bp->bp_state == BSTP_IFSTATE_FORWARDING && + !bp->bp_operedge) { + bstp_set_port_tc(bp, BSTP_TCSTATE_DETECTED); + } + break; + /* these are transient states and go straight back to ACTIVE */ + case BSTP_TCSTATE_DETECTED: + case BSTP_TCSTATE_TCN: + case BSTP_TCSTATE_TC: + case BSTP_TCSTATE_PROPAG: + case BSTP_TCSTATE_ACK: + DPRINTF("Invalid TC state for %s\n", + bp->bp_ifp->if_xname); + break; + } } static void @@ -1219,8 +1276,9 @@ bstp_update_info(struct bstp_port *bp) bp->bp_proposing = 0; bp->bp_proposed = 0; - if (bp->bp_agreed && !bstp_pdu_bettersame(bp, BSTP_INFO_MINE)) + if (bp->bp_agreed && !bstp_pdu_bettersame(bp, BSTP_INFO_MINE)) { bp->bp_agreed = 0; + } if (bp->bp_synced && !bp->bp_agreed) { bp->bp_synced = 0; @@ -1249,8 +1307,9 @@ bstp_set_other_tcprop(struct bstp_port *bp) BSTP_LOCK_ASSERT(bs); LIST_FOREACH(bp2, &bs->bs_bplist, bp_next) { - if (bp2 == bp) + if (bp2 == bp) { continue; + } bp2->bp_tc_prop = 1; } } @@ -1263,7 +1322,7 @@ bstp_set_all_reroot(struct bstp_state *bs) BSTP_LOCK_ASSERT(bs); LIST_FOREACH(bp, &bs->bs_bplist, bp_next) - bp->bp_reroot = 1; + bp->bp_reroot = 1; } static void @@ -1275,7 +1334,7 @@ bstp_set_all_sync(struct bstp_state *bs) LIST_FOREACH(bp, &bs->bs_bplist, bp_next) { bp->bp_sync = 1; - bp->bp_synced = 0; /* Not explicit in spec */ + bp->bp_synced = 0; /* Not explicit in spec */ } bs->bs_allsynced = 0; @@ -1284,34 +1343,35 @@ bstp_set_all_sync(struct bstp_state *bs) static void bstp_set_port_state(struct bstp_port *bp, int state) { - if (bp->bp_state == state) + if (bp->bp_state == state) { return; + } bp->bp_state = state; switch (bp->bp_state) { - case BSTP_IFSTATE_DISCARDING: - DPRINTF("state changed to DISCARDING on %s\n", - bp->bp_ifp->if_xname); - break; + case BSTP_IFSTATE_DISCARDING: + DPRINTF("state changed to DISCARDING on %s\n", + bp->bp_ifp->if_xname); + break; - case BSTP_IFSTATE_LEARNING: - DPRINTF("state changed to LEARNING on %s\n", - bp->bp_ifp->if_xname); + case BSTP_IFSTATE_LEARNING: + DPRINTF("state changed to LEARNING on %s\n", + bp->bp_ifp->if_xname); - bstp_timer_start(&bp->bp_forward_delay_timer, - bp->bp_protover == BSTP_PROTO_RSTP ? - bp->bp_desg_htime : bp->bp_desg_fdelay); - break; + bstp_timer_start(&bp->bp_forward_delay_timer, + bp->bp_protover == BSTP_PROTO_RSTP ? 
+ bp->bp_desg_htime : bp->bp_desg_fdelay); + break; - case BSTP_IFSTATE_FORWARDING: - DPRINTF("state changed to FORWARDING on %s\n", - bp->bp_ifp->if_xname); + case BSTP_IFSTATE_FORWARDING: + DPRINTF("state changed to FORWARDING on %s\n", + bp->bp_ifp->if_xname); - bstp_timer_stop(&bp->bp_forward_delay_timer); - /* Record that we enabled forwarding */ - bp->bp_forward_transitions++; - break; + bstp_timer_stop(&bp->bp_forward_delay_timer); + /* Record that we enabled forwarding */ + bp->bp_forward_transitions++; + break; } /* notify the parent bridge */ @@ -1323,32 +1383,33 @@ bstp_set_port_role(struct bstp_port *bp, int role) { struct bstp_state *bs = bp->bp_bs; - if (bp->bp_role == role) + if (bp->bp_role == role) { return; + } /* perform pre-change tasks */ switch (bp->bp_role) { - case BSTP_ROLE_DISABLED: - bstp_timer_start(&bp->bp_forward_delay_timer, - bp->bp_desg_max_age); - break; + case BSTP_ROLE_DISABLED: + bstp_timer_start(&bp->bp_forward_delay_timer, + bp->bp_desg_max_age); + break; - case BSTP_ROLE_BACKUP: - bstp_timer_start(&bp->bp_recent_backup_timer, - bp->bp_desg_htime * 2); - /* fall through */ - case BSTP_ROLE_ALTERNATE: - bstp_timer_start(&bp->bp_forward_delay_timer, - bp->bp_desg_fdelay); - bp->bp_sync = 0; - bp->bp_synced = 1; - bp->bp_reroot = 0; - break; + case BSTP_ROLE_BACKUP: + bstp_timer_start(&bp->bp_recent_backup_timer, + bp->bp_desg_htime * 2); + /* fall through */ + case BSTP_ROLE_ALTERNATE: + bstp_timer_start(&bp->bp_forward_delay_timer, + bp->bp_desg_fdelay); + bp->bp_sync = 0; + bp->bp_synced = 1; + bp->bp_reroot = 0; + break; - case BSTP_ROLE_ROOT: - bstp_timer_start(&bp->bp_recent_root_timer, - BSTP_DEFAULT_FORWARD_DELAY); - break; + case BSTP_ROLE_ROOT: + bstp_timer_start(&bp->bp_recent_root_timer, + BSTP_DEFAULT_FORWARD_DELAY); + break; } bp->bp_role = role; @@ -1358,34 +1419,34 @@ bstp_set_port_role(struct bstp_port *bp, int role) /* initialise the new role */ switch (bp->bp_role) { - case BSTP_ROLE_DISABLED: - case BSTP_ROLE_ALTERNATE: - case BSTP_ROLE_BACKUP: - DPRINTF("%s role -> ALT/BACK/DISABLED\n", - bp->bp_ifp->if_xname); - bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING); - bstp_timer_stop(&bp->bp_recent_root_timer); - bstp_timer_latch(&bp->bp_forward_delay_timer); - bp->bp_sync = 0; - bp->bp_synced = 1; - bp->bp_reroot = 0; - break; + case BSTP_ROLE_DISABLED: + case BSTP_ROLE_ALTERNATE: + case BSTP_ROLE_BACKUP: + DPRINTF("%s role -> ALT/BACK/DISABLED\n", + bp->bp_ifp->if_xname); + bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING); + bstp_timer_stop(&bp->bp_recent_root_timer); + bstp_timer_latch(&bp->bp_forward_delay_timer); + bp->bp_sync = 0; + bp->bp_synced = 1; + bp->bp_reroot = 0; + break; - case BSTP_ROLE_ROOT: - DPRINTF("%s role -> ROOT\n", - bp->bp_ifp->if_xname); - bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING); - bstp_timer_latch(&bp->bp_recent_root_timer); - bp->bp_proposing = 0; - break; + case BSTP_ROLE_ROOT: + DPRINTF("%s role -> ROOT\n", + bp->bp_ifp->if_xname); + bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING); + bstp_timer_latch(&bp->bp_recent_root_timer); + bp->bp_proposing = 0; + break; - case BSTP_ROLE_DESIGNATED: - DPRINTF("%s role -> DESIGNATED\n", - bp->bp_ifp->if_xname); - bstp_timer_start(&bp->bp_hello_timer, - bp->bp_desg_htime); - bp->bp_agree = 0; - break; + case BSTP_ROLE_DESIGNATED: + DPRINTF("%s role -> DESIGNATED\n", + bp->bp_ifp->if_xname); + bstp_timer_start(&bp->bp_hello_timer, + bp->bp_desg_htime); + bp->bp_agree = 0; + break; } /* let the TC state know that the role changed */ @@ -1399,24 +1460,25 @@ 
bstp_set_port_proto(struct bstp_port *bp, int proto) /* supported protocol versions */ switch (proto) { - case BSTP_PROTO_STP: - /* we can downgrade protocols only */ - bstp_timer_stop(&bp->bp_migrate_delay_timer); - /* clear unsupported features */ - bp->bp_operedge = 0; - /* STP compat mode only uses 16 bits of the 32 */ - if (bp->bp_path_cost > 65535) - bp->bp_path_cost = 65535; - break; + case BSTP_PROTO_STP: + /* we can downgrade protocols only */ + bstp_timer_stop(&bp->bp_migrate_delay_timer); + /* clear unsupported features */ + bp->bp_operedge = 0; + /* STP compat mode only uses 16 bits of the 32 */ + if (bp->bp_path_cost > 65535) { + bp->bp_path_cost = 65535; + } + break; - case BSTP_PROTO_RSTP: - bstp_timer_start(&bp->bp_migrate_delay_timer, - bs->bs_migration_delay); - break; + case BSTP_PROTO_RSTP: + bstp_timer_start(&bp->bp_migrate_delay_timer, + bs->bs_migration_delay); + break; - default: - DPRINTF("Unsupported STP version %d\n", proto); - return; + default: + DPRINTF("Unsupported STP version %d\n", proto); + return; } bp->bp_protover = proto; @@ -1432,71 +1494,72 @@ bstp_set_port_tc(struct bstp_port *bp, int state) /* initialise the new state */ switch (bp->bp_tcstate) { - case BSTP_TCSTATE_ACTIVE: - DPRINTF("%s -> TC_ACTIVE\n", bp->bp_ifp->if_xname); - /* nothing to do */ - break; + case BSTP_TCSTATE_ACTIVE: + DPRINTF("%s -> TC_ACTIVE\n", bp->bp_ifp->if_xname); + /* nothing to do */ + break; - case BSTP_TCSTATE_INACTIVE: - bstp_timer_stop(&bp->bp_tc_timer); - /* flush routes on the parent bridge */ - bp->bp_fdbflush = 1; - bstp_task_enqueue(&bp->bp_rtagetask); - bp->bp_tc_ack = 0; - DPRINTF("%s -> TC_INACTIVE\n", bp->bp_ifp->if_xname); - break; + case BSTP_TCSTATE_INACTIVE: + bstp_timer_stop(&bp->bp_tc_timer); + /* flush routes on the parent bridge */ + bp->bp_fdbflush = 1; + bstp_task_enqueue(&bp->bp_rtagetask); + bp->bp_tc_ack = 0; + DPRINTF("%s -> TC_INACTIVE\n", bp->bp_ifp->if_xname); + break; - case BSTP_TCSTATE_LEARNING: - bp->bp_rcvdtc = 0; - bp->bp_rcvdtcn = 0; - bp->bp_rcvdtca = 0; - bp->bp_tc_prop = 0; - DPRINTF("%s -> TC_LEARNING\n", bp->bp_ifp->if_xname); - break; + case BSTP_TCSTATE_LEARNING: + bp->bp_rcvdtc = 0; + bp->bp_rcvdtcn = 0; + bp->bp_rcvdtca = 0; + bp->bp_tc_prop = 0; + DPRINTF("%s -> TC_LEARNING\n", bp->bp_ifp->if_xname); + break; - case BSTP_TCSTATE_DETECTED: - bstp_set_timer_tc(bp); - bstp_set_other_tcprop(bp); - /* send out notification */ - bp->bp_flags |= BSTP_PORT_NEWINFO; - bstp_transmit(bs, bp); - /* reviewed for getmicrotime usage */ - getmicrotime(&bs->bs_last_tc_time); - DPRINTF("%s -> TC_DETECTED\n", bp->bp_ifp->if_xname); - bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */ - break; + case BSTP_TCSTATE_DETECTED: + bstp_set_timer_tc(bp); + bstp_set_other_tcprop(bp); + /* send out notification */ + bp->bp_flags |= BSTP_PORT_NEWINFO; + bstp_transmit(bs, bp); + /* reviewed for getmicrotime usage */ + getmicrotime(&bs->bs_last_tc_time); + DPRINTF("%s -> TC_DETECTED\n", bp->bp_ifp->if_xname); + bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */ + break; - case BSTP_TCSTATE_TCN: - bstp_set_timer_tc(bp); - DPRINTF("%s -> TC_TCN\n", bp->bp_ifp->if_xname); - /* fall through */ - case BSTP_TCSTATE_TC: - bp->bp_rcvdtc = 0; - bp->bp_rcvdtcn = 0; - if (bp->bp_role == BSTP_ROLE_DESIGNATED) - bp->bp_tc_ack = 1; - - bstp_set_other_tcprop(bp); - DPRINTF("%s -> TC_TC\n", bp->bp_ifp->if_xname); - bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */ - break; + case BSTP_TCSTATE_TCN: + bstp_set_timer_tc(bp); + DPRINTF("%s -> TC_TCN\n", bp->bp_ifp->if_xname); + /* fall 
through */ + case BSTP_TCSTATE_TC: + bp->bp_rcvdtc = 0; + bp->bp_rcvdtcn = 0; + if (bp->bp_role == BSTP_ROLE_DESIGNATED) { + bp->bp_tc_ack = 1; + } - case BSTP_TCSTATE_PROPAG: - /* flush routes on the parent bridge */ - bp->bp_fdbflush = 1; - bstp_task_enqueue(&bp->bp_rtagetask); - bp->bp_tc_prop = 0; - bstp_set_timer_tc(bp); - DPRINTF("%s -> TC_PROPAG\n", bp->bp_ifp->if_xname); - bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */ - break; + bstp_set_other_tcprop(bp); + DPRINTF("%s -> TC_TC\n", bp->bp_ifp->if_xname); + bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */ + break; - case BSTP_TCSTATE_ACK: - bstp_timer_stop(&bp->bp_tc_timer); - bp->bp_rcvdtca = 0; - DPRINTF("%s -> TC_ACK\n", bp->bp_ifp->if_xname); - bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */ - break; + case BSTP_TCSTATE_PROPAG: + /* flush routes on the parent bridge */ + bp->bp_fdbflush = 1; + bstp_task_enqueue(&bp->bp_rtagetask); + bp->bp_tc_prop = 0; + bstp_set_timer_tc(bp); + DPRINTF("%s -> TC_PROPAG\n", bp->bp_ifp->if_xname); + bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */ + break; + + case BSTP_TCSTATE_ACK: + bstp_timer_stop(&bp->bp_tc_timer); + bp->bp_rcvdtca = 0; + DPRINTF("%s -> TC_ACK\n", bp->bp_ifp->if_xname); + bp->bp_tcstate = BSTP_TCSTATE_ACTIVE; /* UCT */ + break; } } @@ -1505,20 +1568,21 @@ bstp_set_timer_tc(struct bstp_port *bp) { struct bstp_state *bs = bp->bp_bs; - if (bp->bp_tc_timer.active) + if (bp->bp_tc_timer.active) { return; + } switch (bp->bp_protover) { - case BSTP_PROTO_RSTP: - bstp_timer_start(&bp->bp_tc_timer, - bp->bp_desg_htime + BSTP_TICK_VAL); - bp->bp_flags |= BSTP_PORT_NEWINFO; - break; + case BSTP_PROTO_RSTP: + bstp_timer_start(&bp->bp_tc_timer, + bp->bp_desg_htime + BSTP_TICK_VAL); + bp->bp_flags |= BSTP_PORT_NEWINFO; + break; - case BSTP_PROTO_STP: - bstp_timer_start(&bp->bp_tc_timer, - bs->bs_root_max_age + bs->bs_root_fdelay); - break; + case BSTP_PROTO_STP: + bstp_timer_start(&bp->bp_tc_timer, + bs->bs_root_max_age + bs->bs_root_fdelay); + break; } } @@ -1529,9 +1593,10 @@ bstp_set_timer_msgage(struct bstp_port *bp) bp->bp_port_max_age) { bstp_timer_start(&bp->bp_message_age_timer, bp->bp_port_htime * 3); - } else + } else { /* expires immediately */ bstp_timer_start(&bp->bp_message_age_timer, 0); + } } static int @@ -1541,14 +1606,15 @@ bstp_rerooted(struct bstp_state *bs, struct bstp_port *bp) int rr_set = 0; LIST_FOREACH(bp2, &bs->bs_bplist, bp_next) { - if (bp2 == bp) + if (bp2 == bp) { continue; + } if (bp2->bp_recent_root_timer.active) { rr_set = 1; break; } } - return (!rr_set); + return !rr_set; } int @@ -1558,17 +1624,19 @@ bstp_set_htime(struct bstp_state *bs, int t) t *= BSTP_TICK_VAL; /* value can only be changed in legacy stp mode */ - if (bs->bs_protover != BSTP_PROTO_STP) - return (EPERM); + if (bs->bs_protover != BSTP_PROTO_STP) { + return EPERM; + } - if (t < BSTP_MIN_HELLO_TIME || t > BSTP_MAX_HELLO_TIME) - return (EINVAL); + if (t < BSTP_MIN_HELLO_TIME || t > BSTP_MAX_HELLO_TIME) { + return EINVAL; + } BSTP_LOCK(bs); bs->bs_bridge_htime = t; bstp_reinit(bs); BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1577,14 +1645,15 @@ bstp_set_fdelay(struct bstp_state *bs, int t) /* convert seconds to ticks */ t *= BSTP_TICK_VAL; - if (t < BSTP_MIN_FORWARD_DELAY || t > BSTP_MAX_FORWARD_DELAY) - return (EINVAL); + if (t < BSTP_MIN_FORWARD_DELAY || t > BSTP_MAX_FORWARD_DELAY) { + return EINVAL; + } BSTP_LOCK(bs); bs->bs_bridge_fdelay = t; bstp_reinit(bs); BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1593,14 +1662,15 @@ bstp_set_maxage(struct bstp_state *bs, int t) 
/* convert seconds to ticks */ t *= BSTP_TICK_VAL; - if (t < BSTP_MIN_MAX_AGE || t > BSTP_MAX_MAX_AGE) - return (EINVAL); + if (t < BSTP_MIN_MAX_AGE || t > BSTP_MAX_MAX_AGE) { + return EINVAL; + } BSTP_LOCK(bs); bs->bs_bridge_max_age = t; bstp_reinit(bs); BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1609,15 +1679,16 @@ bstp_set_holdcount(struct bstp_state *bs, int count) struct bstp_port *bp; if (count < BSTP_MIN_HOLD_COUNT || - count > BSTP_MAX_HOLD_COUNT) - return (EINVAL); + count > BSTP_MAX_HOLD_COUNT) { + return EINVAL; + } BSTP_LOCK(bs); bs->bs_txholdcount = count; LIST_FOREACH(bp, &bs->bs_bplist, bp_next) - bp->bp_txcount = 0; + bp->bp_txcount = 0; BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1626,13 +1697,13 @@ bstp_set_protocol(struct bstp_state *bs, int proto) struct bstp_port *bp; switch (proto) { - /* Supported protocol versions */ - case BSTP_PROTO_STP: - case BSTP_PROTO_RSTP: - break; + /* Supported protocol versions */ + case BSTP_PROTO_STP: + case BSTP_PROTO_RSTP: + break; - default: - return (EINVAL); + default: + return EINVAL; } BSTP_LOCK(bs); @@ -1649,14 +1720,15 @@ bstp_set_protocol(struct bstp_state *bs, int proto) } bstp_reinit(bs); BSTP_UNLOCK(bs); - return (0); + return 0; } int bstp_set_priority(struct bstp_state *bs, int pri) { - if (pri < 0 || pri > BSTP_MAX_PRIORITY) - return (EINVAL); + if (pri < 0 || pri > BSTP_MAX_PRIORITY) { + return EINVAL; + } /* Limit to steps of 4096 */ pri -= pri % 4096; @@ -1665,7 +1737,7 @@ bstp_set_priority(struct bstp_state *bs, int pri) bs->bs_bridge_priority = pri; bstp_reinit(bs); BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1673,8 +1745,9 @@ bstp_set_port_priority(struct bstp_port *bp, int pri) { struct bstp_state *bs = bp->bp_bs; - if (pri < 0 || pri > BSTP_MAX_PORT_PRIORITY) - return (EINVAL); + if (pri < 0 || pri > BSTP_MAX_PORT_PRIORITY) { + return EINVAL; + } /* Limit to steps of 16 */ pri -= pri % 16; @@ -1683,7 +1756,7 @@ bstp_set_port_priority(struct bstp_port *bp, int pri) bp->bp_priority = pri; bstp_reinit(bs); BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1691,16 +1764,18 @@ bstp_set_path_cost(struct bstp_port *bp, uint32_t path_cost) { struct bstp_state *bs = bp->bp_bs; - if (path_cost > BSTP_MAX_PATH_COST) - return (EINVAL); + if (path_cost > BSTP_MAX_PATH_COST) { + return EINVAL; + } /* STP compat mode only uses 16 bits of the 32 */ - if (bp->bp_protover == BSTP_PROTO_STP && path_cost > 65535) + if (bp->bp_protover == BSTP_PROTO_STP && path_cost > 65535) { path_cost = 65535; + } BSTP_LOCK(bs); - if (path_cost == 0) { /* use auto */ + if (path_cost == 0) { /* use auto */ bp->bp_flags &= ~BSTP_PORT_ADMCOST; bp->bp_path_cost = bstp_calc_path_cost(bp); } else { @@ -1709,7 +1784,7 @@ bstp_set_path_cost(struct bstp_port *bp, uint32_t path_cost) } bstp_reinit(bs); BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1718,12 +1793,13 @@ bstp_set_edge(struct bstp_port *bp, int set) struct bstp_state *bs = bp->bp_bs; BSTP_LOCK(bs); - if ((bp->bp_operedge = set) == 0) + if ((bp->bp_operedge = set) == 0) { bp->bp_flags &= ~BSTP_PORT_ADMEDGE; - else + } else { bp->bp_flags |= BSTP_PORT_ADMEDGE; + } BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1735,12 +1811,14 @@ bstp_set_autoedge(struct bstp_port *bp, int set) if (set) { bp->bp_flags |= BSTP_PORT_AUTOEDGE; /* we may be able to transition straight to edge */ - if (bp->bp_edge_delay_timer.active == 0) + if (bp->bp_edge_delay_timer.active == 0) { bstp_edge_delay_expiry(bs, bp); - } else + } + } else { bp->bp_flags &= ~BSTP_PORT_AUTOEDGE; + } 
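/*
 * A minimal sketch of the normalization performed by the setters
 * above: user-supplied times are converted from seconds to the
 * 1/256-second ticks used internally (BSTP_TICK_VAL == 256), and
 * priorities are rounded down to their step size. The sketch_*
 * helpers are illustrative only, assuming the bounds defined in
 * bridgestp.h (BSTP_MIN/MAX_FORWARD_DELAY, steps of 4096 and 16).
 */
#include <errno.h>

#define SKETCH_TICK_VAL 256                     /* BSTP_TICK_VAL */

static int
sketch_set_fdelay(int seconds, int *out_ticks)
{
        int t = seconds * SKETCH_TICK_VAL;

        /* same range check as bstp_set_fdelay(): 4 to 30 seconds */
        if (t < 4 * SKETCH_TICK_VAL || t > 30 * SKETCH_TICK_VAL) {
                return EINVAL;
        }
        *out_ticks = t;                         /* 15 s -> 3840 ticks */
        return 0;
}

static int
sketch_round_priority(int pri, int step)
{
        /* bridge priority uses step 4096, port priority step 16,
         * so sketch_round_priority(32769, 4096) == 32768 */
        return pri - (pri % step);
}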
BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1751,7 +1829,7 @@ bstp_set_ptp(struct bstp_port *bp, int set) BSTP_LOCK(bs); bp->bp_ptp_link = set; BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -1762,12 +1840,14 @@ bstp_set_autoptp(struct bstp_port *bp, int set) BSTP_LOCK(bs); if (set) { bp->bp_flags |= BSTP_PORT_AUTOPTP; - if (bp->bp_role != BSTP_ROLE_DISABLED) + if (bp->bp_role != BSTP_ROLE_DISABLED) { bstp_ifupdstatus(bs, bp); - } else + } + } else { bp->bp_flags &= ~BSTP_PORT_AUTOPTP; + } BSTP_UNLOCK(bs); - return (0); + return 0; } /* @@ -1780,29 +1860,33 @@ bstp_calc_path_cost(struct bstp_port *bp) uint32_t path_cost; /* If the priority has been manually set then retain the value */ - if (bp->bp_flags & BSTP_PORT_ADMCOST) + if (bp->bp_flags & BSTP_PORT_ADMCOST) { return bp->bp_path_cost; + } if (bp->bp_if_link_state == LINK_STATE_DOWN) { /* Recalc when the link comes up again */ bp->bp_flags |= BSTP_PORT_PNDCOST; - return (BSTP_DEFAULT_PATH_COST); + return BSTP_DEFAULT_PATH_COST; } - if (ifp->if_baudrate < 1000) - return (BSTP_DEFAULT_PATH_COST); + if (ifp->if_baudrate < 1000) { + return BSTP_DEFAULT_PATH_COST; + } - /* formula from section 17.14, IEEE Std 802.1D-2004 */ + /* formula from section 17.14, IEEE Std 802.1D-2004 */ path_cost = 20000000000ULL / (ifp->if_baudrate / 1000); - if (path_cost > BSTP_MAX_PATH_COST) + if (path_cost > BSTP_MAX_PATH_COST) { path_cost = BSTP_MAX_PATH_COST; + } /* STP compat mode only uses 16 bits of the 32 */ - if (bp->bp_protover == BSTP_PROTO_STP && path_cost > 65535) + if (bp->bp_protover == BSTP_PROTO_STP && path_cost > 65535) { path_cost = 65535; + } - return (path_cost); + return path_cost; } /* @@ -1815,8 +1899,9 @@ bstp_notify_state(void *arg, __unused int pending) struct bstp_port *bp = (struct bstp_port *)arg; struct bstp_state *bs = bp->bp_bs; - if (bp->bp_active == 1 && bs->bs_state_cb != NULL) + if (bp->bp_active == 1 && bs->bs_state_cb != NULL) { (*bs->bs_state_cb)(bp->bp_ifp, bp->bp_state); + } } /* @@ -1832,19 +1917,20 @@ bstp_notify_rtage(void *arg, __unused int pending) BSTP_LOCK(bs); switch (bp->bp_protover) { - case BSTP_PROTO_STP: - /* convert to seconds */ - age = bp->bp_desg_fdelay / BSTP_TICK_VAL; - break; + case BSTP_PROTO_STP: + /* convert to seconds */ + age = bp->bp_desg_fdelay / BSTP_TICK_VAL; + break; - case BSTP_PROTO_RSTP: - age = 0; - break; + case BSTP_PROTO_RSTP: + age = 0; + break; } BSTP_UNLOCK(bs); - if (bp->bp_active == 1 && bs->bs_rtage_cb != NULL) + if (bp->bp_active == 1 && bs->bs_rtage_cb != NULL) { (*bs->bs_rtage_cb)(bp->bp_ifp, age); + } /* flush is complete */ BSTP_LOCK(bs); @@ -1903,21 +1989,24 @@ bstp_ifupdstatus(struct bstp_state *bs, struct bstp_port *bp) bp->bp_flags &= ~BSTP_PORT_PNDCOST; } - if (bp->bp_role == BSTP_ROLE_DISABLED) + if (bp->bp_role == BSTP_ROLE_DISABLED) { bstp_enable_port(bs, bp); + } } else { if (bp->bp_role != BSTP_ROLE_DISABLED) { bstp_disable_port(bs, bp); if ((bp->bp_flags & BSTP_PORT_ADMEDGE) && - bp->bp_protover == BSTP_PROTO_RSTP) + bp->bp_protover == BSTP_PROTO_RSTP) { bp->bp_operedge = 1; + } } } return; } - if (bp->bp_infois != BSTP_INFO_DISABLED) + if (bp->bp_infois != BSTP_INFO_DISABLED) { bstp_disable_port(bs, bp); + } } static void @@ -1943,13 +2032,14 @@ bstp_tick(void *arg) BSTP_LOCK(bs); - if (bs->bs_running == 0) + if (bs->bs_running == 0) { return; + } /* slow timer to catch missed link events */ if (bstp_timer_expired(&bs->bs_link_timer)) { LIST_FOREACH(bp, &bs->bs_bplist, bp_next) - bstp_ifupdstatus(bs, bp); + bstp_ifupdstatus(bs, bp); 
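/*
 * Worked example of the path cost formula cited above (clause 17.14,
 * IEEE Std 802.1D-2004), with the same clamps bstp_calc_path_cost()
 * applies; a sketch only, using literal values for the BSTP_*
 * constants from bridgestp.h.
 */
#include <stdint.h>

static uint32_t
sketch_path_cost(uint64_t baudrate_bps, int stp_compat)
{
        uint64_t cost;

        if (baudrate_bps < 1000) {
                return 55;                      /* BSTP_DEFAULT_PATH_COST */
        }
        /* 20,000,000,000 divided by the link speed in kb/s */
        cost = 20000000000ULL / (baudrate_bps / 1000);
        if (cost > 200000000) {                 /* BSTP_MAX_PATH_COST */
                cost = 200000000;
        }
        if (stp_compat && cost > 65535) {       /* STP carries 16-bit costs */
                cost = 65535;
        }
        /* 10 Mb/s -> 2,000,000; 100 Mb/s -> 200,000; 1 Gb/s -> 20,000 */
        return (uint32_t)cost;
}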
bstp_timer_start(&bs->bs_link_timer, BSTP_LINK_TIMER); } @@ -1960,23 +2050,28 @@ bstp_tick(void *arg) bstp_timer_expired(&bp->bp_forward_delay_timer); bstp_timer_expired(&bp->bp_recent_backup_timer); - if (bstp_timer_expired(&bp->bp_hello_timer)) + if (bstp_timer_expired(&bp->bp_hello_timer)) { bstp_hello_timer_expiry(bs, bp); + } - if (bstp_timer_expired(&bp->bp_message_age_timer)) + if (bstp_timer_expired(&bp->bp_message_age_timer)) { bstp_message_age_expiry(bs, bp); + } - if (bstp_timer_expired(&bp->bp_migrate_delay_timer)) + if (bstp_timer_expired(&bp->bp_migrate_delay_timer)) { bstp_migrate_delay_expiry(bs, bp); + } - if (bstp_timer_expired(&bp->bp_edge_delay_timer)) + if (bstp_timer_expired(&bp->bp_edge_delay_timer)) { bstp_edge_delay_expiry(bs, bp); + } /* update the various state machines for the port */ bstp_update_state(bs, bp); - if (bp->bp_txcount > 0) + if (bp->bp_txcount > 0) { bp->bp_txcount--; + } } BSTP_UNLOCK(bs); @@ -2012,14 +2107,15 @@ bstp_timer_latch(struct bstp_timer *t) static int bstp_timer_expired(struct bstp_timer *t) { - if (t->active == 0 || t->latched) - return (0); + if (t->active == 0 || t->latched) { + return 0; + } t->value -= BSTP_TICK_VAL; if (t->value <= 0) { bstp_timer_stop(t); - return (1); + return 1; } - return (0); + return 0; } static void @@ -2028,7 +2124,7 @@ bstp_hello_timer_expiry(struct bstp_state *bs, struct bstp_port *bp) if ((bp->bp_flags & BSTP_PORT_NEWINFO) || bp->bp_role == BSTP_ROLE_DESIGNATED || (bp->bp_role == BSTP_ROLE_ROOT && - bp->bp_tc_timer.active == 1)) { + bp->bp_tc_timer.active == 1)) { bstp_timer_start(&bp->bp_hello_timer, bp->bp_desg_htime); bp->bp_flags |= BSTP_PORT_NEWINFO; bstp_transmit(bs, bp); @@ -2071,7 +2167,7 @@ bstp_addr_cmp(const uint8_t *a, const uint8_t *b) d = ((int)a[i]) - ((int)b[i]); } - return (d); + return d; } /* @@ -2086,10 +2182,11 @@ bstp_same_bridgeid(uint64_t id1, uint64_t id2) PV2ADDR(id1, addr1); PV2ADDR(id2, addr2); - if (bstp_addr_cmp(addr1, addr2) == 0) - return (1); + if (bstp_addr_cmp(addr1, addr2) == 0) { + return 1; + } - return (0); + return 0; } void @@ -2098,7 +2195,7 @@ bstp_reinit(struct bstp_state *bs) struct bstp_port *bp; struct ifnet *ifp, *mif; u_char *e_addr; - static const u_char llzero[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */ + static const u_char llzero[ETHER_ADDR_LEN]; /* 00:00:00:00:00:00 */ BSTP_LOCK_ASSERT(bs); @@ -2111,11 +2208,13 @@ bstp_reinit(struct bstp_state *bs) */ ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { - if (ifp->if_type != IFT_ETHER) + if (ifp->if_type != IFT_ETHER) { continue; + } - if (bstp_addr_cmp(IF_LLADDR(ifp), llzero) == 0) + if (bstp_addr_cmp(IF_LLADDR(ifp), llzero) == 0) { continue; + } if (mif == NULL) { mif = ifp; @@ -2158,8 +2257,9 @@ bstp_reinit(struct bstp_state *bs) bs->bs_bridge_pv.pv_dport_id = 0; bs->bs_bridge_pv.pv_port_id = 0; - if (bs->bs_running) + if (bs->bs_running) { bsd_untimeout(bstp_tick, bs); + } LIST_FOREACH(bp, &bs->bs_bplist, bp_next) { bp->bp_port_id = (bp->bp_priority << 8) | @@ -2231,7 +2331,7 @@ bstp_stop(struct bstp_state *bs) BSTP_LOCK(bs); LIST_FOREACH(bp, &bs->bs_bplist, bp_next) - bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING); + bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING); bs->bs_running = 0; bsd_untimeout(bstp_tick, bs); @@ -2252,14 +2352,14 @@ bstp_create(struct bstp_state *bs, struct bstp_port *bp, struct ifnet *ifp) /* Init state */ bp->bp_infois = BSTP_INFO_DISABLED; - bp->bp_flags = BSTP_PORT_AUTOEDGE|BSTP_PORT_AUTOPTP; + bp->bp_flags = BSTP_PORT_AUTOEDGE | BSTP_PORT_AUTOPTP; 
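/*
 * The port timers ticked above store 1/256-second units and lose
 * BSTP_TICK_VAL per call to bstp_timer_expired(). Assuming
 * bstp_tick() is rescheduled once per second, which BSTP_TICK_VAL
 * (one second's worth of units) suggests, a timer's remaining
 * lifetime in ticks is a sketch like:
 */
static int
sketch_ticks_to_expiry(int value)
{
        /* BSTP_DEFAULT_HELLO_TIME (2 * 256) -> 2 ticks,
         * BSTP_DEFAULT_MAX_AGE (20 * 256) -> 20 ticks */
        return (value + 255) / 256;
}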
bstp_set_port_state(bp, BSTP_IFSTATE_DISCARDING); bstp_set_port_proto(bp, bs->bs_protover); bstp_set_port_role(bp, BSTP_ROLE_DISABLED); bstp_set_port_tc(bp, BSTP_TCSTATE_INACTIVE); bp->bp_path_cost = bstp_calc_path_cost(bp); BSTP_UNLOCK(bs); - return (0); + return 0; } int @@ -2271,11 +2371,11 @@ bstp_enable(struct bstp_port *bp) KASSERT(bp->bp_active == 0, ("already a bstp member")); switch (ifp->if_type) { - case IFT_ETHER: /* These can do spanning tree. */ - break; - default: - /* Nothing else can. */ - return (EINVAL); + case IFT_ETHER: /* These can do spanning tree. */ + break; + default: + /* Nothing else can. */ + return EINVAL; } BSTP_LOCK(bs); @@ -2285,7 +2385,7 @@ bstp_enable(struct bstp_port *bp) bstp_reinit(bs); bstp_update_roles(bs, bp); BSTP_UNLOCK(bs); - return (0); + return 0; } void @@ -2340,7 +2440,7 @@ static void bstp_create_task_thread(void) { kern_return_t error; - + lck_grp_attr_t *lck_grp_attr = NULL; lck_grp_attr = lck_grp_attr_alloc_init(); @@ -2362,31 +2462,32 @@ bstp_task_thread_func(void) struct bstp_task *bt, *tvar; lck_mtx_lock(bstp_task_mtx); - + do { - while(TAILQ_EMPTY(&bstp_task_queue)) { + while (TAILQ_EMPTY(&bstp_task_queue)) { wakeup(&bstp_task_queue_running); msleep(&bstp_task_queue, bstp_task_mtx, PZERO, "bstp_task_queue", NULL); } - - TAILQ_FOREACH_SAFE(bt, &bstp_task_queue, bt_next, tvar) { + + TAILQ_FOREACH_SAFE(bt, &bstp_task_queue, bt_next, tvar) { int count = bt->bt_count; - + bt->bt_count = 0; - + bstp_task_queue_running = bt; lck_mtx_unlock(bstp_task_mtx); - + (*bt->bt_func)(bt->bt_context, count); - + lck_mtx_lock(bstp_task_mtx); bstp_task_queue_running = NULL; - if (bt->bt_count == 0) - TAILQ_REMOVE(&bstp_task_queue, bt, bt_next); + if (bt->bt_count == 0) { + TAILQ_REMOVE(&bstp_task_queue, bt, bt_next); + } } } while (1); - + /* UNREACHED */ } @@ -2401,12 +2502,12 @@ bstp_task_enqueue(struct bstp_task *bt) wakeup(&bstp_task_queue); return; } - + bt->bt_count = 1; TAILQ_INSERT_TAIL(&bstp_task_queue, bt, bt_next); - + lck_mtx_unlock(bstp_task_mtx); - + wakeup(&bstp_task_queue); } @@ -2421,5 +2522,3 @@ bstp_task_drain(struct bstp_task *bt) } lck_mtx_unlock(bstp_task_mtx); } - - diff --git a/bsd/net/bridgestp.h b/bsd/net/bridgestp.h index 412fface8..73691353f 100644 --- a/bsd/net/bridgestp.h +++ b/bsd/net/bridgestp.h @@ -108,109 +108,109 @@ #include #include /* STP port states */ -#define BSTP_IFSTATE_DISABLED 0 -#define BSTP_IFSTATE_LISTENING 1 -#define BSTP_IFSTATE_LEARNING 2 -#define BSTP_IFSTATE_FORWARDING 3 -#define BSTP_IFSTATE_BLOCKING 4 -#define BSTP_IFSTATE_DISCARDING 5 - -#define BSTP_TCSTATE_ACTIVE 1 -#define BSTP_TCSTATE_DETECTED 2 -#define BSTP_TCSTATE_INACTIVE 3 -#define BSTP_TCSTATE_LEARNING 4 -#define BSTP_TCSTATE_PROPAG 5 -#define BSTP_TCSTATE_ACK 6 -#define BSTP_TCSTATE_TC 7 -#define BSTP_TCSTATE_TCN 8 - -#define BSTP_ROLE_DISABLED 0 -#define BSTP_ROLE_ROOT 1 -#define BSTP_ROLE_DESIGNATED 2 -#define BSTP_ROLE_ALTERNATE 3 -#define BSTP_ROLE_BACKUP 4 +#define BSTP_IFSTATE_DISABLED 0 +#define BSTP_IFSTATE_LISTENING 1 +#define BSTP_IFSTATE_LEARNING 2 +#define BSTP_IFSTATE_FORWARDING 3 +#define BSTP_IFSTATE_BLOCKING 4 +#define BSTP_IFSTATE_DISCARDING 5 + +#define BSTP_TCSTATE_ACTIVE 1 +#define BSTP_TCSTATE_DETECTED 2 +#define BSTP_TCSTATE_INACTIVE 3 +#define BSTP_TCSTATE_LEARNING 4 +#define BSTP_TCSTATE_PROPAG 5 +#define BSTP_TCSTATE_ACK 6 +#define BSTP_TCSTATE_TC 7 +#define BSTP_TCSTATE_TCN 8 + +#define BSTP_ROLE_DISABLED 0 +#define BSTP_ROLE_ROOT 1 +#define BSTP_ROLE_DESIGNATED 2 +#define BSTP_ROLE_ALTERNATE 3 +#define 
BSTP_ROLE_BACKUP 4 #ifdef XNU_KERNEL_PRIVATE /* STP port flags */ -#define BSTP_PORT_CANMIGRATE 0x0001 -#define BSTP_PORT_NEWINFO 0x0002 -#define BSTP_PORT_DISPUTED 0x0004 -#define BSTP_PORT_ADMCOST 0x0008 -#define BSTP_PORT_AUTOEDGE 0x0010 -#define BSTP_PORT_AUTOPTP 0x0020 -#define BSTP_PORT_ADMEDGE 0x0040 -#define BSTP_PORT_PNDCOST 0x0080 +#define BSTP_PORT_CANMIGRATE 0x0001 +#define BSTP_PORT_NEWINFO 0x0002 +#define BSTP_PORT_DISPUTED 0x0004 +#define BSTP_PORT_ADMCOST 0x0008 +#define BSTP_PORT_AUTOEDGE 0x0010 +#define BSTP_PORT_AUTOPTP 0x0020 +#define BSTP_PORT_ADMEDGE 0x0040 +#define BSTP_PORT_PNDCOST 0x0080 /* BPDU priority */ -#define BSTP_PDU_SUPERIOR 1 -#define BSTP_PDU_REPEATED 2 -#define BSTP_PDU_INFERIOR 3 -#define BSTP_PDU_INFERIORALT 4 -#define BSTP_PDU_OTHER 5 +#define BSTP_PDU_SUPERIOR 1 +#define BSTP_PDU_REPEATED 2 +#define BSTP_PDU_INFERIOR 3 +#define BSTP_PDU_INFERIORALT 4 +#define BSTP_PDU_OTHER 5 /* BPDU flags */ -#define BSTP_PDU_PRMASK 0x0c /* Port Role */ -#define BSTP_PDU_PRSHIFT 2 /* Port Role offset */ -#define BSTP_PDU_F_UNKN 0x00 /* Unknown port (00) */ -#define BSTP_PDU_F_ALT 0x01 /* Alt/Backup port (01) */ -#define BSTP_PDU_F_ROOT 0x02 /* Root port (10) */ -#define BSTP_PDU_F_DESG 0x03 /* Designated port (11) */ - -#define BSTP_PDU_STPMASK 0x81 /* strip unused STP flags */ -#define BSTP_PDU_RSTPMASK 0x7f /* strip unused RSTP flags */ -#define BSTP_PDU_F_TC 0x01 /* Topology change */ -#define BSTP_PDU_F_P 0x02 /* Proposal flag */ -#define BSTP_PDU_F_L 0x10 /* Learning flag */ -#define BSTP_PDU_F_F 0x20 /* Forwarding flag */ -#define BSTP_PDU_F_A 0x40 /* Agreement flag */ -#define BSTP_PDU_F_TCA 0x80 /* Topology change ack */ +#define BSTP_PDU_PRMASK 0x0c /* Port Role */ +#define BSTP_PDU_PRSHIFT 2 /* Port Role offset */ +#define BSTP_PDU_F_UNKN 0x00 /* Unknown port (00) */ +#define BSTP_PDU_F_ALT 0x01 /* Alt/Backup port (01) */ +#define BSTP_PDU_F_ROOT 0x02 /* Root port (10) */ +#define BSTP_PDU_F_DESG 0x03 /* Designated port (11) */ + +#define BSTP_PDU_STPMASK 0x81 /* strip unused STP flags */ +#define BSTP_PDU_RSTPMASK 0x7f /* strip unused RSTP flags */ +#define BSTP_PDU_F_TC 0x01 /* Topology change */ +#define BSTP_PDU_F_P 0x02 /* Proposal flag */ +#define BSTP_PDU_F_L 0x10 /* Learning flag */ +#define BSTP_PDU_F_F 0x20 /* Forwarding flag */ +#define BSTP_PDU_F_A 0x40 /* Agreement flag */ +#define BSTP_PDU_F_TCA 0x80 /* Topology change ack */ /* * Spanning tree defaults. 
*/ -#define BSTP_DEFAULT_MAX_AGE (20 * 256) -#define BSTP_DEFAULT_HELLO_TIME (2 * 256) -#define BSTP_DEFAULT_FORWARD_DELAY (15 * 256) -#define BSTP_DEFAULT_HOLD_TIME (1 * 256) -#define BSTP_DEFAULT_MIGRATE_DELAY (3 * 256) -#define BSTP_DEFAULT_HOLD_COUNT 6 -#define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000 -#define BSTP_DEFAULT_PORT_PRIORITY 0x80 -#define BSTP_DEFAULT_PATH_COST 55 -#define BSTP_MIN_HELLO_TIME (1 * 256) -#define BSTP_MIN_MAX_AGE (6 * 256) -#define BSTP_MIN_FORWARD_DELAY (4 * 256) -#define BSTP_MIN_HOLD_COUNT 1 -#define BSTP_MAX_HELLO_TIME (2 * 256) -#define BSTP_MAX_MAX_AGE (40 * 256) -#define BSTP_MAX_FORWARD_DELAY (30 * 256) -#define BSTP_MAX_HOLD_COUNT 10 -#define BSTP_MAX_PRIORITY 61440 -#define BSTP_MAX_PORT_PRIORITY 240 -#define BSTP_MAX_PATH_COST 200000000 +#define BSTP_DEFAULT_MAX_AGE (20 * 256) +#define BSTP_DEFAULT_HELLO_TIME (2 * 256) +#define BSTP_DEFAULT_FORWARD_DELAY (15 * 256) +#define BSTP_DEFAULT_HOLD_TIME (1 * 256) +#define BSTP_DEFAULT_MIGRATE_DELAY (3 * 256) +#define BSTP_DEFAULT_HOLD_COUNT 6 +#define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000 +#define BSTP_DEFAULT_PORT_PRIORITY 0x80 +#define BSTP_DEFAULT_PATH_COST 55 +#define BSTP_MIN_HELLO_TIME (1 * 256) +#define BSTP_MIN_MAX_AGE (6 * 256) +#define BSTP_MIN_FORWARD_DELAY (4 * 256) +#define BSTP_MIN_HOLD_COUNT 1 +#define BSTP_MAX_HELLO_TIME (2 * 256) +#define BSTP_MAX_MAX_AGE (40 * 256) +#define BSTP_MAX_FORWARD_DELAY (30 * 256) +#define BSTP_MAX_HOLD_COUNT 10 +#define BSTP_MAX_PRIORITY 61440 +#define BSTP_MAX_PORT_PRIORITY 240 +#define BSTP_MAX_PATH_COST 200000000 /* BPDU message types */ -#define BSTP_MSGTYPE_CFG 0x00 /* Configuration */ -#define BSTP_MSGTYPE_RSTP 0x02 /* Rapid STP */ -#define BSTP_MSGTYPE_TCN 0x80 /* Topology chg notification */ +#define BSTP_MSGTYPE_CFG 0x00 /* Configuration */ +#define BSTP_MSGTYPE_RSTP 0x02 /* Rapid STP */ +#define BSTP_MSGTYPE_TCN 0x80 /* Topology chg notification */ /* Protocol versions */ -#define BSTP_PROTO_ID 0x00 -#define BSTP_PROTO_STP 0x00 -#define BSTP_PROTO_RSTP 0x02 -#define BSTP_PROTO_MAX BSTP_PROTO_RSTP +#define BSTP_PROTO_ID 0x00 +#define BSTP_PROTO_STP 0x00 +#define BSTP_PROTO_RSTP 0x02 +#define BSTP_PROTO_MAX BSTP_PROTO_RSTP -#define BSTP_INFO_RECIEVED 1 /* compat */ -#define BSTP_INFO_RECEIVED 1 -#define BSTP_INFO_MINE 2 -#define BSTP_INFO_AGED 3 -#define BSTP_INFO_DISABLED 4 +#define BSTP_INFO_RECIEVED 1 /* compat */ +#define BSTP_INFO_RECEIVED 1 +#define BSTP_INFO_MINE 2 +#define BSTP_INFO_AGED 3 +#define BSTP_INFO_DISABLED 4 -#define BSTP_MESSAGE_AGE_INCR (1 * 256) /* in 256ths of a second */ -#define BSTP_TICK_VAL (1 * 256) /* in 256ths of a second */ -#define BSTP_LINK_TIMER (BSTP_TICK_VAL * 15) +#define BSTP_MESSAGE_AGE_INCR (1 * 256) /* in 256ths of a second */ +#define BSTP_TICK_VAL (1 * 256) /* in 256ths of a second */ +#define BSTP_LINK_TIMER (BSTP_TICK_VAL * 15) /* * Driver callbacks for STP state changes @@ -218,8 +218,8 @@ typedef void (*bstp_state_cb_t)(struct ifnet *, int); typedef void (*bstp_rtage_cb_t)(struct ifnet *, int); struct bstp_cb_ops { - bstp_state_cb_t bcb_state; - bstp_rtage_cb_t bcb_rtage; + bstp_state_cb_t bcb_state; + bstp_rtage_cb_t bcb_rtage; }; /* @@ -230,144 +230,144 @@ struct bstp_cb_ops { /* configuration bridge protocol data unit */ struct bstp_cbpdu { - uint8_t cbu_dsap; /* LLC: destination sap */ - uint8_t cbu_ssap; /* LLC: source sap */ - uint8_t cbu_ctl; /* LLC: control */ - uint16_t cbu_protoid; /* protocol id */ - uint8_t cbu_protover; /* protocol version */ - uint8_t cbu_bpdutype; /* message type */ - uint8_t 
cbu_flags; /* flags (below) */ + uint8_t cbu_dsap; /* LLC: destination sap */ + uint8_t cbu_ssap; /* LLC: source sap */ + uint8_t cbu_ctl; /* LLC: control */ + uint16_t cbu_protoid; /* protocol id */ + uint8_t cbu_protover; /* protocol version */ + uint8_t cbu_bpdutype; /* message type */ + uint8_t cbu_flags; /* flags (below) */ /* root id */ - uint16_t cbu_rootpri; /* root priority */ - uint8_t cbu_rootaddr[6]; /* root address */ + uint16_t cbu_rootpri; /* root priority */ + uint8_t cbu_rootaddr[6]; /* root address */ - uint32_t cbu_rootpathcost; /* root path cost */ + uint32_t cbu_rootpathcost; /* root path cost */ /* bridge id */ - uint16_t cbu_bridgepri; /* bridge priority */ - uint8_t cbu_bridgeaddr[6]; /* bridge address */ - - uint16_t cbu_portid; /* port id */ - uint16_t cbu_messageage; /* current message age */ - uint16_t cbu_maxage; /* maximum age */ - uint16_t cbu_hellotime; /* hello time */ - uint16_t cbu_forwarddelay; /* forwarding delay */ - uint8_t cbu_versionlen; /* version 1 length */ + uint16_t cbu_bridgepri; /* bridge priority */ + uint8_t cbu_bridgeaddr[6]; /* bridge address */ + + uint16_t cbu_portid; /* port id */ + uint16_t cbu_messageage; /* current message age */ + uint16_t cbu_maxage; /* maximum age */ + uint16_t cbu_hellotime; /* hello time */ + uint16_t cbu_forwarddelay; /* forwarding delay */ + uint8_t cbu_versionlen; /* version 1 length */ } __attribute__((__packed__)); -#define BSTP_BPDU_STP_LEN (3 + 35) /* LLC + STP pdu */ -#define BSTP_BPDU_RSTP_LEN (3 + 36) /* LLC + RSTP pdu */ +#define BSTP_BPDU_STP_LEN (3 + 35) /* LLC + STP pdu */ +#define BSTP_BPDU_RSTP_LEN (3 + 36) /* LLC + RSTP pdu */ /* topology change notification bridge protocol data unit */ struct bstp_tbpdu { - uint8_t tbu_dsap; /* LLC: destination sap */ - uint8_t tbu_ssap; /* LLC: source sap */ - uint8_t tbu_ctl; /* LLC: control */ - uint16_t tbu_protoid; /* protocol id */ - uint8_t tbu_protover; /* protocol version */ - uint8_t tbu_bpdutype; /* message type */ + uint8_t tbu_dsap; /* LLC: destination sap */ + uint8_t tbu_ssap; /* LLC: source sap */ + uint8_t tbu_ctl; /* LLC: control */ + uint16_t tbu_protoid; /* protocol id */ + uint8_t tbu_protover; /* protocol version */ + uint8_t tbu_bpdutype; /* message type */ } __attribute__((__packed__)); /* * Timekeeping structure used in spanning tree code. 
*/ - + typedef void bstp_task_func_t(void *context, int count); - + struct bstp_task { - TAILQ_ENTRY(bstp_task) bt_next; - int bt_count; - bstp_task_func_t *bt_func; - void *bt_context; + TAILQ_ENTRY(bstp_task) bt_next; + int bt_count; + bstp_task_func_t *bt_func; + void *bt_context; }; struct bstp_timer { - int active; - int latched; - int value; + int active; + int latched; + int value; }; struct bstp_pri_vector { - uint64_t pv_root_id; - uint32_t pv_cost; - uint64_t pv_dbridge_id; - uint16_t pv_dport_id; - uint16_t pv_port_id; + uint64_t pv_root_id; + uint32_t pv_cost; + uint64_t pv_dbridge_id; + uint16_t pv_dport_id; + uint16_t pv_port_id; }; struct bstp_config_unit { - struct bstp_pri_vector cu_pv; - uint16_t cu_message_age; - uint16_t cu_max_age; - uint16_t cu_forward_delay; - uint16_t cu_hello_time; - uint8_t cu_message_type; - uint8_t cu_topology_change_ack; - uint8_t cu_topology_change; - uint8_t cu_proposal; - uint8_t cu_agree; - uint8_t cu_learning; - uint8_t cu_forwarding; - uint8_t cu_role; + struct bstp_pri_vector cu_pv; + uint16_t cu_message_age; + uint16_t cu_max_age; + uint16_t cu_forward_delay; + uint16_t cu_hello_time; + uint8_t cu_message_type; + uint8_t cu_topology_change_ack; + uint8_t cu_topology_change; + uint8_t cu_proposal; + uint8_t cu_agree; + uint8_t cu_learning; + uint8_t cu_forwarding; + uint8_t cu_role; }; struct bstp_tcn_unit { - uint8_t tu_message_type; + uint8_t tu_message_type; }; struct bstp_port { - LIST_ENTRY(bstp_port) bp_next; - struct ifnet *bp_ifp; /* parent if */ - struct bstp_state *bp_bs; - uint8_t bp_active; - uint8_t bp_protover; - uint32_t bp_flags; - uint32_t bp_path_cost; - uint16_t bp_port_msg_age; - uint16_t bp_port_max_age; - uint16_t bp_port_fdelay; - uint16_t bp_port_htime; - uint16_t bp_desg_msg_age; - uint16_t bp_desg_max_age; - uint16_t bp_desg_fdelay; - uint16_t bp_desg_htime; - struct bstp_timer bp_edge_delay_timer; - struct bstp_timer bp_forward_delay_timer; - struct bstp_timer bp_hello_timer; - struct bstp_timer bp_message_age_timer; - struct bstp_timer bp_migrate_delay_timer; - struct bstp_timer bp_recent_backup_timer; - struct bstp_timer bp_recent_root_timer; - struct bstp_timer bp_tc_timer; + LIST_ENTRY(bstp_port) bp_next; + struct ifnet *bp_ifp; /* parent if */ + struct bstp_state *bp_bs; + uint8_t bp_active; + uint8_t bp_protover; + uint32_t bp_flags; + uint32_t bp_path_cost; + uint16_t bp_port_msg_age; + uint16_t bp_port_max_age; + uint16_t bp_port_fdelay; + uint16_t bp_port_htime; + uint16_t bp_desg_msg_age; + uint16_t bp_desg_max_age; + uint16_t bp_desg_fdelay; + uint16_t bp_desg_htime; + struct bstp_timer bp_edge_delay_timer; + struct bstp_timer bp_forward_delay_timer; + struct bstp_timer bp_hello_timer; + struct bstp_timer bp_message_age_timer; + struct bstp_timer bp_migrate_delay_timer; + struct bstp_timer bp_recent_backup_timer; + struct bstp_timer bp_recent_root_timer; + struct bstp_timer bp_tc_timer; struct bstp_config_unit bp_msg_cu; - struct bstp_pri_vector bp_desg_pv; - struct bstp_pri_vector bp_port_pv; - uint16_t bp_port_id; - uint8_t bp_state; - uint8_t bp_tcstate; - uint8_t bp_role; - uint8_t bp_infois; - uint8_t bp_tc_ack; - uint8_t bp_tc_prop; - uint8_t bp_fdbflush; - uint8_t bp_priority; - uint8_t bp_ptp_link; - uint8_t bp_agree; - uint8_t bp_agreed; - uint8_t bp_sync; - uint8_t bp_synced; - uint8_t bp_proposing; - uint8_t bp_proposed; - uint8_t bp_operedge; - uint8_t bp_reroot; - uint8_t bp_rcvdtc; - uint8_t bp_rcvdtca; - uint8_t bp_rcvdtcn; - uint32_t bp_forward_transitions; - uint8_t bp_txcount; - 
struct bstp_task bp_statetask; - struct bstp_task bp_rtagetask; - uint32_t bp_if_link_state; /* cache of the parent if link state */ + struct bstp_pri_vector bp_desg_pv; + struct bstp_pri_vector bp_port_pv; + uint16_t bp_port_id; + uint8_t bp_state; + uint8_t bp_tcstate; + uint8_t bp_role; + uint8_t bp_infois; + uint8_t bp_tc_ack; + uint8_t bp_tc_prop; + uint8_t bp_fdbflush; + uint8_t bp_priority; + uint8_t bp_ptp_link; + uint8_t bp_agree; + uint8_t bp_agreed; + uint8_t bp_sync; + uint8_t bp_synced; + uint8_t bp_proposing; + uint8_t bp_proposed; + uint8_t bp_operedge; + uint8_t bp_reroot; + uint8_t bp_rcvdtc; + uint8_t bp_rcvdtca; + uint8_t bp_rcvdtcn; + uint32_t bp_forward_transitions; + uint8_t bp_txcount; + struct bstp_task bp_statetask; + struct bstp_task bp_rtagetask; + uint32_t bp_if_link_state; /* cache of the parent if link state */ }; /* @@ -381,54 +381,54 @@ struct bstp_port { * Software state for each bridge STP. */ struct bstp_state { - LIST_ENTRY(bstp_state) bs_list; - uint8_t bs_running; - lck_mtx_t *bs_mtx; - struct bstp_pri_vector bs_bridge_pv; - struct bstp_pri_vector bs_root_pv; - struct bstp_port *bs_root_port; - uint8_t bs_protover; - uint16_t bs_migration_delay; - uint16_t bs_edge_delay; - uint16_t bs_bridge_max_age; - uint16_t bs_bridge_fdelay; - uint16_t bs_bridge_htime; - uint16_t bs_root_msg_age; - uint16_t bs_root_max_age; - uint16_t bs_root_fdelay; - uint16_t bs_root_htime; - uint16_t bs_hold_time; - uint16_t bs_bridge_priority; - uint8_t bs_txholdcount; - uint8_t bs_allsynced; - struct bstp_timer bs_link_timer; - struct timeval bs_last_tc_time; - LIST_HEAD(, bstp_port) bs_bplist; - bstp_state_cb_t bs_state_cb; - bstp_rtage_cb_t bs_rtage_cb; + LIST_ENTRY(bstp_state) bs_list; + uint8_t bs_running; + lck_mtx_t *bs_mtx; + struct bstp_pri_vector bs_bridge_pv; + struct bstp_pri_vector bs_root_pv; + struct bstp_port *bs_root_port; + uint8_t bs_protover; + uint16_t bs_migration_delay; + uint16_t bs_edge_delay; + uint16_t bs_bridge_max_age; + uint16_t bs_bridge_fdelay; + uint16_t bs_bridge_htime; + uint16_t bs_root_msg_age; + uint16_t bs_root_max_age; + uint16_t bs_root_fdelay; + uint16_t bs_root_htime; + uint16_t bs_hold_time; + uint16_t bs_bridge_priority; + uint8_t bs_txholdcount; + uint8_t bs_allsynced; + struct bstp_timer bs_link_timer; + struct timeval bs_last_tc_time; + LIST_HEAD(, bstp_port) bs_bplist; + bstp_state_cb_t bs_state_cb; + bstp_rtage_cb_t bs_rtage_cb; }; -void bstp_attach(struct bstp_state *, struct bstp_cb_ops *); -void bstp_detach(struct bstp_state *); -void bstp_init(struct bstp_state *); -void bstp_stop(struct bstp_state *); -int bstp_create(struct bstp_state *, struct bstp_port *, struct ifnet *); -int bstp_enable(struct bstp_port *); -void bstp_disable(struct bstp_port *); -void bstp_destroy(struct bstp_port *); -void bstp_linkstate(struct ifnet *, int); -int bstp_set_htime(struct bstp_state *, int); -int bstp_set_fdelay(struct bstp_state *, int); -int bstp_set_maxage(struct bstp_state *, int); -int bstp_set_holdcount(struct bstp_state *, int); -int bstp_set_protocol(struct bstp_state *, int); -int bstp_set_priority(struct bstp_state *, int); -int bstp_set_port_priority(struct bstp_port *, int); -int bstp_set_path_cost(struct bstp_port *, uint32_t); -int bstp_set_edge(struct bstp_port *, int); -int bstp_set_autoedge(struct bstp_port *, int); -int bstp_set_ptp(struct bstp_port *, int); -int bstp_set_autoptp(struct bstp_port *, int); +void bstp_attach(struct bstp_state *, struct bstp_cb_ops *); +void bstp_detach(struct bstp_state *); +void 
bstp_init(struct bstp_state *); +void bstp_stop(struct bstp_state *); +int bstp_create(struct bstp_state *, struct bstp_port *, struct ifnet *); +int bstp_enable(struct bstp_port *); +void bstp_disable(struct bstp_port *); +void bstp_destroy(struct bstp_port *); +void bstp_linkstate(struct ifnet *, int); +int bstp_set_htime(struct bstp_state *, int); +int bstp_set_fdelay(struct bstp_state *, int); +int bstp_set_maxage(struct bstp_state *, int); +int bstp_set_holdcount(struct bstp_state *, int); +int bstp_set_protocol(struct bstp_state *, int); +int bstp_set_priority(struct bstp_state *, int); +int bstp_set_port_priority(struct bstp_port *, int); +int bstp_set_path_cost(struct bstp_port *, uint32_t); +int bstp_set_edge(struct bstp_port *, int); +int bstp_set_autoedge(struct bstp_port *, int); +int bstp_set_ptp(struct bstp_port *, int); +int bstp_set_autoptp(struct bstp_port *, int); struct mbuf *bstp_input(struct bstp_port *, struct ifnet *, struct mbuf *); void bstp_sys_init(void); @@ -436,4 +436,3 @@ void bstp_sys_init(void); #endif /* XNU_KERNEL_PRIVATE */ #endif /* __BRIDGESTP_H__ */ - diff --git a/bsd/net/classq/classq.c b/bsd/net/classq/classq.c index 35d86188f..434a18dfa 100644 --- a/bsd/net/classq/classq.c +++ b/bsd/net/classq/classq.c @@ -74,12 +74,12 @@ #include -u_int32_t classq_verbose = 0; /* more noise if greater than 1 */ +u_int32_t classq_verbose = 0; /* more noise if greater than 1 */ -SYSCTL_NODE(_net, OID_AUTO, classq, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "classq"); +SYSCTL_NODE(_net, OID_AUTO, classq, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "classq"); -SYSCTL_UINT(_net_classq, OID_AUTO, verbose, CTLFLAG_RW|CTLFLAG_LOCKED, - &classq_verbose, 0, "Class queue verbosity level"); +SYSCTL_UINT(_net_classq, OID_AUTO, verbose, CTLFLAG_RW | CTLFLAG_LOCKED, + &classq_verbose, 0, "Class queue verbosity level"); void _qinit(class_queue_t *q, int type, int lim, classq_pkt_type_t ptype) @@ -177,20 +177,22 @@ _getq(class_queue_t *q) if (pkt == NULL) { VERIFY(qlen(q) == 0); - if (qsize(q) > 0) + if (qsize(q) > 0) { qsize(q) = 0; - return (NULL); + } + return NULL; } VERIFY(qlen(q) > 0); qlen(q)--; /* qsize is an approximation, so adjust if necessary */ - if (((int)qsize(q) - pkt_len) > 0) + if (((int)qsize(q) - pkt_len) > 0) { qsize(q) -= pkt_len; - else if (qsize(q) != 0) + } else if (qsize(q) != 0) { qsize(q) = 0; + } - return (pkt); + return pkt; } static void * @@ -233,27 +235,28 @@ _getq_flow_or_scidx(class_queue_t *q, u_int32_t val, boolean_t isflowid) qlen(q)--; /* qsize is an approximation, so adjust if necessary */ - if (((int)qsize(q) - pkt_len) > 0) + if (((int)qsize(q) - pkt_len) > 0) { qsize(q) -= pkt_len; - else if (qsize(q) != 0) + } else if (qsize(q) != 0) { qsize(q) = 0; + } } - return (pkt); + return pkt; } /* get a packet of a specific flow beginning from the head of the queue */ void * _getq_flow(class_queue_t *q, u_int32_t flow) { - return (_getq_flow_or_scidx(q, flow, TRUE)); + return _getq_flow_or_scidx(q, flow, TRUE); } /* Get a packet whose MBUF_SCIDX() < scidx from head of queue */ void * _getq_scidx_lt(class_queue_t *q, u_int32_t scidx) { - return (_getq_flow_or_scidx(q, scidx, FALSE)); + return _getq_flow_or_scidx(q, scidx, FALSE); } /* get all packets (chained) starting from the head of the queue */ @@ -266,8 +269,9 @@ _getq_all(class_queue_t *q, void **last, u_int32_t *qlenp, switch (qptype(q)) { case QP_MBUF: pkt = MBUFQ_FIRST(&qmbufq(q)); - if (last != NULL) + if (last != NULL) { *last = MBUFQ_LAST(&qmbufq(q)); + } MBUFQ_INIT(&qmbufq(q)); break; @@ -277,15 +281,17 @@ 
_getq_all(class_queue_t *q, void **last, u_int32_t *qlenp, /* NOTREACHED */ } - if (qlenp != NULL) + if (qlenp != NULL) { *qlenp = qlen(q); - if (qsizep != NULL) + } + if (qsizep != NULL) { *qsizep = qsize(q); + } qlen(q) = 0; qsize(q) = 0; - return (pkt); + return pkt; } static inline struct mbuf * @@ -311,10 +317,11 @@ _getq_tail_mbuf(class_queue_t *q) --qlen(q); /* qsize is an approximation, so adjust if necessary */ - if (((int)qsize(q) - m_length(m)) > 0) + if (((int)qsize(q) - m_length(m)) > 0) { qsize(q) -= m_length(m); - else if (qsize(q) != 0) + } else if (qsize(q) != 0) { qsize(q) = 0; + } if (qempty(q)) { VERIFY(m == MBUFQ_FIRST(head)); @@ -324,7 +331,7 @@ _getq_tail_mbuf(class_queue_t *q) head->mq_last = &MBUFQ_NEXT(n); } } - return (m); + return m; } /* drop a packet at the tail of the queue */ @@ -343,7 +350,7 @@ _getq_tail(class_queue_t *q) /* NOTREACHED */ } - return (t); + return t; } static inline struct mbuf * @@ -360,46 +367,51 @@ _getq_random_mbuf(class_queue_t *q) n = qlen(q); if (n == 0) { VERIFY(MBUFQ_EMPTY(head)); - if (qsize(q) > 0) + if (qsize(q) > 0) { qsize(q) = 0; - return (NULL); + } + return NULL; } m = MBUFQ_FIRST(head); - read_frandom(&rnd, sizeof (rnd)); + read_frandom(&rnd, sizeof(rnd)); n = (rnd % n) + 1; if (n == 1) { - if ((MBUFQ_FIRST(head) = MBUFQ_NEXT(m)) == NULL) + if ((MBUFQ_FIRST(head) = MBUFQ_NEXT(m)) == NULL) { (head)->mq_last = &MBUFQ_FIRST(head); + } } else { struct mbuf *p = NULL; VERIFY(n > 1); while (n--) { - if (MBUFQ_NEXT(m) == NULL) + if (MBUFQ_NEXT(m) == NULL) { break; + } p = m; m = MBUFQ_NEXT(m); } VERIFY(p != NULL && MBUFQ_NEXT(p) == m); - if ((MBUFQ_NEXT(p) = MBUFQ_NEXT(m)) == NULL) + if ((MBUFQ_NEXT(p) = MBUFQ_NEXT(m)) == NULL) { (head)->mq_last = &MBUFQ_NEXT(p); + } } VERIFY(qlen(q) > 0); --qlen(q); /* qsize is an approximation, so adjust if necessary */ - if (((int)qsize(q) - m_length(m)) > 0) + if (((int)qsize(q) - m_length(m)) > 0) { qsize(q) -= m_length(m); - else if (qsize(q) != 0) + } else if (qsize(q) != 0) { qsize(q) = 0; + } MBUFQ_NEXT(m) = NULL; - return (m); + return m; } /* randomly select a packet in the queue */ @@ -418,7 +430,7 @@ _getq_random(class_queue_t *q) /* NOTREACHED */ } - return (r); + return r; } static inline void @@ -428,13 +440,15 @@ _removeq_mbuf(class_queue_t *q, struct mbuf *m) struct mbuf *m0, **mtail; m0 = MBUFQ_FIRST(head); - if (m0 == NULL) + if (m0 == NULL) { return; + } if (m0 != m) { while (MBUFQ_NEXT(m0) != m) { - if (m0 == NULL) + if (m0 == NULL) { return; + } m0 = MBUFQ_NEXT(m0); } mtail = &MBUFQ_NEXT(m0); @@ -443,17 +457,19 @@ _removeq_mbuf(class_queue_t *q, struct mbuf *m) } *mtail = MBUFQ_NEXT(m); - if (*mtail == NULL) + if (*mtail == NULL) { head->mq_last = mtail; + } VERIFY(qlen(q) > 0); --qlen(q); /* qsize is an approximation, so adjust if necessary */ - if (((int)qsize(q) - m_length(m)) > 0) + if (((int)qsize(q) - m_length(m)) > 0) { qsize(q) -= m_length(m); - else if (qsize(q) != 0) + } else if (qsize(q) != 0) { qsize(q) = 0; + } MBUFQ_NEXT(m) = NULL; } @@ -510,19 +526,23 @@ _flushq_flow_mbuf(class_queue_t *q, u_int32_t flow, u_int32_t *cnt, qlen(q) -= c; /* qsize is an approximation, so adjust if necessary */ - if (((int)qsize(q) - l) > 0) + if (((int)qsize(q) - l) > 0) { qsize(q) -= l; - else if (qsize(q) != 0) + } else if (qsize(q) != 0) { qsize(q) = 0; + } } - if (!MBUFQ_EMPTY(&freeq)) + if (!MBUFQ_EMPTY(&freeq)) { m_freem_list(MBUFQ_FIRST(&freeq)); + } - if (cnt != NULL) + if (cnt != NULL) { *cnt = c; - if (len != NULL) + } + if (len != NULL) { *len = l; + } } void diff 
--git a/bsd/net/classq/classq.h b/bsd/net/classq/classq.h index f36f9d727..93ded92bc 100644 --- a/bsd/net/classq/classq.h +++ b/bsd/net/classq/classq.h @@ -65,7 +65,7 @@ * class queue definitions extracted from rm_class.h. */ #ifndef _NET_CLASSQ_CLASSQ_H_ -#define _NET_CLASSQ_CLASSQ_H_ +#define _NET_CLASSQ_CLASSQ_H_ #ifdef PRIVATE #ifdef __cplusplus @@ -77,7 +77,7 @@ extern "C" { */ typedef enum classq_pkt_type { QP_INVALID = 0, - QP_MBUF, /* mbuf packet */ + QP_MBUF, /* mbuf packet */ } classq_pkt_type_t; /* @@ -97,17 +97,17 @@ typedef enum classq_state { QS_SUSPENDED } classq_state_t; -#define DEFAULT_QLIMIT 128 /* default */ +#define DEFAULT_QLIMIT 128 /* default */ -#define CLASSQ_DEQUEUE_MAX_PKT_LIMIT 2048 -#define CLASSQ_DEQUEUE_MAX_BYTE_LIMIT (1024 * 1024) +#define CLASSQ_DEQUEUE_MAX_PKT_LIMIT 2048 +#define CLASSQ_DEQUEUE_MAX_BYTE_LIMIT (1024 * 1024) /* * generic packet counter */ struct pktcntr { - u_int64_t packets; - u_int64_t bytes; + u_int64_t packets; + u_int64_t bytes; }; #ifdef BSD_KERNEL_PRIVATE @@ -122,46 +122,46 @@ typedef struct _class_queue_ { union { MBUFQ_HEAD(mq_head) __mbufq; /* mbuf packet queue */ } __pktq_u; - u_int32_t qlen; /* Queue length (in number of packets) */ - u_int32_t qlim; /* Queue limit (in number of packets*) */ - u_int64_t qsize; /* Approx. queue size (in number of bytes) */ - classq_type_t qtype; /* Queue type */ - classq_state_t qstate; /* Queue state */ - classq_pkt_type_t qptype; /* Packet type */ + u_int32_t qlen; /* Queue length (in number of packets) */ + u_int32_t qlim; /* Queue limit (in number of packets*) */ + u_int64_t qsize; /* Approx. queue size (in number of bytes) */ + classq_type_t qtype; /* Queue type */ + classq_state_t qstate; /* Queue state */ + classq_pkt_type_t qptype; /* Packet type */ } class_queue_t; -#define qmbufq(q) (q)->__pktq_u.__mbufq /* Get mbuf packet queue */ -#define qptype(q) (q)->qptype /* Get queue packet type */ -#define qtype(q) (q)->qtype /* Get queue type */ -#define qstate(q) (q)->qstate /* Get queue state */ -#define qlimit(q) (q)->qlim /* Max packets to be queued */ -#define qlen(q) (q)->qlen /* Current queue length. */ -#define qsize(q) (q)->qsize /* Approx. bytes in queue */ - -#define qhead(q) MBUFQ_FIRST(&qmbufq(q)) - -#define qempty(q) (qlen(q) == 0) /* Is the queue empty?? */ -#define q_is_red(q) (qtype(q) == Q_RED) /* Is the queue a RED queue */ -#define q_is_rio(q) (qtype(q) == Q_RIO) /* Is the queue a RIO queue */ -#define q_is_blue(q) (qtype(q) == Q_BLUE) /* Is the queue a BLUE queue */ -#define q_is_sfb(q) (qtype(q) == Q_SFB) /* Is the queue a SFB queue */ -#define q_is_red_or_rio(q) (qtype(q) == Q_RED || qtype(q) == Q_RIO) -#define q_is_suspended(q) (qstate(q) == QS_SUSPENDED) - -#define PKTCNTR_ADD(_cntr, _pkt, _len) do { \ - (_cntr)->packets += (_pkt); \ - (_cntr)->bytes += (_len); \ +#define qmbufq(q) (q)->__pktq_u.__mbufq /* Get mbuf packet queue */ +#define qptype(q) (q)->qptype /* Get queue packet type */ +#define qtype(q) (q)->qtype /* Get queue type */ +#define qstate(q) (q)->qstate /* Get queue state */ +#define qlimit(q) (q)->qlim /* Max packets to be queued */ +#define qlen(q) (q)->qlen /* Current queue length. */ +#define qsize(q) (q)->qsize /* Approx. bytes in queue */ + +#define qhead(q) MBUFQ_FIRST(&qmbufq(q)) + +#define qempty(q) (qlen(q) == 0) /* Is the queue empty?? 
*/ +#define q_is_red(q) (qtype(q) == Q_RED) /* Is the queue a RED queue */ +#define q_is_rio(q) (qtype(q) == Q_RIO) /* Is the queue a RIO queue */ +#define q_is_blue(q) (qtype(q) == Q_BLUE) /* Is the queue a BLUE queue */ +#define q_is_sfb(q) (qtype(q) == Q_SFB) /* Is the queue a SFB queue */ +#define q_is_red_or_rio(q) (qtype(q) == Q_RED || qtype(q) == Q_RIO) +#define q_is_suspended(q) (qstate(q) == QS_SUSPENDED) + +#define PKTCNTR_ADD(_cntr, _pkt, _len) do { \ + (_cntr)->packets += (_pkt); \ + (_cntr)->bytes += (_len); \ } while (0) -#define PKTCNTR_CLEAR(_cntr) do { \ - (_cntr)->packets = 0; \ - (_cntr)->bytes = 0; \ +#define PKTCNTR_CLEAR(_cntr) do { \ + (_cntr)->packets = 0; \ + (_cntr)->bytes = 0; \ } while (0) /* flags for mark_ecn() */ -#define CLASSQF_ECN4 0x01 /* use packet marking for IPv4 packets */ -#define CLASSQF_ECN6 0x02 /* use packet marking for IPv6 packets */ -#define CLASSQF_ECN (CLASSQF_ECN4 | CLASSQF_ECN6) +#define CLASSQF_ECN4 0x01 /* use packet marking for IPv4 packets */ +#define CLASSQF_ECN6 0x02 /* use packet marking for IPv6 packets */ +#define CLASSQF_ECN (CLASSQF_ECN4 | CLASSQF_ECN6) extern u_int32_t classq_verbose; @@ -184,8 +184,8 @@ extern void classq_init(void); #if PF_ECN extern u_int8_t read_dsfield(struct mbuf *, struct pf_mtag *); -extern void write_dsfield(struct mbuf *, struct pf_mtag *, u_int8_t); -extern int mark_ecn(struct mbuf *, struct pf_mtag *, int); +extern void write_dsfield(struct mbuf *, struct pf_mtag *, u_int8_t); +extern int mark_ecn(struct mbuf *, struct pf_mtag *, int); #endif /* PF_ECN */ #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/net/classq/classq_blue.h b/bsd/net/classq/classq_blue.h index 777122b94..c349a57f8 100644 --- a/bsd/net/classq/classq_blue.h +++ b/bsd/net/classq/classq_blue.h @@ -56,7 +56,7 @@ */ #ifndef _NET_CLASSQ_CLASSQ_BLUE_H_ -#define _NET_CLASSQ_CLASSQ_BLUE_H_ +#define _NET_CLASSQ_CLASSQ_BLUE_H_ #ifdef PRIVATE #ifdef BSD_KERNEL_PRIVATE @@ -68,11 +68,11 @@ extern "C" { #endif struct blue_stats { - int32_t q_pmark; - u_int32_t _pad; - u_int64_t drop_forced; - u_int64_t drop_unforced; - u_int64_t marked_packets; + int32_t q_pmark; + u_int32_t _pad; + u_int64_t drop_forced; + u_int64_t drop_unforced; + u_int64_t marked_packets; }; #ifdef __cplusplus diff --git a/bsd/net/classq/classq_fq_codel.c b/bsd/net/classq/classq_fq_codel.c index f004be340..75a568d2c 100644 --- a/bsd/net/classq/classq_fq_codel.c +++ b/bsd/net/classq/classq_fq_codel.c @@ -47,23 +47,24 @@ #include #include -static uint32_t flowq_size; /* size of flowq */ -static struct mcache *flowq_cache = NULL; /* mcache for flowq */ +static uint32_t flowq_size; /* size of flowq */ +static struct mcache *flowq_cache = NULL; /* mcache for flowq */ -#define FQ_ZONE_MAX (32 * 1024) /* across all interfaces */ +#define FQ_ZONE_MAX (32 * 1024) /* across all interfaces */ -#define DTYPE_NODROP 0 /* no drop */ -#define DTYPE_FORCED 1 /* a "forced" drop */ -#define DTYPE_EARLY 2 /* an "unforced" (early) drop */ +#define DTYPE_NODROP 0 /* no drop */ +#define DTYPE_FORCED 1 /* a "forced" drop */ +#define DTYPE_EARLY 2 /* an "unforced" (early) drop */ void fq_codel_init(void) { - if (flowq_cache != NULL) + if (flowq_cache != NULL) { return; + } - flowq_size = sizeof (fq_t); - flowq_cache = mcache_create("fq.flowq", flowq_size, sizeof (uint64_t), + flowq_size = sizeof(fq_t); + flowq_cache = mcache_create("fq.flowq", flowq_size, sizeof(uint64_t), 0, MCR_SLEEP); if (flowq_cache == NULL) { panic("%s: failed to allocate flowq_cache", __func__); @@ -84,7 +85,7 @@ 
fq_alloc(classq_pkt_type_t ptype) fq = mcache_alloc(flowq_cache, MCR_SLEEP); if (fq == NULL) { log(LOG_ERR, "%s: unable to allocate from flowq_cache\n", __func__); - return (NULL); + return NULL; } bzero(fq, flowq_size); @@ -92,7 +93,7 @@ fq_alloc(classq_pkt_type_t ptype) if (ptype == QP_MBUF) { MBUFQ_INIT(&fq->fq_mbufq); } - return (fq); + return fq; } void @@ -111,8 +112,9 @@ fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl, u_int64_t maxgetqtime; if (FQ_IS_DELAYHIGH(flowq) || flowq->fq_getqtime == 0 || fq_empty(flowq) || - flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) + flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) { return; + } maxgetqtime = flowq->fq_getqtime + fqs->fqs_update_interval; if ((*now) > maxgetqtime) { /* @@ -133,15 +135,17 @@ fq_head_drop(fq_if_t *fqs, fq_t *fq) struct ifclassq *ifq = fqs->fqs_ifq; _PKTSCHED_PKT_INIT(&pkt); - if (fq_getq_flow_internal(fqs, fq, &pkt) == NULL) + if (fq_getq_flow_internal(fqs, fq, &pkt) == NULL) { return; + } pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL, NULL, NULL); *pkt_timestamp = 0; - if (pkt.pktsched_ptype == QP_MBUF) + if (pkt.pktsched_ptype == QP_MBUF) { *pkt_flags &= ~PKTF_PRIV_GUARDED; + } IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt)); IFCQ_CONVERT_LOCK(ifq); @@ -184,7 +188,7 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) /* drop the packet if we could not allocate a flow queue */ fq_cl->fcl_stat.fcl_drop_memfailure++; IFCQ_CONVERT_LOCK(fqs->fqs_ifq); - return (CLASSQEQ_DROP); + return CLASSQEQ_DROP; } VERIFY(fq->fq_ptype == pkt->pktsched_ptype); @@ -215,7 +219,6 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) } fq_cl->fcl_stat.fcl_drop_early++; } - } /* Set the return code correctly */ @@ -290,14 +293,14 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) fq_if_is_flow_heavy(fqs, fq); } else { IFCQ_CONVERT_LOCK(fqs->fqs_ifq); - return ((ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP); + return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP; } /* * If the queue is not currently active, add it to the end of new * flows list for that service class. 
*/ - if ((fq->fq_flags & (FQF_NEW_FLOW|FQF_OLD_FLOW)) == 0) { + if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) { VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL); STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink); fq->fq_flags |= FQF_NEW_FLOW; @@ -306,7 +309,7 @@ fq_addq(fq_if_t *fqs, pktsched_pkt_t *pkt, fq_if_classq_t *fq_cl) fq->fq_deficit = fq_cl->fcl_quantum; } - return (ret); + return ret; } void * @@ -318,8 +321,9 @@ fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt) struct ifclassq *ifq = fqs->fqs_ifq; fq_dequeue(fq, p); - if (p == NULL) - return (NULL); + if (p == NULL) { + return NULL; + } pktsched_pkt_encap(pkt, fq->fq_ptype, p); plen = pktsched_get_pkt_len(pkt); @@ -334,10 +338,11 @@ fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt) IFCQ_DEC_BYTES(ifq, plen); /* Reset getqtime so that we don't count idle times */ - if (fq_empty(fq)) + if (fq_empty(fq)) { fq->fq_getqtime = 0; + } - return (p); + return p; } void * @@ -352,8 +357,9 @@ fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt) uint64_t *pkt_timestamp; p = fq_getq_flow_internal(fqs, fq, pkt); - if (p == NULL) - return (NULL); + if (p == NULL) { + return NULL; + } pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL, NULL, &pkt_tx_start_seq); @@ -362,17 +368,20 @@ fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt) now = (now_ts.tv_sec * NSEC_PER_SEC) + now_ts.tv_nsec; /* this will compute qdelay in nanoseconds */ - if (now > *pkt_timestamp) + if (now > *pkt_timestamp) { qdelay = now - *pkt_timestamp; + } fq_cl = &fqs->fqs_classq[fq->fq_sc_index]; if (fq->fq_min_qdelay == 0 || - (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay)) + (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay)) { fq->fq_min_qdelay = qdelay; + } if (now >= fq->fq_updatetime) { if (fq->fq_min_qdelay > fqs->fqs_target_qdelay) { - if (!FQ_IS_DELAYHIGH(fq)) + if (!FQ_IS_DELAYHIGH(fq)) { FQ_SET_DELAY_HIGH(fq); + } } else { FQ_CLEAR_DELAY_HIGH(fq); } @@ -398,8 +407,9 @@ fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt) fq_if_is_flow_heavy(fqs, fq); *pkt_timestamp = 0; - if (pkt->pktsched_ptype == QP_MBUF) + if (pkt->pktsched_ptype == QP_MBUF) { *pkt_flags &= ~PKTF_PRIV_GUARDED; + } - return (p); + return p; } diff --git a/bsd/net/classq/classq_fq_codel.h b/bsd/net/classq/classq_fq_codel.h index 7f9411802..29b81a9db 100644 --- a/bsd/net/classq/classq_fq_codel.h +++ b/bsd/net/classq/classq_fq_codel.h @@ -27,7 +27,7 @@ */ #ifndef _NET_CLASSQ_CLASSQ_FQ_CODEL_H -#define _NET_CLASSQ_CLASSQ_FQ_CODEL_H +#define _NET_CLASSQ_CLASSQ_FQ_CODEL_H #ifdef PRIVATE #ifdef BSD_KERNEL_PRIVATE #include @@ -39,12 +39,12 @@ extern "C" { #endif -#define FQ_MIN_FC_THRESHOLD_BYTES 7500 -#define FQ_IS_DELAYHIGH(_fq_) ((_fq_)->fq_flags & FQF_DELAY_HIGH) -#define FQ_SET_DELAY_HIGH(_fq_) do { \ +#define FQ_MIN_FC_THRESHOLD_BYTES 7500 +#define FQ_IS_DELAYHIGH(_fq_) ((_fq_)->fq_flags & FQF_DELAY_HIGH) +#define FQ_SET_DELAY_HIGH(_fq_) do { \ (_fq_)->fq_flags |= FQF_DELAY_HIGH; \ } while (0) -#define FQ_CLEAR_DELAY_HIGH(_fq_) do { \ +#define FQ_CLEAR_DELAY_HIGH(_fq_) do { \ (_fq_)->fq_flags &= ~FQF_DELAY_HIGH; \ } while (0) @@ -52,34 +52,34 @@ typedef struct flowq { union { MBUFQ_HEAD(mbufq_head) __mbufq; /* mbuf packet queue */ } __fq_pktq_u; -#define FQF_FLOWCTL_CAPABLE 0x01 /* Use flow control instead of drop */ -#define FQF_DELAY_HIGH 0x02 /* Min delay is greater than target */ -#define FQF_NEW_FLOW 0x04 /* Currently on new flows queue */ -#define FQF_OLD_FLOW 0x08 /* Currently on old flows queue */ 
-#define FQF_FLOWCTL_ON 0x10 /* Currently flow controlled */ - u_int8_t fq_flags; /* flags */ - u_int8_t fq_sc_index; /* service_class index */ - int16_t fq_deficit; /* Deficit for scheduling */ - u_int32_t fq_bytes; /* Number of bytes in the queue */ - u_int64_t fq_min_qdelay; /* min queue delay for Codel */ - u_int64_t fq_updatetime; /* next update interval */ - u_int64_t fq_getqtime; /* last dequeue time */ +#define FQF_FLOWCTL_CAPABLE 0x01 /* Use flow control instead of drop */ +#define FQF_DELAY_HIGH 0x02 /* Min delay is greater than target */ +#define FQF_NEW_FLOW 0x04 /* Currently on new flows queue */ +#define FQF_OLD_FLOW 0x08 /* Currently on old flows queue */ +#define FQF_FLOWCTL_ON 0x10 /* Currently flow controlled */ + u_int8_t fq_flags; /* flags */ + u_int8_t fq_sc_index; /* service_class index */ + int16_t fq_deficit; /* Deficit for scheduling */ + u_int32_t fq_bytes; /* Number of bytes in the queue */ + u_int64_t fq_min_qdelay; /* min queue delay for Codel */ + u_int64_t fq_updatetime; /* next update interval */ + u_int64_t fq_getqtime; /* last dequeue time */ SLIST_ENTRY(flowq) fq_hashlink; /* for flow queue hash table */ STAILQ_ENTRY(flowq) fq_actlink; /* for new/old flow queues */ - u_int32_t fq_flowhash; /* Flow hash */ - classq_pkt_type_t fq_ptype; /* Packet type */ + u_int32_t fq_flowhash; /* Flow hash */ + classq_pkt_type_t fq_ptype; /* Packet type */ } fq_t; -#define fq_mbufq __fq_pktq_u.__mbufq +#define fq_mbufq __fq_pktq_u.__mbufq -#define fq_empty(_q) MBUFQ_EMPTY(&(_q)->fq_mbufq) +#define fq_empty(_q) MBUFQ_EMPTY(&(_q)->fq_mbufq) -#define fq_enqueue(_q, _p) MBUFQ_ENQUEUE(&(_q)->fq_mbufq, (mbuf_t)_p) +#define fq_enqueue(_q, _p) MBUFQ_ENQUEUE(&(_q)->fq_mbufq, (mbuf_t)_p) -#define fq_dequeue(_q, _p) do { \ - mbuf_t _m; \ - MBUFQ_DEQUEUE(&(_q)->fq_mbufq, _m); \ - (_p) = _m; \ +#define fq_dequeue(_q, _p) do { \ + mbuf_t _m; \ + MBUFQ_DEQUEUE(&(_q)->fq_mbufq, _m); \ + (_p) = _m; \ } while (0) struct fq_codel_sched_data; diff --git a/bsd/net/classq/classq_red.h b/bsd/net/classq/classq_red.h index ceae286dc..f7a325983 100644 --- a/bsd/net/classq/classq_red.h +++ b/bsd/net/classq/classq_red.h @@ -56,7 +56,7 @@ */ #ifndef _NET_CLASSQ_CLASSQ_RED_H_ -#define _NET_CLASSQ_CLASSQ_RED_H_ +#define _NET_CLASSQ_CLASSQ_RED_H_ #ifdef PRIVATE #ifdef BSD_KERNEL_PRIVATE @@ -72,17 +72,17 @@ extern "C" { * disciplines (e.g., CBQ) */ struct redparams { - int th_min; /* red min threshold */ - int th_max; /* red max threshold */ - int inv_pmax; /* inverse of max drop probability */ + int th_min; /* red min threshold */ + int th_max; /* red max threshold */ + int inv_pmax; /* inverse of max drop probability */ }; struct red_stats { - int32_t q_avg; - u_int32_t _pad; - u_int32_t drop_forced; - u_int32_t drop_unforced; - u_int32_t marked_packets; + int32_t q_avg; + u_int32_t _pad; + u_int32_t drop_forced; + u_int32_t drop_unforced; + u_int32_t marked_packets; }; #ifdef __cplusplus diff --git a/bsd/net/classq/classq_rio.h b/bsd/net/classq/classq_rio.h index 8f0a12a07..3729765f7 100644 --- a/bsd/net/classq/classq_rio.h +++ b/bsd/net/classq/classq_rio.h @@ -56,7 +56,7 @@ */ #ifndef _NET_CLASSQ_CLASSQ_RIO_H_ -#define _NET_CLASSQ_CLASSQ_RIO_H_ +#define _NET_CLASSQ_CLASSQ_RIO_H_ #ifdef PRIVATE #ifdef BSD_KERNEL_PRIVATE @@ -71,7 +71,7 @@ extern "C" { * RIO: RED with IN/OUT bit * (extended to support more than 2 drop precedence values) */ -#define RIO_NDROPPREC 3 /* number of drop precedence values */ +#define RIO_NDROPPREC 3 /* number of drop precedence values */ #ifdef __cplusplus } diff --git 
a/bsd/net/classq/classq_sfb.c b/bsd/net/classq/classq_sfb.c index 438abf2c3..73f0ca03d 100644 --- a/bsd/net/classq/classq_sfb.c +++ b/bsd/net/classq/classq_sfb.c @@ -95,19 +95,19 @@ * across platforms for 1-word key (32-bit flowhash value). See flowhash.h * for other alternatives. We only need 16-bit hash output. */ -#define SFB_HASH net_flowhash_mh3_x86_32 -#define SFB_HASHMASK HASHMASK(16) +#define SFB_HASH net_flowhash_mh3_x86_32 +#define SFB_HASHMASK HASHMASK(16) -#define SFB_BINMASK(_x) \ +#define SFB_BINMASK(_x) \ ((_x) & HASHMASK(SFB_BINS_SHIFT)) -#define SFB_BINST(_sp, _l, _n, _c) \ +#define SFB_BINST(_sp, _l, _n, _c) \ (&(*(_sp)->sfb_bins)[_c].stats[_l][_n]) -#define SFB_BINFT(_sp, _l, _n, _c) \ +#define SFB_BINFT(_sp, _l, _n, _c) \ (&(*(_sp)->sfb_bins)[_c].freezetime[_l][_n]) -#define SFB_FC_LIST(_sp, _n) \ +#define SFB_FC_LIST(_sp, _n) \ (&(*(_sp)->sfb_fc_lists)[_n]) /* @@ -116,9 +116,9 @@ * uplink speed is not known, a default value is chosen and is randomized * to be within the following range. */ -#define HOLDTIME_BASE (100ULL * 1000 * 1000) /* 100ms */ -#define HOLDTIME_MIN (10ULL * 1000 * 1000) /* 10ms */ -#define HOLDTIME_MAX (100ULL * 1000 * 1000) /* 100ms */ +#define HOLDTIME_BASE (100ULL * 1000 * 1000) /* 100ms */ +#define HOLDTIME_MIN (10ULL * 1000 * 1000) /* 10ms */ +#define HOLDTIME_MAX (100ULL * 1000 * 1000) /* 100ms */ /* * The pboxtime parameter determines the bandwidth allocated for rogue @@ -126,34 +126,34 @@ * is not known, a default value is chosen and is randomized to be within * the following range. */ -#define PBOXTIME_BASE (300ULL * 1000 * 1000) /* 300ms */ -#define PBOXTIME_MIN (30ULL * 1000 * 1000) /* 30ms */ -#define PBOXTIME_MAX (300ULL * 1000 * 1000) /* 300ms */ +#define PBOXTIME_BASE (300ULL * 1000 * 1000) /* 300ms */ +#define PBOXTIME_MIN (30ULL * 1000 * 1000) /* 30ms */ +#define PBOXTIME_MAX (300ULL * 1000 * 1000) /* 300ms */ /* * Target queueing delay is the amount of extra delay that can be added * to accommodate variations in the link bandwidth. The queue should be * large enough to induce this much delay and nothing more than that. */ -#define TARGET_QDELAY_BASE (10ULL * 1000 * 1000) /* 10ms */ -#define TARGET_QDELAY_MIN (10ULL * 1000) /* 10us */ -#define TARGET_QDELAY_MAX (20ULL * 1000 * 1000 * 1000) /* 20s */ +#define TARGET_QDELAY_BASE (10ULL * 1000 * 1000) /* 10ms */ +#define TARGET_QDELAY_MIN (10ULL * 1000) /* 10us */ +#define TARGET_QDELAY_MAX (20ULL * 1000 * 1000 * 1000) /* 20s */ /* * Update interval for checking the extra delay added by the queue. This * should be 90-95 percentile of RTT experienced by any TCP connection * so that it will take care of the burst traffic. 
*/ -#define UPDATE_INTERVAL_BASE (100ULL * 1000 * 1000) /* 100ms */ -#define UPDATE_INTERVAL_MIN (100ULL * 1000 * 1000) /* 100ms */ -#define UPDATE_INTERVAL_MAX (10ULL * 1000 * 1000 * 1000) /* 10s */ +#define UPDATE_INTERVAL_BASE (100ULL * 1000 * 1000) /* 100ms */ +#define UPDATE_INTERVAL_MIN (100ULL * 1000 * 1000) /* 100ms */ +#define UPDATE_INTERVAL_MAX (10ULL * 1000 * 1000 * 1000) /* 10s */ -#define SFB_RANDOM(sp, tmin, tmax) ((sfb_random(sp) % (tmax)) + (tmin)) +#define SFB_RANDOM(sp, tmin, tmax) ((sfb_random(sp) % (tmax)) + (tmin)) -#define SFB_PKT_PBOX 0x1 /* in penalty box */ +#define SFB_PKT_PBOX 0x1 /* in penalty box */ /* The following mantissa values are in SFB_FP_SHIFT Q format */ -#define SFB_MAX_PMARK (1 << SFB_FP_SHIFT) /* Q14 representation of 1.00 */ +#define SFB_MAX_PMARK (1 << SFB_FP_SHIFT) /* Q14 representation of 1.00 */ /* * These are d1 (increment) and d2 (decrement) parameters, used to determine @@ -165,69 +165,69 @@ * against packet loss, it can quickly reach to a substantial increase in * traffic load. */ -#define SFB_INCREMENT 82 /* Q14 representation of 0.005 */ -#define SFB_DECREMENT 16 /* Q14 representation of 0.001 */ +#define SFB_INCREMENT 82 /* Q14 representation of 0.005 */ +#define SFB_DECREMENT 16 /* Q14 representation of 0.001 */ -#define SFB_PMARK_TH 16056 /* Q14 representation of 0.98 */ -#define SFB_PMARK_WARM 3276 /* Q14 representation of 0.2 */ +#define SFB_PMARK_TH 16056 /* Q14 representation of 0.98 */ +#define SFB_PMARK_WARM 3276 /* Q14 representation of 0.2 */ -#define SFB_PMARK_INC(_bin) do { \ - (_bin)->pmark += sfb_increment; \ - if ((_bin)->pmark > SFB_MAX_PMARK) \ - (_bin)->pmark = SFB_MAX_PMARK; \ +#define SFB_PMARK_INC(_bin) do { \ + (_bin)->pmark += sfb_increment; \ + if ((_bin)->pmark > SFB_MAX_PMARK) \ + (_bin)->pmark = SFB_MAX_PMARK; \ } while (0) -#define SFB_PMARK_DEC(_bin) do { \ - if ((_bin)->pmark > 0) { \ - (_bin)->pmark -= sfb_decrement; \ - if ((_bin)->pmark < 0) \ - (_bin)->pmark = 0; \ - } \ +#define SFB_PMARK_DEC(_bin) do { \ + if ((_bin)->pmark > 0) { \ + (_bin)->pmark -= sfb_decrement; \ + if ((_bin)->pmark < 0) \ + (_bin)->pmark = 0; \ + } \ } while (0) /* Minimum number of bytes in queue to get flow controlled */ -#define SFB_MIN_FC_THRESHOLD_BYTES 7500 +#define SFB_MIN_FC_THRESHOLD_BYTES 7500 -#define SFB_SET_DELAY_HIGH(_sp_, _q_) do { \ - (_sp_)->sfb_flags |= SFBF_DELAYHIGH; \ - (_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES, \ - (qsize((_q_)) >> 3)); \ +#define SFB_SET_DELAY_HIGH(_sp_, _q_) do { \ + (_sp_)->sfb_flags |= SFBF_DELAYHIGH; \ + (_sp_)->sfb_fc_threshold = max(SFB_MIN_FC_THRESHOLD_BYTES, \ + (qsize((_q_)) >> 3)); \ } while (0) -#define SFB_QUEUE_DELAYBASED(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYBASED) -#define SFB_IS_DELAYHIGH(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYHIGH) -#define SFB_QUEUE_DELAYBASED_MAXSIZE 2048 /* max pkts */ +#define SFB_QUEUE_DELAYBASED(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYBASED) +#define SFB_IS_DELAYHIGH(_sp_) ((_sp_)->sfb_flags & SFBF_DELAYHIGH) +#define SFB_QUEUE_DELAYBASED_MAXSIZE 2048 /* max pkts */ -#define HINTERVAL_MIN (10) /* 10 seconds */ -#define HINTERVAL_MAX (20) /* 20 seconds */ -#define SFB_HINTERVAL(sp) ((sfb_random(sp) % HINTERVAL_MAX) + HINTERVAL_MIN) +#define HINTERVAL_MIN (10) /* 10 seconds */ +#define HINTERVAL_MAX (20) /* 20 seconds */ +#define SFB_HINTERVAL(sp) ((sfb_random(sp) % HINTERVAL_MAX) + HINTERVAL_MIN) -#define DEQUEUE_DECAY 7 /* ilog2 of EWMA decay 
rate, (128) */ +#define DEQUEUE_SPIKE(_new, _old) \ ((u_int64_t)ABS((int64_t)(_new) - (int64_t)(_old)) > ((_old) << 11)) -#define ABS(v) (((v) > 0) ? (v) : -(v)) +#define ABS(v) (((v) > 0) ? (v) : -(v)) -#define SFB_ZONE_MAX 32 /* maximum elements in zone */ -#define SFB_ZONE_NAME "classq_sfb" /* zone name */ +#define SFB_ZONE_MAX 32 /* maximum elements in zone */ +#define SFB_ZONE_NAME "classq_sfb" /* zone name */ -#define SFB_BINS_ZONE_MAX 32 /* maximum elements in zone */ -#define SFB_BINS_ZONE_NAME "classq_sfb_bins" /* zone name */ +#define SFB_BINS_ZONE_MAX 32 /* maximum elements in zone */ +#define SFB_BINS_ZONE_NAME "classq_sfb_bins" /* zone name */ -#define SFB_FCL_ZONE_MAX 32 /* maximum elements in zone */ -#define SFB_FCL_ZONE_NAME "classq_sfb_fcl" /* zone name */ +#define SFB_FCL_ZONE_MAX 32 /* maximum elements in zone */ +#define SFB_FCL_ZONE_NAME "classq_sfb_fcl" /* zone name */ /* Place the flow control entries in current bin on level 0 */ -#define SFB_FC_LEVEL 0 +#define SFB_FC_LEVEL 0 -static unsigned int sfb_size; /* size of zone element */ -static struct zone *sfb_zone; /* zone for sfb */ +static unsigned int sfb_size; /* size of zone element */ +static struct zone *sfb_zone; /* zone for sfb */ -static unsigned int sfb_bins_size; /* size of zone element */ -static struct zone *sfb_bins_zone; /* zone for sfb_bins */ +static unsigned int sfb_bins_size; /* size of zone element */ +static struct zone *sfb_bins_zone; /* zone for sfb_bins */ -static unsigned int sfb_fcl_size; /* size of zone element */ -static struct zone *sfb_fcl_zone; /* zone for sfb_fc_lists */ +static unsigned int sfb_fcl_size; /* size of zone element */ +static struct zone *sfb_fcl_zone; /* zone for sfb_fc_lists */ /* internal function prototypes */ static u_int32_t sfb_random(struct sfb *); @@ -260,53 +260,53 @@ static int sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin); static void sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *, struct timespec *); -SYSCTL_NODE(_net_classq, OID_AUTO, sfb, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "SFB"); +SYSCTL_NODE(_net_classq, OID_AUTO, sfb, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "SFB"); -static u_int64_t sfb_holdtime = 0; /* 0 indicates "automatic" */ -SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, holdtime, CTLFLAG_RW|CTLFLAG_LOCKED, +static u_int64_t sfb_holdtime = 0; /* 0 indicates "automatic" */ +SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, holdtime, CTLFLAG_RW | CTLFLAG_LOCKED, &sfb_holdtime, "SFB freeze time in nanoseconds"); -static u_int64_t sfb_pboxtime = 0; /* 0 indicates "automatic" */ -SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, pboxtime, CTLFLAG_RW|CTLFLAG_LOCKED, +static u_int64_t sfb_pboxtime = 0; /* 0 indicates "automatic" */ +SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, pboxtime, CTLFLAG_RW | CTLFLAG_LOCKED, &sfb_pboxtime, "SFB penalty box time in nanoseconds"); static u_int64_t sfb_hinterval; -SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, hinterval, CTLFLAG_RW|CTLFLAG_LOCKED, +SYSCTL_QUAD(_net_classq_sfb, OID_AUTO, hinterval, CTLFLAG_RW | CTLFLAG_LOCKED, &sfb_hinterval, "SFB hash interval in nanoseconds"); static u_int32_t sfb_increment = SFB_INCREMENT; -SYSCTL_UINT(_net_classq_sfb, OID_AUTO, increment, CTLFLAG_RW|CTLFLAG_LOCKED, +SYSCTL_UINT(_net_classq_sfb, OID_AUTO, increment, CTLFLAG_RW | CTLFLAG_LOCKED, &sfb_increment, SFB_INCREMENT, "SFB increment [d1]"); static u_int32_t sfb_decrement = SFB_DECREMENT; -SYSCTL_UINT(_net_classq_sfb, OID_AUTO, decrement, CTLFLAG_RW|CTLFLAG_LOCKED, +SYSCTL_UINT(_net_classq_sfb, OID_AUTO, decrement, CTLFLAG_RW | CTLFLAG_LOCKED, 
&sfb_decrement, SFB_DECREMENT, "SFB decrement [d2]"); -static u_int32_t sfb_allocation = 0; /* 0 means "automatic" */ -SYSCTL_UINT(_net_classq_sfb, OID_AUTO, allocation, CTLFLAG_RW|CTLFLAG_LOCKED, +static u_int32_t sfb_allocation = 0; /* 0 means "automatic" */ +SYSCTL_UINT(_net_classq_sfb, OID_AUTO, allocation, CTLFLAG_RW | CTLFLAG_LOCKED, &sfb_allocation, 0, "SFB bin allocation"); static u_int32_t sfb_ratelimit = 0; -SYSCTL_UINT(_net_classq_sfb, OID_AUTO, ratelimit, CTLFLAG_RW|CTLFLAG_LOCKED, - &sfb_ratelimit, 0, "SFB rate limit"); +SYSCTL_UINT(_net_classq_sfb, OID_AUTO, ratelimit, CTLFLAG_RW | CTLFLAG_LOCKED, + &sfb_ratelimit, 0, "SFB rate limit"); -#define KBPS (1ULL * 1000) /* 1 Kbits per second */ -#define MBPS (1ULL * 1000 * 1000) /* 1 Mbits per second */ -#define GBPS (MBPS * 1000) /* 1 Gbits per second */ +#define KBPS (1ULL * 1000) /* 1 Kbits per second */ +#define MBPS (1ULL * 1000 * 1000) /* 1 Mbits per second */ +#define GBPS (MBPS * 1000) /* 1 Gbits per second */ struct sfb_time_tbl { - u_int64_t speed; /* uplink speed */ - u_int64_t holdtime; /* hold time */ - u_int64_t pboxtime; /* penalty box time */ + u_int64_t speed; /* uplink speed */ + u_int64_t holdtime; /* hold time */ + u_int64_t pboxtime; /* penalty box time */ }; static struct sfb_time_tbl sfb_ttbl[] = { - { 1 * MBPS, HOLDTIME_BASE * 1000, PBOXTIME_BASE * 1000 }, - { 10 * MBPS, HOLDTIME_BASE * 100, PBOXTIME_BASE * 100 }, - { 100 * MBPS, HOLDTIME_BASE * 10, PBOXTIME_BASE * 10 }, - { 1 * GBPS, HOLDTIME_BASE, PBOXTIME_BASE }, - { 10 * GBPS, HOLDTIME_BASE / 10, PBOXTIME_BASE / 10 }, - { 100 * GBPS, HOLDTIME_BASE / 100, PBOXTIME_BASE / 100 }, + { 1 * MBPS, HOLDTIME_BASE * 1000, PBOXTIME_BASE * 1000 }, + { 10 * MBPS, HOLDTIME_BASE * 100, PBOXTIME_BASE * 100 }, + { 100 * MBPS, HOLDTIME_BASE * 10, PBOXTIME_BASE * 10 }, + { 1 * GBPS, HOLDTIME_BASE, PBOXTIME_BASE }, + { 10 * GBPS, HOLDTIME_BASE / 10, PBOXTIME_BASE / 10 }, + { 100 * GBPS, HOLDTIME_BASE / 100, PBOXTIME_BASE / 100 }, { 0, 0, 0 } }; @@ -316,7 +316,7 @@ sfb_init(void) _CASSERT(SFBF_ECN4 == CLASSQF_ECN4); _CASSERT(SFBF_ECN6 == CLASSQF_ECN6); - sfb_size = sizeof (struct sfb); + sfb_size = sizeof(struct sfb); sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size, 0, SFB_ZONE_NAME); if (sfb_zone == NULL) { @@ -326,7 +326,7 @@ sfb_init(void) zone_change(sfb_zone, Z_EXPAND, TRUE); zone_change(sfb_zone, Z_CALLERACCT, TRUE); - sfb_bins_size = sizeof (*((struct sfb *)0)->sfb_bins); + sfb_bins_size = sizeof(*((struct sfb *)0)->sfb_bins); sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size, 0, SFB_BINS_ZONE_NAME); if (sfb_bins_zone == NULL) { @@ -336,7 +336,7 @@ sfb_init(void) zone_change(sfb_bins_zone, Z_EXPAND, TRUE); zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE); - sfb_fcl_size = sizeof (*((struct sfb *)0)->sfb_fc_lists); + sfb_fcl_size = sizeof(*((struct sfb *)0)->sfb_fc_lists); sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size, 0, SFB_FCL_ZONE_NAME); if (sfb_fcl_zone == NULL) { @@ -351,7 +351,7 @@ static u_int32_t sfb_random(struct sfb *sp) { IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd); - return (RandomULong()); + return RandomULong(); } static void @@ -368,8 +368,9 @@ sfb_calc_holdtime(struct sfb *sp, u_int64_t outbw) n = sfb_ttbl[0].holdtime; for (i = 0; sfb_ttbl[i].speed != 0; i++) { - if (outbw < sfb_ttbl[i].speed) + if (outbw < sfb_ttbl[i].speed) { break; + } n = sfb_ttbl[i].holdtime; } holdtime = n; @@ -391,8 +392,9 @@ sfb_calc_pboxtime(struct sfb *sp, u_int64_t outbw) n = sfb_ttbl[0].pboxtime; for (i = 0; sfb_ttbl[i].speed != 0; 
i++) { - if (outbw < sfb_ttbl[i].speed) + if (outbw < sfb_ttbl[i].speed) { break; + } n = sfb_ttbl[i].pboxtime; } pboxtime = n; @@ -414,10 +416,11 @@ sfb_calc_hinterval(struct sfb *sp, u_int64_t *t) hinterval = *t; } - if (sfb_hinterval != 0) + if (sfb_hinterval != 0) { hinterval = sfb_hinterval; - else if (t == NULL || hinterval == 0) + } else if (t == NULL || hinterval == 0) { hinterval = ((u_int64_t)SFB_HINTERVAL(sp) * NSEC_PER_SEC); + } net_nsectimer(&hinterval, &sp->sfb_hinterval); @@ -448,14 +451,14 @@ sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags) sp = zalloc(sfb_zone); if (sp == NULL) { log(LOG_ERR, "%s: SFB unable to allocate\n", if_name(ifp)); - return (NULL); + return NULL; } bzero(sp, sfb_size); if ((sp->sfb_bins = zalloc(sfb_bins_zone)) == NULL) { log(LOG_ERR, "%s: SFB unable to allocate bins\n", if_name(ifp)); sfb_destroy(sp); - return (NULL); + return NULL; } bzero(sp->sfb_bins, sfb_bins_size); @@ -463,12 +466,13 @@ sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags) log(LOG_ERR, "%s: SFB unable to allocate flow control lists\n", if_name(ifp)); sfb_destroy(sp); - return (NULL); + return NULL; } bzero(sp->sfb_fc_lists, sfb_fcl_size); - for (i = 0; i < SFB_BINS; ++i) + for (i = 0; i < SFB_BINS; ++i) { STAILQ_INIT(&SFB_FC_LIST(sp, i)->fclist); + } sp->sfb_ifp = ifp; sp->sfb_qlim = qlim; @@ -484,7 +488,7 @@ sfb_alloc(struct ifnet *ifp, u_int32_t qid, u_int32_t qlim, u_int32_t flags) sfb_resetq(sp, CLASSQ_EV_INIT); - return (sp); + return sp; } static void @@ -507,8 +511,9 @@ sfb_fclists_clean(struct sfb *sp) /* Move all the flow control entries to the flowadv list */ for (i = 0; i < SFB_BINS; ++i) { struct sfb_fcl *fcl = SFB_FC_LIST(sp, i); - if (!STAILQ_EMPTY(&fcl->fclist)) + if (!STAILQ_EMPTY(&fcl->fclist)) { sfb_fclist_append(sp, fcl); + } } } @@ -557,14 +562,16 @@ sfb_resetq(struct sfb *sp, cqev_t ev) sfb_calc_update_interval(sp, eff_rate); if (ev == CLASSQ_EV_LINK_DOWN || - ev == CLASSQ_EV_LINK_UP) + ev == CLASSQ_EV_LINK_UP) { sfb_fclists_clean(sp); + } - bzero(sp->sfb_bins, sizeof (*sp->sfb_bins)); - bzero(&sp->sfb_stats, sizeof (sp->sfb_stats)); + bzero(sp->sfb_bins, sizeof(*sp->sfb_bins)); + bzero(&sp->sfb_stats, sizeof(sp->sfb_stats)); - if (ev == CLASSQ_EV_LINK_DOWN || !classq_verbose) + if (ev == CLASSQ_EV_LINK_DOWN || !classq_verbose) { return; + } log(LOG_DEBUG, "%s: SFB qid=%d, holdtime=%llu nsec, " "pboxtime=%llu nsec, allocation=%d, drop_thresh=%d, " @@ -574,7 +581,7 @@ sfb_resetq(struct sfb *sp, cqev_t ev) if_name(ifp), sp->sfb_qid, (u_int64_t)sp->sfb_holdtime.tv_nsec, (u_int64_t)sp->sfb_pboxtime.tv_nsec, (u_int32_t)sp->sfb_allocation, (u_int32_t)sp->sfb_drop_thresh, - (int)sp->sfb_hinterval.tv_sec, (int)sizeof (*sp->sfb_bins), + (int)sp->sfb_hinterval.tv_sec, (int)sizeof(*sp->sfb_bins), eff_rate, (u_int64_t)sp->sfb_target_qdelay, (u_int64_t)sp->sfb_update_interval.tv_sec, (u_int64_t)sp->sfb_update_interval.tv_nsec, sp->sfb_flags); @@ -598,13 +605,13 @@ sfb_getstats(struct sfb *sp, struct sfb_stats *sps) net_timernsec(&sp->sfb_update_interval, &sps->update_interval); *(&(sps->sfbstats)) = *(&(sp->sfb_stats)); - _CASSERT(sizeof ((*sp->sfb_bins)[0].stats) == - sizeof (sps->binstats[0].stats)); + _CASSERT(sizeof((*sp->sfb_bins)[0].stats) == + sizeof(sps->binstats[0].stats)); bcopy(&(*sp->sfb_bins)[0].stats, &sps->binstats[0].stats, - sizeof (sps->binstats[0].stats)); + sizeof(sps->binstats[0].stats)); bcopy(&(*sp->sfb_bins)[1].stats, &sps->binstats[1].stats, - sizeof (sps->binstats[1].stats)); + 
sizeof(sps->binstats[1].stats)); } static void @@ -612,8 +619,9 @@ sfb_swap_bins(struct sfb *sp, u_int32_t len) { int i, j, s; - if (sp->sfb_flags & SFBF_SUSPENDED) + if (sp->sfb_flags & SFBF_SUSPENDED) { return; + } s = sp->sfb_current; VERIFY((s + (s ^ 1)) == 1); @@ -622,7 +630,7 @@ sfb_swap_bins(struct sfb *sp, u_int32_t len) sp->sfb_clearpkts = len; sp->sfb_stats.num_rehash++; - s = (sp->sfb_current ^= 1); /* flip the bit (swap current) */ + s = (sp->sfb_current ^= 1); /* flip the bit (swap current) */ if (classq_verbose) { log(LOG_DEBUG, "%s: SFB qid=%d, set %d is now current, " @@ -631,34 +639,38 @@ sfb_swap_bins(struct sfb *sp, u_int32_t len) /* clear freezetime for all current bins */ bzero(&(*sp->sfb_bins)[s].freezetime, - sizeof ((*sp->sfb_bins)[s].freezetime)); + sizeof((*sp->sfb_bins)[s].freezetime)); /* clear/adjust bin statistics and flow control lists */ for (i = 0; i < SFB_BINS; i++) { struct sfb_fcl *fcl = SFB_FC_LIST(sp, i); - if (!STAILQ_EMPTY(&fcl->fclist)) + if (!STAILQ_EMPTY(&fcl->fclist)) { sfb_fclist_append(sp, fcl); + } for (j = 0; j < SFB_LEVELS; j++) { struct sfbbinstats *cbin, *wbin; - cbin = SFB_BINST(sp, j, i, s); /* current */ - wbin = SFB_BINST(sp, j, i, s ^ 1); /* warm-up */ + cbin = SFB_BINST(sp, j, i, s); /* current */ + wbin = SFB_BINST(sp, j, i, s ^ 1); /* warm-up */ cbin->pkts = 0; cbin->bytes = 0; - if (cbin->pmark > SFB_MAX_PMARK) + if (cbin->pmark > SFB_MAX_PMARK) { cbin->pmark = SFB_MAX_PMARK; - if (cbin->pmark < 0) + } + if (cbin->pmark < 0) { cbin->pmark = 0; + } /* * Keep pmark from before to identify * non-responsives immediately. */ - if (wbin->pmark > SFB_PMARK_WARM) + if (wbin->pmark > SFB_PMARK_WARM) { wbin->pmark = SFB_PMARK_WARM; + } } } } @@ -687,20 +699,23 @@ sfb_pcheck(struct sfb *sp, uint32_t pkt_sfb_hash) if (SFB_BINST(sp, 0, SFB_BINMASK(pkt_sfb_hash8[(s << 1)]), s)->pmark < SFB_PMARK_TH || SFB_BINST(sp, 1, SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]), - s)->pmark < SFB_PMARK_TH) - return (0); + s)->pmark < SFB_PMARK_TH) { + return 0; + } #else /* SFB_LEVELS != 2 */ for (i = 0; i < SFB_LEVELS; i++) { - if (s == 0) /* set 0, bin index [0,1] */ + if (s == 0) { /* set 0, bin index [0,1] */ n = SFB_BINMASK(pkt_sfb_hash8[i]); - else /* set 1, bin index [2,3] */ + } else { /* set 1, bin index [2,3] */ n = SFB_BINMASK(pkt_sfb_hash8[i + 2]); + } - if (SFB_BINST(sp, i, n, s)->pmark < SFB_PMARK_TH) - return (0); + if (SFB_BINST(sp, i, n, s)->pmark < SFB_PMARK_TH) { + return 0; + } } #endif /* SFB_LEVELS != 2 */ - return (1); + return 1; } static int @@ -711,8 +726,9 @@ sfb_penalize(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t *pkt_sfb_flags, uint8_t *pkt_sfb_hash8 = (uint8_t *)&pkt_sfb_hash; /* If minimum pmark of current bins is < SFB_PMARK_TH, we're done */ - if (!sfb_ratelimit || !sfb_pcheck(sp, pkt_sfb_hash)) - return (0); + if (!sfb_ratelimit || !sfb_pcheck(sp, pkt_sfb_hash)) { + return 0; + } net_timersub(now, &sp->sfb_pboxfreeze, &delta); if (net_timercmp(&delta, &sp->sfb_pboxtime, <)) { @@ -732,20 +748,23 @@ sfb_penalize(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t *pkt_sfb_flags, /* Level 0: bin index at [0] for set 0; [2] for set 1 */ n = SFB_BINMASK(pkt_sfb_hash8[(w << 1)]); bin = SFB_BINST(sp, 0, n, w); - if (bin->pkts >= sp->sfb_allocation) + if (bin->pkts >= sp->sfb_allocation) { sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, w), now); + } /* Level 0: bin index at [1] for set 0; [3] for set 1 */ n = SFB_BINMASK(pkt_sfb_hash8[(w << 1) + 1]); bin = SFB_BINST(sp, 1, n, w); - if (bin->pkts >= sp->sfb_allocation) + if (bin->pkts >= 
sp->sfb_allocation) { sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, w), now); + } #else /* SFB_LEVELS != 2 */ for (i = 0; i < SFB_LEVELS; i++) { - if (w == 0) /* set 0, bin index [0,1] */ + if (w == 0) { /* set 0, bin index [0,1] */ n = SFB_BINMASK(pkt_sfb_hash8[i]); - else /* set 1, bin index [2,3] */ + } else { /* set 1, bin index [2,3] */ n = SFB_BINMASK(pkt_sfb_hash8[i + 2]); + } bin = SFB_BINST(sp, i, n, w); if (bin->pkts >= sp->sfb_allocation) { @@ -754,14 +773,14 @@ sfb_penalize(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t *pkt_sfb_flags, } } #endif /* SFB_LEVELS != 2 */ - return (1); + return 1; } /* non-conformant or else misclassified flow; queue it anyway */ *pkt_sfb_flags |= SFB_PKT_PBOX; *(&sp->sfb_pboxfreeze) = *now; - return (0); + return 0; } static void @@ -783,24 +802,25 @@ sfb_adjust_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft, /* increment/decrement marking probability */ *ft = *now; - if (inc) + if (inc) { SFB_PMARK_INC(bin); - else + } else { SFB_PMARK_DEC(bin); + } } static void sfb_decrement_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft, struct timespec *now) { - return (sfb_adjust_bin(sp, bin, ft, now, FALSE)); + return sfb_adjust_bin(sp, bin, ft, now, FALSE); } static void sfb_increment_bin(struct sfb *sp, struct sfbbinstats *bin, struct timespec *ft, struct timespec *now) { - return (sfb_adjust_bin(sp, bin, ft, now, TRUE)); + return sfb_adjust_bin(sp, bin, ft, now, TRUE); } static inline void @@ -830,21 +850,24 @@ sfb_dq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len, bin->pkts--; bin->bytes -= pkt_len; - if (bin->pkts == 0) + if (bin->pkts == 0) { sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now); + } /* Deliver flow control feedback to the sockets */ if (SFB_QUEUE_DELAYBASED(sp)) { if (!(SFB_IS_DELAYHIGH(sp)) || bin->bytes <= sp->sfb_fc_threshold || - bin->pkts == 0 || qsize == 0) + bin->pkts == 0 || qsize == 0) { fcl = SFB_FC_LIST(sp, n); + } } else if (bin->pkts <= (sp->sfb_allocation >> 2)) { - fcl = SFB_FC_LIST(sp, n); + fcl = SFB_FC_LIST(sp, n); } - if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist)) + if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist)) { sfb_fclist_append(sp, fcl); + } fcl = NULL; /* Level 1: bin index at [1] for set 0; [3] for set 1 */ @@ -854,34 +877,40 @@ sfb_dq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len, VERIFY(bin->pkts > 0 && bin->bytes >= (u_int64_t)pkt_len); bin->pkts--; bin->bytes -= pkt_len; - if (bin->pkts == 0) + if (bin->pkts == 0) { sfb_decrement_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now); + } #else /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */ for (i = 0; i < SFB_LEVELS; i++) { - if (s == 0) /* set 0, bin index [0,1] */ + if (s == 0) { /* set 0, bin index [0,1] */ n = SFB_BINMASK(pkt_sfb_hash8[i]); - else /* set 1, bin index [2,3] */ + } else { /* set 1, bin index [2,3] */ n = SFB_BINMASK(pkt_sfb_hash8[i + 2]); + } bin = SFB_BINST(sp, i, n, s); VERIFY(bin->pkts > 0 && bin->bytes >= pkt_len); bin->pkts--; bin->bytes -= pkt_len; - if (bin->pkts == 0) + if (bin->pkts == 0) { sfb_decrement_bin(sp, bin, SFB_BINFT(sp, i, n, s), now); - if (i != SFB_FC_LEVEL) + } + if (i != SFB_FC_LEVEL) { continue; + } if (SFB_QUEUE_DELAYBASED(sp)) { if (!(SFB_IS_DELAYHIGH(sp)) || - bin->bytes <= sp->sfb_fc_threshold) + bin->bytes <= sp->sfb_fc_threshold) { fcl = SFB_FC_LIST(sp, n); + } } else if (bin->pkts <= (sp->sfb_allocation >> 2)) { fcl = SFB_FC_LIST(sp, n); } - if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist)) + if (fcl != NULL && !STAILQ_EMPTY(&fcl->fclist)) 
{ sfb_fclist_append(sp, fcl); + } fcl = NULL; } #endif /* SFB_LEVELS != 2 || SFB_FC_LEVEL != 0 */ @@ -917,10 +946,11 @@ sfb_eq_update_bins(struct sfb *sp, uint32_t pkt_sfb_hash, uint32_t pkt_len) #else /* SFB_LEVELS != 2 */ for (i = 0; i < SFB_LEVELS; i++) { - if (s == 0) /* set 0, bin index [0,1] */ + if (s == 0) { /* set 0, bin index [0,1] */ n = SFB_BINMASK(pkt_sfb_hash8[i]); - else /* set 1, bin index [2,3] */ + } else { /* set 1, bin index [2,3] */ n = SFB_BINMASK(pkt_sfb_hash8[i + 2]); + } bin = SFB_BINST(sp, i, n, s); bin->pkts++; @@ -943,7 +973,7 @@ sfb_bin_addfcentry(struct sfb *sp, pktsched_pkt_t *pkt, uint32_t pkt_sfb_hash, if (flowid == 0) { sp->sfb_stats.null_flowid++; - return (FALSE); + return FALSE; } /* @@ -955,7 +985,7 @@ sfb_bin_addfcentry(struct sfb *sp, pktsched_pkt_t *pkt, uint32_t pkt_sfb_hash, if ((uint8_t)fce->fce_flowsrc_type == flowsrc && fce->fce_flowid == flowid) { /* Already on flow control list; just return */ - return (TRUE); + return TRUE; } } @@ -967,7 +997,7 @@ sfb_bin_addfcentry(struct sfb *sp, pktsched_pkt_t *pkt, uint32_t pkt_sfb_hash, sp->sfb_stats.flow_controlled++; } - return (fce != NULL); + return fce != NULL; } /* @@ -984,14 +1014,16 @@ sfb_bin_mark_or_drop(struct sfb *sp, struct sfbbinstats *bin) * bytes than the flowcontrol threshold. */ if (SFB_IS_DELAYHIGH(sp) && - bin->bytes >= (sp->sfb_fc_threshold << 1)) + bin->bytes >= (sp->sfb_fc_threshold << 1)) { ret = 1; + } } else { if (bin->pkts >= sp->sfb_allocation && - bin->pkts >= sp->sfb_drop_thresh) - ret = 1; /* drop or mark */ + bin->pkts >= sp->sfb_drop_thresh) { + ret = 1; /* drop or mark */ + } } - return (ret); + return ret; } /* @@ -1020,47 +1052,55 @@ sfb_drop_early(struct sfb *sp, uint32_t pkt_sfb_hash, u_int16_t *pmin, /* Level 0: bin index at [0] for set 0; [2] for set 1 */ n = SFB_BINMASK(pkt_sfb_hash8[(s << 1)]); bin = SFB_BINST(sp, 0, n, s); - if (*pmin > (u_int16_t)bin->pmark) + if (*pmin > (u_int16_t)bin->pmark) { *pmin = (u_int16_t)bin->pmark; + } /* Update SFB probability */ - if (bin->pkts >= sp->sfb_allocation) + if (bin->pkts >= sp->sfb_allocation) { sfb_increment_bin(sp, bin, SFB_BINFT(sp, 0, n, s), now); + } ret = sfb_bin_mark_or_drop(sp, bin); /* Level 1: bin index at [1] for set 0; [3] for set 1 */ n = SFB_BINMASK(pkt_sfb_hash8[(s << 1) + 1]); bin = SFB_BINST(sp, 1, n, s); - if (*pmin > (u_int16_t)bin->pmark) + if (*pmin > (u_int16_t)bin->pmark) { *pmin = (u_int16_t)bin->pmark; + } - if (bin->pkts >= sp->sfb_allocation) + if (bin->pkts >= sp->sfb_allocation) { sfb_increment_bin(sp, bin, SFB_BINFT(sp, 1, n, s), now); + } #else /* SFB_LEVELS != 2 */ for (i = 0; i < SFB_LEVELS; i++) { - if (s == 0) /* set 0, bin index [0,1] */ + if (s == 0) { /* set 0, bin index [0,1] */ n = SFB_BINMASK(pkt_sfb_hash8[i]); - else /* set 1, bin index [2,3] */ + } else { /* set 1, bin index [2,3] */ n = SFB_BINMASK(pkt_sfb_hash8[i + 2]); + } bin = SFB_BINST(sp, i, n, s); - if (*pmin > (u_int16_t)bin->pmark) + if (*pmin > (u_int16_t)bin->pmark) { *pmin = (u_int16_t)bin->pmark; + } - if (bin->pkts >= sp->sfb_allocation) + if (bin->pkts >= sp->sfb_allocation) { sfb_increment_bin(sp, bin, SFB_BINFT(sp, i, n, s), now); - if (i == SFB_FC_LEVEL) + } + if (i == SFB_FC_LEVEL) { ret = sfb_bin_mark_or_drop(sp, bin); + } } #endif /* SFB_LEVELS != 2 */ - if (sp->sfb_flags & SFBF_SUSPENDED) - ret = 1; /* drop or mark */ - - return (ret); + if (sp->sfb_flags & SFBF_SUSPENDED) { + ret = 1; /* drop or mark */ + } + return ret; } void @@ -1071,8 +1111,9 @@ sfb_detect_dequeue_stall(struct sfb *sp, 
class_queue_t *q, if (!SFB_QUEUE_DELAYBASED(sp) || SFB_IS_DELAYHIGH(sp) || qsize(q) <= SFB_MIN_FC_THRESHOLD_BYTES || - !net_timerisset(&sp->sfb_getqtime)) + !net_timerisset(&sp->sfb_getqtime)) { return; + } net_timeradd(&sp->sfb_getqtime, &sp->sfb_update_interval, &max_getqtime); @@ -1086,9 +1127,9 @@ sfb_detect_dequeue_stall(struct sfb *sp, class_queue_t *q, } } -#define DTYPE_NODROP 0 /* no drop */ -#define DTYPE_FORCED 1 /* a "forced" drop */ -#define DTYPE_EARLY 2 /* an "unforced" (early) drop */ +#define DTYPE_NODROP 0 /* no drop */ +#define DTYPE_FORCED 1 /* a "forced" drop */ +#define DTYPE_EARLY 2 /* an "unforced" (early) drop */ int sfb_addq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt, @@ -1149,15 +1190,16 @@ sfb_addq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt, * If getq time is not set because this is the first packet * or after idle time, set it now so that we can detect a stall. */ - if (qsize(q) == 0 && !net_timerisset(&sp->sfb_getqtime)) + if (qsize(q) == 0 && !net_timerisset(&sp->sfb_getqtime)) { *(&sp->sfb_getqtime) = *(&now); + } *pkt_sfb_flags = 0; pkt_sfb_hash16[s] = - (SFB_HASH(&pkt_flowid, sizeof (pkt_flowid), + (SFB_HASH(&pkt_flowid, sizeof(pkt_flowid), (*sp->sfb_bins)[s].fudge) & SFB_HASHMASK); pkt_sfb_hash16[s ^ 1] = - (SFB_HASH(&pkt_flowid, sizeof (pkt_flowid), + (SFB_HASH(&pkt_flowid, sizeof(pkt_flowid), (*sp->sfb_bins)[s ^ 1].fudge) & SFB_HASHMASK); /* check if the queue has been stalled */ @@ -1203,10 +1245,11 @@ sfb_addq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt, sp->sfb_stats.drop_pbox++; } - if (SFB_QUEUE_DELAYBASED(sp)) + if (SFB_QUEUE_DELAYBASED(sp)) { maxqsize = SFB_QUEUE_DELAYBASED_MAXSIZE; - else + } else { maxqsize = qlimit(q); + } /* * When the queue length hits the queue limit, make it a forced @@ -1254,17 +1297,18 @@ sfb_addq(struct sfb *sp, class_queue_t *q, pktsched_pkt_t *pkt, _addq(q, pkt->pktsched_pkt); } else { IFCQ_CONVERT_LOCK(&sp->sfb_ifp->if_snd); - return ((ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP); + return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP; } - if (!(*pkt_sfb_flags & SFB_PKT_PBOX)) + if (!(*pkt_sfb_flags & SFB_PKT_PBOX)) { sfb_eq_update_bins(sp, *pkt_sfb_hash, pktsched_get_pkt_len(pkt)); - else + } else { sp->sfb_stats.pbox_packets++; + } /* successfully queued */ - return (ret); + return ret; } static void * @@ -1279,27 +1323,30 @@ sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge, uint32_t *pkt_sfb_hash; void *p; - if (!purge && (sp->sfb_flags & SFBF_SUSPENDED)) - return (NULL); + if (!purge && (sp->sfb_flags & SFBF_SUSPENDED)) { + return NULL; + } nanouptime(&now); /* flow of 0 means head of queue */ if ((p = ((flow == 0) ? _getq(q) : _getq_flow(q, flow))) == NULL) { - if (!purge) + if (!purge) { net_timerclear(&sp->sfb_getqtime); - return (NULL); + } + return NULL; } ptype = qptype(q); - pktsched_pkt_encap(pkt, ptype, p); + pktsched_pkt_encap(pkt, ptype, p); pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL, NULL, NULL); pkt_sfb_hash = pktsched_get_pkt_sfb_vars(pkt, &pkt_sfb_flags); /* See comments in */ - if (ptype == QP_MBUF) + if (ptype == QP_MBUF) { VERIFY(*pkt_flags & PKTF_PRIV_GUARDED); + } if (!purge) { /* calculate EWMA of dequeues */ @@ -1317,8 +1364,9 @@ sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge, * average, weigh the average more against * the old value. 
*/ - if (DEQUEUE_SPIKE(new, avg)) + if (DEQUEUE_SPIKE(new, avg)) { decay += 5; + } avg = (((avg << decay) - avg) + new) >> decay; } else { avg = new; @@ -1331,20 +1379,22 @@ sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge, if (!purge && SFB_QUEUE_DELAYBASED(sp)) { u_int64_t dequeue_ns, queue_delay = 0; net_timernsec(&now, &dequeue_ns); - if (dequeue_ns > *pkt_timestamp) + if (dequeue_ns > *pkt_timestamp) { queue_delay = dequeue_ns - *pkt_timestamp; + } if (sp->sfb_min_qdelay == 0 || - (queue_delay > 0 && queue_delay < sp->sfb_min_qdelay)) + (queue_delay > 0 && queue_delay < sp->sfb_min_qdelay)) { sp->sfb_min_qdelay = queue_delay; + } if (net_timercmp(&now, &sp->sfb_update_time, >=)) { if (sp->sfb_min_qdelay > sp->sfb_target_qdelay) { - if (!SFB_IS_DELAYHIGH(sp)) + if (!SFB_IS_DELAYHIGH(sp)) { SFB_SET_DELAY_HIGH(sp, q); + } } else { sp->sfb_flags &= ~(SFBF_DELAYHIGH); sp->sfb_fc_threshold = 0; - } net_timeradd(&now, &sp->sfb_update_interval, &sp->sfb_update_time); @@ -1364,8 +1414,9 @@ sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge, */ if (*pkt_sfb_flags & SFB_PKT_PBOX) { *pkt_sfb_flags &= ~SFB_PKT_PBOX; - if (sp->sfb_clearpkts > 0) + if (sp->sfb_clearpkts > 0) { sp->sfb_clearpkts--; + } } else if (sp->sfb_clearpkts > 0) { sp->sfb_clearpkts--; } else { @@ -1374,8 +1425,9 @@ sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge, } /* See comments in */ - if (ptype == QP_MBUF) + if (ptype == QP_MBUF) { *pkt_flags &= ~PKTF_PRIV_GUARDED; + } /* * If the queue becomes empty before the update interval, reset @@ -1388,7 +1440,7 @@ sfb_getq_flow(struct sfb *sp, class_queue_t *q, u_int32_t flow, boolean_t purge, net_timerclear(&sp->sfb_update_time); net_timerclear(&sp->sfb_getqtime); } - return (p); + return p; } void @@ -1411,10 +1463,12 @@ sfb_purgeq(struct sfb *sp, class_queue_t *q, u_int32_t flow, u_int32_t *packets, pktsched_free_pkt(&pkt); } - if (packets != NULL) + if (packets != NULL) { *packets = cnt; - if (bytes != NULL) + } + if (bytes != NULL) { *bytes = len; + } } void @@ -1429,8 +1483,9 @@ sfb_updateq(struct sfb *sp, cqev_t ev) u_int64_t eff_rate = ifnet_output_linkrate(ifp); /* update parameters only if rate has changed */ - if (eff_rate == sp->sfb_eff_rate) + if (eff_rate == sp->sfb_eff_rate) { break; + } if (classq_verbose) { log(LOG_DEBUG, "%s: SFB qid=%d, adapting to new " @@ -1470,14 +1525,15 @@ sfb_suspendq(struct sfb *sp, class_queue_t *q, boolean_t on) VERIFY(ifp != NULL); if ((on && (sp->sfb_flags & SFBF_SUSPENDED)) || - (!on && !(sp->sfb_flags & SFBF_SUSPENDED))) - return (0); + (!on && !(sp->sfb_flags & SFBF_SUSPENDED))) { + return 0; + } if (!(sp->sfb_flags & SFBF_FLOWCTL)) { log(LOG_ERR, "%s: SFB qid=%d, unable to %s queue since " "flow-control is not enabled", if_name(ifp), sp->sfb_qid, (on ? 
"suspend" : "resume")); - return (ENOTSUP); + return ENOTSUP; } if (classq_verbose) { @@ -1492,5 +1548,5 @@ sfb_suspendq(struct sfb *sp, class_queue_t *q, boolean_t on) sfb_swap_bins(sp, qlen(q)); } - return (0); + return 0; } diff --git a/bsd/net/classq/classq_sfb.h b/bsd/net/classq/classq_sfb.h index 480ee5d73..487a29f2e 100644 --- a/bsd/net/classq/classq_sfb.h +++ b/bsd/net/classq/classq_sfb.h @@ -27,7 +27,7 @@ */ #ifndef _NET_CLASSQ_CLASSQ_SFB_H_ -#define _NET_CLASSQ_CLASSQ_SFB_H_ +#define _NET_CLASSQ_CLASSQ_SFB_H_ #ifdef PRIVATE #ifdef BSD_KERNEL_PRIVATE @@ -41,45 +41,45 @@ extern "C" { #endif -#define SFB_FP_SHIFT 14 /* fixed-point shift (Q14) */ -#define SFB_LEVELS 2 /* L */ -#define SFB_BINS_SHIFT 5 -#define SFB_BINS (1 << SFB_BINS_SHIFT) /* N */ +#define SFB_FP_SHIFT 14 /* fixed-point shift (Q14) */ +#define SFB_LEVELS 2 /* L */ +#define SFB_BINS_SHIFT 5 +#define SFB_BINS (1 << SFB_BINS_SHIFT) /* N */ struct sfbstats { - u_int64_t drop_early; - u_int64_t drop_pbox; - u_int64_t drop_queue; - u_int64_t marked_packets; - u_int64_t pbox_packets; - u_int64_t pbox_time; - u_int64_t hold_time; - u_int64_t dequeue_avg; - u_int64_t rehash_intval; - u_int64_t num_rehash; - u_int64_t null_flowid; - u_int64_t flow_controlled; - u_int64_t flow_feedback; - u_int64_t dequeue_stall; + u_int64_t drop_early; + u_int64_t drop_pbox; + u_int64_t drop_queue; + u_int64_t marked_packets; + u_int64_t pbox_packets; + u_int64_t pbox_time; + u_int64_t hold_time; + u_int64_t dequeue_avg; + u_int64_t rehash_intval; + u_int64_t num_rehash; + u_int64_t null_flowid; + u_int64_t flow_controlled; + u_int64_t flow_feedback; + u_int64_t dequeue_stall; }; struct sfbbinstats { - int16_t pmark; /* marking probability in Q format */ - u_int16_t pkts; /* number of packets */ - u_int32_t bytes; /* number of bytes */ + int16_t pmark; /* marking probability in Q format */ + u_int16_t pkts; /* number of packets */ + u_int32_t bytes; /* number of bytes */ }; struct sfb_stats { - u_int32_t allocation; - u_int32_t dropthresh; - u_int32_t clearpkts; - u_int32_t current; - u_int64_t target_qdelay; - u_int64_t update_interval; - u_int64_t min_estdelay; - u_int32_t delay_fcthreshold; - u_int32_t flags; - struct sfbstats sfbstats; + u_int32_t allocation; + u_int32_t dropthresh; + u_int32_t clearpkts; + u_int32_t current; + u_int64_t target_qdelay; + u_int64_t update_interval; + u_int64_t min_estdelay; + u_int32_t delay_fcthreshold; + u_int32_t flags; + struct sfbstats sfbstats; struct sfbbins { struct sfbbinstats stats[SFB_LEVELS][SFB_BINS]; } binstats[2] __attribute__((aligned(8))); @@ -87,68 +87,68 @@ struct sfb_stats { #ifdef BSD_KERNEL_PRIVATE struct sfb_bins { - u_int32_t fudge; - struct sfbbinstats stats[SFB_LEVELS][SFB_BINS]; - struct timespec freezetime[SFB_LEVELS][SFB_BINS]; + u_int32_t fudge; + struct sfbbinstats stats[SFB_LEVELS][SFB_BINS]; + struct timespec freezetime[SFB_LEVELS][SFB_BINS]; }; struct sfb_fcl { - u_int32_t cnt; - struct flowadv_fclist fclist; + u_int32_t cnt; + struct flowadv_fclist fclist; }; /* SFB flags */ -#define SFBF_ECN4 0x01 /* use packet marking for IPv4 packets */ -#define SFBF_ECN6 0x02 /* use packet marking for IPv6 packets */ -#define SFBF_ECN (SFBF_ECN4 | SFBF_ECN6) -#define SFBF_FLOWCTL 0x04 /* enable flow control advisories */ -#define SFBF_DELAYBASED 0x08 /* queueing is delay based */ -#define SFBF_DELAYHIGH 0x10 /* Estimated delay is greater than target */ -#define SFBF_LAST_PKT_DROPPED 0x20 /* Last packet dropped */ -#define SFBF_SUSPENDED 0x1000 /* queue is suspended */ - -#define 
SFBF_USERFLAGS \ +#define SFBF_ECN4 0x01 /* use packet marking for IPv4 packets */ +#define SFBF_ECN6 0x02 /* use packet marking for IPv6 packets */ +#define SFBF_ECN (SFBF_ECN4 | SFBF_ECN6) +#define SFBF_FLOWCTL 0x04 /* enable flow control advisories */ +#define SFBF_DELAYBASED 0x08 /* queueing is delay based */ +#define SFBF_DELAYHIGH 0x10 /* Estimated delay is greater than target */ +#define SFBF_LAST_PKT_DROPPED 0x20 /* Last packet dropped */ +#define SFBF_SUSPENDED 0x1000 /* queue is suspended */ + +#define SFBF_USERFLAGS \ (SFBF_ECN4 | SFBF_ECN6 | SFBF_FLOWCTL | SFBF_DELAYBASED) typedef struct sfb { /* variables for internal use */ - u_int32_t sfb_flags; /* SFB flags */ - u_int32_t sfb_qlim; - u_int32_t sfb_qid; - u_int16_t sfb_allocation; - u_int16_t sfb_drop_thresh; - u_int32_t sfb_clearpkts; - u_int64_t sfb_eff_rate; /* last known effective rate */ - struct timespec sfb_getqtime; /* last dequeue timestamp */ - struct timespec sfb_holdtime; /* random holdtime in nsec */ - struct ifnet *sfb_ifp; /* back pointer to ifnet */ + u_int32_t sfb_flags; /* SFB flags */ + u_int32_t sfb_qlim; + u_int32_t sfb_qid; + u_int16_t sfb_allocation; + u_int16_t sfb_drop_thresh; + u_int32_t sfb_clearpkts; + u_int64_t sfb_eff_rate; /* last known effective rate */ + struct timespec sfb_getqtime; /* last dequeue timestamp */ + struct timespec sfb_holdtime; /* random holdtime in nsec */ + struct ifnet *sfb_ifp; /* back pointer to ifnet */ /* target queue delay and interval for queue sizing */ - u_int64_t sfb_target_qdelay; - struct timespec sfb_update_interval; - u_int64_t sfb_fc_threshold; /* for flow control feedback */ + u_int64_t sfb_target_qdelay; + struct timespec sfb_update_interval; + u_int64_t sfb_fc_threshold; /* for flow control feedback */ /* variables for computing estimated delay of the queue */ - u_int64_t sfb_min_qdelay; - struct timespec sfb_update_time; + u_int64_t sfb_min_qdelay; + struct timespec sfb_update_time; /* moving hash function */ - struct timespec sfb_hinterval; /* random reset interval in sec */ - struct timespec sfb_nextreset; /* reset deadline */ + struct timespec sfb_hinterval; /* random reset interval in sec */ + struct timespec sfb_nextreset; /* reset deadline */ /* penalty box */ - struct timespec sfb_pboxtime; /* random pboxtime in nsec */ - struct timespec sfb_pboxfreeze; + struct timespec sfb_pboxtime; /* random pboxtime in nsec */ + struct timespec sfb_pboxfreeze; /* B[L][N] bins (2 sets: current and warm-up) */ - u_int32_t sfb_current; /* current set (0 or 1) */ - struct sfb_bins (*sfb_bins)[2]; + u_int32_t sfb_current; /* current set (0 or 1) */ + struct sfb_bins (*sfb_bins)[ 2]; /* Flow control lists for current set */ - struct sfb_fcl (*sfb_fc_lists)[SFB_BINS]; + struct sfb_fcl (*sfb_fc_lists)[ SFB_BINS]; /* statistics */ - struct sfbstats sfb_stats __attribute__((aligned(8))); + struct sfbstats sfb_stats __attribute__((aligned(8))); } sfb_t; extern void sfb_init(void); diff --git a/bsd/net/classq/classq_subr.c b/bsd/net/classq/classq_subr.c index 7c93bc65e..1256b3c3e 100644 --- a/bsd/net/classq/classq_subr.c +++ b/bsd/net/classq/classq_subr.c @@ -55,12 +55,12 @@ static void *ifclassq_tbr_dequeue_common(struct ifclassq *, mbuf_svc_class_t, boolean_t, classq_pkt_type_t *); static u_int64_t ifclassq_target_qdelay = 0; -SYSCTL_QUAD(_net_classq, OID_AUTO, target_qdelay, CTLFLAG_RW|CTLFLAG_LOCKED, +SYSCTL_QUAD(_net_classq, OID_AUTO, target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED, &ifclassq_target_qdelay, "target queue delay in nanoseconds"); static u_int64_t 
ifclassq_update_interval = 0; SYSCTL_QUAD(_net_classq, OID_AUTO, update_interval, - CTLFLAG_RW|CTLFLAG_LOCKED, &ifclassq_update_interval, + CTLFLAG_RW | CTLFLAG_LOCKED, &ifclassq_update_interval, "update interval in nanoseconds"); static int32_t ifclassq_sched_fq_codel; @@ -76,8 +76,9 @@ classq_init(void) fq_codel_scheduler_init(); if (!PE_parse_boot_argn("fq_codel", &ifclassq_sched_fq_codel, - sizeof (ifclassq_sched_fq_codel))) + sizeof(ifclassq_sched_fq_codel))) { ifclassq_sched_fq_codel = 1; + } } int @@ -92,8 +93,8 @@ ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse) ifq->ifcq_ifp = ifp; IFCQ_LEN(ifq) = 0; IFCQ_BYTES(ifq) = 0; - bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt)); - bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt)); + bzero(&ifq->ifcq_xmitcnt, sizeof(ifq->ifcq_xmitcnt)); + bzero(&ifq->ifcq_dropcnt, sizeof(ifq->ifcq_dropcnt)); VERIFY(!IFCQ_TBR_IS_ENABLED(ifq)); VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE); @@ -108,8 +109,9 @@ ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse) if (ifp->if_eflags & IFEF_TXSTART) { u_int32_t maxlen = 0; - if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) + if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) { maxlen = if_sndq_maxlen; + } IFCQ_SET_MAXLEN(ifq, maxlen); if (IFCQ_MAXLEN(ifq) != if_sndq_maxlen && @@ -122,11 +124,12 @@ ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse) } ifq->ifcq_sflags = sflags; err = ifclassq_pktsched_setup(ifq); - if (err == 0) + if (err == 0) { ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED); + } } IFCQ_UNLOCK(ifq); - return (err); + return err; } void @@ -159,8 +162,8 @@ ifclassq_teardown(struct ifnet *ifp) IFCQ_LEN(ifq) = 0; IFCQ_BYTES(ifq) = 0; IFCQ_MAXLEN(ifq) = 0; - bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt)); - bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt)); + bzero(&ifq->ifcq_xmitcnt, sizeof(ifq->ifcq_xmitcnt)); + bzero(&ifq->ifcq_dropcnt, sizeof(ifq->ifcq_dropcnt)); IFCQ_UNLOCK(ifq); } @@ -204,15 +207,16 @@ ifclassq_pktsched_setup(struct ifclassq *ifq) /* NOTREACHED */ } - return (err); + return err; } void ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen) { IFCQ_LOCK(ifq); - if (maxqlen == 0) + if (maxqlen == 0) { maxqlen = if_sndq_maxlen; + } IFCQ_SET_MAXLEN(ifq, maxqlen); IFCQ_UNLOCK(ifq); } @@ -220,7 +224,7 @@ ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen) u_int32_t ifclassq_get_maxlen(struct ifclassq *ifq) { - return (IFCQ_MAXLEN(ifq)); + return IFCQ_MAXLEN(ifq); } int @@ -240,15 +244,16 @@ ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets, } IFCQ_UNLOCK(ifq); - return (err); + return err; } inline void ifclassq_set_packet_metadata(struct ifclassq *ifq, struct ifnet *ifp, void *p, classq_pkt_type_t ptype) { - if (!IFNET_IS_CELLULAR(ifp)) + if (!IFNET_IS_CELLULAR(ifp)) { return; + } switch (ptype) { case QP_MBUF: { @@ -284,7 +289,7 @@ ifclassq_enqueue(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, IFCQ_ENQUEUE(ifq, p, ptype, err, pdrop); IFCQ_UNLOCK(ifq); - return (err); + return err; } errno_t @@ -292,8 +297,8 @@ ifclassq_dequeue(struct ifclassq *ifq, u_int32_t pkt_limit, u_int32_t byte_limit, void **head, void **tail, u_int32_t *cnt, u_int32_t *len, classq_pkt_type_t *ptype) { - return (ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, pkt_limit, - byte_limit, head, tail, cnt, len, FALSE, ptype)); + return ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, pkt_limit, + byte_limit, head, tail, cnt, len, FALSE, ptype); } errno_t @@ -301,8 +306,8 @@ ifclassq_dequeue_sc(struct ifclassq *ifq, 
mbuf_svc_class_t sc, u_int32_t pkt_limit, u_int32_t byte_limit, void **head, void **tail, u_int32_t *cnt, u_int32_t *len, classq_pkt_type_t *ptype) { - return (ifclassq_dequeue_common(ifq, sc, pkt_limit, byte_limit, - head, tail, cnt, len, TRUE, ptype)); + return ifclassq_dequeue_common(ifq, sc, pkt_limit, byte_limit, + head, tail, cnt, len, TRUE, ptype); } static errno_t @@ -312,7 +317,7 @@ ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc, classq_pkt_type_t *ptype) { struct ifnet *ifp = ifq->ifcq_ifp; - u_int32_t i = 0, l = 0, lock_spin = 1 ; + u_int32_t i = 0, l = 0, lock_spin = 1; void **first, *last; VERIFY(!drvmgt || MBUF_VALID_SC(sc)); @@ -320,8 +325,9 @@ ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc, *ptype = 0; - if (IFCQ_TBR_IS_ENABLED(ifq)) + if (IFCQ_TBR_IS_ENABLED(ifq)) { goto dequeue_loop; + } /* * If the scheduler support dequeueing multiple packets at the @@ -330,32 +336,36 @@ ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc, if (drvmgt && ifq->ifcq_dequeue_sc_multi != NULL) { int err; - if (lock_spin) + if (lock_spin) { IFCQ_LOCK_SPIN(ifq); - else + } else { IFCQ_LOCK(ifq); + } err = ifq->ifcq_dequeue_sc_multi(ifq, sc, pkt_limit, byte_limit, head, tail, cnt, len, ptype); IFCQ_UNLOCK(ifq); - if (err == 0 && (*head) == NULL) + if (err == 0 && (*head) == NULL) { err = EAGAIN; - return (err); + } + return err; } else if (ifq->ifcq_dequeue_multi != NULL) { int err; - if (lock_spin) + if (lock_spin) { IFCQ_LOCK_SPIN(ifq); - else + } else { IFCQ_LOCK(ifq); + } err = ifq->ifcq_dequeue_multi(ifq, pkt_limit, byte_limit, head, tail, cnt, len, ptype); IFCQ_UNLOCK(ifq); - if (err == 0 && (*head) == NULL) + if (err == 0 && (*head) == NULL) { err = EAGAIN; - return (err); + } + return err; } dequeue_loop: @@ -363,27 +373,31 @@ dequeue_loop: first = &(*head); last = NULL; - if (lock_spin) + if (lock_spin) { IFCQ_LOCK_SPIN(ifq); - else + } else { IFCQ_LOCK(ifq); + } while (i < pkt_limit && l < byte_limit) { classq_pkt_type_t tmp_ptype; if (drvmgt) { - if (IFCQ_TBR_IS_ENABLED(ifq)) + if (IFCQ_TBR_IS_ENABLED(ifq)) { IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head, &tmp_ptype); - else + } else { IFCQ_DEQUEUE_SC(ifq, sc, *head, &tmp_ptype); + } } else { - if (IFCQ_TBR_IS_ENABLED(ifq)) + if (IFCQ_TBR_IS_ENABLED(ifq)) { IFCQ_TBR_DEQUEUE(ifq, *head, &tmp_ptype); - else + } else { IFCQ_DEQUEUE(ifq, *head, &tmp_ptype); + } } - if (*head == NULL) + if (*head == NULL) { break; + } switch (tmp_ptype) { case QP_MBUF: @@ -407,14 +421,17 @@ dequeue_loop: IFCQ_UNLOCK(ifq); - if (tail != NULL) + if (tail != NULL) { *tail = last; - if (cnt != NULL) + } + if (cnt != NULL) { *cnt = i; - if (len != NULL) + } + if (len != NULL) { *len = l; + } - return ((*first != NULL) ? 0 : EAGAIN); + return (*first != NULL) ? 
0 : EAGAIN; } void @@ -446,7 +463,7 @@ ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline, ifq->ifcq_dequeue_sc_multi = dequeue_sc_multi; ifq->ifcq_request = request; - return (0); + return 0; } int @@ -463,7 +480,7 @@ ifclassq_detach(struct ifclassq *ifq) ifq->ifcq_dequeue_sc = NULL; ifq->ifcq_request = NULL; - return (0); + return 0; } int @@ -473,18 +490,20 @@ ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf, struct if_ifclassq_stats *ifqs; int err; - if (*nbytes < sizeof (*ifqs)) - return (EINVAL); + if (*nbytes < sizeof(*ifqs)) { + return EINVAL; + } - ifqs = _MALLOC(sizeof (*ifqs), M_TEMP, M_WAITOK | M_ZERO); - if (ifqs == NULL) - return (ENOMEM); + ifqs = _MALLOC(sizeof(*ifqs), M_TEMP, M_WAITOK | M_ZERO); + if (ifqs == NULL) { + return ENOMEM; + } IFCQ_LOCK(ifq); if (!IFCQ_IS_READY(ifq)) { IFCQ_UNLOCK(ifq); _FREE(ifqs, M_TEMP); - return (ENXIO); + return ENXIO; } ifqs->ifqs_len = IFCQ_LEN(ifq); @@ -497,12 +516,13 @@ ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf, IFCQ_UNLOCK(ifq); if (err == 0 && (err = copyout((caddr_t)ifqs, - (user_addr_t)(uintptr_t)ubuf, sizeof (*ifqs))) == 0) - *nbytes = sizeof (*ifqs); + (user_addr_t)(uintptr_t)ubuf, sizeof(*ifqs))) == 0) { + *nbytes = sizeof(*ifqs); + } _FREE(ifqs, M_TEMP); - return (err); + return err; } const char * @@ -536,7 +556,7 @@ ifclassq_ev2str(cqev_t ev) break; } - return (c); + return c; } /* @@ -546,21 +566,21 @@ ifclassq_ev2str(cqev_t ev) * depth: byte << 32 * */ -#define TBR_SHIFT 32 -#define TBR_SCALE(x) ((int64_t)(x) << TBR_SHIFT) -#define TBR_UNSCALE(x) ((x) >> TBR_SHIFT) +#define TBR_SHIFT 32 +#define TBR_SCALE(x) ((int64_t)(x) << TBR_SHIFT) +#define TBR_UNSCALE(x) ((x) >> TBR_SHIFT) void * ifclassq_tbr_dequeue(struct ifclassq *ifq, classq_pkt_type_t *ptype) { - return (ifclassq_tbr_dequeue_common(ifq, MBUF_SC_UNSPEC, FALSE, ptype)); + return ifclassq_tbr_dequeue_common(ifq, MBUF_SC_UNSPEC, FALSE, ptype); } void * ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc, classq_pkt_type_t *ptype) { - return (ifclassq_tbr_dequeue_common(ifq, sc, TRUE, ptype)); + return ifclassq_tbr_dequeue_common(ifq, sc, TRUE, ptype); } static void * @@ -586,23 +606,26 @@ ifclassq_tbr_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc, tbr->tbr_token = tbr->tbr_depth; } else { tbr->tbr_token += interval * tbr->tbr_rate; - if (tbr->tbr_token > tbr->tbr_depth) + if (tbr->tbr_token > tbr->tbr_depth) { tbr->tbr_token = tbr->tbr_depth; + } } tbr->tbr_last = now; } /* if token is still negative, don't allow dequeue */ - if (tbr->tbr_token <= 0) - return (NULL); + if (tbr->tbr_token <= 0) { + return NULL; + } /* * ifclassq takes precedence over ALTQ queue; * ifcq_drain count is adjusted by the caller. 
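 *
 * Token accounting sketch (illustrative numbers): with rate and
 * depth both carried at byte << 32 scale, refill adds
 * interval * tbr_rate and clamps at tbr_depth as above, and a
 * dequeued packet would be charged at the same scale, e.g.
 * TBR_SCALE(1500) for a 1500-byte frame; a non-positive balance
 * then blocks further dequeues until enough ticks accumulate.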
*/ - if (drvmgt) - IFCQ_DEQUEUE_SC(ifq, sc, p, ptype); - else - IFCQ_DEQUEUE(ifq, p, ptype); + if (drvmgt) { + IFCQ_DEQUEUE_SC(ifq, sc, p, ptype); + } else { + IFCQ_DEQUEUE(ifq, p, ptype); + } if (p != NULL) { switch (*ptype) { @@ -617,7 +640,7 @@ ifclassq_tbr_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc, } } - return (p); + return p; } /* @@ -644,27 +667,32 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile, if (profile->percent > 0) { u_int64_t eff_rate; - if (profile->percent > 100) - return (EINVAL); - if ((eff_rate = ifp->if_output_bw.eff_bw) == 0) - return (ENODEV); + if (profile->percent > 100) { + return EINVAL; + } + if ((eff_rate = ifp->if_output_bw.eff_bw) == 0) { + return ENODEV; + } rate = (eff_rate * profile->percent) / 100; } if (rate == 0) { - if (!IFCQ_TBR_IS_ENABLED(ifq)) - return (ENOENT); + if (!IFCQ_TBR_IS_ENABLED(ifq)) { + return ENOENT; + } - if (pktsched_verbose) + if (pktsched_verbose) { printf("%s: TBR disabled\n", if_name(ifp)); + } /* disable this TBR */ ifq->ifcq_flags &= ~IFCQF_TBR; - bzero(tbr, sizeof (*tbr)); + bzero(tbr, sizeof(*tbr)); ifnet_set_start_cycle(ifp, NULL); - if (update) + if (update) { ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH); - return (0); + } + return 0; } if (pktsched_verbose) { @@ -674,7 +702,7 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile, } /* set the new TBR */ - bzero(tbr, sizeof (*tbr)); + bzero(tbr, sizeof(*tbr)); tbr->tbr_rate_raw = rate; tbr->tbr_percent = profile->percent; ifq->ifcq_flags |= IFCQF_TBR; @@ -695,15 +723,17 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile, int64_t ival, idepth = 0; int i; - if (mtu < IF_MINMTU) + if (mtu < IF_MINMTU) { mtu = IF_MINMTU; + } ival = pktsched_nsecs_to_abstime(10 * NSEC_PER_MSEC); /* 10ms */ - for (i = 1; ; i++) { + for (i = 1;; i++) { idepth = TBR_SCALE(i * mtu); - if ((idepth / tbr->tbr_rate) > ival) + if ((idepth / tbr->tbr_rate) > ival) { break; + } } VERIFY(idepth > 0); @@ -724,7 +754,7 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile, if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) { struct timespec ts = - { 0, pktsched_abs_to_nsecs(tbr->tbr_filluptime) }; + { 0, pktsched_abs_to_nsecs(tbr->tbr_filluptime) }; if (pktsched_verbose) { printf("%s: TBR calculated tokens %lld " "filluptime %llu ns\n", if_name(ifp), @@ -745,10 +775,11 @@ ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile, } ifnet_set_start_cycle(ifp, NULL); } - if (update && tbr->tbr_rate_raw != old_rate) + if (update && tbr->tbr_rate_raw != old_rate) { ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH); + } - return (0); + return 0; } void @@ -757,15 +788,17 @@ ifclassq_calc_target_qdelay(struct ifnet *ifp, u_int64_t *if_target_qdelay) u_int64_t qdelay = 0; qdelay = IFCQ_TARGET_QDELAY(&ifp->if_snd); - if (ifclassq_target_qdelay != 0) + if (ifclassq_target_qdelay != 0) { qdelay = ifclassq_target_qdelay; + } /* * If we do not know the effective bandwidth, use the default * target queue delay. */ - if (qdelay == 0) + if (qdelay == 0) { qdelay = IFQ_TARGET_DELAY; + } /* * If a delay has been added to ifnet start callback for @@ -773,8 +806,9 @@ ifclassq_calc_target_qdelay(struct ifnet *ifp, u_int64_t *if_target_qdelay) * because the packets can be in the queue longer. 
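 *
 * For example (illustrative numbers only): with a 10 ms target
 * queue delay and a 5 ms start-delay timeout, the effective
 * target used for queue sizing becomes 15 ms.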
*/ if ((ifp->if_eflags & IFEF_ENQUEUE_MULTI) && - ifp->if_start_delay_timeout > 0) + ifp->if_start_delay_timeout > 0) { qdelay += ifp->if_start_delay_timeout; + } *(if_target_qdelay) = qdelay; } @@ -785,12 +819,14 @@ ifclassq_calc_update_interval(u_int64_t *update_interval) u_int64_t uint = 0; /* If the system level override is set, use it */ - if (ifclassq_update_interval != 0) + if (ifclassq_update_interval != 0) { uint = ifclassq_update_interval; + } /* Otherwise use the default value */ - if (uint == 0) + if (uint == 0) { uint = IFQ_UPDATE_INTERVAL; + } *update_interval = uint; } diff --git a/bsd/net/classq/classq_util.c b/bsd/net/classq/classq_util.c index 56aa56eea..f009b4ee0 100644 --- a/bsd/net/classq/classq_util.c +++ b/bsd/net/classq/classq_util.c @@ -93,29 +93,33 @@ read_dsfield(struct mbuf *m, struct pf_mtag *t) u_int8_t ds_field = 0; if (t->pftag_hdr == NULL || - !(t->pftag_flags & (PF_TAG_HDR_INET|PF_TAG_HDR_INET6))) - return ((u_int8_t)0); + !(t->pftag_flags & (PF_TAG_HDR_INET | PF_TAG_HDR_INET6))) { + return (u_int8_t)0; + } /* verify that hdr is within the mbuf data */ - for (m0 = m; m0 != NULL; m0 = m0->m_next) + for (m0 = m; m0 != NULL; m0 = m0->m_next) { if (((caddr_t)t->pftag_hdr >= m0->m_data) && - ((caddr_t)t->pftag_hdr < m0->m_data + m0->m_len)) + ((caddr_t)t->pftag_hdr < m0->m_data + m0->m_len)) { break; + } + } if (m0 == NULL) { /* ick, tag info is stale */ printf("%s: can't locate header!\n", __func__); - return ((u_int8_t)0); + return (u_int8_t)0; } if (t->pftag_flags & PF_TAG_HDR_INET) { struct ip *ip = (struct ip *)(void *)t->pftag_hdr; - if (((uintptr_t)ip + sizeof (*ip)) > - ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) - return (0); /* out of bounds */ - - if (ip->ip_v != 4) - return ((u_int8_t)0); /* version mismatch! */ + if (((uintptr_t)ip + sizeof(*ip)) > + ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) { + return 0; /* out of bounds */ + } + if (ip->ip_v != 4) { + return (u_int8_t)0; /* version mismatch! */ + } ds_field = ip->ip_tos; } #if INET6 @@ -123,17 +127,18 @@ read_dsfield(struct mbuf *m, struct pf_mtag *t) struct ip6_hdr *ip6 = (struct ip6_hdr *)(void *)t->pftag_hdr; u_int32_t flowlabel; - if (((uintptr_t)ip6 + sizeof (*ip6)) > - ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) - return (0); /* out of bounds */ - + if (((uintptr_t)ip6 + sizeof(*ip6)) > + ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) { + return 0; /* out of bounds */ + } flowlabel = ntohl(ip6->ip6_flow); - if ((flowlabel >> 28) != 6) - return ((u_int8_t)0); /* version mismatch! */ + if ((flowlabel >> 28) != 6) { + return (u_int8_t)0; /* version mismatch! 
*/ + } ds_field = (flowlabel >> 20) & 0xff; } #endif - return (ds_field); + return ds_field; } void @@ -142,14 +147,17 @@ write_dsfield(struct mbuf *m, struct pf_mtag *t, u_int8_t dsfield) struct mbuf *m0; if (t->pftag_hdr == NULL || - !(t->pftag_flags & (PF_TAG_HDR_INET|PF_TAG_HDR_INET6))) + !(t->pftag_flags & (PF_TAG_HDR_INET | PF_TAG_HDR_INET6))) { return; + } /* verify that hdr is within the mbuf data */ - for (m0 = m; m0 != NULL; m0 = m0->m_next) + for (m0 = m; m0 != NULL; m0 = m0->m_next) { if (((caddr_t)t->pftag_hdr >= m0->m_data) && - ((caddr_t)t->pftag_hdr < m0->m_data + m0->m_len)) + ((caddr_t)t->pftag_hdr < m0->m_data + m0->m_len)) { break; + } + } if (m0 == NULL) { /* ick, tag info is stale */ printf("%s: can't locate header!\n", __func__); @@ -161,16 +169,18 @@ write_dsfield(struct mbuf *m, struct pf_mtag *t, u_int8_t dsfield) u_int8_t old; int32_t sum; - if (((uintptr_t)ip + sizeof (*ip)) > - ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) - return; /* out of bounds */ - - if (ip->ip_v != 4) - return; /* version mismatch! */ + if (((uintptr_t)ip + sizeof(*ip)) > + ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) { + return; /* out of bounds */ + } + if (ip->ip_v != 4) { + return; /* version mismatch! */ + } old = ip->ip_tos; - dsfield |= old & 3; /* leave CU bits */ - if (old == dsfield) + dsfield |= old & 3; /* leave CU bits */ + if (old == dsfield) { return; + } ip->ip_tos = dsfield; /* * update checksum (from RFC1624) @@ -188,13 +198,14 @@ write_dsfield(struct mbuf *m, struct pf_mtag *t, u_int8_t dsfield) struct ip6_hdr *ip6 = (struct ip6_hdr *)t->pftag_hdr; u_int32_t flowlabel; - if (((uintptr_t)ip6 + sizeof (*ip6)) > - ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) - return; /* out of bounds */ - + if (((uintptr_t)ip6 + sizeof(*ip6)) > + ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) { + return; /* out of bounds */ + } flowlabel = ntohl(ip6->ip6_flow); - if ((flowlabel >> 28) != 6) - return; /* version mismatch! */ + if ((flowlabel >> 28) != 6) { + return; /* version mismatch! 
*/ + } flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20); ip6->ip6_flow = htonl(flowlabel); } @@ -208,50 +219,56 @@ write_dsfield(struct mbuf *m, struct pf_mtag *t, u_int8_t dsfield) int mark_ecn(struct mbuf *m, struct pf_mtag *t, int flags) { - struct mbuf *m0; - void *hdr; - int af; + struct mbuf *m0; + void *hdr; + int af; if ((hdr = t->pftag_hdr) == NULL || - !(t->pftag_flags & (PF_TAG_HDR_INET|PF_TAG_HDR_INET6))) - return (0); + !(t->pftag_flags & (PF_TAG_HDR_INET | PF_TAG_HDR_INET6))) { + return 0; + } /* verify that hdr is within the mbuf data */ - for (m0 = m; m0 != NULL; m0 = m0->m_next) + for (m0 = m; m0 != NULL; m0 = m0->m_next) { if (((caddr_t)hdr >= m0->m_data) && - ((caddr_t)hdr < m0->m_data + m0->m_len)) + ((caddr_t)hdr < m0->m_data + m0->m_len)) { break; + } + } if (m0 == NULL) { /* ick, tag info is stale */ printf("%s: can't locate header!\n", __func__); - return (0); + return 0; } - if (t->pftag_flags & PF_TAG_HDR_INET) + if (t->pftag_flags & PF_TAG_HDR_INET) { af = AF_INET; - else if (t->pftag_flags & PF_TAG_HDR_INET6) + } else if (t->pftag_flags & PF_TAG_HDR_INET6) { af = AF_INET6; - else + } else { af = AF_UNSPEC; + } switch (af) { case AF_INET: - if (flags & CLASSQF_ECN4) { /* REDF_ECN4 == BLUEF_ECN4 */ + if (flags & CLASSQF_ECN4) { /* REDF_ECN4 == BLUEF_ECN4 */ struct ip *ip = hdr; u_int8_t otos; int sum; - if (((uintptr_t)ip + sizeof (*ip)) > - ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) - return (0); /* out of bounds */ - - if (ip->ip_v != 4) - return (0); /* version mismatch! */ - if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT) - return (0); /* not-ECT */ - if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE) - return (1); /* already marked */ - + if (((uintptr_t)ip + sizeof(*ip)) > + ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) { + return 0; /* out of bounds */ + } + if (ip->ip_v != 4) { + return 0; /* version mismatch! */ + } + if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT) { + return 0; /* not-ECT */ + } + if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE) { + return 1; /* already marked */ + } /* * ecn-capable but not marked, * mark CE and update checksum @@ -270,40 +287,43 @@ mark_ecn(struct mbuf *m, struct pf_mtag *t, int flags) sum += (sum >> 16); /* add carry */ ip->ip_sum = htons(~sum & 0xffff); } - return (1); + return 1; } break; #if INET6 case AF_INET6: - if (flags & CLASSQF_ECN6) { /* REDF_ECN6 == BLUEF_ECN6 */ + if (flags & CLASSQF_ECN6) { /* REDF_ECN6 == BLUEF_ECN6 */ struct ip6_hdr *ip6 = hdr; u_int32_t flowlabel; - if (((uintptr_t)ip6 + sizeof (*ip6)) > - ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) - return (0); /* out of bounds */ - + if (((uintptr_t)ip6 + sizeof(*ip6)) > + ((uintptr_t)mbuf_datastart(m0) + mbuf_maxlen(m0))) { + return 0; /* out of bounds */ + } flowlabel = ntohl(ip6->ip6_flow); - if ((flowlabel >> 28) != 6) - return (0); /* version mismatch! */ + if ((flowlabel >> 28) != 6) { + return 0; /* version mismatch! 
*/ + } if ((flowlabel & (IPTOS_ECN_MASK << 20)) == - (IPTOS_ECN_NOTECT << 20)) - return (0); /* not-ECT */ + (IPTOS_ECN_NOTECT << 20)) { + return 0; /* not-ECT */ + } if ((flowlabel & (IPTOS_ECN_MASK << 20)) == - (IPTOS_ECN_CE << 20)) - return (1); /* already marked */ + (IPTOS_ECN_CE << 20)) { + return 1; /* already marked */ + } /* * ecn-capable but not marked, mark CE */ flowlabel |= (IPTOS_ECN_CE << 20); ip6->ip6_flow = htonl(flowlabel); - return (1); + return 1; } break; #endif /* INET6 */ } /* not marked */ - return (0); + return 0; } #endif /* PF_ECN */ diff --git a/bsd/net/classq/if_classq.h b/bsd/net/classq/if_classq.h index 18e9bc7f8..98f019796 100644 --- a/bsd/net/classq/if_classq.h +++ b/bsd/net/classq/if_classq.h @@ -27,45 +27,45 @@ */ #ifndef _NET_CLASSQ_IF_CLASSQ_H_ -#define _NET_CLASSQ_IF_CLASSQ_H_ +#define _NET_CLASSQ_IF_CLASSQ_H_ #ifdef PRIVATE -#define IFCQ_SC_MAX 10 /* max number of queues */ +#define IFCQ_SC_MAX 10 /* max number of queues */ #ifdef BSD_KERNEL_PRIVATE #include /* maximum number of packets stored across all queues */ -#define IFCQ_DEFAULT_PKT_DROP_LIMIT 2048 +#define IFCQ_DEFAULT_PKT_DROP_LIMIT 2048 /* classq request types */ typedef enum cqrq { - CLASSQRQ_PURGE = 1, /* purge all packets */ - CLASSQRQ_PURGE_SC = 2, /* purge service class (and flow) */ - CLASSQRQ_EVENT = 3, /* interface events */ - CLASSQRQ_THROTTLE = 4, /* throttle packets */ - CLASSQRQ_STAT_SC = 5, /* get service class queue stats */ + CLASSQRQ_PURGE = 1, /* purge all packets */ + CLASSQRQ_PURGE_SC = 2, /* purge service class (and flow) */ + CLASSQRQ_EVENT = 3, /* interface events */ + CLASSQRQ_THROTTLE = 4, /* throttle packets */ + CLASSQRQ_STAT_SC = 5, /* get service class queue stats */ } cqrq_t; /* classq purge_sc request argument */ typedef struct cqrq_purge_sc { - mbuf_svc_class_t sc; /* (in) service class */ - u_int32_t flow; /* (in) 0 means all flows */ - u_int32_t packets; /* (out) purged packets */ - u_int32_t bytes; /* (out) purged bytes */ + mbuf_svc_class_t sc; /* (in) service class */ + u_int32_t flow; /* (in) 0 means all flows */ + u_int32_t packets; /* (out) purged packets */ + u_int32_t bytes; /* (out) purged bytes */ } cqrq_purge_sc_t; /* classq throttle request argument */ typedef struct cqrq_throttle { - u_int32_t set; /* set or get */ - u_int32_t level; /* (in/out) throttling level */ + u_int32_t set; /* set or get */ + u_int32_t level; /* (in/out) throttling level */ } cqrq_throttle_t; /* classq service class stats request argument */ typedef struct cqrq_stat_sc { - mbuf_svc_class_t sc; /* (in) service class */ - u_int32_t packets; /* (out) packets enqueued */ - u_int32_t bytes; /* (out) bytes enqueued */ + mbuf_svc_class_t sc; /* (in) service class */ + u_int32_t packets; /* (out) packets enqueued */ + u_int32_t bytes; /* (out) bytes enqueued */ } cqrq_stat_sc_t; /* @@ -77,23 +77,23 @@ typedef struct cqrq_stat_sc { * in a device independent manner. 
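 *
 * A hypothetical reading of the fields below: tbr_rate_raw keeps
 * the configured rate exactly as given, while tbr_rate, tbr_depth
 * and tbr_token hold the scaled forms (byte counts << 32, per the
 * TBR_SCALE convention in classq_subr.c) so that refill and charge
 * operations stay in plain integer arithmetic.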
*/ struct tb_regulator { - u_int64_t tbr_rate_raw; /* (unscaled) token bucket rate */ - u_int32_t tbr_percent; /* token bucket rate in percentage */ - int64_t tbr_rate; /* (scaled) token bucket rate */ - int64_t tbr_depth; /* (scaled) token bucket depth */ + u_int64_t tbr_rate_raw; /* (unscaled) token bucket rate */ + u_int32_t tbr_percent; /* token bucket rate in percentage */ + int64_t tbr_rate; /* (scaled) token bucket rate */ + int64_t tbr_depth; /* (scaled) token bucket depth */ - int64_t tbr_token; /* (scaled) current token */ - int64_t tbr_filluptime; /* (scaled) time to fill up bucket */ - u_int64_t tbr_last; /* last time token was updated */ + int64_t tbr_token; /* (scaled) current token */ + int64_t tbr_filluptime; /* (scaled) time to fill up bucket */ + u_int64_t tbr_last; /* last time token was updated */ - /* needed for poll-and-dequeue */ + /* needed for poll-and-dequeue */ }; /* simple token bucket meter profile */ struct tb_profile { - u_int64_t rate; /* rate in bit-per-sec */ - u_int32_t percent; /* rate in percentage */ - u_int32_t depth; /* depth in bytes */ + u_int64_t rate; /* rate in bit-per-sec */ + u_int32_t percent; /* rate in percentage */ + u_int32_t depth; /* depth in bytes */ }; struct ifclassq; @@ -118,19 +118,19 @@ typedef int (*ifclassq_req_func)(struct ifclassq *, enum cqrq, void *); struct ifclassq { decl_lck_mtx_data(, ifcq_lock); - struct ifnet *ifcq_ifp; /* back pointer to interface */ - u_int32_t ifcq_len; /* packet count */ - u_int32_t ifcq_maxlen; - struct pktcntr ifcq_xmitcnt; - struct pktcntr ifcq_dropcnt; - - u_int32_t ifcq_type; /* scheduler type */ - u_int32_t ifcq_flags; /* flags */ - u_int32_t ifcq_sflags; /* scheduler flags */ - u_int32_t ifcq_target_qdelay; /* target queue delay */ - u_int32_t ifcq_bytes; /* bytes count */ - u_int32_t ifcq_pkt_drop_limit; - void *ifcq_disc; /* for scheduler-specific use */ + struct ifnet *ifcq_ifp; /* back pointer to interface */ + u_int32_t ifcq_len; /* packet count */ + u_int32_t ifcq_maxlen; + struct pktcntr ifcq_xmitcnt; + struct pktcntr ifcq_dropcnt; + + u_int32_t ifcq_type; /* scheduler type */ + u_int32_t ifcq_flags; /* flags */ + u_int32_t ifcq_sflags; /* scheduler flags */ + u_int32_t ifcq_target_qdelay; /* target queue delay */ + u_int32_t ifcq_bytes; /* bytes count */ + u_int32_t ifcq_pkt_drop_limit; + void *ifcq_disc; /* for scheduler-specific use */ /* * ifcq_disc_slots[] represents the leaf classes configured for the * corresponding discpline/scheduler, ordered by their corresponding @@ -151,50 +151,50 @@ struct ifclassq { * scheduler classes. 
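 *
 * A minimal qid-to-class lookup over these slots might read
 * (hypothetical helper, not part of this header):
 *
 *	for (i = 0; i < IFCQ_SC_MAX; i++)
 *		if (ifq->ifcq_disc_slots[i].qid == qid)
 *			return (ifq->ifcq_disc_slots[i].cl);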
*/ struct ifclassq_disc_slot { - u_int32_t qid; - void *cl; + u_int32_t qid; + void *cl; } ifcq_disc_slots[IFCQ_SC_MAX]; /* for discipline use */ - ifclassq_enq_func ifcq_enqueue; - ifclassq_deq_func ifcq_dequeue; - ifclassq_deq_sc_func ifcq_dequeue_sc; + ifclassq_enq_func ifcq_enqueue; + ifclassq_deq_func ifcq_dequeue; + ifclassq_deq_sc_func ifcq_dequeue_sc; ifclassq_deq_multi_func ifcq_dequeue_multi; ifclassq_deq_sc_multi_func ifcq_dequeue_sc_multi; - ifclassq_req_func ifcq_request; + ifclassq_req_func ifcq_request; /* token bucket regulator */ - struct tb_regulator ifcq_tbr; /* TBR */ + struct tb_regulator ifcq_tbr; /* TBR */ }; /* ifcq_flags */ -#define IFCQF_READY 0x01 /* ifclassq supports discipline */ -#define IFCQF_ENABLED 0x02 /* ifclassq is in use */ -#define IFCQF_TBR 0x04 /* Token Bucket Regulator is in use */ +#define IFCQF_READY 0x01 /* ifclassq supports discipline */ +#define IFCQF_ENABLED 0x02 /* ifclassq is in use */ +#define IFCQF_TBR 0x04 /* Token Bucket Regulator is in use */ -#define IFCQ_IS_READY(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_READY) -#define IFCQ_IS_ENABLED(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_ENABLED) -#define IFCQ_TBR_IS_ENABLED(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_TBR) +#define IFCQ_IS_READY(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_READY) +#define IFCQ_IS_ENABLED(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_ENABLED) +#define IFCQ_TBR_IS_ENABLED(_ifcq) ((_ifcq)->ifcq_flags & IFCQF_TBR) /* classq enqueue return value */ /* packet has to be dropped */ -#define CLASSQEQ_DROP (-1) +#define CLASSQEQ_DROP (-1) /* packet successfully enqueued */ -#define CLASSQEQ_SUCCESS 0 +#define CLASSQEQ_SUCCESS 0 /* packet enqueued; give flow control feedback */ -#define CLASSQEQ_SUCCESS_FC 1 +#define CLASSQEQ_SUCCESS_FC 1 /* packet needs to be dropped due to flowcontrol; give flow control feedback */ -#define CLASSQEQ_DROP_FC 2 +#define CLASSQEQ_DROP_FC 2 /* packet needs to be dropped due to suspension; give flow control feedback */ -#define CLASSQEQ_DROP_SP 3 +#define CLASSQEQ_DROP_SP 3 /* interface event argument for CLASSQRQ_EVENT */ typedef enum cqev { CLASSQ_EV_INIT = 0, - CLASSQ_EV_LINK_BANDWIDTH = 1, /* link bandwidth has changed */ - CLASSQ_EV_LINK_LATENCY = 2, /* link latency has changed */ - CLASSQ_EV_LINK_MTU = 3, /* link MTU has changed */ - CLASSQ_EV_LINK_UP = 4, /* link is now up */ - CLASSQ_EV_LINK_DOWN = 5, /* link is now down */ + CLASSQ_EV_LINK_BANDWIDTH = 1, /* link bandwidth has changed */ + CLASSQ_EV_LINK_LATENCY = 2, /* link latency has changed */ + CLASSQ_EV_LINK_MTU = 3, /* link MTU has changed */ + CLASSQ_EV_LINK_UP = 4, /* link is now up */ + CLASSQ_EV_LINK_DOWN = 5, /* link is now down */ } cqev_t; #endif /* BSD_KERNEL_PRIVATE */ @@ -206,15 +206,15 @@ typedef enum cqev { extern "C" { #endif struct if_ifclassq_stats { - u_int32_t ifqs_len; - u_int32_t ifqs_maxlen; - struct pktcntr ifqs_xmitcnt; - struct pktcntr ifqs_dropcnt; - u_int32_t ifqs_scheduler; + u_int32_t ifqs_len; + u_int32_t ifqs_maxlen; + struct pktcntr ifqs_xmitcnt; + struct pktcntr ifqs_dropcnt; + u_int32_t ifqs_scheduler; union { - struct tcq_classstats ifqs_tcq_stats; - struct qfq_classstats ifqs_qfq_stats; - struct fq_codel_classstats ifqs_fq_codel_stats; + struct tcq_classstats ifqs_tcq_stats; + struct qfq_classstats ifqs_qfq_stats; + struct fq_codel_classstats ifqs_fq_codel_stats; }; } __attribute__((aligned(8))); @@ -226,110 +226,110 @@ struct if_ifclassq_stats { /* * For ifclassq lock */ -#define IFCQ_LOCK_ASSERT_HELD(_ifcq) \ +#define IFCQ_LOCK_ASSERT_HELD(_ifcq) \ 
LCK_MTX_ASSERT(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_OWNED) -#define IFCQ_LOCK_ASSERT_NOTHELD(_ifcq) \ +#define IFCQ_LOCK_ASSERT_NOTHELD(_ifcq) \ LCK_MTX_ASSERT(&(_ifcq)->ifcq_lock, LCK_MTX_ASSERT_NOTOWNED) -#define IFCQ_LOCK(_ifcq) \ +#define IFCQ_LOCK(_ifcq) \ lck_mtx_lock(&(_ifcq)->ifcq_lock) -#define IFCQ_LOCK_SPIN(_ifcq) \ +#define IFCQ_LOCK_SPIN(_ifcq) \ lck_mtx_lock_spin(&(_ifcq)->ifcq_lock) -#define IFCQ_CONVERT_LOCK(_ifcq) do { \ - IFCQ_LOCK_ASSERT_HELD(_ifcq); \ - lck_mtx_convert_spin(&(_ifcq)->ifcq_lock); \ +#define IFCQ_CONVERT_LOCK(_ifcq) do { \ + IFCQ_LOCK_ASSERT_HELD(_ifcq); \ + lck_mtx_convert_spin(&(_ifcq)->ifcq_lock); \ } while (0) -#define IFCQ_UNLOCK(_ifcq) \ +#define IFCQ_UNLOCK(_ifcq) \ lck_mtx_unlock(&(_ifcq)->ifcq_lock) /* * For ifclassq operations */ -#define IFCQ_ENQUEUE(_ifq, _p, _t, _err, _drop) do { \ - (_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _p, _t, _drop); \ +#define IFCQ_ENQUEUE(_ifq, _p, _t, _err, _drop) do { \ + (_err) = (*(_ifq)->ifcq_enqueue)(_ifq, _p, _t, _drop); \ } while (0) -#define IFCQ_DEQUEUE(_ifq, _p, _t) do { \ - (_p) = (*(_ifq)->ifcq_dequeue)(_ifq, _t); \ +#define IFCQ_DEQUEUE(_ifq, _p, _t) do { \ + (_p) = (*(_ifq)->ifcq_dequeue)(_ifq, _t); \ } while (0) -#define IFCQ_DEQUEUE_SC(_ifq, _sc, _p, _t) do { \ - (_p) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, _t); \ +#define IFCQ_DEQUEUE_SC(_ifq, _sc, _p, _t) do { \ + (_p) = (*(_ifq)->ifcq_dequeue_sc)(_ifq, _sc, _t); \ } while (0) -#define IFCQ_TBR_DEQUEUE(_ifcq, _p, _t) do { \ - (_p) = ifclassq_tbr_dequeue(_ifcq, _t); \ +#define IFCQ_TBR_DEQUEUE(_ifcq, _p, _t) do { \ + (_p) = ifclassq_tbr_dequeue(_ifcq, _t); \ } while (0) -#define IFCQ_TBR_DEQUEUE_SC(_ifcq, _sc, _p, _t) do { \ - (_p) = ifclassq_tbr_dequeue_sc(_ifcq, _sc, _t); \ +#define IFCQ_TBR_DEQUEUE_SC(_ifcq, _sc, _p, _t) do { \ + (_p) = ifclassq_tbr_dequeue_sc(_ifcq, _sc, _t); \ } while (0) -#define IFCQ_PURGE(_ifq) do { \ - (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL); \ +#define IFCQ_PURGE(_ifq) do { \ + (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE, NULL); \ } while (0) -#define IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do { \ - cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 }; \ - (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req); \ - (_packets) = _req.packets; \ - (_bytes) = _req.bytes; \ +#define IFCQ_PURGE_SC(_ifq, _sc, _flow, _packets, _bytes) do { \ + cqrq_purge_sc_t _req = { _sc, _flow, 0, 0 }; \ + (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_PURGE_SC, &_req); \ + (_packets) = _req.packets; \ + (_bytes) = _req.bytes; \ } while (0) -#define IFCQ_UPDATE(_ifq, _ev) do { \ - (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT, \ - (void *)(_ev)); \ +#define IFCQ_UPDATE(_ifq, _ev) do { \ + (void) (*(_ifq)->ifcq_request)(_ifq, CLASSQRQ_EVENT, \ + (void *)(_ev)); \ } while (0) -#define IFCQ_SET_THROTTLE(_ifq, _level, _err) do { \ - cqrq_throttle_t _req = { 1, _level }; \ - (_err) = (*(_ifq)->ifcq_request) \ - (_ifq, CLASSQRQ_THROTTLE, &_req); \ +#define IFCQ_SET_THROTTLE(_ifq, _level, _err) do { \ + cqrq_throttle_t _req = { 1, _level }; \ + (_err) = (*(_ifq)->ifcq_request) \ + (_ifq, CLASSQRQ_THROTTLE, &_req); \ } while (0) -#define IFCQ_GET_THROTTLE(_ifq, _level, _err) do { \ - cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF }; \ - (_err) = (*(_ifq)->ifcq_request) \ - (_ifq, CLASSQRQ_THROTTLE, &_req); \ - (_level) = _req.level; \ +#define IFCQ_GET_THROTTLE(_ifq, _level, _err) do { \ + cqrq_throttle_t _req = { 0, IFNET_THROTTLE_OFF }; \ + (_err) = (*(_ifq)->ifcq_request) \ + (_ifq, CLASSQRQ_THROTTLE, &_req); \ + 
(_level) = _req.level; \ } while (0) -#define IFCQ_LEN_SC(_ifq, _sc, _packets, _bytes, _err) do { \ - cqrq_stat_sc_t _req = { _sc, 0, 0 }; \ - (_err) = (*(ifq)->ifcq_request)(_ifq, CLASSQRQ_STAT_SC, &_req); \ - if ((_packets) != NULL) \ - (*(_packets)) = _req.packets; \ - if ((_bytes) != NULL) \ - (*(_bytes)) = _req.bytes; \ +#define IFCQ_LEN_SC(_ifq, _sc, _packets, _bytes, _err) do { \ + cqrq_stat_sc_t _req = { _sc, 0, 0 }; \ + (_err) = (*(ifq)->ifcq_request)(_ifq, CLASSQRQ_STAT_SC, &_req); \ + if ((_packets) != NULL) \ + (*(_packets)) = _req.packets; \ + if ((_bytes) != NULL) \ + (*(_bytes)) = _req.bytes; \ } while (0) -#define IFCQ_LEN(_ifcq) ((_ifcq)->ifcq_len) -#define IFCQ_QFULL(_ifcq) (IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen) -#define IFCQ_IS_EMPTY(_ifcq) (IFCQ_LEN(_ifcq) == 0) -#define IFCQ_INC_LEN(_ifcq) (IFCQ_LEN(_ifcq)++) -#define IFCQ_DEC_LEN(_ifcq) (IFCQ_LEN(_ifcq)--) -#define IFCQ_MAXLEN(_ifcq) ((_ifcq)->ifcq_maxlen) -#define IFCQ_SET_MAXLEN(_ifcq, _len) ((_ifcq)->ifcq_maxlen = (_len)) -#define IFCQ_TARGET_QDELAY(_ifcq) ((_ifcq)->ifcq_target_qdelay) -#define IFCQ_BYTES(_ifcq) ((_ifcq)->ifcq_bytes) -#define IFCQ_INC_BYTES(_ifcq, _len) \ +#define IFCQ_LEN(_ifcq) ((_ifcq)->ifcq_len) +#define IFCQ_QFULL(_ifcq) (IFCQ_LEN(_ifcq) >= (_ifcq)->ifcq_maxlen) +#define IFCQ_IS_EMPTY(_ifcq) (IFCQ_LEN(_ifcq) == 0) +#define IFCQ_INC_LEN(_ifcq) (IFCQ_LEN(_ifcq)++) +#define IFCQ_DEC_LEN(_ifcq) (IFCQ_LEN(_ifcq)--) +#define IFCQ_MAXLEN(_ifcq) ((_ifcq)->ifcq_maxlen) +#define IFCQ_SET_MAXLEN(_ifcq, _len) ((_ifcq)->ifcq_maxlen = (_len)) +#define IFCQ_TARGET_QDELAY(_ifcq) ((_ifcq)->ifcq_target_qdelay) +#define IFCQ_BYTES(_ifcq) ((_ifcq)->ifcq_bytes) +#define IFCQ_INC_BYTES(_ifcq, _len) \ ((_ifcq)->ifcq_bytes = (_ifcq)->ifcq_bytes + (_len)) -#define IFCQ_DEC_BYTES(_ifcq, _len) \ +#define IFCQ_DEC_BYTES(_ifcq, _len) \ ((_ifcq)->ifcq_bytes = (_ifcq)->ifcq_bytes - (_len)) -#define IFCQ_XMIT_ADD(_ifcq, _pkt, _len) do { \ - PKTCNTR_ADD(&(_ifcq)->ifcq_xmitcnt, _pkt, _len); \ +#define IFCQ_XMIT_ADD(_ifcq, _pkt, _len) do { \ + PKTCNTR_ADD(&(_ifcq)->ifcq_xmitcnt, _pkt, _len); \ } while (0) -#define IFCQ_DROP_ADD(_ifcq, _pkt, _len) do { \ - PKTCNTR_ADD(&(_ifcq)->ifcq_dropcnt, _pkt, _len); \ +#define IFCQ_DROP_ADD(_ifcq, _pkt, _len) do { \ + PKTCNTR_ADD(&(_ifcq)->ifcq_dropcnt, _pkt, _len); \ } while (0) -#define IFCQ_PKT_DROP_LIMIT(_ifcq) ((_ifcq)->ifcq_pkt_drop_limit) +#define IFCQ_PKT_DROP_LIMIT(_ifcq) ((_ifcq)->ifcq_pkt_drop_limit) extern int ifclassq_setup(struct ifnet *, u_int32_t, boolean_t); extern void ifclassq_teardown(struct ifnet *); diff --git a/bsd/net/content_filter.c b/bsd/net/content_filter.c index ae7ff13ab..49f16a7df 100644 --- a/bsd/net/content_filter.c +++ b/bsd/net/content_filter.c @@ -32,7 +32,7 @@ * also kept in kernel buffer until the user space agents makes a pass or drop * decision. This unidirectional flow of content avoids unnecessary data copies * back to the kernel. - * + * * A user space filter agent opens a kernel control socket with the name * CONTENT_FILTER_CONTROL_NAME to attach to the socket content filter subsystem. 
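 *
 * A minimal sketch of that attach from user space (standard
 * kernel-control socket boilerplate; error handling omitted):
 *
 *	struct ctl_info info;
 *	struct sockaddr_ctl sc;
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, CONTENT_FILTER_CONTROL_NAME,
 *	    sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);	/* resolve name to ctl_id */
 *
 *	bzero(&sc, sizeof(sc));
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;
 *	sc.sc_unit = 0;			/* let the kernel pick a unit */
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));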
* When connected, a "struct content_filter" is created and set as the @@ -323,7 +323,7 @@ #include #include -#define MAX_CONTENT_FILTER 2 +#define MAX_CONTENT_FILTER 2 struct cfil_entry; @@ -332,24 +332,24 @@ struct cfil_entry; * It's created and associated with a kernel control socket instance */ struct content_filter { - kern_ctl_ref cf_kcref; - u_int32_t cf_kcunit; - u_int32_t cf_flags; + kern_ctl_ref cf_kcref; + u_int32_t cf_kcunit; + u_int32_t cf_flags; - uint32_t cf_necp_control_unit; + uint32_t cf_necp_control_unit; - uint32_t cf_sock_count; + uint32_t cf_sock_count; TAILQ_HEAD(, cfil_entry) cf_sock_entries; }; -#define CFF_ACTIVE 0x01 -#define CFF_DETACHING 0x02 -#define CFF_FLOW_CONTROLLED 0x04 +#define CFF_ACTIVE 0x01 +#define CFF_DETACHING 0x02 +#define CFF_FLOW_CONTROLLED 0x04 struct content_filter **content_filters = NULL; -uint32_t cfil_active_count = 0; /* Number of active content filters */ -uint32_t cfil_sock_attached_count = 0; /* Number of sockets attachements */ -uint32_t cfil_sock_udp_attached_count = 0; /* Number of UDP sockets attachements */ +uint32_t cfil_active_count = 0; /* Number of active content filters */ +uint32_t cfil_sock_attached_count = 0; /* Number of sockets attachements */ +uint32_t cfil_sock_udp_attached_count = 0; /* Number of UDP sockets attachements */ uint32_t cfil_close_wait_timeout = 1000; /* in milliseconds */ static kern_ctl_ref cfil_kctlref = NULL; @@ -359,7 +359,7 @@ static lck_attr_t *cfil_lck_attr = NULL; static lck_grp_t *cfil_lck_grp = NULL; decl_lck_rw_data(static, cfil_lck_rw); -#define CFIL_RW_LCK_MAX 8 +#define CFIL_RW_LCK_MAX 8 int cfil_rw_nxt_lck = 0; void* cfil_rw_lock_history[CFIL_RW_LCK_MAX]; @@ -367,21 +367,21 @@ void* cfil_rw_lock_history[CFIL_RW_LCK_MAX]; int cfil_rw_nxt_unlck = 0; void* cfil_rw_unlock_history[CFIL_RW_LCK_MAX]; -#define CONTENT_FILTER_ZONE_NAME "content_filter" -#define CONTENT_FILTER_ZONE_MAX 10 -static struct zone *content_filter_zone = NULL; /* zone for content_filter */ +#define CONTENT_FILTER_ZONE_NAME "content_filter" +#define CONTENT_FILTER_ZONE_MAX 10 +static struct zone *content_filter_zone = NULL; /* zone for content_filter */ -#define CFIL_INFO_ZONE_NAME "cfil_info" -#define CFIL_INFO_ZONE_MAX 1024 -static struct zone *cfil_info_zone = NULL; /* zone for cfil_info */ +#define CFIL_INFO_ZONE_NAME "cfil_info" +#define CFIL_INFO_ZONE_MAX 1024 +static struct zone *cfil_info_zone = NULL; /* zone for cfil_info */ MBUFQ_HEAD(cfil_mqhead); struct cfil_queue { - uint64_t q_start; /* offset of first byte in queue */ - uint64_t q_end; /* offset of last byte in queue */ - struct cfil_mqhead q_mq; + uint64_t q_start; /* offset of first byte in queue */ + uint64_t q_end; /* offset of last byte in queue */ + struct cfil_mqhead q_mq; }; /* @@ -391,50 +391,50 @@ struct cfil_queue { */ struct cfil_entry { TAILQ_ENTRY(cfil_entry) cfe_link; - struct content_filter *cfe_filter; + struct content_filter *cfe_filter; - struct cfil_info *cfe_cfil_info; - uint32_t cfe_flags; - uint32_t cfe_necp_control_unit; - struct timeval cfe_last_event; /* To user space */ - struct timeval cfe_last_action; /* From user space */ + struct cfil_info *cfe_cfil_info; + uint32_t cfe_flags; + uint32_t cfe_necp_control_unit; + struct timeval cfe_last_event; /* To user space */ + struct timeval cfe_last_action; /* From user space */ struct cfe_buf { /* * cfe_pending_q holds data that has been delivered to * the filter and for which we are waiting for an action */ - struct cfil_queue cfe_pending_q; + struct cfil_queue cfe_pending_q; /* * This 
queue is for data that has not be delivered to * the content filter (new data, pass peek or flow control) */ - struct cfil_queue cfe_ctl_q; + struct cfil_queue cfe_ctl_q; - uint64_t cfe_pass_offset; - uint64_t cfe_peek_offset; - uint64_t cfe_peeked; + uint64_t cfe_pass_offset; + uint64_t cfe_peek_offset; + uint64_t cfe_peeked; } cfe_snd, cfe_rcv; }; -#define CFEF_CFIL_ATTACHED 0x0001 /* was attached to filter */ -#define CFEF_SENT_SOCK_ATTACHED 0x0002 /* sock attach event was sent */ -#define CFEF_DATA_START 0x0004 /* can send data event */ -#define CFEF_FLOW_CONTROLLED 0x0008 /* wait for flow control lift */ -#define CFEF_SENT_DISCONNECT_IN 0x0010 /* event was sent */ -#define CFEF_SENT_DISCONNECT_OUT 0x0020 /* event was sent */ -#define CFEF_SENT_SOCK_CLOSED 0x0040 /* closed event was sent */ -#define CFEF_CFIL_DETACHED 0x0080 /* filter was detached */ - - -#define CFI_ADD_TIME_LOG(cfil, t1, t0, op) \ - struct timeval _tdiff; \ - if ((cfil)->cfi_op_list_ctr < CFI_MAX_TIME_LOG_ENTRY) { \ - timersub(t1, t0, &_tdiff); \ - (cfil)->cfi_op_time[(cfil)->cfi_op_list_ctr] = (uint32_t)(_tdiff.tv_sec * 1000 + _tdiff.tv_usec / 1000);\ - (cfil)->cfi_op_list[(cfil)->cfi_op_list_ctr] = (unsigned char)op; \ - (cfil)->cfi_op_list_ctr ++; \ - } +#define CFEF_CFIL_ATTACHED 0x0001 /* was attached to filter */ +#define CFEF_SENT_SOCK_ATTACHED 0x0002 /* sock attach event was sent */ +#define CFEF_DATA_START 0x0004 /* can send data event */ +#define CFEF_FLOW_CONTROLLED 0x0008 /* wait for flow control lift */ +#define CFEF_SENT_DISCONNECT_IN 0x0010 /* event was sent */ +#define CFEF_SENT_DISCONNECT_OUT 0x0020 /* event was sent */ +#define CFEF_SENT_SOCK_CLOSED 0x0040 /* closed event was sent */ +#define CFEF_CFIL_DETACHED 0x0080 /* filter was detached */ + + +#define CFI_ADD_TIME_LOG(cfil, t1, t0, op) \ + struct timeval _tdiff; \ + if ((cfil)->cfi_op_list_ctr < CFI_MAX_TIME_LOG_ENTRY) { \ + timersub(t1, t0, &_tdiff); \ + (cfil)->cfi_op_time[(cfil)->cfi_op_list_ctr] = (uint32_t)(_tdiff.tv_sec * 1000 + _tdiff.tv_usec / 1000);\ + (cfil)->cfi_op_list[(cfil)->cfi_op_list_ctr] = (unsigned char)op; \ + (cfil)->cfi_op_list_ctr ++; \ + } struct cfil_hash_entry; @@ -444,14 +444,14 @@ struct cfil_hash_entry; * There is a struct cfil_info per socket */ struct cfil_info { - TAILQ_ENTRY(cfil_info) cfi_link; - struct socket *cfi_so; - uint64_t cfi_flags; - uint64_t cfi_sock_id; - struct timeval64 cfi_first_event; - uint32_t cfi_op_list_ctr; - uint32_t cfi_op_time[CFI_MAX_TIME_LOG_ENTRY]; /* time interval in microseconds since first event */ - unsigned char cfi_op_list[CFI_MAX_TIME_LOG_ENTRY]; + TAILQ_ENTRY(cfil_info) cfi_link; + struct socket *cfi_so; + uint64_t cfi_flags; + uint64_t cfi_sock_id; + struct timeval64 cfi_first_event; + uint32_t cfi_op_list_ctr; + uint32_t cfi_op_time[CFI_MAX_TIME_LOG_ENTRY]; /* time interval in microseconds since first event */ + unsigned char cfi_op_list[CFI_MAX_TIME_LOG_ENTRY]; struct cfi_buf { /* @@ -460,44 +460,44 @@ struct cfil_info { * this socket and data in the flow queue * cfi_pending_mbcnt counts in sballoc() "chars of mbufs used" */ - uint64_t cfi_pending_first; - uint64_t cfi_pending_last; - uint32_t cfi_pending_mbcnt; - uint32_t cfi_pending_mbnum; - uint32_t cfi_tail_drop_cnt; + uint64_t cfi_pending_first; + uint64_t cfi_pending_last; + uint32_t cfi_pending_mbcnt; + uint32_t cfi_pending_mbnum; + uint32_t cfi_tail_drop_cnt; /* * cfi_pass_offset is the minimum of all the filters */ - uint64_t cfi_pass_offset; + uint64_t cfi_pass_offset; /* * cfi_inject_q holds data that needs to be 
re-injected * into the socket after filtering and that can * be queued because of flow control */ - struct cfil_queue cfi_inject_q; + struct cfil_queue cfi_inject_q; } cfi_snd, cfi_rcv; - struct cfil_entry cfi_entries[MAX_CONTENT_FILTER]; + struct cfil_entry cfi_entries[MAX_CONTENT_FILTER]; struct cfil_hash_entry *cfi_hash_entry; } __attribute__((aligned(8))); -#define CFIF_DROP 0x0001 /* drop action applied */ -#define CFIF_CLOSE_WAIT 0x0002 /* waiting for filter to close */ -#define CFIF_SOCK_CLOSED 0x0004 /* socket is closed */ -#define CFIF_RETRY_INJECT_IN 0x0010 /* inject in failed */ -#define CFIF_RETRY_INJECT_OUT 0x0020 /* inject out failed */ -#define CFIF_SHUT_WR 0x0040 /* shutdown write */ -#define CFIF_SHUT_RD 0x0080 /* shutdown read */ +#define CFIF_DROP 0x0001 /* drop action applied */ +#define CFIF_CLOSE_WAIT 0x0002 /* waiting for filter to close */ +#define CFIF_SOCK_CLOSED 0x0004 /* socket is closed */ +#define CFIF_RETRY_INJECT_IN 0x0010 /* inject in failed */ +#define CFIF_RETRY_INJECT_OUT 0x0020 /* inject out failed */ +#define CFIF_SHUT_WR 0x0040 /* shutdown write */ +#define CFIF_SHUT_RD 0x0080 /* shutdown read */ -#define CFI_MASK_GENCNT 0xFFFFFFFF00000000 /* upper 32 bits */ -#define CFI_SHIFT_GENCNT 32 -#define CFI_MASK_FLOWHASH 0x00000000FFFFFFFF /* lower 32 bits */ -#define CFI_SHIFT_FLOWHASH 0 +#define CFI_MASK_GENCNT 0xFFFFFFFF00000000 /* upper 32 bits */ +#define CFI_SHIFT_GENCNT 32 +#define CFI_MASK_FLOWHASH 0x00000000FFFFFFFF /* lower 32 bits */ +#define CFI_SHIFT_FLOWHASH 0 TAILQ_HEAD(cfil_sock_head, cfil_info) cfil_sock_head; -#define CFIL_QUEUE_VERIFY(x) if (cfil_debug) cfil_queue_verify(x) -#define CFIL_INFO_VERIFY(x) if (cfil_debug) cfil_info_verify(x) +#define CFIL_QUEUE_VERIFY(x) if (cfil_debug) cfil_queue_verify(x) +#define CFIL_INFO_VERIFY(x) if (cfil_debug) cfil_info_verify(x) /* * UDP Socket Support @@ -507,9 +507,9 @@ LIST_HEAD(cfilhashhead, cfil_hash_entry); #define CFIL_HASH(laddr, faddr, lport, fport) ((faddr) ^ ((laddr) >> 16) ^ (fport) ^ (lport)) #define IS_UDP(so) (so && so->so_proto->pr_type == SOCK_DGRAM && so->so_proto->pr_protocol == IPPROTO_UDP) #define UNCONNECTED(inp) (inp && (((inp->inp_vflag & INP_IPV4) && (inp->inp_faddr.s_addr == INADDR_ANY)) || \ - ((inp->inp_vflag & INP_IPV6) && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)))) + ((inp->inp_vflag & INP_IPV6) && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)))) #define IS_ENTRY_ATTACHED(cfil_info, kcunit) (cfil_info != NULL && (kcunit <= MAX_CONTENT_FILTER) && \ - cfil_info->cfi_entries[kcunit - 1].cfe_filter != NULL) + cfil_info->cfi_entries[kcunit - 1].cfe_filter != NULL) #define IS_DNS(local, remote) (check_port(local, 53) || check_port(remote, 53) || check_port(local, 5353) || check_port(remote, 5353)) /* @@ -539,23 +539,23 @@ static unsigned int cfil_udp_gc_mbuf_cnt_max = UDP_FLOW_GC_MBUF_CNT_MAX; * Hash entry for cfil_info */ struct cfil_hash_entry { - LIST_ENTRY(cfil_hash_entry) cfentry_link; - struct cfil_info *cfentry_cfil; - u_short cfentry_fport; - u_short cfentry_lport; - sa_family_t cfentry_family; - u_int32_t cfentry_flowhash; - u_int32_t cfentry_lastused; - union { - /* foreign host table entry */ - struct in_addr_4in6 addr46; - struct in6_addr addr6; - } cfentry_faddr; - union { - /* local host table entry */ - struct in_addr_4in6 addr46; - struct in6_addr addr6; - } cfentry_laddr; + LIST_ENTRY(cfil_hash_entry) cfentry_link; + struct cfil_info *cfentry_cfil; + u_short cfentry_fport; + u_short cfentry_lport; + sa_family_t cfentry_family; + u_int32_t cfentry_flowhash; + 
u_int32_t cfentry_lastused; + union { + /* foreign host table entry */ + struct in_addr_4in6 addr46; + struct in6_addr addr6; + } cfentry_faddr; + union { + /* local host table entry */ + struct in_addr_4in6 addr46; + struct in6_addr addr6; + } cfentry_laddr; }; /* @@ -565,10 +565,10 @@ struct cfil_hash_entry { * keyed by the flow 4-tuples . */ struct cfil_db { - struct socket *cfdb_so; - uint32_t cfdb_count; /* Number of total content filters */ - struct cfilhashhead *cfdb_hashbase; - u_long cfdb_hashmask; + struct socket *cfdb_so; + uint32_t cfdb_count; /* Number of total content filters */ + struct cfilhashhead *cfdb_hashbase; + u_long cfdb_hashmask; struct cfil_hash_entry *cfdb_only_entry; /* Optimization for connected UDP */ }; @@ -616,56 +616,56 @@ int cfil_debug = 1; * Sysctls for logs and statistics */ static int sysctl_cfil_filter_list(struct sysctl_oid *, void *, int, - struct sysctl_req *); + struct sysctl_req *); static int sysctl_cfil_sock_list(struct sysctl_oid *, void *, int, - struct sysctl_req *); + struct sysctl_req *); -SYSCTL_NODE(_net, OID_AUTO, cfil, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "cfil"); +SYSCTL_NODE(_net, OID_AUTO, cfil, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "cfil"); -SYSCTL_INT(_net_cfil, OID_AUTO, log, CTLFLAG_RW|CTLFLAG_LOCKED, - &cfil_log_level, 0, ""); +SYSCTL_INT(_net_cfil, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED, + &cfil_log_level, 0, ""); -SYSCTL_INT(_net_cfil, OID_AUTO, debug, CTLFLAG_RW|CTLFLAG_LOCKED, - &cfil_debug, 0, ""); +SYSCTL_INT(_net_cfil, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, + &cfil_debug, 0, ""); -SYSCTL_UINT(_net_cfil, OID_AUTO, sock_attached_count, CTLFLAG_RD|CTLFLAG_LOCKED, - &cfil_sock_attached_count, 0, ""); +SYSCTL_UINT(_net_cfil, OID_AUTO, sock_attached_count, CTLFLAG_RD | CTLFLAG_LOCKED, + &cfil_sock_attached_count, 0, ""); -SYSCTL_UINT(_net_cfil, OID_AUTO, active_count, CTLFLAG_RD|CTLFLAG_LOCKED, - &cfil_active_count, 0, ""); +SYSCTL_UINT(_net_cfil, OID_AUTO, active_count, CTLFLAG_RD | CTLFLAG_LOCKED, + &cfil_active_count, 0, ""); -SYSCTL_UINT(_net_cfil, OID_AUTO, close_wait_timeout, CTLFLAG_RW|CTLFLAG_LOCKED, - &cfil_close_wait_timeout, 0, ""); +SYSCTL_UINT(_net_cfil, OID_AUTO, close_wait_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, + &cfil_close_wait_timeout, 0, ""); static int cfil_sbtrim = 1; -SYSCTL_UINT(_net_cfil, OID_AUTO, sbtrim, CTLFLAG_RW|CTLFLAG_LOCKED, - &cfil_sbtrim, 0, ""); +SYSCTL_UINT(_net_cfil, OID_AUTO, sbtrim, CTLFLAG_RW | CTLFLAG_LOCKED, + &cfil_sbtrim, 0, ""); -SYSCTL_PROC(_net_cfil, OID_AUTO, filter_list, CTLFLAG_RD|CTLFLAG_LOCKED, - 0, 0, sysctl_cfil_filter_list, "S,cfil_filter_stat", ""); +SYSCTL_PROC(_net_cfil, OID_AUTO, filter_list, CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_cfil_filter_list, "S,cfil_filter_stat", ""); -SYSCTL_PROC(_net_cfil, OID_AUTO, sock_list, CTLFLAG_RD|CTLFLAG_LOCKED, - 0, 0, sysctl_cfil_sock_list, "S,cfil_sock_stat", ""); +SYSCTL_PROC(_net_cfil, OID_AUTO, sock_list, CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_cfil_sock_list, "S,cfil_sock_stat", ""); -SYSCTL_STRUCT(_net_cfil, OID_AUTO, stats, CTLFLAG_RD|CTLFLAG_LOCKED, - &cfil_stats, cfil_stats, ""); +SYSCTL_STRUCT(_net_cfil, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED, + &cfil_stats, cfil_stats, ""); /* * Forward declaration to appease the compiler */ static int cfil_action_data_pass(struct socket *, struct cfil_info *, uint32_t, int, - uint64_t, uint64_t); + uint64_t, uint64_t); static int cfil_action_drop(struct socket *, struct cfil_info *, uint32_t); static int cfil_action_bless_client(uint32_t, struct cfil_msg_hdr *); static int 
cfil_dispatch_closed_event(struct socket *, struct cfil_info *, int); static int cfil_data_common(struct socket *, struct cfil_info *, int, struct sockaddr *, - struct mbuf *, struct mbuf *, uint32_t); + struct mbuf *, struct mbuf *, uint32_t); static int cfil_data_filter(struct socket *, struct cfil_info *, uint32_t, int, - struct mbuf *, uint64_t); + struct mbuf *, uint64_t); static void fill_ip_sockaddr_4_6(union sockaddr_in_4_6 *, - struct in_addr, u_int16_t); + struct in_addr, u_int16_t); static void fill_ip6_sockaddr_4_6(union sockaddr_in_4_6 *, - struct in6_addr *, u_int16_t); + struct in6_addr *, u_int16_t); ; static int cfil_dispatch_attach_event(struct socket *, struct cfil_info *, uint32_t); static void cfil_info_free(struct cfil_info *); @@ -677,7 +677,7 @@ static int cfil_service_pending_queue(struct socket *, struct cfil_info *, uint3 static int cfil_data_service_ctl_q(struct socket *, struct cfil_info *, uint32_t, int); static void cfil_info_verify(struct cfil_info *); static int cfil_update_data_offsets(struct socket *, struct cfil_info *, uint32_t, int, - uint64_t, uint64_t); + uint64_t, uint64_t); static int cfil_acquire_sockbuf(struct socket *, struct cfil_info *, int); static void cfil_release_sockbuf(struct socket *, int); static int cfil_filters_attached(struct socket *); @@ -699,20 +699,20 @@ void cfil_db_delete_entry(struct cfil_db *, struct cfil_hash_entry *); struct cfil_hash_entry *cfil_sock_udp_get_flow(struct socket *, uint32_t, bool, struct sockaddr *, struct sockaddr *); struct cfil_info *cfil_db_get_cfil_info(struct cfil_db *, cfil_sock_id_t); static errno_t cfil_sock_udp_handle_data(bool, struct socket *, struct sockaddr *, struct sockaddr *, - struct mbuf *, struct mbuf *, uint32_t); + struct mbuf *, struct mbuf *, uint32_t); static int32_t cfil_sock_udp_data_pending(struct sockbuf *, bool); static void cfil_sock_udp_is_closed(struct socket *); -static int cfil_sock_udp_notify_shutdown(struct socket *, int , int, int); +static int cfil_sock_udp_notify_shutdown(struct socket *, int, int, int); static int cfil_sock_udp_shutdown(struct socket *, int *); static void cfil_sock_udp_close_wait(struct socket *); static void cfil_sock_udp_buf_update(struct sockbuf *); static int cfil_filters_udp_attached(struct socket *, bool); static void cfil_get_flow_address_v6(struct cfil_hash_entry *, struct inpcb *, - struct in6_addr **, struct in6_addr **, - u_int16_t *, u_int16_t *); + struct in6_addr **, struct in6_addr **, + u_int16_t *, u_int16_t *); static void cfil_get_flow_address(struct cfil_hash_entry *, struct inpcb *, - struct in_addr *, struct in_addr *, - u_int16_t *, u_int16_t *); + struct in_addr *, struct in_addr *, + u_int16_t *, u_int16_t *); static void cfil_info_log(int, struct cfil_info *, const char *); void cfil_filter_show(u_int32_t); void cfil_info_show(void); @@ -794,7 +794,7 @@ cfil_rw_lock_shared_to_exclusive(lck_rw_t *lck) cfil_rw_unlock_history[cfil_rw_nxt_unlck] = lr_saved; cfil_rw_nxt_unlck = (cfil_rw_nxt_unlck + 1) % CFIL_RW_LCK_MAX; } - return (upgraded); + return upgraded; } static void @@ -838,17 +838,19 @@ cfil_data_length(struct mbuf *m, int *retmbcnt, int *retmbnum) // Locate the start of data for (m0 = m; m0 != NULL; m0 = m0->m_next) { - if (m0->m_flags & M_PKTHDR) + if (m0->m_flags & M_PKTHDR) { break; + } } if (m0 == NULL) { CFIL_LOG(LOG_ERR, "cfil_data_length: no M_PKTHDR"); - return (0); + return 0; } m = m0; - if (retmbcnt == NULL && retmbnum == NULL) - return (m_length(m)); + if (retmbcnt == NULL && retmbnum == NULL) { + return 
m_length(m); + } pktlen = 0; mbcnt = 0; @@ -857,8 +859,9 @@ cfil_data_length(struct mbuf *m, int *retmbcnt, int *retmbnum) pktlen += m0->m_len; mbnum++; mbcnt += MSIZE; - if (m0->m_flags & M_EXT) + if (m0->m_flags & M_EXT) { mbcnt += m0->m_ext.ext_size; + } } if (retmbcnt) { *retmbcnt = mbcnt; @@ -866,7 +869,7 @@ cfil_data_length(struct mbuf *m, int *retmbcnt, int *retmbnum) if (retmbnum) { *retmbnum = mbnum; } - return (pktlen); + return pktlen; } static struct mbuf * @@ -876,8 +879,9 @@ cfil_data_start(struct mbuf *m) // Locate the start of data for (m0 = m; m0 != NULL; m0 = m0->m_next) { - if (m0->m_flags & M_PKTHDR) + if (m0->m_flags & M_PKTHDR) { break; + } } return m0; } @@ -902,32 +906,32 @@ cfil_queue_drain(struct cfil_queue *cfq) cfq->q_end = 0; MBUFQ_DRAIN(&cfq->q_mq); - return (drained); + return drained; } /* Return 1 when empty, 0 otherwise */ static inline int cfil_queue_empty(struct cfil_queue *cfq) { - return (MBUFQ_EMPTY(&cfq->q_mq)); + return MBUFQ_EMPTY(&cfq->q_mq); } static inline uint64_t cfil_queue_offset_first(struct cfil_queue *cfq) { - return (cfq->q_start); + return cfq->q_start; } static inline uint64_t cfil_queue_offset_last(struct cfil_queue *cfq) { - return (cfq->q_end); + return cfq->q_end; } static inline uint64_t cfil_queue_len(struct cfil_queue *cfq) { - return (cfq->q_end - cfq->q_start); + return cfq->q_end - cfq->q_start; } /* @@ -950,8 +954,8 @@ cfil_queue_verify(struct cfil_queue *cfq) * are different */ VERIFY((MBUFQ_EMPTY(&cfq->q_mq) && cfq->q_start == cfq->q_end) || - (!MBUFQ_EMPTY(&cfq->q_mq) && - cfq->q_start != cfq->q_end)); + (!MBUFQ_EMPTY(&cfq->q_mq) && + cfq->q_start != cfq->q_end)); MBUFQ_FOREACH(chain, &cfq->q_mq) { size_t chainsize = 0; @@ -961,27 +965,31 @@ cfil_queue_verify(struct cfil_queue *cfq) m = cfil_data_start(m); if (m == NULL || - m == (void *)M_TAG_FREE_PATTERN || - m->m_next == (void *)M_TAG_FREE_PATTERN || - m->m_nextpkt == (void *)M_TAG_FREE_PATTERN) + m == (void *)M_TAG_FREE_PATTERN || + m->m_next == (void *)M_TAG_FREE_PATTERN || + m->m_nextpkt == (void *)M_TAG_FREE_PATTERN) { panic("%s - mq %p is free at %p", __func__, - &cfq->q_mq, m); + &cfq->q_mq, m); + } for (n = m; n != NULL; n = n->m_next) { if (n->m_type != MT_DATA && - n->m_type != MT_HEADER && - n->m_type != MT_OOBDATA) - panic("%s - %p unsupported type %u", __func__, - n, n->m_type); + n->m_type != MT_HEADER && + n->m_type != MT_OOBDATA) { + panic("%s - %p unsupported type %u", __func__, + n, n->m_type); + } chainsize += n->m_len; } - if (mlen != chainsize) + if (mlen != chainsize) { panic("%s - %p m_length() %u != chainsize %lu", - __func__, m, mlen, chainsize); + __func__, m, mlen, chainsize); + } queuesize += chainsize; } - if (queuesize != cfq->q_end - cfq->q_start) + if (queuesize != cfq->q_end - cfq->q_start) { panic("%s - %p queuesize %llu != offsetdiffs %llu", __func__, - m, queuesize, cfq->q_end - cfq->q_start); + m, queuesize, cfq->q_end - cfq->q_start); + } } static void @@ -1012,14 +1020,14 @@ cfil_queue_remove(struct cfil_queue *cfq, mbuf_t m, size_t len) static mbuf_t cfil_queue_first(struct cfil_queue *cfq) { - return (MBUFQ_FIRST(&cfq->q_mq)); + return MBUFQ_FIRST(&cfq->q_mq); } static mbuf_t cfil_queue_next(struct cfil_queue *cfq, mbuf_t m) { #pragma unused(cfq) - return (MBUFQ_NEXT(m)); + return MBUFQ_NEXT(m); } static void @@ -1059,14 +1067,16 @@ cfil_info_verify(struct cfil_info *cfil_info) { int i; - if (cfil_info == NULL) + if (cfil_info == NULL) { return; + } cfil_info_buf_verify(&cfil_info->cfi_snd); 
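The cfil_queue accessors in this hunk rely on a single invariant: q_start and q_end are absolute byte offsets into the flow, so the queue length is their difference and the queue is empty exactly when the two are equal, which is the property cfil_queue_verify() asserts. A minimal user-space model of that bookkeeping (a sketch; the toy_ names are hypothetical, not the kernel structures):

#include <assert.h>
#include <stdint.h>

/* Toy model of struct cfil_queue's offset bookkeeping (illustrative only). */
struct toy_queue {
	uint64_t q_start;	/* absolute offset of the first queued byte */
	uint64_t q_end;		/* absolute offset one past the last queued byte */
};

static uint64_t
toy_queue_len(const struct toy_queue *q)
{
	return q->q_end - q->q_start;	/* mirrors cfil_queue_len() */
}

static int
toy_queue_empty(const struct toy_queue *q)
{
	return q->q_start == q->q_end;	/* the invariant cfil_queue_verify() checks */
}

int
main(void)
{
	struct toy_queue q = { .q_start = 100, .q_end = 100 };

	assert(toy_queue_empty(&q));
	q.q_end += 64;			/* enqueue 64 bytes */
	assert(toy_queue_len(&q) == 64);
	q.q_start += 64;		/* dequeue them */
	assert(toy_queue_empty(&q));
	return 0;
}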
cfil_info_buf_verify(&cfil_info->cfi_rcv); - for (i = 0; i < MAX_CONTENT_FILTER; i++) + for (i = 0; i < MAX_CONTENT_FILTER; i++) { cfil_entry_verify(&cfil_info->cfi_entries[i]); + } } static void @@ -1089,9 +1099,9 @@ verify_content_filter(struct content_filter *cfc) */ static errno_t cfil_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, - void **unitinfo) + void **unitinfo) { - errno_t error = 0; + errno_t error = 0; struct content_filter *cfc = NULL; CFIL_LOG(LOG_NOTICE, ""); @@ -1111,10 +1121,10 @@ cfil_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, cfil_rw_unlock_exclusive(&cfil_lck_rw); MALLOC(tmp, - struct content_filter **, - MAX_CONTENT_FILTER * sizeof(struct content_filter *), - M_TEMP, - M_WAITOK | M_ZERO); + struct content_filter **, + MAX_CONTENT_FILTER * sizeof(struct content_filter *), + M_TEMP, + M_WAITOK | M_ZERO); cfil_rw_lock_exclusive(&cfil_lck_rw); @@ -1124,10 +1134,11 @@ cfil_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, goto done; } /* Another thread may have won the race */ - if (content_filters != NULL) + if (content_filters != NULL) { FREE(tmp, M_TEMP); - else + } else { content_filters = tmp; + } } if (sac->sc_unit == 0 || sac->sc_unit > MAX_CONTENT_FILTER) { @@ -1151,25 +1162,27 @@ cfil_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, } cfil_rw_unlock_exclusive(&cfil_lck_rw); done: - if (error != 0 && cfc != NULL) + if (error != 0 && cfc != NULL) { zfree(content_filter_zone, cfc); + } - if (error == 0) + if (error == 0) { OSIncrementAtomic(&cfil_stats.cfs_ctl_connect_ok); - else + } else { OSIncrementAtomic(&cfil_stats.cfs_ctl_connect_fail); + } CFIL_LOG(LOG_INFO, "return %d cfil_active_count %u kcunit %u", - error, cfil_active_count, sac->sc_unit); + error, cfil_active_count, sac->sc_unit); - return (error); + return error; } static errno_t cfil_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo) { #pragma unused(kctlref) - errno_t error = 0; + errno_t error = 0; struct content_filter *cfc; struct cfil_entry *entry; uint64_t sock_flow_id = 0; @@ -1183,19 +1196,20 @@ cfil_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo) } if (kcunit > MAX_CONTENT_FILTER) { CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)", - kcunit, MAX_CONTENT_FILTER); + kcunit, MAX_CONTENT_FILTER); error = EINVAL; goto done; } cfc = (struct content_filter *)unitinfo; - if (cfc == NULL) + if (cfc == NULL) { goto done; + } cfil_rw_lock_exclusive(&cfil_lck_rw); if (content_filters[kcunit - 1] != cfc || cfc->cf_kcunit != kcunit) { CFIL_LOG(LOG_ERR, "bad unit info %u)", - kcunit); + kcunit); cfil_rw_unlock_exclusive(&cfil_lck_rw); goto done; } @@ -1221,7 +1235,7 @@ cfil_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo) /* Need to let data flow immediately */ entry->cfe_flags |= CFEF_SENT_SOCK_ATTACHED | - CFEF_DATA_START; + CFEF_DATA_START; /* * Respect locking hierarchy @@ -1240,12 +1254,12 @@ cfil_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo) } (void) cfil_action_data_pass(so, cfil_info, kcunit, 1, - CFM_MAX_OFFSET, - CFM_MAX_OFFSET); + CFM_MAX_OFFSET, + CFM_MAX_OFFSET); (void) cfil_action_data_pass(so, cfil_info, kcunit, 0, - CFM_MAX_OFFSET, - CFM_MAX_OFFSET); + CFM_MAX_OFFSET, + CFM_MAX_OFFSET); cfil_rw_lock_exclusive(&cfil_lck_rw); @@ -1255,7 +1269,7 @@ cfil_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo) * cfil_acquire_sockbuf() */ if (entry->cfe_filter == NULL || - (so->so_cfil == NULL && cfil_db_get_cfil_info(so->so_cfil_db, 
sock_flow_id) == NULL)) { + (so->so_cfil == NULL && cfil_db_get_cfil_info(so->so_cfil_db, sock_flow_id) == NULL)) { goto release; } @@ -1265,11 +1279,11 @@ cfil_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo) cfil_info_log(LOG_DEBUG, cfil_info, "CFIL: LIFECYCLE: - FILTER DISCONNECTED"); #endif CFIL_LOG(LOG_NOTICE, "so %llx detached %u", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); if ((cfil_info->cfi_flags & CFIF_CLOSE_WAIT) && cfil_filters_attached(so) == 0) { CFIL_LOG(LOG_NOTICE, "so %llx waking", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); wakeup((caddr_t)cfil_info); } @@ -1300,15 +1314,16 @@ release: zfree(content_filter_zone, cfc); done: - if (error == 0) + if (error == 0) { OSIncrementAtomic(&cfil_stats.cfs_ctl_disconnect_ok); - else + } else { OSIncrementAtomic(&cfil_stats.cfs_ctl_disconnect_fail); + } CFIL_LOG(LOG_INFO, "return %d cfil_active_count %u kcunit %u", - error, cfil_active_count, kcunit); + error, cfil_active_count, kcunit); - return (error); + return error; } /* @@ -1334,11 +1349,12 @@ cfil_acquire_sockbuf(struct socket *so, struct cfil_info *cfil_info, int outgoin * filter threads have released the sockbuf */ while ((sb->sb_flags & SB_LOCK) || - (sb->sb_cfil_thread != NULL && sb->sb_cfil_thread != tp)) { - if (so->so_proto->pr_getlock != NULL) + (sb->sb_cfil_thread != NULL && sb->sb_cfil_thread != tp)) { + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); @@ -1346,7 +1362,7 @@ cfil_acquire_sockbuf(struct socket *so, struct cfil_info *cfil_info, int outgoin VERIFY(sb->sb_wantlock != 0); msleep(&sb->sb_flags, mutex_held, PSOCK, "cfil_acquire_sockbuf", - NULL); + NULL); VERIFY(sb->sb_wantlock != 0); sb->sb_wantlock--; @@ -1366,15 +1382,15 @@ cfil_acquire_sockbuf(struct socket *so, struct cfil_info *cfil_info, int outgoin /* We acquire the socket buffer when we need to cleanup */ if (cfil_info == NULL) { CFIL_LOG(LOG_ERR, "so %llx cfil detached", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = 0; } else if (cfil_info->cfi_flags & CFIF_DROP) { CFIL_LOG(LOG_ERR, "so %llx drop set", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = EPIPE; } - return (error); + return error; } static void @@ -1385,16 +1401,18 @@ cfil_release_sockbuf(struct socket *so, int outgoing) socket_lock_assert_owned(so); - if (sb->sb_cfil_thread != NULL && sb->sb_cfil_thread != tp) + if (sb->sb_cfil_thread != NULL && sb->sb_cfil_thread != tp) { panic("%s sb_cfil_thread %p not current %p", __func__, - sb->sb_cfil_thread, tp); + sb->sb_cfil_thread, tp); + } /* * Don't panic if we are defunct because SB_LOCK has * been cleared by sodefunct() */ - if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK)) + if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK)) { panic("%s SB_LOCK not set on %p", __func__, - sb); + sb); + } /* * We can unlock when the thread unwinds to the last reference */ @@ -1403,31 +1421,33 @@ cfil_release_sockbuf(struct socket *so, int outgoing) sb->sb_cfil_thread = NULL; sb->sb_flags &= ~SB_LOCK; - if (sb->sb_wantlock > 0) + if (sb->sb_wantlock > 0) { wakeup(&sb->sb_flags); + } } } cfil_sock_id_t cfil_sock_id_from_socket(struct socket *so) { - if ((so->so_flags & SOF_CONTENT_FILTER) && so->so_cfil) - return (so->so_cfil->cfi_sock_id); - else 
- return (CFIL_SOCK_ID_NONE); + if ((so->so_flags & SOF_CONTENT_FILTER) && so->so_cfil) { + return so->so_cfil->cfi_sock_id; + } else { + return CFIL_SOCK_ID_NONE; + } } static bool cfil_socket_safe_lock(struct inpcb *inp) { - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) { - socket_lock(inp->inp_socket, 1); - if (in_pcb_checkstate(inp, WNT_RELEASE, 1) != WNT_STOPUSING) { - return true; - } - socket_unlock(inp->inp_socket, 1); - } - return false; + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) { + socket_lock(inp->inp_socket, 1); + if (in_pcb_checkstate(inp, WNT_RELEASE, 1) != WNT_STOPUSING) { + return true; + } + socket_unlock(inp->inp_socket, 1); + } + return false; } static struct socket * @@ -1443,19 +1463,21 @@ cfil_socket_from_sock_id(cfil_sock_id_t cfil_sock_id, bool udp_only) CFIL_LOG(LOG_ERR, "CFIL: VERDICT: search for socket: id %llu gencnt %llx flowhash %x", cfil_sock_id, gencnt, flowhash); #endif - if (udp_only) + if (udp_only) { goto find_udp; + } pcbinfo = &tcbinfo; lck_rw_lock_shared(pcbinfo->ipi_lock); LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { if (inp->inp_state != INPCB_STATE_DEAD && - inp->inp_socket != NULL && - inp->inp_flowhash == flowhash && - (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt && - inp->inp_socket->so_cfil != NULL) { - if (cfil_socket_safe_lock(inp)) + inp->inp_socket != NULL && + inp->inp_flowhash == flowhash && + (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt && + inp->inp_socket->so_cfil != NULL) { + if (cfil_socket_safe_lock(inp)) { so = inp->inp_socket; + } break; } } @@ -1470,11 +1492,12 @@ find_udp: lck_rw_lock_shared(pcbinfo->ipi_lock); LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { if (inp->inp_state != INPCB_STATE_DEAD && - inp->inp_socket != NULL && - inp->inp_socket->so_cfil_db != NULL && - (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt) { - if (cfil_socket_safe_lock(inp)) + inp->inp_socket != NULL && + inp->inp_socket->so_cfil_db != NULL && + (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt) { + if (cfil_socket_safe_lock(inp)) { so = inp->inp_socket; + } break; } } @@ -1484,11 +1507,11 @@ done: if (so == NULL) { OSIncrementAtomic(&cfil_stats.cfs_sock_id_not_found); CFIL_LOG(LOG_DEBUG, - "no socket for sock_id %llx gencnt %llx flowhash %x", - cfil_sock_id, gencnt, flowhash); + "no socket for sock_id %llx gencnt %llx flowhash %x", + cfil_sock_id, gencnt, flowhash); } - return (so); + return so; } static struct socket * @@ -1501,11 +1524,12 @@ cfil_socket_from_client_uuid(uuid_t necp_client_uuid, bool *cfil_attached) lck_rw_lock_shared(pcbinfo->ipi_lock); LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { if (inp->inp_state != INPCB_STATE_DEAD && - inp->inp_socket != NULL && - uuid_compare(inp->necp_client_uuid, necp_client_uuid) == 0) { + inp->inp_socket != NULL && + uuid_compare(inp->necp_client_uuid, necp_client_uuid) == 0) { *cfil_attached = (inp->inp_socket->so_cfil != NULL); - if (cfil_socket_safe_lock(inp)) + if (cfil_socket_safe_lock(inp)) { so = inp->inp_socket; + } break; } } @@ -1518,26 +1542,27 @@ cfil_socket_from_client_uuid(uuid_t necp_client_uuid, bool *cfil_attached) lck_rw_lock_shared(pcbinfo->ipi_lock); LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { if (inp->inp_state != INPCB_STATE_DEAD && - inp->inp_socket != NULL && - uuid_compare(inp->necp_client_uuid, necp_client_uuid) == 0) { + inp->inp_socket != NULL && + uuid_compare(inp->necp_client_uuid, necp_client_uuid) == 0) { *cfil_attached = (inp->inp_socket->so_cfil_db != NULL); - if 
(cfil_socket_safe_lock(inp)) + if (cfil_socket_safe_lock(inp)) { so = inp->inp_socket; + } break; } } lck_rw_done(pcbinfo->ipi_lock); done: - return (so); + return so; } static errno_t cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m, - int flags) + int flags) { #pragma unused(kctlref, flags) - errno_t error = 0; + errno_t error = 0; struct cfil_msg_hdr *msghdr; struct content_filter *cfc = (struct content_filter *)unitinfo; struct socket *so; @@ -1554,7 +1579,7 @@ cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m, } if (kcunit > MAX_CONTENT_FILTER) { CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)", - kcunit, MAX_CONTENT_FILTER); + kcunit, MAX_CONTENT_FILTER); error = EINVAL; goto done; } @@ -1577,42 +1602,42 @@ cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m, } /* Validate action operation */ switch (msghdr->cfm_op) { - case CFM_OP_DATA_UPDATE: - OSIncrementAtomic( - &cfil_stats.cfs_ctl_action_data_update); - break; - case CFM_OP_DROP: - OSIncrementAtomic(&cfil_stats.cfs_ctl_action_drop); - break; - case CFM_OP_BLESS_CLIENT: - if (msghdr->cfm_len != sizeof(struct cfil_msg_bless_client)) { - OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len); - error = EINVAL; - CFIL_LOG(LOG_ERR, "bad len: %u for op %u", - msghdr->cfm_len, - msghdr->cfm_op); - goto done; - } - error = cfil_action_bless_client(kcunit, msghdr); - goto done; - default: - OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_op); - CFIL_LOG(LOG_ERR, "bad op %u", msghdr->cfm_op); + case CFM_OP_DATA_UPDATE: + OSIncrementAtomic( + &cfil_stats.cfs_ctl_action_data_update); + break; + case CFM_OP_DROP: + OSIncrementAtomic(&cfil_stats.cfs_ctl_action_drop); + break; + case CFM_OP_BLESS_CLIENT: + if (msghdr->cfm_len != sizeof(struct cfil_msg_bless_client)) { + OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len); error = EINVAL; + CFIL_LOG(LOG_ERR, "bad len: %u for op %u", + msghdr->cfm_len, + msghdr->cfm_op); goto done; } - if (msghdr->cfm_len != sizeof(struct cfil_msg_action)) { - OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len); - error = EINVAL; - CFIL_LOG(LOG_ERR, "bad len: %u for op %u", - msghdr->cfm_len, - msghdr->cfm_op); - goto done; - } + error = cfil_action_bless_client(kcunit, msghdr); + goto done; + default: + OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_op); + CFIL_LOG(LOG_ERR, "bad op %u", msghdr->cfm_op); + error = EINVAL; + goto done; + } + if (msghdr->cfm_len != sizeof(struct cfil_msg_action)) { + OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len); + error = EINVAL; + CFIL_LOG(LOG_ERR, "bad len: %u for op %u", + msghdr->cfm_len, + msghdr->cfm_op); + goto done; + } cfil_rw_lock_shared(&cfil_lck_rw); if (cfc != (void *)content_filters[kcunit - 1]) { CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u", - kcunit); + kcunit); error = EINVAL; cfil_rw_unlock_shared(&cfil_lck_rw); goto done; @@ -1623,39 +1648,39 @@ cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m, so = cfil_socket_from_sock_id(msghdr->cfm_sock_id, false); if (so == NULL) { CFIL_LOG(LOG_NOTICE, "bad sock_id %llx", - msghdr->cfm_sock_id); + msghdr->cfm_sock_id); error = EINVAL; goto done; } cfil_info = so->so_cfil_db != NULL ? 
- cfil_db_get_cfil_info(so->so_cfil_db, msghdr->cfm_sock_id) : so->so_cfil; + cfil_db_get_cfil_info(so->so_cfil_db, msghdr->cfm_sock_id) : so->so_cfil; if (cfil_info == NULL) { CFIL_LOG(LOG_NOTICE, "so %llx not attached", - (uint64_t)VM_KERNEL_ADDRPERM(so), msghdr->cfm_sock_id); + (uint64_t)VM_KERNEL_ADDRPERM(so), msghdr->cfm_sock_id); error = EINVAL; goto unlock; } else if (cfil_info->cfi_flags & CFIF_DROP) { CFIL_LOG(LOG_NOTICE, "so %llx drop set", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = EINVAL; goto unlock; } entry = &cfil_info->cfi_entries[kcunit - 1]; if (entry->cfe_filter == NULL) { CFIL_LOG(LOG_NOTICE, "so %llx no filter", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = EINVAL; goto unlock; } - if (entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) + if (entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) { entry->cfe_flags |= CFEF_DATA_START; - else { + } else { CFIL_LOG(LOG_ERR, - "so %llx attached not sent for %u", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); + "so %llx attached not sent for %u", + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); error = EINVAL; goto unlock; } @@ -1666,60 +1691,66 @@ cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m, action_msg = (struct cfil_msg_action *)msghdr; switch (msghdr->cfm_op) { - case CFM_OP_DATA_UPDATE: + case CFM_OP_DATA_UPDATE: #if VERDICT_DEBUG - CFIL_LOG(LOG_ERR, "CFIL: VERDICT RECEIVED: ", - (uint64_t)VM_KERNEL_ADDRPERM(so), - cfil_info->cfi_sock_id, - action_msg->cfa_in_peek_offset, action_msg->cfa_in_pass_offset, - action_msg->cfa_out_peek_offset, action_msg->cfa_out_pass_offset); + CFIL_LOG(LOG_ERR, "CFIL: VERDICT RECEIVED: ", + (uint64_t)VM_KERNEL_ADDRPERM(so), + cfil_info->cfi_sock_id, + action_msg->cfa_in_peek_offset, action_msg->cfa_in_pass_offset, + action_msg->cfa_out_peek_offset, action_msg->cfa_out_pass_offset); #endif - if (action_msg->cfa_out_peek_offset != 0 || - action_msg->cfa_out_pass_offset != 0) - error = cfil_action_data_pass(so, cfil_info, kcunit, 1, - action_msg->cfa_out_pass_offset, - action_msg->cfa_out_peek_offset); - if (error == EJUSTRETURN) - error = 0; - if (error != 0) - break; - if (action_msg->cfa_in_peek_offset != 0 || - action_msg->cfa_in_pass_offset != 0) - error = cfil_action_data_pass(so, cfil_info, kcunit, 0, - action_msg->cfa_in_pass_offset, - action_msg->cfa_in_peek_offset); - if (error == EJUSTRETURN) - error = 0; + if (action_msg->cfa_out_peek_offset != 0 || + action_msg->cfa_out_pass_offset != 0) { + error = cfil_action_data_pass(so, cfil_info, kcunit, 1, + action_msg->cfa_out_pass_offset, + action_msg->cfa_out_peek_offset); + } + if (error == EJUSTRETURN) { + error = 0; + } + if (error != 0) { break; + } + if (action_msg->cfa_in_peek_offset != 0 || + action_msg->cfa_in_pass_offset != 0) { + error = cfil_action_data_pass(so, cfil_info, kcunit, 0, + action_msg->cfa_in_pass_offset, + action_msg->cfa_in_peek_offset); + } + if (error == EJUSTRETURN) { + error = 0; + } + break; - case CFM_OP_DROP: - error = cfil_action_drop(so, cfil_info, kcunit); - break; + case CFM_OP_DROP: + error = cfil_action_drop(so, cfil_info, kcunit); + break; - default: - error = EINVAL; - break; + default: + error = EINVAL; + break; } unlock: socket_unlock(so, 1); done: mbuf_freem(m); - if (error == 0) + if (error == 0) { OSIncrementAtomic(&cfil_stats.cfs_ctl_send_ok); - else + } else { OSIncrementAtomic(&cfil_stats.cfs_ctl_send_bad); + } - return (error); + return error; } static errno_t cfil_ctl_getopt(kern_ctl_ref kctlref, 
u_int32_t kcunit, void *unitinfo, - int opt, void *data, size_t *len) + int opt, void *data, size_t *len) { #pragma unused(kctlref, opt) struct cfil_info *cfil_info = NULL; - errno_t error = 0; + errno_t error = 0; struct content_filter *cfc = (struct content_filter *)unitinfo; CFIL_LOG(LOG_NOTICE, ""); @@ -1733,138 +1764,138 @@ cfil_ctl_getopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, } if (kcunit > MAX_CONTENT_FILTER) { CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)", - kcunit, MAX_CONTENT_FILTER); + kcunit, MAX_CONTENT_FILTER); error = EINVAL; goto done; } if (cfc != (void *)content_filters[kcunit - 1]) { CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u", - kcunit); + kcunit); error = EINVAL; goto done; } switch (opt) { - case CFIL_OPT_NECP_CONTROL_UNIT: - if (*len < sizeof(uint32_t)) { - CFIL_LOG(LOG_ERR, "len too small %lu", *len); - error = EINVAL; - goto done; - } - if (data != NULL) { - *(uint32_t *)data = cfc->cf_necp_control_unit; - } - break; - case CFIL_OPT_GET_SOCKET_INFO: - if (*len != sizeof(struct cfil_opt_sock_info)) { - CFIL_LOG(LOG_ERR, "len does not match %lu", *len); - error = EINVAL; - goto done; - } - if (data == NULL) { - CFIL_LOG(LOG_ERR, "data not passed"); - error = EINVAL; - goto done; - } + case CFIL_OPT_NECP_CONTROL_UNIT: + if (*len < sizeof(uint32_t)) { + CFIL_LOG(LOG_ERR, "len too small %lu", *len); + error = EINVAL; + goto done; + } + if (data != NULL) { + *(uint32_t *)data = cfc->cf_necp_control_unit; + } + break; + case CFIL_OPT_GET_SOCKET_INFO: + if (*len != sizeof(struct cfil_opt_sock_info)) { + CFIL_LOG(LOG_ERR, "len does not match %lu", *len); + error = EINVAL; + goto done; + } + if (data == NULL) { + CFIL_LOG(LOG_ERR, "data not passed"); + error = EINVAL; + goto done; + } - struct cfil_opt_sock_info *sock_info = - (struct cfil_opt_sock_info *) data; + struct cfil_opt_sock_info *sock_info = + (struct cfil_opt_sock_info *) data; - // Unlock here so that we never hold both cfil_lck_rw and the - // socket_lock at the same time. Otherwise, this can deadlock - // because soclose() takes the socket_lock and then exclusive - // cfil_lck_rw and we require the opposite order. + // Unlock here so that we never hold both cfil_lck_rw and the + // socket_lock at the same time. Otherwise, this can deadlock + // because soclose() takes the socket_lock and then exclusive + // cfil_lck_rw and we require the opposite order. - // WARNING: Be sure to never use anything protected - // by cfil_lck_rw beyond this point. - // WARNING: Be sure to avoid fallthrough and - // goto return_already_unlocked from this branch. - cfil_rw_unlock_shared(&cfil_lck_rw); + // WARNING: Be sure to never use anything protected + // by cfil_lck_rw beyond this point. + // WARNING: Be sure to avoid fallthrough and + // goto return_already_unlocked from this branch. + cfil_rw_unlock_shared(&cfil_lck_rw); - // Search (TCP+UDP) and lock socket - struct socket *sock = - cfil_socket_from_sock_id(sock_info->cfs_sock_id, false); - if (sock == NULL) { + // Search (TCP+UDP) and lock socket + struct socket *sock = + cfil_socket_from_sock_id(sock_info->cfs_sock_id, false); + if (sock == NULL) { #if LIFECYCLE_DEBUG - CFIL_LOG(LOG_ERR, "CFIL: GET_SOCKET_INFO failed: bad sock_id %llu", - sock_info->cfs_sock_id); + CFIL_LOG(LOG_ERR, "CFIL: GET_SOCKET_INFO failed: bad sock_id %llu", + sock_info->cfs_sock_id); #endif - error = ENOENT; - goto return_already_unlocked; - } + error = ENOENT; + goto return_already_unlocked; + } - cfil_info = (sock->so_cfil_db != NULL) ? 
- cfil_db_get_cfil_info(sock->so_cfil_db, sock_info->cfs_sock_id) : sock->so_cfil; + cfil_info = (sock->so_cfil_db != NULL) ? + cfil_db_get_cfil_info(sock->so_cfil_db, sock_info->cfs_sock_id) : sock->so_cfil; - if (cfil_info == NULL) { + if (cfil_info == NULL) { #if LIFECYCLE_DEBUG - CFIL_LOG(LOG_ERR, "CFIL: GET_SOCKET_INFO failed: so %llx not attached, cannot fetch info", - (uint64_t)VM_KERNEL_ADDRPERM(sock)); + CFIL_LOG(LOG_ERR, "CFIL: GET_SOCKET_INFO failed: so %llx not attached, cannot fetch info", + (uint64_t)VM_KERNEL_ADDRPERM(sock)); #endif - error = EINVAL; - socket_unlock(sock, 1); - goto return_already_unlocked; - } + error = EINVAL; + socket_unlock(sock, 1); + goto return_already_unlocked; + } - // Fill out family, type, and protocol - sock_info->cfs_sock_family = sock->so_proto->pr_domain->dom_family; - sock_info->cfs_sock_type = sock->so_proto->pr_type; - sock_info->cfs_sock_protocol = sock->so_proto->pr_protocol; - - // Source and destination addresses - struct inpcb *inp = sotoinpcb(sock); - if (inp->inp_vflag & INP_IPV6) { - struct in6_addr *laddr = NULL, *faddr = NULL; - u_int16_t lport = 0, fport = 0; - - cfil_get_flow_address_v6(cfil_info->cfi_hash_entry, inp, - &laddr, &faddr, &lport, &fport); - fill_ip6_sockaddr_4_6(&sock_info->cfs_local, laddr, lport); - fill_ip6_sockaddr_4_6(&sock_info->cfs_remote, faddr, fport); - } else if (inp->inp_vflag & INP_IPV4) { - struct in_addr laddr = {0}, faddr = {0}; - u_int16_t lport = 0, fport = 0; - - cfil_get_flow_address(cfil_info->cfi_hash_entry, inp, - &laddr, &faddr, &lport, &fport); - fill_ip_sockaddr_4_6(&sock_info->cfs_local, laddr, lport); - fill_ip_sockaddr_4_6(&sock_info->cfs_remote, faddr, fport); - } + // Fill out family, type, and protocol + sock_info->cfs_sock_family = sock->so_proto->pr_domain->dom_family; + sock_info->cfs_sock_type = sock->so_proto->pr_type; + sock_info->cfs_sock_protocol = sock->so_proto->pr_protocol; + + // Source and destination addresses + struct inpcb *inp = sotoinpcb(sock); + if (inp->inp_vflag & INP_IPV6) { + struct in6_addr *laddr = NULL, *faddr = NULL; + u_int16_t lport = 0, fport = 0; + + cfil_get_flow_address_v6(cfil_info->cfi_hash_entry, inp, + &laddr, &faddr, &lport, &fport); + fill_ip6_sockaddr_4_6(&sock_info->cfs_local, laddr, lport); + fill_ip6_sockaddr_4_6(&sock_info->cfs_remote, faddr, fport); + } else if (inp->inp_vflag & INP_IPV4) { + struct in_addr laddr = {0}, faddr = {0}; + u_int16_t lport = 0, fport = 0; + + cfil_get_flow_address(cfil_info->cfi_hash_entry, inp, + &laddr, &faddr, &lport, &fport); + fill_ip_sockaddr_4_6(&sock_info->cfs_local, laddr, lport); + fill_ip_sockaddr_4_6(&sock_info->cfs_remote, faddr, fport); + } - // Set the pid info - sock_info->cfs_pid = sock->last_pid; - memcpy(sock_info->cfs_uuid, sock->last_uuid, sizeof(uuid_t)); + // Set the pid info + sock_info->cfs_pid = sock->last_pid; + memcpy(sock_info->cfs_uuid, sock->last_uuid, sizeof(uuid_t)); - if (sock->so_flags & SOF_DELEGATED) { - sock_info->cfs_e_pid = sock->e_pid; - memcpy(sock_info->cfs_e_uuid, sock->e_uuid, sizeof(uuid_t)); - } else { - sock_info->cfs_e_pid = sock->last_pid; - memcpy(sock_info->cfs_e_uuid, sock->last_uuid, sizeof(uuid_t)); - } + if (sock->so_flags & SOF_DELEGATED) { + sock_info->cfs_e_pid = sock->e_pid; + memcpy(sock_info->cfs_e_uuid, sock->e_uuid, sizeof(uuid_t)); + } else { + sock_info->cfs_e_pid = sock->last_pid; + memcpy(sock_info->cfs_e_uuid, sock->last_uuid, sizeof(uuid_t)); + } - socket_unlock(sock, 1); + socket_unlock(sock, 1); - goto return_already_unlocked; - default: - 
error = ENOPROTOOPT; - break; + goto return_already_unlocked; + default: + error = ENOPROTOOPT; + break; } done: cfil_rw_unlock_shared(&cfil_lck_rw); - return (error); + return error; -return_already_unlocked: +return_already_unlocked: - return (error); + return error; } static errno_t cfil_ctl_setopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, - int opt, void *data, size_t len) + int opt, void *data, size_t len) { #pragma unused(kctlref, opt) - errno_t error = 0; + errno_t error = 0; struct content_filter *cfc = (struct content_filter *)unitinfo; CFIL_LOG(LOG_NOTICE, ""); @@ -1878,41 +1909,41 @@ cfil_ctl_setopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, } if (kcunit > MAX_CONTENT_FILTER) { CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)", - kcunit, MAX_CONTENT_FILTER); + kcunit, MAX_CONTENT_FILTER); error = EINVAL; goto done; } if (cfc != (void *)content_filters[kcunit - 1]) { CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u", - kcunit); + kcunit); error = EINVAL; goto done; } switch (opt) { - case CFIL_OPT_NECP_CONTROL_UNIT: - if (len < sizeof(uint32_t)) { - CFIL_LOG(LOG_ERR, "CFIL_OPT_NECP_CONTROL_UNIT " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (cfc->cf_necp_control_unit != 0) { - CFIL_LOG(LOG_ERR, "CFIL_OPT_NECP_CONTROL_UNIT " - "already set %u", - cfc->cf_necp_control_unit); - error = EINVAL; - goto done; - } - cfc->cf_necp_control_unit = *(uint32_t *)data; - break; - default: - error = ENOPROTOOPT; - break; + case CFIL_OPT_NECP_CONTROL_UNIT: + if (len < sizeof(uint32_t)) { + CFIL_LOG(LOG_ERR, "CFIL_OPT_NECP_CONTROL_UNIT " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (cfc->cf_necp_control_unit != 0) { + CFIL_LOG(LOG_ERR, "CFIL_OPT_NECP_CONTROL_UNIT " + "already set %u", + cfc->cf_necp_control_unit); + error = EINVAL; + goto done; + } + cfc->cf_necp_control_unit = *(uint32_t *)data; + break; + default: + error = ENOPROTOOPT; + break; } done: cfil_rw_unlock_exclusive(&cfil_lck_rw); - return (error); + return error; } @@ -1935,23 +1966,24 @@ cfil_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, int flags) } if (kcunit > MAX_CONTENT_FILTER) { CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)", - kcunit, MAX_CONTENT_FILTER); + kcunit, MAX_CONTENT_FILTER); OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad); return; } cfil_rw_lock_shared(&cfil_lck_rw); if (cfc != (void *)content_filters[kcunit - 1]) { CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u", - kcunit); + kcunit); OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad); goto done; } /* Let's assume the flow control is lifted */ if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { - if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) + if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) { cfil_rw_lock_exclusive(&cfil_lck_rw); + } - cfc->cf_flags &= ~CFF_FLOW_CONTROLLED; + cfc->cf_flags &= ~CFF_FLOW_CONTROLLED; cfil_rw_lock_exclusive_to_shared(&cfil_lck_rw); LCK_RW_ASSERT(&cfil_lck_rw, LCK_RW_ASSERT_SHARED); @@ -1968,13 +2000,16 @@ cfil_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, int flags) /* Find an entry that is flow controlled */ TAILQ_FOREACH(entry, &cfc->cf_sock_entries, cfe_link) { if (entry->cfe_cfil_info == NULL || - entry->cfe_cfil_info->cfi_so == NULL) + entry->cfe_cfil_info->cfi_so == NULL) { continue; - if ((entry->cfe_flags & CFEF_FLOW_CONTROLLED) == 0) + } + if ((entry->cfe_flags & CFEF_FLOW_CONTROLLED) == 0) { continue; + } } - if (entry == NULL) + if (entry == NULL) { break; + } 
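The flow-control lift above uses an idiom that recurs throughout this file: cfil_rw_lock_shared_to_exclusive() can fail when another thread races the upgrade, and on failure the shared hold has already been dropped, so the caller must immediately re-take the lock exclusively. A compilable toy showing only the shape of that idiom (assumption: the toy_ names are invented for illustration; this is not the xnu lck_rw API):

#include <stdbool.h>
#include <stdio.h>

/* Toy reader/writer lock, only detailed enough to show the idiom. */
struct toy_rwlock {
	int readers;
	bool writer;
};

/*
 * Try to upgrade a shared hold to exclusive. On failure the shared hold
 * has already been released, matching the semantics the cfil code
 * relies on when it falls back to a fresh exclusive acquisition.
 */
static bool
toy_lock_shared_to_exclusive(struct toy_rwlock *l)
{
	if (l->readers == 1 && !l->writer) {
		l->readers = 0;
		l->writer = true;
		return true;		/* upgraded in place */
	}
	l->readers--;			/* lost the race: shared hold is gone */
	return false;
}

static void
toy_lock_exclusive(struct toy_rwlock *l)
{
	l->writer = true;		/* a real lock would block for readers */
}

int
main(void)
{
	struct toy_rwlock l = { .readers = 2, .writer = false };

	/* The idiom from cfil_ctl_rcvd(): either branch ends holding exclusive. */
	if (!toy_lock_shared_to_exclusive(&l)) {
		toy_lock_exclusive(&l);
	}
	printf("writer held: %d\n", l.writer);
	return 0;
}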
OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_flow_lift); @@ -1986,15 +2021,18 @@ cfil_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, int flags) do { error = cfil_acquire_sockbuf(so, cfil_info, 1); - if (error == 0) + if (error == 0) { error = cfil_data_service_ctl_q(so, cfil_info, kcunit, 1); + } cfil_release_sockbuf(so, 1); - if (error != 0) + if (error != 0) { break; + } error = cfil_acquire_sockbuf(so, cfil_info, 0); - if (error == 0) + if (error == 0) { error = cfil_data_service_ctl_q(so, cfil_info, kcunit, 0); + } cfil_release_sockbuf(so, 0); } while (0); @@ -2011,12 +2049,12 @@ void cfil_init(void) { struct kern_ctl_reg kern_ctl; - errno_t error = 0; - vm_size_t content_filter_size = 0; /* size of content_filter */ - vm_size_t cfil_info_size = 0; /* size of cfil_info */ - vm_size_t cfil_hash_entry_size = 0; /* size of cfil_hash_entry */ - vm_size_t cfil_db_size = 0; /* size of cfil_db */ - unsigned int mbuf_limit = 0; + errno_t error = 0; + vm_size_t content_filter_size = 0; /* size of content_filter */ + vm_size_t cfil_info_size = 0; /* size of cfil_info */ + vm_size_t cfil_hash_entry_size = 0; /* size of cfil_hash_entry */ + vm_size_t cfil_db_size = 0; /* size of cfil_db */ + unsigned int mbuf_limit = 0; CFIL_LOG(LOG_NOTICE, ""); @@ -2032,39 +2070,39 @@ cfil_init(void) * Runtime time verifications */ VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_in_enqueued, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_out_enqueued, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_in_peeked, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_out_peeked, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_pending_q_in_enqueued, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_pending_q_out_enqueued, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_in_enqueued, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_out_enqueued, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_in_passed, - sizeof(uint32_t))); + sizeof(uint32_t))); VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_out_passed, - sizeof(uint32_t))); + sizeof(uint32_t))); /* * Zone for content filters kernel control sockets */ content_filter_size = sizeof(struct content_filter); content_filter_zone = zinit(content_filter_size, - CONTENT_FILTER_ZONE_MAX * content_filter_size, - 0, - CONTENT_FILTER_ZONE_NAME); + CONTENT_FILTER_ZONE_MAX * content_filter_size, + 0, + CONTENT_FILTER_ZONE_NAME); if (content_filter_zone == NULL) { panic("%s: zinit(%s) failed", __func__, - CONTENT_FILTER_ZONE_NAME); + CONTENT_FILTER_ZONE_NAME); /* NOTREACHED */ } zone_change(content_filter_zone, Z_CALLERACCT, FALSE); @@ -2075,9 +2113,9 @@ cfil_init(void) */ cfil_info_size = sizeof(struct cfil_info); cfil_info_zone = zinit(cfil_info_size, - CFIL_INFO_ZONE_MAX * cfil_info_size, - 0, - CFIL_INFO_ZONE_NAME); + CFIL_INFO_ZONE_MAX * cfil_info_size, + 0, + CFIL_INFO_ZONE_NAME); if (cfil_info_zone == NULL) { panic("%s: zinit(%s) failed", __func__, CFIL_INFO_ZONE_NAME); /* NOTREACHED */ @@ -2085,33 +2123,33 @@ cfil_init(void) zone_change(cfil_info_zone, Z_CALLERACCT, FALSE); zone_change(cfil_info_zone, Z_EXPAND, TRUE); - /* - * Zone for content filters cfil hash entries and db - */ - cfil_hash_entry_size = sizeof(struct cfil_hash_entry); - cfil_hash_entry_zone = 
zinit(cfil_hash_entry_size,
- CFIL_HASH_ENTRY_ZONE_MAX * cfil_hash_entry_size,
- 0,
- CFIL_HASH_ENTRY_ZONE_NAME);
- if (cfil_hash_entry_zone == NULL) {
- panic("%s: zinit(%s) failed", __func__, CFIL_HASH_ENTRY_ZONE_NAME);
- /* NOTREACHED */
- }
- zone_change(cfil_hash_entry_zone, Z_CALLERACCT, FALSE);
- zone_change(cfil_hash_entry_zone, Z_EXPAND, TRUE);
-
- cfil_db_size = sizeof(struct cfil_db);
- cfil_db_zone = zinit(cfil_db_size,
- CFIL_DB_ZONE_MAX * cfil_db_size,
- 0,
- CFIL_DB_ZONE_NAME);
- if (cfil_db_zone == NULL) {
- panic("%s: zinit(%s) failed", __func__, CFIL_DB_ZONE_NAME);
- /* NOTREACHED */
- }
- zone_change(cfil_db_zone, Z_CALLERACCT, FALSE);
- zone_change(cfil_db_zone, Z_EXPAND, TRUE);
-
+ /*
+ * Zone for content filters cfil hash entries and db
+ */
+ cfil_hash_entry_size = sizeof(struct cfil_hash_entry);
+ cfil_hash_entry_zone = zinit(cfil_hash_entry_size,
+ CFIL_HASH_ENTRY_ZONE_MAX * cfil_hash_entry_size,
+ 0,
+ CFIL_HASH_ENTRY_ZONE_NAME);
+ if (cfil_hash_entry_zone == NULL) {
+ panic("%s: zinit(%s) failed", __func__, CFIL_HASH_ENTRY_ZONE_NAME);
+ /* NOTREACHED */
+ }
+ zone_change(cfil_hash_entry_zone, Z_CALLERACCT, FALSE);
+ zone_change(cfil_hash_entry_zone, Z_EXPAND, TRUE);
+
+ cfil_db_size = sizeof(struct cfil_db);
+ cfil_db_zone = zinit(cfil_db_size,
+ CFIL_DB_ZONE_MAX * cfil_db_size,
+ 0,
+ CFIL_DB_ZONE_NAME);
+ if (cfil_db_zone == NULL) {
+ panic("%s: zinit(%s) failed", __func__, CFIL_DB_ZONE_NAME);
+ /* NOTREACHED */
+ }
+ zone_change(cfil_db_zone, Z_CALLERACCT, FALSE);
+ zone_change(cfil_db_zone, Z_EXPAND, TRUE);
+
 /*
 * Allocate locks
 */
@@ -2121,7 +2159,7 @@ cfil_init(void)
 /* NOTREACHED */
 }
 cfil_lck_grp = lck_grp_alloc_init("content filter",
- cfil_lck_grp_attr);
+ cfil_lck_grp_attr);
 if (cfil_lck_grp == NULL) {
 panic("%s: lck_grp_alloc_init failed", __func__);
 /* NOTREACHED */
@@ -2140,7 +2178,7 @@
 */
 bzero(&kern_ctl, sizeof(kern_ctl));
 strlcpy(kern_ctl.ctl_name, CONTENT_FILTER_CONTROL_NAME,
- sizeof(kern_ctl.ctl_name));
+ sizeof(kern_ctl.ctl_name));
 kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_EXTENDED;
 kern_ctl.ctl_sendsize = 512 * 1024; /* enough? */
 kern_ctl.ctl_recvsize = 512 * 1024; /* enough? */
@@ -2158,7 +2196,7 @@
 // Spawn thread for garbage collection
 if (kernel_thread_start(cfil_udp_gc_thread_func, NULL,
- &cfil_udp_gc_thread) != KERN_SUCCESS) {
+ &cfil_udp_gc_thread) != KERN_SUCCESS) {
 panic_plain("%s: Can't create UDP GC thread", __func__);
 /* NOTREACHED */
 }
@@ -2183,8 +2221,9 @@ cfil_info_alloc(struct socket *so, struct cfil_hash_entry *hash_entry)
 socket_lock_assert_owned(so);
 cfil_info = zalloc(cfil_info_zone);
- if (cfil_info == NULL)
+ if (cfil_info == NULL) {
 goto done;
+ }
 bzero(cfil_info, sizeof(struct cfil_info));
 cfil_queue_init(&cfil_info->cfi_snd.cfi_inject_q);
@@ -2219,28 +2258,29 @@
 }
 cfil_rw_lock_exclusive(&cfil_lck_rw);
-
+
 /*
 * Create a cfi_sock_id that's not the socket pointer!
*/ - - if (hash_entry == NULL) { + + if (hash_entry == NULL) { // This is the TCP case, cfil_info is tracked per socket - if (inp->inp_flowhash == 0) - inp->inp_flowhash = inp_calc_flowhash(inp); - - so->so_cfil = cfil_info; - cfil_info->cfi_so = so; - cfil_info->cfi_sock_id = - ((so->so_gencnt << 32) | inp->inp_flowhash); - } else { - // This is the UDP case, cfil_info is tracked in per-socket hash + if (inp->inp_flowhash == 0) { + inp->inp_flowhash = inp_calc_flowhash(inp); + } + + so->so_cfil = cfil_info; + cfil_info->cfi_so = so; + cfil_info->cfi_sock_id = + ((so->so_gencnt << 32) | inp->inp_flowhash); + } else { + // This is the UDP case, cfil_info is tracked in per-socket hash cfil_info->cfi_so = so; - hash_entry->cfentry_cfil = cfil_info; + hash_entry->cfentry_cfil = cfil_info; cfil_info->cfi_hash_entry = hash_entry; - cfil_info->cfi_sock_id = ((so->so_gencnt << 32) | (hash_entry->cfentry_flowhash & 0xffffffff)); + cfil_info->cfi_sock_id = ((so->so_gencnt << 32) | (hash_entry->cfentry_flowhash & 0xffffffff)); CFIL_LOG(LOG_DEBUG, "CFIL: UDP inp_flowhash %x so_gencnt %llx entry flowhash %x sockID %llx", - inp->inp_flowhash, so->so_gencnt, hash_entry->cfentry_flowhash, cfil_info->cfi_sock_id); + inp->inp_flowhash, so->so_gencnt, hash_entry->cfentry_flowhash, cfil_info->cfi_sock_id); // Wake up gc thread if this is first flow added if (cfil_sock_udp_attached_count == 0) { @@ -2248,7 +2288,7 @@ cfil_info_alloc(struct socket *so, struct cfil_hash_entry *hash_entry) } cfil_sock_udp_attached_count++; - } + } TAILQ_INSERT_TAIL(&cfil_sock_head, cfil_info, cfi_link); @@ -2257,12 +2297,13 @@ cfil_info_alloc(struct socket *so, struct cfil_hash_entry *hash_entry) cfil_rw_unlock_exclusive(&cfil_lck_rw); done: - if (cfil_info != NULL) + if (cfil_info != NULL) { OSIncrementAtomic(&cfil_stats.cfs_cfi_alloc_ok); - else + } else { OSIncrementAtomic(&cfil_stats.cfs_cfi_alloc_fail); + } - return (cfil_info); + return cfil_info; } int @@ -2278,15 +2319,17 @@ cfil_info_attach_unit(struct socket *so, uint32_t filter_control_unit, struct cf cfil_rw_lock_exclusive(&cfil_lck_rw); for (kcunit = 1; - content_filters != NULL && kcunit <= MAX_CONTENT_FILTER; - kcunit++) { + content_filters != NULL && kcunit <= MAX_CONTENT_FILTER; + kcunit++) { struct content_filter *cfc = content_filters[kcunit - 1]; struct cfil_entry *entry; - if (cfc == NULL) + if (cfc == NULL) { continue; - if (cfc->cf_necp_control_unit != filter_control_unit) + } + if (cfc->cf_necp_control_unit != filter_control_unit) { continue; + } entry = &cfil_info->cfi_entries[kcunit - 1]; @@ -2302,7 +2345,7 @@ cfil_info_attach_unit(struct socket *so, uint32_t filter_control_unit, struct cf cfil_rw_unlock_exclusive(&cfil_lck_rw); - return (attached); + return attached; } static void @@ -2312,24 +2355,26 @@ cfil_info_free(struct cfil_info *cfil_info) uint64_t in_drain = 0; uint64_t out_drained = 0; - if (cfil_info == NULL) + if (cfil_info == NULL) { return; + } CFIL_LOG(LOG_INFO, ""); cfil_rw_lock_exclusive(&cfil_lck_rw); for (kcunit = 1; - content_filters != NULL && kcunit <= MAX_CONTENT_FILTER; - kcunit++) { + content_filters != NULL && kcunit <= MAX_CONTENT_FILTER; + kcunit++) { struct cfil_entry *entry; struct content_filter *cfc; entry = &cfil_info->cfi_entries[kcunit - 1]; /* Don't be silly and try to detach twice */ - if (entry->cfe_filter == NULL) + if (entry->cfe_filter == NULL) { continue; + } cfc = content_filters[kcunit - 1]; @@ -2342,8 +2387,9 @@ cfil_info_free(struct cfil_info *cfil_info) verify_content_filter(cfc); } - if 
(cfil_info->cfi_hash_entry != NULL) + if (cfil_info->cfi_hash_entry != NULL) { cfil_sock_udp_attached_count--; + } cfil_sock_attached_count--; TAILQ_REMOVE(&cfil_sock_head, cfil_info, cfi_link); @@ -2361,10 +2407,12 @@ cfil_info_free(struct cfil_info *cfil_info) } cfil_rw_unlock_exclusive(&cfil_lck_rw); - if (out_drained) + if (out_drained) { OSIncrementAtomic(&cfil_stats.cfs_flush_out_free); - if (in_drain) + } + if (in_drain) { OSIncrementAtomic(&cfil_stats.cfs_flush_in_free); + } zfree(cfil_info_zone, cfil_info); } @@ -2383,16 +2431,18 @@ cfil_sock_attach(struct socket *so) /* Limit ourselves to TCP that are not MPTCP subflows */ if ((so->so_proto->pr_domain->dom_family != PF_INET && - so->so_proto->pr_domain->dom_family != PF_INET6) || - so->so_proto->pr_type != SOCK_STREAM || - so->so_proto->pr_protocol != IPPROTO_TCP || - (so->so_flags & SOF_MP_SUBFLOW) != 0 || - (so->so_flags1 & SOF1_CONTENT_FILTER_SKIP) != 0) + so->so_proto->pr_domain->dom_family != PF_INET6) || + so->so_proto->pr_type != SOCK_STREAM || + so->so_proto->pr_protocol != IPPROTO_TCP || + (so->so_flags & SOF_MP_SUBFLOW) != 0 || + (so->so_flags1 & SOF1_CONTENT_FILTER_SKIP) != 0) { goto done; + } filter_control_unit = necp_socket_get_content_filter_control_unit(so); - if (filter_control_unit == 0) + if (filter_control_unit == 0) { goto done; + } if ((filter_control_unit & NECP_MASK_USERSPACE_ONLY) != 0) { OSIncrementAtomic(&cfil_stats.cfs_sock_userspace_only); @@ -2415,13 +2465,13 @@ cfil_sock_attach(struct socket *so) } if (cfil_info_attach_unit(so, filter_control_unit, so->so_cfil) == 0) { CFIL_LOG(LOG_ERR, "cfil_info_attach_unit(%u) failed", - filter_control_unit); + filter_control_unit); OSIncrementAtomic(&cfil_stats.cfs_sock_attach_failed); goto done; } CFIL_LOG(LOG_INFO, "so %llx filter_control_unit %u sockID %llx", - (uint64_t)VM_KERNEL_ADDRPERM(so), - filter_control_unit, so->so_cfil->cfi_sock_id); + (uint64_t)VM_KERNEL_ADDRPERM(so), + filter_control_unit, so->so_cfil->cfi_sock_id); so->so_flags |= SOF_CONTENT_FILTER; OSIncrementAtomic(&cfil_stats.cfs_sock_attached); @@ -2431,14 +2481,15 @@ cfil_sock_attach(struct socket *so) error = cfil_dispatch_attach_event(so, so->so_cfil, filter_control_unit); /* We can recover from flow control or out of memory errors */ - if (error == ENOBUFS || error == ENOMEM) + if (error == ENOBUFS || error == ENOMEM) { error = 0; - else if (error != 0) + } else if (error != 0) { goto done; + } CFIL_INFO_VERIFY(so->so_cfil); done: - return (error); + return error; } /* @@ -2450,7 +2501,7 @@ cfil_sock_detach(struct socket *so) { if (IS_UDP(so)) { cfil_db_free(so); - return (0); + return 0; } if (so->so_cfil) { @@ -2463,7 +2514,7 @@ cfil_sock_detach(struct socket *so) so->so_cfil = NULL; OSIncrementAtomic(&cfil_stats.cfs_sock_detached); } - return (0); + return 0; } static int @@ -2489,27 +2540,32 @@ cfil_dispatch_attach_event(struct socket *so, struct cfil_info *cfil_info, uint3 for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { cfc = content_filters[kcunit - 1]; - if (cfc == NULL) + if (cfc == NULL) { continue; - if (cfc->cf_necp_control_unit != filter_control_unit) + } + if (cfc->cf_necp_control_unit != filter_control_unit) { continue; + } entry = &cfil_info->cfi_entries[kcunit - 1]; - if (entry->cfe_filter == NULL) + if (entry->cfe_filter == NULL) { continue; + } VERIFY(cfc == entry->cfe_filter); break; } - if (entry == NULL || entry->cfe_filter == NULL) + if (entry == NULL || entry->cfe_filter == NULL) { goto done; + } - if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED)) + if 
((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED)) { goto done; + } CFIL_LOG(LOG_INFO, "so %llx filter_control_unit %u kcunit %u", - (uint64_t)VM_KERNEL_ADDRPERM(so), filter_control_unit, kcunit); + (uint64_t)VM_KERNEL_ADDRPERM(so), filter_control_unit, kcunit); /* Would be wasteful to try when flow controlled */ if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { @@ -2539,14 +2595,14 @@ cfil_dispatch_attach_event(struct socket *so, struct cfil_info *cfil_info, uint3 #if LIFECYCLE_DEBUG CFIL_LOG(LOG_DEBUG, "CFIL: LIFECYCLE: SENDING ATTACH UP ", - entry->cfe_cfil_info->cfi_sock_id); + entry->cfe_cfil_info->cfi_sock_id); #endif error = ctl_enqueuedata(entry->cfe_filter->cf_kcref, - entry->cfe_filter->cf_kcunit, - &msg_attached, - sizeof(struct cfil_msg_sock_attached), - CTL_DATA_EOR); + entry->cfe_filter->cf_kcunit, + &msg_attached, + sizeof(struct cfil_msg_sock_attached), + CTL_DATA_EOR); if (error != 0) { CFIL_LOG(LOG_ERR, "ctl_enqueuedata() failed: %d", error); goto done; @@ -2564,19 +2620,21 @@ done: entry->cfe_flags |= CFEF_FLOW_CONTROLLED; OSIncrementAtomic(&cfil_stats.cfs_attach_event_flow_control); - if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) + if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) { cfil_rw_lock_exclusive(&cfil_lck_rw); + } cfc->cf_flags |= CFF_FLOW_CONTROLLED; cfil_rw_unlock_exclusive(&cfil_lck_rw); } else { - if (error != 0) + if (error != 0) { OSIncrementAtomic(&cfil_stats.cfs_attach_event_fail); + } cfil_rw_unlock_shared(&cfil_lck_rw); } - return (error); + return error; } static int @@ -2594,25 +2652,27 @@ cfil_dispatch_disconnect_event(struct socket *so, struct cfil_info *cfil_info, u cfil_rw_lock_shared(&cfil_lck_rw); entry = &cfil_info->cfi_entries[kcunit - 1]; - if (outgoing) + if (outgoing) { entrybuf = &entry->cfe_snd; - else + } else { entrybuf = &entry->cfe_rcv; + } cfc = entry->cfe_filter; - if (cfc == NULL) + if (cfc == NULL) { goto done; + } CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); /* * Send the disconnection event once */ if ((outgoing && (entry->cfe_flags & CFEF_SENT_DISCONNECT_OUT)) || - (!outgoing && (entry->cfe_flags & CFEF_SENT_DISCONNECT_IN))) { + (!outgoing && (entry->cfe_flags & CFEF_SENT_DISCONNECT_IN))) { CFIL_LOG(LOG_INFO, "so %llx disconnect already sent", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); goto done; } @@ -2622,7 +2682,7 @@ cfil_dispatch_disconnect_event(struct socket *so, struct cfil_info *cfil_info, u */ if (outgoing && cfil_queue_empty(&entrybuf->cfe_ctl_q) == 0) { CFIL_LOG(LOG_INFO, "so %llx control queue not empty", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = EBUSY; goto done; } @@ -2634,8 +2694,8 @@ cfil_dispatch_disconnect_event(struct socket *so, struct cfil_info *cfil_info, u #if LIFECYCLE_DEBUG cfil_info_log(LOG_ERR, cfil_info, outgoing ? - "CFIL: LIFECYCLE: OUT - SENDING DISCONNECT UP": - "CFIL: LIFECYCLE: IN - SENDING DISCONNECT UP"); + "CFIL: LIFECYCLE: OUT - SENDING DISCONNECT UP": + "CFIL: LIFECYCLE: IN - SENDING DISCONNECT UP"); #endif bzero(&msg_disconnected, sizeof(struct cfil_msg_hdr)); @@ -2643,13 +2703,13 @@ cfil_dispatch_disconnect_event(struct socket *so, struct cfil_info *cfil_info, u msg_disconnected.cfm_version = CFM_VERSION_CURRENT; msg_disconnected.cfm_type = CFM_TYPE_EVENT; msg_disconnected.cfm_op = outgoing ? 
CFM_OP_DISCONNECT_OUT : - CFM_OP_DISCONNECT_IN; + CFM_OP_DISCONNECT_IN; msg_disconnected.cfm_sock_id = entry->cfe_cfil_info->cfi_sock_id; error = ctl_enqueuedata(entry->cfe_filter->cf_kcref, - entry->cfe_filter->cf_kcunit, - &msg_disconnected, - sizeof(struct cfil_msg_hdr), - CTL_DATA_EOR); + entry->cfe_filter->cf_kcunit, + &msg_disconnected, + sizeof(struct cfil_msg_hdr), + CTL_DATA_EOR); if (error != 0) { CFIL_LOG(LOG_ERR, "ctl_enqueuembuf() failed: %d", error); mbuf_freem(msg); @@ -2672,20 +2732,22 @@ done: OSIncrementAtomic( &cfil_stats.cfs_disconnect_event_flow_control); - if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) + if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) { cfil_rw_lock_exclusive(&cfil_lck_rw); + } cfc->cf_flags |= CFF_FLOW_CONTROLLED; cfil_rw_unlock_exclusive(&cfil_lck_rw); } else { - if (error != 0) + if (error != 0) { OSIncrementAtomic( &cfil_stats.cfs_disconnect_event_fail); + } cfil_rw_unlock_shared(&cfil_lck_rw); } - return (error); + return error; } int @@ -2702,11 +2764,12 @@ cfil_dispatch_closed_event(struct socket *so, struct cfil_info *cfil_info, int k entry = &cfil_info->cfi_entries[kcunit - 1]; cfc = entry->cfe_filter; - if (cfc == NULL) + if (cfc == NULL) { goto done; + } CFIL_LOG(LOG_INFO, "so %llx kcunit %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); /* Would be wasteful to try when flow controlled */ if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { @@ -2716,10 +2779,12 @@ cfil_dispatch_closed_event(struct socket *so, struct cfil_info *cfil_info, int k /* * Send a single closed message per filter */ - if ((entry->cfe_flags & CFEF_SENT_SOCK_CLOSED) != 0) + if ((entry->cfe_flags & CFEF_SENT_SOCK_CLOSED) != 0) { goto done; - if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) + } + if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) { goto done; + } microuptime(&entry->cfe_last_event); CFI_ADD_TIME_LOG(cfil_info, &entry->cfe_last_event, &cfil_info->cfi_first_event, CFM_OP_SOCKET_CLOSED); @@ -2732,30 +2797,30 @@ cfil_dispatch_closed_event(struct socket *so, struct cfil_info *cfil_info, int k msg_closed.cfc_msghdr.cfm_sock_id = entry->cfe_cfil_info->cfi_sock_id; msg_closed.cfc_first_event.tv_sec = cfil_info->cfi_first_event.tv_sec; msg_closed.cfc_first_event.tv_usec = cfil_info->cfi_first_event.tv_usec; - memcpy(msg_closed.cfc_op_time, cfil_info->cfi_op_time, sizeof(uint32_t)*CFI_MAX_TIME_LOG_ENTRY); - memcpy(msg_closed.cfc_op_list, cfil_info->cfi_op_list, sizeof(unsigned char)*CFI_MAX_TIME_LOG_ENTRY); + memcpy(msg_closed.cfc_op_time, cfil_info->cfi_op_time, sizeof(uint32_t) * CFI_MAX_TIME_LOG_ENTRY); + memcpy(msg_closed.cfc_op_list, cfil_info->cfi_op_list, sizeof(unsigned char) * CFI_MAX_TIME_LOG_ENTRY); msg_closed.cfc_op_list_ctr = cfil_info->cfi_op_list_ctr; #if LIFECYCLE_DEBUG CFIL_LOG(LOG_ERR, "CFIL: LIFECYCLE: SENDING CLOSED UP: op ctr %d, start time %llu.%llu", msg_closed.cfc_msghdr.cfm_sock_id, cfil_info->cfi_op_list_ctr, cfil_info->cfi_first_event.tv_sec, cfil_info->cfi_first_event.tv_usec); #endif /* for debugging - if (msg_closed.cfc_op_list_ctr > CFI_MAX_TIME_LOG_ENTRY) { - msg_closed.cfc_op_list_ctr = CFI_MAX_TIME_LOG_ENTRY; // just in case - } - for (unsigned int i = 0; i < msg_closed.cfc_op_list_ctr ; i++) { - CFIL_LOG(LOG_ERR, "MD: socket %llu event %2u, time + %u msec", msg_closed.cfc_msghdr.cfm_sock_id, (unsigned short)msg_closed.cfc_op_list[i], msg_closed.cfc_op_time[i]); - } - */ + * if (msg_closed.cfc_op_list_ctr > CFI_MAX_TIME_LOG_ENTRY) { + * msg_closed.cfc_op_list_ctr = 
CFI_MAX_TIME_LOG_ENTRY; // just in case + * } + * for (unsigned int i = 0; i < msg_closed.cfc_op_list_ctr ; i++) { + * CFIL_LOG(LOG_ERR, "MD: socket %llu event %2u, time + %u msec", msg_closed.cfc_msghdr.cfm_sock_id, (unsigned short)msg_closed.cfc_op_list[i], msg_closed.cfc_op_time[i]); + * } + */ error = ctl_enqueuedata(entry->cfe_filter->cf_kcref, - entry->cfe_filter->cf_kcunit, - &msg_closed, - sizeof(struct cfil_msg_sock_closed), - CTL_DATA_EOR); + entry->cfe_filter->cf_kcunit, + &msg_closed, + sizeof(struct cfil_msg_sock_closed), + CTL_DATA_EOR); if (error != 0) { CFIL_LOG(LOG_ERR, "ctl_enqueuedata() failed: %d", - error); + error); goto done; } @@ -2767,25 +2832,27 @@ done: entry->cfe_flags |= CFEF_FLOW_CONTROLLED; OSIncrementAtomic(&cfil_stats.cfs_closed_event_flow_control); - if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) + if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) { cfil_rw_lock_exclusive(&cfil_lck_rw); + } cfc->cf_flags |= CFF_FLOW_CONTROLLED; cfil_rw_unlock_exclusive(&cfil_lck_rw); } else { - if (error != 0) + if (error != 0) { OSIncrementAtomic(&cfil_stats.cfs_closed_event_fail); + } cfil_rw_unlock_shared(&cfil_lck_rw); } - return (error); + return error; } static void fill_ip6_sockaddr_4_6(union sockaddr_in_4_6 *sin46, - struct in6_addr *ip6, u_int16_t port) + struct in6_addr *ip6, u_int16_t port) { struct sockaddr_in6 *sin6 = &sin46->sin6; @@ -2801,7 +2868,7 @@ fill_ip6_sockaddr_4_6(union sockaddr_in_4_6 *sin46, static void fill_ip_sockaddr_4_6(union sockaddr_in_4_6 *sin46, - struct in_addr ip, u_int16_t port) + struct in_addr ip, u_int16_t port) { struct sockaddr_in *sin = &sin46->sin; @@ -2813,8 +2880,8 @@ fill_ip_sockaddr_4_6(union sockaddr_in_4_6 *sin46, static void cfil_get_flow_address_v6(struct cfil_hash_entry *entry, struct inpcb *inp, - struct in6_addr **laddr, struct in6_addr **faddr, - u_int16_t *lport, u_int16_t *fport) + struct in6_addr **laddr, struct in6_addr **faddr, + u_int16_t *lport, u_int16_t *fport) { if (entry != NULL) { *laddr = &entry->cfentry_laddr.addr6; @@ -2831,8 +2898,8 @@ cfil_get_flow_address_v6(struct cfil_hash_entry *entry, struct inpcb *inp, static void cfil_get_flow_address(struct cfil_hash_entry *entry, struct inpcb *inp, - struct in_addr *laddr, struct in_addr *faddr, - u_int16_t *lport, u_int16_t *fport) + struct in_addr *laddr, struct in_addr *faddr, + u_int16_t *lport, u_int16_t *fport) { if (entry != NULL) { *laddr = entry->cfentry_laddr.addr46.ia46_addr4; @@ -2849,7 +2916,7 @@ cfil_get_flow_address(struct cfil_hash_entry *entry, struct inpcb *inp, static int cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing, - struct mbuf *data, unsigned int copyoffset, unsigned int copylen) + struct mbuf *data, unsigned int copyoffset, unsigned int copylen) { errno_t error = 0; struct mbuf *copy = NULL; @@ -2866,14 +2933,16 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ cfil_rw_lock_shared(&cfil_lck_rw); entry = &cfil_info->cfi_entries[kcunit - 1]; - if (outgoing) + if (outgoing) { entrybuf = &entry->cfe_snd; - else + } else { entrybuf = &entry->cfe_rcv; + } cfc = entry->cfe_filter; - if (cfc == NULL) + if (cfc == NULL) { goto done; + } data = cfil_data_start(data); if (data == NULL || (data->m_flags & M_PKTHDR) == 0) { @@ -2882,7 +2951,7 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ } CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); + 
(uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); socket_lock_assert_owned(so); @@ -2894,7 +2963,7 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ /* Make a copy of the data to pass to kernel control socket */ copy = m_copym_mode(data, copyoffset, copylen, M_DONTWAIT, - M_COPYM_NOOP_HDR); + M_COPYM_NOOP_HDR); if (copy == NULL) { CFIL_LOG(LOG_ERR, "m_copym_mode() failed"); error = ENOMEM; @@ -2922,9 +2991,9 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ data_req->cfd_msghdr.cfm_version = 1; data_req->cfd_msghdr.cfm_type = CFM_TYPE_EVENT; data_req->cfd_msghdr.cfm_op = - outgoing ? CFM_OP_DATA_OUT : CFM_OP_DATA_IN; + outgoing ? CFM_OP_DATA_OUT : CFM_OP_DATA_IN; data_req->cfd_msghdr.cfm_sock_id = - entry->cfe_cfil_info->cfi_sock_id; + entry->cfe_cfil_info->cfi_sock_id; data_req->cfd_start_offset = entrybuf->cfe_peeked; data_req->cfd_end_offset = entrybuf->cfe_peeked + copylen; @@ -2938,7 +3007,7 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ u_int16_t lport = 0, fport = 0; cfil_get_flow_address_v6(cfil_info->cfi_hash_entry, inp, - &laddr, &faddr, &lport, &fport); + &laddr, &faddr, &lport, &fport); if (outgoing) { fill_ip6_sockaddr_4_6(&data_req->cfc_src, laddr, lport); fill_ip6_sockaddr_4_6(&data_req->cfc_dst, faddr, fport); @@ -2951,7 +3020,7 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ u_int16_t lport = 0, fport = 0; cfil_get_flow_address(cfil_info->cfi_hash_entry, inp, - &laddr, &faddr, &lport, &fport); + &laddr, &faddr, &lport, &fport); if (outgoing) { fill_ip_sockaddr_4_6(&data_req->cfc_src, laddr, lport); @@ -2967,8 +3036,8 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ /* Pass the message to the content filter */ error = ctl_enqueuembuf(entry->cfe_filter->cf_kcref, - entry->cfe_filter->cf_kcunit, - msg, CTL_DATA_EOR); + entry->cfe_filter->cf_kcunit, + msg, CTL_DATA_EOR); if (error != 0) { CFIL_LOG(LOG_ERR, "ctl_enqueuembuf() failed: %d", error); mbuf_freem(msg); @@ -2979,7 +3048,7 @@ cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_ #if VERDICT_DEBUG CFIL_LOG(LOG_ERR, "CFIL: VERDICT ACTION: so %llx sockID %llu outgoing %d: mbuf %llx copyoffset %u copylen %u", - (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, outgoing, (uint64_t)VM_KERNEL_ADDRPERM(data), copyoffset, copylen); + (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, outgoing, (uint64_t)VM_KERNEL_ADDRPERM(data), copyoffset, copylen); #endif done: @@ -2988,19 +3057,21 @@ done: OSIncrementAtomic( &cfil_stats.cfs_data_event_flow_control); - if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) + if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) { cfil_rw_lock_exclusive(&cfil_lck_rw); + } cfc->cf_flags |= CFF_FLOW_CONTROLLED; cfil_rw_unlock_exclusive(&cfil_lck_rw); } else { - if (error != 0) + if (error != 0) { OSIncrementAtomic(&cfil_stats.cfs_data_event_fail); + } cfil_rw_unlock_shared(&cfil_lck_rw); } - return (error); + return error; } /* @@ -3016,27 +3087,30 @@ cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t struct cfe_buf *entrybuf; uint64_t currentoffset = 0; - if (cfil_info == NULL) - return (0); + if (cfil_info == NULL) { + return 0; + } CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); socket_lock_assert_owned(so); entry = 
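Each data event describes an absolute byte range of the stream: cfd_start_offset is the per-direction cfe_peeked counter and cfd_end_offset is that counter plus the bytes in this message. A runnable sketch of the bookkeeping (names are illustrative):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t peeked = 0;
	uint32_t chunks[] = { 1400, 512, 9 };

	for (unsigned i = 0; i < 3; i++) {
		uint64_t start = peeked;
		uint64_t end = peeked + chunks[i];

		printf("event %u: bytes [%llu, %llu)\n", i,
		    (unsigned long long)start, (unsigned long long)end);
		peeked = end;   /* advance only after a successful send */
	}
	return 0;
}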
&cfil_info->cfi_entries[kcunit - 1]; - if (outgoing) + if (outgoing) { entrybuf = &entry->cfe_snd; - else + } else { entrybuf = &entry->cfe_rcv; + } /* Send attached message if not yet done */ if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) { error = cfil_dispatch_attach_event(so, cfil_info, kcunit); if (error != 0) { /* We can recover from flow control */ - if (error == ENOBUFS || error == ENOMEM) + if (error == ENOBUFS || error == ENOMEM) { error = 0; + } goto done; } } else if ((entry->cfe_flags & CFEF_DATA_START) == 0) { @@ -3046,19 +3120,19 @@ cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t #if DATA_DEBUG CFIL_LOG(LOG_DEBUG, "CFIL: SERVICE CTL-Q: pass_offset %llu peeked %llu peek_offset %llu", - entrybuf->cfe_pass_offset, - entrybuf->cfe_peeked, - entrybuf->cfe_peek_offset); + entrybuf->cfe_pass_offset, + entrybuf->cfe_peeked, + entrybuf->cfe_peek_offset); #endif /* Move all data that can pass */ while ((data = cfil_queue_first(&entrybuf->cfe_ctl_q)) != NULL && - entrybuf->cfe_ctl_q.q_start < entrybuf->cfe_pass_offset) { + entrybuf->cfe_ctl_q.q_start < entrybuf->cfe_pass_offset) { datalen = cfil_data_length(data, NULL, NULL); tmp = data; if (entrybuf->cfe_ctl_q.q_start + datalen <= - entrybuf->cfe_pass_offset) { + entrybuf->cfe_pass_offset) { /* * The first mbuf can fully pass */ @@ -3068,20 +3142,20 @@ cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t * The first mbuf can partially pass */ copylen = entrybuf->cfe_pass_offset - - entrybuf->cfe_ctl_q.q_start; + entrybuf->cfe_ctl_q.q_start; } VERIFY(copylen <= datalen); #if DATA_DEBUG CFIL_LOG(LOG_DEBUG, - "CFIL: SERVICE CTL-Q PASSING: %llx first %llu peeked %llu pass %llu peek %llu" - "datalen %u copylen %u", - (uint64_t)VM_KERNEL_ADDRPERM(tmp), - entrybuf->cfe_ctl_q.q_start, - entrybuf->cfe_peeked, - entrybuf->cfe_pass_offset, - entrybuf->cfe_peek_offset, - datalen, copylen); + "CFIL: SERVICE CTL-Q PASSING: %llx first %llu peeked %llu pass %llu peek %llu" + "datalen %u copylen %u", + (uint64_t)VM_KERNEL_ADDRPERM(tmp), + entrybuf->cfe_ctl_q.q_start, + entrybuf->cfe_peeked, + entrybuf->cfe_pass_offset, + entrybuf->cfe_peek_offset, + datalen, copylen); #endif /* @@ -3089,51 +3163,56 @@ cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t * implicitly */ if (entrybuf->cfe_ctl_q.q_start + copylen > - entrybuf->cfe_peeked) + entrybuf->cfe_peeked) { entrybuf->cfe_peeked = - entrybuf->cfe_ctl_q.q_start + copylen; + entrybuf->cfe_ctl_q.q_start + copylen; + } /* * Stop on partial pass */ - if (copylen < datalen) + if (copylen < datalen) { break; + } /* All good, move full data from ctl queue to pending queue */ cfil_queue_remove(&entrybuf->cfe_ctl_q, data, datalen); cfil_queue_enqueue(&entrybuf->cfe_pending_q, data, datalen); - if (outgoing) + if (outgoing) { OSAddAtomic64(datalen, - &cfil_stats.cfs_pending_q_out_enqueued); - else + &cfil_stats.cfs_pending_q_out_enqueued); + } else { OSAddAtomic64(datalen, - &cfil_stats.cfs_pending_q_in_enqueued); + &cfil_stats.cfs_pending_q_in_enqueued); + } } CFIL_INFO_VERIFY(cfil_info); - if (tmp != NULL) + if (tmp != NULL) { CFIL_LOG(LOG_DEBUG, - "%llx first %llu peeked %llu pass %llu peek %llu" - "datalen %u copylen %u", - (uint64_t)VM_KERNEL_ADDRPERM(tmp), - entrybuf->cfe_ctl_q.q_start, - entrybuf->cfe_peeked, - entrybuf->cfe_pass_offset, - entrybuf->cfe_peek_offset, - datalen, copylen); + "%llx first %llu peeked %llu pass %llu peek %llu" + "datalen %u copylen %u", + (uint64_t)VM_KERNEL_ADDRPERM(tmp), + 
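The pass loop boils down to a small piece of arithmetic: how much of the mbuf sitting at queue offset q_start is covered by the filter-granted cfe_pass_offset. A sketch of that decision:

#include <stdint.h>

static uint64_t
passable_bytes(uint64_t q_start, uint64_t datalen, uint64_t pass_offset)
{
	if (q_start >= pass_offset) {
		return 0;                   /* nothing allowed to pass yet */
	}
	if (q_start + datalen <= pass_offset) {
		return datalen;             /* the whole mbuf passes */
	}
	return pass_offset - q_start;   /* only a prefix passes; loop stops */
}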
entrybuf->cfe_ctl_q.q_start, + entrybuf->cfe_peeked, + entrybuf->cfe_pass_offset, + entrybuf->cfe_peek_offset, + datalen, copylen); + } tmp = NULL; /* Now deal with remaining data the filter wants to peek at */ for (data = cfil_queue_first(&entrybuf->cfe_ctl_q), - currentoffset = entrybuf->cfe_ctl_q.q_start; - data != NULL && currentoffset < entrybuf->cfe_peek_offset; - data = cfil_queue_next(&entrybuf->cfe_ctl_q, data), - currentoffset += datalen) { + currentoffset = entrybuf->cfe_ctl_q.q_start; + data != NULL && currentoffset < entrybuf->cfe_peek_offset; + data = cfil_queue_next(&entrybuf->cfe_ctl_q, data), + currentoffset += datalen) { datalen = cfil_data_length(data, NULL, NULL); tmp = data; /* We've already peeked at this mbuf */ - if (currentoffset + datalen <= entrybuf->cfe_peeked) + if (currentoffset + datalen <= entrybuf->cfe_peeked) { continue; + } /* * The data in the first mbuf may have been * partially peeked at @@ -3146,60 +3225,64 @@ cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t * Do not copy more than needed */ if (currentoffset + copyoffset + copylen > - entrybuf->cfe_peek_offset) { + entrybuf->cfe_peek_offset) { copylen = entrybuf->cfe_peek_offset - - (currentoffset + copyoffset); + (currentoffset + copyoffset); } #if DATA_DEBUG CFIL_LOG(LOG_DEBUG, - "CFIL: SERVICE CTL-Q PEEKING: %llx current %llu peeked %llu pass %llu peek %llu " - "datalen %u copylen %u copyoffset %u", - (uint64_t)VM_KERNEL_ADDRPERM(tmp), - currentoffset, - entrybuf->cfe_peeked, - entrybuf->cfe_pass_offset, - entrybuf->cfe_peek_offset, - datalen, copylen, copyoffset); + "CFIL: SERVICE CTL-Q PEEKING: %llx current %llu peeked %llu pass %llu peek %llu " + "datalen %u copylen %u copyoffset %u", + (uint64_t)VM_KERNEL_ADDRPERM(tmp), + currentoffset, + entrybuf->cfe_peeked, + entrybuf->cfe_pass_offset, + entrybuf->cfe_peek_offset, + datalen, copylen, copyoffset); #endif /* * Stop if there is nothing more to peek at */ - if (copylen == 0) + if (copylen == 0) { break; + } /* * Let the filter get a peek at this span of data */ error = cfil_dispatch_data_event(so, cfil_info, kcunit, - outgoing, data, copyoffset, copylen); + outgoing, data, copyoffset, copylen); if (error != 0) { /* On error, leave data in ctl_q */ break; } entrybuf->cfe_peeked += copylen; - if (outgoing) + if (outgoing) { OSAddAtomic64(copylen, - &cfil_stats.cfs_ctl_q_out_peeked); - else + &cfil_stats.cfs_ctl_q_out_peeked); + } else { OSAddAtomic64(copylen, - &cfil_stats.cfs_ctl_q_in_peeked); + &cfil_stats.cfs_ctl_q_in_peeked); + } /* Stop when data could not be fully peeked at */ - if (copylen + copyoffset < datalen) + if (copylen + copyoffset < datalen) { break; + } } CFIL_INFO_VERIFY(cfil_info); - if (tmp != NULL) + if (tmp != NULL) { CFIL_LOG(LOG_DEBUG, - "%llx first %llu peeked %llu pass %llu peek %llu" - "datalen %u copylen %u copyoffset %u", - (uint64_t)VM_KERNEL_ADDRPERM(tmp), - currentoffset, - entrybuf->cfe_peeked, - entrybuf->cfe_pass_offset, - entrybuf->cfe_peek_offset, - datalen, copylen, copyoffset); + "%llx first %llu peeked %llu pass %llu peek %llu" + "datalen %u copylen %u copyoffset %u", + (uint64_t)VM_KERNEL_ADDRPERM(tmp), + currentoffset, + entrybuf->cfe_peeked, + entrybuf->cfe_pass_offset, + entrybuf->cfe_peek_offset, + datalen, copylen, copyoffset); + } /* * Process data that has passed the filter @@ -3207,35 +3290,37 @@ cfil_data_service_ctl_q(struct socket *so, struct cfil_info *cfil_info, uint32_t error = cfil_service_pending_queue(so, cfil_info, kcunit, outgoing); if (error != 0) { 
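The peek loop's clamping is easiest to see in isolation: skip the prefix that was already peeked at, then cut the copy short so it never crosses cfe_peek_offset. A sketch using illustrative names:

#include <stdint.h>

static void
peek_span(uint64_t cur, uint64_t datalen, uint64_t peeked,
    uint64_t peek_offset, uint64_t *copyoffset, uint64_t *copylen)
{
	/* Part of this mbuf may have been peeked at already. */
	*copyoffset = (cur < peeked) ? peeked - cur : 0;

	if (cur + *copyoffset >= peek_offset) {
		*copylen = 0;               /* nothing more to peek at: stop */
		return;
	}
	*copylen = datalen - *copyoffset;
	if (cur + *copyoffset + *copylen > peek_offset) {
		/* Do not copy more than the filter asked to see. */
		*copylen = peek_offset - (cur + *copyoffset);
	}
}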
CFIL_LOG(LOG_ERR, "cfil_service_pending_queue() error %d", - error); + error); goto done; } /* * Dispatch disconnect events that could not be sent */ - if (cfil_info == NULL) + if (cfil_info == NULL) { goto done; - else if (outgoing) { + } else if (outgoing) { if ((cfil_info->cfi_flags & CFIF_SHUT_WR) && - !(entry->cfe_flags & CFEF_SENT_DISCONNECT_OUT)) + !(entry->cfe_flags & CFEF_SENT_DISCONNECT_OUT)) { cfil_dispatch_disconnect_event(so, cfil_info, kcunit, 1); + } } else { if ((cfil_info->cfi_flags & CFIF_SHUT_RD) && - !(entry->cfe_flags & CFEF_SENT_DISCONNECT_IN)) + !(entry->cfe_flags & CFEF_SENT_DISCONNECT_IN)) { cfil_dispatch_disconnect_event(so, cfil_info, kcunit, 0); + } } done: CFIL_LOG(LOG_DEBUG, - "first %llu peeked %llu pass %llu peek %llu", - entrybuf->cfe_ctl_q.q_start, - entrybuf->cfe_peeked, - entrybuf->cfe_pass_offset, - entrybuf->cfe_peek_offset); + "first %llu peeked %llu pass %llu peek %llu", + entrybuf->cfe_ctl_q.q_start, + entrybuf->cfe_peeked, + entrybuf->cfe_pass_offset, + entrybuf->cfe_peek_offset); CFIL_INFO_VERIFY(cfil_info); - return (error); + return error; } /* @@ -3245,22 +3330,23 @@ done: */ int cfil_data_filter(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing, - struct mbuf *data, uint64_t datalen) + struct mbuf *data, uint64_t datalen) { errno_t error = 0; struct cfil_entry *entry; struct cfe_buf *entrybuf; CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); socket_lock_assert_owned(so); entry = &cfil_info->cfi_entries[kcunit - 1]; - if (outgoing) + if (outgoing) { entrybuf = &entry->cfe_snd; - else + } else { entrybuf = &entry->cfe_rcv; + } /* Are we attached to the filter? */ if (entry->cfe_filter == NULL) { @@ -3270,17 +3356,18 @@ cfil_data_filter(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit /* Dispatch to filters */ cfil_queue_enqueue(&entrybuf->cfe_ctl_q, data, datalen); - if (outgoing) + if (outgoing) { OSAddAtomic64(datalen, - &cfil_stats.cfs_ctl_q_out_enqueued); - else + &cfil_stats.cfs_ctl_q_out_enqueued); + } else { OSAddAtomic64(datalen, - &cfil_stats.cfs_ctl_q_in_enqueued); + &cfil_stats.cfs_ctl_q_in_enqueued); + } error = cfil_data_service_ctl_q(so, cfil_info, kcunit, outgoing); if (error != 0) { CFIL_LOG(LOG_ERR, "cfil_data_service_ctl_q() error %d", - error); + error); } /* * We have to return EJUSTRETURN in all cases to avoid double free @@ -3291,7 +3378,7 @@ done: CFIL_INFO_VERIFY(cfil_info); CFIL_LOG(LOG_INFO, "return %d", error); - return (error); + return error; } /* @@ -3311,8 +3398,9 @@ cfil_service_inject_queue(struct socket *so, struct cfil_info *cfil_info, int ou int need_rwakeup = 0; int count = 0; - if (cfil_info == NULL) - return (0); + if (cfil_info == NULL) { + return 0; + } socket_lock_assert_owned(so); @@ -3325,12 +3413,13 @@ cfil_service_inject_queue(struct socket *so, struct cfil_info *cfil_info, int ou } inject_q = &cfi_buf->cfi_inject_q; - if (cfil_queue_empty(inject_q)) - return (0); + if (cfil_queue_empty(inject_q)) { + return 0; + } #if DATA_DEBUG | VERDICT_DEBUG CFIL_LOG(LOG_ERR, "CFIL: SERVICE INJECT-Q: outgoing %d queue len %llu", - (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing, cfil_queue_len(inject_q)); + (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing, cfil_queue_len(inject_q)); #endif while ((data = cfil_queue_first(inject_q)) != NULL) { @@ -3338,8 +3427,8 @@ cfil_service_inject_queue(struct socket *so, struct cfil_info *cfil_info, int ou #if DATA_DEBUG 
CFIL_LOG(LOG_DEBUG, "CFIL: SERVICE INJECT-Q: <%s>: data %llx datalen %u (mbcnt %u)", - remote_addr_ptr ? "UNCONNECTED" : "CONNECTED", - (uint64_t)VM_KERNEL_ADDRPERM(so), (uint64_t)VM_KERNEL_ADDRPERM(data), datalen, mbcnt); + remote_addr_ptr ? "UNCONNECTED" : "CONNECTED", + (uint64_t)VM_KERNEL_ADDRPERM(so), (uint64_t)VM_KERNEL_ADDRPERM(data), datalen, mbcnt); #endif /* Remove data from queue and adjust stats */ @@ -3370,42 +3459,48 @@ cfil_service_inject_queue(struct socket *so, struct cfil_info *cfil_info, int ou * of fix sock_inject_data_in() */ if (IS_UDP(so) == TRUE) { - if (sbappendchain(&so->so_rcv, data, 0)) + if (sbappendchain(&so->so_rcv, data, 0)) { need_rwakeup = 1; + } } else { - if (sbappendstream(&so->so_rcv, data)) + if (sbappendstream(&so->so_rcv, data)) { need_rwakeup = 1; + } } } - if (outgoing) + if (outgoing) { OSAddAtomic64(datalen, - &cfil_stats.cfs_inject_q_out_passed); - else + &cfil_stats.cfs_inject_q_out_passed); + } else { OSAddAtomic64(datalen, - &cfil_stats.cfs_inject_q_in_passed); + &cfil_stats.cfs_inject_q_in_passed); + } count++; } #if DATA_DEBUG | VERDICT_DEBUG CFIL_LOG(LOG_ERR, "CFIL: SERVICE INJECT-Q: injected %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), count); + (uint64_t)VM_KERNEL_ADDRPERM(so), count); #endif /* A single wakeup is for several packets is more efficient */ if (need_rwakeup) { - if (outgoing == TRUE) + if (outgoing == TRUE) { sowwakeup(so); - else + } else { sorwakeup(so); + } } if (error != 0 && cfil_info) { - if (error == ENOBUFS) + if (error == ENOBUFS) { OSIncrementAtomic(&cfil_stats.cfs_inject_q_nobufs); - if (error == ENOMEM) + } + if (error == ENOMEM) { OSIncrementAtomic(&cfil_stats.cfs_inject_q_nomem); + } if (outgoing) { cfil_info->cfi_flags |= CFIF_RETRY_INJECT_OUT; @@ -3421,20 +3516,21 @@ cfil_service_inject_queue(struct socket *so, struct cfil_info *cfil_info, int ou */ if (cfil_info && (cfil_info->cfi_flags & CFIF_SHUT_WR)) { cfil_sock_notify_shutdown(so, SHUT_WR); - if (cfil_sock_data_pending(&so->so_snd) == 0) + if (cfil_sock_data_pending(&so->so_snd) == 0) { soshutdownlock_final(so, SHUT_WR); + } } if (cfil_info && (cfil_info->cfi_flags & CFIF_CLOSE_WAIT)) { if (cfil_filters_attached(so) == 0) { CFIL_LOG(LOG_INFO, "so %llx waking", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); wakeup((caddr_t)cfil_info); } } CFIL_INFO_VERIFY(cfil_info); - return (error); + return error; } static int @@ -3449,15 +3545,16 @@ cfil_service_pending_queue(struct socket *so, struct cfil_info *cfil_info, uint3 struct cfil_queue *pending_q; CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); socket_lock_assert_owned(so); entry = &cfil_info->cfi_entries[kcunit - 1]; - if (outgoing) + if (outgoing) { entrybuf = &entry->cfe_snd; - else + } else { entrybuf = &entry->cfe_rcv; + } pending_q = &entrybuf->cfe_pending_q; @@ -3473,26 +3570,28 @@ cfil_service_pending_queue(struct socket *so, struct cfil_info *cfil_info, uint3 #if DATA_DEBUG CFIL_LOG(LOG_DEBUG, - "CFIL: SERVICE PENDING-Q: data %llx datalen %u passlen %llu curlen %llu", - (uint64_t)VM_KERNEL_ADDRPERM(data), datalen, - passlen, curlen); + "CFIL: SERVICE PENDING-Q: data %llx datalen %u passlen %llu curlen %llu", + (uint64_t)VM_KERNEL_ADDRPERM(data), datalen, + passlen, curlen); #endif - if (curlen + datalen > passlen) + if (curlen + datalen > passlen) { break; + } cfil_queue_remove(pending_q, data, datalen); curlen += datalen; for (kcunit += 1; - kcunit <= 
MAX_CONTENT_FILTER; - kcunit++) { + kcunit <= MAX_CONTENT_FILTER; + kcunit++) { error = cfil_data_filter(so, cfil_info, kcunit, outgoing, - data, datalen); + data, datalen); /* 0 means passed so we can continue */ - if (error != 0) + if (error != 0) { break; + } } /* When data has passed all filters, re-inject */ if (error == 0) { @@ -3501,25 +3600,25 @@ cfil_service_pending_queue(struct socket *so, struct cfil_info *cfil_info, uint3 &cfil_info->cfi_snd.cfi_inject_q, data, datalen); OSAddAtomic64(datalen, - &cfil_stats.cfs_inject_q_out_enqueued); + &cfil_stats.cfs_inject_q_out_enqueued); } else { cfil_queue_enqueue( &cfil_info->cfi_rcv.cfi_inject_q, data, datalen); OSAddAtomic64(datalen, - &cfil_stats.cfs_inject_q_in_enqueued); + &cfil_stats.cfs_inject_q_in_enqueued); } } } CFIL_INFO_VERIFY(cfil_info); - return (error); + return error; } int cfil_update_data_offsets(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing, - uint64_t pass_offset, uint64_t peek_offset) + uint64_t pass_offset, uint64_t peek_offset) { errno_t error = 0; struct cfil_entry *entry = NULL; @@ -3532,48 +3631,51 @@ cfil_update_data_offsets(struct socket *so, struct cfil_info *cfil_info, uint32_ if (cfil_info == NULL) { CFIL_LOG(LOG_ERR, "so %llx cfil detached", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = 0; goto done; } else if (cfil_info->cfi_flags & CFIF_DROP) { CFIL_LOG(LOG_ERR, "so %llx drop set", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = EPIPE; goto done; } entry = &cfil_info->cfi_entries[kcunit - 1]; - if (outgoing) + if (outgoing) { entrybuf = &entry->cfe_snd; - else + } else { entrybuf = &entry->cfe_rcv; + } /* Record updated offsets for this content filter */ if (pass_offset > entrybuf->cfe_pass_offset) { entrybuf->cfe_pass_offset = pass_offset; - if (entrybuf->cfe_peek_offset < entrybuf->cfe_pass_offset) + if (entrybuf->cfe_peek_offset < entrybuf->cfe_pass_offset) { entrybuf->cfe_peek_offset = entrybuf->cfe_pass_offset; + } updated = 1; } else { CFIL_LOG(LOG_INFO, "pass_offset %llu <= cfe_pass_offset %llu", - pass_offset, entrybuf->cfe_pass_offset); + pass_offset, entrybuf->cfe_pass_offset); } /* Filter does not want or need to see data that's allowed to pass */ if (peek_offset > entrybuf->cfe_pass_offset && - peek_offset > entrybuf->cfe_peek_offset) { + peek_offset > entrybuf->cfe_peek_offset) { entrybuf->cfe_peek_offset = peek_offset; updated = 1; } /* Nothing to do */ - if (updated == 0) + if (updated == 0) { goto done; + } /* Move data held in control queue to pending queue if needed */ error = cfil_data_service_ctl_q(so, cfil_info, kcunit, outgoing); if (error != 0) { CFIL_LOG(LOG_ERR, "cfil_data_service_ctl_q() error %d", - error); + error); goto done; } error = EJUSTRETURN; @@ -3593,24 +3695,24 @@ done: entry->cfe_flags |= CFEF_CFIL_DETACHED; #if LIFECYCLE_DEBUG cfil_info_log(LOG_ERR, cfil_info, outgoing ? 
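cfil_service_pending_queue() shows how multiple filters chain: data that clears filter kcunit is offered to every later unit in order, and a nonzero return stops the walk because that unit has queued the data itself. A sketch:

#define MAX_FILTERS 8   /* illustrative stand-in for MAX_CONTENT_FILTER */

static int
cascade(int unit, int (*offer)(int unit))
{
	int error = 0;

	for (unit += 1; unit <= MAX_FILTERS; unit++) {
		error = offer(unit);
		if (error != 0) {
			break;      /* a downstream filter now holds the data */
		}
	}
	return error;       /* 0: passed every filter, safe to re-inject */
}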
- "CFIL: LIFECYCLE: OUT - PASSED ALL - DETACH": - "CFIL: LIFECYCLE: IN - PASSED ALL - DETACH"); + "CFIL: LIFECYCLE: OUT - PASSED ALL - DETACH": + "CFIL: LIFECYCLE: IN - PASSED ALL - DETACH"); #endif CFIL_LOG(LOG_INFO, "so %llx detached %u", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); if ((cfil_info->cfi_flags & CFIF_CLOSE_WAIT) && cfil_filters_attached(so) == 0) { #if LIFECYCLE_DEBUG cfil_info_log(LOG_ERR, cfil_info, "CFIL: LIFECYCLE: WAKING"); #endif CFIL_LOG(LOG_INFO, "so %llx waking", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); wakeup((caddr_t)cfil_info); } } CFIL_INFO_VERIFY(cfil_info); CFIL_LOG(LOG_INFO, "return %d", error); - return (error); + return error; } /* @@ -3625,52 +3727,57 @@ cfil_set_socket_pass_offset(struct socket *so, struct cfil_info *cfil_info, int uint32_t kcunit; uint64_t pass_offset = 0; - if (cfil_info == NULL) - return (0); + if (cfil_info == NULL) { + return 0; + } CFIL_LOG(LOG_INFO, "so %llx outgoing %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing); + (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing); socket_lock_assert_owned(so); - if (outgoing) + if (outgoing) { cfi_buf = &cfil_info->cfi_snd; - else + } else { cfi_buf = &cfil_info->cfi_rcv; + } CFIL_LOG(LOG_DEBUG, "CFIL: outgoing %d cfi_pending_first %llu cfi_pending_last %llu", - (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, outgoing, - cfi_buf->cfi_pending_first, cfi_buf->cfi_pending_last); + (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, outgoing, + cfi_buf->cfi_pending_first, cfi_buf->cfi_pending_last); if (cfi_buf->cfi_pending_last - cfi_buf->cfi_pending_first == 0) { for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { entry = &cfil_info->cfi_entries[kcunit - 1]; /* Are we attached to a filter? */ - if (entry->cfe_filter == NULL) + if (entry->cfe_filter == NULL) { continue; + } - if (outgoing) + if (outgoing) { entrybuf = &entry->cfe_snd; - else + } else { entrybuf = &entry->cfe_rcv; + } if (pass_offset == 0 || - entrybuf->cfe_pass_offset < pass_offset) + entrybuf->cfe_pass_offset < pass_offset) { pass_offset = entrybuf->cfe_pass_offset; + } } cfi_buf->cfi_pass_offset = pass_offset; } CFIL_LOG(LOG_DEBUG, "CFIL: , cfi_pass_offset %llu", - (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, cfi_buf->cfi_pass_offset); + (uint64_t)VM_KERNEL_ADDRPERM(so), cfil_info->cfi_sock_id, cfi_buf->cfi_pass_offset); - return (0); + return 0; } int cfil_action_data_pass(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing, - uint64_t pass_offset, uint64_t peek_offset) + uint64_t pass_offset, uint64_t peek_offset) { errno_t error = 0; @@ -3681,13 +3788,13 @@ cfil_action_data_pass(struct socket *so, struct cfil_info *cfil_info, uint32_t k error = cfil_acquire_sockbuf(so, cfil_info, outgoing); if (error != 0) { CFIL_LOG(LOG_INFO, "so %llx %s dropped", - (uint64_t)VM_KERNEL_ADDRPERM(so), - outgoing ? "out" : "in"); + (uint64_t)VM_KERNEL_ADDRPERM(so), + outgoing ? 
"out" : "in"); goto release; } error = cfil_update_data_offsets(so, cfil_info, kcunit, outgoing, - pass_offset, peek_offset); + pass_offset, peek_offset); cfil_service_inject_queue(so, cfil_info, outgoing); @@ -3696,7 +3803,7 @@ release: CFIL_INFO_VERIFY(cfil_info); cfil_release_sockbuf(so, outgoing); - return (error); + return error; } @@ -3707,8 +3814,9 @@ cfil_flush_queues(struct socket *so, struct cfil_info *cfil_info) int kcunit; uint64_t drained; - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || cfil_info == NULL) + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || cfil_info == NULL) { goto done; + } socket_lock_assert_owned(so); @@ -3728,12 +3836,13 @@ cfil_flush_queues(struct socket *so, struct cfil_info *cfil_info) drained += cfil_queue_drain(&cfil_info->cfi_snd.cfi_inject_q); if (drained) { - if (cfil_info->cfi_flags & CFIF_DROP) + if (cfil_info->cfi_flags & CFIF_DROP) { OSIncrementAtomic( &cfil_stats.cfs_flush_out_drop); - else + } else { OSIncrementAtomic( &cfil_stats.cfs_flush_out_close); + } } } cfil_release_sockbuf(so, 1); @@ -3747,20 +3856,21 @@ cfil_flush_queues(struct socket *so, struct cfil_info *cfil_info) for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { entry = &cfil_info->cfi_entries[kcunit - 1]; - drained += cfil_queue_drain( - &entry->cfe_rcv.cfe_ctl_q); - drained += cfil_queue_drain( - &entry->cfe_rcv.cfe_pending_q); + drained += cfil_queue_drain( + &entry->cfe_rcv.cfe_ctl_q); + drained += cfil_queue_drain( + &entry->cfe_rcv.cfe_pending_q); } drained += cfil_queue_drain(&cfil_info->cfi_rcv.cfi_inject_q); if (drained) { - if (cfil_info->cfi_flags & CFIF_DROP) + if (cfil_info->cfi_flags & CFIF_DROP) { OSIncrementAtomic( &cfil_stats.cfs_flush_in_drop); - else + } else { OSIncrementAtomic( &cfil_stats.cfs_flush_in_close); + } } } cfil_release_sockbuf(so, 0); @@ -3775,16 +3885,18 @@ cfil_action_drop(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit struct cfil_entry *entry; struct proc *p; - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || cfil_info == NULL) + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || cfil_info == NULL) { goto done; + } socket_lock_assert_owned(so); entry = &cfil_info->cfi_entries[kcunit - 1]; /* Are we attached to the filter? 
*/ - if (entry->cfe_filter == NULL) + if (entry->cfe_filter == NULL) { goto done; + } cfil_info->cfi_flags |= CFIF_DROP; @@ -3796,13 +3908,14 @@ cfil_action_drop(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit */ if (so->so_cfil_db == NULL) { error = sosetdefunct(p, so, - SHUTDOWN_SOCKET_LEVEL_CONTENT_FILTER | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, - FALSE); + SHUTDOWN_SOCKET_LEVEL_CONTENT_FILTER | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, + FALSE); /* Flush the socket buffer and disconnect */ - if (error == 0) + if (error == 0) { error = sodefunct(p, so, - SHUTDOWN_SOCKET_LEVEL_CONTENT_FILTER | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL); + SHUTDOWN_SOCKET_LEVEL_CONTENT_FILTER | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL); + } } /* The filter is done, mark as detached */ @@ -3811,7 +3924,7 @@ cfil_action_drop(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit cfil_info_log(LOG_ERR, cfil_info, "CFIL: LIFECYCLE: DROP - DETACH"); #endif CFIL_LOG(LOG_INFO, "so %llx detached %u", - (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); + (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); /* Pending data needs to go */ cfil_flush_queues(so, cfil_info); @@ -3819,12 +3932,12 @@ cfil_action_drop(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit if (cfil_info && (cfil_info->cfi_flags & CFIF_CLOSE_WAIT)) { if (cfil_filters_attached(so) == 0) { CFIL_LOG(LOG_INFO, "so %llx waking", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); wakeup((caddr_t)cfil_info); } } done: - return (error); + return error; } int @@ -3843,15 +3956,15 @@ cfil_action_bless_client(uint32_t kcunit, struct cfil_msg_hdr *msghdr) } else { // The client gets a pass automatically cfil_info = (so->so_cfil_db != NULL) ? - cfil_db_get_cfil_info(so->so_cfil_db, msghdr->cfm_sock_id) : so->so_cfil; + cfil_db_get_cfil_info(so->so_cfil_db, msghdr->cfm_sock_id) : so->so_cfil; if (cfil_attached) { #if VERDICT_DEBUG if (cfil_info != NULL) { CFIL_LOG(LOG_ERR, "CFIL: VERDICT RECEIVED: BLESS %s ", - cfil_info->cfi_hash_entry ? "UDP" : "TCP", - (uint64_t)VM_KERNEL_ADDRPERM(so), - cfil_info->cfi_sock_id); + cfil_info->cfi_hash_entry ? "UDP" : "TCP", + (uint64_t)VM_KERNEL_ADDRPERM(so), + cfil_info->cfi_sock_id); } #endif (void)cfil_action_data_pass(so, cfil_info, kcunit, 1, CFM_MAX_OFFSET, CFM_MAX_OFFSET); @@ -3862,7 +3975,7 @@ cfil_action_bless_client(uint32_t kcunit, struct cfil_msg_hdr *msghdr) socket_unlock(so, 1); } - return (error); + return error; } static int @@ -3873,25 +3986,28 @@ cfil_update_entry_offsets(struct socket *so, struct cfil_info *cfil_info, int ou uint32_t kcunit; CFIL_LOG(LOG_INFO, "so %llx outgoing %d datalen %u", - (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing, datalen); + (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing, datalen); for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { entry = &cfil_info->cfi_entries[kcunit - 1]; /* Are we attached to the filter? 
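The drop path tears a TCP socket down in two steps: sosetdefunct() first marks the socket defunct, and only if that succeeds does sodefunct() flush the buffers and disconnect. The structure, reduced to stand-in callbacks:

static int
drop_socket(int (*mark_defunct)(void), int (*do_defunct)(void))
{
	int error = mark_defunct();     /* step 1: mark the socket defunct */

	if (error == 0) {
		error = do_defunct();       /* step 2: flush buffers, disconnect */
	}
	return error;
}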
*/ - if (entry->cfe_filter == NULL) + if (entry->cfe_filter == NULL) { continue; + } - if (outgoing) + if (outgoing) { entrybuf = &entry->cfe_snd; - else + } else { entrybuf = &entry->cfe_rcv; + } entrybuf->cfe_ctl_q.q_start += datalen; entrybuf->cfe_pass_offset = entrybuf->cfe_ctl_q.q_start; entrybuf->cfe_peeked = entrybuf->cfe_ctl_q.q_start; - if (entrybuf->cfe_peek_offset < entrybuf->cfe_pass_offset) + if (entrybuf->cfe_peek_offset < entrybuf->cfe_pass_offset) { entrybuf->cfe_peek_offset = entrybuf->cfe_pass_offset; + } entrybuf->cfe_ctl_q.q_end += datalen; @@ -3899,12 +4015,12 @@ cfil_update_entry_offsets(struct socket *so, struct cfil_info *cfil_info, int ou entrybuf->cfe_pending_q.q_end += datalen; } CFIL_INFO_VERIFY(cfil_info); - return (0); + return 0; } int cfil_data_common(struct socket *so, struct cfil_info *cfil_info, int outgoing, struct sockaddr *to, - struct mbuf *data, struct mbuf *control, uint32_t flags) + struct mbuf *data, struct mbuf *control, uint32_t flags) { #pragma unused(to, control, flags) errno_t error = 0; @@ -3917,22 +4033,23 @@ cfil_data_common(struct socket *so, struct cfil_info *cfil_info, int outgoing, s if (cfil_info == NULL) { CFIL_LOG(LOG_ERR, "so %llx cfil detached", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = 0; goto done; } else if (cfil_info->cfi_flags & CFIF_DROP) { CFIL_LOG(LOG_ERR, "so %llx drop set", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); error = EPIPE; goto done; } datalen = cfil_data_length(data, &mbcnt, &mbnum); - if (outgoing) + if (outgoing) { cfi_buf = &cfil_info->cfi_snd; - else + } else { cfi_buf = &cfil_info->cfi_rcv; + } cfi_buf->cfi_pending_last += datalen; cfi_buf->cfi_pending_mbcnt += mbcnt; @@ -3940,25 +4057,25 @@ cfil_data_common(struct socket *so, struct cfil_info *cfil_info, int outgoing, s if (IS_UDP(so)) { if (cfi_buf->cfi_pending_mbnum > cfil_udp_gc_mbuf_num_max || - cfi_buf->cfi_pending_mbcnt > cfil_udp_gc_mbuf_cnt_max) { + cfi_buf->cfi_pending_mbcnt > cfil_udp_gc_mbuf_cnt_max) { cfi_buf->cfi_tail_drop_cnt++; cfi_buf->cfi_pending_mbcnt -= mbcnt; cfi_buf->cfi_pending_mbnum -= mbnum; - return (EPIPE); + return EPIPE; } } cfil_info_buf_verify(cfi_buf); #if DATA_DEBUG - CFIL_LOG(LOG_DEBUG, "CFIL: QUEUEING DATA: %s: data %llx len %u flags 0x%x nextpkt %llx - cfi_pending_last %llu cfi_pending_mbcnt %u cfi_pass_offset %llu", - (uint64_t)VM_KERNEL_ADDRPERM(so), - outgoing ? "OUT" : "IN", - (uint64_t)VM_KERNEL_ADDRPERM(data), datalen, data->m_flags, - (uint64_t)VM_KERNEL_ADDRPERM(data->m_nextpkt), - cfi_buf->cfi_pending_last, - cfi_buf->cfi_pending_mbcnt, - cfi_buf->cfi_pass_offset); + CFIL_LOG(LOG_DEBUG, "CFIL: QUEUEING DATA: %s: data %llx len %u flags 0x%x nextpkt %llx - cfi_pending_last %llu cfi_pending_mbcnt %u cfi_pass_offset %llu", + (uint64_t)VM_KERNEL_ADDRPERM(so), + outgoing ? "OUT" : "IN", + (uint64_t)VM_KERNEL_ADDRPERM(data), datalen, data->m_flags, + (uint64_t)VM_KERNEL_ADDRPERM(data->m_nextpkt), + cfi_buf->cfi_pending_last, + cfi_buf->cfi_pending_mbcnt, + cfi_buf->cfi_pass_offset); #endif /* Fast path when below pass offset */ @@ -3979,16 +4096,17 @@ cfil_data_common(struct socket *so, struct cfil_info *cfil_info, int outgoing, s (void) cfil_udp_save_socket_state(cfil_info, data); chain = sbconcat_mbufs(NULL, outgoing ? 
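For UDP flows the common data path adds backpressure: per-flow mbuf counts are charged first, and if either count exceeds its garbage-collection threshold the datagram is tail-dropped with EPIPE and the charge reverted. A sketch; threshold names are illustrative, and the byte offset itself stays monotonic, mirroring the code above:

#include <errno.h>
#include <stdint.h>

struct flow_acct {
	uint64_t pending_last;  /* monotonic byte offset of queued data */
	uint32_t mbcnt;         /* mbuf storage charged to the flow */
	uint32_t mbnum;         /* number of mbufs charged to the flow */
	uint32_t tail_drops;
};

static int
charge_datagram(struct flow_acct *a, uint32_t len, uint32_t mbcnt,
    uint32_t mbnum, uint32_t mbnum_max, uint32_t mbcnt_max)
{
	a->pending_last += len;
	a->mbcnt += mbcnt;
	a->mbnum += mbnum;

	if (a->mbnum > mbnum_max || a->mbcnt > mbcnt_max) {
		a->tail_drops++;
		a->mbcnt -= mbcnt;  /* revert the storage charge and drop */
		a->mbnum -= mbnum;
		return EPIPE;
	}
	return 0;
}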
NULL : to, data, control); if (chain == NULL) { - return (ENOBUFS); + return ENOBUFS; } data = chain; } error = cfil_data_filter(so, cfil_info, kcunit, outgoing, data, - datalen); + datalen); } /* 0 means passed so continue with next filter */ - if (error != 0) + if (error != 0) { break; + } } } @@ -4002,7 +4120,7 @@ cfil_data_common(struct socket *so, struct cfil_info *cfil_info, int outgoing, s done: CFIL_INFO_VERIFY(cfil_info); - return (error); + return error; } /* @@ -4010,44 +4128,47 @@ done: */ int cfil_sock_data_out(struct socket *so, struct sockaddr *to, - struct mbuf *data, struct mbuf *control, uint32_t flags) + struct mbuf *data, struct mbuf *control, uint32_t flags) { int error = 0; - + if (IS_UDP(so)) { - return (cfil_sock_udp_handle_data(TRUE, so, NULL, to, data, control, flags)); - } + return cfil_sock_udp_handle_data(TRUE, so, NULL, to, data, control, flags); + } - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) - return (0); + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { + return 0; + } socket_lock_assert_owned(so); if (so->so_cfil->cfi_flags & CFIF_DROP) { CFIL_LOG(LOG_ERR, "so %llx drop set", - (uint64_t)VM_KERNEL_ADDRPERM(so)); - return (EPIPE); + (uint64_t)VM_KERNEL_ADDRPERM(so)); + return EPIPE; } if (control != NULL) { CFIL_LOG(LOG_ERR, "so %llx control", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); OSIncrementAtomic(&cfil_stats.cfs_data_out_control); } if ((flags & MSG_OOB)) { CFIL_LOG(LOG_ERR, "so %llx MSG_OOB", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); OSIncrementAtomic(&cfil_stats.cfs_data_out_oob); } - if ((so->so_snd.sb_flags & SB_LOCK) == 0) + if ((so->so_snd.sb_flags & SB_LOCK) == 0) { panic("so %p SB_LOCK not set", so); + } - if (so->so_snd.sb_cfil_thread != NULL) + if (so->so_snd.sb_cfil_thread != NULL) { panic("%s sb_cfil_thread %p not NULL", __func__, - so->so_snd.sb_cfil_thread); + so->so_snd.sb_cfil_thread); + } error = cfil_data_common(so, so->so_cfil, 1, to, data, control, flags); - return (error); + return error; } /* @@ -4055,37 +4176,38 @@ cfil_sock_data_out(struct socket *so, struct sockaddr *to, */ int cfil_sock_data_in(struct socket *so, struct sockaddr *from, - struct mbuf *data, struct mbuf *control, uint32_t flags) + struct mbuf *data, struct mbuf *control, uint32_t flags) { int error = 0; if (IS_UDP(so)) { - return (cfil_sock_udp_handle_data(FALSE, so, NULL, from, data, control, flags)); - } + return cfil_sock_udp_handle_data(FALSE, so, NULL, from, data, control, flags); + } - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) - return (0); + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { + return 0; + } socket_lock_assert_owned(so); if (so->so_cfil->cfi_flags & CFIF_DROP) { CFIL_LOG(LOG_ERR, "so %llx drop set", - (uint64_t)VM_KERNEL_ADDRPERM(so)); - return (EPIPE); + (uint64_t)VM_KERNEL_ADDRPERM(so)); + return EPIPE; } if (control != NULL) { CFIL_LOG(LOG_ERR, "so %llx control", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); OSIncrementAtomic(&cfil_stats.cfs_data_in_control); } if (data->m_type == MT_OOBDATA) { CFIL_LOG(LOG_ERR, "so %llx MSG_OOB", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); OSIncrementAtomic(&cfil_stats.cfs_data_in_oob); } error = cfil_data_common(so, so->so_cfil, 0, from, data, control, flags); - return (error); + return error; } /* @@ -4102,16 +4224,17 @@ cfil_sock_shutdown(struct socket *so, int *how) int error = 0; 
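cfil_sock_data_out() and cfil_sock_data_in() both funnel into cfil_data_common(), which short-circuits when everything queued so far already sits below the socket-wide pass offset; only data beyond it is offered to the filters. The gate, in isolation:

#include <stdint.h>

static int
maybe_filter(uint64_t pending_last, uint64_t pass_offset,
    int (*run_filters)(void))
{
	if (pending_last <= pass_offset) {
		return 0;               /* fast path: already allowed through */
	}
	return run_filters();       /* slow path: offer data to each filter */
}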
if (IS_UDP(so)) { - return (cfil_sock_udp_shutdown(so, how)); + return cfil_sock_udp_shutdown(so, how); } - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { goto done; + } socket_lock_assert_owned(so); CFIL_LOG(LOG_INFO, "so %llx how %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), *how); + (uint64_t)VM_KERNEL_ADDRPERM(so), *how); /* * Check the state of the socket before the content filter @@ -4129,7 +4252,7 @@ cfil_sock_shutdown(struct socket *so, int *how) if ((so->so_cfil->cfi_flags & CFIF_DROP) != 0) { CFIL_LOG(LOG_ERR, "so %llx drop set", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); goto done; } @@ -4173,7 +4296,7 @@ cfil_sock_shutdown(struct socket *so, int *how) } } done: - return (error); + return error; } /* @@ -4191,8 +4314,9 @@ cfil_sock_is_closed(struct socket *so) return; } - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { return; + } CFIL_LOG(LOG_INFO, "so %llx", (uint64_t)VM_KERNEL_ADDRPERM(so)); @@ -4205,8 +4329,9 @@ cfil_sock_is_closed(struct socket *so) /* Last chance to push passed data out */ error = cfil_acquire_sockbuf(so, so->so_cfil, 1); - if (error == 0) + if (error == 0) { cfil_service_inject_queue(so, so->so_cfil, 1); + } cfil_release_sockbuf(so, 1); so->so_cfil->cfi_flags |= CFIF_SOCK_CLOSED; @@ -4234,21 +4359,24 @@ cfil_sock_notify_shutdown(struct socket *so, int how) return; } - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { return; + } CFIL_LOG(LOG_INFO, "so %llx how %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), how); + (uint64_t)VM_KERNEL_ADDRPERM(so), how); socket_lock_assert_owned(so); for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { /* Disconnect incoming side */ - if (how != SHUT_WR) + if (how != SHUT_WR) { error = cfil_dispatch_disconnect_event(so, so->so_cfil, kcunit, 0); + } /* Disconnect outgoing side */ - if (how != SHUT_RD) + if (how != SHUT_RD) { error = cfil_dispatch_disconnect_event(so, so->so_cfil, kcunit, 1); + } } } @@ -4263,8 +4391,9 @@ cfil_filters_attached(struct socket *so) return cfil_filters_udp_attached(so, FALSE); } - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) - return (0); + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { + return 0; + } socket_lock_assert_owned(so); @@ -4272,17 +4401,20 @@ cfil_filters_attached(struct socket *so) entry = &so->so_cfil->cfi_entries[kcunit - 1]; /* Are we attached to the filter? 
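cfil_sock_notify_shutdown() maps the shutdown(2) "how" argument onto per-direction disconnect events: SHUT_RD suppresses only the incoming side, SHUT_WR only the outgoing side, and SHUT_RDWR notifies both. A sketch:

#include <sys/socket.h>

static void
notify_shutdown(int how, void (*disconnect)(int outgoing))
{
	if (how != SHUT_WR) {
		disconnect(0);      /* reads are going away: incoming event */
	}
	if (how != SHUT_RD) {
		disconnect(1);      /* writes are going away: outgoing event */
	}
}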
*/ - if (entry->cfe_filter == NULL) + if (entry->cfe_filter == NULL) { continue; - if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) + } + if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) { continue; - if ((entry->cfe_flags & CFEF_CFIL_DETACHED) != 0) + } + if ((entry->cfe_flags & CFEF_CFIL_DETACHED) != 0) { continue; + } attached = 1; break; } - return (attached); + return attached; } /* @@ -4301,15 +4433,17 @@ cfil_sock_close_wait(struct socket *so) return; } - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { return; + } CFIL_LOG(LOG_INFO, "so %llx", (uint64_t)VM_KERNEL_ADDRPERM(so)); - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); while (cfil_filters_attached(so)) { @@ -4322,24 +4456,25 @@ cfil_sock_close_wait(struct socket *so) * Make sure we need to wait after the filter are notified * of the disconnection */ - if (cfil_filters_attached(so) == 0) + if (cfil_filters_attached(so) == 0) { break; + } CFIL_LOG(LOG_INFO, "so %llx waiting", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); ts.tv_sec = cfil_close_wait_timeout / 1000; ts.tv_nsec = (cfil_close_wait_timeout % 1000) * - NSEC_PER_USEC * 1000; + NSEC_PER_USEC * 1000; OSIncrementAtomic(&cfil_stats.cfs_close_wait); so->so_cfil->cfi_flags |= CFIF_CLOSE_WAIT; error = msleep((caddr_t)so->so_cfil, mutex_held, - PSOCK | PCATCH, "cfil_sock_close_wait", &ts); + PSOCK | PCATCH, "cfil_sock_close_wait", &ts); so->so_cfil->cfi_flags &= ~CFIF_CLOSE_WAIT; CFIL_LOG(LOG_NOTICE, "so %llx timed out %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), (error != 0)); + (uint64_t)VM_KERNEL_ADDRPERM(so), (error != 0)); /* * Force close in case of timeout @@ -4349,7 +4484,6 @@ cfil_sock_close_wait(struct socket *so) break; } } - } /* @@ -4362,7 +4496,7 @@ cfil_sock_data_pending(struct sockbuf *sb) uint64_t pending = 0; if (IS_UDP(so)) { - return (cfil_sock_udp_data_pending(sb, FALSE)); + return cfil_sock_udp_data_pending(sb, FALSE); } if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil != NULL) { @@ -4370,20 +4504,22 @@ cfil_sock_data_pending(struct sockbuf *sb) socket_lock_assert_owned(so); - if ((sb->sb_flags & SB_RECV) == 0) + if ((sb->sb_flags & SB_RECV) == 0) { cfi_buf = &so->so_cfil->cfi_snd; - else + } else { cfi_buf = &so->so_cfil->cfi_rcv; + } pending = cfi_buf->cfi_pending_last - - cfi_buf->cfi_pending_first; + cfi_buf->cfi_pending_first; /* * If we are limited by the "chars of mbufs used" roughly * adjust so we won't overcommit */ - if (pending > (uint64_t)cfi_buf->cfi_pending_mbcnt) + if (pending > (uint64_t)cfi_buf->cfi_pending_mbcnt) { pending = cfi_buf->cfi_pending_mbcnt; + } } VERIFY(pending < INT32_MAX); @@ -4402,29 +4538,31 @@ cfil_sock_data_space(struct sockbuf *sb) uint64_t pending = 0; if (IS_UDP(so)) { - return (cfil_sock_udp_data_pending(sb, TRUE)); + return cfil_sock_udp_data_pending(sb, TRUE); } if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil != NULL && - so->so_snd.sb_cfil_thread != current_thread()) { + so->so_snd.sb_cfil_thread != current_thread()) { struct cfi_buf *cfi_buf; socket_lock_assert_owned(so); - if ((sb->sb_flags & SB_RECV) == 0) + if ((sb->sb_flags & SB_RECV) == 0) { cfi_buf = &so->so_cfil->cfi_snd; - else + } else { cfi_buf = &so->so_cfil->cfi_rcv; + } pending = 
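cfil_sock_close_wait() converts the millisecond tunable cfil_close_wait_timeout into the timespec that msleep() expects; the NSEC_PER_USEC * 1000 factor is simply "nanoseconds per millisecond". Equivalent user-space arithmetic:

#include <time.h>

static struct timespec
ms_to_timespec(unsigned long ms)
{
	struct timespec ts;

	ts.tv_sec = ms / 1000;
	ts.tv_nsec = (long)(ms % 1000) * 1000000L;  /* 1 ms = 1e6 ns */
	return ts;
}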
cfi_buf->cfi_pending_last - - cfi_buf->cfi_pending_first; + cfi_buf->cfi_pending_first; /* * If we are limited by the "chars of mbufs used" roughly * adjust so we won't overcommit */ - if ((uint64_t)cfi_buf->cfi_pending_mbcnt > pending) + if ((uint64_t)cfi_buf->cfi_pending_mbcnt > pending) { pending = cfi_buf->cfi_pending_mbcnt; + } } VERIFY(pending < INT32_MAX); @@ -4444,43 +4582,48 @@ cfil_sock_buf_update(struct sockbuf *sb) int error; struct socket *so = sb->sb_so; - if (IS_UDP(so)) { + if (IS_UDP(so)) { cfil_sock_udp_buf_update(sb); return; - } + } - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) + if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || so->so_cfil == NULL) { return; + } - if (!cfil_sbtrim) + if (!cfil_sbtrim) { return; + } socket_lock_assert_owned(so); if ((sb->sb_flags & SB_RECV) == 0) { - if ((so->so_cfil->cfi_flags & CFIF_RETRY_INJECT_OUT) == 0) + if ((so->so_cfil->cfi_flags & CFIF_RETRY_INJECT_OUT) == 0) { return; + } outgoing = 1; OSIncrementAtomic(&cfil_stats.cfs_inject_q_out_retry); } else { - if ((so->so_cfil->cfi_flags & CFIF_RETRY_INJECT_IN) == 0) + if ((so->so_cfil->cfi_flags & CFIF_RETRY_INJECT_IN) == 0) { return; + } outgoing = 0; OSIncrementAtomic(&cfil_stats.cfs_inject_q_in_retry); } CFIL_LOG(LOG_NOTICE, "so %llx outgoing %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing); + (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing); error = cfil_acquire_sockbuf(so, so->so_cfil, outgoing); - if (error == 0) + if (error == 0) { cfil_service_inject_queue(so, so->so_cfil, outgoing); + } cfil_release_sockbuf(so, outgoing); } int sysctl_cfil_filter_list(struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { #pragma unused(oidp, arg1, arg2) int error = 0; @@ -4488,8 +4631,9 @@ sysctl_cfil_filter_list(struct sysctl_oid *oidp, void *arg1, int arg2, u_int32_t i; /* Read only */ - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } cfil_rw_lock_shared(&cfil_lck_rw); @@ -4497,8 +4641,9 @@ sysctl_cfil_filter_list(struct sysctl_oid *oidp, void *arg1, int arg2, struct cfil_filter_stat filter_stat; struct content_filter *cfc = content_filters[i]; - if (cfc == NULL) + if (cfc == NULL) { continue; + } /* If just asking for the size */ if (req->oldptr == USER_ADDR_NULL) { @@ -4514,13 +4659,15 @@ sysctl_cfil_filter_list(struct sysctl_oid *oidp, void *arg1, int arg2, filter_stat.cfs_necp_control_unit = cfc->cf_necp_control_unit; error = SYSCTL_OUT(req, &filter_stat, - sizeof (struct cfil_filter_stat)); - if (error != 0) + sizeof(struct cfil_filter_stat)); + if (error != 0) { break; + } } /* If just asking for the size */ - if (req->oldptr == USER_ADDR_NULL) + if (req->oldptr == USER_ADDR_NULL) { req->oldidx = len; + } cfil_rw_unlock_shared(&cfil_lck_rw); @@ -4532,11 +4679,12 @@ sysctl_cfil_filter_list(struct sysctl_oid *oidp, void *arg1, int arg2, } #endif - return (error); + return error; } -static int sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) +static int +sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2, + struct sysctl_req *req) { #pragma unused(oidp, arg1, arg2) int error = 0; @@ -4544,8 +4692,9 @@ static int sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2, struct cfil_info *cfi; /* Read only */ - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } cfil_rw_lock_shared(&cfil_lck_rw); @@ -4554,7 +4703,7 @@ static int 
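cfil_sock_data_pending() and cfil_sock_data_space() both start from "bytes held = pending_last - pending_first" and then reconcile that against the mbuf-storage count so the socket-buffer space math is not overcommitted; the pending variant clamps down to the count, the space variant rounds up to it. The pending case, sketched:

#include <stdint.h>

static uint64_t
bytes_pending(uint64_t first, uint64_t last, uint32_t mbcnt)
{
	uint64_t pending = last - first;

	if (pending > (uint64_t)mbcnt) {
		pending = mbcnt;    /* never report more than is stored */
	}
	return pending;         /* callers VERIFY() this fits in int32 */
}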
sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2, */ if (req->oldptr == USER_ADDR_NULL) { req->oldidx = cfil_sock_attached_count * - sizeof(struct cfil_sock_stat); + sizeof(struct cfil_sock_stat); /* Bump the length in case new sockets gets attached */ req->oldidx += req->oldidx >> 3; goto done; @@ -4573,15 +4722,15 @@ static int sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2, if (so != NULL) { stat.cfs_pid = so->last_pid; memcpy(stat.cfs_uuid, so->last_uuid, - sizeof(uuid_t)); + sizeof(uuid_t)); if (so->so_flags & SOF_DELEGATED) { stat.cfs_e_pid = so->e_pid; memcpy(stat.cfs_e_uuid, so->e_uuid, - sizeof(uuid_t)); + sizeof(uuid_t)); } else { stat.cfs_e_pid = so->last_pid; memcpy(stat.cfs_e_uuid, so->last_uuid, - sizeof(uuid_t)); + sizeof(uuid_t)); } stat.cfs_sock_family = so->so_proto->pr_domain->dom_family; @@ -4590,22 +4739,22 @@ static int sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2, } stat.cfs_snd.cbs_pending_first = - cfi->cfi_snd.cfi_pending_first; + cfi->cfi_snd.cfi_pending_first; stat.cfs_snd.cbs_pending_last = - cfi->cfi_snd.cfi_pending_last; + cfi->cfi_snd.cfi_pending_last; stat.cfs_snd.cbs_inject_q_len = - cfil_queue_len(&cfi->cfi_snd.cfi_inject_q); + cfil_queue_len(&cfi->cfi_snd.cfi_inject_q); stat.cfs_snd.cbs_pass_offset = - cfi->cfi_snd.cfi_pass_offset; + cfi->cfi_snd.cfi_pass_offset; stat.cfs_rcv.cbs_pending_first = - cfi->cfi_rcv.cfi_pending_first; + cfi->cfi_rcv.cfi_pending_first; stat.cfs_rcv.cbs_pending_last = - cfi->cfi_rcv.cfi_pending_last; + cfi->cfi_rcv.cfi_pending_last; stat.cfs_rcv.cbs_inject_q_len = - cfil_queue_len(&cfi->cfi_rcv.cfi_inject_q); + cfil_queue_len(&cfi->cfi_rcv.cfi_inject_q); stat.cfs_rcv.cbs_pass_offset = - cfi->cfi_rcv.cfi_pass_offset; + cfi->cfi_rcv.cfi_pass_offset; for (i = 0; i < MAX_CONTENT_FILTER; i++) { struct cfil_entry_stat *estat; @@ -4618,31 +4767,31 @@ static int sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2, estat->ces_len = sizeof(struct cfil_entry_stat); estat->ces_filter_id = entry->cfe_filter ? 
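Both sysctl handlers follow the standard two-phase protocol: a call with oldptr == USER_ADDR_NULL only reports the needed size, and the sock-list handler pads its estimate by one eighth (oldidx >> 3) so sockets attached between the two calls still fit. The sizing step, in outline:

#include <stdbool.h>
#include <stddef.h>

static size_t
probe_len(bool size_only, size_t nrecords, size_t recsize)
{
	size_t len = nrecords * recsize;

	if (size_only) {
		len += len >> 3;    /* headroom for sockets attached meanwhile */
	}
	return len;
}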
- entry->cfe_filter->cf_kcunit : 0; + entry->cfe_filter->cf_kcunit : 0; estat->ces_flags = entry->cfe_flags; estat->ces_necp_control_unit = - entry->cfe_necp_control_unit; + entry->cfe_necp_control_unit; estat->ces_last_event.tv_sec = - (int64_t)entry->cfe_last_event.tv_sec; + (int64_t)entry->cfe_last_event.tv_sec; estat->ces_last_event.tv_usec = - (int64_t)entry->cfe_last_event.tv_usec; + (int64_t)entry->cfe_last_event.tv_usec; estat->ces_last_action.tv_sec = - (int64_t)entry->cfe_last_action.tv_sec; + (int64_t)entry->cfe_last_action.tv_sec; estat->ces_last_action.tv_usec = - (int64_t)entry->cfe_last_action.tv_usec; + (int64_t)entry->cfe_last_action.tv_usec; ebuf = &entry->cfe_snd; sbuf = &estat->ces_snd; sbuf->cbs_pending_first = - cfil_queue_offset_first(&ebuf->cfe_pending_q); + cfil_queue_offset_first(&ebuf->cfe_pending_q); sbuf->cbs_pending_last = - cfil_queue_offset_last(&ebuf->cfe_pending_q); + cfil_queue_offset_last(&ebuf->cfe_pending_q); sbuf->cbs_ctl_first = - cfil_queue_offset_first(&ebuf->cfe_ctl_q); + cfil_queue_offset_first(&ebuf->cfe_ctl_q); sbuf->cbs_ctl_last = - cfil_queue_offset_last(&ebuf->cfe_ctl_q); + cfil_queue_offset_last(&ebuf->cfe_ctl_q); sbuf->cbs_pass_offset = ebuf->cfe_pass_offset; sbuf->cbs_peek_offset = ebuf->cfe_peek_offset; sbuf->cbs_peeked = ebuf->cfe_peeked; @@ -4650,21 +4799,22 @@ static int sysctl_cfil_sock_list(struct sysctl_oid *oidp, void *arg1, int arg2, ebuf = &entry->cfe_rcv; sbuf = &estat->ces_rcv; sbuf->cbs_pending_first = - cfil_queue_offset_first(&ebuf->cfe_pending_q); + cfil_queue_offset_first(&ebuf->cfe_pending_q); sbuf->cbs_pending_last = - cfil_queue_offset_last(&ebuf->cfe_pending_q); + cfil_queue_offset_last(&ebuf->cfe_pending_q); sbuf->cbs_ctl_first = - cfil_queue_offset_first(&ebuf->cfe_ctl_q); + cfil_queue_offset_first(&ebuf->cfe_ctl_q); sbuf->cbs_ctl_last = - cfil_queue_offset_last(&ebuf->cfe_ctl_q); + cfil_queue_offset_last(&ebuf->cfe_ctl_q); sbuf->cbs_pass_offset = ebuf->cfe_pass_offset; sbuf->cbs_peek_offset = ebuf->cfe_peek_offset; sbuf->cbs_peeked = ebuf->cfe_peeked; } error = SYSCTL_OUT(req, &stat, - sizeof (struct cfil_sock_stat)); - if (error != 0) + sizeof(struct cfil_sock_stat)); + if (error != 0) { break; + } } done: cfil_rw_unlock_shared(&cfil_lck_rw); @@ -4675,7 +4825,7 @@ done: } #endif - return (error); + return error; } /* @@ -4684,253 +4834,257 @@ done: static void cfil_hash_entry_log(int level, struct socket *so, struct cfil_hash_entry *entry, uint64_t sockId, const char* msg) { - char local[MAX_IPv6_STR_LEN+6]; - char remote[MAX_IPv6_STR_LEN+6]; - const void *addr; + char local[MAX_IPv6_STR_LEN + 6]; + char remote[MAX_IPv6_STR_LEN + 6]; + const void *addr; // No sock or not UDP, no-op - if (so == NULL || entry == NULL) { - return; - } - - local[0] = remote[0] = 0x0; - - switch (entry->cfentry_family) { - case AF_INET6: - addr = &entry->cfentry_laddr.addr6; - inet_ntop(AF_INET6, addr, local, sizeof(local)); - addr = &entry->cfentry_faddr.addr6; - inet_ntop(AF_INET6, addr, remote, sizeof(local)); - break; - case AF_INET: - addr = &entry->cfentry_laddr.addr46.ia46_addr4.s_addr; - inet_ntop(AF_INET, addr, local, sizeof(local)); - addr = &entry->cfentry_faddr.addr46.ia46_addr4.s_addr; - inet_ntop(AF_INET, addr, remote, sizeof(local)); - break; - default: - return; - } - + if (so == NULL || entry == NULL) { + return; + } + + local[0] = remote[0] = 0x0; + + switch (entry->cfentry_family) { + case AF_INET6: + addr = &entry->cfentry_laddr.addr6; + inet_ntop(AF_INET6, addr, local, sizeof(local)); + addr = 
&entry->cfentry_faddr.addr6; + inet_ntop(AF_INET6, addr, remote, sizeof(local)); + break; + case AF_INET: + addr = &entry->cfentry_laddr.addr46.ia46_addr4.s_addr; + inet_ntop(AF_INET, addr, local, sizeof(local)); + addr = &entry->cfentry_faddr.addr46.ia46_addr4.s_addr; + inet_ntop(AF_INET, addr, remote, sizeof(local)); + break; + default: + return; + } + CFIL_LOG(level, "<%s>: lport %d fport %d laddr %s faddr %s", - msg, - (uint64_t)VM_KERNEL_ADDRPERM(so), entry, sockId, - ntohs(entry->cfentry_lport), ntohs(entry->cfentry_fport), local, remote); + msg, + (uint64_t)VM_KERNEL_ADDRPERM(so), entry, sockId, + ntohs(entry->cfentry_lport), ntohs(entry->cfentry_fport), local, remote); } static void cfil_inp_log(int level, struct socket *so, const char* msg) { - struct inpcb *inp = NULL; - char local[MAX_IPv6_STR_LEN+6]; - char remote[MAX_IPv6_STR_LEN+6]; - const void *addr; - - if (so == NULL) { - return; - } - - inp = sotoinpcb(so); - if (inp == NULL) { - return; - } - - local[0] = remote[0] = 0x0; + struct inpcb *inp = NULL; + char local[MAX_IPv6_STR_LEN + 6]; + char remote[MAX_IPv6_STR_LEN + 6]; + const void *addr; + + if (so == NULL) { + return; + } + + inp = sotoinpcb(so); + if (inp == NULL) { + return; + } + + local[0] = remote[0] = 0x0; #if INET6 - if (inp->inp_vflag & INP_IPV6) { - addr = &inp->in6p_laddr.s6_addr32; - inet_ntop(AF_INET6, addr, local, sizeof(local)); - addr = &inp->in6p_faddr.s6_addr32; - inet_ntop(AF_INET6, addr, remote, sizeof(local)); - } else + if (inp->inp_vflag & INP_IPV6) { + addr = &inp->in6p_laddr.s6_addr32; + inet_ntop(AF_INET6, addr, local, sizeof(local)); + addr = &inp->in6p_faddr.s6_addr32; + inet_ntop(AF_INET6, addr, remote, sizeof(local)); + } else #endif /* INET6 */ - { - addr = &inp->inp_laddr.s_addr; - inet_ntop(AF_INET, addr, local, sizeof(local)); - addr = &inp->inp_faddr.s_addr; - inet_ntop(AF_INET, addr, remote, sizeof(local)); - } - - if (so->so_cfil != NULL) + { + addr = &inp->inp_laddr.s_addr; + inet_ntop(AF_INET, addr, local, sizeof(local)); + addr = &inp->inp_faddr.s_addr; + inet_ntop(AF_INET, addr, remote, sizeof(local)); + } + + if (so->so_cfil != NULL) { CFIL_LOG(level, "<%s>: <%s so %llx - flags 0x%x 0x%x, sockID %llu> lport %d fport %d laddr %s faddr %s", - msg, IS_UDP(so) ? "UDP" : "TCP", - (uint64_t)VM_KERNEL_ADDRPERM(so), inp->inp_flags, inp->inp_socket->so_flags, so->so_cfil->cfi_sock_id, - ntohs(inp->inp_lport), ntohs(inp->inp_fport), local, remote); - else + msg, IS_UDP(so) ? "UDP" : "TCP", + (uint64_t)VM_KERNEL_ADDRPERM(so), inp->inp_flags, inp->inp_socket->so_flags, so->so_cfil->cfi_sock_id, + ntohs(inp->inp_lport), ntohs(inp->inp_fport), local, remote); + } else { CFIL_LOG(level, "<%s>: <%s so %llx - flags 0x%x 0x%x> lport %d fport %d laddr %s faddr %s", - msg, IS_UDP(so) ? "UDP" : "TCP", - (uint64_t)VM_KERNEL_ADDRPERM(so), inp->inp_flags, inp->inp_socket->so_flags, - ntohs(inp->inp_lport), ntohs(inp->inp_fport), local, remote); + msg, IS_UDP(so) ? 
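The log helpers format both endpoints with inet_ntop(). Note that they pass sizeof(local) as the size for the remote buffer as well; that is harmless only because both arrays are the same MAX_IPv6_STR_LEN + 6 bytes. A user-space sketch that sizes each call to its own buffer:

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static void
log_pair(const struct in6_addr *laddr, const struct in6_addr *faddr)
{
	char local[INET6_ADDRSTRLEN];
	char remote[INET6_ADDRSTRLEN];

	inet_ntop(AF_INET6, laddr, local, sizeof(local));
	inet_ntop(AF_INET6, faddr, remote, sizeof(remote));
	printf("laddr %s faddr %s\n", local, remote);
}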
"UDP" : "TCP", + (uint64_t)VM_KERNEL_ADDRPERM(so), inp->inp_flags, inp->inp_socket->so_flags, + ntohs(inp->inp_lport), ntohs(inp->inp_fport), local, remote); + } } static void cfil_info_log(int level, struct cfil_info *cfil_info, const char* msg) { - if (cfil_info == NULL) + if (cfil_info == NULL) { return; + } - if (cfil_info->cfi_hash_entry != NULL) + if (cfil_info->cfi_hash_entry != NULL) { cfil_hash_entry_log(level, cfil_info->cfi_so, cfil_info->cfi_hash_entry, cfil_info->cfi_sock_id, msg); - else + } else { cfil_inp_log(level, cfil_info->cfi_so, msg); + } } errno_t cfil_db_init(struct socket *so) { - errno_t error = 0; - struct cfil_db *db = NULL; - - CFIL_LOG(LOG_INFO, ""); - - db = zalloc(cfil_db_zone); - if (db == NULL) { - error = ENOMEM; - goto done; - } - bzero(db, sizeof(struct cfil_db)); - db->cfdb_so = so; - db->cfdb_hashbase = hashinit(CFILHASHSIZE, M_CFIL, &db->cfdb_hashmask); - if (db->cfdb_hashbase == NULL) { - zfree(cfil_db_zone, db); - db = NULL; - error = ENOMEM; - goto done; - } - - so->so_cfil_db = db; + errno_t error = 0; + struct cfil_db *db = NULL; + + CFIL_LOG(LOG_INFO, ""); + + db = zalloc(cfil_db_zone); + if (db == NULL) { + error = ENOMEM; + goto done; + } + bzero(db, sizeof(struct cfil_db)); + db->cfdb_so = so; + db->cfdb_hashbase = hashinit(CFILHASHSIZE, M_CFIL, &db->cfdb_hashmask); + if (db->cfdb_hashbase == NULL) { + zfree(cfil_db_zone, db); + db = NULL; + error = ENOMEM; + goto done; + } + + so->so_cfil_db = db; done: - return (error); + return error; } void cfil_db_free(struct socket *so) { - struct cfil_hash_entry *entry = NULL; - struct cfil_hash_entry *temp_entry = NULL; - struct cfilhashhead *cfilhash = NULL; - struct cfil_db *db = NULL; - - CFIL_LOG(LOG_INFO, ""); - - if (so == NULL || so->so_cfil_db == NULL) { - return; - } - db = so->so_cfil_db; + struct cfil_hash_entry *entry = NULL; + struct cfil_hash_entry *temp_entry = NULL; + struct cfilhashhead *cfilhash = NULL; + struct cfil_db *db = NULL; + + CFIL_LOG(LOG_INFO, ""); + + if (so == NULL || so->so_cfil_db == NULL) { + return; + } + db = so->so_cfil_db; #if LIFECYCLE_DEBUG CFIL_LOG(LOG_ERR, "CFIL: LIFECYCLE: freeing db (count == %d)", - (uint64_t)VM_KERNEL_ADDRPERM(so), db, db->cfdb_count); + (uint64_t)VM_KERNEL_ADDRPERM(so), db, db->cfdb_count); #endif - for (int i = 0; i < CFILHASHSIZE; i++) { - cfilhash = &db->cfdb_hashbase[i]; - LIST_FOREACH_SAFE(entry, cfilhash, cfentry_link, temp_entry) { - if (entry->cfentry_cfil != NULL) { + for (int i = 0; i < CFILHASHSIZE; i++) { + cfilhash = &db->cfdb_hashbase[i]; + LIST_FOREACH_SAFE(entry, cfilhash, cfentry_link, temp_entry) { + if (entry->cfentry_cfil != NULL) { #if LIFECYCLE_DEBUG cfil_info_log(LOG_ERR, entry->cfentry_cfil, "CFIL: LIFECYCLE: DB FREE CLEAN UP"); #endif - cfil_info_free(entry->cfentry_cfil); - OSIncrementAtomic(&cfil_stats.cfs_sock_detached); - entry->cfentry_cfil = NULL; - } - - cfil_db_delete_entry(db, entry); - if (so->so_flags & SOF_CONTENT_FILTER) { - if (db->cfdb_count == 0) - so->so_flags &= ~SOF_CONTENT_FILTER; - VERIFY(so->so_usecount > 0); - so->so_usecount--; - } - } - } - - // Make sure all entries are cleaned up! 
- VERIFY(db->cfdb_count == 0); + cfil_info_free(entry->cfentry_cfil); + OSIncrementAtomic(&cfil_stats.cfs_sock_detached); + entry->cfentry_cfil = NULL; + } + + cfil_db_delete_entry(db, entry); + if (so->so_flags & SOF_CONTENT_FILTER) { + if (db->cfdb_count == 0) { + so->so_flags &= ~SOF_CONTENT_FILTER; + } + VERIFY(so->so_usecount > 0); + so->so_usecount--; + } + } + } + + // Make sure all entries are cleaned up! + VERIFY(db->cfdb_count == 0); #if LIFECYCLE_DEBUG - CFIL_LOG(LOG_ERR, "CFIL: LIFECYCLE: so usecount %d", so->so_usecount); + CFIL_LOG(LOG_ERR, "CFIL: LIFECYCLE: so usecount %d", so->so_usecount); #endif - FREE(db->cfdb_hashbase, M_CFIL); - zfree(cfil_db_zone, db); - so->so_cfil_db = NULL; + FREE(db->cfdb_hashbase, M_CFIL); + zfree(cfil_db_zone, db); + so->so_cfil_db = NULL; } static bool fill_cfil_hash_entry_from_address(struct cfil_hash_entry *entry, bool isLocal, struct sockaddr *addr) { - struct sockaddr_in *sin = NULL; - struct sockaddr_in6 *sin6 = NULL; - - if (entry == NULL || addr == NULL) { - return FALSE; - } - - switch (addr->sa_family) { - case AF_INET: - sin = satosin(addr); - if (sin->sin_len != sizeof(*sin)) { - return FALSE; - } - if (isLocal == TRUE) { - entry->cfentry_lport = sin->sin_port; - entry->cfentry_laddr.addr46.ia46_addr4.s_addr = sin->sin_addr.s_addr; - } else { - entry->cfentry_fport = sin->sin_port; - entry->cfentry_faddr.addr46.ia46_addr4.s_addr = sin->sin_addr.s_addr; - } - entry->cfentry_family = AF_INET; - return TRUE; - case AF_INET6: - sin6 = satosin6(addr); - if (sin6->sin6_len != sizeof(*sin6)) { - return FALSE; - } - if (isLocal == TRUE) { - entry->cfentry_lport = sin6->sin6_port; - entry->cfentry_laddr.addr6 = sin6->sin6_addr; - } else { - entry->cfentry_fport = sin6->sin6_port; - entry->cfentry_faddr.addr6 = sin6->sin6_addr; - } - entry->cfentry_family = AF_INET6; - return TRUE; - default: - return FALSE; - } + struct sockaddr_in *sin = NULL; + struct sockaddr_in6 *sin6 = NULL; + + if (entry == NULL || addr == NULL) { + return FALSE; + } + + switch (addr->sa_family) { + case AF_INET: + sin = satosin(addr); + if (sin->sin_len != sizeof(*sin)) { + return FALSE; + } + if (isLocal == TRUE) { + entry->cfentry_lport = sin->sin_port; + entry->cfentry_laddr.addr46.ia46_addr4.s_addr = sin->sin_addr.s_addr; + } else { + entry->cfentry_fport = sin->sin_port; + entry->cfentry_faddr.addr46.ia46_addr4.s_addr = sin->sin_addr.s_addr; + } + entry->cfentry_family = AF_INET; + return TRUE; + case AF_INET6: + sin6 = satosin6(addr); + if (sin6->sin6_len != sizeof(*sin6)) { + return FALSE; + } + if (isLocal == TRUE) { + entry->cfentry_lport = sin6->sin6_port; + entry->cfentry_laddr.addr6 = sin6->sin6_addr; + } else { + entry->cfentry_fport = sin6->sin6_port; + entry->cfentry_faddr.addr6 = sin6->sin6_addr; + } + entry->cfentry_family = AF_INET6; + return TRUE; + default: + return FALSE; + } } static bool fill_cfil_hash_entry_from_inp(struct cfil_hash_entry *entry, bool isLocal, struct inpcb *inp) { - if (entry == NULL || inp == NULL) { - return FALSE; - } - - if (inp->inp_vflag & INP_IPV4) { - if (isLocal == TRUE) { - entry->cfentry_lport = inp->inp_lport; - entry->cfentry_laddr.addr46.ia46_addr4.s_addr = inp->inp_laddr.s_addr; - } else { - entry->cfentry_fport = inp->inp_fport; - entry->cfentry_faddr.addr46.ia46_addr4.s_addr = inp->inp_faddr.s_addr; - } - entry->cfentry_family = AF_INET; - return TRUE; - } else if (inp->inp_vflag & INP_IPV6) { - if (isLocal == TRUE) { - entry->cfentry_lport = inp->inp_lport; - entry->cfentry_laddr.addr6 = inp->in6p_laddr; - } 
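fill_cfil_hash_entry_from_address() trusts nothing about the caller's sockaddr: it switches on sa_family and rejects any sockaddr whose sa_len does not match the expected structure size before copying the port (kept in network byte order) and the address into the flow entry. The same validation in plain userland C, with a hypothetical flow_tuple standing in for struct cfil_hash_entry:

	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct flow_tuple {
		int             family;
		uint16_t        port;     /* network byte order, as in the entry */
		struct in_addr  addr4;
		struct in6_addr addr6;
	};

	static bool
	tuple_from_sockaddr(struct flow_tuple *t, const struct sockaddr *sa)
	{
		if (sa->sa_family == AF_INET && sa->sa_len == sizeof(struct sockaddr_in)) {
			const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
			t->family = AF_INET;
			t->port = sin->sin_port;
			t->addr4 = sin->sin_addr;
			return true;
		}
		if (sa->sa_family == AF_INET6 && sa->sa_len == sizeof(struct sockaddr_in6)) {
			const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;
			t->family = AF_INET6;
			t->port = sin6->sin6_port;
			t->addr6 = sin6->sin6_addr;
			return true;
		}
		return false;             /* unknown family or short sockaddr */
	}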
else { - entry->cfentry_fport = inp->inp_fport; - entry->cfentry_faddr.addr6 = inp->in6p_faddr; - } - entry->cfentry_family = AF_INET6; - return TRUE; - } - return FALSE; + if (entry == NULL || inp == NULL) { + return FALSE; + } + + if (inp->inp_vflag & INP_IPV4) { + if (isLocal == TRUE) { + entry->cfentry_lport = inp->inp_lport; + entry->cfentry_laddr.addr46.ia46_addr4.s_addr = inp->inp_laddr.s_addr; + } else { + entry->cfentry_fport = inp->inp_fport; + entry->cfentry_faddr.addr46.ia46_addr4.s_addr = inp->inp_faddr.s_addr; + } + entry->cfentry_family = AF_INET; + return TRUE; + } else if (inp->inp_vflag & INP_IPV6) { + if (isLocal == TRUE) { + entry->cfentry_lport = inp->inp_lport; + entry->cfentry_laddr.addr6 = inp->in6p_laddr; + } else { + entry->cfentry_fport = inp->inp_fport; + entry->cfentry_faddr.addr6 = inp->in6p_faddr; + } + entry->cfentry_family = AF_INET6; + return TRUE; + } + return FALSE; } bool @@ -4944,26 +5098,26 @@ check_port(struct sockaddr *addr, u_short port) } switch (addr->sa_family) { - case AF_INET: - sin = satosin(addr); - if (sin->sin_len != sizeof(*sin)) { - return FALSE; - } - if (port == ntohs(sin->sin_port)) { - return TRUE; - } - break; - case AF_INET6: - sin6 = satosin6(addr); - if (sin6->sin6_len != sizeof(*sin6)) { - return FALSE; - } - if (port == ntohs(sin6->sin6_port)) { - return TRUE; - } - break; - default: - break; + case AF_INET: + sin = satosin(addr); + if (sin->sin_len != sizeof(*sin)) { + return FALSE; + } + if (port == ntohs(sin->sin_port)) { + return TRUE; + } + break; + case AF_INET6: + sin6 = satosin6(addr); + if (sin6->sin6_len != sizeof(*sin6)) { + return FALSE; + } + if (port == ntohs(sin6->sin6_port)) { + return TRUE; + } + break; + default: + break; } return FALSE; } @@ -4984,191 +5138,194 @@ cfil_db_lookup_entry_with_sockid(struct cfil_db *db, u_int64_t sock_id) LIST_FOREACH(nextentry, cfilhash, cfentry_link) { if (nextentry->cfentry_cfil != NULL && - nextentry->cfentry_cfil->cfi_sock_id == sock_id) { + nextentry->cfentry_cfil->cfi_sock_id == sock_id) { CFIL_LOG(LOG_DEBUG, "CFIL: UDP matched ", - (uint64_t)VM_KERNEL_ADDRPERM(db->cfdb_so), nextentry->cfentry_cfil->cfi_sock_id, flowhash); + (uint64_t)VM_KERNEL_ADDRPERM(db->cfdb_so), nextentry->cfentry_cfil->cfi_sock_id, flowhash); cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, nextentry, 0, "CFIL: UDP found entry"); return nextentry; } } CFIL_LOG(LOG_DEBUG, "CFIL: UDP NOT matched ", - (uint64_t)VM_KERNEL_ADDRPERM(db->cfdb_so), sock_id, flowhash); + (uint64_t)VM_KERNEL_ADDRPERM(db->cfdb_so), sock_id, flowhash); return NULL; } struct cfil_hash_entry * cfil_db_lookup_entry(struct cfil_db *db, struct sockaddr *local, struct sockaddr *remote) { - struct cfil_hash_entry matchentry; - struct cfil_hash_entry *nextentry = NULL; - struct inpcb *inp = sotoinpcb(db->cfdb_so); - u_int32_t hashkey_faddr = 0, hashkey_laddr = 0; - int inp_hash_element = 0; - struct cfilhashhead *cfilhash = NULL; - - CFIL_LOG(LOG_INFO, ""); - - if (inp == NULL) { - goto done; - } - - if (local != NULL) { - fill_cfil_hash_entry_from_address(&matchentry, TRUE, local); - } else { - fill_cfil_hash_entry_from_inp(&matchentry, TRUE, inp); - } - if (remote != NULL) { - fill_cfil_hash_entry_from_address(&matchentry, FALSE, remote); - } else { - fill_cfil_hash_entry_from_inp(&matchentry, FALSE, inp); - } - + struct cfil_hash_entry matchentry; + struct cfil_hash_entry *nextentry = NULL; + struct inpcb *inp = sotoinpcb(db->cfdb_so); + u_int32_t hashkey_faddr = 0, hashkey_laddr = 0; + int inp_hash_element = 0; + struct cfilhashhead 
*cfilhash = NULL; + + CFIL_LOG(LOG_INFO, ""); + + if (inp == NULL) { + goto done; + } + + if (local != NULL) { + fill_cfil_hash_entry_from_address(&matchentry, TRUE, local); + } else { + fill_cfil_hash_entry_from_inp(&matchentry, TRUE, inp); + } + if (remote != NULL) { + fill_cfil_hash_entry_from_address(&matchentry, FALSE, remote); + } else { + fill_cfil_hash_entry_from_inp(&matchentry, FALSE, inp); + } + #if INET6 - if (inp->inp_vflag & INP_IPV6) { - hashkey_faddr = matchentry.cfentry_faddr.addr6.s6_addr32[3]; - hashkey_laddr = matchentry.cfentry_laddr.addr6.s6_addr32[3]; - } else + if (inp->inp_vflag & INP_IPV6) { + hashkey_faddr = matchentry.cfentry_faddr.addr6.s6_addr32[3]; + hashkey_laddr = matchentry.cfentry_laddr.addr6.s6_addr32[3]; + } else #endif /* INET6 */ - { - hashkey_faddr = matchentry.cfentry_faddr.addr46.ia46_addr4.s_addr; - hashkey_laddr = matchentry.cfentry_laddr.addr46.ia46_addr4.s_addr; - } - - inp_hash_element = CFIL_HASH(hashkey_laddr, hashkey_faddr, - matchentry.cfentry_lport, matchentry.cfentry_fport); - inp_hash_element &= db->cfdb_hashmask; - - cfilhash = &db->cfdb_hashbase[inp_hash_element]; - - LIST_FOREACH(nextentry, cfilhash, cfentry_link) { - + { + hashkey_faddr = matchentry.cfentry_faddr.addr46.ia46_addr4.s_addr; + hashkey_laddr = matchentry.cfentry_laddr.addr46.ia46_addr4.s_addr; + } + + inp_hash_element = CFIL_HASH(hashkey_laddr, hashkey_faddr, + matchentry.cfentry_lport, matchentry.cfentry_fport); + inp_hash_element &= db->cfdb_hashmask; + + cfilhash = &db->cfdb_hashbase[inp_hash_element]; + + LIST_FOREACH(nextentry, cfilhash, cfentry_link) { #if INET6 - if ((inp->inp_vflag & INP_IPV6) && - nextentry->cfentry_lport == matchentry.cfentry_lport && - nextentry->cfentry_fport == matchentry.cfentry_fport && - IN6_ARE_ADDR_EQUAL(&nextentry->cfentry_laddr.addr6, &matchentry.cfentry_laddr.addr6) && - IN6_ARE_ADDR_EQUAL(&nextentry->cfentry_faddr.addr6, &matchentry.cfentry_faddr.addr6)) { + if ((inp->inp_vflag & INP_IPV6) && + nextentry->cfentry_lport == matchentry.cfentry_lport && + nextentry->cfentry_fport == matchentry.cfentry_fport && + IN6_ARE_ADDR_EQUAL(&nextentry->cfentry_laddr.addr6, &matchentry.cfentry_laddr.addr6) && + IN6_ARE_ADDR_EQUAL(&nextentry->cfentry_faddr.addr6, &matchentry.cfentry_faddr.addr6)) { #if DATA_DEBUG - cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, &matchentry, 0, "CFIL LOOKUP ENTRY: UDP V6 found entry"); + cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, &matchentry, 0, "CFIL LOOKUP ENTRY: UDP V6 found entry"); #endif - return nextentry; - } else + return nextentry; + } else #endif /* INET6 */ - if (nextentry->cfentry_lport == matchentry.cfentry_lport && - nextentry->cfentry_fport == matchentry.cfentry_fport && - nextentry->cfentry_laddr.addr46.ia46_addr4.s_addr == matchentry.cfentry_laddr.addr46.ia46_addr4.s_addr && - nextentry->cfentry_faddr.addr46.ia46_addr4.s_addr == matchentry.cfentry_faddr.addr46.ia46_addr4.s_addr) { + if (nextentry->cfentry_lport == matchentry.cfentry_lport && + nextentry->cfentry_fport == matchentry.cfentry_fport && + nextentry->cfentry_laddr.addr46.ia46_addr4.s_addr == matchentry.cfentry_laddr.addr46.ia46_addr4.s_addr && + nextentry->cfentry_faddr.addr46.ia46_addr4.s_addr == matchentry.cfentry_faddr.addr46.ia46_addr4.s_addr) { #if DATA_DEBUG - cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, &matchentry, 0, "CFIL LOOKUP ENTRY: UDP V4 found entry"); + cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, &matchentry, 0, "CFIL LOOKUP ENTRY: UDP V4 found entry"); #endif - return nextentry; - } - } - + return nextentry; + } + } + 
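The bucket selection in cfil_db_lookup_entry() hashes the four-tuple and masks the result with cfdb_hashmask; for IPv6 only the last 32 bits of each address (s6_addr32[3]) feed the key, a cheap choice that still spreads typical flows. CFIL_HASH itself is defined earlier in this file; the mixer below only illustrates the shape and is not the kernel's definition:

	#include <stdint.h>

	/* Illustrative word mixer; CFIL_HASH in content_filter.c is the
	 * authoritative definition. */
	static uint32_t
	flow_hash(uint32_t laddr, uint32_t faddr, uint16_t lport, uint16_t fport)
	{
		uint32_t h = laddr ^ faddr ^ ((uint32_t)lport << 16 | fport);

		h ^= h >> 16;             /* fold high bits down */
		h *= 0x85ebca6bU;         /* cheap avalanche step */
		return h;
	}

	/* bucket = flow_hash(...) & cfdb_hashmask; then walk that bucket's list. */

The comparison loop that follows is ordered cheapest-first: the two ports are compared before the full addresses.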
done: #if DATA_DEBUG - cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, &matchentry, 0, "CFIL LOOKUP ENTRY: UDP no entry found"); + cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, &matchentry, 0, "CFIL LOOKUP ENTRY: UDP no entry found"); #endif - return NULL; + return NULL; } void cfil_db_delete_entry(struct cfil_db *db, struct cfil_hash_entry *hash_entry) { - if (hash_entry == NULL) - return; - - LIST_REMOVE(hash_entry, cfentry_link); - zfree(cfil_hash_entry_zone, hash_entry); - db->cfdb_count--; - if (db->cfdb_only_entry == hash_entry) - db->cfdb_only_entry = NULL; + if (hash_entry == NULL) { + return; + } + if (db == NULL || db->cfdb_count == 0) { + return; + } + db->cfdb_count--; + if (db->cfdb_only_entry == hash_entry) { + db->cfdb_only_entry = NULL; + } + LIST_REMOVE(hash_entry, cfentry_link); + zfree(cfil_hash_entry_zone, hash_entry); } struct cfil_hash_entry * cfil_db_add_entry(struct cfil_db *db, struct sockaddr *local, struct sockaddr *remote) { - struct cfil_hash_entry *entry = NULL; - struct inpcb *inp = sotoinpcb(db->cfdb_so); - u_int32_t hashkey_faddr = 0, hashkey_laddr = 0; - int inp_hash_element = 0; - struct cfilhashhead *cfilhash = NULL; - - CFIL_LOG(LOG_INFO, ""); - - if (inp == NULL) { - goto done; - } - - entry = zalloc(cfil_hash_entry_zone); - if (entry == NULL) { - goto done; - } - bzero(entry, sizeof(struct cfil_hash_entry)); - - if (local != NULL) { - fill_cfil_hash_entry_from_address(entry, TRUE, local); - } else { - fill_cfil_hash_entry_from_inp(entry, TRUE, inp); - } - if (remote != NULL) { - fill_cfil_hash_entry_from_address(entry, FALSE, remote); - } else { - fill_cfil_hash_entry_from_inp(entry, FALSE, inp); - } - entry->cfentry_lastused = net_uptime(); + struct cfil_hash_entry *entry = NULL; + struct inpcb *inp = sotoinpcb(db->cfdb_so); + u_int32_t hashkey_faddr = 0, hashkey_laddr = 0; + int inp_hash_element = 0; + struct cfilhashhead *cfilhash = NULL; + + CFIL_LOG(LOG_INFO, ""); + + if (inp == NULL) { + goto done; + } + + entry = zalloc(cfil_hash_entry_zone); + if (entry == NULL) { + goto done; + } + bzero(entry, sizeof(struct cfil_hash_entry)); + + if (local != NULL) { + fill_cfil_hash_entry_from_address(entry, TRUE, local); + } else { + fill_cfil_hash_entry_from_inp(entry, TRUE, inp); + } + if (remote != NULL) { + fill_cfil_hash_entry_from_address(entry, FALSE, remote); + } else { + fill_cfil_hash_entry_from_inp(entry, FALSE, inp); + } + entry->cfentry_lastused = net_uptime(); #if INET6 - if (inp->inp_vflag & INP_IPV6) { - hashkey_faddr = entry->cfentry_faddr.addr6.s6_addr32[3]; - hashkey_laddr = entry->cfentry_laddr.addr6.s6_addr32[3]; - } else + if (inp->inp_vflag & INP_IPV6) { + hashkey_faddr = entry->cfentry_faddr.addr6.s6_addr32[3]; + hashkey_laddr = entry->cfentry_laddr.addr6.s6_addr32[3]; + } else #endif /* INET6 */ - { - hashkey_faddr = entry->cfentry_faddr.addr46.ia46_addr4.s_addr; - hashkey_laddr = entry->cfentry_laddr.addr46.ia46_addr4.s_addr; - } - entry->cfentry_flowhash = CFIL_HASH(hashkey_laddr, hashkey_faddr, - entry->cfentry_lport, entry->cfentry_fport); - inp_hash_element = entry->cfentry_flowhash & db->cfdb_hashmask; - - cfilhash = &db->cfdb_hashbase[inp_hash_element]; - - LIST_INSERT_HEAD(cfilhash, entry, cfentry_link); - db->cfdb_count++; + { + hashkey_faddr = entry->cfentry_faddr.addr46.ia46_addr4.s_addr; + hashkey_laddr = entry->cfentry_laddr.addr46.ia46_addr4.s_addr; + } + entry->cfentry_flowhash = CFIL_HASH(hashkey_laddr, hashkey_faddr, + entry->cfentry_lport, entry->cfentry_fport); + inp_hash_element = entry->cfentry_flowhash & 
db->cfdb_hashmask; + + cfilhash = &db->cfdb_hashbase[inp_hash_element]; + + LIST_INSERT_HEAD(cfilhash, entry, cfentry_link); + db->cfdb_count++; db->cfdb_only_entry = entry; cfil_hash_entry_log(LOG_DEBUG, db->cfdb_so, entry, 0, "CFIL: cfil_db_add_entry: ADDED"); - + done: - CFIL_LOG(LOG_DEBUG, "CFIL: UDP total count %d", (uint64_t)VM_KERNEL_ADDRPERM(db->cfdb_so), db->cfdb_count); - return entry; + CFIL_LOG(LOG_DEBUG, "CFIL: UDP total count %d", (uint64_t)VM_KERNEL_ADDRPERM(db->cfdb_so), db->cfdb_count); + return entry; } struct cfil_info * cfil_db_get_cfil_info(struct cfil_db *db, cfil_sock_id_t id) { - struct cfil_hash_entry *hash_entry = NULL; + struct cfil_hash_entry *hash_entry = NULL; - CFIL_LOG(LOG_INFO, ""); + CFIL_LOG(LOG_INFO, ""); - if (db == NULL || id == 0) { - CFIL_LOG(LOG_DEBUG, "CFIL: UDP NULL DB ", - (uint64_t)VM_KERNEL_ADDRPERM(db->cfdb_so), id); - return NULL; - } + if (db == NULL || id == 0) { + CFIL_LOG(LOG_DEBUG, "CFIL: UDP NULL DB ", + (uint64_t)VM_KERNEL_ADDRPERM(db->cfdb_so), id); + return NULL; + } // This is an optimization for connected UDP socket which only has one flow. // No need to do the hash lookup. if (db->cfdb_count == 1) { if (db->cfdb_only_entry && db->cfdb_only_entry->cfentry_cfil && - db->cfdb_only_entry->cfentry_cfil->cfi_sock_id == id) { - return (db->cfdb_only_entry->cfentry_cfil); + db->cfdb_only_entry->cfentry_cfil->cfi_sock_id == id) { + return db->cfdb_only_entry->cfentry_cfil; } } hash_entry = cfil_db_lookup_entry_with_sockid(db, id); - return (hash_entry != NULL ? hash_entry->cfentry_cfil : NULL); + return hash_entry != NULL ? hash_entry->cfentry_cfil : NULL; } struct cfil_hash_entry * @@ -5178,101 +5335,104 @@ cfil_sock_udp_get_flow(struct socket *so, uint32_t filter_control_unit, bool out struct cfil_hash_entry *hash_entry = NULL; errno_t error = 0; - socket_lock_assert_owned(so); + socket_lock_assert_owned(so); // If new socket, allocate cfil db if (so->so_cfil_db == NULL) { if (cfil_db_init(so) != 0) { - return (NULL); + return NULL; } } - // See if flow already exists. - hash_entry = cfil_db_lookup_entry(so->so_cfil_db, local, remote); - if (hash_entry != NULL) { - return (hash_entry); - } + // See if flow already exists. 
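As the comment in cfil_db_get_cfil_info() notes, a connected UDP socket usually owns exactly one flow, so the db keeps a one-element cache, cfdb_only_entry, set in cfil_db_add_entry() and cleared in cfil_db_delete_entry(), that short-circuits the hash walk entirely. The shape of that fast path, with hypothetical names:

	#include <stddef.h>
	#include <stdint.h>

	struct info  { uint64_t sock_id; };
	struct entry { struct info *info; };
	struct db    { unsigned count; struct entry *only_entry; };

	struct info *lookup_by_id(struct db *db, uint64_t id);  /* slow path */

	static struct info *
	db_get_info(struct db *db, uint64_t id)
	{
		/* One flow is the common case: test the cached entry before
		 * paying for the hash lookup. */
		if (db->count == 1 && db->only_entry != NULL &&
		    db->only_entry->info != NULL &&
		    db->only_entry->info->sock_id == id)
			return db->only_entry->info;
		return lookup_by_id(db, id);
	}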
+ hash_entry = cfil_db_lookup_entry(so->so_cfil_db, local, remote); + if (hash_entry != NULL) { + return hash_entry; + } - hash_entry = cfil_db_add_entry(so->so_cfil_db, local, remote); - if (hash_entry == NULL) { - OSIncrementAtomic(&cfil_stats.cfs_sock_attach_no_mem); - CFIL_LOG(LOG_ERR, "CFIL: UDP failed to add entry"); - return (NULL); - } + hash_entry = cfil_db_add_entry(so->so_cfil_db, local, remote); + if (hash_entry == NULL) { + OSIncrementAtomic(&cfil_stats.cfs_sock_attach_no_mem); + CFIL_LOG(LOG_ERR, "CFIL: UDP failed to add entry"); + return NULL; + } - if (cfil_info_alloc(so, hash_entry) == NULL || - hash_entry->cfentry_cfil == NULL) { - cfil_db_delete_entry(so->so_cfil_db, hash_entry); - CFIL_LOG(LOG_ERR, "CFIL: UDP failed to alloc cfil_info"); - OSIncrementAtomic(&cfil_stats.cfs_sock_attach_no_mem); - return (NULL); - } + if (cfil_info_alloc(so, hash_entry) == NULL || + hash_entry->cfentry_cfil == NULL) { + cfil_db_delete_entry(so->so_cfil_db, hash_entry); + CFIL_LOG(LOG_ERR, "CFIL: UDP failed to alloc cfil_info"); + OSIncrementAtomic(&cfil_stats.cfs_sock_attach_no_mem); + return NULL; + } #if LIFECYCLE_DEBUG cfil_info_log(LOG_ERR, hash_entry->cfentry_cfil, "CFIL: LIFECYCLE: ADDED"); #endif - if (cfil_info_attach_unit(so, filter_control_unit, hash_entry->cfentry_cfil) == 0) { + if (cfil_info_attach_unit(so, filter_control_unit, hash_entry->cfentry_cfil) == 0) { + cfil_info_free(hash_entry->cfentry_cfil); + cfil_db_delete_entry(so->so_cfil_db, hash_entry); CFIL_LOG(LOG_ERR, "CFIL: UDP cfil_info_attach_unit(%u) failed", - filter_control_unit); - OSIncrementAtomic(&cfil_stats.cfs_sock_attach_failed); - return (NULL); - } - CFIL_LOG(LOG_DEBUG, "CFIL: UDP filter_control_unit %u sockID %llu attached", - (uint64_t)VM_KERNEL_ADDRPERM(so), - filter_control_unit, hash_entry->cfentry_cfil->cfi_sock_id); - - so->so_flags |= SOF_CONTENT_FILTER; - OSIncrementAtomic(&cfil_stats.cfs_sock_attached); - - /* Hold a reference on the socket for each flow */ - so->so_usecount++; - - error = cfil_dispatch_attach_event(so, hash_entry->cfentry_cfil, filter_control_unit); - /* We can recover from flow control or out of memory errors */ - if (error != 0 && error != ENOBUFS && error != ENOMEM) - return (NULL); - - CFIL_INFO_VERIFY(hash_entry->cfentry_cfil); - return (hash_entry); + filter_control_unit); + OSIncrementAtomic(&cfil_stats.cfs_sock_attach_failed); + return NULL; + } + CFIL_LOG(LOG_DEBUG, "CFIL: UDP filter_control_unit %u sockID %llu attached", + (uint64_t)VM_KERNEL_ADDRPERM(so), + filter_control_unit, hash_entry->cfentry_cfil->cfi_sock_id); + + so->so_flags |= SOF_CONTENT_FILTER; + OSIncrementAtomic(&cfil_stats.cfs_sock_attached); + + /* Hold a reference on the socket for each flow */ + so->so_usecount++; + + error = cfil_dispatch_attach_event(so, hash_entry->cfentry_cfil, filter_control_unit); + /* We can recover from flow control or out of memory errors */ + if (error != 0 && error != ENOBUFS && error != ENOMEM) { + return NULL; + } + + CFIL_INFO_VERIFY(hash_entry->cfentry_cfil); + return hash_entry; } errno_t cfil_sock_udp_handle_data(bool outgoing, struct socket *so, - struct sockaddr *local, struct sockaddr *remote, - struct mbuf *data, struct mbuf *control, uint32_t flags) + struct sockaddr *local, struct sockaddr *remote, + struct mbuf *data, struct mbuf *control, uint32_t flags) { #pragma unused(outgoing, so, local, remote, data, control, flags) - errno_t error = 0; - uint32_t filter_control_unit; + errno_t error = 0; + uint32_t filter_control_unit; struct cfil_hash_entry *hash_entry 
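cfil_sock_udp_get_flow() is a lookup-or-create ladder, and this revision tightens its error handling: when cfil_info_attach_unit() fails, the new code frees the cfil_info and deletes the hash entry instead of leaving them behind. Condensed into a userland sketch in which every name is hypothetical:

	#include <stddef.h>

	struct tuple;
	struct info;
	struct db;
	struct entry { struct info *info; };
	struct sock  { struct db *db; int refs; };

	int           db_init(struct sock *so);
	struct entry *db_lookup(struct db *db, const struct tuple *t);
	struct entry *db_add(struct db *db, const struct tuple *t);
	void          db_delete(struct db *db, struct entry *e);
	struct info  *info_alloc(struct sock *so, struct entry *e);
	void          info_free(struct info *ci);
	int           attach_unit(struct sock *so, struct entry *e);

	/* Lookup-or-create with unwind: every failure undoes exactly the
	 * steps already taken. */
	static struct entry *
	get_flow(struct sock *so, const struct tuple *t)
	{
		struct entry *e;

		if (so->db == NULL && db_init(so) != 0)
			return NULL;                  /* lazy init failed */
		if ((e = db_lookup(so->db, t)) != NULL)
			return e;                     /* existing flow */
		if ((e = db_add(so->db, t)) == NULL)
			return NULL;
		if (info_alloc(so, e) == NULL) {
			db_delete(so->db, e);         /* undo the add */
			return NULL;
		}
		if (attach_unit(so, e) == 0) {
			info_free(e->info);           /* undo the alloc... */
			db_delete(so->db, e);         /* ...and the add */
			return NULL;
		}
		so->refs++;                           /* one socket ref per flow */
		return e;
	}

Each successful flow also takes one so_usecount reference on the socket, which cfil_db_free() and the UDP garbage collector drop again when the flow dies.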
= NULL; struct cfil_info *cfil_info = NULL; - socket_lock_assert_owned(so); - - if (cfil_active_count == 0) { - CFIL_LOG(LOG_DEBUG, "CFIL: UDP no active filter"); - OSIncrementAtomic(&cfil_stats.cfs_sock_attach_in_vain); - return (error); - } - - filter_control_unit = necp_socket_get_content_filter_control_unit(so); - if (filter_control_unit == 0) { - CFIL_LOG(LOG_DEBUG, "CFIL: UDP failed to get control unit"); - return (error); - } - - if ((filter_control_unit & NECP_MASK_USERSPACE_ONLY) != 0) { - CFIL_LOG(LOG_DEBUG, "CFIL: UDP user space only"); - OSIncrementAtomic(&cfil_stats.cfs_sock_userspace_only); - return (error); - } - - hash_entry = cfil_sock_udp_get_flow(so, filter_control_unit, outgoing, local, remote); - if (hash_entry == NULL || hash_entry->cfentry_cfil == NULL) { + socket_lock_assert_owned(so); + + if (cfil_active_count == 0) { + CFIL_LOG(LOG_DEBUG, "CFIL: UDP no active filter"); + OSIncrementAtomic(&cfil_stats.cfs_sock_attach_in_vain); + return error; + } + + filter_control_unit = necp_socket_get_content_filter_control_unit(so); + if (filter_control_unit == 0) { + CFIL_LOG(LOG_DEBUG, "CFIL: UDP failed to get control unit"); + return error; + } + + if ((filter_control_unit & NECP_MASK_USERSPACE_ONLY) != 0) { + CFIL_LOG(LOG_DEBUG, "CFIL: UDP user space only"); + OSIncrementAtomic(&cfil_stats.cfs_sock_userspace_only); + return error; + } + + hash_entry = cfil_sock_udp_get_flow(so, filter_control_unit, outgoing, local, remote); + if (hash_entry == NULL || hash_entry->cfentry_cfil == NULL) { CFIL_LOG(LOG_ERR, "CFIL: Falied to create UDP flow"); - return (EPIPE); - } + return EPIPE; + } // Update last used timestamp, this is for flow Idle TO hash_entry->cfentry_lastused = net_uptime(); cfil_info = hash_entry->cfentry_cfil; @@ -5281,20 +5441,20 @@ cfil_sock_udp_handle_data(bool outgoing, struct socket *so, #if DATA_DEBUG cfil_hash_entry_log(LOG_DEBUG, so, hash_entry, 0, "CFIL: UDP DROP"); #endif - return (EPIPE); + return EPIPE; } if (control != NULL) { OSIncrementAtomic(&cfil_stats.cfs_data_in_control); } if (data->m_type == MT_OOBDATA) { CFIL_LOG(LOG_ERR, "so %llx MSG_OOB", - (uint64_t)VM_KERNEL_ADDRPERM(so)); + (uint64_t)VM_KERNEL_ADDRPERM(so)); OSIncrementAtomic(&cfil_stats.cfs_data_in_oob); } error = cfil_data_common(so, cfil_info, outgoing, remote, data, control, flags); - return (error); + return error; } /* @@ -5316,15 +5476,16 @@ cfil_filters_udp_attached(struct socket *so, bool need_wait) errno_t error = 0; int kcunit; int attached = 0; + uint64_t sock_flow_id = 0; socket_lock_assert_owned(so); if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil_db != NULL) { - - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); db = so->so_cfil_db; @@ -5333,9 +5494,7 @@ cfil_filters_udp_attached(struct socket *so, bool need_wait) cfilhash = &db->cfdb_hashbase[i]; LIST_FOREACH_SAFE(hash_entry, cfilhash, cfentry_link, temp_hash_entry) { - if (hash_entry->cfentry_cfil != NULL) { - cfil_info = hash_entry->cfentry_cfil; for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { entry = &cfil_info->cfi_entries[kcunit - 1]; @@ -5345,10 +5504,12 @@ cfil_filters_udp_attached(struct socket *so, bool need_wait) continue; } - if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) + if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) { continue; - if ((entry->cfe_flags & 
CFEF_CFIL_DETACHED) != 0) + } + if ((entry->cfe_flags & CFEF_CFIL_DETACHED) != 0) { continue; + } attached = 1; @@ -5359,12 +5520,22 @@ cfil_filters_udp_attached(struct socket *so, bool need_wait) ts.tv_sec = cfil_close_wait_timeout / 1000; ts.tv_nsec = (cfil_close_wait_timeout % 1000) * - NSEC_PER_USEC * 1000; + NSEC_PER_USEC * 1000; OSIncrementAtomic(&cfil_stats.cfs_close_wait); cfil_info->cfi_flags |= CFIF_CLOSE_WAIT; + sock_flow_id = cfil_info->cfi_sock_id; + error = msleep((caddr_t)cfil_info, mutex_held, - PSOCK | PCATCH, "cfil_filters_udp_attached", &ts); + PSOCK | PCATCH, "cfil_filters_udp_attached", &ts); + + // Woke up from sleep, validate if cfil_info is still valid + if (so->so_cfil_db == NULL || + (cfil_info != cfil_db_get_cfil_info(so->so_cfil_db, sock_flow_id))) { + // cfil_info is not valid, do not continue + goto done; + } + cfil_info->cfi_flags &= ~CFIF_CLOSE_WAIT; #if LIFECYCLE_DEBUG @@ -5380,7 +5551,6 @@ cfil_filters_udp_attached(struct socket *so, bool need_wait) cfil_info_log(LOG_ERR, cfil_info, "CFIL: LIFECYCLE: WAIT FOR FLOW TIMED OUT, FORCE DETACH"); #endif entry->cfe_flags |= CFEF_CFIL_DETACHED; - break; } } goto done; @@ -5391,7 +5561,7 @@ cfil_filters_udp_attached(struct socket *so, bool need_wait) } done: - return (attached); + return attached; } int32_t @@ -5409,28 +5579,28 @@ cfil_sock_udp_data_pending(struct sockbuf *sb, bool check_thread) socket_lock_assert_owned(so); if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil_db != NULL && - (check_thread == FALSE || so->so_snd.sb_cfil_thread != current_thread())) { - + (check_thread == FALSE || so->so_snd.sb_cfil_thread != current_thread())) { db = so->so_cfil_db; for (int i = 0; i < CFILHASHSIZE; i++) { cfilhash = &db->cfdb_hashbase[i]; LIST_FOREACH_SAFE(hash_entry, cfilhash, cfentry_link, temp_hash_entry) { - if (hash_entry->cfentry_cfil != NULL) { - if ((sb->sb_flags & SB_RECV) == 0) + if ((sb->sb_flags & SB_RECV) == 0) { cfi_buf = &hash_entry->cfentry_cfil->cfi_snd; - else + } else { cfi_buf = &hash_entry->cfentry_cfil->cfi_rcv; + } pending = cfi_buf->cfi_pending_last - cfi_buf->cfi_pending_first; /* * If we are limited by the "chars of mbufs used" roughly * adjust so we won't overcommit */ - if ((uint64_t)cfi_buf->cfi_pending_mbcnt > pending) + if ((uint64_t)cfi_buf->cfi_pending_mbcnt > pending) { pending = cfi_buf->cfi_pending_mbcnt; + } total_pending += pending; } @@ -5440,8 +5610,8 @@ cfil_sock_udp_data_pending(struct sockbuf *sb, bool check_thread) VERIFY(total_pending < INT32_MAX); #if DATA_DEBUG CFIL_LOG(LOG_DEBUG, "CFIL: total pending %llu ", - (uint64_t)VM_KERNEL_ADDRPERM(so), - total_pending, check_thread); + (uint64_t)VM_KERNEL_ADDRPERM(so), + total_pending, check_thread); #endif } @@ -5463,14 +5633,12 @@ cfil_sock_udp_notify_shutdown(struct socket *so, int how, int drop_flag, int shu socket_lock_assert_owned(so); if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil_db != NULL) { - db = so->so_cfil_db; for (int i = 0; i < CFILHASHSIZE; i++) { cfilhash = &db->cfdb_hashbase[i]; LIST_FOREACH_SAFE(hash_entry, cfilhash, cfentry_link, temp_hash_entry) { - if (hash_entry->cfentry_cfil != NULL) { cfil_info = hash_entry->cfentry_cfil; @@ -5506,7 +5674,7 @@ cfil_sock_udp_notify_shutdown(struct socket *so, int how, int drop_flag, int shu if (done_count == 0) { error = ENOTCONN; } - return (error); + return error; } int @@ -5514,13 +5682,14 @@ cfil_sock_udp_shutdown(struct socket *so, int *how) { int error = 0; - if ((so->so_flags & SOF_CONTENT_FILTER) == 0 || (so->so_cfil_db == NULL)) + if 
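The close-wait change here is worth spelling out: before msleep() the thread now captures cfi_sock_id, and after waking it refuses to dereference cfil_info unless that id still resolves through the socket's db, because the flow can be freed while the thread sleeps. The same sleep-then-revalidate idiom with a POSIX condition variable (names hypothetical):

	#include <pthread.h>
	#include <stddef.h>
	#include <stdint.h>

	struct db;                                /* opaque here */
	struct info { uint64_t id; int flags; };
	struct info *db_find(struct db *db, uint64_t id);

	#define CLOSE_WAIT 0x1                    /* stands in for CFIF_CLOSE_WAIT */

	/*
	 * Sleep until a filter detaches, then revalidate by stable id, never
	 * by the raw pointer: the flow may have been freed while we slept.
	 */
	static void
	close_wait(struct db *db, struct info *ci,
	    pthread_mutex_t *mtx, pthread_cond_t *cv)
	{
		uint64_t id = ci->id;             /* capture before sleeping */

		pthread_cond_wait(cv, mtx);       /* mtx held, as with msleep() */
		if (db == NULL || db_find(db, id) != ci)
			return;                   /* flow died while we slept */
		ci->flags &= ~CLOSE_WAIT;         /* safe: still the same flow */
	}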
((so->so_flags & SOF_CONTENT_FILTER) == 0 || (so->so_cfil_db == NULL)) { goto done; + } socket_lock_assert_owned(so); CFIL_LOG(LOG_INFO, "so %llx how %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), *how); + (uint64_t)VM_KERNEL_ADDRPERM(so), *how); /* * Check the state of the socket before the content filter @@ -5541,16 +5710,18 @@ cfil_sock_udp_shutdown(struct socket *so, int *how) */ if (*how != SHUT_WR) { error = cfil_sock_udp_notify_shutdown(so, SHUT_RD, CFIF_DROP, CFIF_SHUT_RD); - if (error != 0) + if (error != 0) { goto done; + } } /* * shutdown write: SHUT_WR or SHUT_RDWR */ if (*how != SHUT_RD) { error = cfil_sock_udp_notify_shutdown(so, SHUT_WR, CFIF_DROP, CFIF_SHUT_WR); - if (error != 0) + if (error != 0) { goto done; + } /* * When outgoing data is pending, we delay the shutdown at the @@ -5571,7 +5742,7 @@ cfil_sock_udp_shutdown(struct socket *so, int *how) } } done: - return (error); + return error; } void @@ -5589,8 +5760,9 @@ cfil_sock_udp_close_wait(struct socket *so) * Make sure we need to wait after the filter are notified * of the disconnection */ - if (cfil_filters_udp_attached(so, TRUE) == 0) + if (cfil_filters_udp_attached(so, TRUE) == 0) { break; + } } } @@ -5608,7 +5780,6 @@ cfil_sock_udp_is_closed(struct socket *so) socket_lock_assert_owned(so); if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil_db != NULL) { - db = so->so_cfil_db; for (int i = 0; i < CFILHASHSIZE; i++) { @@ -5616,7 +5787,6 @@ cfil_sock_udp_is_closed(struct socket *so) LIST_FOREACH_SAFE(hash_entry, cfilhash, cfentry_link, temp_hash_entry) { if (hash_entry->cfentry_cfil != NULL) { - cfil_info = hash_entry->cfentry_cfil; for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { @@ -5626,8 +5796,9 @@ cfil_sock_udp_is_closed(struct socket *so) /* Last chance to push passed data out */ error = cfil_acquire_sockbuf(so, cfil_info, 1); - if (error == 0) + if (error == 0) { cfil_service_inject_queue(so, cfil_info, 1); + } cfil_release_sockbuf(so, 1); cfil_info->cfi_flags |= CFIF_SOCK_CLOSED; @@ -5657,9 +5828,9 @@ cfil_sock_udp_buf_update(struct sockbuf *sb) socket_lock_assert_owned(so); if ((so->so_flags & SOF_CONTENT_FILTER) != 0 && so->so_cfil_db != NULL) { - - if (!cfil_sbtrim) + if (!cfil_sbtrim) { return; + } db = so->so_cfil_db; @@ -5668,27 +5839,29 @@ cfil_sock_udp_buf_update(struct sockbuf *sb) LIST_FOREACH_SAFE(hash_entry, cfilhash, cfentry_link, temp_hash_entry) { if (hash_entry->cfentry_cfil != NULL) { - cfil_info = hash_entry->cfentry_cfil; if ((sb->sb_flags & SB_RECV) == 0) { - if ((cfil_info->cfi_flags & CFIF_RETRY_INJECT_OUT) == 0) + if ((cfil_info->cfi_flags & CFIF_RETRY_INJECT_OUT) == 0) { return; + } outgoing = 1; OSIncrementAtomic(&cfil_stats.cfs_inject_q_out_retry); } else { - if ((cfil_info->cfi_flags & CFIF_RETRY_INJECT_IN) == 0) + if ((cfil_info->cfi_flags & CFIF_RETRY_INJECT_IN) == 0) { return; + } outgoing = 0; OSIncrementAtomic(&cfil_stats.cfs_inject_q_in_retry); } CFIL_LOG(LOG_NOTICE, "so %llx outgoing %d", - (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing); + (uint64_t)VM_KERNEL_ADDRPERM(so), outgoing); error = cfil_acquire_sockbuf(so, cfil_info, outgoing); - if (error == 0) + if (error == 0) { cfil_service_inject_queue(so, cfil_info, outgoing); + } cfil_release_sockbuf(so, outgoing); } } @@ -5719,32 +5892,34 @@ cfil_filter_show(u_int32_t kcunit) cfc = content_filters[kcunit - 1]; CFIL_LOG(LOG_ERR, "CFIL: FILTER SHOW: Filter flags <%lx>:", - kcunit, cfc->cf_sock_count, (unsigned long)cfc->cf_flags); - if (cfc->cf_flags & CFF_DETACHING) + kcunit, cfc->cf_sock_count, (unsigned 
long)cfc->cf_flags); + if (cfc->cf_flags & CFF_DETACHING) { CFIL_LOG(LOG_ERR, "CFIL: FILTER SHOW: - DETACHING"); - if (cfc->cf_flags & CFF_ACTIVE) + } + if (cfc->cf_flags & CFF_ACTIVE) { CFIL_LOG(LOG_ERR, "CFIL: FILTER SHOW: - ACTIVE"); - if (cfc->cf_flags & CFF_FLOW_CONTROLLED) + } + if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { CFIL_LOG(LOG_ERR, "CFIL: FILTER SHOW: - FLOW CONTROLLED"); + } TAILQ_FOREACH(entry, &cfc->cf_sock_entries, cfe_link) { - if (entry->cfe_cfil_info && entry->cfe_cfil_info->cfi_so) { struct cfil_info *cfil_info = entry->cfe_cfil_info; count++; - if (entry->cfe_flags & CFEF_CFIL_DETACHED) + if (entry->cfe_flags & CFEF_CFIL_DETACHED) { cfil_info_log(LOG_ERR, cfil_info, "CFIL: FILTER SHOW: - DETACHED"); - else + } else { cfil_info_log(LOG_ERR, cfil_info, "CFIL: FILTER SHOW: - ATTACHED"); + } } } CFIL_LOG(LOG_ERR, "CFIL: FILTER SHOW: Filter - total entries shown: %d", count); cfil_rw_unlock_shared(&cfil_lck_rw); - } void @@ -5758,25 +5933,31 @@ cfil_info_show(void) CFIL_LOG(LOG_ERR, "CFIL: INFO SHOW: count %d", cfil_sock_attached_count); TAILQ_FOREACH(cfil_info, &cfil_sock_head, cfi_link) { - count++; cfil_info_log(LOG_ERR, cfil_info, "CFIL: INFO SHOW"); - if (cfil_info->cfi_flags & CFIF_DROP) + if (cfil_info->cfi_flags & CFIF_DROP) { CFIL_LOG(LOG_ERR, "CFIL: INFO FLAG - DROP"); - if (cfil_info->cfi_flags & CFIF_CLOSE_WAIT) + } + if (cfil_info->cfi_flags & CFIF_CLOSE_WAIT) { CFIL_LOG(LOG_ERR, "CFIL: INFO FLAG - CLOSE_WAIT"); - if (cfil_info->cfi_flags & CFIF_SOCK_CLOSED) + } + if (cfil_info->cfi_flags & CFIF_SOCK_CLOSED) { CFIL_LOG(LOG_ERR, "CFIL: INFO FLAG - SOCK_CLOSED"); - if (cfil_info->cfi_flags & CFIF_RETRY_INJECT_IN) + } + if (cfil_info->cfi_flags & CFIF_RETRY_INJECT_IN) { CFIL_LOG(LOG_ERR, "CFIL: INFO FLAG - RETRY_INJECT_IN"); - if (cfil_info->cfi_flags & CFIF_RETRY_INJECT_OUT) + } + if (cfil_info->cfi_flags & CFIF_RETRY_INJECT_OUT) { CFIL_LOG(LOG_ERR, "CFIL: INFO FLAG - RETRY_INJECT_OUT"); - if (cfil_info->cfi_flags & CFIF_SHUT_WR) + } + if (cfil_info->cfi_flags & CFIF_SHUT_WR) { CFIL_LOG(LOG_ERR, "CFIL: INFO FLAG - SHUT_WR"); - if (cfil_info->cfi_flags & CFIF_SHUT_RD) + } + if (cfil_info->cfi_flags & CFIF_SHUT_RD) { CFIL_LOG(LOG_ERR, "CFIL: INFO FLAG - SHUT_RD"); + } } CFIL_LOG(LOG_ERR, "CFIL: INFO SHOW: total cfil_info shown: %d", count); @@ -5788,7 +5969,7 @@ bool cfil_info_idle_timed_out(struct cfil_info *cfil_info, int timeout, u_int32_t current_time) { if (cfil_info && cfil_info->cfi_hash_entry && - (current_time - cfil_info->cfi_hash_entry->cfentry_lastused >= (u_int32_t)timeout)) { + (current_time - cfil_info->cfi_hash_entry->cfentry_lastused >= (u_int32_t)timeout)) { #if GC_DEBUG cfil_info_log(LOG_ERR, cfil_info, "CFIL: flow IDLE timeout expired"); #endif @@ -5804,8 +5985,9 @@ cfil_info_action_timed_out(struct cfil_info *cfil_info, int timeout) struct timeval current_tv; struct timeval diff_time; - if (cfil_info == NULL) + if (cfil_info == NULL) { return false; + } /* * If we have queued up more data than passed offset and we haven't received @@ -5813,18 +5995,18 @@ cfil_info_action_timed_out(struct cfil_info *cfil_info, int timeout) * return action timed out. 
*/ if (cfil_info->cfi_snd.cfi_pending_last > cfil_info->cfi_snd.cfi_pass_offset || - cfil_info->cfi_rcv.cfi_pending_last > cfil_info->cfi_rcv.cfi_pass_offset) { - + cfil_info->cfi_rcv.cfi_pending_last > cfil_info->cfi_rcv.cfi_pass_offset) { microuptime(&current_tv); for (int kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { entry = &cfil_info->cfi_entries[kcunit - 1]; - if (entry->cfe_filter == NULL) + if (entry->cfe_filter == NULL) { continue; + } if (cfil_info->cfi_snd.cfi_pending_last > entry->cfe_snd.cfe_pass_offset || - cfil_info->cfi_rcv.cfi_pending_last > entry->cfe_rcv.cfe_pass_offset) { + cfil_info->cfi_rcv.cfi_pending_last > entry->cfe_rcv.cfe_pass_offset) { // haven't gotten an action from this filter, check timeout timersub(&current_tv, &entry->cfe_last_action, &diff_time); if (diff_time.tv_sec >= timeout) { @@ -5842,20 +6024,21 @@ bool cfil_info_buffer_threshold_exceeded(struct cfil_info *cfil_info) { - if (cfil_info == NULL) + if (cfil_info == NULL) { return false; + } /* * Clean up flow if it exceeded queue thresholds */ if (cfil_info->cfi_snd.cfi_tail_drop_cnt || - cfil_info->cfi_rcv.cfi_tail_drop_cnt) { + cfil_info->cfi_rcv.cfi_tail_drop_cnt) { #if GC_DEBUG CFIL_LOG(LOG_ERR, "CFIL: queue threshold exceeded: mbuf max tail drop count ", - cfil_udp_gc_mbuf_num_max, - cfil_udp_gc_mbuf_cnt_max, - cfil_info->cfi_snd.cfi_tail_drop_cnt, - cfil_info->cfi_rcv.cfi_tail_drop_cnt); + cfil_udp_gc_mbuf_num_max, + cfil_udp_gc_mbuf_cnt_max, + cfil_info->cfi_snd.cfi_tail_drop_cnt, + cfil_info->cfi_rcv.cfi_tail_drop_cnt); cfil_info_log(LOG_ERR, cfil_info, "CFIL: queue threshold exceeded"); #endif return true; @@ -5869,14 +6052,14 @@ cfil_udp_gc_thread_sleep(bool forever) { if (forever) { (void) assert_wait((event_t) &cfil_sock_udp_attached_count, - THREAD_INTERRUPTIBLE); + THREAD_INTERRUPTIBLE); } else { uint64_t deadline = 0; nanoseconds_to_absolutetime(UDP_FLOW_GC_RUN_INTERVAL_NSEC, &deadline); clock_absolutetime_interval_to_deadline(deadline, &deadline); (void) assert_wait_deadline(&cfil_sock_udp_attached_count, - THREAD_INTERRUPTIBLE, deadline); + THREAD_INTERRUPTIBLE, deadline); } } @@ -5919,13 +6102,14 @@ cfil_info_udp_expire(void *v, wait_result_t w) } TAILQ_FOREACH(cfil_info, &cfil_sock_head, cfi_link) { - if (expired_count >= UDP_FLOW_GC_MAX_COUNT) + if (expired_count >= UDP_FLOW_GC_MAX_COUNT) { break; + } if (IS_UDP(cfil_info->cfi_so)) { if (cfil_info_idle_timed_out(cfil_info, UDP_FLOW_GC_IDLE_TO, current_time) || - cfil_info_action_timed_out(cfil_info, UDP_FLOW_GC_ACTION_TO) || - cfil_info_buffer_threshold_exceeded(cfil_info)) { + cfil_info_action_timed_out(cfil_info, UDP_FLOW_GC_ACTION_TO) || + cfil_info_buffer_threshold_exceeded(cfil_info)) { expired_array[expired_count] = cfil_info->cfi_sock_id; expired_count++; } @@ -5933,11 +6117,11 @@ cfil_info_udp_expire(void *v, wait_result_t w) } cfil_rw_unlock_shared(&cfil_lck_rw); - if (expired_count == 0) + if (expired_count == 0) { goto go_sleep; + } for (uint32_t i = 0; i < expired_count; i++) { - // Search for socket (UDP only and lock so) so = cfil_socket_from_sock_id(expired_array[i], true); if (so == NULL) { @@ -5965,8 +6149,9 @@ cfil_info_udp_expire(void *v, wait_result_t w) OSIncrementAtomic(&cfil_stats.cfs_sock_detached); if (so->so_flags & SOF_CONTENT_FILTER) { - if (db->cfdb_count == 0) + if (db->cfdb_count == 0) { so->so_flags &= ~SOF_CONTENT_FILTER; + } VERIFY(so->so_usecount > 0); so->so_usecount--; } @@ -5997,13 +6182,13 @@ cfil_udp_save_socket_state(struct 
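cfil_info_action_timed_out() only starts the clock once queued bytes are beyond the pass offset, then measures elapsed time by subtracting the entry's last-action timestamp from microuptime() with timersub(). The same arithmetic compiles in userland with gettimeofday() standing in for microuptime():

	#include <sys/time.h>
	#include <stdbool.h>

	/* True once 'timeout' seconds have elapsed since last_action. */
	static bool
	action_timed_out(const struct timeval *last_action, int timeout)
	{
		struct timeval now, diff;

		gettimeofday(&now, NULL);     /* microuptime() in the kernel */
		timersub(&now, last_action, &diff);
		return diff.tv_sec >= timeout;
	}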
cfil_info *cfil_info, struct mbuf *m) struct cfil_hash_entry *hash_entry = NULL; if (cfil_info == NULL || cfil_info->cfi_so == NULL || - cfil_info->cfi_hash_entry == NULL || m == NULL || !(m->m_flags & M_PKTHDR)) { + cfil_info->cfi_hash_entry == NULL || m == NULL || !(m->m_flags & M_PKTHDR)) { return NULL; } /* Allocate a tag */ tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_CFIL_UDP, - sizeof(struct cfil_tag), M_DONTWAIT, m); + sizeof(struct cfil_tag), M_DONTWAIT, m); if (tag) { ctag = (struct cfil_tag*)(tag + 1); @@ -6013,22 +6198,22 @@ cfil_udp_save_socket_state(struct cfil_info *cfil_info, struct mbuf *m) hash_entry = cfil_info->cfi_hash_entry; if (hash_entry->cfentry_family == AF_INET6) { fill_ip6_sockaddr_4_6(&ctag->cfil_faddr, - &hash_entry->cfentry_faddr.addr6, - hash_entry->cfentry_fport); + &hash_entry->cfentry_faddr.addr6, + hash_entry->cfentry_fport); } else if (hash_entry->cfentry_family == AF_INET) { fill_ip_sockaddr_4_6(&ctag->cfil_faddr, - hash_entry->cfentry_faddr.addr46.ia46_addr4, - hash_entry->cfentry_fport); + hash_entry->cfentry_faddr.addr46.ia46_addr4, + hash_entry->cfentry_fport); } m_tag_prepend(m, tag); - return (tag); + return tag; } return NULL; } struct m_tag * cfil_udp_get_socket_state(struct mbuf *m, uint32_t *state_change_cnt, short *options, - struct sockaddr **faddr) + struct sockaddr **faddr) { struct m_tag *tag = NULL; struct cfil_tag *ctag = NULL; @@ -6036,12 +6221,15 @@ cfil_udp_get_socket_state(struct mbuf *m, uint32_t *state_change_cnt, short *opt tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_CFIL_UDP, NULL); if (tag) { ctag = (struct cfil_tag *)(tag + 1); - if (state_change_cnt) + if (state_change_cnt) { *state_change_cnt = ctag->cfil_so_state_change_cnt; - if (options) + } + if (options) { *options = ctag->cfil_so_options; - if (faddr) + } + if (faddr) { *faddr = (struct sockaddr *) &ctag->cfil_faddr; + } /* * Unlink tag and hand it over to caller. @@ -6052,5 +6240,3 @@ cfil_udp_get_socket_state(struct mbuf *m, uint32_t *state_change_cnt, short *opt } return NULL; } - - diff --git a/bsd/net/content_filter.h b/bsd/net/content_filter.h index 55249920b..6af66bb70 100644 --- a/bsd/net/content_filter.h +++ b/bsd/net/content_filter.h @@ -22,7 +22,7 @@ */ #ifndef __CONTENT_FILTER_H__ -#define __CONTENT_FILTER_H__ +#define __CONTENT_FILTER_H__ #include #include @@ -47,14 +47,14 @@ __BEGIN_DECLS * to be set in the sc_id field of sockaddr_ctl for connect(2) * Note: the sc_unit is ephemeral */ -#define CONTENT_FILTER_CONTROL_NAME "com.apple.content-filter" +#define CONTENT_FILTER_CONTROL_NAME "com.apple.content-filter" /* * Opaque socket identifier */ typedef uint64_t cfil_sock_id_t; -#define CFIL_SOCK_ID_NONE UINT64_MAX +#define CFIL_SOCK_ID_NONE UINT64_MAX /* @@ -62,36 +62,36 @@ typedef uint64_t cfil_sock_id_t; * To set or get the NECP filter control unit for the kernel control socket * The option level is SYSPROTO_CONTROL */ -#define CFIL_OPT_NECP_CONTROL_UNIT 1 /* uint32_t */ +#define CFIL_OPT_NECP_CONTROL_UNIT 1 /* uint32_t */ /* * CFIL_OPT_GET_SOCKET_INFO - * To get information about a given socket that is being filtered. + * To get information about a given socket that is being filtered. */ -#define CFIL_OPT_GET_SOCKET_INFO 2 /* uint32_t */ +#define CFIL_OPT_GET_SOCKET_INFO 2 /* uint32_t */ /* * struct cfil_opt_sock_info * - * Contains information about a socket that is being filtered. + * Contains information about a socket that is being filtered. 
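The GC pass in cfil_info_udp_expire() is deliberately two-phase: it holds cfil_lck_rw shared only long enough to copy up to UDP_FLOW_GC_MAX_COUNT expired sock ids into a local array, drops the lock, and then reaps each flow by re-resolving the id to a locked socket, so flow teardown never runs under the global rw lock. A userland sketch of that collect-then-reap shape, all names hypothetical:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define GC_MAX 100                            /* like UDP_FLOW_GC_MAX_COUNT */

	struct flow { uint64_t id; /* ... */ };
	bool         flow_expired(const struct flow *f);
	struct flow *flow_next(struct flow *f);
	struct flow *flow_reap_lookup(uint64_t id);   /* re-resolve and lock */
	void         flow_reap(struct flow *f);

	static void
	gc_pass(pthread_rwlock_t *lck, struct flow *head)
	{
		uint64_t expired[GC_MAX];
		unsigned n = 0;

		pthread_rwlock_rdlock(lck);           /* phase 1: collect ids only */
		for (struct flow *f = head; f != NULL && n < GC_MAX; f = flow_next(f))
			if (flow_expired(f))
				expired[n++] = f->id;
		pthread_rwlock_unlock(lck);

		for (unsigned i = 0; i < n; i++) {    /* phase 2: reap, lock dropped */
			struct flow *f = flow_reap_lookup(expired[i]);
			if (f != NULL)
				flow_reap(f);
		}
	}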
*/ struct cfil_opt_sock_info { - cfil_sock_id_t cfs_sock_id; - int cfs_sock_family; /* e.g. PF_INET */ - int cfs_sock_type; /* e.g. SOCK_STREAM */ - int cfs_sock_protocol; /* e.g. IPPROTO_TCP */ - union sockaddr_in_4_6 cfs_local; - union sockaddr_in_4_6 cfs_remote; - pid_t cfs_pid; - pid_t cfs_e_pid; - uuid_t cfs_uuid; - uuid_t cfs_e_uuid; + cfil_sock_id_t cfs_sock_id; + int cfs_sock_family; /* e.g. PF_INET */ + int cfs_sock_type; /* e.g. SOCK_STREAM */ + int cfs_sock_protocol; /* e.g. IPPROTO_TCP */ + union sockaddr_in_4_6 cfs_local; + union sockaddr_in_4_6 cfs_remote; + pid_t cfs_pid; + pid_t cfs_e_pid; + uuid_t cfs_uuid; + uuid_t cfs_e_uuid; }; /* * How many filter may be active simultaneously */ -#define CFIL_MAX_FILTER_COUNT 2 +#define CFIL_MAX_FILTER_COUNT 2 /* * Types of messages @@ -101,25 +101,25 @@ struct cfil_opt_sock_info { * A message in entirely represented by a packet sent or received * on a Content Filter kernel control socket. */ -#define CFM_TYPE_EVENT 1 /* message from kernel */ -#define CFM_TYPE_ACTION 2 /* message to kernel */ +#define CFM_TYPE_EVENT 1 /* message from kernel */ +#define CFM_TYPE_ACTION 2 /* message to kernel */ /* * Operations associated with events from kernel */ -#define CFM_OP_SOCKET_ATTACHED 1 /* a socket has been attached */ -#define CFM_OP_SOCKET_CLOSED 2 /* a socket is being closed */ -#define CFM_OP_DATA_OUT 3 /* data being sent */ -#define CFM_OP_DATA_IN 4 /* data being received */ -#define CFM_OP_DISCONNECT_OUT 5 /* no more outgoing data */ -#define CFM_OP_DISCONNECT_IN 6 /* no more incoming data */ +#define CFM_OP_SOCKET_ATTACHED 1 /* a socket has been attached */ +#define CFM_OP_SOCKET_CLOSED 2 /* a socket is being closed */ +#define CFM_OP_DATA_OUT 3 /* data being sent */ +#define CFM_OP_DATA_IN 4 /* data being received */ +#define CFM_OP_DISCONNECT_OUT 5 /* no more outgoing data */ +#define CFM_OP_DISCONNECT_IN 6 /* no more incoming data */ /* * Operations associated with action from filter to kernel */ -#define CFM_OP_DATA_UPDATE 16 /* update pass or peek offsets */ -#define CFM_OP_DROP 17 /* shutdown socket, no more data */ -#define CFM_OP_BLESS_CLIENT 18 /* mark a client flow as already filtered, passes a uuid */ +#define CFM_OP_DATA_UPDATE 16 /* update pass or peek offsets */ +#define CFM_OP_DROP 17 /* shutdown socket, no more data */ +#define CFM_OP_BLESS_CLIENT 18 /* mark a client flow as already filtered, passes a uuid */ /* * struct cfil_msg_hdr @@ -127,14 +127,14 @@ struct cfil_opt_sock_info { * Header common to all messages */ struct cfil_msg_hdr { - uint32_t cfm_len; /* total length */ - uint32_t cfm_version; - uint32_t cfm_type; - uint32_t cfm_op; - cfil_sock_id_t cfm_sock_id; + uint32_t cfm_len; /* total length */ + uint32_t cfm_version; + uint32_t cfm_type; + uint32_t cfm_op; + cfil_sock_id_t cfm_sock_id; }; -#define CFM_VERSION_CURRENT 1 +#define CFM_VERSION_CURRENT 1 /* * struct cfil_msg_sock_attached @@ -149,15 +149,15 @@ struct cfil_msg_hdr { * Valid Op: CFM_OP_SOCKET_ATTACHED */ struct cfil_msg_sock_attached { - struct cfil_msg_hdr cfs_msghdr; - int cfs_sock_family; /* e.g. PF_INET */ - int cfs_sock_type; /* e.g. SOCK_STREAM */ - int cfs_sock_protocol; /* e.g. IPPROTO_TCP */ - int cfs_unused; /* padding */ - pid_t cfs_pid; - pid_t cfs_e_pid; - uuid_t cfs_uuid; - uuid_t cfs_e_uuid; + struct cfil_msg_hdr cfs_msghdr; + int cfs_sock_family; /* e.g. PF_INET */ + int cfs_sock_type; /* e.g. SOCK_STREAM */ + int cfs_sock_protocol; /* e.g. 
IPPROTO_TCP */ + int cfs_unused; /* padding */ + pid_t cfs_pid; + pid_t cfs_e_pid; + uuid_t cfs_uuid; + uuid_t cfs_e_uuid; }; /* @@ -176,11 +176,11 @@ struct cfil_msg_sock_attached { * Valid Ops: CFM_OP_DATA_OUT, CFM_OP_DATA_IN */ struct cfil_msg_data_event { - struct cfil_msg_hdr cfd_msghdr; - union sockaddr_in_4_6 cfc_src; - union sockaddr_in_4_6 cfc_dst; - uint64_t cfd_start_offset; - uint64_t cfd_end_offset; + struct cfil_msg_hdr cfd_msghdr; + union sockaddr_in_4_6 cfc_src; + union sockaddr_in_4_6 cfc_dst; + uint64_t cfd_start_offset; + uint64_t cfd_end_offset; /* Actual content data immediatly follows */ }; @@ -198,11 +198,11 @@ struct cfil_msg_data_event { * Valid Op: CFM_OP_SOCKET_CLOSED */ struct cfil_msg_sock_closed { - struct cfil_msg_hdr cfc_msghdr; - struct timeval64 cfc_first_event; - uint32_t cfc_op_list_ctr; - uint32_t cfc_op_time[CFI_MAX_TIME_LOG_ENTRY]; /* time interval in microseconds since first event */ - unsigned char cfc_op_list[CFI_MAX_TIME_LOG_ENTRY]; + struct cfil_msg_hdr cfc_msghdr; + struct timeval64 cfc_first_event; + uint32_t cfc_op_list_ctr; + uint32_t cfc_op_time[CFI_MAX_TIME_LOG_ENTRY]; /* time interval in microseconds since first event */ + unsigned char cfc_op_list[CFI_MAX_TIME_LOG_ENTRY]; } __attribute__((aligned(8))); /* @@ -223,11 +223,11 @@ struct cfil_msg_sock_closed { * if you don't value the corresponding peek offset to be updated. */ struct cfil_msg_action { - struct cfil_msg_hdr cfa_msghdr; - uint64_t cfa_in_pass_offset; - uint64_t cfa_in_peek_offset; - uint64_t cfa_out_pass_offset; - uint64_t cfa_out_peek_offset; + struct cfil_msg_hdr cfa_msghdr; + uint64_t cfa_in_pass_offset; + uint64_t cfa_in_peek_offset; + uint64_t cfa_out_pass_offset; + uint64_t cfa_out_peek_offset; }; /* @@ -240,110 +240,110 @@ struct cfil_msg_action { * Valid Ops: CFM_OP_BLESS_CLIENT */ struct cfil_msg_bless_client { - struct cfil_msg_hdr cfb_msghdr; + struct cfil_msg_hdr cfb_msghdr; uuid_t cfb_client_uuid; }; -#define CFM_MAX_OFFSET UINT64_MAX +#define CFM_MAX_OFFSET UINT64_MAX /* * Statistics retrieved via sysctl(3) */ struct cfil_filter_stat { - uint32_t cfs_len; - uint32_t cfs_filter_id; - uint32_t cfs_flags; - uint32_t cfs_sock_count; - uint32_t cfs_necp_control_unit; + uint32_t cfs_len; + uint32_t cfs_filter_id; + uint32_t cfs_flags; + uint32_t cfs_sock_count; + uint32_t cfs_necp_control_unit; }; struct cfil_entry_stat { - uint32_t ces_len; - uint32_t ces_filter_id; - uint32_t ces_flags; - uint32_t ces_necp_control_unit; - struct timeval64 ces_last_event; - struct timeval64 ces_last_action; + uint32_t ces_len; + uint32_t ces_filter_id; + uint32_t ces_flags; + uint32_t ces_necp_control_unit; + struct timeval64 ces_last_event; + struct timeval64 ces_last_action; struct cfe_buf_stat { - uint64_t cbs_pending_first; - uint64_t cbs_pending_last; - uint64_t cbs_ctl_first; - uint64_t cbs_ctl_last; - uint64_t cbs_pass_offset; - uint64_t cbs_peek_offset; - uint64_t cbs_peeked; + uint64_t cbs_pending_first; + uint64_t cbs_pending_last; + uint64_t cbs_ctl_first; + uint64_t cbs_ctl_last; + uint64_t cbs_pass_offset; + uint64_t cbs_peek_offset; + uint64_t cbs_peeked; } ces_snd, ces_rcv; }; struct cfil_sock_stat { - uint32_t cfs_len; - int cfs_sock_family; - int cfs_sock_type; - int cfs_sock_protocol; - cfil_sock_id_t cfs_sock_id; - uint64_t cfs_flags; - pid_t cfs_pid; - pid_t cfs_e_pid; - uuid_t cfs_uuid; - uuid_t cfs_e_uuid; + uint32_t cfs_len; + int cfs_sock_family; + int cfs_sock_type; + int cfs_sock_protocol; + cfil_sock_id_t cfs_sock_id; + uint64_t cfs_flags; + pid_t cfs_pid; + 
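The definitions being re-tabbed here are the wire protocol a filter agent speaks over the com.apple.content-filter kernel control socket: every message leads with struct cfil_msg_hdr, and the agent passes data on a flow by sending a CFM_OP_DATA_UPDATE action carrying new pass and peek offsets. A hedged userland sketch of building such an action follows; it assumes fd is a kernel control socket already connected via the usual CTLIOCGINFO dance with CONTENT_FILTER_CONTROL_NAME (that setup is omitted), and it leaves the peek offsets at zero so they stay unchanged, per the comment above.

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/types.h>

	/*
	 * struct cfil_msg_action, cfil_sock_id_t, and the CFM_ constants come
	 * from the net/content_filter.h shown above (a private header).
	 * Allow everything seen so far on a flow, in both directions.
	 */
	static int
	send_pass_all(int fd, cfil_sock_id_t sock_id)
	{
		struct cfil_msg_action act;
		ssize_t n;

		memset(&act, 0, sizeof(act));
		act.cfa_msghdr.cfm_len = sizeof(act);
		act.cfa_msghdr.cfm_version = CFM_VERSION_CURRENT;
		act.cfa_msghdr.cfm_type = CFM_TYPE_ACTION;
		act.cfa_msghdr.cfm_op = CFM_OP_DATA_UPDATE;
		act.cfa_msghdr.cfm_sock_id = sock_id;
		act.cfa_in_pass_offset = CFM_MAX_OFFSET;   /* pass everything in */
		act.cfa_out_pass_offset = CFM_MAX_OFFSET;  /* pass everything out */
		/* peek offsets stay 0 so they are left unchanged */
		n = send(fd, &act, sizeof(act), 0);
		return n == (ssize_t)sizeof(act) ? 0 : -1;
	}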
pid_t cfs_e_pid; + uuid_t cfs_uuid; + uuid_t cfs_e_uuid; struct cfi_buf_stat { - uint64_t cbs_pending_first; - uint64_t cbs_pending_last; - uint64_t cbs_pass_offset; - uint64_t cbs_inject_q_len; + uint64_t cbs_pending_first; + uint64_t cbs_pending_last; + uint64_t cbs_pass_offset; + uint64_t cbs_inject_q_len; } cfs_snd, cfs_rcv; - struct cfil_entry_stat ces_entries[CFIL_MAX_FILTER_COUNT]; + struct cfil_entry_stat ces_entries[CFIL_MAX_FILTER_COUNT]; }; /* * Global statistics */ struct cfil_stats { - int32_t cfs_ctl_connect_ok; - int32_t cfs_ctl_connect_fail; - int32_t cfs_ctl_disconnect_ok; - int32_t cfs_ctl_disconnect_fail; - int32_t cfs_ctl_send_ok; - int32_t cfs_ctl_send_bad; - int32_t cfs_ctl_rcvd_ok; - int32_t cfs_ctl_rcvd_bad; - int32_t cfs_ctl_rcvd_flow_lift; - int32_t cfs_ctl_action_data_update; - int32_t cfs_ctl_action_drop; - int32_t cfs_ctl_action_bad_op; - int32_t cfs_ctl_action_bad_len; - - int32_t cfs_sock_id_not_found; - - int32_t cfs_cfi_alloc_ok; - int32_t cfs_cfi_alloc_fail; - - int32_t cfs_sock_userspace_only; - int32_t cfs_sock_attach_in_vain; - int32_t cfs_sock_attach_already; - int32_t cfs_sock_attach_no_mem; - int32_t cfs_sock_attach_failed; - int32_t cfs_sock_attached; - int32_t cfs_sock_detached; - - int32_t cfs_attach_event_ok; - int32_t cfs_attach_event_flow_control; - int32_t cfs_attach_event_fail; - - int32_t cfs_closed_event_ok; - int32_t cfs_closed_event_flow_control; - int32_t cfs_closed_event_fail; - - int32_t cfs_data_event_ok; - int32_t cfs_data_event_flow_control; - int32_t cfs_data_event_fail; - - int32_t cfs_disconnect_in_event_ok; - int32_t cfs_disconnect_out_event_ok; - int32_t cfs_disconnect_event_flow_control; - int32_t cfs_disconnect_event_fail; - - int32_t cfs_ctl_q_not_started; + int32_t cfs_ctl_connect_ok; + int32_t cfs_ctl_connect_fail; + int32_t cfs_ctl_disconnect_ok; + int32_t cfs_ctl_disconnect_fail; + int32_t cfs_ctl_send_ok; + int32_t cfs_ctl_send_bad; + int32_t cfs_ctl_rcvd_ok; + int32_t cfs_ctl_rcvd_bad; + int32_t cfs_ctl_rcvd_flow_lift; + int32_t cfs_ctl_action_data_update; + int32_t cfs_ctl_action_drop; + int32_t cfs_ctl_action_bad_op; + int32_t cfs_ctl_action_bad_len; + + int32_t cfs_sock_id_not_found; + + int32_t cfs_cfi_alloc_ok; + int32_t cfs_cfi_alloc_fail; + + int32_t cfs_sock_userspace_only; + int32_t cfs_sock_attach_in_vain; + int32_t cfs_sock_attach_already; + int32_t cfs_sock_attach_no_mem; + int32_t cfs_sock_attach_failed; + int32_t cfs_sock_attached; + int32_t cfs_sock_detached; + + int32_t cfs_attach_event_ok; + int32_t cfs_attach_event_flow_control; + int32_t cfs_attach_event_fail; + + int32_t cfs_closed_event_ok; + int32_t cfs_closed_event_flow_control; + int32_t cfs_closed_event_fail; + + int32_t cfs_data_event_ok; + int32_t cfs_data_event_flow_control; + int32_t cfs_data_event_fail; + + int32_t cfs_disconnect_in_event_ok; + int32_t cfs_disconnect_out_event_ok; + int32_t cfs_disconnect_event_flow_control; + int32_t cfs_disconnect_event_fail; + + int32_t cfs_ctl_q_not_started; int32_t cfs_close_wait; int32_t cfs_close_wait_timeout; @@ -355,47 +355,46 @@ struct cfil_stats { int32_t cfs_flush_in_free; int32_t cfs_flush_out_free; - int32_t cfs_inject_q_nomem; - int32_t cfs_inject_q_nobufs; - int32_t cfs_inject_q_detached; - int32_t cfs_inject_q_in_fail; - int32_t cfs_inject_q_out_fail; + int32_t cfs_inject_q_nomem; + int32_t cfs_inject_q_nobufs; + int32_t cfs_inject_q_detached; + int32_t cfs_inject_q_in_fail; + int32_t cfs_inject_q_out_fail; - int32_t cfs_inject_q_in_retry; - int32_t cfs_inject_q_out_retry; + int32_t 
cfs_inject_q_in_retry; + int32_t cfs_inject_q_out_retry; int32_t cfs_data_in_control; int32_t cfs_data_in_oob; int32_t cfs_data_out_control; int32_t cfs_data_out_oob; - int64_t cfs_ctl_q_in_enqueued __attribute__((aligned(8))); - int64_t cfs_ctl_q_out_enqueued __attribute__((aligned(8))); - int64_t cfs_ctl_q_in_peeked __attribute__((aligned(8))); - int64_t cfs_ctl_q_out_peeked __attribute__((aligned(8))); + int64_t cfs_ctl_q_in_enqueued __attribute__((aligned(8))); + int64_t cfs_ctl_q_out_enqueued __attribute__((aligned(8))); + int64_t cfs_ctl_q_in_peeked __attribute__((aligned(8))); + int64_t cfs_ctl_q_out_peeked __attribute__((aligned(8))); - int64_t cfs_pending_q_in_enqueued __attribute__((aligned(8))); - int64_t cfs_pending_q_out_enqueued __attribute__((aligned(8))); - - int64_t cfs_inject_q_in_enqueued __attribute__((aligned(8))); - int64_t cfs_inject_q_out_enqueued __attribute__((aligned(8))); - int64_t cfs_inject_q_in_passed __attribute__((aligned(8))); - int64_t cfs_inject_q_out_passed __attribute__((aligned(8))); + int64_t cfs_pending_q_in_enqueued __attribute__((aligned(8))); + int64_t cfs_pending_q_out_enqueued __attribute__((aligned(8))); + int64_t cfs_inject_q_in_enqueued __attribute__((aligned(8))); + int64_t cfs_inject_q_out_enqueued __attribute__((aligned(8))); + int64_t cfs_inject_q_in_passed __attribute__((aligned(8))); + int64_t cfs_inject_q_out_passed __attribute__((aligned(8))); }; #endif /* PRIVATE */ #ifdef BSD_KERNEL_PRIVATE -#define M_SKIPCFIL M_PROTO5 +#define M_SKIPCFIL M_PROTO5 extern int cfil_log_level; -#define CFIL_LOG(level, fmt, ...) \ +#define CFIL_LOG(level, fmt, ...) \ do { \ if (cfil_log_level >= level) \ - printf("%s:%d " fmt "\n",\ - __FUNCTION__, __LINE__, ##__VA_ARGS__); \ + printf("%s:%d " fmt "\n",\ + __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while (0) @@ -405,11 +404,11 @@ extern errno_t cfil_sock_attach(struct socket *so); extern errno_t cfil_sock_detach(struct socket *so); extern int cfil_sock_data_out(struct socket *so, struct sockaddr *to, - struct mbuf *data, struct mbuf *control, - uint32_t flags); + struct mbuf *data, struct mbuf *control, + uint32_t flags); extern int cfil_sock_data_in(struct socket *so, struct sockaddr *from, - struct mbuf *data, struct mbuf *control, - uint32_t flags); + struct mbuf *data, struct mbuf *control, + uint32_t flags); extern int cfil_sock_shutdown(struct socket *so, int *how); extern void cfil_sock_is_closed(struct socket *so); @@ -423,7 +422,7 @@ extern void cfil_sock_buf_update(struct sockbuf *sb); extern cfil_sock_id_t cfil_sock_id_from_socket(struct socket *so); extern struct m_tag *cfil_udp_get_socket_state(struct mbuf *m, uint32_t *state_change_cnt, - short *options, struct sockaddr **faddr); + short *options, struct sockaddr **faddr); #endif /* BSD_KERNEL_PRIVATE */ __END_DECLS diff --git a/bsd/net/devtimer.c b/bsd/net/devtimer.c index 45eb31f47..4e4edad68 100644 --- a/bsd/net/devtimer.c +++ b/bsd/net/devtimer.c @@ -2,7 +2,7 @@ * Copyright (c) 2004,2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * devtimer.c + * devtimer.c * - timer source based on */ @@ -42,10 +42,10 @@ #include #include #include -#include +#include #ifdef DEVTIMER_DEBUG -#define _devtimer_printf printf +#define _devtimer_printf printf #else /* !DEVTIMER_DEBUG */ static __inline__ void _devtimer_printf(__unused const char * fmt, ...) @@ -54,228 +54,215 @@ _devtimer_printf(__unused const char * fmt, ...) #endif /* !DEVTIMER_DEBUG */ struct devtimer_s { - void * dt_callout; - devtimer_timeout_func dt_timeout_func; - devtimer_process_func dt_process_func; - void * dt_arg0; - void * dt_arg1; - void * dt_arg2; - int dt_generation; - UInt32 dt_retain_count; + void * dt_callout; + devtimer_timeout_func dt_timeout_func; + devtimer_process_func dt_process_func; + void * dt_arg0; + void * dt_arg1; + void * dt_arg2; + int dt_generation; + struct os_refcnt dt_retain_count; }; -#define M_DEVTIMER M_DEVBUF +#define M_DEVTIMER M_DEVBUF static __inline__ void timeval_add(struct timeval tv1, struct timeval tv2, - struct timeval * result) + struct timeval * result) { - result->tv_sec = tv1.tv_sec + tv2.tv_sec; - result->tv_usec = tv1.tv_usec + tv2.tv_usec; - if (result->tv_usec > DEVTIMER_USECS_PER_SEC) { - result->tv_usec -= DEVTIMER_USECS_PER_SEC; - result->tv_sec++; - } - return; + result->tv_sec = tv1.tv_sec + tv2.tv_sec; + result->tv_usec = tv1.tv_usec + tv2.tv_usec; + if (result->tv_usec > DEVTIMER_USECS_PER_SEC) { + result->tv_usec -= DEVTIMER_USECS_PER_SEC; + result->tv_sec++; + } + return; } static __inline__ uint64_t timeval_to_absolutetime(struct timeval tv) { - uint64_t secs; - uint64_t usecs; + uint64_t secs; + uint64_t usecs; - clock_interval_to_absolutetime_interval(tv.tv_sec, NSEC_PER_SEC, - &secs); - clock_interval_to_absolutetime_interval(tv.tv_usec, NSEC_PER_USEC, - &usecs); - return (secs + usecs); + clock_interval_to_absolutetime_interval(tv.tv_sec, NSEC_PER_SEC, + &secs); + clock_interval_to_absolutetime_interval(tv.tv_usec, NSEC_PER_USEC, + &usecs); + return secs + usecs; } __private_extern__ int devtimer_valid(devtimer_ref timer) { - return (timer->dt_callout != NULL); + return timer->dt_callout != NULL; } __private_extern__ void devtimer_retain(devtimer_ref timer) { - OSIncrementAtomic(&timer->dt_retain_count); - return; + os_ref_retain(&timer->dt_retain_count); } __private_extern__ void devtimer_invalidate(devtimer_ref timer) { - devtimer_cancel(timer); - timer->dt_arg0 = NULL; - if (timer->dt_callout != NULL) { - thread_call_free(timer->dt_callout); - timer->dt_callout = NULL; - } - return; + devtimer_cancel(timer); + timer->dt_arg0 = NULL; + if (timer->dt_callout != NULL) { + thread_call_free(timer->dt_callout); + timer->dt_callout = NULL; + } + return; } __private_extern__ void devtimer_release(devtimer_ref timer) { - UInt32 old_retain_count; - - old_retain_count = OSDecrementAtomic(&timer->dt_retain_count); - switch (old_retain_count) { - case 0: - panic("devtimer_release: retain count is 0\n"); - break; - case 
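The devtimer change below swaps a hand-rolled UInt32 retain count manipulated with OSIncrementAtomic() and OSDecrementAtomic() for os_refcnt: os_ref_init() seeds the count, and os_ref_release() returns the new count, so "reached zero" becomes the signal to invalidate and free, with over-release trapping inside the refcount machinery instead of the old explicit panic. The same discipline in portable C11 atomics, names hypothetical:

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj { atomic_uint refs; /* payload ... */ };

	/* Creation seeds refs to 1, as os_ref_init() does for the timer. */

	static void
	obj_retain(struct obj *o)
	{
		atomic_fetch_add(&o->refs, 1);
	}

	static void
	obj_release(struct obj *o)
	{
		unsigned prev = atomic_fetch_sub(&o->refs, 1);

		assert(prev != 0);        /* over-release, like os_ref's trap */
		if (prev == 1)            /* new count is zero: last reference */
			free(o);
	}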
1: - devtimer_invalidate(timer); - FREE(timer, M_DEVTIMER); - _devtimer_printf("devtimer: timer released\n"); - break; - default: - break; - } - return; + if (os_ref_release(&timer->dt_retain_count) == 0) { + devtimer_invalidate(timer); + FREE(timer, M_DEVTIMER); + _devtimer_printf("devtimer: timer released\n"); + } } static void devtimer_process(void * param0, void * param1) { - int generation = *(int*)param1; - devtimer_process_func process_func; - devtimer_timeout_func timeout_func; - devtimer_ref timer = (devtimer_ref)param0; + int generation = *(int*)param1; + devtimer_process_func process_func; + devtimer_timeout_func timeout_func; + devtimer_ref timer = (devtimer_ref)param0; - process_func = timer->dt_process_func; - if (process_func != NULL) { - (*process_func)(timer, devtimer_process_func_event_lock); - } - timeout_func = timer->dt_timeout_func; - if (timeout_func != NULL) { - timer->dt_timeout_func = NULL; - if (timer->dt_generation == generation) { - (*timeout_func)(timer->dt_arg0, timer->dt_arg1, timer->dt_arg2); + process_func = timer->dt_process_func; + if (process_func != NULL) { + (*process_func)(timer, devtimer_process_func_event_lock); + } + timeout_func = timer->dt_timeout_func; + if (timeout_func != NULL) { + timer->dt_timeout_func = NULL; + if (timer->dt_generation == generation) { + (*timeout_func)(timer->dt_arg0, timer->dt_arg1, timer->dt_arg2); + } + } + devtimer_release(timer); + if (process_func != NULL) { + (*process_func)(timer, devtimer_process_func_event_unlock); } - } - devtimer_release(timer); - if (process_func != NULL) { - (*process_func)(timer, devtimer_process_func_event_unlock); - } - return; + return; } __private_extern__ void * devtimer_arg0(devtimer_ref timer) { - return (timer->dt_arg0); + return timer->dt_arg0; } __private_extern__ devtimer_ref devtimer_create(devtimer_process_func process_func, void * arg0) { - devtimer_ref timer; + devtimer_ref timer; - timer = _MALLOC(sizeof(*timer), M_DEVTIMER, M_WAITOK | M_ZERO); - if (timer == NULL) { - return (timer); - } - devtimer_retain(timer); - timer->dt_callout = thread_call_allocate(devtimer_process, timer); - if (timer->dt_callout == NULL) { - _devtimer_printf("devtimer: thread_call_allocate failed\n"); - devtimer_release(timer); - timer = NULL; - } - timer->dt_process_func = process_func; - timer->dt_arg0 = arg0; - return (timer); + timer = _MALLOC(sizeof(*timer), M_DEVTIMER, M_WAITOK | M_ZERO); + if (timer == NULL) { + return timer; + } + os_ref_init(&timer->dt_retain_count, NULL); + timer->dt_callout = thread_call_allocate(devtimer_process, timer); + if (timer->dt_callout == NULL) { + _devtimer_printf("devtimer: thread_call_allocate failed\n"); + devtimer_release(timer); + timer = NULL; + } + timer->dt_process_func = process_func; + timer->dt_arg0 = arg0; + return timer; } __private_extern__ void -devtimer_set_absolute(devtimer_ref timer, - struct timeval abs_time, - devtimer_timeout_func timeout_func, - void * arg1, void * arg2) +devtimer_set_absolute(devtimer_ref timer, + struct timeval abs_time, + devtimer_timeout_func timeout_func, + void * arg1, void * arg2) { - if (timer->dt_callout == NULL) { - printf("devtimer_set_absolute: uninitialized/freed timer\n"); - return; - } - devtimer_cancel(timer); - if (timeout_func == NULL) { + if (timer->dt_callout == NULL) { + printf("devtimer_set_absolute: uninitialized/freed timer\n"); + return; + } + devtimer_cancel(timer); + if (timeout_func == NULL) { + return; + } + timer->dt_timeout_func = timeout_func; + timer->dt_arg1 = arg1; + timer->dt_arg2 = 
arg2; + _devtimer_printf("devtimer: wakeup time is (%d.%d)\n", + abs_time.tv_sec, abs_time.tv_usec); + timer->dt_generation++; + devtimer_retain(timer); + thread_call_enter1_delayed(timer->dt_callout, + &timer->dt_generation, + timeval_to_absolutetime(abs_time)); return; - } - timer->dt_timeout_func = timeout_func; - timer->dt_arg1 = arg1; - timer->dt_arg2 = arg2; - _devtimer_printf("devtimer: wakeup time is (%d.%d)\n", - abs_time.tv_sec, abs_time.tv_usec); - timer->dt_generation++; - devtimer_retain(timer); - thread_call_enter1_delayed(timer->dt_callout, - &timer->dt_generation, - timeval_to_absolutetime(abs_time)); - return; } __private_extern__ void -devtimer_set_relative(devtimer_ref timer, - struct timeval rel_time, - devtimer_timeout_func timeout_func, - void * arg1, void * arg2) +devtimer_set_relative(devtimer_ref timer, + struct timeval rel_time, + devtimer_timeout_func timeout_func, + void * arg1, void * arg2) { - struct timeval abs_time; - struct timeval current_time; + struct timeval abs_time; + struct timeval current_time; - current_time = devtimer_current_time(); - timeval_add(current_time, rel_time, &abs_time); - devtimer_set_absolute(timer, abs_time, timeout_func, arg1, arg2); - return; + current_time = devtimer_current_time(); + timeval_add(current_time, rel_time, &abs_time); + devtimer_set_absolute(timer, abs_time, timeout_func, arg1, arg2); + return; } __private_extern__ void devtimer_cancel(devtimer_ref timer) { - if (timer->dt_timeout_func != NULL) { - timer->dt_timeout_func = NULL; - if (timer->dt_callout != NULL) { - _devtimer_printf("devtimer: cancelling timer source\n"); - if (thread_call_cancel(timer->dt_callout)) { - devtimer_release(timer); - } - else { - _devtimer_printf("devtimer: delayed release\n"); - } + if (timer->dt_timeout_func != NULL) { + timer->dt_timeout_func = NULL; + if (timer->dt_callout != NULL) { + _devtimer_printf("devtimer: cancelling timer source\n"); + if (thread_call_cancel(timer->dt_callout)) { + devtimer_release(timer); + } else { + _devtimer_printf("devtimer: delayed release\n"); + } + } } - } - return; + return; } __private_extern__ int devtimer_enabled(devtimer_ref timer) { - return (timer->dt_timeout_func != NULL); + return timer->dt_timeout_func != NULL; } __private_extern__ int32_t devtimer_current_secs(void) { - struct timeval tv; + struct timeval tv; - tv = devtimer_current_time(); - return (tv.tv_sec); + tv = devtimer_current_time(); + return tv.tv_sec; } __private_extern__ struct timeval devtimer_current_time(void) { - struct timeval tv; - clock_sec_t sec; - clock_usec_t usec; + struct timeval tv; + clock_sec_t sec; + clock_usec_t usec; - clock_get_system_microtime(&sec, &usec); - tv.tv_sec = sec; - tv.tv_usec = usec; - return (tv); + clock_get_system_microtime(&sec, &usec); + tv.tv_sec = sec; + tv.tv_usec = usec; + return tv; } diff --git a/bsd/net/devtimer.h b/bsd/net/devtimer.h index 9504f22e8..c0275b2a5 100644 --- a/bsd/net/devtimer.h +++ b/bsd/net/devtimer.h @@ -2,7 +2,7 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * devtimer.h + * devtimer.h * - timer source based on */ @@ -38,17 +38,17 @@ #include #include -#define DEVTIMER_USECS_PER_SEC (1000 * 1000) +#define DEVTIMER_USECS_PER_SEC (1000 * 1000) enum { - devtimer_process_func_event_lock, - devtimer_process_func_event_unlock, + devtimer_process_func_event_lock, + devtimer_process_func_event_unlock, }; typedef int devtimer_process_func_event; typedef struct devtimer_s * devtimer_ref; typedef void (*devtimer_process_func)(devtimer_ref timer, - devtimer_process_func_event event); + devtimer_process_func_event event); typedef void (*devtimer_timeout_func)(void * arg0, void * arg1, void * arg2); int @@ -71,15 +71,15 @@ devtimer_release(devtimer_ref timer); void devtimer_set_absolute(devtimer_ref t, - struct timeval abs_time, - devtimer_timeout_func func, - void * arg1, void * arg2); + struct timeval abs_time, + devtimer_timeout_func func, + void * arg1, void * arg2); void devtimer_set_relative(devtimer_ref t, - struct timeval rel_time, - devtimer_timeout_func func, - void * arg1, void * arg2); + struct timeval rel_time, + devtimer_timeout_func func, + void * arg1, void * arg2); void devtimer_cancel(devtimer_ref t); diff --git a/bsd/net/dlil.c b/bsd/net/dlil.c index cd4d8d963..39ce6ac9e 100644 --- a/bsd/net/dlil.c +++ b/bsd/net/dlil.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2018 Apple Inc. All rights reserved. + * Copyright (c) 1999-2019 Apple Inc. All rights reserved. 
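
For orientation before the large dlil.c changes below, here is a minimal sketch of a devtimer client. It is based only on the interface shown above, assuming <net/devtimer.h> plus the usual kernel headers; my_softc, my_lock(), my_unlock() and the five-second delay are invented for illustration. Note the lifecycle implied by devtimer.c as patched: devtimer_create() seeds the reference count at one (now via os_ref_init() instead of the hand-rolled OSIncrementAtomic/OSDecrementAtomic pair), each devtimer_set_*() call takes an extra reference for the pending thread call and bumps dt_generation, and a callout that fires after a cancel/re-arm finds a stale generation and returns without running the handler.

#include <net/devtimer.h>

struct my_softc {
    devtimer_ref    timer;
    /* ... driver state, guarded by a private lock ... */
};

static void my_lock(void *);    /* hypothetical */
static void my_unlock(void *);  /* hypothetical */

/*
 * devtimer invokes this around timeout delivery so the owner can
 * take and drop its own lock while the handler runs.
 */
static void
my_process(devtimer_ref timer, devtimer_process_func_event event)
{
    if (event == devtimer_process_func_event_lock) {
        my_lock(devtimer_arg0(timer));
    } else if (event == devtimer_process_func_event_unlock) {
        my_unlock(devtimer_arg0(timer));
    }
}

static void
my_timeout(void *arg0, void *arg1, void *arg2)
{
#pragma unused(arg0, arg1, arg2)
    /* fires at most once per arm; re-arm from here if periodic */
}

static void
my_start(struct my_softc *sc)
{
    struct timeval delay = { .tv_sec = 5, .tv_usec = 0 };

    sc->timer = devtimer_create(my_process, sc);    /* ref count 1 */
    if (sc->timer != NULL) {
        /* takes a second reference for the pending callout */
        devtimer_set_relative(sc->timer, delay, my_timeout,
            NULL, NULL);
    }
}

static void
my_stop(struct my_softc *sc)
{
    devtimer_cancel(sc->timer);   /* drops the callout's reference,
                                   * if it had not fired yet */
    devtimer_release(sc->timer);  /* drops ours; frees at zero */
}
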
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -131,31 +131,31 @@ #endif /* NECP */ -#define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0) -#define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2) -#define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8)) -#define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8)) -#define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8)) +#define DBG_LAYER_BEG DLILDBG_CODE(DBG_DLIL_STATIC, 0) +#define DBG_LAYER_END DLILDBG_CODE(DBG_DLIL_STATIC, 2) +#define DBG_FNC_DLIL_INPUT DLILDBG_CODE(DBG_DLIL_STATIC, (1 << 8)) +#define DBG_FNC_DLIL_OUTPUT DLILDBG_CODE(DBG_DLIL_STATIC, (2 << 8)) +#define DBG_FNC_DLIL_IFOUT DLILDBG_CODE(DBG_DLIL_STATIC, (3 << 8)) -#define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */ -#define MAX_LINKADDR 4 /* LONGWORDS */ -#define M_NKE M_IFADDR +#define MAX_FRAME_TYPE_SIZE 4 /* LONGWORDS */ +#define MAX_LINKADDR 4 /* LONGWORDS */ +#define M_NKE M_IFADDR #if 1 -#define DLIL_PRINTF printf +#define DLIL_PRINTF printf #else -#define DLIL_PRINTF kprintf +#define DLIL_PRINTF kprintf #endif -#define IF_DATA_REQUIRE_ALIGNED_64(f) \ +#define IF_DATA_REQUIRE_ALIGNED_64(f) \ _CASSERT(!(offsetof(struct if_data_internal, f) % sizeof (u_int64_t))) -#define IFNET_IF_DATA_REQUIRE_ALIGNED_64(f) \ +#define IFNET_IF_DATA_REQUIRE_ALIGNED_64(f) \ _CASSERT(!(offsetof(struct ifnet, if_data.f) % sizeof (u_int64_t))) enum { - kProtoKPI_v1 = 1, - kProtoKPI_v2 = 2 + kProtoKPI_v1 = 1, + kProtoKPI_v2 = 2 }; /* @@ -165,101 +165,101 @@ enum { * a reference to it is valid, via if_proto_ref(). */ struct if_proto { - SLIST_ENTRY(if_proto) next_hash; - u_int32_t refcount; - u_int32_t detached; - struct ifnet *ifp; - protocol_family_t protocol_family; - int proto_kpi; - union { + SLIST_ENTRY(if_proto) next_hash; + u_int32_t refcount; + u_int32_t detached; + struct ifnet *ifp; + protocol_family_t protocol_family; + int proto_kpi; + union { struct { - proto_media_input input; - proto_media_preout pre_output; - proto_media_event event; - proto_media_ioctl ioctl; - proto_media_detached detached; - proto_media_resolve_multi resolve_multi; - proto_media_send_arp send_arp; + proto_media_input input; + proto_media_preout pre_output; + proto_media_event event; + proto_media_ioctl ioctl; + proto_media_detached detached; + proto_media_resolve_multi resolve_multi; + proto_media_send_arp send_arp; } v1; struct { - proto_media_input_v2 input; - proto_media_preout pre_output; - proto_media_event event; - proto_media_ioctl ioctl; - proto_media_detached detached; - proto_media_resolve_multi resolve_multi; - proto_media_send_arp send_arp; + proto_media_input_v2 input; + proto_media_preout pre_output; + proto_media_event event; + proto_media_ioctl ioctl; + proto_media_detached detached; + proto_media_resolve_multi resolve_multi; + proto_media_send_arp send_arp; } v2; } kpi; }; SLIST_HEAD(proto_hash_entry, if_proto); -#define DLIL_SDLDATALEN \ +#define DLIL_SDLDATALEN \ (DLIL_SDLMAXLEN - offsetof(struct sockaddr_dl, sdl_data[0])) struct dlil_ifnet { - struct ifnet dl_if; /* public ifnet */ + struct ifnet dl_if; /* public ifnet */ /* * DLIL private fields, protected by dl_if_lock */ decl_lck_mtx_data(, dl_if_lock); - TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnet link */ - u_int32_t dl_if_flags; /* flags (below) */ - u_int32_t dl_if_refcnt; /* refcnt */ + TAILQ_ENTRY(dlil_ifnet) dl_if_link; /* dlil_ifnet link */ + u_int32_t dl_if_flags; /* flags (below) */ + u_int32_t dl_if_refcnt; /* refcnt */ void (*dl_if_trace)(struct dlil_ifnet *, int); /* ref trace callback */ - 
void *dl_if_uniqueid; /* unique interface id */ - size_t dl_if_uniqueid_len; /* length of the unique id */ - char dl_if_namestorage[IFNAMSIZ]; /* interface name storage */ - char dl_if_xnamestorage[IFXNAMSIZ]; /* external name storage */ + void *dl_if_uniqueid; /* unique interface id */ + size_t dl_if_uniqueid_len; /* length of the unique id */ + char dl_if_namestorage[IFNAMSIZ]; /* interface name storage */ + char dl_if_xnamestorage[IFXNAMSIZ]; /* external name storage */ struct { - struct ifaddr ifa; /* lladdr ifa */ - u_int8_t asdl[DLIL_SDLMAXLEN]; /* addr storage */ - u_int8_t msdl[DLIL_SDLMAXLEN]; /* mask storage */ + struct ifaddr ifa; /* lladdr ifa */ + u_int8_t asdl[DLIL_SDLMAXLEN]; /* addr storage */ + u_int8_t msdl[DLIL_SDLMAXLEN]; /* mask storage */ } dl_if_lladdr; u_int8_t dl_if_descstorage[IF_DESCSIZE]; /* desc storage */ struct dlil_threading_info dl_if_inpstorage; /* input thread storage */ - ctrace_t dl_if_attach; /* attach PC stacktrace */ - ctrace_t dl_if_detach; /* detach PC stacktrace */ + ctrace_t dl_if_attach; /* attach PC stacktrace */ + ctrace_t dl_if_detach; /* detach PC stacktrace */ }; /* Values for dl_if_flags (private to DLIL) */ -#define DLIF_INUSE 0x1 /* DLIL ifnet recycler, ifnet in use */ -#define DLIF_REUSE 0x2 /* DLIL ifnet recycles, ifnet is not new */ -#define DLIF_DEBUG 0x4 /* has debugging info */ +#define DLIF_INUSE 0x1 /* DLIL ifnet recycler, ifnet in use */ +#define DLIF_REUSE 0x2 /* DLIL ifnet recycles, ifnet is not new */ +#define DLIF_DEBUG 0x4 /* has debugging info */ -#define IF_REF_TRACE_HIST_SIZE 8 /* size of ref trace history */ +#define IF_REF_TRACE_HIST_SIZE 8 /* size of ref trace history */ /* For gdb */ __private_extern__ unsigned int if_ref_trace_hist_size = IF_REF_TRACE_HIST_SIZE; struct dlil_ifnet_dbg { - struct dlil_ifnet dldbg_dlif; /* dlil_ifnet */ - u_int16_t dldbg_if_refhold_cnt; /* # ifnet references */ - u_int16_t dldbg_if_refrele_cnt; /* # ifnet releases */ + struct dlil_ifnet dldbg_dlif; /* dlil_ifnet */ + u_int16_t dldbg_if_refhold_cnt; /* # ifnet references */ + u_int16_t dldbg_if_refrele_cnt; /* # ifnet releases */ /* * Circular lists of ifnet_{reference,release} callers. 
*/ - ctrace_t dldbg_if_refhold[IF_REF_TRACE_HIST_SIZE]; - ctrace_t dldbg_if_refrele[IF_REF_TRACE_HIST_SIZE]; + ctrace_t dldbg_if_refhold[IF_REF_TRACE_HIST_SIZE]; + ctrace_t dldbg_if_refrele[IF_REF_TRACE_HIST_SIZE]; }; -#define DLIL_TO_IFP(s) (&s->dl_if) -#define IFP_TO_DLIL(s) ((struct dlil_ifnet *)s) +#define DLIL_TO_IFP(s) (&s->dl_if) +#define IFP_TO_DLIL(s) ((struct dlil_ifnet *)s) struct ifnet_filter { - TAILQ_ENTRY(ifnet_filter) filt_next; - u_int32_t filt_skip; - u_int32_t filt_flags; - ifnet_t filt_ifp; - const char *filt_name; - void *filt_cookie; - protocol_family_t filt_protocol; - iff_input_func filt_input; - iff_output_func filt_output; - iff_event_func filt_event; - iff_ioctl_func filt_ioctl; - iff_detached_func filt_detached; + TAILQ_ENTRY(ifnet_filter) filt_next; + u_int32_t filt_skip; + u_int32_t filt_flags; + ifnet_t filt_ifp; + const char *filt_name; + void *filt_cookie; + protocol_family_t filt_protocol; + iff_input_func filt_input; + iff_output_func filt_output; + iff_event_func filt_event; + iff_ioctl_func filt_ioctl; + iff_detached_func filt_detached; }; struct proto_input_entry; @@ -276,48 +276,48 @@ decl_lck_mtx_data(static, dlil_ifnet_lock); u_int32_t dlil_filter_disable_tso_count = 0; #if DEBUG -static unsigned int ifnet_debug = 1; /* debugging (enabled) */ +static unsigned int ifnet_debug = 1; /* debugging (enabled) */ #else -static unsigned int ifnet_debug; /* debugging (disabled) */ +static unsigned int ifnet_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int dlif_size; /* size of dlil_ifnet to allocate */ -static unsigned int dlif_bufsize; /* size of dlif_size + headroom */ -static struct zone *dlif_zone; /* zone for dlil_ifnet */ +static unsigned int dlif_size; /* size of dlil_ifnet to allocate */ +static unsigned int dlif_bufsize; /* size of dlif_size + headroom */ +static struct zone *dlif_zone; /* zone for dlil_ifnet */ -#define DLIF_ZONE_MAX IFNETS_MAX /* maximum elements in zone */ -#define DLIF_ZONE_NAME "ifnet" /* zone name */ +#define DLIF_ZONE_MAX IFNETS_MAX /* maximum elements in zone */ +#define DLIF_ZONE_NAME "ifnet" /* zone name */ -static unsigned int dlif_filt_size; /* size of ifnet_filter */ -static struct zone *dlif_filt_zone; /* zone for ifnet_filter */ +static unsigned int dlif_filt_size; /* size of ifnet_filter */ +static struct zone *dlif_filt_zone; /* zone for ifnet_filter */ -#define DLIF_FILT_ZONE_MAX 8 /* maximum elements in zone */ -#define DLIF_FILT_ZONE_NAME "ifnet_filter" /* zone name */ +#define DLIF_FILT_ZONE_MAX 8 /* maximum elements in zone */ +#define DLIF_FILT_ZONE_NAME "ifnet_filter" /* zone name */ -static unsigned int dlif_phash_size; /* size of ifnet proto hash table */ -static struct zone *dlif_phash_zone; /* zone for ifnet proto hash table */ +static unsigned int dlif_phash_size; /* size of ifnet proto hash table */ +static struct zone *dlif_phash_zone; /* zone for ifnet proto hash table */ -#define DLIF_PHASH_ZONE_MAX DLIF_ZONE_MAX /* maximum elements in zone */ -#define DLIF_PHASH_ZONE_NAME "ifnet_proto_hash" /* zone name */ +#define DLIF_PHASH_ZONE_MAX DLIF_ZONE_MAX /* maximum elements in zone */ +#define DLIF_PHASH_ZONE_NAME "ifnet_proto_hash" /* zone name */ -static unsigned int dlif_proto_size; /* size of if_proto */ -static struct zone *dlif_proto_zone; /* zone for if_proto */ +static unsigned int dlif_proto_size; /* size of if_proto */ +static struct zone *dlif_proto_zone; /* zone for if_proto */ -#define DLIF_PROTO_ZONE_MAX (DLIF_ZONE_MAX*2) /* maximum elements in zone */ -#define 
DLIF_PROTO_ZONE_NAME "ifnet_proto" /* zone name */ +#define DLIF_PROTO_ZONE_MAX (DLIF_ZONE_MAX*2) /* maximum elements in zone */ +#define DLIF_PROTO_ZONE_NAME "ifnet_proto" /* zone name */ -static unsigned int dlif_tcpstat_size; /* size of tcpstat_local to allocate */ +static unsigned int dlif_tcpstat_size; /* size of tcpstat_local to allocate */ static unsigned int dlif_tcpstat_bufsize; /* size of dlif_tcpstat_size + headroom */ -static struct zone *dlif_tcpstat_zone; /* zone for tcpstat_local */ +static struct zone *dlif_tcpstat_zone; /* zone for tcpstat_local */ -#define DLIF_TCPSTAT_ZONE_MAX 1 /* maximum elements in zone */ -#define DLIF_TCPSTAT_ZONE_NAME "ifnet_tcpstat" /* zone name */ +#define DLIF_TCPSTAT_ZONE_MAX 1 /* maximum elements in zone */ +#define DLIF_TCPSTAT_ZONE_NAME "ifnet_tcpstat" /* zone name */ -static unsigned int dlif_udpstat_size; /* size of udpstat_local to allocate */ -static unsigned int dlif_udpstat_bufsize; /* size of dlif_udpstat_size + headroom */ -static struct zone *dlif_udpstat_zone; /* zone for udpstat_local */ +static unsigned int dlif_udpstat_size; /* size of udpstat_local to allocate */ +static unsigned int dlif_udpstat_bufsize; /* size of dlif_udpstat_size + headroom */ +static struct zone *dlif_udpstat_zone; /* zone for udpstat_local */ -#define DLIF_UDPSTAT_ZONE_MAX 1 /* maximum elements in zone */ -#define DLIF_UDPSTAT_ZONE_NAME "ifnet_udpstat" /* zone name */ +#define DLIF_UDPSTAT_ZONE_MAX 1 /* maximum elements in zone */ +#define DLIF_UDPSTAT_ZONE_NAME "ifnet_udpstat" /* zone name */ static u_int32_t net_rtref; @@ -332,7 +332,7 @@ static void if_proto_ref(struct if_proto *); static void if_proto_free(struct if_proto *); static struct if_proto *find_attached_proto(struct ifnet *, u_int32_t); static u_int32_t dlil_ifp_protolist(struct ifnet *ifp, protocol_family_t *list, - u_int32_t list_count); + u_int32_t list_count); static void if_flt_monitor_busy(struct ifnet *); static void if_flt_monitor_unbusy(struct ifnet *); static void if_flt_monitor_enter(struct ifnet *); @@ -403,7 +403,7 @@ static void dlil_input_packet_list_common(struct ifnet *, struct mbuf *, u_int32_t, ifnet_model_t, boolean_t); static errno_t ifnet_input_common(struct ifnet *, struct mbuf *, struct mbuf *, const struct ifnet_stat_increment_param *, boolean_t, boolean_t); -static int dlil_is_clat_needed(protocol_family_t , mbuf_t ); +static int dlil_is_clat_needed(protocol_family_t, mbuf_t ); static errno_t dlil_clat46(ifnet_t, protocol_family_t *, mbuf_t *); static errno_t dlil_clat64(ifnet_t, protocol_family_t *, mbuf_t *); #if DEBUG || DEVELOPMENT @@ -455,29 +455,29 @@ static int sysctl_input_thread_termination_spin SYSCTL_HANDLER_ARGS; /* The following are protected by dlil_ifnet_lock */ static TAILQ_HEAD(, ifnet) ifnet_detaching_head; static u_int32_t ifnet_detaching_cnt; -static void *ifnet_delayed_run; /* wait channel for detaching thread */ +static void *ifnet_delayed_run; /* wait channel for detaching thread */ decl_lck_mtx_data(static, ifnet_fc_lock); static uint32_t ifnet_flowhash_seed; struct ifnet_flowhash_key { - char ifk_name[IFNAMSIZ]; - uint32_t ifk_unit; - uint32_t ifk_flags; - uint32_t ifk_eflags; - uint32_t ifk_capabilities; - uint32_t ifk_capenable; - uint32_t ifk_output_sched_model; - uint32_t ifk_rand1; - uint32_t ifk_rand2; + char ifk_name[IFNAMSIZ]; + uint32_t ifk_unit; + uint32_t ifk_flags; + uint32_t ifk_eflags; + uint32_t ifk_capabilities; + uint32_t ifk_capenable; + uint32_t ifk_output_sched_model; + uint32_t ifk_rand1; + uint32_t ifk_rand2; }; /* Flow 
control entry per interface */ struct ifnet_fc_entry { RB_ENTRY(ifnet_fc_entry) ifce_entry; - u_int32_t ifce_flowhash; - struct ifnet *ifce_ifp; + u_int32_t ifce_flowhash; + struct ifnet *ifce_ifp; }; static uint32_t ifnet_calc_flowhash(struct ifnet *); @@ -492,19 +492,19 @@ RB_HEAD(ifnet_fc_tree, ifnet_fc_entry) ifnet_fc_tree; RB_PROTOTYPE(ifnet_fc_tree, ifnet_fc_entry, ifce_entry, ifce_cmp); RB_GENERATE(ifnet_fc_tree, ifnet_fc_entry, ifce_entry, ifce_cmp); -static unsigned int ifnet_fc_zone_size; /* sizeof ifnet_fc_entry */ -static struct zone *ifnet_fc_zone; /* ifnet_fc_entry zone */ +static unsigned int ifnet_fc_zone_size; /* sizeof ifnet_fc_entry */ +static struct zone *ifnet_fc_zone; /* ifnet_fc_entry zone */ -#define IFNET_FC_ZONE_NAME "ifnet_fc_zone" -#define IFNET_FC_ZONE_MAX 32 +#define IFNET_FC_ZONE_NAME "ifnet_fc_zone" +#define IFNET_FC_ZONE_MAX 32 extern void bpfdetach(struct ifnet *); extern void proto_input_run(void); extern uint32_t udp_count_opportunistic(unsigned int ifindex, - u_int32_t flags); + u_int32_t flags); extern uint32_t tcp_count_opportunistic(unsigned int ifindex, - u_int32_t flags); + u_int32_t flags); __private_extern__ void link_rtrequest(int, struct rtentry *, struct sockaddr *); @@ -533,70 +533,70 @@ SYSCTL_DECL(_net_link_generic_system); SYSCTL_INT(_net_link_generic_system, OID_AUTO, dlil_verbose, CTLFLAG_RW | CTLFLAG_LOCKED, &dlil_verbose, 0, "Log DLIL error messages"); -#define IF_SNDQ_MINLEN 32 +#define IF_SNDQ_MINLEN 32 u_int32_t if_sndq_maxlen = IFQ_MAXLEN; SYSCTL_PROC(_net_link_generic_system, OID_AUTO, sndq_maxlen, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &if_sndq_maxlen, IFQ_MAXLEN, sysctl_sndq_maxlen, "I", "Default transmit queue max length"); -#define IF_RCVQ_MINLEN 32 -#define IF_RCVQ_MAXLEN 256 +#define IF_RCVQ_MINLEN 32 +#define IF_RCVQ_MAXLEN 256 u_int32_t if_rcvq_maxlen = IF_RCVQ_MAXLEN; SYSCTL_PROC(_net_link_generic_system, OID_AUTO, rcvq_maxlen, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &if_rcvq_maxlen, IFQ_MAXLEN, sysctl_rcvq_maxlen, "I", "Default receive queue max length"); -#define IF_RXPOLL_DECAY 2 /* ilog2 of EWMA decay rate (4) */ +#define IF_RXPOLL_DECAY 2 /* ilog2 of EWMA decay rate (4) */ static u_int32_t if_rxpoll_decay = IF_RXPOLL_DECAY; SYSCTL_UINT(_net_link_generic_system, OID_AUTO, rxpoll_decay, CTLFLAG_RW | CTLFLAG_LOCKED, &if_rxpoll_decay, IF_RXPOLL_DECAY, "ilog2 of EWMA decay rate of avg inbound packets"); -#define IF_RXPOLL_MODE_HOLDTIME_MIN (10ULL * 1000 * 1000) /* 10 ms */ -#define IF_RXPOLL_MODE_HOLDTIME (1000ULL * 1000 * 1000) /* 1 sec */ +#define IF_RXPOLL_MODE_HOLDTIME_MIN (10ULL * 1000 * 1000) /* 10 ms */ +#define IF_RXPOLL_MODE_HOLDTIME (1000ULL * 1000 * 1000) /* 1 sec */ static u_int64_t if_rxpoll_mode_holdtime = IF_RXPOLL_MODE_HOLDTIME; SYSCTL_PROC(_net_link_generic_system, OID_AUTO, rxpoll_freeze_time, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &if_rxpoll_mode_holdtime, IF_RXPOLL_MODE_HOLDTIME, sysctl_rxpoll_mode_holdtime, "Q", "input poll mode freeze time"); -#define IF_RXPOLL_SAMPLETIME_MIN (1ULL * 1000 * 1000) /* 1 ms */ -#define IF_RXPOLL_SAMPLETIME (10ULL * 1000 * 1000) /* 10 ms */ +#define IF_RXPOLL_SAMPLETIME_MIN (1ULL * 1000 * 1000) /* 1 ms */ +#define IF_RXPOLL_SAMPLETIME (10ULL * 1000 * 1000) /* 10 ms */ static u_int64_t if_rxpoll_sample_holdtime = IF_RXPOLL_SAMPLETIME; SYSCTL_PROC(_net_link_generic_system, OID_AUTO, rxpoll_sample_time, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &if_rxpoll_sample_holdtime, IF_RXPOLL_SAMPLETIME, sysctl_rxpoll_sample_holdtime, "Q", "input poll sampling time"); 
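
The poll-mode knobs above feed a divide-free moving average: if_rxpoll_decay holds the log2 of the decay rate, and the DLIL_EWMA() macro defined further down in this file keeps (2^decay - 1)/2^decay of the old average while blending in 1/2^decay of each new packet or byte count. A standalone rendering of the same arithmetic, with names invented for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Same update as DLIL_EWMA(): with decay == 2 (the if_rxpoll_decay
 * default, i.e. ilog2 of 4), each step keeps 3/4 of the old average
 * and adds 1/4 of the new sample, using only shifts.
 */
static uint32_t
ewma(uint32_t avg, uint32_t sample, uint32_t decay)
{
    if (avg == 0) {
        return sample;  /* the first sample seeds the average */
    }
    return (((avg << decay) - avg) + sample) >> decay;
}

int
main(void)
{
    static const uint32_t samples[] = { 400, 400, 40, 40, 40 };
    uint32_t avg = 0;

    for (unsigned int i = 0; i < 5; i++) {
        avg = ewma(avg, samples[i], 2);
        printf("sample %u -> avg %u\n", samples[i], avg);
    }
    return 0;
}

The averaged inbound packet and byte rates are later compared against the rxpoll_tbl watermarks (see below) to decide when an input thread flips between interrupt and polling mode.
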
-#define IF_RXPOLL_INTERVALTIME_MIN (1ULL * 1000) /* 1 us */ -#define IF_RXPOLL_INTERVALTIME (1ULL * 1000 * 1000) /* 1 ms */ +#define IF_RXPOLL_INTERVALTIME_MIN (1ULL * 1000) /* 1 us */ +#define IF_RXPOLL_INTERVALTIME (1ULL * 1000 * 1000) /* 1 ms */ static u_int64_t if_rxpoll_interval_time = IF_RXPOLL_INTERVALTIME; SYSCTL_PROC(_net_link_generic_system, OID_AUTO, rxpoll_interval_time, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &if_rxpoll_interval_time, IF_RXPOLL_INTERVALTIME, sysctl_rxpoll_interval_time, "Q", "input poll interval (time)"); -#define IF_RXPOLL_INTERVAL_PKTS 0 /* 0 (disabled) */ +#define IF_RXPOLL_INTERVAL_PKTS 0 /* 0 (disabled) */ static u_int32_t if_rxpoll_interval_pkts = IF_RXPOLL_INTERVAL_PKTS; SYSCTL_UINT(_net_link_generic_system, OID_AUTO, rxpoll_interval_pkts, CTLFLAG_RW | CTLFLAG_LOCKED, &if_rxpoll_interval_pkts, IF_RXPOLL_INTERVAL_PKTS, "input poll interval (packets)"); -#define IF_RXPOLL_WLOWAT 10 +#define IF_RXPOLL_WLOWAT 10 static u_int32_t if_rxpoll_wlowat = IF_RXPOLL_WLOWAT; SYSCTL_PROC(_net_link_generic_system, OID_AUTO, rxpoll_wakeups_lowat, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &if_rxpoll_wlowat, IF_RXPOLL_WLOWAT, sysctl_rxpoll_wlowat, "I", "input poll wakeup low watermark"); -#define IF_RXPOLL_WHIWAT 100 +#define IF_RXPOLL_WHIWAT 100 static u_int32_t if_rxpoll_whiwat = IF_RXPOLL_WHIWAT; SYSCTL_PROC(_net_link_generic_system, OID_AUTO, rxpoll_wakeups_hiwat, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &if_rxpoll_whiwat, IF_RXPOLL_WHIWAT, sysctl_rxpoll_whiwat, "I", "input poll wakeup high watermark"); -static u_int32_t if_rxpoll_max = 0; /* 0 (automatic) */ +static u_int32_t if_rxpoll_max = 0; /* 0 (automatic) */ SYSCTL_UINT(_net_link_generic_system, OID_AUTO, rxpoll_max, CTLFLAG_RW | CTLFLAG_LOCKED, &if_rxpoll_max, 0, "max packets per poll call"); @@ -656,11 +656,11 @@ SYSCTL_UINT(_net_link_generic_system, OID_AUTO, start_delay_disabled, CTLFLAG_RW | CTLFLAG_LOCKED, &ifnet_delay_start_disabled, 0, "number of times start was delayed"); -#define HWCKSUM_DBG_PARTIAL_FORCED 0x1 /* forced partial checksum */ -#define HWCKSUM_DBG_PARTIAL_RXOFF_ADJ 0x2 /* adjust start offset */ -#define HWCKSUM_DBG_FINALIZE_FORCED 0x10 /* forced finalize */ -#define HWCKSUM_DBG_MASK \ - (HWCKSUM_DBG_PARTIAL_FORCED | HWCKSUM_DBG_PARTIAL_RXOFF_ADJ | \ +#define HWCKSUM_DBG_PARTIAL_FORCED 0x1 /* forced partial checksum */ +#define HWCKSUM_DBG_PARTIAL_RXOFF_ADJ 0x2 /* adjust start offset */ +#define HWCKSUM_DBG_FINALIZE_FORCED 0x10 /* forced finalize */ +#define HWCKSUM_DBG_MASK \ + (HWCKSUM_DBG_PARTIAL_FORCED | HWCKSUM_DBG_PARTIAL_RXOFF_ADJ | \ HWCKSUM_DBG_FINALIZE_FORCED) static uint32_t hwcksum_dbg_mode = 0; @@ -739,11 +739,11 @@ uint32_t tx_chain_len_count = 0; SYSCTL_UINT(_net_link_generic_system, OID_AUTO, tx_chain_len_count, CTLFLAG_RW | CTLFLAG_LOCKED, &tx_chain_len_count, 0, ""); -static uint32_t threshold_notify = 1; /* enable/disable */ +static uint32_t threshold_notify = 1; /* enable/disable */ SYSCTL_UINT(_net_link_generic_system, OID_AUTO, threshold_notify, CTLFLAG_RW | CTLFLAG_LOCKED, &threshold_notify, 0, ""); -static uint32_t threshold_interval = 2; /* in seconds */ +static uint32_t threshold_interval = 2; /* in seconds */ SYSCTL_UINT(_net_link_generic_system, OID_AUTO, threshold_interval, CTLFLAG_RW | CTLFLAG_LOCKED, &threshold_interval, 0, ""); @@ -754,18 +754,18 @@ SYSCTL_NODE(_net_link_generic_system, OID_AUTO, get_kao_frames, #endif /* DEVELOPMENT || DEBUG */ struct net_api_stats net_api_stats; -SYSCTL_STRUCT(_net, OID_AUTO, api_stats, CTLFLAG_RD|CTLFLAG_LOCKED, - 
&net_api_stats, net_api_stats, ""); +SYSCTL_STRUCT(_net, OID_AUTO, api_stats, CTLFLAG_RD | CTLFLAG_LOCKED, + &net_api_stats, net_api_stats, ""); unsigned int net_rxpoll = 1; unsigned int net_affinity = 1; static kern_return_t dlil_affinity_set(struct thread *, u_int32_t); -extern u_int32_t inject_buckets; +extern u_int32_t inject_buckets; -static lck_grp_attr_t *dlil_grp_attributes = NULL; -static lck_attr_t *dlil_lck_attributes = NULL; +static lck_grp_attr_t *dlil_grp_attributes = NULL; +static lck_attr_t *dlil_lck_attributes = NULL; /* DLIL data threshold thread call */ static void dlil_dt_tcall_fn(thread_call_param_t, thread_call_param_t); @@ -786,41 +786,41 @@ SYSCTL_UINT(_net_link_generic_system, OID_AUTO, rcv_mit_interval, #endif /* DEVELOPMENT || DEBUG */ -#define DLIL_INPUT_CHECK(m, ifp) { \ - struct ifnet *_rcvif = mbuf_pkthdr_rcvif(m); \ - if (_rcvif == NULL || (ifp != lo_ifp && _rcvif != ifp) || \ - !(mbuf_flags(m) & MBUF_PKTHDR)) { \ - panic_plain("%s: invalid mbuf %p\n", __func__, m); \ - /* NOTREACHED */ \ - } \ +#define DLIL_INPUT_CHECK(m, ifp) { \ + struct ifnet *_rcvif = mbuf_pkthdr_rcvif(m); \ + if (_rcvif == NULL || (ifp != lo_ifp && _rcvif != ifp) || \ + !(mbuf_flags(m) & MBUF_PKTHDR)) { \ + panic_plain("%s: invalid mbuf %p\n", __func__, m); \ + /* NOTREACHED */ \ + } \ } -#define DLIL_EWMA(old, new, decay) do { \ - u_int32_t _avg; \ - if ((_avg = (old)) > 0) \ - _avg = (((_avg << (decay)) - _avg) + (new)) >> (decay); \ - else \ - _avg = (new); \ - (old) = _avg; \ +#define DLIL_EWMA(old, new, decay) do { \ + u_int32_t _avg; \ + if ((_avg = (old)) > 0) \ + _avg = (((_avg << (decay)) - _avg) + (new)) >> (decay); \ + else \ + _avg = (new); \ + (old) = _avg; \ } while (0) -#define MBPS (1ULL * 1000 * 1000) -#define GBPS (MBPS * 1000) +#define MBPS (1ULL * 1000 * 1000) +#define GBPS (MBPS * 1000) struct rxpoll_time_tbl { - u_int64_t speed; /* downlink speed */ - u_int32_t plowat; /* packets low watermark */ - u_int32_t phiwat; /* packets high watermark */ - u_int32_t blowat; /* bytes low watermark */ - u_int32_t bhiwat; /* bytes high watermark */ + u_int64_t speed; /* downlink speed */ + u_int32_t plowat; /* packets low watermark */ + u_int32_t phiwat; /* packets high watermark */ + u_int32_t blowat; /* bytes low watermark */ + u_int32_t bhiwat; /* bytes high watermark */ }; static struct rxpoll_time_tbl rxpoll_tbl[] = { - { 10 * MBPS, 2, 8, (1 * 1024), (6 * 1024) }, - { 100 * MBPS, 10, 40, (4 * 1024), (64 * 1024) }, - { 1 * GBPS, 10, 40, (4 * 1024), (64 * 1024) }, - { 10 * GBPS, 10, 40, (4 * 1024), (64 * 1024) }, - { 100 * GBPS, 10, 40, (4 * 1024), (64 * 1024) }, + { 10 * MBPS, 2, 8, (1 * 1024), (6 * 1024) }, + { 100 * MBPS, 10, 40, (4 * 1024), (64 * 1024) }, + { 1 * GBPS, 10, 40, (4 * 1024), (64 * 1024) }, + { 10 * GBPS, 10, 40, (4 * 1024), (64 * 1024) }, + { 100 * GBPS, 10, 40, (4 * 1024), (64 * 1024) }, { 0, 0, 0, 0, 0 } }; @@ -833,15 +833,15 @@ proto_hash_value(u_int32_t protocol_family) * here; future changes must be applied there as well. 
*/ switch (protocol_family) { - case PF_INET: - return (0); - case PF_INET6: - return (1); - case PF_VLAN: - return (2); - case PF_UNSPEC: - default: - return (3); + case PF_INET: + return 0; + case PF_INET6: + return 1; + case PF_VLAN: + return 2; + case PF_UNSPEC: + default: + return 3; } } @@ -856,16 +856,19 @@ find_attached_proto(struct ifnet *ifp, u_int32_t protocol_family) ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED); - if (ifp->if_proto_hash != NULL) + if (ifp->if_proto_hash != NULL) { proto = SLIST_FIRST(&ifp->if_proto_hash[i]); + } - while (proto != NULL && proto->protocol_family != protocol_family) + while (proto != NULL && proto->protocol_family != protocol_family) { proto = SLIST_NEXT(proto, next_hash); + } - if (proto != NULL) + if (proto != NULL) { if_proto_ref(proto); + } - return (proto); + return proto; } static void @@ -885,19 +888,22 @@ if_proto_free(struct if_proto *proto) struct kev_dl_proto_data ev_pr_data; oldval = atomic_add_32_ov(&proto->refcount, -1); - if (oldval > 1) + if (oldval > 1) { return; + } /* No more reference on this, protocol must have been detached */ VERIFY(proto->detached); if (proto->proto_kpi == kProtoKPI_v1) { - if (proto->kpi.v1.detached) + if (proto->kpi.v1.detached) { proto->kpi.v1.detached(ifp, proto->protocol_family); + } } if (proto->proto_kpi == kProtoKPI_v2) { - if (proto->kpi.v2.detached) + if (proto->kpi.v2.detached) { proto->kpi.v2.detached(ifp, proto->protocol_family); + } } /* @@ -917,7 +923,7 @@ if_proto_free(struct if_proto *proto) dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_DETACHED, (struct net_event_data *)&ev_pr_data, - sizeof (struct kev_dl_proto_data)); + sizeof(struct kev_dl_proto_data)); if (ev_pr_data.proto_remaining_count == 0) { /* @@ -964,8 +970,9 @@ ifnet_lock_assert(struct ifnet *ifp, ifnet_lock_assert_t what) panic("bad ifnet assert type: %d", what); /* NOTREACHED */ } - if (ass) + if (ass) { LCK_RW_ASSERT(&ifp->if_lock, type); + } } __private_extern__ void @@ -1063,13 +1070,14 @@ static u_int32_t dlil_ifp_protolist(struct ifnet *ifp, protocol_family_t *list, u_int32_t list_count) { - u_int32_t count = 0; - int i; + u_int32_t count = 0; + int i; ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED); - if (ifp->if_proto_hash == NULL) + if (ifp->if_proto_hash == NULL) { goto done; + } for (i = 0; i < PROTO_HASH_SLOTS; i++) { struct if_proto *proto; @@ -1081,7 +1089,7 @@ dlil_ifp_protolist(struct ifnet *ifp, protocol_family_t *list, } } done: - return (count); + return count; } __private_extern__ u_int32_t @@ -1090,7 +1098,7 @@ if_get_protolist(struct ifnet * ifp, u_int32_t *protolist, u_int32_t count) ifnet_lock_shared(ifp); count = dlil_ifp_protolist(ifp, protolist, count); ifnet_lock_done(ifp); - return (count); + return count; } __private_extern__ void @@ -1107,21 +1115,21 @@ dlil_post_msg(struct ifnet *ifp, u_int32_t event_subclass, struct net_event_data ev_data; struct kev_msg ev_msg; - bzero(&ev_msg, sizeof (ev_msg)); - bzero(&ev_data, sizeof (ev_data)); + bzero(&ev_msg, sizeof(ev_msg)); + bzero(&ev_data, sizeof(ev_data)); /* * a net event always starts with a net_event_data structure * but the caller can generate a simple net event or * provide a longer event structure to post */ - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = event_subclass; - ev_msg.event_code = event_code; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = event_subclass; + ev_msg.event_code = event_code; if (event_data == NULL) { 
event_data = &ev_data; - event_data_len = sizeof (struct net_event_data); + event_data_len = sizeof(struct net_event_data); } strlcpy(&event_data->if_name[0], ifp->if_name, IFNAMSIZ); @@ -1134,8 +1142,8 @@ dlil_post_msg(struct ifnet *ifp, u_int32_t event_subclass, /* Don't update interface generation for quality and RRC state changess */ bool update_generation = (event_subclass != KEV_DL_SUBCLASS || - (event_code != KEV_DL_LINK_QUALITY_METRIC_CHANGED && - event_code != KEV_DL_RRC_STATE_CHANGED)); + (event_code != KEV_DL_LINK_QUALITY_METRIC_CHANGED && + event_code != KEV_DL_RRC_STATE_CHANGED)); dlil_event_internal(ifp, &ev_msg, update_generation); } @@ -1146,8 +1154,9 @@ dlil_alloc_local_stats(struct ifnet *ifp) int ret = EINVAL; void *buf, *base, **pbuf; - if (ifp == NULL) + if (ifp == NULL) { goto end; + } if (ifp->if_tcp_stat == NULL && ifp->if_udp_stat == NULL) { /* allocate tcpstat_local structure */ @@ -1159,8 +1168,8 @@ dlil_alloc_local_stats(struct ifnet *ifp) bzero(buf, dlif_tcpstat_bufsize); /* Get the 64-bit aligned base address for this object */ - base = (void *)P2ROUNDUP((intptr_t)buf + sizeof (u_int64_t), - sizeof (u_int64_t)); + base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t), + sizeof(u_int64_t)); VERIFY(((intptr_t)base + dlif_tcpstat_size) <= ((intptr_t)buf + dlif_tcpstat_bufsize)); @@ -1168,7 +1177,7 @@ dlil_alloc_local_stats(struct ifnet *ifp) * Wind back a pointer size from the aligned base and * save the original address so we can free it later. */ - pbuf = (void **)((intptr_t)base - sizeof (void *)); + pbuf = (void **)((intptr_t)base - sizeof(void *)); *pbuf = buf; ifp->if_tcp_stat = base; @@ -1181,8 +1190,8 @@ dlil_alloc_local_stats(struct ifnet *ifp) bzero(buf, dlif_udpstat_bufsize); /* Get the 64-bit aligned base address for this object */ - base = (void *)P2ROUNDUP((intptr_t)buf + sizeof (u_int64_t), - sizeof (u_int64_t)); + base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t), + sizeof(u_int64_t)); VERIFY(((intptr_t)base + dlif_udpstat_size) <= ((intptr_t)buf + dlif_udpstat_bufsize)); @@ -1190,19 +1199,19 @@ dlil_alloc_local_stats(struct ifnet *ifp) * Wind back a pointer size from the aligned base and * save the original address so we can free it later. 
*/ - pbuf = (void **)((intptr_t)base - sizeof (void *)); + pbuf = (void **)((intptr_t)base - sizeof(void *)); *pbuf = buf; ifp->if_udp_stat = base; - VERIFY(IS_P2ALIGNED(ifp->if_tcp_stat, sizeof (u_int64_t)) && - IS_P2ALIGNED(ifp->if_udp_stat, sizeof (u_int64_t))); + VERIFY(IS_P2ALIGNED(ifp->if_tcp_stat, sizeof(u_int64_t)) && + IS_P2ALIGNED(ifp->if_udp_stat, sizeof(u_int64_t))); ret = 0; } if (ifp->if_ipv4_stat == NULL) { MALLOC(ifp->if_ipv4_stat, struct if_tcp_ecn_stat *, - sizeof (struct if_tcp_ecn_stat), M_TEMP, M_WAITOK|M_ZERO); + sizeof(struct if_tcp_ecn_stat), M_TEMP, M_WAITOK | M_ZERO); if (ifp->if_ipv4_stat == NULL) { ret = ENOMEM; goto end; @@ -1211,7 +1220,7 @@ dlil_alloc_local_stats(struct ifnet *ifp) if (ifp->if_ipv6_stat == NULL) { MALLOC(ifp->if_ipv6_stat, struct if_tcp_ecn_stat *, - sizeof (struct if_tcp_ecn_stat), M_TEMP, M_WAITOK|M_ZERO); + sizeof(struct if_tcp_ecn_stat), M_TEMP, M_WAITOK | M_ZERO); if (ifp->if_ipv6_stat == NULL) { ret = ENOMEM; goto end; @@ -1221,13 +1230,13 @@ end: if (ret != 0) { if (ifp->if_tcp_stat != NULL) { pbuf = (void **) - ((intptr_t)ifp->if_tcp_stat - sizeof (void *)); + ((intptr_t)ifp->if_tcp_stat - sizeof(void *)); zfree(dlif_tcpstat_zone, *pbuf); ifp->if_tcp_stat = NULL; } if (ifp->if_udp_stat != NULL) { pbuf = (void **) - ((intptr_t)ifp->if_udp_stat - sizeof (void *)); + ((intptr_t)ifp->if_udp_stat - sizeof(void *)); zfree(dlif_udpstat_zone, *pbuf); ifp->if_udp_stat = NULL; } @@ -1241,7 +1250,7 @@ end: } } - return (ret); + return ret; } static int @@ -1274,7 +1283,7 @@ dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp) lck_mtx_init(&inp->input_lck, inp->lck_grp, dlil_lck_attributes); inp->mode = IFNET_MODEL_INPUT_POLL_OFF; - inp->ifp = ifp; /* NULL for main input thread */ + inp->ifp = ifp; /* NULL for main input thread */ net_timerclear(&inp->mode_holdtime); net_timerclear(&inp->mode_lasttime); @@ -1305,7 +1314,7 @@ dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp) error = kernel_thread_start(func, inp, &inp->input_thr); if (error == KERN_SUCCESS) { ml_thread_policy(inp->input_thr, MACHINE_GROUP, - (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_NETISR)); + (MACHINE_NETWORK_GROUP | MACHINE_NETWORK_NETISR)); /* * We create an affinity set so that the matching workloop * thread or the starter thread (for loopback) can be @@ -1318,7 +1327,7 @@ dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp) * Randomize to reduce the probability * of affinity tag namespace collision. 
*/ - read_frandom(&tag, sizeof (tag)); + read_frandom(&tag, sizeof(tag)); if (dlil_affinity_set(tp, tag) == KERN_SUCCESS) { thread_reference(tp); inp->tag = tag; @@ -1335,7 +1344,7 @@ dlil_create_input_thread(ifnet_t ifp, struct dlil_threading_info *inp) } OSAddAtomic(1, &cur_dlil_input_threads); - return (error); + return error; } #if TEST_INPUT_THREAD_TERMINATION @@ -1349,14 +1358,16 @@ sysctl_input_thread_termination_spin SYSCTL_HANDLER_ARGS i = if_input_thread_termination_spin; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (net_rxpoll == 0) - return (ENXIO); + if (net_rxpoll == 0) { + return ENXIO; + } if_input_thread_termination_spin = i; - return (err); + return err; } #endif /* TEST_INPUT_THREAD_TERMINATION */ @@ -1368,11 +1379,11 @@ dlil_clean_threading_info(struct dlil_threading_info *inp) inp->input_waiting = 0; inp->wtot = 0; - bzero(inp->input_name, sizeof (inp->input_name)); + bzero(inp->input_name, sizeof(inp->input_name)); inp->ifp = NULL; VERIFY(qhead(&inp->rcvq_pkts) == NULL && qempty(&inp->rcvq_pkts)); qlimit(&inp->rcvq_pkts) = 0; - bzero(&inp->stats, sizeof (inp->stats)); + bzero(&inp->stats, sizeof(inp->stats)); VERIFY(!inp->net_affinity); inp->input_thr = THREAD_NULL; @@ -1381,9 +1392,9 @@ dlil_clean_threading_info(struct dlil_threading_info *inp) VERIFY(inp->tag == 0); inp->mode = IFNET_MODEL_INPUT_POLL_OFF; - bzero(&inp->tstats, sizeof (inp->tstats)); - bzero(&inp->pstats, sizeof (inp->pstats)); - bzero(&inp->sstats, sizeof (inp->sstats)); + bzero(&inp->tstats, sizeof(inp->tstats)); + bzero(&inp->pstats, sizeof(inp->pstats)); + bzero(&inp->sstats, sizeof(inp->sstats)); net_timerclear(&inp->mode_holdtime); net_timerclear(&inp->mode_lasttime); @@ -1408,10 +1419,10 @@ dlil_terminate_input_thread(struct dlil_threading_info *inp) #if TEST_INPUT_THREAD_TERMINATION { /* do something useless that won't get optimized away */ - uint32_t v = 1; + uint32_t v = 1; for (uint32_t i = 0; - i < if_input_thread_termination_spin; - i++) { + i < if_input_thread_termination_spin; + i++) { v = (i + 1) * v; } printf("the value is %d\n", v); @@ -1442,10 +1453,10 @@ dlil_affinity_set(struct thread *tp, u_int32_t tag) { thread_affinity_policy_data_t policy; - bzero(&policy, sizeof (policy)); + bzero(&policy, sizeof(policy)); policy.affinity_tag = tag; - return (thread_policy_set(tp, THREAD_AFFINITY_POLICY, - (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT)); + return thread_policy_set(tp, THREAD_AFFINITY_POLICY, + (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT); } void @@ -1565,19 +1576,19 @@ dlil_init(void) _CASSERT(DLIL_MODARGLEN == IFNET_MODARGLEN); PE_parse_boot_argn("net_affinity", &net_affinity, - sizeof (net_affinity)); + sizeof(net_affinity)); - PE_parse_boot_argn("net_rxpoll", &net_rxpoll, sizeof (net_rxpoll)); + PE_parse_boot_argn("net_rxpoll", &net_rxpoll, sizeof(net_rxpoll)); - PE_parse_boot_argn("net_rtref", &net_rtref, sizeof (net_rtref)); + PE_parse_boot_argn("net_rtref", &net_rtref, sizeof(net_rtref)); - PE_parse_boot_argn("ifnet_debug", &ifnet_debug, sizeof (ifnet_debug)); + PE_parse_boot_argn("ifnet_debug", &ifnet_debug, sizeof(ifnet_debug)); - dlif_size = (ifnet_debug == 0) ? sizeof (struct dlil_ifnet) : - sizeof (struct dlil_ifnet_dbg); + dlif_size = (ifnet_debug == 0) ? 
sizeof(struct dlil_ifnet) : + sizeof(struct dlil_ifnet_dbg); /* Enforce 64-bit alignment for dlil_ifnet structure */ - dlif_bufsize = dlif_size + sizeof (void *) + sizeof (u_int64_t); - dlif_bufsize = P2ROUNDUP(dlif_bufsize, sizeof (u_int64_t)); + dlif_bufsize = dlif_size + sizeof(void *) + sizeof(u_int64_t); + dlif_bufsize = P2ROUNDUP(dlif_bufsize, sizeof(u_int64_t)); dlif_zone = zinit(dlif_bufsize, DLIF_ZONE_MAX * dlif_bufsize, 0, DLIF_ZONE_NAME); if (dlif_zone == NULL) { @@ -1588,7 +1599,7 @@ dlil_init(void) zone_change(dlif_zone, Z_EXPAND, TRUE); zone_change(dlif_zone, Z_CALLERACCT, FALSE); - dlif_filt_size = sizeof (struct ifnet_filter); + dlif_filt_size = sizeof(struct ifnet_filter); dlif_filt_zone = zinit(dlif_filt_size, DLIF_FILT_ZONE_MAX * dlif_filt_size, 0, DLIF_FILT_ZONE_NAME); if (dlif_filt_zone == NULL) { @@ -1599,7 +1610,7 @@ dlil_init(void) zone_change(dlif_filt_zone, Z_EXPAND, TRUE); zone_change(dlif_filt_zone, Z_CALLERACCT, FALSE); - dlif_phash_size = sizeof (struct proto_hash_entry) * PROTO_HASH_SLOTS; + dlif_phash_size = sizeof(struct proto_hash_entry) * PROTO_HASH_SLOTS; dlif_phash_zone = zinit(dlif_phash_size, DLIF_PHASH_ZONE_MAX * dlif_phash_size, 0, DLIF_PHASH_ZONE_NAME); if (dlif_phash_zone == NULL) { @@ -1610,7 +1621,7 @@ dlil_init(void) zone_change(dlif_phash_zone, Z_EXPAND, TRUE); zone_change(dlif_phash_zone, Z_CALLERACCT, FALSE); - dlif_proto_size = sizeof (struct if_proto); + dlif_proto_size = sizeof(struct if_proto); dlif_proto_zone = zinit(dlif_proto_size, DLIF_PROTO_ZONE_MAX * dlif_proto_size, 0, DLIF_PROTO_ZONE_NAME); if (dlif_proto_zone == NULL) { @@ -1621,12 +1632,12 @@ dlil_init(void) zone_change(dlif_proto_zone, Z_EXPAND, TRUE); zone_change(dlif_proto_zone, Z_CALLERACCT, FALSE); - dlif_tcpstat_size = sizeof (struct tcpstat_local); + dlif_tcpstat_size = sizeof(struct tcpstat_local); /* Enforce 64-bit alignment for tcpstat_local structure */ dlif_tcpstat_bufsize = - dlif_tcpstat_size + sizeof (void *) + sizeof (u_int64_t); + dlif_tcpstat_size + sizeof(void *) + sizeof(u_int64_t); dlif_tcpstat_bufsize = - P2ROUNDUP(dlif_tcpstat_bufsize, sizeof (u_int64_t)); + P2ROUNDUP(dlif_tcpstat_bufsize, sizeof(u_int64_t)); dlif_tcpstat_zone = zinit(dlif_tcpstat_bufsize, DLIF_TCPSTAT_ZONE_MAX * dlif_tcpstat_bufsize, 0, DLIF_TCPSTAT_ZONE_NAME); @@ -1638,12 +1649,12 @@ dlil_init(void) zone_change(dlif_tcpstat_zone, Z_EXPAND, TRUE); zone_change(dlif_tcpstat_zone, Z_CALLERACCT, FALSE); - dlif_udpstat_size = sizeof (struct udpstat_local); + dlif_udpstat_size = sizeof(struct udpstat_local); /* Enforce 64-bit alignment for udpstat_local structure */ dlif_udpstat_bufsize = - dlif_udpstat_size + sizeof (void *) + sizeof (u_int64_t); + dlif_udpstat_size + sizeof(void *) + sizeof(u_int64_t); dlif_udpstat_bufsize = - P2ROUNDUP(dlif_udpstat_bufsize, sizeof (u_int64_t)); + P2ROUNDUP(dlif_udpstat_bufsize, sizeof(u_int64_t)); dlif_udpstat_zone = zinit(dlif_udpstat_bufsize, DLIF_TCPSTAT_ZONE_MAX * dlif_udpstat_bufsize, 0, DLIF_UDPSTAT_ZONE_NAME); @@ -1689,7 +1700,7 @@ dlil_init(void) /* Setup interface flow control related items */ lck_mtx_init(&ifnet_fc_lock, dlil_lock_group, dlil_lck_attributes); - ifnet_fc_zone_size = sizeof (struct ifnet_fc_entry); + ifnet_fc_zone_size = sizeof(struct ifnet_fc_entry); ifnet_fc_zone = zinit(ifnet_fc_zone_size, IFNET_FC_ZONE_MAX * ifnet_fc_zone_size, 0, IFNET_FC_ZONE_NAME); if (ifnet_fc_zone == NULL) { @@ -1749,7 +1760,6 @@ dlil_init(void) /* NOTREACHED */ } thread_deallocate(thread); - } static void @@ -1795,7 +1805,7 @@ if_flt_monitor_leave(struct 
ifnet *ifp) } __private_extern__ int -dlil_attach_filter(struct ifnet *ifp, const struct iff_filter *if_filter, +dlil_attach_filter(struct ifnet *ifp, const struct iff_filter *if_filter, interface_filter_t *filter_ref, u_int32_t flags) { int retval = 0; @@ -1866,14 +1876,15 @@ done: DLIL_PRINTF("%s: failed to attach %s (err=%d)\n", if_name(ifp), if_filter->iff_name, retval); } - if (retval != 0 && filter != NULL) + if (retval != 0 && filter != NULL) { zfree(dlif_filt_zone, filter); + } - return (retval); + return retval; } static int -dlil_detach_filter_internal(interface_filter_t filter, int detached) +dlil_detach_filter_internal(interface_filter_t filter, int detached) { int retval = 0; @@ -1886,8 +1897,9 @@ dlil_detach_filter_internal(interface_filter_t filter, int detached) lck_mtx_lock(&ifp->if_flt_lock); TAILQ_FOREACH(entry, &ifp->if_flt_head, filt_next) { - if (entry != filter || entry->filt_skip) + if (entry != filter || entry->filt_skip) { continue; + } /* * We've found a match; since it's possible * that the thread gets blocked in the monitor, @@ -1895,7 +1907,7 @@ dlil_detach_filter_internal(interface_filter_t filter, int detached) * not be detached since we still have a use * count held during filter attach. */ - entry->filt_skip = 1; /* skip input/output */ + entry->filt_skip = 1; /* skip input/output */ lck_mtx_unlock(&ifp->if_flt_lock); ifnet_head_done(); @@ -1925,14 +1937,16 @@ dlil_detach_filter_internal(interface_filter_t filter, int detached) goto done; } - if (dlil_verbose) + if (dlil_verbose) { printf("%s filter detached\n", filter->filt_name); + } destroy: /* Call the detached function if there is one */ - if (filter->filt_detached) + if (filter->filt_detached) { filter->filt_detached(filter->filt_cookie, filter->filt_ifp); + } /* * Decrease filter count and route_generation ID to let TCP @@ -1954,14 +1968,15 @@ done: filter->filt_name, retval); } - return (retval); + return retval; } __private_extern__ void dlil_detach_filter(interface_filter_t filter) { - if (filter == NULL) + if (filter == NULL) { return; + } dlil_detach_filter_internal(filter, 0); } @@ -2027,20 +2042,23 @@ dlil_main_input_thread_func(void *v, wait_result_t w) * We should think about putting some thread starvation * safeguards if we deal with long chains of packets. */ - if (m_loop != NULL) + if (m_loop != NULL) { dlil_input_packet_list_extended(lo_ifp, m_loop, m_cnt_loop, inp->mode); + } - if (m != NULL) + if (m != NULL) { dlil_input_packet_list_extended(NULL, m, m_cnt, inp->mode); + } - if (proto_req) + if (proto_req) { proto_input_run(); + } } /* NOTREACHED */ - VERIFY(0); /* we should never get here */ + VERIFY(0); /* we should never get here */ } /* @@ -2088,7 +2106,7 @@ dlil_input_thread_func(void *v, wait_result_t w) * (and the benefits might not worth the trouble.) 
*/ VERIFY(!(inp->input_waiting & - (DLIL_PROTO_WAITING|DLIL_PROTO_REGISTER))); + (DLIL_PROTO_WAITING | DLIL_PROTO_REGISTER))); /* Packets for this interface */ m_cnt = qlen(&inp->rcvq_pkts); @@ -2098,8 +2116,9 @@ dlil_input_thread_func(void *v, wait_result_t w) lck_mtx_unlock(&inp->input_lck); /* Free up pending packets */ - if (m != NULL) + if (m != NULL) { mbuf_freem_list(m); + } dlil_terminate_input_thread(inp); /* NOTREACHED */ @@ -2108,7 +2127,7 @@ dlil_input_thread_func(void *v, wait_result_t w) inp->wtot = 0; - dlil_input_stats_sync(ifp, inp); + dlil_input_stats_sync(ifp, inp); lck_mtx_unlock(&inp->input_lck); @@ -2117,13 +2136,14 @@ dlil_input_thread_func(void *v, wait_result_t w) * We should think about putting some thread starvation * safeguards if we deal with long chains of packets. */ - if (m != NULL) + if (m != NULL) { dlil_input_packet_list_extended(NULL, m, m_cnt, inp->mode); + } } /* NOTREACHED */ - VERIFY(0); /* we should never get here */ + VERIFY(0); /* we should never get here */ } /* @@ -2149,8 +2169,9 @@ dlil_rxpoll_input_thread_func(void *v, wait_result_t w) lck_mtx_lock_spin(&inp->input_lck); - if ((ival = inp->rxpoll_ival) < IF_RXPOLL_INTERVALTIME_MIN) + if ((ival = inp->rxpoll_ival) < IF_RXPOLL_INTERVALTIME_MIN) { ival = IF_RXPOLL_INTERVALTIME_MIN; + } /* Link parameters changed? */ if (ifp->if_poll_update != 0) { @@ -2179,15 +2200,16 @@ dlil_rxpoll_input_thread_func(void *v, wait_result_t w) * (and the benefits might not worth the trouble.) */ VERIFY(!(inp->input_waiting & - (DLIL_PROTO_WAITING|DLIL_PROTO_REGISTER))); + (DLIL_PROTO_WAITING | DLIL_PROTO_REGISTER))); if (inp->input_waiting & DLIL_INPUT_TERMINATE) { /* Free up pending packets */ lck_mtx_convert_spin(&inp->input_lck); _flushq(&inp->rcvq_pkts); if (inp->input_mit_tcall != NULL) { - if (thread_call_isactive(inp->input_mit_tcall)) + if (thread_call_isactive(inp->input_mit_tcall)) { thread_call_cancel(inp->input_mit_tcall); + } } lck_mtx_unlock(&inp->input_lck); @@ -2207,8 +2229,9 @@ dlil_rxpoll_input_thread_func(void *v, wait_result_t w) VERIFY(m != NULL || m_cnt == 0); nanouptime(&now); - if (!net_timerisset(&inp->sample_lasttime)) + if (!net_timerisset(&inp->sample_lasttime)) { *(&inp->sample_lasttime) = *(&now); + } net_timersub(&now, &inp->sample_lasttime, &delta); if (if_rxpoll && net_timerisset(&inp->sample_holdtime)) { @@ -2217,27 +2240,32 @@ dlil_rxpoll_input_thread_func(void *v, wait_result_t w) /* Accumulate statistics for current sampling */ PKTCNTR_ADD(&inp->sstats, m_cnt, m_size); - if (net_timercmp(&delta, &inp->sample_holdtime, <)) + if (net_timercmp(&delta, &inp->sample_holdtime, <)) { goto skip; + } *(&inp->sample_lasttime) = *(&now); /* Calculate min/max of inbound bytes */ btot = (u_int32_t)inp->sstats.bytes; - if (inp->rxpoll_bmin == 0 || inp->rxpoll_bmin > btot) + if (inp->rxpoll_bmin == 0 || inp->rxpoll_bmin > btot) { inp->rxpoll_bmin = btot; - if (btot > inp->rxpoll_bmax) + } + if (btot > inp->rxpoll_bmax) { inp->rxpoll_bmax = btot; + } /* Calculate EWMA of inbound bytes */ DLIL_EWMA(inp->rxpoll_bavg, btot, if_rxpoll_decay); /* Calculate min/max of inbound packets */ ptot = (u_int32_t)inp->sstats.packets; - if (inp->rxpoll_pmin == 0 || inp->rxpoll_pmin > ptot) + if (inp->rxpoll_pmin == 0 || inp->rxpoll_pmin > ptot) { inp->rxpoll_pmin = ptot; - if (ptot > inp->rxpoll_pmax) + } + if (ptot > inp->rxpoll_pmax) { inp->rxpoll_pmax = ptot; + } /* Calculate EWMA of inbound packets */ DLIL_EWMA(inp->rxpoll_pavg, ptot, if_rxpoll_decay); @@ -2250,8 +2278,9 @@ 
dlil_rxpoll_input_thread_func(void *v, wait_result_t w) inp->wtot = 0; if (dlil_verbose) { - if (!net_timerisset(&inp->dbg_lasttime)) + if (!net_timerisset(&inp->dbg_lasttime)) { *(&inp->dbg_lasttime) = *(&now); + } net_timersub(&now, &inp->dbg_lasttime, &delta); if (net_timercmp(&delta, &dlil_dbgrate, >=)) { *(&inp->dbg_lasttime) = *(&now); @@ -2275,12 +2304,14 @@ dlil_rxpoll_input_thread_func(void *v, wait_result_t w) } /* Perform mode transition, if necessary */ - if (!net_timerisset(&inp->mode_lasttime)) + if (!net_timerisset(&inp->mode_lasttime)) { *(&inp->mode_lasttime) = *(&now); + } net_timersub(&now, &inp->mode_lasttime, &delta); - if (net_timercmp(&delta, &inp->mode_holdtime, <)) + if (net_timercmp(&delta, &inp->mode_holdtime, <)) { goto skip; + } if (inp->rxpoll_pavg <= inp->rxpoll_plowat && inp->rxpoll_bavg <= inp->rxpoll_blowat && @@ -2330,7 +2361,7 @@ skip: } if ((err = ((*ifp->if_input_ctl)(ifp, - IFNET_CTL_SET_INPUT_MODEL, sizeof (p), &p))) != 0) { + IFNET_CTL_SET_INPUT_MODEL, sizeof(p), &p))) != 0) { printf("%s: error setting polling mode " "to %s (%d)\n", if_name(ifp), (mode == IFNET_MODEL_INPUT_POLL_ON) ? @@ -2341,8 +2372,9 @@ skip: case IFNET_MODEL_INPUT_POLL_OFF: ifnet_set_poll_cycle(ifp, NULL); inp->rxpoll_offreq++; - if (err != 0) + if (err != 0) { inp->rxpoll_offerr++; + } break; case IFNET_MODEL_INPUT_POLL_ON: @@ -2350,8 +2382,9 @@ skip: ifnet_set_poll_cycle(ifp, &ts); ifnet_poll(ifp); inp->rxpoll_onreq++; - if (err != 0) + if (err != 0) { inp->rxpoll_onerr++; + } break; default: @@ -2368,12 +2401,13 @@ skip: * We should think about putting some thread starvation * safeguards if we deal with long chains of packets. */ - if (m != NULL) + if (m != NULL) { dlil_input_packet_list_extended(NULL, m, m_cnt, mode); + } } /* NOTREACHED */ - VERIFY(0); /* we should never get here */ + VERIFY(0); /* we should never get here */ } /* @@ -2388,29 +2422,36 @@ dlil_rxpoll_set_params(struct ifnet *ifp, struct ifnet_poll_params *p, u_int64_t sample_holdtime, inbw; VERIFY(ifp != NULL); - if (!(ifp->if_eflags & IFEF_RXPOLL) || (inp = ifp->if_inp) == NULL) - return (ENXIO); + if (!(ifp->if_eflags & IFEF_RXPOLL) || (inp = ifp->if_inp) == NULL) { + return ENXIO; + } if (p != NULL) { if ((p->packets_lowat == 0 && p->packets_hiwat != 0) || - (p->packets_lowat != 0 && p->packets_hiwat == 0)) - return (EINVAL); - if (p->packets_lowat != 0 && /* hiwat must be non-zero */ - p->packets_lowat >= p->packets_hiwat) - return (EINVAL); + (p->packets_lowat != 0 && p->packets_hiwat == 0)) { + return EINVAL; + } + if (p->packets_lowat != 0 && /* hiwat must be non-zero */ + p->packets_lowat >= p->packets_hiwat) { + return EINVAL; + } if ((p->bytes_lowat == 0 && p->bytes_hiwat != 0) || - (p->bytes_lowat != 0 && p->bytes_hiwat == 0)) - return (EINVAL); - if (p->bytes_lowat != 0 && /* hiwat must be non-zero */ - p->bytes_lowat >= p->bytes_hiwat) - return (EINVAL); + (p->bytes_lowat != 0 && p->bytes_hiwat == 0)) { + return EINVAL; + } + if (p->bytes_lowat != 0 && /* hiwat must be non-zero */ + p->bytes_lowat >= p->bytes_hiwat) { + return EINVAL; + } if (p->interval_time != 0 && - p->interval_time < IF_RXPOLL_INTERVALTIME_MIN) + p->interval_time < IF_RXPOLL_INTERVALTIME_MIN) { p->interval_time = IF_RXPOLL_INTERVALTIME_MIN; + } } - if (!locked) + if (!locked) { lck_mtx_lock(&inp->input_lck); + } LCK_MTX_ASSERT(&inp->input_lck, LCK_MTX_ASSERT_OWNED); @@ -2422,11 +2463,12 @@ dlil_rxpoll_set_params(struct ifnet *ifp, struct ifnet_poll_params *p, * make sure to keep the driver's values. 
Clearing if_poll_update * will achieve that. */ - if (p != NULL && !locked && ifp->if_poll_update != 0) + if (p != NULL && !locked && ifp->if_poll_update != 0) { ifp->if_poll_update = 0; + } if ((inbw = ifnet_input_linkrate(ifp)) == 0 && p == NULL) { - sample_holdtime = 0; /* polling is disabled */ + sample_holdtime = 0; /* polling is disabled */ inp->rxpoll_wlowat = inp->rxpoll_plowat = inp->rxpoll_blowat = 0; inp->rxpoll_whiwat = inp->rxpoll_phiwat = @@ -2439,8 +2481,9 @@ dlil_rxpoll_set_params(struct ifnet *ifp, struct ifnet_poll_params *p, unsigned int n, i; for (n = 0, i = 0; rxpoll_tbl[i].speed != 0; i++) { - if (inbw < rxpoll_tbl[i].speed) + if (inbw < rxpoll_tbl[i].speed) { break; + } n = i; } /* auto-tune if caller didn't specify a value */ @@ -2485,10 +2528,11 @@ dlil_rxpoll_set_params(struct ifnet *ifp, struct ifnet_poll_params *p, inp->rxpoll_whiwat, inp->rxpoll_blowat, inp->rxpoll_bhiwat); } - if (!locked) + if (!locked) { lck_mtx_unlock(&inp->input_lck); + } - return (0); + return 0; } /* @@ -2500,10 +2544,11 @@ dlil_rxpoll_get_params(struct ifnet *ifp, struct ifnet_poll_params *p) struct dlil_threading_info *inp; VERIFY(ifp != NULL && p != NULL); - if (!(ifp->if_eflags & IFEF_RXPOLL) || (inp = ifp->if_inp) == NULL) - return (ENXIO); + if (!(ifp->if_eflags & IFEF_RXPOLL) || (inp = ifp->if_inp) == NULL) { + return ENXIO; + } - bzero(p, sizeof (*p)); + bzero(p, sizeof(*p)); lck_mtx_lock(&inp->input_lck); p->packets_limit = inp->rxpoll_plim; @@ -2514,21 +2559,21 @@ dlil_rxpoll_get_params(struct ifnet *ifp, struct ifnet_poll_params *p) p->interval_time = inp->rxpoll_ival; lck_mtx_unlock(&inp->input_lck); - return (0); + return 0; } errno_t ifnet_input(struct ifnet *ifp, struct mbuf *m_head, const struct ifnet_stat_increment_param *s) { - return (ifnet_input_common(ifp, m_head, NULL, s, FALSE, FALSE)); + return ifnet_input_common(ifp, m_head, NULL, s, FALSE, FALSE); } errno_t ifnet_input_extended(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, const struct ifnet_stat_increment_param *s) { - return (ifnet_input_common(ifp, m_head, m_tail, s, TRUE, FALSE)); + return ifnet_input_common(ifp, m_head, m_tail, s, TRUE, FALSE); } static errno_t @@ -2542,9 +2587,10 @@ ifnet_input_common(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, errno_t err = 0; if ((m_head == NULL && !poll) || (s == NULL && ext)) { - if (m_head != NULL) + if (m_head != NULL) { mbuf_freem_list(m_head); - return (EINVAL); + } + return EINVAL; } VERIFY(m_head != NULL || (s == NULL && m_tail == NULL && !ext && poll)); @@ -2557,9 +2603,10 @@ ifnet_input_common(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, * prevent it from being detached (will be released below.) 
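/*
 * The auto-tune path above scans rxpoll_tbl for the highest row whose
 * speed threshold does not exceed the measured link rate, then takes the
 * polling parameters from that row.  A minimal sketch of the same
 * zero-terminated table scan (the struct and field names here are
 * illustrative stand-ins, not the xnu definitions):
 */
struct rate_tier_sketch {
	u_int64_t speed;        /* link rate threshold, bits per second */
	u_int32_t plowat;       /* packet low watermark for this tier */
	u_int32_t phiwat;       /* packet high watermark for this tier */
};

static unsigned int
rate_tier_pick_sketch(const struct rate_tier_sketch *tbl, u_int64_t inbw)
{
	unsigned int n = 0, i;

	/*
	 * Stop at the terminating zero-speed row or at the first tier
	 * faster than the link; n trails behind as the best match.
	 */
	for (i = 0; tbl[i].speed != 0; i++) {
		if (inbw < tbl[i].speed) {
			break;
		}
		n = i;
	}
	return n;
}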
*/ if (ifp == NULL || (ifp != lo_ifp && !ifnet_is_attached(ifp, 1))) { - if (m_head != NULL) + if (m_head != NULL) { mbuf_freem_list(m_head); - return (EINVAL); + } + return EINVAL; } input_func = ifp->if_input_dlil; @@ -2569,13 +2616,15 @@ ifnet_input_common(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, last = m_head; while (m_head != NULL) { #if IFNET_INPUT_SANITY_CHK - if (dlil_input_sanity_check != 0) + if (dlil_input_sanity_check != 0) { DLIL_INPUT_CHECK(last, ifp); + } #endif /* IFNET_INPUT_SANITY_CHK */ m_cnt++; m_size += m_length(last); - if (mbuf_nextpkt(last) == NULL) + if (mbuf_nextpkt(last) == NULL) { break; + } last = mbuf_nextpkt(last); } m_tail = last; @@ -2587,8 +2636,9 @@ ifnet_input_common(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, DLIL_INPUT_CHECK(last, ifp); m_cnt++; m_size += m_length(last); - if (mbuf_nextpkt(last) == NULL) + if (mbuf_nextpkt(last) == NULL) { break; + } last = mbuf_nextpkt(last); } } else { @@ -2623,7 +2673,7 @@ ifnet_input_common(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, } if (s == NULL) { - bzero(&_s, sizeof (_s)); + bzero(&_s, sizeof(_s)); s = &_s; } else { _s = *s; @@ -2638,14 +2688,14 @@ ifnet_input_common(struct ifnet *ifp, struct mbuf *m_head, struct mbuf *m_tail, ifnet_decr_iorefcnt(ifp); } - return (err); + return err; } errno_t dlil_output_handler(struct ifnet *ifp, struct mbuf *m) { - return (ifp->if_output(ifp, m)); + return ifp->if_output(ifp, m); } errno_t @@ -2657,8 +2707,9 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, u_int32_t m_cnt = s->packets_in; u_int32_t m_size = s->bytes_in; - if ((inp = ifp->if_inp) == NULL) + if ((inp = ifp->if_inp) == NULL) { inp = dlil_main_input_thread; + } /* * If there is a matching DLIL input thread associated with an @@ -2718,8 +2769,9 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, u_int32_t count; struct mbuf *m0; - for (m0 = m_head, count = 0; m0; m0 = mbuf_nextpkt(m0)) + for (m0 = m_head, count = 0; m0; m0 = mbuf_nextpkt(m0)) { count++; + } if (count != m_cnt) { panic_plain("%s: invalid packet count %d " @@ -2739,8 +2791,9 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, * other cases involving dedicated input threads will * have their stats synchronized there. */ - if (inp == dlil_main_input_thread) + if (inp == dlil_main_input_thread) { dlil_input_stats_sync(ifp, inp); + } if (inp->input_mit_tcall && qlen(&inp->rcvq_pkts) >= dlil_rcv_mit_pkts_min && @@ -2753,7 +2806,7 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, clock_interval_to_deadline(dlil_rcv_mit_interval, 1, &deadline); (void) thread_call_enter_delayed( - inp->input_mit_tcall, deadline); + inp->input_mit_tcall, deadline); } } else { inp->input_waiting |= DLIL_INPUT_WAITING; @@ -2764,15 +2817,16 @@ dlil_input_handler(struct ifnet *ifp, struct mbuf *m_head, } lck_mtx_unlock(&inp->input_lck); - return (0); + return 0; } static void ifnet_start_common(struct ifnet *ifp, boolean_t resetfc) { - if (!(ifp->if_eflags & IFEF_TXSTART)) + if (!(ifp->if_eflags & IFEF_TXSTART)) { return; + } /* * If the starter thread is inactive, signal it to do work, * unless the interface is being flow controlled from below, @@ -2817,7 +2871,7 @@ ifnet_start_thread_fn(void *v, wait_result_t w) /* Construct the name for this thread, and then apply it. 
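/*
 * When the caller passes no ifnet_stat_increment_param, ifnet_input_common
 * derives the packet and byte counts itself by walking the mbuf packet
 * chain, remembering the tail so the chain can later be spliced onto the
 * receive queue in one operation.  A minimal sketch of that walk
 * (assuming a non-NULL head, which the caller has already checked):
 */
static void
count_pkt_chain_sketch(mbuf_t head, mbuf_t *tail,
    u_int32_t *cnt, u_int32_t *bytes)
{
	mbuf_t m = head;

	*cnt = 0;
	*bytes = 0;
	for (;;) {
		(*cnt)++;
		*bytes += m_length(m);  /* sums m_len across the m_next chain */
		if (mbuf_nextpkt(m) == NULL) {
			break;          /* m is the last packet: the tail */
		}
		m = mbuf_nextpkt(m);
	}
	*tail = m;
}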
*/ bzero(thread_name, sizeof(thread_name)); - (void) snprintf(thread_name, sizeof (thread_name), + (void) snprintf(thread_name, sizeof(thread_name), "ifnet_start_%s", ifp->if_xname); thread_set_thread_name(ifp->if_start_thread, thread_name); @@ -2848,7 +2902,7 @@ ifnet_start_thread_fn(void *v, wait_result_t w) } } - (void) snprintf(ifname, sizeof (ifname), "%s_starter", if_name(ifp)); + (void) snprintf(ifname, sizeof(ifname), "%s_starter", if_name(ifp)); lck_mtx_lock_spin(&ifp->if_start_lock); @@ -2943,8 +2997,9 @@ ifnet_start_thread_fn(void *v, wait_result_t w) ts = &delay_start_ts; } - if (ts != NULL && ts->tv_sec == 0 && ts->tv_nsec == 0) + if (ts != NULL && ts->tv_sec == 0 && ts->tv_nsec == 0) { ts = NULL; + } } /* NOTREACHED */ @@ -2953,14 +3008,16 @@ ifnet_start_thread_fn(void *v, wait_result_t w) void ifnet_set_start_cycle(struct ifnet *ifp, struct timespec *ts) { - if (ts == NULL) - bzero(&ifp->if_start_cycle, sizeof (ifp->if_start_cycle)); - else + if (ts == NULL) { + bzero(&ifp->if_start_cycle, sizeof(ifp->if_start_cycle)); + } else { *(&ifp->if_start_cycle) = *ts; + } - if (ts != NULL && ts->tv_nsec != 0 && dlil_verbose) + if (ts != NULL && ts->tv_nsec != 0 && dlil_verbose) { printf("%s: restart interval set to %lu nsec\n", if_name(ifp), ts->tv_nsec); + } } static void @@ -2987,9 +3044,9 @@ ifnet_poll_thread_fn(void *v, wait_result_t w) struct timespec *ts = NULL; struct ifnet_stat_increment_param s; - snprintf(ifname, sizeof (ifname), "%s_poller", + snprintf(ifname, sizeof(ifname), "%s_poller", if_name(ifp)); - bzero(&s, sizeof (s)); + bzero(&s, sizeof(s)); lck_mtx_lock_spin(&ifp->if_poll_lock); @@ -3053,7 +3110,7 @@ ifnet_poll_thread_fn(void *v, wait_result_t w) /* invoke the driver's input poll routine */ ((*ifp->if_input_poll)(ifp, 0, m_lim, &m_head, &m_tail, - &m_cnt, &m_totlen)); + &m_cnt, &m_totlen)); if (m_head != NULL) { VERIFY(m_tail != NULL && m_cnt > 0); @@ -3105,8 +3162,9 @@ ifnet_poll_thread_fn(void *v, wait_result_t w) * until ifnet_poll() is called again. 
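/*
 * Both the starter and the poller threads use the same convention for
 * their wakeup interval: an all-zero timespec means "no periodic wakeup",
 * and the pointer is nulled so the subsequent msleep() blocks until the
 * thread is explicitly signalled.  A sketch of that normalization
 * (assuming msleep's usual NULL-timeout-means-forever semantics):
 */
static inline struct timespec *
cycle_or_forever_sketch(struct timespec *ts)
{
	if (ts != NULL && ts->tv_sec == 0 && ts->tv_nsec == 0) {
		return NULL;    /* zero interval: sleep with no timeout */
	}
	return ts;
}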
*/ ts = &ifp->if_poll_cycle; - if (ts->tv_sec == 0 && ts->tv_nsec == 0) + if (ts->tv_sec == 0 && ts->tv_nsec == 0) { ts = NULL; + } } /* NOTREACHED */ @@ -3115,21 +3173,24 @@ ifnet_poll_thread_fn(void *v, wait_result_t w) void ifnet_set_poll_cycle(struct ifnet *ifp, struct timespec *ts) { - if (ts == NULL) - bzero(&ifp->if_poll_cycle, sizeof (ifp->if_poll_cycle)); - else + if (ts == NULL) { + bzero(&ifp->if_poll_cycle, sizeof(ifp->if_poll_cycle)); + } else { *(&ifp->if_poll_cycle) = *ts; + } - if (ts != NULL && ts->tv_nsec != 0 && dlil_verbose) + if (ts != NULL && ts->tv_nsec != 0 && dlil_verbose) { printf("%s: poll interval set to %lu nsec\n", if_name(ifp), ts->tv_nsec); + } } void ifnet_purge(struct ifnet *ifp) { - if (ifp != NULL && (ifp->if_eflags & IFEF_TXSTART)) + if (ifp != NULL && (ifp->if_eflags & IFEF_TXSTART)) { if_qflush(ifp, 0); + } } void @@ -3137,12 +3198,13 @@ ifnet_update_sndq(struct ifclassq *ifq, cqev_t ev) { IFCQ_LOCK_ASSERT_HELD(ifq); - if (!(IFCQ_IS_READY(ifq))) + if (!(IFCQ_IS_READY(ifq))) { return; + } if (IFCQ_TBR_IS_ENABLED(ifq)) { struct tb_profile tb = { ifq->ifcq_tbr.tbr_rate_raw, - ifq->ifcq_tbr.tbr_percent, 0 }; + ifq->ifcq_tbr.tbr_percent, 0 }; (void) ifclassq_tbr_set(ifq, &tb, FALSE); } @@ -3154,8 +3216,9 @@ ifnet_update_rcv(struct ifnet *ifp, cqev_t ev) { switch (ev) { case CLASSQ_EV_LINK_BANDWIDTH: - if (net_rxpoll && (ifp->if_eflags & IFEF_RXPOLL)) + if (net_rxpoll && (ifp->if_eflags & IFEF_RXPOLL)) { ifp->if_poll_update++; + } break; default: @@ -3170,46 +3233,50 @@ ifnet_set_output_sched_model(struct ifnet *ifp, u_int32_t model) u_int32_t omodel; errno_t err; - if (ifp == NULL || model >= IFNET_SCHED_MODEL_MAX) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_TXSTART)) - return (ENXIO); + if (ifp == NULL || model >= IFNET_SCHED_MODEL_MAX) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_TXSTART)) { + return ENXIO; + } ifq = &ifp->if_snd; IFCQ_LOCK(ifq); omodel = ifp->if_output_sched_model; ifp->if_output_sched_model = model; - if ((err = ifclassq_pktsched_setup(ifq)) != 0) + if ((err = ifclassq_pktsched_setup(ifq)) != 0) { ifp->if_output_sched_model = omodel; + } IFCQ_UNLOCK(ifq); - return (err); + return err; } errno_t ifnet_set_sndq_maxlen(struct ifnet *ifp, u_int32_t maxqlen) { - if (ifp == NULL) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_TXSTART)) - return (ENXIO); + if (ifp == NULL) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_TXSTART)) { + return ENXIO; + } ifclassq_set_maxlen(&ifp->if_snd, maxqlen); - return (0); + return 0; } errno_t ifnet_get_sndq_maxlen(struct ifnet *ifp, u_int32_t *maxqlen) { - if (ifp == NULL || maxqlen == NULL) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_TXSTART)) - return (ENXIO); + if (ifp == NULL || maxqlen == NULL) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_TXSTART)) { + return ENXIO; + } *maxqlen = ifclassq_get_maxlen(&ifp->if_snd); - return (0); + return 0; } errno_t @@ -3217,15 +3284,16 @@ ifnet_get_sndq_len(struct ifnet *ifp, u_int32_t *pkts) { errno_t err; - if (ifp == NULL || pkts == NULL) + if (ifp == NULL || pkts == NULL) { err = EINVAL; - else if (!(ifp->if_eflags & IFEF_TXSTART)) + } else if (!(ifp->if_eflags & IFEF_TXSTART)) { err = ENXIO; - else + } else { err = ifclassq_get_len(&ifp->if_snd, MBUF_SC_UNSPEC, pkts, NULL); + } - return (err); + return err; } errno_t @@ -3235,14 +3303,15 @@ ifnet_get_service_class_sndq_len(struct ifnet *ifp, mbuf_svc_class_t sc, errno_t err; if (ifp == NULL || !MBUF_VALID_SC(sc) || - (pkts == NULL && bytes == NULL)) + 
(pkts == NULL && bytes == NULL)) { err = EINVAL; - else if (!(ifp->if_eflags & IFEF_TXSTART)) + } else if (!(ifp->if_eflags & IFEF_TXSTART)) { err = ENXIO; - else + } else { err = ifclassq_get_len(&ifp->if_snd, sc, pkts, bytes); + } - return (err); + return err; } errno_t @@ -3250,22 +3319,24 @@ ifnet_set_rcvq_maxlen(struct ifnet *ifp, u_int32_t maxqlen) { struct dlil_threading_info *inp; - if (ifp == NULL) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_RXPOLL) || ifp->if_inp == NULL) - return (ENXIO); + if (ifp == NULL) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_RXPOLL) || ifp->if_inp == NULL) { + return ENXIO; + } - if (maxqlen == 0) + if (maxqlen == 0) { maxqlen = if_rcvq_maxlen; - else if (maxqlen < IF_RCVQ_MINLEN) + } else if (maxqlen < IF_RCVQ_MINLEN) { maxqlen = IF_RCVQ_MINLEN; + } inp = ifp->if_inp; lck_mtx_lock(&inp->input_lck); qlimit(&inp->rcvq_pkts) = maxqlen; lck_mtx_unlock(&inp->input_lck); - return (0); + return 0; } errno_t @@ -3273,16 +3344,17 @@ ifnet_get_rcvq_maxlen(struct ifnet *ifp, u_int32_t *maxqlen) { struct dlil_threading_info *inp; - if (ifp == NULL || maxqlen == NULL) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_RXPOLL) || ifp->if_inp == NULL) - return (ENXIO); + if (ifp == NULL || maxqlen == NULL) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_RXPOLL) || ifp->if_inp == NULL) { + return ENXIO; + } inp = ifp->if_inp; lck_mtx_lock(&inp->input_lck); *maxqlen = qlimit(&inp->rcvq_pkts); lck_mtx_unlock(&inp->input_lck); - return (0); + return 0; } void @@ -3343,13 +3415,15 @@ ifnet_enqueue_common(struct ifnet *ifp, void *p, classq_pkt_type_t ptype, m->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) { if (!(m->m_pkthdr.pkt_flags & PKTF_SO_BACKGROUND)) { ifp->if_fg_sendts = _net_uptime; - if (fg_ts != NULL) + if (fg_ts != NULL) { *fg_ts = _net_uptime; + } } if (m->m_pkthdr.pkt_flags & PKTF_SO_REALTIME) { ifp->if_rt_sendts = _net_uptime; - if (rt_ts != NULL) + if (rt_ts != NULL) { *rt_ts = _net_uptime; + } } } break; @@ -3437,17 +3511,18 @@ ifnet_enqueue_common(struct ifnet *ifp, void *p, classq_pkt_type_t ptype, * be dequeueing from other unsuspended queues. 
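/*
 * ifnet_set_rcvq_maxlen above normalizes the caller's request before
 * applying it: zero selects the global default (if_rcvq_maxlen) and any
 * value below the floor (IF_RCVQ_MINLEN) is raised to it.  A minimal
 * sketch of the clamp, with the default and floor passed in rather than
 * read from the globals:
 */
static inline u_int32_t
rcvq_maxlen_clamp_sketch(u_int32_t req, u_int32_t dflt, u_int32_t minlen)
{
	if (req == 0) {
		return dflt;            /* 0 means "use the system default" */
	}
	return (req < minlen) ? minlen : req;   /* enforce the lower bound */
}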
*/ if (!(ifp->if_eflags & IFEF_ENQUEUE_MULTI) && - ((error == 0 && flush) || error == EQFULL || error == EQSUSPENDED)) + ((error == 0 && flush) || error == EQFULL || error == EQSUSPENDED)) { ifnet_start(ifp); + } - return (error); + return error; } errno_t ifnet_enqueue(struct ifnet *ifp, struct mbuf *m) { boolean_t pdrop; - return (ifnet_enqueue_mbuf(ifp, m, TRUE, &pdrop)); + return ifnet_enqueue_mbuf(ifp, m, TRUE, &pdrop); } errno_t @@ -3460,20 +3535,20 @@ ifnet_enqueue_mbuf(struct ifnet *ifp, struct mbuf *m, boolean_t flush, m_freem_list(m); *pdrop = TRUE; } - return (EINVAL); + return EINVAL; } else if (!(ifp->if_eflags & IFEF_TXSTART) || !IF_FULLY_ATTACHED(ifp)) { /* flag tested without lock for performance */ m_freem(m); *pdrop = TRUE; - return (ENXIO); + return ENXIO; } else if (!(ifp->if_flags & IFF_UP)) { m_freem(m); *pdrop = TRUE; - return (ENETDOWN); + return ENETDOWN; } - return (ifnet_enqueue_common(ifp, m, QP_MBUF, flush, pdrop)); + return ifnet_enqueue_common(ifp, m, QP_MBUF, flush, pdrop); } @@ -3482,20 +3557,22 @@ ifnet_dequeue(struct ifnet *ifp, struct mbuf **mp) { errno_t rc; classq_pkt_type_t ptype; - if (ifp == NULL || mp == NULL) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_TXSTART) || - ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) - return (ENXIO); - if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (ifp == NULL || mp == NULL) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_TXSTART) || + ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) { + return ENXIO; + } + if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } rc = ifclassq_dequeue(&ifp->if_snd, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, (void **)mp, NULL, NULL, NULL, &ptype); VERIFY((*mp == NULL) || (ptype == QP_MBUF)); ifnet_decr_iorefcnt(ifp); - return (rc); + return rc; } errno_t @@ -3504,20 +3581,22 @@ ifnet_dequeue_service_class(struct ifnet *ifp, mbuf_svc_class_t sc, { errno_t rc; classq_pkt_type_t ptype; - if (ifp == NULL || mp == NULL || !MBUF_VALID_SC(sc)) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_TXSTART) || - ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) - return (ENXIO); - if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (ifp == NULL || mp == NULL || !MBUF_VALID_SC(sc)) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_TXSTART) || + ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) { + return ENXIO; + } + if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } rc = ifclassq_dequeue_sc(&ifp->if_snd, sc, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, (void **)mp, NULL, NULL, - NULL, &ptype); + NULL, &ptype); VERIFY((*mp == NULL) || (ptype == QP_MBUF)); ifnet_decr_iorefcnt(ifp); - return (rc); + return rc; } errno_t @@ -3526,20 +3605,22 @@ ifnet_dequeue_multi(struct ifnet *ifp, u_int32_t pkt_limit, { errno_t rc; classq_pkt_type_t ptype; - if (ifp == NULL || head == NULL || pkt_limit < 1) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_TXSTART) || - ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) - return (ENXIO); - if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (ifp == NULL || head == NULL || pkt_limit < 1) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_TXSTART) || + ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) { + return ENXIO; + } + if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } rc = ifclassq_dequeue(&ifp->if_snd, pkt_limit, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, (void **)head, (void **)tail, cnt, len, &ptype); VERIFY((*head == NULL) || (ptype == QP_MBUF)); ifnet_decr_iorefcnt(ifp); - return (rc); + return rc; } errno_t @@ 
-3548,19 +3629,21 @@ ifnet_dequeue_multi_bytes(struct ifnet *ifp, u_int32_t byte_limit, { errno_t rc; classq_pkt_type_t ptype; - if (ifp == NULL || head == NULL || byte_limit < 1) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_TXSTART) || - ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) - return (ENXIO); - if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (ifp == NULL || head == NULL || byte_limit < 1) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_TXSTART) || + ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) { + return ENXIO; + } + if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } rc = ifclassq_dequeue(&ifp->if_snd, CLASSQ_DEQUEUE_MAX_PKT_LIMIT, byte_limit, (void **)head, (void **)tail, cnt, len, &ptype); VERIFY((*head == NULL) || (ptype == QP_MBUF)); ifnet_decr_iorefcnt(ifp); - return (rc); + return rc; } errno_t @@ -3571,20 +3654,22 @@ ifnet_dequeue_service_class_multi(struct ifnet *ifp, mbuf_svc_class_t sc, errno_t rc; classq_pkt_type_t ptype; if (ifp == NULL || head == NULL || pkt_limit < 1 || - !MBUF_VALID_SC(sc)) - return (EINVAL); - else if (!(ifp->if_eflags & IFEF_TXSTART) || - ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) - return (ENXIO); - if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + !MBUF_VALID_SC(sc)) { + return EINVAL; + } else if (!(ifp->if_eflags & IFEF_TXSTART) || + ifp->if_output_sched_model >= IFNET_SCHED_MODEL_MAX) { + return ENXIO; + } + if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } rc = ifclassq_dequeue_sc(&ifp->if_snd, sc, pkt_limit, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, (void **)head, (void **)tail, cnt, len, &ptype); VERIFY((*head == NULL) || (ptype == QP_MBUF)); ifnet_decr_iorefcnt(ifp); - return (rc); + return rc; } #if !CONFIG_EMBEDDED @@ -3593,12 +3678,14 @@ ifnet_framer_stub(struct ifnet *ifp, struct mbuf **m, const struct sockaddr *dest, const char *dest_linkaddr, const char *frame_type, u_int32_t *pre, u_int32_t *post) { - if (pre != NULL) + if (pre != NULL) { *pre = 0; - if (post != NULL) + } + if (post != NULL) { *post = 0; + } - return (ifp->if_framer_legacy(ifp, m, dest, dest_linkaddr, frame_type)); + return ifp->if_framer_legacy(ifp, m, dest, dest_linkaddr, frame_type); } #endif /* !CONFIG_EMBEDDED */ @@ -3630,7 +3717,7 @@ dlil_interface_filters_input(struct ifnet *ifp, struct mbuf **m_p, /* we're done with the filter list */ if_flt_monitor_unbusy(ifp); lck_mtx_unlock(&ifp->if_flt_lock); - return (result); + return result; } } } @@ -3642,10 +3729,11 @@ dlil_interface_filters_input(struct ifnet *ifp, struct mbuf **m_p, * Strip away M_PROTO1 bit prior to sending packet up the stack as * it is meant to be local to a subsystem -- if_bridge for M_PROTO1 */ - if (*m_p != NULL) + if (*m_p != NULL) { (*m_p)->m_flags &= ~M_PROTO1; + } - return (0); + return 0; } static int @@ -3676,7 +3764,7 @@ dlil_interface_filters_output(struct ifnet *ifp, struct mbuf **m_p, /* we're done with the filter list */ if_flt_monitor_unbusy(ifp); lck_mtx_unlock(&ifp->if_flt_lock); - return (result); + return result; } } } @@ -3684,7 +3772,7 @@ dlil_interface_filters_output(struct ifnet *ifp, struct mbuf **m_p, if_flt_monitor_unbusy(ifp); lck_mtx_unlock(&ifp->if_flt_lock); - return (0); + return 0; } static void @@ -3695,8 +3783,8 @@ dlil_ifproto_input(struct if_proto * ifproto, mbuf_t m) if (ifproto->proto_kpi == kProtoKPI_v1) { /* Version 1 protocols get one packet at a time */ while (m != NULL) { - char * frame_header; - mbuf_t next_packet; + char * frame_header; + mbuf_t next_packet; next_packet = m->m_nextpkt; m->m_nextpkt = 
NULL; @@ -3704,16 +3792,18 @@ dlil_ifproto_input(struct if_proto * ifproto, mbuf_t m) m->m_pkthdr.pkt_hdr = NULL; error = (*ifproto->kpi.v1.input)(ifproto->ifp, ifproto->protocol_family, m, frame_header); - if (error != 0 && error != EJUSTRETURN) + if (error != 0 && error != EJUSTRETURN) { m_freem(m); + } m = next_packet; } } else if (ifproto->proto_kpi == kProtoKPI_v2) { /* Version 2 protocols support packet lists */ error = (*ifproto->kpi.v2.input)(ifproto->ifp, ifproto->protocol_family, m); - if (error != 0 && error != EJUSTRETURN) + if (error != 0 && error != EJUSTRETURN) { m_freem_list(m); + } } } @@ -3723,27 +3813,36 @@ dlil_input_stats_add(const struct ifnet_stat_increment_param *s, { struct ifnet_stat_increment_param *d = &inp->stats; - if (s->packets_in != 0) + if (s->packets_in != 0) { d->packets_in += s->packets_in; - if (s->bytes_in != 0) + } + if (s->bytes_in != 0) { d->bytes_in += s->bytes_in; - if (s->errors_in != 0) + } + if (s->errors_in != 0) { d->errors_in += s->errors_in; + } - if (s->packets_out != 0) + if (s->packets_out != 0) { d->packets_out += s->packets_out; - if (s->bytes_out != 0) + } + if (s->bytes_out != 0) { d->bytes_out += s->bytes_out; - if (s->errors_out != 0) + } + if (s->errors_out != 0) { d->errors_out += s->errors_out; + } - if (s->collisions != 0) + if (s->collisions != 0) { d->collisions += s->collisions; - if (s->dropped != 0) + } + if (s->dropped != 0) { d->dropped += s->dropped; + } - if (poll) + if (poll) { PKTCNTR_ADD(&inp->tstats, s->packets_in, s->bytes_in); + } } static void @@ -3812,15 +3911,15 @@ dlil_input_stats_sync(struct ifnet *ifp, struct dlil_threading_info *inp) __private_extern__ void dlil_input_packet_list(struct ifnet *ifp, struct mbuf *m) { - return (dlil_input_packet_list_common(ifp, m, 0, - IFNET_MODEL_INPUT_POLL_OFF, FALSE)); + return dlil_input_packet_list_common(ifp, m, 0, + IFNET_MODEL_INPUT_POLL_OFF, FALSE); } __private_extern__ void dlil_input_packet_list_extended(struct ifnet *ifp, struct mbuf *m, u_int32_t cnt, ifnet_model_t mode) { - return (dlil_input_packet_list_common(ifp, m, cnt, mode, TRUE)); + return dlil_input_packet_list_common(ifp, m, cnt, mode, TRUE); } static void @@ -3830,9 +3929,9 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, int error = 0; protocol_family_t protocol_family; mbuf_t next_packet; - ifnet_t ifp = ifp_param; + ifnet_t ifp = ifp_param; char *frame_header = NULL; - struct if_proto *last_ifproto = NULL; + struct if_proto *last_ifproto = NULL; mbuf_t pkt_first = NULL; mbuf_t *pkt_next = NULL; u_int32_t poll_thresh = 0, poll_ival = 0; @@ -3840,20 +3939,23 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0); if (ext && mode == IFNET_MODEL_INPUT_POLL_ON && cnt > 1 && - (poll_ival = if_rxpoll_interval_pkts) > 0) + (poll_ival = if_rxpoll_interval_pkts) > 0) { poll_thresh = cnt; + } while (m != NULL) { struct if_proto *ifproto = NULL; int iorefcnt = 0; - uint32_t pktf_mask; /* pkt flags to preserve */ + uint32_t pktf_mask; /* pkt flags to preserve */ - if (ifp_param == NULL) + if (ifp_param == NULL) { ifp = m->m_pkthdr.rcvif; + } if ((ifp->if_eflags & IFEF_RXPOLL) && poll_thresh != 0 && - poll_ival > 0 && (--poll_thresh % poll_ival) == 0) + poll_ival > 0 && (--poll_thresh % poll_ival) == 0) { ifnet_poll(ifp); + } /* Check if this mbuf looks valid */ MBUF_INPUT_CHECK(m, ifp); @@ -3884,7 +3986,7 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, * info to allow for 
connectivity between loopback * and local interface addresses. */ - pktf_mask = (PKTF_LOOP|PKTF_IFAINFO); + pktf_mask = (PKTF_LOOP | PKTF_IFAINFO); } /* make sure packet comes in clean */ @@ -3898,8 +4000,9 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, &protocol_family); ifnet_lock_done(ifp); if (error != 0) { - if (error == EJUSTRETURN) + if (error == EJUSTRETURN) { goto next; + } protocol_family = 0; } @@ -3922,8 +4025,9 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, if (ifp->if_type == IFT_ETHER) { ehp = (struct ether_header *)(void *)frame_header; /* Skip RX Ethernet packets if they are not IPV6 */ - if (ntohs(ehp->ether_type) != ETHERTYPE_IPV6) + if (ntohs(ehp->ether_type) != ETHERTYPE_IPV6) { goto skip_clat; + } /* Keep a copy of frame_header for Ethernet packets */ bcopy(frame_header, (caddr_t)&eh, ETHER_HDR_LEN); @@ -3936,8 +4040,9 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, goto next; } /* Native v6 should be No-op */ - if (protocol_family != PF_INET) + if (protocol_family != PF_INET) { goto skip_clat; + } /* Do this only for translated v4 packets. */ switch (ifp->if_type) { @@ -3967,9 +4072,10 @@ dlil_input_packet_list_common(struct ifnet *ifp_param, struct mbuf *m, } skip_clat: if (hwcksum_dbg != 0 && !(ifp->if_flags & IFF_LOOPBACK) && - !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) + !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) { dlil_input_cksum_dbg(ifp, m, frame_header, protocol_family); + } /* * For partial checksum offload, we expect the driver to @@ -3999,19 +4105,22 @@ skip_clat: } } - if (clat_debug) + if (clat_debug) { pktap_input(ifp, protocol_family, m, frame_header); + } - if (m->m_flags & (M_BCAST|M_MCAST)) + if (m->m_flags & (M_BCAST | M_MCAST)) { atomic_add_64(&ifp->if_imcasts, 1); + } /* run interface filters, exclude VLAN packets PR-3586856 */ if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0) { error = dlil_interface_filters_input(ifp, &m, &frame_header, protocol_family); if (error != 0) { - if (error != EJUSTRETURN) + if (error != EJUSTRETURN) { m_freem(m); + } goto next; } } @@ -4032,7 +4141,7 @@ skip_clat: VERIFY(ifproto == NULL); ifnet_lock_shared(ifp); /* callee holds a proto refcnt upon success */ - ifproto = find_attached_proto(ifp, protocol_family); + ifproto = find_attached_proto(ifp, protocol_family); ifnet_lock_done(ifp); } if (ifproto == NULL) { @@ -4074,10 +4183,12 @@ next: m = next_packet; /* update the driver's multicast filter, if needed */ - if (ifp->if_updatemcasts > 0 && if_mcasts_update(ifp) == 0) + if (ifp->if_updatemcasts > 0 && if_mcasts_update(ifp) == 0) { ifp->if_updatemcasts = 0; - if (iorefcnt == 1) + } + if (iorefcnt == 1) { ifnet_decr_iorefcnt(ifp); + } } KERNEL_DEBUG(DBG_FNC_DLIL_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); @@ -4089,15 +4200,16 @@ if_mcasts_update(struct ifnet *ifp) errno_t err; err = ifnet_ioctl(ifp, 0, SIOCADDMULTI, NULL); - if (err == EAFNOSUPPORT) + if (err == EAFNOSUPPORT) { err = 0; + } printf("%s: %s %d suspended link-layer multicast membership(s) " "(err=%d)\n", if_name(ifp), (err == 0 ? 
"successfully restored" : "failed to restore"), ifp->if_updatemcasts, err); /* just return success */ - return (0); + return 0; } /* If ifp is set, we will increment the generation for the interface */ @@ -4112,7 +4224,7 @@ dlil_post_complete_msg(struct ifnet *ifp, struct kev_msg *event) necp_update_all_clients(); #endif /* NECP */ - return (kev_post_msg(event)); + return kev_post_msg(event); } __private_extern__ void @@ -4121,8 +4233,8 @@ dlil_post_sifflags_msg(struct ifnet * ifp) struct kev_msg ev_msg; struct net_event_data ev_data; - bzero(&ev_data, sizeof (ev_data)); - bzero(&ev_msg, sizeof (ev_msg)); + bzero(&ev_data, sizeof(ev_data)); + bzero(&ev_msg, sizeof(ev_msg)); ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_NETWORK_CLASS; ev_msg.kev_subclass = KEV_DL_SUBCLASS; @@ -4136,7 +4248,7 @@ dlil_post_sifflags_msg(struct ifnet * ifp) dlil_post_complete_msg(ifp, &ev_msg); } -#define TMP_IF_PROTO_ARR_SIZE 10 +#define TMP_IF_PROTO_ARR_SIZE 10 static int dlil_event_internal(struct ifnet *ifp, struct kev_msg *event, bool update_generation) { @@ -4169,8 +4281,9 @@ dlil_event_internal(struct ifnet *ifp, struct kev_msg *event, bool update_genera lck_mtx_unlock(&ifp->if_flt_lock); /* Get an io ref count if the interface is attached */ - if (!ifnet_is_attached(ifp, 1)) + if (!ifnet_is_attached(ifp, 1)) { goto done; + } /* * An embedded tmp_list_entry in if_proto may still get @@ -4186,7 +4299,7 @@ dlil_event_internal(struct ifnet *ifp, struct kev_msg *event, bool update_genera tmp_ifproto_arr = tmp_ifproto_stack_arr; } else { MALLOC(tmp_ifproto_arr, struct if_proto **, - sizeof (*tmp_ifproto_arr) * if_proto_count, + sizeof(*tmp_ifproto_arr) * if_proto_count, M_TEMP, M_ZERO); if (tmp_ifproto_arr == NULL) { ifnet_lock_done(ifp); @@ -4229,13 +4342,14 @@ cleanup: } /* Pass the event to the interface */ - if (ifp->if_event != NULL) + if (ifp->if_event != NULL) { ifp->if_event(ifp, event); + } /* Release the io ref count */ ifnet_decr_iorefcnt(ifp); done: - return (dlil_post_complete_msg(update_generation ? ifp : NULL, event)); + return dlil_post_complete_msg(update_generation ? 
ifp : NULL, event); } errno_t @@ -4244,10 +4358,11 @@ ifnet_event(ifnet_t ifp, struct kern_event_msg *event) struct kev_msg kev_msg; int result = 0; - if (ifp == NULL || event == NULL) - return (EINVAL); + if (ifp == NULL || event == NULL) { + return EINVAL; + } - bzero(&kev_msg, sizeof (kev_msg)); + bzero(&kev_msg, sizeof(kev_msg)); kev_msg.vendor_code = event->vendor_code; kev_msg.kev_class = event->kev_class; kev_msg.kev_subclass = event->kev_subclass; @@ -4258,7 +4373,7 @@ ifnet_event(ifnet_t ifp, struct kern_event_msg *event) result = dlil_event_internal(ifp, &kev_msg, TRUE); - return (result); + return result; } #if CONFIG_MACF_NET @@ -4276,37 +4391,41 @@ dlil_get_socket_type(struct mbuf **mp, int family, int raw) switch (family) { case PF_INET: m = m_pullup(*mp, sizeof(struct ip)); - if (m == NULL) + if (m == NULL) { break; + } *mp = m; ip = mtod(m, struct ip *); - if (ip->ip_p == IPPROTO_TCP) + if (ip->ip_p == IPPROTO_TCP) { type = SOCK_STREAM; - else if (ip->ip_p == IPPROTO_UDP) + } else if (ip->ip_p == IPPROTO_UDP) { type = SOCK_DGRAM; + } break; case PF_INET6: m = m_pullup(*mp, sizeof(struct ip6_hdr)); - if (m == NULL) + if (m == NULL) { break; + } *mp = m; ip6 = mtod(m, struct ip6_hdr *); - if (ip6->ip6_nxt == IPPROTO_TCP) + if (ip6->ip6_nxt == IPPROTO_TCP) { type = SOCK_STREAM; - else if (ip6->ip6_nxt == IPPROTO_UDP) + } else if (ip6->ip6_nxt == IPPROTO_UDP) { type = SOCK_DGRAM; + } break; } } - return (type); + return type; } #endif static void dlil_count_chain_len(mbuf_t m, struct chain_len_stats *cls) { - mbuf_t n = m; + mbuf_t n = m; int chainlen = 0; while (n != NULL) { @@ -4314,24 +4433,24 @@ dlil_count_chain_len(mbuf_t m, struct chain_len_stats *cls) n = n->m_next; } switch (chainlen) { - case 0: - break; - case 1: - atomic_add_64(&cls->cls_one, 1); - break; - case 2: - atomic_add_64(&cls->cls_two, 1); - break; - case 3: - atomic_add_64(&cls->cls_three, 1); - break; - case 4: - atomic_add_64(&cls->cls_four, 1); - break; - case 5: - default: - atomic_add_64(&cls->cls_five_or_more, 1); - break; + case 0: + break; + case 1: + atomic_add_64(&cls->cls_one, 1); + break; + case 2: + atomic_add_64(&cls->cls_two, 1); + break; + case 3: + atomic_add_64(&cls->cls_three, 1); + break; + case 4: + atomic_add_64(&cls->cls_four, 1); + break; + case 5: + default: + atomic_add_64(&cls->cls_five_or_more, 1); + break; } } @@ -4361,10 +4480,10 @@ dlil_output(ifnet_t ifp, protocol_family_t proto_family, mbuf_t packetlist, int retval = 0; char frame_type_buffer[MAX_FRAME_TYPE_SIZE * 4]; char dst_linkaddr_buffer[MAX_LINKADDR * 4]; - struct if_proto *proto = NULL; - mbuf_t m = NULL; - mbuf_t send_head = NULL; - mbuf_t *send_tail = &send_head; + struct if_proto *proto = NULL; + mbuf_t m = NULL; + mbuf_t send_head = NULL; + mbuf_t *send_tail = &send_head; int iorefcnt = 0; u_int32_t pre = 0, post = 0; u_int32_t fpkts = 0, fbytes = 0; @@ -4390,8 +4509,9 @@ dlil_output(ifnet_t ifp, protocol_family_t proto_family, mbuf_t packetlist, VERIFY(ifp->if_output_dlil != NULL); /* update the driver's multicast filter, if needed */ - if (ifp->if_updatemcasts > 0 && if_mcasts_update(ifp) == 0) + if (ifp->if_updatemcasts > 0 && if_mcasts_update(ifp) == 0) { ifp->if_updatemcasts = 0; + } frame_type = frame_type_buffer; dst_linkaddr = dst_linkaddr_buffer; @@ -4409,8 +4529,9 @@ dlil_output(ifnet_t ifp, protocol_family_t proto_family, mbuf_t packetlist, } preout_again: - if (packetlist == NULL) + if (packetlist == NULL) { goto cleanup; + } m = packetlist; packetlist = packetlist->m_nextpkt; @@ -4423,7 +4544,7 @@ 
preout_again: */ if (proto_family == PF_INET && IS_INTF_CLAT46(ifp) && (ifp->if_type == IFT_CELLULAR || - dlil_is_clat_needed(proto_family, m))) { + dlil_is_clat_needed(proto_family, m))) { retval = dlil_clat46(ifp, &proto_family, &m); /* * Go to the next packet if translation fails @@ -4446,8 +4567,9 @@ preout_again: struct sockaddr_in6 dest6; did_clat46 = TRUE; - if (proto != NULL) + if (proto != NULL) { if_proto_free(proto); + } ifnet_lock_shared(ifp); /* callee holds a proto refcnt upon success */ proto = find_attached_proto(ifp, proto_family); @@ -4504,8 +4626,9 @@ preout_again: frame_type, dst_linkaddr); if (retval != 0) { - if (retval == EJUSTRETURN) + if (retval == EJUSTRETURN) { goto preout_again; + } m_freem(m); m = NULL; goto cleanup; @@ -4536,9 +4659,9 @@ preout_again: */ if (proto_family == PF_INET && IS_INTF_CLAT46(ifp) && (ifp->if_type == IFT_CELLULAR || - dlil_is_clat_needed(proto_family, m))) { + dlil_is_clat_needed(proto_family, m))) { retval = dlil_clat46(ifp, &proto_family, &m); - /* Goto the next packet if the translation fails */ + /* Goto the next packet if the translation fails */ if (retval != 0) { m_freem(m); m = NULL; @@ -4551,14 +4674,13 @@ preout_again: if (!raw && proto_family == PF_INET) { struct ip *ip = mtod(m, struct ip *); DTRACE_IP6(send, struct mbuf *, m, struct inpcb *, NULL, - struct ip *, ip, struct ifnet *, ifp, - struct ip *, ip, struct ip6_hdr *, NULL); - + struct ip *, ip, struct ifnet *, ifp, + struct ip *, ip, struct ip6_hdr *, NULL); } else if (!raw && proto_family == PF_INET6) { struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); DTRACE_IP6(send, struct mbuf *, m, struct inpcb *, NULL, - struct ip6_hdr *, ip6, struct ifnet *, ifp, - struct ip *, NULL, struct ip6_hdr *, ip6); + struct ip6_hdr *, ip6, struct ifnet *, ifp, + struct ip *, NULL, struct ip6_hdr *, ip6); } #endif /* CONFIG_DTRACE */ @@ -4582,8 +4704,9 @@ preout_again: retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type, &pre, &post); if (retval != 0) { - if (retval != EJUSTRETURN) + if (retval != EJUSTRETURN) { m_freem(m); + } goto next; } @@ -4598,9 +4721,10 @@ preout_again: m->m_pkthdr.csum_tx_start += pre; } - if (hwcksum_dbg != 0 && !(ifp->if_flags & IFF_LOOPBACK)) + if (hwcksum_dbg != 0 && !(ifp->if_flags & IFF_LOOPBACK)) { dlil_output_cksum_dbg(ifp, m, pre, proto_family); + } /* * Clear the ifp if it was set above, and to be @@ -4611,8 +4735,9 @@ preout_again: * are clearing the one that will go down to the * layer below. */ - if (rcvif_set && m->m_pkthdr.rcvif == ifp) + if (rcvif_set && m->m_pkthdr.rcvif == ifp) { m->m_pkthdr.rcvif = NULL; + } } /* @@ -4623,8 +4748,9 @@ preout_again: retval = dlil_interface_filters_output(ifp, &m, proto_family); if (retval != 0) { - if (retval != EJUSTRETURN) + if (retval != EJUSTRETURN) { m_freem(m); + } goto next; } } @@ -4643,8 +4769,9 @@ preout_again: * not cross page(s), the following is a no-op. */ if (!(ifp->if_hwassist & IFNET_MULTIPAGES)) { - if ((m = m_normalize(m)) == NULL) + if ((m = m_normalize(m)) == NULL) { goto next; + } } /* @@ -4684,8 +4811,8 @@ preout_again: * final checksum and we shouldn't recompute it. 
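/*
 * Every callout on this output path -- pre_output, the framer, and the
 * interface filters -- reports one of three outcomes: 0 (continue with
 * the possibly rewritten mbuf), EJUSTRETURN (the callee took ownership
 * of the packet, so stop processing it but do not free it), or any other
 * errno (stop and free).  A minimal sketch of that dispatch pattern
 * (stage_fn is an illustrative stand-in, not a real KPI):
 */
static int
run_output_stage_sketch(int (*stage_fn)(mbuf_t *), mbuf_t *m_p)
{
	int error = stage_fn(m_p);

	if (error != 0) {
		if (error != EJUSTRETURN) {
			m_freem(*m_p);  /* hard error: drop the packet */
		}
		*m_p = NULL;            /* consumed or freed: don't touch it */
	}
	return error;
}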
*/ if ((m->m_pkthdr.pkt_flags & PKTF_FORWARDED) && - (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID|CSUM_PARTIAL)) == - (CSUM_DATA_VALID|CSUM_PARTIAL)) { + (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) == + (CSUM_DATA_VALID | CSUM_PARTIAL)) { m->m_pkthdr.csum_flags &= ~CSUM_TX_FLAGS; m->m_pkthdr.csum_data = 0; } @@ -4739,8 +4866,9 @@ next: m->m_nextpkt = NULL; } /* Reset the proto family to old proto family for CLAT */ - if (did_clat46) + if (did_clat46) { proto_family = old_proto_family; + } } while (m != NULL); if (send_head != NULL) { @@ -4783,8 +4911,9 @@ next: } if (retval == 0) { enq_cnt++; - if (flen > 0) + if (flen > 0) { fpkts++; + } } if (retval != 0 && dlil_verbose) { printf("%s: output error on %s " @@ -4803,24 +4932,30 @@ next: KERNEL_DEBUG(DBG_FNC_DLIL_OUTPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); cleanup: - if (fbytes > 0) + if (fbytes > 0) { ifp->if_fbytes += fbytes; - if (fpkts > 0) + } + if (fpkts > 0) { ifp->if_fpackets += fpkts; - if (proto != NULL) + } + if (proto != NULL) { if_proto_free(proto); - if (packetlist) /* if any packets are left, clean up */ + } + if (packetlist) { /* if any packets are left, clean up */ mbuf_freem_list(packetlist); - if (retval == EJUSTRETURN) + } + if (retval == EJUSTRETURN) { retval = 0; - if (iorefcnt == 1) + } + if (iorefcnt == 1) { ifnet_decr_iorefcnt(ifp); + } if (rt != NULL) { rtfree(rt); rt = NULL; } - return (retval); + return retval; } /* @@ -4831,23 +4966,25 @@ static int dlil_is_clat_needed(protocol_family_t proto_family, mbuf_t m) { int ret = 0; - switch(proto_family) { + switch (proto_family) { case PF_INET: { struct ip *iph = mtod(m, struct ip *); - if (CLAT46_NEEDED(ntohl(iph->ip_dst.s_addr))) + if (CLAT46_NEEDED(ntohl(iph->ip_dst.s_addr))) { ret = 1; + } break; } case PF_INET6: { struct ip6_hdr *ip6h = mtod(m, struct ip6_hdr *); if ((size_t)m_pktlen(m) >= sizeof(struct ip6_hdr) && - CLAT64_NEEDED(&ip6h->ip6_dst)) + CLAT64_NEEDED(&ip6h->ip6_dst)) { ret = 1; + } break; } } - return (ret); + return ret; } /* * @brief This routine translates IPv4 packet to IPv6 packet, @@ -4908,12 +5045,14 @@ dlil_clat46(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m) is_frag = TRUE; /* If the offset is not zero, it is not first frag */ - if (ip_frag_off != 0) + if (ip_frag_off != 0) { is_first_frag = FALSE; + } /* If IP_MF is set, then it is not last frag */ - if (ntohs(iph->ip_off) & IP_MF) + if (ntohs(iph->ip_off) & IP_MF) { is_last_frag = FALSE; + } } /* @@ -4942,7 +5081,7 @@ dlil_clat46(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m) error = (nat464_translate_46(pbuf, off, iph->ip_tos, iph->ip_p, iph->ip_ttl, *src, dst, tot_len) == NT_NAT64) ? 
0 : -1; - iph = NULL; /* Invalidate iph as pbuf has been modified */ + iph = NULL; /* Invalidate iph as pbuf has been modified */ if (error != 0) { ip6stat.ip6s_clat464_out_46transfail_drop++; @@ -4972,8 +5111,9 @@ dlil_clat46(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m) } cleanup: - if (ia6_clat_src != NULL) + if (ia6_clat_src != NULL) { IFA_REMREF(&ia6_clat_src->ia_ifa); + } if (pbuf_is_valid(pbuf)) { *m = pbuf->pb_mbuf; @@ -4989,7 +5129,7 @@ cleanup: ip6stat.ip6s_clat464_out_success++; } - return (error); + return error; } /* @@ -5023,14 +5163,14 @@ dlil_clat64(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m) ((size_t)(*m)->m_len < sizeof(struct ip6_hdr) && (*m = m_pullup(*m, sizeof(struct ip6_hdr))) == NULL)) { ip6stat.ip6s_clat464_in_tooshort_drop++; - return (-1); + return -1; } ip6h = mtod(*m, struct ip6_hdr *); /* Validate that mbuf contains IP payload equal to ip6_plen */ if ((size_t)(*m)->m_pkthdr.len < ntohs(ip6h->ip6_plen) + sizeof(struct ip6_hdr)) { ip6stat.ip6s_clat464_in_tooshort_drop++; - return (-1); + return -1; } osrc = ip6h->ip6_src; @@ -5042,8 +5182,9 @@ dlil_clat64(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m) * may get set before IPv6 configuration has taken place. */ ia6_clat_dst = in6ifa_ifpwithflag(ifp, IN6_IFF_CLAT46); - if (ia6_clat_dst == NULL) + if (ia6_clat_dst == NULL) { goto done; + } /* * Check if the original dest in the packet is same as the reserved @@ -5110,8 +5251,9 @@ dlil_clat64(ifnet_t ifp, protocol_family_t *proto_family, mbuf_t *m) } cleanup: - if (ia4_clat_dst != NULL) + if (ia4_clat_dst != NULL) { IFA_REMREF(&ia4_clat_dst->ia_ifa); + } if (pbuf_is_valid(pbuf)) { *m = pbuf->pb_mbuf; @@ -5129,7 +5271,7 @@ cleanup: } /* CLAT traffic */ done: - return (error); + return error; } errno_t @@ -5140,12 +5282,14 @@ ifnet_ioctl(ifnet_t ifp, protocol_family_t proto_fam, u_long ioctl_code, int retval = EOPNOTSUPP; int result = 0; - if (ifp == NULL || ioctl_code == 0) - return (EINVAL); + if (ifp == NULL || ioctl_code == 0) { + return EINVAL; + } /* Get an io ref count if the interface is attached */ - if (!ifnet_is_attached(ifp, 1)) - return (EOPNOTSUPP); + if (!ifnet_is_attached(ifp, 1)) { + return EOPNOTSUPP; + } /* * Run the interface filters first. @@ -5167,8 +5311,9 @@ ifnet_ioctl(ifnet_t ifp, protocol_family_t proto_fam, u_long ioctl_code, /* Only update retval if no one has handled the ioctl */ if (retval == EOPNOTSUPP || result == EJUSTRETURN) { - if (result == ENOTSUP) + if (result == ENOTSUP) { result = EOPNOTSUPP; + } retval = result; if (retval != 0 && retval != EOPNOTSUPP) { /* we're done with the filter list */ @@ -5185,7 +5330,7 @@ ifnet_ioctl(ifnet_t ifp, protocol_family_t proto_fam, u_long ioctl_code, /* Allow the protocol to handle the ioctl */ if (proto_fam != 0) { - struct if_proto *proto; + struct if_proto *proto; /* callee holds a proto refcnt upon success */ ifnet_lock_shared(ifp); @@ -5196,18 +5341,21 @@ ifnet_ioctl(ifnet_t ifp, protocol_family_t proto_fam, u_long ioctl_code, (proto->proto_kpi == kProtoKPI_v1 ? 
proto->kpi.v1.ioctl : proto->kpi.v2.ioctl); result = EOPNOTSUPP; - if (ioctlp != NULL) + if (ioctlp != NULL) { result = ioctlp(ifp, proto_fam, ioctl_code, ioctl_arg); + } if_proto_free(proto); /* Only update retval if no one has handled the ioctl */ if (retval == EOPNOTSUPP || result == EJUSTRETURN) { - if (result == ENOTSUP) + if (result == ENOTSUP) { result = EOPNOTSUPP; + } retval = result; - if (retval && retval != EOPNOTSUPP) + if (retval && retval != EOPNOTSUPP) { goto cleanup; + } } } } @@ -5219,13 +5367,15 @@ ifnet_ioctl(ifnet_t ifp, protocol_family_t proto_fam, u_long ioctl_code, * If it returns EOPNOTSUPP, ignore that, we may have * already handled this in the protocol or family. */ - if (ifp->if_ioctl) + if (ifp->if_ioctl) { result = (*ifp->if_ioctl)(ifp, ioctl_code, ioctl_arg); + } /* Only update retval if no one has handled the ioctl */ if (retval == EOPNOTSUPP || result == EJUSTRETURN) { - if (result == ENOTSUP) + if (result == ENOTSUP) { result = EOPNOTSUPP; + } retval = result; if (retval && retval != EOPNOTSUPP) { goto cleanup; @@ -5233,41 +5383,44 @@ ifnet_ioctl(ifnet_t ifp, protocol_family_t proto_fam, u_long ioctl_code, } cleanup: - if (retval == EJUSTRETURN) + if (retval == EJUSTRETURN) { retval = 0; + } ifnet_decr_iorefcnt(ifp); - return (retval); + return retval; } __private_extern__ errno_t dlil_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func callback) { - errno_t error = 0; + errno_t error = 0; if (ifp->if_set_bpf_tap) { /* Get an io reference on the interface if it is attached */ - if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } error = ifp->if_set_bpf_tap(ifp, mode, callback); ifnet_decr_iorefcnt(ifp); } - return (error); + return error; } errno_t dlil_resolve_multi(struct ifnet *ifp, const struct sockaddr *proto_addr, struct sockaddr *ll_addr, size_t ll_len) { - errno_t result = EOPNOTSUPP; + errno_t result = EOPNOTSUPP; struct if_proto *proto; const struct sockaddr *verify; proto_media_resolve_multi resolvep; - if (!ifnet_is_attached(ifp, 1)) - return (result); + if (!ifnet_is_attached(ifp, 1)) { + return result; + } bzero(ll_addr, ll_len); @@ -5278,23 +5431,25 @@ dlil_resolve_multi(struct ifnet *ifp, const struct sockaddr *proto_addr, if (proto != NULL) { resolvep = (proto->proto_kpi == kProtoKPI_v1 ? proto->kpi.v1.resolve_multi : proto->kpi.v2.resolve_multi); - if (resolvep != NULL) + if (resolvep != NULL) { result = resolvep(ifp, proto_addr, (struct sockaddr_dl *)(void *)ll_addr, ll_len); + } if_proto_free(proto); } /* Let the interface verify the multicast address */ if ((result == EOPNOTSUPP || result == 0) && ifp->if_check_multi) { - if (result == 0) + if (result == 0) { verify = ll_addr; - else + } else { verify = proto_addr; + } result = ifp->if_check_multi(ifp, verify); } ifnet_decr_iorefcnt(ifp); - return (result); + return result; } __private_extern__ errno_t @@ -5303,7 +5458,7 @@ dlil_send_arp_internal(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *target_hw, const struct sockaddr *target_proto) { struct if_proto *proto; - errno_t result = 0; + errno_t result = 0; /* callee holds a proto refcnt upon success */ ifnet_lock_shared(ifp); @@ -5312,7 +5467,7 @@ dlil_send_arp_internal(ifnet_t ifp, u_short arpop, if (proto == NULL) { result = ENOTSUP; } else { - proto_media_send_arp arpp; + proto_media_send_arp arpp; arpp = (proto->proto_kpi == kProtoKPI_v1 ? 
proto->kpi.v1.send_arp : proto->kpi.v2.send_arp); if (arpp == NULL) { @@ -5321,8 +5476,9 @@ dlil_send_arp_internal(ifnet_t ifp, u_short arpop, switch (arpop) { case ARPOP_REQUEST: arpstat.txrequests++; - if (target_hw != NULL) + if (target_hw != NULL) { arpstat.txurequests++; + } break; case ARPOP_REPLY: arpstat.txreplies++; @@ -5334,14 +5490,14 @@ dlil_send_arp_internal(ifnet_t ifp, u_short arpop, if_proto_free(proto); } - return (result); + return result; } struct net_thread_marks { }; static const struct net_thread_marks net_thread_marks_base = { }; __private_extern__ const net_thread_marks_t net_thread_marks_none = - &net_thread_marks_base; + &net_thread_marks_base; __private_extern__ net_thread_marks_t net_thread_marks_push(u_int32_t push) @@ -5353,11 +5509,12 @@ net_thread_marks_push(u_int32_t push) struct uthread *uth = get_bsdthread_info(current_thread()); pop = push & ~uth->uu_network_marks; - if (pop != 0) + if (pop != 0) { uth->uu_network_marks |= pop; + } } - return ((net_thread_marks_t)&base[pop]); + return (net_thread_marks_t)&base[pop]; } __private_extern__ net_thread_marks_t @@ -5370,11 +5527,12 @@ net_thread_unmarks_push(u_int32_t unpush) struct uthread *uth = get_bsdthread_info(current_thread()); unpop = unpush & uth->uu_network_marks; - if (unpop != 0) + if (unpop != 0) { uth->uu_network_marks &= ~unpop; + } } - return ((net_thread_marks_t)&base[unpop]); + return (net_thread_marks_t)&base[unpop]; } __private_extern__ void @@ -5414,10 +5572,10 @@ net_thread_is_marked(u_int32_t check) { if (check != 0) { struct uthread *uth = get_bsdthread_info(current_thread()); - return (uth->uu_network_marks & check); + return uth->uu_network_marks & check; + } else { + return 0; } - else - return (0); } __private_extern__ u_int32_t @@ -5425,10 +5583,10 @@ net_thread_is_unmarked(u_int32_t check) { if (check != 0) { struct uthread *uth = get_bsdthread_info(current_thread()); - return (~uth->uu_network_marks & check); + return ~uth->uu_network_marks & check; + } else { + return 0; } - else - return (0); } static __inline__ int @@ -5436,9 +5594,9 @@ _is_announcement(const struct sockaddr_in * sender_sin, const struct sockaddr_in * target_sin) { if (sender_sin == NULL) { - return (FALSE); + return FALSE; } - return (sender_sin->sin_addr.s_addr == target_sin->sin_addr.s_addr); + return sender_sin->sin_addr.s_addr == target_sin->sin_addr.s_addr; } __private_extern__ errno_t @@ -5446,15 +5604,16 @@ dlil_send_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, const struct sockaddr *sender_proto, const struct sockaddr_dl *target_hw, const struct sockaddr *target_proto0, u_int32_t rtflags) { - errno_t result = 0; + errno_t result = 0; const struct sockaddr_in * sender_sin; const struct sockaddr_in * target_sin; struct sockaddr_inarp target_proto_sinarp; struct sockaddr *target_proto = (void *)(uintptr_t)target_proto0; if (target_proto == NULL || (sender_proto != NULL && - sender_proto->sa_family != target_proto->sa_family)) - return (EINVAL); + sender_proto->sa_family != target_proto->sa_family)) { + return EINVAL; + } /* * If the target is a (default) router, provide that @@ -5462,7 +5621,7 @@ dlil_send_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, */ if (rtflags & RTF_ROUTER) { bcopy(target_proto, &target_proto_sinarp, - sizeof (struct sockaddr_in)); + sizeof(struct sockaddr_in)); target_proto_sinarp.sin_other |= SIN_ROUTER; target_proto = (struct sockaddr *)&target_proto_sinarp; } @@ -5479,9 +5638,9 @@ dlil_send_arp(ifnet_t ifp, u_short arpop, const 
struct sockaddr_dl *sender_hw, IN_LINKLOCAL(ntohl(target_sin->sin_addr.s_addr)) && ipv4_ll_arp_aware != 0 && arpop == ARPOP_REQUEST && !_is_announcement(target_sin, sender_sin)) { - ifnet_t *ifp_list; - u_int32_t count; - u_int32_t ifp_on; + ifnet_t *ifp_list; + u_int32_t count; + u_int32_t ifp_on; result = ENOTSUP; @@ -5498,8 +5657,9 @@ dlil_send_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, * ARPing. This may mean that we don't ARP on * the interface the subnet route points to. */ - if (!(cur_ifp->if_eflags & IFEF_ARPLL)) + if (!(cur_ifp->if_eflags & IFEF_ARPLL)) { continue; + } /* Find the source IP address */ ifnet_lock_shared(cur_ifp); @@ -5548,7 +5708,7 @@ dlil_send_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, sender_proto, target_hw, target_proto); } - return (result); + return result; } /* @@ -5561,10 +5721,11 @@ ifnet_lookup(struct ifnet *ifp) LCK_RW_ASSERT(&ifnet_head_lock, LCK_RW_ASSERT_HELD); TAILQ_FOREACH(_ifp, &ifnet_head, if_link) { - if (_ifp == ifp) + if (_ifp == ifp) { break; + } } - return (_ifp != NULL); + return _ifp != NULL; } /* @@ -5579,12 +5740,13 @@ ifnet_is_attached(struct ifnet *ifp, int refio) lck_mtx_lock_spin(&ifp->if_ref_lock); if ((ret = IF_FULLY_ATTACHED(ifp))) { - if (refio > 0) + if (refio > 0) { ifp->if_refio++; + } } lck_mtx_unlock(&ifp->if_ref_lock); - return (ret); + return ret; } /* @@ -5614,8 +5776,9 @@ ifnet_decr_iorefcnt(struct ifnet *ifp) * if there are no more outstanding io references, wakeup the * ifnet_detach thread if detaching flag is set. */ - if (ifp->if_refio == 0 && (ifp->if_refflags & IFRF_DETACHING)) + if (ifp->if_refio == 0 && (ifp->if_refflags & IFRF_DETACHING)) { wakeup(&(ifp->if_refio)); + } lck_mtx_unlock(&ifp->if_ref_lock); } @@ -5650,8 +5813,9 @@ dlil_if_ref(struct ifnet *ifp) { struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp; - if (dl_if == NULL) - return (EINVAL); + if (dl_if == NULL) { + return EINVAL; + } lck_mtx_lock_spin(&dl_if->dl_if_lock); ++dl_if->dl_if_refcnt; @@ -5659,11 +5823,12 @@ dlil_if_ref(struct ifnet *ifp) panic("%s: wraparound refcnt for ifp=%p", __func__, ifp); /* NOTREACHED */ } - if (dl_if->dl_if_trace != NULL) + if (dl_if->dl_if_trace != NULL) { (*dl_if->dl_if_trace)(dl_if, TRUE); + } lck_mtx_unlock(&dl_if->dl_if_lock); - return (0); + return 0; } errno_t @@ -5672,8 +5837,9 @@ dlil_if_free(struct ifnet *ifp) struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp; bool need_release = FALSE; - if (dl_if == NULL) - return (EINVAL); + if (dl_if == NULL) { + return EINVAL; + } lck_mtx_lock_spin(&dl_if->dl_if_lock); switch (dl_if->dl_if_refcnt) { @@ -5690,13 +5856,14 @@ dlil_if_free(struct ifnet *ifp) break; } --dl_if->dl_if_refcnt; - if (dl_if->dl_if_trace != NULL) + if (dl_if->dl_if_trace != NULL) { (*dl_if->dl_if_trace)(dl_if, FALSE); + } lck_mtx_unlock(&dl_if->dl_if_lock); if (need_release) { dlil_if_release(ifp); } - return (0); + return 0; } static errno_t @@ -5717,7 +5884,7 @@ dlil_attach_protocol_internal(struct if_proto *proto, if (_proto != NULL) { ifnet_lock_done(ifp); if_proto_free(_proto); - return (EEXIST); + return EEXIST; } /* @@ -5728,20 +5895,22 @@ dlil_attach_protocol_internal(struct if_proto *proto, demux_count); if (retval) { ifnet_lock_done(ifp); - return (retval); + return retval; } /* * Insert the protocol in the hash */ prev_proto = SLIST_FIRST(&ifp->if_proto_hash[hash_value]); - while (prev_proto != NULL && SLIST_NEXT(prev_proto, next_hash) != NULL) + while (prev_proto != NULL && SLIST_NEXT(prev_proto, next_hash) != NULL) { prev_proto = 
SLIST_NEXT(prev_proto, next_hash); - if (prev_proto) + } + if (prev_proto) { SLIST_INSERT_AFTER(prev_proto, proto, next_hash); - else + } else { SLIST_INSERT_HEAD(&ifp->if_proto_hash[hash_value], proto, next_hash); + } /* hold a proto refcnt for attach */ if_proto_ref(proto); @@ -5757,11 +5926,11 @@ dlil_attach_protocol_internal(struct if_proto *proto, dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PROTO_ATTACHED, (struct net_event_data *)&ev_pr_data, - sizeof (struct kev_dl_proto_data)); + sizeof(struct kev_dl_proto_data)); if (proto_count != NULL) { *proto_count = ev_pr_data.proto_remaining_count; } - return (retval); + return retval; } errno_t @@ -5803,8 +5972,8 @@ ifnet_attach_protocol(ifnet_t ifp, protocol_family_t protocol, ifproto->kpi.v1.send_arp = proto_details->send_arp; retval = dlil_attach_protocol_internal(ifproto, - proto_details->demux_list, proto_details->demux_count, - &proto_count); + proto_details->demux_list, proto_details->demux_count, + &proto_count); end: if (retval != 0 && retval != EEXIST && ifp != NULL) { @@ -5813,8 +5982,8 @@ end: } else { if (dlil_verbose) { printf("%s: attached v1 protocol %d (count = %d)\n", - if_name(ifp), - protocol, proto_count); + if_name(ifp), + protocol, proto_count); } } ifnet_head_done(); @@ -5830,7 +5999,7 @@ end: } else if (ifproto != NULL) { zfree(dlif_proto_zone, ifproto); } - return (retval); + return retval; } errno_t @@ -5872,8 +6041,8 @@ ifnet_attach_protocol_v2(ifnet_t ifp, protocol_family_t protocol, ifproto->kpi.v2.send_arp = proto_details->send_arp; retval = dlil_attach_protocol_internal(ifproto, - proto_details->demux_list, proto_details->demux_count, - &proto_count); + proto_details->demux_list, proto_details->demux_count, + &proto_count); end: if (retval != 0 && retval != EEXIST && ifp != NULL) { @@ -5882,8 +6051,8 @@ end: } else { if (dlil_verbose) { printf("%s: attached v2 protocol %d (count = %d)\n", - if_name(ifp), - protocol, proto_count); + if_name(ifp), + protocol, proto_count); } } ifnet_head_done(); @@ -5899,14 +6068,14 @@ end: } else if (ifproto != NULL) { zfree(dlif_proto_zone, ifproto); } - return (retval); + return retval; } errno_t ifnet_detach_protocol(ifnet_t ifp, protocol_family_t proto_family) { struct if_proto *proto = NULL; - int retval = 0; + int retval = 0; if (ifp == NULL || proto_family == 0) { retval = EINVAL; @@ -5923,8 +6092,9 @@ ifnet_detach_protocol(ifnet_t ifp, protocol_family_t proto_family) } /* call family module del_proto */ - if (ifp->if_del_proto) + if (ifp->if_del_proto) { ifp->if_del_proto(ifp, proto->protocol_family); + } SLIST_REMOVE(&ifp->if_proto_hash[proto_hash_value(proto_family)], proto, if_proto, next_hash); @@ -5964,7 +6134,7 @@ ifnet_detach_protocol(ifnet_t ifp, protocol_family_t proto_family) if_proto_free(proto); end: - return (retval); + return retval; } @@ -5973,7 +6143,7 @@ ifproto_media_input_v1(struct ifnet *ifp, protocol_family_t protocol, struct mbuf *packet, char *header) { #pragma unused(ifp, protocol, packet, header) - return (ENXIO); + return ENXIO; } static errno_t @@ -5981,8 +6151,7 @@ ifproto_media_input_v2(struct ifnet *ifp, protocol_family_t protocol, struct mbuf *packet) { #pragma unused(ifp, protocol, packet) - return (ENXIO); - + return ENXIO; } static errno_t @@ -5991,8 +6160,7 @@ ifproto_media_preout(struct ifnet *ifp, protocol_family_t protocol, char *link_layer_dest) { #pragma unused(ifp, protocol, packet, dest, route, frame_type, link_layer_dest) - return (ENXIO); - + return ENXIO; } static void @@ -6007,7 +6175,7 @@ ifproto_media_ioctl(struct ifnet 
*ifp, protocol_family_t protocol, unsigned long command, void *argument) { #pragma unused(ifp, protocol, command, argument) - return (ENXIO); + return ENXIO; } static errno_t @@ -6015,7 +6183,7 @@ ifproto_media_resolve_multi(ifnet_t ifp, const struct sockaddr *proto_addr, struct sockaddr_dl *out_ll, size_t ll_len) { #pragma unused(ifp, proto_addr, out_ll, ll_len) - return (ENXIO); + return ENXIO; } static errno_t @@ -6024,7 +6192,7 @@ ifproto_media_send_arp(struct ifnet *ifp, u_short arpop, const struct sockaddr_dl *target_hw, const struct sockaddr *target_proto) { #pragma unused(ifp, arpop, sender_hw, sender_proto, target_hw, target_proto) - return (ENXIO); + return ENXIO; } extern int if_next_index(void); @@ -6041,8 +6209,9 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) u_int32_t sflags = 0; int err; - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } /* * Serialize ifnet attach using dlil_ifnet_lock, in order to @@ -6057,7 +6226,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) if (tmp_if == ifp) { ifnet_head_done(); dlil_if_unlock(); - return (EEXIST); + return EEXIST; } } @@ -6082,7 +6251,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) ifnet_lock_done(ifp); ifnet_head_done(); dlil_if_unlock(); - return (EINVAL); + return EINVAL; } } @@ -6096,7 +6265,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) ifnet_lock_done(ifp); ifnet_head_done(); dlil_if_unlock(); - return (ENODEV); + return ENODEV; } /* Allocate protocol hash table */ @@ -6106,7 +6275,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) ifnet_lock_done(ifp); ifnet_head_done(); dlil_if_unlock(); - return (ENOBUFS); + return ENOBUFS; } bzero(ifp->if_proto_hash, dlif_phash_size); @@ -6134,7 +6303,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) ifnet_lock_done(ifp); ifnet_head_done(); dlil_if_unlock(); - return (ENOBUFS); + return ENOBUFS; } ifp->if_index = idx; } @@ -6147,7 +6316,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) ifnet_lock_done(ifp); ifnet_head_done(); dlil_if_unlock(); - return (ENOBUFS); + return ENOBUFS; } VERIFY(ifnet_addrs[ifp->if_index - 1] == NULL); @@ -6173,7 +6342,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) /* Clear stats (save and restore other fields that we care) */ if_data_saved = ifp->if_data; - bzero(&ifp->if_data, sizeof (ifp->if_data)); + bzero(&ifp->if_data, sizeof(ifp->if_data)); ifp->if_data.ifi_type = if_data_saved.ifi_type; ifp->if_data.ifi_typelen = if_data_saved.ifi_typelen; ifp->if_data.ifi_physical = if_data_saved.ifi_physical; @@ -6192,15 +6361,18 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) /* By default, use SFB and enable flow advisory */ sflags = PKTSCHEDF_QALG_SFB; - if (if_flowadv) + if (if_flowadv) { sflags |= PKTSCHEDF_QALG_FLOWCTL; + } - if (if_delaybased_queue) + if (if_delaybased_queue) { sflags |= PKTSCHEDF_QALG_DELAYBASED; + } if (ifp->if_output_sched_model == - IFNET_SCHED_MODEL_DRIVER_MANAGED) + IFNET_SCHED_MODEL_DRIVER_MANAGED) { sflags |= PKTSCHEDF_QALG_DRIVER_MANAGED; + } /* Initialize transmit queue(s) */ err = ifclassq_setup(ifp, sflags, (dl_if->dl_if_flags & DLIF_REUSE)); @@ -6212,7 +6384,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) /* Sanity checks on the input thread storage */ dl_inp = &dl_if->dl_if_inpstorage; - bzero(&dl_inp->stats, sizeof (dl_inp->stats)); + bzero(&dl_inp->stats, sizeof(dl_inp->stats)); VERIFY(dl_inp->input_waiting == 0); VERIFY(dl_inp->wtot 
== 0); VERIFY(dl_inp->ifp == NULL); @@ -6225,9 +6397,9 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) VERIFY(dl_inp->poll_thr == THREAD_NULL); VERIFY(dl_inp->tag == 0); VERIFY(dl_inp->mode == IFNET_MODEL_INPUT_POLL_OFF); - bzero(&dl_inp->tstats, sizeof (dl_inp->tstats)); - bzero(&dl_inp->pstats, sizeof (dl_inp->pstats)); - bzero(&dl_inp->sstats, sizeof (dl_inp->sstats)); + bzero(&dl_inp->tstats, sizeof(dl_inp->tstats)); + bzero(&dl_inp->pstats, sizeof(dl_inp->pstats)); + bzero(&dl_inp->sstats, sizeof(dl_inp->sstats)); #if IFNET_INPUT_SANITY_CHK VERIFY(dl_inp->input_mbuf_cnt == 0); #endif /* IFNET_INPUT_SANITY_CHK */ @@ -6252,7 +6424,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) if (ifp->if_inp != NULL && ifp->if_inp->input_mit_tcall == NULL) { ifp->if_inp->input_mit_tcall = thread_call_allocate_with_priority(dlil_mit_tcall_fn, - ifp, THREAD_CALL_PRIORITY_KERNEL); + ifp, THREAD_CALL_PRIORITY_KERNEL); } /* @@ -6275,10 +6447,10 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) panic_plain("%s: " "ifp=%p couldn't get a start thread; " "err=%d", __func__, ifp, err); - /* NOTREACHED */ + /* NOTREACHED */ } ml_thread_policy(ifp->if_start_thread, MACHINE_GROUP, - (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_WORKLOOP)); + (MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP)); } else { ifp->if_flowhash = 0; } @@ -6304,7 +6476,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) /* NOTREACHED */ } ml_thread_policy(ifp->if_poll_thread, MACHINE_GROUP, - (MACHINE_NETWORK_GROUP|MACHINE_NETWORK_WORKLOOP)); + (MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP)); } VERIFY(ifp->if_desc.ifd_maxlen == IF_DESCSIZE); @@ -6320,8 +6492,9 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { IFMA_LOCK(ifma); if (ifma->ifma_addr->sa_family == AF_LINK || - ifma->ifma_addr->sa_family == AF_UNSPEC) + ifma->ifma_addr->sa_family == AF_UNSPEC) { ifp->if_updatemcasts++; + } IFMA_UNLOCK(ifma); } @@ -6331,7 +6504,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) } /* Clear logging parameters */ - bzero(&ifp->if_log, sizeof (ifp->if_log)); + bzero(&ifp->if_log, sizeof(ifp->if_log)); /* Clear foreground/realtime activity timestamps */ ifp->if_fg_sendts = 0; @@ -6349,9 +6522,9 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) /* Reset interface state */ bzero(&ifp->if_interface_state, sizeof(ifp->if_interface_state)); ifp->if_interface_state.valid_bitmask |= - IF_INTERFACE_STATE_INTERFACE_AVAILABILITY_VALID; + IF_INTERFACE_STATE_INTERFACE_AVAILABILITY_VALID; ifp->if_interface_state.interface_availability = - IF_INTERFACE_STATE_INTERFACE_AVAILABLE; + IF_INTERFACE_STATE_INTERFACE_AVAILABLE; /* Initialize Link Quality Metric (loopback [lo0] is always good) */ if (ifp == lo_ifp) { @@ -6386,7 +6559,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) ifp->if_eflags |= IFEF_QOSMARKING_ENABLED; #if (DEVELOPMENT || DEBUG) printf("%s fastlane enabled on %s\n", - __func__, ifp->if_xname); + __func__, ifp->if_xname); #endif /* (DEVELOPMENT || DEBUG) */ } } @@ -6400,11 +6573,11 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) ifp->if_fwd_cacheok = 1; /* Clean up any existing cached routes */ ROUTE_RELEASE(&ifp->if_fwd_route); - bzero(&ifp->if_fwd_route, sizeof (ifp->if_fwd_route)); + bzero(&ifp->if_fwd_route, sizeof(ifp->if_fwd_route)); ROUTE_RELEASE(&ifp->if_src_route); - bzero(&ifp->if_src_route, sizeof (ifp->if_src_route)); + bzero(&ifp->if_src_route, 
sizeof(ifp->if_src_route)); ROUTE_RELEASE(&ifp->if_src_route6); - bzero(&ifp->if_src_route6, sizeof (ifp->if_src_route6)); + bzero(&ifp->if_src_route6, sizeof(ifp->if_src_route6)); lck_mtx_unlock(&ifp->if_cached_route_lock); ifnet_llreach_ifattach(ifp, (dl_if->dl_if_flags & DLIF_REUSE)); @@ -6446,7 +6619,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) lck_mtx_lock(rnh_lock); ifnet_lock_exclusive(ifp); lck_mtx_lock_spin(&ifp->if_ref_lock); - ifp->if_refflags = IFRF_ATTACHED; /* clears embryonic */ + ifp->if_refflags = IFRF_ATTACHED; /* clears embryonic */ lck_mtx_unlock(&ifp->if_ref_lock); if (net_rtref) { /* boot-args override; enable idle notification */ @@ -6456,7 +6629,6 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) /* apply previous request(s) to set the idle flags, if any */ (void) ifnet_set_idle_flags_locked(ifp, ifp->if_idle_new_flags, ifp->if_idle_new_flags_mask); - } ifnet_lock_done(ifp); lck_mtx_unlock(rnh_lock); @@ -6476,7 +6648,7 @@ ifnet_attach(ifnet_t ifp, const struct sockaddr_dl *ll_addr) (dl_if->dl_if_flags & DLIF_REUSE) ? " (recycled)" : ""); } - return (0); + return 0; } /* @@ -6494,21 +6666,22 @@ dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr) { struct ifaddr *ifa, *oifa; struct sockaddr_dl *asdl, *msdl; - char workbuf[IFNAMSIZ*2]; + char workbuf[IFNAMSIZ * 2]; int namelen, masklen, socksize; struct dlil_ifnet *dl_if = (struct dlil_ifnet *)ifp; ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE); VERIFY(ll_addr == NULL || ll_addr->sdl_alen == ifp->if_addrlen); - namelen = snprintf(workbuf, sizeof (workbuf), "%s", + namelen = snprintf(workbuf, sizeof(workbuf), "%s", if_name(ifp)); masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + ((namelen > 0) ? namelen : 0); socksize = masklen + ifp->if_addrlen; -#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof (u_int32_t) - 1))) - if ((u_int32_t)socksize < sizeof (struct sockaddr_dl)) +#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof (u_int32_t) - 1))) + if ((u_int32_t)socksize < sizeof(struct sockaddr_dl)) { socksize = sizeof(struct sockaddr_dl); + } socksize = ROUNDUP(socksize); #undef ROUNDUP @@ -6523,10 +6696,11 @@ dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr) * This same space will be used when if_addrlen shrinks. 
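/*
 * Illustrative sketch: the ROUNDUP() macro used by dlil_alloc_lladdr()
 * above rounds a sockaddr size up to the next multiple of
 * sizeof(u_int32_t).  Setting the low (alignment - 1) bits of (a - 1)
 * and then adding 1 lands exactly on the next aligned boundary, and
 * leaves already-aligned values unchanged.  Self-contained user-space
 * check; u_int32_t is replaced by uint32_t from <stdint.h>.
 */
#include <assert.h>
#include <stdint.h>

#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(uint32_t) - 1)))

int
main(void)
{
    assert(ROUNDUP(1) == 4);
    assert(ROUNDUP(4) == 4);     /* aligned input is unchanged */
    assert(ROUNDUP(5) == 8);
    assert(ROUNDUP(20) == 20);
    return 0;
}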
*/ if (ifa == NULL || ifa == &dl_if->dl_if_lladdr.ifa) { - int ifasize = sizeof (*ifa) + 2 * SOCK_MAXADDRLEN; + int ifasize = sizeof(*ifa) + 2 * SOCK_MAXADDRLEN; ifa = _MALLOC(ifasize, M_IFADDR, M_WAITOK | M_ZERO); - if (ifa == NULL) - return (NULL); + if (ifa == NULL) { + return NULL; + } ifa_lock_init(ifa); /* Don't set IFD_ALLOC, as this is permanent */ ifa->ifa_debug = IFD_LINK; @@ -6553,9 +6727,9 @@ dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr) IFA_LOCK(ifa); /* address and mask sockaddr_dl locations */ asdl = (struct sockaddr_dl *)(void *)&dl_if->dl_if_lladdr.asdl; - bzero(asdl, sizeof (dl_if->dl_if_lladdr.asdl)); + bzero(asdl, sizeof(dl_if->dl_if_lladdr.asdl)); msdl = (struct sockaddr_dl *)(void *)&dl_if->dl_if_lladdr.msdl; - bzero(msdl, sizeof (dl_if->dl_if_lladdr.msdl)); + bzero(msdl, sizeof(dl_if->dl_if_lladdr.msdl)); } /* hold a permanent reference for the ifnet itself */ @@ -6571,7 +6745,7 @@ dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr) asdl->sdl_family = AF_LINK; if (namelen > 0) { bcopy(workbuf, asdl->sdl_data, min(namelen, - sizeof (asdl->sdl_data))); + sizeof(asdl->sdl_data))); asdl->sdl_nlen = namelen; } else { asdl->sdl_nlen = 0; @@ -6586,14 +6760,16 @@ dlil_alloc_lladdr(struct ifnet *ifp, const struct sockaddr_dl *ll_addr) } ifa->ifa_netmask = (struct sockaddr *)msdl; msdl->sdl_len = masklen; - while (namelen > 0) + while (namelen > 0) { msdl->sdl_data[--namelen] = 0xff; + } IFA_UNLOCK(ifa); - if (oifa != NULL) + if (oifa != NULL) { IFA_REMREF(oifa); + } - return (ifa); + return ifa; } static void @@ -6613,12 +6789,14 @@ ifnet_detach(ifnet_t ifp) struct ifnet *delegated_ifp; struct nd_ifinfo *ndi = NULL; - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } ndi = ND_IFINFO(ifp); - if (NULL != ndi) + if (NULL != ndi) { ndi->cga_initialized = FALSE; + } lck_mtx_lock(rnh_lock); ifnet_head_lock_exclusive(); @@ -6638,14 +6816,14 @@ ifnet_detach(ifnet_t ifp) ifnet_lock_done(ifp); ifnet_head_done(); lck_mtx_unlock(rnh_lock); - return (EINVAL); + return EINVAL; } else if (ifp->if_refflags & IFRF_DETACHING) { /* Interface has already been detached */ lck_mtx_unlock(&ifp->if_ref_lock); ifnet_lock_done(ifp); ifnet_head_done(); lck_mtx_unlock(rnh_lock); - return (ENXIO); + return ENXIO; } VERIFY(!(ifp->if_refflags & IFRF_EMBRYONIC)); /* Indicate this interface is being detached */ @@ -6678,7 +6856,7 @@ ifnet_detach(ifnet_t ifp) ifp->if_link.tqe_next = NULL; ifp->if_link.tqe_prev = NULL; if (ifp->if_ordered_link.tqe_next != NULL || - ifp->if_ordered_link.tqe_prev != NULL) { + ifp->if_ordered_link.tqe_prev != NULL) { ifnet_remove_from_ordered_list(ifp); } ifindex2ifnet[ifp->if_index] = NULL; @@ -6690,11 +6868,11 @@ ifnet_detach(ifnet_t ifp) ctrace_record(&((struct dlil_ifnet *)ifp)->dl_if_detach); /* Clear logging parameters */ - bzero(&ifp->if_log, sizeof (ifp->if_log)); + bzero(&ifp->if_log, sizeof(ifp->if_log)); /* Clear delegated interface info (reference released below) */ delegated_ifp = ifp->if_delegated.ifp; - bzero(&ifp->if_delegated, sizeof (ifp->if_delegated)); + bzero(&ifp->if_delegated, sizeof(ifp->if_delegated)); /* Reset interface state */ bzero(&ifp->if_interface_state, sizeof(ifp->if_interface_state)); @@ -6705,28 +6883,34 @@ ifnet_detach(ifnet_t ifp) /* Release reference held on the delegated interface */ - if (delegated_ifp != NULL) + if (delegated_ifp != NULL) { ifnet_release(delegated_ifp); + } /* Reset Link Quality Metric (unless loopback [lo0]) */ - if (ifp != lo_ifp) + if (ifp != 
lo_ifp) { if_lqm_update(ifp, IFNET_LQM_THRESH_OFF, 0); + } /* Reset TCP local statistics */ - if (ifp->if_tcp_stat != NULL) + if (ifp->if_tcp_stat != NULL) { bzero(ifp->if_tcp_stat, sizeof(*ifp->if_tcp_stat)); + } /* Reset UDP local statistics */ - if (ifp->if_udp_stat != NULL) + if (ifp->if_udp_stat != NULL) { bzero(ifp->if_udp_stat, sizeof(*ifp->if_udp_stat)); + } /* Reset ifnet IPv4 stats */ - if (ifp->if_ipv4_stat != NULL) + if (ifp->if_ipv4_stat != NULL) { bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat)); + } /* Reset ifnet IPv6 stats */ - if (ifp->if_ipv6_stat != NULL) + if (ifp->if_ipv6_stat != NULL) { bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat)); + } /* Release memory held for interface link status report */ if (ifp->if_link_status != NULL) { @@ -6777,7 +6961,7 @@ ifnet_detach(ifnet_t ifp) ifnet_detaching_enqueue(ifp); dlil_if_unlock(); - return (0); + return 0; } static void @@ -6807,7 +6991,7 @@ ifnet_detaching_dequeue(void) ifp->if_detaching_link.tqe_next = NULL; ifp->if_detaching_link.tqe_prev = NULL; } - return (ifp); + return ifp; } static int @@ -6878,7 +7062,7 @@ ifnet_detach_final(struct ifnet *ifp) printf("%s: Waiting for IO references on %s interface " "to be released\n", __func__, if_name(ifp)); (void) msleep(&(ifp->if_refio), &ifp->if_ref_lock, - (PZERO - 1), "ifnet_ioref_wait", NULL); + (PZERO - 1), "ifnet_ioref_wait", NULL); } lck_mtx_unlock(&ifp->if_ref_lock); @@ -6989,7 +7173,7 @@ ifnet_detach_final(struct ifnet *ifp) inp->wloop_thr = THREAD_NULL; ptp = inp->poll_thr; inp->poll_thr = THREAD_NULL; - tp = inp->input_thr; /* don't nullify now */ + tp = inp->input_thr; /* don't nullify now */ inp->tag = 0; inp->net_affinity = FALSE; lck_mtx_unlock(&inp->input_lck); @@ -7029,7 +7213,7 @@ ifnet_detach_final(struct ifnet *ifp) /* wait for the input thread to terminate */ lck_mtx_lock_spin(&inp->input_lck); while ((inp->input_waiting & DLIL_INPUT_TERMINATE_COMPLETE) - == 0) { + == 0) { (void) msleep(&inp->input_waiting, &inp->input_lck, (PZERO - 1) | PSPIN, inp->input_name, NULL); } @@ -7038,7 +7222,6 @@ ifnet_detach_final(struct ifnet *ifp) /* clean-up input thread state */ dlil_clean_threading_info(inp); - } /* The driver might unload, so point these to ourselves */ @@ -7103,11 +7286,11 @@ ifnet_detach_final(struct ifnet *ifp) lck_mtx_lock(&ifp->if_cached_route_lock); VERIFY(!ifp->if_fwd_cacheok); ROUTE_RELEASE(&ifp->if_fwd_route); - bzero(&ifp->if_fwd_route, sizeof (ifp->if_fwd_route)); + bzero(&ifp->if_fwd_route, sizeof(ifp->if_fwd_route)); ROUTE_RELEASE(&ifp->if_src_route); - bzero(&ifp->if_src_route, sizeof (ifp->if_src_route)); + bzero(&ifp->if_src_route, sizeof(ifp->if_src_route)); ROUTE_RELEASE(&ifp->if_src_route6); - bzero(&ifp->if_src_route6, sizeof (ifp->if_src_route6)); + bzero(&ifp->if_src_route6, sizeof(ifp->if_src_route6)); lck_mtx_unlock(&ifp->if_cached_route_lock); VERIFY(ifp->if_data_threshold == 0); @@ -7129,11 +7312,13 @@ ifnet_detach_final(struct ifnet *ifp) } ifp->if_refflags &= ~IFRF_DETACHING; lck_mtx_unlock(&ifp->if_ref_lock); - if (if_free != NULL) + if (if_free != NULL) { if_free(ifp); + } - if (dlil_verbose) + if (dlil_verbose) { printf("%s: detached\n", if_name(ifp)); + } /* Release reference held during ifnet attach */ ifnet_release(ifp); @@ -7144,7 +7329,7 @@ ifp_if_output(struct ifnet *ifp, struct mbuf *m) { #pragma unused(ifp) m_freem_list(m); - return (0); + return 0; } void @@ -7160,7 +7345,7 @@ ifp_if_input(struct ifnet *ifp, struct mbuf *m_head, { #pragma unused(ifp, m_tail, s, poll, tp) m_freem_list(m_head); - return 
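/*
 * Illustrative sketch: the "wait until the last I/O reference drains"
 * pattern used by ifnet_detach_final() above.  In the kernel this is
 * msleep()/wakeup() on &ifp->if_refio under if_ref_lock; here a POSIX
 * condition variable plays the same role so the shape can run in user
 * space.  Hypothetical miniature, not the kernel API.
 */
#include <pthread.h>

struct ref_gate {
    pthread_mutex_t lock;
    pthread_cond_t  drained;
    unsigned int    refio;      /* outstanding I/O references */
};

void
ref_gate_wait(struct ref_gate *g)
{
    pthread_mutex_lock(&g->lock);
    while (g->refio > 0) {
        /* drops the lock while asleep, re-acquires on wakeup */
        pthread_cond_wait(&g->drained, &g->lock);
    }
    pthread_mutex_unlock(&g->lock);
}

void
ref_gate_release(struct ref_gate *g)
{
    pthread_mutex_lock(&g->lock);
    if (--g->refio == 0) {
        pthread_cond_broadcast(&g->drained);    /* like wakeup() */
    }
    pthread_mutex_unlock(&g->lock);
}

int
main(void)
{
    struct ref_gate g = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1
    };
    ref_gate_release(&g);   /* last reference drops ... */
    ref_gate_wait(&g);      /* ... so the wait returns immediately */
    return 0;
}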
(ENXIO); + return ENXIO; } static void @@ -7168,21 +7353,25 @@ ifp_if_input_poll(struct ifnet *ifp, u_int32_t flags, u_int32_t max_cnt, struct mbuf **m_head, struct mbuf **m_tail, u_int32_t *cnt, u_int32_t *len) { #pragma unused(ifp, flags, max_cnt) - if (m_head != NULL) + if (m_head != NULL) { *m_head = NULL; - if (m_tail != NULL) + } + if (m_tail != NULL) { *m_tail = NULL; - if (cnt != NULL) + } + if (cnt != NULL) { *cnt = 0; - if (len != NULL) + } + if (len != NULL) { *len = 0; + } } static errno_t ifp_if_ctl(struct ifnet *ifp, ifnet_ctl_cmd_t cmd, u_int32_t arglen, void *arg) { #pragma unused(ifp, cmd, arglen, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; } static errno_t @@ -7190,7 +7379,7 @@ ifp_if_demux(struct ifnet *ifp, struct mbuf *m, char *fh, protocol_family_t *pf) { #pragma unused(ifp, fh, pf) m_freem(m); - return (EJUSTRETURN); + return EJUSTRETURN; } static errno_t @@ -7198,21 +7387,21 @@ ifp_if_add_proto(struct ifnet *ifp, protocol_family_t pf, const struct ifnet_demux_desc *da, u_int32_t dc) { #pragma unused(ifp, pf, da, dc) - return (EINVAL); + return EINVAL; } static errno_t ifp_if_del_proto(struct ifnet *ifp, protocol_family_t pf) { #pragma unused(ifp, pf) - return (EINVAL); + return EINVAL; } static errno_t ifp_if_check_multi(struct ifnet *ifp, const struct sockaddr *sa) { #pragma unused(ifp, sa) - return (EOPNOTSUPP); + return EOPNOTSUPP; } #if CONFIG_EMBEDDED @@ -7228,9 +7417,9 @@ ifp_if_framer(struct ifnet *ifp, struct mbuf **m, { #pragma unused(ifp, m, sa, ll, t) #if CONFIG_EMBEDDED - return (ifp_if_framer_extended(ifp, m, sa, ll, t, pre, post)); + return ifp_if_framer_extended(ifp, m, sa, ll, t, pre, post); #else - return (ifp_if_framer_extended(ifp, m, sa, ll, t, NULL, NULL)); + return ifp_if_framer_extended(ifp, m, sa, ll, t, NULL, NULL); #endif /* !CONFIG_EMBEDDED */ } @@ -7243,19 +7432,21 @@ ifp_if_framer_extended(struct ifnet *ifp, struct mbuf **m, m_freem(*m); *m = NULL; - if (pre != NULL) + if (pre != NULL) { *pre = 0; - if (post != NULL) + } + if (post != NULL) { *post = 0; + } - return (EJUSTRETURN); + return EJUSTRETURN; } errno_t ifp_if_ioctl(struct ifnet *ifp, unsigned long cmd, void *arg) { #pragma unused(ifp, cmd, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; } static errno_t @@ -7263,7 +7454,7 @@ ifp_if_set_bpf_tap(struct ifnet *ifp, bpf_tap_mode tm, bpf_packet_func f) { #pragma unused(ifp, tm, f) /* XXX not sure what to do here */ - return (0); + return 0; } static void @@ -7278,7 +7469,8 @@ ifp_if_event(struct ifnet *ifp, const struct kev_msg *e) #pragma unused(ifp, e) } -int dlil_if_acquire(u_int32_t family, const void *uniqueid, +int +dlil_if_acquire(u_int32_t family, const void *uniqueid, size_t uniqueid_len, const char *ifxname, struct ifnet **ifp) { struct ifnet *ifp1 = NULL; @@ -7296,8 +7488,9 @@ int dlil_if_acquire(u_int32_t family, const void *uniqueid, TAILQ_FOREACH(dlifp1, &dlil_ifnet_head, dl_if_link) { ifp1 = (struct ifnet *)dlifp1; - if (ifp1->if_family != family) + if (ifp1->if_family != family) { continue; + } /* * If interface is in use, return EBUSY if either unique id @@ -7320,10 +7513,11 @@ int dlil_if_acquire(u_int32_t family, const void *uniqueid, ret = EBUSY; goto end; } else { - dlifp1->dl_if_flags |= (DLIF_INUSE|DLIF_REUSE); + dlifp1->dl_if_flags |= (DLIF_INUSE | DLIF_REUSE); /* Cache the first interface that can be recycled */ - if (*ifp == NULL) + if (*ifp == NULL) { *ifp = ifp1; + } /* * XXX Do not break or jump to end as we have to traverse * the whole list to ensure there are no name collisions @@ -7335,8 +7529,9 
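/*
 * Illustrative sketch: why the ifp_if_*() stubs above exist.  When a
 * driver may unload, its function pointers are re-pointed at in-kernel
 * no-ops ("the driver might unload, so point these to ourselves"), so
 * late callers fail safely instead of jumping into freed code.
 * Hypothetical miniature version:
 */
#include <errno.h>
#include <stddef.h>

struct dev {
    int (*ioctl)(struct dev *, unsigned long, void *);
};

static int
dev_ioctl_stub(struct dev *d, unsigned long cmd, void *arg)
{
    (void)d; (void)cmd; (void)arg;
    return EOPNOTSUPP;          /* safe failure, no driver code runs */
}

void
dev_detach(struct dev *d)
{
    /* point the ops at ourselves; the driver text may go away */
    d->ioctl = dev_ioctl_stub;
}

int
main(void)
{
    struct dev d;
    dev_detach(&d);
    return d.ioctl(&d, 0, NULL) == EOPNOTSUPP ? 0 : 1;
}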
@@ int dlil_if_acquire(u_int32_t family, const void *uniqueid, } /* If there's an interface that can be recycled, use that */ - if (*ifp != NULL) + if (*ifp != NULL) { goto end; + } /* no interface found, allocate a new one */ buf = zalloc(dlif_zone); @@ -7347,15 +7542,15 @@ int dlil_if_acquire(u_int32_t family, const void *uniqueid, bzero(buf, dlif_bufsize); /* Get the 64-bit aligned base address for this object */ - base = (void *)P2ROUNDUP((intptr_t)buf + sizeof (u_int64_t), - sizeof (u_int64_t)); + base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t), + sizeof(u_int64_t)); VERIFY(((intptr_t)base + dlif_size) <= ((intptr_t)buf + dlif_bufsize)); /* * Wind back a pointer size from the aligned base and * save the original address so we can free it later. */ - pbuf = (void **)((intptr_t)base - sizeof (void *)); + pbuf = (void **)((intptr_t)base - sizeof(void *)); *pbuf = buf; dlifp1 = base; @@ -7445,14 +7640,14 @@ int dlil_if_acquire(u_int32_t family, const void *uniqueid, end: dlil_if_unlock(); - VERIFY(dlifp1 == NULL || (IS_P2ALIGNED(dlifp1, sizeof (u_int64_t)) && - IS_P2ALIGNED(&ifp1->if_data, sizeof (u_int64_t)))); + VERIFY(dlifp1 == NULL || (IS_P2ALIGNED(dlifp1, sizeof(u_int64_t)) && + IS_P2ALIGNED(&ifp1->if_data, sizeof(u_int64_t)))); - return (ret); + return ret; } __private_extern__ void -dlil_if_release(ifnet_t ifp) +dlil_if_release(ifnet_t ifp) { struct dlil_ifnet *dlifp = (struct dlil_ifnet *)ifp; @@ -7525,7 +7720,7 @@ ifp_src_route_copyout(struct ifnet *ifp, struct route *dst) lck_mtx_lock_spin(&ifp->if_cached_route_lock); lck_mtx_convert_spin(&ifp->if_cached_route_lock); - route_copyout(dst, &ifp->if_src_route, sizeof (*dst)); + route_copyout(dst, &ifp->if_src_route, sizeof(*dst)); lck_mtx_unlock(&ifp->if_cached_route_lock); } @@ -7537,7 +7732,7 @@ ifp_src_route_copyin(struct ifnet *ifp, struct route *src) lck_mtx_convert_spin(&ifp->if_cached_route_lock); if (ifp->if_fwd_cacheok) { - route_copyin(src, &ifp->if_src_route, sizeof (*src)); + route_copyin(src, &ifp->if_src_route, sizeof(*src)); } else { ROUTE_RELEASE(src); } @@ -7552,7 +7747,7 @@ ifp_src_route6_copyout(struct ifnet *ifp, struct route_in6 *dst) lck_mtx_convert_spin(&ifp->if_cached_route_lock); route_copyout((struct route *)dst, (struct route *)&ifp->if_src_route6, - sizeof (*dst)); + sizeof(*dst)); lck_mtx_unlock(&ifp->if_cached_route_lock); } @@ -7565,7 +7760,7 @@ ifp_src_route6_copyin(struct ifnet *ifp, struct route_in6 *src) if (ifp->if_fwd_cacheok) { route_copyin((struct route *)src, - (struct route *)&ifp->if_src_route6, sizeof (*src)); + (struct route *)&ifp->if_src_route6, sizeof(*src)); } else { ROUTE_RELEASE(src); } @@ -7574,10 +7769,10 @@ ifp_src_route6_copyin(struct ifnet *ifp, struct route_in6 *src) #endif /* INET6 */ struct rtentry * -ifnet_cached_rtlookup_inet(struct ifnet *ifp, struct in_addr src_ip) +ifnet_cached_rtlookup_inet(struct ifnet *ifp, struct in_addr src_ip) { - struct route src_rt; - struct sockaddr_in *dst; + struct route src_rt; + struct sockaddr_in *dst; dst = (struct sockaddr_in *)(void *)(&src_rt.ro_dst); @@ -7586,8 +7781,8 @@ ifnet_cached_rtlookup_inet(struct ifnet *ifp, struct in_addr src_ip) if (ROUTE_UNUSABLE(&src_rt) || src_ip.s_addr != dst->sin_addr.s_addr) { ROUTE_RELEASE(&src_rt); if (dst->sin_family != AF_INET) { - bzero(&src_rt.ro_dst, sizeof (src_rt.ro_dst)); - dst->sin_len = sizeof (src_rt.ro_dst); + bzero(&src_rt.ro_dst, sizeof(src_rt.ro_dst)); + dst->sin_len = sizeof(src_rt.ro_dst); dst->sin_family = AF_INET; } dst->sin_addr = src_ip; @@ -7598,14 +7793,14 @@ 
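/*
 * Illustrative sketch: the alignment trick dlil_if_acquire() uses above.
 * The allocation is oversized, the object is placed at the next 8-byte
 * boundary past a u_int64_t of slack, and the original malloc pointer
 * is stashed one pointer-size below the aligned base so the free path
 * can recover it.  P2ROUNDUP is reproduced locally; the kernel version
 * lives in XNU's param headers.
 */
#include <stdint.h>
#include <stdlib.h>

#define P2ROUNDUP(x, align) (-(-(intptr_t)(x) & -(intptr_t)(align)))

void *
aligned_alloc_with_hidden_ptr(size_t size)
{
    void *buf, *base, **pbuf;

    /* slack for alignment plus the saved pointer */
    buf = malloc(size + 3 * sizeof(uint64_t));
    if (buf == NULL) {
        return NULL;
    }
    /* 64-bit aligned base, leaving room below it for the saved pointer */
    base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(uint64_t),
        sizeof(uint64_t));
    /* wind back one pointer size and remember the original address */
    pbuf = (void **)((intptr_t)base - sizeof(void *));
    *pbuf = buf;
    return base;
}

void
aligned_free_with_hidden_ptr(void *base)
{
    void **pbuf = (void **)((intptr_t)base - sizeof(void *));
    free(*pbuf);
}

int
main(void)
{
    void *p = aligned_alloc_with_hidden_ptr(100);
    if (p != NULL) {
        aligned_free_with_hidden_ptr(p);
    }
    return 0;
}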
ifnet_cached_rtlookup_inet(struct ifnet *ifp, struct in_addr src_ip) if (src_rt.ro_rt != NULL) { /* retain a ref, copyin consumes one */ - struct rtentry *rte = src_rt.ro_rt; + struct rtentry *rte = src_rt.ro_rt; RT_ADDREF(rte); ifp_src_route_copyin(ifp, &src_rt); src_rt.ro_rt = rte; } } - return (src_rt.ro_rt); + return src_rt.ro_rt; } #if INET6 @@ -7620,22 +7815,22 @@ ifnet_cached_rtlookup_inet6(struct ifnet *ifp, struct in6_addr *src_ip6) !IN6_ARE_ADDR_EQUAL(src_ip6, &src_rt.ro_dst.sin6_addr)) { ROUTE_RELEASE(&src_rt); if (src_rt.ro_dst.sin6_family != AF_INET6) { - bzero(&src_rt.ro_dst, sizeof (src_rt.ro_dst)); - src_rt.ro_dst.sin6_len = sizeof (src_rt.ro_dst); + bzero(&src_rt.ro_dst, sizeof(src_rt.ro_dst)); + src_rt.ro_dst.sin6_len = sizeof(src_rt.ro_dst); src_rt.ro_dst.sin6_family = AF_INET6; } src_rt.ro_dst.sin6_scope_id = in6_addr2scopeid(ifp, src_ip6); bcopy(src_ip6, &src_rt.ro_dst.sin6_addr, - sizeof (src_rt.ro_dst.sin6_addr)); + sizeof(src_rt.ro_dst.sin6_addr)); if (src_rt.ro_rt == NULL) { src_rt.ro_rt = rtalloc1_scoped( - (struct sockaddr *)&src_rt.ro_dst, 0, 0, - ifp->if_index); + (struct sockaddr *)&src_rt.ro_dst, 0, 0, + ifp->if_index); if (src_rt.ro_rt != NULL) { /* retain a ref, copyin consumes one */ - struct rtentry *rte = src_rt.ro_rt; + struct rtentry *rte = src_rt.ro_rt; RT_ADDREF(rte); ifp_src_route6_copyin(ifp, &src_rt); src_rt.ro_rt = rte; @@ -7643,7 +7838,7 @@ ifnet_cached_rtlookup_inet6(struct ifnet *ifp, struct in6_addr *src_ip6) } } - return (src_rt.ro_rt); + return src_rt.ro_rt; } #endif /* INET6 */ @@ -7674,8 +7869,9 @@ if_lqm_update(struct ifnet *ifp, int lqm, int locked) /* * Take the lock if needed */ - if (!locked) + if (!locked) { ifnet_lock_exclusive(ifp); + } if (lqm == ifp->if_interface_state.lqm_state && (ifp->if_interface_state.valid_bitmask & @@ -7683,12 +7879,13 @@ if_lqm_update(struct ifnet *ifp, int lqm, int locked) /* * Release the lock if was not held by the caller */ - if (!locked) + if (!locked) { ifnet_lock_done(ifp); - return; /* nothing to update */ + } + return; /* nothing to update */ } ifp->if_interface_state.valid_bitmask |= - IF_INTERFACE_STATE_LQM_STATE_VALID; + IF_INTERFACE_STATE_LQM_STATE_VALID; ifp->if_interface_state.lqm_state = lqm; /* @@ -7696,17 +7893,18 @@ if_lqm_update(struct ifnet *ifp, int lqm, int locked) */ ifnet_lock_done(ifp); - bzero(&ev_lqm_data, sizeof (ev_lqm_data)); + bzero(&ev_lqm_data, sizeof(ev_lqm_data)); ev_lqm_data.link_quality_metric = lqm; dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_LINK_QUALITY_METRIC_CHANGED, - (struct net_event_data *)&ev_lqm_data, sizeof (ev_lqm_data)); + (struct net_event_data *)&ev_lqm_data, sizeof(ev_lqm_data)); /* * Reacquire the lock for the caller */ - if (locked) + if (locked) { ifnet_lock_exclusive(ifp); + } } static void @@ -7716,8 +7914,9 @@ if_rrc_state_update(struct ifnet *ifp, unsigned int rrc_state) if (rrc_state == ifp->if_interface_state.rrc_state && (ifp->if_interface_state.valid_bitmask & - IF_INTERFACE_STATE_RRC_STATE_VALID)) + IF_INTERFACE_STATE_RRC_STATE_VALID)) { return; + } ifp->if_interface_state.valid_bitmask |= IF_INTERFACE_STATE_RRC_STATE_VALID; @@ -7750,14 +7949,14 @@ if_state_update(struct ifnet *ifp, (if_interface_state->valid_bitmask & IF_INTERFACE_STATE_RRC_STATE_VALID)) { ifnet_lock_done(ifp); - return (ENOTSUP); + return ENOTSUP; } if ((if_interface_state->valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) && (if_interface_state->lqm_state < IFNET_LQM_MIN || if_interface_state->lqm_state > IFNET_LQM_MAX)) { ifnet_lock_done(ifp); - return (EINVAL); + 
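/*
 * Illustrative sketch: the cache-copyout/validate/copyin pattern that
 * ifnet_cached_rtlookup_inet() above applies to the per-interface
 * source route.  The cache hands out a referenced entry under its
 * lock, the caller validates or rebuilds it outside the lock, then
 * writes it back while retaining its own reference ("retain a ref,
 * copyin consumes one").  All types here are hypothetical stand-ins,
 * not the kernel's route structures.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct entry {
    atomic_int refcnt;
    int        key;
};

struct cache {
    pthread_mutex_t lock;
    struct entry   *cached;     /* the cache holds one reference */
};

static void
entry_release(struct entry *e)
{
    if (e != NULL && atomic_fetch_sub(&e->refcnt, 1) == 1) {
        free(e);
    }
}

/* copy the cached entry out under the lock, taking a reference */
static struct entry *
cache_copyout(struct cache *c)
{
    pthread_mutex_lock(&c->lock);
    struct entry *e = c->cached;
    if (e != NULL) {
        atomic_fetch_add(&e->refcnt, 1);
    }
    pthread_mutex_unlock(&c->lock);
    return e;
}

/* install a refreshed entry; consumes the caller's reference */
static void
cache_copyin(struct cache *c, struct entry *e)
{
    pthread_mutex_lock(&c->lock);
    struct entry *old = c->cached;
    c->cached = e;
    pthread_mutex_unlock(&c->lock);
    entry_release(old);
}

/* lookup: reuse the cache if it still matches, else rebuild and re-cache */
struct entry *
cached_lookup(struct cache *c, int key)
{
    struct entry *e = cache_copyout(c);

    if (e == NULL || e->key != key) {
        entry_release(e);
        e = calloc(1, sizeof(*e));          /* "rtalloc" stand-in */
        if (e == NULL) {
            return NULL;
        }
        atomic_init(&e->refcnt, 1);         /* caller's reference */
        e->key = key;
        atomic_fetch_add(&e->refcnt, 1);    /* retain; copyin consumes one */
        cache_copyin(c, e);
    }
    return e;    /* caller owns one reference; entry_release() when done */
}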
return EINVAL; } if ((if_interface_state->valid_bitmask & IF_INTERFACE_STATE_RRC_STATE_VALID) && @@ -7766,7 +7965,7 @@ if_state_update(struct ifnet *ifp, if_interface_state->rrc_state != IF_INTERFACE_STATE_RRC_STATE_CONNECTED) { ifnet_lock_done(ifp); - return (EINVAL); + return EINVAL; } if (if_interface_state->valid_bitmask & @@ -7797,10 +7996,11 @@ if_state_update(struct ifnet *ifp, * to fire. This will be done when there is an explicit * notification that the interface became available. */ - if (if_index_available > 0) + if (if_index_available > 0) { tcp_interface_send_probe(if_index_available); + } - return (0); + return 0; } void @@ -7842,12 +8042,13 @@ if_probe_connectivity(struct ifnet *ifp, u_int32_t conn_probe) ifnet_lock_exclusive(ifp); if (conn_probe > 1) { ifnet_lock_done(ifp); - return (EINVAL); + return EINVAL; } - if (conn_probe == 0) + if (conn_probe == 0) { ifp->if_eflags &= ~IFEF_PROBE_CONNECTIVITY; - else + } else { ifp->if_eflags |= IFEF_PROBE_CONNECTIVITY; + } ifnet_lock_done(ifp); #if NECP @@ -7855,7 +8056,7 @@ if_probe_connectivity(struct ifnet *ifp, u_int32_t conn_probe) #endif /* NECP */ tcp_probe_connectivity(ifp, conn_probe); - return (0); + return 0; } /* for uuid.c */ @@ -7875,14 +8076,14 @@ uuid_get_ethernet(u_int8_t *node) IFA_UNLOCK(ifp->if_lladdr); ifnet_lock_done(ifp); ifnet_head_done(); - return (0); + return 0; } IFA_UNLOCK(ifp->if_lladdr); ifnet_lock_done(ifp); } ifnet_head_done(); - return (-1); + return -1; } static int @@ -7895,14 +8096,16 @@ sysctl_rxpoll SYSCTL_HANDLER_ARGS i = if_rxpoll; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (net_rxpoll == 0) - return (ENXIO); + if (net_rxpoll == 0) { + return ENXIO; + } if_rxpoll = i; - return (err); + return err; } static int @@ -7915,15 +8118,17 @@ sysctl_rxpoll_mode_holdtime SYSCTL_HANDLER_ARGS q = if_rxpoll_mode_holdtime; err = sysctl_handle_quad(oidp, &q, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (q < IF_RXPOLL_MODE_HOLDTIME_MIN) + if (q < IF_RXPOLL_MODE_HOLDTIME_MIN) { q = IF_RXPOLL_MODE_HOLDTIME_MIN; + } if_rxpoll_mode_holdtime = q; - return (err); + return err; } static int @@ -7936,15 +8141,17 @@ sysctl_rxpoll_sample_holdtime SYSCTL_HANDLER_ARGS q = if_rxpoll_sample_holdtime; err = sysctl_handle_quad(oidp, &q, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (q < IF_RXPOLL_SAMPLETIME_MIN) + if (q < IF_RXPOLL_SAMPLETIME_MIN) { q = IF_RXPOLL_SAMPLETIME_MIN; + } if_rxpoll_sample_holdtime = q; - return (err); + return err; } static int @@ -7957,15 +8164,17 @@ sysctl_rxpoll_interval_time SYSCTL_HANDLER_ARGS q = if_rxpoll_interval_time; err = sysctl_handle_quad(oidp, &q, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (q < IF_RXPOLL_INTERVALTIME_MIN) + if (q < IF_RXPOLL_INTERVALTIME_MIN) { q = IF_RXPOLL_INTERVALTIME_MIN; + } if_rxpoll_interval_time = q; - return (err); + return err; } static int @@ -7978,14 +8187,16 @@ sysctl_rxpoll_wlowat SYSCTL_HANDLER_ARGS i = if_rxpoll_wlowat; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (i == 0 || 
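/*
 * Illustrative sketch: the shape shared by the sysctl_rxpoll_* handlers
 * above.  Each one copies the current value into a local, lets
 * sysctl_handle_int()/sysctl_handle_quad() overwrite it from userland,
 * and only commits after validating or clamping the new value.  The
 * bound and variable below are hypothetical stand-ins; no real sysctl
 * machinery is used.
 */
#include <stdint.h>

#define HOLDTIME_MIN 100U           /* hypothetical lower bound */

static uint64_t holdtime = 1000;    /* stand-in for if_rxpoll_*_holdtime */

/* new_set mimics "req->newptr != USER_ADDR_NULL" */
int
set_holdtime(uint64_t q, int new_set)
{
    if (!new_set) {
        return 0;           /* read-only request: nothing to commit */
    }
    if (q < HOLDTIME_MIN) {
        q = HOLDTIME_MIN;   /* clamp instead of rejecting */
    }
    holdtime = q;
    return 0;
}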
i >= if_rxpoll_whiwat) - return (EINVAL); + if (i == 0 || i >= if_rxpoll_whiwat) { + return EINVAL; + } if_rxpoll_wlowat = i; - return (err); + return err; } static int @@ -7998,14 +8209,16 @@ sysctl_rxpoll_whiwat SYSCTL_HANDLER_ARGS i = if_rxpoll_whiwat; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (i <= if_rxpoll_wlowat) - return (EINVAL); + if (i <= if_rxpoll_wlowat) { + return EINVAL; + } if_rxpoll_whiwat = i; - return (err); + return err; } static int @@ -8017,14 +8230,16 @@ sysctl_sndq_maxlen SYSCTL_HANDLER_ARGS i = if_sndq_maxlen; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (i < IF_SNDQ_MINLEN) + if (i < IF_SNDQ_MINLEN) { i = IF_SNDQ_MINLEN; + } if_sndq_maxlen = i; - return (err); + return err; } static int @@ -8036,14 +8251,16 @@ sysctl_rcvq_maxlen SYSCTL_HANDLER_ARGS i = if_rcvq_maxlen; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (i < IF_RCVQ_MINLEN) + if (i < IF_RCVQ_MINLEN) { i = IF_RCVQ_MINLEN; + } if_rcvq_maxlen = i; - return (err); + return err; } void @@ -8058,18 +8275,18 @@ dlil_node_present(struct ifnet *ifp, struct sockaddr *sa, VERIFY(sa); VERIFY(sa->sa_family == AF_LINK || sa->sa_family == AF_INET6); - bzero(&kev, sizeof (kev)); + bzero(&kev, sizeof(kev)); sin6 = &kev.sin6_node_address; sdl = &kev.sdl_node_address; nd6_alt_node_addr_decompose(ifp, sa, sdl, sin6); kev.rssi = rssi; kev.link_quality_metric = lqm; kev.node_proximity_metric = npm; - bcopy(srvinfo, kev.node_service_info, sizeof (kev.node_service_info)); + bcopy(srvinfo, kev.node_service_info, sizeof(kev.node_service_info)); nd6_alt_node_present(ifp, sin6, sdl, rssi, lqm, npm); dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_NODE_PRESENCE, - &kev.link_data, sizeof (kev)); + &kev.link_data, sizeof(kev)); } void @@ -8083,19 +8300,19 @@ dlil_node_absent(struct ifnet *ifp, struct sockaddr *sa) VERIFY(sa); VERIFY(sa->sa_family == AF_LINK || sa->sa_family == AF_INET6); - bzero(&kev, sizeof (kev)); + bzero(&kev, sizeof(kev)); sin6 = &kev.sin6_node_address; sdl = &kev.sdl_node_address; nd6_alt_node_addr_decompose(ifp, sa, sdl, sin6); nd6_alt_node_absent(ifp, sin6); dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_NODE_ABSENCE, - &kev.link_data, sizeof (kev)); + &kev.link_data, sizeof(kev)); } const void * dlil_ifaddr_bytes(const struct sockaddr_dl *sdl, size_t *sizep, - kauth_cred_t *credp) + kauth_cred_t *credp) { const u_int8_t *bytes; size_t size; @@ -8112,11 +8329,12 @@ dlil_ifaddr_bytes(const struct sockaddr_dl *sdl, size_t *sizep, default: credp = NULL; break; - }; + } + ; if (credp && mac_system_check_info(*credp, "net.link.addr")) { static const u_int8_t unspec[FIREWIRE_EUI64_LEN] = { - [0] = 2 + [0] = 2 }; bytes = unspec; @@ -8126,8 +8344,10 @@ dlil_ifaddr_bytes(const struct sockaddr_dl *sdl, size_t *sizep, #pragma unused(credp) #endif - if (sizep != NULL) *sizep = size; - return (bytes); + if (sizep != NULL) { + *sizep = size; + } + return bytes; } void @@ -8139,19 +8359,20 @@ dlil_report_issues(struct ifnet *ifp, u_int8_t modid[DLIL_MODIDLEN], VERIFY(ifp != NULL); VERIFY(modid != NULL); - _CASSERT(sizeof (kev.modid) == DLIL_MODIDLEN); - _CASSERT(sizeof (kev.info) == DLIL_MODARGLEN); + _CASSERT(sizeof(kev.modid) 
== DLIL_MODIDLEN); + _CASSERT(sizeof(kev.info) == DLIL_MODARGLEN); - bzero(&kev, sizeof (kev)); + bzero(&kev, sizeof(kev)); microtime(&tv); kev.timestamp = tv.tv_sec; bcopy(modid, &kev.modid, DLIL_MODIDLEN); - if (info != NULL) + if (info != NULL) { bcopy(info, &kev.info, DLIL_MODARGLEN); + } dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_ISSUES, - &kev.link_data, sizeof (kev)); + &kev.link_data, sizeof(kev)); } errno_t @@ -8167,19 +8388,22 @@ ifnet_getset_opportunistic(ifnet_t ifp, u_long cmd, struct ifreq *ifr, /* * XXX: Use priv_check_cred() instead of root check? */ - if ((result = proc_suser(p)) != 0) - return (result); + if ((result = proc_suser(p)) != 0) { + return result; + } if (ifr->ifr_opportunistic.ifo_flags == - IFRIFOF_BLOCK_OPPORTUNISTIC) + IFRIFOF_BLOCK_OPPORTUNISTIC) { level = IFNET_THROTTLE_OPPORTUNISTIC; - else if (ifr->ifr_opportunistic.ifo_flags == 0) + } else if (ifr->ifr_opportunistic.ifo_flags == 0) { level = IFNET_THROTTLE_OFF; - else + } else { result = EINVAL; + } - if (result == 0) + if (result == 0) { result = ifnet_set_throttle(ifp, level); + } } else if ((result = ifnet_get_throttle(ifp, &level)) == 0) { ifr->ifr_opportunistic.ifo_flags = 0; if (level == IFNET_THROTTLE_OPPORTUNISTIC) { @@ -8195,18 +8419,19 @@ ifnet_getset_opportunistic(ifnet_t ifp, u_long cmd, struct ifreq *ifr, if (result == 0) { uint32_t flags = 0; flags |= (cmd == SIOCSIFOPPORTUNISTIC) ? - INPCB_OPPORTUNISTIC_SETCMD : 0; + INPCB_OPPORTUNISTIC_SETCMD : 0; flags |= (level == IFNET_THROTTLE_OPPORTUNISTIC) ? - INPCB_OPPORTUNISTIC_THROTTLEON : 0; + INPCB_OPPORTUNISTIC_THROTTLEON : 0; ifr->ifr_opportunistic.ifo_inuse = udp_count_opportunistic(ifp->if_index, flags) + tcp_count_opportunistic(ifp->if_index, flags); } - if (result == EALREADY) + if (result == EALREADY) { result = 0; + } - return (result); + return result; } int @@ -8215,19 +8440,21 @@ ifnet_get_throttle(struct ifnet *ifp, u_int32_t *level) struct ifclassq *ifq; int err = 0; - if (!(ifp->if_eflags & IFEF_TXSTART)) - return (ENXIO); + if (!(ifp->if_eflags & IFEF_TXSTART)) { + return ENXIO; + } *level = IFNET_THROTTLE_OFF; ifq = &ifp->if_snd; IFCQ_LOCK(ifq); /* Throttling works only for IFCQ, not ALTQ instances */ - if (IFCQ_IS_ENABLED(ifq)) + if (IFCQ_IS_ENABLED(ifq)) { IFCQ_GET_THROTTLE(ifq, *level, err); + } IFCQ_UNLOCK(ifq); - return (err); + return err; } int @@ -8236,8 +8463,9 @@ ifnet_set_throttle(struct ifnet *ifp, u_int32_t level) struct ifclassq *ifq; int err = 0; - if (!(ifp->if_eflags & IFEF_TXSTART)) - return (ENXIO); + if (!(ifp->if_eflags & IFEF_TXSTART)) { + return ENXIO; + } ifq = &ifp->if_snd; @@ -8246,22 +8474,24 @@ ifnet_set_throttle(struct ifnet *ifp, u_int32_t level) case IFNET_THROTTLE_OPPORTUNISTIC: break; default: - return (EINVAL); + return EINVAL; } IFCQ_LOCK(ifq); - if (IFCQ_IS_ENABLED(ifq)) + if (IFCQ_IS_ENABLED(ifq)) { IFCQ_SET_THROTTLE(ifq, level, err); + } IFCQ_UNLOCK(ifq); if (err == 0) { printf("%s: throttling level set to %d\n", if_name(ifp), level); - if (level == IFNET_THROTTLE_OFF) + if (level == IFNET_THROTTLE_OFF) { ifnet_start(ifp); + } } - return (err); + return err; } errno_t @@ -8277,23 +8507,27 @@ ifnet_getset_log(ifnet_t ifp, u_long cmd, struct ifreq *ifr, if (cmd == SIOCSIFLOG) { if ((result = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (result); + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return result; + } level = ifr->ifr_log.ifl_level; - if (level < IFNET_LOG_MIN || level > IFNET_LOG_MAX) + if (level < IFNET_LOG_MIN || level > IFNET_LOG_MAX) { result 
= EINVAL; + } flags = ifr->ifr_log.ifl_flags; - if ((flags &= IFNET_LOGF_MASK) == 0) + if ((flags &= IFNET_LOGF_MASK) == 0) { result = EINVAL; + } category = ifr->ifr_log.ifl_category; subcategory = ifr->ifr_log.ifl_subcategory; - if (result == 0) + if (result == 0) { result = ifnet_set_log(ifp, level, flags, category, subcategory); + } } else { result = ifnet_get_log(ifp, &level, &flags, &category, &subcategory); @@ -8305,7 +8539,7 @@ ifnet_getset_log(ifnet_t ifp, u_long cmd, struct ifreq *ifr, } } - return (result); + return result; } int @@ -8326,7 +8560,7 @@ ifnet_set_log(struct ifnet *ifp, int32_t level, uint32_t flags, if (ifp->if_output_ctl != NULL) { struct ifnet_log_params l; - bzero(&l, sizeof (l)); + bzero(&l, sizeof(l)); l.level = level; l.flags = flags; l.flags &= ~IFNET_LOGF_DLIL; @@ -8336,7 +8570,7 @@ ifnet_set_log(struct ifnet *ifp, int32_t level, uint32_t flags, /* Send this request to lower layers */ if (l.flags != 0) { err = ifp->if_output_ctl(ifp, IFNET_CTL_SET_LOG, - sizeof (l), &l); + sizeof(l), &l); } } else if ((flags & ~IFNET_LOGF_DLIL) && ifp->if_output_ctl == NULL) { /* @@ -8345,15 +8579,17 @@ ifnet_set_log(struct ifnet *ifp, int32_t level, uint32_t flags, * silently ignore facilities other than ours. */ flags &= IFNET_LOGF_DLIL; - if (flags == 0 && (!(ifp->if_log.flags & IFNET_LOGF_DLIL))) + if (flags == 0 && (!(ifp->if_log.flags & IFNET_LOGF_DLIL))) { level = 0; + } } if (err == 0) { - if ((ifp->if_log.level = level) == IFNET_LOG_DEFAULT) + if ((ifp->if_log.level = level) == IFNET_LOG_DEFAULT) { ifp->if_log.flags = 0; - else + } else { ifp->if_log.flags |= flags; + } log(LOG_INFO, "%s: logging level set to %d flags=%b " "arg=%b, category=%d subcategory=%d\n", if_name(ifp), @@ -8362,23 +8598,27 @@ ifnet_set_log(struct ifnet *ifp, int32_t level, uint32_t flags, category, subcategory); } - return (err); + return err; } int ifnet_get_log(struct ifnet *ifp, int32_t *level, uint32_t *flags, int32_t *category, int32_t *subcategory) { - if (level != NULL) + if (level != NULL) { *level = ifp->if_log.level; - if (flags != NULL) + } + if (flags != NULL) { *flags = ifp->if_log.flags; - if (category != NULL) + } + if (category != NULL) { *category = ifp->if_log.category; - if (subcategory != NULL) + } + if (subcategory != NULL) { *subcategory = ifp->if_log.subcategory; + } - return (0); + return 0; } int @@ -8390,29 +8630,30 @@ ifnet_notify_address(struct ifnet *ifp, int af) (void) pf_ifaddr_hook(ifp); #endif /* PF */ - if (ifp->if_output_ctl == NULL) - return (EOPNOTSUPP); + if (ifp->if_output_ctl == NULL) { + return EOPNOTSUPP; + } - bzero(&na, sizeof (na)); + bzero(&na, sizeof(na)); na.address_family = af; - return (ifp->if_output_ctl(ifp, IFNET_CTL_NOTIFY_ADDRESS, - sizeof (na), &na)); + return ifp->if_output_ctl(ifp, IFNET_CTL_NOTIFY_ADDRESS, + sizeof(na), &na); } errno_t ifnet_flowid(struct ifnet *ifp, uint32_t *flowid) { if (ifp == NULL || flowid == NULL) { - return (EINVAL); + return EINVAL; } else if (!(ifp->if_eflags & IFEF_TXSTART) || !IF_FULLY_ATTACHED(ifp)) { - return (ENXIO); + return ENXIO; } *flowid = ifp->if_flowhash; - return (0); + return 0; } errno_t @@ -8421,10 +8662,10 @@ ifnet_disable_output(struct ifnet *ifp) int err; if (ifp == NULL) { - return (EINVAL); + return EINVAL; } else if (!(ifp->if_eflags & IFEF_TXSTART) || !IF_FULLY_ATTACHED(ifp)) { - return (ENXIO); + return ENXIO; } if ((err = ifnet_fc_add(ifp)) == 0) { @@ -8432,21 +8673,21 @@ ifnet_disable_output(struct ifnet *ifp) ifp->if_start_flags |= IFSF_FLOW_CONTROLLED; 
lck_mtx_unlock(&ifp->if_start_lock); } - return (err); + return err; } errno_t ifnet_enable_output(struct ifnet *ifp) { if (ifp == NULL) { - return (EINVAL); + return EINVAL; } else if (!(ifp->if_eflags & IFEF_TXSTART) || !IF_FULLY_ATTACHED(ifp)) { - return (ENXIO); + return ENXIO; } ifnet_start_common(ifp, TRUE); - return (0); + return 0; } void @@ -8456,16 +8697,18 @@ ifnet_flowadv(uint32_t flowhash) struct ifnet *ifp; ifce = ifnet_fc_get(flowhash); - if (ifce == NULL) + if (ifce == NULL) { return; + } VERIFY(ifce->ifce_ifp != NULL); ifp = ifce->ifce_ifp; /* flow hash gets recalculated per attach, so check */ if (ifnet_is_attached(ifp, 1)) { - if (ifp->if_flowhash == flowhash) + if (ifp->if_flowhash == flowhash) { (void) ifnet_enable_output(ifp); + } ifnet_decr_iorefcnt(ifp); } ifnet_fc_entry_free(ifce); @@ -8477,7 +8720,7 @@ ifnet_flowadv(uint32_t flowhash) static inline int ifce_cmp(const struct ifnet_fc_entry *fc1, const struct ifnet_fc_entry *fc2) { - return (fc1->ifce_flowhash - fc2->ifce_flowhash); + return fc1->ifce_flowhash - fc2->ifce_flowhash; } static int @@ -8490,7 +8733,7 @@ ifnet_fc_add(struct ifnet *ifp) VERIFY(ifp->if_flowhash != 0); flowhash = ifp->if_flowhash; - bzero(&keyfc, sizeof (keyfc)); + bzero(&keyfc, sizeof(keyfc)); keyfc.ifce_flowhash = flowhash; lck_mtx_lock_spin(&ifnet_fc_lock); @@ -8498,7 +8741,7 @@ ifnet_fc_add(struct ifnet *ifp) if (ifce != NULL && ifce->ifce_ifp == ifp) { /* Entry is already in ifnet_fc_tree, return */ lck_mtx_unlock(&ifnet_fc_lock); - return (0); + return 0; } if (ifce != NULL) { @@ -8509,7 +8752,7 @@ ifnet_fc_add(struct ifnet *ifp) * avoid adding a second one when there is a collision. */ lck_mtx_unlock(&ifnet_fc_lock); - return (EAGAIN); + return EAGAIN; } /* become regular mutex */ @@ -8519,7 +8762,7 @@ ifnet_fc_add(struct ifnet *ifp) if (ifce == NULL) { /* memory allocation failed */ lck_mtx_unlock(&ifnet_fc_lock); - return (ENOMEM); + return ENOMEM; } bzero(ifce, ifnet_fc_zone_size); @@ -8528,7 +8771,7 @@ ifnet_fc_add(struct ifnet *ifp) RB_INSERT(ifnet_fc_tree, &ifnet_fc_tree, ifce); lck_mtx_unlock(&ifnet_fc_lock); - return (0); + return 0; } static struct ifnet_fc_entry * @@ -8537,7 +8780,7 @@ ifnet_fc_get(uint32_t flowhash) struct ifnet_fc_entry keyfc, *ifce; struct ifnet *ifp; - bzero(&keyfc, sizeof (keyfc)); + bzero(&keyfc, sizeof(keyfc)); keyfc.ifce_flowhash = flowhash; lck_mtx_lock_spin(&ifnet_fc_lock); @@ -8545,7 +8788,7 @@ ifnet_fc_get(uint32_t flowhash) if (ifce == NULL) { /* Entry is not present in ifnet_fc_tree, return */ lck_mtx_unlock(&ifnet_fc_lock); - return (NULL); + return NULL; } RB_REMOVE(ifnet_fc_tree, &ifnet_fc_tree, ifce); @@ -8566,7 +8809,7 @@ ifnet_fc_get(uint32_t flowhash) } lck_mtx_unlock(&ifnet_fc_lock); - return (ifce); + return ifce; } static void @@ -8581,12 +8824,13 @@ ifnet_calc_flowhash(struct ifnet *ifp) struct ifnet_flowhash_key fh __attribute__((aligned(8))); uint32_t flowhash = 0; - if (ifnet_flowhash_seed == 0) + if (ifnet_flowhash_seed == 0) { ifnet_flowhash_seed = RandomULong(); + } - bzero(&fh, sizeof (fh)); + bzero(&fh, sizeof(fh)); - (void) snprintf(fh.ifk_name, sizeof (fh.ifk_name), "%s", ifp->if_name); + (void) snprintf(fh.ifk_name, sizeof(fh.ifk_name), "%s", ifp->if_name); fh.ifk_unit = ifp->if_unit; fh.ifk_flags = ifp->if_flags; fh.ifk_eflags = ifp->if_eflags; @@ -8597,14 +8841,14 @@ ifnet_calc_flowhash(struct ifnet *ifp) fh.ifk_rand2 = RandomULong(); try_again: - flowhash = net_flowhash(&fh, sizeof (fh), ifnet_flowhash_seed); + flowhash = net_flowhash(&fh, sizeof(fh), 
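/*
 * Illustrative sketch: a three-way comparator for 32-bit hash keys like
 * ifce_cmp() above.  Plain subtraction, as used there, is the compact
 * idiom; comparing explicitly, as below, sidesteps any question of the
 * int result wrapping when the two unsigned hashes are far apart.
 */
#include <stdint.h>

static int
hash_cmp(uint32_t a, uint32_t b)
{
    if (a < b) {
        return -1;
    }
    if (a > b) {
        return 1;
    }
    return 0;
}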
ifnet_flowhash_seed); if (flowhash == 0) { /* try to get a non-zero flowhash */ ifnet_flowhash_seed = RandomULong(); goto try_again; } - return (flowhash); + return flowhash; } int @@ -8622,10 +8866,10 @@ ifnet_set_netsignature(struct ifnet *ifp, uint8_t family, uint8_t len, /* Allow clearing the signature */ IN_IFEXTRA(ifp)->netsig_len = 0; bzero(IN_IFEXTRA(ifp)->netsig, - sizeof (IN_IFEXTRA(ifp)->netsig)); + sizeof(IN_IFEXTRA(ifp)->netsig)); if_inetdata_lock_done(ifp); break; - } else if (len > sizeof (IN_IFEXTRA(ifp)->netsig)) { + } else if (len > sizeof(IN_IFEXTRA(ifp)->netsig)) { error = EINVAL; if_inetdata_lock_done(ifp); break; @@ -8645,10 +8889,10 @@ ifnet_set_netsignature(struct ifnet *ifp, uint8_t family, uint8_t len, /* Allow clearing the signature */ IN6_IFEXTRA(ifp)->netsig_len = 0; bzero(IN6_IFEXTRA(ifp)->netsig, - sizeof (IN6_IFEXTRA(ifp)->netsig)); + sizeof(IN6_IFEXTRA(ifp)->netsig)); if_inet6data_lock_done(ifp); break; - } else if (len > sizeof (IN6_IFEXTRA(ifp)->netsig)) { + } else if (len > sizeof(IN6_IFEXTRA(ifp)->netsig)) { error = EINVAL; if_inet6data_lock_done(ifp); break; @@ -8666,7 +8910,7 @@ ifnet_set_netsignature(struct ifnet *ifp, uint8_t family, uint8_t len, break; } - return (error); + return error; } int @@ -8675,8 +8919,9 @@ ifnet_get_netsignature(struct ifnet *ifp, uint8_t family, uint8_t *len, { int error = 0; - if (ifp == NULL || len == NULL || data == NULL) - return (EINVAL); + if (ifp == NULL || len == NULL || data == NULL) { + return EINVAL; + } switch (family) { case AF_INET: @@ -8687,10 +8932,11 @@ ifnet_get_netsignature(struct ifnet *ifp, uint8_t family, uint8_t *len, if_inetdata_lock_done(ifp); break; } - if ((*len = IN_IFEXTRA(ifp)->netsig_len) > 0) + if ((*len = IN_IFEXTRA(ifp)->netsig_len) > 0) { bcopy(IN_IFEXTRA(ifp)->netsig, data, *len); - else + } else { error = ENOENT; + } } else { error = ENOMEM; } @@ -8705,10 +8951,11 @@ ifnet_get_netsignature(struct ifnet *ifp, uint8_t family, uint8_t *len, if_inet6data_lock_done(ifp); break; } - if ((*len = IN6_IFEXTRA(ifp)->netsig_len) > 0) + if ((*len = IN6_IFEXTRA(ifp)->netsig_len) > 0) { bcopy(IN6_IFEXTRA(ifp)->netsig, data, *len); - else + } else { error = ENOENT; + } } else { error = ENOMEM; } @@ -8720,10 +8967,11 @@ ifnet_get_netsignature(struct ifnet *ifp, uint8_t family, uint8_t *len, break; } - if (error == 0 && flags != NULL) + if (error == 0 && flags != NULL) { *flags = 0; + } - return (error); + return error; } #if INET6 @@ -8756,11 +9004,11 @@ ifnet_set_nat64prefix(struct ifnet *ifp, struct ipv6_prefix *prefixes) continue; } else if (prefix_len != NAT64_PREFIX_LEN_32 && - prefix_len != NAT64_PREFIX_LEN_40 && - prefix_len != NAT64_PREFIX_LEN_48 && - prefix_len != NAT64_PREFIX_LEN_56 && - prefix_len != NAT64_PREFIX_LEN_64 && - prefix_len != NAT64_PREFIX_LEN_96) { + prefix_len != NAT64_PREFIX_LEN_40 && + prefix_len != NAT64_PREFIX_LEN_48 && + prefix_len != NAT64_PREFIX_LEN_56 && + prefix_len != NAT64_PREFIX_LEN_64 && + prefix_len != NAT64_PREFIX_LEN_96) { clat_log0((LOG_DEBUG, "NAT64 prefixlen is incorrect %d\n", prefix_len)); error = EINVAL; @@ -8786,10 +9034,11 @@ ifnet_set_nat64prefix(struct ifnet *ifp, struct ipv6_prefix *prefixes) out: if_inet6data_lock_done(ifp); - if (error == 0 && one_set != 0) + if (error == 0 && one_set != 0) { necp_update_all_clients(); + } - return (error); + return error; } int @@ -8797,8 +9046,9 @@ ifnet_get_nat64prefix(struct ifnet *ifp, struct ipv6_prefix *prefixes) { int i, found_one = 0, error = 0; - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + 
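/*
 * Illustrative sketch: the non-zero flow hash loop in
 * ifnet_calc_flowhash() above.  Zero is reserved to mean "no hash", so
 * if the hash function lands on 0 the seed is regenerated and the hash
 * recomputed.  net_flowhash() and RandomULong() are stubbed with
 * hypothetical stand-ins so the shape is testable on its own.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static uint32_t
stub_hash(const void *key, size_t len, uint32_t seed)   /* stand-in */
{
    const uint8_t *p = key;
    uint32_t h = seed;

    while (len--) {
        h = h * 31 + *p++;
    }
    return h;
}

uint32_t
calc_nonzero_hash(const void *key, size_t len, uint32_t *seed)
{
    uint32_t h;

    do {
        h = stub_hash(key, len, *seed);
        if (h == 0) {
            /* reseed, like RandomULong(), and try again */
            *seed = (uint32_t)random() | 1;
        }
    } while (h == 0);
    return h;
}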
return EINVAL; + } if_inet6data_lock_shared(ifp); @@ -8808,8 +9058,9 @@ ifnet_get_nat64prefix(struct ifnet *ifp, struct ipv6_prefix *prefixes) } for (i = 0; i < NAT64_MAX_NUM_PREFIXES; i++) { - if (IN6_IFEXTRA(ifp)->nat64_prefixes[i].prefix_len != 0) + if (IN6_IFEXTRA(ifp)->nat64_prefixes[i].prefix_len != 0) { found_one = 1; + } } if (found_one == 0) { @@ -8817,14 +9068,15 @@ ifnet_get_nat64prefix(struct ifnet *ifp, struct ipv6_prefix *prefixes) goto out; } - if (prefixes) + if (prefixes) { bcopy(IN6_IFEXTRA(ifp)->nat64_prefixes, prefixes, sizeof(IN6_IFEXTRA(ifp)->nat64_prefixes)); + } out: if_inet6data_lock_done(ifp); - return (error); + return error; } #endif @@ -8836,16 +9088,19 @@ dlil_output_cksum_dbg(struct ifnet *ifp, struct mbuf *m, uint32_t hoff, uint32_t did_sw; if (!(hwcksum_dbg_mode & HWCKSUM_DBG_FINALIZE_FORCED) || - (m->m_pkthdr.csum_flags & (CSUM_TSO_IPV4|CSUM_TSO_IPV6))) + (m->m_pkthdr.csum_flags & (CSUM_TSO_IPV4 | CSUM_TSO_IPV6))) { return; + } switch (pf) { case PF_INET: did_sw = in_finalize_cksum(m, hoff, m->m_pkthdr.csum_flags); - if (did_sw & CSUM_DELAY_IP) + if (did_sw & CSUM_DELAY_IP) { hwcksum_dbg_finalized_hdr++; - if (did_sw & CSUM_DELAY_DATA) + } + if (did_sw & CSUM_DELAY_DATA) { hwcksum_dbg_finalized_data++; + } break; #if INET6 case PF_INET6: @@ -8858,8 +9113,9 @@ dlil_output_cksum_dbg(struct ifnet *ifp, struct mbuf *m, uint32_t hoff, */ did_sw = in6_finalize_cksum(m, hoff, -1, -1, m->m_pkthdr.csum_flags); - if (did_sw & CSUM_DELAY_IPV6_DATA) + if (did_sw & CSUM_DELAY_IPV6_DATA) { hwcksum_dbg_finalized_data++; + } break; #endif /* INET6 */ default: @@ -8905,8 +9161,9 @@ dlil_input_cksum_dbg(struct ifnet *ifp, struct mbuf *m, char *frame_header, if (hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_FORCED) { uint32_t foff = hwcksum_dbg_partial_rxoff_forced; - if (foff > (uint32_t)m->m_pkthdr.len) + if (foff > (uint32_t)m->m_pkthdr.len) { return; + } m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS; @@ -8978,8 +9235,9 @@ dlil_input_cksum_dbg(struct ifnet *ifp, struct mbuf *m, char *frame_header, if (hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_RXOFF_ADJ) { uint32_t aoff = hwcksum_dbg_partial_rxoff_adj; - if (aoff == rxoff || aoff > (uint32_t)m->m_pkthdr.len) + if (aoff == rxoff || aoff > (uint32_t)m->m_pkthdr.len) { return; + } sum = m_adj_sum16(m, rxoff, aoff, m_pktlen(m) - aoff, sum); @@ -9002,18 +9260,21 @@ sysctl_hwcksum_dbg_mode SYSCTL_HANDLER_ARGS i = hwcksum_dbg_mode; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (hwcksum_dbg == 0) - return (ENODEV); + if (hwcksum_dbg == 0) { + return ENODEV; + } - if ((i & ~HWCKSUM_DBG_MASK) != 0) - return (EINVAL); + if ((i & ~HWCKSUM_DBG_MASK) != 0) { + return EINVAL; + } hwcksum_dbg_mode = (i & HWCKSUM_DBG_MASK); - return (err); + return err; } static int @@ -9026,15 +9287,17 @@ sysctl_hwcksum_dbg_partial_rxoff_forced SYSCTL_HANDLER_ARGS i = hwcksum_dbg_partial_rxoff_forced; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (!(hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_FORCED)) - return (ENODEV); + if (!(hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_FORCED)) { + return ENODEV; + } hwcksum_dbg_partial_rxoff_forced = i; - return (err); + return err; } static int @@ -9047,15 +9310,17 @@ sysctl_hwcksum_dbg_partial_rxoff_adj SYSCTL_HANDLER_ARGS i = hwcksum_dbg_partial_rxoff_adj; err = 
sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (!(hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_RXOFF_ADJ)) - return (ENODEV); + if (!(hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_RXOFF_ADJ)) { + return ENODEV; + } hwcksum_dbg_partial_rxoff_adj = i; - return (err); + return err; } static int @@ -9065,15 +9330,14 @@ sysctl_tx_chain_len_stats SYSCTL_HANDLER_ARGS int err; if (req->oldptr == USER_ADDR_NULL) { - } if (req->newptr != USER_ADDR_NULL) { - return (EPERM); + return EPERM; } err = SYSCTL_OUT(req, &tx_chain_len_stats, sizeof(struct chain_len_stats)); - return (err); + return err; } @@ -9117,28 +9381,28 @@ static uint8_t sumdata[] = { /* Precomputed 16-bit 1's complement sums for various spans of the above data */ static struct { - boolean_t init; - uint16_t len; - uint16_t sumr; /* reference */ - uint16_t sumrp; /* reference, precomputed */ + boolean_t init; + uint16_t len; + uint16_t sumr; /* reference */ + uint16_t sumrp; /* reference, precomputed */ } sumtbl[] = { - { FALSE, 0, 0, 0x0000 }, - { FALSE, 1, 0, 0x001f }, - { FALSE, 2, 0, 0x8b1f }, - { FALSE, 3, 0, 0x8b27 }, - { FALSE, 7, 0, 0x790e }, - { FALSE, 11, 0, 0xcb6d }, - { FALSE, 20, 0, 0x20dd }, - { FALSE, 27, 0, 0xbabd }, - { FALSE, 32, 0, 0xf3e8 }, - { FALSE, 37, 0, 0x197d }, - { FALSE, 43, 0, 0x9eae }, - { FALSE, 64, 0, 0x4678 }, + { FALSE, 0, 0, 0x0000 }, + { FALSE, 1, 0, 0x001f }, + { FALSE, 2, 0, 0x8b1f }, + { FALSE, 3, 0, 0x8b27 }, + { FALSE, 7, 0, 0x790e }, + { FALSE, 11, 0, 0xcb6d }, + { FALSE, 20, 0, 0x20dd }, + { FALSE, 27, 0, 0xbabd }, + { FALSE, 32, 0, 0xf3e8 }, + { FALSE, 37, 0, 0x197d }, + { FALSE, 43, 0, 0x9eae }, + { FALSE, 64, 0, 0x4678 }, { FALSE, 127, 0, 0x9399 }, { FALSE, 256, 0, 0xd147 }, { FALSE, 325, 0, 0x0358 }, }; -#define SUMTBL_MAX ((int)sizeof (sumtbl) / (int)sizeof (sumtbl[0])) +#define SUMTBL_MAX ((int)sizeof (sumtbl) / (int)sizeof (sumtbl[0])) static void dlil_verify_sum16(void) @@ -9148,26 +9412,26 @@ dlil_verify_sum16(void) int n; /* Make sure test data plus extra room for alignment fits in cluster */ - _CASSERT((sizeof (sumdata) + (sizeof (uint64_t) * 2)) <= MCLBYTES); + _CASSERT((sizeof(sumdata) + (sizeof(uint64_t) * 2)) <= MCLBYTES); kprintf("DLIL: running SUM16 self-tests ... 
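/*
 * Illustrative sketch: a reference 16-bit one's-complement sum of the
 * kind the sumtbl[] entries above precompute.  Bytes are paired into
 * 16-bit words (network order here; the in-kernel self-test may
 * accumulate in host order, which changes only the byte layout), the
 * odd trailing byte is zero-padded, and carries are folded back in.
 * This is the arithmetic behind the Internet checksum (RFC 1071).
 */
#include <stddef.h>
#include <stdint.h>

uint16_t
sum16_ref(const uint8_t *p, size_t len)
{
    uint32_t sum = 0;

    while (len > 1) {
        sum += (uint32_t)(p[0] << 8 | p[1]);
        p += 2;
        len -= 2;
    }
    if (len == 1) {
        sum += (uint32_t)(p[0] << 8);   /* pad the odd byte with zero */
    }
    /* fold the 32-bit accumulator into 16 bits, adding carries back */
    while (sum >> 16) {
        sum = (sum & 0xffff) + (sum >> 16);
    }
    return (uint16_t)sum;
}

int
main(void)
{
    uint8_t data[3] = { 0x8b, 0x1f, 0x08 };
    /* 0x8b1f + 0x0800 = 0x931f */
    return sum16_ref(data, 3) == 0x931f ? 0 : 1;
}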
"); m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); - m_align(m, sizeof(sumdata) + (sizeof (uint64_t) * 2)); + m_align(m, sizeof(sumdata) + (sizeof(uint64_t) * 2)); - buf = mtod(m, uint8_t *); /* base address */ + buf = mtod(m, uint8_t *); /* base address */ for (n = 0; n < SUMTBL_MAX; n++) { uint16_t len = sumtbl[n].len; int i; /* Verify for all possible alignments */ - for (i = 0; i < (int)sizeof (uint64_t); i++) { + for (i = 0; i < (int)sizeof(uint64_t); i++) { uint16_t sum, sumr; uint8_t *c; /* Copy over test data to mbuf */ - VERIFY(len <= sizeof (sumdata)); + VERIFY(len <= sizeof(sumdata)); c = buf + i; bcopy(sumdata, c, len); @@ -9230,41 +9494,41 @@ dlil_verify_sum16(void) } #endif /* DEBUG || DEVELOPMENT */ -#define CASE_STRINGIFY(x) case x: return #x +#define CASE_STRINGIFY(x) case x: return #x __private_extern__ const char * dlil_kev_dl_code_str(u_int32_t event_code) { switch (event_code) { - CASE_STRINGIFY(KEV_DL_SIFFLAGS); - CASE_STRINGIFY(KEV_DL_SIFMETRICS); - CASE_STRINGIFY(KEV_DL_SIFMTU); - CASE_STRINGIFY(KEV_DL_SIFPHYS); - CASE_STRINGIFY(KEV_DL_SIFMEDIA); - CASE_STRINGIFY(KEV_DL_SIFGENERIC); - CASE_STRINGIFY(KEV_DL_ADDMULTI); - CASE_STRINGIFY(KEV_DL_DELMULTI); - CASE_STRINGIFY(KEV_DL_IF_ATTACHED); - CASE_STRINGIFY(KEV_DL_IF_DETACHING); - CASE_STRINGIFY(KEV_DL_IF_DETACHED); - CASE_STRINGIFY(KEV_DL_LINK_OFF); - CASE_STRINGIFY(KEV_DL_LINK_ON); - CASE_STRINGIFY(KEV_DL_PROTO_ATTACHED); - CASE_STRINGIFY(KEV_DL_PROTO_DETACHED); - CASE_STRINGIFY(KEV_DL_LINK_ADDRESS_CHANGED); - CASE_STRINGIFY(KEV_DL_WAKEFLAGS_CHANGED); - CASE_STRINGIFY(KEV_DL_IF_IDLE_ROUTE_REFCNT); - CASE_STRINGIFY(KEV_DL_IFCAP_CHANGED); - CASE_STRINGIFY(KEV_DL_LINK_QUALITY_METRIC_CHANGED); - CASE_STRINGIFY(KEV_DL_NODE_PRESENCE); - CASE_STRINGIFY(KEV_DL_NODE_ABSENCE); - CASE_STRINGIFY(KEV_DL_MASTER_ELECTED); - CASE_STRINGIFY(KEV_DL_ISSUES); - CASE_STRINGIFY(KEV_DL_IFDELEGATE_CHANGED); + CASE_STRINGIFY(KEV_DL_SIFFLAGS); + CASE_STRINGIFY(KEV_DL_SIFMETRICS); + CASE_STRINGIFY(KEV_DL_SIFMTU); + CASE_STRINGIFY(KEV_DL_SIFPHYS); + CASE_STRINGIFY(KEV_DL_SIFMEDIA); + CASE_STRINGIFY(KEV_DL_SIFGENERIC); + CASE_STRINGIFY(KEV_DL_ADDMULTI); + CASE_STRINGIFY(KEV_DL_DELMULTI); + CASE_STRINGIFY(KEV_DL_IF_ATTACHED); + CASE_STRINGIFY(KEV_DL_IF_DETACHING); + CASE_STRINGIFY(KEV_DL_IF_DETACHED); + CASE_STRINGIFY(KEV_DL_LINK_OFF); + CASE_STRINGIFY(KEV_DL_LINK_ON); + CASE_STRINGIFY(KEV_DL_PROTO_ATTACHED); + CASE_STRINGIFY(KEV_DL_PROTO_DETACHED); + CASE_STRINGIFY(KEV_DL_LINK_ADDRESS_CHANGED); + CASE_STRINGIFY(KEV_DL_WAKEFLAGS_CHANGED); + CASE_STRINGIFY(KEV_DL_IF_IDLE_ROUTE_REFCNT); + CASE_STRINGIFY(KEV_DL_IFCAP_CHANGED); + CASE_STRINGIFY(KEV_DL_LINK_QUALITY_METRIC_CHANGED); + CASE_STRINGIFY(KEV_DL_NODE_PRESENCE); + CASE_STRINGIFY(KEV_DL_NODE_ABSENCE); + CASE_STRINGIFY(KEV_DL_MASTER_ELECTED); + CASE_STRINGIFY(KEV_DL_ISSUES); + CASE_STRINGIFY(KEV_DL_IFDELEGATE_CHANGED); default: break; } - return (""); + return ""; } static void @@ -9337,8 +9601,9 @@ sysctl_get_kao_frames SYSCTL_HANDLER_ARGS * Only root can get look at other people TCP frames */ error = proc_suser(current_proc()); - if (error != 0) + if (error != 0) { goto done; + } /* * Validate the input parameters */ @@ -9401,9 +9666,10 @@ sysctl_get_kao_frames SYSCTL_HANDLER_ARGS } } done: - if (frames_array != NULL) + if (frames_array != NULL) { _FREE(frames_array, M_TEMP); - return (error); + } + return error; } #endif /* DEVELOPMENT || DEBUG */ diff --git a/bsd/net/dlil.h b/bsd/net/dlil.h index 526e82f46..881cf0505 100644 --- a/bsd/net/dlil.h +++ b/bsd/net/dlil.h @@ -26,7 +26,7 @@ * 
@APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef DLIL_H -#define DLIL_H +#define DLIL_H #ifdef KERNEL #include @@ -55,9 +55,9 @@ enum { * variants.native_type_length. */ /* Ethernet specific types */ -#define DLIL_DESC_ETYPE2 4 -#define DLIL_DESC_SAP 5 -#define DLIL_DESC_SNAP 6 +#define DLIL_DESC_ETYPE2 4 +#define DLIL_DESC_SAP 5 +#define DLIL_DESC_SNAP 6 #ifdef KERNEL_PRIVATE #include @@ -70,46 +70,46 @@ enum { #ifdef BSD_KERNEL_PRIVATE /* Operations on timespecs. */ -#define net_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_nsec = 0 +#define net_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_nsec = 0 -#define net_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_nsec) +#define net_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_nsec) -#define net_timercmp(tvp, uvp, cmp) \ - (((tvp)->tv_sec == (uvp)->tv_sec) ? \ - ((tvp)->tv_nsec cmp (uvp)->tv_nsec) : \ +#define net_timercmp(tvp, uvp, cmp) \ + (((tvp)->tv_sec == (uvp)->tv_sec) ? \ + ((tvp)->tv_nsec cmp (uvp)->tv_nsec) : \ ((tvp)->tv_sec cmp (uvp)->tv_sec)) -#define net_timeradd(tvp, uvp, vvp) do { \ - (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \ - (vvp)->tv_nsec = (tvp)->tv_nsec + (uvp)->tv_nsec; \ - if ((vvp)->tv_nsec >= (long)NSEC_PER_SEC) { \ - (vvp)->tv_sec++; \ - (vvp)->tv_nsec -= NSEC_PER_SEC; \ - } \ +#define net_timeradd(tvp, uvp, vvp) do { \ + (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \ + (vvp)->tv_nsec = (tvp)->tv_nsec + (uvp)->tv_nsec; \ + if ((vvp)->tv_nsec >= (long)NSEC_PER_SEC) { \ + (vvp)->tv_sec++; \ + (vvp)->tv_nsec -= NSEC_PER_SEC; \ + } \ } while (0) -#define net_timersub(tvp, uvp, vvp) do { \ - (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ - (vvp)->tv_nsec = (tvp)->tv_nsec - (uvp)->tv_nsec; \ - if ((vvp)->tv_nsec < 0) { \ - (vvp)->tv_sec--; \ - (vvp)->tv_nsec += NSEC_PER_SEC; \ - } \ +#define net_timersub(tvp, uvp, vvp) do { \ + (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ + (vvp)->tv_nsec = (tvp)->tv_nsec - (uvp)->tv_nsec; \ + if ((vvp)->tv_nsec < 0) { \ + (vvp)->tv_sec--; \ + (vvp)->tv_nsec += NSEC_PER_SEC; \ + } \ } while (0) -#define net_timernsec(tvp, nsp) do { \ - *(nsp) = (tvp)->tv_nsec; \ - if ((tvp)->tv_sec > 0) \ - *(nsp) += ((tvp)->tv_sec * NSEC_PER_SEC); \ +#define net_timernsec(tvp, nsp) do { \ + *(nsp) = (tvp)->tv_nsec; \ + if ((tvp)->tv_sec > 0) \ + *(nsp) += ((tvp)->tv_sec * NSEC_PER_SEC); \ } while (0) #if defined(__x86_64__) || defined(__arm64__) -#define net_nsectimer(nsp, tvp) do { \ - u_int64_t __nsp = *(nsp); \ - net_timerclear(tvp); \ - uint64_t __sec = __nsp / NSEC_PER_SEC; \ - (tvp)->tv_sec = (__darwin_time_t)__sec; \ - (tvp)->tv_nsec = (long)(__nsp - __sec * NSEC_PER_SEC); \ +#define net_nsectimer(nsp, tvp) do { \ + u_int64_t __nsp = *(nsp); \ + net_timerclear(tvp); \ + uint64_t __sec = __nsp / NSEC_PER_SEC; \ + (tvp)->tv_sec = (__darwin_time_t)__sec; \ + (tvp)->tv_nsec = (long)(__nsp - __sec * NSEC_PER_SEC); \ } while (0) #else /* 32 bit */ /* @@ -123,17 +123,17 @@ enum { * The approximation of seconds is correct or too low by 1 unit. * So we fix it by computing the remainder. */ -#define net_nsectimer(nsp, tvp) do { \ - u_int64_t __nsp = *(nsp); \ - net_timerclear(tvp); \ - uint32_t __a = (uint32_t)(__nsp >> 29); \ - const uint32_t __inv = 0x89705F41; \ - uint32_t __sec = (uint32_t)(((uint64_t)__a * __inv) >> 32); \ - uint32_t __rem = (uint32_t)(__nsp - __sec * NSEC_PER_SEC); \ - __sec += ((__rem >= NSEC_PER_SEC) ? 1 : 0); \ - (tvp)->tv_sec = (__darwin_time_t)__sec; \ - (tvp)->tv_nsec = \ - (long)((__rem >= NSEC_PER_SEC) ? 
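/*
 * Illustrative sketch: the timespec arithmetic that net_timeradd() /
 * net_timersub() above implement.  Fields are added componentwise and
 * then normalized so tv_nsec stays in [0, NSEC_PER_SEC).  Reproduced
 * as a plain function so it can be exercised in user space.
 */
#include <assert.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static struct timespec
ts_add(struct timespec a, struct timespec b)
{
    struct timespec r;

    r.tv_sec = a.tv_sec + b.tv_sec;
    r.tv_nsec = a.tv_nsec + b.tv_nsec;
    if (r.tv_nsec >= NSEC_PER_SEC) {    /* carry into seconds */
        r.tv_sec++;
        r.tv_nsec -= NSEC_PER_SEC;
    }
    return r;
}

int
main(void)
{
    struct timespec a = { 1, 900000000L }, b = { 0, 200000000L };
    struct timespec r = ts_add(a, b);

    assert(r.tv_sec == 2 && r.tv_nsec == 100000000L);
    return 0;
}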
(__rem - NSEC_PER_SEC) : __rem); \ +#define net_nsectimer(nsp, tvp) do { \ + u_int64_t __nsp = *(nsp); \ + net_timerclear(tvp); \ + uint32_t __a = (uint32_t)(__nsp >> 29); \ + const uint32_t __inv = 0x89705F41; \ + uint32_t __sec = (uint32_t)(((uint64_t)__a * __inv) >> 32); \ + uint32_t __rem = (uint32_t)(__nsp - __sec * NSEC_PER_SEC); \ + __sec += ((__rem >= NSEC_PER_SEC) ? 1 : 0); \ + (tvp)->tv_sec = (__darwin_time_t)__sec; \ + (tvp)->tv_nsec = \ + (long)((__rem >= NSEC_PER_SEC) ? (__rem - NSEC_PER_SEC) : __rem); \ } while(0) #endif /* 32 bit */ @@ -143,92 +143,92 @@ struct ether_header; struct sockaddr_dl; struct iff_filter; -#define DLIL_THREADNAME_LEN 32 +#define DLIL_THREADNAME_LEN 32 /* * DLIL input thread info */ struct dlil_threading_info { decl_lck_mtx_data(, input_lck); - lck_grp_t *lck_grp; /* lock group (for lock stats) */ - u_int32_t input_waiting; /* DLIL condition of thread */ - u_int32_t wtot; /* # of wakeup requests */ - char input_name[DLIL_THREADNAME_LEN]; /* name storage */ - struct ifnet *ifp; /* pointer to interface */ - class_queue_t rcvq_pkts; /* queue of pkts */ + lck_grp_t *lck_grp; /* lock group (for lock stats) */ + u_int32_t input_waiting; /* DLIL condition of thread */ + u_int32_t wtot; /* # of wakeup requests */ + char input_name[DLIL_THREADNAME_LEN]; /* name storage */ + struct ifnet *ifp; /* pointer to interface */ + class_queue_t rcvq_pkts; /* queue of pkts */ struct ifnet_stat_increment_param stats; /* incremental statistics */ /* * Thread affinity (workloop and DLIL threads). */ - boolean_t net_affinity; /* affinity set is available */ - struct thread *input_thr; /* input thread */ - struct thread *wloop_thr; /* workloop thread */ - struct thread *poll_thr; /* poll thread */ - u_int32_t tag; /* affinity tag */ + boolean_t net_affinity; /* affinity set is available */ + struct thread *input_thr; /* input thread */ + struct thread *wloop_thr; /* workloop thread */ + struct thread *poll_thr; /* poll thread */ + u_int32_t tag; /* affinity tag */ /* * Opportunistic polling. 
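The 32-bit net_nsectimer() variant above avoids a 64-bit division: 0x89705F41 is floor(2^61 / 10^9), so ((nsp >> 29) * 0x89705F41) >> 32 approximates nsp / 10^9, and per the comment in the header it is exact or low by exactly one, which the remainder test then corrects. A minimal standalone sketch of the same arithmetic (plain C with stdint types; like the macro, it assumes the input fits in 61 bits):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int
    main(void)
    {
            uint64_t nsp = 12345678987654321ULL;   /* arbitrary test input */
            const uint32_t inv = 0x89705F41;       /* floor(2^61 / 10^9) */
            uint32_t a = (uint32_t)(nsp >> 29);
            uint32_t sec = (uint32_t)(((uint64_t)a * inv) >> 32);
            uint32_t rem = (uint32_t)(nsp - sec * NSEC_PER_SEC);

            if (rem >= NSEC_PER_SEC) {             /* estimate was one low */
                    sec++;
                    rem -= NSEC_PER_SEC;
            }
            printf("%u.%09u vs %llu.%09llu\n", sec, rem,
                nsp / NSEC_PER_SEC, nsp % NSEC_PER_SEC);
            return 0;
    }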
*/ - ifnet_model_t mode; /* current mode */ - struct pktcntr tstats; /* incremental polling statistics */ - struct if_rxpoll_stats pstats; /* polling statistics */ -#define rxpoll_offreq pstats.ifi_poll_off_req -#define rxpoll_offerr pstats.ifi_poll_off_err -#define rxpoll_onreq pstats.ifi_poll_on_req -#define rxpoll_onerr pstats.ifi_poll_on_err -#define rxpoll_wavg pstats.ifi_poll_wakeups_avg -#define rxpoll_wlowat pstats.ifi_poll_wakeups_lowat -#define rxpoll_whiwat pstats.ifi_poll_wakeups_hiwat -#define rxpoll_pavg pstats.ifi_poll_packets_avg -#define rxpoll_pmin pstats.ifi_poll_packets_min -#define rxpoll_pmax pstats.ifi_poll_packets_max -#define rxpoll_plowat pstats.ifi_poll_packets_lowat -#define rxpoll_phiwat pstats.ifi_poll_packets_hiwat -#define rxpoll_bavg pstats.ifi_poll_bytes_avg -#define rxpoll_bmin pstats.ifi_poll_bytes_min -#define rxpoll_bmax pstats.ifi_poll_bytes_max -#define rxpoll_blowat pstats.ifi_poll_bytes_lowat -#define rxpoll_bhiwat pstats.ifi_poll_bytes_hiwat -#define rxpoll_plim pstats.ifi_poll_packets_limit -#define rxpoll_ival pstats.ifi_poll_interval_time - struct pktcntr sstats; /* packets and bytes per sampling */ - struct timespec mode_holdtime; /* mode holdtime in nsec */ - struct timespec mode_lasttime; /* last mode change time in nsec */ - struct timespec sample_holdtime; /* sampling holdtime in nsec */ - struct timespec sample_lasttime; /* last sampling time in nsec */ - struct timespec dbg_lasttime; /* last debug message time in nsec */ + ifnet_model_t mode; /* current mode */ + struct pktcntr tstats; /* incremental polling statistics */ + struct if_rxpoll_stats pstats; /* polling statistics */ +#define rxpoll_offreq pstats.ifi_poll_off_req +#define rxpoll_offerr pstats.ifi_poll_off_err +#define rxpoll_onreq pstats.ifi_poll_on_req +#define rxpoll_onerr pstats.ifi_poll_on_err +#define rxpoll_wavg pstats.ifi_poll_wakeups_avg +#define rxpoll_wlowat pstats.ifi_poll_wakeups_lowat +#define rxpoll_whiwat pstats.ifi_poll_wakeups_hiwat +#define rxpoll_pavg pstats.ifi_poll_packets_avg +#define rxpoll_pmin pstats.ifi_poll_packets_min +#define rxpoll_pmax pstats.ifi_poll_packets_max +#define rxpoll_plowat pstats.ifi_poll_packets_lowat +#define rxpoll_phiwat pstats.ifi_poll_packets_hiwat +#define rxpoll_bavg pstats.ifi_poll_bytes_avg +#define rxpoll_bmin pstats.ifi_poll_bytes_min +#define rxpoll_bmax pstats.ifi_poll_bytes_max +#define rxpoll_blowat pstats.ifi_poll_bytes_lowat +#define rxpoll_bhiwat pstats.ifi_poll_bytes_hiwat +#define rxpoll_plim pstats.ifi_poll_packets_limit +#define rxpoll_ival pstats.ifi_poll_interval_time + struct pktcntr sstats; /* packets and bytes per sampling */ + struct timespec mode_holdtime; /* mode holdtime in nsec */ + struct timespec mode_lasttime; /* last mode change time in nsec */ + struct timespec sample_holdtime; /* sampling holdtime in nsec */ + struct timespec sample_lasttime; /* last sampling time in nsec */ + struct timespec dbg_lasttime; /* last debug message time in nsec */ #if IFNET_INPUT_SANITY_CHK /* * For debugging. 
*/ - u_int64_t input_mbuf_cnt; /* total # of packets processed */ + u_int64_t input_mbuf_cnt; /* total # of packets processed */ #endif - thread_call_t input_mit_tcall; /* coalescing input processing */ + thread_call_t input_mit_tcall; /* coalescing input processing */ }; /* * DLIL input thread info (for main/loopback input thread) */ struct dlil_main_threading_info { - struct dlil_threading_info inp; - class_queue_t lo_rcvq_pkts; /* queue of lo0 pkts */ + struct dlil_threading_info inp; + class_queue_t lo_rcvq_pkts; /* queue of lo0 pkts */ }; /* * The following are shared with kpi_protocol.c so that it may wakeup * the input thread to run through packets queued for protocol input. -*/ -#define DLIL_INPUT_RUNNING 0x80000000 -#define DLIL_INPUT_WAITING 0x40000000 -#define DLIL_PROTO_REGISTER 0x20000000 -#define DLIL_PROTO_WAITING 0x10000000 -#define DLIL_INPUT_TERMINATE 0x08000000 -#define DLIL_INPUT_TERMINATE_COMPLETE 0x04000000 + */ +#define DLIL_INPUT_RUNNING 0x80000000 +#define DLIL_INPUT_WAITING 0x40000000 +#define DLIL_PROTO_REGISTER 0x20000000 +#define DLIL_PROTO_WAITING 0x10000000 +#define DLIL_INPUT_TERMINATE 0x08000000 +#define DLIL_INPUT_TERMINATE_COMPLETE 0x04000000 /* * Flags for dlil_attach_filter() */ #define DLIL_IFF_TSO 0x01 /* Interface filter supports TSO */ -#define DLIL_IFF_INTERNAL 0x02 /* Apple internal -- do not count towards stats */ +#define DLIL_IFF_INTERNAL 0x02 /* Apple internal -- do not count towards stats */ extern int dlil_verbose; extern uint32_t hwcksum_dbg; @@ -256,9 +256,9 @@ extern errno_t dlil_send_arp_internal(ifnet_t, u_int16_t, * net_thread_is_unmarked functions to control the bits in the uu_network_marks * field of the uthread structure. */ -#define NET_THREAD_HELD_PF 0x1 /* thread is holding PF lock */ -#define NET_THREAD_HELD_DOMAIN 0x2 /* thread is holding domain_proto_mtx */ -#define NET_THREAD_CKREQ_LLADDR 0x4 /* thread reqs MACF check for LLADDR */ +#define NET_THREAD_HELD_PF 0x1 /* thread is holding PF lock */ +#define NET_THREAD_HELD_DOMAIN 0x2 /* thread is holding domain_proto_mtx */ +#define NET_THREAD_CKREQ_LLADDR 0x4 /* thread reqs MACF check for LLADDR */ /* * net_thread_marks_t is a pointer to a phantom structure type used for @@ -356,7 +356,7 @@ extern const void *dlil_ifaddr_bytes(const struct sockaddr_dl *, size_t *, extern void dlil_report_issues(struct ifnet *, u_int8_t[DLIL_MODIDLEN], u_int8_t[DLIL_MODARGLEN]); -#define PROTO_HASH_SLOTS 4 +#define PROTO_HASH_SLOTS 4 extern int proto_hash_value(u_int32_t); @@ -381,8 +381,9 @@ __attribute__((always_inline)) static inline void ifp_inc_traffic_class_in(struct ifnet *ifp, struct mbuf *m) { - if (!(m->m_flags & M_PKTHDR)) + if (!(m->m_flags & M_PKTHDR)) { return; + } switch (m_get_traffic_class(m)) { case MBUF_TC_BE: @@ -420,8 +421,9 @@ __attribute__((always_inline)) static inline void ifp_inc_traffic_class_out(struct ifnet *ifp, struct mbuf *m) { - if (!(m->m_flags & M_PKTHDR)) + if (!(m->m_flags & M_PKTHDR)) { return; + } switch (m_get_traffic_class(m)) { case MBUF_TC_BE: diff --git a/bsd/net/ether_if_module.c b/bsd/net/ether_if_module.c index 3335bb6c4..35acf1064 100644 --- a/bsd/net/ether_if_module.c +++ b/bsd/net/ether_if_module.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -70,7 +70,7 @@ #include -#define etherbroadcastaddr fugly +#define etherbroadcastaddr fugly #include #include #include @@ -79,20 +79,20 @@ #include #include #include -#include /* For M_LOOP */ +#include /* For M_LOOP */ #include #include #undef etherbroadcastaddr /* -#if INET -#include -#include - -#include -#include -#endif -*/ + #if INET + #include + #include + * + #include + #include + #endif + */ #include #include #include @@ -109,13 +109,13 @@ #include SYSCTL_DECL(_net_link); -SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW|CTLFLAG_LOCKED, 0, +SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Ethernet"); struct en_desc { - u_int16_t type; /* Type of protocol stored in data */ - u_int32_t protocol_family; /* Protocol family */ - u_int32_t data[2]; /* Protocol data */ + u_int16_t type; /* Type of protocol stored in data */ + u_int32_t protocol_family; /* Protocol family */ + u_int32_t data[2]; /* Protocol data */ }; /* descriptors are allocated in blocks of ETHER_DESC_BLK_SIZE */ @@ -130,17 +130,17 @@ struct en_desc { */ struct ether_desc_blk_str { u_int32_t n_max_used; - u_int32_t n_count; - u_int32_t n_used; + u_int32_t n_count; + u_int32_t n_used; struct en_desc block_ptr[1]; }; /* Size of the above struct before the array of struct en_desc */ -#define ETHER_DESC_HEADER_SIZE \ +#define ETHER_DESC_HEADER_SIZE \ ((size_t) offsetof(struct ether_desc_blk_str, block_ptr)) __private_extern__ u_char etherbroadcastaddr[ETHER_ADDR_LEN] = - { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* * Release all descriptor entries owned by this protocol (there may be several). 
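Worth noting on the struct just above: ether_desc_blk_str is a single variable-length allocation, using the pre-C99 one-element-array idiom, and ETHER_DESC_HEADER_SIZE is taken with offsetof() rather than sizeof() so the first entry's storage is not counted twice when the block is sized. A minimal userland sketch of the same layout (struct and function names here are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct entry {
            uint16_t type;
            uint32_t family;
            uint32_t data[2];
    };

    struct blk {
            uint32_t n_max_used;
            uint32_t n_count;
            uint32_t n_used;
            struct entry block_ptr[1];      /* really n_count entries */
    };

    #define BLK_HEADER_SIZE ((size_t)offsetof(struct blk, block_ptr))

    static struct blk *
    blk_alloc(uint32_t n_count)
    {
            /* header + n_count entries in one allocation */
            struct blk *b = calloc(1,
                BLK_HEADER_SIZE + n_count * sizeof(struct entry));

            if (b != NULL)
                    b->n_count = n_count;
            return b;
    }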
@@ -155,8 +155,9 @@ ether_del_proto(ifnet_t ifp, protocol_family_t protocol_family) u_int32_t current = 0; int found = 0; - if (desc_blk == NULL) - return (0); + if (desc_blk == NULL) { + return 0; + } for (current = desc_blk->n_max_used; current > 0; current--) { if (desc_blk->block_ptr[current - 1].protocol_family == @@ -174,11 +175,12 @@ ether_del_proto(ifnet_t ifp, protocol_family_t protocol_family) /* Decrement n_max_used */ for (; desc_blk->n_max_used > 0 && desc_blk->block_ptr[desc_blk->n_max_used - 1].type == 0; - desc_blk->n_max_used--) + desc_blk->n_max_used--) { ; + } } - return (0); + return 0; } static int @@ -194,22 +196,25 @@ ether_add_proto_internal(struct ifnet *ifp, protocol_family_t protocol, /* These types are supported */ /* Top three are preferred */ case DLIL_DESC_ETYPE2: - if (demux->datalen != 2) - return (EINVAL); + if (demux->datalen != 2) { + return EINVAL; + } break; case DLIL_DESC_SAP: - if (demux->datalen != 3) - return (EINVAL); + if (demux->datalen != 3) { + return EINVAL; + } break; case DLIL_DESC_SNAP: - if (demux->datalen != 5) - return (EINVAL); + if (demux->datalen != 5) { + return EINVAL; + } break; default: - return (ENOTSUP); + return ENOTSUP; } /* Verify a matching descriptor does not exist */ @@ -221,7 +226,7 @@ ether_add_proto_internal(struct ifnet *ifp, protocol_family_t protocol, DLIL_DESC_ETYPE2 && desc_blk->block_ptr[i].data[0] == *(u_int16_t*)demux->data) { - return (EADDRINUSE); + return EADDRINUSE; } } break; @@ -232,7 +237,7 @@ ether_add_proto_internal(struct ifnet *ifp, protocol_family_t protocol, demux->type && bcmp(desc_blk->block_ptr[i].data, demux->data, demux->datalen) == 0) { - return (EADDRINUSE); + return EADDRINUSE; } } break; @@ -250,20 +255,20 @@ ether_add_proto_internal(struct ifnet *ifp, protocol_family_t protocol, if (desc_blk) { new_count += desc_blk->n_count; - old_size = desc_blk->n_count * sizeof (struct en_desc) + + old_size = desc_blk->n_count * sizeof(struct en_desc) + ETHER_DESC_HEADER_SIZE; i = desc_blk->n_used; } - new_size = new_count * sizeof (struct en_desc) + + new_size = new_count * sizeof(struct en_desc) + ETHER_DESC_HEADER_SIZE; tmp = _MALLOC(new_size, M_IFADDR, M_WAITOK); - if (tmp == NULL) { + if (tmp == NULL) { /* * Remove any previous descriptors set in the call. 
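For reference while reading the switch in ether_add_proto_internal() below: DLIL_DESC_ETYPE2 matches on a 2-byte Ethernet II type, DLIL_DESC_SAP on a 3-byte 802.2 LLC header (DSAP, SSAP, control), and DLIL_DESC_SNAP on a 5-byte SNAP field (3-byte OUI plus 2-byte protocol), which is exactly what the datalen checks enforce. The same rules as a standalone C function (a restatement, not kernel code):

    #include <errno.h>
    #include <stdint.h>

    #define DESC_ETYPE2 4   /* DLIL_DESC_ETYPE2 */
    #define DESC_SAP    5   /* DLIL_DESC_SAP    */
    #define DESC_SNAP   6   /* DLIL_DESC_SNAP   */

    static int
    demux_datalen_ok(uint16_t type, uint32_t datalen)
    {
            switch (type) {
            case DESC_ETYPE2:
                    return (datalen == 2) ? 0 : EINVAL; /* EtherType */
            case DESC_SAP:
                    return (datalen == 3) ? 0 : EINVAL; /* DSAP+SSAP+ctl */
            case DESC_SNAP:
                    return (datalen == 5) ? 0 : EINVAL; /* OUI + type */
            default:
                    return ENOTSUP;
            }
    }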
*/ - return (ENOMEM); + return ENOMEM; } bzero(((char *)tmp) + old_size, new_size - old_size); @@ -307,7 +312,7 @@ ether_add_proto_internal(struct ifnet *ifp, protocol_family_t protocol, break; case DLIL_DESC_SNAP: { - u_int8_t* pDest = ((u_int8_t*)&ed->data[0]) + 3; + u_int8_t* pDest = ((u_int8_t*)&ed->data[0]) + 3; ed->type = DLIL_DESC_SNAP; bcopy(demux->data, pDest, 5); break; @@ -316,11 +321,11 @@ ether_add_proto_internal(struct ifnet *ifp, protocol_family_t protocol, desc_blk->n_used++; - return (0); + return 0; } int -ether_add_proto(ifnet_t ifp, protocol_family_t protocol, +ether_add_proto(ifnet_t ifp, protocol_family_t protocol, const struct ifnet_demux_desc *demux_list, u_int32_t demux_count) { int error = 0; @@ -334,7 +339,7 @@ ether_add_proto(ifnet_t ifp, protocol_family_t protocol, } } - return (error); + return error; } int @@ -342,39 +347,40 @@ ether_demux(ifnet_t ifp, mbuf_t m, char *frame_header, protocol_family_t *protocol_family) { struct ether_header *eh = (struct ether_header *)(void *)frame_header; - u_short ether_type = eh->ether_type; + u_short ether_type = eh->ether_type; u_int16_t type; u_int8_t *data; u_int32_t i = 0; struct ether_desc_blk_str *desc_blk = (struct ether_desc_blk_str *)ifp->if_family_cookie; u_int32_t maxd = desc_blk ? desc_blk->n_max_used : 0; - struct en_desc *ed = desc_blk ? desc_blk->block_ptr : NULL; + struct en_desc *ed = desc_blk ? desc_blk->block_ptr : NULL; u_int32_t extProto1 = 0; u_int32_t extProto2 = 0; if (eh->ether_dhost[0] & 1) { /* Check for broadcast */ - if (_ether_cmp(etherbroadcastaddr, eh->ether_dhost) == 0) + if (_ether_cmp(etherbroadcastaddr, eh->ether_dhost) == 0) { m->m_flags |= M_BCAST; - else + } else { m->m_flags |= M_MCAST; + } } if (m->m_flags & M_HASFCS) { - /* - * If the M_HASFCS is set by the driver we want to make sure - * that we strip off the trailing FCS data before handing it - * up the stack. - */ - m_adj(m, -ETHER_CRC_LEN); - m->m_flags &= ~M_HASFCS; - } + /* + * If the M_HASFCS is set by the driver we want to make sure + * that we strip off the trailing FCS data before handing it + * up the stack. 
+ */ + m_adj(m, -ETHER_CRC_LEN); + m->m_flags &= ~M_HASFCS; + } if (ifp->if_eflags & IFEF_BOND) { /* if we're bonded, bond "protocol" gets all the packets */ *protocol_family = PF_BOND; - return (0); + return 0; } if ((eh->ether_dhost[0] & 1) == 0) { @@ -394,19 +400,19 @@ ether_demux(ifnet_t ifp, mbuf_t m, char *frame_header, if ((m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) != 0) { if (EVL_VLANOFTAG(m->m_pkthdr.vlan_tag) != 0) { *protocol_family = PF_VLAN; - return (0); + return 0; } /* the packet is just priority-tagged, clear the bit */ m->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID; } else if (ether_type == htons(ETHERTYPE_VLAN)) { - struct ether_vlan_header * evl; + struct ether_vlan_header * evl; evl = (struct ether_vlan_header *)(void *)frame_header; if (m->m_len < ETHER_VLAN_ENCAP_LEN || ntohs(evl->evl_proto) == ETHERTYPE_VLAN || EVL_VLANOFTAG(ntohs(evl->evl_tag)) != 0) { *protocol_family = PF_VLAN; - return (0); + return 0; } /* the packet is just priority-tagged */ @@ -418,24 +424,24 @@ ether_demux(ifnet_t ifp, mbuf_t m, char *frame_header, m->m_data += ETHER_VLAN_ENCAP_LEN; m->m_pkthdr.len -= ETHER_VLAN_ENCAP_LEN; m->m_pkthdr.csum_flags = 0; /* can't trust hardware checksum */ - } else if (ether_type == htons(ETHERTYPE_ARP)) + } else if (ether_type == htons(ETHERTYPE_ARP)) { m->m_pkthdr.pkt_flags |= PKTF_INET_RESOLVE; /* ARP packet */ - + } data = mtod(m, u_int8_t*); /* - * Determine the packet's protocol type and stuff the protocol into - * longs for quick compares. - */ + * Determine the packet's protocol type and stuff the protocol into + * longs for quick compares. + */ if (ntohs(ether_type) <= 1500) { - bcopy(data, &extProto1, sizeof (u_int32_t)); + bcopy(data, &extProto1, sizeof(u_int32_t)); /* SAP or SNAP */ if ((extProto1 & htonl(0xFFFFFF00)) == htonl(0xAAAA0300)) { /* SNAP */ type = DLIL_DESC_SNAP; - bcopy(data + sizeof (u_int32_t), &extProto2, - sizeof (u_int32_t)); + bcopy(data + sizeof(u_int32_t), &extProto2, + sizeof(u_int32_t)); extProto1 &= htonl(0x000000FF); } else { type = DLIL_DESC_SAP; @@ -446,15 +452,15 @@ ether_demux(ifnet_t ifp, mbuf_t m, char *frame_header, } /* - * Search through the connected protocols for a match. - */ + * Search through the connected protocols for a match. 
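The demux logic above leans on two classic facts: a type/length field of 1500 or less is an 802.3 length rather than an EtherType, and the masked compare (extProto1 & htonl(0xFFFFFF00)) == htonl(0xAAAA0300) is just a fast way to test for the AA AA 03 byte prefix of a SNAP LLC header. The same decision, written byte-wise:

    #include <arpa/inet.h>
    #include <stdint.h>

    enum frame_kind { FRAME_ETYPE2, FRAME_SNAP, FRAME_SAP };

    /*
     * type_field: the two bytes after the MAC addresses, still in
     * network order; payload: the first bytes following that field.
     */
    static enum frame_kind
    classify_frame(uint16_t type_field, const uint8_t *payload)
    {
            if (ntohs(type_field) > 1500)
                    return FRAME_ETYPE2;    /* Ethernet II EtherType */
            /* 802.3: the field is a length; look at the LLC header. */
            if (payload[0] == 0xAA && payload[1] == 0xAA &&
                payload[2] == 0x03)
                    return FRAME_SNAP;      /* 5 more bytes: OUI + proto */
            return FRAME_SAP;
    }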
+ */ switch (type) { case DLIL_DESC_ETYPE2: for (i = 0; i < maxd; i++) { if ((ed[i].type == type) && (ed[i].data[0] == ether_type)) { *protocol_family = ed[i].protocol_family; - return (0); + return 0; } } break; @@ -464,7 +470,7 @@ ether_demux(ifnet_t ifp, mbuf_t m, char *frame_header, if ((ed[i].type == type) && (ed[i].data[0] == extProto1)) { *protocol_family = ed[i].protocol_family; - return (0); + return 0; } } break; @@ -473,15 +479,15 @@ ether_demux(ifnet_t ifp, mbuf_t m, char *frame_header, for (i = 0; i < maxd; i++) { if ((ed[i].type == type) && (ed[i].data[0] == extProto1) && - (ed[i].data[1] == extProto2)) { + (ed[i].data[1] == extProto2)) { *protocol_family = ed[i].protocol_family; - return (0); + return 0; } } - break; + break; } - return (ENOENT); + return ENOENT; } /* @@ -503,11 +509,11 @@ ether_frameout(struct ifnet *ifp, struct mbuf **m, #endif /* KPI_INTERFACE_EMBEDDED */ { #if KPI_INTERFACE_EMBEDDED - return (ether_frameout_extended(ifp, m, ndest, edst, ether_type, - prepend_len, postpend_len)); + return ether_frameout_extended(ifp, m, ndest, edst, ether_type, + prepend_len, postpend_len); #else /* !KPI_INTERFACE_EMBEDDED */ - return (ether_frameout_extended(ifp, m, ndest, edst, ether_type, - NULL, NULL)); + return ether_frameout_extended(ifp, m, ndest, edst, ether_type, + NULL, NULL); #endif /* !KPI_INTERFACE_EMBEDDED */ } @@ -523,7 +529,7 @@ ether_frameout_extended(struct ifnet *ifp, struct mbuf **m, const char *ether_type, u_int32_t *prepend_len, u_int32_t *postpend_len) { struct ether_header *eh; - int hlen; /* link layer header length */ + int hlen; /* link layer header length */ hlen = ETHER_HDR_LEN; @@ -547,7 +553,7 @@ ether_frameout_extended(struct ifnet *ifp, struct mbuf **m, } else if (_ether_cmp(edst, IF_LLADDR(ifp)) == 0) { dlil_output(lo_ifp, ndest->sa_family, *m, NULL, ndest, 0, NULL); - return (EJUSTRETURN); + return EJUSTRETURN; } } @@ -555,67 +561,72 @@ ether_frameout_extended(struct ifnet *ifp, struct mbuf **m, * Add local net header. If no space in first mbuf, * allocate another. */ - M_PREPEND(*m, sizeof (struct ether_header), M_DONTWAIT, 0); - if (*m == NULL) - return (EJUSTRETURN); + M_PREPEND(*m, sizeof(struct ether_header), M_DONTWAIT, 0); + if (*m == NULL) { + return EJUSTRETURN; + } - if (prepend_len != NULL) - *prepend_len = sizeof (struct ether_header); - if (postpend_len != NULL) + if (prepend_len != NULL) { + *prepend_len = sizeof(struct ether_header); + } + if (postpend_len != NULL) { *postpend_len = 0; + } eh = mtod(*m, struct ether_header *); (void) memcpy(&eh->ether_type, ether_type, sizeof(eh->ether_type)); (void) memcpy(eh->ether_dhost, edst, ETHER_ADDR_LEN); ifnet_lladdr_copy_bytes(ifp, eh->ether_shost, ETHER_ADDR_LEN); - return (0); + return 0; } errno_t ether_check_multi(ifnet_t ifp, const struct sockaddr *proto_addr) { #pragma unused(ifp) - errno_t result = EAFNOSUPPORT; + errno_t result = EAFNOSUPPORT; const u_char *e_addr; /* * AF_SPEC and AF_LINK don't require translation. We do * want to verify that they specify a valid multicast. 
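ether_check_multi() needs only the IEEE 802 individual/group bit: a destination is multicast exactly when the least significant bit of the first octet is set, with broadcast (ff:ff:ff:ff:ff:ff) as the all-ones special case. Standalone:

    #include <errno.h>
    #include <stdint.h>

    /* Returns 0 for a valid group (multicast/broadcast) address,
     * EADDRNOTAVAIL otherwise, mirroring the result convention above. */
    static int
    check_group_addr(const uint8_t eaddr[6])
    {
            return (eaddr[0] & 0x01) ? 0 : EADDRNOTAVAIL;
    }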
*/ - switch(proto_addr->sa_family) { + switch (proto_addr->sa_family) { case AF_UNSPEC: e_addr = (const u_char*)&proto_addr->sa_data[0]; - if ((e_addr[0] & 0x01) != 0x01) + if ((e_addr[0] & 0x01) != 0x01) { result = EADDRNOTAVAIL; - else + } else { result = 0; + } break; case AF_LINK: e_addr = CONST_LLADDR((const struct sockaddr_dl*) (uintptr_t)(size_t)proto_addr); - if ((e_addr[0] & 0x01) != 0x01) + if ((e_addr[0] & 0x01) != 0x01) { result = EADDRNOTAVAIL; - else + } else { result = 0; + } break; } - return (result); + return result; } int ether_ioctl(ifnet_t ifp, u_int32_t command, void *data) { #pragma unused(ifp, command, data) - return (EOPNOTSUPP); + return EOPNOTSUPP; } __private_extern__ int ether_family_init(void) { - errno_t error = 0; + errno_t error = 0; /* Register protocol registration functions */ if ((error = proto_register_plumber(PF_INET, APPLE_IF_FAM_ETHERNET, @@ -647,5 +658,5 @@ ether_family_init(void) done: - return (error); + return error; } diff --git a/bsd/net/ether_if_module.h b/bsd/net/ether_if_module.h index 9c1a58281..82beafab5 100644 --- a/bsd/net/ether_if_module.h +++ b/bsd/net/ether_if_module.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/net/ether_inet6_pr_module.c b/bsd/net/ether_inet6_pr_module.c index ee014a3dd..816995437 100644 --- a/bsd/net/ether_inet6_pr_module.c +++ b/bsd/net/ether_inet6_pr_module.c @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -99,7 +99,7 @@ #include static const u_char etherip6allnodes[ETHER_ADDR_LEN] = - { 0x33, 0x33, 0, 0, 0, 1 }; +{ 0x33, 0x33, 0, 0, 0, 1 }; /* * Process a received Ethernet packet; @@ -114,7 +114,7 @@ ether_inet6_input(ifnet_t ifp, protocol_family_t protocol, struct ether_header *eh = (struct ether_header *)(void *)header; u_int16_t ether_type; - bcopy(&eh->ether_type, ðer_type, sizeof (ether_type)); + bcopy(&eh->ether_type, ðer_type, sizeof(ether_type)); if (ether_type == htons(ETHERTYPE_IPV6)) { struct ifnet *mifp; @@ -146,13 +146,14 @@ ether_inet6_input(ifnet_t ifp, protocol_family_t protocol, } } - if (proto_input(protocol, packet) != 0) + if (proto_input(protocol, packet) != 0) { m_freem(packet); + } } else { m_freem(packet); } - return (EJUSTRETURN); + return EJUSTRETURN; } static errno_t @@ -161,7 +162,7 @@ ether_inet6_pre_output(ifnet_t ifp, protocol_family_t protocol_family, char *type, char *edst) { #pragma unused(protocol_family) - errno_t result; + errno_t result; struct sockaddr_dl sdl; struct mbuf *m = *m0; @@ -171,16 +172,16 @@ ether_inet6_pre_output(ifnet_t ifp, protocol_family_t protocol_family, m->m_flags |= M_LOOP; result = nd6_lookup_ipv6(ifp, (const struct sockaddr_in6 *) - (uintptr_t)(size_t)dst_netaddr, &sdl, sizeof (sdl), route, *m0); + (uintptr_t)(size_t)dst_netaddr, &sdl, sizeof(sdl), route, *m0); if (result == 0) { u_int16_t ethertype_ipv6 = htons(ETHERTYPE_IPV6); - bcopy(ðertype_ipv6, type, sizeof (ethertype_ipv6)); + bcopy(ðertype_ipv6, type, sizeof(ethertype_ipv6)); bcopy(LLADDR(&sdl), edst, sdl.sdl_alen); } - return (result); + return result; } static int @@ -192,14 +193,17 @@ ether_inet6_resolve_multi(ifnet_t ifp, const struct sockaddr *proto_addr, const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)(uintptr_t)(size_t)proto_addr; - if (proto_addr->sa_family != AF_INET6) - return (EAFNOSUPPORT); + if (proto_addr->sa_family != AF_INET6) { + return EAFNOSUPPORT; + } - if (proto_addr->sa_len < sizeof (struct sockaddr_in6)) - return (EINVAL); + if (proto_addr->sa_len < sizeof(struct sockaddr_in6)) { + return EINVAL; + } - if (ll_len < minsize) - return (EMSGSIZE); + if (ll_len < minsize) { + return EMSGSIZE; + } bzero(out_ll, minsize); out_ll->sdl_len = minsize; @@ -211,7 +215,7 @@ ether_inet6_resolve_multi(ifnet_t ifp, const struct sockaddr *proto_addr, out_ll->sdl_slen = 0; ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, LLADDR(out_ll)); - return (0); + return 0; } static errno_t @@ -222,7 +226,7 @@ ether_inet6_prmod_ioctl(ifnet_t ifp, protocol_family_t protocol_family, int error = 0; switch (command) { - case SIOCSIFADDR: /* struct ifaddr pointer */ + case SIOCSIFADDR: /* struct ifaddr pointer */ /* * Note: caller of ifnet_ioctl() passes in pointer to * struct ifaddr as parameter to SIOCSIFADDR, for legacy @@ -234,7 +238,7 @@ ether_inet6_prmod_ioctl(ifnet_t ifp, protocol_family_t protocol_family, } break; - case SIOCGIFADDR: { /* struct ifreq */ + case SIOCGIFADDR: { /* struct ifreq */ struct ifreq *ifr = (struct ifreq *)(void *)data; (void) ifnet_guarded_lladdr_copy_bytes(ifp, ifr->ifr_addr.sa_data, ETHER_ADDR_LEN); @@ -245,22 +249,22 @@ ether_inet6_prmod_ioctl(ifnet_t ifp, protocol_family_t protocol_family, error = EOPNOTSUPP; break; } - return (error); + return error; } errno_t ether_attach_inet6(struct ifnet *ifp, protocol_family_t protocol_family) { #pragma unused(protocol_family) - struct ifnet_attach_proto_param proto; + struct ifnet_attach_proto_param proto; struct ifnet_demux_desc demux[1]; 
u_short en_6native = htons(ETHERTYPE_IPV6); - errno_t error; + errno_t error; - bzero(&proto, sizeof (proto)); + bzero(&proto, sizeof(proto)); demux[0].type = DLIL_DESC_ETYPE2; demux[0].data = &en_6native; - demux[0].datalen = sizeof (en_6native); + demux[0].datalen = sizeof(en_6native); proto.demux_list = demux; proto.demux_count = 1; proto.input = ether_inet6_input; @@ -273,7 +277,7 @@ ether_attach_inet6(struct ifnet *ifp, protocol_family_t protocol_family) if_name(ifp)); } - return (error); + return error; } void diff --git a/bsd/net/ether_inet_pr_module.c b/bsd/net/ether_inet_pr_module.c index b47c9dfba..436c8fc7b 100644 --- a/bsd/net/ether_inet_pr_module.c +++ b/bsd/net/ether_inet_pr_module.c @@ -107,26 +107,27 @@ extern void *kdp_get_interface(void); extern void kdp_set_ip_and_mac_addresses(struct in_addr *ipaddr, struct ether_addr *macaddr); -#define _ip_copy(dst, src) \ +#define _ip_copy(dst, src) \ bcopy(src, dst, sizeof (struct in_addr)) static void ether_inet_arp_input(struct ifnet *ifp, struct mbuf *m) { struct ether_arp *ea; - struct sockaddr_dl sender_hw; - struct sockaddr_in sender_ip; - struct sockaddr_in target_ip; + struct sockaddr_dl sender_hw; + struct sockaddr_in sender_ip; + struct sockaddr_in target_ip; - if (mbuf_len(m) < sizeof (*ea) && mbuf_pullup(&m, sizeof (*ea)) != 0) + if (mbuf_len(m) < sizeof(*ea) && mbuf_pullup(&m, sizeof(*ea)) != 0) { return; + } ea = mbuf_data(m); /* Verify this is an ethernet/ip arp and address lengths are correct */ if (ntohs(ea->arp_hrd) != ARPHRD_ETHER || ntohs(ea->arp_pro) != ETHERTYPE_IP || - ea->arp_pln != sizeof (struct in_addr) || + ea->arp_pln != sizeof(struct in_addr) || ea->arp_hln != ETHER_ADDR_LEN) { mbuf_freem(m); return; @@ -138,15 +139,15 @@ ether_inet_arp_input(struct ifnet *ifp, struct mbuf *m) return; } - bzero(&sender_ip, sizeof (sender_ip)); - sender_ip.sin_len = sizeof (sender_ip); + bzero(&sender_ip, sizeof(sender_ip)); + sender_ip.sin_len = sizeof(sender_ip); sender_ip.sin_family = AF_INET; _ip_copy(&sender_ip.sin_addr, ea->arp_spa); target_ip = sender_ip; _ip_copy(&target_ip.sin_addr, ea->arp_tpa); - bzero(&sender_hw, sizeof (sender_hw)); - sender_hw.sdl_len = sizeof (sender_hw); + bzero(&sender_hw, sizeof(sender_hw)); + sender_hw.sdl_len = sizeof(sender_hw); sender_hw.sdl_family = AF_LINK; sender_hw.sdl_type = IFT_ETHER; sender_hw.sdl_alen = ETHER_ADDR_LEN; @@ -166,13 +167,13 @@ ether_inet_arp_input(struct ifnet *ifp, struct mbuf *m) * the ether header, which is provided separately. 
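ether_inet_arp_input() above admits only Ethernet/IPv4 ARP: hardware format ARPHRD_ETHER (1), protocol format ETHERTYPE_IP (0x0800), and address lengths of 6 and 4. Those checks in isolation, with an illustrative packed struct standing in for struct ether_arp:

    #include <arpa/inet.h>
    #include <stdint.h>

    struct arp_eth_ip {                 /* stand-in for struct ether_arp */
            uint16_t ar_hrd, ar_pro;    /* hardware/protocol format */
            uint8_t  ar_hln, ar_pln;    /* address lengths */
            uint16_t ar_op;
            uint8_t  sha[6], spa[4], tha[6], tpa[4];
    } __attribute__((packed));

    static int
    arp_is_ether_ipv4(const struct arp_eth_ip *ea)
    {
            return ntohs(ea->ar_hrd) == 1 &&        /* ARPHRD_ETHER */
                ntohs(ea->ar_pro) == 0x0800 &&      /* ETHERTYPE_IP */
                ea->ar_hln == 6 && ea->ar_pln == 4;
    }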
*/ static errno_t -ether_inet_input(ifnet_t ifp, protocol_family_t protocol_family, +ether_inet_input(ifnet_t ifp, protocol_family_t protocol_family, mbuf_t m_list) { #pragma unused(ifp, protocol_family) - mbuf_t m; - mbuf_t *tailptr = &m_list; - mbuf_t nextpkt; + mbuf_t m; + mbuf_t *tailptr = &m_list; + mbuf_t nextpkt; /* Strip ARP and non-IP packets out of the list */ for (m = m_list; m; m = nextpkt) { @@ -206,10 +207,11 @@ ether_inet_input(ifnet_t ifp, protocol_family_t protocol_family, } else { /* Pass ARP packets to arp input */ m->m_nextpkt = NULL; - if (eh->ether_type == htons(ETHERTYPE_ARP)) + if (eh->ether_type == htons(ETHERTYPE_ARP)) { ether_inet_arp_input(mifp, m); - else + } else { mbuf_freem(m); + } } } @@ -220,7 +222,7 @@ ether_inet_input(ifnet_t ifp, protocol_family_t protocol_family, mbuf_freem_list(m_list); } - return (EJUSTRETURN); + return EJUSTRETURN; } static errno_t @@ -233,8 +235,9 @@ ether_inet_pre_output(ifnet_t ifp, protocol_family_t protocol_family, const struct ether_header *eh; errno_t result = 0; - if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) - return (ENETDOWN); + if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) { + return ENETDOWN; + } /* * Tell ether_frameout it's ok to loop packet unless negated below. @@ -247,12 +250,12 @@ ether_inet_pre_output(ifnet_t ifp, protocol_family_t protocol_family, result = arp_lookup_ip(ifp, (const struct sockaddr_in *)(uintptr_t)(size_t)dst_netaddr, - &ll_dest, sizeof (ll_dest), (route_t)route, *m0); + &ll_dest, sizeof(ll_dest), (route_t)route, *m0); if (result == 0) { u_int16_t ethertype_ip = htons(ETHERTYPE_IP); bcopy(LLADDR(&ll_dest), edst, ETHER_ADDR_LEN); - bcopy(ðertype_ip, type, sizeof (ethertype_ip)); + bcopy(ðertype_ip, type, sizeof(ethertype_ip)); } break; } @@ -263,7 +266,7 @@ ether_inet_pre_output(ifnet_t ifp, protocol_family_t protocol_family, eh = (const struct ether_header *)(uintptr_t)(size_t) dst_netaddr->sa_data; (void) memcpy(edst, eh->ether_dhost, 6); - bcopy(&eh->ether_type, type, sizeof (u_short)); + bcopy(&eh->ether_type, type, sizeof(u_short)); break; default: @@ -274,7 +277,7 @@ ether_inet_pre_output(ifnet_t ifp, protocol_family_t protocol_family, break; } - return (result); + return result; } static errno_t @@ -286,14 +289,17 @@ ether_inet_resolve_multi(ifnet_t ifp, const struct sockaddr *proto_addr, const struct sockaddr_in *sin = (const struct sockaddr_in *)(uintptr_t)(size_t)proto_addr; - if (proto_addr->sa_family != AF_INET) - return (EAFNOSUPPORT); + if (proto_addr->sa_family != AF_INET) { + return EAFNOSUPPORT; + } - if (proto_addr->sa_len < sizeof (struct sockaddr_in)) - return (EINVAL); + if (proto_addr->sa_len < sizeof(struct sockaddr_in)) { + return EINVAL; + } - if (ll_len < minsize) - return (EMSGSIZE); + if (ll_len < minsize) { + return EMSGSIZE; + } bzero(out_ll, minsize); out_ll->sdl_len = minsize; @@ -305,7 +311,7 @@ ether_inet_resolve_multi(ifnet_t ifp, const struct sockaddr *proto_addr, out_ll->sdl_slen = 0; ETHER_MAP_IP_MULTICAST(&sin->sin_addr, LLADDR(out_ll)); - return (0); + return 0; } static errno_t @@ -316,8 +322,8 @@ ether_inet_prmod_ioctl(ifnet_t ifp, protocol_family_t protocol_family, int error = 0; switch (command) { - case SIOCSIFADDR: /* struct ifaddr pointer */ - case SIOCAIFADDR: { /* struct ifaddr pointer */ + case SIOCSIFADDR: /* struct ifaddr pointer */ + case SIOCAIFADDR: { /* struct ifaddr pointer */ /* * Note: caller of ifnet_ioctl() passes in pointer to * struct ifaddr as parameter to SIOC{A,S}IFADDR, for @@ -330,13 
+336,15 @@ ether_inet_prmod_ioctl(ifnet_t ifp, protocol_family_t protocol_family, ifnet_ioctl(ifp, 0, SIOCSIFFLAGS, NULL); } - if (ifaddr_address_family(ifa) != AF_INET) + if (ifaddr_address_family(ifa) != AF_INET) { break; + } inet_arp_init_ifaddr(ifp, ifa); - if (command != SIOCSIFADDR) + if (command != SIOCSIFADDR) { break; + } /* * Register new IP and MAC addresses with the kernel @@ -348,13 +356,14 @@ ether_inet_prmod_ioctl(ifnet_t ifp, protocol_family_t protocol_family, */ if ((kdp_get_interface() != 0 && kdp_get_interface() == ifp->if_softc) || - (kdp_get_interface() == 0 && ifp->if_unit == 0)) + (kdp_get_interface() == 0 && ifp->if_unit == 0)) { kdp_set_ip_and_mac_addresses(&(IA_SIN(ifa)->sin_addr), (struct ether_addr *)IF_LLADDR(ifp)); + } break; } - case SIOCGIFADDR: { /* struct ifreq */ + case SIOCGIFADDR: { /* struct ifreq */ struct ifreq *ifr = data; ifnet_guarded_lladdr_copy_bytes(ifp, ifr->ifr_addr.sa_data, ETHER_ADDR_LEN); @@ -366,7 +375,7 @@ ether_inet_prmod_ioctl(ifnet_t ifp, protocol_family_t protocol_family, break; } - return (error); + return error; } static void @@ -376,7 +385,7 @@ ether_inet_event(ifnet_t ifp, protocol_family_t protocol, #pragma unused(protocol) ifaddr_t *addresses; - if (event->vendor_code != KEV_VENDOR_APPLE || + if (event->vendor_code != KEV_VENDOR_APPLE || event->kev_class != KEV_NETWORK_CLASS || event->kev_subclass != KEV_DL_SUBCLASS || event->event_code != KEV_DL_LINK_ADDRESS_CHANGED) { @@ -399,8 +408,8 @@ ether_inet_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, const struct sockaddr *sender_proto, const struct sockaddr_dl *target_hw, const struct sockaddr *target_proto) { - mbuf_t m; - errno_t result; + mbuf_t m; + errno_t result; struct ether_header *eh; struct ether_arp *ea; const struct sockaddr_in *sender_ip = @@ -409,34 +418,38 @@ ether_inet_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, (const struct sockaddr_inarp *)(uintptr_t)(size_t)target_proto; char *datap; - if (target_ip == NULL) - return (EINVAL); + if (target_ip == NULL) { + return EINVAL; + } if ((sender_ip && sender_ip->sin_family != AF_INET) || - target_ip->sin_family != AF_INET) - return (EAFNOSUPPORT); + target_ip->sin_family != AF_INET) { + return EAFNOSUPPORT; + } result = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_DATA, &m); - if (result != 0) - return (result); + if (result != 0) { + return result; + } - mbuf_setlen(m, sizeof (*ea)); - mbuf_pkthdr_setlen(m, sizeof (*ea)); + mbuf_setlen(m, sizeof(*ea)); + mbuf_pkthdr_setlen(m, sizeof(*ea)); /* Move the data pointer in the mbuf to the end, aligned to 4 bytes */ datap = mbuf_datastart(m); datap += mbuf_trailingspace(m); datap -= (((uintptr_t)datap) & 0x3); - mbuf_setdata(m, datap, sizeof (*ea)); + mbuf_setdata(m, datap, sizeof(*ea)); ea = mbuf_data(m); /* * Prepend the ethernet header, we will send the raw frame; * callee frees the original mbuf when allocation fails. 
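ether_inet_arp() above parks the ARP body at the tail of the mbuf's buffer and then rounds the data pointer down to a 4-byte boundary; subtracting ((uintptr_t)datap & 0x3) is the same as masking off the two low address bits. The idiom by itself:

    #include <stdint.h>

    /* Round a pointer down to the nearest 4-byte boundary. */
    static inline void *
    align4_down(void *p)
    {
            return (void *)((uintptr_t)p & ~(uintptr_t)0x3);
    }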
*/ - result = mbuf_prepend(&m, sizeof (*eh), MBUF_DONTWAIT); - if (result != 0) - return (result); + result = mbuf_prepend(&m, sizeof(*eh), MBUF_DONTWAIT); + if (result != 0) { + return result; + } eh = mbuf_data(m); eh->ether_type = htons(ETHERTYPE_ARP); @@ -447,23 +460,23 @@ ether_inet_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, /* Fill out the arp header */ ea->arp_pro = htons(ETHERTYPE_IP); - ea->arp_hln = sizeof (ea->arp_sha); - ea->arp_pln = sizeof (ea->arp_spa); + ea->arp_hln = sizeof(ea->arp_sha); + ea->arp_pln = sizeof(ea->arp_spa); ea->arp_hrd = htons(ARPHRD_ETHER); ea->arp_op = htons(arpop); /* Sender Hardware */ if (sender_hw != NULL) { bcopy(CONST_LLADDR(sender_hw), ea->arp_sha, - sizeof (ea->arp_sha)); + sizeof(ea->arp_sha)); } else { ifnet_lladdr_copy_bytes(ifp, ea->arp_sha, ETHER_ADDR_LEN); } - ifnet_lladdr_copy_bytes(ifp, eh->ether_shost, sizeof (eh->ether_shost)); + ifnet_lladdr_copy_bytes(ifp, eh->ether_shost, sizeof(eh->ether_shost)); /* Sender IP */ if (sender_ip != NULL) { - bcopy(&sender_ip->sin_addr, ea->arp_spa, sizeof (ea->arp_spa)); + bcopy(&sender_ip->sin_addr, ea->arp_spa, sizeof(ea->arp_spa)); } else { struct ifaddr *ifa; @@ -475,7 +488,7 @@ ether_inet_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, ifa->ifa_addr->sa_family == AF_INET) { bcopy(&((struct sockaddr_in *)(void *) ifa->ifa_addr)->sin_addr, ea->arp_spa, - sizeof (ea->arp_spa)); + sizeof(ea->arp_spa)); IFA_UNLOCK(ifa); break; } @@ -485,29 +498,30 @@ ether_inet_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, if (ifa == NULL) { mbuf_freem(m); - return (ENXIO); + return ENXIO; } } /* Target Hardware */ if (target_hw == NULL) { - bzero(ea->arp_tha, sizeof (ea->arp_tha)); + bzero(ea->arp_tha, sizeof(ea->arp_tha)); bcopy(etherbroadcastaddr, eh->ether_dhost, - sizeof (eh->ether_dhost)); + sizeof(eh->ether_dhost)); m->m_flags |= M_BCAST; } else { bcopy(CONST_LLADDR(target_hw), ea->arp_tha, - sizeof (ea->arp_tha)); + sizeof(ea->arp_tha)); bcopy(CONST_LLADDR(target_hw), eh->ether_dhost, - sizeof (eh->ether_dhost)); + sizeof(eh->ether_dhost)); if (bcmp(eh->ether_dhost, etherbroadcastaddr, - ETHER_ADDR_LEN) == 0) + ETHER_ADDR_LEN) == 0) { m->m_flags |= M_BCAST; + } } /* Target IP */ - bcopy(&target_ip->sin_addr, ea->arp_tpa, sizeof (ea->arp_tpa)); + bcopy(&target_ip->sin_addr, ea->arp_tpa, sizeof(ea->arp_tpa)); /* * PKTF_{INET,INET6}_RESOLVE_RTR are mutually exclusive, so make @@ -520,8 +534,9 @@ ether_inet_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, * the packet accordingly so that the driver can find out, * in case it needs to perform driver-specific action(s). 
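The target-hardware branch above follows the usual ARP convention: with no known target (a request), arp_tha is zeroed and the frame goes to the broadcast MAC; with a known target (a directed reply), both the ARP body and the Ethernet destination carry it. In outline (names illustrative):

    #include <stdint.h>
    #include <string.h>

    static const uint8_t ether_bcast[6] =
        { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

    static void
    set_arp_target(uint8_t tha[6], uint8_t dhost[6],
        const uint8_t *target_hw)
    {
            if (target_hw == NULL) {        /* request: ask everyone */
                    memset(tha, 0, 6);
                    memcpy(dhost, ether_bcast, 6);
            } else {                        /* reply: unicast back */
                    memcpy(tha, target_hw, 6);
                    memcpy(dhost, target_hw, 6);
            }
    }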
*/ - if (arpop == ARPOP_REQUEST && (target_ip->sin_other & SIN_ROUTER)) + if (arpop == ARPOP_REQUEST && (target_ip->sin_other & SIN_ROUTER)) { m->m_pkthdr.pkt_flags |= PKTF_RESOLVE_RTR; + } if (ifp->if_eflags & IFEF_TXSTART) { /* @@ -533,7 +548,7 @@ ether_inet_arp(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, ifnet_output_raw(ifp, PF_INET, m); - return (0); + return 0; } errno_t @@ -544,19 +559,19 @@ ether_attach_inet(struct ifnet *ifp, protocol_family_t proto_family) struct ifnet_demux_desc demux[2]; u_short en_native = htons(ETHERTYPE_IP); u_short arp_native = htons(ETHERTYPE_ARP); - errno_t error; + errno_t error; - bzero(&demux[0], sizeof (demux)); + bzero(&demux[0], sizeof(demux)); demux[0].type = DLIL_DESC_ETYPE2; demux[0].data = &en_native; - demux[0].datalen = sizeof (en_native); + demux[0].datalen = sizeof(en_native); demux[1].type = DLIL_DESC_ETYPE2; demux[1].data = &arp_native; - demux[1].datalen = sizeof (arp_native); + demux[1].datalen = sizeof(arp_native); - bzero(&proto, sizeof (proto)); + bzero(&proto, sizeof(proto)); proto.demux_list = demux; - proto.demux_count = sizeof (demux) / sizeof (demux[0]); + proto.demux_count = sizeof(demux) / sizeof(demux[0]); proto.input = ether_inet_input; proto.pre_output = ether_inet_pre_output; proto.ioctl = ether_inet_prmod_ioctl; @@ -569,7 +584,7 @@ ether_attach_inet(struct ifnet *ifp, protocol_family_t proto_family) printf("WARNING: %s can't attach ip to %s\n", __func__, if_name(ifp)); } - return (error); + return error; } void diff --git a/bsd/net/etherdefs.h b/bsd/net/etherdefs.h index 82ac3713f..df602c958 100644 --- a/bsd/net/etherdefs.h +++ b/bsd/net/etherdefs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies @@ -58,12 +58,12 @@ #include #warning net/etherdefs.h is obsolete! 
Use net/ethernet.h -#include +#include /* * Ethernet address - 6 octets */ -#define NUM_EN_ADDR_BYTES ETHER_ADDR_LEN +#define NUM_EN_ADDR_BYTES ETHER_ADDR_LEN typedef struct ether_addr enet_addr_t; @@ -72,16 +72,16 @@ typedef struct ether_header ether_header_t; #define IFTYPE_ETHERNET "10MB Ethernet" -#define ETHERHDRSIZE ETHER_HDR_LEN -#define ETHERMAXPACKET ETHER_MAX_LEN -#define ETHERMINPACKET ETHER_MIN_LEN -#define ETHERCRC ETHER_CRC_LEN +#define ETHERHDRSIZE ETHER_HDR_LEN +#define ETHERMAXPACKET ETHER_MAX_LEN +#define ETHERMINPACKET ETHER_MIN_LEN +#define ETHERCRC ETHER_CRC_LEN /* * Byte and bit in an enet_addr_t defining individual/group destination. */ -#define EA_GROUP_BYTE 0 -#define EA_GROUP_BIT 0x01 +#define EA_GROUP_BYTE 0 +#define EA_GROUP_BIT 0x01 #endif /* KERNEL && !__APPLE_API_OBSOLETE */ diff --git a/bsd/net/ethernet.h b/bsd/net/ethernet.h index 23719456b..9dda79efb 100644 --- a/bsd/net/ethernet.h +++ b/bsd/net/ethernet.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,37 +33,37 @@ #ifndef _NET_ETHERNET_H_ #define _NET_ETHERNET_H_ #include -#include /* u_ types */ +#include /* u_ types */ /* * The number of bytes in an ethernet (MAC) address. */ -#define ETHER_ADDR_LEN 6 +#define ETHER_ADDR_LEN 6 /* * The number of bytes in the type field. */ -#define ETHER_TYPE_LEN 2 +#define ETHER_TYPE_LEN 2 /* * The number of bytes in the trailing CRC field. */ -#define ETHER_CRC_LEN 4 +#define ETHER_CRC_LEN 4 /* * The length of the combined header. */ -#define ETHER_HDR_LEN (ETHER_ADDR_LEN*2+ETHER_TYPE_LEN) +#define ETHER_HDR_LEN (ETHER_ADDR_LEN*2+ETHER_TYPE_LEN) /* * The minimum packet length. */ -#define ETHER_MIN_LEN 64 +#define ETHER_MIN_LEN 64 /* * The maximum packet length. */ -#define ETHER_MAX_LEN 1518 +#define ETHER_MAX_LEN 1518 /* * Mbuf adjust factor to force 32-bit alignment of IP header. @@ -76,37 +76,37 @@ /* * A macro to validate a length with */ -#define ETHER_IS_VALID_LEN(foo) \ +#define ETHER_IS_VALID_LEN(foo) \ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN) /* * Structure of a 10Mb/s Ethernet header. */ -typedef struct ether_header { - u_char ether_dhost[ETHER_ADDR_LEN]; - u_char ether_shost[ETHER_ADDR_LEN]; - u_short ether_type; +typedef struct ether_header { + u_char ether_dhost[ETHER_ADDR_LEN]; + u_char ether_shost[ETHER_ADDR_LEN]; + u_short ether_type; } ether_header_t; /* * Structure of a 48-bit Ethernet address. 
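Every derived constant in ethernet.h reduces to three inputs: 6-byte addresses, a 2-byte type, a 4-byte CRC. Hence the 14-byte header, and the 64/1518-byte frame bounds yield the familiar 46-byte minimum and 1500-byte maximum payloads (ETHERMIN and ETHERMTU, defined a little further down). Checked at compile time, C11:

    #include <assert.h>

    static_assert(6 * 2 + 2 == 14, "ETHER_HDR_LEN");
    static_assert(1518 - 14 - 4 == 1500, "ETHERMTU = MAX - HDR - CRC");
    static_assert(64 - 14 - 4 == 46, "ETHERMIN = MIN - HDR - CRC");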
*/ -typedef struct ether_addr { +typedef struct ether_addr { u_char octet[ETHER_ADDR_LEN]; } ether_addr_t; #define ether_addr_octet octet -#define ETHERTYPE_PUP 0x0200 /* PUP protocol */ -#define ETHERTYPE_IP 0x0800 /* IP protocol */ -#define ETHERTYPE_ARP 0x0806 /* Addr. resolution protocol */ -#define ETHERTYPE_REVARP 0x8035 /* reverse Addr. resolution protocol */ -#define ETHERTYPE_VLAN 0x8100 /* IEEE 802.1Q VLAN tagging */ -#define ETHERTYPE_IPV6 0x86dd /* IPv6 */ -#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */ -#define ETHERTYPE_RSN_PREAUTH 0x88c7 /* 802.11i / RSN Pre-Authentication */ -#define ETHERTYPE_PTP 0x88f7 /* IEEE 1588 Precision Time Protocol */ -#define ETHERTYPE_LOOPBACK 0x9000 /* used to test interfaces */ +#define ETHERTYPE_PUP 0x0200 /* PUP protocol */ +#define ETHERTYPE_IP 0x0800 /* IP protocol */ +#define ETHERTYPE_ARP 0x0806 /* Addr. resolution protocol */ +#define ETHERTYPE_REVARP 0x8035 /* reverse Addr. resolution protocol */ +#define ETHERTYPE_VLAN 0x8100 /* IEEE 802.1Q VLAN tagging */ +#define ETHERTYPE_IPV6 0x86dd /* IPv6 */ +#define ETHERTYPE_PAE 0x888e /* EAPOL PAE/802.1x */ +#define ETHERTYPE_RSN_PREAUTH 0x88c7 /* 802.11i / RSN Pre-Authentication */ +#define ETHERTYPE_PTP 0x88f7 /* IEEE 1588 Precision Time Protocol */ +#define ETHERTYPE_LOOPBACK 0x9000 /* used to test interfaces */ /* XXX - add more useful types here */ /* @@ -114,21 +114,21 @@ typedef struct ether_addr { * (type-ETHERTYPE_TRAIL)*512 bytes of data followed * by an ETHER type (as given above) and then the (variable-length) header. */ -#define ETHERTYPE_TRAIL 0x1000 /* Trailer packet */ -#define ETHERTYPE_NTRAILER 16 +#define ETHERTYPE_TRAIL 0x1000 /* Trailer packet */ +#define ETHERTYPE_NTRAILER 16 -#define ETHERMTU (ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN) -#define ETHERMIN (ETHER_MIN_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN) +#define ETHERMTU (ETHER_MAX_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN) +#define ETHERMIN (ETHER_MIN_LEN-ETHER_HDR_LEN-ETHER_CRC_LEN) #ifdef KERNEL_PRIVATE /* * The following are used by ethernet interfaces. 
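A note on the two _ether_cmp() variants above: the generic one compares the 6-byte address as three 16-bit loads, which presumes both pointers are 2-byte aligned (true for addresses embedded in Ethernet headers); the __arm__ build falls back to memcmp(), presumably because that alignment cannot be assumed safe there. The word-compare form, standalone:

    #include <stdint.h>

    /* 0 if equal, 1 otherwise; both inputs must be 2-byte aligned. */
    static int
    mac_cmp(const void *a, const void *b)
    {
            const uint16_t *x = a;
            const uint16_t *y = b;

            return (x[0] != y[0] || x[1] != y[1] || x[2] != y[2]) ? 1 : 0;
    }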
*/ -struct ether_addr *ether_aton(const char *); +struct ether_addr *ether_aton(const char *); #ifdef BSD_KERNEL_PRIVATE -extern u_char etherbroadcastaddr[ETHER_ADDR_LEN]; +extern u_char etherbroadcastaddr[ETHER_ADDR_LEN]; #if defined (__arm__) @@ -137,7 +137,7 @@ extern u_char etherbroadcastaddr[ETHER_ADDR_LEN]; static __inline__ int _ether_cmp(const void * a, const void * b) { - return (memcmp(a, b, ETHER_ADDR_LEN)); + return memcmp(a, b, ETHER_ADDR_LEN); } #else /* __arm__ */ @@ -147,13 +147,13 @@ _ether_cmp(const void * a, const void * b) { const u_int16_t * a_s = (const u_int16_t *)a; const u_int16_t * b_s = (const u_int16_t *)b; - + if (a_s[0] != b_s[0] - || a_s[1] != b_s[1] - || a_s[2] != b_s[2]) { - return (1); + || a_s[1] != b_s[1] + || a_s[2] != b_s[2]) { + return 1; } - return (0); + return 0; } #endif /* __arm__ */ @@ -171,11 +171,11 @@ _ether_cmp(const void * a, const void * b) */ __BEGIN_DECLS -int ether_hostton(const char *, struct ether_addr *); -int ether_line(const char *, struct ether_addr *, char *); -char *ether_ntoa(const struct ether_addr *); -struct ether_addr *ether_aton(const char *); -int ether_ntohost(char *, const struct ether_addr *); +int ether_hostton(const char *, struct ether_addr *); +int ether_line(const char *, struct ether_addr *, char *); +char *ether_ntoa(const struct ether_addr *); +struct ether_addr *ether_aton(const char *); +int ether_ntohost(char *, const struct ether_addr *); __END_DECLS #endif /* !KERNEL */ diff --git a/bsd/net/firewire.h b/bsd/net/firewire.h index da8450b9d..dfa51cb7e 100644 --- a/bsd/net/firewire.h +++ b/bsd/net/firewire.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,32 +37,32 @@ /* * The number of bytes in a FireWire EUI-64. */ -#define FIREWIRE_EUI64_LEN 8 +#define FIREWIRE_EUI64_LEN 8 /* * The number of bytes in the type field. */ -#define FIREWIRE_TYPE_LEN 2 +#define FIREWIRE_TYPE_LEN 2 /* * The length of the header provided by the FireWire network device. */ -#define FIREWIRE_HDR_LEN (FIREWIRE_EUI64_LEN*2+FIREWIRE_TYPE_LEN) +#define FIREWIRE_HDR_LEN (FIREWIRE_EUI64_LEN*2+FIREWIRE_TYPE_LEN) /* * The minimum packet length. */ -#define FIREWIRE_MIN_LEN 64 +#define FIREWIRE_MIN_LEN 64 /* * The maximum packet length. 
*/ -#define FIREWIRE_MAX_LEN 4096 +#define FIREWIRE_MAX_LEN 4096 /* * A macro to validate a length with */ -#define FIREWIRE_IS_VALID_LEN(foo) \ +#define FIREWIRE_IS_VALID_LEN(foo) \ ((foo) >= FIREWIRE_MIN_LEN && (foo) <= FIREWIRE_MAX_LEN) /* @@ -71,16 +71,16 @@ * The device uses a simplified header with just the non-changing * EUI-64 addresses and ethernet type specified; */ -struct firewire_header { - u_char firewire_dhost[FIREWIRE_EUI64_LEN]; - u_char firewire_shost[FIREWIRE_EUI64_LEN]; - u_short firewire_type; /* ethertype */ +struct firewire_header { + u_char firewire_dhost[FIREWIRE_EUI64_LEN]; + u_char firewire_shost[FIREWIRE_EUI64_LEN]; + u_short firewire_type; /* ethertype */ }; /* * Format of FireWire EUI-64. */ -struct firewire_eui64 { +struct firewire_eui64 { u_char octet[FIREWIRE_EUI64_LEN]; }; @@ -88,17 +88,17 @@ struct firewire_eui64 { * Format of FireWire hardware address. */ struct firewire_address { - u_char eui64[FIREWIRE_EUI64_LEN]; - u_char maxRec; - u_char spd; - u_int16_t unicastFifoHi; - u_int32_t unicastFifoLo; + u_char eui64[FIREWIRE_EUI64_LEN]; + u_char maxRec; + u_char spd; + u_int16_t unicastFifoHi; + u_int32_t unicastFifoLo; }; -#define FIREWIRE_ADDR_LEN 16 /* sizeof(struct firewire_address) */ +#define FIREWIRE_ADDR_LEN 16 /* sizeof(struct firewire_address) */ -#define FIREWIRE_MTU (FIREWIRE_MAX_LEN - FIREWIRE_HDR_LEN) -#define FIREWIRE_MIN (FIREWIRE_MIN_LEN - FIREWIRE_HDR_LEN) +#define FIREWIRE_MTU (FIREWIRE_MAX_LEN - FIREWIRE_HDR_LEN) +#define FIREWIRE_MIN (FIREWIRE_MIN_LEN - FIREWIRE_HDR_LEN) #endif /* !_NET_FIREWIRE_H_ */ diff --git a/bsd/net/flowadv.c b/bsd/net/flowadv.c index 214c5c715..020081305 100644 --- a/bsd/net/flowadv.c +++ b/bsd/net/flowadv.c @@ -96,8 +96,8 @@ #include /* Lock group and attribute for fadv_lock */ -static lck_grp_t *fadv_lock_grp; -static lck_grp_attr_t *fadv_lock_grp_attr; +static lck_grp_t *fadv_lock_grp; +static lck_grp_attr_t *fadv_lock_grp_attr; decl_lck_mtx_data(static, fadv_lock); /* protected by fadv_lock */ @@ -105,11 +105,11 @@ static STAILQ_HEAD(fadv_head, flowadv_fcentry) fadv_list; static thread_t fadv_thread = THREAD_NULL; static uint32_t fadv_active; -static unsigned int fadv_zone_size; /* size of flowadv_fcentry */ -static struct zone *fadv_zone; /* zone for flowadv_fcentry */ +static unsigned int fadv_zone_size; /* size of flowadv_fcentry */ +static struct zone *fadv_zone; /* zone for flowadv_fcentry */ -#define FADV_ZONE_MAX 32 /* maximum elements in zone */ -#define FADV_ZONE_NAME "fadv_zone" /* zone name */ +#define FADV_ZONE_MAX 32 /* maximum elements in zone */ +#define FADV_ZONE_NAME "fadv_zone" /* zone name */ static int flowadv_thread_cont(int); static void flowadv_thread_func(void *, wait_result_t); @@ -124,8 +124,8 @@ flowadv_init(void) fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr); lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL); - fadv_zone_size = P2ROUNDUP(sizeof (struct flowadv_fcentry), - sizeof (u_int64_t)); + fadv_zone_size = P2ROUNDUP(sizeof(struct flowadv_fcentry), + sizeof(u_int64_t)); fadv_zone = zinit(fadv_zone_size, FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME); if (fadv_zone == NULL) { @@ -150,10 +150,11 @@ flowadv_alloc_entry(int how) struct flowadv_fcentry *fce; fce = (how == M_WAITOK) ? 
zalloc(fadv_zone) : zalloc_noblock(fadv_zone); - if (fce != NULL) + if (fce != NULL) { bzero(fce, fadv_zone_size); + } - return (fce); + return fce; } void @@ -165,28 +166,32 @@ flowadv_free_entry(struct flowadv_fcentry *fce) void flowadv_add(struct flowadv_fclist *fcl) { - if (STAILQ_EMPTY(fcl)) + if (STAILQ_EMPTY(fcl)) { return; + } lck_mtx_lock_spin(&fadv_lock); STAILQ_CONCAT(&fadv_list, fcl); VERIFY(!STAILQ_EMPTY(&fadv_list)); - if (!fadv_active && fadv_thread != THREAD_NULL) + if (!fadv_active && fadv_thread != THREAD_NULL) { wakeup_one((caddr_t)&fadv_list); + } lck_mtx_unlock(&fadv_lock); } void -flowadv_add_entry(struct flowadv_fcentry *fce) { +flowadv_add_entry(struct flowadv_fcentry *fce) +{ lck_mtx_lock_spin(&fadv_lock); STAILQ_INSERT_HEAD(&fadv_list, fce, fce_link); VERIFY(!STAILQ_EMPTY(&fadv_list)); - if (!fadv_active && fadv_thread != THREAD_NULL) + if (!fadv_active && fadv_thread != THREAD_NULL) { wakeup_one((caddr_t)&fadv_list); + } lck_mtx_unlock(&fadv_lock); } @@ -233,8 +238,9 @@ flowadv_thread_cont(int err) lck_mtx_lock_spin(&fadv_lock); /* if there's no pending request, we're done */ - if (STAILQ_EMPTY(&fadv_list)) + if (STAILQ_EMPTY(&fadv_list)) { break; + } } fadv_active = 0; } diff --git a/bsd/net/flowadv.h b/bsd/net/flowadv.h index 76ae8a498..c3872a425 100644 --- a/bsd/net/flowadv.h +++ b/bsd/net/flowadv.h @@ -27,26 +27,26 @@ */ #ifndef _NET_FLOWADV_H_ -#define _NET_FLOWADV_H_ +#define _NET_FLOWADV_H_ #ifdef KERNEL_PRIVATE #include #include -#define FADV_SUCCESS 0 /* success */ -#define FADV_FLOW_CONTROLLED 1 /* regular flow control */ -#define FADV_SUSPENDED 2 /* flow control due to suspension */ +#define FADV_SUCCESS 0 /* success */ +#define FADV_FLOW_CONTROLLED 1 /* regular flow control */ +#define FADV_SUSPENDED 2 /* flow control due to suspension */ struct flowadv { - int32_t code; /* FADV advisory code */ + int32_t code; /* FADV advisory code */ }; #ifdef BSD_KERNEL_PRIVATE struct flowadv_fcentry { STAILQ_ENTRY(flowadv_fcentry) fce_link; - u_int32_t fce_flowsrc_type; /* FLOWSRC values */ - u_int32_t fce_flowid; + u_int32_t fce_flowsrc_type; /* FLOWSRC values */ + u_int32_t fce_flowid; }; STAILQ_HEAD(flowadv_fclist, flowadv_fcentry); diff --git a/bsd/net/flowhash.c b/bsd/net/flowhash.c index 85761bd68..45d01169c 100644 --- a/bsd/net/flowhash.c +++ b/bsd/net/flowhash.c @@ -54,12 +54,12 @@ static inline u_int64_t getblock64(const u_int64_t *, int); static inline u_int32_t mh3_fmix32(u_int32_t); static inline u_int64_t mh3_fmix64(u_int64_t); -#define ALIGNED16(v) ((((uintptr_t)(v)) & 1) == 0) -#define ALIGNED32(v) ((((uintptr_t)(v)) & 3) == 0) -#define ALIGNED64(v) ((((uintptr_t)(v)) & 7) == 0) +#define ALIGNED16(v) ((((uintptr_t)(v)) & 1) == 0) +#define ALIGNED32(v) ((((uintptr_t)(v)) & 3) == 0) +#define ALIGNED64(v) ((((uintptr_t)(v)) & 7) == 0) -#define ROTL32(x, r) (((x) << (r)) | ((x) >> (32 - (r)))) -#define ROTL64(x, r) (((x) << (r)) | ((x) >> (64 - (r)))) +#define ROTL32(x, r) (((x) << (r)) | ((x) >> (32 - (r)))) +#define ROTL64(x, r) (((x) << (r)) | ((x) >> (64 - (r)))) /* * The following hash algorithms are selected based on performance: @@ -77,13 +77,13 @@ net_flowhash_fn_t *net_flowhash = net_flowhash_jhash; static inline u_int32_t getblock32(const u_int32_t *p, int i) { - return (p[i]); + return p[i]; } static inline u_int64_t getblock64(const u_int64_t *p, int i) { - return (p[i]); + return p[i]; } #else /* !__i386__ && !__x86_64__ && !__arm64__*/ static inline u_int32_t @@ -109,7 +109,7 @@ getblock32(const u_int32_t *p, int i) ((u_int32_t)bytes[0]); 
#endif /* LITTLE_ENDIAN */ } - return (value); + return value; } static inline u_int64_t @@ -143,7 +143,7 @@ getblock64(const u_int64_t *p, int i) ((u_int64_t)bytes[0]); #endif /* LITTLE_ENDIAN */ } - return (value); + return value; } #endif /* !__i386__ && !__x86_64 && !__arm64__ */ @@ -156,7 +156,7 @@ mh3_fmix32(u_int32_t h) h *= 0xc2b2ae35; h ^= h >> 16; - return (h); + return h; } static inline u_int64_t @@ -168,14 +168,14 @@ mh3_fmix64(u_int64_t k) k *= 0xc4ceb9fe1a85ec53LLU; k ^= k >> 33; - return (k); + return k; } /* * MurmurHash3_x86_32 */ -#define MH3_X86_32_C1 0xcc9e2d51 -#define MH3_X86_32_C2 0x1b873593 +#define MH3_X86_32_C1 0xcc9e2d51 +#define MH3_X86_32_C2 0x1b873593 u_int32_t net_flowhash_mh3_x86_32(const void *key, u_int32_t len, const u_int32_t seed) @@ -209,31 +209,32 @@ net_flowhash_mh3_x86_32(const void *key, u_int32_t len, const u_int32_t seed) switch (len & 3) { case 3: k1 ^= tail[2] << 16; - /* FALLTHRU */ + /* FALLTHRU */ case 2: k1 ^= tail[1] << 8; - /* FALLTHRU */ + /* FALLTHRU */ case 1: k1 ^= tail[0]; k1 *= MH3_X86_32_C1; k1 = ROTL32(k1, 15); k1 *= MH3_X86_32_C2; h1 ^= k1; - }; + } + ; /* finalization */ h1 ^= len; h1 = mh3_fmix32(h1); - return (h1); + return h1; } /* * MurmurHash3_x64_128 */ -#define MH3_X64_128_C1 0x87c37b91114253d5LLU -#define MH3_X64_128_C2 0x4cf5ad432745937fLLU +#define MH3_X64_128_C1 0x87c37b91114253d5LLU +#define MH3_X64_128_C2 0x4cf5ad432745937fLLU u_int32_t net_flowhash_mh3_x64_128(const void *key, u_int32_t len, const u_int32_t seed) @@ -255,9 +256,9 @@ net_flowhash_mh3_x64_128(const void *key, u_int32_t len, const u_int32_t seed) k1 *= MH3_X64_128_C1; #if defined(__x86_64__) - __asm__ ( "rol $31, %[k1]\n\t" :[k1] "+r" (k1) : :); + __asm__ ( "rol $31, %[k1]\n\t" :[k1] "+r" (k1) : :); #elif defined(__arm64__) - __asm__ ( "ror %[k1], %[k1], #(64-31)\n\t" :[k1] "+r" (k1) : :); + __asm__ ( "ror %[k1], %[k1], #(64-31)\n\t" :[k1] "+r" (k1) : :); #else /* !__x86_64__ && !__arm64__ */ k1 = ROTL64(k1, 31); #endif /* !__x86_64__ && !__arm64__ */ @@ -265,35 +266,35 @@ net_flowhash_mh3_x64_128(const void *key, u_int32_t len, const u_int32_t seed) h1 ^= k1; #if defined(__x86_64__) - __asm__ ( "rol $27, %[h1]\n\t" :[h1] "+r" (h1) : :); + __asm__ ( "rol $27, %[h1]\n\t" :[h1] "+r" (h1) : :); #elif defined(__arm64__) - __asm__ ( "ror %[h1], %[h1], #(64-27)\n\t" :[h1] "+r" (h1) : :); + __asm__ ( "ror %[h1], %[h1], #(64-27)\n\t" :[h1] "+r" (h1) : :); #else /* !__x86_64__ && !__arm64__ */ - h1 = ROTL64(h1, 27); + h1 = ROTL64(h1, 27); #endif /* !__x86_64__ && !__arm64__ */ h1 += h2; h1 = h1 * 5 + 0x52dce729; k2 *= MH3_X64_128_C2; #if defined(__x86_64__) - __asm__ ( "rol $33, %[k2]\n\t" :[k2] "+r" (k2) : :); + __asm__ ( "rol $33, %[k2]\n\t" :[k2] "+r" (k2) : :); #elif defined(__arm64__) - __asm__ ( "ror %[k2], %[k2], #(64-33)\n\t" :[k2] "+r" (k2) : :); + __asm__ ( "ror %[k2], %[k2], #(64-33)\n\t" :[k2] "+r" (k2) : :); #else /* !__x86_64__ && !__arm64__ */ - k2 = ROTL64(k2, 33); + k2 = ROTL64(k2, 33); #endif /* !__x86_64__ && !__arm64__ */ k2 *= MH3_X64_128_C1; h2 ^= k2; #if defined(__x86_64__) - __asm__ ( "rol $31, %[h2]\n\t" :[h2] "+r" (h2) : :); + __asm__ ( "rol $31, %[h2]\n\t" :[h2] "+r" (h2) : :); #elif defined(__arm64__) - __asm__ ( "ror %[h2], %[h2], #(64-31)\n\t" :[h2] "+r" (h2) : :); + __asm__ ( "ror %[h2], %[h2], #(64-31)\n\t" :[h2] "+r" (h2) : :); #else /* !__x86_64__ && !__arm64__ */ - h2 = ROTL64(h2, 31); + h2 = ROTL64(h2, 31); #endif /* !__x86_64__ && !__arm64__ */ h2 += h1; - h2 = h2 * 5+ 0x38495ab5; + h2 = h2 * 5 + 0x38495ab5; } /* tail */ 
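The inline-assembly branches above exist because arm64 has no rotate-left instruction: a left-rotate by r is issued as a ror by (64 - r), while x86_64 uses rol directly, and both must agree with the portable ROTL64() fallback. A minimal standalone check of that identity (plain C, nothing xnu-specific assumed; ROTL64/ROTR64 here mirror the macro shape used in flowhash.c):

#include <assert.h>
#include <stdint.h>

/* portable rotates, same shape as the ROTL64() macro in flowhash.c */
#define ROTL64(x, r)	(((x) << (r)) | ((x) >> (64 - (r))))
#define ROTR64(x, r)	(((x) >> (r)) | ((x) << (64 - (r))))

int
main(void)
{
	uint64_t x = 0x87c37b91114253d5ULL;	/* MH3_X64_128_C1 */

	/*
	 * rotl(x, r) == rotr(x, 64 - r); this is the identity the
	 * arm64 "ror ... #(64-31)" / "#(64-27)" / "#(64-33)" branches
	 * above rely on (valid for 0 < r < 64).
	 */
	assert(ROTL64(x, 31) == ROTR64(x, 64 - 31));
	assert(ROTL64(x, 27) == ROTR64(x, 64 - 27));
	assert(ROTL64(x, 33) == ROTR64(x, 64 - 33));
	return 0;
}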
@@ -304,69 +305,70 @@ net_flowhash_mh3_x64_128(const void *key, u_int32_t len, const u_int32_t seed) switch (len & 15) { case 15: k2 ^= ((u_int64_t)tail[14]) << 48; - /* FALLTHRU */ + /* FALLTHRU */ case 14: k2 ^= ((u_int64_t)tail[13]) << 40; - /* FALLTHRU */ + /* FALLTHRU */ case 13: k2 ^= ((u_int64_t)tail[12]) << 32; - /* FALLTHRU */ + /* FALLTHRU */ case 12: k2 ^= ((u_int64_t)tail[11]) << 24; - /* FALLTHRU */ + /* FALLTHRU */ case 11: k2 ^= ((u_int64_t)tail[10]) << 16; - /* FALLTHRU */ + /* FALLTHRU */ case 10: k2 ^= ((u_int64_t)tail[9]) << 8; - /* FALLTHRU */ + /* FALLTHRU */ case 9: k2 ^= ((u_int64_t)tail[8]) << 0; k2 *= MH3_X64_128_C2; #if defined(__x86_64__) - __asm__ ( "rol $33, %[k2]\n\t" :[k2] "+r" (k2) : :); + __asm__ ( "rol $33, %[k2]\n\t" :[k2] "+r" (k2) : :); #elif defined(__arm64__) - __asm__ ( "ror %[k2], %[k2], #(64-33)\n\t" :[k2] "+r" (k2) : :); + __asm__ ( "ror %[k2], %[k2], #(64-33)\n\t" :[k2] "+r" (k2) : :); #else /* !__x86_64__ && !__arm64__ */ - k2 = ROTL64(k2, 33); + k2 = ROTL64(k2, 33); #endif /* !__x86_64__ && !__arm64__ */ k2 *= MH3_X64_128_C1; h2 ^= k2; - /* FALLTHRU */ + /* FALLTHRU */ case 8: k1 ^= ((u_int64_t)tail[7]) << 56; - /* FALLTHRU */ + /* FALLTHRU */ case 7: k1 ^= ((u_int64_t)tail[6]) << 48; - /* FALLTHRU */ + /* FALLTHRU */ case 6: k1 ^= ((u_int64_t)tail[5]) << 40; - /* FALLTHRU */ + /* FALLTHRU */ case 5: k1 ^= ((u_int64_t)tail[4]) << 32; - /* FALLTHRU */ + /* FALLTHRU */ case 4: k1 ^= ((u_int64_t)tail[3]) << 24; - /* FALLTHRU */ + /* FALLTHRU */ case 3: k1 ^= ((u_int64_t)tail[2]) << 16; - /* FALLTHRU */ + /* FALLTHRU */ case 2: k1 ^= ((u_int64_t)tail[1]) << 8; - /* FALLTHRU */ + /* FALLTHRU */ case 1: k1 ^= ((u_int64_t)tail[0]) << 0; k1 *= MH3_X64_128_C1; #if defined(__x86_64__) - __asm__ ( "rol $31, %[k1]\n\t" :[k1] "+r" (k1) : :); + __asm__ ( "rol $31, %[k1]\n\t" :[k1] "+r" (k1) : :); #elif defined(__arm64__) - __asm__ ( "ror %[k1], %[k1], #(64-31)\n\t" :[k1] "+r" (k1) : :); + __asm__ ( "ror %[k1], %[k1], #(64-31)\n\t" :[k1] "+r" (k1) : :); #else /* !__x86_64__ && !__arm64__ */ - k1 = ROTL64(k1, 31); + k1 = ROTL64(k1, 31); #endif /* !__x86_64__ && !__arm64__ */ k1 *= MH3_X64_128_C2; h1 ^= k1; - }; + } + ; /* finalization */ h1 ^= len; @@ -382,28 +384,28 @@ net_flowhash_mh3_x64_128(const void *key, u_int32_t len, const u_int32_t seed) h2 += h1; /* throw all but lowest 32-bit */ - return (h1 & 0xffffffff); + return h1 & 0xffffffff; } -#define JHASH_INIT 0xdeadbeef +#define JHASH_INIT 0xdeadbeef -#define JHASH_MIX(a, b, c) { \ - a -= c; a ^= ROTL32(c, 4); c += b; \ - b -= a; b ^= ROTL32(a, 6); a += c; \ - c -= b; c ^= ROTL32(b, 8); b += a; \ - a -= c; a ^= ROTL32(c, 16); c += b; \ - b -= a; b ^= ROTL32(a, 19); a += c; \ - c -= b; c ^= ROTL32(b, 4); b += a; \ +#define JHASH_MIX(a, b, c) { \ + a -= c; a ^= ROTL32(c, 4); c += b; \ + b -= a; b ^= ROTL32(a, 6); a += c; \ + c -= b; c ^= ROTL32(b, 8); b += a; \ + a -= c; a ^= ROTL32(c, 16); c += b; \ + b -= a; b ^= ROTL32(a, 19); a += c; \ + c -= b; c ^= ROTL32(b, 4); b += a; \ } -#define JHASH_FINAL(a, b, c) { \ - c ^= b; c -= ROTL32(b, 14); \ - a ^= c; a -= ROTL32(c, 11); \ - b ^= a; b -= ROTL32(a, 25); \ - c ^= b; c -= ROTL32(b, 16); \ - a ^= c; a -= ROTL32(c, 4); \ - b ^= a; b -= ROTL32(a, 14); \ - c ^= b; c -= ROTL32(b, 24); \ +#define JHASH_FINAL(a, b, c) { \ + c ^= b; c -= ROTL32(b, 14); \ + a ^= c; a -= ROTL32(c, 11); \ + b ^= a; b -= ROTL32(a, 25); \ + c ^= b; c -= ROTL32(b, 16); \ + a ^= c; a -= ROTL32(c, 4); \ + b ^= a; b -= ROTL32(a, 14); \ + c ^= b; c -= ROTL32(b, 24); \ } #if BYTE_ORDER == 
BIG_ENDIAN @@ -508,12 +510,12 @@ net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed) case 0: /* zero length requires no mixing */ - return (c); + return c; } JHASH_FINAL(a, b, c); - return (c); + return c; } /* need to read the key one byte at a time */ @@ -542,49 +544,49 @@ net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed) switch (len) { case 12: c += k[11]; - /* FALLTHRU */ + /* FALLTHRU */ case 11: c += ((u_int32_t)k[10]) << 8; - /* FALLTHRU */ + /* FALLTHRU */ case 10: c += ((u_int32_t)k[9]) << 16; - /* FALLTHRU */ + /* FALLTHRU */ case 9: c += ((u_int32_t)k[8]) << 24; - /* FALLTHRU */ + /* FALLTHRU */ case 8: b += k[7]; - /* FALLTHRU */ + /* FALLTHRU */ case 7: b += ((u_int32_t)k[6]) << 8; - /* FALLTHRU */ + /* FALLTHRU */ case 6: b += ((u_int32_t)k[5]) << 16; - /* FALLTHRU */ + /* FALLTHRU */ case 5: b += ((u_int32_t)k[4]) << 24; - /* FALLTHRU */ + /* FALLTHRU */ case 4: a += k[3]; - /* FALLTHRU */ + /* FALLTHRU */ case 3: a += ((u_int32_t)k[2]) << 8; - /* FALLTHRU */ + /* FALLTHRU */ case 2: a += ((u_int32_t)k[1]) << 16; - /* FALLTHRU */ + /* FALLTHRU */ case 1: a += ((u_int32_t)k[0]) << 24; break; case 0: /* zero length requires no mixing */ - return (c); + return c; } JHASH_FINAL(a, b, c); - return (c); + return c; } #else /* LITTLE_ENDIAN */ /* @@ -697,162 +699,162 @@ net_flowhash_jhash(const void *key, u_int32_t len, const u_int32_t seed) case 0: /* zero length requires no mixing */ - return (c); + return c; } JHASH_FINAL(a, b, c); - return (c); + return c; } #if !defined(__i386__) && !defined(__x86_64__) else if (ALIGNED16(key)) { #endif /* !defined(__i386__) && !defined(__x86_64__) */ - /* read 16-bit chunks */ - const u_int16_t *k = (const u_int16_t *)key; - const u_int8_t *k8; - - /* all but last block: aligned reads and different mixing */ - while (len > 12) { - a += k[0] + (((u_int32_t)k[1]) << 16); - b += k[2] + (((u_int32_t)k[3]) << 16); - c += k[4] + (((u_int32_t)k[5]) << 16); - JHASH_MIX(a, b, c); - len -= 12; - k += 6; - } - - /* handle the last (probably partial) block */ - k8 = (const u_int8_t *)k; - switch (len) { - case 12: - c += k[4] + (((u_int32_t)k[5]) << 16); - b += k[2] + (((u_int32_t)k[3]) << 16); - a += k[0] + (((u_int32_t)k[1]) << 16); - break; - - case 11: - c += ((u_int32_t)k8[10]) << 16; - /* FALLTHRU */ - case 10: - c += k[4]; - b += k[2] + (((u_int32_t)k[3]) << 16); - a += k[0] + (((u_int32_t)k[1]) << 16); - break; - - case 9: - c += k8[8]; - /* FALLTHRU */ - case 8: - b += k[2] + (((u_int32_t)k[3]) << 16); - a += k[0] + (((u_int32_t)k[1]) << 16); - break; - - case 7: - b += ((u_int32_t)k8[6]) << 16; - /* FALLTHRU */ - case 6: - b += k[2]; - a += k[0] + (((u_int32_t)k[1]) << 16); - break; - - case 5: - b += k8[4]; - /* FALLTHRU */ - case 4: - a += k[0] + (((u_int32_t)k[1]) << 16); - break; - - case 3: - a += ((u_int32_t)k8[2]) << 16; - /* FALLTHRU */ - case 2: - a += k[0]; - break; - - case 1: - a += k8[0]; - break; - - case 0: - /* zero length requires no mixing */ - return (c); - } - - JHASH_FINAL(a, b, c); + /* read 16-bit chunks */ + const u_int16_t *k = (const u_int16_t *)key; + const u_int8_t *k8; - return (c); -#if !defined(__i386__) && !defined(__x86_64__) - } - - /* need to read the key one byte at a time */ - const u_int8_t *k = (const u_int8_t *)key; - - /* all but the last block: affect some 32 bits of (a,b,c) */ + /* all but last block: aligned reads and different mixing */ while (len > 12) { - a += k[0]; - a += ((u_int32_t)k[1]) << 8; - a += ((u_int32_t)k[2]) << 16; - a += ((u_int32_t)k[3]) << 
24; - b += k[4]; - b += ((u_int32_t)k[5]) << 8; - b += ((u_int32_t)k[6]) << 16; - b += ((u_int32_t)k[7]) << 24; - c += k[8]; - c += ((u_int32_t)k[9]) << 8; - c += ((u_int32_t)k[10]) << 16; - c += ((u_int32_t)k[11]) << 24; + a += k[0] + (((u_int32_t)k[1]) << 16); + b += k[2] + (((u_int32_t)k[3]) << 16); + c += k[4] + (((u_int32_t)k[5]) << 16); JHASH_MIX(a, b, c); len -= 12; - k += 12; + k += 6; } - /* last block: affect all 32 bits of (c) */ + /* handle the last (probably partial) block */ + k8 = (const u_int8_t *)k; switch (len) { case 12: - c += ((u_int32_t)k[11]) << 24; - /* FALLTHRU */ + c += k[4] + (((u_int32_t)k[5]) << 16); + b += k[2] + (((u_int32_t)k[3]) << 16); + a += k[0] + (((u_int32_t)k[1]) << 16); + break; + case 11: - c += ((u_int32_t)k[10]) << 16; - /* FALLTHRU */ + c += ((u_int32_t)k8[10]) << 16; + /* FALLTHRU */ case 10: - c += ((u_int32_t)k[9]) << 8; - /* FALLTHRU */ + c += k[4]; + b += k[2] + (((u_int32_t)k[3]) << 16); + a += k[0] + (((u_int32_t)k[1]) << 16); + break; + case 9: - c += k[8]; - /* FALLTHRU */ + c += k8[8]; + /* FALLTHRU */ case 8: - b += ((u_int32_t)k[7]) << 24; - /* FALLTHRU */ + b += k[2] + (((u_int32_t)k[3]) << 16); + a += k[0] + (((u_int32_t)k[1]) << 16); + break; + case 7: - b += ((u_int32_t)k[6]) << 16; - /* FALLTHRU */ + b += ((u_int32_t)k8[6]) << 16; + /* FALLTHRU */ case 6: - b += ((u_int32_t)k[5]) << 8; - /* FALLTHRU */ + b += k[2]; + a += k[0] + (((u_int32_t)k[1]) << 16); + break; + case 5: - b += k[4]; - /* FALLTHRU */ + b += k8[4]; + /* FALLTHRU */ case 4: - a += ((u_int32_t)k[3]) << 24; - /* FALLTHRU */ + a += k[0] + (((u_int32_t)k[1]) << 16); + break; + case 3: - a += ((u_int32_t)k[2]) << 16; - /* FALLTHRU */ + a += ((u_int32_t)k8[2]) << 16; + /* FALLTHRU */ case 2: - a += ((u_int32_t)k[1]) << 8; - /* FALLTHRU */ - case 1: a += k[0]; break; + case 1: + a += k8[0]; + break; + case 0: /* zero length requires no mixing */ - return (c); + return c; } JHASH_FINAL(a, b, c); - return (c); + return c; +#if !defined(__i386__) && !defined(__x86_64__) +} + +/* need to read the key one byte at a time */ +const u_int8_t *k = (const u_int8_t *)key; + +/* all but the last block: affect some 32 bits of (a,b,c) */ +while (len > 12) { + a += k[0]; + a += ((u_int32_t)k[1]) << 8; + a += ((u_int32_t)k[2]) << 16; + a += ((u_int32_t)k[3]) << 24; + b += k[4]; + b += ((u_int32_t)k[5]) << 8; + b += ((u_int32_t)k[6]) << 16; + b += ((u_int32_t)k[7]) << 24; + c += k[8]; + c += ((u_int32_t)k[9]) << 8; + c += ((u_int32_t)k[10]) << 16; + c += ((u_int32_t)k[11]) << 24; + JHASH_MIX(a, b, c); + len -= 12; + k += 12; +} + +/* last block: affect all 32 bits of (c) */ +switch (len) { +case 12: + c += ((u_int32_t)k[11]) << 24; +/* FALLTHRU */ +case 11: + c += ((u_int32_t)k[10]) << 16; +/* FALLTHRU */ +case 10: + c += ((u_int32_t)k[9]) << 8; +/* FALLTHRU */ +case 9: + c += k[8]; +/* FALLTHRU */ +case 8: + b += ((u_int32_t)k[7]) << 24; +/* FALLTHRU */ +case 7: + b += ((u_int32_t)k[6]) << 16; +/* FALLTHRU */ +case 6: + b += ((u_int32_t)k[5]) << 8; +/* FALLTHRU */ +case 5: + b += k[4]; +/* FALLTHRU */ +case 4: + a += ((u_int32_t)k[3]) << 24; +/* FALLTHRU */ +case 3: + a += ((u_int32_t)k[2]) << 16; +/* FALLTHRU */ +case 2: + a += ((u_int32_t)k[1]) << 8; +/* FALLTHRU */ +case 1: + a += k[0]; + break; + +case 0: + /* zero length requires no mixing */ + return c; +} + +JHASH_FINAL(a, b, c); + +return c; #endif /* !defined(__i386__) && !defined(__x86_64__) */ } #endif /* LITTLE_ENDIAN */ diff --git a/bsd/net/flowhash.h b/bsd/net/flowhash.h index dc7c3b58f..8f29c8d6d 100644 --- 
a/bsd/net/flowhash.h +++ b/bsd/net/flowhash.h @@ -27,7 +27,7 @@ */ #ifndef _NET_FLOWHASH_H_ -#define _NET_FLOWHASH_H_ +#define _NET_FLOWHASH_H_ #include @@ -39,7 +39,7 @@ extern "C" { * If 32-bit hash value is too large, use this macro to truncate * it to n-bit; masking is a faster operation than modulus. */ -#define HASHMASK(n) ((1UL << (n)) - 1) +#define HASHMASK(n) ((1UL << (n)) - 1) /* * Returns 32-bit hash value. Hashes which are capable of returning diff --git a/bsd/net/ieee8023ad.h b/bsd/net/ieee8023ad.h index 7a2494d23..46f40763c 100644 --- a/bsd/net/ieee8023ad.h +++ b/bsd/net/ieee8023ad.h @@ -2,7 +2,7 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,7 +30,7 @@ * ieee8023ad.h */ -/* +/* * Modification History * * May 14, 2004 Dieter Siegmund (dieter@apple.com) @@ -39,15 +39,15 @@ #ifndef _NET_IEEE8023AD_H_ -#define _NET_IEEE8023AD_H_ +#define _NET_IEEE8023AD_H_ #include -#define IEEE8023AD_SLOW_PROTO_ETHERTYPE 0x8809 +#define IEEE8023AD_SLOW_PROTO_ETHERTYPE 0x8809 #define IEEE8023AD_SLOW_PROTO_MULTICAST { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x02 } -#define IEEE8023AD_SLOW_PROTO_SUBTYPE_LACP 1 -#define IEEE8023AD_SLOW_PROTO_SUBTYPE_LA_MARKER_PROTOCOL 2 -#define IEEE8023AD_SLOW_PROTO_SUBTYPE_RESERVED_START 3 -#define IEEE8023AD_SLOW_PROTO_SUBTYPE_RESERVED_END 10 +#define IEEE8023AD_SLOW_PROTO_SUBTYPE_LACP 1 +#define IEEE8023AD_SLOW_PROTO_SUBTYPE_LA_MARKER_PROTOCOL 2 +#define IEEE8023AD_SLOW_PROTO_SUBTYPE_RESERVED_START 3 +#define IEEE8023AD_SLOW_PROTO_SUBTYPE_RESERVED_END 10 #endif /* _NET_IEEE8023AD_H_ */ diff --git a/bsd/net/if.c b/bsd/net/if.c index 7bf8dc604..79921cbdb 100644 --- a/bsd/net/if.c +++ b/bsd/net/if.c @@ -96,6 +96,7 @@ #include #include #include +#include #include #include #include @@ -131,6 +132,8 @@ #include #endif +#include + /* * System initialization */ @@ -138,9 +141,9 @@ extern char *proc_name_address(void *); /* Lock group and attribute for ifaddr lock */ -lck_attr_t *ifa_mtx_attr; -lck_grp_t *ifa_mtx_grp; -static lck_grp_attr_t *ifa_mtx_grp_attr; +lck_attr_t *ifa_mtx_attr; +lck_grp_t *ifa_mtx_grp; +static lck_grp_attr_t *ifa_mtx_grp_attr; static int ifioctl_ifreq(struct socket *, u_long, struct ifreq *, struct proc *); @@ -169,13 +172,13 @@ static int if_clone_list(int, int *, user_addr_t); MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); -struct ifnethead ifnet_head = TAILQ_HEAD_INITIALIZER(ifnet_head); +struct ifnethead ifnet_head = TAILQ_HEAD_INITIALIZER(ifnet_head); 
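The HASHMASK() macro in the flowhash.h hunk above truncates a 32-bit hash to n bits because, for a power-of-two bucket count, h & ((1 << n) - 1) selects the same bucket as h % (1 << n) without a divide. A quick standalone check of that equivalence (a sketch with arbitrary sample values, plain C):

#include <assert.h>
#include <stdint.h>

#define HASHMASK(n)	((1UL << (n)) - 1)

int
main(void)
{
	/* arbitrary hash value; any uint32_t works */
	uint32_t h = 0xdeadbeefU;

	/* h & HASHMASK(n) == h % 2^n — the mask replaces the modulus */
	assert((h & HASHMASK(10)) == h % (1U << 10));
	assert((h & HASHMASK(16)) == h % (1U << 16));
	return 0;
}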
/* ifnet_ordered_head and if_ordered_count are protected by the ifnet_head lock */ -struct ifnethead ifnet_ordered_head = TAILQ_HEAD_INITIALIZER(ifnet_ordered_head); -static u_int32_t if_ordered_count = 0; +struct ifnethead ifnet_ordered_head = TAILQ_HEAD_INITIALIZER(ifnet_ordered_head); +static u_int32_t if_ordered_count = 0; -static int if_cloners_count; +static int if_cloners_count; LIST_HEAD(, if_clone) if_cloners = LIST_HEAD_INITIALIZER(if_cloners); static struct ifaddr *ifa_ifwithnet_common(const struct sockaddr *, @@ -191,27 +194,27 @@ static void ifma_free(struct ifmultiaddr *); static void ifma_trace(struct ifmultiaddr *, int); #if DEBUG -static unsigned int ifma_debug = 1; /* debugging (enabled) */ +static unsigned int ifma_debug = 1; /* debugging (enabled) */ #else -static unsigned int ifma_debug; /* debugging (disabled) */ +static unsigned int ifma_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int ifma_size; /* size of zone element */ -static struct zone *ifma_zone; /* zone for ifmultiaddr */ +static unsigned int ifma_size; /* size of zone element */ +static struct zone *ifma_zone; /* zone for ifmultiaddr */ -#define IFMA_TRACE_HIST_SIZE 32 /* size of trace history */ +#define IFMA_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ __private_extern__ unsigned int ifma_trace_hist_size = IFMA_TRACE_HIST_SIZE; struct ifmultiaddr_dbg { - struct ifmultiaddr ifma; /* ifmultiaddr */ - u_int16_t ifma_refhold_cnt; /* # of ref */ - u_int16_t ifma_refrele_cnt; /* # of rele */ + struct ifmultiaddr ifma; /* ifmultiaddr */ + u_int16_t ifma_refhold_cnt; /* # of ref */ + u_int16_t ifma_refrele_cnt; /* # of rele */ /* * Circular lists of IFA_ADDREF and IFA_REMREF callers. */ - ctrace_t ifma_refhold[IFMA_TRACE_HIST_SIZE]; - ctrace_t ifma_refrele[IFMA_TRACE_HIST_SIZE]; + ctrace_t ifma_refhold[IFMA_TRACE_HIST_SIZE]; + ctrace_t ifma_refrele[IFMA_TRACE_HIST_SIZE]; /* * Trash list linkage */ @@ -222,21 +225,21 @@ struct ifmultiaddr_dbg { static TAILQ_HEAD(, ifmultiaddr_dbg) ifma_trash_head; static decl_lck_mtx_data(, ifma_trash_lock); -#define IFMA_ZONE_MAX 64 /* maximum elements in zone */ -#define IFMA_ZONE_NAME "ifmultiaddr" /* zone name */ +#define IFMA_ZONE_MAX 64 /* maximum elements in zone */ +#define IFMA_ZONE_NAME "ifmultiaddr" /* zone name */ #if INET6 /* * XXX: declare here to avoid to include many inet6 related files.. * should be more generalized? */ -extern void nd6_setmtu(struct ifnet *); +extern void nd6_setmtu(struct ifnet *); extern lck_mtx_t *nd6_mutex; #endif -SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Link layers"); -SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Generic link-management"); +SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Link layers"); +SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Generic link-management"); SYSCTL_DECL(_net_link_generic_system); @@ -257,10 +260,10 @@ ifa_init(void) ifa_mtx_grp = lck_grp_alloc_init("ifaddr", ifa_mtx_grp_attr); ifa_mtx_attr = lck_attr_alloc_init(); - PE_parse_boot_argn("ifa_debug", &ifma_debug, sizeof (ifma_debug)); + PE_parse_boot_argn("ifa_debug", &ifma_debug, sizeof(ifma_debug)); - ifma_size = (ifma_debug == 0) ? sizeof (struct ifmultiaddr) : - sizeof (struct ifmultiaddr_dbg); + ifma_size = (ifma_debug == 0) ? 
sizeof(struct ifmultiaddr) : + sizeof(struct ifmultiaddr_dbg); ifma_zone = zinit(ifma_size, IFMA_ZONE_MAX * ifma_size, 0, IFMA_ZONE_NAME); @@ -275,7 +278,7 @@ ifa_init(void) TAILQ_INIT(&ifma_trash_head); PE_parse_boot_argn("intcoproc_unrestricted", &intcoproc_unrestricted, - sizeof (intcoproc_unrestricted)); + sizeof(intcoproc_unrestricted)); } /* @@ -324,13 +327,15 @@ if_attach_ifa_common(struct ifnet *ifp, struct ifaddr *ifa, int link) } IFA_ADDREF_LOCKED(ifa); ifa->ifa_debug |= IFD_ATTACHED; - if (link) + if (link) { TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link); - else + } else { TAILQ_INSERT_TAIL(&ifp->if_addrhead, ifa, ifa_link); + } - if (ifa->ifa_attached != NULL) + if (ifa->ifa_attached != NULL) { (*ifa->ifa_attached)(ifa); + } } @@ -372,8 +377,9 @@ if_detach_ifa_common(struct ifnet *ifp, struct ifaddr *ifa, int link) } else if (ifa->ifa_debug & IFD_DEBUG) { struct ifaddr *ifa2; TAILQ_FOREACH(ifa2, &ifp->if_addrhead, ifa_link) { - if (ifa2 == ifa) + if (ifa2 == ifa) { break; + } } if (ifa2 != ifa) { panic("%s: Attempt to detach a stray address ifa=%p", @@ -389,12 +395,13 @@ if_detach_ifa_common(struct ifnet *ifp, struct ifaddr *ifa, int link) } ifa->ifa_debug &= ~(IFD_ATTACHED | IFD_DETACHING); - if (ifa->ifa_detached != NULL) + if (ifa->ifa_detached != NULL) { (*ifa->ifa_detached)(ifa); + } } -#define INITIAL_IF_INDEXLIM 8 +#define INITIAL_IF_INDEXLIM 8 /* * Function: if_next_index @@ -414,16 +421,16 @@ int if_next_index(void); __private_extern__ int if_next_index(void) { - static int if_indexlim = 0; - int new_index; + static int if_indexlim = 0; + int new_index; new_index = ++if_index; if (if_index > if_indexlim) { - unsigned n; - int new_if_indexlim; - caddr_t new_ifnet_addrs; - caddr_t new_ifindex2ifnet; - caddr_t old_ifnet_addrs; + unsigned n; + int new_if_indexlim; + caddr_t new_ifnet_addrs; + caddr_t new_ifindex2ifnet; + caddr_t old_ifnet_addrs; old_ifnet_addrs = (caddr_t)ifnet_addrs; if (ifnet_addrs == NULL) { @@ -437,11 +444,11 @@ if_next_index(void) new_ifnet_addrs = _MALLOC(n, M_IFADDR, M_WAITOK | M_ZERO); if (new_ifnet_addrs == NULL) { --if_index; - return (-1); + return -1; } new_ifindex2ifnet = new_ifnet_addrs - + new_if_indexlim * sizeof(caddr_t); + + new_if_indexlim * sizeof(caddr_t); if (ifnet_addrs != NULL) { /* copy the existing data */ bcopy((caddr_t)ifnet_addrs, new_ifnet_addrs, @@ -461,7 +468,7 @@ if_next_index(void) _FREE((caddr_t)old_ifnet_addrs, M_IFADDR); } } - return (new_index); + return new_index; } /* @@ -478,11 +485,13 @@ if_clone_create(char *name, int len, void *params) int err; ifc = if_clone_lookup(name, &unit); - if (ifc == NULL) - return (EINVAL); + if (ifc == NULL) { + return EINVAL; + } - if (ifunit(name) != NULL) - return (EEXIST); + if (ifunit(name) != NULL) { + return EEXIST; + } bytoff = bitoff = 0; wildcard = (unit == UINT32_MAX); @@ -491,23 +500,27 @@ if_clone_create(char *name, int len, void *params) */ if (wildcard) { while ((bytoff < ifc->ifc_bmlen) && - (ifc->ifc_units[bytoff] == 0xff)) + (ifc->ifc_units[bytoff] == 0xff)) { bytoff++; - if (bytoff >= ifc->ifc_bmlen) - return (ENOSPC); - while ((ifc->ifc_units[bytoff] & (1 << bitoff)) != 0) + } + if (bytoff >= ifc->ifc_bmlen) { + return ENOSPC; + } + while ((ifc->ifc_units[bytoff] & (1 << bitoff)) != 0) { bitoff++; + } unit = (bytoff << 3) + bitoff; } - if (unit > ifc->ifc_maxunit) - return (ENXIO); + if (unit > ifc->ifc_maxunit) { + return ENXIO; + } lck_mtx_lock(&ifc->ifc_mutex); err = (*ifc->ifc_create)(ifc, unit, params); if (err != 0) { lck_mtx_unlock(&ifc->ifc_mutex); - 
return (err); + return err; } if (!wildcard) { @@ -524,9 +537,11 @@ if_clone_create(char *name, int len, void *params) /* In the wildcard case, we need to update the name. */ if (wildcard) { - for (dp = name; *dp != '\0'; dp++); - if (snprintf(dp, len - (dp-name), "%d", unit) > - len - (dp-name) - 1) { + for (dp = name; *dp != '\0'; dp++) { + ; + } + if (snprintf(dp, len - (dp - name), "%d", unit) > + len - (dp - name) - 1) { /* * This can only be a programmer error and * there's no straightforward way to recover if @@ -535,11 +550,10 @@ if_clone_create(char *name, int len, void *params) panic("%s: interface name too long", __func__); /* NOTREACHED */ } - } lck_mtx_unlock(&ifc->ifc_mutex); - return (0); + return 0; } /* @@ -594,9 +608,10 @@ if_clone_destroy(const char *name) lck_mtx_unlock(&ifc->ifc_mutex); done: - if (ifp != NULL) + if (ifp != NULL) { ifnet_decr_iorefcnt(ifp); - return (error); + } + return error; } /* @@ -610,10 +625,11 @@ if_clone_lookup(const char *name, u_int32_t *unitp) const char *cp; u_int32_t i; - for (ifc = LIST_FIRST(&if_cloners); ifc != NULL; ) { + for (ifc = LIST_FIRST(&if_cloners); ifc != NULL;) { for (cp = name, i = 0; i < ifc->ifc_namelen; i++, cp++) { - if (ifc->ifc_name[i] != *cp) + if (ifc->ifc_name[i] != *cp) { goto next_ifc; + } } goto found_name; next_ifc: @@ -621,7 +637,7 @@ next_ifc: } /* No match. */ - return ((struct if_clone *)NULL); + return (struct if_clone *)NULL; found_name: if (*cp == '\0') { @@ -630,15 +646,16 @@ found_name: for (i = 0; *cp != '\0'; cp++) { if (*cp < '0' || *cp > '9') { /* Bogus unit number. */ - return (NULL); + return NULL; } i = (i * 10) + (*cp - '0'); } } - if (unitp != NULL) + if (unitp != NULL) { *unitp = i; - return (ifc); + } + return ifc; } void * @@ -649,10 +666,11 @@ if_clone_softc_allocate(const struct if_clone *ifc) VERIFY(ifc != NULL); p_clone = zalloc(ifc->ifc_zone); - if (p_clone != NULL) + if (p_clone != NULL) { bzero(p_clone, ifc->ifc_softc_size); + } - return (p_clone); + return p_clone; } void @@ -683,20 +701,22 @@ if_clone_attach(struct if_clone *ifc) */ maxclone = ifc->ifc_maxunit + 1; len = maxclone >> 3; - if ((len << 3) < maxclone) + if ((len << 3) < maxclone) { len++; + } ifc->ifc_units = _MALLOC(len, M_CLONE, M_WAITOK | M_ZERO); - if (ifc->ifc_units == NULL) - return (ENOBUFS); + if (ifc->ifc_units == NULL) { + return ENOBUFS; + } ifc->ifc_bmlen = len; lck_mtx_init(&ifc->ifc_mutex, ifnet_lock_group, ifnet_lock_attr); if (ifc->ifc_softc_size != 0) { - ifc->ifc_zone = zinit(ifc->ifc_softc_size, + ifc->ifc_zone = zinit(ifc->ifc_softc_size, ifc->ifc_zone_max_elem * ifc->ifc_softc_size, 0, ifc->ifc_name); if (ifc->ifc_zone == NULL) { FREE(ifc->ifc_units, M_CLONE); - return (ENOBUFS); + return ENOBUFS; } zone_change(ifc->ifc_zone, Z_EXPAND, TRUE); zone_change(ifc->ifc_zone, Z_CALLERACCT, FALSE); @@ -717,7 +737,7 @@ if_clone_attach(struct if_clone *ifc) ifc->ifc_units[bytoff] |= (1 << bitoff); } - return (0); + return 0; } /* @@ -728,8 +748,9 @@ if_clone_detach(struct if_clone *ifc) { LIST_REMOVE(ifc, ifc_list); FREE(ifc->ifc_units, M_CLONE); - if (ifc->ifc_softc_size != 0) + if (ifc->ifc_softc_size != 0) { zdestroy(ifc->ifc_zone); + } lck_mtx_destroy(&ifc->ifc_mutex, ifnet_lock_group); if_cloners_count--; @@ -748,11 +769,12 @@ if_clone_list(int count, int *ret_total, user_addr_t dst) *ret_total = if_cloners_count; if (dst == USER_ADDR_NULL) { /* Just asking how many there are. 
*/ - return (0); + return 0; } - if (count < 0) - return (EINVAL); + if (count < 0) { + return EINVAL; + } count = (if_cloners_count < count) ? if_cloners_count : count; @@ -761,11 +783,12 @@ if_clone_list(int count, int *ret_total, user_addr_t dst) bzero(outbuf, sizeof(outbuf)); strlcpy(outbuf, ifc->ifc_name, IFNAMSIZ); error = copyout(outbuf, dst, IFNAMSIZ); - if (error) + if (error) { break; + } } - return (error); + return error; } u_int32_t @@ -778,10 +801,11 @@ if_functional_type(struct ifnet *ifp, bool exclude_delegate) } else if ((exclude_delegate && (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI)) || (!exclude_delegate && IFNET_IS_WIFI(ifp))) { - if (ifp->if_eflags & IFEF_AWDL) + if (ifp->if_eflags & IFEF_AWDL) { ret = IFRTYPE_FUNCTIONAL_WIFI_AWDL; - else + } else { ret = IFRTYPE_FUNCTIONAL_WIFI_INFRA; + } } else if ((exclude_delegate && (ifp->if_type == IFT_CELLULAR)) || (!exclude_delegate && IFNET_IS_CELLULAR(ifp))) { @@ -798,7 +822,7 @@ if_functional_type(struct ifnet *ifp, bool exclude_delegate) } } - return (ret); + return ret; } /* @@ -808,7 +832,7 @@ if_functional_type(struct ifnet *ifp, bool exclude_delegate) __private_extern__ struct in_ifaddr * ifa_foraddr(unsigned int addr) { - return (ifa_foraddr_scoped(addr, IFSCOPE_NONE)); + return ifa_foraddr_scoped(addr, IFSCOPE_NONE); } /* @@ -826,14 +850,14 @@ ifa_foraddr_scoped(unsigned int addr, unsigned int scope) IFA_LOCK_SPIN(&ia->ia_ifa); if (ia->ia_addr.sin_addr.s_addr == addr && (scope == IFSCOPE_NONE || ia->ia_ifp->if_index == scope)) { - IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for caller */ + IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for caller */ IFA_UNLOCK(&ia->ia_ifa); break; } IFA_UNLOCK(&ia->ia_ifa); } lck_rw_done(in_ifaddr_rwlock); - return (ia); + return ia; } #if INET6 @@ -843,7 +867,7 @@ ifa_foraddr_scoped(unsigned int addr, unsigned int scope) __private_extern__ struct in6_ifaddr * ifa_foraddr6(struct in6_addr *addr6) { - return (ifa_foraddr6_scoped(addr6, IFSCOPE_NONE)); + return ifa_foraddr6_scoped(addr6, IFSCOPE_NONE); } __private_extern__ struct in6_ifaddr * @@ -864,7 +888,7 @@ ifa_foraddr6_scoped(struct in6_addr *addr6, unsigned int scope) } lck_rw_done(&in6_ifaddr_rwlock); - return (ia); + return ia; } #endif /* INET6 */ @@ -880,7 +904,7 @@ ifa_ifpgetprimary(struct ifnet *ifp, int family) TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { IFA_LOCK_SPIN(ifa); if (ifa->ifa_addr->sa_family == family) { - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } @@ -888,19 +912,20 @@ ifa_ifpgetprimary(struct ifnet *ifp, int family) } ifnet_lock_done(ifp); - return (ifa); + return ifa; } static inline int ifa_equal(const struct sockaddr *sa1, const struct sockaddr *sa2) { - - if (!sa1 || !sa2) + if (!sa1 || !sa2) { return 0; - if (sa1->sa_len != sa2->sa_len) + } + if (sa1->sa_len != sa2->sa_len) { return 0; + } - return (bcmp(sa1, sa2, sa1->sa_len) == 0); + return bcmp(sa1, sa2, sa1->sa_len) == 0; } /* @@ -925,7 +950,7 @@ ifa_ifwithaddr_locked(const struct sockaddr *addr) } if (ifa_equal(addr, ifa->ifa_addr)) { result = ifa; - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } @@ -935,7 +960,7 @@ ifa_ifwithaddr_locked(const struct sockaddr *addr) ifa->ifa_broadaddr->sa_len != 0 && ifa_equal(ifa->ifa_broadaddr, addr)) { result = ifa; - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } @@ -944,7 +969,7 @@ ifa_ifwithaddr_locked(const struct sockaddr *addr) 
ifnet_lock_done(ifp); } - return (result); + return result; } struct ifaddr * @@ -958,7 +983,7 @@ ifa_ifwithaddr(const struct sockaddr *addr) ifnet_head_done(); - return (result); + return result; } /* * Locate the point to point interface with a given destination address. @@ -974,7 +999,7 @@ ifa_ifwithdstaddr(const struct sockaddr *addr) ifnet_head_lock_shared(); for (ifp = ifnet_head.tqh_first; ifp && !result; ifp = ifp->if_link.tqe_next) { - if ((ifp->if_flags & IFF_POINTOPOINT)) { + if ((ifp->if_flags & IFF_POINTOPOINT)) { ifnet_lock_shared(ifp); for (ifa = ifp->if_addrhead.tqh_first; ifa; ifa = ifa->ifa_link.tqe_next) { @@ -986,7 +1011,7 @@ ifa_ifwithdstaddr(const struct sockaddr *addr) } if (ifa_equal(addr, ifa->ifa_dstaddr)) { result = ifa; - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } @@ -996,7 +1021,7 @@ ifa_ifwithdstaddr(const struct sockaddr *addr) } } ifnet_head_done(); - return (result); + return result; } /* @@ -1008,11 +1033,12 @@ ifa_ifwithaddr_scoped_locked(const struct sockaddr *addr, unsigned int ifscope) struct ifaddr *result = NULL; struct ifnet *ifp; - if (ifscope == IFSCOPE_NONE) - return (ifa_ifwithaddr_locked(addr)); + if (ifscope == IFSCOPE_NONE) { + return ifa_ifwithaddr_locked(addr); + } if (ifscope > (unsigned int)if_index) { - return (NULL); + return NULL; } ifp = ifindex2ifnet[ifscope]; @@ -1034,7 +1060,7 @@ ifa_ifwithaddr_scoped_locked(const struct sockaddr *addr, unsigned int ifscope) } if (ifa_equal(addr, ifa->ifa_addr)) { result = ifa; - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } @@ -1044,7 +1070,7 @@ ifa_ifwithaddr_scoped_locked(const struct sockaddr *addr, unsigned int ifscope) ifa->ifa_broadaddr->sa_len != 0 && ifa_equal(ifa->ifa_broadaddr, addr)) { result = ifa; - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } @@ -1053,7 +1079,7 @@ ifa_ifwithaddr_scoped_locked(const struct sockaddr *addr, unsigned int ifscope) ifnet_lock_done(ifp); } - return (result); + return result; } struct ifaddr * @@ -1067,19 +1093,19 @@ ifa_ifwithaddr_scoped(const struct sockaddr *addr, unsigned int ifscope) ifnet_head_done(); - return (result); + return result; } struct ifaddr * ifa_ifwithnet(const struct sockaddr *addr) { - return (ifa_ifwithnet_common(addr, IFSCOPE_NONE)); + return ifa_ifwithnet_common(addr, IFSCOPE_NONE); } struct ifaddr * ifa_ifwithnet_scoped(const struct sockaddr *addr, unsigned int ifscope) { - return (ifa_ifwithnet_common(addr, ifscope)); + return ifa_ifwithnet_common(addr, ifscope); } /* @@ -1096,11 +1122,12 @@ ifa_ifwithnet_common(const struct sockaddr *addr, unsigned int ifscope) const char *addr_data = addr->sa_data, *cplim; #if INET6 - if (af != AF_INET && af != AF_INET6) + if (af != AF_INET && af != AF_INET6) { #else - if (af != AF_INET) + if (af != AF_INET) { #endif /* !INET6 */ ifscope = IFSCOPE_NONE; + } ifnet_head_lock_shared(); /* @@ -1112,11 +1139,12 @@ ifa_ifwithnet_common(const struct sockaddr *addr, unsigned int ifscope) (const struct sockaddr_dl *)(uintptr_t)(size_t)addr; if (sdl->sdl_index && sdl->sdl_index <= if_index) { ifa = ifnet_addrs[sdl->sdl_index - 1]; - if (ifa != NULL) + if (ifa != NULL) { IFA_ADDREF(ifa); + } ifnet_head_done(); - return (ifa); + return ifa; } } @@ -1127,7 +1155,7 @@ ifa_ifwithnet_common(const struct sockaddr *addr, unsigned int ifscope) for (ifp = ifnet_head.tqh_first; ifp; ifp = ifp->if_link.tqe_next) { 
ifnet_lock_shared(ifp); for (ifa = ifp->if_addrhead.tqh_first; ifa; - ifa = ifa->ifa_link.tqe_next) { + ifa = ifa->ifa_link.tqe_next) { const char *cp, *cp2, *cp3; IFA_LOCK(ifa); @@ -1163,9 +1191,11 @@ next: cp3 = ifa->ifa_netmask->sa_data; cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; - while (cp3 < cplim) - if ((*cp++ ^ *cp2++) & *cp3++) + while (cp3 < cplim) { + if ((*cp++ ^ *cp2++) & *cp3++) { goto next; /* next address! */ + } + } /* * If the netmask of what we just found * is more specific than what we had before @@ -1176,10 +1206,11 @@ next: if (ifa_maybe == NULL || rn_refines((caddr_t)ifa->ifa_netmask, (caddr_t)ifa_maybe->ifa_netmask)) { - IFA_ADDREF_LOCKED(ifa); /* ifa_maybe */ + IFA_ADDREF_LOCKED(ifa); /* ifa_maybe */ IFA_UNLOCK(ifa); - if (ifa_maybe != NULL) + if (ifa_maybe != NULL) { IFA_REMREF(ifa_maybe); + } ifa_maybe = ifa; } else { IFA_UNLOCK(ifa); @@ -1188,17 +1219,19 @@ next: } ifnet_lock_done(ifp); - if (ifa != NULL) + if (ifa != NULL) { break; + } } ifnet_head_done(); - if (ifa == NULL) + if (ifa == NULL) { ifa = ifa_maybe; - else if (ifa_maybe != NULL) + } else if (ifa_maybe != NULL) { IFA_REMREF(ifa_maybe); + } - return (ifa); + return ifa; } /* @@ -1211,10 +1244,11 @@ ifaof_ifpforaddr_select(const struct sockaddr *addr, struct ifnet *ifp) { u_int af = addr->sa_family; - if (af == AF_INET6) - return (in6_selectsrc_core_ifa(__DECONST(struct sockaddr_in6 *, addr), ifp, 0)); + if (af == AF_INET6) { + return in6_selectsrc_core_ifa(__DECONST(struct sockaddr_in6 *, addr), ifp, 0); + } - return (ifaof_ifpforaddr(addr, ifp)); + return ifaof_ifpforaddr(addr, ifp); } /* @@ -1234,25 +1268,26 @@ ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp) struct ifaddr *better_ifa_maybe = NULL; u_int af = addr->sa_family; - if (af >= AF_MAX) - return (NULL); + if (af >= AF_MAX) { + return NULL; + } ifnet_lock_shared(ifp); for (ifa = ifp->if_addrhead.tqh_first; ifa; - ifa = ifa->ifa_link.tqe_next) { + ifa = ifa->ifa_link.tqe_next) { IFA_LOCK(ifa); if (ifa->ifa_addr->sa_family != af) { IFA_UNLOCK(ifa); continue; } if (ifa_maybe == NULL) { - IFA_ADDREF_LOCKED(ifa); /* for ifa_maybe */ + IFA_ADDREF_LOCKED(ifa); /* for ifa_maybe */ ifa_maybe = ifa; } if (ifa->ifa_netmask == 0) { if (ifa_equal(addr, ifa->ifa_addr) || ifa_equal(addr, ifa->ifa_dstaddr)) { - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } @@ -1261,14 +1296,14 @@ ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp) } if (ifp->if_flags & IFF_POINTOPOINT) { if (ifa_equal(addr, ifa->ifa_dstaddr)) { - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } } else { if (ifa_equal(addr, ifa->ifa_addr)) { /* exact match */ - IFA_ADDREF_LOCKED(ifa); /* for caller */ + IFA_ADDREF_LOCKED(ifa); /* for caller */ IFA_UNLOCK(ifa); break; } @@ -1277,9 +1312,11 @@ ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp) cp3 = ifa->ifa_netmask->sa_data; cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; - for (; cp3 < cplim; cp3++) - if ((*cp++ ^ *cp2++) & *cp3) + for (; cp3 < cplim; cp3++) { + if ((*cp++ ^ *cp2++) & *cp3) { break; + } + } if (cp3 == cplim) { /* subnet match */ if (better_ifa_maybe == NULL) { @@ -1304,12 +1341,14 @@ ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp) ifnet_lock_done(ifp); - if (better_ifa_maybe != NULL) + if (better_ifa_maybe != NULL) { IFA_REMREF(better_ifa_maybe); - if (ifa_maybe != NULL) + } + if (ifa_maybe != NULL) { 
IFA_REMREF(ifa_maybe); + } - return (ifa); + return ifa; } #include @@ -1331,8 +1370,9 @@ link_rtrequest(int cmd, struct rtentry *rt, struct sockaddr *sa) RT_LOCK_ASSERT_HELD(rt); if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) || - ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0)) + ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0)) { return; + } /* Become a regular mutex, just in case */ RT_CONVERT_LOCK(rt); @@ -1343,8 +1383,9 @@ link_rtrequest(int cmd, struct rtentry *rt, struct sockaddr *sa) IFA_LOCK_SPIN(ifa); ifa_rtrequest = ifa->ifa_rtrequest; IFA_UNLOCK(ifa); - if (ifa_rtrequest != NULL && ifa_rtrequest != link_rtrequest) + if (ifa_rtrequest != NULL && ifa_rtrequest != link_rtrequest) { ifa_rtrequest(cmd, rt, sa); + } IFA_REMREF(ifa); } } @@ -1365,7 +1406,7 @@ if_updown( struct ifnet *ifp, int up) { int i; struct ifaddr **ifa; - struct timespec tv; + struct timespec tv; struct ifclassq *ifq = &ifp->if_snd; /* Wait until no one else is changing the up/down state */ @@ -1379,7 +1420,7 @@ if_updown( struct ifnet *ifp, int up) /* Verify that the interface isn't already in the right state */ if ((!up && (ifp->if_flags & IFF_UP) == 0) || - (up && (ifp->if_flags & IFF_UP) == IFF_UP)) { + (up && (ifp->if_flags & IFF_UP) == IFF_UP)) { return; } @@ -1454,16 +1495,19 @@ if_qflush(struct ifnet *ifp, int ifq_locked) { struct ifclassq *ifq = &ifp->if_snd; - if (!ifq_locked) + if (!ifq_locked) { IFCQ_LOCK(ifq); + } - if (IFCQ_IS_ENABLED(ifq)) + if (IFCQ_IS_ENABLED(ifq)) { IFCQ_PURGE(ifq); + } VERIFY(IFCQ_IS_EMPTY(ifq)); - if (!ifq_locked) + if (!ifq_locked) { IFCQ_UNLOCK(ifq); + } } void @@ -1477,19 +1521,24 @@ if_qflush_sc(struct ifnet *ifp, mbuf_svc_class_t sc, u_int32_t flow, VERIFY(sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sc)); VERIFY(flow != 0); - if (!ifq_locked) + if (!ifq_locked) { IFCQ_LOCK(ifq); + } - if (IFCQ_IS_ENABLED(ifq)) + if (IFCQ_IS_ENABLED(ifq)) { IFCQ_PURGE_SC(ifq, sc, flow, cnt, len); + } - if (!ifq_locked) + if (!ifq_locked) { IFCQ_UNLOCK(ifq); + } - if (packets != NULL) + if (packets != NULL) { *packets = cnt + a_cnt; - if (bytes != NULL) + } + if (bytes != NULL) { *bytes = len + a_len; + } } /* @@ -1504,24 +1553,29 @@ ifunit_extract(const char *src, char *dst, size_t dstlen, int *unit) char c; int u; - if (src == NULL || dst == NULL || dstlen == 0 || unit == NULL) - return (-1); + if (src == NULL || dst == NULL || dstlen == 0 || unit == NULL) { + return -1; + } len = strlen(src); - if (len < 2 || len > dstlen) - return (-1); + if (len < 2 || len > dstlen) { + return -1; + } cp = src + len - 1; c = *cp; - if (c < '0' || c > '9') - return (-1); /* trailing garbage */ + if (c < '0' || c > '9') { + return -1; /* trailing garbage */ + } u = 0; m = 1; do { - if (cp == src) - return (-1); /* no interface name */ + if (cp == src) { + return -1; /* no interface name */ + } u += (c - '0') * m; - if (u > 1000000) - return (-1); /* number is unreasonable */ + if (u > 1000000) { + return -1; /* number is unreasonable */ + } m *= 10; c = *--cp; } while (c >= '0' && c <= '9'); @@ -1530,7 +1584,7 @@ ifunit_extract(const char *src, char *dst, size_t dstlen, int *unit) dst[len] = '\0'; *unit = u; - return (0); + return 0; } /* @@ -1544,11 +1598,12 @@ ifunit_common(const char *name, boolean_t hold) struct ifnet *ifp; int unit; - if (ifunit_extract(name, namebuf, sizeof (namebuf), &unit) < 0) - return (NULL); + if (ifunit_extract(name, namebuf, sizeof(namebuf), &unit) < 0) { + return NULL; + } /* for safety, since we use strcmp() below */ - namebuf[sizeof (namebuf) - 1] = '\0'; + 
namebuf[sizeof(namebuf) - 1] = '\0'; /* * Now search all the interfaces for this name/number @@ -1559,24 +1614,27 @@ ifunit_common(const char *name, boolean_t hold) * Use strcmp() rather than strncmp() here, * since we want to match the entire string. */ - if (strcmp(ifp->if_name, namebuf)) + if (strcmp(ifp->if_name, namebuf)) { continue; - if (unit == ifp->if_unit) + } + if (unit == ifp->if_unit) { break; + } } /* if called from ifunit_ref() and ifnet is not attached, bail */ - if (hold && ifp != NULL && !ifnet_is_attached(ifp, 1)) + if (hold && ifp != NULL && !ifnet_is_attached(ifp, 1)) { ifp = NULL; + } ifnet_head_done(); - return (ifp); + return ifp; } struct ifnet * ifunit(const char *name) { - return (ifunit_common(name, FALSE)); + return ifunit_common(name, FALSE); } /* @@ -1587,7 +1645,7 @@ ifunit(const char *name) struct ifnet * ifunit_ref(const char *name) { - return (ifunit_common(name, TRUE)); + return ifunit_common(name, TRUE); } /* @@ -1597,12 +1655,13 @@ ifunit_ref(const char *name) struct ifnet * if_withname(struct sockaddr *sa) { - char ifname[IFNAMSIZ+1]; + char ifname[IFNAMSIZ + 1]; struct sockaddr_dl *sdl = (struct sockaddr_dl *)(void *)sa; if ((sa->sa_family != AF_LINK) || (sdl->sdl_nlen == 0) || - (sdl->sdl_nlen > IFNAMSIZ)) - return (NULL); + (sdl->sdl_nlen > IFNAMSIZ)) { + return NULL; + } /* * ifunit wants a null-terminated name. It may not be null-terminated @@ -1613,7 +1672,7 @@ if_withname(struct sockaddr *sa) bcopy(sdl->sdl_data, ifname, sdl->sdl_nlen); ifname[sdl->sdl_nlen] = '\0'; - return (ifunit(ifname)); + return ifunit(ifname); } static __attribute__((noinline)) int @@ -1622,22 +1681,22 @@ ifioctl_ifconf(u_long cmd, caddr_t data) int error = 0; switch (cmd) { - case OSIOCGIFCONF32: /* struct ifconf32 */ - case SIOCGIFCONF32: { /* struct ifconf32 */ + case OSIOCGIFCONF32: /* struct ifconf32 */ + case SIOCGIFCONF32: { /* struct ifconf32 */ struct ifconf32 ifc; - bcopy(data, &ifc, sizeof (ifc)); + bcopy(data, &ifc, sizeof(ifc)); error = ifconf(cmd, CAST_USER_ADDR_T(ifc.ifc_req), &ifc.ifc_len); - bcopy(&ifc, data, sizeof (ifc)); + bcopy(&ifc, data, sizeof(ifc)); break; } - case SIOCGIFCONF64: /* struct ifconf64 */ - case OSIOCGIFCONF64: { /* struct ifconf64 */ + case SIOCGIFCONF64: /* struct ifconf64 */ + case OSIOCGIFCONF64: { /* struct ifconf64 */ struct ifconf64 ifc; - bcopy(data, &ifc, sizeof (ifc)); + bcopy(data, &ifc, sizeof(ifc)); error = ifconf(cmd, ifc.ifc_req, &ifc.ifc_len); - bcopy(&ifc, data, sizeof (ifc)); + bcopy(&ifc, data, sizeof(ifc)); break; } @@ -1646,7 +1705,7 @@ ifioctl_ifconf(u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -1655,21 +1714,21 @@ ifioctl_ifclone(u_long cmd, caddr_t data) int error = 0; switch (cmd) { - case SIOCIFGCLONERS32: { /* struct if_clonereq32 */ + case SIOCIFGCLONERS32: { /* struct if_clonereq32 */ struct if_clonereq32 ifcr; - bcopy(data, &ifcr, sizeof (ifcr)); + bcopy(data, &ifcr, sizeof(ifcr)); error = if_clone_list(ifcr.ifcr_count, &ifcr.ifcr_total, CAST_USER_ADDR_T(ifcr.ifcru_buffer)); - bcopy(&ifcr, data, sizeof (ifcr)); + bcopy(&ifcr, data, sizeof(ifcr)); break; } - case SIOCIFGCLONERS64: { /* struct if_clonereq64 */ + case SIOCIFGCLONERS64: { /* struct if_clonereq64 */ struct if_clonereq64 ifcr; - bcopy(data, &ifcr, sizeof (ifcr)); + bcopy(data, &ifcr, sizeof(ifcr)); error = if_clone_list(ifcr.ifcr_count, &ifcr.ifcr_total, ifcr.ifcru_buffer); - bcopy(&ifcr, data, sizeof (ifcr)); + bcopy(&ifcr, data, sizeof(ifcr)); break; } @@ -1678,7 
+1737,7 @@ ifioctl_ifclone(u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -1691,13 +1750,14 @@ ifioctl_ifdesc(struct ifnet *ifp, u_long cmd, caddr_t data, struct proc *p) VERIFY(ifp != NULL); switch (cmd) { - case SIOCSIFDESC: { /* struct if_descreq */ - if ((error = proc_suser(p)) != 0) + case SIOCSIFDESC: { /* struct if_descreq */ + if ((error = proc_suser(p)) != 0) { break; + } ifnet_lock_exclusive(ifp); - bcopy(&ifdr->ifdr_len, &ifdr_len, sizeof (ifdr_len)); - if (ifdr_len > sizeof (ifdr->ifdr_desc) || + bcopy(&ifdr->ifdr_len, &ifdr_len, sizeof(ifdr_len)); + if (ifdr_len > sizeof(ifdr->ifdr_desc) || ifdr_len > ifp->if_desc.ifd_maxlen) { error = EINVAL; ifnet_lock_done(ifp); @@ -1713,11 +1773,11 @@ ifioctl_ifdesc(struct ifnet *ifp, u_long cmd, caddr_t data, struct proc *p) break; } - case SIOCGIFDESC: { /* struct if_descreq */ + case SIOCGIFDESC: { /* struct if_descreq */ ifnet_lock_shared(ifp); - ifdr_len = MIN(ifp->if_desc.ifd_len, sizeof (ifdr->ifdr_desc)); - bcopy(&ifdr_len, &ifdr->ifdr_len, sizeof (ifdr_len)); - bzero(&ifdr->ifdr_desc, sizeof (ifdr->ifdr_desc)); + ifdr_len = MIN(ifp->if_desc.ifd_len, sizeof(ifdr->ifdr_desc)); + bcopy(&ifdr_len, &ifdr->ifdr_len, sizeof(ifdr_len)); + bzero(&ifdr->ifdr_desc, sizeof(ifdr->ifdr_desc)); if (ifdr_len > 0) { bcopy(ifp->if_desc.ifd_desc, ifdr->ifdr_desc, ifdr_len); } @@ -1730,7 +1790,7 @@ ifioctl_ifdesc(struct ifnet *ifp, u_long cmd, caddr_t data, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -1745,11 +1805,12 @@ ifioctl_linkparams(struct ifnet *ifp, u_long cmd, caddr_t data, struct proc *p) ifq = &ifp->if_snd; switch (cmd) { - case SIOCSIFLINKPARAMS: { /* struct if_linkparamsreq */ + case SIOCSIFLINKPARAMS: { /* struct if_linkparamsreq */ struct tb_profile tb = { 0, 0, 0 }; - if ((error = proc_suser(p)) != 0) + if ((error = proc_suser(p)) != 0) { break; + } IFCQ_LOCK(ifq); @@ -1759,48 +1820,50 @@ ifioctl_linkparams(struct ifnet *ifp, u_long cmd, caddr_t data, struct proc *p) break; } bcopy(&iflpr->iflpr_output_tbr_rate, &tb.rate, - sizeof (tb.rate)); + sizeof(tb.rate)); bcopy(&iflpr->iflpr_output_tbr_percent, &tb.percent, - sizeof (tb.percent)); + sizeof(tb.percent)); error = ifclassq_tbr_set(ifq, &tb, TRUE); IFCQ_UNLOCK(ifq); break; } - case SIOCGIFLINKPARAMS: { /* struct if_linkparamsreq */ + case SIOCGIFLINKPARAMS: { /* struct if_linkparamsreq */ u_int32_t sched_type = PKTSCHEDT_NONE, flags = 0; u_int64_t tbr_bw = 0, tbr_pct = 0; IFCQ_LOCK(ifq); - if (IFCQ_IS_ENABLED(ifq)) + if (IFCQ_IS_ENABLED(ifq)) { sched_type = ifq->ifcq_type; + } bcopy(&sched_type, &iflpr->iflpr_output_sched, - sizeof (iflpr->iflpr_output_sched)); + sizeof(iflpr->iflpr_output_sched)); if (IFCQ_TBR_IS_ENABLED(ifq)) { tbr_bw = ifq->ifcq_tbr.tbr_rate_raw; tbr_pct = ifq->ifcq_tbr.tbr_percent; } bcopy(&tbr_bw, &iflpr->iflpr_output_tbr_rate, - sizeof (iflpr->iflpr_output_tbr_rate)); + sizeof(iflpr->iflpr_output_tbr_rate)); bcopy(&tbr_pct, &iflpr->iflpr_output_tbr_percent, - sizeof (iflpr->iflpr_output_tbr_percent)); + sizeof(iflpr->iflpr_output_tbr_percent)); IFCQ_UNLOCK(ifq); if (ifp->if_output_sched_model == - IFNET_SCHED_MODEL_DRIVER_MANAGED) + IFNET_SCHED_MODEL_DRIVER_MANAGED) { flags |= IFLPRF_DRVMANAGED; - bcopy(&flags, &iflpr->iflpr_flags, sizeof (iflpr->iflpr_flags)); + } + bcopy(&flags, &iflpr->iflpr_flags, sizeof(iflpr->iflpr_flags)); bcopy(&ifp->if_output_bw, &iflpr->iflpr_output_bw, - sizeof (iflpr->iflpr_output_bw)); + 
sizeof(iflpr->iflpr_output_bw)); bcopy(&ifp->if_input_bw, &iflpr->iflpr_input_bw, - sizeof (iflpr->iflpr_input_bw)); + sizeof(iflpr->iflpr_input_bw)); bcopy(&ifp->if_output_lt, &iflpr->iflpr_output_lt, - sizeof (iflpr->iflpr_output_lt)); + sizeof(iflpr->iflpr_output_lt)); bcopy(&ifp->if_input_lt, &iflpr->iflpr_input_lt, - sizeof (iflpr->iflpr_input_lt)); + sizeof(iflpr->iflpr_input_lt)); break; } @@ -1809,7 +1872,7 @@ ifioctl_linkparams(struct ifnet *ifp, u_long cmd, caddr_t data, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -1822,14 +1885,15 @@ ifioctl_qstats(struct ifnet *ifp, u_long cmd, caddr_t data) VERIFY(ifp != NULL); switch (cmd) { - case SIOCGIFQUEUESTATS: { /* struct if_qstatsreq */ - bcopy(&ifqr->ifqr_slot, &ifqr_slot, sizeof (ifqr_slot)); - bcopy(&ifqr->ifqr_len, &ifqr_len, sizeof (ifqr_len)); + case SIOCGIFQUEUESTATS: { /* struct if_qstatsreq */ + bcopy(&ifqr->ifqr_slot, &ifqr_slot, sizeof(ifqr_slot)); + bcopy(&ifqr->ifqr_len, &ifqr_len, sizeof(ifqr_len)); error = ifclassq_getqstats(&ifp->if_snd, ifqr_slot, ifqr->ifqr_buf, &ifqr_len); - if (error != 0) + if (error != 0) { ifqr_len = 0; - bcopy(&ifqr_len, &ifqr->ifqr_len, sizeof (ifqr_len)); + } + bcopy(&ifqr_len, &ifqr->ifqr_len, sizeof(ifqr_len)); break; } @@ -1838,7 +1902,7 @@ ifioctl_qstats(struct ifnet *ifp, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -1851,24 +1915,26 @@ ifioctl_throttle(struct ifnet *ifp, u_long cmd, caddr_t data, struct proc *p) VERIFY(ifp != NULL); switch (cmd) { - case SIOCSIFTHROTTLE: { /* struct if_throttlereq */ + case SIOCSIFTHROTTLE: { /* struct if_throttlereq */ /* * XXX: Use priv_check_cred() instead of root check? 
*/ - if ((error = proc_suser(p)) != 0) + if ((error = proc_suser(p)) != 0) { break; + } - bcopy(&ifthr->ifthr_level, &ifthr_level, sizeof (ifthr_level)); + bcopy(&ifthr->ifthr_level, &ifthr_level, sizeof(ifthr_level)); error = ifnet_set_throttle(ifp, ifthr_level); - if (error == EALREADY) + if (error == EALREADY) { error = 0; + } break; } - case SIOCGIFTHROTTLE: { /* struct if_throttlereq */ + case SIOCGIFTHROTTLE: { /* struct if_throttlereq */ if ((error = ifnet_get_throttle(ifp, &ifthr_level)) == 0) { bcopy(&ifthr_level, &ifthr->ifthr_level, - sizeof (ifthr_level)); + sizeof(ifthr_level)); } break; } @@ -1878,7 +1944,7 @@ ifioctl_throttle(struct ifnet *ifp, u_long cmd, caddr_t data, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } static int @@ -1897,10 +1963,10 @@ ifioctl_getnetagents(struct ifnet *ifp, u_int32_t *count, user_addr_t uuid_p) if (!uuid_is_null(*netagent_uuid)) { if (uuid_p != USER_ADDR_NULL) { error = copyout(netagent_uuid, - uuid_p + sizeof(uuid_t) * valid_netagent_count, - sizeof(uuid_t)); + uuid_p + sizeof(uuid_t) * valid_netagent_count, + sizeof(uuid_t)); if (error != 0) { - return (error); + return error; } } valid_netagent_count++; @@ -1909,11 +1975,11 @@ ifioctl_getnetagents(struct ifnet *ifp, u_int32_t *count, user_addr_t uuid_p) } *count = valid_netagent_count; - return (0); + return 0; } -#define IF_MAXAGENTS 64 -#define IF_AGENT_INCREMENT 8 +#define IF_MAXAGENTS 64 +#define IF_AGENT_INCREMENT 8 static int if_add_netagent_locked(struct ifnet *ifp, uuid_t new_agent_uuid) { @@ -1930,23 +1996,23 @@ if_add_netagent_locked(struct ifnet *ifp, uuid_t new_agent_uuid) break; } if (first_empty_slot == NULL && - uuid_is_null(*netagent_uuid)) { + uuid_is_null(*netagent_uuid)) { first_empty_slot = netagent_uuid; } } } if (already_added) { /* Already added agent, don't return an error */ - return (0); + return 0; } if (first_empty_slot == NULL) { if (ifp->if_agentcount >= IF_MAXAGENTS) { /* No room for another netagent UUID, bail */ - return (ENOMEM); + return ENOMEM; } else { /* Calculate new array size */ u_int32_t new_agent_count = - MIN(ifp->if_agentcount + IF_AGENT_INCREMENT, + MIN(ifp->if_agentcount + IF_AGENT_INCREMENT, IF_MAXAGENTS); /* Reallocate array */ @@ -1954,7 +2020,7 @@ if_add_netagent_locked(struct ifnet *ifp, uuid_t new_agent_uuid) sizeof(uuid_t) * new_agent_count, M_NETAGENT, M_WAITOK | M_ZERO); if (new_agent_array == NULL) { - return (ENOMEM); + return ENOMEM; } /* Save new array */ @@ -1970,7 +2036,7 @@ if_add_netagent_locked(struct ifnet *ifp, uuid_t new_agent_uuid) } uuid_copy(*first_empty_slot, new_agent_uuid); netagent_post_updated_interfaces(new_agent_uuid); - return (0); + return 0; } int @@ -1984,7 +2050,7 @@ if_add_netagent(struct ifnet *ifp, uuid_t new_agent_uuid) ifnet_lock_done(ifp); - return (error); + return error; } static int @@ -2004,10 +2070,11 @@ if_delete_netagent_locked(struct ifnet *ifp, uuid_t remove_agent_uuid) } } } - if (removed_agent_id) + if (removed_agent_id) { netagent_post_updated_interfaces(remove_agent_uuid); + } - return (0); + return 0; } int @@ -2021,7 +2088,7 @@ if_delete_netagent(struct ifnet *ifp, uuid_t remove_agent_uuid) ifnet_lock_done(ifp); - return (error); + return error; } boolean_t @@ -2029,8 +2096,9 @@ if_check_netagent(struct ifnet *ifp, uuid_t find_agent_uuid) { boolean_t found = FALSE; - if (!ifp || uuid_is_null(find_agent_uuid)) + if (!ifp || uuid_is_null(find_agent_uuid)) { return FALSE; + } ifnet_lock_shared(ifp); @@ -2062,60 +2130,60 @@ ifioctl_netagent(struct ifnet *ifp, u_long 
cmd, caddr_t data, struct proc *p) /* Get an io ref count if the interface is attached */ if (!ifnet_is_attached(ifp, 1)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } if (cmd == SIOCAIFAGENTID || - cmd == SIOCDIFAGENTID) { + cmd == SIOCDIFAGENTID) { ifnet_lock_exclusive(ifp); } else { ifnet_lock_shared(ifp); } switch (cmd) { - case SIOCAIFAGENTID: { /* struct if_agentidreq */ - // TODO: Use priv_check_cred() instead of root check - if ((error = proc_suser(p)) != 0) { - break; - } - error = if_add_netagent_locked(ifp, ifar->ifar_uuid); + case SIOCAIFAGENTID: { /* struct if_agentidreq */ + // TODO: Use priv_check_cred() instead of root check + if ((error = proc_suser(p)) != 0) { break; } - case SIOCDIFAGENTID: { /* struct if_agentidreq */ - // TODO: Use priv_check_cred() instead of root check - if ((error = proc_suser(p)) != 0) { - break; - } - error = if_delete_netagent_locked(ifp, ifar->ifar_uuid); + error = if_add_netagent_locked(ifp, ifar->ifar_uuid); + break; + } + case SIOCDIFAGENTID: { /* struct if_agentidreq */ + // TODO: Use priv_check_cred() instead of root check + if ((error = proc_suser(p)) != 0) { break; } - case SIOCGIFAGENTIDS32: { /* struct if_agentidsreq32 */ - bcopy(data, &u.s32, sizeof(u.s32)); - error = ifioctl_getnetagents(ifp, &u.s32.ifar_count, - u.s32.ifar_uuids); - if (error == 0) { - bcopy(&u.s32, data, sizeof(u.s32)); - } - break; + error = if_delete_netagent_locked(ifp, ifar->ifar_uuid); + break; + } + case SIOCGIFAGENTIDS32: { /* struct if_agentidsreq32 */ + bcopy(data, &u.s32, sizeof(u.s32)); + error = ifioctl_getnetagents(ifp, &u.s32.ifar_count, + u.s32.ifar_uuids); + if (error == 0) { + bcopy(&u.s32, data, sizeof(u.s32)); } - case SIOCGIFAGENTIDS64: { /* struct if_agentidsreq64 */ - bcopy(data, &u.s64, sizeof(u.s64)); - error = ifioctl_getnetagents(ifp, &u.s64.ifar_count, - u.s64.ifar_uuids); - if (error == 0) { - bcopy(&u.s64, data, sizeof(u.s64)); - } - break; + break; + } + case SIOCGIFAGENTIDS64: { /* struct if_agentidsreq64 */ + bcopy(data, &u.s64, sizeof(u.s64)); + error = ifioctl_getnetagents(ifp, &u.s64.ifar_count, + u.s64.ifar_uuids); + if (error == 0) { + bcopy(&u.s64, data, sizeof(u.s64)); } - default: - VERIFY(0); - /* NOTREACHED */ + break; + } + default: + VERIFY(0); + /* NOTREACHED */ } ifnet_lock_done(ifp); ifnet_decr_iorefcnt(ifp); - return (error); + return error; } void @@ -2151,7 +2219,7 @@ ifnet_increment_generation(ifnet_t interface) u_int32_t ifnet_get_generation(ifnet_t interface) { - return (interface->if_generation); + return interface->if_generation; } void @@ -2181,7 +2249,7 @@ ifnet_reset_order(u_int32_t *ordered_indices, u_int32_t count) ordered_indices[order_index] > (uint32_t)if_index) { error = EINVAL; ifnet_head_done(); - return (error); + return error; } } // Flush current ordered list @@ -2210,7 +2278,7 @@ ifnet_reset_order(u_int32_t *ordered_indices, u_int32_t count) necp_update_all_clients(); - return (error); + return error; } int @@ -2220,26 +2288,26 @@ if_set_qosmarking_mode(struct ifnet *ifp, u_int32_t mode) u_int32_t old_mode = ifp->if_qosmarking_mode; switch (mode) { - case IFRTYPE_QOSMARKING_MODE_NONE: - ifp->if_qosmarking_mode = IFRTYPE_QOSMARKING_MODE_NONE; - ifp->if_eflags &= ~IFEF_QOSMARKING_CAPABLE; - break; - case IFRTYPE_QOSMARKING_FASTLANE: - ifp->if_qosmarking_mode = IFRTYPE_QOSMARKING_FASTLANE; - ifp->if_eflags |= IFEF_QOSMARKING_CAPABLE; - if (net_qos_policy_capable_enabled != 0) - ifp->if_eflags |= IFEF_QOSMARKING_ENABLED; - break; - default: - error = EINVAL; - break; + case 
IFRTYPE_QOSMARKING_MODE_NONE: + ifp->if_qosmarking_mode = IFRTYPE_QOSMARKING_MODE_NONE; + ifp->if_eflags &= ~IFEF_QOSMARKING_CAPABLE; + break; + case IFRTYPE_QOSMARKING_FASTLANE: + ifp->if_qosmarking_mode = IFRTYPE_QOSMARKING_FASTLANE; + ifp->if_eflags |= IFEF_QOSMARKING_CAPABLE; + if (net_qos_policy_capable_enabled != 0) { + ifp->if_eflags |= IFEF_QOSMARKING_ENABLED; + } + break; + default: + error = EINVAL; + break; } if (error == 0 && old_mode != ifp->if_qosmarking_mode) { dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_QOS_MODE_CHANGED, NULL, sizeof(struct kev_dl_rrc_state)); - } - return (error); + return error; } static __attribute__((noinline)) int @@ -2248,11 +2316,11 @@ ifioctl_iforder(u_long cmd, caddr_t data) int error = 0; u_int32_t *ordered_indices = NULL; if (data == NULL) { - return (EINVAL); + return EINVAL; } switch (cmd) { - case SIOCSIFORDER: { /* struct if_order */ + case SIOCSIFORDER: { /* struct if_order */ struct if_order *ifo = (struct if_order *)(void *)data; if (ifo->ifo_count > (u_int32_t)if_index) { @@ -2260,7 +2328,7 @@ ifioctl_iforder(u_long cmd, caddr_t data) break; } - size_t length = (ifo->ifo_count * sizeof(u_int32_t)); + size_t length = (ifo->ifo_count * sizeof(u_int32_t)); if (length > 0) { if (ifo->ifo_ordered_indices == USER_ADDR_NULL) { error = EINVAL; @@ -2280,9 +2348,9 @@ ifioctl_iforder(u_long cmd, caddr_t data) /* ordered_indices should not contain duplicates */ bool found_duplicate = FALSE; - for (uint32_t i = 0; i < (ifo->ifo_count - 1) && !found_duplicate ; i++){ - for (uint32_t j = i + 1; j < ifo->ifo_count && !found_duplicate ; j++){ - if (ordered_indices[j] == ordered_indices[i]){ + for (uint32_t i = 0; i < (ifo->ifo_count - 1) && !found_duplicate; i++) { + for (uint32_t j = i + 1; j < ifo->ifo_count && !found_duplicate; j++) { + if (ordered_indices[j] == ordered_indices[i]) { error = EINVAL; found_duplicate = TRUE; break; @@ -2309,7 +2377,7 @@ ifioctl_iforder(u_long cmd, caddr_t data) _FREE(ordered_indices, M_NECP); } - return (error); + return error; } static __attribute__((noinline)) int @@ -2322,24 +2390,25 @@ ifioctl_netsignature(struct ifnet *ifp, u_long cmd, caddr_t data) VERIFY(ifp != NULL); switch (cmd) { - case SIOCSIFNETSIGNATURE: /* struct if_nsreq */ - if (ifnsr->ifnsr_len > sizeof (ifnsr->ifnsr_data)) { + case SIOCSIFNETSIGNATURE: /* struct if_nsreq */ + if (ifnsr->ifnsr_len > sizeof(ifnsr->ifnsr_data)) { error = EINVAL; break; } - bcopy(&ifnsr->ifnsr_flags, &flags, sizeof (flags)); + bcopy(&ifnsr->ifnsr_flags, &flags, sizeof(flags)); error = ifnet_set_netsignature(ifp, ifnsr->ifnsr_family, ifnsr->ifnsr_len, flags, ifnsr->ifnsr_data); break; - case SIOCGIFNETSIGNATURE: /* struct if_nsreq */ - ifnsr->ifnsr_len = sizeof (ifnsr->ifnsr_data); + case SIOCGIFNETSIGNATURE: /* struct if_nsreq */ + ifnsr->ifnsr_len = sizeof(ifnsr->ifnsr_data); error = ifnet_get_netsignature(ifp, ifnsr->ifnsr_family, &ifnsr->ifnsr_len, &flags, ifnsr->ifnsr_data); - if (error == 0) - bcopy(&flags, &ifnsr->ifnsr_flags, sizeof (flags)); - else + if (error == 0) { + bcopy(&flags, &ifnsr->ifnsr_flags, sizeof(flags)); + } else { ifnsr->ifnsr_len = 0; + } break; default: @@ -2347,7 +2416,7 @@ ifioctl_netsignature(struct ifnet *ifp, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } #if INET6 @@ -2360,16 +2429,18 @@ ifioctl_nat64prefix(struct ifnet *ifp, u_long cmd, caddr_t data) VERIFY(ifp != NULL); switch (cmd) { - case SIOCSIFNAT64PREFIX: /* struct if_nat64req */ + case SIOCSIFNAT64PREFIX: /* struct if_nat64req */ error = 
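/*
 * Illustrative sketch, not part of the patch: the O(n^2) duplicate scan
 * the SIOCSIFORDER handler above runs over the caller-supplied interface
 * ordering. The list is capped at if_index entries, so the quadratic scan
 * is cheap enough and avoids sorting a user-controlled buffer. The
 * count > 0 guard keeps the unsigned count - 1 from wrapping around.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
has_duplicate(const uint32_t *idx, uint32_t count)
{
        for (uint32_t i = 0; count > 0 && i < count - 1; i++) {
                for (uint32_t j = i + 1; j < count; j++) {
                        if (idx[j] == idx[i]) {
                                return true;
                        }
                }
        }
        return false;
}

int
main(void)
{
        uint32_t ordered[] = { 3, 1, 2 };
        uint32_t repeated[] = { 3, 1, 3 };

        printf("%d %d\n", has_duplicate(ordered, 3),
            has_duplicate(repeated, 3));        /* prints "0 1" */
        return 0;
}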
ifnet_set_nat64prefix(ifp, ifnat64->ifnat64_prefixes); - if (error != 0) + if (error != 0) { ip6stat.ip6s_clat464_plat64_pfx_setfail++; + } break; - case SIOCGIFNAT64PREFIX: /* struct if_nat64req */ + case SIOCGIFNAT64PREFIX: /* struct if_nat64req */ error = ifnet_get_nat64prefix(ifp, ifnat64->ifnat64_prefixes); - if (error != 0) + if (error != 0) { ip6stat.ip6s_clat464_plat64_pfx_getfail++; + } break; default: @@ -2377,7 +2448,7 @@ ifioctl_nat64prefix(struct ifnet *ifp, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -2390,24 +2461,24 @@ ifioctl_clat46addr(struct ifnet *ifp, u_long cmd, caddr_t data) VERIFY(ifp != NULL); switch (cmd) { - case SIOCGIFCLAT46ADDR: - ia6_clat = in6ifa_ifpwithflag(ifp, IN6_IFF_CLAT46); - if (ia6_clat == NULL) { - error = ENOENT; - break; - } - - bcopy(&ia6_clat->ia_addr.sin6_addr, &ifclat46->ifclat46_addr.v6_address, - sizeof(ifclat46->ifclat46_addr.v6_address)); - ifclat46->ifclat46_addr.v6_prefixlen = ia6_clat->ia_plen; - IFA_REMREF(&ia6_clat->ia_ifa); + case SIOCGIFCLAT46ADDR: + ia6_clat = in6ifa_ifpwithflag(ifp, IN6_IFF_CLAT46); + if (ia6_clat == NULL) { + error = ENOENT; break; - default: - VERIFY(0); - /* NOTREACHED */ + } + + bcopy(&ia6_clat->ia_addr.sin6_addr, &ifclat46->ifclat46_addr.v6_address, + sizeof(ifclat46->ifclat46_addr.v6_address)); + ifclat46->ifclat46_addr.v6_prefixlen = ia6_clat->ia_plen; + IFA_REMREF(&ia6_clat->ia_ifa); + break; + default: + VERIFY(0); + /* NOTREACHED */ } - return (error); + return error; } #endif @@ -2416,10 +2487,10 @@ static int ifioctl_get_protolist(struct ifnet *ifp, u_int32_t * ret_count, user_addr_t ifpl) { - u_int32_t actual_count; - u_int32_t count; - int error = 0; - u_int32_t *list = NULL; + u_int32_t actual_count; + u_int32_t count; + int error = 0; + u_int32_t *list = NULL; /* find out how many */ count = if_get_protolist(ifp, NULL, 0); @@ -2447,12 +2518,12 @@ ifioctl_get_protolist(struct ifnet *ifp, u_int32_t * ret_count, error = copyout((caddr_t)list, ifpl, count * sizeof(*list)); } - done: +done: if (list != NULL) { if_free_protolist(list); } *ret_count = count; - return (error); + return error; } static __attribute__((noinline)) int @@ -2461,8 +2532,8 @@ ifioctl_protolist(struct ifnet *ifp, u_long cmd, caddr_t data) int error = 0; switch (cmd) { - case SIOCGIFPROTOLIST32: { /* struct if_protolistreq32 */ - struct if_protolistreq32 ifpl; + case SIOCGIFPROTOLIST32: { /* struct if_protolistreq32 */ + struct if_protolistreq32 ifpl; bcopy(data, &ifpl, sizeof(ifpl)); if (ifpl.ifpl_reserved != 0) { @@ -2474,8 +2545,8 @@ ifioctl_protolist(struct ifnet *ifp, u_long cmd, caddr_t data) bcopy(&ifpl, data, sizeof(ifpl)); break; } - case SIOCGIFPROTOLIST64: { /* struct if_protolistreq64 */ - struct if_protolistreq64 ifpl; + case SIOCGIFPROTOLIST64: { /* struct if_protolistreq64 */ + struct if_protolistreq64 ifpl; bcopy(data, &ifpl, sizeof(ifpl)); if (ifpl.ifpl_reserved != 0) { @@ -2492,7 +2563,7 @@ ifioctl_protolist(struct ifnet *ifp, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } /* @@ -2502,21 +2573,20 @@ static bool ifioctl_restrict_intcoproc(unsigned long cmd, const char *ifname, struct ifnet *ifp, struct proc *p) { - if (intcoproc_unrestricted == TRUE) { - return (false); + return false; } if (proc_pid(p) == 0) { - return (false); + return false; } if (ifname) { ifp = ifunit(ifname); } if (ifp == NULL) { - return (false); + return false; } if (!IFNET_IS_INTCOPROC(ifp)) { - return (false); + return false; } 
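/*
 * Illustrative sketch, not part of the patch: the two-pass "size, then
 * fetch" contract that ifioctl_get_protolist() above builds on. The
 * handler first calls if_get_protolist(ifp, NULL, 0) to learn how many
 * protocols are attached, allocates, then calls again to fill the list
 * before the copyout. get_list() below is a hypothetical stand-in for
 * that KPI, with made-up sample values.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Returns how many entries exist; copies up to maxcount of them into buf
 * when buf is non-NULL. */
static uint32_t
get_list(uint32_t *buf, uint32_t maxcount)
{
        static const uint32_t table[] = { 2, 30, 28 };  /* sample values */
        uint32_t avail = sizeof(table) / sizeof(table[0]);

        for (uint32_t i = 0; buf != NULL && i < avail && i < maxcount; i++) {
                buf[i] = table[i];
        }
        return avail;
}

int
main(void)
{
        uint32_t count = get_list(NULL, 0);     /* pass 1: how many? */
        uint32_t *list = calloc(count, sizeof(*list));

        if (list == NULL) {
                return 1;
        }
        count = get_list(list, count);          /* pass 2: fetch */
        for (uint32_t i = 0; i < count; i++) {
                printf("proto %u\n", list[i]);
        }
        free(list);
        return 0;
}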
switch (cmd) { case SIOCGIFBRDADDR: @@ -2554,6 +2624,8 @@ ifioctl_restrict_intcoproc(unsigned long cmd, const char *ifname, case SIOCGIFSTATUS: case SIOCGIFMEDIA32: case SIOCGIFMEDIA64: + case SIOCGIFXMEDIA32: + case SIOCGIFXMEDIA64: case SIOCGIFDESC: case SIOCGIFLINKPARAMS: case SIOCGIFQUEUESTATS: @@ -2568,17 +2640,138 @@ ifioctl_restrict_intcoproc(unsigned long cmd, const char *ifname, case SIOCGIFNETMASK_IN6: case SIOCGIFPROTOLIST32: case SIOCGIFPROTOLIST64: - return (false); + return false; default: #if (DEBUG || DEVELOPMENT) printf("%s: cmd 0x%lx not allowed (pid %u)\n", __func__, cmd, proc_pid(p)); #endif - return (true); + return true; + } + return false; +} + +/* + * Given a media word, return one suitable for an application + * using the original encoding. + */ +static int +compat_media(int media) +{ + if (IFM_TYPE(media) == IFM_ETHER && IFM_SUBTYPE(media) > IFM_OTHER) { + media &= ~IFM_TMASK; + media |= IFM_OTHER; } - return (false); + return media; +} + +static int +compat_ifmu_ulist(struct ifnet *ifp, u_long cmd, void *data) +{ + struct ifmediareq *ifmr = (struct ifmediareq *)data; + user_addr_t user_addr; + int i; + int *media_list = NULL; + int error = 0; + bool list_modified = false; + + user_addr = (cmd == SIOCGIFMEDIA64) ? + ((struct ifmediareq64 *)ifmr)->ifmu_ulist : + CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist); + if (user_addr == USER_ADDR_NULL || ifmr->ifm_count == 0) { + return 0; + } + MALLOC(media_list, int *, ifmr->ifm_count * sizeof(int), + M_TEMP, M_WAITOK | M_ZERO); + if (media_list == NULL) { + os_log_error(OS_LOG_DEFAULT, + "%s: %s MALLOC() failed", + __func__, ifp->if_xname); + error = ENOMEM; + goto done; + } + error = copyin(user_addr, media_list, ifmr->ifm_count * sizeof(int)); + if (error != 0) { + os_log_error(OS_LOG_DEFAULT, + "%s: %s copyin() error %d", + __func__, ifp->if_xname, error); + goto done; + } + for (i = 0; i < ifmr->ifm_count; i++) { + int old_media, new_media; + + old_media = media_list[i]; + + new_media = compat_media(old_media); + if (new_media == old_media) { + continue; + } + if (if_verbose != 0) { + os_log_info(OS_LOG_DEFAULT, + "%s: %s converted extended media %08x to compat media %08x", + __func__, ifp->if_xname, old_media, new_media); + } + media_list[i] = new_media; + list_modified = true; + } + if (list_modified) { + error = copyout(media_list, user_addr, ifmr->ifm_count * sizeof(int)); + if (error != 0) { + os_log_error(OS_LOG_DEFAULT, + "%s: %s copyout() error %d", + __func__, ifp->if_xname, error); + goto done; + } + } +done: + if (media_list != NULL) { + FREE(media_list, M_TEMP); + } + return error; +} + +static int +compat_ifmediareq(struct ifnet *ifp, u_long cmd, void *data) +{ + struct ifmediareq *ifmr = (struct ifmediareq *)data; + int error; + + ifmr->ifm_active = compat_media(ifmr->ifm_active); + ifmr->ifm_current = compat_media(ifmr->ifm_current); + + error = compat_ifmu_ulist(ifp, cmd, data); + + return error; } +static int +ifioctl_get_media(struct ifnet *ifp, struct socket *so, u_long cmd, caddr_t data) +{ + int error = 0; + + /* + * An ifnet must not implement SIOCGIFXMEDIA as it gets the extended + * media subtypes macros from + */ + switch (cmd) { + case SIOCGIFMEDIA32: + case SIOCGIFXMEDIA32: + error = ifnet_ioctl(ifp, SOCK_DOM(so), SIOCGIFMEDIA32, data); + break; + case SIOCGIFMEDIA64: + case SIOCGIFXMEDIA64: + error = ifnet_ioctl(ifp, SOCK_DOM(so), SIOCGIFMEDIA64, data); + break; + } + if (if_verbose != 0 && error != 0) { + os_log(OS_LOG_DEFAULT, "%s: first ifnet_ioctl(%s, %08lx) error %d", + 
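/*
 * Illustrative sketch, not part of the patch: what compat_media() above
 * does to a media word. Extended Ethernet subtypes (values past IFM_OTHER)
 * are collapsed to IFM_OTHER so legacy SIOCGIFMEDIA callers, which predate
 * the wider subtype encoding, never see a value they cannot decode;
 * SIOCGIFXMEDIA callers get the word unmodified. Assumes a <net/if_media.h>
 * that defines IFM_OTHER (shipped alongside this change); the subtype
 * value built here is hypothetical.
 */
#include <stdio.h>
#include <net/if_media.h>

int
main(void)
{
        int xmedia = IFM_ETHER | (IFM_OTHER + 1); /* made-up extended subtype */
        int media = xmedia;

        if (IFM_TYPE(media) == IFM_ETHER && IFM_SUBTYPE(media) > IFM_OTHER) {
                media &= ~IFM_TMASK;    /* clear the subtype bits */
                media |= IFM_OTHER;     /* report "other" instead */
        }
        printf("extended %#x -> compat %#x\n", xmedia, media);
        return 0;
}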
__func__, ifp->if_xname, cmd, error); + } + if (error == 0 && (cmd == SIOCGIFMEDIA32 || cmd == SIOCGIFMEDIA64)) { + error = compat_ifmediareq(ifp, cmd, data); + } + return error; +} /* * Interface ioctls. * @@ -2595,121 +2788,121 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) struct ifstat *ifs = NULL; int error = 0; - bzero(ifname, sizeof (ifname)); + bzero(ifname, sizeof(ifname)); /* * ioctls which don't require ifp, or ifreq ioctls */ switch (cmd) { - case OSIOCGIFCONF32: /* struct ifconf32 */ - case SIOCGIFCONF32: /* struct ifconf32 */ - case SIOCGIFCONF64: /* struct ifconf64 */ - case OSIOCGIFCONF64: /* struct ifconf64 */ + case OSIOCGIFCONF32: /* struct ifconf32 */ + case SIOCGIFCONF32: /* struct ifconf32 */ + case SIOCGIFCONF64: /* struct ifconf64 */ + case OSIOCGIFCONF64: /* struct ifconf64 */ error = ifioctl_ifconf(cmd, data); goto done; - case SIOCIFGCLONERS32: /* struct if_clonereq32 */ - case SIOCIFGCLONERS64: /* struct if_clonereq64 */ + case SIOCIFGCLONERS32: /* struct if_clonereq32 */ + case SIOCIFGCLONERS64: /* struct if_clonereq64 */ error = ifioctl_ifclone(cmd, data); goto done; - case SIOCGIFAGENTDATA32: /* struct netagent_req32 */ - case SIOCGIFAGENTDATA64: /* struct netagent_req64 */ + case SIOCGIFAGENTDATA32: /* struct netagent_req32 */ + case SIOCGIFAGENTDATA64: /* struct netagent_req64 */ case SIOCGIFAGENTLIST32: /* struct netagentlist_req32 */ case SIOCGIFAGENTLIST64: /* struct netagentlist_req64 */ error = netagent_ioctl(cmd, data); goto done; - case SIOCSIFORDER: /* struct if_order */ + case SIOCSIFORDER: /* struct if_order */ error = ifioctl_iforder(cmd, data); goto done; - case SIOCSIFDSTADDR: /* struct ifreq */ - case SIOCSIFADDR: /* struct ifreq */ - case SIOCSIFBRDADDR: /* struct ifreq */ - case SIOCSIFNETMASK: /* struct ifreq */ - case OSIOCGIFADDR: /* struct ifreq */ - case OSIOCGIFDSTADDR: /* struct ifreq */ - case OSIOCGIFBRDADDR: /* struct ifreq */ - case OSIOCGIFNETMASK: /* struct ifreq */ - case SIOCSIFKPI: /* struct ifreq */ + case SIOCSIFDSTADDR: /* struct ifreq */ + case SIOCSIFADDR: /* struct ifreq */ + case SIOCSIFBRDADDR: /* struct ifreq */ + case SIOCSIFNETMASK: /* struct ifreq */ + case OSIOCGIFADDR: /* struct ifreq */ + case OSIOCGIFDSTADDR: /* struct ifreq */ + case OSIOCGIFBRDADDR: /* struct ifreq */ + case OSIOCGIFNETMASK: /* struct ifreq */ + case SIOCSIFKPI: /* struct ifreq */ if (so->so_proto == NULL) { error = EOPNOTSUPP; goto done; } - /* FALLTHRU */ - case SIOCIFCREATE: /* struct ifreq */ - case SIOCIFCREATE2: /* struct ifreq */ - case SIOCIFDESTROY: /* struct ifreq */ - case SIOCGIFFLAGS: /* struct ifreq */ - case SIOCGIFEFLAGS: /* struct ifreq */ - case SIOCGIFCAP: /* struct ifreq */ + /* FALLTHRU */ + case SIOCIFCREATE: /* struct ifreq */ + case SIOCIFCREATE2: /* struct ifreq */ + case SIOCIFDESTROY: /* struct ifreq */ + case SIOCGIFFLAGS: /* struct ifreq */ + case SIOCGIFEFLAGS: /* struct ifreq */ + case SIOCGIFCAP: /* struct ifreq */ #if CONFIG_MACF_NET - case SIOCGIFMAC: /* struct ifreq */ - case SIOCSIFMAC: /* struct ifreq */ + case SIOCGIFMAC: /* struct ifreq */ + case SIOCSIFMAC: /* struct ifreq */ #endif /* CONFIG_MACF_NET */ - case SIOCGIFMETRIC: /* struct ifreq */ - case SIOCGIFMTU: /* struct ifreq */ - case SIOCGIFPHYS: /* struct ifreq */ - case SIOCSIFFLAGS: /* struct ifreq */ - case SIOCSIFCAP: /* struct ifreq */ - case SIOCSIFMETRIC: /* struct ifreq */ - case SIOCSIFPHYS: /* struct ifreq */ - case SIOCSIFMTU: /* struct ifreq */ - case SIOCADDMULTI: /* struct ifreq */ - case SIOCDELMULTI: /* 
struct ifreq */ - case SIOCDIFPHYADDR: /* struct ifreq */ - case SIOCSIFMEDIA: /* struct ifreq */ - case SIOCSIFGENERIC: /* struct ifreq */ - case SIOCSIFLLADDR: /* struct ifreq */ - case SIOCSIFALTMTU: /* struct ifreq */ - case SIOCSIFVLAN: /* struct ifreq */ - case SIOCSIFBOND: /* struct ifreq */ - case SIOCGIFLLADDR: /* struct ifreq */ - case SIOCGIFTYPE: /* struct ifreq */ - case SIOCGIFFUNCTIONALTYPE: /* struct ifreq */ - case SIOCGIFPSRCADDR: /* struct ifreq */ - case SIOCGIFPDSTADDR: /* struct ifreq */ - case SIOCGIFGENERIC: /* struct ifreq */ - case SIOCGIFDEVMTU: /* struct ifreq */ - case SIOCGIFVLAN: /* struct ifreq */ - case SIOCGIFBOND: /* struct ifreq */ - case SIOCGIFWAKEFLAGS: /* struct ifreq */ - case SIOCGIFGETRTREFCNT: /* struct ifreq */ - case SIOCSIFOPPORTUNISTIC: /* struct ifreq */ - case SIOCGIFOPPORTUNISTIC: /* struct ifreq */ - case SIOCGIFLINKQUALITYMETRIC: /* struct ifreq */ - case SIOCSIFLOG: /* struct ifreq */ - case SIOCGIFLOG: /* struct ifreq */ - case SIOCGIFDELEGATE: /* struct ifreq */ - case SIOCGIFEXPENSIVE: /* struct ifreq */ - case SIOCSIFEXPENSIVE: /* struct ifreq */ - case SIOCSIF2KCL: /* struct ifreq */ - case SIOCGIF2KCL: /* struct ifreq */ - case SIOCSIFINTERFACESTATE: /* struct ifreq */ - case SIOCGIFINTERFACESTATE: /* struct ifreq */ - case SIOCSIFPROBECONNECTIVITY: /* struct ifreq */ - case SIOCGIFPROBECONNECTIVITY: /* struct ifreq */ - case SIOCGSTARTDELAY: /* struct ifreq */ - case SIOCSIFTIMESTAMPENABLE: /* struct ifreq */ - case SIOCSIFTIMESTAMPDISABLE: /* struct ifreq */ - case SIOCGIFTIMESTAMPENABLED: /* struct ifreq */ + case SIOCGIFMETRIC: /* struct ifreq */ + case SIOCGIFMTU: /* struct ifreq */ + case SIOCGIFPHYS: /* struct ifreq */ + case SIOCSIFFLAGS: /* struct ifreq */ + case SIOCSIFCAP: /* struct ifreq */ + case SIOCSIFMETRIC: /* struct ifreq */ + case SIOCSIFPHYS: /* struct ifreq */ + case SIOCSIFMTU: /* struct ifreq */ + case SIOCADDMULTI: /* struct ifreq */ + case SIOCDELMULTI: /* struct ifreq */ + case SIOCDIFPHYADDR: /* struct ifreq */ + case SIOCSIFMEDIA: /* struct ifreq */ + case SIOCSIFGENERIC: /* struct ifreq */ + case SIOCSIFLLADDR: /* struct ifreq */ + case SIOCSIFALTMTU: /* struct ifreq */ + case SIOCSIFVLAN: /* struct ifreq */ + case SIOCSIFBOND: /* struct ifreq */ + case SIOCGIFLLADDR: /* struct ifreq */ + case SIOCGIFTYPE: /* struct ifreq */ + case SIOCGIFFUNCTIONALTYPE: /* struct ifreq */ + case SIOCGIFPSRCADDR: /* struct ifreq */ + case SIOCGIFPDSTADDR: /* struct ifreq */ + case SIOCGIFGENERIC: /* struct ifreq */ + case SIOCGIFDEVMTU: /* struct ifreq */ + case SIOCGIFVLAN: /* struct ifreq */ + case SIOCGIFBOND: /* struct ifreq */ + case SIOCGIFWAKEFLAGS: /* struct ifreq */ + case SIOCGIFGETRTREFCNT: /* struct ifreq */ + case SIOCSIFOPPORTUNISTIC: /* struct ifreq */ + case SIOCGIFOPPORTUNISTIC: /* struct ifreq */ + case SIOCGIFLINKQUALITYMETRIC: /* struct ifreq */ + case SIOCSIFLOG: /* struct ifreq */ + case SIOCGIFLOG: /* struct ifreq */ + case SIOCGIFDELEGATE: /* struct ifreq */ + case SIOCGIFEXPENSIVE: /* struct ifreq */ + case SIOCSIFEXPENSIVE: /* struct ifreq */ + case SIOCSIF2KCL: /* struct ifreq */ + case SIOCGIF2KCL: /* struct ifreq */ + case SIOCSIFINTERFACESTATE: /* struct ifreq */ + case SIOCGIFINTERFACESTATE: /* struct ifreq */ + case SIOCSIFPROBECONNECTIVITY: /* struct ifreq */ + case SIOCGIFPROBECONNECTIVITY: /* struct ifreq */ + case SIOCGSTARTDELAY: /* struct ifreq */ + case SIOCSIFTIMESTAMPENABLE: /* struct ifreq */ + case SIOCSIFTIMESTAMPDISABLE: /* struct ifreq */ + case SIOCGIFTIMESTAMPENABLED: /* 
struct ifreq */ #if (DEBUG || DEVELOPMENT) - case SIOCSIFDISABLEOUTPUT: /* struct ifreq */ + case SIOCSIFDISABLEOUTPUT: /* struct ifreq */ #endif /* (DEBUG || DEVELOPMENT) */ - case SIOCGECNMODE: /* struct ifreq */ + case SIOCGECNMODE: /* struct ifreq */ case SIOCSECNMODE: - case SIOCSQOSMARKINGMODE: /* struct ifreq */ - case SIOCSQOSMARKINGENABLED: /* struct ifreq */ - case SIOCGQOSMARKINGMODE: /* struct ifreq */ - case SIOCGQOSMARKINGENABLED: /* struct ifreq */ - case SIOCSIFLOWINTERNET: /* struct ifreq */ - case SIOCGIFLOWINTERNET: /* struct ifreq */ - case SIOCGIFLOWPOWER: /* struct ifreq */ - case SIOCSIFLOWPOWER: /* struct ifreq */ - { /* struct ifreq */ + case SIOCSQOSMARKINGMODE: /* struct ifreq */ + case SIOCSQOSMARKINGENABLED: /* struct ifreq */ + case SIOCGQOSMARKINGMODE: /* struct ifreq */ + case SIOCGQOSMARKINGENABLED: /* struct ifreq */ + case SIOCSIFLOWINTERNET: /* struct ifreq */ + case SIOCGIFLOWINTERNET: /* struct ifreq */ + case SIOCGIFLOWPOWER: /* struct ifreq */ + case SIOCSIFLOWPOWER: /* struct ifreq */ + { /* struct ifreq */ struct ifreq ifr; - bcopy(data, &ifr, sizeof (ifr)); + bcopy(data, &ifr, sizeof(ifr)); ifr.ifr_name[IFNAMSIZ - 1] = '\0'; bcopy(&ifr.ifr_name, ifname, IFNAMSIZ); if (ifioctl_restrict_intcoproc(cmd, ifname, NULL, p) == true) { @@ -2717,7 +2910,7 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) goto done; } error = ifioctl_ifreq(so, cmd, &ifr, p); - bcopy(&ifr, data, sizeof (ifr)); + bcopy(&ifr, data, sizeof(ifr)); goto done; } } @@ -2728,96 +2921,98 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) */ dlil_if_lock(); switch (cmd) { - case SIOCSIFPHYADDR: /* struct {if,in_}aliasreq */ + case SIOCSIFPHYADDR: /* struct {if,in_}aliasreq */ bcopy(((struct in_aliasreq *)(void *)data)->ifra_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; #if INET6 - case SIOCSIFPHYADDR_IN6_32: /* struct in6_aliasreq_32 */ + case SIOCSIFPHYADDR_IN6_32: /* struct in6_aliasreq_32 */ bcopy(((struct in6_aliasreq_32 *)(void *)data)->ifra_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCSIFPHYADDR_IN6_64: /* struct in6_aliasreq_64 */ + case SIOCSIFPHYADDR_IN6_64: /* struct in6_aliasreq_64 */ bcopy(((struct in6_aliasreq_64 *)(void *)data)->ifra_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; #endif /* INET6 */ - case SIOCGIFSTATUS: /* struct ifstat */ - ifs = _MALLOC(sizeof (*ifs), M_DEVBUF, M_WAITOK); + case SIOCGIFSTATUS: /* struct ifstat */ + ifs = _MALLOC(sizeof(*ifs), M_DEVBUF, M_WAITOK); if (ifs == NULL) { error = ENOMEM; dlil_if_unlock(); goto done; } - bcopy(data, ifs, sizeof (*ifs)); + bcopy(data, ifs, sizeof(*ifs)); ifs->ifs_name[IFNAMSIZ - 1] = '\0'; bcopy(ifs->ifs_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCGIFMEDIA32: /* struct ifmediareq32 */ + case SIOCGIFMEDIA32: /* struct ifmediareq32 */ + case SIOCGIFXMEDIA32: /* struct ifmediareq32 */ bcopy(((struct ifmediareq32 *)(void *)data)->ifm_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCGIFMEDIA64: /* struct ifmediareq64 */ + case SIOCGIFMEDIA64: /* struct ifmediareq64 */ + case SIOCGIFXMEDIA64: /* struct ifmediareq64 */ bcopy(((struct ifmediareq64 *)(void *)data)->ifm_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCSIFDESC: /* struct if_descreq */ - case SIOCGIFDESC: /* struct if_descreq */ + case SIOCSIFDESC: /* struct if_descreq */ + case SIOCGIFDESC: /* struct if_descreq */ bcopy(((struct if_descreq *)(void *)data)->ifdr_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case 
SIOCSIFLINKPARAMS: /* struct if_linkparamsreq */ - case SIOCGIFLINKPARAMS: /* struct if_linkparamsreq */ + case SIOCSIFLINKPARAMS: /* struct if_linkparamsreq */ + case SIOCGIFLINKPARAMS: /* struct if_linkparamsreq */ bcopy(((struct if_linkparamsreq *)(void *)data)->iflpr_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCGIFQUEUESTATS: /* struct if_qstatsreq */ + case SIOCGIFQUEUESTATS: /* struct if_qstatsreq */ bcopy(((struct if_qstatsreq *)(void *)data)->ifqr_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCSIFTHROTTLE: /* struct if_throttlereq */ - case SIOCGIFTHROTTLE: /* struct if_throttlereq */ + case SIOCSIFTHROTTLE: /* struct if_throttlereq */ + case SIOCGIFTHROTTLE: /* struct if_throttlereq */ bcopy(((struct if_throttlereq *)(void *)data)->ifthr_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCAIFAGENTID: /* struct if_agentidreq */ - case SIOCDIFAGENTID: /* struct if_agentidreq */ - case SIOCGIFAGENTIDS32: /* struct if_agentidsreq32 */ - case SIOCGIFAGENTIDS64: /* struct if_agentidsreq64 */ + case SIOCAIFAGENTID: /* struct if_agentidreq */ + case SIOCDIFAGENTID: /* struct if_agentidreq */ + case SIOCGIFAGENTIDS32: /* struct if_agentidsreq32 */ + case SIOCGIFAGENTIDS64: /* struct if_agentidsreq64 */ bcopy(((struct if_agentidreq *)(void *)data)->ifar_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCSIFNETSIGNATURE: /* struct if_nsreq */ - case SIOCGIFNETSIGNATURE: /* struct if_nsreq */ + case SIOCSIFNETSIGNATURE: /* struct if_nsreq */ + case SIOCGIFNETSIGNATURE: /* struct if_nsreq */ bcopy(((struct if_nsreq *)(void *)data)->ifnsr_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); break; - case SIOCGIFPROTOLIST32: /* struct if_protolistreq32 */ - case SIOCGIFPROTOLIST64: /* struct if_protolistreq64 */ + case SIOCGIFPROTOLIST32: /* struct if_protolistreq32 */ + case SIOCGIFPROTOLIST64: /* struct if_protolistreq64 */ bcopy(((struct if_protolistreq *)(void *)data)->ifpl_name, ifname, IFNAMSIZ); ifp = ifunit(ifname); @@ -2844,80 +3039,84 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) goto done; } switch (cmd) { - case SIOCSIFPHYADDR: /* struct {if,in_}aliasreq */ + case SIOCSIFPHYADDR: /* struct {if,in_}aliasreq */ #if INET6 - case SIOCSIFPHYADDR_IN6_32: /* struct in6_aliasreq_32 */ - case SIOCSIFPHYADDR_IN6_64: /* struct in6_aliasreq_64 */ + case SIOCSIFPHYADDR_IN6_32: /* struct in6_aliasreq_32 */ + case SIOCSIFPHYADDR_IN6_64: /* struct in6_aliasreq_64 */ #endif /* INET6 */ error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, data); - if (error != 0) + if (error != 0) { break; + } ifnet_touch_lastchange(ifp); break; - case SIOCGIFSTATUS: /* struct ifstat */ + case SIOCGIFSTATUS: /* struct ifstat */ VERIFY(ifs != NULL); ifs->ascii[0] = '\0'; error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, (caddr_t)ifs); - bcopy(ifs, data, sizeof (*ifs)); + bcopy(ifs, data, sizeof(*ifs)); break; - case SIOCGIFMEDIA32: /* struct ifmediareq32 */ - case SIOCGIFMEDIA64: /* struct ifmediareq64 */ - error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, data); + case SIOCGIFMEDIA32: /* struct ifmediareq32 */ + case SIOCGIFMEDIA64: /* struct ifmediareq64 */ + case SIOCGIFXMEDIA32: /* struct ifmediareq32 */ + case SIOCGIFXMEDIA64: /* struct ifmediareq64 */ + error = ifioctl_get_media(ifp, so, cmd, data); break; - case SIOCSIFDESC: /* struct if_descreq */ - case SIOCGIFDESC: /* struct if_descreq */ + case SIOCSIFDESC: /* struct if_descreq */ + case SIOCGIFDESC: /* struct if_descreq */ 
error = ifioctl_ifdesc(ifp, cmd, data, p); break; - case SIOCSIFLINKPARAMS: /* struct if_linkparamsreq */ - case SIOCGIFLINKPARAMS: /* struct if_linkparamsreq */ + case SIOCSIFLINKPARAMS: /* struct if_linkparamsreq */ + case SIOCGIFLINKPARAMS: /* struct if_linkparamsreq */ error = ifioctl_linkparams(ifp, cmd, data, p); break; - case SIOCGIFQUEUESTATS: /* struct if_qstatsreq */ + case SIOCGIFQUEUESTATS: /* struct if_qstatsreq */ error = ifioctl_qstats(ifp, cmd, data); break; - case SIOCSIFTHROTTLE: /* struct if_throttlereq */ - case SIOCGIFTHROTTLE: /* struct if_throttlereq */ + case SIOCSIFTHROTTLE: /* struct if_throttlereq */ + case SIOCGIFTHROTTLE: /* struct if_throttlereq */ error = ifioctl_throttle(ifp, cmd, data, p); break; - case SIOCAIFAGENTID: /* struct if_agentidreq */ - case SIOCDIFAGENTID: /* struct if_agentidreq */ - case SIOCGIFAGENTIDS32: /* struct if_agentidsreq32 */ - case SIOCGIFAGENTIDS64: /* struct if_agentidsreq64 */ + case SIOCAIFAGENTID: /* struct if_agentidreq */ + case SIOCDIFAGENTID: /* struct if_agentidreq */ + case SIOCGIFAGENTIDS32: /* struct if_agentidsreq32 */ + case SIOCGIFAGENTIDS64: /* struct if_agentidsreq64 */ error = ifioctl_netagent(ifp, cmd, data, p); break; - case SIOCSIFNETSIGNATURE: /* struct if_nsreq */ - case SIOCGIFNETSIGNATURE: /* struct if_nsreq */ + case SIOCSIFNETSIGNATURE: /* struct if_nsreq */ + case SIOCGIFNETSIGNATURE: /* struct if_nsreq */ error = ifioctl_netsignature(ifp, cmd, data); break; #if INET6 - case SIOCSIFNAT64PREFIX: /* struct if_nat64req */ - case SIOCGIFNAT64PREFIX: /* struct if_nat64req */ + case SIOCSIFNAT64PREFIX: /* struct if_nat64req */ + case SIOCGIFNAT64PREFIX: /* struct if_nat64req */ error = ifioctl_nat64prefix(ifp, cmd, data); break; - case SIOCGIFCLAT46ADDR: /* struct if_clat46req */ + case SIOCGIFCLAT46ADDR: /* struct if_clat46req */ error = ifioctl_clat46addr(ifp, cmd, data); break; #endif - case SIOCGIFPROTOLIST32: /* struct if_protolistreq32 */ - case SIOCGIFPROTOLIST64: /* struct if_protolistreq64 */ + case SIOCGIFPROTOLIST32: /* struct if_protolistreq32 */ + case SIOCGIFPROTOLIST64: /* struct if_protolistreq64 */ error = ifioctl_protolist(ifp, cmd, data); break; @@ -2932,23 +3131,28 @@ ifioctl(struct socket *so, u_long cmd, caddr_t data, struct proc *p) data, ifp, p)); socket_unlock(so, 1); - if (error == EOPNOTSUPP || error == ENOTSUP) { + // Don't allow to call SIOCAIFADDR and SIOCDIFADDR with + // ifreq as the code expects ifaddr + if ((error == EOPNOTSUPP || error == ENOTSUP) && + !(cmd == SIOCAIFADDR || cmd == SIOCDIFADDR)) { error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, data); } break; } done: - if (ifs != NULL) + if (ifs != NULL) { _FREE(ifs, M_DEVBUF); + } if (if_verbose) { - if (ifname[0] == '\0') - (void) snprintf(ifname, sizeof (ifname), "%s", + if (ifname[0] == '\0') { + (void) snprintf(ifname, sizeof(ifname), "%s", "NULL"); - else if (ifp != NULL) - (void) snprintf(ifname, sizeof (ifname), "%s", + } else if (ifp != NULL) { + (void) snprintf(ifname, sizeof(ifname), "%s", if_name(ifp)); + } if (error != 0) { printf("%s[%s,%d]: ifp %s cmd 0x%08lx (%c%c [%lu] " @@ -2967,7 +3171,7 @@ done: } } - return (error); + return error; } static __attribute__((noinline)) int @@ -2979,22 +3183,24 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) struct kev_msg ev_msg; struct net_event_data ev_data; - bzero(&ev_data, sizeof (struct net_event_data)); - bzero(&ev_msg, sizeof (struct kev_msg)); + bzero(&ev_data, sizeof(struct net_event_data)); + bzero(&ev_msg, sizeof(struct kev_msg)); 
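/*
 * Illustrative sketch, not part of the patch: how userland exercises the
 * media path dispatched above. SIOCGIFMEDIA reaches the kernel as the 32-
 * or 64-bit variant and, after this change, has its result filtered
 * through compat_ifmediareq(); SIOCGIFXMEDIA returns the raw extended
 * words. The usual two-pass dance: first learn ifm_count, then supply
 * ifm_ulist. BSD/macOS only; "en0" is just an example interface name.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if_media.h>

int
main(void)
{
        struct ifmediareq ifmr;
        int *ulist = NULL;
        int s = socket(AF_INET, SOCK_DGRAM, 0);

        if (s < 0) {
                return 1;
        }
        memset(&ifmr, 0, sizeof(ifmr));
        strlcpy(ifmr.ifm_name, "en0", sizeof(ifmr.ifm_name));

        /* pass 1: ifm_ulist is NULL, the kernel only fills in ifm_count */
        if (ioctl(s, SIOCGIFMEDIA, &ifmr) < 0) {
                goto out;
        }
        printf("current %#x active %#x, %d media words\n",
            ifmr.ifm_current, ifmr.ifm_active, ifmr.ifm_count);

        /* pass 2: hand the kernel a buffer for the full media list */
        if (ifmr.ifm_count > 0) {
                ulist = calloc(ifmr.ifm_count, sizeof(int));
                if (ulist == NULL) {
                        goto out;
                }
                ifmr.ifm_ulist = ulist;
                if (ioctl(s, SIOCGIFMEDIA, &ifmr) == 0) {
                        for (int i = 0; i < ifmr.ifm_count; i++) {
                                printf("  media[%d] = %#x\n", i, ulist[i]);
                        }
                }
        }
out:
        free(ulist);
        close(s);
        return 0;
}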
switch (cmd) { case SIOCIFCREATE: case SIOCIFCREATE2: error = proc_suser(p); - if (error) - return (error); - return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), - cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL)); + if (error) { + return error; + } + return if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name), + cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL); case SIOCIFDESTROY: error = proc_suser(p); - if (error) - return (error); - return (if_clone_destroy(ifr->ifr_name)); + if (error) { + return error; + } + return if_clone_destroy(ifr->ifr_name); } /* @@ -3005,8 +3211,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) ifp = ifunit(ifr->ifr_name); dlil_if_unlock(); - if (ifp == NULL) - return (ENXIO); + if (ifp == NULL) { + return ENXIO; + } switch (cmd) { case SIOCGIFFLAGS: @@ -3058,8 +3265,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCSIFFLAGS: error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } (void) ifnet_set_flags(ifp, ifr->ifr_flags, (u_int16_t)~IFF_CANTCHANGE); @@ -3081,8 +3289,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCSIFCAP: error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } if ((ifr->ifr_reqcap & ~ifp->if_capabilities)) { error = EINVAL; @@ -3095,8 +3304,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCSIFMETRIC: error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } ifp->if_metric = ifr->ifr_metric; @@ -3119,12 +3329,14 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCSIFPHYS: error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, (caddr_t)ifr); - if (error != 0) + if (error != 0) { break; + } ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_NETWORK_CLASS; @@ -3147,8 +3359,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) struct ifclassq *ifq = &ifp->if_snd; error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } if (ifp->if_ioctl == NULL) { error = EOPNOTSUPP; @@ -3159,8 +3372,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) break; } error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, (caddr_t)ifr); - if (error != 0) + if (error != 0) { break; + } ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_NETWORK_CLASS; @@ -3199,8 +3413,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCADDMULTI: case SIOCDELMULTI: error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } /* Don't allow group membership on non-multicast interfaces. 
*/ if ((ifp->if_flags & IFF_MULTICAST) == 0) { @@ -3233,8 +3448,9 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) error = if_delmulti_anon(ifp, &ifr->ifr_addr); ev_msg.event_code = KEV_DL_DELMULTI; } - if (error != 0) + if (error != 0) { break; + } ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_NETWORK_CLASS; @@ -3251,20 +3467,43 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) ifnet_touch_lastchange(ifp); break; - case SIOCDIFPHYADDR: case SIOCSIFMEDIA: + error = proc_suser(p); + if (error != 0) { + break; + } + /* + * Silently ignore setting IFM_OTHER + */ + if (ifr->ifr_media == IFM_OTHER) { + os_log_info(OS_LOG_DEFAULT, + "%s: %s SIOCSIFMEDIA ignore IFM_OTHER", + __func__, ifp->if_xname); + error = 0; + break; + } + error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, (caddr_t)ifr); + if (error != 0) { + break; + } + ifnet_touch_lastchange(ifp); + break; + + case SIOCDIFPHYADDR: case SIOCSIFGENERIC: case SIOCSIFLLADDR: case SIOCSIFALTMTU: case SIOCSIFVLAN: case SIOCSIFBOND: error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, (caddr_t)ifr); - if (error != 0) + if (error != 0) { break; + } ifnet_touch_lastchange(ifp); break; @@ -3277,12 +3516,12 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) break; } /* If larger than 14-bytes we'll need another mechanism */ - if (sdl->sdl_alen > sizeof (ifr->ifr_addr.sa_data)) { + if (sdl->sdl_alen > sizeof(ifr->ifr_addr.sa_data)) { error = EMSGSIZE; break; } /* Follow the same convention used by SIOCSIFLLADDR */ - bzero(&ifr->ifr_addr, sizeof (ifr->ifr_addr)); + bzero(&ifr->ifr_addr, sizeof(ifr->ifr_addr)); ifr->ifr_addr.sa_family = AF_LINK; ifr->ifr_addr.sa_len = sdl->sdl_alen; error = ifnet_guarded_lladdr_copy_bytes(ifp, @@ -3356,10 +3595,11 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCGIFEXPENSIVE: ifnet_lock_shared(ifp); - if (ifp->if_eflags & IFEF_EXPENSIVE) + if (ifp->if_eflags & IFEF_EXPENSIVE) { ifr->ifr_expensive = 1; - else + } else { ifr->ifr_expensive = 0; + } ifnet_lock_done(ifp); break; @@ -3368,13 +3608,15 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) struct ifnet *difp; if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } ifnet_lock_exclusive(ifp); - if (ifr->ifr_expensive) + if (ifr->ifr_expensive) { ifp->if_eflags |= IFEF_EXPENSIVE; - else + } else { ifp->if_eflags &= ~IFEF_EXPENSIVE; + } ifnet_lock_done(ifp); /* * Update the expensive bit in the delegated interface @@ -3386,7 +3628,6 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) if (difp->if_delegated.ifp == ifp) { difp->if_delegated.expensive = ifp->if_eflags & IFEF_EXPENSIVE ? 
1 : 0; - } ifnet_lock_done(difp); } @@ -3396,22 +3637,25 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCGIF2KCL: ifnet_lock_shared(ifp); - if (ifp->if_eflags & IFEF_2KCL) + if (ifp->if_eflags & IFEF_2KCL) { ifr->ifr_2kcl = 1; - else + } else { ifr->ifr_2kcl = 0; + } ifnet_lock_done(ifp); break; case SIOCSIF2KCL: if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } ifnet_lock_exclusive(ifp); - if (ifr->ifr_2kcl) + if (ifr->ifr_2kcl) { ifp->if_eflags |= IFEF_2KCL; - else + } else { ifp->if_eflags &= ~IFEF_2KCL; + } ifnet_lock_done(ifp); break; case SIOCGSTARTDELAY: @@ -3447,17 +3691,18 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) ifr->ifr_addr.sa_len = 16; } #else - if (ifr->ifr_addr.sa_len == 0) + if (ifr->ifr_addr.sa_len == 0) { ifr->ifr_addr.sa_len = 16; + } #endif } else if (cmd == OSIOCGIFADDR) { - cmd = SIOCGIFADDR; /* struct ifreq */ + cmd = SIOCGIFADDR; /* struct ifreq */ } else if (cmd == OSIOCGIFDSTADDR) { - cmd = SIOCGIFDSTADDR; /* struct ifreq */ + cmd = SIOCGIFDSTADDR; /* struct ifreq */ } else if (cmd == OSIOCGIFBRDADDR) { - cmd = SIOCGIFBRDADDR; /* struct ifreq */ + cmd = SIOCGIFBRDADDR; /* struct ifreq */ } else if (cmd == OSIOCGIFNETMASK) { - cmd = SIOCGIFNETMASK; /* struct ifreq */ + cmd = SIOCGIFNETMASK; /* struct ifreq */ } socket_lock(so, 1); @@ -3471,16 +3716,19 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case OSIOCGIFBRDADDR: case OSIOCGIFNETMASK: bcopy(&ifr->ifr_addr.sa_family, &ifr->ifr_addr, - sizeof (u_short)); + sizeof(u_short)); } if (cmd == SIOCSIFKPI) { int temperr = proc_suser(p); - if (temperr != 0) + if (temperr != 0) { error = temperr; + } } - - if (error == EOPNOTSUPP || error == ENOTSUP) { + // Don't allow to call SIOCSIFADDR and SIOCSIFDSTADDR + // with ifreq as the code expects ifaddr + if ((error == EOPNOTSUPP || error == ENOTSUP) && + !(cmd == SIOCSIFADDR || cmd == SIOCSIFDSTADDR)) { error = ifnet_ioctl(ifp, SOCK_DOM(so), cmd, (caddr_t)ifr); } @@ -3492,58 +3740,66 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) break; case SIOCSIFINTERFACESTATE: if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } error = if_state_update(ifp, &ifr->ifr_interface_state); break; case SIOCSIFPROBECONNECTIVITY: if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } error = if_probe_connectivity(ifp, ifr->ifr_probe_connectivity); break; case SIOCGIFPROBECONNECTIVITY: if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); - if (ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } + if (ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) { ifr->ifr_probe_connectivity = 1; - else + } else { ifr->ifr_probe_connectivity = 0; + } break; case SIOCGECNMODE: - if ((ifp->if_eflags & (IFEF_ECN_ENABLE|IFEF_ECN_DISABLE)) == - IFEF_ECN_ENABLE) + if ((ifp->if_eflags & (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE)) == + IFEF_ECN_ENABLE) { ifr->ifr_ecn_mode = IFRTYPE_ECN_ENABLE; - else if ((ifp->if_eflags & (IFEF_ECN_ENABLE|IFEF_ECN_DISABLE)) == - IFEF_ECN_DISABLE) + } else if ((ifp->if_eflags & (IFEF_ECN_ENABLE | 
IFEF_ECN_DISABLE)) == + IFEF_ECN_DISABLE) { ifr->ifr_ecn_mode = IFRTYPE_ECN_DISABLE; - else + } else { ifr->ifr_ecn_mode = IFRTYPE_ECN_DEFAULT; + } break; case SIOCSECNMODE: if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } if (ifr->ifr_ecn_mode == IFRTYPE_ECN_DEFAULT) { - ifp->if_eflags &= ~(IFEF_ECN_ENABLE|IFEF_ECN_DISABLE); + ifp->if_eflags &= ~(IFEF_ECN_ENABLE | IFEF_ECN_DISABLE); } else if (ifr->ifr_ecn_mode == IFRTYPE_ECN_ENABLE) { ifp->if_eflags |= IFEF_ECN_ENABLE; ifp->if_eflags &= ~IFEF_ECN_DISABLE; } else if (ifr->ifr_ecn_mode == IFRTYPE_ECN_DISABLE) { ifp->if_eflags |= IFEF_ECN_DISABLE; ifp->if_eflags &= ~IFEF_ECN_ENABLE; - } else + } else { error = EINVAL; + } break; case SIOCSIFTIMESTAMPENABLE: case SIOCSIFTIMESTAMPDISABLE: error = proc_suser(p); - if (error != 0) + if (error != 0) { break; + } ifnet_lock_exclusive(ifp); if ((cmd == SIOCSIFTIMESTAMPENABLE && @@ -3553,10 +3809,11 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) ifnet_lock_done(ifp); break; } - if (cmd == SIOCSIFTIMESTAMPENABLE) + if (cmd == SIOCSIFTIMESTAMPENABLE) { ifp->if_xflags |= IFXF_TIMESTAMP_ENABLED; - else + } else { ifp->if_xflags &= ~IFXF_TIMESTAMP_ENABLED; + } ifnet_lock_done(ifp); /* * Pass the setting to the interface if it supports either @@ -3569,16 +3826,18 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) } break; case SIOCGIFTIMESTAMPENABLED: { - if ((ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0) + if ((ifp->if_xflags & IFXF_TIMESTAMP_ENABLED) != 0) { ifr->ifr_intval = 1; - else + } else { ifr->ifr_intval = 0; + } break; } case SIOCSQOSMARKINGMODE: if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } error = if_set_qosmarking_mode(ifp, ifr->ifr_qosmarking_mode); break; @@ -3588,17 +3847,19 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) case SIOCSQOSMARKINGENABLED: if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); - if (ifr->ifr_qosmarking_enabled != 0) + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } + if (ifr->ifr_qosmarking_enabled != 0) { ifp->if_eflags |= IFEF_QOSMARKING_ENABLED; - else + } else { ifp->if_eflags &= ~IFEF_QOSMARKING_ENABLED; + } break; case SIOCGQOSMARKINGENABLED: ifr->ifr_qosmarking_enabled = - (ifp->if_eflags & IFEF_QOSMARKING_ENABLED) ? 1 : 0; + (ifp->if_eflags & IFEF_QOSMARKING_ENABLED) ? 
1 : 0; break; case SIOCSIFDISABLEOUTPUT: @@ -3616,29 +3877,34 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) break; case SIOCSIFLOWINTERNET: if ((error = priv_check_cred(kauth_cred_get(), - PRIV_NET_INTERFACE_CONTROL, 0)) != 0) - return (error); + PRIV_NET_INTERFACE_CONTROL, 0)) != 0) { + return error; + } ifnet_lock_exclusive(ifp); - if (ifr->ifr_low_internet & IFRTYPE_LOW_INTERNET_ENABLE_UL) + if (ifr->ifr_low_internet & IFRTYPE_LOW_INTERNET_ENABLE_UL) { ifp->if_xflags |= IFXF_LOW_INTERNET_UL; - else + } else { ifp->if_xflags &= ~(IFXF_LOW_INTERNET_UL); - if (ifr->ifr_low_internet & IFRTYPE_LOW_INTERNET_ENABLE_DL) + } + if (ifr->ifr_low_internet & IFRTYPE_LOW_INTERNET_ENABLE_DL) { ifp->if_xflags |= IFXF_LOW_INTERNET_DL; - else + } else { ifp->if_xflags &= ~(IFXF_LOW_INTERNET_DL); + } ifnet_lock_done(ifp); break; case SIOCGIFLOWINTERNET: ifnet_lock_shared(ifp); ifr->ifr_low_internet = 0; - if (ifp->if_xflags & IFXF_LOW_INTERNET_UL) + if (ifp->if_xflags & IFXF_LOW_INTERNET_UL) { ifr->ifr_low_internet |= IFRTYPE_LOW_INTERNET_ENABLE_UL; - if (ifp->if_xflags & IFXF_LOW_INTERNET_DL) + } + if (ifp->if_xflags & IFXF_LOW_INTERNET_DL) { ifr->ifr_low_internet |= IFRTYPE_LOW_INTERNET_ENABLE_DL; + } ifnet_lock_done(ifp); break; case SIOCGIFLOWPOWER: @@ -3657,7 +3923,7 @@ ifioctl_ifreq(struct socket *so, u_long cmd, struct ifreq *ifr, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } int @@ -3668,7 +3934,7 @@ ifioctllocked(struct socket *so, u_long cmd, caddr_t data, struct proc *p) socket_unlock(so, 0); error = ifioctl(so, cmd, data, p); socket_lock(so, 0); - return (error); + return error; } /* @@ -3679,7 +3945,7 @@ ifioctllocked(struct socket *so, u_long cmd, caddr_t data, struct proc *p) */ errno_t ifnet_set_promiscuous( - ifnet_t ifp, + ifnet_t ifp, int pswitch) { int error = 0; @@ -3690,10 +3956,11 @@ ifnet_set_promiscuous( oldflags = ifp->if_flags; ifp->if_pcount += pswitch ? 1 : -1; - if (ifp->if_pcount > 0) + if (ifp->if_pcount > 0) { ifp->if_flags |= IFF_PROMISC; - else + } else { ifp->if_flags &= ~IFF_PROMISC; + } newflags = ifp->if_flags; ifnet_lock_done(ifp); @@ -3706,10 +3973,11 @@ ifnet_set_promiscuous( ifnet_lock_exclusive(ifp); // revert the flags ifp->if_pcount -= pswitch ? 1 : -1; - if (ifp->if_pcount > 0) - ifp->if_flags |= IFF_PROMISC; - else - ifp->if_flags &= ~IFF_PROMISC; + if (ifp->if_pcount > 0) { + ifp->if_flags |= IFF_PROMISC; + } else { + ifp->if_flags &= ~IFF_PROMISC; + } ifnet_lock_done(ifp); } } @@ -3720,7 +3988,7 @@ ifnet_set_promiscuous( (newflags & IFF_PROMISC) != 0 ? "enable" : "disable", error != 0 ? " failed" : " succeeded"); } - return (error); + return error; } /* @@ -3746,18 +4014,18 @@ ifconf(u_long cmd, user_addr_t ifrp, int *ret_space) * Zero the ifr buffer to make sure we don't * disclose the contents of the stack. 
*/ - bzero(&ifr, sizeof (struct ifreq)); + bzero(&ifr, sizeof(struct ifreq)); space = *ret_space; ifnet_head_lock_shared(); - for (ifp = ifnet_head.tqh_first; space > sizeof (ifr) && + for (ifp = ifnet_head.tqh_first; space > sizeof(ifr) && ifp; ifp = ifp->if_link.tqe_next) { char workbuf[64]; size_t ifnlen, addrs; - ifnlen = snprintf(workbuf, sizeof (workbuf), + ifnlen = snprintf(workbuf, sizeof(workbuf), "%s", if_name(ifp)); - if (ifnlen + 1 > sizeof (ifr.ifr_name)) { + if (ifnlen + 1 > sizeof(ifr.ifr_name)) { error = ENAMETOOLONG; break; } else { @@ -3768,7 +4036,7 @@ ifconf(u_long cmd, user_addr_t ifrp, int *ret_space) addrs = 0; ifa = ifp->if_addrhead.tqh_first; - for (; space > sizeof (ifr) && ifa; + for (; space > sizeof(ifr) && ifa; ifa = ifa->ifa_link.tqe_next) { struct sockaddr *sa; union { @@ -3781,7 +4049,7 @@ ifconf(u_long cmd, user_addr_t ifrp, int *ret_space) * Make sure to accomodate the largest possible * size of SA(if_lladdr)->sa_len. */ - _CASSERT(sizeof (u) == (SOCK_MAXADDRLEN + 1)); + _CASSERT(sizeof(u) == (SOCK_MAXADDRLEN + 1)); IFA_LOCK(ifa); sa = ifa->ifa_addr; @@ -3803,22 +4071,22 @@ ifconf(u_long cmd, user_addr_t ifrp, int *ret_space) ifr.ifr_addr = *sa; osa->sa_family = sa->sa_family; error = copyout((caddr_t)&ifr, ifrp, - sizeof (ifr)); - ifrp += sizeof (struct ifreq); - } else if (sa->sa_len <= sizeof (*sa)) { + sizeof(ifr)); + ifrp += sizeof(struct ifreq); + } else if (sa->sa_len <= sizeof(*sa)) { ifr.ifr_addr = *sa; error = copyout((caddr_t)&ifr, ifrp, - sizeof (ifr)); - ifrp += sizeof (struct ifreq); + sizeof(ifr)); + ifrp += sizeof(struct ifreq); } else { if (space < - sizeof (ifr) + sa->sa_len - sizeof (*sa)) { + sizeof(ifr) + sa->sa_len - sizeof(*sa)) { IFA_UNLOCK(ifa); break; } - space -= sa->sa_len - sizeof (*sa); + space -= sa->sa_len - sizeof(*sa); error = copyout((caddr_t)&ifr, ifrp, - sizeof (ifr.ifr_name)); + sizeof(ifr.ifr_name)); if (error == 0) { error = copyout((caddr_t)sa, (ifrp + offsetof(struct ifreq, ifr_addr)), @@ -3828,27 +4096,30 @@ ifconf(u_long cmd, user_addr_t ifrp, int *ret_space) ifr_addr)); } IFA_UNLOCK(ifa); - if (error) + if (error) { break; - space -= sizeof (ifr); + } + space -= sizeof(ifr); } ifnet_lock_done(ifp); - if (error) + if (error) { break; + } if (!addrs) { - bzero((caddr_t)&ifr.ifr_addr, sizeof (ifr.ifr_addr)); - error = copyout((caddr_t)&ifr, ifrp, sizeof (ifr)); - if (error) + bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr)); + error = copyout((caddr_t)&ifr, ifrp, sizeof(ifr)); + if (error) { break; - space -= sizeof (ifr); - ifrp += sizeof (struct ifreq); + } + space -= sizeof(ifr); + ifrp += sizeof(struct ifreq); } } ifnet_head_done(); *ret_space -= space; net_thread_marks_pop(marks); - return (error); + return error; } /* @@ -3858,7 +4129,7 @@ int if_allmulti(struct ifnet *ifp, int onswitch) { int error = 0; - int modified = 0; + int modified = 0; ifnet_lock_exclusive(ifp); @@ -3878,12 +4149,14 @@ if_allmulti(struct ifnet *ifp, int onswitch) } ifnet_lock_done(ifp); - if (modified) + if (modified) { error = ifnet_ioctl(ifp, 0, SIOCSIFFLAGS, NULL); + } - if (error == 0) + if (error == 0) { rt_ifmsg(ifp); - return (error); + } + return error; } static struct ifmultiaddr * @@ -3903,7 +4176,7 @@ ifma_alloc(int how) ifma->ifma_trace = ifma_trace; } } - return (ifma); + return ifma; } static void @@ -3987,10 +4260,11 @@ ifma_trace(struct ifmultiaddr *ifma, int refhold) void ifma_addref(struct ifmultiaddr *ifma, int locked) { - if (!locked) + if (!locked) { IFMA_LOCK(ifma); - else + } else { 
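/*
 * Illustrative sketch, not part of the patch: walking the buffer that the
 * ifconf() loop above produces for SIOCGIFCONF. Records are variable
 * length: each starts as a struct ifreq, but when the embedded sockaddr's
 * sa_len exceeds sizeof(ifr_addr) it spills past the nominal struct, so
 * the cursor must advance by the larger of the two. BSD/macOS only
 * (relies on sa_len).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

int
main(void)
{
        char buf[8192];
        struct ifconf ifc;
        int s = socket(AF_INET, SOCK_DGRAM, 0);

        if (s < 0) {
                return 1;
        }
        memset(&ifc, 0, sizeof(ifc));
        ifc.ifc_len = sizeof(buf);
        ifc.ifc_buf = buf;
        if (ioctl(s, SIOCGIFCONF, &ifc) < 0) {
                close(s);
                return 1;
        }
        for (char *p = buf; p < buf + ifc.ifc_len;) {
                struct ifreq *ifr = (struct ifreq *)(void *)p;
                size_t salen = ifr->ifr_addr.sa_len;

                printf("%-8s family %u\n", ifr->ifr_name,
                    (unsigned)ifr->ifr_addr.sa_family);
                /* advance past the name plus the (possibly oversized) addr */
                if (salen < sizeof(ifr->ifr_addr)) {
                        salen = sizeof(ifr->ifr_addr);
                }
                p += IFNAMSIZ + salen;
        }
        close(s);
        return 0;
}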
IFMA_LOCK_ASSERT_HELD(ifma); + } if (++ifma->ifma_refcount == 0) { panic("%s: ifma=%p wraparound refcnt", __func__, ifma); @@ -3998,8 +4272,9 @@ ifma_addref(struct ifmultiaddr *ifma, int locked) } else if (ifma->ifma_trace != NULL) { (*ifma->ifma_trace)(ifma, TRUE); } - if (!locked) + if (!locked) { IFMA_UNLOCK(ifma); + } } void @@ -4026,10 +4301,11 @@ ifma_remref(struct ifmultiaddr *ifma) ifma->ifma_ifp = NULL; ifma->ifma_ll = NULL; IFMA_UNLOCK(ifma); - ifma_free(ifma); /* deallocate it */ + ifma_free(ifma); /* deallocate it */ - if (ll != NULL) + if (ll != NULL) { IFMA_REMREF(ll); + } } static void @@ -4091,14 +4367,16 @@ if_detach_ifma(struct ifnet *ifp, struct ifmultiaddr *ifma, int anon) if (anon) { --ifma->ifma_anoncnt; - if (ifma->ifma_anoncnt > 0) - return (0); + if (ifma->ifma_anoncnt > 0) { + return 0; + } ifma->ifma_flags &= ~IFMAF_ANONYMOUS; } --ifma->ifma_reqcnt; - if (ifma->ifma_reqcnt > 0) - return (0); + if (ifma->ifma_reqcnt > 0) { + return 0; + } if (ifma->ifma_protospec != NULL) { panic("%s: Protospec not NULL for ifma=%p", __func__, ifma); @@ -4122,8 +4400,9 @@ if_detach_ifma(struct ifnet *ifp, struct ifmultiaddr *ifma, int anon) */ ifma->ifma_debug &= ~IFD_ATTACHED; LIST_REMOVE(ifma, ifma_link); - if (LIST_EMPTY(&ifp->if_multiaddrs)) + if (LIST_EMPTY(&ifp->if_multiaddrs)) { ifp->if_updatemcasts = 0; + } if (ifma->ifma_debug & IFD_DEBUG) { /* Become a regular mutex, just in case */ @@ -4135,7 +4414,7 @@ if_detach_ifma(struct ifnet *ifp, struct ifmultiaddr *ifma, int anon) ifma->ifma_debug |= IFD_TRASHED; } - return (1); + return 1; } /* @@ -4177,9 +4456,9 @@ if_addmulti_doesexist(struct ifnet *ifp, const struct sockaddr *sa, IFMA_ADDREF_LOCKED(ifma); } IFMA_UNLOCK(ifma); - return (0); + return 0; } - return (ENOENT); + return ENOENT; } /* @@ -4188,52 +4467,55 @@ if_addmulti_doesexist(struct ifnet *ifp, const struct sockaddr *sa, static struct sockaddr * copy_and_normalize(const struct sockaddr *original) { - int alen = 0; - const u_char *aptr = NULL; - struct sockaddr *copy = NULL; - struct sockaddr_dl *sdl_new = NULL; - int len = 0; + int alen = 0; + const u_char *aptr = NULL; + struct sockaddr *copy = NULL; + struct sockaddr_dl *sdl_new = NULL; + int len = 0; if (original->sa_family != AF_LINK && original->sa_family != AF_UNSPEC) { /* Just make a copy */ MALLOC(copy, struct sockaddr *, original->sa_len, M_IFADDR, M_WAITOK); - if (copy != NULL) + if (copy != NULL) { bcopy(original, copy, original->sa_len); - return (copy); + } + return copy; } switch (original->sa_family) { - case AF_LINK: { - const struct sockaddr_dl *sdl_original = - (struct sockaddr_dl *)(uintptr_t)(size_t)original; - - if (sdl_original->sdl_nlen + sdl_original->sdl_alen + - sdl_original->sdl_slen + - offsetof(struct sockaddr_dl, sdl_data) > - sdl_original->sdl_len) - return (NULL); - - alen = sdl_original->sdl_alen; - aptr = CONST_LLADDR(sdl_original); + case AF_LINK: { + const struct sockaddr_dl *sdl_original = + (struct sockaddr_dl *)(uintptr_t)(size_t)original; + + if (sdl_original->sdl_nlen + sdl_original->sdl_alen + + sdl_original->sdl_slen + + offsetof(struct sockaddr_dl, sdl_data) > + sdl_original->sdl_len) { + return NULL; } - break; - case AF_UNSPEC: { - if (original->sa_len < ETHER_ADDR_LEN + - offsetof(struct sockaddr, sa_data)) { - return (NULL); - } + alen = sdl_original->sdl_alen; + aptr = CONST_LLADDR(sdl_original); + } + break; - alen = ETHER_ADDR_LEN; - aptr = (const u_char *)original->sa_data; + case AF_UNSPEC: { + if (original->sa_len < ETHER_ADDR_LEN + + offsetof(struct 
sockaddr, sa_data)) { + return NULL; } - break; + + alen = ETHER_ADDR_LEN; + aptr = (const u_char *)original->sa_data; + } + break; } - if (alen == 0 || aptr == NULL) - return (NULL); + if (alen == 0 || aptr == NULL) { + return NULL; + } len = alen + offsetof(struct sockaddr_dl, sdl_data); MALLOC(sdl_new, struct sockaddr_dl *, len, M_IFADDR, M_WAITOK); @@ -4246,7 +4528,7 @@ copy_and_normalize(const struct sockaddr *original) bcopy(aptr, LLADDR(sdl_new), alen); } - return ((struct sockaddr *)sdl_new); + return (struct sockaddr *)sdl_new; } /* @@ -4257,7 +4539,7 @@ int if_addmulti(struct ifnet *ifp, const struct sockaddr *sa, struct ifmultiaddr **retifma) { - return (if_addmulti_common(ifp, sa, retifma, 0)); + return if_addmulti_common(ifp, sa, retifma, 0); } /* @@ -4269,7 +4551,7 @@ int if_addmulti_anon(struct ifnet *ifp, const struct sockaddr *sa, struct ifmultiaddr **retifma) { - return (if_addmulti_common(ifp, sa, retifma, 1)); + return if_addmulti_common(ifp, sa, retifma, 1); } /* @@ -4329,8 +4611,9 @@ if_addmulti_common(struct ifnet *ifp, const struct sockaddr *sa, /* If the address is already present, return a new reference to it */ error = if_addmulti_doesexist(ifp, sa, retifma, anon); ifnet_lock_done(ifp); - if (error == 0) + if (error == 0) { goto cleanup; + } /* * The address isn't already present; give the link layer a chance @@ -4338,7 +4621,7 @@ if_addmulti_common(struct ifnet *ifp, const struct sockaddr *sa, * maps to, if it isn't one already. */ error = dlil_resolve_multi(ifp, sa, (struct sockaddr *)&storage, - sizeof (storage)); + sizeof(storage)); if (error == 0 && storage.ss_len != 0) { llsa = copy_and_normalize((struct sockaddr *)&storage); if (llsa == NULL) { @@ -4354,10 +4637,11 @@ if_addmulti_common(struct ifnet *ifp, const struct sockaddr *sa, } /* to be similar to FreeBSD */ - if (error == EOPNOTSUPP) + if (error == EOPNOTSUPP) { error = 0; - else if (error != 0) + } else if (error != 0) { goto cleanup; + } /* Allocate while we aren't holding any locks */ if (dupsa == NULL) { @@ -4384,7 +4668,7 @@ if_addmulti_common(struct ifnet *ifp, const struct sockaddr *sa, } if (llifma != NULL) { - VERIFY(!anon); /* must not get here if "anonymous" */ + VERIFY(!anon); /* must not get here if "anonymous" */ if (if_addmulti_doesexist(ifp, llsa, &ifma->ifma_ll, 0) == 0) { FREE(llsa, M_IFADDR); llsa = NULL; @@ -4411,10 +4695,10 @@ if_addmulti_common(struct ifnet *ifp, const struct sockaddr *sa, ifma->ifma_ifp = ifp; IFMA_LOCK(ifma); if_attach_ifma(ifp, ifma, anon); - IFMA_ADDREF_LOCKED(ifma); /* for this routine */ + IFMA_ADDREF_LOCKED(ifma); /* for this routine */ if (retifma != NULL) { *retifma = ifma; - IFMA_ADDREF_LOCKED(*retifma); /* for caller */ + IFMA_ADDREF_LOCKED(*retifma); /* for caller */ } lladdr = (ifma->ifma_addr->sa_family == AF_UNSPEC || ifma->ifma_addr->sa_family == AF_LINK); @@ -4422,32 +4706,38 @@ if_addmulti_common(struct ifnet *ifp, const struct sockaddr *sa, ifnet_lock_done(ifp); rt_newmaddrmsg(RTM_NEWMADDR, ifma); - IFMA_REMREF(ifma); /* for this routine */ + IFMA_REMREF(ifma); /* for this routine */ /* * We are certain we have added something, so call down to the * interface to let them know about it. Do this only for newly- * added AF_LINK/AF_UNSPEC address in the if_multiaddrs set. 
*/ - if (lladdr || ll_firstref) + if (lladdr || ll_firstref) { (void) ifnet_ioctl(ifp, 0, SIOCADDMULTI, NULL); + } - if (ifp->if_updatemcasts > 0) + if (ifp->if_updatemcasts > 0) { ifp->if_updatemcasts = 0; + } - return (0); + return 0; cleanup: - if (ifma != NULL) + if (ifma != NULL) { ifma_free(ifma); - if (dupsa != NULL) + } + if (dupsa != NULL) { FREE(dupsa, M_IFADDR); - if (llifma != NULL) + } + if (llifma != NULL) { ifma_free(llifma); - if (llsa != NULL) + } + if (llsa != NULL) { FREE(llsa, M_IFADDR); + } - return (error); + return error; } /* @@ -4457,7 +4747,7 @@ cleanup: int if_delmulti(struct ifnet *ifp, const struct sockaddr *sa) { - return (if_delmulti_common(NULL, ifp, sa, 0)); + return if_delmulti_common(NULL, ifp, sa, 0); } /* @@ -4467,7 +4757,7 @@ if_delmulti(struct ifnet *ifp, const struct sockaddr *sa) int if_delmulti_ifma(struct ifmultiaddr *ifma) { - return (if_delmulti_common(ifma, NULL, NULL, 0)); + return if_delmulti_common(ifma, NULL, NULL, 0); } /* @@ -4478,7 +4768,7 @@ if_delmulti_ifma(struct ifmultiaddr *ifma) int if_delmulti_anon(struct ifnet *ifp, const struct sockaddr *sa) { - return (if_delmulti_common(NULL, ifp, sa, 1)); + return if_delmulti_common(NULL, ifp, sa, 1); } /* @@ -4490,21 +4780,23 @@ static int if_delmulti_common(struct ifmultiaddr *ifma, struct ifnet *ifp, const struct sockaddr *sa, int anon) { - struct sockaddr *dupsa = NULL; - int lastref, ll_lastref = 0, lladdr; - struct ifmultiaddr *ll = NULL; + struct sockaddr *dupsa = NULL; + int lastref, ll_lastref = 0, lladdr; + struct ifmultiaddr *ll = NULL; /* sanity check for callers */ VERIFY(ifma != NULL || (ifp != NULL && sa != NULL)); - if (ifma != NULL) + if (ifma != NULL) { ifp = ifma->ifma_ifp; + } if (sa != NULL && (sa->sa_family == AF_LINK || sa->sa_family == AF_UNSPEC)) { dupsa = copy_and_normalize(sa); - if (dupsa == NULL) - return (ENOMEM); + if (dupsa == NULL) { + return ENOMEM; + } sa = dupsa; } @@ -4524,16 +4816,17 @@ if_delmulti_common(struct ifmultiaddr *ifma, struct ifnet *ifp, break; } if (ifma == NULL) { - if (dupsa != NULL) + if (dupsa != NULL) { FREE(dupsa, M_IFADDR); + } ifnet_lock_done(ifp); - return (ENOENT); + return ENOENT; } } else { IFMA_LOCK(ifma); } IFMA_LOCK_ASSERT_HELD(ifma); - IFMA_ADDREF_LOCKED(ifma); /* for this routine */ + IFMA_ADDREF_LOCKED(ifma); /* for this routine */ lastref = if_detach_ifma(ifp, ifma, anon); VERIFY(!lastref || (!(ifma->ifma_debug & IFD_ATTACHED) && ifma->ifma_reqcnt == 0)); @@ -4549,8 +4842,9 @@ if_delmulti_common(struct ifmultiaddr *ifma, struct ifnet *ifp, } ifnet_lock_done(ifp); - if (lastref) + if (lastref) { rt_newmaddrmsg(RTM_DELMADDR, ifma); + } if ((ll == NULL && lastref && lladdr) || ll_lastref) { /* @@ -4562,16 +4856,18 @@ if_delmulti_common(struct ifmultiaddr *ifma, struct ifnet *ifp, ifnet_ioctl(ifp, 0, SIOCDELMULTI, NULL); } - if (lastref) - IFMA_REMREF(ifma); /* for if_multiaddrs list */ - if (ll_lastref) - IFMA_REMREF(ll); /* for if_multiaddrs list */ - - IFMA_REMREF(ifma); /* for this routine */ - if (dupsa != NULL) + if (lastref) { + IFMA_REMREF(ifma); /* for if_multiaddrs list */ + } + if (ll_lastref) { + IFMA_REMREF(ll); /* for if_multiaddrs list */ + } + IFMA_REMREF(ifma); /* for this routine */ + if (dupsa != NULL) { FREE(dupsa, M_IFADDR); + } - return (0); + return 0; } /* @@ -4582,8 +4878,8 @@ int if_down_all(void) { struct ifnet **ifp; - u_int32_t count; - u_int32_t i; + u_int32_t count; + u_int32_t i; if (ifnet_list_get_all(IFNET_FAMILY_ANY, &ifp, &count) == 0) { for (i = 0; i < count; i++) { @@ -4593,7 +4889,7 @@ 
if_down_all(void) ifnet_list_free(ifp); } - return (0); + return 0; } /* @@ -4614,12 +4910,13 @@ if_down_all(void) static int if_rtdel(struct radix_node *rn, void *arg) { - struct rtentry *rt = (struct rtentry *)rn; - struct ifnet *ifp = arg; - int err; + struct rtentry *rt = (struct rtentry *)rn; + struct ifnet *ifp = arg; + int err; - if (rt == NULL) - return (0); + if (rt == NULL) { + return 0; + } /* * Checking against RTF_UP protects against walktree * recursion problems with cloned routes. @@ -4640,7 +4937,7 @@ if_rtdel(struct radix_node *rn, void *arg) } else { RT_UNLOCK(rt); } - return (0); + return 0; } /* @@ -4653,7 +4950,7 @@ if_rtproto_del(struct ifnet *ifp, int protocol) struct radix_node_head *rnh; if ((protocol <= AF_MAX) && (protocol >= 0) && - ((rnh = rt_tables[protocol]) != NULL) && (ifp != NULL)) { + ((rnh = rt_tables[protocol]) != NULL) && (ifp != NULL)) { lck_mtx_lock(rnh_lock); (void) rnh->rnh_walktree(rnh, if_rtdel, ifp); lck_mtx_unlock(rnh_lock); @@ -4685,7 +4982,7 @@ if_rtmtu(struct radix_node *rn, void *arg) } RT_UNLOCK(rt); - return (0); + return 0; } /* @@ -4700,8 +4997,9 @@ if_rtmtu_update(struct ifnet *ifp) int p; for (p = 0; p < AF_MAX + 1; p++) { - if ((rnh = rt_tables[p]) == NULL) + if ((rnh = rt_tables[p]) == NULL) { continue; + } lck_mtx_lock(rnh_lock); (void) rnh->rnh_walktree(rnh, if_rtmtu, ifp); @@ -4715,12 +5013,12 @@ if_data_internal_to_if_data(struct ifnet *ifp, const struct if_data_internal *if_data_int, struct if_data *if_data) { #pragma unused(ifp) -#define COPYFIELD(fld) if_data->fld = if_data_int->fld -#define COPYFIELD32(fld) if_data->fld = (u_int32_t)(if_data_int->fld) +#define COPYFIELD(fld) if_data->fld = if_data_int->fld +#define COPYFIELD32(fld) if_data->fld = (u_int32_t)(if_data_int->fld) /* compiler will cast down to 32-bit */ -#define COPYFIELD32_ATOMIC(fld) do { \ - atomic_get_64(if_data->fld, \ - (u_int64_t *)(void *)(uintptr_t)&if_data_int->fld); \ +#define COPYFIELD32_ATOMIC(fld) do { \ + atomic_get_64(if_data->fld, \ + (u_int64_t *)(void *)(uintptr_t)&if_data_int->fld); \ } while (0) COPYFIELD(ifi_type); @@ -4774,10 +5072,10 @@ if_data_internal_to_if_data64(struct ifnet *ifp, struct if_data64 *if_data64) { #pragma unused(ifp) -#define COPYFIELD64(fld) if_data64->fld = if_data_int->fld -#define COPYFIELD64_ATOMIC(fld) do { \ - atomic_get_64(if_data64->fld, \ - (u_int64_t *)(void *)(uintptr_t)&if_data_int->fld); \ +#define COPYFIELD64(fld) if_data64->fld = if_data_int->fld +#define COPYFIELD64_ATOMIC(fld) do { \ + atomic_get_64(if_data64->fld, \ + (u_int64_t *)(void *)(uintptr_t)&if_data_int->fld); \ } while (0) COPYFIELD64(ifi_type); @@ -4823,12 +5121,12 @@ __private_extern__ void if_copy_traffic_class(struct ifnet *ifp, struct if_traffic_class *if_tc) { -#define COPY_IF_TC_FIELD64_ATOMIC(fld) do { \ - atomic_get_64(if_tc->fld, \ - (u_int64_t *)(void *)(uintptr_t)&ifp->if_tc.fld); \ +#define COPY_IF_TC_FIELD64_ATOMIC(fld) do { \ + atomic_get_64(if_tc->fld, \ + (u_int64_t *)(void *)(uintptr_t)&ifp->if_tc.fld); \ } while (0) - bzero(if_tc, sizeof (*if_tc)); + bzero(if_tc, sizeof(*if_tc)); COPY_IF_TC_FIELD64_ATOMIC(ifi_ibepackets); COPY_IF_TC_FIELD64_ATOMIC(ifi_ibebytes); COPY_IF_TC_FIELD64_ATOMIC(ifi_obepackets); @@ -4856,12 +5154,12 @@ if_copy_traffic_class(struct ifnet *ifp, void if_copy_data_extended(struct ifnet *ifp, struct if_data_extended *if_de) { -#define COPY_IF_DE_FIELD64_ATOMIC(fld) do { \ - atomic_get_64(if_de->fld, \ - (u_int64_t *)(void *)(uintptr_t)&ifp->if_data.fld); \ +#define COPY_IF_DE_FIELD64_ATOMIC(fld) do { \ + 
atomic_get_64(if_de->fld, \ + (u_int64_t *)(void *)(uintptr_t)&ifp->if_data.fld); \ } while (0) - bzero(if_de, sizeof (*if_de)); + bzero(if_de, sizeof(*if_de)); COPY_IF_DE_FIELD64_ATOMIC(ifi_alignerrs); COPY_IF_DE_FIELD64_ATOMIC(ifi_dt_bytes); COPY_IF_DE_FIELD64_ATOMIC(ifi_fpackets); @@ -4873,14 +5171,14 @@ if_copy_data_extended(struct ifnet *ifp, struct if_data_extended *if_de) void if_copy_packet_stats(struct ifnet *ifp, struct if_packet_stats *if_ps) { -#define COPY_IF_PS_TCP_FIELD64_ATOMIC(fld) do { \ - atomic_get_64(if_ps->ifi_tcp_##fld, \ - (u_int64_t *)(void *)(uintptr_t)&ifp->if_tcp_stat->fld); \ +#define COPY_IF_PS_TCP_FIELD64_ATOMIC(fld) do { \ + atomic_get_64(if_ps->ifi_tcp_##fld, \ + (u_int64_t *)(void *)(uintptr_t)&ifp->if_tcp_stat->fld); \ } while (0) -#define COPY_IF_PS_UDP_FIELD64_ATOMIC(fld) do { \ - atomic_get_64(if_ps->ifi_udp_##fld, \ - (u_int64_t *)(void *)(uintptr_t)&ifp->if_udp_stat->fld); \ +#define COPY_IF_PS_UDP_FIELD64_ATOMIC(fld) do { \ + atomic_get_64(if_ps->ifi_udp_##fld, \ + (u_int64_t *)(void *)(uintptr_t)&ifp->if_udp_stat->fld); \ } while (0) COPY_IF_PS_TCP_FIELD64_ATOMIC(badformat); @@ -4914,13 +5212,14 @@ if_copy_packet_stats(struct ifnet *ifp, struct if_packet_stats *if_ps) void if_copy_rxpoll_stats(struct ifnet *ifp, struct if_rxpoll_stats *if_rs) { - bzero(if_rs, sizeof (*if_rs)); - if (!(ifp->if_eflags & IFEF_RXPOLL) || !ifnet_is_attached(ifp, 1)) + bzero(if_rs, sizeof(*if_rs)); + if (!(ifp->if_eflags & IFEF_RXPOLL) || !ifnet_is_attached(ifp, 1)) { return; + } /* by now, ifnet will stay attached so if_inp must be valid */ VERIFY(ifp->if_inp != NULL); - bcopy(&ifp->if_inp->pstats, if_rs, sizeof (*if_rs)); + bcopy(&ifp->if_inp->pstats, if_rs, sizeof(*if_rs)); /* Release the IO refcnt */ ifnet_decr_iorefcnt(ifp); @@ -4929,18 +5228,21 @@ if_copy_rxpoll_stats(struct ifnet *ifp, struct if_rxpoll_stats *if_rs) struct ifaddr * ifa_remref(struct ifaddr *ifa, int locked) { - if (!locked) + if (!locked) { IFA_LOCK_SPIN(ifa); - else + } else { IFA_LOCK_ASSERT_HELD(ifa); + } - if (ifa->ifa_refcnt == 0) + if (ifa->ifa_refcnt == 0) { panic("%s: ifa %p negative refcnt\n", __func__, ifa); - else if (ifa->ifa_trace != NULL) + } else if (ifa->ifa_trace != NULL) { (*ifa->ifa_trace)(ifa, FALSE); + } if (--ifa->ifa_refcnt == 0) { - if (ifa->ifa_debug & IFD_ATTACHED) + if (ifa->ifa_debug & IFD_ATTACHED) { panic("ifa %p attached to ifp is being freed\n", ifa); + } /* * Some interface addresses are allocated either statically * or carved out of a larger block. 
Only free it if it was @@ -4963,19 +5265,21 @@ ifa_remref(struct ifaddr *ifa, int locked) ifa = NULL; } - if (!locked && ifa != NULL) + if (!locked && ifa != NULL) { IFA_UNLOCK(ifa); + } - return (ifa); + return ifa; } void ifa_addref(struct ifaddr *ifa, int locked) { - if (!locked) + if (!locked) { IFA_LOCK_SPIN(ifa); - else + } else { IFA_LOCK_ASSERT_HELD(ifa); + } if (++ifa->ifa_refcnt == 0) { panic("%s: ifa %p wraparound refcnt\n", __func__, ifa); @@ -4983,8 +5287,9 @@ ifa_addref(struct ifaddr *ifa, int locked) } else if (ifa->ifa_trace != NULL) { (*ifa->ifa_trace)(ifa, TRUE); } - if (!locked) + if (!locked) { IFA_UNLOCK(ifa); + } } void @@ -5122,6 +5427,8 @@ ifioctl_cassert(void) case SIOCSIFMEDIA: case SIOCGIFMEDIA32: case SIOCGIFMEDIA64: + case SIOCGIFXMEDIA32: + case SIOCGIFXMEDIA64: case SIOCSIFGENERIC: case SIOCGIFGENERIC: case SIOCRSLVMULTI: @@ -5263,7 +5570,7 @@ uint32_t ifnet_mbuf_packetpreamblelen(struct ifnet *ifp) { #pragma unused(ifp) - return (0); + return 0; } /* The following is used to enqueue work items for interface events */ diff --git a/bsd/net/if.h b/bsd/net/if.h index 1cf0a5f14..4189a82cc 100644 --- a/bsd/net/if.h +++ b/bsd/net/if.h @@ -61,12 +61,12 @@ */ #ifndef _NET_IF_H_ -#define _NET_IF_H_ +#define _NET_IF_H_ #include #include -#define IF_NAMESIZE 16 +#define IF_NAMESIZE 16 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #include @@ -84,65 +84,65 @@ #endif struct if_clonereq { - int ifcr_total; /* total cloners (out) */ - int ifcr_count; /* room for this many in user buffer */ - char *ifcr_buffer; /* buffer for cloner names */ + int ifcr_total; /* total cloners (out) */ + int ifcr_count; /* room for this many in user buffer */ + char *ifcr_buffer; /* buffer for cloner names */ }; #ifdef KERNEL_PRIVATE -#define IF_MAXUNIT 0x7fff /* historical value */ +#define IF_MAXUNIT 0x7fff /* historical value */ struct if_clonereq64 { - int ifcr_total; /* total cloners (out) */ - int ifcr_count; /* room for this many in user buffer */ - user64_addr_t ifcru_buffer __attribute__((aligned(8))); + int ifcr_total; /* total cloners (out) */ + int ifcr_count; /* room for this many in user buffer */ + user64_addr_t ifcru_buffer __attribute__((aligned(8))); }; struct if_clonereq32 { - int ifcr_total; /* total cloners (out) */ - int ifcr_count; /* room for this many in user buffer */ + int ifcr_total; /* total cloners (out) */ + int ifcr_count; /* room for this many in user buffer */ user32_addr_t ifcru_buffer; }; #endif /* KERNEL_PRIVATE */ -#define IFF_UP 0x1 /* interface is up */ -#define IFF_BROADCAST 0x2 /* broadcast address valid */ -#define IFF_DEBUG 0x4 /* turn on debugging */ -#define IFF_LOOPBACK 0x8 /* is a loopback net */ -#define IFF_POINTOPOINT 0x10 /* interface is point-to-point link */ -#define IFF_NOTRAILERS 0x20 /* obsolete: avoid use of trailers */ -#define IFF_RUNNING 0x40 /* resources allocated */ -#define IFF_NOARP 0x80 /* no address resolution protocol */ -#define IFF_PROMISC 0x100 /* receive all packets */ -#define IFF_ALLMULTI 0x200 /* receive all multicast packets */ -#define IFF_OACTIVE 0x400 /* transmission in progress */ -#define IFF_SIMPLEX 0x800 /* can't hear own transmissions */ -#define IFF_LINK0 0x1000 /* per link layer defined bit */ -#define IFF_LINK1 0x2000 /* per link layer defined bit */ -#define IFF_LINK2 0x4000 /* per link layer defined bit */ -#define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */ -#define IFF_MULTICAST 0x8000 /* supports multicast */ +#define IFF_UP 0x1 /* interface is up */ +#define IFF_BROADCAST 
0x2 /* broadcast address valid */ +#define IFF_DEBUG 0x4 /* turn on debugging */ +#define IFF_LOOPBACK 0x8 /* is a loopback net */ +#define IFF_POINTOPOINT 0x10 /* interface is point-to-point link */ +#define IFF_NOTRAILERS 0x20 /* obsolete: avoid use of trailers */ +#define IFF_RUNNING 0x40 /* resources allocated */ +#define IFF_NOARP 0x80 /* no address resolution protocol */ +#define IFF_PROMISC 0x100 /* receive all packets */ +#define IFF_ALLMULTI 0x200 /* receive all multicast packets */ +#define IFF_OACTIVE 0x400 /* transmission in progress */ +#define IFF_SIMPLEX 0x800 /* can't hear own transmissions */ +#define IFF_LINK0 0x1000 /* per link layer defined bit */ +#define IFF_LINK1 0x2000 /* per link layer defined bit */ +#define IFF_LINK2 0x4000 /* per link layer defined bit */ +#define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */ +#define IFF_MULTICAST 0x8000 /* supports multicast */ #ifdef PRIVATE /* extended flags definitions: (all bits reserved for internal/future use) */ -#define IFEF_AUTOCONFIGURING 0x00000001 /* allow BOOTP/DHCP replies to enter */ -#define IFEF_ENQUEUE_MULTI 0x00000002 /* enqueue multiple packets at once */ -#define IFEF_DELAY_START 0x00000004 /* delay start callback */ -#define IFEF_PROBE_CONNECTIVITY 0x00000008 /* Probe connections going over this interface */ -#define IFEF_QOSMARKING_CAPABLE 0x00000010 /* XXX Obsolete, to be removed */ -#define IFEF_IPV6_DISABLED 0x00000020 /* coupled to ND6_IFF_IFDISABLED */ -#define IFEF_ACCEPT_RTADV 0x00000040 /* accepts IPv6 RA on the interface */ -#define IFEF_TXSTART 0x00000080 /* has start callback */ -#define IFEF_RXPOLL 0x00000100 /* supports opportunistic input poll */ -#define IFEF_VLAN 0x00000200 /* interface has one or more vlans */ -#define IFEF_BOND 0x00000400 /* interface is part of bond */ -#define IFEF_ARPLL 0x00000800 /* ARP for IPv4LL addresses */ -#define IFEF_CLAT46 0x00001000 /* CLAT46 RFC 6877 */ - -#define IS_INTF_CLAT46(ifp) ((ifp) != NULL && ((ifp)->if_eflags & IFEF_CLAT46)) -#define INTF_ADJUST_MTU_FOR_CLAT46(intf) \ - (IS_INTF_CLAT46((intf)) || \ - IS_INTF_CLAT46((intf)->if_delegated.ifp)) \ +#define IFEF_AUTOCONFIGURING 0x00000001 /* allow BOOTP/DHCP replies to enter */ +#define IFEF_ENQUEUE_MULTI 0x00000002 /* enqueue multiple packets at once */ +#define IFEF_DELAY_START 0x00000004 /* delay start callback */ +#define IFEF_PROBE_CONNECTIVITY 0x00000008 /* Probe connections going over this interface */ +#define IFEF_QOSMARKING_CAPABLE 0x00000010 /* XXX Obsolete, to be removed */ +#define IFEF_IPV6_DISABLED 0x00000020 /* coupled to ND6_IFF_IFDISABLED */ +#define IFEF_ACCEPT_RTADV 0x00000040 /* accepts IPv6 RA on the interface */ +#define IFEF_TXSTART 0x00000080 /* has start callback */ +#define IFEF_RXPOLL 0x00000100 /* supports opportunistic input poll */ +#define IFEF_VLAN 0x00000200 /* interface has one or more vlans */ +#define IFEF_BOND 0x00000400 /* interface is part of bond */ +#define IFEF_ARPLL 0x00000800 /* ARP for IPv4LL addresses */ +#define IFEF_CLAT46 0x00001000 /* CLAT46 RFC 6877 */ + +#define IS_INTF_CLAT46(ifp) ((ifp) != NULL && ((ifp)->if_eflags & IFEF_CLAT46)) +#define INTF_ADJUST_MTU_FOR_CLAT46(intf) \ + (IS_INTF_CLAT46((intf)) || \ + IS_INTF_CLAT46((intf)->if_delegated.ifp)) \ /* * XXX IFEF_NOAUTOIPV6LL is deprecated and should be done away with. @@ -150,46 +150,46 @@ struct if_clonereq32 { * Rather than looking at the flag we check if a specific LLA * has to be configured or the IID has to be generated by kernel. 
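
[Editor's note] IS_INTF_CLAT46() and INTF_ADJUST_MTU_FOR_CLAT46() above are bare predicates; the patch does not show a consumer. As a hypothetical illustration only — the 20-byte delta below is the IPv6-versus-IPv4 header size difference inherent in RFC 6877 translation, an assumption on my part rather than a value from this diff:

/* Hypothetical helper: reserve translation headroom when the interface,
 * or its delegate, is performing CLAT46 (464XLAT) translation. */
static uint32_t
clat46_ipv4_mtu(struct ifnet *ifp, uint32_t link_mtu)
{
	if (INTF_ADJUST_MTU_FOR_CLAT46(ifp)) {
		/* IPv6 header (40 bytes) minus IPv4 header (20 bytes). */
		return link_mtu - 20;
	}
	return link_mtu;
}
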
*/ -#define IFEF_NOAUTOIPV6LL 0x00002000 /* Need explicit IPv6 LL address */ -#define IFEF_EXPENSIVE 0x00004000 /* Data access has a cost */ -#define IFEF_IPV4_ROUTER 0x00008000 /* interior when in IPv4 router mode */ -#define IFEF_IPV6_ROUTER 0x00010000 /* interior when in IPv6 router mode */ -#define IFEF_LOCALNET_PRIVATE 0x00020000 /* local private network */ -#define IFEF_SERVICE_TRIGGERED IFEF_LOCALNET_PRIVATE -#define IFEF_IPV6_ND6ALT 0x00040000 /* alternative. KPI for ND6 */ -#define IFEF_RESTRICTED_RECV 0x00080000 /* interface restricts inbound pkts */ -#define IFEF_AWDL 0x00100000 /* Apple Wireless Direct Link */ -#define IFEF_NOACKPRI 0x00200000 /* No TCP ACK prioritization */ -#define IFEF_AWDL_RESTRICTED 0x00400000 /* Restricted AWDL mode */ -#define IFEF_2KCL 0x00800000 /* prefers 2K cluster (socket based tunnel) */ -#define IFEF_ECN_ENABLE 0x01000000 /* use ECN for TCP connections on the interface */ -#define IFEF_ECN_DISABLE 0x02000000 /* do not use ECN for TCP connections on the interface */ -#define IFEF_SKYWALK_NATIVE 0x04000000 /* Native Skywalk support */ -#define IFEF_3CA 0x08000000 /* Capable of 3CA */ -#define IFEF_SENDLIST 0x10000000 /* Supports tx packet lists */ -#define IFEF_DIRECTLINK 0x20000000 /* point-to-point topology */ -#define IFEF_QOSMARKING_ENABLED 0x40000000 /* OoS marking is enabled */ -#define IFEF_UPDOWNCHANGE 0x80000000 /* up/down state is changing */ +#define IFEF_NOAUTOIPV6LL 0x00002000 /* Need explicit IPv6 LL address */ +#define IFEF_EXPENSIVE 0x00004000 /* Data access has a cost */ +#define IFEF_IPV4_ROUTER 0x00008000 /* interior when in IPv4 router mode */ +#define IFEF_IPV6_ROUTER 0x00010000 /* interior when in IPv6 router mode */ +#define IFEF_LOCALNET_PRIVATE 0x00020000 /* local private network */ +#define IFEF_SERVICE_TRIGGERED IFEF_LOCALNET_PRIVATE +#define IFEF_IPV6_ND6ALT 0x00040000 /* alternative. 
KPI for ND6 */ +#define IFEF_RESTRICTED_RECV 0x00080000 /* interface restricts inbound pkts */ +#define IFEF_AWDL 0x00100000 /* Apple Wireless Direct Link */ +#define IFEF_NOACKPRI 0x00200000 /* No TCP ACK prioritization */ +#define IFEF_AWDL_RESTRICTED 0x00400000 /* Restricted AWDL mode */ +#define IFEF_2KCL 0x00800000 /* prefers 2K cluster (socket based tunnel) */ +#define IFEF_ECN_ENABLE 0x01000000 /* use ECN for TCP connections on the interface */ +#define IFEF_ECN_DISABLE 0x02000000 /* do not use ECN for TCP connections on the interface */ +#define IFEF_SKYWALK_NATIVE 0x04000000 /* Native Skywalk support */ +#define IFEF_3CA 0x08000000 /* Capable of 3CA */ +#define IFEF_SENDLIST 0x10000000 /* Supports tx packet lists */ +#define IFEF_DIRECTLINK 0x20000000 /* point-to-point topology */ +#define IFEF_QOSMARKING_ENABLED 0x40000000 /* OoS marking is enabled */ +#define IFEF_UPDOWNCHANGE 0x80000000 /* up/down state is changing */ #ifdef XNU_KERNEL_PRIVATE /* * Extra flags */ -#define IFXF_WAKE_ON_MAGIC_PACKET 0x00000001 /* wake on magic packet */ -#define IFXF_TIMESTAMP_ENABLED 0x00000002 /* time stamping enabled */ -#define IFXF_NX_NOAUTO 0x00000004 /* no auto config nexus */ -#define IFXF_MULTISTACK_BPF_TAP 0x00000008 /* multistack bpf tap */ -#define IFXF_LOW_INTERNET_UL 0x00000010 /* Uplink Low Internet is confirmed */ -#define IFXF_LOW_INTERNET_DL 0x00000020 /* Downlink Low Internet is confirmed */ -#define IFXF_ALLOC_KPI 0x00000040 /* Allocated via the ifnet_alloc KPI */ -#define IFXF_LOW_POWER 0x00000080 /* Low Power Mode */ +#define IFXF_WAKE_ON_MAGIC_PACKET 0x00000001 /* wake on magic packet */ +#define IFXF_TIMESTAMP_ENABLED 0x00000002 /* time stamping enabled */ +#define IFXF_NX_NOAUTO 0x00000004 /* no auto config nexus */ +#define IFXF_MULTISTACK_BPF_TAP 0x00000008 /* multistack bpf tap */ +#define IFXF_LOW_INTERNET_UL 0x00000010 /* Uplink Low Internet is confirmed */ +#define IFXF_LOW_INTERNET_DL 0x00000020 /* Downlink Low Internet is confirmed */ +#define IFXF_ALLOC_KPI 0x00000040 /* Allocated via the ifnet_alloc KPI */ +#define IFXF_LOW_POWER 0x00000080 /* Low Power Mode */ /* * Current requirements for an AWDL interface. Setting/clearing IFEF_AWDL * will also trigger the setting/clearing of the rest of the flags. Once * IFEF_AWDL is set, the rest of flags cannot be cleared, by definition. */ -#define IFEF_AWDL_MASK \ +#define IFEF_AWDL_MASK \ (IFEF_LOCALNET_PRIVATE | IFEF_IPV6_ND6ALT | IFEF_RESTRICTED_RECV | \ IFEF_AWDL) #endif /* XNU_KERNEL_PRIVATE */ @@ -206,10 +206,10 @@ struct if_clonereq32 { * the if_idle_flags field to a non-zero value will cause the networking * stack to aggressively purge expired objects (routes, etc.) */ -#define IFRF_IDLE_NOTIFY 0x1 /* Generate notifications on idle */ +#define IFRF_IDLE_NOTIFY 0x1 /* Generate notifications on idle */ /* flags set internally only: */ -#define IFF_CANTCHANGE \ +#define IFF_CANTCHANGE \ (IFF_BROADCAST|IFF_POINTOPOINT|IFF_RUNNING|IFF_OACTIVE|\ IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI) #endif /* KERNEL_PRIVATE */ @@ -232,47 +232,47 @@ struct if_clonereq32 { * more detailed or differentiated than IFCAP_*. 
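
[Editor's note] A note on IFF_CANTCHANGE, defined a few lines up: it names the kernel-owned flag bits that a SIOCSIFFLAGS request must never let user space flip. The classic BSD merge idiom looks like this (a sketch; the real handler lives in ifioctl(), outside these hunks):

/* Sketch: accept user-changeable bits, preserve kernel-owned ones. */
static void
set_user_flags(struct ifnet *ifp, short new_flags)
{
	ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
	    (new_flags & ~IFF_CANTCHANGE);
}
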
* IFNET_* hwassist flags have corresponding CSUM_* in sys/mbuf.h */ -#define IFCAP_RXCSUM 0x00001 /* can offload checksum on RX */ -#define IFCAP_TXCSUM 0x00002 /* can offload checksum on TX */ -#define IFCAP_VLAN_MTU 0x00004 /* VLAN-compatible MTU */ -#define IFCAP_VLAN_HWTAGGING 0x00008 /* hardware VLAN tag support */ -#define IFCAP_JUMBO_MTU 0x00010 /* 9000 byte MTU supported */ -#define IFCAP_TSO4 0x00020 /* can do TCP Segmentation Offload */ -#define IFCAP_TSO6 0x00040 /* can do TCP6 Segmentation Offload */ -#define IFCAP_LRO 0x00080 /* can do Large Receive Offload */ -#define IFCAP_AV 0x00100 /* can do 802.1 AV Bridging */ -#define IFCAP_TXSTATUS 0x00200 /* can return linklevel xmit status */ -#define IFCAP_SKYWALK 0x00400 /* Skywalk mode supported/enabled */ -#define IFCAP_HW_TIMESTAMP 0x00800 /* Time stamping in hardware */ -#define IFCAP_SW_TIMESTAMP 0x01000 /* Time stamping in software */ -#define IFCAP_CSUM_PARTIAL 0x02000 /* can offload partial checksum */ -#define IFCAP_CSUM_ZERO_INVERT 0x04000 /* can invert 0 to -0 (0xffff) */ - -#define IFCAP_HWCSUM (IFCAP_RXCSUM | IFCAP_TXCSUM) -#define IFCAP_TSO (IFCAP_TSO4 | IFCAP_TSO6) - -#define IFCAP_VALID (IFCAP_HWCSUM | IFCAP_TSO | IFCAP_LRO | IFCAP_VLAN_MTU | \ +#define IFCAP_RXCSUM 0x00001 /* can offload checksum on RX */ +#define IFCAP_TXCSUM 0x00002 /* can offload checksum on TX */ +#define IFCAP_VLAN_MTU 0x00004 /* VLAN-compatible MTU */ +#define IFCAP_VLAN_HWTAGGING 0x00008 /* hardware VLAN tag support */ +#define IFCAP_JUMBO_MTU 0x00010 /* 9000 byte MTU supported */ +#define IFCAP_TSO4 0x00020 /* can do TCP Segmentation Offload */ +#define IFCAP_TSO6 0x00040 /* can do TCP6 Segmentation Offload */ +#define IFCAP_LRO 0x00080 /* can do Large Receive Offload */ +#define IFCAP_AV 0x00100 /* can do 802.1 AV Bridging */ +#define IFCAP_TXSTATUS 0x00200 /* can return linklevel xmit status */ +#define IFCAP_SKYWALK 0x00400 /* Skywalk mode supported/enabled */ +#define IFCAP_HW_TIMESTAMP 0x00800 /* Time stamping in hardware */ +#define IFCAP_SW_TIMESTAMP 0x01000 /* Time stamping in software */ +#define IFCAP_CSUM_PARTIAL 0x02000 /* can offload partial checksum */ +#define IFCAP_CSUM_ZERO_INVERT 0x04000 /* can invert 0 to -0 (0xffff) */ + +#define IFCAP_HWCSUM (IFCAP_RXCSUM | IFCAP_TXCSUM) +#define IFCAP_TSO (IFCAP_TSO4 | IFCAP_TSO6) + +#define IFCAP_VALID (IFCAP_HWCSUM | IFCAP_TSO | IFCAP_LRO | IFCAP_VLAN_MTU | \ IFCAP_VLAN_HWTAGGING | IFCAP_JUMBO_MTU | IFCAP_AV | IFCAP_TXSTATUS | \ IFCAP_SKYWALK | IFCAP_SW_TIMESTAMP | IFCAP_HW_TIMESTAMP | \ IFCAP_CSUM_PARTIAL | IFCAP_CSUM_ZERO_INVERT) -#define IFQ_MAXLEN 128 -#define IFNET_SLOWHZ 1 /* granularity is 1 second */ -#define IFQ_TARGET_DELAY (10ULL * 1000 * 1000) /* 10 ms */ -#define IFQ_UPDATE_INTERVAL (100ULL * 1000 * 1000) /* 100 ms */ +#define IFQ_MAXLEN 128 +#define IFNET_SLOWHZ 1 /* granularity is 1 second */ +#define IFQ_TARGET_DELAY (10ULL * 1000 * 1000) /* 10 ms */ +#define IFQ_UPDATE_INTERVAL (100ULL * 1000 * 1000) /* 100 ms */ /* * Message format for use in obtaining information about interfaces * from sysctl and the routing socket */ struct if_msghdr { - unsigned short ifm_msglen; /* to skip non-understood messages */ - unsigned char ifm_version; /* future binary compatability */ - unsigned char ifm_type; /* message type */ - int ifm_addrs; /* like rtm_addrs */ - int ifm_flags; /* value of if_flags */ - unsigned short ifm_index; /* index for associated ifp */ - struct if_data ifm_data; /* statistics and other data about if */ + unsigned short ifm_msglen; /* to skip non-understood 
messages */ + unsigned char ifm_version; /* future binary compatability */ + unsigned char ifm_type; /* message type */ + int ifm_addrs; /* like rtm_addrs */ + int ifm_flags; /* value of if_flags */ + unsigned short ifm_index; /* index for associated ifp */ + struct if_data ifm_data; /* statistics and other data about if */ }; /* @@ -280,13 +280,13 @@ struct if_msghdr { * from sysctl and the routing socket */ struct ifa_msghdr { - unsigned short ifam_msglen; /* to skip non-understood messages */ - unsigned char ifam_version; /* future binary compatability */ - unsigned char ifam_type; /* message type */ - int ifam_addrs; /* like rtm_addrs */ - int ifam_flags; /* value of ifa_flags */ - unsigned short ifam_index; /* index for associated ifp */ - int ifam_metric; /* value of ifa_metric */ + unsigned short ifam_msglen; /* to skip non-understood messages */ + unsigned char ifam_version; /* future binary compatability */ + unsigned char ifam_type; /* message type */ + int ifam_addrs; /* like rtm_addrs */ + int ifam_flags; /* value of ifa_flags */ + unsigned short ifam_index; /* index for associated ifp */ + int ifam_metric; /* value of ifa_metric */ }; /* @@ -294,12 +294,12 @@ struct ifa_msghdr { * from the routing socket */ struct ifma_msghdr { - unsigned short ifmam_msglen; /* to skip non-understood messages */ - unsigned char ifmam_version; /* future binary compatability */ - unsigned char ifmam_type; /* message type */ - int ifmam_addrs; /* like rtm_addrs */ - int ifmam_flags; /* value of ifa_flags */ - unsigned short ifmam_index; /* index for associated ifp */ + unsigned short ifmam_msglen; /* to skip non-understood messages */ + unsigned char ifmam_version; /* future binary compatability */ + unsigned char ifmam_type; /* message type */ + int ifmam_addrs; /* like rtm_addrs */ + int ifmam_flags; /* value of ifa_flags */ + unsigned short ifmam_index; /* index for associated ifp */ }; /* @@ -307,17 +307,17 @@ struct ifma_msghdr { * from sysctl */ struct if_msghdr2 { - u_short ifm_msglen; /* to skip over non-understood messages */ - u_char ifm_version; /* future binary compatability */ - u_char ifm_type; /* message type */ - int ifm_addrs; /* like rtm_addrs */ - int ifm_flags; /* value of if_flags */ - u_short ifm_index; /* index for associated ifp */ - int ifm_snd_len; /* instantaneous length of send queue */ - int ifm_snd_maxlen; /* maximum length of send queue */ - int ifm_snd_drops; /* number of drops in send queue */ - int ifm_timer; /* time until if_watchdog called */ - struct if_data64 ifm_data; /* statistics and other data */ + u_short ifm_msglen; /* to skip over non-understood messages */ + u_char ifm_version; /* future binary compatability */ + u_char ifm_type; /* message type */ + int ifm_addrs; /* like rtm_addrs */ + int ifm_flags; /* value of if_flags */ + u_short ifm_index; /* index for associated ifp */ + int ifm_snd_len; /* instantaneous length of send queue */ + int ifm_snd_maxlen; /* maximum length of send queue */ + int ifm_snd_drops; /* number of drops in send queue */ + int ifm_timer; /* time until if_watchdog called */ + struct if_data64 ifm_data; /* statistics and other data */ }; /* @@ -325,12 +325,12 @@ struct if_msghdr2 { * from sysctl */ struct ifma_msghdr2 { - u_short ifmam_msglen; /* to skip over non-understood messages */ - u_char ifmam_version; /* future binary compatability */ - u_char ifmam_type; /* message type */ - int ifmam_addrs; /* like rtm_addrs */ - int ifmam_flags; /* value of ifa_flags */ - u_short ifmam_index; /* index for associated ifp */ + u_short 
ifmam_msglen; /* to skip over non-understood messages */ + u_char ifmam_version; /* future binary compatability */ + u_char ifmam_type; /* message type */ + int ifmam_addrs; /* like rtm_addrs */ + int ifmam_flags; /* value of ifa_flags */ + u_short ifmam_index; /* index for associated ifp */ int32_t ifmam_refcount; }; @@ -340,54 +340,54 @@ struct ifma_msghdr2 { * as well as the minimum and maximum mtu allowed by the device. */ struct ifdevmtu { - int ifdm_current; - int ifdm_min; - int ifdm_max; + int ifdm_current; + int ifdm_min; + int ifdm_max; }; #pragma pack(4) /* - ifkpi: interface kpi ioctl - Used with SIOCSIFKPI and SIOCGIFKPI. - - ifk_module_id - From in the kernel, a value from kev_vendor_code_find. From - user space, a value from SIOCGKEVVENDOR ioctl on a kernel event socket. - ifk_type - The type. Types are specific to each module id. - ifk_data - The data. ifk_ptr may be a 64bit pointer for 64 bit processes. - - Copying data between user space and kernel space is done using copyin - and copyout. A process may be running in 64bit mode. In such a case, - the pointer will be a 64bit pointer, not a 32bit pointer. The following - sample is a safe way to copy the data in to the kernel from either a - 32bit or 64bit process: - - user_addr_t tmp_ptr; - if (IS_64BIT_PROCESS(current_proc())) { - tmp_ptr = CAST_USER_ADDR_T(ifkpi.ifk_data.ifk_ptr64); - } - else { - tmp_ptr = CAST_USER_ADDR_T(ifkpi.ifk_data.ifk_ptr); - } - error = copyin(tmp_ptr, allocated_dst_buffer, size of allocated_dst_buffer); + * ifkpi: interface kpi ioctl + * Used with SIOCSIFKPI and SIOCGIFKPI. + * + * ifk_module_id - From in the kernel, a value from kev_vendor_code_find. From + * user space, a value from SIOCGKEVVENDOR ioctl on a kernel event socket. + * ifk_type - The type. Types are specific to each module id. + * ifk_data - The data. ifk_ptr may be a 64bit pointer for 64 bit processes. + * + * Copying data between user space and kernel space is done using copyin + * and copyout. A process may be running in 64bit mode. In such a case, + * the pointer will be a 64bit pointer, not a 32bit pointer. The following + * sample is a safe way to copy the data in to the kernel from either a + * 32bit or 64bit process: + * + * user_addr_t tmp_ptr; + * if (IS_64BIT_PROCESS(current_proc())) { + * tmp_ptr = CAST_USER_ADDR_T(ifkpi.ifk_data.ifk_ptr64); + * } + * else { + * tmp_ptr = CAST_USER_ADDR_T(ifkpi.ifk_data.ifk_ptr); + * } + * error = copyin(tmp_ptr, allocated_dst_buffer, size of allocated_dst_buffer); */ struct ifkpi { - unsigned int ifk_module_id; - unsigned int ifk_type; + unsigned int ifk_module_id; + unsigned int ifk_type; union { - void *ifk_ptr; - int ifk_value; + void *ifk_ptr; + int ifk_value; #ifdef KERNEL - u_int64_t ifk_ptr64; + u_int64_t ifk_ptr64; #endif /* KERNEL */ } ifk_data; }; /* Wake capabilities of a interface */ -#define IF_WAKE_ON_MAGIC_PACKET 0x01 +#define IF_WAKE_ON_MAGIC_PACKET 0x01 #ifdef KERNEL_PRIVATE -#define IF_WAKE_VALID_FLAGS IF_WAKE_ON_MAGIC_PACKET +#define IF_WAKE_VALID_FLAGS IF_WAKE_ON_MAGIC_PACKET #endif /* KERNEL_PRIVATE */ @@ -399,92 +399,92 @@ struct ifkpi { * definitions which begin with ifr_name. The * remainder may be interface specific. */ -struct ifreq { +struct ifreq { #ifndef IFNAMSIZ -#define IFNAMSIZ IF_NAMESIZE +#define IFNAMSIZ IF_NAMESIZE #endif - char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + char ifr_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ union { - struct sockaddr ifru_addr; - struct sockaddr ifru_dstaddr; - struct sockaddr ifru_broadaddr; - short ifru_flags; - int ifru_metric; - int ifru_mtu; - int ifru_phys; - int ifru_media; - int ifru_intval; - caddr_t ifru_data; + struct sockaddr ifru_addr; + struct sockaddr ifru_dstaddr; + struct sockaddr ifru_broadaddr; + short ifru_flags; + int ifru_metric; + int ifru_mtu; + int ifru_phys; + int ifru_media; + int ifru_intval; + caddr_t ifru_data; #ifdef KERNEL_PRIVATE - u_int64_t ifru_data64; /* 64-bit ifru_data */ + u_int64_t ifru_data64; /* 64-bit ifru_data */ #endif /* KERNEL_PRIVATE */ - struct ifdevmtu ifru_devmtu; - struct ifkpi ifru_kpi; + struct ifdevmtu ifru_devmtu; + struct ifkpi ifru_kpi; u_int32_t ifru_wake_flags; u_int32_t ifru_route_refcnt; #ifdef PRIVATE - int ifru_link_quality_metric; + int ifru_link_quality_metric; #endif /* PRIVATE */ - int ifru_cap[2]; + int ifru_cap[2]; #ifdef PRIVATE struct { - uint32_t ifo_flags; -#define IFRIFOF_BLOCK_OPPORTUNISTIC 0x00000001 - uint32_t ifo_inuse; + uint32_t ifo_flags; +#define IFRIFOF_BLOCK_OPPORTUNISTIC 0x00000001 + uint32_t ifo_inuse; } ifru_opportunistic; u_int64_t ifru_eflags; struct { - int32_t ifl_level; - uint32_t ifl_flags; -#define IFRLOGF_DLIL 0x00000001 -#define IFRLOGF_FAMILY 0x00010000 -#define IFRLOGF_DRIVER 0x01000000 -#define IFRLOGF_FIRMWARE 0x10000000 - int32_t ifl_category; -#define IFRLOGCAT_CONNECTIVITY 1 -#define IFRLOGCAT_QUALITY 2 -#define IFRLOGCAT_PERFORMANCE 3 - int32_t ifl_subcategory; + int32_t ifl_level; + uint32_t ifl_flags; +#define IFRLOGF_DLIL 0x00000001 +#define IFRLOGF_FAMILY 0x00010000 +#define IFRLOGF_DRIVER 0x01000000 +#define IFRLOGF_FIRMWARE 0x10000000 + int32_t ifl_category; +#define IFRLOGCAT_CONNECTIVITY 1 +#define IFRLOGCAT_QUALITY 2 +#define IFRLOGCAT_PERFORMANCE 3 + int32_t ifl_subcategory; } ifru_log; u_int32_t ifru_delegated; struct { - uint32_t ift_type; - uint32_t ift_family; -#define IFRTYPE_FAMILY_ANY 0 -#define IFRTYPE_FAMILY_LOOPBACK 1 -#define IFRTYPE_FAMILY_ETHERNET 2 -#define IFRTYPE_FAMILY_SLIP 3 -#define IFRTYPE_FAMILY_TUN 4 -#define IFRTYPE_FAMILY_VLAN 5 -#define IFRTYPE_FAMILY_PPP 6 -#define IFRTYPE_FAMILY_PVC 7 -#define IFRTYPE_FAMILY_DISC 8 -#define IFRTYPE_FAMILY_MDECAP 9 -#define IFRTYPE_FAMILY_GIF 10 -#define IFRTYPE_FAMILY_FAITH 11 -#define IFRTYPE_FAMILY_STF 12 -#define IFRTYPE_FAMILY_FIREWIRE 13 -#define IFRTYPE_FAMILY_BOND 14 -#define IFRTYPE_FAMILY_CELLULAR 15 - uint32_t ift_subfamily; -#define IFRTYPE_SUBFAMILY_ANY 0 -#define IFRTYPE_SUBFAMILY_USB 1 -#define IFRTYPE_SUBFAMILY_BLUETOOTH 2 -#define IFRTYPE_SUBFAMILY_WIFI 3 -#define IFRTYPE_SUBFAMILY_THUNDERBOLT 4 -#define IFRTYPE_SUBFAMILY_RESERVED 5 -#define IFRTYPE_SUBFAMILY_INTCOPROC 6 + uint32_t ift_type; + uint32_t ift_family; +#define IFRTYPE_FAMILY_ANY 0 +#define IFRTYPE_FAMILY_LOOPBACK 1 +#define IFRTYPE_FAMILY_ETHERNET 2 +#define IFRTYPE_FAMILY_SLIP 3 +#define IFRTYPE_FAMILY_TUN 4 +#define IFRTYPE_FAMILY_VLAN 5 +#define IFRTYPE_FAMILY_PPP 6 +#define IFRTYPE_FAMILY_PVC 7 +#define IFRTYPE_FAMILY_DISC 8 +#define IFRTYPE_FAMILY_MDECAP 9 +#define IFRTYPE_FAMILY_GIF 10 +#define IFRTYPE_FAMILY_FAITH 11 +#define IFRTYPE_FAMILY_STF 12 +#define IFRTYPE_FAMILY_FIREWIRE 13 +#define IFRTYPE_FAMILY_BOND 14 +#define IFRTYPE_FAMILY_CELLULAR 15 + uint32_t ift_subfamily; +#define IFRTYPE_SUBFAMILY_ANY 0 +#define IFRTYPE_SUBFAMILY_USB 1 +#define IFRTYPE_SUBFAMILY_BLUETOOTH 2 +#define IFRTYPE_SUBFAMILY_WIFI 3 +#define IFRTYPE_SUBFAMILY_THUNDERBOLT 4 +#define IFRTYPE_SUBFAMILY_RESERVED 5 +#define 
IFRTYPE_SUBFAMILY_INTCOPROC 6 } ifru_type; #endif /* PRIVATE */ u_int32_t ifru_functional_type; -#define IFRTYPE_FUNCTIONAL_UNKNOWN 0 -#define IFRTYPE_FUNCTIONAL_LOOPBACK 1 -#define IFRTYPE_FUNCTIONAL_WIRED 2 -#define IFRTYPE_FUNCTIONAL_WIFI_INFRA 3 -#define IFRTYPE_FUNCTIONAL_WIFI_AWDL 4 -#define IFRTYPE_FUNCTIONAL_CELLULAR 5 -#define IFRTYPE_FUNCTIONAL_INTCOPROC 6 -#define IFRTYPE_FUNCTIONAL_LAST 6 +#define IFRTYPE_FUNCTIONAL_UNKNOWN 0 +#define IFRTYPE_FUNCTIONAL_LOOPBACK 1 +#define IFRTYPE_FUNCTIONAL_WIRED 2 +#define IFRTYPE_FUNCTIONAL_WIFI_INFRA 3 +#define IFRTYPE_FUNCTIONAL_WIFI_AWDL 4 +#define IFRTYPE_FUNCTIONAL_CELLULAR 5 +#define IFRTYPE_FUNCTIONAL_INTCOPROC 6 +#define IFRTYPE_FUNCTIONAL_LAST 6 #ifdef PRIVATE u_int32_t ifru_expensive; u_int32_t ifru_2kcl; @@ -492,86 +492,86 @@ struct ifreq { u_int32_t qlen; u_int32_t timeout; } ifru_start_delay; - struct if_interface_state ifru_interface_state; + struct if_interface_state ifru_interface_state; u_int32_t ifru_probe_connectivity; u_int32_t ifru_ecn_mode; -#define IFRTYPE_ECN_DEFAULT 0 -#define IFRTYPE_ECN_ENABLE 1 -#define IFRTYPE_ECN_DISABLE 2 +#define IFRTYPE_ECN_DEFAULT 0 +#define IFRTYPE_ECN_ENABLE 1 +#define IFRTYPE_ECN_DISABLE 2 u_int32_t ifru_qosmarking_mode; -#define IFRTYPE_QOSMARKING_MODE_NONE 0 -#define IFRTYPE_QOSMARKING_FASTLANE 1 +#define IFRTYPE_QOSMARKING_MODE_NONE 0 +#define IFRTYPE_QOSMARKING_FASTLANE 1 u_int32_t ifru_qosmarking_enabled; u_int32_t ifru_disable_output; u_int32_t ifru_low_internet; -#define IFRTYPE_LOW_INTERNET_DISABLE_UL_DL 0x0000 -#define IFRTYPE_LOW_INTERNET_ENABLE_UL 0x0001 -#define IFRTYPE_LOW_INTERNET_ENABLE_DL 0x0002 +#define IFRTYPE_LOW_INTERNET_DISABLE_UL_DL 0x0000 +#define IFRTYPE_LOW_INTERNET_ENABLE_UL 0x0001 +#define IFRTYPE_LOW_INTERNET_ENABLE_DL 0x0002 int ifru_low_power_mode; #endif /* PRIVATE */ } ifr_ifru; -#define ifr_addr ifr_ifru.ifru_addr /* address */ -#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-to-p link */ -#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */ +#define ifr_addr ifr_ifru.ifru_addr /* address */ +#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-to-p link */ +#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */ #ifdef __APPLE__ -#define ifr_flags ifr_ifru.ifru_flags /* flags */ +#define ifr_flags ifr_ifru.ifru_flags /* flags */ #else -#define ifr_flags ifr_ifru.ifru_flags[0] /* flags */ -#define ifr_prevflags ifr_ifru.ifru_flags[1] /* flags */ +#define ifr_flags ifr_ifru.ifru_flags[0] /* flags */ +#define ifr_prevflags ifr_ifru.ifru_flags[1] /* flags */ #endif /* __APPLE__ */ -#define ifr_metric ifr_ifru.ifru_metric /* metric */ -#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */ -#define ifr_phys ifr_ifru.ifru_phys /* physical wire */ -#define ifr_media ifr_ifru.ifru_media /* physical media */ -#define ifr_data ifr_ifru.ifru_data /* for use by interface */ -#define ifr_devmtu ifr_ifru.ifru_devmtu -#define ifr_intval ifr_ifru.ifru_intval /* integer value */ +#define ifr_metric ifr_ifru.ifru_metric /* metric */ +#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */ +#define ifr_phys ifr_ifru.ifru_phys /* physical wire */ +#define ifr_media ifr_ifru.ifru_media /* physical media */ +#define ifr_data ifr_ifru.ifru_data /* for use by interface */ +#define ifr_devmtu ifr_ifru.ifru_devmtu +#define ifr_intval ifr_ifru.ifru_intval /* integer value */ #ifdef KERNEL_PRIVATE -#define ifr_data64 ifr_ifru.ifru_data64 /* 64-bit pointer */ +#define ifr_data64 ifr_ifru.ifru_data64 /* 64-bit pointer */ #endif /* KERNEL_PRIVATE */ -#define 
ifr_kpi ifr_ifru.ifru_kpi -#define ifr_wake_flags ifr_ifru.ifru_wake_flags /* wake capabilities */ -#define ifr_route_refcnt ifr_ifru.ifru_route_refcnt /* route references count */ +#define ifr_kpi ifr_ifru.ifru_kpi +#define ifr_wake_flags ifr_ifru.ifru_wake_flags /* wake capabilities */ +#define ifr_route_refcnt ifr_ifru.ifru_route_refcnt /* route references count */ #ifdef PRIVATE -#define ifr_link_quality_metric ifr_ifru.ifru_link_quality_metric /* LQM */ +#define ifr_link_quality_metric ifr_ifru.ifru_link_quality_metric /* LQM */ #endif /* PRIVATE */ -#define ifr_reqcap ifr_ifru.ifru_cap[0] /* requested capabilities */ -#define ifr_curcap ifr_ifru.ifru_cap[1] /* current capabilities */ +#define ifr_reqcap ifr_ifru.ifru_cap[0] /* requested capabilities */ +#define ifr_curcap ifr_ifru.ifru_cap[1] /* current capabilities */ #ifdef PRIVATE -#define ifr_opportunistic ifr_ifru.ifru_opportunistic -#define ifr_eflags ifr_ifru.ifru_eflags /* extended flags */ -#define ifr_log ifr_ifru.ifru_log /* logging level/flags */ -#define ifr_delegated ifr_ifru.ifru_delegated /* delegated interface index */ -#define ifr_expensive ifr_ifru.ifru_expensive -#define ifr_type ifr_ifru.ifru_type /* interface type */ -#define ifr_functional_type ifr_ifru.ifru_functional_type -#define ifr_2kcl ifr_ifru.ifru_2kcl -#define ifr_start_delay_qlen ifr_ifru.ifru_start_delay.qlen -#define ifr_start_delay_timeout ifr_ifru.ifru_start_delay.timeout -#define ifr_interface_state ifr_ifru.ifru_interface_state -#define ifr_probe_connectivity ifr_ifru.ifru_probe_connectivity -#define ifr_ecn_mode ifr_ifru.ifru_ecn_mode -#define ifr_qosmarking_mode ifr_ifru.ifru_qosmarking_mode -#define ifr_fastlane_capable ifr_qosmarking_mode -#define ifr_qosmarking_enabled ifr_ifru.ifru_qosmarking_enabled -#define ifr_fastlane_enabled ifr_qosmarking_enabled -#define ifr_disable_output ifr_ifru.ifru_disable_output -#define ifr_low_internet ifr_ifru.ifru_low_internet -#define ifr_low_power_mode ifr_ifru.ifru_low_power_mode +#define ifr_opportunistic ifr_ifru.ifru_opportunistic +#define ifr_eflags ifr_ifru.ifru_eflags /* extended flags */ +#define ifr_log ifr_ifru.ifru_log /* logging level/flags */ +#define ifr_delegated ifr_ifru.ifru_delegated /* delegated interface index */ +#define ifr_expensive ifr_ifru.ifru_expensive +#define ifr_type ifr_ifru.ifru_type /* interface type */ +#define ifr_functional_type ifr_ifru.ifru_functional_type +#define ifr_2kcl ifr_ifru.ifru_2kcl +#define ifr_start_delay_qlen ifr_ifru.ifru_start_delay.qlen +#define ifr_start_delay_timeout ifr_ifru.ifru_start_delay.timeout +#define ifr_interface_state ifr_ifru.ifru_interface_state +#define ifr_probe_connectivity ifr_ifru.ifru_probe_connectivity +#define ifr_ecn_mode ifr_ifru.ifru_ecn_mode +#define ifr_qosmarking_mode ifr_ifru.ifru_qosmarking_mode +#define ifr_fastlane_capable ifr_qosmarking_mode +#define ifr_qosmarking_enabled ifr_ifru.ifru_qosmarking_enabled +#define ifr_fastlane_enabled ifr_qosmarking_enabled +#define ifr_disable_output ifr_ifru.ifru_disable_output +#define ifr_low_internet ifr_ifru.ifru_low_internet +#define ifr_low_power_mode ifr_ifru.ifru_low_power_mode #endif /* PRIVATE */ }; -#define _SIZEOF_ADDR_IFREQ(ifr) \ +#define _SIZEOF_ADDR_IFREQ(ifr) \ ((ifr).ifr_addr.sa_len > sizeof (struct sockaddr) ? \ (sizeof (struct ifreq) - sizeof (struct sockaddr) + \ (ifr).ifr_addr.sa_len) : sizeof (struct ifreq)) struct ifaliasreq { - char ifra_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ - struct sockaddr ifra_addr; - struct sockaddr ifra_broadaddr; - struct sockaddr ifra_mask; + char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + struct sockaddr ifra_addr; + struct sockaddr ifra_broadaddr; + struct sockaddr ifra_mask; }; struct rslvmulti_req { @@ -583,13 +583,13 @@ struct rslvmulti_req { #pragma pack(4) struct ifmediareq { - char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - int ifm_current; /* current media options */ - int ifm_mask; /* don't care mask */ - int ifm_status; /* media status */ - int ifm_active; /* active options */ - int ifm_count; /* # entries in ifm_ulist array */ - int *ifm_ulist; /* media words */ + char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + int ifm_current; /* current media options */ + int ifm_mask; /* don't care mask */ + int ifm_status; /* media status */ + int ifm_active; /* active options */ + int ifm_count; /* # entries in ifm_ulist array */ + int *ifm_ulist; /* media words */ }; #pragma pack() @@ -598,23 +598,23 @@ struct ifmediareq { #ifdef KERNEL_PRIVATE #pragma pack(4) struct ifmediareq64 { - char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - int ifm_current; /* current media options */ - int ifm_mask; /* don't care mask */ - int ifm_status; /* media status */ - int ifm_active; /* active options */ - int ifm_count; /* # entries in ifm_ulist array */ + char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + int ifm_current; /* current media options */ + int ifm_mask; /* don't care mask */ + int ifm_status; /* media status */ + int ifm_active; /* active options */ + int ifm_count; /* # entries in ifm_ulist array */ user64_addr_t ifmu_ulist __attribute__((aligned(8))); }; struct ifmediareq32 { - char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - int ifm_current; /* current media options */ - int ifm_mask; /* don't care mask */ - int ifm_status; /* media status */ - int ifm_active; /* active options */ - int ifm_count; /* # entries in ifm_ulist array */ - user32_addr_t ifmu_ulist; /* 32-bit pointer */ + char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + int ifm_current; /* current media options */ + int ifm_mask; /* don't care mask */ + int ifm_status; /* media status */ + int ifm_active; /* active options */ + int ifm_count; /* # entries in ifm_ulist array */ + user32_addr_t ifmu_ulist; /* 32-bit pointer */ }; #pragma pack() #endif /* KERNEL_PRIVATE */ @@ -622,27 +622,27 @@ struct ifmediareq32 { #pragma pack(4) struct ifdrv { - char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - unsigned long ifd_cmd; - size_t ifd_len; /* length of ifd_data buffer */ - void *ifd_data; + char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + unsigned long ifd_cmd; + size_t ifd_len; /* length of ifd_data buffer */ + void *ifd_data; }; #pragma pack() #ifdef KERNEL_PRIVATE #pragma pack(4) struct ifdrv32 { - char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - u_int32_t ifd_cmd; - u_int32_t ifd_len; - user32_addr_t ifd_data; + char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + u_int32_t ifd_cmd; + u_int32_t ifd_len; + user32_addr_t ifd_data; }; struct ifdrv64 { - char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - u_int64_t ifd_cmd; - u_int64_t ifd_len; - user64_addr_t ifd_data; + char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + u_int64_t ifd_cmd; + u_int64_t ifd_len; + user64_addr_t ifd_data; }; #pragma pack() #endif /* KERNEL_PRIVATE */ @@ -654,10 +654,10 @@ struct ifdrv64 { * a newline. 
*/ -#define IFSTATMAX 800 /* 10 lines of text */ +#define IFSTATMAX 800 /* 10 lines of text */ struct ifstat { - char ifs_name[IFNAMSIZ]; /* if name, e.g. "en0" */ - char ascii[IFSTATMAX + 1]; + char ifs_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + char ascii[IFSTATMAX + 1]; }; #if !defined(KERNEL) || defined(KERNEL_PRIVATE) @@ -668,31 +668,31 @@ struct ifstat { * must know all networks accessible). */ #pragma pack(4) -struct ifconf { - int ifc_len; /* size of associated buffer */ +struct ifconf { + int ifc_len; /* size of associated buffer */ union { - caddr_t ifcu_buf; - struct ifreq *ifcu_req; + caddr_t ifcu_buf; + struct ifreq *ifcu_req; } ifc_ifcu; }; #pragma pack() -#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ -#define ifc_req ifc_ifcu.ifcu_req /* array of structures returned */ +#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ +#define ifc_req ifc_ifcu.ifcu_req /* array of structures returned */ #endif /* !KERNEL || KERNEL_PRIVATE */ #if defined(KERNEL_PRIVATE) #pragma pack(4) struct ifconf32 { - int ifc_len; /* size of associated buffer */ + int ifc_len; /* size of associated buffer */ struct { user32_addr_t ifcu_req; } ifc_ifcu; }; struct ifconf64 { - int ifc_len; /* size of associated buffer */ + int ifc_len; /* size of associated buffer */ struct { - user64_addr_t ifcu_req __attribute__((aligned(8))); + user64_addr_t ifcu_req __attribute__((aligned(8))); } ifc_ifcu; }; #pragma pack() @@ -702,9 +702,9 @@ struct ifconf64 { * DLIL KEV_DL_PROTO_ATTACHED/DETACHED structure */ struct kev_dl_proto_data { - struct net_event_data link_data; - u_int32_t proto_family; - u_int32_t proto_remaining_count; + struct net_event_data link_data; + u_int32_t proto_family; + u_int32_t proto_remaining_count; }; #ifdef PRIVATE @@ -718,37 +718,37 @@ struct kev_dl_proto_data { * IFNET_LQM_THRESH_GOOD Link quality is considered good by driver. */ enum { - IFNET_LQM_THRESH_OFF = (-2), - IFNET_LQM_THRESH_UNKNOWN = (-1), - IFNET_LQM_THRESH_ABORT = 10, + IFNET_LQM_THRESH_OFF = (-2), + IFNET_LQM_THRESH_UNKNOWN = (-1), + IFNET_LQM_THRESH_ABORT = 10, IFNET_LQM_THRESH_MINIMALLY_VIABLE = 20, - IFNET_LQM_THRESH_POOR = 50, - IFNET_LQM_THRESH_GOOD = 100 + IFNET_LQM_THRESH_POOR = 50, + IFNET_LQM_THRESH_GOOD = 100 }; -#define IFNET_LQM_THRESH_BAD IFNET_LQM_THRESH_ABORT +#define IFNET_LQM_THRESH_BAD IFNET_LQM_THRESH_ABORT #ifdef XNU_KERNEL_PRIVATE -#define IFNET_LQM_MIN IFNET_LQM_THRESH_OFF -#define IFNET_LQM_MAX IFNET_LQM_THRESH_GOOD +#define IFNET_LQM_MIN IFNET_LQM_THRESH_OFF +#define IFNET_LQM_MAX IFNET_LQM_THRESH_GOOD #endif /* XNU_KERNEL_PRIVATE */ /* * DLIL KEV_DL_LINK_QUALITY_METRIC_CHANGED structure */ struct kev_dl_link_quality_metric_data { - struct net_event_data link_data; - int link_quality_metric; + struct net_event_data link_data; + int link_quality_metric; }; -#define IF_DESCSIZE 128 +#define IF_DESCSIZE 128 /* * Structure for SIOC[SG]IFDESC */ struct if_descreq { - char ifdr_name[IFNAMSIZ]; /* interface name */ - u_int32_t ifdr_len; /* up to IF_DESCSIZE */ - u_int8_t ifdr_desc[IF_DESCSIZE]; /* opaque data */ + char ifdr_name[IFNAMSIZ]; /* interface name */ + u_int32_t ifdr_len; /* up to IF_DESCSIZE */ + u_int8_t ifdr_desc[IF_DESCSIZE]; /* opaque data */ }; /* @@ -766,30 +766,30 @@ struct if_descreq { * corresponding service classes. 
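
[Editor's note] Because struct ifreq embeds only a minimum-size struct sockaddr, the buffer that SIOCGIFCONF fills via struct ifconf above is a packed sequence of variable-size records; _SIZEOF_ADDR_IFREQ(), defined earlier in this header, gives the stride. A user-space sketch (error handling trimmed; s is assumed to be an open AF_INET datagram socket):

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <string.h>

static void
walk_ifconf(int s)
{
	char buf[8192], *p;
	struct ifconf ifc;

	memset(&ifc, 0, sizeof(ifc));
	ifc.ifc_len = sizeof(buf);
	ifc.ifc_buf = buf;
	if (ioctl(s, SIOCGIFCONF, &ifc) == -1) {
		return;
	}
	for (p = buf; p < buf + ifc.ifc_len;) {
		struct ifreq *ifr = (struct ifreq *)(void *)p;
		/* ... inspect ifr->ifr_name and ifr->ifr_addr here ... */
		p += _SIZEOF_ADDR_IFREQ(*ifr);
	}
}
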
*/ enum { - IFNET_SCHED_MODEL_NORMAL = 0, - IFNET_SCHED_MODEL_DRIVER_MANAGED = 1, - IFNET_SCHED_MODEL_FQ_CODEL = 2, + IFNET_SCHED_MODEL_NORMAL = 0, + IFNET_SCHED_MODEL_DRIVER_MANAGED = 1, + IFNET_SCHED_MODEL_FQ_CODEL = 2, #ifdef XNU_KERNEL_PRIVATE - IFNET_SCHED_MODEL_MAX = 3, + IFNET_SCHED_MODEL_MAX = 3, #endif /* XNU_KERNEL_PRIVATE */ }; /* * Values for iflpr_flags */ -#define IFLPRF_ALTQ 0x1 /* configured via PF/ALTQ */ -#define IFLPRF_DRVMANAGED 0x2 /* output queue scheduled by drv */ +#define IFLPRF_ALTQ 0x1 /* configured via PF/ALTQ */ +#define IFLPRF_DRVMANAGED 0x2 /* output queue scheduled by drv */ /* * Structure for SIOCGIFLINKPARAMS */ struct if_linkparamsreq { - char iflpr_name[IFNAMSIZ]; /* interface name */ - u_int32_t iflpr_flags; - u_int32_t iflpr_output_sched; - u_int64_t iflpr_output_tbr_rate; - u_int32_t iflpr_output_tbr_percent; - u_int64_t iflpr_input_tbr_rate; + char iflpr_name[IFNAMSIZ]; /* interface name */ + u_int32_t iflpr_flags; + u_int32_t iflpr_output_sched; + u_int64_t iflpr_output_tbr_rate; + u_int32_t iflpr_output_tbr_percent; + u_int64_t iflpr_input_tbr_rate; struct if_bandwidths iflpr_output_bw; struct if_bandwidths iflpr_input_bw; struct if_latencies iflpr_output_lt; @@ -800,20 +800,20 @@ struct if_linkparamsreq { * Structure for SIOCGIFQUEUESTATS */ struct if_qstatsreq { - char ifqr_name[IFNAMSIZ]; /* interface name */ - u_int32_t ifqr_slot; - void *ifqr_buf __attribute__((aligned(8))); - int ifqr_len __attribute__((aligned(8))); + char ifqr_name[IFNAMSIZ]; /* interface name */ + u_int32_t ifqr_slot; + void *ifqr_buf __attribute__((aligned(8))); + int ifqr_len __attribute__((aligned(8))); }; /* * Node Proximity Metrics */ enum { - IFNET_NPM_THRESH_UNKNOWN = (-1), - IFNET_NPM_THRESH_NEAR = 30, - IFNET_NPM_THRESH_GENERAL = 70, - IFNET_NPM_THRESH_FAR = 100, + IFNET_NPM_THRESH_UNKNOWN = (-1), + IFNET_NPM_THRESH_NEAR = 30, + IFNET_NPM_THRESH_GENERAL = 70, + IFNET_NPM_THRESH_FAR = 100, }; /* @@ -822,7 +822,7 @@ enum { * IFNET_RSSI_UNKNOWN Metric is not (yet) known. */ enum { - IFNET_RSSI_UNKNOWN = ((-2147483647)-1), /* INT32_MIN */ + IFNET_RSSI_UNKNOWN = ((-2147483647) - 1), /* INT32_MIN */ }; @@ -831,26 +831,26 @@ enum { */ struct kev_dl_node_presence { struct net_event_data link_data; - struct sockaddr_in6 sin6_node_address; - struct sockaddr_dl sdl_node_address; - int32_t rssi; - int link_quality_metric; - int node_proximity_metric; - u_int8_t node_service_info[48]; + struct sockaddr_in6 sin6_node_address; + struct sockaddr_dl sdl_node_address; + int32_t rssi; + int link_quality_metric; + int node_proximity_metric; + u_int8_t node_service_info[48]; }; struct kev_dl_node_absence { struct net_event_data link_data; - struct sockaddr_in6 sin6_node_address; - struct sockaddr_dl sdl_node_address; + struct sockaddr_in6 sin6_node_address; + struct sockaddr_dl sdl_node_address; }; /* * Structure for SIOC[SG]IFTHROTTLE */ struct if_throttlereq { - char ifthr_name[IFNAMSIZ]; /* interface name */ - u_int32_t ifthr_level; + char ifthr_name[IFNAMSIZ]; /* interface name */ + u_int32_t ifthr_level; }; /* @@ -868,10 +868,10 @@ struct if_throttlereq { * are explicitly flushed. 
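
[Editor's note] SIOCGIFLINKPARAMS fills struct if_linkparamsreq above with the output scheduler model, token-bucket regulator settings, and the interface's bandwidth/latency estimates. The ioctl is Apple-private, so the following user-space fragment is an illustrative sketch only:

#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <string.h>

static int
get_sched_model(int s, const char *name, u_int32_t *model)
{
	struct if_linkparamsreq iflpr;

	memset(&iflpr, 0, sizeof(iflpr));
	strlcpy(iflpr.iflpr_name, name, sizeof(iflpr.iflpr_name));
	if (ioctl(s, SIOCGIFLINKPARAMS, &iflpr) == -1) {
		return -1;
	}
	*model = iflpr.iflpr_output_sched; /* an IFNET_SCHED_MODEL_* value */
	return 0;
}
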
*/ enum { - IFNET_THROTTLE_OFF = 0, - IFNET_THROTTLE_OPPORTUNISTIC = 1, + IFNET_THROTTLE_OFF = 0, + IFNET_THROTTLE_OPPORTUNISTIC = 1, #ifdef XNU_KERNEL_PRIVATE - IFNET_THROTTLE_MAX = 2, + IFNET_THROTTLE_MAX = 2, #endif /* XNU_KERNEL_PRIVATE */ }; @@ -879,28 +879,28 @@ enum { * Structure for SIOC[A/D]IFAGENTID */ struct if_agentidreq { - char ifar_name[IFNAMSIZ]; /* interface name */ - uuid_t ifar_uuid; /* agent UUID to add or delete */ + char ifar_name[IFNAMSIZ]; /* interface name */ + uuid_t ifar_uuid; /* agent UUID to add or delete */ }; /* * Structure for SIOCGIFAGENTIDS */ struct if_agentidsreq { - char ifar_name[IFNAMSIZ]; /* interface name */ - u_int32_t ifar_count; /* number of agent UUIDs */ - uuid_t *ifar_uuids; /* array of agent UUIDs */ + char ifar_name[IFNAMSIZ]; /* interface name */ + u_int32_t ifar_count; /* number of agent UUIDs */ + uuid_t *ifar_uuids; /* array of agent UUIDs */ }; #ifdef BSD_KERNEL_PRIVATE struct if_agentidsreq32 { - char ifar_name[IFNAMSIZ]; - u_int32_t ifar_count; + char ifar_name[IFNAMSIZ]; + u_int32_t ifar_count; user32_addr_t ifar_uuids; }; struct if_agentidsreq64 { - char ifar_name[IFNAMSIZ]; - u_int32_t ifar_count; + char ifar_name[IFNAMSIZ]; + u_int32_t ifar_count; user64_addr_t ifar_uuids __attribute__((aligned(8))); }; #endif /* BSD_KERNEL_PRIVATE */ @@ -909,32 +909,32 @@ struct if_agentidsreq64 { * Structure for SIOCGIFNEXUS */ struct if_nexusreq { - char ifnr_name[IFNAMSIZ]; /* interface name */ - uint64_t ifnr_flags; /* unused, must be zero */ - uuid_t ifnr_netif; /* netif nexus instance UUID */ - uuid_t ifnr_multistack; /* multistack nexus UUID */ - uint64_t ifnr_reserved[5]; + char ifnr_name[IFNAMSIZ]; /* interface name */ + uint64_t ifnr_flags; /* unused, must be zero */ + uuid_t ifnr_netif; /* netif nexus instance UUID */ + uuid_t ifnr_multistack; /* multistack nexus UUID */ + uint64_t ifnr_reserved[5]; }; -#define DLIL_MODIDLEN 20 /* same as IFNET_MODIDLEN */ -#define DLIL_MODARGLEN 12 /* same as IFNET_MODARGLEN */ +#define DLIL_MODIDLEN 20 /* same as IFNET_MODIDLEN */ +#define DLIL_MODARGLEN 12 /* same as IFNET_MODARGLEN */ /* * DLIL KEV_DL_ISSUES event structure */ struct kev_dl_issues { struct net_event_data link_data; - u_int8_t modid[DLIL_MODIDLEN]; - u_int64_t timestamp; - u_int8_t info[DLIL_MODARGLEN]; + u_int8_t modid[DLIL_MODIDLEN]; + u_int64_t timestamp; + u_int8_t info[DLIL_MODARGLEN]; }; /* * DLIL KEV_DL_RRC_STATE_CHANGED structure */ struct kev_dl_rrc_state { - struct net_event_data link_data; - u_int32_t rrc_state; + struct net_event_data link_data; + u_int32_t rrc_state; }; /* @@ -942,56 +942,56 @@ struct kev_dl_rrc_state { */ struct kev_dl_low_power_mode { struct net_event_data link_data; - int low_power_event; + int low_power_event; }; /* * Length of network signature/fingerprint blob. 
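
[Editor's note] Putting the throttling levels above to use: a privileged caller selects a level through SIOCSIFTHROTTLE and the struct if_throttlereq from the previous hunk. A hedged sketch of a private ioctl; the failure mode noted in the comment is an assumption, not taken from this patch:

static int
set_throttle(int s, const char *name, u_int32_t level)
{
	struct if_throttlereq ifthr;

	memset(&ifthr, 0, sizeof(ifthr));
	strlcpy(ifthr.ifthr_name, name, sizeof(ifthr.ifthr_name));
	ifthr.ifthr_level = level; /* e.g. IFNET_THROTTLE_OPPORTUNISTIC */
	/* Presumably fails on interfaces whose output scheduler cannot
	 * throttle (assumption). */
	return ioctl(s, SIOCSIFTHROTTLE, &ifthr);
}
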
*/ -#define IFNET_SIGNATURELEN 20 +#define IFNET_SIGNATURELEN 20 /* * Structure for SIOC[S/G]IFNETSIGNATURE */ struct if_nsreq { - char ifnsr_name[IFNAMSIZ]; - u_int8_t ifnsr_family; /* address family */ - u_int8_t ifnsr_len; /* data length */ - u_int16_t ifnsr_flags; /* for future */ - u_int8_t ifnsr_data[IFNET_SIGNATURELEN]; + char ifnsr_name[IFNAMSIZ]; + u_int8_t ifnsr_family; /* address family */ + u_int8_t ifnsr_len; /* data length */ + u_int16_t ifnsr_flags; /* for future */ + u_int8_t ifnsr_data[IFNET_SIGNATURELEN]; }; -#define NAT64_PREFIX_LEN_32 4 -#define NAT64_PREFIX_LEN_40 5 -#define NAT64_PREFIX_LEN_48 6 -#define NAT64_PREFIX_LEN_56 7 -#define NAT64_PREFIX_LEN_64 8 -#define NAT64_PREFIX_LEN_96 12 -#define NAT64_PREFIX_LEN_MAX NAT64_PREFIX_LEN_96 +#define NAT64_PREFIX_LEN_32 4 +#define NAT64_PREFIX_LEN_40 5 +#define NAT64_PREFIX_LEN_48 6 +#define NAT64_PREFIX_LEN_56 7 +#define NAT64_PREFIX_LEN_64 8 +#define NAT64_PREFIX_LEN_96 12 +#define NAT64_PREFIX_LEN_MAX NAT64_PREFIX_LEN_96 -#define NAT64_MAX_NUM_PREFIXES 4 +#define NAT64_MAX_NUM_PREFIXES 4 struct ipv6_prefix { - struct in6_addr ipv6_prefix; - uint32_t prefix_len; + struct in6_addr ipv6_prefix; + uint32_t prefix_len; }; struct if_ipv6_address { - struct in6_addr v6_address; - uint32_t v6_prefixlen; + struct in6_addr v6_address; + uint32_t v6_prefixlen; }; /* Structure for SIOC[S/G]IFNAT64PREFIX */ struct if_nat64req { - char ifnat64_name[IFNAMSIZ]; - struct ipv6_prefix ifnat64_prefixes[NAT64_MAX_NUM_PREFIXES]; + char ifnat64_name[IFNAMSIZ]; + struct ipv6_prefix ifnat64_prefixes[NAT64_MAX_NUM_PREFIXES]; }; /* Structure for SIOCGIFCLAT46ADDR */ struct if_clat46req { - char ifclat46_name[IFNAMSIZ]; - struct if_ipv6_address ifclat46_addr; + char ifclat46_name[IFNAMSIZ]; + struct if_ipv6_address ifclat46_addr; }; /* @@ -1006,31 +1006,31 @@ struct if_clat46req { * of indices copied into the array. */ struct if_order { - u_int32_t ifo_count; - u_int32_t ifo_reserved; - mach_vm_address_t ifo_ordered_indices; /* array of u_int32_t */ + u_int32_t ifo_count; + u_int32_t ifo_reserved; + mach_vm_address_t ifo_ordered_indices; /* array of u_int32_t */ }; /* * Struct for traffic class to DSCP mapping */ struct if_tdmreq { - char iftdm_name[IFNAMSIZ]; - u_int32_t iftdm_len; /* byte length of the table */ - struct netsvctype_dscp_map *iftdm_table; + char iftdm_name[IFNAMSIZ]; + u_int32_t iftdm_len; /* byte length of the table */ + struct netsvctype_dscp_map *iftdm_table; }; #ifdef BSD_KERNEL_PRIVATE struct if_tdmreq32 { - char iftdm_name[IFNAMSIZ]; - u_int32_t iftdm_len; /* byte length of the table */ - user32_addr_t iftdm_table; + char iftdm_name[IFNAMSIZ]; + u_int32_t iftdm_len; /* byte length of the table */ + user32_addr_t iftdm_table; }; struct if_tdmreq64 { - char iftdm_name[IFNAMSIZ]; - u_int32_t iftdm_len; /* byte length of the table */ - user64_addr_t iftdm_table __attribute__((aligned(8))); + char iftdm_name[IFNAMSIZ]; + u_int32_t iftdm_len; /* byte length of the table */ + user64_addr_t iftdm_table __attribute__((aligned(8))); }; #endif @@ -1038,25 +1038,25 @@ struct if_tdmreq64 { * Structure for SIOCGIFPROTOLIST. 
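
[Editor's note] Stepping back to the NAT64 definitions earlier in this header: the NAT64_PREFIX_LEN_* values are byte counts (NAT64_PREFIX_LEN_96 is 12 bytes, i.e. 96 bits). For the common /96 case, RFC 6052 synthesis simply appends the IPv4 address to the prefix; a sketch using struct ipv6_prefix from above (shorter prefixes embed the IPv4 bytes around the reserved u-octet at bits 64..71 and are omitted here):

#include <netinet/in.h>
#include <string.h>

/* Sketch: RFC 6052 address synthesis, /96 prefix case only. */
static struct in6_addr
nat64_synthesize96(const struct ipv6_prefix *pfx, struct in_addr v4)
{
	struct in6_addr out = pfx->ipv6_prefix;

	if (pfx->prefix_len == NAT64_PREFIX_LEN_96) {
		memcpy(&out.s6_addr[12], &v4.s_addr, 4); /* low 32 bits */
	}
	return out;
}
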
*/ struct if_protolistreq { - char ifpl_name[IFNAMSIZ]; - u_int32_t ifpl_count; - u_int32_t ifpl_reserved; /* must be zero */ - u_int32_t *ifpl_list; + char ifpl_name[IFNAMSIZ]; + u_int32_t ifpl_count; + u_int32_t ifpl_reserved; /* must be zero */ + u_int32_t *ifpl_list; }; #ifdef BSD_KERNEL_PRIVATE struct if_protolistreq32 { - char ifpl_name[IFNAMSIZ]; - u_int32_t ifpl_count; - u_int32_t ifpl_reserved; /* must be zero */ - user32_addr_t ifpl_list; + char ifpl_name[IFNAMSIZ]; + u_int32_t ifpl_count; + u_int32_t ifpl_reserved; /* must be zero */ + user32_addr_t ifpl_list; }; struct if_protolistreq64 { - char ifpl_name[IFNAMSIZ]; - u_int32_t ifpl_count; - u_int32_t ifpl_reserved; /* must be zero */ - user64_addr_t ifpl_list; + char ifpl_name[IFNAMSIZ]; + u_int32_t ifpl_count; + u_int32_t ifpl_reserved; /* must be zero */ + user64_addr_t ifpl_list; }; #endif /* BSD_KERNEL_PRIVATE */ @@ -1072,15 +1072,15 @@ MALLOC_DECLARE(M_IFMADDR); #ifndef KERNEL struct if_nameindex { - unsigned int if_index; /* 1, 2, ... */ - char *if_name; /* null terminated name: "le0", ... */ + unsigned int if_index; /* 1, 2, ... */ + char *if_name; /* null terminated name: "le0", ... */ }; __BEGIN_DECLS -unsigned int if_nametoindex(const char *); -char *if_indextoname(unsigned int, char *); -struct if_nameindex *if_nameindex(void); -void if_freenameindex(struct if_nameindex *); +unsigned int if_nametoindex(const char *); +char *if_indextoname(unsigned int, char *); +struct if_nameindex *if_nameindex(void); +void if_freenameindex(struct if_nameindex *); __END_DECLS #endif diff --git a/bsd/net/if_arp.h b/bsd/net/if_arp.h index 45bb05088..d4eff6324 100644 --- a/bsd/net/if_arp.h +++ b/bsd/net/if_arp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -62,7 +62,7 @@ */ #ifndef _NET_IF_ARP_H_ -#define _NET_IF_ARP_H_ +#define _NET_IF_ARP_H_ #include #include #include @@ -77,32 +77,32 @@ * arp_tha and arp_tpa in that order, according to the lengths * specified. Field names used correspond to RFC 826. 
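
[Editor's note] Since struct arphdr (shown just below) lists its address fields only under COMMENT_ONLY, the four variable-length fields live past the fixed header in memory, sized by ar_hln and ar_pln. A hypothetical accessor makes the layout concrete:

#include <net/if_arp.h>

/* Sketch: ar_sha, ar_spa, ar_tha, ar_tpa follow the header in that
 * order; skip the ar_hln-byte sender hardware address to reach the
 * sender protocol address. */
static const u_char *
arp_sender_proto_addr(const struct arphdr *ah)
{
	return (const u_char *)(ah + 1) + ah->ar_hln;
}
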
*/ -struct arphdr { - u_short ar_hrd; /* format of hardware address */ -#define ARPHRD_ETHER 1 /* ethernet hardware format */ -#define ARPHRD_IEEE802 6 /* token-ring hardware format */ -#define ARPHRD_FRELAY 15 /* frame relay hardware format */ -#define ARPHRD_IEEE1394 24 /* IEEE1394 hardware address */ +struct arphdr { + u_short ar_hrd; /* format of hardware address */ +#define ARPHRD_ETHER 1 /* ethernet hardware format */ +#define ARPHRD_IEEE802 6 /* token-ring hardware format */ +#define ARPHRD_FRELAY 15 /* frame relay hardware format */ +#define ARPHRD_IEEE1394 24 /* IEEE1394 hardware address */ #define ARPHRD_IEEE1394_EUI64 27 /* IEEE1394 EUI-64 */ - u_short ar_pro; /* format of protocol address */ - u_char ar_hln; /* length of hardware address */ - u_char ar_pln; /* length of protocol address */ - u_short ar_op; /* one of: */ -#define ARPOP_REQUEST 1 /* request to resolve address */ -#define ARPOP_REPLY 2 /* response to previous request */ -#define ARPOP_REVREQUEST 3 /* request protocol address given hardware */ -#define ARPOP_REVREPLY 4 /* response giving protocol address */ -#define ARPOP_INVREQUEST 8 /* request to identify peer */ -#define ARPOP_INVREPLY 9 /* response identifying peer */ + u_short ar_pro; /* format of protocol address */ + u_char ar_hln; /* length of hardware address */ + u_char ar_pln; /* length of protocol address */ + u_short ar_op; /* one of: */ +#define ARPOP_REQUEST 1 /* request to resolve address */ +#define ARPOP_REPLY 2 /* response to previous request */ +#define ARPOP_REVREQUEST 3 /* request protocol address given hardware */ +#define ARPOP_REVREPLY 4 /* response giving protocol address */ +#define ARPOP_INVREQUEST 8 /* request to identify peer */ +#define ARPOP_INVREPLY 9 /* response identifying peer */ /* * The remaining fields are variable in size, * according to the sizes above. */ #ifdef COMMENT_ONLY - u_char ar_sha[]; /* sender hardware address */ - u_char ar_spa[]; /* sender protocol address */ - u_char ar_tha[]; /* target hardware address */ - u_char ar_tpa[]; /* target protocol address */ + u_char ar_sha[]; /* sender hardware address */ + u_char ar_spa[]; /* sender protocol address */ + u_char ar_tha[]; /* target hardware address */ + u_char ar_tpa[]; /* target protocol address */ #endif }; @@ -110,40 +110,40 @@ struct arphdr { * ARP ioctl request */ struct arpreq { - struct sockaddr arp_pa; /* protocol address */ - struct sockaddr arp_ha; /* hardware address */ - int arp_flags; /* flags */ + struct sockaddr arp_pa; /* protocol address */ + struct sockaddr arp_ha; /* hardware address */ + int arp_flags; /* flags */ }; /* arp_flags and at_flags field values */ -#define ATF_INUSE 0x01 /* entry in use */ -#define ATF_COM 0x02 /* completed entry (enaddr valid) */ -#define ATF_PERM 0x04 /* permanent entry */ -#define ATF_PUBL 0x08 /* publish entry (respond for other host) */ -#define ATF_USETRAILERS 0x10 /* has requested trailers */ +#define ATF_INUSE 0x01 /* entry in use */ +#define ATF_COM 0x02 /* completed entry (enaddr valid) */ +#define ATF_PERM 0x04 /* permanent entry */ +#define ATF_PUBL 0x08 /* publish entry (respond for other host) */ +#define ATF_USETRAILERS 0x10 /* has requested trailers */ struct arpstat { /* Normal things that happen: */ - uint32_t txrequests; /* # of ARP requests sent by this host. */ - uint32_t txreplies; /* # of ARP replies sent by this host. */ - uint32_t txannounces; /* # of ARP announcements sent by this host. */ - uint32_t rxrequests; /* # of ARP requests received by this host. 
*/ - uint32_t rxreplies; /* # of ARP replies received by this host. */ - uint32_t received; /* # of ARP packets received by this host. */ + uint32_t txrequests; /* # of ARP requests sent by this host. */ + uint32_t txreplies; /* # of ARP replies sent by this host. */ + uint32_t txannounces; /* # of ARP announcements sent by this host. */ + uint32_t rxrequests; /* # of ARP requests received by this host. */ + uint32_t rxreplies; /* # of ARP replies received by this host. */ + uint32_t received; /* # of ARP packets received by this host. */ /* Abnormal event and error counting: */ - uint32_t txconflicts; /* # of ARP conflict probes sent */ - uint32_t invalidreqs; /* # of invalid ARP resolve requests */ - uint32_t reqnobufs; /* # of failed requests due to no memory */ - uint32_t dropped; /* # of packets dropped waiting for a reply. */ - uint32_t purged; /* # of packets purged while removing entries */ - uint32_t timeouts; /* # of times with entries removed */ - /* due to timeout. */ - uint32_t dupips; /* # of duplicate IPs detected. */ + uint32_t txconflicts; /* # of ARP conflict probes sent */ + uint32_t invalidreqs; /* # of invalid ARP resolve requests */ + uint32_t reqnobufs; /* # of failed requests due to no memory */ + uint32_t dropped; /* # of packets dropped waiting for a reply. */ + uint32_t purged; /* # of packets purged while removing entries */ + uint32_t timeouts; /* # of times with entries removed */ + /* due to timeout. */ + uint32_t dupips; /* # of duplicate IPs detected. */ /* General statistics */ - uint32_t inuse; /* # of ARP entries in routing table */ - uint32_t txurequests; /* # of ARP requests sent (unicast) */ - uint32_t held; /* # of packets held waiting for a reply */ + uint32_t inuse; /* # of ARP entries in routing table */ + uint32_t txurequests; /* # of ARP requests sent (unicast) */ + uint32_t held; /* # of packets held waiting for a reply */ }; #ifdef BSD_KERNEL_PRIVATE diff --git a/bsd/net/if_bond.c b/bsd/net/if_bond.c index 8682a9fe5..116148967 100644 --- a/bsd/net/if_bond.c +++ b/bsd/net/if_bond.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2004-2018 Apple Inc. All rights reserved. + * Copyright (c) 2004-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * if_bond.c + * if_bond.c * - bond/failover interface * - implements IEEE 802.3ad Link Aggregation */ @@ -68,7 +68,7 @@ #include #include -#include +#include #include #include @@ -80,98 +80,98 @@ #include static struct ether_addr slow_proto_multicast = { - IEEE8023AD_SLOW_PROTO_MULTICAST + IEEE8023AD_SLOW_PROTO_MULTICAST }; typedef struct ifbond_s ifbond, * ifbond_ref; typedef struct bondport_s bondport, * bondport_ref; -#define BOND_MAXUNIT 128 -#define BOND_ZONE_MAX_ELEM MIN(IFNETS_MAX, BOND_MAXUNIT) -#define BONDNAME "bond" +#define BOND_MAXUNIT 128 +#define BOND_ZONE_MAX_ELEM MIN(IFNETS_MAX, BOND_MAXUNIT) +#define BONDNAME "bond" -#define M_BOND M_DEVBUF +#define M_BOND M_DEVBUF -#define EA_FORMAT "%x:%x:%x:%x:%x:%x" -#define EA_CH(e, i) ((u_char)((u_char *)(e))[(i)]) -#define EA_LIST(ea) EA_CH(ea,0),EA_CH(ea,1),EA_CH(ea,2),EA_CH(ea,3),EA_CH(ea,4),EA_CH(ea,5) +#define EA_FORMAT "%x:%x:%x:%x:%x:%x" +#define EA_CH(e, i) ((u_char)((u_char *)(e))[(i)]) +#define EA_LIST(ea) EA_CH(ea,0),EA_CH(ea,1),EA_CH(ea,2),EA_CH(ea,3),EA_CH(ea,4),EA_CH(ea,5) -#define timestamp_printf printf +#define timestamp_printf printf /** - ** bond locks - **/ +** bond locks +**/ static __inline__ lck_grp_t * my_lck_grp_alloc_init(const char * grp_name) { - lck_grp_t * grp; - lck_grp_attr_t * grp_attrs; - - grp_attrs = lck_grp_attr_alloc_init(); - grp = lck_grp_alloc_init(grp_name, grp_attrs); - lck_grp_attr_free(grp_attrs); - return (grp); + lck_grp_t * grp; + lck_grp_attr_t * grp_attrs; + + grp_attrs = lck_grp_attr_alloc_init(); + grp = lck_grp_alloc_init(grp_name, grp_attrs); + lck_grp_attr_free(grp_attrs); + return grp; } static __inline__ lck_mtx_t * my_lck_mtx_alloc_init(lck_grp_t * lck_grp) { - lck_attr_t * lck_attrs; - lck_mtx_t * lck_mtx; + lck_attr_t * lck_attrs; + lck_mtx_t * lck_mtx; - lck_attrs = lck_attr_alloc_init(); - lck_mtx = lck_mtx_alloc_init(lck_grp, lck_attrs); - lck_attr_free(lck_attrs); - return (lck_mtx); + lck_attrs = lck_attr_alloc_init(); + lck_mtx = lck_mtx_alloc_init(lck_grp, lck_attrs); + lck_attr_free(lck_attrs); + return lck_mtx; } -static lck_mtx_t * bond_lck_mtx; +static lck_mtx_t * bond_lck_mtx; static __inline__ void bond_lock_init(void) { - lck_grp_t * bond_lck_grp; + lck_grp_t * bond_lck_grp; - bond_lck_grp = my_lck_grp_alloc_init("if_bond"); - bond_lck_mtx = my_lck_mtx_alloc_init(bond_lck_grp); + bond_lck_grp = my_lck_grp_alloc_init("if_bond"); + bond_lck_mtx = my_lck_mtx_alloc_init(bond_lck_grp); } static __inline__ void bond_assert_lock_held(void) { - LCK_MTX_ASSERT(bond_lck_mtx, LCK_MTX_ASSERT_OWNED); - return; + LCK_MTX_ASSERT(bond_lck_mtx, LCK_MTX_ASSERT_OWNED); + return; } static __inline__ void bond_assert_lock_not_held(void) { - LCK_MTX_ASSERT(bond_lck_mtx, LCK_MTX_ASSERT_NOTOWNED); - return; + LCK_MTX_ASSERT(bond_lck_mtx, LCK_MTX_ASSERT_NOTOWNED); + return; } static __inline__ void bond_lock(void) { - lck_mtx_lock(bond_lck_mtx); - return; + lck_mtx_lock(bond_lck_mtx); + return; } static __inline__ void bond_unlock(void) { - lck_mtx_unlock(bond_lck_mtx); - return; + lck_mtx_unlock(bond_lck_mtx); + return; } /** - ** bond structures, types - **/ +** bond structures, types +**/ struct LAG_info_s { - lacp_system li_system; - lacp_system_priority li_system_priority; - lacp_key li_key; + lacp_system li_system; + lacp_system_priority li_system_priority; + lacp_key li_key; }; typedef struct LAG_info_s LAG_info, * LAG_info_ref; @@ -186,368 +186,368 @@ typedef struct ifbond_s ifbond, * ifbond_ref; typedef struct 
bondport_s bondport, * bondport_ref; struct LAG_s { - TAILQ_ENTRY(LAG_s) lag_list; - struct port_list lag_port_list; - short lag_port_count; - short lag_selected_port_count; - int lag_active_media; - LAG_info lag_info; + TAILQ_ENTRY(LAG_s) lag_list; + struct port_list lag_port_list; + short lag_port_count; + short lag_selected_port_count; + int lag_active_media; + LAG_info lag_info; }; typedef struct LAG_s LAG, * LAG_ref; typedef struct partner_state_s { - LAG_info ps_lag_info; - lacp_port ps_port; - lacp_port_priority ps_port_priority; - lacp_actor_partner_state ps_state; + LAG_info ps_lag_info; + lacp_port ps_port; + lacp_port_priority ps_port_priority; + lacp_actor_partner_state ps_state; } partner_state, * partner_state_ref; struct ifbond_s { - TAILQ_ENTRY(ifbond_s) ifb_bond_list; - int ifb_flags; - SInt32 ifb_retain_count; - char ifb_name[IFNAMSIZ]; - struct ifnet * ifb_ifp; - bpf_packet_func ifb_bpf_input; - bpf_packet_func ifb_bpf_output; - int ifb_altmtu; - struct port_list ifb_port_list; - short ifb_port_count; - struct lag_list ifb_lag_list; - lacp_key ifb_key; - short ifb_max_active; /* 0 == unlimited */ - LAG_ref ifb_active_lag; - struct ifmultiaddr * ifb_ifma_slow_proto; - bondport_ref * ifb_distributing_array; - int ifb_distributing_count; - int ifb_last_link_event; - int ifb_mode; /* LACP, STATIC */ + TAILQ_ENTRY(ifbond_s) ifb_bond_list; + int ifb_flags; + struct os_refcnt ifb_retain_count; + char ifb_name[IFNAMSIZ]; + struct ifnet * ifb_ifp; + bpf_packet_func ifb_bpf_input; + bpf_packet_func ifb_bpf_output; + int ifb_altmtu; + struct port_list ifb_port_list; + short ifb_port_count; + struct lag_list ifb_lag_list; + lacp_key ifb_key; + short ifb_max_active;/* 0 == unlimited */ + LAG_ref ifb_active_lag; + struct ifmultiaddr * ifb_ifma_slow_proto; + bondport_ref * ifb_distributing_array; + int ifb_distributing_count; + int ifb_last_link_event; + int ifb_mode;/* LACP, STATIC */ }; struct media_info { - int mi_active; - int mi_status; + int mi_active; + int mi_status; }; enum { - ReceiveState_none = 0, - ReceiveState_INITIALIZE = 1, - ReceiveState_PORT_DISABLED = 2, - ReceiveState_EXPIRED = 3, - ReceiveState_LACP_DISABLED = 4, - ReceiveState_DEFAULTED = 5, - ReceiveState_CURRENT = 6, + ReceiveState_none = 0, + ReceiveState_INITIALIZE = 1, + ReceiveState_PORT_DISABLED = 2, + ReceiveState_EXPIRED = 3, + ReceiveState_LACP_DISABLED = 4, + ReceiveState_DEFAULTED = 5, + ReceiveState_CURRENT = 6, }; typedef u_char ReceiveState; enum { - SelectedState_UNSELECTED = IF_BOND_STATUS_SELECTED_STATE_UNSELECTED, - SelectedState_SELECTED = IF_BOND_STATUS_SELECTED_STATE_SELECTED, - SelectedState_STANDBY = IF_BOND_STATUS_SELECTED_STATE_STANDBY + SelectedState_UNSELECTED = IF_BOND_STATUS_SELECTED_STATE_UNSELECTED, + SelectedState_SELECTED = IF_BOND_STATUS_SELECTED_STATE_SELECTED, + SelectedState_STANDBY = IF_BOND_STATUS_SELECTED_STATE_STANDBY }; typedef u_char SelectedState; static __inline__ const char * SelectedStateString(SelectedState s) { - static const char * names[] = { "UNSELECTED", "SELECTED", "STANDBY" }; + static const char * names[] = { "UNSELECTED", "SELECTED", "STANDBY" }; - if (s <= SelectedState_STANDBY) { - return (names[s]); - } - return (""); + if (s <= SelectedState_STANDBY) { + return names[s]; + } + return ""; } enum { - MuxState_none = 0, - MuxState_DETACHED = 1, - MuxState_WAITING = 2, - MuxState_ATTACHED = 3, - MuxState_COLLECTING_DISTRIBUTING = 4, + MuxState_none = 0, + MuxState_DETACHED = 1, + MuxState_WAITING = 2, + MuxState_ATTACHED = 3, + 
MuxState_COLLECTING_DISTRIBUTING = 4, }; typedef u_char MuxState; struct bondport_s { - TAILQ_ENTRY(bondport_s) po_port_list; - ifbond_ref po_bond; - struct multicast_list po_multicast; - struct ifnet * po_ifp; - struct ether_addr po_saved_addr; - int po_enabled; - char po_name[IFNAMSIZ]; - struct ifdevmtu po_devmtu; - - /* LACP */ - TAILQ_ENTRY(bondport_s) po_lag_port_list; - devtimer_ref po_current_while_timer; - devtimer_ref po_periodic_timer; - devtimer_ref po_wait_while_timer; - devtimer_ref po_transmit_timer; - partner_state po_partner_state; - lacp_port_priority po_priority; - lacp_actor_partner_state po_actor_state; - u_char po_flags; - u_char po_periodic_interval; - u_char po_n_transmit; - ReceiveState po_receive_state; - MuxState po_mux_state; - SelectedState po_selected; - int32_t po_last_transmit_secs; - struct media_info po_media_info; - LAG_ref po_lag; + TAILQ_ENTRY(bondport_s) po_port_list; + ifbond_ref po_bond; + struct multicast_list po_multicast; + struct ifnet * po_ifp; + struct ether_addr po_saved_addr; + int po_enabled; + char po_name[IFNAMSIZ]; + struct ifdevmtu po_devmtu; + + /* LACP */ + TAILQ_ENTRY(bondport_s) po_lag_port_list; + devtimer_ref po_current_while_timer; + devtimer_ref po_periodic_timer; + devtimer_ref po_wait_while_timer; + devtimer_ref po_transmit_timer; + partner_state po_partner_state; + lacp_port_priority po_priority; + lacp_actor_partner_state po_actor_state; + u_char po_flags; + u_char po_periodic_interval; + u_char po_n_transmit; + ReceiveState po_receive_state; + MuxState po_mux_state; + SelectedState po_selected; + int32_t po_last_transmit_secs; + struct media_info po_media_info; + LAG_ref po_lag; }; -#define IFBF_PROMISC 0x1 /* promiscuous mode */ -#define IFBF_IF_DETACHING 0x2 /* interface is detaching */ -#define IFBF_LLADDR 0x4 /* specific link address requested */ -#define IFBF_CHANGE_IN_PROGRESS 0x8 /* interface add/remove in progress */ +#define IFBF_PROMISC 0x1 /* promiscuous mode */ +#define IFBF_IF_DETACHING 0x2 /* interface is detaching */ +#define IFBF_LLADDR 0x4 /* specific link address requested */ +#define IFBF_CHANGE_IN_PROGRESS 0x8 /* interface add/remove in progress */ -static int bond_get_status(ifbond_ref ifb, struct if_bond_req * ibr_p, - user_addr_t datap); +static int bond_get_status(ifbond_ref ifb, struct if_bond_req * ibr_p, + user_addr_t datap); static __inline__ int ifbond_flags_if_detaching(ifbond_ref ifb) { - return ((ifb->ifb_flags & IFBF_IF_DETACHING) != 0); + return (ifb->ifb_flags & IFBF_IF_DETACHING) != 0; } static __inline__ void ifbond_flags_set_if_detaching(ifbond_ref ifb) { - ifb->ifb_flags |= IFBF_IF_DETACHING; - return; + ifb->ifb_flags |= IFBF_IF_DETACHING; + return; } static __inline__ int ifbond_flags_lladdr(ifbond_ref ifb) { - return ((ifb->ifb_flags & IFBF_LLADDR) != 0); + return (ifb->ifb_flags & IFBF_LLADDR) != 0; } static __inline__ int ifbond_flags_change_in_progress(ifbond_ref ifb) { - return ((ifb->ifb_flags & IFBF_CHANGE_IN_PROGRESS) != 0); + return (ifb->ifb_flags & IFBF_CHANGE_IN_PROGRESS) != 0; } static __inline__ void ifbond_flags_set_change_in_progress(ifbond_ref ifb) { - ifb->ifb_flags |= IFBF_CHANGE_IN_PROGRESS; - return; + ifb->ifb_flags |= IFBF_CHANGE_IN_PROGRESS; + return; } static __inline__ void ifbond_flags_clear_change_in_progress(ifbond_ref ifb) { - ifb->ifb_flags &= ~IFBF_CHANGE_IN_PROGRESS; - return; + ifb->ifb_flags &= ~IFBF_CHANGE_IN_PROGRESS; + return; } /* * bondport_ref->po_flags bits */ -#define BONDPORT_FLAGS_NTT 0x01 -#define BONDPORT_FLAGS_READY 0x02 -#define 
BONDPORT_FLAGS_SELECTED_CHANGED 0x04 -#define BONDPORT_FLAGS_MUX_ATTACHED 0x08 -#define BONDPORT_FLAGS_DISTRIBUTING 0x10 -#define BONDPORT_FLAGS_UNUSED2 0x20 -#define BONDPORT_FLAGS_UNUSED3 0x40 -#define BONDPORT_FLAGS_UNUSED4 0x80 +#define BONDPORT_FLAGS_NTT 0x01 +#define BONDPORT_FLAGS_READY 0x02 +#define BONDPORT_FLAGS_SELECTED_CHANGED 0x04 +#define BONDPORT_FLAGS_MUX_ATTACHED 0x08 +#define BONDPORT_FLAGS_DISTRIBUTING 0x10 +#define BONDPORT_FLAGS_UNUSED2 0x20 +#define BONDPORT_FLAGS_UNUSED3 0x40 +#define BONDPORT_FLAGS_UNUSED4 0x80 static __inline__ void bondport_flags_set_ntt(bondport_ref p) { - p->po_flags |= BONDPORT_FLAGS_NTT; - return; + p->po_flags |= BONDPORT_FLAGS_NTT; + return; } static __inline__ void bondport_flags_clear_ntt(bondport_ref p) { - p->po_flags &= ~BONDPORT_FLAGS_NTT; - return; + p->po_flags &= ~BONDPORT_FLAGS_NTT; + return; } static __inline__ int bondport_flags_ntt(bondport_ref p) { - return ((p->po_flags & BONDPORT_FLAGS_NTT) != 0); + return (p->po_flags & BONDPORT_FLAGS_NTT) != 0; } static __inline__ void bondport_flags_set_ready(bondport_ref p) { - p->po_flags |= BONDPORT_FLAGS_READY; - return; + p->po_flags |= BONDPORT_FLAGS_READY; + return; } static __inline__ void bondport_flags_clear_ready(bondport_ref p) { - p->po_flags &= ~BONDPORT_FLAGS_READY; - return; + p->po_flags &= ~BONDPORT_FLAGS_READY; + return; } static __inline__ int bondport_flags_ready(bondport_ref p) { - return ((p->po_flags & BONDPORT_FLAGS_READY) != 0); + return (p->po_flags & BONDPORT_FLAGS_READY) != 0; } static __inline__ void bondport_flags_set_selected_changed(bondport_ref p) { - p->po_flags |= BONDPORT_FLAGS_SELECTED_CHANGED; - return; + p->po_flags |= BONDPORT_FLAGS_SELECTED_CHANGED; + return; } static __inline__ void bondport_flags_clear_selected_changed(bondport_ref p) { - p->po_flags &= ~BONDPORT_FLAGS_SELECTED_CHANGED; - return; + p->po_flags &= ~BONDPORT_FLAGS_SELECTED_CHANGED; + return; } static __inline__ int bondport_flags_selected_changed(bondport_ref p) { - return ((p->po_flags & BONDPORT_FLAGS_SELECTED_CHANGED) != 0); + return (p->po_flags & BONDPORT_FLAGS_SELECTED_CHANGED) != 0; } static __inline__ void bondport_flags_set_mux_attached(bondport_ref p) { - p->po_flags |= BONDPORT_FLAGS_MUX_ATTACHED; - return; + p->po_flags |= BONDPORT_FLAGS_MUX_ATTACHED; + return; } static __inline__ void bondport_flags_clear_mux_attached(bondport_ref p) { - p->po_flags &= ~BONDPORT_FLAGS_MUX_ATTACHED; - return; + p->po_flags &= ~BONDPORT_FLAGS_MUX_ATTACHED; + return; } static __inline__ int bondport_flags_mux_attached(bondport_ref p) { - return ((p->po_flags & BONDPORT_FLAGS_MUX_ATTACHED) != 0); + return (p->po_flags & BONDPORT_FLAGS_MUX_ATTACHED) != 0; } static __inline__ void bondport_flags_set_distributing(bondport_ref p) { - p->po_flags |= BONDPORT_FLAGS_DISTRIBUTING; - return; + p->po_flags |= BONDPORT_FLAGS_DISTRIBUTING; + return; } static __inline__ void bondport_flags_clear_distributing(bondport_ref p) { - p->po_flags &= ~BONDPORT_FLAGS_DISTRIBUTING; - return; + p->po_flags &= ~BONDPORT_FLAGS_DISTRIBUTING; + return; } static __inline__ int bondport_flags_distributing(bondport_ref p) { - return ((p->po_flags & BONDPORT_FLAGS_DISTRIBUTING) != 0); + return (p->po_flags & BONDPORT_FLAGS_DISTRIBUTING) != 0; } typedef struct bond_globals_s { - struct ifbond_list ifbond_list; - lacp_system system; - lacp_system_priority system_priority; - int verbose; + struct ifbond_list ifbond_list; + lacp_system system; + lacp_system_priority system_priority; + int verbose; } * bond_globals_ref; 
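/*
 * Editor's note (illustrative sketch, not part of the patch): one of the
 * few substantive changes in this otherwise whitespace-heavy diff is the
 * retain-count migration visible in struct ifbond_s above and in
 * ifbond_retain()/ifbond_release() below: the hand-rolled SInt32 counter
 * driven by OSIncrementAtomic()/OSDecrementAtomic() (with a switch on the
 * old value and a panic on underflow) becomes a struct os_refcnt managed
 * with os_ref_init(), os_ref_retain(), and os_ref_release(), where the
 * release path tears the object down only when the count reaches zero.
 * The user-space analog below shows that release-on-zero idiom with C11
 * <stdatomic.h>; every ex_* name is a hypothetical stand-in, not an XNU
 * interface.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef atomic_int ex_refcnt_t;

/* object starts life holding one reference, as os_ref_init() does */
static void
ex_ref_init(ex_refcnt_t *rc)
{
	atomic_init(rc, 1);
}

static void
ex_ref_retain(ex_refcnt_t *rc)
{
	atomic_fetch_add(rc, 1);
}

/* returns the new count; zero means the caller dropped the last reference */
static int
ex_ref_release(ex_refcnt_t *rc)
{
	return atomic_fetch_sub(rc, 1) - 1;
}

struct ex_bond {
	ex_refcnt_t	ex_retain_count;
	char		ex_name[16];
};

static void
ex_bond_release(struct ex_bond *b)
{
	/* mirrors the patched ifbond_release(): return unless we hit zero */
	if (ex_ref_release(&b->ex_retain_count) != 0) {
		return;
	}
	/* last reference gone: tear down, as ifbond_release() frees the softc */
	printf("ex_bond_release(%s): freeing\n", b->ex_name);
	free(b);
}

int
main(void)
{
	struct ex_bond *b = calloc(1, sizeof(*b));

	if (b == NULL) {
		return EXIT_FAILURE;
	}
	snprintf(b->ex_name, sizeof(b->ex_name), "bond0");
	ex_ref_init(&b->ex_retain_count);
	ex_ref_retain(&b->ex_retain_count);	/* a second holder appears */
	ex_bond_release(b);			/* count 2 -> 1: no teardown */
	ex_bond_release(b);			/* count 1 -> 0: frees b */
	return EXIT_SUCCESS;
}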
-static bond_globals_ref g_bond; +static bond_globals_ref g_bond; /** - ** packet_buffer routines - ** - thin wrapper for mbuf - **/ +** packet_buffer routines +** - thin wrapper for mbuf +**/ typedef struct mbuf * packet_buffer_ref; static packet_buffer_ref packet_buffer_allocate(int length) { - packet_buffer_ref m; - int size; - - /* leave room for ethernet header */ - size = length + sizeof(struct ether_header); - if (size > (int)MHLEN) { - if (size > (int)MCLBYTES) { - printf("bond: packet_buffer_allocate size %d > max %u\n", - size, MCLBYTES); - return (NULL); - } - m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); - } else { - m = m_gethdr(M_WAITOK, MT_DATA); - } - if (m == NULL) { - return (NULL); - } - m->m_len = size; - m->m_pkthdr.len = size; - return (m); + packet_buffer_ref m; + int size; + + /* leave room for ethernet header */ + size = length + sizeof(struct ether_header); + if (size > (int)MHLEN) { + if (size > (int)MCLBYTES) { + printf("bond: packet_buffer_allocate size %d > max %u\n", + size, MCLBYTES); + return NULL; + } + m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); + } else { + m = m_gethdr(M_WAITOK, MT_DATA); + } + if (m == NULL) { + return NULL; + } + m->m_len = size; + m->m_pkthdr.len = size; + return m; } static void * packet_buffer_byteptr(packet_buffer_ref buf) { - return (buf->m_data + sizeof(struct ether_header)); + return buf->m_data + sizeof(struct ether_header); } typedef enum { - LAEventStart, - LAEventTimeout, - LAEventPacket, - LAEventMediaChange, - LAEventSelectedChange, - LAEventPortMoved, - LAEventReady + LAEventStart, + LAEventTimeout, + LAEventPacket, + LAEventMediaChange, + LAEventSelectedChange, + LAEventPortMoved, + LAEventReady } LAEvent; /** - ** Receive machine - **/ +** Receive machine +**/ static void bondport_receive_machine(bondport_ref p, LAEvent event, - void * event_data); + void * event_data); /** - ** Periodic Transmission machine - **/ +** Periodic Transmission machine +**/ static void bondport_periodic_transmit_machine(bondport_ref p, LAEvent event, - void * event_data); + void * event_data); /** - ** Transmit machine - **/ -#define TRANSMIT_MACHINE_TX_IMMEDIATE ((void *)1) +** Transmit machine +**/ +#define TRANSMIT_MACHINE_TX_IMMEDIATE ((void *)1) static void bondport_transmit_machine(bondport_ref p, LAEvent event, - void * event_data); + void * event_data); /** - ** Mux machine - **/ +** Mux machine +**/ static void bondport_mux_machine(bondport_ref p, LAEvent event, - void * event_data); + void * event_data); /** - ** bond, LAG - **/ +** bond, LAG +**/ static void ifbond_activate_LAG(ifbond_ref bond, LAG_ref lag, int active_media); @@ -568,8 +568,8 @@ ifbond_selection(ifbond_ref bond); /** - ** bondport - **/ +** bondport +**/ static void bondport_receive_lacpdu(bondport_ref p, lacpdu_ref in_lacpdu_p); @@ -579,7 +579,7 @@ bondport_slow_proto_transmit(bondport_ref p, packet_buffer_ref buf); static bondport_ref bondport_create(struct ifnet * port_ifp, lacp_port_priority priority, - int active, int short_timeout, int * error); + int active, int short_timeout, int * error); static void bondport_start(bondport_ref p); @@ -592,7 +592,7 @@ bondport_aggregatable(bondport_ref p); static int bondport_remove_from_LAG(bondport_ref p); -static void +static void bondport_set_selected(bondport_ref p, SelectedState s); static int @@ -610,15 +610,15 @@ bondport_disable_distributing(bondport_ref p); static __inline__ int bondport_collecting(bondport_ref p) { - if (p->po_bond->ifb_mode == IF_BOND_MODE_LACP) { - return 
(lacp_actor_partner_state_collecting(p->po_actor_state)); - } - return (TRUE); + if (p->po_bond->ifb_mode == IF_BOND_MODE_LACP) { + return lacp_actor_partner_state_collecting(p->po_actor_state); + } + return TRUE; } /** - ** bond interface/dlil specific routines - **/ +** bond interface/dlil specific routines +**/ static int bond_clone_create(struct if_clone *, u_int32_t, void *); static int bond_clone_destroy(struct ifnet *); static int bond_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t m, @@ -646,84 +646,75 @@ static struct if_clone bond_cloner = IF_CLONE_INITIALIZER(BONDNAME, static int siocsifmtu(struct ifnet * ifp, int mtu) { - struct ifreq ifr; + struct ifreq ifr; - bzero(&ifr, sizeof(ifr)); - ifr.ifr_mtu = mtu; - return (ifnet_ioctl(ifp, 0, SIOCSIFMTU, &ifr)); + bzero(&ifr, sizeof(ifr)); + ifr.ifr_mtu = mtu; + return ifnet_ioctl(ifp, 0, SIOCSIFMTU, &ifr); } static int siocgifdevmtu(struct ifnet * ifp, struct ifdevmtu * ifdm_p) { - struct ifreq ifr; - int error; + struct ifreq ifr; + int error; - bzero(&ifr, sizeof(ifr)); - error = ifnet_ioctl(ifp, 0, SIOCGIFDEVMTU, &ifr); - if (error == 0) { - *ifdm_p = ifr.ifr_devmtu; - } - return (error); + bzero(&ifr, sizeof(ifr)); + error = ifnet_ioctl(ifp, 0, SIOCGIFDEVMTU, &ifr); + if (error == 0) { + *ifdm_p = ifr.ifr_devmtu; + } + return error; } static __inline__ void ether_addr_copy(void * dest, const void * source) { - bcopy(source, dest, ETHER_ADDR_LEN); - return; + bcopy(source, dest, ETHER_ADDR_LEN); + return; } static __inline__ void ifbond_retain(ifbond_ref ifb) { - OSIncrementAtomic(&ifb->ifb_retain_count); + os_ref_retain(&ifb->ifb_retain_count); } static __inline__ void ifbond_release(ifbond_ref ifb) { - UInt32 old_retain_count; + if (os_ref_release(&ifb->ifb_retain_count) != 0) { + return; + } - old_retain_count = OSDecrementAtomic(&ifb->ifb_retain_count); - switch (old_retain_count) { - case 0: - panic("ifbond_release: retain count is 0\n"); - break; - case 1: if (g_bond->verbose) { - printf("ifbond_release(%s)\n", ifb->ifb_name); + printf("ifbond_release(%s)\n", ifb->ifb_name); } if (ifb->ifb_ifma_slow_proto != NULL) { - if (g_bond->verbose) { - printf("ifbond_release(%s) removing multicast\n", - ifb->ifb_name); - } - (void) if_delmulti_anon(ifb->ifb_ifma_slow_proto->ifma_ifp, - ifb->ifb_ifma_slow_proto->ifma_addr); - IFMA_REMREF(ifb->ifb_ifma_slow_proto); + if (g_bond->verbose) { + printf("ifbond_release(%s) removing multicast\n", + ifb->ifb_name); + } + (void) if_delmulti_anon(ifb->ifb_ifma_slow_proto->ifma_ifp, + ifb->ifb_ifma_slow_proto->ifma_addr); + IFMA_REMREF(ifb->ifb_ifma_slow_proto); } if (ifb->ifb_distributing_array != NULL) { - FREE(ifb->ifb_distributing_array, M_BOND); + FREE(ifb->ifb_distributing_array, M_BOND); } if_clone_softc_deallocate(&bond_cloner, ifb); - break; - default: - break; - } - return; } /* * Function: ifbond_wait * Purpose: * Allows a single thread to gain exclusive access to the ifbond - * data structure. Some operations take a long time to complete, + * data structure. Some operations take a long time to complete, * and some have side-effects that we can't predict. Holding the * bond_lock() across such operations is not possible. * * For example: - * 1) The SIOCSIFLLADDR ioctl takes a long time (several seconds) to + * 1) The SIOCSIFLLADDR ioctl takes a long time (several seconds) to * complete. Simply holding the bond_lock() would freeze all other * data structure accesses during that time. 
* 2) When we attach our protocol to the interface, a dlil event is @@ -737,28 +728,28 @@ ifbond_release(ifbond_ref ifb) static void ifbond_wait(ifbond_ref ifb, const char * msg) { - int waited = 0; + int waited = 0; - /* other add/remove in progress */ - while (ifbond_flags_change_in_progress(ifb)) { - if (g_bond->verbose) { - printf("%s: %s msleep\n", ifb->ifb_name, msg); + /* other add/remove in progress */ + while (ifbond_flags_change_in_progress(ifb)) { + if (g_bond->verbose) { + printf("%s: %s msleep\n", ifb->ifb_name, msg); + } + waited = 1; + (void)msleep(ifb, bond_lck_mtx, PZERO, msg, 0); } - waited = 1; - (void)msleep(ifb, bond_lck_mtx, PZERO, msg, 0); - } - /* prevent other bond list remove/add from taking place */ - ifbond_flags_set_change_in_progress(ifb); - if (g_bond->verbose && waited) { - printf("%s: %s woke up\n", ifb->ifb_name, msg); - } - return; + /* prevent other bond list remove/add from taking place */ + ifbond_flags_set_change_in_progress(ifb); + if (g_bond->verbose && waited) { + printf("%s: %s woke up\n", ifb->ifb_name, msg); + } + return; } /* * Function: ifbond_signal * Purpose: - * Allows the thread that previously invoked ifbond_wait() to + * Allows the thread that previously invoked ifbond_wait() to * give up exclusive access to the ifbond data structure, and wake up * any other threads waiting to access * Notes: @@ -768,235 +759,265 @@ ifbond_wait(ifbond_ref ifb, const char * msg) static void ifbond_signal(ifbond_ref ifb, const char * msg) { - ifbond_flags_clear_change_in_progress(ifb); - wakeup((caddr_t)ifb); - if (g_bond->verbose) { - printf("%s: %s wakeup\n", ifb->ifb_name, msg); - } - return; + ifbond_flags_clear_change_in_progress(ifb); + wakeup((caddr_t)ifb); + if (g_bond->verbose) { + printf("%s: %s wakeup\n", ifb->ifb_name, msg); + } + return; } /** - ** Media information - **/ +** Media information +**/ static int link_speed(int active) { - switch (IFM_SUBTYPE(active)) { - case IFM_10_T: - case IFM_10_2: - case IFM_10_5: - case IFM_10_STP: - case IFM_10_FL: - return (10); - case IFM_100_TX: - case IFM_100_FX: - case IFM_100_T4: - case IFM_100_VG: - case IFM_100_T2: - return (100); - case IFM_1000_SX: - case IFM_1000_LX: - case IFM_1000_CX: - case IFM_1000_TX: - return (1000); - case IFM_HPNA_1: - return (0); - default: + switch (IFM_SUBTYPE(active)) { + case IFM_10_T: + case IFM_10_2: + case IFM_10_5: + case IFM_10_STP: + case IFM_10_FL: + return 10; + case IFM_100_TX: + case IFM_100_FX: + case IFM_100_T4: + case IFM_100_VG: + case IFM_100_T2: + return 100; + case IFM_1000_SX: + case IFM_1000_LX: + case IFM_1000_CX: + case IFM_1000_TX: + case IFM_1000_CX_SGMII: + case IFM_1000_KX: + return 1000; + case IFM_HPNA_1: + return 0; + default: /* assume that new defined types are going to be at least 10GigE */ - case IFM_10G_SR: - case IFM_10G_LR: - return (10000); - case IFM_2500_T: - return (2500); - case IFM_5000_T: - return (5000); - } + case IFM_10G_SR: + case IFM_10G_LR: + case IFM_10G_KX4: + case IFM_10G_KR: + case IFM_10G_CR1: + case IFM_10G_ER: + return 10000; + case IFM_2500_T: + return 2500; + case IFM_5000_T: + return 5000; + case IFM_20G_KR2: + return 20000; + case IFM_25G_CR: + case IFM_25G_KR: + case IFM_25G_SR: + case IFM_25G_LR: + return 25000; + case IFM_40G_CR4: + case IFM_40G_SR4: + case IFM_40G_LR4: + case IFM_40G_KR4: + return 40000; + case IFM_50G_CR2: + case IFM_50G_KR2: + case IFM_50G_SR2: + case IFM_50G_LR2: + return 50000; + case IFM_56G_R4: + return 56000; + case IFM_100G_CR4: + case IFM_100G_SR4: + case IFM_100G_KR4: + case 
IFM_100G_LR4: + return 100000; + } } static __inline__ int media_active(const struct media_info * mi) { - if ((mi->mi_status & IFM_AVALID) == 0) { - return (1); - } - return ((mi->mi_status & IFM_ACTIVE) != 0); + if ((mi->mi_status & IFM_AVALID) == 0) { + return 1; + } + return (mi->mi_status & IFM_ACTIVE) != 0; } static __inline__ int media_full_duplex(const struct media_info * mi) { - return ((mi->mi_active & IFM_FDX) != 0); + return (mi->mi_active & IFM_FDX) != 0; } static __inline__ int media_speed(const struct media_info * mi) { - return (link_speed(mi->mi_active)); + return link_speed(mi->mi_active); } static struct media_info interface_media_info(struct ifnet * ifp) { - struct ifmediareq ifmr; - struct media_info mi; + struct ifmediareq ifmr; + struct media_info mi; - bzero(&mi, sizeof(mi)); - bzero(&ifmr, sizeof(ifmr)); - if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) { - if (ifmr.ifm_count != 0) { - mi.mi_status = ifmr.ifm_status; - mi.mi_active = ifmr.ifm_active; + bzero(&mi, sizeof(mi)); + bzero(&ifmr, sizeof(ifmr)); + if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) { + if (ifmr.ifm_count != 0) { + mi.mi_status = ifmr.ifm_status; + mi.mi_active = ifmr.ifm_active; + } } - } - return (mi); + return mi; } static int if_siflladdr(struct ifnet * ifp, const struct ether_addr * ea_p) { - struct ifreq ifr; + struct ifreq ifr; - /* - * XXX setting the sa_len to ETHER_ADDR_LEN is wrong, but the driver - * currently expects it that way - */ - ifr.ifr_addr.sa_family = AF_UNSPEC; - ifr.ifr_addr.sa_len = ETHER_ADDR_LEN; - ether_addr_copy(ifr.ifr_addr.sa_data, ea_p); - return (ifnet_ioctl(ifp, 0, SIOCSIFLLADDR, &ifr)); + /* + * XXX setting the sa_len to ETHER_ADDR_LEN is wrong, but the driver + * currently expects it that way + */ + ifr.ifr_addr.sa_family = AF_UNSPEC; + ifr.ifr_addr.sa_len = ETHER_ADDR_LEN; + ether_addr_copy(ifr.ifr_addr.sa_data, ea_p); + return ifnet_ioctl(ifp, 0, SIOCSIFLLADDR, &ifr); } /** - ** bond_globals - **/ +** bond_globals +**/ static bond_globals_ref bond_globals_create(lacp_system_priority sys_pri, - lacp_system_ref sys) + lacp_system_ref sys) { - bond_globals_ref b; + bond_globals_ref b; - b = _MALLOC(sizeof(*b), M_BOND, M_WAITOK | M_ZERO); - if (b == NULL) { - return (NULL); - } - TAILQ_INIT(&b->ifbond_list); - b->system = *sys; - b->system_priority = sys_pri; - return (b); + b = _MALLOC(sizeof(*b), M_BOND, M_WAITOK | M_ZERO); + if (b == NULL) { + return NULL; + } + TAILQ_INIT(&b->ifbond_list); + b->system = *sys; + b->system_priority = sys_pri; + return b; } static int bond_globals_init(void) { - bond_globals_ref b; - int i; - struct ifnet * ifp; - - bond_assert_lock_not_held(); - - if (g_bond != NULL) { - return (0); - } - - /* - * use en0's ethernet address as the system identifier, and if it's not - * there, use en1 .. en3 - */ - ifp = NULL; - for (i = 0; i < 4; i++) { - char ifname[IFNAMSIZ+1]; - snprintf(ifname, sizeof(ifname), "en%d", i); - ifp = ifunit(ifname); + bond_globals_ref b; + int i; + struct ifnet * ifp; + + bond_assert_lock_not_held(); + + if (g_bond != NULL) { + return 0; + } + + /* + * use en0's ethernet address as the system identifier, and if it's not + * there, use en1 .. 
en3 + */ + ifp = NULL; + for (i = 0; i < 4; i++) { + char ifname[IFNAMSIZ + 1]; + snprintf(ifname, sizeof(ifname), "en%d", i); + ifp = ifunit(ifname); + if (ifp != NULL) { + break; + } + } + b = NULL; if (ifp != NULL) { - break; - } - } - b = NULL; - if (ifp != NULL) { - b = bond_globals_create(0x8000, (lacp_system_ref)IF_LLADDR(ifp)); - } - bond_lock(); - if (g_bond != NULL) { + b = bond_globals_create(0x8000, (lacp_system_ref)IF_LLADDR(ifp)); + } + bond_lock(); + if (g_bond != NULL) { + bond_unlock(); + _FREE(b, M_BOND); + return 0; + } + g_bond = b; bond_unlock(); - _FREE(b, M_BOND); - return (0); - } - g_bond = b; - bond_unlock(); - if (ifp == NULL) { - return (ENXIO); - } - if (b == NULL) { - return (ENOMEM); - } - return (0); + if (ifp == NULL) { + return ENXIO; + } + if (b == NULL) { + return ENOMEM; + } + return 0; } static void bond_bpf_vlan(struct ifnet * ifp, struct mbuf * m, - const struct ether_header * eh_p, - u_int16_t vlan_tag, bpf_packet_func func) -{ - struct ether_vlan_header * vlh_p; - struct mbuf * vl_m; - - vl_m = m_get(M_DONTWAIT, MT_DATA); - if (vl_m == NULL) { + const struct ether_header * eh_p, + u_int16_t vlan_tag, bpf_packet_func func) +{ + struct ether_vlan_header * vlh_p; + struct mbuf * vl_m; + + vl_m = m_get(M_DONTWAIT, MT_DATA); + if (vl_m == NULL) { + return; + } + /* populate a new mbuf containing the vlan ethernet header */ + vl_m->m_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + vlh_p = mtod(vl_m, struct ether_vlan_header *); + bcopy(eh_p, vlh_p, offsetof(struct ether_header, ether_type)); + vlh_p->evl_encap_proto = htons(ETHERTYPE_VLAN); + vlh_p->evl_tag = htons(vlan_tag); + vlh_p->evl_proto = eh_p->ether_type; + vl_m->m_next = m; + (*func)(ifp, vl_m); + vl_m->m_next = NULL; + m_free(vl_m); return; - } - /* populate a new mbuf containing the vlan ethernet header */ - vl_m->m_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; - vlh_p = mtod(vl_m, struct ether_vlan_header *); - bcopy(eh_p, vlh_p, offsetof(struct ether_header, ether_type)); - vlh_p->evl_encap_proto = htons(ETHERTYPE_VLAN); - vlh_p->evl_tag = htons(vlan_tag); - vlh_p->evl_proto = eh_p->ether_type; - vl_m->m_next = m; - (*func)(ifp, vl_m); - vl_m->m_next = NULL; - m_free(vl_m); - return; -} - -static __inline__ void -bond_bpf_output(struct ifnet * ifp, struct mbuf * m, - bpf_packet_func func) -{ - if (func != NULL) { - if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { - const struct ether_header * eh_p; - eh_p = mtod(m, const struct ether_header *); - m->m_data += ETHER_HDR_LEN; - m->m_len -= ETHER_HDR_LEN; - bond_bpf_vlan(ifp, m, eh_p, m->m_pkthdr.vlan_tag, func); - m->m_data -= ETHER_HDR_LEN; - m->m_len += ETHER_HDR_LEN; - } else { - (*func)(ifp, m); +} + +static __inline__ void +bond_bpf_output(struct ifnet * ifp, struct mbuf * m, + bpf_packet_func func) +{ + if (func != NULL) { + if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { + const struct ether_header * eh_p; + eh_p = mtod(m, const struct ether_header *); + m->m_data += ETHER_HDR_LEN; + m->m_len -= ETHER_HDR_LEN; + bond_bpf_vlan(ifp, m, eh_p, m->m_pkthdr.vlan_tag, func); + m->m_data -= ETHER_HDR_LEN; + m->m_len += ETHER_HDR_LEN; + } else { + (*func)(ifp, m); + } } - } - return; + return; } -static __inline__ void +static __inline__ void bond_bpf_input(ifnet_t ifp, mbuf_t m, const struct ether_header * eh_p, - bpf_packet_func func) -{ - if (func != NULL) { - if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { - bond_bpf_vlan(ifp, m, eh_p, m->m_pkthdr.vlan_tag, func); - } else { - /* restore the header */ - m->m_data -= ETHER_HDR_LEN; - 
m->m_len += ETHER_HDR_LEN; - (*func)(ifp, m); - m->m_data += ETHER_HDR_LEN; - m->m_len -= ETHER_HDR_LEN; + bpf_packet_func func) +{ + if (func != NULL) { + if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { + bond_bpf_vlan(ifp, m, eh_p, m->m_pkthdr.vlan_tag, func); + } else { + /* restore the header */ + m->m_data -= ETHER_HDR_LEN; + m->m_len += ETHER_HDR_LEN; + (*func)(ifp, m); + m->m_data += ETHER_HDR_LEN; + m->m_len -= ETHER_HDR_LEN; + } } - } - return; + return; } /* @@ -1008,117 +1029,118 @@ bond_bpf_input(ifnet_t ifp, mbuf_t m, const struct ether_header * eh_p, static int bond_setmulti(struct ifnet * ifp) { - ifbond_ref ifb; - int error; - int result = 0; - bondport_ref p; + ifbond_ref ifb; + int error; + int result = 0; + bondport_ref p; + + bond_lock(); + ifb = ifnet_softc(ifp); + if (ifb == NULL || ifbond_flags_if_detaching(ifb) + || TAILQ_EMPTY(&ifb->ifb_port_list)) { + bond_unlock(); + return 0; + } + ifbond_retain(ifb); + ifbond_wait(ifb, "bond_setmulti"); - bond_lock(); - ifb = ifnet_softc(ifp); - if (ifb == NULL || ifbond_flags_if_detaching(ifb) - || TAILQ_EMPTY(&ifb->ifb_port_list)) { + if (ifbond_flags_if_detaching(ifb)) { + /* someone destroyed the bond while we were waiting */ + result = EBUSY; + goto signal_done; + } bond_unlock(); - return (0); - } - ifbond_retain(ifb); - ifbond_wait(ifb, "bond_setmulti"); - - if (ifbond_flags_if_detaching(ifb)) { - /* someone destroyed the bond while we were waiting */ - result = EBUSY; - goto signal_done; - } - bond_unlock(); - - /* ifbond_wait() let's us safely walk the list without holding the lock */ - TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { - struct ifnet * port_ifp = p->po_ifp; - - error = multicast_list_program(&p->po_multicast, - ifp, port_ifp); - if (error != 0) { - printf("bond_setmulti(%s): " - "multicast_list_program(%s%d) failed, %d\n", - ifb->ifb_name, ifnet_name(port_ifp), - ifnet_unit(port_ifp), error); - result = error; + + /* ifbond_wait() lets us safely walk the list without holding the lock */ + TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { + struct ifnet * port_ifp = p->po_ifp; + + error = multicast_list_program(&p->po_multicast, + ifp, port_ifp); + if (error != 0) { + printf("bond_setmulti(%s): " + "multicast_list_program(%s%d) failed, %d\n", + ifb->ifb_name, ifnet_name(port_ifp), + ifnet_unit(port_ifp), error); + result = error; + } } - } - bond_lock(); - signal_done: - ifbond_signal(ifb, "bond_setmulti"); - bond_unlock(); - ifbond_release(ifb); - return (result); + bond_lock(); +signal_done: + ifbond_signal(ifb, "bond_setmulti"); + bond_unlock(); + ifbond_release(ifb); + return result; } static int bond_clone_attach(void) { - int error; + int error; - if ((error = if_clone_attach(&bond_cloner)) != 0) - return error; - bond_lock_init(); - return 0; + if ((error = if_clone_attach(&bond_cloner)) != 0) { + return error; + } + bond_lock_init(); + return 0; } static int ifbond_add_slow_proto_multicast(ifbond_ref ifb) { - int error; - struct ifmultiaddr * ifma = NULL; - struct sockaddr_dl sdl; + int error; + struct ifmultiaddr * ifma = NULL; + struct sockaddr_dl sdl; - bond_assert_lock_not_held(); + bond_assert_lock_not_held(); - bzero(&sdl, sizeof(sdl)); - sdl.sdl_len = sizeof(sdl); - sdl.sdl_family = AF_LINK; - sdl.sdl_type = IFT_ETHER; - sdl.sdl_nlen = 0; - sdl.sdl_alen = sizeof(slow_proto_multicast); - bcopy(&slow_proto_multicast, sdl.sdl_data, sizeof(slow_proto_multicast)); - error = if_addmulti_anon(ifb->ifb_ifp, (struct sockaddr *)&sdl, &ifma); - if (error == 0) { -
ifb->ifb_ifma_slow_proto = ifma; - } - return (error); + bzero(&sdl, sizeof(sdl)); + sdl.sdl_len = sizeof(sdl); + sdl.sdl_family = AF_LINK; + sdl.sdl_type = IFT_ETHER; + sdl.sdl_nlen = 0; + sdl.sdl_alen = sizeof(slow_proto_multicast); + bcopy(&slow_proto_multicast, sdl.sdl_data, sizeof(slow_proto_multicast)); + error = if_addmulti_anon(ifb->ifb_ifp, (struct sockaddr *)&sdl, &ifma); + if (error == 0) { + ifb->ifb_ifma_slow_proto = ifma; + } + return error; } static int bond_clone_create(struct if_clone * ifc, u_int32_t unit, __unused void *params) { - int error; - ifbond_ref ifb; - ifnet_t ifp; - struct ifnet_init_eparams bond_init; - + int error; + ifbond_ref ifb; + ifnet_t ifp; + struct ifnet_init_eparams bond_init; + error = bond_globals_init(); if (error != 0) { - return (error); + return error; } - + ifb = if_clone_softc_allocate(&bond_cloner); if (ifb == NULL) { - return (ENOMEM); + return ENOMEM; } - - ifbond_retain(ifb); + + os_ref_init(&ifb->ifb_retain_count, NULL); TAILQ_INIT(&ifb->ifb_port_list); TAILQ_INIT(&ifb->ifb_lag_list); ifb->ifb_key = unit + 1; - + /* use the interface name as the unique id for ifp recycle */ if ((u_int32_t)snprintf(ifb->ifb_name, sizeof(ifb->ifb_name), "%s%d", - ifc->ifc_name, unit) >= sizeof(ifb->ifb_name)) { + ifc->ifc_name, unit) >= sizeof(ifb->ifb_name)) { ifbond_release(ifb); - return (EINVAL); + return EINVAL; } - + bzero(&bond_init, sizeof(bond_init)); bond_init.ver = IFNET_INIT_CURRENT_VERSION; - bond_init.len = sizeof (bond_init); + bond_init.len = sizeof(bond_init); bond_init.flags = IFNET_INIT_LEGACY; bond_init.uniqueid = ifb->ifb_name; bond_init.uniqueid_len = strlen(ifb->ifb_name); @@ -1139,222 +1161,222 @@ bond_clone_create(struct if_clone * ifc, u_int32_t unit, __unused void *params) bond_init.broadcast_len = ETHER_ADDR_LEN; bond_init.softc = ifb; error = ifnet_allocate_extended(&bond_init, &ifp); - + if (error) { ifbond_release(ifb); - return (error); + return error; } - + ifb->ifb_ifp = ifp; ifnet_set_offload(ifp, 0); ifnet_set_addrlen(ifp, ETHER_ADDR_LEN); /* XXX ethernet specific */ ifnet_set_flags(ifp, IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX, 0xffff); ifnet_set_baudrate(ifp, 0); - ifnet_set_mtu(ifp, 0); - + ifnet_set_mtu(ifp, ETHERMTU); + error = ifnet_attach(ifp, NULL); if (error != 0) { ifnet_release(ifp); ifbond_release(ifb); - return (error); + return error; } error = ifbond_add_slow_proto_multicast(ifb); if (error != 0) { printf("bond_clone_create(%s): " - "failed to add slow_proto multicast, %d\n", - ifb->ifb_name, error); + "failed to add slow_proto multicast, %d\n", + ifb->ifb_name, error); } - + /* attach as ethernet */ bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); - + bond_lock(); TAILQ_INSERT_HEAD(&g_bond->ifbond_list, ifb, ifb_bond_list); bond_unlock(); - - return (0); + + return 0; } static void bond_remove_all_interfaces(ifbond_ref ifb) { - bondport_ref p; + bondport_ref p; - bond_assert_lock_held(); + bond_assert_lock_held(); - /* - * do this in reverse order to avoid re-programming the mac address - * as each head interface is removed - */ - while ((p = TAILQ_LAST(&ifb->ifb_port_list, port_list)) != NULL) { - bond_remove_interface(ifb, p->po_ifp); - } - return; + /* + * do this in reverse order to avoid re-programming the mac address + * as each head interface is removed + */ + while ((p = TAILQ_LAST(&ifb->ifb_port_list, port_list)) != NULL) { + bond_remove_interface(ifb, p->po_ifp); + } + return; } static void bond_remove(ifbond_ref ifb) { - bond_assert_lock_held(); - ifbond_flags_set_if_detaching(ifb); 
- TAILQ_REMOVE(&g_bond->ifbond_list, ifb, ifb_bond_list); - bond_remove_all_interfaces(ifb); - return; + bond_assert_lock_held(); + ifbond_flags_set_if_detaching(ifb); + TAILQ_REMOVE(&g_bond->ifbond_list, ifb, ifb_bond_list); + bond_remove_all_interfaces(ifb); + return; } static void bond_if_detach(struct ifnet * ifp) { - int error; + int error; + + error = ifnet_detach(ifp); + if (error) { + printf("bond_if_detach %s%d: ifnet_detach failed, %d\n", + ifnet_name(ifp), ifnet_unit(ifp), error); + } - error = ifnet_detach(ifp); - if (error) { - printf("bond_if_detach %s%d: ifnet_detach failed, %d\n", - ifnet_name(ifp), ifnet_unit(ifp), error); - } - - return; + return; } static int bond_clone_destroy(struct ifnet * ifp) { - ifbond_ref ifb; + ifbond_ref ifb; - bond_lock(); - ifb = ifnet_softc(ifp); - if (ifb == NULL || ifnet_type(ifp) != IFT_IEEE8023ADLAG) { - bond_unlock(); - return 0; - } - if (ifbond_flags_if_detaching(ifb)) { + bond_lock(); + ifb = ifnet_softc(ifp); + if (ifb == NULL || ifnet_type(ifp) != IFT_IEEE8023ADLAG) { + bond_unlock(); + return 0; + } + if (ifbond_flags_if_detaching(ifb)) { + bond_unlock(); + return 0; + } + bond_remove(ifb); bond_unlock(); + bond_if_detach(ifp); return 0; - } - bond_remove(ifb); - bond_unlock(); - bond_if_detach(ifp); - return 0; } -static int +static int bond_set_bpf_tap(struct ifnet * ifp, bpf_tap_mode mode, bpf_packet_func func) { - ifbond_ref ifb; + ifbond_ref ifb; + + bond_lock(); + ifb = ifnet_softc(ifp); + if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { + bond_unlock(); + return ENODEV; + } + switch (mode) { + case BPF_TAP_DISABLE: + ifb->ifb_bpf_input = ifb->ifb_bpf_output = NULL; + break; + + case BPF_TAP_INPUT: + ifb->ifb_bpf_input = func; + break; + + case BPF_TAP_OUTPUT: + ifb->ifb_bpf_output = func; + break; - bond_lock(); - ifb = ifnet_softc(ifp); - if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { + case BPF_TAP_INPUT_OUTPUT: + ifb->ifb_bpf_input = ifb->ifb_bpf_output = func; + break; + default: + break; + } bond_unlock(); - return (ENODEV); - } - switch (mode) { - case BPF_TAP_DISABLE: - ifb->ifb_bpf_input = ifb->ifb_bpf_output = NULL; - break; - - case BPF_TAP_INPUT: - ifb->ifb_bpf_input = func; - break; - - case BPF_TAP_OUTPUT: - ifb->ifb_bpf_output = func; - break; - - case BPF_TAP_INPUT_OUTPUT: - ifb->ifb_bpf_input = ifb->ifb_bpf_output = func; - break; - default: - break; - } - bond_unlock(); - return 0; + return 0; } static uint32_t ether_header_hash(struct ether_header * eh_p) { - uint32_t h; + uint32_t h; - /* get 32-bits from destination ether and ether type */ - h = (*((uint16_t *)&eh_p->ether_dhost[4]) << 16) - | eh_p->ether_type; - h ^= *((uint32_t *)&eh_p->ether_dhost[0]); - return (h); + /* get 32-bits from destination ether and ether type */ + h = (*((uint16_t *)&eh_p->ether_dhost[4]) << 16) + | eh_p->ether_type; + h ^= *((uint32_t *)&eh_p->ether_dhost[0]); + return h; } static struct mbuf * S_mbuf_skip_to_offset(struct mbuf * m, int32_t * offset) { - int len; + int len; - len = m->m_len; - while (*offset >= len) { - *offset -= len; - m = m->m_next; - if (m == NULL) { - break; - } len = m->m_len; - } - return (m); + while (*offset >= len) { + *offset -= len; + m = m->m_next; + if (m == NULL) { + break; + } + len = m->m_len; + } + return m; } #if BYTE_ORDER == BIG_ENDIAN static __inline__ uint32_t make_uint32(u_char c0, u_char c1, u_char c2, u_char c3) { - return (((uint32_t)c0 << 24) | ((uint32_t)c1 << 16) - | ((uint32_t)c2 << 8) | (uint32_t)c3); + return ((uint32_t)c0 << 24) | ((uint32_t)c1 << 16) + | 
((uint32_t)c2 << 8) | (uint32_t)c3; } #else /* BYTE_ORDER == LITTLE_ENDIAN */ static __inline__ uint32_t make_uint32(u_char c0, u_char c1, u_char c2, u_char c3) { - return (((uint32_t)c3 << 24) | ((uint32_t)c2 << 16) - | ((uint32_t)c1 << 8) | (uint32_t)c0); + return ((uint32_t)c3 << 24) | ((uint32_t)c2 << 16) + | ((uint32_t)c1 << 8) | (uint32_t)c0; } #endif /* BYTE_ORDER == LITTLE_ENDIAN */ static int S_mbuf_copy_uint32(struct mbuf * m, int32_t offset, uint32_t * val) { - struct mbuf * current; - u_char * current_data; - struct mbuf * next; - u_char * next_data; - int space_current; - - current = S_mbuf_skip_to_offset(m, &offset); - if (current == NULL) { - return (1); - } - current_data = mtod(current, u_char *) + offset; - space_current = current->m_len - offset; - if (space_current >= (int)sizeof(uint32_t)) { - *val = *((uint32_t *)current_data); - return (0); - } - next = current->m_next; - if (next == NULL || (next->m_len + space_current) < (int)sizeof(uint32_t)) { - return (1); - } - next_data = mtod(next, u_char *); - switch (space_current) { - case 1: - *val = make_uint32(current_data[0], next_data[0], - next_data[1], next_data[2]); - break; - case 2: - *val = make_uint32(current_data[0], current_data[1], - next_data[0], next_data[1]); - break; - default: - *val = make_uint32(current_data[0], current_data[1], - current_data[2], next_data[0]); - break; - } - return (0); + struct mbuf * current; + u_char * current_data; + struct mbuf * next; + u_char * next_data; + int space_current; + + current = S_mbuf_skip_to_offset(m, &offset); + if (current == NULL) { + return 1; + } + current_data = mtod(current, u_char *) + offset; + space_current = current->m_len - offset; + if (space_current >= (int)sizeof(uint32_t)) { + *val = *((uint32_t *)current_data); + return 0; + } + next = current->m_next; + if (next == NULL || (next->m_len + space_current) < (int)sizeof(uint32_t)) { + return 1; + } + next_data = mtod(next, u_char *); + switch (space_current) { + case 1: + *val = make_uint32(current_data[0], next_data[0], + next_data[1], next_data[2]); + break; + case 2: + *val = make_uint32(current_data[0], current_data[1], + next_data[0], next_data[1]); + break; + default: + *val = make_uint32(current_data[0], current_data[1], + current_data[2], next_data[0]); + break; + } + return 0; } #define IP_SRC_OFFSET (offsetof(struct ip, ip_src) - offsetof(struct ip, ip_p)) @@ -1363,540 +1385,531 @@ S_mbuf_copy_uint32(struct mbuf * m, int32_t offset, uint32_t * val) static uint32_t ip_header_hash(struct mbuf * m) { - u_char * data; - struct in_addr ip_dst; - struct in_addr ip_src; - u_char ip_p; - int32_t offset; - struct mbuf * orig_m = m; - - /* find the IP protocol field relative to the start of the packet */ - offset = offsetof(struct ip, ip_p) + sizeof(struct ether_header); - m = S_mbuf_skip_to_offset(m, &offset); - if (m == NULL || m->m_len < 1) { - goto bad_ip_packet; - } - data = mtod(m, u_char *) + offset; - ip_p = *data; - - /* find the IP src relative to the IP protocol */ - if ((m->m_len - offset) - >= (int)(IP_SRC_OFFSET + sizeof(struct in_addr) * 2)) { - /* this should be the normal case */ - ip_src = *(struct in_addr *)(data + IP_SRC_OFFSET); - ip_dst = *(struct in_addr *)(data + IP_DST_OFFSET); - } - else { - if (S_mbuf_copy_uint32(m, offset + IP_SRC_OFFSET, - (uint32_t *)&ip_src.s_addr)) { - goto bad_ip_packet; - } - if (S_mbuf_copy_uint32(m, offset + IP_DST_OFFSET, - (uint32_t *)&ip_dst.s_addr)) { - goto bad_ip_packet; - } - } - return (ntohl(ip_dst.s_addr) ^ ntohl(ip_src.s_addr) ^ 
((uint32_t)ip_p)); - - bad_ip_packet: - return (ether_header_hash(mtod(orig_m, struct ether_header *))); -} - -#define IP6_ADDRS_LEN (sizeof(struct in6_addr) * 2) + u_char * data; + struct in_addr ip_dst; + struct in_addr ip_src; + u_char ip_p; + int32_t offset; + struct mbuf * orig_m = m; + + /* find the IP protocol field relative to the start of the packet */ + offset = offsetof(struct ip, ip_p) + sizeof(struct ether_header); + m = S_mbuf_skip_to_offset(m, &offset); + if (m == NULL || m->m_len < 1) { + goto bad_ip_packet; + } + data = mtod(m, u_char *) + offset; + ip_p = *data; + + /* find the IP src relative to the IP protocol */ + if ((m->m_len - offset) + >= (int)(IP_SRC_OFFSET + sizeof(struct in_addr) * 2)) { + /* this should be the normal case */ + ip_src = *(struct in_addr *)(data + IP_SRC_OFFSET); + ip_dst = *(struct in_addr *)(data + IP_DST_OFFSET); + } else { + if (S_mbuf_copy_uint32(m, offset + IP_SRC_OFFSET, + (uint32_t *)&ip_src.s_addr)) { + goto bad_ip_packet; + } + if (S_mbuf_copy_uint32(m, offset + IP_DST_OFFSET, + (uint32_t *)&ip_dst.s_addr)) { + goto bad_ip_packet; + } + } + return ntohl(ip_dst.s_addr) ^ ntohl(ip_src.s_addr) ^ ((uint32_t)ip_p); + +bad_ip_packet: + return ether_header_hash(mtod(orig_m, struct ether_header *)); +} + +#define IP6_ADDRS_LEN (sizeof(struct in6_addr) * 2) static uint32_t ipv6_header_hash(struct mbuf * m) { - u_char * data; - int i; - int32_t offset; - struct mbuf * orig_m = m; - uint32_t * scan; - uint32_t val; - - /* find the IP protocol field relative to the start of the packet */ - offset = offsetof(struct ip6_hdr, ip6_src) + sizeof(struct ether_header); - m = S_mbuf_skip_to_offset(m, &offset); - if (m == NULL) { - goto bad_ipv6_packet; - } - data = mtod(m, u_char *) + offset; - val = 0; - if ((m->m_len - offset) >= (int)IP6_ADDRS_LEN) { - /* this should be the normal case */ - for (i = 0, scan = (uint32_t *)data; - i < (int)(IP6_ADDRS_LEN / sizeof(uint32_t)); - i++, scan++) { - val ^= *scan; - } - } - else { - for (i = 0; i < (int)(IP6_ADDRS_LEN / sizeof(uint32_t)); i++) { - uint32_t tmp; - if (S_mbuf_copy_uint32(m, offset + i * sizeof(uint32_t), - (uint32_t *)&tmp)) { + u_char * data; + int i; + int32_t offset; + struct mbuf * orig_m = m; + uint32_t * scan; + uint32_t val; + + /* find the IP protocol field relative to the start of the packet */ + offset = offsetof(struct ip6_hdr, ip6_src) + sizeof(struct ether_header); + m = S_mbuf_skip_to_offset(m, &offset); + if (m == NULL) { goto bad_ipv6_packet; - } - val ^= tmp; } - } - return (ntohl(val)); + data = mtod(m, u_char *) + offset; + val = 0; + if ((m->m_len - offset) >= (int)IP6_ADDRS_LEN) { + /* this should be the normal case */ + for (i = 0, scan = (uint32_t *)data; + i < (int)(IP6_ADDRS_LEN / sizeof(uint32_t)); + i++, scan++) { + val ^= *scan; + } + } else { + for (i = 0; i < (int)(IP6_ADDRS_LEN / sizeof(uint32_t)); i++) { + uint32_t tmp; + if (S_mbuf_copy_uint32(m, offset + i * sizeof(uint32_t), + (uint32_t *)&tmp)) { + goto bad_ipv6_packet; + } + val ^= tmp; + } + } + return ntohl(val); - bad_ipv6_packet: - return (ether_header_hash(mtod(orig_m, struct ether_header *))); +bad_ipv6_packet: + return ether_header_hash(mtod(orig_m, struct ether_header *)); } static int bond_output(struct ifnet * ifp, struct mbuf * m) { - bpf_packet_func bpf_func; - uint32_t h; - ifbond_ref ifb; - struct ifnet * port_ifp = NULL; - int err; - struct flowadv adv = { FADV_SUCCESS }; - - if (m == 0) { - return (0); - } - if ((m->m_flags & M_PKTHDR) == 0) { + bpf_packet_func bpf_func; + uint32_t h; + 
ifbond_ref ifb; + struct ifnet * port_ifp = NULL; + int err; + struct flowadv adv = { FADV_SUCCESS }; + + if (m == 0) { + return 0; + } + if ((m->m_flags & M_PKTHDR) == 0) { + m_freem(m); + return 0; + } + if (m->m_pkthdr.pkt_flowid != 0) { + h = m->m_pkthdr.pkt_flowid; + } else { + struct ether_header * eh_p; + + eh_p = mtod(m, struct ether_header *); + switch (ntohs(eh_p->ether_type)) { + case ETHERTYPE_IP: + h = ip_header_hash(m); + break; + case ETHERTYPE_IPV6: + h = ipv6_header_hash(m); + break; + default: + h = ether_header_hash(eh_p); + break; + } + } + bond_lock(); + ifb = ifnet_softc(ifp); + if (ifb == NULL || ifbond_flags_if_detaching(ifb) + || ifb->ifb_distributing_count == 0) { + goto done; + } + h %= ifb->ifb_distributing_count; + port_ifp = ifb->ifb_distributing_array[h]->po_ifp; + bpf_func = ifb->ifb_bpf_output; + bond_unlock(); + + if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { + (void)ifnet_stat_increment_out(ifp, 1, + m->m_pkthdr.len + ETHER_VLAN_ENCAP_LEN, + 0); + } else { + (void)ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0); + } + bond_bpf_output(ifp, m, bpf_func); + + err = dlil_output(port_ifp, PF_BOND, m, NULL, NULL, 1, &adv); + + if (err == 0) { + if (adv.code == FADV_FLOW_CONTROLLED) { + err = EQFULL; + } else if (adv.code == FADV_SUSPENDED) { + err = EQSUSPENDED; + } + } + + return err; + +done: + bond_unlock(); m_freem(m); - return (0); - } - if (m->m_pkthdr.pkt_flowid != 0) { - h = m->m_pkthdr.pkt_flowid; - } - else { - struct ether_header * eh_p; - - eh_p = mtod(m, struct ether_header *); - switch (ntohs(eh_p->ether_type)) { - case ETHERTYPE_IP: - h = ip_header_hash(m); - break; - case ETHERTYPE_IPV6: - h = ipv6_header_hash(m); - break; - default: - h = ether_header_hash(eh_p); - break; - } - } - bond_lock(); - ifb = ifnet_softc(ifp); - if (ifb == NULL || ifbond_flags_if_detaching(ifb) - || ifb->ifb_distributing_count == 0) { - goto done; - } - h %= ifb->ifb_distributing_count; - port_ifp = ifb->ifb_distributing_array[h]->po_ifp; - bpf_func = ifb->ifb_bpf_output; - bond_unlock(); - - if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { - (void)ifnet_stat_increment_out(ifp, 1, - m->m_pkthdr.len + ETHER_VLAN_ENCAP_LEN, - 0); - } else { - (void)ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0); - } - bond_bpf_output(ifp, m, bpf_func); - - err = dlil_output(port_ifp, PF_BOND, m, NULL, NULL, 1, &adv); - - if (err == 0) { - if (adv.code == FADV_FLOW_CONTROLLED) { - err = EQFULL; - } else if (adv.code == FADV_SUSPENDED) { - err = EQSUSPENDED; - } - } - - return (err); - - done: - bond_unlock(); - m_freem(m); - return (0); + return 0; } static bondport_ref ifbond_lookup_port(ifbond_ref ifb, struct ifnet * port_ifp) { - bondport_ref p; - TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { - if (p->po_ifp == port_ifp) { - return (p); + bondport_ref p; + TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { + if (p->po_ifp == port_ifp) { + return p; + } } - } - return (NULL); + return NULL; } static bondport_ref bond_lookup_port(struct ifnet * port_ifp) { - ifbond_ref ifb; - bondport_ref port; + ifbond_ref ifb; + bondport_ref port; - TAILQ_FOREACH(ifb, &g_bond->ifbond_list, ifb_bond_list) { - port = ifbond_lookup_port(ifb, port_ifp); - if (port != NULL) { - return (port); + TAILQ_FOREACH(ifb, &g_bond->ifbond_list, ifb_bond_list) { + port = ifbond_lookup_port(ifb, port_ifp); + if (port != NULL) { + return port; + } } - } - return (NULL); + return NULL; } static void bond_receive_lacpdu(struct mbuf * m, struct ifnet * port_ifp) { - struct ifnet * bond_ifp = 
NULL; - ifbond_ref ifb; - int event_code = 0; - bondport_ref p; - - bond_lock(); - if ((ifnet_eflags(port_ifp) & IFEF_BOND) == 0) { - goto done; - } - p = bond_lookup_port(port_ifp); - if (p == NULL) { - goto done; - } - if (p->po_enabled == 0) { - goto done; - } - ifb = p->po_bond; - if (ifb->ifb_mode != IF_BOND_MODE_LACP) { - goto done; - } - bondport_receive_lacpdu(p, (lacpdu_ref)m->m_data); - if (ifbond_selection(ifb)) { - event_code = (ifb->ifb_active_lag == NULL) - ? KEV_DL_LINK_OFF - : KEV_DL_LINK_ON; - /* XXX need to take a reference on bond_ifp */ - bond_ifp = ifb->ifb_ifp; - ifb->ifb_last_link_event = event_code; - } - else { - event_code = (ifb->ifb_active_lag == NULL) - ? KEV_DL_LINK_OFF - : KEV_DL_LINK_ON; - if (event_code != ifb->ifb_last_link_event) { - if (g_bond->verbose) { - timestamp_printf("%s: (receive) generating LINK event\n", - ifb->ifb_name); - } - bond_ifp = ifb->ifb_ifp; - ifb->ifb_last_link_event = event_code; - } - } - - done: - bond_unlock(); - if (bond_ifp != NULL) { - interface_link_event(bond_ifp, event_code); - } - m_freem(m); - return; + struct ifnet * bond_ifp = NULL; + ifbond_ref ifb; + int event_code = 0; + bondport_ref p; + + bond_lock(); + if ((ifnet_eflags(port_ifp) & IFEF_BOND) == 0) { + goto done; + } + p = bond_lookup_port(port_ifp); + if (p == NULL) { + goto done; + } + if (p->po_enabled == 0) { + goto done; + } + ifb = p->po_bond; + if (ifb->ifb_mode != IF_BOND_MODE_LACP) { + goto done; + } + bondport_receive_lacpdu(p, (lacpdu_ref)m->m_data); + if (ifbond_selection(ifb)) { + event_code = (ifb->ifb_active_lag == NULL) + ? KEV_DL_LINK_OFF + : KEV_DL_LINK_ON; + /* XXX need to take a reference on bond_ifp */ + bond_ifp = ifb->ifb_ifp; + ifb->ifb_last_link_event = event_code; + } else { + event_code = (ifb->ifb_active_lag == NULL) + ? 
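[Editorial aside, not part of the patch: both the receive path above and the timer path later in the file post KEV_DL_LINK_ON/OFF only when the newly computed state differs from ifb_last_link_event, so re-evaluating an unchanged link never generates duplicate events. A hedged sketch of that edge trigger; link_state and link_event_needed are hypothetical names.]

#include <stdbool.h>

enum link_state { LINK_OFF, LINK_ON };

/*
 * Returns true (and records the new state in *last) only on a state
 * transition; repeated evaluations of the same state post nothing.
 */
static bool
link_event_needed(enum link_state current, enum link_state *last)
{
        if (current == *last) {
                return false;
        }
        *last = current;
        return true;
}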
KEV_DL_LINK_OFF + : KEV_DL_LINK_ON; + if (event_code != ifb->ifb_last_link_event) { + if (g_bond->verbose) { + timestamp_printf("%s: (receive) generating LINK event\n", + ifb->ifb_name); + } + bond_ifp = ifb->ifb_ifp; + ifb->ifb_last_link_event = event_code; + } + } + +done: + bond_unlock(); + if (bond_ifp != NULL) { + interface_link_event(bond_ifp, event_code); + } + m_freem(m); + return; } static void bond_receive_la_marker_pdu(struct mbuf * m, struct ifnet * port_ifp) { - la_marker_pdu_ref marker_p; - bondport_ref p; + la_marker_pdu_ref marker_p; + bondport_ref p; - marker_p = (la_marker_pdu_ref)(m->m_data + ETHER_HDR_LEN); - if (marker_p->lm_marker_tlv_type != LA_MARKER_TLV_TYPE_MARKER) { - goto failed; - } - bond_lock(); - if ((ifnet_eflags(port_ifp) & IFEF_BOND) == 0) { - bond_unlock(); - goto failed; - } - p = bond_lookup_port(port_ifp); - if (p == NULL || p->po_enabled == 0 - || p->po_bond->ifb_mode != IF_BOND_MODE_LACP) { + marker_p = (la_marker_pdu_ref)(m->m_data + ETHER_HDR_LEN); + if (marker_p->lm_marker_tlv_type != LA_MARKER_TLV_TYPE_MARKER) { + goto failed; + } + bond_lock(); + if ((ifnet_eflags(port_ifp) & IFEF_BOND) == 0) { + bond_unlock(); + goto failed; + } + p = bond_lookup_port(port_ifp); + if (p == NULL || p->po_enabled == 0 + || p->po_bond->ifb_mode != IF_BOND_MODE_LACP) { + bond_unlock(); + goto failed; + } + /* echo back the same packet as a marker response */ + marker_p->lm_marker_tlv_type = LA_MARKER_TLV_TYPE_MARKER_RESPONSE; + bondport_slow_proto_transmit(p, (packet_buffer_ref)m); bond_unlock(); - goto failed; - } - /* echo back the same packet as a marker response */ - marker_p->lm_marker_tlv_type = LA_MARKER_TLV_TYPE_MARKER_RESPONSE; - bondport_slow_proto_transmit(p, (packet_buffer_ref)m); - bond_unlock(); - return; + return; - failed: - m_freem(m); - return; +failed: + m_freem(m); + return; } static int bond_input(ifnet_t port_ifp, __unused protocol_family_t protocol, mbuf_t m, - char * frame_header) -{ - bpf_packet_func bpf_func; - const struct ether_header * eh_p; - ifbond_ref ifb; - struct ifnet * ifp; - bondport_ref p; - - eh_p = (const struct ether_header *)frame_header; - if ((m->m_flags & M_MCAST) != 0 - && bcmp(eh_p->ether_dhost, &slow_proto_multicast, - sizeof(eh_p->ether_dhost)) == 0 - && ntohs(eh_p->ether_type) == IEEE8023AD_SLOW_PROTO_ETHERTYPE) { - u_char subtype = *mtod(m, u_char *); - - if (subtype == IEEE8023AD_SLOW_PROTO_SUBTYPE_LACP) { - if (m->m_pkthdr.len < (int)offsetof(lacpdu, la_reserved)) { - m_freem(m); - return (0); - } - /* send to lacp */ - if (m->m_len < (int)offsetof(lacpdu, la_reserved)) { - m = m_pullup(m, offsetof(lacpdu, la_reserved)); - if (m == NULL) { - return (0); + char * frame_header) +{ + bpf_packet_func bpf_func; + const struct ether_header * eh_p; + ifbond_ref ifb; + struct ifnet * ifp; + bondport_ref p; + + eh_p = (const struct ether_header *)frame_header; + if ((m->m_flags & M_MCAST) != 0 + && bcmp(eh_p->ether_dhost, &slow_proto_multicast, + sizeof(eh_p->ether_dhost)) == 0 + && ntohs(eh_p->ether_type) == IEEE8023AD_SLOW_PROTO_ETHERTYPE) { + u_char subtype = *mtod(m, u_char *); + + if (subtype == IEEE8023AD_SLOW_PROTO_SUBTYPE_LACP) { + if (m->m_pkthdr.len < (int)offsetof(lacpdu, la_reserved)) { + m_freem(m); + return 0; + } + /* send to lacp */ + if (m->m_len < (int)offsetof(lacpdu, la_reserved)) { + m = m_pullup(m, offsetof(lacpdu, la_reserved)); + if (m == NULL) { + return 0; + } + } + bond_receive_lacpdu(m, port_ifp); + return 0; + } else if (subtype == IEEE8023AD_SLOW_PROTO_SUBTYPE_LA_MARKER_PROTOCOL) { + int 
min_size; + + /* restore the ethernet header pointer in the mbuf */ + m->m_pkthdr.len += ETHER_HDR_LEN; + m->m_data -= ETHER_HDR_LEN; + m->m_len += ETHER_HDR_LEN; + min_size = ETHER_HDR_LEN + offsetof(la_marker_pdu, lm_reserved); + if (m->m_pkthdr.len < min_size) { + m_freem(m); + return 0; + } + /* send to lacp */ + if (m->m_len < min_size) { + m = m_pullup(m, min_size); + if (m == NULL) { + return 0; + } + } + /* send to marker responder */ + bond_receive_la_marker_pdu(m, port_ifp); + return 0; + } else if (subtype == 0 + || subtype > IEEE8023AD_SLOW_PROTO_SUBTYPE_RESERVED_END) { + /* invalid subtype, discard the frame */ + m_freem(m); + return 0; } - } - bond_receive_lacpdu(m, port_ifp); - return (0); } - else if (subtype == IEEE8023AD_SLOW_PROTO_SUBTYPE_LA_MARKER_PROTOCOL) { - int min_size; + bond_lock(); + if ((ifnet_eflags(port_ifp) & IFEF_BOND) == 0) { + goto done; + } + p = bond_lookup_port(port_ifp); + if (p == NULL || bondport_collecting(p) == 0) { + goto done; + } - /* restore the ethernet header pointer in the mbuf */ - m->m_pkthdr.len += ETHER_HDR_LEN; - m->m_data -= ETHER_HDR_LEN; - m->m_len += ETHER_HDR_LEN; - min_size = ETHER_HDR_LEN + offsetof(la_marker_pdu, lm_reserved); - if (m->m_pkthdr.len < min_size) { - m_freem(m); - return (0); - } - /* send to lacp */ - if (m->m_len < min_size) { - m = m_pullup(m, min_size); - if (m == NULL) { - return (0); - } - } - /* send to marker responder */ - bond_receive_la_marker_pdu(m, port_ifp); - return (0); - } - else if (subtype == 0 - || subtype > IEEE8023AD_SLOW_PROTO_SUBTYPE_RESERVED_END) { - /* invalid subtype, discard the frame */ - m_freem(m); - return (0); - } - } - bond_lock(); - if ((ifnet_eflags(port_ifp) & IFEF_BOND) == 0) { - goto done; - } - p = bond_lookup_port(port_ifp); - if (p == NULL || bondport_collecting(p) == 0) { - goto done; - } - - /* make the packet appear as if it arrived on the bonded interface */ - ifb = p->po_bond; - ifp = ifb->ifb_ifp; - bpf_func = ifb->ifb_bpf_input; - bond_unlock(); - - if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { - (void)ifnet_stat_increment_in(ifp, 1, - (m->m_pkthdr.len + ETHER_HDR_LEN - + ETHER_VLAN_ENCAP_LEN), 0); - } - else { - (void)ifnet_stat_increment_in(ifp, 1, - (m->m_pkthdr.len + ETHER_HDR_LEN), 0); - } - m->m_pkthdr.rcvif = ifp; - bond_bpf_input(ifp, m, eh_p, bpf_func); - m->m_pkthdr.pkt_hdr = frame_header; - dlil_input_packet_list(ifp, m); - return 0; - - done: - bond_unlock(); - m_freem(m); - return (0); + /* make the packet appear as if it arrived on the bonded interface */ + ifb = p->po_bond; + ifp = ifb->ifb_ifp; + bpf_func = ifb->ifb_bpf_input; + bond_unlock(); + + if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { + (void)ifnet_stat_increment_in(ifp, 1, + (m->m_pkthdr.len + ETHER_HDR_LEN + + ETHER_VLAN_ENCAP_LEN), 0); + } else { + (void)ifnet_stat_increment_in(ifp, 1, + (m->m_pkthdr.len + ETHER_HDR_LEN), 0); + } + m->m_pkthdr.rcvif = ifp; + bond_bpf_input(ifp, m, eh_p, bpf_func); + m->m_pkthdr.pkt_hdr = frame_header; + dlil_input_packet_list(ifp, m); + return 0; + +done: + bond_unlock(); + m_freem(m); + return 0; } static __inline__ const char * bondport_get_name(bondport_ref p) { - return (p->po_name); + return p->po_name; } static __inline__ int bondport_get_index(bondport_ref p) { - return (ifnet_index(p->po_ifp)); + return ifnet_index(p->po_ifp); } static void bondport_slow_proto_transmit(bondport_ref p, packet_buffer_ref buf) { - struct ether_header * eh_p; - int error; + struct ether_header * eh_p; + int error; - /* packet_buffer_allocate leaves room for 
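[Editorial aside, not part of the patch: bond_input() above drops a slow-protocol frame whose total length (m_pkthdr.len) is too short, and uses m_pullup() to make the header contiguous before parsing. A flat-buffer analogue of that validate-before-cast pattern, with the mbuf chain replaced by a plain length check; struct lacpdu_prefix and its fields are hypothetical stand-ins.]

#include <stddef.h>
#include <stdint.h>

struct lacpdu_prefix {
        uint8_t la_subtype;
        uint8_t la_version;
        /* ... TLVs follow; only the fixed prefix is validated here ... */
};

/*
 * Return a pointer to the prefix, or NULL when the frame is too short
 * to contain one (analogous to the m_freem() + return 0 path above).
 */
static const struct lacpdu_prefix *
parse_lacpdu_prefix(const uint8_t *frame, size_t frame_len)
{
        if (frame_len < sizeof(struct lacpdu_prefix)) {
                return NULL;
        }
        return (const struct lacpdu_prefix *)(const void *)frame;
}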
ethernet header */ - eh_p = mtod(buf, struct ether_header *); - bcopy(&slow_proto_multicast, &eh_p->ether_dhost, sizeof(eh_p->ether_dhost)); - bcopy(&p->po_saved_addr, eh_p->ether_shost, sizeof(eh_p->ether_shost)); - eh_p->ether_type = htons(IEEE8023AD_SLOW_PROTO_ETHERTYPE); - error = ifnet_output_raw(p->po_ifp, PF_BOND, buf); - if (error != 0) { - printf("bondport_slow_proto_transmit(%s) failed %d\n", - bondport_get_name(p), error); - } - return; + /* packet_buffer_allocate leaves room for ethernet header */ + eh_p = mtod(buf, struct ether_header *); + bcopy(&slow_proto_multicast, &eh_p->ether_dhost, sizeof(eh_p->ether_dhost)); + bcopy(&p->po_saved_addr, eh_p->ether_shost, sizeof(eh_p->ether_shost)); + eh_p->ether_type = htons(IEEE8023AD_SLOW_PROTO_ETHERTYPE); + error = ifnet_output_raw(p->po_ifp, PF_BOND, buf); + if (error != 0) { + printf("bondport_slow_proto_transmit(%s) failed %d\n", + bondport_get_name(p), error); + } + return; } static void -bondport_timer_process_func(devtimer_ref timer, - devtimer_process_func_event event) +bondport_timer_process_func(devtimer_ref timer, + devtimer_process_func_event event) { - bondport_ref p; + bondport_ref p; - switch (event) { - case devtimer_process_func_event_lock: - bond_lock(); - devtimer_retain(timer); - break; - case devtimer_process_func_event_unlock: - if (devtimer_valid(timer)) { - /* as long as the devtimer is valid, we can look at arg0 */ - int event_code = 0; - struct ifnet * bond_ifp = NULL; - - p = (bondport_ref)devtimer_arg0(timer); - if (ifbond_selection(p->po_bond)) { - event_code = (p->po_bond->ifb_active_lag == NULL) - ? KEV_DL_LINK_OFF - : KEV_DL_LINK_ON; - /* XXX need to take a reference on bond_ifp */ - bond_ifp = p->po_bond->ifb_ifp; - p->po_bond->ifb_last_link_event = event_code; - } - else { - event_code = (p->po_bond->ifb_active_lag == NULL) - ? KEV_DL_LINK_OFF - : KEV_DL_LINK_ON; - if (event_code != p->po_bond->ifb_last_link_event) { - if (g_bond->verbose) { - timestamp_printf("%s: (timer) generating LINK event\n", - p->po_bond->ifb_name); - } - bond_ifp = p->po_bond->ifb_ifp; - p->po_bond->ifb_last_link_event = event_code; - } - } - devtimer_release(timer); - bond_unlock(); - if (bond_ifp != NULL) { - interface_link_event(bond_ifp, event_code); - } - } - else { - /* timer is going away */ - devtimer_release(timer); - bond_unlock(); + switch (event) { + case devtimer_process_func_event_lock: + bond_lock(); + devtimer_retain(timer); + break; + case devtimer_process_func_event_unlock: + if (devtimer_valid(timer)) { + /* as long as the devtimer is valid, we can look at arg0 */ + int event_code = 0; + struct ifnet * bond_ifp = NULL; + + p = (bondport_ref)devtimer_arg0(timer); + if (ifbond_selection(p->po_bond)) { + event_code = (p->po_bond->ifb_active_lag == NULL) + ? KEV_DL_LINK_OFF + : KEV_DL_LINK_ON; + /* XXX need to take a reference on bond_ifp */ + bond_ifp = p->po_bond->ifb_ifp; + p->po_bond->ifb_last_link_event = event_code; + } else { + event_code = (p->po_bond->ifb_active_lag == NULL) + ? 
KEV_DL_LINK_OFF + : KEV_DL_LINK_ON; + if (event_code != p->po_bond->ifb_last_link_event) { + if (g_bond->verbose) { + timestamp_printf("%s: (timer) generating LINK event\n", + p->po_bond->ifb_name); + } + bond_ifp = p->po_bond->ifb_ifp; + p->po_bond->ifb_last_link_event = event_code; + } + } + devtimer_release(timer); + bond_unlock(); + if (bond_ifp != NULL) { + interface_link_event(bond_ifp, event_code); + } + } else { + /* timer is going away */ + devtimer_release(timer); + bond_unlock(); + } + break; + default: + break; } - break; - default: - break; - } } static bondport_ref bondport_create(struct ifnet * port_ifp, lacp_port_priority priority, - int active, int short_timeout, int * ret_error) -{ - int error = 0; - bondport_ref p = NULL; - lacp_actor_partner_state s; - - *ret_error = 0; - p = _MALLOC(sizeof(*p), M_BOND, M_WAITOK | M_ZERO); - if (p == NULL) { - *ret_error = ENOMEM; - return (NULL); - } - multicast_list_init(&p->po_multicast); - if ((u_int32_t)snprintf(p->po_name, sizeof(p->po_name), "%s%d", - ifnet_name(port_ifp), ifnet_unit(port_ifp)) - >= sizeof(p->po_name)) { - printf("if_bond: name too large\n"); - *ret_error = EINVAL; - goto failed; - } - error = siocgifdevmtu(port_ifp, &p->po_devmtu); - if (error != 0) { - printf("if_bond: SIOCGIFDEVMTU %s failed, %d\n", - bondport_get_name(p), error); - goto failed; - } - /* remember the current interface MTU so it can be restored */ - p->po_devmtu.ifdm_current = ifnet_mtu(port_ifp); - p->po_ifp = port_ifp; - p->po_media_info = interface_media_info(port_ifp); - p->po_current_while_timer = devtimer_create(bondport_timer_process_func, p); - if (p->po_current_while_timer == NULL) { - *ret_error = ENOMEM; - goto failed; - } - p->po_periodic_timer = devtimer_create(bondport_timer_process_func, p); - if (p->po_periodic_timer == NULL) { - *ret_error = ENOMEM; - goto failed; - } - p->po_wait_while_timer = devtimer_create(bondport_timer_process_func, p); - if (p->po_wait_while_timer == NULL) { - *ret_error = ENOMEM; - goto failed; - } - p->po_transmit_timer = devtimer_create(bondport_timer_process_func, p); - if (p->po_transmit_timer == NULL) { - *ret_error = ENOMEM; - goto failed; - } - p->po_receive_state = ReceiveState_none; - p->po_mux_state = MuxState_none; - p->po_priority = priority; - s = 0; - s = lacp_actor_partner_state_set_aggregatable(s); - if (short_timeout) { - s = lacp_actor_partner_state_set_short_timeout(s); - } - if (active) { - s = lacp_actor_partner_state_set_active_lacp(s); - } - p->po_actor_state = s; - return (p); - - failed: - bondport_free(p); - return (NULL); -} + int active, int short_timeout, int * ret_error) +{ + int error = 0; + bondport_ref p = NULL; + lacp_actor_partner_state s; + + *ret_error = 0; + p = _MALLOC(sizeof(*p), M_BOND, M_WAITOK | M_ZERO); + if (p == NULL) { + *ret_error = ENOMEM; + return NULL; + } + multicast_list_init(&p->po_multicast); + if ((u_int32_t)snprintf(p->po_name, sizeof(p->po_name), "%s%d", + ifnet_name(port_ifp), ifnet_unit(port_ifp)) + >= sizeof(p->po_name)) { + printf("if_bond: name too large\n"); + *ret_error = EINVAL; + goto failed; + } + error = siocgifdevmtu(port_ifp, &p->po_devmtu); + if (error != 0) { + printf("if_bond: SIOCGIFDEVMTU %s failed, %d\n", + bondport_get_name(p), error); + goto failed; + } + /* remember the current interface MTU so it can be restored */ + p->po_devmtu.ifdm_current = ifnet_mtu(port_ifp); + p->po_ifp = port_ifp; + p->po_media_info = interface_media_info(port_ifp); + p->po_current_while_timer = devtimer_create(bondport_timer_process_func, p); + if 
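[Editorial aside, not part of the patch: bondport_timer_process_func() above takes the bond lock and retains the timer on the "lock" event, then releases both on "unlock", so the bondport referenced by devtimer_arg0() stays valid while the callback body runs. A pthread-based sketch of the same keep-alive bracket, under the assumption that a refcount guarded by the lock models devtimer_retain/devtimer_release; struct reftimer and both functions are hypothetical.]

#include <pthread.h>

struct reftimer {
        pthread_mutex_t *lock;
        int              refcount;  /* protected by *lock */
};

static void
reftimer_enter(struct reftimer *t)
{
        pthread_mutex_lock(t->lock);
        t->refcount++;              /* keep t alive during the callback */
}

static void
reftimer_exit(struct reftimer *t)
{
        t->refcount--;              /* freeing at refcount 0 omitted */
        pthread_mutex_unlock(t->lock);
}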
(p->po_current_while_timer == NULL) { + *ret_error = ENOMEM; + goto failed; + } + p->po_periodic_timer = devtimer_create(bondport_timer_process_func, p); + if (p->po_periodic_timer == NULL) { + *ret_error = ENOMEM; + goto failed; + } + p->po_wait_while_timer = devtimer_create(bondport_timer_process_func, p); + if (p->po_wait_while_timer == NULL) { + *ret_error = ENOMEM; + goto failed; + } + p->po_transmit_timer = devtimer_create(bondport_timer_process_func, p); + if (p->po_transmit_timer == NULL) { + *ret_error = ENOMEM; + goto failed; + } + p->po_receive_state = ReceiveState_none; + p->po_mux_state = MuxState_none; + p->po_priority = priority; + s = 0; + s = lacp_actor_partner_state_set_aggregatable(s); + if (short_timeout) { + s = lacp_actor_partner_state_set_short_timeout(s); + } + if (active) { + s = lacp_actor_partner_state_set_active_lacp(s); + } + p->po_actor_state = s; + return p; + +failed: + bondport_free(p); + return NULL; +} static void bondport_start(bondport_ref p) { - bondport_receive_machine(p, LAEventStart, NULL); - bondport_mux_machine(p, LAEventStart, NULL); - bondport_periodic_transmit_machine(p, LAEventStart, NULL); - bondport_transmit_machine(p, LAEventStart, NULL); - return; + bondport_receive_machine(p, LAEventStart, NULL); + bondport_mux_machine(p, LAEventStart, NULL); + bondport_periodic_transmit_machine(p, LAEventStart, NULL); + bondport_transmit_machine(p, LAEventStart, NULL); + return; } /* @@ -1907,10 +1920,10 @@ bondport_start(bondport_ref p) static void bondport_invalidate_timers(bondport_ref p) { - devtimer_invalidate(p->po_current_while_timer); - devtimer_invalidate(p->po_periodic_timer); - devtimer_invalidate(p->po_wait_while_timer); - devtimer_invalidate(p->po_transmit_timer); + devtimer_invalidate(p->po_current_while_timer); + devtimer_invalidate(p->po_periodic_timer); + devtimer_invalidate(p->po_wait_while_timer); + devtimer_invalidate(p->po_transmit_timer); } /* @@ -1921,1139 +1934,1122 @@ bondport_invalidate_timers(bondport_ref p) static void bondport_cancel_timers(bondport_ref p) { - devtimer_cancel(p->po_current_while_timer); - devtimer_cancel(p->po_periodic_timer); - devtimer_cancel(p->po_wait_while_timer); - devtimer_cancel(p->po_transmit_timer); + devtimer_cancel(p->po_current_while_timer); + devtimer_cancel(p->po_periodic_timer); + devtimer_cancel(p->po_wait_while_timer); + devtimer_cancel(p->po_transmit_timer); } static void bondport_free(bondport_ref p) { - multicast_list_remove(&p->po_multicast); - devtimer_release(p->po_current_while_timer); - devtimer_release(p->po_periodic_timer); - devtimer_release(p->po_wait_while_timer); - devtimer_release(p->po_transmit_timer); - FREE(p, M_BOND); - return; + multicast_list_remove(&p->po_multicast); + devtimer_release(p->po_current_while_timer); + devtimer_release(p->po_periodic_timer); + devtimer_release(p->po_wait_while_timer); + devtimer_release(p->po_transmit_timer); + FREE(p, M_BOND); + return; } -#define BOND_ADD_PROGRESS_IN_LIST 0x1 -#define BOND_ADD_PROGRESS_PROTO_ATTACHED 0x2 -#define BOND_ADD_PROGRESS_LLADDR_SET 0x4 -#define BOND_ADD_PROGRESS_MTU_SET 0x8 +#define BOND_ADD_PROGRESS_IN_LIST 0x1 +#define BOND_ADD_PROGRESS_PROTO_ATTACHED 0x2 +#define BOND_ADD_PROGRESS_LLADDR_SET 0x4 +#define BOND_ADD_PROGRESS_MTU_SET 0x8 static __inline__ int bond_device_mtu(struct ifnet * ifp, ifbond_ref ifb) { - return (((int)ifnet_mtu(ifp) > ifb->ifb_altmtu) - ? (int)ifnet_mtu(ifp) : ifb->ifb_altmtu); + return ((int)ifnet_mtu(ifp) > ifb->ifb_altmtu) + ? 
(int)ifnet_mtu(ifp) : ifb->ifb_altmtu; } static int bond_add_interface(struct ifnet * ifp, struct ifnet * port_ifp) { - int devmtu; - int error = 0; - int event_code = 0; - int first = FALSE; - ifbond_ref ifb; - bondport_ref * new_array = NULL; - bondport_ref * old_array = NULL; - bondport_ref p; - int progress = 0; - - if (IFNET_IS_INTCOPROC(port_ifp)) { - return (EINVAL); - } - - /* pre-allocate space for new port */ - p = bondport_create(port_ifp, 0x8000, 1, 0, &error); - if (p == NULL) { - return (error); - } - bond_lock(); - ifb = (ifbond_ref)ifnet_softc(ifp); - if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { + int devmtu; + int error = 0; + int event_code = 0; + int first = FALSE; + ifbond_ref ifb; + bondport_ref * new_array = NULL; + bondport_ref * old_array = NULL; + bondport_ref p; + int progress = 0; + + if (IFNET_IS_INTCOPROC(port_ifp)) { + return EINVAL; + } + + /* pre-allocate space for new port */ + p = bondport_create(port_ifp, 0x8000, 1, 0, &error); + if (p == NULL) { + return error; + } + bond_lock(); + ifb = (ifbond_ref)ifnet_softc(ifp); + if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { + bond_unlock(); + bondport_free(p); + return ifb == NULL ? EOPNOTSUPP : EBUSY; + } + + /* make sure this interface can handle our current MTU */ + devmtu = bond_device_mtu(ifp, ifb); + if (devmtu != 0 + && (devmtu > p->po_devmtu.ifdm_max || devmtu < p->po_devmtu.ifdm_min)) { + bond_unlock(); + printf("if_bond: interface %s doesn't support mtu %d", + bondport_get_name(p), devmtu); + bondport_free(p); + return EINVAL; + } + + /* make sure ifb doesn't get de-allocated while we wait */ + ifbond_retain(ifb); + + /* wait for other add or remove to complete */ + ifbond_wait(ifb, "bond_add_interface"); + + if (ifbond_flags_if_detaching(ifb)) { + /* someone destroyed the bond while we were waiting */ + error = EBUSY; + goto signal_done; + } + if (bond_lookup_port(port_ifp) != NULL) { + /* port is already part of a bond */ + error = EBUSY; + goto signal_done; + } + ifnet_lock_exclusive(port_ifp); + if ((ifnet_eflags(port_ifp) & (IFEF_VLAN | IFEF_BOND)) != 0) { + /* interface already has VLAN's, or is part of bond */ + ifnet_lock_done(port_ifp); + error = EBUSY; + goto signal_done; + } + + /* mark the interface busy */ + /* can't use ifnet_set_eflags because that takes the lock */ + port_ifp->if_eflags |= IFEF_BOND; + ifnet_lock_done(port_ifp); + + if (TAILQ_EMPTY(&ifb->ifb_port_list)) { + ifnet_set_offload(ifp, ifnet_offload(port_ifp)); + ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING); + if (ifbond_flags_lladdr(ifb) == FALSE) { + first = TRUE; + } + } else { + ifnet_offload_t ifp_offload; + ifnet_offload_t port_ifp_offload; + + ifp_offload = ifnet_offload(ifp); + port_ifp_offload = ifnet_offload(port_ifp); + if (ifp_offload != port_ifp_offload) { + ifnet_offload_t offload; + + offload = ifp_offload & port_ifp_offload; + printf("bond_add_interface(%s, %s) " + "hwassist values don't match 0x%x != 0x%x, using 0x%x instead\n", + ifb->ifb_name, bondport_get_name(p), + ifp_offload, port_ifp_offload, offload); + /* + * XXX + * if the bond has VLAN's, we can't simply change the hwassist + * field behind its back: this needs work + */ + ifnet_set_offload(ifp, offload); + } + } + p->po_bond = ifb; + + /* remember the port's ethernet address so it can be restored */ + ether_addr_copy(&p->po_saved_addr, IF_LLADDR(port_ifp)); + + /* add it to the list of ports */ + TAILQ_INSERT_TAIL(&ifb->ifb_port_list, p, po_port_list); + ifb->ifb_port_count++; + bond_unlock(); - bondport_free(p); - return ((ifb == 
NULL ? EOPNOTSUPP : EBUSY)); - } - /* make sure this interface can handle our current MTU */ - devmtu = bond_device_mtu(ifp, ifb); - if (devmtu != 0 - && (devmtu > p->po_devmtu.ifdm_max || devmtu < p->po_devmtu.ifdm_min)) { + + /* first port added to bond determines bond's ethernet address */ + if (first) { + ifnet_set_lladdr_and_type(ifp, IF_LLADDR(port_ifp), ETHER_ADDR_LEN, + IFT_ETHER); + } + + progress |= BOND_ADD_PROGRESS_IN_LIST; + + /* allocate a larger distributing array */ + new_array = (bondport_ref *) + _MALLOC(sizeof(*new_array) * ifb->ifb_port_count, M_BOND, M_WAITOK); + if (new_array == NULL) { + error = ENOMEM; + goto failed; + } + + /* attach our BOND "protocol" to the interface */ + error = bond_attach_protocol(port_ifp); + if (error) { + goto failed; + } + progress |= BOND_ADD_PROGRESS_PROTO_ATTACHED; + + /* set the interface MTU */ + devmtu = bond_device_mtu(ifp, ifb); + error = siocsifmtu(port_ifp, devmtu); + if (error != 0) { + printf("bond_add_interface(%s, %s):" + " SIOCSIFMTU %d failed %d\n", + ifb->ifb_name, bondport_get_name(p), devmtu, error); + goto failed; + } + progress |= BOND_ADD_PROGRESS_MTU_SET; + + /* program the port with our multicast addresses */ + error = multicast_list_program(&p->po_multicast, ifp, port_ifp); + if (error) { + printf("bond_add_interface(%s, %s):" + " multicast_list_program failed %d\n", + ifb->ifb_name, bondport_get_name(p), error); + goto failed; + } + + /* mark the interface up */ + ifnet_set_flags(port_ifp, IFF_UP, IFF_UP); + + error = ifnet_ioctl(port_ifp, 0, SIOCSIFFLAGS, NULL); + if (error != 0) { + printf("bond_add_interface(%s, %s): SIOCSIFFLAGS failed %d\n", + ifb->ifb_name, bondport_get_name(p), error); + goto failed; + } + + /* re-program the port's ethernet address */ + error = if_siflladdr(port_ifp, + (const struct ether_addr *)IF_LLADDR(ifp)); + if (error != 0) { + /* port doesn't support setting the link address */ + printf("bond_add_interface(%s, %s): if_siflladdr failed %d\n", + ifb->ifb_name, bondport_get_name(p), error); + goto failed; + } + progress |= BOND_ADD_PROGRESS_LLADDR_SET; + + bond_lock(); + + /* no failures past this point */ + p->po_enabled = 1; + + /* copy the contents of the existing distributing array */ + if (ifb->ifb_distributing_count) { + bcopy(ifb->ifb_distributing_array, new_array, + sizeof(*new_array) * ifb->ifb_distributing_count); + } + old_array = ifb->ifb_distributing_array; + ifb->ifb_distributing_array = new_array; + + if (ifb->ifb_mode == IF_BOND_MODE_LACP) { + bondport_start(p); + + /* check if we need to generate a link status event */ + if (ifbond_selection(ifb)) { + event_code = (ifb->ifb_active_lag == NULL) + ? KEV_DL_LINK_OFF + : KEV_DL_LINK_ON; + ifb->ifb_last_link_event = event_code; + } + } else { + /* are we adding the first distributing interface? 
*/ + if (media_active(&p->po_media_info)) { + if (ifb->ifb_distributing_count == 0) { + ifb->ifb_last_link_event = event_code = KEV_DL_LINK_ON; + } + bondport_enable_distributing(p); + } else { + bondport_disable_distributing(p); + } + } + /* clear the busy state, and wakeup anyone waiting */ + ifbond_signal(ifb, "bond_add_interface"); bond_unlock(); - printf("if_bond: interface %s doesn't support mtu %d", - bondport_get_name(p), devmtu); - bondport_free(p); - return (EINVAL); - } - - /* make sure ifb doesn't get de-allocated while we wait */ - ifbond_retain(ifb); - - /* wait for other add or remove to complete */ - ifbond_wait(ifb, "bond_add_interface"); - - if (ifbond_flags_if_detaching(ifb)) { - /* someone destroyed the bond while we were waiting */ - error = EBUSY; - goto signal_done; - } - if (bond_lookup_port(port_ifp) != NULL) { - /* port is already part of a bond */ - error = EBUSY; - goto signal_done; - } - ifnet_lock_exclusive(port_ifp); - if ((ifnet_eflags(port_ifp) & (IFEF_VLAN | IFEF_BOND)) != 0) { - /* interface already has VLAN's, or is part of bond */ - ifnet_lock_done(port_ifp); - error = EBUSY; - goto signal_done; - } - - /* mark the interface busy */ - /* can't use ifnet_set_eflags because that takes the lock */ - port_ifp->if_eflags |= IFEF_BOND; - ifnet_lock_done(port_ifp); - - if (TAILQ_EMPTY(&ifb->ifb_port_list)) { - ifnet_set_offload(ifp, ifnet_offload(port_ifp)); - ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING); - if (ifbond_flags_lladdr(ifb) == FALSE) { - first = TRUE; - } - } else { - ifnet_offload_t ifp_offload; - ifnet_offload_t port_ifp_offload; - - ifp_offload = ifnet_offload(ifp); - port_ifp_offload = ifnet_offload(port_ifp); - if (ifp_offload != port_ifp_offload) { - ifnet_offload_t offload; - - offload = ifp_offload & port_ifp_offload; - printf("bond_add_interface(%s, %s) " - "hwassist values don't match 0x%x != 0x%x, using 0x%x instead\n", - ifb->ifb_name, bondport_get_name(p), - ifp_offload, port_ifp_offload, offload); - /* - * XXX - * if the bond has VLAN's, we can't simply change the hwassist - * field behind its back: this needs work - */ - ifnet_set_offload(ifp, offload); - } - } - p->po_bond = ifb; - - /* remember the port's ethernet address so it can be restored */ - ether_addr_copy(&p->po_saved_addr, IF_LLADDR(port_ifp)); - - /* add it to the list of ports */ - TAILQ_INSERT_TAIL(&ifb->ifb_port_list, p, po_port_list); - ifb->ifb_port_count++; - - /* set the default MTU */ - if (ifnet_mtu(ifp) == 0) { - ifnet_set_mtu(ifp, ETHERMTU); - } - bond_unlock(); - - - /* first port added to bond determines bond's ethernet address */ - if (first) { - ifnet_set_lladdr_and_type(ifp, IF_LLADDR(port_ifp), ETHER_ADDR_LEN, - IFT_ETHER); - } - - progress |= BOND_ADD_PROGRESS_IN_LIST; - - /* allocate a larger distributing array */ - new_array = (bondport_ref *) - _MALLOC(sizeof(*new_array) * ifb->ifb_port_count, M_BOND, M_WAITOK); - if (new_array == NULL) { - error = ENOMEM; - goto failed; - } - - /* attach our BOND "protocol" to the interface */ - error = bond_attach_protocol(port_ifp); - if (error) { - goto failed; - } - progress |= BOND_ADD_PROGRESS_PROTO_ATTACHED; - - /* set the interface MTU */ - devmtu = bond_device_mtu(ifp, ifb); - error = siocsifmtu(port_ifp, devmtu); - if (error != 0) { - printf("bond_add_interface(%s, %s):" - " SIOCSIFMTU %d failed %d\n", - ifb->ifb_name, bondport_get_name(p), devmtu, error); - goto failed; - } - progress |= BOND_ADD_PROGRESS_MTU_SET; - - /* program the port with our multicast addresses */ - error = 
multicast_list_program(&p->po_multicast, ifp, port_ifp); - if (error) { - printf("bond_add_interface(%s, %s):" - " multicast_list_program failed %d\n", - ifb->ifb_name, bondport_get_name(p), error); - goto failed; - } - - /* mark the interface up */ - ifnet_set_flags(port_ifp, IFF_UP, IFF_UP); - - error = ifnet_ioctl(port_ifp, 0, SIOCSIFFLAGS, NULL); - if (error != 0) { - printf("bond_add_interface(%s, %s): SIOCSIFFLAGS failed %d\n", - ifb->ifb_name, bondport_get_name(p), error); - goto failed; - } - - /* re-program the port's ethernet address */ - error = if_siflladdr(port_ifp, - (const struct ether_addr *)IF_LLADDR(ifp)); - if (error != 0) { - /* port doesn't support setting the link address */ - printf("bond_add_interface(%s, %s): if_siflladdr failed %d\n", - ifb->ifb_name, bondport_get_name(p), error); - goto failed; - } - progress |= BOND_ADD_PROGRESS_LLADDR_SET; - - bond_lock(); - - /* no failures past this point */ - p->po_enabled = 1; - - /* copy the contents of the existing distributing array */ - if (ifb->ifb_distributing_count) { - bcopy(ifb->ifb_distributing_array, new_array, - sizeof(*new_array) * ifb->ifb_distributing_count); - } - old_array = ifb->ifb_distributing_array; - ifb->ifb_distributing_array = new_array; - - if (ifb->ifb_mode == IF_BOND_MODE_LACP) { - bondport_start(p); + if (event_code != 0) { + interface_link_event(ifp, event_code); + } + if (old_array != NULL) { + FREE(old_array, M_BOND); + } + return 0; - /* check if we need to generate a link status event */ - if (ifbond_selection(ifb)) { - event_code = (ifb->ifb_active_lag == NULL) - ? KEV_DL_LINK_OFF - : KEV_DL_LINK_ON; - ifb->ifb_last_link_event = event_code; - } - } - else { - /* are we adding the first distributing interface? */ - if (media_active(&p->po_media_info)) { - if (ifb->ifb_distributing_count == 0) { - ifb->ifb_last_link_event = event_code = KEV_DL_LINK_ON; - } - bondport_enable_distributing(p); - } - else { - bondport_disable_distributing(p); - } - } - /* clear the busy state, and wakeup anyone waiting */ - ifbond_signal(ifb, "bond_add_interface"); - bond_unlock(); - if (event_code != 0) { - interface_link_event(ifp, event_code); - } - if (old_array != NULL) { - FREE(old_array, M_BOND); - } - return 0; - - failed: - bond_assert_lock_not_held(); - - /* if this was the first port to be added, clear our address */ - if (first) { - ifnet_set_lladdr_and_type(ifp, NULL, 0, IFT_IEEE8023ADLAG); - } - - if (new_array != NULL) { - FREE(new_array, M_BOND); - } - if ((progress & BOND_ADD_PROGRESS_LLADDR_SET) != 0) { - int error1; - - error1 = if_siflladdr(port_ifp, &p->po_saved_addr); - if (error1 != 0) { - printf("bond_add_interface(%s, %s): if_siflladdr failed %d\n", - ifb->ifb_name, bondport_get_name(p), error1); - } - } - if ((progress & BOND_ADD_PROGRESS_PROTO_ATTACHED) != 0) { - (void)bond_detach_protocol(port_ifp); - } - if ((progress & BOND_ADD_PROGRESS_MTU_SET) != 0) { - int error1; - - error1 = siocsifmtu(port_ifp, p->po_devmtu.ifdm_current); - if (error1 != 0) { - printf("bond_add_interface(%s, %s): SIOCSIFMTU %d failed %d\n", - ifb->ifb_name, bondport_get_name(p), - p->po_devmtu.ifdm_current, error1); - } - } - bond_lock(); - if ((progress & BOND_ADD_PROGRESS_IN_LIST) != 0) { - TAILQ_REMOVE(&ifb->ifb_port_list, p, po_port_list); - ifb->ifb_port_count--; - } - ifnet_set_eflags(ifp, 0, IFEF_BOND); - if (TAILQ_EMPTY(&ifb->ifb_port_list)) { - ifb->ifb_altmtu = 0; - ifnet_set_mtu(ifp, 0); - ifnet_set_offload(ifp, 0); - } +failed: + bond_assert_lock_not_held(); + + /* if this was the first port to be 
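[Editorial aside, not part of the patch: bond_add_interface() records each completed step in `progress` (BOND_ADD_PROGRESS_* bits) and its failure path undoes only the steps that actually completed. A compact self-contained sketch of that unwind pattern; the step functions are hypothetical placeholders, not xnu APIs.]

#include <stdbool.h>

#define STEP_A_DONE 0x1
#define STEP_B_DONE 0x2

static bool do_step_a(void) { return true; }  /* e.g. attach protocol */
static bool do_step_b(void) { return true; }  /* e.g. set MTU */
static void undo_step_a(void) { }
static void undo_step_b(void) { }

static int
setup_with_unwind(void)
{
        int progress = 0;

        if (!do_step_a()) {
                goto failed;
        }
        progress |= STEP_A_DONE;
        if (!do_step_b()) {
                goto failed;
        }
        progress |= STEP_B_DONE;
        return 0;

failed:
        /* undo exactly the steps that completed, newest first */
        if (progress & STEP_B_DONE) {
                undo_step_b();
        }
        if (progress & STEP_A_DONE) {
                undo_step_a();
        }
        return -1;
}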
added, clear our address */ + if (first) { + ifnet_set_lladdr_and_type(ifp, NULL, 0, IFT_IEEE8023ADLAG); + } + + if (new_array != NULL) { + FREE(new_array, M_BOND); + } + if ((progress & BOND_ADD_PROGRESS_LLADDR_SET) != 0) { + int error1; + + error1 = if_siflladdr(port_ifp, &p->po_saved_addr); + if (error1 != 0) { + printf("bond_add_interface(%s, %s): if_siflladdr failed %d\n", + ifb->ifb_name, bondport_get_name(p), error1); + } + } + if ((progress & BOND_ADD_PROGRESS_PROTO_ATTACHED) != 0) { + (void)bond_detach_protocol(port_ifp); + } + if ((progress & BOND_ADD_PROGRESS_MTU_SET) != 0) { + int error1; + + error1 = siocsifmtu(port_ifp, p->po_devmtu.ifdm_current); + if (error1 != 0) { + printf("bond_add_interface(%s, %s): SIOCSIFMTU %d failed %d\n", + ifb->ifb_name, bondport_get_name(p), + p->po_devmtu.ifdm_current, error1); + } + } + bond_lock(); + if ((progress & BOND_ADD_PROGRESS_IN_LIST) != 0) { + TAILQ_REMOVE(&ifb->ifb_port_list, p, po_port_list); + ifb->ifb_port_count--; + } + ifnet_set_eflags(ifp, 0, IFEF_BOND); + if (TAILQ_EMPTY(&ifb->ifb_port_list)) { + ifb->ifb_altmtu = 0; + ifnet_set_mtu(ifp, ETHERMTU); + ifnet_set_offload(ifp, 0); + } - signal_done: - ifbond_signal(ifb, "bond_add_interface"); - bond_unlock(); - ifbond_release(ifb); - bondport_free(p); - return (error); +signal_done: + ifbond_signal(ifb, "bond_add_interface"); + bond_unlock(); + ifbond_release(ifb); + bondport_free(p); + return error; } static int bond_remove_interface(ifbond_ref ifb, struct ifnet * port_ifp) { - int active_lag = 0; - int error = 0; - int event_code = 0; - bondport_ref head_port; - struct ifnet * ifp; - int last = FALSE; - int new_link_address = FALSE; - bondport_ref p; - lacp_actor_partner_state s; - int was_distributing; - - bond_assert_lock_held(); - - ifbond_retain(ifb); - ifbond_wait(ifb, "bond_remove_interface"); - - p = ifbond_lookup_port(ifb, port_ifp); - if (p == NULL) { - error = ENXIO; - /* it got removed by another thread */ - goto signal_done; - } - - /* de-select it and remove it from the lists */ - was_distributing = bondport_flags_distributing(p); - bondport_disable_distributing(p); - if (ifb->ifb_mode == IF_BOND_MODE_LACP) { - bondport_set_selected(p, SelectedState_UNSELECTED); - active_lag = bondport_remove_from_LAG(p); - /* invalidate timers here while holding the bond_lock */ - bondport_invalidate_timers(p); - - /* announce that we're Individual now */ - s = p->po_actor_state; - s = lacp_actor_partner_state_set_individual(s); - s = lacp_actor_partner_state_set_not_collecting(s); - s = lacp_actor_partner_state_set_not_distributing(s); - s = lacp_actor_partner_state_set_out_of_sync(s); - p->po_actor_state = s; - bondport_flags_set_ntt(p); - } + int active_lag = 0; + int error = 0; + int event_code = 0; + bondport_ref head_port; + struct ifnet * ifp; + int last = FALSE; + int new_link_address = FALSE; + bondport_ref p; + lacp_actor_partner_state s; + int was_distributing; + + bond_assert_lock_held(); - TAILQ_REMOVE(&ifb->ifb_port_list, p, po_port_list); - ifb->ifb_port_count--; + ifbond_retain(ifb); + ifbond_wait(ifb, "bond_remove_interface"); - ifp = ifb->ifb_ifp; - head_port = TAILQ_FIRST(&ifb->ifb_port_list); - if (head_port == NULL) { - ifnet_set_flags(ifp, 0, IFF_RUNNING); - if (ifbond_flags_lladdr(ifb) == FALSE) { - last = TRUE; + p = ifbond_lookup_port(ifb, port_ifp); + if (p == NULL) { + error = ENXIO; + /* it got removed by another thread */ + goto signal_done; } - ifnet_set_offload(ifp, 0); - ifnet_set_mtu(ifp, 0); - ifb->ifb_altmtu = 0; - } else if 
(ifbond_flags_lladdr(ifb) == FALSE - && bcmp(&p->po_saved_addr, IF_LLADDR(ifp), - ETHER_ADDR_LEN) == 0) { - new_link_address = TRUE; - } - /* check if we need to generate a link status event */ - if (ifb->ifb_mode == IF_BOND_MODE_LACP ) { - if (ifbond_selection(ifb) || active_lag) { - event_code = (ifb->ifb_active_lag == NULL) - ? KEV_DL_LINK_OFF - : KEV_DL_LINK_ON; - ifb->ifb_last_link_event = event_code; - } - bondport_transmit_machine(p, LAEventStart, - TRANSMIT_MACHINE_TX_IMMEDIATE); - } - else { - /* are we removing the last distributing interface? */ - if (was_distributing && ifb->ifb_distributing_count == 0) { - ifb->ifb_last_link_event = event_code = KEV_DL_LINK_OFF; - } - } - - bond_unlock(); - - if (last) { - ifnet_set_lladdr_and_type(ifp, NULL, 0, IFT_IEEE8023ADLAG); - } - else if (new_link_address) { - struct ifnet * scan_ifp; - bondport_ref scan_port; - - /* ifbond_wait() allows port list traversal without holding the lock */ - - /* this port gave the bond its ethernet address, switch to new one */ - ifnet_set_lladdr_and_type(ifp, - &head_port->po_saved_addr, ETHER_ADDR_LEN, - IFT_ETHER); - - /* re-program each port with the new link address */ - TAILQ_FOREACH(scan_port, &ifb->ifb_port_list, po_port_list) { - scan_ifp = scan_port->po_ifp; - - error = if_siflladdr(scan_ifp, - (const struct ether_addr *) IF_LLADDR(ifp)); - if (error != 0) { - printf("bond_remove_interface(%s, %s): " - "if_siflladdr (%s) failed %d\n", - ifb->ifb_name, bondport_get_name(p), - bondport_get_name(scan_port), error); - } - } - } - - /* restore the port's ethernet address */ - error = if_siflladdr(port_ifp, &p->po_saved_addr); - if (error != 0) { - printf("bond_remove_interface(%s, %s): if_siflladdr failed %d\n", - ifb->ifb_name, bondport_get_name(p), error); - } - - /* restore the port's MTU */ - error = siocsifmtu(port_ifp, p->po_devmtu.ifdm_current); - if (error != 0) { - printf("bond_remove_interface(%s, %s): SIOCSIFMTU %d failed %d\n", - ifb->ifb_name, bondport_get_name(p), - p->po_devmtu.ifdm_current, error); - } - - /* remove the bond "protocol" */ - bond_detach_protocol(port_ifp); - - /* generate link event */ - if (event_code != 0) { - interface_link_event(ifp, event_code); - } - - bond_lock(); - bondport_free(p); - ifnet_set_eflags(port_ifp, 0, IFEF_BOND); - /* release this bondport's reference to the ifbond */ - ifbond_release(ifb); - - signal_done: - ifbond_signal(ifb, "bond_remove_interface"); - ifbond_release(ifb); - return (error); + + /* de-select it and remove it from the lists */ + was_distributing = bondport_flags_distributing(p); + bondport_disable_distributing(p); + if (ifb->ifb_mode == IF_BOND_MODE_LACP) { + bondport_set_selected(p, SelectedState_UNSELECTED); + active_lag = bondport_remove_from_LAG(p); + /* invalidate timers here while holding the bond_lock */ + bondport_invalidate_timers(p); + + /* announce that we're Individual now */ + s = p->po_actor_state; + s = lacp_actor_partner_state_set_individual(s); + s = lacp_actor_partner_state_set_not_collecting(s); + s = lacp_actor_partner_state_set_not_distributing(s); + s = lacp_actor_partner_state_set_out_of_sync(s); + p->po_actor_state = s; + bondport_flags_set_ntt(p); + } + + TAILQ_REMOVE(&ifb->ifb_port_list, p, po_port_list); + ifb->ifb_port_count--; + + ifp = ifb->ifb_ifp; + head_port = TAILQ_FIRST(&ifb->ifb_port_list); + if (head_port == NULL) { + ifnet_set_flags(ifp, 0, IFF_RUNNING); + if (ifbond_flags_lladdr(ifb) == FALSE) { + last = TRUE; + } + ifnet_set_offload(ifp, 0); + ifnet_set_mtu(ifp, ETHERMTU); + ifb->ifb_altmtu = 
0; + } else if (ifbond_flags_lladdr(ifb) == FALSE + && bcmp(&p->po_saved_addr, IF_LLADDR(ifp), + ETHER_ADDR_LEN) == 0) { + new_link_address = TRUE; + } + /* check if we need to generate a link status event */ + if (ifb->ifb_mode == IF_BOND_MODE_LACP) { + if (ifbond_selection(ifb) || active_lag) { + event_code = (ifb->ifb_active_lag == NULL) + ? KEV_DL_LINK_OFF + : KEV_DL_LINK_ON; + ifb->ifb_last_link_event = event_code; + } + bondport_transmit_machine(p, LAEventStart, + TRANSMIT_MACHINE_TX_IMMEDIATE); + } else { + /* are we removing the last distributing interface? */ + if (was_distributing && ifb->ifb_distributing_count == 0) { + ifb->ifb_last_link_event = event_code = KEV_DL_LINK_OFF; + } + } + + bond_unlock(); + + if (last) { + ifnet_set_lladdr_and_type(ifp, NULL, 0, IFT_IEEE8023ADLAG); + } else if (new_link_address) { + struct ifnet * scan_ifp; + bondport_ref scan_port; + + /* ifbond_wait() allows port list traversal without holding the lock */ + + /* this port gave the bond its ethernet address, switch to new one */ + ifnet_set_lladdr_and_type(ifp, + &head_port->po_saved_addr, ETHER_ADDR_LEN, + IFT_ETHER); + + /* re-program each port with the new link address */ + TAILQ_FOREACH(scan_port, &ifb->ifb_port_list, po_port_list) { + scan_ifp = scan_port->po_ifp; + + error = if_siflladdr(scan_ifp, + (const struct ether_addr *) IF_LLADDR(ifp)); + if (error != 0) { + printf("bond_remove_interface(%s, %s): " + "if_siflladdr (%s) failed %d\n", + ifb->ifb_name, bondport_get_name(p), + bondport_get_name(scan_port), error); + } + } + } + + /* restore the port's ethernet address */ + error = if_siflladdr(port_ifp, &p->po_saved_addr); + if (error != 0) { + printf("bond_remove_interface(%s, %s): if_siflladdr failed %d\n", + ifb->ifb_name, bondport_get_name(p), error); + } + + /* restore the port's MTU */ + error = siocsifmtu(port_ifp, p->po_devmtu.ifdm_current); + if (error != 0) { + printf("bond_remove_interface(%s, %s): SIOCSIFMTU %d failed %d\n", + ifb->ifb_name, bondport_get_name(p), + p->po_devmtu.ifdm_current, error); + } + + /* remove the bond "protocol" */ + bond_detach_protocol(port_ifp); + + /* generate link event */ + if (event_code != 0) { + interface_link_event(ifp, event_code); + } + + bond_lock(); + bondport_free(p); + ifnet_set_eflags(port_ifp, 0, IFEF_BOND); + /* release this bondport's reference to the ifbond */ + ifbond_release(ifb); + +signal_done: + ifbond_signal(ifb, "bond_remove_interface"); + ifbond_release(ifb); + return error; } static void bond_set_lacp_mode(ifbond_ref ifb) { - bondport_ref p; + bondport_ref p; - TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { - bondport_disable_distributing(p); - bondport_start(p); - } - return; + TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { + bondport_disable_distributing(p); + bondport_start(p); + } + return; } static void bond_set_static_mode(ifbond_ref ifb) { - bondport_ref p; - lacp_actor_partner_state s; + bondport_ref p; + lacp_actor_partner_state s; - TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { - bondport_disable_distributing(p); - bondport_set_selected(p, SelectedState_UNSELECTED); - (void)bondport_remove_from_LAG(p); - bondport_cancel_timers(p); - - /* announce that we're Individual now */ - s = p->po_actor_state; - s = lacp_actor_partner_state_set_individual(s); - s = lacp_actor_partner_state_set_not_collecting(s); - s = lacp_actor_partner_state_set_not_distributing(s); - s = lacp_actor_partner_state_set_out_of_sync(s); - p->po_actor_state = s; - bondport_flags_set_ntt(p); - bondport_transmit_machine(p, 
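[Editorial aside, not part of the patch: when the port that donated the bond's MAC address is removed, bond_remove_interface() above adopts the head port's saved address and reprograms every remaining port with it via if_siflladdr(). A flat-array sketch of that adoption; struct sketch_port, MACLEN, and adopt_head_port_addr are hypothetical.]

#include <string.h>

#define MACLEN 6

struct sketch_port {
        unsigned char saved_addr[MACLEN];
        unsigned char current_addr[MACLEN];
};

/*
 * Adopt ports[0]'s saved address as the bond address and push it to
 * all ports, mirroring the TAILQ_FOREACH + if_siflladdr() loop above.
 * Caller guarantees n_ports > 0, as the head_port != NULL check does.
 */
static void
adopt_head_port_addr(unsigned char bond_addr[MACLEN],
    struct sketch_port *ports, size_t n_ports)
{
        size_t i;

        memcpy(bond_addr, ports[0].saved_addr, MACLEN);
        for (i = 0; i < n_ports; i++) {
                memcpy(ports[i].current_addr, bond_addr, MACLEN);
        }
}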
LAEventStart, - TRANSMIT_MACHINE_TX_IMMEDIATE); - /* clear state */ - p->po_actor_state = 0; - bzero(&p->po_partner_state, sizeof(p->po_partner_state)); - - if (media_active(&p->po_media_info)) { - bondport_enable_distributing(p); - } - else { - bondport_disable_distributing(p); + TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { + bondport_disable_distributing(p); + bondport_set_selected(p, SelectedState_UNSELECTED); + (void)bondport_remove_from_LAG(p); + bondport_cancel_timers(p); + + /* announce that we're Individual now */ + s = p->po_actor_state; + s = lacp_actor_partner_state_set_individual(s); + s = lacp_actor_partner_state_set_not_collecting(s); + s = lacp_actor_partner_state_set_not_distributing(s); + s = lacp_actor_partner_state_set_out_of_sync(s); + p->po_actor_state = s; + bondport_flags_set_ntt(p); + bondport_transmit_machine(p, LAEventStart, + TRANSMIT_MACHINE_TX_IMMEDIATE); + /* clear state */ + p->po_actor_state = 0; + bzero(&p->po_partner_state, sizeof(p->po_partner_state)); + + if (media_active(&p->po_media_info)) { + bondport_enable_distributing(p); + } else { + bondport_disable_distributing(p); + } } - } - return; + return; } static int bond_set_mode(struct ifnet * ifp, int mode) { - int error = 0; - int event_code = 0; - ifbond_ref ifb; + int error = 0; + int event_code = 0; + ifbond_ref ifb; - bond_lock(); - ifb = (ifbond_ref)ifnet_softc(ifp); - if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { - bond_unlock(); - return ((ifb == NULL) ? EOPNOTSUPP : EBUSY); - } - if (ifb->ifb_mode == mode) { - bond_unlock(); - return (0); - } - - ifbond_retain(ifb); - ifbond_wait(ifb, "bond_set_mode"); - - /* verify (again) that the mode is actually different */ - if (ifb->ifb_mode == mode) { - /* nothing to do */ - goto signal_done; - } - - ifb->ifb_mode = mode; - if (mode == IF_BOND_MODE_LACP) { - bond_set_lacp_mode(ifb); - - /* check if we need to generate a link status event */ - if (ifbond_selection(ifb)) { - event_code = (ifb->ifb_active_lag == NULL) - ? KEV_DL_LINK_OFF - : KEV_DL_LINK_ON; + bond_lock(); + ifb = (ifbond_ref)ifnet_softc(ifp); + if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { + bond_unlock(); + return (ifb == NULL) ? EOPNOTSUPP : EBUSY; + } + if (ifb->ifb_mode == mode) { + bond_unlock(); + return 0; } - } else { - bond_set_static_mode(ifb); - event_code = (ifb->ifb_distributing_count == 0) - ? KEV_DL_LINK_OFF - : KEV_DL_LINK_ON; - } - ifb->ifb_last_link_event = event_code; - signal_done: - ifbond_signal(ifb, "bond_set_mode"); - bond_unlock(); - ifbond_release(ifb); + ifbond_retain(ifb); + ifbond_wait(ifb, "bond_set_mode"); + + /* verify (again) that the mode is actually different */ + if (ifb->ifb_mode == mode) { + /* nothing to do */ + goto signal_done; + } + + ifb->ifb_mode = mode; + if (mode == IF_BOND_MODE_LACP) { + bond_set_lacp_mode(ifb); + + /* check if we need to generate a link status event */ + if (ifbond_selection(ifb)) { + event_code = (ifb->ifb_active_lag == NULL) + ? KEV_DL_LINK_OFF + : KEV_DL_LINK_ON; + } + } else { + bond_set_static_mode(ifb); + event_code = (ifb->ifb_distributing_count == 0) + ? 
KEV_DL_LINK_OFF + : KEV_DL_LINK_ON; + } + ifb->ifb_last_link_event = event_code; - if (event_code != 0) { - interface_link_event(ifp, event_code); - } - return (error); +signal_done: + ifbond_signal(ifb, "bond_set_mode"); + bond_unlock(); + ifbond_release(ifb); + + if (event_code != 0) { + interface_link_event(ifp, event_code); + } + return error; } static int bond_get_status(ifbond_ref ifb, struct if_bond_req * ibr_p, user_addr_t datap) { - int count; - user_addr_t dst; - int error = 0; - struct if_bond_status_req * ibsr; - struct if_bond_status ibs; - bondport_ref port; - - ibsr = &(ibr_p->ibr_ibru.ibru_status); - if (ibsr->ibsr_version != IF_BOND_STATUS_REQ_VERSION) { - return (EINVAL); - } - ibsr->ibsr_key = ifb->ifb_key; - ibsr->ibsr_mode = ifb->ifb_mode; - ibsr->ibsr_total = ifb->ifb_port_count; - dst = proc_is64bit(current_proc()) - ? ibsr->ibsr_ibsru.ibsru_buffer64 - : CAST_USER_ADDR_T(ibsr->ibsr_ibsru.ibsru_buffer); - if (dst == USER_ADDR_NULL) { - /* just want to know how many there are */ - goto done; - } - if (ibsr->ibsr_count < 0) { - return (EINVAL); - } - count = (ifb->ifb_port_count < ibsr->ibsr_count) - ? ifb->ifb_port_count : ibsr->ibsr_count; - TAILQ_FOREACH(port, &ifb->ifb_port_list, po_port_list) { - struct if_bond_partner_state * ibps_p; - partner_state_ref ps; - - if (count == 0) { - break; - } - bzero(&ibs, sizeof(ibs)); - strlcpy(ibs.ibs_if_name, port->po_name, sizeof(ibs.ibs_if_name)); - ibs.ibs_port_priority = port->po_priority; - if (ifb->ifb_mode == IF_BOND_MODE_LACP) { - ibs.ibs_state = port->po_actor_state; - ibs.ibs_selected_state = port->po_selected; - ps = &port->po_partner_state; - ibps_p = &ibs.ibs_partner_state; - ibps_p->ibps_system = ps->ps_lag_info.li_system; - ibps_p->ibps_system_priority = ps->ps_lag_info.li_system_priority; - ibps_p->ibps_key = ps->ps_lag_info.li_key; - ibps_p->ibps_port = ps->ps_port; - ibps_p->ibps_port_priority = ps->ps_port_priority; - ibps_p->ibps_state = ps->ps_state; - } - else { - /* fake the selected information */ - ibs.ibs_selected_state = bondport_flags_distributing(port) - ? SelectedState_SELECTED : SelectedState_UNSELECTED; - } - error = copyout(&ibs, dst, sizeof(ibs)); - if (error != 0) { - break; + int count; + user_addr_t dst; + int error = 0; + struct if_bond_status_req * ibsr; + struct if_bond_status ibs; + bondport_ref port; + + ibsr = &(ibr_p->ibr_ibru.ibru_status); + if (ibsr->ibsr_version != IF_BOND_STATUS_REQ_VERSION) { + return EINVAL; + } + ibsr->ibsr_key = ifb->ifb_key; + ibsr->ibsr_mode = ifb->ifb_mode; + ibsr->ibsr_total = ifb->ifb_port_count; + dst = proc_is64bit(current_proc()) + ? ibsr->ibsr_ibsru.ibsru_buffer64 + : CAST_USER_ADDR_T(ibsr->ibsr_ibsru.ibsru_buffer); + if (dst == USER_ADDR_NULL) { + /* just want to know how many there are */ + goto done; + } + if (ibsr->ibsr_count < 0) { + return EINVAL; + } + count = (ifb->ifb_port_count < ibsr->ibsr_count) + ? 
ifb->ifb_port_count : ibsr->ibsr_count; + TAILQ_FOREACH(port, &ifb->ifb_port_list, po_port_list) { + struct if_bond_partner_state * ibps_p; + partner_state_ref ps; + + if (count == 0) { + break; + } + bzero(&ibs, sizeof(ibs)); + strlcpy(ibs.ibs_if_name, port->po_name, sizeof(ibs.ibs_if_name)); + ibs.ibs_port_priority = port->po_priority; + if (ifb->ifb_mode == IF_BOND_MODE_LACP) { + ibs.ibs_state = port->po_actor_state; + ibs.ibs_selected_state = port->po_selected; + ps = &port->po_partner_state; + ibps_p = &ibs.ibs_partner_state; + ibps_p->ibps_system = ps->ps_lag_info.li_system; + ibps_p->ibps_system_priority = ps->ps_lag_info.li_system_priority; + ibps_p->ibps_key = ps->ps_lag_info.li_key; + ibps_p->ibps_port = ps->ps_port; + ibps_p->ibps_port_priority = ps->ps_port_priority; + ibps_p->ibps_state = ps->ps_state; + } else { + /* fake the selected information */ + ibs.ibs_selected_state = bondport_flags_distributing(port) + ? SelectedState_SELECTED : SelectedState_UNSELECTED; + } + error = copyout(&ibs, dst, sizeof(ibs)); + if (error != 0) { + break; + } + dst += sizeof(ibs); + count--; } - dst += sizeof(ibs); - count--; - } - done: - if (error == 0) { - error = copyout(ibr_p, datap, sizeof(*ibr_p)); - } - else { - (void)copyout(ibr_p, datap, sizeof(*ibr_p)); - } - return (error); +done: + if (error == 0) { + error = copyout(ibr_p, datap, sizeof(*ibr_p)); + } else { + (void)copyout(ibr_p, datap, sizeof(*ibr_p)); + } + return error; } static int bond_set_promisc(__unused struct ifnet *ifp) { - int error = 0; - return (error); + int error = 0; + return error; } static void bond_get_mtu_values(ifbond_ref ifb, int * ret_min, int * ret_max) { - int mtu_min = 0; - int mtu_max = 0; - bondport_ref p; + int mtu_min = 0; + int mtu_max = 0; + bondport_ref p; - if (TAILQ_FIRST(&ifb->ifb_port_list) != NULL) { - mtu_min = IF_MINMTU; - } - TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { - struct ifdevmtu * devmtu_p = &p->po_devmtu; - - if (devmtu_p->ifdm_min > mtu_min) { - mtu_min = devmtu_p->ifdm_min; + if (TAILQ_FIRST(&ifb->ifb_port_list) != NULL) { + mtu_min = IF_MINMTU; } - if (mtu_max == 0 || devmtu_p->ifdm_max < mtu_max) { - mtu_max = devmtu_p->ifdm_max; + TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { + struct ifdevmtu * devmtu_p = &p->po_devmtu; + + if (devmtu_p->ifdm_min > mtu_min) { + mtu_min = devmtu_p->ifdm_min; + } + if (mtu_max == 0 || devmtu_p->ifdm_max < mtu_max) { + mtu_max = devmtu_p->ifdm_max; + } } - } - *ret_min = mtu_min; - *ret_max = mtu_max; - return; + *ret_min = mtu_min; + *ret_max = mtu_max; + return; } static int bond_set_mtu_on_ports(ifbond_ref ifb, int mtu) { - int error = 0; - bondport_ref p; + int error = 0; + bondport_ref p; - TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { - error = siocsifmtu(p->po_ifp, mtu); - if (error != 0) { - printf("if_bond(%s): SIOCSIFMTU %s failed, %d\n", - ifb->ifb_name, bondport_get_name(p), error); - break; + TAILQ_FOREACH(p, &ifb->ifb_port_list, po_port_list) { + error = siocsifmtu(p->po_ifp, mtu); + if (error != 0) { + printf("if_bond(%s): SIOCSIFMTU %s failed, %d\n", + ifb->ifb_name, bondport_get_name(p), error); + break; + } } - } - return (error); + return error; } static int bond_set_mtu(struct ifnet * ifp, int mtu, int isdevmtu) { - int error = 0; - ifbond_ref ifb; - int mtu_min; - int mtu_max; - int new_max; - int old_max; - - bond_lock(); - ifb = (ifbond_ref)ifnet_softc(ifp); - if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { - error = (ifb == NULL) ? 
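[Editorial aside, not part of the patch: bond_get_status() above clamps the caller's requested count against ifb_port_count, then copies one if_bond_status record per port, advancing the user destination pointer each iteration. A user-space analogue with memcpy standing in for copyout(); struct status_rec and copy_status are hypothetical.]

#include <stddef.h>
#include <string.h>

struct status_rec {
        int state;
};

/*
 * Copy at most dst_cap records and return the number written,
 * mirroring the "count = min(total, requested)" clamp above.  A real
 * copyout() can fail mid-loop; bond_get_status() stops on first error.
 */
static size_t
copy_status(struct status_rec *dst, size_t dst_cap,
    const struct status_rec *src, size_t src_len)
{
        size_t n = (src_len < dst_cap) ? src_len : dst_cap;
        size_t i;

        for (i = 0; i < n; i++) {
                memcpy(&dst[i], &src[i], sizeof(dst[i]));
        }
        return n;
}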
EOPNOTSUPP : EBUSY; - goto done; - } - ifbond_retain(ifb); - ifbond_wait(ifb, "bond_set_mtu"); - - /* check again */ - if (ifnet_softc(ifp) == NULL || ifbond_flags_if_detaching(ifb)) { - error = EBUSY; - goto signal_done; - } - bond_get_mtu_values(ifb, &mtu_min, &mtu_max); - if (mtu > mtu_max) { - error = EINVAL; - goto signal_done; - } - if (mtu < mtu_min && (isdevmtu == 0 || mtu != 0)) { - /* allow SIOCSIFALTMTU to set the mtu to 0 */ - error = EINVAL; - goto signal_done; - } - if (isdevmtu) { - new_max = (mtu > (int)ifnet_mtu(ifp)) ? mtu : (int)ifnet_mtu(ifp); - } - else { - new_max = (mtu > ifb->ifb_altmtu) ? mtu : ifb->ifb_altmtu; - } - old_max = ((int)ifnet_mtu(ifp) > ifb->ifb_altmtu) - ? (int)ifnet_mtu(ifp) : ifb->ifb_altmtu; - if (new_max != old_max) { - /* we can safely walk the list of port without the lock held */ - bond_unlock(); - error = bond_set_mtu_on_ports(ifb, new_max); - if (error != 0) { - /* try our best to back out of it */ - (void)bond_set_mtu_on_ports(ifb, old_max); - } + int error = 0; + ifbond_ref ifb; + int mtu_min; + int mtu_max; + int new_max; + int old_max; + bond_lock(); - } - if (error == 0) { + ifb = (ifbond_ref)ifnet_softc(ifp); + if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { + error = (ifb == NULL) ? EOPNOTSUPP : EBUSY; + goto done; + } + ifbond_retain(ifb); + ifbond_wait(ifb, "bond_set_mtu"); + + /* check again */ + if (ifnet_softc(ifp) == NULL || ifbond_flags_if_detaching(ifb)) { + error = EBUSY; + goto signal_done; + } + bond_get_mtu_values(ifb, &mtu_min, &mtu_max); + if (mtu > mtu_max) { + error = EINVAL; + goto signal_done; + } + if (mtu < mtu_min && (isdevmtu == 0 || mtu != 0)) { + /* allow SIOCSIFALTMTU to set the mtu to 0 */ + error = EINVAL; + goto signal_done; + } if (isdevmtu) { - ifb->ifb_altmtu = mtu; + new_max = (mtu > (int)ifnet_mtu(ifp)) ? mtu : (int)ifnet_mtu(ifp); + } else { + new_max = (mtu > ifb->ifb_altmtu) ? mtu : ifb->ifb_altmtu; } - else { - ifnet_set_mtu(ifp, mtu); + old_max = ((int)ifnet_mtu(ifp) > ifb->ifb_altmtu) + ? (int)ifnet_mtu(ifp) : ifb->ifb_altmtu; + if (new_max != old_max) { + /* we can safely walk the list of port without the lock held */ + bond_unlock(); + error = bond_set_mtu_on_ports(ifb, new_max); + if (error != 0) { + /* try our best to back out of it */ + (void)bond_set_mtu_on_ports(ifb, old_max); + } + bond_lock(); } - } + if (error == 0) { + if (isdevmtu) { + ifb->ifb_altmtu = mtu; + } else { + ifnet_set_mtu(ifp, mtu); + } + } + +signal_done: + ifbond_signal(ifb, "bond_set_mtu"); + ifbond_release(ifb); - signal_done: - ifbond_signal(ifb, "bond_set_mtu"); - ifbond_release(ifb); - - done: - bond_unlock(); - return (error); +done: + bond_unlock(); + return error; } static int bond_ioctl(struct ifnet *ifp, u_long cmd, void * data) { - int error = 0; - struct if_bond_req ibr; - struct ifaddr * ifa; - ifbond_ref ifb; - struct ifreq * ifr; - struct ifmediareq *ifmr; - struct ifnet * port_ifp = NULL; - user_addr_t user_addr; - - if (ifnet_type(ifp) != IFT_IEEE8023ADLAG) { - return (EOPNOTSUPP); - } - ifr = (struct ifreq *)data; - ifa = (struct ifaddr *)data; - - switch (cmd) { - case SIOCSIFADDR: - ifnet_set_flags(ifp, IFF_UP, IFF_UP); - break; - - case SIOCGIFMEDIA32: - case SIOCGIFMEDIA64: - bond_lock(); - ifb = (ifbond_ref)ifnet_softc(ifp); - if (ifb == NULL || ifbond_flags_if_detaching(ifb)) { - bond_unlock(); - return (ifb == NULL ? 
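[Editorial aside, not part of the patch: bond_set_mtu() above computes the effective device MTU as max(mtu, altmtu) before and after the change, touches the ports only when that maximum actually moves, and on failure makes a best-effort restore of the old maximum. A sketch of the arithmetic and rollback; apply_mtu is a hypothetical stand-in for bond_set_mtu_on_ports().]

/* stand-in for bond_set_mtu_on_ports(); always succeeds here */
static int apply_mtu(int mtu) { (void)mtu; return 0; }

static int
set_mtu_pair(int *mtu, int *altmtu, int new_val, int is_alt)
{
        int old_max = (*mtu > *altmtu) ? *mtu : *altmtu;
        int new_max = is_alt
            ? ((new_val > *mtu) ? new_val : *mtu)
            : ((new_val > *altmtu) ? new_val : *altmtu);
        int error = 0;

        if (new_max != old_max) {
                error = apply_mtu(new_max);
                if (error != 0) {
                        (void)apply_mtu(old_max);   /* best-effort undo */
                        return error;
                }
        }
        /* commit the requested value only after the ports accepted it */
        if (is_alt) {
                *altmtu = new_val;
        } else {
                *mtu = new_val;
        }
        return 0;
}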
 
 static int
 bond_ioctl(struct ifnet *ifp, u_long cmd, void * data)
 {
-    int error = 0;
-    struct if_bond_req ibr;
-    struct ifaddr * ifa;
-    ifbond_ref ifb;
-    struct ifreq * ifr;
-    struct ifmediareq *ifmr;
-    struct ifnet * port_ifp = NULL;
-    user_addr_t user_addr;
-
-    if (ifnet_type(ifp) != IFT_IEEE8023ADLAG) {
-        return (EOPNOTSUPP);
-    }
-    ifr = (struct ifreq *)data;
-    ifa = (struct ifaddr *)data;
-
-    switch (cmd) {
-    case SIOCSIFADDR:
-        ifnet_set_flags(ifp, IFF_UP, IFF_UP);
-        break;
-
-    case SIOCGIFMEDIA32:
-    case SIOCGIFMEDIA64:
-        bond_lock();
-        ifb = (ifbond_ref)ifnet_softc(ifp);
-        if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
-            bond_unlock();
-            return (ifb == NULL ? EOPNOTSUPP : EBUSY);
-        }
-        ifmr = (struct ifmediareq *)data;
-        ifmr->ifm_current = IFM_ETHER;
-        ifmr->ifm_mask = 0;
-        ifmr->ifm_status = IFM_AVALID;
-        ifmr->ifm_active = IFM_ETHER;
-        ifmr->ifm_count = 1;
-        if (ifb->ifb_mode == IF_BOND_MODE_LACP) {
-            if (ifb->ifb_active_lag != NULL) {
-                ifmr->ifm_active = ifb->ifb_active_lag->lag_active_media;
-                ifmr->ifm_status |= IFM_ACTIVE;
-            }
-        }
-        else if (ifb->ifb_distributing_count > 0) {
-            ifmr->ifm_active
-                = ifb->ifb_distributing_array[0]->po_media_info.mi_active;
-            ifmr->ifm_status |= IFM_ACTIVE;
-        }
-        bond_unlock();
-        user_addr = (cmd == SIOCGIFMEDIA64) ?
-            ((struct ifmediareq64 *)ifmr)->ifmu_ulist :
-            CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist);
-        if (user_addr != USER_ADDR_NULL) {
-            error = copyout(&ifmr->ifm_current,
-                user_addr,
-                sizeof(int));
+    int error = 0;
+    struct if_bond_req ibr;
+    struct ifaddr * ifa;
+    ifbond_ref ifb;
+    struct ifreq * ifr;
+    struct ifmediareq *ifmr;
+    struct ifnet * port_ifp = NULL;
+    user_addr_t user_addr;
+
+    if (ifnet_type(ifp) != IFT_IEEE8023ADLAG) {
+        return EOPNOTSUPP;
     }
-        break;
+    ifr = (struct ifreq *)data;
+    ifa = (struct ifaddr *)data;
 
-    case SIOCSIFMEDIA:
-        /* XXX send the SIFMEDIA to all children?  Or force autoselect? */
-        error = EINVAL;
-        break;
+    switch (cmd) {
+    case SIOCSIFADDR:
+        ifnet_set_flags(ifp, IFF_UP, IFF_UP);
+        break;
 
-    case SIOCGIFDEVMTU:
-        bond_lock();
-        ifb = (ifbond_ref)ifnet_softc(ifp);
-        if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
-            bond_unlock();
-            error = (ifb == NULL) ? EOPNOTSUPP : EBUSY;
-            break;
-        }
-        ifr->ifr_devmtu.ifdm_current = bond_device_mtu(ifp, ifb);
-        bond_get_mtu_values(ifb, &ifr->ifr_devmtu.ifdm_min,
-            &ifr->ifr_devmtu.ifdm_max);
-        bond_unlock();
-        break;
+    case SIOCGIFMEDIA32:
+    case SIOCGIFMEDIA64:
+        bond_lock();
+        ifb = (ifbond_ref)ifnet_softc(ifp);
+        if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
+            bond_unlock();
+            return ifb == NULL ? EOPNOTSUPP : EBUSY;
+        }
+        ifmr = (struct ifmediareq *)data;
+        ifmr->ifm_current = IFM_ETHER;
+        ifmr->ifm_mask = 0;
+        ifmr->ifm_status = IFM_AVALID;
+        ifmr->ifm_active = IFM_ETHER;
+        ifmr->ifm_count = 1;
+        if (ifb->ifb_mode == IF_BOND_MODE_LACP) {
+            if (ifb->ifb_active_lag != NULL) {
+                ifmr->ifm_active = ifb->ifb_active_lag->lag_active_media;
+                ifmr->ifm_status |= IFM_ACTIVE;
+            }
+        } else if (ifb->ifb_distributing_count > 0) {
+            ifmr->ifm_active
+                = ifb->ifb_distributing_array[0]->po_media_info.mi_active;
+            ifmr->ifm_status |= IFM_ACTIVE;
+        }
+        bond_unlock();
+        user_addr = (cmd == SIOCGIFMEDIA64) ?
+            ((struct ifmediareq64 *)ifmr)->ifmu_ulist :
+            CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist);
+        if (user_addr != USER_ADDR_NULL) {
+            error = copyout(&ifmr->ifm_current,
+                user_addr,
+                sizeof(int));
+        }
+        break;
 
-    case SIOCGIFALTMTU:
-        bond_lock();
-        ifb = (ifbond_ref)ifnet_softc(ifp);
-        if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
-            bond_unlock();
-            error = (ifb == NULL) ? EOPNOTSUPP : EBUSY;
-            break;
-        }
-        ifr->ifr_mtu = ifb->ifb_altmtu;
-        bond_unlock();
-        break;
+    case SIOCSIFMEDIA:
+        /* XXX send the SIFMEDIA to all children?  Or force autoselect? */
+        error = EINVAL;
+        break;
 
-    case SIOCSIFALTMTU:
-        error = bond_set_mtu(ifp, ifr->ifr_mtu, 1);
-        break;
+    case SIOCGIFDEVMTU:
+        bond_lock();
+        ifb = (ifbond_ref)ifnet_softc(ifp);
+        if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
+            bond_unlock();
+            error = (ifb == NULL) ? EOPNOTSUPP : EBUSY;
+            break;
+        }
+        ifr->ifr_devmtu.ifdm_current = bond_device_mtu(ifp, ifb);
+        bond_get_mtu_values(ifb, &ifr->ifr_devmtu.ifdm_min,
+            &ifr->ifr_devmtu.ifdm_max);
+        bond_unlock();
+        break;
 
-    case SIOCSIFMTU:
-        error = bond_set_mtu(ifp, ifr->ifr_mtu, 0);
-        break;
+    case SIOCGIFALTMTU:
+        bond_lock();
+        ifb = (ifbond_ref)ifnet_softc(ifp);
+        if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
+            bond_unlock();
+            error = (ifb == NULL) ? EOPNOTSUPP : EBUSY;
+            break;
+        }
+        ifr->ifr_mtu = ifb->ifb_altmtu;
+        bond_unlock();
+        break;
 
-    case SIOCSIFBOND:
-        user_addr = proc_is64bit(current_proc())
-            ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data);
-        error = copyin(user_addr, &ibr, sizeof(ibr));
-        if (error) {
-            break;
-        }
-        switch (ibr.ibr_op) {
-        case IF_BOND_OP_ADD_INTERFACE:
-        case IF_BOND_OP_REMOVE_INTERFACE:
-            port_ifp = ifunit(ibr.ibr_ibru.ibru_if_name);
-            if (port_ifp == NULL) {
-                error = ENXIO;
+    case SIOCSIFALTMTU:
+        error = bond_set_mtu(ifp, ifr->ifr_mtu, 1);
         break;
-            }
-            if (ifnet_type(port_ifp) != IFT_ETHER) {
-                error = EPROTONOSUPPORT;
+
+    case SIOCSIFMTU:
+        error = bond_set_mtu(ifp, ifr->ifr_mtu, 0);
         break;
-            }
-            break;
-        case IF_BOND_OP_SET_VERBOSE:
-        case IF_BOND_OP_SET_MODE:
-            break;
-        default:
-            error = EOPNOTSUPP;
-            break;
-        }
-        if (error != 0) {
-            break;
-        }
-        switch (ibr.ibr_op) {
-        case IF_BOND_OP_ADD_INTERFACE:
-            error = bond_add_interface(ifp, port_ifp);
-            break;
-        case IF_BOND_OP_REMOVE_INTERFACE:
-            bond_lock();
-            ifb = (ifbond_ref)ifnet_softc(ifp);
-            if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
-                bond_unlock();
-                return (ifb == NULL ? EOPNOTSUPP : EBUSY);
-            }
-            error = bond_remove_interface(ifb, port_ifp);
-            bond_unlock();
-            break;
-        case IF_BOND_OP_SET_VERBOSE:
-            bond_lock();
-            if (g_bond == NULL) {
+
+    case SIOCSIFBOND:
+        user_addr = proc_is64bit(current_proc())
+            ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data);
+        error = copyin(user_addr, &ibr, sizeof(ibr));
+        if (error) {
+            break;
+        }
+        switch (ibr.ibr_op) {
+        case IF_BOND_OP_ADD_INTERFACE:
+        case IF_BOND_OP_REMOVE_INTERFACE:
+            port_ifp = ifunit(ibr.ibr_ibru.ibru_if_name);
+            if (port_ifp == NULL) {
+                error = ENXIO;
+                break;
+            }
+            if (ifnet_type(port_ifp) != IFT_ETHER) {
+                error = EPROTONOSUPPORT;
+                break;
+            }
+            break;
+        case IF_BOND_OP_SET_VERBOSE:
+        case IF_BOND_OP_SET_MODE:
+            break;
+        default:
+            error = EOPNOTSUPP;
+            break;
+        }
+        if (error != 0) {
+            break;
+        }
+        switch (ibr.ibr_op) {
+        case IF_BOND_OP_ADD_INTERFACE:
+            error = bond_add_interface(ifp, port_ifp);
+            break;
+        case IF_BOND_OP_REMOVE_INTERFACE:
+            bond_lock();
+            ifb = (ifbond_ref)ifnet_softc(ifp);
+            if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
+                bond_unlock();
+                return ifb == NULL ? EOPNOTSUPP : EBUSY;
+            }
+            error = bond_remove_interface(ifb, port_ifp);
+            bond_unlock();
+            break;
+        case IF_BOND_OP_SET_VERBOSE:
+            bond_lock();
+            if (g_bond == NULL) {
+                bond_unlock();
+                error = ENXIO;
+                break;
+            }
+            g_bond->verbose = ibr.ibr_ibru.ibru_int_val;
+            bond_unlock();
+            break;
+        case IF_BOND_OP_SET_MODE:
+            switch (ibr.ibr_ibru.ibru_int_val) {
+            case IF_BOND_MODE_LACP:
+            case IF_BOND_MODE_STATIC:
+                break;
+            default:
+                error = EINVAL;
+                break;
+            }
+            if (error != 0) {
+                break;
+            }
+            error = bond_set_mode(ifp, ibr.ibr_ibru.ibru_int_val);
+            break;
+        }
+        break; /* SIOCSIFBOND */
+
+    case SIOCGIFBOND:
+        user_addr = proc_is64bit(current_proc())
+            ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data);
+        error = copyin(user_addr, &ibr, sizeof(ibr));
+        if (error) {
+            break;
+        }
+        switch (ibr.ibr_op) {
+        case IF_BOND_OP_GET_STATUS:
+            break;
+        default:
+            error = EOPNOTSUPP;
+            break;
+        }
+        if (error != 0) {
+            break;
+        }
+        bond_lock();
+        ifb = (ifbond_ref)ifnet_softc(ifp);
+        if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
+            bond_unlock();
+            return ifb == NULL ? EOPNOTSUPP : EBUSY;
+        }
+        switch (ibr.ibr_op) {
+        case IF_BOND_OP_GET_STATUS:
+            error = bond_get_status(ifb, &ibr, user_addr);
+            break;
+        }
         bond_unlock();
-                error = ENXIO;
-                break;
-            }
-            g_bond->verbose = ibr.ibr_ibru.ibru_int_val;
-            bond_unlock();
-            break;
-        case IF_BOND_OP_SET_MODE:
-            switch (ibr.ibr_ibru.ibru_int_val) {
-            case IF_BOND_MODE_LACP:
-            case IF_BOND_MODE_STATIC:
+        break; /* SIOCGIFBOND */
+
+    case SIOCSIFLLADDR:
+        error = EOPNOTSUPP;
         break;
-            default:
-                error = EINVAL;
+
+    case SIOCSIFFLAGS:
+        /* enable/disable promiscuous mode */
+        bond_lock();
+        error = bond_set_promisc(ifp);
+        bond_unlock();
         break;
-            }
-            if (error != 0) {
+
+    case SIOCADDMULTI:
+    case SIOCDELMULTI:
+        error = bond_setmulti(ifp);
         break;
-            }
-            error = bond_set_mode(ifp, ibr.ibr_ibru.ibru_int_val);
-            break;
-        }
-        break; /* SIOCSIFBOND */
-
-    case SIOCGIFBOND:
-        user_addr = proc_is64bit(current_proc())
-            ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data);
-        error = copyin(user_addr, &ibr, sizeof(ibr));
-        if (error) {
-            break;
-        }
-        switch (ibr.ibr_op) {
-        case IF_BOND_OP_GET_STATUS:
-            break;
     default:
-        error = EOPNOTSUPP;
-            break;
-        }
-        if (error != 0) {
-            break;
+        error = EOPNOTSUPP;
     }
-        bond_lock();
-        ifb = (ifbond_ref)ifnet_softc(ifp);
-        if (ifb == NULL || ifbond_flags_if_detaching(ifb)) {
-            bond_unlock();
-            return (ifb == NULL ? EOPNOTSUPP : EBUSY);
-        }
-        switch (ibr.ibr_op) {
-        case IF_BOND_OP_GET_STATUS:
-            error = bond_get_status(ifb, &ibr, user_addr);
-            break;
-        }
-        bond_unlock();
-        break; /* SIOCGIFBOND */
-
-    case SIOCSIFLLADDR:
-        error = EOPNOTSUPP;
-        break;
-
-    case SIOCSIFFLAGS:
-        /* enable/disable promiscuous mode */
-        bond_lock();
-        error = bond_set_promisc(ifp);
-        bond_unlock();
-        break;
-
-    case SIOCADDMULTI:
-    case SIOCDELMULTI:
-        error = bond_setmulti(ifp);
-        break;
-    default:
-        error = EOPNOTSUPP;
-    }
-    return error;
+    return error;
 }
 
-static void
+static void
 bond_if_free(struct ifnet * ifp)
 {
-    ifbond_ref ifb;
+    ifbond_ref ifb;
 
-    if (ifp == NULL) {
-        return;
-    }
-    bond_lock();
-    ifb = (ifbond_ref)ifnet_softc(ifp);
-    if (ifb == NULL) {
+    if (ifp == NULL) {
+        return;
+    }
+    bond_lock();
+    ifb = (ifbond_ref)ifnet_softc(ifp);
+    if (ifb == NULL) {
+        bond_unlock();
+        return;
+    }
+    ifbond_release(ifb);
     bond_unlock();
+    ifnet_release(ifp);
     return;
-    }
-    ifbond_release(ifb);
-    bond_unlock();
-    ifnet_release(ifp);
-    return;
 }
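The SIOCSIFBOND handler above validates ibr_op in a first pass (resolving and type-checking the port interface) before dispatching in a second switch. A sketch of how user space would reach it, under the assumption that the public net/if_bond_var.h definitions (struct if_bond_req, IF_BOND_OP_ADD_INTERFACE) match what the kernel copyin()s here; "bond0" and "en1" are hypothetical names and error handling is omitted:

    /* Editor's sketch, not part of the patch. */
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <net/if_bond_var.h>

    static int
    bond_add_port(int s, const char *bond_if, const char *port_if)
    {
        struct ifreq ifr;
        struct if_bond_req ibr;

        memset(&ifr, 0, sizeof(ifr));
        memset(&ibr, 0, sizeof(ibr));
        strlcpy(ifr.ifr_name, bond_if, sizeof(ifr.ifr_name));
        ibr.ibr_op = IF_BOND_OP_ADD_INTERFACE;
        strlcpy(ibr.ibr_ibru.ibru_if_name, port_if,
            sizeof(ibr.ibr_ibru.ibru_if_name));
        ifr.ifr_data = (caddr_t)&ibr;       /* kernel copyin()s this */
        return ioctl(s, SIOCSIFBOND, &ifr); /* s: any AF_INET dgram socket */
    }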
 
 static void
 bond_handle_event(struct ifnet * port_ifp, int event_code)
 {
-    struct ifnet * bond_ifp = NULL;
-    ifbond_ref ifb;
-    int old_distributing_count;
-    bondport_ref p;
-    struct media_info media_info = { 0, 0};
-
-    switch (event_code) {
-    case KEV_DL_IF_DETACHED:
-        break;
-    case KEV_DL_LINK_OFF:
-    case KEV_DL_LINK_ON:
-        media_info = interface_media_info(port_ifp);
-        break;
-    default:
-        return;
-    }
-    bond_lock();
-    p = bond_lookup_port(port_ifp);
-    if (p == NULL) {
-        bond_unlock();
-        return;
-    }
-    ifb = p->po_bond;
-    old_distributing_count = ifb->ifb_distributing_count;
-    switch (event_code) {
-    case KEV_DL_IF_DETACHED:
-        bond_remove_interface(ifb, p->po_ifp);
-        break;
-    case KEV_DL_LINK_OFF:
-    case KEV_DL_LINK_ON:
-        p->po_media_info = media_info;
-        if (p->po_enabled) {
-            bondport_link_status_changed(p);
-        }
-        break;
-    }
-    /* generate a link-event */
-    if (ifb->ifb_mode == IF_BOND_MODE_LACP) {
-        if (ifbond_selection(ifb)) {
-            event_code = (ifb->ifb_active_lag == NULL)
-                ? KEV_DL_LINK_OFF
-                : KEV_DL_LINK_ON;
-            /* XXX need to take a reference on bond_ifp */
-            bond_ifp = ifb->ifb_ifp;
-            ifb->ifb_last_link_event = event_code;
-        }
-        else {
-            event_code = (ifb->ifb_active_lag == NULL)
-                ? KEV_DL_LINK_OFF
-                : KEV_DL_LINK_ON;
-            if (event_code != ifb->ifb_last_link_event) {
-                if (g_bond->verbose) {
-                    timestamp_printf("%s: (event) generating LINK event\n",
-                        ifb->ifb_name);
-                }
-                bond_ifp = ifb->ifb_ifp;
-                ifb->ifb_last_link_event = event_code;
-            }
+    struct ifnet * bond_ifp = NULL;
+    ifbond_ref ifb;
+    int old_distributing_count;
+    bondport_ref p;
+    struct media_info media_info = { 0, 0};
+
+    switch (event_code) {
+    case KEV_DL_IF_DETACHED:
+        break;
+    case KEV_DL_LINK_OFF:
+    case KEV_DL_LINK_ON:
+        media_info = interface_media_info(port_ifp);
+        break;
+    default:
+        return;
     }
-    }
-    else {
-        /*
-         * if the distributing array membership changed from 0 <-> !0
-         * generate a link event
-         */
-        if (old_distributing_count == 0
-            && ifb->ifb_distributing_count != 0) {
-            event_code = KEV_DL_LINK_ON;
+    bond_lock();
+    p = bond_lookup_port(port_ifp);
+    if (p == NULL) {
+        bond_unlock();
+        return;
     }
-        else if (old_distributing_count != 0
-            && ifb->ifb_distributing_count == 0) {
-            event_code = KEV_DL_LINK_OFF;
+    ifb = p->po_bond;
+    old_distributing_count = ifb->ifb_distributing_count;
+    switch (event_code) {
+    case KEV_DL_IF_DETACHED:
+        bond_remove_interface(ifb, p->po_ifp);
+        break;
+    case KEV_DL_LINK_OFF:
+    case KEV_DL_LINK_ON:
+        p->po_media_info = media_info;
+        if (p->po_enabled) {
+            bondport_link_status_changed(p);
+        }
+        break;
     }
-        if (event_code != 0 && event_code != ifb->ifb_last_link_event) {
-            bond_ifp = ifb->ifb_ifp;
-            ifb->ifb_last_link_event = event_code;
+    /* generate a link-event */
+    if (ifb->ifb_mode == IF_BOND_MODE_LACP) {
+        if (ifbond_selection(ifb)) {
+            event_code = (ifb->ifb_active_lag == NULL)
+                ? KEV_DL_LINK_OFF
+                : KEV_DL_LINK_ON;
+            /* XXX need to take a reference on bond_ifp */
+            bond_ifp = ifb->ifb_ifp;
+            ifb->ifb_last_link_event = event_code;
+        } else {
+            event_code = (ifb->ifb_active_lag == NULL)
+                ? KEV_DL_LINK_OFF
+                : KEV_DL_LINK_ON;
+            if (event_code != ifb->ifb_last_link_event) {
+                if (g_bond->verbose) {
+                    timestamp_printf("%s: (event) generating LINK event\n",
+                        ifb->ifb_name);
+                }
+                bond_ifp = ifb->ifb_ifp;
+                ifb->ifb_last_link_event = event_code;
+            }
+        }
+    } else {
+        /*
+         * if the distributing array membership changed from 0 <-> !0
+         * generate a link event
+         */
+        if (old_distributing_count == 0
+            && ifb->ifb_distributing_count != 0) {
+            event_code = KEV_DL_LINK_ON;
+        } else if (old_distributing_count != 0
+            && ifb->ifb_distributing_count == 0) {
+            event_code = KEV_DL_LINK_OFF;
+        }
+        if (event_code != 0 && event_code != ifb->ifb_last_link_event) {
+            bond_ifp = ifb->ifb_ifp;
+            ifb->ifb_last_link_event = event_code;
+        }
     }
-    }
-    bond_unlock();
-    if (bond_ifp != NULL) {
-        interface_link_event(bond_ifp, event_code);
-    }
-    return;
+    bond_unlock();
+    if (bond_ifp != NULL) {
+        interface_link_event(bond_ifp, event_code);
+    }
+    return;
 }
 
 static void
 bond_event(struct ifnet * port_ifp, __unused protocol_family_t protocol,
-    const struct kev_msg * event)
+    const struct kev_msg * event)
 {
-    int event_code;
+    int event_code;
 
-    if (event->vendor_code != KEV_VENDOR_APPLE
-        || event->kev_class != KEV_NETWORK_CLASS
-        || event->kev_subclass != KEV_DL_SUBCLASS) {
+    if (event->vendor_code != KEV_VENDOR_APPLE
+        || event->kev_class != KEV_NETWORK_CLASS
+        || event->kev_subclass != KEV_DL_SUBCLASS) {
+        return;
+    }
+    event_code = event->event_code;
+    switch (event_code) {
+    case KEV_DL_LINK_OFF:
+    case KEV_DL_LINK_ON:
+        /* we only care about link status changes */
+        bond_handle_event(port_ifp, event_code);
+        break;
+    default:
+        break;
+    }
     return;
-    }
-    event_code = event->event_code;
-    switch (event_code) {
-    case KEV_DL_LINK_OFF:
-    case KEV_DL_LINK_ON:
-        /* we only care about link status changes */
-        bond_handle_event(port_ifp, event_code);
-        break;
-    default:
-        break;
-    }
-    return;
 }
 
 static errno_t
 bond_detached(ifnet_t port_ifp, __unused protocol_family_t protocol)
 {
-    bond_handle_event(port_ifp, KEV_DL_IF_DETACHED);
-    return (0);
+    bond_handle_event(port_ifp, KEV_DL_IF_DETACHED);
+    return 0;
 }
 
 static void
 interface_link_event(struct ifnet * ifp, u_int32_t event_code)
 {
-    struct {
-        struct kern_event_msg header;
-        u_int32_t unit;
-        char if_name[IFNAMSIZ];
-    } event;
-
-    bzero(&event, sizeof(event));
-    event.header.total_size = sizeof(event);
-    event.header.vendor_code = KEV_VENDOR_APPLE;
-    event.header.kev_class = KEV_NETWORK_CLASS;
-    event.header.kev_subclass = KEV_DL_SUBCLASS;
-    event.header.event_code = event_code;
-    event.header.event_data[0] = ifnet_family(ifp);
-    event.unit = (u_int32_t) ifnet_unit(ifp);
-    strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ);
-    ifnet_event(ifp, &event.header);
-    return;
+    struct {
+        struct kern_event_msg header;
+        u_int32_t unit;
+        char if_name[IFNAMSIZ];
+    } event;
+
+    bzero(&event, sizeof(event));
+    event.header.total_size = sizeof(event);
+    event.header.vendor_code = KEV_VENDOR_APPLE;
+    event.header.kev_class = KEV_NETWORK_CLASS;
+    event.header.kev_subclass = KEV_DL_SUBCLASS;
+    event.header.event_code = event_code;
+    event.header.event_data[0] = ifnet_family(ifp);
+    event.unit = (u_int32_t) ifnet_unit(ifp);
+    strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ);
+    ifnet_event(ifp, &event.header);
+    return;
 }
 
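Note that in static (non-LACP) mode, bond_handle_event() above only emits a link event on an edge of the distributing count: the first port coming up or the last port going down. A minimal sketch of that edge-trigger, under the same KEV_DL_* constants (the helper name is illustrative, not part of the patch):

    /* Editor's sketch, not part of the patch. */
    static int
    bond_link_event_for_counts(int old_count, int new_count)
    {
        if (old_count == 0 && new_count != 0) {
            return KEV_DL_LINK_ON;  /* first distributing port came up */
        }
        if (old_count != 0 && new_count == 0) {
            return KEV_DL_LINK_OFF; /* last distributing port went down */
        }
        return 0;                   /* no edge: no event is generated */
    }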
 /*
@@ -3061,7 +3057,7 @@ interface_link_event(struct ifnet * ifp, u_int32_t event_code)
  * Purpose:
  *   Attach a DLIL protocol to the interface.
  *
- * The ethernet demux special cases to always return PF_BOND if the 
+ * The ethernet demux special cases to always return PF_BOND if the
  * interface is bonded.  That means we receive all traffic from that
  * interface without passing any of the traffic to any other attached
  * protocol.
@@ -3069,20 +3065,20 @@ interface_link_event(struct ifnet * ifp, u_int32_t event_code)
 static int
 bond_attach_protocol(struct ifnet *ifp)
 {
-    int error;
-    struct ifnet_attach_proto_param reg;
-
-    bzero(&reg, sizeof(reg));
-    reg.input = bond_input;
-    reg.event = bond_event;
-    reg.detached = bond_detached;
-
-    error = ifnet_attach_protocol(ifp, PF_BOND, &reg);
-    if (error) {
-        printf("bond over %s%d: ifnet_attach_protocol failed, %d\n",
-            ifnet_name(ifp), ifnet_unit(ifp), error);
-    }
-    return (error);
+    int error;
+    struct ifnet_attach_proto_param reg;
+
+    bzero(&reg, sizeof(reg));
+    reg.input = bond_input;
+    reg.event = bond_event;
+    reg.detached = bond_detached;
+
+    error = ifnet_attach_protocol(ifp, PF_BOND, &reg);
+    if (error) {
+        printf("bond over %s%d: ifnet_attach_protocol failed, %d\n",
+            ifnet_name(ifp), ifnet_unit(ifp), error);
+    }
+    return error;
 }
 
 /*
@@ -3093,14 +3089,14 @@ bond_attach_protocol(struct ifnet *ifp)
 static int
 bond_detach_protocol(struct ifnet *ifp)
 {
-    int error;
+    int error;
 
-    error = ifnet_detach_protocol(ifp, PF_BOND);
-    if (error) {
-        printf("bond over %s%d: ifnet_detach_protocol failed, %d\n",
-            ifnet_name(ifp), ifnet_unit(ifp), error);
-    }
-    return (error);
+    error = ifnet_detach_protocol(ifp, PF_BOND);
+    if (error) {
+        printf("bond over %s%d: ifnet_detach_protocol failed, %d\n",
+            ifnet_name(ifp), ifnet_unit(ifp), error);
+    }
+    return error;
 }
 
 /*
@@ -3116,1603 +3112,1580 @@ extern void ether_detach_at(ifnet_t ifp, protocol_family_t protocol_family);
 __private_extern__ int
 bond_family_init(void)
 {
-    int error=0;
+    int error = 0;
 
-    error = proto_register_plumber(PF_INET, APPLE_IF_FAM_BOND,
-        ether_attach_inet,
-        ether_detach_inet);
-    if (error != 0) {
-        printf("bond: proto_register_plumber failed for AF_INET error=%d\n",
-            error);
-        goto done;
-    }
+    error = proto_register_plumber(PF_INET, APPLE_IF_FAM_BOND,
+        ether_attach_inet,
+        ether_detach_inet);
+    if (error != 0) {
+        printf("bond: proto_register_plumber failed for AF_INET error=%d\n",
+            error);
+        goto done;
+    }
 #if INET6
-    error = proto_register_plumber(PF_INET6, APPLE_IF_FAM_BOND,
-        ether_attach_inet6,
-        ether_detach_inet6);
-    if (error != 0) {
-        printf("bond: proto_register_plumber failed for AF_INET6 error=%d\n",
-            error);
-        goto done;
-    }
+    error = proto_register_plumber(PF_INET6, APPLE_IF_FAM_BOND,
+        ether_attach_inet6,
+        ether_detach_inet6);
+    if (error != 0) {
+        printf("bond: proto_register_plumber failed for AF_INET6 error=%d\n",
+            error);
+        goto done;
+    }
 #endif
-    error = bond_clone_attach();
-    if (error != 0) {
-        printf("bond: proto_register_plumber failed bond_clone_attach error=%d\n",
-            error);
-        goto done;
-    }
+    error = bond_clone_attach();
+    if (error != 0) {
+        printf("bond: proto_register_plumber failed bond_clone_attach error=%d\n",
+            error);
+        goto done;
+    }
 
- done:
-    return (error);
+done:
+    return error;
 }
 
 /**
- **
- ** LACP routines:
- **
- **/
+**
+** LACP routines:
+**
+**/
 
 /**
- ** LACP ifbond_list routines
- **/
+** LACP ifbond_list routines
+**/
 
 static bondport_ref
-ifbond_list_find_moved_port(bondport_ref rx_port,
-    const lacp_actor_partner_tlv_ref atlv)
-{
-    ifbond_ref bond;
-    bondport_ref p;
-    partner_state_ref ps;
-    LAG_info_ref ps_li;
-
-    TAILQ_FOREACH(bond, &g_bond->ifbond_list, ifb_bond_list) {
-        TAILQ_FOREACH(p, &bond->ifb_port_list, po_port_list) {
-
-            if (rx_port == p) {
-                /* no point in comparing against ourselves */
-                continue;
-            }
-            if (p->po_receive_state != ReceiveState_PORT_DISABLED) {
-                /* it's not clear that we should be checking this */
-                continue;
-            }
-            ps = &p->po_partner_state;
-            if (lacp_actor_partner_state_defaulted(ps->ps_state)) {
-                continue;
-            }
-            ps_li = &ps->ps_lag_info;
-            if (ps->ps_port == lacp_actor_partner_tlv_get_port(atlv)
-                && bcmp(&ps_li->li_system, atlv->lap_system,
-                    sizeof(ps_li->li_system)) == 0) {
-                if (g_bond->verbose) {
-                    timestamp_printf("System " EA_FORMAT
-                        " Port 0x%x moved from %s to %s\n",
-                        EA_LIST(&ps_li->li_system), ps->ps_port,
-                        bondport_get_name(p),
-                        bondport_get_name(rx_port));
+ifbond_list_find_moved_port(bondport_ref rx_port,
+    const lacp_actor_partner_tlv_ref atlv)
+{
+    ifbond_ref bond;
+    bondport_ref p;
+    partner_state_ref ps;
+    LAG_info_ref ps_li;
+
+    TAILQ_FOREACH(bond, &g_bond->ifbond_list, ifb_bond_list) {
+        TAILQ_FOREACH(p, &bond->ifb_port_list, po_port_list) {
+            if (rx_port == p) {
+                /* no point in comparing against ourselves */
+                continue;
+            }
+            if (p->po_receive_state != ReceiveState_PORT_DISABLED) {
+                /* it's not clear that we should be checking this */
+                continue;
+            }
+            ps = &p->po_partner_state;
+            if (lacp_actor_partner_state_defaulted(ps->ps_state)) {
+                continue;
+            }
+            ps_li = &ps->ps_lag_info;
+            if (ps->ps_port == lacp_actor_partner_tlv_get_port(atlv)
+                && bcmp(&ps_li->li_system, atlv->lap_system,
+                    sizeof(ps_li->li_system)) == 0) {
+                if (g_bond->verbose) {
+                    timestamp_printf("System " EA_FORMAT
+                        " Port 0x%x moved from %s to %s\n",
+                        EA_LIST(&ps_li->li_system), ps->ps_port,
+                        bondport_get_name(p),
+                        bondport_get_name(rx_port));
+                }
+                return p;
+            }
         }
-                return (p);
-            }
     }
-    }
-    return (NULL);
+    return NULL;
 }
 
 /**
- ** LACP ifbond, LAG routines
- **/
+** LACP ifbond, LAG routines
+**/
 
 static int
 ifbond_selection(ifbond_ref bond)
 {
-    int all_ports_ready = 0;
-    int active_media = 0;
-    LAG_ref lag = NULL;
-    int lag_changed = 0;
-    bondport_ref p;
-    int port_speed = 0;
-
-    lag = ifbond_find_best_LAG(bond, &active_media);
-    if (lag != bond->ifb_active_lag) {
-        if (bond->ifb_active_lag != NULL) {
-            ifbond_deactivate_LAG(bond, bond->ifb_active_lag);
-            bond->ifb_active_lag = NULL;
+    int all_ports_ready = 0;
+    int active_media = 0;
+    LAG_ref lag = NULL;
+    int lag_changed = 0;
+    bondport_ref p;
+    int port_speed = 0;
+
+    lag = ifbond_find_best_LAG(bond, &active_media);
+    if (lag != bond->ifb_active_lag) {
+        if (bond->ifb_active_lag != NULL) {
+            ifbond_deactivate_LAG(bond, bond->ifb_active_lag);
+            bond->ifb_active_lag = NULL;
+        }
+        bond->ifb_active_lag = lag;
+        if (lag != NULL) {
+            ifbond_activate_LAG(bond, lag, active_media);
+        }
+        lag_changed = 1;
+    } else if (lag != NULL) {
+        if (lag->lag_active_media != active_media) {
+            if (g_bond->verbose) {
+                timestamp_printf("LAG PORT SPEED CHANGED from %d to %d\n",
+                    link_speed(lag->lag_active_media),
+                    link_speed(active_media));
+            }
+            ifbond_deactivate_LAG(bond, lag);
+            ifbond_activate_LAG(bond, lag, active_media);
+            lag_changed = 1;
+        }
     }
-    bond->ifb_active_lag = lag;
     if (lag != NULL) {
-        ifbond_activate_LAG(bond, lag, active_media);
-    }
-    lag_changed = 1;
-    }
-    else if (lag != NULL) {
-        if (lag->lag_active_media != active_media) {
-            if (g_bond->verbose) {
-                timestamp_printf("LAG PORT SPEED CHANGED from %d to %d\n",
-                    link_speed(lag->lag_active_media),
-                    link_speed(active_media));
-            }
-            ifbond_deactivate_LAG(bond, lag);
-            ifbond_activate_LAG(bond, lag, active_media);
-            lag_changed = 1;
-        }
-    }
-    if (lag != NULL) {
-        port_speed = link_speed(active_media);
-        all_ports_ready = ifbond_all_ports_ready(bond);
-    }
-    TAILQ_FOREACH(p, &bond->ifb_port_list, po_port_list) {
-        if (lag != NULL && p->po_lag == lag
-            && media_speed(&p->po_media_info) == port_speed
-            && (p->po_mux_state == MuxState_DETACHED
-                || p->po_selected == SelectedState_SELECTED
-                || p->po_selected == SelectedState_STANDBY)
-            && bondport_aggregatable(p)) {
-            if (bond->ifb_max_active > 0) {
-                if (lag->lag_selected_port_count < bond->ifb_max_active) {
-                    if (p->po_selected == SelectedState_STANDBY
-                        || p->po_selected == SelectedState_UNSELECTED) {
-                        bondport_set_selected(p, SelectedState_SELECTED);
-                    }
-                }
-                else if (p->po_selected == SelectedState_UNSELECTED) {
-                    bondport_set_selected(p, SelectedState_STANDBY);
-                }
-            }
-            else {
-                bondport_set_selected(p, SelectedState_SELECTED);
-            }
-        }
-        if (bondport_flags_selected_changed(p)) {
-            bondport_flags_clear_selected_changed(p);
-            bondport_mux_machine(p, LAEventSelectedChange, NULL);
-        }
-        if (all_ports_ready
-            && bondport_flags_ready(p)
-            && p->po_mux_state == MuxState_WAITING) {
-            bondport_mux_machine(p, LAEventReady, NULL);
+        port_speed = link_speed(active_media);
+        all_ports_ready = ifbond_all_ports_ready(bond);
     }
-        bondport_transmit_machine(p, LAEventStart, NULL);
-    }
-    return (lag_changed);
+    TAILQ_FOREACH(p, &bond->ifb_port_list, po_port_list) {
+        if (lag != NULL && p->po_lag == lag
+            && media_speed(&p->po_media_info) == port_speed
+            && (p->po_mux_state == MuxState_DETACHED
+                || p->po_selected == SelectedState_SELECTED
+                || p->po_selected == SelectedState_STANDBY)
+            && bondport_aggregatable(p)) {
+            if (bond->ifb_max_active > 0) {
+                if (lag->lag_selected_port_count < bond->ifb_max_active) {
+                    if (p->po_selected == SelectedState_STANDBY
+                        || p->po_selected == SelectedState_UNSELECTED) {
+                        bondport_set_selected(p, SelectedState_SELECTED);
+                    }
+                } else if (p->po_selected == SelectedState_UNSELECTED) {
+                    bondport_set_selected(p, SelectedState_STANDBY);
+                }
+            } else {
+                bondport_set_selected(p, SelectedState_SELECTED);
+            }
+        }
+        if (bondport_flags_selected_changed(p)) {
+            bondport_flags_clear_selected_changed(p);
+            bondport_mux_machine(p, LAEventSelectedChange, NULL);
+        }
+        if (all_ports_ready
+            && bondport_flags_ready(p)
+            && p->po_mux_state == MuxState_WAITING) {
+            bondport_mux_machine(p, LAEventReady, NULL);
+        }
+        bondport_transmit_machine(p, LAEventStart, NULL);
+    }
+    return lag_changed;
 }
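The helper that follows, ifbond_find_best_LAG(), scores each candidate LAG by the number of aggregatable ports times their link speed, after clamping the count to ifb_max_active when a limit is set. A worked comparison under hypothetical speeds (this helper is illustrative, not part of the patch):

    /*
     * Editor's sketch, not part of the patch: two ports at 1000 Mbit/s
     * score 2000, three ports at 100 Mbit/s score 300, so the two-port
     * gigabit LAG is chosen even though it has fewer links.
     */
    static int
    lag_score(int aggregatable_count, int max_active, int speed_mbps)
    {
        if (max_active > 0 && aggregatable_count > max_active) {
            aggregatable_count = max_active; /* don't count extra links */
        }
        return aggregatable_count * speed_mbps;
    }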
 
 static LAG_ref
 ifbond_find_best_LAG(ifbond_ref bond, int * active_media)
 {
-    int best_active = 0;
-    LAG_ref best_lag = NULL;
-    int best_count = 0;
-    int best_speed = 0;
-    LAG_ref lag;
-
-    if (bond->ifb_active_lag != NULL) {
-        best_lag = bond->ifb_active_lag;
-        best_count = LAG_get_aggregatable_port_count(best_lag, &best_active);
-        if (bond->ifb_max_active > 0
-            && best_count > bond->ifb_max_active) {
-            best_count = bond->ifb_max_active;
-        }
-        best_speed = link_speed(best_active);
-    }
-    TAILQ_FOREACH(lag, &bond->ifb_lag_list, lag_list) {
-        int active;
-        int count;
-        int speed;
-
-        if (lag == bond->ifb_active_lag) {
-            /* we've already computed it */
-            continue;
-        }
-        count = LAG_get_aggregatable_port_count(lag, &active);
-        if (count == 0) {
-            continue;
-        }
-        if (bond->ifb_max_active > 0
-            && count > bond->ifb_max_active) {
-            /* if there's a limit, don't count extra links */
-            count = bond->ifb_max_active;
-        }
-        speed = link_speed(active);
-        if ((count * speed) > (best_count * best_speed)) {
-            best_count = count;
-            best_speed = speed;
-            best_active = active;
-            best_lag = lag;
-        }
-    }
-    if (best_count == 0) {
-        return (NULL);
-    }
-    *active_media = best_active;
-    return (best_lag);
+    int best_active = 0;
+    LAG_ref best_lag = NULL;
+    int best_count = 0;
+    int best_speed = 0;
+    LAG_ref lag;
+
+    if (bond->ifb_active_lag != NULL) {
+        best_lag = bond->ifb_active_lag;
+        best_count = LAG_get_aggregatable_port_count(best_lag, &best_active);
+        if (bond->ifb_max_active > 0
+            && best_count > bond->ifb_max_active) {
+            best_count = bond->ifb_max_active;
+        }
+        best_speed = link_speed(best_active);
+    }
+    TAILQ_FOREACH(lag, &bond->ifb_lag_list, lag_list) {
+        int active;
+        int count;
+        int speed;
+
+        if (lag == bond->ifb_active_lag) {
+            /* we've already computed it */
+            continue;
+        }
+        count = LAG_get_aggregatable_port_count(lag, &active);
+        if (count == 0) {
+            continue;
+        }
+        if (bond->ifb_max_active > 0
+            && count > bond->ifb_max_active) {
+            /* if there's a limit, don't count extra links */
+            count = bond->ifb_max_active;
+        }
+        speed = link_speed(active);
+        if ((count * speed) > (best_count * best_speed)) {
+            best_count = count;
+            best_speed = speed;
+            best_active = active;
+            best_lag = lag;
+        }
+    }
+    if (best_count == 0) {
+        return NULL;
+    }
+    *active_media = best_active;
+    return best_lag;
 }
 
 static void
 ifbond_deactivate_LAG(__unused ifbond_ref bond, LAG_ref lag)
 {
-    bondport_ref p;
+    bondport_ref p;
 
-    TAILQ_FOREACH(p, &lag->lag_port_list, po_lag_port_list) {
-        bondport_set_selected(p, SelectedState_UNSELECTED);
-    }
-    return;
+    TAILQ_FOREACH(p, &lag->lag_port_list, po_lag_port_list) {
+        bondport_set_selected(p, SelectedState_UNSELECTED);
+    }
+    return;
 }
 
 static void
 ifbond_activate_LAG(ifbond_ref bond, LAG_ref lag, int active_media)
 {
-    int need = 0;
-    bondport_ref p;
+    int need = 0;
+    bondport_ref p;
 
-    if (bond->ifb_max_active > 0) {
-        need = bond->ifb_max_active;
-    }
-    lag->lag_active_media = active_media;
-    TAILQ_FOREACH(p, &lag->lag_port_list, po_lag_port_list) {
-        if (bondport_aggregatable(p) == 0) {
-            bondport_set_selected(p, SelectedState_UNSELECTED);
-        }
-        else if (media_speed(&p->po_media_info) != link_speed(active_media)) {
-            bondport_set_selected(p, SelectedState_UNSELECTED);
+    if (bond->ifb_max_active > 0) {
+        need = bond->ifb_max_active;
     }
-        else if (p->po_mux_state == MuxState_DETACHED) {
-            if (bond->ifb_max_active > 0) {
-                if (need > 0) {
-                    bondport_set_selected(p, SelectedState_SELECTED);
-                    need--;
-                }
-                else {
-                    bondport_set_selected(p, SelectedState_STANDBY);
+    lag->lag_active_media = active_media;
+    TAILQ_FOREACH(p, &lag->lag_port_list, po_lag_port_list) {
+        if (bondport_aggregatable(p) == 0) {
+            bondport_set_selected(p, SelectedState_UNSELECTED);
+        } else if (media_speed(&p->po_media_info) != link_speed(active_media)) {
+            bondport_set_selected(p, SelectedState_UNSELECTED);
+        } else if (p->po_mux_state == MuxState_DETACHED) {
+            if (bond->ifb_max_active > 0) {
+                if (need > 0) {
+                    bondport_set_selected(p, SelectedState_SELECTED);
+                    need--;
+                } else {
+                    bondport_set_selected(p, SelectedState_STANDBY);
+                }
+            } else {
+                bondport_set_selected(p, SelectedState_SELECTED);
+            }
+        } else {
+            bondport_set_selected(p, SelectedState_UNSELECTED);
         }
-            }
-            else {
-                bondport_set_selected(p, SelectedState_SELECTED);
-            }
     }
-        else {
-            bondport_set_selected(p, SelectedState_UNSELECTED);
-        }
-    }
-    return;
+    return;
 }
 
 #if 0
 static void
 ifbond_set_max_active(ifbond_ref bond, int max_active)
 {
-    LAG_ref lag = bond->ifb_active_lag;
-
-    bond->ifb_max_active = max_active;
-    if (bond->ifb_max_active <= 0 || lag == NULL) {
-        return;
-    }
-    if (lag->lag_selected_port_count > bond->ifb_max_active) {
-        bondport_ref p;
-        int remove_count;
-
-        remove_count = lag->lag_selected_port_count - bond->ifb_max_active;
-        TAILQ_FOREACH(p, &lag->lag_port_list, po_lag_port_list) {
-            if (p->po_selected == SelectedState_SELECTED) {
-                bondport_set_selected(p, SelectedState_UNSELECTED);
-                remove_count--;
-                if (remove_count == 0) {
-                    break;
+    LAG_ref lag = bond->ifb_active_lag;
+
+    bond->ifb_max_active = max_active;
+    if (bond->ifb_max_active <= 0 || lag == NULL) {
+        return;
+    }
+    if (lag->lag_selected_port_count > bond->ifb_max_active) {
+        bondport_ref p;
+        int remove_count;
+
+        remove_count = lag->lag_selected_port_count - bond->ifb_max_active;
+        TAILQ_FOREACH(p, &lag->lag_port_list, po_lag_port_list) {
+            if (p->po_selected == SelectedState_SELECTED) {
+                bondport_set_selected(p, SelectedState_UNSELECTED);
+                remove_count--;
+                if (remove_count == 0) {
+                    break;
+                }
+            }
         }
-            }
     }
-    }
-    return;
+    return;
 }
 #endif
 
 static int
 ifbond_all_ports_ready(ifbond_ref bond)
 {
-    int ready = 0;
-    bondport_ref p;
+    int ready = 0;
+    bondport_ref p;
 
-    if (bond->ifb_active_lag == NULL) {
-        return (0);
-    }
-    TAILQ_FOREACH(p, &bond->ifb_active_lag->lag_port_list, po_lag_port_list) {
-        if (p->po_mux_state == MuxState_WAITING
-            && p->po_selected == SelectedState_SELECTED) {
-            if (bondport_flags_ready(p) == 0) {
-                return (0);
-            }
+    if (bond->ifb_active_lag == NULL) {
+        return 0;
+    }
+    TAILQ_FOREACH(p, &bond->ifb_active_lag->lag_port_list, po_lag_port_list) {
+        if (p->po_mux_state == MuxState_WAITING
+            && p->po_selected == SelectedState_SELECTED) {
+            if (bondport_flags_ready(p) == 0) {
+                return 0;
+            }
+        }
+        /* note that there was at least one ready port */
+        ready = 1;
     }
-        /* note that there was at least one ready port */
-        ready = 1;
-    }
-    return (ready);
+    return ready;
 }
 
 static int
 ifbond_all_ports_attached(ifbond_ref bond, bondport_ref this_port)
 {
-    bondport_ref p;
+    bondport_ref p;
 
-    TAILQ_FOREACH(p, &bond->ifb_port_list, po_port_list) {
-        if (this_port == p) {
-            continue;
-        }
-        if (bondport_flags_mux_attached(p) == 0) {
-            return (0);
+    TAILQ_FOREACH(p, &bond->ifb_port_list, po_port_list) {
+        if (this_port == p) {
+            continue;
+        }
+        if (bondport_flags_mux_attached(p) == 0) {
+            return 0;
+        }
     }
-    }
-    return (1);
+    return 1;
 }
 
 static LAG_ref
 ifbond_get_LAG_matching_port(ifbond_ref bond, bondport_ref p)
 {
-    LAG_ref lag;
+    LAG_ref lag;
 
-    TAILQ_FOREACH(lag, &bond->ifb_lag_list, lag_list) {
-        if (bcmp(&lag->lag_info, &p->po_partner_state.ps_lag_info,
-            sizeof(lag->lag_info)) == 0) {
-            return (lag);
+    TAILQ_FOREACH(lag, &bond->ifb_lag_list, lag_list) {
+        if (bcmp(&lag->lag_info, &p->po_partner_state.ps_lag_info,
+            sizeof(lag->lag_info)) == 0) {
+            return lag;
+        }
     }
-    }
-    return (NULL);
+    return NULL;
 }
 
 static int
 LAG_get_aggregatable_port_count(LAG_ref lag, int * active_media)
 {
-    int active;
-    int count;
-    bondport_ref p;
-    int speed;
-
-    active = 0;
-    count = 0;
-    speed = 0;
-    TAILQ_FOREACH(p, &lag->lag_port_list, po_lag_port_list) {
-        if (bondport_aggregatable(p)) {
-            int this_speed;
-
-            this_speed = media_speed(&p->po_media_info);
-            if (this_speed == 0) {
-                continue;
-            }
-            if (this_speed > speed) {
-                active = p->po_media_info.mi_active;
-                speed = this_speed;
-                count = 1;
-            }
-            else if (this_speed == speed) {
-                count++;
-            }
-        }
-    }
-    *active_media = active;
-    return (count);
+    int active;
+    int count;
+    bondport_ref p;
+    int speed;
+
+    active = 0;
+    count = 0;
+    speed = 0;
+    TAILQ_FOREACH(p, &lag->lag_port_list, po_lag_port_list) {
+        if (bondport_aggregatable(p)) {
+            int this_speed;
+
+            this_speed = media_speed(&p->po_media_info);
+            if (this_speed == 0) {
+                continue;
+            }
+            if (this_speed > speed) {
+                active = p->po_media_info.mi_active;
+                speed = this_speed;
+                count = 1;
+            } else if (this_speed == speed) {
+                count++;
+            }
+        }
+    }
+    *active_media = active;
+    return count;
}
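LAG_get_aggregatable_port_count() above counts only ports running at the single highest speed seen so far, restarting the count whenever a faster port appears. A standalone trace of that pattern with hypothetical speeds (illustrative, not part of the patch):

    /*
     * Editor's sketch, not part of the patch: for speeds {1000, 100, 1000, 0}
     * the walk ends with count == 2 at speed 1000; the 100 Mbit/s port and
     * the 0 (inactive media) port are never counted.
     */
    static int
    count_fastest(const int *speeds, int n)
    {
        int count = 0, speed = 0;
        for (int i = 0; i < n; i++) {
            if (speeds[i] == 0) {
                continue;          /* inactive media never counts */
            }
            if (speeds[i] > speed) {
                speed = speeds[i]; /* new fastest: restart the count */
                count = 1;
            } else if (speeds[i] == speed) {
                count++;
            }
        }
        return count;
    }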
 
 /**
- ** LACP bondport routines
- **/
+** LACP bondport routines
+**/
 
 static void
 bondport_link_status_changed(bondport_ref p)
 {
-    ifbond_ref bond = p->po_bond;
-
-    if (g_bond->verbose) {
-        if (media_active(&p->po_media_info)) {
-            timestamp_printf("[%s] Link UP %d Mbit/s %s duplex\n",
-                bondport_get_name(p),
-                media_speed(&p->po_media_info),
-                media_full_duplex(&p->po_media_info)
-                ? "full" : "half");
-        }
-        else {
-            timestamp_printf("[%s] Link DOWN\n", bondport_get_name(p));
-        }
-    }
-    if (bond->ifb_mode == IF_BOND_MODE_LACP) {
-        if (media_active(&p->po_media_info)
-            && bond->ifb_active_lag != NULL
-            && p->po_lag == bond->ifb_active_lag
-            && p->po_selected != SelectedState_UNSELECTED) {
-            if (media_speed(&p->po_media_info) != p->po_lag->lag_active_media) {
-                if (g_bond->verbose) {
-                    timestamp_printf("[%s] Port speed %d differs from LAG %d\n",
-                        bondport_get_name(p),
-                        media_speed(&p->po_media_info),
-                        link_speed(p->po_lag->lag_active_media));
+    ifbond_ref bond = p->po_bond;
+
+    if (g_bond->verbose) {
+        if (media_active(&p->po_media_info)) {
+            timestamp_printf("[%s] Link UP %d Mbit/s %s duplex\n",
+                bondport_get_name(p),
+                media_speed(&p->po_media_info),
+                media_full_duplex(&p->po_media_info)
+                ? "full" : "half");
+        } else {
+            timestamp_printf("[%s] Link DOWN\n", bondport_get_name(p));
         }
-                bondport_set_selected(p, SelectedState_UNSELECTED);
-            }
     }
-        bondport_receive_machine(p, LAEventMediaChange, NULL);
-        bondport_mux_machine(p, LAEventMediaChange, NULL);
-        bondport_periodic_transmit_machine(p, LAEventMediaChange, NULL);
-    }
-    else {
-        if (media_active(&p->po_media_info)) {
-            bondport_enable_distributing(p);
-        }
-        else {
-            bondport_disable_distributing(p);
+    if (bond->ifb_mode == IF_BOND_MODE_LACP) {
+        if (media_active(&p->po_media_info)
+            && bond->ifb_active_lag != NULL
+            && p->po_lag == bond->ifb_active_lag
+            && p->po_selected != SelectedState_UNSELECTED) {
+            if (media_speed(&p->po_media_info) != p->po_lag->lag_active_media) {
+                if (g_bond->verbose) {
+                    timestamp_printf("[%s] Port speed %d differs from LAG %d\n",
+                        bondport_get_name(p),
+                        media_speed(&p->po_media_info),
+                        link_speed(p->po_lag->lag_active_media));
+                }
+                bondport_set_selected(p, SelectedState_UNSELECTED);
+            }
+        }
+        bondport_receive_machine(p, LAEventMediaChange, NULL);
+        bondport_mux_machine(p, LAEventMediaChange, NULL);
+        bondport_periodic_transmit_machine(p, LAEventMediaChange, NULL);
+    } else {
+        if (media_active(&p->po_media_info)) {
+            bondport_enable_distributing(p);
+        } else {
+            bondport_disable_distributing(p);
+        }
     }
-    }
-    return;
+    return;
 }
 
 static int
 bondport_aggregatable(bondport_ref p)
 {
-    partner_state_ref ps = &p->po_partner_state;
-
-    if (lacp_actor_partner_state_aggregatable(p->po_actor_state) == 0
-        || lacp_actor_partner_state_aggregatable(ps->ps_state) == 0) {
-        /* we and/or our partner are individual */
-        return (0);
-    }
-    if (p->po_lag == NULL) {
-        return (0);
-    }
-    switch (p->po_receive_state) {
-    default:
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] Port is not selectable\n",
-                bondport_get_name(p));
+    partner_state_ref ps = &p->po_partner_state;
+
+    if (lacp_actor_partner_state_aggregatable(p->po_actor_state) == 0
+        || lacp_actor_partner_state_aggregatable(ps->ps_state) == 0) {
+        /* we and/or our partner are individual */
+        return 0;
     }
-        return (0);
-    case ReceiveState_CURRENT:
-    case ReceiveState_EXPIRED:
-        break;
-    }
-    return (1);
+    if (p->po_lag == NULL) {
+        return 0;
+    }
+    switch (p->po_receive_state) {
+    default:
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] Port is not selectable\n",
+                bondport_get_name(p));
+        }
+        return 0;
+    case ReceiveState_CURRENT:
+    case ReceiveState_EXPIRED:
+        break;
+    }
+    return 1;
 }
 
 static int
 bondport_matches_LAG(bondport_ref p, LAG_ref lag)
 {
-    LAG_info_ref lag_li;
-    partner_state_ref ps;
-    LAG_info_ref ps_li;
-
-    ps = &p->po_partner_state;
-    ps_li = &ps->ps_lag_info;
-    lag_li = &lag->lag_info;
-    if (ps_li->li_system_priority == lag_li->li_system_priority
-        && ps_li->li_key == lag_li->li_key
-        && (bcmp(&ps_li->li_system, &lag_li->li_system,
-            sizeof(lag_li->li_system))
+    LAG_info_ref lag_li;
+    partner_state_ref ps;
+    LAG_info_ref ps_li;
+
+    ps = &p->po_partner_state;
+    ps_li = &ps->ps_lag_info;
+    lag_li = &lag->lag_info;
+    if (ps_li->li_system_priority == lag_li->li_system_priority
+        && ps_li->li_key == lag_li->li_key
+        && (bcmp(&ps_li->li_system, &lag_li->li_system,
+            sizeof(lag_li->li_system))
         == 0)) {
-        return (1);
-    }
-    return (0);
+        return 1;
+    }
+    return 0;
 }
 
 static int
 bondport_remove_from_LAG(bondport_ref p)
 {
-    int active_lag = 0;
-    ifbond_ref bond = p->po_bond;
-    LAG_ref lag = p->po_lag;
-
-    if (lag == NULL) {
-        return (0);
-    }
-    TAILQ_REMOVE(&lag->lag_port_list, p, po_lag_port_list);
-    if (g_bond->verbose) {
-        timestamp_printf("[%s] Removed from LAG (0x%04x," EA_FORMAT
-            ",0x%04x)\n",
-            bondport_get_name(p),
-            lag->lag_info.li_system_priority,
-            EA_LIST(&lag->lag_info.li_system),
-            lag->lag_info.li_key);
-    }
-    p->po_lag = NULL;
-    lag->lag_port_count--;
-    if (lag->lag_port_count > 0) {
-        return (bond->ifb_active_lag == lag);
-    }
-    if (g_bond->verbose) {
-        timestamp_printf("Key 0x%04x: LAG Released (%04x," EA_FORMAT
-            ",0x%04x)\n",
-            bond->ifb_key,
-            lag->lag_info.li_system_priority,
-            EA_LIST(&lag->lag_info.li_system),
-            lag->lag_info.li_key);
-    }
-    TAILQ_REMOVE(&bond->ifb_lag_list, lag, lag_list);
-    if (bond->ifb_active_lag == lag) {
-        bond->ifb_active_lag = NULL;
-        active_lag = 1;
-    }
-    FREE(lag, M_BOND);
-    return (active_lag);
+    int active_lag = 0;
+    ifbond_ref bond = p->po_bond;
+    LAG_ref lag = p->po_lag;
+
+    if (lag == NULL) {
+        return 0;
+    }
+    TAILQ_REMOVE(&lag->lag_port_list, p, po_lag_port_list);
+    if (g_bond->verbose) {
+        timestamp_printf("[%s] Removed from LAG (0x%04x," EA_FORMAT
+            ",0x%04x)\n",
+            bondport_get_name(p),
+            lag->lag_info.li_system_priority,
+            EA_LIST(&lag->lag_info.li_system),
+            lag->lag_info.li_key);
+    }
+    p->po_lag = NULL;
+    lag->lag_port_count--;
+    if (lag->lag_port_count > 0) {
+        return bond->ifb_active_lag == lag;
+    }
+    if (g_bond->verbose) {
+        timestamp_printf("Key 0x%04x: LAG Released (%04x," EA_FORMAT
+            ",0x%04x)\n",
+            bond->ifb_key,
+            lag->lag_info.li_system_priority,
+            EA_LIST(&lag->lag_info.li_system),
+            lag->lag_info.li_key);
+    }
+    TAILQ_REMOVE(&bond->ifb_lag_list, lag, lag_list);
+    if (bond->ifb_active_lag == lag) {
+        bond->ifb_active_lag = NULL;
+        active_lag = 1;
+    }
+    FREE(lag, M_BOND);
+    return active_lag;
 }
 
 static void
 bondport_add_to_LAG(bondport_ref p, LAG_ref lag)
 {
-    TAILQ_INSERT_TAIL(&lag->lag_port_list, p, po_lag_port_list);
-    p->po_lag = lag;
-    lag->lag_port_count++;
-    if (g_bond->verbose) {
-        timestamp_printf("[%s] Added to LAG (0x%04x," EA_FORMAT "0x%04x)\n",
-            bondport_get_name(p),
-            lag->lag_info.li_system_priority,
-            EA_LIST(&lag->lag_info.li_system),
-            lag->lag_info.li_key);
-    }
-    return;
+    TAILQ_INSERT_TAIL(&lag->lag_port_list, p, po_lag_port_list);
+    p->po_lag = lag;
+    lag->lag_port_count++;
+    if (g_bond->verbose) {
+        timestamp_printf("[%s] Added to LAG (0x%04x," EA_FORMAT "0x%04x)\n",
+            bondport_get_name(p),
+            lag->lag_info.li_system_priority,
+            EA_LIST(&lag->lag_info.li_system),
+            lag->lag_info.li_key);
+    }
+    return;
 }
 
 static void
 bondport_assign_to_LAG(bondport_ref p)
 {
-    ifbond_ref bond = p->po_bond;
-    LAG_ref lag;
+    ifbond_ref bond = p->po_bond;
+    LAG_ref lag;
 
-    if (lacp_actor_partner_state_defaulted(p->po_actor_state)) {
-        bondport_remove_from_LAG(p);
-        return;
-    }
-    lag = p->po_lag;
-    if (lag != NULL) {
-        if (bondport_matches_LAG(p, lag)) {
-            /* still OK */
-            return;
-        }
-        bondport_remove_from_LAG(p);
-    }
-    lag = ifbond_get_LAG_matching_port(bond, p);
-    if (lag != NULL) {
+    if (lacp_actor_partner_state_defaulted(p->po_actor_state)) {
+        bondport_remove_from_LAG(p);
+        return;
+    }
+    lag = p->po_lag;
+    if (lag != NULL) {
+        if (bondport_matches_LAG(p, lag)) {
+            /* still OK */
+            return;
+        }
+        bondport_remove_from_LAG(p);
+    }
+    lag = ifbond_get_LAG_matching_port(bond, p);
+    if (lag != NULL) {
+        bondport_add_to_LAG(p, lag);
+        return;
+    }
+    lag = (LAG_ref)_MALLOC(sizeof(*lag), M_BOND, M_WAITOK);
+    TAILQ_INIT(&lag->lag_port_list);
+    lag->lag_port_count = 0;
+    lag->lag_selected_port_count = 0;
+    lag->lag_info = p->po_partner_state.ps_lag_info;
+    TAILQ_INSERT_TAIL(&bond->ifb_lag_list, lag, lag_list);
+    if (g_bond->verbose) {
+        timestamp_printf("Key 0x%04x: LAG Created (0x%04x," EA_FORMAT
+            ",0x%04x)\n",
+            bond->ifb_key,
+            lag->lag_info.li_system_priority,
+            EA_LIST(&lag->lag_info.li_system),
+            lag->lag_info.li_key);
+    }
     bondport_add_to_LAG(p, lag);
     return;
-    }
-    lag = (LAG_ref)_MALLOC(sizeof(*lag), M_BOND, M_WAITOK);
-    TAILQ_INIT(&lag->lag_port_list);
-    lag->lag_port_count = 0;
-    lag->lag_selected_port_count = 0;
-    lag->lag_info = p->po_partner_state.ps_lag_info;
-    TAILQ_INSERT_TAIL(&bond->ifb_lag_list, lag, lag_list);
-    if (g_bond->verbose) {
-        timestamp_printf("Key 0x%04x: LAG Created (0x%04x," EA_FORMAT
-            ",0x%04x)\n",
-            bond->ifb_key,
-            lag->lag_info.li_system_priority,
-            EA_LIST(&lag->lag_info.li_system),
-            lag->lag_info.li_key);
-    }
-    bondport_add_to_LAG(p, lag);
-    return;
 }
 
 static void
 bondport_receive_lacpdu(bondport_ref p, lacpdu_ref in_lacpdu_p)
 {
-    bondport_ref moved_port;
+    bondport_ref moved_port;
 
-    moved_port
-        = ifbond_list_find_moved_port(p, (const lacp_actor_partner_tlv_ref)
-        &in_lacpdu_p->la_actor_tlv);
-    if (moved_port != NULL) {
-        bondport_receive_machine(moved_port, LAEventPortMoved, NULL);
-    }
-    bondport_receive_machine(p, LAEventPacket, in_lacpdu_p);
-    bondport_mux_machine(p, LAEventPacket, in_lacpdu_p);
-    bondport_periodic_transmit_machine(p, LAEventPacket, in_lacpdu_p);
-    return;
+    moved_port
+        = ifbond_list_find_moved_port(p, (const lacp_actor_partner_tlv_ref)
+        &in_lacpdu_p->la_actor_tlv);
+    if (moved_port != NULL) {
+        bondport_receive_machine(moved_port, LAEventPortMoved, NULL);
+    }
+    bondport_receive_machine(p, LAEventPacket, in_lacpdu_p);
+    bondport_mux_machine(p, LAEventPacket, in_lacpdu_p);
+    bondport_periodic_transmit_machine(p, LAEventPacket, in_lacpdu_p);
+    return;
 }
 
-static void
+static void
 bondport_set_selected(bondport_ref p, SelectedState s)
 {
-    if (s != p->po_selected) {
-        ifbond_ref bond = p->po_bond;
-        LAG_ref lag = p->po_lag;
-
-        bondport_flags_set_selected_changed(p);
-        if (lag != NULL && bond->ifb_active_lag == lag) {
-            if (p->po_selected == SelectedState_SELECTED) {
-                lag->lag_selected_port_count--;
-            }
-            else if (s == SelectedState_SELECTED) {
-                lag->lag_selected_port_count++;
-            }
-            if (g_bond->verbose) {
-                timestamp_printf("[%s] SetSelected: %s (was %s)\n",
-                    bondport_get_name(p),
-                    SelectedStateString(s),
-                    SelectedStateString(p->po_selected));
-            }
-        }
-    }
-    p->po_selected = s;
-    return;
+    if (s != p->po_selected) {
+        ifbond_ref bond = p->po_bond;
+        LAG_ref lag = p->po_lag;
+
+        bondport_flags_set_selected_changed(p);
+        if (lag != NULL && bond->ifb_active_lag == lag) {
+            if (p->po_selected == SelectedState_SELECTED) {
+                lag->lag_selected_port_count--;
+            } else if (s == SelectedState_SELECTED) {
+                lag->lag_selected_port_count++;
+            }
+            if (g_bond->verbose) {
+                timestamp_printf("[%s] SetSelected: %s (was %s)\n",
+                    bondport_get_name(p),
+                    SelectedStateString(s),
+                    SelectedStateString(p->po_selected));
+            }
+        }
+    }
+    p->po_selected = s;
+    return;
 }
 
 /**
- ** Receive machine
- **/
+** Receive machine
+**/
 
 static void
 bondport_UpdateDefaultSelected(bondport_ref p)
 {
-    bondport_set_selected(p, SelectedState_UNSELECTED);
-    return;
+    bondport_set_selected(p, SelectedState_UNSELECTED);
+    return;
 }
 
 static void
 bondport_RecordDefault(bondport_ref p)
 {
-    bzero(&p->po_partner_state, sizeof(p->po_partner_state));
-    p->po_actor_state
-        = lacp_actor_partner_state_set_defaulted(p->po_actor_state);
-    bondport_assign_to_LAG(p);
-    return;
+    bzero(&p->po_partner_state, sizeof(p->po_partner_state));
+    p->po_actor_state
+        = lacp_actor_partner_state_set_defaulted(p->po_actor_state);
+    bondport_assign_to_LAG(p);
+    return;
 }
 
 static void
 bondport_UpdateSelected(bondport_ref p, lacpdu_ref lacpdu_p)
 {
-    lacp_actor_partner_tlv_ref actor;
-    partner_state_ref ps;
-    LAG_info_ref ps_li;
+    lacp_actor_partner_tlv_ref actor;
+    partner_state_ref ps;
+    LAG_info_ref ps_li;
 
-    /* compare the PDU's Actor information to our Partner state */
-    actor = (lacp_actor_partner_tlv_ref)lacpdu_p->la_actor_tlv;
-    ps = &p->po_partner_state;
-    ps_li = &ps->ps_lag_info;
-    if (lacp_actor_partner_tlv_get_port(actor) != ps->ps_port
-        || (lacp_actor_partner_tlv_get_port_priority(actor)
+    /* compare the PDU's Actor information to our Partner state */
+    actor = (lacp_actor_partner_tlv_ref)lacpdu_p->la_actor_tlv;
+    ps = &p->po_partner_state;
+    ps_li = &ps->ps_lag_info;
+    if (lacp_actor_partner_tlv_get_port(actor) != ps->ps_port
+        || (lacp_actor_partner_tlv_get_port_priority(actor)
         != ps->ps_port_priority)
-        || bcmp(actor->lap_system, &ps_li->li_system, sizeof(ps_li->li_system))
-        || (lacp_actor_partner_tlv_get_system_priority(actor)
+        || bcmp(actor->lap_system, &ps_li->li_system, sizeof(ps_li->li_system))
+        || (lacp_actor_partner_tlv_get_system_priority(actor)
         != ps_li->li_system_priority)
-        || (lacp_actor_partner_tlv_get_key(actor) != ps_li->li_key)
-        || (lacp_actor_partner_state_aggregatable(actor->lap_state)
+        || (lacp_actor_partner_tlv_get_key(actor) != ps_li->li_key)
+        || (lacp_actor_partner_state_aggregatable(actor->lap_state)
         != lacp_actor_partner_state_aggregatable(ps->ps_state))) {
-        bondport_set_selected(p, SelectedState_UNSELECTED);
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] updateSelected UNSELECTED\n",
-                bondport_get_name(p));
+        bondport_set_selected(p, SelectedState_UNSELECTED);
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] updateSelected UNSELECTED\n",
+                bondport_get_name(p));
+        }
     }
-    }
-    return;
+    return;
 }
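bondport_UpdateSelected() above implements the 802.3ad rule that any mismatch between the PDU's Actor information and the recorded Partner state unselects the port. A condensed sketch of that comparison shape (the struct and helper are hypothetical, for illustration only):

    /*
     * Editor's sketch, not part of the patch: a condensed view of the
     * fields compared above; the real code also bcmp()s the 6-byte
     * system identifier.
     */
    struct peer_id {
        int port, port_priority, system_priority, key, aggregatable;
    };

    static int
    peer_info_matches(const struct peer_id *pdu, const struct peer_id *rec)
    {
        return pdu->port == rec->port
            && pdu->port_priority == rec->port_priority
            && pdu->system_priority == rec->system_priority
            && pdu->key == rec->key
            && pdu->aggregatable == rec->aggregatable;
    }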
 
 static void
 bondport_RecordPDU(bondport_ref p, lacpdu_ref lacpdu_p)
 {
-    lacp_actor_partner_tlv_ref actor;
-    ifbond_ref bond = p->po_bond;
-    int lacp_maintain = 0;
-    partner_state_ref ps;
-    lacp_actor_partner_tlv_ref partner;
-    LAG_info_ref ps_li;
-
-    /* copy the PDU's Actor information into our Partner state */
-    actor = (lacp_actor_partner_tlv_ref)lacpdu_p->la_actor_tlv;
-    ps = &p->po_partner_state;
-    ps_li = &ps->ps_lag_info;
-    ps->ps_port = lacp_actor_partner_tlv_get_port(actor);
-    ps->ps_port_priority = lacp_actor_partner_tlv_get_port_priority(actor);
-    ps_li->li_system = *((lacp_system_ref)actor->lap_system);
-    ps_li->li_system_priority
-        = lacp_actor_partner_tlv_get_system_priority(actor);
-    ps_li->li_key = lacp_actor_partner_tlv_get_key(actor);
-    ps->ps_state = lacp_actor_partner_state_set_out_of_sync(actor->lap_state);
-    p->po_actor_state
-        = lacp_actor_partner_state_set_not_defaulted(p->po_actor_state);
-
-    /* compare the PDU's Partner information to our own information */
-    partner = (lacp_actor_partner_tlv_ref)lacpdu_p->la_partner_tlv;
-
-    if (lacp_actor_partner_state_active_lacp(ps->ps_state)
-        || (lacp_actor_partner_state_active_lacp(p->po_actor_state)
+    lacp_actor_partner_tlv_ref actor;
+    ifbond_ref bond = p->po_bond;
+    int lacp_maintain = 0;
+    partner_state_ref ps;
+    lacp_actor_partner_tlv_ref partner;
+    LAG_info_ref ps_li;
+
+    /* copy the PDU's Actor information into our Partner state */
+    actor = (lacp_actor_partner_tlv_ref)lacpdu_p->la_actor_tlv;
+    ps = &p->po_partner_state;
+    ps_li = &ps->ps_lag_info;
+    ps->ps_port = lacp_actor_partner_tlv_get_port(actor);
+    ps->ps_port_priority = lacp_actor_partner_tlv_get_port_priority(actor);
+    ps_li->li_system = *((lacp_system_ref)actor->lap_system);
+    ps_li->li_system_priority
+        = lacp_actor_partner_tlv_get_system_priority(actor);
+    ps_li->li_key = lacp_actor_partner_tlv_get_key(actor);
+    ps->ps_state = lacp_actor_partner_state_set_out_of_sync(actor->lap_state);
+    p->po_actor_state
+        = lacp_actor_partner_state_set_not_defaulted(p->po_actor_state);
+
+    /* compare the PDU's Partner information to our own information */
+    partner = (lacp_actor_partner_tlv_ref)lacpdu_p->la_partner_tlv;
+
+    if (lacp_actor_partner_state_active_lacp(ps->ps_state)
+        || (lacp_actor_partner_state_active_lacp(p->po_actor_state)
        && lacp_actor_partner_state_active_lacp(partner->lap_state))) {
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] recordPDU: LACP will maintain\n",
-                bondport_get_name(p));
-        }
-        lacp_maintain = 1;
-    }
-    if ((lacp_actor_partner_tlv_get_port(partner)
-        == bondport_get_index(p))
-        && lacp_actor_partner_tlv_get_port_priority(partner) == p->po_priority
-        && bcmp(partner->lap_system, &g_bond->system,
-            sizeof(g_bond->system)) == 0
-        && (lacp_actor_partner_tlv_get_system_priority(partner)
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] recordPDU: LACP will maintain\n",
+                bondport_get_name(p));
+        }
+        lacp_maintain = 1;
+    }
+    if ((lacp_actor_partner_tlv_get_port(partner)
+        == bondport_get_index(p))
+        && lacp_actor_partner_tlv_get_port_priority(partner) == p->po_priority
+        && bcmp(partner->lap_system, &g_bond->system,
+            sizeof(g_bond->system)) == 0
+        && (lacp_actor_partner_tlv_get_system_priority(partner)
        == g_bond->system_priority)
-        && lacp_actor_partner_tlv_get_key(partner) == bond->ifb_key
-        && (lacp_actor_partner_state_aggregatable(partner->lap_state)
+        && lacp_actor_partner_tlv_get_key(partner) == bond->ifb_key
+        && (lacp_actor_partner_state_aggregatable(partner->lap_state)
        == lacp_actor_partner_state_aggregatable(p->po_actor_state))
-        && lacp_actor_partner_state_in_sync(actor->lap_state)
-        && lacp_maintain) {
-        ps->ps_state = lacp_actor_partner_state_set_in_sync(ps->ps_state);
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] recordPDU: LACP partner in sync\n",
-                bondport_get_name(p));
-        }
-    }
-    else if (lacp_actor_partner_state_aggregatable(actor->lap_state) == 0
-        && lacp_actor_partner_state_in_sync(actor->lap_state)
-        && lacp_maintain) {
-        ps->ps_state = lacp_actor_partner_state_set_in_sync(ps->ps_state);
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] recordPDU: LACP partner in sync (ind)\n",
-                bondport_get_name(p));
+        && lacp_actor_partner_state_in_sync(actor->lap_state)
+        && lacp_maintain) {
+        ps->ps_state = lacp_actor_partner_state_set_in_sync(ps->ps_state);
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] recordPDU: LACP partner in sync\n",
+                bondport_get_name(p));
+        }
+    } else if (lacp_actor_partner_state_aggregatable(actor->lap_state) == 0
+        && lacp_actor_partner_state_in_sync(actor->lap_state)
+        && lacp_maintain) {
+        ps->ps_state = lacp_actor_partner_state_set_in_sync(ps->ps_state);
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] recordPDU: LACP partner in sync (ind)\n",
+                bondport_get_name(p));
+        }
     }
-    }
-    bondport_assign_to_LAG(p);
-    return;
+    bondport_assign_to_LAG(p);
+    return;
 }
 
 static __inline__ lacp_actor_partner_state
 updateNTTBits(lacp_actor_partner_state s)
 {
-    return (s & (LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY
-        | LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT
-        | LACP_ACTOR_PARTNER_STATE_AGGREGATION
-        | LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION));
+    return s & (LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY
+        | LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT
+        | LACP_ACTOR_PARTNER_STATE_AGGREGATION
+        | LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION);
 }
 
 static void
 bondport_UpdateNTT(bondport_ref p, lacpdu_ref lacpdu_p)
 {
-    ifbond_ref bond = p->po_bond;
-    lacp_actor_partner_tlv_ref partner;
+    ifbond_ref bond = p->po_bond;
+    lacp_actor_partner_tlv_ref partner;
 
-    /* compare the PDU's Actor information to our Partner state */
-    partner = (lacp_actor_partner_tlv_ref)lacpdu_p->la_partner_tlv;
-    if ((lacp_actor_partner_tlv_get_port(partner) != bondport_get_index(p))
-        || lacp_actor_partner_tlv_get_port_priority(partner) != p->po_priority
-        || bcmp(partner->lap_system, &g_bond->system, sizeof(g_bond->system))
-        || (lacp_actor_partner_tlv_get_system_priority(partner)
+    /* compare the PDU's Actor information to our Partner state */
+    partner = (lacp_actor_partner_tlv_ref)lacpdu_p->la_partner_tlv;
+    if ((lacp_actor_partner_tlv_get_port(partner) != bondport_get_index(p))
+        || lacp_actor_partner_tlv_get_port_priority(partner) != p->po_priority
+        || bcmp(partner->lap_system, &g_bond->system, sizeof(g_bond->system))
+        || (lacp_actor_partner_tlv_get_system_priority(partner)
        != g_bond->system_priority)
-        || lacp_actor_partner_tlv_get_key(partner) != bond->ifb_key
-        || (updateNTTBits(partner->lap_state)
+        || lacp_actor_partner_tlv_get_key(partner) != bond->ifb_key
+        || (updateNTTBits(partner->lap_state)
        != updateNTTBits(p->po_actor_state))) {
-        bondport_flags_set_ntt(p);
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] updateNTT: Need To Transmit\n",
-                bondport_get_name(p));
+        bondport_flags_set_ntt(p);
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] updateNTT: Need To Transmit\n",
+                bondport_get_name(p));
+        }
     }
-    }
-    return;
+    return;
 }
 
 static void
 bondport_AttachMuxToAggregator(bondport_ref p)
 {
-    if (bondport_flags_mux_attached(p) == 0) {
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] Attached Mux To Aggregator\n",
-                bondport_get_name(p));
+    if (bondport_flags_mux_attached(p) == 0) {
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] Attached Mux To Aggregator\n",
+                bondport_get_name(p));
+        }
+        bondport_flags_set_mux_attached(p);
     }
-        bondport_flags_set_mux_attached(p);
-    }
-    return;
+    return;
 }
 
 static void
 bondport_DetachMuxFromAggregator(bondport_ref p)
 {
-    if (bondport_flags_mux_attached(p)) {
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] Detached Mux From Aggregator\n",
-                bondport_get_name(p));
+    if (bondport_flags_mux_attached(p)) {
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] Detached Mux From Aggregator\n",
+                bondport_get_name(p));
+        }
+        bondport_flags_clear_mux_attached(p);
     }
-        bondport_flags_clear_mux_attached(p);
-    }
-    return;
+    return;
 }
 
 static void
 bondport_enable_distributing(bondport_ref p)
 {
-    if (bondport_flags_distributing(p) == 0) {
-        ifbond_ref bond = p->po_bond;
+    if (bondport_flags_distributing(p) == 0) {
+        ifbond_ref bond = p->po_bond;
 
-        bond->ifb_distributing_array[bond->ifb_distributing_count++] = p;
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] Distribution Enabled\n",
-                bondport_get_name(p));
+        bond->ifb_distributing_array[bond->ifb_distributing_count++] = p;
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] Distribution Enabled\n",
+                bondport_get_name(p));
+        }
+        bondport_flags_set_distributing(p);
     }
-        bondport_flags_set_distributing(p);
-    }
-    return;
+    return;
 }
 
 static void
 bondport_disable_distributing(bondport_ref p)
 {
-    if (bondport_flags_distributing(p)) {
-        bondport_ref * array;
-        ifbond_ref bond;
-        int count;
-        int i;
-
-        bond = p->po_bond;
-        array = bond->ifb_distributing_array;
-        count = bond->ifb_distributing_count;
-        for (i = 0; i < count; i++) {
-            if (array[i] == p) {
-                int j;
-
-                for (j = i; j < (count - 1); j++) {
-                    array[j] = array[j + 1];
+    if (bondport_flags_distributing(p)) {
+        bondport_ref * array;
+        ifbond_ref bond;
+        int count;
+        int i;
+
+        bond = p->po_bond;
+        array = bond->ifb_distributing_array;
+        count = bond->ifb_distributing_count;
+        for (i = 0; i < count; i++) {
+            if (array[i] == p) {
+                int j;
+
+                for (j = i; j < (count - 1); j++) {
+                    array[j] = array[j + 1];
+                }
+                break;
+            }
         }
-                break;
-            }
-        }
-        bond->ifb_distributing_count--;
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] Distribution Disabled\n",
-                bondport_get_name(p));
+        bond->ifb_distributing_count--;
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] Distribution Disabled\n",
+                bondport_get_name(p));
+        }
+        bondport_flags_clear_distributing(p);
     }
-        bondport_flags_clear_distributing(p);
-    }
-    return;
+    return;
 }
 
 /**
- ** Receive machine functions
- **/
+** Receive machine functions
+**/
 static void
 bondport_receive_machine_initialize(bondport_ref p, LAEvent event,
-    void * event_data);
+    void * event_data);
 static void
 bondport_receive_machine_port_disabled(bondport_ref p, LAEvent event,
-    void * event_data);
+    void * event_data);
 static void
 bondport_receive_machine_expired(bondport_ref p, LAEvent event,
-    void * event_data);
+    void * event_data);
 static void
 bondport_receive_machine_lacp_disabled(bondport_ref p, LAEvent event,
-    void * event_data);
+    void * event_data);
 static void
 bondport_receive_machine_defaulted(bondport_ref p, LAEvent event,
-    void * event_data);
+    void * event_data);
 static void
 bondport_receive_machine_current(bondport_ref p, LAEvent event,
-    void * event_data);
+    void * event_data);
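The dispatcher that follows, bondport_receive_machine_event(), is a switch over p->po_receive_state. Its shape is equivalent to a state-to-handler table; a hypothetical rendering (not part of the patch — the real code keeps the switch, and ReceiveState_none falls back to the INITIALIZE handler with LAEventStart):

    /* Editor's sketch, not part of the patch. */
    typedef void (*receive_handler)(bondport_ref, LAEvent, void *);

    static const receive_handler receive_handlers[] = {
        [ReceiveState_INITIALIZE]    = bondport_receive_machine_initialize,
        [ReceiveState_PORT_DISABLED] = bondport_receive_machine_port_disabled,
        [ReceiveState_EXPIRED]       = bondport_receive_machine_expired,
        [ReceiveState_LACP_DISABLED] = bondport_receive_machine_lacp_disabled,
        [ReceiveState_DEFAULTED]     = bondport_receive_machine_defaulted,
        [ReceiveState_CURRENT]       = bondport_receive_machine_current,
    };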
 
 static void
-bondport_receive_machine_event(bondport_ref p, LAEvent event,
-    void * event_data)
-{
-    switch (p->po_receive_state) {
-    case ReceiveState_none:
-        bondport_receive_machine_initialize(p, LAEventStart, NULL);
-        break;
-    case ReceiveState_INITIALIZE:
-        bondport_receive_machine_initialize(p, event, event_data);
-        break;
-    case ReceiveState_PORT_DISABLED:
-        bondport_receive_machine_port_disabled(p, event, event_data);
-        break;
-    case ReceiveState_EXPIRED:
-        bondport_receive_machine_expired(p, event, event_data);
-        break;
-    case ReceiveState_LACP_DISABLED:
-        bondport_receive_machine_lacp_disabled(p, event, event_data);
-        break;
-    case ReceiveState_DEFAULTED:
-        bondport_receive_machine_defaulted(p, event, event_data);
-        break;
-    case ReceiveState_CURRENT:
-        bondport_receive_machine_current(p, event, event_data);
-        break;
-    default:
-        break;
-    }
-    return;
+bondport_receive_machine_event(bondport_ref p, LAEvent event,
+    void * event_data)
+{
+    switch (p->po_receive_state) {
+    case ReceiveState_none:
+        bondport_receive_machine_initialize(p, LAEventStart, NULL);
+        break;
+    case ReceiveState_INITIALIZE:
+        bondport_receive_machine_initialize(p, event, event_data);
+        break;
+    case ReceiveState_PORT_DISABLED:
+        bondport_receive_machine_port_disabled(p, event, event_data);
+        break;
+    case ReceiveState_EXPIRED:
+        bondport_receive_machine_expired(p, event, event_data);
+        break;
+    case ReceiveState_LACP_DISABLED:
+        bondport_receive_machine_lacp_disabled(p, event, event_data);
+        break;
+    case ReceiveState_DEFAULTED:
+        bondport_receive_machine_defaulted(p, event, event_data);
+        break;
+    case ReceiveState_CURRENT:
+        bondport_receive_machine_current(p, event, event_data);
+        break;
+    default:
+        break;
+    }
+    return;
 }
 
 static void
 bondport_receive_machine(bondport_ref p, LAEvent event,
-    void * event_data)
-{
-    switch (event) {
-    case LAEventPacket:
-        if (p->po_receive_state != ReceiveState_LACP_DISABLED) {
-            bondport_receive_machine_current(p, event, event_data);
-        }
-        break;
-    case LAEventMediaChange:
-        if (media_active(&p->po_media_info)) {
-            switch (p->po_receive_state) {
-            case ReceiveState_PORT_DISABLED:
-            case ReceiveState_LACP_DISABLED:
-                bondport_receive_machine_port_disabled(p, LAEventMediaChange, NULL);
+    void * event_data)
+{
+    switch (event) {
+    case LAEventPacket:
+        if (p->po_receive_state != ReceiveState_LACP_DISABLED) {
+            bondport_receive_machine_current(p, event, event_data);
+        }
+        break;
+    case LAEventMediaChange:
+        if (media_active(&p->po_media_info)) {
+            switch (p->po_receive_state) {
+            case ReceiveState_PORT_DISABLED:
+            case ReceiveState_LACP_DISABLED:
+                bondport_receive_machine_port_disabled(p, LAEventMediaChange, NULL);
+                break;
+            default:
+                break;
+            }
+        } else {
+            bondport_receive_machine_port_disabled(p, LAEventStart, NULL);
+        }
         break;
-            default:
+    default:
+        bondport_receive_machine_event(p, event, event_data);
         break;
-            }
     }
-        else {
-            bondport_receive_machine_port_disabled(p, LAEventStart, NULL);
-        }
-        break;
-    default:
-        bondport_receive_machine_event(p, event, event_data);
-        break;
-    }
-    return;
+    return;
 }
 
 static void
 bondport_receive_machine_initialize(bondport_ref p, LAEvent event,
-    __unused void * event_data)
+    __unused void * event_data)
 {
-    switch (event) {
-    case LAEventStart:
-        devtimer_cancel(p->po_current_while_timer);
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] Receive INITIALIZE\n",
-                bondport_get_name(p));
+    switch (event) {
+    case LAEventStart:
+        devtimer_cancel(p->po_current_while_timer);
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] Receive INITIALIZE\n",
+                bondport_get_name(p));
+        }
+        p->po_receive_state = ReceiveState_INITIALIZE;
+        bondport_set_selected(p, SelectedState_UNSELECTED);
+        bondport_RecordDefault(p);
+        p->po_actor_state
+            = lacp_actor_partner_state_set_not_expired(p->po_actor_state);
+        bondport_receive_machine_port_disabled(p, LAEventStart, NULL);
+        break;
+    default:
+        break;
     }
-        p->po_receive_state = ReceiveState_INITIALIZE;
-        bondport_set_selected(p, SelectedState_UNSELECTED);
-        bondport_RecordDefault(p);
-        p->po_actor_state
-            = lacp_actor_partner_state_set_not_expired(p->po_actor_state);
-        bondport_receive_machine_port_disabled(p, LAEventStart, NULL);
-        break;
-    default:
-        break;
-    }
-    return;
+    return;
 }
 
 static void
 bondport_receive_machine_port_disabled(bondport_ref p, LAEvent event,
-    __unused void * event_data)
+    __unused void * event_data)
 {
-    partner_state_ref ps;
+    partner_state_ref ps;
 
-    switch (event) {
-    case LAEventStart:
-        devtimer_cancel(p->po_current_while_timer);
-        if (g_bond->verbose) {
-            timestamp_printf("[%s] Receive PORT_DISABLED\n",
-                bondport_get_name(p));
-        }
-        p->po_receive_state = ReceiveState_PORT_DISABLED;
-        ps = &p->po_partner_state;
-        ps->ps_state = lacp_actor_partner_state_set_out_of_sync(ps->ps_state);
+    switch (event) {
+    case LAEventStart:
+        devtimer_cancel(p->po_current_while_timer);
+        if (g_bond->verbose) {
+            timestamp_printf("[%s] Receive PORT_DISABLED\n",
+                bondport_get_name(p));
+        }
+        p->po_receive_state = ReceiveState_PORT_DISABLED;
+        ps = &p->po_partner_state;
+        ps->ps_state = lacp_actor_partner_state_set_out_of_sync(ps->ps_state);
     /* FALL THROUGH */
-    case LAEventMediaChange:
-        if (media_active(&p->po_media_info)) {
-            if (media_full_duplex(&p->po_media_info)) {
-                bondport_receive_machine_expired(p, LAEventStart, NULL);
-            }
-            else {
-                bondport_receive_machine_lacp_disabled(p, LAEventStart, NULL);
-            }
-        }
-        else if (p->po_selected == SelectedState_SELECTED) {
-            struct timeval tv;
-
-            if (g_bond->verbose) {
-                timestamp_printf("[%s] Receive PORT_DISABLED: "
-                    "link timer started\n",
-                    bondport_get_name(p));
-            }
-            tv.tv_sec = 1;
-            tv.tv_usec = 0;
-            devtimer_set_relative(p->po_current_while_timer, tv,
-                (devtimer_timeout_func)
-                bondport_receive_machine_port_disabled,
-                (void *)LAEventTimeout, NULL);
-        }
-        else if (p->po_selected == SelectedState_STANDBY) {
-            bondport_set_selected(p, SelectedState_UNSELECTED);
-        }
-        break;
-    case LAEventTimeout:
-        if (p->po_selected == SelectedState_SELECTED) {
-            if (g_bond->verbose) {
-                timestamp_printf("[%s] Receive PORT_DISABLED: "
-                    "link timer completed, marking UNSELECTED\n",
-                    bondport_get_name(p));
-            }
-            bondport_set_selected(p, SelectedState_UNSELECTED);
-        }
-        break;
-    case LAEventPortMoved:
-        bondport_receive_machine_initialize(p, LAEventStart, NULL);
-        break;
-    default:
-        break;
-    }
-    return;
+    case LAEventMediaChange:
+        if (media_active(&p->po_media_info)) {
+            if (media_full_duplex(&p->po_media_info)) {
+                bondport_receive_machine_expired(p, LAEventStart, NULL);
+            } else {
+                bondport_receive_machine_lacp_disabled(p, LAEventStart, NULL);
+            }
+        } else if (p->po_selected == SelectedState_SELECTED) {
+            struct timeval tv;
+
+            if (g_bond->verbose) {
+                timestamp_printf("[%s] Receive PORT_DISABLED: "
+                    "link timer started\n",
+                    bondport_get_name(p));
+            }
+            tv.tv_sec = 1;
+            tv.tv_usec = 0;
+            devtimer_set_relative(p->po_current_while_timer, tv,
+                (devtimer_timeout_func)
+                bondport_receive_machine_port_disabled,
+                (void *)LAEventTimeout, NULL);
+        } else if (p->po_selected == SelectedState_STANDBY) {
+            bondport_set_selected(p, SelectedState_UNSELECTED);
+        }
+        break;
+    case LAEventTimeout:
+        if (p->po_selected == SelectedState_SELECTED) {
+            if (g_bond->verbose) {
+                timestamp_printf("[%s] Receive PORT_DISABLED: "
+                    "link timer completed, marking UNSELECTED\n",
+                    bondport_get_name(p));
+            }
+            bondport_set_selected(p, SelectedState_UNSELECTED);
+        }
+        break;
+    case LAEventPortMoved:
+        bondport_receive_machine_initialize(p, LAEventStart, NULL);
+        break;
+    default:
+        break;
+
} + return; } static void bondport_receive_machine_expired(bondport_ref p, LAEvent event, - __unused void * event_data) + __unused void * event_data) { - lacp_actor_partner_state s; - struct timeval tv; + lacp_actor_partner_state s; + struct timeval tv; - switch (event) { - case LAEventStart: - devtimer_cancel(p->po_current_while_timer); - if (g_bond->verbose) { - timestamp_printf("[%s] Receive EXPIRED\n", - bondport_get_name(p)); - } - p->po_receive_state = ReceiveState_EXPIRED; - s = p->po_partner_state.ps_state; - s = lacp_actor_partner_state_set_out_of_sync(s); - s = lacp_actor_partner_state_set_short_timeout(s); - p->po_partner_state.ps_state = s; - p->po_actor_state - = lacp_actor_partner_state_set_expired(p->po_actor_state); - /* start current_while timer */ - tv.tv_sec = LACP_SHORT_TIMEOUT_TIME; - tv.tv_usec = 0; - devtimer_set_relative(p->po_current_while_timer, tv, - (devtimer_timeout_func) - bondport_receive_machine_expired, - (void *)LAEventTimeout, NULL); - - break; - case LAEventTimeout: - bondport_receive_machine_defaulted(p, LAEventStart, NULL); - break; - default: - break; - } - return; + switch (event) { + case LAEventStart: + devtimer_cancel(p->po_current_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Receive EXPIRED\n", + bondport_get_name(p)); + } + p->po_receive_state = ReceiveState_EXPIRED; + s = p->po_partner_state.ps_state; + s = lacp_actor_partner_state_set_out_of_sync(s); + s = lacp_actor_partner_state_set_short_timeout(s); + p->po_partner_state.ps_state = s; + p->po_actor_state + = lacp_actor_partner_state_set_expired(p->po_actor_state); + /* start current_while timer */ + tv.tv_sec = LACP_SHORT_TIMEOUT_TIME; + tv.tv_usec = 0; + devtimer_set_relative(p->po_current_while_timer, tv, + (devtimer_timeout_func) + bondport_receive_machine_expired, + (void *)LAEventTimeout, NULL); + + break; + case LAEventTimeout: + bondport_receive_machine_defaulted(p, LAEventStart, NULL); + break; + default: + break; + } + return; } static void bondport_receive_machine_lacp_disabled(bondport_ref p, LAEvent event, - __unused void * event_data) + __unused void * event_data) { - partner_state_ref ps; - switch (event) { - case LAEventStart: - devtimer_cancel(p->po_current_while_timer); - if (g_bond->verbose) { - timestamp_printf("[%s] Receive LACP_DISABLED\n", - bondport_get_name(p)); + partner_state_ref ps; + switch (event) { + case LAEventStart: + devtimer_cancel(p->po_current_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Receive LACP_DISABLED\n", + bondport_get_name(p)); + } + p->po_receive_state = ReceiveState_LACP_DISABLED; + bondport_set_selected(p, SelectedState_UNSELECTED); + bondport_RecordDefault(p); + ps = &p->po_partner_state; + ps->ps_state = lacp_actor_partner_state_set_individual(ps->ps_state); + p->po_actor_state + = lacp_actor_partner_state_set_not_expired(p->po_actor_state); + break; + default: + break; } - p->po_receive_state = ReceiveState_LACP_DISABLED; - bondport_set_selected(p, SelectedState_UNSELECTED); - bondport_RecordDefault(p); - ps = &p->po_partner_state; - ps->ps_state = lacp_actor_partner_state_set_individual(ps->ps_state); - p->po_actor_state - = lacp_actor_partner_state_set_not_expired(p->po_actor_state); - break; - default: - break; - } - return; + return; } static void bondport_receive_machine_defaulted(bondport_ref p, LAEvent event, - __unused void * event_data) + __unused void * event_data) { - switch (event) { - case LAEventStart: - devtimer_cancel(p->po_current_while_timer); - if (g_bond->verbose) { - 
timestamp_printf("[%s] Receive DEFAULTED\n", - bondport_get_name(p)); + switch (event) { + case LAEventStart: + devtimer_cancel(p->po_current_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Receive DEFAULTED\n", + bondport_get_name(p)); + } + p->po_receive_state = ReceiveState_DEFAULTED; + bondport_UpdateDefaultSelected(p); + bondport_RecordDefault(p); + p->po_actor_state + = lacp_actor_partner_state_set_not_expired(p->po_actor_state); + break; + default: + break; } - p->po_receive_state = ReceiveState_DEFAULTED; - bondport_UpdateDefaultSelected(p); - bondport_RecordDefault(p); - p->po_actor_state - = lacp_actor_partner_state_set_not_expired(p->po_actor_state); - break; - default: - break; - } - return; + return; } static void bondport_receive_machine_current(bondport_ref p, LAEvent event, - void * event_data) + void * event_data) { - partner_state_ref ps; - struct timeval tv; + partner_state_ref ps; + struct timeval tv; - switch (event) { - case LAEventPacket: - devtimer_cancel(p->po_current_while_timer); - if (g_bond->verbose) { - timestamp_printf("[%s] Receive CURRENT\n", - bondport_get_name(p)); + switch (event) { + case LAEventPacket: + devtimer_cancel(p->po_current_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Receive CURRENT\n", + bondport_get_name(p)); + } + p->po_receive_state = ReceiveState_CURRENT; + bondport_UpdateSelected(p, event_data); + bondport_UpdateNTT(p, event_data); + bondport_RecordPDU(p, event_data); + p->po_actor_state + = lacp_actor_partner_state_set_not_expired(p->po_actor_state); + bondport_assign_to_LAG(p); + /* start current_while timer */ + ps = &p->po_partner_state; + if (lacp_actor_partner_state_short_timeout(ps->ps_state)) { + tv.tv_sec = LACP_SHORT_TIMEOUT_TIME; + } else { + tv.tv_sec = LACP_LONG_TIMEOUT_TIME; + } + tv.tv_usec = 0; + devtimer_set_relative(p->po_current_while_timer, tv, + (devtimer_timeout_func) + bondport_receive_machine_current, + (void *)LAEventTimeout, NULL); + break; + case LAEventTimeout: + bondport_receive_machine_expired(p, LAEventStart, NULL); + break; + default: + break; } - p->po_receive_state = ReceiveState_CURRENT; - bondport_UpdateSelected(p, event_data); - bondport_UpdateNTT(p, event_data); - bondport_RecordPDU(p, event_data); - p->po_actor_state - = lacp_actor_partner_state_set_not_expired(p->po_actor_state); - bondport_assign_to_LAG(p); - /* start current_while timer */ - ps = &p->po_partner_state; - if (lacp_actor_partner_state_short_timeout(ps->ps_state)) { - tv.tv_sec = LACP_SHORT_TIMEOUT_TIME; - } - else { - tv.tv_sec = LACP_LONG_TIMEOUT_TIME; - } - tv.tv_usec = 0; - devtimer_set_relative(p->po_current_while_timer, tv, - (devtimer_timeout_func) - bondport_receive_machine_current, - (void *)LAEventTimeout, NULL); - break; - case LAEventTimeout: - bondport_receive_machine_expired(p, LAEventStart, NULL); - break; - default: - break; - } - return; + return; } /** - ** Periodic Transmission machine - **/ +** Periodic Transmission machine +**/ static void bondport_periodic_transmit_machine(bondport_ref p, LAEvent event, - __unused void * event_data) + __unused void * event_data) { - int interval; - partner_state_ref ps; - struct timeval tv; + int interval; + partner_state_ref ps; + struct timeval tv; - switch (event) { - case LAEventStart: - if (g_bond->verbose) { - timestamp_printf("[%s] periodic_transmit Start\n", - bondport_get_name(p)); - } - /* FALL THROUGH */ - case LAEventMediaChange: - devtimer_cancel(p->po_periodic_timer); - p->po_periodic_interval = 0; - if 
(media_active(&p->po_media_info) == 0 - || media_full_duplex(&p->po_media_info) == 0) { - break; - } - case LAEventPacket: - /* Neither Partner nor Actor are LACP Active, no periodic tx */ - ps = &p->po_partner_state; - if (lacp_actor_partner_state_active_lacp(p->po_actor_state) == 0 - && (lacp_actor_partner_state_active_lacp(ps->ps_state) - == 0)) { - devtimer_cancel(p->po_periodic_timer); - p->po_periodic_interval = 0; - break; - } - if (lacp_actor_partner_state_short_timeout(ps->ps_state)) { - interval = LACP_FAST_PERIODIC_TIME; - } - else { - interval = LACP_SLOW_PERIODIC_TIME; - } - if (p->po_periodic_interval != interval) { - if (interval == LACP_FAST_PERIODIC_TIME - && p->po_periodic_interval == LACP_SLOW_PERIODIC_TIME) { + switch (event) { + case LAEventStart: if (g_bond->verbose) { - timestamp_printf("[%s] periodic_transmit:" - " Need To Transmit\n", - bondport_get_name(p)); + timestamp_printf("[%s] periodic_transmit Start\n", + bondport_get_name(p)); + } + /* FALL THROUGH */ + case LAEventMediaChange: + devtimer_cancel(p->po_periodic_timer); + p->po_periodic_interval = 0; + if (media_active(&p->po_media_info) == 0 + || media_full_duplex(&p->po_media_info) == 0) { + break; + } + case LAEventPacket: + /* Neither Partner nor Actor are LACP Active, no periodic tx */ + ps = &p->po_partner_state; + if (lacp_actor_partner_state_active_lacp(p->po_actor_state) == 0 + && (lacp_actor_partner_state_active_lacp(ps->ps_state) + == 0)) { + devtimer_cancel(p->po_periodic_timer); + p->po_periodic_interval = 0; + break; + } + if (lacp_actor_partner_state_short_timeout(ps->ps_state)) { + interval = LACP_FAST_PERIODIC_TIME; + } else { + interval = LACP_SLOW_PERIODIC_TIME; + } + if (p->po_periodic_interval != interval) { + if (interval == LACP_FAST_PERIODIC_TIME + && p->po_periodic_interval == LACP_SLOW_PERIODIC_TIME) { + if (g_bond->verbose) { + timestamp_printf("[%s] periodic_transmit:" + " Need To Transmit\n", + bondport_get_name(p)); + } + bondport_flags_set_ntt(p); + } + p->po_periodic_interval = interval; + tv.tv_usec = 0; + tv.tv_sec = interval; + devtimer_set_relative(p->po_periodic_timer, tv, + (devtimer_timeout_func) + bondport_periodic_transmit_machine, + (void *)LAEventTimeout, NULL); + if (g_bond->verbose) { + timestamp_printf("[%s] Periodic Transmission Timer: %d secs\n", + bondport_get_name(p), + p->po_periodic_interval); + } } + break; + case LAEventTimeout: bondport_flags_set_ntt(p); - } - p->po_periodic_interval = interval; - tv.tv_usec = 0; - tv.tv_sec = interval; - devtimer_set_relative(p->po_periodic_timer, tv, - (devtimer_timeout_func) - bondport_periodic_transmit_machine, - (void *)LAEventTimeout, NULL); - if (g_bond->verbose) { - timestamp_printf("[%s] Periodic Transmission Timer: %d secs\n", - bondport_get_name(p), - p->po_periodic_interval); - } - } - break; - case LAEventTimeout: - bondport_flags_set_ntt(p); - tv.tv_sec = p->po_periodic_interval; - tv.tv_usec = 0; - devtimer_set_relative(p->po_periodic_timer, tv, (devtimer_timeout_func) - bondport_periodic_transmit_machine, - (void *)LAEventTimeout, NULL); - if (g_bond->verbose > 1) { - timestamp_printf("[%s] Periodic Transmission Timer: %d secs\n", - bondport_get_name(p), p->po_periodic_interval); - } - break; - default: - break; - } - return; + tv.tv_sec = p->po_periodic_interval; + tv.tv_usec = 0; + devtimer_set_relative(p->po_periodic_timer, tv, (devtimer_timeout_func) + bondport_periodic_transmit_machine, + (void *)LAEventTimeout, NULL); + if (g_bond->verbose > 1) { + timestamp_printf("[%s] Periodic Transmission Timer: 
%d secs\n", + bondport_get_name(p), p->po_periodic_interval); + } + break; + default: + break; + } + return; } /** - ** Transmit machine - **/ +** Transmit machine +**/ static int bondport_can_transmit(bondport_ref p, int32_t current_secs, - __darwin_time_t * next_secs) -{ - if (p->po_last_transmit_secs != current_secs) { - p->po_last_transmit_secs = current_secs; - p->po_n_transmit = 0; - } - if (p->po_n_transmit < LACP_PACKET_RATE) { - p->po_n_transmit++; - return (1); - } - if (next_secs != NULL) { - *next_secs = current_secs + 1; - } - return (0); + __darwin_time_t * next_secs) +{ + if (p->po_last_transmit_secs != current_secs) { + p->po_last_transmit_secs = current_secs; + p->po_n_transmit = 0; + } + if (p->po_n_transmit < LACP_PACKET_RATE) { + p->po_n_transmit++; + return 1; + } + if (next_secs != NULL) { + *next_secs = current_secs + 1; + } + return 0; } static void bondport_transmit_machine(bondport_ref p, LAEvent event, - void * event_data) -{ - lacp_actor_partner_tlv_ref aptlv; - lacp_collector_tlv_ref ctlv; - struct timeval next_tick_time = {0, 0}; - lacpdu_ref out_lacpdu_p; - packet_buffer_ref pkt; - partner_state_ref ps; - LAG_info_ref ps_li; - - switch (event) { - case LAEventTimeout: - case LAEventStart: - if (p->po_periodic_interval == 0 || bondport_flags_ntt(p) == 0) { - break; - } - if (event_data == TRANSMIT_MACHINE_TX_IMMEDIATE) { - /* we're going away, transmit the packet no matter what */ - } - else if (bondport_can_transmit(p, devtimer_current_secs(), - &next_tick_time.tv_sec) == 0) { - if (devtimer_enabled(p->po_transmit_timer)) { + void * event_data) +{ + lacp_actor_partner_tlv_ref aptlv; + lacp_collector_tlv_ref ctlv; + struct timeval next_tick_time = {0, 0}; + lacpdu_ref out_lacpdu_p; + packet_buffer_ref pkt; + partner_state_ref ps; + LAG_info_ref ps_li; + + switch (event) { + case LAEventTimeout: + case LAEventStart: + if (p->po_periodic_interval == 0 || bondport_flags_ntt(p) == 0) { + break; + } + if (event_data == TRANSMIT_MACHINE_TX_IMMEDIATE) { + /* we're going away, transmit the packet no matter what */ + } else if (bondport_can_transmit(p, devtimer_current_secs(), + &next_tick_time.tv_sec) == 0) { + if (devtimer_enabled(p->po_transmit_timer)) { + if (g_bond->verbose > 0) { + timestamp_printf("[%s] Transmit Timer Already Set\n", + bondport_get_name(p)); + } + } else { + devtimer_set_absolute(p->po_transmit_timer, next_tick_time, + (devtimer_timeout_func) + bondport_transmit_machine, + (void *)LAEventTimeout, NULL); + if (g_bond->verbose > 0) { + timestamp_printf("[%s] Transmit Timer Deadline %d secs\n", + bondport_get_name(p), + (int)next_tick_time.tv_sec); + } + } + break; + } if (g_bond->verbose > 0) { - timestamp_printf("[%s] Transmit Timer Already Set\n", - bondport_get_name(p)); - } - } - else { - devtimer_set_absolute(p->po_transmit_timer, next_tick_time, - (devtimer_timeout_func) - bondport_transmit_machine, - (void *)LAEventTimeout, NULL); + if (event == LAEventTimeout) { + timestamp_printf("[%s] Transmit Timer Complete\n", + bondport_get_name(p)); + } + } + pkt = packet_buffer_allocate(sizeof(*out_lacpdu_p)); + if (pkt == NULL) { + printf("[%s] Transmit: failed to allocate packet buffer\n", + bondport_get_name(p)); + break; + } + out_lacpdu_p = (lacpdu_ref)packet_buffer_byteptr(pkt); + bzero(out_lacpdu_p, sizeof(*out_lacpdu_p)); + out_lacpdu_p->la_subtype = IEEE8023AD_SLOW_PROTO_SUBTYPE_LACP; + out_lacpdu_p->la_version = LACPDU_VERSION_1; + + /* Actor */ + aptlv = (lacp_actor_partner_tlv_ref)out_lacpdu_p->la_actor_tlv; + aptlv->lap_tlv_type = 
LACPDU_TLV_TYPE_ACTOR; + aptlv->lap_length = LACPDU_ACTOR_TLV_LENGTH; + *((lacp_system_ref)aptlv->lap_system) = g_bond->system; + lacp_actor_partner_tlv_set_system_priority(aptlv, + g_bond->system_priority); + lacp_actor_partner_tlv_set_port_priority(aptlv, p->po_priority); + lacp_actor_partner_tlv_set_port(aptlv, bondport_get_index(p)); + lacp_actor_partner_tlv_set_key(aptlv, p->po_bond->ifb_key); + aptlv->lap_state = p->po_actor_state; + + /* Partner */ + aptlv = (lacp_actor_partner_tlv_ref)out_lacpdu_p->la_partner_tlv; + aptlv->lap_tlv_type = LACPDU_TLV_TYPE_PARTNER; + aptlv->lap_length = LACPDU_PARTNER_TLV_LENGTH; + ps = &p->po_partner_state; + ps_li = &ps->ps_lag_info; + lacp_actor_partner_tlv_set_port(aptlv, ps->ps_port); + lacp_actor_partner_tlv_set_port_priority(aptlv, ps->ps_port_priority); + *((lacp_system_ref)aptlv->lap_system) = ps_li->li_system; + lacp_actor_partner_tlv_set_system_priority(aptlv, + ps_li->li_system_priority); + lacp_actor_partner_tlv_set_key(aptlv, ps_li->li_key); + aptlv->lap_state = ps->ps_state; + + /* Collector */ + ctlv = (lacp_collector_tlv_ref)out_lacpdu_p->la_collector_tlv; + ctlv->lac_tlv_type = LACPDU_TLV_TYPE_COLLECTOR; + ctlv->lac_length = LACPDU_COLLECTOR_TLV_LENGTH; + + bondport_slow_proto_transmit(p, pkt); + bondport_flags_clear_ntt(p); if (g_bond->verbose > 0) { - timestamp_printf("[%s] Transmit Timer Deadline %d secs\n", - bondport_get_name(p), - (int)next_tick_time.tv_sec); - } - } - break; - } - if (g_bond->verbose > 0) { - if (event == LAEventTimeout) { - timestamp_printf("[%s] Transmit Timer Complete\n", - bondport_get_name(p)); - } - } - pkt = packet_buffer_allocate(sizeof(*out_lacpdu_p)); - if (pkt == NULL) { - printf("[%s] Transmit: failed to allocate packet buffer\n", - bondport_get_name(p)); - break; - } - out_lacpdu_p = (lacpdu_ref)packet_buffer_byteptr(pkt); - bzero(out_lacpdu_p, sizeof(*out_lacpdu_p)); - out_lacpdu_p->la_subtype = IEEE8023AD_SLOW_PROTO_SUBTYPE_LACP; - out_lacpdu_p->la_version = LACPDU_VERSION_1; - - /* Actor */ - aptlv = (lacp_actor_partner_tlv_ref)out_lacpdu_p->la_actor_tlv; - aptlv->lap_tlv_type = LACPDU_TLV_TYPE_ACTOR; - aptlv->lap_length = LACPDU_ACTOR_TLV_LENGTH; - *((lacp_system_ref)aptlv->lap_system) = g_bond->system; - lacp_actor_partner_tlv_set_system_priority(aptlv, - g_bond->system_priority); - lacp_actor_partner_tlv_set_port_priority(aptlv, p->po_priority); - lacp_actor_partner_tlv_set_port(aptlv, bondport_get_index(p)); - lacp_actor_partner_tlv_set_key(aptlv, p->po_bond->ifb_key); - aptlv->lap_state = p->po_actor_state; - - /* Partner */ - aptlv = (lacp_actor_partner_tlv_ref)out_lacpdu_p->la_partner_tlv; - aptlv->lap_tlv_type = LACPDU_TLV_TYPE_PARTNER; - aptlv->lap_length = LACPDU_PARTNER_TLV_LENGTH; - ps = &p->po_partner_state; - ps_li = &ps->ps_lag_info; - lacp_actor_partner_tlv_set_port(aptlv, ps->ps_port); - lacp_actor_partner_tlv_set_port_priority(aptlv, ps->ps_port_priority); - *((lacp_system_ref)aptlv->lap_system) = ps_li->li_system; - lacp_actor_partner_tlv_set_system_priority(aptlv, - ps_li->li_system_priority); - lacp_actor_partner_tlv_set_key(aptlv, ps_li->li_key); - aptlv->lap_state = ps->ps_state; - - /* Collector */ - ctlv = (lacp_collector_tlv_ref)out_lacpdu_p->la_collector_tlv; - ctlv->lac_tlv_type = LACPDU_TLV_TYPE_COLLECTOR; - ctlv->lac_length = LACPDU_COLLECTOR_TLV_LENGTH; - - bondport_slow_proto_transmit(p, pkt); - bondport_flags_clear_ntt(p); - if (g_bond->verbose > 0) { - timestamp_printf("[%s] Transmit Packet %d\n", - bondport_get_name(p), p->po_n_transmit); - } - break; - 
default: - break; - } - return; + timestamp_printf("[%s] Transmit Packet %d\n", + bondport_get_name(p), p->po_n_transmit); + } + break; + default: + break; + } + return; } /** - ** Mux machine functions - **/ +** Mux machine functions +**/ static void bondport_mux_machine_detached(bondport_ref p, LAEvent event, - void * event_data); + void * event_data); static void bondport_mux_machine_waiting(bondport_ref p, LAEvent event, - void * event_data); + void * event_data); static void bondport_mux_machine_attached(bondport_ref p, LAEvent event, - void * event_data); + void * event_data); static void bondport_mux_machine_collecting_distributing(bondport_ref p, LAEvent event, - void * event_data); + void * event_data); static void bondport_mux_machine(bondport_ref p, LAEvent event, void * event_data) { - switch (p->po_mux_state) { - case MuxState_none: - bondport_mux_machine_detached(p, LAEventStart, NULL); - break; - case MuxState_DETACHED: - bondport_mux_machine_detached(p, event, event_data); - break; - case MuxState_WAITING: - bondport_mux_machine_waiting(p, event, event_data); - break; - case MuxState_ATTACHED: - bondport_mux_machine_attached(p, event, event_data); - break; - case MuxState_COLLECTING_DISTRIBUTING: - bondport_mux_machine_collecting_distributing(p, event, event_data); - break; - default: - break; - } - return; + switch (p->po_mux_state) { + case MuxState_none: + bondport_mux_machine_detached(p, LAEventStart, NULL); + break; + case MuxState_DETACHED: + bondport_mux_machine_detached(p, event, event_data); + break; + case MuxState_WAITING: + bondport_mux_machine_waiting(p, event, event_data); + break; + case MuxState_ATTACHED: + bondport_mux_machine_attached(p, event, event_data); + break; + case MuxState_COLLECTING_DISTRIBUTING: + bondport_mux_machine_collecting_distributing(p, event, event_data); + break; + default: + break; + } + return; } static void bondport_mux_machine_detached(bondport_ref p, LAEvent event, - __unused void * event_data) + __unused void * event_data) { - lacp_actor_partner_state s; + lacp_actor_partner_state s; - switch (event) { - case LAEventStart: - devtimer_cancel(p->po_wait_while_timer); - if (g_bond->verbose) { - timestamp_printf("[%s] Mux DETACHED\n", - bondport_get_name(p)); + switch (event) { + case LAEventStart: + devtimer_cancel(p->po_wait_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Mux DETACHED\n", + bondport_get_name(p)); + } + p->po_mux_state = MuxState_DETACHED; + bondport_flags_clear_ready(p); + bondport_DetachMuxFromAggregator(p); + bondport_disable_distributing(p); + s = p->po_actor_state; + s = lacp_actor_partner_state_set_out_of_sync(s); + s = lacp_actor_partner_state_set_not_collecting(s); + s = lacp_actor_partner_state_set_not_distributing(s); + p->po_actor_state = s; + bondport_flags_set_ntt(p); + break; + case LAEventSelectedChange: + case LAEventPacket: + case LAEventMediaChange: + if (p->po_selected == SelectedState_SELECTED + || p->po_selected == SelectedState_STANDBY) { + bondport_mux_machine_waiting(p, LAEventStart, NULL); + } + break; + default: + break; } - p->po_mux_state = MuxState_DETACHED; - bondport_flags_clear_ready(p); - bondport_DetachMuxFromAggregator(p); - bondport_disable_distributing(p); - s = p->po_actor_state; - s = lacp_actor_partner_state_set_out_of_sync(s); - s = lacp_actor_partner_state_set_not_collecting(s); - s = lacp_actor_partner_state_set_not_distributing(s); - p->po_actor_state = s; - bondport_flags_set_ntt(p); - break; - case LAEventSelectedChange: - case LAEventPacket: - case 
LAEventMediaChange: - if (p->po_selected == SelectedState_SELECTED - || p->po_selected == SelectedState_STANDBY) { - bondport_mux_machine_waiting(p, LAEventStart, NULL); - } - break; - default: - break; - } - return; + return; } static void bondport_mux_machine_waiting(bondport_ref p, LAEvent event, - __unused void * event_data) + __unused void * event_data) { - struct timeval tv; + struct timeval tv; - switch (event) { - case LAEventStart: - devtimer_cancel(p->po_wait_while_timer); - if (g_bond->verbose) { - timestamp_printf("[%s] Mux WAITING\n", - bondport_get_name(p)); - } - p->po_mux_state = MuxState_WAITING; + switch (event) { + case LAEventStart: + devtimer_cancel(p->po_wait_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Mux WAITING\n", + bondport_get_name(p)); + } + p->po_mux_state = MuxState_WAITING; /* FALL THROUGH */ - default: - case LAEventSelectedChange: - if (p->po_selected == SelectedState_UNSELECTED) { - bondport_mux_machine_detached(p, LAEventStart, NULL); - break; - } - if (p->po_selected == SelectedState_STANDBY) { - devtimer_cancel(p->po_wait_while_timer); - /* wait until state changes to SELECTED */ - if (g_bond->verbose) { - timestamp_printf("[%s] Mux WAITING: Standby\n", - bondport_get_name(p)); - } - break; - } - if (bondport_flags_ready(p)) { - if (g_bond->verbose) { - timestamp_printf("[%s] Mux WAITING: Port is already ready\n", - bondport_get_name(p)); - } - break; - } - if (devtimer_enabled(p->po_wait_while_timer)) { - if (g_bond->verbose) { - timestamp_printf("[%s] Mux WAITING: Timer already set\n", - bondport_get_name(p)); - } - break; - } - if (ifbond_all_ports_attached(p->po_bond, p)) { - devtimer_cancel(p->po_wait_while_timer); - if (g_bond->verbose) { - timestamp_printf("[%s] Mux WAITING: No waiting\n", - bondport_get_name(p)); - } - bondport_flags_set_ready(p); - goto no_waiting; + default: + case LAEventSelectedChange: + if (p->po_selected == SelectedState_UNSELECTED) { + bondport_mux_machine_detached(p, LAEventStart, NULL); + break; + } + if (p->po_selected == SelectedState_STANDBY) { + devtimer_cancel(p->po_wait_while_timer); + /* wait until state changes to SELECTED */ + if (g_bond->verbose) { + timestamp_printf("[%s] Mux WAITING: Standby\n", + bondport_get_name(p)); + } + break; + } + if (bondport_flags_ready(p)) { + if (g_bond->verbose) { + timestamp_printf("[%s] Mux WAITING: Port is already ready\n", + bondport_get_name(p)); + } + break; + } + if (devtimer_enabled(p->po_wait_while_timer)) { + if (g_bond->verbose) { + timestamp_printf("[%s] Mux WAITING: Timer already set\n", + bondport_get_name(p)); + } + break; + } + if (ifbond_all_ports_attached(p->po_bond, p)) { + devtimer_cancel(p->po_wait_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Mux WAITING: No waiting\n", + bondport_get_name(p)); + } + bondport_flags_set_ready(p); + goto no_waiting; + } + if (g_bond->verbose) { + timestamp_printf("[%s] Mux WAITING: 2 seconds\n", + bondport_get_name(p)); + } + tv.tv_sec = LACP_AGGREGATE_WAIT_TIME; + tv.tv_usec = 0; + devtimer_set_relative(p->po_wait_while_timer, tv, + (devtimer_timeout_func) + bondport_mux_machine_waiting, + (void *)LAEventTimeout, NULL); + break; + case LAEventTimeout: + if (g_bond->verbose) { + timestamp_printf("[%s] Mux WAITING: Ready\n", + bondport_get_name(p)); + } + bondport_flags_set_ready(p); + break; + case LAEventReady: +no_waiting: + if (bondport_flags_ready(p)) { + if (g_bond->verbose) { + timestamp_printf("[%s] Mux WAITING: All Ports Ready\n", + bondport_get_name(p)); + } + 
bondport_mux_machine_attached(p, LAEventStart, NULL); + break; + } + break; } - if (g_bond->verbose) { - timestamp_printf("[%s] Mux WAITING: 2 seconds\n", - bondport_get_name(p)); - } - tv.tv_sec = LACP_AGGREGATE_WAIT_TIME; - tv.tv_usec = 0; - devtimer_set_relative(p->po_wait_while_timer, tv, - (devtimer_timeout_func) - bondport_mux_machine_waiting, - (void *)LAEventTimeout, NULL); - break; - case LAEventTimeout: - if (g_bond->verbose) { - timestamp_printf("[%s] Mux WAITING: Ready\n", - bondport_get_name(p)); - } - bondport_flags_set_ready(p); - break; - case LAEventReady: - no_waiting: - if (bondport_flags_ready(p)){ - if (g_bond->verbose) { - timestamp_printf("[%s] Mux WAITING: All Ports Ready\n", - bondport_get_name(p)); - } - bondport_mux_machine_attached(p, LAEventStart, NULL); - break; - } - break; - } - return; + return; } static void bondport_mux_machine_attached(bondport_ref p, LAEvent event, - __unused void * event_data) + __unused void * event_data) { - lacp_actor_partner_state s; + lacp_actor_partner_state s; - switch (event) { - case LAEventStart: - devtimer_cancel(p->po_wait_while_timer); - if (g_bond->verbose) { - timestamp_printf("[%s] Mux ATTACHED\n", - bondport_get_name(p)); - } - p->po_mux_state = MuxState_ATTACHED; - bondport_AttachMuxToAggregator(p); - s = p->po_actor_state; - s = lacp_actor_partner_state_set_in_sync(s); - s = lacp_actor_partner_state_set_not_collecting(s); - s = lacp_actor_partner_state_set_not_distributing(s); - bondport_disable_distributing(p); - p->po_actor_state = s; - bondport_flags_set_ntt(p); + switch (event) { + case LAEventStart: + devtimer_cancel(p->po_wait_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Mux ATTACHED\n", + bondport_get_name(p)); + } + p->po_mux_state = MuxState_ATTACHED; + bondport_AttachMuxToAggregator(p); + s = p->po_actor_state; + s = lacp_actor_partner_state_set_in_sync(s); + s = lacp_actor_partner_state_set_not_collecting(s); + s = lacp_actor_partner_state_set_not_distributing(s); + bondport_disable_distributing(p); + p->po_actor_state = s; + bondport_flags_set_ntt(p); /* FALL THROUGH */ - default: - switch (p->po_selected) { - case SelectedState_SELECTED: - s = p->po_partner_state.ps_state; - if (lacp_actor_partner_state_in_sync(s)) { - bondport_mux_machine_collecting_distributing(p, LAEventStart, - NULL); - } - break; default: - bondport_mux_machine_detached(p, LAEventStart, NULL); - break; + switch (p->po_selected) { + case SelectedState_SELECTED: + s = p->po_partner_state.ps_state; + if (lacp_actor_partner_state_in_sync(s)) { + bondport_mux_machine_collecting_distributing(p, LAEventStart, + NULL); + } + break; + default: + bondport_mux_machine_detached(p, LAEventStart, NULL); + break; + } + break; } - break; - } - return; + return; } static void -bondport_mux_machine_collecting_distributing(bondport_ref p, - LAEvent event, - __unused void * event_data) +bondport_mux_machine_collecting_distributing(bondport_ref p, + LAEvent event, + __unused void * event_data) { - lacp_actor_partner_state s; + lacp_actor_partner_state s; - switch (event) { - case LAEventStart: - devtimer_cancel(p->po_wait_while_timer); - if (g_bond->verbose) { - timestamp_printf("[%s] Mux COLLECTING_DISTRIBUTING\n", - bondport_get_name(p)); - } - p->po_mux_state = MuxState_COLLECTING_DISTRIBUTING; - bondport_enable_distributing(p); - s = p->po_actor_state; - s = lacp_actor_partner_state_set_collecting(s); - s = lacp_actor_partner_state_set_distributing(s); - p->po_actor_state = s; - bondport_flags_set_ntt(p); + switch (event) { + case 
LAEventStart: + devtimer_cancel(p->po_wait_while_timer); + if (g_bond->verbose) { + timestamp_printf("[%s] Mux COLLECTING_DISTRIBUTING\n", + bondport_get_name(p)); + } + p->po_mux_state = MuxState_COLLECTING_DISTRIBUTING; + bondport_enable_distributing(p); + s = p->po_actor_state; + s = lacp_actor_partner_state_set_collecting(s); + s = lacp_actor_partner_state_set_distributing(s); + p->po_actor_state = s; + bondport_flags_set_ntt(p); /* FALL THROUGH */ - default: - s = p->po_partner_state.ps_state; - if (lacp_actor_partner_state_in_sync(s) == 0) { - bondport_mux_machine_attached(p, LAEventStart, NULL); - break; - } - switch (p->po_selected) { - case SelectedState_UNSELECTED: - case SelectedState_STANDBY: - bondport_mux_machine_attached(p, LAEventStart, NULL); - break; default: - break; + s = p->po_partner_state.ps_state; + if (lacp_actor_partner_state_in_sync(s) == 0) { + bondport_mux_machine_attached(p, LAEventStart, NULL); + break; + } + switch (p->po_selected) { + case SelectedState_UNSELECTED: + case SelectedState_STANDBY: + bondport_mux_machine_attached(p, LAEventStart, NULL); + break; + default: + break; + } + break; } - break; - } - return; + return; } diff --git a/bsd/net/if_bond_internal.h b/bsd/net/if_bond_internal.h index 68e8901b7..27ff601a9 100644 --- a/bsd/net/if_bond_internal.h +++ b/bsd/net/if_bond_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _NET_IF_BOND_INTERNAL_H_ @@ -32,4 +32,3 @@ int bond_family_init(void); #endif /* KERNEL_PRIVATE */ #endif /* _NET_IF_BOND_INTERNAL_H_ */ - diff --git a/bsd/net/if_bond_var.h b/bsd/net/if_bond_var.h index f92a3f24c..ea8b41b4e 100644 --- a/bsd/net/if_bond_var.h +++ b/bsd/net/if_bond_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _NET_IF_BOND_VAR_H_ -#define _NET_IF_BOND_VAR_H_ +#define _NET_IF_BOND_VAR_H_ #include @@ -35,62 +35,62 @@ #pragma pack(4) -#define IF_BOND_OP_ADD_INTERFACE 1 -#define IF_BOND_OP_REMOVE_INTERFACE 2 -#define IF_BOND_OP_GET_STATUS 3 -#define IF_BOND_OP_SET_VERBOSE 4 -#define IF_BOND_OP_SET_MODE 5 +#define IF_BOND_OP_ADD_INTERFACE 1 +#define IF_BOND_OP_REMOVE_INTERFACE 2 +#define IF_BOND_OP_GET_STATUS 3 +#define IF_BOND_OP_SET_VERBOSE 4 +#define IF_BOND_OP_SET_MODE 5 -#define IF_BOND_MODE_LACP 0 -#define IF_BOND_MODE_STATIC 1 +#define IF_BOND_MODE_LACP 0 +#define IF_BOND_MODE_STATIC 1 struct if_bond_partner_state { - lacp_system ibps_system; - lacp_system_priority ibps_system_priority; - lacp_key ibps_key; - lacp_port ibps_port; - lacp_port_priority ibps_port_priority; - lacp_actor_partner_state ibps_state; - u_char ibps_reserved1; + lacp_system ibps_system; + lacp_system_priority ibps_system_priority; + lacp_key ibps_key; + lacp_port ibps_port; + lacp_port_priority ibps_port_priority; + lacp_actor_partner_state ibps_state; + u_char ibps_reserved1; }; -#define IF_BOND_STATUS_SELECTED_STATE_UNSELECTED 0 -#define IF_BOND_STATUS_SELECTED_STATE_SELECTED 1 -#define IF_BOND_STATUS_SELECTED_STATE_STANDBY 2 +#define IF_BOND_STATUS_SELECTED_STATE_UNSELECTED 0 +#define IF_BOND_STATUS_SELECTED_STATE_SELECTED 1 +#define IF_BOND_STATUS_SELECTED_STATE_STANDBY 2 struct if_bond_status { - char ibs_if_name[IFNAMSIZ]; /* interface name */ - lacp_port_priority ibs_port_priority; - lacp_actor_partner_state ibs_state; - u_char ibs_selected_state; - struct if_bond_partner_state ibs_partner_state; - u_int32_t ibs_reserved[8]; + char ibs_if_name[IFNAMSIZ];/* interface name */ + lacp_port_priority ibs_port_priority; + lacp_actor_partner_state ibs_state; + u_char ibs_selected_state; + struct if_bond_partner_state ibs_partner_state; + u_int32_t ibs_reserved[8]; }; -#define IF_BOND_STATUS_REQ_VERSION 1 +#define IF_BOND_STATUS_REQ_VERSION 1 struct if_bond_status_req { - int ibsr_version; /* version */ - int ibsr_total; /* returned number of struct if_bond_status's */ - int ibsr_count; /* number that will fit in ibsr_buffer */ - union { /* buffer to hold if_bond_status's */ - void * ibsru_buffer; - u_int64_t ibsru_buffer64; - } ibsr_ibsru; - lacp_key ibsr_key; /* returned */ - u_int8_t ibsr_mode; /* returned (IF_BOND_MODE_{LACP, STATIC}) */ - u_int8_t ibsr_reserved0; /* for future use */ - u_int32_t ibsr_reserved[3];/* for future use */ + int ibsr_version;/* version */ + int ibsr_total; /* returned number of struct if_bond_status's */ + int ibsr_count; /* number that will fit in ibsr_buffer */ + union { /* buffer to hold if_bond_status's */ + void * ibsru_buffer; + u_int64_t ibsru_buffer64; + } ibsr_ibsru; + lacp_key ibsr_key; /* returned */ + u_int8_t ibsr_mode; /* returned (IF_BOND_MODE_{LACP, STATIC}) */ + u_int8_t ibsr_reserved0;/* for future use */ + u_int32_t ibsr_reserved[3];/* for future use */ }; -#define ibsr_buffer ibsr_ibsru.ibsru_buffer +#define ibsr_buffer ibsr_ibsru.ibsru_buffer struct if_bond_req { - u_int32_t ibr_op; /* operation */ - 
union { - char ibru_if_name[IFNAMSIZ]; /* interface name */ - struct if_bond_status_req ibru_status; /* status information */ - int ibru_int_val; - } ibr_ibru; + u_int32_t ibr_op; /* operation */ + union { + char ibru_if_name[IFNAMSIZ]; /* interface name */ + struct if_bond_status_req ibru_status; /* status information */ + int ibru_int_val; + } ibr_ibru; }; #pragma pack() diff --git a/bsd/net/if_bridge.c b/bsd/net/if_bridge.c index 33ae35c66..ca97c63dd 100644 --- a/bsd/net/if_bridge.c +++ b/bsd/net/if_bridge.c @@ -105,7 +105,7 @@ #include -#define BRIDGE_DEBUG 1 +#define BRIDGE_DEBUG 1 #include #include @@ -144,7 +144,7 @@ #include /* for struct arpcom */ #include #include -#define _IP_VHL +#define _IP_VHL #include #include #if INET6 @@ -178,97 +178,97 @@ #if BRIDGE_DEBUG -#define BR_DBGF_LIFECYCLE 0x0001 -#define BR_DBGF_INPUT 0x0002 -#define BR_DBGF_OUTPUT 0x0004 -#define BR_DBGF_RT_TABLE 0x0008 -#define BR_DBGF_DELAYED_CALL 0x0010 -#define BR_DBGF_IOCTL 0x0020 -#define BR_DBGF_MBUF 0x0040 -#define BR_DBGF_MCAST 0x0080 -#define BR_DBGF_HOSTFILTER 0x0100 +#define BR_DBGF_LIFECYCLE 0x0001 +#define BR_DBGF_INPUT 0x0002 +#define BR_DBGF_OUTPUT 0x0004 +#define BR_DBGF_RT_TABLE 0x0008 +#define BR_DBGF_DELAYED_CALL 0x0010 +#define BR_DBGF_IOCTL 0x0020 +#define BR_DBGF_MBUF 0x0040 +#define BR_DBGF_MCAST 0x0080 +#define BR_DBGF_HOSTFILTER 0x0100 #endif /* BRIDGE_DEBUG */ -#define _BRIDGE_LOCK(_sc) lck_mtx_lock(&(_sc)->sc_mtx) -#define _BRIDGE_UNLOCK(_sc) lck_mtx_unlock(&(_sc)->sc_mtx) -#define BRIDGE_LOCK_ASSERT_HELD(_sc) \ +#define _BRIDGE_LOCK(_sc) lck_mtx_lock(&(_sc)->sc_mtx) +#define _BRIDGE_UNLOCK(_sc) lck_mtx_unlock(&(_sc)->sc_mtx) +#define BRIDGE_LOCK_ASSERT_HELD(_sc) \ LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED) -#define BRIDGE_LOCK_ASSERT_NOTHELD(_sc) \ +#define BRIDGE_LOCK_ASSERT_NOTHELD(_sc) \ LCK_MTX_ASSERT(&(_sc)->sc_mtx, LCK_MTX_ASSERT_NOTOWNED) #if BRIDGE_DEBUG -#define BR_LCKDBG_MAX 4 +#define BR_LCKDBG_MAX 4 -#define BRIDGE_LOCK(_sc) bridge_lock(_sc) -#define BRIDGE_UNLOCK(_sc) bridge_unlock(_sc) -#define BRIDGE_LOCK2REF(_sc, _err) _err = bridge_lock2ref(_sc) -#define BRIDGE_UNREF(_sc) bridge_unref(_sc) -#define BRIDGE_XLOCK(_sc) bridge_xlock(_sc) -#define BRIDGE_XDROP(_sc) bridge_xdrop(_sc) +#define BRIDGE_LOCK(_sc) bridge_lock(_sc) +#define BRIDGE_UNLOCK(_sc) bridge_unlock(_sc) +#define BRIDGE_LOCK2REF(_sc, _err) _err = bridge_lock2ref(_sc) +#define BRIDGE_UNREF(_sc) bridge_unref(_sc) +#define BRIDGE_XLOCK(_sc) bridge_xlock(_sc) +#define BRIDGE_XDROP(_sc) bridge_xdrop(_sc) #else /* !BRIDGE_DEBUG */ -#define BRIDGE_LOCK(_sc) _BRIDGE_LOCK(_sc) -#define BRIDGE_UNLOCK(_sc) _BRIDGE_UNLOCK(_sc) -#define BRIDGE_LOCK2REF(_sc, _err) do { \ - BRIDGE_LOCK_ASSERT_HELD(_sc); \ - if ((_sc)->sc_iflist_xcnt > 0) \ - (_err) = EBUSY; \ - else \ - (_sc)->sc_iflist_ref++; \ - _BRIDGE_UNLOCK(_sc); \ +#define BRIDGE_LOCK(_sc) _BRIDGE_LOCK(_sc) +#define BRIDGE_UNLOCK(_sc) _BRIDGE_UNLOCK(_sc) +#define BRIDGE_LOCK2REF(_sc, _err) do { \ + BRIDGE_LOCK_ASSERT_HELD(_sc); \ + if ((_sc)->sc_iflist_xcnt > 0) \ + (_err) = EBUSY; \ + else \ + (_sc)->sc_iflist_ref++; \ + _BRIDGE_UNLOCK(_sc); \ } while (0) -#define BRIDGE_UNREF(_sc) do { \ - _BRIDGE_LOCK(_sc); \ - (_sc)->sc_iflist_ref--; \ +#define BRIDGE_UNREF(_sc) do { \ + _BRIDGE_LOCK(_sc); \ + (_sc)->sc_iflist_ref--; \ if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0)) { \ - _BRIDGE_UNLOCK(_sc); \ - wakeup(&(_sc)->sc_cv); \ - } else \ - _BRIDGE_UNLOCK(_sc); \ + _BRIDGE_UNLOCK(_sc); \ + wakeup(&(_sc)->sc_cv); \ + } else \ + 
_BRIDGE_UNLOCK(_sc); \ } while (0) -#define BRIDGE_XLOCK(_sc) do { \ - BRIDGE_LOCK_ASSERT_HELD(_sc); \ - (_sc)->sc_iflist_xcnt++; \ - while ((_sc)->sc_iflist_ref > 0) \ - msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO, \ - "BRIDGE_XLOCK", NULL); \ +#define BRIDGE_XLOCK(_sc) do { \ + BRIDGE_LOCK_ASSERT_HELD(_sc); \ + (_sc)->sc_iflist_xcnt++; \ + while ((_sc)->sc_iflist_ref > 0) \ + msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO, \ + "BRIDGE_XLOCK", NULL); \ } while (0) -#define BRIDGE_XDROP(_sc) do { \ - BRIDGE_LOCK_ASSERT_HELD(_sc); \ - (_sc)->sc_iflist_xcnt--; \ +#define BRIDGE_XDROP(_sc) do { \ + BRIDGE_LOCK_ASSERT_HELD(_sc); \ + (_sc)->sc_iflist_xcnt--; \ } while (0) #endif /* BRIDGE_DEBUG */ #if NBPFILTER > 0 -#define BRIDGE_BPF_MTAP_INPUT(sc, m) \ - if (sc->sc_bpf_input) \ - bridge_bpf_input(sc->sc_ifp, m) +#define BRIDGE_BPF_MTAP_INPUT(sc, m) \ + if (sc->sc_bpf_input) \ + bridge_bpf_input(sc->sc_ifp, m) #else /* NBPFILTER */ -#define BRIDGE_BPF_MTAP_INPUT(ifp, m) +#define BRIDGE_BPF_MTAP_INPUT(ifp, m) #endif /* NBPFILTER */ /* * Initial size of the route hash table. Must be a power of two. */ #ifndef BRIDGE_RTHASH_SIZE -#define BRIDGE_RTHASH_SIZE 16 +#define BRIDGE_RTHASH_SIZE 16 #endif /* * Maximum size of the routing hash table */ -#define BRIDGE_RTHASH_SIZE_MAX 2048 +#define BRIDGE_RTHASH_SIZE_MAX 2048 -#define BRIDGE_RTHASH_MASK(sc) ((sc)->sc_rthash_size - 1) +#define BRIDGE_RTHASH_MASK(sc) ((sc)->sc_rthash_size - 1) /* * Maximum number of addresses to cache. */ #ifndef BRIDGE_RTABLE_MAX -#define BRIDGE_RTABLE_MAX 100 +#define BRIDGE_RTABLE_MAX 100 #endif @@ -276,68 +276,68 @@ * Timeout (in seconds) for entries learned dynamically. */ #ifndef BRIDGE_RTABLE_TIMEOUT -#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */ +#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */ #endif /* * Number of seconds between walks of the route list. */ #ifndef BRIDGE_RTABLE_PRUNE_PERIOD -#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60) +#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60) #endif /* * List of capabilities to possibly mask on the member interface. */ -#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM) +#define BRIDGE_IFCAPS_MASK (IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM) /* * List of capabilities to disable on the member interface. */ -#define BRIDGE_IFCAPS_STRIP IFCAP_LRO +#define BRIDGE_IFCAPS_STRIP IFCAP_LRO /* * Bridge interface list entry. */ struct bridge_iflist { TAILQ_ENTRY(bridge_iflist) bif_next; - struct ifnet *bif_ifp; /* member if */ - struct bstp_port bif_stp; /* STP state */ - uint32_t bif_ifflags; /* member if flags */ - int bif_savedcaps; /* saved capabilities */ - uint32_t bif_addrmax; /* max # of addresses */ - uint32_t bif_addrcnt; /* cur. # of addresses */ - uint32_t bif_addrexceeded; /* # of address violations */ - - interface_filter_t bif_iff_ref; - struct bridge_softc *bif_sc; - uint32_t bif_flags; - - struct in_addr bif_hf_ipsrc; - uint8_t bif_hf_hwsrc[ETHER_ADDR_LEN]; + struct ifnet *bif_ifp; /* member if */ + struct bstp_port bif_stp; /* STP state */ + uint32_t bif_ifflags; /* member if flags */ + int bif_savedcaps; /* saved capabilities */ + uint32_t bif_addrmax; /* max # of addresses */ + uint32_t bif_addrcnt; /* cur. 
# of addresses */ + uint32_t bif_addrexceeded; /* # of address violations */ + + interface_filter_t bif_iff_ref; + struct bridge_softc *bif_sc; + uint32_t bif_flags; + + struct in_addr bif_hf_ipsrc; + uint8_t bif_hf_hwsrc[ETHER_ADDR_LEN]; }; -#define BIFF_PROMISC 0x01 /* promiscuous mode set */ -#define BIFF_PROTO_ATTACHED 0x02 /* protocol attached */ -#define BIFF_FILTER_ATTACHED 0x04 /* interface filter attached */ -#define BIFF_MEDIA_ACTIVE 0x08 /* interface media active */ -#define BIFF_HOST_FILTER 0x10 /* host filter enabled */ -#define BIFF_HF_HWSRC 0x20 /* host filter source MAC is set */ -#define BIFF_HF_IPSRC 0x40 /* host filter source IP is set */ +#define BIFF_PROMISC 0x01 /* promiscuous mode set */ +#define BIFF_PROTO_ATTACHED 0x02 /* protocol attached */ +#define BIFF_FILTER_ATTACHED 0x04 /* interface filter attached */ +#define BIFF_MEDIA_ACTIVE 0x08 /* interface media active */ +#define BIFF_HOST_FILTER 0x10 /* host filter enabled */ +#define BIFF_HF_HWSRC 0x20 /* host filter source MAC is set */ +#define BIFF_HF_IPSRC 0x40 /* host filter source IP is set */ /* * Bridge route node. */ struct bridge_rtnode { - LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ - LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ - struct bridge_iflist *brt_dst; /* destination if */ - unsigned long brt_expire; /* expiration time */ - uint8_t brt_flags; /* address flags */ - uint8_t brt_addr[ETHER_ADDR_LEN]; - uint16_t brt_vlan; /* vlan id */ + LIST_ENTRY(bridge_rtnode) brt_hash; /* hash table linkage */ + LIST_ENTRY(bridge_rtnode) brt_list; /* list linkage */ + struct bridge_iflist *brt_dst; /* destination if */ + unsigned long brt_expire; /* expiration time */ + uint8_t brt_flags; /* address flags */ + uint8_t brt_addr[ETHER_ADDR_LEN]; + uint16_t brt_vlan; /* vlan id */ }; -#define brt_ifp brt_dst->bif_ifp +#define brt_ifp brt_dst->bif_ifp /* * Bridge delayed function call context @@ -345,15 +345,15 @@ struct bridge_rtnode { typedef void (*bridge_delayed_func_t)(struct bridge_softc *); struct bridge_delayed_call { - struct bridge_softc *bdc_sc; - bridge_delayed_func_t bdc_func; /* Function to call */ - struct timespec bdc_ts; /* Time to call */ - u_int32_t bdc_flags; - thread_call_t bdc_thread_call; + struct bridge_softc *bdc_sc; + bridge_delayed_func_t bdc_func; /* Function to call */ + struct timespec bdc_ts; /* Time to call */ + u_int32_t bdc_flags; + thread_call_t bdc_thread_call; }; -#define BDCF_OUTSTANDING 0x01 /* Delayed call has been scheduled */ -#define BDCF_CANCELLING 0x02 /* May be waiting for call completion */ +#define BDCF_OUTSTANDING 0x01 /* Delayed call has been scheduled */ +#define BDCF_CANCELLING 0x02 /* May be waiting for call completion */ /* @@ -362,65 +362,65 @@ struct bridge_delayed_call { LIST_HEAD(_bridge_rtnode_list, bridge_rtnode); typedef struct { - struct _bridge_rtnode_list *bb_rthash; /* our forwarding table */ - struct _bridge_rtnode_list bb_rtlist; /* list version of above */ - uint32_t bb_rthash_key; /* key for hash */ - uint32_t bb_rthash_size; /* size of the hash table */ + struct _bridge_rtnode_list *bb_rthash; /* our forwarding table */ + struct _bridge_rtnode_list bb_rtlist; /* list version of above */ + uint32_t bb_rthash_key; /* key for hash */ + uint32_t bb_rthash_size; /* size of the hash table */ struct bridge_delayed_call bb_aging_timer; struct bridge_delayed_call bb_resize_call; - TAILQ_HEAD(, bridge_iflist) bb_spanlist; /* span ports list */ - struct bstp_state bb_stp; /* STP state */ - bpf_packet_func bb_bpf_input; - 
bpf_packet_func bb_bpf_output; + TAILQ_HEAD(, bridge_iflist) bb_spanlist; /* span ports list */ + struct bstp_state bb_stp; /* STP state */ + bpf_packet_func bb_bpf_input; + bpf_packet_func bb_bpf_output; } bridge_bsd, *bridge_bsd_t; -#define sc_rthash sc_u.scu_bsd.bb_rthash -#define sc_rtlist sc_u.scu_bsd.bb_rtlist -#define sc_rthash_key sc_u.scu_bsd.bb_rthash_key -#define sc_rthash_size sc_u.scu_bsd.bb_rthash_size -#define sc_aging_timer sc_u.scu_bsd.bb_aging_timer -#define sc_resize_call sc_u.scu_bsd.bb_resize_call -#define sc_spanlist sc_u.scu_bsd.bb_spanlist -#define sc_stp sc_u.scu_bsd.bb_stp -#define sc_bpf_input sc_u.scu_bsd.bb_bpf_input -#define sc_bpf_output sc_u.scu_bsd.bb_bpf_output +#define sc_rthash sc_u.scu_bsd.bb_rthash +#define sc_rtlist sc_u.scu_bsd.bb_rtlist +#define sc_rthash_key sc_u.scu_bsd.bb_rthash_key +#define sc_rthash_size sc_u.scu_bsd.bb_rthash_size +#define sc_aging_timer sc_u.scu_bsd.bb_aging_timer +#define sc_resize_call sc_u.scu_bsd.bb_resize_call +#define sc_spanlist sc_u.scu_bsd.bb_spanlist +#define sc_stp sc_u.scu_bsd.bb_stp +#define sc_bpf_input sc_u.scu_bsd.bb_bpf_input +#define sc_bpf_output sc_u.scu_bsd.bb_bpf_output struct bridge_softc { - struct ifnet *sc_ifp; /* make this an interface */ - u_int32_t sc_flags; + struct ifnet *sc_ifp; /* make this an interface */ + u_int32_t sc_flags; union { - bridge_bsd scu_bsd; + bridge_bsd scu_bsd; } sc_u; LIST_ENTRY(bridge_softc) sc_list; - decl_lck_mtx_data(, sc_mtx); - void *sc_cv; - uint32_t sc_brtmax; /* max # of addresses */ - uint32_t sc_brtcnt; /* cur. # of addresses */ - uint32_t sc_brttimeout; /* rt timeout in seconds */ - uint32_t sc_iflist_ref; /* refcount for sc_iflist */ - uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ - TAILQ_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ - uint32_t sc_brtexceeded; /* # of cache drops */ - uint32_t sc_filter_flags; /* ipf and flags */ - struct ifnet *sc_ifaddr; /* member mac copied from */ - u_char sc_defaddr[6]; /* Default MAC address */ - char sc_if_xname[IFNAMSIZ]; + decl_lck_mtx_data(, sc_mtx); + void *sc_cv; + uint32_t sc_brtmax; /* max # of addresses */ + uint32_t sc_brtcnt; /* cur. 
# of addresses */ + uint32_t sc_brttimeout; /* rt timeout in seconds */ + uint32_t sc_iflist_ref; /* refcount for sc_iflist */ + uint32_t sc_iflist_xcnt; /* refcount for sc_iflist */ + TAILQ_HEAD(, bridge_iflist) sc_iflist; /* member interface list */ + uint32_t sc_brtexceeded; /* # of cache drops */ + uint32_t sc_filter_flags; /* ipf and flags */ + struct ifnet *sc_ifaddr; /* member mac copied from */ + u_char sc_defaddr[6]; /* Default MAC address */ + char sc_if_xname[IFNAMSIZ]; #if BRIDGE_DEBUG /* * Locking and unlocking calling history */ - void *lock_lr[BR_LCKDBG_MAX]; - int next_lock_lr; - void *unlock_lr[BR_LCKDBG_MAX]; - int next_unlock_lr; + void *lock_lr[BR_LCKDBG_MAX]; + int next_lock_lr; + void *unlock_lr[BR_LCKDBG_MAX]; + int next_unlock_lr; #endif /* BRIDGE_DEBUG */ }; -#define SCF_DETACHING 0x01 -#define SCF_RESIZING 0x02 -#define SCF_MEDIA_ACTIVE 0x04 -#define SCF_BSD_MODE 0x08 +#define SCF_DETACHING 0x01 +#define SCF_RESIZING 0x02 +#define SCF_MEDIA_ACTIVE 0x04 +#define SCF_BSD_MODE 0x08 static inline void bridge_set_bsd_mode(struct bridge_softc * sc) @@ -431,145 +431,145 @@ bridge_set_bsd_mode(struct bridge_softc * sc) static inline boolean_t bridge_in_bsd_mode(const struct bridge_softc * sc) { - return ((sc->sc_flags & SCF_BSD_MODE) != 0); + return (sc->sc_flags & SCF_BSD_MODE) != 0; } struct bridge_hostfilter_stats bridge_hostfilter_stats; decl_lck_mtx_data(static, bridge_list_mtx); -static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; +static int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; -static zone_t bridge_rtnode_pool = NULL; +static zone_t bridge_rtnode_pool = NULL; -static int bridge_clone_create(struct if_clone *, uint32_t, void *); -static int bridge_clone_destroy(struct ifnet *); +static int bridge_clone_create(struct if_clone *, uint32_t, void *); +static int bridge_clone_destroy(struct ifnet *); -static errno_t bridge_ioctl(struct ifnet *, u_long, void *); +static errno_t bridge_ioctl(struct ifnet *, u_long, void *); #if HAS_IF_CAP -static void bridge_mutecaps(struct bridge_softc *); -static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, - int); +static void bridge_mutecaps(struct bridge_softc *); +static void bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *, + int); #endif static errno_t bridge_set_tso(struct bridge_softc *); -__private_extern__ void bridge_ifdetach(struct bridge_iflist *, struct ifnet *); -static int bridge_init(struct ifnet *); +__private_extern__ void bridge_ifdetach(struct bridge_iflist *, struct ifnet *); +static int bridge_init(struct ifnet *); #if HAS_BRIDGE_DUMMYNET -static void bridge_dummynet(struct mbuf *, struct ifnet *); +static void bridge_dummynet(struct mbuf *, struct ifnet *); #endif -static void bridge_ifstop(struct ifnet *, int); -static int bridge_output(struct ifnet *, struct mbuf *); -static void bridge_finalize_cksum(struct ifnet *, struct mbuf *); -static void bridge_start(struct ifnet *); +static void bridge_ifstop(struct ifnet *, int); +static int bridge_output(struct ifnet *, struct mbuf *); +static void bridge_finalize_cksum(struct ifnet *, struct mbuf *); +static void bridge_start(struct ifnet *); __private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *); #if BRIDGE_MEMBER_OUT_FILTER static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t, - mbuf_t *); -static int bridge_member_output(struct ifnet *, struct mbuf *, - struct sockaddr *, struct rtentry *); + mbuf_t *); +static int bridge_member_output(struct ifnet *, 
struct mbuf *, + struct sockaddr *, struct rtentry *); #endif -static int bridge_enqueue(struct bridge_softc *, struct ifnet *, - struct mbuf *); -static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); +static int bridge_enqueue(struct bridge_softc *, struct ifnet *, + struct mbuf *); +static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int); -static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, - struct mbuf *); +static void bridge_forward(struct bridge_softc *, struct bridge_iflist *, + struct mbuf *); -static void bridge_aging_timer(struct bridge_softc *sc); +static void bridge_aging_timer(struct bridge_softc *sc); -static void bridge_broadcast(struct bridge_softc *, struct ifnet *, - struct mbuf *, int); -static void bridge_span(struct bridge_softc *, struct mbuf *); +static void bridge_broadcast(struct bridge_softc *, struct ifnet *, + struct mbuf *, int); +static void bridge_span(struct bridge_softc *, struct mbuf *); -static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, - uint16_t, struct bridge_iflist *, int, uint8_t); +static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, + uint16_t, struct bridge_iflist *, int, uint8_t); static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *, - uint16_t); -static void bridge_rttrim(struct bridge_softc *); -static void bridge_rtage(struct bridge_softc *); -static void bridge_rtflush(struct bridge_softc *, int); -static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *, - uint16_t); + uint16_t); +static void bridge_rttrim(struct bridge_softc *); +static void bridge_rtage(struct bridge_softc *); +static void bridge_rtflush(struct bridge_softc *, int); +static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *, + uint16_t); -static int bridge_rtable_init(struct bridge_softc *); -static void bridge_rtable_fini(struct bridge_softc *); +static int bridge_rtable_init(struct bridge_softc *); +static void bridge_rtable_fini(struct bridge_softc *); -static void bridge_rthash_resize(struct bridge_softc *); +static void bridge_rthash_resize(struct bridge_softc *); -static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); +static int bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *); static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, - const uint8_t *, uint16_t); -static int bridge_rtnode_hash(struct bridge_softc *, - struct bridge_rtnode *); -static int bridge_rtnode_insert(struct bridge_softc *, - struct bridge_rtnode *); -static void bridge_rtnode_destroy(struct bridge_softc *, - struct bridge_rtnode *); + const uint8_t *, uint16_t); +static int bridge_rtnode_hash(struct bridge_softc *, + struct bridge_rtnode *); +static int bridge_rtnode_insert(struct bridge_softc *, + struct bridge_rtnode *); +static void bridge_rtnode_destroy(struct bridge_softc *, + struct bridge_rtnode *); #if BRIDGESTP -static void bridge_rtable_expire(struct ifnet *, int); -static void bridge_state_change(struct ifnet *, int); +static void bridge_rtable_expire(struct ifnet *, int); +static void bridge_state_change(struct ifnet *, int); #endif /* BRIDGESTP */ static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, - const char *name); + const char *name); static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, - struct ifnet *ifp); -static void bridge_delete_member(struct bridge_softc *, - struct bridge_iflist *, int); -static void bridge_delete_span(struct bridge_softc *, - struct 
bridge_iflist *); - -static int bridge_ioctl_add(struct bridge_softc *, void *); -static int bridge_ioctl_del(struct bridge_softc *, void *); -static int bridge_ioctl_gifflags(struct bridge_softc *, void *); -static int bridge_ioctl_sifflags(struct bridge_softc *, void *); -static int bridge_ioctl_scache(struct bridge_softc *, void *); -static int bridge_ioctl_gcache(struct bridge_softc *, void *); -static int bridge_ioctl_gifs32(struct bridge_softc *, void *); -static int bridge_ioctl_gifs64(struct bridge_softc *, void *); -static int bridge_ioctl_rts32(struct bridge_softc *, void *); -static int bridge_ioctl_rts64(struct bridge_softc *, void *); -static int bridge_ioctl_saddr32(struct bridge_softc *, void *); -static int bridge_ioctl_saddr64(struct bridge_softc *, void *); -static int bridge_ioctl_sto(struct bridge_softc *, void *); -static int bridge_ioctl_gto(struct bridge_softc *, void *); -static int bridge_ioctl_daddr32(struct bridge_softc *, void *); -static int bridge_ioctl_daddr64(struct bridge_softc *, void *); -static int bridge_ioctl_flush(struct bridge_softc *, void *); -static int bridge_ioctl_gpri(struct bridge_softc *, void *); -static int bridge_ioctl_spri(struct bridge_softc *, void *); -static int bridge_ioctl_ght(struct bridge_softc *, void *); -static int bridge_ioctl_sht(struct bridge_softc *, void *); -static int bridge_ioctl_gfd(struct bridge_softc *, void *); -static int bridge_ioctl_sfd(struct bridge_softc *, void *); -static int bridge_ioctl_gma(struct bridge_softc *, void *); -static int bridge_ioctl_sma(struct bridge_softc *, void *); -static int bridge_ioctl_sifprio(struct bridge_softc *, void *); -static int bridge_ioctl_sifcost(struct bridge_softc *, void *); -static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); -static int bridge_ioctl_addspan(struct bridge_softc *, void *); -static int bridge_ioctl_delspan(struct bridge_softc *, void *); -static int bridge_ioctl_gbparam32(struct bridge_softc *, void *); -static int bridge_ioctl_gbparam64(struct bridge_softc *, void *); -static int bridge_ioctl_grte(struct bridge_softc *, void *); -static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *); -static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *); -static int bridge_ioctl_sproto(struct bridge_softc *, void *); -static int bridge_ioctl_stxhc(struct bridge_softc *, void *); -static int bridge_ioctl_purge(struct bridge_softc *sc, void *); -static int bridge_ioctl_gfilt(struct bridge_softc *, void *); -static int bridge_ioctl_sfilt(struct bridge_softc *, void *); -static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *); -static int bridge_ioctl_shostfilter(struct bridge_softc *, void *); + struct ifnet *ifp); +static void bridge_delete_member(struct bridge_softc *, + struct bridge_iflist *, int); +static void bridge_delete_span(struct bridge_softc *, + struct bridge_iflist *); + +static int bridge_ioctl_add(struct bridge_softc *, void *); +static int bridge_ioctl_del(struct bridge_softc *, void *); +static int bridge_ioctl_gifflags(struct bridge_softc *, void *); +static int bridge_ioctl_sifflags(struct bridge_softc *, void *); +static int bridge_ioctl_scache(struct bridge_softc *, void *); +static int bridge_ioctl_gcache(struct bridge_softc *, void *); +static int bridge_ioctl_gifs32(struct bridge_softc *, void *); +static int bridge_ioctl_gifs64(struct bridge_softc *, void *); +static int bridge_ioctl_rts32(struct bridge_softc *, void *); +static int bridge_ioctl_rts64(struct bridge_softc *, void *); +static int 
bridge_ioctl_saddr32(struct bridge_softc *, void *); +static int bridge_ioctl_saddr64(struct bridge_softc *, void *); +static int bridge_ioctl_sto(struct bridge_softc *, void *); +static int bridge_ioctl_gto(struct bridge_softc *, void *); +static int bridge_ioctl_daddr32(struct bridge_softc *, void *); +static int bridge_ioctl_daddr64(struct bridge_softc *, void *); +static int bridge_ioctl_flush(struct bridge_softc *, void *); +static int bridge_ioctl_gpri(struct bridge_softc *, void *); +static int bridge_ioctl_spri(struct bridge_softc *, void *); +static int bridge_ioctl_ght(struct bridge_softc *, void *); +static int bridge_ioctl_sht(struct bridge_softc *, void *); +static int bridge_ioctl_gfd(struct bridge_softc *, void *); +static int bridge_ioctl_sfd(struct bridge_softc *, void *); +static int bridge_ioctl_gma(struct bridge_softc *, void *); +static int bridge_ioctl_sma(struct bridge_softc *, void *); +static int bridge_ioctl_sifprio(struct bridge_softc *, void *); +static int bridge_ioctl_sifcost(struct bridge_softc *, void *); +static int bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *); +static int bridge_ioctl_addspan(struct bridge_softc *, void *); +static int bridge_ioctl_delspan(struct bridge_softc *, void *); +static int bridge_ioctl_gbparam32(struct bridge_softc *, void *); +static int bridge_ioctl_gbparam64(struct bridge_softc *, void *); +static int bridge_ioctl_grte(struct bridge_softc *, void *); +static int bridge_ioctl_gifsstp32(struct bridge_softc *, void *); +static int bridge_ioctl_gifsstp64(struct bridge_softc *, void *); +static int bridge_ioctl_sproto(struct bridge_softc *, void *); +static int bridge_ioctl_stxhc(struct bridge_softc *, void *); +static int bridge_ioctl_purge(struct bridge_softc *sc, void *); +static int bridge_ioctl_gfilt(struct bridge_softc *, void *); +static int bridge_ioctl_sfilt(struct bridge_softc *, void *); +static int bridge_ioctl_ghostfilter(struct bridge_softc *, void *); +static int bridge_ioctl_shostfilter(struct bridge_softc *, void *); #ifdef PFIL_HOOKS -static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, - int); -static int bridge_ip_checkbasic(struct mbuf **); +static int bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *, + int); +static int bridge_ip_checkbasic(struct mbuf **); #ifdef INET6 -static int bridge_ip6_checkbasic(struct mbuf **); +static int bridge_ip6_checkbasic(struct mbuf **); #endif /* INET6 */ -static int bridge_fragment(struct ifnet *, struct mbuf *, - struct ether_header *, int, struct llc *); +static int bridge_fragment(struct ifnet *, struct mbuf *, + struct ether_header *, int, struct llc *); #endif /* PFIL_HOOKS */ static errno_t bridge_set_bpf_tap(ifnet_t, bpf_tap_mode, bpf_packet_func); @@ -587,16 +587,16 @@ static void bridge_cleanup_delayed_call(struct bridge_delayed_call *); static int bridge_host_filter(struct bridge_iflist *, struct mbuf *); -#define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how) +#define m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how) /* The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2) */ -#define VLANTAGOF(_m) 0 +#define VLANTAGOF(_m) 0 u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] = - { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; static u_int8_t ethernulladdr[ETHER_ADDR_LEN] = - { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; #if BRIDGESTP static struct bstp_cb_ops bridge_ops = { @@ -606,45 +606,45 @@ static struct bstp_cb_ops bridge_ops = { #endif /* BRIDGESTP */ 
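The bstp_etheraddr initializer reindented above is the IEEE 802.1D bridge group address (01:80:c2:00:00:00): STP BPDUs are sent to it, and the standard reserves it as link-local, so a conforming bridge must never forward frames addressed to it. A minimal, self-contained sketch of that classification test (names are illustrative, not from the patch):

    /*
     * Sketch, not part of the patch: classify a frame whose destination
     * MAC is the 802.1D bridge group address.
     */
    #include <string.h>

    static const unsigned char bstp_group_addr[6] = {
        0x01, 0x80, 0xc2, 0x00, 0x00, 0x00
    };

    /* Nonzero when the destination MAC is the bridge group address. */
    static int
    ether_dst_is_bridge_group(const unsigned char dst[6])
    {
        return memcmp(dst, bstp_group_addr, sizeof(bstp_group_addr)) == 0;
    }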
SYSCTL_DECL(_net_link); -SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Bridge"); +SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Bridge"); static int bridge_inherit_mac = 0; /* share MAC with first bridge member */ SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac, - CTLFLAG_RW|CTLFLAG_LOCKED, - &bridge_inherit_mac, 0, - "Inherit MAC address from the first bridge member"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &bridge_inherit_mac, 0, + "Inherit MAC address from the first bridge member"); SYSCTL_INT(_net_link_bridge, OID_AUTO, rtable_prune_period, - CTLFLAG_RW|CTLFLAG_LOCKED, - &bridge_rtable_prune_period, 0, - "Interval between pruning of routing table"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &bridge_rtable_prune_period, 0, + "Interval between pruning of routing table"); static unsigned int bridge_rtable_hash_size_max = BRIDGE_RTHASH_SIZE_MAX; SYSCTL_UINT(_net_link_bridge, OID_AUTO, rtable_hash_size_max, - CTLFLAG_RW|CTLFLAG_LOCKED, - &bridge_rtable_hash_size_max, 0, - "Maximum size of the routing hash table"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &bridge_rtable_hash_size_max, 0, + "Maximum size of the routing hash table"); #if BRIDGE_DEBUG_DELAYED_CALLBACK static int bridge_delayed_callback_delay = 0; SYSCTL_INT(_net_link_bridge, OID_AUTO, delayed_callback_delay, - CTLFLAG_RW|CTLFLAG_LOCKED, - &bridge_delayed_callback_delay, 0, - "Delay before calling delayed function"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &bridge_delayed_callback_delay, 0, + "Delay before calling delayed function"); #endif static int bridge_bsd_mode = 1; #if (DEVELOPMENT || DEBUG) SYSCTL_INT(_net_link_bridge, OID_AUTO, bsd_mode, - CTLFLAG_RW|CTLFLAG_LOCKED, - &bridge_bsd_mode, 0, - "Bridge using bsd mode"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &bridge_bsd_mode, 0, + "Bridge using bsd mode"); #endif /* (DEVELOPMENT || DEBUG) */ SYSCTL_STRUCT(_net_link_bridge, OID_AUTO, - hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED, - &bridge_hostfilter_stats, bridge_hostfilter_stats, ""); + hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED, + &bridge_hostfilter_stats, bridge_hostfilter_stats, ""); #if defined(PFIL_HOOKS) static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */ @@ -653,257 +653,257 @@ static int pfil_member = 1; /* run pfil hooks on the member interface */ static int pfil_ipfw = 0; /* layer2 filter with ipfw */ static int pfil_ipfw_arp = 0; /* layer2 filter with ipfw */ static int pfil_local_phys = 0; /* run pfil hooks on the physical interface */ - /* for locally destined packets */ -SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW|CTLFLAG_LOCKED, - &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); -SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW|CTLFLAG_LOCKED, - &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); -SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW|CTLFLAG_LOCKED, - &pfil_bridge, 0, "Packet filter on the bridge interface"); -SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW|CTLFLAG_LOCKED, - &pfil_member, 0, "Packet filter on the member interface"); + /* for locally destined packets */ +SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW | CTLFLAG_LOCKED, + &pfil_onlyip, 0, "Only pass IP packets when pfil is enabled"); +SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW | CTLFLAG_LOCKED, + &pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2"); +SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW | CTLFLAG_LOCKED, + &pfil_bridge, 0, 
"Packet filter on the bridge interface"); +SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW | CTLFLAG_LOCKED, + &pfil_member, 0, "Packet filter on the member interface"); SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys, - CTLFLAG_RW|CTLFLAG_LOCKED, &pfil_local_phys, 0, - "Packet filter on the physical interface for locally destined packets"); + CTLFLAG_RW | CTLFLAG_LOCKED, &pfil_local_phys, 0, + "Packet filter on the physical interface for locally destined packets"); #endif /* PFIL_HOOKS */ #if BRIDGESTP static int log_stp = 0; /* log STP state changes */ SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW, - &log_stp, 0, "Log STP state changes"); + &log_stp, 0, "Log STP state changes"); #endif /* BRIDGESTP */ struct bridge_control { - int (*bc_func)(struct bridge_softc *, void *); - unsigned int bc_argsize; - unsigned int bc_flags; + int (*bc_func)(struct bridge_softc *, void *); + unsigned int bc_argsize; + unsigned int bc_flags; }; -#define BC_F_COPYIN 0x01 /* copy arguments in */ -#define BC_F_COPYOUT 0x02 /* copy arguments out */ -#define BC_F_SUSER 0x04 /* do super-user check */ +#define BC_F_COPYIN 0x01 /* copy arguments in */ +#define BC_F_COPYOUT 0x02 /* copy arguments out */ +#define BC_F_SUSER 0x04 /* do super-user check */ static const struct bridge_control bridge_control_table32[] = { - { bridge_ioctl_add, sizeof (struct ifbreq), /* 0 */ - BC_F_COPYIN|BC_F_SUSER }, - { bridge_ioctl_del, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_add, sizeof(struct ifbreq), /* 0 */ + BC_F_COPYIN | BC_F_SUSER }, + { bridge_ioctl_del, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gifflags, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_COPYOUT }, - { bridge_ioctl_sifflags, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gifflags, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_COPYOUT }, + { bridge_ioctl_sifflags, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_scache, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, - { bridge_ioctl_gcache, sizeof (struct ifbrparam), - BC_F_COPYOUT }, + { bridge_ioctl_scache, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, + { bridge_ioctl_gcache, sizeof(struct ifbrparam), + BC_F_COPYOUT }, - { bridge_ioctl_gifs32, sizeof (struct ifbifconf32), - BC_F_COPYIN|BC_F_COPYOUT }, - { bridge_ioctl_rts32, sizeof (struct ifbaconf32), - BC_F_COPYIN|BC_F_COPYOUT }, + { bridge_ioctl_gifs32, sizeof(struct ifbifconf32), + BC_F_COPYIN | BC_F_COPYOUT }, + { bridge_ioctl_rts32, sizeof(struct ifbaconf32), + BC_F_COPYIN | BC_F_COPYOUT }, - { bridge_ioctl_saddr32, sizeof (struct ifbareq32), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_saddr32, sizeof(struct ifbareq32), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_sto, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, - { bridge_ioctl_gto, sizeof (struct ifbrparam), /* 10 */ - BC_F_COPYOUT }, + { bridge_ioctl_sto, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, + { bridge_ioctl_gto, sizeof(struct ifbrparam), /* 10 */ + BC_F_COPYOUT }, - { bridge_ioctl_daddr32, sizeof (struct ifbareq32), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_daddr32, sizeof(struct ifbareq32), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_flush, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_flush, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gpri, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_spri, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { 
bridge_ioctl_gpri, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_spri, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_ght, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_sht, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_ght, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_sht, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gfd, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_sfd, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gfd, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_sfd, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gma, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_sma, sizeof (struct ifbrparam), /* 20 */ - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gma, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_sma, sizeof(struct ifbrparam), /* 20 */ + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_sifprio, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_sifprio, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_sifcost, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_sifcost, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gfilt, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_sfilt, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gfilt, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_sfilt, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_purge, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_purge, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_addspan, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, - { bridge_ioctl_delspan, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_addspan, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, + { bridge_ioctl_delspan, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gbparam32, sizeof (struct ifbropreq32), - BC_F_COPYOUT }, + { bridge_ioctl_gbparam32, sizeof(struct ifbropreq32), + BC_F_COPYOUT }, - { bridge_ioctl_grte, sizeof (struct ifbrparam), - BC_F_COPYOUT }, + { bridge_ioctl_grte, sizeof(struct ifbrparam), + BC_F_COPYOUT }, - { bridge_ioctl_gifsstp32, sizeof (struct ifbpstpconf32), /* 30 */ - BC_F_COPYIN|BC_F_COPYOUT }, + { bridge_ioctl_gifsstp32, sizeof(struct ifbpstpconf32), /* 30 */ + BC_F_COPYIN | BC_F_COPYOUT }, - { bridge_ioctl_sproto, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_sproto, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_stxhc, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_stxhc, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_sifmaxaddr, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_ghostfilter, sizeof (struct ifbrhostfilter), - BC_F_COPYIN|BC_F_COPYOUT }, - { bridge_ioctl_shostfilter, sizeof (struct ifbrhostfilter), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_ghostfilter, sizeof(struct ifbrhostfilter), + BC_F_COPYIN | BC_F_COPYOUT }, + { bridge_ioctl_shostfilter, sizeof(struct ifbrhostfilter), + BC_F_COPYIN | BC_F_SUSER }, }; static const struct bridge_control bridge_control_table64[] = { - { bridge_ioctl_add, sizeof (struct ifbreq), /* 0 */ - 
BC_F_COPYIN|BC_F_SUSER }, - { bridge_ioctl_del, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_add, sizeof(struct ifbreq), /* 0 */ + BC_F_COPYIN | BC_F_SUSER }, + { bridge_ioctl_del, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gifflags, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_COPYOUT }, - { bridge_ioctl_sifflags, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gifflags, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_COPYOUT }, + { bridge_ioctl_sifflags, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_scache, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, - { bridge_ioctl_gcache, sizeof (struct ifbrparam), - BC_F_COPYOUT }, + { bridge_ioctl_scache, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, + { bridge_ioctl_gcache, sizeof(struct ifbrparam), + BC_F_COPYOUT }, - { bridge_ioctl_gifs64, sizeof (struct ifbifconf64), - BC_F_COPYIN|BC_F_COPYOUT }, - { bridge_ioctl_rts64, sizeof (struct ifbaconf64), - BC_F_COPYIN|BC_F_COPYOUT }, + { bridge_ioctl_gifs64, sizeof(struct ifbifconf64), + BC_F_COPYIN | BC_F_COPYOUT }, + { bridge_ioctl_rts64, sizeof(struct ifbaconf64), + BC_F_COPYIN | BC_F_COPYOUT }, - { bridge_ioctl_saddr64, sizeof (struct ifbareq64), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_saddr64, sizeof(struct ifbareq64), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_sto, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, - { bridge_ioctl_gto, sizeof (struct ifbrparam), /* 10 */ - BC_F_COPYOUT }, + { bridge_ioctl_sto, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, + { bridge_ioctl_gto, sizeof(struct ifbrparam), /* 10 */ + BC_F_COPYOUT }, - { bridge_ioctl_daddr64, sizeof (struct ifbareq64), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_daddr64, sizeof(struct ifbareq64), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_flush, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_flush, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gpri, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_spri, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gpri, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_spri, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_ght, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_sht, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_ght, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_sht, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gfd, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_sfd, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gfd, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_sfd, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gma, sizeof (struct ifbrparam), - BC_F_COPYOUT }, - { bridge_ioctl_sma, sizeof (struct ifbrparam), /* 20 */ - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gma, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_sma, sizeof(struct ifbrparam), /* 20 */ + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_sifprio, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_sifprio, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_sifcost, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_sifcost, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gfilt, sizeof (struct ifbrparam), - BC_F_COPYOUT 
}, - { bridge_ioctl_sfilt, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_gfilt, sizeof(struct ifbrparam), + BC_F_COPYOUT }, + { bridge_ioctl_sfilt, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_purge, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_purge, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_addspan, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, - { bridge_ioctl_delspan, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_addspan, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, + { bridge_ioctl_delspan, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_gbparam64, sizeof (struct ifbropreq64), - BC_F_COPYOUT }, + { bridge_ioctl_gbparam64, sizeof(struct ifbropreq64), + BC_F_COPYOUT }, - { bridge_ioctl_grte, sizeof (struct ifbrparam), - BC_F_COPYOUT }, + { bridge_ioctl_grte, sizeof(struct ifbrparam), + BC_F_COPYOUT }, - { bridge_ioctl_gifsstp64, sizeof (struct ifbpstpconf64), /* 30 */ - BC_F_COPYIN|BC_F_COPYOUT }, + { bridge_ioctl_gifsstp64, sizeof(struct ifbpstpconf64), /* 30 */ + BC_F_COPYIN | BC_F_COPYOUT }, - { bridge_ioctl_sproto, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_sproto, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_stxhc, sizeof (struct ifbrparam), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_stxhc, sizeof(struct ifbrparam), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_sifmaxaddr, sizeof (struct ifbreq), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_sifmaxaddr, sizeof(struct ifbreq), + BC_F_COPYIN | BC_F_SUSER }, - { bridge_ioctl_ghostfilter, sizeof (struct ifbrhostfilter), - BC_F_COPYIN|BC_F_COPYOUT }, - { bridge_ioctl_shostfilter, sizeof (struct ifbrhostfilter), - BC_F_COPYIN|BC_F_SUSER }, + { bridge_ioctl_ghostfilter, sizeof(struct ifbrhostfilter), + BC_F_COPYIN | BC_F_COPYOUT }, + { bridge_ioctl_shostfilter, sizeof(struct ifbrhostfilter), + BC_F_COPYIN | BC_F_SUSER }, }; static const unsigned int bridge_control_table_size = - sizeof (bridge_control_table32) / sizeof (bridge_control_table32[0]); + sizeof(bridge_control_table32) / sizeof(bridge_control_table32[0]); static LIST_HEAD(, bridge_softc) bridge_list = - LIST_HEAD_INITIALIZER(bridge_list); + LIST_HEAD_INITIALIZER(bridge_list); static lck_grp_t *bridge_lock_grp = NULL; static lck_attr_t *bridge_lock_attr = NULL; -#define BRIDGENAME "bridge" -#define BRIDGES_MAX IF_MAXUNIT -#define BRIDGE_ZONE_MAX_ELEM MIN(IFNETS_MAX, BRIDGES_MAX) +#define BRIDGENAME "bridge" +#define BRIDGES_MAX IF_MAXUNIT +#define BRIDGE_ZONE_MAX_ELEM MIN(IFNETS_MAX, BRIDGES_MAX) static struct if_clone bridge_cloner = IF_CLONE_INITIALIZER(BRIDGENAME, bridge_clone_create, bridge_clone_destroy, - 0, BRIDGES_MAX, BRIDGE_ZONE_MAX_ELEM, sizeof(struct bridge_softc)); + 0, BRIDGES_MAX, BRIDGE_ZONE_MAX_ELEM, sizeof(struct bridge_softc)); static int if_bridge_txstart = 0; SYSCTL_INT(_net_link_bridge, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_bridge_txstart, 0, "Bridge interface uses TXSTART model"); + &if_bridge_txstart, 0, "Bridge interface uses TXSTART model"); #if BRIDGE_DEBUG static int if_bridge_debug = 0; SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_bridge_debug, 0, "Bridge debug"); + &if_bridge_debug, 0, "Bridge debug"); static void printf_ether_header(struct ether_header *); static void printf_mbuf_data(mbuf_t, size_t, size_t); @@ -928,7 +928,7 @@ bridge_lock(struct bridge_softc *sc) 
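The hunks that follow touch the BRIDGE_DEBUG lock-history machinery: each bridge lock/unlock stores its caller's return address into the small lock_lr/unlock_lr rings declared in bridge_softc, so a debugger can recover the most recent lock and unlock call sites. Note that those rings are declared with BR_LCKDBG_MAX elements while the wrap-around below uses SO_LCKDBG_MAX; that is safe only as long as the two constants stay equal. A generic sketch of the idiom (the struct and helper names here are assumptions):

    /*
     * Sketch of the lock-history ring. Callers typically pass
     * __builtin_return_address(0) so the ring holds the last few
     * lock/unlock call sites for post-mortem inspection.
     */
    #define LCKDBG_MAX 4    /* stands in for BR_LCKDBG_MAX / SO_LCKDBG_MAX */

    struct lock_history {
        void *lr[LCKDBG_MAX];   /* recent caller return addresses */
        int   next;             /* next slot to overwrite */
    };

    static inline void
    lock_history_record(struct lock_history *h, void *caller)
    {
        h->lr[h->next] = caller;                /* remember this call site */
        h->next = (h->next + 1) % LCKDBG_MAX;   /* wrap, like next_lock_lr */
    }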
 	_BRIDGE_LOCK(sc);
 
 	sc->lock_lr[sc->next_lock_lr] = lr_saved;
-	sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
+	sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
 }
 
 static void
@@ -939,7 +939,7 @@ bridge_unlock(struct bridge_softc *sc)
 	BRIDGE_LOCK_ASSERT_HELD(sc);
 
 	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
-	sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
+	sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
 
 	_BRIDGE_UNLOCK(sc);
 }
@@ -952,17 +952,18 @@ bridge_lock2ref(struct bridge_softc *sc)
 	BRIDGE_LOCK_ASSERT_HELD(sc);
 
-	if (sc->sc_iflist_xcnt > 0)
+	if (sc->sc_iflist_xcnt > 0) {
 		error = EBUSY;
-	else
+	} else {
 		sc->sc_iflist_ref++;
+	}
 
 	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
-	sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
+	sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
 
 	_BRIDGE_UNLOCK(sc);
 
-	return (error);
+	return error;
 }
 
 static void
@@ -974,17 +975,18 @@ bridge_unref(struct bridge_softc *sc)
 	_BRIDGE_LOCK(sc);
 
 	sc->lock_lr[sc->next_lock_lr] = lr_saved;
-	sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
+	sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
 
 	sc->sc_iflist_ref--;
 
 	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
-	sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
+	sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
 	if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
 		_BRIDGE_UNLOCK(sc);
 		wakeup(&sc->sc_cv);
-	} else
+	} else {
 		_BRIDGE_UNLOCK(sc);
+	}
 }
 
 static void
@@ -997,12 +999,12 @@ bridge_xlock(struct bridge_softc *sc)
 	sc->sc_iflist_xcnt++;
 	while (sc->sc_iflist_ref > 0) {
 		sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
-		sc->next_unlock_lr = (sc->next_unlock_lr+1) % SO_LCKDBG_MAX;
+		sc->next_unlock_lr = (sc->next_unlock_lr + 1) % SO_LCKDBG_MAX;
 
 		msleep(&sc->sc_cv, &sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);
 
 		sc->lock_lr[sc->next_lock_lr] = lr_saved;
-		sc->next_lock_lr = (sc->next_lock_lr+1) % SO_LCKDBG_MAX;
+		sc->next_lock_lr = (sc->next_lock_lr + 1) % SO_LCKDBG_MAX;
 	}
 }
 
@@ -1017,7 +1019,7 @@ bridge_xdrop(struct bridge_softc *sc)
 void
 printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
 {
-	if (m)
+	if (m) {
 		printf("%spktlen: %u rcvif: 0x%llx header: 0x%llx "
 		    "nextpkt: 0x%llx%s", prefix ? prefix : "",
 		    (unsigned int)mbuf_pkthdr_len(m),
@@ -1025,8 +1027,9 @@ printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
 	    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_header(m)),
 	    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_nextpkt(m)),
 	    suffix ? suffix : "");
-	else
+	} else {
 		printf("%s%s\n", prefix, suffix);
+	}
 }
 
 void
@@ -1042,24 +1045,27 @@ printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
 	    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m)),
 	    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_next(m)),
 	    !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
-		if ((mbuf_flags(m) & MBUF_PKTHDR))
+		if ((mbuf_flags(m) & MBUF_PKTHDR)) {
 			printf_mbuf_pkthdr(m, " ", suffix);
-	} else
+		}
+	} else {
 		printf("%s%s\n", prefix, suffix);
+	}
 }
 
 void
 printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
 {
-	mbuf_t n;
-	size_t i, j;
-	size_t pktlen, mlen, maxlen;
-	unsigned char *ptr;
+	mbuf_t          n;
+	size_t          i, j;
+	size_t          pktlen, mlen, maxlen;
+	unsigned char   *ptr;
 
 	pktlen = mbuf_pkthdr_len(m);
 
-	if (offset > pktlen)
+	if (offset > pktlen) {
 		return;
+	}
 
 	maxlen = (pktlen - offset > len) ?
len : pktlen - offset; n = m; @@ -1068,8 +1074,9 @@ printf_mbuf_data(mbuf_t m, size_t offset, size_t len) for (i = 0, j = 0; i < maxlen; i++, j++) { if (j >= mlen) { n = mbuf_next(n); - if (n == 0) + if (n == 0) { break; + } ptr = mbuf_data(n); mlen = mbuf_len(n); j = 0; @@ -1097,10 +1104,10 @@ link_print(struct bridge_softc * sc) { int i; uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) + - IFNAMSIZ + ETHER_ADDR_LEN]; + IFNAMSIZ + ETHER_ADDR_LEN]; struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer; - memset(sdl, 0, sizeof (sdl_buffer)); + memset(sdl, 0, sizeof(sdl_buffer)); sdl->sdl_family = AF_LINK; sdl->sdl_nlen = strlen(sc->sc_if_xname); sdl->sdl_alen = ETHER_ADDR_LEN; @@ -1114,8 +1121,9 @@ link_print(struct bridge_softc * sc) sdl->sdl_family, sdl->sdl_type, sdl->sdl_nlen, sdl->sdl_alen, sdl->sdl_slen); #endif - for (i = 0; i < sdl->sdl_alen; i++) + for (i = 0; i < sdl->sdl_alen; i++) { printf("%s%x", i ? ":" : "", (CONST_LLADDR(sdl))[i]); + } printf("\n"); } @@ -1133,8 +1141,8 @@ bridgeattach(int n) int error; lck_grp_attr_t *lck_grp_attr = NULL; - bridge_rtnode_pool = zinit(sizeof (struct bridge_rtnode), - 1024 * sizeof (struct bridge_rtnode), 0, "bridge_rtnode"); + bridge_rtnode_pool = zinit(sizeof(struct bridge_rtnode), + 1024 * sizeof(struct bridge_rtnode), 0, "bridge_rtnode"); zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE); lck_grp_attr = lck_grp_attr_alloc_init(); @@ -1159,10 +1167,11 @@ bridgeattach(int n) #endif /* BRIDGESTP */ error = if_clone_attach(&bridge_cloner); - if (error != 0) + if (error != 0) { printf("%s: ifnet_clone_attach failed %d\n", __func__, error); + } - return (error); + return error; } #if defined(PFIL_HOOKS) @@ -1195,17 +1204,17 @@ sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS } } - return (error); + return error; } -SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW, - &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW"); +SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT | CTLFLAG_RW, + &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW"); #endif /* PFIL_HOOKS */ static errno_t bridge_ifnet_set_attrs(struct ifnet * ifp) { - errno_t error; + errno_t error; error = ifnet_set_mtu(ifp, ETHERMTU); if (error != 0) { @@ -1230,8 +1239,8 @@ bridge_ifnet_set_attrs(struct ifnet * ifp) printf("%s: ifnet_set_flags failed %d\n", __func__, error); goto done; } - done: - return (error); +done: + return error; } /* @@ -1277,17 +1286,17 @@ bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params) TAILQ_INIT(&sc->sc_iflist); /* use the interface name as the unique id for ifp recycle */ - snprintf(sc->sc_if_xname, sizeof (sc->sc_if_xname), "%s%d", + snprintf(sc->sc_if_xname, sizeof(sc->sc_if_xname), "%s%d", ifc->ifc_name, unit); - bzero(&init_params, sizeof (init_params)); - init_params.ver = IFNET_INIT_CURRENT_VERSION; - init_params.len = sizeof (init_params); + bzero(&init_params, sizeof(init_params)); + init_params.ver = IFNET_INIT_CURRENT_VERSION; + init_params.len = sizeof(init_params); if (bridge_in_bsd_mode(sc)) { /* Initialize our routing table. 
*/ error = bridge_rtable_init(sc); if (error != 0) { printf("%s: bridge_rtable_init failed %d\n", - __func__, error); + __func__, error); goto done; } TAILQ_INIT(&sc->sc_spanlist); @@ -1297,38 +1306,38 @@ bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params) init_params.flags = IFNET_INIT_LEGACY; init_params.output = bridge_output; } - init_params.set_bpf_tap = bridge_set_bpf_tap; - } - init_params.uniqueid = sc->sc_if_xname; - init_params.uniqueid_len = strlen(sc->sc_if_xname); - init_params.sndq_maxlen = IFQ_MAXLEN; - init_params.name = ifc->ifc_name; - init_params.unit = unit; - init_params.family = IFNET_FAMILY_ETHERNET; - init_params.type = IFT_BRIDGE; - init_params.demux = ether_demux; - init_params.add_proto = ether_add_proto; - init_params.del_proto = ether_del_proto; - init_params.check_multi = ether_check_multi; - init_params.framer_extended = ether_frameout_extended; - init_params.softc = sc; - init_params.ioctl = bridge_ioctl; - init_params.detach = bridge_detach; - init_params.broadcast_addr = etherbroadcastaddr; - init_params.broadcast_len = ETHER_ADDR_LEN; + init_params.set_bpf_tap = bridge_set_bpf_tap; + } + init_params.uniqueid = sc->sc_if_xname; + init_params.uniqueid_len = strlen(sc->sc_if_xname); + init_params.sndq_maxlen = IFQ_MAXLEN; + init_params.name = ifc->ifc_name; + init_params.unit = unit; + init_params.family = IFNET_FAMILY_ETHERNET; + init_params.type = IFT_BRIDGE; + init_params.demux = ether_demux; + init_params.add_proto = ether_add_proto; + init_params.del_proto = ether_del_proto; + init_params.check_multi = ether_check_multi; + init_params.framer_extended = ether_frameout_extended; + init_params.softc = sc; + init_params.ioctl = bridge_ioctl; + init_params.detach = bridge_detach; + init_params.broadcast_addr = etherbroadcastaddr; + init_params.broadcast_len = ETHER_ADDR_LEN; if (bridge_in_bsd_mode(sc)) { error = ifnet_allocate_extended(&init_params, &ifp); if (error != 0) { printf("%s: ifnet_allocate failed %d\n", - __func__, error); + __func__, error); goto done; } sc->sc_ifp = ifp; error = bridge_ifnet_set_attrs(ifp); if (error != 0) { printf("%s: bridge_ifnet_set_attrs failed %d\n", - __func__, error); + __func__, error); goto done; } } @@ -1345,7 +1354,7 @@ bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params) */ fb = 0; has_hostid = (uuid_get_ethernet(ð_hostid[0]) == 0); - for (retry = 1; retry != 0; ) { + for (retry = 1; retry != 0;) { if (fb || has_hostid == 0) { read_frandom(&sc->sc_defaddr, ETHER_ADDR_LEN); sc->sc_defaddr[0] &= ~1; /* clear multicast bit */ @@ -1355,7 +1364,7 @@ bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params) ETHER_ADDR_LEN); sc->sc_defaddr[0] &= ~1; /* clear multicast bit */ sc->sc_defaddr[0] |= 2; /* set the LAA bit */ - sc->sc_defaddr[3] = /* stir it up a bit */ + sc->sc_defaddr[3] = /* stir it up a bit */ ((sc->sc_defaddr[3] & 0x0f) << 4) | ((sc->sc_defaddr[3] & 0xf0) >> 4); /* @@ -1374,8 +1383,9 @@ bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params) lck_mtx_lock(&bridge_list_mtx); LIST_FOREACH(sc2, &bridge_list, sc_list) { if (memcmp(sc->sc_defaddr, - IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0) + IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0) { retry = 1; + } } lck_mtx_unlock(&bridge_list_mtx); } @@ -1383,8 +1393,9 @@ bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params) sc->sc_flags &= ~SCF_MEDIA_ACTIVE; #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { link_print(sc); + } 
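The address-derivation loop above either draws six random bytes from the kernel entropy source or reuses the host UUID's Ethernet portion, then forces the result to be a unicast, locally administered address and retries if it collides with an existing bridge. A minimal userspace sketch of the same bit manipulation (arc4random_buf() stands in for the kernel's read_frandom(); the function name is illustrative):

    /*
     * Sketch: derive a unicast, locally administered MAC address.
     */
    #include <stdint.h>
    #include <stdlib.h>

    static void
    make_local_unicast_mac(uint8_t addr[6])
    {
        arc4random_buf(addr, 6);
        addr[0] &= ~0x01;   /* clear the I/G bit: unicast, not multicast */
        addr[0] |= 0x02;    /* set the U/L bit: locally administered, so it
                             * cannot collide with a vendor-assigned OUI */
    }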
#endif if (bridge_in_bsd_mode(sc)) { error = ifnet_attach(ifp, NULL); @@ -1404,12 +1415,12 @@ bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params) if (bridge_in_bsd_mode(sc)) { ifnet_set_offload(ifp, - IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | - IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES); + IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | + IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES); error = bridge_set_tso(sc); if (error != 0) { printf("%s: bridge_set_tso failed %d\n", - __func__, error); + __func__, error); goto done; } #if BRIDGESTP @@ -1422,7 +1433,7 @@ bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params) lck_mtx_unlock(&bridge_list_mtx); /* attach as ethernet */ - error = bpf_attach(ifp, DLT_EN10MB, sizeof (struct ether_header), + error = bpf_attach(ifp, DLT_EN10MB, sizeof(struct ether_header), NULL, NULL); done: @@ -1431,7 +1442,7 @@ done: /* TBD: Clean up: sc, sc_rthash etc */ } - return (error); + return error; } /* @@ -1449,7 +1460,7 @@ bridge_clone_destroy(struct ifnet *ifp) BRIDGE_LOCK(sc); if ((sc->sc_flags & SCF_DETACHING)) { BRIDGE_UNLOCK(sc); - return (0); + return 0; } sc->sc_flags |= SCF_DETACHING; @@ -1467,8 +1478,9 @@ bridge_clone_destroy(struct ifnet *ifp) printf("%s: ifnet_set_flags failed %d\n", __func__, error); } - while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL) + while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL) { bridge_delete_member(sc, bif, 0); + } if (bridge_in_bsd_mode(sc)) { while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) { @@ -1480,56 +1492,56 @@ bridge_clone_destroy(struct ifnet *ifp) error = ifnet_detach(ifp); if (error != 0) { panic("%s: ifnet_detach(%p) failed %d\n", - __func__, ifp, error); - } - return (0); -} - -#define DRVSPEC do { \ - if (ifd->ifd_cmd >= bridge_control_table_size) { \ - error = EINVAL; \ - break; \ - } \ - bc = &bridge_control_table[ifd->ifd_cmd]; \ - \ - if (cmd == SIOCGDRVSPEC && \ - (bc->bc_flags & BC_F_COPYOUT) == 0) { \ - error = EINVAL; \ - break; \ - } else if (cmd == SIOCSDRVSPEC && \ - (bc->bc_flags & BC_F_COPYOUT) != 0) { \ - error = EINVAL; \ - break; \ - } \ - \ - if (bc->bc_flags & BC_F_SUSER) { \ - error = kauth_authorize_generic(kauth_cred_get(), \ - KAUTH_GENERIC_ISSUSER); \ - if (error) \ - break; \ - } \ - \ - if (ifd->ifd_len != bc->bc_argsize || \ - ifd->ifd_len > sizeof (args)) { \ - error = EINVAL; \ - break; \ - } \ - \ - bzero(&args, sizeof (args)); \ - if (bc->bc_flags & BC_F_COPYIN) { \ - error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \ - if (error) \ - break; \ - } \ - \ - BRIDGE_LOCK(sc); \ - error = (*bc->bc_func)(sc, &args); \ - BRIDGE_UNLOCK(sc); \ - if (error) \ - break; \ - \ - if (bc->bc_flags & BC_F_COPYOUT) \ - error = copyout(&args, ifd->ifd_data, ifd->ifd_len); \ + __func__, ifp, error); + } + return 0; +} + +#define DRVSPEC do { \ + if (ifd->ifd_cmd >= bridge_control_table_size) { \ + error = EINVAL; \ + break; \ + } \ + bc = &bridge_control_table[ifd->ifd_cmd]; \ + \ + if (cmd == SIOCGDRVSPEC && \ + (bc->bc_flags & BC_F_COPYOUT) == 0) { \ + error = EINVAL; \ + break; \ + } else if (cmd == SIOCSDRVSPEC && \ + (bc->bc_flags & BC_F_COPYOUT) != 0) { \ + error = EINVAL; \ + break; \ + } \ + \ + if (bc->bc_flags & BC_F_SUSER) { \ + error = kauth_authorize_generic(kauth_cred_get(), \ + KAUTH_GENERIC_ISSUSER); \ + if (error) \ + break; \ + } \ + \ + if (ifd->ifd_len != bc->bc_argsize || \ + ifd->ifd_len > sizeof (args)) { \ + error = EINVAL; \ + break; \ + } \ + \ + bzero(&args, sizeof (args)); \ + if 
(bc->bc_flags & BC_F_COPYIN) { \ + error = copyin(ifd->ifd_data, &args, ifd->ifd_len); \ + if (error) \ + break; \ + } \ + \ + BRIDGE_LOCK(sc); \ + error = (*bc->bc_func)(sc, &args); \ + BRIDGE_UNLOCK(sc); \ + if (error) \ + break; \ + \ + if (bc->bc_flags & BC_F_COPYOUT) \ + error = copyout(&args, ifd->ifd_data, ifd->ifd_len); \ } while (0) /* @@ -1548,15 +1560,15 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data) BRIDGE_LOCK_ASSERT_NOTHELD(sc); #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_IOCTL) + if (if_bridge_debug & BR_DBGF_IOCTL) { printf("%s: ifp %s cmd 0x%08lx (%c%c [%lu] %c %lu)\n", __func__, ifp->if_xname, cmd, (cmd & IOC_IN) ? 'I' : ' ', (cmd & IOC_OUT) ? 'O' : ' ', IOCPARM_LEN(cmd), (char)IOCGROUP(cmd), cmd & 0xff); + } #endif /* BRIDGE_DEBUG */ switch (cmd) { - case SIOCSIFADDR: case SIOCAIFADDR: ifnet_set_flags(ifp, IFF_UP, IFF_UP); @@ -1588,7 +1600,7 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data) if (user_addr != USER_ADDR_NULL) { error = copyout(&ifmr->ifm_current, user_addr, - sizeof (int)); + sizeof(int)); } break; } @@ -1659,9 +1671,10 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data) case SIOCSIFLLADDR: error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len); - if (error != 0) + if (error != 0) { printf("%s: SIOCSIFLLADDR error %d\n", ifp->if_xname, error); + } break; case SIOCSIFMTU: @@ -1685,15 +1698,16 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data) break; } } - if (!error) + if (!error) { sc->sc_ifp->if_mtu = ifr->ifr_mtu; + } BRIDGE_UNLOCK(sc); break; default: error = ether_ioctl(ifp, cmd, data); #if BRIDGE_DEBUG - if (error != 0 && error != EOPNOTSUPP) + if (error != 0 && error != EOPNOTSUPP) { printf("%s: ifp %s cmd 0x%08lx " "(%c%c [%lu] %c %lu) failed error: %d\n", __func__, ifp->if_xname, cmd, @@ -1701,12 +1715,13 @@ bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data) (cmd & IOC_OUT) ? 
'O' : ' ', IOCPARM_LEN(cmd), (char)IOCGROUP(cmd), cmd & 0xff, error); + } #endif /* BRIDGE_DEBUG */ break; } BRIDGE_LOCK_ASSERT_NOTHELD(sc); - return (error); + return error; } #if HAS_IF_CAP @@ -1738,7 +1753,6 @@ bridge_mutecaps(struct bridge_softc *sc) bridge_set_ifcap(sc, bif, enabled); } - } static void @@ -1748,17 +1762,18 @@ bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set) struct ifreq ifr; int error; - bzero(&ifr, sizeof (ifr)); + bzero(&ifr, sizeof(ifr)); ifr.ifr_reqcap = set; if (ifp->if_capenable != set) { IFF_LOCKGIANT(ifp); error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr); IFF_UNLOCKGIANT(ifp); - if (error) + if (error) { printf("%s: %s error setting interface capabilities " "on %s\n", __func__, sc->sc_ifp->if_xname, ifp->if_xname); + } } } #endif /* HAS_IF_CAP */ @@ -1781,13 +1796,15 @@ bridge_set_tso(struct bridge_softc *sc) TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { ifnet_t ifp = bif->bif_ifp; - if (ifp == NULL) + if (ifp == NULL) { continue; + } if (offload & IFNET_TSO_IPV4) { if (ifp->if_hwassist & IFNET_TSO_IPV4) { - if (tso_v4_mtu > ifp->if_tso_v4_mtu) + if (tso_v4_mtu > ifp->if_tso_v4_mtu) { tso_v4_mtu = ifp->if_tso_v4_mtu; + } } else { offload &= ~IFNET_TSO_IPV4; tso_v4_mtu = 0; @@ -1795,8 +1812,9 @@ bridge_set_tso(struct bridge_softc *sc) } if (offload & IFNET_TSO_IPV6) { if (ifp->if_hwassist & IFNET_TSO_IPV6) { - if (tso_v6_mtu > ifp->if_tso_v6_mtu) + if (tso_v6_mtu > ifp->if_tso_v6_mtu) { tso_v6_mtu = ifp->if_tso_v6_mtu; + } } else { offload &= ~IFNET_TSO_IPV6; tso_v6_mtu = 0; @@ -1808,10 +1826,11 @@ bridge_set_tso(struct bridge_softc *sc) error = ifnet_set_offload(sc->sc_ifp, offload); if (error != 0) { #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { printf("%s: ifnet_set_offload(%s, 0x%x) " "failed %d\n", __func__, sc->sc_ifp->if_xname, offload, error); + } #endif /* BRIDGE_DEBUG */ goto done; } @@ -1820,40 +1839,44 @@ bridge_set_tso(struct bridge_softc *sc) * as large as the interface MTU */ if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV4) { - if (tso_v4_mtu < sc->sc_ifp->if_mtu) + if (tso_v4_mtu < sc->sc_ifp->if_mtu) { tso_v4_mtu = sc->sc_ifp->if_mtu; + } error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET, tso_v4_mtu); if (error != 0) { #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { printf("%s: ifnet_set_tso_mtu(%s, " "AF_INET, %u) failed %d\n", __func__, sc->sc_ifp->if_xname, tso_v4_mtu, error); + } #endif /* BRIDGE_DEBUG */ goto done; } } if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV6) { - if (tso_v6_mtu < sc->sc_ifp->if_mtu) + if (tso_v6_mtu < sc->sc_ifp->if_mtu) { tso_v6_mtu = sc->sc_ifp->if_mtu; + } error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET6, tso_v6_mtu); if (error != 0) { #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { printf("%s: ifnet_set_tso_mtu(%s, " "AF_INET6, %u) failed %d\n", __func__, sc->sc_ifp->if_xname, tso_v6_mtu, error); + } #endif /* BRIDGE_DEBUG */ goto done; } } } done: - return (error); + return error; } /* @@ -1871,11 +1894,12 @@ bridge_lookup_member(struct bridge_softc *sc, const char *name) TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { ifp = bif->bif_ifp; - if (strcmp(ifp->if_xname, name) == 0) - return (bif); + if (strcmp(ifp->if_xname, name) == 0) { + return bif; + } } - return (NULL); + return NULL; } /* @@ -1891,16 +1915,17 @@ bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp) BRIDGE_LOCK_ASSERT_HELD(sc); 
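bridge_set_tso() above negotiates segmentation offload for the whole bridge: a TSO family stays enabled only if every member supports it, the advertised TSO MTU is the minimum over the members, and the result is then raised to at least the bridge interface's own MTU. A compact sketch of that reduction under assumed types (struct member and the function name are illustrative, not from the patch):

    /*
     * Sketch of the TSO negotiation bridge_set_tso() performs.
     */
    #include <stdint.h>

    struct member {
        int      tso_v4;        /* member supports IPv4 TSO */
        uint32_t tso_v4_mtu;    /* member's TSO MTU for IPv4 */
    };

    /* Returns 0 when TSO must be disabled, else the bridge-wide TSO MTU. */
    static uint32_t
    common_tso_v4_mtu(const struct member *m, int n, uint32_t bridge_mtu)
    {
        uint32_t mtu = UINT32_MAX;
        int i;

        for (i = 0; i < n; i++) {
            if (!m[i].tso_v4) {
                return 0;               /* one member lacks TSO: disable it */
            }
            if (m[i].tso_v4_mtu < mtu) {
                mtu = m[i].tso_v4_mtu;  /* keep the minimum across members */
            }
        }
        return (mtu < bridge_mtu) ? bridge_mtu : mtu; /* at least bridge MTU */
    }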
TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { - if (bif->bif_ifp == member_ifp) - return (bif); + if (bif->bif_ifp == member_ifp) { + return bif; + } } - return (NULL); + return NULL; } static errno_t bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol, - mbuf_t *data, char **frame_ptr) + mbuf_t *data, char **frame_ptr) { #pragma unused(protocol) errno_t error = 0; @@ -1910,8 +1935,9 @@ bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol, size_t frmlen = 0; mbuf_t m = *data; - if ((m->m_flags & M_PROTO1)) + if ((m->m_flags & M_PROTO1)) { goto out; + } if (*frame_ptr >= (char *)mbuf_datastart(m) && *frame_ptr <= (char *)mbuf_data(m)) { @@ -1966,13 +1992,13 @@ bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol, out: BRIDGE_LOCK_ASSERT_NOTHELD(sc); - return (error); + return error; } #if BRIDGE_MEMBER_OUT_FILTER static errno_t bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol, - mbuf_t *data) + mbuf_t *data) { #pragma unused(protocol) errno_t error = 0; @@ -1980,8 +2006,9 @@ bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol, struct bridge_softc *sc = bif->bif_sc; mbuf_t m = *data; - if ((m->m_flags & M_PROTO1)) + if ((m->m_flags & M_PROTO1)) { goto out; + } #if BRIDGE_DEBUG if (if_bridge_debug & BR_DBGF_OUTPUT) { @@ -2001,13 +2028,13 @@ bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol, out: BRIDGE_LOCK_ASSERT_NOTHELD(sc); - return (error); + return error; } #endif /* BRIDGE_MEMBER_OUT_FILTER */ static void bridge_iff_event(void *cookie, ifnet_t ifp, protocol_family_t protocol, - const struct kev_msg *event_msg) + const struct kev_msg *event_msg) { #pragma unused(protocol) struct bridge_iflist *bif = (struct bridge_iflist *)cookie; @@ -2017,52 +2044,53 @@ bridge_iff_event(void *cookie, ifnet_t ifp, protocol_family_t protocol, event_msg->kev_class == KEV_NETWORK_CLASS && event_msg->kev_subclass == KEV_DL_SUBCLASS) { #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname, event_msg->event_code, dlil_kev_dl_code_str(event_msg->event_code)); + } #endif /* BRIDGE_DEBUG */ switch (event_msg->event_code) { - case KEV_DL_IF_DETACHING: - case KEV_DL_IF_DETACHED: { - bridge_ifdetach(bif, ifp); - break; - } - case KEV_DL_LINK_OFF: - case KEV_DL_LINK_ON: { - bridge_iflinkevent(ifp); + case KEV_DL_IF_DETACHING: + case KEV_DL_IF_DETACHED: { + bridge_ifdetach(bif, ifp); + break; + } + case KEV_DL_LINK_OFF: + case KEV_DL_LINK_ON: { + bridge_iflinkevent(ifp); #if BRIDGESTP - bstp_linkstate(ifp, event_msg->event_code); + bstp_linkstate(ifp, event_msg->event_code); #endif /* BRIDGESTP */ - break; - } - case KEV_DL_SIFFLAGS: { - if ((bif->bif_flags & BIFF_PROMISC) == 0 && - (ifp->if_flags & IFF_UP)) { - errno_t error; - - error = ifnet_set_promiscuous(ifp, 1); - if (error != 0) { - printf("%s: " - "ifnet_set_promiscuous (%s)" - " failed %d\n", - __func__, ifp->if_xname, - error); - } else { - bif->bif_flags |= BIFF_PROMISC; - } + break; + } + case KEV_DL_SIFFLAGS: { + if ((bif->bif_flags & BIFF_PROMISC) == 0 && + (ifp->if_flags & IFF_UP)) { + errno_t error; + + error = ifnet_set_promiscuous(ifp, 1); + if (error != 0) { + printf("%s: " + "ifnet_set_promiscuous (%s)" + " failed %d\n", + __func__, ifp->if_xname, + error); + } else { + bif->bif_flags |= BIFF_PROMISC; } - break; - } - case KEV_DL_IFCAP_CHANGED: { - BRIDGE_LOCK(sc); - bridge_set_tso(sc); - BRIDGE_UNLOCK(sc); - break; } 
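The bridge_iff_* callbacks around here are interface-filter entry points: the bridge attaches one filter per member through the kpi_interfacefilter KPI so it can intercept that member's traffic in bridge_iff_input()/bridge_iff_output() and learn about detaches in bridge_iff_detached(). A hedged sketch of that registration pattern (the callback body and the reverse-DNS name are placeholders, not the bridge's own):

    /*
     * Sketch of per-member interface-filter registration.
     */
    #include <net/kpi_interfacefilter.h>
    #include <string.h>

    static errno_t
    my_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
        mbuf_t *data, char **frame_ptr)
    {
        /* inspect or redirect the inbound packet here */
        return 0;       /* 0 lets the packet continue up the stack */
    }

    static errno_t
    attach_member_filter(ifnet_t member, void *cookie,
        interface_filter_t *ref_out)
    {
        struct iff_filter iff;

        memset(&iff, 0, sizeof(iff));
        iff.iff_cookie = cookie;        /* handed back to every callback */
        iff.iff_name = "com.example.bridge.filter";
        iff.iff_input = my_iff_input;   /* iff_output/iff_event/iff_detached
                                         * can be filled in the same way */
        return iflt_attach(member, &iff, ref_out);
    }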
- default: - break; + break; + } + case KEV_DL_IFCAP_CHANGED: { + BRIDGE_LOCK(sc); + bridge_set_tso(sc); + BRIDGE_UNLOCK(sc); + break; + } + default: + break; } } } @@ -2079,8 +2107,9 @@ bridge_iff_detached(void *cookie, ifnet_t ifp) struct bridge_iflist *bif = (struct bridge_iflist *)cookie; #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { printf("%s: %s\n", __func__, ifp->if_xname); + } #endif /* BRIDGE_DEBUG */ bridge_ifdetach(bif, ifp); @@ -2090,53 +2119,57 @@ bridge_iff_detached(void *cookie, ifnet_t ifp) static errno_t bridge_proto_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet, - char *header) + char *header) { #pragma unused(protocol, packet, header) #if BRIDGE_DEBUG printf("%s: unexpected packet from %s\n", __func__, ifp->if_xname); #endif /* BRIDGE_DEBUG */ - return (0); + return 0; } static int bridge_attach_protocol(struct ifnet *ifp) { - int error; - struct ifnet_attach_proto_param reg; + int error; + struct ifnet_attach_proto_param reg; #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { printf("%s: %s\n", __func__, ifp->if_xname); + } #endif /* BRIDGE_DEBUG */ - bzero(®, sizeof (reg)); + bzero(®, sizeof(reg)); reg.input = bridge_proto_input; error = ifnet_attach_protocol(ifp, PF_BRIDGE, ®); - if (error) + if (error) { printf("%s: ifnet_attach_protocol(%s) failed, %d\n", __func__, ifp->if_xname, error); + } - return (error); + return error; } static int bridge_detach_protocol(struct ifnet *ifp) { - int error; + int error; #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { printf("%s: %s\n", __func__, ifp->if_xname); + } #endif /* BRIDGE_DEBUG */ error = ifnet_detach_protocol(ifp, PF_BRIDGE); - if (error) + if (error) { printf("%s: ifnet_detach_protocol(%s) failed, %d\n", __func__, ifp->if_xname, error); + } - return (error); + return error; } /* @@ -2146,7 +2179,7 @@ bridge_detach_protocol(struct ifnet *ifp) */ static void bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif, - int gone) + int gone) { struct ifnet *ifs = bif->bif_ifp, *bifp = sc->sc_ifp; int lladdr_changed = 0, error, filt_attached; @@ -2186,8 +2219,8 @@ bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif, break; case IFT_GIF: - /* currently not supported */ - /* FALLTHRU */ + /* currently not supported */ + /* FALLTHRU */ default: VERIFY(0); /* NOTREACHED */ @@ -2226,13 +2259,13 @@ bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif, TAILQ_FIRST(&sc->sc_iflist)->bif_ifp; bcopy(IF_LLADDR(fif), eaddr, ETHER_ADDR_LEN); sc->sc_ifaddr = fif; - ifnet_reference(fif); /* for sc_ifaddr */ + ifnet_reference(fif); /* for sc_ifaddr */ } lladdr_changed = 1; } #if HAS_IF_CAP - bridge_mutecaps(sc); /* recalculate now this interface is removed */ + bridge_mutecaps(sc); /* recalculate now this interface is removed */ #endif /* HAS_IF_CAP */ error = bridge_set_tso(sc); @@ -2259,22 +2292,25 @@ bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif, } if (lladdr_changed && - (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) + (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) { printf("%s: ifnet_set_lladdr failed %d\n", __func__, error); + } - if (event_code != 0) + if (event_code != 0) { bridge_link_event(bifp, event_code); + } #if BRIDGESTP if (bsd_mode) { - bstp_destroy(&bif->bif_stp); /* prepare to free */ + bstp_destroy(&bif->bif_stp); /* prepare to 
free */ } #endif /* BRIDGESTP */ - if (filt_attached) + if (filt_attached) { iflt_detach(bif->bif_iff_ref); - else + } else { _FREE(bif, M_DEVBUF); + } ifs->if_bridge = NULL; ifnet_release(ifs); @@ -2314,27 +2350,32 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) boolean_t bsd_mode = bridge_in_bsd_mode(sc); ifs = ifunit(req->ifbr_ifsname); - if (ifs == NULL) - return (ENOENT); - if (ifs->if_ioctl == NULL) /* must be supported */ - return (EINVAL); + if (ifs == NULL) { + return ENOENT; + } + if (ifs->if_ioctl == NULL) { /* must be supported */ + return EINVAL; + } if (IFNET_IS_INTCOPROC(ifs)) { - return (EINVAL); + return EINVAL; } if (bsd_mode) { /* If it's in the span list, it can't be a member. */ TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) - if (ifs == bif->bif_ifp) - return (EBUSY); + if (ifs == bif->bif_ifp) { + return EBUSY; + } } - if (ifs->if_bridge == sc) - return (EEXIST); + if (ifs->if_bridge == sc) { + return EEXIST; + } - if (ifs->if_bridge != NULL) - return (EBUSY); + if (ifs->if_bridge != NULL) { + return EBUSY; + } switch (ifs->if_type) { case IFT_ETHER: @@ -2342,15 +2383,16 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) /* permitted interface types */ break; case IFT_GIF: - /* currently not supported */ - /* FALLTHRU */ + /* currently not supported */ + /* FALLTHRU */ default: - return (EINVAL); + return EINVAL; } - bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_WAITOK | M_ZERO); - if (bif == NULL) - return (ENOMEM); + bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO); + if (bif == NULL) { + return ENOMEM; + } bif->bif_ifp = ifs; ifnet_reference(ifs); @@ -2361,13 +2403,13 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) bif->bif_sc = sc; /* Allow the first Ethernet member to define the MTU */ - if (TAILQ_EMPTY(&sc->sc_iflist)) + if (TAILQ_EMPTY(&sc->sc_iflist)) { sc->sc_ifp->if_mtu = ifs->if_mtu; - else if (sc->sc_ifp->if_mtu != ifs->if_mtu) { + } else if (sc->sc_ifp->if_mtu != ifs->if_mtu) { printf("%s: %s: invalid MTU for %s", __func__, sc->sc_ifp->if_xname, ifs->if_xname); - return (EINVAL); + return EINVAL; } /* @@ -2379,7 +2421,7 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) { bcopy(IF_LLADDR(ifs), eaddr, ETHER_ADDR_LEN); sc->sc_ifaddr = ifs; - ifnet_reference(ifs); /* for sc_ifaddr */ + ifnet_reference(ifs); /* for sc_ifaddr */ lladdr_changed = 1; } @@ -2412,8 +2454,9 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) error = ifnet_set_promiscuous(ifs, 1); if (error) { /* Ignore error when device is not up */ - if (error != ENETDOWN) + if (error != ENETDOWN) { goto out; + } error = 0; } else { bif->bif_flags |= BIFF_PROMISC; @@ -2427,10 +2470,11 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) /* * The new member may change the link status of the bridge interface */ - if (interface_media_active(ifs)) + if (interface_media_active(ifs)) { bif->bif_flags |= BIFF_MEDIA_ACTIVE; - else + } else { bif->bif_flags &= ~BIFF_MEDIA_ACTIVE; + } event_code = bridge_updatelinkstatus(sc); @@ -2444,7 +2488,7 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) /* * install an interface filter */ - memset(&iff, 0, sizeof (struct iff_filter)); + memset(&iff, 0, sizeof(struct iff_filter)); iff.iff_cookie = bif; iff.iff_name = "com.apple.kernel.bsd.net.if_bridge"; if (bsd_mode) { @@ -2478,19 +2522,22 @@ bridge_ioctl_add(struct bridge_softc *sc, void *arg) bif->bif_flags |= BIFF_PROTO_ATTACHED; if (lladdr_changed && - (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) 
!= 0) + (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0) { printf("%s: ifnet_set_lladdr failed %d\n", __func__, error); + } - if (event_code != 0) + if (event_code != 0) { bridge_link_event(bifp, event_code); + } BRIDGE_LOCK(sc); out: - if (error && bif != NULL) + if (error && bif != NULL) { bridge_delete_member(sc, bif, 1); + } - return (error); + return error; } static int @@ -2500,19 +2547,20 @@ bridge_ioctl_del(struct bridge_softc *sc, void *arg) struct bridge_iflist *bif; bif = bridge_lookup_member(sc, req->ifbr_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } bridge_delete_member(sc, bif, 0); - return (0); + return 0; } static int bridge_ioctl_purge(struct bridge_softc *sc, void *arg) { #pragma unused(sc, arg) - return (0); + return 0; } static int @@ -2522,8 +2570,9 @@ bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) struct bridge_iflist *bif; bif = bridge_lookup_member(sc, req->ifbr_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } if (bridge_in_bsd_mode(sc)) { struct bstp_port *bp; @@ -2536,18 +2585,24 @@ bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) req->ifbr_role = bp->bp_role; req->ifbr_stpflags = bp->bp_flags; /* Copy STP state options as flags */ - if (bp->bp_operedge) + if (bp->bp_operedge) { req->ifbr_ifsflags |= IFBIF_BSTP_EDGE; - if (bp->bp_flags & BSTP_PORT_AUTOEDGE) + } + if (bp->bp_flags & BSTP_PORT_AUTOEDGE) { req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE; - if (bp->bp_ptp_link) + } + if (bp->bp_ptp_link) { req->ifbr_ifsflags |= IFBIF_BSTP_PTP; - if (bp->bp_flags & BSTP_PORT_AUTOPTP) + } + if (bp->bp_flags & BSTP_PORT_AUTOPTP) { req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP; - if (bp->bp_flags & BSTP_PORT_ADMEDGE) + } + if (bp->bp_flags & BSTP_PORT_ADMEDGE) { req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE; - if (bp->bp_flags & BSTP_PORT_ADMCOST) + } + if (bp->bp_flags & BSTP_PORT_ADMCOST) { req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST; + } } req->ifbr_ifsflags = bif->bif_ifflags; req->ifbr_portno = bif->bif_ifp->if_index & 0xfff; @@ -2555,7 +2610,7 @@ bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg) req->ifbr_addrmax = bif->bif_addrmax; req->ifbr_addrexceeded = bif->bif_addrexceeded; - return (0); + return 0; } static int @@ -2569,28 +2624,32 @@ bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) #endif /* BRIDGESTP */ if (!bridge_in_bsd_mode(sc)) { - return (EINVAL); + return EINVAL; } bif = bridge_lookup_member(sc, req->ifbr_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } - if (req->ifbr_ifsflags & IFBIF_SPAN) + if (req->ifbr_ifsflags & IFBIF_SPAN) { /* SPAN is readonly */ - return (EINVAL); + return EINVAL; + } #if BRIDGESTP if (req->ifbr_ifsflags & IFBIF_STP) { if ((bif->bif_ifflags & IFBIF_STP) == 0) { error = bstp_enable(&bif->bif_stp); - if (error) - return (error); + if (error) { + return error; + } } } else { - if ((bif->bif_ifflags & IFBIF_STP) != 0) + if ((bif->bif_ifflags & IFBIF_STP) != 0) { bstp_disable(&bif->bif_stp); + } } /* Pass on STP flags */ @@ -2600,15 +2659,16 @@ bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg) bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0); bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 
1 : 0); #else /* !BRIDGESTP */ - if (req->ifbr_ifsflags & IFBIF_STP) - return (EOPNOTSUPP); + if (req->ifbr_ifsflags & IFBIF_STP) { + return EOPNOTSUPP; + } #endif /* !BRIDGESTP */ /* Save the bits relating to the bridge */ bif->bif_ifflags = req->ifbr_ifsflags & IFBIFMASK; - return (0); + return 0; } static int @@ -2620,7 +2680,7 @@ bridge_ioctl_scache(struct bridge_softc *sc, void *arg) if (bridge_in_bsd_mode(sc)) { bridge_rttrim(sc); } - return (0); + return 0; } static int @@ -2630,74 +2690,74 @@ bridge_ioctl_gcache(struct bridge_softc *sc, void *arg) param->ifbrp_csize = sc->sc_brtmax; - return (0); -} - -#define BRIDGE_IOCTL_GIFS do { \ - struct bridge_iflist *bif; \ - struct ifbreq breq; \ - char *buf, *outbuf; \ - unsigned int count, buflen, len; \ - \ - count = 0; \ - TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \ - count++; \ - if (bridge_in_bsd_mode(sc)) { \ - TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \ - count++; \ - } \ - \ - buflen = sizeof (breq) * count; \ - if (bifc->ifbic_len == 0) { \ - bifc->ifbic_len = buflen; \ - return (0); \ - } \ - BRIDGE_UNLOCK(sc); \ - outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \ - BRIDGE_LOCK(sc); \ - \ - count = 0; \ - buf = outbuf; \ - len = min(bifc->ifbic_len, buflen); \ - bzero(&breq, sizeof (breq)); \ - TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \ - if (len < sizeof (breq)) \ - break; \ - \ - snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \ - "%s", bif->bif_ifp->if_xname); \ - /* Fill in the ifbreq structure */ \ - error = bridge_ioctl_gifflags(sc, &breq); \ - if (error) \ - break; \ - memcpy(buf, &breq, sizeof (breq)); \ - count++; \ - buf += sizeof (breq); \ - len -= sizeof (breq); \ - } \ - if (bridge_in_bsd_mode(sc)) { \ - TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \ - if (len < sizeof (breq)) \ - break; \ - \ - snprintf(breq.ifbr_ifsname, \ - sizeof (breq.ifbr_ifsname), \ - "%s", bif->bif_ifp->if_xname); \ - breq.ifbr_ifsflags = bif->bif_ifflags; \ - breq.ifbr_portno \ - = bif->bif_ifp->if_index & 0xfff; \ - memcpy(buf, &breq, sizeof (breq)); \ - count++; \ - buf += sizeof (breq); \ - len -= sizeof (breq); \ - } \ - } \ - \ - BRIDGE_UNLOCK(sc); \ - bifc->ifbic_len = sizeof (breq) * count; \ - error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \ - BRIDGE_LOCK(sc); \ - _FREE(outbuf, M_TEMP); \ + return 0; +} + +#define BRIDGE_IOCTL_GIFS do { \ + struct bridge_iflist *bif; \ + struct ifbreq breq; \ + char *buf, *outbuf; \ + unsigned int count, buflen, len; \ + \ + count = 0; \ + TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) \ + count++; \ + if (bridge_in_bsd_mode(sc)) { \ + TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) \ + count++; \ + } \ + \ + buflen = sizeof (breq) * count; \ + if (bifc->ifbic_len == 0) { \ + bifc->ifbic_len = buflen; \ + return (0); \ + } \ + BRIDGE_UNLOCK(sc); \ + outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \ + BRIDGE_LOCK(sc); \ + \ + count = 0; \ + buf = outbuf; \ + len = min(bifc->ifbic_len, buflen); \ + bzero(&breq, sizeof (breq)); \ + TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \ + if (len < sizeof (breq)) \ + break; \ + \ + snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname), \ + "%s", bif->bif_ifp->if_xname); \ + /* Fill in the ifbreq structure */ \ + error = bridge_ioctl_gifflags(sc, &breq); \ + if (error) \ + break; \ + memcpy(buf, &breq, sizeof (breq)); \ + count++; \ + buf += sizeof (breq); \ + len -= sizeof (breq); \ + } \ + if (bridge_in_bsd_mode(sc)) { \ + TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { \ + if (len < sizeof (breq)) \ + break; \ 
+ \ + snprintf(breq.ifbr_ifsname, \ + sizeof (breq.ifbr_ifsname), \ + "%s", bif->bif_ifp->if_xname); \ + breq.ifbr_ifsflags = bif->bif_ifflags; \ + breq.ifbr_portno \ + = bif->bif_ifp->if_index & 0xfff; \ + memcpy(buf, &breq, sizeof (breq)); \ + count++; \ + buf += sizeof (breq); \ + len -= sizeof (breq); \ + } \ + } \ + \ + BRIDGE_UNLOCK(sc); \ + bifc->ifbic_len = sizeof (breq) * count; \ + error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len); \ + BRIDGE_LOCK(sc); \ + _FREE(outbuf, M_TEMP); \ } while (0) static int @@ -2708,7 +2768,7 @@ bridge_ioctl_gifs64(struct bridge_softc *sc, void *arg) BRIDGE_IOCTL_GIFS; - return (error); + return error; } static int @@ -2719,65 +2779,65 @@ bridge_ioctl_gifs32(struct bridge_softc *sc, void *arg) BRIDGE_IOCTL_GIFS; - return (error); -} - -#define BRIDGE_IOCTL_RTS do { \ - struct bridge_rtnode *brt; \ - char *buf; \ - char *outbuf = NULL; \ - unsigned int count, buflen, len; \ - unsigned long now; \ - \ - if (bac->ifbac_len == 0) \ - return (0); \ - \ - bzero(&bareq, sizeof (bareq)); \ - count = 0; \ - if (!bridge_in_bsd_mode(sc)) { \ - goto out; \ - } \ - LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \ - count++; \ - buflen = sizeof (bareq) * count; \ - \ - BRIDGE_UNLOCK(sc); \ - outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \ - BRIDGE_LOCK(sc); \ - \ - count = 0; \ - buf = outbuf; \ - len = min(bac->ifbac_len, buflen); \ - LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \ - if (len < sizeof (bareq)) \ - goto out; \ - snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \ - "%s", brt->brt_ifp->if_xname); \ - memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \ - bareq.ifba_vlan = brt->brt_vlan; \ - if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \ - now = (unsigned long) net_uptime(); \ - if (now < brt->brt_expire) \ - bareq.ifba_expire = \ - brt->brt_expire - now; \ - } else \ - bareq.ifba_expire = 0; \ - bareq.ifba_flags = brt->brt_flags; \ - \ - memcpy(buf, &bareq, sizeof (bareq)); \ - count++; \ - buf += sizeof (bareq); \ - len -= sizeof (bareq); \ - } \ -out: \ - bac->ifbac_len = sizeof (bareq) * count; \ - if (outbuf != NULL) { \ - BRIDGE_UNLOCK(sc); \ - error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \ - _FREE(outbuf, M_TEMP); \ - BRIDGE_LOCK(sc); \ - } \ - return (error); \ + return error; +} + +#define BRIDGE_IOCTL_RTS do { \ + struct bridge_rtnode *brt; \ + char *buf; \ + char *outbuf = NULL; \ + unsigned int count, buflen, len; \ + unsigned long now; \ + \ + if (bac->ifbac_len == 0) \ + return (0); \ + \ + bzero(&bareq, sizeof (bareq)); \ + count = 0; \ + if (!bridge_in_bsd_mode(sc)) { \ + goto out; \ + } \ + LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) \ + count++; \ + buflen = sizeof (bareq) * count; \ + \ + BRIDGE_UNLOCK(sc); \ + outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \ + BRIDGE_LOCK(sc); \ + \ + count = 0; \ + buf = outbuf; \ + len = min(bac->ifbac_len, buflen); \ + LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { \ + if (len < sizeof (bareq)) \ + goto out; \ + snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname), \ + "%s", brt->brt_ifp->if_xname); \ + memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \ + bareq.ifba_vlan = brt->brt_vlan; \ + if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { \ + now = (unsigned long) net_uptime(); \ + if (now < brt->brt_expire) \ + bareq.ifba_expire = \ + brt->brt_expire - now; \ + } else \ + bareq.ifba_expire = 0; \ + bareq.ifba_flags = brt->brt_flags; \ + \ + memcpy(buf, &bareq, sizeof (bareq)); \ + count++; \ + buf += 
sizeof (bareq); \ + len -= sizeof (bareq); \ + } \ +out: \ + bac->ifbac_len = sizeof (bareq) * count; \ + if (outbuf != NULL) { \ + BRIDGE_UNLOCK(sc); \ + error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len); \ + _FREE(outbuf, M_TEMP); \ + BRIDGE_LOCK(sc); \ + } \ + return (error); \ } while (0) static int @@ -2788,7 +2848,7 @@ bridge_ioctl_rts64(struct bridge_softc *sc, void *arg) int error = 0; BRIDGE_IOCTL_RTS; - return (error); + return error; } static int @@ -2799,7 +2859,7 @@ bridge_ioctl_rts32(struct bridge_softc *sc, void *arg) int error = 0; BRIDGE_IOCTL_RTS; - return (error); + return error; } static int @@ -2810,17 +2870,18 @@ bridge_ioctl_saddr32(struct bridge_softc *sc, void *arg) int error; if (!bridge_in_bsd_mode(sc)) { - return (0); + return 0; } bif = bridge_lookup_member(sc, req->ifba_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, req->ifba_flags); - return (error); + return error; } static int @@ -2831,17 +2892,18 @@ bridge_ioctl_saddr64(struct bridge_softc *sc, void *arg) int error; if (!bridge_in_bsd_mode(sc)) { - return (0); + return 0; } bif = bridge_lookup_member(sc, req->ifba_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1, req->ifba_flags); - return (error); + return error; } static int @@ -2850,7 +2912,7 @@ bridge_ioctl_sto(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; sc->sc_brttimeout = param->ifbrp_ctime; - return (0); + return 0; } static int @@ -2859,7 +2921,7 @@ bridge_ioctl_gto(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; param->ifbrp_ctime = sc->sc_brttimeout; - return (0); + return 0; } static int @@ -2868,9 +2930,9 @@ bridge_ioctl_daddr32(struct bridge_softc *sc, void *arg) struct ifbareq32 *req = arg; if (!bridge_in_bsd_mode(sc)) { - return (0); + return 0; } - return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); + return bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan); } static int @@ -2879,9 +2941,9 @@ bridge_ioctl_daddr64(struct bridge_softc *sc, void *arg) struct ifbareq64 *req = arg; if (!bridge_in_bsd_mode(sc)) { - return (0); + return 0; } - return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan)); + return bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan); } static int @@ -2890,10 +2952,10 @@ bridge_ioctl_flush(struct bridge_softc *sc, void *arg) struct ifbreq *req = arg; if (!bridge_in_bsd_mode(sc)) { - return (0); + return 0; } bridge_rtflush(sc, req->ifbr_ifsflags); - return (0); + return 0; } static int @@ -2903,10 +2965,10 @@ bridge_ioctl_gpri(struct bridge_softc *sc, void *arg) struct bstp_state *bs = &sc->sc_stp; if (!bridge_in_bsd_mode(sc)) { - return (0); + return 0; } param->ifbrp_prio = bs->bs_bridge_priority; - return (0); + return 0; } static int @@ -2916,12 +2978,12 @@ bridge_ioctl_spri(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } - return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio)); + return bstp_set_priority(&sc->sc_stp, param->ifbrp_prio); #else /* !BRIDGESTP */ #pragma unused(sc, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; #endif /* !BRIDGESTP */ } @@ -2932,10 +2994,10 @@ bridge_ioctl_ght(struct bridge_softc *sc, void *arg) struct bstp_state *bs = &sc->sc_stp; if (!bridge_in_bsd_mode(sc)) { - return (0); + return 0; } param->ifbrp_hellotime = 
bs->bs_bridge_htime >> 8; - return (0); + return 0; } static int @@ -2945,12 +3007,12 @@ bridge_ioctl_sht(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } - return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime)); + return bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime); #else /* !BRIDGESTP */ #pragma unused(sc, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; #endif /* !BRIDGESTP */ } @@ -2961,12 +3023,12 @@ bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) struct bstp_state *bs; if (!bridge_in_bsd_mode(sc)) { - return (0); + return 0; } param = arg; bs = &sc->sc_stp; param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8; - return (0); + return 0; } static int @@ -2976,12 +3038,12 @@ bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } - return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay)); + return bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay); #else /* !BRIDGESTP */ #pragma unused(sc, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; #endif /* !BRIDGESTP */ } @@ -2992,12 +3054,12 @@ bridge_ioctl_gma(struct bridge_softc *sc, void *arg) struct bstp_state *bs; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } param = arg; bs = &sc->sc_stp; param->ifbrp_maxage = bs->bs_bridge_max_age >> 8; - return (0); + return 0; } static int @@ -3007,12 +3069,12 @@ bridge_ioctl_sma(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } - return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage)); + return bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage); #else /* !BRIDGESTP */ #pragma unused(sc, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; #endif /* !BRIDGESTP */ } @@ -3024,16 +3086,17 @@ bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) struct bridge_iflist *bif; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } bif = bridge_lookup_member(sc, req->ifbr_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } - return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority)); + return bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority); #else /* !BRIDGESTP */ #pragma unused(sc, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; #endif /* !BRIDGESTP */ } @@ -3045,16 +3108,17 @@ bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg) struct bridge_iflist *bif; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } bif = bridge_lookup_member(sc, req->ifbr_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } - return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost)); + return bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost); #else /* !BRIDGESTP */ #pragma unused(sc, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; #endif /* !BRIDGESTP */ } @@ -3065,7 +3129,7 @@ bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg) param->ifbrp_filter = sc->sc_filter_flags; - return (0); + return 0; } static int @@ -3073,17 +3137,19 @@ bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg) { struct ifbrparam *param = arg; - if (param->ifbrp_filter & ~IFBF_FILT_MASK) - return (EINVAL); + if (param->ifbrp_filter & ~IFBF_FILT_MASK) { + return EINVAL; + } #ifndef BRIDGE_IPF - if (param->ifbrp_filter & IFBF_FILT_USEIPF) - return (EINVAL); + if 
(param->ifbrp_filter & IFBF_FILT_USEIPF) { + return EINVAL; + } #endif sc->sc_filter_flags = param->ifbrp_filter; - return (0); + return 0; } static int @@ -3093,11 +3159,12 @@ bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg) struct bridge_iflist *bif; bif = bridge_lookup_member(sc, req->ifbr_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } bif->bif_addrmax = req->ifbr_addrmax; - return (0); + return 0; } static int @@ -3108,37 +3175,41 @@ bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) struct ifnet *ifs; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } ifs = ifunit(req->ifbr_ifsname); - if (ifs == NULL) - return (ENOENT); + if (ifs == NULL) { + return ENOENT; + } if (IFNET_IS_INTCOPROC(ifs)) { - return (EINVAL); + return EINVAL; } TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) - if (ifs == bif->bif_ifp) - return (EBUSY); + if (ifs == bif->bif_ifp) { + return EBUSY; + } - if (ifs->if_bridge != NULL) - return (EBUSY); + if (ifs->if_bridge != NULL) { + return EBUSY; + } switch (ifs->if_type) { - case IFT_ETHER: - case IFT_L2VLAN: - break; - case IFT_GIF: - /* currently not supported */ - /* FALLTHRU */ - default: - return (EINVAL); + case IFT_ETHER: + case IFT_L2VLAN: + break; + case IFT_GIF: + /* currently not supported */ + /* FALLTHRU */ + default: + return EINVAL; } - bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_WAITOK | M_ZERO); - if (bif == NULL) - return (ENOMEM); + bif = _MALLOC(sizeof(*bif), M_DEVBUF, M_WAITOK | M_ZERO); + if (bif == NULL) { + return ENOMEM; + } bif->bif_ifp = ifs; bif->bif_ifflags = IFBIF_SPAN; @@ -3147,7 +3218,7 @@ bridge_ioctl_addspan(struct bridge_softc *sc, void *arg) TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next); - return (0); + return 0; } static int @@ -3158,47 +3229,50 @@ bridge_ioctl_delspan(struct bridge_softc *sc, void *arg) struct ifnet *ifs; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } ifs = ifunit(req->ifbr_ifsname); - if (ifs == NULL) - return (ENOENT); + if (ifs == NULL) { + return ENOENT; + } TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) - if (ifs == bif->bif_ifp) - break; + if (ifs == bif->bif_ifp) { + break; + } - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } bridge_delete_span(sc, bif); - return (0); -} - -#define BRIDGE_IOCTL_GBPARAM do { \ - struct bstp_state *bs = &sc->sc_stp; \ - struct bstp_port *root_port; \ - \ - req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \ - req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \ - req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8; \ - \ - root_port = bs->bs_root_port; \ - if (root_port == NULL) \ - req->ifbop_root_port = 0; \ - else \ - req->ifbop_root_port = root_port->bp_ifp->if_index; \ - \ - req->ifbop_holdcount = bs->bs_txholdcount; \ - req->ifbop_priority = bs->bs_bridge_priority; \ - req->ifbop_protocol = bs->bs_protover; \ - req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \ - req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \ - req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \ - req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \ - req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \ - req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \ + return 0; +} + +#define BRIDGE_IOCTL_GBPARAM do { \ + struct bstp_state *bs = &sc->sc_stp; \ + struct bstp_port *root_port; \ + \ + req->ifbop_maxage = bs->bs_bridge_max_age >> 8; \ + req->ifbop_hellotime = bs->bs_bridge_htime >> 8; \ + req->ifbop_fwddelay = 
bs->bs_bridge_fdelay >> 8; \ + \ + root_port = bs->bs_root_port; \ + if (root_port == NULL) \ + req->ifbop_root_port = 0; \ + else \ + req->ifbop_root_port = root_port->bp_ifp->if_index; \ + \ + req->ifbop_holdcount = bs->bs_txholdcount; \ + req->ifbop_priority = bs->bs_bridge_priority; \ + req->ifbop_protocol = bs->bs_protover; \ + req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost; \ + req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id; \ + req->ifbop_designated_root = bs->bs_root_pv.pv_root_id; \ + req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id; \ + req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec; \ + req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec; \ } while (0) static int @@ -3209,7 +3283,7 @@ bridge_ioctl_gbparam32(struct bridge_softc *sc, void *arg) if (bridge_in_bsd_mode(sc)) { BRIDGE_IOCTL_GBPARAM; } - return (0); + return 0; } static int @@ -3220,7 +3294,7 @@ bridge_ioctl_gbparam64(struct bridge_softc *sc, void *arg) if (bridge_in_bsd_mode(sc)) { BRIDGE_IOCTL_GBPARAM; } - return (0); + return 0; } static int @@ -3229,63 +3303,63 @@ bridge_ioctl_grte(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; param->ifbrp_cexceeded = sc->sc_brtexceeded; - return (0); -} - -#define BRIDGE_IOCTL_GIFSSTP do { \ - struct bridge_iflist *bif; \ - struct bstp_port *bp; \ - struct ifbpstpreq bpreq; \ - char *buf, *outbuf; \ - unsigned int count, buflen, len; \ - \ - count = 0; \ - TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \ - if ((bif->bif_ifflags & IFBIF_STP) != 0) \ - count++; \ - } \ - \ - buflen = sizeof (bpreq) * count; \ - if (bifstp->ifbpstp_len == 0) { \ - bifstp->ifbpstp_len = buflen; \ - return (0); \ - } \ - \ - BRIDGE_UNLOCK(sc); \ - outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \ - BRIDGE_LOCK(sc); \ - \ - count = 0; \ - buf = outbuf; \ - len = min(bifstp->ifbpstp_len, buflen); \ - bzero(&bpreq, sizeof (bpreq)); \ - TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \ - if (len < sizeof (bpreq)) \ - break; \ - \ - if ((bif->bif_ifflags & IFBIF_STP) == 0) \ - continue; \ - \ - bp = &bif->bif_stp; \ - bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff; \ - bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \ - bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \ - bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \ - bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \ - bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \ - \ - memcpy(buf, &bpreq, sizeof (bpreq)); \ - count++; \ - buf += sizeof (bpreq); \ - len -= sizeof (bpreq); \ - } \ - \ - BRIDGE_UNLOCK(sc); \ - bifstp->ifbpstp_len = sizeof (bpreq) * count; \ + return 0; +} + +#define BRIDGE_IOCTL_GIFSSTP do { \ + struct bridge_iflist *bif; \ + struct bstp_port *bp; \ + struct ifbpstpreq bpreq; \ + char *buf, *outbuf; \ + unsigned int count, buflen, len; \ + \ + count = 0; \ + TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \ + if ((bif->bif_ifflags & IFBIF_STP) != 0) \ + count++; \ + } \ + \ + buflen = sizeof (bpreq) * count; \ + if (bifstp->ifbpstp_len == 0) { \ + bifstp->ifbpstp_len = buflen; \ + return (0); \ + } \ + \ + BRIDGE_UNLOCK(sc); \ + outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO); \ + BRIDGE_LOCK(sc); \ + \ + count = 0; \ + buf = outbuf; \ + len = min(bifstp->ifbpstp_len, buflen); \ + bzero(&bpreq, sizeof (bpreq)); \ + TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { \ + if (len < sizeof (bpreq)) \ + break; \ + \ + if ((bif->bif_ifflags & IFBIF_STP) == 0) \ + continue; \ + \ + bp = &bif->bif_stp; \ + bpreq.ifbp_portno = bif->bif_ifp->if_index & 
0xfff; \ + bpreq.ifbp_fwd_trans = bp->bp_forward_transitions; \ + bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost; \ + bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id; \ + bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \ + bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id; \ + \ + memcpy(buf, &bpreq, sizeof (bpreq)); \ + count++; \ + buf += sizeof (bpreq); \ + len -= sizeof (bpreq); \ + } \ + \ + BRIDGE_UNLOCK(sc); \ + bifstp->ifbpstp_len = sizeof (bpreq) * count; \ error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \ - BRIDGE_LOCK(sc); \ - _FREE(outbuf, M_TEMP); \ - return (error); \ + BRIDGE_LOCK(sc); \ + _FREE(outbuf, M_TEMP); \ + return (error); \ } while (0) static int @@ -3297,7 +3371,7 @@ bridge_ioctl_gifsstp32(struct bridge_softc *sc, void *arg) if (bridge_in_bsd_mode(sc)) { BRIDGE_IOCTL_GIFSSTP; } - return (error); + return error; } static int @@ -3309,7 +3383,7 @@ bridge_ioctl_gifsstp64(struct bridge_softc *sc, void *arg) if (bridge_in_bsd_mode(sc)) { BRIDGE_IOCTL_GIFSSTP; } - return (error); + return error; } static int @@ -3319,12 +3393,12 @@ bridge_ioctl_sproto(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } - return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto)); + return bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto); #else /* !BRIDGESTP */ #pragma unused(sc, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; #endif /* !BRIDGESTP */ } @@ -3335,12 +3409,12 @@ bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg) struct ifbrparam *param = arg; if (!bridge_in_bsd_mode(sc)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } - return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc)); + return bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc); #else /* !BRIDGESTP */ #pragma unused(sc, arg) - return (EOPNOTSUPP); + return EOPNOTSUPP; #endif /* !BRIDGESTP */ } @@ -3352,8 +3426,9 @@ bridge_ioctl_ghostfilter(struct bridge_softc *sc, void *arg) struct bridge_iflist *bif; bif = bridge_lookup_member(sc, req->ifbrhf_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } bzero(req, sizeof(struct ifbrhostfilter)); if (bif->bif_flags & BIFF_HOST_FILTER) { @@ -3362,7 +3437,7 @@ bridge_ioctl_ghostfilter(struct bridge_softc *sc, void *arg) ETHER_ADDR_LEN); req->ifbrhf_ipsrc = bif->bif_hf_ipsrc.s_addr; } - return (0); + return 0; } static int @@ -3372,8 +3447,9 @@ bridge_ioctl_shostfilter(struct bridge_softc *sc, void *arg) struct bridge_iflist *bif; bif = bridge_lookup_member(sc, req->ifbrhf_ifsname); - if (bif == NULL) - return (ENOENT); + if (bif == NULL) { + return ENOENT; + } INC_ATOMIC_INT64_LIM(net_api_stats.nas_vmnet_total); @@ -3384,17 +3460,19 @@ bridge_ioctl_shostfilter(struct bridge_softc *sc, void *arg) bcopy(req->ifbrhf_hwsrca, bif->bif_hf_hwsrc, ETHER_ADDR_LEN); if (bcmp(req->ifbrhf_hwsrca, ethernulladdr, - ETHER_ADDR_LEN) != 0) + ETHER_ADDR_LEN) != 0) { bif->bif_flags |= BIFF_HF_HWSRC; - else + } else { bif->bif_flags &= ~BIFF_HF_HWSRC; + } } if (req->ifbrhf_flags & IFBRHF_IPSRC) { bif->bif_hf_ipsrc.s_addr = req->ifbrhf_ipsrc; - if (bif->bif_hf_ipsrc.s_addr != INADDR_ANY) + if (bif->bif_hf_ipsrc.s_addr != INADDR_ANY) { bif->bif_flags |= BIFF_HF_IPSRC; - else + } else { bif->bif_flags &= ~BIFF_HF_IPSRC; + } } } else { bif->bif_flags &= ~(BIFF_HOST_FILTER | BIFF_HF_HWSRC | @@ -3403,7 +3481,7 @@ bridge_ioctl_shostfilter(struct bridge_softc *sc, void *arg) bif->bif_hf_ipsrc.s_addr = INADDR_ANY; } - return (0); + 
return 0;
 }

@@ -3419,16 +3497,18 @@ bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
 	struct bridge_softc *sc = ifp->if_bridge;

 #if BRIDGE_DEBUG
-	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+	if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
 		printf("%s: %s\n", __func__, ifp->if_xname);
+	}
 #endif /* BRIDGE_DEBUG */

 	/* Check if the interface is a bridge member */
 	if (sc != NULL) {
 		BRIDGE_LOCK(sc);
 		bif = bridge_lookup_member_if(sc, ifp);
-		if (bif != NULL)
+		if (bif != NULL) {
 			bridge_delete_member(sc, bif, 1);
+		}
 		BRIDGE_UNLOCK(sc);
 		return;
 	}
@@ -3438,10 +3518,10 @@ bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
 	if (bridge_in_bsd_mode(sc)) {
 		BRIDGE_LOCK(sc);
 		TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
-		if (ifp == bif->bif_ifp) {
-			bridge_delete_span(sc, bif);
-			break;
-		}
+		if (ifp == bif->bif_ifp) {
+			bridge_delete_span(sc, bif);
+			break;
+		}
 		BRIDGE_UNLOCK(sc);
 	}
 }
@@ -3461,17 +3541,18 @@ interface_media_active(struct ifnet *ifp)
 	bzero(&ifmr, sizeof(ifmr));
 	if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) {
-		if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0)
+		if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0) {
 			status = ifmr.ifm_status & IFM_ACTIVE ? 1 : 0;
+		}
 	}
-	return (status);
+	return status;
 }

 /*
  * bridge_updatelinkstatus:
  *
- *	Update the media active status of the bridge based on the
+ *	Update the media active status of the bridge based on the
  *	media active status of its members.
  *	If changed, return the corresponding on/off link event.
  */
@@ -3502,7 +3583,7 @@ bridge_updatelinkstatus(struct bridge_softc *sc)
 		event_code = KEV_DL_LINK_OFF;
 	}

-	return (event_code);
+	return event_code;
 }

 /*
@@ -3516,28 +3597,32 @@ bridge_iflinkevent(struct ifnet *ifp)
 	u_int32_t event_code = 0;

 #if BRIDGE_DEBUG
-	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
+	if (if_bridge_debug & BR_DBGF_LIFECYCLE) {
 		printf("%s: %s\n", __func__, ifp->if_xname);
+	}
 #endif /* BRIDGE_DEBUG */

 	/* Check if the interface is a bridge member */
-	if (sc == NULL)
+	if (sc == NULL) {
 		return;
+	}

 	BRIDGE_LOCK(sc);
 	bif = bridge_lookup_member_if(sc, ifp);
 	if (bif != NULL) {
-		if (interface_media_active(ifp))
+		if (interface_media_active(ifp)) {
 			bif->bif_flags |= BIFF_MEDIA_ACTIVE;
-		else
+		} else {
 			bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
+		}

-		event_code = bridge_updatelinkstatus(sc);
+		event_code = bridge_updatelinkstatus(sc);
 	}
 	BRIDGE_UNLOCK(sc);

-	if (event_code != 0)
+	if (event_code != 0) {
 		bridge_link_event(sc->sc_ifp, event_code);
+	}
 }

 /*
@@ -3571,18 +3656,20 @@ bridge_delayed_callback(void *param)
 	BRIDGE_LOCK(sc);

 #if BRIDGE_DEBUG_DELAYED_CALLBACK
-	if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
+	if (if_bridge_debug & BR_DBGF_DELAYED_CALL) {
 		printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
 		    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
 		    call->bdc_flags);
+	}
 #endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */

 	if (call->bdc_flags & BDCF_CANCELLING) {
 		wakeup(call);
 	} else {
-		if ((sc->sc_flags & SCF_DETACHING) == 0)
+		if ((sc->sc_flags & SCF_DETACHING) == 0) {
 			(*call->bdc_func)(sc);
+		}
 	}
 	call->bdc_flags &= ~BDCF_OUTSTANDING;
 	BRIDGE_UNLOCK(sc);
 }
@@ -3591,7 +3678,7 @@
  * bridge_schedule_delayed_call:
  *
  *	Schedule a function to be called on a separate thread
- *	The actual call may be scheduled to run at a given time or ASAP.
+ *	The actual call may be scheduled to run at a given time or ASAP.
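 *
 *	Illustrative sketch only (not part of this change): with a non-zero
 *	bdc_ts, the scheduling below reduces to
 *
 *		uint64_t interval, deadline;
 *		nanoseconds_to_absolutetime((uint64_t)ts.tv_sec * NSEC_PER_SEC +
 *		    ts.tv_nsec, &interval);
 *		clock_absolutetime_interval_to_deadline(interval, &deadline);
 *		thread_call_func_delayed(fn, call, deadline);
 *
 *	where ts stands for the call's bdc_ts field and fn for
 *	bridge_delayed_callback; both names here are shorthand.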
*/ static void bridge_schedule_delayed_call(struct bridge_delayed_call *call) @@ -3602,34 +3689,37 @@ bridge_schedule_delayed_call(struct bridge_delayed_call *call) BRIDGE_LOCK_ASSERT_HELD(sc); if ((sc->sc_flags & SCF_DETACHING) || - (call->bdc_flags & (BDCF_OUTSTANDING | BDCF_CANCELLING))) + (call->bdc_flags & (BDCF_OUTSTANDING | BDCF_CANCELLING))) { return; + } if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) { nanoseconds_to_absolutetime( - (uint64_t)call->bdc_ts.tv_sec * NSEC_PER_SEC + - call->bdc_ts.tv_nsec, &deadline); + (uint64_t)call->bdc_ts.tv_sec * NSEC_PER_SEC + + call->bdc_ts.tv_nsec, &deadline); clock_absolutetime_interval_to_deadline(deadline, &deadline); } call->bdc_flags = BDCF_OUTSTANDING; #if BRIDGE_DEBUG_DELAYED_CALLBACK - if (if_bridge_debug & BR_DBGF_DELAYED_CALL) + if (if_bridge_debug & BR_DBGF_DELAYED_CALL) { printf("%s: %s call 0x%llx flags 0x%x\n", __func__, sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call), call->bdc_flags); + } #endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */ - if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) + if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) { thread_call_func_delayed( (thread_call_func_t)bridge_delayed_callback, call, deadline); - else { - if (call->bdc_thread_call == NULL) + } else { + if (call->bdc_thread_call == NULL) { call->bdc_thread_call = thread_call_allocate( (thread_call_func_t)bridge_delayed_callback, call); + } thread_call_enter(call->bdc_thread_call); } } @@ -3650,8 +3740,9 @@ bridge_cancel_delayed_call(struct bridge_delayed_call *call) /* * The call was never scheduled */ - if (sc == NULL) + if (sc == NULL) { return; + } BRIDGE_LOCK_ASSERT_HELD(sc); @@ -3659,13 +3750,14 @@ bridge_cancel_delayed_call(struct bridge_delayed_call *call) while (call->bdc_flags & BDCF_OUTSTANDING) { #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_DELAYED_CALL) + if (if_bridge_debug & BR_DBGF_DELAYED_CALL) { printf("%s: %s call 0x%llx flags 0x%x\n", __func__, sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call), call->bdc_flags); + } #endif /* BRIDGE_DEBUG */ result = thread_call_func_cancel( - (thread_call_func_t)bridge_delayed_callback, call, FALSE); + (thread_call_func_t)bridge_delayed_callback, call, FALSE); if (result) { /* @@ -3697,8 +3789,9 @@ bridge_cleanup_delayed_call(struct bridge_delayed_call *call) /* * The call was never scheduled */ - if (sc == NULL) + if (sc == NULL) { return; + } BRIDGE_LOCK_ASSERT_HELD(sc); @@ -3707,9 +3800,10 @@ bridge_cleanup_delayed_call(struct bridge_delayed_call *call) if (call->bdc_thread_call != NULL) { result = thread_call_free(call->bdc_thread_call); - if (result == FALSE) + if (result == FALSE) { panic("%s thread_call_free() failed for call %p", - __func__, call); + __func__, call); + } call->bdc_thread_call = NULL; } } @@ -3727,8 +3821,9 @@ bridge_init(struct ifnet *ifp) BRIDGE_LOCK_ASSERT_HELD(sc); - if ((ifnet_flags(ifp) & IFF_RUNNING)) - return (0); + if ((ifnet_flags(ifp) & IFF_RUNNING)) { + return 0; + } error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING); @@ -3739,11 +3834,12 @@ bridge_init(struct ifnet *ifp) */ bridge_aging_timer(sc); #if BRIDGESTP - if (error == 0) - bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */ + if (error == 0) { + bstp_init(&sc->sc_stp); /* Initialize Spanning Tree */ + } #endif /* BRIDGESTP */ } - return (error); + return error; } /* @@ -3759,8 +3855,9 @@ bridge_ifstop(struct ifnet *ifp, int disable) BRIDGE_LOCK_ASSERT_HELD(sc); - if ((ifnet_flags(ifp) & IFF_RUNNING) == 0) + if ((ifnet_flags(ifp) & IFF_RUNNING) == 0) { return; + } if 
(bridge_in_bsd_mode(sc)) { bridge_cancel_delayed_call(&sc->sc_aging_timer); @@ -3830,12 +3927,13 @@ bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m) /* Preserve existing error value */ if (error == 0) { - if (_error != 0) + if (_error != 0) { error = _error; - else if (adv.code == FADV_FLOW_CONTROLLED) + } else if (adv.code == FADV_FLOW_CONTROLLED) { error = EQFULL; - else if (adv.code == FADV_SUSPENDED) + } else if (adv.code == FADV_SUSPENDED) { error = EQSUSPENDED; + } } if (_error == 0) { @@ -3845,7 +3943,7 @@ bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m) } } - return (error); + return error; } #if HAS_BRIDGE_DUMMYNET @@ -3875,10 +3973,12 @@ bridge_dummynet(struct mbuf *m, struct ifnet *ifp) } if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) { - if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) + if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0) { return; - if (m == NULL) + } + if (m == NULL) { return; + } } (void) bridge_enqueue(sc, ifp, m); @@ -3898,7 +3998,7 @@ bridge_dummynet(struct mbuf *m, struct ifnet *ifp) */ static int bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, - struct rtentry *rt) + struct rtentry *rt) { #pragma unused(sa, rt) struct ether_header *eh; @@ -3907,14 +4007,16 @@ bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, uint16_t vlan; #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_OUTPUT) + if (if_bridge_debug & BR_DBGF_OUTPUT) { printf("%s: ifp %s\n", __func__, ifp->if_xname); + } #endif /* BRIDGE_DEBUG */ if (m->m_len < ETHER_HDR_LEN) { m = m_pullup(m, ETHER_HDR_LEN); - if (m == NULL) - return (0); + if (m == NULL) { + return 0; + } } eh = mtod(m, struct ether_header *); @@ -3947,10 +4049,11 @@ bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, * If the packet is a multicast, or we don't know a better way to * get there, send to all interfaces. */ - if (ETHER_IS_MULTICAST(eh->ether_dhost)) + if (ETHER_IS_MULTICAST(eh->ether_dhost)) { dst_if = NULL; - else + } else { dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan); + } if (dst_if == NULL) { struct bridge_iflist *bif; struct mbuf *mc; @@ -3961,16 +4064,18 @@ bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, BRIDGE_LOCK2REF(sc, error); if (error) { m_freem(m); - return (0); + return 0; } TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) { dst_if = bif->bif_ifp; - if (dst_if->if_type == IFT_GIF) + if (dst_if->if_type == IFT_GIF) { continue; - if ((dst_if->if_flags & IFF_RUNNING) == 0) + } + if ((dst_if->if_flags & IFF_RUNNING) == 0) { continue; + } /* * If this is not the original output interface, @@ -3979,8 +4084,9 @@ bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, * allows forwarding. 
*/ if (dst_if != ifp && (bif->bif_ifflags & IFBIF_STP) && - bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) + bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { continue; + } if (LIST_NEXT(bif, bif_next) == NULL) { used = 1; @@ -3989,17 +4095,18 @@ bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa, mc = m_copypacket(m, M_DONTWAIT); if (mc == NULL) { (void) ifnet_stat_increment_out( - sc->sc_ifp, 0, 0, 1); + sc->sc_ifp, 0, 0, 1); continue; } } (void) bridge_enqueue(sc, dst_if, mc); } - if (used == 0) + if (used == 0) { m_freem(m); + } BRIDGE_UNREF(sc); - return (0); + return 0; } sendunicast: @@ -4011,12 +4118,12 @@ sendunicast: if ((dst_if->if_flags & IFF_RUNNING) == 0) { m_freem(m); BRIDGE_UNLOCK(sc); - return (0); + return 0; } BRIDGE_UNLOCK(sc); (void) bridge_enqueue(sc, dst_if, m); - return (0); + return 0; } #endif /* BRIDGE_MEMBER_OUT_FILTER */ @@ -4040,14 +4147,16 @@ bridge_output(struct ifnet *ifp, struct mbuf *m) BRIDGE_LOCK(sc); ASSERT(bridge_in_bsd_mode(sc)); - if (!(m->m_flags & (M_BCAST|M_MCAST))) + if (!(m->m_flags & (M_BCAST | M_MCAST))) { dst_if = bridge_rtlookup(sc, eh->ether_dhost, 0); + } (void) ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0); #if NBPFILTER > 0 - if (sc->sc_bpf_output) + if (sc->sc_bpf_output) { bridge_bpf_output(ifp, m); + } #endif if (dst_if == NULL) { @@ -4058,7 +4167,7 @@ bridge_output(struct ifnet *ifp, struct mbuf *m) error = bridge_enqueue(sc, dst_if, m); } - return (error); + return error; } static void @@ -4067,10 +4176,11 @@ bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m) struct ether_header *eh = mtod(m, struct ether_header *); uint32_t sw_csum, hwcap; - if (ifp != NULL) + if (ifp != NULL) { hwcap = (ifp->if_hwassist | CSUM_DATA_VALID); - else + } else { hwcap = 0; + } /* do in software what the hardware cannot */ sw_csum = m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_FLAGS(hwcap); @@ -4082,7 +4192,7 @@ bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m) (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) { if (m->m_pkthdr.csum_flags & CSUM_TCP) { uint16_t start = - sizeof (*eh) + sizeof (struct ip); + sizeof(*eh) + sizeof(struct ip); uint16_t ulpoff = m->m_pkthdr.csum_data & 0xffff; m->m_pkthdr.csum_flags |= @@ -4094,7 +4204,7 @@ bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m) m->m_pkthdr.csum_flags); } } - (void) in_finalize_cksum(m, sizeof (*eh), sw_csum); + (void) in_finalize_cksum(m, sizeof(*eh), sw_csum); break; #if INET6 @@ -4104,7 +4214,7 @@ bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m) (m->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA)) { if (m->m_pkthdr.csum_flags & CSUM_TCPIPV6) { uint16_t start = - sizeof (*eh) + sizeof (struct ip6_hdr); + sizeof(*eh) + sizeof(struct ip6_hdr); uint16_t ulpoff = m->m_pkthdr.csum_data & 0xffff; m->m_pkthdr.csum_flags |= @@ -4116,7 +4226,7 @@ bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m) m->m_pkthdr.csum_flags); } } - (void) in6_finalize_cksum(m, sizeof (*eh), -1, -1, sw_csum); + (void) in6_finalize_cksum(m, sizeof(*eh), -1, -1, sw_csum); break; #endif /* INET6 */ } @@ -4140,8 +4250,9 @@ bridge_start(struct ifnet *ifp) struct mbuf *m; for (;;) { - if (ifnet_dequeue(ifp, &m) != 0) + if (ifnet_dequeue(ifp, &m) != 0) { break; + } (void) bridge_output(ifp, m); } @@ -4156,7 +4267,7 @@ bridge_start(struct ifnet *ifp) */ static void bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif, - struct mbuf *m) + struct mbuf *m) { struct bridge_iflist *dbif; struct ifnet *src_if, *dst_if, *ifp; @@ -4169,9 +4280,10 @@ 
bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
 	ASSERT(bridge_in_bsd_mode(sc));

 #if BRIDGE_DEBUG
-	if (if_bridge_debug & BR_DBGF_OUTPUT)
+	if (if_bridge_debug & BR_DBGF_OUTPUT) {
 		printf("%s: %s m 0x%llx\n", __func__, sc->sc_ifp->if_xname,
 		    (uint64_t)VM_KERNEL_ADDRPERM(m));
+	}
 #endif /* BRIDGE_DEBUG */

 	src_if = m->m_pkthdr.rcvif;
@@ -4182,8 +4294,9 @@ bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,

 	if ((sbif->bif_ifflags & IFBIF_STP) &&
-	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
 		goto drop;
+	}

 	eh = mtod(m, struct ether_header *);
 	dst = eh->ether_dhost;
@@ -4196,13 +4309,15 @@ bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
 		 * If the interface has address limits then deny any source
 		 * that is not in the cache.
 		 */
-		if (error && sbif->bif_addrmax)
+		if (error && sbif->bif_addrmax) {
 			goto drop;
+		}
 	}

 	if ((sbif->bif_ifflags & IFBIF_STP) != 0 &&
-	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
+	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING) {
 		goto drop;
+	}

 	/*
 	 * At this point, the port either doesn't participate
@@ -4213,10 +4328,11 @@ bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
 	 * If the packet is unicast, destined for someone on
 	 * "this" side of the bridge, drop it.
 	 */
-	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
+	if ((m->m_flags & (M_BCAST | M_MCAST)) == 0) {
 		dst_if = bridge_rtlookup(sc, dst, vlan);
-		if (src_if == dst_if)
+		if (src_if == dst_if) {
 			goto drop;
+		}
 	} else {
 		/*
 		 * Check if it's a reserved multicast address, any address
@@ -4226,8 +4342,9 @@ bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
 		 */
 		if (dst[0] == 0x01 && dst[1] == 0x80 &&
 		    dst[2] == 0xc2 && dst[3] == 0x00 &&
-		    dst[4] == 0x00 && dst[5] <= 0x0f)
+		    dst[4] == 0x00 && dst[5] <= 0x0f) {
 			goto drop;
+		}

 		/* ...forward it to all interfaces. */
@@ -4248,8 +4365,9 @@
 	if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH) ||
 	    dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
 		m->m_pkthdr.rcvif = ifp;
-		if (sc->sc_bpf_input)
+		if (sc->sc_bpf_input) {
 			bridge_bpf_input(ifp, m);
+		}
 	}
 #endif /* NBPFILTER */
@@ -4257,10 +4375,12 @@
 	/* run the packet filter */
 	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
 		BRIDGE_UNLOCK(sc);
-		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
+		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0) {
 			return;
-		if (m == NULL)
+		}
+		if (m == NULL) {
 			return;
+		}
 		BRIDGE_LOCK(sc);
 	}
 #endif /* PFIL_HOOKS */
@@ -4274,21 +4394,25 @@
 	 * At this point, we're dealing with a unicast frame
 	 * going to a different interface.
 	 */
-	if ((dst_if->if_flags & IFF_RUNNING) == 0)
+	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
 		goto drop;
+	}

 	dbif = bridge_lookup_member_if(sc, dst_if);
-	if (dbif == NULL)
+	if (dbif == NULL) {
 		/* Not a member of the bridge (anymore?) */
 		goto drop;
+	}

 	/* Private segments can not talk to each other */
-	if (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)
+	if (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE) {
 		goto drop;
+	}

 	if ((dbif->bif_ifflags & IFBIF_STP) &&
-	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
+	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
 		goto drop;
+	}

 #if HAS_DHCPRA_MASK
 	/* APPLE MODIFICATION */
@@ -4305,10 +4429,12 @@

 #if defined(PFIL_HOOKS)
 	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
-		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
+		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0) {
 			return;
-		if (m == NULL)
+		}
+		if (m == NULL) {
 			return;
+		}
 	}
 #endif /* PFIL_HOOKS */
@@ -4330,7 +4456,7 @@ ether_ntop(char *buf, size_t len, const u_char *ap)
 	snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
 	    ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);

-	return (buf);
+	return buf;
 }
 #endif /* BRIDGE_DEBUG */

@@ -4354,20 +4480,22 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)
 	ASSERT(bridge_in_bsd_mode(sc));

 #if BRIDGE_DEBUG
-	if (if_bridge_debug & BR_DBGF_INPUT)
+	if (if_bridge_debug & BR_DBGF_INPUT) {
 		printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
 		    sc->sc_ifp->if_xname, ifp->if_xname,
 		    (uint64_t)VM_KERNEL_ADDRPERM(m),
 		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
+	}
 #endif /* BRIDGE_DEBUG */

 	if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
 #if BRIDGE_DEBUG
-		if (if_bridge_debug & BR_DBGF_INPUT)
+		if (if_bridge_debug & BR_DBGF_INPUT) {
 			printf("%s: %s not running passing along\n",
 			    __func__, sc->sc_ifp->if_xname);
+		}
 #endif /* BRIDGE_DEBUG */
-		return (0);
+		return 0;
 	}

 	bifp = sc->sc_ifp;
@@ -4385,7 +4513,7 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)
 		BRIDGE_BPF_MTAP_INPUT(sc, m);
 		(void) ifnet_stat_increment_in(bifp, 1, m->m_pkthdr.len, 0);
 		m_freem(m);
-		return (EJUSTRETURN);
+		return EJUSTRETURN;
 	}
 #endif /* IFF_MONITOR */
@@ -4393,29 +4521,32 @@
 	/*
 	 * Need to clear the promiscuous flags otherwise it will be
 	 * dropped by DLIL after processing filters
 	 */
-	if ((mbuf_flags(m) & MBUF_PROMISC))
+	if ((mbuf_flags(m) & MBUF_PROMISC)) {
 		mbuf_setflags_mask(m, 0, MBUF_PROMISC);
+	}

 	BRIDGE_LOCK(sc);
 	bif = bridge_lookup_member_if(sc, ifp);
 	if (bif == NULL) {
 		BRIDGE_UNLOCK(sc);
 #if BRIDGE_DEBUG
-		if (if_bridge_debug & BR_DBGF_INPUT)
+		if (if_bridge_debug & BR_DBGF_INPUT) {
 			printf("%s: %s bridge_lookup_member_if failed\n",
 			    __func__, sc->sc_ifp->if_xname);
+		}
 #endif /* BRIDGE_DEBUG */
-		return (0);
+		return 0;
 	}
 	if (bif->bif_flags & BIFF_HOST_FILTER) {
 		error = bridge_host_filter(bif, m);
 		if (error != 0) {
-			if (if_bridge_debug & BR_DBGF_INPUT)
+			if (if_bridge_debug & BR_DBGF_INPUT) {
 				printf("%s: %s bridge_host_filter failed\n",
 				    __func__, bif->bif_ifp->if_xname);
+			}
 			BRIDGE_UNLOCK(sc);
-			return (EJUSTRETURN);
+			return EJUSTRETURN;
 		}
 	}
@@ -4423,17 +4554,18 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)

 	bridge_span(sc, m);

-	if (m->m_flags & (M_BCAST|M_MCAST)) {
-
+	if (m->m_flags & (M_BCAST | M_MCAST)) {
 #if BRIDGE_DEBUG
-		if (if_bridge_debug & BR_DBGF_MCAST)
-			if ((m->m_flags & M_MCAST))
+		if (if_bridge_debug & BR_DBGF_MCAST) {
+			if ((m->m_flags & M_MCAST)) {
 				printf("%s: multicast: "
 				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
 				    __func__, eh->ether_dhost[0], eh->ether_dhost[1],
 				    eh->ether_dhost[2], eh->ether_dhost[3],
 				    eh->ether_dhost[4], eh->ether_dhost[5]);
+			}
+		}
 #endif /* BRIDGE_DEBUG */
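/*
 * Illustrative sketch (ether_is_8021d_reserved is a hypothetical name, not a
 * function in this file): the "reserved" test used by bridge_forward() above
 * matches the IEEE 802.1D range 01:80:c2:00:00:00 through 01:80:c2:00:00:0f:
 *
 *	static __inline int
 *	ether_is_8021d_reserved(const uint8_t dst[ETHER_ADDR_LEN])
 *	{
 *		return (dst[0] == 0x01 && dst[1] == 0x80 && dst[2] == 0xc2 &&
 *		    dst[3] == 0x00 && dst[4] == 0x00 && dst[5] <= 0x0f);
 *	}
 *
 * Frames in that range (STP BPDUs, 802.3x PAUSE frames, and the like) are
 * link-local and are never forwarded by the bridge.
 */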
/* Tap off 802.1D packets; they do not get forwarded. */ @@ -4447,14 +4579,14 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header) #endif /* !BRIDGESTP */ if (m == NULL) { BRIDGE_UNLOCK(sc); - return (EJUSTRETURN); + return EJUSTRETURN; } } if ((bif->bif_ifflags & IFBIF_STP) && bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { BRIDGE_UNLOCK(sc); - return (0); + return 0; } /* @@ -4465,7 +4597,7 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header) mc = m_dup(m, M_DONTWAIT); if (mc == NULL) { BRIDGE_UNLOCK(sc); - return (0); + return 0; } /* @@ -4494,98 +4626,100 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header) mc2->m_pkthdr.pkt_hdr = mbuf_data(mc2); #if NBPFILTER > 0 - if (sc->sc_bpf_input) + if (sc->sc_bpf_input) { bridge_bpf_input(bifp, mc2); + } #endif /* NBPFILTER */ (void) mbuf_setdata(mc2, (char *)mbuf_data(mc2) + ETHER_HDR_LEN, mbuf_len(mc2) - ETHER_HDR_LEN); - (void) mbuf_pkthdr_adjustlen(mc2, - ETHER_HDR_LEN); + (void) mbuf_pkthdr_adjustlen(mc2, -ETHER_HDR_LEN); (void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(mc2), 0); #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_MCAST) + if (if_bridge_debug & BR_DBGF_MCAST) { printf("%s: %s mcast for us\n", __func__, sc->sc_ifp->if_xname); + } #endif /* BRIDGE_DEBUG */ dlil_input_packet_list(bifp, mc2); } /* Return the original packet for local processing. */ - return (0); + return 0; } if ((bif->bif_ifflags & IFBIF_STP) && bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { BRIDGE_UNLOCK(sc); - return (0); + return 0; } #ifdef DEV_CARP -#define CARP_CHECK_WE_ARE_DST(iface) \ +#define CARP_CHECK_WE_ARE_DST(iface) \ ((iface)->if_carp &&\ - carp_forus((iface)->if_carp, eh->ether_dhost)) -#define CARP_CHECK_WE_ARE_SRC(iface) \ + carp_forus((iface)->if_carp, eh->ether_dhost)) +#define CARP_CHECK_WE_ARE_SRC(iface) \ ((iface)->if_carp &&\ - carp_forus((iface)->if_carp, eh->ether_shost)) + carp_forus((iface)->if_carp, eh->ether_shost)) #else -#define CARP_CHECK_WE_ARE_DST(iface) 0 -#define CARP_CHECK_WE_ARE_SRC(iface) 0 +#define CARP_CHECK_WE_ARE_DST(iface) 0 +#define CARP_CHECK_WE_ARE_SRC(iface) 0 #endif #ifdef INET6 -#define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook) +#define PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook) #else -#define PFIL_HOOKED_INET6 0 +#define PFIL_HOOKED_INET6 0 #endif #if defined(PFIL_HOOKS) -#define PFIL_PHYS(sc, ifp, m) do { \ - if (pfil_local_phys && \ - (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \ - if (bridge_pfil(&m, NULL, ifp, \ - PFIL_IN) != 0 || m == NULL) { \ - BRIDGE_UNLOCK(sc); \ - return (NULL); \ - } \ - } \ +#define PFIL_PHYS(sc, ifp, m) do { \ + if (pfil_local_phys && \ + (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { \ + if (bridge_pfil(&m, NULL, ifp, \ + PFIL_IN) != 0 || m == NULL) { \ + BRIDGE_UNLOCK(sc); \ + return (NULL); \ + } \ + } \ } while (0) #else /* PFIL_HOOKS */ -#define PFIL_PHYS(sc, ifp, m) +#define PFIL_PHYS(sc, ifp, m) #endif /* PFIL_HOOKS */ -#define GRAB_OUR_PACKETS(iface) \ - if ((iface)->if_type == IFT_GIF) \ - continue; \ - /* It is destined for us. */ \ - if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \ - ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \ - if ((iface)->if_type == IFT_BRIDGE) { \ - BRIDGE_BPF_MTAP_INPUT(sc, m); \ - /* Filter on the physical interface. 
 */ \
-	PFIL_PHYS(sc, iface, m); \
-	} \
-	if (bif->bif_ifflags & IFBIF_LEARNING) { \
-	error = bridge_rtupdate(sc, eh->ether_shost, \
-	vlan, bif, 0, IFBAF_DYNAMIC); \
-	if (error && bif->bif_addrmax) { \
-	BRIDGE_UNLOCK(sc); \
-	return (EJUSTRETURN); \
-	} \
-	} \
-	m->m_pkthdr.rcvif = iface; \
-	BRIDGE_UNLOCK(sc); \
-	return (0); \
-	} \
-	\
-	/* We just received a packet that we sent out. */ \
-	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
-	ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
-	BRIDGE_UNLOCK(sc); \
-	return (EJUSTRETURN); \
+#define GRAB_OUR_PACKETS(iface) \
+	if ((iface)->if_type == IFT_GIF) \
+	continue; \
+	/* It is destined for us. */ \
+	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost, \
+	ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) { \
+	if ((iface)->if_type == IFT_BRIDGE) { \
+	BRIDGE_BPF_MTAP_INPUT(sc, m); \
+	/* Filter on the physical interface. */ \
+	PFIL_PHYS(sc, iface, m); \
+	} \
+	if (bif->bif_ifflags & IFBIF_LEARNING) { \
+	error = bridge_rtupdate(sc, eh->ether_shost, \
+	vlan, bif, 0, IFBAF_DYNAMIC); \
+	if (error && bif->bif_addrmax) { \
+	BRIDGE_UNLOCK(sc); \
+	return (EJUSTRETURN); \
+	} \
+	} \
+	m->m_pkthdr.rcvif = iface; \
+	BRIDGE_UNLOCK(sc); \
+	return (0); \
+	} \
+	\
+	/* We just received a packet that we sent out. */ \
+	if (memcmp(IF_LLADDR((iface)), eh->ether_shost, \
+	ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) { \
+	BRIDGE_UNLOCK(sc); \
+	return (EJUSTRETURN); \
 }

 /*
@@ -4598,7 +4732,6 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)
 	 */
 	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
 	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST(bifp)) {
-
 		/* Mark the packet as arriving on the bridge interface */
 		(void) mbuf_pkthdr_setrcvif(m, bifp);
 		mbuf_pkthdr_setheader(m, frame_header);
@@ -4608,29 +4741,31 @@
 		 * address is valid and not multicast, record
 		 * the address.
 		 */
-		if (bif->bif_ifflags & IFBIF_LEARNING)
+		if (bif->bif_ifflags & IFBIF_LEARNING) {
 			(void) bridge_rtupdate(sc, eh->ether_shost,
 			    vlan, bif, 0, IFBAF_DYNAMIC);
+		}

 		BRIDGE_BPF_MTAP_INPUT(sc, m);

 		(void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
 		    mbuf_len(m) - ETHER_HDR_LEN);
-		(void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
+		(void) mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN);

 		(void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);

 		BRIDGE_UNLOCK(sc);

 #if BRIDGE_DEBUG
-		if (if_bridge_debug & BR_DBGF_INPUT)
+		if (if_bridge_debug & BR_DBGF_INPUT) {
 			printf("%s: %s packet for bridge\n", __func__,
 			    sc->sc_ifp->if_xname);
+		}
 #endif /* BRIDGE_DEBUG */

 		dlil_input_packet_list(bifp, m);

-		return (EJUSTRETURN);
+		return EJUSTRETURN;
 	}

 	/*
@@ -4649,11 +4784,11 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)
 	if (memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) {
 #ifdef VERY_VERY_VERY_DIAGNOSTIC
-		printf("%s: not forwarding packet bound for member "
-		    "interface\n", __func__);
+		printf("%s: not forwarding packet bound for member "
+		    "interface\n", __func__);
 #endif
-		BRIDGE_UNLOCK(sc);
-		return (0);
+		BRIDGE_UNLOCK(sc);
+		return 0;
 	}

 	/* Now check all the bridge members.
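 *
 * Roughly, each GRAB_OUR_PACKETS(member_ifp) expansion in the loop below
 * amounts to this sketch (member_ifp is shorthand for bif->bif_ifp):
 *
 *	if (memcmp(IF_LLADDR(member_ifp), eh->ether_dhost,
 *	    ETHER_ADDR_LEN) == 0) {
 *		learn eh->ether_shost if IFBIF_LEARNING is set, then
 *		set m->m_pkthdr.rcvif = member_ifp and return 0 (local delivery)
 *	} else if (memcmp(IF_LLADDR(member_ifp), eh->ether_shost,
 *	    ETHER_ADDR_LEN) == 0) {
 *		return EJUSTRETURN (our own transmission reflected back)
 *	}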
*/ @@ -4672,7 +4807,7 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header) */ bridge_forward(sc, bif, m); - return (EJUSTRETURN); + return EJUSTRETURN; } /* @@ -4686,7 +4821,7 @@ bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header) */ static void bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, - struct mbuf *m, int runfilt) + struct mbuf *m, int runfilt) { #ifndef PFIL_HOOKS #pragma unused(runfilt) @@ -4707,33 +4842,40 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, #ifdef PFIL_HOOKS /* Filter on the bridge interface before broadcasting */ if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) { - if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) + if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0) { goto out; - if (m == NULL) + } + if (m == NULL) { goto out; + } } #endif /* PFIL_HOOKS */ TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) { dst_if = dbif->bif_ifp; - if (dst_if == src_if) + if (dst_if == src_if) { continue; + } /* Private segments can not talk to each other */ if (sbif && - (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)) + (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)) { continue; + } if ((dbif->bif_ifflags & IFBIF_STP) && - dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) + dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) { continue; + } if ((dbif->bif_ifflags & IFBIF_DISCOVER) == 0 && - (m->m_flags & (M_BCAST|M_MCAST)) == 0) + (m->m_flags & (M_BCAST | M_MCAST)) == 0) { continue; + } - if ((dst_if->if_flags & IFF_RUNNING) == 0) + if ((dst_if->if_flags & IFF_RUNNING) == 0) { continue; + } if (!(dbif->bif_flags & BIFF_MEDIA_ACTIVE)) { continue; @@ -4765,21 +4907,24 @@ bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if, mc = m_copyup(mc, i, ETHER_ALIGN); if (mc == NULL) { (void) ifnet_stat_increment_out( - sc->sc_ifp, 0, 0, 1); + sc->sc_ifp, 0, 0, 1); continue; } } - if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) + if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0) { continue; - if (mc == NULL) + } + if (mc == NULL) { continue; + } } #endif /* PFIL_HOOKS */ (void) bridge_enqueue(sc, dst_if, mc); } - if (used == 0) + if (used == 0) { m_freem(m); + } #ifdef PFIL_HOOKS out: @@ -4801,14 +4946,16 @@ bridge_span(struct bridge_softc *sc, struct mbuf *m) struct ifnet *dst_if; struct mbuf *mc; - if (TAILQ_EMPTY(&sc->sc_spanlist)) + if (TAILQ_EMPTY(&sc->sc_spanlist)) { return; + } TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) { dst_if = bif->bif_ifp; - if ((dst_if->if_flags & IFF_RUNNING) == 0) + if ((dst_if->if_flags & IFF_RUNNING) == 0) { continue; + } mc = m_copypacket(m, M_DONTWAIT); if (mc == NULL) { @@ -4828,7 +4975,7 @@ bridge_span(struct bridge_softc *sc, struct mbuf *m) */ static int bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, - struct bridge_iflist *bif, int setflags, uint8_t flags) + struct bridge_iflist *bif, int setflags, uint8_t flags) { struct bridge_rtnode *brt; int error; @@ -4839,13 +4986,15 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, /* Check the source address is valid and not multicast. */ if (ETHER_IS_MULTICAST(dst) || (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 && - dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) - return (EINVAL); + dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0) { + return EINVAL; + } /* 802.1p frames map to vlan 1 */ - if (vlan == 0) + if (vlan == 0) { vlan = 1; + } /* * A route for this destination might already exist. 
If so, @@ -4854,12 +5003,12 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) { if (sc->sc_brtcnt >= sc->sc_brtmax) { sc->sc_brtexceeded++; - return (ENOSPC); + return ENOSPC; } /* Check per interface address limits (if enabled) */ if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) { bif->bif_addrexceeded++; - return (ENOSPC); + return ENOSPC; } /* @@ -4868,14 +5017,16 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, * address. */ brt = zalloc_noblock(bridge_rtnode_pool); - if (brt == NULL) - return (ENOMEM); + if (brt == NULL) { + return ENOMEM; + } bzero(brt, sizeof(struct bridge_rtnode)); - if (bif->bif_ifflags & IFBIF_STICKY) + if (bif->bif_ifflags & IFBIF_STICKY) { brt->brt_flags = IFBAF_STICKY; - else + } else { brt->brt_flags = IFBAF_DYNAMIC; + } memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN); brt->brt_vlan = vlan; @@ -4883,17 +5034,18 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, if ((error = bridge_rtnode_insert(sc, brt)) != 0) { zfree(bridge_rtnode_pool, brt); - return (error); + return error; } brt->brt_dst = bif; bif->bif_addrcnt++; #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_RT_TABLE) + if (if_bridge_debug & BR_DBGF_RT_TABLE) { printf("%s: added %02x:%02x:%02x:%02x:%02x:%02x " "on %s count %u hashsize %u\n", __func__, dst[0], dst[1], dst[2], dst[3], dst[4], dst[5], sc->sc_ifp->if_xname, sc->sc_brtcnt, sc->sc_rthash_size); + } #endif } @@ -4910,11 +5062,12 @@ bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan, now = (unsigned long) net_uptime(); brt->brt_expire = now + sc->sc_brttimeout; } - if (setflags) + if (setflags) { brt->brt_flags = flags; + } - return (0); + return 0; } /* @@ -4929,10 +5082,11 @@ bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) BRIDGE_LOCK_ASSERT_HELD(sc); - if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) - return (NULL); + if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL) { + return NULL; + } - return (brt->brt_ifp); + return brt->brt_ifp; } /* @@ -4950,19 +5104,22 @@ bridge_rttrim(struct bridge_softc *sc) BRIDGE_LOCK_ASSERT_HELD(sc); /* Make sure we actually need to do this. */ - if (sc->sc_brtcnt <= sc->sc_brtmax) + if (sc->sc_brtcnt <= sc->sc_brtmax) { return; + } /* Force an aging cycle; this might trim enough addresses. */ bridge_rtage(sc); - if (sc->sc_brtcnt <= sc->sc_brtmax) + if (sc->sc_brtcnt <= sc->sc_brtmax) { return; + } LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { bridge_rtnode_destroy(sc, brt); - if (sc->sc_brtcnt <= sc->sc_brtmax) + if (sc->sc_brtcnt <= sc->sc_brtmax) { return; + } } } } @@ -5005,8 +5162,9 @@ bridge_rtage(struct bridge_softc *sc) LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { - if (now >= brt->brt_expire) + if (now >= brt->brt_expire) { bridge_rtnode_destroy(sc, brt); + } } } } @@ -5024,8 +5182,9 @@ bridge_rtflush(struct bridge_softc *sc, int full) BRIDGE_LOCK_ASSERT_HELD(sc); LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { - if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) + if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { bridge_rtnode_destroy(sc, brt); + } } } @@ -5051,7 +5210,7 @@ bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan) found = 1; } - return (found ? 
0 : ENOENT); + return found ? 0 : ENOENT; } /* @@ -5068,8 +5227,9 @@ bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full) LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { if (brt->brt_ifp == ifp && (full || - (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) + (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)) { bridge_rtnode_destroy(sc, brt); + } } } @@ -5085,22 +5245,23 @@ bridge_rtable_init(struct bridge_softc *sc) ASSERT(bridge_in_bsd_mode(sc)); - sc->sc_rthash = _MALLOC(sizeof (*sc->sc_rthash) * BRIDGE_RTHASH_SIZE, + sc->sc_rthash = _MALLOC(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE, M_DEVBUF, M_WAITOK | M_ZERO); if (sc->sc_rthash == NULL) { printf("%s: no memory\n", __func__); - return (ENOMEM); + return ENOMEM; } sc->sc_rthash_size = BRIDGE_RTHASH_SIZE; - for (i = 0; i < sc->sc_rthash_size; i++) + for (i = 0; i < sc->sc_rthash_size; i++) { LIST_INIT(&sc->sc_rthash[i]); + } sc->sc_rthash_key = RandomULong(); LIST_INIT(&sc->sc_rtlist); - return (0); + return 0; } /* @@ -5123,8 +5284,9 @@ bridge_rthash_delayed_resize(struct bridge_softc *sc) /* * Four entries per hash bucket is our ideal load factor */ - if (sc->sc_brtcnt < sc->sc_rthash_size * 4) + if (sc->sc_brtcnt < sc->sc_rthash_size * 4) { goto out; + } /* * Doubling the number of hash buckets may be too simplistic @@ -5135,7 +5297,7 @@ bridge_rthash_delayed_resize(struct bridge_softc *sc) sc->sc_flags |= SCF_RESIZING; BRIDGE_UNLOCK(sc); - new_rthash = _MALLOC(sizeof (*sc->sc_rthash) * new_rthash_size, + new_rthash = _MALLOC(sizeof(*sc->sc_rthash) * new_rthash_size, M_DEVBUF, M_WAITOK | M_ZERO); BRIDGE_LOCK(sc); @@ -5162,29 +5324,33 @@ bridge_rthash_delayed_resize(struct bridge_softc *sc) */ sc->sc_rthash_key = RandomULong(); - for (i = 0; i < sc->sc_rthash_size; i++) + for (i = 0; i < sc->sc_rthash_size; i++) { LIST_INIT(&sc->sc_rthash[i]); + } - LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { + LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { LIST_REMOVE(brt, brt_hash); (void) bridge_rtnode_hash(sc, brt); } out: if (error == 0) { #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_RT_TABLE) + if (if_bridge_debug & BR_DBGF_RT_TABLE) { printf("%s: %s new size %u\n", __func__, sc->sc_ifp->if_xname, sc->sc_rthash_size); + } #endif /* BRIDGE_DEBUG */ - if (old_rthash) + if (old_rthash) { _FREE(old_rthash, M_DEVBUF); + } } else { #if BRIDGE_DEBUG printf("%s: %s failed %d\n", __func__, sc->sc_ifp->if_xname, error); #endif /* BRIDGE_DEBUG */ - if (new_rthash != NULL) + if (new_rthash != NULL) { _FREE(new_rthash, M_DEVBUF); + } } } @@ -5198,19 +5364,22 @@ bridge_rthash_resize(struct bridge_softc *sc) { BRIDGE_LOCK_ASSERT_HELD(sc); - if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING)) + if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING)) { return; + } /* * Four entries per hash bucket is our ideal load factor */ - if (sc->sc_brtcnt < sc->sc_rthash_size * 4) + if (sc->sc_brtcnt < sc->sc_rthash_size * 4) { return; + } /* * Hard limit on the size of the routing hash table */ - if (sc->sc_rthash_size >= bridge_rtable_hash_size_max) + if (sc->sc_rthash_size >= bridge_rtable_hash_size_max) { return; + } sc->sc_resize_call.bdc_sc = sc; sc->sc_resize_call.bdc_func = bridge_rthash_delayed_resize; @@ -5237,18 +5406,18 @@ bridge_rtable_fini(struct bridge_softc *sc) * The following hash function is adapted from "Hash Functions" by Bob Jenkins * ("Algorithm Alley", Dr. Dobbs Journal, September 1997). 
*/ -#define mix(a, b, c) \ -do { \ - a -= b; a -= c; a ^= (c >> 13); \ - b -= c; b -= a; b ^= (a << 8); \ - c -= a; c -= b; c ^= (b >> 13); \ - a -= b; a -= c; a ^= (c >> 12); \ - b -= c; b -= a; b ^= (a << 16); \ - c -= a; c -= b; c ^= (b >> 5); \ - a -= b; a -= c; a ^= (c >> 3); \ - b -= c; b -= a; b ^= (a << 10); \ - c -= a; c -= b; c ^= (b >> 15); \ -} while (/*CONSTCOND*/0) +#define mix(a, b, c) \ +do { \ + a -= b; a -= c; a ^= (c >> 13); \ + b -= c; b -= a; b ^= (a << 8); \ + c -= a; c -= b; c ^= (b >> 13); \ + a -= b; a -= c; a ^= (c >> 12); \ + b -= c; b -= a; b ^= (a << 16); \ + c -= a; c -= b; c ^= (b >> 5); \ + a -= b; a -= c; a ^= (c >> 3); \ + b -= c; b -= a; b ^= (a << 10); \ + c -= a; c -= b; c ^= (b >> 15); \ +} while ( /*CONSTCOND*/ 0) static __inline uint32_t bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) @@ -5264,7 +5433,7 @@ bridge_rthash(struct bridge_softc *sc, const uint8_t *addr) mix(a, b, c); - return (c & BRIDGE_RTHASH_MASK(sc)); + return c & BRIDGE_RTHASH_MASK(sc); } #undef mix @@ -5278,7 +5447,7 @@ bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) d = ((int)a[i]) - ((int)b[i]); } - return (d); + return d; } /* @@ -5289,7 +5458,7 @@ bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b) */ static struct bridge_rtnode * bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, - uint16_t vlan) + uint16_t vlan) { struct bridge_rtnode *brt; uint32_t hash; @@ -5301,13 +5470,15 @@ bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr, hash = bridge_rthash(sc, addr); LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) { dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr); - if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) - return (brt); - if (dir > 0) - return (NULL); + if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0)) { + return brt; + } + if (dir > 0) { + return NULL; + } } - return (NULL); + return NULL; } /* @@ -5338,15 +5509,16 @@ bridge_rtnode_hash(struct bridge_softc *sc, struct bridge_rtnode *brt) dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr); if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) { #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_RT_TABLE) + if (if_bridge_debug & BR_DBGF_RT_TABLE) { printf("%s: %s EEXIST " "%02x:%02x:%02x:%02x:%02x:%02x\n", __func__, sc->sc_ifp->if_xname, brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2], brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5]); + } #endif - return (EEXIST); + return EEXIST; } if (dir > 0) { LIST_INSERT_BEFORE(lbrt, brt, brt_hash); @@ -5360,15 +5532,16 @@ bridge_rtnode_hash(struct bridge_softc *sc, struct bridge_rtnode *brt) } while (lbrt != NULL); #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_RT_TABLE) + if (if_bridge_debug & BR_DBGF_RT_TABLE) { printf("%s: %s impossible %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, sc->sc_ifp->if_xname, brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2], brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5]); + } #endif out: - return (0); + return 0; } /* @@ -5383,15 +5556,16 @@ bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) int error; error = bridge_rtnode_hash(sc, brt); - if (error != 0) - return (error); + if (error != 0) { + return error; + } LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); sc->sc_brtcnt++; bridge_rthash_resize(sc); - return (0); + return 0; } /* @@ -5441,8 +5615,9 @@ bridge_rtable_expire(struct ifnet *ifp, int age) /* Cap the expiry time to 'age' */ if (brt->brt_ifp == ifp && brt->brt_expire > now + age && - (brt->brt_flags & IFBAF_TYPEMASK) == 
IFBAF_DYNAMIC) + (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { brt->brt_expire = now + age; + } } } BRIDGE_UNLOCK(sc); @@ -5466,10 +5641,11 @@ bridge_state_change(struct ifnet *ifp, int state) "discarding" }; - if (log_stp) + if (log_stp) { log(LOG_NOTICE, "%s: state changed to %s on %s\n", sc->sc_ifp->if_xname, stpstates[state], ifp->if_xname); + } } #endif /* BRIDGESTP */ @@ -5491,22 +5667,22 @@ bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) u_int16_t ether_type; snap = 0; - error = -1; /* Default error if not error == 0 */ + error = -1; /* Default error if not error == 0 */ #if 0 /* we may return with the IP fields swapped, ensure its not shared */ KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__)); #endif - if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) - return (0); /* filtering is disabled */ - + if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0) { + return 0; /* filtering is disabled */ + } i = min((*mp)->m_pkthdr.len, max_protohdr); if ((*mp)->m_len < i) { *mp = m_pullup(*mp, i); if (*mp == NULL) { printf("%s: m_pullup failed\n", __func__); - return (-1); + return -1; } } @@ -5538,25 +5714,27 @@ bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) * ARP traffic.) */ switch (ether_type) { - case ETHERTYPE_ARP: - case ETHERTYPE_REVARP: - if (pfil_ipfw_arp == 0) - return (0); /* Automatically pass */ - break; + case ETHERTYPE_ARP: + case ETHERTYPE_REVARP: + if (pfil_ipfw_arp == 0) { + return 0; /* Automatically pass */ + } + break; - case ETHERTYPE_IP: + case ETHERTYPE_IP: #if INET6 - case ETHERTYPE_IPV6: + case ETHERTYPE_IPV6: #endif /* INET6 */ - break; - default: - /* - * Check to see if the user wants to pass non-ip - * packets, these will not be checked by pfil(9) and - * passed unconditionally so the default is to drop. - */ - if (pfil_onlyip) - goto bad; + break; + default: + /* + * Check to see if the user wants to pass non-ip + * packets, these will not be checked by pfil(9) and + * passed unconditionally so the default is to drop. + */ + if (pfil_onlyip) { + goto bad; + } } /* Strip off the Ethernet header and keep a copy. 
*/ @@ -5565,8 +5743,8 @@ bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) /* Strip off snap header, if present */ if (snap) { - m_copydata(*mp, 0, sizeof (struct llc), (caddr_t)&llc1); - m_adj(*mp, sizeof (struct llc)); + m_copydata(*mp, 0, sizeof(struct llc), (caddr_t)&llc1); + m_adj(*mp, sizeof(struct llc)); } /* @@ -5574,44 +5752,46 @@ bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) */ if (dir == PFIL_IN) { switch (ether_type) { - case ETHERTYPE_IP: - error = bridge_ip_checkbasic(mp); - break; + case ETHERTYPE_IP: + error = bridge_ip_checkbasic(mp); + break; #if INET6 - case ETHERTYPE_IPV6: - error = bridge_ip6_checkbasic(mp); - break; + case ETHERTYPE_IPV6: + error = bridge_ip6_checkbasic(mp); + break; #endif /* INET6 */ - default: - error = 0; + default: + error = 0; } - if (error) + if (error) { goto bad; + } } if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) { error = -1; args.rule = ip_dn_claim_rule(*mp); - if (args.rule != NULL && fw_one_pass) + if (args.rule != NULL && fw_one_pass) { goto ipfwpass; /* packet already partially processed */ - + } args.m = *mp; args.oif = ifp; args.next_hop = NULL; args.eh = &eh2; - args.inp = NULL; /* used by ipfw uid/gid/jail rules */ + args.inp = NULL; /* used by ipfw uid/gid/jail rules */ i = ip_fw_chk_ptr(&args); *mp = args.m; - if (*mp == NULL) - return (error); + if (*mp == NULL) { + return error; + } if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) { - /* put the Ethernet header back on */ M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0); - if (*mp == NULL) - return (error); + if (*mp == NULL) { + return error; + } bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); /* @@ -5620,11 +5800,12 @@ bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir) */ args.oif = ifp; ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW); - return (error); + return error; } - if (i != IP_FW_PASS) /* drop */ + if (i != IP_FW_PASS) { /* drop */ goto bad; + } } ipfwpass: @@ -5651,26 +5832,32 @@ ipfwpass: * Keep the order: * in_if -> bridge_if -> out_if */ - if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) + if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) { error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir, NULL); + } - if (*mp == NULL || error != 0) /* filter may consume */ + if (*mp == NULL || error != 0) { /* filter may consume */ break; + } - if (pfil_member && ifp != NULL) + if (pfil_member && ifp != NULL) { error = pfil_run_hooks(&inet_pfil_hook, mp, ifp, dir, NULL); + } - if (*mp == NULL || error != 0) /* filter may consume */ + if (*mp == NULL || error != 0) { /* filter may consume */ break; + } - if (pfil_bridge && dir == PFIL_IN && bifp != NULL) + if (pfil_bridge && dir == PFIL_IN && bifp != NULL) { error = pfil_run_hooks(&inet_pfil_hook, mp, bifp, dir, NULL); + } - if (*mp == NULL || error != 0) /* filter may consume */ + if (*mp == NULL || error != 0) { /* filter may consume */ break; + } /* check if we need to fragment the packet */ if (pfil_member && ifp != NULL && dir == PFIL_OUT) { @@ -5678,50 +5865,59 @@ ipfwpass: if (i > ifp->if_mtu) { error = bridge_fragment(ifp, *mp, &eh2, snap, &llc1); - return (error); + return error; } } /* Recalculate the ip checksum and restore byte ordering */ ip = mtod(*mp, struct ip *); hlen = ip->ip_hl << 2; - if (hlen < sizeof (struct ip)) + if (hlen < sizeof(struct ip)) { goto bad; + } if (hlen > (*mp)->m_len) { - if ((*mp = m_pullup(*mp, hlen)) == 0) + if ((*mp = m_pullup(*mp, hlen)) == 0) { goto bad; + } ip = 
mtod(*mp, struct ip *); - if (ip == NULL) + if (ip == NULL) { goto bad; + } } ip->ip_len = htons(ip->ip_len); ip->ip_off = htons(ip->ip_off); ip->ip_sum = 0; - if (hlen == sizeof (struct ip)) + if (hlen == sizeof(struct ip)) { ip->ip_sum = in_cksum_hdr(ip); - else + } else { ip->ip_sum = in_cksum(*mp, hlen); + } break; #if INET6 case ETHERTYPE_IPV6: - if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) + if (pfil_bridge && dir == PFIL_OUT && bifp != NULL) { error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp, dir, NULL); + } - if (*mp == NULL || error != 0) /* filter may consume */ + if (*mp == NULL || error != 0) { /* filter may consume */ break; + } - if (pfil_member && ifp != NULL) + if (pfil_member && ifp != NULL) { error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp, dir, NULL); + } - if (*mp == NULL || error != 0) /* filter may consume */ + if (*mp == NULL || error != 0) { /* filter may consume */ break; + } - if (pfil_bridge && dir == PFIL_IN && bifp != NULL) + if (pfil_bridge && dir == PFIL_IN && bifp != NULL) { error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp, dir, NULL); + } break; #endif default: @@ -5729,10 +5925,12 @@ ipfwpass: break; } - if (*mp == NULL) - return (error); - if (error != 0) + if (*mp == NULL) { + return error; + } + if (error != 0) { goto bad; + } error = -1; @@ -5740,23 +5938,25 @@ ipfwpass: * Finally, put everything back the way it was and return */ if (snap) { - M_PREPEND(*mp, sizeof (struct llc), M_DONTWAIT, 0); - if (*mp == NULL) - return (error); - bcopy(&llc1, mtod(*mp, caddr_t), sizeof (struct llc)); + M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT, 0); + if (*mp == NULL) { + return error; + } + bcopy(&llc1, mtod(*mp, caddr_t), sizeof(struct llc)); } M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT, 0); - if (*mp == NULL) - return (error); + if (*mp == NULL) { + return error; + } bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN); - return (0); + return 0; bad: m_freem(*mp); *mp = NULL; - return (error); + return error; } /* @@ -5779,32 +5979,35 @@ bridge_ip_checkbasic(struct mbuf **mp) int len, hlen; u_short sum; - if (*mp == NULL) - return (-1); + if (*mp == NULL) { + return -1; + } if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { /* max_linkhdr is already rounded up to nearest 4-byte */ - if ((m = m_copyup(m, sizeof (struct ip), + if ((m = m_copyup(m, sizeof(struct ip), max_linkhdr)) == NULL) { /* XXXJRT new stat, please */ ipstat.ips_toosmall++; goto bad; } - } else if (__predict_false(m->m_len < sizeof (struct ip))) { - if ((m = m_pullup(m, sizeof (struct ip))) == NULL) { + } else if (__predict_false(m->m_len < sizeof(struct ip))) { + if ((m = m_pullup(m, sizeof(struct ip))) == NULL) { ipstat.ips_toosmall++; goto bad; } } ip = mtod(m, struct ip *); - if (ip == NULL) goto bad; + if (ip == NULL) { + goto bad; + } if (ip->ip_v != IPVERSION) { ipstat.ips_badvers++; goto bad; } hlen = ip->ip_hl << 2; - if (hlen < sizeof (struct ip)) { /* minimum header length */ + if (hlen < sizeof(struct ip)) { /* minimum header length */ ipstat.ips_badhlen++; goto bad; } @@ -5814,13 +6017,15 @@ bridge_ip_checkbasic(struct mbuf **mp) goto bad; } ip = mtod(m, struct ip *); - if (ip == NULL) goto bad; + if (ip == NULL) { + goto bad; + } } if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) { sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID); } else { - if (hlen == sizeof (struct ip)) { + if (hlen == sizeof(struct ip)) { sum = in_cksum_hdr(ip); } else { sum = in_cksum(m, hlen); @@ -5854,11 +6059,11 @@ bridge_ip_checkbasic(struct mbuf **mp) /* Checks out, proceed */ *mp = m; - return (0); + return 
0; bad: *mp = m; - return (-1); + return -1; } #if INET6 @@ -5882,16 +6087,16 @@ bridge_ip6_checkbasic(struct mbuf **mp) if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) { struct ifnet *inifp = m->m_pkthdr.rcvif; /* max_linkhdr is already rounded up to nearest 4-byte */ - if ((m = m_copyup(m, sizeof (struct ip6_hdr), + if ((m = m_copyup(m, sizeof(struct ip6_hdr), max_linkhdr)) == NULL) { /* XXXJRT new stat, please */ ip6stat.ip6s_toosmall++; in6_ifstat_inc(inifp, ifs6_in_hdrerr); goto bad; } - } else if (__predict_false(m->m_len < sizeof (struct ip6_hdr))) { + } else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) { struct ifnet *inifp = m->m_pkthdr.rcvif; - if ((m = m_pullup(m, sizeof (struct ip6_hdr))) == NULL) { + if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) { ip6stat.ip6s_toosmall++; in6_ifstat_inc(inifp, ifs6_in_hdrerr); goto bad; @@ -5908,11 +6113,11 @@ bridge_ip6_checkbasic(struct mbuf **mp) /* Checks out, proceed */ *mp = m; - return (0); + return 0; bad: *mp = m; - return (-1); + return -1; } #endif /* INET6 */ @@ -5923,33 +6128,35 @@ bad: */ static int bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, - int snap, struct llc *llc) + int snap, struct llc *llc) { struct mbuf *m0; struct ip *ip; int error = -1; - if (m->m_len < sizeof (struct ip) && - (m = m_pullup(m, sizeof (struct ip))) == NULL) + if (m->m_len < sizeof(struct ip) && + (m = m_pullup(m, sizeof(struct ip))) == NULL) { goto out; + } ip = mtod(m, struct ip *); error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist, CSUM_DELAY_IP); - if (error) + if (error) { goto out; + } /* walk the chain and re-add the Ethernet header */ for (m0 = m; m0; m0 = m0->m_nextpkt) { if (error == 0) { if (snap) { - M_PREPEND(m0, sizeof (struct llc), M_DONTWAIT, 0); + M_PREPEND(m0, sizeof(struct llc), M_DONTWAIT, 0); if (m0 == NULL) { error = ENOBUFS; continue; } bcopy(llc, mtod(m0, caddr_t), - sizeof (struct llc)); + sizeof(struct llc)); } M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT, 0); if (m0 == NULL) { @@ -5962,15 +6169,17 @@ bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh, } } - if (error == 0) + if (error == 0) { ipstat.ips_fragmented++; + } - return (error); + return error; out: - if (m != NULL) + if (m != NULL) { m_freem(m); - return (error); + } + return error; } #endif /* PFIL_HOOKS */ @@ -5986,31 +6195,31 @@ bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback) /* TBD locking */ if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) { - return (ENODEV); + return ENODEV; } ASSERT(bridge_in_bsd_mode(sc)); switch (mode) { - case BPF_TAP_DISABLE: - sc->sc_bpf_input = sc->sc_bpf_output = NULL; - break; + case BPF_TAP_DISABLE: + sc->sc_bpf_input = sc->sc_bpf_output = NULL; + break; - case BPF_TAP_INPUT: - sc->sc_bpf_input = bpf_callback; - break; + case BPF_TAP_INPUT: + sc->sc_bpf_input = bpf_callback; + break; - case BPF_TAP_OUTPUT: - sc->sc_bpf_output = bpf_callback; - break; + case BPF_TAP_OUTPUT: + sc->sc_bpf_output = bpf_callback; + break; - case BPF_TAP_INPUT_OUTPUT: - sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback; - break; + case BPF_TAP_INPUT_OUTPUT: + sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback; + break; - default: - break; + default: + break; } - return (0); + return 0; } /* @@ -6061,7 +6270,7 @@ bridge_bpf_input(ifnet_t ifp, struct mbuf *m) } (*sc->sc_bpf_input)(ifp, m); } - return (0); + return 0; } /* @@ -6078,7 +6287,7 @@ bridge_bpf_output(ifnet_t ifp, struct mbuf *m) if (sc->sc_bpf_output) { (*sc->sc_bpf_output)(ifp, m); } 
- return (0); + return 0; } /* @@ -6090,33 +6299,34 @@ static void bridge_link_event(struct ifnet *ifp, u_int32_t event_code) { struct { - struct kern_event_msg header; - u_int32_t unit; - char if_name[IFNAMSIZ]; + struct kern_event_msg header; + u_int32_t unit; + char if_name[IFNAMSIZ]; } event; #if BRIDGE_DEBUG - if (if_bridge_debug & BR_DBGF_LIFECYCLE) + if (if_bridge_debug & BR_DBGF_LIFECYCLE) { printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname, event_code, dlil_kev_dl_code_str(event_code)); + } #endif /* BRIDGE_DEBUG */ - bzero(&event, sizeof (event)); - event.header.total_size = sizeof (event); - event.header.vendor_code = KEV_VENDOR_APPLE; - event.header.kev_class = KEV_NETWORK_CLASS; - event.header.kev_subclass = KEV_DL_SUBCLASS; - event.header.event_code = event_code; - event.header.event_data[0] = ifnet_family(ifp); - event.unit = (u_int32_t)ifnet_unit(ifp); + bzero(&event, sizeof(event)); + event.header.total_size = sizeof(event); + event.header.vendor_code = KEV_VENDOR_APPLE; + event.header.kev_class = KEV_NETWORK_CLASS; + event.header.kev_subclass = KEV_DL_SUBCLASS; + event.header.event_code = event_code; + event.header.event_data[0] = ifnet_family(ifp); + event.unit = (u_int32_t)ifnet_unit(ifp); strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ); ifnet_event(ifp, &event.header); } -#define BRIDGE_HF_DROP(reason, func, line) { \ +#define BRIDGE_HF_DROP(reason, func, line) { \ bridge_hostfilter_stats.reason++; \ if (if_bridge_debug & BR_DBGF_HOSTFILTER) \ - printf("%s.%d" #reason, func, line); \ + printf("%s.%d" #reason, func, line); \ error = EINVAL; \ } @@ -6168,7 +6378,7 @@ bridge_dhcp_filter(struct bridge_iflist *bif, struct mbuf *m, size_t offset) } error = 0; done: - return (error); + return error; } static int @@ -6208,7 +6418,7 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) if (eh->ether_type == htons(ETHERTYPE_ARP)) { struct ether_arp *ea; size_t minlen = sizeof(struct ether_header) + - sizeof(struct ether_arp); + sizeof(struct ether_arp); /* * Make the Ethernet and ARP headers contiguous @@ -6219,7 +6429,7 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) } if (mbuf_len(m) < minlen && mbuf_pullup(&m, minlen) != 0) { BRIDGE_HF_DROP(brhf_arp_pullup_failed, - __func__, __LINE__); + __func__, __LINE__); goto done; } /* @@ -6229,12 +6439,12 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) ea = (struct ether_arp *)(eh + 1); if (ea->arp_hrd != htons(ARPHRD_ETHER)) { BRIDGE_HF_DROP(brhf_arp_bad_hw_type, - __func__, __LINE__); + __func__, __LINE__); goto done; } if (ea->arp_pro != htons(ETHERTYPE_IP)) { BRIDGE_HF_DROP(brhf_arp_bad_pro_type, - __func__, __LINE__); + __func__, __LINE__); goto done; } /* @@ -6246,7 +6456,7 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) } if (ea->arp_pln != sizeof(struct in_addr)) { BRIDGE_HF_DROP(brhf_arp_bad_pro_len, - __func__, __LINE__); + __func__, __LINE__); goto done; } @@ -6271,9 +6481,9 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) * May be null for an ARP probe */ if (bcmp(ea->arp_spa, &bif->bif_hf_ipsrc.s_addr, - sizeof(struct in_addr)) != 0 && + sizeof(struct in_addr)) != 0 && bcmp(ea->arp_spa, &inaddr_any, - sizeof(struct in_addr)) != 0) { + sizeof(struct in_addr)) != 0) { BRIDGE_HF_DROP(brhf_arp_bad_spa, __func__, __LINE__); goto done; } @@ -6309,7 +6519,7 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) minlen += sizeof(struct udphdr); if (mbuf_pkthdr_len(m) < minlen) { BRIDGE_HF_DROP(brhf_ip_too_small, - __func__, 
__LINE__); + __func__, __LINE__); goto done; } @@ -6319,7 +6529,7 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr && iphdr.ip_src.s_addr != INADDR_ANY) { BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, - __func__, __LINE__); + __func__, __LINE__); goto done; } offset = sizeof(struct ether_header) + @@ -6328,7 +6538,7 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) sizeof(struct udphdr), &udp); if (error != 0) { BRIDGE_HF_DROP(brhf_ip_too_small, - __func__, __LINE__); + __func__, __LINE__); goto done; } /* @@ -6340,21 +6550,21 @@ bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m) minlen += sizeof(struct dhcp); if (mbuf_pkthdr_len(m) < minlen) { BRIDGE_HF_DROP(brhf_ip_too_small, - __func__, __LINE__); + __func__, __LINE__); goto done; } offset += sizeof(struct udphdr); error = bridge_dhcp_filter(bif, m, offset); - if (error != 0) + if (error != 0) { goto done; + } } else if (iphdr.ip_src.s_addr == INADDR_ANY) { BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, - __func__, __LINE__); + __func__, __LINE__); goto done; } } else if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr || bif->bif_hf_ipsrc.s_addr == INADDR_ANY) { - BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, __func__, __LINE__); goto done; } @@ -6387,10 +6597,11 @@ done: printf("\n"); } - if (m != NULL) + if (m != NULL) { m_freem(m); + } } - return (error); + return error; } diff --git a/bsd/net/if_bridgevar.h b/bsd/net/if_bridgevar.h index 8b7188c86..ae7754b7b 100644 --- a/bsd/net/if_bridgevar.h +++ b/bsd/net/if_bridgevar.h @@ -114,43 +114,43 @@ * Commands used in the SIOCSDRVSPEC ioctl. Note the lookup of the * bridge interface itself is keyed off the ifdrv structure. */ -#define BRDGADD 0 /* add bridge member (ifbreq) */ -#define BRDGDEL 1 /* delete bridge member (ifbreq) */ -#define BRDGGIFFLGS 2 /* get member if flags (ifbreq) */ -#define BRDGSIFFLGS 3 /* set member if flags (ifbreq) */ -#define BRDGSCACHE 4 /* set cache size (ifbrparam) */ -#define BRDGGCACHE 5 /* get cache size (ifbrparam) */ -#define BRDGGIFS 6 /* get member list (ifbifconf) */ -#define BRDGRTS 7 /* get address list (ifbaconf) */ -#define BRDGSADDR 8 /* set static address (ifbareq) */ -#define BRDGSTO 9 /* set cache timeout (ifbrparam) */ -#define BRDGGTO 10 /* get cache timeout (ifbrparam) */ -#define BRDGDADDR 11 /* delete address (ifbareq) */ -#define BRDGFLUSH 12 /* flush address cache (ifbreq) */ - -#define BRDGGPRI 13 /* get priority (ifbrparam) */ -#define BRDGSPRI 14 /* set priority (ifbrparam) */ -#define BRDGGHT 15 /* get hello time (ifbrparam) */ -#define BRDGSHT 16 /* set hello time (ifbrparam) */ -#define BRDGGFD 17 /* get forward delay (ifbrparam) */ -#define BRDGSFD 18 /* set forward delay (ifbrparam) */ -#define BRDGGMA 19 /* get max age (ifbrparam) */ -#define BRDGSMA 20 /* set max age (ifbrparam) */ -#define BRDGSIFPRIO 21 /* set if priority (ifbreq) */ -#define BRDGSIFCOST 22 /* set if path cost (ifbreq) */ -#define BRDGGFILT 23 /* get filter flags (ifbrparam) */ -#define BRDGSFILT 24 /* set filter flags (ifbrparam) */ -#define BRDGPURGE 25 /* purge address cache for a particular interface (ifbreq) */ -#define BRDGADDS 26 /* add bridge span member (ifbreq) */ -#define BRDGDELS 27 /* delete bridge span member (ifbreq) */ -#define BRDGPARAM 28 /* get bridge STP params (ifbropreq) */ -#define BRDGGRTE 29 /* get cache drops (ifbrparam) */ -#define BRDGGIFSSTP 30 /* get member STP params list (ifbpstpconf) */ -#define BRDGSPROTO 31 /* set protocol (ifbrparam) */ -#define BRDGSTXHC 32 
/* set tx hold count (ifbrparam) */ -#define BRDGSIFAMAX 33 /* set max interface addrs (ifbreq) */ -#define BRDGGHOSTFILTER 34 /* set max interface addrs (ifbrhostfilter) */ -#define BRDGSHOSTFILTER 35 /* set max interface addrs (ifbrhostfilter) */ +#define BRDGADD 0 /* add bridge member (ifbreq) */ +#define BRDGDEL 1 /* delete bridge member (ifbreq) */ +#define BRDGGIFFLGS 2 /* get member if flags (ifbreq) */ +#define BRDGSIFFLGS 3 /* set member if flags (ifbreq) */ +#define BRDGSCACHE 4 /* set cache size (ifbrparam) */ +#define BRDGGCACHE 5 /* get cache size (ifbrparam) */ +#define BRDGGIFS 6 /* get member list (ifbifconf) */ +#define BRDGRTS 7 /* get address list (ifbaconf) */ +#define BRDGSADDR 8 /* set static address (ifbareq) */ +#define BRDGSTO 9 /* set cache timeout (ifbrparam) */ +#define BRDGGTO 10 /* get cache timeout (ifbrparam) */ +#define BRDGDADDR 11 /* delete address (ifbareq) */ +#define BRDGFLUSH 12 /* flush address cache (ifbreq) */ + +#define BRDGGPRI 13 /* get priority (ifbrparam) */ +#define BRDGSPRI 14 /* set priority (ifbrparam) */ +#define BRDGGHT 15 /* get hello time (ifbrparam) */ +#define BRDGSHT 16 /* set hello time (ifbrparam) */ +#define BRDGGFD 17 /* get forward delay (ifbrparam) */ +#define BRDGSFD 18 /* set forward delay (ifbrparam) */ +#define BRDGGMA 19 /* get max age (ifbrparam) */ +#define BRDGSMA 20 /* set max age (ifbrparam) */ +#define BRDGSIFPRIO 21 /* set if priority (ifbreq) */ +#define BRDGSIFCOST 22 /* set if path cost (ifbreq) */ +#define BRDGGFILT 23 /* get filter flags (ifbrparam) */ +#define BRDGSFILT 24 /* set filter flags (ifbrparam) */ +#define BRDGPURGE 25 /* purge address cache for a particular interface (ifbreq) */ +#define BRDGADDS 26 /* add bridge span member (ifbreq) */ +#define BRDGDELS 27 /* delete bridge span member (ifbreq) */ +#define BRDGPARAM 28 /* get bridge STP params (ifbropreq) */ +#define BRDGGRTE 29 /* get cache drops (ifbrparam) */ +#define BRDGGIFSSTP 30 /* get member STP params list (ifbpstpconf) */ +#define BRDGSPROTO 31 /* set protocol (ifbrparam) */ +#define BRDGSTXHC 32 /* set tx hold count (ifbrparam) */ +#define BRDGSIFAMAX 33 /* set max interface addrs (ifbreq) */ +#define BRDGGHOSTFILTER 34 /* set max interface addrs (ifbrhostfilter) */ +#define BRDGSHOSTFILTER 35 /* set max interface addrs (ifbrhostfilter) */ /* * Generic bridge control request. 
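
For context, a minimal userland sketch of how the command list above is driven: SIOCSDRVSPEC names the bridge interface in a struct ifdrv (per the header comment) and points ifd_data at the payload structure listed next to each BRDG* command. This is an illustration, not the canonical tool; it assumes struct ifdrv from net/if.h, the request structures from this private header, and an AF_INET datagram socket for s.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <net/if_bridgevar.h>

/* Sketch: add a member interface to a bridge with BRDGADD. */
static int
bridge_add_member(int s, const char *bridge, const char *member)
{
	struct ifbreq req;
	struct ifdrv ifd;

	memset(&req, 0, sizeof(req));
	strlcpy(req.ifbr_ifsname, member, sizeof(req.ifbr_ifsname));

	memset(&ifd, 0, sizeof(ifd));
	strlcpy(ifd.ifd_name, bridge, sizeof(ifd.ifd_name));
	ifd.ifd_cmd = BRDGADD;          /* add bridge member (ifbreq) */
	ifd.ifd_len = sizeof(req);
	ifd.ifd_data = &req;

	/* returns 0 on success, -1 with errno set otherwise */
	return ioctl(s, SIOCSDRVSPEC, &ifd);
}

A BRDGDEL carrying the same payload removes the member; commands that move only a scalar use struct ifbrparam instead, as sketched after the accessor macros below.
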
@@ -158,62 +158,62 @@ #pragma pack(4) struct ifbreq { - char ifbr_ifsname[IFNAMSIZ]; /* member if name */ - uint32_t ifbr_ifsflags; /* member if flags */ - uint32_t ifbr_stpflags; /* member if STP flags */ - uint32_t ifbr_path_cost; /* member if STP cost */ - uint8_t ifbr_portno; /* member if port number */ - uint8_t ifbr_priority; /* member if STP priority */ - uint8_t ifbr_proto; /* member if STP protocol */ - uint8_t ifbr_role; /* member if STP role */ - uint8_t ifbr_state; /* member if STP state */ - uint32_t ifbr_addrcnt; /* member if addr number */ - uint32_t ifbr_addrmax; /* member if addr max */ - uint32_t ifbr_addrexceeded; /* member if addr violations */ - uint8_t pad[32]; + char ifbr_ifsname[IFNAMSIZ]; /* member if name */ + uint32_t ifbr_ifsflags; /* member if flags */ + uint32_t ifbr_stpflags; /* member if STP flags */ + uint32_t ifbr_path_cost; /* member if STP cost */ + uint8_t ifbr_portno; /* member if port number */ + uint8_t ifbr_priority; /* member if STP priority */ + uint8_t ifbr_proto; /* member if STP protocol */ + uint8_t ifbr_role; /* member if STP role */ + uint8_t ifbr_state; /* member if STP state */ + uint32_t ifbr_addrcnt; /* member if addr number */ + uint32_t ifbr_addrmax; /* member if addr max */ + uint32_t ifbr_addrexceeded; /* member if addr violations */ + uint8_t pad[32]; }; #pragma pack() /* BRDGGIFFLAGS, BRDGSIFFLAGS */ -#define IFBIF_LEARNING 0x0001 /* if can learn */ -#define IFBIF_DISCOVER 0x0002 /* if sends packets w/ unknown dest. */ -#define IFBIF_STP 0x0004 /* if participates in spanning tree */ -#define IFBIF_SPAN 0x0008 /* if is a span port */ -#define IFBIF_STICKY 0x0010 /* if learned addresses stick */ -#define IFBIF_BSTP_EDGE 0x0020 /* member stp edge port */ -#define IFBIF_BSTP_AUTOEDGE 0x0040 /* member stp autoedge enabled */ -#define IFBIF_BSTP_PTP 0x0080 /* member stp point to point */ -#define IFBIF_BSTP_AUTOPTP 0x0100 /* member stp autoptp enabled */ -#define IFBIF_BSTP_ADMEDGE 0x0200 /* member stp admin edge enabled */ -#define IFBIF_BSTP_ADMCOST 0x0400 /* member stp admin path cost */ -#define IFBIF_PRIVATE 0x0800 /* if is a private segment */ - -#define IFBIFBITS "\020\001LEARNING\002DISCOVER\003STP\004SPAN" \ - "\005STICKY\014PRIVATE\006EDGE\007AUTOEDGE\010PTP" \ - "\011AUTOPTP" -#define IFBIFMASK ~(IFBIF_BSTP_EDGE|IFBIF_BSTP_AUTOEDGE|IFBIF_BSTP_PTP| \ - IFBIF_BSTP_AUTOPTP|IFBIF_BSTP_ADMEDGE| \ - IFBIF_BSTP_ADMCOST) /* not saved */ +#define IFBIF_LEARNING 0x0001 /* if can learn */ +#define IFBIF_DISCOVER 0x0002 /* if sends packets w/ unknown dest. 
*/ +#define IFBIF_STP 0x0004 /* if participates in spanning tree */ +#define IFBIF_SPAN 0x0008 /* if is a span port */ +#define IFBIF_STICKY 0x0010 /* if learned addresses stick */ +#define IFBIF_BSTP_EDGE 0x0020 /* member stp edge port */ +#define IFBIF_BSTP_AUTOEDGE 0x0040 /* member stp autoedge enabled */ +#define IFBIF_BSTP_PTP 0x0080 /* member stp point to point */ +#define IFBIF_BSTP_AUTOPTP 0x0100 /* member stp autoptp enabled */ +#define IFBIF_BSTP_ADMEDGE 0x0200 /* member stp admin edge enabled */ +#define IFBIF_BSTP_ADMCOST 0x0400 /* member stp admin path cost */ +#define IFBIF_PRIVATE 0x0800 /* if is a private segment */ + +#define IFBIFBITS "\020\001LEARNING\002DISCOVER\003STP\004SPAN" \ + "\005STICKY\014PRIVATE\006EDGE\007AUTOEDGE\010PTP" \ + "\011AUTOPTP" +#define IFBIFMASK ~(IFBIF_BSTP_EDGE|IFBIF_BSTP_AUTOEDGE|IFBIF_BSTP_PTP| \ + IFBIF_BSTP_AUTOPTP|IFBIF_BSTP_ADMEDGE| \ + IFBIF_BSTP_ADMCOST) /* not saved */ /* BRDGFLUSH */ -#define IFBF_FLUSHDYN 0x00 /* flush learned addresses only */ -#define IFBF_FLUSHALL 0x01 /* flush all addresses */ +#define IFBF_FLUSHDYN 0x00 /* flush learned addresses only */ +#define IFBF_FLUSHALL 0x01 /* flush all addresses */ /* BRDGSFILT */ -#define IFBF_FILT_USEIPF 0x00000001 /* run pfil hooks on the bridge -interface */ -#define IFBF_FILT_MEMBER 0x00000002 /* run pfil hooks on the member -interfaces */ -#define IFBF_FILT_ONLYIP 0x00000004 /* only pass IP[46] packets when -pfil is enabled */ -#define IFBF_FILT_MASK 0x00000007 /* mask of valid values */ +#define IFBF_FILT_USEIPF 0x00000001 /* run pfil hooks on the bridge + * interface */ +#define IFBF_FILT_MEMBER 0x00000002 /* run pfil hooks on the member + * interfaces */ +#define IFBF_FILT_ONLYIP 0x00000004 /* only pass IP[46] packets when + * pfil is enabled */ +#define IFBF_FILT_MASK 0x00000007 /* mask of valid values */ /* APPLE MODIFICATION : Default is to pass non-IP packets. 
*/ -#define IFBF_FILT_DEFAULT ( IFBF_FILT_USEIPF | IFBF_FILT_MEMBER ) +#define IFBF_FILT_DEFAULT ( IFBF_FILT_USEIPF | IFBF_FILT_MEMBER ) #if 0 -#define IFBF_FILT_DEFAULT (IFBF_FILT_USEIPF | \ +#define IFBF_FILT_DEFAULT (IFBF_FILT_USEIPF | \ IFBF_FILT_MEMBER | \ IFBF_FILT_ONLYIP) #endif @@ -227,32 +227,32 @@ IFBF_FILT_ONLYIP) #ifndef XNU_KERNEL_PRIVATE struct ifbifconf { - uint32_t ifbic_len; /* buffer size */ + uint32_t ifbic_len; /* buffer size */ union { - caddr_t ifbicu_buf; + caddr_t ifbicu_buf; struct ifbreq *ifbicu_req; -#define ifbic_buf ifbic_ifbicu.ifbicu_buf -#define ifbic_req ifbic_ifbicu.ifbicu_req +#define ifbic_buf ifbic_ifbicu.ifbicu_buf +#define ifbic_req ifbic_ifbicu.ifbicu_req } ifbic_ifbicu; }; #else /* XNU_KERNEL_PRIVATE */ struct ifbifconf32 { - uint32_t ifbic_len; /* buffer size */ + uint32_t ifbic_len; /* buffer size */ union { - user32_addr_t ifbicu_buf; - user32_addr_t ifbicu_req; -#define ifbic_buf ifbic_ifbicu.ifbicu_buf -#define ifbic_req ifbic_ifbicu.ifbicu_req + user32_addr_t ifbicu_buf; + user32_addr_t ifbicu_req; +#define ifbic_buf ifbic_ifbicu.ifbicu_buf +#define ifbic_req ifbic_ifbicu.ifbicu_req } ifbic_ifbicu; }; struct ifbifconf64 { - uint32_t ifbic_len; /* buffer size */ + uint32_t ifbic_len; /* buffer size */ union { - user64_addr_t ifbicu_buf; - user64_addr_t ifbicu_req; + user64_addr_t ifbicu_buf; + user64_addr_t ifbicu_req; } ifbic_ifbicu; }; #endif /* XNU_KERNEL_PRIVATE */ @@ -268,40 +268,40 @@ struct ifbifconf64 { #ifndef XNU_KERNEL_PRIVATE struct ifbareq { - char ifba_ifsname[IFNAMSIZ]; /* member if name */ - unsigned long ifba_expire; /* address expire time */ - uint8_t ifba_flags; /* address flags */ - uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */ - uint16_t ifba_vlan; /* vlan id */ + char ifba_ifsname[IFNAMSIZ]; /* member if name */ + unsigned long ifba_expire; /* address expire time */ + uint8_t ifba_flags; /* address flags */ + uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */ + uint16_t ifba_vlan; /* vlan id */ }; #else /* XNU_KERNEL_PRIVATE */ struct ifbareq32 { - char ifba_ifsname[IFNAMSIZ]; /* member if name */ - uint32_t ifba_expire; /* address expire time */ - uint8_t ifba_flags; /* address flags */ - uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */ - uint16_t ifba_vlan; /* vlan id */ + char ifba_ifsname[IFNAMSIZ]; /* member if name */ + uint32_t ifba_expire; /* address expire time */ + uint8_t ifba_flags; /* address flags */ + uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */ + uint16_t ifba_vlan; /* vlan id */ }; struct ifbareq64 { - char ifba_ifsname[IFNAMSIZ]; /* member if name */ - uint64_t ifba_expire; /* address expire time */ - uint8_t ifba_flags; /* address flags */ - uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */ - uint16_t ifba_vlan; /* vlan id */ + char ifba_ifsname[IFNAMSIZ]; /* member if name */ + uint64_t ifba_expire; /* address expire time */ + uint8_t ifba_flags; /* address flags */ + uint8_t ifba_dst[ETHER_ADDR_LEN];/* destination address */ + uint16_t ifba_vlan; /* vlan id */ }; #endif /* XNU_KERNEL_PRIVATE */ #pragma pack() -#define IFBAF_TYPEMASK 0x03 /* address type mask */ -#define IFBAF_DYNAMIC 0x00 /* dynamically learned address */ -#define IFBAF_STATIC 0x01 /* static address */ -#define IFBAF_STICKY 0x02 /* sticky address */ +#define IFBAF_TYPEMASK 0x03 /* address type mask */ +#define IFBAF_DYNAMIC 0x00 /* dynamically learned address */ +#define IFBAF_STATIC 0x01 /* static address */ +#define IFBAF_STICKY 0x02 /* sticky address */ -#define IFBAFBITS 
"\020\1STATIC\2STICKY" +#define IFBAFBITS "\020\1STATIC\2STICKY" /* * Address list structure. @@ -312,32 +312,32 @@ struct ifbareq64 { #ifndef XNU_KERNEL_PRIVATE struct ifbaconf { - uint32_t ifbac_len; /* buffer size */ + uint32_t ifbac_len; /* buffer size */ union { caddr_t ifbacu_buf; struct ifbareq *ifbacu_req; -#define ifbac_buf ifbac_ifbacu.ifbacu_buf -#define ifbac_req ifbac_ifbacu.ifbacu_req +#define ifbac_buf ifbac_ifbacu.ifbacu_buf +#define ifbac_req ifbac_ifbacu.ifbacu_req } ifbac_ifbacu; }; #else /* XNU_KERNEL_PRIVATE */ struct ifbaconf32 { - uint32_t ifbac_len; /* buffer size */ + uint32_t ifbac_len; /* buffer size */ union { - user32_addr_t ifbacu_buf; - user32_addr_t ifbacu_req; -#define ifbac_buf ifbac_ifbacu.ifbacu_buf -#define ifbac_req ifbac_ifbacu.ifbacu_req + user32_addr_t ifbacu_buf; + user32_addr_t ifbacu_req; +#define ifbac_buf ifbac_ifbacu.ifbacu_buf +#define ifbac_req ifbac_ifbacu.ifbacu_req } ifbac_ifbacu; }; struct ifbaconf64 { - uint32_t ifbac_len; /* buffer size */ + uint32_t ifbac_len; /* buffer size */ union { - user64_addr_t ifbacu_buf; - user64_addr_t ifbacu_req; + user64_addr_t ifbacu_buf; + user64_addr_t ifbacu_req; } ifbac_ifbacu; }; #endif /* XNU_KERNEL_PRIVATE */ @@ -360,17 +360,17 @@ struct ifbrparam { #pragma pack() -#define ifbrp_csize ifbrp_ifbrpu.ifbrpu_int32 /* cache size */ -#define ifbrp_ctime ifbrp_ifbrpu.ifbrpu_int32 /* cache time (sec) */ -#define ifbrp_prio ifbrp_ifbrpu.ifbrpu_int16 /* bridge priority */ -#define ifbrp_proto ifbrp_ifbrpu.ifbrpu_int8 /* bridge protocol */ -#define ifbrp_txhc ifbrp_ifbrpu.ifbrpu_int8 /* bpdu tx holdcount */ -#define ifbrp_hellotime ifbrp_ifbrpu.ifbrpu_int8 /* hello time (sec) */ -#define ifbrp_fwddelay ifbrp_ifbrpu.ifbrpu_int8 /* fwd time (sec) */ -#define ifbrp_maxage ifbrp_ifbrpu.ifbrpu_int8 /* max age (sec) */ -#define ifbrp_cexceeded ifbrp_ifbrpu.ifbrpu_int32 /* # of cache dropped - * adresses */ -#define ifbrp_filter ifbrp_ifbrpu.ifbrpu_int32 /* filtering flags */ +#define ifbrp_csize ifbrp_ifbrpu.ifbrpu_int32 /* cache size */ +#define ifbrp_ctime ifbrp_ifbrpu.ifbrpu_int32 /* cache time (sec) */ +#define ifbrp_prio ifbrp_ifbrpu.ifbrpu_int16 /* bridge priority */ +#define ifbrp_proto ifbrp_ifbrpu.ifbrpu_int8 /* bridge protocol */ +#define ifbrp_txhc ifbrp_ifbrpu.ifbrpu_int8 /* bpdu tx holdcount */ +#define ifbrp_hellotime ifbrp_ifbrpu.ifbrpu_int8 /* hello time (sec) */ +#define ifbrp_fwddelay ifbrp_ifbrpu.ifbrpu_int8 /* fwd time (sec) */ +#define ifbrp_maxage ifbrp_ifbrpu.ifbrpu_int8 /* max age (sec) */ +#define ifbrp_cexceeded ifbrp_ifbrpu.ifbrpu_int32 /* # of cache dropped + * adresses */ +#define ifbrp_filter ifbrp_ifbrpu.ifbrpu_int32 /* filtering flags */ /* * Bridge current operational parameters structure. 
@@ -381,50 +381,50 @@ struct ifbrparam { #ifndef XNU_KERNEL_PRIVATE struct ifbropreq { - uint8_t ifbop_holdcount; - uint8_t ifbop_maxage; - uint8_t ifbop_hellotime; - uint8_t ifbop_fwddelay; - uint8_t ifbop_protocol; - uint16_t ifbop_priority; - uint16_t ifbop_root_port; - uint32_t ifbop_root_path_cost; - uint64_t ifbop_bridgeid; - uint64_t ifbop_designated_root; - uint64_t ifbop_designated_bridge; - struct timeval ifbop_last_tc_time; + uint8_t ifbop_holdcount; + uint8_t ifbop_maxage; + uint8_t ifbop_hellotime; + uint8_t ifbop_fwddelay; + uint8_t ifbop_protocol; + uint16_t ifbop_priority; + uint16_t ifbop_root_port; + uint32_t ifbop_root_path_cost; + uint64_t ifbop_bridgeid; + uint64_t ifbop_designated_root; + uint64_t ifbop_designated_bridge; + struct timeval ifbop_last_tc_time; }; #else /* XNU_KERNEL_PRIVATE */ struct ifbropreq32 { - uint8_t ifbop_holdcount; - uint8_t ifbop_maxage; - uint8_t ifbop_hellotime; - uint8_t ifbop_fwddelay; - uint8_t ifbop_protocol; - uint16_t ifbop_priority; - uint16_t ifbop_root_port; - uint32_t ifbop_root_path_cost; - uint64_t ifbop_bridgeid; - uint64_t ifbop_designated_root; - uint64_t ifbop_designated_bridge; - struct timeval ifbop_last_tc_time; + uint8_t ifbop_holdcount; + uint8_t ifbop_maxage; + uint8_t ifbop_hellotime; + uint8_t ifbop_fwddelay; + uint8_t ifbop_protocol; + uint16_t ifbop_priority; + uint16_t ifbop_root_port; + uint32_t ifbop_root_path_cost; + uint64_t ifbop_bridgeid; + uint64_t ifbop_designated_root; + uint64_t ifbop_designated_bridge; + struct timeval ifbop_last_tc_time; }; struct ifbropreq64 { - uint8_t ifbop_holdcount; - uint8_t ifbop_maxage; - uint8_t ifbop_hellotime; - uint8_t ifbop_fwddelay; - uint8_t ifbop_protocol; - uint16_t ifbop_priority; - uint16_t ifbop_root_port; - uint32_t ifbop_root_path_cost; - uint64_t ifbop_bridgeid; - uint64_t ifbop_designated_root; - uint64_t ifbop_designated_bridge; - struct timeval ifbop_last_tc_time; + uint8_t ifbop_holdcount; + uint8_t ifbop_maxage; + uint8_t ifbop_hellotime; + uint8_t ifbop_fwddelay; + uint8_t ifbop_protocol; + uint16_t ifbop_priority; + uint16_t ifbop_root_port; + uint32_t ifbop_root_path_cost; + uint64_t ifbop_bridgeid; + uint64_t ifbop_designated_root; + uint64_t ifbop_designated_bridge; + struct timeval ifbop_last_tc_time; }; #endif @@ -438,12 +438,12 @@ struct ifbropreq64 { #pragma pack(4) struct ifbpstpreq { - uint8_t ifbp_portno; /* bp STP port number */ - uint32_t ifbp_fwd_trans; /* bp STP fwd transitions */ - uint32_t ifbp_design_cost; /* bp STP designated cost */ - uint32_t ifbp_design_port; /* bp STP designated port */ - uint64_t ifbp_design_bridge; /* bp STP designated bridge */ - uint64_t ifbp_design_root; /* bp STP designated root */ + uint8_t ifbp_portno; /* bp STP port number */ + uint32_t ifbp_fwd_trans; /* bp STP fwd transitions */ + uint32_t ifbp_design_cost; /* bp STP designated cost */ + uint32_t ifbp_design_port; /* bp STP designated port */ + uint64_t ifbp_design_bridge; /* bp STP designated bridge */ + uint64_t ifbp_design_root; /* bp STP designated root */ }; #pragma pack() @@ -457,32 +457,32 @@ struct ifbpstpreq { #ifndef XNU_KERNEL_PRIVATE struct ifbpstpconf { - uint32_t ifbpstp_len; /* buffer size */ + uint32_t ifbpstp_len; /* buffer size */ union { - caddr_t ifbpstpu_buf; + caddr_t ifbpstpu_buf; struct ifbpstpreq *ifbpstpu_req; } ifbpstp_ifbpstpu; -#define ifbpstp_buf ifbpstp_ifbpstpu.ifbpstpu_buf -#define ifbpstp_req ifbpstp_ifbpstpu.ifbpstpu_req +#define ifbpstp_buf ifbpstp_ifbpstpu.ifbpstpu_buf +#define ifbpstp_req ifbpstp_ifbpstpu.ifbpstpu_req }; 
#else /* XNU_KERNEL_PRIVATE */ struct ifbpstpconf32 { - uint32_t ifbpstp_len; /* buffer size */ + uint32_t ifbpstp_len; /* buffer size */ union { - user32_addr_t ifbpstpu_buf; - user32_addr_t ifbpstpu_req; -#define ifbpstp_buf ifbpstp_ifbpstpu.ifbpstpu_buf -#define ifbpstp_req ifbpstp_ifbpstpu.ifbpstpu_req + user32_addr_t ifbpstpu_buf; + user32_addr_t ifbpstpu_req; +#define ifbpstp_buf ifbpstp_ifbpstpu.ifbpstpu_buf +#define ifbpstp_req ifbpstp_ifbpstpu.ifbpstpu_req } ifbpstp_ifbpstpu; }; struct ifbpstpconf64 { - uint32_t ifbpstp_len; /* buffer size */ + uint32_t ifbpstp_len; /* buffer size */ union { - user64_addr_t ifbpstpu_buf; - user64_addr_t ifbpstpu_req; + user64_addr_t ifbpstpu_buf; + user64_addr_t ifbpstpu_req; } ifbpstp_ifbpstpu; }; @@ -494,17 +494,17 @@ struct ifbpstpconf64 { * Bridge member host filter. */ -#define IFBRHF_ENABLED 0x01 -#define IFBRHF_HWSRC 0x02 /* Valid with enabled flags */ -#define IFBRHF_IPSRC 0x04 /* Valid with enabled flags */ +#define IFBRHF_ENABLED 0x01 +#define IFBRHF_HWSRC 0x02 /* Valid with enabled flags */ +#define IFBRHF_IPSRC 0x04 /* Valid with enabled flags */ #pragma pack(4) struct ifbrhostfilter { - uint32_t ifbrhf_flags; /* flags */ - char ifbrhf_ifsname[IFNAMSIZ]; /* member if name */ - uint8_t ifbrhf_hwsrca[ETHER_ADDR_LEN]; - uint32_t ifbrhf_ipsrc; + uint32_t ifbrhf_flags; /* flags */ + char ifbrhf_ifsname[IFNAMSIZ]; /* member if name */ + uint8_t ifbrhf_hwsrca[ETHER_ADDR_LEN]; + uint32_t ifbrhf_ipsrc; }; #pragma pack() @@ -513,42 +513,42 @@ struct ifbrhostfilter { * sysctl net.link.bridge.hostfilterstats */ struct bridge_hostfilter_stats { - uint64_t brhf_bad_ether_type; - uint64_t brhf_bad_ether_srchw_addr; - - uint64_t brhf_ether_too_small; - uint64_t brhf_ether_pullup_failed; - - uint64_t brhf_arp_ok; - uint64_t brhf_arp_too_small; - uint64_t brhf_arp_pullup_failed; - uint64_t brhf_arp_bad_hw_type; - uint64_t brhf_arp_bad_pro_type; - uint64_t brhf_arp_bad_hw_len; - uint64_t brhf_arp_bad_pro_len; - uint64_t brhf_arp_bad_op; - uint64_t brhf_arp_bad_sha; - uint64_t brhf_arp_bad_spa; - - uint64_t brhf_ip_ok; - uint64_t brhf_ip_too_small; - uint64_t brhf_ip_pullup_failed; - uint64_t brhf_ip_bad_srcaddr; - uint64_t brhf_ip_bad_proto; - - uint64_t brhf_dhcp_too_small; - uint64_t brhf_dhcp_bad_op; - uint64_t brhf_dhcp_bad_htype; - uint64_t brhf_dhcp_bad_hlen; - uint64_t brhf_dhcp_bad_chaddr; - uint64_t brhf_dhcp_bad_ciaddr; + uint64_t brhf_bad_ether_type; + uint64_t brhf_bad_ether_srchw_addr; + + uint64_t brhf_ether_too_small; + uint64_t brhf_ether_pullup_failed; + + uint64_t brhf_arp_ok; + uint64_t brhf_arp_too_small; + uint64_t brhf_arp_pullup_failed; + uint64_t brhf_arp_bad_hw_type; + uint64_t brhf_arp_bad_pro_type; + uint64_t brhf_arp_bad_hw_len; + uint64_t brhf_arp_bad_pro_len; + uint64_t brhf_arp_bad_op; + uint64_t brhf_arp_bad_sha; + uint64_t brhf_arp_bad_spa; + + uint64_t brhf_ip_ok; + uint64_t brhf_ip_too_small; + uint64_t brhf_ip_pullup_failed; + uint64_t brhf_ip_bad_srcaddr; + uint64_t brhf_ip_bad_proto; + + uint64_t brhf_dhcp_too_small; + uint64_t brhf_dhcp_bad_op; + uint64_t brhf_dhcp_bad_htype; + uint64_t brhf_dhcp_bad_hlen; + uint64_t brhf_dhcp_bad_chaddr; + uint64_t brhf_dhcp_bad_ciaddr; }; #ifdef XNU_KERNEL_PRIVATE -extern u_int8_t bstp_etheraddr[ETHER_ADDR_LEN]; +extern u_int8_t bstp_etheraddr[ETHER_ADDR_LEN]; -int bridgeattach(int); +int bridgeattach(int); #endif /* XNU_KERNEL_PRIVATE */ #endif /* PRIVATE */ diff --git a/bsd/net/if_dl.h b/bsd/net/if_dl.h index 55d504dc8..58f0a8954 100644 --- a/bsd/net/if_dl.h +++ 
b/bsd/net/if_dl.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -68,7 +68,7 @@ #include #ifdef BSD_KERNEL_PRIVATE -#define DLIL_SDLMAXLEN 64 +#define DLIL_SDLMAXLEN 64 #endif /* BSD_KERNEL_PRIVATE */ /* @@ -93,19 +93,19 @@ * Structure of a Link-Level sockaddr: */ struct sockaddr_dl { - u_char sdl_len; /* Total length of sockaddr */ - u_char sdl_family; /* AF_LINK */ - u_short sdl_index; /* if != 0, system given index for interface */ - u_char sdl_type; /* interface type */ - u_char sdl_nlen; /* interface name length, no trailing 0 reqd. */ - u_char sdl_alen; /* link level address length */ - u_char sdl_slen; /* link layer selector length */ - char sdl_data[12]; /* minimum work area, can be larger; - contains both if name and ll address */ + u_char sdl_len; /* Total length of sockaddr */ + u_char sdl_family; /* AF_LINK */ + u_short sdl_index; /* if != 0, system given index for interface */ + u_char sdl_type; /* interface type */ + u_char sdl_nlen; /* interface name length, no trailing 0 reqd. */ + u_char sdl_alen; /* link level address length */ + u_char sdl_slen; /* link layer selector length */ + char sdl_data[12]; /* minimum work area, can be larger; + * contains both if name and ll address */ #ifndef __APPLE__ /* For TokenRing */ - u_short sdl_rcf; /* source routing control */ - u_short sdl_route[16]; /* source routing information */ + u_short sdl_rcf; /* source routing control */ + u_short sdl_route[16]; /* source routing information */ #endif }; @@ -123,8 +123,8 @@ struct sockaddr_dl { #include __BEGIN_DECLS -void link_addr(const char *, struct sockaddr_dl *); -char *link_ntoa(const struct sockaddr_dl *); +void link_addr(const char *, struct sockaddr_dl *); +char *link_ntoa(const struct sockaddr_dl *); __END_DECLS #endif /* !KERNEL */ diff --git a/bsd/net/if_ether.h b/bsd/net/if_ether.h index 4cdc5d28b..b4c716b54 100644 --- a/bsd/net/if_ether.h +++ b/bsd/net/if_ether.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -42,11 +42,11 @@ extern int ether_family_init(void); * Ethernet Family, these functions will be set for you. Use these * functions when filling out the ifnet_init_params structure. */ -errno_t ether_demux(ifnet_t interface, mbuf_t packet, char* header, +errno_t ether_demux(ifnet_t interface, mbuf_t packet, char* header, protocol_family_t *protocol); -errno_t ether_add_proto(ifnet_t interface, protocol_family_t protocol, +errno_t ether_add_proto(ifnet_t interface, protocol_family_t protocol, const struct ifnet_demux_desc *demux_list, u_int32_t demux_count); -errno_t ether_del_proto(ifnet_t interface, protocol_family_t protocol); +errno_t ether_del_proto(ifnet_t interface, protocol_family_t protocol); #if KPI_INTERFACE_EMBEDDED errno_t ether_frameout(ifnet_t interface, mbuf_t *packet, const struct sockaddr *dest, const char *dest_lladdr, @@ -61,8 +61,8 @@ errno_t ether_frameout_extended(ifnet_t interface, mbuf_t *packet, const struct sockaddr *dest, const char *dest_lladdr, const char *frame_type, u_int32_t *prepend_len, u_int32_t *postpend_len); #endif /* KERNEL_PRIVATE */ -errno_t ether_ioctl(ifnet_t interface, u_int32_t command, void* data); -errno_t ether_check_multi(ifnet_t ifp, const struct sockaddr *multicast); +errno_t ether_ioctl(ifnet_t interface, u_int32_t command, void* data); +errno_t ether_check_multi(ifnet_t ifp, const struct sockaddr *multicast); __END_DECLS diff --git a/bsd/net/if_fake.c b/bsd/net/if_fake.c index 543a0cb81..88bbab707 100644 --- a/bsd/net/if_fake.c +++ b/bsd/net/if_fake.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2015-2018 Apple Inc. All rights reserved. + * Copyright (c) 2015-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -81,64 +81,64 @@ #include #include -#define FAKE_ETHER_NAME "feth" +#define FAKE_ETHER_NAME "feth" SYSCTL_DECL(_net_link); -SYSCTL_NODE(_net_link, OID_AUTO, fake, CTLFLAG_RW|CTLFLAG_LOCKED, 0, - "Fake interface"); +SYSCTL_NODE(_net_link, OID_AUTO, fake, CTLFLAG_RW | CTLFLAG_LOCKED, 0, + "Fake interface"); static int if_fake_txstart = 1; SYSCTL_INT(_net_link_fake, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_fake_txstart, 0, "Fake interface TXSTART mode"); + &if_fake_txstart, 0, "Fake interface TXSTART mode"); static int if_fake_hwcsum = 0; SYSCTL_INT(_net_link_fake, OID_AUTO, hwcsum, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_fake_hwcsum, 0, "Fake interface simulate hardware checksum"); + &if_fake_hwcsum, 0, "Fake interface simulate hardware checksum"); static int if_fake_nxattach = 0; SYSCTL_INT(_net_link_fake, OID_AUTO, nxattach, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_fake_nxattach, 0, "Fake interface auto-attach nexus"); + &if_fake_nxattach, 0, "Fake interface auto-attach nexus"); static int if_fake_bsd_mode = 1; SYSCTL_INT(_net_link_fake, OID_AUTO, bsd_mode, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_fake_bsd_mode, 0, "Fake interface attach as BSD interface"); + &if_fake_bsd_mode, 0, "Fake interface attach as BSD interface"); static int if_fake_debug = 0; SYSCTL_INT(_net_link_fake, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_fake_debug, 0, "Fake interface debug logs"); + &if_fake_debug, 0, "Fake interface debug logs"); static int if_fake_wmm_mode = 0; SYSCTL_INT(_net_link_fake, OID_AUTO, wmm_mode, CTLFLAG_RW | CTLFLAG_LOCKED, - &if_fake_wmm_mode, 0, "Fake interface in 802.11 WMM mode"); + &if_fake_wmm_mode, 0, "Fake interface in 802.11 WMM mode"); /** - ** virtual ethernet structures, types - **/ +** virtual ethernet structures, types +**/ -#define IFF_NUM_TX_RINGS_WMM_MODE 4 -#define IFF_NUM_RX_RINGS_WMM_MODE 1 -#define IFF_MAX_TX_RINGS IFF_NUM_TX_RINGS_WMM_MODE -#define IFF_MAX_RX_RINGS IFF_NUM_RX_RINGS_WMM_MODE +#define IFF_NUM_TX_RINGS_WMM_MODE 4 +#define IFF_NUM_RX_RINGS_WMM_MODE 1 +#define IFF_MAX_TX_RINGS IFF_NUM_TX_RINGS_WMM_MODE +#define IFF_MAX_RX_RINGS IFF_NUM_RX_RINGS_WMM_MODE -typedef uint16_t iff_flags_t; -#define IFF_FLAGS_HWCSUM 0x0001 -#define IFF_FLAGS_BSD_MODE 0x0002 -#define IFF_FLAGS_DETACHING 0x0004 -#define IFF_FLAGS_WMM_MODE 0x0008 +typedef uint16_t iff_flags_t; +#define IFF_FLAGS_HWCSUM 0x0001 +#define IFF_FLAGS_BSD_MODE 0x0002 +#define IFF_FLAGS_DETACHING 0x0004 +#define IFF_FLAGS_WMM_MODE 0x0008 struct if_fake { - char iff_name[IFNAMSIZ]; /* our unique id */ - ifnet_t iff_ifp; - iff_flags_t iff_flags; - uint32_t iff_retain_count; - ifnet_t iff_peer; /* the other end */ - int iff_media_current; - int iff_media_active; - uint32_t iff_media_count; - int iff_media_list[IF_FAKE_MEDIA_LIST_MAX]; - struct mbuf * iff_pending_tx_packet; - boolean_t iff_start_busy; + char iff_name[IFNAMSIZ]; /* our unique id */ + ifnet_t iff_ifp; + iff_flags_t iff_flags; + uint32_t iff_retain_count; + ifnet_t iff_peer; /* the other end */ + int iff_media_current; + int iff_media_active; + uint32_t iff_media_count; + int iff_media_list[IF_FAKE_MEDIA_LIST_MAX]; + struct mbuf * iff_pending_tx_packet; + boolean_t iff_start_busy; }; typedef struct if_fake * if_fake_ref; @@ -146,13 +146,13 @@ typedef struct if_fake * if_fake_ref; static if_fake_ref ifnet_get_if_fake(ifnet_t ifp); -#define FETH_DPRINTF(fmt, ...) 
\ +#define FETH_DPRINTF(fmt, ...) \ { if (if_fake_debug != 0) printf("%s " fmt, __func__, ## __VA_ARGS__); } static inline boolean_t feth_in_bsd_mode(if_fake_ref fakeif) { - return ((fakeif->iff_flags & IFF_FLAGS_BSD_MODE) != 0); + return (fakeif->iff_flags & IFF_FLAGS_BSD_MODE) != 0; } static inline void @@ -164,7 +164,7 @@ feth_set_detaching(if_fake_ref fakeif) static inline boolean_t feth_is_detaching(if_fake_ref fakeif) { - return ((fakeif->iff_flags & IFF_FLAGS_DETACHING) != 0); + return (fakeif->iff_flags & IFF_FLAGS_DETACHING) != 0; } static int @@ -172,38 +172,39 @@ feth_enable_dequeue_stall(ifnet_t ifp, uint32_t enable) { int error; - if (enable != 0) + if (enable != 0) { error = ifnet_disable_output(ifp); - else + } else { error = ifnet_enable_output(ifp); + } - return (error); + return error; } -#define FETH_MAXUNIT IF_MAXUNIT -#define FETH_ZONE_MAX_ELEM MIN(IFNETS_MAX, FETH_MAXUNIT) -#define M_FAKE M_DEVBUF +#define FETH_MAXUNIT IF_MAXUNIT +#define FETH_ZONE_MAX_ELEM MIN(IFNETS_MAX, FETH_MAXUNIT) +#define M_FAKE M_DEVBUF -static int feth_clone_create(struct if_clone *, u_int32_t, void *); -static int feth_clone_destroy(ifnet_t); -static int feth_output(ifnet_t ifp, struct mbuf *m); -static void feth_start(ifnet_t ifp); -static int feth_ioctl(ifnet_t ifp, u_long cmd, void * addr); -static int feth_config(ifnet_t ifp, ifnet_t peer); -static void feth_if_free(ifnet_t ifp); -static void feth_ifnet_set_attrs(if_fake_ref fakeif, ifnet_t ifp); -static void feth_free(if_fake_ref fakeif); +static int feth_clone_create(struct if_clone *, u_int32_t, void *); +static int feth_clone_destroy(ifnet_t); +static int feth_output(ifnet_t ifp, struct mbuf *m); +static void feth_start(ifnet_t ifp); +static int feth_ioctl(ifnet_t ifp, u_long cmd, void * addr); +static int feth_config(ifnet_t ifp, ifnet_t peer); +static void feth_if_free(ifnet_t ifp); +static void feth_ifnet_set_attrs(if_fake_ref fakeif, ifnet_t ifp); +static void feth_free(if_fake_ref fakeif); static struct if_clone -feth_cloner = IF_CLONE_INITIALIZER(FAKE_ETHER_NAME, + feth_cloner = IF_CLONE_INITIALIZER(FAKE_ETHER_NAME, feth_clone_create, feth_clone_destroy, 0, FETH_MAXUNIT, FETH_ZONE_MAX_ELEM, sizeof(struct if_fake)); -static void interface_link_event(ifnet_t ifp, u_int32_t event_code); +static void interface_link_event(ifnet_t ifp, u_int32_t event_code); /* some media words to pretend to be ethernet */ static int default_media_words[] = { @@ -211,43 +212,53 @@ static int default_media_words[] = { IFM_MAKEWORD(IFM_ETHER, IFM_10G_T, IFM_FDX, 0), IFM_MAKEWORD(IFM_ETHER, IFM_2500_T, IFM_FDX, 0), IFM_MAKEWORD(IFM_ETHER, IFM_5000_T, IFM_FDX, 0), + + IFM_MAKEWORD(IFM_ETHER, IFM_10G_KX4, IFM_FDX, 0), + IFM_MAKEWORD(IFM_ETHER, IFM_20G_KR2, IFM_FDX, 0), + IFM_MAKEWORD(IFM_ETHER, IFM_2500_SX, IFM_FDX, 0), + IFM_MAKEWORD(IFM_ETHER, IFM_25G_KR, IFM_FDX, 0), + IFM_MAKEWORD(IFM_ETHER, IFM_40G_SR4, IFM_FDX, 0), + IFM_MAKEWORD(IFM_ETHER, IFM_50G_CR2, IFM_FDX, 0), + IFM_MAKEWORD(IFM_ETHER, IFM_56G_R4, IFM_FDX, 0), + IFM_MAKEWORD(IFM_ETHER, IFM_100G_CR4, IFM_FDX, 0), + IFM_MAKEWORD(IFM_ETHER, IFM_400G_AUI8, IFM_FDX, 0), }; -#define default_media_words_count (sizeof(default_media_words) \ - / sizeof (default_media_words[0])) +#define default_media_words_count (sizeof(default_media_words) \ + / sizeof (default_media_words[0])) /** - ** veth locks - **/ +** veth locks +**/ static inline lck_grp_t * my_lck_grp_alloc_init(const char * grp_name) { - lck_grp_t * grp; - lck_grp_attr_t * grp_attrs; - + lck_grp_t * grp; + lck_grp_attr_t * grp_attrs; + 
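/*
 * The net.link.fake.* knobs declared above (txstart, hwcsum, nxattach,
 * bsd_mode, debug, wmm_mode) are plain integer sysctls; the hunks only
 * re-align them to the new style. As a minimal user-space sketch of how
 * such a tunable is read on a kernel built with this driver (standard
 * sysctlbyname(3); illustrative usage, not part of this patch):
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
	int value = 0;
	size_t len = sizeof(value);

	/* reads the integer backing SYSCTL_INT(_net_link_fake, ..., debug) */
	if (sysctlbyname("net.link.fake.debug", &value, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return 1;
	}
	printf("net.link.fake.debug = %d\n", value);
	return 0;
}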
grp_attrs = lck_grp_attr_alloc_init(); grp = lck_grp_alloc_init(grp_name, grp_attrs); lck_grp_attr_free(grp_attrs); - return (grp); + return grp; } static inline lck_mtx_t * my_lck_mtx_alloc_init(lck_grp_t * lck_grp) { - lck_attr_t * lck_attrs; - lck_mtx_t * lck_mtx; + lck_attr_t * lck_attrs; + lck_mtx_t * lck_mtx; lck_attrs = lck_attr_alloc_init(); lck_mtx = lck_mtx_alloc_init(lck_grp, lck_attrs); lck_attr_free(lck_attrs); - return (lck_mtx); + return lck_mtx; } -static lck_mtx_t * feth_lck_mtx; +static lck_mtx_t * feth_lck_mtx; static inline void feth_lock_init(void) { - lck_grp_t * feth_lck_grp; + lck_grp_t * feth_lck_grp; feth_lck_grp = my_lck_grp_alloc_init("fake"); feth_lck_mtx = my_lck_mtx_alloc_init(feth_lck_grp); @@ -280,9 +291,9 @@ static inline int feth_max_mtu(void) { if (njcl > 0) { - return (M16KCLBYTES - ETHER_HDR_LEN); + return M16KCLBYTES - ETHER_HDR_LEN; } - return (MBIGCLBYTES - ETHER_HDR_LEN); + return MBIGCLBYTES - ETHER_HDR_LEN; } static void @@ -302,7 +313,7 @@ feth_free(if_fake_ref fakeif) static void feth_release(if_fake_ref fakeif) { - u_int32_t old_retain_count; + u_int32_t old_retain_count; old_retain_count = OSDecrementAtomic(&fakeif->iff_retain_count); switch (old_retain_count) { @@ -320,8 +331,8 @@ feth_release(if_fake_ref fakeif) /** - ** feth interface routines - **/ +** feth interface routines +**/ static void feth_ifnet_set_attrs(if_fake_ref fakeif, ifnet_t ifp) { @@ -330,8 +341,8 @@ feth_ifnet_set_attrs(if_fake_ref fakeif, ifnet_t ifp) ifnet_set_baudrate(ifp, 0); ifnet_set_mtu(ifp, ETHERMTU); ifnet_set_flags(ifp, - IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX, - 0xffff); + IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX, + 0xffff); ifnet_set_hdrlen(ifp, sizeof(struct ether_header)); if ((fakeif->iff_flags & IFF_FLAGS_HWCSUM) != 0) { ifnet_set_offload(ifp, @@ -346,9 +357,9 @@ static void interface_link_event(ifnet_t ifp, u_int32_t event_code) { struct { - struct kern_event_msg header; - u_int32_t unit; - char if_name[IFNAMSIZ]; + struct kern_event_msg header; + u_int32_t unit; + char if_name[IFNAMSIZ]; } event; bzero(&event, sizeof(event)); @@ -367,24 +378,24 @@ interface_link_event(ifnet_t ifp, u_int32_t event_code) static if_fake_ref ifnet_get_if_fake(ifnet_t ifp) { - return ((if_fake_ref)ifnet_softc(ifp)); + return (if_fake_ref)ifnet_softc(ifp); } static int feth_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) { - int error; - if_fake_ref fakeif; - struct ifnet_init_eparams feth_init; - ifnet_t ifp; - uint8_t mac_address[ETHER_ADDR_LEN]; + int error; + if_fake_ref fakeif; + struct ifnet_init_eparams feth_init; + ifnet_t ifp; + uint8_t mac_address[ETHER_ADDR_LEN]; fakeif = if_clone_softc_allocate(&feth_cloner); if (fakeif == NULL) { return ENOBUFS; } fakeif->iff_retain_count = 1; -#define FAKE_ETHER_NAME_LEN (sizeof(FAKE_ETHER_NAME) - 1) +#define FAKE_ETHER_NAME_LEN (sizeof(FAKE_ETHER_NAME) - 1) _CASSERT(FAKE_ETHER_NAME_LEN == 4); bcopy(FAKE_ETHER_NAME, mac_address, FAKE_ETHER_NAME_LEN); mac_address[ETHER_ADDR_LEN - 2] = (unit & 0xff00) >> 8; @@ -399,13 +410,13 @@ feth_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) /* use the interface name as the unique id for ifp recycle */ if ((unsigned int) snprintf(fakeif->iff_name, sizeof(fakeif->iff_name), "%s%d", - ifc->ifc_name, unit) >= sizeof(fakeif->iff_name)) { + ifc->ifc_name, unit) >= sizeof(fakeif->iff_name)) { feth_release(fakeif); - return (EINVAL); + return EINVAL; } bzero(&feth_init, sizeof(feth_init)); feth_init.ver = IFNET_INIT_CURRENT_VERSION; - 
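/*
 * feth_release() above relies on OSDecrementAtomic returning the counter
 * value *before* the decrement, so the thread that observes 1 is the one
 * that dropped the last reference and must free the object, while 0 means
 * an over-release. A self-contained sketch of that release pattern using
 * C11 atomics (names are illustrative, not from xnu):
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_uint retain_count;
};

static void
obj_release(struct obj *o)
{
	/* fetch_sub returns the old value, like OSDecrementAtomic */
	unsigned int old = atomic_fetch_sub(&o->retain_count, 1);

	switch (old) {
	case 0:
		fprintf(stderr, "over-release\n");	/* kernel would panic */
		abort();
	case 1:
		free(o);	/* last reference dropped */
		break;
	default:
		break;
	}
}

int
main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->retain_count, 2);
	obj_release(o);	/* still alive */
	obj_release(o);	/* freed here */
	return 0;
}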
feth_init.len = sizeof (feth_init); + feth_init.len = sizeof(feth_init); if (feth_in_bsd_mode(fakeif)) { if (if_fake_txstart != 0) { feth_init.start = feth_start; @@ -438,40 +449,40 @@ feth_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) error = ifnet_allocate_extended(&feth_init, &ifp); if (error) { feth_release(fakeif); - return (error); + return error; } feth_ifnet_set_attrs(fakeif, ifp); } - fakeif->iff_media_count = default_media_words_count; + fakeif->iff_media_count = MIN(default_media_words_count, IF_FAKE_MEDIA_LIST_MAX); bcopy(default_media_words, fakeif->iff_media_list, - sizeof(default_media_words)); + fakeif->iff_media_count * sizeof(fakeif->iff_media_list[0])); if (feth_in_bsd_mode(fakeif)) { error = ifnet_attach(ifp, NULL); if (error) { ifnet_release(ifp); feth_release(fakeif); - return (error); + return error; } fakeif->iff_ifp = ifp; } ifnet_set_lladdr(ifp, mac_address, sizeof(mac_address)); - + /* attach as ethernet */ bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); - return (0); + return 0; } static int feth_clone_destroy(ifnet_t ifp) { - if_fake_ref fakeif; + if_fake_ref fakeif; feth_lock(); fakeif = ifnet_get_if_fake(ifp); if (fakeif == NULL || feth_is_detaching(fakeif)) { feth_unlock(); - return (0); + return 0; } feth_set_detaching(fakeif); feth_unlock(); @@ -494,12 +505,12 @@ feth_enqueue_input(ifnet_t ifp, struct mbuf * m) static struct mbuf * copy_mbuf(struct mbuf *m) { - struct mbuf * copy_m; - uint32_t pkt_len; - uint32_t offset; + struct mbuf * copy_m; + uint32_t pkt_len; + uint32_t offset; if ((m->m_flags & M_PKTHDR) == 0) { - return (NULL); + return NULL; } pkt_len = m->m_pkthdr.len; MGETHDR(copy_m, M_DONTWAIT, MT_DATA); @@ -515,7 +526,7 @@ copy_mbuf(struct mbuf *m) copy_m = m_m16kget(copy_m, M_DONTWAIT); } else { printf("if_fake: copy_mbuf(): packet too large %d\n", - pkt_len); + pkt_len); goto failed; } if (copy_m == NULL || (copy_m->m_flags & M_EXT) == 0) { @@ -527,39 +538,39 @@ copy_mbuf(struct mbuf *m) copy_m->m_pkthdr.pkt_svc = m->m_pkthdr.pkt_svc; offset = 0; while (m != NULL && offset < pkt_len) { - uint32_t frag_len; + uint32_t frag_len; frag_len = m->m_len; if (frag_len > (pkt_len - offset)) { printf("if_fake_: Large mbuf fragment %d > %d\n", - frag_len, (pkt_len - offset)); + frag_len, (pkt_len - offset)); goto failed; } m_copydata(m, 0, frag_len, mtod(copy_m, void *) + offset); offset += frag_len; m = m->m_next; } - return (copy_m); + return copy_m; - failed: +failed: if (copy_m != NULL) { m_freem(copy_m); } - return (NULL); + return NULL; } static void feth_output_common(ifnet_t ifp, struct mbuf * m, ifnet_t peer, - iff_flags_t flags) + iff_flags_t flags) { - void * frame_header; + void * frame_header; frame_header = mbuf_data(m); if ((flags & IFF_FLAGS_HWCSUM) != 0) { m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags = - CSUM_DATA_VALID | CSUM_PSEUDO_HDR | - CSUM_IP_CHECKED | CSUM_IP_VALID; + CSUM_DATA_VALID | CSUM_PSEUDO_HDR | + CSUM_IP_CHECKED | CSUM_IP_VALID; } (void)ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0); @@ -567,23 +578,23 @@ feth_output_common(ifnet_t ifp, struct mbuf * m, ifnet_t peer, (void)mbuf_pkthdr_setrcvif(m, peer); mbuf_pkthdr_setheader(m, frame_header); - mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN); + mbuf_pkthdr_adjustlen(m, -ETHER_HDR_LEN); (void)mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN, - mbuf_len(m) - ETHER_HDR_LEN); + mbuf_len(m) - ETHER_HDR_LEN); bpf_tap_in(peer, DLT_EN10MB, m, frame_header, - sizeof(struct ether_header)); + sizeof(struct ether_header)); 
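/*
 * The feth_clone_create hunk above replaces an unconditional
 *   bcopy(default_media_words, ..., sizeof(default_media_words))
 * with a count clamped to IF_FAKE_MEDIA_LIST_MAX and a copy sized from
 * that clamped count. Since the same patch grows default_media_words by
 * nine entries, the source array can now change size independently of the
 * fixed-capacity iff_media_list, and sizing the copy from the source would
 * risk overflowing the destination. A self-contained sketch of the
 * clamp-then-copy pattern (illustrative names, not from xnu):
 */
#include <stdio.h>
#include <string.h>

#define DST_MAX 4	/* fixed capacity, like IF_FAKE_MEDIA_LIST_MAX */

static const int source[] = { 1, 2, 3, 4, 5, 6 };	/* may keep growing */
#define SOURCE_COUNT (sizeof(source) / sizeof(source[0]))

int
main(void)
{
	int dst[DST_MAX];
	size_t count = SOURCE_COUNT;

	if (count > DST_MAX) {	/* clamp before sizing the copy */
		count = DST_MAX;
	}
	memcpy(dst, source, count * sizeof(dst[0]));
	printf("copied %zu of %zu entries\n", count, (size_t)SOURCE_COUNT);
	return 0;
}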
feth_enqueue_input(peer, m); } static void feth_start(ifnet_t ifp) { - struct mbuf * copy_m = NULL; - if_fake_ref fakeif; - iff_flags_t flags = 0; - ifnet_t peer = NULL; - struct mbuf * m; - struct mbuf * save_m; + struct mbuf * copy_m = NULL; + if_fake_ref fakeif; + iff_flags_t flags = 0; + ifnet_t peer = NULL; + struct mbuf * m; + struct mbuf * save_m; feth_lock(); fakeif = ifnet_get_if_fake(ifp); @@ -655,13 +666,13 @@ feth_start(ifnet_t ifp) static int feth_output(ifnet_t ifp, struct mbuf * m) { - struct mbuf * copy_m; - if_fake_ref fakeif; - iff_flags_t flags; - ifnet_t peer = NULL; + struct mbuf * copy_m; + if_fake_ref fakeif; + iff_flags_t flags; + ifnet_t peer = NULL; if (m == NULL) { - return (0); + return 0; } copy_m = copy_mbuf(m); m_freem(m); @@ -669,7 +680,7 @@ feth_output(ifnet_t ifp, struct mbuf * m) if (copy_m == NULL) { /* count this as an output error */ ifnet_stat_increment_out(ifp, 0, 0, 1); - return (0); + return 0; } feth_lock(); fakeif = ifnet_get_if_fake(ifp); @@ -681,19 +692,19 @@ feth_output(ifnet_t ifp, struct mbuf * m) if (peer == NULL) { m_freem(copy_m); ifnet_stat_increment_out(ifp, 0, 0, 1); - return (0); + return 0; } feth_output_common(ifp, copy_m, peer, flags); - return (0); + return 0; } static int feth_config(ifnet_t ifp, ifnet_t peer) { - int connected = FALSE; - int disconnected = FALSE; - int error = 0; - if_fake_ref fakeif = NULL; + int connected = FALSE; + int disconnected = FALSE; + int error = 0; + if_fake_ref fakeif = NULL; feth_lock(); fakeif = ifnet_get_if_fake(ifp); @@ -703,7 +714,7 @@ feth_config(ifnet_t ifp, ifnet_t peer) } if (peer != NULL) { /* connect to peer */ - if_fake_ref peer_fakeif; + if_fake_ref peer_fakeif; peer_fakeif = ifnet_get_if_fake(peer); if (peer_fakeif == NULL) { @@ -720,10 +731,9 @@ feth_config(ifnet_t ifp, ifnet_t peer) fakeif->iff_peer = peer; peer_fakeif->iff_peer = ifp; connected = TRUE; - } - else if (fakeif->iff_peer != NULL) { + } else if (fakeif->iff_peer != NULL) { /* disconnect from peer */ - if_fake_ref peer_fakeif; + if_fake_ref peer_fakeif; peer = fakeif->iff_peer; peer_fakeif = ifnet_get_if_fake(peer); @@ -737,30 +747,29 @@ feth_config(ifnet_t ifp, ifnet_t peer) disconnected = TRUE; } - done: +done: feth_unlock(); /* generate link status event if we connect or disconnect */ if (connected) { interface_link_event(ifp, KEV_DL_LINK_ON); interface_link_event(peer, KEV_DL_LINK_ON); - } - else if (disconnected) { + } else if (disconnected) { interface_link_event(ifp, KEV_DL_LINK_OFF); interface_link_event(peer, KEV_DL_LINK_OFF); } - return (error); + return error; } static int feth_set_media(ifnet_t ifp, struct if_fake_request * iffr) { - if_fake_ref fakeif; - int error; + if_fake_ref fakeif; + int error; if (iffr->iffr_media.iffm_count > IF_FAKE_MEDIA_LIST_MAX) { /* list is too long */ - return (EINVAL); + return EINVAL; } feth_lock(); fakeif = ifnet_get_if_fake(ifp); @@ -770,23 +779,23 @@ feth_set_media(ifnet_t ifp, struct if_fake_request * iffr) } fakeif->iff_media_count = iffr->iffr_media.iffm_count; bcopy(iffr->iffr_media.iffm_list, fakeif->iff_media_list, - iffr->iffr_media.iffm_count * sizeof(fakeif->iff_media_list[0])); + iffr->iffr_media.iffm_count * sizeof(fakeif->iff_media_list[0])); #if 0 /* XXX: "auto-negotiate" active with peer? */ /* generate link status event? 
*/ fakeif->iff_media_current = iffr->iffr_media.iffm_current; #endif error = 0; - done: +done: feth_unlock(); - return (error); + return error; } static int -if_fake_request_copyin(user_addr_t user_addr, - struct if_fake_request *iffr, u_int32_t len) +if_fake_request_copyin(user_addr_t user_addr, + struct if_fake_request *iffr, u_int32_t len) { - int error; + int error; if (user_addr == USER_ADDR_NULL || len < sizeof(*iffr)) { error = EINVAL; @@ -801,17 +810,17 @@ if_fake_request_copyin(user_addr_t user_addr, error = EINVAL; goto done; } - done: - return (error); +done: + return error; } static int feth_set_drvspec(ifnet_t ifp, uint32_t cmd, u_int32_t len, - user_addr_t user_addr) + user_addr_t user_addr) { - int error; - struct if_fake_request iffr; - ifnet_t peer; + int error; + struct if_fake_request iffr; + ifnet_t peer; switch (cmd) { case IF_FAKE_S_CMD_SET_PEER: @@ -860,17 +869,17 @@ feth_set_drvspec(ifnet_t ifp, uint32_t cmd, u_int32_t len, error = EOPNOTSUPP; break; } - return (error); + return error; } static int feth_get_drvspec(ifnet_t ifp, u_int32_t cmd, u_int32_t len, - user_addr_t user_addr) + user_addr_t user_addr) { - int error = EOPNOTSUPP; - if_fake_ref fakeif; - struct if_fake_request iffr; - ifnet_t peer; + int error = EOPNOTSUPP; + if_fake_ref fakeif; + struct if_fake_request iffr; + ifnet_t peer; switch (cmd) { case IF_FAKE_G_CMD_GET_PEER: @@ -890,38 +899,38 @@ feth_get_drvspec(ifnet_t ifp, u_int32_t cmd, u_int32_t len, bzero(&iffr, sizeof(iffr)); if (peer != NULL) { strlcpy(iffr.iffr_peer_name, - if_name(peer), - sizeof(iffr.iffr_peer_name)); + if_name(peer), + sizeof(iffr.iffr_peer_name)); } error = copyout(&iffr, user_addr, sizeof(iffr)); break; default: break; } - return (error); + return error; } union ifdrvu { - struct ifdrv32 *ifdrvu_32; - struct ifdrv64 *ifdrvu_64; - void *ifdrvu_p; + struct ifdrv32 *ifdrvu_32; + struct ifdrv64 *ifdrvu_64; + void *ifdrvu_p; }; static int feth_ioctl(ifnet_t ifp, u_long cmd, void * data) { - unsigned int count; - struct ifdevmtu * devmtu_p; - union ifdrvu drv; - uint32_t drv_cmd; - uint32_t drv_len; - boolean_t drv_set_command = FALSE; - int error = 0; - struct ifmediareq * ifmr; - struct ifreq * ifr; - if_fake_ref fakeif; - int status; - user_addr_t user_addr; + unsigned int count; + struct ifdevmtu * devmtu_p; + union ifdrvu drv; + uint32_t drv_cmd; + uint32_t drv_len; + boolean_t drv_set_command = FALSE; + int error = 0; + struct ifmediareq * ifmr; + struct ifreq * ifr; + if_fake_ref fakeif; + int status; + user_addr_t user_addr; ifr = (struct ifreq *)data; switch (cmd) { @@ -935,14 +944,14 @@ feth_ioctl(ifnet_t ifp, u_long cmd, void * data) fakeif = (if_fake_ref)ifnet_softc(ifp); if (fakeif == NULL) { feth_unlock(); - return (EOPNOTSUPP); + return EOPNOTSUPP; } status = (fakeif->iff_peer != NULL) ? (IFM_AVALID | IFM_ACTIVE) : IFM_AVALID; ifmr = (struct ifmediareq *)data; user_addr = (cmd == SIOCGIFMEDIA64) ? 
- ((struct ifmediareq64 *)ifmr)->ifmu_ulist : - CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist); + ((struct ifmediareq64 *)ifmr)->ifmu_ulist : + CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist); count = ifmr->ifm_count; ifmr->ifm_active = IFM_ETHER; ifmr->ifm_current = IFM_ETHER; @@ -950,14 +959,13 @@ feth_ioctl(ifnet_t ifp, u_long cmd, void * data) ifmr->ifm_status = status; if (user_addr == USER_ADDR_NULL) { ifmr->ifm_count = fakeif->iff_media_count; - } - else if (count > 0) { + } else if (count > 0) { if (count > fakeif->iff_media_count) { count = fakeif->iff_media_count; } ifmr->ifm_count = count; error = copyout(&fakeif->iff_media_list, user_addr, - count * sizeof(int)); + count * sizeof(int)); } feth_unlock(); break; @@ -984,7 +992,7 @@ feth_ioctl(ifnet_t ifp, u_long cmd, void * data) break; } drv_set_command = TRUE; - /* FALL THROUGH */ + /* FALL THROUGH */ case SIOCGDRVSPEC32: case SIOCGDRVSPEC64: drv.ifdrvu_p = data; @@ -992,7 +1000,6 @@ feth_ioctl(ifnet_t ifp, u_long cmd, void * data) drv_cmd = drv.ifdrvu_32->ifd_cmd; drv_len = drv.ifdrvu_32->ifd_len; user_addr = CAST_USER_ADDR_T(drv.ifdrvu_32->ifd_data); - } else { drv_cmd = drv.ifdrvu_64->ifd_cmd; drv_len = drv.ifdrvu_64->ifd_len; @@ -1000,10 +1007,10 @@ feth_ioctl(ifnet_t ifp, u_long cmd, void * data) } if (drv_set_command) { error = feth_set_drvspec(ifp, drv_cmd, drv_len, - user_addr); + user_addr); } else { error = feth_get_drvspec(ifp, drv_cmd, drv_len, - user_addr); + user_addr); } break; @@ -1037,10 +1044,10 @@ feth_ioctl(ifnet_t ifp, u_long cmd, void * data) return error; } -static void +static void feth_if_free(ifnet_t ifp) { - if_fake_ref fakeif; + if_fake_ref fakeif; if (ifp == NULL) { return; diff --git a/bsd/net/if_fake_var.h b/bsd/net/if_fake_var.h index 1b8aaf4b1..33bff4fe2 100644 --- a/bsd/net/if_fake_var.h +++ b/bsd/net/if_fake_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2015-2017 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _NET_IF_FAKE_VAR_H_ -#define _NET_IF_FAKE_VAR_H_ 1 +#define _NET_IF_FAKE_VAR_H_ 1 #include @@ -40,44 +40,44 @@ if_fake_init(void); * SIOCSDRVSPEC */ enum { - IF_FAKE_S_CMD_NONE = 0, - IF_FAKE_S_CMD_SET_PEER = 1, - IF_FAKE_S_CMD_SET_MEDIA = 2, - IF_FAKE_S_CMD_SET_DEQUEUE_STALL = 3, + IF_FAKE_S_CMD_NONE = 0, + IF_FAKE_S_CMD_SET_PEER = 1, + IF_FAKE_S_CMD_SET_MEDIA = 2, + IF_FAKE_S_CMD_SET_DEQUEUE_STALL = 3, }; /* * SIOCGDRVSPEC */ enum { - IF_FAKE_G_CMD_NONE = 0, - IF_FAKE_G_CMD_GET_PEER = 1, + IF_FAKE_G_CMD_NONE = 0, + IF_FAKE_G_CMD_GET_PEER = 1, }; - -#define IF_FAKE_MEDIA_LIST_MAX 27 + +#define IF_FAKE_MEDIA_LIST_MAX 27 struct if_fake_media { - int32_t iffm_current; - uint32_t iffm_count; - uint32_t iffm_reserved[3]; - int32_t iffm_list[IF_FAKE_MEDIA_LIST_MAX]; + int32_t iffm_current; + uint32_t iffm_count; + uint32_t iffm_reserved[3]; + int32_t iffm_list[IF_FAKE_MEDIA_LIST_MAX]; }; struct if_fake_request { - uint64_t iffr_reserved[4]; + uint64_t iffr_reserved[4]; union { - char iffru_buf[128]; /* stable size */ - struct if_fake_media iffru_media; - char iffru_peer_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + char iffru_buf[128]; /* stable size */ + struct if_fake_media iffru_media; + char iffru_peer_name[IFNAMSIZ]; /* if name, e.g. "en0" */ /* * control dequeue stall. 0: disable dequeue stall, else * enable dequeue stall. */ - uint32_t iffru_dequeue_stall; + uint32_t iffru_dequeue_stall; } iffr_u; -#define iffr_peer_name iffr_u.iffru_peer_name -#define iffr_media iffr_u.iffru_media -#define iffr_dequeue_stall iffr_u.iffru_dequeue_stall +#define iffr_peer_name iffr_u.iffru_peer_name +#define iffr_media iffr_u.iffru_media +#define iffr_dequeue_stall iffr_u.iffru_dequeue_stall }; #endif /* _NET_IF_FAKE_VAR_H_ */ diff --git a/bsd/net/if_gif.c b/bsd/net/if_gif.c index e0caf004b..0ada425b6 100644 --- a/bsd/net/if_gif.c +++ b/bsd/net/if_gif.c @@ -88,11 +88,11 @@ #include #include #include -#if INET +#if INET #include #include #include -#endif /* INET */ +#endif /* INET */ #if INET6 #include @@ -112,18 +112,18 @@ #include #endif -#define GIFNAME "gif" -#define GIFDEV "if_gif" +#define GIFNAME "gif" +#define GIFDEV "if_gif" -#define GIF_MAXUNIT IF_MAXUNIT -#define GIF_ZONE_MAX_ELEM MIN(IFNETS_MAX, GIF_MAXUNIT) +#define GIF_MAXUNIT IF_MAXUNIT +#define GIF_ZONE_MAX_ELEM MIN(IFNETS_MAX, GIF_MAXUNIT) /* gif lock variables */ -static lck_grp_t *gif_mtx_grp; -static lck_grp_attr_t *gif_mtx_grp_attr; -static lck_attr_t *gif_mtx_attr; +static lck_grp_t *gif_mtx_grp; +static lck_grp_attr_t *gif_mtx_grp_attr; +static lck_attr_t *gif_mtx_attr; decl_lck_mtx_data(static, gif_mtx_data); -static lck_mtx_t *gif_mtx = &gif_mtx_data; +static lck_mtx_t *gif_mtx = &gif_mtx_data; TAILQ_HEAD(gifhead, gif_softc) gifs = TAILQ_HEAD_INITIALIZER(gifs); @@ -133,28 +133,28 @@ static errno_t gif_input(ifnet_t ifp, protocol_family_t protocol_family, mbuf_t m, char *frame_header); static errno_t gif_ioctl(ifnet_t ifp, u_long cmd, void *data); -static int ngif = 0; /* number of interfaces */ +static int ngif = 0; /* number of interfaces */ #if INET static struct protosw in_gif_protosw = { - .pr_type = SOCK_RAW, - .pr_protocol = 0, /* IPPROTO_IPV[46] */ - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_input = in_gif_input, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, + .pr_type = SOCK_RAW, + .pr_protocol = 0, /* IPPROTO_IPV[46] */ + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_input = in_gif_input, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, }; #endif #if INET6 
static struct ip6protosw in6_gif_protosw = { - .pr_type = SOCK_RAW, - .pr_protocol = 0, /* IPPROTO_IPV[46] */ - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_input = in6_gif_input, - .pr_usrreqs = &rip6_usrreqs, - .pr_unlock = rip_unlock, + .pr_type = SOCK_RAW, + .pr_protocol = 0, /* IPPROTO_IPV[46] */ + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_input = in6_gif_input, + .pr_usrreqs = &rip6_usrreqs, + .pr_unlock = rip_unlock, }; #endif @@ -166,7 +166,7 @@ static void gif_detach(struct ifnet *); static struct if_clone gif_cloner = IF_CLONE_INITIALIZER(GIFNAME, gif_clone_create, gif_clone_destroy, - 0, GIF_MAXUNIT, GIF_ZONE_MAX_ELEM, sizeof(struct gif_softc)); + 0, GIF_MAXUNIT, GIF_ZONE_MAX_ELEM, sizeof(struct gif_softc)); /* * Theory of operation: initially, one gif interface is created. * Any time a gif interface is configured, if there are no other @@ -192,7 +192,7 @@ gif_demux( *protocol_family = sc->gif_proto; GIF_UNLOCK(sc); - return (0); + return 0; } static errno_t @@ -206,14 +206,15 @@ gif_add_proto( struct gif_softc *sc = ifnet_softc(ifp); GIF_LOCK(sc); - if (sc->gif_proto != 0) + if (sc->gif_proto != 0) { printf("gif_add_proto: request add_proto for gif%d\n", ifnet_unit(ifp)); + } sc->gif_proto = protocol_family; GIF_UNLOCK(sc); - return (0); + return 0; } static errno_t @@ -224,11 +225,12 @@ gif_del_proto( struct gif_softc *sc = ifnet_softc(ifp); GIF_LOCK(sc); - if (sc->gif_proto == protocol_family) + if (sc->gif_proto == protocol_family) { sc->gif_proto = 0; + } GIF_UNLOCK(sc); - return (0); + return 0; } /* Glue code to attach inet to a gif interface through DLIL */ @@ -240,7 +242,7 @@ gif_attach_proto_family( struct ifnet_attach_proto_param reg; errno_t stat; - bzero(®, sizeof (reg)); + bzero(®, sizeof(reg)); reg.input = gif_input; stat = ifnet_attach_protocol(ifp, protocol_family, ®); @@ -249,7 +251,7 @@ gif_attach_proto_family( fam=%d\n", protocol_family); } - return (stat); + return stat; } /* Function to setup the first gif interface */ @@ -270,19 +272,22 @@ gif_init(void) /* Register protocol registration functions */ result = proto_register_plumber(PF_INET, APPLE_IF_FAM_GIF, gif_attach_proto_family, NULL); - if (result != 0) + if (result != 0) { printf("proto_register_plumber failed for AF_INET error=%d\n", result); + } result = proto_register_plumber(PF_INET6, APPLE_IF_FAM_GIF, gif_attach_proto_family, NULL); - if (result != 0) + if (result != 0) { printf("proto_register_plumber failed for AF_INET6 error=%d\n", result); + } result = if_clone_attach(&gif_cloner); - if (result != 0) + if (result != 0) { panic("%s: if_clone_attach() failed, error %d\n", __func__, result); + } gif_clone_create(&gif_cloner, 0, NULL); } @@ -300,7 +305,7 @@ gif_set_bpf_tap( sc->tap_callback = callback; GIF_UNLOCK(sc); - return (0); + return 0; } static void @@ -337,14 +342,14 @@ gif_clone_create(struct if_clone *ifc, uint32_t unit, __unused void *params) } /* use the interface name as the unique id for ifp recycle */ - snprintf(sc->gif_ifname, sizeof (sc->gif_ifname), "%s%d", + snprintf(sc->gif_ifname, sizeof(sc->gif_ifname), "%s%d", ifc->ifc_name, unit); lck_mtx_init(&sc->gif_lock, gif_mtx_grp, gif_mtx_attr); - bzero(&gif_init_params, sizeof (gif_init_params)); + bzero(&gif_init_params, sizeof(gif_init_params)); gif_init_params.ver = IFNET_INIT_CURRENT_VERSION; - gif_init_params.len = sizeof (gif_init_params); + gif_init_params.len = sizeof(gif_init_params); gif_init_params.flags = IFNET_INIT_LEGACY; gif_init_params.uniqueid = sc->gif_ifname; gif_init_params.uniqueid_len = strlen(sc->gif_ifname); @@ 
-372,7 +377,7 @@ gif_clone_create(struct if_clone *ifc, uint32_t unit, __unused void *params) sc->encap_cookie4 = sc->encap_cookie6 = NULL; #if INET sc->encap_cookie4 = encap_attach_func(AF_INET, -1, - gif_encapcheck, &in_gif_protosw, sc); + gif_encapcheck, &in_gif_protosw, sc); if (sc->encap_cookie4 == NULL) { printf("%s: unable to attach encap4\n", if_name(sc->gif_if)); ifnet_release(sc->gif_if); @@ -422,14 +427,14 @@ gif_clone_create(struct if_clone *ifc, uint32_t unit, __unused void *params) #if CONFIG_MACF_NET mac_ifnet_label_init(&sc->gif_if); #endif - bpfattach(sc->gif_if, DLT_NULL, sizeof (u_int)); + bpfattach(sc->gif_if, DLT_NULL, sizeof(u_int)); sc->gif_flags &= ~IFGIF_DETACHING; TAILQ_INSERT_TAIL(&gifs, sc, gif_link); ngif++; done: lck_mtx_unlock(gif_mtx); - return (error); + return error; } static int @@ -437,6 +442,8 @@ gif_remove(struct ifnet *ifp) { int error = 0; struct gif_softc *sc = NULL; + const struct encaptab *encap_cookie4 = NULL; + const struct encaptab *encap_cookie6 = NULL; lck_mtx_lock(gif_mtx); sc = ifp->if_softc; @@ -458,25 +465,30 @@ gif_remove(struct ifnet *ifp) gif_delete_tunnel(sc); #ifdef INET6 - if (sc->encap_cookie6 != NULL) { - error = encap_detach(sc->encap_cookie6); - KASSERT(error == 0, ("gif_clone_destroy: Unexpected " - "error detaching encap_cookie6")); - } + encap_cookie6 = sc->encap_cookie6; #endif #ifdef INET - if (sc->encap_cookie4 != NULL) { - error = encap_detach(sc->encap_cookie4); - KASSERT(error == 0, ("gif_clone_destroy: Unexpected " - "error detaching encap_cookie4")); - } + encap_cookie4 = sc->encap_cookie4; #endif done: - if (sc != NULL) + if (sc != NULL) { GIF_UNLOCK(sc); + } lck_mtx_unlock(gif_mtx); - return (error); + if (encap_cookie6 != NULL) { + error = encap_detach(encap_cookie6); + KASSERT(error == 0, ("gif_clone_destroy: Unexpected " + "error detaching encap_cookie6")); + } + + if (encap_cookie4 != NULL) { + error = encap_detach(encap_cookie4); + KASSERT(error == 0, ("gif_clone_destroy: Unexpected " + "error detaching encap_cookie4")); + } + + return error; } static int @@ -487,7 +499,7 @@ gif_clone_destroy(struct ifnet *ifp) error = gif_remove(ifp); if (error != 0) { printf("gif_clone_destroy: gif remove failed %d\n", error); - return (error); + return error; } error = ifnet_set_flags(ifp, 0, IFF_UP); @@ -496,10 +508,11 @@ gif_clone_destroy(struct ifnet *ifp) } error = ifnet_detach(ifp); - if (error != 0) + if (error != 0) { panic("gif_clone_destroy: ifnet_detach(%p) failed %d\n", ifp, error); - return (0); + } + return 0; } static int @@ -514,16 +527,19 @@ gif_encapcheck( struct gif_softc *sc; sc = (struct gif_softc *)arg; - if (sc == NULL) - return (error); + if (sc == NULL) { + return error; + } GIF_LOCK(sc); - if ((ifnet_flags(sc->gif_if) & IFF_UP) == 0) + if ((ifnet_flags(sc->gif_if) & IFF_UP) == 0) { goto done; + } /* no physical address */ - if (!sc->gif_psrc || !sc->gif_pdst) + if (!sc->gif_psrc || !sc->gif_pdst) { goto done; + } switch (proto) { #if INET @@ -538,21 +554,23 @@ gif_encapcheck( goto done; } - mbuf_copydata((struct mbuf *)(size_t)m, 0, sizeof (ip), &ip); + mbuf_copydata((struct mbuf *)(size_t)m, 0, sizeof(ip), &ip); switch (ip.ip_v) { #if INET case 4: if (sc->gif_psrc->sa_family != AF_INET || - sc->gif_pdst->sa_family != AF_INET) + sc->gif_pdst->sa_family != AF_INET) { goto done; + } error = gif_encapcheck4(m, off, proto, arg); #endif #if INET6 case 6: if (sc->gif_psrc->sa_family != AF_INET6 || - sc->gif_pdst->sa_family != AF_INET6) + sc->gif_pdst->sa_family != AF_INET6) { goto done; + } error = 
gif_encapcheck6(m, off, proto, arg); #endif default: @@ -560,7 +578,7 @@ gif_encapcheck( } done: GIF_UNLOCK(sc); - return (error); + return error; } static errno_t @@ -583,16 +601,16 @@ gif_output( * because there is no guaruntee that we won't be called * concurrently from more than one thread. */ - m->m_flags &= ~(M_BCAST|M_MCAST); + m->m_flags &= ~(M_BCAST | M_MCAST); if (!(ifnet_flags(ifp) & IFF_UP) || gif_psrc == NULL || gif_pdst == NULL) { ifnet_touch_lastchange(ifp); - m_freem(m); /* free it here not in dlil_output */ + m_freem(m); /* free it here not in dlil_output */ error = ENETDOWN; goto end; } - bpf_tap_out(ifp, 0, m, &sc->gif_proto, sizeof (sc->gif_proto)); + bpf_tap_out(ifp, 0, m, &sc->gif_proto, sizeof(sc->gif_proto)); GIF_LOCK(sc); @@ -618,16 +636,17 @@ gif_output( } GIF_UNLOCK(sc); -end: +end: if (error) { /* the mbuf was freed either by in_gif_output or in here */ ifnet_stat_increment_out(ifp, 0, 0, 1); } else { ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0); } - if (error == 0) + if (error == 0) { error = EJUSTRETURN; /* if no error, packet got sent already */ - return (error); + } + return error; } /* @@ -642,7 +661,7 @@ gif_input( { struct gif_softc *sc = ifnet_softc(ifp); - bpf_tap_in(ifp, 0, m, &sc->gif_proto, sizeof (sc->gif_proto)); + bpf_tap_in(ifp, 0, m, &sc->gif_proto, sizeof(sc->gif_proto)); /* * Put the packet to the network layer input queue according to the @@ -656,23 +675,23 @@ gif_input( * it occurs more times than we thought, we may change the policy * again. */ - int32_t pktlen = m->m_pkthdr.len; + int32_t pktlen = m->m_pkthdr.len; if (proto_input(protocol_family, m) != 0) { ifnet_stat_increment_in(ifp, 0, 0, 1); m_freem(m); - } else { + } else { ifnet_stat_increment_in(ifp, 1, pktlen, 0); - } + } - return (0); + return 0; } /* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? 
*/ static errno_t gif_ioctl( - ifnet_t ifp, - u_long cmd, - void *data) + ifnet_t ifp, + u_long cmd, + void *data) { struct gif_softc *sc = ifnet_softc(ifp); struct ifreq *ifr = (struct ifreq *)data; @@ -693,20 +712,20 @@ gif_ioctl( case SIOCDELMULTI: break; -#ifdef SIOCSIFMTU /* xxx */ +#ifdef SIOCSIFMTU /* xxx */ case SIOCGIFMTU: break; case SIOCSIFMTU: - { - u_int32_t mtu; - mtu = ifr->ifr_mtu; - if (mtu < GIF_MTU_MIN || mtu > GIF_MTU_MAX) { - return (EINVAL); - } - ifnet_set_mtu(ifp, mtu); + { + u_int32_t mtu; + mtu = ifr->ifr_mtu; + if (mtu < GIF_MTU_MIN || mtu > GIF_MTU_MAX) { + return EINVAL; } - break; + ifnet_set_mtu(ifp, mtu); + } + break; #endif /* SIOCSIFMTU */ case SIOCSIFPHYADDR: @@ -718,9 +737,9 @@ gif_ioctl( #if INET case SIOCSIFPHYADDR: src = (struct sockaddr *) - &(((struct in_aliasreq *)data)->ifra_addr); + &(((struct in_aliasreq *)data)->ifra_addr); dst = (struct sockaddr *) - &(((struct in_aliasreq *)data)->ifra_dstaddr); + &(((struct in_aliasreq *)data)->ifra_dstaddr); break; #endif #if INET6 @@ -745,83 +764,92 @@ gif_ioctl( } /* sa_family must be equal */ - if (src->sa_family != dst->sa_family) - return (EINVAL); + if (src->sa_family != dst->sa_family) { + return EINVAL; + } /* validate sa_len */ switch (src->sa_family) { #if INET case AF_INET: - if (src->sa_len != sizeof (struct sockaddr_in)) - return (EINVAL); + if (src->sa_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } break; #endif #if INET6 case AF_INET6: - if (src->sa_len != sizeof (struct sockaddr_in6)) - return (EINVAL); + if (src->sa_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } break; #endif default: - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } switch (dst->sa_family) { #if INET case AF_INET: - if (dst->sa_len != sizeof (struct sockaddr_in)) - return (EINVAL); + if (dst->sa_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } break; #endif #if INET6 case AF_INET6: - if (dst->sa_len != sizeof (struct sockaddr_in6)) - return (EINVAL); + if (dst->sa_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } break; #endif default: - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } /* check sa_family looks sane for the cmd */ switch (cmd) { case SIOCSIFPHYADDR: - if (src->sa_family == AF_INET) + if (src->sa_family == AF_INET) { break; - return (EAFNOSUPPORT); + } + return EAFNOSUPPORT; #if INET6 case SIOCSIFPHYADDR_IN6_32: case SIOCSIFPHYADDR_IN6_64: - if (src->sa_family == AF_INET6) + if (src->sa_family == AF_INET6) { break; - return (EAFNOSUPPORT); + } + return EAFNOSUPPORT; #endif /* INET6 */ } -#define GIF_ORDERED_LOCK(sc, sc2) \ - if (sc < sc2) { \ - GIF_LOCK(sc); \ - GIF_LOCK(sc2); \ - } else { \ - GIF_LOCK(sc2); \ - GIF_LOCK(sc); \ +#define GIF_ORDERED_LOCK(sc, sc2) \ + if (sc < sc2) { \ + GIF_LOCK(sc); \ + GIF_LOCK(sc2); \ + } else { \ + GIF_LOCK(sc2); \ + GIF_LOCK(sc); \ } -#define GIF_ORDERED_UNLOCK(sc, sc2) \ - if (sc > sc2) { \ - GIF_UNLOCK(sc); \ - GIF_UNLOCK(sc2); \ - } else { \ - GIF_UNLOCK(sc2); \ - GIF_UNLOCK(sc); \ +#define GIF_ORDERED_UNLOCK(sc, sc2) \ + if (sc > sc2) { \ + GIF_UNLOCK(sc); \ + GIF_UNLOCK(sc2); \ + } else { \ + GIF_UNLOCK(sc2); \ + GIF_UNLOCK(sc); \ } ifnet_head_lock_shared(); TAILQ_FOREACH(ifp2, &ifnet_head, if_link) { - if (strcmp(ifnet_name(ifp2), GIFNAME) != 0) + if (strcmp(ifnet_name(ifp2), GIFNAME) != 0) { continue; + } sc2 = ifnet_softc(ifp2); - if (sc2 == sc) + if (sc2 == sc) { continue; + } /* lock sc and sc2 in increasing order of ifnet index */ GIF_ORDERED_LOCK(sc, sc2); if (!sc2->gif_pdst || !sc2->gif_psrc) { @@ -847,11 +875,11 @@ 
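/*
 * Two deadlock-avoidance idioms appear in the if_gif.c hunks above.
 * First, gif_remove() now only captures the encap cookies while gif_mtx
 * and GIF_LOCK are held and calls encap_detach() after dropping both, so
 * a callee that may take other subsystem locks no longer runs under the
 * gif locks. Second, GIF_ORDERED_LOCK takes two softc locks in the order
 * given by comparing the softc addresses, so any two threads locking the
 * same pair agree on the acquisition order. A self-contained user-space
 * sketch of the address-ordered pattern (pthreads stand in for lck_mtx;
 * names are illustrative):
 */
#include <pthread.h>
#include <stdio.h>

struct softc {
	pthread_mutex_t lock;
	int value;
};

/* lock two objects in a globally consistent (address) order */
static void
ordered_lock(struct softc *a, struct softc *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

/* unlock order does not affect deadlock-freedom, so any order works */
static void
ordered_unlock(struct softc *a, struct softc *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int
main(void)
{
	struct softc x = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct softc y = { PTHREAD_MUTEX_INITIALIZER, 2 };

	/* both argument orders acquire the locks in the same order */
	ordered_lock(&x, &y);
	printf("%d %d\n", x.value, y.value);
	ordered_unlock(&x, &y);
	return 0;
}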
gif_ioctl( #endif /* can't configure multiple multi-dest interfaces */ -#define multidest(x) \ +#define multidest(x) \ (((struct sockaddr_in *)(void *)(x))->sin_addr.s_addr == INADDR_ANY) #if INET6 -#define multidest6(x) \ - (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *) \ +#define multidest6(x) \ + (IN6_IS_ADDR_UNSPECIFIED(&((struct sockaddr_in6 *) \ (void *)(x))->sin6_addr)) #endif if (dst->sa_family == AF_INET && @@ -875,24 +903,26 @@ gif_ioctl( ifnet_head_done(); GIF_LOCK(sc); - if (sc->gif_psrc) - FREE((caddr_t)sc->gif_psrc, M_IFADDR); + if (sc->gif_psrc) { + FREE(sc->gif_psrc, M_IFADDR); + } sa = (struct sockaddr *)_MALLOC(src->sa_len, M_IFADDR, M_WAITOK); if (sa == NULL) { GIF_UNLOCK(sc); - return (ENOBUFS); + return ENOBUFS; } bcopy((caddr_t)src, (caddr_t)sa, src->sa_len); sc->gif_psrc = sa; - if (sc->gif_pdst) - FREE((caddr_t)sc->gif_pdst, M_IFADDR); + if (sc->gif_pdst) { + FREE(sc->gif_pdst, M_IFADDR); + } sa = (struct sockaddr *)_MALLOC(dst->sa_len, M_IFADDR, M_WAITOK); if (sa == NULL) { GIF_UNLOCK(sc); - return (ENOBUFS); + return ENOBUFS; } bcopy((caddr_t)dst, (caddr_t)sa, dst->sa_len); sc->gif_pdst = sa; @@ -908,11 +938,11 @@ gif_ioctl( case SIOCDIFPHYADDR: GIF_LOCK(sc); if (sc->gif_psrc) { - FREE((caddr_t)sc->gif_psrc, M_IFADDR); + FREE(sc->gif_psrc, M_IFADDR); sc->gif_psrc = NULL; } if (sc->gif_pdst) { - FREE((caddr_t)sc->gif_pdst, M_IFADDR); + FREE(sc->gif_pdst, M_IFADDR); sc->gif_pdst = NULL; } GIF_UNLOCK(sc); @@ -935,14 +965,14 @@ gif_ioctl( #if INET case SIOCGIFPSRCADDR: dst = &ifr->ifr_addr; - size = sizeof (ifr->ifr_addr); + size = sizeof(ifr->ifr_addr); break; #endif /* INET */ #if INET6 case SIOCGIFPSRCADDR_IN6: dst = (struct sockaddr *) - &(((struct in6_ifreq *)data)->ifr_addr); - size = sizeof (((struct in6_ifreq *)data)->ifr_addr); + &(((struct in6_ifreq *)data)->ifr_addr); + size = sizeof(((struct in6_ifreq *)data)->ifr_addr); break; #endif /* INET6 */ default: @@ -952,7 +982,7 @@ gif_ioctl( } if (src->sa_len > size) { GIF_UNLOCK(sc); - return (EINVAL); + return EINVAL; } bcopy((caddr_t)src, (caddr_t)dst, src->sa_len); GIF_UNLOCK(sc); @@ -973,14 +1003,14 @@ gif_ioctl( #if INET case SIOCGIFPDSTADDR: dst = &ifr->ifr_addr; - size = sizeof (ifr->ifr_addr); + size = sizeof(ifr->ifr_addr); break; #endif /* INET */ #if INET6 case SIOCGIFPDSTADDR_IN6: dst = (struct sockaddr *) - &(((struct in6_ifreq *)data)->ifr_addr); - size = sizeof (((struct in6_ifreq *)data)->ifr_addr); + &(((struct in6_ifreq *)data)->ifr_addr); + size = sizeof(((struct in6_ifreq *)data)->ifr_addr); break; #endif /* INET6 */ default: @@ -990,7 +1020,7 @@ gif_ioctl( } if (src->sa_len > size) { GIF_UNLOCK(sc); - return (EINVAL); + return EINVAL; } bcopy((caddr_t)src, (caddr_t)dst, src->sa_len); GIF_UNLOCK(sc); @@ -1005,7 +1035,7 @@ gif_ioctl( break; } bad: - return (error); + return error; } static void @@ -1013,11 +1043,11 @@ gif_delete_tunnel(struct gif_softc *sc) { GIF_LOCK_ASSERT(sc); if (sc->gif_psrc) { - FREE((caddr_t)sc->gif_psrc, M_IFADDR); + FREE(sc->gif_psrc, M_IFADDR); sc->gif_psrc = NULL; } if (sc->gif_pdst) { - FREE((caddr_t)sc->gif_pdst, M_IFADDR); + FREE(sc->gif_pdst, M_IFADDR); sc->gif_pdst = NULL; } ROUTE_RELEASE(&sc->gif_ro); diff --git a/bsd/net/if_gif.h b/bsd/net/if_gif.h index 296cdbf21..19460fcc3 100644 --- a/bsd/net/if_gif.h +++ b/bsd/net/if_gif.h @@ -61,7 +61,7 @@ */ #ifndef _NET_IF_GIF_H_ -#define _NET_IF_GIF_H_ +#define _NET_IF_GIF_H_ #include #include @@ -74,9 +74,9 @@ extern void gif_init(void); struct encaptab; struct gif_softc { - ifnet_t gif_if; /* pointer back to 
the interface */ - struct sockaddr *gif_psrc; /* Physical src addr */ - struct sockaddr *gif_pdst; /* Physical dst addr */ + ifnet_t gif_if; /* pointer back to the interface */ + struct sockaddr *gif_psrc; /* Physical src addr */ + struct sockaddr *gif_pdst; /* Physical dst addr */ #ifdef __APPLE__ protocol_family_t gif_proto; /* dlil protocol attached */ #endif @@ -86,32 +86,32 @@ struct gif_softc { struct route_in6 gifscr_ro6; /* xxx */ #endif } gifsc_gifscr; - int gif_flags; -#define IFGIF_DETACHING 0x1 - int gif_called; + int gif_flags; +#define IFGIF_DETACHING 0x1 + int gif_called; const struct encaptab *encap_cookie4; const struct encaptab *encap_cookie6; TAILQ_ENTRY(gif_softc) gif_link; /* all gif's are linked */ - bpf_tap_mode tap_mode; + bpf_tap_mode tap_mode; bpf_packet_func tap_callback; - char gif_ifname[IFNAMSIZ]; - decl_lck_mtx_data(, gif_lock); /* lock for gif softc structure */ + char gif_ifname[IFNAMSIZ]; + decl_lck_mtx_data(, gif_lock); /* lock for gif softc structure */ }; -#define GIF_LOCK(_sc) lck_mtx_lock(&(_sc)->gif_lock) -#define GIF_UNLOCK(_sc) lck_mtx_unlock(&(_sc)->gif_lock) -#define GIF_LOCK_ASSERT(_sc) LCK_MTX_ASSERT(&(_sc)->gif_lock, \ +#define GIF_LOCK(_sc) lck_mtx_lock(&(_sc)->gif_lock) +#define GIF_UNLOCK(_sc) lck_mtx_unlock(&(_sc)->gif_lock) +#define GIF_LOCK_ASSERT(_sc) LCK_MTX_ASSERT(&(_sc)->gif_lock, \ LCK_MTX_ASSERT_OWNED) -#define gif_ro gifsc_gifscr.gifscr_ro +#define gif_ro gifsc_gifscr.gifscr_ro #if INET6 -#define gif_ro6 gifsc_gifscr.gifscr_ro6 +#define gif_ro6 gifsc_gifscr.gifscr_ro6 #endif #endif /* BSD_KERNEL_PRIVATE */ -#define GIF_MTU (1280) /* Default MTU */ -#define GIF_MTU_MIN (1280) /* Minimum MTU */ -#define GIF_MTU_MAX (8192) /* Maximum MTU */ +#define GIF_MTU (1280) /* Default MTU */ +#define GIF_MTU_MIN (1280) /* Minimum MTU */ +#define GIF_MTU_MAX (8192) /* Maximum MTU */ #endif /* _NET_IF_GIF_H_ */ diff --git a/bsd/net/if_ipsec.c b/bsd/net/if_ipsec.c index 22a1441dd..eb32af709 100644 --- a/bsd/net/if_ipsec.c +++ b/bsd/net/if_ipsec.c @@ -61,41 +61,41 @@ extern int net_qos_policy_restricted; extern int net_qos_policy_restrict_avapps; /* Kernel Control functions */ -static errno_t ipsec_ctl_bind(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, - void **unitinfo); -static errno_t ipsec_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, - void **unitinfo); -static errno_t ipsec_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, - void *unitinfo); -static errno_t ipsec_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, - void *unitinfo, mbuf_t m, int flags); -static errno_t ipsec_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int opt, void *data, size_t *len); -static errno_t ipsec_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int opt, void *data, size_t len); +static errno_t ipsec_ctl_bind(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, + void **unitinfo); +static errno_t ipsec_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, + void **unitinfo); +static errno_t ipsec_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, + void *unitinfo); +static errno_t ipsec_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, + void *unitinfo, mbuf_t m, int flags); +static errno_t ipsec_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, + int opt, void *data, size_t *len); +static errno_t ipsec_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, + int opt, void *data, size_t len); /* Network Interface functions */ -static void ipsec_start(ifnet_t interface); -static errno_t 
ipsec_output(ifnet_t interface, mbuf_t data); -static errno_t ipsec_demux(ifnet_t interface, mbuf_t data, char *frame_header, - protocol_family_t *protocol); -static errno_t ipsec_add_proto(ifnet_t interface, protocol_family_t protocol, - const struct ifnet_demux_desc *demux_array, - u_int32_t demux_count); -static errno_t ipsec_del_proto(ifnet_t interface, protocol_family_t protocol); -static errno_t ipsec_ioctl(ifnet_t interface, u_long cmd, void *data); -static void ipsec_detached(ifnet_t interface); +static void ipsec_start(ifnet_t interface); +static errno_t ipsec_output(ifnet_t interface, mbuf_t data); +static errno_t ipsec_demux(ifnet_t interface, mbuf_t data, char *frame_header, + protocol_family_t *protocol); +static errno_t ipsec_add_proto(ifnet_t interface, protocol_family_t protocol, + const struct ifnet_demux_desc *demux_array, + u_int32_t demux_count); +static errno_t ipsec_del_proto(ifnet_t interface, protocol_family_t protocol); +static errno_t ipsec_ioctl(ifnet_t interface, u_long cmd, void *data); +static void ipsec_detached(ifnet_t interface); /* Protocol handlers */ -static errno_t ipsec_attach_proto(ifnet_t interface, protocol_family_t proto); -static errno_t ipsec_proto_input(ifnet_t interface, protocol_family_t protocol, - mbuf_t m, char *frame_header); +static errno_t ipsec_attach_proto(ifnet_t interface, protocol_family_t proto); +static errno_t ipsec_proto_input(ifnet_t interface, protocol_family_t protocol, + mbuf_t m, char *frame_header); static errno_t ipsec_proto_pre_output(ifnet_t interface, protocol_family_t protocol, - mbuf_t *packet, const struct sockaddr *dest, void *route, - char *frame_type, char *link_layer_dest); + mbuf_t *packet, const struct sockaddr *dest, void *route, + char *frame_type, char *link_layer_dest); -static kern_ctl_ref ipsec_kctlref; -static u_int32_t ipsec_family; +static kern_ctl_ref ipsec_kctlref; +static u_int32_t ipsec_family; static lck_attr_t *ipsec_lck_attr; static lck_grp_attr_t *ipsec_lck_grp_attr; static lck_grp_t *ipsec_lck_grp; @@ -108,13 +108,13 @@ SYSCTL_NODE(_net, OID_AUTO, ipsec, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPsec"); static int if_ipsec_verify_interface_creation = 0; SYSCTL_INT(_net_ipsec, OID_AUTO, verify_interface_creation, CTLFLAG_RW | CTLFLAG_LOCKED, &if_ipsec_verify_interface_creation, 0, ""); -#define IPSEC_IF_VERIFY(_e) if (__improbable(if_ipsec_verify_interface_creation)) { VERIFY(_e); } +#define IPSEC_IF_VERIFY(_e) if (__improbable(if_ipsec_verify_interface_creation)) { VERIFY(_e); } #define IPSEC_IF_DEFAULT_SLOT_SIZE 2048 #define IPSEC_IF_DEFAULT_RING_SIZE 64 #define IPSEC_IF_DEFAULT_TX_FSW_RING_SIZE 64 #define IPSEC_IF_DEFAULT_RX_FSW_RING_SIZE 128 -#define IPSEC_IF_DEFAULT_BUF_SEG_SIZE skmem_usr_buf_seg_size +#define IPSEC_IF_DEFAULT_BUF_SEG_SIZE skmem_usr_buf_seg_size #define IPSEC_IF_MIN_RING_SIZE 16 #define IPSEC_IF_MAX_RING_SIZE 1024 @@ -131,11 +131,11 @@ static int if_ipsec_tx_fsw_ring_size = IPSEC_IF_DEFAULT_TX_FSW_RING_SIZE; static int if_ipsec_rx_fsw_ring_size = IPSEC_IF_DEFAULT_RX_FSW_RING_SIZE; SYSCTL_PROC(_net_ipsec, OID_AUTO, ring_size, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - &if_ipsec_ring_size, IPSEC_IF_DEFAULT_RING_SIZE, &sysctl_if_ipsec_ring_size, "I", ""); + &if_ipsec_ring_size, IPSEC_IF_DEFAULT_RING_SIZE, &sysctl_if_ipsec_ring_size, "I", ""); SYSCTL_PROC(_net_ipsec, OID_AUTO, tx_fsw_ring_size, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - &if_ipsec_tx_fsw_ring_size, IPSEC_IF_DEFAULT_TX_FSW_RING_SIZE, &sysctl_if_ipsec_tx_fsw_ring_size, "I", ""); + &if_ipsec_tx_fsw_ring_size, 
IPSEC_IF_DEFAULT_TX_FSW_RING_SIZE, &sysctl_if_ipsec_tx_fsw_ring_size, "I", ""); SYSCTL_PROC(_net_ipsec, OID_AUTO, rx_fsw_ring_size, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - &if_ipsec_rx_fsw_ring_size, IPSEC_IF_DEFAULT_RX_FSW_RING_SIZE, &sysctl_if_ipsec_rx_fsw_ring_size, "I", ""); + &if_ipsec_rx_fsw_ring_size, IPSEC_IF_DEFAULT_RX_FSW_RING_SIZE, &sysctl_if_ipsec_rx_fsw_ring_size, "I", ""); static errno_t ipsec_register_nexus(void); @@ -158,56 +158,56 @@ static uuid_t ipsec_kpipe_uuid; /* Control block allocated for each kernel control connection */ struct ipsec_pcb { - TAILQ_ENTRY(ipsec_pcb) ipsec_chain; - kern_ctl_ref ipsec_ctlref; - ifnet_t ipsec_ifp; - u_int32_t ipsec_unit; - u_int32_t ipsec_unique_id; - u_int32_t ipsec_flags; - u_int32_t ipsec_input_frag_size; - bool ipsec_frag_size_set; - int ipsec_ext_ifdata_stats; - mbuf_svc_class_t ipsec_output_service_class; - char ipsec_if_xname[IFXNAMSIZ]; - char ipsec_unique_name[IFXNAMSIZ]; + TAILQ_ENTRY(ipsec_pcb) ipsec_chain; + kern_ctl_ref ipsec_ctlref; + ifnet_t ipsec_ifp; + u_int32_t ipsec_unit; + u_int32_t ipsec_unique_id; + u_int32_t ipsec_flags; + u_int32_t ipsec_input_frag_size; + bool ipsec_frag_size_set; + int ipsec_ext_ifdata_stats; + mbuf_svc_class_t ipsec_output_service_class; + char ipsec_if_xname[IFXNAMSIZ]; + char ipsec_unique_name[IFXNAMSIZ]; // PCB lock protects state fields, like ipsec_kpipe_enabled decl_lck_rw_data(, ipsec_pcb_lock); #if IPSEC_NEXUS - lck_mtx_t ipsec_input_chain_lock; - struct mbuf * ipsec_input_chain; - struct mbuf * ipsec_input_chain_last; + lck_mtx_t ipsec_input_chain_lock; + struct mbuf * ipsec_input_chain; + struct mbuf * ipsec_input_chain_last; // Input chain lock protects the list of input mbufs // The input chain lock must be taken AFTER the PCB lock if both are held - struct ipsec_nx ipsec_nx; - int ipsec_kpipe_enabled; - uuid_t ipsec_kpipe_uuid; - void * ipsec_kpipe_rxring; - void * ipsec_kpipe_txring; - kern_pbufpool_t ipsec_kpipe_pp; - - kern_nexus_t ipsec_netif_nexus; - kern_pbufpool_t ipsec_netif_pp; - void * ipsec_netif_rxring; - void * ipsec_netif_txring; - uint64_t ipsec_netif_txring_size; - - u_int32_t ipsec_slot_size; - u_int32_t ipsec_netif_ring_size; - u_int32_t ipsec_tx_fsw_ring_size; - u_int32_t ipsec_rx_fsw_ring_size; - bool ipsec_use_netif; - bool ipsec_needs_netagent; + struct ipsec_nx ipsec_nx; + int ipsec_kpipe_enabled; + uuid_t ipsec_kpipe_uuid; + void * ipsec_kpipe_rxring; + void * ipsec_kpipe_txring; + kern_pbufpool_t ipsec_kpipe_pp; + + kern_nexus_t ipsec_netif_nexus; + kern_pbufpool_t ipsec_netif_pp; + void * ipsec_netif_rxring; + void * ipsec_netif_txring; + uint64_t ipsec_netif_txring_size; + + u_int32_t ipsec_slot_size; + u_int32_t ipsec_netif_ring_size; + u_int32_t ipsec_tx_fsw_ring_size; + u_int32_t ipsec_rx_fsw_ring_size; + bool ipsec_use_netif; + bool ipsec_needs_netagent; #endif // IPSEC_NEXUS }; TAILQ_HEAD(ipsec_list, ipsec_pcb) ipsec_head; -#define IPSEC_PCB_ZONE_MAX 32 -#define IPSEC_PCB_ZONE_NAME "net.if_ipsec" +#define IPSEC_PCB_ZONE_MAX 32 +#define IPSEC_PCB_ZONE_NAME "net.if_ipsec" -static unsigned int ipsec_pcb_size; /* size of zone element */ -static struct zone *ipsec_pcb_zone; /* zone for ipsec_pcb */ +static unsigned int ipsec_pcb_size; /* size of zone element */ +static struct zone *ipsec_pcb_zone; /* zone for ipsec_pcb */ #define IPSECQ_MAXLEN 256 @@ -220,17 +220,17 @@ sysctl_if_ipsec_ring_size SYSCTL_HANDLER_ARGS int error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) { - return (error); + return error; } if (value < 
IPSEC_IF_MIN_RING_SIZE || - value > IPSEC_IF_MAX_RING_SIZE) { - return (EINVAL); + value > IPSEC_IF_MAX_RING_SIZE) { + return EINVAL; } if_ipsec_ring_size = value; - return (0); + return 0; } static int @@ -241,17 +241,17 @@ sysctl_if_ipsec_tx_fsw_ring_size SYSCTL_HANDLER_ARGS int error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) { - return (error); + return error; } if (value < IPSEC_IF_MIN_RING_SIZE || - value > IPSEC_IF_MAX_RING_SIZE) { - return (EINVAL); + value > IPSEC_IF_MAX_RING_SIZE) { + return EINVAL; } if_ipsec_tx_fsw_ring_size = value; - return (0); + return 0; } static int @@ -262,26 +262,26 @@ sysctl_if_ipsec_rx_fsw_ring_size SYSCTL_HANDLER_ARGS int error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) { - return (error); + return error; } if (value < IPSEC_IF_MIN_RING_SIZE || - value > IPSEC_IF_MAX_RING_SIZE) { - return (EINVAL); + value > IPSEC_IF_MAX_RING_SIZE) { + return EINVAL; } if_ipsec_rx_fsw_ring_size = value; - return (0); + return 0; } #endif // IPSEC_NEXUS errno_t ipsec_register_control(void) { - struct kern_ctl_reg kern_ctl; - errno_t result = 0; - + struct kern_ctl_reg kern_ctl; + errno_t result = 0; + /* Find a unique value for our interface family */ result = mbuf_tag_id_find(IPSEC_CONTROL_NAME, &ipsec_family); if (result != 0) { @@ -291,8 +291,8 @@ ipsec_register_control(void) ipsec_pcb_size = sizeof(struct ipsec_pcb); ipsec_pcb_zone = zinit(ipsec_pcb_size, - IPSEC_PCB_ZONE_MAX * ipsec_pcb_size, - 0, IPSEC_PCB_ZONE_NAME); + IPSEC_PCB_ZONE_MAX * ipsec_pcb_size, + 0, IPSEC_PCB_ZONE_NAME); if (ipsec_pcb_zone == NULL) { printf("ipsec_register_control - zinit(ipsec_pcb) failed"); return ENOMEM; @@ -303,7 +303,7 @@ ipsec_register_control(void) #endif // IPSEC_NEXUS TAILQ_INIT(&ipsec_head); - + bzero(&kern_ctl, sizeof(kern_ctl)); strlcpy(kern_ctl.ctl_name, IPSEC_CONTROL_NAME, sizeof(kern_ctl.ctl_name)); kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0; @@ -316,29 +316,29 @@ ipsec_register_control(void) kern_ctl.ctl_send = ipsec_ctl_send; kern_ctl.ctl_setopt = ipsec_ctl_setopt; kern_ctl.ctl_getopt = ipsec_ctl_getopt; - + result = ctl_register(&kern_ctl, &ipsec_kctlref); if (result != 0) { printf("ipsec_register_control - ctl_register failed: %d\n", result); return result; } - + /* Register the protocol plumbers */ if ((result = proto_register_plumber(PF_INET, ipsec_family, - ipsec_attach_proto, NULL)) != 0) { + ipsec_attach_proto, NULL)) != 0) { printf("ipsec_register_control - proto_register_plumber(PF_INET, %d) failed: %d\n", - ipsec_family, result); + ipsec_family, result); ctl_deregister(ipsec_kctlref); return result; } - + /* Register the protocol plumbers */ if ((result = proto_register_plumber(PF_INET6, ipsec_family, - ipsec_attach_proto, NULL)) != 0) { + ipsec_attach_proto, NULL)) != 0) { proto_unregister_plumber(PF_INET, ipsec_family); ctl_deregister(ipsec_kctlref); printf("ipsec_register_control - proto_register_plumber(PF_INET6, %d) failed: %d\n", - ipsec_family, result); + ipsec_family, result); return result; } @@ -346,29 +346,32 @@ ipsec_register_control(void) ipsec_lck_grp_attr = lck_grp_attr_alloc_init(); ipsec_lck_grp = lck_grp_alloc_init("ipsec", ipsec_lck_grp_attr); lck_mtx_init(&ipsec_lock, ipsec_lck_grp, ipsec_lck_attr); - + return 0; } /* Helpers */ int -ipsec_interface_isvalid (ifnet_t interface) +ipsec_interface_isvalid(ifnet_t interface) { - struct ipsec_pcb *pcb = NULL; - - if (interface == NULL) - return 0; - - pcb = ifnet_softc(interface); - - if (pcb == NULL) - return 0; - - /* When ctl 
disconnects, ipsec_unit is set to 0 */ - if (pcb->ipsec_unit == 0) - return 0; - - return 1; + struct ipsec_pcb *pcb = NULL; + + if (interface == NULL) { + return 0; + } + + pcb = ifnet_softc(interface); + + if (pcb == NULL) { + return 0; + } + + /* When ctl disconnects, ipsec_unit is set to 0 */ + if (pcb->ipsec_unit == 0) { + return 0; + } + + return 1; } #if IPSEC_NEXUS @@ -378,16 +381,16 @@ ipsec_interface_needs_netagent(ifnet_t interface) struct ipsec_pcb *pcb = NULL; if (interface == NULL) { - return (FALSE); + return FALSE; } pcb = ifnet_softc(interface); if (pcb == NULL) { - return (FALSE); + return FALSE; } - return (pcb->ipsec_needs_netagent == true); + return pcb->ipsec_needs_netagent == true; } #endif // IPSEC_NEXUS @@ -410,7 +413,7 @@ ipsec_ifnet_set_attrs(ifnet_t ifp) ifnet_set_stat(ifp, &stats); #endif // !IPSEC_NEXUS - return (0); + return 0; } #if IPSEC_NEXUS @@ -442,14 +445,14 @@ ipsec_register_nexus(void) /* ipsec_nxdp_init() is called before this function returns */ err = kern_nexus_register_domain_provider(NEXUS_TYPE_NET_IF, - (const uint8_t *) "com.apple.ipsec", - &dp_init, sizeof(dp_init), - &ipsec_nx_dom_prov); + (const uint8_t *) "com.apple.ipsec", + &dp_init, sizeof(dp_init), + &ipsec_nx_dom_prov); if (err != 0) { printf("%s: failed to register domain provider\n", __func__); - return (err); + return err; } - return (0); + return 0; } static errno_t @@ -457,46 +460,46 @@ ipsec_netif_prepare(kern_nexus_t nexus, ifnet_t ifp) { struct ipsec_pcb *pcb = kern_nexus_get_context(nexus); pcb->ipsec_netif_nexus = nexus; - return (ipsec_ifnet_set_attrs(ifp)); + return ipsec_ifnet_set_attrs(ifp); } static errno_t ipsec_nexus_pre_connect(kern_nexus_provider_t nxprov, - proc_t p, kern_nexus_t nexus, - nexus_port_t nexus_port, kern_channel_t channel, void **ch_ctx) + proc_t p, kern_nexus_t nexus, + nexus_port_t nexus_port, kern_channel_t channel, void **ch_ctx) { #pragma unused(nxprov, p) #pragma unused(nexus, nexus_port, channel, ch_ctx) - return (0); + return 0; } static errno_t ipsec_nexus_connected(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel) + kern_channel_t channel) { #pragma unused(nxprov, channel) struct ipsec_pcb *pcb = kern_nexus_get_context(nexus); boolean_t ok = ifnet_is_attached(pcb->ipsec_ifp, 1); - return (ok ? 0 : ENXIO); + return ok ? 
0 : ENXIO; } static void ipsec_nexus_pre_disconnect(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel) + kern_channel_t channel) { #pragma unused(nxprov, nexus, channel) } static void ipsec_netif_pre_disconnect(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel) + kern_channel_t channel) { #pragma unused(nxprov, nexus, channel) } static void ipsec_nexus_disconnected(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel) + kern_channel_t channel) { #pragma unused(nxprov, channel) struct ipsec_pcb *pcb = kern_nexus_get_context(nexus); @@ -508,8 +511,8 @@ ipsec_nexus_disconnected(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t ipsec_kpipe_ring_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel, kern_channel_ring_t ring, boolean_t is_tx_ring, - void **ring_ctx) + kern_channel_t channel, kern_channel_ring_t ring, boolean_t is_tx_ring, + void **ring_ctx) { #pragma unused(nxprov) #pragma unused(channel) @@ -527,7 +530,7 @@ ipsec_kpipe_ring_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static void ipsec_kpipe_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t ring) + kern_channel_ring_t ring) { #pragma unused(nxprov) struct ipsec_pcb *pcb = kern_nexus_get_context(nexus); @@ -540,7 +543,7 @@ ipsec_kpipe_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t ipsec_kpipe_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t tx_ring, uint32_t flags) + kern_channel_ring_t tx_ring, uint32_t flags) { #pragma unused(nxprov) #pragma unused(flags) @@ -572,7 +575,7 @@ ipsec_kpipe_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static mbuf_t ipsec_encrypt_mbuf(ifnet_t interface, - mbuf_t data) + mbuf_t data) { struct ipsec_output_state ipsec_state; int error = 0; @@ -591,68 +594,68 @@ ipsec_encrypt_mbuf(ifnet_t interface, u_int ip_version = ip->ip_v; switch (ip_version) { - case 4: { - af = AF_INET; - - memset(&ipsec_state, 0, sizeof(ipsec_state)); - ipsec_state.m = data; - ipsec_state.dst = (struct sockaddr *)&ip->ip_dst; - memset(&ipsec_state.ro, 0, sizeof(ipsec_state.ro)); - - error = ipsec4_interface_output(&ipsec_state, interface); - if (error == 0 && ipsec_state.tunneled == 6) { - // Tunneled in IPv6 - packet is gone - // TODO: Don't lose mbuf - data = NULL; - goto done; - } - - data = ipsec_state.m; - if (error || data == NULL) { - if (error) { - printf("ipsec_encrypt_mbuf: ipsec4_output error %d\n", error); - } - goto ipsec_output_err; - } + case 4: { + af = AF_INET; + + memset(&ipsec_state, 0, sizeof(ipsec_state)); + ipsec_state.m = data; + ipsec_state.dst = (struct sockaddr *)&ip->ip_dst; + memset(&ipsec_state.ro, 0, sizeof(ipsec_state.ro)); + + error = ipsec4_interface_output(&ipsec_state, interface); + if (error == 0 && ipsec_state.tunneled == 6) { + // Tunneled in IPv6 - packet is gone + // TODO: Don't lose mbuf + data = NULL; goto done; } - case 6: { - af = AF_INET6; - data = ipsec6_splithdr(data); - if (data == NULL) { - printf("ipsec_encrypt_mbuf: ipsec6_splithdr returned NULL\n"); - goto ipsec_output_err; + data = ipsec_state.m; + if (error || data == NULL) { + if (error) { + printf("ipsec_encrypt_mbuf: ipsec4_output error %d\n", error); } + goto ipsec_output_err; + } + goto done; + } + case 6: { + af = AF_INET6; - struct ip6_hdr *ip6 = mtod(data, struct ip6_hdr *); + data = ipsec6_splithdr(data); + if (data == NULL) { + printf("ipsec_encrypt_mbuf: ipsec6_splithdr returned NULL\n"); + 
goto ipsec_output_err; + } - memset(&ipsec_state, 0, sizeof(ipsec_state)); - ipsec_state.m = data; - ipsec_state.dst = (struct sockaddr *)&ip6->ip6_dst; - memset(&ipsec_state.ro, 0, sizeof(ipsec_state.ro)); + struct ip6_hdr *ip6 = mtod(data, struct ip6_hdr *); - error = ipsec6_interface_output(&ipsec_state, interface, &ip6->ip6_nxt, ipsec_state.m); - if (error == 0 && ipsec_state.tunneled == 4) { - // Tunneled in IPv4 - packet is gone - // TODO: Don't lose mbuf - data = NULL; - goto done; - } - data = ipsec_state.m; - if (error || data == NULL) { - if (error) { - printf("ipsec_encrypt_mbuf: ipsec6_output error %d\n", error); - } - goto ipsec_output_err; - } + memset(&ipsec_state, 0, sizeof(ipsec_state)); + ipsec_state.m = data; + ipsec_state.dst = (struct sockaddr *)&ip6->ip6_dst; + memset(&ipsec_state.ro, 0, sizeof(ipsec_state.ro)); + + error = ipsec6_interface_output(&ipsec_state, interface, &ip6->ip6_nxt, ipsec_state.m); + if (error == 0 && ipsec_state.tunneled == 4) { + // Tunneled in IPv4 - packet is gone + // TODO: Don't lose mbuf + data = NULL; goto done; } - default: { - printf("ipsec_encrypt_mbuf: Received unknown packet version %d\n", ip_version); - error = -1; + data = ipsec_state.m; + if (error || data == NULL) { + if (error) { + printf("ipsec_encrypt_mbuf: ipsec6_output error %d\n", error); + } goto ipsec_output_err; } + goto done; + } + default: { + printf("ipsec_encrypt_mbuf: Received unknown packet version %d\n", ip_version); + error = -1; + goto ipsec_output_err; + } } done: @@ -667,7 +670,7 @@ ipsec_output_err: static errno_t ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t rx_ring, uint32_t flags) + kern_channel_ring_t rx_ring, uint32_t flags) { #pragma unused(nxprov) #pragma unused(flags) @@ -744,7 +747,7 @@ ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, error = kern_pbufpool_alloc_nosleep(rx_pp, 1, &rx_ph); if (__improbable(error != 0)) { printf("ipsec_kpipe_sync_rx %s: failed to allocate packet\n", - pcb->ipsec_ifp->if_xname); + pcb->ipsec_ifp->if_xname); break; } @@ -758,7 +761,7 @@ ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_pbufpool_free(rx_pp, rx_ph); continue; } - + kern_buflet_t tx_buf = kern_packet_get_next_buflet(tx_ph, NULL); VERIFY(tx_buf != NULL); uint8_t *tx_baddr = kern_buflet_get_object_address(tx_buf); @@ -768,7 +771,7 @@ ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, bpf_tap_packet_out(pcb->ipsec_ifp, DLT_RAW, tx_ph, NULL, 0); length = MIN(kern_packet_get_data_length(tx_ph), - pcb->ipsec_slot_size); + pcb->ipsec_slot_size); // Increment TX stats tx_ring_stats.kcrsi_slots_transferred++; @@ -811,7 +814,7 @@ ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, mbuf_freem(data); kern_pbufpool_free(rx_pp, rx_ph); printf("ipsec_kpipe_sync_rx %s: encrypted packet length %zu > %u\n", - pcb->ipsec_ifp->if_xname, length, rx_pp->pp_buflet_size); + pcb->ipsec_ifp->if_xname, length, rx_pp->pp_buflet_size); continue; } @@ -823,7 +826,7 @@ ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Copy-in data from mbuf to buflet mbuf_copydata(data, 0, length, (void *)rx_baddr); - kern_packet_clear_flow_uuid(rx_ph); // Zero flow id + kern_packet_clear_flow_uuid(rx_ph); // Zero flow id // Finalize and attach the packet error = kern_buflet_set_data_offset(rx_buf, 0); @@ -881,8 +884,8 @@ ipsec_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t ipsec_netif_ring_init(kern_nexus_provider_t nxprov, 
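The encrypt path above dispatches on the version nibble of the leading IP header byte. A minimal userspace sketch of that dispatch, assuming a contiguous packet buffer in place of an mbuf chain (names here are illustrative, not kernel KPIs):

#include <stddef.h>
#include <stdint.h>
#include <sys/socket.h>	/* AF_INET, AF_INET6 */

/* Return the address family implied by a raw IP packet, or -1 for the
 * "unknown packet version" case handled above. */
static int
ip_family_of(const uint8_t *pkt, size_t len)
{
	if (len < 1) {
		return -1;
	}
	switch (pkt[0] >> 4) {	/* IP version lives in the high nibble */
	case 4:
		return AF_INET;
	case 6:
		return AF_INET6;
	default:
		return -1;
	}
}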
kern_nexus_t nexus, - kern_channel_t channel, kern_channel_ring_t ring, boolean_t is_tx_ring, - void **ring_ctx) + kern_channel_t channel, kern_channel_ring_t ring, boolean_t is_tx_ring, + void **ring_ctx) { #pragma unused(nxprov) #pragma unused(channel) @@ -900,7 +903,7 @@ ipsec_netif_ring_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static void ipsec_netif_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t ring) + kern_channel_ring_t ring) { #pragma unused(nxprov) struct ipsec_pcb *pcb = kern_nexus_get_context(nexus); @@ -920,45 +923,45 @@ ipsec_netif_check_policy(mbuf_t data) // This packet has been marked with IP level policy, do not mark again. if (data && data->m_pkthdr.necp_mtag.necp_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) { - return (true); + return true; } size_t length = mbuf_pkthdr_len(data); if (length < sizeof(struct ip)) { - return (false); + return false; } struct ip *ip = mtod(data, struct ip *); u_int ip_version = ip->ip_v; switch (ip_version) { - case 4: { - necp_matched_policy_id = necp_ip_output_find_policy_match(data, 0, NULL, - &necp_result, &necp_result_parameter); - break; - } - case 6: { - necp_matched_policy_id = necp_ip6_output_find_policy_match(data, 0, NULL, - &necp_result, &necp_result_parameter); - break; - } - default: { - return (false); - } + case 4: { + necp_matched_policy_id = necp_ip_output_find_policy_match(data, 0, NULL, + &necp_result, &necp_result_parameter); + break; + } + case 6: { + necp_matched_policy_id = necp_ip6_output_find_policy_match(data, 0, NULL, + &necp_result, &necp_result_parameter); + break; + } + default: { + return false; + } } if (necp_result == NECP_KERNEL_POLICY_RESULT_DROP || - necp_result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) { + necp_result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) { /* Drop and flow divert packets should be blocked at the IP layer */ - return (false); + return false; } necp_mark_packet_from_ip(data, necp_matched_policy_id); - return (true); + return true; } static errno_t ipsec_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t tx_ring, uint32_t flags) + kern_channel_ring_t tx_ring, uint32_t flags) { #pragma unused(nxprov) #pragma unused(flags) @@ -1016,7 +1019,7 @@ ipsec_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, bpf_tap_packet_out(pcb->ipsec_ifp, DLT_RAW, tx_ph, NULL, 0); length = MIN(kern_packet_get_data_length(tx_ph), - pcb->ipsec_slot_size); + pcb->ipsec_slot_size); if (length > 0) { errno_t error = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_HEADER, &data); @@ -1083,7 +1086,7 @@ ipsec_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t ipsec_netif_tx_doorbell(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t ring, __unused uint32_t flags) + kern_channel_ring_t ring, __unused uint32_t flags) { #pragma unused(nxprov) struct ipsec_pcb *pcb = kern_nexus_get_context(nexus); @@ -1106,7 +1109,7 @@ ipsec_netif_tx_doorbell(kern_nexus_provider_t nxprov, kern_nexus_t nexus, if (pcb->ipsec_kpipe_enabled) { uint32_t tx_available = kern_channel_available_slot_count(ring); if (pcb->ipsec_netif_txring_size > 0 && - tx_available >= pcb->ipsec_netif_txring_size - 1) { + tx_available >= pcb->ipsec_netif_txring_size - 1) { // No room left in tx ring, disable output for now errno_t error = ifnet_disable_output(pcb->ipsec_ifp); if (error != 0) { @@ -1130,12 +1133,12 @@ ipsec_netif_tx_doorbell(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kr_exit(ring); - return 
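ipsec_netif_check_policy() above reduces a NECP verdict to pass/fail: drop and socket-divert results must be enforced at the IP layer, so the netif path refuses them. A sketch of that collapse, with an illustrative enum standing in for the NECP_KERNEL_POLICY_RESULT_* constants:

#include <stdbool.h>

enum verdict { VERDICT_PASS, VERDICT_DROP, VERDICT_SOCKET_DIVERT };

/* Mirror of the final check in ipsec_netif_check_policy(): only these
 * two results are refused; anything else is marked and allowed. */
static bool
policy_allows(enum verdict v)
{
	switch (v) {
	case VERDICT_DROP:
	case VERDICT_SOCKET_DIVERT:
		return false;	/* blocked at the IP layer instead */
	default:
		return true;
	}
}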
(0); + return 0; } static errno_t ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t rx_ring, uint32_t flags) + kern_channel_ring_t rx_ring, uint32_t flags) { #pragma unused(nxprov) #pragma unused(flags) @@ -1199,7 +1202,7 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, STATS_INC(nifs, NETIF_STATS_BADLEN); STATS_INC(nifs, NETIF_STATS_DROPPED); printf("ipsec_netif_sync_rx %s: legacy decrypted packet length cannot hold IP %zu < %zu\n", - pcb->ipsec_ifp->if_xname, length, sizeof(struct ip)); + pcb->ipsec_ifp->if_xname, length, sizeof(struct ip)); continue; } @@ -1207,87 +1210,85 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, struct ip *ip = mtod(data, struct ip *); u_int ip_version = ip->ip_v; switch (ip_version) { - case 4: { - af = AF_INET; - break; - } - case 6: { - af = AF_INET6; - break; - } - default: { - printf("ipsec_netif_sync_rx %s: legacy unknown ip version %u\n", - pcb->ipsec_ifp->if_xname, ip_version); - break; - } + case 4: { + af = AF_INET; + break; + } + case 6: { + af = AF_INET6; + break; + } + default: { + printf("ipsec_netif_sync_rx %s: legacy unknown ip version %u\n", + pcb->ipsec_ifp->if_xname, ip_version); + break; + } } if (length > rx_pp->pp_buflet_size || - (pcb->ipsec_frag_size_set && length > pcb->ipsec_input_frag_size)) { - + (pcb->ipsec_frag_size_set && length > pcb->ipsec_input_frag_size)) { // We need to fragment to send up into the netif u_int32_t fragment_mtu = rx_pp->pp_buflet_size; if (pcb->ipsec_frag_size_set && - pcb->ipsec_input_frag_size < rx_pp->pp_buflet_size) { + pcb->ipsec_input_frag_size < rx_pp->pp_buflet_size) { fragment_mtu = pcb->ipsec_input_frag_size; } mbuf_t fragment_chain = NULL; switch (af) { - case AF_INET: { - // ip_fragment expects the length in host order - ip->ip_len = ntohs(ip->ip_len); + case AF_INET: { + // ip_fragment expects the length in host order + ip->ip_len = ntohs(ip->ip_len); + + // ip_fragment will modify the original data, don't free + int fragment_error = ip_fragment(data, pcb->ipsec_ifp, fragment_mtu, TRUE); + if (fragment_error == 0 && data != NULL) { + fragment_chain = data; + } else { + STATS_INC(nifs, NETIF_STATS_BADLEN); + STATS_INC(nifs, NETIF_STATS_DROPPED); + printf("ipsec_netif_sync_rx %s: failed to fragment IPv4 packet of length %zu (%d)\n", + pcb->ipsec_ifp->if_xname, length, fragment_error); + } + break; + } + case AF_INET6: { + if (length < sizeof(struct ip6_hdr)) { + mbuf_freem(data); + STATS_INC(nifs, NETIF_STATS_BADLEN); + STATS_INC(nifs, NETIF_STATS_DROPPED); + printf("ipsec_netif_sync_rx %s: failed to fragment IPv6 packet of length %zu < %zu\n", + pcb->ipsec_ifp->if_xname, length, sizeof(struct ip6_hdr)); + } else { + // ip6_do_fragmentation will free the original data on success only + struct ip6_hdr *ip6 = mtod(data, struct ip6_hdr *); + struct ip6_exthdrs exthdrs; + memset(&exthdrs, 0, sizeof(exthdrs)); - // ip_fragment will modify the original data, don't free - int fragment_error = ip_fragment(data, pcb->ipsec_ifp, fragment_mtu, TRUE); + int fragment_error = ip6_do_fragmentation(&data, 0, pcb->ipsec_ifp, sizeof(struct ip6_hdr), + ip6, &exthdrs, fragment_mtu, ip6->ip6_nxt); if (fragment_error == 0 && data != NULL) { fragment_chain = data; } else { - STATS_INC(nifs, NETIF_STATS_BADLEN); - STATS_INC(nifs, NETIF_STATS_DROPPED); - printf("ipsec_netif_sync_rx %s: failed to fragment IPv4 packet of length %zu (%d)\n", - pcb->ipsec_ifp->if_xname, length, fragment_error); - } - break; - } - case AF_INET6: { - if 
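The doorbell above applies backpressure by disabling interface output when the tx ring leaves almost no slack, re-enabling it once slots drain. A generic occupancy check in the same spirit (plain C, not the kern_channel API):

#include <stdbool.h>
#include <stdint.h>

/* Free-running indices: head advances on produce, tail on consume.
 * Unsigned subtraction yields the fill level even across wraparound. */
struct ring_sketch {
	uint32_t size;
	uint32_t head;
	uint32_t tail;
};

static bool
ring_should_pause(const struct ring_sketch *r)
{
	uint32_t used = r->head - r->tail;
	return used >= r->size - 1;	/* keep one slot of slack */
}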
(length < sizeof(struct ip6_hdr)) { mbuf_freem(data); STATS_INC(nifs, NETIF_STATS_BADLEN); STATS_INC(nifs, NETIF_STATS_DROPPED); - printf("ipsec_netif_sync_rx %s: failed to fragment IPv6 packet of length %zu < %zu\n", - pcb->ipsec_ifp->if_xname, length, sizeof(struct ip6_hdr)); - } else { - - // ip6_do_fragmentation will free the original data on success only - struct ip6_hdr *ip6 = mtod(data, struct ip6_hdr *); - struct ip6_exthdrs exthdrs; - memset(&exthdrs, 0, sizeof(exthdrs)); - - int fragment_error = ip6_do_fragmentation(&data, 0, pcb->ipsec_ifp, sizeof(struct ip6_hdr), - ip6, &exthdrs, fragment_mtu, ip6->ip6_nxt); - if (fragment_error == 0 && data != NULL) { - fragment_chain = data; - } else { - mbuf_freem(data); - STATS_INC(nifs, NETIF_STATS_BADLEN); - STATS_INC(nifs, NETIF_STATS_DROPPED); - printf("ipsec_netif_sync_rx %s: failed to fragment IPv6 packet of length %zu (%d)\n", - pcb->ipsec_ifp->if_xname, length, fragment_error); - } + printf("ipsec_netif_sync_rx %s: failed to fragment IPv6 packet of length %zu (%d)\n", + pcb->ipsec_ifp->if_xname, length, fragment_error); } - break; - } - default: { - // Cannot fragment unknown families - mbuf_freem(data); - STATS_INC(nifs, NETIF_STATS_BADLEN); - STATS_INC(nifs, NETIF_STATS_DROPPED); - printf("ipsec_netif_sync_rx %s: uknown legacy decrypted packet length %zu > %u\n", - pcb->ipsec_ifp->if_xname, length, rx_pp->pp_buflet_size); - break; } + break; + } + default: { + // Cannot fragment unknown families + mbuf_freem(data); + STATS_INC(nifs, NETIF_STATS_BADLEN); + STATS_INC(nifs, NETIF_STATS_DROPPED); + printf("ipsec_netif_sync_rx %s: uknown legacy decrypted packet length %zu > %u\n", + pcb->ipsec_ifp->if_xname, length, rx_pp->pp_buflet_size); + break; + } } if (fragment_chain != NULL) { @@ -1322,7 +1323,7 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Copy-in data from mbuf to buflet mbuf_copydata(data, 0, length, (void *)rx_baddr); - kern_packet_clear_flow_uuid(rx_ph); // Zero flow id + kern_packet_clear_flow_uuid(rx_ph); // Zero flow id // Finalize and attach the packet error = kern_buflet_set_data_offset(rx_buf, 0); @@ -1418,7 +1419,7 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, tx_baddr += kern_buflet_get_data_offset(tx_buf); length = MIN(kern_packet_get_data_length(tx_ph), - pcb->ipsec_slot_size); + pcb->ipsec_slot_size); // Increment TX stats tx_ring_stats.kcrsi_slots_transferred++; @@ -1432,57 +1433,57 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, struct ip *ip = mtod(data, struct ip *); u_int ip_version = ip->ip_v; switch (ip_version) { - case 4: { - af = AF_INET; - ip->ip_len = ntohs(ip->ip_len) - sizeof(struct ip); - ip->ip_off = ntohs(ip->ip_off); - - if (length < ip->ip_len) { - printf("ipsec_netif_sync_rx %s: IPv4 packet length too short (%zu < %u)\n", - pcb->ipsec_ifp->if_xname, length, ip->ip_len); - STATS_INC(nifs, NETIF_STATS_BADLEN); - STATS_INC(nifs, NETIF_STATS_DROPPED); - mbuf_freem(data); - data = NULL; - } else { - data = esp4_input_extended(data, sizeof(struct ip), pcb->ipsec_ifp); - } - break; + case 4: { + af = AF_INET; + ip->ip_len = ntohs(ip->ip_len) - sizeof(struct ip); + ip->ip_off = ntohs(ip->ip_off); + + if (length < ip->ip_len) { + printf("ipsec_netif_sync_rx %s: IPv4 packet length too short (%zu < %u)\n", + pcb->ipsec_ifp->if_xname, length, ip->ip_len); + STATS_INC(nifs, NETIF_STATS_BADLEN); + STATS_INC(nifs, NETIF_STATS_DROPPED); + mbuf_freem(data); + data = NULL; + } else { + data = esp4_input_extended(data, 
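The IPv4 branch above flips ip_len to host order because ip_fragment() expects host-order lengths. A userspace sketch of the same normalization plus a bounds check, assuming BSD-style netinet headers:

#include <arpa/inet.h>	/* ntohs */
#include <netinet/in.h>
#include <netinet/ip.h>	/* struct ip */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* ip_len arrives big-endian off the wire; convert once, then validate
 * against the bytes actually present in the buffer. */
static bool
ipv4_len_ok(const struct ip *ip, size_t buf_len)
{
	uint16_t total = ntohs(ip->ip_len);
	return total >= sizeof(struct ip) && total <= buf_len;
}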
sizeof(struct ip), pcb->ipsec_ifp); } - case 6: { - if (length < sizeof(struct ip6_hdr)) { - printf("ipsec_netif_sync_rx %s: IPv6 packet length too short for header %zu\n", - pcb->ipsec_ifp->if_xname, length); + break; + } + case 6: { + if (length < sizeof(struct ip6_hdr)) { + printf("ipsec_netif_sync_rx %s: IPv6 packet length too short for header %zu\n", + pcb->ipsec_ifp->if_xname, length); + STATS_INC(nifs, NETIF_STATS_BADLEN); + STATS_INC(nifs, NETIF_STATS_DROPPED); + mbuf_freem(data); + data = NULL; + } else { + af = AF_INET6; + struct ip6_hdr *ip6 = mtod(data, struct ip6_hdr *); + const size_t ip6_len = sizeof(*ip6) + ntohs(ip6->ip6_plen); + if (length < ip6_len) { + printf("ipsec_netif_sync_rx %s: IPv6 packet length too short (%zu < %zu)\n", + pcb->ipsec_ifp->if_xname, length, ip6_len); STATS_INC(nifs, NETIF_STATS_BADLEN); STATS_INC(nifs, NETIF_STATS_DROPPED); mbuf_freem(data); data = NULL; } else { - af = AF_INET6; - struct ip6_hdr *ip6 = mtod(data, struct ip6_hdr *); - const size_t ip6_len = sizeof(*ip6) + ntohs(ip6->ip6_plen); - if (length < ip6_len) { - printf("ipsec_netif_sync_rx %s: IPv6 packet length too short (%zu < %zu)\n", - pcb->ipsec_ifp->if_xname, length, ip6_len); - STATS_INC(nifs, NETIF_STATS_BADLEN); - STATS_INC(nifs, NETIF_STATS_DROPPED); - mbuf_freem(data); - data = NULL; - } else { - int offset = sizeof(struct ip6_hdr); - esp6_input_extended(&data, &offset, ip6->ip6_nxt, pcb->ipsec_ifp); - } + int offset = sizeof(struct ip6_hdr); + esp6_input_extended(&data, &offset, ip6->ip6_nxt, pcb->ipsec_ifp); } - break; - } - default: { - printf("ipsec_netif_sync_rx %s: unknown ip version %u\n", - pcb->ipsec_ifp->if_xname, ip_version); - STATS_INC(nifs, NETIF_STATS_DROPPED); - mbuf_freem(data); - data = NULL; - break; } + break; + } + default: { + printf("ipsec_netif_sync_rx %s: unknown ip version %u\n", + pcb->ipsec_ifp->if_xname, ip_version); + STATS_INC(nifs, NETIF_STATS_DROPPED); + mbuf_freem(data); + data = NULL; + break; + } } } else { printf("ipsec_netif_sync_rx %s - mbuf_copyback(%zu) error %d\n", pcb->ipsec_ifp->if_xname, length, error); @@ -1516,7 +1517,7 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, STATS_INC(nifs, NETIF_STATS_BADLEN); STATS_INC(nifs, NETIF_STATS_DROPPED); printf("ipsec_netif_sync_rx %s: decrypted packet length %zu > %u\n", - pcb->ipsec_ifp->if_xname, length, rx_pp->pp_buflet_size); + pcb->ipsec_ifp->if_xname, length, rx_pp->pp_buflet_size); continue; } @@ -1530,7 +1531,7 @@ ipsec_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Copy-in data from mbuf to buflet mbuf_copydata(data, 0, length, (void *)rx_baddr); - kern_packet_clear_flow_uuid(rx_ph); // Zero flow id + kern_packet_clear_flow_uuid(rx_ph); // Zero flow id // Finalize and attach the packet error = kern_buflet_set_data_offset(rx_buf, 0); @@ -1589,8 +1590,8 @@ done: static errno_t ipsec_nexus_ifattach(struct ipsec_pcb *pcb, - struct ifnet_init_eparams *init_params, - struct ifnet **ifp) + struct ifnet_init_eparams *init_params, + struct ifnet **ifp) { errno_t err; nexus_controller_t controller = kern_nexus_shared_controller(); @@ -1599,7 +1600,7 @@ ipsec_nexus_ifattach(struct ipsec_pcb *pcb, nexus_name_t provider_name; snprintf((char *)provider_name, sizeof(provider_name), - "com.apple.netif.%s", pcb->ipsec_if_xname); + "com.apple.netif.%s", pcb->ipsec_if_xname); struct kern_nexus_provider_init prov_init = { .nxpi_version = KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION, @@ -1622,7 +1623,7 @@ ipsec_nexus_ifattach(struct ipsec_pcb *pcb, 
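The IPv6 length check above relies on ip6_plen counting payload only; the fixed 40-byte header is added on top. The same validation on a contiguous buffer:

#include <arpa/inet.h>	/* ntohs */
#include <netinet/in.h>
#include <netinet/ip6.h>	/* struct ip6_hdr, ip6_plen */
#include <stdbool.h>
#include <stddef.h>

/* Total on-wire size = fixed header + payload length field. */
static bool
ipv6_len_ok(const struct ip6_hdr *ip6, size_t buf_len)
{
	size_t total = sizeof(*ip6) + ntohs(ip6->ip6_plen);
	return buf_len >= total;
}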
IPSEC_IF_VERIFY(err == 0); if (err != 0) { printf("%s: kern_nexus_attr_create failed: %d\n", - __func__, err); + __func__, err); goto failed; } @@ -1639,13 +1640,13 @@ ipsec_nexus_ifattach(struct ipsec_pcb *pcb, pcb->ipsec_netif_txring_size = ring_size; - bzero(&pp_init, sizeof (pp_init)); + bzero(&pp_init, sizeof(pp_init)); pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION; pp_init.kbi_packets = pcb->ipsec_netif_ring_size * 2; pp_init.kbi_bufsize = pcb->ipsec_slot_size; pp_init.kbi_buf_seg_size = IPSEC_IF_DEFAULT_BUF_SEG_SIZE; pp_init.kbi_max_frags = 1; - (void) snprintf((char *)pp_init.kbi_name, sizeof (pp_init.kbi_name), + (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name), "%s", provider_name); err = kern_pbufpool_create(&pp_init, &pp_init, &pcb->ipsec_netif_pp, NULL); @@ -1655,16 +1656,16 @@ ipsec_nexus_ifattach(struct ipsec_pcb *pcb, } err = kern_nexus_controller_register_provider(controller, - ipsec_nx_dom_prov, - provider_name, - &prov_init, - sizeof(prov_init), - nxa, - &pcb->ipsec_nx.if_provider); + ipsec_nx_dom_prov, + provider_name, + &prov_init, + sizeof(prov_init), + nxa, + &pcb->ipsec_nx.if_provider); IPSEC_IF_VERIFY(err == 0); if (err != 0) { printf("%s register provider failed, error %d\n", - __func__, err); + __func__, err); goto failed; } @@ -1676,17 +1677,17 @@ ipsec_nexus_ifattach(struct ipsec_pcb *pcb, net_init.nxneti_prepare = ipsec_netif_prepare; net_init.nxneti_tx_pbufpool = pcb->ipsec_netif_pp; err = kern_nexus_controller_alloc_net_provider_instance(controller, - pcb->ipsec_nx.if_provider, - pcb, - &pcb->ipsec_nx.if_instance, - &net_init, - ifp); + pcb->ipsec_nx.if_provider, + pcb, + &pcb->ipsec_nx.if_instance, + &net_init, + ifp); IPSEC_IF_VERIFY(err == 0); if (err != 0) { printf("%s alloc_net_provider_instance failed, %d\n", - __func__, err); + __func__, err); kern_nexus_controller_deregister_provider(controller, - pcb->ipsec_nx.if_provider); + pcb->ipsec_nx.if_provider); uuid_clear(pcb->ipsec_nx.if_provider); goto failed; } @@ -1699,27 +1700,27 @@ failed: kern_pbufpool_destroy(pcb->ipsec_netif_pp); pcb->ipsec_netif_pp = NULL; } - return (err); + return err; } static void ipsec_detach_provider_and_instance(uuid_t provider, uuid_t instance) { nexus_controller_t controller = kern_nexus_shared_controller(); - errno_t err; + errno_t err; if (!uuid_is_null(instance)) { err = kern_nexus_controller_free_provider_instance(controller, - instance); + instance); if (err != 0) { printf("%s free_provider_instance failed %d\n", - __func__, err); + __func__, err); } uuid_clear(instance); } if (!uuid_is_null(provider)) { err = kern_nexus_controller_deregister_provider(controller, - provider); + provider); if (err != 0) { printf("%s deregister_provider %d\n", __func__, err); } @@ -1733,60 +1734,59 @@ ipsec_nexus_detach(struct ipsec_pcb *pcb) { ipsec_nx_t nx = &pcb->ipsec_nx; nexus_controller_t controller = kern_nexus_shared_controller(); - errno_t err; + errno_t err; if (!uuid_is_null(nx->ms_host)) { err = kern_nexus_ifdetach(controller, - nx->ms_instance, - nx->ms_host); + nx->ms_instance, + nx->ms_host); if (err != 0) { printf("%s: kern_nexus_ifdetach ms host failed %d\n", - __func__, err); + __func__, err); } } if (!uuid_is_null(nx->ms_device)) { err = kern_nexus_ifdetach(controller, - nx->ms_instance, - nx->ms_device); + nx->ms_instance, + nx->ms_device); if (err != 0) { printf("%s: kern_nexus_ifdetach ms device failed %d\n", - __func__, err); + __func__, err); } } ipsec_detach_provider_and_instance(nx->if_provider, - nx->if_instance); + nx->if_instance); 
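ipsec_nexus_ifattach() above builds two dependent objects and, on a late failure, deregisters the first and clears its UUID before bailing out. A self-contained sketch of that unwind idiom, with heap blocks standing in for the provider and instance handles:

#include <stdlib.h>

/* Every failure path releases exactly what was built before it, so the
 * caller never sees half-constructed state. */
static int
attach_sketch(void **prov_out, void **inst_out)
{
	void *prov = malloc(64);	/* "register provider" */
	void *inst = NULL;

	if (prov == NULL) {
		goto failed;
	}
	inst = malloc(64);		/* "alloc provider instance" */
	if (inst == NULL) {
		free(prov);		/* "deregister provider" */
		prov = NULL;
		goto failed;
	}
	*prov_out = prov;
	*inst_out = inst;
	return 0;
failed:
	return -1;
}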
ipsec_detach_provider_and_instance(nx->ms_provider, - nx->ms_instance); + nx->ms_instance); if (pcb->ipsec_netif_pp != NULL) { kern_pbufpool_destroy(pcb->ipsec_netif_pp); pcb->ipsec_netif_pp = NULL; - } memset(nx, 0, sizeof(*nx)); } static errno_t ipsec_create_fs_provider_and_instance(struct ipsec_pcb *pcb, - uint32_t subtype, const char *type_name, - const char *ifname, - uuid_t *provider, uuid_t *instance) + uint32_t subtype, const char *type_name, + const char *ifname, + uuid_t *provider, uuid_t *instance) { nexus_attr_t attr = NULL; nexus_controller_t controller = kern_nexus_shared_controller(); uuid_t dom_prov; errno_t err; struct kern_nexus_init init; - nexus_name_t provider_name; + nexus_name_t provider_name; err = kern_nexus_get_builtin_domain_provider(NEXUS_TYPE_FLOW_SWITCH, - &dom_prov); + &dom_prov); IPSEC_IF_VERIFY(err == 0); if (err != 0) { printf("%s can't get %s provider, error %d\n", - __func__, type_name, err); + __func__, type_name, err); goto failed; } @@ -1794,7 +1794,7 @@ ipsec_create_fs_provider_and_instance(struct ipsec_pcb *pcb, IPSEC_IF_VERIFY(err == 0); if (err != 0) { printf("%s: kern_nexus_attr_create failed: %d\n", - __func__, err); + __func__, err); goto failed; } @@ -1814,38 +1814,38 @@ ipsec_create_fs_provider_and_instance(struct ipsec_pcb *pcb, VERIFY(err == 0); snprintf((char *)provider_name, sizeof(provider_name), - "com.apple.%s.%s", type_name, ifname); + "com.apple.%s.%s", type_name, ifname); err = kern_nexus_controller_register_provider(controller, - dom_prov, - provider_name, - NULL, - 0, - attr, - provider); + dom_prov, + provider_name, + NULL, + 0, + attr, + provider); kern_nexus_attr_destroy(attr); attr = NULL; IPSEC_IF_VERIFY(err == 0); if (err != 0) { printf("%s register %s provider failed, error %d\n", - __func__, type_name, err); + __func__, type_name, err); goto failed; } - bzero(&init, sizeof (init)); + bzero(&init, sizeof(init)); init.nxi_version = KERN_NEXUS_CURRENT_VERSION; err = kern_nexus_controller_alloc_provider_instance(controller, - *provider, - NULL, - instance, &init); + *provider, + NULL, + instance, &init); IPSEC_IF_VERIFY(err == 0); if (err != 0) { printf("%s alloc_provider_instance %s failed, %d\n", - __func__, type_name, err); + __func__, type_name, err); kern_nexus_controller_deregister_provider(controller, - *provider); + *provider); uuid_clear(*provider); } failed: - return (err); + return err; } static errno_t @@ -1857,21 +1857,21 @@ ipsec_multistack_attach(struct ipsec_pcb *pcb) // Allocate multistack flowswitch err = ipsec_create_fs_provider_and_instance(pcb, - NEXUS_EXTENSION_FSW_TYPE_MULTISTACK, - "multistack", - pcb->ipsec_ifp->if_xname, - &nx->ms_provider, - &nx->ms_instance); + NEXUS_EXTENSION_FSW_TYPE_MULTISTACK, + "multistack", + pcb->ipsec_ifp->if_xname, + &nx->ms_provider, + &nx->ms_instance); if (err != 0) { printf("%s: failed to create bridge provider and instance\n", - __func__); + __func__); goto failed; } // Attach multistack to device port err = kern_nexus_ifattach(controller, nx->ms_instance, - NULL, nx->if_instance, - FALSE, &nx->ms_device); + NULL, nx->if_instance, + FALSE, &nx->ms_device); if (err != 0) { printf("%s kern_nexus_ifattach ms device %d\n", __func__, err); goto failed; @@ -1879,8 +1879,8 @@ ipsec_multistack_attach(struct ipsec_pcb *pcb) // Attach multistack to host port err = kern_nexus_ifattach(controller, nx->ms_instance, - NULL, nx->if_instance, - TRUE, &nx->ms_host); + NULL, nx->if_instance, + TRUE, &nx->ms_host); if (err != 0) { printf("%s kern_nexus_ifattach ms host %d\n", __func__, 
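The detach path above guards every release with a null check and wipes the containing struct afterwards, so teardown is safe on partially-built state and idempotent on repeat. The same shape in plain C:

#include <stdlib.h>
#include <string.h>

struct nx_sketch {
	void *host;	/* stand-ins for the host/device/pool handles */
	void *device;
	void *pool;
};

static void
detach_sketch(struct nx_sketch *nx)
{
	if (nx->host != NULL) {
		free(nx->host);
	}
	if (nx->device != NULL) {
		free(nx->device);
	}
	if (nx->pool != NULL) {
		free(nx->pool);
	}
	memset(nx, 0, sizeof(*nx));	/* a second call becomes a no-op */
}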
err); goto failed; @@ -1907,7 +1907,7 @@ ipsec_multistack_attach(struct ipsec_pcb *pcb) printf("ipsec_multistack_attach - unable to find multistack nexus\n"); } - return (0); + return 0; failed: ipsec_nexus_detach(pcb); @@ -1918,7 +1918,7 @@ failed: /* NOT REACHED */ } - return (err); + return err; } #pragma mark Kernel Pipe Nexus @@ -1938,16 +1938,16 @@ ipsec_register_kernel_pipe_nexus(void) result = kern_nexus_controller_create(&ipsec_ncd); if (result) { printf("%s: kern_nexus_controller_create failed: %d\n", - __FUNCTION__, result); + __FUNCTION__, result); goto done; } uuid_t dom_prov; result = kern_nexus_get_builtin_domain_provider( - NEXUS_TYPE_KERNEL_PIPE, &dom_prov); + NEXUS_TYPE_KERNEL_PIPE, &dom_prov); if (result) { printf("%s: kern_nexus_get_builtin_domain_provider failed: %d\n", - __FUNCTION__, result); + __FUNCTION__, result); goto done; } @@ -1970,7 +1970,7 @@ ipsec_register_kernel_pipe_nexus(void) result = kern_nexus_attr_create(&nxa); if (result) { printf("%s: kern_nexus_attr_create failed: %d\n", - __FUNCTION__, result); + __FUNCTION__, result); goto done; } @@ -1986,15 +1986,15 @@ ipsec_register_kernel_pipe_nexus(void) VERIFY(result == 0); result = kern_nexus_controller_register_provider(ipsec_ncd, - dom_prov, - (const uint8_t *)"com.apple.nexus.ipsec.kpipe", - &prov_init, - sizeof(prov_init), - nxa, - &ipsec_kpipe_uuid); + dom_prov, + (const uint8_t *)"com.apple.nexus.ipsec.kpipe", + &prov_init, + sizeof(prov_init), + nxa, + &ipsec_kpipe_uuid); if (result) { printf("%s: kern_nexus_controller_register_provider failed: %d\n", - __FUNCTION__, result); + __FUNCTION__, result); goto done; } @@ -2101,14 +2101,14 @@ ipsec_enable_channel(struct ipsec_pcb *pcb, struct proc *proc) goto done; } - bzero(&pp_init, sizeof (pp_init)); + bzero(&pp_init, sizeof(pp_init)); pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION; pp_init.kbi_packets = pcb->ipsec_netif_ring_size * 2; pp_init.kbi_bufsize = pcb->ipsec_slot_size; pp_init.kbi_buf_seg_size = IPSEC_IF_DEFAULT_BUF_SEG_SIZE; pp_init.kbi_max_frags = 1; pp_init.kbi_flags |= KBIF_QUANTUM; - (void) snprintf((char *)pp_init.kbi_name, sizeof (pp_init.kbi_name), + (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name), "com.apple.kpipe.%s", pcb->ipsec_if_xname); result = kern_pbufpool_create(&pp_init, &pp_init, &pcb->ipsec_kpipe_pp, @@ -2119,22 +2119,22 @@ ipsec_enable_channel(struct ipsec_pcb *pcb, struct proc *proc) } VERIFY(uuid_is_null(pcb->ipsec_kpipe_uuid)); - bzero(&init, sizeof (init)); + bzero(&init, sizeof(init)); init.nxi_version = KERN_NEXUS_CURRENT_VERSION; init.nxi_tx_pbufpool = pcb->ipsec_kpipe_pp; result = kern_nexus_controller_alloc_provider_instance(ipsec_ncd, - ipsec_kpipe_uuid, pcb, &pcb->ipsec_kpipe_uuid, &init); + ipsec_kpipe_uuid, pcb, &pcb->ipsec_kpipe_uuid, &init); if (result) { goto done; } nexus_port_t port = NEXUS_PORT_KERNEL_PIPE_CLIENT; result = kern_nexus_controller_bind_provider_instance(ipsec_ncd, - pcb->ipsec_kpipe_uuid, &port, - proc_pid(proc), NULL, NULL, 0, NEXUS_BIND_PID); + pcb->ipsec_kpipe_uuid, &port, + proc_pid(proc), NULL, NULL, 0, NEXUS_BIND_PID); if (result) { kern_nexus_controller_free_provider_instance(ipsec_ncd, - pcb->ipsec_kpipe_uuid); + pcb->ipsec_kpipe_uuid); uuid_clear(pcb->ipsec_kpipe_uuid); goto done; } @@ -2143,7 +2143,7 @@ ipsec_enable_channel(struct ipsec_pcb *pcb, struct proc *proc) done: lck_rw_unlock_exclusive(&pcb->ipsec_pcb_lock); - + if (result) { if (pcb->ipsec_kpipe_pp != NULL) { kern_pbufpool_destroy(pcb->ipsec_kpipe_pp); @@ -2151,7 +2151,7 @@ done: } 
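Registration of the kernel pipe nexus is shared, one-shot state: the first user creates the controller and provider, later users just take a reference, and the matching unregister tears down on the last release. A refcounted init/fini sketch under that assumption (pthreads standing in for the kernel's locking):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static void *g_shared;		/* stands in for the shared controller */
static unsigned g_refs;

/* First retain creates; failure leaves the refcount untouched. */
static int
shared_retain(void)
{
	int err = 0;

	pthread_mutex_lock(&g_lock);
	if (g_refs == 0) {
		g_shared = malloc(128);
		if (g_shared == NULL) {
			err = -1;
			goto out;
		}
	}
	g_refs++;
out:
	pthread_mutex_unlock(&g_lock);
	return err;
}

/* Last release destroys. */
static void
shared_release(void)
{
	pthread_mutex_lock(&g_lock);
	if (g_refs > 0 && --g_refs == 0) {
		free(g_shared);
		g_shared = NULL;
	}
	pthread_mutex_unlock(&g_lock);
}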
ipsec_unregister_kernel_pipe_nexus(); } - + return result; } @@ -2178,8 +2178,8 @@ ipsec_free_pcb(struct ipsec_pcb *pcb, bool in_list) static errno_t ipsec_ctl_bind(kern_ctl_ref kctlref, - struct sockaddr_ctl *sac, - void **unitinfo) + struct sockaddr_ctl *sac, + void **unitinfo) { struct ipsec_pcb *pcb = zalloc(ipsec_pcb_zone); memset(pcb, 0, sizeof(*pcb)); @@ -2203,13 +2203,13 @@ ipsec_ctl_bind(kern_ctl_ref kctlref, lck_mtx_init(&pcb->ipsec_input_chain_lock, ipsec_lck_grp, ipsec_lck_attr); #endif // IPSEC_NEXUS - return (0); + return 0; } static errno_t ipsec_ctl_connect(kern_ctl_ref kctlref, - struct sockaddr_ctl *sac, - void **unitinfo) + struct sockaddr_ctl *sac, + void **unitinfo) { struct ifnet_init_eparams ipsec_init = {}; errno_t result = 0; @@ -2264,7 +2264,7 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, /* Create the interface */ bzero(&ipsec_init, sizeof(ipsec_init)); ipsec_init.ver = IFNET_INIT_CURRENT_VERSION; - ipsec_init.len = sizeof (ipsec_init); + ipsec_init.len = sizeof(ipsec_init); #if IPSEC_NEXUS if (pcb->ipsec_use_netif) { @@ -2327,7 +2327,7 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, ifnet_release(pcb->ipsec_ifp); ipsec_free_pcb(pcb, true); *unitinfo = NULL; - return (result); + return result; } /* Attach to bpf */ @@ -2337,124 +2337,120 @@ ipsec_ctl_connect(kern_ctl_ref kctlref, /* The interfaces resoures allocated, mark it as running */ ifnet_set_flags(pcb->ipsec_ifp, IFF_RUNNING, IFF_RUNNING); - return (0); + return 0; } static errno_t -ipsec_detach_ip(ifnet_t interface, - protocol_family_t protocol, - socket_t pf_socket) +ipsec_detach_ip(ifnet_t interface, + protocol_family_t protocol, + socket_t pf_socket) { errno_t result = EPROTONOSUPPORT; - + /* Attempt a detach */ if (protocol == PF_INET) { - struct ifreq ifr; - + struct ifreq ifr; + bzero(&ifr, sizeof(ifr)); snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s%d", - ifnet_name(interface), ifnet_unit(interface)); - + ifnet_name(interface), ifnet_unit(interface)); + result = sock_ioctl(pf_socket, SIOCPROTODETACH, &ifr); - } - else if (protocol == PF_INET6) { - struct in6_ifreq ifr6; - + } else if (protocol == PF_INET6) { + struct in6_ifreq ifr6; + bzero(&ifr6, sizeof(ifr6)); snprintf(ifr6.ifr_name, sizeof(ifr6.ifr_name), "%s%d", - ifnet_name(interface), ifnet_unit(interface)); - + ifnet_name(interface), ifnet_unit(interface)); + result = sock_ioctl(pf_socket, SIOCPROTODETACH_IN6, &ifr6); } - + return result; } static void -ipsec_remove_address(ifnet_t interface, - protocol_family_t protocol, - ifaddr_t address, - socket_t pf_socket) +ipsec_remove_address(ifnet_t interface, + protocol_family_t protocol, + ifaddr_t address, + socket_t pf_socket) { errno_t result = 0; - + /* Attempt a detach */ if (protocol == PF_INET) { - struct ifreq ifr; - + struct ifreq ifr; + bzero(&ifr, sizeof(ifr)); snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s%d", - ifnet_name(interface), ifnet_unit(interface)); + ifnet_name(interface), ifnet_unit(interface)); result = ifaddr_address(address, &ifr.ifr_addr, sizeof(ifr.ifr_addr)); if (result != 0) { printf("ipsec_remove_address - ifaddr_address failed: %d", result); - } - else { + } else { result = sock_ioctl(pf_socket, SIOCDIFADDR, &ifr); if (result != 0) { printf("ipsec_remove_address - SIOCDIFADDR failed: %d", result); } } - } - else if (protocol == PF_INET6) { - struct in6_ifreq ifr6; - + } else if (protocol == PF_INET6) { + struct in6_ifreq ifr6; + bzero(&ifr6, sizeof(ifr6)); snprintf(ifr6.ifr_name, sizeof(ifr6.ifr_name), "%s%d", - ifnet_name(interface), ifnet_unit(interface)); + 
ifnet_name(interface), ifnet_unit(interface)); result = ifaddr_address(address, (struct sockaddr*)&ifr6.ifr_addr, - sizeof(ifr6.ifr_addr)); + sizeof(ifr6.ifr_addr)); if (result != 0) { printf("ipsec_remove_address - ifaddr_address failed (v6): %d", - result); - } - else { + result); + } else { result = sock_ioctl(pf_socket, SIOCDIFADDR_IN6, &ifr6); if (result != 0) { printf("ipsec_remove_address - SIOCDIFADDR_IN6 failed: %d", - result); + result); } } } } static void -ipsec_cleanup_family(ifnet_t interface, - protocol_family_t protocol) +ipsec_cleanup_family(ifnet_t interface, + protocol_family_t protocol) { - errno_t result = 0; - socket_t pf_socket = NULL; - ifaddr_t *addresses = NULL; - int i; - + errno_t result = 0; + socket_t pf_socket = NULL; + ifaddr_t *addresses = NULL; + int i; + if (protocol != PF_INET && protocol != PF_INET6) { printf("ipsec_cleanup_family - invalid protocol family %d\n", protocol); return; } - + /* Create a socket for removing addresses and detaching the protocol */ result = sock_socket(protocol, SOCK_DGRAM, 0, NULL, NULL, &pf_socket); if (result != 0) { - if (result != EAFNOSUPPORT) + if (result != EAFNOSUPPORT) { printf("ipsec_cleanup_family - failed to create %s socket: %d\n", - protocol == PF_INET ? "IP" : "IPv6", result); + protocol == PF_INET ? "IP" : "IPv6", result); + } goto cleanup; } - + /* always set SS_PRIV, we want to close and detach regardless */ sock_setpriv(pf_socket, 1); - + result = ipsec_detach_ip(interface, protocol, pf_socket); if (result == 0 || result == ENXIO) { /* We are done! We either detached or weren't attached. */ goto cleanup; - } - else if (result != EBUSY) { + } else if (result != EBUSY) { /* Uh, not really sure what happened here... */ printf("ipsec_cleanup_family - ipsec_detach_ip failed: %d\n", result); goto cleanup; } - + /* * At this point, we received an EBUSY error. This means there are * addresses attached. We should detach them and then try again. @@ -2462,17 +2458,17 @@ ipsec_cleanup_family(ifnet_t interface, result = ifnet_get_address_list_family(interface, &addresses, protocol); if (result != 0) { printf("fnet_get_address_list_family(%s%d, 0xblah, %s) - failed: %d\n", - ifnet_name(interface), ifnet_unit(interface), - protocol == PF_INET ? "PF_INET" : "PF_INET6", result); + ifnet_name(interface), ifnet_unit(interface), + protocol == PF_INET ? "PF_INET" : "PF_INET6", result); goto cleanup; } - + for (i = 0; addresses[i] != 0; i++) { ipsec_remove_address(interface, protocol, addresses[i], pf_socket); } ifnet_free_address_list(addresses); addresses = NULL; - + /* * The addresses should be gone, we should try the remove again. 
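ipsec_remove_address() above is ordinary socket-ioctl plumbing: fill a struct ifreq with the interface name and the address, then issue SIOCDIFADDR. A userspace analogue for IPv4 (privileged; the header that defines SIOCDIFADDR varies by platform):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>	/* struct ifreq */
#include <netinet/in.h>
#include <unistd.h>

static int
remove_v4_address(const char *ifname, const struct sockaddr_in *addr)
{
	struct ifreq ifr;
	int err;
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	if (s < 0) {
		return -1;
	}
	memset(&ifr, 0, sizeof(ifr));
	snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", ifname);
	memcpy(&ifr.ifr_addr, addr, sizeof(*addr));	/* address to delete */
	err = ioctl(s, SIOCDIFADDR, &ifr);
	close(s);
	return err;
}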
*/ @@ -2480,19 +2476,21 @@ ipsec_cleanup_family(ifnet_t interface, if (result != 0 && result != ENXIO) { printf("ipsec_cleanup_family - ipsec_detach_ip failed: %d\n", result); } - + cleanup: - if (pf_socket != NULL) + if (pf_socket != NULL) { sock_close(pf_socket); - - if (addresses != NULL) + } + + if (addresses != NULL) { ifnet_free_address_list(addresses); + } } static errno_t -ipsec_ctl_disconnect(__unused kern_ctl_ref kctlref, - __unused u_int32_t unit, - void *unitinfo) +ipsec_ctl_disconnect(__unused kern_ctl_ref kctlref, + __unused u_int32_t unit, + void *unitinfo) { struct ipsec_pcb *pcb = unitinfo; ifnet_t ifp = NULL; @@ -2604,759 +2602,765 @@ ipsec_ctl_disconnect(__unused kern_ctl_ref kctlref, lck_rw_unlock_exclusive(&pcb->ipsec_pcb_lock); ipsec_free_pcb(pcb, false); } - + return 0; } static errno_t -ipsec_ctl_send(__unused kern_ctl_ref kctlref, - __unused u_int32_t unit, - __unused void *unitinfo, - mbuf_t m, - __unused int flags) +ipsec_ctl_send(__unused kern_ctl_ref kctlref, + __unused u_int32_t unit, + __unused void *unitinfo, + mbuf_t m, + __unused int flags) { - /* Receive messages from the control socket. Currently unused. */ - mbuf_freem(m); + /* Receive messages from the control socket. Currently unused. */ + mbuf_freem(m); return 0; } static errno_t -ipsec_ctl_setopt(__unused kern_ctl_ref kctlref, - __unused u_int32_t unit, - void *unitinfo, - int opt, - void *data, - size_t len) +ipsec_ctl_setopt(__unused kern_ctl_ref kctlref, + __unused u_int32_t unit, + void *unitinfo, + int opt, + void *data, + size_t len) { - struct ipsec_pcb *pcb = unitinfo; - errno_t result = 0; - + struct ipsec_pcb *pcb = unitinfo; + errno_t result = 0; + /* check for privileges for privileged options */ switch (opt) { - case IPSEC_OPT_FLAGS: - case IPSEC_OPT_EXT_IFDATA_STATS: - case IPSEC_OPT_SET_DELEGATE_INTERFACE: - case IPSEC_OPT_OUTPUT_TRAFFIC_CLASS: - if (kauth_cred_issuser(kauth_cred_get()) == 0) { - return EPERM; - } - break; + case IPSEC_OPT_FLAGS: + case IPSEC_OPT_EXT_IFDATA_STATS: + case IPSEC_OPT_SET_DELEGATE_INTERFACE: + case IPSEC_OPT_OUTPUT_TRAFFIC_CLASS: + if (kauth_cred_issuser(kauth_cred_get()) == 0) { + return EPERM; + } + break; } - + switch (opt) { - case IPSEC_OPT_FLAGS: - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - pcb->ipsec_flags = *(u_int32_t *)data; - } + case IPSEC_OPT_FLAGS: + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + pcb->ipsec_flags = *(u_int32_t *)data; + } + break; + + case IPSEC_OPT_EXT_IFDATA_STATS: + if (len != sizeof(int)) { + result = EMSGSIZE; break; - - case IPSEC_OPT_EXT_IFDATA_STATS: - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - pcb->ipsec_ext_ifdata_stats = (*(int *)data) ? 
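The EBUSY dance in ipsec_cleanup_family() is a general pattern: one detach attempt, and if the object is busy, release its dependents and try exactly once more. A runnable sketch with stub helpers standing in for ipsec_detach_ip() and the address walk:

#include <errno.h>

/* Stub: busy on the first call (addresses still attached), then clean. */
static int
try_detach(void)
{
	static int calls;
	return calls++ == 0 ? EBUSY : 0;
}

/* Stub for removing the addresses that hold the attachment busy. */
static void
drop_deps(void)
{
}

static int
detach_with_retry(void)
{
	int err = try_detach();
	if (err == 0 || err == ENXIO) {
		return 0;	/* detached, or was never attached */
	}
	if (err != EBUSY) {
		return err;	/* unexpected failure: report it */
	}
	drop_deps();
	err = try_detach();	/* addresses gone; this should stick */
	return (err == 0 || err == ENXIO) ? 0 : err;
}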
1 : 0; + } + if (pcb->ipsec_ifp == NULL) { + // Only can set after connecting + result = EINVAL; break; - - case IPSEC_OPT_INC_IFDATA_STATS_IN: - case IPSEC_OPT_INC_IFDATA_STATS_OUT: { - struct ipsec_stats_param *utsp = (struct ipsec_stats_param *)data; - - if (utsp == NULL || len < sizeof(struct ipsec_stats_param)) { - result = EINVAL; - break; - } - if (pcb->ipsec_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - if (!pcb->ipsec_ext_ifdata_stats) { - result = EINVAL; - break; - } - if (opt == IPSEC_OPT_INC_IFDATA_STATS_IN) - ifnet_stat_increment_in(pcb->ipsec_ifp, utsp->utsp_packets, - utsp->utsp_bytes, utsp->utsp_errors); - else - ifnet_stat_increment_out(pcb->ipsec_ifp, utsp->utsp_packets, - utsp->utsp_bytes, utsp->utsp_errors); + } + pcb->ipsec_ext_ifdata_stats = (*(int *)data) ? 1 : 0; + break; + + case IPSEC_OPT_INC_IFDATA_STATS_IN: + case IPSEC_OPT_INC_IFDATA_STATS_OUT: { + struct ipsec_stats_param *utsp = (struct ipsec_stats_param *)data; + + if (utsp == NULL || len < sizeof(struct ipsec_stats_param)) { + result = EINVAL; break; } - - case IPSEC_OPT_SET_DELEGATE_INTERFACE: { - ifnet_t del_ifp = NULL; - char name[IFNAMSIZ]; - - if (len > IFNAMSIZ - 1) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - if (len != 0) { /* if len==0, del_ifp will be NULL causing the delegate to be removed */ - bcopy(data, name, len); - name[len] = 0; - result = ifnet_find_by_name(name, &del_ifp); - } - if (result == 0) { - printf("%s IPSEC_OPT_SET_DELEGATE_INTERFACE %s to %s\n", - __func__, pcb->ipsec_ifp->if_xname, - del_ifp ? del_ifp->if_xname : "NULL"); - - result = ifnet_set_delegate(pcb->ipsec_ifp, del_ifp); - if (del_ifp) - ifnet_release(del_ifp); - } + if (pcb->ipsec_ifp == NULL) { + // Only can set after connecting + result = EINVAL; break; } - - case IPSEC_OPT_OUTPUT_TRAFFIC_CLASS: { - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - mbuf_svc_class_t output_service_class = so_tc2msc(*(int *)data); - if (output_service_class == MBUF_SC_UNSPEC) { - pcb->ipsec_output_service_class = MBUF_SC_OAM; - } else { - pcb->ipsec_output_service_class = output_service_class; - } - printf("%s IPSEC_OPT_OUTPUT_TRAFFIC_CLASS %s svc %d\n", - __func__, pcb->ipsec_ifp->if_xname, - pcb->ipsec_output_service_class); + if (!pcb->ipsec_ext_ifdata_stats) { + result = EINVAL; break; } + if (opt == IPSEC_OPT_INC_IFDATA_STATS_IN) { + ifnet_stat_increment_in(pcb->ipsec_ifp, utsp->utsp_packets, + utsp->utsp_bytes, utsp->utsp_errors); + } else { + ifnet_stat_increment_out(pcb->ipsec_ifp, utsp->utsp_packets, + utsp->utsp_bytes, utsp->utsp_errors); + } + break; + } -#if IPSEC_NEXUS - case IPSEC_OPT_ENABLE_CHANNEL: { - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - if (*(int *)data) { - result = ipsec_enable_channel(pcb, current_proc()); - } else { - result = ipsec_disable_channel(pcb); - } + case IPSEC_OPT_SET_DELEGATE_INTERFACE: { + ifnet_t del_ifp = NULL; + char name[IFNAMSIZ]; + + if (len > IFNAMSIZ - 1) { + result = EMSGSIZE; break; } + if (pcb->ipsec_ifp == NULL) { + // Only can set after connecting + result = EINVAL; + break; + } + if (len != 0) { /* if len==0, del_ifp will be NULL causing the delegate to be removed */ + bcopy(data, name, len); + name[len] = 0; + result = 
ifnet_find_by_name(name, &del_ifp); + } + if (result == 0) { + printf("%s IPSEC_OPT_SET_DELEGATE_INTERFACE %s to %s\n", + __func__, pcb->ipsec_ifp->if_xname, + del_ifp ? del_ifp->if_xname : "NULL"); - case IPSEC_OPT_ENABLE_FLOWSWITCH: { - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - if (!if_is_netagent_enabled()) { - result = ENOTSUP; - break; - } - if (uuid_is_null(pcb->ipsec_nx.ms_agent)) { - result = ENOENT; - break; + result = ifnet_set_delegate(pcb->ipsec_ifp, del_ifp); + if (del_ifp) { + ifnet_release(del_ifp); } + } + break; + } - if (*(int *)data) { - if_add_netagent(pcb->ipsec_ifp, pcb->ipsec_nx.ms_agent); - pcb->ipsec_needs_netagent = true; - } else { - pcb->ipsec_needs_netagent = false; - if_delete_netagent(pcb->ipsec_ifp, pcb->ipsec_nx.ms_agent); - } + case IPSEC_OPT_OUTPUT_TRAFFIC_CLASS: { + if (len != sizeof(int)) { + result = EMSGSIZE; + break; + } + if (pcb->ipsec_ifp == NULL) { + // Only can set after connecting + result = EINVAL; break; } + mbuf_svc_class_t output_service_class = so_tc2msc(*(int *)data); + if (output_service_class == MBUF_SC_UNSPEC) { + pcb->ipsec_output_service_class = MBUF_SC_OAM; + } else { + pcb->ipsec_output_service_class = output_service_class; + } + printf("%s IPSEC_OPT_OUTPUT_TRAFFIC_CLASS %s svc %d\n", + __func__, pcb->ipsec_ifp->if_xname, + pcb->ipsec_output_service_class); + break; + } - case IPSEC_OPT_INPUT_FRAG_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - u_int32_t input_frag_size = *(u_int32_t *)data; - if (input_frag_size <= sizeof(struct ip6_hdr)) { - pcb->ipsec_frag_size_set = FALSE; - pcb->ipsec_input_frag_size = 0; - } else { - printf("SET FRAG SIZE TO %u\n", input_frag_size); - pcb->ipsec_frag_size_set = TRUE; - pcb->ipsec_input_frag_size = input_frag_size; - } +#if IPSEC_NEXUS + case IPSEC_OPT_ENABLE_CHANNEL: { + if (len != sizeof(int)) { + result = EMSGSIZE; break; } - case IPSEC_OPT_ENABLE_NETIF: { - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - lck_rw_lock_exclusive(&pcb->ipsec_pcb_lock); - pcb->ipsec_use_netif = !!(*(int *)data); - lck_rw_unlock_exclusive(&pcb->ipsec_pcb_lock); + if (pcb->ipsec_ifp == NULL) { + // Only can set after connecting + result = EINVAL; break; } - case IPSEC_OPT_SLOT_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - u_int32_t slot_size = *(u_int32_t *)data; - if (slot_size < IPSEC_IF_MIN_SLOT_SIZE || - slot_size > IPSEC_IF_MAX_SLOT_SIZE) { - return (EINVAL); - } - pcb->ipsec_slot_size = slot_size; + if (*(int *)data) { + result = ipsec_enable_channel(pcb, current_proc()); + } else { + result = ipsec_disable_channel(pcb); + } + break; + } + + case IPSEC_OPT_ENABLE_FLOWSWITCH: { + if (len != sizeof(int)) { + result = EMSGSIZE; break; } - case IPSEC_OPT_NETIF_RING_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - u_int32_t ring_size = *(u_int32_t *)data; - if (ring_size < IPSEC_IF_MIN_RING_SIZE || - ring_size > IPSEC_IF_MAX_RING_SIZE) { - return (EINVAL); - } - pcb->ipsec_netif_ring_size = ring_size; + if (pcb->ipsec_ifp == NULL) { + // Only can set after connecting + result = EINVAL; break; } - 
case IPSEC_OPT_TX_FSW_RING_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - u_int32_t ring_size = *(u_int32_t *)data; - if (ring_size < IPSEC_IF_MIN_RING_SIZE || - ring_size > IPSEC_IF_MAX_RING_SIZE) { - return (EINVAL); - } - pcb->ipsec_tx_fsw_ring_size = ring_size; + if (!if_is_netagent_enabled()) { + result = ENOTSUP; break; } - case IPSEC_OPT_RX_FSW_RING_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - if (pcb->ipsec_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - u_int32_t ring_size = *(u_int32_t *)data; - if (ring_size < IPSEC_IF_MIN_RING_SIZE || - ring_size > IPSEC_IF_MAX_RING_SIZE) { - return (EINVAL); - } - pcb->ipsec_rx_fsw_ring_size = ring_size; + if (uuid_is_null(pcb->ipsec_nx.ms_agent)) { + result = ENOENT; break; } -#endif // IPSEC_NEXUS - - default: - result = ENOPROTOOPT; + if (*(int *)data) { + if_add_netagent(pcb->ipsec_ifp, pcb->ipsec_nx.ms_agent); + pcb->ipsec_needs_netagent = true; + } else { + pcb->ipsec_needs_netagent = false; + if_delete_netagent(pcb->ipsec_ifp, pcb->ipsec_nx.ms_agent); + } + break; + } + + case IPSEC_OPT_INPUT_FRAG_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + break; + } + u_int32_t input_frag_size = *(u_int32_t *)data; + if (input_frag_size <= sizeof(struct ip6_hdr)) { + pcb->ipsec_frag_size_set = FALSE; + pcb->ipsec_input_frag_size = 0; + } else { + printf("SET FRAG SIZE TO %u\n", input_frag_size); + pcb->ipsec_frag_size_set = TRUE; + pcb->ipsec_input_frag_size = input_frag_size; + } + break; + } + case IPSEC_OPT_ENABLE_NETIF: { + if (len != sizeof(int)) { + result = EMSGSIZE; + break; + } + if (pcb->ipsec_ifp != NULL) { + // Only can set before connecting + result = EINVAL; + break; + } + lck_rw_lock_exclusive(&pcb->ipsec_pcb_lock); + pcb->ipsec_use_netif = !!(*(int *)data); + lck_rw_unlock_exclusive(&pcb->ipsec_pcb_lock); + break; + } + case IPSEC_OPT_SLOT_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + break; + } + if (pcb->ipsec_ifp != NULL) { + // Only can set before connecting + result = EINVAL; break; + } + u_int32_t slot_size = *(u_int32_t *)data; + if (slot_size < IPSEC_IF_MIN_SLOT_SIZE || + slot_size > IPSEC_IF_MAX_SLOT_SIZE) { + return EINVAL; + } + pcb->ipsec_slot_size = slot_size; + break; } - + case IPSEC_OPT_NETIF_RING_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + break; + } + if (pcb->ipsec_ifp != NULL) { + // Only can set before connecting + result = EINVAL; + break; + } + u_int32_t ring_size = *(u_int32_t *)data; + if (ring_size < IPSEC_IF_MIN_RING_SIZE || + ring_size > IPSEC_IF_MAX_RING_SIZE) { + return EINVAL; + } + pcb->ipsec_netif_ring_size = ring_size; + break; + } + case IPSEC_OPT_TX_FSW_RING_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + break; + } + if (pcb->ipsec_ifp != NULL) { + // Only can set before connecting + result = EINVAL; + break; + } + u_int32_t ring_size = *(u_int32_t *)data; + if (ring_size < IPSEC_IF_MIN_RING_SIZE || + ring_size > IPSEC_IF_MAX_RING_SIZE) { + return EINVAL; + } + pcb->ipsec_tx_fsw_ring_size = ring_size; + break; + } + case IPSEC_OPT_RX_FSW_RING_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + break; + } + if (pcb->ipsec_ifp != NULL) { + // Only can set before connecting + result = EINVAL; + break; + } + u_int32_t ring_size = *(u_int32_t *)data; + if (ring_size < IPSEC_IF_MIN_RING_SIZE || + ring_size > 
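Each IPSEC_OPT_* setter above follows one template: exact-size payload or EMSGSIZE, correct connection state or EINVAL, then a range check before committing. A sketch of that template (the MIN/MAX bounds here are illustrative, not the kernel's):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define RING_MIN 16	/* illustrative bounds */
#define RING_MAX 1024

static int
set_ring_size(bool connected, const void *data, size_t len, uint32_t *out)
{
	uint32_t v;

	if (len != sizeof(uint32_t)) {
		return EMSGSIZE;	/* payload size must match exactly */
	}
	if (connected) {
		return EINVAL;		/* only settable before connecting */
	}
	memcpy(&v, data, sizeof(v));	/* no unaligned deref of caller data */
	if (v < RING_MIN || v > RING_MAX) {
		return EINVAL;
	}
	*out = v;
	return 0;
}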
IPSEC_IF_MAX_RING_SIZE) { + return EINVAL; + } + pcb->ipsec_rx_fsw_ring_size = ring_size; + break; + } + +#endif // IPSEC_NEXUS + + default: + result = ENOPROTOOPT; + break; + } + return result; } static errno_t ipsec_ctl_getopt(__unused kern_ctl_ref kctlref, - __unused u_int32_t unit, - void *unitinfo, - int opt, - void *data, - size_t *len) + __unused u_int32_t unit, + void *unitinfo, + int opt, + void *data, + size_t *len) { struct ipsec_pcb *pcb = unitinfo; errno_t result = 0; - + switch (opt) { - case IPSEC_OPT_FLAGS: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->ipsec_flags; - } - break; + case IPSEC_OPT_FLAGS: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->ipsec_flags; } - - case IPSEC_OPT_EXT_IFDATA_STATS: { - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - *(int *)data = (pcb->ipsec_ext_ifdata_stats) ? 1 : 0; - } - break; + break; + } + + case IPSEC_OPT_EXT_IFDATA_STATS: { + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { + *(int *)data = (pcb->ipsec_ext_ifdata_stats) ? 1 : 0; } - - case IPSEC_OPT_IFNAME: { - if (*len < MIN(strlen(pcb->ipsec_if_xname) + 1, sizeof(pcb->ipsec_if_xname))) { - result = EMSGSIZE; - } else { - if (pcb->ipsec_ifp == NULL) { - // Only can get after connecting - result = EINVAL; - break; - } - *len = snprintf(data, *len, "%s", pcb->ipsec_if_xname) + 1; + break; + } + + case IPSEC_OPT_IFNAME: { + if (*len < MIN(strlen(pcb->ipsec_if_xname) + 1, sizeof(pcb->ipsec_if_xname))) { + result = EMSGSIZE; + } else { + if (pcb->ipsec_ifp == NULL) { + // Only can get after connecting + result = EINVAL; + break; } - break; + *len = snprintf(data, *len, "%s", pcb->ipsec_if_xname) + 1; } - - case IPSEC_OPT_OUTPUT_TRAFFIC_CLASS: { - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - *(int *)data = so_svc2tc(pcb->ipsec_output_service_class); - } - break; + break; + } + + case IPSEC_OPT_OUTPUT_TRAFFIC_CLASS: { + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { + *(int *)data = so_svc2tc(pcb->ipsec_output_service_class); } + break; + } #if IPSEC_NEXUS - case IPSEC_OPT_ENABLE_CHANNEL: { - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - lck_rw_lock_shared(&pcb->ipsec_pcb_lock); - *(int *)data = pcb->ipsec_kpipe_enabled; - lck_rw_unlock_shared(&pcb->ipsec_pcb_lock); - } - break; - } - - case IPSEC_OPT_ENABLE_FLOWSWITCH: { - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - *(int *)data = if_check_netagent(pcb->ipsec_ifp, pcb->ipsec_nx.ms_agent); - } - break; + case IPSEC_OPT_ENABLE_CHANNEL: { + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { + lck_rw_lock_shared(&pcb->ipsec_pcb_lock); + *(int *)data = pcb->ipsec_kpipe_enabled; + lck_rw_unlock_shared(&pcb->ipsec_pcb_lock); } + break; + } - case IPSEC_OPT_ENABLE_NETIF: { - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - lck_rw_lock_shared(&pcb->ipsec_pcb_lock); - *(int *)data = !!pcb->ipsec_use_netif; - lck_rw_unlock_shared(&pcb->ipsec_pcb_lock); - } - break; + case IPSEC_OPT_ENABLE_FLOWSWITCH: { + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { + *(int *)data = if_check_netagent(pcb->ipsec_ifp, pcb->ipsec_nx.ms_agent); } + break; + } - case IPSEC_OPT_GET_CHANNEL_UUID: { + case IPSEC_OPT_ENABLE_NETIF: { + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { lck_rw_lock_shared(&pcb->ipsec_pcb_lock); - if (uuid_is_null(pcb->ipsec_kpipe_uuid)) { - result = ENXIO; - } else if (*len != sizeof(uuid_t)) { - result = EMSGSIZE; - } 
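The IPSEC_OPT_IFNAME getter sizes its reply with snprintf's return value, which is the full formatted length even when the destination was too small; the +1 covers the terminating NUL. In isolation:

#include <stdio.h>

/* Returns the bytes needed for name plus its NUL, regardless of cap;
 * e.g. copy_name(buf, sizeof(buf), "ipsec0") is 7 even if cap < 7. */
static size_t
copy_name(char *dst, size_t cap, const char *name)
{
	int n = snprintf(dst, cap, "%s", name);
	return (size_t)n + 1;
}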
else { - uuid_copy(data, pcb->ipsec_kpipe_uuid); - } + *(int *)data = !!pcb->ipsec_use_netif; lck_rw_unlock_shared(&pcb->ipsec_pcb_lock); - break; } + break; + } - case IPSEC_OPT_INPUT_FRAG_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->ipsec_input_frag_size; - } - break; + case IPSEC_OPT_GET_CHANNEL_UUID: { + lck_rw_lock_shared(&pcb->ipsec_pcb_lock); + if (uuid_is_null(pcb->ipsec_kpipe_uuid)) { + result = ENXIO; + } else if (*len != sizeof(uuid_t)) { + result = EMSGSIZE; + } else { + uuid_copy(data, pcb->ipsec_kpipe_uuid); } - case IPSEC_OPT_SLOT_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->ipsec_slot_size; - } - break; + lck_rw_unlock_shared(&pcb->ipsec_pcb_lock); + break; + } + + case IPSEC_OPT_INPUT_FRAG_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->ipsec_input_frag_size; } - case IPSEC_OPT_NETIF_RING_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->ipsec_netif_ring_size; - } - break; + break; + } + case IPSEC_OPT_SLOT_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->ipsec_slot_size; } - case IPSEC_OPT_TX_FSW_RING_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->ipsec_tx_fsw_ring_size; - } - break; + break; + } + case IPSEC_OPT_NETIF_RING_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->ipsec_netif_ring_size; } - case IPSEC_OPT_RX_FSW_RING_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->ipsec_rx_fsw_ring_size; - } - break; + break; + } + case IPSEC_OPT_TX_FSW_RING_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->ipsec_tx_fsw_ring_size; + } + break; + } + case IPSEC_OPT_RX_FSW_RING_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->ipsec_rx_fsw_ring_size; } + break; + } #endif // IPSEC_NEXUS - default: { - result = ENOPROTOOPT; - break; - } + default: { + result = ENOPROTOOPT; + break; + } } - + return result; } /* Network Interface functions */ static errno_t ipsec_output(ifnet_t interface, - mbuf_t data) + mbuf_t data) { struct ipsec_pcb *pcb = ifnet_softc(interface); - struct ipsec_output_state ipsec_state; - struct route ro; - struct route_in6 ro6; - int length; - struct ip *ip; - struct ip6_hdr *ip6; - struct ip_out_args ipoa; - struct ip6_out_args ip6oa; - int error = 0; - u_int ip_version = 0; - int flags = 0; - struct flowadv *adv = NULL; - + struct ipsec_output_state ipsec_state; + struct route ro; + struct route_in6 ro6; + int length; + struct ip *ip; + struct ip6_hdr *ip6; + struct ip_out_args ipoa; + struct ip6_out_args ip6oa; + int error = 0; + u_int ip_version = 0; + int flags = 0; + struct flowadv *adv = NULL; + // Make sure this packet isn't looping through the interface if (necp_get_last_interface_index_from_packet(data) == interface->if_index) { error = EINVAL; goto ipsec_output_err; } - + // Mark the interface so NECP can evaluate tunnel policy necp_mark_packet_from_interface(data, interface); - - ip = mtod(data, struct ip *); - ip_version = ip->ip_v; - - switch (ip_version) { - case 4: { + + ip = mtod(data, struct ip *); + ip_version = ip->ip_v; + + switch (ip_version) { + case 4: { #if IPSEC_NEXUS - if (!pcb->ipsec_use_netif) + 
if (!pcb->ipsec_use_netif) #endif // IPSEC_NEXUS - { - int af = AF_INET; - bpf_tap_out(pcb->ipsec_ifp, DLT_NULL, data, &af, sizeof(af)); + { + int af = AF_INET; + bpf_tap_out(pcb->ipsec_ifp, DLT_NULL, data, &af, sizeof(af)); + } + + /* Apply encryption */ + memset(&ipsec_state, 0, sizeof(ipsec_state)); + ipsec_state.m = data; + ipsec_state.dst = (struct sockaddr *)&ip->ip_dst; + memset(&ipsec_state.ro, 0, sizeof(ipsec_state.ro)); + + error = ipsec4_interface_output(&ipsec_state, interface); + /* Tunneled in IPv6 - packet is gone */ + if (error == 0 && ipsec_state.tunneled == 6) { + goto done; + } + + data = ipsec_state.m; + if (error || data == NULL) { + if (error) { + printf("ipsec_output: ipsec4_output error %d.\n", error); } + goto ipsec_output_err; + } - /* Apply encryption */ - memset(&ipsec_state, 0, sizeof(ipsec_state)); - ipsec_state.m = data; - ipsec_state.dst = (struct sockaddr *)&ip->ip_dst; - memset(&ipsec_state.ro, 0, sizeof(ipsec_state.ro)); - - error = ipsec4_interface_output(&ipsec_state, interface); - /* Tunneled in IPv6 - packet is gone */ - if (error == 0 && ipsec_state.tunneled == 6) { - goto done; - } - - data = ipsec_state.m; - if (error || data == NULL) { - if (error) { - printf("ipsec_output: ipsec4_output error %d.\n", error); - } - goto ipsec_output_err; - } - - /* Set traffic class, set flow */ - m_set_service_class(data, pcb->ipsec_output_service_class); - data->m_pkthdr.pkt_flowsrc = FLOWSRC_IFNET; - data->m_pkthdr.pkt_flowid = interface->if_flowhash; - data->m_pkthdr.pkt_proto = ip->ip_p; - data->m_pkthdr.pkt_flags = (PKTF_FLOW_ID | PKTF_FLOW_ADV | PKTF_FLOW_LOCALSRC); - - /* Flip endian-ness for ip_output */ - ip = mtod(data, struct ip *); - NTOHS(ip->ip_len); - NTOHS(ip->ip_off); - - /* Increment statistics */ - length = mbuf_pkthdr_len(data); - ifnet_stat_increment_out(interface, 1, length, 0); - - /* Send to ip_output */ - memset(&ro, 0, sizeof(ro)); - - flags = (IP_OUTARGS | /* Passing out args to specify interface */ - IP_NOIPSEC); /* To ensure the packet doesn't go through ipsec twice */ - - memset(&ipoa, 0, sizeof(ipoa)); - ipoa.ipoa_flowadv.code = 0; - ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR; - if (ipsec_state.outgoing_if) { - ipoa.ipoa_boundif = ipsec_state.outgoing_if; - ipoa.ipoa_flags |= IPOAF_BOUND_IF; - } - ipsec_set_ipoa_for_interface(pcb->ipsec_ifp, &ipoa); - - adv = &ipoa.ipoa_flowadv; - - (void)ip_output(data, NULL, &ro, flags, NULL, &ipoa); - data = NULL; - - if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) { - error = ENOBUFS; - ifnet_disable_output(interface); - } - - goto done; + /* Set traffic class, set flow */ + m_set_service_class(data, pcb->ipsec_output_service_class); + data->m_pkthdr.pkt_flowsrc = FLOWSRC_IFNET; + data->m_pkthdr.pkt_flowid = interface->if_flowhash; + data->m_pkthdr.pkt_proto = ip->ip_p; + data->m_pkthdr.pkt_flags = (PKTF_FLOW_ID | PKTF_FLOW_ADV | PKTF_FLOW_LOCALSRC); + + /* Flip endian-ness for ip_output */ + ip = mtod(data, struct ip *); + NTOHS(ip->ip_len); + NTOHS(ip->ip_off); + + /* Increment statistics */ + length = mbuf_pkthdr_len(data); + ifnet_stat_increment_out(interface, 1, length, 0); + + /* Send to ip_output */ + memset(&ro, 0, sizeof(ro)); + + flags = (IP_OUTARGS | /* Passing out args to specify interface */ + IP_NOIPSEC); /* To ensure the packet doesn't go through ipsec twice */ + + memset(&ipoa, 0, sizeof(ipoa)); + ipoa.ipoa_flowadv.code = 0; + ipoa.ipoa_flags = IPOAF_SELECT_SRCIF | IPOAF_BOUND_SRCADDR; + if (ipsec_state.outgoing_if) { + ipoa.ipoa_boundif = 
ipsec_state.outgoing_if; + ipoa.ipoa_flags |= IPOAF_BOUND_IF; } - case 6: { + ipsec_set_ipoa_for_interface(pcb->ipsec_ifp, &ipoa); + + adv = &ipoa.ipoa_flowadv; + + (void)ip_output(data, NULL, &ro, flags, NULL, &ipoa); + data = NULL; + + if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) { + error = ENOBUFS; + ifnet_disable_output(interface); + } + + goto done; + } + case 6: { #if IPSEC_NEXUS - if (!pcb->ipsec_use_netif) + if (!pcb->ipsec_use_netif) #endif // IPSEC_NEXUS - { - int af = AF_INET6; - bpf_tap_out(pcb->ipsec_ifp, DLT_NULL, data, &af, sizeof(af)); - } + { + int af = AF_INET6; + bpf_tap_out(pcb->ipsec_ifp, DLT_NULL, data, &af, sizeof(af)); + } - data = ipsec6_splithdr(data); - if (data == NULL) { - printf("ipsec_output: ipsec6_splithdr returned NULL\n"); - goto ipsec_output_err; - } + data = ipsec6_splithdr(data); + if (data == NULL) { + printf("ipsec_output: ipsec6_splithdr returned NULL\n"); + goto ipsec_output_err; + } - ip6 = mtod(data, struct ip6_hdr *); - - memset(&ipsec_state, 0, sizeof(ipsec_state)); - ipsec_state.m = data; - ipsec_state.dst = (struct sockaddr *)&ip6->ip6_dst; - memset(&ipsec_state.ro, 0, sizeof(ipsec_state.ro)); - - error = ipsec6_interface_output(&ipsec_state, interface, &ip6->ip6_nxt, ipsec_state.m); - if (error == 0 && ipsec_state.tunneled == 4) { /* tunneled in IPv4 - packet is gone */ - goto done; + ip6 = mtod(data, struct ip6_hdr *); + + memset(&ipsec_state, 0, sizeof(ipsec_state)); + ipsec_state.m = data; + ipsec_state.dst = (struct sockaddr *)&ip6->ip6_dst; + memset(&ipsec_state.ro, 0, sizeof(ipsec_state.ro)); + + error = ipsec6_interface_output(&ipsec_state, interface, &ip6->ip6_nxt, ipsec_state.m); + if (error == 0 && ipsec_state.tunneled == 4) { /* tunneled in IPv4 - packet is gone */ + goto done; + } + data = ipsec_state.m; + if (error || data == NULL) { + if (error) { + printf("ipsec_output: ipsec6_output error %d\n", error); } - data = ipsec_state.m; - if (error || data == NULL) { - if (error) { - printf("ipsec_output: ipsec6_output error %d\n", error); - } - goto ipsec_output_err; - } - - /* Set traffic class, set flow */ - m_set_service_class(data, pcb->ipsec_output_service_class); - data->m_pkthdr.pkt_flowsrc = FLOWSRC_IFNET; - data->m_pkthdr.pkt_flowid = interface->if_flowhash; - data->m_pkthdr.pkt_proto = ip6->ip6_nxt; - data->m_pkthdr.pkt_flags = (PKTF_FLOW_ID | PKTF_FLOW_ADV | PKTF_FLOW_LOCALSRC); - - /* Increment statistics */ - length = mbuf_pkthdr_len(data); - ifnet_stat_increment_out(interface, 1, length, 0); - - /* Send to ip6_output */ - memset(&ro6, 0, sizeof(ro6)); - - flags = IPV6_OUTARGS; - - memset(&ip6oa, 0, sizeof(ip6oa)); - ip6oa.ip6oa_flowadv.code = 0; - ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR; - if (ipsec_state.outgoing_if) { - ip6oa.ip6oa_boundif = ipsec_state.outgoing_if; - ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; - } - ipsec_set_ip6oa_for_interface(pcb->ipsec_ifp, &ip6oa); - - adv = &ip6oa.ip6oa_flowadv; - - (void) ip6_output(data, NULL, &ro6, flags, NULL, NULL, &ip6oa); - data = NULL; - - if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) { - error = ENOBUFS; - ifnet_disable_output(interface); - } - - goto done; + goto ipsec_output_err; } - default: { - printf("ipsec_output: Received unknown packet version %d.\n", ip_version); - error = EINVAL; - goto ipsec_output_err; + + /* Set traffic class, set flow */ + m_set_service_class(data, pcb->ipsec_output_service_class); + data->m_pkthdr.pkt_flowsrc = FLOWSRC_IFNET; + data->m_pkthdr.pkt_flowid = 
interface->if_flowhash; + data->m_pkthdr.pkt_proto = ip6->ip6_nxt; + data->m_pkthdr.pkt_flags = (PKTF_FLOW_ID | PKTF_FLOW_ADV | PKTF_FLOW_LOCALSRC); + + /* Increment statistics */ + length = mbuf_pkthdr_len(data); + ifnet_stat_increment_out(interface, 1, length, 0); + + /* Send to ip6_output */ + memset(&ro6, 0, sizeof(ro6)); + + flags = IPV6_OUTARGS; + + memset(&ip6oa, 0, sizeof(ip6oa)); + ip6oa.ip6oa_flowadv.code = 0; + ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR; + if (ipsec_state.outgoing_if) { + ip6oa.ip6oa_boundif = ipsec_state.outgoing_if; + ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; } - } - + ipsec_set_ip6oa_for_interface(pcb->ipsec_ifp, &ip6oa); + + adv = &ip6oa.ip6oa_flowadv; + + (void) ip6_output(data, NULL, &ro6, flags, NULL, NULL, &ip6oa); + data = NULL; + + if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) { + error = ENOBUFS; + ifnet_disable_output(interface); + } + + goto done; + } + default: { + printf("ipsec_output: Received unknown packet version %d.\n", ip_version); + error = EINVAL; + goto ipsec_output_err; + } + } + done: - return error; - + return error; + ipsec_output_err: - if (data) - mbuf_freem(data); + if (data) { + mbuf_freem(data); + } goto done; } static void -ipsec_start(ifnet_t interface) +ipsec_start(ifnet_t interface) { mbuf_t data; struct ipsec_pcb *pcb = ifnet_softc(interface); VERIFY(pcb != NULL); for (;;) { - if (ifnet_dequeue(interface, &data) != 0) + if (ifnet_dequeue(interface, &data) != 0) { break; - if (ipsec_output(interface, data) != 0) + } + if (ipsec_output(interface, data) != 0) { break; + } } } /* Network Interface functions */ static errno_t -ipsec_demux(__unused ifnet_t interface, - mbuf_t data, - __unused char *frame_header, - protocol_family_t *protocol) +ipsec_demux(__unused ifnet_t interface, + mbuf_t data, + __unused char *frame_header, + protocol_family_t *protocol) { - struct ip *ip; - u_int ip_version; - + struct ip *ip; + u_int ip_version; + while (data != NULL && mbuf_len(data) < 1) { data = mbuf_next(data); } - - if (data == NULL) + + if (data == NULL) { return ENOENT; - - ip = mtod(data, struct ip *); - ip_version = ip->ip_v; - - switch(ip_version) { - case 4: - *protocol = PF_INET; - return 0; - case 6: - *protocol = PF_INET6; - return 0; - default: - break; } - + + ip = mtod(data, struct ip *); + ip_version = ip->ip_v; + + switch (ip_version) { + case 4: + *protocol = PF_INET; + return 0; + case 6: + *protocol = PF_INET6; + return 0; + default: + break; + } + return 0; } static errno_t -ipsec_add_proto(__unused ifnet_t interface, - protocol_family_t protocol, - __unused const struct ifnet_demux_desc *demux_array, - __unused u_int32_t demux_count) +ipsec_add_proto(__unused ifnet_t interface, + protocol_family_t protocol, + __unused const struct ifnet_demux_desc *demux_array, + __unused u_int32_t demux_count) { - switch(protocol) { - case PF_INET: - return 0; - case PF_INET6: - return 0; - default: - break; + switch (protocol) { + case PF_INET: + return 0; + case PF_INET6: + return 0; + default: + break; } - + return ENOPROTOOPT; } static errno_t -ipsec_del_proto(__unused ifnet_t interface, - __unused protocol_family_t protocol) +ipsec_del_proto(__unused ifnet_t interface, + __unused protocol_family_t protocol) { return 0; } static errno_t ipsec_ioctl(ifnet_t interface, - u_long command, - void *data) + u_long command, + void *data) { #if IPSEC_NEXUS struct ipsec_pcb *pcb = ifnet_softc(interface); #endif - errno_t result = 0; - - switch(command) { - case SIOCSIFMTU: { + errno_t result = 0; + + 
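/*
 * Illustrative only: a hedged sketch of how a user-space caller would
 * reach the SIOCSIFMTU case below. The interface name is hypothetical
 * and error handling is minimal:
 *
 *	struct ifreq ifr;
 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "ipsec0", sizeof(ifr.ifr_name));
 *	ifr.ifr_mtu = 1400;
 *	if (ioctl(s, SIOCSIFMTU, &ifr) == -1)
 *		perror("SIOCSIFMTU");
 *
 * In netif mode the handler fails such a request with EINVAL whenever
 * the requested MTU exceeds ipsec_slot_size, so that every packet
 * still fits in a channel buffer.
 */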
switch (command) { + case SIOCSIFMTU: { #if IPSEC_NEXUS - if (pcb->ipsec_use_netif) { - // Make sure we can fit packets in the channel buffers - if (((uint64_t)((struct ifreq*)data)->ifr_mtu) > pcb->ipsec_slot_size) { - result = EINVAL; - } else { - ifnet_set_mtu(interface, (uint32_t)((struct ifreq*)data)->ifr_mtu); - } - } else -#endif // IPSEC_NEXUS - { - ifnet_set_mtu(interface, ((struct ifreq*)data)->ifr_mtu); + if (pcb->ipsec_use_netif) { + // Make sure we can fit packets in the channel buffers + if (((uint64_t)((struct ifreq*)data)->ifr_mtu) > pcb->ipsec_slot_size) { + result = EINVAL; + } else { + ifnet_set_mtu(interface, (uint32_t)((struct ifreq*)data)->ifr_mtu); } - break; + } else +#endif // IPSEC_NEXUS + { + ifnet_set_mtu(interface, ((struct ifreq*)data)->ifr_mtu); } - - case SIOCSIFFLAGS: - /* ifioctl() takes care of it */ - break; - - default: - result = EOPNOTSUPP; + break; } - + + case SIOCSIFFLAGS: + /* ifioctl() takes care of it */ + break; + + default: + result = EOPNOTSUPP; + } + return result; } @@ -3372,9 +3376,9 @@ ipsec_detached(ifnet_t interface) static errno_t ipsec_proto_input(ifnet_t interface, - protocol_family_t protocol, - mbuf_t m, - __unused char *frame_header) + protocol_family_t protocol, + mbuf_t m, + __unused char *frame_header) { mbuf_pkthdr_setrcvif(m, interface); @@ -3394,54 +3398,53 @@ ipsec_proto_input(ifnet_t interface, pktap_input(interface, protocol, m, NULL); } - int32_t pktlen = m->m_pkthdr.len; + int32_t pktlen = m->m_pkthdr.len; if (proto_input(protocol, m) != 0) { ifnet_stat_increment_in(interface, 0, 0, 1); m_freem(m); } else { ifnet_stat_increment_in(interface, 1, pktlen, 0); } - + return 0; } static errno_t -ipsec_proto_pre_output(__unused ifnet_t interface, - protocol_family_t protocol, - __unused mbuf_t *packet, - __unused const struct sockaddr *dest, - __unused void *route, - __unused char *frame_type, - __unused char *link_layer_dest) +ipsec_proto_pre_output(__unused ifnet_t interface, + protocol_family_t protocol, + __unused mbuf_t *packet, + __unused const struct sockaddr *dest, + __unused void *route, + __unused char *frame_type, + __unused char *link_layer_dest) { - *(protocol_family_t *)(void *)frame_type = protocol; return 0; } static errno_t -ipsec_attach_proto(ifnet_t interface, - protocol_family_t protocol) +ipsec_attach_proto(ifnet_t interface, + protocol_family_t protocol) { - struct ifnet_attach_proto_param proto; - errno_t result; - + struct ifnet_attach_proto_param proto; + errno_t result; + bzero(&proto, sizeof(proto)); proto.input = ipsec_proto_input; proto.pre_output = ipsec_proto_pre_output; - + result = ifnet_attach_protocol(interface, protocol, &proto); if (result != 0 && result != EEXIST) { printf("ipsec_attach_inet - ifnet_attach_protocol %d failed: %d\n", - protocol, result); + protocol, result); } - + return result; } errno_t -ipsec_inject_inbound_packet(ifnet_t interface, - mbuf_t packet) +ipsec_inject_inbound_packet(ifnet_t interface, + mbuf_t packet) { #if IPSEC_NEXUS struct ipsec_pcb *pcb = ifnet_softc(interface); @@ -3469,7 +3472,7 @@ ipsec_inject_inbound_packet(ifnet_t interface, kern_channel_notify(rx_ring, 0); } - return (0); + return 0; } else #endif // IPSEC_NEXUS { @@ -3509,16 +3512,17 @@ void ipsec_set_ipoa_for_interface(ifnet_t interface, struct ip_out_args *ipoa) { struct ipsec_pcb *pcb; - - if (interface == NULL || ipoa == NULL) + + if (interface == NULL || ipoa == NULL) { return; + } pcb = ifnet_softc(interface); - + if (net_qos_policy_restricted == 0) { ipoa->ipoa_flags |= 
IPOAF_QOSMARKING_ALLOWED; ipoa->ipoa_sotc = so_svc2tc(pcb->ipsec_output_service_class); } else if (pcb->ipsec_output_service_class != MBUF_SC_VO || - net_qos_policy_restrict_avapps != 0) { + net_qos_policy_restrict_avapps != 0) { ipoa->ipoa_flags &= ~IPOAF_QOSMARKING_ALLOWED; } else { ipoa->ipoa_flags |= IP6OAF_QOSMARKING_ALLOWED; @@ -3530,16 +3534,17 @@ void ipsec_set_ip6oa_for_interface(ifnet_t interface, struct ip6_out_args *ip6oa) { struct ipsec_pcb *pcb; - - if (interface == NULL || ip6oa == NULL) + + if (interface == NULL || ip6oa == NULL) { return; + } pcb = ifnet_softc(interface); - + if (net_qos_policy_restricted == 0) { ip6oa->ip6oa_flags |= IPOAF_QOSMARKING_ALLOWED; ip6oa->ip6oa_sotc = so_svc2tc(pcb->ipsec_output_service_class); } else if (pcb->ipsec_output_service_class != MBUF_SC_VO || - net_qos_policy_restrict_avapps != 0) { + net_qos_policy_restrict_avapps != 0) { ip6oa->ip6oa_flags &= ~IPOAF_QOSMARKING_ALLOWED; } else { ip6oa->ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; diff --git a/bsd/net/if_ipsec.h b/bsd/net/if_ipsec.h index ca18916cf..3c0fcbd2b 100644 --- a/bsd/net/if_ipsec.h +++ b/bsd/net/if_ipsec.h @@ -27,8 +27,8 @@ */ -#ifndef _NET_IF_IPSEC_H_ -#define _NET_IF_IPSEC_H_ +#ifndef _NET_IF_IPSEC_H_ +#define _NET_IF_IPSEC_H_ #ifdef BSD_KERNEL_PRIVATE @@ -39,10 +39,10 @@ errno_t ipsec_register_control(void); /* Helpers */ -int ipsec_interface_isvalid (ifnet_t interface); +int ipsec_interface_isvalid(ifnet_t interface); boolean_t ipsec_interface_needs_netagent(ifnet_t interface); -errno_t ipsec_inject_inbound_packet(ifnet_t interface, mbuf_t packet); +errno_t ipsec_inject_inbound_packet(ifnet_t interface, mbuf_t packet); void ipsec_set_pkthdr_for_interface(ifnet_t interface, mbuf_t packet, int family); @@ -61,23 +61,23 @@ void ipsec_set_ip6oa_for_interface(ifnet_t interface, struct ip6_out_args *ip6oa /* * Socket option names to manage ipsec */ -#define IPSEC_OPT_FLAGS 1 -#define IPSEC_OPT_IFNAME 2 -#define IPSEC_OPT_EXT_IFDATA_STATS 3 /* get|set (type int) */ -#define IPSEC_OPT_INC_IFDATA_STATS_IN 4 /* set to increment stat counters (type struct ipsec_stats_param) */ -#define IPSEC_OPT_INC_IFDATA_STATS_OUT 5 /* set to increment stat counters (type struct ipsec_stats_param) */ -#define IPSEC_OPT_SET_DELEGATE_INTERFACE 6 /* set the delegate interface (char[]) */ -#define IPSEC_OPT_OUTPUT_TRAFFIC_CLASS 7 /* set the traffic class for packets leaving the interface, see sys/socket.h */ -#define IPSEC_OPT_ENABLE_CHANNEL 8 /* enable a kernel pipe nexus that allows the owner to open a channel to act as a driver */ -#define IPSEC_OPT_GET_CHANNEL_UUID 9 /* get the uuid of the kernel pipe nexus instance */ -#define IPSEC_OPT_ENABLE_FLOWSWITCH 10 /* enable a flowswitch nexus that clients can use */ -#define IPSEC_OPT_INPUT_FRAG_SIZE 11 /* set the maximum size of input packets before fragmenting as a uint32_t */ - -#define IPSEC_OPT_ENABLE_NETIF 12 /* Must be set before connecting */ -#define IPSEC_OPT_SLOT_SIZE 13 /* Must be set before connecting */ -#define IPSEC_OPT_NETIF_RING_SIZE 14 /* Must be set before connecting */ -#define IPSEC_OPT_TX_FSW_RING_SIZE 15 /* Must be set before connecting */ -#define IPSEC_OPT_RX_FSW_RING_SIZE 16 /* Must be set before connecting */ +#define IPSEC_OPT_FLAGS 1 +#define IPSEC_OPT_IFNAME 2 +#define IPSEC_OPT_EXT_IFDATA_STATS 3 /* get|set (type int) */ +#define IPSEC_OPT_INC_IFDATA_STATS_IN 4 /* set to increment stat counters (type struct ipsec_stats_param) */ +#define IPSEC_OPT_INC_IFDATA_STATS_OUT 5 /* set to increment stat counters (type struct 
ipsec_stats_param) */ +#define IPSEC_OPT_SET_DELEGATE_INTERFACE 6 /* set the delegate interface (char[]) */ +#define IPSEC_OPT_OUTPUT_TRAFFIC_CLASS 7 /* set the traffic class for packets leaving the interface, see sys/socket.h */ +#define IPSEC_OPT_ENABLE_CHANNEL 8 /* enable a kernel pipe nexus that allows the owner to open a channel to act as a driver */ +#define IPSEC_OPT_GET_CHANNEL_UUID 9 /* get the uuid of the kernel pipe nexus instance */ +#define IPSEC_OPT_ENABLE_FLOWSWITCH 10 /* enable a flowswitch nexus that clients can use */ +#define IPSEC_OPT_INPUT_FRAG_SIZE 11 /* set the maximum size of input packets before fragmenting as a uint32_t */ + +#define IPSEC_OPT_ENABLE_NETIF 12 /* Must be set before connecting */ +#define IPSEC_OPT_SLOT_SIZE 13 /* Must be set before connecting */ +#define IPSEC_OPT_NETIF_RING_SIZE 14 /* Must be set before connecting */ +#define IPSEC_OPT_TX_FSW_RING_SIZE 15 /* Must be set before connecting */ +#define IPSEC_OPT_RX_FSW_RING_SIZE 16 /* Must be set before connecting */ /* * ipsec stats parameter structure diff --git a/bsd/net/if_llatbl.c b/bsd/net/if_llatbl.c index 7b7eab342..3a4174113 100644 --- a/bsd/net/if_llatbl.c +++ b/bsd/net/if_llatbl.c @@ -124,8 +124,9 @@ lltable_dump_af(struct lltable *llt, struct sysctl_req *wr) LLTABLE_LOCK_ASSERT(); - if (llt->llt_ifp->if_flags & IFF_LOOPBACK) - return (0); + if (llt->llt_ifp->if_flags & IFF_LOOPBACK) { + return 0; + } error = 0; IF_AFDATA_RLOCK(llt->llt_ifp, llt->llt_af); @@ -133,7 +134,7 @@ lltable_dump_af(struct lltable *llt, struct sysctl_req *wr) (llt_foreach_cb_t *)llt->llt_dump_entry, wr); IF_AFDATA_RUNLOCK(llt->llt_ifp, llt->llt_af); - return (error); + return error; } /* @@ -149,13 +150,14 @@ lltable_sysctl_dumparp(int af, struct sysctl_req *wr) SLIST_FOREACH(llt, &lltables, llt_link) { if (llt->llt_af == af) { error = lltable_dump_af(llt, wr); - if (error != 0) + if (error != 0) { goto done; + } } } done: LLTABLE_RUNLOCK(); - return (error); + return error; } /* @@ -178,12 +180,13 @@ htable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, void *farg) for (i = 0; i < llt->llt_hsize; i++) { LIST_FOREACH_SAFE(lle, &llt->lle_head[i], lle_next, next) { error = f(llt, lle, farg); - if (error != 0) + if (error != 0) { break; + } } } - return (error); + return error; } static void @@ -192,8 +195,9 @@ htable_link_entry(struct lltable *llt, struct llentry *lle) struct llentries *lleh; uint32_t hashidx; - if ((lle->la_flags & LLE_LINKED) != 0) + if ((lle->la_flags & LLE_LINKED) != 0) { return; + } IF_AFDATA_WLOCK_ASSERT(llt->llt_ifp, llt->llt_af); @@ -239,7 +243,7 @@ htable_prefix_free_cb(struct lltable *llt, struct llentry *lle, void *farg) LIST_INSERT_HEAD(&pmd->dchain, lle, lle_chain); } - return (0); + return 0; } static void @@ -263,13 +267,12 @@ htable_prefix_free(struct lltable *llt, const struct sockaddr *addr, IF_AFDATA_WUNLOCK(llt->llt_ifp, llt->llt_af); LIST_FOREACH_SAFE(lle, &pmd.dchain, lle_chain, next) - lltable_free_entry(llt, lle); + lltable_free_entry(llt, lle); } static void htable_free_tbl(struct lltable *llt) { - FREE(llt->lle_head, M_LLTABLE); FREE(llt, M_LLTABLE); } @@ -280,7 +283,7 @@ llentries_unlink(struct lltable *llt, struct llentries *head) struct llentry *lle, *next; LIST_FOREACH_SAFE(lle, head, lle_chain, next) - llt->llt_unlink_entry(lle); + llt->llt_unlink_entry(lle); } /* @@ -306,10 +309,10 @@ lltable_drop_entry_queue(struct llentry *lle) } KASSERT(lle->la_numheld == 0, - ("%s: la_numheld %d > 0, pkts_droped %zd", __func__, - lle->la_numheld, pkts_dropped)); + ("%s: 
la_numheld %d > 0, pkts_droped %zd", __func__, + lle->la_numheld, pkts_dropped)); - return (pkts_dropped); + return pkts_dropped; } void @@ -355,7 +358,7 @@ lltable_try_set_entry_addr(struct ifnet *ifp, struct llentry *lle, if ((lle->la_flags & LLE_DELETED) != 0) { IF_AFDATA_WUNLOCK(ifp, lle->lle_tbl->llt_af); LLE_FREE_LOCKED(lle); - return (0); + return 0; } /* Update data */ @@ -365,7 +368,7 @@ lltable_try_set_entry_addr(struct ifnet *ifp, struct llentry *lle, LLE_REMREF(lle); - return (1); + return 1; } /* @@ -393,7 +396,7 @@ lltable_calc_llheader(struct ifnet *ifp, int family, char *lladdr, *lladdr_off = ereq.lladdr_off; } - return (error); + return error; } /* @@ -416,11 +419,12 @@ llentry_update_ifaddr(struct lltable *llt, struct llentry *lle, void *farg) LLE_WLOCK(lle); if ((lle->la_flags & LLE_VALID) == 0) { LLE_WUNLOCK(lle); - return (0); + return 0; } - if ((lle->la_flags & LLE_IFADDR) != 0) + if ((lle->la_flags & LLE_IFADDR) != 0) { lladdr = (void *)IF_LLADDR(ifp); + } linkhdrsize = sizeof(linkhdr); lltable_calc_llheader(ifp, llt->llt_af, (void *)lladdr, (void *)linkhdr, &linkhdrsize, @@ -428,7 +432,7 @@ llentry_update_ifaddr(struct lltable *llt, struct llentry *lle, void *farg) memcpy(lle->r_linkdata, linkhdr, linkhdrsize); LLE_WUNLOCK(lle); - return (0); + return 0; } /* @@ -437,9 +441,9 @@ llentry_update_ifaddr(struct lltable *llt, struct llentry *lle, void *farg) void lltable_update_ifaddr(struct lltable *llt) { - - if (llt->llt_ifp->if_flags & IFF_LOOPBACK) + if (llt->llt_ifp->if_flags & IFF_LOOPBACK) { return; + } IF_AFDATA_WLOCK(llt->llt_ifp, llt->llt_af); lltable_foreach_lle(llt, llentry_update_ifaddr, llt->llt_ifp); @@ -471,7 +475,7 @@ llentry_free(struct llentry *lle) LLE_FREE_LOCKED(lle); - return (pkts_dropped); + return pkts_dropped; } /* @@ -492,19 +496,21 @@ llentry_alloc(struct ifnet *ifp, struct lltable *lt, if (la != NULL) { LLE_ADDREF(la); LLE_WUNLOCK(la); - return (la); + return la; } if ((ifp->if_flags & IFF_NOARP) == 0) { la = lltable_alloc_entry(lt, 0, (struct sockaddr *)dst); - if (la == NULL) - return (NULL); + if (la == NULL) { + return NULL; + } IF_AFDATA_WLOCK(ifp, lt->llt_af); LLE_WLOCK(la); /* Prefer any existing LLE over newly-created one */ la_tmp = lla_lookup(lt, LLE_EXCLUSIVE, (struct sockaddr *)dst); - if (la_tmp == NULL) + if (la_tmp == NULL) { lltable_link_entry(lt, la); + } IF_AFDATA_WUNLOCK(ifp, lt->llt_af); if (la_tmp != NULL) { lltable_free_entry(lt, la); @@ -514,7 +520,7 @@ llentry_alloc(struct ifnet *ifp, struct lltable *lt, LLE_WUNLOCK(la); } - return (la); + return la; } /* @@ -532,7 +538,7 @@ lltable_free_cb(struct lltable *llt, struct llentry *lle, void *farg) LLE_WLOCK(lle); LIST_INSERT_HEAD(dchain, lle, lle_chain); - return (0); + return 0; } /* @@ -557,8 +563,9 @@ lltable_free(struct lltable *llt) LIST_FOREACH_SAFE(lle, &dchain, lle_chain, next) { #if 0 - if (thread_call_cancel(lle->lle_timer) == TRUE) + if (thread_call_cancel(lle->lle_timer) == TRUE) { LLE_REMREF(lle); + } #endif llentry_free(lle); } @@ -571,16 +578,17 @@ lltable_free(struct lltable *llt) void lltable_drain(int af) { - struct lltable *llt; - struct llentry *lle; + struct lltable *llt; + struct llentry *lle; register int i; LLTABLE_RLOCK(); SLIST_FOREACH(llt, &lltables, llt_link) { - if (llt->llt_af != af) + if (llt->llt_af != af) { continue; + } - for (i=0; i < llt->llt_hsize; i++) { + for (i = 0; i < llt->llt_hsize; i++) { LIST_FOREACH(lle, &llt->lle_head[i], lle_next) { LLE_WLOCK(lle); if (lle->la_hold) { @@ -613,12 +621,12 @@ lltable_delete_addr(struct 
lltable *llt, u_int flags, if (lle == NULL) { IF_AFDATA_WUNLOCK(ifp, llt->llt_af); - return (ENOENT); + return ENOENT; } if ((lle->la_flags & LLE_IFADDR) != 0 && (flags & LLE_IFADDR) == 0) { IF_AFDATA_WUNLOCK(ifp, llt->llt_af); LLE_WUNLOCK(lle); - return (EPERM); + return EPERM; } lltable_unlink_entry(llt, lle); @@ -626,7 +634,7 @@ lltable_delete_addr(struct lltable *llt, u_int flags, llt->llt_delete_entry(llt, lle); - return (0); + return 0; } void @@ -637,8 +645,9 @@ lltable_prefix_free(int af, struct sockaddr *addr, struct sockaddr *mask, LLTABLE_RLOCK(); SLIST_FOREACH(llt, &lltables, llt_link) { - if (llt->llt_af != af) + if (llt->llt_af != af) { continue; + } llt->llt_prefix_free(llt, addr, mask, flags); } @@ -656,8 +665,9 @@ lltable_allocate_htbl(uint32_t hsize) MALLOC(llt->lle_head, struct llentries *, sizeof(struct llentries) * hsize, M_LLTABLE, M_WAITOK | M_ZERO); - for (i = 0; i < llt->llt_hsize; i++) + for (i = 0; i < llt->llt_hsize; i++) { LIST_INIT(&llt->lle_head[i]); + } /* Set some default callbacks */ llt->llt_link_entry = htable_link_entry; @@ -666,7 +676,7 @@ lltable_allocate_htbl(uint32_t hsize) llt->llt_foreach_entry = htable_foreach_lle; llt->llt_free_tbl = htable_free_tbl; - return (llt); + return llt; } /* @@ -697,14 +707,14 @@ lltable_unlink(struct lltable *llt) int lltable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, void *farg) { - return (llt->llt_foreach_entry(llt, f, farg)); + return llt->llt_foreach_entry(llt, f, farg); } struct llentry * lltable_alloc_entry(struct lltable *llt, u_int flags, const struct sockaddr *l3addr) { - return (llt->llt_alloc_entry(llt, flags, l3addr)); + return llt->llt_alloc_entry(llt, flags, l3addr); } void @@ -737,16 +747,16 @@ lltable_fill_sa_entry(const struct llentry *lle, struct sockaddr *sa) struct ifnet * lltable_get_ifp(const struct lltable *llt) { - return (llt->llt_ifp); + return llt->llt_ifp; } int lltable_get_af(const struct lltable *llt) { - return (llt->llt_af); + return llt->llt_af; } -#define ifnet_byindex(index) ifindex2ifnet[(index)] +#define ifnet_byindex(index) ifindex2ifnet[(index)] /* * Called in route_output when rtm_flags contains RTF_LLDATA. 
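 * RTM_ADD installs a static link-layer entry (honoring RTF_ANNOUNCE for
 * published entries) and RTM_DELETE removes one via lltable_delete_addr().
 *
 * For orientation, a hedged sketch of how an address family could stand
 * up one of these tables on top of the htable defaults installed by
 * lltable_allocate_htbl() above; the my_* callbacks are hypothetical
 * stand-ins for the AF-specific hooks a real user (ARP/ND) provides:
 *
 *	struct lltable *llt = lltable_allocate_htbl(32);  // 32 hash buckets
 *	llt->llt_af = AF_INET;
 *	llt->llt_lookup = my_lookup;          // required AF-specific hooks
 *	llt->llt_alloc_entry = my_alloc;
 *	llt->llt_delete_entry = my_delete;
 *	llt->llt_hash = my_hash;
 *	lltable_link(llt);                    // publish on the lltables list
 *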
@@ -777,8 +787,9 @@ lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info) LLTABLE_RLOCK(); SLIST_FOREACH(llt, &lltables, llt_link) { if (llt->llt_af == dst->sa_family && - llt->llt_ifp == ifp) + llt->llt_ifp == ifp) { break; + } } LLTABLE_RUNLOCK(); KASSERT(llt != NULL, ("Yep, ugly hacks are bad\n")); @@ -789,21 +800,25 @@ lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info) case RTM_ADD: /* Add static LLE */ laflags = 0; - if (rtm->rtm_rmx.rmx_expire == 0) + if (rtm->rtm_rmx.rmx_expire == 0) { laflags = LLE_STATIC; + } lle = lltable_alloc_entry(llt, laflags, dst); - if (lle == NULL) - return (ENOMEM); + if (lle == NULL) { + return ENOMEM; + } #if 0 linkhdrsize = sizeof(linkhdr); if (lltable_calc_llheader(ifp, dst->sa_family, LLADDR(dl), - (void *)linkhdr, &linkhdrsize, &lladdr_off) != 0) - return (EINVAL); + (void *)linkhdr, &linkhdrsize, &lladdr_off) != 0) { + return EINVAL; + } #endif lltable_set_entry_addr(ifp, lle, LLADDR(dl)); - if (rtm->rtm_flags & RTF_ANNOUNCE) + if (rtm->rtm_flags & RTF_ANNOUNCE) { lle->la_flags |= LLE_PUB; + } lle->la_expire = rtm->rtm_rmx.rmx_expire; laflags = lle->la_flags; @@ -819,7 +834,7 @@ lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info) IF_AFDATA_WUNLOCK(ifp, llt->llt_af); LLE_WUNLOCK(lle_tmp); lltable_free_entry(llt, lle); - return (EPERM); + return EPERM; } /* Unlink existing entry from table */ lltable_unlink_entry(llt, lle_tmp); @@ -842,19 +857,19 @@ lla_rt_output(struct rt_msghdr *rtm, struct rt_addrinfo *info) LLE_WUNLOCK(lle); #ifdef INET /* gratuitous ARP */ - if ((laflags & LLE_PUB) && dst->sa_family == AF_INET) + if ((laflags & LLE_PUB) && dst->sa_family == AF_INET) { dlil_send_arp(ifp, ARPOP_REQUEST, NULL, dst, NULL, dst, 0); + } #endif break; case RTM_DELETE: - return (lltable_delete_addr(llt, 0, dst)); + return lltable_delete_addr(llt, 0, dst); default: error = EINVAL; } - return (error); + return error; } - diff --git a/bsd/net/if_llatbl.h b/bsd/net/if_llatbl.h index 9b4697fb6..67755dc29 100644 --- a/bsd/net/if_llatbl.h +++ b/bsd/net/if_llatbl.h @@ -38,7 +38,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
- * + * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE @@ -53,8 +53,8 @@ */ #include -#ifndef _NET_IF_LLATBL_H_ -#define _NET_IF_LLATBL_H_ +#ifndef _NET_IF_LLATBL_H_ +#define _NET_IF_LLATBL_H_ #include #include @@ -71,52 +71,52 @@ struct llentry; LIST_HEAD(llentries, llentry); extern lck_rw_t *lltable_rwlock; -#define LLTABLE_RLOCK() lck_rw_lock_shared(lltable_rwlock) -#define LLTABLE_RUNLOCK() lck_rw_done(lltable_rwlock) -#define LLTABLE_WLOCK() lck_rw_lock_exclusive(lltable_rwlock) -#define LLTABLE_WUNLOCK() lck_rw_done(lltable_rwlock) -#define LLTABLE_LOCK_ASSERT() LCK_RW_ASSERT(lltable_rwlock, LCK_RW_ASSERT_EXCLUSIVE) +#define LLTABLE_RLOCK() lck_rw_lock_shared(lltable_rwlock) +#define LLTABLE_RUNLOCK() lck_rw_done(lltable_rwlock) +#define LLTABLE_WLOCK() lck_rw_lock_exclusive(lltable_rwlock) +#define LLTABLE_WUNLOCK() lck_rw_done(lltable_rwlock) +#define LLTABLE_LOCK_ASSERT() LCK_RW_ASSERT(lltable_rwlock, LCK_RW_ASSERT_EXCLUSIVE) -#define LLE_MAX_LINKHDR 24 /* Full IB header */ +#define LLE_MAX_LINKHDR 24 /* Full IB header */ /* * Code referencing llentry must at least hold * a shared lock */ struct llentry { - LIST_ENTRY(llentry) lle_next; + LIST_ENTRY(llentry) lle_next; union { - struct in_addr addr4; - struct in6_addr addr6; + struct in_addr addr4; + struct in6_addr addr6; } r_l3addr; #if 0 - char r_linkdata[LLE_MAX_LINKHDR]; /* L2 data */ - uint8_t r_hdrlen; /* length for LL header */ - uint8_t spare0[3]; + char r_linkdata[LLE_MAX_LINKHDR]; /* L2 data */ + uint8_t r_hdrlen; /* length for LL header */ + uint8_t spare0[3]; #endif - uint16_t r_flags; /* LLE runtime flags */ - uint16_t r_skip_req; /* feedback from fast path */ + uint16_t r_flags; /* LLE runtime flags */ + uint16_t r_skip_req; /* feedback from fast path */ - struct lltable *lle_tbl; - struct llentries *lle_head; - void (*lle_free)(struct llentry *); - struct mbuf *la_hold; - int la_numheld; /* # of packets currently held */ - u_int64_t la_expire; - uint16_t la_flags; - uint16_t la_asked; - uint16_t la_preempt; - int16_t ln_state; /* IPv6 has ND6_LLINFO_NOSTATE == -2 */ - uint16_t ln_router; - time_t ln_ntick; - time_t lle_remtime; /* Real time remaining */ - time_t lle_hittime; /* Time when r_skip_req was unset */ - int lle_refcnt; + struct lltable *lle_tbl; + struct llentries *lle_head; + void (*lle_free)(struct llentry *); + struct mbuf *la_hold; + int la_numheld; /* # of packets currently held */ + u_int64_t la_expire; + uint16_t la_flags; + uint16_t la_asked; + uint16_t la_preempt; + int16_t ln_state; /* IPv6 has ND6_LLINFO_NOSTATE == -2 */ + uint16_t ln_router; + time_t ln_ntick; + time_t lle_remtime; /* Real time remaining */ + time_t lle_hittime; /* Time when r_skip_req was unset */ + int lle_refcnt; union { uint64_t mac_aligned; uint16_t mac16[3]; } ll_addr; - LIST_ENTRY(llentry) lle_chain; /* chain of deleted items */ - thread_call_t lle_timer; + LIST_ENTRY(llentry) lle_chain; /* chain of deleted items */ + thread_call_t lle_timer; u_int64_t ln_lastused; /* last used timestamp */ struct if_llreach *ln_llreach; /* link-layer reachability record */ decl_lck_rw_data(, lle_lock); @@ -126,56 +126,56 @@ struct llentry { extern lck_grp_t *lle_lock_grp; extern lck_attr_t *lle_lock_attr; -#define LLE_WLOCK(lle) lck_rw_lock_exclusive(&(lle)->lle_lock) -#define LLE_RLOCK(lle) lck_rw_lock_shared(&(lle)->lle_lock) -#define LLE_WUNLOCK(lle) 
lck_rw_done(&(lle)->lle_lock) -#define LLE_RUNLOCK(lle) lck_rw_done(&(lle)->lle_lock) -#define LLE_DOWNGRADE(lle) lck_rw_lock_exclusive_to_shared(&(lle)->lle_lock) -#define LLE_TRY_UPGRADE(lle) lck_rw_lock_shared_to_exclusive(&(lle)->lle_lock) -#define LLE_LOCK_INIT(lle) lck_rw_init(&(lle)->lle_lock, lle_lock_grp, lle_lock_attr) -#define LLE_LOCK_DESTROY(lle) lck_rw_destroy(&(lle)->lle_lock, lle_lock_grp) -#define LLE_WLOCK_ASSERT(lle) LCK_RW_ASSERT(&(lle)->lle_lock, LCK_RW_ASSERT_EXCLUSIVE) +#define LLE_WLOCK(lle) lck_rw_lock_exclusive(&(lle)->lle_lock) +#define LLE_RLOCK(lle) lck_rw_lock_shared(&(lle)->lle_lock) +#define LLE_WUNLOCK(lle) lck_rw_done(&(lle)->lle_lock) +#define LLE_RUNLOCK(lle) lck_rw_done(&(lle)->lle_lock) +#define LLE_DOWNGRADE(lle) lck_rw_lock_exclusive_to_shared(&(lle)->lle_lock) +#define LLE_TRY_UPGRADE(lle) lck_rw_lock_shared_to_exclusive(&(lle)->lle_lock) +#define LLE_LOCK_INIT(lle) lck_rw_init(&(lle)->lle_lock, lle_lock_grp, lle_lock_attr) +#define LLE_LOCK_DESTROY(lle) lck_rw_destroy(&(lle)->lle_lock, lle_lock_grp) +#define LLE_WLOCK_ASSERT(lle) LCK_RW_ASSERT(&(lle)->lle_lock, LCK_RW_ASSERT_EXCLUSIVE) -#define LLE_REQ_INIT(lle) lck_mtx_init(&(lle)->req_mtx, lle_lock_grp, lle_lock_attr) -#define LLE_REQ_DESTROY(lle) lck_mtx_destroy(&(lle)->req_mtx, lle_lock_grp) -#define LLE_REQ_LOCK(lle) lck_mtx_lock(&(lle)->req_mtx) -#define LLE_REQ_UNLOCK(lle) lck_mtx_unlock(&(lle)->req_mtx) +#define LLE_REQ_INIT(lle) lck_mtx_init(&(lle)->req_mtx, lle_lock_grp, lle_lock_attr) +#define LLE_REQ_DESTROY(lle) lck_mtx_destroy(&(lle)->req_mtx, lle_lock_grp) +#define LLE_REQ_LOCK(lle) lck_mtx_lock(&(lle)->req_mtx) +#define LLE_REQ_UNLOCK(lle) lck_mtx_unlock(&(lle)->req_mtx) -#define LLE_IS_VALID(lle) (((lle) != NULL) && ((lle) != (void *)-1)) +#define LLE_IS_VALID(lle) (((lle) != NULL) && ((lle) != (void *)-1)) -#define LLE_ADDREF(lle) do { \ - LLE_WLOCK_ASSERT(lle); \ - VERIFY((lle)->lle_refcnt >= 0); \ - (lle)->lle_refcnt++; \ +#define LLE_ADDREF(lle) do { \ + LLE_WLOCK_ASSERT(lle); \ + VERIFY((lle)->lle_refcnt >= 0); \ + (lle)->lle_refcnt++; \ } while (0) -#define LLE_REMREF(lle) do { \ - LLE_WLOCK_ASSERT(lle); \ - VERIFY((lle)->lle_refcnt > 0); \ - (lle)->lle_refcnt--; \ +#define LLE_REMREF(lle) do { \ + LLE_WLOCK_ASSERT(lle); \ + VERIFY((lle)->lle_refcnt > 0); \ + (lle)->lle_refcnt--; \ } while (0) -#define LLE_FREE_LOCKED(lle) do { \ - if ((lle)->lle_refcnt == 1) \ - (lle)->lle_free(lle); \ - else { \ - LLE_REMREF(lle); \ - LLE_WUNLOCK(lle); \ - } \ - /* guard against invalid refs */ \ - (lle) = NULL; \ +#define LLE_FREE_LOCKED(lle) do { \ + if ((lle)->lle_refcnt == 1) \ + (lle)->lle_free(lle); \ + else { \ + LLE_REMREF(lle); \ + LLE_WUNLOCK(lle); \ + } \ + /* guard against invalid refs */ \ + (lle) = NULL; \ } while (0) -#define LLE_FREE(lle) do { \ - LLE_WLOCK(lle); \ - LLE_FREE_LOCKED(lle); \ +#define LLE_FREE(lle) do { \ + LLE_WLOCK(lle); \ + LLE_FREE_LOCKED(lle); \ } while (0) -typedef struct llentry *(llt_lookup_t)(struct lltable *, u_int flags, +typedef struct llentry *(llt_lookup_t)(struct lltable *, u_int flags, const struct sockaddr *l3addr); -typedef struct llentry *(llt_alloc_t)(struct lltable *, u_int flags, +typedef struct llentry *(llt_alloc_t)(struct lltable *, u_int flags, const struct sockaddr *l3addr); -typedef void (llt_delete_t)(struct lltable *, struct llentry *); +typedef void (llt_delete_t)(struct lltable *, struct llentry *); typedef void (llt_prefix_free_t)(struct lltable *, const struct sockaddr *addr, const struct sockaddr *mask, u_int flags); typedef 
int (llt_dump_entry_t)(struct lltable *, struct llentry *, @@ -193,25 +193,25 @@ typedef int (llt_foreach_cb_t)(struct lltable *, struct llentry *, void *); typedef int (llt_foreach_entry_t)(struct lltable *, llt_foreach_cb_t *, void *); struct lltable { - SLIST_ENTRY(lltable) llt_link; - int llt_af; - int llt_hsize; - struct llentries *lle_head; - struct ifnet *llt_ifp; + SLIST_ENTRY(lltable) llt_link; + int llt_af; + int llt_hsize; + struct llentries *lle_head; + struct ifnet *llt_ifp; - llt_lookup_t *llt_lookup; - llt_alloc_t *llt_alloc_entry; - llt_delete_t *llt_delete_entry; - llt_prefix_free_t *llt_prefix_free; - llt_dump_entry_t *llt_dump_entry; - llt_hash_t *llt_hash; - llt_match_prefix_t *llt_match_prefix; - llt_free_entry_t *llt_free_entry; - llt_foreach_entry_t *llt_foreach_entry; - llt_link_entry_t *llt_link_entry; - llt_unlink_entry_t *llt_unlink_entry; - llt_fill_sa_entry_t *llt_fill_sa_entry; - llt_free_tbl_t *llt_free_tbl; + llt_lookup_t *llt_lookup; + llt_alloc_t *llt_alloc_entry; + llt_delete_t *llt_delete_entry; + llt_prefix_free_t *llt_prefix_free; + llt_dump_entry_t *llt_dump_entry; + llt_hash_t *llt_hash; + llt_match_prefix_t *llt_match_prefix; + llt_free_entry_t *llt_free_entry; + llt_foreach_entry_t *llt_foreach_entry; + llt_link_entry_t *llt_link_entry; + llt_unlink_entry_t *llt_unlink_entry; + llt_fill_sa_entry_t *llt_fill_sa_entry; + llt_free_tbl_t *llt_free_tbl; }; #ifdef MALLOC_DECLARE @@ -221,22 +221,22 @@ MALLOC_DECLARE(M_LLTABLE); /* * LLentry flags */ -#define LLE_DELETED 0x0001 /* entry must be deleted */ -#define LLE_STATIC 0x0002 /* entry is static */ -#define LLE_IFADDR 0x0004 /* entry is interface addr */ -#define LLE_VALID 0x0008 /* ll_addr is valid */ -#define LLE_REDIRECT 0x0010 /* installed by redirect; has host rtentry */ -#define LLE_PUB 0x0020 /* publish entry ??? */ -#define LLE_LINKED 0x0040 /* linked to lookup structure */ +#define LLE_DELETED 0x0001 /* entry must be deleted */ +#define LLE_STATIC 0x0002 /* entry is static */ +#define LLE_IFADDR 0x0004 /* entry is interface addr */ +#define LLE_VALID 0x0008 /* ll_addr is valid */ +#define LLE_REDIRECT 0x0010 /* installed by redirect; has host rtentry */ +#define LLE_PUB 0x0020 /* publish entry ??? 
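 * (set by lla_rt_output() for RTM_ADD requests
 * carrying RTF_ANNOUNCE, i.e. proxy-style
 * entries; on AF_INET such an add also emits
 * a gratuitous ARP)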
*/ +#define LLE_LINKED 0x0040 /* linked to lookup structure */ /* LLE request flags */ -#define LLE_EXCLUSIVE 0x2000 /* return lle xlocked */ -#define LLE_UNLOCKED 0x4000 /* return lle unlocked */ -#define LLE_ADDRONLY 0x4000 /* return lladdr instead of full header */ -#define LLE_CREATE 0x8000 /* hint to avoid lle lookup */ +#define LLE_EXCLUSIVE 0x2000 /* return lle xlocked */ +#define LLE_UNLOCKED 0x4000 /* return lle unlocked */ +#define LLE_ADDRONLY 0x4000 /* return lladdr instead of full header */ +#define LLE_CREATE 0x8000 /* hint to avoid lle lookup */ /* LLE flags used by fastpath code */ -#define RLLE_VALID 0x0001 /* entry is valid */ -#define RLLE_IFADDR LLE_IFADDR /* entry is ifaddr */ +#define RLLE_VALID 0x0001 /* entry is valid */ +#define RLLE_IFADDR LLE_IFADDR /* entry is ifaddr */ #define LLATBL_HASH(key, mask) \ (((((((key >> 8) ^ key) >> 8) ^ key) >> 8) ^ key) & mask) @@ -248,13 +248,13 @@ void lltable_link(struct lltable *llt); void lltable_prefix_free(int, struct sockaddr *, struct sockaddr *, u_int); #if 0 -void lltable_drain(int); +void lltable_drain(int); #endif -int lltable_sysctl_dumparp(int, struct sysctl_req *); +int lltable_sysctl_dumparp(int, struct sysctl_req *); -size_t llentry_free(struct llentry *); +size_t llentry_free(struct llentry *); struct llentry *llentry_alloc(struct ifnet *, struct lltable *, - struct sockaddr_storage *); + struct sockaddr_storage *); /* helper functions */ size_t lltable_drop_entry_queue(struct llentry *); @@ -285,7 +285,7 @@ int lltable_foreach_lle(struct lltable *llt, llt_foreach_cb_t *f, static __inline struct llentry * lla_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3addr) { - return (llt->llt_lookup(llt, flags, l3addr)); + return llt->llt_lookup(llt, flags, l3addr); } int lla_rt_output(struct rt_msghdr *, struct rt_addrinfo *); diff --git a/bsd/net/if_llc.h b/bsd/net/if_llc.h index dade70621..f2ccb92bc 100644 --- a/bsd/net/if_llc.h +++ b/bsd/net/if_llc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000,2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -72,41 +72,41 @@ struct llc { u_int8_t llc_dsap; u_int8_t llc_ssap; union { - struct { - u_int8_t control; - u_int8_t format_id; - u_int8_t class_id; - u_int8_t window_x2; - } type_u; - struct { - u_int8_t num_snd_x2; - u_int8_t num_rcv_x2; - } type_i; - struct { - u_int8_t control; - u_int8_t num_rcv_x2; - } type_s; - struct { - u_int8_t control; - /* - * We cannot put the following fields in a structure because - * the structure rounding might cause padding. - */ - u_int8_t frmr_rej_pdu0; - u_int8_t frmr_rej_pdu1; - u_int8_t frmr_control; - u_int8_t frmr_control_ext; - u_int8_t frmr_cause; - } type_frmr; - struct { - u_int8_t control; - u_int8_t org_code[3]; - u_int16_t ether_type; - } type_snap __attribute__((__packed__)); - struct { - u_int8_t control; - u_int8_t control_ext; - } type_raw; + struct { + u_int8_t control; + u_int8_t format_id; + u_int8_t class_id; + u_int8_t window_x2; + } type_u; + struct { + u_int8_t num_snd_x2; + u_int8_t num_rcv_x2; + } type_i; + struct { + u_int8_t control; + u_int8_t num_rcv_x2; + } type_s; + struct { + u_int8_t control; + /* + * We cannot put the following fields in a structure because + * the structure rounding might cause padding. + */ + u_int8_t frmr_rej_pdu0; + u_int8_t frmr_rej_pdu1; + u_int8_t frmr_control; + u_int8_t frmr_control_ext; + u_int8_t frmr_cause; + } type_frmr; + struct { + u_int8_t control; + u_int8_t org_code[3]; + u_int16_t ether_type; + } type_snap __attribute__((__packed__)); + struct { + u_int8_t control; + u_int8_t control_ext; + } type_raw; } llc_un; } __attribute__((__packed__)); @@ -118,18 +118,18 @@ struct frmrinfo { u_int8_t frmr_cause; } __attribute__((__packed__)); -#define llc_control llc_un.type_u.control -#define llc_control_ext llc_un.type_raw.control_ext -#define llc_fid llc_un.type_u.format_id -#define llc_class llc_un.type_u.class -#define llc_window llc_un.type_u.window_x2 -#define llc_frmrinfo llc_un.type_frmr.frmr_rej_pdu0 -#define llc_frmr_pdu0 llc_un.type_frmr.frmr_rej_pdu0 -#define llc_frmr_pdu1 llc_un.type_frmr.frmr_rej_pdu1 -#define llc_frmr_control llc_un.type_frmr.frmr_control -#define llc_frmr_control_ext llc_un.type_frmr.frmr_control_ext -#define llc_frmr_cause llc_un.type_frmr.frmr_cause -#define llc_snap llc_un.type_snap +#define llc_control llc_un.type_u.control +#define llc_control_ext llc_un.type_raw.control_ext +#define llc_fid llc_un.type_u.format_id +#define llc_class llc_un.type_u.class +#define llc_window llc_un.type_u.window_x2 +#define llc_frmrinfo llc_un.type_frmr.frmr_rej_pdu0 +#define llc_frmr_pdu0 llc_un.type_frmr.frmr_rej_pdu0 +#define llc_frmr_pdu1 llc_un.type_frmr.frmr_rej_pdu1 +#define llc_frmr_control llc_un.type_frmr.frmr_control +#define llc_frmr_control_ext llc_un.type_frmr.frmr_control_ext +#define llc_frmr_cause llc_un.type_frmr.frmr_cause +#define llc_snap llc_un.type_snap /* * Don't use sizeof(struct llc_un) for LLC header sizes @@ -142,54 +142,54 @@ struct frmrinfo { /* * Unnumbered LLC format commands */ -#define LLC_UI 0x3 -#define LLC_UI_P 0x13 -#define LLC_DISC 0x43 -#define LLC_DISC_P 0x53 -#define LLC_UA 0x63 -#define LLC_UA_P 0x73 -#define LLC_TEST 0xe3 -#define LLC_TEST_P 0xf3 -#define LLC_FRMR 0x87 -#define LLC_FRMR_P 0x97 -#define LLC_DM 0x0f -#define LLC_DM_P 0x1f -#define LLC_XID 0xaf -#define LLC_XID_P 0xbf -#define LLC_SABME 0x6f -#define LLC_SABME_P 0x7f +#define LLC_UI 0x3 +#define LLC_UI_P 0x13 +#define LLC_DISC 0x43 +#define LLC_DISC_P 0x53 +#define LLC_UA 0x63 +#define LLC_UA_P 0x73 +#define 
LLC_TEST 0xe3 +#define LLC_TEST_P 0xf3 +#define LLC_FRMR 0x87 +#define LLC_FRMR_P 0x97 +#define LLC_DM 0x0f +#define LLC_DM_P 0x1f +#define LLC_XID 0xaf +#define LLC_XID_P 0xbf +#define LLC_SABME 0x6f +#define LLC_SABME_P 0x7f /* * Supervisory LLC commands */ -#define LLC_RR 0x01 -#define LLC_RNR 0x05 -#define LLC_REJ 0x09 +#define LLC_RR 0x01 +#define LLC_RNR 0x05 +#define LLC_REJ 0x09 /* * Info format - dummy only */ -#define LLC_INFO 0x00 +#define LLC_INFO 0x00 /* * ISO PDTR 10178 contains among others */ -#define LLC_8021D_LSAP 0x42 -#define LLC_X25_LSAP 0x7e -#define LLC_SNAP_LSAP 0xaa -#define LLC_ISO_LSAP 0xfe +#define LLC_8021D_LSAP 0x42 +#define LLC_X25_LSAP 0x7e +#define LLC_SNAP_LSAP 0xaa +#define LLC_ISO_LSAP 0xfe /* * LLC XID definitions from 802.2, as needed */ -#define LLC_XID_FORMAT_BASIC 0x81 -#define LLC_XID_BASIC_MINLEN (LLC_UFRAMELEN + 3) +#define LLC_XID_FORMAT_BASIC 0x81 +#define LLC_XID_BASIC_MINLEN (LLC_UFRAMELEN + 3) -#define LLC_XID_CLASS_I 0x1 -#define LLC_XID_CLASS_II 0x3 -#define LLC_XID_CLASS_III 0x5 -#define LLC_XID_CLASS_IV 0x7 +#define LLC_XID_CLASS_I 0x1 +#define LLC_XID_CLASS_II 0x3 +#define LLC_XID_CLASS_III 0x5 +#define LLC_XID_CLASS_IV 0x7 #endif /* !_NET_IF_LLC_H_ */ diff --git a/bsd/net/if_llreach.c b/bsd/net/if_llreach.c index 29a449936..91f6435d2 100644 --- a/bsd/net/if_llreach.c +++ b/bsd/net/if_llreach.c @@ -136,11 +136,11 @@ #include #endif /* INET6 */ -static unsigned int iflr_size; /* size of if_llreach */ -static struct zone *iflr_zone; /* zone for if_llreach */ +static unsigned int iflr_size; /* size of if_llreach */ +static struct zone *iflr_zone; /* zone for if_llreach */ -#define IFLR_ZONE_MAX 128 /* maximum elements in zone */ -#define IFLR_ZONE_NAME "if_llreach" /* zone name */ +#define IFLR_ZONE_MAX 128 /* maximum elements in zone */ +#define IFLR_ZONE_NAME "if_llreach" /* zone name */ static struct if_llreach *iflr_alloc(int); static void iflr_free(struct if_llreach *); @@ -162,19 +162,19 @@ SYSCTL_NODE(_net_link_generic_system, OID_AUTO, llreach_info, * Link-layer reachability is based off node constants in RFC4861. 
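 * On INET6 builds LL_COMPUTE_RTIME() simply maps to nd6's
 * ND_COMPUTE_RTIME(); otherwise the fallback below applies RFC 4861's
 * MIN/MAX_RANDOM_FACTOR of 0.5 and 1.5, scaled by 1024 to stay in
 * integer arithmetic. Worked example (approximate, since the masking
 * is not perfectly uniform): lr_basereachable = 30 s feeds x = 30000
 * into the macro and yields lr_reachable roughly uniform over
 * [15, 45] seconds.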
*/ #if INET6 -#define LL_COMPUTE_RTIME(x) ND_COMPUTE_RTIME(x) +#define LL_COMPUTE_RTIME(x) ND_COMPUTE_RTIME(x) #else -#define LL_MIN_RANDOM_FACTOR 512 /* 1024 * 0.5 */ -#define LL_MAX_RANDOM_FACTOR 1536 /* 1024 * 1.5 */ -#define LL_COMPUTE_RTIME(x) \ - (((LL_MIN_RANDOM_FACTOR * (x >> 10)) + (RandomULong() & \ +#define LL_MIN_RANDOM_FACTOR 512 /* 1024 * 0.5 */ +#define LL_MAX_RANDOM_FACTOR 1536 /* 1024 * 1.5 */ +#define LL_COMPUTE_RTIME(x) \ + (((LL_MIN_RANDOM_FACTOR * (x >> 10)) + (RandomULong() & \ ((LL_MAX_RANDOM_FACTOR - LL_MIN_RANDOM_FACTOR) * (x >> 10)))) / 1000) #endif /* !INET6 */ void ifnet_llreach_init(void) { - iflr_size = sizeof (struct if_llreach); + iflr_size = sizeof(struct if_llreach); iflr_zone = zinit(iflr_size, IFLR_ZONE_MAX * iflr_size, 0, IFLR_ZONE_NAME); if (iflr_zone == NULL) { @@ -190,8 +190,9 @@ ifnet_llreach_ifattach(struct ifnet *ifp, boolean_t reuse) { lck_rw_lock_exclusive(&ifp->if_llreach_lock); /* Initialize link-layer source tree (if not already) */ - if (!reuse) + if (!reuse) { RB_INIT(&ifp->if_ll_srcs); + } lck_rw_done(&ifp->if_llreach_lock); } @@ -215,7 +216,7 @@ ifnet_llreach_ifdetach(struct ifnet *ifp) static __inline int iflr_cmp(const struct if_llreach *a, const struct if_llreach *b) { - return (memcmp(&a->lr_key, &b->lr_key, sizeof (a->lr_key))); + return memcmp(&a->lr_key, &b->lr_key, sizeof(a->lr_key)); } static __inline int @@ -224,7 +225,7 @@ iflr_reachable(struct if_llreach *lr, int cmp_delta, u_int64_t tval) u_int64_t now; u_int64_t expire; - now = net_uptime(); /* current approx. uptime */ + now = net_uptime(); /* current approx. uptime */ /* * No need for lr_lock; atomically read the last rcvd uptime. */ @@ -234,15 +235,16 @@ iflr_reachable(struct if_llreach *lr, int cmp_delta, u_int64_t tval) * lr_reachable seconds, consider that the host is no * longer reachable. */ - if (!cmp_delta) - return (expire >= now); + if (!cmp_delta) { + return expire >= now; + } /* * If the caller supplied a reference time, consider the * host is reachable if the record hasn't expired (see above) * and if the reference time is within the past lr_reachable * seconds. */ - return ((expire >= now) && (now - tval) < lr->lr_reachable); + return (expire >= now) && (now - tval) < lr->lr_reachable; } int @@ -251,7 +253,7 @@ ifnet_llreach_reachable(struct if_llreach *lr) /* * Check whether the cache is too old to be trusted. */ - return (iflr_reachable(lr, 0, 0)); + return iflr_reachable(lr, 0, 0); } int @@ -260,7 +262,7 @@ ifnet_llreach_reachable_delta(struct if_llreach *lr, u_int64_t tval) /* * Check whether the cache is too old to be trusted. 
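 * iflr_reachable() treats the entry as live while lr_lastrcvd +
 * lr_reachable is still ahead of net_uptime(); the _delta variant
 * additionally requires the caller's reference time to fall within
 * the past lr_reachable seconds.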
*/ - return (iflr_reachable(lr, 1, tval)); + return iflr_reachable(lr, 1, tval); } void @@ -269,7 +271,7 @@ ifnet_llreach_set_reachable(struct ifnet *ifp, u_int16_t llproto, void *addr, { struct if_llreach find, *lr; - VERIFY(alen == IF_LLREACH_MAXLEN); /* for now */ + VERIFY(alen == IF_LLREACH_MAXLEN); /* for now */ find.lr_key.proto = llproto; bcopy(addr, &find.lr_key.addr, IF_LLREACH_MAXLEN); @@ -294,10 +296,11 @@ ifnet_llreach_alloc(struct ifnet *ifp, u_int16_t llproto, void *addr, struct if_llreach find, *lr; struct timeval cnow; - if (llreach_base == 0) - return (NULL); + if (llreach_base == 0) { + return NULL; + } - VERIFY(alen == IF_LLREACH_MAXLEN); /* for now */ + VERIFY(alen == IF_LLREACH_MAXLEN); /* for now */ find.lr_key.proto = llproto; bcopy(addr, &find.lr_key.addr, IF_LLREACH_MAXLEN); @@ -310,37 +313,39 @@ found: VERIFY(lr->lr_reqcnt >= 1); lr->lr_reqcnt++; VERIFY(lr->lr_reqcnt != 0); - IFLR_ADDREF_LOCKED(lr); /* for caller */ - lr->lr_lastrcvd = net_uptime(); /* current approx. uptime */ + IFLR_ADDREF_LOCKED(lr); /* for caller */ + lr->lr_lastrcvd = net_uptime(); /* current approx. uptime */ IFLR_UNLOCK(lr); lck_rw_done(&ifp->if_llreach_lock); - return (lr); + return lr; } - if (!lck_rw_lock_shared_to_exclusive(&ifp->if_llreach_lock)) + if (!lck_rw_lock_shared_to_exclusive(&ifp->if_llreach_lock)) { lck_rw_lock_exclusive(&ifp->if_llreach_lock); + } LCK_RW_ASSERT(&ifp->if_llreach_lock, LCK_RW_ASSERT_EXCLUSIVE); /* in case things have changed while becoming writer */ lr = RB_FIND(ll_reach_tree, &ifp->if_ll_srcs, &find); - if (lr != NULL) + if (lr != NULL) { goto found; + } lr = iflr_alloc(M_WAITOK); if (lr == NULL) { lck_rw_done(&ifp->if_llreach_lock); - return (NULL); + return NULL; } IFLR_LOCK(lr); lr->lr_reqcnt++; VERIFY(lr->lr_reqcnt == 1); - IFLR_ADDREF_LOCKED(lr); /* for RB tree */ - IFLR_ADDREF_LOCKED(lr); /* for caller */ - lr->lr_lastrcvd = net_uptime(); /* current approx. uptime */ - lr->lr_baseup = lr->lr_lastrcvd; /* base uptime */ + IFLR_ADDREF_LOCKED(lr); /* for RB tree */ + IFLR_ADDREF_LOCKED(lr); /* for caller */ + lr->lr_lastrcvd = net_uptime(); /* current approx. uptime */ + lr->lr_baseup = lr->lr_lastrcvd; /* base uptime */ getmicrotime(&cnow); - lr->lr_basecal = cnow.tv_sec; /* base calendar time */ + lr->lr_basecal = cnow.tv_sec; /* base calendar time */ lr->lr_basereachable = llreach_base; lr->lr_reachable = LL_COMPUTE_RTIME(lr->lr_basereachable * 1000); lr->lr_debug |= IFD_ATTACHED; @@ -354,7 +359,7 @@ found: IFLR_UNLOCK(lr); lck_rw_done(&ifp->if_llreach_lock); - return (lr); + return lr; } void @@ -375,7 +380,7 @@ ifnet_llreach_free(struct if_llreach *lr) if (lr->lr_reqcnt > 0) { IFLR_UNLOCK(lr); lck_rw_done(&ifp->if_llreach_lock); - IFLR_REMREF(lr); /* for caller */ + IFLR_REMREF(lr); /* for caller */ return; } if (!(lr->lr_debug & IFD_ATTACHED)) { @@ -388,8 +393,8 @@ ifnet_llreach_free(struct if_llreach *lr) IFLR_UNLOCK(lr); lck_rw_done(&ifp->if_llreach_lock); - IFLR_REMREF(lr); /* for RB tree */ - IFLR_REMREF(lr); /* for caller */ + IFLR_REMREF(lr); /* for RB tree */ + IFLR_REMREF(lr); /* for caller */ } u_int64_t @@ -401,8 +406,8 @@ ifnet_llreach_up2calexp(struct if_llreach *lr, u_int64_t uptime) struct timeval cnow; u_int64_t unow; - getmicrotime(&cnow); /* current calendar time */ - unow = net_uptime(); /* current approx. uptime */ + getmicrotime(&cnow); /* current calendar time */ + unow = net_uptime(); /* current approx. uptime */ /* * Take into account possible calendar time changes; * adjust base calendar value if necessary, i.e. 
@@ -415,13 +420,13 @@ ifnet_llreach_up2calexp(struct if_llreach *lr, u_int64_t uptime) (uptime - lr->lr_baseup); } - return (calendar); + return calendar; } u_int64_t ifnet_llreach_up2upexp(struct if_llreach *lr, u_int64_t uptime) { - return (lr->lr_reachable + uptime); + return lr->lr_reachable + uptime; } int @@ -436,16 +441,17 @@ ifnet_llreach_get_defrouter(struct ifnet *ifp, int af, VERIFY(ifp != NULL && iflri != NULL && (af == AF_INET || af == AF_INET6)); - bzero(iflri, sizeof (*iflri)); + bzero(iflri, sizeof(*iflri)); - if ((rnh = rt_tables[af]) == NULL) - return (error); + if ((rnh = rt_tables[af]) == NULL) { + return error; + } - bzero(&dst_ss, sizeof (dst_ss)); - bzero(&mask_ss, sizeof (mask_ss)); + bzero(&dst_ss, sizeof(dst_ss)); + bzero(&mask_ss, sizeof(mask_ss)); dst_ss.ss_family = af; - dst_ss.ss_len = (af == AF_INET) ? sizeof (struct sockaddr_in) : - sizeof (struct sockaddr_in6); + dst_ss.ss_len = (af == AF_INET) ? sizeof(struct sockaddr_in) : + sizeof(struct sockaddr_in6); lck_mtx_lock(rnh_lock); rt = rt_lookup(TRUE, SA(&dst_ss), SA(&mask_ss), rnh, ifp->if_index); @@ -471,7 +477,7 @@ ifnet_llreach_get_defrouter(struct ifnet *ifp, int af, } lck_mtx_unlock(rnh_lock); - return (error); + return error; } static struct if_llreach * @@ -485,7 +491,7 @@ iflr_alloc(int how) lck_mtx_init(&lr->lr_lock, ifnet_lock_group, ifnet_lock_attr); lr->lr_debug |= IFD_ALLOC; } - return (lr); + return lr; } static void @@ -515,17 +521,19 @@ iflr_free(struct if_llreach *lr) void iflr_addref(struct if_llreach *lr, int locked) { - if (!locked) + if (!locked) { IFLR_LOCK(lr); - else + } else { IFLR_LOCK_ASSERT_HELD(lr); + } if (++lr->lr_refcnt == 0) { panic("%s: lr=%p wraparound refcnt", __func__, lr); /* NOTREACHED */ } - if (!locked) + if (!locked) { IFLR_UNLOCK(lr); + } } void @@ -543,7 +551,7 @@ iflr_remref(struct if_llreach *lr) } IFLR_UNLOCK(lr); - iflr_free(lr); /* deallocate it */ + iflr_free(lr); /* deallocate it */ } void @@ -553,7 +561,7 @@ ifnet_lr2ri(struct if_llreach *lr, struct rt_reach_info *ri) IFLR_LOCK_ASSERT_HELD(lr); - bzero(ri, sizeof (*ri)); + bzero(ri, sizeof(*ri)); ifnet_lr2lri(lr, &lri); ri->ri_refcnt = lri.lri_refcnt; ri->ri_probes = lri.lri_probes; @@ -568,7 +576,7 @@ ifnet_lr2iflri(struct if_llreach *lr, struct ifnet_llreach_info *iflri) { IFLR_LOCK_ASSERT_HELD(lr); - bzero(iflri, sizeof (*iflri)); + bzero(iflri, sizeof(*iflri)); /* * Note here we return request count, not actual memory refcnt. */ @@ -603,13 +611,13 @@ ifnet_lr2lri(struct if_llreach *lr, struct if_llreach_info *lri) { IFLR_LOCK_ASSERT_HELD(lr); - bzero(lri, sizeof (*lri)); + bzero(lri, sizeof(*lri)); /* * Note here we return request count, not actual memory refcnt. 
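 * (lr_reqcnt counts outstanding ifnet_llreach_alloc() requests, which
 * is the externally meaningful figure; lr_refcnt also covers internal
 * holds such as the RB-tree reference, so reporting it here would
 * overstate usage.)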
*/ - lri->lri_refcnt = lr->lr_reqcnt; + lri->lri_refcnt = lr->lr_reqcnt; lri->lri_ifindex = lr->lr_ifp->if_index; - lri->lri_probes = lr->lr_probes; + lri->lri_probes = lr->lr_probes; lri->lri_expire = ifnet_llreach_up2calexp(lr, lr->lr_lastrcvd); lri->lri_proto = lr->lr_key.proto; bcopy(&lr->lr_key.addr, &lri->lri_addr, IF_LLREACH_MAXLEN); @@ -622,35 +630,37 @@ static int sysctl_llreach_ifinfo SYSCTL_HANDLER_ARGS { #pragma unused(oidp) - int *name, retval = 0; - unsigned int namelen; - uint32_t ifindex; + int *name, retval = 0; + unsigned int namelen; + uint32_t ifindex; struct if_llreach *lr; struct if_llreach_info lri = {}; - struct ifnet *ifp; + struct ifnet *ifp; name = (int *)arg1; namelen = (unsigned int)arg2; - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } - if (namelen != 1) - return (EINVAL); + if (namelen != 1) { + return EINVAL; + } ifindex = name[0]; ifnet_head_lock_shared(); if (ifindex <= 0 || ifindex > (u_int)if_index) { printf("%s: ifindex %u out of range\n", __func__, ifindex); ifnet_head_done(); - return (ENOENT); + return ENOENT; } ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); if (ifp == NULL) { printf("%s: no ifp for ifindex %u\n", __func__, ifindex); - return (ENOENT); + return ENOENT; } lck_rw_lock_shared(&ifp->if_llreach_lock); @@ -660,10 +670,11 @@ sysctl_llreach_ifinfo SYSCTL_HANDLER_ARGS ifnet_lr2lri(lr, &lri); IFLR_UNLOCK(lr); - if ((retval = SYSCTL_OUT(req, &lri, sizeof (lri))) != 0) + if ((retval = SYSCTL_OUT(req, &lri, sizeof(lri))) != 0) { break; + } } lck_rw_done(&ifp->if_llreach_lock); - return (retval); + return retval; } diff --git a/bsd/net/if_llreach.h b/bsd/net/if_llreach.h index 63c86aef4..42c9f6ddd 100644 --- a/bsd/net/if_llreach.h +++ b/bsd/net/if_llreach.h @@ -26,8 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _NET_IF_LLREACH_H_ -#define _NET_IF_LLREACH_H_ +#ifndef _NET_IF_LLREACH_H_ +#define _NET_IF_LLREACH_H_ #ifdef PRIVATE #ifdef __cplusplus @@ -39,21 +39,21 @@ extern "C" { /* * Per-interface link-layer reachability information (private). 
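 * Records are exported one if_llreach_info per entry through the
 * net.link.generic.system.llreach_info sysctl handled by
 * sysctl_llreach_ifinfo() above, with the interface index as the
 * final name component. Hedged user-space sketch, assuming the usual
 * two-call sizing convention and omitting error handling:
 *
 *	int mib[CTL_MAXNAME];
 *	size_t miblen = CTL_MAXNAME, len = 0;
 *	sysctlnametomib("net.link.generic.system.llreach_info", mib, &miblen);
 *	mib[miblen] = (int)if_nametoindex("en0");   // name[0] = ifindex
 *	sysctl(mib, (u_int)miblen + 1, NULL, &len, NULL, 0);
 *	struct if_llreach_info *lri = malloc(len);
 *	sysctl(mib, (u_int)miblen + 1, lri, &len, NULL, 0);
 *	// len / sizeof(*lri) records are now in lri[]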
*/ -#define IF_LLREACHINFO_ADDRLEN 64 /* max ll addr len */ -#define IF_LLREACHINFO_RESERVED2 16 /* more reserved bits */ +#define IF_LLREACHINFO_ADDRLEN 64 /* max ll addr len */ +#define IF_LLREACHINFO_RESERVED2 16 /* more reserved bits */ struct if_llreach_info { - u_int32_t lri_refcnt; /* reference count */ - u_int32_t lri_ifindex; /* interface index */ - u_int64_t lri_expire; /* expiration (calendar) time */ - u_int32_t lri_probes; /* total # of probes */ - u_int16_t lri_reserved; /* for future use */ - u_int16_t lri_proto; /* ll proto */ - u_int8_t lri_addr[IF_LLREACHINFO_ADDRLEN]; /* ll addr */ - int32_t lri_rssi; /* received signal strength */ - int32_t lri_lqm; /* link quality metric */ - int32_t lri_npm; /* node proximity metric */ - u_int8_t lri_reserved2[IF_LLREACHINFO_RESERVED2]; + u_int32_t lri_refcnt; /* reference count */ + u_int32_t lri_ifindex; /* interface index */ + u_int64_t lri_expire; /* expiration (calendar) time */ + u_int32_t lri_probes; /* total # of probes */ + u_int16_t lri_reserved; /* for future use */ + u_int16_t lri_proto; /* ll proto */ + u_int8_t lri_addr[IF_LLREACHINFO_ADDRLEN]; /* ll addr */ + int32_t lri_rssi; /* received signal strength */ + int32_t lri_lqm; /* link quality metric */ + int32_t lri_npm; /* node proximity metric */ + u_int8_t lri_reserved2[IF_LLREACHINFO_RESERVED2]; }; #ifdef XNU_KERNEL_PRIVATE @@ -69,63 +69,63 @@ struct if_llreach_info { /* * Per-interface link-layer reachability. (Currently only for ARP/NDP/Ethernet.) */ -#define IF_LLREACH_MAXLEN ETHER_ADDR_LEN +#define IF_LLREACH_MAXLEN ETHER_ADDR_LEN struct if_llreach { decl_lck_mtx_data(, lr_lock); - RB_ENTRY(if_llreach) lr_link; /* RB tree links */ - struct ifnet *lr_ifp; /* back pointer to ifnet */ - u_int32_t lr_refcnt; /* reference count */ - u_int32_t lr_reqcnt; /* RB tree request count */ - u_int32_t lr_debug; /* see ifa_debug flags */ - u_int32_t lr_probes; /* number of probes so far */ - u_int64_t lr_basecal; /* base calendar time */ - u_int64_t lr_baseup; /* base uptime */ - u_int64_t lr_lastrcvd; /* last-heard-of timestamp */ - u_int32_t lr_basereachable; /* baseline time */ - u_int32_t lr_reachable; /* reachable time */ + RB_ENTRY(if_llreach) lr_link; /* RB tree links */ + struct ifnet *lr_ifp; /* back pointer to ifnet */ + u_int32_t lr_refcnt; /* reference count */ + u_int32_t lr_reqcnt; /* RB tree request count */ + u_int32_t lr_debug; /* see ifa_debug flags */ + u_int32_t lr_probes; /* number of probes so far */ + u_int64_t lr_basecal; /* base calendar time */ + u_int64_t lr_baseup; /* base uptime */ + u_int64_t lr_lastrcvd; /* last-heard-of timestamp */ + u_int32_t lr_basereachable; /* baseline time */ + u_int32_t lr_reachable; /* reachable time */ struct lr_key_s { - u_int16_t proto; /* ll proto */ - u_int8_t addr[IF_LLREACH_MAXLEN]; /* ll addr */ + u_int16_t proto; /* ll proto */ + u_int8_t addr[IF_LLREACH_MAXLEN]; /* ll addr */ } lr_key; - int32_t lr_rssi; /* received signal strength */ - int32_t lr_lqm; /* link quality metric */ - int32_t lr_npm; /* node proximity metric */ + int32_t lr_rssi; /* received signal strength */ + int32_t lr_lqm; /* link quality metric */ + int32_t lr_npm; /* node proximity metric */ }; RB_PROTOTYPE_SC_PREV(__private_extern__, ll_reach_tree, if_llreach, ls_link, ifllr_cmp); -#define IFLR_LOCK_ASSERT_HELD(_iflr) \ +#define IFLR_LOCK_ASSERT_HELD(_iflr) \ LCK_MTX_ASSERT(&(_iflr)->lr_lock, LCK_MTX_ASSERT_OWNED) -#define IFLR_LOCK_ASSERT_NOTHELD(_iflr) \ +#define IFLR_LOCK_ASSERT_NOTHELD(_iflr) \ LCK_MTX_ASSERT(&(_iflr)->lr_lock, 
LCK_MTX_ASSERT_NOTOWNED) -#define IFLR_LOCK(_iflr) \ +#define IFLR_LOCK(_iflr) \ lck_mtx_lock(&(_iflr)->lr_lock) -#define IFLR_LOCK_SPIN(_iflr) \ +#define IFLR_LOCK_SPIN(_iflr) \ lck_mtx_lock_spin(&(_iflr)->lr_lock) -#define IFLR_CONVERT_LOCK(_iflr) do { \ - IFLR_LOCK_ASSERT_HELD(_iflr); \ - lck_mtx_convert_spin(&(_iflr)->lr_lock); \ +#define IFLR_CONVERT_LOCK(_iflr) do { \ + IFLR_LOCK_ASSERT_HELD(_iflr); \ + lck_mtx_convert_spin(&(_iflr)->lr_lock); \ } while (0) -#define IFLR_UNLOCK(_iflr) \ +#define IFLR_UNLOCK(_iflr) \ lck_mtx_unlock(&(_iflr)->lr_lock) -#define IFLR_ADDREF(_iflr) \ +#define IFLR_ADDREF(_iflr) \ iflr_addref(_iflr, 0) -#define IFLR_ADDREF_LOCKED(_iflr) \ +#define IFLR_ADDREF_LOCKED(_iflr) \ iflr_addref(_iflr, 1) -#define IFLR_REMREF(_iflr) \ +#define IFLR_REMREF(_iflr) \ iflr_remref(_iflr) -struct ifnet_llreach_info; /* forward declaration */ +struct ifnet_llreach_info; /* forward declaration */ extern void ifnet_llreach_init(void); extern void ifnet_llreach_ifattach(struct ifnet *, boolean_t); diff --git a/bsd/net/if_loop.c b/bsd/net/if_loop.c index cc2089cb6..b76714440 100644 --- a/bsd/net/if_loop.c +++ b/bsd/net/if_loop.c @@ -114,28 +114,28 @@ #include -#define LOMTU 16384 -#define LOSNDQ_MAXLEN 256 - -#define LO_BPF_TAP_OUT(_m) { \ - if (lo_statics[0].bpf_callback != NULL) { \ - bpf_tap_out(lo_ifp, DLT_NULL, _m, \ - &((struct loopback_header *)_m->m_pkthdr.pkt_hdr)-> \ - protocol, sizeof (u_int32_t)); \ - } \ +#define LOMTU 16384 +#define LOSNDQ_MAXLEN 256 + +#define LO_BPF_TAP_OUT(_m) { \ + if (lo_statics[0].bpf_callback != NULL) { \ + bpf_tap_out(lo_ifp, DLT_NULL, _m, \ + &((struct loopback_header *)_m->m_pkthdr.pkt_hdr)-> \ + protocol, sizeof (u_int32_t)); \ + } \ } -#define LO_BPF_TAP_OUT_MULTI(_m) { \ - if (lo_statics[0].bpf_callback != NULL) { \ - struct mbuf *_n; \ - for (_n = _m; _n != NULL; _n = _n->m_nextpkt) \ - LO_BPF_TAP_OUT(_n); \ - } \ +#define LO_BPF_TAP_OUT_MULTI(_m) { \ + if (lo_statics[0].bpf_callback != NULL) { \ + struct mbuf *_n; \ + for (_n = _m; _n != NULL; _n = _n->m_nextpkt) \ + LO_BPF_TAP_OUT(_n); \ + } \ } struct lo_statics_str { - int bpf_mode; - bpf_packet_func bpf_callback; + int bpf_mode; + bpf_packet_func bpf_callback; }; static struct lo_statics_str lo_statics[NLOOP]; @@ -143,8 +143,8 @@ static int lo_txstart = 0; struct ifnet *lo_ifp = NULL; -struct loopback_header { - protocol_family_t protocol; +struct loopback_header { + protocol_family_t protocol; }; /* Local forward declarations */ @@ -203,7 +203,7 @@ lo_demux(struct ifnet *ifp, struct mbuf *m, char *frame_header, *protocol_family = header->protocol; - return (0); + return 0; } static errno_t @@ -214,20 +214,22 @@ lo_framer(struct ifnet *ifp, struct mbuf **m, const struct sockaddr *dest, #pragma unused(ifp, dest, dest_linkaddr) struct loopback_header *header; - M_PREPEND(*m, sizeof (struct loopback_header), M_WAITOK, 1); + M_PREPEND(*m, sizeof(struct loopback_header), M_WAITOK, 1); if (*m == NULL) { /* Tell caller not to try to free passed-in mbuf */ - return (EJUSTRETURN); + return EJUSTRETURN; } - if (prepend_len != NULL) - *prepend_len = sizeof (struct loopback_header); - if (postpend_len != NULL) + if (prepend_len != NULL) { + *prepend_len = sizeof(struct loopback_header); + } + if (postpend_len != NULL) { *postpend_len = 0; + } header = mtod(*m, struct loopback_header *); - bcopy(frame_type, &header->protocol, sizeof (u_int32_t)); - return (0); + bcopy(frame_type, &header->protocol, sizeof(u_int32_t)); + return 0; } static errno_t @@ -235,14 +237,14 @@ lo_add_proto(struct
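/*
 * [Example] lo_framer's only job is to prepend struct loopback_header --
 * a single protocol_family_t in host byte order -- and lo_demux recovers
 * it on input; the LO_BPF_TAP_OUT macro hands that same 4-byte word to
 * BPF as the DLT_NULL pseudo-header. A minimal sketch of the consumer
 * side (generic DLT_NULL handling, not code from this file):
 */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

static const char *
loopback_proto(const uint8_t *frame, size_t len)
{
	uint32_t family;

	if (len < sizeof(family)) {
		return "truncated";
	}
	memcpy(&family, frame, sizeof(family));	/* the loopback_header word */
	switch (family) {
	case AF_INET:
		return "IPv4";	/* payload starts at frame + 4 */
	case AF_INET6:
		return "IPv6";
	default:
		return "other";
	}
}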
ifnet *interface, protocol_family_t protocol_family, const struct ifnet_demux_desc *demux_array, u_int32_t demux_count) { #pragma unused(interface, protocol_family, demux_array, demux_count) - return (0); + return 0; } static errno_t lo_del_proto(struct ifnet *ifp, protocol_family_t protocol) { #pragma unused(ifp, protocol) - return (0); + return 0; } static void @@ -262,15 +264,17 @@ lo_tx_compl(struct ifnet *ifp, struct mbuf *m) net_timernsec(&now, &ts); error = mbuf_set_timestamp(m, ts, TRUE); - if (error != 0) + if (error != 0) { printf("%s: mbuf_set_timestamp() failed %d\n", - __func__, error); + __func__, error); + } } } error = mbuf_set_status(m, KERN_SUCCESS); - if (error != 0) + if (error != 0) { printf("%s: mbuf_set_status() failed %d\n", - __func__, error); + __func__, error); + } ifnet_tx_compl(ifp, m); } @@ -299,8 +303,9 @@ lo_output(struct ifnet *ifp, struct mbuf *m_list) * This is used to match multicast packets, sent looping * back, with the appropriate group record on input. */ - if (m->m_pkthdr.rcvif == NULL) + if (m->m_pkthdr.rcvif == NULL) { m->m_pkthdr.rcvif = ifp; + } m->m_pkthdr.pkt_flags |= PKTF_LOOP; m->m_pkthdr.pkt_hdr = mtod(m, char *); @@ -311,7 +316,7 @@ lo_output(struct ifnet *ifp, struct mbuf *m_list) CSUM_DATA_VALID | CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID; - m_adj(m, sizeof (struct loopback_header)); + m_adj(m, sizeof(struct loopback_header)); LO_BPF_TAP_OUT(m); if (m->m_nextpkt == NULL) { @@ -325,7 +330,7 @@ lo_output(struct ifnet *ifp, struct mbuf *m_list) s.bytes_in = len; s.bytes_out = len; - return (ifnet_input_extended(ifp, m_list, m_tail, &s)); + return ifnet_input_extended(ifp, m_list, m_tail, &s); } /* @@ -350,8 +355,9 @@ lo_pre_enqueue(struct ifnet *ifp, struct mbuf *m0) * This is used to match multicast packets, sent looping * back, with the appropriate group record on input. */ - if (m->m_pkthdr.rcvif == NULL) + if (m->m_pkthdr.rcvif == NULL) { m->m_pkthdr.rcvif = ifp; + } m->m_pkthdr.pkt_flags |= PKTF_LOOP; m->m_pkthdr.pkt_hdr = mtod(m, char *); @@ -362,7 +368,7 @@ lo_pre_enqueue(struct ifnet *ifp, struct mbuf *m0) CSUM_DATA_VALID | CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID; - m_adj(m, sizeof (struct loopback_header)); + m_adj(m, sizeof(struct loopback_header)); /* * Let the callee free it in case of error, @@ -373,7 +379,7 @@ lo_pre_enqueue(struct ifnet *ifp, struct mbuf *m0) m = n; } - return (error); + return error; } /* @@ -391,7 +397,7 @@ lo_start(struct ifnet *ifp) { struct ifnet_stat_increment_param s; - bzero(&s, sizeof (s)); + bzero(&s, sizeof(s)); for (;;) { struct mbuf *m = NULL, *m_tail = NULL; @@ -399,13 +405,15 @@ lo_start(struct ifnet *ifp) if (lo_sched_model == IFNET_SCHED_MODEL_NORMAL) { if (ifnet_dequeue_multi(ifp, lo_dequeue_max, &m, - &m_tail, &cnt, &len) != 0) + &m_tail, &cnt, &len) != 0) { break; + } } else { if (ifnet_dequeue_service_class_multi(ifp, lo_dequeue_sc, lo_dequeue_max, &m, - &m_tail, &cnt, &len) != 0) + &m_tail, &cnt, &len) != 0) { break; + } } LO_BPF_TAP_OUT_MULTI(m); @@ -442,17 +450,17 @@ lo_pre_output(struct ifnet *ifp, protocol_family_t protocol_family, if (rt_flags & (RTF_REJECT | RTF_BLACKHOLE)) { if (rt_flags & RTF_BLACKHOLE) { m_freem(*m); - return (EJUSTRETURN); + return EJUSTRETURN; } else { - return ((rt_flags & RTF_HOST) ? - EHOSTUNREACH : ENETUNREACH); + return (rt_flags & RTF_HOST) ? 
+ EHOSTUNREACH : ENETUNREACH; } } } - bcopy(&protocol_family, frame_type, sizeof (protocol_family)); + bcopy(&protocol_family, frame_type, sizeof(protocol_family)); - return (0); + return 0; } /* @@ -473,14 +481,16 @@ lo_input(struct ifnet *ifp, protocol_family_t protocol_family, struct mbuf *m) net_timernsec(&now, &ts); error = mbuf_set_timestamp(m, ts, TRUE); - if (error != 0) + if (error != 0) { printf("%s: mbuf_set_timestamp() failed %d\n", - __func__, error); + __func__, error); + } } - if (proto_input(protocol_family, m) != 0) + if (proto_input(protocol_family, m) != 0) { m_freem(m); - return (0); + } + return 0; } /* ARGSUSED */ @@ -509,11 +519,10 @@ lo_ioctl(struct ifnet *ifp, u_long cmd, void *data) int error = 0; switch (cmd) { - - case SIOCSIFADDR: { /* struct ifaddr pointer */ + case SIOCSIFADDR: { /* struct ifaddr pointer */ struct ifaddr *ifa = data; - ifnet_set_flags(ifp, IFF_UP|IFF_RUNNING, IFF_UP|IFF_RUNNING); + ifnet_set_flags(ifp, IFF_UP | IFF_RUNNING, IFF_UP | IFF_RUNNING); IFA_LOCK_SPIN(ifa); ifa->ifa_rtrequest = lo_rtrequest; IFA_UNLOCK(ifa); @@ -523,16 +532,15 @@ lo_ioctl(struct ifnet *ifp, u_long cmd, void *data) break; } - case SIOCADDMULTI: /* struct ifreq */ - case SIOCDELMULTI: { /* struct ifreq */ + case SIOCADDMULTI: /* struct ifreq */ + case SIOCDELMULTI: { /* struct ifreq */ struct ifreq *ifr = data; if (ifr == NULL) { - error = EAFNOSUPPORT; /* XXX */ + error = EAFNOSUPPORT; /* XXX */ break; } switch (ifr->ifr_addr.sa_family) { - #if INET case AF_INET: break; @@ -549,14 +557,14 @@ lo_ioctl(struct ifnet *ifp, u_long cmd, void *data) break; } - case SIOCSIFMTU: { /* struct ifreq */ + case SIOCSIFMTU: { /* struct ifreq */ struct ifreq *ifr = data; - bcopy(&ifr->ifr_mtu, &ifp->if_mtu, sizeof (int)); + bcopy(&ifr->ifr_mtu, &ifp->if_mtu, sizeof(int)); break; } - case SIOCSIFFLAGS: /* struct ifreq */ + case SIOCSIFFLAGS: /* struct ifreq */ case SIOCSIFTIMESTAMPENABLE: case SIOCSIFTIMESTAMPDISABLE: break; @@ -565,7 +573,7 @@ lo_ioctl(struct ifnet *ifp, u_long cmd, void *data) error = EOPNOTSUPP; break; } - return (error); + return error; } #endif /* NLOOP > 0 */ @@ -573,10 +581,10 @@ lo_ioctl(struct ifnet *ifp, u_long cmd, void *data) static errno_t lo_attach_proto(struct ifnet *ifp, protocol_family_t protocol_family) { - struct ifnet_attach_proto_param_v2 proto; - errno_t result = 0; + struct ifnet_attach_proto_param_v2 proto; + errno_t result = 0; - bzero(&proto, sizeof (proto)); + bzero(&proto, sizeof(proto)); proto.input = lo_input; proto.pre_output = lo_pre_output; @@ -587,7 +595,7 @@ lo_attach_proto(struct ifnet *ifp, protocol_family_t protocol_family) "returned=%d\n", protocol_family, result); } - return (result); + return result; } static void @@ -597,14 +605,16 @@ lo_reg_if_mods(void) /* Register protocol registration functions */ if ((error = proto_register_plumber(PF_INET, - APPLE_IF_FAM_LOOPBACK, lo_attach_proto, NULL)) != 0) + APPLE_IF_FAM_LOOPBACK, lo_attach_proto, NULL)) != 0) { printf("proto_register_plumber failed for AF_INET " "error=%d\n", error); + } if ((error = proto_register_plumber(PF_INET6, - APPLE_IF_FAM_LOOPBACK, lo_attach_proto, NULL)) != 0) + APPLE_IF_FAM_LOOPBACK, lo_attach_proto, NULL)) != 0) { printf("proto_register_plumber failed for AF_INET6 " "error=%d\n", error); + } } static errno_t @@ -616,18 +626,18 @@ lo_set_bpf_tap(struct ifnet *ifp, bpf_tap_mode mode, lo_statics[0].bpf_mode = mode; switch (mode) { - case BPF_TAP_DISABLE: - case BPF_TAP_INPUT: - lo_statics[0].bpf_callback = NULL; - break; + case BPF_TAP_DISABLE: + case 
BPF_TAP_INPUT: + lo_statics[0].bpf_callback = NULL; + break; - case BPF_TAP_OUTPUT: - case BPF_TAP_INPUT_OUTPUT: - lo_statics[0].bpf_callback = bpf_callback; - break; + case BPF_TAP_OUTPUT: + case BPF_TAP_INPUT_OUTPUT: + lo_statics[0].bpf_callback = bpf_callback; + break; } - return (0); + return 0; } /* ARGSUSED */ @@ -635,40 +645,40 @@ void loopattach(void) { struct ifnet_init_eparams lo_init; - errno_t result = 0; + errno_t result = 0; - PE_parse_boot_argn("lo_txstart", &lo_txstart, sizeof (lo_txstart)); + PE_parse_boot_argn("lo_txstart", &lo_txstart, sizeof(lo_txstart)); lo_reg_if_mods(); lo_statics[0].bpf_callback = NULL; lo_statics[0].bpf_mode = BPF_TAP_DISABLE; - bzero(&lo_init, sizeof (lo_init)); - lo_init.ver = IFNET_INIT_CURRENT_VERSION; - lo_init.len = sizeof (lo_init); - lo_init.sndq_maxlen = LOSNDQ_MAXLEN; + bzero(&lo_init, sizeof(lo_init)); + lo_init.ver = IFNET_INIT_CURRENT_VERSION; + lo_init.len = sizeof(lo_init); + lo_init.sndq_maxlen = LOSNDQ_MAXLEN; if (lo_txstart) { - lo_init.flags = 0; - lo_init.pre_enqueue = lo_pre_enqueue; - lo_init.start = lo_start; + lo_init.flags = 0; + lo_init.pre_enqueue = lo_pre_enqueue; + lo_init.start = lo_start; lo_init.output_sched_model = lo_sched_model; } else { - lo_init.flags = IFNET_INIT_LEGACY; - lo_init.output = lo_output; + lo_init.flags = IFNET_INIT_LEGACY; + lo_init.output = lo_output; } - lo_init.flags |= IFNET_INIT_NX_NOAUTO; - lo_init.name = "lo"; - lo_init.unit = 0; - lo_init.family = IFNET_FAMILY_LOOPBACK; - lo_init.type = IFT_LOOP; - lo_init.demux = lo_demux; - lo_init.add_proto = lo_add_proto; - lo_init.del_proto = lo_del_proto; - lo_init.framer_extended = lo_framer; - lo_init.softc = &lo_statics[0]; - lo_init.ioctl = lo_ioctl; - lo_init.set_bpf_tap = lo_set_bpf_tap; + lo_init.flags |= IFNET_INIT_NX_NOAUTO; + lo_init.name = "lo"; + lo_init.unit = 0; + lo_init.family = IFNET_FAMILY_LOOPBACK; + lo_init.type = IFT_LOOP; + lo_init.demux = lo_demux; + lo_init.add_proto = lo_add_proto; + lo_init.del_proto = lo_del_proto; + lo_init.framer_extended = lo_framer; + lo_init.softc = &lo_statics[0]; + lo_init.ioctl = lo_ioctl; + lo_init.set_bpf_tap = lo_set_bpf_tap; result = ifnet_allocate_extended(&lo_init, &lo_ifp); if (result != 0) { @@ -685,7 +695,7 @@ loopattach(void) IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_IPV6_FRAGMENT | IFNET_CSUM_FRAGMENT | IFNET_IP_FRAGMENT | IFNET_MULTIPAGES | IFNET_TX_STATUS | IFNET_SW_TIMESTAMP); - ifnet_set_hdrlen(lo_ifp, sizeof (struct loopback_header)); + ifnet_set_hdrlen(lo_ifp, sizeof(struct loopback_header)); ifnet_set_eflags(lo_ifp, IFEF_SENDLIST, IFEF_SENDLIST); #if CONFIG_MACF_NET @@ -705,7 +715,7 @@ loopattach(void) lo_ifp->if_eflags &= ~IFEF_ECN_ENABLE; lo_ifp->if_eflags |= IFEF_ECN_DISABLE; - bpfattach(lo_ifp, DLT_NULL, sizeof (u_int32_t)); + bpfattach(lo_ifp, DLT_NULL, sizeof(u_int32_t)); } static int @@ -718,17 +728,19 @@ sysctl_dequeue_max SYSCTL_HANDLER_ARGS i = lo_dequeue_max; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (i < 1) + if (i < 1) { i = 1; - else if (i > LOSNDQ_MAXLEN) + } else if (i > LOSNDQ_MAXLEN) { i = LOSNDQ_MAXLEN; + } lo_dequeue_max = i; - return (err); + return err; } static int @@ -741,8 +753,9 @@ sysctl_sched_model SYSCTL_HANDLER_ARGS i = lo_sched_model; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } 
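/*
 * [Example] sysctl_dequeue_max above is the canonical read-modify-clamp-
 * commit shape for integer tunables: snapshot the value, let
 * sysctl_handle_int() do the copy-in/copy-out, return early on error or on
 * a read-only access, then clamp out-of-range input rather than rejecting
 * it. A generic template under assumed names ("my_tunable" and the bounds
 * are placeholders, not symbols from this file):
 */
static int my_tunable = 16;

static int
sysctl_my_tunable SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int i, err;

	i = my_tunable;				/* snapshot current value */
	err = sysctl_handle_int(oidp, &i, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL) {
		return err;			/* error, or read-only access */
	}
	if (i < 1) {				/* clamp rather than fail */
		i = 1;
	} else if (i > 64) {
		i = 64;
	}
	my_tunable = i;				/* commit */
	return err;
}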
switch (i) { case IFNET_SCHED_MODEL_NORMAL: @@ -755,10 +768,11 @@ sysctl_sched_model SYSCTL_HANDLER_ARGS break; } - if (err == 0 && (err = ifnet_set_output_sched_model(lo_ifp, i)) == 0) + if (err == 0 && (err = ifnet_set_output_sched_model(lo_ifp, i)) == 0) { lo_sched_model = i; + } - return (err); + return err; } static int @@ -771,17 +785,20 @@ sysctl_dequeue_scidx SYSCTL_HANDLER_ARGS i = lo_dequeue_scidx; err = sysctl_handle_int(oidp, &i, 0, req); - if (err != 0 || req->newptr == USER_ADDR_NULL) - return (err); + if (err != 0 || req->newptr == USER_ADDR_NULL) { + return err; + } - if (!MBUF_VALID_SCIDX(i)) - return (EINVAL); + if (!MBUF_VALID_SCIDX(i)) { + return EINVAL; + } - if (lo_sched_model != IFNET_SCHED_MODEL_DRIVER_MANAGED) - return (ENODEV); + if (lo_sched_model != IFNET_SCHED_MODEL_DRIVER_MANAGED) { + return ENODEV; + } lo_dequeue_sc = m_service_class_from_idx(i); lo_dequeue_scidx = MBUF_SCIDX(lo_dequeue_sc); - return (err); + return err; } diff --git a/bsd/net/if_low_power_mode.c b/bsd/net/if_low_power_mode.c index aac91d5c7..3f9e3e89b 100644 --- a/bsd/net/if_low_power_mode.c +++ b/bsd/net/if_low_power_mode.c @@ -93,8 +93,9 @@ if_low_power_evhdlr_callback(__unused struct eventhandler_entry_arg arg, { struct kev_dl_low_power_mode kev; - if (!IF_FULLY_ATTACHED(ifp)) + if (!IF_FULLY_ATTACHED(ifp)) { return; + } if (if_low_power_verbose > 0) { os_log_info(OS_LOG_DEFAULT, @@ -136,7 +137,7 @@ if_low_power_evhdlr_init(void) (void) EVENTHANDLER_REGISTER(&if_low_power_evhdlr_ctx, if_low_power_event, - if_low_power_evhdlr_callback, + if_low_power_evhdlr_callback, eventhandler_entry_dummy_arg, EVENTHANDLER_PRI_ANY); } @@ -147,7 +148,7 @@ if_low_power_nwk_ev_callback(void *arg) { struct if_low_power_ev_args *if_low_power_ev_args = (struct if_low_power_ev_args *)arg; - + EVENTHANDLER_INVOKE(&if_low_power_evhdlr_ctx, if_low_power_event, if_low_power_ev_args->ifp, @@ -180,8 +181,9 @@ if_set_low_power(ifnet_t ifp, bool on) { int error = 0; - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } os_log(OS_LOG_DEFAULT, "%s: ifp %s low_power mode %d", __func__, if_name(ifp), on); @@ -191,6 +193,5 @@ if_set_low_power(ifnet_t ifp, bool on) (ifp->if_xflags & ~IFXF_LOW_POWER); ifnet_lock_done(ifp); - return (error); + return error; } - diff --git a/bsd/net/if_media.h b/bsd/net/if_media.h index 633eb62cd..55fd50f64 100644 --- a/bsd/net/if_media.h +++ b/bsd/net/if_media.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2013 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* $NetBSD: if_media.h,v 1.3 1997/03/26 01:19:27 thorpej Exp $ */ +/* $NetBSD: if_media.h,v 1.3 1997/03/26 01:19:27 thorpej Exp $ */ /* $FreeBSD: src/sys/net/if_media.h,v 1.9.2.1 2001/07/04 00:12:38 brooks Exp $ */ /* * Copyright (c) 1997 - * Jonathan Stone and Jason R. Thorpe. All rights reserved. + * Jonathan Stone and Jason R. Thorpe. All rights reserved. * * This software is derived from information provided by Matt Thomas. * @@ -44,8 +44,8 @@ * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: - * This product includes software developed by Jonathan Stone - * and Jason R. Thorpe for the NetBSD Project. + * This product includes software developed by Jonathan Stone + * and Jason R. Thorpe for the NetBSD Project. * 4. The names of the authors may not be used to endorse or promote products * derived from this software without specific prior written permission. * @@ -87,133 +87,247 @@ * if_media Options word: * Bits Use * ---- ------- - * 0-4 Media subtype - * 5-7 Media type + * 0-4 Media variant + * 5-7 Media type * 8-15 Type specific options - * 16-19 RFU + * 16-19 Extended media variant bits * 20-27 Shared (global) options * 28-31 Instance */ /* * Ethernet + * + * In order to use more than 31 subtypes, Ethernet uses the extended media + * variant bits + * + * The extended media variant bits are not backward compatible so they + * must not be used by kernel extensions like ifnet and drivers that + * are to be deployed on older system versions */ -#define IFM_ETHER 0x00000020 -#define IFM_10_T 3 /* 10BaseT - RJ45 */ -#define IFM_10_2 4 /* 10Base2 - Thinnet */ -#define IFM_10_5 5 /* 10Base5 - AUI */ -#define IFM_100_TX 6 /* 100BaseTX - RJ45 */ -#define IFM_100_FX 7 /* 100BaseFX - Fiber */ -#define IFM_100_T4 8 /* 100BaseT4 - 4 pair cat 3 */ -#define IFM_100_VG 9 /* 100VG-AnyLAN */ -#define IFM_100_T2 10 /* 100BaseT2 */ -#define IFM_1000_SX 11 /* 1000BaseSX - multi-mode fiber */ -#define IFM_10_STP 12 /* 10BaseT over shielded TP */ -#define IFM_10_FL 13 /* 10baseFL - Fiber */ -#define IFM_1000_LX 14 /* 1000baseLX - single-mode fiber */ -#define IFM_1000_CX 15 /* 1000baseCX - 150ohm STP */ -#define IFM_1000_T 16 /* 1000baseT - 4 pair cat 5 */ +#define IFM_X(x) IFM_X_SUBTYPE(x) /* internal shorthand */ + +#define IFM_ETHER 0x00000020 +#define IFM_10_T 3 /* 10BaseT - RJ45 */ +#define IFM_10_2 4 /* 10Base2 - Thinnet */ +#define IFM_10_5 5 /* 10Base5 - AUI */ +#define IFM_100_TX 6 /* 100BaseTX - RJ45 */ +#define IFM_100_FX 7 /* 100BaseFX - Fiber */ +#define IFM_100_T4 8 /* 100BaseT4 - 4 pair cat 3 */ +#define IFM_100_VG 9 /* 100VG-AnyLAN */ +#define IFM_100_T2 10 /* 100BaseT2 */ +#define IFM_1000_SX 11 /* 1000BaseSX - multi-mode fiber */ +#define IFM_10_STP 12 /* 10BaseT over shielded TP */ +#define IFM_10_FL 13 /* 10baseFL - Fiber */ +#define IFM_1000_LX 14 /* 1000baseLX - single-mode fiber */ +#define IFM_1000_CX 15 /* 1000baseCX - 150ohm STP */ +#define IFM_1000_T 16 /* 1000baseT - 4 pair cat 5 */ #ifdef PRIVATE -#define IFM_1000_TX IFM_1000_T /* For compatibility */ +#define IFM_1000_TX IFM_1000_T /* For compatibility */ #endif /* PRIVATE */ -#define IFM_HPNA_1 17 /* HomePNA 1.0 (1Mb/s) */ -#define IFM_10G_SR 18 /* 10GbaseSR - multi-mode fiber */ -#define IFM_10G_LR 19 /* 10GbaseLR - single-mode 
fiber */ -#define IFM_10G_CX4 20 /* 10GbaseCX4 - copper */ -#define IFM_10G_T 21 /* 10GbaseT - 4 pair cat 6 */ -#define IFM_2500_T 22 /* 2500baseT - 4 pair cat 5 */ -#define IFM_5000_T 23 /* 5000baseT - 4 pair cat 5 */ +#define IFM_HPNA_1 17 /* HomePNA 1.0 (1Mb/s) */ +#define IFM_10G_SR 18 /* 10GbaseSR - multi-mode fiber */ +#define IFM_10G_LR 19 /* 10GbaseLR - single-mode fiber */ +#define IFM_10G_CX4 20 /* 10GbaseCX4 - copper */ +#define IFM_10G_T 21 /* 10GbaseT - 4 pair cat 6 */ +#define IFM_2500_T 22 /* 2500baseT - 4 pair cat 5 */ +#define IFM_5000_T 23 /* 5000baseT - 4 pair cat 5 */ +#define IFM_1000_CX_SGMII 24 /* 1000Base-CX-SGMII */ +#define IFM_1000_KX 25 /* 1000Base-KX backplane */ +#define IFM_10G_KX4 26 /* 10GBase-KX4 backplane */ +#define IFM_10G_KR 27 /* 10GBase-KR backplane */ +#define IFM_10G_CR1 28 /* 10GBase-CR1 Twinax splitter */ +#define IFM_10G_ER 29 /* 10GBase-ER */ +#define IFM_20G_KR2 30 /* 20GBase-KR2 backplane */ +#define IFM_OTHER 31 /* Other: one of the following */ + +/* following types are not visible to old binaries using the low bits of IFM_TMASK */ +#define IFM_2500_SX IFM_X(32) /* 2500BaseSX - multi-mode fiber */ +#define IFM_10G_TWINAX IFM_X(33) /* 10GBase Twinax copper */ +#define IFM_10G_TWINAX_LONG IFM_X(34) /* 10GBase Twinax Long copper */ +#define IFM_10G_LRM IFM_X(35) /* 10GBase-LRM 850nm Multi-mode */ +#define IFM_2500_KX IFM_X(36) /* 2500Base-KX backplane */ +#define IFM_40G_CR4 IFM_X(37) /* 40GBase-CR4 */ +#define IFM_40G_SR4 IFM_X(38) /* 40GBase-SR4 */ +#define IFM_50G_PCIE IFM_X(39) /* 50G Ethernet over PCIE */ +#define IFM_25G_PCIE IFM_X(40) /* 25G Ethernet over PCIE */ +#define IFM_1000_SGMII IFM_X(41) /* 1G media interface */ +#define IFM_10G_SFI IFM_X(42) /* 10G media interface */ +#define IFM_40G_XLPPI IFM_X(43) /* 40G media interface */ +#define IFM_40G_LR4 IFM_X(44) /* 40GBase-LR4 */ +#define IFM_40G_KR4 IFM_X(45) /* 40GBase-KR4 */ +#define IFM_100G_CR4 IFM_X(47) /* 100GBase-CR4 */ +#define IFM_100G_SR4 IFM_X(48) /* 100GBase-SR4 */ +#define IFM_100G_KR4 IFM_X(49) /* 100GBase-KR4 */ +#define IFM_100G_LR4 IFM_X(50) /* 100GBase-LR4 */ +#define IFM_56G_R4 IFM_X(51) /* 56GBase-R4 */ +#define IFM_100_T IFM_X(52) /* 100BaseT - RJ45 */ +#define IFM_25G_CR IFM_X(53) /* 25GBase-CR */ +#define IFM_25G_KR IFM_X(54) /* 25GBase-KR */ +#define IFM_25G_SR IFM_X(55) /* 25GBase-SR */ +#define IFM_50G_CR2 IFM_X(56) /* 50GBase-CR2 */ +#define IFM_50G_KR2 IFM_X(57) /* 50GBase-KR2 */ +#define IFM_25G_LR IFM_X(58) /* 25GBase-LR */ +#define IFM_10G_AOC IFM_X(59) /* 10G active optical cable */ +#define IFM_25G_ACC IFM_X(60) /* 25G active copper cable */ +#define IFM_25G_AOC IFM_X(61) /* 25G active optical cable */ +#define IFM_100_SGMII IFM_X(62) /* 100M media interface */ +#define IFM_2500_X IFM_X(63) /* 2500BaseX */ +#define IFM_5000_KR IFM_X(64) /* 5GBase-KR backplane */ +#define IFM_25G_T IFM_X(65) /* 25GBase-T - RJ45 */ +#define IFM_25G_CR_S IFM_X(66) /* 25GBase-CR (short) */ +#define IFM_25G_CR1 IFM_X(67) /* 25GBase-CR1 DA cable */ +#define IFM_25G_KR_S IFM_X(68) /* 25GBase-KR (short) */ +#define IFM_5000_KR_S IFM_X(69) /* 5GBase-KR backplane (short) */ +#define IFM_5000_KR1 IFM_X(70) /* 5GBase-KR backplane */ +#define IFM_25G_AUI IFM_X(71) /* 25G-AUI-C2C (chip to chip) */ +#define IFM_40G_XLAUI IFM_X(72) /* 40G-XLAUI */ +#define IFM_40G_XLAUI_AC IFM_X(73) /* 40G active copper/optical */ +#define IFM_40G_ER4 IFM_X(74) /* 40GBase-ER4 */ +#define IFM_50G_SR2 IFM_X(75) /* 50GBase-SR2 */ +#define IFM_50G_LR2 IFM_X(76) /* 50GBase-LR2 */ +#define 
IFM_50G_LAUI2_AC IFM_X(77) /* 50G active copper/optical */ +#define IFM_50G_LAUI2 IFM_X(78) /* 50G-LAUI2 */ +#define IFM_50G_AUI2_AC IFM_X(79) /* 50G active copper/optical */ +#define IFM_50G_AUI2 IFM_X(80) /* 50G-AUI2 */ +#define IFM_50G_CP IFM_X(81) /* 50GBase-CP */ +#define IFM_50G_SR IFM_X(82) /* 50GBase-SR */ +#define IFM_50G_LR IFM_X(83) /* 50GBase-LR */ +#define IFM_50G_FR IFM_X(84) /* 50GBase-FR */ +#define IFM_50G_KR_PAM4 IFM_X(85) /* 50GBase-KR PAM4 */ +#define IFM_25G_KR1 IFM_X(86) /* 25GBase-KR1 */ +#define IFM_50G_AUI1_AC IFM_X(87) /* 50G active copper/optical */ +#define IFM_50G_AUI1 IFM_X(88) /* 50G-AUI1 */ +#define IFM_100G_CAUI4_AC IFM_X(89) /* 100G-CAUI4 active copper/optical */ +#define IFM_100G_CAUI4 IFM_X(90) /* 100G-CAUI4 */ +#define IFM_100G_AUI4_AC IFM_X(91) /* 100G-AUI4 active copper/optical */ +#define IFM_100G_AUI4 IFM_X(92) /* 100G-AUI4 */ +#define IFM_100G_CR_PAM4 IFM_X(93) /* 100GBase-CR PAM4 */ +#define IFM_100G_KR_PAM4 IFM_X(94) /* 100GBase-KR PAM4 */ +#define IFM_100G_CP2 IFM_X(95) /* 100GBase-CP2 */ +#define IFM_100G_SR2 IFM_X(96) /* 100GBase-SR2 */ +#define IFM_100G_DR IFM_X(97) /* 100GBase-DR */ +#define IFM_100G_KR2_PAM4 IFM_X(98) /* 100GBase-KR2 PAM4 */ +#define IFM_100G_CAUI2_AC IFM_X(99) /* 100G-CAUI2 active copper/optical */ +#define IFM_100G_CAUI2 IFM_X(100) /* 100G-CAUI2 */ +#define IFM_100G_AUI2_AC IFM_X(101) /* 100G-AUI2 active copper/optical */ +#define IFM_100G_AUI2 IFM_X(102) /* 100G-AUI2 */ +#define IFM_200G_CR4_PAM4 IFM_X(103) /* 200GBase-CR4 PAM4 */ +#define IFM_200G_SR4 IFM_X(104) /* 200GBase-SR4 */ +#define IFM_200G_FR4 IFM_X(105) /* 200GBase-FR4 */ +#define IFM_200G_LR4 IFM_X(106) /* 200GBase-LR4 */ +#define IFM_200G_DR4 IFM_X(107) /* 200GBase-DR4 */ +#define IFM_200G_KR4_PAM4 IFM_X(108) /* 200GBase-KR4 PAM4 */ +#define IFM_200G_AUI4_AC IFM_X(109) /* 200G-AUI4 active copper/optical */ +#define IFM_200G_AUI4 IFM_X(110) /* 200G-AUI4 */ +#define IFM_200G_AUI8_AC IFM_X(111) /* 200G-AUI8 active copper/optical */ +#define IFM_200G_AUI8 IFM_X(112) /* 200G-AUI8 */ +#define IFM_400G_FR8 IFM_X(113) /* 400GBase-FR8 */ +#define IFM_400G_LR8 IFM_X(114) /* 400GBase-LR8 */ +#define IFM_400G_DR4 IFM_X(115) /* 400GBase-DR4 */ +#define IFM_400G_AUI8_AC IFM_X(116) /* 400G-AUI8 active copper/optical */ +#define IFM_400G_AUI8 IFM_X(117) /* 400G-AUI8 */ /* * Token ring */ -#define IFM_TOKEN 0x00000040 -#define IFM_TOK_STP4 3 /* Shielded twisted pair 4m - DB9 */ -#define IFM_TOK_STP16 4 /* Shielded twisted pair 16m - DB9 */ -#define IFM_TOK_UTP4 5 /* Unshielded twisted pair 4m - RJ45 */ -#define IFM_TOK_UTP16 6 /* Unshielded twisted pair 16m - RJ45 */ -#define IFM_TOK_STP100 7 /* Shielded twisted pair 100m - DB9 */ -#define IFM_TOK_UTP100 8 /* Unshielded twisted pair 100m - RJ45 */ -#define IFM_TOK_ETR 0x00000200 /* Early token release */ -#define IFM_TOK_SRCRT 0x00000400 /* Enable source routing features */ -#define IFM_TOK_ALLR 0x00000800 /* All routes / Single route bcast */ -#define IFM_TOK_DTR 0x00002000 /* Dedicated token ring */ -#define IFM_TOK_CLASSIC 0x00004000 /* Classic token ring */ -#define IFM_TOK_AUTO 0x00008000 /* Automatic Dedicate/Classic token ring */ +#define IFM_TOKEN 0x00000040 +#define IFM_TOK_STP4 3 /* Shielded twisted pair 4m - DB9 */ +#define IFM_TOK_STP16 4 /* Shielded twisted pair 16m - DB9 */ +#define IFM_TOK_UTP4 5 /* Unshielded twisted pair 4m - RJ45 */ +#define IFM_TOK_UTP16 6 /* Unshielded twisted pair 16m - RJ45 */ +#define IFM_TOK_STP100 7 /* Shielded twisted pair 100m - DB9 */ +#define IFM_TOK_UTP100 8 /* Unshielded
twisted pair 100m - RJ45 */ +#define IFM_TOK_ETR 0x00000200 /* Early token release */ +#define IFM_TOK_SRCRT 0x00000400 /* Enable source routing features */ +#define IFM_TOK_ALLR 0x00000800 /* All routes / Single route bcast */ +#define IFM_TOK_DTR 0x00002000 /* Dedicated token ring */ +#define IFM_TOK_CLASSIC 0x00004000 /* Classic token ring */ +#define IFM_TOK_AUTO 0x00008000 /* Automatic Dedicate/Classic token ring */ /* * FDDI */ -#define IFM_FDDI 0x00000060 -#define IFM_FDDI_SMF 3 /* Single-mode fiber */ -#define IFM_FDDI_MMF 4 /* Multi-mode fiber */ -#define IFM_FDDI_UTP 5 /* CDDI / UTP */ -#define IFM_FDDI_DA 0x00000100 /* Dual attach / single attach */ +#define IFM_FDDI 0x00000060 +#define IFM_FDDI_SMF 3 /* Single-mode fiber */ +#define IFM_FDDI_MMF 4 /* Multi-mode fiber */ +#define IFM_FDDI_UTP 5 /* CDDI / UTP */ +#define IFM_FDDI_DA 0x00000100 /* Dual attach / single attach */ /* * IEEE 802.11 Wireless */ -#define IFM_IEEE80211 0x00000080 -#define IFM_IEEE80211_FH1 3 /* Frequency Hopping 1Mbps */ -#define IFM_IEEE80211_FH2 4 /* Frequency Hopping 2Mbps */ -#define IFM_IEEE80211_DS2 5 /* Direct Sequence 2Mbps */ -#define IFM_IEEE80211_DS5 6 /* Direct Sequence 5Mbps*/ -#define IFM_IEEE80211_DS11 7 /* Direct Sequence 11Mbps*/ -#define IFM_IEEE80211_DS1 8 /* Direct Sequence 1Mbps */ -#define IFM_IEEE80211_DS22 9 /* Direct Sequence 22Mbps */ -#define IFM_IEEE80211_ADHOC 0x00000100 /* Operate in Adhoc mode */ +#define IFM_IEEE80211 0x00000080 +#define IFM_IEEE80211_FH1 3 /* Frequency Hopping 1Mbps */ +#define IFM_IEEE80211_FH2 4 /* Frequency Hopping 2Mbps */ +#define IFM_IEEE80211_DS2 5 /* Direct Sequence 2Mbps */ +#define IFM_IEEE80211_DS5 6 /* Direct Sequence 5Mbps*/ +#define IFM_IEEE80211_DS11 7 /* Direct Sequence 11Mbps*/ +#define IFM_IEEE80211_DS1 8 /* Direct Sequence 1Mbps */ +#define IFM_IEEE80211_DS22 9 /* Direct Sequence 22Mbps */ +#define IFM_IEEE80211_ADHOC 0x00000100 /* Operate in Adhoc mode */ /* * Shared media sub-types */ -#define IFM_AUTO 0 /* Autoselect best media */ -#define IFM_MANUAL 1 /* Jumper/dipswitch selects media */ -#define IFM_NONE 2 /* Deselect all media */ +#define IFM_AUTO 0 /* Autoselect best media */ +#define IFM_MANUAL 1 /* Jumper/dipswitch selects media */ +#define IFM_NONE 2 /* Deselect all media */ /* * Shared options */ -#define IFM_FDX 0x00100000 /* Force full duplex */ -#define IFM_HDX 0x00200000 /* Force half duplex */ -#define IFM_FLOW 0x00400000 /* enable hardware flow control */ -#define IFM_EEE 0x00800000 /* Support energy efficient ethernet */ -#define IFM_FLAG0 0x01000000 /* Driver defined flag */ -#define IFM_FLAG1 0x02000000 /* Driver defined flag */ -#define IFM_FLAG2 0x04000000 /* Driver defined flag */ -#define IFM_LOOP 0x08000000 /* Put hardware in loopback */ +#define IFM_FDX 0x00100000 /* Force full duplex */ +#define IFM_HDX 0x00200000 /* Force half duplex */ +#define IFM_FLOW 0x00400000 /* enable hardware flow control */ +#define IFM_EEE 0x00800000 /* Support energy efficient ethernet */ +#define IFM_FLAG0 0x01000000 /* Driver defined flag */ +#define IFM_FLAG1 0x02000000 /* Driver defined flag */ +#define IFM_FLAG2 0x04000000 /* Driver defined flag */ +#define IFM_LOOP 0x08000000 /* Put hardware in loopback */ + +/* + * Macros to access bits of extended media sub-types (media variants) + */ +#define IFM_TMASK_COMPAT 0x0000001f /* Lower bits of media sub-type */ +#define IFM_TMASK_EXT 0x000f0000 /* For extended media sub-type */ +#define IFM_TMASK_EXT_SHIFT 11 /* to extract high bits */ +#define IFM_X_SUBTYPE(x) (((x) & 
IFM_TMASK_COMPAT) | \ + (((x) & (IFM_TMASK_EXT >> IFM_TMASK_EXT_SHIFT)) << IFM_TMASK_EXT_SHIFT)) /* * Masks */ -#define IFM_NMASK 0x000000e0 /* Network type */ -#define IFM_TMASK 0x0000001f /* Media sub-type */ -#define IFM_IMASK 0xf0000000 /* Instance */ -#define IFM_ISHIFT 28 /* Instance shift */ -#define IFM_OMASK 0x0000ff00 /* Type specific options */ -#define IFM_GMASK 0x0ff00000 /* Global options */ +#define IFM_NMASK 0x000000e0 /* Network type */ +#define IFM_TMASK (IFM_TMASK_COMPAT|IFM_TMASK_EXT) /* Media sub-type */ +#define IFM_IMASK 0xf0000000 /* Instance */ +#define IFM_ISHIFT 28 /* Instance shift */ +#define IFM_OMASK 0x0000ff00 /* Type specific options */ +#define IFM_GMASK 0x0ff00000 /* Global options */ /* * Status bits */ -#define IFM_AVALID 0x00000001 /* Active bit valid */ -#define IFM_ACTIVE 0x00000002 /* Interface attached to working net */ -#define IFM_WAKESAMENET 0x00000004 /* No link transition while asleep */ +#define IFM_AVALID 0x00000001 /* Active bit valid */ +#define IFM_ACTIVE 0x00000002 /* Interface attached to working net */ +#define IFM_WAKESAMENET 0x00000004 /* No link transition while asleep */ /* * Macros to extract various bits of information from the media word. */ -#define IFM_TYPE(x) ((x) & IFM_NMASK) -#define IFM_SUBTYPE(x) ((x) & IFM_TMASK) + +#define IFM_TYPE(x) ((x) & IFM_NMASK) +#define IFM_SUBTYPE(x) ((x) & IFM_TMASK) #define IFM_TYPE_OPTIONS(x) ((x) & IFM_OMASK) -#define IFM_INST(x) (((x) & IFM_IMASK) >> IFM_ISHIFT) -#define IFM_OPTIONS(x) ((x) & (IFM_OMASK|IFM_GMASK)) +#define IFM_INST(x) (((x) & IFM_IMASK) >> IFM_ISHIFT) +#define IFM_OPTIONS(x) ((x) & (IFM_OMASK|IFM_GMASK)) -#define IFM_INST_MAX IFM_INST(IFM_IMASK) +#define IFM_INST_MAX IFM_INST(IFM_IMASK) /* * Macro to create a media word. */ -#define IFM_MAKEWORD(type, subtype, options, instance) \ +#define IFM_MAKEWORD(type, subtype, options, instance) \ ((type) | (subtype) | (options) | ((instance) << IFM_ISHIFT)) /* @@ -224,8 +338,8 @@ * Otherwise, parsing these in ifconfig(8) would be a nightmare. 
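/*
 * [Example] Worked numbers for the extended-variant encoding defined above:
 * IFM_X(48) keeps the low five bits (0x10) and relocates the next four
 * (0x20 << 11 = 0x10000), so IFM_100G_SR4 encodes as 0x00010010. A new
 * consumer masking with IFM_TMASK recovers 0x10010; an old binary masking
 * with only IFM_TMASK_COMPAT sees 0x10 (16, i.e. IFM_1000_T), which is
 * exactly the incompatibility the comment block above warns about.
 * Standalone arithmetic check (macros copied from this header):
 */
#include <stdio.h>
#include <stdint.h>

#define IFM_ETHER		0x00000020
#define IFM_TMASK_COMPAT	0x0000001f
#define IFM_TMASK_EXT		0x000f0000
#define IFM_TMASK_EXT_SHIFT	11
#define IFM_TMASK		(IFM_TMASK_COMPAT|IFM_TMASK_EXT)
#define IFM_X_SUBTYPE(x)	(((x) & IFM_TMASK_COMPAT) | \
	(((x) & (IFM_TMASK_EXT >> IFM_TMASK_EXT_SHIFT)) << IFM_TMASK_EXT_SHIFT))

int
main(void)
{
	uint32_t sub  = IFM_X_SUBTYPE(48);	/* IFM_100G_SR4 */
	uint32_t word = IFM_ETHER | sub;	/* IFM_MAKEWORD(ether, sub, 0, 0) */

	printf("subtype      = 0x%08x\n", sub);			/* 0x00010010 */
	printf("new-mask sub = 0x%08x\n", word & IFM_TMASK);	/* 0x00010010 */
	printf("old-mask sub = 0x%08x\n", word & IFM_TMASK_COMPAT); /* 0x00000010 */
	return 0;
}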
*/ struct ifmedia_description { - int ifmt_word; /* word value; may be masked */ - const char *ifmt_string; /* description */ + int ifmt_word; /* word value; may be masked */ + const char *ifmt_string; /* description */ }; #define IFM_TYPE_DESCRIPTIONS { \ @@ -236,29 +350,122 @@ struct ifmedia_description { { 0, NULL }, \ } -#define IFM_SUBTYPE_ETHERNET_DESCRIPTIONS { \ - { IFM_10_T, "10baseT/UTP" }, \ - { IFM_10_2, "10base2/BNC" }, \ - { IFM_10_5, "10base5/AUI" }, \ - { IFM_100_TX, "100baseTX" }, \ - { IFM_100_FX, "100baseFX" }, \ - { IFM_100_T4, "100baseT4" }, \ - { IFM_100_VG, "100baseVG" }, \ - { IFM_100_T2, "100baseT2" }, \ - { IFM_1000_SX, "1000baseSX" }, \ - { IFM_10_STP, "10baseSTP" }, \ - { IFM_10_FL, "10baseFL" }, \ - { IFM_1000_LX, "1000baseLX" }, \ - { IFM_1000_CX, "1000baseCX" }, \ - { IFM_1000_T, "1000baseT" }, \ - { IFM_HPNA_1, "HomePNA1" }, \ - { IFM_10G_SR, "10GbaseSR" }, \ - { IFM_10G_LR, "10GbaseLR" }, \ - { IFM_10G_CX4, "10GbaseCX4" }, \ - { IFM_10G_T, "10GbaseT" }, \ - { IFM_2500_T, "2500baseT" }, \ - { IFM_5000_T, "5000baseT" }, \ - { 0, NULL }, \ +#define IFM_SUBTYPE_ETHERNET_DESCRIPTIONS { \ + { IFM_10_T, "10baseT/UTP" }, \ + { IFM_10_2, "10base2/BNC" }, \ + { IFM_10_5, "10base5/AUI" }, \ + { IFM_100_TX, "100baseTX" }, \ + { IFM_100_FX, "100baseFX" }, \ + { IFM_100_T4, "100baseT4" }, \ + { IFM_100_VG, "100baseVG" }, \ + { IFM_100_T2, "100baseT2" }, \ + { IFM_10_STP, "10baseSTP" }, \ + { IFM_10_FL, "10baseFL" }, \ + { IFM_1000_SX, "1000baseSX" }, \ + { IFM_1000_LX, "1000baseLX" }, \ + { IFM_1000_CX, "1000baseCX" }, \ + { IFM_1000_T, "1000baseT" }, \ + { IFM_HPNA_1, "homePNA" }, \ + { IFM_10G_LR, "10Gbase-LR" }, \ + { IFM_10G_SR, "10Gbase-SR" }, \ + { IFM_10G_CX4, "10Gbase-CX4" }, \ + { IFM_2500_SX, "2500BaseSX" }, \ + { IFM_10G_LRM, "10Gbase-LRM" }, \ + { IFM_10G_TWINAX, "10Gbase-Twinax" }, \ + { IFM_10G_TWINAX_LONG, "10Gbase-Twinax-Long" }, \ + { IFM_10G_T, "10Gbase-T" }, \ + { IFM_40G_CR4, "40Gbase-CR4" }, \ + { IFM_40G_SR4, "40Gbase-SR4" }, \ + { IFM_40G_LR4, "40Gbase-LR4" }, \ + { IFM_1000_KX, "1000Base-KX" }, \ + { IFM_OTHER, "Other" }, \ + { IFM_10G_KX4, "10GBase-KX4" }, \ + { IFM_10G_KR, "10GBase-KR" }, \ + { IFM_10G_CR1, "10GBase-CR1" }, \ + { IFM_20G_KR2, "20GBase-KR2" }, \ + { IFM_2500_KX, "2500Base-KX" }, \ + { IFM_2500_T, "2500Base-T" }, \ + { IFM_5000_T, "5000Base-T" }, \ + { IFM_50G_PCIE, "PCIExpress-50G" }, \ + { IFM_25G_PCIE, "PCIExpress-25G" }, \ + { IFM_1000_SGMII, "1000Base-SGMII" }, \ + { IFM_10G_SFI, "10GBase-SFI" }, \ + { IFM_40G_XLPPI, "40GBase-XLPPI" }, \ + { IFM_1000_CX_SGMII, "1000Base-CX-SGMII" }, \ + { IFM_40G_KR4, "40GBase-KR4" }, \ + { IFM_10G_ER, "10GBase-ER" }, \ + { IFM_100G_CR4, "100GBase-CR4" }, \ + { IFM_100G_SR4, "100GBase-SR4" }, \ + { IFM_100G_KR4, "100GBase-KR4" }, \ + { IFM_100G_LR4, "100GBase-LR4" }, \ + { IFM_56G_R4, "56GBase-R4" }, \ + { IFM_100_T, "100BaseT" }, \ + { IFM_25G_CR, "25GBase-CR" }, \ + { IFM_25G_KR, "25GBase-KR" }, \ + { IFM_25G_SR, "25GBase-SR" }, \ + { IFM_50G_CR2, "50GBase-CR2" }, \ + { IFM_50G_KR2, "50GBase-KR2" }, \ + { IFM_25G_LR, "25GBase-LR" }, \ + { IFM_10G_AOC, "10GBase-AOC" }, \ + { IFM_25G_ACC, "25GBase-ACC" }, \ + { IFM_25G_AOC, "25GBase-AOC" }, \ + { IFM_100_SGMII, "100M-SGMII" }, \ + { IFM_2500_X, "2500Base-X" }, \ + { IFM_5000_KR, "5000Base-KR" }, \ + { IFM_25G_T, "25GBase-T" }, \ + { IFM_25G_CR_S, "25GBase-CR-S" }, \ + { IFM_25G_CR1, "25GBase-CR1" }, \ + { IFM_25G_KR_S, "25GBase-KR-S" }, \ + { IFM_5000_KR_S, "5000Base-KR-S" }, \ + { IFM_5000_KR1, "5000Base-KR1" }, \ + { IFM_25G_AUI, "25G-AUI" 
}, \ + { IFM_40G_XLAUI, "40G-XLAUI" }, \ + { IFM_40G_XLAUI_AC, "40G-XLAUI-AC" }, \ + { IFM_40G_ER4, "40GBase-ER4" }, \ + { IFM_50G_SR2, "50GBase-SR2" }, \ + { IFM_50G_LR2, "50GBase-LR2" }, \ + { IFM_50G_LAUI2_AC, "50G-LAUI2-AC" }, \ + { IFM_50G_LAUI2, "50G-LAUI2" }, \ + { IFM_50G_AUI2_AC, "50G-AUI2-AC" }, \ + { IFM_50G_AUI2, "50G-AUI2" }, \ + { IFM_50G_CP, "50GBase-CP" }, \ + { IFM_50G_SR, "50GBase-SR" }, \ + { IFM_50G_LR, "50GBase-LR" }, \ + { IFM_50G_FR, "50GBase-FR" }, \ + { IFM_50G_KR_PAM4, "50GBase-KR-PAM4" }, \ + { IFM_25G_KR1, "25GBase-KR1" }, \ + { IFM_50G_AUI1_AC, "50G-AUI1-AC" }, \ + { IFM_50G_AUI1, "50G-AUI1" }, \ + { IFM_100G_CAUI4_AC, "100G-CAUI4-AC" }, \ + { IFM_100G_CAUI4, "100G-CAUI4" }, \ + { IFM_100G_AUI4_AC, "100G-AUI4-AC" }, \ + { IFM_100G_AUI4, "100G-AUI4" }, \ + { IFM_100G_CR_PAM4, "100GBase-CR-PAM4" }, \ + { IFM_100G_KR_PAM4, "100GBase-KR-PAM4" }, \ + { IFM_100G_CP2, "100GBase-CP2" }, \ + { IFM_100G_SR2, "100GBase-SR2" }, \ + { IFM_100G_DR, "100GBase-DR" }, \ + { IFM_100G_KR2_PAM4, "100GBase-KR2-PAM4" }, \ + { IFM_100G_CAUI2_AC, "100G-CAUI2-AC" }, \ + { IFM_100G_CAUI2, "100G-CAUI2" }, \ + { IFM_100G_AUI2_AC, "100G-AUI2-AC" }, \ + { IFM_100G_AUI2, "100G-AUI2" }, \ + { IFM_200G_CR4_PAM4, "200GBase-CR4-PAM4" }, \ + { IFM_200G_SR4, "200GBase-SR4" }, \ + { IFM_200G_FR4, "200GBase-FR4" }, \ + { IFM_200G_LR4, "200GBase-LR4" }, \ + { IFM_200G_DR4, "200GBase-DR4" }, \ + { IFM_200G_KR4_PAM4, "200GBase-KR4-PAM4" }, \ + { IFM_200G_AUI4_AC, "200G-AUI4-AC" }, \ + { IFM_200G_AUI4, "200G-AUI4" }, \ + { IFM_200G_AUI8_AC, "200G-AUI8-AC" }, \ + { IFM_200G_AUI8, "200G-AUI8" }, \ + { IFM_400G_FR8, "400GBase-FR8" }, \ + { IFM_400G_LR8, "400GBase-LR8" }, \ + { IFM_400G_DR4, "400GBase-DR4" }, \ + { IFM_400G_AUI8_AC, "400G-AUI8-AC" }, \ + { IFM_400G_AUI8, "400G-AUI8" }, \ + { 0, NULL }, \ } #define IFM_SUBTYPE_ETHERNET_ALIASES { \ @@ -367,7 +574,7 @@ struct ifmedia_description { { IFM_FDX, "full-duplex" }, \ { IFM_HDX, "half-duplex" }, \ { IFM_FLOW, "flow-control" }, \ - { IFM_EEE, "energy-efficient-ethernet" }, \ + { IFM_EEE, "energy-efficient-ethernet" }, \ { IFM_FLAG0, "flag0" }, \ { IFM_FLAG1, "flag1" }, \ { IFM_FLAG2, "flag2" }, \ @@ -375,4 +582,4 @@ struct ifmedia_description { { 0, NULL }, \ } -#endif /* _NET_IF_MEDIA_H_ */ +#endif /* _NET_IF_MEDIA_H_ */ diff --git a/bsd/net/if_mib.c b/bsd/net/if_mib.c index 92544dd1e..6c0f94ef8 100644 --- a/bsd/net/if_mib.c +++ b/bsd/net/if_mib.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,7 +39,7 @@ * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. - * + * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF @@ -92,29 +92,29 @@ SYSCTL_DECL(_net_link_generic); -SYSCTL_NODE(_net_link_generic, IFMIB_SYSTEM, system, CTLFLAG_RD|CTLFLAG_LOCKED, 0, - "Variables global to all interfaces"); +SYSCTL_NODE(_net_link_generic, IFMIB_SYSTEM, system, CTLFLAG_RD | CTLFLAG_LOCKED, 0, + "Variables global to all interfaces"); SYSCTL_INT(_net_link_generic_system, IFMIB_IFCOUNT, ifcount, CTLFLAG_RD | CTLFLAG_LOCKED, - &if_index, 0, "Number of configured interfaces"); + &if_index, 0, "Number of configured interfaces"); static int sysctl_ifdata SYSCTL_HANDLER_ARGS; SYSCTL_NODE(_net_link_generic, IFMIB_IFDATA, ifdata, CTLFLAG_RD | CTLFLAG_LOCKED, - sysctl_ifdata, "Interface table"); + sysctl_ifdata, "Interface table"); static int sysctl_ifalldata SYSCTL_HANDLER_ARGS; SYSCTL_NODE(_net_link_generic, IFMIB_IFALLDATA, ifalldata, CTLFLAG_RD | CTLFLAG_LOCKED, - sysctl_ifalldata, "Interface table"); + sysctl_ifalldata, "Interface table"); static int make_ifmibdata(struct ifnet *, int *, struct sysctl_req *); int make_ifmibdata(struct ifnet *ifp, int *name, struct sysctl_req *req) { - struct ifmibdata ifmd; + struct ifmibdata ifmd; int error = 0; - switch(name[1]) { + switch (name[1]) { default: error = ENOENT; break; @@ -126,7 +126,7 @@ make_ifmibdata(struct ifnet *ifp, int *name, struct sysctl_req *req) */ if (ifnet_is_attached(ifp, 0)) { snprintf(ifmd.ifmd_name, sizeof(ifmd.ifmd_name), "%s", - if_name(ifp)); + if_name(ifp)); #define COPY(fld) ifmd.ifmd_##fld = ifp->if_##fld COPY(pcount); @@ -138,13 +138,15 @@ make_ifmibdata(struct ifnet *ifp, int *name, struct sysctl_req *req) ifmd.ifmd_snd_drops = ifp->if_snd.ifcq_dropcnt.packets; } error = SYSCTL_OUT(req, &ifmd, sizeof ifmd); - if (error || !req->newptr) + if (error || !req->newptr) { break; + } #ifdef IF_MIB_WR error = SYSCTL_IN(req, &ifmd, sizeof ifmd); - if (error) + if (error) { break; + } #define DONTCOPY(fld) ifmd.ifmd_data.ifi_##fld = ifp->if_data.ifi_##fld DONTCOPY(type); @@ -165,20 +167,22 @@ make_ifmibdata(struct ifnet *ifp, int *name, struct sysctl_req *req) case IFDATA_LINKSPECIFIC: error = SYSCTL_OUT(req, ifp->if_linkmib, ifp->if_linkmiblen); - if (error || !req->newptr) + if (error || !req->newptr) { break; + } #ifdef IF_MIB_WR error = SYSCTL_IN(req, ifp->if_linkmib, ifp->if_linkmiblen); - if (error) + if (error) { break; + } #endif /* IF_MIB_WR */ break; case IFDATA_SUPPLEMENTAL: { struct ifmibdata_supplemental *ifmd_supp; - if ((ifmd_supp = _MALLOC(sizeof (*ifmd_supp), M_TEMP, + if ((ifmd_supp = _MALLOC(sizeof(*ifmd_supp), M_TEMP, M_NOWAIT | M_ZERO)) == NULL) { error = ENOMEM; break; @@ -189,10 +193,11 @@ make_ifmibdata(struct ifnet *ifp, int *name, struct sysctl_req *req) if_copy_packet_stats(ifp, &ifmd_supp->ifmd_packet_stats); if_copy_rxpoll_stats(ifp, &ifmd_supp->ifmd_rxpoll_stats); - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = sizeof (*ifmd_supp); + if (req->oldptr == USER_ADDR_NULL) { + req->oldlen = sizeof(*ifmd_supp); + } - error = SYSCTL_OUT(req, ifmd_supp, MIN(sizeof (*ifmd_supp), + error = SYSCTL_OUT(req, ifmd_supp, MIN(sizeof(*ifmd_supp), req->oldlen)); _FREE(ifmd_supp, M_TEMP); @@ -212,14 +217,15 @@ sysctl_ifdata 
SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ u_int namelen = arg2; struct ifnet *ifp; - if (namelen != 2) - return (EINVAL); + if (namelen != 2) { + return EINVAL; + } ifnet_head_lock_shared(); if (name[0] <= 0 || name[0] > if_index || (ifp = ifindex2ifnet[name[0]]) == NULL) { ifnet_head_done(); - return (ENOENT); + return ENOENT; } ifnet_reference(ifp); ifnet_head_done(); @@ -230,7 +236,7 @@ sysctl_ifdata SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ ifnet_release(ifp); - return (error); + return error; } int @@ -242,8 +248,9 @@ sysctl_ifalldata SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ u_int namelen = arg2; struct ifnet *ifp; - if (namelen != 2) - return (EINVAL); + if (namelen != 2) { + return EINVAL; + } ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { @@ -252,8 +259,9 @@ sysctl_ifalldata SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ error = make_ifmibdata(ifp, name, req); ifnet_lock_done(ifp); - if (error != 0) + if (error != 0) { break; + } } ifnet_head_done(); return error; diff --git a/bsd/net/if_mib.h b/bsd/net/if_mib.h index 3dbf262a2..0cec12310 100644 --- a/bsd/net/if_mib.h +++ b/bsd/net/if_mib.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,7 +39,7 @@ * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. - * + * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. 
DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF @@ -57,57 +57,57 @@ */ #ifndef _NET_IF_MIB_H -#define _NET_IF_MIB_H 1 +#define _NET_IF_MIB_H 1 #include struct ifmibdata { - char ifmd_name[IFNAMSIZ]; /* name of interface */ - unsigned int ifmd_pcount; /* number of promiscuous listeners */ - unsigned int ifmd_flags; /* interface flags */ - unsigned int ifmd_snd_len; /* instantaneous length of send queue */ - unsigned int ifmd_snd_maxlen; /* maximum length of send queue */ - unsigned int ifmd_snd_drops; /* number of drops in send queue */ - unsigned int ifmd_filler[4]; /* for future expansion */ - struct if_data64 ifmd_data; /* generic information and statistics */ + char ifmd_name[IFNAMSIZ]; /* name of interface */ + unsigned int ifmd_pcount; /* number of promiscuous listeners */ + unsigned int ifmd_flags; /* interface flags */ + unsigned int ifmd_snd_len; /* instantaneous length of send queue */ + unsigned int ifmd_snd_maxlen; /* maximum length of send queue */ + unsigned int ifmd_snd_drops; /* number of drops in send queue */ + unsigned int ifmd_filler[4]; /* for future expansion */ + struct if_data64 ifmd_data; /* generic information and statistics */ }; #ifdef PRIVATE struct ifmibdata_supplemental { - struct if_traffic_class ifmd_traffic_class; - struct if_data_extended ifmd_data_extended; - struct if_packet_stats ifmd_packet_stats; - struct if_rxpoll_stats ifmd_rxpoll_stats; + struct if_traffic_class ifmd_traffic_class; + struct if_data_extended ifmd_data_extended; + struct if_packet_stats ifmd_packet_stats; + struct if_rxpoll_stats ifmd_rxpoll_stats; }; #endif /* PRIVATE */ /* * sysctl MIB tags at the net.link.generic level */ -#define IFMIB_SYSTEM 1 /* non-interface-specific */ -#define IFMIB_IFDATA 2 /* per-interface data table */ -#define IFMIB_IFALLDATA 3 /* all interfaces data at once */ +#define IFMIB_SYSTEM 1 /* non-interface-specific */ +#define IFMIB_IFDATA 2 /* per-interface data table */ +#define IFMIB_IFALLDATA 3 /* all interfaces data at once */ /* * MIB tags for the various net.link.generic.ifdata tables */ -#define IFDATA_GENERAL 1 /* generic stats for all kinds of ifaces */ -#define IFDATA_LINKSPECIFIC 2 /* specific to the type of interface */ -#define IFDATA_ADDRS 3 /* addresses assigned to interface */ -#define IFDATA_MULTIADDRS 4 /* multicast addresses assigned to interface */ +#define IFDATA_GENERAL 1 /* generic stats for all kinds of ifaces */ +#define IFDATA_LINKSPECIFIC 2 /* specific to the type of interface */ +#define IFDATA_ADDRS 3 /* addresses assigned to interface */ +#define IFDATA_MULTIADDRS 4 /* multicast addresses assigned to interface */ #ifdef PRIVATE -#define IFDATA_SUPPLEMENTAL 5 /* supplemental link specific stats */ +#define IFDATA_SUPPLEMENTAL 5 /* supplemental link specific stats */ #endif /* PRIVATE */ /* * MIB tags at the net.link.generic.system level */ -#define IFMIB_IFCOUNT 1 /* number of interfaces configured */ +#define IFMIB_IFCOUNT 1 /* number of interfaces configured */ /* * MIB tags as the net.link level * All of the other values are IFT_* names defined in if_types.h. 
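/*
 * [Example] make_ifmibdata() above answers the classic ifmib query: the MIB
 * name { CTL_NET, PF_LINK, NETLINK_GENERIC, IFMIB_IFDATA, <ifindex>,
 * IFDATA_GENERAL } returns one struct ifmibdata. A minimal sketch of the
 * userspace side, assuming <net/if_mib.h> is visible to the caller:
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_mib.h>

int
main(int argc, char *argv[])
{
	int mib[6];
	struct ifmibdata ifmd;
	size_t len = sizeof(ifmd);
	unsigned int ifindex;

	if (argc != 2 || (ifindex = if_nametoindex(argv[1])) == 0) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}
	mib[0] = CTL_NET;
	mib[1] = PF_LINK;
	mib[2] = NETLINK_GENERIC;
	mib[3] = IFMIB_IFDATA;
	mib[4] = (int)ifindex;		/* name[0], range-checked by sysctl_ifdata */
	mib[5] = IFDATA_GENERAL;	/* name[1], switched on in make_ifmibdata */
	if (sysctl(mib, 6, &ifmd, &len, NULL, 0) == -1) {
		perror("sysctl");
		return 1;
	}
	printf("%s: %llu pkts in, %llu pkts out\n", ifmd.ifmd_name,
	    (unsigned long long)ifmd.ifmd_data.ifi_ipackets,
	    (unsigned long long)ifmd.ifmd_data.ifi_opackets);
	return 0;
}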
*/ -#define NETLINK_GENERIC 0 /* functions not specific to a type of iface */ +#define NETLINK_GENERIC 0 /* functions not specific to a type of iface */ /* * The reason why the IFDATA_LINKSPECIFIC stuff is not under the @@ -126,27 +126,27 @@ struct ifmibdata_supplemental { /* For IFT_ETHER, IFT_ISO88023, and IFT_STARLAN, as used by RFC 1650 */ struct ifs_iso_8802_3 { - u_int32_t dot3StatsAlignmentErrors; - u_int32_t dot3StatsFCSErrors; - u_int32_t dot3StatsSingleCollisionFrames; - u_int32_t dot3StatsMultipleCollisionFrames; - u_int32_t dot3StatsSQETestErrors; - u_int32_t dot3StatsDeferredTransmissions; - u_int32_t dot3StatsLateCollisions; - u_int32_t dot3StatsExcessiveCollisions; - u_int32_t dot3StatsInternalMacTransmitErrors; - u_int32_t dot3StatsCarrierSenseErrors; - u_int32_t dot3StatsFrameTooLongs; - u_int32_t dot3StatsInternalMacReceiveErrors; - u_int32_t dot3StatsEtherChipSet; + u_int32_t dot3StatsAlignmentErrors; + u_int32_t dot3StatsFCSErrors; + u_int32_t dot3StatsSingleCollisionFrames; + u_int32_t dot3StatsMultipleCollisionFrames; + u_int32_t dot3StatsSQETestErrors; + u_int32_t dot3StatsDeferredTransmissions; + u_int32_t dot3StatsLateCollisions; + u_int32_t dot3StatsExcessiveCollisions; + u_int32_t dot3StatsInternalMacTransmitErrors; + u_int32_t dot3StatsCarrierSenseErrors; + u_int32_t dot3StatsFrameTooLongs; + u_int32_t dot3StatsInternalMacReceiveErrors; + u_int32_t dot3StatsEtherChipSet; /* Matt Thomas wants this one, not included in RFC 1650: */ - u_int32_t dot3StatsMissedFrames; + u_int32_t dot3StatsMissedFrames; - u_int32_t dot3StatsCollFrequencies[16]; /* NB: index origin */ + u_int32_t dot3StatsCollFrequencies[16]; /* NB: index origin */ - u_int32_t dot3Compliance; -#define DOT3COMPLIANCE_STATS 1 -#define DOT3COMPLIANCE_COLLS 2 + u_int32_t dot3Compliance; +#define DOT3COMPLIANCE_STATS 1 +#define DOT3COMPLIANCE_COLLS 2 }; /* @@ -156,9 +156,9 @@ struct ifs_iso_8802_3 { * obvious to the driver implementor. So, we define our own identification * mechanism here, and let the agent writer deal with the translation. */ -#define DOT3CHIPSET_VENDOR(x) ((x) >> 16) -#define DOT3CHIPSET_PART(x) ((x) & 0xffff) -#define DOT3CHIPSET(v,p) (((v) << 16) + ((p) & 0xffff)) +#define DOT3CHIPSET_VENDOR(x) ((x) >> 16) +#define DOT3CHIPSET_PART(x) ((x) & 0xffff) +#define DOT3CHIPSET(v, p) (((v) << 16) + ((p) & 0xffff)) /* Driver writers! Add your vendors here! */ enum dot3Vendors { @@ -216,9 +216,9 @@ enum { */ struct if_family_id { - u_int32_t iffmid_len; - u_int32_t iffmid_id; - char iffmid_str[1]; /* variable length string */ + u_int32_t iffmid_len; + u_int32_t iffmid_id; + char iffmid_str[1]; /* variable length string */ }; diff --git a/bsd/net/if_pflog.c b/bsd/net/if_pflog.c index cbed433ab..9747bb37a 100644 --- a/bsd/net/if_pflog.c +++ b/bsd/net/if_pflog.c @@ -30,10 +30,10 @@ /* $OpenBSD: if_pflog.c,v 1.22 2006/12/15 09:31:20 otto Exp $ */ /* * The authors of this code are John Ioannidis (ji@tla.org), - * Angelos D. Keromytis (kermit@csd.uch.gr) and + * Angelos D. Keromytis (kermit@csd.uch.gr) and * Niels Provos (provos@physnet.uni-hamburg.de). * - * This code was written by John Ioannidis for BSD/OS in Athens, Greece, + * This code was written by John Ioannidis for BSD/OS in Athens, Greece, * in November 1995. 
* * Ported to OpenBSD and NetBSD, with additional transforms, in December 1996, @@ -49,7 +49,7 @@ * Permission to use, copy, and modify this software with or without fee * is hereby granted, provided that this entire notice is included in * all copies of any software which is or includes a copy or - * modification of this software. + * modification of this software. * You may use this code under the GNU public license if you so wish. Please * contribute changes back to the authors under this freer than GPL license * so that we may further the use of strong encryption without limitations to @@ -95,8 +95,8 @@ #include #include -#define PFLOGNAME "pflog" -#define PFLOGMTU (32768 + MHLEN + MLEN) +#define PFLOGNAME "pflog" +#define PFLOGMTU (32768 + MHLEN + MLEN) #ifdef PFLOGDEBUG #define DPRINTF(x) do { if (pflogdebug) printf x ; } while (0) @@ -116,12 +116,12 @@ static errno_t pflogaddproto(struct ifnet *, protocol_family_t, static errno_t pflogdelproto(struct ifnet *, protocol_family_t); static void pflogfree(struct ifnet *); -static LIST_HEAD(, pflog_softc) pflogif_list; +static LIST_HEAD(, pflog_softc) pflogif_list; static struct if_clone pflog_cloner = IF_CLONE_INITIALIZER(PFLOGNAME, pflog_clone_create, pflog_clone_destroy, - 0, (PFLOGIFS_MAX - 1), PFLOGIF_ZONE_MAX_ELEM, sizeof(struct pflog_softc)); + 0, (PFLOGIFS_MAX - 1), PFLOGIF_ZONE_MAX_ELEM, sizeof(struct pflog_softc)); -struct ifnet *pflogifs[PFLOGIFS_MAX]; /* for fast access */ +struct ifnet *pflogifs[PFLOGIFS_MAX]; /* for fast access */ void pfloginit(void) @@ -129,8 +129,9 @@ pfloginit(void) int i; LIST_INIT(&pflogif_list); - for (i = 0; i < PFLOGIFS_MAX; i++) + for (i = 0; i < PFLOGIFS_MAX; i++) { pflogifs[i] = NULL; + } (void) if_clone_attach(&pflog_cloner); } @@ -154,9 +155,9 @@ pflog_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) goto done; } - bzero(&pf_init, sizeof (pf_init)); + bzero(&pf_init, sizeof(pf_init)); pf_init.ver = IFNET_INIT_CURRENT_VERSION; - pf_init.len = sizeof (pf_init); + pf_init.len = sizeof(pf_init); pf_init.flags = IFNET_INIT_LEGACY; pf_init.name = ifc->ifc_name; pf_init.unit = unit; @@ -170,7 +171,7 @@ pflog_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) pf_init.ioctl = pflogioctl; pf_init.detach = pflogfree; - bzero(pflogif, sizeof (*pflogif)); + bzero(pflogif, sizeof(*pflogif)); pflogif->sc_unit = unit; pflogif->sc_flags |= IFPFLF_DETACHING; @@ -205,7 +206,7 @@ pflog_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) lck_rw_done(pf_perim_lock); done: - return (error); + return error; } static int @@ -237,12 +238,13 @@ pflog_clone_destroy(struct ifnet *ifp) { int error = 0; - if ((error = pflog_remove(ifp)) != 0) + if ((error = pflog_remove(ifp)) != 0) { goto done; + } /* bpfdetach() is taken care of as part of interface detach */ (void)ifnet_detach(ifp); done: - return (error); + return error; } static errno_t @@ -250,7 +252,7 @@ pflogoutput(struct ifnet *ifp, struct mbuf *m) { printf("%s: freeing data for %s\n", __func__, if_name(ifp)); m_freem(m); - return (ENOTSUP); + return ENOTSUP; } static errno_t @@ -262,16 +264,17 @@ pflogioctl(struct ifnet *ifp, unsigned long cmd, void *data) case SIOCAIFADDR: case SIOCSIFDSTADDR: case SIOCSIFFLAGS: - if (ifnet_flags(ifp) & IFF_UP) + if (ifnet_flags(ifp) & IFF_UP) { ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING); - else + } else { ifnet_set_flags(ifp, 0, IFF_RUNNING); + } break; default: - return (ENOTTY); + return ENOTTY; } - return (0); + return 0; } static errno_t @@ -280,7 +283,7 
@@ pflogdemux(struct ifnet *ifp, struct mbuf *m, char *h, protocol_family_t *ppf) #pragma unused(h, ppf) printf("%s: freeing data for %s\n", __func__, if_name(ifp)); m_freem(m); - return (EJUSTRETURN); + return EJUSTRETURN; } static errno_t @@ -288,14 +291,14 @@ pflogaddproto(struct ifnet *ifp, protocol_family_t pf, const struct ifnet_demux_desc *d, u_int32_t cnt) { #pragma unused(ifp, pf, d, cnt) - return (0); + return 0; } static errno_t pflogdelproto(struct ifnet *ifp, protocol_family_t pf) { #pragma unused(ifp, pf) - return (0); + return 0; } static void @@ -318,23 +321,25 @@ pflog_packet(struct pfi_kif *kif, pbuf_t *pbuf, sa_family_t af, u_int8_t dir, LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (kif == NULL || !pbuf_is_valid(pbuf) || rm == NULL || pd == NULL) - return (-1); + if (kif == NULL || !pbuf_is_valid(pbuf) || rm == NULL || pd == NULL) { + return -1; + } if (rm->logif >= PFLOGIFS_MAX || (ifn = pflogifs[rm->logif]) == NULL || !ifn->if_bpf) { - return (0); + return 0; } - if ((m = pbuf_to_mbuf(pbuf, FALSE)) == NULL) - return (0); + if ((m = pbuf_to_mbuf(pbuf, FALSE)) == NULL) { + return 0; + } - bzero(&hdr, sizeof (hdr)); + bzero(&hdr, sizeof(hdr)); hdr.length = PFLOG_REAL_HDRLEN; hdr.af = af; hdr.action = rm->action; hdr.reason = reason; - memcpy(hdr.ifname, kif->pfik_name, sizeof (hdr.ifname)); + memcpy(hdr.ifname, kif->pfik_name, sizeof(hdr.ifname)); if (am == NULL) { hdr.rulenr = htonl(rm->nr); @@ -342,12 +347,14 @@ pflog_packet(struct pfi_kif *kif, pbuf_t *pbuf, sa_family_t af, u_int8_t dir, } else { hdr.rulenr = htonl(am->nr); hdr.subrulenr = htonl(rm->nr); - if (ruleset != NULL && ruleset->anchor != NULL) + if (ruleset != NULL && ruleset->anchor != NULL) { strlcpy(hdr.ruleset, ruleset->anchor->name, - sizeof (hdr.ruleset)); + sizeof(hdr.ruleset)); + } } - if (rm->log & PF_LOG_SOCKET_LOOKUP && !pd->lookup.done) + if (rm->log & PF_LOG_SOCKET_LOOKUP && !pd->lookup.done) { pd->lookup.done = pf_socket_lookup(dir, pd); + } if (pd->lookup.done > 0) { hdr.uid = pd->lookup.uid; hdr.pid = pd->lookup.pid; @@ -385,5 +392,5 @@ pflog_packet(struct pfi_kif *kif, pbuf_t *pbuf, sa_family_t af, u_int8_t dir, break; } #endif /* NBPFILTER > 0 */ - return (0); + return 0; } diff --git a/bsd/net/if_pflog.h b/bsd/net/if_pflog.h index 1ebfb6bb6..592d4665a 100644 --- a/bsd/net/if_pflog.h +++ b/bsd/net/if_pflog.h @@ -62,51 +62,51 @@ extern "C" { #endif -#define PFLOGIFS_MAX 16 -#define PFLOGIF_ZONE_MAX_ELEM MIN(IFNETS_MAX, PFLOGIFS_MAX) +#define PFLOGIFS_MAX 16 +#define PFLOGIF_ZONE_MAX_ELEM MIN(IFNETS_MAX, PFLOGIFS_MAX) #if KERNEL_PRIVATE struct pflog_softc { - struct ifnet *sc_if; /* back ptr to interface */ - u_int32_t sc_flags; -#define IFPFLF_DETACHING 0x1 - int sc_unit; - LIST_ENTRY(pflog_softc) sc_list; + struct ifnet *sc_if; /* back ptr to interface */ + u_int32_t sc_flags; +#define IFPFLF_DETACHING 0x1 + int sc_unit; + LIST_ENTRY(pflog_softc) sc_list; }; #endif /* KERNEL_PRIVATE */ -#define PFLOG_RULESET_NAME_SIZE 16 +#define PFLOG_RULESET_NAME_SIZE 16 struct pfloghdr { - u_int8_t length; - sa_family_t af; - u_int8_t action; - u_int8_t reason; - char ifname[IFNAMSIZ]; - char ruleset[PFLOG_RULESET_NAME_SIZE]; - u_int32_t rulenr; - u_int32_t subrulenr; - uid_t uid; - pid_t pid; - uid_t rule_uid; - pid_t rule_pid; - u_int8_t dir; - u_int8_t pad[3]; + u_int8_t length; + sa_family_t af; + u_int8_t action; + u_int8_t reason; + char ifname[IFNAMSIZ]; + char ruleset[PFLOG_RULESET_NAME_SIZE]; + u_int32_t rulenr; + u_int32_t subrulenr; + uid_t uid; + pid_t pid; + uid_t rule_uid; + pid_t 
rule_pid; + u_int8_t dir; + u_int8_t pad[3]; }; -#define PFLOG_HDRLEN sizeof(struct pfloghdr) +#define PFLOG_HDRLEN sizeof(struct pfloghdr) /* minus pad, also used as a signature */ -#define PFLOG_REAL_HDRLEN offsetof(struct pfloghdr, pad) +#define PFLOG_REAL_HDRLEN offsetof(struct pfloghdr, pad) #ifdef KERNEL_PRIVATE #if PFLOG -#define PFLOG_PACKET(i,x,a,b,c,d,e,f,g,h) pflog_packet(i,a,b,c,d,e,f,g,h) +#define PFLOG_PACKET(i, x, a, b, c, d, e, f, g, h) pflog_packet(i,a,b,c,d,e,f,g,h) #else -#define PFLOG_PACKET(i,x,a,b,c,d,e,f,g,h) ((void)0) +#define PFLOG_PACKET(i, x, a, b, c, d, e, f, g, h) ((void)0) #endif /* PFLOG */ -__private_extern__ void pfloginit(void); +__private_extern__ void pfloginit(void); #endif /* KERNEL_PRIVATE */ #ifdef __cplusplus diff --git a/bsd/net/if_ports_used.c b/bsd/net/if_ports_used.c index c78a690a1..378d834fc 100644 --- a/bsd/net/if_ports_used.c +++ b/bsd/net/if_ports_used.c @@ -60,9 +60,9 @@ SYSCTL_DECL(_net_link_generic_system); SYSCTL_NODE(_net_link_generic_system, OID_AUTO, port_used, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "if port used"); -static uuid_t current_wakeuuid; +static uuid_t current_wakeuuid; SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, current_wakeuuid, - CTLFLAG_RD|CTLFLAG_LOCKED, + CTLFLAG_RD | CTLFLAG_LOCKED, current_wakeuuid, sizeof(uuid_t), "S,uuid_t", ""); static int sysctl_net_port_info_list SYSCTL_HANDLER_ARGS; @@ -95,7 +95,7 @@ SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid_str, sysctl_test_wakeuuid_str, "A", ""); SYSCTL_OPAQUE(_net_link_generic_system_port_used, OID_AUTO, test_wakeuuid, - CTLFLAG_RD|CTLFLAG_LOCKED, + CTLFLAG_RD | CTLFLAG_LOCKED, test_wakeuuid, sizeof(uuid_t), "S,uuid_t", ""); #endif /* (DEVELOPMENT || DEBUG) */ @@ -130,7 +130,7 @@ static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, wakeuuid_not_set_last_time, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_wakeuuid_not_set_last_time, "S,timeval", ""); -char wakeuuid_not_set_last_if [IFXNAMSIZ]; +char wakeuuid_not_set_last_if[IFXNAMSIZ]; int sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS; static SYSCTL_PROC(_net_link_generic_system_port_used, OID_AUTO, wakeuuid_not_set_last_if, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED, @@ -143,14 +143,14 @@ decl_lck_mtx_data(static, net_port_entry_head_lock); static lck_grp_t *net_port_entry_head_lock_group; struct net_port_entry { - SLIST_ENTRY(net_port_entry) npe_next; - struct net_port_info npe_npi; + SLIST_ENTRY(net_port_entry) npe_next; + struct net_port_info npe_npi; }; static struct zone *net_port_entry_zone = NULL; -#define NET_PORT_ENTRY_ZONE_MAX 128 -#define NET_PORT_ENTRY_ZONE_NAME "net_port_entry" +#define NET_PORT_ENTRY_ZONE_MAX 128 +#define NET_PORT_ENTRY_ZONE_NAME "net_port_entry" static SLIST_HEAD(net_port_entry_list, net_port_entry) net_port_entry_list = SLIST_HEAD_INITIALIZER(&net_port_entry_list); @@ -170,7 +170,7 @@ if_ports_used_init(void) lck_grp_attributes = lck_grp_attr_alloc_init(); net_port_entry_head_lock_group = lck_grp_alloc_init( - "net port entry lock", lck_grp_attributes); + "net port entry lock", lck_grp_attributes); lck_attributes = lck_attr_alloc_init(); if (lck_attributes == NULL) { @@ -222,17 +222,17 @@ get_test_wake_uuid(uuid_string_t wakeuuid_str, size_t len) if (wakeuuid_str != NULL && len != 0) { uuid_unparse(test_wakeuuid, wakeuuid_str); } - return (true); + return true; } else if (strlen(test_wakeuuid_str) != 0) { if (wakeuuid_str != NULL && len != 0) { strlcpy(wakeuuid_str, test_wakeuuid_str, len); } - return (true); + return 
true; } else { - return (false); + return false; } } else { - return (false); + return false; } } @@ -243,7 +243,7 @@ is_wakeuuid_set(void) * IOPMCopySleepWakeUUIDKey() tells if SleepWakeUUID is currently set * That means we are currently in a sleep/wake cycle */ - return (get_test_wake_uuid(NULL, 0) || IOPMCopySleepWakeUUIDKey(NULL, 0)); + return get_test_wake_uuid(NULL, 0) || IOPMCopySleepWakeUUIDKey(NULL, 0); } void @@ -260,7 +260,6 @@ if_ports_used_update_wakeuuid(struct ifnet *ifp) wakeuuid_is_set = get_test_wake_uuid(wakeuuid_str, sizeof(wakeuuid_str)); } else { - wakeuuid_is_set = IOPMCopySleepWakeUUIDKey(wakeuuid_str, sizeof(wakeuuid_str)); } @@ -286,7 +285,7 @@ if_ports_used_update_wakeuuid(struct ifnet *ifp) microtime(&wakeuuid_not_set_last_time); strlcpy(wakeuuid_not_set_last_if, if_name(ifp), sizeof(wakeuuid_not_set_last_if)); - } + } return; } @@ -296,7 +295,7 @@ if_ports_used_update_wakeuuid(struct ifnet *ifp) uuid_copy(current_wakeuuid, wakeuuid); updated = true; } - /* + /* * Record the time last checked */ microuptime(&wakeuiid_last_check); @@ -325,12 +324,12 @@ net_port_info_equal(const struct net_port_info *x, x->npi_effective_pid == y->npi_effective_pid && x->npi_flags == y->npi_flags && memcmp(&x->npi_local_addr_, &y->npi_local_addr_, - sizeof(union in_addr_4_6)) == 0 && + sizeof(union in_addr_4_6)) == 0 && memcmp(&x->npi_foreign_addr_, &y->npi_foreign_addr_, - sizeof(union in_addr_4_6)) == 0) { - return (true); + sizeof(union in_addr_4_6)) == 0) { + return true; } - return (false); + return false; } static bool @@ -342,17 +341,17 @@ net_port_info_has_entry(const struct net_port_info *npi) SLIST_FOREACH(npe, &net_port_entry_list, npe_next) { if (net_port_info_equal(&npe->npe_npi, npi)) { - return (true); + return true; } } - return (false); + return false; } static bool net_port_info_add_entry(const struct net_port_info *npi) { - struct net_port_entry *npe = NULL; + struct net_port_entry *npe = NULL; uint32_t num = 0; bool entry_added = false; @@ -369,7 +368,7 @@ net_port_info_add_entry(const struct net_port_info *npi) npi->npi_owner_pid, npi->npi_effective_pid); } - return (0); + return 0; } npe = zalloc(net_port_entry_zone); @@ -382,7 +381,7 @@ net_port_info_add_entry(const struct net_port_info *npi) npi->npi_if_index, npi->npi_owner_pid, npi->npi_effective_pid); - return (0); + return 0; } bzero(npe, sizeof(struct net_port_entry)); @@ -425,7 +424,7 @@ net_port_info_add_entry(const struct net_port_info *npi) zfree(net_port_entry_zone, npe); npe = NULL; } - return (entry_added); + return entry_added; } #if (DEVELOPMENT || DEBUG) @@ -436,11 +435,11 @@ sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS int error = 0; if (kauth_cred_issuser(kauth_cred_get()) == 0) { - return (EPERM); + return EPERM; } if (req->oldptr == USER_ADDR_NULL) { req->oldidx = sizeof(uuid_t); - return (0); + return 0; } if (req->newptr != USER_ADDR_NULL) { uuid_generate(test_wakeuuid); @@ -448,7 +447,7 @@ sysctl_new_test_wakeuuid SYSCTL_HANDLER_ARGS error = SYSCTL_OUT(req, test_wakeuuid, MIN(sizeof(uuid_t), req->oldlen)); - return (error); + return error; } int @@ -458,20 +457,19 @@ sysctl_clear_test_wakeuuid SYSCTL_HANDLER_ARGS int error = 0; if (kauth_cred_issuser(kauth_cred_get()) == 0) { - return (EPERM); + return EPERM; } if (req->oldptr == USER_ADDR_NULL) { req->oldidx = sizeof(uuid_t); - return (0); + return 0; } if (req->newptr != USER_ADDR_NULL) { uuid_clear(test_wakeuuid); - test_wakeuuid_str[0] = 0; } error = SYSCTL_OUT(req, test_wakeuuid, MIN(sizeof(uuid_t), req->oldlen)); - return (error); 
+ return error; } int @@ -482,7 +480,7 @@ sysctl_test_wakeuuid_str SYSCTL_HANDLER_ARGS int changed; if (kauth_cred_issuser(kauth_cred_get()) == 0) { - return (EPERM); + return EPERM; } error = sysctl_io_string(req, test_wakeuuid_str, sizeof(test_wakeuuid_str), 1, &changed); if (changed) { @@ -490,7 +488,7 @@ sysctl_test_wakeuuid_str SYSCTL_HANDLER_ARGS __func__, test_wakeuuid_str); } - return (error); + return error; } #endif /* (DEVELOPMENT || DEBUG) */ @@ -521,7 +519,7 @@ sysctl_wakeuuid_not_set_last_if SYSCTL_HANDLER_ARGS #pragma unused(oidp, arg1, arg2) return SYSCTL_OUT(req, &wakeuuid_not_set_last_if, - strlen(wakeuuid_not_set_last_if) + 1); + strlen(wakeuuid_not_set_last_if) + 1); } static int @@ -534,7 +532,7 @@ sysctl_net_port_info_list SYSCTL_HANDLER_ARGS if ((error = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0)) != 0) { - return (EPERM); + return EPERM; } lck_mtx_lock(&net_port_entry_head_lock); @@ -543,7 +541,7 @@ sysctl_net_port_info_list SYSCTL_HANDLER_ARGS uint32_t cnt = net_port_entry_count; cnt += cnt >> 4; req->oldidx = sizeof(struct xnpigen) + - cnt * sizeof(struct net_port_info); + cnt * sizeof(struct net_port_info); goto done; } @@ -553,7 +551,7 @@ sysctl_net_port_info_list SYSCTL_HANDLER_ARGS uuid_copy(xnpigen.xng_wakeuuid, current_wakeuuid); xnpigen.xng_npi_count = net_port_entry_count; xnpigen.xng_npi_size = sizeof(struct net_port_info); - error = SYSCTL_OUT(req, &xnpigen, sizeof (xnpigen)); + error = SYSCTL_OUT(req, &xnpigen, sizeof(xnpigen)); if (error != 0) { printf("%s: SYSCTL_OUT(xnpigen) error %d\n", __func__, error); @@ -572,7 +570,7 @@ sysctl_net_port_info_list SYSCTL_HANDLER_ARGS done: lck_mtx_unlock(&net_port_entry_head_lock); - return (error); + return error; } /* @@ -642,9 +640,10 @@ sysctl_get_ports_used SYSCTL_HANDLER_ARGS } error = SYSCTL_OUT(req, bitfield, bitstr_size(IP_PORTRANGE_SIZE)); done: - if (bitfield != NULL) + if (bitfield != NULL) { _FREE(bitfield, M_TEMP); - return (error); + } + return error; } __private_extern__ void @@ -683,9 +682,9 @@ if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp) } else { npi.npi_flags |= NPIF_IPV6; memcpy(&npi.npi_local_addr_in6, - &inp->in6p_laddr, sizeof (struct in6_addr)); + &inp->in6p_laddr, sizeof(struct in6_addr)); memcpy(&npi.npi_foreign_addr_in6, - &inp->in6p_faddr, sizeof (struct in6_addr)); + &inp->in6p_faddr, sizeof(struct in6_addr)); } npi.npi_owner_pid = so->last_pid; diff --git a/bsd/net/if_ports_used.h b/bsd/net/if_ports_used.h index dc11a1052..5fcbc480a 100644 --- a/bsd/net/if_ports_used.h +++ b/bsd/net/if_ports_used.h @@ -40,7 +40,7 @@ #include #include -#define IP_PORTRANGE_SIZE 65536 +#define IP_PORTRANGE_SIZE 65536 /* * The sysctl "net.link.generic.system.port_used.list" returns: @@ -56,39 +56,39 @@ */ struct xnpigen { - uint32_t xng_len; /* length of this data structure */ - uint32_t xng_gen; /* how many times the list was built */ - uint32_t xng_npi_count; /* number of net_port_info following */ - uint32_t xng_npi_size; /* number of struct net_port_info */ - uuid_t xng_wakeuuid; /* WakeUUID when list was built */ + uint32_t xng_len; /* length of this data structure */ + uint32_t xng_gen; /* how many times the list was built */ + uint32_t xng_npi_count; /* number of net_port_info following */ + uint32_t xng_npi_size; /* number of struct net_port_info */ + uuid_t xng_wakeuuid; /* WakeUUID when list was built */ }; union in_addr_4_6 { - struct in_addr _in_a_4; - struct in6_addr _in_a_6; + struct in_addr _in_a_4; + struct in6_addr _in_a_6; }; 
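
The oldptr == USER_ADDR_NULL branches above implement the standard sysctl size-probe handshake: a caller first passes a NULL buffer to learn how large the result will be (note the cnt >> 4 headroom added for entries that may arrive between the two calls). A userland sketch of the full exchange against the "net.link.generic.system.port_used.list" node named here; it assumes the private if_ports_used.h declarations (xnpigen, net_port_info, NPIF_*) are on the include path, and the handler is privileged, so expect EPERM otherwise:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <net/if_ports_used.h>    /* private header, shown in these hunks */

    int
    main(void)
    {
            const char *name = "net.link.generic.system.port_used.list";
            size_t len = 0;

            /* Size probe: a NULL buffer takes the oldptr == USER_ADDR_NULL
             * branch, which reports the needed length instead of copying. */
            if (sysctlbyname(name, NULL, &len, NULL, 0) != 0) {
                    perror("size probe");    /* EPERM unless privileged */
                    return 1;
            }

            struct xnpigen *gen = malloc(len);
            if (gen == NULL || sysctlbyname(name, gen, &len, NULL, 0) != 0) {
                    return 1;
            }

            /* net_port_info records follow the xnpigen header back to back;
             * ports are in network byte order, per the header comments. */
            struct net_port_info *npi = (struct net_port_info *)(gen + 1);
            for (uint32_t i = 0; i < gen->xng_npi_count; i++, npi++) {
                    printf("%s %s local %u pid %d\n",
                        (npi->npi_flags & NPIF_TCP) ? "tcp" :
                        (npi->npi_flags & NPIF_UDP) ? "udp" : "?",
                        (npi->npi_flags & NPIF_IPV4) ? "inet" : "inet6",
                        ntohs(npi->npi_local_port), (int)npi->npi_owner_pid);
            }
            free(gen);
            return 0;
    }
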
-#define NPIF_IPV4 0x00000001 -#define NPIF_IPV6 0x00000002 -#define NPIF_TCP 0x00000004 -#define NPIF_UDP 0x00000008 -#define NPIF_DELEGATED 0x00000010 -#define NPIF_SOCKET 0x00000020 -#define NPIF_CHANNEL 0x00000040 +#define NPIF_IPV4 0x00000001 +#define NPIF_IPV6 0x00000002 +#define NPIF_TCP 0x00000004 +#define NPIF_UDP 0x00000008 +#define NPIF_DELEGATED 0x00000010 +#define NPIF_SOCKET 0x00000020 +#define NPIF_CHANNEL 0x00000040 struct net_port_info { - uint16_t npi_if_index; - uint16_t npi_flags; - struct timeval32 npi_timestamp; /* when passed to driver */ - uuid_t npi_flow_uuid; - in_port_t npi_local_port; /* network byte order */ - in_port_t npi_foreign_port; /* network byte order */ - union in_addr_4_6 npi_local_addr_; - union in_addr_4_6 npi_foreign_addr_; - pid_t npi_owner_pid; - pid_t npi_effective_pid; - char npi_owner_pname[MAXCOMLEN+1]; - char npi_effective_pname[MAXCOMLEN+1]; + uint16_t npi_if_index; + uint16_t npi_flags; + struct timeval32 npi_timestamp; /* when passed to driver */ + uuid_t npi_flow_uuid; + in_port_t npi_local_port; /* network byte order */ + in_port_t npi_foreign_port; /* network byte order */ + union in_addr_4_6 npi_local_addr_; + union in_addr_4_6 npi_foreign_addr_; + pid_t npi_owner_pid; + pid_t npi_effective_pid; + char npi_owner_pname[MAXCOMLEN + 1]; + char npi_effective_pname[MAXCOMLEN + 1]; }; #define npi_local_addr_in npi_local_addr_._in_a_4 @@ -108,6 +108,6 @@ void if_ports_used_add_inpcb(const uint32_t ifindex, const struct inpcb *inp); #endif /* XNU_KERNEL_PRIVATE */ -#endif /* PRIVATE */ +#endif /* PRIVATE */ #endif /* _NET_IF_PORT_USED_H_ */ diff --git a/bsd/net/if_ppp.h b/bsd/net/if_ppp.h index ae89246e2..1ae27a447 100644 --- a/bsd/net/if_ppp.h +++ b/bsd/net/if_ppp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -57,109 +57,109 @@ /* * Packet sizes */ -#define PPP_MTU 1500 /* Default MTU (size of Info field) */ -#define PPP_MAXMRU 65000 /* Largest MRU we allow */ -#define PPP_MAXMTU 16384 /* Largest MTU we allow */ +#define PPP_MTU 1500 /* Default MTU (size of Info field) */ +#define PPP_MAXMRU 65000 /* Largest MRU we allow */ +#define PPP_MAXMTU 16384 /* Largest MTU we allow */ /* * Bit definitions for flags. 
*/ -#define SC_COMP_PROT 0x00000001 /* protocol compression (output) */ -#define SC_COMP_AC 0x00000002 /* header compression (output) */ -#define SC_COMP_TCP 0x00000004 /* TCP (VJ) compression (output) */ -#define SC_NO_TCP_CCID 0x00000008 /* disable VJ connection-id comp. */ -#define SC_REJ_COMP_AC 0x00000010 /* reject adrs/ctrl comp. on input */ -#define SC_REJ_COMP_TCP 0x00000020 /* reject TCP (VJ) comp. on input */ -#define SC_CCP_OPEN 0x00000040 /* Look at CCP packets */ -#define SC_CCP_UP 0x00000080 /* May send/recv compressed packets */ -#define SC_DEBUG 0x00010000 /* enable debug messages */ -#define SC_LOG_INPKT 0x00020000 /* log contents of good pkts recvd */ -#define SC_LOG_OUTPKT 0x00040000 /* log contents of pkts sent */ -#define SC_LOG_RAWIN 0x00080000 /* log all chars received */ -#define SC_LOG_FLUSH 0x00100000 /* log all chars flushed */ -#define SC_RCV_B7_0 0x01000000 /* have rcvd char with bit 7 = 0 */ -#define SC_RCV_B7_1 0x02000000 /* have rcvd char with bit 7 = 1 */ -#define SC_RCV_EVNP 0x04000000 /* have rcvd char with even parity */ -#define SC_RCV_ODDP 0x08000000 /* have rcvd char with odd parity */ -#define SC_MASK 0x0fff00ff /* bits that user can change */ +#define SC_COMP_PROT 0x00000001 /* protocol compression (output) */ +#define SC_COMP_AC 0x00000002 /* header compression (output) */ +#define SC_COMP_TCP 0x00000004 /* TCP (VJ) compression (output) */ +#define SC_NO_TCP_CCID 0x00000008 /* disable VJ connection-id comp. */ +#define SC_REJ_COMP_AC 0x00000010 /* reject adrs/ctrl comp. on input */ +#define SC_REJ_COMP_TCP 0x00000020 /* reject TCP (VJ) comp. on input */ +#define SC_CCP_OPEN 0x00000040 /* Look at CCP packets */ +#define SC_CCP_UP 0x00000080 /* May send/recv compressed packets */ +#define SC_DEBUG 0x00010000 /* enable debug messages */ +#define SC_LOG_INPKT 0x00020000 /* log contents of good pkts recvd */ +#define SC_LOG_OUTPKT 0x00040000 /* log contents of pkts sent */ +#define SC_LOG_RAWIN 0x00080000 /* log all chars received */ +#define SC_LOG_FLUSH 0x00100000 /* log all chars flushed */ +#define SC_RCV_B7_0 0x01000000 /* have rcvd char with bit 7 = 0 */ +#define SC_RCV_B7_1 0x02000000 /* have rcvd char with bit 7 = 1 */ +#define SC_RCV_EVNP 0x04000000 /* have rcvd char with even parity */ +#define SC_RCV_ODDP 0x08000000 /* have rcvd char with odd parity */ +#define SC_MASK 0x0fff00ff /* bits that user can change */ /* * State bits in sc_flags, not changeable by user. 
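
SC_MASK above ("bits that user can change") is the filter that keeps flag-setting ioctls from clobbering the kernel-owned state bits that follow. A sketch of the usual merge idiom, with a hypothetical function name:

    #include <stdint.h>
    #include <net/if_ppp.h>    /* SC_MASK, as defined above */

    static uint32_t
    merge_user_flags(uint32_t sc_flags, uint32_t user_flags)
    {
            /* keep kernel-owned state bits, take user-changeable bits */
            return (sc_flags & ~SC_MASK) | (user_flags & SC_MASK);
    }
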
*/ -#define SC_TIMEOUT 0x00000400 /* timeout is currently pending */ -#define SC_VJ_RESET 0x00000800 /* need to reset VJ decomp */ -#define SC_COMP_RUN 0x00001000 /* compressor has been initiated */ -#define SC_DECOMP_RUN 0x00002000 /* decompressor has been initiated */ -#define SC_DC_ERROR 0x00004000 /* non-fatal decomp error detected */ -#define SC_DC_FERROR 0x00008000 /* fatal decomp error detected */ -#define SC_TBUSY 0x10000000 /* xmitter doesn't need a packet yet */ -#define SC_PKTLOST 0x20000000 /* have lost or dropped a packet */ -#define SC_FLUSH 0x40000000 /* flush input until next PPP_FLAG */ -#define SC_ESCAPED 0x80000000 /* saw a PPP_ESCAPE */ +#define SC_TIMEOUT 0x00000400 /* timeout is currently pending */ +#define SC_VJ_RESET 0x00000800 /* need to reset VJ decomp */ +#define SC_COMP_RUN 0x00001000 /* compressor has been initiated */ +#define SC_DECOMP_RUN 0x00002000 /* decompressor has been initiated */ +#define SC_DC_ERROR 0x00004000 /* non-fatal decomp error detected */ +#define SC_DC_FERROR 0x00008000 /* fatal decomp error detected */ +#define SC_TBUSY 0x10000000 /* xmitter doesn't need a packet yet */ +#define SC_PKTLOST 0x20000000 /* have lost or dropped a packet */ +#define SC_FLUSH 0x40000000 /* flush input until next PPP_FLAG */ +#define SC_ESCAPED 0x80000000 /* saw a PPP_ESCAPE */ /* * Ioctl definitions. */ struct npioctl { - int protocol; /* PPP procotol, e.g. PPP_IP */ - enum NPmode mode; + int protocol; /* PPP procotol, e.g. PPP_IP */ + enum NPmode mode; }; /* Structure describing a CCP configuration option, for PPPIOCSCOMPRESS */ struct ppp_option_data { - u_char *ptr; - u_int length; - int transmit; + u_char *ptr; + u_int length; + int transmit; }; struct ifpppstatsreq { - char ifr_name[IFNAMSIZ]; - struct ppp_stats stats; + char ifr_name[IFNAMSIZ]; + struct ppp_stats stats; }; struct ifpppcstatsreq { - char ifr_name[IFNAMSIZ]; - struct ppp_comp_stats stats; + char ifr_name[IFNAMSIZ]; + struct ppp_comp_stats stats; }; /* * Ioctl definitions. 
*/ -#define PPPIOCGFLAGS _IOR('t', 90, int) /* get configuration flags */ -#define PPPIOCSFLAGS _IOW('t', 89, int) /* set configuration flags */ -#define PPPIOCGASYNCMAP _IOR('t', 88, int) /* get async map */ -#define PPPIOCSASYNCMAP _IOW('t', 87, int) /* set async map */ -#define PPPIOCGUNIT _IOR('t', 86, int) /* get ppp unit number */ -#define PPPIOCGRASYNCMAP _IOR('t', 85, int) /* get receive async map */ -#define PPPIOCSRASYNCMAP _IOW('t', 84, int) /* set receive async map */ -#define PPPIOCGMRU _IOR('t', 83, int) /* get max receive unit */ -#define PPPIOCSMRU _IOW('t', 82, int) /* set max receive unit */ -#define PPPIOCSMAXCID _IOW('t', 81, int) /* set VJ max slot ID */ +#define PPPIOCGFLAGS _IOR('t', 90, int) /* get configuration flags */ +#define PPPIOCSFLAGS _IOW('t', 89, int) /* set configuration flags */ +#define PPPIOCGASYNCMAP _IOR('t', 88, int) /* get async map */ +#define PPPIOCSASYNCMAP _IOW('t', 87, int) /* set async map */ +#define PPPIOCGUNIT _IOR('t', 86, int) /* get ppp unit number */ +#define PPPIOCGRASYNCMAP _IOR('t', 85, int) /* get receive async map */ +#define PPPIOCSRASYNCMAP _IOW('t', 84, int) /* set receive async map */ +#define PPPIOCGMRU _IOR('t', 83, int) /* get max receive unit */ +#define PPPIOCSMRU _IOW('t', 82, int) /* set max receive unit */ +#define PPPIOCSMAXCID _IOW('t', 81, int) /* set VJ max slot ID */ #define PPPIOCGXASYNCMAP _IOR('t', 80, ext_accm) /* get extended ACCM */ #define PPPIOCSXASYNCMAP _IOW('t', 79, ext_accm) /* set extended ACCM */ -#define PPPIOCXFERUNIT _IO('t', 78) /* transfer PPP unit */ -#define PPPIOCSCOMPRESS _IOW('t', 77, struct ppp_option_data) -#define PPPIOCGNPMODE _IOWR('t', 76, struct npioctl) /* get NP mode */ -#define PPPIOCSNPMODE _IOW('t', 75, struct npioctl) /* set NP mode */ -#define PPPIOCGIDLE _IOR('t', 74, struct ppp_idle) /* get idle time */ -#define PPPIOCSPASS _IOW('t', 71, struct bpf_program) /* set pass filter */ -#define PPPIOCSACTIVE _IOW('t', 70, struct bpf_program) /* set active filt */ +#define PPPIOCXFERUNIT _IO('t', 78) /* transfer PPP unit */ +#define PPPIOCSCOMPRESS _IOW('t', 77, struct ppp_option_data) +#define PPPIOCGNPMODE _IOWR('t', 76, struct npioctl) /* get NP mode */ +#define PPPIOCSNPMODE _IOW('t', 75, struct npioctl) /* set NP mode */ +#define PPPIOCGIDLE _IOR('t', 74, struct ppp_idle) /* get idle time */ +#define PPPIOCSPASS _IOW('t', 71, struct bpf_program) /* set pass filter */ +#define PPPIOCSACTIVE _IOW('t', 70, struct bpf_program) /* set active filt */ /* PPPIOC[GS]MTU are alternatives to SIOC[GS]IFMTU, used under Ultrix */ -#define PPPIOCGMTU _IOR('t', 73, int) /* get interface MTU */ -#define PPPIOCSMTU _IOW('t', 72, int) /* set interface MTU */ +#define PPPIOCGMTU _IOR('t', 73, int) /* get interface MTU */ +#define PPPIOCSMTU _IOW('t', 72, int) /* set interface MTU */ /* * These two are interface ioctls so that pppstats can do them on * a socket without having to open the serial device. */ -#define SIOCGPPPSTATS _IOWR('i', 123, struct ifpppstatsreq) -#define SIOCGPPPCSTATS _IOWR('i', 122, struct ifpppcstatsreq) +#define SIOCGPPPSTATS _IOWR('i', 123, struct ifpppstatsreq) +#define SIOCGPPPCSTATS _IOWR('i', 122, struct ifpppcstatsreq) #if !defined(ifr_mtu) -#define ifr_mtu ifr_ifru.ifru_metric +#define ifr_mtu ifr_ifru.ifru_metric #endif #endif /* _IF_PPP_H_ */ diff --git a/bsd/net/if_stf.c b/bsd/net/if_stf.c index 2d5f2e90b..05e6087ab 100644 --- a/bsd/net/if_stf.c +++ b/bsd/net/if_stf.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. 
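
For readers decoding the if_ppp.h ioctl table above: _IOR/_IOW/_IOWR pack the transfer direction, the group character ('t' or 'i'), a command number, and sizeof the argument type, which is how the kernel knows how many bytes to copy in or out. A hedged userland sketch issuing one of the read-direction commands, assuming fd is an open PPP-attached tty:

    #include <sys/ioctl.h>
    #include <net/if_ppp.h>
    #include <stdio.h>

    static void
    show_ppp_unit(int fd)
    {
            int unit;

            /* PPPIOCGUNIT is _IOR('t', 86, int): the kernel copies an int out */
            if (ioctl(fd, PPPIOCGUNIT, &unit) == 0) {
                    printf("attached to ppp%d\n", unit);
            }
    }
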
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -94,7 +94,7 @@ * ICMPv6: * - Redirects cannot be used due to the lack of link-local address. * - * stf interface does not have, and will not need, a link-local address. + * stf interface does not have, and will not need, a link-local address. * It seems to have no real benefit and does not help the above symptoms much. * Even if we assign link-locals to interface, we cannot really * use link-local unicast/multicast on top of 6to4 cloud (since there's no @@ -150,25 +150,25 @@ #include #endif -#define GET_V4(x) ((const struct in_addr *)(const void *)(&(x)->s6_addr16[1])) +#define GET_V4(x) ((const struct in_addr *)(const void *)(&(x)->s6_addr16[1])) static lck_grp_t *stf_mtx_grp; struct stf_softc { - ifnet_t sc_if; /* common area */ - u_int32_t sc_protocol_family; /* dlil protocol attached */ + ifnet_t sc_if; /* common area */ + u_int32_t sc_protocol_family; /* dlil protocol attached */ union { struct route __sc_ro4; struct route_in6 __sc_ro6; /* just for safety */ } __sc_ro46; -#define sc_ro __sc_ro46.__sc_ro4 +#define sc_ro __sc_ro46.__sc_ro4 decl_lck_mtx_data(, sc_ro_mtx); const struct encaptab *encap_cookie; - bpf_tap_mode tap_mode; - bpf_packet_func tap_callback; + bpf_tap_mode tap_mode; + bpf_packet_func tap_callback; }; -void stfattach (void); +void stfattach(void); static int ip_stf_ttl = 40; static int stf_init_done; @@ -178,23 +178,23 @@ static void stfinit(void); static struct protosw in_stf_protosw = { - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_IPV6, - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_input = in_stf_input, - .pr_ctloutput = rip_ctloutput, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_IPV6, + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_input = in_stf_input, + .pr_ctloutput = rip_ctloutput, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, }; static int stf_encapcheck(const struct mbuf *, int, int, void *); static struct in6_ifaddr *stf_getsrcifa6(struct ifnet *); int stf_pre_output(struct ifnet *, protocol_family_t, struct mbuf **, - const struct sockaddr *, void *, char *, char *); + const struct sockaddr *, void *, char *, char *); static int stf_checkaddr4(struct stf_softc *, const struct in_addr *, - struct ifnet *); + struct ifnet *); static int stf_checkaddr6(struct stf_softc *, struct in6_addr *, - struct ifnet *); + struct ifnet *); static void stf_rtrequest(int, struct rtentry *, struct sockaddr *); static errno_t stf_ioctl(ifnet_t ifp, 
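
The GET_V4() macro above leans on the 6to4 address layout (RFC 3056): 2002::/16 addresses embed the IPv4 address in bytes 2 through 5 of the IPv6 address, which is exactly &s6_addr16[1]. The same extraction, standalone and compilable:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
            struct in6_addr six;
            struct in_addr four;
            char buf[INET_ADDRSTRLEN];

            /* 2002:c000:0204::/48 embeds 0xc0000204, i.e. 192.0.2.4 */
            inet_pton(AF_INET6, "2002:c000:204::1", &six);
            memcpy(&four, &six.s6_addr[2], sizeof(four));
            printf("%s\n", inet_ntop(AF_INET, &four, buf, sizeof(buf)));
            return 0;
    }
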
u_long cmd, void *data); static errno_t stf_output(ifnet_t ifp, mbuf_t m); @@ -213,79 +213,82 @@ stfinit(void) */ static errno_t stf_media_input( - __unused ifnet_t ifp, - protocol_family_t protocol_family, - mbuf_t m, - __unused char *frame_header) + __unused ifnet_t ifp, + protocol_family_t protocol_family, + mbuf_t m, + __unused char *frame_header) { - if (proto_input(protocol_family, m) != 0) + if (proto_input(protocol_family, m) != 0) { m_freem(m); + } - return (0); + return 0; } static errno_t stf_add_proto( - ifnet_t ifp, - protocol_family_t protocol_family, - __unused const struct ifnet_demux_desc *demux_array, - __unused u_int32_t demux_count) + ifnet_t ifp, + protocol_family_t protocol_family, + __unused const struct ifnet_demux_desc *demux_array, + __unused u_int32_t demux_count) { /* Only one protocol may be attached at a time */ struct stf_softc* stf = ifnet_softc(ifp); - if (stf->sc_protocol_family == 0) + if (stf->sc_protocol_family == 0) { stf->sc_protocol_family = protocol_family; - else { + } else { printf("stf_add_proto: stf already has a proto\n"); return EBUSY; } - + return 0; } static errno_t stf_del_proto( - ifnet_t ifp, - protocol_family_t protocol_family) + ifnet_t ifp, + protocol_family_t protocol_family) { - if (((struct stf_softc*)ifnet_softc(ifp))->sc_protocol_family == protocol_family) + if (((struct stf_softc*)ifnet_softc(ifp))->sc_protocol_family == protocol_family) { ((struct stf_softc*)ifnet_softc(ifp))->sc_protocol_family = 0; - + } + return 0; } static errno_t stf_attach_inet6( - ifnet_t ifp, - protocol_family_t protocol_family) + ifnet_t ifp, + protocol_family_t protocol_family) { - struct ifnet_attach_proto_param reg; - errno_t stat; - - if (protocol_family != PF_INET6) - return EPROTONOSUPPORT; + struct ifnet_attach_proto_param reg; + errno_t stat; + + if (protocol_family != PF_INET6) { + return EPROTONOSUPPORT; + } bzero(®, sizeof(reg)); - reg.input = stf_media_input; - reg.pre_output = stf_pre_output; + reg.input = stf_media_input; + reg.pre_output = stf_pre_output; - stat = ifnet_attach_protocol(ifp, protocol_family, ®); - if (stat && stat != EEXIST) { - printf("stf_attach_proto_family can't attach interface fam=%d\n", - protocol_family); - } + stat = ifnet_attach_protocol(ifp, protocol_family, ®); + if (stat && stat != EEXIST) { + printf("stf_attach_proto_family can't attach interface fam=%d\n", + protocol_family); + } - return stat; + return stat; } static errno_t stf_demux( - ifnet_t ifp, - __unused mbuf_t m, - __unused char *frame_ptr, - protocol_family_t *protocol_family) + ifnet_t ifp, + __unused mbuf_t m, + __unused char *frame_ptr, + protocol_family_t *protocol_family) { struct stf_softc* stf = ifnet_softc(ifp); *protocol_family = stf->sc_protocol_family; @@ -294,15 +297,15 @@ stf_demux( static errno_t stf_set_bpf_tap( - ifnet_t ifp, - bpf_tap_mode mode, - bpf_packet_func callback) + ifnet_t ifp, + bpf_tap_mode mode, + bpf_packet_func callback) { - struct stf_softc *sc = ifnet_softc(ifp); - + struct stf_softc *sc = ifnet_softc(ifp); + sc->tap_mode = mode; sc->tap_callback = callback; - + return 0; } @@ -312,21 +315,22 @@ stfattach(void) struct stf_softc *sc; int error; const struct encaptab *p; - struct ifnet_init_eparams stf_init; + struct ifnet_init_eparams stf_init; stfinit(); error = proto_register_plumber(PF_INET6, APPLE_IF_FAM_STF, stf_attach_inet6, NULL); - if (error != 0) + if (error != 0) { printf("proto_register_plumber failed for AF_INET6 error=%d\n", error); + } sc = _MALLOC(sizeof(struct stf_softc), M_DEVBUF, M_WAITOK | 
M_ZERO); if (sc == 0) { printf("stf softc attach failed\n" ); return; } - + p = encap_attach_func(AF_INET, IPPROTO_IPV6, stf_encapcheck, &in_stf_protosw, sc); if (p == NULL) { @@ -336,10 +340,10 @@ stfattach(void) } sc->encap_cookie = p; lck_mtx_init(&sc->sc_ro_mtx, stf_mtx_grp, LCK_ATTR_NULL); - + bzero(&stf_init, sizeof(stf_init)); stf_init.ver = IFNET_INIT_CURRENT_VERSION; - stf_init.len = sizeof (stf_init); + stf_init.len = sizeof(stf_init); stf_init.flags = IFNET_INIT_LEGACY; stf_init.name = "stf"; stf_init.unit = 0; @@ -352,7 +356,7 @@ stfattach(void) stf_init.softc = sc; stf_init.ioctl = stf_ioctl; stf_init.set_bpf_tap = stf_set_bpf_tap; - + error = ifnet_allocate_extended(&stf_init, &sc->sc_if); if (error != 0) { printf("stfattach, ifnet_allocate failed - %d\n", error); @@ -371,7 +375,7 @@ stfattach(void) #if CONFIG_MACF_NET mac_ifnet_label_init(&sc->sc_if); #endif - + error = ifnet_attach(sc->sc_if, NULL); if (error != 0) { printf("stfattach: ifnet_attach returned error=%d\n", error); @@ -381,9 +385,9 @@ stfattach(void) FREE(sc, M_DEVBUF); return; } - + bpfattach(sc->sc_if, DLT_NULL, sizeof(u_int)); - + return; } @@ -400,27 +404,33 @@ stf_encapcheck( struct in_addr a, b; sc = (struct stf_softc *)arg; - if (sc == NULL) + if (sc == NULL) { return 0; + } - if ((ifnet_flags(sc->sc_if) & IFF_UP) == 0) + if ((ifnet_flags(sc->sc_if) & IFF_UP) == 0) { return 0; + } /* IFF_LINK0 means "no decapsulation" */ - if ((ifnet_flags(sc->sc_if) & IFF_LINK0) != 0) + if ((ifnet_flags(sc->sc_if) & IFF_LINK0) != 0) { return 0; + } - if (proto != IPPROTO_IPV6) + if (proto != IPPROTO_IPV6) { return 0; + } mbuf_copydata((struct mbuf *)(size_t)m, 0, sizeof(ip), &ip); - if (ip.ip_v != 4) + if (ip.ip_v != 4) { return 0; + } ia6 = stf_getsrcifa6(sc->sc_if); - if (ia6 == NULL) + if (ia6 == NULL) { return 0; + } /* * check if IPv4 dst matches the IPv4 address derived from the @@ -484,9 +494,8 @@ stf_getsrcifa6(struct ifnet *ifp) IFA_UNLOCK(ia); lck_rw_lock_shared(in_ifaddr_rwlock); for (ia4 = TAILQ_FIRST(&in_ifaddrhead); - ia4; - ia4 = TAILQ_NEXT(ia4, ia_link)) - { + ia4; + ia4 = TAILQ_NEXT(ia4, ia_link)) { IFA_LOCK(&ia4->ia_ifa); if (ia4->ia_addr.sin_addr.s_addr == in.s_addr) { IFA_UNLOCK(&ia4->ia_ifa); @@ -495,24 +504,25 @@ stf_getsrcifa6(struct ifnet *ifp) IFA_UNLOCK(&ia4->ia_ifa); } lck_rw_done(in_ifaddr_rwlock); - if (ia4 == NULL) + if (ia4 == NULL) { continue; + } - IFA_ADDREF(ia); /* for caller */ + IFA_ADDREF(ia); /* for caller */ ifnet_lock_done(ifp); - return ((struct in6_ifaddr *)ia); + return (struct in6_ifaddr *)ia; } ifnet_lock_done(ifp); - return (NULL); + return NULL; } int stf_pre_output( - struct ifnet *ifp, + struct ifnet *ifp, __unused protocol_family_t protocol_family, - struct mbuf **m0, - const struct sockaddr *dst, + struct mbuf **m0, + const struct sockaddr *dst, __unused void *route, __unused char *desk_linkaddr, __unused char *frame_type) @@ -525,9 +535,9 @@ stf_pre_output( struct ip *ip; struct ip6_hdr *ip6; struct in6_ifaddr *ia6; - struct sockaddr_in *dst4; + struct sockaddr_in *dst4; struct ip_out_args ipoa; - errno_t result = 0; + errno_t result = 0; bzero(&ipoa, sizeof(ipoa)); ipoa.ipoa_boundif = IFSCOPE_NONE; @@ -569,11 +579,11 @@ stf_pre_output( * Pickup the right outer dst addr from the list of candidates. * ip6_dst has priority as it may be able to give us shorter IPv4 hops. 
*/ - if (IN6_IS_ADDR_6TO4(&ip6->ip6_dst)) + if (IN6_IS_ADDR_6TO4(&ip6->ip6_dst)) { in4 = GET_V4(&ip6->ip6_dst); - else if (IN6_IS_ADDR_6TO4(&dst6->sin6_addr)) + } else if (IN6_IS_ADDR_6TO4(&dst6->sin6_addr)) { in4 = GET_V4(&dst6->sin6_addr); - else { + } else { IFA_REMREF(&ia6->ia_ifa); return ENETUNREACH; } @@ -581,15 +591,16 @@ stf_pre_output( if (ifp->if_bpf) { /* We need to prepend the address family as a four byte field. */ u_int32_t af = AF_INET6; - + bpf_tap_out(ifp, 0, m, &af, sizeof(af)); } M_PREPEND(m, sizeof(struct ip), M_DONTWAIT, 1); - if (m && mbuf_len(m) < sizeof(struct ip)) + if (m && mbuf_len(m) < sizeof(struct ip)) { m = m_pullup(m, sizeof(struct ip)); + } if (m == NULL) { - *m0 = NULL; + *m0 = NULL; IFA_REMREF(&ia6->ia_ifa); return ENOBUFS; } @@ -604,11 +615,12 @@ stf_pre_output( bcopy(in4, &ip->ip_dst, sizeof(ip->ip_dst)); ip->ip_p = IPPROTO_IPV6; ip->ip_ttl = ip_stf_ttl; - ip->ip_len = m->m_pkthdr.len; /*host order*/ - if (ifp->if_flags & IFF_LINK1) + ip->ip_len = m->m_pkthdr.len; /*host order*/ + if (ifp->if_flags & IFF_LINK1) { ip_ecn_ingress(ECN_NORMAL, &ip->ip_tos, &tos); - else + } else { ip_ecn_ingress(ECN_NOCARE, &ip->ip_tos, &tos); + } lck_mtx_lock(&sc->sc_ro_mtx); dst4 = (struct sockaddr_in *)(void *)&sc->sc_ro.ro_dst; @@ -626,28 +638,29 @@ stf_pre_output( /* Assumption: ip_output will free mbuf on errors */ /* All the output processing is done here, don't let stf_output be called */ - if (result == 0) + if (result == 0) { result = EJUSTRETURN; + } *m0 = NULL; IFA_REMREF(&ia6->ia_ifa); return result; } static errno_t stf_output( - __unused ifnet_t ifp, - __unused mbuf_t m) + __unused ifnet_t ifp, + __unused mbuf_t m) { /* All processing is done in stf_pre_output * this shouldn't be called as the pre_output returns "EJUSTRETURN" */ return 0; -} +} static int stf_checkaddr4( struct stf_softc *sc, const struct in_addr *in, - struct ifnet *inifp) /* incoming interface */ + struct ifnet *inifp) /* incoming interface */ { struct in_ifaddr *ia4; @@ -655,8 +668,9 @@ stf_checkaddr4( * reject packets with the following address: * 224.0.0.0/4 0.0.0.0/8 127.0.0.0/8 255.0.0.0/8 */ - if (IN_MULTICAST(ntohl(in->s_addr))) + if (IN_MULTICAST(ntohl(in->s_addr))) { return -1; + } switch ((ntohl(in->s_addr) & 0xff000000) >> 24) { case 0: case 127: case 255: return -1; @@ -667,9 +681,8 @@ stf_checkaddr4( */ lck_rw_lock_shared(in_ifaddr_rwlock); for (ia4 = TAILQ_FIRST(&in_ifaddrhead); - ia4; - ia4 = TAILQ_NEXT(ia4, ia_link)) - { + ia4; + ia4 = TAILQ_NEXT(ia4, ia_link)) { IFA_LOCK(&ia4->ia_ifa); if ((ia4->ia_ifa.ifa_ifp->if_flags & IFF_BROADCAST) == 0) { IFA_UNLOCK(&ia4->ia_ifa); @@ -696,8 +709,9 @@ stf_checkaddr4( sin.sin_len = sizeof(struct sockaddr_in); sin.sin_addr = *in; rt = rtalloc1((struct sockaddr *)&sin, 0, 0); - if (rt != NULL) + if (rt != NULL) { RT_LOCK(rt); + } if (rt == NULL || rt->rt_ifp != inifp) { #if 1 log(LOG_WARNING, "%s: packet from 0x%x dropped " @@ -721,13 +735,14 @@ static int stf_checkaddr6( struct stf_softc *sc, struct in6_addr *in6, - struct ifnet *inifp) /* incoming interface */ + struct ifnet *inifp) /* incoming interface */ { /* * check 6to4 addresses */ - if (IN6_IS_ADDR_6TO4(in6)) + if (IN6_IS_ADDR_6TO4(in6)) { return stf_checkaddr4(sc, GET_V4(in6), inifp); + } /* * reject anything that look suspicious. the test is implemented @@ -735,8 +750,9 @@ stf_checkaddr6( * (1) reject bad packets earlier, and * (2) to be safe against future ip6_input change. 
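
What stf_pre_output() assembled above is plain IPv6-in-IPv4 encapsulation: M_PREPEND makes room for an outer struct ip, protocol 41 (IPPROTO_IPV6), TTL taken from ip_stf_ttl, source and destination lifted out of the 6to4 addresses, and ECN mapped by ip_ecn_ingress(). A simplified sketch of just the header fill; src4, dst4 and pkt_len are placeholders, and, as in the driver, ip_len stays in host order for ip_output to fix up:

    #include <netinet/in.h>
    #include <netinet/ip.h>
    #include <string.h>

    static void
    fill_outer_ipv4(struct ip *outer, const struct in_addr *src4,
        const struct in_addr *dst4, u_short pkt_len)
    {
            memset(outer, 0, sizeof(*outer));
            outer->ip_v = IPVERSION;
            outer->ip_hl = sizeof(struct ip) >> 2;
            outer->ip_p = IPPROTO_IPV6;     /* protocol 41: IPv6-in-IPv4 */
            outer->ip_ttl = 40;             /* ip_stf_ttl default above */
            outer->ip_src = *src4;          /* V4 half of our 6to4 address */
            outer->ip_dst = *dst4;          /* V4 extracted from ip6_dst */
            outer->ip_len = pkt_len;        /* host order, as in the driver */
    }
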
*/ - if (IN6_IS_ADDR_V4COMPAT(in6) || IN6_IS_ADDR_V4MAPPED(in6)) + if (IN6_IS_ADDR_V4COMPAT(in6) || IN6_IS_ADDR_V4MAPPED(in6)) { return -1; + } return 0; } @@ -752,7 +768,7 @@ in_stf_input( u_int8_t otos, itos; int proto; struct ifnet *ifp; - struct ifnet_stat_increment_param stats; + struct ifnet_stat_increment_param stats; ip = mtod(m, struct ip *); proto = ip->ip_p; @@ -801,17 +817,18 @@ in_stf_input( } itos = (ntohl(ip6.ip6_flow) >> 20) & 0xff; - if ((ifnet_flags(ifp) & IFF_LINK1) != 0) + if ((ifnet_flags(ifp) & IFF_LINK1) != 0) { ip_ecn_egress(ECN_NORMAL, &otos, &itos); - else + } else { ip_ecn_egress(ECN_NOCARE, &otos, &itos); + } ip6.ip6_flow &= ~htonl(0xff << 20); ip6.ip6_flow |= htonl((u_int32_t)itos << 20); m->m_pkthdr.rcvif = ifp; mbuf_pkthdr_setheader(m, mbuf_data(m)); mbuf_adj(m, off); - + if (ifp->if_bpf) { /* We need to prepend the address family as a four byte field. */ u_int32_t af = AF_INET6; @@ -829,7 +846,7 @@ in_stf_input( stats.bytes_in = mbuf_pkthdr_len(m); mbuf_pkthdr_setrcvif(m, ifp); ifnet_input(ifp, m, &stats); - + return; } @@ -847,9 +864,9 @@ stf_rtrequest( static errno_t stf_ioctl( - ifnet_t ifp, - u_long cmd, - void *data) + ifnet_t ifp, + u_long cmd, + void *data) { struct ifaddr *ifa; struct ifreq *ifr; @@ -872,8 +889,8 @@ stf_ioctl( } sin6 = (struct sockaddr_in6 *)(void *)ifa->ifa_addr; if (IN6_IS_ADDR_6TO4(&sin6->sin6_addr)) { - if ( !(ifnet_flags( ifp ) & IFF_UP) ) { - /* do this only if the interface is not already up */ + if (!(ifnet_flags( ifp ) & IFF_UP)) { + /* do this only if the interface is not already up */ ifa->ifa_rtrequest = stf_rtrequest; IFA_UNLOCK(ifa); ifnet_set_flags(ifp, IFF_UP, IFF_UP); @@ -890,10 +907,11 @@ stf_ioctl( case SIOCADDMULTI: case SIOCDELMULTI: ifr = (struct ifreq *)data; - if (ifr && ifr->ifr_addr.sa_family == AF_INET6) + if (ifr && ifr->ifr_addr.sa_family == AF_INET6) { ; - else + } else { error = EAFNOSUPPORT; + } break; default: diff --git a/bsd/net/if_types.h b/bsd/net/if_types.h index 4d9697275..2d95e9eeb 100644 --- a/bsd/net/if_types.h +++ b/bsd/net/if_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -70,82 +70,82 @@ * This list is derived from the SNMP list of ifTypes, currently * documented in RFC1573. 
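
The itos computation in in_stf_input() above recovers the IPv6 Traffic Class, bits 20 through 27 of the big-endian ip6_flow word, before ip_ecn_egress() merges outer and inner ECN state. The same arithmetic, standalone:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* version 6, traffic class 0xb8, flow label 0x12345 */
            uint32_t ip6_flow = htonl((6u << 28) | (0xb8u << 20) | 0x12345);
            uint8_t itos = (ntohl(ip6_flow) >> 20) & 0xff;

            printf("traffic class 0x%02x\n", itos);    /* prints 0xb8 */
            return 0;
    }
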
* The current list of assignments is maintained at: - * http://www.iana.org/assignments/smi-numbers + * http://www.iana.org/assignments/smi-numbers */ -#define IFT_OTHER 0x1 /* none of the following */ -#define IFT_1822 0x2 /* old-style arpanet imp */ -#define IFT_HDH1822 0x3 /* HDH arpanet imp */ -#define IFT_X25DDN 0x4 /* x25 to imp */ -#define IFT_X25 0x5 /* PDN X25 interface (RFC877) */ -#define IFT_ETHER 0x6 /* Ethernet CSMACD */ -#define IFT_ISO88023 0x7 /* CMSA CD */ -#define IFT_ISO88024 0x8 /* Token Bus */ -#define IFT_ISO88025 0x9 /* Token Ring */ -#define IFT_ISO88026 0xa /* MAN */ -#define IFT_STARLAN 0xb -#define IFT_P10 0xc /* Proteon 10MBit ring */ -#define IFT_P80 0xd /* Proteon 80MBit ring */ -#define IFT_HY 0xe /* Hyperchannel */ -#define IFT_FDDI 0xf -#define IFT_LAPB 0x10 -#define IFT_SDLC 0x11 -#define IFT_T1 0x12 -#define IFT_CEPT 0x13 /* E1 - european T1 */ -#define IFT_ISDNBASIC 0x14 -#define IFT_ISDNPRIMARY 0x15 -#define IFT_PTPSERIAL 0x16 /* Proprietary PTP serial */ -#define IFT_PPP 0x17 /* RFC 1331 */ -#define IFT_LOOP 0x18 /* loopback */ -#define IFT_EON 0x19 /* ISO over IP */ -#define IFT_XETHER 0x1a /* obsolete 3MB experimental ethernet */ -#define IFT_NSIP 0x1b /* XNS over IP */ -#define IFT_SLIP 0x1c /* IP over generic TTY */ -#define IFT_ULTRA 0x1d /* Ultra Technologies */ -#define IFT_DS3 0x1e /* Generic T3 */ -#define IFT_SIP 0x1f /* SMDS */ -#define IFT_FRELAY 0x20 /* Frame Relay DTE only */ -#define IFT_RS232 0x21 -#define IFT_PARA 0x22 /* parallel-port */ -#define IFT_ARCNET 0x23 -#define IFT_ARCNETPLUS 0x24 -#define IFT_ATM 0x25 /* ATM cells */ -#define IFT_MIOX25 0x26 -#define IFT_SONET 0x27 /* SONET or SDH */ -#define IFT_X25PLE 0x28 -#define IFT_ISO88022LLC 0x29 -#define IFT_LOCALTALK 0x2a -#define IFT_SMDSDXI 0x2b -#define IFT_FRELAYDCE 0x2c /* Frame Relay DCE */ -#define IFT_V35 0x2d -#define IFT_HSSI 0x2e -#define IFT_HIPPI 0x2f -#define IFT_MODEM 0x30 /* Generic Modem */ -#define IFT_AAL5 0x31 /* AAL5 over ATM */ -#define IFT_SONETPATH 0x32 -#define IFT_SONETVT 0x33 -#define IFT_SMDSICIP 0x34 /* SMDS InterCarrier Interface */ -#define IFT_PROPVIRTUAL 0x35 /* Proprietary Virtual/internal */ -#define IFT_PROPMUX 0x36 /* Proprietary Multiplexing */ +#define IFT_OTHER 0x1 /* none of the following */ +#define IFT_1822 0x2 /* old-style arpanet imp */ +#define IFT_HDH1822 0x3 /* HDH arpanet imp */ +#define IFT_X25DDN 0x4 /* x25 to imp */ +#define IFT_X25 0x5 /* PDN X25 interface (RFC877) */ +#define IFT_ETHER 0x6 /* Ethernet CSMACD */ +#define IFT_ISO88023 0x7 /* CMSA CD */ +#define IFT_ISO88024 0x8 /* Token Bus */ +#define IFT_ISO88025 0x9 /* Token Ring */ +#define IFT_ISO88026 0xa /* MAN */ +#define IFT_STARLAN 0xb +#define IFT_P10 0xc /* Proteon 10MBit ring */ +#define IFT_P80 0xd /* Proteon 80MBit ring */ +#define IFT_HY 0xe /* Hyperchannel */ +#define IFT_FDDI 0xf +#define IFT_LAPB 0x10 +#define IFT_SDLC 0x11 +#define IFT_T1 0x12 +#define IFT_CEPT 0x13 /* E1 - european T1 */ +#define IFT_ISDNBASIC 0x14 +#define IFT_ISDNPRIMARY 0x15 +#define IFT_PTPSERIAL 0x16 /* Proprietary PTP serial */ +#define IFT_PPP 0x17 /* RFC 1331 */ +#define IFT_LOOP 0x18 /* loopback */ +#define IFT_EON 0x19 /* ISO over IP */ +#define IFT_XETHER 0x1a /* obsolete 3MB experimental ethernet */ +#define IFT_NSIP 0x1b /* XNS over IP */ +#define IFT_SLIP 0x1c /* IP over generic TTY */ +#define IFT_ULTRA 0x1d /* Ultra Technologies */ +#define IFT_DS3 0x1e /* Generic T3 */ +#define IFT_SIP 0x1f /* SMDS */ +#define IFT_FRELAY 0x20 /* Frame Relay DTE only */ +#define IFT_RS232 
0x21 +#define IFT_PARA 0x22 /* parallel-port */ +#define IFT_ARCNET 0x23 +#define IFT_ARCNETPLUS 0x24 +#define IFT_ATM 0x25 /* ATM cells */ +#define IFT_MIOX25 0x26 +#define IFT_SONET 0x27 /* SONET or SDH */ +#define IFT_X25PLE 0x28 +#define IFT_ISO88022LLC 0x29 +#define IFT_LOCALTALK 0x2a +#define IFT_SMDSDXI 0x2b +#define IFT_FRELAYDCE 0x2c /* Frame Relay DCE */ +#define IFT_V35 0x2d +#define IFT_HSSI 0x2e +#define IFT_HIPPI 0x2f +#define IFT_MODEM 0x30 /* Generic Modem */ +#define IFT_AAL5 0x31 /* AAL5 over ATM */ +#define IFT_SONETPATH 0x32 +#define IFT_SONETVT 0x33 +#define IFT_SMDSICIP 0x34 /* SMDS InterCarrier Interface */ +#define IFT_PROPVIRTUAL 0x35 /* Proprietary Virtual/internal */ +#define IFT_PROPMUX 0x36 /* Proprietary Multiplexing */ /* * IFT_GIF, IFT_FAITH and IFT_FAITH are not based on IANA assignments. * Note: IFT_STF has a defined ifType: 0xd7 (215), but we use 0x39. */ -#define IFT_GIF 0x37 /*0xf0*/ -#define IFT_FAITH 0x38 /*0xf2*/ -#define IFT_STF 0x39 /*0xf3*/ +#define IFT_GIF 0x37 /*0xf0*/ +#define IFT_FAITH 0x38 /*0xf2*/ +#define IFT_STF 0x39 /*0xf3*/ -#define IFT_L2VLAN 0x87 /* Layer 2 Virtual LAN using 802.1Q */ -#define IFT_IEEE8023ADLAG 0x88 /* IEEE802.3ad Link Aggregate */ -#define IFT_IEEE1394 0x90 /* IEEE1394 High Performance SerialBus*/ -#define IFT_BRIDGE 0xd1 /* Transparent bridge interface */ +#define IFT_L2VLAN 0x87 /* Layer 2 Virtual LAN using 802.1Q */ +#define IFT_IEEE8023ADLAG 0x88 /* IEEE802.3ad Link Aggregate */ +#define IFT_IEEE1394 0x90 /* IEEE1394 High Performance SerialBus*/ +#define IFT_BRIDGE 0xd1 /* Transparent bridge interface */ -#define IFT_ENC 0xf4 /* Encapsulation */ -#define IFT_PFLOG 0xf5 /* Packet filter logging */ -#define IFT_PFSYNC 0xf6 /* Packet filter state syncing */ -#define IFT_CARP 0xf8 /* Common Address Redundancy Protocol */ -#define IFT_PKTAP 0xfe /* Packet tap pseudo interface */ -#define IFT_CELLULAR 0xff /* Packet Data over Cellular */ -#define IFT_PDP IFT_CELLULAR /* deprecated; use IFT_CELLULAR */ +#define IFT_ENC 0xf4 /* Encapsulation */ +#define IFT_PFLOG 0xf5 /* Packet filter logging */ +#define IFT_PFSYNC 0xf6 /* Packet filter state syncing */ +#define IFT_CARP 0xf8 /* Common Address Redundancy Protocol */ +#define IFT_PKTAP 0xfe /* Packet tap pseudo interface */ +#define IFT_CELLULAR 0xff /* Packet Data over Cellular */ +#define IFT_PDP IFT_CELLULAR /* deprecated; use IFT_CELLULAR */ #endif diff --git a/bsd/net/if_utun.c b/bsd/net/if_utun.c index b0e711694..416ef1537 100644 --- a/bsd/net/if_utun.c +++ b/bsd/net/if_utun.c @@ -2,7 +2,7 @@ * Copyright (c) 2008-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,19 +22,19 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* ---------------------------------------------------------------------------------- -Application of kernel control for interface creation - -Theory of operation: -utun (user tunnel) acts as glue between kernel control sockets and network interfaces. -This kernel control will register an interface for every client that connects. ----------------------------------------------------------------------------------- */ + * Application of kernel control for interface creation + * + * Theory of operation: + * utun (user tunnel) acts as glue between kernel control sockets and network interfaces. + * This kernel control will register an interface for every client that connects. + * ---------------------------------------------------------------------------------- */ #include #include @@ -45,7 +45,7 @@ This kernel control will register an interface for every client that connects. #include #include #include -#include +#include #include #include #include @@ -77,85 +77,85 @@ typedef struct utun_nx { /* Control block allocated for each kernel control connection */ struct utun_pcb { - TAILQ_ENTRY(utun_pcb) utun_chain; - kern_ctl_ref utun_ctlref; - ifnet_t utun_ifp; - u_int32_t utun_unit; - u_int32_t utun_unique_id; - u_int32_t utun_flags; - int utun_ext_ifdata_stats; - u_int32_t utun_max_pending_packets; - char utun_if_xname[IFXNAMSIZ]; - char utun_unique_name[IFXNAMSIZ]; + TAILQ_ENTRY(utun_pcb) utun_chain; + kern_ctl_ref utun_ctlref; + ifnet_t utun_ifp; + u_int32_t utun_unit; + u_int32_t utun_unique_id; + u_int32_t utun_flags; + int utun_ext_ifdata_stats; + u_int32_t utun_max_pending_packets; + char utun_if_xname[IFXNAMSIZ]; + char utun_unique_name[IFXNAMSIZ]; // PCB lock protects state fields and rings decl_lck_rw_data(, utun_pcb_lock); - struct mbuf * utun_input_chain; - struct mbuf * utun_input_chain_last; + struct mbuf * utun_input_chain; + struct mbuf * utun_input_chain_last; // Input chain lock protects the list of input mbufs // The input chain lock must be taken AFTER the PCB lock if both are held - lck_mtx_t utun_input_chain_lock; + lck_mtx_t utun_input_chain_lock; #if UTUN_NEXUS - struct utun_nx utun_nx; - int utun_kpipe_enabled; - uuid_t utun_kpipe_uuid; - void * utun_kpipe_rxring; - void * utun_kpipe_txring; - kern_pbufpool_t utun_kpipe_pp; - - kern_nexus_t utun_netif_nexus; - kern_pbufpool_t utun_netif_pp; - void * utun_netif_rxring; - void * utun_netif_txring; - uint64_t utun_netif_txring_size; - - u_int32_t utun_slot_size; - u_int32_t utun_netif_ring_size; - u_int32_t utun_tx_fsw_ring_size; - u_int32_t utun_rx_fsw_ring_size; - bool utun_use_netif; - bool utun_needs_netagent; + struct utun_nx utun_nx; + int utun_kpipe_enabled; + uuid_t utun_kpipe_uuid; + void * utun_kpipe_rxring; + void * utun_kpipe_txring; + kern_pbufpool_t utun_kpipe_pp; + + kern_nexus_t utun_netif_nexus; + kern_pbufpool_t utun_netif_pp; + void * utun_netif_rxring; + void * utun_netif_txring; + uint64_t utun_netif_txring_size; + + u_int32_t utun_slot_size; + u_int32_t utun_netif_ring_size; + u_int32_t utun_tx_fsw_ring_size; + u_int32_t utun_rx_fsw_ring_size; + bool utun_use_netif; + bool utun_needs_netagent; #endif // UTUN_NEXUS }; /* Kernel Control functions */ -static errno_t utun_ctl_bind(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, - void **unitinfo); -static errno_t utun_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, - void **unitinfo); -static 
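
A userland view of the model the theory-of-operation comment above describes, where each connected control socket materializes a utunN interface. This is the long-standing kern_control handshake (resolve the control name to an id, then connect); UTUN_CONTROL_NAME comes from net/if_utun.h, and sc_unit N yields interface utun(N-1), with 0 meaning first free:

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <sys/ioctl.h>
    #include <sys/kern_control.h>
    #include <sys/sys_domain.h>
    #include <net/if_utun.h>
    #include <string.h>
    #include <unistd.h>

    static int
    utun_open(u_int32_t unit)
    {
            struct ctl_info info;
            struct sockaddr_ctl addr;
            int fd;

            fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
            if (fd < 0) {
                    return -1;
            }

            memset(&info, 0, sizeof(info));
            strlcpy(info.ctl_name, UTUN_CONTROL_NAME, sizeof(info.ctl_name));
            if (ioctl(fd, CTLIOCGINFO, &info) < 0) {    /* name -> ctl_id */
                    close(fd);
                    return -1;
            }

            memset(&addr, 0, sizeof(addr));
            addr.sc_len = sizeof(addr);
            addr.sc_family = AF_SYSTEM;
            addr.ss_sysaddr = AF_SYS_CONTROL;
            addr.sc_id = info.ctl_id;
            addr.sc_unit = unit;
            if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;    /* reads/writes now carry utun-framed packets */
    }
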
errno_t utun_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, - void *unitinfo); -static errno_t utun_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, - void *unitinfo, mbuf_t m, int flags); -static errno_t utun_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int opt, void *data, size_t *len); -static errno_t utun_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int opt, void *data, size_t len); -static void utun_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int flags); +static errno_t utun_ctl_bind(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, + void **unitinfo); +static errno_t utun_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, + void **unitinfo); +static errno_t utun_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, + void *unitinfo); +static errno_t utun_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, + void *unitinfo, mbuf_t m, int flags); +static errno_t utun_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, + int opt, void *data, size_t *len); +static errno_t utun_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, + int opt, void *data, size_t len); +static void utun_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, + int flags); /* Network Interface functions */ static void utun_start(ifnet_t interface); -static errno_t utun_framer(ifnet_t interface, mbuf_t *packet, - const struct sockaddr *dest, const char *desk_linkaddr, - const char *frame_type, u_int32_t *prepend_len, u_int32_t *postpend_len); -static errno_t utun_output(ifnet_t interface, mbuf_t data); -static errno_t utun_demux(ifnet_t interface, mbuf_t data, char *frame_header, - protocol_family_t *protocol); -static errno_t utun_add_proto(ifnet_t interface, protocol_family_t protocol, - const struct ifnet_demux_desc *demux_array, - u_int32_t demux_count); -static errno_t utun_del_proto(ifnet_t interface, protocol_family_t protocol); -static errno_t utun_ioctl(ifnet_t interface, u_long cmd, void *data); -static void utun_detached(ifnet_t interface); +static errno_t utun_framer(ifnet_t interface, mbuf_t *packet, + const struct sockaddr *dest, const char *desk_linkaddr, + const char *frame_type, u_int32_t *prepend_len, u_int32_t *postpend_len); +static errno_t utun_output(ifnet_t interface, mbuf_t data); +static errno_t utun_demux(ifnet_t interface, mbuf_t data, char *frame_header, + protocol_family_t *protocol); +static errno_t utun_add_proto(ifnet_t interface, protocol_family_t protocol, + const struct ifnet_demux_desc *demux_array, + u_int32_t demux_count); +static errno_t utun_del_proto(ifnet_t interface, protocol_family_t protocol); +static errno_t utun_ioctl(ifnet_t interface, u_long cmd, void *data); +static void utun_detached(ifnet_t interface); /* Protocol handlers */ -static errno_t utun_attach_proto(ifnet_t interface, protocol_family_t proto); -static errno_t utun_proto_input(ifnet_t interface, protocol_family_t protocol, - mbuf_t m, char *frame_header); -static errno_t utun_proto_pre_output(ifnet_t interface, protocol_family_t protocol, - mbuf_t *packet, const struct sockaddr *dest, void *route, - char *frame_type, char *link_layer_dest); +static errno_t utun_attach_proto(ifnet_t interface, protocol_family_t proto); +static errno_t utun_proto_input(ifnet_t interface, protocol_family_t protocol, + mbuf_t m, char *frame_header); +static errno_t utun_proto_pre_output(ifnet_t interface, protocol_family_t protocol, + mbuf_t *packet, const struct sockaddr *dest, void *route, + char *frame_type, char 
*link_layer_dest); static errno_t utun_pkt_input(struct utun_pcb *pcb, mbuf_t m); #if UTUN_NEXUS @@ -164,7 +164,7 @@ static errno_t utun_pkt_input(struct utun_pcb *pcb, mbuf_t m); #define UTUN_IF_DEFAULT_RING_SIZE 64 #define UTUN_IF_DEFAULT_TX_FSW_RING_SIZE 64 #define UTUN_IF_DEFAULT_RX_FSW_RING_SIZE 128 -#define UTUN_IF_DEFAULT_BUF_SEG_SIZE skmem_usr_buf_seg_size +#define UTUN_IF_DEFAULT_BUF_SEG_SIZE skmem_usr_buf_seg_size #define UTUN_IF_HEADROOM_SIZE 32 #define UTUN_IF_MIN_RING_SIZE 16 @@ -185,11 +185,11 @@ SYSCTL_DECL(_net_utun); SYSCTL_NODE(_net, OID_AUTO, utun, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "UTun"); SYSCTL_PROC(_net_utun, OID_AUTO, ring_size, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - &if_utun_ring_size, UTUN_IF_DEFAULT_RING_SIZE, &sysctl_if_utun_ring_size, "I", ""); + &if_utun_ring_size, UTUN_IF_DEFAULT_RING_SIZE, &sysctl_if_utun_ring_size, "I", ""); SYSCTL_PROC(_net_utun, OID_AUTO, tx_fsw_ring_size, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - &if_utun_tx_fsw_ring_size, UTUN_IF_DEFAULT_TX_FSW_RING_SIZE, &sysctl_if_utun_tx_fsw_ring_size, "I", ""); + &if_utun_tx_fsw_ring_size, UTUN_IF_DEFAULT_TX_FSW_RING_SIZE, &sysctl_if_utun_tx_fsw_ring_size, "I", ""); SYSCTL_PROC(_net_utun, OID_AUTO, rx_fsw_ring_size, CTLTYPE_INT | CTLFLAG_LOCKED | CTLFLAG_RW, - &if_utun_rx_fsw_ring_size, UTUN_IF_DEFAULT_RX_FSW_RING_SIZE, &sysctl_if_utun_rx_fsw_ring_size, "I", ""); + &if_utun_rx_fsw_ring_size, UTUN_IF_DEFAULT_RX_FSW_RING_SIZE, &sysctl_if_utun_rx_fsw_ring_size, "I", ""); static errno_t utun_register_nexus(void); @@ -208,7 +208,7 @@ utun_netif_pre_disconnect(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_channel_t channel); static void utun_nexus_pre_disconnect(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel); + kern_channel_t channel); static void utun_nexus_disconnected(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kern_channel_t channel); @@ -230,8 +230,8 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, #define UTUN_DEFAULT_MTU 1500 #define UTUN_HEADER_SIZE(_pcb) (sizeof(u_int32_t) + (((_pcb)->utun_flags & UTUN_FLAGS_ENABLE_PROC_UUID) ? 
sizeof(uuid_t) : 0)) -static kern_ctl_ref utun_kctlref; -static u_int32_t utun_family; +static kern_ctl_ref utun_kctlref; +static u_int32_t utun_family; static lck_attr_t *utun_lck_attr; static lck_grp_attr_t *utun_lck_grp_attr; static lck_grp_t *utun_lck_grp; @@ -239,11 +239,11 @@ static lck_mtx_t utun_lock; TAILQ_HEAD(utun_list, utun_pcb) utun_head; -#define UTUN_PCB_ZONE_MAX 32 -#define UTUN_PCB_ZONE_NAME "net.if_utun" +#define UTUN_PCB_ZONE_MAX 32 +#define UTUN_PCB_ZONE_NAME "net.if_utun" -static unsigned int utun_pcb_size; /* size of zone element */ -static struct zone *utun_pcb_zone; /* zone for utun_pcb */ +static unsigned int utun_pcb_size; /* size of zone element */ +static struct zone *utun_pcb_zone; /* zone for utun_pcb */ #if UTUN_NEXUS @@ -255,17 +255,17 @@ sysctl_if_utun_ring_size SYSCTL_HANDLER_ARGS int error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) { - return (error); + return error; } if (value < UTUN_IF_MIN_RING_SIZE || - value > UTUN_IF_MAX_RING_SIZE) { - return (EINVAL); + value > UTUN_IF_MAX_RING_SIZE) { + return EINVAL; } if_utun_ring_size = value; - return (0); + return 0; } static int @@ -276,17 +276,17 @@ sysctl_if_utun_tx_fsw_ring_size SYSCTL_HANDLER_ARGS int error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) { - return (error); + return error; } if (value < UTUN_IF_MIN_RING_SIZE || - value > UTUN_IF_MAX_RING_SIZE) { - return (EINVAL); + value > UTUN_IF_MAX_RING_SIZE) { + return EINVAL; } if_utun_tx_fsw_ring_size = value; - return (0); + return 0; } static int @@ -297,23 +297,23 @@ sysctl_if_utun_rx_fsw_ring_size SYSCTL_HANDLER_ARGS int error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) { - return (error); + return error; } if (value < UTUN_IF_MIN_RING_SIZE || - value > UTUN_IF_MAX_RING_SIZE) { - return (EINVAL); + value > UTUN_IF_MAX_RING_SIZE) { + return EINVAL; } if_utun_rx_fsw_ring_size = value; - return (0); + return 0; } static errno_t utun_netif_ring_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel, kern_channel_ring_t ring, boolean_t is_tx_ring, - void **ring_ctx) + kern_channel_t channel, kern_channel_ring_t ring, boolean_t is_tx_ring, + void **ring_ctx) { #pragma unused(nxprov) #pragma unused(channel) @@ -331,7 +331,7 @@ utun_netif_ring_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static void utun_netif_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t ring) + kern_channel_ring_t ring) { #pragma unused(nxprov) struct utun_pcb *pcb = kern_nexus_get_context(nexus); @@ -344,7 +344,7 @@ utun_netif_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t utun_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t tx_ring, uint32_t flags) + kern_channel_ring_t tx_ring, uint32_t flags) { #pragma unused(nxprov) #pragma unused(flags) @@ -417,20 +417,20 @@ utun_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, uint8_t vhl = *(uint8_t *)(tx_baddr + tx_offset); u_int ip_version = (vhl >> 4); switch (ip_version) { - case 4: { - af = AF_INET; - break; - } - case 6: { - af = AF_INET6; - break; - } - default: { - printf("utun_netif_sync_tx %s: unknown ip version %u vhl %u tx_offset %u len %u header_size %zu\n", - pcb->utun_ifp->if_xname, ip_version, vhl, tx_offset, tx_length, - UTUN_HEADER_SIZE(pcb)); - break; - } + case 4: { + af = AF_INET; + break; + } + case 6: { + af = AF_INET6; + break; + } + default: { + printf("utun_netif_sync_tx %s: unknown ip version 
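
Two data-path details visible above, restated for orientation: UTUN_HEADER_SIZE() says every packet on the control socket carries a 4-byte protocol-family prefix (plus a uuid_t when UTUN_FLAGS_ENABLE_PROC_UUID is set), and utun_netif_sync_tx() recovers the family from the first nibble of the raw IP payload when it must synthesize that prefix. A hedged reader sketch combining both; treating the prefix as network byte order is the conventional userland assumption:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    static int
    utun_read_one(int fd, uint8_t *pkt, size_t pktlen, uint32_t *family)
    {
            uint8_t buf[4 + 2048];
            ssize_t n = read(fd, buf, sizeof(buf));

            if (n <= 4) {
                    return -1;
            }
            memcpy(family, buf, sizeof(*family));
            *family = ntohl(*family);        /* AF_INET or AF_INET6 */

            /* cross-check against the payload, as utun_netif_sync_tx does */
            switch (buf[4] >> 4) {
            case 4:  /* expect *family == AF_INET  */ break;
            case 6:  /* expect *family == AF_INET6 */ break;
            default: return -1;              /* unknown ip version */
            }

            n -= 4;
            if ((size_t)n > pktlen) {
                    n = (ssize_t)pktlen;
            }
            memcpy(pkt, buf + 4, (size_t)n);
            return (int)n;
    }
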
%u vhl %u tx_offset %u len %u header_size %zu\n", + pcb->utun_ifp->if_xname, ip_version, vhl, tx_offset, tx_length, + UTUN_HEADER_SIZE(pcb)); + break; + } } tx_offset -= UTUN_HEADER_SIZE(pcb); @@ -498,7 +498,7 @@ utun_netif_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t utun_netif_tx_doorbell(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t ring, __unused uint32_t flags) + kern_channel_ring_t ring, __unused uint32_t flags) { #pragma unused(nxprov) struct utun_pcb *pcb = kern_nexus_get_context(nexus); @@ -521,7 +521,7 @@ utun_netif_tx_doorbell(kern_nexus_provider_t nxprov, kern_nexus_t nexus, if (pcb->utun_kpipe_enabled) { uint32_t tx_available = kern_channel_available_slot_count(ring); if (pcb->utun_netif_txring_size > 0 && - tx_available >= pcb->utun_netif_txring_size - 1) { + tx_available >= pcb->utun_netif_txring_size - 1) { // No room left in tx ring, disable output for now errno_t error = ifnet_disable_output(pcb->utun_ifp); if (error != 0) { @@ -545,12 +545,12 @@ utun_netif_tx_doorbell(kern_nexus_provider_t nxprov, kern_nexus_t nexus, kr_exit(ring); - return (0); + return 0; } static errno_t utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t rx_ring, uint32_t flags) + kern_channel_ring_t rx_ring, uint32_t flags) { #pragma unused(nxprov) #pragma unused(flags) @@ -615,7 +615,7 @@ utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, STATS_INC(nifs, NETIF_STATS_BADLEN); STATS_INC(nifs, NETIF_STATS_DROPPED); printf("utun_netif_sync_rx %s: legacy packet length too short for header %zu < %zu\n", - pcb->utun_ifp->if_xname, length, header_offset); + pcb->utun_ifp->if_xname, length, header_offset); continue; } @@ -627,7 +627,7 @@ utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, STATS_INC(nifs, NETIF_STATS_BADLEN); STATS_INC(nifs, NETIF_STATS_DROPPED); printf("utun_netif_sync_rx %s: legacy packet length %zu > %u\n", - pcb->utun_ifp->if_xname, length, rx_pp->pp_buflet_size); + pcb->utun_ifp->if_xname, length, rx_pp->pp_buflet_size); continue; } @@ -641,7 +641,7 @@ utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Copy-in data from mbuf to buflet mbuf_copydata(data, header_offset, length, (void *)rx_baddr); - kern_packet_clear_flow_uuid(rx_ph); // Zero flow id + kern_packet_clear_flow_uuid(rx_ph); // Zero flow id // Finalize and attach the packet error = kern_buflet_set_data_offset(rx_buf, 0); @@ -737,12 +737,12 @@ utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, STATS_INC(nifs, NETIF_STATS_BADLEN); STATS_INC(nifs, NETIF_STATS_DROPPED); printf("utun_netif_sync_rx %s: packet length too short for header %u < %zu\n", - pcb->utun_ifp->if_xname, tx_length, header_offset); + pcb->utun_ifp->if_xname, tx_length, header_offset); continue; } size_t length = MIN(tx_length - header_offset, - pcb->utun_slot_size); + pcb->utun_slot_size); tx_ring_stats.kcrsi_slots_transferred++; tx_ring_stats.kcrsi_bytes_transferred += length; @@ -755,7 +755,7 @@ utun_netif_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Copy-in data from tx to rx memcpy((void *)rx_baddr, (void *)(tx_baddr + header_offset), length); - kern_packet_clear_flow_uuid(rx_ph); // Zero flow id + kern_packet_clear_flow_uuid(rx_ph); // Zero flow id // Finalize and attach the packet error = kern_buflet_set_data_offset(rx_buf, 0); @@ -808,8 +808,8 @@ done: static errno_t utun_nexus_ifattach(struct utun_pcb *pcb, - struct ifnet_init_eparams *init_params, - struct ifnet **ifp) + 
struct ifnet_init_eparams *init_params, + struct ifnet **ifp) { errno_t err; nexus_controller_t controller = kern_nexus_shared_controller(); @@ -818,7 +818,7 @@ utun_nexus_ifattach(struct utun_pcb *pcb, nexus_name_t provider_name; snprintf((char *)provider_name, sizeof(provider_name), - "com.apple.netif.%s", pcb->utun_if_xname); + "com.apple.netif.%s", pcb->utun_if_xname); struct kern_nexus_provider_init prov_init = { .nxpi_version = KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION, @@ -840,7 +840,7 @@ utun_nexus_ifattach(struct utun_pcb *pcb, err = kern_nexus_attr_create(&nxa); if (err != 0) { printf("%s: kern_nexus_attr_create failed: %d\n", - __func__, err); + __func__, err); goto failed; } @@ -857,13 +857,13 @@ utun_nexus_ifattach(struct utun_pcb *pcb, pcb->utun_netif_txring_size = ring_size; - bzero(&pp_init, sizeof (pp_init)); + bzero(&pp_init, sizeof(pp_init)); pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION; pp_init.kbi_packets = pcb->utun_netif_ring_size * 2; pp_init.kbi_bufsize = pcb->utun_slot_size; pp_init.kbi_buf_seg_size = UTUN_IF_DEFAULT_BUF_SEG_SIZE; pp_init.kbi_max_frags = 1; - (void) snprintf((char *)pp_init.kbi_name, sizeof (pp_init.kbi_name), + (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name), "%s", provider_name); err = kern_pbufpool_create(&pp_init, &pp_init, &pcb->utun_netif_pp, NULL); @@ -873,15 +873,15 @@ utun_nexus_ifattach(struct utun_pcb *pcb, } err = kern_nexus_controller_register_provider(controller, - utun_nx_dom_prov, - provider_name, - &prov_init, - sizeof(prov_init), - nxa, - &pcb->utun_nx.if_provider); + utun_nx_dom_prov, + provider_name, + &prov_init, + sizeof(prov_init), + nxa, + &pcb->utun_nx.if_provider); if (err != 0) { printf("%s register provider failed, error %d\n", - __func__, err); + __func__, err); goto failed; } @@ -893,16 +893,16 @@ utun_nexus_ifattach(struct utun_pcb *pcb, net_init.nxneti_prepare = utun_netif_prepare; net_init.nxneti_tx_pbufpool = pcb->utun_netif_pp; err = kern_nexus_controller_alloc_net_provider_instance(controller, - pcb->utun_nx.if_provider, - pcb, - &pcb->utun_nx.if_instance, - &net_init, - ifp); + pcb->utun_nx.if_provider, + pcb, + &pcb->utun_nx.if_instance, + &net_init, + ifp); if (err != 0) { printf("%s alloc_net_provider_instance failed, %d\n", - __func__, err); + __func__, err); kern_nexus_controller_deregister_provider(controller, - pcb->utun_nx.if_provider); + pcb->utun_nx.if_provider); uuid_clear(pcb->utun_nx.if_provider); goto failed; } @@ -915,27 +915,27 @@ failed: kern_pbufpool_destroy(pcb->utun_netif_pp); pcb->utun_netif_pp = NULL; } - return (err); + return err; } static void utun_detach_provider_and_instance(uuid_t provider, uuid_t instance) { nexus_controller_t controller = kern_nexus_shared_controller(); - errno_t err; + errno_t err; if (!uuid_is_null(instance)) { err = kern_nexus_controller_free_provider_instance(controller, - instance); + instance); if (err != 0) { printf("%s free_provider_instance failed %d\n", - __func__, err); + __func__, err); } uuid_clear(instance); } if (!uuid_is_null(provider)) { err = kern_nexus_controller_deregister_provider(controller, - provider); + provider); if (err != 0) { printf("%s deregister_provider %d\n", __func__, err); } @@ -949,66 +949,65 @@ utun_nexus_detach(struct utun_pcb *pcb) { utun_nx_t nx = &pcb->utun_nx; nexus_controller_t controller = kern_nexus_shared_controller(); - errno_t err; + errno_t err; if (!uuid_is_null(nx->ms_host)) { err = kern_nexus_ifdetach(controller, - nx->ms_instance, - nx->ms_host); + nx->ms_instance, + nx->ms_host); if 
(err != 0) { printf("%s: kern_nexus_ifdetach ms host failed %d\n", - __func__, err); + __func__, err); } } if (!uuid_is_null(nx->ms_device)) { err = kern_nexus_ifdetach(controller, - nx->ms_instance, - nx->ms_device); + nx->ms_instance, + nx->ms_device); if (err != 0) { printf("%s: kern_nexus_ifdetach ms device failed %d\n", - __func__, err); + __func__, err); } } utun_detach_provider_and_instance(nx->if_provider, - nx->if_instance); + nx->if_instance); utun_detach_provider_and_instance(nx->ms_provider, - nx->ms_instance); + nx->ms_instance); if (pcb->utun_netif_pp != NULL) { kern_pbufpool_destroy(pcb->utun_netif_pp); pcb->utun_netif_pp = NULL; - } memset(nx, 0, sizeof(*nx)); } static errno_t utun_create_fs_provider_and_instance(struct utun_pcb *pcb, - uint32_t subtype, const char *type_name, - const char *ifname, - uuid_t *provider, uuid_t *instance) + uint32_t subtype, const char *type_name, + const char *ifname, + uuid_t *provider, uuid_t *instance) { nexus_attr_t attr = NULL; nexus_controller_t controller = kern_nexus_shared_controller(); uuid_t dom_prov; errno_t err; struct kern_nexus_init init; - nexus_name_t provider_name; + nexus_name_t provider_name; err = kern_nexus_get_builtin_domain_provider(NEXUS_TYPE_FLOW_SWITCH, - &dom_prov); + &dom_prov); if (err != 0) { printf("%s can't get %s provider, error %d\n", - __func__, type_name, err); + __func__, type_name, err); goto failed; } err = kern_nexus_attr_create(&attr); if (err != 0) { printf("%s: kern_nexus_attr_create failed: %d\n", - __func__, err); + __func__, err); goto failed; } @@ -1028,36 +1027,36 @@ utun_create_fs_provider_and_instance(struct utun_pcb *pcb, VERIFY(err == 0); snprintf((char *)provider_name, sizeof(provider_name), - "com.apple.%s.%s", type_name, ifname); + "com.apple.%s.%s", type_name, ifname); err = kern_nexus_controller_register_provider(controller, - dom_prov, - provider_name, - NULL, - 0, - attr, - provider); + dom_prov, + provider_name, + NULL, + 0, + attr, + provider); kern_nexus_attr_destroy(attr); attr = NULL; if (err != 0) { printf("%s register %s provider failed, error %d\n", - __func__, type_name, err); + __func__, type_name, err); goto failed; } - bzero(&init, sizeof (init)); + bzero(&init, sizeof(init)); init.nxi_version = KERN_NEXUS_CURRENT_VERSION; err = kern_nexus_controller_alloc_provider_instance(controller, - *provider, - NULL, - instance, &init); + *provider, + NULL, + instance, &init); if (err != 0) { printf("%s alloc_provider_instance %s failed, %d\n", - __func__, type_name, err); + __func__, type_name, err); kern_nexus_controller_deregister_provider(controller, - *provider); + *provider); uuid_clear(*provider); } failed: - return (err); + return err; } static errno_t @@ -1069,21 +1068,21 @@ utun_multistack_attach(struct utun_pcb *pcb) // Allocate multistack flowswitch err = utun_create_fs_provider_and_instance(pcb, - NEXUS_EXTENSION_FSW_TYPE_MULTISTACK, - "multistack", - pcb->utun_ifp->if_xname, - &nx->ms_provider, - &nx->ms_instance); + NEXUS_EXTENSION_FSW_TYPE_MULTISTACK, + "multistack", + pcb->utun_ifp->if_xname, + &nx->ms_provider, + &nx->ms_instance); if (err != 0) { printf("%s: failed to create bridge provider and instance\n", - __func__); + __func__); goto failed; } // Attach multistack to device port err = kern_nexus_ifattach(controller, nx->ms_instance, - NULL, nx->if_instance, - FALSE, &nx->ms_device); + NULL, nx->if_instance, + FALSE, &nx->ms_device); if (err != 0) { printf("%s kern_nexus_ifattach ms device %d\n", __func__, err); goto failed; @@ -1091,8 +1090,8 @@ 
utun_multistack_attach(struct utun_pcb *pcb) // Attach multistack to host port err = kern_nexus_ifattach(controller, nx->ms_instance, - NULL, nx->if_instance, - TRUE, &nx->ms_host); + NULL, nx->if_instance, + TRUE, &nx->ms_host); if (err != 0) { printf("%s kern_nexus_ifattach ms host %d\n", __func__, err); goto failed; @@ -1119,7 +1118,7 @@ utun_multistack_attach(struct utun_pcb *pcb) printf("utun_multistack_attach - unable to find multistack nexus\n"); } - return (0); + return 0; failed: utun_nexus_detach(pcb); @@ -1129,8 +1128,8 @@ failed: panic("utun_multistack_attach - ifnet_detach failed: %d\n", detach_error); /* NOT REACHED */ } - - return (err); + + return err; } static errno_t @@ -1148,7 +1147,7 @@ utun_register_kernel_pipe_nexus(void) result = kern_nexus_controller_create(&utun_ncd); if (result) { printf("%s: kern_nexus_controller_create failed: %d\n", - __FUNCTION__, result); + __FUNCTION__, result); goto done; } @@ -1157,7 +1156,7 @@ utun_register_kernel_pipe_nexus(void) NEXUS_TYPE_KERNEL_PIPE, &dom_prov); if (result) { printf("%s: kern_nexus_get_builtin_domain_provider failed: %d\n", - __FUNCTION__, result); + __FUNCTION__, result); goto done; } @@ -1180,7 +1179,7 @@ utun_register_kernel_pipe_nexus(void) result = kern_nexus_attr_create(&nxa); if (result) { printf("%s: kern_nexus_attr_create failed: %d\n", - __FUNCTION__, result); + __FUNCTION__, result); goto done; } @@ -1196,15 +1195,15 @@ utun_register_kernel_pipe_nexus(void) VERIFY(result == 0); result = kern_nexus_controller_register_provider(utun_ncd, - dom_prov, - (const uint8_t *)"com.apple.nexus.utun.kpipe", - &prov_init, - sizeof(prov_init), - nxa, - &utun_kpipe_uuid); + dom_prov, + (const uint8_t *)"com.apple.nexus.utun.kpipe", + &prov_init, + sizeof(prov_init), + nxa, + &utun_kpipe_uuid); if (result) { printf("%s: kern_nexus_controller_register_provider failed: %d\n", - __FUNCTION__, result); + __FUNCTION__, result); goto done; } @@ -1314,14 +1313,14 @@ utun_enable_channel(struct utun_pcb *pcb, struct proc *proc) goto done; } - bzero(&pp_init, sizeof (pp_init)); + bzero(&pp_init, sizeof(pp_init)); pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION; pp_init.kbi_packets = pcb->utun_netif_ring_size * 2; pp_init.kbi_bufsize = pcb->utun_slot_size; pp_init.kbi_buf_seg_size = UTUN_IF_DEFAULT_BUF_SEG_SIZE; pp_init.kbi_max_frags = 1; pp_init.kbi_flags |= KBIF_QUANTUM; - (void) snprintf((char *)pp_init.kbi_name, sizeof (pp_init.kbi_name), + (void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name), "com.apple.kpipe.%s", pcb->utun_if_xname); result = kern_pbufpool_create(&pp_init, &pp_init, &pcb->utun_kpipe_pp, @@ -1332,22 +1331,22 @@ utun_enable_channel(struct utun_pcb *pcb, struct proc *proc) } VERIFY(uuid_is_null(pcb->utun_kpipe_uuid)); - bzero(&init, sizeof (init)); + bzero(&init, sizeof(init)); init.nxi_version = KERN_NEXUS_CURRENT_VERSION; init.nxi_tx_pbufpool = pcb->utun_kpipe_pp; result = kern_nexus_controller_alloc_provider_instance(utun_ncd, - utun_kpipe_uuid, pcb, &pcb->utun_kpipe_uuid, &init); + utun_kpipe_uuid, pcb, &pcb->utun_kpipe_uuid, &init); if (result) { goto done; } nexus_port_t port = NEXUS_PORT_KERNEL_PIPE_CLIENT; result = kern_nexus_controller_bind_provider_instance(utun_ncd, - pcb->utun_kpipe_uuid, &port, - proc_pid(proc), NULL, NULL, 0, NEXUS_BIND_PID); + pcb->utun_kpipe_uuid, &port, + proc_pid(proc), NULL, NULL, 0, NEXUS_BIND_PID); if (result) { kern_nexus_controller_free_provider_instance(utun_ncd, - pcb->utun_kpipe_uuid); + pcb->utun_kpipe_uuid); uuid_clear(pcb->utun_kpipe_uuid); goto done; 
} @@ -1375,7 +1374,7 @@ utun_register_control(void) { struct kern_ctl_reg kern_ctl; errno_t result = 0; - + /* Find a unique value for our interface family */ result = mbuf_tag_id_find(UTUN_CONTROL_NAME, &utun_family); if (result != 0) { @@ -1385,8 +1384,8 @@ utun_register_control(void) utun_pcb_size = sizeof(struct utun_pcb); utun_pcb_zone = zinit(utun_pcb_size, - UTUN_PCB_ZONE_MAX * utun_pcb_size, - 0, UTUN_PCB_ZONE_NAME); + UTUN_PCB_ZONE_MAX * utun_pcb_size, + 0, UTUN_PCB_ZONE_NAME); if (utun_pcb_zone == NULL) { printf("utun_register_control - zinit(utun_pcb) failed"); return ENOMEM; @@ -1397,7 +1396,7 @@ utun_register_control(void) #endif // UTUN_NEXUS TAILQ_INIT(&utun_head); - + bzero(&kern_ctl, sizeof(kern_ctl)); strlcpy(kern_ctl.ctl_name, UTUN_CONTROL_NAME, sizeof(kern_ctl.ctl_name)); kern_ctl.ctl_name[sizeof(kern_ctl.ctl_name) - 1] = 0; @@ -1417,29 +1416,29 @@ utun_register_control(void) printf("utun_register_control - ctl_register failed: %d\n", result); return result; } - + /* Register the protocol plumbers */ if ((result = proto_register_plumber(PF_INET, utun_family, - utun_attach_proto, NULL)) != 0) { + utun_attach_proto, NULL)) != 0) { printf("utun_register_control - proto_register_plumber(PF_INET, %d) failed: %d\n", - utun_family, result); + utun_family, result); ctl_deregister(utun_kctlref); return result; } - + /* Register the protocol plumbers */ if ((result = proto_register_plumber(PF_INET6, utun_family, - utun_attach_proto, NULL)) != 0) { + utun_attach_proto, NULL)) != 0) { proto_unregister_plumber(PF_INET, utun_family); ctl_deregister(utun_kctlref); printf("utun_register_control - proto_register_plumber(PF_INET6, %d) failed: %d\n", - utun_family, result); + utun_family, result); return result; } utun_lck_attr = lck_attr_alloc_init(); utun_lck_grp_attr = lck_grp_attr_alloc_init(); - utun_lck_grp = lck_grp_alloc_init("utun", utun_lck_grp_attr); + utun_lck_grp = lck_grp_alloc_init("utun", utun_lck_grp_attr); lck_mtx_init(&utun_lock, utun_lck_grp, utun_lck_attr); @@ -1466,8 +1465,8 @@ utun_free_pcb(struct utun_pcb *pcb, bool in_list) static errno_t utun_ctl_bind(kern_ctl_ref kctlref, - struct sockaddr_ctl *sac, - void **unitinfo) + struct sockaddr_ctl *sac, + void **unitinfo) { struct utun_pcb *pcb = zalloc(utun_pcb_zone); memset(pcb, 0, sizeof(*pcb)); @@ -1488,13 +1487,13 @@ utun_ctl_bind(kern_ctl_ref kctlref, lck_mtx_init(&pcb->utun_input_chain_lock, utun_lck_grp, utun_lck_attr); lck_rw_init(&pcb->utun_pcb_lock, utun_lck_grp, utun_lck_attr); - return (0); + return 0; } static errno_t utun_ctl_connect(kern_ctl_ref kctlref, - struct sockaddr_ctl *sac, - void **unitinfo) + struct sockaddr_ctl *sac, + void **unitinfo) { struct ifnet_init_eparams utun_init = {}; errno_t result = 0; @@ -1502,7 +1501,7 @@ utun_ctl_connect(kern_ctl_ref kctlref, if (*unitinfo == NULL) { (void)utun_ctl_bind(kctlref, sac, unitinfo); } - + struct utun_pcb *pcb = *unitinfo; lck_mtx_lock(&utun_lock); @@ -1549,7 +1548,7 @@ utun_ctl_connect(kern_ctl_ref kctlref, /* Create the interface */ bzero(&utun_init, sizeof(utun_init)); utun_init.ver = IFNET_INIT_CURRENT_VERSION; - utun_init.len = sizeof (utun_init); + utun_init.len = sizeof(utun_init); #if UTUN_NEXUS if (pcb->utun_use_netif) { @@ -1632,7 +1631,7 @@ utun_ctl_connect(kern_ctl_ref kctlref, ifnet_release(pcb->utun_ifp); utun_free_pcb(pcb, true); *unitinfo = NULL; - return (result); + return result; } /* Attach to bpf */ @@ -1647,48 +1646,48 @@ utun_ctl_connect(kern_ctl_ref kctlref, static errno_t utun_detach_ip(ifnet_t interface, - protocol_family_t 
protocol, - socket_t pf_socket) + protocol_family_t protocol, + socket_t pf_socket) { errno_t result = EPROTONOSUPPORT; - + /* Attempt a detach */ if (protocol == PF_INET) { - struct ifreq ifr; - + struct ifreq ifr; + bzero(&ifr, sizeof(ifr)); snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s%d", - ifnet_name(interface), ifnet_unit(interface)); - + ifnet_name(interface), ifnet_unit(interface)); + result = sock_ioctl(pf_socket, SIOCPROTODETACH, &ifr); } else if (protocol == PF_INET6) { - struct in6_ifreq ifr6; - + struct in6_ifreq ifr6; + bzero(&ifr6, sizeof(ifr6)); snprintf(ifr6.ifr_name, sizeof(ifr6.ifr_name), "%s%d", - ifnet_name(interface), ifnet_unit(interface)); - + ifnet_name(interface), ifnet_unit(interface)); + result = sock_ioctl(pf_socket, SIOCPROTODETACH_IN6, &ifr6); } - + return result; } static void utun_remove_address(ifnet_t interface, - protocol_family_t protocol, - ifaddr_t address, - socket_t pf_socket) + protocol_family_t protocol, + ifaddr_t address, + socket_t pf_socket) { errno_t result = 0; - + /* Attempt a detach */ if (protocol == PF_INET) { struct ifreq ifr; - + bzero(&ifr, sizeof(ifr)); snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s%d", - ifnet_name(interface), ifnet_unit(interface)); + ifnet_name(interface), ifnet_unit(interface)); result = ifaddr_address(address, &ifr.ifr_addr, sizeof(ifr.ifr_addr)); if (result != 0) { printf("utun_remove_address - ifaddr_address failed: %d", result); @@ -1700,20 +1699,20 @@ utun_remove_address(ifnet_t interface, } } else if (protocol == PF_INET6) { struct in6_ifreq ifr6; - + bzero(&ifr6, sizeof(ifr6)); snprintf(ifr6.ifr_name, sizeof(ifr6.ifr_name), "%s%d", - ifnet_name(interface), ifnet_unit(interface)); + ifnet_name(interface), ifnet_unit(interface)); result = ifaddr_address(address, (struct sockaddr*)&ifr6.ifr_addr, - sizeof(ifr6.ifr_addr)); + sizeof(ifr6.ifr_addr)); if (result != 0) { printf("utun_remove_address - ifaddr_address failed (v6): %d", - result); + result); } else { result = sock_ioctl(pf_socket, SIOCDIFADDR_IN6, &ifr6); if (result != 0) { printf("utun_remove_address - SIOCDIFADDR_IN6 failed: %d", - result); + result); } } } @@ -1721,29 +1720,30 @@ utun_remove_address(ifnet_t interface, static void utun_cleanup_family(ifnet_t interface, - protocol_family_t protocol) + protocol_family_t protocol) { errno_t result = 0; socket_t pf_socket = NULL; ifaddr_t *addresses = NULL; int i; - + if (protocol != PF_INET && protocol != PF_INET6) { printf("utun_cleanup_family - invalid protocol family %d\n", protocol); return; } - + /* Create a socket for removing addresses and detaching the protocol */ result = sock_socket(protocol, SOCK_DGRAM, 0, NULL, NULL, &pf_socket); if (result != 0) { - if (result != EAFNOSUPPORT) + if (result != EAFNOSUPPORT) { printf("utun_cleanup_family - failed to create %s socket: %d\n", - protocol == PF_INET ? "IP" : "IPv6", result); + protocol == PF_INET ? "IP" : "IPv6", result); + } goto cleanup; } - - /* always set SS_PRIV, we want to close and detach regardless */ - sock_setpriv(pf_socket, 1); + + /* always set SS_PRIV, we want to close and detach regardless */ + sock_setpriv(pf_socket, 1); result = utun_detach_ip(interface, protocol, pf_socket); if (result == 0 || result == ENXIO) { @@ -1754,7 +1754,7 @@ utun_cleanup_family(ifnet_t interface, printf("utun_cleanup_family - utun_detach_ip failed: %d\n", result); goto cleanup; } - + /* * At this point, we received an EBUSY error. This means there are * addresses attached. We should detach them and then try again. 
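For orientation, the connect/disconnect/getopt handlers patched in the hunks above all serve a PF_SYSTEM kernel-control socket. A minimal userland sketch of the client side — illustrative only, and assuming access to the private net/if_utun.h header for UTUN_CONTROL_NAME and UTUN_OPT_IFNAME — looks like this:

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kern_control.h>
#include <sys/sys_domain.h>
#include <net/if_utun.h>
#include <string.h>
#include <unistd.h>

/* Open a utun control socket; the kernel side lands in utun_ctl_connect(). */
static int
utun_open(char *ifname, size_t ifname_len)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	socklen_t optlen = (socklen_t)ifname_len;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
	if (fd < 0) {
		return -1;
	}
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, UTUN_CONTROL_NAME, sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {	/* resolve control name -> ctl_id */
		close(fd);
		return -1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;	/* 0: let the kernel pick the next free utunN unit */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
		close(fd);
		return -1;
	}
	/* UTUN_OPT_IFNAME is get-only and valid once connected (see utun_ctl_getopt) */
	if (getsockopt(fd, SYSPROTO_CONTROL, UTUN_OPT_IFNAME, ifname, &optlen) == -1) {
		close(fd);
		return -1;
	}
	return fd;
}

Connecting with sc_unit = 0 relies on the unit-picking loop in utun_ctl_connect above; passing a nonzero unit instead requests a specific utunN and fails with EBUSY if it is taken.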
@@ -1762,17 +1762,17 @@ utun_cleanup_family(ifnet_t interface, result = ifnet_get_address_list_family(interface, &addresses, protocol); if (result != 0) { printf("fnet_get_address_list_family(%s%d, 0xblah, %s) - failed: %d\n", - ifnet_name(interface), ifnet_unit(interface), - protocol == PF_INET ? "PF_INET" : "PF_INET6", result); + ifnet_name(interface), ifnet_unit(interface), + protocol == PF_INET ? "PF_INET" : "PF_INET6", result); goto cleanup; } - + for (i = 0; addresses[i] != 0; i++) { utun_remove_address(interface, protocol, addresses[i], pf_socket); } ifnet_free_address_list(addresses); addresses = NULL; - + /* * The addresses should be gone, we should try the remove again. */ @@ -1780,12 +1780,12 @@ utun_cleanup_family(ifnet_t interface, if (result != 0 && result != ENXIO) { printf("utun_cleanup_family - utun_detach_ip failed: %d\n", result); } - + cleanup: if (pf_socket != NULL) { sock_close(pf_socket); } - + if (addresses != NULL) { ifnet_free_address_list(addresses); } @@ -1793,10 +1793,10 @@ cleanup: static errno_t utun_ctl_disconnect(__unused kern_ctl_ref kctlref, - __unused u_int32_t unit, - void *unitinfo) + __unused u_int32_t unit, + void *unitinfo) { - struct utun_pcb *pcb = unitinfo; + struct utun_pcb *pcb = unitinfo; ifnet_t ifp = NULL; errno_t result = 0; @@ -1902,25 +1902,25 @@ utun_ctl_disconnect(__unused kern_ctl_ref kctlref, lck_rw_unlock_exclusive(&pcb->utun_pcb_lock); utun_free_pcb(pcb, false); } - + return 0; } static errno_t utun_ctl_send(__unused kern_ctl_ref kctlref, - __unused u_int32_t unit, - void *unitinfo, - mbuf_t m, - __unused int flags) + __unused u_int32_t unit, + void *unitinfo, + mbuf_t m, + __unused int flags) { /* - * The userland ABI requires the first four bytes have the protocol family + * The userland ABI requires the first four bytes have the protocol family * in network byte order: swap them */ if (m_pktlen(m) >= (int32_t)UTUN_HEADER_SIZE((struct utun_pcb *)unitinfo)) { *(protocol_family_t *)mbuf_data(m) = ntohl(*(protocol_family_t *)mbuf_data(m)); } else { - printf("%s - unexpected short mbuf pkt len %d\n", __func__, m_pktlen(m) ); + printf("%s - unexpected short mbuf pkt len %d\n", __func__, m_pktlen(m)); } return utun_pkt_input((struct utun_pcb *)unitinfo, m); @@ -1928,267 +1928,269 @@ utun_ctl_send(__unused kern_ctl_ref kctlref, static errno_t utun_ctl_setopt(__unused kern_ctl_ref kctlref, - __unused u_int32_t unit, - void *unitinfo, - int opt, - void *data, - size_t len) + __unused u_int32_t unit, + void *unitinfo, + int opt, + void *data, + size_t len) { struct utun_pcb *pcb = unitinfo; errno_t result = 0; /* check for privileges for privileged options */ switch (opt) { - case UTUN_OPT_FLAGS: - case UTUN_OPT_EXT_IFDATA_STATS: - case UTUN_OPT_SET_DELEGATE_INTERFACE: - if (kauth_cred_issuser(kauth_cred_get()) == 0) { - return EPERM; - } - break; + case UTUN_OPT_FLAGS: + case UTUN_OPT_EXT_IFDATA_STATS: + case UTUN_OPT_SET_DELEGATE_INTERFACE: + if (kauth_cred_issuser(kauth_cred_get()) == 0) { + return EPERM; + } + break; } switch (opt) { - case UTUN_OPT_FLAGS: - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - if (pcb->utun_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } + case UTUN_OPT_FLAGS: + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + if (pcb->utun_ifp == NULL) { + // Only can set after connecting + result = EINVAL; + break; + } #if UTUN_NEXUS - if (pcb->utun_use_netif) { - pcb->utun_flags = *(u_int32_t *)data; - } else + if (pcb->utun_use_netif) { + 
pcb->utun_flags = *(u_int32_t *)data; + } else #endif // UTUN_NEXUS - { - u_int32_t old_flags = pcb->utun_flags; - pcb->utun_flags = *(u_int32_t *)data; - if (((old_flags ^ pcb->utun_flags) & UTUN_FLAGS_ENABLE_PROC_UUID)) { - // If UTUN_FLAGS_ENABLE_PROC_UUID flag changed, update bpf - bpfdetach(pcb->utun_ifp); - bpfattach(pcb->utun_ifp, DLT_NULL, UTUN_HEADER_SIZE(pcb)); - } + { + u_int32_t old_flags = pcb->utun_flags; + pcb->utun_flags = *(u_int32_t *)data; + if (((old_flags ^ pcb->utun_flags) & UTUN_FLAGS_ENABLE_PROC_UUID)) { + // If UTUN_FLAGS_ENABLE_PROC_UUID flag changed, update bpf + bpfdetach(pcb->utun_ifp); + bpfattach(pcb->utun_ifp, DLT_NULL, UTUN_HEADER_SIZE(pcb)); } } + } + break; + + case UTUN_OPT_EXT_IFDATA_STATS: + if (len != sizeof(int)) { + result = EMSGSIZE; + break; + } + if (pcb->utun_ifp == NULL) { + // Only can set after connecting + result = EINVAL; break; + } + pcb->utun_ext_ifdata_stats = (*(int *)data) ? 1 : 0; + break; - case UTUN_OPT_EXT_IFDATA_STATS: - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - pcb->utun_ext_ifdata_stats = (*(int *)data) ? 1 : 0; + case UTUN_OPT_INC_IFDATA_STATS_IN: + case UTUN_OPT_INC_IFDATA_STATS_OUT: { + struct utun_stats_param *utsp = (struct utun_stats_param *)data; + + if (utsp == NULL || len < sizeof(struct utun_stats_param)) { + result = EINVAL; break; - - case UTUN_OPT_INC_IFDATA_STATS_IN: - case UTUN_OPT_INC_IFDATA_STATS_OUT: { - struct utun_stats_param *utsp = (struct utun_stats_param *)data; - - if (utsp == NULL || len < sizeof(struct utun_stats_param)) { - result = EINVAL; - break; - } - if (pcb->utun_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - if (!pcb->utun_ext_ifdata_stats) { - result = EINVAL; - break; - } - if (opt == UTUN_OPT_INC_IFDATA_STATS_IN) - ifnet_stat_increment_in(pcb->utun_ifp, utsp->utsp_packets, - utsp->utsp_bytes, utsp->utsp_errors); - else - ifnet_stat_increment_out(pcb->utun_ifp, utsp->utsp_packets, - utsp->utsp_bytes, utsp->utsp_errors); + } + if (pcb->utun_ifp == NULL) { + // Only can set after connecting + result = EINVAL; + break; + } + if (!pcb->utun_ext_ifdata_stats) { + result = EINVAL; break; } - case UTUN_OPT_SET_DELEGATE_INTERFACE: { - ifnet_t del_ifp = NULL; - char name[IFNAMSIZ]; + if (opt == UTUN_OPT_INC_IFDATA_STATS_IN) { + ifnet_stat_increment_in(pcb->utun_ifp, utsp->utsp_packets, + utsp->utsp_bytes, utsp->utsp_errors); + } else { + ifnet_stat_increment_out(pcb->utun_ifp, utsp->utsp_packets, + utsp->utsp_bytes, utsp->utsp_errors); + } + break; + } + case UTUN_OPT_SET_DELEGATE_INTERFACE: { + ifnet_t del_ifp = NULL; + char name[IFNAMSIZ]; - if (len > IFNAMSIZ - 1) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - if (len != 0) { /* if len==0, del_ifp will be NULL causing the delegate to be removed */ - bcopy(data, name, len); - name[len] = 0; - result = ifnet_find_by_name(name, &del_ifp); - } - if (result == 0) { - result = ifnet_set_delegate(pcb->utun_ifp, del_ifp); - if (del_ifp) - ifnet_release(del_ifp); - } + if (len > IFNAMSIZ - 1) { + result = EMSGSIZE; break; } - case UTUN_OPT_MAX_PENDING_PACKETS: { - u_int32_t max_pending_packets = 0; - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - max_pending_packets = *(u_int32_t *)data; - if (max_pending_packets == 0) { - result = EINVAL; - break; + if (pcb->utun_ifp == NULL) { + // Only can set 
after connecting + result = EINVAL; + break; + } + if (len != 0) { /* if len==0, del_ifp will be NULL causing the delegate to be removed */ + bcopy(data, name, len); + name[len] = 0; + result = ifnet_find_by_name(name, &del_ifp); + } + if (result == 0) { + result = ifnet_set_delegate(pcb->utun_ifp, del_ifp); + if (del_ifp) { + ifnet_release(del_ifp); } - pcb->utun_max_pending_packets = max_pending_packets; + } + break; + } + case UTUN_OPT_MAX_PENDING_PACKETS: { + u_int32_t max_pending_packets = 0; + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + break; + } + max_pending_packets = *(u_int32_t *)data; + if (max_pending_packets == 0) { + result = EINVAL; break; } + pcb->utun_max_pending_packets = max_pending_packets; + break; + } #if UTUN_NEXUS - case UTUN_OPT_ENABLE_CHANNEL: { - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - if (*(int *)data) { - result = utun_enable_channel(pcb, current_proc()); - } else { - result = utun_disable_channel(pcb); - } + case UTUN_OPT_ENABLE_CHANNEL: { + if (len != sizeof(int)) { + result = EMSGSIZE; + break; + } + if (pcb->utun_ifp == NULL) { + // Only can set after connecting + result = EINVAL; + break; + } + if (*(int *)data) { + result = utun_enable_channel(pcb, current_proc()); + } else { + result = utun_disable_channel(pcb); + } + break; + } + case UTUN_OPT_ENABLE_FLOWSWITCH: { + if (len != sizeof(int)) { + result = EMSGSIZE; + break; + } + if (pcb->utun_ifp == NULL) { + // Only can set after connecting + result = EINVAL; + break; + } + if (!if_is_netagent_enabled()) { + result = ENOTSUP; + break; + } + if (uuid_is_null(pcb->utun_nx.ms_agent)) { + result = ENOENT; break; } - case UTUN_OPT_ENABLE_FLOWSWITCH: { - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp == NULL) { - // Only can set after connecting - result = EINVAL; - break; - } - if (!if_is_netagent_enabled()) { - result = ENOTSUP; - break; - } - if (uuid_is_null(pcb->utun_nx.ms_agent)) { - result = ENOENT; - break; - } - if (*(int *)data) { - if_add_netagent(pcb->utun_ifp, pcb->utun_nx.ms_agent); - pcb->utun_needs_netagent = true; - } else { - pcb->utun_needs_netagent = false; - if_delete_netagent(pcb->utun_ifp, pcb->utun_nx.ms_agent); - } + if (*(int *)data) { + if_add_netagent(pcb->utun_ifp, pcb->utun_nx.ms_agent); + pcb->utun_needs_netagent = true; + } else { + pcb->utun_needs_netagent = false; + if_delete_netagent(pcb->utun_ifp, pcb->utun_nx.ms_agent); + } + break; + } + case UTUN_OPT_ENABLE_NETIF: { + if (len != sizeof(int)) { + result = EMSGSIZE; break; } - case UTUN_OPT_ENABLE_NETIF: { - if (len != sizeof(int)) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - lck_rw_lock_exclusive(&pcb->utun_pcb_lock); - pcb->utun_use_netif = !!(*(int *)data); - lck_rw_unlock_exclusive(&pcb->utun_pcb_lock); + if (pcb->utun_ifp != NULL) { + // Only can set before connecting + result = EINVAL; break; } - case UTUN_OPT_SLOT_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - u_int32_t slot_size = *(u_int32_t *)data; - if (slot_size < UTUN_IF_MIN_SLOT_SIZE || - slot_size > UTUN_IF_MAX_SLOT_SIZE) { - return (EINVAL); - } - pcb->utun_slot_size = slot_size; + lck_rw_lock_exclusive(&pcb->utun_pcb_lock); + pcb->utun_use_netif = !!(*(int *)data); + 
lck_rw_unlock_exclusive(&pcb->utun_pcb_lock); + break; + } + case UTUN_OPT_SLOT_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; break; } - case UTUN_OPT_NETIF_RING_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - u_int32_t ring_size = *(u_int32_t *)data; - if (ring_size < UTUN_IF_MIN_RING_SIZE || - ring_size > UTUN_IF_MAX_RING_SIZE) { - return (EINVAL); - } - pcb->utun_netif_ring_size = ring_size; + if (pcb->utun_ifp != NULL) { + // Only can set before connecting + result = EINVAL; break; } - case UTUN_OPT_TX_FSW_RING_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - u_int32_t ring_size = *(u_int32_t *)data; - if (ring_size < UTUN_IF_MIN_RING_SIZE || - ring_size > UTUN_IF_MAX_RING_SIZE) { - return (EINVAL); - } - pcb->utun_tx_fsw_ring_size = ring_size; + u_int32_t slot_size = *(u_int32_t *)data; + if (slot_size < UTUN_IF_MIN_SLOT_SIZE || + slot_size > UTUN_IF_MAX_SLOT_SIZE) { + return EINVAL; + } + pcb->utun_slot_size = slot_size; + break; + } + case UTUN_OPT_NETIF_RING_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; break; } - case UTUN_OPT_RX_FSW_RING_SIZE: { - if (len != sizeof(u_int32_t)) { - result = EMSGSIZE; - break; - } - if (pcb->utun_ifp != NULL) { - // Only can set before connecting - result = EINVAL; - break; - } - u_int32_t ring_size = *(u_int32_t *)data; - if (ring_size < UTUN_IF_MIN_RING_SIZE || - ring_size > UTUN_IF_MAX_RING_SIZE) { - return (EINVAL); - } - pcb->utun_rx_fsw_ring_size = ring_size; + if (pcb->utun_ifp != NULL) { + // Only can set before connecting + result = EINVAL; break; } -#endif // UTUN_NEXUS - default: { - result = ENOPROTOOPT; + u_int32_t ring_size = *(u_int32_t *)data; + if (ring_size < UTUN_IF_MIN_RING_SIZE || + ring_size > UTUN_IF_MAX_RING_SIZE) { + return EINVAL; + } + pcb->utun_netif_ring_size = ring_size; + break; + } + case UTUN_OPT_TX_FSW_RING_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + break; + } + if (pcb->utun_ifp != NULL) { + // Only can set before connecting + result = EINVAL; break; } + u_int32_t ring_size = *(u_int32_t *)data; + if (ring_size < UTUN_IF_MIN_RING_SIZE || + ring_size > UTUN_IF_MAX_RING_SIZE) { + return EINVAL; + } + pcb->utun_tx_fsw_ring_size = ring_size; + break; + } + case UTUN_OPT_RX_FSW_RING_SIZE: { + if (len != sizeof(u_int32_t)) { + result = EMSGSIZE; + break; + } + if (pcb->utun_ifp != NULL) { + // Only can set before connecting + result = EINVAL; + break; + } + u_int32_t ring_size = *(u_int32_t *)data; + if (ring_size < UTUN_IF_MIN_RING_SIZE || + ring_size > UTUN_IF_MAX_RING_SIZE) { + return EINVAL; + } + pcb->utun_rx_fsw_ring_size = ring_size; + break; + } +#endif // UTUN_NEXUS + default: { + result = ENOPROTOOPT; + break; + } } return result; @@ -2196,137 +2198,137 @@ utun_ctl_setopt(__unused kern_ctl_ref kctlref, static errno_t utun_ctl_getopt(__unused kern_ctl_ref kctlref, - __unused u_int32_t unit, - void *unitinfo, - int opt, - void *data, - size_t *len) + __unused u_int32_t unit, + void *unitinfo, + int opt, + void *data, + size_t *len) { struct utun_pcb *pcb = unitinfo; errno_t result = 0; - + switch (opt) { - case UTUN_OPT_FLAGS: - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->utun_flags; - } - break; + case UTUN_OPT_FLAGS: + if (*len != sizeof(u_int32_t)) { + 
result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->utun_flags; + } + break; - case UTUN_OPT_EXT_IFDATA_STATS: - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - *(int *)data = (pcb->utun_ext_ifdata_stats) ? 1 : 0; - } - break; - - case UTUN_OPT_IFNAME: - if (*len < MIN(strlen(pcb->utun_if_xname) + 1, sizeof(pcb->utun_if_xname))) { - result = EMSGSIZE; - } else { - if (pcb->utun_ifp == NULL) { - // Only can get after connecting - result = EINVAL; - break; - } - *len = snprintf(data, *len, "%s", pcb->utun_if_xname) + 1; - } - break; + case UTUN_OPT_EXT_IFDATA_STATS: + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { + *(int *)data = (pcb->utun_ext_ifdata_stats) ? 1 : 0; + } + break; - case UTUN_OPT_MAX_PENDING_PACKETS: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *((u_int32_t *)data) = pcb->utun_max_pending_packets; + case UTUN_OPT_IFNAME: + if (*len < MIN(strlen(pcb->utun_if_xname) + 1, sizeof(pcb->utun_if_xname))) { + result = EMSGSIZE; + } else { + if (pcb->utun_ifp == NULL) { + // Only can get after connecting + result = EINVAL; + break; } - break; + *len = snprintf(data, *len, "%s", pcb->utun_if_xname) + 1; } + break; -#if UTUN_NEXUS - case UTUN_OPT_ENABLE_CHANNEL: { - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - lck_rw_lock_shared(&pcb->utun_pcb_lock); - *(int *)data = pcb->utun_kpipe_enabled; - lck_rw_unlock_shared(&pcb->utun_pcb_lock); - } - break; + case UTUN_OPT_MAX_PENDING_PACKETS: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *((u_int32_t *)data) = pcb->utun_max_pending_packets; } + break; + } - case UTUN_OPT_ENABLE_FLOWSWITCH: { - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - *(int *)data = if_check_netagent(pcb->utun_ifp, pcb->utun_nx.ms_agent); - } - break; +#if UTUN_NEXUS + case UTUN_OPT_ENABLE_CHANNEL: { + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { + lck_rw_lock_shared(&pcb->utun_pcb_lock); + *(int *)data = pcb->utun_kpipe_enabled; + lck_rw_unlock_shared(&pcb->utun_pcb_lock); } + break; + } - case UTUN_OPT_ENABLE_NETIF: { - if (*len != sizeof(int)) { - result = EMSGSIZE; - } else { - lck_rw_lock_shared(&pcb->utun_pcb_lock); - *(int *)data = !!pcb->utun_use_netif; - lck_rw_unlock_shared(&pcb->utun_pcb_lock); - } - break; + case UTUN_OPT_ENABLE_FLOWSWITCH: { + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { + *(int *)data = if_check_netagent(pcb->utun_ifp, pcb->utun_nx.ms_agent); } + break; + } - case UTUN_OPT_GET_CHANNEL_UUID: { + case UTUN_OPT_ENABLE_NETIF: { + if (*len != sizeof(int)) { + result = EMSGSIZE; + } else { lck_rw_lock_shared(&pcb->utun_pcb_lock); - if (uuid_is_null(pcb->utun_kpipe_uuid)) { - result = ENXIO; - } else if (*len != sizeof(uuid_t)) { - result = EMSGSIZE; - } else { - uuid_copy(data, pcb->utun_kpipe_uuid); - } + *(int *)data = !!pcb->utun_use_netif; lck_rw_unlock_shared(&pcb->utun_pcb_lock); - break; } - case UTUN_OPT_SLOT_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->utun_slot_size; - } - break; + break; + } + + case UTUN_OPT_GET_CHANNEL_UUID: { + lck_rw_lock_shared(&pcb->utun_pcb_lock); + if (uuid_is_null(pcb->utun_kpipe_uuid)) { + result = ENXIO; + } else if (*len != sizeof(uuid_t)) { + result = EMSGSIZE; + } else { + uuid_copy(data, pcb->utun_kpipe_uuid); } - case UTUN_OPT_NETIF_RING_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->utun_netif_ring_size; - } - break; + 
lck_rw_unlock_shared(&pcb->utun_pcb_lock); + break; + } + case UTUN_OPT_SLOT_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->utun_slot_size; } - case UTUN_OPT_TX_FSW_RING_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->utun_tx_fsw_ring_size; - } - break; + break; + } + case UTUN_OPT_NETIF_RING_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->utun_netif_ring_size; } - case UTUN_OPT_RX_FSW_RING_SIZE: { - if (*len != sizeof(u_int32_t)) { - result = EMSGSIZE; - } else { - *(u_int32_t *)data = pcb->utun_rx_fsw_ring_size; - } - break; + break; + } + case UTUN_OPT_TX_FSW_RING_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->utun_tx_fsw_ring_size; + } + break; + } + case UTUN_OPT_RX_FSW_RING_SIZE: { + if (*len != sizeof(u_int32_t)) { + result = EMSGSIZE; + } else { + *(u_int32_t *)data = pcb->utun_rx_fsw_ring_size; } + break; + } #endif // UTUN_NEXUS - default: - result = ENOPROTOOPT; - break; + default: + result = ENOPROTOOPT; + break; } - + return result; } @@ -2426,10 +2428,10 @@ utun_start(ifnet_t interface) } static errno_t -utun_output(ifnet_t interface, - mbuf_t data) +utun_output(ifnet_t interface, + mbuf_t data) { - struct utun_pcb *pcb = ifnet_softc(interface); + struct utun_pcb *pcb = ifnet_softc(interface); errno_t result; VERIFY(interface == pcb->utun_ifp); @@ -2451,7 +2453,7 @@ utun_output(ifnet_t interface, // otherwise, fall thru to ctl_enqueumbuf if (pcb->utun_ctlref) { - int length; + int length; /* * The ABI requires the protocol in network byte order @@ -2484,18 +2486,18 @@ utun_output(ifnet_t interface, } else { mbuf_freem(data); } - + return 0; } static errno_t utun_demux(__unused ifnet_t interface, - mbuf_t data, - __unused char *frame_header, - protocol_family_t *protocol) + mbuf_t data, + __unused char *frame_header, + protocol_family_t *protocol) { #if UTUN_NEXUS - struct utun_pcb *pcb = ifnet_softc(interface); + struct utun_pcb *pcb = ifnet_softc(interface); struct ip *ip; u_int ip_version; #endif @@ -2513,16 +2515,16 @@ utun_demux(__unused ifnet_t interface, ip = mtod(data, struct ip *); ip_version = ip->ip_v; - switch(ip_version) { - case 4: - *protocol = PF_INET; - return 0; - case 6: - *protocol = PF_INET6; - return 0; - default: - *protocol = 0; - break; + switch (ip_version) { + case 4: + *protocol = PF_INET; + return 0; + case 6: + *protocol = PF_INET6; + return 0; + default: + *protocol = 0; + break; } } else #endif // UTUN_NEXUS @@ -2535,14 +2537,14 @@ utun_demux(__unused ifnet_t interface, static errno_t utun_framer(ifnet_t interface, - mbuf_t *packet, - __unused const struct sockaddr *dest, - __unused const char *desk_linkaddr, - const char *frame_type, - u_int32_t *prepend_len, - u_int32_t *postpend_len) + mbuf_t *packet, + __unused const struct sockaddr *dest, + __unused const char *desk_linkaddr, + const char *frame_type, + u_int32_t *prepend_len, + u_int32_t *postpend_len) { - struct utun_pcb *pcb = ifnet_softc(interface); + struct utun_pcb *pcb = ifnet_softc(interface); VERIFY(interface == pcb->utun_ifp); u_int32_t header_length = UTUN_HEADER_SIZE(pcb); @@ -2552,7 +2554,7 @@ utun_framer(ifnet_t interface, ifnet_stat_increment_out(interface, 0, 0, 1); // just return, because the buffer was freed in mbuf_prepend - return EJUSTRETURN; + return EJUSTRETURN; } if (prepend_len != NULL) { *prepend_len = header_length; @@ -2560,83 +2562,83 @@ 
utun_framer(ifnet_t interface, if (postpend_len != NULL) { *postpend_len = 0; } - + // place protocol number at the beginning of the mbuf *(protocol_family_t *)mbuf_data(*packet) = *(protocol_family_t *)(uintptr_t)(size_t)frame_type; - return 0; + return 0; } static errno_t utun_add_proto(__unused ifnet_t interface, - protocol_family_t protocol, - __unused const struct ifnet_demux_desc *demux_array, - __unused u_int32_t demux_count) + protocol_family_t protocol, + __unused const struct ifnet_demux_desc *demux_array, + __unused u_int32_t demux_count) { - switch(protocol) { - case PF_INET: - return 0; - case PF_INET6: - return 0; - default: - break; + switch (protocol) { + case PF_INET: + return 0; + case PF_INET6: + return 0; + default: + break; } - + return ENOPROTOOPT; } static errno_t utun_del_proto(__unused ifnet_t interface, - __unused protocol_family_t protocol) + __unused protocol_family_t protocol) { return 0; } static errno_t utun_ioctl(ifnet_t interface, - u_long command, - void *data) + u_long command, + void *data) { #if UTUN_NEXUS - struct utun_pcb *pcb = ifnet_softc(interface); + struct utun_pcb *pcb = ifnet_softc(interface); #endif - errno_t result = 0; - - switch(command) { - case SIOCSIFMTU: { + errno_t result = 0; + + switch (command) { + case SIOCSIFMTU: { #if UTUN_NEXUS - if (pcb->utun_use_netif) { - // Make sure we can fit packets in the channel buffers - // Allow for the headroom in the slot - if (((uint64_t)((struct ifreq*)data)->ifr_mtu) + UTUN_IF_HEADROOM_SIZE > pcb->utun_slot_size) { - result = EINVAL; - } else { - ifnet_set_mtu(interface, (uint32_t)((struct ifreq*)data)->ifr_mtu); - } - } else -#endif // UTUN_NEXUS - { - ifnet_set_mtu(interface, ((struct ifreq*)data)->ifr_mtu); + if (pcb->utun_use_netif) { + // Make sure we can fit packets in the channel buffers + // Allow for the headroom in the slot + if (((uint64_t)((struct ifreq*)data)->ifr_mtu) + UTUN_IF_HEADROOM_SIZE > pcb->utun_slot_size) { + result = EINVAL; + } else { + ifnet_set_mtu(interface, (uint32_t)((struct ifreq*)data)->ifr_mtu); } - break; + } else +#endif // UTUN_NEXUS + { + ifnet_set_mtu(interface, ((struct ifreq*)data)->ifr_mtu); } - - case SIOCSIFFLAGS: - /* ifioctl() takes care of it */ - break; - - default: - result = EOPNOTSUPP; + break; } - + + case SIOCSIFFLAGS: + /* ifioctl() takes care of it */ + break; + + default: + result = EOPNOTSUPP; + } + return result; } static void utun_detached(ifnet_t interface) { - struct utun_pcb *pcb = ifnet_softc(interface); + struct utun_pcb *pcb = ifnet_softc(interface); (void)ifnet_release(interface); utun_free_pcb(pcb, true); } @@ -2645,18 +2647,18 @@ utun_detached(ifnet_t interface) static errno_t utun_proto_input(__unused ifnet_t interface, - protocol_family_t protocol, - mbuf_t m, - __unused char *frame_header) + protocol_family_t protocol, + mbuf_t m, + __unused char *frame_header) { - struct utun_pcb *pcb = ifnet_softc(interface); + struct utun_pcb *pcb = ifnet_softc(interface); #if UTUN_NEXUS if (!pcb->utun_use_netif) #endif // UTUN_NEXUS { mbuf_adj(m, UTUN_HEADER_SIZE(pcb)); } - int32_t pktlen = m->m_pkthdr.len; + int32_t pktlen = m->m_pkthdr.len; if (proto_input(protocol, m) != 0) { m_freem(m); #if UTUN_NEXUS @@ -2673,18 +2675,18 @@ utun_proto_input(__unused ifnet_t interface, ifnet_stat_increment_in(interface, 1, pktlen, 0); } } - + return 0; } static errno_t utun_proto_pre_output(__unused ifnet_t interface, - protocol_family_t protocol, - __unused mbuf_t *packet, - __unused const struct sockaddr *dest, - __unused void *route, - char 
*frame_type, - __unused char *link_layer_dest) + protocol_family_t protocol, + __unused mbuf_t *packet, + __unused const struct sockaddr *dest, + __unused void *route, + char *frame_type, + __unused char *link_layer_dest) { *(protocol_family_t *)(void *)frame_type = protocol; return 0; @@ -2692,10 +2694,10 @@ utun_proto_pre_output(__unused ifnet_t interface, static errno_t utun_attach_proto(ifnet_t interface, - protocol_family_t protocol) + protocol_family_t protocol) { - struct ifnet_attach_proto_param proto; - + struct ifnet_attach_proto_param proto; + bzero(&proto, sizeof(proto)); proto.input = utun_proto_input; proto.pre_output = utun_proto_pre_output; @@ -2703,9 +2705,9 @@ utun_attach_proto(ifnet_t interface, errno_t result = ifnet_attach_protocol(interface, protocol, &proto); if (result != 0 && result != EEXIST) { printf("utun_attach_inet - ifnet_attach_protocol %d failed: %d\n", - protocol, result); + protocol, result); } - + return result; } @@ -2736,13 +2738,13 @@ utun_pkt_input(struct utun_pcb *pcb, mbuf_t packet) kern_channel_notify(rx_ring, 0); } - return (0); + return 0; } else #endif // IPSEC_NEXUS { mbuf_pkthdr_setrcvif(packet, pcb->utun_ifp); - if (m_pktlen(packet) >= (int32_t)UTUN_HEADER_SIZE(pcb)) { + if (m_pktlen(packet) >= (int32_t)UTUN_HEADER_SIZE(pcb)) { bpf_tap_in(pcb->utun_ifp, DLT_NULL, packet, 0, 0); } if (pcb->utun_flags & UTUN_FLAGS_NO_INPUT) { @@ -2751,7 +2753,7 @@ utun_pkt_input(struct utun_pcb *pcb, mbuf_t packet) return 0; } - errno_t result = 0; + errno_t result = 0; if (!pcb->utun_ext_ifdata_stats) { struct ifnet_stat_increment_param incs = {}; incs.packets_in = 1; @@ -2767,7 +2769,7 @@ utun_pkt_input(struct utun_pcb *pcb, mbuf_t packet) mbuf_freem(packet); } - return (0); + return 0; } } @@ -2798,14 +2800,14 @@ utun_register_nexus(void) /* utun_nxdp_init() is called before this function returns */ err = kern_nexus_register_domain_provider(NEXUS_TYPE_NET_IF, - (const uint8_t *) "com.apple.utun", - &dp_init, sizeof(dp_init), - &utun_nx_dom_prov); + (const uint8_t *) "com.apple.utun", + &dp_init, sizeof(dp_init), + &utun_nx_dom_prov); if (err != 0) { printf("%s: failed to register domain provider\n", __func__); - return (err); + return err; } - return (0); + return 0; } boolean_t utun_interface_needs_netagent(ifnet_t interface) @@ -2813,16 +2815,16 @@ utun_interface_needs_netagent(ifnet_t interface) struct utun_pcb *pcb = NULL; if (interface == NULL) { - return (FALSE); + return FALSE; } pcb = ifnet_softc(interface); if (pcb == NULL) { - return (FALSE); + return FALSE; } - return (pcb->utun_needs_netagent == true); + return pcb->utun_needs_netagent == true; } static errno_t @@ -2837,7 +2839,7 @@ utun_ifnet_set_attrs(ifnet_t ifp) */ ifnet_set_eflags(ifp, IFEF_NOAUTOIPV6LL, IFEF_NOAUTOIPV6LL); - return (0); + return 0; } static errno_t @@ -2845,7 +2847,7 @@ utun_netif_prepare(kern_nexus_t nexus, ifnet_t ifp) { struct utun_pcb *pcb = kern_nexus_get_context(nexus); pcb->utun_netif_nexus = nexus; - return (utun_ifnet_set_attrs(ifp)); + return utun_ifnet_set_attrs(ifp); } static errno_t @@ -2855,7 +2857,7 @@ utun_nexus_pre_connect(kern_nexus_provider_t nxprov, { #pragma unused(nxprov, p) #pragma unused(nexus, nexus_port, channel, ch_ctx) - return (0); + return 0; } static errno_t @@ -2865,7 +2867,7 @@ utun_nexus_connected(kern_nexus_provider_t nxprov, kern_nexus_t nexus, #pragma unused(nxprov, channel) struct utun_pcb *pcb = kern_nexus_get_context(nexus); boolean_t ok = ifnet_is_attached(pcb->utun_ifp, 1); - return (ok ? 0 : ENXIO); + return ok ? 
0 : ENXIO; } static void @@ -2877,7 +2879,7 @@ utun_nexus_pre_disconnect(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static void utun_netif_pre_disconnect(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel) + kern_channel_t channel) { #pragma unused(nxprov, nexus, channel) } @@ -2896,8 +2898,8 @@ utun_nexus_disconnected(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t utun_kpipe_ring_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_t channel, kern_channel_ring_t ring, - boolean_t is_tx_ring, void **ring_ctx) + kern_channel_t channel, kern_channel_ring_t ring, + boolean_t is_tx_ring, void **ring_ctx) { #pragma unused(nxprov) #pragma unused(channel) @@ -2915,7 +2917,7 @@ utun_kpipe_ring_init(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static void utun_kpipe_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t ring) + kern_channel_ring_t ring) { #pragma unused(nxprov) struct utun_pcb *pcb = kern_nexus_get_context(nexus); @@ -2928,7 +2930,7 @@ utun_kpipe_ring_fini(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t utun_kpipe_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t tx_ring, uint32_t flags) + kern_channel_ring_t tx_ring, uint32_t flags) { #pragma unused(nxprov) #pragma unused(flags) @@ -2982,11 +2984,11 @@ utun_kpipe_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, tx_baddr += kern_buflet_get_data_offset(tx_buf); size_t length = MIN(kern_packet_get_data_length(tx_ph), - pcb->utun_slot_size); + pcb->utun_slot_size); mbuf_t data = NULL; if (length >= UTUN_HEADER_SIZE(pcb) && - !(pcb->utun_flags & UTUN_FLAGS_NO_INPUT)) { + !(pcb->utun_flags & UTUN_FLAGS_NO_INPUT)) { errno_t error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_HEADER, &data); VERIFY(0 == error); error = mbuf_copyback(data, 0, length, tx_baddr, MBUF_WAITOK); @@ -3012,7 +3014,7 @@ utun_kpipe_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, } if (!MBUFQ_EMPTY(&mbq)) { (void) ifnet_input_extended(pcb->utun_ifp, MBUFQ_FIRST(&mbq), - MBUFQ_LAST(&mbq), &incs); + MBUFQ_LAST(&mbq), &incs); MBUFQ_INIT(&mbq); } } @@ -3022,7 +3024,7 @@ utun_kpipe_sync_tx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, static errno_t utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, - kern_channel_ring_t rx_ring, uint32_t flags) + kern_channel_ring_t rx_ring, uint32_t flags) { #pragma unused(nxprov) #pragma unused(flags) @@ -3049,7 +3051,7 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, if (pcb->utun_use_netif) { kern_channel_ring_t tx_ring = pcb->utun_netif_txring; if (tx_ring == NULL || - pcb->utun_netif_nexus == NULL) { + pcb->utun_netif_nexus == NULL) { // Net-If TX ring not set up yet, nothing to read lck_rw_unlock_shared(&pcb->utun_pcb_lock); return 0; @@ -3110,7 +3112,7 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, errno_t error = kern_pbufpool_alloc_nosleep(rx_pp, 1, &rx_ph); if (__improbable(error != 0)) { printf("utun_kpipe_sync_rx %s: failed to allocate packet\n", - pcb->utun_ifp->if_xname); + pcb->utun_ifp->if_xname); break; } @@ -3123,19 +3125,19 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, bpf_tap_packet_out(pcb->utun_ifp, DLT_RAW, tx_ph, NULL, 0); length = MIN(kern_packet_get_data_length(tx_ph) + UTUN_HEADER_SIZE(pcb), - pcb->utun_slot_size); + pcb->utun_slot_size); tx_ring_stats.kcrsi_slots_transferred++; tx_ring_stats.kcrsi_bytes_transferred += length; if (length < UTUN_HEADER_SIZE(pcb) 
|| - length > pcb->utun_slot_size || - length > rx_pp->pp_buflet_size || - (pcb->utun_flags & UTUN_FLAGS_NO_OUTPUT)) { + length > pcb->utun_slot_size || + length > rx_pp->pp_buflet_size || + (pcb->utun_flags & UTUN_FLAGS_NO_OUTPUT)) { /* flush data */ kern_pbufpool_free(rx_pp, rx_ph); printf("utun_kpipe_sync_rx %s: invalid length %zu header_size %zu\n", - pcb->utun_ifp->if_xname, length, UTUN_HEADER_SIZE(pcb)); + pcb->utun_ifp->if_xname, length, UTUN_HEADER_SIZE(pcb)); STATS_INC(nifs, NETIF_STATS_BADLEN); STATS_INC(nifs, NETIF_STATS_DROPPED); continue; @@ -3152,19 +3154,19 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, uint8_t vhl = *(uint8_t *)(tx_baddr); u_int ip_version = (vhl >> 4); switch (ip_version) { - case 4: { - af = AF_INET; - break; - } - case 6: { - af = AF_INET6; - break; - } - default: { - printf("utun_kpipe_sync_rx %s: unknown ip version %u vhl %u header_size %zu\n", - pcb->utun_ifp->if_xname, ip_version, vhl, UTUN_HEADER_SIZE(pcb)); - break; - } + case 4: { + af = AF_INET; + break; + } + case 6: { + af = AF_INET6; + break; + } + default: { + printf("utun_kpipe_sync_rx %s: unknown ip version %u vhl %u header_size %zu\n", + pcb->utun_ifp->if_xname, ip_version, vhl, UTUN_HEADER_SIZE(pcb)); + break; + } } // Copy header @@ -3231,7 +3233,7 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, struct mbuf *mb_tail = NULL; if (ifnet_dequeue_multi(pcb->utun_ifp, avail, &mb_head, - &mb_tail, &mb_cnt, &mb_len) != 0) { + &mb_tail, &mb_cnt, &mb_len) != 0) { return 0; } VERIFY(mb_cnt <= avail); @@ -3253,8 +3255,8 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, --mb_cnt; length = mbuf_pkthdr_len(data); if (length < UTUN_HEADER_SIZE(pcb) || - length > pcb->utun_slot_size || - (pcb->utun_flags & UTUN_FLAGS_NO_OUTPUT)) { + length > pcb->utun_slot_size || + (pcb->utun_flags & UTUN_FLAGS_NO_OUTPUT)) { /* flush data */ mbuf_freem(data); continue; @@ -3266,7 +3268,7 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, errno_t error = kern_pbufpool_alloc_nosleep(rx_pp, 1, &rx_ph); if (__improbable(error != 0)) { printf("utun_kpipe_sync_rx %s: failed to allocate packet\n", - pcb->utun_ifp->if_xname); + pcb->utun_ifp->if_xname); break; } @@ -3283,7 +3285,7 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, // Copy-in data from mbuf to buflet mbuf_copydata(data, 0, length, (void *)rx_baddr); - kern_packet_clear_flow_uuid(rx_ph); // Zero flow id + kern_packet_clear_flow_uuid(rx_ph); // Zero flow id // Finalize and attach the packet error = kern_buflet_set_data_offset(rx_buf, 0); @@ -3326,12 +3328,12 @@ utun_kpipe_sync_rx(kern_nexus_provider_t nxprov, kern_nexus_t nexus, /* * These are place holders until coreTLS kext stops calling them */ -errno_t utun_ctl_register_dtls (void *reg); +errno_t utun_ctl_register_dtls(void *reg); int utun_pkt_dtls_input(struct utun_pcb *pcb, mbuf_t *pkt, protocol_family_t family); void utun_ctl_disable_crypto_dtls(struct utun_pcb *pcb); errno_t -utun_ctl_register_dtls (void *reg) +utun_ctl_register_dtls(void *reg) { #pragma unused(reg) return 0; diff --git a/bsd/net/if_utun.h b/bsd/net/if_utun.h index 460fcdb62..0a8f9f967 100644 --- a/bsd/net/if_utun.h +++ b/bsd/net/if_utun.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2016 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _NET_IF_UTUN_H_ -#define _NET_IF_UTUN_H_ +#ifndef _NET_IF_UTUN_H_ +#define _NET_IF_UTUN_H_ #ifdef KERNEL_PRIVATE @@ -49,39 +49,39 @@ boolean_t utun_interface_needs_netagent(ifnet_t interface); /* * Socket option names to manage utun */ -#define UTUN_OPT_FLAGS 1 -#define UTUN_OPT_IFNAME 2 -#define UTUN_OPT_EXT_IFDATA_STATS 3 /* get|set (type int) */ -#define UTUN_OPT_INC_IFDATA_STATS_IN 4 /* set to increment stat counters (type struct utun_stats_param) */ -#define UTUN_OPT_INC_IFDATA_STATS_OUT 5 /* set to increment stat counters (type struct utun_stats_param) */ +#define UTUN_OPT_FLAGS 1 +#define UTUN_OPT_IFNAME 2 +#define UTUN_OPT_EXT_IFDATA_STATS 3 /* get|set (type int) */ +#define UTUN_OPT_INC_IFDATA_STATS_IN 4 /* set to increment stat counters (type struct utun_stats_param) */ +#define UTUN_OPT_INC_IFDATA_STATS_OUT 5 /* set to increment stat counters (type struct utun_stats_param) */ -#define UTUN_OPT_SET_DELEGATE_INTERFACE 15 /* set the delegate interface (char[]) */ -#define UTUN_OPT_MAX_PENDING_PACKETS 16 /* the number of packets that can be waiting to be read - from the control socket at a time */ -#define UTUN_OPT_ENABLE_CHANNEL 17 -#define UTUN_OPT_GET_CHANNEL_UUID 18 -#define UTUN_OPT_ENABLE_FLOWSWITCH 19 +#define UTUN_OPT_SET_DELEGATE_INTERFACE 15 /* set the delegate interface (char[]) */ +#define UTUN_OPT_MAX_PENDING_PACKETS 16 /* the number of packets that can be waiting to be read + * from the control socket at a time */ +#define UTUN_OPT_ENABLE_CHANNEL 17 +#define UTUN_OPT_GET_CHANNEL_UUID 18 +#define UTUN_OPT_ENABLE_FLOWSWITCH 19 -#define UTUN_OPT_ENABLE_NETIF 20 /* Must be set before connecting */ -#define UTUN_OPT_SLOT_SIZE 21 /* Must be set before connecting */ -#define UTUN_OPT_NETIF_RING_SIZE 22 /* Must be set before connecting */ -#define UTUN_OPT_TX_FSW_RING_SIZE 23 /* Must be set before connecting */ -#define UTUN_OPT_RX_FSW_RING_SIZE 24 /* Must be set before connecting */ +#define UTUN_OPT_ENABLE_NETIF 20 /* Must be set before connecting */ +#define UTUN_OPT_SLOT_SIZE 21 /* Must be set before connecting */ +#define UTUN_OPT_NETIF_RING_SIZE 22 /* Must be set before connecting */ +#define UTUN_OPT_TX_FSW_RING_SIZE 23 /* Must be set before connecting */ +#define UTUN_OPT_RX_FSW_RING_SIZE 24 /* Must be set before connecting */ /* - * Flags for by UTUN_OPT_FLAGS + * Flags for by UTUN_OPT_FLAGS */ -#define UTUN_FLAGS_NO_OUTPUT 0x0001 -#define UTUN_FLAGS_NO_INPUT 0x0002 -#define 
UTUN_FLAGS_ENABLE_PROC_UUID 0x0004 +#define UTUN_FLAGS_NO_OUTPUT 0x0001 +#define UTUN_FLAGS_NO_INPUT 0x0002 +#define UTUN_FLAGS_ENABLE_PROC_UUID 0x0004 /* * utun stats parameter structure */ struct utun_stats_param { - u_int64_t utsp_packets; - u_int64_t utsp_bytes; - u_int64_t utsp_errors; + u_int64_t utsp_packets; + u_int64_t utsp_bytes; + u_int64_t utsp_errors; }; #endif diff --git a/bsd/net/if_var.h b/bsd/net/if_var.h index 1c69489b5..c980cd7e3 100644 --- a/bsd/net/if_var.h +++ b/bsd/net/if_var.h @@ -61,14 +61,14 @@ * $FreeBSD: src/sys/net/if_var.h,v 1.18.2.7 2001/07/24 19:10:18 brooks Exp $ */ -#ifndef _NET_IF_VAR_H_ -#define _NET_IF_VAR_H_ +#ifndef _NET_IF_VAR_H_ +#define _NET_IF_VAR_H_ #include #include #include #include -#include /* get TAILQ macros */ +#include /* get TAILQ macros */ #ifdef KERNEL_PRIVATE #include #endif /* KERNEL_PRIVATE */ @@ -94,7 +94,7 @@ #define APPLE_IF_FAM_DISC 8 #define APPLE_IF_FAM_MDECAP 9 #define APPLE_IF_FAM_GIF 10 -#define APPLE_IF_FAM_FAITH 11 /* deprecated */ +#define APPLE_IF_FAM_FAITH 11 /* deprecated */ #define APPLE_IF_FAM_STF 12 #define APPLE_IF_FAM_FIREWIRE 13 #define APPLE_IF_FAM_BOND 14 @@ -104,8 +104,8 @@ * 72 was chosen below because it is the size of a TCP/IP * header (40) + the minimum mss (32). */ -#define IF_MINMTU 72 -#define IF_MAXMTU 65535 +#define IF_MINMTU 72 +#define IF_MAXMTU 65535 /* * Structures defining a network interface, providing a packet @@ -132,16 +132,16 @@ * interfaces. These routines live in the files if.c and route.c */ -#define IFNAMSIZ 16 +#define IFNAMSIZ 16 /* This belongs up in socket.h or socketvar.h, depending on how far the * event bubbles up. */ struct net_event_data { - u_int32_t if_family; - u_int32_t if_unit; - char if_name[IFNAMSIZ]; + u_int32_t if_family; + u_int32_t if_unit; + char if_name[IFNAMSIZ]; }; #if defined(__LP64__) @@ -159,36 +159,36 @@ struct net_event_data { */ struct if_data { /* generic interface information */ - u_char ifi_type; /* ethernet, tokenring, etc */ - u_char ifi_typelen; /* Length of frame type id */ - u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ - u_char ifi_addrlen; /* media address length */ - u_char ifi_hdrlen; /* media header length */ - u_char ifi_recvquota; /* polling quota for receive intrs */ - u_char ifi_xmitquota; /* polling quota for xmit intrs */ - u_char ifi_unused1; /* for future use */ - u_int32_t ifi_mtu; /* maximum transmission unit */ - u_int32_t ifi_metric; /* routing metric (external only) */ - u_int32_t ifi_baudrate; /* linespeed */ + u_char ifi_type; /* ethernet, tokenring, etc */ + u_char ifi_typelen; /* Length of frame type id */ + u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ + u_char ifi_addrlen; /* media address length */ + u_char ifi_hdrlen; /* media header length */ + u_char ifi_recvquota; /* polling quota for receive intrs */ + u_char ifi_xmitquota; /* polling quota for xmit intrs */ + u_char ifi_unused1; /* for future use */ + u_int32_t ifi_mtu; /* maximum transmission unit */ + u_int32_t ifi_metric; /* routing metric (external only) */ + u_int32_t ifi_baudrate; /* linespeed */ /* volatile statistics */ - u_int32_t ifi_ipackets; /* packets received on interface */ - u_int32_t ifi_ierrors; /* input errors on interface */ - u_int32_t ifi_opackets; /* packets sent on interface */ - u_int32_t ifi_oerrors; /* output errors on interface */ - u_int32_t ifi_collisions; /* collisions on csma interfaces */ - u_int32_t ifi_ibytes; /* total number of octets received */ - u_int32_t ifi_obytes; /* total number of octets sent */ 
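/*
 * [Editorial sketch -- not part of the patch.] The UTUN_OPT_* names defined in
 * if_utun.h above are socket options on a utun kernel-control socket
 * (PF_SYSTEM/SYSPROTO_CONTROL, connected to UTUN_CONTROL_NAME).  A minimal
 * user-space setter; utun_fd is assumed to be such a connected socket:
 */
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <net/if_utun.h>

static int
utun_set_flags(int utun_fd, int flags)	/* e.g. UTUN_FLAGS_NO_INPUT */
{
	return setsockopt(utun_fd, SYSPROTO_CONTROL, UTUN_OPT_FLAGS,
	    &flags, sizeof(flags));
}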
- u_int32_t ifi_imcasts; /* packets received via multicast */ - u_int32_t ifi_omcasts; /* packets sent via multicast */ - u_int32_t ifi_iqdrops; /* dropped on input, this interface */ - u_int32_t ifi_noproto; /* destined for unsupported protocol */ - u_int32_t ifi_recvtiming; /* usec spent receiving when timing */ - u_int32_t ifi_xmittiming; /* usec spent xmitting when timing */ - struct IF_DATA_TIMEVAL ifi_lastchange; /* time of last administrative change */ - u_int32_t ifi_unused2; /* used to be the default_proto */ - u_int32_t ifi_hwassist; /* HW offload capabilities */ - u_int32_t ifi_reserved1; /* for future use */ - u_int32_t ifi_reserved2; /* for future use */ + u_int32_t ifi_ipackets; /* packets received on interface */ + u_int32_t ifi_ierrors; /* input errors on interface */ + u_int32_t ifi_opackets; /* packets sent on interface */ + u_int32_t ifi_oerrors; /* output errors on interface */ + u_int32_t ifi_collisions; /* collisions on csma interfaces */ + u_int32_t ifi_ibytes; /* total number of octets received */ + u_int32_t ifi_obytes; /* total number of octets sent */ + u_int32_t ifi_imcasts; /* packets received via multicast */ + u_int32_t ifi_omcasts; /* packets sent via multicast */ + u_int32_t ifi_iqdrops; /* dropped on input, this interface */ + u_int32_t ifi_noproto; /* destined for unsupported protocol */ + u_int32_t ifi_recvtiming; /* usec spent receiving when timing */ + u_int32_t ifi_xmittiming; /* usec spent xmitting when timing */ + struct IF_DATA_TIMEVAL ifi_lastchange; /* time of last administrative change */ + u_int32_t ifi_unused2; /* used to be the default_proto */ + u_int32_t ifi_hwassist; /* HW offload capabilities */ + u_int32_t ifi_reserved1; /* for future use */ + u_int32_t ifi_reserved2; /* for future use */ }; /* @@ -197,137 +197,137 @@ struct if_data { */ struct if_data64 { /* generic interface information */ - u_char ifi_type; /* ethernet, tokenring, etc */ - u_char ifi_typelen; /* Length of frame type id */ - u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ - u_char ifi_addrlen; /* media address length */ - u_char ifi_hdrlen; /* media header length */ - u_char ifi_recvquota; /* polling quota for receive intrs */ - u_char ifi_xmitquota; /* polling quota for xmit intrs */ - u_char ifi_unused1; /* for future use */ - u_int32_t ifi_mtu; /* maximum transmission unit */ - u_int32_t ifi_metric; /* routing metric (external only) */ - u_int64_t ifi_baudrate; /* linespeed */ + u_char ifi_type; /* ethernet, tokenring, etc */ + u_char ifi_typelen; /* Length of frame type id */ + u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ + u_char ifi_addrlen; /* media address length */ + u_char ifi_hdrlen; /* media header length */ + u_char ifi_recvquota; /* polling quota for receive intrs */ + u_char ifi_xmitquota; /* polling quota for xmit intrs */ + u_char ifi_unused1; /* for future use */ + u_int32_t ifi_mtu; /* maximum transmission unit */ + u_int32_t ifi_metric; /* routing metric (external only) */ + u_int64_t ifi_baudrate; /* linespeed */ /* volatile statistics */ - u_int64_t ifi_ipackets; /* packets received on interface */ - u_int64_t ifi_ierrors; /* input errors on interface */ - u_int64_t ifi_opackets; /* packets sent on interface */ - u_int64_t ifi_oerrors; /* output errors on interface */ - u_int64_t ifi_collisions; /* collisions on csma interfaces */ - u_int64_t ifi_ibytes; /* total number of octets received */ - u_int64_t ifi_obytes; /* total number of octets sent */ - u_int64_t ifi_imcasts; /* packets received via multicast */ - 
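/*
 * [Editorial note -- not part of the patch.] struct if_data64 exists because
 * the 32-bit counters of struct if_data wrap quickly on fast links: ifi_ibytes
 * rolls over after 2^32 bytes (~4.29 GB), which at 10 Gbit/s (1.25 GB/s) is
 * roughly 3.4 seconds of traffic.  Callers on high-speed interfaces should
 * prefer the 64-bit ifi_* fields of struct if_data64.
 */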
u_int64_t ifi_omcasts; /* packets sent via multicast */ - u_int64_t ifi_iqdrops; /* dropped on input, this interface */ - u_int64_t ifi_noproto; /* destined for unsupported protocol */ - u_int32_t ifi_recvtiming; /* usec spent receiving when timing */ - u_int32_t ifi_xmittiming; /* usec spent xmitting when timing */ - struct IF_DATA_TIMEVAL ifi_lastchange; /* time of last administrative change */ + u_int64_t ifi_ipackets; /* packets received on interface */ + u_int64_t ifi_ierrors; /* input errors on interface */ + u_int64_t ifi_opackets; /* packets sent on interface */ + u_int64_t ifi_oerrors; /* output errors on interface */ + u_int64_t ifi_collisions; /* collisions on csma interfaces */ + u_int64_t ifi_ibytes; /* total number of octets received */ + u_int64_t ifi_obytes; /* total number of octets sent */ + u_int64_t ifi_imcasts; /* packets received via multicast */ + u_int64_t ifi_omcasts; /* packets sent via multicast */ + u_int64_t ifi_iqdrops; /* dropped on input, this interface */ + u_int64_t ifi_noproto; /* destined for unsupported protocol */ + u_int32_t ifi_recvtiming; /* usec spent receiving when timing */ + u_int32_t ifi_xmittiming; /* usec spent xmitting when timing */ + struct IF_DATA_TIMEVAL ifi_lastchange; /* time of last administrative change */ }; #ifdef PRIVATE struct if_traffic_class { - u_int64_t ifi_ibepackets; /* TC_BE packets received on interface */ - u_int64_t ifi_ibebytes; /* TC_BE bytes received on interface */ - u_int64_t ifi_obepackets; /* TC_BE packet sent on interface */ - u_int64_t ifi_obebytes; /* TC_BE bytes sent on interface */ - u_int64_t ifi_ibkpackets; /* TC_BK packets received on interface */ - u_int64_t ifi_ibkbytes; /* TC_BK bytes received on interface */ - u_int64_t ifi_obkpackets; /* TC_BK packet sent on interface */ - u_int64_t ifi_obkbytes; /* TC_BK bytes sent on interface */ - u_int64_t ifi_ivipackets; /* TC_VI packets received on interface */ - u_int64_t ifi_ivibytes; /* TC_VI bytes received on interface */ - u_int64_t ifi_ovipackets; /* TC_VI packets sent on interface */ - u_int64_t ifi_ovibytes; /* TC_VI bytes sent on interface */ - u_int64_t ifi_ivopackets; /* TC_VO packets received on interface */ - u_int64_t ifi_ivobytes; /* TC_VO bytes received on interface */ - u_int64_t ifi_ovopackets; /* TC_VO packets sent on interface */ - u_int64_t ifi_ovobytes; /* TC_VO bytes sent on interface */ - u_int64_t ifi_ipvpackets; /* TC priv packets received on interface */ - u_int64_t ifi_ipvbytes; /* TC priv bytes received on interface */ - u_int64_t ifi_opvpackets; /* TC priv packets sent on interface */ - u_int64_t ifi_opvbytes; /* TC priv bytes sent on interface */ + u_int64_t ifi_ibepackets; /* TC_BE packets received on interface */ + u_int64_t ifi_ibebytes; /* TC_BE bytes received on interface */ + u_int64_t ifi_obepackets; /* TC_BE packet sent on interface */ + u_int64_t ifi_obebytes; /* TC_BE bytes sent on interface */ + u_int64_t ifi_ibkpackets; /* TC_BK packets received on interface */ + u_int64_t ifi_ibkbytes; /* TC_BK bytes received on interface */ + u_int64_t ifi_obkpackets; /* TC_BK packet sent on interface */ + u_int64_t ifi_obkbytes; /* TC_BK bytes sent on interface */ + u_int64_t ifi_ivipackets; /* TC_VI packets received on interface */ + u_int64_t ifi_ivibytes; /* TC_VI bytes received on interface */ + u_int64_t ifi_ovipackets; /* TC_VI packets sent on interface */ + u_int64_t ifi_ovibytes; /* TC_VI bytes sent on interface */ + u_int64_t ifi_ivopackets; /* TC_VO packets received on interface */ + u_int64_t ifi_ivobytes; /* TC_VO bytes 
received on interface */ + u_int64_t ifi_ovopackets; /* TC_VO packets sent on interface */ + u_int64_t ifi_ovobytes; /* TC_VO bytes sent on interface */ + u_int64_t ifi_ipvpackets; /* TC priv packets received on interface */ + u_int64_t ifi_ipvbytes; /* TC priv bytes received on interface */ + u_int64_t ifi_opvpackets; /* TC priv packets sent on interface */ + u_int64_t ifi_opvbytes; /* TC priv bytes sent on interface */ }; struct if_data_extended { - u_int64_t ifi_alignerrs; /* unaligned (32-bit) input pkts */ - u_int64_t ifi_dt_bytes; /* Data threshold counter */ - u_int64_t ifi_fpackets; /* forwarded packets on interface */ - u_int64_t ifi_fbytes; /* forwarded bytes on interface */ - u_int64_t reserved[12]; /* for future */ + u_int64_t ifi_alignerrs; /* unaligned (32-bit) input pkts */ + u_int64_t ifi_dt_bytes; /* Data threshold counter */ + u_int64_t ifi_fpackets; /* forwarded packets on interface */ + u_int64_t ifi_fbytes; /* forwarded bytes on interface */ + u_int64_t reserved[12]; /* for future */ }; struct if_packet_stats { /* TCP */ - u_int64_t ifi_tcp_badformat; - u_int64_t ifi_tcp_unspecv6; - u_int64_t ifi_tcp_synfin; - u_int64_t ifi_tcp_badformatipsec; - u_int64_t ifi_tcp_noconnnolist; - u_int64_t ifi_tcp_noconnlist; - u_int64_t ifi_tcp_listbadsyn; - u_int64_t ifi_tcp_icmp6unreach; - u_int64_t ifi_tcp_deprecate6; - u_int64_t ifi_tcp_rstinsynrcv; - u_int64_t ifi_tcp_ooopacket; - u_int64_t ifi_tcp_dospacket; - u_int64_t ifi_tcp_cleanup; - u_int64_t ifi_tcp_synwindow; - u_int64_t reserved[6]; + u_int64_t ifi_tcp_badformat; + u_int64_t ifi_tcp_unspecv6; + u_int64_t ifi_tcp_synfin; + u_int64_t ifi_tcp_badformatipsec; + u_int64_t ifi_tcp_noconnnolist; + u_int64_t ifi_tcp_noconnlist; + u_int64_t ifi_tcp_listbadsyn; + u_int64_t ifi_tcp_icmp6unreach; + u_int64_t ifi_tcp_deprecate6; + u_int64_t ifi_tcp_rstinsynrcv; + u_int64_t ifi_tcp_ooopacket; + u_int64_t ifi_tcp_dospacket; + u_int64_t ifi_tcp_cleanup; + u_int64_t ifi_tcp_synwindow; + u_int64_t reserved[6]; /* UDP */ - u_int64_t ifi_udp_port_unreach; - u_int64_t ifi_udp_faithprefix; - u_int64_t ifi_udp_port0; - u_int64_t ifi_udp_badlength; - u_int64_t ifi_udp_badchksum; - u_int64_t ifi_udp_badmcast; - u_int64_t ifi_udp_cleanup; - u_int64_t ifi_udp_badipsec; - u_int64_t _reserved[4]; + u_int64_t ifi_udp_port_unreach; + u_int64_t ifi_udp_faithprefix; + u_int64_t ifi_udp_port0; + u_int64_t ifi_udp_badlength; + u_int64_t ifi_udp_badchksum; + u_int64_t ifi_udp_badmcast; + u_int64_t ifi_udp_cleanup; + u_int64_t ifi_udp_badipsec; + u_int64_t _reserved[4]; }; struct if_description { - u_int32_t ifd_maxlen; /* must be IF_DESCSIZE */ - u_int32_t ifd_len; /* actual ifd_desc length */ - u_int8_t *ifd_desc; /* ptr to desc buffer */ + u_int32_t ifd_maxlen; /* must be IF_DESCSIZE */ + u_int32_t ifd_len; /* actual ifd_desc length */ + u_int8_t *ifd_desc; /* ptr to desc buffer */ }; struct if_bandwidths { - u_int64_t eff_bw; /* effective bandwidth */ - u_int64_t max_bw; /* maximum theoretical bandwidth */ + u_int64_t eff_bw; /* effective bandwidth */ + u_int64_t max_bw; /* maximum theoretical bandwidth */ }; struct if_latencies { - u_int64_t eff_lt; /* effective latency */ - u_int64_t max_lt; /* maximum theoretical latency */ + u_int64_t eff_lt; /* effective latency */ + u_int64_t max_lt; /* maximum theoretical latency */ }; struct if_rxpoll_stats { - u_int32_t ifi_poll_off_req; /* total # of POLL_OFF reqs */ - u_int32_t ifi_poll_off_err; /* total # of POLL_OFF errors */ - u_int32_t ifi_poll_on_req; /* total # of POLL_ON reqs */ - u_int32_t 
ifi_poll_on_err; /* total # of POLL_ON errors */ - - u_int32_t ifi_poll_wakeups_avg; /* avg # of wakeup reqs */ - u_int32_t ifi_poll_wakeups_lowat; /* wakeups low watermark */ - u_int32_t ifi_poll_wakeups_hiwat; /* wakeups high watermark */ - - u_int64_t ifi_poll_packets; /* total # of polled packets */ - u_int32_t ifi_poll_packets_avg; /* average polled packets */ - u_int32_t ifi_poll_packets_min; /* smallest polled packets */ - u_int32_t ifi_poll_packets_max; /* largest polled packets */ - u_int32_t ifi_poll_packets_lowat; /* packets low watermark */ - u_int32_t ifi_poll_packets_hiwat; /* packets high watermark */ - - u_int64_t ifi_poll_bytes; /* total # of polled bytes */ - u_int32_t ifi_poll_bytes_avg; /* average polled bytes */ - u_int32_t ifi_poll_bytes_min; /* smallest polled bytes */ - u_int32_t ifi_poll_bytes_max; /* largest polled bytes */ - u_int32_t ifi_poll_bytes_lowat; /* bytes low watermark */ - u_int32_t ifi_poll_bytes_hiwat; /* bytes high watermark */ - - u_int32_t ifi_poll_packets_limit; /* max packets per poll call */ - u_int64_t ifi_poll_interval_time; /* poll interval (nsec) */ + u_int32_t ifi_poll_off_req; /* total # of POLL_OFF reqs */ + u_int32_t ifi_poll_off_err; /* total # of POLL_OFF errors */ + u_int32_t ifi_poll_on_req; /* total # of POLL_ON reqs */ + u_int32_t ifi_poll_on_err; /* total # of POLL_ON errors */ + + u_int32_t ifi_poll_wakeups_avg; /* avg # of wakeup reqs */ + u_int32_t ifi_poll_wakeups_lowat; /* wakeups low watermark */ + u_int32_t ifi_poll_wakeups_hiwat; /* wakeups high watermark */ + + u_int64_t ifi_poll_packets; /* total # of polled packets */ + u_int32_t ifi_poll_packets_avg; /* average polled packets */ + u_int32_t ifi_poll_packets_min; /* smallest polled packets */ + u_int32_t ifi_poll_packets_max; /* largest polled packets */ + u_int32_t ifi_poll_packets_lowat; /* packets low watermark */ + u_int32_t ifi_poll_packets_hiwat; /* packets high watermark */ + + u_int64_t ifi_poll_bytes; /* total # of polled bytes */ + u_int32_t ifi_poll_bytes_avg; /* average polled bytes */ + u_int32_t ifi_poll_bytes_min; /* smallest polled bytes */ + u_int32_t ifi_poll_bytes_max; /* largest polled bytes */ + u_int32_t ifi_poll_bytes_lowat; /* bytes low watermark */ + u_int32_t ifi_poll_bytes_hiwat; /* bytes high watermark */ + + u_int32_t ifi_poll_packets_limit; /* max packets per poll call */ + u_int64_t ifi_poll_interval_time; /* poll interval (nsec) */ }; struct if_tcp_ecn_perf_stat { @@ -375,23 +375,23 @@ struct if_tcp_ecn_stat { }; struct if_lim_perf_stat { - u_int64_t lim_dl_max_bandwidth; /* bits per second */ - u_int64_t lim_ul_max_bandwidth; /* bits per second */ - u_int64_t lim_total_txpkts; /* Total transmit packets, count */ - u_int64_t lim_total_rxpkts; /* Total receive packets, count */ - u_int64_t lim_total_retxpkts; /* Total retransmit packets */ + u_int64_t lim_dl_max_bandwidth; /* bits per second */ + u_int64_t lim_ul_max_bandwidth; /* bits per second */ + u_int64_t lim_total_txpkts; /* Total transmit packets, count */ + u_int64_t lim_total_rxpkts; /* Total receive packets, count */ + u_int64_t lim_total_retxpkts; /* Total retransmit packets */ u_int64_t lim_packet_loss_percent; /* Packet loss rate */ - u_int64_t lim_total_oopkts; /* Total out-of-order packets */ + u_int64_t lim_total_oopkts; /* Total out-of-order packets */ u_int64_t lim_packet_ooo_percent; /* Out-of-order packet rate */ - u_int64_t lim_rtt_variance; /* RTT variance, milliseconds */ - u_int64_t lim_rtt_average; /* RTT average, milliseconds */ - u_int64_t lim_rtt_min; /* RTT 
minimum, milliseconds */ - u_int64_t lim_conn_timeouts; /* connection timeouts */ - u_int64_t lim_conn_attempts; /* connection attempts */ + u_int64_t lim_rtt_variance; /* RTT variance, milliseconds */ + u_int64_t lim_rtt_average; /* RTT average, milliseconds */ + u_int64_t lim_rtt_min; /* RTT minimum, milliseconds */ + u_int64_t lim_conn_timeouts; /* connection timeouts */ + u_int64_t lim_conn_attempts; /* connection attempts */ u_int64_t lim_conn_timeout_percent; /* Rate of connection timeouts */ - u_int64_t lim_bk_txpkts; /* Transmit packets with BK service class, that use delay based algorithms */ - u_int64_t lim_dl_detected:1, /* Low internet */ - lim_ul_detected:1; + u_int64_t lim_bk_txpkts; /* Transmit packets with BK service class, that use delay based algorithms */ + u_int64_t lim_dl_detected:1, /* Low internet */ + lim_ul_detected:1; }; #define IF_VAR_H_HAS_IFNET_STATS_PER_FLOW 1 @@ -439,11 +439,11 @@ struct ifnet_stats_per_flow { * The definitions are different for different kind of interfaces like * Wifi, Cellular etc,. */ -#define IF_CELLULAR_STATUS_REPORT_VERSION_1 1 -#define IF_WIFI_STATUS_REPORT_VERSION_1 1 -#define IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION \ - IF_CELLULAR_STATUS_REPORT_VERSION_1 -#define IF_WIFI_STATUS_REPORT_CURRENT_VERSION IF_WIFI_STATUS_REPORT_VERSION_1 +#define IF_CELLULAR_STATUS_REPORT_VERSION_1 1 +#define IF_WIFI_STATUS_REPORT_VERSION_1 1 +#define IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION \ + IF_CELLULAR_STATUS_REPORT_VERSION_1 +#define IF_WIFI_STATUS_REPORT_CURRENT_VERSION IF_WIFI_STATUS_REPORT_VERSION_1 /* * For cellular interface -- * There is no way to share common headers between the Baseband and @@ -453,22 +453,22 @@ struct ifnet_stats_per_flow { */ struct if_cellular_status_v1 { u_int32_t valid_bitmask; /* indicates which fields are valid */ -#define IF_CELL_LINK_QUALITY_METRIC_VALID 0x1 -#define IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID 0x2 -#define IF_CELL_UL_MAX_BANDWIDTH_VALID 0x4 -#define IF_CELL_UL_MIN_LATENCY_VALID 0x8 -#define IF_CELL_UL_EFFECTIVE_LATENCY_VALID 0x10 -#define IF_CELL_UL_MAX_LATENCY_VALID 0x20 -#define IF_CELL_UL_RETXT_LEVEL_VALID 0x40 -#define IF_CELL_UL_BYTES_LOST_VALID 0x80 -#define IF_CELL_UL_MIN_QUEUE_SIZE_VALID 0x100 -#define IF_CELL_UL_AVG_QUEUE_SIZE_VALID 0x200 -#define IF_CELL_UL_MAX_QUEUE_SIZE_VALID 0x400 -#define IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID 0x800 -#define IF_CELL_DL_MAX_BANDWIDTH_VALID 0x1000 -#define IF_CELL_CONFIG_INACTIVITY_TIME_VALID 0x2000 -#define IF_CELL_CONFIG_BACKOFF_TIME_VALID 0x4000 -#define IF_CELL_UL_MSS_RECOMMENDED_VALID 0x8000 +#define IF_CELL_LINK_QUALITY_METRIC_VALID 0x1 +#define IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID 0x2 +#define IF_CELL_UL_MAX_BANDWIDTH_VALID 0x4 +#define IF_CELL_UL_MIN_LATENCY_VALID 0x8 +#define IF_CELL_UL_EFFECTIVE_LATENCY_VALID 0x10 +#define IF_CELL_UL_MAX_LATENCY_VALID 0x20 +#define IF_CELL_UL_RETXT_LEVEL_VALID 0x40 +#define IF_CELL_UL_BYTES_LOST_VALID 0x80 +#define IF_CELL_UL_MIN_QUEUE_SIZE_VALID 0x100 +#define IF_CELL_UL_AVG_QUEUE_SIZE_VALID 0x200 +#define IF_CELL_UL_MAX_QUEUE_SIZE_VALID 0x400 +#define IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID 0x800 +#define IF_CELL_DL_MAX_BANDWIDTH_VALID 0x1000 +#define IF_CELL_CONFIG_INACTIVITY_TIME_VALID 0x2000 +#define IF_CELL_CONFIG_BACKOFF_TIME_VALID 0x4000 +#define IF_CELL_UL_MSS_RECOMMENDED_VALID 0x8000 u_int32_t link_quality_metric; u_int32_t ul_effective_bandwidth; /* Measured uplink bandwidth based on current activity (bps) */ @@ -477,10 +477,10 @@ struct if_cellular_status_v1 { u_int32_t ul_effective_latency; /* current 
expected uplink latency for first hop (ms) */ u_int32_t ul_max_latency; /* max expected uplink latency first hop (ms) */ u_int32_t ul_retxt_level; /* Retransmission metric */ -#define IF_CELL_UL_RETXT_LEVEL_NONE 1 -#define IF_CELL_UL_RETXT_LEVEL_LOW 2 -#define IF_CELL_UL_RETXT_LEVEL_MEDIUM 3 -#define IF_CELL_UL_RETXT_LEVEL_HIGH 4 +#define IF_CELL_UL_RETXT_LEVEL_NONE 1 +#define IF_CELL_UL_RETXT_LEVEL_LOW 2 +#define IF_CELL_UL_RETXT_LEVEL_MEDIUM 3 +#define IF_CELL_UL_RETXT_LEVEL_HIGH 4 u_int32_t ul_bytes_lost; /* % of total bytes lost on uplink in Q10 format */ u_int32_t ul_min_queue_size; /* minimum bytes in queue */ u_int32_t ul_avg_queue_size; /* average bytes in queue */ @@ -489,9 +489,9 @@ struct if_cellular_status_v1 { u_int32_t dl_max_bandwidth; /* Maximum supported downlink bandwidth (bps) */ u_int32_t config_inactivity_time; /* ms */ u_int32_t config_backoff_time; /* new connections backoff time in ms */ -#define IF_CELL_UL_MSS_RECOMMENDED_NONE 0x0 /* Use default */ -#define IF_CELL_UL_MSS_RECOMMENDED_MEDIUM 0x1 /* 1200 byte MSS */ -#define IF_CELL_UL_MSS_RECOMMENDED_LOW 0x2 /* 512 byte MSS */ +#define IF_CELL_UL_MSS_RECOMMENDED_NONE 0x0 /* Use default */ +#define IF_CELL_UL_MSS_RECOMMENDED_MEDIUM 0x1 /* 1200 byte MSS */ +#define IF_CELL_UL_MSS_RECOMMENDED_LOW 0x2 /* 512 byte MSS */ u_int16_t mss_recommended; u_int16_t reserved_1; u_int32_t reserved_2; @@ -516,25 +516,25 @@ struct if_cellular_status { struct if_wifi_status_v1 { u_int32_t valid_bitmask; -#define IF_WIFI_LINK_QUALITY_METRIC_VALID 0x1 -#define IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID 0x2 -#define IF_WIFI_UL_MAX_BANDWIDTH_VALID 0x4 -#define IF_WIFI_UL_MIN_LATENCY_VALID 0x8 -#define IF_WIFI_UL_EFFECTIVE_LATENCY_VALID 0x10 -#define IF_WIFI_UL_MAX_LATENCY_VALID 0x20 -#define IF_WIFI_UL_RETXT_LEVEL_VALID 0x40 -#define IF_WIFI_UL_ERROR_RATE_VALID 0x80 -#define IF_WIFI_UL_BYTES_LOST_VALID 0x100 -#define IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID 0x200 -#define IF_WIFI_DL_MAX_BANDWIDTH_VALID 0x400 -#define IF_WIFI_DL_MIN_LATENCY_VALID 0x800 -#define IF_WIFI_DL_EFFECTIVE_LATENCY_VALID 0x1000 -#define IF_WIFI_DL_MAX_LATENCY_VALID 0x2000 -#define IF_WIFI_DL_ERROR_RATE_VALID 0x4000 -#define IF_WIFI_CONFIG_FREQUENCY_VALID 0x8000 -#define IF_WIFI_CONFIG_MULTICAST_RATE_VALID 0x10000 -#define IF_WIFI_CONFIG_SCAN_COUNT_VALID 0x20000 -#define IF_WIFI_CONFIG_SCAN_DURATION_VALID 0x40000 +#define IF_WIFI_LINK_QUALITY_METRIC_VALID 0x1 +#define IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID 0x2 +#define IF_WIFI_UL_MAX_BANDWIDTH_VALID 0x4 +#define IF_WIFI_UL_MIN_LATENCY_VALID 0x8 +#define IF_WIFI_UL_EFFECTIVE_LATENCY_VALID 0x10 +#define IF_WIFI_UL_MAX_LATENCY_VALID 0x20 +#define IF_WIFI_UL_RETXT_LEVEL_VALID 0x40 +#define IF_WIFI_UL_ERROR_RATE_VALID 0x80 +#define IF_WIFI_UL_BYTES_LOST_VALID 0x100 +#define IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID 0x200 +#define IF_WIFI_DL_MAX_BANDWIDTH_VALID 0x400 +#define IF_WIFI_DL_MIN_LATENCY_VALID 0x800 +#define IF_WIFI_DL_EFFECTIVE_LATENCY_VALID 0x1000 +#define IF_WIFI_DL_MAX_LATENCY_VALID 0x2000 +#define IF_WIFI_DL_ERROR_RATE_VALID 0x4000 +#define IF_WIFI_CONFIG_FREQUENCY_VALID 0x8000 +#define IF_WIFI_CONFIG_MULTICAST_RATE_VALID 0x10000 +#define IF_WIFI_CONFIG_SCAN_COUNT_VALID 0x20000 +#define IF_WIFI_CONFIG_SCAN_DURATION_VALID 0x40000 u_int32_t link_quality_metric; /* link quality metric */ u_int32_t ul_effective_bandwidth; /* Measured uplink bandwidth based on current activity (bps) */ u_int32_t ul_max_bandwidth; /* Maximum supported uplink bandwidth (bps) */ @@ -542,10 +542,10 @@ struct if_wifi_status_v1 { u_int32_t 
ul_effective_latency; /* current expected uplink latency for first hop (ms) */ u_int32_t ul_max_latency; /* max expected uplink latency for first hop (ms) */ u_int32_t ul_retxt_level; /* Retransmission metric */ -#define IF_WIFI_UL_RETXT_LEVEL_NONE 1 -#define IF_WIFI_UL_RETXT_LEVEL_LOW 2 -#define IF_WIFI_UL_RETXT_LEVEL_MEDIUM 3 -#define IF_WIFI_UL_RETXT_LEVEL_HIGH 4 +#define IF_WIFI_UL_RETXT_LEVEL_NONE 1 +#define IF_WIFI_UL_RETXT_LEVEL_LOW 2 +#define IF_WIFI_UL_RETXT_LEVEL_MEDIUM 3 +#define IF_WIFI_UL_RETXT_LEVEL_HIGH 4 u_int32_t ul_bytes_lost; /* % of total bytes lost on uplink in Q10 format */ u_int32_t ul_error_rate; /* % of bytes dropped on uplink after many retransmissions in Q10 format */ u_int32_t dl_effective_bandwidth; /* Measured downlink bandwidth based on current activity (bps) */ @@ -561,8 +561,8 @@ struct if_wifi_status_v1 { u_int32_t dl_max_latency; /* max expected latency for first hop in ms */ u_int32_t dl_error_rate; /* % of CRC or other errors in Q10 format */ u_int32_t config_frequency; /* 2.4 or 5 GHz */ -#define IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ 1 -#define IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ 2 +#define IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ 1 +#define IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ 2 u_int32_t config_multicast_rate; /* bps */ u_int32_t scan_count; /* scan count during the previous period */ u_int32_t scan_duration; /* scan duration in ms */ @@ -579,8 +579,8 @@ struct if_wifi_status { }; struct if_link_status { - u_int32_t ifsr_version; /* version of this report */ - u_int32_t ifsr_len; /* length of the following struct */ + u_int32_t ifsr_version; /* version of this report */ + u_int32_t ifsr_len; /* length of the following struct */ union { struct if_cellular_status ifsr_cell; struct if_wifi_status ifsr_wifi; @@ -596,16 +596,16 @@ struct if_interface_state { * - When getting, it tells which fields are set. */ u_int8_t valid_bitmask; -#define IF_INTERFACE_STATE_RRC_STATE_VALID 0x1 -#define IF_INTERFACE_STATE_LQM_STATE_VALID 0x2 -#define IF_INTERFACE_STATE_INTERFACE_AVAILABILITY_VALID 0x4 +#define IF_INTERFACE_STATE_RRC_STATE_VALID 0x1 +#define IF_INTERFACE_STATE_LQM_STATE_VALID 0x2 +#define IF_INTERFACE_STATE_INTERFACE_AVAILABILITY_VALID 0x4 /* * Valid only for cellular interface */ u_int8_t rrc_state; -#define IF_INTERFACE_STATE_RRC_STATE_IDLE 0x0 -#define IF_INTERFACE_STATE_RRC_STATE_CONNECTED 0x1 +#define IF_INTERFACE_STATE_RRC_STATE_IDLE 0x0 +#define IF_INTERFACE_STATE_RRC_STATE_CONNECTED 0x1 /* * Values normalized to the edge of the following values @@ -621,17 +621,17 @@ struct if_interface_state { * available */ u_int8_t interface_availability; -#define IF_INTERFACE_STATE_INTERFACE_AVAILABLE 0x0 -#define IF_INTERFACE_STATE_INTERFACE_UNAVAILABLE 0x1 +#define IF_INTERFACE_STATE_INTERFACE_AVAILABLE 0x0 +#define IF_INTERFACE_STATE_INTERFACE_UNAVAILABLE 0x1 }; struct chain_len_stats { - uint64_t cls_one; - uint64_t cls_two; - uint64_t cls_three; - uint64_t cls_four; - uint64_t cls_five_or_more; -} __attribute__((__aligned__(sizeof (uint64_t)))); + uint64_t cls_one; + uint64_t cls_two; + uint64_t cls_three; + uint64_t cls_four; + uint64_t cls_five_or_more; +} __attribute__((__aligned__(sizeof(uint64_t)))); #endif /* PRIVATE */ @@ -640,16 +640,16 @@ struct chain_len_stats { /* * Structure defining a queue for a network interface. 
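/*
 * [Editorial sketch -- not part of the patch.] if_interface_state (like the
 * if_cellular_status_v1 / if_wifi_status_v1 reports above) uses valid_bitmask
 * as a presence mask: a field is meaningful only when its *_VALID bit is set.
 * A typical consumer-side check; lqm_state is the normalized link-quality
 * field elided from the hunk above, assumed from the surrounding definitions:
 */
static int
ifs_get_lqm(const struct if_interface_state *ifs)
{
	if (ifs->valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) {
		return ifs->lqm_state;	/* reported by the driver */
	}
	return -1;			/* not reported */
}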
*/ -struct ifqueue { - void *ifq_head; - void *ifq_tail; - int ifq_len; - int ifq_maxlen; - int ifq_drops; +struct ifqueue { + void *ifq_head; + void *ifq_tail; + int ifq_len; + int ifq_maxlen; + int ifq_drops; }; #ifdef BSD_KERNEL_PRIVATE -#define IFNETS_MAX 64 +#define IFNETS_MAX 64 /* * Internal storage of if_data. This is bound to change. Various places in the @@ -660,76 +660,76 @@ struct ifqueue { */ struct if_data_internal { /* generic interface information */ - u_char ifi_type; /* ethernet, tokenring, etc */ - u_char ifi_typelen; /* Length of frame type id */ - u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ - u_char ifi_addrlen; /* media address length */ - u_char ifi_hdrlen; /* media header length */ - u_char ifi_recvquota; /* polling quota for receive intrs */ - u_char ifi_xmitquota; /* polling quota for xmit intrs */ - u_char ifi_unused1; /* for future use */ - u_int32_t ifi_mtu; /* maximum transmission unit */ - u_int32_t ifi_metric; /* routing metric (external only) */ - u_int32_t ifi_baudrate; /* linespeed */ + u_char ifi_type; /* ethernet, tokenring, etc */ + u_char ifi_typelen; /* Length of frame type id */ + u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ + u_char ifi_addrlen; /* media address length */ + u_char ifi_hdrlen; /* media header length */ + u_char ifi_recvquota; /* polling quota for receive intrs */ + u_char ifi_xmitquota; /* polling quota for xmit intrs */ + u_char ifi_unused1; /* for future use */ + u_int32_t ifi_mtu; /* maximum transmission unit */ + u_int32_t ifi_metric; /* routing metric (external only) */ + u_int32_t ifi_baudrate; /* linespeed */ /* volatile statistics */ - u_int64_t ifi_ipackets; /* packets received on interface */ - u_int64_t ifi_ierrors; /* input errors on interface */ - u_int64_t ifi_opackets; /* packets sent on interface */ - u_int64_t ifi_oerrors; /* output errors on interface */ - u_int64_t ifi_collisions; /* collisions on csma interfaces */ - u_int64_t ifi_ibytes; /* total number of octets received */ - u_int64_t ifi_obytes; /* total number of octets sent */ - u_int64_t ifi_imcasts; /* packets received via multicast */ - u_int64_t ifi_omcasts; /* packets sent via multicast */ - u_int64_t ifi_iqdrops; /* dropped on input, this interface */ - u_int64_t ifi_noproto; /* destined for unsupported protocol */ - u_int32_t ifi_recvtiming; /* usec spent receiving when timing */ - u_int32_t ifi_xmittiming; /* usec spent xmitting when timing */ - u_int64_t ifi_alignerrs; /* unaligned (32-bit) input pkts */ - u_int64_t ifi_dt_bytes; /* Data threshold counter */ - u_int64_t ifi_fpackets; /* forwarded packets on interface */ - u_int64_t ifi_fbytes; /* forwarded bytes on interface */ - struct timeval ifi_lastchange; /* time of last administrative change */ - struct timeval ifi_lastupdown; /* time of last up/down event */ - u_int32_t ifi_hwassist; /* HW offload capabilities */ - u_int32_t ifi_tso_v4_mtu; /* TCP Segment Offload IPv4 maximum segment size */ - u_int32_t ifi_tso_v6_mtu; /* TCP Segment Offload IPv6 maximum segment size */ + u_int64_t ifi_ipackets; /* packets received on interface */ + u_int64_t ifi_ierrors; /* input errors on interface */ + u_int64_t ifi_opackets; /* packets sent on interface */ + u_int64_t ifi_oerrors; /* output errors on interface */ + u_int64_t ifi_collisions; /* collisions on csma interfaces */ + u_int64_t ifi_ibytes; /* total number of octets received */ + u_int64_t ifi_obytes; /* total number of octets sent */ + u_int64_t ifi_imcasts; /* packets received via multicast */ + u_int64_t 
ifi_omcasts; /* packets sent via multicast */ + u_int64_t ifi_iqdrops; /* dropped on input, this interface */ + u_int64_t ifi_noproto; /* destined for unsupported protocol */ + u_int32_t ifi_recvtiming; /* usec spent receiving when timing */ + u_int32_t ifi_xmittiming; /* usec spent xmitting when timing */ + u_int64_t ifi_alignerrs; /* unaligned (32-bit) input pkts */ + u_int64_t ifi_dt_bytes; /* Data threshold counter */ + u_int64_t ifi_fpackets; /* forwarded packets on interface */ + u_int64_t ifi_fbytes; /* forwarded bytes on interface */ + struct timeval ifi_lastchange; /* time of last administrative change */ + struct timeval ifi_lastupdown; /* time of last up/down event */ + u_int32_t ifi_hwassist; /* HW offload capabilities */ + u_int32_t ifi_tso_v4_mtu; /* TCP Segment Offload IPv4 maximum segment size */ + u_int32_t ifi_tso_v6_mtu; /* TCP Segment Offload IPv6 maximum segment size */ }; #endif /* BSD_KERNEL_PRIVATE */ #ifdef PRIVATE -#define if_mtu if_data.ifi_mtu -#define if_type if_data.ifi_type -#define if_typelen if_data.ifi_typelen -#define if_physical if_data.ifi_physical -#define if_addrlen if_data.ifi_addrlen -#define if_hdrlen if_data.ifi_hdrlen -#define if_metric if_data.ifi_metric -#define if_baudrate if_data.ifi_baudrate -#define if_hwassist if_data.ifi_hwassist -#define if_ipackets if_data.ifi_ipackets -#define if_ierrors if_data.ifi_ierrors -#define if_opackets if_data.ifi_opackets -#define if_oerrors if_data.ifi_oerrors -#define if_collisions if_data.ifi_collisions -#define if_ibytes if_data.ifi_ibytes -#define if_obytes if_data.ifi_obytes -#define if_imcasts if_data.ifi_imcasts -#define if_omcasts if_data.ifi_omcasts -#define if_iqdrops if_data.ifi_iqdrops -#define if_noproto if_data.ifi_noproto -#define if_lastchange if_data.ifi_lastchange -#define if_recvquota if_data.ifi_recvquota -#define if_xmitquota if_data.ifi_xmitquota +#define if_mtu if_data.ifi_mtu +#define if_type if_data.ifi_type +#define if_typelen if_data.ifi_typelen +#define if_physical if_data.ifi_physical +#define if_addrlen if_data.ifi_addrlen +#define if_hdrlen if_data.ifi_hdrlen +#define if_metric if_data.ifi_metric +#define if_baudrate if_data.ifi_baudrate +#define if_hwassist if_data.ifi_hwassist +#define if_ipackets if_data.ifi_ipackets +#define if_ierrors if_data.ifi_ierrors +#define if_opackets if_data.ifi_opackets +#define if_oerrors if_data.ifi_oerrors +#define if_collisions if_data.ifi_collisions +#define if_ibytes if_data.ifi_ibytes +#define if_obytes if_data.ifi_obytes +#define if_imcasts if_data.ifi_imcasts +#define if_omcasts if_data.ifi_omcasts +#define if_iqdrops if_data.ifi_iqdrops +#define if_noproto if_data.ifi_noproto +#define if_lastchange if_data.ifi_lastchange +#define if_recvquota if_data.ifi_recvquota +#define if_xmitquota if_data.ifi_xmitquota #endif /* PRIVATE */ #ifdef BSD_KERNEL_PRIVATE -#define if_tso_v4_mtu if_data.ifi_tso_v4_mtu -#define if_tso_v6_mtu if_data.ifi_tso_v6_mtu -#define if_alignerrs if_data.ifi_alignerrs -#define if_dt_bytes if_data.ifi_dt_bytes -#define if_fpackets if_data.ifi_fpackets -#define if_fbytes if_data.ifi_fbytes -#define if_lastupdown if_data.ifi_lastupdown +#define if_tso_v4_mtu if_data.ifi_tso_v4_mtu +#define if_tso_v6_mtu if_data.ifi_tso_v6_mtu +#define if_alignerrs if_data.ifi_alignerrs +#define if_dt_bytes if_data.ifi_dt_bytes +#define if_fpackets if_data.ifi_fpackets +#define if_fbytes if_data.ifi_fbytes +#define if_lastupdown if_data.ifi_lastupdown #endif /* BSD_KERNEL_PRIVATE */ #ifdef BSD_KERNEL_PRIVATE @@ -772,31 +772,31 @@ extern 
boolean_t intcoproc_unrestricted; * * Bottom 16 bits reserved for hardware checksum */ -#define IF_HWASSIST_CSUM_IP 0x0001 /* will csum IP, IFNET_CSUM_IP */ -#define IF_HWASSIST_CSUM_TCP 0x0002 /* will csum TCP, IFNET_CSUM_TCP */ -#define IF_HWASSIST_CSUM_UDP 0x0004 /* will csum UDP, IFNET_CSUM_UDP */ -#define IF_HWASSIST_CSUM_IP_FRAGS 0x0008 /* will csum IP fragments, IFNET_CSUM_FRAGMENT */ -#define IF_HWASSIST_CSUM_FRAGMENT 0x0010 /* will do IP fragmentation, IFNET_IP_FRAGMENT */ -#define IF_HWASSIST_CSUM_TCPIPV6 0x0020 /* will csum TCPv6, IFNET_CSUM_TCPIPV6 */ -#define IF_HWASSIST_CSUM_UDPIPV6 0x0040 /* will csum UDPv6, IFNET_CSUM_UDP */ -#define IF_HWASSIST_CSUM_FRAGMENT_IPV6 0x0080 /* will do IPv6 fragmentation, IFNET_IPV6_FRAGMENT */ -#define IF_HWASSIST_CSUM_PARTIAL 0x1000 /* simple Sum16 computation, IFNET_CSUM_PARTIAL */ -#define IF_HWASSIST_CSUM_ZERO_INVERT 0x2000 /* capable of inverting csum of 0 to -0 (0xffff) */ -#define IF_HWASSIST_CSUM_MASK 0xffff -#define IF_HWASSIST_CSUM_FLAGS(hwassist) ((hwassist) & IF_HWASSIST_CSUM_MASK) +#define IF_HWASSIST_CSUM_IP 0x0001 /* will csum IP, IFNET_CSUM_IP */ +#define IF_HWASSIST_CSUM_TCP 0x0002 /* will csum TCP, IFNET_CSUM_TCP */ +#define IF_HWASSIST_CSUM_UDP 0x0004 /* will csum UDP, IFNET_CSUM_UDP */ +#define IF_HWASSIST_CSUM_IP_FRAGS 0x0008 /* will csum IP fragments, IFNET_CSUM_FRAGMENT */ +#define IF_HWASSIST_CSUM_FRAGMENT 0x0010 /* will do IP fragmentation, IFNET_IP_FRAGMENT */ +#define IF_HWASSIST_CSUM_TCPIPV6 0x0020 /* will csum TCPv6, IFNET_CSUM_TCPIPV6 */ +#define IF_HWASSIST_CSUM_UDPIPV6 0x0040 /* will csum UDPv6, IFNET_CSUM_UDP */ +#define IF_HWASSIST_CSUM_FRAGMENT_IPV6 0x0080 /* will do IPv6 fragmentation, IFNET_IPV6_FRAGMENT */ +#define IF_HWASSIST_CSUM_PARTIAL 0x1000 /* simple Sum16 computation, IFNET_CSUM_PARTIAL */ +#define IF_HWASSIST_CSUM_ZERO_INVERT 0x2000 /* capable of inverting csum of 0 to -0 (0xffff) */ +#define IF_HWASSIST_CSUM_MASK 0xffff +#define IF_HWASSIST_CSUM_FLAGS(hwassist) ((hwassist) & IF_HWASSIST_CSUM_MASK) /* VLAN support */ -#define IF_HWASSIST_VLAN_TAGGING 0x00010000 /* supports VLAN tagging, IFNET_VLAN_TAGGING */ -#define IF_HWASSIST_VLAN_MTU 0x00020000 /* supports VLAN MTU-sized packet (for software VLAN), IFNET_VLAN_MTU */ +#define IF_HWASSIST_VLAN_TAGGING 0x00010000 /* supports VLAN tagging, IFNET_VLAN_TAGGING */ +#define IF_HWASSIST_VLAN_MTU 0x00020000 /* supports VLAN MTU-sized packet (for software VLAN), IFNET_VLAN_MTU */ /* TCP Segment Offloading support */ -#define IF_HWASSIST_TSO_V4 0x00200000 /* will do TCP Segment offload for IPv4, IFNET_TSO_IPV4 */ -#define IF_HWASSIST_TSO_V6 0x00400000 /* will do TCP Segment offload for IPv6, IFNET_TSO_IPV6 */ +#define IF_HWASSIST_TSO_V4 0x00200000 /* will do TCP Segment offload for IPv4, IFNET_TSO_IPV4 */ +#define IF_HWASSIST_TSO_V6 0x00400000 /* will do TCP Segment offload for IPv6, IFNET_TSO_IPV6 */ #endif /* PRIVATE */ #ifdef PRIVATE -#define IFXNAMSIZ (IFNAMSIZ + 8) /* external name (name + unit) */ +#define IFXNAMSIZ (IFNAMSIZ + 8) /* external name (name + unit) */ #endif #ifdef BSD_KERNEL_PRIVATE @@ -810,7 +810,7 @@ extern boolean_t intcoproc_unrestricted; #include #include -RB_HEAD(ll_reach_tree, if_llreach); /* define struct ll_reach_tree */ +RB_HEAD(ll_reach_tree, if_llreach); /* define struct ll_reach_tree */ typedef errno_t (*dlil_input_func)(ifnet_t ifp, mbuf_t m_head, @@ -818,7 +818,7 @@ typedef errno_t (*dlil_input_func)(ifnet_t ifp, mbuf_t m_head, boolean_t poll, struct thread *tp); typedef errno_t (*dlil_output_func)(ifnet_t interface, 
mbuf_t data); -#define if_name(ifp) ifp->if_xname +#define if_name(ifp) ifp->if_xname /* * Structure defining a network interface. * @@ -829,200 +829,200 @@ struct ifnet { * Lock (RW or mutex) to protect this data structure (static storage.) */ decl_lck_rw_data(, if_lock); - void *if_softc; /* pointer to driver state */ - const char *if_name; /* name, e.g. ``en'' or ``lo'' */ - const char *if_xname; /* external name (name + unit) */ - struct if_description if_desc; /* extended description */ - TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */ + void *if_softc; /* pointer to driver state */ + const char *if_name; /* name, e.g. ``en'' or ``lo'' */ + const char *if_xname; /* external name (name + unit) */ + struct if_description if_desc; /* extended description */ + TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */ TAILQ_ENTRY(ifnet) if_detaching_link; /* list of detaching ifnets */ - TAILQ_ENTRY(ifnet) if_ordered_link; /* list of ordered ifnets */ + TAILQ_ENTRY(ifnet) if_ordered_link; /* list of ordered ifnets */ decl_lck_mtx_data(, if_ref_lock) - u_int32_t if_refflags; /* see IFRF flags below */ - u_int32_t if_refio; /* number of io ops to the underlying driver */ + u_int32_t if_refflags; /* see IFRF flags below */ + u_int32_t if_refio; /* number of io ops to the underlying driver */ -#define if_list if_link - struct ifaddrhead if_addrhead; /* linked list of addresses per if */ -#define if_addrlist if_addrhead - struct ifaddr *if_lladdr; /* link address (first/permanent) */ +#define if_list if_link + struct ifaddrhead if_addrhead; /* linked list of addresses per if */ +#define if_addrlist if_addrhead + struct ifaddr *if_lladdr; /* link address (first/permanent) */ - u_int32_t if_qosmarking_mode; /* generation to use with NECP clients */ + u_int32_t if_qosmarking_mode; /* generation to use with NECP clients */ - int if_pcount; /* number of promiscuous listeners */ - struct bpf_if *if_bpf; /* packet filter structure */ - u_short if_index; /* numeric abbreviation for this if */ - short if_unit; /* sub-unit for lower level driver */ - short if_timer; /* time 'til if_watchdog called */ - short if_flags; /* up/down, broadcast, etc. */ - u_int32_t if_eflags; /* see */ - u_int32_t if_xflags; /* see */ + int if_pcount; /* number of promiscuous listeners */ + struct bpf_if *if_bpf; /* packet filter structure */ + u_short if_index; /* numeric abbreviation for this if */ + short if_unit; /* sub-unit for lower level driver */ + short if_timer; /* time 'til if_watchdog called */ + short if_flags; /* up/down, broadcast, etc. 
*/ + u_int32_t if_eflags; /* see */ + u_int32_t if_xflags; /* see */ - int if_capabilities; /* interface features & capabilities */ - int if_capenable; /* enabled features & capabilities */ + int if_capabilities; /* interface features & capabilities */ + int if_capenable; /* enabled features & capabilities */ - void *if_linkmib; /* link-type-specific MIB data */ - size_t if_linkmiblen; /* length of above data */ + void *if_linkmib; /* link-type-specific MIB data */ + size_t if_linkmiblen; /* length of above data */ struct if_data_internal if_data __attribute__((aligned(8))); - ifnet_family_t if_family; /* value assigned by Apple */ - ifnet_subfamily_t if_subfamily; /* value assigned by Apple */ - uintptr_t if_family_cookie; + ifnet_family_t if_family; /* value assigned by Apple */ + ifnet_subfamily_t if_subfamily; /* value assigned by Apple */ + uintptr_t if_family_cookie; volatile dlil_input_func if_input_dlil; volatile dlil_output_func if_output_dlil; volatile ifnet_start_func if_start; - ifnet_output_func if_output; - ifnet_pre_enqueue_func if_pre_enqueue; - ifnet_ctl_func if_output_ctl; - ifnet_input_poll_func if_input_poll; - ifnet_ctl_func if_input_ctl; - ifnet_ioctl_func if_ioctl; - ifnet_set_bpf_tap if_set_bpf_tap; - ifnet_detached_func if_free; - ifnet_demux_func if_demux; - ifnet_event_func if_event; - ifnet_framer_func if_framer_legacy; + ifnet_output_func if_output; + ifnet_pre_enqueue_func if_pre_enqueue; + ifnet_ctl_func if_output_ctl; + ifnet_input_poll_func if_input_poll; + ifnet_ctl_func if_input_ctl; + ifnet_ioctl_func if_ioctl; + ifnet_set_bpf_tap if_set_bpf_tap; + ifnet_detached_func if_free; + ifnet_demux_func if_demux; + ifnet_event_func if_event; + ifnet_framer_func if_framer_legacy; ifnet_framer_extended_func if_framer; - ifnet_add_proto_func if_add_proto; - ifnet_del_proto_func if_del_proto; - ifnet_check_multi if_check_multi; - struct proto_hash_entry *if_proto_hash; - void *if_kpi_storage; + ifnet_add_proto_func if_add_proto; + ifnet_del_proto_func if_del_proto; + ifnet_check_multi if_check_multi; + struct proto_hash_entry *if_proto_hash; + void *if_kpi_storage; - u_int32_t if_flowhash; /* interface flow control ID */ + u_int32_t if_flowhash; /* interface flow control ID */ decl_lck_mtx_data(, if_start_lock); - u_int32_t if_start_flags; /* see IFSF flags below */ - u_int32_t if_start_req; - u_int16_t if_start_active; /* output is active */ - u_int16_t if_start_delayed; - u_int16_t if_start_delay_qlen; - u_int16_t if_start_delay_idle; - u_int64_t if_start_delay_swin; - u_int32_t if_start_delay_cnt; - u_int32_t if_start_delay_timeout; /* nanoseconds */ - struct timespec if_start_cycle; /* restart interval */ - struct thread *if_start_thread; - - struct ifclassq if_snd; /* transmit queue */ - u_int32_t if_output_sched_model; /* tx sched model */ - - struct if_bandwidths if_output_bw; - struct if_bandwidths if_input_bw; - - struct if_latencies if_output_lt; - struct if_latencies if_input_lt; + u_int32_t if_start_flags; /* see IFSF flags below */ + u_int32_t if_start_req; + u_int16_t if_start_active; /* output is active */ + u_int16_t if_start_delayed; + u_int16_t if_start_delay_qlen; + u_int16_t if_start_delay_idle; + u_int64_t if_start_delay_swin; + u_int32_t if_start_delay_cnt; + u_int32_t if_start_delay_timeout; /* nanoseconds */ + struct timespec if_start_cycle; /* restart interval */ + struct thread *if_start_thread; + + struct ifclassq if_snd; /* transmit queue */ + u_int32_t if_output_sched_model; /* tx sched model */ + + struct if_bandwidths if_output_bw; + 
struct if_bandwidths if_input_bw; + + struct if_latencies if_output_lt; + struct if_latencies if_input_lt; decl_lck_mtx_data(, if_flt_lock) - u_int32_t if_flt_busy; - u_int32_t if_flt_waiters; + u_int32_t if_flt_busy; + u_int32_t if_flt_waiters; struct ifnet_filter_head if_flt_head; - struct ifmultihead if_multiaddrs; /* multicast addresses */ - u_int32_t if_updatemcasts; /* mcast addrs need updating */ - int if_amcount; /* # of all-multicast reqs */ + struct ifmultihead if_multiaddrs; /* multicast addresses */ + u_int32_t if_updatemcasts; /* mcast addrs need updating */ + int if_amcount; /* # of all-multicast reqs */ decl_lck_mtx_data(, if_addrconfig_lock); /* for serializing addr config */ - struct in_multi *if_allhostsinm; /* store all-hosts inm for this ifp */ + struct in_multi *if_allhostsinm; /* store all-hosts inm for this ifp */ decl_lck_mtx_data(, if_poll_lock); - u_int16_t if_poll_req; - u_int16_t if_poll_update; /* link update */ - u_int32_t if_poll_active; /* polling is active */ - struct timespec if_poll_cycle; /* poll interval */ - struct thread *if_poll_thread; + u_int16_t if_poll_req; + u_int16_t if_poll_update; /* link update */ + u_int32_t if_poll_active; /* polling is active */ + struct timespec if_poll_cycle; /* poll interval */ + struct thread *if_poll_thread; struct dlil_threading_info *if_inp; /* allocated once along with dlil_ifnet and is never freed */ - thread_call_t if_dt_tcall; + thread_call_t if_dt_tcall; struct { - u_int32_t length; + u_int32_t length; union { - u_char buffer[8]; - u_char *ptr; + u_char buffer[8]; + u_char *ptr; } u; } if_broadcast; #if CONFIG_MACF_NET - struct label *if_label; /* interface MAC label */ + struct label *if_label; /* interface MAC label */ #endif #if PF - struct pfi_kif *if_pf_kif; + struct pfi_kif *if_pf_kif; #endif /* PF */ decl_lck_mtx_data(, if_cached_route_lock); - u_int32_t if_fwd_cacheok; - struct route if_fwd_route; /* cached forwarding route */ - struct route if_src_route; /* cached ipv4 source route */ - struct route_in6 if_src_route6; /* cached ipv6 source route */ + u_int32_t if_fwd_cacheok; + struct route if_fwd_route; /* cached forwarding route */ + struct route if_src_route; /* cached ipv4 source route */ + struct route_in6 if_src_route6; /* cached ipv6 source route */ decl_lck_rw_data(, if_llreach_lock); - struct ll_reach_tree if_ll_srcs; /* source link-layer tree */ + struct ll_reach_tree if_ll_srcs; /* source link-layer tree */ - void *if_bridge; /* bridge glue */ + void *if_bridge; /* bridge glue */ - u_int32_t if_want_aggressive_drain; - u_int32_t if_idle_flags; /* idle flags */ - u_int32_t if_idle_new_flags; /* temporary idle flags */ - u_int32_t if_idle_new_flags_mask; /* temporary mask */ - u_int32_t if_route_refcnt; /* idle: route ref count */ - u_int32_t if_rt_sendts; /* last of a real time packet */ + u_int32_t if_want_aggressive_drain; + u_int32_t if_idle_flags; /* idle flags */ + u_int32_t if_idle_new_flags; /* temporary idle flags */ + u_int32_t if_idle_new_flags_mask; /* temporary mask */ + u_int32_t if_route_refcnt; /* idle: route ref count */ + u_int32_t if_rt_sendts; /* last of a real time packet */ struct if_traffic_class if_tc __attribute__((aligned(8))); #if INET - struct igmp_ifinfo *if_igi; /* for IGMPv3 */ + struct igmp_ifinfo *if_igi; /* for IGMPv3 */ #endif /* INET */ #if INET6 - struct mld_ifinfo *if_mli; /* for MLDv2 */ + struct mld_ifinfo *if_mli; /* for MLDv2 */ #endif /* INET6 */ - struct tcpstat_local *if_tcp_stat; /* TCP specific stats */ - struct udpstat_local *if_udp_stat; /* UDP 
specific stats */ + struct tcpstat_local *if_tcp_stat; /* TCP specific stats */ + struct udpstat_local *if_udp_stat; /* UDP specific stats */ struct { - int32_t level; /* cached logging level */ - u_int32_t flags; /* cached logging flags */ - int32_t category; /* cached category */ - int32_t subcategory; /* cached subcategory */ + int32_t level; /* cached logging level */ + u_int32_t flags; /* cached logging flags */ + int32_t category; /* cached category */ + int32_t subcategory; /* cached subcategory */ } if_log; struct { - struct ifnet *ifp; /* delegated ifp */ - u_int32_t type; /* delegated i/f type */ - u_int32_t family; /* delegated i/f family */ - u_int32_t subfamily; /* delegated i/f sub-family */ - uint32_t expensive:1; /* delegated i/f expensive? */ + struct ifnet *ifp; /* delegated ifp */ + u_int32_t type; /* delegated i/f type */ + u_int32_t family; /* delegated i/f family */ + u_int32_t subfamily; /* delegated i/f sub-family */ + uint32_t expensive:1; /* delegated i/f expensive? */ } if_delegated; - uuid_t *if_agentids; /* network agents attached to interface */ - u_int32_t if_agentcount; + uuid_t *if_agentids; /* network agents attached to interface */ + u_int32_t if_agentcount; - volatile uint32_t if_low_power_gencnt; + volatile uint32_t if_low_power_gencnt; - u_int32_t if_generation; /* generation to use with NECP clients */ - u_int32_t if_fg_sendts; /* last send on a fg socket in seconds */ + u_int32_t if_generation; /* generation to use with NECP clients */ + u_int32_t if_fg_sendts; /* last send on a fg socket in seconds */ - u_int64_t if_data_threshold; + u_int64_t if_data_threshold; /* Total bytes in send socket buffer */ - int64_t if_sndbyte_total __attribute__ ((aligned(8))); + int64_t if_sndbyte_total __attribute__ ((aligned(8))); /* Total unsent bytes in send socket buffer */ - int64_t if_sndbyte_unsent __attribute__ ((aligned(8))); + int64_t if_sndbyte_unsent __attribute__ ((aligned(8))); /* count of times, when there was data to send when sleep is impending */ - uint32_t if_unsent_data_cnt; + uint32_t if_unsent_data_cnt; #if INET decl_lck_rw_data(, if_inetdata_lock); - void *if_inetdata; + void *if_inetdata; #endif /* INET */ #if INET6 decl_lck_rw_data(, if_inet6data_lock); - void *if_inet6data; + void *if_inet6data; #endif decl_lck_rw_data(, if_link_status_lock); - struct if_link_status *if_link_status; - struct if_interface_state if_interface_state; + struct if_link_status *if_link_status; + struct if_interface_state if_interface_state; struct if_tcp_ecn_stat *if_ipv4_stat; struct if_tcp_ecn_stat *if_ipv6_stat; @@ -1046,65 +1046,65 @@ typedef enum { typedef void (*ifnet_event_fn)(struct eventhandler_entry_arg, struct ifnet *, struct sockaddr *, intf_event_code_t); EVENTHANDLER_DECLARE(ifnet_event, ifnet_event_fn); -#define IF_TCP_STATINC(_ifp, _s) do { \ - if ((_ifp)->if_tcp_stat != NULL) \ - atomic_add_64(&(_ifp)->if_tcp_stat->_s, 1); \ +#define IF_TCP_STATINC(_ifp, _s) do { \ + if ((_ifp)->if_tcp_stat != NULL) \ + atomic_add_64(&(_ifp)->if_tcp_stat->_s, 1); \ } while (0); -#define IF_UDP_STATINC(_ifp, _s) do { \ - if ((_ifp)->if_udp_stat != NULL) \ - atomic_add_64(&(_ifp)->if_udp_stat->_s, 1); \ +#define IF_UDP_STATINC(_ifp, _s) do { \ + if ((_ifp)->if_udp_stat != NULL) \ + atomic_add_64(&(_ifp)->if_udp_stat->_s, 1); \ } while (0); /* * Valid values for if_refflags */ -#define IFRF_EMBRYONIC 0x1 /* ifnet is allocated; awaiting attach */ -#define IFRF_ATTACHED 0x2 /* ifnet attach is completely done */ -#define IFRF_DETACHING 0x4 /* detach has been requested */ 
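/*
 * [Editorial note -- not part of the patch.] The IFRF_* bits encode the ifnet
 * attach lifecycle, and IF_FULLY_ATTACHED() below is true only in the steady
 * state: IFRF_ATTACHED set with both IFRF_EMBRYONIC and IFRF_DETACHING clear.
 *
 *	if_refflags				IF_FULLY_ATTACHED()
 *	IFRF_EMBRYONIC				false (attach in progress)
 *	IFRF_ATTACHED				true
 *	IFRF_ATTACHED | IFRF_DETACHING		false (detach requested)
 */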
-#define IFRF_ATTACH_MASK \ +#define IFRF_EMBRYONIC 0x1 /* ifnet is allocated; awaiting attach */ +#define IFRF_ATTACHED 0x2 /* ifnet attach is completely done */ +#define IFRF_DETACHING 0x4 /* detach has been requested */ +#define IFRF_ATTACH_MASK \ (IFRF_EMBRYONIC|IFRF_ATTACHED|IFRF_DETACHING) -#define IF_FULLY_ATTACHED(_ifp) \ +#define IF_FULLY_ATTACHED(_ifp) \ (((_ifp)->if_refflags & IFRF_ATTACH_MASK) == IFRF_ATTACHED) /* * Valid values for if_start_flags */ -#define IFSF_FLOW_CONTROLLED 0x1 /* flow controlled */ +#define IFSF_FLOW_CONTROLLED 0x1 /* flow controlled */ /* * Structure describing a `cloning' interface. */ struct if_clone { - LIST_ENTRY(if_clone) ifc_list; /* on list of cloners */ - decl_lck_mtx_data(, ifc_mutex); /* To serialize clone create/delete */ - const char *ifc_name; /* name of device, e.g. `vlan' */ - size_t ifc_namelen; /* length of name */ - u_int32_t ifc_minifs; /* minimum number of interfaces */ - u_int32_t ifc_maxunit; /* maximum unit number */ - unsigned char *ifc_units; /* bitmap to handle units */ - u_int32_t ifc_bmlen; /* bitmap length */ - u_int32_t ifc_zone_max_elem; /* Max elements for this zone type */ - u_int32_t ifc_softc_size; /* size of softc for the device */ - struct zone *ifc_zone; /* if_clone allocation zone */ - int (*ifc_create)(struct if_clone *, u_int32_t, void *); - int (*ifc_destroy)(struct ifnet *); + LIST_ENTRY(if_clone) ifc_list; /* on list of cloners */ + decl_lck_mtx_data(, ifc_mutex); /* To serialize clone create/delete */ + const char *ifc_name; /* name of device, e.g. `vlan' */ + size_t ifc_namelen; /* length of name */ + u_int32_t ifc_minifs; /* minimum number of interfaces */ + u_int32_t ifc_maxunit; /* maximum unit number */ + unsigned char *ifc_units; /* bitmap to handle units */ + u_int32_t ifc_bmlen; /* bitmap length */ + u_int32_t ifc_zone_max_elem; /* Max elements for this zone type */ + u_int32_t ifc_softc_size; /* size of softc for the device */ + struct zone *ifc_zone; /* if_clone allocation zone */ + int (*ifc_create)(struct if_clone *, u_int32_t, void *); + int (*ifc_destroy)(struct ifnet *); }; -#define IF_CLONE_INITIALIZER(name, create, destroy, minifs, maxunit, zone_max_elem, softc_size) { \ - .ifc_list = { NULL, NULL }, \ - .ifc_mutex = {}, \ - .ifc_name = name, \ - .ifc_namelen = (sizeof (name) - 1), \ - .ifc_minifs = minifs, \ - .ifc_maxunit = maxunit, \ - .ifc_units = NULL, \ - .ifc_bmlen = 0, \ - .ifc_zone_max_elem = zone_max_elem, \ - .ifc_softc_size = softc_size, \ - .ifc_zone = NULL, \ - .ifc_create = create, \ - .ifc_destroy = destroy \ +#define IF_CLONE_INITIALIZER(name, create, destroy, minifs, maxunit, zone_max_elem, softc_size) { \ + .ifc_list = { NULL, NULL }, \ + .ifc_mutex = {}, \ + .ifc_name = name, \ + .ifc_namelen = (sizeof (name) - 1), \ + .ifc_minifs = minifs, \ + .ifc_maxunit = maxunit, \ + .ifc_units = NULL, \ + .ifc_bmlen = 0, \ + .ifc_zone_max_elem = zone_max_elem, \ + .ifc_softc_size = softc_size, \ + .ifc_zone = NULL, \ + .ifc_create = create, \ + .ifc_destroy = destroy \ } #define M_CLONE M_IFADDR @@ -1114,68 +1114,68 @@ struct if_clone { * for serialization, by holding whatever lock is appropriate for the * corresponding structure that is referring the ifqueue. 
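/*
 * [Editorial sketch -- not part of the patch.] IF_CLONE_INITIALIZER above is
 * the static initializer cloning drivers use to register a device name space;
 * a hypothetical "foo" cloner allowing units foo0..foo31, with no interfaces
 * preallocated, might look like:
 */
struct foo_softc {
	struct ifnet	*sc_ifp;	/* hypothetical per-instance state */
};

static int foo_clone_create(struct if_clone *, u_int32_t, void *);
static int foo_clone_destroy(struct ifnet *);

static struct if_clone foo_cloner =
    IF_CLONE_INITIALIZER("foo",
    foo_clone_create, foo_clone_destroy,
    0,		/* ifc_minifs: none created at init */
    31,		/* ifc_maxunit */
    0,		/* ifc_zone_max_elem */
    sizeof(struct foo_softc));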
*/ -#define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) -#define IF_DROP(ifq) ((ifq)->ifq_drops++) - -#define IF_ENQUEUE(ifq, m) do { \ - (m)->m_nextpkt = NULL; \ - if ((ifq)->ifq_tail == NULL) \ - (ifq)->ifq_head = m; \ - else \ - ((struct mbuf*)(ifq)->ifq_tail)->m_nextpkt = m; \ - (ifq)->ifq_tail = m; \ - (ifq)->ifq_len++; \ +#define IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) +#define IF_DROP(ifq) ((ifq)->ifq_drops++) + +#define IF_ENQUEUE(ifq, m) do { \ + (m)->m_nextpkt = NULL; \ + if ((ifq)->ifq_tail == NULL) \ + (ifq)->ifq_head = m; \ + else \ + ((struct mbuf*)(ifq)->ifq_tail)->m_nextpkt = m; \ + (ifq)->ifq_tail = m; \ + (ifq)->ifq_len++; \ } while (0) -#define IF_PREPEND(ifq, m) do { \ - (m)->m_nextpkt = (ifq)->ifq_head; \ - if ((ifq)->ifq_tail == NULL) \ - (ifq)->ifq_tail = (m); \ - (ifq)->ifq_head = (m); \ - (ifq)->ifq_len++; \ +#define IF_PREPEND(ifq, m) do { \ + (m)->m_nextpkt = (ifq)->ifq_head; \ + if ((ifq)->ifq_tail == NULL) \ + (ifq)->ifq_tail = (m); \ + (ifq)->ifq_head = (m); \ + (ifq)->ifq_len++; \ } while (0) -#define IF_DEQUEUE(ifq, m) do { \ - (m) = (ifq)->ifq_head; \ - if (m != NULL) { \ - if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL) \ - (ifq)->ifq_tail = NULL; \ - (m)->m_nextpkt = NULL; \ - (ifq)->ifq_len--; \ - } \ +#define IF_DEQUEUE(ifq, m) do { \ + (m) = (ifq)->ifq_head; \ + if (m != NULL) { \ + if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL) \ + (ifq)->ifq_tail = NULL; \ + (m)->m_nextpkt = NULL; \ + (ifq)->ifq_len--; \ + } \ } while (0) -#define IF_REMQUEUE(ifq, m) do { \ - struct mbuf *_p = (ifq)->ifq_head; \ - struct mbuf *_n = (m)->m_nextpkt; \ - if ((m) == _p) \ - _p = NULL; \ - while (_p != NULL) { \ - if (_p->m_nextpkt == (m)) \ - break; \ - _p = _p->m_nextpkt; \ - } \ - VERIFY(_p != NULL || ((m) == (ifq)->ifq_head)); \ - if ((m) == (ifq)->ifq_head) \ - (ifq)->ifq_head = _n; \ - if ((m) == (ifq)->ifq_tail) \ - (ifq)->ifq_tail = _p; \ - VERIFY((ifq)->ifq_tail != NULL || (ifq)->ifq_head == NULL); \ - VERIFY((ifq)->ifq_len != 0); \ - --(ifq)->ifq_len; \ - if (_p != NULL) \ - _p->m_nextpkt = _n; \ - (m)->m_nextpkt = NULL; \ +#define IF_REMQUEUE(ifq, m) do { \ + struct mbuf *_p = (ifq)->ifq_head; \ + struct mbuf *_n = (m)->m_nextpkt; \ + if ((m) == _p) \ + _p = NULL; \ + while (_p != NULL) { \ + if (_p->m_nextpkt == (m)) \ + break; \ + _p = _p->m_nextpkt; \ + } \ + VERIFY(_p != NULL || ((m) == (ifq)->ifq_head)); \ + if ((m) == (ifq)->ifq_head) \ + (ifq)->ifq_head = _n; \ + if ((m) == (ifq)->ifq_tail) \ + (ifq)->ifq_tail = _p; \ + VERIFY((ifq)->ifq_tail != NULL || (ifq)->ifq_head == NULL); \ + VERIFY((ifq)->ifq_len != 0); \ + --(ifq)->ifq_len; \ + if (_p != NULL) \ + _p->m_nextpkt = _n; \ + (m)->m_nextpkt = NULL; \ } while (0) -#define IF_DRAIN(ifq) do { \ - struct mbuf *_m; \ - for (;;) { \ - IF_DEQUEUE(ifq, _m); \ - if (_m == NULL) \ - break; \ - m_freem(_m); \ - } \ +#define IF_DRAIN(ifq) do { \ + struct mbuf *_m; \ + for (;;) { \ + IF_DEQUEUE(ifq, _m); \ + if (_m == NULL) \ + break; \ + m_freem(_m); \ + } \ } while (0) /* @@ -1185,22 +1185,22 @@ struct if_clone { * together so all addresses for an interface can be located. 
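/*
 * [Editorial sketch -- not part of the patch.] The IF_* macros above implement
 * a plain FIFO threaded through m_nextpkt; serialization is the caller's job,
 * per the comment above.  A typical guarded enqueue (the function name is
 * hypothetical):
 */
static void
ifq_enqueue_or_drop(struct ifqueue *ifq, struct mbuf *m)
{
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);		/* bump ifq_drops */
		m_freem(m);		/* queue full: drop the packet */
	} else {
		IF_ENQUEUE(ifq, m);	/* append at ifq_tail, ifq_len++ */
	}
}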
*/ struct ifaddr { - decl_lck_mtx_data(, ifa_lock); /* lock for ifaddr */ - uint32_t ifa_refcnt; /* ref count, use IFA_{ADD,REM}REF */ - uint32_t ifa_debug; /* debug flags */ - struct sockaddr *ifa_addr; /* address of interface */ - struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */ -#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */ - struct sockaddr *ifa_netmask; /* used to determine subnet */ - struct ifnet *ifa_ifp; /* back-pointer to interface */ - TAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */ - void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */ - (int, struct rtentry *, struct sockaddr *); - uint32_t ifa_flags; /* mostly rt_flags for cloning */ - int32_t ifa_metric; /* cost of going out this interface */ + decl_lck_mtx_data(, ifa_lock); /* lock for ifaddr */ + uint32_t ifa_refcnt; /* ref count, use IFA_{ADD,REM}REF */ + uint32_t ifa_debug; /* debug flags */ + struct sockaddr *ifa_addr; /* address of interface */ + struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */ +#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */ + struct sockaddr *ifa_netmask; /* used to determine subnet */ + struct ifnet *ifa_ifp; /* back-pointer to interface */ + TAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */ + void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */ + (int, struct rtentry *, struct sockaddr *); + uint32_t ifa_flags; /* mostly rt_flags for cloning */ + int32_t ifa_metric; /* cost of going out this interface */ void (*ifa_free)(struct ifaddr *); /* callback fn for freeing */ - void (*ifa_trace) /* callback fn for tracing refs */ - (struct ifaddr *, int); + void (*ifa_trace) /* callback fn for tracing refs */ + (struct ifaddr *, int); void (*ifa_attached)(struct ifaddr *); /* callback fn for attaching */ void (*ifa_detached)(struct ifaddr *); /* callback fn for detaching */ #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) @@ -1218,51 +1218,51 @@ struct ifaddr { /* * Valid values for ifa_flags */ -#define IFA_ROUTE RTF_UP /* route installed (0x1) */ -#define IFA_CLONING RTF_CLONING /* (0x100) */ +#define IFA_ROUTE RTF_UP /* route installed (0x1) */ +#define IFA_CLONING RTF_CLONING /* (0x100) */ /* * Valid values for ifa_debug */ -#define IFD_ATTACHED 0x1 /* attached to list */ -#define IFD_ALLOC 0x2 /* dynamically allocated */ -#define IFD_DEBUG 0x4 /* has debugging info */ -#define IFD_LINK 0x8 /* link address */ -#define IFD_TRASHED 0x10 /* in trash list */ -#define IFD_DETACHING 0x20 /* detach is in progress */ -#define IFD_NOTREADY 0x40 /* embryonic; not yet ready */ - -#define IFA_LOCK_ASSERT_HELD(_ifa) \ +#define IFD_ATTACHED 0x1 /* attached to list */ +#define IFD_ALLOC 0x2 /* dynamically allocated */ +#define IFD_DEBUG 0x4 /* has debugging info */ +#define IFD_LINK 0x8 /* link address */ +#define IFD_TRASHED 0x10 /* in trash list */ +#define IFD_DETACHING 0x20 /* detach is in progress */ +#define IFD_NOTREADY 0x40 /* embryonic; not yet ready */ + +#define IFA_LOCK_ASSERT_HELD(_ifa) \ LCK_MTX_ASSERT(&(_ifa)->ifa_lock, LCK_MTX_ASSERT_OWNED) -#define IFA_LOCK_ASSERT_NOTHELD(_ifa) \ +#define IFA_LOCK_ASSERT_NOTHELD(_ifa) \ LCK_MTX_ASSERT(&(_ifa)->ifa_lock, LCK_MTX_ASSERT_NOTOWNED) -#define IFA_LOCK(_ifa) \ +#define IFA_LOCK(_ifa) \ lck_mtx_lock(&(_ifa)->ifa_lock) -#define IFA_LOCK_SPIN(_ifa) \ +#define IFA_LOCK_SPIN(_ifa) \ lck_mtx_lock_spin(&(_ifa)->ifa_lock) -#define IFA_CONVERT_LOCK(_ifa) do { \ - IFA_LOCK_ASSERT_HELD(_ifa); \ - lck_mtx_convert_spin(&(_ifa)->ifa_lock); \ +#define IFA_CONVERT_LOCK(_ifa) do { \ + 
IFA_LOCK_ASSERT_HELD(_ifa); \ + lck_mtx_convert_spin(&(_ifa)->ifa_lock); \ } while (0) -#define IFA_UNLOCK(_ifa) \ +#define IFA_UNLOCK(_ifa) \ lck_mtx_unlock(&(_ifa)->ifa_lock) -#define IFA_ADDREF(_ifa) \ +#define IFA_ADDREF(_ifa) \ ifa_addref(_ifa, 0) -#define IFA_ADDREF_LOCKED(_ifa) \ +#define IFA_ADDREF_LOCKED(_ifa) \ ifa_addref(_ifa, 1) -#define IFA_REMREF(_ifa) do { \ - (void) ifa_remref(_ifa, 0); \ +#define IFA_REMREF(_ifa) do { \ + (void) ifa_remref(_ifa, 0); \ } while (0) -#define IFA_REMREF_LOCKED(_ifa) \ +#define IFA_REMREF_LOCKED(_ifa) \ ifa_remref(_ifa, 1) /* @@ -1274,52 +1274,52 @@ struct ifaddr { */ struct ifmultiaddr { decl_lck_mtx_data(, ifma_lock); - u_int32_t ifma_refcount; /* reference count */ - u_int32_t ifma_anoncnt; /* # of anonymous requests */ - u_int32_t ifma_reqcnt; /* total requests for this address */ - u_int32_t ifma_debug; /* see ifa_debug flags */ - u_int32_t ifma_flags; /* see below */ + u_int32_t ifma_refcount; /* reference count */ + u_int32_t ifma_anoncnt; /* # of anonymous requests */ + u_int32_t ifma_reqcnt; /* total requests for this address */ + u_int32_t ifma_debug; /* see ifa_debug flags */ + u_int32_t ifma_flags; /* see below */ LIST_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */ - struct sockaddr *ifma_addr; /* address this membership is for */ - struct ifmultiaddr *ifma_ll; /* link-layer translation, if any */ - struct ifnet *ifma_ifp; /* back-pointer to interface */ - void *ifma_protospec; /* protocol-specific state, if any */ - void (*ifma_trace) /* callback fn for tracing refs */ - (struct ifmultiaddr *, int); + struct sockaddr *ifma_addr; /* address this membership is for */ + struct ifmultiaddr *ifma_ll; /* link-layer translation, if any */ + struct ifnet *ifma_ifp; /* back-pointer to interface */ + void *ifma_protospec; /* protocol-specific state, if any */ + void (*ifma_trace) /* callback fn for tracing refs */ + (struct ifmultiaddr *, int); }; /* * Values for ifma_flags */ -#define IFMAF_ANONYMOUS 0x1 /* has anonymous request ref(s) held */ +#define IFMAF_ANONYMOUS 0x1 /* has anonymous request ref(s) held */ -#define IFMA_LOCK_ASSERT_HELD(_ifma) \ +#define IFMA_LOCK_ASSERT_HELD(_ifma) \ LCK_MTX_ASSERT(&(_ifma)->ifma_lock, LCK_MTX_ASSERT_OWNED) -#define IFMA_LOCK_ASSERT_NOTHELD(_ifma) \ +#define IFMA_LOCK_ASSERT_NOTHELD(_ifma) \ LCK_MTX_ASSERT(&(_ifma)->ifma_lock, LCK_MTX_ASSERT_NOTOWNED) -#define IFMA_LOCK(_ifma) \ +#define IFMA_LOCK(_ifma) \ lck_mtx_lock(&(_ifma)->ifma_lock) -#define IFMA_LOCK_SPIN(_ifma) \ +#define IFMA_LOCK_SPIN(_ifma) \ lck_mtx_lock_spin(&(_ifma)->ifma_lock) -#define IFMA_CONVERT_LOCK(_ifma) do { \ - IFMA_LOCK_ASSERT_HELD(_ifma); \ - lck_mtx_convert_spin(&(_ifma)->ifma_lock); \ +#define IFMA_CONVERT_LOCK(_ifma) do { \ + IFMA_LOCK_ASSERT_HELD(_ifma); \ + lck_mtx_convert_spin(&(_ifma)->ifma_lock); \ } while (0) -#define IFMA_UNLOCK(_ifma) \ +#define IFMA_UNLOCK(_ifma) \ lck_mtx_unlock(&(_ifma)->ifma_lock) -#define IFMA_ADDREF(_ifma) \ +#define IFMA_ADDREF(_ifma) \ ifma_addref(_ifma, 0) -#define IFMA_ADDREF_LOCKED(_ifma) \ +#define IFMA_ADDREF_LOCKED(_ifma) \ ifma_addref(_ifma, 1) -#define IFMA_REMREF(_ifma) \ +#define IFMA_REMREF(_ifma) \ ifma_remref(_ifma) /* @@ -1335,16 +1335,16 @@ struct ifmultiaddr { * The test is done against IFT_CELLULAR instead of IFNET_FAMILY_CELLULAR to * handle certain cases where the family isn't set to the latter. 
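[IFA_ADDREF/IFA_ADDREF_LOCKED above are thin macros over one function, ifa_addref(ifa, locked), whose second argument says whether the caller already holds ifa_lock. A sketch of that convention in portable C (the addr_* names are illustrative; only the locked-flag idea comes from the header):

    #include <pthread.h>

    struct addr {
        pthread_mutex_t lock;                    /* models ifa_lock */
        unsigned refcnt;                         /* models ifa_refcnt */
    };

    /* One implementation, two entry points: callers that already hold
     * the lock pass locked != 0 and skip the lock/unlock pair. */
    static void
    addr_addref(struct addr *a, int locked)
    {
        if (!locked)
            pthread_mutex_lock(&a->lock);
        a->refcnt++;
        if (!locked)
            pthread_mutex_unlock(&a->lock);
    }

    #define ADDR_ADDREF(a)        addr_addref((a), 0)
    #define ADDR_ADDREF_LOCKED(a) addr_addref((a), 1)

The same pairing shows up in IFA_REMREF/IFA_REMREF_LOCKED and again in the IFMA_* macros for struct ifmultiaddr just below.]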
*/ -#define IFNET_IS_CELLULAR(_ifp) \ - ((_ifp)->if_type == IFT_CELLULAR || \ +#define IFNET_IS_CELLULAR(_ifp) \ + ((_ifp)->if_type == IFT_CELLULAR || \ (_ifp)->if_delegated.type == IFT_CELLULAR) /* * Indicate whether or not the immediate interface, or the interface delegated * by it, is an ETHERNET interface. */ -#define IFNET_IS_ETHERNET(_ifp) \ - ((_ifp)->if_family == IFNET_FAMILY_ETHERNET || \ +#define IFNET_IS_ETHERNET(_ifp) \ + ((_ifp)->if_family == IFNET_FAMILY_ETHERNET || \ (_ifp)->if_delegated.family == IFNET_FAMILY_ETHERNET) /* * Indicate whether or not the immediate interface, or the interface delegated @@ -1359,8 +1359,8 @@ struct ifmultiaddr { * The test is done against IFNET_SUBFAMILY_WIFI as the family may be set to * IFNET_FAMILY_ETHERNET (as well as type to IFT_ETHER) which is too generic. */ -#define IFNET_IS_WIFI(_ifp) \ - ((_ifp)->if_subfamily == IFNET_SUBFAMILY_WIFI || \ +#define IFNET_IS_WIFI(_ifp) \ + ((_ifp)->if_subfamily == IFNET_SUBFAMILY_WIFI || \ (_ifp)->if_delegated.subfamily == IFNET_SUBFAMILY_WIFI) /* @@ -1373,19 +1373,19 @@ struct ifmultiaddr { * certain places need to explicitly know the immediate interface type, and * this macro should not be used there. */ -#define IFNET_IS_WIRED(_ifp) \ - ((_ifp)->if_family == IFNET_FAMILY_ETHERNET || \ - (_ifp)->if_delegated.family == IFNET_FAMILY_ETHERNET || \ - (_ifp)->if_family == IFNET_FAMILY_FIREWIRE || \ +#define IFNET_IS_WIRED(_ifp) \ + ((_ifp)->if_family == IFNET_FAMILY_ETHERNET || \ + (_ifp)->if_delegated.family == IFNET_FAMILY_ETHERNET || \ + (_ifp)->if_family == IFNET_FAMILY_FIREWIRE || \ (_ifp)->if_delegated.family == IFNET_FAMILY_FIREWIRE) /* * Indicate whether or not the immediate WiFi interface is on an infrastructure * network */ -#define IFNET_IS_WIFI_INFRA(_ifp) \ - ((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \ - (_ifp)->if_subfamily == IFNET_SUBFAMILY_WIFI && \ +#define IFNET_IS_WIFI_INFRA(_ifp) \ + ((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \ + (_ifp)->if_subfamily == IFNET_SUBFAMILY_WIFI && \ !((_ifp)->if_eflags & IFEF_AWDL)) /* @@ -1396,25 +1396,25 @@ struct ifmultiaddr { * * Note that this is meant to be used only for policy purposes. */ -#define IFNET_IS_EXPENSIVE(_ifp) \ - ((_ifp)->if_eflags & IFEF_EXPENSIVE || \ +#define IFNET_IS_EXPENSIVE(_ifp) \ + ((_ifp)->if_eflags & IFEF_EXPENSIVE || \ (_ifp)->if_delegated.expensive) -#define IFNET_IS_LOW_POWER(_ifp) \ - (if_low_power_restricted != 0 && \ - ((_ifp)->if_xflags & IFXF_LOW_POWER) || \ - ((_ifp)->if_delegated.ifp != NULL && \ +#define IFNET_IS_LOW_POWER(_ifp) \ + (if_low_power_restricted != 0 && \ + ((_ifp)->if_xflags & IFXF_LOW_POWER) || \ + ((_ifp)->if_delegated.ifp != NULL && \ ((_ifp)->if_delegated.ifp->if_xflags & IFXF_LOW_POWER))) /* * We don't support AWDL interface delegation. 
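[Each IFNET_IS_* test above checks the immediate interface and then the one it delegates to, so policy code sees through delegation. A small model of the shape (struct iface and the helper are illustrative; the real macros read if_type/if_delegated):

    struct iface {
        int type;                                /* models if_type */
        struct { int type; } delegated;          /* models if_delegated */
    };

    /* Same shape as IFNET_IS_CELLULAR(): true if either the interface
     * itself or the interface it delegates to matches. */
    static int
    iface_is_cellular(const struct iface *ifp, int ift_cellular)
    {
        return ifp->type == ift_cellular ||
            ifp->delegated.type == ift_cellular;
    }

One reading note on IFNET_IS_LOW_POWER above: because && binds tighter than ||, the expression parses as (restricted && self-has-IFXF_LOW_POWER) || (delegate-has-IFXF_LOW_POWER), i.e. the delegated clause is not gated by if_low_power_restricted; the patch does not say whether that grouping is intentional.]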
*/ -#define IFNET_IS_AWDL_RESTRICTED(_ifp) \ - (((_ifp)->if_eflags & (IFEF_AWDL|IFEF_AWDL_RESTRICTED)) == \ +#define IFNET_IS_AWDL_RESTRICTED(_ifp) \ + (((_ifp)->if_eflags & (IFEF_AWDL|IFEF_AWDL_RESTRICTED)) == \ (IFEF_AWDL|IFEF_AWDL_RESTRICTED)) -#define IFNET_IS_INTCOPROC(_ifp) \ - ((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \ +#define IFNET_IS_INTCOPROC(_ifp) \ + ((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \ (_ifp)->if_subfamily == IFNET_SUBFAMILY_INTCOPROC) extern struct ifnethead ifnet_head; @@ -1462,16 +1462,16 @@ extern u_int32_t if_functional_type(struct ifnet *, bool); extern errno_t if_mcasts_update(struct ifnet *); typedef enum { - IFNET_LCK_ASSERT_EXCLUSIVE, /* RW: held as writer */ - IFNET_LCK_ASSERT_SHARED, /* RW: held as reader */ - IFNET_LCK_ASSERT_OWNED, /* RW: writer/reader, MTX: held */ - IFNET_LCK_ASSERT_NOTOWNED /* not held */ + IFNET_LCK_ASSERT_EXCLUSIVE, /* RW: held as writer */ + IFNET_LCK_ASSERT_SHARED, /* RW: held as reader */ + IFNET_LCK_ASSERT_OWNED, /* RW: writer/reader, MTX: held */ + IFNET_LCK_ASSERT_NOTOWNED /* not held */ } ifnet_lock_assert_t; -#define IF_LLADDR(_ifp) \ +#define IF_LLADDR(_ifp) \ (LLADDR(SDL(((_ifp)->if_lladdr)->ifa_addr))) -#define IF_INDEX_IN_RANGE(_ind_) ((_ind_) > 0 && \ +#define IF_INDEX_IN_RANGE(_ind_) ((_ind_) > 0 && \ (unsigned int)(_ind_) <= (unsigned int)if_index) __private_extern__ void ifnet_lock_assert(struct ifnet *, ifnet_lock_assert_t); @@ -1491,10 +1491,10 @@ __private_extern__ void if_inet6data_lock_exclusive(struct ifnet *ifp); __private_extern__ void if_inet6data_lock_done(struct ifnet *ifp); #endif -__private_extern__ void ifnet_head_lock_shared(void); -__private_extern__ void ifnet_head_lock_exclusive(void); -__private_extern__ void ifnet_head_done(void); -__private_extern__ void ifnet_head_assert_exclusive(void); +__private_extern__ void ifnet_head_lock_shared(void); +__private_extern__ void ifnet_head_lock_exclusive(void); +__private_extern__ void ifnet_head_done(void); +__private_extern__ void ifnet_head_assert_exclusive(void); __private_extern__ errno_t ifnet_set_idle_flags_locked(ifnet_t, u_int32_t, u_int32_t); @@ -1527,7 +1527,7 @@ extern struct ifaddr *ifa_ifwithnet_scoped(const struct sockaddr *, unsigned int); extern struct ifaddr *ifa_ifwithroute(int, const struct sockaddr *, const struct sockaddr *); -extern struct ifaddr *ifa_ifwithroute_locked(int, const struct sockaddr *, +extern struct ifaddr *ifa_ifwithroute_locked(int, const struct sockaddr *, const struct sockaddr *); extern struct ifaddr *ifa_ifwithroute_scoped_locked(int, const struct sockaddr *, const struct sockaddr *, unsigned int); @@ -1569,7 +1569,7 @@ extern void ifnet_notify_data_threshold(struct ifnet *); #define IF_AFDATA_UNLOCK_ASSERT if_afdata_unlock_assert static inline void -if_afdata_rlock (struct ifnet *ifp, int af) +if_afdata_rlock(struct ifnet *ifp, int af) { switch (af) { #if INET @@ -1590,7 +1590,7 @@ if_afdata_rlock (struct ifnet *ifp, int af) } static inline void -if_afdata_runlock (struct ifnet *ifp, int af) +if_afdata_runlock(struct ifnet *ifp, int af) { switch (af) { #if INET @@ -1611,7 +1611,7 @@ if_afdata_runlock (struct ifnet *ifp, int af) } static inline void -if_afdata_wlock (struct ifnet *ifp, int af) +if_afdata_wlock(struct ifnet *ifp, int af) { switch (af) { #if INET @@ -1632,7 +1632,7 @@ if_afdata_wlock (struct ifnet *ifp, int af) } static inline void -if_afdata_unlock (struct ifnet *ifp, int af) +if_afdata_unlock(struct ifnet *ifp, int af) { switch (af) { #if INET @@ -1653,7 +1653,7 @@ if_afdata_unlock 
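[The if_afdata_rlock/_wlock family above dispatches on address family — note the #if INET / INET6 cases in the switch — to a per-AF lock stored in the ifnet. A user-space sketch of that dispatch using one rwlock per family (the enum values and names are illustrative, as is the rejection of unknown families):

    #include <pthread.h>
    #include <stdlib.h>

    enum { MY_AF_INET, MY_AF_INET6, MY_AF_MAX };

    struct afdata {
        pthread_rwlock_t lock[MY_AF_MAX];        /* one lock per family */
    };

    static void
    afdata_rlock(struct afdata *d, int af)
    {
        switch (af) {
        case MY_AF_INET:
        case MY_AF_INET6:
            pthread_rwlock_rdlock(&d->lock[af]);
            break;
        default:
            abort();                             /* unknown family: reject */
        }
    }

The write, unlock, and assert variants follow the same switch skeleton, which is why the patch's only change to them is mechanical: dropping the space before the parameter list.]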
(struct ifnet *ifp, int af) } static inline void -if_afdata_wlock_assert (struct ifnet *ifp, int af) +if_afdata_wlock_assert(struct ifnet *ifp, int af) { #if !MACH_ASSERT #pragma unused(ifp) @@ -1677,7 +1677,7 @@ if_afdata_wlock_assert (struct ifnet *ifp, int af) } static inline void -if_afdata_unlock_assert (struct ifnet *ifp, int af) +if_afdata_unlock_assert(struct ifnet *ifp, int af) { #if !MACH_ASSERT #pragma unused(ifp) @@ -1701,7 +1701,7 @@ if_afdata_unlock_assert (struct ifnet *ifp, int af) } static inline void -if_afdata_lock_assert (struct ifnet *ifp, int af) +if_afdata_lock_assert(struct ifnet *ifp, int af) { #if !MACH_ASSERT #pragma unused(ifp) @@ -1733,11 +1733,11 @@ __private_extern__ struct in6_ifaddr *ifa_foraddr6_scoped(struct in6_addr *, __private_extern__ void if_data_internal_to_if_data(struct ifnet *ifp, const struct if_data_internal *if_data_int, struct if_data *if_data); -__private_extern__ void if_data_internal_to_if_data64(struct ifnet *ifp, +__private_extern__ void if_data_internal_to_if_data64(struct ifnet *ifp, const struct if_data_internal *if_data_int, struct if_data64 *if_data64); -__private_extern__ void if_copy_traffic_class(struct ifnet *ifp, +__private_extern__ void if_copy_traffic_class(struct ifnet *ifp, struct if_traffic_class *if_tc); -__private_extern__ void if_copy_data_extended(struct ifnet *ifp, +__private_extern__ void if_copy_data_extended(struct ifnet *ifp, struct if_data_extended *if_de); __private_extern__ void if_copy_packet_stats(struct ifnet *ifp, struct if_packet_stats *if_ps); diff --git a/bsd/net/if_vlan.c b/bsd/net/if_vlan.c index 8d26fab65..b4a922301 100644 --- a/bsd/net/if_vlan.c +++ b/bsd/net/if_vlan.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,7 +39,7 @@ * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. - * + * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. 
DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF @@ -108,136 +108,136 @@ #include #include -#define VLANNAME "vlan" +#define VLANNAME "vlan" /** - ** vlan locks - **/ +** vlan locks +**/ static __inline__ lck_grp_t * my_lck_grp_alloc_init(const char * grp_name) { - lck_grp_t * grp; - lck_grp_attr_t * grp_attrs; - - grp_attrs = lck_grp_attr_alloc_init(); - grp = lck_grp_alloc_init(grp_name, grp_attrs); - lck_grp_attr_free(grp_attrs); - return (grp); + lck_grp_t * grp; + lck_grp_attr_t * grp_attrs; + + grp_attrs = lck_grp_attr_alloc_init(); + grp = lck_grp_alloc_init(grp_name, grp_attrs); + lck_grp_attr_free(grp_attrs); + return grp; } static __inline__ lck_mtx_t * my_lck_mtx_alloc_init(lck_grp_t * lck_grp) { - lck_attr_t * lck_attrs; - lck_mtx_t * lck_mtx; + lck_attr_t * lck_attrs; + lck_mtx_t * lck_mtx; - lck_attrs = lck_attr_alloc_init(); - lck_mtx = lck_mtx_alloc_init(lck_grp, lck_attrs); - lck_attr_free(lck_attrs); - return (lck_mtx); + lck_attrs = lck_attr_alloc_init(); + lck_mtx = lck_mtx_alloc_init(lck_grp, lck_attrs); + lck_attr_free(lck_attrs); + return lck_mtx; } -static lck_mtx_t * vlan_lck_mtx; +static lck_mtx_t * vlan_lck_mtx; static __inline__ void vlan_lock_init(void) { - lck_grp_t * vlan_lck_grp; + lck_grp_t * vlan_lck_grp; - vlan_lck_grp = my_lck_grp_alloc_init("if_vlan"); - vlan_lck_mtx = my_lck_mtx_alloc_init(vlan_lck_grp); + vlan_lck_grp = my_lck_grp_alloc_init("if_vlan"); + vlan_lck_mtx = my_lck_mtx_alloc_init(vlan_lck_grp); } static __inline__ void vlan_assert_lock_held(void) { - LCK_MTX_ASSERT(vlan_lck_mtx, LCK_MTX_ASSERT_OWNED); - return; + LCK_MTX_ASSERT(vlan_lck_mtx, LCK_MTX_ASSERT_OWNED); + return; } static __inline__ void vlan_assert_lock_not_held(void) { - LCK_MTX_ASSERT(vlan_lck_mtx, LCK_MTX_ASSERT_NOTOWNED); - return; + LCK_MTX_ASSERT(vlan_lck_mtx, LCK_MTX_ASSERT_NOTOWNED); + return; } static __inline__ void vlan_lock(void) { - lck_mtx_lock(vlan_lck_mtx); - return; + lck_mtx_lock(vlan_lck_mtx); + return; } static __inline__ void vlan_unlock(void) { - lck_mtx_unlock(vlan_lck_mtx); - return; + lck_mtx_unlock(vlan_lck_mtx); + return; } /** - ** vlan structures, types - **/ +** vlan structures, types +**/ struct vlan_parent; LIST_HEAD(vlan_parent_list, vlan_parent); struct ifvlan; LIST_HEAD(ifvlan_list, ifvlan); -typedef LIST_ENTRY(vlan_parent) +typedef LIST_ENTRY(vlan_parent) vlan_parent_entry; -typedef LIST_ENTRY(ifvlan) +typedef LIST_ENTRY(ifvlan) ifvlan_entry; -#define VLP_SIGNATURE 0xfaceface +#define VLP_SIGNATURE 0xfaceface typedef struct vlan_parent { - vlan_parent_entry vlp_parent_list;/* list of parents */ - struct ifnet * vlp_ifp; /* interface */ - struct ifvlan_list vlp_vlan_list; /* list of VLAN's */ -#define VLPF_SUPPORTS_VLAN_MTU 0x00000001 -#define VLPF_CHANGE_IN_PROGRESS 0x00000002 -#define VLPF_DETACHING 0x00000004 -#define VLPF_LINK_EVENT_REQUIRED 0x00000008 - u_int32_t vlp_flags; - u_int32_t vlp_event_code; - struct ifdevmtu vlp_devmtu; - int32_t vlp_retain_count; - u_int32_t vlp_signature; /* VLP_SIGNATURE */ + vlan_parent_entry vlp_parent_list;/* list of parents */ + struct ifnet * vlp_ifp; /* interface */ + struct ifvlan_list vlp_vlan_list;/* list of VLAN's */ +#define VLPF_SUPPORTS_VLAN_MTU 0x00000001 +#define VLPF_CHANGE_IN_PROGRESS 0x00000002 +#define VLPF_DETACHING 0x00000004 +#define VLPF_LINK_EVENT_REQUIRED 0x00000008 + u_int32_t vlp_flags; + u_int32_t vlp_event_code; + struct ifdevmtu vlp_devmtu; + int32_t vlp_retain_count; + u_int32_t 
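[my_lck_grp_alloc_init() and my_lck_mtx_alloc_init() above follow the xnu convention of building an attribute object, using it to allocate the lock, then freeing the attributes. The pthread equivalent of that create-with-temporary-attributes pattern (my_mutex_alloc_init is an illustrative name):

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t *
    my_mutex_alloc_init(void)
    {
        pthread_mutexattr_t attrs;
        pthread_mutex_t *mtx = malloc(sizeof(*mtx));

        if (mtx == NULL)
            return NULL;
        pthread_mutexattr_init(&attrs);
        pthread_mutex_init(mtx, &attrs);
        pthread_mutexattr_destroy(&attrs);       /* attrs only matter at init */
        return mtx;
    }

vlan_lock_init() then stashes the result in a single global (vlan_lck_mtx), which every vlan_lock()/vlan_unlock() call in this file goes through.]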
vlp_signature;/* VLP_SIGNATURE */ } vlan_parent, * vlan_parent_ref; -#define IFV_SIGNATURE 0xbeefbeef +#define IFV_SIGNATURE 0xbeefbeef struct ifvlan { - ifvlan_entry ifv_vlan_list; - char ifv_name[IFNAMSIZ]; /* our unique id */ - struct ifnet * ifv_ifp; /* our interface */ - vlan_parent_ref ifv_vlp; /* parent information */ - struct ifv_linkmib { - u_int16_t ifvm_encaplen;/* encapsulation length */ - u_int16_t ifvm_mtufudge;/* MTU fudged by this much */ - u_int16_t ifvm_proto; /* encapsulation ethertype */ - u_int16_t ifvm_tag; /* tag to apply on packets leaving if */ - } ifv_mib; - struct multicast_list ifv_multicast; -#define IFVF_PROMISC 0x1 /* promiscuous mode enabled */ -#define IFVF_DETACHING 0x2 /* interface is detaching */ -#define IFVF_READY 0x4 /* interface is ready */ - u_int32_t ifv_flags; - int32_t ifv_retain_count; - u_int32_t ifv_signature; /* IFV_SIGNATURE */ + ifvlan_entry ifv_vlan_list; + char ifv_name[IFNAMSIZ];/* our unique id */ + struct ifnet * ifv_ifp; /* our interface */ + vlan_parent_ref ifv_vlp; /* parent information */ + struct ifv_linkmib { + u_int16_t ifvm_encaplen;/* encapsulation length */ + u_int16_t ifvm_mtufudge;/* MTU fudged by this much */ + u_int16_t ifvm_proto; /* encapsulation ethertype */ + u_int16_t ifvm_tag; /* tag to apply on packets leaving if */ + } ifv_mib; + struct multicast_list ifv_multicast; +#define IFVF_PROMISC 0x1 /* promiscuous mode enabled */ +#define IFVF_DETACHING 0x2 /* interface is detaching */ +#define IFVF_READY 0x4 /* interface is ready */ + u_int32_t ifv_flags; + int32_t ifv_retain_count; + u_int32_t ifv_signature;/* IFV_SIGNATURE */ }; typedef struct ifvlan * ifvlan_ref; typedef struct vlan_globals_s { - struct vlan_parent_list parent_list; - int verbose; + struct vlan_parent_list parent_list; + int verbose; } * vlan_globals_ref; - -static vlan_globals_ref g_vlan; -#define ifv_tag ifv_mib.ifvm_tag -#define ifv_encaplen ifv_mib.ifvm_encaplen -#define ifv_mtufudge ifv_mib.ifvm_mtufudge +static vlan_globals_ref g_vlan; + +#define ifv_tag ifv_mib.ifvm_tag +#define ifv_encaplen ifv_mib.ifvm_encaplen +#define ifv_mtufudge ifv_mib.ifvm_mtufudge static void vlan_parent_retain(vlan_parent_ref vlp); @@ -246,147 +246,147 @@ static void vlan_parent_release(vlan_parent_ref vlp); /** - ** vlan_parent_ref vlp_flags in-lines - **/ +** vlan_parent_ref vlp_flags in-lines +**/ static __inline__ int vlan_parent_flags_supports_vlan_mtu(vlan_parent_ref vlp) { - return ((vlp->vlp_flags & VLPF_SUPPORTS_VLAN_MTU) != 0); + return (vlp->vlp_flags & VLPF_SUPPORTS_VLAN_MTU) != 0; } static __inline__ void vlan_parent_flags_set_supports_vlan_mtu(vlan_parent_ref vlp) { - vlp->vlp_flags |= VLPF_SUPPORTS_VLAN_MTU; - return; + vlp->vlp_flags |= VLPF_SUPPORTS_VLAN_MTU; + return; } static __inline__ int vlan_parent_flags_change_in_progress(vlan_parent_ref vlp) { - return ((vlp->vlp_flags & VLPF_CHANGE_IN_PROGRESS) != 0); + return (vlp->vlp_flags & VLPF_CHANGE_IN_PROGRESS) != 0; } static __inline__ void vlan_parent_flags_set_change_in_progress(vlan_parent_ref vlp) { - vlp->vlp_flags |= VLPF_CHANGE_IN_PROGRESS; - return; + vlp->vlp_flags |= VLPF_CHANGE_IN_PROGRESS; + return; } static __inline__ void vlan_parent_flags_clear_change_in_progress(vlan_parent_ref vlp) { - vlp->vlp_flags &= ~VLPF_CHANGE_IN_PROGRESS; - return; + vlp->vlp_flags &= ~VLPF_CHANGE_IN_PROGRESS; + return; } static __inline__ int vlan_parent_flags_detaching(struct vlan_parent * vlp) { - return ((vlp->vlp_flags & VLPF_DETACHING) != 0); + return (vlp->vlp_flags & VLPF_DETACHING) != 0; } static 
__inline__ void vlan_parent_flags_set_detaching(struct vlan_parent * vlp) { - vlp->vlp_flags |= VLPF_DETACHING; - return; + vlp->vlp_flags |= VLPF_DETACHING; + return; } static __inline__ int vlan_parent_flags_link_event_required(vlan_parent_ref vlp) { - return ((vlp->vlp_flags & VLPF_LINK_EVENT_REQUIRED) != 0); + return (vlp->vlp_flags & VLPF_LINK_EVENT_REQUIRED) != 0; } static __inline__ void vlan_parent_flags_set_link_event_required(vlan_parent_ref vlp) { - vlp->vlp_flags |= VLPF_LINK_EVENT_REQUIRED; - return; + vlp->vlp_flags |= VLPF_LINK_EVENT_REQUIRED; + return; } static __inline__ void vlan_parent_flags_clear_link_event_required(vlan_parent_ref vlp) { - vlp->vlp_flags &= ~VLPF_LINK_EVENT_REQUIRED; - return; + vlp->vlp_flags &= ~VLPF_LINK_EVENT_REQUIRED; + return; } /** - ** ifvlan_flags in-lines routines - **/ +** ifvlan_flags in-lines routines +**/ static __inline__ int ifvlan_flags_promisc(ifvlan_ref ifv) { - return ((ifv->ifv_flags & IFVF_PROMISC) != 0); + return (ifv->ifv_flags & IFVF_PROMISC) != 0; } static __inline__ void ifvlan_flags_set_promisc(ifvlan_ref ifv) { - ifv->ifv_flags |= IFVF_PROMISC; - return; + ifv->ifv_flags |= IFVF_PROMISC; + return; } static __inline__ void ifvlan_flags_clear_promisc(ifvlan_ref ifv) { - ifv->ifv_flags &= ~IFVF_PROMISC; - return; + ifv->ifv_flags &= ~IFVF_PROMISC; + return; } static __inline__ int ifvlan_flags_ready(ifvlan_ref ifv) { - return ((ifv->ifv_flags & IFVF_READY) != 0); + return (ifv->ifv_flags & IFVF_READY) != 0; } static __inline__ void ifvlan_flags_set_ready(ifvlan_ref ifv) { - ifv->ifv_flags |= IFVF_READY; - return; + ifv->ifv_flags |= IFVF_READY; + return; } static __inline__ int ifvlan_flags_detaching(ifvlan_ref ifv) { - return ((ifv->ifv_flags & IFVF_DETACHING) != 0); + return (ifv->ifv_flags & IFVF_DETACHING) != 0; } static __inline__ void ifvlan_flags_set_detaching(ifvlan_ref ifv) { - ifv->ifv_flags |= IFVF_DETACHING; - return; + ifv->ifv_flags |= IFVF_DETACHING; + return; } #if 0 SYSCTL_DECL(_net_link); -SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IEEE 802.1Q VLAN"); -SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "for consistency"); +SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IEEE 802.1Q VLAN"); +SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "for consistency"); #endif -#define VLAN_UNITMAX IF_MAXUNIT -#define VLAN_ZONE_MAX_ELEM MIN(IFNETS_MAX, VLAN_UNITMAX) -#define M_VLAN M_DEVBUF - -static int vlan_clone_create(struct if_clone *, u_int32_t, void *); -static int vlan_clone_destroy(struct ifnet *); -static int vlan_input(ifnet_t ifp, protocol_family_t protocol, - mbuf_t m, char *frame_header); -static int vlan_output(struct ifnet *ifp, struct mbuf *m); -static int vlan_ioctl(ifnet_t ifp, u_long cmd, void * addr); -static int vlan_attach_protocol(struct ifnet *ifp); -static int vlan_detach_protocol(struct ifnet *ifp); -static int vlan_setmulti(struct ifnet *ifp); -static int vlan_unconfig(ifvlan_ref ifv, int need_to_wait); -static int vlan_config(struct ifnet * ifp, struct ifnet * p, int tag); -static void vlan_if_free(struct ifnet * ifp); -static int vlan_remove(ifvlan_ref ifv, int need_to_wait); +#define VLAN_UNITMAX IF_MAXUNIT +#define VLAN_ZONE_MAX_ELEM MIN(IFNETS_MAX, VLAN_UNITMAX) +#define M_VLAN M_DEVBUF + +static int vlan_clone_create(struct if_clone *, u_int32_t, void *); +static int vlan_clone_destroy(struct ifnet *); +static int vlan_input(ifnet_t ifp, protocol_family_t protocol, + mbuf_t m, char 
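[The vlan_parent_flags_* and ifvlan_flags_* inlines above wrap every bit of vlp_flags/ifv_flags in a named accessor instead of open-coding masks at call sites. The pattern, reduced to one flag (names illustrative):

    #define PF_DETACHING 0x00000004              /* like VLPF_DETACHING */

    struct parent { unsigned flags; };

    static inline int
    parent_flags_detaching(const struct parent *p)
    {
        return (p->flags & PF_DETACHING) != 0;
    }

    static inline void
    parent_flags_set_detaching(struct parent *p)
    {
        p->flags |= PF_DETACHING;
    }

The flags themselves are still only safe to touch under vlan_lock(); the accessors buy readability, not atomicity. The patch's change to them is purely stylistic: return (expr); becomes return expr; per the new house style.]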
*frame_header); +static int vlan_output(struct ifnet *ifp, struct mbuf *m); +static int vlan_ioctl(ifnet_t ifp, u_long cmd, void * addr); +static int vlan_attach_protocol(struct ifnet *ifp); +static int vlan_detach_protocol(struct ifnet *ifp); +static int vlan_setmulti(struct ifnet *ifp); +static int vlan_unconfig(ifvlan_ref ifv, int need_to_wait); +static int vlan_config(struct ifnet * ifp, struct ifnet * p, int tag); +static void vlan_if_free(struct ifnet * ifp); +static int vlan_remove(ifvlan_ref ifv, int need_to_wait); static struct if_clone vlan_cloner = IF_CLONE_INITIALIZER(VLANNAME, vlan_clone_create, @@ -395,223 +395,223 @@ static struct if_clone vlan_cloner = IF_CLONE_INITIALIZER(VLANNAME, VLAN_UNITMAX, VLAN_ZONE_MAX_ELEM, sizeof(struct ifvlan)); -static void interface_link_event(struct ifnet * ifp, u_int32_t event_code); -static void vlan_parent_link_event(struct ifnet * p, - u_int32_t event_code); +static void interface_link_event(struct ifnet * ifp, u_int32_t event_code); +static void vlan_parent_link_event(struct ifnet * p, + u_int32_t event_code); -static int ifvlan_new_mtu(ifvlan_ref ifv, int mtu); +static int ifvlan_new_mtu(ifvlan_ref ifv, int mtu); /** - ** ifvlan_ref routines - **/ +** ifvlan_ref routines +**/ static void ifvlan_retain(ifvlan_ref ifv) { - if (ifv->ifv_signature != IFV_SIGNATURE) { - panic("ifvlan_retain: bad signature\n"); - } - if (ifv->ifv_retain_count == 0) { - panic("ifvlan_retain: retain count is 0\n"); - } - OSIncrementAtomic(&ifv->ifv_retain_count); + if (ifv->ifv_signature != IFV_SIGNATURE) { + panic("ifvlan_retain: bad signature\n"); + } + if (ifv->ifv_retain_count == 0) { + panic("ifvlan_retain: retain count is 0\n"); + } + OSIncrementAtomic(&ifv->ifv_retain_count); } static void ifvlan_release(ifvlan_ref ifv) { - u_int32_t old_retain_count; - - if (ifv->ifv_signature != IFV_SIGNATURE) { - panic("ifvlan_release: bad signature\n"); - } - old_retain_count = OSDecrementAtomic(&ifv->ifv_retain_count); - switch (old_retain_count) { - case 0: - panic("ifvlan_release: retain count is 0\n"); - break; - case 1: - if (g_vlan->verbose) { - printf("ifvlan_release(%s)\n", ifv->ifv_name); + u_int32_t old_retain_count; + + if (ifv->ifv_signature != IFV_SIGNATURE) { + panic("ifvlan_release: bad signature\n"); } - ifv->ifv_signature = 0; - if_clone_softc_deallocate(&vlan_cloner, ifv); - break; - default: - break; - } - return; + old_retain_count = OSDecrementAtomic(&ifv->ifv_retain_count); + switch (old_retain_count) { + case 0: + panic("ifvlan_release: retain count is 0\n"); + break; + case 1: + if (g_vlan->verbose) { + printf("ifvlan_release(%s)\n", ifv->ifv_name); + } + ifv->ifv_signature = 0; + if_clone_softc_deallocate(&vlan_cloner, ifv); + break; + default: + break; + } + return; } static vlan_parent_ref ifvlan_get_vlan_parent_retained(ifvlan_ref ifv) { - vlan_parent_ref vlp = ifv->ifv_vlp; + vlan_parent_ref vlp = ifv->ifv_vlp; - if (vlp == NULL || vlan_parent_flags_detaching(vlp)) { - return (NULL); - } - vlan_parent_retain(vlp); - return (vlp); + if (vlp == NULL || vlan_parent_flags_detaching(vlp)) { + return NULL; + } + vlan_parent_retain(vlp); + return vlp; } /** - ** ifnet_* routines - **/ +** ifnet_* routines +**/ static ifvlan_ref ifnet_get_ifvlan(struct ifnet * ifp) { - ifvlan_ref ifv; + ifvlan_ref ifv; - ifv = (ifvlan_ref)ifnet_softc(ifp); - return (ifv); + ifv = (ifvlan_ref)ifnet_softc(ifp); + return ifv; } static ifvlan_ref ifnet_get_ifvlan_retained(struct ifnet * ifp) { - ifvlan_ref ifv; + ifvlan_ref ifv; - ifv = ifnet_get_ifvlan(ifp); - if 
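[ifvlan_retain()/ifvlan_release() below pair an atomic retain count with a signature word (IFV_SIGNATURE) that is checked on every operation and zeroed just before the memory is reclaimed, turning many use-after-free and double-release bugs into immediate panics. A user-space model with C11 atomics (the obj_* names are illustrative):

    #include <stdatomic.h>
    #include <stdlib.h>

    #define OBJ_SIGNATURE 0xbeefbeefU            /* like IFV_SIGNATURE */

    struct obj {
        unsigned signature;
        atomic_int retain_count;
    };

    static void
    obj_retain(struct obj *o)
    {
        if (o->signature != OBJ_SIGNATURE || o->retain_count == 0)
            abort();                             /* the kernel panics here */
        atomic_fetch_add(&o->retain_count, 1);
    }

    static void
    obj_release(struct obj *o)
    {
        int old;

        if (o->signature != OBJ_SIGNATURE)
            abort();
        old = atomic_fetch_sub(&o->retain_count, 1);  /* returns prior value */
        if (old == 0)
            abort();                             /* over-release */
        if (old == 1) {
            o->signature = 0;                    /* poison before freeing */
            free(o);
        }
    }

OSDecrementAtomic likewise returns the pre-decrement value, which is why the kernel code switches on old_retain_count rather than on the new count.]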
(ifv == NULL) { - return (NULL); - } - if (ifvlan_flags_detaching(ifv)) { - return (NULL); - } - ifvlan_retain(ifv); - return (ifv); + ifv = ifnet_get_ifvlan(ifp); + if (ifv == NULL) { + return NULL; + } + if (ifvlan_flags_detaching(ifv)) { + return NULL; + } + ifvlan_retain(ifv); + return ifv; } static int ifnet_ifvlan_vlan_parent_ok(struct ifnet * ifp, ifvlan_ref ifv, - vlan_parent_ref vlp) -{ - ifvlan_ref check_ifv; - - check_ifv = ifnet_get_ifvlan(ifp); - if (check_ifv != ifv || ifvlan_flags_detaching(ifv)) { - /* ifvlan_ref no longer valid */ - return (FALSE); - } - if (ifv->ifv_vlp != vlp) { - /* vlan_parent no longer valid */ - return (FALSE); - } - if (vlan_parent_flags_detaching(vlp)) { - /* parent is detaching */ - return (FALSE); - } - return (TRUE); + vlan_parent_ref vlp) +{ + ifvlan_ref check_ifv; + + check_ifv = ifnet_get_ifvlan(ifp); + if (check_ifv != ifv || ifvlan_flags_detaching(ifv)) { + /* ifvlan_ref no longer valid */ + return FALSE; + } + if (ifv->ifv_vlp != vlp) { + /* vlan_parent no longer valid */ + return FALSE; + } + if (vlan_parent_flags_detaching(vlp)) { + /* parent is detaching */ + return FALSE; + } + return TRUE; } /** - ** vlan, etc. routines - **/ +** vlan, etc. routines +**/ static int vlan_globals_init(void) { - vlan_globals_ref v; + vlan_globals_ref v; - vlan_assert_lock_not_held(); + vlan_assert_lock_not_held(); - if (g_vlan != NULL) { - return (0); - } - v = _MALLOC(sizeof(*v), M_VLAN, M_WAITOK); - if (v != NULL) { - LIST_INIT(&v->parent_list); - v->verbose = 0; - } - vlan_lock(); - if (g_vlan != NULL) { - vlan_unlock(); + if (g_vlan != NULL) { + return 0; + } + v = _MALLOC(sizeof(*v), M_VLAN, M_WAITOK); if (v != NULL) { - _FREE(v, M_VLAN); + LIST_INIT(&v->parent_list); + v->verbose = 0; + } + vlan_lock(); + if (g_vlan != NULL) { + vlan_unlock(); + if (v != NULL) { + _FREE(v, M_VLAN); + } + return 0; + } + g_vlan = v; + vlan_unlock(); + if (v == NULL) { + return ENOMEM; } - return (0); - } - g_vlan = v; - vlan_unlock(); - if (v == NULL) { - return (ENOMEM); - } - return (0); + return 0; } static int siocgifdevmtu(struct ifnet * ifp, struct ifdevmtu * ifdm_p) { - struct ifreq ifr; - int error; + struct ifreq ifr; + int error; - bzero(&ifr, sizeof(ifr)); - error = ifnet_ioctl(ifp, 0,SIOCGIFDEVMTU, &ifr); - if (error == 0) { - *ifdm_p = ifr.ifr_devmtu; - } - return (error); + bzero(&ifr, sizeof(ifr)); + error = ifnet_ioctl(ifp, 0, SIOCGIFDEVMTU, &ifr); + if (error == 0) { + *ifdm_p = ifr.ifr_devmtu; + } + return error; } static int siocsifaltmtu(struct ifnet * ifp, int mtu) { - struct ifreq ifr; + struct ifreq ifr; - bzero(&ifr, sizeof(ifr)); - ifr.ifr_mtu = mtu; - return (ifnet_ioctl(ifp, 0, SIOCSIFALTMTU, &ifr)); + bzero(&ifr, sizeof(ifr)); + ifr.ifr_mtu = mtu; + return ifnet_ioctl(ifp, 0, SIOCSIFALTMTU, &ifr); } /** - ** vlan_parent synchronization routines - **/ +** vlan_parent synchronization routines +**/ static void vlan_parent_retain(vlan_parent_ref vlp) { - if (vlp->vlp_signature != VLP_SIGNATURE) { - panic("vlan_parent_retain: signature is bad\n"); - } - if (vlp->vlp_retain_count == 0) { - panic("vlan_parent_retain: retain count is 0\n"); - } - OSIncrementAtomic(&vlp->vlp_retain_count); + if (vlp->vlp_signature != VLP_SIGNATURE) { + panic("vlan_parent_retain: signature is bad\n"); + } + if (vlp->vlp_retain_count == 0) { + panic("vlan_parent_retain: retain count is 0\n"); + } + OSIncrementAtomic(&vlp->vlp_retain_count); } static void vlan_parent_release(vlan_parent_ref vlp) { - u_int32_t old_retain_count; - - if (vlp->vlp_signature != 
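[vlan_globals_init() below uses the allocate-outside-the-lock pattern: optimistically allocate, take vlan_lock(), re-check whether another thread initialized g_vlan first, and free the loser. A compact pthread model (globals_init and friends are illustrative names):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    struct globals { int verbose; };

    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct globals *g;

    static int
    globals_init(void)
    {
        struct globals *v;

        if (g != NULL)                           /* unlocked peek, as in xnu;
                                                  * strictly portable C would
                                                  * use an atomic load here */
            return 0;
        v = calloc(1, sizeof(*v));               /* allocate before locking */
        pthread_mutex_lock(&g_lock);
        if (g != NULL) {                         /* lost the race */
            pthread_mutex_unlock(&g_lock);
            free(v);                             /* free(NULL) is a no-op */
            return 0;
        }
        g = v;
        pthread_mutex_unlock(&g_lock);
        return (v == NULL) ? ENOMEM : 0;
    }

Like the original, a failed allocation leaves the global NULL and returns ENOMEM, so the next caller simply retries.]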
VLP_SIGNATURE) { - panic("vlan_parent_release: signature is bad\n"); - } - old_retain_count = OSDecrementAtomic(&vlp->vlp_retain_count); - switch (old_retain_count) { - case 0: - panic("vlan_parent_release: retain count is 0\n"); - break; - case 1: - if (g_vlan->verbose) { - struct ifnet * ifp = vlp->vlp_ifp; - printf("vlan_parent_release(%s%d)\n", ifnet_name(ifp), - ifnet_unit(ifp)); + u_int32_t old_retain_count; + + if (vlp->vlp_signature != VLP_SIGNATURE) { + panic("vlan_parent_release: signature is bad\n"); + } + old_retain_count = OSDecrementAtomic(&vlp->vlp_retain_count); + switch (old_retain_count) { + case 0: + panic("vlan_parent_release: retain count is 0\n"); + break; + case 1: + if (g_vlan->verbose) { + struct ifnet * ifp = vlp->vlp_ifp; + printf("vlan_parent_release(%s%d)\n", ifnet_name(ifp), + ifnet_unit(ifp)); + } + vlp->vlp_signature = 0; + FREE(vlp, M_VLAN); + break; + default: + break; } - vlp->vlp_signature = 0; - FREE(vlp, M_VLAN); - break; - default: - break; - } - return; + return; } /* * Function: vlan_parent_wait * Purpose: * Allows a single thread to gain exclusive access to the vlan_parent - * data structure. Some operations take a long time to complete, + * data structure. Some operations take a long time to complete, * and some have side-effects that we can't predict. Holding the * vlan_lock() across such operations is not possible. * @@ -622,32 +622,32 @@ vlan_parent_release(vlan_parent_ref vlp) static void vlan_parent_wait(vlan_parent_ref vlp, const char * msg) { - int waited = 0; + int waited = 0; - /* other add/remove/multicast-change in progress */ - while (vlan_parent_flags_change_in_progress(vlp)) { - if (g_vlan->verbose) { - struct ifnet * ifp = vlp->vlp_ifp; + /* other add/remove/multicast-change in progress */ + while (vlan_parent_flags_change_in_progress(vlp)) { + if (g_vlan->verbose) { + struct ifnet * ifp = vlp->vlp_ifp; - printf("%s%d: %s msleep\n", ifnet_name(ifp), ifnet_unit(ifp), msg); + printf("%s%d: %s msleep\n", ifnet_name(ifp), ifnet_unit(ifp), msg); + } + waited = 1; + (void)msleep(vlp, vlan_lck_mtx, PZERO, msg, 0); } - waited = 1; - (void)msleep(vlp, vlan_lck_mtx, PZERO, msg, 0); - } - /* prevent other vlan parent remove/add from taking place */ - vlan_parent_flags_set_change_in_progress(vlp); - if (g_vlan->verbose && waited) { - struct ifnet * ifp = vlp->vlp_ifp; + /* prevent other vlan parent remove/add from taking place */ + vlan_parent_flags_set_change_in_progress(vlp); + if (g_vlan->verbose && waited) { + struct ifnet * ifp = vlp->vlp_ifp; - printf("%s%d: %s woke up\n", ifnet_name(ifp), ifnet_unit(ifp), msg); - } - return; + printf("%s%d: %s woke up\n", ifnet_name(ifp), ifnet_unit(ifp), msg); + } + return; } /* * Function: vlan_parent_signal * Purpose: - * Allows the thread that previously invoked vlan_parent_wait() to + * Allows the thread that previously invoked vlan_parent_wait() to * give up exclusive access to the vlan_parent data structure, and wake up * any other threads waiting to access * Notes: @@ -657,36 +657,36 @@ vlan_parent_wait(vlan_parent_ref vlp, const char * msg) static void vlan_parent_signal(vlan_parent_ref vlp, const char * msg) { - struct ifnet * vlp_ifp = vlp->vlp_ifp; - - if (vlan_parent_flags_link_event_required(vlp)) { - vlan_parent_flags_clear_link_event_required(vlp); - if (!vlan_parent_flags_detaching(vlp)) { - u_int32_t event_code = vlp->vlp_event_code; - ifvlan_ref ifv; - - vlan_unlock(); - - /* we can safely walk the list unlocked */ - LIST_FOREACH(ifv, &vlp->vlp_vlan_list, ifv_vlan_list) { - struct 
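[vlan_parent_wait() below implements exclusive access with msleep() on the vlan_parent as the wait channel and VLPF_CHANGE_IN_PROGRESS as the busy bit; vlan_parent_signal() clears the bit and wakeup()s waiters. In pthread terms this is a condition-variable loop (parent_wait/parent_signal are illustrative names):

    #include <pthread.h>

    struct parent {
        pthread_mutex_t *lock;                   /* the global vlan lock */
        pthread_cond_t wake;                     /* stands in for the channel */
        int change_in_progress;                  /* VLPF_CHANGE_IN_PROGRESS */
    };

    /* Call with *lock held; returns with it held and this thread owning
     * exclusive change rights, like vlan_parent_wait(). */
    static void
    parent_wait(struct parent *p)
    {
        while (p->change_in_progress)
            pthread_cond_wait(&p->wake, p->lock);  /* drops + retakes lock */
        p->change_in_progress = 1;
    }

    static void
    parent_signal(struct parent *p)
    {
        p->change_in_progress = 0;
        pthread_cond_broadcast(&p->wake);        /* models wakeup(vlp) */
    }

The crucial property, spelled out in the function's own comment, is that the caller may then drop vlan_lock() for slow or side-effectful work: the busy bit, not the mutex, is what excludes other add/remove operations.]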
ifnet * ifp = ifv->ifv_ifp; - - interface_link_event(ifp, event_code); - } - if (g_vlan->verbose) { - printf("%s%d: propagated link event to vlans\n", - ifnet_name(vlp_ifp), ifnet_unit(vlp_ifp)); - } - vlan_lock(); - } - } - vlan_parent_flags_clear_change_in_progress(vlp); - wakeup((caddr_t)vlp); - if (g_vlan->verbose) { - printf("%s%d: %s wakeup\n", - ifnet_name(vlp_ifp), ifnet_unit(vlp_ifp), msg); - } - return; + struct ifnet * vlp_ifp = vlp->vlp_ifp; + + if (vlan_parent_flags_link_event_required(vlp)) { + vlan_parent_flags_clear_link_event_required(vlp); + if (!vlan_parent_flags_detaching(vlp)) { + u_int32_t event_code = vlp->vlp_event_code; + ifvlan_ref ifv; + + vlan_unlock(); + + /* we can safely walk the list unlocked */ + LIST_FOREACH(ifv, &vlp->vlp_vlan_list, ifv_vlan_list) { + struct ifnet * ifp = ifv->ifv_ifp; + + interface_link_event(ifp, event_code); + } + if (g_vlan->verbose) { + printf("%s%d: propagated link event to vlans\n", + ifnet_name(vlp_ifp), ifnet_unit(vlp_ifp)); + } + vlan_lock(); + } + } + vlan_parent_flags_clear_change_in_progress(vlp); + wakeup((caddr_t)vlp); + if (g_vlan->verbose) { + printf("%s%d: %s wakeup\n", + ifnet_name(vlp_ifp), ifnet_unit(vlp_ifp), msg); + } + return; } /* @@ -700,108 +700,108 @@ vlan_parent_signal(vlan_parent_ref vlp, const char * msg) static int vlan_setmulti(struct ifnet * ifp) { - int error = 0; - ifvlan_ref ifv; - struct ifnet * p; - vlan_parent_ref vlp = NULL; - - vlan_lock(); - ifv = ifnet_get_ifvlan_retained(ifp); - if (ifv == NULL) { - goto unlock_done; - } - vlp = ifvlan_get_vlan_parent_retained(ifv); - if (vlp == NULL) { - /* no parent, no need to program the multicast filter */ - goto unlock_done; - } - vlan_parent_wait(vlp, "vlan_setmulti"); - - /* check again, things could have changed */ - if (ifnet_ifvlan_vlan_parent_ok(ifp, ifv, vlp) == FALSE) { - goto signal_done; - } - p = vlp->vlp_ifp; - vlan_unlock(); - - /* update parent interface with our multicast addresses */ - error = multicast_list_program(&ifv->ifv_multicast, ifp, p); - - vlan_lock(); - - signal_done: - vlan_parent_signal(vlp, "vlan_setmulti"); - - unlock_done: - vlan_unlock(); - if (ifv != NULL) { - ifvlan_release(ifv); - } - if (vlp != NULL) { - vlan_parent_release(vlp); - } - return (error); + int error = 0; + ifvlan_ref ifv; + struct ifnet * p; + vlan_parent_ref vlp = NULL; + + vlan_lock(); + ifv = ifnet_get_ifvlan_retained(ifp); + if (ifv == NULL) { + goto unlock_done; + } + vlp = ifvlan_get_vlan_parent_retained(ifv); + if (vlp == NULL) { + /* no parent, no need to program the multicast filter */ + goto unlock_done; + } + vlan_parent_wait(vlp, "vlan_setmulti"); + + /* check again, things could have changed */ + if (ifnet_ifvlan_vlan_parent_ok(ifp, ifv, vlp) == FALSE) { + goto signal_done; + } + p = vlp->vlp_ifp; + vlan_unlock(); + + /* update parent interface with our multicast addresses */ + error = multicast_list_program(&ifv->ifv_multicast, ifp, p); + + vlan_lock(); + +signal_done: + vlan_parent_signal(vlp, "vlan_setmulti"); + +unlock_done: + vlan_unlock(); + if (ifv != NULL) { + ifvlan_release(ifv); + } + if (vlp != NULL) { + vlan_parent_release(vlp); + } + return error; } /** - ** vlan_parent list manipulation/lookup routines - **/ +** vlan_parent list manipulation/lookup routines +**/ static vlan_parent_ref parent_list_lookup(struct ifnet * p) { - vlan_parent_ref vlp; + vlan_parent_ref vlp; - LIST_FOREACH(vlp, &g_vlan->parent_list, vlp_parent_list) { - if (vlp->vlp_ifp == p) { - return (vlp); + LIST_FOREACH(vlp, &g_vlan->parent_list, vlp_parent_list) 
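[vlan_setmulti() above shows this file's standard error-handling shape: take retained references under vlan_lock(), drop the lock for the real work, and funnel every exit through shared signal_done/unlock_done labels that release exactly what was taken. Schematically, in compilable form (all names are illustrative, and the lock/work bodies are stubbed out):

    #include <stddef.h>

    struct obj { int refs; };

    static void lock(void)   { }
    static void unlock(void) { }

    static struct obj *
    get_retained(struct obj *o)
    {
        if (o != NULL)
            o->refs++;
        return o;
    }

    static void put(struct obj *o) { o->refs--; }

    static int
    setmulti(struct obj *iface, struct obj *maybe_parent)
    {
        int error = 0;
        struct obj *ifv, *vlp = NULL;

        lock();
        ifv = get_retained(iface);
        if (ifv == NULL)
            goto unlock_done;
        vlp = get_retained(maybe_parent);
        if (vlp == NULL)
            goto unlock_done;                    /* no parent: nothing to do */
        unlock();
        /* real work runs here with the lock dropped */
        lock();
        /* the original signals the parent here (signal_done:) */
    unlock_done:
        unlock();
        if (ifv != NULL)
            put(ifv);
        if (vlp != NULL)
            put(vlp);
        return error;
    }

Note the patch also outdents the labels to column zero (signal_done: rather than an indented label), matching the reformatted style used throughout the file.]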
{ + if (vlp->vlp_ifp == p) { + return vlp; + } } - } - return (NULL); + return NULL; } static ifvlan_ref vlan_parent_lookup_tag(vlan_parent_ref vlp, int tag) { - ifvlan_ref ifv; + ifvlan_ref ifv; - LIST_FOREACH(ifv, &vlp->vlp_vlan_list, ifv_vlan_list) { - if (tag == ifv->ifv_tag) { - return (ifv); + LIST_FOREACH(ifv, &vlp->vlp_vlan_list, ifv_vlan_list) { + if (tag == ifv->ifv_tag) { + return ifv; + } } - } - return (NULL); + return NULL; } -static ifvlan_ref +static ifvlan_ref vlan_lookup_parent_and_tag(struct ifnet * p, int tag) { - vlan_parent_ref vlp; + vlan_parent_ref vlp; - vlp = parent_list_lookup(p); - if (vlp != NULL) { - return (vlan_parent_lookup_tag(vlp, tag)); - } - return (NULL); + vlp = parent_list_lookup(p); + if (vlp != NULL) { + return vlan_parent_lookup_tag(vlp, tag); + } + return NULL; } static int vlan_parent_find_max_mtu(vlan_parent_ref vlp, ifvlan_ref exclude_ifv) { - int max_mtu = 0; - ifvlan_ref ifv; + int max_mtu = 0; + ifvlan_ref ifv; - LIST_FOREACH(ifv, &vlp->vlp_vlan_list, ifv_vlan_list) { - int req_mtu; + LIST_FOREACH(ifv, &vlp->vlp_vlan_list, ifv_vlan_list) { + int req_mtu; - if (exclude_ifv == ifv) { - continue; - } - req_mtu = ifnet_mtu(ifv->ifv_ifp) + ifv->ifv_mtufudge; - if (req_mtu > max_mtu) { - max_mtu = req_mtu; + if (exclude_ifv == ifv) { + continue; + } + req_mtu = ifnet_mtu(ifv->ifv_ifp) + ifv->ifv_mtufudge; + if (req_mtu > max_mtu) { + max_mtu = req_mtu; + } } - } - return (max_mtu); + return max_mtu; } /* @@ -813,153 +813,155 @@ vlan_parent_find_max_mtu(vlan_parent_ref vlp, ifvlan_ref exclude_ifv) static int vlan_parent_create(struct ifnet * p, vlan_parent_ref * ret_vlp) { - int error; - vlan_parent_ref vlp; - - *ret_vlp = NULL; - vlp = _MALLOC(sizeof(*vlp), M_VLAN, M_WAITOK | M_ZERO); - if (vlp == NULL) { - return (ENOMEM); - } - error = siocgifdevmtu(p, &vlp->vlp_devmtu); - if (error != 0) { - printf("vlan_parent_create (%s%d): siocgifdevmtu failed, %d\n", - ifnet_name(p), ifnet_unit(p), error); - FREE(vlp, M_VLAN); - return (error); - } - LIST_INIT(&vlp->vlp_vlan_list); - vlp->vlp_ifp = p; - vlp->vlp_retain_count = 1; - vlp->vlp_signature = VLP_SIGNATURE; - if (ifnet_offload(p) - & (IF_HWASSIST_VLAN_MTU | IF_HWASSIST_VLAN_TAGGING)) { - vlan_parent_flags_set_supports_vlan_mtu(vlp); - } - *ret_vlp = vlp; - return (0); + int error; + vlan_parent_ref vlp; + + *ret_vlp = NULL; + vlp = _MALLOC(sizeof(*vlp), M_VLAN, M_WAITOK | M_ZERO); + if (vlp == NULL) { + return ENOMEM; + } + error = siocgifdevmtu(p, &vlp->vlp_devmtu); + if (error != 0) { + printf("vlan_parent_create (%s%d): siocgifdevmtu failed, %d\n", + ifnet_name(p), ifnet_unit(p), error); + FREE(vlp, M_VLAN); + return error; + } + LIST_INIT(&vlp->vlp_vlan_list); + vlp->vlp_ifp = p; + vlp->vlp_retain_count = 1; + vlp->vlp_signature = VLP_SIGNATURE; + if (ifnet_offload(p) + & (IF_HWASSIST_VLAN_MTU | IF_HWASSIST_VLAN_TAGGING)) { + vlan_parent_flags_set_supports_vlan_mtu(vlp); + } + *ret_vlp = vlp; + return 0; } static void vlan_parent_remove_all_vlans(struct ifnet * p) { - ifvlan_ref ifv; - int need_vlp_release = 0; - ifvlan_ref next; - vlan_parent_ref vlp; + ifvlan_ref ifv; + int need_vlp_release = 0; + ifvlan_ref next; + vlan_parent_ref vlp; - vlan_lock(); - vlp = parent_list_lookup(p); - if (vlp == NULL || vlan_parent_flags_detaching(vlp)) { - /* no VLAN's */ + vlan_lock(); + vlp = parent_list_lookup(p); + if (vlp == NULL || vlan_parent_flags_detaching(vlp)) { + /* no VLAN's */ + vlan_unlock(); + return; + } + vlan_parent_flags_set_detaching(vlp); + vlan_parent_retain(vlp); + 
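[parent_list_lookup() and vlan_parent_lookup_tag() above are straight LIST_FOREACH scans over BSD queue(3) lists. For reference, the same construction as a standalone program (<sys/queue.h> ships with BSDs and glibc; entry/lookup_tag are illustrative):

    #include <stdio.h>
    #include <sys/queue.h>

    struct entry {
        int tag;
        LIST_ENTRY(entry) link;                  /* embedded linkage */
    };

    LIST_HEAD(entry_list, entry);

    /* Linear scan, same shape as vlan_parent_lookup_tag(). */
    static struct entry *
    lookup_tag(struct entry_list *list, int tag)
    {
        struct entry *e;

        LIST_FOREACH(e, list, link) {
            if (e->tag == tag)
                return e;
        }
        return NULL;
    }

    int
    main(void)
    {
        struct entry_list list = LIST_HEAD_INITIALIZER(list);
        struct entry e1 = { .tag = 42 };

        LIST_INSERT_HEAD(&list, &e1, link);
        printf("found tag 42: %s\n", lookup_tag(&list, 42) ? "yes" : "no");
        return 0;
    }

With one parent per physical interface and one entry per tag, lookups are linear in the number of parents and vlans, and always run under vlan_lock().]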
vlan_parent_wait(vlp, "vlan_parent_remove_all_vlans"); + need_vlp_release++; + + /* check again */ + if (parent_list_lookup(p) != vlp) { + goto signal_done; + } + + for (ifv = LIST_FIRST(&vlp->vlp_vlan_list); ifv != NULL; ifv = next) { + struct ifnet * ifp = ifv->ifv_ifp; + int removed; + + next = LIST_NEXT(ifv, ifv_vlan_list); + removed = vlan_remove(ifv, FALSE); + if (removed) { + vlan_unlock(); + ifnet_detach(ifp); + vlan_lock(); + } + } + + /* the vlan parent has no more VLAN's */ + ifnet_set_eflags(p, 0, IFEF_VLAN); /* clear IFEF_VLAN */ + + LIST_REMOVE(vlp, vlp_parent_list); + need_vlp_release++; /* one for being in the list */ + need_vlp_release++; /* final reference */ + +signal_done: + vlan_parent_signal(vlp, "vlan_parent_remove_all_vlans"); vlan_unlock(); + + while (need_vlp_release--) { + vlan_parent_release(vlp); + } return; - } - vlan_parent_flags_set_detaching(vlp); - vlan_parent_retain(vlp); - vlan_parent_wait(vlp, "vlan_parent_remove_all_vlans"); - need_vlp_release++; - - /* check again */ - if (parent_list_lookup(p) != vlp) { - goto signal_done; - } - - for (ifv = LIST_FIRST(&vlp->vlp_vlan_list); ifv != NULL; ifv = next) { - struct ifnet * ifp = ifv->ifv_ifp; - int removed; - - next = LIST_NEXT(ifv, ifv_vlan_list); - removed = vlan_remove(ifv, FALSE); - if (removed) { - vlan_unlock(); - ifnet_detach(ifp); - vlan_lock(); - } - } - - /* the vlan parent has no more VLAN's */ - ifnet_set_eflags(p, 0, IFEF_VLAN); /* clear IFEF_VLAN */ - - LIST_REMOVE(vlp, vlp_parent_list); - need_vlp_release++; /* one for being in the list */ - need_vlp_release++; /* final reference */ - - signal_done: - vlan_parent_signal(vlp, "vlan_parent_remove_all_vlans"); - vlan_unlock(); - - while (need_vlp_release--) { - vlan_parent_release(vlp); - } - return; } static __inline__ int vlan_parent_no_vlans(vlan_parent_ref vlp) { - return (LIST_EMPTY(&vlp->vlp_vlan_list)); + return LIST_EMPTY(&vlp->vlp_vlan_list); } static void vlan_parent_add_vlan(vlan_parent_ref vlp, ifvlan_ref ifv, int tag) { - LIST_INSERT_HEAD(&vlp->vlp_vlan_list, ifv, ifv_vlan_list); - ifv->ifv_vlp = vlp; - ifv->ifv_tag = tag; - return; + LIST_INSERT_HEAD(&vlp->vlp_vlan_list, ifv, ifv_vlan_list); + ifv->ifv_vlp = vlp; + ifv->ifv_tag = tag; + return; } static void vlan_parent_remove_vlan(__unused vlan_parent_ref vlp, ifvlan_ref ifv) { - ifv->ifv_vlp = NULL; - LIST_REMOVE(ifv, ifv_vlan_list); - return; + ifv->ifv_vlp = NULL; + LIST_REMOVE(ifv, ifv_vlan_list); + return; } static int vlan_clone_attach(void) { - int error; + int error; - error = if_clone_attach(&vlan_cloner); - if (error != 0) - return error; - vlan_lock_init(); - return 0; + error = if_clone_attach(&vlan_cloner); + if (error != 0) { + return error; + } + vlan_lock_init(); + return 0; } static int vlan_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) { - int error; - ifvlan_ref ifv; - ifnet_t ifp; - struct ifnet_init_eparams vlan_init; - + int error; + ifvlan_ref ifv; + ifnet_t ifp; + struct ifnet_init_eparams vlan_init; + error = vlan_globals_init(); if (error != 0) { - return (error); + return error; } ifv = if_clone_softc_allocate(&vlan_cloner); - if (ifv == NULL) + if (ifv == NULL) { return ENOBUFS; + } ifv->ifv_retain_count = 1; ifv->ifv_signature = IFV_SIGNATURE; multicast_list_init(&ifv->ifv_multicast); - + /* use the interface name as the unique id for ifp recycle */ if ((unsigned int) snprintf(ifv->ifv_name, sizeof(ifv->ifv_name), "%s%d", - ifc->ifc_name, unit) >= sizeof(ifv->ifv_name)) { - ifvlan_release(ifv); - return (EINVAL); + 
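[The removal loop in vlan_parent_remove_all_vlans() above must drop vlan_lock() around ifnet_detach(), so it saves LIST_NEXT() before unlocking; that is safe only because vlan_parent_wait() has already granted this thread exclusive change rights. The traversal skeleton (vif/remove_all are illustrative; this model's mutex alone would not make the saved next pointer safe):

    #include <pthread.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct vif {
        LIST_ENTRY(vif) link;
    };

    LIST_HEAD(vif_list, vif);

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    remove_all(struct vif_list *head)
    {
        struct vif *v, *next;

        pthread_mutex_lock(&list_lock);
        for (v = LIST_FIRST(head); v != NULL; v = next) {
            next = LIST_NEXT(v, link);           /* grab next before unlocking */
            LIST_REMOVE(v, link);
            pthread_mutex_unlock(&list_lock);
            free(v);                             /* blocking work, lock dropped */
            pthread_mutex_lock(&list_lock);
        }
        pthread_mutex_unlock(&list_lock);
    }

In the kernel version the element is not freed inside the loop; vlan_remove()/ifnet_detach() start a teardown whose final release happens later through the detach path.]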
ifc->ifc_name, unit) >= sizeof(ifv->ifv_name)) { + ifvlan_release(ifv); + return EINVAL; } - + bzero(&vlan_init, sizeof(vlan_init)); vlan_init.ver = IFNET_INIT_CURRENT_VERSION; - vlan_init.len = sizeof (vlan_init); + vlan_init.len = sizeof(vlan_init); vlan_init.flags = IFNET_INIT_LEGACY; vlan_init.uniqueid = ifv->ifv_name; vlan_init.uniqueid_len = strlen(ifv->ifv_name); @@ -980,1049 +982,1043 @@ vlan_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) vlan_init.broadcast_addr = etherbroadcastaddr; vlan_init.broadcast_len = ETHER_ADDR_LEN; error = ifnet_allocate_extended(&vlan_init, &ifp); - + if (error) { - ifvlan_release(ifv); - return (error); + ifvlan_release(ifv); + return error; } - + ifnet_set_offload(ifp, 0); ifnet_set_addrlen(ifp, ETHER_ADDR_LEN); /* XXX ethernet specific */ ifnet_set_baudrate(ifp, 0); ifnet_set_hdrlen(ifp, ETHER_VLAN_ENCAP_LEN); - + error = ifnet_attach(ifp, NULL); if (error) { - ifnet_release(ifp); - ifvlan_release(ifv); - return (error); + ifnet_release(ifp); + ifvlan_release(ifv); + return error; } ifv->ifv_ifp = ifp; - + /* attach as ethernet */ bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); - return (0); + return 0; } static int vlan_remove(ifvlan_ref ifv, int need_to_wait) { - vlan_assert_lock_held(); - if (ifvlan_flags_detaching(ifv)) { - return (0); - } - ifvlan_flags_set_detaching(ifv); - vlan_unconfig(ifv, need_to_wait); - return (1); + vlan_assert_lock_held(); + if (ifvlan_flags_detaching(ifv)) { + return 0; + } + ifvlan_flags_set_detaching(ifv); + vlan_unconfig(ifv, need_to_wait); + return 1; } static int vlan_clone_destroy(struct ifnet *ifp) { - ifvlan_ref ifv; + ifvlan_ref ifv; - vlan_lock(); - ifv = ifnet_get_ifvlan_retained(ifp); - if (ifv == NULL) { - vlan_unlock(); - return 0; - } - if (vlan_remove(ifv, TRUE) == 0) { + vlan_lock(); + ifv = ifnet_get_ifvlan_retained(ifp); + if (ifv == NULL) { + vlan_unlock(); + return 0; + } + if (vlan_remove(ifv, TRUE) == 0) { + vlan_unlock(); + ifvlan_release(ifv); + return 0; + } vlan_unlock(); ifvlan_release(ifv); - return 0; - } - vlan_unlock(); - ifvlan_release(ifv); - ifnet_detach(ifp); + ifnet_detach(ifp); - return 0; + return 0; } static int vlan_output(struct ifnet * ifp, struct mbuf * m) { - struct ether_vlan_header * evl; - int encaplen; - ifvlan_ref ifv; - struct ifnet * p; - int soft_vlan; - u_short tag; - vlan_parent_ref vlp = NULL; - int err; - struct flowadv adv = { FADV_SUCCESS }; - - if (m == 0) { - return (0); - } - if ((m->m_flags & M_PKTHDR) == 0) { - m_freem_list(m); - return (0); - } - vlan_lock(); - ifv = ifnet_get_ifvlan_retained(ifp); - if (ifv == NULL || ifvlan_flags_ready(ifv) == 0) { - goto unlock_done; - } - vlp = ifvlan_get_vlan_parent_retained(ifv); - if (vlp == NULL) { - goto unlock_done; - } - p = vlp->vlp_ifp; - (void)ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0); - soft_vlan = (ifnet_offload(p) & IF_HWASSIST_VLAN_TAGGING) == 0; - tag = ifv->ifv_tag; - encaplen = ifv->ifv_encaplen; - vlan_unlock(); - - ifvlan_release(ifv); - vlan_parent_release(vlp); - - bpf_tap_out(ifp, DLT_EN10MB, m, NULL, 0); - - /* do not run parent's if_output() if the parent is not up */ - if ((ifnet_flags(p) & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) { - m_freem(m); - atomic_add_64(&ifp->if_collisions, 1); - return (0); - } - /* - * If underlying interface can do VLAN tag insertion itself, - * just pass the packet along. However, we need some way to - * tell the interface where the packet came from so that it - * knows how to find the VLAN tag to use. 
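[vlan_output() below picks between two tagging strategies: if the parent advertises IF_HWASSIST_VLAN_TAGGING, the tag rides out-of-band in m_pkthdr.vlan_tag with CSUM_VLAN_TAG_VALID set; otherwise the code prepends 4 bytes and builds the 802.1Q header in-line. A sketch of the in-line transform the soft_vlan path performs (the struct layout mirrors ether_vlan_header; buffer handling is simplified, with no mbufs):

    #include <arpa/inet.h>                       /* htons */
    #include <stdint.h>
    #include <string.h>

    #define ETHER_ADDR_LEN 6
    #define ETHERTYPE_VLAN 0x8100

    struct vlan_hdr {                            /* like ether_vlan_header */
        uint8_t  dhost[ETHER_ADDR_LEN];
        uint8_t  shost[ETHER_ADDR_LEN];
        uint16_t encap_proto;                    /* becomes ETHERTYPE_VLAN */
        uint16_t tag;                            /* TCI, network order */
        uint16_t proto;                          /* original ethertype */
    };

    /* buf has 4 spare bytes in front of a 14-byte Ethernet header (what
     * M_PREPEND provides). Slide dst/src/type forward, then tag. */
    static void
    insert_vlan_tag(uint8_t *buf, uint16_t tci)
    {
        struct vlan_hdr *evl;

        memmove(buf, buf + 4, 2 * ETHER_ADDR_LEN + 2);
        evl = (struct vlan_hdr *)buf;            /* assumes 2-byte alignment */
        evl->proto = evl->encap_proto;           /* keep the original type */
        evl->encap_proto = htons(ETHERTYPE_VLAN);
        evl->tag = htons(tci);
    }

This mirrors the bcopy()/evl_* assignments in the kernel code; the mbuf bookkeeping (M_PREPEND, m_pullup) is what the sketch elides.]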
We use a field in - * the mbuf header to store the VLAN tag, and a bit in the - * csum_flags field to mark the field as valid. - */ - if (soft_vlan == 0) { - m->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID; - m->m_pkthdr.vlan_tag = tag; - } else { - M_PREPEND(m, encaplen, M_DONTWAIT, 1); - if (m == NULL) { - printf("%s%d: unable to prepend VLAN header\n", ifnet_name(ifp), - ifnet_unit(ifp)); - atomic_add_64(&ifp->if_oerrors, 1); - return (0); - } - /* M_PREPEND takes care of m_len, m_pkthdr.len for us */ - if (m->m_len < (int)sizeof(*evl)) { - m = m_pullup(m, sizeof(*evl)); - if (m == NULL) { - printf("%s%d: unable to pullup VLAN header\n", ifnet_name(ifp), - ifnet_unit(ifp)); - atomic_add_64(&ifp->if_oerrors, 1); - return (0); - } - } - + struct ether_vlan_header * evl; + int encaplen; + ifvlan_ref ifv; + struct ifnet * p; + int soft_vlan; + u_short tag; + vlan_parent_ref vlp = NULL; + int err; + struct flowadv adv = { FADV_SUCCESS }; + + if (m == 0) { + return 0; + } + if ((m->m_flags & M_PKTHDR) == 0) { + m_freem_list(m); + return 0; + } + vlan_lock(); + ifv = ifnet_get_ifvlan_retained(ifp); + if (ifv == NULL || ifvlan_flags_ready(ifv) == 0) { + goto unlock_done; + } + vlp = ifvlan_get_vlan_parent_retained(ifv); + if (vlp == NULL) { + goto unlock_done; + } + p = vlp->vlp_ifp; + (void)ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0); + soft_vlan = (ifnet_offload(p) & IF_HWASSIST_VLAN_TAGGING) == 0; + tag = ifv->ifv_tag; + encaplen = ifv->ifv_encaplen; + vlan_unlock(); + + ifvlan_release(ifv); + vlan_parent_release(vlp); + + bpf_tap_out(ifp, DLT_EN10MB, m, NULL, 0); + + /* do not run parent's if_output() if the parent is not up */ + if ((ifnet_flags(p) & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) { + m_freem(m); + atomic_add_64(&ifp->if_collisions, 1); + return 0; + } /* - * Transform the Ethernet header into an Ethernet header - * with 802.1Q encapsulation. + * If underlying interface can do VLAN tag insertion itself, + * just pass the packet along. However, we need some way to + * tell the interface where the packet came from so that it + * knows how to find the VLAN tag to use. We use a field in + * the mbuf header to store the VLAN tag, and a bit in the + * csum_flags field to mark the field as valid. */ - bcopy(mtod(m, char *) + encaplen, - mtod(m, char *), ETHER_HDR_LEN); - evl = mtod(m, struct ether_vlan_header *); - evl->evl_proto = evl->evl_encap_proto; - evl->evl_encap_proto = htons(ETHERTYPE_VLAN); - evl->evl_tag = htons(tag); - } - - err = dlil_output(p, PF_VLAN, m, NULL, NULL, 1, &adv); + if (soft_vlan == 0) { + m->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID; + m->m_pkthdr.vlan_tag = tag; + } else { + M_PREPEND(m, encaplen, M_DONTWAIT, 1); + if (m == NULL) { + printf("%s%d: unable to prepend VLAN header\n", ifnet_name(ifp), + ifnet_unit(ifp)); + atomic_add_64(&ifp->if_oerrors, 1); + return 0; + } + /* M_PREPEND takes care of m_len, m_pkthdr.len for us */ + if (m->m_len < (int)sizeof(*evl)) { + m = m_pullup(m, sizeof(*evl)); + if (m == NULL) { + printf("%s%d: unable to pullup VLAN header\n", ifnet_name(ifp), + ifnet_unit(ifp)); + atomic_add_64(&ifp->if_oerrors, 1); + return 0; + } + } - if (err == 0) { - if (adv.code == FADV_FLOW_CONTROLLED) { - err = EQFULL; - } else if (adv.code == FADV_SUSPENDED) { - err = EQSUSPENDED; + /* + * Transform the Ethernet header into an Ethernet header + * with 802.1Q encapsulation. 
+ */ + bcopy(mtod(m, char *) + encaplen, + mtod(m, char *), ETHER_HDR_LEN); + evl = mtod(m, struct ether_vlan_header *); + evl->evl_proto = evl->evl_encap_proto; + evl->evl_encap_proto = htons(ETHERTYPE_VLAN); + evl->evl_tag = htons(tag); } - } - return (err); + err = dlil_output(p, PF_VLAN, m, NULL, NULL, 1, &adv); - unlock_done: - vlan_unlock(); - if (ifv != NULL) { - ifvlan_release(ifv); - } - if (vlp != NULL) { - vlan_parent_release(vlp); - } - m_freem_list(m); - return (0); + if (err == 0) { + if (adv.code == FADV_FLOW_CONTROLLED) { + err = EQFULL; + } else if (adv.code == FADV_SUSPENDED) { + err = EQSUSPENDED; + } + } + return err; + +unlock_done: + vlan_unlock(); + if (ifv != NULL) { + ifvlan_release(ifv); + } + if (vlp != NULL) { + vlan_parent_release(vlp); + } + m_freem_list(m); + return 0; } static int vlan_input(ifnet_t p, __unused protocol_family_t protocol, - mbuf_t m, char *frame_header) + mbuf_t m, char *frame_header) { - struct ether_vlan_header * evl; - struct ifnet * ifp = NULL; - int soft_vlan = 0; - u_int tag = 0; + struct ether_vlan_header * evl; + struct ifnet * ifp = NULL; + int soft_vlan = 0; + u_int tag = 0; - if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { - /* - * Packet is tagged, m contains a normal - * Ethernet frame; the tag is stored out-of-band. - */ - m->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID; - tag = EVL_VLANOFTAG(m->m_pkthdr.vlan_tag); - m->m_pkthdr.vlan_tag = 0; - } else { - soft_vlan = 1; - switch (ifnet_type(p)) { - case IFT_ETHER: - if (m->m_len < ETHER_VLAN_ENCAP_LEN) { - m_freem(m); - return 0; - } - evl = (struct ether_vlan_header *)(void *)frame_header; - if (ntohs(evl->evl_proto) == ETHERTYPE_VLAN) { - /* don't allow VLAN within VLAN */ - m_freem(m); - return (0); - } - tag = EVL_VLANOFTAG(ntohs(evl->evl_tag)); - - /* - * Restore the original ethertype. We'll remove - * the encapsulation after we've found the vlan - * interface corresponding to the tag. - */ - evl->evl_encap_proto = evl->evl_proto; - break; - default: - printf("vlan_demux: unsupported if type %u", - ifnet_type(p)); - m_freem(m); - return 0; + if (m->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) { + /* + * Packet is tagged, m contains a normal + * Ethernet frame; the tag is stored out-of-band. + */ + m->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID; + tag = EVL_VLANOFTAG(m->m_pkthdr.vlan_tag); + m->m_pkthdr.vlan_tag = 0; + } else { + soft_vlan = 1; + switch (ifnet_type(p)) { + case IFT_ETHER: + if (m->m_len < ETHER_VLAN_ENCAP_LEN) { + m_freem(m); + return 0; + } + evl = (struct ether_vlan_header *)(void *)frame_header; + if (ntohs(evl->evl_proto) == ETHERTYPE_VLAN) { + /* don't allow VLAN within VLAN */ + m_freem(m); + return 0; + } + tag = EVL_VLANOFTAG(ntohs(evl->evl_tag)); + + /* + * Restore the original ethertype. We'll remove + * the encapsulation after we've found the vlan + * interface corresponding to the tag. 
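[On input, the tag arrives either out-of-band (CSUM_VLAN_TAG_VALID plus m_pkthdr.vlan_tag) or in-line in the frame, and in both cases vlan_input() reduces the 16-bit TCI to a VLAN ID with EVL_VLANOFTAG(). The TCI layout is PCP(3 bits) | DEI(1) | VID(12), so the extraction macros are just masks and shifts:

    #include <arpa/inet.h>
    #include <stdint.h>

    #define EVL_VLANOFTAG(tci)  ((tci) & 0x0fff)       /* low 12 bits: VID */
    #define EVL_PRIOFTAG(tci)   (((tci) >> 13) & 0x7)  /* top 3 bits: PCP */

    static uint16_t
    vid_from_wire(uint16_t tag_be)                     /* tag as on the wire */
    {
        return EVL_VLANOFTAG(ntohs(tag_be));     /* e.g. 0x2042 -> VID 0x042 */
    }

A VID of 0 means priority-tagged with no VLAN, which is why the tag != 0 test below routes such frames up through the parent interface instead of a vlan interface.]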
+ */ + evl->evl_encap_proto = evl->evl_proto; + break; + default: + printf("vlan_demux: unsupported if type %u", + ifnet_type(p)); + m_freem(m); + return 0; + } } - } - if (tag != 0) { - ifvlan_ref ifv; + if (tag != 0) { + ifvlan_ref ifv; - if ((ifnet_eflags(p) & IFEF_VLAN) == 0) { - /* don't bother looking through the VLAN list */ - m_freem(m); - return 0; + if ((ifnet_eflags(p) & IFEF_VLAN) == 0) { + /* don't bother looking through the VLAN list */ + m_freem(m); + return 0; + } + vlan_lock(); + ifv = vlan_lookup_parent_and_tag(p, tag); + if (ifv != NULL) { + ifp = ifv->ifv_ifp; + } + if (ifv == NULL + || ifvlan_flags_ready(ifv) == 0 + || (ifnet_flags(ifp) & IFF_UP) == 0) { + vlan_unlock(); + m_freem(m); + return 0; + } + vlan_unlock(); } - vlan_lock(); - ifv = vlan_lookup_parent_and_tag(p, tag); - if (ifv != NULL) { - ifp = ifv->ifv_ifp; + if (soft_vlan) { + /* + * Packet had an in-line encapsulation header; + * remove it. The original header has already + * been fixed up above. + */ + m->m_len -= ETHER_VLAN_ENCAP_LEN; + m->m_data += ETHER_VLAN_ENCAP_LEN; + m->m_pkthdr.len -= ETHER_VLAN_ENCAP_LEN; + m->m_pkthdr.csum_flags = 0; /* can't trust hardware checksum */ } - if (ifv == NULL - || ifvlan_flags_ready(ifv) == 0 - || (ifnet_flags(ifp) & IFF_UP) == 0) { - vlan_unlock(); - m_freem(m); - return 0; + if (tag != 0) { + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.pkt_hdr = frame_header; + (void)ifnet_stat_increment_in(ifp, 1, + m->m_pkthdr.len + ETHER_HDR_LEN, 0); + bpf_tap_in(ifp, DLT_EN10MB, m, frame_header, ETHER_HDR_LEN); + /* We found a vlan interface, inject on that interface. */ + dlil_input_packet_list(ifp, m); + } else { + m->m_pkthdr.pkt_hdr = frame_header; + /* Send priority-tagged packet up through the parent */ + dlil_input_packet_list(p, m); } - vlan_unlock(); - } - if (soft_vlan) { - /* - * Packet had an in-line encapsulation header; - * remove it. The original header has already - * been fixed up above. - */ - m->m_len -= ETHER_VLAN_ENCAP_LEN; - m->m_data += ETHER_VLAN_ENCAP_LEN; - m->m_pkthdr.len -= ETHER_VLAN_ENCAP_LEN; - m->m_pkthdr.csum_flags = 0; /* can't trust hardware checksum */ - } - if (tag != 0) { - m->m_pkthdr.rcvif = ifp; - m->m_pkthdr.pkt_hdr = frame_header; - (void)ifnet_stat_increment_in(ifp, 1, - m->m_pkthdr.len + ETHER_HDR_LEN, 0); - bpf_tap_in(ifp, DLT_EN10MB, m, frame_header, ETHER_HDR_LEN); - /* We found a vlan interface, inject on that interface. 
*/ - dlil_input_packet_list(ifp, m); - } else { - m->m_pkthdr.pkt_hdr = frame_header; - /* Send priority-tagged packet up through the parent */ - dlil_input_packet_list(p, m); - } - return 0; + return 0; } static int vlan_config(struct ifnet * ifp, struct ifnet * p, int tag) { - int error; - int first_vlan = FALSE; - ifvlan_ref ifv = NULL; - int ifv_added = FALSE; - int need_vlp_release = 0; - vlan_parent_ref new_vlp = NULL; - ifnet_offload_t offload; - u_int16_t parent_flags; - vlan_parent_ref vlp = NULL; - - /* pre-allocate space for vlan_parent, in case we're first */ - error = vlan_parent_create(p, &new_vlp); - if (error != 0) { - return (error); - } - - vlan_lock(); - ifv = ifnet_get_ifvlan_retained(ifp); - if (ifv == NULL || ifv->ifv_vlp != NULL) { - vlan_unlock(); - if (ifv != NULL) { - ifvlan_release(ifv); + int error; + int first_vlan = FALSE; + ifvlan_ref ifv = NULL; + int ifv_added = FALSE; + int need_vlp_release = 0; + vlan_parent_ref new_vlp = NULL; + ifnet_offload_t offload; + u_int16_t parent_flags; + vlan_parent_ref vlp = NULL; + + /* pre-allocate space for vlan_parent, in case we're first */ + error = vlan_parent_create(p, &new_vlp); + if (error != 0) { + return error; } - vlan_parent_release(new_vlp); - return (EBUSY); - } - vlp = parent_list_lookup(p); - if (vlp != NULL) { - vlan_parent_retain(vlp); - need_vlp_release++; - if (vlan_parent_lookup_tag(vlp, tag) != NULL) { - /* already a VLAN with that tag on this interface */ - error = EADDRINUSE; - goto unlock_done; + + vlan_lock(); + ifv = ifnet_get_ifvlan_retained(ifp); + if (ifv == NULL || ifv->ifv_vlp != NULL) { + vlan_unlock(); + if (ifv != NULL) { + ifvlan_release(ifv); + } + vlan_parent_release(new_vlp); + return EBUSY; + } + vlp = parent_list_lookup(p); + if (vlp != NULL) { + vlan_parent_retain(vlp); + need_vlp_release++; + if (vlan_parent_lookup_tag(vlp, tag) != NULL) { + /* already a VLAN with that tag on this interface */ + error = EADDRINUSE; + goto unlock_done; + } + } else { + /* one for being in the list */ + vlan_parent_retain(new_vlp); + + /* we're the first VLAN on this interface */ + LIST_INSERT_HEAD(&g_vlan->parent_list, new_vlp, vlp_parent_list); + vlp = new_vlp; + + vlan_parent_retain(vlp); + need_vlp_release++; } - } - else { - /* one for being in the list */ - vlan_parent_retain(new_vlp); - /* we're the first VLAN on this interface */ - LIST_INSERT_HEAD(&g_vlan->parent_list, new_vlp, vlp_parent_list); - vlp = new_vlp; + /* need to wait to ensure no one else is trying to add/remove */ + vlan_parent_wait(vlp, "vlan_config"); - vlan_parent_retain(vlp); - need_vlp_release++; - } - - /* need to wait to ensure no one else is trying to add/remove */ - vlan_parent_wait(vlp, "vlan_config"); - - if (ifnet_get_ifvlan(ifp) != ifv) { - error = EINVAL; - goto signal_done; - } - - /* check again because someone might have gotten in */ - if (parent_list_lookup(p) != vlp) { - error = EBUSY; - goto signal_done; - } - - if (vlan_parent_flags_detaching(vlp) - || ifvlan_flags_detaching(ifv) || ifv->ifv_vlp != NULL) { - error = EBUSY; - goto signal_done; - } - - /* check again because someone might have gotten the tag */ - if (vlan_parent_lookup_tag(vlp, tag) != NULL) { - /* already a VLAN with that tag on this interface */ - error = EADDRINUSE; - goto signal_done; - } - - if (vlan_parent_no_vlans(vlp)) { - first_vlan = TRUE; - } - vlan_parent_add_vlan(vlp, ifv, tag); - ifvlan_retain(ifv); /* parent references ifv */ - ifv_added = TRUE; - - /* check whether bond interface is using parent interface */ - 
ifnet_lock_exclusive(p); - if ((ifnet_eflags(p) & IFEF_BOND) != 0) { - ifnet_lock_done(p); - /* don't allow VLAN over interface that's already part of a bond */ - error = EBUSY; - goto signal_done; - } - /* prevent BOND interface from using it */ - /* Can't use ifnet_set_eflags because that would take the lock */ - p->if_eflags |= IFEF_VLAN; - ifnet_lock_done(p); - vlan_unlock(); - - if (first_vlan) { - /* attach our VLAN "protocol" to the interface */ - error = vlan_attach_protocol(p); - if (error) { - vlan_lock(); - goto signal_done; + if (ifnet_get_ifvlan(ifp) != ifv) { + error = EINVAL; + goto signal_done; + } + + /* check again because someone might have gotten in */ + if (parent_list_lookup(p) != vlp) { + error = EBUSY; + goto signal_done; + } + + if (vlan_parent_flags_detaching(vlp) + || ifvlan_flags_detaching(ifv) || ifv->ifv_vlp != NULL) { + error = EBUSY; + goto signal_done; + } + + /* check again because someone might have gotten the tag */ + if (vlan_parent_lookup_tag(vlp, tag) != NULL) { + /* already a VLAN with that tag on this interface */ + error = EADDRINUSE; + goto signal_done; } - } - /* configure parent to receive our multicast addresses */ - error = multicast_list_program(&ifv->ifv_multicast, ifp, p); - if (error != 0) { + if (vlan_parent_no_vlans(vlp)) { + first_vlan = TRUE; + } + vlan_parent_add_vlan(vlp, ifv, tag); + ifvlan_retain(ifv); /* parent references ifv */ + ifv_added = TRUE; + + /* check whether bond interface is using parent interface */ + ifnet_lock_exclusive(p); + if ((ifnet_eflags(p) & IFEF_BOND) != 0) { + ifnet_lock_done(p); + /* don't allow VLAN over interface that's already part of a bond */ + error = EBUSY; + goto signal_done; + } + /* prevent BOND interface from using it */ + /* Can't use ifnet_set_eflags because that would take the lock */ + p->if_eflags |= IFEF_VLAN; + ifnet_lock_done(p); + vlan_unlock(); + if (first_vlan) { - (void)vlan_detach_protocol(p); + /* attach our VLAN "protocol" to the interface */ + error = vlan_attach_protocol(p); + if (error) { + vlan_lock(); + goto signal_done; + } } - vlan_lock(); - goto signal_done; - } - /* set our ethernet address to that of the parent */ - ifnet_set_lladdr_and_type(ifp, IF_LLADDR(p), ETHER_ADDR_LEN, IFT_ETHER); + /* configure parent to receive our multicast addresses */ + error = multicast_list_program(&ifv->ifv_multicast, ifp, p); + if (error != 0) { + if (first_vlan) { + (void)vlan_detach_protocol(p); + } + vlan_lock(); + goto signal_done; + } - /* no failures past this point */ - vlan_lock(); + /* set our ethernet address to that of the parent */ + ifnet_set_lladdr_and_type(ifp, IF_LLADDR(p), ETHER_ADDR_LEN, IFT_ETHER); + + /* no failures past this point */ + vlan_lock(); + + ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN; + ifv->ifv_flags = 0; + if (vlan_parent_flags_supports_vlan_mtu(vlp)) { + ifv->ifv_mtufudge = 0; + } else { + /* + * Fudge the MTU by the encapsulation size. This + * makes us incompatible with strictly compliant + * 802.1Q implementations, but allows us to use + * the feature with other NetBSD implementations, + * which might still be useful. + */ + ifv->ifv_mtufudge = ifv->ifv_encaplen; + } + ifnet_set_mtu(ifp, ETHERMTU - ifv->ifv_mtufudge); - ifv->ifv_encaplen = ETHER_VLAN_ENCAP_LEN; - ifv->ifv_flags = 0; - if (vlan_parent_flags_supports_vlan_mtu(vlp)) { - ifv->ifv_mtufudge = 0; - } else { /* - * Fudge the MTU by the encapsulation size. 
This - * makes us incompatible with strictly compliant - * 802.1Q implementations, but allows us to use - * the feature with other NetBSD implementations, - * which might still be useful. + * Copy only a selected subset of flags from the parent. + * Other flags are none of our business. */ - ifv->ifv_mtufudge = ifv->ifv_encaplen; - } - ifnet_set_mtu(ifp, ETHERMTU - ifv->ifv_mtufudge); - - /* - * Copy only a selected subset of flags from the parent. - * Other flags are none of our business. - */ - parent_flags = ifnet_flags(p) - & (IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX); - ifnet_set_flags(ifp, parent_flags, - IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX); - - /* use hwassist bits from parent interface, but exclude VLAN bits */ - offload = ifnet_offload(p) & ~(IFNET_VLAN_TAGGING | IFNET_VLAN_MTU); - ifnet_set_offload(ifp, offload); - - ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING); - ifvlan_flags_set_ready(ifv); - vlan_parent_signal(vlp, "vlan_config"); - vlan_unlock(); - if (new_vlp != vlp) { - /* throw it away, it wasn't needed */ - vlan_parent_release(new_vlp); - } - if (ifv != NULL) { - ifvlan_release(ifv); - } - if (first_vlan) { - /* mark the parent interface up */ - ifnet_set_flags(p, IFF_UP, IFF_UP); - (void)ifnet_ioctl(p, 0, SIOCSIFFLAGS, (caddr_t)NULL); - } - return 0; - - signal_done: - vlan_assert_lock_held(); - - if (ifv_added) { - vlan_parent_remove_vlan(vlp, ifv); - if (!vlan_parent_flags_detaching(vlp) && vlan_parent_no_vlans(vlp)) { - /* the vlan parent has no more VLAN's */ - ifnet_set_eflags(p, 0, IFEF_VLAN); - LIST_REMOVE(vlp, vlp_parent_list); - /* release outside of the lock below */ - need_vlp_release++; - - /* one for being in the list */ - need_vlp_release++; + parent_flags = ifnet_flags(p) + & (IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX); + ifnet_set_flags(ifp, parent_flags, + IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX); + + /* use hwassist bits from parent interface, but exclude VLAN bits */ + offload = ifnet_offload(p) & ~(IFNET_VLAN_TAGGING | IFNET_VLAN_MTU); + ifnet_set_offload(ifp, offload); + + ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING); + ifvlan_flags_set_ready(ifv); + vlan_parent_signal(vlp, "vlan_config"); + vlan_unlock(); + if (new_vlp != vlp) { + /* throw it away, it wasn't needed */ + vlan_parent_release(new_vlp); + } + if (ifv != NULL) { + ifvlan_release(ifv); } - } - vlan_parent_signal(vlp, "vlan_config"); + if (first_vlan) { + /* mark the parent interface up */ + ifnet_set_flags(p, IFF_UP, IFF_UP); + (void)ifnet_ioctl(p, 0, SIOCSIFFLAGS, (caddr_t)NULL); + } + return 0; - unlock_done: - vlan_unlock(); +signal_done: + vlan_assert_lock_held(); - while (need_vlp_release--) { - vlan_parent_release(vlp); - } - if (new_vlp != vlp) { - vlan_parent_release(new_vlp); - } - if (ifv != NULL) { if (ifv_added) { - ifvlan_release(ifv); + vlan_parent_remove_vlan(vlp, ifv); + if (!vlan_parent_flags_detaching(vlp) && vlan_parent_no_vlans(vlp)) { + /* the vlan parent has no more VLAN's */ + ifnet_set_eflags(p, 0, IFEF_VLAN); + LIST_REMOVE(vlp, vlp_parent_list); + /* release outside of the lock below */ + need_vlp_release++; + + /* one for being in the list */ + need_vlp_release++; + } } - ifvlan_release(ifv); - } - return (error); + vlan_parent_signal(vlp, "vlan_config"); + +unlock_done: + vlan_unlock(); + + while (need_vlp_release--) { + vlan_parent_release(vlp); + } + if (new_vlp != vlp) { + vlan_parent_release(new_vlp); + } + if (ifv != NULL) { + if (ifv_added) { + ifvlan_release(ifv); + } + ifvlan_release(ifv); + } + return error; } static void 
vlan_link_event(struct ifnet * ifp, struct ifnet * p) { - struct ifmediareq ifmr; + struct ifmediareq ifmr; - /* generate a link event based on the state of the underlying interface */ - bzero(&ifmr, sizeof(ifmr)); - snprintf(ifmr.ifm_name, sizeof(ifmr.ifm_name), - "%s%d", ifnet_name(p), ifnet_unit(p)); - if (ifnet_ioctl(p, 0, SIOCGIFMEDIA, &ifmr) == 0 - && ifmr.ifm_count > 0 && ifmr.ifm_status & IFM_AVALID) { - u_int32_t event; - - event = (ifmr.ifm_status & IFM_ACTIVE) - ? KEV_DL_LINK_ON : KEV_DL_LINK_OFF; - interface_link_event(ifp, event); - } - return; + /* generate a link event based on the state of the underlying interface */ + bzero(&ifmr, sizeof(ifmr)); + snprintf(ifmr.ifm_name, sizeof(ifmr.ifm_name), + "%s%d", ifnet_name(p), ifnet_unit(p)); + if (ifnet_ioctl(p, 0, SIOCGIFMEDIA, &ifmr) == 0 + && ifmr.ifm_count > 0 && ifmr.ifm_status & IFM_AVALID) { + u_int32_t event; + + event = (ifmr.ifm_status & IFM_ACTIVE) + ? KEV_DL_LINK_ON : KEV_DL_LINK_OFF; + interface_link_event(ifp, event); + } + return; } static int vlan_unconfig(ifvlan_ref ifv, int need_to_wait) { - struct ifnet * ifp = ifv->ifv_ifp; - int last_vlan = FALSE; - int need_ifv_release = 0; - int need_vlp_release = 0; - struct ifnet * p; - vlan_parent_ref vlp; - - vlan_assert_lock_held(); - vlp = ifv->ifv_vlp; - if (vlp == NULL) { - return (0); - } - if (need_to_wait) { - need_vlp_release++; - vlan_parent_retain(vlp); - vlan_parent_wait(vlp, "vlan_unconfig"); + struct ifnet * ifp = ifv->ifv_ifp; + int last_vlan = FALSE; + int need_ifv_release = 0; + int need_vlp_release = 0; + struct ifnet * p; + vlan_parent_ref vlp; - /* check again because another thread could be in vlan_unconfig */ - if (ifv != ifnet_get_ifvlan(ifp)) { - goto signal_done; + vlan_assert_lock_held(); + vlp = ifv->ifv_vlp; + if (vlp == NULL) { + return 0; } - if (ifv->ifv_vlp != vlp) { - /* vlan parent changed */ - goto signal_done; + if (need_to_wait) { + need_vlp_release++; + vlan_parent_retain(vlp); + vlan_parent_wait(vlp, "vlan_unconfig"); + + /* check again because another thread could be in vlan_unconfig */ + if (ifv != ifnet_get_ifvlan(ifp)) { + goto signal_done; + } + if (ifv->ifv_vlp != vlp) { + /* vlan parent changed */ + goto signal_done; + } } - } - /* ifv has a reference on vlp, need to remove it */ - need_vlp_release++; - p = vlp->vlp_ifp; + /* ifv has a reference on vlp, need to remove it */ + need_vlp_release++; + p = vlp->vlp_ifp; - /* remember whether we're the last VLAN on the parent */ - if (LIST_NEXT(LIST_FIRST(&vlp->vlp_vlan_list), ifv_vlan_list) == NULL) { - if (g_vlan->verbose) { - printf("vlan_unconfig: last vlan on %s%d\n", - ifnet_name(p), ifnet_unit(p)); + /* remember whether we're the last VLAN on the parent */ + if (LIST_NEXT(LIST_FIRST(&vlp->vlp_vlan_list), ifv_vlan_list) == NULL) { + if (g_vlan->verbose) { + printf("vlan_unconfig: last vlan on %s%d\n", + ifnet_name(p), ifnet_unit(p)); + } + last_vlan = TRUE; } - last_vlan = TRUE; - } - /* back-out any effect our mtu might have had on the parent */ - (void)ifvlan_new_mtu(ifv, ETHERMTU - ifv->ifv_mtufudge); + /* back-out any effect our mtu might have had on the parent */ + (void)ifvlan_new_mtu(ifv, ETHERMTU - ifv->ifv_mtufudge); - vlan_unlock(); + vlan_unlock(); - /* un-join multicast on parent interface */ - (void)multicast_list_remove(&ifv->ifv_multicast); + /* un-join multicast on parent interface */ + (void)multicast_list_remove(&ifv->ifv_multicast); - /* Clear our MAC address. */ - ifnet_set_lladdr_and_type(ifp, NULL, 0, IFT_L2VLAN); + /* Clear our MAC address. 
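+	 * This mirrors, in reverse, the inherit step vlan_config()
+	 * performed when the parent was attached:
+	 *
+	 *	ifnet_set_lladdr_and_type(ifp, IF_LLADDR(p),
+	 *	    ETHER_ADDR_LEN, IFT_ETHER);
+	 *
+	 * reverting the interface to no lladdr and type IFT_L2VLAN.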
*/ + ifnet_set_lladdr_and_type(ifp, NULL, 0, IFT_L2VLAN); - /* detach VLAN "protocol" */ - if (last_vlan) { - (void)vlan_detach_protocol(p); - } + /* detach VLAN "protocol" */ + if (last_vlan) { + (void)vlan_detach_protocol(p); + } - vlan_lock(); + vlan_lock(); - /* return to the state we were in before SIFVLAN */ - ifnet_set_mtu(ifp, 0); - ifnet_set_flags(ifp, 0, - IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX | IFF_RUNNING); - ifnet_set_offload(ifp, 0); - ifv->ifv_mtufudge = 0; + /* return to the state we were in before SIFVLAN */ + ifnet_set_mtu(ifp, 0); + ifnet_set_flags(ifp, 0, + IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX | IFF_RUNNING); + ifnet_set_offload(ifp, 0); + ifv->ifv_mtufudge = 0; - /* Disconnect from parent. */ - vlan_parent_remove_vlan(vlp, ifv); - ifv->ifv_flags = 0; + /* Disconnect from parent. */ + vlan_parent_remove_vlan(vlp, ifv); + ifv->ifv_flags = 0; - /* vlan_parent has reference to ifv, remove it */ - need_ifv_release++; + /* vlan_parent has reference to ifv, remove it */ + need_ifv_release++; - /* from this point on, no more referencing ifv */ - if (last_vlan && !vlan_parent_flags_detaching(vlp)) { - /* the vlan parent has no more VLAN's */ - ifnet_set_eflags(p, 0, IFEF_VLAN); - LIST_REMOVE(vlp, vlp_parent_list); + /* from this point on, no more referencing ifv */ + if (last_vlan && !vlan_parent_flags_detaching(vlp)) { + /* the vlan parent has no more VLAN's */ + ifnet_set_eflags(p, 0, IFEF_VLAN); + LIST_REMOVE(vlp, vlp_parent_list); - /* one for being in the list */ - need_vlp_release++; + /* one for being in the list */ + need_vlp_release++; - /* release outside of the lock below */ - need_vlp_release++; - } - - signal_done: - if (need_to_wait) { - vlan_parent_signal(vlp, "vlan_unconfig"); - } - vlan_unlock(); - while (need_ifv_release--) { - ifvlan_release(ifv); - } - while (need_vlp_release--) { /* references to vlp */ - vlan_parent_release(vlp); - } - vlan_lock(); - return (0); + /* release outside of the lock below */ + need_vlp_release++; + } + +signal_done: + if (need_to_wait) { + vlan_parent_signal(vlp, "vlan_unconfig"); + } + vlan_unlock(); + while (need_ifv_release--) { + ifvlan_release(ifv); + } + while (need_vlp_release--) { /* references to vlp */ + vlan_parent_release(vlp); + } + vlan_lock(); + return 0; } static int vlan_set_promisc(struct ifnet * ifp) { - int error = 0; - ifvlan_ref ifv; - vlan_parent_ref vlp; - - vlan_lock(); - ifv = ifnet_get_ifvlan_retained(ifp); - if (ifv == NULL) { - error = EBUSY; - goto done; - } - - vlp = ifv->ifv_vlp; - if (vlp == NULL) { - goto done; - } - if ((ifnet_flags(ifp) & IFF_PROMISC) != 0) { - if (!ifvlan_flags_promisc(ifv)) { - error = ifnet_set_promiscuous(vlp->vlp_ifp, 1); - if (error == 0) { - ifvlan_flags_set_promisc(ifv); - } - } - } else { - if (ifvlan_flags_promisc(ifv)) { - error = ifnet_set_promiscuous(vlp->vlp_ifp, 0); - if (error == 0) { - ifvlan_flags_clear_promisc(ifv); - } - } - } - done: - vlan_unlock(); - if (ifv != NULL) { - ifvlan_release(ifv); - } - return (error); + int error = 0; + ifvlan_ref ifv; + vlan_parent_ref vlp; + + vlan_lock(); + ifv = ifnet_get_ifvlan_retained(ifp); + if (ifv == NULL) { + error = EBUSY; + goto done; + } + + vlp = ifv->ifv_vlp; + if (vlp == NULL) { + goto done; + } + if ((ifnet_flags(ifp) & IFF_PROMISC) != 0) { + if (!ifvlan_flags_promisc(ifv)) { + error = ifnet_set_promiscuous(vlp->vlp_ifp, 1); + if (error == 0) { + ifvlan_flags_set_promisc(ifv); + } + } + } else { + if (ifvlan_flags_promisc(ifv)) { + error = ifnet_set_promiscuous(vlp->vlp_ifp, 0); + if (error == 
0) { + ifvlan_flags_clear_promisc(ifv); + } + } + } +done: + vlan_unlock(); + if (ifv != NULL) { + ifvlan_release(ifv); + } + return error; } static int ifvlan_new_mtu(ifvlan_ref ifv, int mtu) { - struct ifdevmtu * devmtu_p; - int error = 0; - struct ifnet * ifp = ifv->ifv_ifp; - int max_mtu; - int new_mtu = 0; - int req_mtu; - vlan_parent_ref vlp; - - vlan_assert_lock_held(); - vlp = ifv->ifv_vlp; - devmtu_p = &vlp->vlp_devmtu; - req_mtu = mtu + ifv->ifv_mtufudge; - if (req_mtu > devmtu_p->ifdm_max || req_mtu < devmtu_p->ifdm_min) { - return (EINVAL); - } - max_mtu = vlan_parent_find_max_mtu(vlp, ifv); - if (req_mtu > max_mtu) { - new_mtu = req_mtu; - } - else if (max_mtu < devmtu_p->ifdm_current) { - new_mtu = max_mtu; - } - if (new_mtu != 0) { - struct ifnet * p = vlp->vlp_ifp; - vlan_unlock(); - error = siocsifaltmtu(p, new_mtu); - vlan_lock(); - } - if (error == 0) { + struct ifdevmtu * devmtu_p; + int error = 0; + struct ifnet * ifp = ifv->ifv_ifp; + int max_mtu; + int new_mtu = 0; + int req_mtu; + vlan_parent_ref vlp; + + vlan_assert_lock_held(); + vlp = ifv->ifv_vlp; + devmtu_p = &vlp->vlp_devmtu; + req_mtu = mtu + ifv->ifv_mtufudge; + if (req_mtu > devmtu_p->ifdm_max || req_mtu < devmtu_p->ifdm_min) { + return EINVAL; + } + max_mtu = vlan_parent_find_max_mtu(vlp, ifv); + if (req_mtu > max_mtu) { + new_mtu = req_mtu; + } else if (max_mtu < devmtu_p->ifdm_current) { + new_mtu = max_mtu; + } if (new_mtu != 0) { - devmtu_p->ifdm_current = new_mtu; + struct ifnet * p = vlp->vlp_ifp; + vlan_unlock(); + error = siocsifaltmtu(p, new_mtu); + vlan_lock(); } - ifnet_set_mtu(ifp, mtu); - } - return (error); + if (error == 0) { + if (new_mtu != 0) { + devmtu_p->ifdm_current = new_mtu; + } + ifnet_set_mtu(ifp, mtu); + } + return error; } static int vlan_set_mtu(struct ifnet * ifp, int mtu) { - int error = 0; - ifvlan_ref ifv; - vlan_parent_ref vlp; - - if (mtu < IF_MINMTU) { - return (EINVAL); - } - vlan_lock(); - ifv = ifnet_get_ifvlan_retained(ifp); - if (ifv == NULL) { - vlan_unlock(); - return (EBUSY); - } - vlp = ifvlan_get_vlan_parent_retained(ifv); - if (vlp == NULL) { - vlan_unlock(); - ifvlan_release(ifv); - if (mtu != 0) { - return (EINVAL); - } - return (0); - } - vlan_parent_wait(vlp, "vlan_set_mtu"); - - /* check again, something might have changed */ - if (ifnet_get_ifvlan(ifp) != ifv - || ifvlan_flags_detaching(ifv)) { - error = EBUSY; - goto signal_done; - } - if (ifv->ifv_vlp != vlp) { - /* vlan parent changed */ - goto signal_done; - } - if (vlan_parent_flags_detaching(vlp)) { - if (mtu != 0) { - error = EINVAL; - } - goto signal_done; - } - error = ifvlan_new_mtu(ifv, mtu); - - signal_done: - vlan_parent_signal(vlp, "vlan_set_mtu"); - vlan_unlock(); - vlan_parent_release(vlp); - ifvlan_release(ifv); - - return (error); -} + int error = 0; + ifvlan_ref ifv; + vlan_parent_ref vlp; -static int -vlan_ioctl(ifnet_t ifp, u_long cmd, void * data) -{ - struct ifdevmtu * devmtu_p; - int error = 0; - struct ifaddr * ifa; - struct ifmediareq *ifmr; - struct ifreq * ifr; - ifvlan_ref ifv; - struct ifnet * p; - u_short tag; - user_addr_t user_addr; - vlan_parent_ref vlp; - struct vlanreq vlr; - - if (ifnet_type(ifp) != IFT_L2VLAN) { - return (EOPNOTSUPP); - } - ifr = (struct ifreq *)data; - ifa = (struct ifaddr *)data; - - switch (cmd) { - case SIOCSIFADDR: - ifnet_set_flags(ifp, IFF_UP, IFF_UP); - break; - - case SIOCGIFMEDIA32: - case SIOCGIFMEDIA64: + if (mtu < IF_MINMTU) { + return EINVAL; + } vlan_lock(); - ifv = (ifvlan_ref)ifnet_softc(ifp); - if (ifv == NULL || 
ifvlan_flags_detaching(ifv)) { - vlan_unlock(); - return (ifv == NULL ? EOPNOTSUPP : EBUSY); + ifv = ifnet_get_ifvlan_retained(ifp); + if (ifv == NULL) { + vlan_unlock(); + return EBUSY; } - p = (ifv->ifv_vlp == NULL) ? NULL : ifv->ifv_vlp->vlp_ifp; - vlan_unlock(); - ifmr = (struct ifmediareq *)data; - user_addr = (cmd == SIOCGIFMEDIA64) ? - ((struct ifmediareq64 *)ifmr)->ifmu_ulist : - CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist); - if (p != NULL) { - struct ifmediareq p_ifmr; - - bzero(&p_ifmr, sizeof(p_ifmr)); - error = ifnet_ioctl(p, 0, SIOCGIFMEDIA, &p_ifmr); - if (error == 0) { - ifmr->ifm_active = p_ifmr.ifm_active; - ifmr->ifm_current = p_ifmr.ifm_current; - ifmr->ifm_mask = p_ifmr.ifm_mask; - ifmr->ifm_status = p_ifmr.ifm_status; - ifmr->ifm_count = p_ifmr.ifm_count; - /* Limit the result to the parent's current config. */ - if (ifmr->ifm_count >= 1 && user_addr != USER_ADDR_NULL) { - ifmr->ifm_count = 1; - error = copyout(&ifmr->ifm_current, user_addr, - sizeof(int)); + vlp = ifvlan_get_vlan_parent_retained(ifv); + if (vlp == NULL) { + vlan_unlock(); + ifvlan_release(ifv); + if (mtu != 0) { + return EINVAL; } - } - } else { - ifmr->ifm_active = ifmr->ifm_current = IFM_NONE; - ifmr->ifm_mask = 0; - ifmr->ifm_status = IFM_AVALID; - ifmr->ifm_count = 1; - if (user_addr != USER_ADDR_NULL) { - error = copyout(&ifmr->ifm_current, user_addr, sizeof(int)); - } + return 0; } - break; - - case SIOCSIFMEDIA: - error = EOPNOTSUPP; - break; + vlan_parent_wait(vlp, "vlan_set_mtu"); - case SIOCGIFDEVMTU: - vlan_lock(); - ifv = (ifvlan_ref)ifnet_softc(ifp); - if (ifv == NULL || ifvlan_flags_detaching(ifv)) { - vlan_unlock(); - return (ifv == NULL ? EOPNOTSUPP : EBUSY); + /* check again, something might have changed */ + if (ifnet_get_ifvlan(ifp) != ifv + || ifvlan_flags_detaching(ifv)) { + error = EBUSY; + goto signal_done; } - vlp = ifv->ifv_vlp; - if (vlp != NULL) { - int min_mtu = vlp->vlp_devmtu.ifdm_min - ifv->ifv_mtufudge; - devmtu_p = &ifr->ifr_devmtu; - devmtu_p->ifdm_current = ifnet_mtu(ifp); - devmtu_p->ifdm_min = max(min_mtu, IF_MINMTU); - devmtu_p->ifdm_max = vlp->vlp_devmtu.ifdm_max - ifv->ifv_mtufudge; - } - else { - devmtu_p = &ifr->ifr_devmtu; - devmtu_p->ifdm_current = 0; - devmtu_p->ifdm_min = 0; - devmtu_p->ifdm_max = 0; + if (ifv->ifv_vlp != vlp) { + /* vlan parent changed */ + goto signal_done; + } + if (vlan_parent_flags_detaching(vlp)) { + if (mtu != 0) { + error = EINVAL; + } + goto signal_done; } + error = ifvlan_new_mtu(ifv, mtu); + +signal_done: + vlan_parent_signal(vlp, "vlan_set_mtu"); vlan_unlock(); - break; + vlan_parent_release(vlp); + ifvlan_release(ifv); - case SIOCSIFMTU: - error = vlan_set_mtu(ifp, ifr->ifr_mtu); - break; + return error; +} - case SIOCSIFVLAN: - user_addr = proc_is64bit(current_proc()) - ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data); - error = copyin(user_addr, &vlr, sizeof(vlr)); - if (error) { - break; +static int +vlan_ioctl(ifnet_t ifp, u_long cmd, void * data) +{ + struct ifdevmtu * devmtu_p; + int error = 0; + struct ifaddr * ifa; + struct ifmediareq *ifmr; + struct ifreq * ifr; + ifvlan_ref ifv; + struct ifnet * p; + u_short tag; + user_addr_t user_addr; + vlan_parent_ref vlp; + struct vlanreq vlr; + + if (ifnet_type(ifp) != IFT_L2VLAN) { + return EOPNOTSUPP; } - p = NULL; - /* ensure nul termination */ - vlr.vlr_parent[IFNAMSIZ - 1] = '\0'; - if (vlr.vlr_parent[0] != '\0') { - if (vlr.vlr_tag & ~EVL_VLID_MASK) { - /* - * Don't let the caller set up a VLAN tag with - * anything except VLID bits. 
- */ - error = EINVAL; + ifr = (struct ifreq *)data; + ifa = (struct ifaddr *)data; + + switch (cmd) { + case SIOCSIFADDR: + ifnet_set_flags(ifp, IFF_UP, IFF_UP); + break; + + case SIOCGIFMEDIA32: + case SIOCGIFMEDIA64: + vlan_lock(); + ifv = (ifvlan_ref)ifnet_softc(ifp); + if (ifv == NULL || ifvlan_flags_detaching(ifv)) { + vlan_unlock(); + return ifv == NULL ? EOPNOTSUPP : EBUSY; + } + p = (ifv->ifv_vlp == NULL) ? NULL : ifv->ifv_vlp->vlp_ifp; + vlan_unlock(); + ifmr = (struct ifmediareq *)data; + user_addr = (cmd == SIOCGIFMEDIA64) ? + ((struct ifmediareq64 *)ifmr)->ifmu_ulist : + CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist); + if (p != NULL) { + struct ifmediareq p_ifmr; + + bzero(&p_ifmr, sizeof(p_ifmr)); + error = ifnet_ioctl(p, 0, SIOCGIFMEDIA, &p_ifmr); + if (error == 0) { + ifmr->ifm_active = p_ifmr.ifm_active; + ifmr->ifm_current = p_ifmr.ifm_current; + ifmr->ifm_mask = p_ifmr.ifm_mask; + ifmr->ifm_status = p_ifmr.ifm_status; + ifmr->ifm_count = p_ifmr.ifm_count; + /* Limit the result to the parent's current config. */ + if (ifmr->ifm_count >= 1 && user_addr != USER_ADDR_NULL) { + ifmr->ifm_count = 1; + error = copyout(&ifmr->ifm_current, user_addr, + sizeof(int)); + } + } + } else { + ifmr->ifm_active = ifmr->ifm_current = IFM_NONE; + ifmr->ifm_mask = 0; + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_count = 1; + if (user_addr != USER_ADDR_NULL) { + error = copyout(&ifmr->ifm_current, user_addr, sizeof(int)); + } + } break; - } - p = ifunit(vlr.vlr_parent); - if (p == NULL) { - error = ENXIO; + + case SIOCSIFMEDIA: + error = EOPNOTSUPP; break; - } - if (IFNET_IS_INTCOPROC(p)) { - error = EINVAL; + + case SIOCGIFDEVMTU: + vlan_lock(); + ifv = (ifvlan_ref)ifnet_softc(ifp); + if (ifv == NULL || ifvlan_flags_detaching(ifv)) { + vlan_unlock(); + return ifv == NULL ? EOPNOTSUPP : EBUSY; + } + vlp = ifv->ifv_vlp; + if (vlp != NULL) { + int min_mtu = vlp->vlp_devmtu.ifdm_min - ifv->ifv_mtufudge; + devmtu_p = &ifr->ifr_devmtu; + devmtu_p->ifdm_current = ifnet_mtu(ifp); + devmtu_p->ifdm_min = max(min_mtu, IF_MINMTU); + devmtu_p->ifdm_max = vlp->vlp_devmtu.ifdm_max - ifv->ifv_mtufudge; + } else { + devmtu_p = &ifr->ifr_devmtu; + devmtu_p->ifdm_current = 0; + devmtu_p->ifdm_min = 0; + devmtu_p->ifdm_max = 0; + } + vlan_unlock(); break; - } - /* can't do VLAN over anything but ethernet or ethernet aggregate */ - if (ifnet_type(p) != IFT_ETHER - && ifnet_type(p) != IFT_IEEE8023ADLAG) { - error = EPROTONOSUPPORT; + case SIOCSIFMTU: + error = vlan_set_mtu(ifp, ifr->ifr_mtu); break; - } - error = vlan_config(ifp, p, vlr.vlr_tag); - if (error) { + + case SIOCSIFVLAN: + user_addr = proc_is64bit(current_proc()) + ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data); + error = copyin(user_addr, &vlr, sizeof(vlr)); + if (error) { + break; + } + p = NULL; + /* ensure nul termination */ + vlr.vlr_parent[IFNAMSIZ - 1] = '\0'; + if (vlr.vlr_parent[0] != '\0') { + if (vlr.vlr_tag & ~EVL_VLID_MASK) { + /* + * Don't let the caller set up a VLAN tag with + * anything except VLID bits. + */ + error = EINVAL; + break; + } + p = ifunit(vlr.vlr_parent); + if (p == NULL) { + error = ENXIO; + break; + } + if (IFNET_IS_INTCOPROC(p)) { + error = EINVAL; + break; + } + + /* can't do VLAN over anything but ethernet or ethernet aggregate */ + if (ifnet_type(p) != IFT_ETHER + && ifnet_type(p) != IFT_IEEE8023ADLAG) { + error = EPROTONOSUPPORT; + break; + } + error = vlan_config(ifp, p, vlr.vlr_tag); + if (error) { + break; + } + + /* Update promiscuous mode, if necessary. 
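+			 * If IFF_PROMISC was set on the VLAN interface
+			 * before it had a parent, nothing could be
+			 * propagated at that point; vlan_set_promisc()
+			 * re-reads the flag now and, when needed, pushes
+			 * it down with
+			 *
+			 *	ifnet_set_promiscuous(vlp->vlp_ifp, 1);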
*/ + (void)vlan_set_promisc(ifp); + + /* generate a link event based on the state of the parent */ + vlan_link_event(ifp, p); + } else { + int need_link_event = FALSE; + + vlan_lock(); + ifv = (ifvlan_ref)ifnet_softc(ifp); + if (ifv == NULL || ifvlan_flags_detaching(ifv)) { + vlan_unlock(); + error = (ifv == NULL ? EOPNOTSUPP : EBUSY); + break; + } + need_link_event = vlan_remove(ifv, TRUE); + vlan_unlock(); + if (need_link_event) { + interface_link_event(ifp, KEV_DL_LINK_OFF); + } + } break; - } - - /* Update promiscuous mode, if necessary. */ - (void)vlan_set_promisc(ifp); - - /* generate a link event based on the state of the parent */ - vlan_link_event(ifp, p); - } - else { - int need_link_event = FALSE; - - vlan_lock(); - ifv = (ifvlan_ref)ifnet_softc(ifp); - if (ifv == NULL || ifvlan_flags_detaching(ifv)) { + + case SIOCGIFVLAN: + bzero(&vlr, sizeof vlr); + vlan_lock(); + ifv = (ifvlan_ref)ifnet_softc(ifp); + if (ifv == NULL || ifvlan_flags_detaching(ifv)) { + vlan_unlock(); + return ifv == NULL ? EOPNOTSUPP : EBUSY; + } + p = (ifv->ifv_vlp == NULL) ? NULL : ifv->ifv_vlp->vlp_ifp; + tag = ifv->ifv_tag; vlan_unlock(); - error = (ifv == NULL ? EOPNOTSUPP : EBUSY); + if (p != NULL) { + snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), + "%s%d", ifnet_name(p), ifnet_unit(p)); + vlr.vlr_tag = tag; + } + user_addr = proc_is64bit(current_proc()) + ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data); + error = copyout(&vlr, user_addr, sizeof(vlr)); break; - } - need_link_event = vlan_remove(ifv, TRUE); - vlan_unlock(); - if (need_link_event) { - interface_link_event(ifp, KEV_DL_LINK_OFF); - } - } - break; - - case SIOCGIFVLAN: - bzero(&vlr, sizeof vlr); - vlan_lock(); - ifv = (ifvlan_ref)ifnet_softc(ifp); - if (ifv == NULL || ifvlan_flags_detaching(ifv)) { - vlan_unlock(); - return (ifv == NULL ? EOPNOTSUPP : EBUSY); - } - p = (ifv->ifv_vlp == NULL) ? NULL : ifv->ifv_vlp->vlp_ifp; - tag = ifv->ifv_tag; - vlan_unlock(); - if (p != NULL) { - snprintf(vlr.vlr_parent, sizeof(vlr.vlr_parent), - "%s%d", ifnet_name(p), ifnet_unit(p)); - vlr.vlr_tag = tag; - } - user_addr = proc_is64bit(current_proc()) - ? ifr->ifr_data64 : CAST_USER_ADDR_T(ifr->ifr_data); - error = copyout(&vlr, user_addr, sizeof(vlr)); - break; - - case SIOCSIFFLAGS: - /* - * For promiscuous mode, we enable promiscuous mode on - * the parent if we need promiscuous on the VLAN interface. - */ - error = vlan_set_promisc(ifp); - break; - case SIOCADDMULTI: - case SIOCDELMULTI: - error = vlan_setmulti(ifp); - break; - default: - error = EOPNOTSUPP; - } - return error; + case SIOCSIFFLAGS: + /* + * For promiscuous mode, we enable promiscuous mode on + * the parent if we need promiscuous on the VLAN interface. 
+ */ + error = vlan_set_promisc(ifp); + break; + + case SIOCADDMULTI: + case SIOCDELMULTI: + error = vlan_setmulti(ifp); + break; + default: + error = EOPNOTSUPP; + } + return error; } -static void +static void vlan_if_free(struct ifnet * ifp) { - ifvlan_ref ifv; + ifvlan_ref ifv; - if (ifp == NULL) { - return; - } - ifv = (ifvlan_ref)ifnet_softc(ifp); - if (ifv == NULL) { + if (ifp == NULL) { + return; + } + ifv = (ifvlan_ref)ifnet_softc(ifp); + if (ifv == NULL) { + return; + } + ifvlan_release(ifv); + ifnet_release(ifp); return; - } - ifvlan_release(ifv); - ifnet_release(ifp); - return; } static void -vlan_event(struct ifnet * p, __unused protocol_family_t protocol, - const struct kev_msg * event) +vlan_event(struct ifnet * p, __unused protocol_family_t protocol, + const struct kev_msg * event) { - int event_code; + int event_code; - /* Check if the interface we are attached to is being detached */ - if (event->vendor_code != KEV_VENDOR_APPLE - || event->kev_class != KEV_NETWORK_CLASS - || event->kev_subclass != KEV_DL_SUBCLASS) { - return; - } - event_code = event->event_code; - switch (event_code) { - case KEV_DL_LINK_OFF: - case KEV_DL_LINK_ON: - vlan_parent_link_event(p, event_code); - break; - default: + /* Check if the interface we are attached to is being detached */ + if (event->vendor_code != KEV_VENDOR_APPLE + || event->kev_class != KEV_NETWORK_CLASS + || event->kev_subclass != KEV_DL_SUBCLASS) { + return; + } + event_code = event->event_code; + switch (event_code) { + case KEV_DL_LINK_OFF: + case KEV_DL_LINK_ON: + vlan_parent_link_event(p, event_code); + break; + default: + return; + } return; - } - return; } static errno_t vlan_detached(ifnet_t p, __unused protocol_family_t protocol) { - if (ifnet_is_attached(p, 0) == 0) { - /* if the parent isn't attached, remove all VLANs */ - vlan_parent_remove_all_vlans(p); - } - return (0); + if (ifnet_is_attached(p, 0) == 0) { + /* if the parent isn't attached, remove all VLANs */ + vlan_parent_remove_all_vlans(p); + } + return 0; } static void interface_link_event(struct ifnet * ifp, u_int32_t event_code) { - struct { - struct kern_event_msg header; - u_int32_t unit; - char if_name[IFNAMSIZ]; - } event; - - bzero(&event, sizeof(event)); - event.header.total_size = sizeof(event); - event.header.vendor_code = KEV_VENDOR_APPLE; - event.header.kev_class = KEV_NETWORK_CLASS; - event.header.kev_subclass = KEV_DL_SUBCLASS; - event.header.event_code = event_code; - event.header.event_data[0] = ifnet_family(ifp); - event.unit = (u_int32_t) ifnet_unit(ifp); - strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ); - ifnet_event(ifp, &event.header); - return; + struct { + struct kern_event_msg header; + u_int32_t unit; + char if_name[IFNAMSIZ]; + } event; + + bzero(&event, sizeof(event)); + event.header.total_size = sizeof(event); + event.header.vendor_code = KEV_VENDOR_APPLE; + event.header.kev_class = KEV_NETWORK_CLASS; + event.header.kev_subclass = KEV_DL_SUBCLASS; + event.header.event_code = event_code; + event.header.event_data[0] = ifnet_family(ifp); + event.unit = (u_int32_t) ifnet_unit(ifp); + strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ); + ifnet_event(ifp, &event.header); + return; } static void vlan_parent_link_event(struct ifnet * p, u_int32_t event_code) { - vlan_parent_ref vlp; + vlan_parent_ref vlp; - vlan_lock(); - if ((ifnet_eflags(p) & IFEF_VLAN) == 0) { - vlan_unlock(); - /* no VLAN's */ - return; - } - vlp = parent_list_lookup(p); - if (vlp == NULL) { - /* no VLAN's */ - vlan_unlock(); - return; - } - 
vlan_parent_flags_set_link_event_required(vlp); - vlp->vlp_event_code = event_code; - if (vlan_parent_flags_change_in_progress(vlp)) { - /* don't block waiting to generate an event */ + vlan_lock(); + if ((ifnet_eflags(p) & IFEF_VLAN) == 0) { + vlan_unlock(); + /* no VLAN's */ + return; + } + vlp = parent_list_lookup(p); + if (vlp == NULL) { + /* no VLAN's */ + vlan_unlock(); + return; + } + vlan_parent_flags_set_link_event_required(vlp); + vlp->vlp_event_code = event_code; + if (vlan_parent_flags_change_in_progress(vlp)) { + /* don't block waiting to generate an event */ + vlan_unlock(); + return; + } + vlan_parent_retain(vlp); + vlan_parent_wait(vlp, "vlan_parent_link_event"); + vlan_parent_signal(vlp, "vlan_parent_link_event"); vlan_unlock(); + vlan_parent_release(vlp); return; - } - vlan_parent_retain(vlp); - vlan_parent_wait(vlp, "vlan_parent_link_event"); - vlan_parent_signal(vlp, "vlan_parent_link_event"); - vlan_unlock(); - vlan_parent_release(vlp); - return; - } /* @@ -2038,19 +2034,19 @@ vlan_parent_link_event(struct ifnet * p, u_int32_t event_code) static int vlan_attach_protocol(struct ifnet *ifp) { - int error; - struct ifnet_attach_proto_param reg; - - bzero(®, sizeof(reg)); - reg.input = vlan_input; - reg.event = vlan_event; - reg.detached = vlan_detached; - error = ifnet_attach_protocol(ifp, PF_VLAN, ®); - if (error) { - printf("vlan_proto_attach(%s%d) ifnet_attach_protocol failed, %d\n", - ifnet_name(ifp), ifnet_unit(ifp), error); - } - return (error); + int error; + struct ifnet_attach_proto_param reg; + + bzero(®, sizeof(reg)); + reg.input = vlan_input; + reg.event = vlan_event; + reg.detached = vlan_detached; + error = ifnet_attach_protocol(ifp, PF_VLAN, ®); + if (error) { + printf("vlan_proto_attach(%s%d) ifnet_attach_protocol failed, %d\n", + ifnet_name(ifp), ifnet_unit(ifp), error); + } + return error; } /* @@ -2061,15 +2057,15 @@ vlan_attach_protocol(struct ifnet *ifp) static int vlan_detach_protocol(struct ifnet *ifp) { - int error; + int error; + + error = ifnet_detach_protocol(ifp, PF_VLAN); + if (error) { + printf("vlan_proto_detach(%s%d) ifnet_detach_protocol failed, %d\n", + ifnet_name(ifp), ifnet_unit(ifp), error); + } - error = ifnet_detach_protocol(ifp, PF_VLAN); - if (error) { - printf("vlan_proto_detach(%s%d) ifnet_detach_protocol failed, %d\n", - ifnet_name(ifp), ifnet_unit(ifp), error); - } - - return (error); + return error; } /* @@ -2082,58 +2078,58 @@ vlan_detach_protocol(struct ifnet *ifp) static errno_t vlan_attach_inet(struct ifnet *ifp, protocol_family_t protocol_family) { - return (ether_attach_inet(ifp, protocol_family)); + return ether_attach_inet(ifp, protocol_family); } static void vlan_detach_inet(struct ifnet *ifp, protocol_family_t protocol_family) { - ether_detach_inet(ifp, protocol_family); + ether_detach_inet(ifp, protocol_family); } #if INET6 static errno_t vlan_attach_inet6(struct ifnet *ifp, protocol_family_t protocol_family) { - return (ether_attach_inet6(ifp, protocol_family)); + return ether_attach_inet6(ifp, protocol_family); } static void vlan_detach_inet6(struct ifnet *ifp, protocol_family_t protocol_family) { - ether_detach_inet6(ifp, protocol_family); + ether_detach_inet6(ifp, protocol_family); } #endif /* INET6 */ __private_extern__ int vlan_family_init(void) { - int error=0; + int error = 0; - error = proto_register_plumber(PF_INET, IFNET_FAMILY_VLAN, - vlan_attach_inet, vlan_detach_inet); - if (error != 0) { - printf("proto_register_plumber failed for AF_INET error=%d\n", - error); - goto done; - } + error = 
proto_register_plumber(PF_INET, IFNET_FAMILY_VLAN, + vlan_attach_inet, vlan_detach_inet); + if (error != 0) { + printf("proto_register_plumber failed for AF_INET error=%d\n", + error); + goto done; + } #if INET6 - error = proto_register_plumber(PF_INET6, IFNET_FAMILY_VLAN, - vlan_attach_inet6, vlan_detach_inet6); - if (error != 0) { - printf("proto_register_plumber failed for AF_INET6 error=%d\n", - error); - goto done; - } + error = proto_register_plumber(PF_INET6, IFNET_FAMILY_VLAN, + vlan_attach_inet6, vlan_detach_inet6); + if (error != 0) { + printf("proto_register_plumber failed for AF_INET6 error=%d\n", + error); + goto done; + } #endif - error = vlan_clone_attach(); - if (error != 0) { - printf("proto_register_plumber failed vlan_clone_attach error=%d\n", - error); - goto done; - } + error = vlan_clone_attach(); + if (error != 0) { + printf("proto_register_plumber failed vlan_clone_attach error=%d\n", + error); + goto done; + } - done: - return (error); +done: + return error; } diff --git a/bsd/net/if_vlan_var.h b/bsd/net/if_vlan_var.h index cbbec55a6..d89a6c610 100644 --- a/bsd/net/if_vlan_var.h +++ b/bsd/net/if_vlan_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,7 +39,7 @@ * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. - * + * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. 
DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF @@ -57,33 +57,33 @@ */ #ifndef _NET_IF_VLAN_VAR_H_ -#define _NET_IF_VLAN_VAR_H_ 1 +#define _NET_IF_VLAN_VAR_H_ 1 -#define ETHER_VLAN_ENCAP_LEN 4 /* len of 802.1Q VLAN encapsulation */ -struct ether_vlan_header { - u_char evl_dhost[ETHER_ADDR_LEN]; - u_char evl_shost[ETHER_ADDR_LEN]; +#define ETHER_VLAN_ENCAP_LEN 4 /* len of 802.1Q VLAN encapsulation */ +struct ether_vlan_header { + u_char evl_dhost[ETHER_ADDR_LEN]; + u_char evl_shost[ETHER_ADDR_LEN]; u_int16_t evl_encap_proto; u_int16_t evl_tag; u_int16_t evl_proto; }; -#define EVL_VLID_MASK 0x0FFF -#define EVL_VLANOFTAG(tag) ((tag) & EVL_VLID_MASK) -#define EVL_PRIOFTAG(tag) (((tag) >> 13) & 7) +#define EVL_VLID_MASK 0x0FFF +#define EVL_VLANOFTAG(tag) ((tag) & EVL_VLID_MASK) +#define EVL_PRIOFTAG(tag) (((tag) >> 13) & 7) #if 0 /* sysctl(3) tags, for compatibility purposes */ -#define VLANCTL_PROTO 1 -#define VLANCTL_MAX 2 +#define VLANCTL_PROTO 1 +#define VLANCTL_MAX 2 #endif /* * Configuration structure for SIOCSETVLAN and SIOCGETVLAN ioctls. */ -struct vlanreq { - char vlr_parent[IFNAMSIZ]; - u_short vlr_tag; +struct vlanreq { + char vlr_parent[IFNAMSIZ]; + u_short vlr_tag; }; #ifdef KERNEL_PRIVATE diff --git a/bsd/net/init.c b/bsd/net/init.c index 85464da74..5ff72176d 100644 --- a/bsd/net/init.c +++ b/bsd/net/init.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
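The struct vlanreq above is the payload of the SIOCSIFVLAN/SIOCGIFVLAN
ioctls handled in vlan_ioctl(). A minimal userland sketch of configuring a
VLAN with it, where the interface names "vlan0"/"en0" and the absent error
handling are illustrative:

	struct ifreq ifr;
	struct vlanreq vlr;
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	memset(&vlr, 0, sizeof(vlr));
	strlcpy(ifr.ifr_name, "vlan0", sizeof(ifr.ifr_name));
	strlcpy(vlr.vlr_parent, "en0", sizeof(vlr.vlr_parent));
	vlr.vlr_tag = 10;		/* must fit in EVL_VLID_MASK */
	ifr.ifr_data = (caddr_t)&vlr;
	ioctl(s, SIOCSIFVLAN, &ifr);	/* reaches vlan_ioctl -> vlan_config */

This exercises the tag-validation and parent-lookup path shown earlier.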
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,57 +34,57 @@ #include struct init_list_entry { - struct init_list_entry *next; - net_init_func_ptr func; + struct init_list_entry *next; + net_init_func_ptr func; }; #define LIST_RAN ((struct init_list_entry*)0xffffffff) -static struct init_list_entry *list_head = 0; +static struct init_list_entry *list_head = 0; errno_t net_init_add( net_init_func_ptr init_func) { - struct init_list_entry *entry; - + struct init_list_entry *entry; + if (init_func == 0) { return EINVAL; } - + /* Check if we've already started */ if (list_head == LIST_RAN) { return EALREADY; } - + entry = kalloc(sizeof(*entry)); if (entry == 0) { printf("net_init_add: no memory\n"); return ENOMEM; } - + bzero(entry, sizeof(*entry)); entry->func = init_func; - + do { entry->next = list_head; - + if (entry->next == LIST_RAN) { /* List already ran, cleanup and call the function */ kfree(entry, sizeof(*entry)); return EALREADY; } - } while(!OSCompareAndSwapPtr(entry->next, entry, &list_head)); - + } while (!OSCompareAndSwapPtr(entry->next, entry, &list_head)); + return 0; } __private_extern__ void net_init_run(void) { - struct init_list_entry *backward_head = 0; - struct init_list_entry *forward_head = 0; - struct init_list_entry *current = 0; - + struct init_list_entry *backward_head = 0; + struct init_list_entry *forward_head = 0; + struct init_list_entry *current = 0; + /* * Grab the list, replacing the head with 0xffffffff to indicate * that we've already run. @@ -92,7 +92,7 @@ net_init_run(void) do { backward_head = list_head; } while (!OSCompareAndSwapPtr(backward_head, LIST_RAN, &list_head)); - + /* Reverse the order of the list */ while (backward_head != 0) { current = backward_head; @@ -100,7 +100,7 @@ net_init_run(void) current->next = forward_head; forward_head = current; } - + /* Call each function pointer registered */ while (forward_head != 0) { current = forward_head; diff --git a/bsd/net/init.h b/bsd/net/init.h index b2545b48a..677e0c76a 100644 --- a/bsd/net/init.h +++ b/bsd/net/init.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,40 +22,40 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header init.h - This header defines an API to register a function that will be called when - the network stack is being initialized. This gives a kernel extensions an - opportunity to install filters before sockets are created and network - operations occur. 
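The net_init_add/net_init_run pair in bsd/net/init.c above is a small
lock-free stack: registration pushes an entry with a compare-and-swap loop,
and net_init_run swaps in the LIST_RAN sentinel, then reverses the captured
chain so callbacks fire in registration order. A minimal stand-alone model
of the same shape, with C11 atomics standing in for OSCompareAndSwapPtr and
all names hypothetical:

	#include <stdatomic.h>
	#include <stddef.h>

	struct node {
		struct node *next;
		void (*func)(void);
	};

	static _Atomic(struct node *) head;

	static void
	push(struct node *n)
	{
		n->next = atomic_load(&head);
		/* on failure, the CAS reloads the current head into n->next */
		while (!atomic_compare_exchange_weak(&head, &n->next, n)) {
			;
		}
	}

	/* reverse so the oldest registration runs first, as net_init_run does */
	static struct node *
	reverse(struct node *list)
	{
		struct node *out = NULL;

		while (list != NULL) {
			struct node *next = list->next;
			list->next = out;
			out = list;
			list = next;
		}
		return out;
	}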
+ * @header init.h
+ * This header defines an API to register a function that will be called when
+ * the network stack is being initialized. This gives kernel extensions an
+ * opportunity to install filters before sockets are created and network
+ * operations occur.
 */
#ifndef _NET_INIT_H_
-#define _NET_INIT_H_
+#define _NET_INIT_H_
#include 

/*!
-	@typedef net_init_func_ptr
-	@discussion net_init_func_ptr will be called once the networking stack
-		initialized and before network operations occur.
+ * @typedef net_init_func_ptr
+ * @discussion net_init_func_ptr will be called once the networking stack
+ *	is initialized and before network operations occur.
 */
-typedef void (*net_init_func_ptr)(void);
+typedef void (*net_init_func_ptr)(void);

/*!
-	@function net_init_add
-	@discussion Add a function to be called during network initialization. Your
-		kext must not unload until the function you register is called if
-		net_init_add returns success.
-	@param init_func A pointer to a function to be called when the stack is
-		initialized.
-	@result EINVAL - the init_func value was NULL.
-		EALREADY - the network has already been initialized
-		ENOMEM - there was not enough memory to perform this operation
-		0 - success
+ * @function net_init_add
+ * @discussion Add a function to be called during network initialization. Your
+ *	kext must not unload until the function you register is called if
+ *	net_init_add returns success.
+ * @param init_func A pointer to a function to be called when the stack is
+ *	initialized.
+ * @result EINVAL - the init_func value was NULL.
+ *	EALREADY - the network has already been initialized
+ *	ENOMEM - there was not enough memory to perform this operation
+ *	0 - success
 */
-errno_t net_init_add(net_init_func_ptr init_func);
+errno_t net_init_add(net_init_func_ptr init_func);

#ifdef BSD_KERNEL_PRIVATE
/* net_init_run is called from bsd_init */
diff --git a/bsd/net/iptap.c b/bsd/net/iptap.c
index 06fed0e3a..0754860e6 100644
--- a/bsd/net/iptap.c
+++ b/bsd/net/iptap.c
@@ -36,7 +36,7 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
@@ -69,32 +69,32 @@
 #include 

 struct iptap_softc {
-	LIST_ENTRY(iptap_softc) iptap_link;
-	uint32_t iptap_unit;
-	uint32_t iptap_dlt_raw_count;
-	uint32_t iptap_dlt_pkttap_count;
-	struct ifnet *iptap_ifp;
+	LIST_ENTRY(iptap_softc) iptap_link;
+	uint32_t iptap_unit;
+	uint32_t iptap_dlt_raw_count;
+	uint32_t iptap_dlt_pkttap_count;
+	struct ifnet *iptap_ifp;
 };

 static LIST_HEAD(iptap_list, iptap_softc) iptap_list = LIST_HEAD_INITIALIZER(iptap_list);

-static void iptap_lock_shared(void);
-static void iptap_lock_exclusive(void);
-static void iptap_lock_done(void);
-static void iptap_alloc_lock(void);
+static void iptap_lock_shared(void);
+static void iptap_lock_exclusive(void);
+static void iptap_lock_done(void);
+static void iptap_alloc_lock(void);

 decl_lck_rw_data(static, iptap_lck_rw);
-static lck_grp_t *iptap_grp;
+static lck_grp_t *iptap_grp;

 errno_t iptap_if_output(ifnet_t, mbuf_t);
-errno_t iptap_demux(ifnet_t , mbuf_t, char *, protocol_family_t *);
+errno_t iptap_demux(ifnet_t, mbuf_t, char *, protocol_family_t *);
 errno_t iptap_add_proto(ifnet_t, protocol_family_t, const struct ifnet_demux_desc *,
-	u_int32_t);
+    u_int32_t);
 errno_t iptap_del_proto(ifnet_t, protocol_family_t);
-errno_t iptap_getdrvspec(ifnet_t , struct ifdrv64 *);
+errno_t iptap_getdrvspec(ifnet_t, struct ifdrv64 *);
 errno_t iptap_ioctl(ifnet_t, unsigned long, void *);
 void iptap_detach(ifnet_t);
 errno_t iptap_tap_callback(ifnet_t , u_int32_t , 
bpf_tap_mode ); +errno_t iptap_tap_callback(ifnet_t, u_int32_t, bpf_tap_mode ); int iptap_clone_create(struct if_clone *, u_int32_t, void *); int iptap_clone_destroy(struct ifnet *); @@ -106,48 +106,49 @@ static void iptap_ipf_detach(void *); static ipfilter_t iptap_ipf4, iptap_ipf6; -void iptap_bpf_tap(struct mbuf *m, u_int32_t proto, int outgoing); +void iptap_bpf_tap(struct mbuf *m, u_int32_t proto, int outgoing); -#define IPTAP_MAXUNIT IF_MAXUNIT -#define IPTAP_ZONE_MAX_ELEM MIN(IFNETS_MAX, IPTAP_MAXUNIT) +#define IPTAP_MAXUNIT IF_MAXUNIT +#define IPTAP_ZONE_MAX_ELEM MIN(IFNETS_MAX, IPTAP_MAXUNIT) -static struct if_clone iptap_cloner = - IF_CLONE_INITIALIZER(IPTAP_IFNAME, - iptap_clone_create, - iptap_clone_destroy, - 0, - IPTAP_MAXUNIT, - IPTAP_ZONE_MAX_ELEM, - sizeof(struct iptap_softc)); +static struct if_clone iptap_cloner = + IF_CLONE_INITIALIZER(IPTAP_IFNAME, + iptap_clone_create, + iptap_clone_destroy, + 0, + IPTAP_MAXUNIT, + IPTAP_ZONE_MAX_ELEM, + sizeof(struct iptap_softc)); SYSCTL_DECL(_net_link); -SYSCTL_NODE(_net_link, OID_AUTO, iptap, CTLFLAG_RW|CTLFLAG_LOCKED, 0, +SYSCTL_NODE(_net_link, OID_AUTO, iptap, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "iptap virtual interface"); -static int iptap_total_tap_count = 0; -SYSCTL_INT(_net_link_iptap, OID_AUTO, total_tap_count, CTLFLAG_RD | CTLFLAG_LOCKED, - &iptap_total_tap_count, 0, ""); +static int iptap_total_tap_count = 0; +SYSCTL_INT(_net_link_iptap, OID_AUTO, total_tap_count, CTLFLAG_RD | CTLFLAG_LOCKED, + &iptap_total_tap_count, 0, ""); static int iptap_log = 0; SYSCTL_INT(_net_link_iptap, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED, - &iptap_log, 0, ""); + &iptap_log, 0, ""); #define IPTAP_LOG(fmt, ...) \ do { \ if ((iptap_log)) \ - printf("%s:%d " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \ + printf("%s:%d " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while(false) __private_extern__ void iptap_init(void) { errno_t error; - + iptap_alloc_lock(); - + error = if_clone_attach(&iptap_cloner); - if (error != 0) + if (error != 0) { panic("%s: if_clone_attach() failed, error %d\n", __func__, error); + } } static void @@ -155,15 +156,15 @@ iptap_alloc_lock(void) { lck_grp_attr_t *grp_attr; lck_attr_t *attr; - + grp_attr = lck_grp_attr_alloc_init(); lck_grp_attr_setdefault(grp_attr); iptap_grp = lck_grp_alloc_init(IPTAP_IFNAME, grp_attr); lck_grp_attr_free(grp_attr); - + attr = lck_attr_alloc_init(); lck_attr_setdefault(attr); - + lck_rw_init(&iptap_lck_rw, iptap_grp, attr); lck_attr_free(attr); } @@ -194,7 +195,7 @@ iptap_clone_create(struct if_clone *ifc, u_int32_t unit, void *params) int error = 0; struct iptap_softc *iptap = NULL; struct ifnet_init_eparams if_init; - + iptap = if_clone_softc_allocate(&iptap_cloner); if (iptap == NULL) { printf("%s: _MALLOC failed\n", __func__); @@ -204,12 +205,12 @@ iptap_clone_create(struct if_clone *ifc, u_int32_t unit, void *params) iptap->iptap_unit = unit; /* - * We do not use a set_bpf_tap() function as we rather rely on the more + * We do not use a set_bpf_tap() function as we rather rely on the more * accurate callback passed to bpf_attach() */ bzero(&if_init, sizeof(if_init)); if_init.ver = IFNET_INIT_CURRENT_VERSION; - if_init.len = sizeof (if_init); + if_init.len = sizeof(if_init); if_init.flags = IFNET_INIT_LEGACY; if_init.name = ifc->ifc_name; if_init.unit = unit; @@ -228,40 +229,42 @@ iptap_clone_create(struct if_clone *ifc, u_int32_t unit, void *params) printf("%s: ifnet_allocate failed, error %d\n", __func__, error); goto done; } - + ifnet_set_flags(iptap->iptap_ifp, IFF_UP, IFF_UP); 
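The two bpf_attach() calls just below publish the same interface under two
DLTs: DLT_PKTAP frames each packet with a struct pktap_header of metadata,
while DLT_RAW hands over the bare IP packet for legacy consumers. A reader
selects one with the standard bpf(4) ioctls; a minimal sketch, where the
already-opened /dev/bpf descriptor and the helper name are assumptions:

	#include <net/bpf.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/types.h>

	static int
	iptap_reader_select(int bpf_fd, u_int32_t dlt)
	{
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strlcpy(ifr.ifr_name, "iptap0", sizeof(ifr.ifr_name));
		if (ioctl(bpf_fd, BIOCSETIF, &ifr) == -1) {	/* bind to the clone */
			return -1;
		}
		return ioctl(bpf_fd, BIOCSDLT, &dlt);	/* DLT_PKTAP or DLT_RAW */
	}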
-	
+
 	error = ifnet_attach(iptap->iptap_ifp, NULL);
 	if (error != 0) {
 		printf("%s: ifnet_attach failed - error %d\n", __func__, error);
 		ifnet_release(iptap->iptap_ifp);
 		goto done;
 	}
-	
-	/* 
+
+	/*
 	 * Attach by default as DLT_PKTAP for packet metadata
 	 * Provide DLT_RAW for legacy
 	 */
-	bpf_attach(iptap->iptap_ifp, DLT_PKTAP, sizeof(struct pktap_header), NULL,
-		iptap_tap_callback);
+	bpf_attach(iptap->iptap_ifp, DLT_PKTAP, sizeof(struct pktap_header), NULL,
+	    iptap_tap_callback);
 	bpf_attach(iptap->iptap_ifp, DLT_RAW, 0, NULL,
-		iptap_tap_callback);
-	
+	    iptap_tap_callback);
+
 	/* Take a reference and add to the global list */
 	ifnet_reference(iptap->iptap_ifp);
-	
+
 	iptap_lock_exclusive();
-	
-	if (LIST_EMPTY(&iptap_list))
+
+	if (LIST_EMPTY(&iptap_list)) {
 		iptap_ipf_register();
+	}
 	LIST_INSERT_HEAD(&iptap_list, iptap, iptap_link);

 	iptap_lock_done();

done:
 	if (error != 0) {
-		if (iptap != NULL)
+		if (iptap != NULL) {
 			if_clone_softc_deallocate(&iptap_cloner, iptap);
+		}
 	}
-	return (error);
+	return error;
 }

 __private_extern__ int
@@ -270,8 +273,8 @@ iptap_clone_destroy(struct ifnet *ifp)
 {
 	int error = 0;

 	(void) ifnet_detach(ifp);
-	
-	return (error);
+
+	return error;
 }

 /*
@@ -292,38 +295,37 @@ iptap_tap_callback(ifnet_t ifp, u_int32_t dlt, bpf_tap_mode direction)
 		goto done;
 	}
 	switch (dlt) {
-		case DLT_RAW:
-			if (direction == 0) {
-				if (iptap->iptap_dlt_raw_count > 0) {
-					iptap->iptap_dlt_raw_count--;
-					OSAddAtomic(-1, &iptap_total_tap_count);
-					
-				}
-			} else {
-				iptap->iptap_dlt_raw_count++;
-				OSAddAtomic(1, &iptap_total_tap_count);
+	case DLT_RAW:
+		if (direction == 0) {
+			if (iptap->iptap_dlt_raw_count > 0) {
+				iptap->iptap_dlt_raw_count--;
+				OSAddAtomic(-1, &iptap_total_tap_count);
 			}
-			break;
-		case DLT_PKTAP:
-			if (direction == 0) {
-				if (iptap->iptap_dlt_pkttap_count > 0) {
-					iptap->iptap_dlt_pkttap_count--;
-					OSAddAtomic(-1, &iptap_total_tap_count);
-				}
-			} else {
-				iptap->iptap_dlt_pkttap_count++;
-				OSAddAtomic(1, &iptap_total_tap_count);
+		} else {
+			iptap->iptap_dlt_raw_count++;
+			OSAddAtomic(1, &iptap_total_tap_count);
+		}
+		break;
+	case DLT_PKTAP:
+		if (direction == 0) {
+			if (iptap->iptap_dlt_pkttap_count > 0) {
+				iptap->iptap_dlt_pkttap_count--;
+				OSAddAtomic(-1, &iptap_total_tap_count);
 			}
-			break;
+		} else {
+			iptap->iptap_dlt_pkttap_count++;
+			OSAddAtomic(1, &iptap_total_tap_count);
+		}
+		break;
 	}
done:
-	/* 
-	 * Attachements count must be positive and we're in trouble 
+	/*
+	 * Attachments count must be positive and we're in trouble
 	 * if we have more than 2**31 attachments
 	 */
 	VERIFY(iptap_total_tap_count >= 0);
-	return (0);
+	return 0;
 }

 __private_extern__ errno_t
@@ -332,19 +334,19 @@ iptap_if_output(ifnet_t ifp, mbuf_t m)
 {
 #pragma unused(ifp)
 	mbuf_freem(m);
-	return (ENOTSUP);
+	return ENOTSUP;
 }

 __private_extern__ errno_t
-iptap_demux(ifnet_t ifp, mbuf_t m, char *header,
-	protocol_family_t *ppf)
+iptap_demux(ifnet_t ifp, mbuf_t m, char *header,
+    protocol_family_t *ppf)
 {
 #pragma unused(ifp)
 #pragma unused(m)
 #pragma unused(header)
 #pragma unused(ppf)
-	return (ENOTSUP);
+	return ENOTSUP;
 }

 __private_extern__ errno_t
@@ -356,7 +358,7 @@ iptap_add_proto(ifnet_t ifp, protocol_family_t pf,
 #pragma unused(dmx)
 #pragma unused(cnt)

-	return (0);
+	return 0;
 }

 __private_extern__ errno_t
@@ -365,7 +367,7 @@ iptap_del_proto(ifnet_t ifp, protocol_family_t pf)
 {
 #pragma unused(ifp)
 #pragma unused(pf)

-	return (0);
+	return 0;
 }

 __private_extern__ errno_t
@@ -384,10 +386,10 @@ iptap_getdrvspec(ifnet_t ifp, struct ifdrv64 *ifd)
 	switch (ifd->ifd_cmd) {
 	case PKTP_CMD_TAP_COUNT: {
 		uint32_t tap_count = 
iptap->iptap_dlt_raw_count + iptap->iptap_dlt_pkttap_count; - + if (ifd->ifd_len < sizeof(tap_count)) { - printf("%s: PKTP_CMD_TAP_COUNT ifd_len %llu too small - error %d\n", - __func__, ifd->ifd_len, error); + printf("%s: PKTP_CMD_TAP_COUNT ifd_len %llu too small - error %d\n", + __func__, ifd->ifd_len, error); error = EINVAL; break; } @@ -404,7 +406,7 @@ iptap_getdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) } done: - return (error); + return error; } __private_extern__ errno_t @@ -418,24 +420,24 @@ iptap_ioctl(ifnet_t ifp, unsigned long cmd, void *data) goto done; } } - + switch (cmd) { case SIOCGDRVSPEC32: { struct ifdrv64 ifd; struct ifdrv32 *ifd32 = (struct ifdrv32 *)data; - + memcpy(ifd.ifd_name, ifd32->ifd_name, sizeof(ifd.ifd_name)); ifd.ifd_cmd = ifd32->ifd_cmd; ifd.ifd_len = ifd32->ifd_len; ifd.ifd_data = ifd32->ifd_data; - + error = iptap_getdrvspec(ifp, &ifd); - + break; } case SIOCGDRVSPEC64: { struct ifdrv64 *ifd64 = (struct ifdrv64 *)data; - + error = iptap_getdrvspec(ifp, ifd64); break; @@ -445,22 +447,23 @@ iptap_ioctl(ifnet_t ifp, unsigned long cmd, void *data) break; } done: - return (error); + return error; } __private_extern__ void iptap_detach(ifnet_t ifp) { struct iptap_softc *iptap = NULL; - + iptap_lock_exclusive(); iptap = ifp->if_softc; ifp->if_softc = NULL; LIST_REMOVE(iptap, iptap_link); - if (LIST_EMPTY(&iptap_list)) + if (LIST_EMPTY(&iptap_list)) { iptap_ipf_unregister(); + } iptap_lock_done(); @@ -480,7 +483,7 @@ iptap_ipf_register(void) IPTAP_LOG("\n"); - bzero(&iptap_ipfinit, sizeof (iptap_ipfinit)); + bzero(&iptap_ipfinit, sizeof(iptap_ipfinit)); iptap_ipfinit.name = IPTAP_IFNAME; iptap_ipfinit.cookie = &iptap_ipf4; iptap_ipfinit.ipf_input = iptap_ipf_input; @@ -505,7 +508,7 @@ iptap_ipf_register(void) } done: - return (err); + return err; } static int @@ -535,27 +538,28 @@ iptap_ipf_unregister(void) iptap_ipf6 = NULL; } done: - return (err); + return err; } static errno_t -iptap_ipf_input(void *arg, mbuf_t *mp, int off, u_int8_t proto) +iptap_ipf_input(void *arg, mbuf_t *mp, int off, u_int8_t proto) { #pragma unused(off) #pragma unused(proto) - if (arg == (void *)&iptap_ipf4) + if (arg == (void *)&iptap_ipf4) { iptap_bpf_tap(*mp, AF_INET, 0); - else if (arg == (void *)&iptap_ipf6) + } else if (arg == (void *)&iptap_ipf6) { iptap_bpf_tap(*mp, AF_INET6, 0); - else + } else { IPTAP_LOG("%s:%d bad cookie 0x%llx &iptap_ipf4 0x%llx " "&iptap_ipf6 0x%llx\n", __func__, __LINE__, (uint64_t)VM_KERNEL_ADDRPERM(arg), (uint64_t)VM_KERNEL_ADDRPERM(&iptap_ipf4), (uint64_t)VM_KERNEL_ADDRPERM(&iptap_ipf6)); + } - return (0); + return 0; } static errno_t @@ -563,18 +567,19 @@ iptap_ipf_output(void *arg, mbuf_t *mp, ipf_pktopts_t opt) { #pragma unused(opt) - if (arg == (void *)&iptap_ipf4) + if (arg == (void *)&iptap_ipf4) { iptap_bpf_tap(*mp, AF_INET, 1); - else if (arg == (void *)&iptap_ipf6) + } else if (arg == (void *)&iptap_ipf6) { iptap_bpf_tap(*mp, AF_INET6, 1); - else + } else { IPTAP_LOG("%s:%d bad cookie 0x%llx &iptap_ipf4 0x%llx " "&iptap_ipf6 0x%llx\n", __func__, __LINE__, (uint64_t)VM_KERNEL_ADDRPERM(arg), (uint64_t)VM_KERNEL_ADDRPERM(&iptap_ipf4), (uint64_t)VM_KERNEL_ADDRPERM(&iptap_ipf6)); + } - return (0); + return 0; } static void @@ -584,11 +589,11 @@ iptap_ipf_detach(void *arg) } __private_extern__ void -iptap_bpf_tap(struct mbuf *m, u_int32_t proto, int outgoing) +iptap_bpf_tap(struct mbuf *m, u_int32_t proto, int outgoing) { struct iptap_softc *iptap; - void (*bpf_tap_func)(ifnet_t , u_int32_t , mbuf_t , void * , size_t ) = - outgoing ? 
bpf_tap_out : bpf_tap_in; + void (*bpf_tap_func)(ifnet_t, u_int32_t, mbuf_t, void *, size_t ) = + outgoing ? bpf_tap_out : bpf_tap_in; uint16_t src_scope_id = 0; uint16_t dst_scope_id = 0; @@ -610,47 +615,48 @@ iptap_bpf_tap(struct mbuf *m, u_int32_t proto, int outgoing) iptap_lock_shared(); LIST_FOREACH(iptap, &iptap_list, iptap_link) { - if (iptap->iptap_dlt_raw_count > 0) { - bpf_tap_func(iptap->iptap_ifp, DLT_RAW, m, - NULL, 0); - } - if (iptap->iptap_dlt_pkttap_count > 0) { - struct { - struct pktap_header hdr; - u_int32_t proto; - } hdr_buffer; - struct pktap_header *hdr = &hdr_buffer.hdr; - size_t hdr_size = sizeof(hdr_buffer); - struct ifnet *ifp = outgoing ? NULL : m->m_pkthdr.rcvif; - - /* Verify the structure is packed */ - _CASSERT(sizeof(hdr_buffer) == sizeof(struct pktap_header) + sizeof(u_int32_t)); - - bzero(hdr, sizeof(hdr_buffer)); - hdr->pth_length = sizeof(struct pktap_header); - hdr->pth_type_next = PTH_TYPE_PACKET; - hdr->pth_dlt = DLT_NULL; - if (ifp != NULL) - snprintf(hdr->pth_ifname, sizeof(hdr->pth_ifname), "%s", - ifp->if_xname); - hdr_buffer.proto = proto; - hdr->pth_flags = outgoing ? PTH_FLAG_DIR_OUT : PTH_FLAG_DIR_IN; - hdr->pth_protocol_family = proto; - hdr->pth_frame_pre_length = 0; - hdr->pth_frame_post_length = 0; - hdr->pth_iftype = ifp != NULL ? ifp->if_type : 0; - hdr->pth_ifunit = ifp != NULL ? ifp->if_unit : 0; - - pktap_fill_proc_info(hdr, proto, m, 0, outgoing, ifp); - - hdr->pth_svc = so_svc2tc(m->m_pkthdr.pkt_svc); - - bpf_tap_func(iptap->iptap_ifp, DLT_PKTAP, m, hdr, hdr_size); + if (iptap->iptap_dlt_raw_count > 0) { + bpf_tap_func(iptap->iptap_ifp, DLT_RAW, m, + NULL, 0); + } + if (iptap->iptap_dlt_pkttap_count > 0) { + struct { + struct pktap_header hdr; + u_int32_t proto; + } hdr_buffer; + struct pktap_header *hdr = &hdr_buffer.hdr; + size_t hdr_size = sizeof(hdr_buffer); + struct ifnet *ifp = outgoing ? NULL : m->m_pkthdr.rcvif; + + /* Verify the structure is packed */ + _CASSERT(sizeof(hdr_buffer) == sizeof(struct pktap_header) + sizeof(u_int32_t)); + + bzero(hdr, sizeof(hdr_buffer)); + hdr->pth_length = sizeof(struct pktap_header); + hdr->pth_type_next = PTH_TYPE_PACKET; + hdr->pth_dlt = DLT_NULL; + if (ifp != NULL) { + snprintf(hdr->pth_ifname, sizeof(hdr->pth_ifname), "%s", + ifp->if_xname); } + hdr_buffer.proto = proto; + hdr->pth_flags = outgoing ? PTH_FLAG_DIR_OUT : PTH_FLAG_DIR_IN; + hdr->pth_protocol_family = proto; + hdr->pth_frame_pre_length = 0; + hdr->pth_frame_post_length = 0; + hdr->pth_iftype = ifp != NULL ? ifp->if_type : 0; + hdr->pth_ifunit = ifp != NULL ? 
ifp->if_unit : 0; + + pktap_fill_proc_info(hdr, proto, m, 0, outgoing, ifp); + + hdr->pth_svc = so_svc2tc(m->m_pkthdr.pkt_svc); + + bpf_tap_func(iptap->iptap_ifp, DLT_PKTAP, m, hdr, hdr_size); + } } - + iptap_lock_done(); - + if (proto == AF_INET6) { struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); diff --git a/bsd/net/iptap.h b/bsd/net/iptap.h index 0619ca909..2c9ee2407 100644 --- a/bsd/net/iptap.h +++ b/bsd/net/iptap.h @@ -35,7 +35,7 @@ #include -#define IPTAP_IFNAME "iptap" +#define IPTAP_IFNAME "iptap" #ifdef KERNEL_PRIVATE diff --git a/bsd/net/kext_net.h b/bsd/net/kext_net.h index 1055044b3..c8a364c86 100644 --- a/bsd/net/kext_net.h +++ b/bsd/net/kext_net.h @@ -31,7 +31,7 @@ */ #ifndef NET_KEXT_NET_H -#define NET_KEXT_NET_H +#define NET_KEXT_NET_H #include @@ -49,37 +49,37 @@ struct sockopt; struct inpcb; /* Private, internal implementation functions */ -extern void sflt_init(void); -extern int sflt_permission_check(struct inpcb *inp); -extern void sflt_initsock(struct socket *so); -extern void sflt_termsock(struct socket *so); -extern errno_t sflt_attach_internal(struct socket *so, sflt_handle handle); -extern void sflt_notify(struct socket *so, sflt_event_t event, void *param); -extern int sflt_ioctl(struct socket *so, u_long cmd, caddr_t data); -extern int sflt_bind(struct socket *so, const struct sockaddr *nam); -extern int sflt_listen(struct socket *so); -extern int sflt_accept(struct socket *head, struct socket *so, - const struct sockaddr *local, - const struct sockaddr *remote); -extern int sflt_getsockname(struct socket *so, struct sockaddr **local); -extern int sflt_getpeername(struct socket *so, struct sockaddr **remote); -extern int sflt_connectin(struct socket *head, - const struct sockaddr *remote); -extern int sflt_connectout(struct socket *so, const struct sockaddr *nam); -extern int sflt_setsockopt(struct socket *so, struct sockopt *sopt); -extern int sflt_getsockopt(struct socket *so, struct sockopt *sopt); -extern int sflt_data_out(struct socket *so, const struct sockaddr *to, - mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags); -extern int sflt_data_in(struct socket *so, const struct sockaddr *from, - mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags); +extern void sflt_init(void); +extern int sflt_permission_check(struct inpcb *inp); +extern void sflt_initsock(struct socket *so); +extern void sflt_termsock(struct socket *so); +extern errno_t sflt_attach_internal(struct socket *so, sflt_handle handle); +extern void sflt_notify(struct socket *so, sflt_event_t event, void *param); +extern int sflt_ioctl(struct socket *so, u_long cmd, caddr_t data); +extern int sflt_bind(struct socket *so, const struct sockaddr *nam); +extern int sflt_listen(struct socket *so); +extern int sflt_accept(struct socket *head, struct socket *so, + const struct sockaddr *local, + const struct sockaddr *remote); +extern int sflt_getsockname(struct socket *so, struct sockaddr **local); +extern int sflt_getpeername(struct socket *so, struct sockaddr **remote); +extern int sflt_connectin(struct socket *head, + const struct sockaddr *remote); +extern int sflt_connectout(struct socket *so, const struct sockaddr *nam); +extern int sflt_setsockopt(struct socket *so, struct sockopt *sopt); +extern int sflt_getsockopt(struct socket *so, struct sockopt *sopt); +extern int sflt_data_out(struct socket *so, const struct sockaddr *to, + mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags); +extern int sflt_data_in(struct socket *so, const struct sockaddr *from, + mbuf_t *data, mbuf_t *control, 
sflt_data_flag_t flags); #endif /* BSD_KERNEL_PRIVATE */ -#define NFF_BEFORE 0x01 -#define NFF_AFTER 0x02 +#define NFF_BEFORE 0x01 +#define NFF_AFTER 0x02 -#define NKE_OK 0 -#define NKE_REMOVE (-1) +#define NKE_OK 0 +#define NKE_REMOVE (-1) /* * Interface structure for inserting an installed socket NKE into an @@ -95,7 +95,7 @@ struct so_nke { unsigned int nke_handle; unsigned int nke_where; int nke_flags; /* NFF_BEFORE, NFF_AFTER: net/kext_net.h */ - u_int32_t reserved[4]; /* for future use */ + u_int32_t reserved[4]; /* for future use */ }; #pragma pack() diff --git a/bsd/net/kpi_interface.c b/bsd/net/kpi_interface.c index b0c1e3531..22281c271 100644 --- a/bsd/net/kpi_interface.c +++ b/bsd/net/kpi_interface.c @@ -29,7 +29,7 @@ #include "kpi_interface.h" #include -#include /* for definition of NULL */ +#include /* for definition of NULL */ #include /* for panic */ #include #include @@ -90,9 +90,9 @@ static errno_t ifnet_allocate_common(const struct ifnet_init_params *init, ifnet_t *ifp, bool is_internal); -#define TOUCHLASTCHANGE(__if_lastchange) { \ - (__if_lastchange)->tv_sec = net_uptime(); \ - (__if_lastchange)->tv_usec = 0; \ +#define TOUCHLASTCHANGE(__if_lastchange) { \ + (__if_lastchange)->tv_sec = net_uptime(); \ + (__if_lastchange)->tv_usec = 0; \ } static errno_t ifnet_defrouter_llreachinfo(ifnet_t, int, @@ -117,10 +117,11 @@ ifnet_kpi_free(ifnet_t ifp) { ifnet_detached_func detach_func = ifp->if_kpi_storage; - if (detach_func != NULL) + if (detach_func != NULL) { detach_func(ifp); + } - if (ifp->if_broadcast.length > sizeof (ifp->if_broadcast.u.buffer)) { + if (ifp->if_broadcast.length > sizeof(ifp->if_broadcast.u.buffer)) { FREE(ifp->if_broadcast.u.ptr, M_IFADDR); ifp->if_broadcast.u.ptr = NULL; } @@ -134,47 +135,47 @@ ifnet_allocate_common(const struct ifnet_init_params *init, { struct ifnet_init_eparams einit; - bzero(&einit, sizeof (einit)); + bzero(&einit, sizeof(einit)); - einit.ver = IFNET_INIT_CURRENT_VERSION; - einit.len = sizeof (einit); - einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO; + einit.ver = IFNET_INIT_CURRENT_VERSION; + einit.len = sizeof(einit); + einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO; if (!is_internal) { einit.flags |= IFNET_INIT_ALLOC_KPI; } - einit.uniqueid = init->uniqueid; - einit.uniqueid_len = init->uniqueid_len; - einit.name = init->name; - einit.unit = init->unit; - einit.family = init->family; - einit.type = init->type; - einit.output = init->output; - einit.demux = init->demux; - einit.add_proto = init->add_proto; - einit.del_proto = init->del_proto; - einit.check_multi = init->check_multi; - einit.framer = init->framer; - einit.softc = init->softc; - einit.ioctl = init->ioctl; - einit.set_bpf_tap = init->set_bpf_tap; - einit.detach = init->detach; - einit.event = init->event; - einit.broadcast_addr = init->broadcast_addr; - einit.broadcast_len = init->broadcast_len; - - return (ifnet_allocate_extended(&einit, ifp)); + einit.uniqueid = init->uniqueid; + einit.uniqueid_len = init->uniqueid_len; + einit.name = init->name; + einit.unit = init->unit; + einit.family = init->family; + einit.type = init->type; + einit.output = init->output; + einit.demux = init->demux; + einit.add_proto = init->add_proto; + einit.del_proto = init->del_proto; + einit.check_multi = init->check_multi; + einit.framer = init->framer; + einit.softc = init->softc; + einit.ioctl = init->ioctl; + einit.set_bpf_tap = init->set_bpf_tap; + einit.detach = init->detach; + einit.event = init->event; + einit.broadcast_addr = init->broadcast_addr; + 
einit.broadcast_len = init->broadcast_len; + + return ifnet_allocate_extended(&einit, ifp); } errno_t ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp) { - return (ifnet_allocate_common(init, ifp, true)); + return ifnet_allocate_common(init, ifp, true); } errno_t ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp) { - return (ifnet_allocate_common(init, ifp, false)); + return ifnet_allocate_common(init, ifp, false); } errno_t @@ -189,19 +190,22 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, einit = *einit0; if (einit.ver != IFNET_INIT_CURRENT_VERSION || - einit.len < sizeof (einit)) - return (EINVAL); + einit.len < sizeof(einit)) { + return EINVAL; + } if (einit.family == 0 || einit.name == NULL || strlen(einit.name) >= IFNAMSIZ || - (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) - return (EINVAL); + (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) { + return EINVAL; + } if (einit.flags & IFNET_INIT_LEGACY) { if (einit.output == NULL || - (einit.flags & IFNET_INIT_INPUT_POLL)) - return (EINVAL); + (einit.flags & IFNET_INIT_INPUT_POLL)) { + return EINVAL; + } einit.pre_enqueue = NULL; einit.start = NULL; @@ -210,16 +214,19 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, einit.input_poll = NULL; einit.input_ctl = NULL; } else { - if (einit.start == NULL) - return (EINVAL); + if (einit.start == NULL) { + return EINVAL; + } einit.output = NULL; - if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX) - return (EINVAL); + if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX) { + return EINVAL; + } if (einit.flags & IFNET_INIT_INPUT_POLL) { - if (einit.input_poll == NULL || einit.input_ctl == NULL) - return (EINVAL); + if (einit.input_poll == NULL || einit.input_ctl == NULL) { + return EINVAL; + } } else { einit.input_poll = NULL; einit.input_ctl = NULL; @@ -228,7 +235,7 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, /* Initialize external name (name + unit) */ - (void) snprintf(if_xname, sizeof (if_xname), "%s%d", + (void) snprintf(if_xname, sizeof(if_xname), "%s%d", einit.name, einit.unit); if (einit.uniqueid == NULL) { @@ -248,37 +255,37 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, * to write to this. 
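 *
 * [Editorial aside, illustrative only, not in the original source.]
 * A caller of ifnet_allocate_extended() typically zeroes the params
 * and fills in just what it needs; for the legacy output model checked
 * above, a minimal sketch (names marked hypothetical are not KPIs):
 *
 *	struct ifnet_init_eparams einit;
 *	ifnet_t nifp;
 *
 *	bzero(&einit, sizeof(einit));
 *	einit.ver = IFNET_INIT_CURRENT_VERSION;
 *	einit.len = sizeof(einit);
 *	einit.flags = IFNET_INIT_LEGACY;
 *	einit.name = "myif";			(hypothetical name)
 *	einit.unit = 0;
 *	einit.family = IFNET_FAMILY_ETHERNET;
 *	einit.type = IFT_ETHER;
 *	einit.output = my_output;		(hypothetical callbacks)
 *	einit.demux = my_demux;
 *	einit.add_proto = my_add_proto;
 *	einit.del_proto = my_del_proto;
 *	if (ifnet_allocate_extended(&einit, &nifp) == 0)
 *		error = ifnet_attach(nifp, NULL);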
*/ strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ); - ifp->if_type = einit.type; - ifp->if_family = einit.family; - ifp->if_subfamily = einit.subfamily; - ifp->if_unit = einit.unit; - ifp->if_output = einit.output; - ifp->if_pre_enqueue = einit.pre_enqueue; - ifp->if_start = einit.start; - ifp->if_output_ctl = einit.output_ctl; + ifp->if_type = einit.type; + ifp->if_family = einit.family; + ifp->if_subfamily = einit.subfamily; + ifp->if_unit = einit.unit; + ifp->if_output = einit.output; + ifp->if_pre_enqueue = einit.pre_enqueue; + ifp->if_start = einit.start; + ifp->if_output_ctl = einit.output_ctl; ifp->if_output_sched_model = einit.output_sched_model; ifp->if_output_bw.eff_bw = einit.output_bw; ifp->if_output_bw.max_bw = einit.output_bw_max; ifp->if_output_lt.eff_lt = einit.output_lt; ifp->if_output_lt.max_lt = einit.output_lt_max; - ifp->if_input_poll = einit.input_poll; - ifp->if_input_ctl = einit.input_ctl; - ifp->if_input_bw.eff_bw = einit.input_bw; - ifp->if_input_bw.max_bw = einit.input_bw_max; - ifp->if_input_lt.eff_lt = einit.input_lt; - ifp->if_input_lt.max_lt = einit.input_lt_max; - ifp->if_demux = einit.demux; - ifp->if_add_proto = einit.add_proto; - ifp->if_del_proto = einit.del_proto; - ifp->if_check_multi = einit.check_multi; - ifp->if_framer_legacy = einit.framer; - ifp->if_framer = einit.framer_extended; - ifp->if_softc = einit.softc; - ifp->if_ioctl = einit.ioctl; - ifp->if_set_bpf_tap = einit.set_bpf_tap; - ifp->if_free = ifnet_kpi_free; - ifp->if_event = einit.event; - ifp->if_kpi_storage = einit.detach; + ifp->if_input_poll = einit.input_poll; + ifp->if_input_ctl = einit.input_ctl; + ifp->if_input_bw.eff_bw = einit.input_bw; + ifp->if_input_bw.max_bw = einit.input_bw_max; + ifp->if_input_lt.eff_lt = einit.input_lt; + ifp->if_input_lt.max_lt = einit.input_lt_max; + ifp->if_demux = einit.demux; + ifp->if_add_proto = einit.add_proto; + ifp->if_del_proto = einit.del_proto; + ifp->if_check_multi = einit.check_multi; + ifp->if_framer_legacy = einit.framer; + ifp->if_framer = einit.framer_extended; + ifp->if_softc = einit.softc; + ifp->if_ioctl = einit.ioctl; + ifp->if_set_bpf_tap = einit.set_bpf_tap; + ifp->if_free = ifnet_kpi_free; + ifp->if_event = einit.event; + ifp->if_kpi_storage = einit.detach; /* Initialize external name (name + unit) */ snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ, @@ -301,69 +308,81 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, * variant which is represented by if_framer. 
*/ #if CONFIG_EMBEDDED - if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) + if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) { ifp->if_framer = ifp->if_framer_legacy; + } #else /* !CONFIG_EMBEDDED */ if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) { - if (ifp->if_framer_legacy == ether_frameout) + if (ifp->if_framer_legacy == ether_frameout) { ifp->if_framer = ether_frameout_extended; - else + } else { ifp->if_framer = ifnet_framer_stub; + } } #endif /* !CONFIG_EMBEDDED */ - if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) + if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) { ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw; - else if (ifp->if_output_bw.eff_bw == 0) + } else if (ifp->if_output_bw.eff_bw == 0) { ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw; + } - if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) + if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) { ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw; - else if (ifp->if_input_bw.eff_bw == 0) + } else if (ifp->if_input_bw.eff_bw == 0) { ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw; + } - if (ifp->if_output_bw.max_bw == 0) + if (ifp->if_output_bw.max_bw == 0) { ifp->if_output_bw = ifp->if_input_bw; - else if (ifp->if_input_bw.max_bw == 0) + } else if (ifp->if_input_bw.max_bw == 0) { ifp->if_input_bw = ifp->if_output_bw; + } /* Pin if_baudrate to 32 bits */ br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw); - if (br != 0) + if (br != 0) { ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br; + } - if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) + if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) { ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt; - else if (ifp->if_output_lt.eff_lt == 0) + } else if (ifp->if_output_lt.eff_lt == 0) { ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt; + } - if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) + if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) { ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt; - else if (ifp->if_input_lt.eff_lt == 0) + } else if (ifp->if_input_lt.eff_lt == 0) { ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt; + } - if (ifp->if_output_lt.max_lt == 0) + if (ifp->if_output_lt.max_lt == 0) { ifp->if_output_lt = ifp->if_input_lt; - else if (ifp->if_input_lt.max_lt == 0) + } else if (ifp->if_input_lt.max_lt == 0) { ifp->if_input_lt = ifp->if_output_lt; + } - if (ifp->if_ioctl == NULL) + if (ifp->if_ioctl == NULL) { ifp->if_ioctl = ifp_if_ioctl; + } ifp->if_eflags = 0; if (ifp->if_start != NULL) { ifp->if_eflags |= IFEF_TXSTART; - if (ifp->if_pre_enqueue == NULL) + if (ifp->if_pre_enqueue == NULL) { ifp->if_pre_enqueue = ifnet_enqueue; + } ifp->if_output = ifp->if_pre_enqueue; } else { ifp->if_eflags &= ~IFEF_TXSTART; } - if (ifp->if_input_poll != NULL) + if (ifp->if_input_poll != NULL) { ifp->if_eflags |= IFEF_RXPOLL; - else + } else { ifp->if_eflags &= ~IFEF_RXPOLL; + } ifp->if_output_dlil = dlil_output_handler; ifp->if_input_dlil = dlil_input_handler; @@ -377,7 +396,7 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, if (einit.broadcast_len && einit.broadcast_addr) { if (einit.broadcast_len > - sizeof (ifp->if_broadcast.u.buffer)) { + sizeof(ifp->if_broadcast.u.buffer)) { MALLOC(ifp->if_broadcast.u.ptr, u_char *, einit.broadcast_len, M_IFADDR, M_NOWAIT); if (ifp->if_broadcast.u.ptr == NULL) { @@ -394,7 +413,7 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, } ifp->if_broadcast.length = einit.broadcast_len; } else 
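/*
 * [Editorial aside, illustrative only, not part of the patch.]
 * When IFNET_INIT_LEGACY is not set, the code above points if_output
 * at ifnet_enqueue(), so packets land in the classq send queue and the
 * driver's registered start callback drains it. A sketch of such a
 * callback, assuming the private ifnet_dequeue() KPI and hypothetical
 * my_start/my_hw_transmit names:
 *
 *	static void
 *	my_start(ifnet_t sifp)
 *	{
 *		mbuf_t m;
 *
 *		while (ifnet_dequeue(sifp, &m) == 0)
 *			my_hw_transmit(sifp, m);
 *	}
 */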
{ - bzero(&ifp->if_broadcast, sizeof (ifp->if_broadcast)); + bzero(&ifp->if_broadcast, sizeof(ifp->if_broadcast)); } ifp->if_xflags = 0; @@ -427,9 +446,9 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, ifp->if_xflags |= IFXF_ALLOC_KPI; } else { OSIncrementAtomic64( - &net_api_stats.nas_ifnet_alloc_os_count); + &net_api_stats.nas_ifnet_alloc_os_count); INC_ATOMIC_INT64_LIM( - net_api_stats.nas_ifnet_alloc_os_total); + net_api_stats.nas_ifnet_alloc_os_total); } if (error == 0) { @@ -441,68 +460,69 @@ ifnet_allocate_extended(const struct ifnet_init_eparams *einit0, *interface = NULL; } } - return (error); + return error; } errno_t ifnet_reference(ifnet_t ifp) { - return (dlil_if_ref(ifp)); + return dlil_if_ref(ifp); } errno_t ifnet_release(ifnet_t ifp) { - return (dlil_if_free(ifp)); + return dlil_if_free(ifp); } errno_t ifnet_interface_family_find(const char *module_string, ifnet_family_t *family_id) { - if (module_string == NULL || family_id == NULL) - return (EINVAL); + if (module_string == NULL || family_id == NULL) { + return EINVAL; + } - return (net_str_id_find_internal(module_string, family_id, - NSI_IF_FAM_ID, 1)); + return net_str_id_find_internal(module_string, family_id, + NSI_IF_FAM_ID, 1); } void * ifnet_softc(ifnet_t interface) { - return ((interface == NULL) ? NULL : interface->if_softc); + return (interface == NULL) ? NULL : interface->if_softc; } const char * ifnet_name(ifnet_t interface) { - return ((interface == NULL) ? NULL : interface->if_name); + return (interface == NULL) ? NULL : interface->if_name; } ifnet_family_t ifnet_family(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_family); + return (interface == NULL) ? 0 : interface->if_family; } ifnet_subfamily_t ifnet_subfamily(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_subfamily); + return (interface == NULL) ? 0 : interface->if_subfamily; } u_int32_t ifnet_unit(ifnet_t interface) { - return ((interface == NULL) ? (u_int32_t)0xffffffff : - (u_int32_t)interface->if_unit); + return (interface == NULL) ? (u_int32_t)0xffffffff : + (u_int32_t)interface->if_unit; } u_int32_t ifnet_index(ifnet_t interface) { - return ((interface == NULL) ? (u_int32_t)0xffffffff : - interface->if_index); + return (interface == NULL) ? (u_int32_t)0xffffffff : + interface->if_index; } errno_t @@ -510,8 +530,9 @@ ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask) { uint16_t old_flags; - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } ifnet_lock_exclusive(interface); @@ -526,24 +547,26 @@ ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask) if ((old_flags & IFF_MULTICAST) != (interface->if_flags & IFF_MULTICAST)) { #if INET - if (IGMP_IFINFO(interface) != NULL) + if (IGMP_IFINFO(interface) != NULL) { igmp_initsilent(interface, IGMP_IFINFO(interface)); + } #endif /* INET */ #if INET6 - if (MLD_IFINFO(interface) != NULL) + if (MLD_IFINFO(interface) != NULL) { mld6_initsilent(interface, MLD_IFINFO(interface)); + } #endif /* INET6 */ } ifnet_lock_done(interface); - return (0); + return 0; } u_int16_t ifnet_flags(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_flags); + return (interface == NULL) ? 
0 : interface->if_flags; } /* @@ -574,8 +597,9 @@ ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask) if (ifp->if_eflags & IFEF_AWDL) { if (eflags & IFEF_AWDL) { - if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) - return (EINVAL); + if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) { + return EINVAL; + } } else { *new_eflags &= ~IFEF_AWDL_MASK; *mask |= IFEF_AWDL_MASK; @@ -584,10 +608,11 @@ ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask) *new_eflags |= IFEF_AWDL_MASK; *mask |= IFEF_AWDL_MASK; } else if (eflags & IFEF_AWDL_RESTRICTED && - !(ifp->if_eflags & IFEF_AWDL)) - return (EINVAL); + !(ifp->if_eflags & IFEF_AWDL)) { + return EINVAL; + } - return (0); + return 0; } errno_t @@ -597,8 +622,9 @@ ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask) struct kev_msg ev_msg; struct net_event_data ev_data; - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } bzero(&ev_msg, sizeof(ev_msg)); ifnet_lock_exclusive(interface); @@ -607,7 +633,7 @@ ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask) */ if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) { ifnet_lock_done(interface); - return (EINVAL); + return EINVAL; } oeflags = interface->if_eflags; interface->if_eflags = @@ -623,8 +649,9 @@ ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask) * path, so we don't have to do anything here. */ } else if (oeflags & IFEF_AWDL_RESTRICTED && - !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) + !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) { ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED; + } /* * Notify configd so that it has a chance to perform better * reachability detection. @@ -643,13 +670,13 @@ ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask) dlil_post_complete_msg(interface, &ev_msg); } - return (0); + return 0; } u_int32_t ifnet_eflags(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_eflags); + return (interface == NULL) ? 0 : interface->if_eflags; } errno_t @@ -657,8 +684,9 @@ ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask) { int before, after; - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE); @@ -671,7 +699,7 @@ ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask) if (!ifnet_is_attached(ifp, 0)) { ifp->if_idle_new_flags = new_flags; ifp->if_idle_new_flags_mask = mask; - return (0); + return 0; } else { ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0; } @@ -687,7 +715,7 @@ ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask) ifp->if_want_aggressive_drain++; } - return (0); + return 0; } errno_t @@ -701,13 +729,13 @@ ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask) ifnet_lock_done(ifp); lck_mtx_unlock(rnh_lock); - return (err); + return err; } u_int32_t ifnet_idle_flags(ifnet_t ifp) { - return ((ifp == NULL) ? 0 : ifp->if_idle_flags); + return (ifp == NULL) ? 
0 : ifp->if_idle_flags; } errno_t @@ -728,7 +756,7 @@ ifnet_set_link_quality(ifnet_t ifp, int quality) if_lqm_update(ifp, quality, 0); done: - return (err); + return err; } int @@ -736,14 +764,15 @@ ifnet_link_quality(ifnet_t ifp) { int lqm; - if (ifp == NULL) - return (IFNET_LQM_THRESH_OFF); + if (ifp == NULL) { + return IFNET_LQM_THRESH_OFF; + } ifnet_lock_shared(ifp); lqm = ifp->if_interface_state.lqm_state; ifnet_lock_done(ifp); - return (lqm); + return lqm; } errno_t @@ -765,7 +794,7 @@ ifnet_set_interface_state(ifnet_t ifp, if_state_update(ifp, if_interface_state); done: - return (err); + return err; } errno_t @@ -787,7 +816,7 @@ ifnet_get_interface_state(ifnet_t ifp, if_get_state(ifp, if_interface_state); done: - return (err); + return err; } @@ -795,24 +824,25 @@ static errno_t ifnet_defrouter_llreachinfo(ifnet_t ifp, int af, struct ifnet_llreach_info *iflri) { - if (ifp == NULL || iflri == NULL) - return (EINVAL); + if (ifp == NULL || iflri == NULL) { + return EINVAL; + } VERIFY(af == AF_INET || af == AF_INET6); - return (ifnet_llreach_get_defrouter(ifp, af, iflri)); + return ifnet_llreach_get_defrouter(ifp, af, iflri); } errno_t ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri) { - return (ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri)); + return ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri); } errno_t ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri) { - return (ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri)); + return ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri); } errno_t @@ -822,24 +852,26 @@ ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps, errno_t error = 0; int tmp; - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } ifnet_lock_exclusive(ifp); tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask); - if ((tmp & ~IFCAP_VALID)) + if ((tmp & ~IFCAP_VALID)) { error = EINVAL; - else + } else { ifp->if_capabilities = tmp; + } ifnet_lock_done(ifp); - return (error); + return error; } u_int32_t ifnet_capabilities_supported(ifnet_t ifp) { - return ((ifp == NULL) ? 0 : ifp->if_capabilities); + return (ifp == NULL) ? 
0 : ifp->if_capabilities; } @@ -852,49 +884,51 @@ ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps, struct kev_msg ev_msg; struct net_event_data ev_data; - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } ifnet_lock_exclusive(ifp); tmp = (new_caps & mask) | (ifp->if_capenable & ~mask); - if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) + if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) { error = EINVAL; - else + } else { ifp->if_capenable = tmp; + } ifnet_lock_done(ifp); /* Notify application of the change */ - bzero(&ev_data, sizeof (struct net_event_data)); - bzero(&ev_msg, sizeof (struct kev_msg)); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_DL_SUBCLASS; + bzero(&ev_data, sizeof(struct net_event_data)); + bzero(&ev_msg, sizeof(struct kev_msg)); + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; - ev_msg.event_code = KEV_DL_IFCAP_CHANGED; + ev_msg.event_code = KEV_DL_IFCAP_CHANGED; strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ); - ev_data.if_family = ifp->if_family; - ev_data.if_unit = (u_int32_t)ifp->if_unit; - ev_msg.dv[0].data_length = sizeof (struct net_event_data); + ev_data.if_family = ifp->if_family; + ev_data.if_unit = (u_int32_t)ifp->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); ev_msg.dv[0].data_ptr = &ev_data; ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(ifp, &ev_msg); - return (error); + return error; } u_int32_t ifnet_capabilities_enabled(ifnet_t ifp) { - return ((ifp == NULL) ? 0 : ifp->if_capenable); + return (ifp == NULL) ? 0 : ifp->if_capenable; } static const ifnet_offload_t offload_mask = - (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT | - IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | - IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT | - IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES | - IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP | - IFNET_SW_TIMESTAMP); + (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT | + IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | + IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT | + IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES | + IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP | + IFNET_SW_TIMESTAMP); static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF; @@ -903,8 +937,9 @@ ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload) { u_int32_t ifcaps = 0; - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } ifnet_lock_exclusive(interface); interface->if_hwassist = (offload & offload_mask); @@ -926,26 +961,36 @@ ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload) } ifnet_lock_done(interface); - if ((offload & any_offload_csum)) + if ((offload & any_offload_csum)) { ifcaps |= IFCAP_HWCSUM; - if ((offload & IFNET_TSO_IPV4)) + } + if ((offload & IFNET_TSO_IPV4)) { ifcaps |= IFCAP_TSO4; - if ((offload & IFNET_TSO_IPV6)) + } + if ((offload & IFNET_TSO_IPV6)) { ifcaps |= IFCAP_TSO6; - if ((offload & IFNET_VLAN_MTU)) + } + if ((offload & IFNET_VLAN_MTU)) { ifcaps |= IFCAP_VLAN_MTU; - if ((offload & IFNET_VLAN_TAGGING)) + } + if ((offload & IFNET_VLAN_TAGGING)) { ifcaps |= IFCAP_VLAN_HWTAGGING; - if ((offload & IFNET_TX_STATUS)) + } + if ((offload & IFNET_TX_STATUS)) { ifcaps |= 
IFCAP_TXSTATUS; - if ((offload & IFNET_HW_TIMESTAMP)) + } + if ((offload & IFNET_HW_TIMESTAMP)) { ifcaps |= IFCAP_HW_TIMESTAMP; - if ((offload & IFNET_SW_TIMESTAMP)) + } + if ((offload & IFNET_SW_TIMESTAMP)) { ifcaps |= IFCAP_SW_TIMESTAMP; - if ((offload & IFNET_CSUM_PARTIAL)) + } + if ((offload & IFNET_CSUM_PARTIAL)) { ifcaps |= IFCAP_CSUM_PARTIAL; - if ((offload & IFNET_CSUM_ZERO_INVERT)) + } + if ((offload & IFNET_CSUM_ZERO_INVERT)) { ifcaps |= IFCAP_CSUM_ZERO_INVERT; + } if (ifcaps != 0) { (void) ifnet_set_capabilities_supported(interface, ifcaps, IFCAP_VALID); @@ -953,14 +998,14 @@ ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload) IFCAP_VALID); } - return (0); + return 0; } ifnet_offload_t ifnet_offload(ifnet_t interface) { - return ((interface == NULL) ? - 0 : (interface->if_hwassist & offload_mask)); + return (interface == NULL) ? + 0 : (interface->if_hwassist & offload_mask); } errno_t @@ -968,22 +1013,25 @@ ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen) { errno_t error = 0; - if (interface == NULL || mtuLen < interface->if_mtu) - return (EINVAL); + if (interface == NULL || mtuLen < interface->if_mtu) { + return EINVAL; + } switch (family) { case AF_INET: - if (interface->if_hwassist & IFNET_TSO_IPV4) + if (interface->if_hwassist & IFNET_TSO_IPV4) { interface->if_tso_v4_mtu = mtuLen; - else + } else { error = EINVAL; + } break; case AF_INET6: - if (interface->if_hwassist & IFNET_TSO_IPV6) + if (interface->if_hwassist & IFNET_TSO_IPV6) { interface->if_tso_v6_mtu = mtuLen; - else + } else { error = EINVAL; + } break; default: @@ -991,7 +1039,7 @@ ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen) break; } - return (error); + return error; } errno_t @@ -999,22 +1047,25 @@ ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen) { errno_t error = 0; - if (interface == NULL || mtuLen == NULL) - return (EINVAL); + if (interface == NULL || mtuLen == NULL) { + return EINVAL; + } switch (family) { case AF_INET: - if (interface->if_hwassist & IFNET_TSO_IPV4) + if (interface->if_hwassist & IFNET_TSO_IPV4) { *mtuLen = interface->if_tso_v4_mtu; - else + } else { error = EINVAL; + } break; case AF_INET6: - if (interface->if_hwassist & IFNET_TSO_IPV6) + if (interface->if_hwassist & IFNET_TSO_IPV6) { *mtuLen = interface->if_tso_v6_mtu; - else + } else { error = EINVAL; + } break; default: @@ -1022,7 +1073,7 @@ ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen) break; } - return (error); + return error; } errno_t @@ -1031,23 +1082,26 @@ ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask) struct kev_msg ev_msg; struct net_event_data ev_data; - bzero(&ev_data, sizeof (struct net_event_data)); - bzero(&ev_msg, sizeof (struct kev_msg)); + bzero(&ev_data, sizeof(struct net_event_data)); + bzero(&ev_msg, sizeof(struct kev_msg)); - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } /* Do not accept wacky values */ - if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) - return (EINVAL); + if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) { + return EINVAL; + } ifnet_lock_exclusive(interface); if (mask & IF_WAKE_ON_MAGIC_PACKET) { - if (properties & IF_WAKE_ON_MAGIC_PACKET) + if (properties & IF_WAKE_ON_MAGIC_PACKET) { interface->if_xflags |= IFXF_WAKE_ON_MAGIC_PACKET; - else + } else { interface->if_xflags &= ~IFXF_WAKE_ON_MAGIC_PACKET; + } } ifnet_lock_done(interface); @@ -1055,20 +1109,20 @@ ifnet_set_wake_flags(ifnet_t interface, u_int32_t 
properties, u_int32_t mask) (void) ifnet_touch_lastchange(interface); /* Notify application of the change */ - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_DL_SUBCLASS; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_DL_SUBCLASS; - ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED; + ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED; strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ); - ev_data.if_family = interface->if_family; - ev_data.if_unit = (u_int32_t)interface->if_unit; - ev_msg.dv[0].data_length = sizeof (struct net_event_data); - ev_msg.dv[0].data_ptr = &ev_data; + ev_data.if_family = interface->if_family; + ev_data.if_unit = (u_int32_t)interface->if_unit; + ev_msg.dv[0].data_length = sizeof(struct net_event_data); + ev_msg.dv[0].data_ptr = &ev_data; ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(interface, &ev_msg); - return (0); + return 0; } u_int32_t @@ -1076,13 +1130,15 @@ ifnet_get_wake_flags(ifnet_t interface) { u_int32_t flags = 0; - if (interface == NULL) - return (0); + if (interface == NULL) { + return 0; + } - if (interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) + if (interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) { flags |= IF_WAKE_ON_MAGIC_PACKET; + } - return (flags); + return flags; } /* @@ -1091,29 +1147,33 @@ ifnet_get_wake_flags(ifnet_t interface) errno_t ifnet_set_link_mib_data(ifnet_t interface, void *mibData, u_int32_t mibLen) { - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } ifnet_lock_exclusive(interface); interface->if_linkmib = (void*)mibData; interface->if_linkmiblen = mibLen; ifnet_lock_done(interface); - return (0); + return 0; } errno_t ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen) { - errno_t result = 0; + errno_t result = 0; - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } ifnet_lock_shared(interface); - if (*mibLen < interface->if_linkmiblen) + if (*mibLen < interface->if_linkmiblen) { result = EMSGSIZE; - if (result == 0 && interface->if_linkmib == NULL) + } + if (result == 0 && interface->if_linkmib == NULL) { result = ENOTSUP; + } if (result == 0) { *mibLen = interface->if_linkmiblen; @@ -1121,13 +1181,13 @@ ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen) } ifnet_lock_done(interface); - return (result); + return result; } u_int32_t ifnet_get_link_mib_data_length(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_linkmiblen); + return (interface == NULL) ? 
0 : interface->if_linkmiblen; } errno_t @@ -1135,99 +1195,106 @@ ifnet_output(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m, void *route, const struct sockaddr *dest) { if (interface == NULL || protocol_family == 0 || m == NULL) { - if (m != NULL) + if (m != NULL) { mbuf_freem_list(m); - return (EINVAL); + } + return EINVAL; } - return (dlil_output(interface, protocol_family, m, route, dest, 0, NULL)); + return dlil_output(interface, protocol_family, m, route, dest, 0, NULL); } errno_t ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m) { if (interface == NULL || m == NULL) { - if (m != NULL) + if (m != NULL) { mbuf_freem_list(m); - return (EINVAL); + } + return EINVAL; } - return (dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL)); + return dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL); } errno_t ifnet_set_mtu(ifnet_t interface, u_int32_t mtu) { - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } interface->if_mtu = mtu; - return (0); + return 0; } u_int32_t ifnet_mtu(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_mtu); + return (interface == NULL) ? 0 : interface->if_mtu; } u_char ifnet_type(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_data.ifi_type); + return (interface == NULL) ? 0 : interface->if_data.ifi_type; } errno_t ifnet_set_addrlen(ifnet_t interface, u_char addrlen) { - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } interface->if_data.ifi_addrlen = addrlen; - return (0); + return 0; } u_char ifnet_addrlen(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_data.ifi_addrlen); + return (interface == NULL) ? 0 : interface->if_data.ifi_addrlen; } errno_t ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen) { - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } interface->if_data.ifi_hdrlen = hdrlen; - return (0); + return 0; } u_char ifnet_hdrlen(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_data.ifi_hdrlen); + return (interface == NULL) ? 0 : interface->if_data.ifi_hdrlen; } errno_t ifnet_set_metric(ifnet_t interface, u_int32_t metric) { - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } interface->if_data.ifi_metric = metric; - return (0); + return 0; } u_int32_t ifnet_metric(ifnet_t interface) { - return ((interface == NULL) ? 0 : interface->if_data.ifi_metric); + return (interface == NULL) ? 0 : interface->if_data.ifi_metric; } errno_t ifnet_set_baudrate(struct ifnet *ifp, u_int64_t baudrate) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw = ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate; @@ -1235,30 +1302,33 @@ ifnet_set_baudrate(struct ifnet *ifp, u_int64_t baudrate) /* Pin if_baudrate to 32 bits until we can change the storage size */ ifp->if_baudrate = (baudrate > 0xFFFFFFFF) ? 0xFFFFFFFF : baudrate; - return (0); + return 0; } u_int64_t ifnet_baudrate(struct ifnet *ifp) { - return ((ifp == NULL) ? 0 : ifp->if_baudrate); + return (ifp == NULL) ? 
0 : ifp->if_baudrate; } errno_t ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw, struct if_bandwidths *input_bw) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } /* set input values first (if any), as output values depend on them */ - if (input_bw != NULL) + if (input_bw != NULL) { (void) ifnet_set_input_bandwidths(ifp, input_bw); + } - if (output_bw != NULL) + if (output_bw != NULL) { (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE); + } - return (0); + return 0; } static void @@ -1291,32 +1361,39 @@ ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw, VERIFY(ifp != NULL && bw != NULL); ifq = &ifp->if_snd; - if (!locked) + if (!locked) { IFCQ_LOCK(ifq); + } IFCQ_LOCK_ASSERT_HELD(ifq); old_bw = ifp->if_output_bw; - if (bw->eff_bw != 0) + if (bw->eff_bw != 0) { ifp->if_output_bw.eff_bw = bw->eff_bw; - if (bw->max_bw != 0) + } + if (bw->max_bw != 0) { ifp->if_output_bw.max_bw = bw->max_bw; - if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) + } + if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) { ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw; - else if (ifp->if_output_bw.eff_bw == 0) + } else if (ifp->if_output_bw.eff_bw == 0) { ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw; + } /* Pin if_baudrate to 32 bits */ br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw); - if (br != 0) + if (br != 0) { ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br; + } /* Adjust queue parameters if needed */ if (old_bw.eff_bw != ifp->if_output_bw.eff_bw || - old_bw.max_bw != ifp->if_output_bw.max_bw) + old_bw.max_bw != ifp->if_output_bw.max_bw) { ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH); + } - if (!locked) + if (!locked) { IFCQ_UNLOCK(ifq); + } /* * If this is a Wifi interface, update the values in @@ -1328,7 +1405,7 @@ ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw, lck_rw_done(&ifp->if_link_status_lock); } - return (0); + return 0; } static void @@ -1358,14 +1435,17 @@ ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw) VERIFY(ifp != NULL && bw != NULL); old_bw = ifp->if_input_bw; - if (bw->eff_bw != 0) + if (bw->eff_bw != 0) { ifp->if_input_bw.eff_bw = bw->eff_bw; - if (bw->max_bw != 0) + } + if (bw->max_bw != 0) { ifp->if_input_bw.max_bw = bw->max_bw; - if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) + } + if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) { ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw; - else if (ifp->if_input_bw.eff_bw == 0) + } else if (ifp->if_input_bw.eff_bw == 0) { ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw; + } if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) { lck_rw_lock_exclusive(&ifp->if_link_status_lock); @@ -1374,10 +1454,11 @@ ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw) } if (old_bw.eff_bw != ifp->if_input_bw.eff_bw || - old_bw.max_bw != ifp->if_input_bw.max_bw) + old_bw.max_bw != ifp->if_input_bw.max_bw) { ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH); + } - return (0); + return 0; } u_int64_t @@ -1395,44 +1476,50 @@ ifnet_output_linkrate(struct ifnet *ifp) rate = MIN(rate, ifp->if_snd.ifcq_tbr.tbr_rate_raw); } - return (rate); + return rate; } u_int64_t ifnet_input_linkrate(struct ifnet *ifp) { - return (ifp->if_input_bw.eff_bw); + return ifp->if_input_bw.eff_bw; } errno_t ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw, struct if_bandwidths *input_bw) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return 
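/*
 * [Editorial aside, illustrative only, not part of the patch.]
 * In ifnet_set_output_bandwidths() above, a zero eff_bw or max_bw
 * leaves that value unchanged, and max_bw is raised to eff_bw when the
 * effective rate exceeds it. A driver reporting a link-rate change
 * might do (units assumed to be bits per second, matching the
 * if_baudrate pinning above):
 *
 *	struct if_bandwidths out_bw;
 *
 *	bzero(&out_bw, sizeof(out_bw));
 *	out_bw.eff_bw = 300000000;	(current effective rate)
 *	out_bw.max_bw = 1000000000;	(link ceiling)
 *	(void) ifnet_set_bandwidths(ifp, &out_bw, NULL);
 */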
EINVAL; + } - if (output_bw != NULL) + if (output_bw != NULL) { *output_bw = ifp->if_output_bw; - if (input_bw != NULL) + } + if (input_bw != NULL) { *input_bw = ifp->if_input_bw; + } - return (0); + return 0; } errno_t ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt, struct if_latencies *input_lt) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } - if (output_lt != NULL) + if (output_lt != NULL) { (void) ifnet_set_output_latencies(ifp, output_lt, FALSE); + } - if (input_lt != NULL) + if (input_lt != NULL) { (void) ifnet_set_input_latencies(ifp, input_lt); + } - return (0); + return 0; } errno_t @@ -1445,29 +1532,35 @@ ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt, VERIFY(ifp != NULL && lt != NULL); ifq = &ifp->if_snd; - if (!locked) + if (!locked) { IFCQ_LOCK(ifq); + } IFCQ_LOCK_ASSERT_HELD(ifq); old_lt = ifp->if_output_lt; - if (lt->eff_lt != 0) + if (lt->eff_lt != 0) { ifp->if_output_lt.eff_lt = lt->eff_lt; - if (lt->max_lt != 0) + } + if (lt->max_lt != 0) { ifp->if_output_lt.max_lt = lt->max_lt; - if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) + } + if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) { ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt; - else if (ifp->if_output_lt.eff_lt == 0) + } else if (ifp->if_output_lt.eff_lt == 0) { ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt; + } /* Adjust queue parameters if needed */ if (old_lt.eff_lt != ifp->if_output_lt.eff_lt || - old_lt.max_lt != ifp->if_output_lt.max_lt) + old_lt.max_lt != ifp->if_output_lt.max_lt) { ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY); + } - if (!locked) + if (!locked) { IFCQ_UNLOCK(ifq); + } - return (0); + return 0; } errno_t @@ -1478,35 +1571,42 @@ ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt) VERIFY(ifp != NULL && lt != NULL); old_lt = ifp->if_input_lt; - if (lt->eff_lt != 0) + if (lt->eff_lt != 0) { ifp->if_input_lt.eff_lt = lt->eff_lt; - if (lt->max_lt != 0) + } + if (lt->max_lt != 0) { ifp->if_input_lt.max_lt = lt->max_lt; - if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) + } + if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) { ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt; - else if (ifp->if_input_lt.eff_lt == 0) + } else if (ifp->if_input_lt.eff_lt == 0) { ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt; + } if (old_lt.eff_lt != ifp->if_input_lt.eff_lt || - old_lt.max_lt != ifp->if_input_lt.max_lt) + old_lt.max_lt != ifp->if_input_lt.max_lt) { ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY); + } - return (0); + return 0; } errno_t ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt, struct if_latencies *input_lt) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } - if (output_lt != NULL) + if (output_lt != NULL) { *output_lt = ifp->if_output_lt; - if (input_lt != NULL) + } + if (input_lt != NULL) { *input_lt = ifp->if_input_lt; + } - return (0); + return 0; } errno_t @@ -1514,17 +1614,18 @@ ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p) { errno_t err; - if (ifp == NULL) - return (EINVAL); - else if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (ifp == NULL) { + return EINVAL; + } else if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } err = dlil_rxpoll_set_params(ifp, p, FALSE); /* Release the io ref count */ ifnet_decr_iorefcnt(ifp); - return (err); + return err; } errno_t @@ -1532,103 +1633,125 @@ ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p) { errno_t err; - if 
(ifp == NULL || p == NULL) - return (EINVAL); - else if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (ifp == NULL || p == NULL) { + return EINVAL; + } else if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } err = dlil_rxpoll_get_params(ifp, p); /* Release the io ref count */ ifnet_decr_iorefcnt(ifp); - return (err); + return err; } errno_t ifnet_stat_increment(struct ifnet *ifp, const struct ifnet_stat_increment_param *s) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } - if (s->packets_in != 0) + if (s->packets_in != 0) { atomic_add_64(&ifp->if_data.ifi_ipackets, s->packets_in); - if (s->bytes_in != 0) + } + if (s->bytes_in != 0) { atomic_add_64(&ifp->if_data.ifi_ibytes, s->bytes_in); - if (s->errors_in != 0) + } + if (s->errors_in != 0) { atomic_add_64(&ifp->if_data.ifi_ierrors, s->errors_in); + } - if (s->packets_out != 0) + if (s->packets_out != 0) { atomic_add_64(&ifp->if_data.ifi_opackets, s->packets_out); - if (s->bytes_out != 0) + } + if (s->bytes_out != 0) { atomic_add_64(&ifp->if_data.ifi_obytes, s->bytes_out); - if (s->errors_out != 0) + } + if (s->errors_out != 0) { atomic_add_64(&ifp->if_data.ifi_oerrors, s->errors_out); + } - if (s->collisions != 0) + if (s->collisions != 0) { atomic_add_64(&ifp->if_data.ifi_collisions, s->collisions); - if (s->dropped != 0) + } + if (s->dropped != 0) { atomic_add_64(&ifp->if_data.ifi_iqdrops, s->dropped); + } /* Touch the last change time. */ TOUCHLASTCHANGE(&ifp->if_lastchange); - if (ifp->if_data_threshold != 0) + if (ifp->if_data_threshold != 0) { ifnet_notify_data_threshold(ifp); + } - return (0); + return 0; } errno_t ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in, u_int32_t bytes_in, u_int32_t errors_in) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } - if (packets_in != 0) + if (packets_in != 0) { atomic_add_64(&ifp->if_data.ifi_ipackets, packets_in); - if (bytes_in != 0) + } + if (bytes_in != 0) { atomic_add_64(&ifp->if_data.ifi_ibytes, bytes_in); - if (errors_in != 0) + } + if (errors_in != 0) { atomic_add_64(&ifp->if_data.ifi_ierrors, errors_in); + } TOUCHLASTCHANGE(&ifp->if_lastchange); - if (ifp->if_data_threshold != 0) + if (ifp->if_data_threshold != 0) { ifnet_notify_data_threshold(ifp); + } - return (0); + return 0; } errno_t ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out, u_int32_t bytes_out, u_int32_t errors_out) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } - if (packets_out != 0) + if (packets_out != 0) { atomic_add_64(&ifp->if_data.ifi_opackets, packets_out); - if (bytes_out != 0) + } + if (bytes_out != 0) { atomic_add_64(&ifp->if_data.ifi_obytes, bytes_out); - if (errors_out != 0) + } + if (errors_out != 0) { atomic_add_64(&ifp->if_data.ifi_oerrors, errors_out); + } TOUCHLASTCHANGE(&ifp->if_lastchange); - if (ifp->if_data_threshold != 0) + if (ifp->if_data_threshold != 0) { ifnet_notify_data_threshold(ifp); + } - return (0); + return 0; } errno_t ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } atomic_set_64(&ifp->if_data.ifi_ipackets, s->packets_in); atomic_set_64(&ifp->if_data.ifi_ibytes, s->bytes_in); @@ -1647,17 +1770,19 @@ ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s) /* Touch the last change time. 
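 *
 * [Editorial aside, illustrative only, not in the original source.]
 * Drivers normally prefer the incremental forms above over
 * ifnet_set_stat(); for example, on receive and transmit completion:
 *
 *	(void) ifnet_stat_increment_in(ifp, rx_pkts, rx_bytes, rx_errs);
 *	(void) ifnet_stat_increment_out(ifp, tx_pkts, tx_bytes, tx_errs);
 *
 * where the rx_ and tx_ counters are hypothetical totals accumulated
 * by the driver since its last report.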
*/ TOUCHLASTCHANGE(&ifp->if_lastchange); - if (ifp->if_data_threshold != 0) + if (ifp->if_data_threshold != 0) { ifnet_notify_data_threshold(ifp); + } - return (0); + return 0; } errno_t ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } atomic_get_64(s->packets_in, &ifp->if_data.ifi_ipackets); atomic_get_64(s->bytes_in, &ifp->if_data.ifi_ibytes); @@ -1673,53 +1798,56 @@ ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s) atomic_get_64(s->dropped, &ifp->if_data.ifi_iqdrops); atomic_get_64(s->no_protocol, &ifp->if_data.ifi_noproto); - if (ifp->if_data_threshold != 0) + if (ifp->if_data_threshold != 0) { ifnet_notify_data_threshold(ifp); + } - return (0); + return 0; } errno_t ifnet_touch_lastchange(ifnet_t interface) { - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } TOUCHLASTCHANGE(&interface->if_lastchange); - return (0); + return 0; } errno_t ifnet_lastchange(ifnet_t interface, struct timeval *last_change) { - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } *last_change = interface->if_data.ifi_lastchange; /* Crude conversion from uptime to calendar time */ last_change->tv_sec += boottime_sec(); - return (0); + return 0; } errno_t ifnet_touch_lastupdown(ifnet_t interface) { if (interface == NULL) { - return (EINVAL); + return EINVAL; } TOUCHLASTCHANGE(&interface->if_lastupdown); - return (0); + return 0; } errno_t ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta) { if (interface == NULL) { - return (EINVAL); + return EINVAL; } /* Calculate the delta */ @@ -1729,35 +1857,35 @@ ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta) } updown_delta->tv_usec = 0; - return (0); + return 0; } errno_t ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses) { - return (addresses == NULL ? EINVAL : - ifnet_get_address_list_family(interface, addresses, 0)); + return addresses == NULL ? EINVAL : + ifnet_get_address_list_family(interface, addresses, 0); } struct ifnet_addr_list { - SLIST_ENTRY(ifnet_addr_list) ifal_le; - struct ifaddr *ifal_ifa; + SLIST_ENTRY(ifnet_addr_list) ifal_le; + struct ifaddr *ifal_ifa; }; errno_t ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses, sa_family_t family) { - return (ifnet_get_address_list_family_internal(interface, addresses, - family, 0, M_NOWAIT, 0)); + return ifnet_get_address_list_family_internal(interface, addresses, + family, 0, M_NOWAIT, 0); } errno_t ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses) { - return (addresses == NULL ? EINVAL : - ifnet_get_address_list_family_internal(interface, addresses, - 0, 0, M_NOWAIT, 1)); + return addresses == NULL ? 
EINVAL : + ifnet_get_address_list_family_internal(interface, addresses, + 0, 0, M_NOWAIT, 1); } extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa); @@ -1799,8 +1927,9 @@ ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses, ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { - if (interface != NULL && ifp != interface) + if (interface != NULL && ifp != interface) { continue; + } one: ifnet_lock_shared(ifp); if (interface == NULL || interface == ifp) { @@ -1813,12 +1942,13 @@ one: continue; } MALLOC(ifal, struct ifnet_addr_list *, - sizeof (*ifal), M_TEMP, how); + sizeof(*ifal), M_TEMP, how); if (ifal == NULL) { IFA_UNLOCK(ifa); ifnet_lock_done(ifp); - if (!detached) + if (!detached) { ifnet_head_done(); + } err = ENOMEM; goto done; } @@ -1830,23 +1960,25 @@ one: } } ifnet_lock_done(ifp); - if (detached) + if (detached) { break; + } } - if (!detached) + if (!detached) { ifnet_head_done(); + } if (count == 0) { err = ENXIO; goto done; } - MALLOC(*addresses, ifaddr_t *, sizeof (ifaddr_t) * (count + 1), + MALLOC(*addresses, ifaddr_t *, sizeof(ifaddr_t) * (count + 1), M_TEMP, how); if (*addresses == NULL) { err = ENOMEM; goto done; } - bzero(*addresses, sizeof (ifaddr_t) * (count + 1)); + bzero(*addresses, sizeof(ifaddr_t) * (count + 1)); done: SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) { @@ -1876,7 +2008,7 @@ done: FREE(*addresses, M_TEMP); err = ENXIO; } - return (err); + return err; } void @@ -1884,11 +2016,13 @@ ifnet_free_address_list(ifaddr_t *addresses) { int i; - if (addresses == NULL) + if (addresses == NULL) { return; + } - for (i = 0; addresses[i] != NULL; i++) + for (i = 0; addresses[i] != NULL; i++) { IFA_REMREF(addresses[i]); + } FREE(addresses, M_TEMP); } @@ -1899,8 +2033,9 @@ ifnet_lladdr(ifnet_t interface) struct ifaddr *ifa; void *lladdr; - if (interface == NULL) - return (NULL); + if (interface == NULL) { + return NULL; + } /* * if_lladdr points to the permanent link address of @@ -1912,26 +2047,29 @@ ifnet_lladdr(ifnet_t interface) lladdr = LLADDR(SDL((void *)ifa->ifa_addr)); IFA_UNLOCK(ifa); - return (lladdr); + return lladdr; } errno_t ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len, size_t *out_len) { - if (interface == NULL || addr == NULL || out_len == NULL) - return (EINVAL); + if (interface == NULL || addr == NULL || out_len == NULL) { + return EINVAL; + } *out_len = interface->if_broadcast.length; - if (buffer_len < interface->if_broadcast.length) - return (EMSGSIZE); + if (buffer_len < interface->if_broadcast.length) { + return EMSGSIZE; + } - if (interface->if_broadcast.length == 0) - return (ENXIO); + if (interface->if_broadcast.length == 0) { + return ENXIO; + } if (interface->if_broadcast.length <= - sizeof (interface->if_broadcast.u.buffer)) { + sizeof(interface->if_broadcast.u.buffer)) { bcopy(interface->if_broadcast.u.buffer, addr, interface->if_broadcast.length); } else { @@ -1939,7 +2077,7 @@ ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len, interface->if_broadcast.length); } - return (0); + return 0; } static errno_t @@ -1956,10 +2094,11 @@ ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr, * Make sure to accommodate the largest possible * size of SA(if_lladdr)->sa_len.
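 *
 * [Editorial aside, illustrative only, not in the original source.]
 * The address-list KPIs above return a NULL-terminated array of
 * referenced ifaddrs that the caller must hand back, e.g.:
 *
 *	ifaddr_t *addrs;
 *	int i;
 *
 *	if (ifnet_get_address_list(ifp, &addrs) == 0) {
 *		for (i = 0; addrs[i] != NULL; i++)
 *			;	(per-address work goes here)
 *		ifnet_free_address_list(addrs);
 *	}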
*/ - _CASSERT(sizeof (sdlbuf) == (SOCK_MAXADDRLEN + 1)); + _CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1)); - if (interface == NULL || lladdr == NULL) - return (EINVAL); + if (interface == NULL || lladdr == NULL) { + return EINVAL; + } ifa = interface->if_lladdr; IFA_LOCK_SPIN(ifa); @@ -1974,14 +2113,14 @@ ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr, bcopy(bytes, lladdr, bytes_len); } - return (error); + return error; } errno_t ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length) { - return (ifnet_lladdr_copy_bytes_internal(interface, lladdr, length, - NULL)); + return ifnet_lladdr_copy_bytes_internal(interface, lladdr, length, + NULL); } errno_t @@ -2011,7 +2150,7 @@ ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length) net_thread_marks_pop(marks); #endif - return (error); + return error; } static errno_t @@ -2019,10 +2158,11 @@ ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr, size_t lladdr_len, u_char new_type, int apply_type) { struct ifaddr *ifa; - errno_t error = 0; + errno_t error = 0; - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } ifnet_head_lock_shared(); ifnet_lock_exclusive(interface); @@ -2030,7 +2170,7 @@ ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr, (lladdr_len != interface->if_addrlen || lladdr == 0)) { ifnet_lock_done(interface); ifnet_head_done(); - return (EINVAL); + return EINVAL; } ifa = ifnet_addrs[interface->if_index - 1]; if (ifa != NULL) { @@ -2063,35 +2203,37 @@ ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr, KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0); } - return (error); + return error; } errno_t ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len) { - return (ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0)); + return ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0); } errno_t ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr, size_t lladdr_len, u_char type) { - return (ifnet_set_lladdr_internal(interface, lladdr, - lladdr_len, type, 1)); + return ifnet_set_lladdr_internal(interface, lladdr, + lladdr_len, type, 1); } errno_t ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr, ifmultiaddr_t *ifmap) { - if (interface == NULL || maddr == NULL) - return (EINVAL); + if (interface == NULL || maddr == NULL) { + return EINVAL; + } /* Don't let users screw up protocols' entries. */ - if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) - return (EINVAL); + if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) { + return EINVAL; + } - return (if_addmulti_anon(interface, maddr, ifmap)); + return if_addmulti_anon(interface, maddr, ifmap); } errno_t @@ -2099,15 +2241,17 @@ ifnet_remove_multicast(ifmultiaddr_t ifma) { struct sockaddr *maddr; - if (ifma == NULL) - return (EINVAL); + if (ifma == NULL) { + return EINVAL; + } maddr = ifma->ifma_addr; /* Don't let users screw up protocols' entries. 
*/ - if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) - return (EINVAL); + if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) { + return EINVAL; + } - return (if_delmulti_anon(ifma->ifma_ifp, maddr)); + return if_delmulti_anon(ifma->ifma_ifp, maddr); } errno_t @@ -2117,24 +2261,26 @@ ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses) int cmax = 0; struct ifmultiaddr *addr; - if (ifp == NULL || addresses == NULL) - return (EINVAL); + if (ifp == NULL || addresses == NULL) { + return EINVAL; + } ifnet_lock_shared(ifp); LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) { cmax++; } - MALLOC(*addresses, ifmultiaddr_t *, sizeof (ifmultiaddr_t) * (cmax + 1), + MALLOC(*addresses, ifmultiaddr_t *, sizeof(ifmultiaddr_t) * (cmax + 1), M_TEMP, M_NOWAIT); if (*addresses == NULL) { ifnet_lock_done(ifp); - return (ENOMEM); + return ENOMEM; } LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) { - if (count + 1 > cmax) + if (count + 1 > cmax) { break; + } (*addresses)[count] = (ifmultiaddr_t)addr; ifmaddr_reference((*addresses)[count]); count++; @@ -2142,7 +2288,7 @@ ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses) (*addresses)[cmax] = NULL; ifnet_lock_done(ifp); - return (0); + return 0; } void @@ -2150,11 +2296,13 @@ ifnet_free_multicast_list(ifmultiaddr_t *addresses) { int i; - if (addresses == NULL) + if (addresses == NULL) { return; + } - for (i = 0; addresses[i] != NULL; i++) + for (i = 0; addresses[i] != NULL; i++) { ifmaddr_release(addresses[i]); + } FREE(addresses, M_TEMP); } @@ -2163,10 +2311,11 @@ errno_t ifnet_find_by_name(const char *ifname, ifnet_t *ifpp) { struct ifnet *ifp; - int namelen; + int namelen; - if (ifname == NULL) - return (EINVAL); + if (ifname == NULL) { + return EINVAL; + } namelen = strlen(ifname); @@ -2178,8 +2327,9 @@ ifnet_find_by_name(const char *ifname, ifnet_t *ifpp) struct sockaddr_dl *ll_addr; ifa = ifnet_addrs[ifp->if_index - 1]; - if (ifa == NULL) + if (ifa == NULL) { continue; + } IFA_LOCK(ifa); ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr; @@ -2195,24 +2345,24 @@ ifnet_find_by_name(const char *ifname, ifnet_t *ifpp) } ifnet_head_done(); - return ((ifp == NULL) ? ENXIO : 0); + return (ifp == NULL) ? 
ENXIO : 0; } errno_t ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count) { - return (ifnet_list_get_common(family, FALSE, list, count)); + return ifnet_list_get_common(family, FALSE, list, count); } __private_extern__ errno_t ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count) { - return (ifnet_list_get_common(family, TRUE, list, count)); + return ifnet_list_get_common(family, TRUE, list, count); } struct ifnet_list { - SLIST_ENTRY(ifnet_list) ifl_le; - struct ifnet *ifl_ifp; + SLIST_ENTRY(ifnet_list) ifl_le; + struct ifnet *ifl_ifp; }; static errno_t @@ -2238,7 +2388,7 @@ ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list, ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { if (family == IFNET_FAMILY_ANY || ifp->if_family == family) { - MALLOC(ifl, struct ifnet_list *, sizeof (*ifl), + MALLOC(ifl, struct ifnet_list *, sizeof(*ifl), M_TEMP, M_NOWAIT); if (ifl == NULL) { ifnet_head_done(); @@ -2258,26 +2408,27 @@ ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list, goto done; } - MALLOC(*list, ifnet_t *, sizeof (ifnet_t) * (cnt + 1), + MALLOC(*list, ifnet_t *, sizeof(ifnet_t) * (cnt + 1), M_TEMP, M_NOWAIT); if (*list == NULL) { err = ENOMEM; goto done; } - bzero(*list, sizeof (ifnet_t) * (cnt + 1)); + bzero(*list, sizeof(ifnet_t) * (cnt + 1)); *count = cnt; done: SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) { SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le); - if (err == 0) + if (err == 0) { (*list)[--cnt] = ifl->ifl_ifp; - else + } else { ifnet_release(ifl->ifl_ifp); + } FREE(ifl, M_TEMP); } - return (err); + return err; } void @@ -2285,11 +2436,13 @@ ifnet_list_free(ifnet_t *interfaces) { int i; - if (interfaces == NULL) + if (interfaces == NULL) { return; + } - for (i = 0; interfaces[i]; i++) + for (i = 0; interfaces[i]; i++) { ifnet_release(interfaces[i]); + } FREE(interfaces, M_TEMP); } @@ -2301,21 +2454,23 @@ ifnet_list_free(ifnet_t *interfaces) errno_t ifaddr_reference(ifaddr_t ifa) { - if (ifa == NULL) - return (EINVAL); + if (ifa == NULL) { + return EINVAL; + } IFA_ADDREF(ifa); - return (0); + return 0; } errno_t ifaddr_release(ifaddr_t ifa) { - if (ifa == NULL) - return (EINVAL); + if (ifa == NULL) { + return EINVAL; + } IFA_REMREF(ifa); - return (0); + return 0; } sa_family_t @@ -2325,11 +2480,12 @@ ifaddr_address_family(ifaddr_t ifa) if (ifa != NULL) { IFA_LOCK_SPIN(ifa); - if (ifa->ifa_addr != NULL) + if (ifa->ifa_addr != NULL) { family = ifa->ifa_addr->sa_family; + } IFA_UNLOCK(ifa); } - return (family); + return family; } errno_t @@ -2337,13 +2493,14 @@ ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size) { u_int32_t copylen; - if (ifa == NULL || out_addr == NULL) - return (EINVAL); + if (ifa == NULL || out_addr == NULL) { + return EINVAL; + } IFA_LOCK_SPIN(ifa); if (ifa->ifa_addr == NULL) { IFA_UNLOCK(ifa); - return (ENOTSUP); + return ENOTSUP; } copylen = (addr_size >= ifa->ifa_addr->sa_len) ? 
@@ -2352,11 +2509,11 @@ ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size) if (ifa->ifa_addr->sa_len > addr_size) { IFA_UNLOCK(ifa); - return (EMSGSIZE); + return EMSGSIZE; } IFA_UNLOCK(ifa); - return (0); + return 0; } errno_t @@ -2364,13 +2521,14 @@ ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size) { u_int32_t copylen; - if (ifa == NULL || out_addr == NULL) - return (EINVAL); + if (ifa == NULL || out_addr == NULL) { + return EINVAL; + } IFA_LOCK_SPIN(ifa); if (ifa->ifa_dstaddr == NULL) { IFA_UNLOCK(ifa); - return (ENOTSUP); + return ENOTSUP; } copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ? @@ -2379,11 +2537,11 @@ ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size) if (ifa->ifa_dstaddr->sa_len > addr_size) { IFA_UNLOCK(ifa); - return (EMSGSIZE); + return EMSGSIZE; } IFA_UNLOCK(ifa); - return (0); + return 0; } errno_t @@ -2391,13 +2549,14 @@ ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size) { u_int32_t copylen; - if (ifa == NULL || out_addr == NULL) - return (EINVAL); + if (ifa == NULL || out_addr == NULL) { + return EINVAL; + } IFA_LOCK_SPIN(ifa); if (ifa->ifa_netmask == NULL) { IFA_UNLOCK(ifa); - return (ENOTSUP); + return ENOTSUP; } copylen = addr_size >= ifa->ifa_netmask->sa_len ? @@ -2406,11 +2565,11 @@ ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size) if (ifa->ifa_netmask->sa_len > addr_size) { IFA_UNLOCK(ifa); - return (EMSGSIZE); + return EMSGSIZE; } IFA_UNLOCK(ifa); - return (0); + return 0; } ifnet_t @@ -2418,79 +2577,87 @@ ifaddr_ifnet(ifaddr_t ifa) { struct ifnet *ifp; - if (ifa == NULL) - return (NULL); + if (ifa == NULL) { + return NULL; + } /* ifa_ifp is set once at creation time; it is never changed */ ifp = ifa->ifa_ifp; - return (ifp); + return ifp; } ifaddr_t ifaddr_withaddr(const struct sockaddr *address) { - if (address == NULL) - return (NULL); + if (address == NULL) { + return NULL; + } - return (ifa_ifwithaddr(address)); + return ifa_ifwithaddr(address); } ifaddr_t ifaddr_withdstaddr(const struct sockaddr *address) { - if (address == NULL) - return (NULL); + if (address == NULL) { + return NULL; + } - return (ifa_ifwithdstaddr(address)); + return ifa_ifwithdstaddr(address); } ifaddr_t ifaddr_withnet(const struct sockaddr *net) { - if (net == NULL) - return (NULL); + if (net == NULL) { + return NULL; + } - return (ifa_ifwithnet(net)); + return ifa_ifwithnet(net); } ifaddr_t ifaddr_withroute(int flags, const struct sockaddr *destination, const struct sockaddr *gateway) { - if (destination == NULL || gateway == NULL) - return (NULL); + if (destination == NULL || gateway == NULL) { + return NULL; + } - return (ifa_ifwithroute(flags, destination, gateway)); + return ifa_ifwithroute(flags, destination, gateway); } ifaddr_t ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface) { - if (addr == NULL || interface == NULL) - return (NULL); + if (addr == NULL || interface == NULL) { + return NULL; + } - return (ifaof_ifpforaddr_select(addr, interface)); + return ifaof_ifpforaddr_select(addr, interface); } errno_t ifmaddr_reference(ifmultiaddr_t ifmaddr) { - if (ifmaddr == NULL) - return (EINVAL); + if (ifmaddr == NULL) { + return EINVAL; + } IFMA_ADDREF(ifmaddr); - return (0); + return 0; } errno_t ifmaddr_release(ifmultiaddr_t ifmaddr) { - if (ifmaddr == NULL) - return (EINVAL); + if (ifmaddr == NULL) { + return EINVAL; + } IFMA_REMREF(ifmaddr); - return (0); + return 0; } errno_t @@ -2499,13 +2666,14 @@ 
ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr, { u_int32_t copylen; - if (ifma == NULL || out_addr == NULL) - return (EINVAL); + if (ifma == NULL || out_addr == NULL) { + return EINVAL; + } IFMA_LOCK(ifma); if (ifma->ifma_addr == NULL) { IFMA_UNLOCK(ifma); - return (ENOTSUP); + return ENOTSUP; } copylen = (addr_size >= ifma->ifma_addr->sa_len ? @@ -2514,10 +2682,10 @@ ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr, if (ifma->ifma_addr->sa_len > addr_size) { IFMA_UNLOCK(ifma); - return (EMSGSIZE); + return EMSGSIZE; } IFMA_UNLOCK(ifma); - return (0); + return 0; } errno_t @@ -2526,18 +2694,20 @@ ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr, { struct ifmultiaddr *ifma_ll; - if (ifma == NULL || out_addr == NULL) - return (EINVAL); - if ((ifma_ll = ifma->ifma_ll) == NULL) - return (ENOTSUP); + if (ifma == NULL || out_addr == NULL) { + return EINVAL; + } + if ((ifma_ll = ifma->ifma_ll) == NULL) { + return ENOTSUP; + } - return (ifmaddr_address(ifma_ll, out_addr, addr_size)); + return ifmaddr_address(ifma_ll, out_addr, addr_size); } ifnet_t ifmaddr_ifnet(ifmultiaddr_t ifma) { - return ((ifma == NULL) ? NULL : ifma->ifma_ifp); + return (ifma == NULL) ? NULL : ifma->ifma_ifp; } /**************************************************************************/ @@ -2569,7 +2739,7 @@ ifnet_clone_attach(struct ifnet_clone_params *cloner_params, } /* Make room for name string */ - ifc = _MALLOC(sizeof (struct if_clone) + IFNAMSIZ + 1, M_CLONE, + ifc = _MALLOC(sizeof(struct if_clone) + IFNAMSIZ + 1, M_CLONE, M_WAITOK | M_ZERO); if (ifc == NULL) { printf("%s: _MALLOC failed\n", __func__); @@ -2590,11 +2760,12 @@ ifnet_clone_attach(struct ifnet_clone_params *cloner_params, } *ifcloner = ifc; - return (0); + return 0; fail: - if (ifc != NULL) + if (ifc != NULL) { FREE(ifc, M_CLONE); - return (error); + } + return error; } errno_t @@ -2603,8 +2774,9 @@ ifnet_clone_detach(if_clone_t ifcloner) errno_t error = 0; struct if_clone *ifc = ifcloner; - if (ifc == NULL || ifc->ifc_name == NULL) - return (EINVAL); + if (ifc == NULL || ifc->ifc_name == NULL) { + return EINVAL; + } if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) { printf("%s: no cloner for %s\n", __func__, ifc->ifc_name); @@ -2617,7 +2789,7 @@ ifnet_clone_detach(if_clone_t ifcloner) FREE(ifc, M_CLONE); fail: - return (error); + return error; } /**************************************************************************/ @@ -2631,8 +2803,9 @@ ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol, u_int32_t ifindex; u_int32_t inp_flags = 0; - if (bitfield == NULL) - return (EINVAL); + if (bitfield == NULL) { + return EINVAL; + } switch (protocol) { case PF_UNSPEC: @@ -2640,7 +2813,7 @@ ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol, case PF_INET6: break; default: - return (EINVAL); + return EINVAL; } /* bit string is long enough to hold 16-bit port values */ @@ -2649,75 +2822,84 @@ ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol, if_ports_used_update_wakeuuid(ifp); - inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_WILDCARDOK) ? - INPCB_GET_PORTS_USED_WILDCARDOK : 0); - inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_NOWAKEUPOK) ? - INPCB_GET_PORTS_USED_NOWAKEUPOK : 0); - inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_RECVANYIFONLY) ? - INPCB_GET_PORTS_USED_RECVANYIFONLY : 0); - inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY) ? - INPCB_GET_PORTS_USED_EXTBGIDLEONLY : 0); - inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_ACTIVEONLY) ? 
- INPCB_GET_PORTS_USED_ACTIVEONLY : 0); + inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_WILDCARDOK) ? + INPCB_GET_PORTS_USED_WILDCARDOK : 0); + inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_NOWAKEUPOK) ? + INPCB_GET_PORTS_USED_NOWAKEUPOK : 0); + inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_RECVANYIFONLY) ? + INPCB_GET_PORTS_USED_RECVANYIFONLY : 0); + inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY) ? + INPCB_GET_PORTS_USED_EXTBGIDLEONLY : 0); + inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_ACTIVEONLY) ? + INPCB_GET_PORTS_USED_ACTIVEONLY : 0); - ifindex = (ifp != NULL) ? ifp->if_index : 0; + ifindex = (ifp != NULL) ? ifp->if_index : 0; - if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) - udp_get_ports_used(ifindex, protocol, inp_flags, - bitfield); + if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) { + udp_get_ports_used(ifindex, protocol, inp_flags, + bitfield); + } - if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) - tcp_get_ports_used(ifindex, protocol, inp_flags, - bitfield); + if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) { + tcp_get_ports_used(ifindex, protocol, inp_flags, + bitfield); + } - return (0); + return 0; } errno_t ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield) { u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK; - return (ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags, - bitfield)); + return ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags, + bitfield); } errno_t ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48]) { - if (ifp == NULL || sa == NULL || srvinfo == NULL) - return (EINVAL); - if (sa->sa_len > sizeof(struct sockaddr_storage)) - return (EINVAL); - if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) - return (EINVAL); + if (ifp == NULL || sa == NULL || srvinfo == NULL) { + return EINVAL; + } + if (sa->sa_len > sizeof(struct sockaddr_storage)) { + return EINVAL; + } + if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) { + return EINVAL; + } dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo); - return (0); + return 0; } errno_t ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa) { - if (ifp == NULL || sa == NULL) - return (EINVAL); - if (sa->sa_len > sizeof(struct sockaddr_storage)) - return (EINVAL); - if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) - return (EINVAL); + if (ifp == NULL || sa == NULL) { + return EINVAL; + } + if (sa->sa_len > sizeof(struct sockaddr_storage)) { + return EINVAL; + } + if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) { + return EINVAL; + } dlil_node_absent(ifp, sa); - return (0); + return 0; } errno_t ifnet_notice_master_elected(ifnet_t ifp) { - if (ifp == NULL) - return (EINVAL); + if (ifp == NULL) { + return EINVAL; + } dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_MASTER_ELECTED, NULL, 0); - return (0); + return 0; } errno_t @@ -2727,7 +2909,7 @@ ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val) m_do_tx_compl_callback(m, ifp); - return (0); + return 0; } errno_t @@ -2735,18 +2917,19 @@ ifnet_tx_compl(ifnet_t ifp, mbuf_t m) { m_do_tx_compl_callback(m, ifp); - return (0); + return 0; } errno_t ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN], u_int8_t info[IFNET_MODARGLEN]) { - if (ifp == NULL || modid == NULL) - return (EINVAL); + if (ifp == NULL || modid == NULL) { + return EINVAL; + } dlil_report_issues(ifp, modid, info); - return (0); + return 0; } errno_t @@ -2754,10 +2937,11 @@ ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp) { ifnet_t odifp = NULL; - if (ifp == NULL) - 
return (EINVAL); - else if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (ifp == NULL) { + return EINVAL; + } else if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } ifnet_lock_exclusive(ifp); odifp = ifp->if_delegated.ifp; @@ -2777,7 +2961,7 @@ ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp) } delegate_check_ifp = delegate_check_ifp->if_delegated.ifp; } - bzero(&ifp->if_delegated, sizeof (ifp->if_delegated)); + bzero(&ifp->if_delegated, sizeof(ifp->if_delegated)); if (delegated_ifp != NULL && ifp != delegated_ifp) { ifp->if_delegated.ifp = delegated_ifp; ifnet_reference(delegated_ifp); @@ -2790,9 +2974,9 @@ ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp) /* * Propogate flags related to ECN from delegated interface */ - ifp->if_eflags &= ~(IFEF_ECN_ENABLE|IFEF_ECN_DISABLE); + ifp->if_eflags &= ~(IFEF_ECN_ENABLE | IFEF_ECN_DISABLE); ifp->if_eflags |= (delegated_ifp->if_eflags & - (IFEF_ECN_ENABLE|IFEF_ECN_DISABLE)); + (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE)); printf("%s: is now delegating %s (type 0x%x, family %u, " "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname, @@ -2817,27 +3001,29 @@ done: /* Release the io ref count */ ifnet_decr_iorefcnt(ifp); - return (0); + return 0; } errno_t ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp) { - if (ifp == NULL || pdelegated_ifp == NULL) - return (EINVAL); - else if (!ifnet_is_attached(ifp, 1)) - return (ENXIO); + if (ifp == NULL || pdelegated_ifp == NULL) { + return EINVAL; + } else if (!ifnet_is_attached(ifp, 1)) { + return ENXIO; + } ifnet_lock_shared(ifp); - if (ifp->if_delegated.ifp != NULL) + if (ifp->if_delegated.ifp != NULL) { ifnet_reference(ifp->if_delegated.ifp); + } *pdelegated_ifp = ifp->if_delegated.ifp; ifnet_lock_done(ifp); /* Release the io ref count */ ifnet_decr_iorefcnt(ifp); - return (0); + return 0; } errno_t @@ -2849,21 +3035,25 @@ ifnet_get_keepalive_offload_frames(ifnet_t ifp, u_int32_t i; if (frames_array == NULL || used_frames_count == NULL || - frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) - return (EINVAL); + frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) { + return EINVAL; + } /* frame_data_offset should be 32-bit aligned */ if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) != - frame_data_offset) - return (EINVAL); + frame_data_offset) { + return EINVAL; + } *used_frames_count = 0; - if (frames_array_count == 0) - return (0); + if (frames_array_count == 0) { + return 0; + } /* Keep-alive offload not required for CLAT interface */ - if (IS_INTF_CLAT46(ifp)) - return (0); + if (IS_INTF_CLAT46(ifp)) { + return 0; + } for (i = 0; i < frames_array_count; i++) { struct ifnet_keepalive_offload_frame *frame = frames_array + i; @@ -2876,20 +3066,22 @@ ifnet_get_keepalive_offload_frames(ifnet_t ifp, frames_array, frames_array_count, frame_data_offset); /* If there is more room, collect other UDP keep-alive frames */ - if (*used_frames_count < frames_array_count) + if (*used_frames_count < frames_array_count) { udp_fill_keepalive_offload_frames(ifp, frames_array, frames_array_count, frame_data_offset, used_frames_count); + } /* If there is more room, collect other TCP keep-alive frames */ - if (*used_frames_count < frames_array_count) + if (*used_frames_count < frames_array_count) { tcp_fill_keepalive_offload_frames(ifp, frames_array, frames_array_count, frame_data_offset, used_frames_count); + } VERIFY(*used_frames_count <= frames_array_count); - return (0); + return 0; } errno_t @@ -2899,8 +3091,9 @@ ifnet_link_status_report(ifnet_t ifp, const void *buffer, 
struct if_link_status *ifsr; errno_t err = 0; - if (ifp == NULL || buffer == NULL || buffer_len == 0) - return (EINVAL); + if (ifp == NULL || buffer == NULL || buffer_len == 0) { + return EINVAL; + } ifnet_lock_shared(ifp); @@ -2910,7 +3103,7 @@ ifnet_link_status_report(ifnet_t ifp, const void *buffer, */ if (!ifnet_is_attached(ifp, 0)) { ifnet_lock_done(ifp); - return (ENXIO); + return ENXIO; } lck_rw_lock_exclusive(&ifp->if_link_status_lock); @@ -2968,7 +3161,6 @@ ifnet_link_status_report(ifnet_t ifp, const void *buffer, ifp->if_link_status->ifsr_len = ifsr->ifsr_len; if_cell_sr->valid_bitmask = 0; bcopy(new_cell_sr, if_cell_sr, sizeof(*if_cell_sr)); - } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) { struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr; @@ -3034,7 +3226,7 @@ ifnet_link_status_report(ifnet_t ifp, const void *buffer, done: lck_rw_done(&ifp->if_link_status_lock); ifnet_lock_done(ifp); - return (err); + return err; } /*************************************************************************/ @@ -3044,25 +3236,28 @@ done: errno_t ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable) { - if (interface == NULL) - return (EINVAL); + if (interface == NULL) { + return EINVAL; + } if_set_qosmarking_mode(interface, capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE); - return (0); + return 0; } errno_t ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable) { - if (interface == NULL || capable == NULL) - return (EINVAL); - if (interface->if_eflags & IFEF_QOSMARKING_CAPABLE) + if (interface == NULL || capable == NULL) { + return EINVAL; + } + if (interface->if_eflags & IFEF_QOSMARKING_CAPABLE) { *capable = true; - else + } else { *capable = false; - return (0); + } + return 0; } errno_t @@ -3070,41 +3265,47 @@ ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes) { int64_t bytes; - if (interface == NULL || unsent_bytes == NULL) - return (EINVAL); + if (interface == NULL || unsent_bytes == NULL) { + return EINVAL; + } bytes = *unsent_bytes = 0; - if (!IF_FULLY_ATTACHED(interface)) - return (ENXIO); + if (!IF_FULLY_ATTACHED(interface)) { + return ENXIO; + } bytes = interface->if_sndbyte_unsent; - if (interface->if_eflags & IFEF_TXSTART) + if (interface->if_eflags & IFEF_TXSTART) { bytes += IFCQ_BYTES(&interface->if_snd); + } *unsent_bytes = bytes; - return (0); + return 0; } errno_t ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status) { - if (ifp == NULL || buf_status == NULL) - return (EINVAL); + if (ifp == NULL || buf_status == NULL) { + return EINVAL; + } - bzero(buf_status, sizeof (*buf_status)); + bzero(buf_status, sizeof(*buf_status)); - if (!IF_FULLY_ATTACHED(ifp)) - return (ENXIO); + if (!IF_FULLY_ATTACHED(ifp)) { + return ENXIO; + } - if (ifp->if_eflags & IFEF_TXSTART) + if (ifp->if_eflags & IFEF_TXSTART) { buf_status->buf_interface = IFCQ_BYTES(&ifp->if_snd); + } buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) || (ifp->if_sndbyte_unsent != 0)) ? 
1 : 0; - return (0); + return 0; } void @@ -3125,8 +3326,9 @@ ifnet_normalise_unsent_data(void) } if (ifp->if_sndbyte_total > 0 || - IFCQ_BYTES(&ifp->if_snd) > 0) + IFCQ_BYTES(&ifp->if_snd) > 0) { ifp->if_unsent_data_cnt++; + } ifnet_lock_done(ifp); } @@ -3140,16 +3342,17 @@ ifnet_set_low_power_mode(ifnet_t ifp, boolean_t on) error = if_set_low_power(ifp, on); - return (error); + return error; } errno_t ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on) { - if (ifp == NULL || on == NULL) - return (EINVAL); + if (ifp == NULL || on == NULL) { + return EINVAL; + } *on = !!(ifp->if_xflags & IFXF_LOW_POWER); - return (0); + return 0; } diff --git a/bsd/net/kpi_interface.h b/bsd/net/kpi_interface.h index ba4736d59..3d71fdeec 100644 --- a/bsd/net/kpi_interface.h +++ b/bsd/net/kpi_interface.h @@ -26,14 +26,14 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header kpi_interface.h - This header defines an API to interact with network interfaces in - the kernel. The network interface KPI may be used to implement - network interfaces or to attach protocols to existing interfaces. + * @header kpi_interface.h + * This header defines an API to interact with network interfaces in + * the kernel. The network interface KPI may be used to implement + * network interfaces or to attach protocols to existing interfaces. */ #ifndef __KPI_INTERFACE__ -#define __KPI_INTERFACE__ +#define __KPI_INTERFACE__ #ifndef XNU_KERNEL_PRIVATE #include @@ -50,15 +50,15 @@ struct if_interface_state; #ifdef XNU_KERNEL_PRIVATE #if CONFIG_EMBEDDED -#define KPI_INTERFACE_EMBEDDED 1 +#define KPI_INTERFACE_EMBEDDED 1 #else -#define KPI_INTERFACE_EMBEDDED 0 +#define KPI_INTERFACE_EMBEDDED 0 #endif #else #if TARGET_OS_EMBEDDED -#define KPI_INTERFACE_EMBEDDED 1 +#define KPI_INTERFACE_EMBEDDED 1 #else -#define KPI_INTERFACE_EMBEDDED 0 +#define KPI_INTERFACE_EMBEDDED 0 #endif #endif @@ -70,194 +70,194 @@ struct kev_msg; struct ifnet_demux_desc; /*! - @enum Interface Families - @abstract Constants defining interface families. - @constant IFNET_FAMILY_ANY Match interface of any family type. - @constant IFNET_FAMILY_LOOPBACK A software loopback interface. - @constant IFNET_FAMILY_ETHERNET An Ethernet interface. - @constant IFNET_FAMILY_SLIP A SLIP interface. - @constant IFNET_FAMILY_TUN A tunnel interface. - @constant IFNET_FAMILY_VLAN A virtual LAN interface. - @constant IFNET_FAMILY_PPP A PPP interface. - @constant IFNET_FAMILY_PVC A PVC interface. - @constant IFNET_FAMILY_DISC A DISC interface. - @constant IFNET_FAMILY_MDECAP A MDECAP interface. - @constant IFNET_FAMILY_GIF A generic tunnel interface. - @constant IFNET_FAMILY_FAITH A FAITH [IPv4/IPv6 translation] interface. - @constant IFNET_FAMILY_STF A 6to4 interface. - @constant IFNET_FAMILY_FIREWIRE An IEEE 1394 [Firewire] interface. - @constant IFNET_FAMILY_BOND A virtual bonded interface. - @constant IFNET_FAMILY_CELLULAR A cellular interface. + * @enum Interface Families + * @abstract Constants defining interface families. + * @constant IFNET_FAMILY_ANY Match interface of any family type. + * @constant IFNET_FAMILY_LOOPBACK A software loopback interface. + * @constant IFNET_FAMILY_ETHERNET An Ethernet interface. + * @constant IFNET_FAMILY_SLIP A SLIP interface. + * @constant IFNET_FAMILY_TUN A tunnel interface. + * @constant IFNET_FAMILY_VLAN A virtual LAN interface. + * @constant IFNET_FAMILY_PPP A PPP interface. + * @constant IFNET_FAMILY_PVC A PVC interface. + * @constant IFNET_FAMILY_DISC A DISC interface. + * @constant IFNET_FAMILY_MDECAP A MDECAP interface. 
+ * @constant IFNET_FAMILY_GIF A generic tunnel interface. + * @constant IFNET_FAMILY_FAITH A FAITH [IPv4/IPv6 translation] interface. + * @constant IFNET_FAMILY_STF A 6to4 interface. + * @constant IFNET_FAMILY_FIREWIRE An IEEE 1394 [Firewire] interface. + * @constant IFNET_FAMILY_BOND A virtual bonded interface. + * @constant IFNET_FAMILY_CELLULAR A cellular interface. */ enum { - IFNET_FAMILY_ANY = 0, - IFNET_FAMILY_LOOPBACK = 1, - IFNET_FAMILY_ETHERNET = 2, - IFNET_FAMILY_SLIP = 3, - IFNET_FAMILY_TUN = 4, - IFNET_FAMILY_VLAN = 5, - IFNET_FAMILY_PPP = 6, - IFNET_FAMILY_PVC = 7, - IFNET_FAMILY_DISC = 8, - IFNET_FAMILY_MDECAP = 9, - IFNET_FAMILY_GIF = 10, - IFNET_FAMILY_FAITH = 11, /* deprecated */ - IFNET_FAMILY_STF = 12, - IFNET_FAMILY_FIREWIRE = 13, - IFNET_FAMILY_BOND = 14, - IFNET_FAMILY_CELLULAR = 15 + IFNET_FAMILY_ANY = 0, + IFNET_FAMILY_LOOPBACK = 1, + IFNET_FAMILY_ETHERNET = 2, + IFNET_FAMILY_SLIP = 3, + IFNET_FAMILY_TUN = 4, + IFNET_FAMILY_VLAN = 5, + IFNET_FAMILY_PPP = 6, + IFNET_FAMILY_PVC = 7, + IFNET_FAMILY_DISC = 8, + IFNET_FAMILY_MDECAP = 9, + IFNET_FAMILY_GIF = 10, + IFNET_FAMILY_FAITH = 11, /* deprecated */ + IFNET_FAMILY_STF = 12, + IFNET_FAMILY_FIREWIRE = 13, + IFNET_FAMILY_BOND = 14, + IFNET_FAMILY_CELLULAR = 15 }; /*! - @typedef ifnet_family_t - @abstract Storage type for the interface family. + * @typedef ifnet_family_t + * @abstract Storage type for the interface family. */ typedef u_int32_t ifnet_family_t; #ifdef KERNEL_PRIVATE /* - @enum Interface Sub-families - @abstract Constants defining interface sub-families (may also - be viewed as the underlying transport). Some families - (e.g. IFNET_FAMILY_ETHERNET) are often too generic. - These sub-families allow us to further refine the - interface family, e.g. Ethernet over Wi-Fi/USB, etc. + * @enum Interface Sub-families + * @abstract Constants defining interface sub-families (may also + * be viewed as the underlying transport). Some families + * (e.g. IFNET_FAMILY_ETHERNET) are often too generic. + * These sub-families allow us to further refine the + * interface family, e.g. Ethernet over Wi-Fi/USB, etc. */ enum { - IFNET_SUBFAMILY_ANY = 0, - IFNET_SUBFAMILY_USB = 1, - IFNET_SUBFAMILY_BLUETOOTH = 2, - IFNET_SUBFAMILY_WIFI = 3, - IFNET_SUBFAMILY_THUNDERBOLT = 4, - IFNET_SUBFAMILY_RESERVED = 5, - IFNET_SUBFAMILY_INTCOPROC = 6, - IFNET_SUBFAMILY_UTUN = 7, - IFNET_SUBFAMILY_IPSEC = 8, + IFNET_SUBFAMILY_ANY = 0, + IFNET_SUBFAMILY_USB = 1, + IFNET_SUBFAMILY_BLUETOOTH = 2, + IFNET_SUBFAMILY_WIFI = 3, + IFNET_SUBFAMILY_THUNDERBOLT = 4, + IFNET_SUBFAMILY_RESERVED = 5, + IFNET_SUBFAMILY_INTCOPROC = 6, + IFNET_SUBFAMILY_UTUN = 7, + IFNET_SUBFAMILY_IPSEC = 8, }; /* - @typedef ifnet_sub_family_t - @abstract Storage type for the interface sub-family. + * @typedef ifnet_sub_family_t + * @abstract Storage type for the interface sub-family. */ typedef u_int32_t ifnet_subfamily_t; #endif /* KERNEL_PRIVATE */ #ifndef BPF_TAP_MODE_T -#define BPF_TAP_MODE_T +#define BPF_TAP_MODE_T /*! - @enum BPF tap mode - @abstract Constants defining interface families. - @constant BPF_MODE_DISABLED Disable bpf. - @constant BPF_MODE_INPUT Enable input only. - @constant BPF_MODE_OUTPUT Enable output only. - @constant BPF_MODE_INPUT_OUTPUT Enable input and output. -*/ + * @enum BPF tap mode + * @abstract Constants defining interface families. + * @constant BPF_MODE_DISABLED Disable bpf. + * @constant BPF_MODE_INPUT Enable input only. + * @constant BPF_MODE_OUTPUT Enable output only. + * @constant BPF_MODE_INPUT_OUTPUT Enable input and output. 
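+ *
+ * Usage sketch (illustrative; the helper name is invented): counting
+ * attached Ethernet-family interfaces with ifnet_list_get(),
+ * ifnet_family() and ifnet_list_free(), the latter releasing the
+ * reference held on each entry:
+ *
+ *	static u_int32_t
+ *	count_ethernet_ifnets(void)
+ *	{
+ *		ifnet_t *list = NULL;
+ *		u_int32_t i, count = 0, neth = 0;
+ *
+ *		if (ifnet_list_get(IFNET_FAMILY_ANY, &list, &count) != 0) {
+ *			return 0;
+ *		}
+ *		for (i = 0; i < count; i++) {
+ *			if (ifnet_family(list[i]) == IFNET_FAMILY_ETHERNET) {
+ *				neth++;
+ *			}
+ *		}
+ *		ifnet_list_free(list);
+ *		return neth;
+ *	}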
+ */ enum { - BPF_MODE_DISABLED = 0, - BPF_MODE_INPUT = 1, - BPF_MODE_OUTPUT = 2, - BPF_MODE_INPUT_OUTPUT = 3 + BPF_MODE_DISABLED = 0, + BPF_MODE_INPUT = 1, + BPF_MODE_OUTPUT = 2, + BPF_MODE_INPUT_OUTPUT = 3 }; /*! - @typedef bpf_tap_mode - @abstract Mode for tapping. BPF_MODE_DISABLED/BPF_MODE_INPUT_OUTPUT etc. -*/ + * @typedef bpf_tap_mode + * @abstract Mode for tapping. BPF_MODE_DISABLED/BPF_MODE_INPUT_OUTPUT etc. + */ typedef u_int32_t bpf_tap_mode; #endif /* !BPF_TAP_MODE_T */ /*! - @typedef protocol_family_t - @abstract Storage type for the protocol family. -*/ + * @typedef protocol_family_t + * @abstract Storage type for the protocol family. + */ typedef u_int32_t protocol_family_t; /*! - @enum Interface Abilities - @abstract Constants defining interface offload support. - @constant IFNET_CSUM_IP Hardware will calculate IPv4 checksums. - @constant IFNET_CSUM_TCP Hardware will calculate TCP checksums. - @constant IFNET_CSUM_UDP Hardware will calculate UDP checksums. - @constant IFNET_CSUM_FRAGMENT Hardware will checksum IP fragments. - @constant IFNET_IP_FRAGMENT Hardware will fragment IP packets. - @constant IFNET_CSUM_TCPIPV6 Hardware will calculate TCP IPv6 checksums. - @constant IFNET_CSUM_UDPIPV6 Hardware will calculate UDP IPv6 checksums. - @constant IFNET_IPV6_FRAGMENT Hardware will fragment IPv6 packets. - @constant IFNET_VLAN_TAGGING Hardware will generate VLAN headers. - @constant IFNET_VLAN_MTU Hardware supports VLAN MTU. - @constant IFNET_MULTIPAGES Driver is capable of handling packets - coming down from the network stack that reside in virtually, - but not in physically contiguous span of the external mbuf - clusters. In this case, the data area of a packet in the - external mbuf cluster might cross one or more physical - pages that are disjoint, depending on the interface MTU - and the packet size. Such a use of larger than system page - size clusters by the network stack is done for better system - efficiency. Drivers that utilize the IOMbufNaturalMemoryCursor - with the getPhysicalSegmentsWithCoalesce interfaces and - enumerate the list of vectors should set this flag for - possible gain in performance during bulk data transfer. - @constant IFNET_TSO_IPV4 Hardware supports IPv4 TCP Segment Offloading. - If the Interface driver sets this flag, TCP will send larger frames (up to 64KB) as one - frame to the adapter which will perform the final packetization. The maximum TSO segment - supported by the interface can be set with "ifnet_set_tso_mtu". To retreive the real MTU - for the TCP connection the function "mbuf_get_tso_requested" is used by the driver. Note - that if TSO is active, all the packets will be flagged for TSO, not just large packets. - @constant IFNET_TSO_IPV6 Hardware supports IPv6 TCP Segment Offloading. - If the Interface driver sets this flag, TCP IPv6 will send larger frames (up to 64KB) as one - frame to the adapter which will perform the final packetization. The maximum TSO segment - supported by the interface can be set with "ifnet_set_tso_mtu". To retreive the real MTU - for the TCP IPv6 connection the function "mbuf_get_tso_requested" is used by the driver. - Note that if TSO is active, all the packets will be flagged for TSO, not just large packets. - @constant IFNET_TX_STATUS Driver supports returning a per packet - transmission status (pass, fail or other errors) of whether - the packet was successfully transmitted on the link, or the - transmission was aborted, or transmission failed. 
- -*/ + * @enum Interface Abilities + * @abstract Constants defining interface offload support. + * @constant IFNET_CSUM_IP Hardware will calculate IPv4 checksums. + * @constant IFNET_CSUM_TCP Hardware will calculate TCP checksums. + * @constant IFNET_CSUM_UDP Hardware will calculate UDP checksums. + * @constant IFNET_CSUM_FRAGMENT Hardware will checksum IP fragments. + * @constant IFNET_IP_FRAGMENT Hardware will fragment IP packets. + * @constant IFNET_CSUM_TCPIPV6 Hardware will calculate TCP IPv6 checksums. + * @constant IFNET_CSUM_UDPIPV6 Hardware will calculate UDP IPv6 checksums. + * @constant IFNET_IPV6_FRAGMENT Hardware will fragment IPv6 packets. + * @constant IFNET_VLAN_TAGGING Hardware will generate VLAN headers. + * @constant IFNET_VLAN_MTU Hardware supports VLAN MTU. + * @constant IFNET_MULTIPAGES Driver is capable of handling packets + * coming down from the network stack that reside in virtually, + * but not in physically contiguous span of the external mbuf + * clusters. In this case, the data area of a packet in the + * external mbuf cluster might cross one or more physical + * pages that are disjoint, depending on the interface MTU + * and the packet size. Such a use of larger than system page + * size clusters by the network stack is done for better system + * efficiency. Drivers that utilize the IOMbufNaturalMemoryCursor + * with the getPhysicalSegmentsWithCoalesce interfaces and + * enumerate the list of vectors should set this flag for + * possible gain in performance during bulk data transfer. + * @constant IFNET_TSO_IPV4 Hardware supports IPv4 TCP Segment Offloading. + * If the Interface driver sets this flag, TCP will send larger frames (up to 64KB) as one + * frame to the adapter which will perform the final packetization. The maximum TSO segment + * supported by the interface can be set with "ifnet_set_tso_mtu". To retreive the real MTU + * for the TCP connection the function "mbuf_get_tso_requested" is used by the driver. Note + * that if TSO is active, all the packets will be flagged for TSO, not just large packets. + * @constant IFNET_TSO_IPV6 Hardware supports IPv6 TCP Segment Offloading. + * If the Interface driver sets this flag, TCP IPv6 will send larger frames (up to 64KB) as one + * frame to the adapter which will perform the final packetization. The maximum TSO segment + * supported by the interface can be set with "ifnet_set_tso_mtu". To retreive the real MTU + * for the TCP IPv6 connection the function "mbuf_get_tso_requested" is used by the driver. + * Note that if TSO is active, all the packets will be flagged for TSO, not just large packets. + * @constant IFNET_TX_STATUS Driver supports returning a per packet + * transmission status (pass, fail or other errors) of whether + * the packet was successfully transmitted on the link, or the + * transmission was aborted, or transmission failed. 
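+ *
+ * A driver typically advertises a subset of these capabilities once,
+ * at attach time, via ifnet_set_offload(). A hedged sketch (the flag
+ * combination is illustrative, not a recommendation):
+ *
+ *	errno_t err = ifnet_set_offload(ifp,
+ *	    IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
+ *	    IFNET_TSO_IPV4);
+ *
+ * The stack consults these flags when deciding whether to leave
+ * checksumming or segmentation to the hardware.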
+ * + */ enum { - IFNET_CSUM_IP = 0x00000001, - IFNET_CSUM_TCP = 0x00000002, - IFNET_CSUM_UDP = 0x00000004, - IFNET_CSUM_FRAGMENT = 0x00000008, - IFNET_IP_FRAGMENT = 0x00000010, - IFNET_CSUM_TCPIPV6 = 0x00000020, - IFNET_CSUM_UDPIPV6 = 0x00000040, - IFNET_IPV6_FRAGMENT = 0x00000080, + IFNET_CSUM_IP = 0x00000001, + IFNET_CSUM_TCP = 0x00000002, + IFNET_CSUM_UDP = 0x00000004, + IFNET_CSUM_FRAGMENT = 0x00000008, + IFNET_IP_FRAGMENT = 0x00000010, + IFNET_CSUM_TCPIPV6 = 0x00000020, + IFNET_CSUM_UDPIPV6 = 0x00000040, + IFNET_IPV6_FRAGMENT = 0x00000080, #ifdef KERNEL_PRIVATE - IFNET_CSUM_PARTIAL = 0x00001000, - IFNET_CSUM_SUM16 = IFNET_CSUM_PARTIAL, - IFNET_CSUM_ZERO_INVERT = 0x00002000, + IFNET_CSUM_PARTIAL = 0x00001000, + IFNET_CSUM_SUM16 = IFNET_CSUM_PARTIAL, + IFNET_CSUM_ZERO_INVERT = 0x00002000, #endif /* KERNEL_PRIVATE */ - IFNET_VLAN_TAGGING = 0x00010000, - IFNET_VLAN_MTU = 0x00020000, - IFNET_MULTIPAGES = 0x00100000, - IFNET_TSO_IPV4 = 0x00200000, - IFNET_TSO_IPV6 = 0x00400000, - IFNET_TX_STATUS = 0x00800000, - IFNET_HW_TIMESTAMP = 0x01000000, - IFNET_SW_TIMESTAMP = 0x02000000 + IFNET_VLAN_TAGGING = 0x00010000, + IFNET_VLAN_MTU = 0x00020000, + IFNET_MULTIPAGES = 0x00100000, + IFNET_TSO_IPV4 = 0x00200000, + IFNET_TSO_IPV6 = 0x00400000, + IFNET_TX_STATUS = 0x00800000, + IFNET_HW_TIMESTAMP = 0x01000000, + IFNET_SW_TIMESTAMP = 0x02000000 }; /*! - @typedef ifnet_offload_t - @abstract Flags indicating the offload support of the interface. -*/ + * @typedef ifnet_offload_t + * @abstract Flags indicating the offload support of the interface. + */ typedef u_int32_t ifnet_offload_t; #ifdef KERNEL_PRIVATE -#define IFNET_OFFLOADF_BITS \ +#define IFNET_OFFLOADF_BITS \ "\020\1CSUM_IP\2CSUM_TCP\3CSUM_UDP\4CSUM_IP_FRAGS\5IP_FRAGMENT" \ - "\6CSUM_TCPIPV6\7CSUM_UDPIPV6\10IPV6_FRAGMENT\15CSUM_PARTIAL" \ - "\16CSUM_ZERO_INVERT\20VLAN_TAGGING\21VLAN_MTU\25MULTIPAGES" \ + "\6CSUM_TCPIPV6\7CSUM_UDPIPV6\10IPV6_FRAGMENT\15CSUM_PARTIAL" \ + "\16CSUM_ZERO_INVERT\20VLAN_TAGGING\21VLAN_MTU\25MULTIPAGES" \ "\26TSO_IPV4\27TSO_IPV6\30TXSTATUS\31HW_TIMESTAMP\32SW_TIMESTAMP" -#define IFNET_CHECKSUMF \ - (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | \ +#define IFNET_CHECKSUMF \ + (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | \ IFNET_CSUM_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | \ IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT) -#define IFNET_TSOF \ +#define IFNET_TSOF \ (IFNET_TSO_IPV4 | IFNET_TSO_IPV6) #endif /* KERNEL_PRIVATE */ @@ -267,953 +267,953 @@ typedef u_int32_t ifnet_offload_t; * These are function pointers you supply to the kernel in the interface. */ /*! - @typedef bpf_packet_func - - @discussion bpf_packet_func The bpf_packet_func is used to intercept - inbound and outbound packets. The tap function will never free - the mbuf. The tap function will only copy the mbuf in to various - bpf file descriptors tapping this interface. - @param interface The interface being sent or received on. - @param data The packet to be transmitted or received. - @result An errno value or zero upon success. + * @typedef bpf_packet_func + * + * @discussion bpf_packet_func The bpf_packet_func is used to intercept + * inbound and outbound packets. The tap function will never free + * the mbuf. The tap function will only copy the mbuf in to various + * bpf file descriptors tapping this interface. + * @param interface The interface being sent or received on. + * @param data The packet to be transmitted or received. + * @result An errno value or zero upon success. 
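+ *
+ * A minimal conforming tap, sketched here for illustration (it only
+ * observes the packet; it must not free or hold the mbuf, and it must
+ * return quickly):
+ *
+ *	static errno_t
+ *	observe_tap(ifnet_t interface, mbuf_t data)
+ *	{
+ *		(void)interface;
+ *		(void)data;
+ *		return 0;
+ *	}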
 */
/* Fast path - do not block or spend excessive amounts of time */
typedef errno_t (*bpf_packet_func)(ifnet_t interface, mbuf_t data);

/*!
- @typedef ifnet_output_func
-
- @discussion ifnet_output_func is used to transmit packets. The stack
- will pass fully formed packets, including frame header, to the
- ifnet_output function for an interface. The driver is
- responsible for freeing the mbuf.
- @param interface The interface being sent on.
- @param data The packet to be sent.
+ * @typedef ifnet_output_func
+ *
+ * @discussion ifnet_output_func is used to transmit packets. The stack
+ * will pass fully formed packets, including frame header, to the
+ * ifnet_output function for an interface. The driver is
+ * responsible for freeing the mbuf.
+ * @param interface The interface being sent on.
+ * @param data The packet to be sent.
 */
/* Fast path - do not block or spend excessive amounts of time */
typedef errno_t (*ifnet_output_func)(ifnet_t interface, mbuf_t data);

/*!
- @typedef ifnet_ioctl_func
- @discussion ifnet_ioctl_func is used to communicate ioctls from the
- stack to the driver.
-
- All undefined ioctls are reserved for future use by Apple. If
- you need to communicate with your kext using an ioctl, please
- use SIOCSIFKPI and SIOCGIFKPI.
- @param interface The interface the ioctl is being sent to.
- @param cmd The ioctl command.
- @param data A pointer to any data related to the ioctl.
+ * @typedef ifnet_ioctl_func
+ * @discussion ifnet_ioctl_func is used to communicate ioctls from the
+ * stack to the driver.
+ *
+ * All undefined ioctls are reserved for future use by Apple. If
+ * you need to communicate with your kext using an ioctl, please
+ * use SIOCSIFKPI and SIOCGIFKPI.
+ * @param interface The interface the ioctl is being sent to.
+ * @param cmd The ioctl command.
+ * @param data A pointer to any data related to the ioctl.
 */
typedef errno_t (*ifnet_ioctl_func)(ifnet_t interface, unsigned long cmd,
    void *data);

/*!
- @typedef ifnet_set_bpf_tap
- @discussion Deprecated. Specify NULL. Call bpf_tap_in/bpf_tap_out
- for all packets.
+ * @typedef ifnet_set_bpf_tap
+ * @discussion Deprecated. Specify NULL. Call bpf_tap_in/bpf_tap_out
+ * for all packets.
 */
typedef errno_t (*ifnet_set_bpf_tap)(ifnet_t interface, bpf_tap_mode mode,
    bpf_packet_func callback);

/*!
- @typedef ifnet_detached_func
- @discussion ifnet_detached_func is called when an interface is detached
- from the list of interfaces. When ifnet_detach is called, it may
- not detach the interface immediately if protocols are attached.
- ifnet_detached_func is used to notify the interface that it has
- been detached from the networking stack. This is the last
- function that will be called on an interface. Until this
- function returns, you must not unload a kext supplying function
- pointers to this interface, even if ifnet_detach has been
- called. Your detach function may be called during your call to
- ifnet_detach.
- @param interface The interface that has been detached.
+ * @typedef ifnet_detached_func
+ * @discussion ifnet_detached_func is called when an interface is detached
+ * from the list of interfaces. When ifnet_detach is called, it may
+ * not detach the interface immediately if protocols are attached.
+ * ifnet_detached_func is used to notify the interface that it has
+ * been detached from the networking stack. This is the last
+ * function that will be called on an interface. Until this
+ * function returns, you must not unload a kext supplying function
+ * pointers to this interface, even if ifnet_detach has been
+ * called. Your detach function may be called during your call to
+ * ifnet_detach.
+ * @param interface The interface that has been detached.
 */
typedef void (*ifnet_detached_func)(ifnet_t interface);

/*!
- @typedef ifnet_demux_func
- @discussion ifnet_demux_func is called for each inbound packet to
- determine which protocol family the packet belongs to. This
- information is then used by the stack to determine which
- protocol to pass the packet to. This function may return
- protocol families for protocols that are not attached. If the
- protocol family has not been attached to the interface, the
- packet will be discarded.
- @param interface The interface the packet was received on.
- @param packet The mbuf containing the packet.
- @param frame_header A pointer to the frame header.
- @param protocol_family Upon return, the protocol family matching the
- packet should be stored here.
- @result
- If the result is zero, processing will continue normally.
- If the result is EJUSTRETURN, processing will stop but the
- packet will not be freed.
- If the result is anything else, the processing will stop and
- the packet will be freed.
+ * @typedef ifnet_demux_func
+ * @discussion ifnet_demux_func is called for each inbound packet to
+ * determine which protocol family the packet belongs to. This
+ * information is then used by the stack to determine which
+ * protocol to pass the packet to. This function may return
+ * protocol families for protocols that are not attached. If the
+ * protocol family has not been attached to the interface, the
+ * packet will be discarded.
+ * @param interface The interface the packet was received on.
+ * @param packet The mbuf containing the packet.
+ * @param frame_header A pointer to the frame header.
+ * @param protocol_family Upon return, the protocol family matching the
+ * packet should be stored here.
+ * @result
+ * If the result is zero, processing will continue normally.
+ * If the result is EJUSTRETURN, processing will stop but the
+ * packet will not be freed.
+ * If the result is anything else, the processing will stop and
+ * the packet will be freed.
 */
typedef errno_t (*ifnet_demux_func)(ifnet_t interface, mbuf_t packet,
    char *frame_header, protocol_family_t *protocol_family);

/*!
- @typedef ifnet_event_func
- @discussion ifnet_event_func is called when an event occurs on a
- specific interface.
- @param interface The interface the event occurred on.
+ * @typedef ifnet_event_func
+ * @discussion ifnet_event_func is called when an event occurs on a
+ * specific interface.
+ * @param interface The interface the event occurred on.
 */
typedef void (*ifnet_event_func)(ifnet_t interface,
    const struct kev_msg *msg);

/*!
- @typedef ifnet_framer_func
- @discussion ifnet_framer_func is called for each outbound packet to
- give the interface an opportunity to prepend interface specific
- headers.
- @param interface The interface the packet is being sent on.
- @param packet Pointer to the mbuf containing the packet, caller may
- set this to a different mbuf upon return. This can happen if the
- frameout function needs to prepend another mbuf to the chain to
- have enough space for the header.
- @param dest The higher layer protocol destination (i.e. IP address).
- @param dest_linkaddr The link layer address as determined by the
- protocol's pre-output function.
- @param frame_type The frame type as determined by the protocol's - pre-output function. - @discussion prepend_len The length of prepended bytes to the mbuf. - (ONLY used if KPI_INTERFACE_EMBEDDED is defined to 1) - @discussion postpend_len The length of the postpended bytes to the mbuf. - (ONLY used if KPI_INTERFACE_EMBEDDED is defined to 1) - @result - If the result is zero, processing will continue normally. - If the result is EJUSTRETURN, processing will stop but the - packet will not be freed. - If the result is anything else, the processing will stop and - the packet will be freed. + * @typedef ifnet_framer_func + * @discussion ifnet_framer_func is called for each outbound packet to + * give the interface an opportunity to prepend interface specific + * headers. + * @param interface The interface the packet is being sent on. + * @param packet Pointer to the mbuf containing the packet, caller may + * set this to a different mbuf upon return. This can happen if the + * frameout function needs to prepend another mbuf to the chain to + * have enough space for the header. + * @param dest The higher layer protocol destination (i.e. IP address). + * @param dest_linkaddr The link layer address as determined by the + * protocol's pre-output function. + * @param frame_type The frame type as determined by the protocol's + * pre-output function. + * @discussion prepend_len The length of prepended bytes to the mbuf. + * (ONLY used if KPI_INTERFACE_EMBEDDED is defined to 1) + * @discussion postpend_len The length of the postpended bytes to the mbuf. + * (ONLY used if KPI_INTERFACE_EMBEDDED is defined to 1) + * @result + * If the result is zero, processing will continue normally. + * If the result is EJUSTRETURN, processing will stop but the + * packet will not be freed. + * If the result is anything else, the processing will stop and + * the packet will be freed. */ typedef errno_t (*ifnet_framer_func)(ifnet_t interface, mbuf_t *packet, - const struct sockaddr *dest, const char *dest_linkaddr, - const char *frame_type + const struct sockaddr *dest, const char *dest_linkaddr, + const char *frame_type #if KPI_INTERFACE_EMBEDDED - , u_int32_t *prepend_len, u_int32_t *postpend_len + , u_int32_t *prepend_len, u_int32_t *postpend_len #endif /* KPI_INTERFACE_EMBEDDED */ - ); + ); #ifdef KERNEL_PRIVATE typedef errno_t (*ifnet_framer_extended_func)(ifnet_t interface, mbuf_t *packet, - const struct sockaddr *dest, const char *dest_linkaddr, - const char *frame_type, u_int32_t *prepend_len, - u_int32_t *postpend_len); + const struct sockaddr *dest, const char *dest_linkaddr, + const char *frame_type, u_int32_t *prepend_len, + u_int32_t *postpend_len); #endif /* KERNEL_PRIVATE */ /*! - @typedef ifnet_add_proto_func - @discussion if_add_proto_func is called by the stack when a protocol - is attached to an interface. This gives the interface an - opportunity to get a list of protocol description structures - for demuxing packets to this protocol (demux descriptors). - @param interface The interface the protocol will be attached to. - @param protocol_family The family of the protocol being attached. - @param demux_array An array of demux descriptors that describe - the interface specific ways of identifying packets belonging - to this protocol family. - @param demux_count The number of demux descriptors in the array. - @result - If the result is zero, processing will continue normally. - If the result is anything else, the add protocol will be - aborted. 
+ * @typedef ifnet_add_proto_func + * @discussion if_add_proto_func is called by the stack when a protocol + * is attached to an interface. This gives the interface an + * opportunity to get a list of protocol description structures + * for demuxing packets to this protocol (demux descriptors). + * @param interface The interface the protocol will be attached to. + * @param protocol_family The family of the protocol being attached. + * @param demux_array An array of demux descriptors that describe + * the interface specific ways of identifying packets belonging + * to this protocol family. + * @param demux_count The number of demux descriptors in the array. + * @result + * If the result is zero, processing will continue normally. + * If the result is anything else, the add protocol will be + * aborted. */ typedef errno_t (*ifnet_add_proto_func)(ifnet_t interface, protocol_family_t protocol_family, const struct ifnet_demux_desc *demux_array, u_int32_t demux_count); /*! - @typedef if_del_proto_func - @discussion if_del_proto_func is called by the stack when a protocol - is being detached from an interface. This gives the interface an - opportunity to free any storage related to this specific - protocol being attached to this interface. - @param interface The interface the protocol will be detached from. - @param protocol_family The family of the protocol being detached. - @result - If the result is zero, processing will continue normally. - If the result is anything else, the detach will continue - and the error will be returned to the caller. + * @typedef if_del_proto_func + * @discussion if_del_proto_func is called by the stack when a protocol + * is being detached from an interface. This gives the interface an + * opportunity to free any storage related to this specific + * protocol being attached to this interface. + * @param interface The interface the protocol will be detached from. + * @param protocol_family The family of the protocol being detached. + * @result + * If the result is zero, processing will continue normally. + * If the result is anything else, the detach will continue + * and the error will be returned to the caller. */ typedef errno_t (*ifnet_del_proto_func)(ifnet_t interface, protocol_family_t protocol_family); /*! - @typedef ifnet_check_multi - @discussion ifnet_check_multi is called for each multicast address - added to an interface. This gives the interface an opportunity - to reject invalid multicast addresses before they are attached - to the interface. - - To prevent an address from being added to your multicast list, - return EADDRNOTAVAIL. If you don't know how to parse/translate - the address, return EOPNOTSUPP. - @param interface The interface. - @param mcast The multicast address. - @result - Zero upon success, EADDRNOTAVAIL on invalid multicast, - EOPNOTSUPP for addresses the interface does not understand. + * @typedef ifnet_check_multi + * @discussion ifnet_check_multi is called for each multicast address + * added to an interface. This gives the interface an opportunity + * to reject invalid multicast addresses before they are attached + * to the interface. + * + * To prevent an address from being added to your multicast list, + * return EADDRNOTAVAIL. If you don't know how to parse/translate + * the address, return EOPNOTSUPP. + * @param interface The interface. + * @param mcast The multicast address. + * @result + * Zero upon success, EADDRNOTAVAIL on invalid multicast, + * EOPNOTSUPP for addresses the interface does not understand. 
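+ *
+ * For concreteness, a hedged sketch for an Ethernet-style interface
+ * (the 6-byte length and group-bit test are illustrative):
+ *
+ *	static errno_t
+ *	my_check_multi(ifnet_t interface, const struct sockaddr *mcast)
+ *	{
+ *		const struct sockaddr_dl *sdl;
+ *
+ *		(void)interface;
+ *		if (mcast->sa_family != AF_LINK) {
+ *			return EOPNOTSUPP;
+ *		}
+ *		sdl = (const struct sockaddr_dl *)(const void *)mcast;
+ *		if (sdl->sdl_alen != 6 ||
+ *		    !(sdl->sdl_data[sdl->sdl_nlen] & 0x01)) {
+ *			return EADDRNOTAVAIL;
+ *		}
+ *		return 0;
+ *	}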
*/ typedef errno_t (*ifnet_check_multi)(ifnet_t interface, const struct sockaddr *mcast); /*! - @typedef proto_media_input - @discussion proto_media_input is called for all inbound packets for - a specific protocol on a specific interface. This function is - registered on an interface using ifnet_attach_protocol. - @param ifp The interface the packet was received on. - @param protocol The protocol of the packet received. - @param packet The packet being input. - @param header The frame header. - @result - If the result is zero, the caller will assume the packet was - passed to the protocol. - If the result is non-zero and not EJUSTRETURN, the caller will - free the packet. + * @typedef proto_media_input + * @discussion proto_media_input is called for all inbound packets for + * a specific protocol on a specific interface. This function is + * registered on an interface using ifnet_attach_protocol. + * @param ifp The interface the packet was received on. + * @param protocol The protocol of the packet received. + * @param packet The packet being input. + * @param header The frame header. + * @result + * If the result is zero, the caller will assume the packet was + * passed to the protocol. + * If the result is non-zero and not EJUSTRETURN, the caller will + * free the packet. */ typedef errno_t (*proto_media_input)(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet, char *header); /*! - @typedef proto_media_input_v2 - @discussion proto_media_input_v2 is called for all inbound packets for - a specific protocol on a specific interface. This function is - registered on an interface using ifnet_attach_protocolv2. - proto_media_input_v2 differs from proto_media_input in that it - will be called for a list of packets instead of once for each - individual packet. The frame header can be retrieved using - mbuf_pkthdr_header. - @param ifp The interface the packet was received on. - @param protocol The protocol of the packet received. - @param packet The packet being input. - @result - If the result is zero, the caller will assume the packets were - passed to the protocol. - If the result is non-zero and not EJUSTRETURN, the caller will - free the packets. + * @typedef proto_media_input_v2 + * @discussion proto_media_input_v2 is called for all inbound packets for + * a specific protocol on a specific interface. This function is + * registered on an interface using ifnet_attach_protocolv2. + * proto_media_input_v2 differs from proto_media_input in that it + * will be called for a list of packets instead of once for each + * individual packet. The frame header can be retrieved using + * mbuf_pkthdr_header. + * @param ifp The interface the packet was received on. + * @param protocol The protocol of the packet received. + * @param packet The packet being input. + * @result + * If the result is zero, the caller will assume the packets were + * passed to the protocol. + * If the result is non-zero and not EJUSTRETURN, the caller will + * free the packets. */ typedef errno_t (*proto_media_input_v2)(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet); /*! - @typedef proto_media_preout - @discussion proto_media_preout is called just before the packet - is transmitted. This gives the proto_media_preout function an - opportunity to specify the media specific frame type and - destination. - @param ifp The interface the packet will be sent on. - @param protocol The protocol of the packet being sent - (PF_INET/etc...). - @param packet The packet being sent. 
- @param dest The protocol level destination address. - @param route A pointer to the routing structure for the packet. - @param frame_type The media specific frame type. - @param link_layer_dest The media specific destination. - @result - If the result is zero, processing will continue normally. If the - result is non-zero, processing will stop. If the result is - non-zero and not EJUSTRETURN, the packet will be freed by the - caller. + * @typedef proto_media_preout + * @discussion proto_media_preout is called just before the packet + * is transmitted. This gives the proto_media_preout function an + * opportunity to specify the media specific frame type and + * destination. + * @param ifp The interface the packet will be sent on. + * @param protocol The protocol of the packet being sent + * (PF_INET/etc...). + * @param packet The packet being sent. + * @param dest The protocol level destination address. + * @param route A pointer to the routing structure for the packet. + * @param frame_type The media specific frame type. + * @param link_layer_dest The media specific destination. + * @result + * If the result is zero, processing will continue normally. If the + * result is non-zero, processing will stop. If the result is + * non-zero and not EJUSTRETURN, the packet will be freed by the + * caller. */ typedef errno_t (*proto_media_preout)(ifnet_t ifp, protocol_family_t protocol, mbuf_t *packet, const struct sockaddr *dest, void *route, char *frame_type, char *link_layer_dest); /*! - @typedef proto_media_event - @discussion proto_media_event is called to notify this layer of - interface specific events. - @param ifp The interface. - @param protocol The protocol family. - @param event The event. + * @typedef proto_media_event + * @discussion proto_media_event is called to notify this layer of + * interface specific events. + * @param ifp The interface. + * @param protocol The protocol family. + * @param event The event. */ typedef void (*proto_media_event)(ifnet_t ifp, protocol_family_t protocol, const struct kev_msg *event); /*! - @typedef proto_media_ioctl - @discussion proto_media_event allows this layer to handle ioctls. - When an ioctl is handled, it is passed to the interface filters, - protocol filters, protocol, and interface. If you do not support - this ioctl, return EOPNOTSUPP. If you successfully handle the - ioctl, return zero. If you return any error other than - EOPNOTSUPP, other parts of the stack may not get an opportunity - to process the ioctl. If you return EJUSTRETURN, processing will - stop and a result of zero will be returned to the caller. - - All undefined ioctls are reserved for future use by Apple. If - you need to communicate with your kext using an ioctl, please - use SIOCSIFKPI and SIOCGIFKPI. - @param ifp The interface. - @param protocol The protocol family. - @param command The ioctl command. - @param argument The argument to the ioctl. - @result - See the discussion. + * @typedef proto_media_ioctl + * @discussion proto_media_ioctl allows this layer to handle ioctls. + * When an ioctl is handled, it is passed to the interface filters, + * protocol filters, protocol, and interface. If you do not support + * this ioctl, return EOPNOTSUPP. If you successfully handle the + * ioctl, return zero. If you return any error other than + * EOPNOTSUPP, other parts of the stack may not get an opportunity + * to process the ioctl. If you return EJUSTRETURN, processing will + * stop and a result of zero will be returned to the caller.
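For the pre-output path just defined, a sketch for an Ethernet-framed interface might fill in the frame type and destination like this (IPv4 only; it falls back to the broadcast address purely for illustration, where a real implementation would resolve the destination):

#include <string.h>
#include <sys/errno.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/ethernet.h>
#include <net/kpi_interface.h>

static errno_t
myproto_preout(ifnet_t ifp, protocol_family_t protocol, mbuf_t *packet,
    const struct sockaddr *dest, void *route, char *frame_type,
    char *link_layer_dest)
{
#pragma unused(ifp, packet, dest, route)
    static const u_char bcast[ETHER_ADDR_LEN] =
        { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    u_int16_t etype;

    if (protocol != PF_INET)
        return EAFNOSUPPORT;             /* stops processing; caller frees */
    etype = htons(ETHERTYPE_IP);
    memcpy(frame_type, &etype, sizeof(etype));       /* 2-byte EtherType */
    memcpy(link_layer_dest, bcast, ETHER_ADDR_LEN);  /* illustrative only */
    return 0;                            /* continue normally */
}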
+ * + * All undefined ioctls are reserved for future use by Apple. If + * you need to communicate with your kext using an ioctl, please + * use SIOCSIFKPI and SIOCGIFKPI. + * @param ifp The interface. + * @param protocol The protocol family. + * @param command The ioctl command. + * @param argument The argument to the ioctl. + * @result + * See the discussion. */ typedef errno_t (*proto_media_ioctl)(ifnet_t ifp, protocol_family_t protocol, unsigned long command, void *argument); /*! - @typedef proto_media_detached - @discussion proto_media_detached notifies you that your protocol - has been detached. - @param ifp The interface. - @param protocol The protocol family. - @result - See the discussion. + * @typedef proto_media_detached + * @discussion proto_media_detached notifies you that your protocol + * has been detached. + * @param ifp The interface. + * @param protocol The protocol family. + * @result + * See the discussion. */ typedef errno_t (*proto_media_detached)(ifnet_t ifp, protocol_family_t protocol); /*! - @typedef proto_media_resolve_multi - @discussion proto_media_resolve_multi is called to resolve a - protocol layer mulitcast address to a link layer multicast - address. - @param ifp The interface. - @param proto_addr The protocol address. - @param out_ll A sockaddr_dl to copy the link layer multicast in to. - @param ll_len The length of data allocated for out_ll. - @result Return zero on success or an errno error value on failure. + * @typedef proto_media_resolve_multi + * @discussion proto_media_resolve_multi is called to resolve a + * protocol layer multicast address to a link layer multicast + * address. + * @param ifp The interface. + * @param proto_addr The protocol address. + * @param out_ll A sockaddr_dl to copy the link layer multicast into. + * @param ll_len The length of data allocated for out_ll. + * @result Return zero on success or an errno error value on failure. */ typedef errno_t (*proto_media_resolve_multi)(ifnet_t ifp, const struct sockaddr *proto_addr, struct sockaddr_dl *out_ll, size_t ll_len); /*! - @typedef proto_media_send_arp - @discussion proto_media_send_arp is called by the stack to generate - an ARP packet. This field is currently only used with IP. This - function should inspect the parameters and transmit an arp - packet using the information passed in. - @param ifp The interface the arp packet should be sent on. - @param arpop The arp operation (usually ARPOP_REQUEST or - ARPOP_REPLY). - @param sender_hw The value to use for the sender hardware - address field. If this is NULL, use the hardware address - of the interface. - @param sender_proto The value to use for the sender protocol - address field. This will not be NULL. - @param target_hw The value to use for the target hardware address. - If this is NULL, the target hardware address in the ARP packet - should be NULL and the link-layer destination for the back - should be a broadcast. If this is not NULL, this value should be - used for both the link-layer destination and the target hardware - address. - @param target_proto The target protocol address. This will not be - NULL. - @result Return zero on success or an errno error value on failure. + * @typedef proto_media_send_arp + * @discussion proto_media_send_arp is called by the stack to generate + * an ARP packet. This field is currently only used with IP. This + * function should inspect the parameters and transmit an arp + * packet using the information passed in. + * @param ifp The interface the arp packet should be sent on.
+ * @param arpop The arp operation (usually ARPOP_REQUEST or + * ARPOP_REPLY). + * @param sender_hw The value to use for the sender hardware + * address field. If this is NULL, use the hardware address + * of the interface. + * @param sender_proto The value to use for the sender protocol + * address field. This will not be NULL. + * @param target_hw The value to use for the target hardware address. + * If this is NULL, the target hardware address in the ARP packet + * should be NULL and the link-layer destination for the packet + * should be a broadcast. If this is not NULL, this value should be + * used for both the link-layer destination and the target hardware + * address. + * @param target_proto The target protocol address. This will not be + * NULL. + * @result Return zero on success or an errno error value on failure. */ typedef errno_t (*proto_media_send_arp)(ifnet_t ifp, u_short arpop, const struct sockaddr_dl *sender_hw, const struct sockaddr *sender_proto, const struct sockaddr_dl *target_hw, const struct sockaddr *target_proto); /*! - @struct ifnet_stat_increment_param - @discussion This structure is used increment the counters on a - network interface. - @field packets_in The number of packets received. - @field bytes_in The number of bytes received. - @field errors_in The number of receive errors. - @field packets_out The number of packets transmitted. - @field bytes_out The number of bytes transmitted. - @field errors_out The number of transmission errors. - @field collisions The number of collisions seen by this interface. - @field dropped The number of packets dropped. -*/ + * @struct ifnet_stat_increment_param + * @discussion This structure is used to increment the counters on a + * network interface. + * @field packets_in The number of packets received. + * @field bytes_in The number of bytes received. + * @field errors_in The number of receive errors. + * @field packets_out The number of packets transmitted. + * @field bytes_out The number of bytes transmitted. + * @field errors_out The number of transmission errors. + * @field collisions The number of collisions seen by this interface. + * @field dropped The number of packets dropped. + */ struct ifnet_stat_increment_param { - u_int32_t packets_in; - u_int32_t bytes_in; - u_int32_t errors_in; + u_int32_t packets_in; + u_int32_t bytes_in; + u_int32_t errors_in; - u_int32_t packets_out; - u_int32_t bytes_out; - u_int32_t errors_out; + u_int32_t packets_out; + u_int32_t bytes_out; + u_int32_t errors_out; - u_int32_t collisions; - u_int32_t dropped; + u_int32_t collisions; + u_int32_t dropped; }; /*! - @struct ifnet_init_params - @discussion This structure is used to define various properties of - the interface when calling ifnet_allocate. A copy of these - values will be stored in the ifnet and cannot be modified - while the interface is attached. - @field uniqueid An identifier unique to this instance of the - interface. - @field uniqueid_len The length, in bytes, of the uniqueid. - @field name The interface name (i.e. en). - @field unit The interface unit number (en0's unit number is 0). - @field family The interface family. - @field type The interface type (see sys/if_types.h). Must be less - than 256. For new types, use IFT_OTHER. - @field output The output function for the interface. Every packet the - stack attempts to send through this interface will go out - through this function. - @field demux The function used to determine the protocol family of an - incoming packet.
- @field add_proto The function used to attach a protocol to this - interface. - @field del_proto The function used to remove a protocol from this - interface. - @field framer The function used to frame outbound packets, may be NULL. - @field softc Driver specific storage. This value can be retrieved from - the ifnet using the ifnet_softc function. - @field ioctl The function used to handle ioctls. - @field set_bpf_tap The function used to set the bpf_tap function. - @field detach The function called to let the driver know the interface - has been detached. - @field event The function to notify the interface of various interface - specific kernel events. - @field broadcast_addr The link-layer broadcast address for this - interface. - @field broadcast_len The length of the link-layer broadcast address. -*/ + * @struct ifnet_init_params + * @discussion This structure is used to define various properties of + * the interface when calling ifnet_allocate. A copy of these + * values will be stored in the ifnet and cannot be modified + * while the interface is attached. + * @field uniqueid An identifier unique to this instance of the + * interface. + * @field uniqueid_len The length, in bytes, of the uniqueid. + * @field name The interface name (e.g. en). + * @field unit The interface unit number (en0's unit number is 0). + * @field family The interface family. + * @field type The interface type (see sys/if_types.h). Must be less + * than 256. For new types, use IFT_OTHER. + * @field output The output function for the interface. Every packet the + * stack attempts to send through this interface will go out + * through this function. + * @field demux The function used to determine the protocol family of an + * incoming packet. + * @field add_proto The function used to attach a protocol to this + * interface. + * @field del_proto The function used to remove a protocol from this + * interface. + * @field framer The function used to frame outbound packets; may be NULL. + * @field softc Driver specific storage. This value can be retrieved from + * the ifnet using the ifnet_softc function. + * @field ioctl The function used to handle ioctls. + * @field set_bpf_tap The function used to set the bpf_tap function. + * @field detach The function called to let the driver know the interface + * has been detached. + * @field event The function to notify the interface of various interface + * specific kernel events. + * @field broadcast_addr The link-layer broadcast address for this + * interface. + * @field broadcast_len The length of the link-layer broadcast address.
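Filling these parameters out for ifnet_allocate might look like the following sketch (hypothetical foo driver; the callback implementations are assumed to exist elsewhere):

#include <string.h>
#include <sys/socket.h>
#include <net/kpi_interface.h>
#include <net/if_types.h>

/* Callbacks assumed to be implemented elsewhere in the driver. */
static errno_t foo_output(ifnet_t, mbuf_t);
static errno_t foo_demux(ifnet_t, mbuf_t, char *, protocol_family_t *);
static errno_t foo_add_proto(ifnet_t, protocol_family_t,
    const struct ifnet_demux_desc *, u_int32_t);
static errno_t foo_del_proto(ifnet_t, protocol_family_t);
static errno_t foo_check_multi(ifnet_t, const struct sockaddr *);

static errno_t
foo_create(ifnet_t *ifpp, const u_char mac[6])
{
    static const u_char bcast[6] =
        { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    struct ifnet_init_params init;

    memset(&init, 0, sizeof(init));
    init.uniqueid = mac;           /* lets the stack recycle the ifnet */
    init.uniqueid_len = 6;
    init.name = "foo";
    init.unit = 0;
    init.family = IFNET_FAMILY_ETHERNET;
    init.type = IFT_ETHER;
    init.output = foo_output;
    init.demux = foo_demux;
    init.add_proto = foo_add_proto;
    init.del_proto = foo_del_proto;
    init.check_multi = foo_check_multi;
    init.broadcast_addr = bcast;
    init.broadcast_len = sizeof(bcast);
    return ifnet_allocate(&init, ifpp);
}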
+ */ struct ifnet_init_params { /* used to match recycled interface */ - const void *uniqueid; /* optional */ - u_int32_t uniqueid_len; /* optional */ + const void *uniqueid; /* optional */ + u_int32_t uniqueid_len; /* optional */ /* used to fill out initial values for interface */ - const char *name; /* required */ - u_int32_t unit; /* required */ - ifnet_family_t family; /* required */ - u_int32_t type; /* required */ - ifnet_output_func output; /* required */ - ifnet_demux_func demux; /* required */ - ifnet_add_proto_func add_proto; /* required */ - ifnet_del_proto_func del_proto; /* required */ - ifnet_check_multi check_multi; /* required for non point-to-point interfaces */ - ifnet_framer_func framer; /* optional */ - void *softc; /* optional */ - ifnet_ioctl_func ioctl; /* optional */ - ifnet_set_bpf_tap set_bpf_tap; /* deprecated */ - ifnet_detached_func detach; /* optional */ - ifnet_event_func event; /* optional */ - const void *broadcast_addr; /* required for non point-to-point interfaces */ - u_int32_t broadcast_len; /* required for non point-to-point interfaces */ + const char *name; /* required */ + u_int32_t unit; /* required */ + ifnet_family_t family; /* required */ + u_int32_t type; /* required */ + ifnet_output_func output; /* required */ + ifnet_demux_func demux; /* required */ + ifnet_add_proto_func add_proto; /* required */ + ifnet_del_proto_func del_proto; /* required */ + ifnet_check_multi check_multi; /* required for non point-to-point interfaces */ + ifnet_framer_func framer; /* optional */ + void *softc; /* optional */ + ifnet_ioctl_func ioctl; /* optional */ + ifnet_set_bpf_tap set_bpf_tap; /* deprecated */ + ifnet_detached_func detach; /* optional */ + ifnet_event_func event; /* optional */ + const void *broadcast_addr; /* required for non point-to-point interfaces */ + u_int32_t broadcast_len; /* required for non point-to-point interfaces */ }; #ifdef KERNEL_PRIVATE /* Valid values for version */ -#define IFNET_INIT_VERSION_2 2 -#define IFNET_INIT_CURRENT_VERSION IFNET_INIT_VERSION_2 +#define IFNET_INIT_VERSION_2 2 +#define IFNET_INIT_CURRENT_VERSION IFNET_INIT_VERSION_2 /* Valid values for flags */ -#define IFNET_INIT_LEGACY 0x1 /* legacy network interface model */ -#define IFNET_INIT_INPUT_POLL 0x2 /* opportunistic input polling model */ -#define IFNET_INIT_NX_NOAUTO 0x4 /* do not auto config nexus */ -#define IFNET_INIT_ALLOC_KPI 0x8 /* allocated via the ifnet_alloc() KPI */ +#define IFNET_INIT_LEGACY 0x1 /* legacy network interface model */ +#define IFNET_INIT_INPUT_POLL 0x2 /* opportunistic input polling model */ +#define IFNET_INIT_NX_NOAUTO 0x4 /* do not auto config nexus */ +#define IFNET_INIT_ALLOC_KPI 0x8 /* allocated via the ifnet_alloc() KPI */ /* - @typedef ifnet_pre_enqueue_func - @discussion ifnet_pre_enqueue_func is called for each outgoing packet - for the interface. The driver may perform last-minute changes - on the (fully formed) packet, but it is responsible for calling - ifnet_enqueue() to enqueue the packet upon completion. - @param interface The interface being sent on. - @param data The packet to be sent. + * @typedef ifnet_pre_enqueue_func + * @discussion ifnet_pre_enqueue_func is called for each outgoing packet + * for the interface. The driver may perform last-minute changes + * on the (fully formed) packet, but it is responsible for calling + * ifnet_enqueue() to enqueue the packet upon completion. + * @param interface The interface being sent on. + * @param data The packet to be sent. 
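The pre-enqueue contract above boils down to a pass-through plus any last-minute work; a minimal sketch (hypothetical foo_pre_enqueue name):

#include <net/kpi_interface.h>

static errno_t
foo_pre_enqueue(ifnet_t interface, mbuf_t data)
{
    /* e.g. stamp driver metadata on the fully formed packet here */
    return ifnet_enqueue(interface, data);  /* consumes the packet */
}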
*/ typedef errno_t (*ifnet_pre_enqueue_func)(ifnet_t interface, mbuf_t data); /* - @typedef ifnet_start_func - @discussion ifnet_start_func is used to indicate to the driver that - one or more packets may be dequeued by calling ifnet_dequeue() - or ifnet_dequeue_multi() or ifnet_dequeue_multi_bytes(). - This routine gets invoked when ifnet_start() is called; - the ifnet_start_func callback will be executed within the - context of a dedicated kernel thread, hence it is - guaranteed to be single threaded. The driver must employ - additional serializations if this callback routine is - to be called directly from another context, in order to - prevent race condition related issues (e.g. out-of-order - packets.) The dequeued packets will be fully formed - packets (including frame headers). The packets must be - freed by the driver. - @param interface The interface being sent on. + * @typedef ifnet_start_func + * @discussion ifnet_start_func is used to indicate to the driver that + * one or more packets may be dequeued by calling ifnet_dequeue() + * or ifnet_dequeue_multi() or ifnet_dequeue_multi_bytes(). + * This routine gets invoked when ifnet_start() is called; + * the ifnet_start_func callback will be executed within the + * context of a dedicated kernel thread, hence it is + * guaranteed to be single threaded. The driver must employ + * additional serializations if this callback routine is + * to be called directly from another context, in order to + * prevent race condition related issues (e.g. out-of-order + * packets.) The dequeued packets will be fully formed + * packets (including frame headers). The packets must be + * freed by the driver. + * @param interface The interface being sent on. */ typedef void (*ifnet_start_func)(ifnet_t interface); /* - @typedef ifnet_input_poll_func - @discussion ifnet_input_poll_func is called by the network stack to - retrieve one or more packets from the driver which implements - the new driver input model. - @param interface The interface to retrieve the packets from. - @param flags For future use. - @param max_count The maximum number of packets to be dequeued. - @param first_packet Pointer to the first packet being dequeued. - @param last_packet Pointer to the last packet being dequeued. - @param cnt Pointer to a storage for the number of packets dequeued. - @param len Pointer to a storage for the total length (in bytes) - of the dequeued packets. + * @typedef ifnet_input_poll_func + * @discussion ifnet_input_poll_func is called by the network stack to + * retrieve one or more packets from the driver which implements + * the new driver input model. + * @param interface The interface to retrieve the packets from. + * @param flags For future use. + * @param max_count The maximum number of packets to be dequeued. + * @param first_packet Pointer to the first packet being dequeued. + * @param last_packet Pointer to the last packet being dequeued. + * @param cnt Pointer to storage for the number of packets dequeued. + * @param len Pointer to storage for the total length (in bytes) + * of the dequeued packets. */ typedef void (*ifnet_input_poll_func)(ifnet_t interface, u_int32_t flags, u_int32_t max_count, mbuf_t *first_packet, mbuf_t *last_packet, u_int32_t *cnt, u_int32_t *len); /* - @enum Interface control commands - @abstract Constants defining control commands. - @discussion - @constant IFNET_CTL_SET_INPUT_MODEL Set input model. - @constant IFNET_CTL_GET_INPUT_MODEL Get input model. - @constant IFNET_CTL_SET_LOG Set logging level.
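A start callback for the new output model is typically a drain loop; a sketch (foo_tx_one is a hypothetical routine that programs the hardware and frees the packet):

#include <net/kpi_interface.h>

static void foo_tx_one(ifnet_t interface, mbuf_t packet); /* assumed */

static void
foo_start(ifnet_t interface)
{
    mbuf_t packet;

    /* Runs single-threaded on a dedicated kernel thread. */
    while (ifnet_dequeue(interface, &packet) == 0)
        foo_tx_one(interface, packet);
    /* A non-zero result (e.g. EAGAIN on an empty queue) ends the
     * loop; ifnet_start() will invoke us again when work arrives. */
}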
- @constant IFNET_CTL_GET_LOG Get logging level. + * @enum Interface control commands + * @abstract Constants defining control commands. + * @discussion + * @constant IFNET_CTL_SET_INPUT_MODEL Set input model. + * @constant IFNET_CTL_GET_INPUT_MODEL Get input model. + * @constant IFNET_CTL_SET_LOG Set logging level. + * @constant IFNET_CTL_GET_LOG Get logging level. */ enum { - IFNET_CTL_SET_INPUT_MODEL = 1, /* input ctl */ - IFNET_CTL_GET_INPUT_MODEL = 2, /* input ctl */ - IFNET_CTL_SET_LOG = 3, /* output ctl */ - IFNET_CTL_GET_LOG = 4, /* output ctl */ - IFNET_CTL_NOTIFY_ADDRESS = 5 /* output ctl */ + IFNET_CTL_SET_INPUT_MODEL = 1, /* input ctl */ + IFNET_CTL_GET_INPUT_MODEL = 2, /* input ctl */ + IFNET_CTL_SET_LOG = 3, /* output ctl */ + IFNET_CTL_GET_LOG = 4, /* output ctl */ + IFNET_CTL_NOTIFY_ADDRESS = 5 /* output ctl */ }; /* - @typedef ifnet_ctl_cmd_t - @abstract Storage type for the interface control command. + * @typedef ifnet_ctl_cmd_t + * @abstract Storage type for the interface control command. */ typedef u_int32_t ifnet_ctl_cmd_t; /* - @enum Interface model sub-commands - @abstract Constants defining model sub-commands. - @discussion - @constant IFNET_MODEL_INPUT_POLL_OFF Polling is inactive. When set, - the network stack will no longer invoke the input_poll callback - until the next time polling is turned on; the driver should - proceed to pushing the packets up to the network stack as in - the legacy input model, and if applicable, the driver should - also enable receive interrupt for the hardware. During get, - this indicates that the driver is currently operating in - the legacy/push input model. - @constant IFNET_MODEL_INPUT_POLL_ON Polling is active. When set, the - network stack will begin to invoke the input_poll callback to - retrieve packets from the driver until the next time polling - is turned off; the driver should no longer be pushing packets - up to the network stack, and if applicable, the driver should - also disable receive interrupt for the hardware. During get, - this indicates that the driver is currently operating in - the new/pull input model. + * @enum Interface model sub-commands + * @abstract Constants defining model sub-commands. + * @discussion + * @constant IFNET_MODEL_INPUT_POLL_OFF Polling is inactive. When set, + * the network stack will no longer invoke the input_poll callback + * until the next time polling is turned on; the driver should + * proceed to pushing the packets up to the network stack as in + * the legacy input model, and if applicable, the driver should + * also enable receive interrupt for the hardware. During get, + * this indicates that the driver is currently operating in + * the legacy/push input model. + * @constant IFNET_MODEL_INPUT_POLL_ON Polling is active. When set, the + * network stack will begin to invoke the input_poll callback to + * retrieve packets from the driver until the next time polling + * is turned off; the driver should no longer be pushing packets + * up to the network stack, and if applicable, the driver should + * also disable receive interrupt for the hardware. During get, + * this indicates that the driver is currently operating in + * the new/pull input model. */ enum { - IFNET_MODEL_INPUT_POLL_OFF = 0, - IFNET_MODEL_INPUT_POLL_ON = 1, + IFNET_MODEL_INPUT_POLL_OFF = 0, + IFNET_MODEL_INPUT_POLL_ON = 1, }; /* - @typedef ifnet_model_t - @abstract Storage type for the interface model sub-command. + * @typedef ifnet_model_t + * @abstract Storage type for the interface model sub-command. 
*/ typedef u_int32_t ifnet_model_t; /* - @struct ifnet_model_params - @discussion This structure is used as parameter to the ifnet model - sub-commands. - @field model The interface model. + * @struct ifnet_model_params + * @discussion This structure is used as a parameter to the ifnet model + * sub-commands. + * @field model The interface model. */ struct ifnet_model_params { - ifnet_model_t model; - u_int32_t reserved[3]; + ifnet_model_t model; + u_int32_t reserved[3]; }; /* - @enum Interface logging sub-commands. - @abstract Constants defining logging levels/priorities. A level - includes all other levels below it. It is expected that - verbosity increases along with the level. - @discussion - @constant IFNET_LOG_DEFAULT Revert to default logging level. - @constant IFNET_LOG_ALERT Log actions that must be taken immediately. - @constant IFNET_LOG_CRITICAL Log critical conditions. - @constant IFNET_LOG_ERROR Log error conditions. - @constant IFNET_LOG_WARNING Log warning conditions. - @constant IFNET_LOG_NOTICE Log normal but significant conditions. - @constant IFNET_LOG_INFORMATIONAL Log informational messages. - @constant IFNET_LOG_DEBUG Log debug-level messages. + * @enum Interface logging sub-commands. + * @abstract Constants defining logging levels/priorities. A level + * includes all other levels below it. It is expected that + * verbosity increases along with the level. + * @discussion + * @constant IFNET_LOG_DEFAULT Revert to default logging level. + * @constant IFNET_LOG_ALERT Log actions that must be taken immediately. + * @constant IFNET_LOG_CRITICAL Log critical conditions. + * @constant IFNET_LOG_ERROR Log error conditions. + * @constant IFNET_LOG_WARNING Log warning conditions. + * @constant IFNET_LOG_NOTICE Log normal but significant conditions. + * @constant IFNET_LOG_INFORMATIONAL Log informational messages. + * @constant IFNET_LOG_DEBUG Log debug-level messages. */ enum { - IFNET_LOG_DEFAULT = 0, - IFNET_LOG_ALERT = 1, - IFNET_LOG_CRITICAL = 2, - IFNET_LOG_ERROR = 3, - IFNET_LOG_WARNING = 4, - IFNET_LOG_NOTICE = 5, - IFNET_LOG_INFORMATIONAL = 6, - IFNET_LOG_DEBUG = 7 + IFNET_LOG_DEFAULT = 0, + IFNET_LOG_ALERT = 1, + IFNET_LOG_CRITICAL = 2, + IFNET_LOG_ERROR = 3, + IFNET_LOG_WARNING = 4, + IFNET_LOG_NOTICE = 5, + IFNET_LOG_INFORMATIONAL = 6, + IFNET_LOG_DEBUG = 7 }; #ifdef BSD_KERNEL_PRIVATE -#define IFNET_LOG_MIN IFNET_LOG_DEFAULT -#define IFNET_LOG_MAX IFNET_LOG_DEBUG +#define IFNET_LOG_MIN IFNET_LOG_DEFAULT +#define IFNET_LOG_MAX IFNET_LOG_DEBUG #endif /* BSD_KERNEL_PRIVATE */ /* - @typedef ifnet_log_level_t - @abstract Storage type for log level/priority. + * @typedef ifnet_log_level_t + * @abstract Storage type for log level/priority. */ typedef int32_t ifnet_log_level_t; /* - @enum Interface logging facilities - @abstract Constants defining the logging facilities which - are to be configured with the specified logging level. - @discussion - @constant IFNET_LOGF_DLIL The DLIL layer. - @constant IFNET_LOGF_FAMILY The networking family layer. - @constant IFNET_LOGF_DRIVER The device driver layer. - @constant IFNET_LOGF_FIRMWARE The firmware layer. + * @enum Interface logging facilities + * @abstract Constants defining the logging facilities which + * are to be configured with the specified logging level. + * @discussion + * @constant IFNET_LOGF_DLIL The DLIL layer. + * @constant IFNET_LOGF_FAMILY The networking family layer. + * @constant IFNET_LOGF_DRIVER The device driver layer. + * @constant IFNET_LOGF_FIRMWARE The firmware layer.
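Putting the model sub-commands together, an input_ctl handler might look like this sketch (hypothetical foo_softc state; the actual rx-interrupt toggling is elided):

#include <sys/errno.h>
#include <net/kpi_interface.h>

struct foo_softc { int polling; };   /* illustrative driver state */

static errno_t
foo_input_ctl(ifnet_t interface, ifnet_ctl_cmd_t cmd, u_int32_t arglen,
    void *arg)
{
    struct foo_softc *sc = ifnet_softc(interface);
    struct ifnet_model_params *p = arg;

    if (cmd != IFNET_CTL_SET_INPUT_MODEL || arglen < sizeof(*p))
        return EINVAL;
    /* Enable/disable receive interrupts to match the requested model. */
    sc->polling = (p->model == IFNET_MODEL_INPUT_POLL_ON);
    return 0;
}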
*/ enum { - IFNET_LOGF_DLIL = 0x00000001, - IFNET_LOGF_FAMILY = 0x00010000, - IFNET_LOGF_DRIVER = 0x01000000, - IFNET_LOGF_FIRMWARE = 0x10000000 + IFNET_LOGF_DLIL = 0x00000001, + IFNET_LOGF_FAMILY = 0x00010000, + IFNET_LOGF_DRIVER = 0x01000000, + IFNET_LOGF_FIRMWARE = 0x10000000 }; #ifdef BSD_KERNEL_PRIVATE -#define IFNET_LOGF_MASK \ - (IFNET_LOGF_DLIL | IFNET_LOGF_FAMILY | IFNET_LOGF_DRIVER | \ +#define IFNET_LOGF_MASK \ + (IFNET_LOGF_DLIL | IFNET_LOGF_FAMILY | IFNET_LOGF_DRIVER | \ IFNET_LOGF_FIRMWARE) -#define IFNET_LOGF_BITS \ +#define IFNET_LOGF_BITS \ "\020\1DLIL\21FAMILY\31DRIVER\35FIRMWARE" #endif /* BSD_KERNEL_PRIVATE */ /* - @typedef ifnet_log_flags_t - @abstract Storage type for log flags/facilities. + * @typedef ifnet_log_flags_t + * @abstract Storage type for log flags/facilities. */ typedef u_int32_t ifnet_log_flags_t; /* - @enum Interface logging category - @abstract Constants defininig categories for issues experienced. - @discussion - @constant IFNET_LOGCAT_CONNECTIVITY Connectivity related issues. - @constant IFNET_LOGCAT_QUALITY Quality/fidelity related issues. - @constant IFNET_LOGCAT_PERFORMANCE Performance related issues. + * @enum Interface logging category + * @abstract Constants defining categories for issues experienced. + * @discussion + * @constant IFNET_LOGCAT_CONNECTIVITY Connectivity related issues. + * @constant IFNET_LOGCAT_QUALITY Quality/fidelity related issues. + * @constant IFNET_LOGCAT_PERFORMANCE Performance related issues. */ enum { - IFNET_LOGCAT_CONNECTIVITY = 1, - IFNET_LOGCAT_QUALITY = 2, - IFNET_LOGCAT_PERFORMANCE = 3 + IFNET_LOGCAT_CONNECTIVITY = 1, + IFNET_LOGCAT_QUALITY = 2, + IFNET_LOGCAT_PERFORMANCE = 3 }; /* - @typedef ifnet_log_category_t - @abstract Storage type for log category. + * @typedef ifnet_log_category_t + * @abstract Storage type for log category. */ typedef int32_t ifnet_log_category_t; /* - @typedef ifnet_log_subcategory_t - @abstract Storage type for log subcategory. This is largely opaque - and it can be used for IOReturn values, etc. + * @typedef ifnet_log_subcategory_t + * @abstract Storage type for log subcategory. This is largely opaque + * and it can be used for IOReturn values, etc. */ typedef int32_t ifnet_log_subcategory_t; /* - @struct ifnet_log_params - @discussion This structure is used as parameter to the ifnet - logging sub-commands. - @field level The logging level/priority. - @field flags The logging flags/facilities. - @field category The category of issue. - @field subcategory The subcategory of issue. + * @struct ifnet_log_params + * @discussion This structure is used as a parameter to the ifnet + * logging sub-commands. + * @field level The logging level/priority. + * @field flags The logging flags/facilities. + * @field category The category of issue. + * @field subcategory The subcategory of issue. */ struct ifnet_log_params { - ifnet_log_level_t level; - ifnet_log_flags_t flags; - ifnet_log_category_t category; - ifnet_log_subcategory_t subcategory; + ifnet_log_level_t level; + ifnet_log_flags_t flags; + ifnet_log_category_t category; + ifnet_log_subcategory_t subcategory; }; /* - @struct ifnet_notify_address_params - @discussion This structure is used as parameter to the ifnet - address notification sub-command. This is used to indicate - to the family/driver that one or more addresses of the given - address family has been added to, or removed from the list - of addresses on the interface. The driver may query for the - current address list by calling ifnet_get_address_list_family().
- @field address_family The address family of the interface address(es). + * @struct ifnet_notify_address_params + * @discussion This structure is used as a parameter to the ifnet + * address notification sub-command. This is used to indicate + * to the family/driver that one or more addresses of the given + * address family have been added to, or removed from, the list + * of addresses on the interface. The driver may query for the + * current address list by calling ifnet_get_address_list_family(). + * @field address_family The address family of the interface address(es). */ struct ifnet_notify_address_params { - sa_family_t address_family; - u_int32_t reserved[3]; + sa_family_t address_family; + u_int32_t reserved[3]; }; /* - @typedef ifnet_ctl_func - @discussion ifnet_ctl_func is called by the network stack to inform - about changes in parameters, or retrieve the parameters - related to the output or input processing or capabilities. - @param interface The interface. - @param cmd The ifnet_ctl_cmd_t interface control command. - @param arglen The length of the command argument. - @param arg The command argument. - @result 0 upon success, otherwise errno error. + * @typedef ifnet_ctl_func + * @discussion ifnet_ctl_func is called by the network stack to inform + * the driver about changes in parameters, or to retrieve parameters + * related to output or input processing or capabilities. + * @param interface The interface. + * @param cmd The ifnet_ctl_cmd_t interface control command. + * @param arglen The length of the command argument. + * @param arg The command argument. + * @result 0 upon success, otherwise errno error. */ typedef errno_t (*ifnet_ctl_func)(ifnet_t interface, ifnet_ctl_cmd_t cmd, u_int32_t arglen, void *arg); /* - @struct ifnet_init_eparams - @discussion This structure is used to define various properties of - the interface when calling ifnet_allocate_extended. A copy of - these values will be stored in the ifnet and cannot be modified - while the interface is attached. - @field ver The current structure version (IFNET_INIT_CURRENT_VERSION) - @field len The length of this structure. - @field flags See above values for flags. - @field uniqueid An identifier unique to this instance of the - interface. - @field uniqueid_len The length, in bytes, of the uniqueid. - @field name The interface name (i.e. en). - @field unit The interface unit number (en0's unit number is 0). - @field family The interface family. - @field type The interface type (see sys/if_types.h). Must be less - than 256. For new types, use IFT_OTHER. - @field sndq_maxlen The maximum size of the output queue; valid only - if IFNET_INIT_LEGACY is not set. - @field output The output function for the interface. Every packet the - stack attempts to send through this interface will go out - through this function. - @field pre_enqueue The pre_enqueue function for the interface, valid - only if IFNET_INIT_LEGACY is not set, and optional if it is set. - @field start The start function for the interface, valid and required - only if IFNET_INIT_LEGACY is not set. - @field output_ctl The output control function for the interface, valid - only if IFNET_INIT_LEGACY is not set. - @field output_sched_model The IFNET_SCHED_MODEL value for the output - queue, as defined in net/if.h - @field output_target_qdelay The target queue delay is used for - dynamically sizing the output queue, valid only if - IFNET_INIT_LEGACY is not set. - @field output_bw The effective output bandwidth (in bits per second.)
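An output_ctl handler reacting to the address-notification sub-command might look like this sketch (foo_* names illustrative; the list KPIs are the ones named in the discussion above):

#include <sys/errno.h>
#include <net/kpi_interface.h>

static errno_t
foo_output_ctl(ifnet_t interface, ifnet_ctl_cmd_t cmd, u_int32_t arglen,
    void *arg)
{
    struct ifnet_notify_address_params *p = arg;
    ifaddr_t *addrs;
    errno_t err;

    if (cmd != IFNET_CTL_NOTIFY_ADDRESS || arglen < sizeof(*p))
        return EINVAL;
    err = ifnet_get_address_list_family(interface, &addrs,
        p->address_family);
    if (err != 0)
        return err;
    /* ... reprogram hardware filters from addrs here ... */
    ifnet_free_address_list(addrs);
    return 0;
}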
- @field output_bw_max The maximum theoretical output bandwidth - (in bits per second.) - @field output_lt The effective output latency (in nanosecond.) - @field output_lt_max The maximum theoretical output latency - (in nanosecond.) - @field start_delay_qlen The maximum length of output queue for - delaying start callback to the driver. This is an - optimization for coalescing output packets. - @field start_delay_timeout The timeout in microseconds to delay - start callback. If start_delay_qlen number of packets are - not in the output queue when the timer fires, the start - callback will be invoked. Maximum allowed value is - 20ms (in microseconds). - @field input_poll The poll function for the interface, valid only if - IFNET_INIT_LEGACY is not set and only if IFNET_INIT_INPUT_POLL - is set. - @field input_ctl The input control function for the interface, valid - only if IFNET_INIT_LEGACY is not set and only if opportunistic - input polling is enabled via IFNET_INIT_INPUT_POLL flag. - @field rcvq_maxlen The size of the driver's receive ring or the total - count of descriptors used in the receive path; valid only if - IFNET_INIT_INPUT_POLL is set. - @field input_bw The effective input bandwidth (in bits per second.) - @field input_bw_max The maximum theoretical input bandwidth - (in bits per second.) - @field input_lt The effective input latency (in nanosecond.) - @field input_lt_max The maximum theoretical input latency - (in nanosecond.) - @field demux The function used to determine the protocol family of an - incoming packet. - @field add_proto The function used to attach a protocol to this - interface. - @field del_proto The function used to remove a protocol from this - interface. - @field framer The function used to frame outbound packets, may be NULL. - @field framer_extended The function used to frame outbound packets, - in the newer form; may be NULL. If specified, it will override - the value set via framer. - @field softc Driver specific storage. This value can be retrieved from - the ifnet using the ifnet_softc function. - @field ioctl The function used to handle ioctls. - @field set_bpf_tap The function used to set the bpf_tap function. - @field detach The function called to let the driver know the interface - has been detached. - @field event The function to notify the interface of various interface - specific kernel events. - @field broadcast_addr The link-layer broadcast address for this - interface. - @field broadcast_len The length of the link-layer broadcast address. - @field tx_headroom The amount of headroom space to be reserved in the - packet being transmitted on the interface, specified in bytes. - Must be a multiple of 8 bytes. - @field tx_trailer The amount of trailer space to be reserved in the - packet being transmitted on the interface, specified in bytes. - @field rx_mit_ival mitigation interval for the rx mitigation logic, - specified in microseconds. -*/ + * @struct ifnet_init_eparams + * @discussion This structure is used to define various properties of + * the interface when calling ifnet_allocate_extended. A copy of + * these values will be stored in the ifnet and cannot be modified + * while the interface is attached. + * @field ver The current structure version (IFNET_INIT_CURRENT_VERSION) + * @field len The length of this structure. + * @field flags See above values for flags. + * @field uniqueid An identifier unique to this instance of the + * interface. + * @field uniqueid_len The length, in bytes, of the uniqueid. 
+ * @field name The interface name (e.g. en). + * @field unit The interface unit number (en0's unit number is 0). + * @field family The interface family. + * @field type The interface type (see sys/if_types.h). Must be less + * than 256. For new types, use IFT_OTHER. + * @field sndq_maxlen The maximum size of the output queue; valid only + * if IFNET_INIT_LEGACY is not set. + * @field output The output function for the interface. Every packet the + * stack attempts to send through this interface will go out + * through this function. + * @field pre_enqueue The pre_enqueue function for the interface, valid + * only if IFNET_INIT_LEGACY is not set, and optional if it is set. + * @field start The start function for the interface, valid and required + * only if IFNET_INIT_LEGACY is not set. + * @field output_ctl The output control function for the interface, valid + * only if IFNET_INIT_LEGACY is not set. + * @field output_sched_model The IFNET_SCHED_MODEL value for the output + * queue, as defined in net/if.h. + * @field output_target_qdelay The target queue delay is used for + * dynamically sizing the output queue, valid only if + * IFNET_INIT_LEGACY is not set. + * @field output_bw The effective output bandwidth (in bits per second.) + * @field output_bw_max The maximum theoretical output bandwidth + * (in bits per second.) + * @field output_lt The effective output latency (in nanoseconds.) + * @field output_lt_max The maximum theoretical output latency + * (in nanoseconds.) + * @field start_delay_qlen The maximum length of output queue for + * delaying start callback to the driver. This is an + * optimization for coalescing output packets. + * @field start_delay_timeout The timeout in microseconds to delay + * start callback. If start_delay_qlen number of packets are + * not in the output queue when the timer fires, the start + * callback will be invoked. Maximum allowed value is + * 20ms (in microseconds). + * @field input_poll The poll function for the interface, valid only if + * IFNET_INIT_LEGACY is not set and only if IFNET_INIT_INPUT_POLL + * is set. + * @field input_ctl The input control function for the interface, valid + * only if IFNET_INIT_LEGACY is not set and only if opportunistic + * input polling is enabled via the IFNET_INIT_INPUT_POLL flag. + * @field rcvq_maxlen The size of the driver's receive ring or the total + * count of descriptors used in the receive path; valid only if + * IFNET_INIT_INPUT_POLL is set. + * @field input_bw The effective input bandwidth (in bits per second.) + * @field input_bw_max The maximum theoretical input bandwidth + * (in bits per second.) + * @field input_lt The effective input latency (in nanoseconds.) + * @field input_lt_max The maximum theoretical input latency + * (in nanoseconds.) + * @field demux The function used to determine the protocol family of an + * incoming packet. + * @field add_proto The function used to attach a protocol to this + * interface. + * @field del_proto The function used to remove a protocol from this + * interface. + * @field framer The function used to frame outbound packets; may be NULL. + * @field framer_extended The function used to frame outbound packets, + * in the newer form; may be NULL. If specified, it will override + * the value set via framer. + * @field softc Driver specific storage. This value can be retrieved from + * the ifnet using the ifnet_softc function. + * @field ioctl The function used to handle ioctls. + * @field set_bpf_tap The function used to set the bpf_tap function.
+ * @field detach The function called to let the driver know the interface + * has been detached. + * @field event The function to notify the interface of various interface + * specific kernel events. + * @field broadcast_addr The link-layer broadcast address for this + * interface. + * @field broadcast_len The length of the link-layer broadcast address. + * @field tx_headroom The amount of headroom space to be reserved in the + * packet being transmitted on the interface, specified in bytes. + * Must be a multiple of 8 bytes. + * @field tx_trailer The amount of trailer space to be reserved in the + * packet being transmitted on the interface, specified in bytes. + * @field rx_mit_ival mitigation interval for the rx mitigation logic, + * specified in microseconds. + */ struct ifnet_init_eparams { - u_int32_t ver; /* required */ - u_int32_t len; /* required */ - u_int32_t flags; /* optional */ + u_int32_t ver; /* required */ + u_int32_t len; /* required */ + u_int32_t flags; /* optional */ /* used to match recycled interface */ - const void *uniqueid; /* optional */ - u_int32_t uniqueid_len; /* optional */ + const void *uniqueid; /* optional */ + u_int32_t uniqueid_len; /* optional */ /* used to fill out initial values for interface */ - const char *name; /* required */ - u_int32_t unit; /* required */ - ifnet_family_t family; /* required */ - u_int32_t type; /* required */ - u_int32_t sndq_maxlen; /* optional, only for new model */ - ifnet_output_func output; /* required only for legacy model */ - ifnet_pre_enqueue_func pre_enqueue; /* optional, only for new model */ - ifnet_start_func start; /* required only for new model */ - ifnet_ctl_func output_ctl; /* optional, only for new model */ - u_int32_t output_sched_model; /* optional, only for new model */ - u_int32_t output_target_qdelay; /* optional, only for new model, value in ms */ - u_int64_t output_bw; /* optional */ - u_int64_t output_bw_max; /* optional */ - u_int64_t output_lt; /* optional */ - u_int64_t output_lt_max; /* optional */ - u_int16_t start_delay_qlen; /* optional */ - u_int16_t start_delay_timeout; /* optional */ - u_int32_t _reserved[3]; /* for future use */ - ifnet_input_poll_func input_poll; /* optional, ignored for legacy model */ - ifnet_ctl_func input_ctl; /* required for opportunistic polling */ - u_int32_t rcvq_maxlen; /* optional, only for opportunistic polling */ - u_int32_t __reserved; /* for future use */ - u_int64_t input_bw; /* optional */ - u_int64_t input_bw_max; /* optional */ - u_int64_t input_lt; /* optional */ - u_int64_t input_lt_max; /* optional */ - u_int64_t ___reserved[2]; /* for future use */ - ifnet_demux_func demux; /* required */ - ifnet_add_proto_func add_proto; /* required */ - ifnet_del_proto_func del_proto; /* required */ - ifnet_check_multi check_multi; /* required for non point-to-point interfaces */ - ifnet_framer_func framer; /* optional */ - void *softc; /* optional */ - ifnet_ioctl_func ioctl; /* optional */ - ifnet_set_bpf_tap set_bpf_tap; /* deprecated */ - ifnet_detached_func detach; /* optional */ - ifnet_event_func event; /* optional */ - const void *broadcast_addr; /* required for non point-to-point interfaces */ - u_int32_t broadcast_len; /* required for non point-to-point interfaces */ - ifnet_framer_extended_func framer_extended; /* optional */ - ifnet_subfamily_t subfamily; /* optional */ - u_int16_t tx_headroom; /* optional */ - u_int16_t tx_trailer; /* optional */ - u_int32_t rx_mit_ival; /* optional */ + const char *name; /* required */ + u_int32_t unit; /* required */ 
+ ifnet_family_t family; /* required */ + u_int32_t type; /* required */ + u_int32_t sndq_maxlen; /* optional, only for new model */ + ifnet_output_func output; /* required only for legacy model */ + ifnet_pre_enqueue_func pre_enqueue; /* optional, only for new model */ + ifnet_start_func start; /* required only for new model */ + ifnet_ctl_func output_ctl; /* optional, only for new model */ + u_int32_t output_sched_model; /* optional, only for new model */ + u_int32_t output_target_qdelay; /* optional, only for new model, value in ms */ + u_int64_t output_bw; /* optional */ + u_int64_t output_bw_max; /* optional */ + u_int64_t output_lt; /* optional */ + u_int64_t output_lt_max; /* optional */ + u_int16_t start_delay_qlen; /* optional */ + u_int16_t start_delay_timeout; /* optional */ + u_int32_t _reserved[3]; /* for future use */ + ifnet_input_poll_func input_poll; /* optional, ignored for legacy model */ + ifnet_ctl_func input_ctl; /* required for opportunistic polling */ + u_int32_t rcvq_maxlen; /* optional, only for opportunistic polling */ + u_int32_t __reserved; /* for future use */ + u_int64_t input_bw; /* optional */ + u_int64_t input_bw_max; /* optional */ + u_int64_t input_lt; /* optional */ + u_int64_t input_lt_max; /* optional */ + u_int64_t ___reserved[2]; /* for future use */ + ifnet_demux_func demux; /* required */ + ifnet_add_proto_func add_proto; /* required */ + ifnet_del_proto_func del_proto; /* required */ + ifnet_check_multi check_multi; /* required for non point-to-point interfaces */ + ifnet_framer_func framer; /* optional */ + void *softc; /* optional */ + ifnet_ioctl_func ioctl; /* optional */ + ifnet_set_bpf_tap set_bpf_tap; /* deprecated */ + ifnet_detached_func detach; /* optional */ + ifnet_event_func event; /* optional */ + const void *broadcast_addr; /* required for non point-to-point interfaces */ + u_int32_t broadcast_len; /* required for non point-to-point interfaces */ + ifnet_framer_extended_func framer_extended; /* optional */ + ifnet_subfamily_t subfamily; /* optional */ + u_int16_t tx_headroom; /* optional */ + u_int16_t tx_trailer; /* optional */ + u_int32_t rx_mit_ival; /* optional */ #if !defined(__LP64__) - u_int64_t ____reserved[2]; /* for future use */ + u_int64_t ____reserved[2]; /* for future use */ #else - u_int32_t ____reserved; /* for future use */ - u_int64_t _____reserved[1]; /* for future use */ + u_int32_t ____reserved; /* for future use */ + u_int64_t _____reserved[1]; /* for future use */ #endif /* __LP64__ */ }; #endif /* KERNEL_PRIVATE */ /*! - @struct ifnet_stats_param - @discussion This structure is used get and set the interface - statistics. - @field packets_in The number of packets received. - @field bytes_in The number of bytes received. - @field errors_in The number of receive errors. - @field packets_out The number of packets transmitted. - @field bytes_out The number of bytes transmitted. - @field errors_out The number of transmission errors. - @field collisions The number of collisions seen by this interface. - @field dropped The number of packets dropped. -*/ + * @struct ifnet_stats_param + * @discussion This structure is used to get and set the interface + * statistics. + * @field packets_in The number of packets received. + * @field bytes_in The number of bytes received. + * @field errors_in The number of receive errors. + * @field packets_out The number of packets transmitted. + * @field bytes_out The number of bytes transmitted. + * @field errors_out The number of transmission errors.
+ * @field collisions The number of collisions seen by this interface. + * @field dropped The number of packets dropped. + */ struct ifnet_stats_param { - u_int64_t packets_in; - u_int64_t bytes_in; - u_int64_t multicasts_in; - u_int64_t errors_in; - - u_int64_t packets_out; - u_int64_t bytes_out; - u_int64_t multicasts_out; - u_int64_t errors_out; - - u_int64_t collisions; - u_int64_t dropped; - u_int64_t no_protocol; + u_int64_t packets_in; + u_int64_t bytes_in; + u_int64_t multicasts_in; + u_int64_t errors_in; + + u_int64_t packets_out; + u_int64_t bytes_out; + u_int64_t multicasts_out; + u_int64_t errors_out; + + u_int64_t collisions; + u_int64_t dropped; + u_int64_t no_protocol; }; /*! - @struct ifnet_demux_desc - @discussion This structure is to identify packets that belong to a - specific protocol. The types supported are interface specific. - Ethernet supports ETHER_DESC_ETYPE2, ETHER_DESC_SAP, and - ETHER_DESC_SNAP. The type defines the offset in the packet where - the data will be matched as well as context. For example, if - ETHER_DESC_SNAP is specified, the only valid datalen is 5 and - only in the 5 bytes will only be matched when the packet header - indicates that the packet is a SNAP packet. - @field type The type of identifier data (i.e. ETHER_DESC_ETYPE2) - @field data A pointer to an entry of type (i.e. pointer to 0x0800). - @field datalen The number of bytes of data used to describe the - packet. -*/ + * @struct ifnet_demux_desc + * @discussion This structure is used to identify packets that belong to a + * specific protocol. The types supported are interface specific. + * Ethernet supports ETHER_DESC_ETYPE2, ETHER_DESC_SAP, and + * ETHER_DESC_SNAP. The type defines the offset in the packet where + * the data will be matched as well as context. For example, if + * ETHER_DESC_SNAP is specified, the only valid datalen is 5, and + * the 5 bytes will only be matched when the packet header + * indicates that the packet is a SNAP packet. + * @field type The type of identifier data (e.g. ETHER_DESC_ETYPE2) + * @field data A pointer to an entry of type (e.g. a pointer to 0x0800). + * @field datalen The number of bytes of data used to describe the + * packet. + */ struct ifnet_demux_desc { - u_int32_t type; - void *data; - u_int32_t datalen; + u_int32_t type; + void *data; + u_int32_t datalen; }; /*! - @struct ifnet_attach_proto_param - @discussion This structure is used to attach a protocol to an - interface. This structure provides the various functions for - handling operations related to the protocol on the interface as - well as information for how to demux packets for this protocol. - @field demux_array An array of ifnet_demux_desc structures - describing the protocol. - @field demux_count The number of entries in the demux_array array. - @field input The function to be called for inbound packets. - @field pre_output The function to be called for outbound packets. - @field event The function to be called for interface events. - @field ioctl The function to be called for ioctls. - @field detached The function to be called for handling the detach. -*/ + * @struct ifnet_attach_proto_param + * @discussion This structure is used to attach a protocol to an + * interface. This structure provides the various functions for + * handling operations related to the protocol on the interface as + * well as information for how to demux packets for this protocol. + * @field demux_array An array of ifnet_demux_desc structures + * describing the protocol.
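Following the example in the discussion above, a descriptor matching IPv4 over Ethernet might be set up like this sketch (assuming the ETHER_DESC_ETYPE2 constant comes from net/ethernet.h):

#include <netinet/in.h>
#include <net/ethernet.h>
#include <net/kpi_interface.h>

static u_int16_t ip_etype;               /* EtherType, network byte order */
static struct ifnet_demux_desc ip_demux;

static void
setup_ip_demux(void)
{
    ip_etype = htons(ETHERTYPE_IP);      /* 0x0800 */
    ip_demux.type = ETHER_DESC_ETYPE2;   /* match on the EtherType field */
    ip_demux.data = &ip_etype;
    ip_demux.datalen = sizeof(ip_etype);
}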
+ * @field demux_count The number of entries in the demux_array array. + * @field input The function to be called for inbound packets. + * @field pre_output The function to be called for outbound packets. + * @field event The function to be called for interface events. + * @field ioctl The function to be called for ioctls. + * @field detached The function to be called for handling the detach. + */ #ifdef KERNEL_PRIVATE -#define demux_list demux_array +#define demux_list demux_array #endif /* KERNEL_PRIVATE */ struct ifnet_attach_proto_param { - struct ifnet_demux_desc *demux_array; /* interface may/may not require */ - u_int32_t demux_count; /* interface may/may not require */ - - proto_media_input input; /* required */ - proto_media_preout pre_output; /* required */ - proto_media_event event; /* optional */ - proto_media_ioctl ioctl; /* optional */ - proto_media_detached detached; /* optional */ - proto_media_resolve_multi resolve; /* optional */ - proto_media_send_arp send_arp; /* optional */ + struct ifnet_demux_desc *demux_array; /* interface may/may not require */ + u_int32_t demux_count; /* interface may/may not require */ + + proto_media_input input; /* required */ + proto_media_preout pre_output; /* required */ + proto_media_event event; /* optional */ + proto_media_ioctl ioctl; /* optional */ + proto_media_detached detached; /* optional */ + proto_media_resolve_multi resolve; /* optional */ + proto_media_send_arp send_arp; /* optional */ }; struct ifnet_attach_proto_param_v2 { - struct ifnet_demux_desc *demux_array; /* interface may/may not require */ - u_int32_t demux_count; /* interface may/may not require */ - - proto_media_input_v2 input; /* required */ - proto_media_preout pre_output; /* required */ - proto_media_event event; /* optional */ - proto_media_ioctl ioctl; /* optional */ - proto_media_detached detached; /* optional */ - proto_media_resolve_multi resolve; /* optional */ - proto_media_send_arp send_arp; /* optional */ + struct ifnet_demux_desc *demux_array; /* interface may/may not require */ + u_int32_t demux_count; /* interface may/may not require */ + + proto_media_input_v2 input; /* required */ + proto_media_preout pre_output; /* required */ + proto_media_event event; /* optional */ + proto_media_ioctl ioctl; /* optional */ + proto_media_detached detached; /* optional */ + proto_media_resolve_multi resolve; /* optional */ + proto_media_send_arp send_arp; /* optional */ }; __BEGIN_DECLS @@ -1223,26 +1223,26 @@ __BEGIN_DECLS */ /*! - @function ifnet_allocate - @discussion Allocate an ifnet_t with an initial refcount of 1. Many - parts of the stack do not properly refcount the ifnet_t. In - order to avoid freeing the ifnet_t while some parts of the stack - may contain a reference to it, the ifnet_ts are only recycled, - never freed. A unique id is used to try and recycle the same - ifnet_t when allocating an interface. For example, for an - ethernet interface, the hardware address of the ethernet card is - usually used for the uniqueid. If a PC Card is removed and - inserted again, if the ethernet address of the PC card is used, - the same ifnet_t will be used for the card the second time it is - inserted. In the future, when the ifnet_t is correctly - refcounted by all of the stack, the interfaces may be freed and - the unique ids ignored. - @param init The initial values for the interface. These values can - not be changed after the interface has been allocated. - @param interface The interface allocated upon success. 
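Wiring the pieces together, attaching IPv4 with these parameters might look like this sketch (reusing ip_demux, myproto_input, and myproto_preout from the earlier sketches; ifnet_attach_protocol is the KPI named in the proto_media_input discussion above):

#include <string.h>
#include <netinet/in.h>
#include <net/kpi_interface.h>

static errno_t
attach_ip(ifnet_t ifp)
{
    struct ifnet_attach_proto_param proto;

    memset(&proto, 0, sizeof(proto));
    proto.demux_array = &ip_demux;       /* one ETYPE2 descriptor */
    proto.demux_count = 1;
    proto.input = myproto_input;         /* required */
    proto.pre_output = myproto_preout;   /* required */
    return ifnet_attach_protocol(ifp, PF_INET, &proto);
}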
- @result May return ENOMEM if there is insufficient memory or EEXIST - if an interface with the same uniqueid and family has already - been allocated and is in use. + * @function ifnet_allocate + * @discussion Allocate an ifnet_t with an initial refcount of 1. Many + * parts of the stack do not properly refcount the ifnet_t. In + * order to avoid freeing the ifnet_t while some parts of the stack + * may contain a reference to it, the ifnet_ts are only recycled, + * never freed. A unique id is used to try to recycle the same + * ifnet_t when allocating an interface. For example, for an + * ethernet interface, the hardware address of the ethernet card is + * usually used for the uniqueid. If a PC Card is removed and + * inserted again, and the ethernet address of the PC card is used, + * the same ifnet_t will be used for the card the second time it is + * inserted. In the future, when the ifnet_t is correctly + * refcounted by all of the stack, the interfaces may be freed and + * the unique ids ignored. + * @param init The initial values for the interface. These values + * cannot be changed after the interface has been allocated. + * @param interface The interface allocated upon success. + * @result May return ENOMEM if there is insufficient memory or EEXIST + * if an interface with the same uniqueid and family has already + * been allocated and is in use. */ #ifdef KERNEL_PRIVATE extern errno_t ifnet_allocate_internal(const struct ifnet_init_params *init, @@ -1257,482 +1257,482 @@ extern errno_t ifnet_allocate(const struct ifnet_init_params *init, #ifdef KERNEL_PRIVATE /* - @function ifnet_allocate_extended - @discussion An extended/newer variant of ifnet_allocate, with additional - support for the new output and input driver models. - @param init The initial values for the interface. These values can - not be changed after the interface has been allocated. - @param interface The interface allocated upon success. - @result May return ENOMEM if there is insufficient memory or EBUSY - if an interface with the same uniqueid/(name + unit) and family has already - been allocated and is in use. + * @function ifnet_allocate_extended + * @discussion An extended/newer variant of ifnet_allocate, with additional + * support for the new output and input driver models. + * @param init The initial values for the interface. These values + * cannot be changed after the interface has been allocated. + * @param interface The interface allocated upon success. + * @result May return ENOMEM if there is insufficient memory or EBUSY + * if an interface with the same uniqueid/(name + unit) and family has already + * been allocated and is in use. */ extern errno_t ifnet_allocate_extended(const struct ifnet_init_eparams *init, ifnet_t *interface); /* - @function ifnet_purge - @discussion Purge the output queue of an interface which implements - the new driver output model. - @param interface The interface to purge. + * @function ifnet_purge + * @discussion Purge the output queue of an interface which implements + * the new driver output model. + * @param interface The interface to purge. */ extern void ifnet_purge(ifnet_t interface); /* - @function ifnet_enqueue - @discussion Enqueue a packet to the output queue of an interface - which implements the new driver output model. - @param interface The interface to enqueue the packet to. - @param packet The packet being enqueued; only one packet is allowed - to be enqueued at a time.
- @result May return EINVAL if the parameters are invalid; ENXIO if - the interface doesn't implement the new driver output model; - EQFULL if the output queue is flow-controlled; or EQSUSPENDED - if the output queue is suspended. This routine either frees - or consumes the packet; the caller must not modify or free - it after calling this routine. Any attempt to enqueue more - than one packet will cause the entire packet chain to be freed. + * @function ifnet_enqueue + * @discussion Enqueue a packet to the output queue of an interface + * which implements the new driver output model. + * @param interface The interface to enqueue the packet to. + * @param packet The packet being enqueued; only one packet is allowed + * to be enqueued at a time. + * @result May return EINVAL if the parameters are invalid; ENXIO if + * the interface doesn't implement the new driver output model; + * EQFULL if the output queue is flow-controlled; or EQSUSPENDED + * if the output queue is suspended. This routine either frees + * or consumes the packet; the caller must not modify or free + * it after calling this routine. Any attempt to enqueue more + * than one packet will cause the entire packet chain to be freed. */ extern errno_t ifnet_enqueue(ifnet_t interface, mbuf_t packet); /* - @function ifnet_dequeue - @discussion Dequeue a packet from the output queue of an interface - which implements the new driver output model, and that the - output scheduling model is set to IFNET_SCHED_MODEL_NORMAL. - @param interface The interface to dequeue the packet from. - @param packet Pointer to the packet being dequeued. - @result May return EINVAL if the parameters are invalid, ENXIO if - the interface doesn't implement the new driver output model - or the output scheduling model isn't IFNET_SCHED_MODEL_NORMAL, - or EAGAIN if there is currently no packet available to - be dequeued. + * @function ifnet_dequeue + * @discussion Dequeue a packet from the output queue of an interface + * which implements the new driver output model, and that the + * output scheduling model is set to IFNET_SCHED_MODEL_NORMAL. + * @param interface The interface to dequeue the packet from. + * @param packet Pointer to the packet being dequeued. + * @result May return EINVAL if the parameters are invalid, ENXIO if + * the interface doesn't implement the new driver output model + * or the output scheduling model isn't IFNET_SCHED_MODEL_NORMAL, + * or EAGAIN if there is currently no packet available to + * be dequeued. */ extern errno_t ifnet_dequeue(ifnet_t interface, mbuf_t *packet); /* - @function ifnet_dequeue_service_class - @discussion Dequeue a packet of a particular service class from the - appropriate output queue of an interface which implements the - new driver output model, and that the output scheduling model - is set to IFNET_SCHED_MODEL_DRIVER_MANAGED. - @param interface The interface to dequeue the packet from. - @param sc The service class. - @param packet Pointer to the packet being dequeued. - @result May return EINVAL if the parameters are invalid, ENXIO if - the interface doesn't implement the new driver output model - or if the output scheduling model isn't configured to - IFNET_SCHED_MODEL_DRIVER_MANAGED, or EAGAIN if there - is currently no packet available to be dequeued. 
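In practice ifnet_dequeue() is the consumer half of ifnet_enqueue(): packets enqueued one at a time above are drained by the driver, typically from the start callback registered through the extended allocation path. A sketch, where struct my_softc and my_hw_tx are hypothetical:

    /* Start callback for the new output model (IFNET_SCHED_MODEL_NORMAL). */
    static void
    my_start(ifnet_t ifp)
    {
        struct my_softc *sc = ifnet_softc(ifp);
        mbuf_t m;

        /* EAGAIN from ifnet_dequeue() means the output queue is empty. */
        while (ifnet_dequeue(ifp, &m) == 0) {
            if (my_hw_tx(sc, m) != 0) {
                /* Once dequeued the packet is ours; free it on error. */
                mbuf_freem(m);
                break;
            }
        }
    }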
+ * @function ifnet_dequeue_service_class + * @discussion Dequeue a packet of a particular service class from the + * appropriate output queue of an interface which implements the + * new driver output model, and that the output scheduling model + * is set to IFNET_SCHED_MODEL_DRIVER_MANAGED. + * @param interface The interface to dequeue the packet from. + * @param sc The service class. + * @param packet Pointer to the packet being dequeued. + * @result May return EINVAL if the parameters are invalid, ENXIO if + * the interface doesn't implement the new driver output model + * or if the output scheduling model isn't configured to + * IFNET_SCHED_MODEL_DRIVER_MANAGED, or EAGAIN if there + * is currently no packet available to be dequeued. */ extern errno_t ifnet_dequeue_service_class(ifnet_t interface, mbuf_svc_class_t sc, mbuf_t *packet); /* - @function ifnet_dequeue_multi - @discussion Dequeue one or more packets from the output queue of an - interface which implements the new driver output model, and that - the output scheduling model is set to IFNET_SCHED_MODEL_NORMAL. - The returned packet chain is traversable with mbuf_nextpkt(). - @param interface The interface to dequeue the packets from. - @param max The maximum number of packets in the packet chain that - may be returned to the caller; this needs to be a non-zero - value for any packet to be returned. - @param first_packet Pointer to the first packet being dequeued. - @param last_packet Pointer to the last packet being dequeued. Caller - may supply NULL if not interested in value. - @param cnt Pointer to a storage for the number of packets dequeued. - Caller may supply NULL if not interested in value. - @param len Pointer to a storage for the total length (in bytes) - of the dequeued packets. Caller may supply NULL if not - interested in value. - @result May return EINVAL if the parameters are invalid, ENXIO if - the interface doesn't implement the new driver output model - or the output scheduling model isn't IFNET_SCHED_MODEL_NORMAL, - or EAGAIN if there is currently no packet available to - be dequeued. + * @function ifnet_dequeue_multi + * @discussion Dequeue one or more packets from the output queue of an + * interface which implements the new driver output model, and that + * the output scheduling model is set to IFNET_SCHED_MODEL_NORMAL. + * The returned packet chain is traversable with mbuf_nextpkt(). + * @param interface The interface to dequeue the packets from. + * @param max The maximum number of packets in the packet chain that + * may be returned to the caller; this needs to be a non-zero + * value for any packet to be returned. + * @param first_packet Pointer to the first packet being dequeued. + * @param last_packet Pointer to the last packet being dequeued. Caller + * may supply NULL if not interested in value. + * @param cnt Pointer to a storage for the number of packets dequeued. + * Caller may supply NULL if not interested in value. + * @param len Pointer to a storage for the total length (in bytes) + * of the dequeued packets. Caller may supply NULL if not + * interested in value. + * @result May return EINVAL if the parameters are invalid, ENXIO if + * the interface doesn't implement the new driver output model + * or the output scheduling model isn't IFNET_SCHED_MODEL_NORMAL, + * or EAGAIN if there is currently no packet available to + * be dequeued. 
*/ extern errno_t ifnet_dequeue_multi(ifnet_t interface, u_int32_t max, mbuf_t *first_packet, mbuf_t *last_packet, u_int32_t *cnt, u_int32_t *len); /* - @function ifnet_dequeue_multi_bytes - @discussion Dequeue one or more packets from the output queue of - an interface which implements the new driver output model, - where the scheduling model is set to - IFNET_SCHED_MODEL_NORMAL. The limit is specified in terms - of maximum number of bytes to return. The number of bytes - returned can be slightly higher than the limit so that - packet boundaries can be preserved. - @param interface The interface to dequeue the packets from - @param max_bytes The maximum number of bytes in the packet chain - that may be returned to the caller; this needs to be a - non-zero value for any packet to be returned. - @param first_packet Pointer to the first packet being dequeued - @param last_packet Pointer to the last packet being dequeued - @param cnt Pointer to a storage for the number of bytes dequeued. - Caller may supply NULL if not interested in this value - @param len Pointer to a storage for the total length (in bytes) - of the dequeued packets. Caller may supply NULL if not - interested in this value. - @result May return EINVAL if the parameters are invalid, ENXIO if - the interface doesn't implement the new driver output - model or the output scheduling model isn't - IFNET_SCHED_MODEL_NORMAL, or EAGAIN if there is currently - no packet available to be dequeued + * @function ifnet_dequeue_multi_bytes + * @discussion Dequeue one or more packets from the output queue of + * an interface which implements the new driver output model, + * where the scheduling model is set to + * IFNET_SCHED_MODEL_NORMAL. The limit is specified in terms + * of maximum number of bytes to return. The number of bytes + * returned can be slightly higher than the limit so that + * packet boundaries can be preserved. + * @param interface The interface to dequeue the packets from + * @param max_bytes The maximum number of bytes in the packet chain + * that may be returned to the caller; this needs to be a + * non-zero value for any packet to be returned. + * @param first_packet Pointer to the first packet being dequeued + * @param last_packet Pointer to the last packet being dequeued + * @param cnt Pointer to a storage for the number of bytes dequeued. + * Caller may supply NULL if not interested in this value + * @param len Pointer to a storage for the total length (in bytes) + * of the dequeued packets. Caller may supply NULL if not + * interested in this value. + * @result May return EINVAL if the parameters are invalid, ENXIO if + * the interface doesn't implement the new driver output + * model or the output scheduling model isn't + * IFNET_SCHED_MODEL_NORMAL, or EAGAIN if there is currently + * no packet available to be dequeued */ extern errno_t ifnet_dequeue_multi_bytes(ifnet_t interface, u_int32_t max_bytes, mbuf_t *first_packet, mbuf_t *last_packet, u_int32_t *cnt, u_int32_t *len); /* - @function ifnet_dequeue_service_class_multi - @discussion Dequeue one or more packets of a particular service class - from the appropriate output queue of an interface which - implements the new driver output model, and that the output - scheduling model is set to IFNET_SCHED_MODEL_DRIVER_MANAGED. - The returned packet chain is traversable with mbuf_nextpkt(). - @param interface The interface to dequeue the packets from. - @param sc The service class. 
- @param max The maximum number of packets in the packet chain that - may be returned to the caller; this needs to be a non-zero - value for any packet to be returned. - @param first_packet Pointer to the first packet being dequeued. - @param last_packet Pointer to the last packet being dequeued. Caller - may supply NULL if not interested in value. - @param cnt Pointer to a storage for the number of packets dequeued. - Caller may supply NULL if not interested in value. - @param len Pointer to a storage for the total length (in bytes) - of the dequeued packets. Caller may supply NULL if not - interested in value. - @result May return EINVAL if the parameters are invalid, ENXIO if - the interface doesn't implement the new driver output model - or if the output scheduling model isn't configured to - IFNET_SCHED_MODEL_DRIVER_MANAGED, or EAGAIN if there - is currently no packet available to be dequeued. + * @function ifnet_dequeue_service_class_multi + * @discussion Dequeue one or more packets of a particular service class + * from the appropriate output queue of an interface which + * implements the new driver output model, and that the output + * scheduling model is set to IFNET_SCHED_MODEL_DRIVER_MANAGED. + * The returned packet chain is traversable with mbuf_nextpkt(). + * @param interface The interface to dequeue the packets from. + * @param sc The service class. + * @param max The maximum number of packets in the packet chain that + * may be returned to the caller; this needs to be a non-zero + * value for any packet to be returned. + * @param first_packet Pointer to the first packet being dequeued. + * @param last_packet Pointer to the last packet being dequeued. Caller + * may supply NULL if not interested in value. + * @param cnt Pointer to a storage for the number of packets dequeued. + * Caller may supply NULL if not interested in value. + * @param len Pointer to a storage for the total length (in bytes) + * of the dequeued packets. Caller may supply NULL if not + * interested in value. + * @result May return EINVAL if the parameters are invalid, ENXIO if + * the interface doesn't implement the new driver output model + * or if the output scheduling model isn't configured to + * IFNET_SCHED_MODEL_DRIVER_MANAGED, or EAGAIN if there + * is currently no packet available to be dequeued. */ extern errno_t ifnet_dequeue_service_class_multi(ifnet_t interface, mbuf_svc_class_t sc, u_int32_t max, mbuf_t *first_packet, mbuf_t *last_packet, u_int32_t *cnt, u_int32_t *len); /* - @function ifnet_set_output_sched_model - @discussion Set the output scheduling model of an interface which - implements the new driver output model. - @param interface The interface to set scheduling model on. - @param model The IFNET_SCHED_MODEL value as defined in net/if.h - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver output model. + * @function ifnet_set_output_sched_model + * @discussion Set the output scheduling model of an interface which + * implements the new driver output model. + * @param interface The interface to set scheduling model on. + * @param model The IFNET_SCHED_MODEL value as defined in net/if.h + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver output model. 
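A driver-managed variant, combining ifnet_set_output_sched_model() documented here with the per-class dequeue above; the two-ring layout and my_hw_tx_ring are assumptions for the sketch.

    /* Opt in to driver-managed scheduling. */
    static void
    my_configure_sched(ifnet_t ifp)
    {
        (void) ifnet_set_output_sched_model(ifp,
            IFNET_SCHED_MODEL_DRIVER_MANAGED);
    }

    /* Start callback: service the voice class ahead of best effort. */
    static void
    my_start_driver_managed(ifnet_t ifp)
    {
        mbuf_t m;

        while (ifnet_dequeue_service_class(ifp, MBUF_SC_VO, &m) == 0)
            my_hw_tx_ring(ifp, 0, m);       /* hypothetical per-ring tx */
        while (ifnet_dequeue_service_class(ifp, MBUF_SC_BE, &m) == 0)
            my_hw_tx_ring(ifp, 1, m);
    }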
*/ extern errno_t ifnet_set_output_sched_model(ifnet_t interface, u_int32_t model); /* - @function ifnet_set_sndq_maxlen - @discussion Set the maximum length of the output queue of an - interface which implements the new driver output model. - This call may be issued post ifnet_allocate_extended in - order to modify the maximum output queue length previously - set at registration time. - @param interface The interface to set the max queue length on. - @param maxqlen The maximum number of packets in the output queue. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver output model. + * @function ifnet_set_sndq_maxlen + * @discussion Set the maximum length of the output queue of an + * interface which implements the new driver output model. + * This call may be issued post ifnet_allocate_extended in + * order to modify the maximum output queue length previously + * set at registration time. + * @param interface The interface to set the max queue length on. + * @param maxqlen The maximum number of packets in the output queue. + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver output model. */ extern errno_t ifnet_set_sndq_maxlen(ifnet_t interface, u_int32_t maxqlen); /* - @function ifnet_get_sndq_maxlen - @discussion Get the maximum length of the output queue of an - interface which implements the new driver output model. - @param interface The interface to get the max queue length on. - @param maxqlen Pointer to a storage for the maximum number of packets - in the output queue for all service classes. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver output model. + * @function ifnet_get_sndq_maxlen + * @discussion Get the maximum length of the output queue of an + * interface which implements the new driver output model. + * @param interface The interface to get the max queue length on. + * @param maxqlen Pointer to a storage for the maximum number of packets + * in the output queue for all service classes. + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver output model. */ extern errno_t ifnet_get_sndq_maxlen(ifnet_t interface, u_int32_t *maxqlen); /* - @function ifnet_get_sndq_len - @discussion Get the current length of the output queue of an - interface which implements the new driver output model. - @param interface The interface to get the current queue length on. - @param packets Pointer to a storage for the current number of packets - in the aggregate output queue. This number represents all - enqueued packets regardless of their service classes. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver output model. + * @function ifnet_get_sndq_len + * @discussion Get the current length of the output queue of an + * interface which implements the new driver output model. + * @param interface The interface to get the current queue length on. + * @param packets Pointer to a storage for the current number of packets + * in the aggregate output queue. This number represents all + * enqueued packets regardless of their service classes. + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver output model. 
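For instance, a driver with a transmit ring of MY_TX_RING_SIZE descriptors (a hypothetical constant) might let the output queue absorb a few rings' worth of packets and read the setting back:

    static void
    my_size_output_queue(ifnet_t ifp)
    {
        u_int32_t maxqlen = 0;

        (void) ifnet_set_sndq_maxlen(ifp, 4 * MY_TX_RING_SIZE);
        (void) ifnet_get_sndq_maxlen(ifp, &maxqlen);  /* confirm the value */
    }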
*/ extern errno_t ifnet_get_sndq_len(ifnet_t interface, u_int32_t *packets); /* - @function ifnet_get_service_class_sndq_len - @discussion Get the current length of the output queue for a specific - service class of an interface which implements the new driver - output model. - @param interface The interface to get the current queue length on. - @param sc The service class. - @param packets Pointer to a storage for the current number of packets - of the specific service class in the output queue; may be - NULL if caller is not interested in getting the value. Note - that multiple service classes may be mapped to an output queue; - this routine reports the packet count of that output queue. - @param bytes Pointer to a storage for the current size (in bytes) of - the output queue specific to the service class; may be NULL if - caller is not interested in getting the value. Note that - multiple service classes may be mapped to an output queue; - this routine reports the length of that output queue. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver output model. + * @function ifnet_get_service_class_sndq_len + * @discussion Get the current length of the output queue for a specific + * service class of an interface which implements the new driver + * output model. + * @param interface The interface to get the current queue length on. + * @param sc The service class. + * @param packets Pointer to a storage for the current number of packets + * of the specific service class in the output queue; may be + * NULL if caller is not interested in getting the value. Note + * that multiple service classes may be mapped to an output queue; + * this routine reports the packet count of that output queue. + * @param bytes Pointer to a storage for the current size (in bytes) of + * the output queue specific to the service class; may be NULL if + * caller is not interested in getting the value. Note that + * multiple service classes may be mapped to an output queue; + * this routine reports the length of that output queue. + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver output model. */ extern errno_t ifnet_get_service_class_sndq_len(ifnet_t interface, mbuf_svc_class_t sc, u_int32_t *packets, u_int32_t *bytes); /* - @function ifnet_set_rcvq_maxlen - @discussion Set the maximum length of the input queue of an - interface which implements the new driver input model. - This call may be issued post ifnet_allocate_extended in - order to modify the maximum input queue length previously - set at registration time. - @param interface The interface to set the max queue length on. - @param maxqlen The maximum number of packets in the input queue. - Drivers typically set this to the size of the receive ring - or the total number of descriptors used for the input path. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver input model. + * @function ifnet_set_rcvq_maxlen + * @discussion Set the maximum length of the input queue of an + * interface which implements the new driver input model. + * This call may be issued post ifnet_allocate_extended in + * order to modify the maximum input queue length previously + * set at registration time. + * @param interface The interface to set the max queue length on. + * @param maxqlen The maximum number of packets in the input queue. 
+ * Drivers typically set this to the size of the receive ring + * or the total number of descriptors used for the input path. + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver input model. */ extern errno_t ifnet_set_rcvq_maxlen(ifnet_t interface, u_int32_t maxqlen); /* - @function ifnet_get_rcvq_maxlen - @discussion Get the maximum length of the input queue of an - interface which implements the new driver input model. - @param interface The interface to get the max queue length on. - @param maxqlen Pointer to a storage for the maximum number of packets - in the input queue. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver input model. + * @function ifnet_get_rcvq_maxlen + * @discussion Get the maximum length of the input queue of an + * interface which implements the new driver input model. + * @param interface The interface to get the max queue length on. + * @param maxqlen Pointer to a storage for the maximum number of packets + * in the input queue. + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver input model. */ extern errno_t ifnet_get_rcvq_maxlen(ifnet_t interface, u_int32_t *maxqlen); /* - @struct ifnet_poll_params - @discussion This structure is used to define various opportunistic - polling parameters for an interface. - @field flags Currently unused/ignored; must be set to zero. - @field packets_limit The maximum number of packets to be dequeued - each time the driver's input poll callback is invoked while - in polling mode; this corresponds to the max_count parameter - of ifnet_input_poll_func. A zero value indicates the use of - default maximum packets defined by the system. - @field packets_lowat Low watermark packet threshold. - @field packets_hiwat High watermark packet threshold. - @field bytes_lowat Low watermark packet threshold. - @field bytes_hiwat High watermark packet threshold. - The low and high watermark inbound packet and bytes thresholds; - these values may be link rate dependent. Polling is enabled - when the average inbound packets or bytes goes above the - corresponding high watermark value; it stays in that mode until - both of the average inbound packets and bytes go below their - corresponding low watermark values. Zero watermark values - indicates the use of default thresholds as defined by the - system. Both low and high watermark values must either be - zeroes, or both must be non-zeroes with low watermark value - being less than the high watermark value. - @field interval_time The interval time between each invocation of - the driver's input poll callback, in nanoseconds. A zero - value indicates the use of default poll interval time as - defined by the system. If a non-zero value is specified and - is less than the minimum interval time, the latter will be - chosen by the system. + * @struct ifnet_poll_params + * @discussion This structure is used to define various opportunistic + * polling parameters for an interface. + * @field flags Currently unused/ignored; must be set to zero. + * @field packets_limit The maximum number of packets to be dequeued + * each time the driver's input poll callback is invoked while + * in polling mode; this corresponds to the max_count parameter + * of ifnet_input_poll_func. A zero value indicates the use of + * default maximum packets defined by the system. 
+ * @field packets_lowat Low watermark packet threshold.
+ * @field packets_hiwat High watermark packet threshold.
+ * @field bytes_lowat Low watermark byte threshold.
+ * @field bytes_hiwat High watermark byte threshold.
+ * The low and high watermark inbound packet and bytes thresholds;
+ * these values may be link rate dependent. Polling is enabled
+ * when the average inbound packets or bytes goes above the
+ * corresponding high watermark value; it stays in that mode until
+ * both of the average inbound packets and bytes go below their
+ * corresponding low watermark values. Zero watermark values
+ * indicate the use of default thresholds as defined by the
+ * system. Both low and high watermark values must either be
+ * zeroes, or both must be non-zeroes with low watermark value
+ * being less than the high watermark value.
+ * @field interval_time The interval time between each invocation of
+ * the driver's input poll callback, in nanoseconds. A zero
+ * value indicates the use of default poll interval time as
+ * defined by the system. If a non-zero value is specified and
+ * is less than the minimum interval time, the latter will be
+ * chosen by the system.
 */
struct ifnet_poll_params {
- u_int32_t flags;
- u_int32_t packets_limit;
- u_int32_t packets_lowat;
- u_int32_t packets_hiwat;
- u_int32_t bytes_lowat;
- u_int32_t bytes_hiwat;
- u_int64_t interval_time;
- u_int64_t reserved[4];
+ u_int32_t flags;
+ u_int32_t packets_limit;
+ u_int32_t packets_lowat;
+ u_int32_t packets_hiwat;
+ u_int32_t bytes_lowat;
+ u_int32_t bytes_hiwat;
+ u_int64_t interval_time;
+ u_int64_t reserved[4];
};
typedef struct ifnet_poll_params ifnet_poll_params_t;

/*
- @function ifnet_set_poll_params
- @discussion Configures opportunistic input polling parameters on an
- interface. This call may be issued post ifnet_attach in order
- to modify the interface's polling parameters. The driver may
- alter the default values chosen by the system to achieve the
- optimal performance for a given link rate or driver dynamics.
- @param interface The interface to configure opportunistic polling on.
- @param poll_params Pointer to the polling parameters. If NULL, it
- implies that the system should revert the interface's polling
- parameter to their default values.
- @result May return EINVAL if the parameters are invalid or ENXIO if
- the interface doesn't implement the new driver input model.
+ * @function ifnet_set_poll_params
+ * @discussion Configures opportunistic input polling parameters on an
+ * interface. This call may be issued post ifnet_attach in order
+ * to modify the interface's polling parameters. The driver may
+ * alter the default values chosen by the system to achieve the
+ * optimal performance for a given link rate or driver dynamics.
+ * @param interface The interface to configure opportunistic polling on.
+ * @param poll_params Pointer to the polling parameters. If NULL, it
+ * implies that the system should revert the interface's polling
+ * parameters to their default values.
+ * @result May return EINVAL if the parameters are invalid or ENXIO if
+ * the interface doesn't implement the new driver input model.
 */
extern errno_t ifnet_set_poll_params(ifnet_t interface,
    ifnet_poll_params_t *poll_params);

/*
- @function ifnet_poll_params
- @discussion Retrieves opportunistic input polling parameters on an
- interface. This call may be issued post ifnet_attach in order
- to retrieve the interface's polling parameters.
- @param interface The interface to configure opportunistic polling on.
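A sketch of opting in to opportunistic polling with explicit thresholds; the numbers are illustrative, not tuned recommendations, and zeroed fields fall back to system defaults as described above.

    static errno_t
    my_enable_polling(ifnet_t ifp)
    {
        struct ifnet_poll_params p;

        bzero(&p, sizeof (p));
        p.packets_limit = 64;          /* max packets per poll callback */
        p.packets_lowat = 32;          /* leave polling mode below this */
        p.packets_hiwat = 512;         /* enter polling mode above this */
        p.interval_time = 1000 * 1000; /* 1 ms between polls, in ns */

        return (ifnet_set_poll_params(ifp, &p));
    }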
- @param poll_params Pointer to the polling parameters. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver input model. + * @function ifnet_poll_params + * @discussion Retrieves opportunistic input polling parameters on an + * interface. This call may be issued post ifnet_attach in order + * to retrieve the interface's polling parameters. + * @param interface The interface to configure opportunistic polling on. + * @param poll_params Pointer to the polling parameters. + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver input model. */ extern errno_t ifnet_poll_params(ifnet_t interface, ifnet_poll_params_t *poll_params); /* - @function ifnet_start - @discussion Trigger the transmission at the driver layer on an - interface which implements the new driver output model. - @param interface The interface to start the transmission on. + * @function ifnet_start + * @discussion Trigger the transmission at the driver layer on an + * interface which implements the new driver output model. + * @param interface The interface to start the transmission on. */ extern void ifnet_start(ifnet_t interface); /* - @function ifnet_flowid - @discussion Returns the interface flow ID value, which can be used - by a (virtual) network interface for participating in the - FLOWSRC_IFNET flow advisory mechanism. The flow ID value - is available after the interface is attached. - @param interface The interface to retrieve the flow ID from. - @param flowid Pointer to the flow ID value. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver input model. + * @function ifnet_flowid + * @discussion Returns the interface flow ID value, which can be used + * by a (virtual) network interface for participating in the + * FLOWSRC_IFNET flow advisory mechanism. The flow ID value + * is available after the interface is attached. + * @param interface The interface to retrieve the flow ID from. + * @param flowid Pointer to the flow ID value. + * @result May return EINVAL if the parameters are invalid or ENXIO if + * the interface doesn't implement the new driver input model. */ extern errno_t ifnet_flowid(ifnet_t interface, u_int32_t *flowid); /* - @function ifnet_enable_output - @discussion Enables output on a (virtual) interface if it has been - previously disabled via ifnet_disable_output(). This call - is used to override the flow advisory mechanism typically - used between a (virtual) network interface and a real network - interface beneath it. Under normal circumstances, the flow - advisory mechanism will automatically re-enable the (virtual) - interface's output mechanism when the real network interface - is able to transmit more data. Re-enabling output will cause - the (virtual) interface's start callback to be called again. - @param interface The interface to enable the transmission on. - @result May return EINVAL if the parameters are invalid or ENXIO if - the interface doesn't implement the new driver input model. + * @function ifnet_enable_output + * @discussion Enables output on a (virtual) interface if it has been + * previously disabled via ifnet_disable_output(). This call + * is used to override the flow advisory mechanism typically + * used between a (virtual) network interface and a real network + * interface beneath it. 
Under normal circumstances, the flow
+ * advisory mechanism will automatically re-enable the (virtual)
+ * interface's output mechanism when the real network interface
+ * is able to transmit more data. Re-enabling output will cause
+ * the (virtual) interface's start callback to be called again.
+ * @param interface The interface to enable the transmission on.
+ * @result May return EINVAL if the parameters are invalid or ENXIO if
+ * the interface doesn't implement the new driver input model.
 */
extern errno_t ifnet_enable_output(ifnet_t interface);

/*
- @function ifnet_disable_output
- @discussion Disables output on a (virtual) interface. Disabling
- output will cause the (virtual) interface's start callback
- to go idle. This call is typically used by a (virtual)
- interface upon receiving flow control feedbacks from the
- real network interface beneath it, in order propagate the
- flow control condition to the layers above. Under normal
- circumstances, the flow advisory mechanism will automatically
- re-enable the (virtual) interface's output mechanism when
- the real network interface is able to transmit more data,
- as long as the (virtual) interface participates in the
- FLOWSRC_IFNET flow advisory for the data that it emits.
- @param interface The interface to disable the transmission on.
- @result May return EINVAL if the parameters are invalid or ENXIO if
- the interface doesn't implement the new driver input model.
+ * @function ifnet_disable_output
+ * @discussion Disables output on a (virtual) interface. Disabling
+ * output will cause the (virtual) interface's start callback
+ * to go idle. This call is typically used by a (virtual)
+ * interface upon receiving flow control feedback from the
+ * real network interface beneath it, in order to propagate the
+ * flow control condition to the layers above. Under normal
+ * circumstances, the flow advisory mechanism will automatically
+ * re-enable the (virtual) interface's output mechanism when
+ * the real network interface is able to transmit more data,
+ * as long as the (virtual) interface participates in the
+ * FLOWSRC_IFNET flow advisory for the data that it emits.
+ * @param interface The interface to disable the transmission on.
+ * @result May return EINVAL if the parameters are invalid or ENXIO if
+ * the interface doesn't implement the new driver input model.
 */
extern errno_t ifnet_disable_output(ifnet_t interface);
#endif /* KERNEL_PRIVATE */

/*!
- @function ifnet_reference
- @discussion Increment the reference count of the ifnet to assure
- that it will not go away. The interface must already have at
- least one reference.
- @param interface The interface to increment the reference count of.
- @result May return EINVAL if the interface is not valid.
+ * @function ifnet_reference
+ * @discussion Increment the reference count of the ifnet to assure
+ * that it will not go away. The interface must already have at
+ * least one reference.
+ * @param interface The interface to increment the reference count of.
+ * @result May return EINVAL if the interface is not valid.
 */
extern errno_t ifnet_reference(ifnet_t interface);

/*!
- @function ifnet_release
- @discussion Release a reference of the ifnet, this may trigger a
- free if the reference count reaches 0.
- @param interface The interface to decrement the reference count of
- and possibly free.
- @result May return EINVAL if the interface is not valid.
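As a sketch, a virtual interface sitting on top of a physical one might propagate flow control like this; the notification hooks are hypothetical, the KPIs are the pair documented above.

    /* Lower interface signalled congestion: park our start callback. */
    static void
    my_vif_flow_ctl_on(ifnet_t vif)
    {
        (void) ifnet_disable_output(vif);
    }

    /* Lower interface drained: our start callback gets invoked again. */
    static void
    my_vif_flow_ctl_off(ifnet_t vif)
    {
        (void) ifnet_enable_output(vif);
    }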
+ * @function ifnet_release
+ * @discussion Release a reference of the ifnet; this may trigger a
+ * free if the reference count reaches 0.
+ * @param interface The interface to decrement the reference count of
+ * and possibly free.
+ * @result May return EINVAL if the interface is not valid.
 */
extern errno_t ifnet_release(ifnet_t interface);

/*!
- @function ifnet_attach
- @discussion Attaches an interface to the global interface list. The
- interface must be setup properly before calling attach. The
- stack will take a reference on the interface and hold it until
- ifnet_detach is called.
-
- This function is intended to be called by the driver. A kext
- must not call this function on an interface the kext does not
- own.
- @param interface The interface to attach.
- @param ll_addr The link layer address of the interface. This is used
- to fill out the first ifaddr in the list of addresses for the
- interface. This parameter is not required for interfaces such as
- PPP that have no link-layer address.
- @result Will return an error if there is anything wrong with the
- interface.
+ * @function ifnet_attach
+ * @discussion Attaches an interface to the global interface list. The
+ * interface must be set up properly before calling attach. The
+ * stack will take a reference on the interface and hold it until
+ * ifnet_detach is called.
+ *
+ * This function is intended to be called by the driver. A kext
+ * must not call this function on an interface the kext does not
+ * own.
+ * @param interface The interface to attach.
+ * @param ll_addr The link layer address of the interface. This is used
+ * to fill out the first ifaddr in the list of addresses for the
+ * interface. This parameter is not required for interfaces such as
+ * PPP that have no link-layer address.
+ * @result Will return an error if there is anything wrong with the
+ * interface.
 */
extern errno_t ifnet_attach(ifnet_t interface,
    const struct sockaddr_dl *ll_addr);

/*!
- @function ifnet_detach
- @discussion Detaches the interface.
-
- Call this to indicate this interface is no longer valid (i.e. PC
- Card was removed). This function will begin the process of
- removing knowledge of this interface from the stack.
-
- The function will return before the interface is detached. The
- functions you supplied in to the interface may continue to be
- called. When the detach has been completed, your detached
- function will be called. Your kext must not unload until the
- detached function has been called. The interface will be
- properly freed when the reference count reaches zero.
-
- An interface may not be attached again. You must call
- ifnet_allocate to create a new interface to attach.
-
- This function is intended to be called by the driver. A kext
- must not call this function on an interface the kext does not
- own.
- @param interface The interface to detach.
- @result 0 on success, otherwise errno error.
+ * @function ifnet_detach
+ * @discussion Detaches the interface.
+ *
+ * Call this to indicate this interface is no longer valid (e.g. the
+ * PC Card was removed). This function will begin the process of
+ * removing knowledge of this interface from the stack.
+ *
+ * The function will return before the interface is detached. The
+ * functions you supplied to the interface may continue to be
+ * called. When the detach has been completed, your detached
+ * function will be called. Your kext must not unload until the
+ * detached function has been called.
The interface will be + * properly freed when the reference count reaches zero. + * + * An interface may not be attached again. You must call + * ifnet_allocate to create a new interface to attach. + * + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface The interface to detach. + * @result 0 on success, otherwise errno error. */ extern errno_t ifnet_detach(ifnet_t interface); /*! - @function ifnet_interface_family_find - @discussion Look up the interface family identifier for a string. - If there is no interface family identifier assigned for this string - a new interface family identifier is created and assigned. - It is recommended to use the bundle id of the KEXT as the string - to avoid collisions with other KEXTs. - The lookup operation is not optimized so a module should call this - function once during startup and cache the interface family identifier. - The interface family identifier for a string will not be re-assigned until - the system reboots. - @param module_string A unique string identifying your interface family - @param family_id Upon return, a unique interface family identifier for use with - ifnet_* functions. This identifier is valid until the system - is rebooted. - @result 0 on success, otherwise errno error. -*/ + * @function ifnet_interface_family_find + * @discussion Look up the interface family identifier for a string. + * If there is no interface family identifier assigned for this string + * a new interface family identifier is created and assigned. + * It is recommended to use the bundle id of the KEXT as the string + * to avoid collisions with other KEXTs. + * The lookup operation is not optimized so a module should call this + * function once during startup and cache the interface family identifier. + * The interface family identifier for a string will not be re-assigned until + * the system reboots. + * @param module_string A unique string identifying your interface family + * @param family_id Upon return, a unique interface family identifier for use with + * ifnet_* functions. This identifier is valid until the system + * is rebooted. + * @result 0 on success, otherwise errno error. + */ extern errno_t ifnet_interface_family_find(const char *module_string, ifnet_family_t *family_id); /* @@ -1740,215 +1740,215 @@ extern errno_t ifnet_interface_family_find(const char *module_string, ifnet_fami */ /*! - @function ifnet_softc - @discussion Returns the driver's private storage on the interface. - @param interface Interface to retrieve the storage from. - @result Driver's private storage. + * @function ifnet_softc + * @discussion Returns the driver's private storage on the interface. + * @param interface Interface to retrieve the storage from. + * @result Driver's private storage. */ extern void *ifnet_softc(ifnet_t interface); /*! - @function ifnet_name - @discussion Returns a pointer to the name of the interface. - @param interface Interface to retrieve the name from. - @result Pointer to the name. + * @function ifnet_name + * @discussion Returns a pointer to the name of the interface. + * @param interface Interface to retrieve the name from. + * @result Pointer to the name. */ extern const char *ifnet_name(ifnet_t interface); /*! - @function ifnet_family - @discussion Returns the family of the interface. - @param interface Interface to retrieve the family from. - @result Interface family type. 
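Pulling the lifecycle rules together, a kext might cache its family identifier once at load, attach after allocation, and only unload after the detached callback has run. A sketch; the bundle id, the wait primitive and the teardown ordering details are assumptions.

    #include <mach/kmod.h>

    static ifnet_family_t my_family;    /* looked up once, then cached */

    kern_return_t
    my_kext_start(kmod_info_t *ki, void *data)
    {
        if (ifnet_interface_family_find("com.example.mydriver",
            &my_family) != 0)
            return (KERN_FAILURE);
        return (KERN_SUCCESS);
    }

    static void
    my_tear_down(ifnet_t ifp)
    {
        (void) ifnet_detach(ifp);       /* returns before detach completes */
        my_wait_for_detached();         /* hypothetical: block until our
                                         * detached callback has fired */
        (void) ifnet_release(ifp);      /* drop the allocation reference */
    }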
+ * @function ifnet_family
+ * @discussion Returns the family of the interface.
+ * @param interface Interface to retrieve the family from.
+ * @result Interface family type.
 */
extern ifnet_family_t ifnet_family(ifnet_t interface);

#ifdef KERNEL_PRIVATE
/*
- @function ifnet_subfamily
- @discussion Returns the sub-family of the interface.
- @param interface Interface to retrieve the sub-family from.
- @result Interface sub-family type.
+ * @function ifnet_subfamily
+ * @discussion Returns the sub-family of the interface.
+ * @param interface Interface to retrieve the sub-family from.
+ * @result Interface sub-family type.
 */
extern ifnet_subfamily_t ifnet_subfamily(ifnet_t interface);
#endif /* KERNEL_PRIVATE */

/*!
- @function ifnet_unit
- @discussion Returns the unit number of the interface.
- @param interface Interface to retrieve the unit number from.
- @result Unit number.
+ * @function ifnet_unit
+ * @discussion Returns the unit number of the interface.
+ * @param interface Interface to retrieve the unit number from.
+ * @result Unit number.
 */
extern u_int32_t ifnet_unit(ifnet_t interface);

/*!
- @function ifnet_index
- @discussion Returns the index of the interface. This index value
- will match the index you would find in a sockaddr_dl or using
- if_nametoindex or if_indextoname in user space. The value of the
- interface index is undefined for an interface that is not
- currently attached.
- @param interface Interface to retrieve the index of.
- @result Index.
+ * @function ifnet_index
+ * @discussion Returns the index of the interface. This index value
+ * will match the index you would find in a sockaddr_dl or using
+ * if_nametoindex or if_indextoname in user space. The value of the
+ * interface index is undefined for an interface that is not
+ * currently attached.
+ * @param interface Interface to retrieve the index of.
+ * @result Index.
 */
extern u_int32_t ifnet_index(ifnet_t interface);

/*!
- @function ifnet_set_flags
- @discussion Sets the interface flags to match new_flags.
- @discussion Sets the interface flags to new_flags. This function
- lets you specify which flags you want to change using the mask.
- The kernel will effectively take the lock, then set the
- interface's flags to (if_flags & ~mask) | (new_flags & mask).
- @param interface Interface to set the flags on.
- @param new_flags The new set of flags that should be set. These
- flags are defined in net/if.h
- @result 0 on success otherwise the errno error.
+ * @function ifnet_set_flags
+ * @discussion Sets the interface flags to new_flags. This function
+ * lets you specify which flags you want to change using the mask.
+ * The kernel will effectively take the lock, then set the
+ * interface's flags to (if_flags & ~mask) | (new_flags & mask).
+ * @param interface Interface to set the flags on.
+ * @param new_flags The new set of flags that should be set. These
+ * flags are defined in net/if.h
+ * @param mask The mask of flags to be modified.
+ * @result 0 on success otherwise the errno error.
 */
extern errno_t ifnet_set_flags(ifnet_t interface, u_int16_t new_flags,
    u_int16_t mask);

/*!
- @function ifnet_flags
- @discussion Returns the interface flags that are set.
- @param interface Interface to retrieve the flags from.
- @result Flags. These flags are defined in net/if.h
+ * @function ifnet_flags
+ * @discussion Returns the interface flags that are set.
+ * @param interface Interface to retrieve the flags from.
+ * @result Flags.
These flags are defined in net/if.h */ extern u_int16_t ifnet_flags(ifnet_t interface); #ifdef KERNEL_PRIVATE /* - @function ifnet_set_eflags - @discussion Sets the extended interface flags to new_flags. This - function lets you specify which flags you want to change using - the mask. The kernel will effectively take the lock, then set - the interface's extended flags to (if_eflags & ~mask) | - (new_flags & mask). - @param interface The interface. - @param new_flags The new set of flags that should be set. These - flags are defined in net/if.h - @param mask The mask of flags to be modified. - @result 0 on success otherwise the errno error. + * @function ifnet_set_eflags + * @discussion Sets the extended interface flags to new_flags. This + * function lets you specify which flags you want to change using + * the mask. The kernel will effectively take the lock, then set + * the interface's extended flags to (if_eflags & ~mask) | + * (new_flags & mask). + * @param interface The interface. + * @param new_flags The new set of flags that should be set. These + * flags are defined in net/if.h + * @param mask The mask of flags to be modified. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask); /* - @function ifnet_eflags - @discussion Returns the extended interface flags that are set. - @param interface Interface to retrieve the flags from. - @result Extended flags. These flags are defined in net/if.h + * @function ifnet_eflags + * @discussion Returns the extended interface flags that are set. + * @param interface Interface to retrieve the flags from. + * @result Extended flags. These flags are defined in net/if.h */ extern u_int32_t ifnet_eflags(ifnet_t interface); /* - @function ifnet_set_idle_flags - @discussion Sets the if_idle_flags to new_flags. This function - lets you specify which flags you want to change using the - mask. The kernel will effectively take the lock, then set - the interface's idle flags to: - (if_idle_flags & ~mask) | (new_flags & mask). - Setting the flags to any non-zero value will cause the - networking stack to aggressively purge expired objects, - such as route entries, etc. - @param interface The interface. - @param new_flags The new set of flags that should be set. These - flags are defined in net/if.h - @param mask The mask of flags to be modified. - @result 0 on success otherwise the errno error. ENOTSUP is returned - when this call is made on non-supporting platforms. -*/ + * @function ifnet_set_idle_flags + * @discussion Sets the if_idle_flags to new_flags. This function + * lets you specify which flags you want to change using the + * mask. The kernel will effectively take the lock, then set + * the interface's idle flags to: + * (if_idle_flags & ~mask) | (new_flags & mask). + * Setting the flags to any non-zero value will cause the + * networking stack to aggressively purge expired objects, + * such as route entries, etc. + * @param interface The interface. + * @param new_flags The new set of flags that should be set. These + * flags are defined in net/if.h + * @param mask The mask of flags to be modified. + * @result 0 on success otherwise the errno error. ENOTSUP is returned + * when this call is made on non-supporting platforms. + */ extern errno_t ifnet_set_idle_flags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask); /* - @function ifnet_idle_flags - @discussion Returns the value of if_idle_flags. - @param interface Interface to retrieve the flags from. 
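Given the read-modify-write behavior described above, marking the interface running without disturbing other bits is a single masked call; IFF_RUNNING comes from net/if.h.

    static void
    my_mark_running(ifnet_t ifp, int running)
    {
        /* Only the IFF_RUNNING bit changes; the mask protects the rest. */
        (void) ifnet_set_flags(ifp, running ? IFF_RUNNING : 0, IFF_RUNNING);
    }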
- @result if_idle_flags. These flags are defined in net/if.h
-*/
+ * @function ifnet_idle_flags
+ * @discussion Returns the value of if_idle_flags.
+ * @param interface Interface to retrieve the flags from.
+ * @result if_idle_flags. These flags are defined in net/if.h
+ */
extern u_int32_t ifnet_idle_flags(ifnet_t interface);

/*
- @function ifnet_set_link_quality
- @discussion Sets the Link Quality Metric for the ifnet.
- @param interface Interface for which the Link Quality Metric should
- be associated to.
- @param quality IFNET_LQM value as defined in net/if.h.
- @result 0 on success otherwise the errno error. EINVAL if quality
- is not a valid value. ENXIO if the interface is not attached.
-*/
+ * @function ifnet_set_link_quality
+ * @discussion Sets the Link Quality Metric for the ifnet.
+ * @param interface Interface with which the Link Quality Metric should
+ * be associated.
+ * @param quality IFNET_LQM value as defined in net/if.h.
+ * @result 0 on success otherwise the errno error. EINVAL if quality
+ * is not a valid value. ENXIO if the interface is not attached.
+ */
extern errno_t ifnet_set_link_quality(ifnet_t interface, int quality);

/*
- @function ifnet_link_quality
- @discussion Returns the Link Quality Metric for the ifnet.
- @param interface Interface to retrieve the value from.
- @result IFNET_LQM as defined in net/if.h
-*/
+ * @function ifnet_link_quality
+ * @discussion Returns the Link Quality Metric for the ifnet.
+ * @param interface Interface to retrieve the value from.
+ * @result IFNET_LQM as defined in net/if.h
+ */
extern int ifnet_link_quality(ifnet_t interface);

/*
- @function ifnet_set_interface_state
- @discussion Sets the interface state for the ifnet.
- @param interface Interface for which the interface state should
- be set to.
- @param if_interface_state as defined in net/if_var.h.
- @result 0 on success otherwise the errno error. EINVAL if quality
- is not a valid value. ENXIO if the interface is not attached.
-*/
+ * @function ifnet_set_interface_state
+ * @discussion Sets the interface state for the ifnet.
+ * @param interface Interface on which the interface state should
+ * be set.
+ * @param if_interface_state Pointer to a struct if_interface_state,
+ * as defined in net/if_var.h.
+ * @result 0 on success otherwise the errno error. EINVAL if the state
+ * is not a valid value. ENXIO if the interface is not attached.
+ */
extern errno_t ifnet_set_interface_state(ifnet_t interface,
    struct if_interface_state *if_interface_state);

/*
- @function ifnet_get_interface_state
- @discussion Returns the interface state for the ifnet.
- @param if_interface_state to ret.
- @result 0 on success, errno otherwise
-*/
+ * @function ifnet_get_interface_state
+ * @discussion Returns the interface state for the ifnet.
+ * @param if_interface_state Pointer to storage for the returned
+ * interface state.
+ * @result 0 on success, errno otherwise
+ */
extern int ifnet_get_interface_state(ifnet_t interface,
    struct if_interface_state *if_interface_state);

/*
- @struct ifnet_llreach_info
- @discussion This structure is used to describe the link-layer
- reachability information of an on-link node.
- @field iflri_refcnt The number of network-layer objects referring
- to this link-layer reachability record.
- @field iflri_probes The total number of outstanding probes.
- @field iflri_snd_expire The send expiration time. This is calculated
- based on the last time the system transmitted a packet to the
- node. A zero value indicates that a packet has not been sent
- to the node. A non-zero value indicates the time before the
- record is determined to be invalid.
When the record is no
- longer valid, the system will send probe(s) to resolve the
- node again. This value is relative to the current time
- specified in iflri_curtime.
- @field iflri_rcv_expire The receive expiriation time. This is
- calculated based on the last time the system received a packet
- from the node. A zero value indicates that a packet has not
- been received from the node. A non-zero value indicates the
- time before the record is determined to be invalid. When the
- record is no longer valid, the system will send probe(s) to
- resolve the node again. This value is relative to the current
- time specified in iflri_curtime.
- @field iflri_curtime The current time when this record was retrieved.
- @field iflri_netproto The protocol number of the network-layer object.
- @field iflri_addr The link-layer address of the node.
- @field iflri_rssi The received signal strength indication (RSSI) of the
- node in dBm. The special value IFNET_RSSI_UNKNOWN is used when
- the RSSI is either unknown or inapplicable for the interface.
- @field iflri_lqm The link quality metric (LQM) to the node. The
- special value IFNET_LQM_UNKNOWN is used when the LQM is not
- currently known. The special value IFNET_LQM_OFF is used when
- the link quality metric is inapplicable to nodes at this
- attached to the network at this interface.
- @field iflri_npm The node proximity metric (NPM) to the node. The
- special value IFNET_NPM_UNKNOWN is used when the NPM is not
- currently known.
- */
-#define IFNET_LLREACHINFO_ADDRLEN 64 /* max ll addr len */
+ * @struct ifnet_llreach_info
+ * @discussion This structure is used to describe the link-layer
+ * reachability information of an on-link node.
+ * @field iflri_refcnt The number of network-layer objects referring
+ * to this link-layer reachability record.
+ * @field iflri_probes The total number of outstanding probes.
+ * @field iflri_snd_expire The send expiration time. This is calculated
+ * based on the last time the system transmitted a packet to the
+ * node. A zero value indicates that a packet has not been sent
+ * to the node. A non-zero value indicates the time before the
+ * record is determined to be invalid. When the record is no
+ * longer valid, the system will send probe(s) to resolve the
+ * node again. This value is relative to the current time
+ * specified in iflri_curtime.
+ * @field iflri_rcv_expire The receive expiration time. This is
+ * calculated based on the last time the system received a packet
+ * from the node. A zero value indicates that a packet has not
+ * been received from the node. A non-zero value indicates the
+ * time before the record is determined to be invalid. When the
+ * record is no longer valid, the system will send probe(s) to
+ * resolve the node again. This value is relative to the current
+ * time specified in iflri_curtime.
+ * @field iflri_curtime The current time when this record was retrieved.
+ * @field iflri_netproto The protocol number of the network-layer object.
+ * @field iflri_addr The link-layer address of the node.
+ * @field iflri_rssi The received signal strength indication (RSSI) of the
+ * node in dBm. The special value IFNET_RSSI_UNKNOWN is used when
+ * the RSSI is either unknown or inapplicable for the interface.
+ * @field iflri_lqm The link quality metric (LQM) to the node. The
+ * special value IFNET_LQM_UNKNOWN is used when the LQM is not
+ * currently known.
The special value IFNET_LQM_OFF is used when
+ * the link quality metric is inapplicable to nodes attached
+ * to the network at this interface.
+ * @field iflri_npm The node proximity metric (NPM) to the node. The
+ * special value IFNET_NPM_UNKNOWN is used when the NPM is not
+ * currently known.
+ */
+#define IFNET_LLREACHINFO_ADDRLEN 64 /* max ll addr len */

struct ifnet_llreach_info {
 u_int32_t iflri_refcnt;
@@ -1964,484 +1964,484 @@ struct ifnet_llreach_info {
};

/*
- @function ifnet_inet_defrouter_llreachinfo
- @discussion Retrieve link-layer reachability information of the
- default IPv4 router specific to the interface.
- @param interface The interface associated with the default IPv4 router.
- @param pinfo Pointer to the ifnet_llreach_info structure where the
- information will be returned to, upon success.
- @result 0 upon success, otherwise errno error.
+ * @function ifnet_inet_defrouter_llreachinfo
+ * @discussion Retrieve link-layer reachability information of the
+ * default IPv4 router specific to the interface.
+ * @param interface The interface associated with the default IPv4 router.
+ * @param pinfo Pointer to the ifnet_llreach_info structure where the
+ * information will be returned to, upon success.
+ * @result 0 upon success, otherwise errno error.
 */
extern errno_t ifnet_inet_defrouter_llreachinfo(ifnet_t interface,
    struct ifnet_llreach_info *pinfo);

/*
- @function ifnet_inet6_defrouter_llreachinfo
- @discussion Retrieve link-layer reachability information of the
- default IPv6 router specific to the interface.
- @param interface The interface associated with the default IPv6 router.
- @param pinfo Pointer to the ifnet_llreach_info structure where the
- information will be returned to, upon success.
- @result 0 upon success, otherwise errno error.
+ * @function ifnet_inet6_defrouter_llreachinfo
+ * @discussion Retrieve link-layer reachability information of the
+ * default IPv6 router specific to the interface.
+ * @param interface The interface associated with the default IPv6 router.
+ * @param pinfo Pointer to the ifnet_llreach_info structure where the
+ * information will be returned to, upon success.
+ * @result 0 upon success, otherwise errno error.
 */
extern errno_t ifnet_inet6_defrouter_llreachinfo(ifnet_t interface,
    struct ifnet_llreach_info *pinfo);
#endif /* KERNEL_PRIVATE */

/*!
- @function ifnet_set_capabilities_supported
- @discussion Specify the capabilities supported by the interface.
- @discussion This function lets you specify which capabilities are supported
- by the interface. Typically this function is called by the driver when
- the interface gets attached to the system.
- The mask allows to control which capability to set or unset.
- The kernel will effectively take the lock, then set the
- interface's flags to (if_capabilities & ~mask) | (new_caps & mask).
-
- This function is intended to be called by the driver. A kext
- must not call this function on an interface the kext does not
- own.
- @param interface Interface to set the capabilities on.
- @param new_caps The value of the capabilities that should be set or unset. These
- flags are defined in net/if.h
- @param mask Which capabilities that should be affected. These
- flags are defined in net/if.h
- @result 0 on success otherwise the errno error.
+ * @function ifnet_set_capabilities_supported
+ * @discussion This function lets you specify which capabilities are supported
+ * by the interface.
Typically this function is called by the driver when + * the interface gets attached to the system. + * The mask allows you to control which capability to set or unset. + * The kernel will effectively take the lock, then set the + * interface's flags to (if_capabilities & ~mask) | (new_caps & mask). + * + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface Interface to set the capabilities on. + * @param new_caps The value of the capabilities that should be set or unset. These + * flags are defined in net/if.h + * @param mask Which capabilities should be affected. These + * flags are defined in net/if.h + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_capabilities_supported(ifnet_t interface, u_int32_t new_caps, u_int32_t mask); /*! - @function ifnet_capabilities_supported - @discussion Retrieve the interface capabilities supported by the interface. - @param interface Interface to retrieve the capabilities from. - @result Flags. Capabilities flags are defined in net/if.h + * @function ifnet_capabilities_supported + * @discussion Retrieve the interface capabilities supported by the interface. + * @param interface Interface to retrieve the capabilities from. + * @result Flags. Capabilities flags are defined in net/if.h */ extern u_int32_t ifnet_capabilities_supported(ifnet_t interface); /*! - @function ifnet_set_capabilities_enabled - @discussion Enable and/or disable the interface capabilities to match new_caps. - @discussion Sets the interface capabilities to new_caps. This function - lets you specify which capabilities you want to change using the mask. - The kernel will effectively take the lock, then set the - interface's flags to (if_capenable & ~mask) | (new_caps & mask). - - This function is intended to be called by the driver. A kext - must not call this function on an interface the kext does not - own. - - Typically this function is called by the driver when the interface is - created to specify which of the supported capabilities are enabled by - default. This function is also meant to be called when the driver handles - the interface ioctl SIOCSIFCAP. - - The driver should call ifnet_set_offlad() to indicate the corresponding - hardware offload bits that will be used by the networking stack. - - It is an error to enable a capability that is not marked as - supported by the interface. - @param interface Interface to set the capabilities on. - @param new_caps The value of the capabilities that should be set or unset. These - flags are defined in net/if.h - @param mask Which capabilities that should be affected. These - flags are defined in net/if.h - @result 0 on success otherwise the errno error. + * @function ifnet_set_capabilities_enabled + * @discussion Enable and/or disable the interface capabilities to match new_caps. + * Sets the interface capabilities to new_caps. This function + * lets you specify which capabilities you want to change using the mask. + * The kernel will effectively take the lock, then set the + * interface's flags to (if_capenable & ~mask) | (new_caps & mask). + * + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * + * Typically this function is called by the driver when the interface is + * created to specify which of the supported capabilities are enabled by + * default.
This function is also meant to be called when the driver handles + * the interface ioctl SIOCSIFCAP. + * + * The driver should call ifnet_set_offload() to indicate the corresponding + * hardware offload bits that will be used by the networking stack. + * + * It is an error to enable a capability that is not marked as + * supported by the interface. + * @param interface Interface to set the capabilities on. + * @param new_caps The value of the capabilities that should be set or unset. These + * flags are defined in net/if.h + * @param mask Which capabilities should be affected. These + * flags are defined in net/if.h + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_capabilities_enabled(ifnet_t interface, u_int32_t new_caps, u_int32_t mask); /*! - @function ifnet_capabilities_enabled - @discussion Retrieve the interface capabilities enabled on the interface. - @param interface Interface to retrieve the capabilities from. - @result Flags. Capabilities flags are defined in net/if.h + * @function ifnet_capabilities_enabled + * @discussion Retrieve the interface capabilities enabled on the interface. + * @param interface Interface to retrieve the capabilities from. + * @result Flags. Capabilities flags are defined in net/if.h */ extern u_int32_t ifnet_capabilities_enabled(ifnet_t interface); /*! - @function ifnet_set_offload - @discussion Sets a bitfield to indicate special hardware offload - support provided by the interface such as hardware checksums and - VLAN. This replaces the if_hwassist flags field. Any flags - unrecognized by the stack will not be set. - - Note the system will automatically set the interface capabilities - that correspond to the offload flags modified -- i.e. the driver - does not have to call ifnet_set_capabilities_enabled() and - ifnet_set_capabilities_supported(). - @param interface The interface. - @param offload The new set of flags indicating which offload options - the device supports. - @result 0 on success otherwise the errno error. + * @function ifnet_set_offload + * @discussion Sets a bitfield to indicate special hardware offload + * support provided by the interface such as hardware checksums and + * VLAN. This replaces the if_hwassist flags field. Any flags + * unrecognized by the stack will not be set. + * + * Note the system will automatically set the interface capabilities + * that correspond to the offload flags modified -- i.e. the driver + * does not have to call ifnet_set_capabilities_enabled() and + * ifnet_set_capabilities_supported(). + * @param interface The interface. + * @param offload The new set of flags indicating which offload options + * the device supports. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload); /*! - @function ifnet_offload - @discussion Returns flags indicating which operations can be - offloaded to the interface. - @param interface Interface to retrieve the offload from. - @result Abilities flags, see ifnet_offload_t. + * @function ifnet_offload + * @discussion Returns flags indicating which operations can be + * offloaded to the interface. + * @param interface Interface to retrieve the offload from. + * @result Abilities flags, see ifnet_offload_t. */ extern ifnet_offload_t ifnet_offload(ifnet_t interface); /*! - @function ifnet_set_tso_mtu - @discussion Sets maximum TCP Segmentation Offload segment size for - the interface - @param interface The interface.
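To make the capability/offload sequence above concrete, here is a minimal sketch of what a driver's attach path might do. The driver function name and the my_ifp handle are hypothetical; the IFCAP_* capability bits come from net/if.h and the IFNET_CSUM_* offload bits from this header:

    #include <net/if.h>
    #include <net/kpi_interface.h>

    /* Hypothetical attach-time helper; my_ifp is an interface this
     * driver owns. */
    static errno_t
    my_driver_setup_offload(ifnet_t my_ifp)
    {
        u_int32_t caps = IFCAP_TXCSUM | IFCAP_RXCSUM;
        errno_t err;

        /* Mark the checksum capabilities supported, then enable them.
         * Only the bits named in the mask (third argument) change. */
        err = ifnet_set_capabilities_supported(my_ifp, caps, caps);
        if (err == 0)
            err = ifnet_set_capabilities_enabled(my_ifp, caps, caps);

        /* Tell the stack which hardware-assist bits to use; per the
         * note above, this also updates the capability bits. */
        if (err == 0)
            err = ifnet_set_offload(my_ifp,
                IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP);
        return err;
    }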
- @param family The family for which the offload MTU is provided for - (AF_INET or AF_INET6) - @param mtuLen Maximum segment size supported by the interface - @result 0 on success otherwise the errno error. -*/ + * @function ifnet_set_tso_mtu + * @discussion Sets maximum TCP Segmentation Offload segment size for + * the interface. + * @param interface The interface. + * @param family The family for which the offload MTU is provided + * (AF_INET or AF_INET6) + * @param mtuLen Maximum segment size supported by the interface + * @result 0 on success otherwise the errno error. + */ extern errno_t ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen); /*! - @function ifnet_get_tso_mtu - @discussion Returns maximum TCP Segmentation Offload segment size for - the interface - @param interface The interface. - @param family The family for which the offload MTU is provided for - (AF_INET or AF_INET6) - @param mtuLen Value of the maximum MTU supported for the interface - and family provided. - @result 0 on success otherwise the errno error. + * @function ifnet_get_tso_mtu + * @discussion Returns maximum TCP Segmentation Offload segment size for + * the interface. + * @param interface The interface. + * @param family The family for which the offload MTU is provided + * (AF_INET or AF_INET6) + * @param mtuLen Value of the maximum MTU supported for the interface + * and family provided. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen); /*! - @enum Interface wake properties - @abstract Constants defining Interface wake properties. - @constant IFNET_WAKE_ON_MAGIC_PACKET Wake on Magic Packet. -*/ + * @enum Interface wake properties + * @abstract Constants defining Interface wake properties. + * @constant IFNET_WAKE_ON_MAGIC_PACKET Wake on Magic Packet. + */ enum { IFNET_WAKE_ON_MAGIC_PACKET = 0x01 }; /*! - @function ifnet_set_wake_flags - @discussion Sets the wake properties of the underlying hardware. These are - typically set by the driver. - @param interface The interface. - @param properties Properties to set or unset. - @param mask Mask of the properties to set of unset. - @result 0 on success otherwise the errno error. -*/ + * @function ifnet_set_wake_flags + * @discussion Sets the wake properties of the underlying hardware. These are + * typically set by the driver. + * @param interface The interface. + * @param properties Properties to set or unset. + * @param mask Mask of the properties to set or unset. + * @result 0 on success otherwise the errno error. + */ extern errno_t ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask); /*! - @function ifnet_get_wake_flags - @discussion Returns the wake properties set on the interface. - @param interface The interface. - @result The wake properties -*/ + * @function ifnet_get_wake_flags + * @discussion Returns the wake properties set on the interface. + * @param interface The interface. + * @result The wake properties + */ extern u_int32_t ifnet_get_wake_flags(ifnet_t interface); /*! - @function ifnet_set_link_mib_data - @discussion Sets the mib link data. The ifnet_t will store the - pointer you supply and copy mibLen bytes from the pointer - whenever the sysctl for getting interface specific MIB data is - used. Since the ifnet_t stores a pointer to your data instead of - a copy, you may update the data at the address at any time. - - This function is intended to be called by the driver.
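Similarly, a short hypothetical sketch tying the TSO and wake calls together; the 64 KB segment limit is illustrative only, and my_ifp is again an assumed driver-owned handle:

    #include <sys/socket.h>
    #include <net/kpi_interface.h>

    static errno_t
    my_driver_set_tso_and_wake(ifnet_t my_ifp)   /* hypothetical */
    {
        errno_t err;

        /* Publish the largest TSO segment the hardware accepts,
         * per address family. */
        err = ifnet_set_tso_mtu(my_ifp, AF_INET, 64 * 1024);
        if (err == 0)
            err = ifnet_set_tso_mtu(my_ifp, AF_INET6, 64 * 1024);

        /* Set only the magic-packet bit; the mask leaves any other
         * wake bits untouched. */
        if (err == 0)
            err = ifnet_set_wake_flags(my_ifp,
                IFNET_WAKE_ON_MAGIC_PACKET, IFNET_WAKE_ON_MAGIC_PACKET);
        return err;
    }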
A kext - must not call this function on an interface the kext does not - own. - @param interface Interface to set the unit number of. - @param mibData A pointer to the data. - @param mibLen Length of data pointed to. - @result 0 on success otherwise the errno error. -*/ + * @function ifnet_set_link_mib_data + * @discussion Sets the MIB link data. The ifnet_t will store the + * pointer you supply and copy mibLen bytes from the pointer + * whenever the sysctl for getting interface-specific MIB data is + * used. Since the ifnet_t stores a pointer to your data instead of + * a copy, you may update the data at the address at any time. + * + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface Interface to set the MIB data on. + * @param mibData A pointer to the data. + * @param mibLen Length of data pointed to. + * @result 0 on success otherwise the errno error. + */ extern errno_t ifnet_set_link_mib_data(ifnet_t interface, void *mibData, u_int32_t mibLen); /*! - @function ifnet_get_link_mib_data - @discussion Copies the link MIB data in to mibData, up to mibLen - bytes. Returns error if the buffer is too small to hold all of - the MIB data. - @param interface The interface. - @param mibData A pointer to space for the mibData to be copied in - to. - @param mibLen When calling, this should be the size of the buffer - passed in mibData. Upon return, this will be the size of data - copied in to mibData. - @result Returns an error if the buffer size is too small or there is - no data. + * @function ifnet_get_link_mib_data + * @discussion Copies the link MIB data in to mibData, up to mibLen + * bytes. Returns error if the buffer is too small to hold all of + * the MIB data. + * @param interface The interface. + * @param mibData A pointer to space for the mibData to be copied in + * to. + * @param mibLen When calling, this should be the size of the buffer + * passed in mibData. Upon return, this will be the size of data + * copied in to mibData. + * @result Returns an error if the buffer size is too small or there is + * no data. */ extern errno_t ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen); /*! - @function ifnet_get_link_mib_data_length - @discussion Retrieve the size of the mib data. - @param interface The interface. - @result Returns the number of bytes of mib data associated with the - interface. + * @function ifnet_get_link_mib_data_length + * @discussion Retrieve the size of the MIB data. + * @param interface The interface. + * @result Returns the number of bytes of MIB data associated with the + * interface. */ extern u_int32_t ifnet_get_link_mib_data_length(ifnet_t interface); /*! - @function ifnet_attach_protocol - @discussion Attaches a protocol to an interface. - @param interface The interface. - @param protocol_family The protocol family being attached - (PF_INET/PF_INET6/etc...). - @param proto_details Details of the protocol being attached. - @result 0 on success otherwise the errno error. + * @function ifnet_attach_protocol + * @discussion Attaches a protocol to an interface. + * @param interface The interface. + * @param protocol_family The protocol family being attached + * (PF_INET/PF_INET6/etc...). + * @param proto_details Details of the protocol being attached. + * @result 0 on success otherwise the errno error.
*/ extern errno_t ifnet_attach_protocol(ifnet_t interface, protocol_family_t protocol_family, const struct ifnet_attach_proto_param *proto_details); /*! - @function ifnet_attach_protocol_v2 - @discussion Attaches a protocol to an interface using the newer - version 2 style interface. So far the only difference is support - for packet chains which improve performance. - @param interface The interface. - @param protocol_family The protocol family being attached - (PF_INET/PF_INET6/etc...). - @param proto_details Details of the protocol being attached. - @result 0 on success otherwise the errno error. + * @function ifnet_attach_protocol_v2 + * @discussion Attaches a protocol to an interface using the newer + * version 2 style interface. So far the only difference is support + * for packet chains which improve performance. + * @param interface The interface. + * @param protocol_family The protocol family being attached + * (PF_INET/PF_INET6/etc...). + * @param proto_details Details of the protocol being attached. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_attach_protocol_v2(ifnet_t interface, protocol_family_t protocol_family, const struct ifnet_attach_proto_param_v2 *proto_details); /*! - @function ifnet_detach_protocol - @discussion Detaches a protocol from an interface. - @param interface The interface. - @param protocol_family The protocol family of the protocol to - detach. - @result 0 on success otherwise the errno error. + * @function ifnet_detach_protocol + * @discussion Detaches a protocol from an interface. + * @param interface The interface. + * @param protocol_family The protocol family of the protocol to + * detach. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_detach_protocol(ifnet_t interface, protocol_family_t protocol_family); /*! - @function ifnet_output - @discussion Handles an outbound packet on the interface by calling - any filters, a protocol preoutput function, the interface framer - function, and finally the interface's output function. The - protocol_family will be used to apply protocol filters and - determine which preoutput function to call. The route and dest - parameters will be passed to the preoutput function defined for - the attachment of the specified protocol to the specified - interface. ifnet_output will always free the mbuf chain. - @param interface The interface. - @param protocol_family The family of the protocol generating this - packet (i.e. AF_INET). - @param packet The packet to be transmitted. - @param route A pointer to a routing structure for this packet. The - preoutput function determines whether this value may be NULL or - not. - @param dest The destination address of protocol_family type. This - will be passed to the preoutput function. If the preoutput - function does not require this value, you may pass NULL. - @result 0 on success otherwise the errno error. + * @function ifnet_output + * @discussion Handles an outbound packet on the interface by calling + * any filters, a protocol preoutput function, the interface framer + * function, and finally the interface's output function. The + * protocol_family will be used to apply protocol filters and + * determine which preoutput function to call. The route and dest + * parameters will be passed to the preoutput function defined for + * the attachment of the specified protocol to the specified + * interface. ifnet_output will always free the mbuf chain. + * @param interface The interface. 
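As a sketch of the attach/detach pairing above: the ifnet_attach_proto_param field used here (input, with the demux fields left zeroed) and the input callback signature are assumed to match their definitions earlier in this header, which fall outside this excerpt:

    #include <string.h>
    #include <sys/socket.h>
    #include <net/kpi_interface.h>

    /* Hypothetical input handler for the attached protocol. */
    static errno_t
    my_proto_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet,
        char *header)
    {
        /* ... hand the packet to the protocol; free it on error ... */
        return 0;
    }

    static errno_t
    my_proto_attach(ifnet_t ifp)   /* hypothetical */
    {
        struct ifnet_attach_proto_param param;

        bzero(&param, sizeof(param));
        param.input = my_proto_input; /* no demux descriptors in this sketch */
        return ifnet_attach_protocol(ifp, PF_INET, &param);
    }

    /* Teardown is the mirror image: ifnet_detach_protocol(ifp, PF_INET); */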
+ * @param protocol_family The family of the protocol generating this + * packet (i.e. AF_INET). + * @param packet The packet to be transmitted. + * @param route A pointer to a routing structure for this packet. The + * preoutput function determines whether this value may be NULL or + * not. + * @param dest The destination address of protocol_family type. This + * will be passed to the preoutput function. If the preoutput + * function does not require this value, you may pass NULL. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_output(ifnet_t interface, protocol_family_t protocol_family, mbuf_t packet, void *route, const struct sockaddr *dest); /*! - @function ifnet_output_raw - @discussion Handles and outbond raw packet on the interface by - calling any filters followed by the interface's output function. - protocol_family may be zero. If the packet is from a specific - protocol the protocol_family will be used to apply protocol - filters. All interface filters will be applied to the outgoing - packet. Processing, such as calling the protocol preoutput and - interface framer functions will be bypassed. The packet will - pass through the filters and be sent on the interface as is. - ifnet_output_raw will always free the packet chain. - @param interface The interface. - @param protocol_family The family of the protocol generating this - packet (i.e. AF_INET). - @param packet The fully formed packet to be transmitted. - @result 0 on success otherwise the errno error. + * @function ifnet_output_raw + * @discussion Handles an outbound raw packet on the interface by + * calling any filters followed by the interface's output function. + * protocol_family may be zero. If the packet is from a specific + * protocol the protocol_family will be used to apply protocol + * filters. All interface filters will be applied to the outgoing + * packet. Processing, such as calling the protocol preoutput and + * interface framer functions, will be bypassed. The packet will + * pass through the filters and be sent on the interface as is. + * ifnet_output_raw will always free the packet chain. + * @param interface The interface. + * @param protocol_family The family of the protocol generating this + * packet (i.e. AF_INET). + * @param packet The fully formed packet to be transmitted. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t packet); /*! - @function ifnet_input - @discussion Inputs packets from the interface. The interface's demux - will be called to determine the protocol. Once the protocol is - determined, the interface filters and protocol filters will be - called. From there, the packet will be passed to the registered - protocol. If there is an error, the mbuf chain will be freed. + * @function ifnet_input + * @discussion Inputs packets from the interface. The interface's demux + * will be called to determine the protocol. Once the protocol is + * determined, the interface filters and protocol filters will be + * called. From there, the packet will be passed to the registered + * protocol. If there is an error, the mbuf chain will be freed.
+ * @param interface The interface. + * @param first_packet The first packet in a chain of packets. + * @param stats Counts to be integrated in to the stats. The interface + * statistics will be incremented by the amounts specified in + * stats. This parameter may be NULL. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_input(ifnet_t interface, mbuf_t first_packet, const struct ifnet_stat_increment_param *stats); #ifdef KERNEL_PRIVATE /* - @function ifnet_input_extended - @discussion Inputs packets from the interface. The interface's demux - will be called to determine the protocol. Once the protocol is - determined, the interface filters and protocol filters will be - called. From there, the packet will be passed to the registered - protocol. If there is an error, the mbuf chain will be freed. - @param interface The interface. - @param first_packet The first packet in a chain of packets. - @param last_packet The last packet in a chain of packets. This may be - set to NULL if the driver does not have the information. - @param stats Counts to be integrated in to the stats. The interface - statistics will be incremented by the amounts specified in - stats. Unlike ifnet_input(), this parameter is required by - this extended variant. - @result 0 on success otherwise the errno error. + * @function ifnet_input_extended + * @discussion Inputs packets from the interface. The interface's demux + * will be called to determine the protocol. Once the protocol is + * determined, the interface filters and protocol filters will be + * called. From there, the packet will be passed to the registered + * protocol. If there is an error, the mbuf chain will be freed. + * @param interface The interface. + * @param first_packet The first packet in a chain of packets. + * @param last_packet The last packet in a chain of packets. This may be + * set to NULL if the driver does not have the information. + * @param stats Counts to be integrated in to the stats. The interface + * statistics will be incremented by the amounts specified in + * stats. Unlike ifnet_input(), this parameter is required by + * this extended variant. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_input_extended(ifnet_t interface, mbuf_t first_packet, mbuf_t last_packet, const struct ifnet_stat_increment_param *stats); #endif /* KERNEL_PRIVATE */ /*! - @function ifnet_ioctl - @discussion Calls the interface's ioctl function with the parameters - passed. - - All undefined ioctls are reserved for future use by Apple. If - you need to communicate with your kext using an ioctl, please - use SIOCSIFKPI and SIOCGIFKPI. - @param interface The interface. - @param protocol The protocol family of the protocol to send the - ioctl to (may be zero). Some ioctls apply to a protocol while - other ioctls apply to just an interface. - @param ioctl_code The ioctl to perform. - @param ioctl_arg Any parameters to the ioctl. - @result 0 on success otherwise the errno error. + * @function ifnet_ioctl + * @discussion Calls the interface's ioctl function with the parameters + * passed. + * + * All undefined ioctls are reserved for future use by Apple. If + * you need to communicate with your kext using an ioctl, please + * use SIOCSIFKPI and SIOCGIFKPI. + * @param interface The interface. + * @param protocol The protocol family of the protocol to send the + * ioctl to (may be zero). Some ioctls apply to a protocol while + * other ioctls apply to just an interface. + * @param ioctl_code The ioctl to perform. 
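A hypothetical receive-completion fragment showing the stats parameter just described; the ifnet_stat_increment_param field names packets_in and bytes_in are assumed from the structure's definition earlier in this header:

    #include <string.h>
    #include <sys/kpi_mbuf.h>
    #include <net/kpi_interface.h>

    /* Hypothetical RX path: hand one received chain to the stack. */
    static void
    my_driver_rx(ifnet_t my_ifp, mbuf_t m)
    {
        struct ifnet_stat_increment_param stats;

        bzero(&stats, sizeof(stats));
        stats.packets_in = 1;
        stats.bytes_in = (u_int32_t)mbuf_pkthdr_len(m);

        /* On error ifnet_input() frees the chain, so m must not be
         * touched afterwards in either case. */
        (void)ifnet_input(my_ifp, m, &stats);
    }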
+ * @param ioctl_arg Any parameters to the ioctl. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_ioctl(ifnet_t interface, protocol_family_t protocol, unsigned long ioctl_code, void *ioctl_arg); /*! - @function ifnet_event - @discussion Calls the interface's event function. - @param interface The interface. - @param event_ptr Pointer to an kern_event structure describing the - event. - @result 0 on success otherwise the errno error. + * @function ifnet_event + * @discussion Calls the interface's event function. + * @param interface The interface. + * @param event_ptr Pointer to a kern_event structure describing the + * event. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_event(ifnet_t interface, struct kern_event_msg *event_ptr); /*! - @function ifnet_set_mtu - @discussion Sets the value of the MTU in the interface structure. - Calling this function will not notify the driver that the MTU - should be changed. Use the appropriate ioctl. - - This function is intended to be called by the driver. A kext - must not call this function on an interface the kext does not - own. - @param interface The interface. - @param mtu The new MTU. - @result 0 on success otherwise the errno error. + * @function ifnet_set_mtu + * @discussion Sets the value of the MTU in the interface structure. + * Calling this function will not notify the driver that the MTU + * should be changed. Use the appropriate ioctl. + * + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface The interface. + * @param mtu The new MTU. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_mtu(ifnet_t interface, u_int32_t mtu); /*! - @function ifnet_mtu - @param interface The interface. - @result The MTU. + * @function ifnet_mtu + * @param interface The interface. + * @result The MTU. */ extern u_int32_t ifnet_mtu(ifnet_t interface); /*! - @function ifnet_type - @param interface The interface. - @result The type. See net/if_types.h. + * @function ifnet_type + * @param interface The interface. + * @result The type. See net/if_types.h. */ extern u_int8_t ifnet_type(ifnet_t interface); /*! - @function ifnet_set_addrlen - @discussion - This function is intended to be called by the driver. A kext - must not call this function on an interface the kext does not - own. - @param interface The interface. - @param addrlen The new address length. - @result 0 on success otherwise the errno error. + * @function ifnet_set_addrlen + * @discussion + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface The interface. + * @param addrlen The new address length. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_addrlen(ifnet_t interface, u_int8_t addrlen); /*! - @function ifnet_addrlen - @param interface The interface. - @result The address length. + * @function ifnet_addrlen + * @param interface The interface. + * @result The address length. */ extern u_int8_t ifnet_addrlen(ifnet_t interface); /*! - @function ifnet_set_hdrlen - @discussion - This function is intended to be called by the driver. A kext - must not call this function on an interface the kext does not - own. - @param interface The interface. - @param hdrlen The new header length. - @result 0 on success otherwise the errno error.
+ * @function ifnet_set_hdrlen + * @discussion + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface The interface. + * @param hdrlen The new header length. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_hdrlen(ifnet_t interface, u_int8_t hdrlen); /*! - @function ifnet_hdrlen - @param interface The interface. - @result The header length. + * @function ifnet_hdrlen + * @param interface The interface. + * @result The header length. */ extern u_int8_t ifnet_hdrlen(ifnet_t interface); /*! - @function ifnet_set_metric - @discussion - This function is intended to be called by the driver. A kext - must not call this function on an interface the kext does not - own. - @param interface The interface. - @param metric The new metric. - @result 0 on success otherwise the errno error. + * @function ifnet_set_metric + * @discussion + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface The interface. + * @param metric The new metric. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_metric(ifnet_t interface, u_int32_t metric); /*! - @function ifnet_metric - @param interface The interface. - @result The metric. + * @function ifnet_metric + * @param interface The interface. + * @result The metric. */ extern u_int32_t ifnet_metric(ifnet_t interface); /*! - @function ifnet_set_baudrate - @discussion - This function is intended to be called by the driver. A kext - must not call this function on an interface the kext does not - own. - @param interface The interface. - @param baudrate The new baudrate. - @result 0 on success otherwise the errno error. + * @function ifnet_set_baudrate + * @discussion + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface The interface. + * @param baudrate The new baudrate. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_baudrate(ifnet_t interface, u_int64_t baudrate); /*! - @function ifnet_baudrate - @param interface The interface. - @result The baudrate. + * @function ifnet_baudrate + * @param interface The interface. + * @result The baudrate. */ extern u_int64_t ifnet_baudrate(ifnet_t interface); @@ -2449,33 +2449,33 @@ extern u_int64_t ifnet_baudrate(ifnet_t interface); typedef struct if_bandwidths if_bandwidths_t; /* - @function ifnet_set_bandwidths - @discussion This function allows a driver to indicate the output - and/or input bandwidth information to the system. Each set - is comprised of the effective and maximum theoretical values. - Each value must be greater than zero. - @param interface The interface. - @param output_bw The output bandwidth values (in bits per second). - May be set to NULL if the caller does not want to alter the - existing output bandwidth values. - @param input_bw The input bandwidth values (in bits per second). - May be set to NULL if the caller does not want to alter the - existing input bandwidth values. - @result 0 on success otherwise the errno error. + * @function ifnet_set_bandwidths + * @discussion This function allows a driver to indicate the output + * and/or input bandwidth information to the system. Each set + * is comprised of the effective and maximum theoretical values. + * Each value must be greater than zero. 
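An illustrative call for the bandwidth function being described; note that the if_bandwidths field names eff_bw and max_bw are an assumption here (the structure is declared outside this excerpt), and the gigabit figures are invented:

    #include <net/kpi_interface.h>

    static errno_t
    my_driver_report_bandwidth(ifnet_t my_ifp)   /* hypothetical */
    {
        if_bandwidths_t bw;

        bw.eff_bw = 900ULL * 1000 * 1000;    /* effective ~900 Mbit/s */
        bw.max_bw = 1000ULL * 1000 * 1000;   /* theoretical 1 Gbit/s */

        /* NULL input_bw leaves the existing input values unchanged. */
        return ifnet_set_bandwidths(my_ifp, &bw, NULL);
    }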
+ * @param interface The interface. + * @param output_bw The output bandwidth values (in bits per second). + * May be set to NULL if the caller does not want to alter the + * existing output bandwidth values. + * @param input_bw The input bandwidth values (in bits per second). + * May be set to NULL if the caller does not want to alter the + * existing input bandwidth values. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_bandwidths(ifnet_t interface, if_bandwidths_t *output_bw, if_bandwidths_t *input_bw); /* - @function ifnet_bandwidths - @param interface The interface. - @param output_bw The output bandwidth values (in bits per second). - May be set to NULL if the caller does not want to retrieve the - output bandwidth value. - @param input_bw The input bandwidth values (in bits per second). - May be set to NULL if the caller does not want to retrieve the - input bandwidth value. - @result 0 on success otherwise the errno error. + * @function ifnet_bandwidths + * @param interface The interface. + * @param output_bw The output bandwidth values (in bits per second). + * May be set to NULL if the caller does not want to retrieve the + * output bandwidth value. + * @param input_bw The input bandwidth values (in bits per second). + * May be set to NULL if the caller does not want to retrieve the + * input bandwidth value. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_bandwidths(ifnet_t interface, if_bandwidths_t *output_bw, if_bandwidths_t *input_bw); @@ -2483,194 +2483,194 @@ extern errno_t ifnet_bandwidths(ifnet_t interface, if_bandwidths_t *output_bw, typedef struct if_latencies if_latencies_t; /* - @function ifnet_set_latencies - @discussion This function allows a driver to indicate the output - and/or input latency information to the system. Each set - is comprised of the effective and maximum theoretical values. - Each value must be greater than zero. - @param interface The interface. - @param output_lt The output latency values (in nanosecond). - May be set to NULL if the caller does not want to alter the - existing output latency values. - @param input_lt The input latency values (in nanosecond). - May be set to NULL if the caller does not want to alter the - existing input latency values. - @result 0 on success otherwise the errno error. + * @function ifnet_set_latencies + * @discussion This function allows a driver to indicate the output + * and/or input latency information to the system. Each set + * is comprised of the effective and maximum theoretical values. + * Each value must be greater than zero. + * @param interface The interface. + * @param output_lt The output latency values (in nanoseconds). + * May be set to NULL if the caller does not want to alter the + * existing output latency values. + * @param input_lt The input latency values (in nanoseconds). + * May be set to NULL if the caller does not want to alter the + * existing input latency values. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_latencies(ifnet_t interface, if_latencies_t *output_lt, if_latencies_t *input_lt); /* - @function ifnet_latencies - @param interface The interface. - @param output_lt The output latency values (in nanosecond). - May be set to NULL if the caller does not want to retrieve the - output latency value. - @param input_lt The input latency values (in nanosecond). - May be set to NULL if the caller does not want to retrieve the - input latency value.
- @result 0 on success otherwise the errno error. + * @function ifnet_latencies + * @param interface The interface. + * @param output_lt The output latency values (in nanoseconds). + * May be set to NULL if the caller does not want to retrieve the + * output latency value. + * @param input_lt The input latency values (in nanoseconds). + * May be set to NULL if the caller does not want to retrieve the + * input latency value. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_latencies(ifnet_t interface, if_latencies_t *output_lt, if_latencies_t *input_lt); #endif /* KERNEL_PRIVATE */ /*! - @function ifnet_stat_increment - @discussion - This function is intended to be called by the driver. A kext - must not call this function on an interface the kext does not - own. - @param interface The interface. - @param counts A pointer to a structure containing the amount to - increment each counter by. Any counts not appearing in the - ifnet_counter_increment structure are handled in the stack. - @result 0 on success otherwise the errno error. + * @function ifnet_stat_increment + * @discussion + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * @param interface The interface. + * @param counts A pointer to a structure containing the amount to + * increment each counter by. Any counts not appearing in the + * ifnet_counter_increment structure are handled in the stack. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_stat_increment(ifnet_t interface, const struct ifnet_stat_increment_param *counts); /*! - @function ifnet_stat_increment_in - @discussion - This function is intended to be called by the driver. This - function allows a driver to update the inbound interface counts. - The most efficient time to update these counts is when calling - ifnet_input. - - A lock protects the counts, this makes the increment functions - expensive. The increment function will update the lastchanged - value. - @param interface The interface. - @param packets_in The number of additional packets received. - @param bytes_in The number of additional bytes received. - @param errors_in The number of additional receive errors. - @result 0 on success otherwise the errno error. + * @function ifnet_stat_increment_in + * @discussion + * This function is intended to be called by the driver. This + * function allows a driver to update the inbound interface counts. + * The most efficient time to update these counts is when calling + * ifnet_input. + * + * A lock protects the counts; this makes the increment functions + * expensive. The increment function will update the lastchanged + * value. + * @param interface The interface. + * @param packets_in The number of additional packets received. + * @param bytes_in The number of additional bytes received. + * @param errors_in The number of additional receive errors. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_stat_increment_in(ifnet_t interface, u_int32_t packets_in, u_int32_t bytes_in, u_int32_t errors_in); /*! - @function ifnet_stat_increment_out - @discussion - This function is intended to be called by the driver. This - function allows a driver to update the outbound interface - counts. - - A lock protects the counts, this makes the increment functions - expensive. The increment function will update the lastchanged - value. - @param interface The interface.
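A sketch of the outbound counterpart documented next; since the counters are lock-protected and therefore relatively costly to touch (per the note above), a hypothetical driver batches one update per completion interrupt instead of one per packet:

    #include <net/kpi_interface.h>

    /* Hypothetical TX-completion handler: report a whole batch at once. */
    static void
    my_driver_tx_complete(ifnet_t my_ifp, u_int32_t pkts, u_int32_t bytes,
        u_int32_t errs)
    {
        (void)ifnet_stat_increment_out(my_ifp, pkts, bytes, errs);
    }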
- @param packets_out The number of additional packets sent. - @param bytes_out The number of additional bytes sent. - @param errors_out The number of additional send errors. - @result 0 on success otherwise the errno error. + * @function ifnet_stat_increment_out + * @discussion + * This function is intended to be called by the driver. This + * function allows a driver to update the outbound interface + * counts. + * + * A lock protects the counts; this makes the increment functions + * expensive. The increment function will update the lastchanged + * value. + * @param interface The interface. + * @param packets_out The number of additional packets sent. + * @param bytes_out The number of additional bytes sent. + * @param errors_out The number of additional send errors. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_stat_increment_out(ifnet_t interface, -u_int32_t packets_out, u_int32_t bytes_out, u_int32_t errors_out); + u_int32_t packets_out, u_int32_t bytes_out, u_int32_t errors_out); /*! - @function ifnet_set_stat - @discussion - This function is intended to be called by the driver. A kext - must not call this function on an interface the kext does not - own. - - The one exception would be the case where a kext wants to zero - all of the counters. - @param interface The interface. - @param stats The new stats values. - @result 0 on success otherwise the errno error. + * @function ifnet_set_stat + * @discussion + * This function is intended to be called by the driver. A kext + * must not call this function on an interface the kext does not + * own. + * + * The one exception would be the case where a kext wants to zero + * all of the counters. + * @param interface The interface. + * @param stats The new stats values. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_stat(ifnet_t interface, const struct ifnet_stats_param *stats); /*! - @function ifnet_stat - @param interface The interface. - @param out_stats Storage for the values. - @result 0 on success otherwise the errno error. + * @function ifnet_stat + * @param interface The interface. + * @param out_stats Storage for the values. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_stat(ifnet_t interface, struct ifnet_stats_param *out_stats); /*! - @function ifnet_set_promiscuous - @discussion Enable or disable promiscuous mode on the interface. The - interface keeps an internal count of the number of times - promiscuous mode has been enabled. Promiscuous mode is only - disabled when this count reaches zero. Be sure to disable - promiscuous mode only once for every time you enable it. - @param interface The interface to toggle promiscuous mode on. + * @function ifnet_set_promiscuous + * @discussion Enable or disable promiscuous mode on the interface. The + * interface keeps an internal count of the number of times + * promiscuous mode has been enabled. Promiscuous mode is only + * disabled when this count reaches zero. Be sure to disable + * promiscuous mode only once for every time you enable it. + * @param interface The interface to toggle promiscuous mode on.
+ * @param on If set, the number of promiscuous on requests will be + * incremented. If this is the first request, promiscuous mode + * will be enabled. If this is not set, the number of promiscuous + * clients will be decremented. If this causes the number to reach + * zero, promiscuous mode will be disabled. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_set_promiscuous(ifnet_t interface, int on); /*! - @function ifnet_touch_lastchange - @discussion Updates the lastchange value to now. - @param interface The interface. - @result 0 on success otherwise the errno error. + * @function ifnet_touch_lastchange + * @discussion Updates the lastchange value to now. + * @param interface The interface. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_touch_lastchange(ifnet_t interface); /*! - @function ifnet_lastchange - @param interface The interface. - @param last_change A timeval struct to copy the last time changed in - to. + * @function ifnet_lastchange + * @param interface The interface. + * @param last_change A timeval struct to copy the last time changed in + * to. */ extern errno_t ifnet_lastchange(ifnet_t interface, struct timeval *last_change); /*! - @function ifnet_get_address_list - @discussion Get a list of addresses on the interface. Passing NULL - for the interface will return a list of all addresses. The - addresses will have their reference count bumped so they will - not go away. Calling ifnet_free_address_list will decrement the - refcount and free the array. If you wish to hold on to a - reference to an ifaddr_t, be sure to bump the reference count - before calling ifnet_free_address_list. - @param interface The interface. - @param addresses A pointer to a NULL terminated array of ifaddr_ts. - @result 0 on success otherwise the errno error. + * @function ifnet_get_address_list + * @discussion Get a list of addresses on the interface. Passing NULL + * for the interface will return a list of all addresses. The + * addresses will have their reference count bumped so they will + * not go away. Calling ifnet_free_address_list will decrement the + * refcount and free the array. If you wish to hold on to a + * reference to an ifaddr_t, be sure to bump the reference count + * before calling ifnet_free_address_list. + * @param interface The interface. + * @param addresses A pointer to a NULL terminated array of ifaddr_ts. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses); /*! - @function ifnet_get_address_list_family - @discussion Get a list of addresses on the interface. Passing NULL - for the interface will return a list of all addresses. The - addresses will have their reference count bumped so they will - not go away. Calling ifnet_free_address_list will decrement the - refcount and free the array. If you wish to hold on to a - reference to an ifaddr_t, be sure to bump the reference count - before calling ifnet_free_address_list. Unlike - ifnet_get_address_list, this function lets the caller specify - the address family to get a list of only a specific address type. - @param interface The interface. - @param addresses A pointer to a NULL terminated array of ifaddr_ts. - @result 0 on success otherwise the errno error. + * @function ifnet_get_address_list_family + * @discussion Get a list of addresses on the interface. Passing NULL + * for the interface will return a list of all addresses.
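Returning to ifnet_set_promiscuous() above: because enables are counted, each enable must be balanced by exactly one disable, as in this hypothetical sniffer:

    #include <net/kpi_interface.h>

    static errno_t
    my_sniffer_start(ifnet_t my_ifp)   /* hypothetical */
    {
        return ifnet_set_promiscuous(my_ifp, 1);  /* count goes up */
    }

    static void
    my_sniffer_stop(ifnet_t my_ifp)
    {
        (void)ifnet_set_promiscuous(my_ifp, 0);   /* count goes down */
    }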
The + * addresses will have their reference count bumped so they will + * not go away. Calling ifnet_free_address_list will decrement the + * refcount and free the array. If you wish to hold on to a + * reference to an ifaddr_t, be sure to bump the reference count + * before calling ifnet_free_address_list. Unlike + * ifnet_get_address_list, this function lets the caller specify + * the address family to get a list of only a specific address type. + * @param interface The interface. + * @param addresses A pointer to a NULL terminated array of ifaddr_ts. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses, sa_family_t family); #ifdef KERNEL_PRIVATE /*! - @function ifnet_get_inuse_address_list - @discussion Get a list of addresses on the interface that are in - use by atleast one TCP or UDP socket. The rest of the API is similar - to ifnet_get_address_list. Calling ifnet_free_address_list will - free the array of addresses. Note this only gives a point in time - snapshot of the addresses in use. - @param interface The interface - @param addresses A pointer to a NULL terminated array of ifaddr_ts - @result 0 on success otherwise the errno error. + * @function ifnet_get_inuse_address_list + * @discussion Get a list of addresses on the interface that are in + * use by at least one TCP or UDP socket. The rest of the API is similar + * to ifnet_get_address_list. Calling ifnet_free_address_list will + * free the array of addresses. Note this only gives a point-in-time + * snapshot of the addresses in use. + * @param interface The interface + * @param addresses A pointer to a NULL terminated array of ifaddr_ts + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses); @@ -2680,232 +2680,232 @@ __private_extern__ errno_t ifnet_get_address_list_family_internal(ifnet_t, #endif /* KERNEL_PRIVATE */ /*! - @function ifnet_free_address_list - @discussion Free a list of addresses returned from - ifnet_get_address_list. Decrements the refcounts and frees the - memory used for the array of references. - @param addresses An array of ifaddr_ts. + * @function ifnet_free_address_list + * @discussion Free a list of addresses returned from + * ifnet_get_address_list. Decrements the refcounts and frees the + * memory used for the array of references. + * @param addresses An array of ifaddr_ts. */ extern void ifnet_free_address_list(ifaddr_t *addresses); /*! - @function ifnet_set_lladdr - @discussion Sets the link-layer address for this interface. - @param interface The interface the link layer address is being - changed on. - @param lladdr A pointer to the raw link layer address (pointer to - the 6 byte ethernet address for ethernet). - @param lladdr_len The length, in bytes, of the link layer address. + * @function ifnet_set_lladdr + * @discussion Sets the link-layer address for this interface. + * @param interface The interface the link layer address is being + * changed on. + * @param lladdr A pointer to the raw link layer address (pointer to + * the 6 byte ethernet address for ethernet). + * @param lladdr_len The length, in bytes, of the link layer address. */ extern errno_t ifnet_set_lladdr(ifnet_t interface, const void *lladdr, size_t lladdr_len); /*! - @function ifnet_lladdr_copy_bytes - @discussion Copies the bytes of the link-layer address into the - specified buffer. - @param interface The interface to copy the link-layer address from.
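A sketch of the get/iterate/free pattern described above, for a hypothetical caller that only inspects each address; ifaddr_address() is the accessor documented at the end of this excerpt:

    #include <sys/socket.h>
    #include <net/kpi_interface.h>

    static void
    my_dump_addresses(ifnet_t my_ifp)   /* hypothetical */
    {
        ifaddr_t *addrs = NULL;
        struct sockaddr_storage ss;
        int i;

        if (ifnet_get_address_list(my_ifp, &addrs) != 0)
            return;
        for (i = 0; addrs[i] != NULL; i++) {
            if (ifaddr_address(addrs[i], (struct sockaddr *)&ss,
                sizeof(ss)) == 0) {
                /* ... inspect ss ... */
            }
        }
        /* Drops the refcounts taken on our behalf and frees the array. */
        ifnet_free_address_list(addrs);
    }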
- @param lladdr The buffer to copy the link-layer address in to. - @param length The length of the buffer. This value must match the - length of the link-layer address. + * @function ifnet_lladdr_copy_bytes + * @discussion Copies the bytes of the link-layer address into the + * specified buffer. + * @param interface The interface to copy the link-layer address from. + * @param lladdr The buffer to copy the link-layer address in to. + * @param length The length of the buffer. This value must match the + * length of the link-layer address. */ extern errno_t ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length); #ifdef KERNEL_PRIVATE /*! - @function ifnet_guarded_lladdr_copy_bytes - @discussion Copies the bytes of the link-layer address into the - specified buffer unless the current process is a sandboxed - application without the net.link.addr system info privilege. - @param interface The interface to copy the link-layer address from. - @param lladdr The buffer to copy the link-layer address in to. - @param length The length of the buffer. This value must match the - length of the link-layer address. + * @function ifnet_guarded_lladdr_copy_bytes + * @discussion Copies the bytes of the link-layer address into the + * specified buffer unless the current process is a sandboxed + * application without the net.link.addr system info privilege. + * @param interface The interface to copy the link-layer address from. + * @param lladdr The buffer to copy the link-layer address in to. + * @param length The length of the buffer. This value must match the + * length of the link-layer address. */ extern errno_t ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length); /*! - @function ifnet_lladdr - @discussion Returns a pointer to the link-layer address. - @param interface The interface the link-layer address is on. + * @function ifnet_lladdr + * @discussion Returns a pointer to the link-layer address. + * @param interface The interface the link-layer address is on. */ extern void *ifnet_lladdr(ifnet_t interface); #endif /* KERNEL_PRIVATE */ /*! - @function ifnet_llbroadcast_copy_bytes - @discussion Retrieves the link-layer broadcast address for this - interface. - @param interface The interface. - @param addr A buffer to copy the broadcast address in to. - @param bufferlen The length of the buffer at addr. - @param out_len On return, the length of the broadcast address. + * @function ifnet_llbroadcast_copy_bytes + * @discussion Retrieves the link-layer broadcast address for this + * interface. + * @param interface The interface. + * @param addr A buffer to copy the broadcast address in to. + * @param bufferlen The length of the buffer at addr. + * @param out_len On return, the length of the broadcast address. */ extern errno_t ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t bufferlen, size_t *out_len); #ifdef KERNEL_PRIVATE /*! - @function ifnet_set_lladdr_and_type - @discussion Sets the link-layer address as well as the type field in - the sockaddr_dl. Support for setting the type was added for vlan - and bond interfaces. - @param interface The interface the link layer address is being - changed on. - @param lladdr A pointer to the raw link layer address (pointer to - the 6 byte ethernet address for ethernet). - @param length The length, in bytes, of the link layer address. - @param type The link-layer address type. 
+ * @function ifnet_set_lladdr_and_type + * @discussion Sets the link-layer address as well as the type field in + * the sockaddr_dl. Support for setting the type was added for vlan + * and bond interfaces. + * @param interface The interface the link layer address is being + * changed on. + * @param lladdr A pointer to the raw link layer address (pointer to + * the 6 byte ethernet address for ethernet). + * @param length The length, in bytes, of the link layer address. + * @param type The link-layer address type. */ extern errno_t ifnet_set_lladdr_and_type(ifnet_t interface, const void *lladdr, size_t length, u_char type); #endif /* KERNEL_PRIVATE */ /*! - @function ifnet_resolve_multicast - @discussion Resolves a multicast address for an attached protocol to - a link-layer address. If a link-layer address is passed in, the - interface will verify that it is a valid multicast address. - @param ifp The interface. - @param proto_addr A protocol address to be converted to a link-layer - address. - @param ll_addr Storage for the resulting link-layer address. - @param ll_len Length of the storage for the link-layer address. - @result 0 on success. EOPNOTSUPP indicates the multicast address was - not supported or could not be translated. Other errors may - indicate other failures. + * @function ifnet_resolve_multicast + * @discussion Resolves a multicast address for an attached protocol to + * a link-layer address. If a link-layer address is passed in, the + * interface will verify that it is a valid multicast address. + * @param ifp The interface. + * @param proto_addr A protocol address to be converted to a link-layer + * address. + * @param ll_addr Storage for the resulting link-layer address. + * @param ll_len Length of the storage for the link-layer address. + * @result 0 on success. EOPNOTSUPP indicates the multicast address was + * not supported or could not be translated. Other errors may + * indicate other failures. */ extern errno_t ifnet_resolve_multicast(ifnet_t ifp, const struct sockaddr *proto_addr, struct sockaddr *ll_addr, size_t ll_len); /*! - @function ifnet_add_multicast - @discussion Joins a multicast and returns an ifmultiaddr_t with the - reference count incremented for you. You are responsible for - decrementing the reference count after calling - ifnet_remove_multicast and making sure you no longer have any - references to the multicast. - @param interface The interface. - @param maddr The multicast address (AF_UNSPEC/AF_LINK) to join. Either - a physical address or logical address to be translated to a - physical address. - @param multicast The resulting ifmultiaddr_t multicast address. - @result 0 on success otherwise the errno error. + * @function ifnet_add_multicast + * @discussion Joins a multicast and returns an ifmultiaddr_t with the + * reference count incremented for you. You are responsible for + * decrementing the reference count after calling + * ifnet_remove_multicast and making sure you no longer have any + * references to the multicast. + * @param interface The interface. + * @param maddr The multicast address (AF_UNSPEC/AF_LINK) to join. Either + * a physical address or logical address to be translated to a + * physical address. + * @param multicast The resulting ifmultiaddr_t multicast address. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr, ifmultiaddr_t *multicast); /*! 
- @function ifnet_remove_multicast - @discussion Causes the interface to leave the multicast group. The - stack keeps track of how many times ifnet_add_multicast has been - called for a given multicast address. The multicast will only be - removed when the number of times ifnet_remove_multicast has been - called matches the number of times ifnet_add_multicast has been - called. - - The memory for the multicast address is not actually freed until - the separate reference count has reached zero. Some parts of the - stack may keep a pointer to the multicast even after that - multicast has been removed from the interface. - - When an interface is detached, all of the multicasts are - removed. If the interface of the multicast passed in is no - longer attached, this function will gracefully return, - performing no work. - - It is the callers responsibility to release the multicast - address after calling this function. - @param multicast The multicast to be removed. - @result 0 on success otherwise the errno error. + * @function ifnet_remove_multicast + * @discussion Causes the interface to leave the multicast group. The + * stack keeps track of how many times ifnet_add_multicast has been + * called for a given multicast address. The multicast will only be + * removed when the number of times ifnet_remove_multicast has been + * called matches the number of times ifnet_add_multicast has been + * called. + * + * The memory for the multicast address is not actually freed until + * the separate reference count has reached zero. Some parts of the + * stack may keep a pointer to the multicast even after that + * multicast has been removed from the interface. + * + * When an interface is detached, all of the multicasts are + * removed. If the interface of the multicast passed in is no + * longer attached, this function will gracefully return, + * performing no work. + * + * It is the caller's responsibility to release the multicast + * address after calling this function. + * @param multicast The multicast to be removed. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_remove_multicast(ifmultiaddr_t multicast); /*! - @function ifnet_get_multicast_list - @discussion Retrieves a list of multicast address the interface is - set to receive. This function allocates and returns an array of - references to the various multicast addresses. The multicasts - have their reference counts bumped on your behalf. Calling - ifnet_free_multicast_list will decrement the reference counts - and free the array. - @param interface The interface. - @param addresses A pointer to a NULL terminated array of references - to the multicast addresses. - @result 0 on success otherwise the errno error. + * @function ifnet_get_multicast_list + * @discussion Retrieves a list of multicast addresses the interface is + * set to receive. This function allocates and returns an array of + * references to the various multicast addresses. The multicasts + * have their reference counts bumped on your behalf. Calling + * ifnet_free_multicast_list will decrement the reference counts + * and free the array. + * @param interface The interface. + * @param addresses A pointer to a NULL terminated array of references + * to the multicast addresses. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_get_multicast_list(ifnet_t interface, ifmultiaddr_t **addresses); /*! - @function ifnet_free_multicast_list - @discussion Frees a list of multicasts returned by - ifnet_get_multicast_list.
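A sketch of the join/leave pairing, keeping the ifmultiaddr_t reference returned by the join; the group address is an arbitrary example, and ifmaddr_release() (the release call the text above alludes to) is assumed to be declared later in this header:

    #include <netinet/in.h>
    #include <string.h>
    #include <net/kpi_interface.h>

    static ifmultiaddr_t my_maddr;   /* reference returned by the join */

    static errno_t
    my_join_group(ifnet_t my_ifp)   /* hypothetical */
    {
        struct sockaddr_in sin;

        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = htonl(0xe0000116);  /* 224.0.1.22, example */

        return ifnet_add_multicast(my_ifp, (const struct sockaddr *)&sin,
            &my_maddr);
    }

    static void
    my_leave_group(void)
    {
        /* Leave the group, then drop the reference taken by the join. */
        (void)ifnet_remove_multicast(my_maddr);
        (void)ifmaddr_release(my_maddr);
    }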
Decrements the refcount on each - multicast address and frees the array. - @param multicasts An array of references to the multicast addresses. + * @function ifnet_free_multicast_list + * @discussion Frees a list of multicasts returned by + * ifnet_get_multicast_list. Decrements the refcount on each + * multicast address and frees the array. + * @param multicasts An array of references to the multicast addresses. */ extern void ifnet_free_multicast_list(ifmultiaddr_t *multicasts); /*! - @function ifnet_find_by_name - @discussion Find an interface by the name including the unit number. - Caller must call ifnet_release on any non-null interface return - value. - @param ifname The name of the interface, including any unit number - (i.e. "en0"). - @param interface A pointer to an interface reference. This will be - filled in if a matching interface is found. - @result 0 on success otherwise the errno error. + * @function ifnet_find_by_name + * @discussion Finds an interface by name, including the unit number. + * Caller must call ifnet_release on any non-null interface return + * value. + * @param ifname The name of the interface, including any unit number + * (e.g. "en0"). + * @param interface A pointer to an interface reference. This will be + * filled in if a matching interface is found. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_find_by_name(const char *ifname, ifnet_t *interface); /*! - @function ifnet_list_get - @discussion Get a list of attached interfaces. List will be set to - point to an array allocated by ifnet_list_get. The interfaces - are refcounted and the counts will be incremented before the - function returns. The list of interfaces must be freed using - ifnet_list_free. - @param family The interface family (i.e. IFNET_FAMILY_ETHERNET). To - find interfaces of all families, use IFNET_FAMILY_ANY. - @param interfaces A pointer to an array of interface references. - @param count A pointer that will be filled in with the number of - matching interfaces in the array. - @result 0 on success otherwise the errno error. + * @function ifnet_list_get + * @discussion Get a list of attached interfaces. The list will be set to + * point to an array allocated by ifnet_list_get. The interfaces + * are refcounted and the counts will be incremented before the + * function returns. The list of interfaces must be freed using + * ifnet_list_free. + * @param family The interface family (e.g. IFNET_FAMILY_ETHERNET). To + * find interfaces of all families, use IFNET_FAMILY_ANY. + * @param interfaces A pointer to an array of interface references. + * @param count A pointer that will be filled in with the number of + * matching interfaces in the array. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_list_get(ifnet_family_t family, ifnet_t **interfaces, u_int32_t *count); #ifdef KERNEL_PRIVATE /*! - @function ifnet_list_get_all - @discussion Get a list of attached interfaces. List will be set to - point to an array allocated by ifnet_list_get. The interfaces - are refcounted and the counts will be incremented before the - function returns. The list of interfaces must be freed using - ifnet_list_free. This is similar to ifnet_list_get, except - that it includes interfaces that are detaching. - @param family The interface family (i.e. IFNET_FAMILY_ETHERNET). To - find interfaces of all families, use IFNET_FAMILY_ANY. - @param interfaces A pointer to an array of interface references.
- @param count A pointer that will be filled in with the number of - matching interfaces in the array. - @result 0 on success otherwise the errno error. + * @function ifnet_list_get_all + * @discussion Get a list of attached interfaces. List will be set to + * point to an array allocated by ifnet_list_get. The interfaces + * are refcounted and the counts will be incremented before the + * function returns. The list of interfaces must be freed using + * ifnet_list_free. This is similar to ifnet_list_get, except + * that it includes interfaces that are detaching. + * @param family The interface family (i.e. IFNET_FAMILY_ETHERNET). To + * find interfaces of all families, use IFNET_FAMILY_ANY. + * @param interfaces A pointer to an array of interface references. + * @param count A pointer that will be filled in with the number of + * matching interfaces in the array. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_list_get_all(ifnet_family_t family, ifnet_t **interfaces, u_int32_t *count); #endif /* KERNEL_PRIVATE */ /*! - @function ifnet_list_free - @discussion Free a list of interfaces returned by ifnet_list_get. - Decrements the reference count on each interface and frees the - array of references. If you keep a reference to an interface, be - sure to increment the reference count before calling - ifnet_list_free. - @param interfaces An array of interface references from ifnet_list_get. + * @function ifnet_list_free + * @discussion Free a list of interfaces returned by ifnet_list_get. + * Decrements the reference count on each interface and frees the + * array of references. If you keep a reference to an interface, be + * sure to increment the reference count before calling + * ifnet_list_free. + * @param interfaces An array of interface references from ifnet_list_get. */ extern void ifnet_list_free(ifnet_t *interfaces); @@ -2914,133 +2914,133 @@ extern void ifnet_list_free(ifnet_t *interfaces); /******************************************************************************/ /*! - @function ifaddr_reference - @discussion Increment the reference count of an address tied to an - interface. - @param ifaddr The interface address. - @result 0 upon success + * @function ifaddr_reference + * @discussion Increment the reference count of an address tied to an + * interface. + * @param ifaddr The interface address. + * @result 0 upon success */ extern errno_t ifaddr_reference(ifaddr_t ifaddr); /*! - @function ifaddr_release - @discussion Decrements the reference count of and possibly frees an - address tied to an interface. - @param ifaddr The interface address. - @result 0 upon success + * @function ifaddr_release + * @discussion Decrements the reference count of and possibly frees an + * address tied to an interface. + * @param ifaddr The interface address. + * @result 0 upon success */ extern errno_t ifaddr_release(ifaddr_t ifaddr); /*! - @function ifaddr_address - @discussion Copies the address out of the ifaddr. - @param ifaddr The interface address. - @param out_addr The sockaddr storage for the address. - @param addr_size The size of the storage for the address. - @result 0 upon success + * @function ifaddr_address + * @discussion Copies the address out of the ifaddr. + * @param ifaddr The interface address. + * @param out_addr The sockaddr storage for the address. + * @param addr_size The size of the storage for the address. + * @result 0 upon success */ extern errno_t ifaddr_address(ifaddr_t ifaddr, struct sockaddr *out_addr, u_int32_t addr_size); /*! 
- @function ifaddr_address - @discussion Returns the address family of the address. - @param ifaddr The interface address. - @result 0 on failure, address family on success. + * @function ifaddr_address_family + * @discussion Returns the address family of the address. + * @param ifaddr The interface address. + * @result 0 on failure, address family on success. */ extern sa_family_t ifaddr_address_family(ifaddr_t ifaddr); /*! - @function ifaddr_dstaddress - @discussion Copies the destination address out of the ifaddr. - @param ifaddr The interface address. - @param out_dstaddr The sockaddr storage for the destination address. - @param dstaddr_size The size of the storage for the destination address. - @result 0 upon success + * @function ifaddr_dstaddress + * @discussion Copies the destination address out of the ifaddr. + * @param ifaddr The interface address. + * @param out_dstaddr The sockaddr storage for the destination address. + * @param dstaddr_size The size of the storage for the destination address. + * @result 0 upon success */ extern errno_t ifaddr_dstaddress(ifaddr_t ifaddr, struct sockaddr *out_dstaddr, u_int32_t dstaddr_size); /*! - @function ifaddr_netmask - @discussion Copies the netmask out of the ifaddr. - @param ifaddr The interface address. - @param out_netmask The sockaddr storage for the netmask. - @param netmask_size The size of the storage for the netmask. - @result 0 upon success + * @function ifaddr_netmask + * @discussion Copies the netmask out of the ifaddr. + * @param ifaddr The interface address. + * @param out_netmask The sockaddr storage for the netmask. + * @param netmask_size The size of the storage for the netmask. + * @result 0 upon success */ extern errno_t ifaddr_netmask(ifaddr_t ifaddr, struct sockaddr *out_netmask, u_int32_t netmask_size); /*! - @function ifaddr_ifnet - @discussion Returns the interface the address is attached to. The - reference is only valid until the ifaddr is released. If you - need to hold a reference to the ifnet for longer than you hold a - reference to the ifaddr, increment the reference using - ifnet_reference. - @param ifaddr The interface address. - @result A reference to the interface the address is attached to. + * @function ifaddr_ifnet + * @discussion Returns the interface the address is attached to. The + * reference is only valid until the ifaddr is released. If you + * need to hold a reference to the ifnet for longer than you hold a + * reference to the ifaddr, increment the reference using + * ifnet_reference. + * @param ifaddr The interface address. + * @result A reference to the interface the address is attached to. */ extern ifnet_t ifaddr_ifnet(ifaddr_t ifaddr); /*! - @function ifaddr_withaddr - @discussion Returns an interface address with the address specified. - Increments the reference count on the ifaddr before returning to - the caller. Caller is responsible for calling ifaddr_release. - @param address The address to search for. - @result A reference to the interface address. + * @function ifaddr_withaddr + * @discussion Returns an interface address with the address specified. + * Increments the reference count on the ifaddr before returning to + * the caller. Caller is responsible for calling ifaddr_release. + * @param address The address to search for. + * @result A reference to the interface address. */ extern ifaddr_t ifaddr_withaddr(const struct sockaddr *address); /*!
- @function ifaddr_withdstaddr - @discussion Returns an interface address for the interface address - that matches the destination when the netmask is applied. - Increments the reference count on the ifaddr before returning to - the caller. Caller is responsible for calling ifaddr_release. - @param destination The destination to search for. - @result A reference to the interface address. + * @function ifaddr_withdstaddr + * @discussion Returns an interface address for the interface address + * that matches the destination when the netmask is applied. + * Increments the reference count on the ifaddr before returning to + * the caller. Caller is responsible for calling ifaddr_release. + * @param destination The destination to search for. + * @result A reference to the interface address. */ extern ifaddr_t ifaddr_withdstaddr(const struct sockaddr *destination); /*! - @function ifaddr_withnet - @discussion Returns an interface address for the interface with the - network described by net. Increments the reference count on the - ifaddr before returning to the caller. Caller is responsible for - calling ifaddr_release. - @param net The network to search for. - @result A reference to the interface address. + * @function ifaddr_withnet + * @discussion Returns an interface address for the interface with the + * network described by net. Increments the reference count on the + * ifaddr before returning to the caller. Caller is responsible for + * calling ifaddr_release. + * @param net The network to search for. + * @result A reference to the interface address. */ extern ifaddr_t ifaddr_withnet(const struct sockaddr *net); /*! - @function ifaddr_withroute - @discussion Returns an interface address given a destination and - gateway. Increments the reference count on the ifaddr before - returning to the caller. Caller is responsible for calling - ifaddr_release. - @param flags Routing flags. See net/route.h, RTF_GATEWAY etc. - @param destination The destination to search for. - @param gateway A gateway to search for. - @result A reference to the interface address. + * @function ifaddr_withroute + * @discussion Returns an interface address given a destination and + * gateway. Increments the reference count on the ifaddr before + * returning to the caller. Caller is responsible for calling + * ifaddr_release. + * @param flags Routing flags. See net/route.h, RTF_GATEWAY etc. + * @param destination The destination to search for. + * @param gateway A gateway to search for. + * @result A reference to the interface address. */ extern ifaddr_t ifaddr_withroute(int flags, const struct sockaddr *destination, const struct sockaddr *gateway); /*! - @function ifaddr_findbestforaddr - @discussion Finds the best local address assigned to a specific - interface to use when communicating with another address. - Increments the reference count on the ifaddr before returning to - the caller. Caller is responsible for calling ifaddr_release. - @param addr The remote address. - @param interface The local interface. - @result A reference to the interface address. + * @function ifaddr_findbestforaddr + * @discussion Finds the best local address assigned to a specific + * interface to use when communicating with another address. + * Increments the reference count on the ifaddr before returning to + * the caller. Caller is responsible for calling ifaddr_release. + * @param addr The remote address. + * @param interface The local interface. + * @result A reference to the interface address. 
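+ *
+ * A minimal usage sketch (illustrative only, not part of the
+ * original interface; "remote" is assumed to be a valid sockaddr
+ * for the peer and "ifp" an attached interface). It shows the
+ * lookup/copy/release pattern shared by the ifaddr routines above:
+ *
+ *	ifaddr_t ifa = ifaddr_findbestforaddr(remote, ifp);
+ *	if (ifa != NULL) {
+ *		struct sockaddr_storage local;
+ *		errno_t err = ifaddr_address(ifa,
+ *		    (struct sockaddr *)&local, sizeof(local));
+ *		if (err == 0) {
+ *			... use "local" as the source address ...
+ *		}
+ *		ifaddr_release(ifa);
+ *	}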
*/ -extern ifaddr_t ifaddr_findbestforaddr(const struct sockaddr *addr, +extern ifaddr_t ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface); /******************************************************************************/ @@ -3048,56 +3048,56 @@ extern ifaddr_t ifaddr_findbestforaddr(const struct sockaddr *addr, /******************************************************************************/ /*! - @function ifmaddr_reference - @discussion Increment the reference count of an interface multicast - address. - @param ifmaddr The interface multicast address. - @result 0 on success. Only error will be EINVAL if ifmaddr is not valid. + * @function ifmaddr_reference + * @discussion Increment the reference count of an interface multicast + * address. + * @param ifmaddr The interface multicast address. + * @result 0 on success. The only error will be EINVAL if ifmaddr is not valid. */ extern errno_t ifmaddr_reference(ifmultiaddr_t ifmaddr); /*! - @function ifmaddr_release - @discussion Decrement the reference count of an interface multicast - address. If the reference count reaches zero, the ifmultiaddr - will be removed from the interface and the ifmultiaddr will be - freed. - @param ifmaddr The interface multicast address. - @result 0 on success. Only error will be EINVAL if ifmaddr is not valid. + * @function ifmaddr_release + * @discussion Decrement the reference count of an interface multicast + * address. If the reference count reaches zero, the ifmultiaddr + * will be removed from the interface and the ifmultiaddr will be + * freed. + * @param ifmaddr The interface multicast address. + * @result 0 on success. The only error will be EINVAL if ifmaddr is not valid. */ extern errno_t ifmaddr_release(ifmultiaddr_t ifmaddr); /*! - @function ifmaddr_address - @discussion Copies the multicast address to out_multicast. - @param out_multicast Storage for a sockaddr. - @param addr_size Size of the storage. - @result 0 on success. + * @function ifmaddr_address + * @discussion Copies the multicast address to out_multicast. + * @param out_multicast Storage for a sockaddr. + * @param addr_size Size of the storage. + * @result 0 on success. */ extern errno_t ifmaddr_address(ifmultiaddr_t ifmaddr, struct sockaddr *out_multicast, u_int32_t addr_size); /*! - @function ifmaddr_lladdress - @discussion Copies the link layer multicast address to - out_link_layer_multicast. - @param out_link_layer_multicast Storage for a sockaddr. - @param addr_size Size of the storage. - @result 0 on success. + * @function ifmaddr_lladdress + * @discussion Copies the link layer multicast address to + * out_link_layer_multicast. + * @param out_link_layer_multicast Storage for a sockaddr. + * @param addr_size Size of the storage. + * @result 0 on success. */ extern errno_t ifmaddr_lladdress(ifmultiaddr_t ifmaddr, struct sockaddr *out_link_layer_multicast, u_int32_t addr_size); /*! - @function ifmaddr_ifnet - @discussion Returns the interface this multicast address is attached - to. The interface reference count is not bumped by this - function. The interface is only valid as long as you don't - release the refernece to the multiast address. If you need to - maintain your pointer to the ifnet, call ifnet_reference - followed by ifnet_release when you're finished. - @param ifmaddr The interface multicast address. - @result A reference to the interface. + * @function ifmaddr_ifnet + * @discussion Returns the interface this multicast address is attached + * to.
The interface reference count is not bumped by this + * function. The interface is only valid as long as you don't + * release the reference to the multicast address. If you need to + * maintain your pointer to the ifnet, call ifnet_reference + * followed by ifnet_release when you're finished. + * @param ifmaddr The interface multicast address. + * @result A reference to the interface. */ extern ifnet_t ifmaddr_ifnet(ifmultiaddr_t ifmaddr); @@ -3107,54 +3107,54 @@ extern ifnet_t ifmaddr_ifnet(ifmultiaddr_t ifmaddr); /******************************************************************************/ /* - @typedef ifnet_clone_create_func - @discussion ifnet_clone_create_func is called to create an interface. - @param ifcloner The interface cloner. - @param unit The interface unit number to create. - @param params Additional information specific to the interface cloner. - @result Return zero on success or an errno error value on failure. + * @typedef ifnet_clone_create_func + * @discussion ifnet_clone_create_func is called to create an interface. + * @param ifcloner The interface cloner. + * @param unit The interface unit number to create. + * @param params Additional information specific to the interface cloner. + * @result Return zero on success or an errno error value on failure. */ typedef errno_t (*ifnet_clone_create_func)(if_clone_t ifcloner, u_int32_t unit, void *params); /* - @typedef ifnet_clone_destroy_func - @discussion ifnet_clone_create_func is called to destroy an interface created - by an interface cloner. - @param interface The interface to destroy. - @result Return zero on success or an errno error value on failure. + * @typedef ifnet_clone_destroy_func + * @discussion ifnet_clone_destroy_func is called to destroy an interface created + * by an interface cloner. + * @param interface The interface to destroy. + * @result Return zero on success or an errno error value on failure. */ typedef errno_t (*ifnet_clone_destroy_func)(ifnet_t interface); /* - @struct ifnet_clone_params - @discussion This structure is used to represent an interface cloner. - @field ifc_name The interface name handled by this interface cloner. - @field ifc_create The function to create an interface. - @field ifc_destroy The function to destroy an interface. -*/ + * @struct ifnet_clone_params + * @discussion This structure is used to represent an interface cloner. + * @field ifc_name The interface name handled by this interface cloner. + * @field ifc_create The function to create an interface. + * @field ifc_destroy The function to destroy an interface. + */ struct ifnet_clone_params { - const char *ifc_name; - ifnet_clone_create_func ifc_create; - ifnet_clone_destroy_func ifc_destroy; + const char *ifc_name; + ifnet_clone_create_func ifc_create; + ifnet_clone_destroy_func ifc_destroy; }; /* - @function ifnet_clone_attach - @discussion Attaches a new interface cloner. - @param cloner_params The structure that defines an interface cloner. - @param interface A pointer to an opaque handle that represent the interface cloner - that is attached upon success. - @result Returns 0 on success. - May return ENOBUFS if there is insufficient memory. - May return EEXIST if an interface cloner with the same name is already attached. + * @function ifnet_clone_attach + * @discussion Attaches a new interface cloner. + * @param cloner_params The structure that defines an interface cloner. + * @param ifcloner A pointer to an opaque handle that represents the interface cloner + * that is attached upon success.
+ * @result Returns 0 on success. + * May return ENOBUFS if there is insufficient memory. + * May return EEXIST if an interface cloner with the same name is already attached. */ extern errno_t ifnet_clone_attach(struct ifnet_clone_params *cloner_params, if_clone_t *ifcloner); /* - @function ifnet_clone_detach - @discussion Detaches a previously attached interface cloner. - @param ifcloner The opaque handle returned when the interface cloner was attached. - @result Returns 0 on success. + * @function ifnet_clone_detach + * @discussion Detaches a previously attached interface cloner. + * @param ifcloner The opaque handle returned when the interface cloner was attached. + * @result Returns 0 on success. */ extern errno_t ifnet_clone_detach(if_clone_t ifcloner); @@ -3163,68 +3163,68 @@ extern errno_t ifnet_clone_detach(if_clone_t ifcloner); /******************************************************************************/ /* - @function ifnet_get_local_ports - @discussion Returns a bitfield indicating which ports of PF_INET - and PF_INET6 protocol families have sockets in the usable - state. An interface that supports waking the host on unicast - traffic may use this information to discard incoming unicast - packets that don't have a corresponding bit set instead of - waking up the host. For port 0x0001, bit 1 of the first byte - would be set. For port n, bit 1 << (n % 8) of the (n / 8)'th - byte would be set. - @param ifp The interface in question. May be NULL, which means - all interfaces. - @param bitfield A pointer to 8192 bytes. - @result Returns 0 on success. + * @function ifnet_get_local_ports + * @discussion Returns a bitfield indicating which ports of PF_INET + * and PF_INET6 protocol families have sockets in the usable + * state. An interface that supports waking the host on unicast + * traffic may use this information to discard incoming unicast + * packets that don't have a corresponding bit set instead of + * waking up the host. For port 0x0001, bit 1 of the first byte + * would be set. For port n, bit 1 << (n % 8) of the (n / 8)'th + * byte would be set. + * @param ifp The interface in question. May be NULL, which means + * all interfaces. + * @param bitfield A pointer to 8192 bytes. + * @result Returns 0 on success. */ extern errno_t ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield); -#define IFNET_GET_LOCAL_PORTS_WILDCARDOK 0x01 -#define IFNET_GET_LOCAL_PORTS_NOWAKEUPOK 0x02 -#define IFNET_GET_LOCAL_PORTS_TCPONLY 0x04 -#define IFNET_GET_LOCAL_PORTS_UDPONLY 0x08 -#define IFNET_GET_LOCAL_PORTS_RECVANYIFONLY 0x10 -#define IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY 0x20 -#define IFNET_GET_LOCAL_PORTS_ACTIVEONLY 0x40 -/* - @function ifnet_get_local_ports_extended - @discussion Returns a bitfield indicating which local ports of the - specified protocol have sockets in the usable state. An - interface that supports waking the host on unicast traffic may - use this information to discard incoming unicast packets that - don't have a corresponding bit set instead of waking up the - host. For port 0x0001, bit 1 of the first byte would be set. - For port n, bit 1 << (n % 8) of the (n / 8)'th byte would be - set. - @param ifp The interface in question. May be NULL, which means - all interfaces. - @param protocol The protocol family of the sockets. PF_UNSPEC (0) - means all protocols, otherwise PF_INET or PF_INET6. 
- @param flags A bitwise of the following flags: - IFNET_GET_LOCAL_PORTS_WILDCARDOK: When bit is set, - the list of local ports should include those that are - used by sockets that aren't bound to any local address. - IFNET_GET_LOCAL_PORTS_NOWAKEUPOK: When bit is - set, the list of local ports should return all sockets - including the ones that do not need a wakeup from sleep. - Sockets that do not want to wake from sleep are marked - with a socket option. - IFNET_GET_LOCAL_PORTS_TCPONLY: When bit is set, the list - of local ports should return the ports used by TCP sockets. - IFNET_GET_LOCAL_PORTS_UDPONLY: When bit is set, the list - of local ports should return the ports used by UDP sockets. - only. - IFNET_GET_LOCAL_PORTS_RECVANYIFONLY: When bit is set, the - port is in the list only if the socket has the option - SO_RECV_ANYIF set - IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY: When bit is set, the - port is in the list only if the socket has the option - SO_EXTENDED_BK_IDLE set - IFNET_GET_LOCAL_PORTS_ACTIVEONLY: When bit is set, the - port is in the list only if the socket is not in a final TCP - state or the connection is not idle in a final TCP state - @param bitfield A pointer to 8192 bytes. - @result Returns 0 on success. +#define IFNET_GET_LOCAL_PORTS_WILDCARDOK 0x01 +#define IFNET_GET_LOCAL_PORTS_NOWAKEUPOK 0x02 +#define IFNET_GET_LOCAL_PORTS_TCPONLY 0x04 +#define IFNET_GET_LOCAL_PORTS_UDPONLY 0x08 +#define IFNET_GET_LOCAL_PORTS_RECVANYIFONLY 0x10 +#define IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY 0x20 +#define IFNET_GET_LOCAL_PORTS_ACTIVEONLY 0x40 +/* + * @function ifnet_get_local_ports_extended + * @discussion Returns a bitfield indicating which local ports of the + * specified protocol have sockets in the usable state. An + * interface that supports waking the host on unicast traffic may + * use this information to discard incoming unicast packets that + * don't have a corresponding bit set instead of waking up the + * host. For port 0x0001, bit 1 of the first byte would be set. + * For port n, bit 1 << (n % 8) of the (n / 8)'th byte would be + * set. + * @param ifp The interface in question. May be NULL, which means + * all interfaces. + * @param protocol The protocol family of the sockets. PF_UNSPEC (0) + * means all protocols, otherwise PF_INET or PF_INET6. + * @param flags A bitwise OR of the following flags: + * IFNET_GET_LOCAL_PORTS_WILDCARDOK: When bit is set, + * the list of local ports should include those that are + * used by sockets that aren't bound to any local address. + * IFNET_GET_LOCAL_PORTS_NOWAKEUPOK: When bit is + * set, the list of local ports should return all sockets + * including the ones that do not need a wakeup from sleep. + * Sockets that do not want to wake from sleep are marked + * with a socket option. + * IFNET_GET_LOCAL_PORTS_TCPONLY: When bit is set, the list + * of local ports should return the ports used by TCP sockets. + * IFNET_GET_LOCAL_PORTS_UDPONLY: When bit is set, the list + * of local ports should return the ports used by UDP sockets.
+ * IFNET_GET_LOCAL_PORTS_RECVANYIFONLY: When bit is set, the + * port is in the list only if the socket has the option + * SO_RECV_ANYIF set + * IFNET_GET_LOCAL_PORTS_EXTBGIDLEONLY: When bit is set, the + * port is in the list only if the socket has the option + * SO_EXTENDED_BK_IDLE set + * IFNET_GET_LOCAL_PORTS_ACTIVEONLY: When bit is set, the + * port is in the list only if the socket is not in a final TCP + * state or the connection is not idle in a final TCP state + * @param bitfield A pointer to 8192 bytes. + * @result Returns 0 on success. */ extern errno_t ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol, u_int32_t flags, u_int8_t *bitfield); @@ -3233,23 +3233,23 @@ extern errno_t ifnet_get_local_ports_extended(ifnet_t ifp, /* for reporting issues */ /******************************************************************************/ -#define IFNET_MODIDLEN 20 -#define IFNET_MODARGLEN 12 +#define IFNET_MODIDLEN 20 +#define IFNET_MODARGLEN 12 /* - @function ifnet_report_issues - @discussion Provided for network interface families and drivers to - notify the system of issues detected at their layers. - @param ifp The interface experiencing issues. - @param modid The ID of the module reporting issues. It may contain - any value that is unique enough to identify the module, such - as the SHA-1 hash of the bundle ID of the module, e.g. - "com.apple.iokit.IONetworkingFamily" or - "com.apple.iokit.IO80211Family". - @param info An optional, fixed-size array of octets containing opaque - information data used specific to the module/layer reporting - the issues. May be NULL. - @result Returns 0 on success, or EINVAL if arguments are invalid. + * @function ifnet_report_issues + * @discussion Provided for network interface families and drivers to + * notify the system of issues detected at their layers. + * @param ifp The interface experiencing issues. + * @param modid The ID of the module reporting issues. It may contain + * any value that is unique enough to identify the module, such + * as the SHA-1 hash of the bundle ID of the module, e.g. + * "com.apple.iokit.IONetworkingFamily" or + * "com.apple.iokit.IO80211Family". + * @param info An optional, fixed-size array of octets containing opaque + * information data specific to the module/layer reporting + * the issues. May be NULL. + * @result Returns 0 on success, or EINVAL if arguments are invalid.
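+ *
+ * A hypothetical call sketch (illustrative only; the modid bytes
+ * are assumed to have been filled elsewhere with a module-unique
+ * value such as the SHA-1 hash of the reporting module's bundle ID):
+ *
+ *	u_int8_t modid[IFNET_MODIDLEN];
+ *	... fill modid with the module identifier ...
+ *	errno_t err = ifnet_report_issues(ifp, modid, NULL);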
*/ extern errno_t ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN], u_int8_t info[IFNET_MODARGLEN]); @@ -3258,41 +3258,41 @@ extern errno_t ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN], /******************************************************************************/ /* for interfaces that support link level transmit completion status */ /******************************************************************************/ /* - @enum Per packet phy level transmit completion status values - @abstract Constants defining possible completion status values - A driver may support all or some of these values - @discussion - @constant IFNET_TX_COMPL_SUCCESS link transmission succeeded - @constant IFNET_TX_COMPL_FAIL link transmission failed - @constant IFNET_TX_COMPL_ABORTED link transmission aborted, may retry - @constant IFNET_TX_QUEUE_FULL link level secondary queue full -*/ + * @enum Per packet phy level transmit completion status values + * @abstract Constants defining possible completion status values + * A driver may support all or some of these values + * @discussion + * @constant IFNET_TX_COMPL_SUCCESS link transmission succeeded + * @constant IFNET_TX_COMPL_FAIL link transmission failed + * @constant IFNET_TX_COMPL_ABORTED link transmission aborted, may retry + * @constant IFNET_TX_QUEUE_FULL link level secondary queue full + */ enum { - IFNET_TX_COMPL_SUCCESS = 0, /* sent on link */ - IFNET_TX_COMPL_FAIL = 1, /* failed to send on link */ - IFNET_TX_COMPL_ABORTED = 2, /* aborted send, peer asleep */ - IFNET_TX_COMPL_QFULL = 3 /* driver level queue full */ + IFNET_TX_COMPL_SUCCESS = 0, /* sent on link */ + IFNET_TX_COMPL_FAIL = 1, /* failed to send on link */ + IFNET_TX_COMPL_ABORTED = 2, /* aborted send, peer asleep */ + IFNET_TX_COMPL_QFULL = 3 /* driver level queue full */ }; -typedef u_int32_t tx_compl_val_t; +typedef u_int32_t tx_compl_val_t; /* - @function ifnet_tx_compl_status - @discussion Used as an upcall from IONetwork Family to stack that - indicates the link level completion status of a transmitted - packet. - @param ifp The interface to which the mbuf was sent - @param m The mbuf that was transmitted - @param val indicates the status of the transmission -*/ + * @function ifnet_tx_compl_status + * @discussion Used as an upcall from IONetwork Family to the stack that + * indicates the link level completion status of a transmitted + * packet. + * @param ifp The interface to which the mbuf was sent + * @param m The mbuf that was transmitted + * @param val indicates the status of the transmission + */ extern errno_t ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val); /* - @function ifnet_tx_compl - @discussion Used to indicates the packet has been transmitted. - @param ifp The interface to which the mbuf was sent - @param m The mbuf that was transmitted -*/ + * @function ifnet_tx_compl + * @discussion Used to indicate the packet has been transmitted. + * @param ifp The interface to which the mbuf was sent + * @param m The mbuf that was transmitted + */ extern errno_t ifnet_tx_compl(ifnet_t ifp, mbuf_t m); /******************************************************************************/ @@ -3300,46 +3300,46 @@ extern errno_t ifnet_tx_compl(ifnet_t ifp, mbuf_t m); /******************************************************************************/ /* - @function ifnet_notice_node_presence - @discussion Provided for network interface drivers to notify the - system of a change detected in the presence of the specified - node. - @param ifp The interface attached to the link where the specified node - is present.
- @param sa The AF_LINK family address of the node whose presence is - changing. - @param rssi The received signal strength indication as measured in - dBm by a radio receiver. - @param lqm A link quality metric associated with the specified node. - @param npm A node proximity metric associated with the specified node. - @param srvinfo A fixed-size array of octets containing opaque service - information data used by the mDNS responder subsystem. - @result Returns 0 on success, or EINVAL if arguments are invalid. + * @function ifnet_notice_node_presence + * @discussion Provided for network interface drivers to notify the + * system of a change detected in the presence of the specified + * node. + * @param ifp The interface attached to the link where the specified node + * is present. + * @param sa The AF_LINK family address of the node whose presence is + * changing. + * @param rssi The received signal strength indication as measured in + * dBm by a radio receiver. + * @param lqm A link quality metric associated with the specified node. + * @param npm A node proximity metric associated with the specified node. + * @param srvinfo A fixed-size array of octets containing opaque service + * information data used by the mDNS responder subsystem. + * @result Returns 0 on success, or EINVAL if arguments are invalid. */ extern errno_t ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48]); /* - @function ifnet_notice_node_absence - @discussion Provided for network interface drivers to notify the - system that the absence of the specified node has been detected. - @param ifp The interface attached to the link where the absence of the - specified node has been detected. - @param sa The AF_LINK family address of the node whose absence has been - detected. - @result Returns 0 on success, or EINVAL if arguments are invalid. + * @function ifnet_notice_node_absence + * @discussion Provided for network interface drivers to notify the + * system that the absence of the specified node has been detected. + * @param ifp The interface attached to the link where the absence of the + * specified node has been detected. + * @param sa The AF_LINK family address of the node whose absence has been + * detected. + * @result Returns 0 on success, or EINVAL if arguments are invalid. */ extern errno_t ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa); /* - @function ifnet_notice_master_elected - @discussion Provided for network interface drivers to notify the system - that the nodes with a locally detected presence on the attached - link have elected a new master. - @param ifp The interface attached to the link where the new master has - been elected. - @result Returns 0 on success, or EINVAL if arguments are invalid. + * @function ifnet_notice_master_elected + * @discussion Provided for network interface drivers to notify the system + * that the nodes with a locally detected presence on the attached + * link have elected a new master. + * @param ifp The interface attached to the link where the new master has + * been elected. + * @result Returns 0 on success, or EINVAL if arguments are invalid. 
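+ *
+ * An illustrative sketch of how a driver might invoke the three
+ * notifications in this group as link events occur (not part of the
+ * original interface; "sdl" is assumed to be a filled-in AF_LINK
+ * sockaddr_dl for the node, and the rssi/lqm/npm values are
+ * placeholders):
+ *
+ *	struct sockaddr_dl sdl;
+ *	u_int8_t srvinfo[48];
+ *	... fill in sdl and srvinfo for the detected node ...
+ *	ifnet_notice_node_presence(ifp, (struct sockaddr *)&sdl,
+ *	    rssi, lqm, npm, srvinfo);
+ *	...
+ *	ifnet_notice_node_absence(ifp, (struct sockaddr *)&sdl);
+ *	ifnet_notice_master_elected(ifp);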
*/ extern errno_t ifnet_notice_master_elected(ifnet_t ifp); @@ -3348,38 +3348,38 @@ extern errno_t ifnet_notice_master_elected(ifnet_t ifp); /******************************************************************************/ /* - @function ifnet_set_delegate - @discussion Indicate that an interface is delegating another interface - for accounting/restriction purposes. This could be used by a - virtual interface that is going over another interface, where - the virtual interface is to be treated as if it's the underlying - interface for certain operations/checks. - @param ifp The delegating interface. - @param delegated_ifp The delegated interface. If NULL or equal to - the delegating interface itself, any previously-established - delegation is removed. If non-NULL, a reference to the - delegated interface is held by the delegating interface; - this reference is released via a subsequent call to remove - the established association, or when the delegating interface - is detached. - @param Returns 0 on success, EINVAL if arguments are invalid, or - ENXIO if the delegating interface isn't currently attached. + * @function ifnet_set_delegate + * @discussion Indicate that an interface is delegating another interface + * for accounting/restriction purposes. This could be used by a + * virtual interface that is going over another interface, where + * the virtual interface is to be treated as if it's the underlying + * interface for certain operations/checks. + * @param ifp The delegating interface. + * @param delegated_ifp The delegated interface. If NULL or equal to + * the delegating interface itself, any previously-established + * delegation is removed. If non-NULL, a reference to the + * delegated interface is held by the delegating interface; + * this reference is released via a subsequent call to remove + * the established association, or when the delegating interface + * is detached. + * @result Returns 0 on success, EINVAL if arguments are invalid, or + * ENXIO if the delegating interface isn't currently attached. */ extern errno_t ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp); /* - @function ifnet_get_delegate - @discussion Retrieve delegated interface information on an interface. - @param ifp The delegating interface. - @param pdelegated_ifp Pointer to the delegated interface. Upon - success, this will contain the delegated interface or - NULL if there is no delegation in place. If non-NULL, - the delegated interface will be returned with a reference - held for caller, and the caller is responsible for releasing - it via ifnet_release(); - @param Returns 0 on success, EINVAL if arguments are invalid, or - ENXIO if the delegating interface isn't currently attached. + * @function ifnet_get_delegate + * @discussion Retrieve delegated interface information on an interface. + * @param ifp The delegating interface. + * @param pdelegated_ifp Pointer to the delegated interface. Upon + * success, this will contain the delegated interface or + * NULL if there is no delegation in place. If non-NULL, + * the delegated interface will be returned with a reference + * held for the caller, and the caller is responsible for releasing + * it via ifnet_release(). + * @result Returns 0 on success, EINVAL if arguments are invalid, or + * ENXIO if the delegating interface isn't currently attached.
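+ *
+ * A minimal sketch (illustrative only):
+ *
+ *	ifnet_t delegate = NULL;
+ *	if (ifnet_get_delegate(ifp, &delegate) == 0 && delegate != NULL) {
+ *		... inspect the delegated interface ...
+ *		ifnet_release(delegate);
+ *	}
+ *
+ * Because the reference is held for the caller, ifnet_release()
+ * must be called once the delegated interface is no longer needed.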
*/ extern errno_t ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp); @@ -3389,54 +3389,54 @@ ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp); /*************************************************************************/ /* - @struct ifnet_keepalive_offload_frame - @discussion This structure is used to define various opportunistic - polling parameters for an interface. - For IPSec and AirPlay UDP keep alive only a subset of the - fields are relevant. - An incoming TCP keep alive probe has the sequence number - in the TCP header equal to "remote_seq" and the - acknowledgment number field is equal to "local_seq". - An incoming TCP keep alive probe has the sequence number - equlal to "remote_seq" minus 1 and the acknowledgment number - field is equal to "local_seq". - Note that remote_seq is in network byte order so the value to - match may have to be converted to host byte order when - subtracting 1. - For TCP, the field "interval" corresponds to the socket option - TCP_KEEPALIVE, the field "keep_cnt" to TCP_KEEPINTVL and - the field "keep_cnt" to TCP_KEEPCNT. - @field data Keep alive probe to be sent. - @field type The type of keep alive frame - @field length The length of the frame in the data field - @field interval Keep alive interval between probes in seconds - @field ether_type Tell if it's the protocol is IPv4 or IPv6 - @field keep_cnt Maximum number of time to retry probes (TCP only) - @field keep_retry Interval before retrying if previous probe was not answered (TCP only) - @field reply_length The length of the frame in the reply_data field (TCP only) - @field addr_length Length in bytes of local_addr and remote_addr (TCP only) - @field reply_data Keep alive reply to be sent to incoming probe (TCP only) - @field local_addr Local address: 4 bytes IPv4 or 16 bytes IPv6 address (TCP only) - @field remote_addr Remote address: 4 bytes IPv4 or 16 bytes IPv6 address (TCP only) - @field local_port Local port (TCP only) - @field remote_port Remote port (TCP only) - @field local_seq Local sequence number for matching incoming replies (TCP only) - @field remote_seq Remote sequence number for matching incoming probes or replies (TCP only) -*/ - -#define IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE 128 -#define IFNET_KEEPALIVE_OFFLOAD_MAX_ADDR_SIZE 16 + * @struct ifnet_keepalive_offload_frame + * @discussion This structure is used to define various opportunistic + * polling parameters for an interface. + * For IPSec and AirPlay UDP keep alive only a subset of the + * fields are relevant. + * An incoming TCP keep alive probe has the sequence number + * in the TCP header equal to "remote_seq" and the + * acknowledgment number field is equal to "local_seq". + * An incoming TCP keep alive reply has the sequence number + * equal to "remote_seq" minus 1 and the acknowledgment number + * field is equal to "local_seq". + * Note that remote_seq is in network byte order so the value to + * match may have to be converted to host byte order when + * subtracting 1. + * For TCP, the field "interval" corresponds to the socket option + * TCP_KEEPALIVE, the field "keep_retry" to TCP_KEEPINTVL and + * the field "keep_cnt" to TCP_KEEPCNT. + * @field data Keep alive probe to be sent.
+ * @field type The type of keep alive frame + * @field length The length of the frame in the data field + * @field interval Keep alive interval between probes in seconds + * @field ether_type Tells if the protocol is IPv4 or IPv6 + * @field keep_cnt Maximum number of times to retry probes (TCP only) + * @field keep_retry Interval before retrying if previous probe was not answered (TCP only) + * @field reply_length The length of the frame in the reply_data field (TCP only) + * @field addr_length Length in bytes of local_addr and remote_addr (TCP only) + * @field reply_data Keep alive reply to be sent to incoming probe (TCP only) + * @field local_addr Local address: 4 bytes IPv4 or 16 bytes IPv6 address (TCP only) + * @field remote_addr Remote address: 4 bytes IPv4 or 16 bytes IPv6 address (TCP only) + * @field local_port Local port (TCP only) + * @field remote_port Remote port (TCP only) + * @field local_seq Local sequence number for matching incoming replies (TCP only) + * @field remote_seq Remote sequence number for matching incoming probes or replies (TCP only) + */ + +#define IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE 128 +#define IFNET_KEEPALIVE_OFFLOAD_MAX_ADDR_SIZE 16 struct ifnet_keepalive_offload_frame { u_int8_t data[IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE]; /* data bytes */ -#define IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC 0x0 -#define IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY 0x1 -#define IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP 0x2 - u_int8_t type; /* type of application */ +#define IFNET_KEEPALIVE_OFFLOAD_FRAME_IPSEC 0x0 +#define IFNET_KEEPALIVE_OFFLOAD_FRAME_AIRPLAY 0x1 +#define IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP 0x2 + u_int8_t type; /* type of application */ u_int8_t length; /* Number of valid data bytes including offset */ u_int16_t interval; /* Keep alive interval in seconds */ -#define IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 0x0 -#define IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6 0x1 +#define IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 0x0 +#define IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6 0x1 u_int8_t ether_type; /* Ether type IPv4 or IPv6 */ u_int8_t keep_cnt; /* max number of time to retry probes */ u_int16_t keep_retry; /* interval before retrying if previous probe was not answered */ @@ -3453,29 +3453,29 @@ struct ifnet_keepalive_offload_frame { }; /* - @function ifnet_get_keepalive_offload_frames - @discussion Fills out frames_array with IP packets to send at - periodic intervals as Keep-alive or heartbeat messages. - This can be used to offload keep alives for UDP or TCP. - Note: The frames are returned in this order: first the IPSec - frames, then the AirPlay frames and finally the TCP frames. - If a device does not support one kind of keep alive frames_array - it should provide a frames_array large enough to accomodate - the other frames - @param ifp The interface to send frames out on. This is used to - select which sockets or IPSec SAs should generate the - packets. - @param frames_array An array of ifnet_keepalive_offload_frame - structs. This is allocated by the caller, and has - frames_array_count frames of valid memory. - @param frames_array_count The number of valid frames allocated - by the caller in frames_array - @param frame_data_offset The offset in bytes into each frame data - at which the IPv4/IPv6 packet and payload should be written - @param used_frames_count The returned number of frames that were - filled out with valid information. - @result Returns 0 on success, error number otherwise.
-*/ + * @function ifnet_get_keepalive_offload_frames + * @discussion Fills out frames_array with IP packets to send at + * periodic intervals as Keep-alive or heartbeat messages. + * This can be used to offload keep alives for UDP or TCP. + * Note: The frames are returned in this order: first the IPSec + * frames, then the AirPlay frames and finally the TCP frames. + * If a device does not support one kind of keep alive frame, + * it should provide a frames_array large enough to accommodate + * the other frames. + * @param ifp The interface to send frames out on. This is used to + * select which sockets or IPSec SAs should generate the + * packets. + * @param frames_array An array of ifnet_keepalive_offload_frame + * structs. This is allocated by the caller, and has + * frames_array_count frames of valid memory. + * @param frames_array_count The number of valid frames allocated + * by the caller in frames_array + * @param frame_data_offset The offset in bytes into each frame data + * at which the IPv4/IPv6 packet and payload should be written + * @param used_frames_count The returned number of frames that were + * filled out with valid information. + * @result Returns 0 on success, error number otherwise. + */ extern errno_t ifnet_get_keepalive_offload_frames(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frames_array, u_int32_t frames_array_count, size_t frame_data_offset, @@ -3485,49 +3485,49 @@ extern errno_t ifnet_get_keepalive_offload_frames(ifnet_t ifp, /* Link level notifications */ /*************************************************************************/ /* - @function ifnet_link_status_report - @discussion A KPI to let the driver provide link specific - status information to the protocol stack. The KPI will - copy contents from the buffer based on the version and - length provided by the driver. The contents of the buffer - will be read but will not be modified. - @param ifp The interface that is generating the report - @param buffer Buffer containing the link specific information - for this interface. It is the caller's responsibility - to free this buffer. - @param buffer_len Valid length of the buffer provided by the caller - @result Returns 0 on success, error number otherwise. -*/ + * @function ifnet_link_status_report + * @discussion A KPI to let the driver provide link specific + * status information to the protocol stack. The KPI will + * copy contents from the buffer based on the version and + * length provided by the driver. The contents of the buffer + * will be read but will not be modified. + * @param ifp The interface that is generating the report + * @param buffer Buffer containing the link specific information + * for this interface. It is the caller's responsibility + * to free this buffer. + * @param buffer_len Valid length of the buffer provided by the caller + * @result Returns 0 on success, error number otherwise. + */ extern errno_t ifnet_link_status_report(ifnet_t ifp, const void *buffer, - size_t buffer_len); + size_t buffer_len); /*************************************************************************/ /* QoS Fastlane */ /*************************************************************************/ /*! - @function ifnet_set_fastlane_capable - @param interface The interface. - @param capable Set the truth value that the interface is attached to - a network that is capable of Fastlane QoS marking. - @result Returns 0 on success, error number otherwise. + * @function ifnet_set_fastlane_capable + * @param interface The interface.
+ * @param capable Set the truth value that the interface is attached to + * a network that is capable of Fastlane QoS marking. + * @result Returns 0 on success, error number otherwise. */ extern errno_t ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable); /*! - @function ifnet_get_fastlane_capable - @param interface The interface. - @param capable On output contains the truth value that the interface - is attached ta network that is capable of Fastlane QoS marking. - @result Returns 0 on success, error number otherwise. + * @function ifnet_get_fastlane_capable + * @param interface The interface. + * @param capable On output contains the truth value that the interface + * is attached to a network that is capable of Fastlane QoS marking. + * @result Returns 0 on success, error number otherwise. */ extern errno_t ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable); /*! - @function ifnet_get_unsent_bytes - @param interface The interface - @param unsent_bytes An out parameter that contains unsent bytes for - an interface - @result Returns 0 on success, error otherwise. + * @function ifnet_get_unsent_bytes + * @param interface The interface + * @param unsent_bytes An out parameter that contains unsent bytes for + * an interface + * @result Returns 0 on success, error otherwise. */ extern errno_t ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes); @@ -3537,22 +3537,22 @@ typedef struct { } ifnet_buffer_status_t; /*! - @function ifnet_get_buffer_status - @param interface The interface - @param buf_status An out parameter that contains unsent bytes - for an interface - @result Returns 0 on success, EINVAL if any of the arguments is - NULL, ENXIO if the interface pointer is invalid + * @function ifnet_get_buffer_status + * @param interface The interface + * @param buf_status An out parameter that contains unsent bytes + * for an interface + * @result Returns 0 on success, EINVAL if any of the arguments is + * NULL, ENXIO if the interface pointer is invalid */ extern errno_t ifnet_get_buffer_status(const ifnet_t interface, ifnet_buffer_status_t *buf_status); /*! - @function ifnet_normalise_unsent_data - @discussion - Gathers the unsent bytes on all the interfaces. - This data will be reported to NetworkStatistics. - + * @function ifnet_normalise_unsent_data + * @discussion + * Gathers the unsent bytes on all the interfaces. + * This data will be reported to NetworkStatistics. + * */ extern void ifnet_normalise_unsent_data(void); @@ -3561,36 +3561,36 @@ extern void ifnet_normalise_unsent_data(void); /*************************************************************************/ /*! - @function ifnet_set_low_power_mode - @param interface The interface. - @param on Set the truth value that the interface is in low power mode. - @result Returns 0 on success, error number otherwise. + * @function ifnet_set_low_power_mode + * @param interface The interface. + * @param on Set the truth value that the interface is in low power mode. + * @result Returns 0 on success, error number otherwise. */ extern errno_t ifnet_set_low_power_mode(ifnet_t interface, boolean_t on); /*! - @function ifnet_get_low_power_mode - @param interface The interface. - @param on On output contains the truth value that the interface - is in low power mode. + * @function ifnet_get_low_power_mode + * @param interface The interface. + * @param on On output contains the truth value that the interface + * is in low power mode.
+ * @result Returns 0 on success, error number otherwise. */ extern errno_t ifnet_get_low_power_mode(ifnet_t interface, boolean_t *on); /*! - @function ifnet_touch_lastupdown - @discussion Updates the lastupdown value to now. - @param interface The interface. - @result 0 on success otherwise the errno error. + * @function ifnet_touch_lastupdown + * @discussion Updates the lastupdown value to now. + * @param interface The interface. + * @result 0 on success otherwise the errno error. */ extern errno_t ifnet_touch_lastupdown(ifnet_t interface); /*! - @function ifnet_updown_delta - @discussion Retrieves the difference between lastupdown and now. - @param interface The interface. - @param updown_delta A timeval struct to copy the delta between lastupdown and now. - to. + * @function ifnet_updown_delta + * @discussion Retrieves the difference between lastupdown and now. + * @param interface The interface. + * @param updown_delta A timeval struct to copy the delta between + * lastupdown and now to. */ extern errno_t ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta); diff --git a/bsd/net/kpi_interfacefilter.c b/bsd/net/kpi_interfacefilter.c index 47d03f9d4..a014f3068 100644 --- a/bsd/net/kpi_interfacefilter.c +++ b/bsd/net/kpi_interfacefilter.c @@ -2,7 +2,7 @@ * Copyright (c) 2003,2013,2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -48,10 +48,12 @@ iflt_attach_internal( const struct iff_filter *filter, interface_filter_t *filter_ref) { - if (interface == NULL) return ENOENT; - + if (interface == NULL) { + return ENOENT; + } + return dlil_attach_filter(interface, filter, filter_ref, - DLIL_IFF_INTERNAL); + DLIL_IFF_INTERNAL); } errno_t @@ -60,8 +62,10 @@ iflt_attach( const struct iff_filter *filter, interface_filter_t *filter_ref) { - if (interface == NULL) return ENOENT; - + if (interface == NULL) { + return ENOENT; + } + return dlil_attach_filter(interface, filter, filter_ref, 0); } diff --git a/bsd/net/kpi_interfacefilter.h b/bsd/net/kpi_interfacefilter.h index e5ac569e1..dd16bd7d4 100644 --- a/bsd/net/kpi_interfacefilter.h +++ b/bsd/net/kpi_interfacefilter.h @@ -2,7 +2,7 @@ * Copyright (c) 2003,2008,2017 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License').
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header kpi_interfacefilter.h - This header defines an API to attach interface filters. Interface - filters may be attached to a specific interface. The filters can - intercept all packets in to and out of the specific interface. In - addition, the filters may intercept interface specific events and - ioctls. + * @header kpi_interfacefilter.h + * This header defines an API to attach interface filters. Interface + * filters may be attached to a specific interface. The filters can + * intercept all packets in to and out of the specific interface. In + * addition, the filters may intercept interface specific events and + * ioctls. */ #ifndef __KPI_INTERFACEFILTER__ @@ -44,165 +44,165 @@ struct kev_msg; __BEGIN_DECLS /*! - @typedef iff_input_func - - @discussion iff_input_func is used to filter incoming packets. The - interface is only valid for the duration of the filter call. If - you need to keep a reference to the interface, be sure to call - ifnet_reference and ifnet_release. The packets passed to the - inbound filter are different from those passed to the outbound - filter. Packets to the inbound filter have the frame header - passed in separately from the rest of the packet. The outbound - data filters is passed the whole packet including the frame - header. - - The frame header usually preceeds the data in the mbuf. This - ensures that the frame header will be a valid pointer as long as - the mbuf is not freed. If you need to change the frame header to - point somewhere else, the recommended method is to prepend a new - frame header to the mbuf chain (mbuf_prepend), set the header to - point to that data, then call mbuf_adj to move the mbuf data - pointer back to the start of the packet payload. - @param cookie The cookie specified when this filter was attached. - @param interface The interface the packet was recieved on. - @param protocol The protocol of this packet. If you specified a - protocol when attaching your filter, the protocol will only ever - be the protocol you specified. - @param data The inbound packet, after the frame header as determined - by the interface. - @param frame_ptr A pointer to the pointer to the frame header. The - frame header length can be found by inspecting the interface's - frame header length (ifnet_hdrlen). - @result Return: - 0 - The caller will continue with normal processing of the - packet. - EJUSTRETURN - The caller will stop processing the packet, - the packet will not be freed. - Anything Else - The caller will free the packet and stop - processing. 
-*/ -typedef errno_t (*iff_input_func)(void *cookie, ifnet_t interface, + * @typedef iff_input_func + * + * @discussion iff_input_func is used to filter incoming packets. The + * interface is only valid for the duration of the filter call. If + * you need to keep a reference to the interface, be sure to call + * ifnet_reference and ifnet_release. The packets passed to the + * inbound filter are different from those passed to the outbound + * filter. Packets to the inbound filter have the frame header + * passed in separately from the rest of the packet. The outbound + * data filter is passed the whole packet including the frame + * header. + * + * The frame header usually precedes the data in the mbuf. This + * ensures that the frame header will be a valid pointer as long as + * the mbuf is not freed. If you need to change the frame header to + * point somewhere else, the recommended method is to prepend a new + * frame header to the mbuf chain (mbuf_prepend), set the header to + * point to that data, then call mbuf_adj to move the mbuf data + * pointer back to the start of the packet payload. + * @param cookie The cookie specified when this filter was attached. + * @param interface The interface the packet was received on. + * @param protocol The protocol of this packet. If you specified a + * protocol when attaching your filter, the protocol will only ever + * be the protocol you specified. + * @param data The inbound packet, after the frame header as determined + * by the interface. + * @param frame_ptr A pointer to the pointer to the frame header. The + * frame header length can be found by inspecting the interface's + * frame header length (ifnet_hdrlen). + * @result Return: + * 0 - The caller will continue with normal processing of the + * packet. + * EJUSTRETURN - The caller will stop processing the packet, + * the packet will not be freed. + * Anything Else - The caller will free the packet and stop + * processing. + */ +typedef errno_t (*iff_input_func)(void *cookie, ifnet_t interface, protocol_family_t protocol, mbuf_t *data, char **frame_ptr); /*! - @typedef iff_output_func - - @discussion iff_output_func is used to filter fully formed outbound - packets. The interface is only valid for the duration of the - filter call. If you need to keep a reference to the interface, - be sure to call ifnet_reference and ifnet_release. - @param cookie The cookie specified when this filter was attached. - @param interface The interface the packet is being transmitted on. - @param data The fully formed outbound packet in a chain of mbufs. - The frame header is already included. The filter function may - modify the packet or return a different mbuf chain. - @result Return: - 0 - The caller will continue with normal processing of the - packet. - EJUSTRETURN - The caller will stop processing the packet, - the packet will not be freed. - Anything Else - The caller will free the packet and stop - processing. -*/ -typedef errno_t (*iff_output_func)(void *cookie, ifnet_t interface, + * @typedef iff_output_func + * + * @discussion iff_output_func is used to filter fully formed outbound + * packets. The interface is only valid for the duration of the + * filter call. If you need to keep a reference to the interface, + * be sure to call ifnet_reference and ifnet_release. + * @param cookie The cookie specified when this filter was attached. + * @param interface The interface the packet is being transmitted on. + * @param data The fully formed outbound packet in a chain of mbufs.
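The frame-header recipe in the iff_input_func discussion above can be made concrete. A hedged sketch, not part of this patch: `hdrlen` would come from ifnet_hdrlen(), the function name is hypothetical, and error handling is minimal:

#include <sys/errno.h>
#include <sys/kpi_mbuf.h>

/* Hypothetical: repoint the frame header as recommended above. */
static errno_t
example_repoint_frame_header(mbuf_t *data, char **frame_ptr, size_t hdrlen)
{
	errno_t err;

	/* Prepend room for a replacement frame header on the mbuf chain. */
	err = mbuf_prepend(data, hdrlen, MBUF_WAITOK);
	if (err != 0) {
		return err;
	}
	/* The header now lives inside the mbuf, so the pointer stays valid
	 * for as long as the mbuf itself does. */
	*frame_ptr = (char *)mbuf_data(*data);
	/* Move the mbuf data pointer back to the start of the packet payload. */
	mbuf_adj(*data, (int)hdrlen);
	return 0;
}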
+ * The frame header is already included. The filter function may + * modify the packet or return a different mbuf chain. + * @result Return: + * 0 - The caller will continue with normal processing of the + * packet. + * EJUSTRETURN - The caller will stop processing the packet, + * the packet will not be freed. + * Anything Else - The caller will free the packet and stop + * processing. + */ +typedef errno_t (*iff_output_func)(void *cookie, ifnet_t interface, protocol_family_t protocol, mbuf_t *data); /*! - @typedef iff_event_func - - @discussion iff_event_func is used to filter interface specific - events. The interface is only valid for the duration of the - filter call. If you need to keep a reference to the interface, - be sure to call ifnet_reference and ifnet_release. - @param cookie The cookie specified when this filter was attached. - @param interface The interface the packet is being transmitted on. - @param event_msg The kernel event, may not be changed. -*/ -typedef void (*iff_event_func)(void *cookie, ifnet_t interface, + * @typedef iff_event_func + * + * @discussion iff_event_func is used to filter interface specific + * events. The interface is only valid for the duration of the + * filter call. If you need to keep a reference to the interface, + * be sure to call ifnet_reference and ifnet_release. + * @param cookie The cookie specified when this filter was attached. + * @param interface The interface the event occurred on. + * @param event_msg The kernel event, may not be changed. + */ +typedef void (*iff_event_func)(void *cookie, ifnet_t interface, protocol_family_t protocol, const struct kev_msg *event_msg); /*! - @typedef iff_ioctl_func - - @discussion iff_ioctl_func is used to filter ioctls sent to an - interface. The interface is only valid for the duration of the - filter call. If you need to keep a reference to the interface, - be sure to call ifnet_reference and ifnet_release. - - All undefined ioctls are reserved for future use by Apple. If - you need to communicate with your kext using an ioctl, please - use SIOCSIFKPI and SIOCGIFKPI. - @param cookie The cookie specified when this filter was attached. - @param interface The interface the packet is being transmitted on. - @param ioctl_cmd The ioctl command. - @param ioctl_arg A pointer to the ioctl argument. - @result Return: - 0 - This filter function handled the ioctl. - EOPNOTSUPP - This filter function does not understand/did not - handle this ioctl. - EJUSTRETURN - This filter function handled the ioctl, - processing should stop. - Anything Else - Processing will stop, the error will be - returned. -*/ -typedef errno_t (*iff_ioctl_func)(void *cookie, ifnet_t interface, + * @typedef iff_ioctl_func + * + * @discussion iff_ioctl_func is used to filter ioctls sent to an + * interface. The interface is only valid for the duration of the + * filter call. If you need to keep a reference to the interface, + * be sure to call ifnet_reference and ifnet_release. + * + * All undefined ioctls are reserved for future use by Apple. If + * you need to communicate with your kext using an ioctl, please + * use SIOCSIFKPI and SIOCGIFKPI. + * @param cookie The cookie specified when this filter was attached. + * @param interface The interface the ioctl was sent on. + * @param ioctl_cmd The ioctl command. + * @param ioctl_arg A pointer to the ioctl argument. + * @result Return: + * 0 - This filter function handled the ioctl.
+ * EOPNOTSUPP - This filter function does not understand/did not + * handle this ioctl. + * EJUSTRETURN - This filter function handled the ioctl, + * processing should stop. + * Anything Else - Processing will stop, the error will be + * returned. + */ +typedef errno_t (*iff_ioctl_func)(void *cookie, ifnet_t interface, protocol_family_t protocol, unsigned long ioctl_cmd, void *ioctl_arg); /*! - @typedef iff_detached_func - - @discussion iff_detached_func is called to notify the filter that it - has been detached from an interface. This is the last call to - the filter that will be made. A filter may be detached if the - interface is detached or the detach filter function is called. - In the case that the interface is being detached, your filter's - event function will be called with the interface detaching event - before the your detached function will be called. - @param cookie The cookie specified when this filter was attached. - @param interface The interface this filter was detached from. -*/ -typedef void (*iff_detached_func)(void *cookie, ifnet_t interface); + * @typedef iff_detached_func + * + * @discussion iff_detached_func is called to notify the filter that it + * has been detached from an interface. This is the last call to + * the filter that will be made. A filter may be detached if the + * interface is detached or the detach filter function is called. + * In the case that the interface is being detached, your filter's + * event function will be called with the interface detaching event + * before your detached function is called. + * @param cookie The cookie specified when this filter was attached. + * @param interface The interface this filter was detached from. + */ +typedef void (*iff_detached_func)(void *cookie, ifnet_t interface); /*! - @struct iff_filter - @discussion This structure is used to define an interface filter for - use with the iflt_attach function. - @field iff_cookie A kext defined cookie that will be passed to all - filter functions. - @field iff_name A filter name used for debugging purposes. - @field iff_protocol The protocol of the packets this filter is - interested in. If you specify zero, packets from all protocols - will be passed to the filter. - @field iff_input The filter function to handle inbound packets, may - be NULL. - @field iff_output The filter function to handle outbound packets, - may be NULL. - @field iff_event The filter function to handle interface events, may - be null. - @field iff_ioctl The filter function to handle interface ioctls, may - be null. - @field iff_detached The filter function used to notify the filter that - it has been detached. -*/ + * @struct iff_filter + * @discussion This structure is used to define an interface filter for + * use with the iflt_attach function. + * @field iff_cookie A kext-defined cookie that will be passed to all + * filter functions. + * @field iff_name A filter name used for debugging purposes. + * @field iff_protocol The protocol of the packets this filter is + * interested in. If you specify zero, packets from all protocols + * will be passed to the filter. + * @field iff_input The filter function to handle inbound packets, may + * be NULL. + * @field iff_output The filter function to handle outbound packets, + * may be NULL. + * @field iff_event The filter function to handle interface events, may + * be NULL. + * @field iff_ioctl The filter function to handle interface ioctls, may + * be NULL.
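To make the structure concrete, a bare-bones filter built on iff_filter might look like the following sketch (entirely hypothetical; the callback names and the reverse-DNS identifier are not part of this patch):

#include <sys/errno.h>
#include <net/kpi_interfacefilter.h>

static errno_t
example_iff_input(void *cookie, ifnet_t interface,
    protocol_family_t protocol, mbuf_t *data, char **frame_ptr)
{
#pragma unused(cookie, interface, protocol, data, frame_ptr)
	return 0;	/* 0 = let the stack continue processing the packet */
}

static void
example_iff_detached(void *cookie, ifnet_t interface)
{
#pragma unused(cookie, interface)
	/* Last call this filter receives; release any retained state here. */
}

static const struct iff_filter example_filter = {
	.iff_cookie   = NULL,
	.iff_name     = "com.example.filter",	/* hypothetical name */
	.iff_protocol = 0,			/* 0 = packets from all protocols */
	.iff_input    = example_iff_input,
	.iff_output   = NULL,
	.iff_event    = NULL,
	.iff_ioctl    = NULL,
	.iff_detached = example_iff_detached,
};

static interface_filter_t example_filter_ref;
/* Attach with: iflt_attach(ifp, &example_filter, &example_filter_ref); */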
+ * @field iff_detached The filter function used to notify the filter that + * it has been detached. + */ struct iff_filter { - void *iff_cookie; - const char *iff_name; - protocol_family_t iff_protocol; - iff_input_func iff_input; - iff_output_func iff_output; - iff_event_func iff_event; - iff_ioctl_func iff_ioctl; - iff_detached_func iff_detached; + void *iff_cookie; + const char *iff_name; + protocol_family_t iff_protocol; + iff_input_func iff_input; + iff_output_func iff_output; + iff_event_func iff_event; + iff_ioctl_func iff_ioctl; + iff_detached_func iff_detached; }; /*! - @function iflt_attach - @discussion Attaches an interface filter to an interface. - @param interface The interface the filter should be attached to. - @param filter A structure defining the filter. - @param filter_ref A reference to the filter used to detach. - @result 0 on success otherwise the errno error. + * @function iflt_attach + * @discussion Attaches an interface filter to an interface. + * @param interface The interface the filter should be attached to. + * @param filter A structure defining the filter. + * @param filter_ref A reference to the filter used to detach. + * @result 0 on success otherwise the errno error. */ #ifdef KERNEL_PRIVATE extern errno_t iflt_attach_internal(ifnet_t interface, const struct iff_filter *filter, @@ -216,9 +216,9 @@ extern errno_t iflt_attach(ifnet_t interface, const struct iff_filter *filter, #endif /* KERNEL_PRIVATE */ /*! - @function iflt_detach - @discussion Detaches an interface filter from an interface. - @param filter_ref The reference to the filter from iflt_attach. + * @function iflt_detach + * @discussion Detaches an interface filter from an interface. + * @param filter_ref The reference to the filter from iflt_attach. */ extern void iflt_detach(interface_filter_t filter_ref); diff --git a/bsd/net/kpi_protocol.c b/bsd/net/kpi_protocol.c index 6265a4b4b..3b3c50f5f 100644 --- a/bsd/net/kpi_protocol.c +++ b/bsd/net/kpi_protocol.c @@ -44,36 +44,36 @@ typedef int (*attach_t)(struct ifnet *ifp, uint32_t protocol_family); typedef int (*detach_t)(struct ifnet *ifp, uint32_t protocol_family); struct proto_input_entry { - struct proto_input_entry *next; - int detach; - struct domain *domain; - int hash; - int chain; - - protocol_family_t protocol; - proto_input_handler input; - proto_input_detached_handler detached; - - mbuf_t inject_first; - mbuf_t inject_last; - - struct proto_input_entry *input_next; - mbuf_t input_first; - mbuf_t input_last; + struct proto_input_entry *next; + int detach; + struct domain *domain; + int hash; + int chain; + + protocol_family_t protocol; + proto_input_handler input; + proto_input_detached_handler detached; + + mbuf_t inject_first; + mbuf_t inject_last; + + struct proto_input_entry *input_next; + mbuf_t input_first; + mbuf_t input_last; }; struct proto_family_str { - TAILQ_ENTRY(proto_family_str) proto_fam_next; - protocol_family_t proto_family; - ifnet_family_t if_family; - proto_plumb_handler attach_proto; - proto_unplumb_handler detach_proto; + TAILQ_ENTRY(proto_family_str) proto_fam_next; + protocol_family_t proto_family; + ifnet_family_t if_family; + proto_plumb_handler attach_proto; + proto_unplumb_handler detach_proto; }; static struct proto_input_entry *proto_hash[PROTO_HASH_SLOTS]; static int proto_total_waiting = 0; -static struct proto_input_entry *proto_input_add_list = NULL; +static struct proto_input_entry *proto_input_add_list = NULL; decl_lck_mtx_data(static, proto_family_mutex_data); static lck_mtx_t *proto_family_mutex = 
&proto_family_mutex_data; static TAILQ_HEAD(, proto_family_str) proto_family_head = @@ -82,9 +82,9 @@ static TAILQ_HEAD(, proto_family_str) proto_family_head = __private_extern__ void proto_kpi_init(void) { - lck_grp_attr_t *grp_attrib = NULL; - lck_attr_t *lck_attrib = NULL; - lck_grp_t *lck_group = NULL; + lck_grp_attr_t *grp_attrib = NULL; + lck_attr_t *lck_attrib = NULL; + lck_grp_t *lck_group = NULL; /* Allocate a mtx lock */ grp_attrib = lck_grp_attr_alloc_init(); @@ -95,21 +95,22 @@ proto_kpi_init(void) lck_grp_free(lck_group); lck_attr_free(lck_attrib); - bzero(proto_hash, sizeof (proto_hash)); + bzero(proto_hash, sizeof(proto_hash)); } __private_extern__ errno_t proto_register_input(protocol_family_t protocol, proto_input_handler input, - proto_input_detached_handler detached, int chains) + proto_input_detached_handler detached, int chains) { struct proto_input_entry *entry; struct dlil_threading_info *inp = dlil_main_input_thread; struct domain *dp; domain_guard_t guard; - entry = _MALLOC(sizeof (*entry), M_IFADDR, M_WAITOK | M_ZERO); - if (entry == NULL) - return (ENOMEM); + entry = _MALLOC(sizeof(*entry), M_IFADDR, M_WAITOK | M_ZERO); + if (entry == NULL) { + return ENOMEM; + } entry->protocol = protocol; entry->input = input; @@ -119,12 +120,14 @@ proto_register_input(protocol_family_t protocol, proto_input_handler input, guard = domain_guard_deploy(); TAILQ_FOREACH(dp, &domains, dom_entry) { - if (dp->dom_family == (int)protocol) + if (dp->dom_family == (int)protocol) { break; + } } domain_guard_release(guard); - if (dp == NULL) - return (EINVAL); + if (dp == NULL) { + return EINVAL; + } entry->domain = dp; @@ -133,11 +136,12 @@ proto_register_input(protocol_family_t protocol, proto_input_handler input, proto_input_add_list = entry; inp->input_waiting |= DLIL_PROTO_REGISTER; - if ((inp->input_waiting & DLIL_INPUT_RUNNING) == 0) + if ((inp->input_waiting & DLIL_INPUT_RUNNING) == 0) { wakeup((caddr_t)&inp->input_waiting); + } lck_mtx_unlock(&inp->input_lck); - return (0); + return 0; } __private_extern__ void @@ -147,12 +151,14 @@ proto_unregister_input(protocol_family_t protocol) for (entry = proto_hash[proto_hash_value(protocol)]; entry != NULL; entry = entry->next) { - if (entry->protocol == protocol) + if (entry->protocol == protocol) { break; + } } - if (entry != NULL) + if (entry != NULL) { entry->detach = 1; + } } static void @@ -169,14 +175,16 @@ proto_delayed_attach(struct proto_input_entry *entry) for (exist = proto_hash[hash_slot]; exist != NULL; exist = exist->next) { - if (exist->protocol == entry->protocol) + if (exist->protocol == entry->protocol) { break; + } } /* If the entry already exists, call detached and dispose */ if (exist != NULL) { - if (entry->detached) + if (entry->detached) { entry->detached(entry->protocol); + } FREE(entry, M_IFADDR); } else { entry->next = proto_hash[hash_slot]; @@ -233,7 +241,7 @@ proto_input_run(void) entry->input(entry->protocol, packet_list); } else { - mbuf_t packet; + mbuf_t packet; for (packet = packet_list; packet != NULL; @@ -262,12 +270,14 @@ proto_input(protocol_family_t protocol, mbuf_t packet_list) for (entry = proto_hash[proto_hash_value(protocol)]; entry != NULL; entry = entry->next) { - if (entry->protocol == protocol) + if (entry->protocol == protocol) { break; + } } - if (entry == NULL) - return (-1); + if (entry == NULL) { + return -1; + } if (entry->domain && !(entry->domain->dom_flags & DOM_REENTRANT)) { lck_mtx_lock(entry->domain->dom_mtx); @@ -277,7 +287,7 @@ proto_input(protocol_family_t protocol, mbuf_t 
packet_list) if (entry->chain) { entry->input(entry->protocol, packet_list); } else { - mbuf_t packet; + mbuf_t packet; for (packet = packet_list; packet != NULL; packet = packet_list) { @@ -290,7 +300,7 @@ proto_input(protocol_family_t protocol, mbuf_t packet_list) if (locked) { lck_mtx_unlock(entry->domain->dom_mtx); } - return (result); + return result; } errno_t @@ -302,13 +312,15 @@ proto_inject(protocol_family_t protocol, mbuf_t packet_list) struct dlil_threading_info *inp = dlil_main_input_thread; for (last_packet = packet_list; mbuf_nextpkt(last_packet) != NULL; - last_packet = mbuf_nextpkt(last_packet)) + last_packet = mbuf_nextpkt(last_packet)) { /* find the last packet */; + } for (entry = proto_hash[hash_slot]; entry != NULL; entry = entry->next) { - if (entry->protocol == protocol) + if (entry->protocol == protocol) { break; + } } if (entry != NULL) { @@ -326,10 +338,10 @@ proto_inject(protocol_family_t protocol, mbuf_t packet_list) } lck_mtx_unlock(&inp->input_lck); } else { - return (ENOENT); + return ENOENT; } - return (0); + return 0; } static struct proto_family_str * @@ -339,11 +351,12 @@ proto_plumber_find(protocol_family_t proto_family, ifnet_family_t if_family) TAILQ_FOREACH(mod, &proto_family_head, proto_fam_next) { if ((mod->proto_family == (proto_family & 0xffff)) && - (mod->if_family == (if_family & 0xffff))) + (mod->if_family == (if_family & 0xffff))) { break; + } } - return (mod); + return mod; } errno_t @@ -353,8 +366,9 @@ proto_register_plumber(protocol_family_t protocol_family, { struct proto_family_str *proto_family; - if (attach == NULL) - return (EINVAL); + if (attach == NULL) { + return EINVAL; + } lck_mtx_lock(proto_family_mutex); @@ -362,26 +376,26 @@ proto_register_plumber(protocol_family_t protocol_family, if (proto_family->proto_family == protocol_family && proto_family->if_family == interface_family) { lck_mtx_unlock(proto_family_mutex); - return (EEXIST); + return EEXIST; } } proto_family = (struct proto_family_str *) - _MALLOC(sizeof (struct proto_family_str), M_IFADDR, + _MALLOC(sizeof(struct proto_family_str), M_IFADDR, M_WAITOK | M_ZERO); if (!proto_family) { lck_mtx_unlock(proto_family_mutex); - return (ENOMEM); + return ENOMEM; } - proto_family->proto_family = protocol_family; - proto_family->if_family = interface_family & 0xffff; - proto_family->attach_proto = attach; - proto_family->detach_proto = detach; + proto_family->proto_family = protocol_family; + proto_family->if_family = interface_family & 0xffff; + proto_family->attach_proto = attach; + proto_family->detach_proto = detach; TAILQ_INSERT_TAIL(&proto_family_head, proto_family, proto_fam_next); lck_mtx_unlock(proto_family_mutex); - return (0); + return 0; } void @@ -414,13 +428,13 @@ proto_plumb(protocol_family_t protocol_family, ifnet_t ifp) proto_family = proto_plumber_find(protocol_family, ifp->if_family); if (proto_family == NULL) { lck_mtx_unlock(proto_family_mutex); - return (ENXIO); + return ENXIO; } ret = proto_family->attach_proto(ifp, protocol_family); lck_mtx_unlock(proto_family_mutex); - return (ret); + return ret; } @@ -433,11 +447,12 @@ proto_unplumb(protocol_family_t protocol_family, ifnet_t ifp) lck_mtx_lock(proto_family_mutex); proto_family = proto_plumber_find(protocol_family, ifp->if_family); - if (proto_family != NULL && proto_family->detach_proto) + if (proto_family != NULL && proto_family->detach_proto) { proto_family->detach_proto(ifp, protocol_family); - else + } else { ret = ifnet_detach_protocol(ifp, protocol_family); + } lck_mtx_unlock(proto_family_mutex); - 
return (ret); + return ret; } diff --git a/bsd/net/kpi_protocol.h b/bsd/net/kpi_protocol.h index b2546d4e3..f8b2ee8a4 100644 --- a/bsd/net/kpi_protocol.h +++ b/bsd/net/kpi_protocol.h @@ -26,12 +26,12 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header kpi_protocol.h - This header defines an API to interact with protocols in the kernel. - The KPIs in this header file can be used to interact with protocols - that already exist in the stack. These KPIs can be used to support - existing protocols over media types that are not natively supported - in the kernel, such as ATM. + * @header kpi_protocol.h + * This header defines an API to interact with protocols in the kernel. + * The KPIs in this header file can be used to interact with protocols + * that already exist in the stack. These KPIs can be used to support + * existing protocols over media types that are not natively supported + * in the kernel, such as ATM. */ #ifndef __KPI_PROTOCOL__ @@ -47,73 +47,73 @@ __BEGIN_DECLS #ifdef BSD_KERNEL_PRIVATE /*! - @typedef protocol_input_handler - @discussion protocol_input_handler is called to input a packet. If - your protocol has specified a global lock, the lock will be held - when this funciton is called. - @pararm protocol The protocol this packet is intended for. - @param packet The packet that should be input. + * @typedef proto_input_handler + * @discussion proto_input_handler is called to input a packet. If + * your protocol has specified a global lock, the lock will be held + * when this function is called. + * @param protocol The protocol this packet is intended for. + * @param packet The packet that should be input. */ typedef void (*proto_input_handler)(protocol_family_t protocol, mbuf_t packet); /*! - @typedef proto_input_detached_handler - @discussion proto_input_detached_handler is called to notify the - protocol that it has been detached. When this function is - called, the proto_input_handler will not be called again, making - it safe to unload. - @pararm protocol The protocol detached. + * @typedef proto_input_detached_handler + * @discussion proto_input_detached_handler is called to notify the + * protocol that it has been detached. When this function is + * called, the proto_input_handler will not be called again, making + * it safe to unload. + * @param protocol The protocol detached. */ typedef void (*proto_input_detached_handler)(protocol_family_t protocol); /*! - @function proto_register_input - @discussion Allows the caller to specify the functions called when a - packet for a protocol is received. - @param protocol The protocol family these functions will receive - packets for. - @param input The function called when a packet is input. - @param chains Input function supports packet chains. - @result A errno error on failure. + * @function proto_register_input + * @discussion Allows the caller to specify the functions called when a + * packet for a protocol is received. + * @param protocol The protocol family these functions will receive + * packets for. + * @param input The function called when a packet is input. + * @param chains Input function supports packet chains. + * @result An errno error on failure. */ extern errno_t proto_register_input(protocol_family_t protocol, proto_input_handler input, proto_input_detached_handler detached, int chains); /*! - @function proto_unregister_input - @discussion Allows the caller to unregister the input and inject - functions for a protocol.
The input/inject functions may not be - unregistered immediately if there is a chance they are in use. - To notify the owner when the functions are no longer in use, the - proto_detached_handler function will be called. It is not safe - to unload until the proto_detached_handler is called. - @param protocol The protocol family these functions will receive - packets for. + * @function proto_unregister_input + * @discussion Allows the caller to unregister the input and inject + * functions for a protocol. The input/inject functions may not be + * unregistered immediately if there is a chance they are in use. + * To notify the owner when the functions are no longer in use, the + * proto_detached_handler function will be called. It is not safe + * to unload until the proto_detached_handler is called. + * @param protocol The protocol family these functions will receive + * packets for. */ extern void proto_unregister_input(protocol_family_t protocol); #endif /* BSD_KERNEL_PRIVATE */ /*! - @function proto_input - @discussion Inputs a packet on the specified protocol from the input - path. - @param protocol The protocol of the packet. - @param packet The first packet in a chain of packets to be input. - @result A errno error on failure. Unless proto_input returns zero, - the caller is responsible for freeing the mbuf. + * @function proto_input + * @discussion Inputs a packet on the specified protocol from the input + * path. + * @param protocol The protocol of the packet. + * @param packet The first packet in a chain of packets to be input. + * @result An errno error on failure. Unless proto_input returns zero, + * the caller is responsible for freeing the mbuf. */ extern errno_t proto_input(protocol_family_t protocol, mbuf_t packet); /*! - @function proto_inject - @discussion Injects a packet on the specified protocol from - anywhere. To avoid recursion, the protocol may need to queue the - packet to be handled later. - @param protocol The protocol of the packet. - @param packet The first packet in a chain of packets to be injected. - @result A errno error on failure. Unless proto_inject returns zero, - the caller is responsible for freeing the mbuf. + * @function proto_inject + * @discussion Injects a packet on the specified protocol from + * anywhere. To avoid recursion, the protocol may need to queue the + * packet to be handled later. + * @param protocol The protocol of the packet. + * @param packet The first packet in a chain of packets to be injected. + * @result An errno error on failure. Unless proto_inject returns zero, + * the caller is responsible for freeing the mbuf. */ extern errno_t proto_inject(protocol_family_t protocol, mbuf_t packet); @@ -123,87 +123,87 @@ extern errno_t proto_inject(protocol_family_t protocol, mbuf_t packet); /******************************************************************************/ /*! - @typedef proto_plumb_handler - @discussion proto_plumb_handler is called to attach a protocol to an - interface. A typical protocol plumb function would fill out an - ifnet_attach_proto_param and call ifnet_attach_protocol. - @param ifp The interface the protocol should be attached to. - @param protocol The protocol that should be attached to the - interface. - @result - A non-zero value of the attach failed. + * @typedef proto_plumb_handler + * @discussion proto_plumb_handler is called to attach a protocol to an + * interface. A typical protocol plumb function would fill out an + * ifnet_attach_proto_param and call ifnet_attach_protocol.
+ * @param ifp The interface the protocol should be attached to. + * @param protocol The protocol that should be attached to the + * interface. + * @result + * A non-zero value indicates the attach failed. */ typedef errno_t (*proto_plumb_handler)(ifnet_t ifp, protocol_family_t protocol); /*! - @typedef proto_unplumb_handler - @discussion proto_unplumb_handler is called to detach a protocol - from an interface. A typical unplumb function would call - ifnet_detach_protocol and perform any necessary cleanup. - @param ifp The interface the protocol should be detached from. - @param protocol The protocol that should be detached from the - interface. + * @typedef proto_unplumb_handler + * @discussion proto_unplumb_handler is called to detach a protocol + * from an interface. A typical unplumb function would call + * ifnet_detach_protocol and perform any necessary cleanup. + * @param ifp The interface the protocol should be detached from. + * @param protocol The protocol that should be detached from the + * interface. */ typedef void (*proto_unplumb_handler)(ifnet_t ifp, protocol_family_t protocol); /*! - @function proto_register_plumber - @discussion Allows the caller to specify the functions called when a - protocol is attached to an interface belonging to the specified - family and when that protocol is detached. - @param proto_fam The protocol family these plumbing functions will - handle. - @param if_fam The interface family these plumbing functions will - handle. - @param plumb The function to call to attach the protocol to an - interface. - @param unplumb The function to call to detach the protocol to an - interface, may be NULL in which case ifnet_detach_protocol will - be used to detach the protocol. - @result A non-zero value of the attach failed. + * @function proto_register_plumber + * @discussion Allows the caller to specify the functions called when a + * protocol is attached to an interface belonging to the specified + * family and when that protocol is detached. + * @param proto_fam The protocol family these plumbing functions will + * handle. + * @param if_fam The interface family these plumbing functions will + * handle. + * @param plumb The function to call to attach the protocol to an + * interface. + * @param unplumb The function to call to detach the protocol from an + * interface, may be NULL in which case ifnet_detach_protocol will + * be used to detach the protocol. + * @result A non-zero value indicates the attach failed. */ extern errno_t proto_register_plumber(protocol_family_t proto_fam, ifnet_family_t if_fam, proto_plumb_handler plumb, proto_unplumb_handler unplumb); /*! - @function proto_unregister_plumber - @discussion Unregisters a previously registered plumbing function. - @param proto_fam The protocol family these plumbing functions - handle. - @param if_fam The interface family these plumbing functions handle. + * @function proto_unregister_plumber + * @discussion Unregisters a previously registered plumbing function. + * @param proto_fam The protocol family these plumbing functions + * handle. + * @param if_fam The interface family these plumbing functions handle. */ extern void proto_unregister_plumber(protocol_family_t proto_fam, ifnet_family_t if_fam); #ifdef BSD_KERNEL_PRIVATE /* - @function proto_plumb - @discussion Plumbs a protocol to an actual interface. This will find - a registered protocol module and call its attach function. - The module will typically call dlil_attach_protocol() with the - appropriate parameters. - @param protocol_family The protocol family.
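As a concrete, hypothetical sketch of plumber registration (the PF_INET/IFNET_FAMILY_ETHERNET pairing and the handler bodies are illustrative assumptions, not part of this patch):

#include <sys/errno.h>
#include <net/kpi_interface.h>
#include <net/kpi_protocol.h>

static errno_t
example_plumb(ifnet_t ifp, protocol_family_t protocol)
{
#pragma unused(ifp, protocol)
	/* A real plumber would fill out an ifnet_attach_proto_param and
	 * call ifnet_attach_protocol(); elided in this sketch. */
	return 0;
}

static void
example_unplumb(ifnet_t ifp, protocol_family_t protocol)
{
	/* Detach and clean up; ifnet_detach_protocol() is the usual call. */
	(void)ifnet_detach_protocol(ifp, protocol);
}

/* Registration, e.g.:
 *   proto_register_plumber(PF_INET, IFNET_FAMILY_ETHERNET,
 *       example_plumb, example_unplumb);
 */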
- @param ifp The interface to plumb the protocol to. - @result 0: No error. - ENOENT: No module was registered. - Other: Error returned by the attach_proto function -*/ + * @function proto_plumb + * @discussion Plumbs a protocol to an actual interface. This will find + * a registered protocol module and call its attach function. + * The module will typically call dlil_attach_protocol() with the + * appropriate parameters. + * @param protocol_family The protocol family. + * @param ifp The interface to plumb the protocol to. + * @result 0: No error. + * ENOENT: No module was registered. + * Other: Error returned by the attach_proto function + */ extern errno_t proto_plumb(protocol_family_t protocol_family, ifnet_t ifp); /* - @function proto_unplumb - @discussion Unplumbs a protocol from an interface. This will find - a registered protocol module and call its detach function. - The module will typically call dlil_detach_protocol() with - the appropriate parameters. If no module is found, this - function will call dlil_detach_protocol directly(). - @param protocol_family The protocol family. - @param ifp The interface to unplumb the protocol from. - @result 0: No error. - ENOENT: No module was registered. - Other: Error returned by the attach_proto function -*/ + * @function proto_unplumb + * @discussion Unplumbs a protocol from an interface. This will find + * a registered protocol module and call its detach function. + * The module will typically call dlil_detach_protocol() with + * the appropriate parameters. If no module is found, this + * function will call dlil_detach_protocol() directly. + * @param protocol_family The protocol family. + * @param ifp The interface to unplumb the protocol from. + * @result 0: No error. + * ENOENT: No module was registered. + * Other: Error returned by the detach_proto function + */ extern errno_t proto_unplumb(protocol_family_t protocol_family, ifnet_t ifp); __private_extern__ void diff --git a/bsd/net/lacp.h b/bsd/net/lacp.h index 71ff0d543..0b0cb02a3 100644 --- a/bsd/net/lacp.h +++ b/bsd/net/lacp.h @@ -2,7 +2,7 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License.
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,7 +32,7 @@ * the Link Aggregation Marker Protocol */ -/* +/* * Modification History * * May 14, 2004 Dieter Siegmund (dieter@apple.com) @@ -40,213 +40,213 @@ */ #ifndef _NET_LACP_H_ -#define _NET_LACP_H_ +#define _NET_LACP_H_ #include #include /** - ** Link Aggregation Control Protocol (LACP) definitions - **/ -#define LACPDU_VERSION_1 1 +** Link Aggregation Control Protocol (LACP) definitions +**/ +#define LACPDU_VERSION_1 1 -#define LACPDU_TLV_TYPE_TERMINATOR 0x00 -#define LACPDU_TLV_TYPE_ACTOR 0x01 -#define LACPDU_TLV_TYPE_PARTNER 0x02 -#define LACPDU_TLV_TYPE_COLLECTOR 0x03 +#define LACPDU_TLV_TYPE_TERMINATOR 0x00 +#define LACPDU_TLV_TYPE_ACTOR 0x01 +#define LACPDU_TLV_TYPE_PARTNER 0x02 +#define LACPDU_TLV_TYPE_COLLECTOR 0x03 -#define LACPDU_ACTOR_TLV_LENGTH 20 -#define LACPDU_PARTNER_TLV_LENGTH 20 -#define LACPDU_COLLECTOR_TLV_LENGTH 16 +#define LACPDU_ACTOR_TLV_LENGTH 20 +#define LACPDU_PARTNER_TLV_LENGTH 20 +#define LACPDU_COLLECTOR_TLV_LENGTH 16 typedef u_char lacp_actor_partner_state; typedef u_int16_t lacp_key; typedef u_int16_t lacp_system_priority, lacp_port_priority, lacp_port; typedef u_int16_t lacp_collector_max_delay; typedef struct { - u_char system_id[6]; + u_char system_id[6]; } lacp_system, *lacp_system_ref; /* - * LACP Actor/Partner TLV + * LACP Actor/Partner TLV */ typedef struct lacp_actor_partner_tlv_s { - u_char lap_tlv_type; /* 0x01 or 0x02 */ - u_char lap_length; /* 20 */ - u_char lap_system_priority[2]; - u_char lap_system[6]; - u_char lap_key[2]; - u_char lap_port_priority[2]; - u_char lap_port[2]; - u_char lap_state; - u_char lap_reserved[3]; + u_char lap_tlv_type; /* 0x01 or 0x02 */ + u_char lap_length; /* 20 */ + u_char lap_system_priority[2]; + u_char lap_system[6]; + u_char lap_key[2]; + u_char lap_port_priority[2]; + u_char lap_port[2]; + u_char lap_state; + u_char lap_reserved[3]; } lacp_actor_partner_tlv, *lacp_actor_partner_tlv_ref; /* * LACP Collector TLV */ typedef struct lacp_collector_tlv_s { - u_char lac_tlv_type; /* 0x03 */ - u_char lac_length; /* 16 */ - u_char lac_max_delay[2]; - u_char lac_reserved[12]; + u_char lac_tlv_type; /* 0x03 */ + u_char lac_length; /* 16 */ + u_char lac_max_delay[2]; + u_char lac_reserved[12]; } lacp_collector_tlv, *lacp_collector_tlv_ref; -/* +/* * LACP Actor/Partner State bits */ -#define LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY 0x01 -#define LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT 0x02 -#define LACP_ACTOR_PARTNER_STATE_AGGREGATION 0x04 -#define LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION 0x08 -#define LACP_ACTOR_PARTNER_STATE_COLLECTING 0x10 -#define LACP_ACTOR_PARTNER_STATE_DISTRIBUTING 0x20 -#define LACP_ACTOR_PARTNER_STATE_DEFAULTED 0x40 -#define LACP_ACTOR_PARTNER_STATE_EXPIRED 0x80 +#define LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY 0x01 +#define LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT 0x02 +#define LACP_ACTOR_PARTNER_STATE_AGGREGATION 0x04 +#define LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION 0x08 +#define LACP_ACTOR_PARTNER_STATE_COLLECTING 0x10 +#define LACP_ACTOR_PARTNER_STATE_DISTRIBUTING 0x20 +#define LACP_ACTOR_PARTNER_STATE_DEFAULTED 0x40 +#define LACP_ACTOR_PARTNER_STATE_EXPIRED 0x80 static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_active_lacp(lacp_actor_partner_state state) { - return (state | LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY); + return state | LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_passive_lacp(lacp_actor_partner_state state) { - return (state &= 
~LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY); + return state &= ~LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY; } static __inline__ int lacp_actor_partner_state_active_lacp(lacp_actor_partner_state state) { - return ((state & LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY) != 0); + return (state & LACP_ACTOR_PARTNER_STATE_LACP_ACTIVITY) != 0; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_short_timeout(lacp_actor_partner_state state) { - return (state | LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT); + return state | LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_long_timeout(lacp_actor_partner_state state) { - return (state &= ~LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT); + return state &= ~LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT; } static __inline__ int lacp_actor_partner_state_short_timeout(lacp_actor_partner_state state) { - return ((state & LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT) != 0); + return (state & LACP_ACTOR_PARTNER_STATE_LACP_TIMEOUT) != 0; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_aggregatable(lacp_actor_partner_state state) { - return (state | LACP_ACTOR_PARTNER_STATE_AGGREGATION); + return state | LACP_ACTOR_PARTNER_STATE_AGGREGATION; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_individual(lacp_actor_partner_state state) { - return (state &= ~LACP_ACTOR_PARTNER_STATE_AGGREGATION); + return state &= ~LACP_ACTOR_PARTNER_STATE_AGGREGATION; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_aggregatable(lacp_actor_partner_state state) { - return ((state & LACP_ACTOR_PARTNER_STATE_AGGREGATION) != 0); + return (state & LACP_ACTOR_PARTNER_STATE_AGGREGATION) != 0; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_in_sync(lacp_actor_partner_state state) { - return (state | LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION); + return state | LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_out_of_sync(lacp_actor_partner_state state) { - return (state &= ~LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION); + return state &= ~LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION; } static __inline__ int lacp_actor_partner_state_in_sync(lacp_actor_partner_state state) { - return ((state & LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION) != 0); + return (state & LACP_ACTOR_PARTNER_STATE_SYNCHRONIZATION) != 0; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_collecting(lacp_actor_partner_state state) { - return (state | LACP_ACTOR_PARTNER_STATE_COLLECTING); + return state | LACP_ACTOR_PARTNER_STATE_COLLECTING; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_not_collecting(lacp_actor_partner_state state) { - return (state &= ~LACP_ACTOR_PARTNER_STATE_COLLECTING); + return state &= ~LACP_ACTOR_PARTNER_STATE_COLLECTING; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_collecting(lacp_actor_partner_state state) { - return ((state & LACP_ACTOR_PARTNER_STATE_COLLECTING) != 0); + return (state & LACP_ACTOR_PARTNER_STATE_COLLECTING) != 0; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_distributing(lacp_actor_partner_state state) { - return (state | LACP_ACTOR_PARTNER_STATE_DISTRIBUTING); + return state | LACP_ACTOR_PARTNER_STATE_DISTRIBUTING; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_not_distributing(lacp_actor_partner_state state) { - return (state &= 
~LACP_ACTOR_PARTNER_STATE_DISTRIBUTING); + return state &= ~LACP_ACTOR_PARTNER_STATE_DISTRIBUTING; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_distributing(lacp_actor_partner_state state) { - return ((state & LACP_ACTOR_PARTNER_STATE_DISTRIBUTING) != 0); + return (state & LACP_ACTOR_PARTNER_STATE_DISTRIBUTING) != 0; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_defaulted(lacp_actor_partner_state state) { - return (state | LACP_ACTOR_PARTNER_STATE_DEFAULTED); + return state | LACP_ACTOR_PARTNER_STATE_DEFAULTED; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_not_defaulted(lacp_actor_partner_state state) { - return (state &= ~LACP_ACTOR_PARTNER_STATE_DEFAULTED); + return state &= ~LACP_ACTOR_PARTNER_STATE_DEFAULTED; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_defaulted(lacp_actor_partner_state state) { - return ((state & LACP_ACTOR_PARTNER_STATE_DEFAULTED) != 0); + return (state & LACP_ACTOR_PARTNER_STATE_DEFAULTED) != 0; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_expired(lacp_actor_partner_state state) { - return (state | LACP_ACTOR_PARTNER_STATE_EXPIRED); + return state | LACP_ACTOR_PARTNER_STATE_EXPIRED; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_set_not_expired(lacp_actor_partner_state state) { - return (state &= ~LACP_ACTOR_PARTNER_STATE_EXPIRED); + return state &= ~LACP_ACTOR_PARTNER_STATE_EXPIRED; } static __inline__ lacp_actor_partner_state lacp_actor_partner_state_expired(lacp_actor_partner_state state) { - return ((state & LACP_ACTOR_PARTNER_STATE_EXPIRED) != 0); + return (state & LACP_ACTOR_PARTNER_STATE_EXPIRED) != 0; } /* @@ -258,9 +258,9 @@ lacp_actor_partner_state_expired(lacp_actor_partner_state state) static __inline__ void lacp_uint16_set(uint8_t * field, uint16_t value) { - uint16_t tmp_value = htons(value); - memcpy((void *)field, (void *)&tmp_value, sizeof(uint16_t)); - return; + uint16_t tmp_value = htons(value); + memcpy((void *)field, (void *)&tmp_value, sizeof(uint16_t)); + return; } /* @@ -272,9 +272,9 @@ lacp_uint16_set(uint8_t * field, uint16_t value) static __inline__ uint16_t lacp_uint16_get(const uint8_t * field) { - uint16_t tmp_field; - memcpy((void *)&tmp_field, (const void *)field, sizeof(uint16_t)); - return (ntohs(tmp_field)); + uint16_t tmp_field; + memcpy((void *)&tmp_field, (const void *)field, sizeof(uint16_t)); + return ntohs(tmp_field); } /* @@ -286,9 +286,9 @@ lacp_uint16_get(const uint8_t * field) static __inline__ void lacp_uint32_set(uint8_t * field, uint32_t value) { - uint32_t tmp_value = htonl(value); - memcpy((void *)field, (void *)&tmp_value, sizeof(uint32_t)); - return; + uint32_t tmp_value = htonl(value); + memcpy((void *)field, (void *)&tmp_value, sizeof(uint32_t)); + return; } /* @@ -300,173 +300,173 @@ lacp_uint32_set(uint8_t * field, uint32_t value) static __inline__ uint32_t lacp_uint32_get(const uint8_t * field) { - uint32_t tmp_field; - memcpy((void *)&tmp_field, (const void *)field, sizeof(uint32_t)); - return (ntohl(tmp_field)); + uint32_t tmp_field; + memcpy((void *)&tmp_field, (const void *)field, sizeof(uint32_t)); + return ntohl(tmp_field); } /* * LACP Actor/Partner TLV access functions */ static __inline__ void -lacp_actor_partner_tlv_set_system_priority(lacp_actor_partner_tlv_ref tlv, - lacp_system_priority system_priority) +lacp_actor_partner_tlv_set_system_priority(lacp_actor_partner_tlv_ref tlv, + lacp_system_priority system_priority) { - 
lacp_uint16_set(tlv->lap_system_priority, system_priority); - return; + lacp_uint16_set(tlv->lap_system_priority, system_priority); + return; } static __inline__ lacp_system_priority lacp_actor_partner_tlv_get_system_priority(const lacp_actor_partner_tlv_ref tlv) { - return (lacp_system_priority)lacp_uint16_get(tlv->lap_system_priority); + return (lacp_system_priority)lacp_uint16_get(tlv->lap_system_priority); } static __inline__ void lacp_actor_partner_tlv_set_key(lacp_actor_partner_tlv_ref tlv, lacp_key key) { - lacp_uint16_set(tlv->lap_key, key); - return; + lacp_uint16_set(tlv->lap_key, key); + return; } static __inline__ lacp_key lacp_actor_partner_tlv_get_key(const lacp_actor_partner_tlv_ref tlv) { - return (lacp_key)lacp_uint16_get(tlv->lap_key); + return (lacp_key)lacp_uint16_get(tlv->lap_key); } static __inline__ void -lacp_actor_partner_tlv_set_port_priority(lacp_actor_partner_tlv_ref tlv, - lacp_port_priority port_priority) +lacp_actor_partner_tlv_set_port_priority(lacp_actor_partner_tlv_ref tlv, + lacp_port_priority port_priority) { - lacp_uint16_set(tlv->lap_port_priority, port_priority); - return; + lacp_uint16_set(tlv->lap_port_priority, port_priority); + return; } static __inline__ lacp_port_priority lacp_actor_partner_tlv_get_port_priority(const lacp_actor_partner_tlv_ref tlv) { - return (lacp_port_priority)lacp_uint16_get(tlv->lap_port_priority); + return (lacp_port_priority)lacp_uint16_get(tlv->lap_port_priority); } static __inline__ void lacp_actor_partner_tlv_set_port(lacp_actor_partner_tlv_ref tlv, lacp_port port) { - lacp_uint16_set(tlv->lap_port, port); - return; + lacp_uint16_set(tlv->lap_port, port); + return; } static __inline__ lacp_port lacp_actor_partner_tlv_get_port(const lacp_actor_partner_tlv_ref tlv) { - return (lacp_port)lacp_uint16_get(tlv->lap_port); + return (lacp_port)lacp_uint16_get(tlv->lap_port); } /* * LACP Collector TLV access functions */ static __inline__ void -lacp_collector_tlv_set_max_delay(lacp_collector_tlv_ref tlv, - lacp_collector_max_delay delay) +lacp_collector_tlv_set_max_delay(lacp_collector_tlv_ref tlv, + lacp_collector_max_delay delay) { - lacp_uint16_set(tlv->lac_max_delay, delay); - return; + lacp_uint16_set(tlv->lac_max_delay, delay); + return; } static __inline__ lacp_collector_max_delay lacp_collector_tlv_get_max_delay(const lacp_collector_tlv_ref tlv) { - return (lacp_collector_max_delay)lacp_uint16_get(tlv->lac_max_delay); + return (lacp_collector_max_delay)lacp_uint16_get(tlv->lac_max_delay); } typedef struct lacpdu_s { - u_char la_subtype; - u_char la_version; - u_char la_actor_tlv[LACPDU_ACTOR_TLV_LENGTH]; - u_char la_partner_tlv[LACPDU_PARTNER_TLV_LENGTH]; - u_char la_collector_tlv[LACPDU_COLLECTOR_TLV_LENGTH]; - u_char la_terminator_type; - u_char la_terminator_length; - u_char la_reserved[50]; + u_char la_subtype; + u_char la_version; + u_char la_actor_tlv[LACPDU_ACTOR_TLV_LENGTH]; + u_char la_partner_tlv[LACPDU_PARTNER_TLV_LENGTH]; + u_char la_collector_tlv[LACPDU_COLLECTOR_TLV_LENGTH]; + u_char la_terminator_type; + u_char la_terminator_length; + u_char la_reserved[50]; } lacpdu, *lacpdu_ref; /* timer values in seconds */ -#define LACP_FAST_PERIODIC_TIME 1 -#define LACP_SLOW_PERIODIC_TIME 30 -#define LACP_SHORT_TIMEOUT_TIME 3 -#define LACP_LONG_TIMEOUT_TIME 90 -#define LACP_CHURN_DETECTION_TIME 60 -#define LACP_AGGREGATE_WAIT_TIME 2 +#define LACP_FAST_PERIODIC_TIME 1 +#define LACP_SLOW_PERIODIC_TIME 30 +#define LACP_SHORT_TIMEOUT_TIME 3 +#define LACP_LONG_TIMEOUT_TIME 90 +#define LACP_CHURN_DETECTION_TIME 60 +#define 
LACP_AGGREGATE_WAIT_TIME 2 /* packet rate per second */ -#define LACP_PACKET_RATE 3 +#define LACP_PACKET_RATE 3 /** - ** Link Aggregation Marker Protocol definitions - **/ -#define LA_MARKER_PDU_VERSION_1 1 -#define LA_MARKER_TLV_TYPE_TERMINATOR 0x00 -#define LA_MARKER_TLV_TYPE_MARKER 0x01 -#define LA_MARKER_TLV_TYPE_MARKER_RESPONSE 0x02 +** Link Aggregation Marker Protocol definitions +**/ +#define LA_MARKER_PDU_VERSION_1 1 +#define LA_MARKER_TLV_TYPE_TERMINATOR 0x00 +#define LA_MARKER_TLV_TYPE_MARKER 0x01 +#define LA_MARKER_TLV_TYPE_MARKER_RESPONSE 0x02 -#define LA_MARKER_TLV_LENGTH 16 -#define LA_MARKER_RESPONSE_TLV_LENGTH 16 +#define LA_MARKER_TLV_LENGTH 16 +#define LA_MARKER_RESPONSE_TLV_LENGTH 16 typedef u_int32_t la_marker_transaction_id; typedef struct la_marker_pdu_s { - u_char lm_subtype; /* 0x02 */ - u_char lm_version; /* 0x01 */ - u_char lm_marker_tlv_type; /* 0x01 or 0x02 */ - u_char lm_marker_tlv_length; /* 16 */ - u_char lm_requestor_port[2]; - u_char lm_requestor_system[6]; - u_char lm_requestor_transaction_id[4]; - u_char lm_pad[2]; - u_char lm_terminator_type; /* 0x00 */ - u_char lm_terminator_length; /* 0 */ - u_char lm_reserved[90]; -} la_marker_pdu, *la_marker_pdu_ref, - la_marker_response_pdu, * la_marker_response_pdu_ref; + u_char lm_subtype; /* 0x02 */ + u_char lm_version; /* 0x01 */ + u_char lm_marker_tlv_type; /* 0x01 or 0x02 */ + u_char lm_marker_tlv_length;/* 16 */ + u_char lm_requestor_port[2]; + u_char lm_requestor_system[6]; + u_char lm_requestor_transaction_id[4]; + u_char lm_pad[2]; + u_char lm_terminator_type; /* 0x00 */ + u_char lm_terminator_length;/* 0 */ + u_char lm_reserved[90]; +} la_marker_pdu, *la_marker_pdu_ref, +la_marker_response_pdu, * la_marker_response_pdu_ref; static __inline__ void la_marker_pdu_set_requestor_port(la_marker_pdu_ref lmpdu, lacp_port port) { - lacp_uint16_set(lmpdu->lm_requestor_port, port); - return; + lacp_uint16_set(lmpdu->lm_requestor_port, port); + return; } static __inline__ lacp_port la_marker_pdu_get_requestor_port(la_marker_pdu_ref lmpdu) { - return (lacp_port)lacp_uint16_get(lmpdu->lm_requestor_port); + return (lacp_port)lacp_uint16_get(lmpdu->lm_requestor_port); } static __inline__ void -la_marker_pdu_set_requestor_transaction_id(la_marker_pdu_ref lmpdu, - la_marker_transaction_id xid) +la_marker_pdu_set_requestor_transaction_id(la_marker_pdu_ref lmpdu, + la_marker_transaction_id xid) { - lacp_uint32_set(lmpdu->lm_requestor_transaction_id, xid); - return; + lacp_uint32_set(lmpdu->lm_requestor_transaction_id, xid); + return; } static __inline__ la_marker_transaction_id la_marker_pdu_get_requestor_transaction_id(la_marker_pdu_ref lmpdu) { - return (la_marker_transaction_id)lacp_uint32_get(lmpdu->lm_requestor_transaction_id); + return (la_marker_transaction_id)lacp_uint32_get(lmpdu->lm_requestor_transaction_id); } static __inline__ void la_marker_pdu_set_requestor_system(la_marker_pdu_ref lmpdu, lacp_system sys) { - *((lacp_system_ref)lmpdu->lm_requestor_system) = sys; - return; + *((lacp_system_ref)lmpdu->lm_requestor_system) = sys; + return; } static __inline__ lacp_system la_marker_pdu_get_requestor_system(la_marker_pdu_ref lmpdu) { - return (*(lacp_system_ref)(lmpdu->lm_requestor_system)); + return *(lacp_system_ref)(lmpdu->lm_requestor_system); } #endif /* _NET_LACP_H_ */ diff --git a/bsd/net/multicast_list.c b/bsd/net/multicast_list.c index e91aeeb11..15c87fd40 100644 --- a/bsd/net/multicast_list.c +++ b/bsd/net/multicast_list.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2010 Apple Inc. All rights reserved. 
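Taken together, the LACP TLV layout and the inline accessors above compose as in this hypothetical encoder (the field values are illustrative only, not part of this patch):

#include <net/lacp.h>

static void
example_fill_actor_tlv(lacp_actor_partner_tlv_ref tlv)
{
	lacp_actor_partner_state state = 0;

	tlv->lap_tlv_type = LACPDU_TLV_TYPE_ACTOR;
	tlv->lap_length = LACPDU_ACTOR_TLV_LENGTH;
	/* Multi-byte fields are stored big-endian via lacp_uint16_set(). */
	lacp_actor_partner_tlv_set_system_priority(tlv, 0x8000);
	lacp_actor_partner_tlv_set_key(tlv, 1);
	lacp_actor_partner_tlv_set_port_priority(tlv, 0x8000);
	lacp_actor_partner_tlv_set_port(tlv, 1);
	/* Compose state bits with the inline setters above. */
	state = lacp_actor_partner_state_set_active_lacp(state);
	state = lacp_actor_partner_state_set_aggregatable(state);
	tlv->lap_state = state;
}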
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -49,8 +49,8 @@ __private_extern__ void multicast_list_init(struct multicast_list * mc_list) { - SLIST_INIT(mc_list); - return; + SLIST_INIT(mc_list); + return; } /* @@ -62,20 +62,20 @@ multicast_list_init(struct multicast_list * mc_list) __private_extern__ int multicast_list_remove(struct multicast_list * mc_list) { - int error; - struct multicast_entry * mc; - int result = 0; + int error; + struct multicast_entry * mc; + int result = 0; - while ((mc = SLIST_FIRST(mc_list)) != NULL) { - error = ifnet_remove_multicast(mc->mc_ifma); - if (error != 0) { - result = error; + while ((mc = SLIST_FIRST(mc_list)) != NULL) { + error = ifnet_remove_multicast(mc->mc_ifma); + if (error != 0) { + result = error; + } + SLIST_REMOVE_HEAD(mc_list, mc_entries); + ifmaddr_release(mc->mc_ifma); + FREE(mc, M_DEVBUF); } - SLIST_REMOVE_HEAD(mc_list, mc_entries); - ifmaddr_release(mc->mc_ifma); - FREE(mc, M_DEVBUF); - } - return (result); + return result; } /* @@ -85,7 +85,7 @@ multicast_list_remove(struct multicast_list * mc_list) * "source_ifp", and saving the result in "mc_list" * * We build a new list of multicast addresses while programming the new list. - * If that completes successfully, we remove the old list, and return the + * If that completes successfully, we remove the old list, and return the * new list. 
* * If it fails, we remove what we've added to the new list, and @@ -93,64 +93,64 @@ multicast_list_remove(struct multicast_list * mc_list) */ __private_extern__ int multicast_list_program(struct multicast_list * mc_list, - struct ifnet * source_ifp, - struct ifnet * target_ifp) + struct ifnet * source_ifp, + struct ifnet * target_ifp) { - int alen; - int error = 0; - int i; - struct multicast_entry * mc = NULL; - struct multicast_list new_mc_list; - struct sockaddr_dl source_sdl; - ifmultiaddr_t * source_multicast_list; - struct sockaddr_dl target_sdl; + int alen; + int error = 0; + int i; + struct multicast_entry * mc = NULL; + struct multicast_list new_mc_list; + struct sockaddr_dl source_sdl; + ifmultiaddr_t * source_multicast_list; + struct sockaddr_dl target_sdl; - alen = target_ifp->if_addrlen; - bzero((char *)&target_sdl, sizeof(target_sdl)); - target_sdl.sdl_len = sizeof(target_sdl); - target_sdl.sdl_family = AF_LINK; - target_sdl.sdl_type = target_ifp->if_type; - target_sdl.sdl_alen = alen; - target_sdl.sdl_index = target_ifp->if_index; + alen = target_ifp->if_addrlen; + bzero((char *)&target_sdl, sizeof(target_sdl)); + target_sdl.sdl_len = sizeof(target_sdl); + target_sdl.sdl_family = AF_LINK; + target_sdl.sdl_type = target_ifp->if_type; + target_sdl.sdl_alen = alen; + target_sdl.sdl_index = target_ifp->if_index; - /* build a new list */ - multicast_list_init(&new_mc_list); - error = ifnet_get_multicast_list(source_ifp, &source_multicast_list); - if (error != 0) { - printf("multicast_list_program: " - "ifnet_get_multicast_list(%s%d) failed, %d\n", - source_ifp->if_name, source_ifp->if_unit, error); - return (error); - } - for (i = 0; source_multicast_list[i] != NULL; i++) { - if (ifmaddr_address(source_multicast_list[i], - (struct sockaddr *)&source_sdl, - sizeof(source_sdl)) != 0 - || source_sdl.sdl_family != AF_LINK) { - continue; + /* build a new list */ + multicast_list_init(&new_mc_list); + error = ifnet_get_multicast_list(source_ifp, &source_multicast_list); + if (error != 0) { + printf("multicast_list_program: " + "ifnet_get_multicast_list(%s%d) failed, %d\n", + source_ifp->if_name, source_ifp->if_unit, error); + return error; } - mc = _MALLOC(sizeof(struct multicast_entry), M_DEVBUF, M_WAITOK); - if (mc == NULL) { - error = ENOBUFS; - break; + for (i = 0; source_multicast_list[i] != NULL; i++) { + if (ifmaddr_address(source_multicast_list[i], + (struct sockaddr *)&source_sdl, + sizeof(source_sdl)) != 0 + || source_sdl.sdl_family != AF_LINK) { + continue; + } + mc = _MALLOC(sizeof(struct multicast_entry), M_DEVBUF, M_WAITOK); + if (mc == NULL) { + error = ENOBUFS; + break; + } + bcopy(LLADDR(&source_sdl), LLADDR(&target_sdl), alen); + error = ifnet_add_multicast(target_ifp, (struct sockaddr *)&target_sdl, + &mc->mc_ifma); + if (error != 0) { + FREE(mc, M_DEVBUF); + break; + } + SLIST_INSERT_HEAD(&new_mc_list, mc, mc_entries); } - bcopy(LLADDR(&source_sdl), LLADDR(&target_sdl), alen); - error = ifnet_add_multicast(target_ifp, (struct sockaddr *)&target_sdl, - &mc->mc_ifma); if (error != 0) { - FREE(mc, M_DEVBUF); - break; + /* restore previous state */ + (void)multicast_list_remove(&new_mc_list); + } else { + /* remove the old entries, and return the new list */ + (void)multicast_list_remove(mc_list); + *mc_list = new_mc_list; } - SLIST_INSERT_HEAD(&new_mc_list, mc, mc_entries); - } - if (error != 0) { - /* restore previous state */ - (void)multicast_list_remove(&new_mc_list); - } else { - /* remove the old entries, and return the new list */ - 
(void)multicast_list_remove(mc_list); - *mc_list = new_mc_list; - } - ifnet_free_multicast_list(source_multicast_list); - return (error); + ifnet_free_multicast_list(source_multicast_list); + return error; } diff --git a/bsd/net/multicast_list.h b/bsd/net/multicast_list.h index 267fb3b07..3d216c218 100644 --- a/bsd/net/multicast_list.h +++ b/bsd/net/multicast_list.h @@ -2,7 +2,7 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -41,8 +41,8 @@ * another (VLAN, BOND) */ struct multicast_entry { - SLIST_ENTRY(multicast_entry) mc_entries; - ifmultiaddr_t mc_ifma; + SLIST_ENTRY(multicast_entry) mc_entries; + ifmultiaddr_t mc_ifma; }; SLIST_HEAD(multicast_list, multicast_entry); @@ -51,8 +51,8 @@ multicast_list_init(struct multicast_list * mc_list); int multicast_list_program(struct multicast_list * mc_list, - struct ifnet * source_ifp, - struct ifnet * target_ifp); + struct ifnet * source_ifp, + struct ifnet * target_ifp); int multicast_list_remove(struct multicast_list * mc_list); diff --git a/bsd/net/nat464_utils.c b/bsd/net/nat464_utils.c index 495ac6cfd..fd99d34b5 100644 --- a/bsd/net/nat464_utils.c +++ b/bsd/net/nat464_utils.c @@ -100,22 +100,24 @@ nat464_synthesize_ipv6(ifnet_t ifp, const struct in_addr *addrv4, struct in6_add { static const struct in6_addr well_known_prefix = { .__u6_addr.__u6_addr8 = {0x00, 0x64, 0xff, 0x9b, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00}, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, }; struct ipv6_prefix nat64prefixes[NAT64_MAX_NUM_PREFIXES]; int error = 0, i = 0; /* Below call is not optimized as it creates a copy of prefixes */ - if ((error = ifnet_get_nat64prefix(ifp, nat64prefixes)) != 0) - return (error); + if ((error = ifnet_get_nat64prefix(ifp, nat64prefixes)) != 0) { + return error; + } for (i = 0; i < NAT64_MAX_NUM_PREFIXES; i++) { - if (nat64prefixes[i].prefix_len != 0) + if (nat64prefixes[i].prefix_len != 0) { break; + } } - VERIFY (i < NAT64_MAX_NUM_PREFIXES); + VERIFY(i < NAT64_MAX_NUM_PREFIXES); struct in6_addr prefix = nat64prefixes[i].ipv6_prefix; int prefix_len = nat64prefixes[i].prefix_len; @@ -130,43 +132,44 @@ nat464_synthesize_ipv6(ifnet_t ifp, const struct in_addr *addrv4, struct in6_add IN_6TO4_RELAY_ANYCAST(ntohl(addrv4->s_addr)) || // 192.88.99.0/24 6to4 Relay Anycast IN_MULTICAST(ntohl(addrv4->s_addr)) || // 224.0.0.0/4 Multicast INADDR_BROADCAST == addrv4->s_addr) { // 255.255.255.255/32 Limited 
Broadcast - return (-1); + return -1; } /* Check for the well-known prefix */ if (prefix_len == NAT64_PREFIX_LEN_96 && IN6_ARE_ADDR_EQUAL(&prefix, &well_known_prefix)) { // https://tools.ietf.org/html/rfc6052#section-3.1 if (IN_PRIVATE(ntohl(addrv4->s_addr)) || // 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 Private-Use - IN_SHARED_ADDRESS_SPACE(ntohl(addrv4->s_addr))) // 100.64.0.0/10 Shared Address Space - return (-1); + IN_SHARED_ADDRESS_SPACE(ntohl(addrv4->s_addr))) { // 100.64.0.0/10 Shared Address Space + return -1; + } } memcpy(ptr, (char *)&prefix, prefix_len); switch (prefix_len) { - case NAT64_PREFIX_LEN_96: - memcpy(ptr + 12, ptrv4, 4); - break; - case NAT64_PREFIX_LEN_64: - memcpy(ptr + 9, ptrv4, 4); - break; - case NAT64_PREFIX_LEN_56: - memcpy(ptr + 7, ptrv4, 1); - memcpy(ptr + 9, ptrv4 + 1, 3); - break; - case NAT64_PREFIX_LEN_48: - memcpy(ptr + 6, ptrv4, 2); - memcpy(ptr + 9, ptrv4 + 2, 2); - break; - case NAT64_PREFIX_LEN_40: - memcpy(ptr + 5, ptrv4, 3); - memcpy(ptr + 9, ptrv4 + 3, 1); - break; - case NAT64_PREFIX_LEN_32: - memcpy(ptr + 4, ptrv4, 4); - break; - default: - panic("NAT64-prefix len is wrong: %u\n", prefix_len); + case NAT64_PREFIX_LEN_96: + memcpy(ptr + 12, ptrv4, 4); + break; + case NAT64_PREFIX_LEN_64: + memcpy(ptr + 9, ptrv4, 4); + break; + case NAT64_PREFIX_LEN_56: + memcpy(ptr + 7, ptrv4, 1); + memcpy(ptr + 9, ptrv4 + 1, 3); + break; + case NAT64_PREFIX_LEN_48: + memcpy(ptr + 6, ptrv4, 2); + memcpy(ptr + 9, ptrv4 + 2, 2); + break; + case NAT64_PREFIX_LEN_40: + memcpy(ptr + 5, ptrv4, 3); + memcpy(ptr + 9, ptrv4 + 3, 1); + break; + case NAT64_PREFIX_LEN_32: + memcpy(ptr + 4, ptrv4, 4); + break; + default: + panic("NAT64-prefix len is wrong: %u\n", prefix_len); } if (clat_debug) { @@ -175,7 +178,7 @@ nat464_synthesize_ipv6(ifnet_t ifp, const struct in_addr *addrv4, struct in6_add inet_ntop(AF_INET6, (void *)addr, buf, sizeof(buf)))); } - return (error); + return error; } /* Synthesize ipv4 from ipv6 */ @@ -186,15 +189,17 @@ nat464_synthesize_ipv4(ifnet_t ifp, const struct in6_addr *addr, struct in_addr int error = 0, i = 0; /* Below call is not optimized as it creates a copy of prefixes */ - if ((error = ifnet_get_nat64prefix(ifp, nat64prefixes)) != 0) + if ((error = ifnet_get_nat64prefix(ifp, nat64prefixes)) != 0) { return error; + } for (i = 0; i < NAT64_MAX_NUM_PREFIXES; i++) { - if (nat64prefixes[i].prefix_len != 0) + if (nat64prefixes[i].prefix_len != 0) { break; + } } - VERIFY (i < NAT64_MAX_NUM_PREFIXES); + VERIFY(i < NAT64_MAX_NUM_PREFIXES); struct in6_addr prefix = nat64prefixes[i].ipv6_prefix; int prefix_len = nat64prefixes[i].prefix_len; @@ -202,59 +207,60 @@ nat464_synthesize_ipv4(ifnet_t ifp, const struct in6_addr *addr, struct in_addr char *ptrv4 = __DECONST(void *, addrv4); char *ptr = __DECONST(void *, addr); - if (memcmp(addr, &prefix, prefix_len) != 0) - return (-1); + if (memcmp(addr, &prefix, prefix_len) != 0) { + return -1; + } switch (prefix_len) { - case NAT64_PREFIX_LEN_96: - memcpy(ptrv4, ptr + 12, 4); - break; - case NAT64_PREFIX_LEN_64: - memcpy(ptrv4, ptr + 9, 4); - break; - case NAT64_PREFIX_LEN_56: - memcpy(ptrv4, ptr + 7, 1); - memcpy(ptrv4 + 1, ptr + 9, 3); - break; - case NAT64_PREFIX_LEN_48: - memcpy(ptrv4, ptr + 6, 2); - memcpy(ptrv4 + 2, ptr + 9, 2); - break; - case NAT64_PREFIX_LEN_40: - memcpy(ptrv4, ptr + 5, 3); - memcpy(ptrv4 + 3, ptr + 9, 1); - break; - case NAT64_PREFIX_LEN_32: - memcpy(ptrv4, ptr + 4, 4); - break; - default: - panic("NAT64-prefix len is wrong: %u\n", - prefix_len); + case NAT64_PREFIX_LEN_96: + 
memcpy(ptrv4, ptr + 12, 4); + break; + case NAT64_PREFIX_LEN_64: + memcpy(ptrv4, ptr + 9, 4); + break; + case NAT64_PREFIX_LEN_56: + memcpy(ptrv4, ptr + 7, 1); + memcpy(ptrv4 + 1, ptr + 9, 3); + break; + case NAT64_PREFIX_LEN_48: + memcpy(ptrv4, ptr + 6, 2); + memcpy(ptrv4 + 2, ptr + 9, 2); + break; + case NAT64_PREFIX_LEN_40: + memcpy(ptrv4, ptr + 5, 3); + memcpy(ptrv4 + 3, ptr + 9, 1); + break; + case NAT64_PREFIX_LEN_32: + memcpy(ptrv4, ptr + 4, 4); + break; + default: + panic("NAT64-prefix len is wrong: %u\n", + prefix_len); } - if(clat_debug) { + if (clat_debug) { char buf[MAX_IPv4_STR_LEN]; clat_log2((LOG_DEBUG, "%s desynthesized to %s\n", __func__, inet_ntop(AF_INET, (void *)addrv4, buf, sizeof(buf)))); } - return (error); + return error; } -#define PTR_IP(field) ((int32_t)offsetof(struct ip, field)) -#define PTR_IP6(field) ((int32_t)offsetof(struct ip6_hdr, field)) +#define PTR_IP(field) ((int32_t)offsetof(struct ip, field)) +#define PTR_IP6(field) ((int32_t)offsetof(struct ip6_hdr, field)) /* - Translate the ICMP header -*/ + * Translate the ICMP header + */ int nat464_translate_icmp(int naf, void *arg) { - struct icmp *icmp4; - struct icmp6_hdr *icmp6; - uint32_t mtu; - int32_t ptr = -1; - uint8_t type; - uint8_t code; + struct icmp *icmp4; + struct icmp6_hdr *icmp6; + uint32_t mtu; + int32_t ptr = -1; + uint8_t type; + uint8_t code; switch (naf) { case AF_INET: @@ -285,7 +291,7 @@ nat464_translate_icmp(int naf, void *arg) code = ICMP_UNREACH_PORT; break; default: - return (-1); + return -1; } break; case ICMP6_PACKET_TOO_BIG: @@ -303,25 +309,25 @@ nat464_translate_icmp(int naf, void *arg) code = ICMP_PARAMPROB_ERRATPTR; ptr = ntohl(icmp6->icmp6_pptr); - if (ptr == PTR_IP6(ip6_vfc)) + if (ptr == PTR_IP6(ip6_vfc)) { ; /* preserve */ - else if (ptr == PTR_IP6(ip6_vfc) + 1) + } else if (ptr == PTR_IP6(ip6_vfc) + 1) { ptr = PTR_IP(ip_tos); - else if (ptr == PTR_IP6(ip6_plen) || - ptr == PTR_IP6(ip6_plen) + 1) + } else if (ptr == PTR_IP6(ip6_plen) || + ptr == PTR_IP6(ip6_plen) + 1) { ptr = PTR_IP(ip_len); - else if (ptr == PTR_IP6(ip6_nxt)) + } else if (ptr == PTR_IP6(ip6_nxt)) { ptr = PTR_IP(ip_p); - else if (ptr == PTR_IP6(ip6_hlim)) + } else if (ptr == PTR_IP6(ip6_hlim)) { ptr = PTR_IP(ip_ttl); - else if (ptr >= PTR_IP6(ip6_src) && - ptr < PTR_IP6(ip6_dst)) + } else if (ptr >= PTR_IP6(ip6_src) && + ptr < PTR_IP6(ip6_dst)) { ptr = PTR_IP(ip_src); - else if (ptr >= PTR_IP6(ip6_dst) && - ptr < (int32_t)sizeof(struct ip6_hdr)) + } else if (ptr >= PTR_IP6(ip6_dst) && + ptr < (int32_t)sizeof(struct ip6_hdr)) { ptr = PTR_IP(ip_dst); - else { - return (-1); + } else { + return -1; } break; case ICMP6_PARAMPROB_NEXTHEADER: @@ -329,19 +335,20 @@ nat464_translate_icmp(int naf, void *arg) code = ICMP_UNREACH_PROTOCOL; break; default: - return (-1); + return -1; } break; default: - return (-1); + return -1; } icmp6->icmp6_type = type; icmp6->icmp6_code = code; /* aligns well with a icmpv4 nextmtu */ icmp6->icmp6_mtu = htonl(mtu); /* icmpv4 pptr is a one most significant byte */ - if (ptr >= 0) + if (ptr >= 0) { icmp6->icmp6_pptr = htonl(ptr << 24); + } break; case AF_INET6: @@ -389,7 +396,7 @@ nat464_translate_icmp(int naf, void *arg) mtu += 20; break; default: - return (-1); + return -1; } break; case ICMP_TIMXCEED: @@ -405,41 +412,42 @@ nat464_translate_icmp(int naf, void *arg) code = ICMP6_PARAMPROB_HEADER; break; default: - return (-1); + return -1; } ptr = icmp4->icmp_pptr; - if (ptr == 0 || ptr == PTR_IP(ip_tos)) + if (ptr == 0 || ptr == PTR_IP(ip_tos)) { ; /* preserve */ - else if (ptr 
== PTR_IP(ip_len) || - ptr == PTR_IP(ip_len) + 1) + } else if (ptr == PTR_IP(ip_len) || + ptr == PTR_IP(ip_len) + 1) { ptr = PTR_IP6(ip6_plen); - else if (ptr == PTR_IP(ip_ttl)) + } else if (ptr == PTR_IP(ip_ttl)) { ptr = PTR_IP6(ip6_hlim); - else if (ptr == PTR_IP(ip_p)) + } else if (ptr == PTR_IP(ip_p)) { ptr = PTR_IP6(ip6_nxt); - else if (ptr >= PTR_IP(ip_src) && - ptr < PTR_IP(ip_dst)) + } else if (ptr >= PTR_IP(ip_src) && + ptr < PTR_IP(ip_dst)) { ptr = PTR_IP6(ip6_src); - else if (ptr >= PTR_IP(ip_dst) && - ptr < (int32_t)sizeof(struct ip)) + } else if (ptr >= PTR_IP(ip_dst) && + ptr < (int32_t)sizeof(struct ip)) { ptr = PTR_IP6(ip6_dst); - else { - return (-1); + } else { + return -1; } break; default: - return (-1); + return -1; } icmp4->icmp_type = type; icmp4->icmp_code = code; icmp4->icmp_nextmtu = htons(mtu); - if (ptr >= 0) + if (ptr >= 0) { icmp4->icmp_void = htonl(ptr); + } break; } - return (0); + return 0; } /* @@ -463,8 +471,8 @@ nat464_translate_icmp(int naf, void *arg) */ int nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t *off2, - uint8_t proto2, uint8_t ttl2, uint64_t tot_len2, struct nat464_addr *src, - struct nat464_addr *dst, protocol_family_t af, protocol_family_t naf) + uint8_t proto2, uint8_t ttl2, uint64_t tot_len2, struct nat464_addr *src, + struct nat464_addr *dst, protocol_family_t af, protocol_family_t naf) { struct ip *ip4 = NULL; struct ip6_hdr *ip6 = NULL; @@ -472,8 +480,9 @@ nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t int hlen = 0, olen = 0; if (af == naf || (af != AF_INET && af != AF_INET6) || - (naf != AF_INET && naf != AF_INET6)) - return (-1); + (naf != AF_INET && naf != AF_INET6)) { + return -1; + } /* old header */ olen = *off2 - off; @@ -482,8 +491,9 @@ nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t /* Modify the pbuf to accommodate the new header */ hdr = pbuf_resize_segment(pbuf, off, olen, hlen); - if (hdr == NULL) - return (-1); + if (hdr == NULL) { + return -1; + } /* translate inner ip/ip6 header */ switch (naf) { @@ -496,10 +506,11 @@ nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t ip4->ip_id = rfc6864 ? 
0 : htons(ip_randomid()); ip4->ip_off = htons(IP_DF); ip4->ip_ttl = ttl2; - if (proto2 == IPPROTO_ICMPV6) + if (proto2 == IPPROTO_ICMPV6) { ip4->ip_p = IPPROTO_ICMP; - else + } else { ip4->ip_p = proto2; + } ip4->ip_src = src->natv4addr; ip4->ip_dst = dst->natv4addr; ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2); @@ -518,14 +529,16 @@ nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t bzero(ip6, sizeof(*ip6)); ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_plen = htons(tot_len2 - olen); - if (proto2 == IPPROTO_ICMP) + if (proto2 == IPPROTO_ICMP) { ip6->ip6_nxt = IPPROTO_ICMPV6; - else + } else { ip6->ip6_nxt = proto2; - if (!ttl2 || ttl2 > IPV6_DEFHLIM) + } + if (!ttl2 || ttl2 > IPV6_DEFHLIM) { ip6->ip6_hlim = IPV6_DEFHLIM; - else + } else { ip6->ip6_hlim = ttl2; + } ip6->ip6_src = src->natv6addr; ip6->ip6_dst = dst->natv6addr; @@ -544,7 +557,7 @@ nat464_translate_icmp_ip(pbuf_t *pbuf, uint32_t off, uint64_t *tot_len, uint32_t *off2 += hlen - olen; *tot_len += hlen - olen; - return (0); + return 0; } /* * @brief The function inserts IPv6 fragmentation header @@ -567,22 +580,25 @@ nat464_insert_frag46(pbuf_t *pbuf, uint16_t ip_id_val, uint16_t frag_offset, /* Insert IPv6 fragmentation header */ if (pbuf_resize_segment(pbuf, sizeof(struct ip6_hdr), 0, - sizeof(struct ip6_frag)) == NULL) - return (-1); + sizeof(struct ip6_frag)) == NULL) { + return -1; + } p_ip6h = mtod(pbuf->pb_mbuf, struct ip6_hdr *); p_ip6_frag = (struct ip6_frag *)pbuf_contig_segment(pbuf, sizeof(struct ip6_hdr), sizeof(struct ip6_frag)); - if (p_ip6_frag == NULL) - return (-1); + if (p_ip6_frag == NULL) { + return -1; + } /* Populate IPv6 fragmentation header */ p_ip6_frag->ip6f_nxt = p_ip6h->ip6_nxt; p_ip6_frag->ip6f_reserved = 0; p_ip6_frag->ip6f_offlg = (frag_offset) << 3; - if (!is_last_frag) + if (!is_last_frag) { p_ip6_frag->ip6f_offlg |= 0x1; + } p_ip6_frag->ip6f_offlg = htons(p_ip6_frag->ip6f_offlg); p_ip6_frag->ip6f_ident = ip_id_val; @@ -591,7 +607,7 @@ nat464_insert_frag46(pbuf_t *pbuf, uint16_t ip_id_val, uint16_t frag_offset, p_ip6h->ip6_plen = htons(ntohs(p_ip6h->ip6_plen) + sizeof(struct ip6_frag)); - return (0); + return 0; } int @@ -613,15 +629,16 @@ nat464_translate_64(pbuf_t *pbuf, int off, uint8_t tos, * 2. If IPv6 stack in kernel internally generates a * message destined for a synthesized IPv6 end-point. 
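nat464_insert_frag46 above packs the fragment offset and the more-fragments flag into ip6f_offlg before byte-swapping: the offset, in 8-octet units as in IPv4, occupies the upper 13 bits, and bit 0 is the more-fragments flag (the caller also rethreads ip6_nxt through IPPROTO_FRAGMENT and grows ip6_plen). A distilled restatement of that encoding, assuming a userland context only for the htons() include:

#include <stdint.h>
#include <arpa/inet.h>	/* htons */

static uint16_t
frag46_offlg(uint16_t frag_offset /* in 8-octet units */, int is_last_frag)
{
	/* Offset goes in bits 15..3; bits 2..1 are reserved. */
	uint16_t offlg = (uint16_t)(frag_offset << 3);

	if (!is_last_frag) {
		offlg |= 0x1;	/* more-fragments flag (IP6F_MORE_FRAG) */
	}
	return htons(offlg);	/* ip6f_offlg is kept in network order */
}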
*/ - if (pbuf->pb_ifp == NULL) - return (NT_DROP); + if (pbuf->pb_ifp == NULL) { + return NT_DROP; + } if (*proto == IPPROTO_FRAGMENT) { p_frag6 = (struct ip6_frag *)pbuf_contig_segment(pbuf, sizeof(struct ip6_hdr), sizeof(struct ip6_frag)); if (p_frag6 == NULL) { ip6stat.ip6s_clat464_in_64frag_transfail_drop++; - return (NT_DROP); + return NT_DROP; } frag6 = *p_frag6; @@ -636,8 +653,9 @@ nat464_translate_64(pbuf_t *pbuf, int off, uint8_t tos, } ip4 = (struct ip *)pbuf_resize_segment(pbuf, 0, off, sizeof(*ip4)); - if (ip4 == NULL) - return (NT_DROP); + if (ip4 == NULL) { + return NT_DROP; + } ip4->ip_v = 4; ip4->ip_hl = 5; ip4->ip_tos = tos; @@ -657,8 +675,9 @@ nat464_translate_64(pbuf_t *pbuf, int off, uint8_t tos, */ ip4->ip_id = ntohl(frag6.ip6f_ident) & 0xffff; ip4->ip_id = htons(ip4->ip_id); - if(frag6.ip6f_offlg & IP6F_MORE_FRAG) + if (frag6.ip6f_offlg & IP6F_MORE_FRAG) { ip_frag_off |= IP_MF; + } ip4->ip_off = htons(ip_frag_off); } else { ip4->ip_off |= htons(IP_DF); @@ -668,8 +687,9 @@ nat464_translate_64(pbuf_t *pbuf, int off, uint8_t tos, * Defer calculating ip_sum for ICMPv6 as we do it * later in Protocol translation */ - if (*proto != IPPROTO_ICMPV6) + if (*proto != IPPROTO_ICMPV6) { ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2); + } if (clat_debug) { char buf1[MAX_IPv4_STR_LEN], buf2[MAX_IPv4_STR_LEN]; @@ -679,7 +699,7 @@ nat464_translate_64(pbuf_t *pbuf, int off, uint8_t tos, inet_ntop(AF_INET, (void *)&ip4->ip_src, buf1, sizeof(buf1)), inet_ntop(AF_INET, (void *)&ip4->ip_dst, buf2, sizeof(buf2)))); } - return (NT_NAT64); + return NT_NAT64; } /* * @brief The routine translates the IPv4 header to IPv6 header. @@ -694,7 +714,7 @@ nat464_translate_64(pbuf_t *pbuf, int off, uint8_t tos, * @param tot_len Total payload length * * @return NT_NAT64 if IP header translation is successful, else error - */ + */ int nat464_translate_46(pbuf_t *pbuf, int off, uint8_t tos, uint8_t proto, uint8_t ttl, struct in6_addr src_v6, @@ -702,16 +722,18 @@ nat464_translate_46(pbuf_t *pbuf, int off, uint8_t tos, { struct ip6_hdr *ip6; - if (pbuf->pb_ifp == NULL) - return (NT_DROP); + if (pbuf->pb_ifp == NULL) { + return NT_DROP; + } /* * Trim the buffer from head of size equal to to off (which is equal to * the size of IP header and prepend IPv6 header length to the buffer - */ + */ ip6 = (struct ip6_hdr *)pbuf_resize_segment(pbuf, 0, off, sizeof(*ip6)); - if (ip6 == NULL) - return (NT_DROP); + if (ip6 == NULL) { + return NT_DROP; + } ip6->ip6_flow = htonl((6 << 28) | (tos << 20)); ip6->ip6_plen = htons(tot_len - off); ip6->ip6_nxt = proto; @@ -727,7 +749,7 @@ nat464_translate_46(pbuf_t *pbuf, int off, uint8_t tos, inet_ntop(AF_INET6, (void *)&ip6->ip6_src, buf1, sizeof(buf1)), inet_ntop(AF_INET6, (void *)&ip6->ip6_dst, buf2, sizeof(buf2)))); } - return (NT_NAT64); + return NT_NAT64; } /* Handle the next protocol checksum */ @@ -742,7 +764,7 @@ nat464_translate_46(pbuf_t *pbuf, int off, uint8_t tos, * @param naf New family * * @return void - */ + */ int nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, struct nat464_addr *odst, uint8_t oproto, protocol_family_t af, @@ -803,14 +825,15 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, * and adjust checksums */ if (*proto == IPPROTO_ICMP) { - if (naf != PF_INET6) - return (NT_DROP); + if (naf != PF_INET6) { + return NT_DROP; + } *proto = IPPROTO_ICMPV6; - } - else if (*proto == IPPROTO_ICMPV6) { - if (naf != PF_INET) - return (NT_DROP); + } else if (*proto == IPPROTO_ICMPV6) { + if (naf != PF_INET) { + return 
NT_DROP; + } *proto = IPPROTO_ICMP; /* Recalculate IP checksum as proto field has changed */ @@ -825,15 +848,17 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, struct udphdr *uh = (struct udphdr *)pbuf_contig_segment(pbuf, hlen, sizeof(*uh)); - if (uh == NULL) - return (NT_DROP); + if (uh == NULL) { + return NT_DROP; + } if (!(*pbuf->pb_csum_flags & (CSUM_UDP | CSUM_PARTIAL)) && uh->uh_sum == 0 && af == PF_INET && naf == PF_INET6) { uh->uh_sum = pbuf_inet6_cksum(pbuf, IPPROTO_UDP, hlen, ntohs(ip6h->ip6_plen)); - if (uh->uh_sum == 0) + if (uh->uh_sum == 0) { uh->uh_sum = 0xffff; + } goto done; } @@ -844,21 +869,22 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, struct tcphdr *th = (struct tcphdr *)pbuf_contig_segment(pbuf, hlen, sizeof(*th)); - if (th == NULL) - return (NT_DROP); + if (th == NULL) { + return NT_DROP; + } psum = &th->th_sum; break; } } - /* + /* * Translate the protocol header, update IP header if needed, * calculate checksums and update the checksum flags. */ switch (*proto) { case IPPROTO_UDP: - /* Fall through */ + /* Fall through */ case IPPROTO_TCP: { /* @@ -867,8 +893,9 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, * that has not yet been one's complemented. */ if (direction == NT_OUT && - (*pbuf->pb_csum_flags & CSUM_DELAY_DATA)) + (*pbuf->pb_csum_flags & CSUM_DELAY_DATA)) { do_ones_complement = TRUE; + } nat464_addr_cksum_fixup(psum, osrc, (struct nat464_addr *)nsrc, af, naf, (*proto == IPPROTO_UDP) ? 1 : 0, do_ones_complement); @@ -878,8 +905,9 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, break; } case IPPROTO_ICMP: { - if (naf != PF_INET6) /* allow only v6 as naf for ICMP */ - return (NT_DROP); + if (naf != PF_INET6) { /* allow only v6 as naf for ICMP */ + return NT_DROP; + } struct icmp *icmph = NULL; struct icmp6_hdr *icmp6h = NULL; @@ -887,12 +915,14 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, icmph = (struct icmp*) pbuf_contig_segment(pbuf, hlen, ICMP_MINLEN); - if (icmph == NULL) - return (NT_DROP); + if (icmph == NULL) { + return NT_DROP; + } /* Translate the ICMP header */ - if (nat464_translate_icmp(PF_INET6, icmph) != 0) - return (NT_DROP); + if (nat464_translate_icmp(PF_INET6, icmph) != 0) { + return NT_DROP; + } *proto = IPPROTO_ICMPV6; icmp6h = (struct icmp6_hdr *)(uintptr_t)icmph; @@ -904,9 +934,10 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, ip2off = hlen + sizeof(*icmp6h); struct ip *iph2; iph2 = (struct ip*) pbuf_contig_segment(pbuf, ip2off, - sizeof (*iph2)); - if (iph2 == NULL) - return (NT_DROP); + sizeof(*iph2)); + if (iph2 == NULL) { + return NT_DROP; + } hlen2 = ip2off + (iph2->ip_hl << 2); tot_len2 = ntohs(iph2->ip_len); @@ -916,15 +947,16 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, if (nat464_translate_icmp_ip(pbuf, ip2off, &tot_len, &hlen2, iph2->ip_p, iph2->ip_ttl, tot_len2, (struct nat464_addr *)ndst, (struct nat464_addr *)nsrc, - PF_INET, PF_INET6) != 0) - return (NT_DROP); + PF_INET, PF_INET6) != 0) { + return NT_DROP; + } /* Update total length/payload length for outer header */ switch (naf) { case PF_INET: - iph->ip_len = htons(tot_len); + iph->ip_len = htons(tot_len); break; case PF_INET6: - ip6h->ip6_plen = htons(tot_len - hlen); + ip6h->ip6_plen = htons(tot_len - hlen); break; } iph2 = NULL; @@ -941,10 +973,11 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, icmph = NULL; icmp6h = NULL; break; - } + } case IPPROTO_ICMPV6: - { if (naf != PF_INET) /* allow only v4 as naf for 
ICMPV6 */ - return (NT_DROP); + { if (naf != PF_INET) { /* allow only v4 as naf for ICMPV6 */ + return NT_DROP; + } struct icmp6_hdr *icmp6h = NULL; struct icmp *icmph = NULL; @@ -952,12 +985,14 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, icmp6h = (struct icmp6_hdr*) pbuf_contig_segment(pbuf, hlen, sizeof(*icmp6h)); - if (icmp6h == NULL) - return (NT_DROP); + if (icmp6h == NULL) { + return NT_DROP; + } /* Translate the ICMP header */ - if (nat464_translate_icmp(PF_INET, icmp6h) != 0) - return (NT_DROP); + if (nat464_translate_icmp(PF_INET, icmp6h) != 0) { + return NT_DROP; + } *proto = IPPROTO_ICMP; icmph = (struct icmp *)(uintptr_t)icmp6h; @@ -969,9 +1004,10 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, ip2off = hlen + ICMP_MINLEN; struct ip6_hdr *iph2; iph2 = (struct ip6_hdr*) pbuf_contig_segment(pbuf, ip2off, - sizeof (*iph2)); - if (iph2 == NULL) - return (NT_DROP); + sizeof(*iph2)); + if (iph2 == NULL) { + return NT_DROP; + } /* hlen2 points to end of inner IP header from the beginning */ hlen2 = ip2off + sizeof(struct ip6_hdr); @@ -980,16 +1016,17 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, if (nat464_translate_icmp_ip(pbuf, ip2off, &tot_len, &hlen2, iph2->ip6_nxt, iph2->ip6_hlim, tot_len2, (struct nat464_addr *)ndst, (struct nat464_addr *)nsrc, - PF_INET6, PF_INET) != 0) - return (NT_DROP); + PF_INET6, PF_INET) != 0) { + return NT_DROP; + } /* Update total length for outer header */ switch (naf) { case PF_INET: - iph->ip_len = htons(tot_len); + iph->ip_len = htons(tot_len); break; case PF_INET6: - ip6h->ip6_plen = htons(tot_len - hlen); + ip6h->ip6_plen = htons(tot_len - hlen); break; } iph2 = NULL; @@ -1007,8 +1044,7 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, icmp6h = NULL; icmph = NULL; - break; - } + break;} /* * https://tools.ietf.org/html/rfc7915#section-5.1.1 @@ -1020,7 +1056,7 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, case IPPROTO_ROUTING: case IPPROTO_DSTOPTS: case IPPROTO_AH: - return (NT_DROP); + return NT_DROP; case IPPROTO_FRAGMENT: /* @@ -1032,7 +1068,7 @@ nat464_translate_proto(pbuf_t *pbuf, struct nat464_addr *osrc, break; default: - return (NT_DROP); + return NT_DROP; } done: @@ -1044,12 +1080,15 @@ done: (pbuf->pb_mbuf)->m_pkthdr.csum_tx_stuff += CLAT46_HDR_EXPANSION_OVERHD; } - if(*pbuf->pb_csum_flags & CSUM_TCP) + if (*pbuf->pb_csum_flags & CSUM_TCP) { *pbuf->pb_csum_flags |= CSUM_TCPIPV6; - if(*pbuf->pb_csum_flags & CSUM_UDP) + } + if (*pbuf->pb_csum_flags & CSUM_UDP) { *pbuf->pb_csum_flags |= CSUM_UDPIPV6; - if (*pbuf->pb_csum_flags & CSUM_FRAGMENT) + } + if (*pbuf->pb_csum_flags & CSUM_FRAGMENT) { *pbuf->pb_csum_flags |= CSUM_FRAGMENT_IPV6; + } /* Clear IPv4 checksum flags */ *pbuf->pb_csum_flags &= ~(CSUM_IP | CSUM_IP_FRAGS | CSUM_DELAY_DATA | CSUM_FRAGMENT); @@ -1063,7 +1102,7 @@ done: } #endif } - return (NT_NAT64); + return NT_NAT64; } /* Fix the proto checksum for address change */ @@ -1072,7 +1111,7 @@ nat464_addr_cksum_fixup(uint16_t *pc, struct nat464_addr *ao, struct nat464_addr protocol_family_t af, protocol_family_t naf, uint8_t u, boolean_t do_ones_complement) { /* Currently we only support v4 to v6 and vice versa */ - VERIFY (af != naf); + VERIFY(af != naf); switch (af) { case PF_INET: @@ -1080,28 +1119,28 @@ nat464_addr_cksum_fixup(uint16_t *pc, struct nat464_addr *ao, struct nat464_addr case PF_INET6: if (do_ones_complement) { *pc = ~nat464_cksum_fixup(nat464_cksum_fixup( - nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup( - 
nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup(~*pc, - ao->nataddr16[0], an->nataddr16[0], u), - ao->nataddr16[1], an->nataddr16[1], u), - 0, an->nataddr16[2], u), - 0, an->nataddr16[3], u), - 0, an->nataddr16[4], u), - 0, an->nataddr16[5], u), - 0, an->nataddr16[6], u), - 0, an->nataddr16[7], u); + nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup( + nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup(~*pc, + ao->nataddr16[0], an->nataddr16[0], u), + ao->nataddr16[1], an->nataddr16[1], u), + 0, an->nataddr16[2], u), + 0, an->nataddr16[3], u), + 0, an->nataddr16[4], u), + 0, an->nataddr16[5], u), + 0, an->nataddr16[6], u), + 0, an->nataddr16[7], u); } else { *pc = nat464_cksum_fixup(nat464_cksum_fixup( - nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup( - nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup(*pc, - ao->nataddr16[0], an->nataddr16[0], u), - ao->nataddr16[1], an->nataddr16[1], u), - 0, an->nataddr16[2], u), - 0, an->nataddr16[3], u), - 0, an->nataddr16[4], u), - 0, an->nataddr16[5], u), - 0, an->nataddr16[6], u), - 0, an->nataddr16[7], u); + nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup( + nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup(*pc, + ao->nataddr16[0], an->nataddr16[0], u), + ao->nataddr16[1], an->nataddr16[1], u), + 0, an->nataddr16[2], u), + 0, an->nataddr16[3], u), + 0, an->nataddr16[4], u), + 0, an->nataddr16[5], u), + 0, an->nataddr16[6], u), + 0, an->nataddr16[7], u); } break; } @@ -1115,16 +1154,16 @@ nat464_addr_cksum_fixup(uint16_t *pc, struct nat464_addr *ao, struct nat464_addr switch (naf) { case PF_INET: *pc = nat464_cksum_fixup(nat464_cksum_fixup( - nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup( - nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup(*pc, - ao->nataddr16[0], an->nataddr16[0], u), - ao->nataddr16[1], an->nataddr16[1], u), - ao->nataddr16[2], 0, u), - ao->nataddr16[3], 0, u), - ao->nataddr16[4], 0, u), - ao->nataddr16[5], 0, u), - ao->nataddr16[6], 0, u), - ao->nataddr16[7], 0, u); + nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup( + nat464_cksum_fixup(nat464_cksum_fixup(nat464_cksum_fixup(*pc, + ao->nataddr16[0], an->nataddr16[0], u), + ao->nataddr16[1], an->nataddr16[1], u), + ao->nataddr16[2], 0, u), + ao->nataddr16[3], 0, u), + ao->nataddr16[4], 0, u), + ao->nataddr16[5], 0, u), + ao->nataddr16[6], 0, u), + ao->nataddr16[7], 0, u); break; } break; @@ -1136,14 +1175,16 @@ nat464_cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new, uint8_t udp) { uint32_t l; - if (udp && !cksum) - return (0); + if (udp && !cksum) { + return 0; + } l = cksum + old - new; l = (l >> 16) + (l & 0xffff); l = l & 0xffff; - if (udp && !l) - return (0xffff); - return (l); + if (udp && !l) { + return 0xffff; + } + return l; } /* CLAT46 event handlers */ @@ -1151,62 +1192,61 @@ void in6_clat46_eventhdlr_callback(struct eventhandler_entry_arg arg0 __unused, in6_clat46_evhdlr_code_t in6_clat46_ev_code, pid_t epid, uuid_t euuid) { - struct kev_msg ev_msg; - struct kev_netevent_clat46_data clat46_event_data; + struct kev_msg ev_msg; + struct kev_netevent_clat46_data clat46_event_data; - bzero(&ev_msg, sizeof(ev_msg)); - bzero(&clat46_event_data, sizeof(clat46_event_data)); + bzero(&ev_msg, sizeof(ev_msg)); + bzero(&clat46_event_data, sizeof(clat46_event_data)); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_NETEVENT_SUBCLASS; - ev_msg.event_code = KEV_NETEVENT_CLAT46_EVENT; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + 
ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_NETEVENT_SUBCLASS; + ev_msg.event_code = KEV_NETEVENT_CLAT46_EVENT; - bzero(&clat46_event_data, sizeof(clat46_event_data)); - clat46_event_data.clat46_event_code = in6_clat46_ev_code; - clat46_event_data.epid = epid; - uuid_copy(clat46_event_data.euuid, euuid); + bzero(&clat46_event_data, sizeof(clat46_event_data)); + clat46_event_data.clat46_event_code = in6_clat46_ev_code; + clat46_event_data.epid = epid; + uuid_copy(clat46_event_data.euuid, euuid); - ev_msg.dv[0].data_ptr = &clat46_event_data; - ev_msg.dv[0].data_length = sizeof(clat46_event_data); + ev_msg.dv[0].data_ptr = &clat46_event_data; + ev_msg.dv[0].data_length = sizeof(clat46_event_data); - kev_post_msg(&ev_msg); + kev_post_msg(&ev_msg); } static void in6_clat46_event_callback(void *arg) { - struct kev_netevent_clat46_data *p_in6_clat46_ev = - (struct kev_netevent_clat46_data *)arg; + struct kev_netevent_clat46_data *p_in6_clat46_ev = + (struct kev_netevent_clat46_data *)arg; - EVENTHANDLER_INVOKE(&in6_clat46_evhdlr_ctxt, in6_clat46_event, - p_in6_clat46_ev->clat46_event_code, p_in6_clat46_ev->epid, - p_in6_clat46_ev->euuid); + EVENTHANDLER_INVOKE(&in6_clat46_evhdlr_ctxt, in6_clat46_event, + p_in6_clat46_ev->clat46_event_code, p_in6_clat46_ev->epid, + p_in6_clat46_ev->euuid); } -struct in6_clat46_event_nwk_wq_entry -{ - struct nwk_wq_entry nwk_wqe; - struct kev_netevent_clat46_data in6_clat46_ev_arg; +struct in6_clat46_event_nwk_wq_entry { + struct nwk_wq_entry nwk_wqe; + struct kev_netevent_clat46_data in6_clat46_ev_arg; }; void in6_clat46_event_enqueue_nwk_wq_entry(in6_clat46_evhdlr_code_t in6_clat46_event_code, pid_t epid, uuid_t euuid) { - struct in6_clat46_event_nwk_wq_entry *p_ev = NULL; + struct in6_clat46_event_nwk_wq_entry *p_ev = NULL; - MALLOC(p_ev, struct in6_clat46_event_nwk_wq_entry *, - sizeof(struct in6_clat46_event_nwk_wq_entry), - M_NWKWQ, M_WAITOK | M_ZERO); + MALLOC(p_ev, struct in6_clat46_event_nwk_wq_entry *, + sizeof(struct in6_clat46_event_nwk_wq_entry), + M_NWKWQ, M_WAITOK | M_ZERO); - p_ev->nwk_wqe.func = in6_clat46_event_callback; - p_ev->nwk_wqe.is_arg_managed = TRUE; - p_ev->nwk_wqe.arg = &p_ev->in6_clat46_ev_arg; + p_ev->nwk_wqe.func = in6_clat46_event_callback; + p_ev->nwk_wqe.is_arg_managed = TRUE; + p_ev->nwk_wqe.arg = &p_ev->in6_clat46_ev_arg; - p_ev->in6_clat46_ev_arg.clat46_event_code = in6_clat46_event_code; - p_ev->in6_clat46_ev_arg.epid = epid; - uuid_copy(p_ev->in6_clat46_ev_arg.euuid, euuid); + p_ev->in6_clat46_ev_arg.clat46_event_code = in6_clat46_event_code; + p_ev->in6_clat46_ev_arg.epid = epid; + uuid_copy(p_ev->in6_clat46_ev_arg.euuid, euuid); - nwk_wq_enqueue((struct nwk_wq_entry*)p_ev); + nwk_wq_enqueue((struct nwk_wq_entry*)p_ev); } diff --git a/bsd/net/nat464_utils.h b/bsd/net/nat464_utils.h index be938d23e..23675ac50 100644 --- a/bsd/net/nat464_utils.h +++ b/bsd/net/nat464_utils.h @@ -61,74 +61,74 @@ * Materiel Command, USAF, under agreement number F30602-01-2-0537. 
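The deeply nested nat464_addr_cksum_fixup() chains above are eight applications of one primitive: nat464_cksum_fixup() performs an RFC 1624-style incremental update, swapping a single 16-bit word of the one's-complement sum at a time, so rewriting a v4/v6 address never forces a full re-checksum of the packet. The same logic as the function shown in the previous hunk, restated outside the diff with the folding step annotated:

#include <stdint.h>

static uint16_t
cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new, uint8_t udp)
{
	uint32_t l;

	/* UDP checksum 0 means "no checksum"; leave it disabled. */
	if (udp && !cksum) {
		return 0;
	}
	l = cksum + old - new;		/* may wrap below zero ...          */
	l = (l >> 16) + (l & 0xffff);	/* ... so fold the carry back in    */
	l = l & 0xffff;
	/* A computed 0 would read as "no checksum" on UDP; send all-ones. */
	if (udp && !l) {
		return 0xffff;
	}
	return (uint16_t)l;
}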
* */ -#ifndef _NET_NAT464_UTILS_H_ -#define _NET_NAT464_UTILS_H_ +#ifndef _NET_NAT464_UTILS_H_ +#define _NET_NAT464_UTILS_H_ #include #include -#define clat_log0(x) do { log x; } while (0) -#define clat_log1(x) do { if (clat_debug >= 1) log x; } while (0) -#define clat_log2(x) do { if (clat_debug >= 2) log x; } while (0) +#define clat_log0(x) do { log x; } while (0) +#define clat_log1(x) do { if (clat_debug >= 1) log x; } while (0) +#define clat_log2(x) do { if (clat_debug >= 2) log x; } while (0) -#define CLAT46_NEEDED(x) \ - (!IN_LOOPBACK(x) && !IN_LINKLOCAL(x) && !IN_MULTICAST(x) && \ +#define CLAT46_NEEDED(x) \ + (!IN_LOOPBACK(x) && !IN_LINKLOCAL(x) && !IN_MULTICAST(x) && \ INADDR_BROADCAST != x) -#define CLAT64_NEEDED(x) \ - (!IN6_IS_ADDR_LOOPBACK(x) && !IN6_IS_ADDR_LINKLOCAL(x) && \ +#define CLAT64_NEEDED(x) \ + (!IN6_IS_ADDR_LOOPBACK(x) && !IN6_IS_ADDR_LINKLOCAL(x) && \ !IN6_IS_ADDR_MULTICAST(x)) extern int clat_debug; -enum { NT_DROP, NT_NAT64 }; -enum { NT_IN, NT_OUT }; +enum { NT_DROP, NT_NAT64 }; +enum { NT_IN, NT_OUT }; struct nat464_addr { union { - struct in_addr _v4addr; - struct in6_addr _v6addr; - uint8_t _addr8[16]; - uint16_t _addr16[8]; - uint32_t _addr32[4]; - } nat464a; /* 128-bit address */ -#define natv4addr nat464a._v4addr -#define natv6addr nat464a._v6addr -#define nataddr8 nat464a._addr8 -#define nataddr16 nat464a._addr16 -#define nataddr32 nat464a._addr32 + struct in_addr _v4addr; + struct in6_addr _v6addr; + uint8_t _addr8[16]; + uint16_t _addr16[8]; + uint32_t _addr32[4]; + } nat464a; /* 128-bit address */ +#define natv4addr nat464a._v4addr +#define natv6addr nat464a._v6addr +#define nataddr8 nat464a._addr8 +#define nataddr16 nat464a._addr16 +#define nataddr32 nat464a._addr32 }; int -nat464_translate_icmp(int , void *); +nat464_translate_icmp(int, void *); int -nat464_translate_icmp_ip(pbuf_t *, uint32_t , uint64_t *, uint32_t *, - uint8_t , uint8_t , uint64_t , struct nat464_addr *, - struct nat464_addr *, protocol_family_t , protocol_family_t ); + nat464_translate_icmp_ip(pbuf_t *, uint32_t, uint64_t *, uint32_t *, + uint8_t, uint8_t, uint64_t, struct nat464_addr *, + struct nat464_addr *, protocol_family_t, protocol_family_t ); int -nat464_synthesize_ipv6(ifnet_t, const struct in_addr *, struct in6_addr *); + nat464_synthesize_ipv6(ifnet_t, const struct in_addr *, struct in6_addr *); int -nat464_synthesize_ipv4(ifnet_t, const struct in6_addr *, struct in_addr *); + nat464_synthesize_ipv4(ifnet_t, const struct in6_addr *, struct in_addr *); int -nat464_translate_64(pbuf_t *, int, uint8_t, uint8_t *, uint8_t, struct in_addr, + nat464_translate_64(pbuf_t *, int, uint8_t, uint8_t *, uint8_t, struct in_addr, struct in_addr, uint64_t, boolean_t *); int -nat464_translate_46(pbuf_t *, int, uint8_t, uint8_t, uint8_t, struct in6_addr, + nat464_translate_46(pbuf_t *, int, uint8_t, uint8_t, uint8_t, struct in6_addr, struct in6_addr, uint64_t); int -nat464_translate_proto(pbuf_t *, struct nat464_addr *, struct nat464_addr *, + nat464_translate_proto(pbuf_t *, struct nat464_addr *, struct nat464_addr *, uint8_t, protocol_family_t, protocol_family_t, int, boolean_t); int -nat464_insert_frag46(pbuf_t *, uint16_t, uint16_t, boolean_t); + nat464_insert_frag46(pbuf_t *, uint16_t, uint16_t, boolean_t); int -nat464_remove_frag64(pbuf_t *, uint32_t, uint16_t, boolean_t); + nat464_remove_frag64(pbuf_t *, uint32_t, uint16_t, boolean_t); uint16_t -nat464_cksum_fixup(uint16_t, uint16_t , uint16_t , uint8_t); + nat464_cksum_fixup(uint16_t, uint16_t, uint16_t, uint8_t); #endif /* 
!_NET_NAT464_UTILS_H_ */ diff --git a/bsd/net/ndrv.c b/bsd/net/ndrv.c index 41603c5b2..7e13e2638 100644 --- a/bsd/net/ndrv.c +++ b/bsd/net/ndrv.c @@ -568,10 +568,10 @@ ndrv_do_detach(struct ndrv_cb *np) } } if (np->nd_laddr != NULL) { - FREE((caddr_t)np->nd_laddr, M_IFADDR); + FREE(np->nd_laddr, M_IFADDR); np->nd_laddr = NULL; } - FREE((caddr_t)np, M_PCB); + FREE(np, M_PCB); so->so_pcb = 0; so->so_flags |= SOF_PCBCLEARING; sofree(so); diff --git a/bsd/net/ndrv.h b/bsd/net/ndrv.h index a201a2fd7..dde3a2a9b 100644 --- a/bsd/net/ndrv.h +++ b/bsd/net/ndrv.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997, 1998 Apple Computer, Inc. All Rights Reserved */ @@ -37,9 +37,8 @@ #include -struct sockaddr_ndrv -{ - unsigned char snd_len; +struct sockaddr_ndrv { + unsigned char snd_len; unsigned char snd_family; unsigned char snd_name[IFNAMSIZ]; /* from if.h */ }; @@ -48,11 +47,11 @@ struct sockaddr_ndrv * Support for user-mode protocol handlers */ -#define NDRV_DEMUXTYPE_ETHERTYPE 4 -#define NDRV_DEMUXTYPE_SAP 5 -#define NDRV_DEMUXTYPE_SNAP 6 +#define NDRV_DEMUXTYPE_ETHERTYPE 4 +#define NDRV_DEMUXTYPE_SAP 5 +#define NDRV_DEMUXTYPE_SNAP 6 -#define NDRVPROTO_NDRV 0 +#define NDRVPROTO_NDRV 0 /* * Struct: ndrv_demux_desc @@ -62,7 +61,7 @@ struct sockaddr_ndrv * Fields: * type : type of protocol in data field, must be understood by * the interface family of the interface the socket is bound to - * length : length of protocol data in "data" field + * length : length of protocol data in "data" field * data : union of framing-specific data, in network byte order * ether_type : ethernet type in network byte order, assuming * ethernet type II framing @@ -86,20 +85,18 @@ struct sockaddr_ndrv * desc.data.snap[3] = 80; * desc.data.snap[4] = 9B; */ -struct ndrv_demux_desc -{ - u_int16_t type; - u_int16_t length; - union - { - u_int16_t ether_type; - u_int8_t sap[3]; - u_int8_t snap[5]; - u_int8_t other[28]; - } data; +struct ndrv_demux_desc { + u_int16_t type; + u_int16_t length; + union{ + u_int16_t ether_type; + u_int8_t sap[3]; + u_int8_t snap[5]; + u_int8_t other[28]; + } data; }; -#define NDRV_PROTOCOL_DESC_VERS 1 +#define NDRV_PROTOCOL_DESC_VERS 1 /* * Struct: ndrv_protocol_desc @@ -112,39 +109,38 @@ struct ndrv_demux_desc * demux_count : number of demux_list descriptors in demux_list; maximum of 10 * demux_list : pointer to array of demux descriptors */ -struct ndrv_protocol_desc -{ - u_int32_t 
version; - u_int32_t protocol_family; - u_int32_t demux_count; - struct ndrv_demux_desc *demux_list; +struct ndrv_protocol_desc { + u_int32_t version; + u_int32_t protocol_family; + u_int32_t demux_count; + struct ndrv_demux_desc *demux_list; }; #ifdef KERNEL_PRIVATE -/* LP64 version of ndrv_protocol_desc. all pointers +/* LP64 version of ndrv_protocol_desc. all pointers * grow when we're dealing with a 64-bit process. * WARNING - keep in sync with ndrv_protocol_desc */ struct ndrv_protocol_desc64 { - u_int32_t version; - u_int32_t protocol_family; - u_int32_t demux_count; - user64_addr_t demux_list __attribute__((aligned(8))); + u_int32_t version; + u_int32_t protocol_family; + u_int32_t demux_count; + user64_addr_t demux_list __attribute__((aligned(8))); }; struct ndrv_protocol_desc32 { - u_int32_t version; - u_int32_t protocol_family; - u_int32_t demux_count; - user32_addr_t demux_list; + u_int32_t version; + u_int32_t protocol_family; + u_int32_t demux_count; + user32_addr_t demux_list; }; #endif /* KERNEL_PRIVATE */ -#define SOL_NDRVPROTO NDRVPROTO_NDRV /* Use this socket level */ -#define NDRV_DELDMXSPEC 0x02 /* Delete the registered protocol */ -#define NDRV_SETDMXSPEC 0x04 /* Set the protocol spec */ -#define NDRV_ADDMULTICAST 0x05 /* Add a physical multicast address */ -#define NDRV_DELMULTICAST 0x06 /* Delete a phyiscal multicast */ +#define SOL_NDRVPROTO NDRVPROTO_NDRV /* Use this socket level */ +#define NDRV_DELDMXSPEC 0x02 /* Delete the registered protocol */ +#define NDRV_SETDMXSPEC 0x04 /* Set the protocol spec */ +#define NDRV_ADDMULTICAST 0x05 /* Add a physical multicast address */ +#define NDRV_DELMULTICAST 0x06 /* Delete a phyiscal multicast */ /* * SOL_NDRVPROTO - use this for the socket level when calling setsocketopt @@ -170,11 +166,11 @@ struct ndrv_protocol_desc32 { */ /* Max number of descriptions allowed by default */ -#define NDRV_DMUX_MAX_DESCR 1024 - +#define NDRV_DMUX_MAX_DESCR 1024 + /* * sysctl MIB tags at the kern.ipc.nrdv level */ -#define NRDV_MULTICAST_ADDRS_PER_SOCK 1 /* to toggle NDRV_DMUX_MAX_DESCR value */ +#define NRDV_MULTICAST_ADDRS_PER_SOCK 1 /* to toggle NDRV_DMUX_MAX_DESCR value */ -#endif /* _NET_NDRV_H */ +#endif /* _NET_NDRV_H */ diff --git a/bsd/net/ndrv_var.h b/bsd/net/ndrv_var.h index c2c208595..4b95612c9 100644 --- a/bsd/net/ndrv_var.h +++ b/bsd/net/ndrv_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
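The ndrv.h descriptors above are consumed through setsockopt() on a PF_NDRV socket: userspace fills one ndrv_demux_desc per frame pattern it wants demultiplexed, wraps the array in an ndrv_protocol_desc, and sets NDRV_SETDMXSPEC at level SOL_NDRVPROTO; the kernel-side ndrv_protocol_desc32/64 variants exist only so the kernel can copy that struct in from either process size. A hedged usage sketch claiming a single ethertype; the 0x888e ethertype and the protocol_family value are illustrative only:

#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>
#include <arpa/inet.h>	/* htons */
#include <net/ndrv.h>

static int
ndrv_claim_ethertype(int s)	/* s = socket(AF_NDRV, SOCK_RAW, 0), already bound */
{
	struct ndrv_demux_desc demux;
	struct ndrv_protocol_desc desc;

	memset(&demux, 0, sizeof(demux));
	demux.type = NDRV_DEMUXTYPE_ETHERTYPE;
	demux.length = sizeof(demux.data.ether_type);
	demux.data.ether_type = htons(0x888e);	/* e.g. EAPOL frames */

	memset(&desc, 0, sizeof(desc));
	desc.version = NDRV_PROTOCOL_DESC_VERS;
	desc.protocol_family = 1234;		/* illustrative family number */
	desc.demux_count = 1;
	desc.demux_list = &demux;

	return setsockopt(s, SOL_NDRVPROTO, NDRV_SETDMXSPEC,
	    &desc, sizeof(desc));
}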
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997, 1998 Apple Computer, Inc. All Rights Reserved */ @@ -40,11 +40,10 @@ * registered by this socket. May be variable in length. */ -struct ndrv_multiaddr -{ - struct ndrv_multiaddr* next; - ifmultiaddr_t ifma; - struct sockaddr addr; +struct ndrv_multiaddr { + struct ndrv_multiaddr* next; + ifmultiaddr_t ifma; + struct sockaddr addr; }; /* @@ -52,15 +51,14 @@ struct ndrv_multiaddr * of BIND is plugged in here. * For now, it looks like a raw_cb up front... */ -struct ndrv_cb -{ - TAILQ_ENTRY(ndrv_cb) nd_next; - struct socket *nd_socket; /* Back to the socket */ - u_int32_t nd_signature; /* Just double-checking */ +struct ndrv_cb { + TAILQ_ENTRY(ndrv_cb) nd_next; + struct socket *nd_socket; /* Back to the socket */ + u_int32_t nd_signature; /* Just double-checking */ struct sockaddr_ndrv *nd_faddr; struct sockaddr_ndrv *nd_laddr; - struct sockproto nd_proto; /* proto family, protocol */ - int nd_descrcnt; /* # elements in nd_dlist - Obsolete */ + struct sockproto nd_proto; /* proto family, protocol */ + int nd_descrcnt; /* # elements in nd_dlist - Obsolete */ TAILQ_HEAD(dlist, dlil_demux_desc) nd_dlist; /* Descr. list */ u_int32_t nd_dlist_cnt; /* Descr. list count */ struct ifnet *nd_if; /* obsolete, maintained for binary compatibility */ @@ -70,12 +68,12 @@ struct ndrv_cb short nd_unit; }; -#define sotondrvcb(so) ((struct ndrv_cb *)(so)->so_pcb) -#define NDRV_SIGNATURE 0x4e445256 /* "NDRV" */ +#define sotondrvcb(so) ((struct ndrv_cb *)(so)->so_pcb) +#define NDRV_SIGNATURE 0x4e445256 /* "NDRV" */ /* Nominal allocated space for NDRV sockets */ -#define NDRVSNDQ 8192 -#define NDRVRCVQ 8192 +#define NDRVSNDQ 8192 +#define NDRVRCVQ 8192 #endif /* PRIVATE */ -#endif /* _NET_NDRV_VAR_H */ +#endif /* _NET_NDRV_VAR_H */ diff --git a/bsd/net/necp.c b/bsd/net/necp.c index 3fd05ae1b..513cea4ae 100644 --- a/bsd/net/necp.c +++ b/bsd/net/necp.c @@ -141,98 +141,98 @@ u_int32_t necp_debug = 0; // 0=None, 1=Basic, 2=EveryMatch u_int32_t necp_session_count = 0; -#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \ - if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \ - LIST_INSERT_HEAD((head), elm, field); \ - } else { \ - LIST_FOREACH(tmpelm, head, field) { \ - if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \ - LIST_INSERT_AFTER(tmpelm, elm, field); \ - break; \ - } \ - } \ - } \ +#define LIST_INSERT_SORTED_ASCENDING(head, elm, field, sortfield, tmpelm) do { \ + if (LIST_EMPTY((head)) || (LIST_FIRST(head)->sortfield >= (elm)->sortfield)) { \ + LIST_INSERT_HEAD((head), elm, field); \ + } else { \ + LIST_FOREACH(tmpelm, head, field) { \ + if (LIST_NEXT(tmpelm, field) == NULL || LIST_NEXT(tmpelm, field)->sortfield >= (elm)->sortfield) { \ + LIST_INSERT_AFTER(tmpelm, elm, field); \ + break; \ + } \ + } \ + } \ } while (0) -#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \ - if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \ - LIST_INSERT_HEAD((head), elm, field); \ - } else { \ - LIST_FOREACH(tmpelm, head, field) { \ - if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && 
(LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \ - LIST_INSERT_AFTER(tmpelm, elm, field); \ - break; \ - } \ - } \ - } \ +#define LIST_INSERT_SORTED_TWICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, tmpelm) do { \ + if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield))) { \ + LIST_INSERT_HEAD((head), elm, field); \ + } else { \ + LIST_FOREACH(tmpelm, head, field) { \ + if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield))) { \ + LIST_INSERT_AFTER(tmpelm, elm, field); \ + break; \ + } \ + } \ + } \ } while (0) -#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \ - if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \ - LIST_INSERT_HEAD((head), elm, field); \ - } else { \ - LIST_FOREACH(tmpelm, head, field) { \ - if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \ - LIST_INSERT_AFTER(tmpelm, elm, field); \ - break; \ - } \ - } \ - } \ +#define LIST_INSERT_SORTED_THRICE_ASCENDING(head, elm, field, firstsortfield, secondsortfield, thirdsortfield, tmpelm) do { \ + if (LIST_EMPTY((head)) || (LIST_FIRST(head)->firstsortfield > (elm)->firstsortfield) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_FIRST(head)->firstsortfield == (elm)->firstsortfield) && (LIST_FIRST(head)->secondsortfield == (elm)->secondsortfield) && (LIST_FIRST(head)->thirdsortfield >= (elm)->thirdsortfield))) { \ + LIST_INSERT_HEAD((head), elm, field); \ + } else { \ + LIST_FOREACH(tmpelm, head, field) { \ + if (LIST_NEXT(tmpelm, field) == NULL || (LIST_NEXT(tmpelm, field)->firstsortfield > (elm)->firstsortfield) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield >= (elm)->secondsortfield)) || ((LIST_NEXT(tmpelm, field)->firstsortfield == (elm)->firstsortfield) && (LIST_NEXT(tmpelm, field)->secondsortfield == (elm)->secondsortfield) && (LIST_NEXT(tmpelm, field)->thirdsortfield >= (elm)->thirdsortfield))) { \ + LIST_INSERT_AFTER(tmpelm, elm, field); \ + break; \ + } \ + } \ + } \ } while (0) -#define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE) - -#define NECP_KERNEL_CONDITION_ALL_INTERFACES 0x000001 -#define NECP_KERNEL_CONDITION_BOUND_INTERFACE 0x000002 -#define 
NECP_KERNEL_CONDITION_PROTOCOL 0x000004 -#define NECP_KERNEL_CONDITION_LOCAL_START 0x000008 -#define NECP_KERNEL_CONDITION_LOCAL_END 0x000010 -#define NECP_KERNEL_CONDITION_LOCAL_PREFIX 0x000020 -#define NECP_KERNEL_CONDITION_REMOTE_START 0x000040 -#define NECP_KERNEL_CONDITION_REMOTE_END 0x000080 -#define NECP_KERNEL_CONDITION_REMOTE_PREFIX 0x000100 -#define NECP_KERNEL_CONDITION_APP_ID 0x000200 -#define NECP_KERNEL_CONDITION_REAL_APP_ID 0x000400 -#define NECP_KERNEL_CONDITION_DOMAIN 0x000800 -#define NECP_KERNEL_CONDITION_ACCOUNT_ID 0x001000 -#define NECP_KERNEL_CONDITION_POLICY_ID 0x002000 -#define NECP_KERNEL_CONDITION_PID 0x004000 -#define NECP_KERNEL_CONDITION_UID 0x008000 -#define NECP_KERNEL_CONDITION_LAST_INTERFACE 0x010000 // Only set from packets looping between interfaces -#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS 0x020000 -#define NECP_KERNEL_CONDITION_ENTITLEMENT 0x040000 -#define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT 0x080000 -#define NECP_KERNEL_CONDITION_AGENT_TYPE 0x100000 - -#define NECP_MAX_POLICY_RESULT_SIZE 512 -#define NECP_MAX_ROUTE_RULES_ARRAY_SIZE 1024 -#define NECP_MAX_CONDITIONS_ARRAY_SIZE 4096 -#define NECP_MAX_POLICY_LIST_COUNT 1024 +#define IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(x) ((x) == NECP_ROUTE_RULE_DENY_INTERFACE || (x) == NECP_ROUTE_RULE_ALLOW_INTERFACE) + +#define NECP_KERNEL_CONDITION_ALL_INTERFACES 0x000001 +#define NECP_KERNEL_CONDITION_BOUND_INTERFACE 0x000002 +#define NECP_KERNEL_CONDITION_PROTOCOL 0x000004 +#define NECP_KERNEL_CONDITION_LOCAL_START 0x000008 +#define NECP_KERNEL_CONDITION_LOCAL_END 0x000010 +#define NECP_KERNEL_CONDITION_LOCAL_PREFIX 0x000020 +#define NECP_KERNEL_CONDITION_REMOTE_START 0x000040 +#define NECP_KERNEL_CONDITION_REMOTE_END 0x000080 +#define NECP_KERNEL_CONDITION_REMOTE_PREFIX 0x000100 +#define NECP_KERNEL_CONDITION_APP_ID 0x000200 +#define NECP_KERNEL_CONDITION_REAL_APP_ID 0x000400 +#define NECP_KERNEL_CONDITION_DOMAIN 0x000800 +#define NECP_KERNEL_CONDITION_ACCOUNT_ID 0x001000 +#define NECP_KERNEL_CONDITION_POLICY_ID 0x002000 +#define NECP_KERNEL_CONDITION_PID 0x004000 +#define NECP_KERNEL_CONDITION_UID 0x008000 +#define NECP_KERNEL_CONDITION_LAST_INTERFACE 0x010000 // Only set from packets looping between interfaces +#define NECP_KERNEL_CONDITION_TRAFFIC_CLASS 0x020000 +#define NECP_KERNEL_CONDITION_ENTITLEMENT 0x040000 +#define NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT 0x080000 +#define NECP_KERNEL_CONDITION_AGENT_TYPE 0x100000 + +#define NECP_MAX_POLICY_RESULT_SIZE 512 +#define NECP_MAX_ROUTE_RULES_ARRAY_SIZE 1024 +#define NECP_MAX_CONDITIONS_ARRAY_SIZE 4096 +#define NECP_MAX_POLICY_LIST_COUNT 1024 // Cap the policy size at the max result + conditions size, with room for extra TLVs -#define NECP_MAX_POLICY_SIZE (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE) +#define NECP_MAX_POLICY_SIZE (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE) struct necp_service_registration { - LIST_ENTRY(necp_service_registration) session_chain; - LIST_ENTRY(necp_service_registration) kernel_chain; - u_int32_t service_id; + LIST_ENTRY(necp_service_registration) session_chain; + LIST_ENTRY(necp_service_registration) kernel_chain; + u_int32_t service_id; }; struct necp_session { - u_int8_t necp_fd_type; - u_int32_t control_unit; - u_int32_t session_priority; // Descriptive priority rating - u_int32_t session_order; + u_int8_t necp_fd_type; + u_int32_t control_unit; + u_int32_t session_priority; // Descriptive priority rating + u_int32_t session_order; - necp_policy_id last_policy_id; + 
necp_policy_id last_policy_id; decl_lck_mtx_data(, lock); - bool proc_locked; // Messages must come from proc_uuid - uuid_t proc_uuid; - int proc_pid; + bool proc_locked; // Messages must come from proc_uuid + uuid_t proc_uuid; + int proc_pid; - bool dirty; + bool dirty; LIST_HEAD(_policies, necp_session_policy) policies; LIST_HEAD(_services, necp_service_registration) services; @@ -260,17 +260,17 @@ struct necp_socket_info { errno_t cred_result; }; -static kern_ctl_ref necp_kctlref; -static u_int32_t necp_family; -static OSMallocTag necp_malloc_tag; -static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL; -static lck_attr_t *necp_kernel_policy_mtx_attr = NULL; -static lck_grp_t *necp_kernel_policy_mtx_grp = NULL; +static kern_ctl_ref necp_kctlref; +static u_int32_t necp_family; +static OSMallocTag necp_malloc_tag; +static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL; +static lck_attr_t *necp_kernel_policy_mtx_attr = NULL; +static lck_grp_t *necp_kernel_policy_mtx_grp = NULL; decl_lck_rw_data(static, necp_kernel_policy_lock); -static lck_grp_attr_t *necp_route_rule_grp_attr = NULL; -static lck_attr_t *necp_route_rule_mtx_attr = NULL; -static lck_grp_t *necp_route_rule_mtx_grp = NULL; +static lck_grp_attr_t *necp_route_rule_grp_attr = NULL; +static lck_attr_t *necp_route_rule_mtx_attr = NULL; +static lck_grp_t *necp_route_rule_mtx_grp = NULL; decl_lck_rw_data(static, necp_route_rule_lock); /* @@ -279,10 +279,10 @@ decl_lck_rw_data(static, necp_route_rule_lock); * the subsystem lock. */ static volatile int32_t necp_kernel_socket_policies_gencount; -#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \ - if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \ - necp_kernel_socket_policies_gencount = 1; \ - } \ +#define BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT() do { \ + if (OSIncrementAtomic(&necp_kernel_socket_policies_gencount) == (INT32_MAX - 1)) { \ + necp_kernel_socket_policies_gencount = 1; \ + } \ } while (0) static u_int32_t necp_kernel_application_policies_condition_mask; @@ -291,8 +291,8 @@ static u_int32_t necp_kernel_socket_policies_condition_mask; static size_t necp_kernel_socket_policies_count; static size_t necp_kernel_socket_policies_non_app_count; static LIST_HEAD(_necpkernelsocketconnectpolicies, necp_kernel_socket_policy) necp_kernel_socket_policies; -#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5 -#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0) +#define NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS 5 +#define NECP_SOCKET_MAP_APP_ID_TO_BUCKET(appid) (appid ? (appid%(NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS - 1) + 1) : 0) static struct necp_kernel_socket_policy **necp_kernel_socket_policies_map[NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS]; static struct necp_kernel_socket_policy **necp_kernel_socket_policies_app_layer_map; /* @@ -307,22 +307,22 @@ static u_int32_t necp_kernel_ip_output_policies_condition_mask; static size_t necp_kernel_ip_output_policies_count; static size_t necp_kernel_ip_output_policies_non_id_count; static LIST_HEAD(_necpkernelipoutputpolicies, necp_kernel_ip_output_policy) necp_kernel_ip_output_policies; -#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5 -#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? 
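[Editor's aside on the generation-count macro above, illustrative only: the counter is bumped atomically, and the pre-increment value is checked so the count wraps back to 1 before reaching INT32_MAX rather than overflowing. A standalone sketch of the same pattern; demo_ names are mine.]

#include <stdint.h>
#include <libkern/OSAtomic.h>

static volatile int32_t demo_gencount = 1;

static void
demo_bump_gencount(void)
{
	/* OSIncrementAtomic returns the value *before* the increment;
	 * wrap to 1 instead of letting the counter hit INT32_MAX. */
	if (OSIncrementAtomic(&demo_gencount) == (INT32_MAX - 1)) {
		demo_gencount = 1;
	}
}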
(id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0) +#define NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS 5 +#define NECP_IP_OUTPUT_MAP_ID_TO_BUCKET(id) (id ? (id%(NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS - 1) + 1) : 0) static struct necp_kernel_ip_output_policy **necp_kernel_ip_output_policies_map[NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS]; static struct necp_session *necp_create_session(void); static void necp_delete_session(struct necp_session *session); static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet, - u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error); + u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *error); static void necp_handle_policy_get(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset); static void necp_handle_policy_delete(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset); static void necp_handle_policy_apply_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset); static void necp_handle_policy_list_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset); static void necp_handle_policy_delete_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset); static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, - user_addr_t out_buffer, size_t out_buffer_length, int offset); + user_addr_t out_buffer, size_t out_buffer_length, int offset); static void necp_handle_set_session_priority(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset); static void necp_handle_lock_session_to_proc(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset); static void necp_handle_register_service(struct necp_session *session, u_int32_t message_id, mbuf_t packet, int offset); @@ -358,18 +358,18 @@ static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet); struct necp_uuid_id_mapping { LIST_ENTRY(necp_uuid_id_mapping) chain; - uuid_t uuid; - u_int32_t id; - u_int32_t refcount; - u_int32_t table_refcount; // Add to UUID policy table count + uuid_t uuid; + u_int32_t id; + u_int32_t refcount; + u_int32_t table_refcount; // Add to UUID policy table count }; static size_t necp_num_uuid_app_id_mappings; static bool necp_uuid_app_id_mappings_dirty; -#define NECP_UUID_APP_ID_HASH_SIZE 64 +#define NECP_UUID_APP_ID_HASH_SIZE 64 static u_long necp_uuid_app_id_hash_mask; static u_long necp_uuid_app_id_hash_num_buckets; -static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) *necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping -#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume first byte of UUIDs are evenly distributed +static LIST_HEAD(necp_uuid_id_mapping_head, necp_uuid_id_mapping) * necp_uuid_app_id_hashtbl, necp_uuid_service_id_list; // App map is real hash table, service map is just mapping +#define APPUUIDHASH(uuid) (&necp_uuid_app_id_hashtbl[uuid[0] & necp_uuid_app_id_hash_mask]) // Assume first byte of UUIDs are evenly distributed static u_int32_t necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_policy_table); static bool necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_policy_table); static struct necp_uuid_id_mapping 
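[Editor's worked example of the bucket-mapping macros in this region (both the socket app-ID map and the IP-output policy map use the same shape); the DEMO_ names are hypothetical. Bucket 0 is reserved for the "no id" case, and nonzero ids are spread across the remaining buckets, so with 5 buckets a nonzero id always lands in buckets 1 through 4.]

#define DEMO_NUM_BUCKETS	5
#define DEMO_ID_TO_BUCKET(id)	((id) ? ((id) % (DEMO_NUM_BUCKETS - 1) + 1) : 0)

/*
 * DEMO_ID_TO_BUCKET(0) == 0   (reserved bucket for "no id")
 * DEMO_ID_TO_BUCKET(1) == 2
 * DEMO_ID_TO_BUCKET(4) == 1
 * DEMO_ID_TO_BUCKET(5) == 2
 * Nonzero ids never map to bucket 0.
 */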
*necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id); @@ -381,9 +381,9 @@ static bool necp_remove_uuid_service_id_mapping(uuid_t uuid); struct necp_string_id_mapping { LIST_ENTRY(necp_string_id_mapping) chain; - char *string; - necp_app_id id; - u_int32_t refcount; + char *string; + necp_app_id id; + u_int32_t refcount; }; static LIST_HEAD(necp_string_id_mapping_list, necp_string_id_mapping) necp_account_id_list; static u_int32_t necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain); @@ -406,15 +406,15 @@ static bool necp_update_qos_marking(struct ifnet *ifp, u_int32_t route_rule_id); #define MAX_ROUTE_RULE_INTERFACES 10 struct necp_route_rule { LIST_ENTRY(necp_route_rule) chain; - u_int32_t id; - u_int32_t default_action; - u_int8_t cellular_action; - u_int8_t wifi_action; - u_int8_t wired_action; - u_int8_t expensive_action; - u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES]; - u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES]; - u_int32_t refcount; + u_int32_t id; + u_int32_t default_action; + u_int8_t cellular_action; + u_int8_t wifi_action; + u_int8_t wired_action; + u_int8_t expensive_action; + u_int exception_if_indices[MAX_ROUTE_RULE_INTERFACES]; + u_int8_t exception_if_actions[MAX_ROUTE_RULE_INTERFACES]; + u_int32_t refcount; }; static LIST_HEAD(necp_route_rule_list, necp_route_rule) necp_route_rules; static u_int32_t necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_array, u_int32_t route_rules_array_size); @@ -426,8 +426,8 @@ static inline void necp_get_parent_cred_result(proc_t proc, struct necp_socket_i #define MAX_AGGREGATE_ROUTE_RULES 16 struct necp_aggregate_route_rule { LIST_ENTRY(necp_aggregate_route_rule) chain; - u_int32_t id; - u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES]; + u_int32_t id; + u_int32_t rule_ids[MAX_AGGREGATE_ROUTE_RULES]; }; static LIST_HEAD(necp_aggregate_route_rule_list, necp_aggregate_route_rule) necp_aggregate_route_rules; static u_int32_t necp_create_aggregate_route_rule(u_int32_t *rule_ids); @@ -459,13 +459,13 @@ necp_allocate_new_session_order(u_int32_t priority, u_int32_t control_unit) // Use the control unit to decide the offset into the priority list new_order = (control_unit) + ((priority - 1) * 1000); - return (new_order); + return new_order; } static inline u_int32_t necp_get_first_order_for_priority(u_int32_t priority) { - return (((priority - 1) * 1000) + 1); + return ((priority - 1) * 1000) + 1; } // Sysctl handler @@ -479,7 +479,7 @@ sysctl_handle_necp_level SYSCTL_HANDLER_ARGS } else { necp_drop_all_order = necp_get_first_order_for_priority(necp_drop_all_level); } - return (error); + return error; } // Session fd @@ -487,11 +487,11 @@ sysctl_handle_necp_level SYSCTL_HANDLER_ARGS static int noop_read(struct fileproc *, struct uio *, int, vfs_context_t); static int noop_write(struct fileproc *, struct uio *, int, vfs_context_t); static int noop_ioctl(struct fileproc *, unsigned long, caddr_t, - vfs_context_t); + vfs_context_t); static int noop_select(struct fileproc *, int, void *, vfs_context_t); static int necp_session_op_close(struct fileglob *, vfs_context_t); static int noop_kqfilter(struct fileproc *, struct knote *, - struct kevent_internal_s *, vfs_context_t); + struct kevent_internal_s *, vfs_context_t); static const struct fileops necp_session_fd_ops = { .fo_type = DTYPE_NETPOLICY, @@ -508,38 +508,38 @@ static int noop_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) { #pragma unused(fp, uio, flags, ctx) - return 
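[Editor's sketch of the session-ordering arithmetic visible in necp_allocate_new_session_order and necp_get_first_order_for_priority above: each priority level owns a band of 1000 orders, and the control unit selects the offset within the band. The demo_ function is hypothetical.]

/* priority 1 -> orders    1..1000
 * priority 2 -> orders 1001..2000
 * priority 3 -> orders 2001..3000, and so on. */
static u_int32_t
demo_session_order(u_int32_t priority, u_int32_t control_unit)
{
	return control_unit + ((priority - 1) * 1000);
}

/* demo_session_order(3, 42) == 2042; the first order in priority 3's
 * band is ((3 - 1) * 1000) + 1 == 2001, matching
 * necp_get_first_order_for_priority(3). */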
(ENXIO); + return ENXIO; } static int noop_write(struct fileproc *fp, struct uio *uio, int flags, - vfs_context_t ctx) + vfs_context_t ctx) { #pragma unused(fp, uio, flags, ctx) - return (ENXIO); + return ENXIO; } static int noop_ioctl(struct fileproc *fp, unsigned long com, caddr_t data, - vfs_context_t ctx) + vfs_context_t ctx) { #pragma unused(fp, com, data, ctx) - return (ENOTTY); + return ENOTTY; } static int noop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) { #pragma unused(fp, which, wql, ctx) - return (ENXIO); + return ENXIO; } static int noop_kqfilter(struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx) + struct kevent_internal_s *kev, vfs_context_t ctx) { #pragma unused(fp, kn, kev, ctx) - return (ENXIO); + return ENXIO; } int @@ -588,7 +588,7 @@ done: } } - return (error); + return error; } static int @@ -602,9 +602,9 @@ necp_session_op_close(struct fileglob *fg, vfs_context_t ctx) necp_policy_mark_all_for_deletion(session); necp_policy_apply_all(session); necp_delete_session(session); - return (0); + return 0; } else { - return (ENOENT); + return ENOENT; } } @@ -628,13 +628,14 @@ necp_session_find_from_fd(int fd, struct necp_session **session) if ((*session)->necp_fd_type != necp_fd_type_session) { // Not a client fd, ignore + fp_drop(p, fd, fp, 1); error = EINVAL; goto done; } done: proc_fdunlock(p); - return (error); + return error; } static int @@ -685,7 +686,7 @@ done: } *retval = error; - return (error); + return error; } static int @@ -758,7 +759,7 @@ done: } *retval = error; - return (error); + return error; } static int @@ -789,7 +790,7 @@ necp_session_delete_policy(struct necp_session *session, struct necp_session_act necp_policy_mark_for_deletion(session, policy); done: *retval = error; - return (error); + return error; } static int @@ -798,7 +799,7 @@ necp_session_apply_all(struct necp_session *session, struct necp_session_action_ #pragma unused(uap) necp_policy_apply_all(session); *retval = 0; - return (0); + return 0; } static int @@ -859,7 +860,7 @@ done: } *retval = error; - return (error); + return error; } @@ -869,7 +870,7 @@ necp_session_delete_all(struct necp_session *session, struct necp_session_action #pragma unused(uap) necp_policy_mark_all_for_deletion(session); *retval = 0; - return (0); + return 0; } static int @@ -894,7 +895,7 @@ necp_session_set_session_priority(struct necp_session *session, struct necp_sess // Enforce special session priorities with entitlements if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL || - requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) { + requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) { errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0); if (cred_result != 0) { NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority); @@ -916,7 +917,7 @@ necp_session_set_session_priority(struct necp_session *session, struct necp_sess done: *retval = error; - return (error); + return error; } static int @@ -925,7 +926,7 @@ necp_session_lock_to_process(struct necp_session *session, struct necp_session_a #pragma unused(uap) session->proc_locked = TRUE; *retval = 0; - return (0); + return 0; } static int @@ -962,7 +963,7 @@ necp_session_register_service(struct necp_session *session, struct necp_session_ done: *retval = error; - return (error); + return error; } static int @@ -1003,7 +1004,7 @@ 
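[Editor's note on the necp_session_find_from_fd hunk above: the added fp_drop() call closes a file-reference leak, since fp_lookup() takes a reference that must be released even on the early-exit "wrong fd type" path. A hedged sketch of the balanced pattern; the type check shown is illustrative (the real code compares against necp's own fileops), and demo_ names are mine.]

static int
demo_find_session_fd(proc_t p, int fd)
{
	struct fileproc *fp = NULL;
	proc_fdlock(p);
	int error = fp_lookup(p, fd, &fp, 1);	/* takes a reference */
	if (error != 0) {
		proc_fdunlock(p);
		return error;
	}
	if (fp->f_fglob->fg_ops->fo_type != DTYPE_NETPOLICY) {
		fp_drop(p, fd, fp, 1);	/* release before bailing out */
		error = EINVAL;
	}
	/* ... on success the caller keeps the reference ... */
	proc_fdunlock(p);
	return error;
}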
necp_session_unregister_service(struct necp_session *session, struct necp_sessio done: *retval = error; - return (error); + return error; } static int @@ -1020,7 +1021,7 @@ necp_session_dump_all(struct necp_session *session, struct necp_session_action_a error = necp_handle_policy_dump_all(session, 0, NULL, uap->out_buffer, uap->out_buffer_length, 0); done: *retval = error; - return (error); + return error; } int @@ -1033,7 +1034,7 @@ necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *r error = necp_session_find_from_fd(uap->necp_fd, &session); if (error != 0) { NECPLOG(LOG_ERR, "necp_session_action find fd error (%d)", error); - return (error); + return error; } NECP_SESSION_LOCK(session); @@ -1054,62 +1055,62 @@ necp_session_action(struct proc *p, struct necp_session_action_args *uap, int *r u_int32_t action = uap->action; switch (action) { - case NECP_SESSION_ACTION_POLICY_ADD: { - return_value = necp_session_add_policy(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_POLICY_GET: { - return_value = necp_session_get_policy(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_POLICY_DELETE: { - return_value = necp_session_delete_policy(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_POLICY_APPLY_ALL: { - return_value = necp_session_apply_all(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_POLICY_LIST_ALL: { - return_value = necp_session_list_all(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_POLICY_DELETE_ALL: { - return_value = necp_session_delete_all(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: { - return_value = necp_session_set_session_priority(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: { - return_value = necp_session_lock_to_process(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_REGISTER_SERVICE: { - return_value = necp_session_register_service(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_UNREGISTER_SERVICE: { - return_value = necp_session_unregister_service(session, uap, retval); - break; - } - case NECP_SESSION_ACTION_POLICY_DUMP_ALL: { - return_value = necp_session_dump_all(session, uap, retval); - break; - } - default: { - NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action); - return_value = EINVAL; - break; - } + case NECP_SESSION_ACTION_POLICY_ADD: { + return_value = necp_session_add_policy(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_POLICY_GET: { + return_value = necp_session_get_policy(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_POLICY_DELETE: { + return_value = necp_session_delete_policy(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_POLICY_APPLY_ALL: { + return_value = necp_session_apply_all(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_POLICY_LIST_ALL: { + return_value = necp_session_list_all(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_POLICY_DELETE_ALL: { + return_value = necp_session_delete_all(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_SET_SESSION_PRIORITY: { + return_value = necp_session_set_session_priority(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC: { + return_value = necp_session_lock_to_process(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_REGISTER_SERVICE: { + return_value = necp_session_register_service(session, uap, retval); + break; + } + case 
NECP_SESSION_ACTION_UNREGISTER_SERVICE: { + return_value = necp_session_unregister_service(session, uap, retval); + break; + } + case NECP_SESSION_ACTION_POLICY_DUMP_ALL: { + return_value = necp_session_dump_all(session, uap, retval); + break; + } + default: { + NECPLOG(LOG_ERR, "necp_session_action unknown action (%u)", action); + return_value = EINVAL; + break; + } } done: NECP_SESSION_UNLOCK(session); file_drop(uap->necp_fd); - return (return_value); + return return_value; } // Kernel Control functions @@ -1247,14 +1248,14 @@ done: necp_kctlref = NULL; } } - return (result); + return result; } static errno_t necp_register_control(void) { - struct kern_ctl_reg kern_ctl; - errno_t result = 0; + struct kern_ctl_reg kern_ctl; + errno_t result = 0; // Create a tag to allocate memory necp_malloc_tag = OSMalloc_Tagalloc(NECP_CONTROL_NAME, OSMT_DEFAULT); @@ -1263,7 +1264,7 @@ necp_register_control(void) result = mbuf_tag_id_find(NECP_CONTROL_NAME, &necp_family); if (result != 0) { NECPLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result); - return (result); + return result; } bzero(&kern_ctl, sizeof(kern_ctl)); @@ -1282,10 +1283,10 @@ necp_register_control(void) result = ctl_register(&kern_ctl, &necp_kctlref); if (result != 0) { NECPLOG(LOG_ERR, "ctl_register failed: %d", result); - return (result); + return result; } - return (0); + return 0; } static void @@ -1294,12 +1295,12 @@ necp_post_change_event(struct kev_necp_policies_changed_data *necp_event_data) struct kev_msg ev_msg; memset(&ev_msg, 0, sizeof(ev_msg)); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_NECP_SUBCLASS; - ev_msg.event_code = KEV_NECP_POLICIES_CHANGED; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_NECP_SUBCLASS; + ev_msg.event_code = KEV_NECP_POLICIES_CHANGED; - ev_msg.dv[0].data_ptr = necp_event_data; + ev_msg.dv[0].data_ptr = necp_event_data; ev_msg.dv[0].data_length = sizeof(necp_event_data->changed_count); ev_msg.dv[1].data_length = 0; @@ -1313,10 +1314,10 @@ necp_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo *unitinfo = necp_create_session(); if (*unitinfo == NULL) { // Could not allocate session - return (ENOBUFS); + return ENOBUFS; } - return (0); + return 0; } static errno_t @@ -1330,7 +1331,7 @@ necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo) necp_delete_session((struct necp_session *)unitinfo); } - return (0); + return 0; } @@ -1338,10 +1339,10 @@ necp_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo) static int necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int next) { - size_t cursor = offset; - int error = 0; - u_int32_t curr_length; - u_int8_t curr_type; + size_t cursor = offset; + int error = 0; + u_int32_t curr_length; + u_int8_t curr_type; *err = 0; @@ -1350,7 +1351,7 @@ necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int nex error = mbuf_copydata(packet, cursor, sizeof(curr_type), &curr_type); if (error) { *err = ENOENT; - return (-1); + return -1; } } else { next = 0; @@ -1362,35 +1363,35 @@ necp_packet_find_tlv(mbuf_t packet, int offset, u_int8_t type, int *err, int nex error = mbuf_copydata(packet, cursor, sizeof(curr_length), &curr_length); if (error) { *err = error; - return (-1); + return -1; } cursor += (sizeof(curr_length) + curr_length); } } while (curr_type != type); - return (cursor); + return cursor; } static int 
necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, void *buff, u_int32_t *value_size) { - int error = 0; - u_int32_t length; + int error = 0; + u_int32_t length; if (tlv_offset < 0) { - return (EINVAL); + return EINVAL; } error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t), sizeof(length), &length); if (error) { - return (error); + return error; } u_int32_t total_len = m_length2(packet, NULL); if (total_len < (tlv_offset + sizeof(u_int8_t) + sizeof(length) + length)) { NECPLOG(LOG_ERR, "Got a bad TLV, length (%u) + offset (%d) < total length (%u)", - length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len); - return (EINVAL); + length, (tlv_offset + sizeof(u_int8_t) + sizeof(length)), total_len); + return EINVAL; } if (value_size != NULL) { @@ -1401,11 +1402,11 @@ necp_packet_get_tlv_at_offset(mbuf_t packet, int tlv_offset, u_int32_t buff_len, u_int32_t to_copy = (length < buff_len) ? length : buff_len; error = mbuf_copydata(packet, tlv_offset + sizeof(u_int8_t) + sizeof(length), to_copy, buff); if (error) { - return (error); + return error; } } - return (0); + return 0; } static u_int8_t * @@ -1414,34 +1415,34 @@ necp_buffer_write_packet_header(u_int8_t *buffer, u_int8_t packet_type, u_int8_t ((struct necp_packet_header *)(void *)buffer)->packet_type = packet_type; ((struct necp_packet_header *)(void *)buffer)->flags = flags; ((struct necp_packet_header *)(void *)buffer)->message_id = message_id; - return (buffer + sizeof(struct necp_packet_header)); + return buffer + sizeof(struct necp_packet_header); } static inline bool necp_buffer_write_tlv_validate(u_int8_t *cursor, u_int8_t type, u_int32_t length, - u_int8_t *buffer, u_int32_t buffer_length) + u_int8_t *buffer, u_int32_t buffer_length) { if (cursor < buffer || (uintptr_t)(cursor - buffer) > buffer_length) { NECPLOG0(LOG_ERR, "Cannot write TLV in buffer (invalid cursor)"); - return (false); + return false; } u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length); if (next_tlv <= buffer || // make sure the next TLV start doesn't overflow - (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer + (uintptr_t)(next_tlv - buffer) > buffer_length) { // make sure the next TLV has enough room in buffer NECPLOG(LOG_ERR, "Cannot write TLV in buffer (TLV length %u, buffer length %u)", - length, buffer_length); - return (false); + length, buffer_length); + return false; } - return (true); + return true; } u_int8_t * necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type, - u_int32_t length, const void *value, bool *updated, - u_int8_t *buffer, u_int32_t buffer_length) + u_int32_t length, const void *value, bool *updated, + u_int8_t *buffer, u_int32_t buffer_length) { if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) { - return (NULL); + return NULL; } u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length); if (*updated || *(u_int8_t *)(cursor) != type) { @@ -1458,16 +1459,16 @@ necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type, *updated = TRUE; } } - return (next_tlv); + return next_tlv; } u_int8_t * necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type, - u_int32_t length, const void *value, - u_int8_t *buffer, u_int32_t buffer_length) + u_int32_t length, const void *value, + u_int8_t *buffer, u_int32_t buffer_length) { if (!necp_buffer_write_tlv_validate(cursor, type, length, buffer, buffer_length)) { - return (NULL); + return NULL; 
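[Editor's orientation for the TLV code in this region, not part of the patch: NECP TLVs are laid out as a 1-byte type, a 4-byte length, then length bytes of value. A standalone walker that mirrors the bounds checks in necp_buffer_find_tlv; the demo_ name is hypothetical.]

#include <stdint.h>
#include <string.h>

static int
demo_find_tlv(const uint8_t *buf, uint32_t buf_len, uint8_t want)
{
	uint32_t cursor = 0;
	while ((uint64_t)cursor + sizeof(uint8_t) + sizeof(uint32_t) <= buf_len) {
		uint8_t type = buf[cursor];
		uint32_t length;
		memcpy(&length, buf + cursor + sizeof(uint8_t), sizeof(length));
		/* Reject a TLV whose value would run past the buffer. */
		if (length > buf_len - (cursor + sizeof(uint8_t) + sizeof(uint32_t))) {
			return -1;
		}
		if (type == want) {
			return (int)cursor;	/* offset of the matching TLV */
		}
		cursor += sizeof(uint8_t) + sizeof(uint32_t) + length;
	}
	return -1;	/* not found */
}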
} u_int8_t *next_tlv = (u_int8_t *)(cursor + sizeof(type) + sizeof(length) + length); *(u_int8_t *)(cursor) = type; @@ -1476,7 +1477,7 @@ necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type, memcpy((u_int8_t *)(cursor + sizeof(type) + sizeof(length)), value, length); } - return (next_tlv); + return next_tlv; } u_int8_t @@ -1485,11 +1486,11 @@ necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset) u_int8_t *type = NULL; if (buffer == NULL) { - return (0); + return 0; } type = (u_int8_t *)((u_int8_t *)buffer + tlv_offset); - return (type ? *type : 0); + return type ? *type : 0; } u_int32_t @@ -1498,11 +1499,11 @@ necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset) u_int32_t *length = NULL; if (buffer == NULL) { - return (0); + return 0; } length = (u_int32_t *)(void *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t)); - return (length ? *length : 0); + return length ? *length : 0; } u_int8_t * @@ -1511,7 +1512,7 @@ necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_siz u_int8_t *value = NULL; u_int32_t length = necp_buffer_get_tlv_length(buffer, tlv_offset); if (length == 0) { - return (value); + return value; } if (value_size) { @@ -1519,14 +1520,14 @@ necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_siz } value = (u_int8_t *)((u_int8_t *)buffer + tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t)); - return (value); + return value; } int necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next) { if (offset < 0) { - return (-1); + return -1; } int cursor = offset; int next_cursor; @@ -1535,7 +1536,7 @@ necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_in while (TRUE) { if ((((u_int32_t)cursor) + sizeof(curr_type) + sizeof(curr_length)) > buffer_length) { - return (-1); + return -1; } if (!next) { curr_type = necp_buffer_get_tlv_type(buffer, cursor); @@ -1545,16 +1546,16 @@ necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_in } curr_length = necp_buffer_get_tlv_length(buffer, cursor); if (curr_length > buffer_length - ((u_int32_t)cursor + sizeof(curr_type) + sizeof(curr_length))) { - return (-1); + return -1; } next_cursor = (cursor + sizeof(curr_type) + sizeof(curr_length) + curr_length); if (curr_type == type) { // check if entire TLV fits inside buffer if (((u_int32_t)next_cursor) <= buffer_length) { - return (cursor); + return cursor; } else { - return (-1); + return -1; } } cursor = next_cursor; @@ -1570,12 +1571,12 @@ necp_find_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, int offs } else if (buffer != NULL) { cursor = necp_buffer_find_tlv(buffer, buffer_length, offset, type, next); } - return (cursor); + return cursor; } static int necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, - int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size) + int tlv_offset, u_int32_t out_buffer_length, void *out_buffer, u_int32_t *value_size) { if (packet != NULL) { // Handle mbuf parsing @@ -1584,7 +1585,7 @@ necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, if (buffer == NULL) { NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset buffer is NULL"); - return (EINVAL); + return EINVAL; } // Handle buffer parsing @@ -1592,31 +1593,31 @@ necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, // Validate that buffer has enough room for any TLV if (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) > buffer_length) { 
NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV (%u < %u)", - buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t)); - return (EINVAL); + buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t)); + return EINVAL; } // Validate that buffer has enough room for this TLV u_int32_t tlv_length = necp_buffer_get_tlv_length(buffer, tlv_offset); if (tlv_length > buffer_length - (tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t))) { NECPLOG(LOG_ERR, "necp_get_tlv_at_offset buffer_length is too small for TLV of length %u (%u < %u)", - tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length); - return (EINVAL); + tlv_length, buffer_length, tlv_offset + sizeof(u_int8_t) + sizeof(u_int32_t) + tlv_length); + return EINVAL; } if (out_buffer != NULL && out_buffer_length > 0) { // Validate that out buffer is large enough for value if (out_buffer_length < tlv_length) { NECPLOG(LOG_ERR, "necp_get_tlv_at_offset out_buffer_length is too small for TLV value (%u < %u)", - out_buffer_length, tlv_length); - return (EINVAL); + out_buffer_length, tlv_length); + return EINVAL; } // Get value pointer u_int8_t *tlv_value = necp_buffer_get_tlv_value(buffer, tlv_offset, NULL); if (tlv_value == NULL) { NECPLOG0(LOG_ERR, "necp_get_tlv_at_offset tlv_value is NULL"); - return (ENOENT); + return ENOENT; } // Copy value @@ -1628,35 +1629,35 @@ necp_get_tlv_at_offset(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, *value_size = tlv_length; } - return (0); + return 0; } static int necp_get_tlv(mbuf_t packet, u_int8_t *buffer, u_int32_t buffer_length, - int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size) + int offset, u_int8_t type, u_int32_t buff_len, void *buff, u_int32_t *value_size) { int error = 0; int tlv_offset = necp_find_tlv(packet, buffer, buffer_length, offset, type, &error, 0); if (tlv_offset < 0) { - return (error); + return error; } - return (necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size)); + return necp_get_tlv_at_offset(packet, buffer, buffer_length, tlv_offset, buff_len, buff, value_size); } static bool necp_send_ctl_data(struct necp_session *session, u_int8_t *buffer, size_t buffer_size) { - int error; + int error; if (necp_kctlref == NULL || session == NULL || buffer == NULL || buffer_size == 0) { - return (FALSE); + return FALSE; } error = ctl_enqueuedata(necp_kctlref, session->control_unit, buffer, buffer_size, CTL_DATA_EOR); - return (error == 0); + return error == 0; } static bool @@ -1668,7 +1669,7 @@ necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t); MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK); if (response == NULL) { - return (FALSE); + return FALSE; } cursor = response; cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id); @@ -1679,7 +1680,7 @@ necp_send_success_response(struct necp_session *session, u_int8_t packet_type, u } FREE(response, M_NECP); - return (success); + return success; } static bool @@ -1691,7 +1692,7 @@ necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_i size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t); MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK); if (response == NULL) { - return (FALSE); + return FALSE; } cursor = 
response; cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id); @@ -1702,7 +1703,7 @@ necp_send_error_response(struct necp_session *session, u_int8_t packet_type, u_i } FREE(response, M_NECP); - return (success); + return success; } static bool @@ -1714,7 +1715,7 @@ necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, size_t response_size = sizeof(struct necp_packet_header) + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(u_int32_t); MALLOC(response, u_int8_t *, response_size, M_NECP, M_WAITOK); if (response == NULL) { - return (FALSE); + return FALSE; } cursor = response; cursor = necp_buffer_write_packet_header(cursor, packet_type, NECP_PACKET_FLAGS_RESPONSE, message_id); @@ -1725,7 +1726,7 @@ necp_send_policy_id_response(struct necp_session *session, u_int8_t packet_type, } FREE(response, M_NECP); - return (success); + return success; } static errno_t @@ -1770,60 +1771,60 @@ necp_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t packe } switch (header.packet_type) { - case NECP_PACKET_TYPE_POLICY_ADD: { - necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL); - break; - } - case NECP_PACKET_TYPE_POLICY_GET: { - necp_handle_policy_get(session, header.message_id, packet, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_POLICY_DELETE: { - necp_handle_policy_delete(session, header.message_id, packet, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_POLICY_APPLY_ALL: { - necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_POLICY_LIST_ALL: { - necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_POLICY_DELETE_ALL: { - necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_POLICY_DUMP_ALL: { - necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: { - necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: { - necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_REGISTER_SERVICE: { - necp_handle_register_service(session, header.message_id, packet, sizeof(header)); - break; - } - case NECP_PACKET_TYPE_UNREGISTER_SERVICE: { - necp_handle_unregister_service(session, header.message_id, packet, sizeof(header)); - break; - } - default: { - NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type); - necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE); - break; - } + case NECP_PACKET_TYPE_POLICY_ADD: { + necp_handle_policy_add(session, header.message_id, packet, NULL, 0, sizeof(header), NULL); + break; + } + case NECP_PACKET_TYPE_POLICY_GET: { + necp_handle_policy_get(session, header.message_id, packet, sizeof(header)); + break; + } + case NECP_PACKET_TYPE_POLICY_DELETE: { + necp_handle_policy_delete(session, header.message_id, packet, sizeof(header)); + break; + } + case NECP_PACKET_TYPE_POLICY_APPLY_ALL: { + necp_handle_policy_apply_all(session, header.message_id, packet, sizeof(header)); + break; + } + case NECP_PACKET_TYPE_POLICY_LIST_ALL: { + necp_handle_policy_list_all(session, header.message_id, packet, sizeof(header)); + break; + } + case 
NECP_PACKET_TYPE_POLICY_DELETE_ALL: { + necp_handle_policy_delete_all(session, header.message_id, packet, sizeof(header)); + break; + } + case NECP_PACKET_TYPE_POLICY_DUMP_ALL: { + necp_handle_policy_dump_all(session, header.message_id, packet, 0, 0, sizeof(header)); + break; + } + case NECP_PACKET_TYPE_SET_SESSION_PRIORITY: { + necp_handle_set_session_priority(session, header.message_id, packet, sizeof(header)); + break; + } + case NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC: { + necp_handle_lock_session_to_proc(session, header.message_id, packet, sizeof(header)); + break; + } + case NECP_PACKET_TYPE_REGISTER_SERVICE: { + necp_handle_register_service(session, header.message_id, packet, sizeof(header)); + break; + } + case NECP_PACKET_TYPE_UNREGISTER_SERVICE: { + necp_handle_unregister_service(session, header.message_id, packet, sizeof(header)); + break; + } + default: { + NECPLOG(LOG_ERR, "Received unknown message type %d", header.packet_type); + necp_send_error_response(session, header.packet_type, header.message_id, NECP_ERROR_UNKNOWN_PACKET_TYPE); + break; + } } done: mbuf_freem(packet); - return (error); + return error; } static void @@ -1837,14 +1838,14 @@ static errno_t necp_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t *len) { #pragma unused(kctlref, unit, unitinfo, opt, data, len) - return (0); + return 0; } static errno_t necp_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, void *data, size_t len) { #pragma unused(kctlref, unit, unitinfo, opt, data, len) - return (0); + return 0; } // Session Management @@ -1898,7 +1899,7 @@ necp_create_session(void) } done: - return (new_session); + return new_session; } static void @@ -1933,19 +1934,19 @@ necp_delete_session(struct necp_session *session) static inline u_int8_t necp_policy_result_get_type_from_buffer(u_int8_t *buffer, u_int32_t length) { - return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0); + return (buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0; } static inline u_int32_t necp_policy_result_get_parameter_length_from_buffer(u_int8_t *buffer, u_int32_t length) { - return ((buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0); + return (buffer && length > sizeof(u_int8_t)) ? (length - sizeof(u_int8_t)) : 0; } static inline u_int8_t * necp_policy_result_get_parameter_pointer_from_buffer(u_int8_t *buffer, u_int32_t length) { - return ((buffer && length > sizeof(u_int8_t)) ? (buffer + sizeof(u_int8_t)) : NULL); + return (buffer && length > sizeof(u_int8_t)) ? 
(buffer + sizeof(u_int8_t)) : NULL; } static bool @@ -1953,20 +1954,20 @@ necp_policy_result_requires_route_rules(u_int8_t *buffer, u_int32_t length) { u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length); if (type == NECP_POLICY_RESULT_ROUTE_RULES) { - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } static inline bool necp_address_is_valid(struct sockaddr *address) { if (address->sa_family == AF_INET) { - return (address->sa_len == sizeof(struct sockaddr_in)); + return address->sa_len == sizeof(struct sockaddr_in); } else if (address->sa_family == AF_INET6) { - return (address->sa_len == sizeof(struct sockaddr_in6)); + return address->sa_len == sizeof(struct sockaddr_in6); } else { - return (FALSE); + return FALSE; } } @@ -1977,111 +1978,111 @@ necp_policy_result_is_valid(u_int8_t *buffer, u_int32_t length) u_int8_t type = necp_policy_result_get_type_from_buffer(buffer, length); u_int32_t parameter_length = necp_policy_result_get_parameter_length_from_buffer(buffer, length); switch (type) { - case NECP_POLICY_RESULT_PASS: - case NECP_POLICY_RESULT_DROP: - case NECP_POLICY_RESULT_ROUTE_RULES: - case NECP_POLICY_RESULT_SCOPED_DIRECT: { + case NECP_POLICY_RESULT_PASS: + case NECP_POLICY_RESULT_DROP: + case NECP_POLICY_RESULT_ROUTE_RULES: + case NECP_POLICY_RESULT_SCOPED_DIRECT: { + validated = TRUE; + break; + } + case NECP_POLICY_RESULT_SKIP: + case NECP_POLICY_RESULT_SOCKET_DIVERT: + case NECP_POLICY_RESULT_SOCKET_FILTER: { + if (parameter_length >= sizeof(u_int32_t)) { validated = TRUE; - break; - } - case NECP_POLICY_RESULT_SKIP: - case NECP_POLICY_RESULT_SOCKET_DIVERT: - case NECP_POLICY_RESULT_SOCKET_FILTER: { - if (parameter_length >= sizeof(u_int32_t)) { - validated = TRUE; - } - break; - } - case NECP_POLICY_RESULT_IP_TUNNEL: { - if (parameter_length > sizeof(u_int32_t)) { - validated = TRUE; - } - break; } - case NECP_POLICY_RESULT_SOCKET_SCOPED: { - if (parameter_length > 0) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_RESULT_IP_TUNNEL: { + if (parameter_length > sizeof(u_int32_t)) { + validated = TRUE; } - case NECP_POLICY_RESULT_TRIGGER: - case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: - case NECP_POLICY_RESULT_TRIGGER_SCOPED: - case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: - case NECP_POLICY_RESULT_USE_NETAGENT: - case NECP_POLICY_RESULT_NETAGENT_SCOPED:{ - if (parameter_length >= sizeof(uuid_t)) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_RESULT_SOCKET_SCOPED: { + if (parameter_length > 0) { + validated = TRUE; } - default: { - validated = FALSE; - break; + break; + } + case NECP_POLICY_RESULT_TRIGGER: + case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: + case NECP_POLICY_RESULT_TRIGGER_SCOPED: + case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: + case NECP_POLICY_RESULT_USE_NETAGENT: + case NECP_POLICY_RESULT_NETAGENT_SCOPED:{ + if (parameter_length >= sizeof(uuid_t)) { + validated = TRUE; } + break; + } + default: { + validated = FALSE; + break; + } } if (necp_debug) { NECPLOG(LOG_DEBUG, "Policy result type %d, valid %d", type, validated); } - return (validated); + return validated; } static inline u_int8_t necp_policy_condition_get_type_from_buffer(u_int8_t *buffer, u_int32_t length) { - return ((buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0); + return (buffer && length >= sizeof(u_int8_t)) ? buffer[0] : 0; } static inline u_int8_t necp_policy_condition_get_flags_from_buffer(u_int8_t *buffer, u_int32_t length) { - return ((buffer && length >= (2 * sizeof(u_int8_t))) ? 
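[Editor's reading aid for the accessor helpers in this region, inferred from the getters rather than stated in the patch: a policy result blob is laid out as [1-byte type][parameter bytes], while a policy condition blob carries an extra flags byte, [1-byte type][1-byte flags][value bytes]; the NECP_POLICY_CONDITION_FLAGS_NEGATIVE bit in that flags byte appears to mark a negated condition. A minimal mirror of the flags accessor, with a hypothetical demo_ name.]

#include <stdint.h>

static uint8_t
demo_condition_flags(const uint8_t *buf, uint32_t len)
{
	/* The flags byte is only present when the blob covers both
	 * header bytes; otherwise report no flags, as the kernel does. */
	return (buf && len >= 2 * sizeof(uint8_t)) ? buf[1] : 0;
}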
buffer[1] : 0); + return (buffer && length >= (2 * sizeof(u_int8_t))) ? buffer[1] : 0; } static inline u_int32_t necp_policy_condition_get_value_length_from_buffer(u_int8_t *buffer, u_int32_t length) { - return ((buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0); + return (buffer && length >= (2 * sizeof(u_int8_t))) ? (length - (2 * sizeof(u_int8_t))) : 0; } static inline u_int8_t * necp_policy_condition_get_value_pointer_from_buffer(u_int8_t *buffer, u_int32_t length) { - return ((buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL); + return (buffer && length > (2 * sizeof(u_int8_t))) ? (buffer + (2 * sizeof(u_int8_t))) : NULL; } static inline bool necp_policy_condition_is_default(u_int8_t *buffer, u_int32_t length) { - return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT); + return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_DEFAULT; } static inline bool necp_policy_condition_is_application(u_int8_t *buffer, u_int32_t length) { - return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION); + return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_APPLICATION; } static inline bool necp_policy_condition_is_real_application(u_int8_t *buffer, u_int32_t length) { - return (necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION); + return necp_policy_condition_get_type_from_buffer(buffer, length) == NECP_POLICY_CONDITION_REAL_APPLICATION; } static inline bool necp_policy_condition_requires_application(u_int8_t *buffer, u_int32_t length) { u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length); - return (type == NECP_POLICY_CONDITION_REAL_APPLICATION); + return type == NECP_POLICY_CONDITION_REAL_APPLICATION; } static inline bool necp_policy_condition_requires_real_application(u_int8_t *buffer, u_int32_t length) { u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length); - return (type == NECP_POLICY_CONDITION_ENTITLEMENT); + return type == NECP_POLICY_CONDITION_ENTITLEMENT; } static bool @@ -2089,115 +2090,115 @@ necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t poli { bool validated = FALSE; bool result_cannot_have_ip_layer = (policy_result_type == NECP_POLICY_RESULT_SOCKET_DIVERT || - policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER || - policy_result_type == NECP_POLICY_RESULT_TRIGGER || - policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED || - policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED || - policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED || - policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED || - policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES || - policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT || - policy_result_type == NECP_POLICY_RESULT_NETAGENT_SCOPED || - policy_result_type == NECP_POLICY_RESULT_SCOPED_DIRECT) ? 
TRUE : FALSE; + policy_result_type == NECP_POLICY_RESULT_SOCKET_FILTER || + policy_result_type == NECP_POLICY_RESULT_TRIGGER || + policy_result_type == NECP_POLICY_RESULT_TRIGGER_IF_NEEDED || + policy_result_type == NECP_POLICY_RESULT_TRIGGER_SCOPED || + policy_result_type == NECP_POLICY_RESULT_NO_TRIGGER_SCOPED || + policy_result_type == NECP_POLICY_RESULT_SOCKET_SCOPED || + policy_result_type == NECP_POLICY_RESULT_ROUTE_RULES || + policy_result_type == NECP_POLICY_RESULT_USE_NETAGENT || + policy_result_type == NECP_POLICY_RESULT_NETAGENT_SCOPED || + policy_result_type == NECP_POLICY_RESULT_SCOPED_DIRECT) ? TRUE : FALSE; u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(buffer, length); u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(buffer, length); u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length); u_int8_t flags = necp_policy_condition_get_flags_from_buffer(buffer, length); switch (type) { - case NECP_POLICY_CONDITION_APPLICATION: - case NECP_POLICY_CONDITION_REAL_APPLICATION: { - if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) && - condition_length >= sizeof(uuid_t) && - condition_value != NULL && - !uuid_is_null(condition_value)) { - validated = TRUE; - } - break; - } - case NECP_POLICY_CONDITION_DOMAIN: - case NECP_POLICY_CONDITION_ACCOUNT: - case NECP_POLICY_CONDITION_BOUND_INTERFACE: { - if (condition_length > 0) { - validated = TRUE; - } - break; + case NECP_POLICY_CONDITION_APPLICATION: + case NECP_POLICY_CONDITION_REAL_APPLICATION: { + if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) && + condition_length >= sizeof(uuid_t) && + condition_value != NULL && + !uuid_is_null(condition_value)) { + validated = TRUE; } - case NECP_POLICY_CONDITION_TRAFFIC_CLASS: { - if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_CONDITION_DOMAIN: + case NECP_POLICY_CONDITION_ACCOUNT: + case NECP_POLICY_CONDITION_BOUND_INTERFACE: { + if (condition_length > 0) { + validated = TRUE; } - case NECP_POLICY_CONDITION_DEFAULT: - case NECP_POLICY_CONDITION_ALL_INTERFACES: - case NECP_POLICY_CONDITION_ENTITLEMENT: { - if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_CONDITION_TRAFFIC_CLASS: { + if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) { + validated = TRUE; } - case NECP_POLICY_CONDITION_IP_PROTOCOL: { - if (condition_length >= sizeof(u_int16_t)) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_CONDITION_DEFAULT: + case NECP_POLICY_CONDITION_ALL_INTERFACES: + case NECP_POLICY_CONDITION_ENTITLEMENT: { + if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE)) { + validated = TRUE; } - case NECP_POLICY_CONDITION_PID: { - if (condition_length >= sizeof(pid_t) && - condition_value != NULL && - *((pid_t *)(void *)condition_value) != 0) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_CONDITION_IP_PROTOCOL: { + if (condition_length >= sizeof(u_int16_t)) { + validated = TRUE; } - case NECP_POLICY_CONDITION_UID: { - if (condition_length >= sizeof(uid_t)) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_CONDITION_PID: { + if (condition_length >= sizeof(pid_t) && + condition_value != NULL && + *((pid_t *)(void *)condition_value) != 0) { + validated = TRUE; } - case NECP_POLICY_CONDITION_LOCAL_ADDR: - case NECP_POLICY_CONDITION_REMOTE_ADDR: { - if (!result_cannot_have_ip_layer && 
condition_length >= sizeof(struct necp_policy_condition_addr) && - necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_CONDITION_UID: { + if (condition_length >= sizeof(uid_t)) { + validated = TRUE; } - case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: - case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: { - if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) && - necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) && - necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_CONDITION_LOCAL_ADDR: + case NECP_POLICY_CONDITION_REMOTE_ADDR: { + if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr) && + necp_address_is_valid(&((struct necp_policy_condition_addr *)(void *)condition_value)->address.sa)) { + validated = TRUE; } - case NECP_POLICY_CONDITION_AGENT_TYPE: { - if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) && - condition_length >= sizeof(struct necp_policy_condition_agent_type)) { - validated = TRUE; - } - break; + break; + } + case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: + case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: { + if (!result_cannot_have_ip_layer && condition_length >= sizeof(struct necp_policy_condition_addr_range) && + necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->start_address.sa) && + necp_address_is_valid(&((struct necp_policy_condition_addr_range *)(void *)condition_value)->end_address.sa)) { + validated = TRUE; } - default: { - validated = FALSE; - break; + break; + } + case NECP_POLICY_CONDITION_AGENT_TYPE: { + if (!(flags & NECP_POLICY_CONDITION_FLAGS_NEGATIVE) && + condition_length >= sizeof(struct necp_policy_condition_agent_type)) { + validated = TRUE; } + break; + } + default: { + validated = FALSE; + break; + } } if (necp_debug) { NECPLOG(LOG_DEBUG, "Policy condition type %d, valid %d", type, validated); } - return (validated); + return validated; } static bool necp_policy_route_rule_is_default(u_int8_t *buffer, u_int32_t length) { - return (necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 && - necp_policy_condition_get_flags_from_buffer(buffer, length) == 0); + return necp_policy_condition_get_value_length_from_buffer(buffer, length) == 0 && + necp_policy_condition_get_flags_from_buffer(buffer, length) == 0; } static bool @@ -2206,56 +2207,56 @@ necp_policy_route_rule_is_valid(u_int8_t *buffer, u_int32_t length) bool validated = FALSE; u_int8_t type = necp_policy_condition_get_type_from_buffer(buffer, length); switch (type) { - case NECP_ROUTE_RULE_ALLOW_INTERFACE: { - validated = TRUE; - break; - } - case NECP_ROUTE_RULE_DENY_INTERFACE: { - validated = TRUE; - break; - } - case NECP_ROUTE_RULE_QOS_MARKING: { - validated = TRUE; - break; - } - case NECP_ROUTE_RULE_DENY_LQM_ABORT: { - validated = TRUE; - break; - } - default: { - validated = FALSE; - break; - } + case NECP_ROUTE_RULE_ALLOW_INTERFACE: { + validated = TRUE; + break; + } + case NECP_ROUTE_RULE_DENY_INTERFACE: { + validated = TRUE; + break; + } + case NECP_ROUTE_RULE_QOS_MARKING: { + validated = TRUE; + break; + } + case NECP_ROUTE_RULE_DENY_LQM_ABORT: { + validated = TRUE; + break; + } + default: { + validated = FALSE; + break; + } } if (necp_debug) { 
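[Editor's standalone restatement of the sockaddr sanity check these address conditions rely on (same logic as necp_address_is_valid above, shown for clarity): the declared sa_len must match the structure size implied by sa_family before any address bytes are trusted. The demo_ name is hypothetical.]

#include <stdbool.h>
#include <sys/socket.h>
#include <netinet/in.h>

static bool
demo_address_is_valid(const struct sockaddr *sa)
{
	if (sa->sa_family == AF_INET) {
		return sa->sa_len == sizeof(struct sockaddr_in);
	} else if (sa->sa_family == AF_INET6) {
		return sa->sa_len == sizeof(struct sockaddr_in6);
	}
	return false;
}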
NECPLOG(LOG_DEBUG, "Policy route rule type %d, valid %d", type, validated); } - return (validated); + return validated; } static int necp_get_posix_error_for_necp_error(int response_error) { switch (response_error) { - case NECP_ERROR_UNKNOWN_PACKET_TYPE: - case NECP_ERROR_INVALID_TLV: - case NECP_ERROR_POLICY_RESULT_INVALID: - case NECP_ERROR_POLICY_CONDITIONS_INVALID: - case NECP_ERROR_ROUTE_RULES_INVALID: { - return (EINVAL); - } - case NECP_ERROR_POLICY_ID_NOT_FOUND: { - return (ENOENT); - } - case NECP_ERROR_INVALID_PROCESS: { - return (EPERM); - } - case NECP_ERROR_INTERNAL: - default: { - return (ENOMEM); - } + case NECP_ERROR_UNKNOWN_PACKET_TYPE: + case NECP_ERROR_INVALID_TLV: + case NECP_ERROR_POLICY_RESULT_INVALID: + case NECP_ERROR_POLICY_CONDITIONS_INVALID: + case NECP_ERROR_ROUTE_RULES_INVALID: { + return EINVAL; + } + case NECP_ERROR_POLICY_ID_NOT_FOUND: { + return ENOENT; + } + case NECP_ERROR_INVALID_PROCESS: { + return EPERM; + } + case NECP_ERROR_INTERNAL: + default: { + return ENOMEM; + } } } @@ -2284,7 +2285,7 @@ necp_handle_set_session_priority(struct necp_session *session, u_int32_t message // Enforce special session priorities with entitlements if (requested_session_priority == NECP_SESSION_PRIORITY_CONTROL || - requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) { + requested_session_priority == NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL) { errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0); if (cred_result != 0) { NECPLOG(LOG_ERR, "Session does not hold necessary entitlement to claim priority level %d", requested_session_priority); @@ -2417,7 +2418,7 @@ fail: static necp_policy_id necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_t packet, - u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error) + u_int8_t *tlv_buffer, size_t tlv_buffer_length, int offset, int *return_error) { bool has_default_condition = FALSE; bool has_non_default_condition = FALSE; @@ -2485,8 +2486,8 @@ necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_ if (necp_policy_result_requires_route_rules(policy_result, policy_result_size)) { // Read route rules conditions for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0); - cursor >= 0; - cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) { + cursor >= 0; + cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) { u_int32_t route_rule_size = 0; necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size); if (route_rule_size > 0) { @@ -2513,8 +2514,8 @@ necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_ route_rules_array_cursor = 0; for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_ROUTE_RULE, &error, 0); - cursor >= 0; - cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) { + cursor >= 0; + cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_ROUTE_RULE, &error, 1)) { u_int8_t route_rule_type = NECP_TLV_ROUTE_RULE; u_int32_t route_rule_size = 0; necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &route_rule_size); @@ -2552,8 +2553,8 @@ necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_ // Read policy conditions for (cursor = 
necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0); - cursor >= 0; - cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) { + cursor >= 0; + cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) { u_int32_t condition_size = 0; necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size); @@ -2581,8 +2582,8 @@ necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_ conditions_array_cursor = 0; for (cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, offset, NECP_TLV_POLICY_CONDITION, &error, 0); - cursor >= 0; - cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) { + cursor >= 0; + cursor = necp_find_tlv(packet, tlv_buffer, tlv_buffer_length, cursor, NECP_TLV_POLICY_CONDITION, &error, 1)) { u_int8_t condition_type = NECP_TLV_POLICY_CONDITION; u_int32_t condition_size = 0; necp_get_tlv_at_offset(packet, tlv_buffer, tlv_buffer_length, cursor, 0, NULL, &condition_size); @@ -2654,7 +2655,7 @@ necp_handle_policy_add(struct necp_session *session, u_int32_t message_id, mbuf_ if (packet != NULL) { necp_send_policy_id_response(session, NECP_PACKET_TYPE_POLICY_ADD, message_id, policy->local_id); } - return (policy->local_id); + return policy->local_id; fail: if (policy_result != NULL) { @@ -2673,7 +2674,7 @@ fail: if (return_error != NULL) { *return_error = necp_get_posix_error_for_necp_error(response_error); } - return (0); + return 0; } static void @@ -2841,10 +2842,10 @@ necp_policy_get_new_id(struct necp_session *session) if (newid == 0) { NECPLOG0(LOG_ERR, "Allocate policy id failed.\n"); - return (0); + return 0; } - return (newid); + return newid; } /* @@ -2945,7 +2946,7 @@ necp_policy_get_new_id(struct necp_session *session) */ static int necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, mbuf_t packet, - user_addr_t out_buffer, size_t out_buffer_length, int offset) + user_addr_t out_buffer, size_t out_buffer_length, int offset) { #pragma unused(offset) struct necp_kernel_socket_policy *policy = NULL; @@ -2963,12 +2964,12 @@ necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, bool error_occured = false; u_int32_t response_error = NECP_ERROR_INTERNAL; -#define REPORT_ERROR(error) error_occured = true; \ - response_error = error; \ - goto done +#define REPORT_ERROR(error) error_occured = true; \ + response_error = error; \ + goto done -#define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \ - REPORT_ERROR(error) +#define UNLOCK_AND_REPORT_ERROR(lock, error) lck_rw_done(lock); \ + REPORT_ERROR(error) errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0); if (cred_result != 0) { @@ -2985,7 +2986,7 @@ necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, policy_count = necp_kernel_application_policies_count; - MALLOC(tlv_buffer_pointers, u_int8_t **, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO); + MALLOC(tlv_buffer_pointers, u_int8_t * *, sizeof(u_int8_t *) * policy_count, M_NECP, M_NOWAIT | M_ZERO); if (tlv_buffer_pointers == NULL) { NECPLOG(LOG_DEBUG, "Failed to allocate tlv_buffer_pointers (%u bytes)", sizeof(u_int8_t *) * policy_count); UNLOCK_AND_REPORT_ERROR(&necp_kernel_policy_lock, NECP_ERROR_INTERNAL); @@ -3013,12 +3014,12 @@ necp_handle_policy_dump_all(struct 
necp_session *session, u_int32_t message_id, NECPLOG(LOG_DEBUG, "Policy: process: %s, result: %s", proc_name_string, result_string); } - u_int32_t total_allocated_bytes = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->id) + // NECP_TLV_POLICY_ID - sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->order) + // NECP_TLV_POLICY_ORDER - sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->session_order) + // NECP_TLV_POLICY_SESSION_ORDER - sizeof(u_int8_t) + sizeof(u_int32_t) + result_string_len + // NECP_TLV_POLICY_RESULT_STRING - sizeof(u_int8_t) + sizeof(u_int32_t) + proc_name_len + // NECP_TLV_POLICY_OWNER - sizeof(u_int8_t) + sizeof(u_int32_t); // NECP_TLV_POLICY_CONDITION + u_int32_t total_allocated_bytes = sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->id) + // NECP_TLV_POLICY_ID + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->order) + // NECP_TLV_POLICY_ORDER + sizeof(u_int8_t) + sizeof(u_int32_t) + sizeof(policy->session_order) + // NECP_TLV_POLICY_SESSION_ORDER + sizeof(u_int8_t) + sizeof(u_int32_t) + result_string_len + // NECP_TLV_POLICY_RESULT_STRING + sizeof(u_int8_t) + sizeof(u_int32_t) + proc_name_len + // NECP_TLV_POLICY_OWNER + sizeof(u_int8_t) + sizeof(u_int32_t); // NECP_TLV_POLICY_CONDITION // We now traverse the condition_mask to see how much space we need to allocate u_int32_t condition_mask = policy->condition_mask; @@ -3149,55 +3150,55 @@ necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, } if (condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_BOUND_INTERFACE, strlen(if_name) + 1, - if_name, cond_buf, condition_tlv_length); + if_name, cond_buf, condition_tlv_length); } if (condition_mask & NECP_KERNEL_CONDITION_PROTOCOL) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_IP_PROTOCOL, sizeof(policy->cond_protocol), &policy->cond_protocol, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } if (condition_mask & NECP_KERNEL_CONDITION_APP_ID) { struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_app_id); if (entry != NULL) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_APPLICATION, sizeof(entry->uuid), entry->uuid, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } } if (condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) { struct necp_uuid_id_mapping *entry = necp_uuid_lookup_uuid_with_app_id_locked(policy->cond_real_app_id); if (entry != NULL) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REAL_APPLICATION, sizeof(entry->uuid), entry->uuid, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } } if (condition_mask & NECP_KERNEL_CONDITION_DOMAIN) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_DOMAIN, strlen(policy->cond_domain) + 1, policy->cond_domain, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } if (condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) { if (account_id_entry != NULL) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ACCOUNT, strlen(account_id_entry->string) + 1, account_id_entry->string, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } } if (condition_mask & NECP_KERNEL_CONDITION_PID) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PID, sizeof(policy->cond_pid), 
&policy->cond_pid, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } if (condition_mask & NECP_KERNEL_CONDITION_UID) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_UID, sizeof(policy->cond_uid), &policy->cond_uid, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } if (condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_TRAFFIC_CLASS, sizeof(policy->cond_traffic_class), &policy->cond_traffic_class, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } if (condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, 0, "", - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } if (condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_ENTITLEMENT, strlen(policy->cond_custom_entitlement) + 1, policy->cond_custom_entitlement, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_START) { if (condition_mask & NECP_KERNEL_CONDITION_LOCAL_END) { @@ -3205,13 +3206,13 @@ necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, memcpy(&range.start_address, &policy->cond_local_start, sizeof(policy->cond_local_start)); memcpy(&range.end_address, &policy->cond_local_end, sizeof(policy->cond_local_end)); cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE, sizeof(range), &range, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } else { struct necp_policy_condition_addr addr; addr.prefix = policy->cond_local_prefix; memcpy(&addr.address, &policy->cond_local_start, sizeof(policy->cond_local_start)); cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_LOCAL_ADDR, sizeof(addr), &addr, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } } if (condition_mask & NECP_KERNEL_CONDITION_REMOTE_START) { @@ -3220,19 +3221,19 @@ necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, memcpy(&range.start_address, &policy->cond_remote_start, sizeof(policy->cond_remote_start)); memcpy(&range.end_address, &policy->cond_remote_end, sizeof(policy->cond_remote_end)); cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE, sizeof(range), &range, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } else { struct necp_policy_condition_addr addr; addr.prefix = policy->cond_remote_prefix; memcpy(&addr.address, &policy->cond_remote_start, sizeof(policy->cond_remote_start)); cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_REMOTE_ADDR, sizeof(addr), &addr, - cond_buf, condition_tlv_length); + cond_buf, condition_tlv_length); } } if (condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_AGENT_TYPE, - sizeof(policy->cond_agent_type), &policy->cond_agent_type, - cond_buf, condition_tlv_length); + sizeof(policy->cond_agent_type), &policy->cond_agent_type, + cond_buf, condition_tlv_length); } } @@ -3253,7 +3254,7 @@ necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, // Send packet if (packet != NULL) { - u_int32_t total_result_length = sizeof(struct 
necp_packet_header) + total_tlv_len; + u_int32_t total_result_length = sizeof(struct necp_packet_header) + total_tlv_len; // Allow malloc to wait, since the total buffer may be large and we are not holding any locks MALLOC(result_buf, u_int8_t *, total_result_length, M_NECP, M_WAITOK | M_ZERO); @@ -3300,7 +3301,7 @@ necp_handle_policy_dump_all(struct necp_session *session, u_int32_t message_id, for (int i = 0; i < policy_count; i++) { if (tlv_buffer_pointers[i] != NULL) { result_buf_cursor = necp_buffer_write_tlv(result_buf_cursor, NECP_TLV_POLICY_DUMP, tlv_buffer_lengths[i], tlv_buffer_pointers[i], - result_buf, total_tlv_len + sizeof(u_int32_t)); + result_buf, total_tlv_len + sizeof(u_int32_t)); } } @@ -3315,7 +3316,7 @@ done: if (error_occured) { if (packet != NULL) { - if(!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) { + if (!necp_send_error_response(session, NECP_PACKET_TYPE_POLICY_DUMP_ALL, message_id, response_error)) { NECPLOG0(LOG_ERR, "Failed to send error response"); } else { NECPLOG0(LOG_ERR, "Sent error response"); @@ -3346,7 +3347,7 @@ done: #undef REPORT_ERROR #undef UNLOCK_AND_REPORT_ERROR - return (error_code); + return error_code; } static struct necp_session_policy * @@ -3385,7 +3386,7 @@ necp_policy_create(struct necp_session *session, necp_policy_order order, u_int8 NECPLOG(LOG_DEBUG, "Created NECP policy, order %d", order); } done: - return (new_policy); + return new_policy; } static struct necp_session_policy * @@ -3393,28 +3394,28 @@ necp_policy_find(struct necp_session *session, necp_policy_id policy_id) { struct necp_session_policy *policy = NULL; if (policy_id == 0) { - return (NULL); + return NULL; } LIST_FOREACH(policy, &session->policies, chain) { if (policy->local_id == policy_id) { - return (policy); + return policy; } } - return (NULL); + return NULL; } static inline u_int8_t necp_policy_get_result_type(struct necp_session_policy *policy) { - return (policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0); + return policy ? necp_policy_result_get_type_from_buffer(policy->result, policy->result_size) : 0; } static inline u_int32_t necp_policy_get_result_parameter_length(struct necp_session_policy *policy) { - return (policy ? necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0); + return policy ? 
necp_policy_result_get_parameter_length_from_buffer(policy->result, policy->result_size) : 0; } static bool @@ -3426,19 +3427,19 @@ necp_policy_get_result_parameter(struct necp_session_policy *policy, u_int8_t *p u_int8_t *parameter = necp_policy_result_get_parameter_pointer_from_buffer(policy->result, policy->result_size); if (parameter && parameter_buffer) { memcpy(parameter_buffer, parameter, parameter_length); - return (TRUE); + return TRUE; } } } - return (FALSE); + return FALSE; } static bool necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_policy *policy) { if (session == NULL || policy == NULL) { - return (FALSE); + return FALSE; } policy->pending_deletion = TRUE; @@ -3447,7 +3448,7 @@ necp_policy_mark_for_deletion(struct necp_session *session, struct necp_session_ if (necp_debug) { NECPLOG0(LOG_DEBUG, "Marked NECP policy for removal"); } - return (TRUE); + return TRUE; } static bool @@ -3460,14 +3461,14 @@ necp_policy_mark_all_for_deletion(struct necp_session *session) necp_policy_mark_for_deletion(session, policy); } - return (TRUE); + return TRUE; } static bool necp_policy_delete(struct necp_session *session, struct necp_session_policy *policy) { if (session == NULL || policy == NULL) { - return (FALSE); + return FALSE; } LIST_REMOVE(policy, chain); @@ -3492,7 +3493,7 @@ necp_policy_delete(struct necp_session *session, struct necp_session_policy *pol if (necp_debug) { NECPLOG0(LOG_DEBUG, "Removed NECP policy"); } - return (TRUE); + return TRUE; } static bool @@ -3500,7 +3501,7 @@ necp_policy_unapply(struct necp_session_policy *policy) { int i = 0; if (policy == NULL) { - return (FALSE); + return FALSE; } LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE); @@ -3554,13 +3555,13 @@ necp_policy_unapply(struct necp_session_policy *policy) policy->applied = FALSE; - return (TRUE); + return TRUE; } -#define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION 0 -#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION 1 -#define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION 2 -#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS 3 +#define NECP_KERNEL_POLICY_SUBORDER_ID_TUNNEL_CONDITION 0 +#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_TUNNEL_CONDITION 1 +#define NECP_KERNEL_POLICY_SUBORDER_ID_CONDITION 2 +#define NECP_KERNEL_POLICY_SUBORDER_NON_ID_CONDITIONS 3 struct necp_policy_result_ip_tunnel { u_int32_t secondary_result; char interface_name[IFXNAMSIZ]; @@ -3616,7 +3617,7 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli memset(&ultimate_result_parameter, 0, sizeof(ultimate_result_parameter)); if (policy == NULL) { - return (FALSE); + return FALSE; } LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE); @@ -3632,246 +3633,246 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli u_int32_t condition_length = necp_policy_condition_get_value_length_from_buffer(value, length); u_int8_t *condition_value = necp_policy_condition_get_value_pointer_from_buffer(value, length); switch (condition_type) { - case NECP_POLICY_CONDITION_DEFAULT: { - socket_ip_conditions = TRUE; - break; - } - case NECP_POLICY_CONDITION_ALL_INTERFACES: { - master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES; - socket_ip_conditions = TRUE; - break; - } - case NECP_POLICY_CONDITION_ENTITLEMENT: { - if (condition_length > 0) { - if (cond_custom_entitlement == NULL) { - cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length); - if (cond_custom_entitlement != NULL) { - 
master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT; - socket_only_conditions = TRUE; - } - } - } else { - master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT; - socket_only_conditions = TRUE; - } - break; - } - case NECP_POLICY_CONDITION_DOMAIN: { - // Make sure there is only one such rule - if (condition_length > 0 && cond_domain == NULL) { - cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length); - if (cond_domain != NULL) { - master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN; - if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN; - } + case NECP_POLICY_CONDITION_DEFAULT: { + socket_ip_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_ALL_INTERFACES: { + master_condition_mask |= NECP_KERNEL_CONDITION_ALL_INTERFACES; + socket_ip_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_ENTITLEMENT: { + if (condition_length > 0) { + if (cond_custom_entitlement == NULL) { + cond_custom_entitlement = necp_copy_string((char *)condition_value, condition_length); + if (cond_custom_entitlement != NULL) { + master_condition_mask |= NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT; socket_only_conditions = TRUE; } } - break; + } else { + master_condition_mask |= NECP_KERNEL_CONDITION_ENTITLEMENT; + socket_only_conditions = TRUE; } - case NECP_POLICY_CONDITION_ACCOUNT: { - // Make sure there is only one such rule - if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) { - char *string = NULL; - MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK); - if (string != NULL) { - memcpy(string, condition_value, condition_length); - string[condition_length] = 0; - cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string); - if (cond_account_id != 0) { - policy->applied_account = string; // Save the string in parent policy - master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID; - if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID; - } - socket_only_conditions = TRUE; - } else { - FREE(string, M_NECP); - } + break; + } + case NECP_POLICY_CONDITION_DOMAIN: { + // Make sure there is only one such rule + if (condition_length > 0 && cond_domain == NULL) { + cond_domain = necp_create_trimmed_domain((char *)condition_value, condition_length); + if (cond_domain != NULL) { + master_condition_mask |= NECP_KERNEL_CONDITION_DOMAIN; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_DOMAIN; } + socket_only_conditions = TRUE; } - break; } - case NECP_POLICY_CONDITION_APPLICATION: { - // Make sure there is only one such rule, because we save the uuid in the policy - if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) { - bool allocated_mapping = FALSE; - uuid_t application_uuid; - memcpy(application_uuid, condition_value, sizeof(uuid_t)); - cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE); - if (cond_app_id != 0) { - if (allocated_mapping) { - necp_uuid_app_id_mappings_dirty = TRUE; - necp_num_uuid_app_id_mappings++; - } - uuid_copy(policy->applied_app_uuid, application_uuid); - master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID; + break; + } + case NECP_POLICY_CONDITION_ACCOUNT: { + // Make sure there is only one such rule + if (condition_length > 0 && cond_account_id == 0 && policy->applied_account == NULL) { + char *string = NULL; + MALLOC(string, char *, condition_length + 1, M_NECP, M_WAITOK); + if (string != NULL) 
{ + memcpy(string, condition_value, condition_length); + string[condition_length] = 0; + cond_account_id = necp_create_string_to_id_mapping(&necp_account_id_list, string); + if (cond_account_id != 0) { + policy->applied_account = string; // Save the string in parent policy + master_condition_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID; if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_ACCOUNT_ID; } socket_only_conditions = TRUE; + } else { + FREE(string, M_NECP); } } - break; } - case NECP_POLICY_CONDITION_REAL_APPLICATION: { - // Make sure there is only one such rule, because we save the uuid in the policy - if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) { - uuid_t real_application_uuid; - memcpy(real_application_uuid, condition_value, sizeof(uuid_t)); - cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE); - if (cond_real_app_id != 0) { - uuid_copy(policy->applied_real_app_uuid, real_application_uuid); - master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID; - if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID; - } - socket_only_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_APPLICATION: { + // Make sure there is only one such rule, because we save the uuid in the policy + if (condition_length >= sizeof(uuid_t) && cond_app_id == 0) { + bool allocated_mapping = FALSE; + uuid_t application_uuid; + memcpy(application_uuid, condition_value, sizeof(uuid_t)); + cond_app_id = necp_create_uuid_app_id_mapping(application_uuid, &allocated_mapping, TRUE); + if (cond_app_id != 0) { + if (allocated_mapping) { + necp_uuid_app_id_mappings_dirty = TRUE; + necp_num_uuid_app_id_mappings++; } - } - break; - } - case NECP_POLICY_CONDITION_PID: { - if (condition_length >= sizeof(pid_t)) { - master_condition_mask |= NECP_KERNEL_CONDITION_PID; + uuid_copy(policy->applied_app_uuid, application_uuid); + master_condition_mask |= NECP_KERNEL_CONDITION_APP_ID; if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_APP_ID; } - memcpy(&cond_pid, condition_value, sizeof(cond_pid)); socket_only_conditions = TRUE; } - break; } - case NECP_POLICY_CONDITION_UID: { - if (condition_length >= sizeof(uid_t)) { - master_condition_mask |= NECP_KERNEL_CONDITION_UID; + break; + } + case NECP_POLICY_CONDITION_REAL_APPLICATION: { + // Make sure there is only one such rule, because we save the uuid in the policy + if (condition_length >= sizeof(uuid_t) && cond_real_app_id == 0) { + uuid_t real_application_uuid; + memcpy(real_application_uuid, condition_value, sizeof(uuid_t)); + cond_real_app_id = necp_create_uuid_app_id_mapping(real_application_uuid, NULL, FALSE); + if (cond_real_app_id != 0) { + uuid_copy(policy->applied_real_app_uuid, real_application_uuid); + master_condition_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID; if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_REAL_APP_ID; } - memcpy(&cond_uid, condition_value, sizeof(cond_uid)); socket_only_conditions = TRUE; } - break; } - case NECP_POLICY_CONDITION_TRAFFIC_CLASS: { - if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) { - master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS; - if (condition_is_negative) { - master_condition_negated_mask |= 
NECP_KERNEL_CONDITION_TRAFFIC_CLASS; - } - memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class)); - socket_only_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_PID: { + if (condition_length >= sizeof(pid_t)) { + master_condition_mask |= NECP_KERNEL_CONDITION_PID; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_PID; } - break; + memcpy(&cond_pid, condition_value, sizeof(cond_pid)); + socket_only_conditions = TRUE; } - case NECP_POLICY_CONDITION_BOUND_INTERFACE: { - if (condition_length <= IFXNAMSIZ && condition_length > 0) { - char interface_name[IFXNAMSIZ]; - memcpy(interface_name, condition_value, condition_length); - interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated - if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) { - master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE; - if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE; - } - } - socket_ip_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_UID: { + if (condition_length >= sizeof(uid_t)) { + master_condition_mask |= NECP_KERNEL_CONDITION_UID; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_UID; } - break; + memcpy(&cond_uid, condition_value, sizeof(cond_uid)); + socket_only_conditions = TRUE; + } + break; + } + case NECP_POLICY_CONDITION_TRAFFIC_CLASS: { + if (condition_length >= sizeof(struct necp_policy_condition_tc_range)) { + master_condition_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_TRAFFIC_CLASS; + } + memcpy(&cond_traffic_class, condition_value, sizeof(cond_traffic_class)); + socket_only_conditions = TRUE; } - case NECP_POLICY_CONDITION_IP_PROTOCOL: { - if (condition_length >= sizeof(u_int16_t)) { - master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL; + break; + } + case NECP_POLICY_CONDITION_BOUND_INTERFACE: { + if (condition_length <= IFXNAMSIZ && condition_length > 0) { + char interface_name[IFXNAMSIZ]; + memcpy(interface_name, condition_value, condition_length); + interface_name[condition_length - 1] = 0; // Make sure the string is NULL terminated + if (ifnet_find_by_name(interface_name, &cond_bound_interface) == 0) { + master_condition_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE; if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_BOUND_INTERFACE; } - memcpy(&cond_protocol, condition_value, sizeof(cond_protocol)); - socket_ip_conditions = TRUE; } - break; + socket_ip_conditions = TRUE; } - case NECP_POLICY_CONDITION_LOCAL_ADDR: { - struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value; - if (!necp_address_is_valid(&address_struct->address.sa)) { - break; - } - - cond_local_prefix = address_struct->prefix; - memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address)); - master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START; - master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX; + break; + } + case NECP_POLICY_CONDITION_IP_PROTOCOL: { + if (condition_length >= sizeof(u_int16_t)) { + master_condition_mask |= NECP_KERNEL_CONDITION_PROTOCOL; if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START; - master_condition_negated_mask |= 
NECP_KERNEL_CONDITION_LOCAL_PREFIX; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_PROTOCOL; } + memcpy(&cond_protocol, condition_value, sizeof(cond_protocol)); socket_ip_conditions = TRUE; + } + break; + } + case NECP_POLICY_CONDITION_LOCAL_ADDR: { + struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value; + if (!necp_address_is_valid(&address_struct->address.sa)) { break; } - case NECP_POLICY_CONDITION_REMOTE_ADDR: { - struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value; - if (!necp_address_is_valid(&address_struct->address.sa)) { - break; - } - cond_remote_prefix = address_struct->prefix; - memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address)); - master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START; - master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX; - if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START; - master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX; - } - socket_ip_conditions = TRUE; + cond_local_prefix = address_struct->prefix; + memcpy(&cond_local_start, &address_struct->address, sizeof(address_struct->address)); + master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START; + master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_PREFIX; + } + socket_ip_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_REMOTE_ADDR: { + struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)condition_value; + if (!necp_address_is_valid(&address_struct->address.sa)) { break; } - case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: { - struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value; - if (!necp_address_is_valid(&address_struct->start_address.sa) || - !necp_address_is_valid(&address_struct->end_address.sa)) { - break; - } - memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address)); - memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address)); - master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START; - master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END; - if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START; - master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END; - } - socket_ip_conditions = TRUE; + cond_remote_prefix = address_struct->prefix; + memcpy(&cond_remote_start, &address_struct->address, sizeof(address_struct->address)); + master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START; + master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_PREFIX; + } + socket_ip_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE: { + struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value; + if (!necp_address_is_valid(&address_struct->start_address.sa) || + !necp_address_is_valid(&address_struct->end_address.sa)) { break; } - case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: { - 
struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value; - if (!necp_address_is_valid(&address_struct->start_address.sa) || - !necp_address_is_valid(&address_struct->end_address.sa)) { - break; - } - memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address)); - memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address)); - master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START; - master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END; - if (condition_is_negative) { - master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START; - master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END; - } - socket_ip_conditions = TRUE; - break; + memcpy(&cond_local_start, &address_struct->start_address, sizeof(address_struct->start_address)); + memcpy(&cond_local_end, &address_struct->end_address, sizeof(address_struct->end_address)); + master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_START; + master_condition_mask |= NECP_KERNEL_CONDITION_LOCAL_END; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_START; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_LOCAL_END; } - case NECP_POLICY_CONDITION_AGENT_TYPE: { - if (condition_length >= sizeof(cond_agent_type)) { - master_condition_mask |= NECP_KERNEL_CONDITION_AGENT_TYPE; - memcpy(&cond_agent_type, condition_value, sizeof(cond_agent_type)); - socket_only_conditions = TRUE; - } + socket_ip_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE: { + struct necp_policy_condition_addr_range *address_struct = (struct necp_policy_condition_addr_range *)(void *)condition_value; + if (!necp_address_is_valid(&address_struct->start_address.sa) || + !necp_address_is_valid(&address_struct->end_address.sa)) { break; } - default: { - break; + + memcpy(&cond_remote_start, &address_struct->start_address, sizeof(address_struct->start_address)); + memcpy(&cond_remote_end, &address_struct->end_address, sizeof(address_struct->end_address)); + master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_START; + master_condition_mask |= NECP_KERNEL_CONDITION_REMOTE_END; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_START; + master_condition_negated_mask |= NECP_KERNEL_CONDITION_REMOTE_END; + } + socket_ip_conditions = TRUE; + break; + } + case NECP_POLICY_CONDITION_AGENT_TYPE: { + if (condition_length >= sizeof(cond_agent_type)) { + master_condition_mask |= NECP_KERNEL_CONDITION_AGENT_TYPE; + memcpy(&cond_agent_type, condition_value, sizeof(cond_agent_type)); + socket_only_conditions = TRUE; } + break; + } + default: { + break; + } } offset += sizeof(u_int8_t) + sizeof(u_int32_t) + length; @@ -3880,156 +3881,156 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli // Process result ultimate_result = necp_policy_get_result_type(policy); switch (ultimate_result) { - case NECP_POLICY_RESULT_PASS: { - if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE - socket_layer_non_id_conditions = TRUE; - ip_output_layer_id_condition = TRUE; - } else if (socket_ip_conditions) { - socket_layer_non_id_conditions = TRUE; - ip_output_layer_id_condition = TRUE; - ip_output_layer_non_id_conditions = TRUE; - } - break; + case NECP_POLICY_RESULT_PASS: { + if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE + socket_layer_non_id_conditions = 
TRUE; + ip_output_layer_id_condition = TRUE; + } else if (socket_ip_conditions) { + socket_layer_non_id_conditions = TRUE; + ip_output_layer_id_condition = TRUE; + ip_output_layer_non_id_conditions = TRUE; } - case NECP_POLICY_RESULT_DROP: { - if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE - socket_layer_non_id_conditions = TRUE; - } else if (socket_ip_conditions) { - socket_layer_non_id_conditions = TRUE; - ip_output_layer_non_id_conditions = TRUE; - ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer - } - break; + break; + } + case NECP_POLICY_RESULT_DROP: { + if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE + socket_layer_non_id_conditions = TRUE; + } else if (socket_ip_conditions) { + socket_layer_non_id_conditions = TRUE; + ip_output_layer_non_id_conditions = TRUE; + ip_output_layer_non_id_only = TRUE; // Only apply drop to packets that didn't go through socket layer } - case NECP_POLICY_RESULT_SKIP: { - u_int32_t skip_policy_order = 0; - if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) { - ultimate_result_parameter.skip_policy_order = skip_policy_order; - } - - if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE - socket_layer_non_id_conditions = TRUE; - ip_output_layer_id_condition = TRUE; - } else if (socket_ip_conditions) { - socket_layer_non_id_conditions = TRUE; - ip_output_layer_non_id_conditions = TRUE; - } - break; + break; + } + case NECP_POLICY_RESULT_SKIP: { + u_int32_t skip_policy_order = 0; + if (necp_policy_get_result_parameter(policy, (u_int8_t *)&skip_policy_order, sizeof(skip_policy_order))) { + ultimate_result_parameter.skip_policy_order = skip_policy_order; } - case NECP_POLICY_RESULT_SOCKET_DIVERT: - case NECP_POLICY_RESULT_SOCKET_FILTER: { - u_int32_t control_unit = 0; - if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, sizeof(control_unit))) { - ultimate_result_parameter.flow_divert_control_unit = control_unit; - } + + if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE socket_layer_non_id_conditions = TRUE; - break; + ip_output_layer_id_condition = TRUE; + } else if (socket_ip_conditions) { + socket_layer_non_id_conditions = TRUE; + ip_output_layer_non_id_conditions = TRUE; } - case NECP_POLICY_RESULT_IP_TUNNEL: { - struct necp_policy_result_ip_tunnel tunnel_parameters; - u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy); - if (tunnel_parameters_length > sizeof(u_int32_t) && - tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) && - necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) { - ifnet_t tunnel_interface = NULL; - tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated - if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) { - ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index; - ifnet_release(tunnel_interface); - } - - secondary_result = tunnel_parameters.secondary_result; - if (secondary_result) { - cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index; - } + break; + } + case NECP_POLICY_RESULT_SOCKET_DIVERT: + case NECP_POLICY_RESULT_SOCKET_FILTER: { + u_int32_t control_unit = 0; + if (necp_policy_get_result_parameter(policy, (u_int8_t *)&control_unit, 
sizeof(control_unit))) { + ultimate_result_parameter.flow_divert_control_unit = control_unit; + } + socket_layer_non_id_conditions = TRUE; + break; + } + case NECP_POLICY_RESULT_IP_TUNNEL: { + struct necp_policy_result_ip_tunnel tunnel_parameters; + u_int32_t tunnel_parameters_length = necp_policy_get_result_parameter_length(policy); + if (tunnel_parameters_length > sizeof(u_int32_t) && + tunnel_parameters_length <= sizeof(struct necp_policy_result_ip_tunnel) && + necp_policy_get_result_parameter(policy, (u_int8_t *)&tunnel_parameters, sizeof(tunnel_parameters))) { + ifnet_t tunnel_interface = NULL; + tunnel_parameters.interface_name[tunnel_parameters_length - sizeof(u_int32_t) - 1] = 0; // Make sure the string is NULL terminated + if (ifnet_find_by_name(tunnel_parameters.interface_name, &tunnel_interface) == 0) { + ultimate_result_parameter.tunnel_interface_index = tunnel_interface->if_index; + ifnet_release(tunnel_interface); } - if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE - socket_layer_non_id_conditions = TRUE; - ip_output_layer_id_condition = TRUE; - if (secondary_result) { - ip_output_layer_tunnel_condition_from_id = TRUE; - } - } else if (socket_ip_conditions) { - socket_layer_non_id_conditions = TRUE; - ip_output_layer_id_condition = TRUE; - ip_output_layer_non_id_conditions = TRUE; - if (secondary_result) { - ip_output_layer_tunnel_condition_from_id = TRUE; - ip_output_layer_tunnel_condition_from_non_id = TRUE; - } + secondary_result = tunnel_parameters.secondary_result; + if (secondary_result) { + cond_last_interface_index = ultimate_result_parameter.tunnel_interface_index; } - break; } - case NECP_POLICY_RESULT_TRIGGER: - case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: - case NECP_POLICY_RESULT_TRIGGER_SCOPED: - case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: { - struct necp_policy_result_service service_parameters; - u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy); - bool has_extra_service_data = FALSE; - if (service_result_length >= (sizeof(service_parameters))) { - has_extra_service_data = TRUE; - } - if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) { - ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier); - if (ultimate_result_parameter.service.identifier != 0) { - uuid_copy(policy->applied_result_uuid, service_parameters.identifier); - socket_layer_non_id_conditions = TRUE; - if (has_extra_service_data) { - ultimate_result_parameter.service.data = service_parameters.data; - } else { - ultimate_result_parameter.service.data = 0; - } + + if (socket_only_conditions) { // socket_ip_conditions can be TRUE or FALSE + socket_layer_non_id_conditions = TRUE; + ip_output_layer_id_condition = TRUE; + if (secondary_result) { + ip_output_layer_tunnel_condition_from_id = TRUE; + } + } else if (socket_ip_conditions) { + socket_layer_non_id_conditions = TRUE; + ip_output_layer_id_condition = TRUE; + ip_output_layer_non_id_conditions = TRUE; + if (secondary_result) { + ip_output_layer_tunnel_condition_from_id = TRUE; + ip_output_layer_tunnel_condition_from_non_id = TRUE; + } + } + break; + } + case NECP_POLICY_RESULT_TRIGGER: + case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: + case NECP_POLICY_RESULT_TRIGGER_SCOPED: + case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: { + struct necp_policy_result_service service_parameters; + u_int32_t service_result_length = necp_policy_get_result_parameter_length(policy); + bool 
has_extra_service_data = FALSE; + if (service_result_length >= (sizeof(service_parameters))) { + has_extra_service_data = TRUE; + } + if (necp_policy_get_result_parameter(policy, (u_int8_t *)&service_parameters, sizeof(service_parameters))) { + ultimate_result_parameter.service.identifier = necp_create_uuid_service_id_mapping(service_parameters.identifier); + if (ultimate_result_parameter.service.identifier != 0) { + uuid_copy(policy->applied_result_uuid, service_parameters.identifier); + socket_layer_non_id_conditions = TRUE; + if (has_extra_service_data) { + ultimate_result_parameter.service.data = service_parameters.data; + } else { + ultimate_result_parameter.service.data = 0; } } - break; } - case NECP_POLICY_RESULT_USE_NETAGENT: - case NECP_POLICY_RESULT_NETAGENT_SCOPED: { - uuid_t netagent_uuid; - if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) { - ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid); - if (ultimate_result_parameter.netagent_id != 0) { - uuid_copy(policy->applied_result_uuid, netagent_uuid); - socket_layer_non_id_conditions = TRUE; - } + break; + } + case NECP_POLICY_RESULT_USE_NETAGENT: + case NECP_POLICY_RESULT_NETAGENT_SCOPED: { + uuid_t netagent_uuid; + if (necp_policy_get_result_parameter(policy, (u_int8_t *)&netagent_uuid, sizeof(netagent_uuid))) { + ultimate_result_parameter.netagent_id = necp_create_uuid_service_id_mapping(netagent_uuid); + if (ultimate_result_parameter.netagent_id != 0) { + uuid_copy(policy->applied_result_uuid, netagent_uuid); + socket_layer_non_id_conditions = TRUE; } - break; } - case NECP_POLICY_RESULT_SOCKET_SCOPED: { - u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy); - if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) { - char interface_name[IFXNAMSIZ]; - ifnet_t scope_interface = NULL; - necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length); - interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated - if (ifnet_find_by_name(interface_name, &scope_interface) == 0) { - ultimate_result_parameter.scoped_interface_index = scope_interface->if_index; - socket_layer_non_id_conditions = TRUE; - ifnet_release(scope_interface); - } + break; + } + case NECP_POLICY_RESULT_SOCKET_SCOPED: { + u_int32_t interface_name_length = necp_policy_get_result_parameter_length(policy); + if (interface_name_length <= IFXNAMSIZ && interface_name_length > 0) { + char interface_name[IFXNAMSIZ]; + ifnet_t scope_interface = NULL; + necp_policy_get_result_parameter(policy, (u_int8_t *)interface_name, interface_name_length); + interface_name[interface_name_length - 1] = 0; // Make sure the string is NULL terminated + if (ifnet_find_by_name(interface_name, &scope_interface) == 0) { + ultimate_result_parameter.scoped_interface_index = scope_interface->if_index; + socket_layer_non_id_conditions = TRUE; + ifnet_release(scope_interface); } - break; - } - case NECP_POLICY_RESULT_SCOPED_DIRECT: { - socket_layer_non_id_conditions = TRUE; - break; } - case NECP_POLICY_RESULT_ROUTE_RULES: { - if (policy->route_rules != NULL && policy->route_rules_size > 0) { - u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size); - if (route_rule_id > 0) { - policy->applied_route_rules_id = route_rule_id; - ultimate_result_parameter.route_rule_id = route_rule_id; - socket_layer_non_id_conditions = TRUE; - } + break; + } + case 
NECP_POLICY_RESULT_SCOPED_DIRECT: { + socket_layer_non_id_conditions = TRUE; + break; + } + case NECP_POLICY_RESULT_ROUTE_RULES: { + if (policy->route_rules != NULL && policy->route_rules_size > 0) { + u_int32_t route_rule_id = necp_create_route_rule(&necp_route_rules, policy->route_rules, policy->route_rules_size); + if (route_rule_id > 0) { + policy->applied_route_rules_id = route_rule_id; + ultimate_result_parameter.route_rule_id = route_rule_id; + socket_layer_non_id_conditions = TRUE; } - break; - } - default: { - break; } + break; + } + default: { + break; + } } if (socket_layer_non_id_conditions) { @@ -4095,10 +4096,10 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli policy->applied = TRUE; policy->pending_update = FALSE; - return (TRUE); + return TRUE; fail: - return (FALSE); + return FALSE; } static void @@ -4165,11 +4166,11 @@ necp_kernel_policy_get_new_id(bool socket_level) do { necp_last_kernel_socket_policy_id++; if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET || - necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) { + necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) { if (wrapped) { // Already wrapped, give up NECPLOG0(LOG_ERR, "Failed to find a free socket kernel policy ID.\n"); - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET; wrapped = TRUE; @@ -4184,7 +4185,7 @@ necp_kernel_policy_get_new_id(bool socket_level) if (wrapped) { // Already wrapped, give up NECPLOG0(LOG_ERR, "Failed to find a free IP kernel policy ID.\n"); - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP; wrapped = TRUE; @@ -4195,13 +4196,13 @@ necp_kernel_policy_get_new_id(bool socket_level) if (newid == NECP_KERNEL_POLICY_ID_NONE) { NECPLOG0(LOG_ERR, "Allocate kernel policy id failed.\n"); - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } - return (newid); + return newid; } -#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE) +#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | 
NECP_KERNEL_CONDITION_AGENT_TYPE) static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter) { @@ -4304,7 +4305,7 @@ necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, } LIST_INSERT_SORTED_TWICE_ASCENDING(&necp_kernel_socket_policies, new_kernel_policy, chain, session_order, order, tmp_kernel_policy); done: - return (new_kernel_policy ? new_kernel_policy->id : 0); + return new_kernel_policy ? new_kernel_policy->id : 0; } static struct necp_kernel_socket_policy * @@ -4314,16 +4315,16 @@ necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id) struct necp_kernel_socket_policy *tmp_kernel_policy = NULL; if (policy_id == 0) { - return (NULL); + return NULL; } LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_socket_policies, chain, tmp_kernel_policy) { if (kernel_policy->id == policy_id) { - return (kernel_policy); + return kernel_policy; } } - return (NULL); + return NULL; } static bool @@ -4353,10 +4354,10 @@ necp_kernel_socket_policy_delete(necp_kernel_policy_id policy_id) } FREE_ZONE(policy, sizeof(*policy), M_NECP_SOCKET_POLICY); - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } static inline const char * @@ -4364,214 +4365,214 @@ necp_get_result_description(char *result_string, necp_kernel_policy_result resul { uuid_string_t uuid_string; switch (result) { - case NECP_KERNEL_POLICY_RESULT_NONE: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "None"); - break; - } - case NECP_KERNEL_POLICY_RESULT_PASS: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass"); - break; - } - case NECP_KERNEL_POLICY_RESULT_SKIP: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order); - break; - } - case NECP_KERNEL_POLICY_RESULT_DROP: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop"); - break; - } - case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit); - break; - } - case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit); - break; - } - case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: { - ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index]; - snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface)); - break; - } - case NECP_KERNEL_POLICY_RESULT_IP_FILTER: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter"); - break; - } - case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: { - ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index]; - snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), 
ifnet_unit(interface)); - break; - } - case NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "ScopedDirect"); - break; - } - case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: { - int index = 0; - char interface_names[IFXNAMSIZ][MAX_ROUTE_RULE_INTERFACES]; - struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id); - if (route_rule != NULL) { - for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) { - if (route_rule->exception_if_indices[index] != 0) { - ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]]; - snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface)); - } else { - memset(interface_names[index], 0, IFXNAMSIZ); - } - } - switch (route_rule->default_action) { - case NECP_ROUTE_RULE_DENY_INTERFACE: - snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)", - (route_rule->cellular_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "", - (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "", - (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "", - (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "", - (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "", - (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "", - (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "", - (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "", - (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "", - (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "", - (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "", - (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "", - (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "", - (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", - (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : ""); - break; - case NECP_ROUTE_RULE_ALLOW_INTERFACE: - snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)", - (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "", - (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "", - (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? 
"!Wired " : "", - (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "", - (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "", - (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "", - (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "", - (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "", - (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[4] : "", - (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "", - (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "", - (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "", - (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "", - (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", - (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : ""); - break; - case NECP_ROUTE_RULE_QOS_MARKING: - snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)", - (route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "", - (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "", - (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "", - (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "", - (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "", - (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", - (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "", - (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", - (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "", - (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", - (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "", - (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", - (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "", - (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", - (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "", - (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? 
" " : "", - (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "", - (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", - (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "", - (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", - (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "", - (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", - (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : ""); - break; - default: - snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)"); - break; - } - } - break; - } - case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: { - bool found_mapping = FALSE; - struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id); - if (mapping != NULL) { - uuid_unparse(mapping->uuid, uuid_string); - found_mapping = TRUE; + case NECP_KERNEL_POLICY_RESULT_NONE: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "None"); + break; + } + case NECP_KERNEL_POLICY_RESULT_PASS: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "Pass"); + break; + } + case NECP_KERNEL_POLICY_RESULT_SKIP: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "Skip (%u)", result_parameter.skip_policy_order); + break; + } + case NECP_KERNEL_POLICY_RESULT_DROP: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "Drop"); + break; + } + case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketDivert (%d)", result_parameter.flow_divert_control_unit); + break; + } + case NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketFilter (%d)", result_parameter.filter_control_unit); + break; + } + case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: { + ifnet_t interface = ifindex2ifnet[result_parameter.tunnel_interface_index]; + snprintf(result_string, MAX_RESULT_STRING_LEN, "IPTunnel (%s%d)", ifnet_name(interface), ifnet_unit(interface)); + break; + } + case NECP_KERNEL_POLICY_RESULT_IP_FILTER: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "IPFilter"); + break; + } + case NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED: { + ifnet_t interface = ifindex2ifnet[result_parameter.scoped_interface_index]; + snprintf(result_string, MAX_RESULT_STRING_LEN, "SocketScoped (%s%d)", ifnet_name(interface), ifnet_unit(interface)); + break; + } + case NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "ScopedDirect"); + break; + } + case NECP_KERNEL_POLICY_RESULT_ROUTE_RULES: { + int index = 0; + char interface_names[IFXNAMSIZ][MAX_ROUTE_RULE_INTERFACES]; + struct necp_route_rule *route_rule = necp_lookup_route_rule_locked(&necp_route_rules, result_parameter.route_rule_id); + if (route_rule != NULL) { + for (index = 0; index < MAX_ROUTE_RULE_INTERFACES; index++) { + if (route_rule->exception_if_indices[index] != 0) { + ifnet_t interface = ifindex2ifnet[route_rule->exception_if_indices[index]]; + snprintf(interface_names[index], IFXNAMSIZ, "%s%d", ifnet_name(interface), ifnet_unit(interface)); + } else { + memset(interface_names[index], 0, IFXNAMSIZ); + } + } + switch (route_rule->default_action) { + case NECP_ROUTE_RULE_DENY_INTERFACE: + snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Only %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)", + (route_rule->cellular_action == 
NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Cell " : "", + (route_rule->wifi_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "WiFi " : "", + (route_rule->wired_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Wired " : "", + (route_rule->expensive_action == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? "Exp " : "", + (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[0] : "", + (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[1] : "", + (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[2] : "", + (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[3] : "", + (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[4] : "", + (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[5] : "", + (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[6] : "", + (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[7] : "", + (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[8] : "", + (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? " " : "", + (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_ALLOW_INTERFACE) ? interface_names[9] : ""); + break; + case NECP_ROUTE_RULE_ALLOW_INTERFACE: + snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)", + (route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Cell " : "", + (route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!WiFi " : "", + (route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Wired " : "", + (route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!Exp " : "", + (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[0] : "", + (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[1] : "", + (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[2] : "", + (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[3] : "", + (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_DENY_INTERFACE) ? 
interface_names[4] : "", + (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[5] : "", + (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[6] : "", + (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[7] : "", + (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[8] : "", + (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? "!" : "", + (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_DENY_INTERFACE) ? interface_names[9] : ""); + break; + case NECP_ROUTE_RULE_QOS_MARKING: + snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (QoSMarking %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)", + (route_rule->cellular_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Cell " : "", + (route_rule->wifi_action == NECP_ROUTE_RULE_QOS_MARKING) ? "WiFi " : "", + (route_rule->wired_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Wired " : "", + (route_rule->expensive_action == NECP_ROUTE_RULE_QOS_MARKING) ? "Exp " : "", + (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[0] : "", + (route_rule->exception_if_actions[0] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[1] : "", + (route_rule->exception_if_actions[1] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[2] : "", + (route_rule->exception_if_actions[2] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[3] : "", + (route_rule->exception_if_actions[3] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[4] : "", + (route_rule->exception_if_actions[4] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[5] : "", + (route_rule->exception_if_actions[5] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[6] : "", + (route_rule->exception_if_actions[6] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[7] : "", + (route_rule->exception_if_actions[7] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[8] : "", + (route_rule->exception_if_actions[8] == NECP_ROUTE_RULE_QOS_MARKING) ? " " : "", + (route_rule->exception_if_actions[9] == NECP_ROUTE_RULE_QOS_MARKING) ? interface_names[9] : ""); + break; + default: + snprintf(result_string, MAX_RESULT_STRING_LEN, "RouteRules (Unknown)"); + break; } - snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? 
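The reindented formatting code above expands the 10-slot exception list by hand, passing two ternary arguments per slot to a single snprintf. For readers following the logic, here is a minimal userspace sketch of the exception-list portion written as a loop instead. It is illustrative only, not the kernel code: the constant names mirror the patch, but the helper itself and the action value are assumptions, and it uses the BSD strlcat.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_ROUTE_RULE_INTERFACES 10
#define IFXNAMSIZ                 32
#define NECP_ROUTE_RULE_DENY_INTERFACE 2 /* value assumed for illustration */

/* Hypothetical loop-based equivalent of the "RouteRules (...)" ternary chain. */
static void
route_rule_deny_summary(char *result, size_t len,
    const uint8_t actions[MAX_ROUTE_RULE_INTERFACES],
    const char names[MAX_ROUTE_RULE_INTERFACES][IFXNAMSIZ])
{
	snprintf(result, len, "RouteRules (");
	for (int i = 0; i < MAX_ROUTE_RULE_INTERFACES; i++) {
		if (actions[i] == NECP_ROUTE_RULE_DENY_INTERFACE && names[i][0] != '\0') {
			strlcat(result, "!", len);   /* '!' marks a denied interface */
			strlcat(result, names[i], len);
			strlcat(result, " ", len);
		}
	}
	strlcat(result, ")", len);
}

The kernel version keeps the single-snprintf form, presumably so the whole summary is emitted in one call; the loop form trades that for readability.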
uuid_string : "Unknown"); - break; } - case NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED: { - bool found_mapping = FALSE; - struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id); - if (mapping != NULL) { - uuid_unparse(mapping->uuid, uuid_string); - found_mapping = TRUE; - } - snprintf(result_string, MAX_RESULT_STRING_LEN, "NetAgentScoped (%s)", found_mapping ? uuid_string : "Unknown"); - break; + break; + } + case NECP_KERNEL_POLICY_RESULT_USE_NETAGENT: { + bool found_mapping = FALSE; + struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id); + if (mapping != NULL) { + uuid_unparse(mapping->uuid, uuid_string); + found_mapping = TRUE; } - case NECP_POLICY_RESULT_TRIGGER: { - bool found_mapping = FALSE; - struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier); - if (mapping != NULL) { - uuid_unparse(mapping->uuid, uuid_string); - found_mapping = TRUE; - } - snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data); - break; + snprintf(result_string, MAX_RESULT_STRING_LEN, "UseNetAgent (%s)", found_mapping ? uuid_string : "Unknown"); + break; + } + case NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED: { + bool found_mapping = FALSE; + struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.netagent_id); + if (mapping != NULL) { + uuid_unparse(mapping->uuid, uuid_string); + found_mapping = TRUE; } - case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: { - bool found_mapping = FALSE; - struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier); - if (mapping != NULL) { - uuid_unparse(mapping->uuid, uuid_string); - found_mapping = TRUE; - } - snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data); - break; + snprintf(result_string, MAX_RESULT_STRING_LEN, "NetAgentScoped (%s)", found_mapping ? uuid_string : "Unknown"); + break; + } + case NECP_POLICY_RESULT_TRIGGER: { + bool found_mapping = FALSE; + struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier); + if (mapping != NULL) { + uuid_unparse(mapping->uuid, uuid_string); + found_mapping = TRUE; } - case NECP_POLICY_RESULT_TRIGGER_SCOPED: { - bool found_mapping = FALSE; - struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier); - if (mapping != NULL) { - uuid_unparse(mapping->uuid, uuid_string); - found_mapping = TRUE; - } - snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data); - break; + snprintf(result_string, MAX_RESULT_STRING_LEN, "Trigger (%s.%d)", found_mapping ? 
uuid_string : "Unknown", result_parameter.service.data); + break; + } + case NECP_POLICY_RESULT_TRIGGER_IF_NEEDED: { + bool found_mapping = FALSE; + struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier); + if (mapping != NULL) { + uuid_unparse(mapping->uuid, uuid_string); + found_mapping = TRUE; } - case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: { - bool found_mapping = FALSE; - struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier); - if (mapping != NULL) { - uuid_unparse(mapping->uuid, uuid_string); - found_mapping = TRUE; - } - snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data); - break; + snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerIfNeeded (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data); + break; + } + case NECP_POLICY_RESULT_TRIGGER_SCOPED: { + bool found_mapping = FALSE; + struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier); + if (mapping != NULL) { + uuid_unparse(mapping->uuid, uuid_string); + found_mapping = TRUE; } - default: { - snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index); - break; + snprintf(result_string, MAX_RESULT_STRING_LEN, "TriggerScoped (%s.%d)", found_mapping ? uuid_string : "Unknown", result_parameter.service.data); + break; + } + case NECP_POLICY_RESULT_NO_TRIGGER_SCOPED: { + bool found_mapping = FALSE; + struct necp_uuid_id_mapping *mapping = necp_uuid_lookup_uuid_with_service_id_locked(result_parameter.service.identifier); + if (mapping != NULL) { + uuid_unparse(mapping->uuid, uuid_string); + found_mapping = TRUE; } + snprintf(result_string, MAX_RESULT_STRING_LEN, "NoTriggerScoped (%s.%d)", found_mapping ? 
uuid_string : "Unknown", result_parameter.service.data); + break; + } + default: { + snprintf(result_string, MAX_RESULT_STRING_LEN, "Unknown %d (%d)", result, result_parameter.tunnel_interface_index); + break; } - return (result_string); + } + return result_string; } static void @@ -4614,7 +4615,7 @@ necp_kernel_socket_policies_dump_all(void) static inline bool necp_kernel_socket_result_is_trigger_service_type(struct necp_kernel_socket_policy *kernel_policy) { - return (kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED); + return kernel_policy->result >= NECP_KERNEL_POLICY_RESULT_TRIGGER && kernel_policy->result <= NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED; } static inline bool @@ -4622,34 +4623,34 @@ necp_kernel_socket_policy_results_overlap(struct necp_kernel_socket_policy *uppe { if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_DROP) { // Drop always cancels out lower policies - return (TRUE); + return TRUE; } else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER || - upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES || - upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT || - upper_policy->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) { + upper_policy->result == NECP_KERNEL_POLICY_RESULT_ROUTE_RULES || + upper_policy->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT || + upper_policy->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) { // Filters and route rules never cancel out lower policies - return (FALSE); + return FALSE; } else if (necp_kernel_socket_result_is_trigger_service_type(upper_policy)) { // Trigger/Scoping policies can overlap one another, but not other results - return (necp_kernel_socket_result_is_trigger_service_type(lower_policy)); + return necp_kernel_socket_result_is_trigger_service_type(lower_policy); } else if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) { if (upper_policy->session_order != lower_policy->session_order) { // A skip cannot override a policy of a different session - return (FALSE); + return FALSE; } else { if (upper_policy->result_parameter.skip_policy_order == 0 || - lower_policy->order >= upper_policy->result_parameter.skip_policy_order) { + lower_policy->order >= upper_policy->result_parameter.skip_policy_order) { // This policy is beyond the skip - return (FALSE); + return FALSE; } else { // This policy is inside the skip - return (TRUE); + return TRUE; } } } // A hard pass, flow divert, tunnel, or scope will currently block out lower policies - return (TRUE); + return TRUE; } static bool @@ -4665,7 +4666,7 @@ necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *polic // For policies in a skip window, we can't mark conflicting policies as unnecessary if (can_skip) { if (highest_skip_session_order != compared_policy->session_order || - (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) { + (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) { // If we've moved on to the next session, or passed the skip window highest_skip_session_order = 0; highest_skip_order = 0; @@ -4700,7 +4701,7 @@ necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *polic // Default makes lower policies unecessary always if (compared_policy->condition_mask == 0) { - return (TRUE); + return TRUE; } // Compared must be more general than policy, and include only conditions within policy @@ -4714,58 +4715,58 @@ 
necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *polic } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_DOMAIN && - strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) { + strcmp(compared_policy->cond_domain, policy->cond_domain) != 0) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT && - strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) { + strcmp(compared_policy->cond_custom_entitlement, policy->cond_custom_entitlement) != 0) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID && - compared_policy->cond_account_id != policy->cond_account_id) { + compared_policy->cond_account_id != policy->cond_account_id) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID && - compared_policy->cond_policy_id != policy->cond_policy_id) { + compared_policy->cond_policy_id != policy->cond_policy_id) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID && - compared_policy->cond_app_id != policy->cond_app_id) { + compared_policy->cond_app_id != policy->cond_app_id) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REAL_APP_ID && - compared_policy->cond_real_app_id != policy->cond_real_app_id) { + compared_policy->cond_real_app_id != policy->cond_real_app_id) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PID && - compared_policy->cond_pid != policy->cond_pid) { + compared_policy->cond_pid != policy->cond_pid) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_UID && - compared_policy->cond_uid != policy->cond_uid) { + compared_policy->cond_uid != policy->cond_uid) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE && - compared_policy->cond_bound_interface != policy->cond_bound_interface) { + compared_policy->cond_bound_interface != policy->cond_bound_interface) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL && - compared_policy->cond_protocol != policy->cond_protocol) { + compared_policy->cond_protocol != policy->cond_protocol) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS && - !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc && - compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) { + !(compared_policy->cond_traffic_class.start_tc <= policy->cond_traffic_class.start_tc && + compared_policy->cond_traffic_class.end_tc >= policy->cond_traffic_class.end_tc)) { continue; } @@ -4776,7 +4777,7 @@ necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *polic } } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) { if (compared_policy->cond_local_prefix > policy->cond_local_prefix || - !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) { + !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) { continue; } } @@ -4789,21 +4790,21 @@ necp_kernel_socket_policy_is_unnecessary(struct necp_kernel_socket_policy *polic } } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) { if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix || - 
!necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) { + !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) { continue; } } } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_AGENT_TYPE && - memcmp(&compared_policy->cond_agent_type, &policy->cond_agent_type, sizeof(policy->cond_agent_type)) == 0) { + memcmp(&compared_policy->cond_agent_type, &policy->cond_agent_type, sizeof(policy->cond_agent_type)) == 0) { continue; } - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } static bool @@ -4857,7 +4858,7 @@ necp_kernel_socket_policies_reprocess(void) necp_kernel_socket_policies_count++; if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) || - kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) { + kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) { necp_kernel_socket_policies_non_app_count++; for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) { bucket_allocation_counts[app_i]++; @@ -4903,7 +4904,7 @@ necp_kernel_socket_policies_reprocess(void) // Add socket policies if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) || - kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) { + kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) { for (app_i = 0; app_i < NECP_KERNEL_SOCKET_POLICIES_MAP_NUM_APP_ID_BUCKETS; app_i++) { if (!necp_kernel_socket_policy_is_unnecessary(kernel_policy, necp_kernel_socket_policies_map[app_i], bucket_current_free_index[app_i])) { (necp_kernel_socket_policies_map[app_i])[(bucket_current_free_index[app_i])] = kernel_policy; @@ -4922,7 +4923,7 @@ necp_kernel_socket_policies_reprocess(void) } necp_kernel_socket_policies_dump_all(); BUMP_KERNEL_SOCKET_POLICIES_GENERATION_COUNT(); - return (TRUE); + return TRUE; fail: // Free memory, reset masks to 0 @@ -4941,7 +4942,7 @@ fail: FREE(necp_kernel_socket_policies_app_layer_map, M_NECP); necp_kernel_socket_policies_app_layer_map = NULL; } - return (FALSE); + return FALSE; } static u_int32_t @@ -4960,7 +4961,7 @@ necp_get_new_string_id(void) if (wrapped) { // Already wrapped, give up NECPLOG0(LOG_ERR, "Failed to find a free app UUID.\n"); - return (0); + return 0; } necp_last_string_id = 1; wrapped = TRUE; @@ -4970,10 +4971,10 @@ necp_get_new_string_id(void) if (newid == 0) { NECPLOG0(LOG_ERR, "Allocate string id failed.\n"); - return (0); + return 0; } - return (newid); + return newid; } static struct necp_string_id_mapping * @@ -4989,7 +4990,7 @@ necp_lookup_string_to_id_locked(struct necp_string_id_mapping_list *list, char * } } - return (foundentry); + return foundentry; } static struct necp_string_id_mapping * @@ -5005,7 +5006,7 @@ necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_in } } - return (foundentry); + return foundentry; } static u_int32_t @@ -5040,7 +5041,7 @@ necp_create_string_to_id_mapping(struct necp_string_id_mapping_list *list, char } } } - return (string_id); + return string_id; } static bool @@ -5057,10 +5058,10 @@ necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char FREE(existing_mapping->string, M_NECP); FREE(existing_mapping, M_NECP); } - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } #define NECP_FIRST_VALID_ROUTE_RULE_ID 1 @@ -5081,11 +5082,11 
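The long run of reindented continue statements in necp_kernel_socket_policy_is_unnecessary() implements a subset test: a higher-priority policy makes a lower one unnecessary only when its condition mask covers no condition the lower policy lacks, and every condition it does test carries the same value. A reduced sketch of that shape, cut down to two integer conditions with illustrative field names:

#include <stdbool.h>
#include <stdint.h>

#define COND_PID 0x1
#define COND_UID 0x2

struct policy {
	uint32_t condition_mask;
	int      pid;
	int      uid;
};

/*
 * True if `upper` is at least as general as `lower`: it tests no condition
 * that `lower` does not, and each condition it tests has the same value.
 */
static bool
makes_unnecessary(const struct policy *upper, const struct policy *lower)
{
	if (upper->condition_mask == 0) {
		return true;  /* a default policy shadows everything below it */
	}
	if (upper->condition_mask & ~lower->condition_mask) {
		return false; /* upper is more specific in some dimension */
	}
	if ((upper->condition_mask & COND_PID) && upper->pid != lower->pid) {
		return false;
	}
	if ((upper->condition_mask & COND_UID) && upper->uid != lower->uid) {
		return false;
	}
	return true;
}

The kernel walks a bucket of candidate policies and uses continue to reject each non-shadowing candidate; the sketch inverts that into a pure predicate over one pair.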
@@ necp_get_new_route_rule_id(bool aggregate) do { necp_last_route_rule_id++; if (necp_last_route_rule_id < NECP_FIRST_VALID_ROUTE_RULE_ID || - necp_last_route_rule_id >= NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID) { + necp_last_route_rule_id >= NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID) { if (wrapped) { // Already wrapped, give up NECPLOG0(LOG_ERR, "Failed to find a free route rule id.\n"); - return (0); + return 0; } necp_last_route_rule_id = NECP_FIRST_VALID_ROUTE_RULE_ID; wrapped = TRUE; @@ -5103,7 +5104,7 @@ necp_get_new_route_rule_id(bool aggregate) if (wrapped) { // Already wrapped, give up NECPLOG0(LOG_ERR, "Failed to find a free aggregate route rule id.\n"); - return (0); + return 0; } necp_last_aggregate_route_rule_id = NECP_FIRST_VALID_AGGREGATE_ROUTE_RULE_ID; wrapped = TRUE; @@ -5114,10 +5115,10 @@ necp_get_new_route_rule_id(bool aggregate) if (newid == 0) { NECPLOG0(LOG_ERR, "Allocate route rule ID failed.\n"); - return (0); + return 0; } - return (newid); + return newid; } static struct necp_route_rule * @@ -5133,7 +5134,7 @@ necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route } } - return (foundentry); + return foundentry; } static struct necp_route_rule * @@ -5144,10 +5145,10 @@ necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_i LIST_FOREACH(searchentry, list, chain) { if (searchentry->default_action == default_action && - searchentry->cellular_action == cellular_action && - searchentry->wifi_action == wifi_action && - searchentry->wired_action == wired_action && - searchentry->expensive_action == expensive_action) { + searchentry->cellular_action == cellular_action && + searchentry->wifi_action == wifi_action && + searchentry->wired_action == wired_action && + searchentry->expensive_action == expensive_action) { bool match_failed = FALSE; size_t index_a = 0; size_t index_b = 0; @@ -5167,7 +5168,7 @@ necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_i count_b = index_b + 1; } if (searchentry->exception_if_indices[index_a] == if_indices[index_b] && - searchentry->exception_if_actions[index_a] == if_actions[index_b]) { + searchentry->exception_if_actions[index_a] == if_actions[index_b]) { found_index = TRUE; break; } @@ -5184,7 +5185,7 @@ necp_lookup_route_rule_by_contents_locked(struct necp_route_rule_list *list, u_i } } - return (foundentry); + return foundentry; } static u_int32_t @@ -5207,7 +5208,7 @@ necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_ LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE); if (route_rules_array == NULL || route_rules_array_size == 0) { - return (0); + return 0; } // Process rules @@ -5285,7 +5286,7 @@ necp_create_route_rule(struct necp_route_rule_list *list, u_int8_t *route_rules_ LIST_INSERT_HEAD(list, new_rule, chain); } } - return (route_rule_id); + return route_rule_id; } static void @@ -5327,10 +5328,10 @@ necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_i LIST_REMOVE(existing_rule, chain); FREE(existing_rule, M_NECP); } - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } static struct necp_aggregate_route_rule * @@ -5350,7 +5351,7 @@ necp_lookup_aggregate_route_rule_locked(u_int32_t route_rule_id) lck_rw_done(&necp_route_rule_lock); - return (foundentry); + return foundentry; } static u_int32_t @@ -5362,7 +5363,7 @@ necp_create_aggregate_route_rule(u_int32_t *rule_ids) LIST_FOREACH(existing_rule, &necp_aggregate_route_rules, chain) { if 
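necp_get_new_string_id(), necp_get_new_route_rule_id(), and necp_get_new_uuid_id() all share one allocation idiom: advance a monotonically increasing counter, skip values still registered, and wrap back to the first valid ID at most once before giving up with 0. A generic sketch of that idiom, with in_use() as a hypothetical placeholder for the list lookups the kernel performs:

#include <stdbool.h>
#include <stdint.h>

#define FIRST_VALID_ID 1
#define LAST_VALID_ID  UINT32_MAX

extern bool in_use(uint32_t id); /* assumption: e.g. a list lookup */

static uint32_t last_id = 0;

/* Returns a free ID, or 0 after one full wrap with no free slot. */
static uint32_t
get_new_id(void)
{
	bool wrapped = false;
	uint32_t newid = 0;
	do {
		last_id++;
		if (last_id < FIRST_VALID_ID || last_id >= LAST_VALID_ID) {
			if (wrapped) {
				return 0; /* already wrapped once: give up */
			}
			last_id = FIRST_VALID_ID;
			wrapped = true;
		}
		newid = last_id;
	} while (in_use(newid)); /* skip IDs still registered */
	return newid;
}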
(memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) { - return (existing_rule->id); + return existing_rule->id; } } @@ -5372,7 +5373,7 @@ necp_create_aggregate_route_rule(u_int32_t *rule_ids) // Re-check, in case something else created the rule while we are waiting to lock if (memcmp(existing_rule->rule_ids, rule_ids, (sizeof(u_int32_t) * MAX_AGGREGATE_ROUTE_RULES)) == 0) { lck_rw_done(&necp_route_rule_lock); - return (existing_rule->id); + return existing_rule->id; } } @@ -5386,7 +5387,7 @@ necp_create_aggregate_route_rule(u_int32_t *rule_ids) } lck_rw_done(&necp_route_rule_lock); - return (aggregate_route_rule_id); + return aggregate_route_rule_id; } #define NECP_NULL_SERVICE_ID 1 @@ -5407,11 +5408,11 @@ necp_get_new_uuid_id(bool service) do { necp_last_service_uuid_id++; if (necp_last_service_uuid_id < NECP_FIRST_VALID_SERVICE_ID || - necp_last_service_uuid_id >= NECP_FIRST_VALID_APP_ID) { + necp_last_service_uuid_id >= NECP_FIRST_VALID_APP_ID) { if (wrapped) { // Already wrapped, give up NECPLOG0(LOG_ERR, "Failed to find a free service UUID.\n"); - return (NECP_NULL_SERVICE_ID); + return NECP_NULL_SERVICE_ID; } necp_last_service_uuid_id = NECP_FIRST_VALID_SERVICE_ID; wrapped = TRUE; @@ -5426,7 +5427,7 @@ necp_get_new_uuid_id(bool service) if (wrapped) { // Already wrapped, give up NECPLOG0(LOG_ERR, "Failed to find a free app UUID.\n"); - return (NECP_NULL_SERVICE_ID); + return NECP_NULL_SERVICE_ID; } necp_last_app_uuid_id = NECP_FIRST_VALID_APP_ID; wrapped = TRUE; @@ -5437,10 +5438,10 @@ necp_get_new_uuid_id(bool service) if (newid == NECP_NULL_SERVICE_ID) { NECPLOG0(LOG_ERR, "Allocate uuid ID failed.\n"); - return (NECP_NULL_SERVICE_ID); + return NECP_NULL_SERVICE_ID; } - return (newid); + return newid; } static struct necp_uuid_id_mapping * @@ -5456,7 +5457,7 @@ necp_uuid_lookup_app_id_locked(uuid_t uuid) } } - return (foundentry); + return foundentry; } static struct necp_uuid_id_mapping * @@ -5475,7 +5476,7 @@ necp_uuid_lookup_uuid_with_app_id_locked(u_int32_t local_id) } } - return (foundentry); + return foundentry; } static u_int32_t @@ -5520,7 +5521,7 @@ necp_create_uuid_app_id_mapping(uuid_t uuid, bool *allocated_mapping, bool uuid_ } } - return (local_id); + return local_id; } static bool @@ -5546,10 +5547,10 @@ necp_remove_uuid_app_id_mapping(uuid_t uuid, bool *removed_mapping, bool uuid_po *removed_mapping = TRUE; } } - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } static struct necp_uuid_id_mapping * @@ -5559,7 +5560,7 @@ necp_uuid_get_null_service_id_mapping(void) uuid_clear(null_mapping.uuid); null_mapping.id = NECP_NULL_SERVICE_ID; - return (&null_mapping); + return &null_mapping; } static struct necp_uuid_id_mapping * @@ -5579,7 +5580,7 @@ necp_uuid_lookup_service_id_locked(uuid_t uuid) } } - return (foundentry); + return foundentry; } static struct necp_uuid_id_mapping * @@ -5599,7 +5600,7 @@ necp_uuid_lookup_uuid_with_service_id_locked(u_int32_t local_id) } } - return (foundentry); + return foundentry; } static u_int32_t @@ -5609,7 +5610,7 @@ necp_create_uuid_service_id_mapping(uuid_t uuid) struct necp_uuid_id_mapping *existing_mapping = NULL; if (uuid_is_null(uuid)) { - return (NECP_NULL_SERVICE_ID); + return NECP_NULL_SERVICE_ID; } LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE); @@ -5632,7 +5633,7 @@ necp_create_uuid_service_id_mapping(uuid_t uuid) } } - return (local_id); + return local_id; } static bool @@ -5641,7 +5642,7 @@ necp_remove_uuid_service_id_mapping(uuid_t uuid) struct 
necp_uuid_id_mapping *existing_mapping = NULL; if (uuid_is_null(uuid)) { - return (TRUE); + return TRUE; } LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE); @@ -5652,10 +5653,10 @@ necp_remove_uuid_service_id_mapping(uuid_t uuid) LIST_REMOVE(existing_mapping, chain); FREE(existing_mapping, M_NECP); } - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } @@ -5667,7 +5668,7 @@ necp_kernel_socket_policies_update_uuid_table(void) if (necp_uuid_app_id_mappings_dirty) { if (proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_CLEAR, NULL, PROC_UUID_NECP_APP_POLICY) < 0) { NECPLOG0(LOG_DEBUG, "Error clearing uuids from policy table\n"); - return (FALSE); + return FALSE; } if (necp_num_uuid_app_id_mappings > 0) { @@ -5676,7 +5677,7 @@ necp_kernel_socket_policies_update_uuid_table(void) struct necp_uuid_id_mapping *mapping = NULL; LIST_FOREACH(mapping, uuid_list_head, chain) { if (mapping->table_refcount > 0 && - proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) { + proc_uuid_policy_kernel(PROC_UUID_POLICY_OPERATION_ADD, mapping->uuid, PROC_UUID_NECP_APP_POLICY) < 0) { NECPLOG0(LOG_DEBUG, "Error adding uuid to policy table\n"); } } @@ -5686,10 +5687,10 @@ necp_kernel_socket_policies_update_uuid_table(void) necp_uuid_app_id_mappings_dirty = FALSE; } - return (TRUE); + return TRUE; } -#define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE) +#define NECP_KERNEL_VALID_IP_OUTPUT_CONDITIONS (NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_POLICY_ID | NECP_KERNEL_CONDITION_LAST_INTERFACE) static necp_kernel_policy_id necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order suborder, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_kernel_policy_id cond_policy_id, ifnet_t cond_bound_interface, u_int32_t cond_last_interface_index, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter) { @@ -5764,7 +5765,7 @@ necp_kernel_ip_output_policy_add(necp_policy_order order, necp_policy_order subo } LIST_INSERT_SORTED_THRICE_ASCENDING(&necp_kernel_ip_output_policies, new_kernel_policy, chain, session_order, order, suborder, tmp_kernel_policy); done: - return (new_kernel_policy ? new_kernel_policy->id : 0); + return new_kernel_policy ? 
new_kernel_policy->id : 0; } static struct necp_kernel_ip_output_policy * @@ -5774,16 +5775,16 @@ necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id) struct necp_kernel_ip_output_policy *tmp_kernel_policy = NULL; if (policy_id == 0) { - return (NULL); + return NULL; } LIST_FOREACH_SAFE(kernel_policy, &necp_kernel_ip_output_policies, chain, tmp_kernel_policy) { if (kernel_policy->id == policy_id) { - return (kernel_policy); + return kernel_policy; } } - return (NULL); + return NULL; } static bool @@ -5803,10 +5804,10 @@ necp_kernel_ip_output_policy_delete(necp_kernel_policy_id policy_id) } FREE_ZONE(policy, sizeof(*policy), M_NECP_IP_POLICY); - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } static void @@ -5841,21 +5842,21 @@ necp_kernel_ip_output_policy_results_overlap(struct necp_kernel_ip_output_policy if (upper_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) { if (upper_policy->session_order != lower_policy->session_order) { // A skip cannot override a policy of a different session - return (FALSE); + return FALSE; } else { if (upper_policy->result_parameter.skip_policy_order == 0 || - lower_policy->order >= upper_policy->result_parameter.skip_policy_order) { + lower_policy->order >= upper_policy->result_parameter.skip_policy_order) { // This policy is beyond the skip - return (FALSE); + return FALSE; } else { // This policy is inside the skip - return (TRUE); + return TRUE; } } } // All other IP Output policy results (drop, tunnel, hard pass) currently overlap - return (TRUE); + return TRUE; } static bool @@ -5871,7 +5872,7 @@ necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy // For policies in a skip window, we can't mark conflicting policies as unnecessary if (can_skip) { if (highest_skip_session_order != compared_policy->session_order || - (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) { + (highest_skip_order != 0 && compared_policy->order >= highest_skip_order)) { // If we've moved on to the next session, or passed the skip window highest_skip_session_order = 0; highest_skip_order = 0; @@ -5906,7 +5907,7 @@ necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy // Default makes lower policies unecessary always if (compared_policy->condition_mask == 0) { - return (TRUE); + return TRUE; } // Compared must be more general than policy, and include only conditions within policy @@ -5920,17 +5921,17 @@ necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID && - compared_policy->cond_policy_id != policy->cond_policy_id) { + compared_policy->cond_policy_id != policy->cond_policy_id) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE && - compared_policy->cond_bound_interface != policy->cond_bound_interface) { + compared_policy->cond_bound_interface != policy->cond_bound_interface) { continue; } if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_PROTOCOL && - compared_policy->cond_protocol != policy->cond_protocol) { + compared_policy->cond_protocol != policy->cond_protocol) { continue; } @@ -5941,7 +5942,7 @@ necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy } } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) { if (compared_policy->cond_local_prefix > policy->cond_local_prefix || - !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr 
*)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) { + !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_local_start, (struct sockaddr *)&compared_policy->cond_local_start, compared_policy->cond_local_prefix)) { continue; } } @@ -5954,16 +5955,16 @@ necp_kernel_ip_output_policy_is_unnecessary(struct necp_kernel_ip_output_policy } } else if (compared_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) { if (compared_policy->cond_remote_prefix > policy->cond_remote_prefix || - !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) { + !necp_is_addr_in_subnet((struct sockaddr *)&policy->cond_remote_start, (struct sockaddr *)&compared_policy->cond_remote_start, compared_policy->cond_remote_prefix)) { continue; } } } - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } static bool @@ -6000,7 +6001,7 @@ necp_kernel_ip_output_policies_reprocess(void) * Non-id and SKIP policies will be added to all buckets */ if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) || - kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) { + kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) { for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) { bucket_allocation_counts[i]++; } @@ -6029,7 +6030,7 @@ necp_kernel_ip_output_policies_reprocess(void) LIST_FOREACH(kernel_policy, &necp_kernel_ip_output_policies, chain) { // Insert pointers into map if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) || - kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) { + kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP) { for (i = 0; i < NECP_KERNEL_IP_OUTPUT_POLICIES_MAP_NUM_ID_BUCKETS; i++) { if (!necp_kernel_ip_output_policy_is_unnecessary(kernel_policy, necp_kernel_ip_output_policies_map[i], bucket_current_free_index[i])) { (necp_kernel_ip_output_policies_map[i])[(bucket_current_free_index[i])] = kernel_policy; @@ -6047,7 +6048,7 @@ necp_kernel_ip_output_policies_reprocess(void) } } necp_kernel_ip_output_policies_dump_all(); - return (TRUE); + return TRUE; fail: // Free memory, reset mask to 0 @@ -6060,7 +6061,7 @@ fail: necp_kernel_ip_output_policies_map[i] = NULL; } } - return (FALSE); + return FALSE; } // Outbound Policy Matching @@ -6086,7 +6087,7 @@ necp_trim_dots_and_stars(char *string, size_t length) sub.length--; } - return (sub); + return sub; } static char * @@ -6097,13 +6098,13 @@ necp_create_trimmed_domain(char *string, size_t length) MALLOC(trimmed_domain, char *, sub.length + 1, M_NECP, M_WAITOK); if (trimmed_domain == NULL) { - return (NULL); + return NULL; } memcpy(trimmed_domain, sub.string, sub.length); trimmed_domain[sub.length] = 0; - return (trimmed_domain); + return trimmed_domain; } static inline int @@ -6118,33 +6119,33 @@ necp_count_dots(char *string, size_t length) } } - return (dot_count); + return dot_count; } static bool necp_check_suffix(struct substring parent, struct substring suffix, bool require_dot_before_suffix) { if (parent.length <= suffix.length) { - return (FALSE); + return FALSE; } size_t length_difference = (parent.length - suffix.length); if (require_dot_before_suffix) { if (((char *)(parent.string + length_difference - 1))[0] != '.') { - return (FALSE); + return FALSE; } } // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters) - return (strncasecmp(parent.string + length_difference, suffix.string, 
suffix.length) == 0); + return strncasecmp(parent.string + length_difference, suffix.string, suffix.length) == 0; } static bool necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostname_dot_count, char *domain, u_int8_t domain_dot_count) { if (hostname_substring.string == NULL || domain == NULL) { - return (hostname_substring.string == domain); + return hostname_substring.string == domain; } struct substring domain_substring; @@ -6154,16 +6155,16 @@ necp_hostname_matches_domain(struct substring hostname_substring, u_int8_t hostn if (hostname_dot_count == domain_dot_count) { // strncasecmp does case-insensitive check for all UTF-8 strings (ignores non-ASCII characters) if (hostname_substring.length == domain_substring.length && - strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) { - return (TRUE); + strncasecmp(hostname_substring.string, domain_substring.string, hostname_substring.length) == 0) { + return TRUE; } } else if (domain_dot_count < hostname_dot_count) { if (necp_check_suffix(hostname_substring, domain_substring, TRUE)) { - return (TRUE); + return TRUE; } } - return (FALSE); + return FALSE; } static char * @@ -6173,13 +6174,13 @@ necp_copy_string(char *string, size_t length) MALLOC(copied_string, char *, length + 1, M_NECP, M_WAITOK); if (copied_string == NULL) { - return (NULL); + return NULL; } memcpy(copied_string, string, length); copied_string[length] = 0; - return (copied_string); + return copied_string; } static u_int32_t @@ -6192,7 +6193,7 @@ necp_get_primary_direct_interface_index(void) TAILQ_FOREACH(ordered_interface, &ifnet_ordered_head, if_ordered_link) { const u_int8_t functional_type = if_functional_type(ordered_interface, TRUE); if (functional_type != IFRTYPE_FUNCTIONAL_UNKNOWN && - functional_type != IFRTYPE_FUNCTIONAL_LOOPBACK) { + functional_type != IFRTYPE_FUNCTIONAL_LOOPBACK) { // All known, non-loopback functional types represent direct physical interfaces (Wi-Fi, Cellular, Wired) interface_index = ordered_interface->if_index; break; @@ -6232,7 +6233,7 @@ necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info) } } -#define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX) +#define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX) static void necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, proc_t proc, struct necp_socket_info *info) { @@ -6309,25 +6310,25 @@ extern char *proc_name_address(void *p); #define NECP_VERIFY_DELEGATION_ENTITLEMENT(_p, _d) \ if (!has_checked_delegation_entitlement) { \ - has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \ - has_checked_delegation_entitlement = TRUE; \ + has_delegation_entitlement = (priv_check_cred(proc_ucred(_p), PRIV_NET_PRIVILEGED_SOCKET_DELEGATE, 0) == 0); \ + has_checked_delegation_entitlement = TRUE; \ } \ if 
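The domain-matching helpers reindented above compare dot counts first and only fall back to a suffix comparison when the hostname is deeper than the domain, requiring a '.' immediately before the suffix. That is why "mail.example.com" matches the domain "example.com" while "notexample.com" does not. A self-contained sketch of that rule over plain NUL-terminated strings (the kernel operates on length-delimited substrings instead):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h> /* strncasecmp */

static int
count_dots(const char *s)
{
	int n = 0;
	for (; *s != '\0'; s++) {
		if (*s == '.') {
			n++;
		}
	}
	return n;
}

/* True if hostname equals domain, or ends with "." + domain. */
static bool
hostname_matches_domain(const char *hostname, const char *domain)
{
	size_t hlen = strlen(hostname), dlen = strlen(domain);
	int hdots = count_dots(hostname), ddots = count_dots(domain);

	if (hdots == ddots) {
		/* Same depth: must match exactly (case-insensitive). */
		return hlen == dlen && strncasecmp(hostname, domain, hlen) == 0;
	}
	if (ddots < hdots && hlen > dlen) {
		/* Deeper hostname: require a '.' right before the suffix. */
		return hostname[hlen - dlen - 1] == '.' &&
		    strncasecmp(hostname + (hlen - dlen), domain, dlen) == 0;
	}
	return false;
}

int
main(void)
{
	printf("%d\n", hostname_matches_domain("mail.example.com", "example.com")); /* 1 */
	printf("%d\n", hostname_matches_domain("notexample.com", "example.com"));   /* 0 */
	return 0;
}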
(!has_delegation_entitlement) { \ - NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \ - proc_name_address(_p), proc_pid(_p), _d); \ - break; \ + NECPLOG(LOG_ERR, "%s(%d) does not hold the necessary entitlement to delegate network traffic for other processes by %s", \ + proc_name_address(_p), proc_pid(_p), _d); \ + break; \ } int necp_application_find_policy_match_internal(proc_t proc, - u_int8_t *parameters, - u_int32_t parameters_size, - struct necp_aggregate_result *returned_result, - u_int32_t *flags, - u_int required_interface_index, - const union necp_sockaddr_union *override_local_addr, - const union necp_sockaddr_union *override_remote_addr, - struct rtentry **returned_route, bool ignore_address) + u_int8_t *parameters, + u_int32_t parameters_size, + struct necp_aggregate_result *returned_result, + u_int32_t *flags, + u_int required_interface_index, + const union necp_sockaddr_union *override_local_addr, + const union necp_sockaddr_union *override_remote_addr, + struct rtentry **returned_route, bool ignore_address) { int error = 0; size_t offset = 0; @@ -6388,7 +6389,7 @@ necp_application_find_policy_match_internal(proc_t proc, bool has_delegation_entitlement = FALSE; if (returned_result == NULL) { - return (EINVAL); + return EINVAL; } memset(returned_result, 0, sizeof(struct necp_aggregate_result)); @@ -6398,7 +6399,7 @@ necp_application_find_policy_match_internal(proc_t proc, if (necp_drop_all_order > 0) { returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_DROP; lck_rw_done(&necp_kernel_policy_lock); - return (0); + return 0; } } lck_rw_done(&necp_kernel_policy_lock); @@ -6417,138 +6418,138 @@ necp_application_find_policy_match_internal(proc_t proc, u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL); if (value != NULL) { switch (type) { - case NECP_CLIENT_PARAMETER_APPLICATION: { - if (length >= sizeof(uuid_t)) { - if (uuid_compare(application_uuid, value) == 0) { - // No delegation - break; - } + case NECP_CLIENT_PARAMETER_APPLICATION: { + if (length >= sizeof(uuid_t)) { + if (uuid_compare(application_uuid, value) == 0) { + // No delegation + break; + } - NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid"); + NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "euuid"); - uuid_copy(application_uuid, value); - } - break; + uuid_copy(application_uuid, value); } - case NECP_CLIENT_PARAMETER_REAL_APPLICATION: { - if (length >= sizeof(uuid_t)) { - if (uuid_compare(real_application_uuid, value) == 0) { - // No delegation - break; - } + break; + } + case NECP_CLIENT_PARAMETER_REAL_APPLICATION: { + if (length >= sizeof(uuid_t)) { + if (uuid_compare(real_application_uuid, value) == 0) { + // No delegation + break; + } - NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid"); + NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uuid"); - uuid_copy(real_application_uuid, value); - } - break; + uuid_copy(real_application_uuid, value); } - case NECP_CLIENT_PARAMETER_PID: { - if (length >= sizeof(pid_t)) { - if (memcmp(&pid, value, sizeof(pid_t)) == 0) { - // No delegation - break; - } + break; + } + case NECP_CLIENT_PARAMETER_PID: { + if (length >= sizeof(pid_t)) { + if (memcmp(&pid, value, sizeof(pid_t)) == 0) { + // No delegation + break; + } - NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid"); + NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "pid"); - memcpy(&pid, value, sizeof(pid_t)); - } - break; + memcpy(&pid, value, sizeof(pid_t)); } - case NECP_CLIENT_PARAMETER_UID: { - if (length >= sizeof(uid_t)) { - if (memcmp(&uid, 
value, sizeof(uid_t)) == 0) { - // No delegation - break; - } + break; + } + case NECP_CLIENT_PARAMETER_UID: { + if (length >= sizeof(uid_t)) { + if (memcmp(&uid, value, sizeof(uid_t)) == 0) { + // No delegation + break; + } - NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid"); + NECP_VERIFY_DELEGATION_ENTITLEMENT(proc, "uid"); - memcpy(&uid, value, sizeof(uid_t)); - } - break; - } - case NECP_CLIENT_PARAMETER_DOMAIN: { - domain = (char *)value; - domain[length - 1] = 0; - break; + memcpy(&uid, value, sizeof(uid_t)); } - case NECP_CLIENT_PARAMETER_ACCOUNT: { - account = (char *)value; - account[length - 1] = 0; - break; + break; + } + case NECP_CLIENT_PARAMETER_DOMAIN: { + domain = (char *)value; + domain[length - 1] = 0; + break; + } + case NECP_CLIENT_PARAMETER_ACCOUNT: { + account = (char *)value; + account[length - 1] = 0; + break; + } + case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: { + if (length >= sizeof(u_int32_t)) { + memcpy(&traffic_class, value, sizeof(u_int32_t)); } - case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: { - if (length >= sizeof(u_int32_t)) { - memcpy(&traffic_class, value, sizeof(u_int32_t)); - } - break; + break; + } + case NECP_CLIENT_PARAMETER_IP_PROTOCOL: { + if (length >= sizeof(u_int16_t)) { + memcpy(&protocol, value, sizeof(u_int16_t)); } - case NECP_CLIENT_PARAMETER_IP_PROTOCOL: { - if (length >= sizeof(u_int16_t)) { - memcpy(&protocol, value, sizeof(u_int16_t)); + break; + } + case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: { + if (length <= IFXNAMSIZ && length > 0) { + ifnet_t bound_interface = NULL; + char interface_name[IFXNAMSIZ]; + memcpy(interface_name, value, length); + interface_name[length - 1] = 0; // Make sure the string is NULL terminated + if (ifnet_find_by_name(interface_name, &bound_interface) == 0) { + bound_interface_index = bound_interface->if_index; + ifnet_release(bound_interface); } - break; } - case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: { - if (length <= IFXNAMSIZ && length > 0) { - ifnet_t bound_interface = NULL; - char interface_name[IFXNAMSIZ]; - memcpy(interface_name, value, length); - interface_name[length - 1] = 0; // Make sure the string is NULL terminated - if (ifnet_find_by_name(interface_name, &bound_interface) == 0) { - bound_interface_index = bound_interface->if_index; - ifnet_release(bound_interface); - } - } + break; + } + case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: { + if (ignore_address) { break; } - case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: { - if (ignore_address) { - break; - } - if (length >= sizeof(struct necp_policy_condition_addr)) { - struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value; - if (necp_address_is_valid(&address_struct->address.sa)) { - memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address)); - } + if (length >= sizeof(struct necp_policy_condition_addr)) { + struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value; + if (necp_address_is_valid(&address_struct->address.sa)) { + memcpy(&local_addr, &address_struct->address, sizeof(address_struct->address)); } + } + break; + } + case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: { + if (ignore_address) { break; } - case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: { - if (ignore_address) { - break; - } - if (length >= sizeof(struct necp_policy_condition_addr)) { - struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value; - if (necp_address_is_valid(&address_struct->address.sa)) { - memcpy(&remote_addr, 
&address_struct->address, sizeof(address_struct->address)); - } + if (length >= sizeof(struct necp_policy_condition_addr)) { + struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value; + if (necp_address_is_valid(&address_struct->address.sa)) { + memcpy(&remote_addr, &address_struct->address, sizeof(address_struct->address)); } - break; } - case NECP_CLIENT_PARAMETER_FLAGS: { - if (length >= sizeof(client_flags)) { - memcpy(&client_flags, value, sizeof(client_flags)); - } - break; + break; + } + case NECP_CLIENT_PARAMETER_FLAGS: { + if (length >= sizeof(client_flags)) { + memcpy(&client_flags, value, sizeof(client_flags)); } - case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: { - if (num_required_agent_types >= NECP_MAX_REQUIRED_AGENTS) { - break; - } - if (length >= sizeof(struct necp_client_parameter_netagent_type)) { - memcpy(&required_agent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type)); - num_required_agent_types++; - } + break; + } + case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: { + if (num_required_agent_types >= NECP_MAX_REQUIRED_AGENTS) { break; } - default: { - break; + if (length >= sizeof(struct necp_client_parameter_netagent_type)) { + memcpy(&required_agent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type)); + num_required_agent_types++; } + break; + } + default: { + break; + } } } } @@ -6628,14 +6629,14 @@ necp_application_find_policy_match_internal(proc_t proc, } if (local_addr.sa.sa_len == 0 || - (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) || - (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) { + (local_addr.sa.sa_family == AF_INET && local_addr.sin.sin_addr.s_addr == 0) || + (local_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&local_addr.sin6.sin6_addr))) { no_local_addr = TRUE; } if (remote_addr.sa.sa_len == 0 || - (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) || - (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) { + (remote_addr.sa.sa_family == AF_INET && remote_addr.sin.sin_addr.s_addr == 0) || + (remote_addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&remote_addr.sin6.sin6_addr))) { no_remote_addr = TRUE; remote_family = remote_addr.sa.sa_family; } @@ -6661,7 +6662,7 @@ necp_application_find_policy_match_internal(proc_t proc, } rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0, - output_bound_interface); + output_bound_interface); if (remote_addr.sa.sa_family == AF_INET && rt != NULL && IS_INTF_CLAT46(rt->rt_ifp)) { @@ -6671,7 +6672,7 @@ necp_application_find_policy_match_internal(proc_t proc, } if (no_remote_addr && remote_family == 0 && - (rt == NULL || rt->rt_ifp == NULL)) { + (rt == NULL || rt->rt_ifp == NULL)) { // Route lookup for default IPv4 failed, try IPv6 // Cleanup old route if necessary @@ -6687,11 +6688,11 @@ necp_application_find_policy_match_internal(proc_t proc, // Get route rt = rtalloc1_scoped((struct sockaddr *)&remote_addr, 0, 0, - output_bound_interface); + output_bound_interface); } if (rt != NULL && - rt->rt_ifp != NULL) { + rt->rt_ifp != NULL) { returned_result->routed_interface_index = rt->rt_ifp->if_index; /* * For local addresses, we allow the interface scope to be @@ -6699,10 +6700,10 @@ necp_application_find_policy_match_internal(proc_t proc, * local address. 
*/ if (bound_interface_index != IFSCOPE_NONE && - rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp && - (output_bound_interface == lo_ifp->if_index || - rt->rt_ifp->if_index == lo_ifp->if_index || - rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) { + rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp && + (output_bound_interface == lo_ifp->if_index || + rt->rt_ifp->if_index == lo_ifp->if_index || + rt->rt_ifa->ifa_ifp->if_index == bound_interface_index)) { struct sockaddr_storage dst; unsigned int ifscope = bound_interface_index; @@ -6710,20 +6711,20 @@ necp_application_find_policy_match_internal(proc_t proc, * Transform dst into the internal routing table form */ (void) sa_copy((struct sockaddr *)&remote_addr, - &dst, &ifscope); + &dst, &ifscope); if ((rt->rt_ifp->if_index == lo_ifp->if_index) || - rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa)) + rt_ifa_is_dst((struct sockaddr *)&dst, rt->rt_ifa)) { returned_result->routed_interface_index = - bound_interface_index; + bound_interface_index; + } } } } if (returned_result->routed_interface_index != 0 && - returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address - !no_local_addr) { - + returned_result->routed_interface_index != lo_ifp->if_index && // Loopback can accept any local address + !no_local_addr) { // Transform local_addr into the ifaddr form // IPv6 Scope IDs are always embedded in the ifaddr list struct sockaddr_storage local_address_sanitized; @@ -6756,7 +6757,7 @@ necp_application_find_policy_match_internal(proc_t proc, if (rt != NULL && (rt->rt_flags & RTF_LOCAL)) { is_local = TRUE; } else if (returned_result->routed_interface_index != 0 && - !no_remote_addr) { + !no_remote_addr) { // Clean up the address before comparison with interface addresses // Transform remote_addr into the ifaddr form @@ -6774,7 +6775,7 @@ necp_application_find_policy_match_internal(proc_t proc, if (ifa != NULL && ifa->ifa_ifp != NULL) { u_int if_index_for_remote_addr = ifa->ifa_ifp->if_index; if (if_index_for_remote_addr == returned_result->routed_interface_index || - if_index_for_remote_addr == lo_ifp->if_index) { + if_index_for_remote_addr == lo_ifp->if_index) { is_local = TRUE; } } @@ -6788,15 +6789,15 @@ necp_application_find_policy_match_internal(proc_t proc, *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT); } else { if (rt != NULL && - !(rt->rt_flags & RTF_GATEWAY) && - (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) { + !(rt->rt_flags & RTF_GATEWAY) && + (rt->rt_ifa && rt->rt_ifa->ifa_ifp && !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) { // Route is directly accessible *flags |= NECP_CLIENT_RESULT_FLAG_IS_DIRECT; } } if (rt != NULL && - rt->rt_ifp != NULL) { + rt->rt_ifp != NULL) { // Check probe status if (rt->rt_ifp->if_eflags & IFEF_PROBE_CONNECTIVITY) { *flags |= NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY; @@ -6827,8 +6828,8 @@ necp_application_find_policy_match_internal(proc_t proc, // Check link quality if ((client_flags & NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY) && - (rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) && - rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) { + (rt->rt_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) && + rt->rt_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) { *flags |= NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT; } @@ -6907,7 +6908,7 @@ necp_application_find_policy_match_internal(proc_t proc, 
// Unlock lck_rw_done(&necp_kernel_policy_lock); - return (error); + return error; } static bool @@ -6919,36 +6920,36 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) { if (bound_interface_index == cond_bound_interface_index) { // No match, matches forbidden interface - return (FALSE); + return FALSE; } } else { if (bound_interface_index != cond_bound_interface_index) { // No match, does not match required interface - return (FALSE); + return FALSE; } } } else { if (bound_interface_index != 0) { // No match, requires a non-bound packet - return (FALSE); + return FALSE; } } } if (kernel_policy->condition_mask == 0) { - return (TRUE); + return TRUE; } if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_APP_ID) { if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_APP_ID) { if (app_id == kernel_policy->cond_app_id) { // No match, matches forbidden application - return (FALSE); + return FALSE; } } else { if (app_id != kernel_policy->cond_app_id) { // No match, does not match required application - return (FALSE); + return FALSE; } } } @@ -6957,12 +6958,12 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REAL_APP_ID) { if (real_app_id == kernel_policy->cond_real_app_id) { // No match, matches forbidden application - return (FALSE); + return FALSE; } } else { if (real_app_id != kernel_policy->cond_real_app_id) { // No match, does not match required application - return (FALSE); + return FALSE; } } } @@ -6970,26 +6971,26 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) { if (cred_result != 0) { // Process is missing entitlement - return (FALSE); + return FALSE; } } if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT) { if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_false) { // Process is missing entitlement based on previous check - return (FALSE); + return FALSE; } else if (kernel_policy->cond_custom_entitlement_matched == necp_boolean_state_unknown) { if (kernel_policy->cond_custom_entitlement != NULL) { if (proc == NULL) { // No process found, cannot check entitlement - return (FALSE); + return FALSE; } task_t task = proc_task(proc); if (task == NULL || - !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) { + !IOTaskHasEntitlement(task, kernel_policy->cond_custom_entitlement)) { // Process is missing custom entitlement kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_false; - return (FALSE); + return FALSE; } else { kernel_policy->cond_custom_entitlement_matched = necp_boolean_state_true; } @@ -7002,12 +7003,12 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_DOMAIN) { if (domain_matches) { // No match, matches forbidden domain - return (FALSE); + return FALSE; } } else { if (!domain_matches) { // No match, does not match required domain - return (FALSE); + return FALSE; } } } @@ -7016,12 +7017,12 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_ACCOUNT_ID) { if (account_id == kernel_policy->cond_account_id) { // No match, matches forbidden account - return (FALSE); + 
return FALSE; } } else { if (account_id != kernel_policy->cond_account_id) { // No match, does not match required account - return (FALSE); + return FALSE; } } } @@ -7030,12 +7031,12 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PID) { if (pid == kernel_policy->cond_pid) { // No match, matches forbidden pid - return (FALSE); + return FALSE; } } else { if (pid != kernel_policy->cond_pid) { // No match, does not match required pid - return (FALSE); + return FALSE; } } } @@ -7044,12 +7045,12 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_UID) { if (uid == kernel_policy->cond_uid) { // No match, matches forbidden uid - return (FALSE); + return FALSE; } } else { if (uid != kernel_policy->cond_uid) { // No match, does not match required uid - return (FALSE); + return FALSE; } } } @@ -7057,15 +7058,15 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) { if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_TRAFFIC_CLASS) { if (traffic_class >= kernel_policy->cond_traffic_class.start_tc && - traffic_class <= kernel_policy->cond_traffic_class.end_tc) { + traffic_class <= kernel_policy->cond_traffic_class.end_tc) { // No match, matches forbidden traffic class - return (FALSE); + return FALSE; } } else { if (traffic_class < kernel_policy->cond_traffic_class.start_tc || - traffic_class > kernel_policy->cond_traffic_class.end_tc) { + traffic_class > kernel_policy->cond_traffic_class.end_tc) { // No match, does not match required traffic class - return (FALSE); + return FALSE; } } } @@ -7074,12 +7075,12 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) { if (protocol == kernel_policy->cond_protocol) { // No match, matches forbidden protocol - return (FALSE); + return FALSE; } } else { if (protocol != kernel_policy->cond_protocol) { // No match, does not match required protocol - return (FALSE); + return FALSE; } } } @@ -7089,16 +7090,16 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a for (u_int32_t i = 0; i < num_required_agent_types; i++) { struct necp_client_parameter_netagent_type *required_agent_type = &required_agent_types[i]; if ((strlen(kernel_policy->cond_agent_type.agent_domain) == 0 || - strncmp(required_agent_type->netagent_domain, kernel_policy->cond_agent_type.agent_domain, NETAGENT_DOMAINSIZE) == 0) && - (strlen(kernel_policy->cond_agent_type.agent_type) == 0 || - strncmp(required_agent_type->netagent_type, kernel_policy->cond_agent_type.agent_type, NETAGENT_TYPESIZE) == 0)) { - // Found a required agent that matches - matches_agent_type = TRUE; - break; - } + strncmp(required_agent_type->netagent_domain, kernel_policy->cond_agent_type.agent_domain, NETAGENT_DOMAINSIZE) == 0) && + (strlen(kernel_policy->cond_agent_type.agent_type) == 0 || + strncmp(required_agent_type->netagent_type, kernel_policy->cond_agent_type.agent_type, NETAGENT_TYPESIZE) == 0)) { + // Found a required agent that matches + matches_agent_type = TRUE; + break; + } } if (!matches_agent_type) { - return (FALSE); + return FALSE; } } @@ -7107,22 +7108,22 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a bool inRange = 
necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end); if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) { if (inRange) { - return (FALSE); + return FALSE; } } else { if (!inRange) { - return (FALSE); + return FALSE; } } } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) { bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix); if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) { if (inSubnet) { - return (FALSE); + return FALSE; } } else { if (!inSubnet) { - return (FALSE); + return FALSE; } } } @@ -7133,34 +7134,34 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end); if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) { if (inRange) { - return (FALSE); + return FALSE; } } else { if (!inRange) { - return (FALSE); + return FALSE; } } } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) { bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix); if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) { if (inSubnet) { - return (FALSE); + return FALSE; } } else { if (!inSubnet) { - return (FALSE); + return FALSE; } } } } - return (TRUE); + return TRUE; } static inline u_int32_t necp_socket_calc_flowhash_locked(struct necp_socket_info *info) { - return (net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount)); + return net_flowhash(info, sizeof(*info), necp_kernel_socket_policies_gencount); } static void @@ -7286,11 +7287,11 @@ necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_loc static inline struct necp_kernel_socket_policy * necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy **policy_search_array, struct necp_socket_info *info, - necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, - necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, - u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count, - struct necp_client_parameter_netagent_type *required_agent_types, - u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id) + necp_kernel_policy_filter *return_filter, u_int32_t *return_route_rule_id, + necp_kernel_policy_result *return_service_action, necp_kernel_policy_service *return_service, + u_int32_t *return_netagent_array, u_int32_t *return_netagent_use_flags_array, size_t netagent_array_count, + struct necp_client_parameter_netagent_type *required_agent_types, + u_int32_t num_required_agent_types, proc_t proc, necp_kernel_policy_id *skip_policy_id) { struct necp_kernel_socket_policy *matched_policy = NULL; u_int32_t skip_order = 0; @@ -7378,20 +7379,20 @@ necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy } continue; } else if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT || - policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) { + 
policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) { if (return_netagent_array != NULL && - netagent_cursor < netagent_array_count) { + netagent_cursor < netagent_array_count) { return_netagent_array[netagent_cursor] = policy_search_array[i]->result_parameter.netagent_id; if (return_netagent_use_flags_array != NULL && - policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) { + policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED) { return_netagent_use_flags_array[netagent_cursor] |= NECP_AGENT_USE_FLAG_SCOPE; } netagent_cursor++; if (necp_debug > 1) { NECPLOG(LOG_DEBUG, "Socket Policy: (Application %d Real Application %d BoundInterface %d Proto %d) %s Netagent %d", - info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, - policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ? "Use" : "Scope", - policy_search_array[i]->result_parameter.netagent_id); + info->application_id, info->real_application_id, info->bound_interface_index, info->protocol, + policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_USE_NETAGENT ? "Use" : "Scope", + policy_search_array[i]->result_parameter.netagent_id); } } continue; @@ -7419,7 +7420,7 @@ necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy } else if (route_rule_id_count > 1) { *return_route_rule_id = necp_create_aggregate_route_rule(route_rule_id_array); } - return (matched_policy); + return matched_policy; } static bool @@ -7434,7 +7435,7 @@ necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index) ifnet_t interface = ifindex2ifnet[interface_index]; if (inp == NULL || interface == NULL) { - return (FALSE); + return FALSE; } if (inp->inp_vflag & INP_IPV4) { @@ -7446,7 +7447,7 @@ necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index) result = ifnet_get_address_list_family(interface, &addresses, family); if (result != 0) { NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface)); - return (FALSE); + return FALSE; } for (i = 0; addresses[i] != NULL; i++) { @@ -7468,26 +7469,25 @@ necp_socket_uses_interface(struct inpcb *inp, u_int32_t interface_index) done: ifnet_free_address_list(addresses); addresses = NULL; - return (found_match); + return found_match; } static inline bool necp_socket_is_connected(struct inpcb *inp) { - return (inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING)); + return inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING); } static inline bool necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp) { - if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL)) { - return (true); + return true; } else if (necp_is_intcoproc(inp, NULL)) { - return (true); + return true; } - return (false); + return false; } necp_kernel_policy_id @@ -7508,16 +7508,16 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local struct necp_socket_info info; if (inp == NULL) { - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } // Ignore invalid addresses if (override_local_addr != NULL && - !necp_address_is_valid(override_local_addr)) { + !necp_address_is_valid(override_local_addr)) { override_local_addr = NULL; } if (override_remote_addr != NULL && - !necp_address_is_valid(override_remote_addr)) { + 
!necp_address_is_valid(override_remote_addr)) { override_remote_addr = NULL; } @@ -7525,7 +7525,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local // Don't lock. Possible race condition, but we don't want the performance hit. if (necp_kernel_socket_policies_count == 0 || - (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) { + (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) { if (necp_drop_all_order > 0) { inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; @@ -7540,7 +7540,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP; } } - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } // Check for loopback exception @@ -7554,7 +7554,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.results.filter_control_unit = 0; inp->inp_policyresult.results.route_rule_id = 0; inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS; - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } // Lock @@ -7566,14 +7566,14 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local // Check info u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info); if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE && - inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount && - inp->inp_policyresult.flowhash == flowhash) { + inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount && + inp->inp_policyresult.flowhash == flowhash) { // If already matched this socket on this generation of table, skip // Unlock lck_rw_done(&necp_kernel_policy_lock); - return (inp->inp_policyresult.policy_id); + return inp->inp_policyresult.policy_id; } // Match socket to policy @@ -7582,9 +7582,9 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local // If the socket matched a scoped service policy, mark as Drop if not registered. // This covers the cases in which a service is required (on demand) but hasn't started yet. 
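
The comment above describes the on-demand service case: a TRIGGER_SCOPED or NO_TRIGGER_SCOPED result that carries a real service identifier should only match once some kernel client has registered that service. A minimal sketch of the registration walk, using the list head and chain names visible in this diff (necp_registered_service_list, kernel_chain); the service_id field name and the surrounding locking are assumptions, not shown in the hunk:

static bool
service_is_registered_sketch(u_int32_t service_identifier)
{
	struct necp_service_registration *registration = NULL;

	/* Walk the kernel-side registrations; an entry whose (assumed)
	 * service_id matches means the on-demand service has started. */
	LIST_FOREACH(registration, &necp_registered_service_list, kernel_chain) {
		if (registration->service_id == service_identifier) {
			return true;
		}
	}
	/* Not registered yet: the caller treats the match as a drop. */
	return false;
}

When the walk finds nothing, the matcher below records a drop result, unlocks, and returns NECP_KERNEL_POLICY_ID_NONE, as the following hunk shows.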
if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED || - service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) && - service.identifier != 0 && - service.identifier != NECP_NULL_SERVICE_ID) { + service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) && + service.identifier != 0 && + service.identifier != NECP_NULL_SERVICE_ID) { bool service_is_registered = FALSE; struct necp_service_registration *service_registration = NULL; LIST_FOREACH(service_registration, &necp_registered_service_list, kernel_chain) { @@ -7609,7 +7609,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local // Unlock lck_rw_done(&necp_kernel_policy_lock); - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } } // Verify netagents @@ -7650,7 +7650,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local // Unlock lck_rw_done(&necp_kernel_policy_lock); - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } } } @@ -7667,15 +7667,15 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local memcpy(&inp->inp_policyresult.results.result_parameter, &matched_policy->result_parameter, sizeof(matched_policy->result_parameter)); if (necp_socket_is_connected(inp) && - (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP || - (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) { + (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP || + (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && !necp_socket_uses_interface(inp, matched_policy->result_parameter.tunnel_interface_index)))) { if (necp_debug) { NECPLOG(LOG_DEBUG, "Marking socket in state %d as defunct", so->so_state); } sosetdefunct(current_proc(), so, SHUTDOWN_SOCKET_LEVEL_NECP | SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL, TRUE); } else if (necp_socket_is_connected(inp) && - matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && - info.protocol == IPPROTO_TCP) { + matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && + info.protocol == IPPROTO_TCP) { // Reset MSS on TCP socket if tunnel policy changes tcp_mtudisc(inp, 0); } @@ -7706,7 +7706,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local // Unlock lck_rw_done(&necp_kernel_policy_lock); - return (matched_policy_id); + return matched_policy_id; } static bool @@ -7718,38 +7718,38 @@ necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) { if (bound_interface_index == cond_bound_interface_index) { // No match, matches forbidden interface - return (FALSE); + return FALSE; } } else { if (bound_interface_index != cond_bound_interface_index) { // No match, does not match required interface - return (FALSE); + return FALSE; } } } else { if (bound_interface_index != 0) { // No match, requires a non-bound packet - return (FALSE); + return FALSE; } } } if (kernel_policy->condition_mask == 0) { - return (TRUE); + return TRUE; } if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_POLICY_ID) { necp_kernel_policy_id matched_policy_id = - kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP ? socket_skip_policy_id : socket_policy_id; + kernel_policy->result == NECP_KERNEL_POLICY_RESULT_SKIP ? 
socket_skip_policy_id : socket_policy_id; if (matched_policy_id != kernel_policy->cond_policy_id) { // No match, does not match required id - return (FALSE); + return FALSE; } } if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LAST_INTERFACE) { if (last_interface_index != kernel_policy->cond_last_interface_index) { - return (FALSE); + return FALSE; } } @@ -7757,12 +7757,12 @@ necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_PROTOCOL) { if (protocol == kernel_policy->cond_protocol) { // No match, matches forbidden protocol - return (FALSE); + return FALSE; } } else { if (protocol != kernel_policy->cond_protocol) { // No match, does not match required protocol - return (FALSE); + return FALSE; } } } @@ -7772,22 +7772,22 @@ necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, bool inRange = necp_is_addr_in_range((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, (struct sockaddr *)&kernel_policy->cond_local_end); if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_END) { if (inRange) { - return (FALSE); + return FALSE; } } else { if (!inRange) { - return (FALSE); + return FALSE; } } } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) { bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)local, (struct sockaddr *)&kernel_policy->cond_local_start, kernel_policy->cond_local_prefix); if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_LOCAL_PREFIX) { if (inSubnet) { - return (FALSE); + return FALSE; } } else { if (!inSubnet) { - return (FALSE); + return FALSE; } } } @@ -7798,28 +7798,28 @@ necp_ip_output_check_policy(struct necp_kernel_ip_output_policy *kernel_policy, bool inRange = necp_is_addr_in_range((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, (struct sockaddr *)&kernel_policy->cond_remote_end); if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_END) { if (inRange) { - return (FALSE); + return FALSE; } } else { if (!inRange) { - return (FALSE); + return FALSE; } } } else if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) { bool inSubnet = necp_is_addr_in_subnet((struct sockaddr *)remote, (struct sockaddr *)&kernel_policy->cond_remote_start, kernel_policy->cond_remote_prefix); if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_REMOTE_PREFIX) { if (inSubnet) { - return (FALSE); + return FALSE; } } else { if (!inSubnet) { - return (FALSE); + return FALSE; } } } } - return (TRUE); + return TRUE; } static inline struct necp_kernel_ip_output_policy * @@ -7869,22 +7869,22 @@ necp_ip_output_find_policy_match_locked(necp_kernel_policy_id socket_policy_id, } } - return (matched_policy); + return matched_policy; } static inline bool necp_output_bypass(struct mbuf *packet) { if (necp_pass_loopback > 0 && necp_is_loopback(NULL, NULL, NULL, packet)) { - return (true); + return true; } if (necp_pass_keepalives > 0 && necp_get_is_keepalive_from_packet(packet)) { - return (true); + return true; } if (necp_is_intcoproc(NULL, packet)) { - return (true); + return true; } - return (false); + return false; } necp_kernel_policy_id @@ -7911,7 +7911,7 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a } if (packet == NULL) { - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } socket_policy_id = necp_get_policy_id_from_packet(packet); @@ 
-7920,7 +7920,7 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a // Exit early for an empty list // Don't lock. Possible race condition, but we don't want the performance hit. if (necp_kernel_ip_output_policies_count == 0 || - ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) { + ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) { if (necp_drop_all_order > 0) { matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; if (result) { @@ -7932,7 +7932,7 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a } } - return (matched_policy_id); + return matched_policy_id; } // Check for loopback exception @@ -7941,7 +7941,7 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a if (result) { *result = NECP_KERNEL_POLICY_RESULT_PASS; } - return (matched_policy_id); + return matched_policy_id; } last_interface_index = necp_get_last_interface_index_from_packet(packet); @@ -7957,8 +7957,8 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a protocol = ip->ip_p; if ((flags & IP_OUTARGS) && (ipoa != NULL) && - (ipoa->ipoa_flags & IPOAF_BOUND_IF) && - ipoa->ipoa_boundif != IFSCOPE_NONE) { + (ipoa->ipoa_flags & IPOAF_BOUND_IF) && + ipoa->ipoa_boundif != IFSCOPE_NONE) { bound_interface_index = ipoa->ipoa_boundif; } @@ -7971,29 +7971,29 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a memcpy(&((struct sockaddr_in *)&remote_addr)->sin_addr, &ip->ip_dst, sizeof(ip->ip_dst)); switch (protocol) { - case IPPROTO_TCP: { - struct tcphdr th; - if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) { - m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th); - ((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport; - ((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport; - } - break; - } - case IPPROTO_UDP: { - struct udphdr uh; - if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) { - m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh); - ((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport; - ((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport; - } - break; + case IPPROTO_TCP: { + struct tcphdr th; + if ((int)(hlen + sizeof(th)) <= packet->m_pkthdr.len) { + m_copydata(packet, hlen, sizeof(th), (u_int8_t *)&th); + ((struct sockaddr_in *)&local_addr)->sin_port = th.th_sport; + ((struct sockaddr_in *)&remote_addr)->sin_port = th.th_dport; } - default: { - ((struct sockaddr_in *)&local_addr)->sin_port = 0; - ((struct sockaddr_in *)&remote_addr)->sin_port = 0; - break; + break; + } + case IPPROTO_UDP: { + struct udphdr uh; + if ((int)(hlen + sizeof(uh)) <= packet->m_pkthdr.len) { + m_copydata(packet, hlen, sizeof(uh), (u_int8_t *)&uh); + ((struct sockaddr_in *)&local_addr)->sin_port = uh.uh_sport; + ((struct sockaddr_in *)&remote_addr)->sin_port = uh.uh_dport; } + break; + } + default: { + ((struct sockaddr_in *)&local_addr)->sin_port = 0; + ((struct sockaddr_in *)&remote_addr)->sin_port = 0; + break; + } } // Match packet to policy @@ -8021,7 +8021,7 @@ necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_a lck_rw_done(&necp_kernel_policy_lock); - return (matched_policy_id); + return matched_policy_id; } necp_kernel_policy_id @@ -8049,7 +8049,7 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out } if (packet == NULL) { - return (NECP_KERNEL_POLICY_ID_NONE); + return 
NECP_KERNEL_POLICY_ID_NONE; } socket_policy_id = necp_get_policy_id_from_packet(packet); @@ -8058,7 +8058,7 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out // Exit early for an empty list // Don't lock. Possible race condition, but we don't want the performance hit. if (necp_kernel_ip_output_policies_count == 0 || - ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) { + ((socket_policy_id == NECP_KERNEL_POLICY_ID_NONE) && necp_kernel_ip_output_policies_non_id_count == 0)) { if (necp_drop_all_order > 0) { matched_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; if (result) { @@ -8070,7 +8070,7 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out } } - return (matched_policy_id); + return matched_policy_id; } // Check for loopback exception @@ -8079,7 +8079,7 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out if (result) { *result = NECP_KERNEL_POLICY_RESULT_PASS; } - return (matched_policy_id); + return matched_policy_id; } last_interface_index = necp_get_last_interface_index_from_packet(packet); @@ -8088,8 +8088,8 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out ip6 = mtod(packet, struct ip6_hdr *); if ((flags & IPV6_OUTARGS) && (ip6oa != NULL) && - (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) && - ip6oa->ip6oa_boundif != IFSCOPE_NONE) { + (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) && + ip6oa->ip6oa_boundif != IFSCOPE_NONE) { bound_interface_index = ip6oa->ip6oa_boundif; } @@ -8105,29 +8105,29 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out if (offset >= 0 && packet->m_pkthdr.len >= offset) { protocol = next; switch (protocol) { - case IPPROTO_TCP: { - struct tcphdr th; - if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) { - m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th); - ((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport; - ((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport; - } - break; - } - case IPPROTO_UDP: { - struct udphdr uh; - if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) { - m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh); - ((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport; - ((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport; - } - break; + case IPPROTO_TCP: { + struct tcphdr th; + if ((int)(offset + sizeof(th)) <= packet->m_pkthdr.len) { + m_copydata(packet, offset, sizeof(th), (u_int8_t *)&th); + ((struct sockaddr_in6 *)&local_addr)->sin6_port = th.th_sport; + ((struct sockaddr_in6 *)&remote_addr)->sin6_port = th.th_dport; } - default: { - ((struct sockaddr_in6 *)&local_addr)->sin6_port = 0; - ((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0; - break; + break; + } + case IPPROTO_UDP: { + struct udphdr uh; + if ((int)(offset + sizeof(uh)) <= packet->m_pkthdr.len) { + m_copydata(packet, offset, sizeof(uh), (u_int8_t *)&uh); + ((struct sockaddr_in6 *)&local_addr)->sin6_port = uh.uh_sport; + ((struct sockaddr_in6 *)&remote_addr)->sin6_port = uh.uh_dport; } + break; + } + default: { + ((struct sockaddr_in6 *)&local_addr)->sin6_port = 0; + ((struct sockaddr_in6 *)&remote_addr)->sin6_port = 0; + break; + } } } @@ -8156,7 +8156,7 @@ necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out lck_rw_done(&necp_kernel_policy_lock); - return (matched_policy_id); + return matched_policy_id; } // Utilities @@ -8166,22 +8166,22 @@ necp_is_addr_in_range(struct sockaddr *addr, 
struct sockaddr *range_start, struc int cmp = 0; if (addr == NULL || range_start == NULL || range_end == NULL) { - return (FALSE); + return FALSE; } /* Must be greater than or equal to start */ cmp = necp_addr_compare(addr, range_start, 1); if (cmp != 0 && cmp != 1) { - return (FALSE); + return FALSE; } /* Must be less than or equal to end */ cmp = necp_addr_compare(addr, range_end, 1); if (cmp != 0 && cmp != -1) { - return (FALSE); + return FALSE; } - return (TRUE); + return TRUE; } static bool @@ -8190,61 +8190,61 @@ necp_is_range_in_range(struct sockaddr *inner_range_start, struct sockaddr *inne int cmp = 0; if (inner_range_start == NULL || inner_range_end == NULL || range_start == NULL || range_end == NULL) { - return (FALSE); + return FALSE; } /* Must be greater than or equal to start */ cmp = necp_addr_compare(inner_range_start, range_start, 1); if (cmp != 0 && cmp != 1) { - return (FALSE); + return FALSE; } /* Must be less than or equal to end */ cmp = necp_addr_compare(inner_range_end, range_end, 1); if (cmp != 0 && cmp != -1) { - return (FALSE); + return FALSE; } - return (TRUE); + return TRUE; } static bool necp_is_addr_in_subnet(struct sockaddr *addr, struct sockaddr *subnet_addr, u_int8_t subnet_prefix) { if (addr == NULL || subnet_addr == NULL) { - return (FALSE); + return FALSE; } if (addr->sa_family != subnet_addr->sa_family || addr->sa_len != subnet_addr->sa_len) { - return (FALSE); + return FALSE; } switch (addr->sa_family) { - case AF_INET: { - if (satosin(subnet_addr)->sin_port != 0 && - satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) { - return (FALSE); - } - return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix)); + case AF_INET: { + if (satosin(subnet_addr)->sin_port != 0 && + satosin(addr)->sin_port != satosin(subnet_addr)->sin_port) { + return FALSE; } - case AF_INET6: { - if (satosin6(subnet_addr)->sin6_port != 0 && - satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) { - return (FALSE); - } - if (satosin6(addr)->sin6_scope_id && - satosin6(subnet_addr)->sin6_scope_id && - satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) { - return (FALSE); - } - return (necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix)); + return necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin(addr)->sin_addr, (u_int8_t *)&satosin(subnet_addr)->sin_addr, subnet_prefix); + } + case AF_INET6: { + if (satosin6(subnet_addr)->sin6_port != 0 && + satosin6(addr)->sin6_port != satosin6(subnet_addr)->sin6_port) { + return FALSE; } - default: { - return (FALSE); + if (satosin6(addr)->sin6_scope_id && + satosin6(subnet_addr)->sin6_scope_id && + satosin6(addr)->sin6_scope_id != satosin6(subnet_addr)->sin6_scope_id) { + return FALSE; } + return necp_buffer_compare_with_bit_prefix((u_int8_t *)&satosin6(addr)->sin6_addr, (u_int8_t *)&satosin6(subnet_addr)->sin6_addr, subnet_prefix); + } + default: { + return FALSE; + } } - return (FALSE); + return FALSE; } /* @@ -8261,68 +8261,68 @@ necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port) int port_result = 0; if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) { - return (2); + return 2; } if (sa1->sa_len == 0) { - return (0); + return 0; } switch (sa1->sa_family) { - case AF_INET: { - if (sa1->sa_len != sizeof(struct sockaddr_in)) { - return (2); - } - - result = 
memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr)); + case AF_INET: { + if (sa1->sa_len != sizeof(struct sockaddr_in)) { + return 2; + } - if (check_port) { - if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) { - port_result = -1; - } else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) { - port_result = 1; - } + result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr)); - if (result == 0) { - result = port_result; - } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) { - return (2); - } + if (check_port) { + if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) { + port_result = -1; + } else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) { + port_result = 1; } - break; - } - case AF_INET6: { - if (sa1->sa_len != sizeof(struct sockaddr_in6)) { - return (2); + if (result == 0) { + result = port_result; + } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) { + return 2; } + } - if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) { - return (2); - } + break; + } + case AF_INET6: { + if (sa1->sa_len != sizeof(struct sockaddr_in6)) { + return 2; + } - result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr)); + if (satosin6(sa1)->sin6_scope_id != satosin6(sa2)->sin6_scope_id) { + return 2; + } - if (check_port) { - if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) { - port_result = -1; - } else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) { - port_result = 1; - } + result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr)); - if (result == 0) { - result = port_result; - } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) { - return (2); - } + if (check_port) { + if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) { + port_result = -1; + } else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) { + port_result = 1; } - break; - } - default: { - result = memcmp(sa1, sa2, sa1->sa_len); - break; + if (result == 0) { + result = port_result; + } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) { + return 2; + } } + + break; + } + default: { + result = memcmp(sa1, sa2, sa1->sa_len); + break; + } } if (result < 0) { @@ -8331,7 +8331,7 @@ necp_addr_compare(struct sockaddr *sa1, struct sockaddr *sa2, int check_port) result = (1); } - return (result); + return result; } static bool @@ -8341,23 +8341,23 @@ necp_buffer_compare_with_bit_prefix(u_int8_t *p1, u_int8_t *p2, u_int32_t bits) /* Handle null pointers */ if (p1 == NULL || p2 == NULL) { - return (p1 == p2); + return p1 == p2; } while (bits >= 8) { if (*p1++ != *p2++) { - return (FALSE); + return FALSE; } bits -= 8; } if (bits > 0) { - mask = ~((1<<(8-bits))-1); + mask = ~((1 << (8 - bits)) - 1); if ((*p1 & mask) != (*p2 & mask)) { - return (FALSE); + return FALSE; } } - return (TRUE); + return TRUE; } static bool @@ -8413,7 +8413,7 @@ done: NECPLOG(LOG_DEBUG, "QoS Marking: Rule %d ifp %s Allowed %d", route_rule_id, ifp ? 
ifp->if_xname : "", qos_marking); } - return (qos_marking); + return qos_marking; } void @@ -8488,13 +8488,13 @@ static bool necp_route_is_lqm_abort(struct ifnet *ifp, struct ifnet *delegated_ifp) { if (ifp != NULL && - (ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) && - ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) { + (ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) && + ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) { return true; } if (delegated_ifp != NULL && - (delegated_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) && - delegated_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) { + (delegated_ifp->if_interface_state.valid_bitmask & IF_INTERFACE_STATE_LQM_STATE_VALID) && + delegated_ifp->if_interface_state.lqm_state == IFNET_LQM_THRESH_ABORT) { return true; } return false; @@ -8511,7 +8511,7 @@ necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t route_rule = necp_lookup_route_rule_locked(&necp_route_rules, route_rule_id); if (route_rule == NULL) { - return (TRUE); + return TRUE; } default_is_allowed = (route_rule->default_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE; @@ -8522,7 +8522,7 @@ necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t if (necp_debug > 1 && !default_is_allowed) { NECPLOG(LOG_DEBUG, "Route Allowed: No interface for route, using default for Rule %d Allowed %d", route_rule_id, default_is_allowed); } - return (default_is_allowed); + return default_is_allowed; } delegated_ifp = ifp->if_delegated.ifp; @@ -8531,19 +8531,19 @@ necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t break; } if (route_rule->exception_if_indices[exception_index] == ifp->if_index || - (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) { + (delegated_ifp != NULL && route_rule->exception_if_indices[exception_index] == delegated_ifp->if_index)) { if (route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_LQM_ABORT) { const bool lqm_abort = necp_route_is_lqm_abort(ifp, delegated_ifp); if (necp_debug > 1 && lqm_abort) { NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Deny LQM Abort", - route_rule->exception_if_indices[exception_index], route_rule_id); + route_rule->exception_if_indices[exception_index], route_rule_id); } return false; } else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->exception_if_actions[exception_index])) { if (necp_debug > 1) { NECPLOG(LOG_DEBUG, "Route Allowed: Interface match %d for Rule %d Allowed %d", route_rule->exception_if_indices[exception_index], route_rule_id, ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE)); } - return ((route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE); + return (route_rule->exception_if_actions[exception_index] == NECP_ROUTE_RULE_DENY_INTERFACE) ? 
FALSE : TRUE; } } } @@ -8562,11 +8562,11 @@ necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t *interface_type_denied = IFRTYPE_FUNCTIONAL_CELLULAR; } if (type_aggregate_action == NECP_ROUTE_RULE_NONE || - (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE && - route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) { - // Deny wins if there is a conflict - type_aggregate_action = route_rule->cellular_action; - } + (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE && + route_rule->cellular_action == NECP_ROUTE_RULE_DENY_INTERFACE)) { + // Deny wins if there is a conflict + type_aggregate_action = route_rule->cellular_action; + } } } @@ -8584,11 +8584,11 @@ necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t *interface_type_denied = IFRTYPE_FUNCTIONAL_WIFI_INFRA; } if (type_aggregate_action == NECP_ROUTE_RULE_NONE || - (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE && - route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) { - // Deny wins if there is a conflict - type_aggregate_action = route_rule->wifi_action; - } + (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE && + route_rule->wifi_action == NECP_ROUTE_RULE_DENY_INTERFACE)) { + // Deny wins if there is a conflict + type_aggregate_action = route_rule->wifi_action; + } } } @@ -8606,11 +8606,11 @@ necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t *interface_type_denied = IFRTYPE_FUNCTIONAL_WIRED; } if (type_aggregate_action == NECP_ROUTE_RULE_NONE || - (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE && - route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) { - // Deny wins if there is a conflict - type_aggregate_action = route_rule->wired_action; - } + (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE && + route_rule->wired_action == NECP_ROUTE_RULE_DENY_INTERFACE)) { + // Deny wins if there is a conflict + type_aggregate_action = route_rule->wired_action; + } } } @@ -8622,11 +8622,11 @@ necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t } } else if (IS_NECP_ROUTE_RULE_ALLOW_OR_DENY(route_rule->expensive_action)) { if (type_aggregate_action == NECP_ROUTE_RULE_NONE || - (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE && - route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) { - // Deny wins if there is a conflict - type_aggregate_action = route_rule->expensive_action; - } + (type_aggregate_action == NECP_ROUTE_RULE_ALLOW_INTERFACE && + route_rule->expensive_action == NECP_ROUTE_RULE_DENY_INTERFACE)) { + // Deny wins if there is a conflict + type_aggregate_action = route_rule->expensive_action; + } } } @@ -8634,13 +8634,13 @@ necp_route_is_allowed_inner(struct rtentry *route, struct ifnet *ifp, u_int32_t if (necp_debug > 1) { NECPLOG(LOG_DEBUG, "Route Allowed: C:%d WF:%d W:%d E:%d for Rule %d Allowed %d", route_rule->cellular_action, route_rule->wifi_action, route_rule->wired_action, route_rule->expensive_action, route_rule_id, ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE)); } - return ((type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? FALSE : TRUE); + return (type_aggregate_action == NECP_ROUTE_RULE_DENY_INTERFACE) ? 
FALSE : TRUE; } if (necp_debug > 1 && !default_is_allowed) { NECPLOG(LOG_DEBUG, "Route Allowed: Using default for Rule %d Allowed %d", route_rule_id, default_is_allowed); } - return (default_is_allowed); + return default_is_allowed; } static bool @@ -8650,7 +8650,7 @@ necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t if (necp_debug > 1) { NECPLOG(LOG_DEBUG, "Route Allowed: no route or interface, Rule %d Allowed %d", route_rule_id, TRUE); } - return (TRUE); + return TRUE; } if (ROUTE_RULE_IS_AGGREGATE(route_rule_id)) { @@ -8663,15 +8663,15 @@ necp_route_is_allowed(struct rtentry *route, struct ifnet *interface, u_int32_t break; } if (!necp_route_is_allowed_inner(route, interface, sub_route_rule_id, interface_type_denied)) { - return (FALSE); + return FALSE; } } } } else { - return (necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied)); + return necp_route_is_allowed_inner(route, interface, route_rule_id, interface_type_denied); } - return (TRUE); + return TRUE; } bool @@ -8680,12 +8680,12 @@ necp_packet_is_allowed_over_interface(struct mbuf *packet, struct ifnet *interfa bool is_allowed = TRUE; u_int32_t route_rule_id = necp_get_route_rule_id_from_packet(packet); if (route_rule_id != 0 && - interface != NULL) { + interface != NULL) { lck_rw_lock_shared(&necp_kernel_policy_lock); is_allowed = necp_route_is_allowed(NULL, interface, necp_get_route_rule_id_from_packet(packet), NULL); lck_rw_done(&necp_kernel_policy_lock); } - return (is_allowed); + return is_allowed; } static bool @@ -8706,12 +8706,12 @@ necp_netagents_allow_traffic(u_int32_t *netagent_ids, size_t netagent_id_count) if (agent_flags & NETAGENT_FLAG_ACTIVE) { continue; } else if ((agent_flags & NETAGENT_FLAG_VOLUNTARY) == 0) { - return (FALSE); + return FALSE; } } } } - return (TRUE); + return TRUE; } static bool @@ -8748,7 +8748,7 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr // Don't lock. Possible race condition, but we don't want the performance hit. 
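
The "Don't lock" comment above marks a deliberate fast path: the global counters are read without taking necp_kernel_policy_lock so the common no-policy case pays no lock traffic, and the source explicitly accepts that a concurrent policy update can make the read stale. A hedged sketch of the pattern, with the counter and flag names taken from this diff and the drop-all handling elided:

static bool
fast_path_can_skip_matching_sketch(struct inpcb *inp)
{
	/* Unlocked reads: stale values are tolerated by design. A policy
	 * added concurrently is picked up on a later evaluation, since
	 * cached match results are revalidated against
	 * necp_kernel_socket_policies_gencount before being reused. */
	if (necp_kernel_socket_policies_count == 0 ||
	    (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) &&
	    necp_kernel_socket_policies_non_app_count == 0)) {
		return true;    /* nothing to match against */
	}
	return false;
}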
if (necp_kernel_socket_policies_count == 0 || - (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) { + (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) { if (necp_drop_all_order > 0) { if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) { allowed_to_receive = TRUE; @@ -8778,22 +8778,22 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr if (!policies_have_changed) { if (!route_allowed || - inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP || - inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT || - (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface && - inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) { - allowed_to_receive = FALSE; - } else { - if (return_policy_id) { - *return_policy_id = inp->inp_policyresult.policy_id; - } - if (return_skip_policy_id) { - *return_skip_policy_id = inp->inp_policyresult.skip_policy_id; - } - if (return_route_rule_id) { - *return_route_rule_id = inp->inp_policyresult.results.route_rule_id; - } + inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP || + inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT || + (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface && + inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex)) { + allowed_to_receive = FALSE; + } else { + if (return_policy_id) { + *return_policy_id = inp->inp_policyresult.policy_id; + } + if (return_skip_policy_id) { + *return_skip_policy_id = inp->inp_policyresult.skip_policy_id; } + if (return_route_rule_id) { + *return_route_rule_id = inp->inp_policyresult.results.route_rule_id; + } + } goto done; } } @@ -8810,14 +8810,14 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr flowhash = necp_socket_calc_flowhash_locked(&info); if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE && - inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount && - inp->inp_policyresult.flowhash == flowhash) { + inp->inp_policyresult.policy_gencount == necp_kernel_socket_policies_gencount && + inp->inp_policyresult.flowhash == flowhash) { if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_DROP || - inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT || - (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface && - inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) || - (inp->inp_policyresult.results.route_rule_id != 0 && - !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) { + inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT || + (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface && + inp->inp_policyresult.results.result_parameter.tunnel_interface_index != verifyifindex) || + (inp->inp_policyresult.results.route_rule_id != 0 && + !necp_route_is_allowed(route, interface, inp->inp_policyresult.results.route_rule_id, &interface_type_denied))) { allowed_to_receive = FALSE; } else { if (return_policy_id) { @@ -8837,15 +8837,15 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr struct 
necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, NULL, &route_rule_id, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, current_proc(), return_skip_policy_id); if (matched_policy != NULL) { if (matched_policy->result == NECP_KERNEL_POLICY_RESULT_DROP || - matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT || - (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface && - matched_policy->result_parameter.tunnel_interface_index != verifyifindex) || - ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED || - service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) && - service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) || - (route_rule_id != 0 && - !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) || - !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) { + matched_policy->result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT || + (matched_policy->result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && interface && + matched_policy->result_parameter.tunnel_interface_index != verifyifindex) || + ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED || + service_action == NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED) && + service.identifier != 0 && service.identifier != NECP_NULL_SERVICE_ID) || + (route_rule_id != 0 && + !necp_route_is_allowed(route, interface, route_rule_id, &interface_type_denied)) || + !necp_netagents_allow_traffic(netagent_ids, NECP_MAX_NETAGENTS)) { allowed_to_receive = FALSE; } else { if (return_policy_id) { @@ -8879,7 +8879,7 @@ done: soevent(inp->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); } - return (allowed_to_receive); + return allowed_to_receive; } bool @@ -8894,8 +8894,8 @@ necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, memcpy(&local.sin_addr, local_addr, sizeof(local.sin_addr)); memcpy(&remote.sin_addr, remote_addr, sizeof(remote.sin_addr)); - return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, - return_policy_id, return_route_rule_id, return_skip_policy_id)); + return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, + return_policy_id, return_route_rule_id, return_skip_policy_id); } bool @@ -8910,30 +8910,30 @@ necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, memcpy(&local.sin6_addr, local_addr, sizeof(local.sin6_addr)); memcpy(&remote.sin6_addr, remote_addr, sizeof(remote.sin6_addr)); - return (necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, - return_policy_id, return_route_rule_id, return_skip_policy_id)); + return necp_socket_is_allowed_to_send_recv_internal(inp, (struct sockaddr *)&local, (struct sockaddr *)&remote, interface, + return_policy_id, return_route_rule_id, return_skip_policy_id); } bool necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, - necp_kernel_policy_id *return_skip_policy_id) + necp_kernel_policy_id *return_skip_policy_id) { - return (necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id, return_skip_policy_id)); + return 
necp_socket_is_allowed_to_send_recv_internal(inp, NULL, NULL, NULL, return_policy_id, return_route_rule_id, return_skip_policy_id); } int necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, u_int32_t route_rule_id, - necp_kernel_policy_id skip_policy_id) + necp_kernel_policy_id skip_policy_id) { if (packet == NULL || inp == NULL || !(packet->m_flags & M_PKTHDR)) { - return (EINVAL); + return EINVAL; } // Mark ID for Pass and IP Tunnel if (policy_id != NECP_KERNEL_POLICY_ID_NONE) { packet->m_pkthdr.necp_mtag.necp_policy_id = policy_id; } else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_PASS || - inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) { + inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL) { packet->m_pkthdr.necp_mtag.necp_policy_id = inp->inp_policyresult.policy_id; } else { packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE; @@ -8950,14 +8950,14 @@ necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel packet->m_pkthdr.necp_mtag.necp_skip_policy_id = skip_policy_id; } - return (0); + return 0; } int necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (EINVAL); + return EINVAL; } // Mark ID for Pass and IP Tunnel @@ -8967,14 +8967,14 @@ necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id) packet->m_pkthdr.necp_mtag.necp_policy_id = NECP_KERNEL_POLICY_ID_NONE; } - return (0); + return 0; } int necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (EINVAL); + return EINVAL; } // Mark ID for Pass and IP Tunnel @@ -8982,14 +8982,14 @@ necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface) packet->m_pkthdr.necp_mtag.necp_last_interface_index = interface->if_index; } - return (0); + return 0; } int necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (EINVAL); + return EINVAL; } if (is_keepalive) { @@ -8998,55 +8998,55 @@ necp_mark_packet_as_keepalive(struct mbuf *packet, bool is_keepalive) packet->m_pkthdr.pkt_flags &= ~PKTF_KEEPALIVE; } - return (0); + return 0; } necp_kernel_policy_id necp_get_policy_id_from_packet(struct mbuf *packet) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } - return (packet->m_pkthdr.necp_mtag.necp_policy_id); + return packet->m_pkthdr.necp_mtag.necp_policy_id; } necp_kernel_policy_id necp_get_skip_policy_id_from_packet(struct mbuf *packet) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (NECP_KERNEL_POLICY_ID_NONE); + return NECP_KERNEL_POLICY_ID_NONE; } - return (packet->m_pkthdr.necp_mtag.necp_skip_policy_id); + return packet->m_pkthdr.necp_mtag.necp_skip_policy_id; } u_int32_t necp_get_last_interface_index_from_packet(struct mbuf *packet) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (0); + return 0; } - return (packet->m_pkthdr.necp_mtag.necp_last_interface_index); + return packet->m_pkthdr.necp_mtag.necp_last_interface_index; } u_int32_t necp_get_route_rule_id_from_packet(struct mbuf *packet) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (0); + return 0; } - return (packet->m_pkthdr.necp_mtag.necp_route_rule_id); + return 
packet->m_pkthdr.necp_mtag.necp_route_rule_id; } int necp_get_app_uuid_from_packet(struct mbuf *packet, - uuid_t app_uuid) + uuid_t app_uuid) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (EINVAL); + return EINVAL; } bool found_mapping = FALSE; @@ -9062,17 +9062,17 @@ necp_get_app_uuid_from_packet(struct mbuf *packet, if (!found_mapping) { uuid_clear(app_uuid); } - return (0); + return 0; } bool necp_get_is_keepalive_from_packet(struct mbuf *packet) { if (packet == NULL || !(packet->m_flags & M_PKTHDR)) { - return (FALSE); + return FALSE; } - return (packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE); + return packet->m_pkthdr.pkt_flags & PKTF_KEEPALIVE; } u_int32_t @@ -9081,73 +9081,72 @@ necp_socket_get_content_filter_control_unit(struct socket *so) struct inpcb *inp = sotoinpcb(so); if (inp == NULL) { - return (0); + return 0; } - return (inp->inp_policyresult.results.filter_control_unit); + return inp->inp_policyresult.results.filter_control_unit; } bool necp_socket_should_use_flow_divert(struct inpcb *inp) { if (inp == NULL) { - return (FALSE); + return FALSE; } - return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT); + return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT; } u_int32_t necp_socket_get_flow_divert_control_unit(struct inpcb *inp) { if (inp == NULL) { - return (0); + return 0; } if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT) { - return (inp->inp_policyresult.results.result_parameter.flow_divert_control_unit); + return inp->inp_policyresult.results.result_parameter.flow_divert_control_unit; } - return (0); + return 0; } bool necp_socket_should_rescope(struct inpcb *inp) { if (inp == NULL) { - return (FALSE); + return FALSE; } - return (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED || - inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT); + return inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED || + inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT; } u_int necp_socket_get_rescope_if_index(struct inpcb *inp) { if (inp == NULL) { - return (0); + return 0; } if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) { - return (inp->inp_policyresult.results.result_parameter.scoped_interface_index); + return inp->inp_policyresult.results.result_parameter.scoped_interface_index; } else if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT) { - return (necp_get_primary_direct_interface_index()); + return necp_get_primary_direct_interface_index(); } - return (0); + return 0; } u_int32_t necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu) { if (inp == NULL) { - return (current_mtu); + return current_mtu; } if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL && - (inp->inp_flags & INP_BOUND_IF) && - inp->inp_boundifp) { - + (inp->inp_flags & INP_BOUND_IF) && + inp->inp_boundifp) { u_int bound_interface_index = inp->inp_boundifp->if_index; u_int tunnel_interface_index = inp->inp_policyresult.results.result_parameter.tunnel_interface_index; @@ -9163,40 +9162,40 @@ necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu) u_int32_t direct_tunnel_mtu = tunnel_interface->if_mtu; u_int32_t delegate_tunnel_mtu = (tunnel_interface->if_delegated.ifp != NULL) ? 
tunnel_interface->if_delegated.ifp->if_mtu : 0; if (delegate_tunnel_mtu != 0 && - strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) { - // For ipsec interfaces, calculate the overhead from the delegate interface - u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr)); - if (delegate_tunnel_mtu > tunnel_overhead) { - delegate_tunnel_mtu -= tunnel_overhead; - } + strncmp(tunnel_interface->if_name, "ipsec", strlen("ipsec")) == 0) { + // For ipsec interfaces, calculate the overhead from the delegate interface + u_int32_t tunnel_overhead = (u_int32_t)(esp_hdrsiz(NULL) + sizeof(struct ip6_hdr)); + if (delegate_tunnel_mtu > tunnel_overhead) { + delegate_tunnel_mtu -= tunnel_overhead; + } - if (delegate_tunnel_mtu < direct_tunnel_mtu) { - // If the (delegate - overhead) < direct, return (delegate - overhead) - return (delegate_tunnel_mtu); - } else { - // Otherwise return direct - return (direct_tunnel_mtu); - } + if (delegate_tunnel_mtu < direct_tunnel_mtu) { + // If the (delegate - overhead) < direct, return (delegate - overhead) + return delegate_tunnel_mtu; + } else { + // Otherwise return direct + return direct_tunnel_mtu; + } } else { // For non-ipsec interfaces, just return the tunnel MTU - return (direct_tunnel_mtu); + return direct_tunnel_mtu; } } } } // By default, just return the MTU passed in - return (current_mtu); + return current_mtu; } ifnet_t necp_get_ifnet_from_result_parameter(necp_kernel_policy_result_parameter *result_parameter) { if (result_parameter == NULL) { - return (NULL); + return NULL; } - return (ifindex2ifnet[result_parameter->tunnel_interface_index]); + return ifindex2ifnet[result_parameter->tunnel_interface_index]; } bool @@ -9209,13 +9208,13 @@ necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, st int i; if (packet == NULL || interface == NULL || new_route == NULL || (family != AF_INET && family != AF_INET6)) { - return (FALSE); + return FALSE; } result = ifnet_get_address_list_family(interface, &addresses, family); if (result != 0) { NECPLOG(LOG_ERR, "Failed to get address list for %s%d", ifnet_name(interface), ifnet_unit(interface)); - return (FALSE); + return FALSE; } for (i = 0; addresses[i] != NULL; i++) { @@ -9254,23 +9253,23 @@ necp_packet_can_rebind_to_ifnet(struct mbuf *packet, struct ifnet *interface, st done: ifnet_free_address_list(addresses); addresses = NULL; - return (found_match); + return found_match; } static bool necp_addr_is_loopback(struct sockaddr *address) { if (address == NULL) { - return (FALSE); + return FALSE; } if (address->sa_family == AF_INET) { - return (ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK); + return ntohl(((struct sockaddr_in *)(void *)address)->sin_addr.s_addr) == INADDR_LOOPBACK; } else if (address->sa_family == AF_INET6) { return IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *)(void *)address)->sin6_addr); } - return (FALSE); + return FALSE; } static bool @@ -9282,26 +9281,26 @@ necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, stru // check is cheaper. 
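For context on the clamp in necp_socket_get_effective_mtu() above: when the tunnel is an ipsec interface with a delegate, the delegate's MTU is reduced by the ESP plus outer IPv6 overhead, and the smaller of that and the direct tunnel MTU wins. A standalone sketch of the arithmetic (not part of this patch; ASSUMED_ESP_OVERHEAD stands in for the kernel's esp_hdrsiz(NULL), whose real value depends on the configured SAs):

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder for the kernel's esp_hdrsiz(NULL). */
    #define ASSUMED_ESP_OVERHEAD 73
    #define IPV6_HEADER_LEN      40  /* sizeof(struct ip6_hdr) */

    static uint32_t
    effective_ipsec_mtu(uint32_t direct_tunnel_mtu, uint32_t delegate_tunnel_mtu)
    {
        uint32_t tunnel_overhead = ASSUMED_ESP_OVERHEAD + IPV6_HEADER_LEN;

        if (delegate_tunnel_mtu > tunnel_overhead) {
            delegate_tunnel_mtu -= tunnel_overhead;
        }
        /* Return the smaller of (delegate - overhead) and the direct MTU. */
        return delegate_tunnel_mtu < direct_tunnel_mtu ?
            delegate_tunnel_mtu : direct_tunnel_mtu;
    }

    int
    main(void)
    {
        /* A 1500-byte delegate minus 113 bytes of assumed overhead is 1387,
         * which is below a 1400-byte direct tunnel MTU, so 1387 wins. */
        printf("%u\n", effective_ipsec_mtu(1400, 1500));
        return 0;
    }

Taking the minimum keeps post-encapsulation packets within what the underlying interface can actually carry.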
if (local_addr != NULL && necp_addr_is_loopback(local_addr)) { - return (TRUE); + return TRUE; } if (remote_addr != NULL && necp_addr_is_loopback(remote_addr)) { - return (TRUE); + return TRUE; } if (inp != NULL) { if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp && (inp->inp_boundifp->if_flags & IFF_LOOPBACK)) { - return (TRUE); + return TRUE; } if (inp->inp_vflag & INP_IPV4) { if (ntohl(inp->inp_laddr.s_addr) == INADDR_LOOPBACK || - ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) { - return (TRUE); + ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK) { + return TRUE; } } else if (inp->inp_vflag & INP_IPV6) { if (IN6_IS_ADDR_LOOPBACK(&inp->in6p_laddr) || - IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) { - return (TRUE); + IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) { + return TRUE; } } } @@ -9310,31 +9309,30 @@ necp_is_loopback(struct sockaddr *local_addr, struct sockaddr *remote_addr, stru struct ip *ip = mtod(packet, struct ip *); if (ip->ip_v == 4) { if (ntohl(ip->ip_src.s_addr) == INADDR_LOOPBACK) { - return (TRUE); + return TRUE; } if (ntohl(ip->ip_dst.s_addr) == INADDR_LOOPBACK) { - return (TRUE); + return TRUE; } } else if (ip->ip_v == 6) { struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *); if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) { - return (TRUE); + return TRUE; } if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_dst)) { - return (TRUE); + return TRUE; } } } - return (FALSE); + return FALSE; } static bool necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet) { - if (inp != NULL) { - return (sflt_permission_check(inp) ? true : false); + return sflt_permission_check(inp) ? true : false; } if (packet != NULL) { struct ip6_hdr *ip6 = mtod(packet, struct ip6_hdr *); @@ -9342,9 +9340,9 @@ necp_is_intcoproc(struct inpcb *inp, struct mbuf *packet) IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_dst) && ip6->ip6_dst.s6_addr32[2] == ntohl(0xaede48ff) && ip6->ip6_dst.s6_addr32[3] == ntohl(0xfe334455)) { - return (true); + return true; } } - return (false); + return false; } diff --git a/bsd/net/necp.h b/bsd/net/necp.h index 8eb159c17..5ae4af20d 100644 --- a/bsd/net/necp.h +++ b/bsd/net/necp.h @@ -26,8 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _NET_NECP_H_ -#define _NET_NECP_H_ +#ifndef _NET_NECP_H_ +#define _NET_NECP_H_ #include #ifdef PRIVATE @@ -40,154 +40,154 @@ /* * Name registered by the ipsec kernel control */ -#define NECP_CONTROL_NAME "com.apple.net.necp_control" +#define NECP_CONTROL_NAME "com.apple.net.necp_control" -#define NECP_TLV_LENGTH_UINT32 1 +#define NECP_TLV_LENGTH_UINT32 1 struct necp_packet_header { - u_int8_t packet_type; - u_int8_t flags; - u_int32_t message_id; + u_int8_t packet_type; + u_int8_t flags; + u_int32_t message_id; }; /* * Control message commands */ -#define NECP_PACKET_TYPE_POLICY_ADD 1 -#define NECP_PACKET_TYPE_POLICY_GET 2 -#define NECP_PACKET_TYPE_POLICY_DELETE 3 -#define NECP_PACKET_TYPE_POLICY_APPLY_ALL 4 -#define NECP_PACKET_TYPE_POLICY_LIST_ALL 5 -#define NECP_PACKET_TYPE_POLICY_DELETE_ALL 6 -#define NECP_PACKET_TYPE_SET_SESSION_PRIORITY 7 -#define NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC 8 -#define NECP_PACKET_TYPE_REGISTER_SERVICE 9 -#define NECP_PACKET_TYPE_UNREGISTER_SERVICE 10 -#define NECP_PACKET_TYPE_POLICY_DUMP_ALL 11 +#define NECP_PACKET_TYPE_POLICY_ADD 1 +#define NECP_PACKET_TYPE_POLICY_GET 2 +#define NECP_PACKET_TYPE_POLICY_DELETE 3 +#define NECP_PACKET_TYPE_POLICY_APPLY_ALL 4 +#define NECP_PACKET_TYPE_POLICY_LIST_ALL 5 +#define NECP_PACKET_TYPE_POLICY_DELETE_ALL 6 +#define NECP_PACKET_TYPE_SET_SESSION_PRIORITY 7 +#define 
NECP_PACKET_TYPE_LOCK_SESSION_TO_PROC 8 +#define NECP_PACKET_TYPE_REGISTER_SERVICE 9 +#define NECP_PACKET_TYPE_UNREGISTER_SERVICE 10 +#define NECP_PACKET_TYPE_POLICY_DUMP_ALL 11 /* * Session actions */ -#define NECP_SESSION_ACTION_POLICY_ADD 1 // In: Policy TLVs Out: necp_policy_id -#define NECP_SESSION_ACTION_POLICY_GET 2 // In: necp_policy_id Out: Policy TLVs -#define NECP_SESSION_ACTION_POLICY_DELETE 3 // In: necp_policy_id Out: None -#define NECP_SESSION_ACTION_POLICY_APPLY_ALL 4 // In: None Out: None -#define NECP_SESSION_ACTION_POLICY_LIST_ALL 5 // In: None Out: TLVs of IDs -#define NECP_SESSION_ACTION_POLICY_DELETE_ALL 6 // In: None Out: None -#define NECP_SESSION_ACTION_SET_SESSION_PRIORITY 7 // In: necp_session_priority Out: None -#define NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC 8 // In: None Out: None -#define NECP_SESSION_ACTION_REGISTER_SERVICE 9 // In: uuid_t Out: None -#define NECP_SESSION_ACTION_UNREGISTER_SERVICE 10 // In: uuid_t Out: None -#define NECP_SESSION_ACTION_POLICY_DUMP_ALL 11 // In: None Out: uint32_t bytes length, then Policy TLVs +#define NECP_SESSION_ACTION_POLICY_ADD 1 // In: Policy TLVs Out: necp_policy_id +#define NECP_SESSION_ACTION_POLICY_GET 2 // In: necp_policy_id Out: Policy TLVs +#define NECP_SESSION_ACTION_POLICY_DELETE 3 // In: necp_policy_id Out: None +#define NECP_SESSION_ACTION_POLICY_APPLY_ALL 4 // In: None Out: None +#define NECP_SESSION_ACTION_POLICY_LIST_ALL 5 // In: None Out: TLVs of IDs +#define NECP_SESSION_ACTION_POLICY_DELETE_ALL 6 // In: None Out: None +#define NECP_SESSION_ACTION_SET_SESSION_PRIORITY 7 // In: necp_session_priority Out: None +#define NECP_SESSION_ACTION_LOCK_SESSION_TO_PROC 8 // In: None Out: None +#define NECP_SESSION_ACTION_REGISTER_SERVICE 9 // In: uuid_t Out: None +#define NECP_SESSION_ACTION_UNREGISTER_SERVICE 10 // In: uuid_t Out: None +#define NECP_SESSION_ACTION_POLICY_DUMP_ALL 11 // In: None Out: uint32_t bytes length, then Policy TLVs /* * Control message flags */ -#define NECP_PACKET_FLAGS_RESPONSE 0x01 // Used for acks, errors, and query responses +#define NECP_PACKET_FLAGS_RESPONSE 0x01 // Used for acks, errors, and query responses /* * Control message TLV types */ -#define NECP_TLV_NIL 0 -#define NECP_TLV_ERROR 1 // u_int32_t -#define NECP_TLV_POLICY_ORDER 2 // u_int32_t -#define NECP_TLV_POLICY_CONDITION 3 -#define NECP_TLV_POLICY_RESULT 4 -#define NECP_TLV_POLICY_ID 5 // u_int32_t -#define NECP_TLV_SESSION_PRIORITY 6 // u_int32_t -#define NECP_TLV_ATTRIBUTE_DOMAIN 7 // char[] -#define NECP_TLV_ATTRIBUTE_ACCOUNT 8 // char[] -#define NECP_TLV_SERVICE_UUID 9 // uuid_t -#define NECP_TLV_ROUTE_RULE 10 +#define NECP_TLV_NIL 0 +#define NECP_TLV_ERROR 1 // u_int32_t +#define NECP_TLV_POLICY_ORDER 2 // u_int32_t +#define NECP_TLV_POLICY_CONDITION 3 +#define NECP_TLV_POLICY_RESULT 4 +#define NECP_TLV_POLICY_ID 5 // u_int32_t +#define NECP_TLV_SESSION_PRIORITY 6 // u_int32_t +#define NECP_TLV_ATTRIBUTE_DOMAIN 7 // char[] +#define NECP_TLV_ATTRIBUTE_ACCOUNT 8 // char[] +#define NECP_TLV_SERVICE_UUID 9 // uuid_t +#define NECP_TLV_ROUTE_RULE 10 /* * Control message TLV sent only by the kernel to userspace */ -#define NECP_TLV_POLICY_OWNER 100 // char [] -#define NECP_TLV_POLICY_DUMP 101 -#define NECP_TLV_POLICY_RESULT_STRING 102 // char [] -#define NECP_TLV_POLICY_SESSION_ORDER 103 // u_int32_t +#define NECP_TLV_POLICY_OWNER 100 // char [] +#define NECP_TLV_POLICY_DUMP 101 +#define NECP_TLV_POLICY_RESULT_STRING 102 // char [] +#define NECP_TLV_POLICY_SESSION_ORDER 103 // u_int32_t /* * Condition flags */ -#define 
NECP_POLICY_CONDITION_FLAGS_NEGATIVE 0x01 // Negative +#define NECP_POLICY_CONDITION_FLAGS_NEGATIVE 0x01 // Negative /* * Conditions * Used for setting policies as well as passing parameters to necp_match_policy. */ -#define NECP_POLICY_CONDITION_DEFAULT 0 // N/A, not valid with any other conditions +#define NECP_POLICY_CONDITION_DEFAULT 0 // N/A, not valid with any other conditions // Socket/Application conditions -#define NECP_POLICY_CONDITION_APPLICATION 1 // uuid_t, uses effective UUID when possible -#define NECP_POLICY_CONDITION_REAL_APPLICATION 2 // uuid_t, never uses effective UUID. Only valid with NECP_POLICY_CONDITION_APPLICATION -#define NECP_POLICY_CONDITION_DOMAIN 3 // String, such as apple.com -#define NECP_POLICY_CONDITION_ACCOUNT 4 // String -#define NECP_POLICY_CONDITION_ENTITLEMENT 5 // String -#define NECP_POLICY_CONDITION_PID 6 // pid_t -#define NECP_POLICY_CONDITION_UID 7 // uid_t -#define NECP_POLICY_CONDITION_ALL_INTERFACES 8 // N/A -#define NECP_POLICY_CONDITION_BOUND_INTERFACE 9 // String -#define NECP_POLICY_CONDITION_TRAFFIC_CLASS 10 // necp_policy_condition_tc_range +#define NECP_POLICY_CONDITION_APPLICATION 1 // uuid_t, uses effective UUID when possible +#define NECP_POLICY_CONDITION_REAL_APPLICATION 2 // uuid_t, never uses effective UUID. Only valid with NECP_POLICY_CONDITION_APPLICATION +#define NECP_POLICY_CONDITION_DOMAIN 3 // String, such as apple.com +#define NECP_POLICY_CONDITION_ACCOUNT 4 // String +#define NECP_POLICY_CONDITION_ENTITLEMENT 5 // String +#define NECP_POLICY_CONDITION_PID 6 // pid_t +#define NECP_POLICY_CONDITION_UID 7 // uid_t +#define NECP_POLICY_CONDITION_ALL_INTERFACES 8 // N/A +#define NECP_POLICY_CONDITION_BOUND_INTERFACE 9 // String +#define NECP_POLICY_CONDITION_TRAFFIC_CLASS 10 // necp_policy_condition_tc_range // Socket/IP conditions -#define NECP_POLICY_CONDITION_IP_PROTOCOL 11 // u_int8_t -#define NECP_POLICY_CONDITION_LOCAL_ADDR 12 // necp_policy_condition_addr -#define NECP_POLICY_CONDITION_REMOTE_ADDR 13 // necp_policy_condition_addr -#define NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE 14 // necp_policy_condition_addr_range -#define NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE 15 // necp_policy_condition_addr_range -#define NECP_POLICY_CONDITION_AGENT_TYPE 16 // struct necp_policy_condition_agent_type +#define NECP_POLICY_CONDITION_IP_PROTOCOL 11 // u_int8_t +#define NECP_POLICY_CONDITION_LOCAL_ADDR 12 // necp_policy_condition_addr +#define NECP_POLICY_CONDITION_REMOTE_ADDR 13 // necp_policy_condition_addr +#define NECP_POLICY_CONDITION_LOCAL_ADDR_RANGE 14 // necp_policy_condition_addr_range +#define NECP_POLICY_CONDITION_REMOTE_ADDR_RANGE 15 // necp_policy_condition_addr_range +#define NECP_POLICY_CONDITION_AGENT_TYPE 16 // struct necp_policy_condition_agent_type /* * Results */ -#define NECP_POLICY_RESULT_PASS 1 // N/A -#define NECP_POLICY_RESULT_SKIP 2 // u_int32_t, policy order to skip to. 0 to skip all session policies. -#define NECP_POLICY_RESULT_DROP 3 // N/A -#define NECP_POLICY_RESULT_SOCKET_DIVERT 4 // u_int32_t, flow divert control unit -#define NECP_POLICY_RESULT_SOCKET_FILTER 5 // u_int32_t, filter control unit -#define NECP_POLICY_RESULT_IP_TUNNEL 6 // String, interface name -#define NECP_POLICY_RESULT_IP_FILTER 7 // ? 
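The session actions above all exchange their inputs and outputs as type-length-value records. A userspace sketch of the encoding (illustration only, not part of this patch; it assumes the 1-byte type, 4-byte length wire layout of struct necp_tlv_header declared later in this header, and follows the cursor-advance convention of necp_buffer_write_tlv()):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Append one TLV record and return the advanced cursor, or NULL if the
     * record would overflow the buffer. */
    static uint8_t *
    tlv_write(uint8_t *cursor, uint8_t *end, uint8_t type,
        uint32_t length, const void *value)
    {
        if (cursor == NULL ||
            end - cursor < (ptrdiff_t)(sizeof(type) + sizeof(length) + length)) {
            return NULL;
        }
        memcpy(cursor, &type, sizeof(type));
        cursor += sizeof(type);
        memcpy(cursor, &length, sizeof(length));
        cursor += sizeof(length);
        memcpy(cursor, value, length);
        return cursor + length;
    }

    int
    main(void)
    {
        uint8_t buffer[128];
        uint8_t *cursor = buffer;
        uint32_t order = 10;

        /* NECP_TLV_POLICY_ORDER (2) carries a u_int32_t payload. */
        cursor = tlv_write(cursor, buffer + sizeof(buffer), 2,
            sizeof(order), &order);
        printf("encoded %ld bytes\n", (long)(cursor - buffer));
        return 0;
    }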
-#define NECP_POLICY_RESULT_TRIGGER 8 // service uuid_t -#define NECP_POLICY_RESULT_TRIGGER_IF_NEEDED 9 // service uuid_t -#define NECP_POLICY_RESULT_TRIGGER_SCOPED 10 // service uuid_t -#define NECP_POLICY_RESULT_NO_TRIGGER_SCOPED 11 // service uuid_t -#define NECP_POLICY_RESULT_SOCKET_SCOPED 12 // String, interface name -#define NECP_POLICY_RESULT_ROUTE_RULES 13 // N/A, must have route rules defined -#define NECP_POLICY_RESULT_USE_NETAGENT 14 // netagent uuid_t -#define NECP_POLICY_RESULT_NETAGENT_SCOPED 15 // netagent uuid_t -#define NECP_POLICY_RESULT_SCOPED_DIRECT 16 // N/A, scopes to primary physical interface - -#define NECP_POLICY_RESULT_MAX NECP_POLICY_RESULT_SCOPED_DIRECT +#define NECP_POLICY_RESULT_PASS 1 // N/A +#define NECP_POLICY_RESULT_SKIP 2 // u_int32_t, policy order to skip to. 0 to skip all session policies. +#define NECP_POLICY_RESULT_DROP 3 // N/A +#define NECP_POLICY_RESULT_SOCKET_DIVERT 4 // u_int32_t, flow divert control unit +#define NECP_POLICY_RESULT_SOCKET_FILTER 5 // u_int32_t, filter control unit +#define NECP_POLICY_RESULT_IP_TUNNEL 6 // String, interface name +#define NECP_POLICY_RESULT_IP_FILTER 7 // ? +#define NECP_POLICY_RESULT_TRIGGER 8 // service uuid_t +#define NECP_POLICY_RESULT_TRIGGER_IF_NEEDED 9 // service uuid_t +#define NECP_POLICY_RESULT_TRIGGER_SCOPED 10 // service uuid_t +#define NECP_POLICY_RESULT_NO_TRIGGER_SCOPED 11 // service uuid_t +#define NECP_POLICY_RESULT_SOCKET_SCOPED 12 // String, interface name +#define NECP_POLICY_RESULT_ROUTE_RULES 13 // N/A, must have route rules defined +#define NECP_POLICY_RESULT_USE_NETAGENT 14 // netagent uuid_t +#define NECP_POLICY_RESULT_NETAGENT_SCOPED 15 // netagent uuid_t +#define NECP_POLICY_RESULT_SCOPED_DIRECT 16 // N/A, scopes to primary physical interface + +#define NECP_POLICY_RESULT_MAX NECP_POLICY_RESULT_SCOPED_DIRECT /* * Route Rules * Detailed parameters for NECP_POLICY_RESULT_ROUTE_RULES. 
*/ -#define NECP_ROUTE_RULE_NONE 0 // N/A -#define NECP_ROUTE_RULE_DENY_INTERFACE 1 // String, or empty to match all -#define NECP_ROUTE_RULE_ALLOW_INTERFACE 2 // String, or empty to match all -#define NECP_ROUTE_RULE_QOS_MARKING 3 // String, or empty to match all -#define NECP_ROUTE_RULE_DENY_LQM_ABORT 4 // String, or empty to match all +#define NECP_ROUTE_RULE_NONE 0 // N/A +#define NECP_ROUTE_RULE_DENY_INTERFACE 1 // String, or empty to match all +#define NECP_ROUTE_RULE_ALLOW_INTERFACE 2 // String, or empty to match all +#define NECP_ROUTE_RULE_QOS_MARKING 3 // String, or empty to match all +#define NECP_ROUTE_RULE_DENY_LQM_ABORT 4 // String, or empty to match all -#define NECP_ROUTE_RULE_FLAG_CELLULAR 0x01 -#define NECP_ROUTE_RULE_FLAG_WIFI 0x02 -#define NECP_ROUTE_RULE_FLAG_WIRED 0x04 -#define NECP_ROUTE_RULE_FLAG_EXPENSIVE 0x08 +#define NECP_ROUTE_RULE_FLAG_CELLULAR 0x01 +#define NECP_ROUTE_RULE_FLAG_WIFI 0x02 +#define NECP_ROUTE_RULE_FLAG_WIRED 0x04 +#define NECP_ROUTE_RULE_FLAG_EXPENSIVE 0x08 /* * Error types */ -#define NECP_ERROR_INTERNAL 0 -#define NECP_ERROR_UNKNOWN_PACKET_TYPE 1 -#define NECP_ERROR_INVALID_TLV 2 -#define NECP_ERROR_POLICY_RESULT_INVALID 3 -#define NECP_ERROR_POLICY_CONDITIONS_INVALID 4 -#define NECP_ERROR_POLICY_ID_NOT_FOUND 5 -#define NECP_ERROR_INVALID_PROCESS 6 -#define NECP_ERROR_ROUTE_RULES_INVALID 7 +#define NECP_ERROR_INTERNAL 0 +#define NECP_ERROR_UNKNOWN_PACKET_TYPE 1 +#define NECP_ERROR_INVALID_TLV 2 +#define NECP_ERROR_POLICY_RESULT_INVALID 3 +#define NECP_ERROR_POLICY_CONDITIONS_INVALID 4 +#define NECP_ERROR_POLICY_ID_NOT_FOUND 5 +#define NECP_ERROR_INVALID_PROCESS 6 +#define NECP_ERROR_ROUTE_RULES_INVALID 7 // Modifiers -#define NECP_MASK_USERSPACE_ONLY 0x80000000 // on filter_control_unit value +#define NECP_MASK_USERSPACE_ONLY 0x80000000 // on filter_control_unit value struct necp_policy_condition_tc_range { u_int32_t start_tc; @@ -195,24 +195,24 @@ struct necp_policy_condition_tc_range { } __attribute__((__packed__)); struct necp_policy_condition_addr { - u_int8_t prefix; + u_int8_t prefix; union { - struct sockaddr sa; - struct sockaddr_in sin; - struct sockaddr_in6 sin6; + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; } address; } __attribute__((__packed__)); struct necp_policy_condition_addr_range { union { - struct sockaddr sa; - struct sockaddr_in sin; - struct sockaddr_in6 sin6; + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; } start_address; union { - struct sockaddr sa; - struct sockaddr_in sin; - struct sockaddr_in6 sin6; + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; } end_address; } __attribute__((__packed__)); @@ -221,14 +221,14 @@ struct necp_policy_condition_agent_type { char agent_type[32]; } __attribute__((__packed__)); -#define NECP_SESSION_PRIORITY_UNKNOWN 0 -#define NECP_SESSION_PRIORITY_CONTROL 1 -#define NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL 2 -#define NECP_SESSION_PRIORITY_HIGH 3 -#define NECP_SESSION_PRIORITY_DEFAULT 4 -#define NECP_SESSION_PRIORITY_LOW 5 +#define NECP_SESSION_PRIORITY_UNKNOWN 0 +#define NECP_SESSION_PRIORITY_CONTROL 1 +#define NECP_SESSION_PRIORITY_PRIVILEGED_TUNNEL 2 +#define NECP_SESSION_PRIORITY_HIGH 3 +#define NECP_SESSION_PRIORITY_DEFAULT 4 +#define NECP_SESSION_PRIORITY_LOW 5 -#define NECP_SESSION_NUM_PRIORITIES NECP_SESSION_PRIORITY_LOW +#define NECP_SESSION_NUM_PRIORITIES NECP_SESSION_PRIORITY_LOW typedef u_int32_t necp_policy_id; typedef u_int32_t necp_policy_order; @@ -238,31 +238,31 @@ typedef u_int32_t 
necp_kernel_policy_result; typedef u_int32_t necp_kernel_policy_filter; typedef union { - u_int tunnel_interface_index; - u_int scoped_interface_index; - u_int32_t flow_divert_control_unit; - u_int32_t filter_control_unit; + u_int tunnel_interface_index; + u_int scoped_interface_index; + u_int32_t flow_divert_control_unit; + u_int32_t filter_control_unit; } necp_kernel_policy_routing_result_parameter; -#define NECP_SERVICE_FLAGS_REGISTERED 0x01 -#define NECP_MAX_NETAGENTS 8 +#define NECP_SERVICE_FLAGS_REGISTERED 0x01 +#define NECP_MAX_NETAGENTS 8 -#define NECP_AGENT_USE_FLAG_SCOPE 0x01 +#define NECP_AGENT_USE_FLAG_SCOPE 0x01 #define NECP_TFO_COOKIE_LEN_MAX 16 struct necp_aggregate_result { - necp_kernel_policy_result routing_result; - necp_kernel_policy_routing_result_parameter routing_result_parameter; - necp_kernel_policy_filter filter_control_unit; - necp_kernel_policy_result service_action; - uuid_t service_uuid; - u_int32_t service_flags; - u_int32_t service_data; - u_int routed_interface_index; - u_int32_t policy_id; - uuid_t netagents[NECP_MAX_NETAGENTS]; - u_int32_t netagent_use_flags[NECP_MAX_NETAGENTS]; - u_int8_t mss_recommended; + necp_kernel_policy_result routing_result; + necp_kernel_policy_routing_result_parameter routing_result_parameter; + necp_kernel_policy_filter filter_control_unit; + necp_kernel_policy_result service_action; + uuid_t service_uuid; + u_int32_t service_flags; + u_int32_t service_data; + u_int routed_interface_index; + u_int32_t policy_id; + uuid_t netagents[NECP_MAX_NETAGENTS]; + u_int32_t netagent_use_flags[NECP_MAX_NETAGENTS]; + u_int8_t mss_recommended; }; /* @@ -270,27 +270,26 @@ struct necp_aggregate_result { * but they get entangled with #defines for v4 etc in pfvar.h and it may be better practice * to have separate definitions here. */ -struct necp_stat_counts -{ +struct necp_stat_counts { /* Counters */ - u_int64_t necp_stat_rxpackets __attribute__((aligned(8))); - u_int64_t necp_stat_rxbytes __attribute__((aligned(8))); - u_int64_t necp_stat_txpackets __attribute__((aligned(8))); - u_int64_t necp_stat_txbytes __attribute__((aligned(8))); + u_int64_t necp_stat_rxpackets __attribute__((aligned(8))); + u_int64_t necp_stat_rxbytes __attribute__((aligned(8))); + u_int64_t necp_stat_txpackets __attribute__((aligned(8))); + u_int64_t necp_stat_txbytes __attribute__((aligned(8))); - u_int32_t necp_stat_rxduplicatebytes; - u_int32_t necp_stat_rxoutoforderbytes; - u_int32_t necp_stat_txretransmit; + u_int32_t necp_stat_rxduplicatebytes; + u_int32_t necp_stat_rxoutoforderbytes; + u_int32_t necp_stat_txretransmit; - u_int32_t necp_stat_connectattempts; - u_int32_t necp_stat_connectsuccesses; + u_int32_t necp_stat_connectattempts; + u_int32_t necp_stat_connectsuccesses; - u_int32_t necp_stat_min_rtt; - u_int32_t necp_stat_avg_rtt; - u_int32_t necp_stat_var_rtt; + u_int32_t necp_stat_min_rtt; + u_int32_t necp_stat_avg_rtt; + u_int32_t necp_stat_var_rtt; -#define NECP_STAT_ROUTE_FLAGS 1 - u_int32_t necp_stat_route_flags; +#define NECP_STAT_ROUTE_FLAGS 1 + u_int32_t necp_stat_route_flags; }; // Note, some metadata is implicit in the necp client itself: @@ -299,91 +298,89 @@ struct necp_stat_counts // // The following may well be supplied via future necp client parameters, // but they are here so they don't get forgotten. 
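Which member of the routing-result parameter union above is meaningful depends entirely on the routing_result code stored next to it. A minimal sketch of that pairing (simplified mirror types, not part of this patch; the constants are the NECP_POLICY_RESULT_* values defined earlier):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified mirror of necp_kernel_policy_routing_result_parameter. */
    typedef union {
        unsigned int tunnel_interface_index;
        unsigned int scoped_interface_index;
        uint32_t flow_divert_control_unit;
        uint32_t filter_control_unit;
    } routing_result_parameter;

    #define RESULT_SOCKET_DIVERT 4  /* NECP_POLICY_RESULT_SOCKET_DIVERT */
    #define RESULT_IP_TUNNEL     6  /* NECP_POLICY_RESULT_IP_TUNNEL */

    static void
    describe(uint32_t routing_result, routing_result_parameter parameter)
    {
        /* The valid union member is selected by routing_result. */
        switch (routing_result) {
        case RESULT_IP_TUNNEL:
            printf("tunnel over ifindex %u\n", parameter.tunnel_interface_index);
            break;
        case RESULT_SOCKET_DIVERT:
            printf("divert to control unit %u\n", parameter.flow_divert_control_unit);
            break;
        default:
            printf("no parameter\n");
            break;
        }
    }

    int
    main(void)
    {
        routing_result_parameter p = { .tunnel_interface_index = 7 };
        describe(RESULT_IP_TUNNEL, p);
        return 0;
    }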
-struct necp_basic_metadata -{ - u_int32_t rcvbufsize; - u_int32_t rcvbufused; +struct necp_basic_metadata { + u_int32_t rcvbufsize; + u_int32_t rcvbufused; }; struct necp_tcp_probe_status { - unsigned int probe_activated : 1; - unsigned int write_probe_failed : 1; - unsigned int read_probe_failed : 1; - unsigned int conn_probe_failed : 1; + unsigned int probe_activated : 1; + unsigned int write_probe_failed : 1; + unsigned int read_probe_failed : 1; + unsigned int conn_probe_failed : 1; }; -struct necp_extra_tcp_metadata -{ +struct necp_extra_tcp_metadata { struct necp_tcp_probe_status probestatus; - u_int32_t sndbufsize; - u_int32_t sndbufused; - u_int32_t txunacked; - u_int32_t txwindow; - u_int32_t txcwindow; - u_int32_t flags; // use SOF_* - u_int32_t flags1; // use SOF1_* - u_int32_t traffic_mgt_flags; - u_int32_t cc_alg_index; - u_int32_t state; - activity_bitmap_t activity_bitmap; + u_int32_t sndbufsize; + u_int32_t sndbufused; + u_int32_t txunacked; + u_int32_t txwindow; + u_int32_t txcwindow; + u_int32_t flags; // use SOF_* + u_int32_t flags1; // use SOF1_* + u_int32_t traffic_mgt_flags; + u_int32_t cc_alg_index; + u_int32_t state; + activity_bitmap_t activity_bitmap; }; struct necp_stats_hdr { - u_int32_t necp_stats_type __attribute__((aligned(8))); - u_int32_t necp_stats_ver; - u_int64_t __necp_stats_reserved; // Pad the field for future use + u_int32_t necp_stats_type __attribute__((aligned(8))); + u_int32_t necp_stats_ver; + u_int64_t __necp_stats_reserved; // Pad the field for future use }; -#define NECP_CLIENT_STATISTICS_TYPE_TCP 1 // Identifies use of necp_tcp_stats -#define NECP_CLIENT_STATISTICS_TYPE_UDP 2 // Identifies use of necp_udp_stats -#define NECP_CLIENT_STATISTICS_TYPE_TCP_VER_1 1 // Currently supported version for TCP -#define NECP_CLIENT_STATISTICS_TYPE_UDP_VER_1 1 // Currently supported version for UDP +#define NECP_CLIENT_STATISTICS_TYPE_TCP 1 // Identifies use of necp_tcp_stats +#define NECP_CLIENT_STATISTICS_TYPE_UDP 2 // Identifies use of necp_udp_stats +#define NECP_CLIENT_STATISTICS_TYPE_TCP_VER_1 1 // Currently supported version for TCP +#define NECP_CLIENT_STATISTICS_TYPE_UDP_VER_1 1 // Currently supported version for UDP -#define NECP_CLIENT_STATISTICS_TYPE_TCP_CURRENT_VER NECP_CLIENT_STATISTICS_TYPE_TCP_VER_1 -#define NECP_CLIENT_STATISTICS_TYPE_UDP_CURRENT_VER NECP_CLIENT_STATISTICS_TYPE_UDP_VER_1 +#define NECP_CLIENT_STATISTICS_TYPE_TCP_CURRENT_VER NECP_CLIENT_STATISTICS_TYPE_TCP_VER_1 +#define NECP_CLIENT_STATISTICS_TYPE_UDP_CURRENT_VER NECP_CLIENT_STATISTICS_TYPE_UDP_VER_1 -#define NECP_CLIENT_STATISTICS_EVENT_INIT 0x00000000 // Register the flow -#define NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT 0x00000001 // The flow is effectively finished but waiting on timer +#define NECP_CLIENT_STATISTICS_EVENT_INIT 0x00000000 // Register the flow +#define NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT 0x00000001 // The flow is effectively finished but waiting on timer struct necp_tcp_stats { - struct necp_stats_hdr necp_tcp_hdr; - struct necp_stat_counts necp_tcp_counts; - struct necp_basic_metadata necp_tcp_basic; - struct necp_extra_tcp_metadata necp_tcp_extra; + struct necp_stats_hdr necp_tcp_hdr; + struct necp_stat_counts necp_tcp_counts; + struct necp_basic_metadata necp_tcp_basic; + struct necp_extra_tcp_metadata necp_tcp_extra; }; struct necp_udp_stats { - struct necp_stats_hdr necp_udp_hdr; - struct necp_stat_counts necp_udp_counts; - struct necp_basic_metadata necp_udp_basic; + struct necp_stats_hdr necp_udp_hdr; + struct necp_stat_counts necp_udp_counts; + 
struct necp_basic_metadata necp_udp_basic; }; typedef struct necp_all_stats { union { - struct necp_tcp_stats tcp_stats; - struct necp_udp_stats udp_stats; + struct necp_tcp_stats tcp_stats; + struct necp_udp_stats udp_stats; } all_stats_u; } necp_all_stats; // Memory for statistics is requested via a necp_stats_bufreq // struct necp_stats_bufreq { - u_int32_t necp_stats_bufreq_id __attribute__((aligned(8))); - u_int32_t necp_stats_bufreq_type; // NECP_CLIENT_STATISTICS_TYPE_* - u_int32_t necp_stats_bufreq_ver; // NECP_CLIENT_STATISTICS_TYPE_*_VER - u_int32_t necp_stats_bufreq_size; + u_int32_t necp_stats_bufreq_id __attribute__((aligned(8))); + u_int32_t necp_stats_bufreq_type; // NECP_CLIENT_STATISTICS_TYPE_* + u_int32_t necp_stats_bufreq_ver; // NECP_CLIENT_STATISTICS_TYPE_*_VER + u_int32_t necp_stats_bufreq_size; union { - void *necp_stats_bufreq_addr; - mach_vm_address_t necp_stats_bufreq_uaddr; + void *necp_stats_bufreq_addr; + mach_vm_address_t necp_stats_bufreq_uaddr; }; }; -#define NECP_CLIENT_STATISTICS_BUFREQ_ID 0xbf // Distinguishes from statistics actions taking a necp_all_stats struct +#define NECP_CLIENT_STATISTICS_BUFREQ_ID 0xbf // Distinguishes from statistics actions taking a necp_all_stats struct // There is a limit to the number of statistics structures that may be allocated per process, subject to change // -#define NECP_MAX_PER_PROCESS_CLIENT_STATISTICS_STRUCTS 512 +#define NECP_MAX_PER_PROCESS_CLIENT_STATISTICS_STRUCTS 512 #define NECP_TCP_ECN_HEURISTICS_SYN_RST 1 typedef struct necp_tcp_ecn_cache { @@ -408,11 +405,11 @@ typedef struct necp_tcp_tfo_cache { u_int8_t necp_tcp_tfo_heuristics_rst_req:1; // Received RST upon SYN with the TFO-option } necp_tcp_tfo_cache; -#define NECP_CLIENT_CACHE_TYPE_ECN 1 // Identifies use of necp_tcp_ecn_cache -#define NECP_CLIENT_CACHE_TYPE_TFO 2 // Identifies use of necp_tcp_tfo_cache +#define NECP_CLIENT_CACHE_TYPE_ECN 1 // Identifies use of necp_tcp_ecn_cache +#define NECP_CLIENT_CACHE_TYPE_TFO 2 // Identifies use of necp_tcp_tfo_cache -#define NECP_CLIENT_CACHE_TYPE_ECN_VER_1 1 // Currently supported version for ECN -#define NECP_CLIENT_CACHE_TYPE_TFO_VER_1 1 // Currently supported version for TFO +#define NECP_CLIENT_CACHE_TYPE_ECN_VER_1 1 // Currently supported version for ECN +#define NECP_CLIENT_CACHE_TYPE_TFO_VER_1 1 // Currently supported version for TFO typedef struct necp_cache_buffer { u_int8_t necp_cache_buf_type; // NECP_CLIENT_CACHE_TYPE_* @@ -424,143 +421,143 @@ typedef struct necp_cache_buffer { /* * NECP Client definitions */ -#define NECP_MAX_CLIENT_PARAMETERS_SIZE 1024 -#define NECP_MAX_CLIENT_RESULT_SIZE 512 - -#define NECP_OPEN_FLAG_OBSERVER 0x01 // Observers can query clients they don't own -#define NECP_OPEN_FLAG_BACKGROUND 0x02 // Mark this fd as backgrounded -#define NECP_OPEN_FLAG_PUSH_OBSERVER 0x04 // When used with the OBSERVER flag, allows updates to be pushed. Adding clients is not allowed in this mode. - -#define NECP_FD_SUPPORTS_GUARD 1 - -#define NECP_CLIENT_ACTION_ADD 1 // Register a new client. Input: parameters in buffer; Output: client_id -#define NECP_CLIENT_ACTION_REMOVE 2 // Unregister a client. Input: client_id, optional struct ifnet_stats_per_flow -#define NECP_CLIENT_ACTION_COPY_PARAMETERS 3 // Copy client parameters. Input: client_id; Output: parameters in buffer -#define NECP_CLIENT_ACTION_COPY_RESULT 4 // Copy client result. Input: client_id; Output: result in buffer -#define NECP_CLIENT_ACTION_COPY_LIST 5 // Copy all client IDs. 
Output: struct necp_client_list in buffer -#define NECP_CLIENT_ACTION_REQUEST_NEXUS_INSTANCE 6 // Request a nexus instance from a nexus provider, optional struct necp_stats_bufreq -#define NECP_CLIENT_ACTION_AGENT 7 // Interact with agent. Input: client_id, agent parameters -#define NECP_CLIENT_ACTION_COPY_AGENT 8 // Copy agent content. Input: agent UUID; Output: struct netagent -#define NECP_CLIENT_ACTION_COPY_INTERFACE 9 // Copy interface details. Input: ifindex cast to UUID; Output: struct necp_interface_details -#define NECP_CLIENT_ACTION_SET_STATISTICS 10 // Deprecated -#define NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS 11 // Get route statistics. Input: client_id; Output: struct necp_stat_counts -#define NECP_CLIENT_ACTION_AGENT_USE 12 // Return the use count and increment the use count. Input/Output: struct necp_agent_use_parameters -#define NECP_CLIENT_ACTION_MAP_SYSCTLS 13 // Get the read-only sysctls memory location. Output: mach_vm_address_t -#define NECP_CLIENT_ACTION_UPDATE_CACHE 14 // Update heuristics and cache -#define NECP_CLIENT_ACTION_COPY_CLIENT_UPDATE 15 // Fetch an updated client for push-mode observer. Output: Client id, struct necp_client_observer_update in buffer -#define NECP_CLIENT_ACTION_COPY_UPDATED_RESULT 16 // Copy client result only if changed. Input: client_id; Output: result in buffer -#define NECP_CLIENT_ACTION_ADD_FLOW 17 // Add a flow. Input: client_id; Output: struct necp_client_add_flow -#define NECP_CLIENT_ACTION_REMOVE_FLOW 18 // Remove a flow. Input: flow_id, optional struct ifnet_stats_per_flow - - -#define NECP_CLIENT_PARAMETER_APPLICATION NECP_POLICY_CONDITION_APPLICATION // Requires entitlement -#define NECP_CLIENT_PARAMETER_REAL_APPLICATION NECP_POLICY_CONDITION_REAL_APPLICATION // Requires entitlement -#define NECP_CLIENT_PARAMETER_DOMAIN NECP_POLICY_CONDITION_DOMAIN -#define NECP_CLIENT_PARAMETER_ACCOUNT NECP_POLICY_CONDITION_ACCOUNT -#define NECP_CLIENT_PARAMETER_PID NECP_POLICY_CONDITION_PID // Requires entitlement -#define NECP_CLIENT_PARAMETER_UID NECP_POLICY_CONDITION_UID // Requires entitlement -#define NECP_CLIENT_PARAMETER_BOUND_INTERFACE NECP_POLICY_CONDITION_BOUND_INTERFACE -#define NECP_CLIENT_PARAMETER_TRAFFIC_CLASS NECP_POLICY_CONDITION_TRAFFIC_CLASS -#define NECP_CLIENT_PARAMETER_IP_PROTOCOL NECP_POLICY_CONDITION_IP_PROTOCOL -#define NECP_CLIENT_PARAMETER_LOCAL_ADDRESS NECP_POLICY_CONDITION_LOCAL_ADDR -#define NECP_CLIENT_PARAMETER_REMOTE_ADDRESS NECP_POLICY_CONDITION_REMOTE_ADDR -#define NECP_CLIENT_PARAMETER_NEXUS_KEY 102 +#define NECP_MAX_CLIENT_PARAMETERS_SIZE 1024 +#define NECP_MAX_CLIENT_RESULT_SIZE 512 + +#define NECP_OPEN_FLAG_OBSERVER 0x01 // Observers can query clients they don't own +#define NECP_OPEN_FLAG_BACKGROUND 0x02 // Mark this fd as backgrounded +#define NECP_OPEN_FLAG_PUSH_OBSERVER 0x04 // When used with the OBSERVER flag, allows updates to be pushed. Adding clients is not allowed in this mode. + +#define NECP_FD_SUPPORTS_GUARD 1 + +#define NECP_CLIENT_ACTION_ADD 1 // Register a new client. Input: parameters in buffer; Output: client_id +#define NECP_CLIENT_ACTION_REMOVE 2 // Unregister a client. Input: client_id, optional struct ifnet_stats_per_flow +#define NECP_CLIENT_ACTION_COPY_PARAMETERS 3 // Copy client parameters. Input: client_id; Output: parameters in buffer +#define NECP_CLIENT_ACTION_COPY_RESULT 4 // Copy client result. Input: client_id; Output: result in buffer +#define NECP_CLIENT_ACTION_COPY_LIST 5 // Copy all client IDs. 
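A rough illustration of the ADD / COPY_RESULT flow from the action list above (not part of this patch): necp_open() and necp_client_action() are private syscalls with no public header; the prototypes below are transcribed from bsd/kern/syscalls.master and carry no ABI guarantee, and a real client would pass a populated parameter TLV buffer rather than the empty one used here.

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <uuid/uuid.h>

    /* Private syscalls, exported by libsystem_kernel on shipping systems. */
    extern int necp_open(int flags);
    extern int necp_client_action(int necp_fd, uint32_t action, uuid_t client_id,
        size_t client_id_len, uint8_t *buffer, size_t buffer_size);

    #define NECP_CLIENT_ACTION_ADD         1
    #define NECP_CLIENT_ACTION_COPY_RESULT 4

    int
    main(void)
    {
        uuid_t client_id;
        uint8_t parameters[1] = { 0 };  /* placeholder TLV parameter buffer */
        uint8_t result[512];            /* NECP_MAX_CLIENT_RESULT_SIZE */
        int fd = necp_open(0);

        if (fd < 0) {
            perror("necp_open");
            return 1;
        }
        /* ADD registers the client and returns its UUID... */
        if (necp_client_action(fd, NECP_CLIENT_ACTION_ADD, client_id,
            sizeof(uuid_t), parameters, sizeof(parameters)) == 0) {
            /* ...and COPY_RESULT fills the buffer with result TLVs. */
            necp_client_action(fd, NECP_CLIENT_ACTION_COPY_RESULT, client_id,
                sizeof(uuid_t), result, sizeof(result));
        }
        close(fd);
        return 0;
    }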
Output: struct necp_client_list in buffer +#define NECP_CLIENT_ACTION_REQUEST_NEXUS_INSTANCE 6 // Request a nexus instance from a nexus provider, optional struct necp_stats_bufreq +#define NECP_CLIENT_ACTION_AGENT 7 // Interact with agent. Input: client_id, agent parameters +#define NECP_CLIENT_ACTION_COPY_AGENT 8 // Copy agent content. Input: agent UUID; Output: struct netagent +#define NECP_CLIENT_ACTION_COPY_INTERFACE 9 // Copy interface details. Input: ifindex cast to UUID; Output: struct necp_interface_details +#define NECP_CLIENT_ACTION_SET_STATISTICS 10 // Deprecated +#define NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS 11 // Get route statistics. Input: client_id; Output: struct necp_stat_counts +#define NECP_CLIENT_ACTION_AGENT_USE 12 // Return the use count and increment the use count. Input/Output: struct necp_agent_use_parameters +#define NECP_CLIENT_ACTION_MAP_SYSCTLS 13 // Get the read-only sysctls memory location. Output: mach_vm_address_t +#define NECP_CLIENT_ACTION_UPDATE_CACHE 14 // Update heuristics and cache +#define NECP_CLIENT_ACTION_COPY_CLIENT_UPDATE 15 // Fetch an updated client for push-mode observer. Output: Client id, struct necp_client_observer_update in buffer +#define NECP_CLIENT_ACTION_COPY_UPDATED_RESULT 16 // Copy client result only if changed. Input: client_id; Output: result in buffer +#define NECP_CLIENT_ACTION_ADD_FLOW 17 // Add a flow. Input: client_id; Output: struct necp_client_add_flow +#define NECP_CLIENT_ACTION_REMOVE_FLOW 18 // Remove a flow. Input: flow_id, optional struct ifnet_stats_per_flow + + +#define NECP_CLIENT_PARAMETER_APPLICATION NECP_POLICY_CONDITION_APPLICATION // Requires entitlement +#define NECP_CLIENT_PARAMETER_REAL_APPLICATION NECP_POLICY_CONDITION_REAL_APPLICATION // Requires entitlement +#define NECP_CLIENT_PARAMETER_DOMAIN NECP_POLICY_CONDITION_DOMAIN +#define NECP_CLIENT_PARAMETER_ACCOUNT NECP_POLICY_CONDITION_ACCOUNT +#define NECP_CLIENT_PARAMETER_PID NECP_POLICY_CONDITION_PID // Requires entitlement +#define NECP_CLIENT_PARAMETER_UID NECP_POLICY_CONDITION_UID // Requires entitlement +#define NECP_CLIENT_PARAMETER_BOUND_INTERFACE NECP_POLICY_CONDITION_BOUND_INTERFACE +#define NECP_CLIENT_PARAMETER_TRAFFIC_CLASS NECP_POLICY_CONDITION_TRAFFIC_CLASS +#define NECP_CLIENT_PARAMETER_IP_PROTOCOL NECP_POLICY_CONDITION_IP_PROTOCOL +#define NECP_CLIENT_PARAMETER_LOCAL_ADDRESS NECP_POLICY_CONDITION_LOCAL_ADDR +#define NECP_CLIENT_PARAMETER_REMOTE_ADDRESS NECP_POLICY_CONDITION_REMOTE_ADDR +#define NECP_CLIENT_PARAMETER_NEXUS_KEY 102 // "Prohibit" will never choose an interface with that property -#define NECP_CLIENT_PARAMETER_PROHIBIT_INTERFACE 100 // String, interface name -#define NECP_CLIENT_PARAMETER_PROHIBIT_IF_TYPE 101 // u_int8_t, see ifru_functional_type in -#define NECP_CLIENT_PARAMETER_PROHIBIT_AGENT 102 // uuid_t, network agent UUID -#define NECP_CLIENT_PARAMETER_PROHIBIT_AGENT_TYPE 103 // struct necp_client_parameter_netagent_type +#define NECP_CLIENT_PARAMETER_PROHIBIT_INTERFACE 100 // String, interface name +#define NECP_CLIENT_PARAMETER_PROHIBIT_IF_TYPE 101 // u_int8_t, see ifru_functional_type in +#define NECP_CLIENT_PARAMETER_PROHIBIT_AGENT 102 // uuid_t, network agent UUID +#define NECP_CLIENT_PARAMETER_PROHIBIT_AGENT_TYPE 103 // struct necp_client_parameter_netagent_type // "Require" will choose an interface with that property, or none if not found -#define NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE 111 // u_int8_t, see ifru_functional_type in -#define NECP_CLIENT_PARAMETER_REQUIRE_AGENT 112 // uuid_t, network agent UUID 
-#define NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE 113 // struct necp_client_parameter_netagent_type +#define NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE 111 // u_int8_t, see ifru_functional_type in +#define NECP_CLIENT_PARAMETER_REQUIRE_AGENT 112 // uuid_t, network agent UUID +#define NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE 113 // struct necp_client_parameter_netagent_type // "Prefer" will choose an interface with an agent, or best otherwise if not found -#define NECP_CLIENT_PARAMETER_PREFER_AGENT 122 // uuid_t, network agent UUID -#define NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE 123 // struct necp_client_parameter_netagent_type +#define NECP_CLIENT_PARAMETER_PREFER_AGENT 122 // uuid_t, network agent UUID +#define NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE 123 // struct necp_client_parameter_netagent_type // "Avoid" will choose an interface without an agent, or best otherwise if unavoidable -#define NECP_CLIENT_PARAMETER_AVOID_AGENT 124 // uuid_t, network agent UUID -#define NECP_CLIENT_PARAMETER_AVOID_AGENT_TYPE 125 // struct necp_client_parameter_netagent_type +#define NECP_CLIENT_PARAMETER_AVOID_AGENT 124 // uuid_t, network agent UUID +#define NECP_CLIENT_PARAMETER_AVOID_AGENT_TYPE 125 // struct necp_client_parameter_netagent_type // Use actions with NECP_CLIENT_ACTION_AGENT -#define NECP_CLIENT_PARAMETER_TRIGGER_AGENT 130 // uuid_t, network agent UUID -#define NECP_CLIENT_PARAMETER_ASSERT_AGENT 131 // uuid_t, network agent UUID -#define NECP_CLIENT_PARAMETER_UNASSERT_AGENT 132 // uuid_t, network agent UUID - -#define NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT 200 // struct necp_client_endpoint -#define NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT 201 // struct necp_client_endpoint -#define NECP_CLIENT_PARAMETER_BROWSE_CATEGORY 202 // struct necp_client_endpoint - -#define NECP_CLIENT_PARAMETER_FLAGS 250 // u_int32_t, see NECP_CLIENT_PAREMETER_FLAG_* values - -#define NECP_CLIENT_PARAMETER_FLAG_MULTIPATH 0x0001 // Get multipath interface results -#define NECP_CLIENT_PARAMETER_FLAG_BROWSE 0x0002 // Agent assertions on nexuses are requests to browse -#define NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE 0x0004 // Prohibit expensive interfaces -#define NECP_CLIENT_PARAMETER_FLAG_LISTENER 0x0008 // Client is interested in listening for inbound connections -#define NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY 0x0010 // Client's traffic is discretionary, and eligible for early defuncting -#define NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE 0x0020 // Client is requesting to enable ECN -#define NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE 0x0040 // Client is requesting to disable ECN -#define NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE 0x0080 // Client is requesting to enable TFO -#define NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE 0x0100 // Interpret NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE only for primary - // interface, and allow exceptions for multipath or listeners - -#define NECP_CLIENT_RESULT_CLIENT_ID 1 // uuid_t -#define NECP_CLIENT_RESULT_POLICY_RESULT 2 // u_int32_t -#define NECP_CLIENT_RESULT_POLICY_RESULT_PARAMETER 3 // u_int32_t -#define NECP_CLIENT_RESULT_FILTER_CONTROL_UNIT 4 // u_int32_t -#define NECP_CLIENT_RESULT_INTERFACE_INDEX 5 // u_int32_t -#define NECP_CLIENT_RESULT_NETAGENT 6 // struct necp_client_result_netagent -#define NECP_CLIENT_RESULT_FLAGS 7 // u_int32_t, see NECP_CLIENT_RESULT_FLAG_* values -#define NECP_CLIENT_RESULT_INTERFACE 8 // struct necp_client_result_interface -#define NECP_CLIENT_RESULT_INTERFACE_OPTION 9 // struct necp_client_interface_option -#define NECP_CLIENT_RESULT_EFFECTIVE_MTU 10 
// u_int32_t -#define NECP_CLIENT_RESULT_FLOW 11 // TLV array of a single flow's state -#define NECP_CLIENT_RESULT_PROTO_CTL_EVENT 12 -#define NECP_CLIENT_RESULT_TFO_COOKIE 13 // NECP_TFO_COOKIE_LEN_MAX -#define NECP_CLIENT_RESULT_TFO_FLAGS 14 // u_int8_t -#define NECP_CLIENT_RESULT_RECOMMENDED_MSS 15 // u_int8_t -#define NECP_CLIENT_RESULT_FLOW_ID 16 // uuid_t -#define NECP_CLIENT_RESULT_INTERFACE_TIME_DELTA 17 // u_int32_t, seconds since interface up/down - -#define NECP_CLIENT_RESULT_NEXUS_INSTANCE 100 // uuid_t -#define NECP_CLIENT_RESULT_NEXUS_PORT 101 // u_int16_t -#define NECP_CLIENT_RESULT_NEXUS_KEY 102 // uuid_t -#define NECP_CLIENT_RESULT_NEXUS_PORT_FLOW_INDEX 103 // u_int32_t -#define NECP_CLIENT_RESULT_NEXUS_FLOW_STATS 104 // struct sk_stats_flow * - -#define NECP_CLIENT_RESULT_LOCAL_ENDPOINT 200 // struct necp_client_endpoint -#define NECP_CLIENT_RESULT_REMOTE_ENDPOINT 201 // struct necp_client_endpoint -#define NECP_CLIENT_RESULT_DISCOVERED_ENDPOINT 202 // struct necp_client_endpoint, result of browse -#define NECP_CLIENT_RESULT_EFFECTIVE_TRAFFIC_CLASS 210 // u_int32_t -#define NECP_CLIENT_RESULT_TRAFFIC_MGMT_BG 211 // u_int32_t, 1: background, 0: not background - -#define NECP_CLIENT_RESULT_FLAG_IS_LOCAL 0x0001 // Routes to this device -#define NECP_CLIENT_RESULT_FLAG_IS_DIRECT 0x0002 // Routes to directly accessible peer -#define NECP_CLIENT_RESULT_FLAG_HAS_IPV4 0x0004 // Supports IPv4 -#define NECP_CLIENT_RESULT_FLAG_HAS_IPV6 0x0008 // Supports IPv6 -#define NECP_CLIENT_RESULT_FLAG_DEFUNCT 0x0010 // Defunct -#define NECP_CLIENT_RESULT_FLAG_SATISFIED 0x0020 // Satisfied path -#define NECP_CLIENT_RESULT_FLAG_FLOW_ASSIGNED 0x0040 // Assigned, the flow is active -#define NECP_CLIENT_RESULT_FLAG_FLOW_VIABLE 0x0080 // Viable, the flow has a valid route -#define NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY 0x0100 // Flow should probe connectivity -#define NECP_CLIENT_RESULT_FLAG_ECN_ENABLED 0x0200 // ECN should be used -#define NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED 0x0400 // Fast open should not be used -#define NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT 0x0800 // Link quality is very bad, recommend close connections -#define NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING 0x1000 // QoS marking is allowed -#define NECP_CLIENT_RESULT_FLAG_HAS_NAT64 0x2000 // Has NAT64 prefix -#define NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER 0x4000 // Interface is in low-power mode +#define NECP_CLIENT_PARAMETER_TRIGGER_AGENT 130 // uuid_t, network agent UUID +#define NECP_CLIENT_PARAMETER_ASSERT_AGENT 131 // uuid_t, network agent UUID +#define NECP_CLIENT_PARAMETER_UNASSERT_AGENT 132 // uuid_t, network agent UUID + +#define NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT 200 // struct necp_client_endpoint +#define NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT 201 // struct necp_client_endpoint +#define NECP_CLIENT_PARAMETER_BROWSE_CATEGORY 202 // struct necp_client_endpoint + +#define NECP_CLIENT_PARAMETER_FLAGS 250 // u_int32_t, see NECP_CLIENT_PAREMETER_FLAG_* values + +#define NECP_CLIENT_PARAMETER_FLAG_MULTIPATH 0x0001 // Get multipath interface results +#define NECP_CLIENT_PARAMETER_FLAG_BROWSE 0x0002 // Agent assertions on nexuses are requests to browse +#define NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE 0x0004 // Prohibit expensive interfaces +#define NECP_CLIENT_PARAMETER_FLAG_LISTENER 0x0008 // Client is interested in listening for inbound connections +#define NECP_CLIENT_PARAMETER_FLAG_DISCRETIONARY 0x0010 // Client's traffic is discretionary, and eligible for early defuncting +#define 
NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE 0x0020 // Client is requesting to enable ECN +#define NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE 0x0040 // Client is requesting to disable ECN +#define NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE 0x0080 // Client is requesting to enable TFO +#define NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE 0x0100 // Interpret NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE only for primary +// interface, and allow exceptions for multipath or listeners + +#define NECP_CLIENT_RESULT_CLIENT_ID 1 // uuid_t +#define NECP_CLIENT_RESULT_POLICY_RESULT 2 // u_int32_t +#define NECP_CLIENT_RESULT_POLICY_RESULT_PARAMETER 3 // u_int32_t +#define NECP_CLIENT_RESULT_FILTER_CONTROL_UNIT 4 // u_int32_t +#define NECP_CLIENT_RESULT_INTERFACE_INDEX 5 // u_int32_t +#define NECP_CLIENT_RESULT_NETAGENT 6 // struct necp_client_result_netagent +#define NECP_CLIENT_RESULT_FLAGS 7 // u_int32_t, see NECP_CLIENT_RESULT_FLAG_* values +#define NECP_CLIENT_RESULT_INTERFACE 8 // struct necp_client_result_interface +#define NECP_CLIENT_RESULT_INTERFACE_OPTION 9 // struct necp_client_interface_option +#define NECP_CLIENT_RESULT_EFFECTIVE_MTU 10 // u_int32_t +#define NECP_CLIENT_RESULT_FLOW 11 // TLV array of a single flow's state +#define NECP_CLIENT_RESULT_PROTO_CTL_EVENT 12 +#define NECP_CLIENT_RESULT_TFO_COOKIE 13 // NECP_TFO_COOKIE_LEN_MAX +#define NECP_CLIENT_RESULT_TFO_FLAGS 14 // u_int8_t +#define NECP_CLIENT_RESULT_RECOMMENDED_MSS 15 // u_int8_t +#define NECP_CLIENT_RESULT_FLOW_ID 16 // uuid_t +#define NECP_CLIENT_RESULT_INTERFACE_TIME_DELTA 17 // u_int32_t, seconds since interface up/down + +#define NECP_CLIENT_RESULT_NEXUS_INSTANCE 100 // uuid_t +#define NECP_CLIENT_RESULT_NEXUS_PORT 101 // u_int16_t +#define NECP_CLIENT_RESULT_NEXUS_KEY 102 // uuid_t +#define NECP_CLIENT_RESULT_NEXUS_PORT_FLOW_INDEX 103 // u_int32_t +#define NECP_CLIENT_RESULT_NEXUS_FLOW_STATS 104 // struct sk_stats_flow * + +#define NECP_CLIENT_RESULT_LOCAL_ENDPOINT 200 // struct necp_client_endpoint +#define NECP_CLIENT_RESULT_REMOTE_ENDPOINT 201 // struct necp_client_endpoint +#define NECP_CLIENT_RESULT_DISCOVERED_ENDPOINT 202 // struct necp_client_endpoint, result of browse +#define NECP_CLIENT_RESULT_EFFECTIVE_TRAFFIC_CLASS 210 // u_int32_t +#define NECP_CLIENT_RESULT_TRAFFIC_MGMT_BG 211 // u_int32_t, 1: background, 0: not background + +#define NECP_CLIENT_RESULT_FLAG_IS_LOCAL 0x0001 // Routes to this device +#define NECP_CLIENT_RESULT_FLAG_IS_DIRECT 0x0002 // Routes to directly accessible peer +#define NECP_CLIENT_RESULT_FLAG_HAS_IPV4 0x0004 // Supports IPv4 +#define NECP_CLIENT_RESULT_FLAG_HAS_IPV6 0x0008 // Supports IPv6 +#define NECP_CLIENT_RESULT_FLAG_DEFUNCT 0x0010 // Defunct +#define NECP_CLIENT_RESULT_FLAG_SATISFIED 0x0020 // Satisfied path +#define NECP_CLIENT_RESULT_FLAG_FLOW_ASSIGNED 0x0040 // Assigned, the flow is active +#define NECP_CLIENT_RESULT_FLAG_FLOW_VIABLE 0x0080 // Viable, the flow has a valid route +#define NECP_CLIENT_RESULT_FLAG_PROBE_CONNECTIVITY 0x0100 // Flow should probe connectivity +#define NECP_CLIENT_RESULT_FLAG_ECN_ENABLED 0x0200 // ECN should be used +#define NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED 0x0400 // Fast open should not be used +#define NECP_CLIENT_RESULT_FLAG_LINK_QUALITY_ABORT 0x0800 // Link quality is very bad, recommend close connections +#define NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING 0x1000 // QoS marking is allowed +#define NECP_CLIENT_RESULT_FLAG_HAS_NAT64 0x2000 // Has NAT64 prefix +#define NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER 0x4000 // Interface is in low-power 
mode #define NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE (NECP_CLIENT_RESULT_FLAG_HAS_IPV4 | NECP_CLIENT_RESULT_FLAG_HAS_IPV6 | NECP_CLIENT_RESULT_FLAG_HAS_NAT64 | NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER) -#define NECP_CLIENT_RESULT_FAST_OPEN_SND_PROBE 0x01 // DEPRECATED - Fast open send probe -#define NECP_CLIENT_RESULT_FAST_OPEN_RCV_PROBE 0x02 // DEPRECATED - Fast open receive probe +#define NECP_CLIENT_RESULT_FAST_OPEN_SND_PROBE 0x01 // DEPRECATED - Fast open send probe +#define NECP_CLIENT_RESULT_FAST_OPEN_RCV_PROBE 0x02 // DEPRECATED - Fast open receive probe -#define NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE 0x01 -#define NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW 0x02 -#define NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM 0x04 +#define NECP_CLIENT_RESULT_RECOMMENDED_MSS_NONE 0x01 +#define NECP_CLIENT_RESULT_RECOMMENDED_MSS_LOW 0x02 +#define NECP_CLIENT_RESULT_RECOMMENDED_MSS_MEDIUM 0x04 struct necp_interface_signature { u_int8_t signature[IFNET_SIGNATURELEN]; @@ -579,11 +576,11 @@ struct necp_interface_details { struct necp_interface_signature ipv6_signature; }; -#define NECP_INTERFACE_FLAG_EXPENSIVE 0x0001 -#define NECP_INTERFACE_FLAG_TXSTART 0X0002 -#define NECP_INTERFACE_FLAG_NOACKPRI 0x0004 -#define NECP_INTERFACE_FLAG_3CARRIERAGG 0x0008 -#define NECP_INTERFACE_FLAG_IS_LOW_POWER 0x0010 +#define NECP_INTERFACE_FLAG_EXPENSIVE 0x0001 +#define NECP_INTERFACE_FLAG_TXSTART 0x0002 +#define NECP_INTERFACE_FLAG_NOACKPRI 0x0004 +#define NECP_INTERFACE_FLAG_3CARRIERAGG 0x0008 +#define NECP_INTERFACE_FLAG_IS_LOW_POWER 0x0010 struct necp_client_parameter_netagent_type { char netagent_domain[32]; @@ -627,11 +624,11 @@ struct necp_client_list { }; struct kev_necp_policies_changed_data { - u_int32_t changed_count; // Defaults to 0. + u_int32_t changed_count; // Defaults to 0. }; -#define NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS 0x01 // Request a nexus instance upon adding a flow -#define NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID 0x02 // Register the client ID rather than the flow registration ID with network agents +#define NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS 0x01 // Request a nexus instance upon adding a flow +#define NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID 0x02 // Register the client ID rather than the flow registration ID with network agents struct necp_client_flow_stats { u_int32_t stats_type; // NECP_CLIENT_STATISTICS_TYPE_* @@ -654,15 +651,15 @@ struct necp_agent_use_parameters { }; struct necp_client_flow_protoctl_event { - uint32_t protoctl_event_code; - uint32_t protoctl_event_val; + uint32_t protoctl_event_code; + uint32_t protoctl_event_val; /* TCP seq number is in host byte order */ - uint32_t protoctl_event_tcp_seq_num; + uint32_t protoctl_event_tcp_seq_num; }; -#define NECP_CLIENT_UPDATE_TYPE_PARAMETERS 1 // Parameters, for a new client -#define NECP_CLIENT_UPDATE_TYPE_RESULT 2 // Result, for a udpated client -#define NECP_CLIENT_UPDATE_TYPE_REMOVE 3 // Empty, for a removed client +#define NECP_CLIENT_UPDATE_TYPE_PARAMETERS 1 // Parameters, for a new client +#define NECP_CLIENT_UPDATE_TYPE_RESULT 2 // Result, for an updated client +#define NECP_CLIENT_UPDATE_TYPE_REMOVE 3 // Empty, for a removed client struct necp_client_observer_update { u_int32_t update_type; // NECP_CLIENT_UPDATE_TYPE_* @@ -681,12 +678,12 @@ struct necp_client_observer_update { SYSCTL_DECL(_net_necp); -#define NECPLOG(level, format, ...) do { \ - log((level > LOG_NOTICE ?
LOG_NOTICE : level), "%s: " format "\n", __FUNCTION__, __VA_ARGS__); \ } while (0) -#define NECPLOG0(level, msg) do { \ - log((level > LOG_NOTICE ? LOG_NOTICE : level), "%s: %s\n", __FUNCTION__, msg); \ +#define NECPLOG0(level, msg) do { \ + log((level > LOG_NOTICE ? LOG_NOTICE : level), "%s: %s\n", __FUNCTION__, msg); \ } while (0) enum necp_fd_type_t { @@ -696,9 +693,9 @@ enum necp_fd_type_t { }; union necp_sockaddr_union { - struct sockaddr sa; - struct sockaddr_in sin; - struct sockaddr_in6 sin6; + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; }; /* @@ -709,17 +706,17 @@ union necp_sockaddr_union { * ustats object */ struct necp_all_kstats { - struct necp_all_stats necp_stats_comm; /* kernel private stats snapshot */ - struct necp_all_stats *necp_stats_ustats; /* points to user-visible stats (in shared ustats region) */ + struct necp_all_stats necp_stats_comm; /* kernel private stats snapshot */ + struct necp_all_stats *necp_stats_ustats; /* points to user-visible stats (in shared ustats region) */ }; extern errno_t necp_client_init(void); extern int necp_application_find_policy_match_internal(proc_t proc, u_int8_t *parameters, u_int32_t parameters_size, - struct necp_aggregate_result *returned_result, - u_int32_t *flags, u_int required_interface_index, - const union necp_sockaddr_union *override_local_addr, - const union necp_sockaddr_union *override_remote_addr, - struct rtentry **returned_route, bool ignore_address); + struct necp_aggregate_result *returned_result, + u_int32_t *flags, u_int required_interface_index, + const union necp_sockaddr_union *override_local_addr, + const union necp_sockaddr_union *override_remote_addr, + struct rtentry **returned_route, bool ignore_address); /* * TLV utilities * @@ -732,66 +729,66 @@ struct necp_tlv_header { } __attribute__((__packed__)); extern u_int8_t *necp_buffer_write_tlv(u_int8_t *cursor, u_int8_t type, u_int32_t length, const void *value, - u_int8_t *buffer, u_int32_t buffer_length); + u_int8_t *buffer, u_int32_t buffer_length); extern u_int8_t *necp_buffer_write_tlv_if_different(u_int8_t *cursor, u_int8_t type, - u_int32_t length, const void *value, bool *updated, - u_int8_t *buffer, u_int32_t buffer_length); + u_int32_t length, const void *value, bool *updated, + u_int8_t *buffer, u_int32_t buffer_length); extern u_int8_t necp_buffer_get_tlv_type(u_int8_t *buffer, int tlv_offset); extern u_int32_t necp_buffer_get_tlv_length(u_int8_t *buffer, int tlv_offset); extern u_int8_t *necp_buffer_get_tlv_value(u_int8_t *buffer, int tlv_offset, u_int32_t *value_size); extern int necp_buffer_find_tlv(u_int8_t *buffer, u_int32_t buffer_length, int offset, u_int8_t type, int next); -#define NECPCTL_DROP_ALL_LEVEL 1 /* Drop all packets if no policy matches above this level */ -#define NECPCTL_DEBUG 2 /* Log all kernel policy matches */ -#define NECPCTL_PASS_LOOPBACK 3 /* Pass all loopback traffic */ -#define NECPCTL_PASS_KEEPALIVES 4 /* Pass all kernel-generated keepalive traffic */ -#define NECPCTL_SOCKET_POLICY_COUNT 5 /* Count of all socket-level policies */ -#define NECPCTL_SOCKET_NON_APP_POLICY_COUNT 6 /* Count of non-per-app socket-level policies */ -#define NECPCTL_IP_POLICY_COUNT 7 /* Count of all ip-level policies */ -#define NECPCTL_SESSION_COUNT 8 /* Count of NECP sessions */ -#define NECPCTL_CLIENT_FD_COUNT 9 /* Count of NECP client fds */ -#define NECPCTL_CLIENT_COUNT 10 /* Count of NECP clients */ -#define NECPCTL_ARENA_COUNT 11 /* Count of NECP arenas (stats, etc) */ -#define NECPCTL_NEXUS_FLOW_COUNT 12 
/* Count of NECP nexus flows */ -#define NECPCTL_SOCKET_FLOW_COUNT 13 /* Count of NECP socket flows */ -#define NECPCTL_IF_FLOW_COUNT 14 /* Count of NECP socket flows */ -#define NECPCTL_OBSERVER_FD_COUNT 15 /* Count of NECP observer fds */ -#define NECPCTL_OBSERVER_MESSAGE_LIMIT 16 /* Number of of NECP observer messages allowed to be queued */ -#define NECPCTL_SYSCTL_ARENA_COUNT 17 /* Count of sysctl arenas */ - -#define NECPCTL_NAMES { \ - { 0, 0 }, \ - { "drop_all_level", CTLTYPE_INT }, \ - { "debug", CTLTYPE_INT }, \ - { "pass_loopback", CTLTYPE_INT }, \ - { "pass_keepalives", CTLTYPE_INT }, \ +#define NECPCTL_DROP_ALL_LEVEL 1 /* Drop all packets if no policy matches above this level */ +#define NECPCTL_DEBUG 2 /* Log all kernel policy matches */ +#define NECPCTL_PASS_LOOPBACK 3 /* Pass all loopback traffic */ +#define NECPCTL_PASS_KEEPALIVES 4 /* Pass all kernel-generated keepalive traffic */ +#define NECPCTL_SOCKET_POLICY_COUNT 5 /* Count of all socket-level policies */ +#define NECPCTL_SOCKET_NON_APP_POLICY_COUNT 6 /* Count of non-per-app socket-level policies */ +#define NECPCTL_IP_POLICY_COUNT 7 /* Count of all ip-level policies */ +#define NECPCTL_SESSION_COUNT 8 /* Count of NECP sessions */ +#define NECPCTL_CLIENT_FD_COUNT 9 /* Count of NECP client fds */ +#define NECPCTL_CLIENT_COUNT 10 /* Count of NECP clients */ +#define NECPCTL_ARENA_COUNT 11 /* Count of NECP arenas (stats, etc) */ +#define NECPCTL_NEXUS_FLOW_COUNT 12 /* Count of NECP nexus flows */ +#define NECPCTL_SOCKET_FLOW_COUNT 13 /* Count of NECP socket flows */ +#define NECPCTL_IF_FLOW_COUNT 14 /* Count of NECP interface flows */ +#define NECPCTL_OBSERVER_FD_COUNT 15 /* Count of NECP observer fds */ +#define NECPCTL_OBSERVER_MESSAGE_LIMIT 16 /* Number of NECP observer messages allowed to be queued */ +#define NECPCTL_SYSCTL_ARENA_COUNT 17 /* Count of sysctl arenas */ + +#define NECPCTL_NAMES { \ + { 0, 0 }, \ + { "drop_all_level", CTLTYPE_INT }, \ + { "debug", CTLTYPE_INT }, \ + { "pass_loopback", CTLTYPE_INT }, \ + { "pass_keepalives", CTLTYPE_INT }, \ } typedef u_int32_t necp_kernel_policy_id; -#define NECP_KERNEL_POLICY_ID_NONE 0 -#define NECP_KERNEL_POLICY_ID_NO_MATCH 1 -#define NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET 2 -#define NECP_KERNEL_POLICY_ID_FIRST_VALID_IP UINT16_MAX +#define NECP_KERNEL_POLICY_ID_NONE 0 +#define NECP_KERNEL_POLICY_ID_NO_MATCH 1 +#define NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET 2 +#define NECP_KERNEL_POLICY_ID_FIRST_VALID_IP UINT16_MAX typedef u_int32_t necp_app_id; -#define NECP_KERNEL_POLICY_RESULT_NONE 0 -#define NECP_KERNEL_POLICY_RESULT_PASS NECP_POLICY_RESULT_PASS -#define NECP_KERNEL_POLICY_RESULT_SKIP NECP_POLICY_RESULT_SKIP -#define NECP_KERNEL_POLICY_RESULT_DROP NECP_POLICY_RESULT_DROP -#define NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT NECP_POLICY_RESULT_SOCKET_DIVERT -#define NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER NECP_POLICY_RESULT_SOCKET_FILTER -#define NECP_KERNEL_POLICY_RESULT_IP_TUNNEL NECP_POLICY_RESULT_IP_TUNNEL -#define NECP_KERNEL_POLICY_RESULT_IP_FILTER NECP_POLICY_RESULT_IP_FILTER -#define NECP_KERNEL_POLICY_RESULT_TRIGGER NECP_POLICY_RESULT_TRIGGER -#define NECP_KERNEL_POLICY_RESULT_TRIGGER_IF_NEEDED NECP_POLICY_RESULT_TRIGGER_IF_NEEDED -#define NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED NECP_POLICY_RESULT_TRIGGER_SCOPED -#define NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED NECP_POLICY_RESULT_NO_TRIGGER_SCOPED -#define NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED NECP_POLICY_RESULT_SOCKET_SCOPED -#define NECP_KERNEL_POLICY_RESULT_ROUTE_RULES NECP_POLICY_RESULT_ROUTE_RULES
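The first few NECPCTL_* values above are also readable by name through the net.necp sysctl node (SYSCTL_DECL(_net_necp) is declared earlier in this header). A small sketch, assuming the node names follow the NECPCTL_NAMES table; availability and required privileges may vary by release:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
        /* Names assumed from the NECPCTL_NAMES table; all are CTLTYPE_INT. */
        const char *names[] = {
            "net.necp.drop_all_level",
            "net.necp.debug",
            "net.necp.pass_loopback",
            "net.necp.pass_keepalives",
        };
        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            int value = 0;
            size_t length = sizeof(value);
            if (sysctlbyname(names[i], &value, &length, NULL, 0) == 0) {
                printf("%s = %d\n", names[i], value);
            }
        }
        return 0;
    }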
-#define NECP_KERNEL_POLICY_RESULT_USE_NETAGENT NECP_POLICY_RESULT_USE_NETAGENT -#define NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED NECP_POLICY_RESULT_NETAGENT_SCOPED -#define NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT NECP_POLICY_RESULT_SCOPED_DIRECT +#define NECP_KERNEL_POLICY_RESULT_NONE 0 +#define NECP_KERNEL_POLICY_RESULT_PASS NECP_POLICY_RESULT_PASS +#define NECP_KERNEL_POLICY_RESULT_SKIP NECP_POLICY_RESULT_SKIP +#define NECP_KERNEL_POLICY_RESULT_DROP NECP_POLICY_RESULT_DROP +#define NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT NECP_POLICY_RESULT_SOCKET_DIVERT +#define NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER NECP_POLICY_RESULT_SOCKET_FILTER +#define NECP_KERNEL_POLICY_RESULT_IP_TUNNEL NECP_POLICY_RESULT_IP_TUNNEL +#define NECP_KERNEL_POLICY_RESULT_IP_FILTER NECP_POLICY_RESULT_IP_FILTER +#define NECP_KERNEL_POLICY_RESULT_TRIGGER NECP_POLICY_RESULT_TRIGGER +#define NECP_KERNEL_POLICY_RESULT_TRIGGER_IF_NEEDED NECP_POLICY_RESULT_TRIGGER_IF_NEEDED +#define NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED NECP_POLICY_RESULT_TRIGGER_SCOPED +#define NECP_KERNEL_POLICY_RESULT_NO_TRIGGER_SCOPED NECP_POLICY_RESULT_NO_TRIGGER_SCOPED +#define NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED NECP_POLICY_RESULT_SOCKET_SCOPED +#define NECP_KERNEL_POLICY_RESULT_ROUTE_RULES NECP_POLICY_RESULT_ROUTE_RULES +#define NECP_KERNEL_POLICY_RESULT_USE_NETAGENT NECP_POLICY_RESULT_USE_NETAGENT +#define NECP_KERNEL_POLICY_RESULT_NETAGENT_SCOPED NECP_POLICY_RESULT_NETAGENT_SCOPED +#define NECP_KERNEL_POLICY_RESULT_SCOPED_DIRECT NECP_POLICY_RESULT_SCOPED_DIRECT typedef struct { u_int32_t identifier; @@ -799,14 +796,14 @@ typedef struct { } necp_kernel_policy_service; typedef union { - u_int tunnel_interface_index; - u_int scoped_interface_index; - u_int32_t flow_divert_control_unit; - u_int32_t filter_control_unit; - u_int32_t skip_policy_order; - u_int32_t route_rule_id; - u_int32_t netagent_id; - necp_kernel_policy_service service; + u_int tunnel_interface_index; + u_int scoped_interface_index; + u_int32_t flow_divert_control_unit; + u_int32_t filter_control_unit; + u_int32_t skip_policy_order; + u_int32_t route_rule_id; + u_int32_t netagent_id; + necp_kernel_policy_service service; } necp_kernel_policy_result_parameter; enum necp_boolean_state { @@ -816,107 +813,107 @@ enum necp_boolean_state { }; struct necp_kernel_socket_policy { - LIST_ENTRY(necp_kernel_socket_policy) chain; - necp_kernel_policy_id id; - necp_policy_order order; - u_int32_t session_order; - int session_pid; - - u_int32_t condition_mask; - u_int32_t condition_negated_mask; - necp_kernel_policy_id cond_policy_id; - u_int32_t cond_app_id; // Locally assigned ID value stored - u_int32_t cond_real_app_id; // Locally assigned ID value stored - char *cond_custom_entitlement; // String - u_int8_t cond_custom_entitlement_matched;// Boolean if entitlement matched app - u_int32_t cond_account_id; // Locally assigned ID value stored - char *cond_domain; // String - u_int8_t cond_domain_dot_count; // Number of dots in cond_domain - pid_t cond_pid; - uid_t cond_uid; - ifnet_t cond_bound_interface; // Matches specific binding only - struct necp_policy_condition_tc_range cond_traffic_class; // Matches traffic class in range - u_int16_t cond_protocol; // Matches IP protcol number - union necp_sockaddr_union cond_local_start; // Matches local IP address (or start) - union necp_sockaddr_union cond_local_end; // Matches IP address range - u_int8_t cond_local_prefix; // Defines subnet - union necp_sockaddr_union cond_remote_start; // Matches remote IP address (or start) - union 
necp_sockaddr_union cond_remote_end; // Matches IP address range - u_int8_t cond_remote_prefix; // Defines subnet + LIST_ENTRY(necp_kernel_socket_policy) chain; + necp_kernel_policy_id id; + necp_policy_order order; + u_int32_t session_order; + int session_pid; + + u_int32_t condition_mask; + u_int32_t condition_negated_mask; + necp_kernel_policy_id cond_policy_id; + u_int32_t cond_app_id; // Locally assigned ID value stored + u_int32_t cond_real_app_id; // Locally assigned ID value stored + char *cond_custom_entitlement; // String + u_int8_t cond_custom_entitlement_matched;// Boolean if entitlement matched app + u_int32_t cond_account_id; // Locally assigned ID value stored + char *cond_domain; // String + u_int8_t cond_domain_dot_count; // Number of dots in cond_domain + pid_t cond_pid; + uid_t cond_uid; + ifnet_t cond_bound_interface; // Matches specific binding only + struct necp_policy_condition_tc_range cond_traffic_class; // Matches traffic class in range + u_int16_t cond_protocol; // Matches IP protocol number + union necp_sockaddr_union cond_local_start; // Matches local IP address (or start) + union necp_sockaddr_union cond_local_end; // Matches IP address range + u_int8_t cond_local_prefix; // Defines subnet + union necp_sockaddr_union cond_remote_start; // Matches remote IP address (or start) + union necp_sockaddr_union cond_remote_end; // Matches IP address range + u_int8_t cond_remote_prefix; // Defines subnet struct necp_policy_condition_agent_type cond_agent_type; - necp_kernel_policy_result result; - necp_kernel_policy_result_parameter result_parameter; + necp_kernel_policy_result result; + necp_kernel_policy_result_parameter result_parameter; }; struct necp_kernel_ip_output_policy { - LIST_ENTRY(necp_kernel_ip_output_policy) chain; - necp_kernel_policy_id id; - necp_policy_order suborder; - necp_policy_order order; - u_int32_t session_order; - int session_pid; - - u_int32_t condition_mask; - u_int32_t condition_negated_mask; - necp_kernel_policy_id cond_policy_id; - ifnet_t cond_bound_interface; // Matches specific binding only - u_int16_t cond_protocol; // Matches IP protcol number - union necp_sockaddr_union cond_local_start; // Matches local IP address (or start) - union necp_sockaddr_union cond_local_end; // Matches IP address range - u_int8_t cond_local_prefix; // Defines subnet - union necp_sockaddr_union cond_remote_start; // Matches remote IP address (or start) - union necp_sockaddr_union cond_remote_end; // Matches IP address range - u_int8_t cond_remote_prefix; // Defines subnet - u_int32_t cond_last_interface_index; - - necp_kernel_policy_result result; - necp_kernel_policy_result_parameter result_parameter; + LIST_ENTRY(necp_kernel_ip_output_policy) chain; + necp_kernel_policy_id id; + necp_policy_order suborder; + necp_policy_order order; + u_int32_t session_order; + int session_pid; + + u_int32_t condition_mask; + u_int32_t condition_negated_mask; + necp_kernel_policy_id cond_policy_id; + ifnet_t cond_bound_interface; // Matches specific binding only + u_int16_t cond_protocol; // Matches IP protocol number + union necp_sockaddr_union cond_local_start; // Matches local IP address (or start) + union necp_sockaddr_union cond_local_end; // Matches IP address range + u_int8_t cond_local_prefix; // Defines subnet + union necp_sockaddr_union cond_remote_start; // Matches remote IP address (or start) + union necp_sockaddr_union cond_remote_end; // Matches IP address range + u_int8_t cond_remote_prefix; // Defines subnet + u_int32_t cond_last_interface_index; + +
necp_kernel_policy_result result; + necp_kernel_policy_result_parameter result_parameter; }; -#define MAX_KERNEL_SOCKET_POLICIES 1 -#define MAX_KERNEL_IP_OUTPUT_POLICIES 4 +#define MAX_KERNEL_SOCKET_POLICIES 1 +#define MAX_KERNEL_IP_OUTPUT_POLICIES 4 struct necp_session_policy { LIST_ENTRY(necp_session_policy) chain; - bool applied; // Applied into the kernel table - bool pending_deletion; // Waiting to be removed from kernel table - bool pending_update; // Policy has been modified since creation/last application - necp_policy_id local_id; - necp_policy_order order; - u_int8_t *result; - u_int32_t result_size; - u_int8_t *conditions; // Array of conditions, each with a u_int32_t length at start - u_int32_t conditions_size; - u_int8_t *route_rules; // Array of route rules, each with a u_int32_t length at start - u_int32_t route_rules_size; - - uuid_t applied_app_uuid; - uuid_t applied_real_app_uuid; - char *applied_account; - - uuid_t applied_result_uuid; - - u_int32_t applied_route_rules_id; - - necp_kernel_policy_id kernel_socket_policies[MAX_KERNEL_SOCKET_POLICIES]; - necp_kernel_policy_id kernel_ip_output_policies[MAX_KERNEL_IP_OUTPUT_POLICIES]; + bool applied; // Applied into the kernel table + bool pending_deletion; // Waiting to be removed from kernel table + bool pending_update; // Policy has been modified since creation/last application + necp_policy_id local_id; + necp_policy_order order; + u_int8_t *result; + u_int32_t result_size; + u_int8_t *conditions; // Array of conditions, each with a u_int32_t length at start + u_int32_t conditions_size; + u_int8_t *route_rules; // Array of route rules, each with a u_int32_t length at start + u_int32_t route_rules_size; + + uuid_t applied_app_uuid; + uuid_t applied_real_app_uuid; + char *applied_account; + + uuid_t applied_result_uuid; + + u_int32_t applied_route_rules_id; + + necp_kernel_policy_id kernel_socket_policies[MAX_KERNEL_SOCKET_POLICIES]; + necp_kernel_policy_id kernel_ip_output_policies[MAX_KERNEL_IP_OUTPUT_POLICIES]; }; struct necp_aggregate_socket_result { - necp_kernel_policy_result result; - necp_kernel_policy_result_parameter result_parameter; - necp_kernel_policy_filter filter_control_unit; - u_int32_t route_rule_id; - int32_t qos_marking_gencount; + necp_kernel_policy_result result; + necp_kernel_policy_result_parameter result_parameter; + necp_kernel_policy_filter filter_control_unit; + u_int32_t route_rule_id; + int32_t qos_marking_gencount; }; struct necp_inpcb_result { - u_int32_t app_id; - necp_kernel_policy_id policy_id; - necp_kernel_policy_id skip_policy_id; - int32_t policy_gencount; - u_int32_t flowhash; - struct necp_aggregate_socket_result results; + u_int32_t app_id; + necp_kernel_policy_id policy_id; + necp_kernel_policy_id skip_policy_id; + int32_t policy_gencount; + u_int32_t flowhash; + struct necp_aggregate_socket_result results; }; extern errno_t necp_init(void); @@ -936,36 +933,36 @@ extern u_int necp_socket_get_rescope_if_index(struct inpcb *inp); extern u_int32_t necp_socket_get_effective_mtu(struct inpcb *inp, u_int32_t current_mtu); extern bool necp_socket_is_allowed_to_send_recv(struct inpcb *inp, necp_kernel_policy_id *return_policy_id, - u_int32_t *return_route_rule_id, - necp_kernel_policy_id *return_skip_policy_id); + u_int32_t *return_route_rule_id, + necp_kernel_policy_id *return_skip_policy_id); extern bool necp_socket_is_allowed_to_send_recv_v4(struct inpcb *inp, u_int16_t local_port, - u_int16_t remote_port, struct in_addr *local_addr, - struct in_addr *remote_addr, ifnet_t interface, - 
necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, - necp_kernel_policy_id *return_skip_policy_id); + u_int16_t remote_port, struct in_addr *local_addr, + struct in_addr *remote_addr, ifnet_t interface, + necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, + necp_kernel_policy_id *return_skip_policy_id); extern bool necp_socket_is_allowed_to_send_recv_v6(struct inpcb *inp, u_int16_t local_port, - u_int16_t remote_port, struct in6_addr *local_addr, - struct in6_addr *remote_addr, ifnet_t interface, - necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, - necp_kernel_policy_id *return_skip_policy_id); + u_int16_t remote_port, struct in6_addr *local_addr, + struct in6_addr *remote_addr, ifnet_t interface, + necp_kernel_policy_id *return_policy_id, u_int32_t *return_route_rule_id, + necp_kernel_policy_id *return_skip_policy_id); extern void necp_socket_update_qos_marking(struct inpcb *inp, struct rtentry *route, struct ifnet *interface, u_int32_t route_rule_id); extern int necp_mark_packet_from_socket(struct mbuf *packet, struct inpcb *inp, necp_kernel_policy_id policy_id, - u_int32_t route_rule_id, necp_kernel_policy_id skip_policy_id); + u_int32_t route_rule_id, necp_kernel_policy_id skip_policy_id); extern necp_kernel_policy_id necp_get_policy_id_from_packet(struct mbuf *packet); extern necp_kernel_policy_id necp_get_skip_policy_id_from_packet(struct mbuf *packet); extern u_int32_t necp_get_last_interface_index_from_packet(struct mbuf *packet); extern u_int32_t necp_get_route_rule_id_from_packet(struct mbuf *packet); extern int necp_get_app_uuid_from_packet(struct mbuf *packet, - uuid_t app_uuid); + uuid_t app_uuid); extern necp_kernel_policy_id necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local_addr, - struct sockaddr *override_remote_addr, u_int32_t override_bound_interface); + struct sockaddr *override_remote_addr, u_int32_t override_bound_interface); extern necp_kernel_policy_id necp_ip_output_find_policy_match(struct mbuf *packet, int flags, struct ip_out_args *ipoa, - necp_kernel_policy_result *result, - necp_kernel_policy_result_parameter *result_parameter); + necp_kernel_policy_result *result, + necp_kernel_policy_result_parameter *result_parameter); extern necp_kernel_policy_id necp_ip6_output_find_policy_match(struct mbuf *packet, int flags, struct ip6_out_args *ip6oa, - necp_kernel_policy_result *result, - necp_kernel_policy_result_parameter *result_parameter); + necp_kernel_policy_result *result, + necp_kernel_policy_result_parameter *result_parameter); extern int necp_mark_packet_from_ip(struct mbuf *packet, necp_kernel_policy_id policy_id); extern int necp_mark_packet_from_interface(struct mbuf *packet, ifnet_t interface); @@ -995,8 +992,8 @@ extern int necp_client_assert_bb_radio_manager(uuid_t client_id, bool assert); extern int necp_client_assign_from_socket(pid_t pid, uuid_t client_id, struct inpcb *inp); extern int necp_assign_client_result(uuid_t netagent_uuid, uuid_t client_id, - u_int8_t *assigned_results, size_t assigned_results_length); -struct skmem_obj_info; // forward declaration + u_int8_t *assigned_results, size_t assigned_results_length); +struct skmem_obj_info; // forward declaration extern int necp_stats_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim, void *arg, uint32_t skmflag); extern int necp_stats_dtor(void *addr, void *arg); @@ -1007,10 +1004,10 @@ necp_update_flow_protoctl_event(uuid_t netagent_uuid, uuid_t client_id, uint32_t 
protoctl_event_code, uint32_t protoctl_event_val, uint32_t protoctl_event_tcp_seq_num); -#define NECP_FLOWADV_IDX_INVALID UINT32_MAX +#define NECP_FLOWADV_IDX_INVALID UINT32_MAX extern void *necp_create_nexus_assign_message(uuid_t nexus_instance, u_int32_t nexus_port, void *key, uint32_t key_length, - struct necp_client_endpoint *local_endpoint, struct necp_client_endpoint *remote_endpoint, - u_int32_t flow_adv_index, void *flow_stats, size_t *message_length); + struct necp_client_endpoint *local_endpoint, struct necp_client_endpoint *remote_endpoint, + u_int32_t flow_adv_index, void *flow_stats, size_t *message_length); struct necp_client_nexus_parameters { pid_t pid; @@ -1025,9 +1022,9 @@ struct necp_client_nexus_parameters { unsigned allow_qos_marking:1; }; -#define NECP_CLIENT_CBACTION_NONVIABLE 1 -#define NECP_CLIENT_CBACTION_VIABLE 2 -#define NECP_CLIENT_CBACTION_INITIAL 3 +#define NECP_CLIENT_CBACTION_NONVIABLE 1 +#define NECP_CLIENT_CBACTION_VIABLE 2 +#define NECP_CLIENT_CBACTION_INITIAL 3 struct necp_client_add_flow_default { uuid_t agent_uuid; @@ -1050,13 +1047,13 @@ extern int necp_match_policy(const uint8_t *parameters, size_t parameters_size, extern int necp_open(int flags); extern int necp_client_action(int necp_fd, uint32_t action, uuid_t client_id, - size_t client_id_len, uint8_t *buffer, size_t buffer_size); + size_t client_id_len, uint8_t *buffer, size_t buffer_size); extern int necp_session_open(int flags); extern int necp_session_action(int necp_fd, uint32_t action, - uint8_t *in_buffer, size_t in_buffer_length, - uint8_t *out_buffer, size_t out_buffer_length); + uint8_t *in_buffer, size_t in_buffer_length, + uint8_t *out_buffer, size_t out_buffer_length); #endif /* !KERNEL */ diff --git a/bsd/net/necp_client.c b/bsd/net/necp_client.c index 814f2f0be..893ce06f0 100644 --- a/bsd/net/necp_client.c +++ b/bsd/net/necp_client.c @@ -148,11 +148,11 @@ extern u_int32_t necp_debug; static int noop_read(struct fileproc *, struct uio *, int, vfs_context_t); static int noop_write(struct fileproc *, struct uio *, int, vfs_context_t); static int noop_ioctl(struct fileproc *, unsigned long, caddr_t, - vfs_context_t); + vfs_context_t); static int necpop_select(struct fileproc *, int, void *, vfs_context_t); static int necpop_close(struct fileglob *, vfs_context_t); static int necpop_kqfilter(struct fileproc *, struct knote *, - struct kevent_internal_s *kev, vfs_context_t); + struct kevent_internal_s *kev, vfs_context_t); // Timer functions static int necp_timeout_microseconds = 1000 * 100; // 100ms @@ -172,33 +172,33 @@ SYSCTL_INT(_net_necp, NECPCTL_SOCKET_FLOW_COUNT, socket_flow_count, CTLFLAG_LOCK SYSCTL_INT(_net_necp, NECPCTL_IF_FLOW_COUNT, if_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_if_flow_count, 0, ""); SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_MESSAGE_LIMIT, observer_message_limit, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_observer_message_limit, 256, ""); -#define NECP_MAX_CLIENT_LIST_SIZE 1024 * 1024 // 1MB -#define NECP_MAX_AGENT_ACTION_SIZE 256 +#define NECP_MAX_CLIENT_LIST_SIZE 1024 * 1024 // 1MB +#define NECP_MAX_AGENT_ACTION_SIZE 256 extern int tvtohz(struct timeval *); extern unsigned int get_maxmtu(struct rtentry *); // Parsed parameters -#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR 0x00001 -#define NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR 0x00002 -#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF 0x00004 -#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF 0x00008 -#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE 0x00010 -#define 
NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE 0x00020 -#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT 0x00040 -#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT 0x00080 -#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT 0x00100 -#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT 0x00200 -#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE 0x00400 -#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE 0x00800 -#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE 0x01000 -#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE 0x02000 -#define NECP_PARSED_PARAMETERS_FIELD_FLAGS 0x04000 -#define NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL 0x08000 -#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID 0x10000 -#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID 0x20000 -#define NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS 0x40000 -#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT 0x80000 +#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR 0x00001 +#define NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR 0x00002 +#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF 0x00004 +#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF 0x00008 +#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE 0x00010 +#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE 0x00020 +#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT 0x00040 +#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT 0x00080 +#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT 0x00100 +#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT 0x00200 +#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE 0x00400 +#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE 0x00800 +#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE 0x01000 +#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE 0x02000 +#define NECP_PARSED_PARAMETERS_FIELD_FLAGS 0x04000 +#define NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL 0x08000 +#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID 0x10000 +#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID 0x20000 +#define NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS 0x40000 +#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT 0x80000 #define NECP_MAX_PARSED_PARAMETERS 16 struct necp_client_parsed_parameters { @@ -226,16 +226,16 @@ struct necp_client_parsed_parameters { static bool necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters, - u_int *return_ifindex, bool *validate_agents); + u_int *return_ifindex, bool *validate_agents); static bool necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa); static bool necp_ifnet_matches_parameters(struct ifnet *ifp, - struct necp_client_parsed_parameters *parsed_parameters, - u_int32_t *preferred_count, - bool secondary_interface); + struct necp_client_parsed_parameters *parsed_parameters, + u_int32_t *preferred_count, + bool secondary_interface); static const struct fileops necp_fd_ops = { .fo_type = DTYPE_NETPOLICY, @@ -437,9 +437,9 @@ struct necp_client_update { }; -#define NAIF_ATTACHED 0x1 // arena is attached to list -#define NAIF_REDIRECT 0x2 // arena mmap has been redirected -#define NAIF_DEFUNCT 0x4 // arena is now defunct +#define NAIF_ATTACHED 0x1 // arena is attached to list +#define NAIF_REDIRECT 0x2 // arena mmap has been redirected +#define NAIF_DEFUNCT 0x4 // arena is now defunct struct necp_fd_data { u_int8_t necp_fd_type; @@ -462,28 +462,28 @@ struct necp_fd_data { static LIST_HEAD(_necp_fd_list, necp_fd_data) necp_fd_list; static LIST_HEAD(_necp_fd_observer_list, necp_fd_data) necp_fd_observer_list; -#define 
NECP_CLIENT_FD_ZONE_MAX 128 -#define NECP_CLIENT_FD_ZONE_NAME "necp.clientfd" +#define NECP_CLIENT_FD_ZONE_MAX 128 +#define NECP_CLIENT_FD_ZONE_NAME "necp.clientfd" -static unsigned int necp_client_fd_size; /* size of zone element */ -static struct zone *necp_client_fd_zone; /* zone for necp_fd_data */ +static unsigned int necp_client_fd_size; /* size of zone element */ +static struct zone *necp_client_fd_zone; /* zone for necp_fd_data */ -#define NECP_FLOW_ZONE_NAME "necp.flow" -#define NECP_FLOW_REGISTRATION_ZONE_NAME "necp.flowregistration" +#define NECP_FLOW_ZONE_NAME "necp.flow" +#define NECP_FLOW_REGISTRATION_ZONE_NAME "necp.flowregistration" -static unsigned int necp_flow_size; /* size of necp_client_flow */ -static struct mcache *necp_flow_cache; /* cache for necp_client_flow */ +static unsigned int necp_flow_size; /* size of necp_client_flow */ +static struct mcache *necp_flow_cache; /* cache for necp_client_flow */ -static unsigned int necp_flow_registration_size; /* size of necp_client_flow_registration */ -static struct mcache *necp_flow_registration_cache; /* cache for necp_client_flow_registration */ +static unsigned int necp_flow_registration_size; /* size of necp_client_flow_registration */ +static struct mcache *necp_flow_registration_cache; /* cache for necp_client_flow_registration */ -#define NECP_ARENA_INFO_ZONE_MAX 128 -#define NECP_ARENA_INFO_ZONE_NAME "necp.arenainfo" +#define NECP_ARENA_INFO_ZONE_MAX 128 +#define NECP_ARENA_INFO_ZONE_NAME "necp.arenainfo" -static lck_grp_attr_t *necp_fd_grp_attr = NULL; -static lck_attr_t *necp_fd_mtx_attr = NULL; -static lck_grp_t *necp_fd_mtx_grp = NULL; +static lck_grp_attr_t *necp_fd_grp_attr = NULL; +static lck_attr_t *necp_fd_mtx_attr = NULL; +static lck_grp_t *necp_fd_mtx_grp = NULL; decl_lck_rw_data(static, necp_fd_lock); decl_lck_rw_data(static, necp_observer_lock); @@ -541,23 +541,23 @@ static int noop_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) { #pragma unused(fp, uio, flags, ctx) - return (ENXIO); + return ENXIO; } static int noop_write(struct fileproc *fp, struct uio *uio, int flags, - vfs_context_t ctx) + vfs_context_t ctx) { #pragma unused(fp, uio, flags, ctx) - return (ENXIO); + return ENXIO; } static int noop_ioctl(struct fileproc *fp, unsigned long com, caddr_t data, - vfs_context_t ctx) + vfs_context_t ctx) { #pragma unused(fp, com, data, ctx) - return (ENOTTY); + return ENOTTY; } static void @@ -616,7 +616,7 @@ necp_fd_poll(struct necp_fd_data *fd_data, int events, void *wql, struct proc *p has_unread_clients = TRUE; } NECP_CLIENT_UNLOCK(client); - if (has_unread_clients) { + if (has_unread_clients) { break; } } @@ -627,7 +627,7 @@ necp_fd_poll(struct necp_fd_data *fd_data, int events, void *wql, struct proc *p } } - return (revents); + return revents; } static inline void @@ -645,7 +645,7 @@ necp_generate_client_id(uuid_t client_id, bool is_flow) static inline bool necp_client_id_is_flow(uuid_t client_id) { - return (client_id[9] & 0x01); + return client_id[9] & 0x01; } static struct necp_client * @@ -674,7 +674,7 @@ necp_find_client_and_lock(uuid_t client_id) NECP_CLIENT_LOCK(client); } - return (client); + return client; } static struct necp_client_flow_registration * @@ -691,7 +691,7 @@ necp_client_find_flow(struct necp_client *client, uuid_t flow_id) flow = RB_ROOT(&client->flow_registrations); } - return (flow); + return flow; } static struct necp_client * @@ -713,7 +713,7 @@ necp_client_fd_find_client_unlocked(struct necp_fd_data *client_fd, uuid_t clien client = 
RB_FIND(_necp_client_tree, &client_fd->clients, &find); } - return (client); + return client; } static struct necp_client * @@ -724,26 +724,26 @@ necp_client_fd_find_client_and_lock(struct necp_fd_data *client_fd, uuid_t clien NECP_CLIENT_LOCK(client); } - return (client); + return client; } static inline int necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1) { - return (uuid_compare(client0->client_id, client1->client_id)); + return uuid_compare(client0->client_id, client1->client_id); } static inline int necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1) { - return (uuid_compare(flow0->registration_id, flow1->registration_id)); + return uuid_compare(flow0->registration_id, flow1->registration_id); } static int necpop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) { #pragma unused(fp, which, wql, ctx) - return (0); + return 0; struct necp_fd_data *fd_data = NULL; int revents = 0; int events = 0; @@ -751,27 +751,27 @@ necpop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx) fd_data = (struct necp_fd_data *)fp->f_fglob->fg_data; if (fd_data == NULL) { - return (0); + return 0; } procp = vfs_context_proc(ctx); switch (which) { - case FREAD: { - events = POLLIN; - break; - } + case FREAD: { + events = POLLIN; + break; + } - default: { - return (1); - } + default: { + return 1; + } } NECP_FD_LOCK(fd_data); revents = necp_fd_poll(fd_data, events, wql, procp, 0); NECP_FD_UNLOCK(fd_data); - return ((events & revents) ? 1 : 0); + return (events & revents) ? 1 : 0; } static void @@ -809,7 +809,7 @@ necp_fd_knrprocess(struct knote *kn, struct filt_process_s *data, struct kevent_ *kev = kn->kn_kevent; } NECP_FD_UNLOCK(fd_data); - return (res); + return res; } static int @@ -825,7 +825,7 @@ necp_fd_knrtouch(struct knote *kn, struct kevent_internal_s *kev) revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1); NECP_FD_UNLOCK(fd_data); - return ((revents & POLLIN) != 0); + return (revents & POLLIN) != 0; } SECURITY_READ_ONLY_EARLY(struct filterops) necp_fd_rfiltops = { @@ -838,7 +838,7 @@ SECURITY_READ_ONLY_EARLY(struct filterops) necp_fd_rfiltops = { static int necpop_kqfilter(struct fileproc *fp, struct knote *kn, - __unused struct kevent_internal_s *kev, vfs_context_t ctx) + __unused struct kevent_internal_s *kev, vfs_context_t ctx) { #pragma unused(fp, ctx) struct necp_fd_data *fd_data = NULL; @@ -848,7 +848,7 @@ necpop_kqfilter(struct fileproc *fp, struct knote *kn, NECPLOG(LOG_ERR, "bad filter request %d", kn->kn_filter); kn->kn_flags = EV_ERROR; kn->kn_data = EINVAL; - return (0); + return 0; } fd_data = (struct necp_fd_data *)kn->kn_fp->f_fglob->fg_data; @@ -856,7 +856,7 @@ necpop_kqfilter(struct fileproc *fp, struct knote *kn, NECPLOG0(LOG_ERR, "No channel for kqfilter"); kn->kn_flags = EV_ERROR; kn->kn_data = ENOENT; - return (0); + return 0; } NECP_FD_LOCK(fd_data); @@ -868,7 +868,7 @@ necpop_kqfilter(struct fileproc *fp, struct knote *kn, NECP_FD_UNLOCK(fd_data); - return ((revents & POLLIN) != 0); + return (revents & POLLIN) != 0; } #define INTERFACE_FLAGS_SHIFT 32 @@ -879,15 +879,15 @@ necpop_kqfilter(struct fileproc *fp, struct knote *kn, static uint64_t combine_interface_details(uint32_t interface_index, uint16_t interface_flags) { - return (((uint64_t)interface_flags & INTERFACE_FLAGS_MASK) << INTERFACE_FLAGS_SHIFT | - ((uint64_t)interface_index & INTERFACE_INDEX_MASK) << INTERFACE_INDEX_SHIFT); + return ((uint64_t)interface_flags & 
INTERFACE_FLAGS_MASK) << INTERFACE_FLAGS_SHIFT | + ((uint64_t)interface_index & INTERFACE_INDEX_MASK) << INTERFACE_INDEX_SHIFT; } static void necp_defunct_flow_registration(struct necp_client *client, - struct necp_client_flow_registration *flow_registration, - struct _necp_flow_defunct_list *defunct_list) + struct necp_client_flow_registration *flow_registration, + struct _necp_flow_defunct_list *defunct_list) { NECP_CLIENT_ASSERT_LOCKED(client); @@ -896,17 +896,16 @@ necp_defunct_flow_registration(struct necp_client *client, struct necp_client_flow *search_flow = NULL; LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) { if (search_flow->nexus && - !uuid_is_null(search_flow->u.nexus_agent)) { - + !uuid_is_null(search_flow->u.nexus_agent)) { // Save defunct values for the nexus if (defunct_list != NULL) { // Sleeping alloc won't fail; copy only what's necessary - struct necp_flow_defunct *flow_defunct = _MALLOC(sizeof (struct necp_flow_defunct), - M_NECP, M_WAITOK | M_ZERO); + struct necp_flow_defunct *flow_defunct = _MALLOC(sizeof(struct necp_flow_defunct), + M_NECP, M_WAITOK | M_ZERO); uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent); uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ? - client->client_id : - flow_registration->registration_id)); + client->client_id : + flow_registration->registration_id)); flow_defunct->proc_pid = client->proc_pid; flow_defunct->agent_handle = client->agent_handle; @@ -928,7 +927,7 @@ necp_defunct_flow_registration(struct necp_client *client, static void necp_defunct_client_for_policy(struct necp_client *client, - struct _necp_flow_defunct_list *defunct_list) + struct _necp_flow_defunct_list *defunct_list) { NECP_CLIENT_ASSERT_LOCKED(client); @@ -983,7 +982,7 @@ necp_client_release_locked(struct necp_client *client) necp_client_free(client); } - return (old_ref == 1); + return old_ref == 1; } @@ -998,7 +997,7 @@ necp_client_update_observer_add_internal(struct necp_fd_data *observer_fd, struc } struct necp_client_update *client_update = _MALLOC(sizeof(struct necp_client_update) + client->parameters_length, - M_NECP, M_WAITOK | M_ZERO); + M_NECP, M_WAITOK | M_ZERO); if (client_update != NULL) { client_update->update_length = sizeof(struct necp_client_observer_update) + client->parameters_length; uuid_copy(client_update->client_id, client->client_id); @@ -1024,7 +1023,7 @@ necp_client_update_observer_update_internal(struct necp_fd_data *observer_fd, st } struct necp_client_update *client_update = _MALLOC(sizeof(struct necp_client_update) + client->result_length, - M_NECP, M_WAITOK | M_ZERO); + M_NECP, M_WAITOK | M_ZERO); if (client_update != NULL) { client_update->update_length = sizeof(struct necp_client_observer_update) + client->result_length; uuid_copy(client_update->client_id, client->client_id); @@ -1050,7 +1049,7 @@ necp_client_update_observer_remove_internal(struct necp_fd_data *observer_fd, st } struct necp_client_update *client_update = _MALLOC(sizeof(struct necp_client_update), - M_NECP, M_WAITOK | M_ZERO); + M_NECP, M_WAITOK | M_ZERO); if (client_update != NULL) { client_update->update_length = sizeof(struct necp_client_observer_update); uuid_copy(client_update->client_id, client->client_id); @@ -1123,8 +1122,8 @@ necp_client_update_observer_remove(struct necp_client *client) static void necp_destroy_client_flow_registration(struct necp_client *client, - struct necp_client_flow_registration *flow_registration, - pid_t pid, bool abort) + struct 
necp_client_flow_registration *flow_registration, + pid_t pid, bool abort) { NECP_CLIENT_ASSERT_LOCKED(client); @@ -1133,15 +1132,15 @@ necp_destroy_client_flow_registration(struct necp_client *client, struct necp_client_flow *temp_flow = NULL; LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) { if (search_flow->nexus && - !uuid_is_null(search_flow->u.nexus_agent)) { + !uuid_is_null(search_flow->u.nexus_agent)) { // Note that if we had defuncted the client earlier, this would result in a harmless ENOENT int netagent_error = netagent_client_message(search_flow->u.nexus_agent, - ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ? - client->client_id : - flow_registration->registration_id), - pid, client->agent_handle, - (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS : - NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS)); + ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ? + client->client_id : + flow_registration->registration_id), + pid, client->agent_handle, + (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS : + NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS)); if (netagent_error != 0 && netagent_error != ENOENT) { NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d)", netagent_error); } @@ -1195,10 +1194,10 @@ necp_destroy_client(struct necp_client *client, pid_t pid, bool abort) struct necp_client_assertion *temp_assertion = NULL; LIST_FOREACH_SAFE(search_assertion, &client->assertion_list, assertion_chain, temp_assertion) { int netagent_error = netagent_client_message(search_assertion->asserted_netagent, client->client_id, pid, - client->agent_handle, NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT); + client->agent_handle, NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT); if (netagent_error != 0) { NECPLOG((netagent_error == ENOENT ? 
LOG_DEBUG : LOG_ERR), - "necp_client_remove unassert agent error (%d)", netagent_error); + "necp_client_remove unassert agent error (%d)", netagent_error); } LIST_REMOVE(search_assertion, assertion_chain); FREE(search_assertion, M_NECP); @@ -1289,7 +1288,7 @@ necpop_close(struct fileglob *fg, vfs_context_t ctx) } } - return (error); + return error; } /// NECP client utilities @@ -1297,8 +1296,8 @@ necpop_close(struct fileglob *fg, vfs_context_t ctx) static inline bool necp_address_is_wildcard(const union necp_sockaddr_union * const addr) { - return ((addr->sa.sa_family == AF_INET && addr->sin.sin_addr.s_addr == INADDR_ANY) || - (addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr->sin6.sin6_addr))); + return (addr->sa.sa_family == AF_INET && addr->sin.sin_addr.s_addr == INADDR_ANY) || + (addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr->sin6.sin6_addr)); } static int @@ -1321,19 +1320,20 @@ necp_find_fd_data(int fd, struct necp_fd_data **fd_data) if ((*fd_data)->necp_fd_type != necp_fd_type_client) { // Not a client fd, ignore + fp_drop(p, fd, fp, 1); error = EINVAL; goto done; } done: proc_fdunlock(p); - return (error); + return error; } static struct necp_client_flow * necp_client_add_interface_flow(struct necp_client_flow_registration *flow_registration, - uint32_t interface_index) + uint32_t interface_index) { struct necp_client_flow *new_flow = mcache_alloc(necp_flow_cache, MCR_SLEEP); if (new_flow == NULL) { @@ -1357,11 +1357,11 @@ necp_client_add_interface_flow(struct necp_client_flow_registration *flow_regist static struct necp_client_flow * necp_client_add_interface_flow_if_needed(struct necp_client *client, - struct necp_client_flow_registration *flow_registration, - uint32_t interface_index) + struct necp_client_flow_registration *flow_registration, + uint32_t interface_index) { if (!client->allow_multiple_flows || - interface_index == IFSCOPE_NONE) { + interface_index == IFSCOPE_NONE) { // Interface not set, or client not allowed to use this mode return NULL; } @@ -1381,12 +1381,12 @@ necp_client_add_interface_flow_if_needed(struct necp_client *client, static void necp_client_add_interface_option_if_needed(struct necp_client *client, - uint32_t interface_index, - uint32_t interface_generation, - uuid_t *nexus_agent) + uint32_t interface_index, + uint32_t interface_generation, + uuid_t *nexus_agent) { if (interface_index == IFSCOPE_NONE || - (client->interface_option_count != 0 && !client->allow_multiple_flows)) { + (client->interface_option_count != 0 && !client->allow_multiple_flows)) { // Interface not set, or client not allowed to use this mode return; } @@ -1460,28 +1460,28 @@ necp_client_add_interface_option_if_needed(struct necp_client *client, static bool necp_client_flow_is_viable(proc_t proc, struct necp_client *client, - struct necp_client_flow *flow) + struct necp_client_flow *flow) { struct necp_aggregate_result result; bool ignore_address = (client->allow_multiple_flows && !flow->nexus && !flow->socket); flow->necp_flow_flags = 0; int error = necp_application_find_policy_match_internal(proc, client->parameters, - (u_int32_t)client->parameters_length, - &result, &flow->necp_flow_flags, - flow->interface_index, - &flow->local_addr, &flow->remote_addr, NULL, ignore_address); - - return (error == 0 && - result.routed_interface_index != IFSCOPE_NONE && - result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP); + (u_int32_t)client->parameters_length, + &result, &flow->necp_flow_flags, + flow->interface_index, + &flow->local_addr, 
&flow->remote_addr, NULL, ignore_address); + + return error == 0 && + result.routed_interface_index != IFSCOPE_NONE && + result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP; } static void necp_flow_add_interface_flows(proc_t proc, - struct necp_client *client, - struct necp_client_flow_registration *flow_registration, - bool send_initial) + struct necp_client *client, + struct necp_client_flow_registration *flow_registration, + bool send_initial) { // Traverse all interfaces and add a tracking flow if needed for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) { @@ -1513,8 +1513,8 @@ necp_flow_add_interface_flows(proc_t proc, static bool necp_client_update_flows(proc_t proc, - struct necp_client *client, - struct _necp_flow_defunct_list *defunct_list) + struct necp_client *client, + struct _necp_flow_defunct_list *defunct_list) { NECP_CLIENT_ASSERT_LOCKED(client); @@ -1541,7 +1541,7 @@ necp_client_update_flows(proc_t proc, } if ((old_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE) != - (flow->necp_flow_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE)) { + (flow->necp_flow_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE)) { client_updated = TRUE; } @@ -1585,7 +1585,7 @@ necp_client_update_flows(proc_t proc, } } - return (client_updated); + return client_updated; } static void @@ -1607,30 +1607,30 @@ necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client *client) static bool necp_netagent_applies_to_client(struct necp_client *client, - const struct necp_client_parsed_parameters *parameters, - uuid_t *netagent_uuid, bool allow_nexus, - uint32_t interface_index, uint32_t interface_generation) + const struct necp_client_parsed_parameters *parameters, + uuid_t *netagent_uuid, bool allow_nexus, + uint32_t interface_index, uint32_t interface_generation) { #pragma unused(interface_index, interface_generation) bool applies = FALSE; u_int32_t flags = netagent_get_flags(*netagent_uuid); if (!(flags & NETAGENT_FLAG_REGISTERED)) { // Unregistered agents never apply - return (applies); + return applies; } if (!allow_nexus && - (flags & NETAGENT_FLAG_NEXUS_PROVIDER)) { + (flags & NETAGENT_FLAG_NEXUS_PROVIDER)) { // Hide nexus providers unless allowed // Direct interfaces and direct policies are allowed to use a nexus // Delegate interfaces or re-scoped interfaces are not allowed - return (applies); + return applies; } if (uuid_compare(client->failed_trigger_agent.netagent_uuid, *netagent_uuid) == 0) { if (client->failed_trigger_agent.generation == netagent_get_generation(*netagent_uuid)) { // If this agent was triggered, and failed, and hasn't changed, keep hiding it - return (applies); + return applies; } else { // Mismatch generation, clear out old trigger uuid_clear(client->failed_trigger_agent.netagent_uuid); @@ -1663,7 +1663,7 @@ necp_netagent_applies_to_client(struct necp_client *client, for (int i = 0; i < NECP_MAX_PARSED_PARAMETERS; i++) { if (strlen(parameters->required_netagent_types[i].netagent_domain) == 0 || - strlen(parameters->required_netagent_types[i].netagent_type) == 0) { + strlen(parameters->required_netagent_types[i].netagent_type) == 0) { break; } @@ -1676,9 +1676,9 @@ necp_netagent_applies_to_client(struct necp_client *client, } if ((strlen(parameters->required_netagent_types[i].netagent_domain) == 0 || - strncmp(netagent_domain, parameters->required_netagent_types[i].netagent_domain, NETAGENT_DOMAINSIZE) == 0) && - (strlen(parameters->required_netagent_types[i].netagent_type) == 0 || - strncmp(netagent_type, 
parameters->required_netagent_types[i].netagent_type, NETAGENT_TYPESIZE) == 0)) { + strncmp(netagent_domain, parameters->required_netagent_types[i].netagent_domain, NETAGENT_DOMAINSIZE) == 0) && + (strlen(parameters->required_netagent_types[i].netagent_type) == 0 || + strncmp(netagent_type, parameters->required_netagent_types[i].netagent_type, NETAGENT_TYPESIZE) == 0)) { required = TRUE; break; } @@ -1692,13 +1692,13 @@ necp_netagent_applies_to_client(struct necp_client *client, } - return (applies); + return applies; } static void necp_client_add_agent_interface_options(struct necp_client *client, - const struct necp_client_parsed_parameters *parsed_parameters, - ifnet_t ifp) + const struct necp_client_parsed_parameters *parsed_parameters, + ifnet_t ifp) { if (ifp != NULL && ifp->if_agentids != NULL) { for (u_int32_t i = 0; i < ifp->if_agentcount; i++) { @@ -1707,7 +1707,7 @@ necp_client_add_agent_interface_options(struct necp_client *client, } // Relies on the side effect that nexus agents that apply will create flows (void)necp_netagent_applies_to_client(client, parsed_parameters, &ifp->if_agentids[i], TRUE, - ifp->if_index, ifnet_get_generation(ifp)); + ifp->if_index, ifnet_get_generation(ifp)); } } } @@ -1716,18 +1716,18 @@ static inline bool necp_client_address_is_valid(struct sockaddr *address) { if (address->sa_family == AF_INET) { - return (address->sa_len == sizeof(struct sockaddr_in)); + return address->sa_len == sizeof(struct sockaddr_in); } else if (address->sa_family == AF_INET6) { - return (address->sa_len == sizeof(struct sockaddr_in6)); + return address->sa_len == sizeof(struct sockaddr_in6); } else { - return (FALSE); + return FALSE; } } static int necp_client_parse_parameters(u_int8_t *parameters, - u_int32_t parameters_size, - struct necp_client_parsed_parameters *parsed_parameters) + u_int32_t parameters_size, + struct necp_client_parsed_parameters *parsed_parameters) { int error = 0; size_t offset = 0; @@ -1744,7 +1744,7 @@ necp_client_parse_parameters(u_int8_t *parameters, u_int32_t num_avoided_agent_types = 0; if (parsed_parameters == NULL) { - return (EINVAL); + return EINVAL; } memset(parsed_parameters, 0, sizeof(struct necp_client_parsed_parameters)); @@ -1763,233 +1763,233 @@ necp_client_parse_parameters(u_int8_t *parameters, u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL); if (value != NULL) { switch (type) { - case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: { - if (length <= IFXNAMSIZ && length > 0) { - ifnet_t bound_interface = NULL; - char interface_name[IFXNAMSIZ]; - memcpy(interface_name, value, length); - interface_name[length - 1] = 0; // Make sure the string is NULL terminated - if (ifnet_find_by_name(interface_name, &bound_interface) == 0) { - parsed_parameters->required_interface_index = bound_interface->if_index; - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF; - ifnet_release(bound_interface); - } + case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: { + if (length <= IFXNAMSIZ && length > 0) { + ifnet_t bound_interface = NULL; + char interface_name[IFXNAMSIZ]; + memcpy(interface_name, value, length); + interface_name[length - 1] = 0; // Make sure the string is NULL terminated + if (ifnet_find_by_name(interface_name, &bound_interface) == 0) { + parsed_parameters->required_interface_index = bound_interface->if_index; + parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF; + ifnet_release(bound_interface); } - break; } - case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: { - if (length >= 
sizeof(struct necp_policy_condition_addr)) { - struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value; - if (necp_client_address_is_valid(&address_struct->address.sa)) { - memcpy(&parsed_parameters->local_addr, &address_struct->address, sizeof(address_struct->address)); - if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) { - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR; - } - if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) || - (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) { - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT; - } + break; + } + case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: { + if (length >= sizeof(struct necp_policy_condition_addr)) { + struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value; + if (necp_client_address_is_valid(&address_struct->address.sa)) { + memcpy(&parsed_parameters->local_addr, &address_struct->address, sizeof(address_struct->address)); + if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) { + parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR; } - } - break; - } - case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: { - if (length >= sizeof(struct necp_client_endpoint)) { - struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value; - if (necp_client_address_is_valid(&endpoint->u.sa)) { - memcpy(&parsed_parameters->local_addr, &endpoint->u.sa, sizeof(union necp_sockaddr_union)); - if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) { - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR; - } - if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) || - (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) { - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT; - } + if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) || + (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) { + parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT; } } - break; } - case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: { - if (length >= sizeof(struct necp_policy_condition_addr)) { - struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value; - if (necp_client_address_is_valid(&address_struct->address.sa)) { - memcpy(&parsed_parameters->remote_addr, &address_struct->address, sizeof(address_struct->address)); - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR; + break; + } + case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: { + if (length >= sizeof(struct necp_client_endpoint)) { + struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value; + if (necp_client_address_is_valid(&endpoint->u.sa)) { + memcpy(&parsed_parameters->local_addr, &endpoint->u.sa, sizeof(union necp_sockaddr_union)); + if (!necp_address_is_wildcard(&parsed_parameters->local_addr)) { + parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR; } - } - break; - } - case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: { - if (length >= sizeof(struct necp_client_endpoint)) { - struct necp_client_endpoint *endpoint 
= (struct necp_client_endpoint *)(void *)value; - if (necp_client_address_is_valid(&endpoint->u.sa)) { - memcpy(&parsed_parameters->remote_addr, &endpoint->u.sa, sizeof(union necp_sockaddr_union)); - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR; + if ((parsed_parameters->local_addr.sa.sa_family == AF_INET && parsed_parameters->local_addr.sin.sin_port) || + (parsed_parameters->local_addr.sa.sa_family == AF_INET6 && parsed_parameters->local_addr.sin6.sin6_port)) { + parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT; } } - break; } - case NECP_CLIENT_PARAMETER_PROHIBIT_INTERFACE: { - if (num_prohibited_interfaces >= NECP_MAX_PARSED_PARAMETERS) { - break; - } - if (length <= IFXNAMSIZ && length > 0) { - memcpy(parsed_parameters->prohibited_interfaces[num_prohibited_interfaces], value, length); - parsed_parameters->prohibited_interfaces[num_prohibited_interfaces][length - 1] = 0; // Make sure the string is NULL terminated - num_prohibited_interfaces++; - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF; + break; + } + case NECP_CLIENT_PARAMETER_REMOTE_ADDRESS: { + if (length >= sizeof(struct necp_policy_condition_addr)) { + struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value; + if (necp_client_address_is_valid(&address_struct->address.sa)) { + memcpy(&parsed_parameters->remote_addr, &address_struct->address, sizeof(address_struct->address)); + parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR; } - break; } - case NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE: { - if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) { - break; - } - if (length >= sizeof(u_int8_t)) { - memcpy(&parsed_parameters->required_interface_type, value, sizeof(u_int8_t)); - if (parsed_parameters->required_interface_type) { - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE; - } + break; + } + case NECP_CLIENT_PARAMETER_REMOTE_ENDPOINT: { + if (length >= sizeof(struct necp_client_endpoint)) { + struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value; + if (necp_client_address_is_valid(&endpoint->u.sa)) { + memcpy(&parsed_parameters->remote_addr, &endpoint->u.sa, sizeof(union necp_sockaddr_union)); + parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR; } - break; } - case NECP_CLIENT_PARAMETER_PROHIBIT_IF_TYPE: { - if (num_prohibited_interface_types >= NECP_MAX_PARSED_PARAMETERS) { - break; - } - if (length >= sizeof(u_int8_t)) { - memcpy(&parsed_parameters->prohibited_interface_types[num_prohibited_interface_types], value, sizeof(u_int8_t)); - num_prohibited_interface_types++; - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE; - } + break; + } + case NECP_CLIENT_PARAMETER_PROHIBIT_INTERFACE: { + if (num_prohibited_interfaces >= NECP_MAX_PARSED_PARAMETERS) { break; } - case NECP_CLIENT_PARAMETER_REQUIRE_AGENT: { - if (num_required_agents >= NECP_MAX_PARSED_PARAMETERS) { - break; - } - if (length >= sizeof(uuid_t)) { - memcpy(&parsed_parameters->required_netagents[num_required_agents], value, sizeof(uuid_t)); - num_required_agents++; - parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT; - } - break; + if (length <= IFXNAMSIZ && length > 0) { + memcpy(parsed_parameters->prohibited_interfaces[num_prohibited_interfaces], value, length); + 
parsed_parameters->prohibited_interfaces[num_prohibited_interfaces][length - 1] = 0; // Make sure the string is NULL terminated
+                num_prohibited_interfaces++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF;
             }
-        case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT: {
-            if (num_prohibited_agents >= NECP_MAX_PARSED_PARAMETERS) {
-                break;
-            }
-            if (length >= sizeof(uuid_t)) {
-                memcpy(&parsed_parameters->prohibited_netagents[num_prohibited_agents], value, sizeof(uuid_t));
-                num_prohibited_agents++;
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT;
-            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_REQUIRE_IF_TYPE: {
+            if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) {
                 break;
             }
-        case NECP_CLIENT_PARAMETER_PREFER_AGENT: {
-            if (num_preferred_agents >= NECP_MAX_PARSED_PARAMETERS) {
-                break;
-            }
-            if (length >= sizeof(uuid_t)) {
-                memcpy(&parsed_parameters->preferred_netagents[num_preferred_agents], value, sizeof(uuid_t));
-                num_preferred_agents++;
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT;
+            if (length >= sizeof(u_int8_t)) {
+                memcpy(&parsed_parameters->required_interface_type, value, sizeof(u_int8_t));
+                if (parsed_parameters->required_interface_type) {
+                    parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE;
                 }
-            break;
             }
-        case NECP_CLIENT_PARAMETER_AVOID_AGENT: {
-            if (num_avoided_agents >= NECP_MAX_PARSED_PARAMETERS) {
-                break;
-            }
-            if (length >= sizeof(uuid_t)) {
-                memcpy(&parsed_parameters->avoided_netagents[num_avoided_agents], value, sizeof(uuid_t));
-                num_avoided_agents++;
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT;
-            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_PROHIBIT_IF_TYPE: {
+            if (num_prohibited_interface_types >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
-        case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: {
-            if (num_required_agent_types >= NECP_MAX_PARSED_PARAMETERS) {
-                break;
-            }
-            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
-                memcpy(&parsed_parameters->required_netagent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
-                num_required_agent_types++;
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE;
-            }
-            break;
+            if (length >= sizeof(u_int8_t)) {
+                memcpy(&parsed_parameters->prohibited_interface_types[num_prohibited_interface_types], value, sizeof(u_int8_t));
+                num_prohibited_interface_types++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE;
             }
-        case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT_TYPE: {
-            if (num_prohibited_agent_types >= NECP_MAX_PARSED_PARAMETERS) {
-                break;
-            }
-            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
-                memcpy(&parsed_parameters->prohibited_netagent_types[num_prohibited_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
-                num_prohibited_agent_types++;
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE;
-            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_REQUIRE_AGENT: {
+            if (num_required_agents >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
-        case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
-            if (num_preferred_agent_types >= NECP_MAX_PARSED_PARAMETERS) {
-                break;
-            }
-            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
-                memcpy(&parsed_parameters->preferred_netagent_types[num_preferred_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
-                num_preferred_agent_types++;
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE;
-            }
-            break;
+            if (length >= sizeof(uuid_t)) {
+                memcpy(&parsed_parameters->required_netagents[num_required_agents], value, sizeof(uuid_t));
+                num_required_agents++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT;
             }
-        case NECP_CLIENT_PARAMETER_AVOID_AGENT_TYPE: {
-            if (num_avoided_agent_types >= NECP_MAX_PARSED_PARAMETERS) {
-                break;
-            }
-            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
-                memcpy(&parsed_parameters->avoided_netagent_types[num_avoided_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
-                num_avoided_agent_types++;
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE;
-            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT: {
+            if (num_prohibited_agents >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
-        case NECP_CLIENT_PARAMETER_FLAGS: {
-            if (length >= sizeof(u_int32_t)) {
-                memcpy(&parsed_parameters->flags, value, sizeof(parsed_parameters->flags));
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLAGS;
-            }
+            if (length >= sizeof(uuid_t)) {
+                memcpy(&parsed_parameters->prohibited_netagents[num_prohibited_agents], value, sizeof(uuid_t));
+                num_prohibited_agents++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_PREFER_AGENT: {
+            if (num_preferred_agents >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
-        case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
-            if (length >= sizeof(parsed_parameters->ip_protocol)) {
-                memcpy(&parsed_parameters->ip_protocol, value, sizeof(parsed_parameters->ip_protocol));
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
-            }
+            if (length >= sizeof(uuid_t)) {
+                memcpy(&parsed_parameters->preferred_netagents[num_preferred_agents], value, sizeof(uuid_t));
+                num_preferred_agents++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_AVOID_AGENT: {
+            if (num_avoided_agents >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
-        case NECP_CLIENT_PARAMETER_PID: {
-            if (length >= sizeof(parsed_parameters->effective_pid)) {
-                memcpy(&parsed_parameters->effective_pid, value, sizeof(parsed_parameters->effective_pid));
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID;
-            }
+            if (length >= sizeof(uuid_t)) {
+                memcpy(&parsed_parameters->avoided_netagents[num_avoided_agents], value, sizeof(uuid_t));
+                num_avoided_agents++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_REQUIRE_AGENT_TYPE: {
+            if (num_required_agent_types >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
-        case NECP_CLIENT_PARAMETER_APPLICATION: {
-            if (length >= sizeof(parsed_parameters->effective_uuid)) {
-                memcpy(&parsed_parameters->effective_uuid, value, sizeof(parsed_parameters->effective_uuid));
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
-            }
+            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
+                memcpy(&parsed_parameters->required_netagent_types[num_required_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
+                num_required_agent_types++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_PROHIBIT_AGENT_TYPE: {
+            if (num_prohibited_agent_types >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
-        case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
-            if (length >= sizeof(parsed_parameters->traffic_class)) {
-                memcpy(&parsed_parameters->traffic_class, value, sizeof(parsed_parameters->traffic_class));
-                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS;
-            }
+            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
+                memcpy(&parsed_parameters->prohibited_netagent_types[num_prohibited_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
+                num_prohibited_agent_types++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_PREFER_AGENT_TYPE: {
+            if (num_preferred_agent_types >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
-        default: {
+            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
+                memcpy(&parsed_parameters->preferred_netagent_types[num_preferred_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
+                num_preferred_agent_types++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_AVOID_AGENT_TYPE: {
+            if (num_avoided_agent_types >= NECP_MAX_PARSED_PARAMETERS) {
                 break;
             }
+            if (length >= sizeof(struct necp_client_parameter_netagent_type)) {
+                memcpy(&parsed_parameters->avoided_netagent_types[num_avoided_agent_types], value, sizeof(struct necp_client_parameter_netagent_type));
+                num_avoided_agent_types++;
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_FLAGS: {
+            if (length >= sizeof(u_int32_t)) {
+                memcpy(&parsed_parameters->flags, value, sizeof(parsed_parameters->flags));
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_FLAGS;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_IP_PROTOCOL: {
+            if (length >= sizeof(parsed_parameters->ip_protocol)) {
+                memcpy(&parsed_parameters->ip_protocol, value, sizeof(parsed_parameters->ip_protocol));
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_PID: {
+            if (length >= sizeof(parsed_parameters->effective_pid)) {
+                memcpy(&parsed_parameters->effective_pid, value, sizeof(parsed_parameters->effective_pid));
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_APPLICATION: {
+            if (length >= sizeof(parsed_parameters->effective_uuid)) {
+                memcpy(&parsed_parameters->effective_uuid, value, sizeof(parsed_parameters->effective_uuid));
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID;
+            }
+            break;
+        }
+        case NECP_CLIENT_PARAMETER_TRAFFIC_CLASS: {
+            if (length >= sizeof(parsed_parameters->traffic_class)) {
+                memcpy(&parsed_parameters->traffic_class, value, sizeof(parsed_parameters->traffic_class));
+                parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS;
+            }
+            break;
+        }
+        default: {
+            break;
+        }
         }
     }
 }
@@ -1997,15 +1997,15 @@ necp_client_parse_parameters(u_int8_t *parameters,
         offset += sizeof(struct necp_tlv_header) + length;
     }
 
-    return (error);
+    return error;
 }
 
 static int
 necp_client_parse_result(u_int8_t *result,
-                         u_int32_t result_size,
-                         union necp_sockaddr_union *local_address,
-                         union necp_sockaddr_union *remote_address,
-                         void **flow_stats)
+    u_int32_t result_size,
+    union necp_sockaddr_union *local_address,
+    union necp_sockaddr_union *remote_address,
+    void **flow_stats)
 {
 #pragma unused(flow_stats)
     int error = 0;
@@ -2019,27 +2019,27 @@ necp_client_parse_result(u_int8_t *result,
         u_int8_t *value = necp_buffer_get_tlv_value(result, offset, NULL);
         if (value != NULL) {
             switch (type) {
-                case NECP_CLIENT_RESULT_LOCAL_ENDPOINT: {
-                    if (length >= sizeof(struct necp_client_endpoint)) {
-                        struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
-                        if (local_address != NULL && necp_client_address_is_valid(&endpoint->u.sa)) {
-                            memcpy(local_address, &endpoint->u.sa, endpoint->u.sa.sa_len);
-                        }
+            case NECP_CLIENT_RESULT_LOCAL_ENDPOINT: {
+                if (length >= sizeof(struct necp_client_endpoint)) {
+                    struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
+                    if (local_address != NULL && necp_client_address_is_valid(&endpoint->u.sa)) {
+                        memcpy(local_address, &endpoint->u.sa, endpoint->u.sa.sa_len);
                     }
-                    break;
                 }
-                case NECP_CLIENT_RESULT_REMOTE_ENDPOINT: {
-                    if (length >= sizeof(struct necp_client_endpoint)) {
-                        struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
-                        if (remote_address != NULL && necp_client_address_is_valid(&endpoint->u.sa)) {
-                            memcpy(remote_address, &endpoint->u.sa, endpoint->u.sa.sa_len);
-                        }
+                break;
+            }
+            case NECP_CLIENT_RESULT_REMOTE_ENDPOINT: {
+                if (length >= sizeof(struct necp_client_endpoint)) {
+                    struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
+                    if (remote_address != NULL && necp_client_address_is_valid(&endpoint->u.sa)) {
+                        memcpy(remote_address, &endpoint->u.sa, endpoint->u.sa.sa_len);
                     }
-                    break;
-                }
-                default: {
-                    break;
-                }
                 }
+                break;
+            }
+            default: {
+                break;
+            }
+            }
         }
     }
@@ -2047,7 +2047,7 @@ necp_client_parse_result(u_int8_t *result,
         offset += sizeof(struct necp_tlv_header) + length;
     }
 
-    return (error);
+    return error;
 }
 
 static struct necp_client_flow_registration *
@@ -2089,7 +2089,7 @@ necp_client_create_flow_registration(struct necp_fd_data *fd_data, struct necp_c
 static void
 necp_client_add_socket_flow(struct necp_client_flow_registration *flow_registration,
-                            struct inpcb *inp)
+    struct inpcb *inp)
 {
     struct necp_client_flow *new_flow = mcache_alloc(necp_flow_cache, MCR_SLEEP);
     if (new_flow == NULL) {
@@ -2160,13 +2160,13 @@ necp_client_register_socket_flow(pid_t pid, uuid_t client_id, struct inpcb *inp)
         }
     }
 
-    return (error);
+    return error;
 }
 
 static void
 necp_client_add_multipath_interface_flows(struct necp_client_flow_registration *flow_registration,
-                                          struct necp_client *client,
-                                          struct mppcb *mpp)
+    struct necp_client *client,
+    struct mppcb *mpp)
 {
     flow_registration->interface_handle = mpp;
     flow_registration->interface_cb = mpp->necp_cb;
@@ -2228,15 +2228,15 @@ necp_client_register_multipath_cb(pid_t pid, uuid_t client_id, struct mppcb *mpp
         error = ENOENT;
     }
 
-    return (error);
+    return error;
 }
 
-#define NETAGENT_DOMAIN_RADIO_MANAGER   "WirelessRadioManager"
-#define NETAGENT_TYPE_RADIO_MANAGER     "WirelessRadioManager:BB Manager"
+#define NETAGENT_DOMAIN_RADIO_MANAGER "WirelessRadioManager"
+#define NETAGENT_TYPE_RADIO_MANAGER "WirelessRadioManager:BB Manager"
 
 static int
 necp_client_lookup_bb_radio_manager(struct necp_client *client,
-                                    uuid_t netagent_uuid)
+    uuid_t netagent_uuid)
 {
     char netagent_domain[NETAGENT_DOMAINSIZE];
     char netagent_type[NETAGENT_TYPESIZE];
@@ -2250,7 +2250,7 @@ necp_client_lookup_bb_radio_manager(struct necp_client *client,
     }
 
     error = necp_application_find_policy_match_internal(proc, client->parameters, (u_int32_t)client->parameters_length,
-                                                        &result, NULL, 0, NULL, NULL, NULL, true);
+        &result, NULL, 0, NULL, NULL, NULL, true);
 
     proc_rele(proc);
     proc = PROC_NULL;
@@ -2347,7 +2347,7 @@ necp_client_assert_bb_radio_manager(uuid_t client_id, bool assert)
 
     NECP_CLIENT_TREE_UNLOCK();
 
-    return (error);
+    return error;
 }
 
 static int
@@ -2405,7 +2405,7 @@ necp_client_unregister_socket_flow(uuid_t client_id, void *handle)
         error = ENOENT;
     }
 
-    return (error);
+    return error;
 }
 
 static int
@@ -2428,7 +2428,7 @@ necp_client_unregister_multipath_cb(uuid_t client_id, void *handle)
             struct necp_client_flow *temp_flow = NULL;
             LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
                 if (!search_flow->socket && !search_flow->nexus &&
-                    search_flow->u.socket_handle == handle) {
+                    search_flow->u.socket_handle == handle) {
                     search_flow->u.socket_handle = NULL;
                     search_flow->u.cb = NULL;
                 }
@@ -2447,7 +2447,7 @@ necp_client_unregister_multipath_cb(uuid_t client_id, void *handle)
         error = ENOENT;
     }
 
-    return (error);
+    return error;
 }
 
 int
@@ -2528,9 +2528,9 @@ necp_client_assign_from_socket(pid_t pid, uuid_t client_id, struct inpcb *inp)
                 uuid_clear(empty_uuid);
                 flow->assigned = TRUE;
                 flow->assigned_results = necp_create_nexus_assign_message(empty_uuid, 0, NULL, 0,
-                                                                          (struct necp_client_endpoint *)&flow->local_addr,
-                                                                          (struct necp_client_endpoint *)&flow->remote_addr,
-                                                                          0, NULL, &flow->assigned_results_length);
+                    (struct necp_client_endpoint *)&flow->local_addr,
+                    (struct necp_client_endpoint *)&flow->remote_addr,
+                    0, NULL, &flow->assigned_results_length);
                 flow_registration->flow_result_read = FALSE;
                 client_updated = TRUE;
                 break;
@@ -2562,7 +2562,7 @@ necp_client_assign_from_socket(pid_t pid, uuid_t client_id, struct inpcb *inp)
         }
     }
 
-    return (error);
+    return error;
 }
 
 int
@@ -2595,8 +2595,8 @@ necp_update_flow_protoctl_event(uuid_t netagent_uuid, uuid_t client_id,
         LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
             // Verify that the client nexus agent matches
             if (flow->nexus &&
-                uuid_compare(flow->u.nexus_agent,
-                             netagent_uuid) == 0) {
+                uuid_compare(flow->u.nexus_agent,
+                netagent_uuid) == 0) {
                 flow->has_protoctl_event = TRUE;
                 flow->protoctl_event.protoctl_event_code = protoctl_event_code;
                 flow->protoctl_event.protoctl_event_val = protoctl_event_val;
@@ -2630,18 +2630,18 @@ necp_update_flow_protoctl_event(uuid_t netagent_uuid, uuid_t client_id,
     } else if (!client_updated) {
         error = EINVAL;
     }
-    return (error);
+    return error;
 }
 
 static bool
 necp_assign_client_result_locked(struct proc *proc,
-                                 struct necp_fd_data *client_fd,
-                                 struct necp_client *client,
-                                 struct necp_client_flow_registration *flow_registration,
-                                 uuid_t netagent_uuid,
-                                 u_int8_t *assigned_results,
-                                 size_t assigned_results_length,
-                                 bool notify_fd)
+    struct necp_fd_data *client_fd,
+    struct necp_client *client,
+    struct necp_client_flow_registration *flow_registration,
+    uuid_t netagent_uuid,
+    u_int8_t *assigned_results,
+    size_t assigned_results_length,
+    bool notify_fd)
 {
     bool client_updated = FALSE;
@@ -2652,7 +2652,7 @@ necp_assign_client_result_locked(struct proc *proc,
     LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
         // Verify that the client nexus agent matches
         if (flow->nexus &&
-            uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
+            uuid_compare(flow->u.nexus_agent, netagent_uuid) == 0) {
             // Release prior results and route
             if (flow->assigned_results != NULL) {
                 FREE(flow->assigned_results, M_NETAGENT);
@@ -2662,7 +2662,7 @@ necp_assign_client_result_locked(struct proc *proc,
             void *nexus_stats = NULL;
             if (assigned_results != NULL && assigned_results_length > 0) {
                 int error = necp_client_parse_result(assigned_results, (u_int32_t)assigned_results_length,
-                                                     &flow->local_addr, &flow->remote_addr, &nexus_stats);
+                    &flow->local_addr, &flow->remote_addr, &nexus_stats);
                 VERIFY(error == 0);
             }
 
@@ -2682,12 +2682,12 @@ necp_assign_client_result_locked(struct proc *proc,
     }
 
     // if not updated, client must free assigned_results
-    return (client_updated);
+    return client_updated;
 }
 
 int
 necp_assign_client_result(uuid_t netagent_uuid, uuid_t client_id,
-                          u_int8_t *assigned_results, size_t assigned_results_length)
+    u_int8_t *assigned_results, size_t assigned_results_length)
 {
     int error = 0;
     struct necp_fd_data *client_fd = NULL;
@@ -2710,7 +2710,7 @@ necp_assign_client_result(uuid_t netagent_uuid, uuid_t client_id,
                 // Found the right client and flow!
                 found_client = TRUE;
                 if (necp_assign_client_result_locked(proc, client_fd, client, flow_registration, netagent_uuid,
-                                                     assigned_results, assigned_results_length, true)) {
+                    assigned_results, assigned_results_length, true)) {
                     client_updated = TRUE;
                 }
             }
@@ -2736,18 +2736,18 @@ necp_assign_client_result(uuid_t netagent_uuid, uuid_t client_id,
         error = EINVAL;
     }
 
-    return (error);
+    return error;
 }
 
 /// Client updating
 
 static bool
 necp_update_parsed_parameters(struct necp_client_parsed_parameters *parsed_parameters,
-                              struct necp_aggregate_result *result)
+    struct necp_aggregate_result *result)
 {
     if (parsed_parameters == NULL ||
-        result == NULL) {
-        return (false);
+        result == NULL) {
+        return false;
     }
 
     bool updated = false;
@@ -2791,13 +2791,12 @@ necp_update_parsed_parameters(struct necp_client_parsed_parameters *parsed_param
             if (netagent_get_agent_domain_and_type(result->netagents[i], remove_agent_domain, remove_agent_type)) {
                 for (int j = 0; j < NECP_MAX_PARSED_PARAMETERS; j++) {
                     if (strlen(parsed_parameters->required_netagent_types[j].netagent_domain) == 0 &&
-                        strlen(parsed_parameters->required_netagent_types[j].netagent_type) == 0) {
+                        strlen(parsed_parameters->required_netagent_types[j].netagent_type) == 0) {
                         break;
                     }
 
                     if (strncmp(parsed_parameters->required_netagent_types[j].netagent_domain, remove_agent_domain, NETAGENT_DOMAINSIZE) == 0 &&
-                        strncmp(parsed_parameters->required_netagent_types[j].netagent_type, remove_agent_type, NETAGENT_TYPESIZE) == 0) {
-
+                        strncmp(parsed_parameters->required_netagent_types[j].netagent_type, remove_agent_type, NETAGENT_TYPESIZE) == 0) {
                         updated = true;
 
                         if (j == NECP_MAX_PARSED_PARAMETERS - 1) {
@@ -2807,8 +2806,8 @@ necp_update_parsed_parameters(struct necp_client_parsed_parameters *parsed_param
                         } else {
                             // Move the parameters down, clear the last entry
                             memmove(&parsed_parameters->required_netagent_types[j],
-                                    &parsed_parameters->required_netagent_types[j + 1],
-                                    sizeof(struct necp_client_parameter_netagent_type) * (NECP_MAX_PARSED_PARAMETERS - (j + 1)));
+                                &parsed_parameters->required_netagent_types[j + 1],
+                                sizeof(struct necp_client_parameter_netagent_type) * (NECP_MAX_PARSED_PARAMETERS - (j + 1)));
                             memset(&parsed_parameters->required_netagent_types[NECP_MAX_PARSED_PARAMETERS - 1], 0, sizeof(struct necp_client_parameter_netagent_type));
                             // Continue, don't increment but look at the new shifted item instead
                             continue;
@@ -2823,32 +2822,32 @@ necp_update_parsed_parameters(struct necp_client_parsed_parameters *parsed_param
     }
 
     if (updated &&
-        parsed_parameters->required_interface_index != IFSCOPE_NONE &&
-        (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
+        parsed_parameters->required_interface_index != IFSCOPE_NONE &&
+        (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) == 0) {
         // A required interface index was added after the fact. Clear it.
         parsed_parameters->required_interface_index = IFSCOPE_NONE;
     }
 
-    return (updated);
+    return updated;
 }
 
 static inline bool
 necp_agent_types_match(const char *agent_domain1, const char *agent_type1,
-                       const char *agent_domain2, const char *agent_type2)
+    const char *agent_domain2, const char *agent_type2)
 {
-    return ((strlen(agent_domain1) == 0 ||
-             strncmp(agent_domain2, agent_domain1, NETAGENT_DOMAINSIZE) == 0) &&
-            (strlen(agent_type1) == 0 ||
-             strncmp(agent_type2, agent_type1, NETAGENT_TYPESIZE) == 0));
+    return (strlen(agent_domain1) == 0 ||
+           strncmp(agent_domain2, agent_domain1, NETAGENT_DOMAINSIZE) == 0) &&
+           (strlen(agent_type1) == 0 ||
+           strncmp(agent_type2, agent_type1, NETAGENT_TYPESIZE) == 0);
 }
 
 static inline bool
 necp_calculate_client_result(proc_t proc,
-                             struct necp_client *client,
-                             struct necp_client_parsed_parameters *parsed_parameters,
-                             struct necp_aggregate_result *result,
-                             u_int32_t *flags)
+    struct necp_client *client,
+    struct necp_client_parsed_parameters *parsed_parameters,
+    struct necp_aggregate_result *result,
+    u_int32_t *flags)
 {
     struct rtentry *route = NULL;
@@ -2862,14 +2861,14 @@ necp_calculate_client_result(proc_t proc,
         // Interface found or not needed, match policy.
         memset(result, 0, sizeof(*result));
         int error = necp_application_find_policy_match_internal(proc, client->parameters,
-                                                                (u_int32_t)client->parameters_length,
-                                                                result, flags, matching_if_index,
-                                                                NULL, NULL, &route, false);
+            (u_int32_t)client->parameters_length,
+            result, flags, matching_if_index,
+            NULL, NULL, &route, false);
         if (error != 0) {
             if (route != NULL) {
                 rtfree(route);
             }
-            return (FALSE);
+            return FALSE;
         }
 
         if (validate_agents) {
@@ -2902,7 +2901,7 @@ necp_calculate_client_result(proc_t proc,
             if (!requirement_failed && parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
                 for (int i = 0; i < NECP_MAX_PARSED_PARAMETERS; i++) {
                     if (strlen(parsed_parameters->required_netagent_types[i].netagent_domain) == 0 &&
-                        strlen(parsed_parameters->required_netagent_types[i].netagent_type) == 0) {
+                        strlen(parsed_parameters->required_netagent_types[i].netagent_type) == 0) {
                         break;
                     }
 
@@ -2917,8 +2916,8 @@ necp_calculate_client_result(proc_t proc,
                         if (netagent_get_agent_domain_and_type(result->netagents[j], policy_agent_domain, policy_agent_type)) {
                             if (necp_agent_types_match(parsed_parameters->required_netagent_types[i].netagent_domain,
-                                                       parsed_parameters->required_netagent_types[i].netagent_type,
-                                                       policy_agent_domain, policy_agent_type)) {
+                                parsed_parameters->required_netagent_types[i].netagent_type,
+                                policy_agent_domain, policy_agent_type)) {
                                 requirement_found = TRUE;
                                 break;
                             }
@@ -2938,7 +2937,7 @@ necp_calculate_client_result(proc_t proc,
                 if (route != NULL) {
                     rtfree(route);
                 }
-                return (TRUE);
+                return TRUE;
             }
         }
 
@@ -2954,14 +2953,14 @@ necp_calculate_client_result(proc_t proc,
         memset(result, 0, sizeof(*result));
     }
 
-    return (TRUE);
+    return TRUE;
 }
 
 static bool
 necp_update_client_result(proc_t proc,
-                          struct necp_fd_data *client_fd,
-                          struct necp_client *client,
-                          struct _necp_flow_defunct_list *defunct_list)
+    struct necp_fd_data *client_fd,
+    struct necp_client *client,
+    struct _necp_flow_defunct_list *defunct_list)
 {
     struct necp_client_result_netagent netagent;
     struct necp_aggregate_result result;
@@ -2972,8 +2971,8 @@ necp_update_client_result(proc_t proc,
     MALLOC(parsed_parameters, struct necp_client_parsed_parameters *, sizeof(*parsed_parameters), M_NECP, (M_WAITOK | M_ZERO));
     if (parsed_parameters == NULL) {
-        NECPLOG0(LOG_ERR, "Failed to allocate parsed parameters");
-        return (FALSE);
+        NECPLOG0(LOG_ERR, "Failed to allocate parsed parameters");
+        return FALSE;
     }
 
     // Nexus flows will be brought back if they are still valid
@@ -2982,7 +2981,7 @@ necp_update_client_result(proc_t proc,
     int error = necp_client_parse_parameters(client->parameters, (u_int32_t)client->parameters_length, parsed_parameters);
     if (error != 0) {
         FREE(parsed_parameters, M_NECP);
-        return (FALSE);
+        return FALSE;
     }
 
     // Update saved IP protocol
@@ -2991,14 +2990,14 @@ necp_update_client_result(proc_t proc,
     // Calculate the policy result
     if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags)) {
         FREE(parsed_parameters, M_NECP);
-        return (FALSE);
+        return FALSE;
     }
 
     if (necp_update_parsed_parameters(parsed_parameters, &result)) {
         // Changed the parameters based on result, try again (only once)
         if (!necp_calculate_client_result(proc, client, parsed_parameters, &result, &flags)) {
             FREE(parsed_parameters, M_NECP);
-            return (FALSE);
+            return FALSE;
         }
     }
 
@@ -3006,8 +3005,8 @@ necp_update_client_result(proc_t proc,
     client->policy_id = result.policy_id;
 
     if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_MULTIPATH) ||
-        ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
-         result.routing_result != NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED)) {
+        ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
+        result.routing_result != NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED)) {
         client->allow_multiple_flows = TRUE;
     } else {
         client->allow_multiple_flows = FALSE;
@@ -3015,15 +3014,15 @@ necp_update_client_result(proc_t proc,
 
     // If the original request was scoped, and the policy result matches, make sure the result is scoped
     if ((result.routing_result == NECP_KERNEL_POLICY_RESULT_NONE ||
-         result.routing_result == NECP_KERNEL_POLICY_RESULT_PASS) &&
-        result.routed_interface_index != IFSCOPE_NONE &&
-        parsed_parameters->required_interface_index == result.routed_interface_index) {
+        result.routing_result == NECP_KERNEL_POLICY_RESULT_PASS) &&
+        result.routed_interface_index != IFSCOPE_NONE &&
+        parsed_parameters->required_interface_index == result.routed_interface_index) {
         result.routing_result = NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED;
         result.routing_result_parameter.scoped_interface_index = result.routed_interface_index;
     }
 
     if (defunct_list != NULL &&
-        result.routing_result == NECP_KERNEL_POLICY_RESULT_DROP) {
+        result.routing_result == NECP_KERNEL_POLICY_RESULT_DROP) {
         // If we are forced to drop the client, defunct it if it has flows
         necp_defunct_client_for_policy(client, defunct_list);
     }
@@ -3043,42 +3042,42 @@ necp_update_client_result(proc_t proc,
     u_int8_t *cursor = client->result;
     cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FLAGS, sizeof(flags), &flags, &updated, client->result, sizeof(client->result));
     cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_CLIENT_ID, sizeof(uuid_t), client->client_id, &updated,
-                                                client->result, sizeof(client->result));
+        client->result, sizeof(client->result));
     cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT, sizeof(result.routing_result), &result.routing_result, &updated,
-                                                client->result, sizeof(client->result));
+        client->result, sizeof(client->result));
     if (result.routing_result_parameter.tunnel_interface_index != 0) {
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_POLICY_RESULT_PARAMETER,
-                                                    sizeof(result.routing_result_parameter), &result.routing_result_parameter, &updated,
-                                                    client->result, sizeof(client->result));
+            sizeof(result.routing_result_parameter), &result.routing_result_parameter, &updated,
+            client->result, sizeof(client->result));
     }
     if (result.filter_control_unit != 0) {
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_FILTER_CONTROL_UNIT,
-                                                    sizeof(result.filter_control_unit), &result.filter_control_unit, &updated,
-                                                    client->result, sizeof(client->result));
+            sizeof(result.filter_control_unit), &result.filter_control_unit, &updated,
+            client->result, sizeof(client->result));
     }
     if (result.routed_interface_index != 0) {
         u_int routed_interface_index = result.routed_interface_index;
         if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
-            parsed_parameters->required_interface_index != IFSCOPE_NONE &&
-            parsed_parameters->required_interface_index != result.routed_interface_index) {
+            parsed_parameters->required_interface_index != IFSCOPE_NONE &&
+            parsed_parameters->required_interface_index != result.routed_interface_index) {
             routed_interface_index = parsed_parameters->required_interface_index;
         }
 
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_INDEX,
-                                                    sizeof(routed_interface_index), &routed_interface_index, &updated,
-                                                    client->result, sizeof(client->result));
+            sizeof(routed_interface_index), &routed_interface_index, &updated,
+            client->result, sizeof(client->result));
     }
     if (client_fd && client_fd->flags & NECP_OPEN_FLAG_BACKGROUND) {
         u_int32_t effective_traffic_class = SO_TC_BK_SYS;
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_TRAFFIC_CLASS,
-                                                    sizeof(effective_traffic_class), &effective_traffic_class, &updated,
-                                                    client->result, sizeof(client->result));
+            sizeof(effective_traffic_class), &effective_traffic_class, &updated,
+            client->result, sizeof(client->result));
     }
     if (client->background_update) {
         u_int32_t background = client->background;
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_TRAFFIC_MGMT_BG,
-                                                    sizeof(background), &background, &updated,
-                                                    client->result, sizeof(client->result));
+            sizeof(background), &background, &updated,
+            client->result, sizeof(client->result));
         if (updated) {
             client->background_update = 0;
         }
@@ -3088,16 +3087,16 @@ necp_update_client_result(proc_t proc,
         const u_int32_t route_mtu = get_maxmtu(client->current_route);
         if (route_mtu != 0) {
             cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_EFFECTIVE_MTU,
-                                                        sizeof(route_mtu), &route_mtu, &updated,
-                                                        client->result, sizeof(client->result));
+                sizeof(route_mtu), &route_mtu, &updated,
+                client->result, sizeof(client->result));
         }
     }
     NECP_CLIENT_ROUTE_UNLOCK(client);
 
     if (result.mss_recommended != 0) {
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_RECOMMENDED_MSS,
-                                                    sizeof(result.mss_recommended), &result.mss_recommended, &updated,
-                                                    client->result, sizeof(client->result));
+            sizeof(result.mss_recommended), &result.mss_recommended, &updated,
+            client->result, sizeof(client->result));
     }
 
     for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
@@ -3108,7 +3107,7 @@ necp_update_client_result(proc_t proc,
         netagent.generation = netagent_get_generation(netagent.netagent_uuid);
         if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE, 0, 0)) {
             cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
-                                                        client->result, sizeof(client->result));
+                client->result, sizeof(client->result));
         }
     }
 
@@ -3120,21 +3119,21 @@ necp_update_client_result(proc_t proc,
     if (result.routed_interface_index != IFSCOPE_NONE && result.routed_interface_index <= (u_int32_t)if_index) {
         direct_interface = ifindex2ifnet[result.routed_interface_index];
     } else if (parsed_parameters->required_interface_index != IFSCOPE_NONE &&
-               parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
+        parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
         // If the request was scoped, but the route didn't match, still grab the agents
         direct_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
     } else if (result.routed_interface_index == IFSCOPE_NONE &&
-               result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED &&
-               result.routing_result_parameter.scoped_interface_index != IFSCOPE_NONE) {
+        result.routing_result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED &&
+        result.routing_result_parameter.scoped_interface_index != IFSCOPE_NONE) {
         direct_interface = ifindex2ifnet[result.routing_result_parameter.scoped_interface_index];
     }
     if (direct_interface != NULL) {
         delegate_interface = direct_interface->if_delegated.ifp;
     }
     if (result.routing_result == NECP_KERNEL_POLICY_RESULT_IP_TUNNEL &&
-        parsed_parameters->required_interface_index != IFSCOPE_NONE &&
-        parsed_parameters->required_interface_index != result.routing_result_parameter.tunnel_interface_index &&
-        parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
+        parsed_parameters->required_interface_index != IFSCOPE_NONE &&
+        parsed_parameters->required_interface_index != result.routing_result_parameter.tunnel_interface_index &&
+        parsed_parameters->required_interface_index <= (u_int32_t)if_index) {
         original_scoped_interface = ifindex2ifnet[parsed_parameters->required_interface_index];
     }
     // Add interfaces
@@ -3143,14 +3142,14 @@ necp_update_client_result(proc_t proc,
         interface_struct.index = original_scoped_interface->if_index;
         interface_struct.generation = ifnet_get_generation(original_scoped_interface);
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
-                                                    client->result, sizeof(client->result));
+            client->result, sizeof(client->result));
     }
     if (direct_interface != NULL) {
         struct necp_client_result_interface interface_struct;
         interface_struct.index = direct_interface->if_index;
         interface_struct.generation = ifnet_get_generation(direct_interface);
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
-                                                    client->result, sizeof(client->result));
+            client->result, sizeof(client->result));
 
         // Set the delta time since interface up/down
         struct timeval updown_delta = {};
@@ -3158,8 +3157,8 @@ necp_update_client_result(proc_t proc,
             u_int32_t delta = updown_delta.tv_sec;
             bool ignore_updated = FALSE;
             cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_TIME_DELTA,
-                                                        sizeof(delta), &delta, &ignore_updated,
-                                                        client->result, sizeof(client->result));
+                sizeof(delta), &delta, &ignore_updated,
+                client->result, sizeof(client->result));
         }
     }
     if (delegate_interface != NULL) {
@@ -3167,7 +3166,7 @@ necp_update_client_result(proc_t proc,
         interface_struct.index = delegate_interface->if_index;
         interface_struct.generation = ifnet_get_generation(delegate_interface);
         cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE, sizeof(interface_struct), &interface_struct, &updated,
-                                                    client->result, sizeof(client->result));
+            client->result, sizeof(client->result));
     }
 
     // Update multipath/listener interface flows
@@ -3178,14 +3177,14 @@ necp_update_client_result(proc_t proc,
             if (necp_ifnet_matches_parameters(multi_interface, parsed_parameters, NULL, true)) {
                 // Add multipath interface flows for kernel MPTCP
                 necp_client_add_interface_option_if_needed(client, multi_interface->if_index,
-                                                           ifnet_get_generation(multi_interface), NULL);
+                    ifnet_get_generation(multi_interface), NULL);
 
                 // Add nexus agents for multipath
                 necp_client_add_agent_interface_options(client, parsed_parameters, multi_interface);
             }
         }
     } else if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) &&
-               result.routing_result != NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
+        result.routing_result != NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
         // Get listener interface options from global list
         struct ifnet *listen_interface = NULL;
         TAILQ_FOREACH(listen_interface, &ifnet_head, if_link) {
@@ -3207,9 +3206,9 @@ necp_update_client_result(proc_t proc,
                 uuid_copy(netagent.netagent_uuid, original_scoped_interface->if_agentids[i]);
                 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
                 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
-                                                    original_scoped_interface->if_index, ifnet_get_generation(original_scoped_interface))) {
+                    original_scoped_interface->if_index, ifnet_get_generation(original_scoped_interface))) {
                     cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
-                                                                client->result, sizeof(client->result));
+                        client->result, sizeof(client->result));
                 }
             }
         }
@@ -3225,9 +3224,9 @@ necp_update_client_result(proc_t proc,
                 uuid_copy(netagent.netagent_uuid, direct_interface->if_agentids[i]);
                 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
                 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, TRUE,
-                                                    direct_interface->if_index, ifnet_get_generation(direct_interface))) {
+                    direct_interface->if_index, ifnet_get_generation(direct_interface))) {
                     cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
-                                                                client->result, sizeof(client->result));
+                        client->result, sizeof(client->result));
                 }
             }
         }
@@ -3243,9 +3242,9 @@ necp_update_client_result(proc_t proc,
                 uuid_copy(netagent.netagent_uuid, delegate_interface->if_agentids[i]);
                 netagent.generation = netagent_get_generation(netagent.netagent_uuid);
                 if (necp_netagent_applies_to_client(client, parsed_parameters, &netagent.netagent_uuid, FALSE,
-                                                    delegate_interface->if_index, ifnet_get_generation(delegate_interface))) {
+                    delegate_interface->if_index, ifnet_get_generation(delegate_interface))) {
                     cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_NETAGENT, sizeof(netagent), &netagent, &updated,
-                                                                client->result, sizeof(client->result));
+                        client->result, sizeof(client->result));
                 }
             }
         }
@@ -3258,11 +3257,11 @@ necp_update_client_result(proc_t proc,
         if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
             struct necp_client_interface_option *option = &client->interface_options[option_i];
             cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
-                                                        client->result, sizeof(client->result));
+                client->result, sizeof(client->result));
         } else {
             struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
             cursor = necp_buffer_write_tlv_if_different(cursor, NECP_CLIENT_RESULT_INTERFACE_OPTION, sizeof(*option), option, &updated,
-                                                        client->result, sizeof(client->result));
+                client->result, sizeof(client->result));
         }
     }
@@ -3283,7 +3282,7 @@ necp_update_client_result(proc_t proc,
     }
 
     FREE(parsed_parameters, M_NECP);
-    return (updated);
+    return updated;
 }
 
 static inline void
@@ -3305,16 +3304,15 @@ necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_
         RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
             LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
                 if (search_flow->nexus &&
-                    !uuid_is_null(search_flow->u.nexus_agent)) {
-
+                    !uuid_is_null(search_flow->u.nexus_agent)) {
                     struct necp_flow_defunct *flow_defunct;
 
                     // Sleeping alloc won't fail; copy only what's necessary
-                    flow_defunct = _MALLOC(sizeof (struct necp_flow_defunct), M_NECP, M_WAITOK | M_ZERO);
+                    flow_defunct = _MALLOC(sizeof(struct necp_flow_defunct), M_NECP, M_WAITOK | M_ZERO);
                     uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
                     uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
-                                                      client->client_id :
-                                                      flow_registration->registration_id));
+                        client->client_id :
+                        flow_registration->registration_id));
                     flow_defunct->proc_pid = client->proc_pid;
                     flow_defunct->agent_handle = client->agent_handle;
 
@@ -3338,8 +3336,8 @@ necp_defunct_client_fd_locked(struct necp_fd_data *client_fd, struct _necp_flow_
 
 static inline void
 necp_update_client_fd_locked(struct necp_fd_data *client_fd,
-                             proc_t proc,
-                             struct _necp_flow_defunct_list *defunct_list)
+    proc_t proc,
+    struct _necp_flow_defunct_list *defunct_list)
 {
     struct necp_client *client = NULL;
     bool updated_result = FALSE;
@@ -3359,7 +3357,7 @@ necp_update_client_fd_locked(struct necp_fd_data *client_fd,
 
 static void
 necp_update_all_clients_callout(__unused thread_call_param_t dummy,
-                                __unused thread_call_param_t arg)
+    __unused thread_call_param_t arg)
 {
     struct necp_fd_data *client_fd = NULL;
@@ -3394,14 +3392,14 @@ necp_update_all_clients_callout(__unused thread_call_param_t dummy,
     LIST_FOREACH_SAFE(flow_defunct, &defunct_list, chain, temp_flow_defunct) {
         if (!uuid_is_null(flow_defunct->nexus_agent)) {
             int netagent_error = netagent_client_message(flow_defunct->nexus_agent,
-                                                         flow_defunct->flow_id,
-                                                         flow_defunct->proc_pid,
-                                                         flow_defunct->agent_handle,
-                                                         NETAGENT_MESSAGE_TYPE_ABORT_NEXUS);
+                flow_defunct->flow_id,
+                flow_defunct->proc_pid,
+                flow_defunct->agent_handle,
+                NETAGENT_MESSAGE_TYPE_ABORT_NEXUS);
             if (netagent_error != 0) {
-                char namebuf[MAXCOMLEN+1];
-                (void) strlcpy(namebuf, "unknown", sizeof (namebuf));
-                proc_name(flow_defunct->proc_pid, namebuf, sizeof (namebuf));
+                char namebuf[MAXCOMLEN + 1];
+                (void) strlcpy(namebuf, "unknown", sizeof(namebuf));
+                proc_name(flow_defunct->proc_pid, namebuf, sizeof(namebuf));
                 NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR), "necp_update_client abort nexus error (%d) for pid %d %s", netagent_error, flow_defunct->proc_pid, namebuf);
             }
         }
@@ -3426,13 +3424,13 @@ necp_update_all_clients(void)
     clock_interval_to_absolutetime_interval(necp_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);
 
     thread_call_enter_delayed_with_leeway(necp_client_update_tcall, NULL,
-                                          deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
+        deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
 }
 
 void
 necp_set_client_as_background(proc_t proc,
-                              struct fileproc *fp,
-                              bool background)
+    struct fileproc *fp,
+    bool background)
 {
     bool updated_result = FALSE;
     struct necp_client *client = NULL;
@@ -3531,10 +3529,10 @@ necp_fd_defunct(proc_t proc, struct necp_fd_data *client_fd)
     LIST_FOREACH_SAFE(flow_defunct, &defunct_list, chain, temp_flow_defunct) {
         if (!uuid_is_null(flow_defunct->nexus_agent)) {
             int netagent_error = netagent_client_message(flow_defunct->nexus_agent,
-                                                         flow_defunct->flow_id,
-                                                         flow_defunct->proc_pid,
-                                                         flow_defunct->agent_handle,
-                                                         NETAGENT_MESSAGE_TYPE_ABORT_NEXUS);
+                flow_defunct->flow_id,
+                flow_defunct->proc_pid,
+                flow_defunct->agent_handle,
+                NETAGENT_MESSAGE_TYPE_ABORT_NEXUS);
             if (netagent_error != 0) {
                 NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR), "necp_defunct_client abort nexus error (%d)", netagent_error);
             }
@@ -3558,17 +3556,17 @@ necp_client_remove_agent_from_result(struct necp_client *client, uuid_t netagent
         size_t tlv_total_length = (sizeof(struct necp_tlv_header) + length);
         if (type == NECP_CLIENT_RESULT_NETAGENT &&
-            length == sizeof(struct necp_client_result_netagent) &&
-            (offset + tlv_total_length) <= client->result_length) {
+            length == sizeof(struct necp_client_result_netagent) &&
+            (offset + tlv_total_length) <= client->result_length) {
             struct necp_client_result_netagent *value = ((struct necp_client_result_netagent *)(void *)
-                                                         necp_buffer_get_tlv_value(result_buffer, offset, NULL));
+                necp_buffer_get_tlv_value(result_buffer, offset, NULL));
             if (uuid_compare(value->netagent_uuid, netagent_uuid) == 0) {
                 // Found a netagent to remove
                 // Shift bytes down to remove the tlv, and adjust total length
                 // Don't adjust the current offset
                 memmove(result_buffer + offset,
-                        result_buffer + offset + tlv_total_length,
-                        client->result_length - (offset + tlv_total_length));
+                    result_buffer + offset + tlv_total_length,
+                    client->result_length - (offset + tlv_total_length));
                 client->result_length -= tlv_total_length;
                 memset(result_buffer + client->result_length, 0, sizeof(client->result) - client->result_length);
                 continue;
@@ -3617,35 +3615,35 @@ necp_force_update_client(uuid_t client_id, uuid_t remove_netagent_uuid, u_int32_
 
 /// Interface matching
 
-#define NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
-                                                         NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
-
-#define NECP_PARSED_PARAMETERS_SCOPED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
-                                              NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
-                                              NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
-                                              NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
-                                              NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
-                                              NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
-                                              NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
-                                              NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
-
-#define NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
-                                                    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)
-
-#define NECP_PARSED_PARAMETERS_PREFERRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
-                                                 NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
-                                                 NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
-                                                 NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
+#define NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
+    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF | \
+    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
+
+#define NECP_PARSED_PARAMETERS_SCOPED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
+    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
+
+#define NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS (NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR | \
+    NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)
+
+#define NECP_PARSED_PARAMETERS_PREFERRED_FIELDS (NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT | \
+    NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE | \
+    NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE)
 
 static bool
@@ -3653,15 +3651,14 @@ necp_ifnet_matches_type(struct ifnet *ifp, u_int8_t interface_type, bool check_d
     struct ifnet *check_ifp = ifp;
     while (check_ifp) {
         if (if_functional_type(check_ifp, TRUE) == interface_type) {
-            return (TRUE);
+            return TRUE;
         }
         if (!check_delegates) {
             break;
         }
         check_ifp = check_ifp->if_delegated.ifp;
-
     }
-    return (FALSE);
+    return FALSE;
 }
 
 static bool
@@ -3670,14 +3667,14 @@ necp_ifnet_matches_name(struct ifnet *ifp, const char *interface_name, bool chec
     struct ifnet *check_ifp = ifp;
     while (check_ifp) {
         if (strncmp(check_ifp->if_xname, interface_name, IFXNAMSIZ) == 0) {
-            return (TRUE);
+            return TRUE;
         }
         if (!check_delegates) {
             break;
         }
         check_ifp = check_ifp->if_delegated.ifp;
     }
-    return (FALSE);
+    return FALSE;
 }
 
 static bool
@@ -3691,7 +3688,7 @@ necp_ifnet_matches_agent(struct ifnet *ifp, uuid_t *agent_uuid, bool check_deleg
             for (u_int32_t index = 0; index < check_ifp->if_agentcount; index++) {
                 if (uuid_compare(check_ifp->if_agentids[index], *agent_uuid) == 0) {
                     ifnet_lock_done(check_ifp);
-                    return (TRUE);
+                    return TRUE;
                 }
             }
         }
@@ -3702,7 +3699,7 @@ necp_ifnet_matches_agent(struct ifnet *ifp, uuid_t *agent_uuid, bool check_deleg
         }
         check_ifp = check_ifp->if_delegated.ifp;
     }
-    return (FALSE);
+    return FALSE;
 }
 
 static bool
@@ -3724,7 +3721,7 @@ necp_ifnet_matches_agent_type(struct ifnet *ifp, const char *agent_domain, const
                 if (netagent_get_agent_domain_and_type(check_ifp->if_agentids[index], if_agent_domain, if_agent_type)) {
                     if (necp_agent_types_match(agent_domain, agent_type, if_agent_domain, if_agent_type)) {
                         ifnet_lock_done(check_ifp);
-                        return (TRUE);
+                        return TRUE;
                     }
                 }
             }
@@ -3736,7 +3733,7 @@ necp_ifnet_matches_agent_type(struct ifnet *ifp, const char *agent_domain, const
         }
         check_ifp = check_ifp->if_delegated.ifp;
     }
-    return (FALSE);
+    return FALSE;
 }
 
 static bool
@@ -3762,21 +3759,21 @@ necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa)
         ifaddr_release(ifa);
     }
 
-    return (matched_local_address);
+    return matched_local_address;
 }
 
 static bool
 necp_interface_type_is_primary_eligible(u_int8_t interface_type)
 {
     switch (interface_type) {
-        // These types can never be primary, so a client requesting these types is allowed
-        // to match an interface that isn't currently eligible to be primary (has default
-        // route, dns, etc)
-        case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
-        case IFRTYPE_FUNCTIONAL_INTCOPROC:
-            return false;
-        default:
-            break;
+    // These types can never be primary, so a client requesting these types is allowed
+    // to match an interface that isn't currently eligible to be primary (has default
+    // route, dns, etc)
+    case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
+    case IFRTYPE_FUNCTIONAL_INTCOPROC:
+        return false;
+    default:
+        break;
    }
    return true;
 }
@@ -3787,9 +3784,9 @@ necp_interface_type_is_primary_eligible(u_int8_t interface_type)
 // used for multipath or a listener as an extra path
 static bool
 necp_ifnet_matches_parameters(struct ifnet *ifp,
-                              struct necp_client_parsed_parameters *parsed_parameters,
-                              u_int32_t *preferred_count,
-                              bool secondary_interface)
+    struct necp_client_parsed_parameters *parsed_parameters,
+    u_int32_t *preferred_count,
+    bool secondary_interface)
 {
     if (preferred_count) {
         *preferred_count = 0;
@@ -3797,23 +3794,23 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
 
     if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) {
         if (!necp_ifnet_matches_local_address(ifp, &parsed_parameters->local_addr.sa)) {
-            return (FALSE);
+            return FALSE;
         }
     }
 
     if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) {
         if ((parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_PROHIBIT_EXPENSIVE) &&
-            IFNET_IS_EXPENSIVE(ifp)) {
-            return (FALSE);
+            IFNET_IS_EXPENSIVE(ifp)) {
+            return FALSE;
         }
     }
 
     if ((!secondary_interface || // Enforce interface type if this is the primary interface
-         !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) || // or if there are no flags
-         !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE)) && // or if the flags don't give an exception
+        !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_FLAGS) || // or if there are no flags
+        !(parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_ONLY_PRIMARY_REQUIRES_TYPE)) && // or if the flags don't give an exception
         (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE) &&
-        !necp_ifnet_matches_type(ifp, parsed_parameters->required_interface_type, FALSE)) {
-        return (FALSE);
+        !necp_ifnet_matches_type(ifp, parsed_parameters->required_interface_type, FALSE)) {
+        return FALSE;
     }
 
     if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE) {
@@ -3823,7 +3820,7 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
             }
 
             if (necp_ifnet_matches_type(ifp, parsed_parameters->prohibited_interface_types[i], TRUE)) {
-                return (FALSE);
+                return FALSE;
             }
         }
     }
@@ -3835,7 +3832,7 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
             }
 
             if (necp_ifnet_matches_name(ifp, parsed_parameters->prohibited_interfaces[i], TRUE)) {
-                return (FALSE);
+                return FALSE;
            }
        }
    }
@@ -3847,7 +3844,7 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
            }
 
            if (!necp_ifnet_matches_agent(ifp, &parsed_parameters->required_netagents[i], FALSE)) {
-                return (FALSE);
+                return FALSE;
            }
        }
    }
@@ -3859,7 +3856,7 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
            }
 
            if (necp_ifnet_matches_agent(ifp, &parsed_parameters->prohibited_netagents[i], TRUE)) {
-                return (FALSE);
+                return FALSE;
            }
        }
    }
 
@@ -3867,12 +3864,12 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
    if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE) {
        for (int i = 0; i < NECP_MAX_PARSED_PARAMETERS; i++) {
            if (strlen(parsed_parameters->required_netagent_types[i].netagent_domain) == 0 &&
-                strlen(parsed_parameters->required_netagent_types[i].netagent_type) == 0) {
+                strlen(parsed_parameters->required_netagent_types[i].netagent_type) == 0) {
                break;
            }
 
            if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->required_netagent_types[i].netagent_domain, parsed_parameters->required_netagent_types[i].netagent_type, FALSE)) {
-                return (FALSE);
+                return FALSE;
            }
        }
    }
 
@@ -3880,12 +3877,12 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
    if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE) {
        for (int i = 0; i < NECP_MAX_PARSED_PARAMETERS; i++) {
            if (strlen(parsed_parameters->prohibited_netagent_types[i].netagent_domain) == 0 &&
-                strlen(parsed_parameters->prohibited_netagent_types[i].netagent_type) == 0) {
+                strlen(parsed_parameters->prohibited_netagent_types[i].netagent_type) == 0) {
                break;
            }
 
            if (necp_ifnet_matches_agent_type(ifp, parsed_parameters->prohibited_netagent_types[i].netagent_domain, parsed_parameters->prohibited_netagent_types[i].netagent_type, TRUE)) {
-                return (FALSE);
+                return FALSE;
            }
        }
    }
 
@@ -3907,7 +3904,7 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
        if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE) {
            for (int i = 0; i < NECP_MAX_PARSED_PARAMETERS; i++) {
                if (strlen(parsed_parameters->preferred_netagent_types[i].netagent_domain) == 0 &&
-                    strlen(parsed_parameters->preferred_netagent_types[i].netagent_type) == 0) {
+                    strlen(parsed_parameters->preferred_netagent_types[i].netagent_type) == 0) {
                    break;
                }
 
@@ -3932,24 +3929,24 @@ necp_ifnet_matches_parameters(struct ifnet *ifp,
        if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE) {
            for (int i = 0; i < NECP_MAX_PARSED_PARAMETERS; i++) {
                if (strlen(parsed_parameters->avoided_netagent_types[i].netagent_domain) == 0 &&
-                    strlen(parsed_parameters->avoided_netagent_types[i].netagent_type) == 0) {
+                    strlen(parsed_parameters->avoided_netagent_types[i].netagent_type) == 0) {
                    break;
                }
 
                if (!necp_ifnet_matches_agent_type(ifp, parsed_parameters->avoided_netagent_types[i].netagent_domain,
-                                                   parsed_parameters->avoided_netagent_types[i].netagent_type, TRUE)) {
+                    parsed_parameters->avoided_netagent_types[i].netagent_type, TRUE)) {
                    (*preferred_count)++;
                }
            }
        }
    }
 
-    return (TRUE);
+    return TRUE;
 }
 
 static bool
 necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
-                                   u_int *return_ifindex, bool *validate_agents)
+    u_int *return_ifindex, bool *validate_agents)
 {
    struct ifnet *ifp = NULL;
    u_int32_t best_preferred_count = 0;
@@ -3958,11 +3955,11 @@ necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_
    if (parsed_parameters->required_interface_index != 0) {
        *return_ifindex = parsed_parameters->required_interface_index;
-        return (TRUE);
+        return TRUE;
    }
 
    if (!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_INTERESTING_IFNET_FIELDS)) {
-        return (TRUE);
+        return TRUE;
    }
 
    has_preferred_fields = (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS);
@@ -3977,7 +3974,7 @@ necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_
        if (ifp == NULL || necp_ifnet_matches_parameters(ifp, parsed_parameters, NULL, false)) {
            // Don't set return_ifindex, so the client doesn't need to scope
            ifnet_head_done();
-            return (TRUE);
+            return TRUE;
        }
    }
 
@@ -3986,8 +3983,7 @@ necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_
        u_int32_t preferred_count = 0;
        if (necp_ifnet_matches_parameters(ifp, parsed_parameters, &preferred_count, false)) {
            if (preferred_count > best_preferred_count ||
-                *return_ifindex == 0) {
-
+                *return_ifindex == 0) {
                // Everything matched, and is most preferred. Return this interface.
                *return_ifindex = ifp->if_index;
                best_preferred_count = preferred_count;
@@ -4001,9 +3997,9 @@ necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_
 
    // Then check the remaining interfaces
    if ((parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_FIELDS) &&
-        ((!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)) ||
-         !necp_interface_type_is_primary_eligible(parsed_parameters->required_interface_type)) &&
-        *return_ifindex == 0) {
+        ((!(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE)) ||
+        !necp_interface_type_is_primary_eligible(parsed_parameters->required_interface_type)) &&
+        *return_ifindex == 0) {
        TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
            u_int32_t preferred_count = 0;
            if (NECP_IFP_IS_ON_ORDERED_LIST(ifp)) {
@@ -4012,8 +4008,7 @@ necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_
            }
 
            if (necp_ifnet_matches_parameters(ifp, parsed_parameters, &preferred_count, false)) {
                if (preferred_count > best_preferred_count ||
-                    *return_ifindex == 0) {
-
+                    *return_ifindex == 0) {
                    // Everything matched, and is most preferred. Return this interface.
                    *return_ifindex = ifp->if_index;
                    best_preferred_count = preferred_count;
@@ -4029,21 +4024,21 @@ necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_
    ifnet_head_done();
 
    if ((parsed_parameters->valid_fields == (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_PREFERRED_FIELDS)) &&
-        best_preferred_count == 0) {
+        best_preferred_count == 0) {
        // If only has preferred fields, and nothing was found, clear the interface index and return TRUE
        *return_ifindex = 0;
-        return (TRUE);
+        return TRUE;
    }
 
    if (*return_ifindex == 0 &&
-        !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS)) {
+        !(parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_SCOPED_IFNET_FIELDS)) {
        // Has required fields, but not including specific interface fields. Pass for now, and check
        // to see if agents are satisfied by policy.
        *validate_agents = TRUE;
-        return (TRUE);
+        return TRUE;
    }
 
-    return (*return_ifindex != 0);
+    return *return_ifindex != 0;
 }
 
@@ -4051,7 +4046,7 @@
 static int
 necp_skywalk_priv_check_cred(proc_t p, kauth_cred_t cred)
 {
 #pragma unused(p, cred)
-    return (0);
+    return 0;
 }
 
 /// System calls
@@ -4066,9 +4061,9 @@ necp_open(struct proc *p, struct necp_open_args *uap, int *retval)
    int fd = -1;
 
    if (uap->flags & NECP_OPEN_FLAG_OBSERVER ||
-        uap->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
+        uap->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
        if (necp_skywalk_priv_check_cred(p, kauth_cred_get()) != 0 &&
-            priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
+            priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
            NECPLOG0(LOG_ERR, "Client does not hold necessary entitlement to observe other NECP clients");
            error = EACCES;
            goto done;
@@ -4145,7 +4140,7 @@ done:
        }
    }
 
-    return (error);
+    return error;
 }
 
 static int
@@ -4156,16 +4151,16 @@ necp_client_add(struct proc *p, struct necp_fd_data *fd_data, struct necp_client
 
    if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
        NECPLOG0(LOG_ERR, "NECP client observers with push enabled may not add their own clients");
-        return (EINVAL);
+        return EINVAL;
    }
 
    if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) ||
-        uap->buffer_size == 0 || uap->buffer_size > NECP_MAX_CLIENT_PARAMETERS_SIZE || uap->buffer == 0) {
-        return (EINVAL);
+        uap->buffer_size == 0 || uap->buffer_size > NECP_MAX_CLIENT_PARAMETERS_SIZE || uap->buffer == 0) {
+        return EINVAL;
    }
 
    if ((client = _MALLOC(sizeof(struct necp_client) + uap->buffer_size, M_NECP,
-                          M_WAITOK | M_ZERO)) == NULL) {
+        M_WAITOK | M_ZERO)) == NULL) {
        error = ENOMEM;
        goto done;
    }
@@ -4218,7 +4213,7 @@ done:
    }
    *retval = error;
 
-    return (error);
+    return error;
 }
 
 static int
@@ -4244,7 +4239,7 @@ necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args
        if (error) {
            NECPLOG(LOG_ERR, "necp_client_remove flow_ifnet_stats copyin error (%d)", error);
            // Not fatal; make sure to zero-out stats in case of partial copy
-            memset(&flow_ifnet_stats, 0, sizeof (flow_ifnet_stats));
+            memset(&flow_ifnet_stats, 0, sizeof(flow_ifnet_stats));
            error = 0;
        }
    } else if (uap->buffer != 0) {
@@ -4287,7 +4282,7 @@ necp_client_remove(struct necp_fd_data *fd_data, struct necp_client_action_args
 
 done:
    *retval = error;
-    return (error);
+    return error;
 }
 
 
@@ -4298,37 +4293,37 @@ necp_client_check_tcp_heuristics(struct necp_client *client, struct necp_client_
    int error = 0;
 
    error = necp_client_parse_parameters(client->parameters,
-                                         (u_int32_t)client->parameters_length,
-                                         &parsed_parameters);
+        (u_int32_t)client->parameters_length,
+        &parsed_parameters);
    if (error) {
        NECPLOG(LOG_ERR, "necp_client_parse_parameters error (%d)", error);
-        return (error);
+        return error;
    }
 
    if ((flow->remote_addr.sa.sa_family != AF_INET &&
-         flow->remote_addr.sa.sa_family != AF_INET6) ||
-        (flow->local_addr.sa.sa_family != AF_INET &&
-         flow->local_addr.sa.sa_family != AF_INET6)) {
-        return (EINVAL);
+        flow->remote_addr.sa.sa_family != AF_INET6) ||
+        (flow->local_addr.sa.sa_family != AF_INET &&
+        flow->local_addr.sa.sa_family != AF_INET6)) {
+        return EINVAL;
    }
 
    NECP_CLIENT_ROUTE_LOCK(client);
 
    if (client->current_route == NULL) {
-        error = ENOENT;
-        goto do_unlock;
+        error = ENOENT;
+        goto do_unlock;
    }
 
    bool check_ecn = false;
    do {
        if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) ==
-            NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) {
+            NECP_CLIENT_PARAMETER_FLAG_ECN_ENABLE) {
            check_ecn = true;
            break;
        }
 
        if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) ==
-            NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) {
+            NECP_CLIENT_PARAMETER_FLAG_ECN_DISABLE) {
            break;
        }
 
@@ -4344,25 +4339,24 @@ necp_client_check_tcp_heuristics(struct necp_client *client, struct necp_client_
        bool inbound = ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER) == 0);
        if ((inbound && tcp_ecn_inbound == 1) ||
-            (!inbound && tcp_ecn_outbound == 1)) {
+            (!inbound && tcp_ecn_outbound == 1)) {
            check_ecn = true;
        }
    } while (false);
 
    if (check_ecn) {
        if (tcp_heuristic_do_ecn_with_address(client->current_route->rt_ifp,
-                                              (union sockaddr_in_4_6 *)&flow->local_addr)) {
+            (union sockaddr_in_4_6 *)&flow->local_addr)) {
            *flags |= NECP_CLIENT_RESULT_FLAG_ECN_ENABLED;
        }
    }
 
    if ((parsed_parameters.flags & NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) ==
-        NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) {
-
+        NECP_CLIENT_PARAMETER_FLAG_TFO_ENABLE) {
        if (!tcp_heuristic_do_tfo_with_address(client->current_route->rt_ifp,
-                                               (union sockaddr_in_4_6 *)&flow->local_addr,
-                                               (union sockaddr_in_4_6 *)&flow->remote_addr,
-                                               tfo_cookie, tfo_cookie_len)) {
+            (union sockaddr_in_4_6 *)&flow->local_addr,
+            (union sockaddr_in_4_6 *)&flow->remote_addr,
+            tfo_cookie, tfo_cookie_len)) {
            *flags |= NECP_CLIENT_RESULT_FLAG_FAST_OPEN_BLOCKED;
            *tfo_cookie_len = 0;
        }
@@ -4373,7 +4367,7 @@ necp_client_check_tcp_heuristics(struct necp_client *client, struct necp_client_
 
 do_unlock:
    NECP_CLIENT_ROUTE_UNLOCK(client);
 
-    return (error);
+    return error;
 }
 
 static size_t
@@ -4401,10 +4395,10 @@ necp_client_calculate_flow_tlv_size(struct necp_client_flow_registration *flow_r
 static int
 necp_client_fillout_flow_tlvs(struct necp_client *client,
-                              bool client_is_observed,
-                              struct necp_client_flow_registration *flow_registration,
-                              struct necp_client_action_args *uap,
-                              size_t *assigned_results_cursor)
+    bool client_is_observed,
+    struct necp_client_flow_registration *flow_registration,
+    struct necp_client_action_args *uap,
+    size_t *assigned_results_cursor)
 {
    int error = 0;
    struct necp_client_flow *flow = NULL;
@@ -4429,7 +4423,7 @@ necp_client_fillout_flow_tlvs(struct necp_client *client,
                tfo_cookie_len = NECP_TFO_COOKIE_LEN_MAX;
 
                if (necp_client_check_tcp_heuristics(client, flow, &flags,
-                                                     tfo_cookie, &tfo_cookie_len) != 0) {
+                    tfo_cookie, &tfo_cookie_len) != 0) {
                    tfo_cookie_len = 0;
                } else {
                    flow->check_tcp_heuristics = FALSE;
@@ -4507,17 +4501,17 @@ necp_client_fillout_flow_tlvs(struct necp_client *client,
            error = copyout(&header, uap->buffer + client->result_length + *assigned_results_cursor, header_length);
            if (error) {
                NECPLOG(LOG_ERR, "necp_client_copy assigned results tlv_header copyout error (%d)", error);
-                return (error);
+                return error;
            }
            *assigned_results_cursor += header_length;
 
            if (flow->assigned_results && flow->assigned_results_length) {
                // Write inner TLVs
                error = copyout(flow->assigned_results, uap->buffer + client->result_length + *assigned_results_cursor,
-                                flow->assigned_results_length);
+                    flow->assigned_results_length);
                if (error) {
                    NECPLOG(LOG_ERR, "necp_client_copy assigned results copyout error (%d)", error);
-                    return (error);
+                    return error;
                }
            }
            *assigned_results_cursor += flow->assigned_results_length;
@@ -4532,15 +4526,15 @@ necp_client_fillout_flow_tlvs(struct necp_client *client,
            memcpy(&protoctl_event_header.protoctl_tlv_header.type, &type, sizeof(type));
            memcpy(&protoctl_event_header.protoctl_tlv_header.length, &length, sizeof(length));
            memcpy(&protoctl_event_header.protoctl_event, &flow->protoctl_event,
-                   sizeof(flow->protoctl_event));
+                sizeof(flow->protoctl_event));
 
            error = copyout(&protoctl_event_header, uap->buffer + client->result_length + *assigned_results_cursor,
-                            sizeof(protoctl_event_header));
+                sizeof(protoctl_event_header));
 
            if (error) {
                NECPLOG(LOG_ERR, "necp_client_copy protocol control event results"
-                        " tlv_header copyout error (%d)", error);
-                return (error);
+                    " tlv_header copyout error (%d)", error);
+                return error;
            }
            *assigned_results_cursor += sizeof(protoctl_event_header);
            flow->has_protoctl_event = FALSE;
@@ -4553,7 +4547,7 @@ necp_client_fillout_flow_tlvs(struct necp_client *client,
    if (!client_is_observed) {
        flow_registration->flow_result_read = TRUE;
    }
-    return (0);
+    return 0;
 }
 
 static int
@@ -4564,21 +4558,21 @@ necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool cli
    // Copy results out
    if (uap->action == NECP_CLIENT_ACTION_COPY_PARAMETERS) {
        if (uap->buffer_size < client->parameters_length) {
-            return (EINVAL);
+            return EINVAL;
        }
        error = copyout(client->parameters, uap->buffer, client->parameters_length);
        if (error) {
            NECPLOG(LOG_ERR, "necp_client_copy parameters copyout error (%d)", error);
-            return (error);
+            return error;
        }
        *retval = client->parameters_length;
    } else if (uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT &&
-               client->result_read && !necp_client_has_unread_flows(client)) {
+        client->result_read && !necp_client_has_unread_flows(client)) {
        // Copy updates only, but nothing to read
        // Just return 0 for bytes read
        *retval = 0;
    } else if (uap->action == NECP_CLIENT_ACTION_COPY_RESULT ||
-               uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT) {
+        uap->action == NECP_CLIENT_ACTION_COPY_UPDATED_RESULT) {
        size_t assigned_results_size = 0;
 
        bool some_flow_is_defunct = false;
@@ -4599,7 +4593,7 @@ necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool cli
            }
        }
        if (uap->buffer_size < (client->result_length + assigned_results_size)) {
-            return (EINVAL);
+            return EINVAL;
        }
 
        u_int32_t original_flags = 0;
@@ -4614,8 +4608,8 @@ necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool cli
                original_flags = client_flags;
                client_flags |= NECP_CLIENT_RESULT_FLAG_DEFUNCT;
                (void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
-                                                        sizeof(client_flags), &client_flags, &flags_updated,
-                                                        client->result, sizeof(client->result));
+                    sizeof(client_flags), &client_flags, &flags_updated,
+                    client->result, sizeof(client->result));
            }
        }
 
@@ -4624,13 +4618,13 @@ necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool cli
        if (flags_updated) {
            // Revert stored flags
            (void)necp_buffer_write_tlv_if_different(client->result, NECP_CLIENT_RESULT_FLAGS,
-                                                    sizeof(original_flags), &original_flags, &flags_updated,
-                                                    client->result, sizeof(client->result));
+                sizeof(original_flags), &original_flags, &flags_updated,
+                client->result, sizeof(client->result));
        }
 
        if (error) {
            NECPLOG(LOG_ERR, "necp_client_copy result copyout error (%d)", error);
-            return (error);
+            return error;
        }
 
        size_t assigned_results_cursor = 0;
@@ -4638,7 +4632,7 @@ necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool cli
            if (single_flow_registration != NULL) {
                error = necp_client_fillout_flow_tlvs(client, client_is_observed, single_flow_registration, uap, &assigned_results_cursor);
                if (error != 0) {
-                    return (error);
+                    return error;
                }
            }
        } else {
@@ -4647,7 +4641,7 @@ necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool cli
            RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
                error = necp_client_fillout_flow_tlvs(client, client_is_observed, flow_registration, uap, &assigned_results_cursor);
                if (error != 0) {
-                    return (error);
+                    return error;
                }
            }
        }
@@ -4659,7 +4653,7 @@ necp_client_copy_internal(struct necp_client *client, uuid_t client_id, bool cli
        }
    }
 
-    return (0);
+    return 0;
 }
 
 static int
@@ -4673,25 +4667,25 @@ necp_client_copy(struct necp_fd_data *fd_data, struct necp_client_action_args *u
    *retval = 0;
 
    if (uap->buffer_size == 0 || uap->buffer == 0) {
-        return (EINVAL);
+        return EINVAL;
    }
 
    if (uap->action != NECP_CLIENT_ACTION_COPY_PARAMETERS &&
-        uap->action != NECP_CLIENT_ACTION_COPY_RESULT &&
-        uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT) {
-        return (EINVAL);
+        uap->action != NECP_CLIENT_ACTION_COPY_RESULT &&
+        uap->action != NECP_CLIENT_ACTION_COPY_UPDATED_RESULT) {
+        return EINVAL;
    }
 
    if (uap->client_id) {
        if (uap->client_id_len != sizeof(uuid_t)) {
            NECPLOG(LOG_ERR, "Incorrect length (got %d, expected %d)", uap->client_id_len, sizeof(uuid_t));
-            return (ERANGE);
+            return ERANGE;
        }
 
        error = copyin(uap->client_id, client_id, sizeof(uuid_t));
        if (error) {
            NECPLOG(LOG_ERR, "necp_client_copy client_id copyin error (%d)", error);
-            return (error);
+            return error;
        }
    }
 
@@ -4747,15 +4741,15 @@ necp_client_copy(struct necp_fd_data *fd_data, struct necp_client_action_args *u
 
            // No client found, fail
            if (!found_client) {
-                return (ENOENT);
+                return ENOENT;
            }
        } else {
            // No client found, and not allowed to search other fds, fail
-            return (ENOENT);
+            return ENOENT;
        }
    }
 
-    return (error);
+    return error;
 }
 
 static int
@@ -4767,17 +4761,17 @@ necp_client_copy_client_update(struct necp_fd_data *fd_data, struct necp_client_
 
    if (!(fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER)) {
        NECPLOG0(LOG_ERR, "NECP fd is not observer, cannot copy client update");
-        return (EINVAL);
+        return EINVAL;
    }
 
    if (uap->client_id_len != sizeof(uuid_t) || uap->client_id == 0) {
        NECPLOG0(LOG_ERR, "Client id invalid, cannot copy client update");
-        return (EINVAL);
+        return EINVAL;
    }
 
    if (uap->buffer_size == 0 || uap->buffer == 0) {
        NECPLOG0(LOG_ERR, "Buffer invalid, cannot copy client update");
-        return (EINVAL);
+        return EINVAL;
    }
 
    NECP_FD_LOCK(fd_data);
@@ -4813,12 +4807,12 @@ necp_client_copy_client_update(struct necp_fd_data *fd_data, struct necp_client_
        error = ENOENT;
    }
 
-    return (error);
+    return error;
 }
 
 static int
 necp_client_copy_parameters_locked(struct necp_client *client,
-                                   struct necp_client_nexus_parameters *parameters)
+    struct necp_client_nexus_parameters *parameters)
 {
    VERIFY(parameters != NULL);
@@ -4849,7 +4843,7 @@ necp_client_copy_parameters_locked(struct necp_client *client,
    }
 
    parameters->allow_qos_marking = (client_result_flags & NECP_CLIENT_RESULT_FLAG_ALLOW_QOS_MARKING) ?
1 : 0; - return (error); + return error; } static int @@ -4925,8 +4919,8 @@ necp_client_list(struct necp_fd_data *fd_data, struct necp_client_action_args *u } if (requested_client_count > 0 && - client_count > 0 && - list != NULL) { + client_count > 0 && + list != NULL) { error = copyout(list, uap->buffer + sizeof(client_count), copy_buffer_size); if (error) { NECPLOG(LOG_ERR, "necp_client_list client count copyout error (%d)", error); @@ -4939,7 +4933,7 @@ done: } *retval = error; - return (error); + return error; } @@ -4992,7 +4986,7 @@ necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action size_t parameters_size = uap->buffer_size; if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) || - uap->buffer_size == 0 || uap->buffer == 0) { + uap->buffer_size == 0 || uap->buffer == 0) { NECPLOG0(LOG_ERR, "necp_client_agent_action invalid parameters"); error = EINVAL; goto done; @@ -5039,51 +5033,50 @@ necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action if (length > 0) { u_int8_t *value = necp_buffer_get_tlv_value(parameters, offset, NULL); if (length >= sizeof(uuid_t) && - value != NULL && - (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT || - type == NECP_CLIENT_PARAMETER_ASSERT_AGENT || - type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT)) { - - uuid_t agent_uuid; - uuid_copy(agent_uuid, value); - u_int8_t netagent_message_type = 0; - if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT) { - netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER; - } else if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) { - netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT; - } else if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) { - netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT; - } - - // Before unasserting, verify that the assertion was already taken - if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) { - if (!necp_client_remove_assertion(client, agent_uuid)) { - error = ENOENT; - break; - } - } + value != NULL && + (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT || + type == NECP_CLIENT_PARAMETER_ASSERT_AGENT || + type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT)) { + uuid_t agent_uuid; + uuid_copy(agent_uuid, value); + u_int8_t netagent_message_type = 0; + if (type == NECP_CLIENT_PARAMETER_TRIGGER_AGENT) { + netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER; + } else if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) { + netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT; + } else if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) { + netagent_message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT; + } - struct necp_client_nexus_parameters parsed_parameters = {}; - necp_client_copy_parameters_locked(client, &parsed_parameters); - - error = netagent_client_message_with_params(agent_uuid, - client_id, - fd_data->proc_pid, - client->agent_handle, - netagent_message_type, - &parsed_parameters, - NULL, NULL); - if (error == 0) { - acted_on_agent = TRUE; - } else { + // Before unasserting, verify that the assertion was already taken + if (type == NECP_CLIENT_PARAMETER_UNASSERT_AGENT) { + if (!necp_client_remove_assertion(client, agent_uuid)) { + error = ENOENT; break; } + } - // Only save the assertion if the action succeeded - if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) { - necp_client_add_assertion(client, agent_uuid); - } + struct necp_client_nexus_parameters parsed_parameters = {}; + necp_client_copy_parameters_locked(client, &parsed_parameters); + + error = netagent_client_message_with_params(agent_uuid, + 
client_id, + fd_data->proc_pid, + client->agent_handle, + netagent_message_type, + &parsed_parameters, + NULL, NULL); + if (error == 0) { + acted_on_agent = TRUE; + } else { + break; + } + + // Only save the assertion if the action succeeded + if (type == NECP_CLIENT_PARAMETER_ASSERT_AGENT) { + necp_client_add_assertion(client, agent_uuid); } + } } offset += sizeof(struct necp_tlv_header) + length; @@ -5094,7 +5087,7 @@ necp_client_agent_action(struct necp_fd_data *fd_data, struct necp_client_action NECP_FD_UNLOCK(fd_data); if (!acted_on_agent && - error == 0) { + error == 0) { error = ENOENT; } done: @@ -5104,7 +5097,7 @@ done: parameters = NULL; } - return (error); + return error; } static int @@ -5114,7 +5107,7 @@ necp_client_copy_agent(__unused struct necp_fd_data *fd_data, struct necp_client uuid_t agent_uuid; if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) || - uap->buffer_size == 0 || uap->buffer == 0) { + uap->buffer_size == 0 || uap->buffer == 0) { NECPLOG0(LOG_ERR, "necp_client_copy_agent bad input"); error = EINVAL; goto done; @@ -5134,7 +5127,7 @@ necp_client_copy_agent(__unused struct necp_fd_data *fd_data, struct necp_client done: *retval = error; - return (error); + return error; } static int @@ -5146,7 +5139,7 @@ necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_ar struct necp_agent_use_parameters parameters; if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) || - uap->buffer_size != sizeof(parameters) || uap->buffer == 0) { + uap->buffer_size != sizeof(parameters) || uap->buffer == 0) { error = EINVAL; goto done; } @@ -5185,7 +5178,7 @@ necp_client_agent_use(struct necp_fd_data *fd_data, struct necp_client_action_ar done: *retval = error; - return (error); + return error; } static int @@ -5196,7 +5189,7 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl struct necp_interface_details interface_details; if (uap->client_id == 0 || uap->client_id_len != sizeof(u_int32_t) || - uap->buffer_size < sizeof(interface_details) || uap->buffer == 0) { + uap->buffer_size < sizeof(interface_details) || uap->buffer == 0) { NECPLOG0(LOG_ERR, "necp_client_copy_interface bad input"); error = EINVAL; goto done; @@ -5252,7 +5245,7 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl u_int8_t ipv4_signature_len = sizeof(interface_details.ipv4_signature.signature); u_int16_t ipv4_signature_flags; if (ifnet_get_netsignature(interface, AF_INET, &ipv4_signature_len, &ipv4_signature_flags, - (u_int8_t *)&interface_details.ipv4_signature) != 0) { + (u_int8_t *)&interface_details.ipv4_signature) != 0) { ipv4_signature_len = 0; } interface_details.ipv4_signature.signature_len = ipv4_signature_len; @@ -5260,7 +5253,7 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl u_int8_t ipv6_signature_len = sizeof(interface_details.ipv6_signature.signature); u_int16_t ipv6_signature_flags; if (ifnet_get_netsignature(interface, AF_INET6, &ipv6_signature_len, &ipv6_signature_flags, - (u_int8_t *)&interface_details.ipv6_signature) != 0) { + (u_int8_t *)&interface_details.ipv6_signature) != 0) { ipv6_signature_len = 0; } interface_details.ipv6_signature.signature_len = ipv6_signature_len; @@ -5276,7 +5269,7 @@ necp_client_copy_interface(__unused struct necp_fd_data *fd_data, struct necp_cl done: *retval = error; - return (error); + return error; } @@ -5288,7 +5281,7 @@ necp_client_copy_route_statistics(__unused struct necp_fd_data *fd_data, struct uuid_t client_id; 
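/*
 * The agent-action hunk above walks a user-supplied TLV (type/length/value)
 * buffer: read a header, act on the value, then advance the cursor by the
 * header size plus the value length.  A self-contained sketch of that walk,
 * assuming a packed one-byte-type / four-byte-length header in the spirit of
 * NECP's struct necp_tlv_header (layout here is illustrative):
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct tlv_header {
	uint8_t  type;
	uint32_t length;
} __attribute__((packed));

/* Invoke cb for each well-formed element; stop at the first element whose
 * declared length would run past the end of the buffer, as the kernel loop
 * effectively does by bounds-checking each accessor. */
static void
tlv_walk(const uint8_t *buf, size_t buf_len,
    void (*cb)(uint8_t type, const uint8_t *value, uint32_t length))
{
	size_t offset = 0;
	while (offset + sizeof(struct tlv_header) <= buf_len) {
		struct tlv_header hdr;
		memcpy(&hdr, buf + offset, sizeof(hdr));
		if (hdr.length > buf_len - offset - sizeof(hdr)) {
			break;  /* truncated element */
		}
		cb(hdr.type, buf + offset + sizeof(hdr), hdr.length);
		offset += sizeof(hdr) + hdr.length;
	}
}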
if (uap->client_id == 0 || uap->client_id_len != sizeof(uuid_t) || - uap->buffer_size < sizeof(struct necp_stat_counts) || uap->buffer == 0) { + uap->buffer_size < sizeof(struct necp_stat_counts) || uap->buffer == 0) { NECPLOG0(LOG_ERR, "necp_client_copy_route_statistics bad input"); error = EINVAL; goto done; @@ -5307,7 +5300,7 @@ necp_client_copy_route_statistics(__unused struct necp_fd_data *fd_data, struct NECP_CLIENT_ROUTE_LOCK(client); struct necp_stat_counts route_stats = {}; if (client->current_route != NULL && client->current_route->rt_stats != NULL) { - struct nstat_counts *rt_stats = client->current_route->rt_stats; + struct nstat_counts *rt_stats = client->current_route->rt_stats; atomic_get_64(route_stats.necp_stat_rxpackets, &rt_stats->nstat_rxpackets); atomic_get_64(route_stats.necp_stat_rxbytes, &rt_stats->nstat_rxbytes); atomic_get_64(route_stats.necp_stat_txpackets, &rt_stats->nstat_txpackets); @@ -5341,7 +5334,7 @@ necp_client_copy_route_statistics(__unused struct necp_fd_data *fd_data, struct done: *retval = error; - return (error); + return error; } static int @@ -5382,10 +5375,10 @@ necp_client_update_cache(struct necp_fd_data *fd_data, struct necp_client_action // This needs to be changed when TFO/ECN is supported by multiple flows struct necp_client_flow *flow = LIST_FIRST(&flow_registration->flow_list); if (flow == NULL || - (flow->remote_addr.sa.sa_family != AF_INET && - flow->remote_addr.sa.sa_family != AF_INET6) || - (flow->local_addr.sa.sa_family != AF_INET && - flow->local_addr.sa.sa_family != AF_INET6)) { + (flow->remote_addr.sa.sa_family != AF_INET && + flow->remote_addr.sa.sa_family != AF_INET6) || + (flow->local_addr.sa.sa_family != AF_INET && + flow->local_addr.sa.sa_family != AF_INET6)) { error = EINVAL; NECPLOG(LOG_ERR, "necp_client_update_cache no flow error (%d)", error); goto done_unlock; @@ -5395,7 +5388,7 @@ necp_client_update_cache(struct necp_fd_data *fd_data, struct necp_client_action memset(&cache_buffer, 0, sizeof(cache_buffer)); if (uap->buffer_size != sizeof(necp_cache_buffer) || - uap->buffer == USER_ADDR_NULL) { + uap->buffer == USER_ADDR_NULL) { error = EINVAL; goto done_unlock; } @@ -5425,13 +5418,13 @@ necp_client_update_cache(struct necp_fd_data *fd_data, struct necp_client_action if (client->current_route != NULL && client->current_route->rt_ifp != NULL) { if (!client->platform_binary) { - ecn_cache_buffer.necp_tcp_ecn_heuristics_success = 0; + ecn_cache_buffer.necp_tcp_ecn_heuristics_success = 0; } tcp_heuristics_ecn_update(&ecn_cache_buffer, client->current_route->rt_ifp, - (union sockaddr_in_4_6 *)&flow->local_addr); + (union sockaddr_in_4_6 *)&flow->local_addr); } } else if (cache_buffer.necp_cache_buf_type == NECP_CLIENT_CACHE_TYPE_TFO && - cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_TFO_VER_1) { + cache_buffer.necp_cache_buf_ver == NECP_CLIENT_CACHE_TYPE_TFO_VER_1) { if (cache_buffer.necp_cache_buf_size != sizeof(necp_tcp_tfo_cache) || cache_buffer.necp_cache_buf_addr == USER_ADDR_NULL) { error = EINVAL; @@ -5449,14 +5442,14 @@ necp_client_update_cache(struct necp_fd_data *fd_data, struct necp_client_action if (client->current_route != NULL && client->current_route->rt_ifp != NULL) { if (!client->platform_binary) { - tfo_cache_buffer.necp_tcp_tfo_heuristics_success = 0; + tfo_cache_buffer.necp_tcp_tfo_heuristics_success = 0; } tcp_heuristics_tfo_update(&tfo_cache_buffer, client->current_route->rt_ifp, - (union sockaddr_in_4_6 *)&flow->local_addr, - (union sockaddr_in_4_6 *)&flow->remote_addr); + (union 
sockaddr_in_4_6 *)&flow->local_addr, + (union sockaddr_in_4_6 *)&flow->remote_addr); } } else { - error = EINVAL; + error = EINVAL; } done_unlock: NECP_CLIENT_ROUTE_UNLOCK(client); @@ -5464,7 +5457,7 @@ done_unlock: NECP_FD_UNLOCK(fd_data); done: *retval = error; - return (error); + return error; } int @@ -5477,67 +5470,67 @@ necp_client_action(struct proc *p, struct necp_client_action_args *uap, int *ret error = necp_find_fd_data(uap->necp_fd, &fd_data); if (error != 0) { NECPLOG(LOG_ERR, "necp_client_action find fd error (%d)", error); - return (error); + return error; } u_int32_t action = uap->action; switch (action) { - case NECP_CLIENT_ACTION_ADD: { - return_value = necp_client_add(p, fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_REMOVE: { - return_value = necp_client_remove(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_COPY_PARAMETERS: - case NECP_CLIENT_ACTION_COPY_RESULT: - case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT: { - return_value = necp_client_copy(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_COPY_LIST: { - return_value = necp_client_list(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_AGENT: { - return_value = necp_client_agent_action(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_COPY_AGENT: { - return_value = necp_client_copy_agent(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_AGENT_USE: { - return_value = necp_client_agent_use(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_COPY_INTERFACE: { - return_value = necp_client_copy_interface(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS: { - return_value = necp_client_copy_route_statistics(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_UPDATE_CACHE: { - return_value = necp_client_update_cache(fd_data, uap, retval); - break; - } - case NECP_CLIENT_ACTION_COPY_CLIENT_UPDATE: { - return_value = necp_client_copy_client_update(fd_data, uap, retval); - break; - } - default: { - NECPLOG(LOG_ERR, "necp_client_action unknown action (%u)", action); - return_value = EINVAL; - break; - } + case NECP_CLIENT_ACTION_ADD: { + return_value = necp_client_add(p, fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_REMOVE: { + return_value = necp_client_remove(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_COPY_PARAMETERS: + case NECP_CLIENT_ACTION_COPY_RESULT: + case NECP_CLIENT_ACTION_COPY_UPDATED_RESULT: { + return_value = necp_client_copy(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_COPY_LIST: { + return_value = necp_client_list(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_AGENT: { + return_value = necp_client_agent_action(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_COPY_AGENT: { + return_value = necp_client_copy_agent(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_AGENT_USE: { + return_value = necp_client_agent_use(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_COPY_INTERFACE: { + return_value = necp_client_copy_interface(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_COPY_ROUTE_STATISTICS: { + return_value = necp_client_copy_route_statistics(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_UPDATE_CACHE: { + return_value = necp_client_update_cache(fd_data, uap, retval); + break; + } + case NECP_CLIENT_ACTION_COPY_CLIENT_UPDATE: { + return_value = necp_client_copy_client_update(fd_data, uap, retval); + break; + } + default: { + NECPLOG(LOG_ERR, 
"necp_client_action unknown action (%u)", action); + return_value = EINVAL; + break; + } } file_drop(uap->necp_fd); - return (return_value); + return return_value; } #define NECP_MAX_MATCH_POLICY_PARAMETER_SIZE 1024 @@ -5572,7 +5565,7 @@ necp_match_policy(struct proc *p, struct necp_match_policy_args *uap, int32_t *r } error = necp_application_find_policy_match_internal(p, parameters, uap->parameters_size, - &returned_result, NULL, 0, NULL, NULL, NULL, false); + &returned_result, NULL, 0, NULL, NULL, NULL, false); if (error) { goto done; } @@ -5586,7 +5579,7 @@ done: if (parameters != NULL) { FREE(parameters, M_NECP); } - return (error); + return error; } /// Socket operations @@ -5635,12 +5628,12 @@ done: } *buffer_p = local_string; - return (0); + return 0; fail: if (local_string != NULL) { FREE(local_string, M_NECP); } - return (error); + return error; } errno_t @@ -5652,9 +5645,9 @@ necp_set_socket_attributes(struct socket *so, struct sockopt *sopt) if ((SOCK_DOM(so) != PF_INET #if INET6 - && SOCK_DOM(so) != PF_INET6 + && SOCK_DOM(so) != PF_INET6 #endif - )) { + )) { error = EINVAL; goto done; } @@ -5663,7 +5656,7 @@ necp_set_socket_attributes(struct socket *so, struct sockopt *sopt) size_t valsize = sopt->sopt_valsize; if (valsize == 0 || - valsize > ((sizeof(struct necp_tlv_header) + NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) * 2)) { + valsize > ((sizeof(struct necp_tlv_header) + NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH) * 2)) { goto done; } @@ -5697,7 +5690,7 @@ done: FREE(buffer, M_NECP); } - return (error); + return error; } errno_t @@ -5711,9 +5704,9 @@ necp_get_socket_attributes(struct socket *so, struct sockopt *sopt) if ((SOCK_DOM(so) != PF_INET #if INET6 - && SOCK_DOM(so) != PF_INET6 + && SOCK_DOM(so) != PF_INET6 #endif - )) { + )) { error = EINVAL; goto done; } @@ -5737,12 +5730,12 @@ necp_get_socket_attributes(struct socket *so, struct sockopt *sopt) cursor = buffer; if (inp->inp_necp_attributes.inp_domain != NULL) { cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_DOMAIN, strlen(inp->inp_necp_attributes.inp_domain), inp->inp_necp_attributes.inp_domain, - buffer, valsize); + buffer, valsize); } if (inp->inp_necp_attributes.inp_account != NULL) { cursor = necp_buffer_write_tlv(cursor, NECP_TLV_ATTRIBUTE_ACCOUNT, strlen(inp->inp_necp_attributes.inp_account), inp->inp_necp_attributes.inp_account, - buffer, valsize); + buffer, valsize); } error = sooptcopyout(sopt, buffer, valsize); @@ -5754,13 +5747,13 @@ done: FREE(buffer, M_NECP); } - return (error); + return error; } void * necp_create_nexus_assign_message(uuid_t nexus_instance, u_int32_t nexus_port, void *key, uint32_t key_length, - struct necp_client_endpoint *local_endpoint, struct necp_client_endpoint *remote_endpoint, - u_int32_t flow_adv_index, void *flow_stats, size_t *message_length) + struct necp_client_endpoint *local_endpoint, struct necp_client_endpoint *remote_endpoint, + u_int32_t flow_adv_index, void *flow_stats, size_t *message_length) { u_int8_t *buffer = NULL; u_int8_t *cursor = NULL; @@ -5789,12 +5782,12 @@ necp_create_nexus_assign_message(uuid_t nexus_instance, u_int32_t nexus_port, vo valsize += sizeof(struct necp_tlv_header) + sizeof(void *); } if (valsize == 0) { - return (NULL); + return NULL; } MALLOC(buffer, u_int8_t *, valsize, M_NETAGENT, M_WAITOK | M_ZERO); // Use M_NETAGENT area, since it is expected upon free if (buffer == NULL) { - return (NULL); + return NULL; } cursor = buffer; @@ -5820,7 +5813,7 @@ necp_create_nexus_assign_message(uuid_t nexus_instance, u_int32_t nexus_port, vo 
*message_length = valsize; - return (buffer); + return buffer; } void @@ -5880,35 +5873,35 @@ necp_client_init(void) necp_client_fd_size = sizeof(struct necp_fd_data); necp_client_fd_zone = zinit(necp_client_fd_size, - NECP_CLIENT_FD_ZONE_MAX * necp_client_fd_size, - 0, NECP_CLIENT_FD_ZONE_NAME); + NECP_CLIENT_FD_ZONE_MAX * necp_client_fd_size, + 0, NECP_CLIENT_FD_ZONE_NAME); if (necp_client_fd_zone == NULL) { panic("zinit(necp_client_fd) failed\n"); /* NOTREACHED */ } necp_flow_size = sizeof(struct necp_client_flow); - necp_flow_cache = mcache_create(NECP_FLOW_ZONE_NAME, necp_flow_size, sizeof (uint64_t), 0, MCR_SLEEP); + necp_flow_cache = mcache_create(NECP_FLOW_ZONE_NAME, necp_flow_size, sizeof(uint64_t), 0, MCR_SLEEP); if (necp_flow_cache == NULL) { panic("mcache_create(necp_flow_cache) failed\n"); /* NOTREACHED */ } necp_flow_registration_size = sizeof(struct necp_client_flow_registration); - necp_flow_registration_cache = mcache_create(NECP_FLOW_REGISTRATION_ZONE_NAME, necp_flow_registration_size, sizeof (uint64_t), 0, MCR_SLEEP); + necp_flow_registration_cache = mcache_create(NECP_FLOW_REGISTRATION_ZONE_NAME, necp_flow_registration_size, sizeof(uint64_t), 0, MCR_SLEEP); if (necp_flow_registration_cache == NULL) { panic("mcache_create(necp_client_flow_registration) failed\n"); /* NOTREACHED */ } necp_client_update_tcall = thread_call_allocate_with_options(necp_update_all_clients_callout, NULL, - THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE); + THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE); VERIFY(necp_client_update_tcall != NULL); lck_rw_init(&necp_fd_lock, necp_fd_mtx_grp, necp_fd_mtx_attr); lck_rw_init(&necp_observer_lock, necp_fd_mtx_grp, necp_fd_mtx_attr); lck_rw_init(&necp_client_tree_lock, necp_fd_mtx_grp, necp_fd_mtx_attr); - lck_rw_init(&necp_flow_tree_lock, necp_fd_mtx_grp, necp_fd_mtx_attr); + lck_rw_init(&necp_flow_tree_lock, necp_fd_mtx_grp, necp_fd_mtx_attr); lck_rw_init(&necp_collect_stats_list_lock, necp_fd_mtx_grp, necp_fd_mtx_attr); LIST_INIT(&necp_fd_list); @@ -5918,7 +5911,7 @@ necp_client_init(void) RB_INIT(&necp_client_global_tree); RB_INIT(&necp_client_flow_global_tree); - return (0); + return 0; } void diff --git a/bsd/net/net_api_stats.h b/bsd/net/net_api_stats.h index af986b05e..af63a2121 100644 --- a/bsd/net/net_api_stats.h +++ b/bsd/net/net_api_stats.h @@ -27,7 +27,7 @@ */ #ifndef __NET_API_STATS__ -#define __NET_API_STATS__ +#define __NET_API_STATS__ #ifdef PRIVATE #include @@ -41,31 +41,31 @@ struct net_api_stats { /* * Interface Filters */ - int64_t nas_iflt_attach_count; // Currently attached - int64_t nas_iflt_attach_total; // Total number of attachments + int64_t nas_iflt_attach_count; // Currently attached + int64_t nas_iflt_attach_total; // Total number of attachments int64_t nas_iflt_attach_os_total; /* * IP Filters */ - int64_t nas_ipf_add_count; // Currently attached - int64_t nas_ipf_add_total; // Total number of attachments + int64_t nas_ipf_add_count; // Currently attached + int64_t nas_ipf_add_total; // Total number of attachments int64_t nas_ipf_add_os_total; /* * Socket Filters */ - int64_t nas_sfltr_register_count; // Currently attached - int64_t nas_sfltr_register_total; // Total number of attachments + int64_t nas_sfltr_register_count; // Currently attached + int64_t nas_sfltr_register_total; // Total number of attachments int64_t nas_sfltr_register_os_total; /* * Sockets */ - int64_t nas_socket_alloc_total; - int64_t nas_socket_in_kernel_total; + int64_t nas_socket_alloc_total; + int64_t nas_socket_in_kernel_total; 
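/*
 * The net_api_stats counters in the header below are bumped with
 * INC_ATOMIC_INT64_LIM (defined at the end of that header): a
 * compare-and-swap loop that saturates at INT64_MAX instead of wrapping.
 * A userland analogue using C11 atomics -- the kernel macro uses
 * OSCompareAndSwap64 instead:
 */
#include <stdatomic.h>
#include <stdint.h>

static void
inc_int64_lim(_Atomic int64_t *counter)
{
	int64_t val = atomic_load(counter);
	do {
		if (val >= INT64_MAX) {
			return;  /* saturated: leave the counter untouched */
		}
		/* On failure, atomic_compare_exchange_weak reloads val with
		 * the current value and the loop retries. */
	} while (!atomic_compare_exchange_weak(counter, &val, val + 1));
}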
int64_t nas_socket_in_kernel_os_total; - int64_t nas_socket_necp_clientuuid_total; + int64_t nas_socket_necp_clientuuid_total; /* * Sockets per protocol domains @@ -86,14 +86,14 @@ struct net_api_stats { int64_t nas_socket_inet_stream_total; int64_t nas_socket_inet_dgram_total; int64_t nas_socket_inet_dgram_connected; - int64_t nas_socket_inet_dgram_dns; // port 53 - int64_t nas_socket_inet_dgram_no_data; // typically for interface ioctl + int64_t nas_socket_inet_dgram_dns; // port 53 + int64_t nas_socket_inet_dgram_no_data; // typically for interface ioctl int64_t nas_socket_inet6_stream_total; int64_t nas_socket_inet6_dgram_total; int64_t nas_socket_inet6_dgram_connected; - int64_t nas_socket_inet6_dgram_dns; // port 53 - int64_t nas_socket_inet6_dgram_no_data; // typically for interface ioctl + int64_t nas_socket_inet6_dgram_dns; // port 53 + int64_t nas_socket_inet6_dgram_no_data; // typically for interface ioctl /* * Multicast join @@ -104,33 +104,33 @@ struct net_api_stats { /* * IPv6 Extension Header Socket API */ - int64_t nas_sock_inet6_stream_exthdr_in; - int64_t nas_sock_inet6_stream_exthdr_out; - int64_t nas_sock_inet6_dgram_exthdr_in; - int64_t nas_sock_inet6_dgram_exthdr_out; + int64_t nas_sock_inet6_stream_exthdr_in; + int64_t nas_sock_inet6_stream_exthdr_out; + int64_t nas_sock_inet6_dgram_exthdr_in; + int64_t nas_sock_inet6_dgram_exthdr_out; /* * Nexus flows */ - int64_t nas_nx_flow_inet_stream_total; - int64_t nas_nx_flow_inet_dgram_total; + int64_t nas_nx_flow_inet_stream_total; + int64_t nas_nx_flow_inet_dgram_total; - int64_t nas_nx_flow_inet6_stream_total; - int64_t nas_nx_flow_inet6_dgram_total; + int64_t nas_nx_flow_inet6_stream_total; + int64_t nas_nx_flow_inet6_dgram_total; /* * Interfaces */ - int64_t nas_ifnet_alloc_count; - int64_t nas_ifnet_alloc_total; - int64_t nas_ifnet_alloc_os_count; - int64_t nas_ifnet_alloc_os_total; + int64_t nas_ifnet_alloc_count; + int64_t nas_ifnet_alloc_total; + int64_t nas_ifnet_alloc_os_count; + int64_t nas_ifnet_alloc_os_total; /* * PF */ - int64_t nas_pf_addrule_total; - int64_t nas_pf_addrule_os; + int64_t nas_pf_addrule_total; + int64_t nas_pf_addrule_os; /* * vmnet API @@ -144,14 +144,14 @@ extern struct net_api_stats net_api_stats; /* * Increment up to the max value of int64_t */ -#define INC_ATOMIC_INT64_LIM(counter) { \ - int64_t val; \ - do { \ - val = counter; \ - if (val >= INT64_MAX) { \ - break; \ - } \ - } while (!OSCompareAndSwap64(val, val + 1, &(counter))); \ +#define INC_ATOMIC_INT64_LIM(counter) { \ + int64_t val; \ + do { \ + val = counter; \ + if (val >= INT64_MAX) { \ + break; \ + } \ + } while (!OSCompareAndSwap64(val, val + 1, &(counter))); \ } #endif /* XNU_KERNEL_PRIVATE */ diff --git a/bsd/net/net_kev.h b/bsd/net/net_kev.h index f7fd5a699..085db9c3f 100644 --- a/bsd/net/net_kev.h +++ b/bsd/net/net_kev.h @@ -32,115 +32,115 @@ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* Kernel event subclass identifiers for KEV_NETWORK_CLASS */ -#define KEV_INET_SUBCLASS 1 /* inet subclass */ +#define KEV_INET_SUBCLASS 1 /* inet subclass */ /* KEV_INET_SUBCLASS event codes */ -#define KEV_INET_NEW_ADDR 1 /* Userland configured IP address */ -#define KEV_INET_CHANGED_ADDR 2 /* Address changed event */ -#define KEV_INET_ADDR_DELETED 3 /* IPv6 address was deleted */ -#define KEV_INET_SIFDSTADDR 4 /* Dest. 
address was set */ -#define KEV_INET_SIFBRDADDR 5 /* Broadcast address was set */ -#define KEV_INET_SIFNETMASK 6 /* Netmask was set */ -#define KEV_INET_ARPCOLLISION 7 /* ARP collision detected */ +#define KEV_INET_NEW_ADDR 1 /* Userland configured IP address */ +#define KEV_INET_CHANGED_ADDR 2 /* Address changed event */ +#define KEV_INET_ADDR_DELETED 3 /* IPv6 address was deleted */ +#define KEV_INET_SIFDSTADDR 4 /* Dest. address was set */ +#define KEV_INET_SIFBRDADDR 5 /* Broadcast address was set */ +#define KEV_INET_SIFNETMASK 6 /* Netmask was set */ +#define KEV_INET_ARPCOLLISION 7 /* ARP collision detected */ #ifdef __APPLE_API_PRIVATE -#define KEV_INET_PORTINUSE 8 /* use ken_in_portinuse */ +#define KEV_INET_PORTINUSE 8 /* use ken_in_portinuse */ #endif -#define KEV_INET_ARPRTRFAILURE 9 /* ARP resolution failed for router */ -#define KEV_INET_ARPRTRALIVE 10 /* ARP resolution succeeded for router */ +#define KEV_INET_ARPRTRFAILURE 9 /* ARP resolution failed for router */ +#define KEV_INET_ARPRTRALIVE 10 /* ARP resolution succeeded for router */ -#define KEV_DL_SUBCLASS 2 /* Data Link subclass */ +#define KEV_DL_SUBCLASS 2 /* Data Link subclass */ /* * Define Data-Link event subclass, and associated * events. */ -#define KEV_DL_SIFFLAGS 1 -#define KEV_DL_SIFMETRICS 2 -#define KEV_DL_SIFMTU 3 -#define KEV_DL_SIFPHYS 4 -#define KEV_DL_SIFMEDIA 5 -#define KEV_DL_SIFGENERIC 6 -#define KEV_DL_ADDMULTI 7 -#define KEV_DL_DELMULTI 8 -#define KEV_DL_IF_ATTACHED 9 -#define KEV_DL_IF_DETACHING 10 -#define KEV_DL_IF_DETACHED 11 -#define KEV_DL_LINK_OFF 12 -#define KEV_DL_LINK_ON 13 -#define KEV_DL_PROTO_ATTACHED 14 -#define KEV_DL_PROTO_DETACHED 15 -#define KEV_DL_LINK_ADDRESS_CHANGED 16 -#define KEV_DL_WAKEFLAGS_CHANGED 17 -#define KEV_DL_IF_IDLE_ROUTE_REFCNT 18 -#define KEV_DL_IFCAP_CHANGED 19 -#define KEV_DL_LINK_QUALITY_METRIC_CHANGED 20 -#define KEV_DL_NODE_PRESENCE 21 -#define KEV_DL_NODE_ABSENCE 22 -#define KEV_DL_MASTER_ELECTED 23 -#define KEV_DL_ISSUES 24 -#define KEV_DL_IFDELEGATE_CHANGED 25 -#define KEV_DL_AWDL_RESTRICTED 26 -#define KEV_DL_AWDL_UNRESTRICTED 27 -#define KEV_DL_RRC_STATE_CHANGED 28 -#define KEV_DL_QOS_MODE_CHANGED 29 -#define KEV_DL_LOW_POWER_MODE_CHANGED 30 +#define KEV_DL_SIFFLAGS 1 +#define KEV_DL_SIFMETRICS 2 +#define KEV_DL_SIFMTU 3 +#define KEV_DL_SIFPHYS 4 +#define KEV_DL_SIFMEDIA 5 +#define KEV_DL_SIFGENERIC 6 +#define KEV_DL_ADDMULTI 7 +#define KEV_DL_DELMULTI 8 +#define KEV_DL_IF_ATTACHED 9 +#define KEV_DL_IF_DETACHING 10 +#define KEV_DL_IF_DETACHED 11 +#define KEV_DL_LINK_OFF 12 +#define KEV_DL_LINK_ON 13 +#define KEV_DL_PROTO_ATTACHED 14 +#define KEV_DL_PROTO_DETACHED 15 +#define KEV_DL_LINK_ADDRESS_CHANGED 16 +#define KEV_DL_WAKEFLAGS_CHANGED 17 +#define KEV_DL_IF_IDLE_ROUTE_REFCNT 18 +#define KEV_DL_IFCAP_CHANGED 19 +#define KEV_DL_LINK_QUALITY_METRIC_CHANGED 20 +#define KEV_DL_NODE_PRESENCE 21 +#define KEV_DL_NODE_ABSENCE 22 +#define KEV_DL_MASTER_ELECTED 23 +#define KEV_DL_ISSUES 24 +#define KEV_DL_IFDELEGATE_CHANGED 25 +#define KEV_DL_AWDL_RESTRICTED 26 +#define KEV_DL_AWDL_UNRESTRICTED 27 +#define KEV_DL_RRC_STATE_CHANGED 28 +#define KEV_DL_QOS_MODE_CHANGED 29 +#define KEV_DL_LOW_POWER_MODE_CHANGED 30 #ifdef PRIVATE -#define KEV_NETPOLICY_SUBCLASS 3 /* Network policy subclass */ +#define KEV_NETPOLICY_SUBCLASS 3 /* Network policy subclass */ /* KEV_NETPOLICY_SUBCLASS event codes */ -#define KEV_NETPOLICY_IFDENIED 1 /* denied access to interface */ -#define KEV_NETPOLICY_IFFAILED 2 /* failed to bring up interface */ +#define KEV_NETPOLICY_IFDENIED 1 
/* denied access to interface */ +#define KEV_NETPOLICY_IFFAILED 2 /* failed to bring up interface */ -#define KEV_SOCKET_SUBCLASS 4 /* Socket subclass */ +#define KEV_SOCKET_SUBCLASS 4 /* Socket subclass */ /* KEV_SOCKET_SUBCLASS event codes */ -#define KEV_SOCKET_CLOSED 1 /* completely closed by protocol */ +#define KEV_SOCKET_CLOSED 1 /* completely closed by protocol */ #endif /* PRIVATE */ -#define KEV_INET6_SUBCLASS 6 /* inet6 subclass */ +#define KEV_INET6_SUBCLASS 6 /* inet6 subclass */ /* KEV_INET6_SUBCLASS event codes */ -#define KEV_INET6_NEW_USER_ADDR 1 /* Userland configured IPv6 address */ -#define KEV_INET6_CHANGED_ADDR 2 /* Address changed event (future) */ -#define KEV_INET6_ADDR_DELETED 3 /* IPv6 address was deleted */ -#define KEV_INET6_NEW_LL_ADDR 4 /* Autoconf LL address appeared */ -#define KEV_INET6_NEW_RTADV_ADDR 5 /* Autoconf address has appeared */ -#define KEV_INET6_DEFROUTER 6 /* Default router detected */ -#define KEV_INET6_REQUEST_NAT64_PREFIX 7 /* Asking for the NAT64-prefix */ +#define KEV_INET6_NEW_USER_ADDR 1 /* Userland configured IPv6 address */ +#define KEV_INET6_CHANGED_ADDR 2 /* Address changed event (future) */ +#define KEV_INET6_ADDR_DELETED 3 /* IPv6 address was deleted */ +#define KEV_INET6_NEW_LL_ADDR 4 /* Autoconf LL address appeared */ +#define KEV_INET6_NEW_RTADV_ADDR 5 /* Autoconf address has appeared */ +#define KEV_INET6_DEFROUTER 6 /* Default router detected */ +#define KEV_INET6_REQUEST_NAT64_PREFIX 7 /* Asking for the NAT64-prefix */ #ifdef PRIVATE -#define KEV_ND6_SUBCLASS 7 /* IPv6 NDP subclass */ +#define KEV_ND6_SUBCLASS 7 /* IPv6 NDP subclass */ /* KEV_ND6_SUBCLASS event codes */ -#define KEV_ND6_RA 1 -#define KEV_ND6_NDFAILURE 2 /* IPv6 neighbor cache entry expiry */ -#define KEV_ND6_NDALIVE 3 /* IPv6 neighbor reachable */ -#define KEV_ND6_DAD_FAILURE 4 /* IPv6 address failed DAD */ -#define KEV_ND6_DAD_SUCCESS 5 /* IPv6 address completed DAD */ -#define KEV_ND6_ADDR_DETACHED 6 /* IPv6 address is deemed detached */ -#define KEV_ND6_ADDR_DEPRECATED 7 /* IPv6 address's preferred lifetime expired */ -#define KEV_ND6_ADDR_EXPIRED 8 /* IPv6 address has expired */ -#define KEV_ND6_RTR_EXPIRED 9 /* IPv6 default router has expired */ -#define KEV_ND6_PFX_EXPIRED 10 /* IPv6 prefix has expired */ +#define KEV_ND6_RA 1 +#define KEV_ND6_NDFAILURE 2 /* IPv6 neighbor cache entry expiry */ +#define KEV_ND6_NDALIVE 3 /* IPv6 neighbor reachable */ +#define KEV_ND6_DAD_FAILURE 4 /* IPv6 address failed DAD */ +#define KEV_ND6_DAD_SUCCESS 5 /* IPv6 address completed DAD */ +#define KEV_ND6_ADDR_DETACHED 6 /* IPv6 address is deemed detached */ +#define KEV_ND6_ADDR_DEPRECATED 7 /* IPv6 address's preferred lifetime expired */ +#define KEV_ND6_ADDR_EXPIRED 8 /* IPv6 address has expired */ +#define KEV_ND6_RTR_EXPIRED 9 /* IPv6 default router has expired */ +#define KEV_ND6_PFX_EXPIRED 10 /* IPv6 prefix has expired */ -#define KEV_NECP_SUBCLASS 8 /* NECP subclasss */ +#define KEV_NECP_SUBCLASS 8 /* NECP subclasss */ /* KEV_NECP_SUBCLASS event codes */ -#define KEV_NECP_POLICIES_CHANGED 1 +#define KEV_NECP_POLICIES_CHANGED 1 -#define KEV_NETAGENT_SUBCLASS 9 /* Net-Agent subclass */ +#define KEV_NETAGENT_SUBCLASS 9 /* Net-Agent subclass */ /* Network Agent kernel event codes */ -#define KEV_NETAGENT_REGISTERED 1 -#define KEV_NETAGENT_UNREGISTERED 2 -#define KEV_NETAGENT_UPDATED 3 -#define KEV_NETAGENT_UPDATED_INTERFACES 4 +#define KEV_NETAGENT_REGISTERED 1 +#define KEV_NETAGENT_UNREGISTERED 2 +#define KEV_NETAGENT_UPDATED 3 +#define 
KEV_NETAGENT_UPDATED_INTERFACES 4 -#define KEV_LOG_SUBCLASS 10 /* Log subclass */ +#define KEV_LOG_SUBCLASS 10 /* Log subclass */ /* KEV_LOG_SUBCLASS event codes */ -#define IPFWLOGEVENT 0 +#define IPFWLOGEVENT 0 -#define KEV_NETEVENT_SUBCLASS 11 /* Generic Net events subclass */ +#define KEV_NETEVENT_SUBCLASS 11 /* Generic Net events subclass */ /* KEV_NETEVENT_SUBCLASS event codes */ -#define KEV_NETEVENT_APNFALLBACK 1 -#define KEV_NETEVENT_CLAT46_EVENT 2 +#define KEV_NETEVENT_APNFALLBACK 1 +#define KEV_NETEVENT_CLAT46_EVENT 2 -#define KEV_MPTCP_SUBCLASS 12 /* Global MPTCP events subclass */ +#define KEV_MPTCP_SUBCLASS 12 /* Global MPTCP events subclass */ /* KEV_MPTCP_SUBCLASS event codes */ -#define KEV_MPTCP_CELLUSE 1 +#define KEV_MPTCP_CELLUSE 1 #endif /* PRIVATE */ #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ diff --git a/bsd/net/net_osdep.h b/bsd/net/net_osdep.h index 71d87d020..429713f5a 100644 --- a/bsd/net/net_osdep.h +++ b/bsd/net/net_osdep.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -73,7 +73,7 @@ * - ifa_ifwithaf() * bsdi[34], netbsd, and openbsd define it in sys/net/if.c * freebsd (all versions) does not have it. - * + * * - struct rt_addrinfo * bsdi4, netbsd 1.5R and beyond: rti_addrs, rti_info[], rti_flags, rti_ifa, * rti_ifp, and rti_rtm. @@ -263,13 +263,13 @@ struct ifnet; #define HAVE_OLD_BPF -#define ifa_list ifa_link -#define if_addrlist if_addrhead -#define if_list if_link +#define ifa_list ifa_link +#define if_addrlist if_addrhead +#define if_list if_link #define WITH_CONVERT_AND_STRIP_IP_LEN -#if 1 /* at this moment, all OSes do this */ +#if 1 /* at this moment, all OSes do this */ #define WITH_CONVERT_IP_OFF #endif diff --git a/bsd/net/net_perf.c b/bsd/net/net_perf.c index b475644ed..476b111eb 100644 --- a/bsd/net/net_perf.c +++ b/bsd/net/net_perf.c @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
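/*
 * The net_kev.h constants retabbed above identify kernel events by a
 * (vendor, class, subclass, event-code) tuple.  A kernel-side sketch of
 * posting one such event through the kev_post_msg() KPI -- assuming
 * <sys/kern_event.h>; error handling elided:
 */
#include <sys/kern_event.h>

static void
post_inet_new_addr_event(void *data, u_int32_t data_len)
{
	struct kev_msg ev_msg;

	bzero(&ev_msg, sizeof(ev_msg));
	ev_msg.vendor_code  = KEV_VENDOR_APPLE;
	ev_msg.kev_class    = KEV_NETWORK_CLASS;
	ev_msg.kev_subclass = KEV_INET_SUBCLASS;   /* subclass 1, per above */
	ev_msg.event_code   = KEV_INET_NEW_ADDR;   /* code 1, per above */
	ev_msg.dv[0].data_ptr    = data;
	ev_msg.dv[0].data_length = data_len;
	(void)kev_post_msg(&ev_msg);
}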
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -31,16 +31,18 @@ #include static void ip_perf_record_stats(net_perf_t *npp, struct timeval *tv1, - struct timeval *tv2, uint64_t num_pkts); + struct timeval *tv2, uint64_t num_pkts); static void update_bins(net_perf_t *npp, uint64_t bins); -void net_perf_start_time(net_perf_t *npp, struct timeval *tv) +void +net_perf_start_time(net_perf_t *npp, struct timeval *tv) { #pragma unused(npp) microtime(tv); } -void net_perf_measure_time(net_perf_t *npp, struct timeval *start, uint64_t num_pkts) +void +net_perf_measure_time(net_perf_t *npp, struct timeval *start, uint64_t num_pkts) { struct timeval stop; microtime(&stop); @@ -99,6 +101,5 @@ net_perf_histogram(net_perf_t *npp, uint64_t num_pkts) boolean_t net_perf_validate_bins(uint64_t bins) { - return (NET_PERF_BARS == __builtin_popcountll(bins)); + return NET_PERF_BARS == __builtin_popcountll(bins); } - diff --git a/bsd/net/net_perf.h b/bsd/net/net_perf.h index 7c37356b4..0ddf55117 100644 --- a/bsd/net/net_perf.h +++ b/bsd/net/net_perf.h @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
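/*
 * net_perf_validate_bins() in the hunk above reduces to a popcount test:
 * the bin configuration is a 64-bit mask in which each set bit selects one
 * histogram bar boundary (see update_bins()), so a valid mask has exactly
 * NET_PERF_BARS (4) bits set.  Standalone illustration:
 */
#include <stdbool.h>
#include <stdint.h>

#define NET_PERF_BARS 4

static bool
validate_bins(uint64_t bins)
{
	return __builtin_popcountll(bins) == NET_PERF_BARS;
}

/* validate_bins((1ULL << 1) | (1ULL << 8) | (1ULL << 16) | (1ULL << 24))
 * is true (exactly four boundaries); adding or removing any bit makes it
 * false. */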
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _NET_NET_PERF_H_ @@ -39,13 +39,13 @@ #define NET_PERF_BARS 4 typedef struct net_perf { - uint64_t np_total_pkts; /* total packets input or output during measurement */ - uint64_t np_total_usecs; /* microseconds elapsed during measurement */ - uint64_t np_hist1; /* histogram bin 1 */ - uint64_t np_hist2; /* histogram bin 2 */ - uint64_t np_hist3; /* histogram bin 3 */ - uint64_t np_hist4; /* histogram bin 4 */ - uint64_t np_hist5; /* histogram bin 5 */ + uint64_t np_total_pkts; /* total packets input or output during measurement */ + uint64_t np_total_usecs; /* microseconds elapsed during measurement */ + uint64_t np_hist1; /* histogram bin 1 */ + uint64_t np_hist2; /* histogram bin 2 */ + uint64_t np_hist3; /* histogram bin 3 */ + uint64_t np_hist4; /* histogram bin 4 */ + uint64_t np_hist5; /* histogram bin 5 */ uint8_t np_hist_bars[NET_PERF_BARS]; } net_perf_t; @@ -59,4 +59,3 @@ boolean_t net_perf_validate_bins(uint64_t bins); #endif /* KERNEL_PRIVATE */ #endif /* _NET_NET_PERF_H_ */ - diff --git a/bsd/net/net_str_id.c b/bsd/net/net_str_id.c index e1ed7e907..26f008ade 100644 --- a/bsd/net/net_str_id.c +++ b/bsd/net/net_str_id.c @@ -2,7 +2,7 @@ * Copyright (c) 2008,2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
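/*
 * For reference, the measurement pattern behind net_perf_start_time() /
 * net_perf_measure_time() in the net_perf.c hunk above: capture a start
 * timeval, capture a stop timeval, and account the elapsed microseconds
 * plus the packet count into the struct net_perf histogram.  A minimal
 * userland sketch (the kernel uses microtime() and its own timeval
 * arithmetic):
 */
#include <stdint.h>
#include <sys/time.h>

static uint64_t
elapsed_usecs(const struct timeval *start, const struct timeval *stop)
{
	struct timeval delta;
	timersub(stop, start, &delta);  /* delta = *stop - *start */
	return (uint64_t)delta.tv_sec * 1000000ULL + (uint64_t)delta.tv_usec;
}

/* Typical use: gettimeofday(&t0, NULL); ...process a batch of packets...;
 * gettimeofday(&t1, NULL); total_usecs += elapsed_usecs(&t0, &t1); */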
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -41,14 +41,14 @@ #include "net/net_str_id.h" -#define NET_ID_STR_ENTRY_SIZE(__str) \ +#define NET_ID_STR_ENTRY_SIZE(__str) \ ((size_t)&(((struct net_str_id_entry*)0)->nsi_string[0]) + \ strlen(__str) + 1) -#define FIRST_NET_STR_ID 1000 -static SLIST_HEAD(,net_str_id_entry) net_str_id_list = {NULL}; +#define FIRST_NET_STR_ID 1000 +static SLIST_HEAD(, net_str_id_entry) net_str_id_list = {NULL}; decl_lck_mtx_data(static, net_str_id_lock_data); -static lck_mtx_t *net_str_id_lock = &net_str_id_lock_data; +static lck_mtx_t *net_str_id_lock = &net_str_id_lock_data; static u_int32_t nsi_kind_next[NSI_MAX_KIND] = { FIRST_NET_STR_ID, FIRST_NET_STR_ID, FIRST_NET_STR_ID }; static u_int32_t nsi_next_id = FIRST_NET_STR_ID; @@ -58,22 +58,22 @@ extern int sysctl_if_family_ids SYSCTL_HANDLER_ARGS; SYSCTL_DECL(_net_link_generic_system); SYSCTL_PROC(_net_link_generic_system, OID_AUTO, if_family_ids, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_if_family_ids, "S, if_family_id", "Interface Family ID table"); + 0, 0, sysctl_if_family_ids, "S, if_family_id", "Interface Family ID table"); __private_extern__ void net_str_id_init(void) { - lck_grp_attr_t *grp_attrib = NULL; - lck_attr_t *lck_attrb = NULL; - lck_grp_t *lck_group = NULL; - + lck_grp_attr_t *grp_attrib = NULL; + lck_attr_t *lck_attrb = NULL; + lck_grp_t *lck_group = NULL; + grp_attrib = lck_grp_attr_alloc_init(); lck_group = lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib); lck_grp_attr_free(grp_attrib); lck_attrb = lck_attr_alloc_init(); - + lck_mtx_init(net_str_id_lock, lck_group, lck_attrb); - + lck_grp_free(lck_group); lck_attr_free(lck_attrb); } @@ -84,29 +84,30 @@ net_str_id_first_last(u_int32_t *first, u_int32_t *last, u_int32_t kind) *first = FIRST_NET_STR_ID; switch (kind) { - case NSI_MBUF_TAG: - case NSI_VENDOR_CODE: - case NSI_IF_FAM_ID: - *last = nsi_kind_next[kind] - 1; - break; - default: - *last = FIRST_NET_STR_ID - 1; - break; + case NSI_MBUF_TAG: + case NSI_VENDOR_CODE: + case NSI_IF_FAM_ID: + *last = nsi_kind_next[kind] - 1; + break; + default: + *last = FIRST_NET_STR_ID - 1; + break; } } __private_extern__ errno_t -net_str_id_find_internal(const char *string, u_int32_t *out_id, - u_int32_t kind, int create) +net_str_id_find_internal(const char *string, u_int32_t *out_id, + u_int32_t kind, int create) { - struct net_str_id_entry *entry = NULL; - - - if (string == NULL || out_id == NULL || kind >= NSI_MAX_KIND) + struct net_str_id_entry *entry = NULL; + + + if (string == NULL || out_id == NULL || kind >= NSI_MAX_KIND) { return EINVAL; + } *out_id = 0; - + /* Look for an existing entry */ lck_mtx_lock(net_str_id_lock); SLIST_FOREACH(entry, &net_str_id_list, nsi_next) { @@ -114,19 +115,19 @@ net_str_id_find_internal(const char *string, u_int32_t *out_id, break; } } - + if (entry == NULL) { if (create == 0) { lck_mtx_unlock(net_str_id_lock); return ENOENT; } - + entry = kalloc(NET_ID_STR_ENTRY_SIZE(string)); if (entry == NULL) { lck_mtx_unlock(net_str_id_lock); return ENOMEM; } - + strlcpy(entry->nsi_string, string, strlen(string) + 1); entry->nsi_flags = (1 << kind); entry->nsi_id = nsi_next_id++; @@ -138,19 +139,20 @@ net_str_id_find_internal(const char *string, u_int32_t *out_id, return ENOENT; } entry->nsi_flags |= (1 << kind); - if (entry->nsi_id >= nsi_kind_next[kind]) + if (entry->nsi_id >= nsi_kind_next[kind]) { nsi_kind_next[kind] = entry->nsi_id + 1; + } } lck_mtx_unlock(net_str_id_lock); *out_id = entry->nsi_id; - + return 0; } #define ROUNDUP32(a) \ 
- ((a) > 0 ? (1 + (((a) - 1) | (sizeof(uint32_t) - 1))) : sizeof(uint32_t)) + ((a) > 0 ? (1 + (((a) - 1) | (sizeof(uint32_t) - 1))) : sizeof(uint32_t)) int sysctl_if_family_ids SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ @@ -158,25 +160,27 @@ sysctl_if_family_ids SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ #pragma unused(oidp) #pragma unused(arg1) #pragma unused(arg2) - errno_t error = 0; + errno_t error = 0; struct net_str_id_entry *entry = NULL; struct if_family_id *iffmid = NULL; size_t max_size = 0; - + lck_mtx_lock(net_str_id_lock); SLIST_FOREACH(entry, &net_str_id_list, nsi_next) { size_t str_size; size_t iffmid_size; - - if ((entry->nsi_flags & (1 << NSI_IF_FAM_ID)) == 0) + + if ((entry->nsi_flags & (1 << NSI_IF_FAM_ID)) == 0) { continue; - + } + str_size = strlen(entry->nsi_string) + 1; iffmid_size = ROUNDUP32(offsetof(struct net_str_id_entry, nsi_string) + str_size); if (iffmid_size > max_size) { - if (iffmid) + if (iffmid) { _FREE(iffmid, M_TEMP); + } iffmid = _MALLOC(iffmid_size, M_TEMP, M_WAITOK); if (iffmid == NULL) { lck_mtx_unlock(net_str_id_lock); @@ -190,17 +194,17 @@ sysctl_if_family_ids SYSCTL_HANDLER_ARGS /* XXX bad syntax! */ iffmid->iffmid_len = iffmid_size; iffmid->iffmid_id = entry->nsi_id; strlcpy(iffmid->iffmid_str, entry->nsi_string, str_size); - error = SYSCTL_OUT(req, iffmid, iffmid_size); - if (error) { + error = SYSCTL_OUT(req, iffmid, iffmid_size); + if (error) { lck_mtx_unlock(net_str_id_lock); goto done; - } - + } } lck_mtx_unlock(net_str_id_lock); - + done: - if (iffmid) + if (iffmid) { _FREE(iffmid, M_TEMP); + } return error; } diff --git a/bsd/net/net_str_id.h b/bsd/net/net_str_id.h index 35deea089..2d4537652 100644 --- a/bsd/net/net_str_id.h +++ b/bsd/net/net_str_id.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
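/*
 * The ROUNDUP32() macro above rounds a byte count up to the next multiple
 * of sizeof(uint32_t): or-ing (a - 1) with 3 forces the low two bits on,
 * so adding 1 lands on a 4-byte boundary, while already-aligned values are
 * unchanged; the (a) > 0 guard maps 0 to one full word.  Quick
 * demonstration:
 */
#include <assert.h>
#include <stdint.h>

#define ROUNDUP32(a) \
	((a) > 0 ? (1 + (((a) - 1) | (sizeof(uint32_t) - 1))) : sizeof(uint32_t))

int
main(void)
{
	assert(ROUNDUP32(1) == 4);
	assert(ROUNDUP32(4) == 4);   /* aligned input is unchanged */
	assert(ROUNDUP32(5) == 8);
	assert(ROUNDUP32(0) == 4);   /* zero is padded to one word */
	return 0;
}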
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,22 +37,22 @@ #include struct net_str_id_entry { - SLIST_ENTRY(net_str_id_entry) nsi_next; - u_int32_t nsi_flags; - u_int32_t nsi_id; - char nsi_string[1]; /* variable lenght string */ + SLIST_ENTRY(net_str_id_entry) nsi_next; + u_int32_t nsi_flags; + u_int32_t nsi_id; + char nsi_string[1]; /* variable lenght string */ }; enum { - NSI_MBUF_TAG = 0, + NSI_MBUF_TAG = 0, NSI_VENDOR_CODE = 1, - NSI_IF_FAM_ID = 2, - NSI_MAX_KIND + NSI_IF_FAM_ID = 2, + NSI_MAX_KIND }; -extern void net_str_id_first_last(u_int32_t * , u_int32_t *, u_int32_t); +extern void net_str_id_first_last(u_int32_t *, u_int32_t *, u_int32_t); -extern errno_t net_str_id_find_internal(const char * , u_int32_t *, u_int32_t, int); +extern errno_t net_str_id_find_internal(const char *, u_int32_t *, u_int32_t, int); extern void net_str_id_init(void); diff --git a/bsd/net/net_stubs.c b/bsd/net/net_stubs.c index 24bc5426f..27c2f1e03 100644 --- a/bsd/net/net_stubs.c +++ b/bsd/net/net_stubs.c @@ -30,12 +30,12 @@ #if !NETWORKING -#define STUB(name) \ - int name(void); \ - int name(void) \ - { \ - panic("stub called in a config with no networking"); \ - return (0); \ +#define STUB(name) \ + int name(void); \ + int name(void) \ + { \ + panic("stub called in a config with no networking"); \ + return (0); \ } STUB(bpf_attach); @@ -462,7 +462,8 @@ STUB(sock_socket_internal); * Called from vm_pageout.c. Nothing to be done when there's no networking. */ void mbuf_drain(boolean_t); -void mbuf_drain(boolean_t) +void +mbuf_drain(boolean_t) { return; } diff --git a/bsd/net/netsrc.c b/bsd/net/netsrc.c index 17f00fead..1b53940ae 100644 --- a/bsd/net/netsrc.c +++ b/bsd/net/netsrc.c @@ -2,7 +2,7 @@ * Copyright (c) 2011-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -72,36 +72,36 @@ netsrc_ctlconnect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo) * We don't need to do anything here. This callback is only necessary * for ctl_register() to succeed. 
*/ - return (0); + return 0; } static errno_t netsrc_reply(kern_ctl_ref kctl, uint32_t unit, uint16_t version, - struct netsrc_rep *reply) + struct netsrc_rep *reply) { switch (version) { - case NETSRC_CURVERS: - return ctl_enqueuedata(kctl, unit, reply, - sizeof(*reply), CTL_DATA_EOR); - case NETSRC_VERSION1: { - if ((reply->nrp_flags & NETSRC_FLAG_ROUTEABLE) == 0) { - return EHOSTUNREACH; - } -#define NETSRC_FLAG_V1_MASK (NETSRC_IP6_FLAG_TENTATIVE | \ - NETSRC_IP6_FLAG_TEMPORARY | \ - NETSRC_IP6_FLAG_DEPRECATED | \ - NETSRC_IP6_FLAG_OPTIMISTIC | \ - NETSRC_IP6_FLAG_SECURED) - struct netsrc_repv1 v1 = { - .nrp_src = reply->nrp_src, - .nrp_flags = (reply->nrp_flags & NETSRC_FLAG_V1_MASK), - .nrp_label = reply->nrp_label, - .nrp_precedence = reply->nrp_precedence, - .nrp_dstlabel = reply->nrp_dstlabel, - .nrp_dstprecedence = reply->nrp_dstprecedence - }; - return ctl_enqueuedata(kctl, unit, &v1, sizeof(v1), CTL_DATA_EOR); + case NETSRC_CURVERS: + return ctl_enqueuedata(kctl, unit, reply, + sizeof(*reply), CTL_DATA_EOR); + case NETSRC_VERSION1: { + if ((reply->nrp_flags & NETSRC_FLAG_ROUTEABLE) == 0) { + return EHOSTUNREACH; } +#define NETSRC_FLAG_V1_MASK (NETSRC_IP6_FLAG_TENTATIVE | \ + NETSRC_IP6_FLAG_TEMPORARY | \ + NETSRC_IP6_FLAG_DEPRECATED | \ + NETSRC_IP6_FLAG_OPTIMISTIC | \ + NETSRC_IP6_FLAG_SECURED) + struct netsrc_repv1 v1 = { + .nrp_src = reply->nrp_src, + .nrp_flags = (reply->nrp_flags & NETSRC_FLAG_V1_MASK), + .nrp_label = reply->nrp_label, + .nrp_precedence = reply->nrp_precedence, + .nrp_dstlabel = reply->nrp_dstlabel, + .nrp_dstprecedence = reply->nrp_dstprecedence + }; + return ctl_enqueuedata(kctl, unit, &v1, sizeof(v1), CTL_DATA_EOR); + } } return EINVAL; } @@ -114,7 +114,7 @@ netsrc_common(struct rtentry *rt, struct netsrc_rep *reply) } // Gather statistics information - struct nstat_counts *rt_stats = rt->rt_stats; + struct nstat_counts *rt_stats = rt->rt_stats; if (rt_stats) { reply->nrp_min_rtt = rt_stats->nstat_min_rtt; reply->nrp_connection_attempts = rt_stats->nstat_connectattempts; @@ -143,8 +143,8 @@ netsrc_common(struct rtentry *rt, struct netsrc_rep *reply) if (rt->rt_flags & RTF_LOCAL) { reply->nrp_flags |= NETSRC_FLAG_DIRECT; } else if (!(rt->rt_flags & RTF_GATEWAY) && - (rt->rt_ifa && rt->rt_ifa->ifa_ifp && - !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) { + (rt->rt_ifa && rt->rt_ifa->ifa_ifp && + !(rt->rt_ifa->ifa_ifp->if_flags & IFF_POINTOPOINT))) { reply->nrp_flags |= NETSRC_FLAG_DIRECT; } } @@ -196,8 +196,8 @@ netsrc_ipv6(kern_ctl_ref kctl, uint32_t unit, struct netsrc_req *request) int error = 0; struct in6_addr storage, *in6 = in6_selectsrc(&request->nrq_sin6, NULL, - NULL, &ro, NULL, &storage, - request->nrq_ifscope, &error); + NULL, &ro, NULL, &storage, + request->nrq_ifscope, &error); struct netsrc_rep reply = { .nrp_sin6.sin6_family = AF_INET6, .nrp_sin6.sin6_len = sizeof(reply.nrp_sin6), @@ -209,10 +209,10 @@ netsrc_ipv6(kern_ctl_ref kctl, uint32_t unit, struct netsrc_req *request) } if (ro.ro_srcia) { struct in6_ifaddr *ia = (struct in6_ifaddr *)ro.ro_srcia; -#define IA_TO_NRP_FLAG(flag) \ - if (ia->ia6_flags & IN6_IFF_##flag) { \ - reply.nrp_flags |= NETSRC_FLAG_IP6_##flag; \ - } +#define IA_TO_NRP_FLAG(flag) \ + if (ia->ia6_flags & IN6_IFF_##flag) { \ + reply.nrp_flags |= NETSRC_FLAG_IP6_##flag; \ + } IA_TO_NRP_FLAG(TENTATIVE); IA_TO_NRP_FLAG(TEMPORARY); IA_TO_NRP_FLAG(DEPRECATED); @@ -235,8 +235,8 @@ netsrc_ipv4(kern_ctl_ref kctl, uint32_t unit, struct netsrc_req *request) // Look up the route lck_mtx_lock(rnh_lock); struct rtentry *rt = 
rt_lookup(TRUE, &request->nrq_dst.sa, - NULL, rt_tables[AF_INET], - request->nrq_ifscope); + NULL, rt_tables[AF_INET], + request->nrq_ifscope); lck_mtx_unlock(rnh_lock); // Look up the ifa @@ -279,9 +279,9 @@ netsrc_ctlsend(kern_ctl_ref kctl, uint32_t unit, void *uinfo, mbuf_t m, error = EINVAL; goto out; } - if (mbuf_len(m) >= sizeof(*nrq)) + if (mbuf_len(m) >= sizeof(*nrq)) { nrq = mbuf_data(m); - else { + } else { mbuf_copydata(m, 0, sizeof(storage), &storage); nrq = &storage; } @@ -291,8 +291,8 @@ netsrc_ctlsend(kern_ctl_ref kctl, uint32_t unit, void *uinfo, mbuf_t m, } switch (nrq->nrq_sin.sin_family) { case AF_INET: - if (nrq->nrq_sin.sin_len < sizeof (nrq->nrq_sin) || - nrq->nrq_sin.sin_addr.s_addr == INADDR_ANY) { + if (nrq->nrq_sin.sin_len < sizeof(nrq->nrq_sin) || + nrq->nrq_sin.sin_addr.s_addr == INADDR_ANY) { error = EINVAL; } else { error = netsrc_ipv4(kctl, unit, nrq); @@ -300,7 +300,7 @@ netsrc_ctlsend(kern_ctl_ref kctl, uint32_t unit, void *uinfo, mbuf_t m, break; case AF_INET6: if (nrq->nrq_sin6.sin6_len < sizeof(nrq->nrq_sin6) || - IN6_IS_ADDR_UNSPECIFIED(&nrq->nrq_sin6.sin6_addr)) { + IN6_IS_ADDR_UNSPECIFIED(&nrq->nrq_sin6.sin6_addr)) { error = EINVAL; } else { error = netsrc_ipv6(kctl, unit, nrq); @@ -313,8 +313,7 @@ netsrc_ctlsend(kern_ctl_ref kctl, uint32_t unit, void *uinfo, mbuf_t m, out: mbuf_freem(m); - return (error); - + return error; } __private_extern__ void @@ -327,7 +326,7 @@ netsrc_init(void) strlcpy(netsrc_ctl.ctl_name, NETSRC_CTLNAME, sizeof(netsrc_ctl.ctl_name)); - static kern_ctl_ref netsrc_ctlref = NULL; + static kern_ctl_ref netsrc_ctlref = NULL; errno_t error = ctl_register(&netsrc_ctl, &netsrc_ctlref); if (error != 0) { printf("%s: ctl_register failed %d\n", __func__, error); diff --git a/bsd/net/netsrc.h b/bsd/net/netsrc.h index 02d43fa19..6372a523d 100644 --- a/bsd/net/netsrc.h +++ b/bsd/net/netsrc.h @@ -30,11 +30,11 @@ #include -#define NETSRC_CTLNAME "com.apple.netsrc" +#define NETSRC_CTLNAME "com.apple.netsrc" -#define NETSRC_VERSION1 1 -#define NETSRC_VERSION2 2 -#define NETSRC_CURVERS NETSRC_VERSION2 +#define NETSRC_VERSION1 1 +#define NETSRC_VERSION2 2 +#define NETSRC_CURVERS NETSRC_VERSION2 struct netsrc_req { unsigned int nrq_ver; @@ -45,25 +45,25 @@ struct netsrc_req { }; }; -#define nrq_sin _usa.sin -#define nrq_sin6 _usa.sin6 +#define nrq_sin _usa.sin +#define nrq_sin6 _usa.sin6 struct netsrc_repv1 { union { union sockaddr_in_4_6 nrp_src; union sockaddr_in_4_6 _usa; }; -#define NETSRC_IP6_FLAG_TENTATIVE 0x0001 -#define NETSRC_IP6_FLAG_TEMPORARY 0x0002 -#define NETSRC_IP6_FLAG_DEPRECATED 0x0004 -#define NETSRC_IP6_FLAG_OPTIMISTIC 0x0008 -#define NETSRC_IP6_FLAG_SECURED 0x0010 +#define NETSRC_IP6_FLAG_TENTATIVE 0x0001 +#define NETSRC_IP6_FLAG_TEMPORARY 0x0002 +#define NETSRC_IP6_FLAG_DEPRECATED 0x0004 +#define NETSRC_IP6_FLAG_OPTIMISTIC 0x0008 +#define NETSRC_IP6_FLAG_SECURED 0x0010 uint16_t nrp_flags; uint16_t nrp_label; uint16_t nrp_precedence; uint16_t nrp_dstlabel; uint16_t nrp_dstprecedence; - uint16_t nrp_unused; // Padding + uint16_t nrp_unused; // Padding }; struct netsrc_repv2 { @@ -75,16 +75,16 @@ struct netsrc_repv2 { uint32_t nrp_connection_attempts; uint32_t nrp_connection_successes; // Continues from above, fixes naming -#define NETSRC_FLAG_IP6_TENTATIVE NETSRC_IP6_FLAG_TENTATIVE -#define NETSRC_FLAG_IP6_TEMPORARY NETSRC_IP6_FLAG_TEMPORARY -#define NETSRC_FLAG_IP6_DEPRECATED NETSRC_IP6_FLAG_DEPRECATED -#define NETSRC_FLAG_IP6_OPTIMISTIC NETSRC_IP6_FLAG_OPTIMISTIC -#define NETSRC_FLAG_IP6_SECURED NETSRC_IP6_FLAG_SECURED 
-#define NETSRC_FLAG_ROUTEABLE 0x00000020 -#define NETSRC_FLAG_DIRECT 0x00000040 -#define NETSRC_FLAG_AWDL 0x00000080 -#define NETSRC_FLAG_IP6_DYNAMIC 0x00000100 -#define NETSRC_FLAG_IP6_AUTOCONF 0x00000200 +#define NETSRC_FLAG_IP6_TENTATIVE NETSRC_IP6_FLAG_TENTATIVE +#define NETSRC_FLAG_IP6_TEMPORARY NETSRC_IP6_FLAG_TEMPORARY +#define NETSRC_FLAG_IP6_DEPRECATED NETSRC_IP6_FLAG_DEPRECATED +#define NETSRC_FLAG_IP6_OPTIMISTIC NETSRC_IP6_FLAG_OPTIMISTIC +#define NETSRC_FLAG_IP6_SECURED NETSRC_IP6_FLAG_SECURED +#define NETSRC_FLAG_ROUTEABLE 0x00000020 +#define NETSRC_FLAG_DIRECT 0x00000040 +#define NETSRC_FLAG_AWDL 0x00000080 +#define NETSRC_FLAG_IP6_DYNAMIC 0x00000100 +#define NETSRC_FLAG_IP6_AUTOCONF 0x00000200 uint32_t nrp_flags; uint16_t nrp_label; uint16_t nrp_precedence; @@ -96,8 +96,8 @@ struct netsrc_repv2 { #define netsrc_rep netsrc_repv2 -#define nrp_sin nrp_src.sin -#define nrp_sin6 nrp_src.sin6 +#define nrp_sin nrp_src.sin +#define nrp_sin6 nrp_src.sin6 #ifdef KERNEL_PRIVATE __private_extern__ void netsrc_init(void); diff --git a/bsd/net/network_agent.c b/bsd/net/network_agent.c index a52cd6506..d54016311 100644 --- a/bsd/net/network_agent.c +++ b/bsd/net/network_agent.c @@ -51,21 +51,21 @@ SYSCTL_NODE(_net, OID_AUTO, netagent, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "NetworkAg SYSCTL_INT(_net_netagent, OID_AUTO, debug, CTLFLAG_LOCKED | CTLFLAG_RW, &netagent_debug, 0, ""); static int netagent_registered_count = 0; -SYSCTL_INT(_net_netagent, OID_AUTO, registered_count , CTLFLAG_RD | CTLFLAG_LOCKED, - &netagent_registered_count, 0, ""); +SYSCTL_INT(_net_netagent, OID_AUTO, registered_count, CTLFLAG_RD | CTLFLAG_LOCKED, + &netagent_registered_count, 0, ""); static int netagent_active_count = 0; -SYSCTL_INT(_net_netagent, OID_AUTO, active_count , CTLFLAG_RD | CTLFLAG_LOCKED, - &netagent_active_count, 0, ""); +SYSCTL_INT(_net_netagent, OID_AUTO, active_count, CTLFLAG_RD | CTLFLAG_LOCKED, + &netagent_active_count, 0, ""); -#define NETAGENTLOG(level, format, ...) do { \ - if (level <= netagent_debug) \ - log((level > LOG_NOTICE ? LOG_NOTICE : level), "%s: " format "\n", __FUNCTION__, __VA_ARGS__); \ +#define NETAGENTLOG(level, format, ...) do { \ + if (level <= netagent_debug) \ + log((level > LOG_NOTICE ? LOG_NOTICE : level), "%s: " format "\n", __FUNCTION__, __VA_ARGS__); \ } while (0) -#define NETAGENTLOG0(level, msg) do { \ - if (level <= netagent_debug) \ - log((level > LOG_NOTICE ? LOG_NOTICE : level), "%s: %s\n", __FUNCTION__, msg); \ +#define NETAGENTLOG0(level, msg) do { \ + if (level <= netagent_debug) \ + log((level > LOG_NOTICE ? 
LOG_NOTICE : level), "%s: %s\n", __FUNCTION__, msg); \ } while (0) struct netagent_client { @@ -96,8 +96,8 @@ struct netagent_session { }; typedef enum { - kNetagentErrorDomainPOSIX = 0, - kNetagentErrorDomainUserDefined = 1, + kNetagentErrorDomainPOSIX = 0, + kNetagentErrorDomainUserDefined = 1, } netagent_error_domain_t; static LIST_HEAD(_netagent_list, netagent_wrapper) master_netagent_list; @@ -105,25 +105,25 @@ static LIST_HEAD(_netagent_list, netagent_wrapper) master_netagent_list; // Protected by netagent_lock static u_int32_t g_next_generation = 1; -static kern_ctl_ref netagent_kctlref; -static u_int32_t netagent_family; -static OSMallocTag netagent_malloc_tag; -static lck_grp_attr_t *netagent_grp_attr = NULL; -static lck_attr_t *netagent_mtx_attr = NULL; -static lck_grp_t *netagent_mtx_grp = NULL; +static kern_ctl_ref netagent_kctlref; +static u_int32_t netagent_family; +static OSMallocTag netagent_malloc_tag; +static lck_grp_attr_t *netagent_grp_attr = NULL; +static lck_attr_t *netagent_mtx_attr = NULL; +static lck_grp_t *netagent_mtx_grp = NULL; decl_lck_rw_data(static, netagent_lock); static errno_t netagent_register_control(void); static errno_t netagent_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, - void **unitinfo); + void **unitinfo); static errno_t netagent_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo); static errno_t netagent_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - mbuf_t m, int flags); + mbuf_t m, int flags); static void netagent_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flags); static errno_t netagent_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int opt, void *data, size_t *len); + int opt, void *data, size_t *len); static errno_t netagent_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int opt, void *data, size_t len); + int opt, void *data, size_t len); static int netagent_send_ctl_data(u_int32_t control_unit, u_int8_t *buffer, size_t buffer_size); @@ -132,34 +132,34 @@ static void netagent_delete_session(struct netagent_session *session); // Register static void netagent_handle_register_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + u_int32_t payload_length, mbuf_t packet, int offset); static errno_t netagent_handle_register_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length); + u_int32_t payload_length); // Unregister static void netagent_handle_unregister_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + u_int32_t payload_length, mbuf_t packet, int offset); static errno_t netagent_handle_unregister_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length); + u_int32_t payload_length); // Update static void netagent_handle_update_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + u_int32_t payload_length, mbuf_t packet, int offset); static errno_t netagent_handle_update_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length); + u_int32_t payload_length); // Assign nexus static void netagent_handle_assign_nexus_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + u_int32_t payload_length, mbuf_t packet, int offset); static errno_t 
netagent_handle_assign_nexus_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length); + u_int32_t payload_length); // Set/get assert count static errno_t netagent_handle_use_count_setopt(struct netagent_session *session, u_int8_t *payload, size_t payload_length); static errno_t netagent_handle_use_count_getopt(struct netagent_session *session, u_int8_t *buffer, size_t *buffer_length); static void netagent_handle_get(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset); + u_int32_t payload_length, mbuf_t packet, int offset); static struct netagent_wrapper *netagent_find_agent_with_uuid(uuid_t uuid); @@ -217,14 +217,14 @@ done: netagent_kctlref = NULL; } } - return (result); + return result; } static errno_t netagent_register_control(void) { - struct kern_ctl_reg kern_ctl; - errno_t result = 0; + struct kern_ctl_reg kern_ctl; + errno_t result = 0; // Create a tag to allocate memory netagent_malloc_tag = OSMalloc_Tagalloc(NETAGENT_CONTROL_NAME, OSMT_DEFAULT); @@ -233,7 +233,7 @@ netagent_register_control(void) result = mbuf_tag_id_find(NETAGENT_CONTROL_NAME, &netagent_family); if (result != 0) { NETAGENTLOG(LOG_ERR, "mbuf_tag_id_find_internal failed: %d", result); - return (result); + return result; } bzero(&kern_ctl, sizeof(kern_ctl)); @@ -252,10 +252,10 @@ netagent_register_control(void) result = ctl_register(&kern_ctl, &netagent_kctlref); if (result != 0) { NETAGENTLOG(LOG_ERR, "ctl_register failed: %d", result); - return (result); + return result; } - return (0); + return 0; } static errno_t @@ -265,10 +265,10 @@ netagent_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unit *unitinfo = netagent_create_session(sac->sc_unit); if (*unitinfo == NULL) { // Could not allocate session - return (ENOBUFS); + return ENOBUFS; } - return (0); + return 0; } static errno_t @@ -280,7 +280,7 @@ netagent_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo) netagent_delete_session(session); } - return (0); + return 0; } // Kernel events @@ -296,13 +296,13 @@ netagent_post_event(uuid_t agent_uuid, u_int32_t event_code, bool update_necp) struct kev_netagent_data event_data; - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_NETAGENT_SUBCLASS; - ev_msg.event_code = event_code; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_NETAGENT_SUBCLASS; + ev_msg.event_code = event_code; uuid_copy(event_data.netagent_uuid, agent_uuid); - ev_msg.dv[0].data_ptr = &event_data; + ev_msg.dv[0].data_ptr = &event_data; ev_msg.dv[0].data_length = sizeof(event_data); kev_post_msg(&ev_msg); @@ -311,21 +311,21 @@ netagent_post_event(uuid_t agent_uuid, u_int32_t event_code, bool update_necp) // Message handling static u_int8_t * netagent_buffer_write_message_header(u_int8_t *buffer, u_int8_t message_type, u_int8_t flags, - u_int32_t message_id, u_int32_t error, u_int32_t payload_length) + u_int32_t message_id, u_int32_t error, u_int32_t payload_length) { ((struct netagent_message_header *)(void *)buffer)->message_type = message_type; ((struct netagent_message_header *)(void *)buffer)->message_flags = flags; ((struct netagent_message_header *)(void *)buffer)->message_id = message_id; ((struct netagent_message_header *)(void *)buffer)->message_error = error; ((struct netagent_message_header *)(void *)buffer)->message_payload_length = payload_length; - return (buffer + sizeof(struct 
netagent_message_header)); + return buffer + sizeof(struct netagent_message_header); } static int netagent_send_ctl_data(u_int32_t control_unit, u_int8_t *buffer, size_t buffer_size) { if (netagent_kctlref == NULL || control_unit == 0 || buffer == NULL || buffer_size == 0) { - return (EINVAL); + return EINVAL; } return ctl_enqueuedata(netagent_kctlref, control_unit, buffer, buffer_size, CTL_DATA_EOR); @@ -341,7 +341,7 @@ netagent_send_trigger(struct netagent_wrapper *wrapper, struct proc *p, u_int32_ MALLOC(trigger, u_int8_t *, trigger_size, M_NETAGENT, M_WAITOK); if (trigger == NULL) { - return (ENOMEM); + return ENOMEM; } (void)netagent_buffer_write_message_header(trigger, trigger_type, 0, 0, 0, sizeof(struct netagent_trigger_message)); @@ -361,7 +361,7 @@ netagent_send_trigger(struct netagent_wrapper *wrapper, struct proc *p, u_int32_ } FREE(trigger, M_NETAGENT); - return (error); + return error; } static int @@ -374,7 +374,7 @@ netagent_send_client_message(struct netagent_wrapper *wrapper, uuid_t client_id, MALLOC(message, u_int8_t *, message_size, M_NETAGENT, M_WAITOK); if (message == NULL) { - return (ENOMEM); + return ENOMEM; } (void)netagent_buffer_write_message_header(message, message_type, 0, 0, 0, sizeof(struct netagent_client_message)); @@ -387,7 +387,7 @@ netagent_send_client_message(struct netagent_wrapper *wrapper, uuid_t client_id, } FREE(message, M_NETAGENT); - return (error); + return error; } static int @@ -398,7 +398,7 @@ netagent_send_success_response(struct netagent_session *session, u_int8_t messag size_t response_size = sizeof(struct netagent_message_header); MALLOC(response, u_int8_t *, response_size, M_NETAGENT, M_WAITOK); if (response == NULL) { - return (ENOMEM); + return ENOMEM; } (void)netagent_buffer_write_message_header(response, message_type, NETAGENT_MESSAGE_FLAGS_RESPONSE, message_id, 0, 0); @@ -407,12 +407,12 @@ netagent_send_success_response(struct netagent_session *session, u_int8_t messag } FREE(response, M_NETAGENT); - return (error); + return error; } static int netagent_send_error_response(struct netagent_session *session, u_int8_t message_type, - u_int32_t message_id, u_int32_t error_code) + u_int32_t message_id, u_int32_t error_code) { int error = 0; u_int8_t *response = NULL; @@ -420,22 +420,22 @@ netagent_send_error_response(struct netagent_session *session, u_int8_t message_ if (session == NULL) { NETAGENTLOG0(LOG_ERR, "Got a NULL session"); - return (EINVAL); + return EINVAL; } MALLOC(response, u_int8_t *, response_size, M_NETAGENT, M_WAITOK); if (response == NULL) { - return (ENOMEM); + return ENOMEM; } (void)netagent_buffer_write_message_header(response, message_type, NETAGENT_MESSAGE_FLAGS_RESPONSE, - message_id, error_code, 0); + message_id, error_code, 0); if ((error = netagent_send_ctl_data(session->control_unit, (u_int8_t *)response, response_size))) { NETAGENTLOG0(LOG_ERR, "Failed to send response"); } FREE(response, M_NETAGENT); - return (error); + return error; } static errno_t @@ -454,7 +454,7 @@ netagent_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t p if (mbuf_pkthdr_len(packet) < sizeof(header)) { NETAGENTLOG(LOG_ERR, "Got a bad packet, length (%lu) < sizeof header (%lu)", - mbuf_pkthdr_len(packet), sizeof(header)); + mbuf_pkthdr_len(packet), sizeof(header)); error = EINVAL; goto done; } @@ -467,50 +467,50 @@ netagent_ctl_send(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, mbuf_t p } switch (header.message_type) { - case NETAGENT_MESSAGE_TYPE_REGISTER: { - netagent_handle_register_message(session, 
header.message_id, header.message_payload_length, - packet, sizeof(header)); - break; - } - case NETAGENT_MESSAGE_TYPE_UNREGISTER: { - netagent_handle_unregister_message(session, header.message_id, header.message_payload_length, - packet, sizeof(header)); - break; - } - case NETAGENT_MESSAGE_TYPE_UPDATE: { - netagent_handle_update_message(session, header.message_id, header.message_payload_length, - packet, sizeof(header)); - break; - } - case NETAGENT_MESSAGE_TYPE_GET: { - netagent_handle_get(session, header.message_id, header.message_payload_length, - packet, sizeof(header)); - break; - } - case NETAGENT_MESSAGE_TYPE_ASSERT: { - NETAGENTLOG0(LOG_ERR, "NETAGENT_MESSAGE_TYPE_ASSERT no longer supported"); - break; - } - case NETAGENT_MESSAGE_TYPE_UNASSERT: { - NETAGENTLOG0(LOG_ERR, "NETAGENT_MESSAGE_TYPE_UNASSERT no longer supported"); - break; - } - case NETAGENT_MESSAGE_TYPE_ASSIGN_NEXUS: { - netagent_handle_assign_nexus_message(session, header.message_id, header.message_payload_length, - packet, sizeof(header)); - break; - } - default: { - NETAGENTLOG(LOG_ERR, "Received unknown message type %d", header.message_type); - netagent_send_error_response(session, header.message_type, header.message_id, - NETAGENT_MESSAGE_ERROR_UNKNOWN_TYPE); - break; - } + case NETAGENT_MESSAGE_TYPE_REGISTER: { + netagent_handle_register_message(session, header.message_id, header.message_payload_length, + packet, sizeof(header)); + break; + } + case NETAGENT_MESSAGE_TYPE_UNREGISTER: { + netagent_handle_unregister_message(session, header.message_id, header.message_payload_length, + packet, sizeof(header)); + break; + } + case NETAGENT_MESSAGE_TYPE_UPDATE: { + netagent_handle_update_message(session, header.message_id, header.message_payload_length, + packet, sizeof(header)); + break; + } + case NETAGENT_MESSAGE_TYPE_GET: { + netagent_handle_get(session, header.message_id, header.message_payload_length, + packet, sizeof(header)); + break; + } + case NETAGENT_MESSAGE_TYPE_ASSERT: { + NETAGENTLOG0(LOG_ERR, "NETAGENT_MESSAGE_TYPE_ASSERT no longer supported"); + break; + } + case NETAGENT_MESSAGE_TYPE_UNASSERT: { + NETAGENTLOG0(LOG_ERR, "NETAGENT_MESSAGE_TYPE_UNASSERT no longer supported"); + break; + } + case NETAGENT_MESSAGE_TYPE_ASSIGN_NEXUS: { + netagent_handle_assign_nexus_message(session, header.message_id, header.message_payload_length, + packet, sizeof(header)); + break; + } + default: { + NETAGENTLOG(LOG_ERR, "Received unknown message type %d", header.message_type); + netagent_send_error_response(session, header.message_type, header.message_id, + NETAGENT_MESSAGE_ERROR_UNKNOWN_TYPE); + break; + } } done: mbuf_freem(packet); - return (error); + return error; } static void @@ -522,7 +522,7 @@ netagent_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int flag static errno_t netagent_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, - void *data, size_t *len) + void *data, size_t *len) { #pragma unused(kctlref, unit) struct netagent_session *session = (struct netagent_session *)unitinfo; @@ -535,24 +535,24 @@ netagent_ctl_getopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int op } switch (opt) { - case NETAGENT_OPTION_TYPE_USE_COUNT: { - NETAGENTLOG0(LOG_DEBUG, "Request to get use count"); - error = netagent_handle_use_count_getopt(session, data, len); - } - break; - default: - NETAGENTLOG0(LOG_ERR, "Received unknown option"); - error = ENOPROTOOPT; + case NETAGENT_OPTION_TYPE_USE_COUNT: { + NETAGENTLOG0(LOG_DEBUG, "Request to get use count"); + error = 
netagent_handle_use_count_getopt(session, data, len); + } + break; + default: + NETAGENTLOG0(LOG_ERR, "Received unknown option"); + error = ENOPROTOOPT; break; } done: - return (error); + return error; } static errno_t netagent_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int opt, - void *data, size_t len) + void *data, size_t len) { #pragma unused(kctlref, unit) struct netagent_session *session = (struct netagent_session *)unitinfo; @@ -565,39 +565,39 @@ netagent_ctl_setopt(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, int op } switch (opt) { - case NETAGENT_OPTION_TYPE_REGISTER: { - NETAGENTLOG0(LOG_DEBUG, "Request for registration"); - error = netagent_handle_register_setopt(session, data, len); - } - break; - case NETAGENT_OPTION_TYPE_UPDATE: { - NETAGENTLOG0(LOG_DEBUG, "Request for update"); - error = netagent_handle_update_setopt(session, data, len); - } - break; - case NETAGENT_OPTION_TYPE_UNREGISTER: { - NETAGENTLOG0(LOG_DEBUG, "Request for unregistration"); - error = netagent_handle_unregister_setopt(session, data, len); - } - break; - case NETAGENT_OPTION_TYPE_ASSIGN_NEXUS: { - NETAGENTLOG0(LOG_DEBUG, "Request for assigning nexus"); - error = netagent_handle_assign_nexus_setopt(session, data, len); - } - break; - case NETAGENT_OPTION_TYPE_USE_COUNT: { - NETAGENTLOG0(LOG_DEBUG, "Request to set use count"); - error = netagent_handle_use_count_setopt(session, data, len); - } - break; - default: - NETAGENTLOG0(LOG_ERR, "Received unknown option"); - error = ENOPROTOOPT; + case NETAGENT_OPTION_TYPE_REGISTER: { + NETAGENTLOG0(LOG_DEBUG, "Request for registration"); + error = netagent_handle_register_setopt(session, data, len); + } + break; + case NETAGENT_OPTION_TYPE_UPDATE: { + NETAGENTLOG0(LOG_DEBUG, "Request for update"); + error = netagent_handle_update_setopt(session, data, len); + } + break; + case NETAGENT_OPTION_TYPE_UNREGISTER: { + NETAGENTLOG0(LOG_DEBUG, "Request for unregistration"); + error = netagent_handle_unregister_setopt(session, data, len); + } + break; + case NETAGENT_OPTION_TYPE_ASSIGN_NEXUS: { + NETAGENTLOG0(LOG_DEBUG, "Request for assigning nexus"); + error = netagent_handle_assign_nexus_setopt(session, data, len); + } + break; + case NETAGENT_OPTION_TYPE_USE_COUNT: { + NETAGENTLOG0(LOG_DEBUG, "Request to set use count"); + error = netagent_handle_use_count_setopt(session, data, len); + } + break; + default: + NETAGENTLOG0(LOG_ERR, "Received unknown option"); + error = ENOPROTOOPT; break; } done: - return (error); + return error; } // Session Management @@ -615,10 +615,11 @@ netagent_create_session(u_int32_t control_unit) new_session->control_unit = control_unit; new_session->wrapper = NULL; done: - return (new_session); + return new_session; } -netagent_session_t netagent_create(netagent_event_f event_handler, void *context) +netagent_session_t +netagent_create(netagent_event_f event_handler, void *context) { struct netagent_session *session = netagent_create_session(0); if (session == NULL) { @@ -659,7 +660,7 @@ netagent_unregister_session_wrapper(struct netagent_session *session) netagent_registered_count--; } if ((session->wrapper->netagent.netagent_flags & NETAGENT_FLAG_ACTIVE) && - netagent_active_count > 0) { + netagent_active_count > 0) { netagent_active_count--; } @@ -690,7 +691,8 @@ netagent_delete_session(struct netagent_session *session) } } -void netagent_destroy(netagent_session_t session) +void +netagent_destroy(netagent_session_t session) { return netagent_delete_session((struct netagent_session *)session); } @@ -708,10 
+710,10 @@ netagent_packet_get_netagent_data_size(mbuf_t packet, int offset, int *err) error = mbuf_copydata(packet, offset, sizeof(netagent_peek), &netagent_peek); if (error) { *err = ENOENT; - return (-1); + return -1; } - return (netagent_peek.netagent_data_size); + return netagent_peek.netagent_data_size; } static errno_t @@ -764,7 +766,7 @@ netagent_register(netagent_session_t _session, struct netagent *agent) data_size = agent->netagent_data_size; if (data_size < 0 || data_size > NETAGENT_MAX_DATA_SIZE) { NETAGENTLOG(LOG_ERR, "Register message size could not be read, data_size %d", - data_size); + data_size); return EINVAL; } @@ -791,7 +793,7 @@ netagent_register(netagent_session_t _session, struct netagent *agent) static errno_t netagent_handle_register_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length) + u_int32_t payload_length) { int data_size = 0; struct netagent_wrapper *new_wrapper = NULL; @@ -818,7 +820,7 @@ netagent_handle_register_setopt(struct netagent_session *session, u_int8_t *payl if (payload_length < sizeof(struct netagent)) { NETAGENTLOG(LOG_ERR, "Register message size too small for agent: (%u < %lu)", - payload_length, sizeof(struct netagent)); + payload_length, sizeof(struct netagent)); response_error = EINVAL; goto done; } @@ -834,7 +836,7 @@ netagent_handle_register_setopt(struct netagent_session *session, u_int8_t *payl NETAGENTLOG(LOG_ERR, "Mismatch between data size and payload length (%lu != %u)", (sizeof(struct netagent) + data_size), payload_length); response_error = EINVAL; goto done; - } + } MALLOC(new_wrapper, struct netagent_wrapper *, sizeof(*new_wrapper) + data_size, M_NETAGENT, M_WAITOK); if (new_wrapper == NULL) { @@ -861,7 +863,7 @@ done: static void netagent_handle_register_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + u_int32_t payload_length, mbuf_t packet, int offset) { int error; int data_size = 0; @@ -884,7 +886,7 @@ netagent_handle_register_message(struct netagent_session *session, u_int32_t mes if (payload_length < sizeof(struct netagent)) { NETAGENTLOG(LOG_ERR, "Register message size too small for agent: (%u < %lu)", - payload_length, sizeof(struct netagent)); + payload_length, sizeof(struct netagent)); response_error = NETAGENT_MESSAGE_ERROR_INVALID_DATA; goto fail; } @@ -892,7 +894,7 @@ netagent_handle_register_message(struct netagent_session *session, u_int32_t mes data_size = netagent_packet_get_netagent_data_size(packet, offset, &error); if (error || data_size < 0 || data_size > NETAGENT_MAX_DATA_SIZE) { NETAGENTLOG(LOG_ERR, "Register message size could not be read, error %d data_size %d", - error, data_size); + error, data_size); response_error = NETAGENT_MESSAGE_ERROR_INVALID_DATA; goto fail; } @@ -907,7 +909,7 @@ netagent_handle_register_message(struct netagent_session *session, u_int32_t mes memset(new_wrapper, 0, sizeof(*new_wrapper) + data_size); error = mbuf_copydata(packet, offset, sizeof(struct netagent) + data_size, - &new_wrapper->netagent); + &new_wrapper->netagent); if (error) { NETAGENTLOG(LOG_ERR, "Failed to read data into agent structure: %d", error); FREE(new_wrapper, M_NETAGENT); @@ -940,7 +942,7 @@ netagent_unregister(netagent_session_t _session) static errno_t netagent_handle_unregister_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length) + u_int32_t payload_length) { #pragma unused(payload, payload_length) u_int32_t response_error = 0; @@ -959,7 +961,7 @@ done: static 
void netagent_handle_unregister_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + u_int32_t payload_length, mbuf_t packet, int offset) { #pragma unused(payload_length, packet, offset) u_int32_t response_error = NETAGENT_MESSAGE_ERROR_INTERNAL; @@ -980,7 +982,7 @@ fail: static void netagent_send_cellular_failed_event(struct netagent_wrapper *wrapper, - pid_t pid, uuid_t proc_uuid) + pid_t pid, uuid_t proc_uuid) { if (strncmp(wrapper->netagent.netagent_domain, "Cellular", NETAGENT_DOMAINSIZE) != 0) { return; @@ -1010,23 +1012,23 @@ netagent_handle_update_inner(struct netagent_session *session, struct netagent_w lck_rw_lock_exclusive(&netagent_lock); if (uuid_compare(session->wrapper->netagent.netagent_uuid, new_wrapper->netagent.netagent_uuid) != 0 || - memcmp(&session->wrapper->netagent.netagent_domain, &new_wrapper->netagent.netagent_domain, - sizeof(new_wrapper->netagent.netagent_domain)) != 0 || - memcmp(&session->wrapper->netagent.netagent_type, &new_wrapper->netagent.netagent_type, - sizeof(new_wrapper->netagent.netagent_type)) != 0) { - lck_rw_done(&netagent_lock); - NETAGENTLOG0(LOG_ERR, "Basic agent parameters do not match, cannot update"); - if (error_domain == kNetagentErrorDomainPOSIX) { - response_error = EINVAL; - } else if (error_domain == kNetagentErrorDomainUserDefined) { - response_error = NETAGENT_MESSAGE_ERROR_CANNOT_UPDATE; - } - return response_error; + memcmp(&session->wrapper->netagent.netagent_domain, &new_wrapper->netagent.netagent_domain, + sizeof(new_wrapper->netagent.netagent_domain)) != 0 || + memcmp(&session->wrapper->netagent.netagent_type, &new_wrapper->netagent.netagent_type, + sizeof(new_wrapper->netagent.netagent_type)) != 0) { + lck_rw_done(&netagent_lock); + NETAGENTLOG0(LOG_ERR, "Basic agent parameters do not match, cannot update"); + if (error_domain == kNetagentErrorDomainPOSIX) { + response_error = EINVAL; + } else if (error_domain == kNetagentErrorDomainUserDefined) { + response_error = NETAGENT_MESSAGE_ERROR_CANNOT_UPDATE; } + return response_error; + } new_wrapper->netagent.netagent_flags |= NETAGENT_FLAG_REGISTERED; if (session->wrapper->netagent.netagent_data_size == new_wrapper->netagent.netagent_data_size && - memcmp(&session->wrapper->netagent, &new_wrapper->netagent, sizeof(struct netagent) + data_size) == 0) { + memcmp(&session->wrapper->netagent, &new_wrapper->netagent, sizeof(struct netagent) + data_size) == 0) { // Agent is exactly identical, don't increment the generation count // Make a copy of the list of pending clients, and clear the current list @@ -1058,11 +1060,11 @@ netagent_handle_update_inner(struct netagent_session *session, struct netagent_w new_wrapper->use_count = session->wrapper->use_count; if ((new_wrapper->netagent.netagent_flags & NETAGENT_FLAG_ACTIVE) && - !(session->wrapper->netagent.netagent_flags & NETAGENT_FLAG_ACTIVE)) { + !(session->wrapper->netagent.netagent_flags & NETAGENT_FLAG_ACTIVE)) { netagent_active_count++; } else if (!(new_wrapper->netagent.netagent_flags & NETAGENT_FLAG_ACTIVE) && - (session->wrapper->netagent.netagent_flags & NETAGENT_FLAG_ACTIVE) && - netagent_active_count > 0) { + (session->wrapper->netagent.netagent_flags & NETAGENT_FLAG_ACTIVE) && + netagent_active_count > 0) { netagent_active_count--; } @@ -1165,7 +1167,7 @@ netagent_handle_update_setopt(struct netagent_session *session, u_int8_t *payloa if (payload_length < sizeof(struct netagent)) { NETAGENTLOG(LOG_ERR, "Update message size too small for agent: (%u < 
%lu)", - payload_length, sizeof(struct netagent)); + payload_length, sizeof(struct netagent)); response_error = EINVAL; goto done; } @@ -1181,7 +1183,7 @@ netagent_handle_update_setopt(struct netagent_session *session, u_int8_t *payloa NETAGENTLOG(LOG_ERR, "Mismatch between data size and payload length (%lu != %u)", (sizeof(struct netagent) + data_size), payload_length); response_error = EINVAL; goto done; - } + } MALLOC(new_wrapper, struct netagent_wrapper *, sizeof(*new_wrapper) + data_size, M_NETAGENT, M_WAITOK); if (new_wrapper == NULL) { @@ -1210,7 +1212,7 @@ done: static void netagent_handle_update_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + u_int32_t payload_length, mbuf_t packet, int offset) { int error; int data_size = 0; @@ -1232,7 +1234,7 @@ netagent_handle_update_message(struct netagent_session *session, u_int32_t messa if (payload_length < sizeof(struct netagent)) { NETAGENTLOG(LOG_ERR, "Update message size too small for agent: (%u < %lu)", - payload_length, sizeof(struct netagent)); + payload_length, sizeof(struct netagent)); response_error = NETAGENT_MESSAGE_ERROR_INVALID_DATA; goto fail; } @@ -1240,7 +1242,7 @@ netagent_handle_update_message(struct netagent_session *session, u_int32_t messa data_size = netagent_packet_get_netagent_data_size(packet, offset, &error); if (error || data_size < 0 || data_size > NETAGENT_MAX_DATA_SIZE) { NETAGENTLOG(LOG_ERR, "Update message size could not be read, error %d data_size %d", - error, data_size); + error, data_size); response_error = NETAGENT_MESSAGE_ERROR_INVALID_DATA; goto fail; } @@ -1262,7 +1264,7 @@ netagent_handle_update_message(struct netagent_session *session, u_int32_t messa goto fail; } - response_error = netagent_handle_update_inner(session, new_wrapper, data_size, &agent_changed , kNetagentErrorDomainUserDefined); + response_error = netagent_handle_update_inner(session, new_wrapper, data_size, &agent_changed, kNetagentErrorDomainUserDefined); if (response_error != 0) { FREE(new_wrapper, M_NETAGENT); goto fail; @@ -1283,7 +1285,7 @@ fail: static void netagent_handle_get(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + u_int32_t payload_length, mbuf_t packet, int offset) { #pragma unused(payload_length, packet, offset) u_int8_t *response = NULL; @@ -1305,7 +1307,7 @@ netagent_handle_get(struct netagent_session *session, u_int32_t message_id, lck_rw_lock_shared(&netagent_lock); size_t response_size = sizeof(struct netagent_message_header) + sizeof(session->wrapper->netagent) - + session->wrapper->netagent.netagent_data_size; + + session->wrapper->netagent.netagent_data_size; MALLOC(response, u_int8_t *, response_size, M_NETAGENT, M_WAITOK); if (response == NULL) { goto fail; @@ -1313,10 +1315,10 @@ netagent_handle_get(struct netagent_session *session, u_int32_t message_id, cursor = response; cursor = netagent_buffer_write_message_header(cursor, NETAGENT_MESSAGE_TYPE_GET, - NETAGENT_MESSAGE_FLAGS_RESPONSE, message_id, 0, - response_size - sizeof(struct netagent_message_header)); + NETAGENT_MESSAGE_FLAGS_RESPONSE, message_id, 0, + response_size - sizeof(struct netagent_message_header)); memcpy(cursor, &session->wrapper->netagent, sizeof(session->wrapper->netagent) + - session->wrapper->netagent.netagent_data_size); + session->wrapper->netagent.netagent_data_size); lck_rw_done(&netagent_lock); @@ -1331,7 +1333,7 @@ fail: errno_t netagent_assign_nexus(netagent_session_t _session, uuid_t 
necp_client_uuid, - void *assign_message, size_t assigned_results_length) + void *assign_message, size_t assigned_results_length) { struct netagent_session *session = (struct netagent_session *)_session; if (session == NULL) { @@ -1366,23 +1368,23 @@ netagent_update_flow_protoctl_event(netagent_session_t _session, if (session == NULL) { NETAGENTLOG0(LOG_ERR, "Cannot assign nexus from NULL session"); - return (EINVAL); + return EINVAL; } if (session->wrapper == NULL) { NETAGENTLOG0(LOG_ERR, "Session has no agent"); - return (ENOENT); + return ENOENT; } error = necp_update_flow_protoctl_event(session->wrapper->netagent.netagent_uuid, client_id, protoctl_event_code, protoctl_event_val, protoctl_event_tcp_seq_number); - return (error); + return error; } static errno_t netagent_handle_assign_nexus_setopt(struct netagent_session *session, u_int8_t *payload, - u_int32_t payload_length) + u_int32_t payload_length) { errno_t response_error = 0; struct netagent_assign_nexus_message *assign_nexus_netagent = (struct netagent_assign_nexus_message *)(void *)payload; @@ -1445,7 +1447,7 @@ done: static void netagent_handle_assign_nexus_message(struct netagent_session *session, u_int32_t message_id, - u_int32_t payload_length, mbuf_t packet, int offset) + u_int32_t payload_length, mbuf_t packet, int offset) { int error = 0; u_int32_t response_error = NETAGENT_MESSAGE_ERROR_INTERNAL; @@ -1606,11 +1608,11 @@ netagent_find_agent_with_uuid(uuid_t uuid) LIST_FOREACH(search_netagent, &master_netagent_list, master_chain) { if (uuid_compare(search_netagent->netagent.netagent_uuid, uuid) == 0) { - return (search_netagent); + return search_netagent; } } - return (NULL); + return NULL; } void @@ -1663,125 +1665,125 @@ netagent_ioctl(u_long cmd, caddr_t data) int error = 0; switch (cmd) { - case SIOCGIFAGENTLIST32: - case SIOCGIFAGENTLIST64: { - /* Check entitlement if the client requests agent dump */ - errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0); - if (cred_result != 0) { - NETAGENTLOG0(LOG_ERR, "Client does not hold the necessary entitlement to get netagent information"); - return EINVAL; - } - break; + case SIOCGIFAGENTLIST32: + case SIOCGIFAGENTLIST64: { + /* Check entitlement if the client requests agent dump */ + errno_t cred_result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NECP_POLICIES, 0); + if (cred_result != 0) { + NETAGENTLOG0(LOG_ERR, "Client does not hold the necessary entitlement to get netagent information"); + return EINVAL; } - default: - break; + break; + } + default: + break; } lck_rw_lock_shared(&netagent_lock); switch (cmd) { - case SIOCGIFAGENTDATA32: { - struct netagent_req32 *ifsir32 = (struct netagent_req32 *)(void *)data; - struct netagent_wrapper *wrapper = netagent_find_agent_with_uuid(ifsir32->netagent_uuid); - if (wrapper == NULL) { - error = ENOENT; - break; - } - uuid_copy(ifsir32->netagent_uuid, wrapper->netagent.netagent_uuid); - memcpy(ifsir32->netagent_domain, wrapper->netagent.netagent_domain, sizeof(ifsir32->netagent_domain)); - memcpy(ifsir32->netagent_type, wrapper->netagent.netagent_type, sizeof(ifsir32->netagent_type)); - memcpy(ifsir32->netagent_desc, wrapper->netagent.netagent_desc, sizeof(ifsir32->netagent_desc)); - ifsir32->netagent_flags = wrapper->netagent.netagent_flags; - if (ifsir32->netagent_data_size == 0) { - // First pass, client wants data size - ifsir32->netagent_data_size = wrapper->netagent.netagent_data_size; - } else if (ifsir32->netagent_data != USER_ADDR_NULL && - ifsir32->netagent_data_size 
== wrapper->netagent.netagent_data_size) { - // Second pass, client wants data buffer filled out - error = copyout(wrapper->netagent.netagent_data, ifsir32->netagent_data, wrapper->netagent.netagent_data_size); - } else { - error = EINVAL; - } + case SIOCGIFAGENTDATA32: { + struct netagent_req32 *ifsir32 = (struct netagent_req32 *)(void *)data; + struct netagent_wrapper *wrapper = netagent_find_agent_with_uuid(ifsir32->netagent_uuid); + if (wrapper == NULL) { + error = ENOENT; break; } - case SIOCGIFAGENTDATA64: { - struct netagent_req64 *ifsir64 = (struct netagent_req64 *)(void *)data; - struct netagent_wrapper *wrapper = netagent_find_agent_with_uuid(ifsir64->netagent_uuid); - if (wrapper == NULL) { - error = ENOENT; - break; - } - uuid_copy(ifsir64->netagent_uuid, wrapper->netagent.netagent_uuid); - memcpy(ifsir64->netagent_domain, wrapper->netagent.netagent_domain, sizeof(ifsir64->netagent_domain)); - memcpy(ifsir64->netagent_type, wrapper->netagent.netagent_type, sizeof(ifsir64->netagent_type)); - memcpy(ifsir64->netagent_desc, wrapper->netagent.netagent_desc, sizeof(ifsir64->netagent_desc)); - ifsir64->netagent_flags = wrapper->netagent.netagent_flags; - if (ifsir64->netagent_data_size == 0) { - // First pass, client wants data size - ifsir64->netagent_data_size = wrapper->netagent.netagent_data_size; - } else if (ifsir64->netagent_data != USER_ADDR_NULL && - ifsir64->netagent_data_size == wrapper->netagent.netagent_data_size) { - // Second pass, client wants data buffer filled out - error = copyout(wrapper->netagent.netagent_data, ifsir64->netagent_data, wrapper->netagent.netagent_data_size); - } else { - error = EINVAL; - } - break; + uuid_copy(ifsir32->netagent_uuid, wrapper->netagent.netagent_uuid); + memcpy(ifsir32->netagent_domain, wrapper->netagent.netagent_domain, sizeof(ifsir32->netagent_domain)); + memcpy(ifsir32->netagent_type, wrapper->netagent.netagent_type, sizeof(ifsir32->netagent_type)); + memcpy(ifsir32->netagent_desc, wrapper->netagent.netagent_desc, sizeof(ifsir32->netagent_desc)); + ifsir32->netagent_flags = wrapper->netagent.netagent_flags; + if (ifsir32->netagent_data_size == 0) { + // First pass, client wants data size + ifsir32->netagent_data_size = wrapper->netagent.netagent_data_size; + } else if (ifsir32->netagent_data != USER_ADDR_NULL && + ifsir32->netagent_data_size == wrapper->netagent.netagent_data_size) { + // Second pass, client wants data buffer filled out + error = copyout(wrapper->netagent.netagent_data, ifsir32->netagent_data, wrapper->netagent.netagent_data_size); + } else { + error = EINVAL; } - case SIOCGIFAGENTLIST32: { - struct netagentlist_req32 *ifsir32 = (struct netagentlist_req32 *)(void *)data; - if (ifsir32->data_size == 0) { - // First pass, client wants data size - ifsir32->data_size = netagent_dump_get_data_size_locked(); - } else if (ifsir32->data != USER_ADDR_NULL && - ifsir32->data_size > 0 && - ifsir32->data_size == netagent_dump_get_data_size_locked()) { - // Second pass, client wants data buffer filled out - u_int8_t *response = NULL; - MALLOC(response, u_int8_t *, ifsir32->data_size, M_NETAGENT, M_NOWAIT | M_ZERO); - if (response == NULL) { - error = ENOMEM; - break; - } - - netagent_dump_copy_data_locked(response, ifsir32->data_size); - error = copyout(response, ifsir32->data, ifsir32->data_size); - FREE(response, M_NETAGENT); - } else { - error = EINVAL; - } + break; + } + case SIOCGIFAGENTDATA64: { + struct netagent_req64 *ifsir64 = (struct netagent_req64 *)(void *)data; + struct netagent_wrapper *wrapper = 
netagent_find_agent_with_uuid(ifsir64->netagent_uuid); + if (wrapper == NULL) { + error = ENOENT; break; } - case SIOCGIFAGENTLIST64: { - struct netagentlist_req64 *ifsir64 = (struct netagentlist_req64 *)(void *)data; - if (ifsir64->data_size == 0) { - // First pass, client wants data size - ifsir64->data_size = netagent_dump_get_data_size_locked(); - } else if (ifsir64->data != USER_ADDR_NULL && - ifsir64->data_size > 0 && - ifsir64->data_size == netagent_dump_get_data_size_locked()) { - // Second pass, client wants data buffer filled out - u_int8_t *response = NULL; - MALLOC(response, u_int8_t *, ifsir64->data_size, M_NETAGENT, M_NOWAIT | M_ZERO); - if (response == NULL) { - error = ENOMEM; - break; - } - - netagent_dump_copy_data_locked(response, ifsir64->data_size); - error = copyout(response, ifsir64->data, ifsir64->data_size); - FREE(response, M_NETAGENT); - } else { - error = EINVAL; + uuid_copy(ifsir64->netagent_uuid, wrapper->netagent.netagent_uuid); + memcpy(ifsir64->netagent_domain, wrapper->netagent.netagent_domain, sizeof(ifsir64->netagent_domain)); + memcpy(ifsir64->netagent_type, wrapper->netagent.netagent_type, sizeof(ifsir64->netagent_type)); + memcpy(ifsir64->netagent_desc, wrapper->netagent.netagent_desc, sizeof(ifsir64->netagent_desc)); + ifsir64->netagent_flags = wrapper->netagent.netagent_flags; + if (ifsir64->netagent_data_size == 0) { + // First pass, client wants data size + ifsir64->netagent_data_size = wrapper->netagent.netagent_data_size; + } else if (ifsir64->netagent_data != USER_ADDR_NULL && + ifsir64->netagent_data_size == wrapper->netagent.netagent_data_size) { + // Second pass, client wants data buffer filled out + error = copyout(wrapper->netagent.netagent_data, ifsir64->netagent_data, wrapper->netagent.netagent_data_size); + } else { + error = EINVAL; + } + break; + } + case SIOCGIFAGENTLIST32: { + struct netagentlist_req32 *ifsir32 = (struct netagentlist_req32 *)(void *)data; + if (ifsir32->data_size == 0) { + // First pass, client wants data size + ifsir32->data_size = netagent_dump_get_data_size_locked(); + } else if (ifsir32->data != USER_ADDR_NULL && + ifsir32->data_size > 0 && + ifsir32->data_size == netagent_dump_get_data_size_locked()) { + // Second pass, client wants data buffer filled out + u_int8_t *response = NULL; + MALLOC(response, u_int8_t *, ifsir32->data_size, M_NETAGENT, M_NOWAIT | M_ZERO); + if (response == NULL) { + error = ENOMEM; + break; } - break; + + netagent_dump_copy_data_locked(response, ifsir32->data_size); + error = copyout(response, ifsir32->data, ifsir32->data_size); + FREE(response, M_NETAGENT); + } else { + error = EINVAL; } - default: { + break; + } + case SIOCGIFAGENTLIST64: { + struct netagentlist_req64 *ifsir64 = (struct netagentlist_req64 *)(void *)data; + if (ifsir64->data_size == 0) { + // First pass, client wants data size + ifsir64->data_size = netagent_dump_get_data_size_locked(); + } else if (ifsir64->data != USER_ADDR_NULL && + ifsir64->data_size > 0 && + ifsir64->data_size == netagent_dump_get_data_size_locked()) { + // Second pass, client wants data buffer filled out + u_int8_t *response = NULL; + MALLOC(response, u_int8_t *, ifsir64->data_size, M_NETAGENT, M_NOWAIT | M_ZERO); + if (response == NULL) { + error = ENOMEM; + break; + } + + netagent_dump_copy_data_locked(response, ifsir64->data_size); + error = copyout(response, ifsir64->data, ifsir64->data_size); + FREE(response, M_NETAGENT); + } else { error = EINVAL; - break; } + break; + } + default: { + error = EINVAL; + break; + } } 
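
The SIOCGIFAGENTLIST32/64 handlers reindented above implement a two-pass sizing protocol: a request with data_size == 0 only reports the required buffer size, and a second request presenting a buffer of exactly that size gets it filled in; any mismatch is rejected with EINVAL. A minimal userspace sketch of that protocol follows, under stated assumptions: that the native struct netagentlist_req mirrors the 32/64-bit variants shown in the header below (a data_size plus a data pointer), that <sys/sockio.h> spells the request code SIOCGIFAGENTLIST, and that the caller holds the PRIV_NET_PRIVILEGED_NECP_POLICIES entitlement checked at the top of netagent_ioctl().

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <sys/sockio.h>
	#include <net/network_agent.h>

	static int
	dump_netagents(void)
	{
		/* Any socket works as an ioctl handle. */
		int s = socket(AF_INET, SOCK_DGRAM, 0);
		if (s < 0) {
			return -1;
		}

		struct netagentlist_req req;
		memset(&req, 0, sizeof(req));

		/* First pass: data_size == 0 asks the kernel only for the size. */
		if (ioctl(s, SIOCGIFAGENTLIST, &req) < 0 || req.data_size == 0) {
			close(s);
			return -1;
		}

		/* Second pass: the buffer must match the advertised size exactly;
		 * if agents came or went in between, the kernel returns EINVAL
		 * and the caller should retry from the first pass. */
		req.data = malloc(req.data_size);
		if (req.data == NULL) {
			close(s);
			return -1;
		}
		int error = ioctl(s, SIOCGIFAGENTLIST, &req);
		if (error == 0) {
			printf("agent dump: %u bytes\n", req.data_size);
		}
		free(req.data);
		close(s);
		return error;
	}

The SIOCGIFAGENTDATA32/64 cases above follow the same first-pass/second-pass convention, keyed on netagent_data_size instead of data_size.
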
lck_rw_done(&netagent_lock); - return (error); + return error; } u_int32_t @@ -1797,7 +1799,7 @@ netagent_get_flags(uuid_t uuid) } lck_rw_done(&netagent_lock); - return (flags); + return flags; } u_int32_t @@ -1813,7 +1815,7 @@ netagent_get_generation(uuid_t uuid) } lck_rw_done(&netagent_lock); - return (generation); + return generation; } bool @@ -1822,7 +1824,7 @@ netagent_get_agent_domain_and_type(uuid_t uuid, char *domain, char *type) bool found = FALSE; if (domain == NULL || type == NULL) { NETAGENTLOG(LOG_ERR, "Invalid arguments for netagent_get_agent_domain_and_type %p %p", domain, type); - return (FALSE); + return FALSE; } lck_rw_lock_shared(&netagent_lock); @@ -1836,7 +1838,7 @@ netagent_get_agent_domain_and_type(uuid_t uuid, char *domain, char *type) } lck_rw_done(&netagent_lock); - return (found); + return found; } int @@ -1870,29 +1872,29 @@ netagent_kernel_trigger(uuid_t uuid) NETAGENTLOG((error ? LOG_ERR : LOG_INFO), "Triggered netagent from kernel (error %d)", error); done: lck_rw_done(&netagent_lock); - return (error); + return error; } int netagent_client_message_with_params(uuid_t agent_uuid, - uuid_t necp_client_uuid, - pid_t pid, - void *handle, - u_int8_t message_type, - struct necp_client_nexus_parameters *parameters, - void **assigned_results, - size_t *assigned_results_length) + uuid_t necp_client_uuid, + pid_t pid, + void *handle, + u_int8_t message_type, + struct necp_client_nexus_parameters *parameters, + void **assigned_results, + size_t *assigned_results_length) { int error = 0; if (message_type != NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER && - message_type != NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT && - message_type != NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT && - message_type != NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS && - message_type != NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS && - message_type != NETAGENT_MESSAGE_TYPE_ABORT_NEXUS) { + message_type != NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT && + message_type != NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT && + message_type != NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS && + message_type != NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS && + message_type != NETAGENT_MESSAGE_TYPE_ABORT_NEXUS) { NETAGENTLOG(LOG_ERR, "Client netagent message type (%d) is invalid", message_type); - return(EINVAL); + return EINVAL; } lck_rw_lock_shared(&netagent_lock); @@ -1927,8 +1929,8 @@ netagent_client_message_with_params(uuid_t agent_uuid, goto done; } } else if (message_type == NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS || - message_type == NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS || - message_type == NETAGENT_MESSAGE_TYPE_ABORT_NEXUS) { + message_type == NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS || + message_type == NETAGENT_MESSAGE_TYPE_ABORT_NEXUS) { if ((wrapper->netagent.netagent_flags & NETAGENT_FLAG_NEXUS_PROVIDER) == 0) { NETAGENTLOG0(LOG_ERR, "Requested netagent for nexus instance is not a nexus provider"); // Agent is not a nexus provider @@ -1953,8 +1955,8 @@ netagent_client_message_with_params(uuid_t agent_uuid, // that the event handler will not lead to any registrations or unregistrations // of network agents. 
error = wrapper->event_handler(message_type, necp_client_uuid, pid, handle, - wrapper->event_context, parameters, - assigned_results, assigned_results_length); + wrapper->event_context, parameters, + assigned_results, assigned_results_length); if (error != 0) { VERIFY(assigned_results == NULL || *assigned_results == NULL); VERIFY(assigned_results_length == NULL || *assigned_results_length == 0); @@ -2004,13 +2006,13 @@ done: if (should_unlock) { lck_rw_done(&netagent_lock); } - return (error); + return error; } int netagent_client_message(uuid_t agent_uuid, uuid_t necp_client_uuid, pid_t pid, void *handle, u_int8_t message_type) { - return (netagent_client_message_with_params(agent_uuid, necp_client_uuid, pid, handle, message_type, NULL, NULL, NULL)); + return netagent_client_message_with_params(agent_uuid, necp_client_uuid, pid, handle, message_type, NULL, NULL, NULL); } int @@ -2035,7 +2037,7 @@ netagent_use(uuid_t agent_uuid, uint64_t *out_use_count) done: lck_rw_done(&netagent_lock); - return (error); + return error; } int @@ -2063,7 +2065,7 @@ netagent_copyout(uuid_t agent_uuid, user_addr_t user_addr, u_int32_t user_size) NETAGENTLOG((error ? LOG_ERR : LOG_DEBUG), "Copied agent content (error %d)", error); done: lck_rw_done(&netagent_lock); - return (error); + return error; } int @@ -2075,26 +2077,26 @@ netagent_trigger(struct proc *p, struct netagent_trigger_args *uap, int32_t *ret if (uap == NULL) { NETAGENTLOG0(LOG_ERR, "uap == NULL"); - return (EINVAL); + return EINVAL; } if (uap->agent_uuid) { if (uap->agent_uuidlen != sizeof(uuid_t)) { NETAGENTLOG(LOG_ERR, "Incorrect length (got %llu, expected %lu)", - uap->agent_uuidlen, sizeof(uuid_t)); - return (ERANGE); + uap->agent_uuidlen, sizeof(uuid_t)); + return ERANGE; } error = copyin(uap->agent_uuid, agent_uuid, sizeof(uuid_t)); if (error) { NETAGENTLOG(LOG_ERR, "copyin error (%d)", error); - return (error); + return error; } } if (uuid_is_null(agent_uuid)) { NETAGENTLOG0(LOG_ERR, "Requested netagent UUID is empty"); - return (EINVAL); + return EINVAL; } lck_rw_lock_shared(&netagent_lock); @@ -2123,5 +2125,5 @@ netagent_trigger(struct proc *p, struct netagent_trigger_args *uap, int32_t *ret NETAGENTLOG((error ? 
LOG_ERR : LOG_INFO), "Triggered netagent (error %d)", error); done: lck_rw_done(&netagent_lock); - return (error); + return error; } diff --git a/bsd/net/network_agent.h b/bsd/net/network_agent.h index 0eddfa2aa..d51352628 100644 --- a/bsd/net/network_agent.h +++ b/bsd/net/network_agent.h @@ -26,8 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _NETAGENT_H_ -#define _NETAGENT_H_ +#ifndef _NETAGENT_H_ +#define _NETAGENT_H_ #include #ifdef PRIVATE @@ -43,132 +43,132 @@ errno_t netagent_init(void); /* * Name registered by the Network Agent kernel control */ -#define NETAGENT_CONTROL_NAME "com.apple.net.netagent" +#define NETAGENT_CONTROL_NAME "com.apple.net.netagent" struct netagent_message_header { - u_int8_t message_type; - u_int8_t message_flags; - u_int32_t message_id; - u_int32_t message_error; - u_int32_t message_payload_length; + u_int8_t message_type; + u_int8_t message_flags; + u_int32_t message_id; + u_int32_t message_error; + u_int32_t message_payload_length; }; struct netagent_trigger_message { - u_int32_t trigger_flags; - pid_t trigger_pid; - uuid_t trigger_proc_uuid; + u_int32_t trigger_flags; + pid_t trigger_pid; + uuid_t trigger_proc_uuid; }; struct netagent_client_message { - uuid_t client_id; + uuid_t client_id; }; struct netagent_assign_nexus_message { - uuid_t assign_client_id; - u_int8_t assign_necp_results[0]; + uuid_t assign_client_id; + u_int8_t assign_necp_results[0]; }; -#define NETAGENT_MESSAGE_TYPE_REGISTER 1 // Pass netagent to set, no return value -#define NETAGENT_MESSAGE_TYPE_UNREGISTER 2 // No value, no return value -#define NETAGENT_MESSAGE_TYPE_UPDATE 3 // Pass netagent to update, no return value -#define NETAGENT_MESSAGE_TYPE_GET 4 // No value, return netagent -#define NETAGENT_MESSAGE_TYPE_TRIGGER 5 // Kernel initiated, no reply expected -#define NETAGENT_MESSAGE_TYPE_ASSERT 6 // Deprecated -#define NETAGENT_MESSAGE_TYPE_UNASSERT 7 // Deprecated -#define NETAGENT_MESSAGE_TYPE_TRIGGER_ASSERT 8 // Kernel initiated, no reply expected -#define NETAGENT_MESSAGE_TYPE_TRIGGER_UNASSERT 9 // Kernel initiated, no reply expected -#define NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS 10 // Kernel initiated, struct netagent_client_message -#define NETAGENT_MESSAGE_TYPE_ASSIGN_NEXUS 11 // Pass struct netagent_assign_nexus_message -#define NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS 12 // Kernel initiated, struct netagent_client_message -#define NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER 13 // Kernel initiated, struct netagent_client_message -#define NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT 14 // Kernel initiated, struct netagent_client_message -#define NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT 15 // Kernel initiated, struct netagent_client_message - -#define NETAGENT_OPTION_TYPE_REGISTER NETAGENT_MESSAGE_TYPE_REGISTER // Pass netagent to set, no return value -#define NETAGENT_OPTION_TYPE_UNREGISTER NETAGENT_MESSAGE_TYPE_UNREGISTER // No value, no return value -#define NETAGENT_OPTION_TYPE_UPDATE NETAGENT_MESSAGE_TYPE_UPDATE // Pass netagent to update, no return value -#define NETAGENT_OPTION_TYPE_ASSIGN_NEXUS NETAGENT_MESSAGE_TYPE_ASSIGN_NEXUS // Pass struct netagent_assign_nexus_message -#define NETAGENT_OPTION_TYPE_USE_COUNT 16 // Pass use count to set, get current use count -#define NETAGENT_MESSAGE_TYPE_ABORT_NEXUS 17 // Kernel private - -#define NETAGENT_MESSAGE_FLAGS_RESPONSE 0x01 // Used for acks, errors, and query responses - -#define NETAGENT_MESSAGE_ERROR_NONE 0 -#define NETAGENT_MESSAGE_ERROR_INTERNAL 1 -#define NETAGENT_MESSAGE_ERROR_UNKNOWN_TYPE 2 -#define 
NETAGENT_MESSAGE_ERROR_INVALID_DATA 3 -#define NETAGENT_MESSAGE_ERROR_NOT_REGISTERED 4 -#define NETAGENT_MESSAGE_ERROR_ALREADY_REGISTERED 5 -#define NETAGENT_MESSAGE_ERROR_CANNOT_UPDATE 6 -#define NETAGENT_MESSAGE_ERROR_CANNOT_ASSIGN 7 - -#define NETAGENT_DOMAINSIZE 32 -#define NETAGENT_TYPESIZE 32 -#define NETAGENT_DESCSIZE 128 - -#define NETAGENT_MAX_DATA_SIZE 4096 - -#define NETAGENT_FLAG_REGISTERED 0x0001 // Agent is registered -#define NETAGENT_FLAG_ACTIVE 0x0002 // Agent is active -#define NETAGENT_FLAG_KERNEL_ACTIVATED 0x0004 // Agent can be activated by kernel activity -#define NETAGENT_FLAG_USER_ACTIVATED 0x0008 // Agent can be activated by system call (netagent_trigger) -#define NETAGENT_FLAG_VOLUNTARY 0x0010 // Use of agent is optional -#define NETAGENT_FLAG_SPECIFIC_USE_ONLY 0x0020 // Agent should only be used and activated when specifically required -#define NETAGENT_FLAG_NETWORK_PROVIDER 0x0040 // Agent provides network access -#define NETAGENT_FLAG_NEXUS_PROVIDER 0x0080 // Agent provides a skywalk nexus -#define NETAGENT_FLAG_SUPPORTS_BROWSE 0x0100 // Assertions will cause agent to fill in browse endpoints - -#define NETAGENT_NEXUS_MAX_REQUEST_TYPES 16 -#define NETAGENT_NEXUS_MAX_RESOLUTION_TYPE_PAIRS 16 - -#define NETAGENT_NEXUS_FRAME_TYPE_UNKNOWN 0 -#define NETAGENT_NEXUS_FRAME_TYPE_LINK 1 -#define NETAGENT_NEXUS_FRAME_TYPE_INTERNET 2 -#define NETAGENT_NEXUS_FRAME_TYPE_TRANSPORT 3 -#define NETAGENT_NEXUS_FRAME_TYPE_APPLICATION 4 - -#define NETAGENT_NEXUS_ENDPOINT_TYPE_ADDRESS 1 -#define NETAGENT_NEXUS_ENDPOINT_TYPE_HOST 2 -#define NETAGENT_NEXUS_ENDPOINT_TYPE_BONJOUR 3 - -#define NETAGENT_NEXUS_FLAG_SUPPORTS_USER_PACKET_POOL 0x1 -#define NETAGENT_NEXUS_FLAG_ASSERT_UNSUPPORTED 0x2 // No calls to assert the agent are required +#define NETAGENT_MESSAGE_TYPE_REGISTER 1 // Pass netagent to set, no return value +#define NETAGENT_MESSAGE_TYPE_UNREGISTER 2 // No value, no return value +#define NETAGENT_MESSAGE_TYPE_UPDATE 3 // Pass netagent to update, no return value +#define NETAGENT_MESSAGE_TYPE_GET 4 // No value, return netagent +#define NETAGENT_MESSAGE_TYPE_TRIGGER 5 // Kernel initiated, no reply expected +#define NETAGENT_MESSAGE_TYPE_ASSERT 6 // Deprecated +#define NETAGENT_MESSAGE_TYPE_UNASSERT 7 // Deprecated +#define NETAGENT_MESSAGE_TYPE_TRIGGER_ASSERT 8 // Kernel initiated, no reply expected +#define NETAGENT_MESSAGE_TYPE_TRIGGER_UNASSERT 9 // Kernel initiated, no reply expected +#define NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS 10 // Kernel initiated, struct netagent_client_message +#define NETAGENT_MESSAGE_TYPE_ASSIGN_NEXUS 11 // Pass struct netagent_assign_nexus_message +#define NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS 12 // Kernel initiated, struct netagent_client_message +#define NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER 13 // Kernel initiated, struct netagent_client_message +#define NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT 14 // Kernel initiated, struct netagent_client_message +#define NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT 15 // Kernel initiated, struct netagent_client_message + +#define NETAGENT_OPTION_TYPE_REGISTER NETAGENT_MESSAGE_TYPE_REGISTER // Pass netagent to set, no return value +#define NETAGENT_OPTION_TYPE_UNREGISTER NETAGENT_MESSAGE_TYPE_UNREGISTER // No value, no return value +#define NETAGENT_OPTION_TYPE_UPDATE NETAGENT_MESSAGE_TYPE_UPDATE // Pass netagent to update, no return value +#define NETAGENT_OPTION_TYPE_ASSIGN_NEXUS NETAGENT_MESSAGE_TYPE_ASSIGN_NEXUS // Pass struct netagent_assign_nexus_message +#define NETAGENT_OPTION_TYPE_USE_COUNT 16 // Pass use count to set, 
get current use count +#define NETAGENT_MESSAGE_TYPE_ABORT_NEXUS 17 // Kernel private + +#define NETAGENT_MESSAGE_FLAGS_RESPONSE 0x01 // Used for acks, errors, and query responses + +#define NETAGENT_MESSAGE_ERROR_NONE 0 +#define NETAGENT_MESSAGE_ERROR_INTERNAL 1 +#define NETAGENT_MESSAGE_ERROR_UNKNOWN_TYPE 2 +#define NETAGENT_MESSAGE_ERROR_INVALID_DATA 3 +#define NETAGENT_MESSAGE_ERROR_NOT_REGISTERED 4 +#define NETAGENT_MESSAGE_ERROR_ALREADY_REGISTERED 5 +#define NETAGENT_MESSAGE_ERROR_CANNOT_UPDATE 6 +#define NETAGENT_MESSAGE_ERROR_CANNOT_ASSIGN 7 + +#define NETAGENT_DOMAINSIZE 32 +#define NETAGENT_TYPESIZE 32 +#define NETAGENT_DESCSIZE 128 + +#define NETAGENT_MAX_DATA_SIZE 4096 + +#define NETAGENT_FLAG_REGISTERED 0x0001 // Agent is registered +#define NETAGENT_FLAG_ACTIVE 0x0002 // Agent is active +#define NETAGENT_FLAG_KERNEL_ACTIVATED 0x0004 // Agent can be activated by kernel activity +#define NETAGENT_FLAG_USER_ACTIVATED 0x0008 // Agent can be activated by system call (netagent_trigger) +#define NETAGENT_FLAG_VOLUNTARY 0x0010 // Use of agent is optional +#define NETAGENT_FLAG_SPECIFIC_USE_ONLY 0x0020 // Agent should only be used and activated when specifically required +#define NETAGENT_FLAG_NETWORK_PROVIDER 0x0040 // Agent provides network access +#define NETAGENT_FLAG_NEXUS_PROVIDER 0x0080 // Agent provides a skywalk nexus +#define NETAGENT_FLAG_SUPPORTS_BROWSE 0x0100 // Assertions will cause agent to fill in browse endpoints + +#define NETAGENT_NEXUS_MAX_REQUEST_TYPES 16 +#define NETAGENT_NEXUS_MAX_RESOLUTION_TYPE_PAIRS 16 + +#define NETAGENT_NEXUS_FRAME_TYPE_UNKNOWN 0 +#define NETAGENT_NEXUS_FRAME_TYPE_LINK 1 +#define NETAGENT_NEXUS_FRAME_TYPE_INTERNET 2 +#define NETAGENT_NEXUS_FRAME_TYPE_TRANSPORT 3 +#define NETAGENT_NEXUS_FRAME_TYPE_APPLICATION 4 + +#define NETAGENT_NEXUS_ENDPOINT_TYPE_ADDRESS 1 +#define NETAGENT_NEXUS_ENDPOINT_TYPE_HOST 2 +#define NETAGENT_NEXUS_ENDPOINT_TYPE_BONJOUR 3 + +#define NETAGENT_NEXUS_FLAG_SUPPORTS_USER_PACKET_POOL 0x1 +#define NETAGENT_NEXUS_FLAG_ASSERT_UNSUPPORTED 0x2 // No calls to assert the agent are required struct netagent_nexus { - u_int32_t frame_type; - u_int32_t endpoint_assignment_type; - u_int32_t endpoint_request_types[NETAGENT_NEXUS_MAX_REQUEST_TYPES]; - u_int32_t endpoint_resolution_type_pairs[NETAGENT_NEXUS_MAX_RESOLUTION_TYPE_PAIRS * 2]; - u_int32_t nexus_flags; + u_int32_t frame_type; + u_int32_t endpoint_assignment_type; + u_int32_t endpoint_request_types[NETAGENT_NEXUS_MAX_REQUEST_TYPES]; + u_int32_t endpoint_resolution_type_pairs[NETAGENT_NEXUS_MAX_RESOLUTION_TYPE_PAIRS * 2]; + u_int32_t nexus_flags; }; -#define NETAGENT_TRIGGER_FLAG_USER 0x0001 // Userspace triggered agent -#define NETAGENT_TRIGGER_FLAG_KERNEL 0x0002 // Kernel triggered agent +#define NETAGENT_TRIGGER_FLAG_USER 0x0001 // Userspace triggered agent +#define NETAGENT_TRIGGER_FLAG_KERNEL 0x0002 // Kernel triggered agent struct kev_netagent_data { - uuid_t netagent_uuid; + uuid_t netagent_uuid; }; // To be used with kernel control socket struct netagent { - uuid_t netagent_uuid; - char netagent_domain[NETAGENT_DOMAINSIZE]; - char netagent_type[NETAGENT_TYPESIZE]; - char netagent_desc[NETAGENT_DESCSIZE]; - u_int32_t netagent_flags; - u_int32_t netagent_data_size; - u_int8_t netagent_data[0]; + uuid_t netagent_uuid; + char netagent_domain[NETAGENT_DOMAINSIZE]; + char netagent_type[NETAGENT_TYPESIZE]; + char netagent_desc[NETAGENT_DESCSIZE]; + u_int32_t netagent_flags; + u_int32_t netagent_data_size; + u_int8_t netagent_data[0]; }; // To be used with SIOCGAGENTDATA 
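/*
 * A minimal userspace sketch of agent registration over the kernel control
 * socket. Assumptions not established by the text above: the control is
 * published under the name "com.apple.net.netagent", and registration is a
 * setsockopt() at level SYSPROTO_CONTROL using NETAGENT_OPTION_TYPE_REGISTER
 * (defined above) whose value is a struct netagent followed by
 * netagent_data_size bytes of agent-specific data.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>

static int
netagent_register_sketch(const struct netagent *agent)
{
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
	if (fd < 0) {
		return -1;
	}

	// Resolve the (assumed) control name to its dynamically assigned ID.
	struct ctl_info info;
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, "com.apple.net.netagent", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
		close(fd);
		return -1;
	}

	// Connect a session to the control so options can be set on it.
	struct sockaddr_ctl sc;
	memset(&sc, 0, sizeof(sc));
	sc.sc_len = sizeof(sc);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = info.ctl_id;
	if (connect(fd, (const struct sockaddr *)&sc, sizeof(sc)) == -1) {
		close(fd);
		return -1;
	}

	// Register: the fixed header and its trailing data travel as one value.
	socklen_t optlen = (socklen_t)(sizeof(*agent) + agent->netagent_data_size);
	if (setsockopt(fd, SYSPROTO_CONTROL, NETAGENT_OPTION_TYPE_REGISTER,
	    agent, optlen) == -1) {
		close(fd);
		return -1;
	}
	return fd; // sketch assumption: the registration lives for the life of this session
}
/* struct netagent_req, next, is the request format for SIOCGAGENTDATA. */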
struct netagent_req { - uuid_t netagent_uuid; - char netagent_domain[NETAGENT_DOMAINSIZE]; - char netagent_type[NETAGENT_TYPESIZE]; - char netagent_desc[NETAGENT_DESCSIZE]; - u_int32_t netagent_flags; - u_int32_t netagent_data_size; - u_int8_t *netagent_data; + uuid_t netagent_uuid; + char netagent_domain[NETAGENT_DOMAINSIZE]; + char netagent_type[NETAGENT_TYPESIZE]; + char netagent_desc[NETAGENT_DESCSIZE]; + u_int32_t netagent_flags; + u_int32_t netagent_data_size; + u_int8_t *netagent_data; }; // To be used with SIOCGAGENTLIST @@ -180,22 +180,22 @@ struct netagentlist_req { int netagent_ioctl(u_long cmd, caddr_t data); struct netagent_req32 { - uuid_t netagent_uuid; - char netagent_domain[NETAGENT_DOMAINSIZE]; - char netagent_type[NETAGENT_TYPESIZE]; - char netagent_desc[NETAGENT_DESCSIZE]; - u_int32_t netagent_flags; - u_int32_t netagent_data_size; - user32_addr_t netagent_data; + uuid_t netagent_uuid; + char netagent_domain[NETAGENT_DOMAINSIZE]; + char netagent_type[NETAGENT_TYPESIZE]; + char netagent_desc[NETAGENT_DESCSIZE]; + u_int32_t netagent_flags; + u_int32_t netagent_data_size; + user32_addr_t netagent_data; }; struct netagent_req64 { - uuid_t netagent_uuid; - char netagent_domain[NETAGENT_DOMAINSIZE]; - char netagent_type[NETAGENT_TYPESIZE]; - char netagent_desc[NETAGENT_DESCSIZE]; - u_int32_t netagent_flags; - u_int32_t netagent_data_size; - user64_addr_t netagent_data __attribute__((aligned(8))); + uuid_t netagent_uuid; + char netagent_domain[NETAGENT_DOMAINSIZE]; + char netagent_type[NETAGENT_TYPESIZE]; + char netagent_desc[NETAGENT_DESCSIZE]; + u_int32_t netagent_flags; + u_int32_t netagent_data_size; + user64_addr_t netagent_data __attribute__((aligned(8))); }; struct netagentlist_req32 { u_int32_t data_size; @@ -222,13 +222,13 @@ extern int netagent_kernel_trigger(uuid_t uuid); extern int netagent_client_message(uuid_t agent_uuid, uuid_t necp_client_uuid, pid_t pid, void *handle, u_int8_t message_type); extern int netagent_client_message_with_params(uuid_t agent_uuid, - uuid_t necp_client_uuid, - pid_t pid, - void *handle, - u_int8_t message_type, - struct necp_client_nexus_parameters *parameters, - void **assigned_results, - size_t *assigned_results_length); + uuid_t necp_client_uuid, + pid_t pid, + void *handle, + u_int8_t message_type, + struct necp_client_nexus_parameters *parameters, + void **assigned_results, + size_t *assigned_results_length); extern int netagent_copyout(uuid_t uuid, user_addr_t user_addr, u_int32_t user_size); @@ -238,16 +238,16 @@ extern int netagent_copyout(uuid_t uuid, user_addr_t user_addr, u_int32_t user_s typedef void * netagent_session_t; struct netagent_nexus_agent { - struct netagent agent; - struct netagent_nexus nexus_data; + struct netagent agent; + struct netagent_nexus nexus_data; }; -#define NETAGENT_EVENT_TRIGGER NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER -#define NETAGENT_EVENT_ASSERT NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT -#define NETAGENT_EVENT_UNASSERT NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT -#define NETAGENT_EVENT_NEXUS_FLOW_INSERT NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS -#define NETAGENT_EVENT_NEXUS_FLOW_REMOVE NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS -#define NETAGENT_EVENT_NEXUS_FLOW_ABORT NETAGENT_MESSAGE_TYPE_ABORT_NEXUS +#define NETAGENT_EVENT_TRIGGER NETAGENT_MESSAGE_TYPE_CLIENT_TRIGGER +#define NETAGENT_EVENT_ASSERT NETAGENT_MESSAGE_TYPE_CLIENT_ASSERT +#define NETAGENT_EVENT_UNASSERT NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT +#define NETAGENT_EVENT_NEXUS_FLOW_INSERT NETAGENT_MESSAGE_TYPE_REQUEST_NEXUS +#define NETAGENT_EVENT_NEXUS_FLOW_REMOVE 
NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS +#define NETAGENT_EVENT_NEXUS_FLOW_ABORT NETAGENT_MESSAGE_TYPE_ABORT_NEXUS typedef errno_t (*netagent_event_f)(u_int8_t event, uuid_t necp_client_uuid, pid_t pid, void *necp_handle, void *context, struct necp_client_nexus_parameters *parameters, void **assigned_results, size_t *assigned_results_length); @@ -262,15 +262,15 @@ extern errno_t netagent_update(netagent_session_t session, struct netagent *agen extern errno_t netagent_unregister(netagent_session_t session); extern errno_t netagent_assign_nexus(netagent_session_t _session, - uuid_t necp_client_uuid, - void *assign_message, - size_t assigned_results_length); // Length of assigned_results_length + uuid_t necp_client_uuid, + void *assign_message, + size_t assigned_results_length); // Length of assigned_results_length extern errno_t netagent_update_flow_protoctl_event(netagent_session_t _session, - uuid_t client_id, - uint32_t protoctl_event_code, - uint32_t protoctl_event_val, - uint32_t protoctl_event_tcp_seq_number); + uuid_t client_id, + uint32_t protoctl_event_code, + uint32_t protoctl_event_val, + uint32_t protoctl_event_tcp_seq_number); extern int netagent_use(uuid_t agent_uuid, uint64_t *out_use_count); diff --git a/bsd/net/ntstat.c b/bsd/net/ntstat.c index eefb69aaf..7a33d6832 100644 --- a/bsd/net/ntstat.c +++ b/bsd/net/ntstat.c @@ -77,7 +77,7 @@ #include #include -__private_extern__ int nstat_collect = 1; +__private_extern__ int nstat_collect = 1; #if (DEBUG || DEVELOPMENT) SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED, @@ -93,7 +93,7 @@ SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_privcheck, 0, "Entitlement check"); SYSCTL_NODE(_net, OID_AUTO, stats, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "network statistics"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics"); static int nstat_debug = 0; SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, @@ -138,12 +138,11 @@ SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval, CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, ""); #endif /* DEBUG || DEVELOPMENT */ -enum -{ - NSTAT_FLAG_CLEANUP = (1 << 0), - NSTAT_FLAG_REQCOUNTS = (1 << 1), - NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2), - NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3), +enum{ + NSTAT_FLAG_CLEANUP = (1 << 0), + NSTAT_FLAG_REQCOUNTS = (1 << 1), + NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2), + NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3), }; #if CONFIG_EMBEDDED @@ -152,87 +151,83 @@ enum #define QUERY_CONTINUATION_SRC_COUNT 100 #endif -typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src; -typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src; +typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src; +typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src; -typedef struct nstat_provider_filter -{ - u_int64_t npf_flags; - u_int64_t npf_events; - pid_t npf_pid; - uuid_t npf_uuid; +typedef struct nstat_provider_filter { + u_int64_t npf_flags; + u_int64_t npf_events; + pid_t npf_pid; + uuid_t npf_uuid; } nstat_provider_filter; -typedef struct nstat_control_state -{ - struct nstat_control_state *ncs_next; - u_int32_t ncs_watching; +typedef struct nstat_control_state { + struct nstat_control_state *ncs_next; + u_int32_t ncs_watching; decl_lck_mtx_data(, ncs_mtx); - kern_ctl_ref ncs_kctl; - u_int32_t ncs_unit; - nstat_src_ref_t ncs_next_srcref; - tailq_head_nstat_src ncs_src_queue; - mbuf_t ncs_accumulated; - u_int32_t ncs_flags; - nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT]; + kern_ctl_ref ncs_kctl; + u_int32_t 
ncs_unit; + nstat_src_ref_t ncs_next_srcref; + tailq_head_nstat_src ncs_src_queue; + mbuf_t ncs_accumulated; + u_int32_t ncs_flags; + nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT]; /* state maintained for partial query requests */ - u_int64_t ncs_context; - u_int64_t ncs_seq; + u_int64_t ncs_context; + u_int64_t ncs_seq; } nstat_control_state; -typedef struct nstat_provider -{ - struct nstat_provider *next; - nstat_provider_id_t nstat_provider_id; - size_t nstat_descriptor_length; - errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie); - int (*nstat_gone)(nstat_provider_cookie_t cookie); - errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone); - errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req); - void (*nstat_watcher_remove)(nstat_control_state *state); - errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len); - void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked); - bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter); +typedef struct nstat_provider { + struct nstat_provider *next; + nstat_provider_id_t nstat_provider_id; + size_t nstat_descriptor_length; + errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie); + int (*nstat_gone)(nstat_provider_cookie_t cookie); + errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone); + errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req); + void (*nstat_watcher_remove)(nstat_control_state *state); + errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len); + void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked); + bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter); } nstat_provider; -typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src; -typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src; +typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src; +typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src; -typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow; -typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow; +typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow; +typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow; -typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails; -typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails; +typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails; +typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails; -typedef struct nstat_src -{ - tailq_entry_nstat_src ns_control_link; // All sources for the nstat_control_state, for iterating over. - nstat_control_state *ns_control; // The nstat_control_state that this is a source for - nstat_src_ref_t srcref; - nstat_provider *provider; - nstat_provider_cookie_t cookie; - uint32_t filter; - uint64_t seq; +typedef struct nstat_src { + tailq_entry_nstat_src ns_control_link; // All sources for the nstat_control_state, for iterating over. 
+ nstat_control_state *ns_control; // The nstat_control_state that this is a source for + nstat_src_ref_t srcref; + nstat_provider *provider; + nstat_provider_cookie_t cookie; + uint32_t filter; + uint64_t seq; } nstat_src; -static errno_t nstat_control_send_counts(nstat_control_state *, - nstat_src *, unsigned long long, u_int16_t, int *); -static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags); +static errno_t nstat_control_send_counts(nstat_control_state *, + nstat_src *, unsigned long long, u_int16_t, int *); +static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags); static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone); -static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *); -static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src); -static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t); -static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src); -static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp); -static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial); -static void nstat_ifnet_report_ecn_stats(void); -static void nstat_ifnet_report_lim_stats(void); -static void nstat_net_api_report_stats(void); -static errno_t nstat_set_provider_filter( nstat_control_state *state, nstat_msg_add_all_srcs *req); - -static u_int32_t nstat_udp_watchers = 0; -static u_int32_t nstat_tcp_watchers = 0; +static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *); +static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src); +static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t); +static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src); +static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp); +static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial); +static void nstat_ifnet_report_ecn_stats(void); +static void nstat_ifnet_report_lim_stats(void); +static void nstat_net_api_report_stats(void); +static errno_t nstat_set_provider_filter( nstat_control_state *state, nstat_msg_add_all_srcs *req); + +static u_int32_t nstat_udp_watchers = 0; +static u_int32_t nstat_tcp_watchers = 0; static void nstat_control_register(void); @@ -243,9 +238,9 @@ static void nstat_control_register(void); * nstat_mtx * state->ncs_mtx */ -static volatile OSMallocTag nstat_malloc_tag = NULL; -static nstat_control_state *nstat_controls = NULL; -static uint64_t nstat_idle_time = 0; +static volatile OSMallocTag nstat_malloc_tag = NULL; +static nstat_control_state *nstat_controls = NULL; +static uint64_t nstat_idle_time = 0; static decl_lck_mtx_data(, nstat_mtx); /* some extern definitions */ @@ -254,21 +249,22 @@ extern void tcp_report_stats(void); static void nstat_copy_sa_out( - const struct sockaddr *src, - struct sockaddr *dst, - int maxlen) + const struct sockaddr *src, + struct sockaddr *dst, + int maxlen) { - if (src->sa_len > maxlen) return; + if (src->sa_len > maxlen) { + return; + } bcopy(src, dst, src->sa_len); if (src->sa_family == AF_INET6 && - src->sa_len >= sizeof(struct 
sockaddr_in6)) - { - struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst; - if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) - { - if (sin6->sin6_scope_id == 0) + src->sa_len >= sizeof(struct sockaddr_in6)) { + struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst; + if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) { + if (sin6->sin6_scope_id == 0) { sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); + } sin6->sin6_addr.s6_addr16[1] = 0; } } @@ -276,13 +272,14 @@ nstat_copy_sa_out( static void nstat_ip_to_sockaddr( - const struct in_addr *ip, - u_int16_t port, - struct sockaddr_in *sin, - u_int32_t maxlen) + const struct in_addr *ip, + u_int16_t port, + struct sockaddr_in *sin, + u_int32_t maxlen) { - if (maxlen < sizeof(struct sockaddr_in)) + if (maxlen < sizeof(struct sockaddr_in)) { return; + } sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); @@ -300,8 +297,7 @@ nstat_ifnet_to_flags( /* Panic if someone adds a functional type without updating ntstat. */ VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST); - switch (functional_type) - { + switch (functional_type) { case IFRTYPE_FUNCTIONAL_UNKNOWN: flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE; break; @@ -324,8 +320,7 @@ nstat_ifnet_to_flags( break; } - if (IFNET_IS_EXPENSIVE(ifp)) - { + if (IFNET_IS_EXPENSIVE(ifp)) { flags |= NSTAT_IFNET_IS_EXPENSIVE; } @@ -338,20 +333,17 @@ nstat_inpcb_to_flags( { u_int16_t flags = 0; - if ((inp != NULL ) && (inp->inp_last_outifp != NULL)) - { + if ((inp != NULL) && (inp->inp_last_outifp != NULL)) { struct ifnet *ifp = inp->inp_last_outifp; flags = nstat_ifnet_to_flags(ifp); - if (flags & NSTAT_IFNET_IS_CELLULAR) - { + if (flags & NSTAT_IFNET_IS_CELLULAR) { if (inp->inp_socket != NULL && - (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) + (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) { flags |= NSTAT_IFNET_VIA_CELLFALLBACK; + } } - } - else - { + } else { flags = NSTAT_IFNET_IS_UNKNOWN_TYPE; } @@ -361,18 +353,18 @@ nstat_inpcb_to_flags( #pragma mark -- Network Statistic Providers -- static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie); -struct nstat_provider *nstat_providers = NULL; +struct nstat_provider *nstat_providers = NULL; static struct nstat_provider* nstat_find_provider_by_id( - nstat_provider_id_t id) + nstat_provider_id_t id) { - struct nstat_provider *provider; + struct nstat_provider *provider; - for (provider = nstat_providers; provider != NULL; provider = provider->next) - { - if (provider->nstat_provider_id == id) + for (provider = nstat_providers; provider != NULL; provider = provider->next) { + if (provider->nstat_provider_id == id) { break; + } } return provider; @@ -380,15 +372,14 @@ nstat_find_provider_by_id( static errno_t nstat_lookup_entry( - nstat_provider_id_t id, - const void *data, - u_int32_t length, - nstat_provider **out_provider, - nstat_provider_cookie_t *out_cookie) + nstat_provider_id_t id, + const void *data, + u_int32_t length, + nstat_provider **out_provider, + nstat_provider_cookie_t *out_cookie) { *out_provider = nstat_find_provider_by_id(id); - if (*out_provider == NULL) - { + if (*out_provider == NULL) { return ENOENT; } @@ -403,16 +394,15 @@ static void nstat_init_ifnet_provider(void); __private_extern__ void nstat_init(void) { - if (nstat_malloc_tag != NULL) return; + if (nstat_malloc_tag != NULL) { + return; + } OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT); - if (!OSCompareAndSwapPtr(NULL, tag, 
&nstat_malloc_tag)) - { + if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag)) { OSMalloc_Tagfree(tag); tag = nstat_malloc_tag; - } - else - { + } else { // we need to initialize other things, we do it here as this code path will only be hit once; nstat_init_route_provider(); nstat_init_tcp_provider(); @@ -424,25 +414,26 @@ nstat_init(void) #pragma mark -- Aligned Buffer Allocation -- -struct align_header -{ - u_int32_t offset; - u_int32_t length; +struct align_header { + u_int32_t offset; + u_int32_t length; }; static void* nstat_malloc_aligned( - u_int32_t length, - u_int8_t alignment, - OSMallocTag tag) + u_int32_t length, + u_int8_t alignment, + OSMallocTag tag) { - struct align_header *hdr = NULL; - u_int32_t size = length + sizeof(*hdr) + alignment - 1; + struct align_header *hdr = NULL; + u_int32_t size = length + sizeof(*hdr) + alignment - 1; - u_int8_t *buffer = OSMalloc(size, tag); - if (buffer == NULL) return NULL; + u_int8_t *buffer = OSMalloc(size, tag); + if (buffer == NULL) { + return NULL; + } - u_int8_t *aligned = buffer + sizeof(*hdr); + u_int8_t *aligned = buffer + sizeof(*hdr); aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment); hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr)); @@ -454,8 +445,8 @@ nstat_malloc_aligned( static void nstat_free_aligned( - void *buffer, - OSMallocTag tag) + void *buffer, + OSMallocTag tag) { struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr)); OSFree(((char*)buffer) - hdr->offset, hdr->length, tag); @@ -463,88 +454,90 @@ nstat_free_aligned( #pragma mark -- Route Provider -- -static nstat_provider nstat_route_provider; +static nstat_provider nstat_route_provider; static errno_t nstat_route_lookup( - const void *data, - u_int32_t length, - nstat_provider_cookie_t *out_cookie) + const void *data, + u_int32_t length, + nstat_provider_cookie_t *out_cookie) { // rt_lookup doesn't take const params but it doesn't modify the parameters for // the lookup. So...we use a union to eliminate the warning. - union - { + union{ struct sockaddr *sa; const struct sockaddr *const_sa; } dst, mask; - const nstat_route_add_param *param = (const nstat_route_add_param*)data; + const nstat_route_add_param *param = (const nstat_route_add_param*)data; *out_cookie = NULL; - if (length < sizeof(*param)) - { + if (length < sizeof(*param)) { return EINVAL; } if (param->dst.v4.sin_family == 0 || - param->dst.v4.sin_family > AF_MAX || - (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) - { + param->dst.v4.sin_family > AF_MAX || + (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) { return EINVAL; } if (param->dst.v4.sin_len > sizeof(param->dst) || - (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len))) - { + (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len))) { return EINVAL; } if ((param->dst.v4.sin_family == AF_INET && param->dst.v4.sin_len < sizeof(struct sockaddr_in)) || (param->dst.v6.sin6_family == AF_INET6 && - param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) - { + param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) { return EINVAL; } dst.const_sa = (const struct sockaddr*)&param->dst; mask.const_sa = param->mask.v4.sin_family ? 
(const struct sockaddr*)¶m->mask : NULL; - struct radix_node_head *rnh = rt_tables[dst.sa->sa_family]; - if (rnh == NULL) return EAFNOSUPPORT; + struct radix_node_head *rnh = rt_tables[dst.sa->sa_family]; + if (rnh == NULL) { + return EAFNOSUPPORT; + } lck_mtx_lock(rnh_lock); struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex); lck_mtx_unlock(rnh_lock); - if (rt) *out_cookie = (nstat_provider_cookie_t)rt; + if (rt) { + *out_cookie = (nstat_provider_cookie_t)rt; + } return rt ? 0 : ENOENT; } static int nstat_route_gone( - nstat_provider_cookie_t cookie) + nstat_provider_cookie_t cookie) { - struct rtentry *rt = (struct rtentry*)cookie; + struct rtentry *rt = (struct rtentry*)cookie; return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0; } static errno_t nstat_route_counts( - nstat_provider_cookie_t cookie, - struct nstat_counts *out_counts, - int *out_gone) + nstat_provider_cookie_t cookie, + struct nstat_counts *out_counts, + int *out_gone) { - struct rtentry *rt = (struct rtentry*)cookie; - struct nstat_counts *rt_stats = rt->rt_stats; + struct rtentry *rt = (struct rtentry*)cookie; + struct nstat_counts *rt_stats = rt->rt_stats; - if (out_gone) *out_gone = 0; + if (out_gone) { + *out_gone = 0; + } - if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1; + if (out_gone && (rt->rt_flags & RTF_UP) == 0) { + *out_gone = 1; + } - if (rt_stats) - { + if (rt_stats) { atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets); atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes); atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets); @@ -558,9 +551,7 @@ nstat_route_counts( out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt; out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt; out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0; - } - else - { + } else { bzero(out_counts, sizeof(*out_counts)); } @@ -575,22 +566,21 @@ nstat_route_release( rtfree((struct rtentry*)cookie); } -static u_int32_t nstat_route_watchers = 0; +static u_int32_t nstat_route_watchers = 0; static int nstat_route_walktree_add( - struct radix_node *rn, - void *context) + struct radix_node *rn, + void *context) { - errno_t result = 0; + errno_t result = 0; struct rtentry *rt = (struct rtentry *)rn; - nstat_control_state *state = (nstat_control_state*)context; + nstat_control_state *state = (nstat_control_state*)context; LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); /* RTF_UP can't change while rnh_lock is held */ - if ((rt->rt_flags & RTF_UP) != 0) - { + if ((rt->rt_flags & RTF_UP) != 0) { /* Clear RTPRF_OURS if the route is still usable */ RT_LOCK(rt); if (rt_validate(rt)) { @@ -602,12 +592,14 @@ nstat_route_walktree_add( } /* Otherwise if RTF_CONDEMNED, treat it as if it were down */ - if (rt == NULL) - return (0); + if (rt == NULL) { + return 0; + } result = nstat_control_source_add(0, state, &nstat_route_provider, rt); - if (result != 0) + if (result != 0) { rtfree_locked(rt); + } } return result; @@ -615,7 +607,7 @@ nstat_route_walktree_add( static errno_t nstat_route_add_watcher( - nstat_control_state *state, + nstat_control_state *state, nstat_msg_add_all_srcs *req) { int i; @@ -624,19 +616,18 @@ nstat_route_add_watcher( lck_mtx_lock(rnh_lock); result = nstat_set_provider_filter(state, req); - if (result == 0) - { + if (result == 0) { OSIncrementAtomic(&nstat_route_watchers); - for (i = 1; i < AF_MAX; i++) - { + for (i = 1; i < AF_MAX; i++) { struct radix_node_head *rnh; rnh = rt_tables[i]; - if (!rnh) continue; + if (!rnh) { + continue; + } 
result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state); - if (result != 0) - { + if (result != 0) { // This is probably resource exhaustion. // There currently isn't a good way to recover from this. // Least bad seems to be to give up on the add-all but leave @@ -652,26 +643,25 @@ nstat_route_add_watcher( __private_extern__ void nstat_route_new_entry( - struct rtentry *rt) + struct rtentry *rt) { - if (nstat_route_watchers == 0) + if (nstat_route_watchers == 0) { return; + } lck_mtx_lock(&nstat_mtx); - if ((rt->rt_flags & RTF_UP) != 0) - { - nstat_control_state *state; - for (state = nstat_controls; state; state = state->ncs_next) - { - if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) - { + if ((rt->rt_flags & RTF_UP) != 0) { + nstat_control_state *state; + for (state = nstat_controls; state; state = state->ncs_next) { + if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) { // this client is watching routes // acquire a reference for the route RT_ADDREF(rt); // add the source, if that fails, release the reference - if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) + if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) { RT_REMREF(rt); + } } } } @@ -680,45 +670,48 @@ nstat_route_new_entry( static void nstat_route_remove_watcher( - __unused nstat_control_state *state) + __unused nstat_control_state *state) { OSDecrementAtomic(&nstat_route_watchers); } static errno_t nstat_route_copy_descriptor( - nstat_provider_cookie_t cookie, - void *data, - u_int32_t len) + nstat_provider_cookie_t cookie, + void *data, + u_int32_t len) { - nstat_route_descriptor *desc = (nstat_route_descriptor*)data; - if (len < sizeof(*desc)) - { + nstat_route_descriptor *desc = (nstat_route_descriptor*)data; + if (len < sizeof(*desc)) { return EINVAL; } bzero(desc, sizeof(*desc)); - struct rtentry *rt = (struct rtentry*)cookie; + struct rtentry *rt = (struct rtentry*)cookie; desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt); desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent); desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute); // key/dest - struct sockaddr *sa; - if ((sa = rt_key(rt))) + struct sockaddr *sa; + if ((sa = rt_key(rt))) { nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst)); + } // mask - if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) + if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) { memcpy(&desc->mask, sa, sa->sa_len); + } // gateway - if ((sa = rt->rt_gateway)) + if ((sa = rt->rt_gateway)) { nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway)); + } - if (rt->rt_ifp) + if (rt->rt_ifp) { desc->ifindex = rt->rt_ifp->if_index; + } desc->flags = rt->rt_flags; @@ -730,17 +723,14 @@ nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_fil { bool retval = true; - if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) - { - struct rtentry *rt = (struct rtentry*)cookie; + if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) { + struct rtentry *rt = (struct rtentry*)cookie; struct ifnet *ifp = rt->rt_ifp; - if (ifp) - { + if (ifp) { uint16_t interface_properties = nstat_ifnet_to_flags(ifp); - if ((filter->npf_flags & interface_properties) == 0) - { + if ((filter->npf_flags & interface_properties) == 0) { retval = false; } } @@ -770,20 +760,25 @@ nstat_init_route_provider(void) __private_extern__ struct nstat_counts* nstat_route_attach( - struct rtentry *rte) + struct rtentry *rte) { struct nstat_counts *result = rte->rt_stats; - if (result) return result; 
+ if (result) { + return result; + } - if (nstat_malloc_tag == NULL) nstat_init(); + if (nstat_malloc_tag == NULL) { + nstat_init(); + } result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag); - if (!result) return result; + if (!result) { + return result; + } bzero(result, sizeof(*result)); - if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) - { + if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) { nstat_free_aligned(result, nstat_malloc_tag); result = rte->rt_stats; } @@ -793,10 +788,9 @@ nstat_route_attach( __private_extern__ void nstat_route_detach( - struct rtentry *rte) + struct rtentry *rte) { - if (rte->rt_stats) - { + if (rte->rt_stats) { nstat_free_aligned(rte->rt_stats, nstat_malloc_tag); rte->rt_stats = NULL; } @@ -804,13 +798,11 @@ nstat_route_detach( __private_extern__ void nstat_route_connect_attempt( - struct rtentry *rte) + struct rtentry *rte) { - while (rte) - { - struct nstat_counts* stats = nstat_route_attach(rte); - if (stats) - { + while (rte) { + struct nstat_counts* stats = nstat_route_attach(rte); + if (stats) { OSIncrementAtomic(&stats->nstat_connectattempts); } @@ -820,14 +812,12 @@ nstat_route_connect_attempt( __private_extern__ void nstat_route_connect_success( - struct rtentry *rte) + struct rtentry *rte) { // This route - while (rte) - { - struct nstat_counts* stats = nstat_route_attach(rte); - if (stats) - { + while (rte) { + struct nstat_counts* stats = nstat_route_attach(rte); + if (stats) { OSIncrementAtomic(&stats->nstat_connectsuccesses); } @@ -837,22 +827,17 @@ nstat_route_connect_success( __private_extern__ void nstat_route_tx( - struct rtentry *rte, - u_int32_t packets, - u_int32_t bytes, - u_int32_t flags) + struct rtentry *rte, + u_int32_t packets, + u_int32_t bytes, + u_int32_t flags) { - while (rte) - { - struct nstat_counts* stats = nstat_route_attach(rte); - if (stats) - { - if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) - { + while (rte) { + struct nstat_counts* stats = nstat_route_attach(rte); + if (stats) { + if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) { OSAddAtomic(bytes, &stats->nstat_txretransmit); - } - else - { + } else { OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets); OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes); } @@ -864,27 +849,24 @@ nstat_route_tx( __private_extern__ void nstat_route_rx( - struct rtentry *rte, - u_int32_t packets, - u_int32_t bytes, - u_int32_t flags) + struct rtentry *rte, + u_int32_t packets, + u_int32_t bytes, + u_int32_t flags) { - while (rte) - { - struct nstat_counts* stats = nstat_route_attach(rte); - if (stats) - { - if (flags == 0) - { + while (rte) { + struct nstat_counts* stats = nstat_route_attach(rte); + if (stats) { + if (flags == 0) { OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets); OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes); - } - else - { - if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) + } else { + if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) { OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes); - if (flags & NSTAT_RX_FLAG_DUPLICATE) + } + if (flags & NSTAT_RX_FLAG_DUPLICATE) { OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes); + } } } @@ -893,48 +875,46 @@ nstat_route_rx( } /* atomically average current value at _val_addr with _new_val and store */ -#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \ - volatile uint32_t _old_val; \ - volatile uint32_t _avg; \ - do { \ - _old_val = *_val_addr; \ - if (_old_val == 0) \ - { \ - _avg = _new_val; \ - } \ - else \ - { \ - _avg = _old_val - 
(_old_val >> _decay) + (_new_val >> _decay); \ - } \ - if (_old_val == _avg) break; \ - } while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \ +#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \ + volatile uint32_t _old_val; \ + volatile uint32_t _avg; \ + do { \ + _old_val = *_val_addr; \ + if (_old_val == 0) \ + { \ + _avg = _new_val; \ + } \ + else \ + { \ + _avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \ + } \ + if (_old_val == _avg) break; \ + } while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \ } while (0); /* atomically compute minimum of current value at _val_addr with _new_val and store */ -#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \ - volatile uint32_t _old_val; \ - do { \ - _old_val = *_val_addr; \ - if (_old_val != 0 && _old_val < _new_val) \ - { \ - break; \ - } \ - } while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \ +#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \ + volatile uint32_t _old_val; \ + do { \ + _old_val = *_val_addr; \ + if (_old_val != 0 && _old_val < _new_val) \ + { \ + break; \ + } \ + } while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \ } while (0); __private_extern__ void nstat_route_rtt( - struct rtentry *rte, - u_int32_t rtt, - u_int32_t rtt_var) + struct rtentry *rte, + u_int32_t rtt, + u_int32_t rtt_var) { const uint32_t decay = 3; - while (rte) - { - struct nstat_counts* stats = nstat_route_attach(rte); - if (stats) - { + while (rte) { + struct nstat_counts* stats = nstat_route_attach(rte); + if (stats) { NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay); NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt); NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay); @@ -945,26 +925,24 @@ nstat_route_rtt( __private_extern__ void nstat_route_update( - struct rtentry *rte, - uint32_t connect_attempts, - uint32_t connect_successes, - uint32_t rx_packets, - uint32_t rx_bytes, - uint32_t rx_duplicatebytes, - uint32_t rx_outoforderbytes, - uint32_t tx_packets, - uint32_t tx_bytes, - uint32_t tx_retransmit, - uint32_t rtt, - uint32_t rtt_var) + struct rtentry *rte, + uint32_t connect_attempts, + uint32_t connect_successes, + uint32_t rx_packets, + uint32_t rx_bytes, + uint32_t rx_duplicatebytes, + uint32_t rx_outoforderbytes, + uint32_t tx_packets, + uint32_t tx_bytes, + uint32_t tx_retransmit, + uint32_t rtt, + uint32_t rtt_var) { const uint32_t decay = 3; - while (rte) - { - struct nstat_counts* stats = nstat_route_attach(rte); - if (stats) - { + while (rte) { + struct nstat_counts* stats = nstat_route_attach(rte); + if (stats) { OSAddAtomic(connect_attempts, &stats->nstat_connectattempts); OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses); OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets); @@ -999,38 +977,37 @@ nstat_route_update( * we need to keep track of the last call to connect() in ntstat. 
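 * (The sockaddr tuples cached below are captured by nstat_pcb_cache() and
 * marked stale again by nstat_pcb_invalidate_cache(), both later in this
 * file, so nstat_udp_copy_descriptor() can still report the last-connected
 * tuple once the inpcb itself no longer carries one.)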
*/ struct nstat_tucookie { - struct inpcb *inp; - char pname[MAXCOMLEN+1]; - bool cached; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; + struct inpcb *inp; + char pname[MAXCOMLEN + 1]; + bool cached; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } local; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } remote; - unsigned int if_index; - uint16_t ifnet_properties; + unsigned int if_index; + uint16_t ifnet_properties; }; static struct nstat_tucookie * nstat_tucookie_alloc_internal( - struct inpcb *inp, - bool ref, - bool locked) + struct inpcb *inp, + bool ref, + bool locked) { struct nstat_tucookie *cookie; cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag); - if (cookie == NULL) + if (cookie == NULL) { return NULL; - if (!locked) + } + if (!locked) { LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED); - if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) - { + } + if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) { OSFree(cookie, sizeof(*cookie), nstat_malloc_tag); return NULL; } @@ -1042,153 +1019,151 @@ nstat_tucookie_alloc_internal( * We only increment the reference count for UDP sockets because we * only cache UDP socket tuples. */ - if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) + if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) { OSIncrementAtomic(&inp->inp_nstat_refcnt); + } return cookie; } static struct nstat_tucookie * nstat_tucookie_alloc( - struct inpcb *inp) + struct inpcb *inp) { return nstat_tucookie_alloc_internal(inp, false, false); } static struct nstat_tucookie * nstat_tucookie_alloc_ref( - struct inpcb *inp) + struct inpcb *inp) { return nstat_tucookie_alloc_internal(inp, true, false); } static struct nstat_tucookie * nstat_tucookie_alloc_ref_locked( - struct inpcb *inp) + struct inpcb *inp) { return nstat_tucookie_alloc_internal(inp, true, true); } static void nstat_tucookie_release_internal( - struct nstat_tucookie *cookie, - int inplock) + struct nstat_tucookie *cookie, + int inplock) { - if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) + if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) { OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt); + } in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock); OSFree(cookie, sizeof(*cookie), nstat_malloc_tag); } static void nstat_tucookie_release( - struct nstat_tucookie *cookie) + struct nstat_tucookie *cookie) { nstat_tucookie_release_internal(cookie, false); } static void nstat_tucookie_release_locked( - struct nstat_tucookie *cookie) + struct nstat_tucookie *cookie) { nstat_tucookie_release_internal(cookie, true); } -static nstat_provider nstat_tcp_provider; +static nstat_provider nstat_tcp_provider; static errno_t nstat_tcpudp_lookup( - struct inpcbinfo *inpinfo, - const void *data, - u_int32_t length, - nstat_provider_cookie_t *out_cookie) + struct inpcbinfo *inpinfo, + const void *data, + u_int32_t length, + nstat_provider_cookie_t *out_cookie) { struct inpcb *inp = NULL; // parameter validation - const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data; - if (length < sizeof(*param)) - { + const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data; + if (length < sizeof(*param)) { return EINVAL; } // src and dst must match if (param->remote.v4.sin_family != 0 && - param->remote.v4.sin_family != param->local.v4.sin_family) - { + param->remote.v4.sin_family != param->local.v4.sin_family) { return EINVAL; } - switch 
(param->local.v4.sin_family) + switch (param->local.v4.sin_family) { + case AF_INET: { - case AF_INET: - { - if (param->local.v4.sin_len != sizeof(param->local.v4) || - (param->remote.v4.sin_family != 0 && - param->remote.v4.sin_len != sizeof(param->remote.v4))) - { - return EINVAL; - } - - inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port, - param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL); + if (param->local.v4.sin_len != sizeof(param->local.v4) || + (param->remote.v4.sin_family != 0 && + param->remote.v4.sin_len != sizeof(param->remote.v4))) { + return EINVAL; } - break; + + inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port, + param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL); + } + break; #if INET6 - case AF_INET6: - { - union - { - const struct in6_addr *in6c; - struct in6_addr *in6; - } local, remote; + case AF_INET6: + { + union{ + const struct in6_addr *in6c; + struct in6_addr *in6; + } local, remote; - if (param->local.v6.sin6_len != sizeof(param->local.v6) || - (param->remote.v6.sin6_family != 0 && - param->remote.v6.sin6_len != sizeof(param->remote.v6))) - { - return EINVAL; - } + if (param->local.v6.sin6_len != sizeof(param->local.v6) || + (param->remote.v6.sin6_family != 0 && + param->remote.v6.sin6_len != sizeof(param->remote.v6))) { + return EINVAL; + } - local.in6c = &param->local.v6.sin6_addr; - remote.in6c = &param->remote.v6.sin6_addr; + local.in6c = &param->local.v6.sin6_addr; + remote.in6c = &param->remote.v6.sin6_addr; - inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port, - local.in6, param->local.v6.sin6_port, 1, NULL); - } - break; + inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port, + local.in6, param->local.v6.sin6_port, 1, NULL); + } + break; #endif - default: - return EINVAL; + default: + return EINVAL; } - if (inp == NULL) + if (inp == NULL) { return ENOENT; + } // At this point we have a ref to the inpcb *out_cookie = nstat_tucookie_alloc(inp); - if (*out_cookie == NULL) + if (*out_cookie == NULL) { in_pcb_checkstate(inp, WNT_RELEASE, 0); + } return 0; } static errno_t nstat_tcp_lookup( - const void *data, - u_int32_t length, - nstat_provider_cookie_t *out_cookie) + const void *data, + u_int32_t length, + nstat_provider_cookie_t *out_cookie) { return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie); } static int nstat_tcp_gone( - nstat_provider_cookie_t cookie) + nstat_provider_cookie_t cookie) { struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; @@ -1196,15 +1171,15 @@ nstat_tcp_gone( struct tcpcb *tp; return (!(inp = tucookie->inp) || - !(tp = intotcpcb(inp)) || - inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0; + !(tp = intotcpcb(inp)) || + inp->inp_state == INPCB_STATE_DEAD) ? 
1 : 0; } static errno_t nstat_tcp_counts( - nstat_provider_cookie_t cookie, - struct nstat_counts *out_counts, - int *out_gone) + nstat_provider_cookie_t cookie, + struct nstat_counts *out_counts, + int *out_gone) { struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; @@ -1212,14 +1187,18 @@ nstat_tcp_counts( bzero(out_counts, sizeof(*out_counts)); - if (out_gone) *out_gone = 0; + if (out_gone) { + *out_gone = 0; + } // if the pcb is in the dead state, we should stop using it - if (nstat_tcp_gone(cookie)) - { - if (out_gone) *out_gone = 1; - if (!(inp = tucookie->inp) || !intotcpcb(inp)) + if (nstat_tcp_gone(cookie)) { + if (out_gone) { + *out_gone = 1; + } + if (!(inp = tucookie->inp) || !intotcpcb(inp)) { return EINVAL; + } } inp = tucookie->inp; struct tcpcb *tp = intotcpcb(inp); @@ -1236,8 +1215,9 @@ nstat_tcp_counts( out_counts->nstat_avg_rtt = tp->t_srtt; out_counts->nstat_min_rtt = tp->t_rttbest; out_counts->nstat_var_rtt = tp->t_rttvar; - if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) + if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) { out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt; + } atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes); atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes); atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes); @@ -1250,7 +1230,7 @@ nstat_tcp_counts( static void nstat_tcp_release( - nstat_provider_cookie_t cookie, + nstat_provider_cookie_t cookie, int locked) { struct nstat_tucookie *tucookie = @@ -1261,7 +1241,7 @@ nstat_tcp_release( static errno_t nstat_tcp_add_watcher( - nstat_control_state *state, + nstat_control_state *state, nstat_msg_add_all_srcs *req) { // There is a tricky issue around getting all TCP sockets added once @@ -1287,11 +1267,11 @@ nstat_tcp_add_watcher( LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) { cookie = nstat_tucookie_alloc_ref(inp); - if (cookie == NULL) + if (cookie == NULL) { continue; + } if (nstat_control_source_add(0, state, &nstat_tcp_provider, - cookie) != 0) - { + cookie) != 0) { nstat_tucookie_release(cookie); break; } @@ -1305,38 +1285,37 @@ nstat_tcp_add_watcher( static void nstat_tcp_remove_watcher( - __unused nstat_control_state *state) + __unused nstat_control_state *state) { OSDecrementAtomic(&nstat_tcp_watchers); } __private_extern__ void nstat_tcp_new_pcb( - struct inpcb *inp) + struct inpcb *inp) { struct nstat_tucookie *cookie; inp->inp_start_timestamp = mach_continuous_time(); - if (nstat_tcp_watchers == 0) + if (nstat_tcp_watchers == 0) { return; + } socket_lock(inp->inp_socket, 0); lck_mtx_lock(&nstat_mtx); - nstat_control_state *state; - for (state = nstat_controls; state; state = state->ncs_next) - { - if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) - { + nstat_control_state *state; + for (state = nstat_controls; state; state = state->ncs_next) { + if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) { // this client is watching tcp // acquire a reference for it cookie = nstat_tucookie_alloc_ref_locked(inp); - if (cookie == NULL) + if (cookie == NULL) { continue; + } // add the source, if that fails, release the reference if (nstat_control_source_add(0, state, - &nstat_tcp_provider, cookie) != 0) - { + &nstat_tcp_provider, cookie) != 0) { nstat_tucookie_release_locked(cookie); break; } @@ -1355,27 +1334,26 @@ nstat_pcb_detach(struct inpcb *inp) struct nstat_tucookie *tucookie; errno_t result; - if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers 
== 0)) + if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) { return; + } TAILQ_INIT(&dead_list); lck_mtx_lock(&nstat_mtx); - for (state = nstat_controls; state; state = state->ncs_next) - { + for (state = nstat_controls; state; state = state->ncs_next) { lck_mtx_lock(&state->ncs_mtx); TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { nstat_provider_id_t provider_id = src->provider->nstat_provider_id; - if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) - { + if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) { tucookie = (struct nstat_tucookie *)src->cookie; - if (tucookie->inp == inp) + if (tucookie->inp == inp) { break; + } } } - if (src) - { + if (src) { result = nstat_control_send_goodbye(state, src); TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); @@ -1385,8 +1363,7 @@ nstat_pcb_detach(struct inpcb *inp) } lck_mtx_unlock(&nstat_mtx); - while ((src = TAILQ_FIRST(&dead_list))) - { + while ((src = TAILQ_FIRST(&dead_list))) { TAILQ_REMOVE(&dead_list, src, ns_control_link); nstat_control_cleanup_source(NULL, src, TRUE); } @@ -1400,8 +1377,9 @@ nstat_pcb_cache(struct inpcb *inp) struct nstat_tucookie *tucookie; if (inp == NULL || nstat_udp_watchers == 0 || - inp->inp_nstat_refcnt == 0) + inp->inp_nstat_refcnt == 0) { return; + } VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP); lck_mtx_lock(&nstat_mtx); for (state = nstat_controls; state; state = state->ncs_next) { @@ -1409,10 +1387,8 @@ nstat_pcb_cache(struct inpcb *inp) TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { tucookie = (struct nstat_tucookie *)src->cookie; - if (tucookie->inp == inp) - { - if (inp->inp_vflag & INP_IPV6) - { + if (tucookie->inp == inp) { + if (inp->inp_vflag & INP_IPV6) { in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, &tucookie->local.v6, @@ -1421,9 +1397,7 @@ nstat_pcb_cache(struct inpcb *inp) inp->inp_fport, &tucookie->remote.v6, sizeof(tucookie->remote)); - } - else if (inp->inp_vflag & INP_IPV4) - { + } else if (inp->inp_vflag & INP_IPV4) { nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport, &tucookie->local.v4, @@ -1433,9 +1407,10 @@ nstat_pcb_cache(struct inpcb *inp) &tucookie->remote.v4, sizeof(tucookie->remote)); } - if (inp->inp_last_outifp) + if (inp->inp_last_outifp) { tucookie->if_index = inp->inp_last_outifp->if_index; + } tucookie->ifnet_properties = nstat_inpcb_to_flags(inp); tucookie->cached = true; @@ -1455,8 +1430,9 @@ nstat_pcb_invalidate_cache(struct inpcb *inp) struct nstat_tucookie *tucookie; if (inp == NULL || nstat_udp_watchers == 0 || - inp->inp_nstat_refcnt == 0) + inp->inp_nstat_refcnt == 0) { return; + } VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP); lck_mtx_lock(&nstat_mtx); for (state = nstat_controls; state; state = state->ncs_next) { @@ -1464,8 +1440,7 @@ nstat_pcb_invalidate_cache(struct inpcb *inp) TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { tucookie = (struct nstat_tucookie *)src->cookie; - if (tucookie->inp == inp) - { + if (tucookie->inp == inp) { tucookie->cached = false; break; } @@ -1477,38 +1452,35 @@ nstat_pcb_invalidate_cache(struct inpcb *inp) static errno_t nstat_tcp_copy_descriptor( - nstat_provider_cookie_t cookie, - void *data, - u_int32_t len) + nstat_provider_cookie_t cookie, + void *data, + u_int32_t len) { - if (len < sizeof(nstat_tcp_descriptor)) - { + if (len < sizeof(nstat_tcp_descriptor)) { return EINVAL; } - if (nstat_tcp_gone(cookie)) + if (nstat_tcp_gone(cookie)) { return EINVAL; + } - 
nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data; + nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data; struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; - struct inpcb *inp = tucookie->inp; - struct tcpcb *tp = intotcpcb(inp); + struct inpcb *inp = tucookie->inp; + struct tcpcb *tp = intotcpcb(inp); bzero(desc, sizeof(*desc)); - if (inp->inp_vflag & INP_IPV6) - { + if (inp->inp_vflag & INP_IPV6) { in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, - &desc->local.v6, sizeof(desc->local)); + &desc->local.v6, sizeof(desc->local)); in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, - &desc->remote.v6, sizeof(desc->remote)); - } - else if (inp->inp_vflag & INP_IPV4) - { + &desc->remote.v6, sizeof(desc->remote)); + } else if (inp->inp_vflag & INP_IPV4) { nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport, - &desc->local.v4, sizeof(desc->local)); + &desc->local.v4, sizeof(desc->local)); nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport, - &desc->remote.v4, sizeof(desc->remote)); + &desc->remote.v4, sizeof(desc->remote)); } desc->state = intotcpcb(inp)->t_state; @@ -1526,25 +1498,23 @@ nstat_tcp_copy_descriptor( } struct socket *so = inp->inp_socket; - if (so) - { + if (so) { // TBD - take the socket lock around these to make sure // they're in sync? desc->upid = so->last_upid; desc->pid = so->last_pid; desc->traffic_class = so->so_traffic_class; - if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) + if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) { desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND; - if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) + } + if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) { desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG; + } proc_name(desc->pid, desc->pname, sizeof(desc->pname)); - if (desc->pname[0] == 0) - { + if (desc->pname[0] == 0) { strlcpy(desc->pname, tucookie->pname, sizeof(desc->pname)); - } - else - { + } else { desc->pname[sizeof(desc->pname) - 1] = 0; strlcpy(tucookie->pname, desc->pname, sizeof(tucookie->pname)); @@ -1579,77 +1549,58 @@ nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_fi { bool retval = true; - if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0) - { + if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) { struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; struct inpcb *inp = tucookie->inp; /* Only apply interface filter if at least one is allowed. */ - if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) - { + if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) { uint16_t interface_properties = nstat_inpcb_to_flags(inp); - if ((filter->npf_flags & interface_properties) == 0) - { + if ((filter->npf_flags & interface_properties) == 0) { // For UDP, we could have an undefined interface and yet transfers may have occurred. // We allow reporting if there have been transfers of the requested kind. // This is imperfect as we cannot account for the expensive attribute over wifi. 
// We also assume that cellular is expensive and we have no way to select for AWDL - if (is_UDP) - { - do - { - if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR|NSTAT_FILTER_ACCEPT_EXPENSIVE)) && - (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) - { + if (is_UDP) { + do{ + if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) && + (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) { break; } if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) && - (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) - { + (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) { break; } if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) && - (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) - { + (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) { break; } return false; } while (0); - } - else - { + } else { return false; } } } - if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) - { + if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) { struct socket *so = inp->inp_socket; retval = false; - if (so) - { + if (so) { if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) && - (filter->npf_pid == so->last_pid)) - { + (filter->npf_pid == so->last_pid)) { retval = true; - } - else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) && - (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid)) - { + } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) && + (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid)) { retval = true; - } - else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) && - (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) - { + } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) && + (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) { retval = true; - } - else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) && - (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid, - sizeof(so->last_uuid)) == 0)) - { + } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) && + (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid, + sizeof(so->last_uuid)) == 0)) { retval = true; } } @@ -1684,46 +1635,50 @@ nstat_init_tcp_provider(void) #pragma mark -- UDP Provider -- -static nstat_provider nstat_udp_provider; +static nstat_provider nstat_udp_provider; static errno_t nstat_udp_lookup( - const void *data, - u_int32_t length, - nstat_provider_cookie_t *out_cookie) + const void *data, + u_int32_t length, + nstat_provider_cookie_t *out_cookie) { return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie); } static int nstat_udp_gone( - nstat_provider_cookie_t cookie) + nstat_provider_cookie_t cookie) { struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; struct inpcb *inp; return (!(inp = tucookie->inp) || - inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0; + inp->inp_state == INPCB_STATE_DEAD) ? 
1 : 0; } static errno_t nstat_udp_counts( - nstat_provider_cookie_t cookie, - struct nstat_counts *out_counts, - int *out_gone) + nstat_provider_cookie_t cookie, + struct nstat_counts *out_counts, + int *out_gone) { struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; - if (out_gone) *out_gone = 0; + if (out_gone) { + *out_gone = 0; + } // if the pcb is in the dead state, we should stop using it - if (nstat_udp_gone(cookie)) - { - if (out_gone) *out_gone = 1; - if (!tucookie->inp) + if (nstat_udp_gone(cookie)) { + if (out_gone) { + *out_gone = 1; + } + if (!tucookie->inp) { return EINVAL; + } } struct inpcb *inp = tucookie->inp; @@ -1743,7 +1698,7 @@ nstat_udp_counts( static void nstat_udp_release( - nstat_provider_cookie_t cookie, + nstat_provider_cookie_t cookie, int locked) { struct nstat_tucookie *tucookie = @@ -1754,7 +1709,7 @@ nstat_udp_release( static errno_t nstat_udp_add_watcher( - nstat_control_state *state, + nstat_control_state *state, nstat_msg_add_all_srcs *req) { // There is a tricky issue around getting all UDP sockets added once @@ -1782,11 +1737,11 @@ nstat_udp_add_watcher( LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) { cookie = nstat_tucookie_alloc_ref(inp); - if (cookie == NULL) + if (cookie == NULL) { continue; + } if (nstat_control_source_add(0, state, &nstat_udp_provider, - cookie) != 0) - { + cookie) != 0) { nstat_tucookie_release(cookie); break; } @@ -1800,38 +1755,37 @@ nstat_udp_add_watcher( static void nstat_udp_remove_watcher( - __unused nstat_control_state *state) + __unused nstat_control_state *state) { OSDecrementAtomic(&nstat_udp_watchers); } __private_extern__ void nstat_udp_new_pcb( - struct inpcb *inp) + struct inpcb *inp) { struct nstat_tucookie *cookie; inp->inp_start_timestamp = mach_continuous_time(); - if (nstat_udp_watchers == 0) + if (nstat_udp_watchers == 0) { return; + } socket_lock(inp->inp_socket, 0); lck_mtx_lock(&nstat_mtx); - nstat_control_state *state; - for (state = nstat_controls; state; state = state->ncs_next) - { - if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) - { + nstat_control_state *state; + for (state = nstat_controls; state; state = state->ncs_next) { + if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) { // this client is watching udp // acquire a reference for it cookie = nstat_tucookie_alloc_ref_locked(inp); - if (cookie == NULL) + if (cookie == NULL) { continue; + } // add the source, if that fails, release the reference if (nstat_control_source_add(0, state, - &nstat_udp_provider, cookie) != 0) - { + &nstat_udp_provider, cookie) != 0) { nstat_tucookie_release_locked(cookie); break; } @@ -1843,53 +1797,45 @@ nstat_udp_new_pcb( static errno_t nstat_udp_copy_descriptor( - nstat_provider_cookie_t cookie, - void *data, - u_int32_t len) + nstat_provider_cookie_t cookie, + void *data, + u_int32_t len) { - if (len < sizeof(nstat_udp_descriptor)) - { + if (len < sizeof(nstat_udp_descriptor)) { return EINVAL; } - if (nstat_udp_gone(cookie)) + if (nstat_udp_gone(cookie)) { return EINVAL; + } - struct nstat_tucookie *tucookie = + struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; - nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data; - struct inpcb *inp = tucookie->inp; + nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data; + struct inpcb *inp = tucookie->inp; bzero(desc, sizeof(*desc)); if (tucookie->cached == false) { - if (inp->inp_vflag & INP_IPV6) - { + if (inp->inp_vflag & INP_IPV6) { in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, - 
&desc->local.v6, sizeof(desc->local.v6)); + &desc->local.v6, sizeof(desc->local.v6)); in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, - &desc->remote.v6, sizeof(desc->remote.v6)); - } - else if (inp->inp_vflag & INP_IPV4) - { + &desc->remote.v6, sizeof(desc->remote.v6)); + } else if (inp->inp_vflag & INP_IPV4) { nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport, - &desc->local.v4, sizeof(desc->local.v4)); + &desc->local.v4, sizeof(desc->local.v4)); nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport, - &desc->remote.v4, sizeof(desc->remote.v4)); + &desc->remote.v4, sizeof(desc->remote.v4)); } desc->ifnet_properties = nstat_inpcb_to_flags(inp); - } - else - { - if (inp->inp_vflag & INP_IPV6) - { + } else { + if (inp->inp_vflag & INP_IPV6) { memcpy(&desc->local.v6, &tucookie->local.v6, sizeof(desc->local.v6)); memcpy(&desc->remote.v6, &tucookie->remote.v6, sizeof(desc->remote.v6)); - } - else if (inp->inp_vflag & INP_IPV4) - { + } else if (inp->inp_vflag & INP_IPV4) { memcpy(&desc->local.v4, &tucookie->local.v4, sizeof(desc->local.v4)); memcpy(&desc->remote.v4, &tucookie->remote.v4, @@ -1898,26 +1844,23 @@ nstat_udp_copy_descriptor( desc->ifnet_properties = tucookie->ifnet_properties; } - if (inp->inp_last_outifp) + if (inp->inp_last_outifp) { desc->ifindex = inp->inp_last_outifp->if_index; - else + } else { desc->ifindex = tucookie->if_index; + } struct socket *so = inp->inp_socket; - if (so) - { + if (so) { // TBD - take the socket lock around these to make sure // they're in sync? desc->upid = so->last_upid; desc->pid = so->last_pid; proc_name(desc->pid, desc->pname, sizeof(desc->pname)); - if (desc->pname[0] == 0) - { + if (desc->pname[0] == 0) { strlcpy(desc->pname, tucookie->pname, sizeof(desc->pname)); - } - else - { + } else { desc->pname[sizeof(desc->pname) - 1] = 0; strlcpy(tucookie->pname, desc->pname, sizeof(tucookie->pname)); @@ -1973,23 +1916,22 @@ nstat_init_udp_provider(void) #pragma mark -- ifnet Provider -- -static nstat_provider nstat_ifnet_provider; +static nstat_provider nstat_ifnet_provider; /* * We store a pointer to the ifnet and the original threshold * requested by the client. 
*/ -struct nstat_ifnet_cookie -{ - struct ifnet *ifp; - uint64_t threshold; +struct nstat_ifnet_cookie { + struct ifnet *ifp; + uint64_t threshold; }; static errno_t nstat_ifnet_lookup( - const void *data, - u_int32_t length, - nstat_provider_cookie_t *out_cookie) + const void *data, + u_int32_t length, + nstat_provider_cookie_t *out_cookie) { const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data; struct ifnet *ifp; @@ -1998,31 +1940,32 @@ nstat_ifnet_lookup( nstat_src *src; struct nstat_ifnet_cookie *cookie; - if (length < sizeof(*param) || param->threshold < 1024*1024) + if (length < sizeof(*param) || param->threshold < 1024 * 1024) { return EINVAL; + } if (nstat_privcheck != 0) { errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0); - if (result != 0) + if (result != 0) { return result; + } } cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag); - if (cookie == NULL) + if (cookie == NULL) { return ENOMEM; + } bzero(cookie, sizeof(*cookie)); ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { ifnet_lock_exclusive(ifp); - if (ifp->if_index == param->ifindex) - { + if (ifp->if_index == param->ifindex) { cookie->ifp = ifp; cookie->threshold = param->threshold; *out_cookie = cookie; if (!ifp->if_data_threshold || - ifp->if_data_threshold > param->threshold) - { + ifp->if_data_threshold > param->threshold) { changed = TRUE; ifp->if_data_threshold = param->threshold; } @@ -2040,31 +1983,31 @@ nstat_ifnet_lookup( * We won't send a message to the client we are currently serving * because it has no `ifnet source' yet. */ - if (changed) - { + if (changed) { lck_mtx_lock(&nstat_mtx); - for (state = nstat_controls; state; state = state->ncs_next) - { + for (state = nstat_controls; state; state = state->ncs_next) { lck_mtx_lock(&state->ncs_mtx); TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { - if (src->provider != &nstat_ifnet_provider) + if (src->provider != &nstat_ifnet_provider) { continue; + } nstat_control_send_description(state, src, 0, 0); } lck_mtx_unlock(&state->ncs_mtx); } lck_mtx_unlock(&nstat_mtx); } - if (cookie->ifp == NULL) + if (cookie->ifp == NULL) { OSFree(cookie, sizeof(*cookie), nstat_malloc_tag); + } return ifp ? 
0 : EINVAL; } static int nstat_ifnet_gone( - nstat_provider_cookie_t cookie) + nstat_provider_cookie_t cookie) { struct ifnet *ifp; struct nstat_ifnet_cookie *ifcookie = @@ -2073,8 +2016,9 @@ nstat_ifnet_gone( ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { - if (ifp == ifcookie->ifp) + if (ifp == ifcookie->ifp) { break; + } } ifnet_head_done(); @@ -2083,20 +2027,23 @@ nstat_ifnet_gone( static errno_t nstat_ifnet_counts( - nstat_provider_cookie_t cookie, - struct nstat_counts *out_counts, - int *out_gone) + nstat_provider_cookie_t cookie, + struct nstat_counts *out_counts, + int *out_gone) { struct nstat_ifnet_cookie *ifcookie = (struct nstat_ifnet_cookie *)cookie; struct ifnet *ifp = ifcookie->ifp; - if (out_gone) *out_gone = 0; + if (out_gone) { + *out_gone = 0; + } // if the ifnet is gone, we should stop using it - if (nstat_ifnet_gone(cookie)) - { - if (out_gone) *out_gone = 1; + if (nstat_ifnet_gone(cookie)) { + if (out_gone) { + *out_gone = 1; + } return EINVAL; } @@ -2111,8 +2058,8 @@ nstat_ifnet_counts( static void nstat_ifnet_release( - nstat_provider_cookie_t cookie, - __unused int locked) + nstat_provider_cookie_t cookie, + __unused int locked) { struct nstat_ifnet_cookie *ifcookie; struct ifnet *ifp; @@ -2125,18 +2072,19 @@ nstat_ifnet_release( * for this ifnet and re-calculate if_data_threshold. */ lck_mtx_lock(&nstat_mtx); - for (state = nstat_controls; state; state = state->ncs_next) - { + for (state = nstat_controls; state; state = state->ncs_next) { lck_mtx_lock(&state->ncs_mtx); TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { /* Skip the provider we are about to detach. */ if (src->provider != &nstat_ifnet_provider || - src->cookie == cookie) + src->cookie == cookie) { continue; - ifcookie = (struct nstat_ifnet_cookie *)src->cookie; - if (ifcookie->threshold < minthreshold) + } + ifcookie = (struct nstat_ifnet_cookie *)src->cookie; + if (ifcookie->threshold < minthreshold) { minthreshold = ifcookie->threshold; + } } lck_mtx_unlock(&state->ncs_mtx); } @@ -2148,10 +2096,11 @@ nstat_ifnet_release( ifp = ifcookie->ifp; if (ifnet_is_attached(ifp, 1)) { ifnet_lock_exclusive(ifp); - if (minthreshold == UINT64_MAX) + if (minthreshold == UINT64_MAX) { ifp->if_data_threshold = 0; - else + } else { ifp->if_data_threshold = minthreshold; + } ifnet_lock_done(ifp); ifnet_decr_iorefcnt(ifp); } @@ -2161,26 +2110,27 @@ nstat_ifnet_release( static void nstat_ifnet_copy_link_status( - struct ifnet *ifp, - struct nstat_ifnet_descriptor *desc) + struct ifnet *ifp, + struct nstat_ifnet_descriptor *desc) { struct if_link_status *ifsr = ifp->if_link_status; nstat_ifnet_desc_link_status *link_status = &desc->link_status; link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE; - if (ifsr == NULL) + if (ifsr == NULL) { return; + } lck_rw_lock_shared(&ifp->if_link_status_lock); if (ifp->if_type == IFT_CELLULAR) { - nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular; struct if_cellular_status_v1 *if_cell_sr = - &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1; + &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1; - if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) + if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) { goto done; + } link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR; @@ -2210,16 +2160,17 @@ nstat_ifnet_copy_link_status( } if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) { cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID; - if 
(if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) + if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) { cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE; - else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) + } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) { cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW; - else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) + } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) { cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM; - else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) + } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) { cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH; - else + } else { cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID; + } } if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) { cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID; @@ -2258,13 +2209,13 @@ nstat_ifnet_copy_link_status( cell_status->mss_recommended = if_cell_sr->mss_recommended; } } else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) { - nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi; struct if_wifi_status_v1 *if_wifi_sr = - &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1; + &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1; - if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) + if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) { goto done; + } link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI; @@ -2294,16 +2245,17 @@ nstat_ifnet_copy_link_status( } if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) { wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID; - if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) + if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) { wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE; - else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) + } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) { wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW; - else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) + } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) { wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM; - else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) + } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) { wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH; - else + } else { wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID; + } } if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) { wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID; @@ -2339,12 +2291,13 @@ nstat_ifnet_copy_link_status( } if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) { wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID; - if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) + if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) { wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ; - else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) + } else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) { 
wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ; - else + } else { wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID; + } } if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) { wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID; @@ -2405,8 +2358,9 @@ nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st) { u_int64_t ecn_on_conn, ecn_off_conn; - if (if_st == NULL) + if (if_st == NULL) { return; + } ecn_on_conn = if_st->ecn_client_success + if_st->ecn_server_success; ecn_off_conn = if_st->ecn_off_conn + @@ -2457,8 +2411,9 @@ nstat_ifnet_report_ecn_stats(void) uptime = net_uptime(); if ((int)(uptime - nstat_ifnet_last_report_time) < - tcp_report_stats_interval) + tcp_report_stats_interval) { return; + } last_report_time = nstat_ifnet_last_report_time; nstat_ifnet_last_report_time = uptime; @@ -2467,15 +2422,18 @@ ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { - if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) + if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) { continue; + } - if (!IF_FULLY_ATTACHED(ifp)) + if (!IF_FULLY_ATTACHED(ifp)) { continue; + } /* Limit reporting to Wifi, Ethernet and cellular. */ - if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) + if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) { continue; + } bzero(st, sizeof(*st)); if (IFNET_IS_CELLULAR(ifp)) { @@ -2488,8 +2446,9 @@ data.unsent_data_cnt = ifp->if_unsent_data_cnt; /* skip if there was no update since last report */ if (ifp->if_ipv4_stat->timestamp <= 0 || - ifp->if_ipv4_stat->timestamp < last_report_time) + ifp->if_ipv4_stat->timestamp < last_report_time) { goto v6; + } st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4; /* compute percentages using packet counts */ nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on); @@ -2503,8 +2462,9 @@ v6: /* skip if there was no update since last report */ if (ifp->if_ipv6_stat->timestamp <= 0 || - ifp->if_ipv6_stat->timestamp < last_report_time) + ifp->if_ipv6_stat->timestamp < last_report_time) { continue; + } st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6; /* compute percentages using packet counts */ @@ -2519,15 +2479,14 @@ bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat)); } ifnet_head_done(); - } /* Some thresholds to determine Low Internet mode */ -#define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */ -#define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */ -#define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */ -#define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */ -#define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */ +#define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */ +#define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */ +#define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */ +#define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */ +#define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */ static boolean_t nstat_lim_activity_check(struct if_lim_perf_stat *st) @@ -2535,8 +2494,9 @@ nstat_lim_activity_check(struct if_lim_perf_stat *st) /* check that the current activity is enough to report stats */ if (st->lim_total_txpkts < nstat_lim_min_tx_pkts || st->lim_total_rxpkts < 
nstat_lim_min_rx_pkts || - st->lim_conn_attempts == 0) - return (FALSE); + st->lim_conn_attempts == 0) { + return FALSE; + } /* * Compute percentages if there was enough activity. Use @@ -2561,18 +2521,21 @@ nstat_lim_activity_check(struct if_lim_perf_stat *st) * capacity. */ if (st->lim_dl_max_bandwidth > 0 && - st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) + st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) { st->lim_dl_detected = 1; + } if ((st->lim_ul_max_bandwidth > 0 && st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) || - st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) + st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) { st->lim_ul_detected = 1; + } if (st->lim_conn_attempts > 20 && st->lim_conn_timeout_percent >= - NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) + NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) { st->lim_ul_detected = 1; + } /* * Second order metrics: If there was high packet loss even after * using delay based algorithms then we classify it as Low Internet @@ -2580,9 +2543,10 @@ nstat_lim_activity_check(struct if_lim_perf_stat *st) */ if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts && st->lim_packet_loss_percent >= - NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) + NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) { st->lim_ul_detected = 1; - return (TRUE); + } + return TRUE; } static u_int64_t nstat_lim_last_report_time = 0; @@ -2598,8 +2562,9 @@ nstat_ifnet_report_lim_stats(void) uptime = net_uptime(); if ((u_int32_t)(uptime - nstat_lim_last_report_time) < - nstat_lim_interval) + nstat_lim_interval) { return; + } nstat_lim_last_report_time = uptime; data.flags = NSTAT_SYSINFO_LIM_STATS; @@ -2608,18 +2573,21 @@ nstat_ifnet_report_lim_stats(void) ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { - if (!IF_FULLY_ATTACHED(ifp)) + if (!IF_FULLY_ATTACHED(ifp)) { continue; + } /* Limit reporting to Wifi, Ethernet and cellular */ - if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) + if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) { continue; + } - if (!nstat_lim_activity_check(&ifp->if_lim_stat)) + if (!nstat_lim_activity_check(&ifp->if_lim_stat)) { continue; + } bzero(st, sizeof(*st)); - st->ifnet_siglen = sizeof (st->ifnet_signature); + st->ifnet_siglen = sizeof(st->ifnet_signature); err = ifnet_get_netsignature(ifp, AF_INET, (u_int8_t *)&st->ifnet_siglen, NULL, st->ifnet_signature); @@ -2627,8 +2595,9 @@ nstat_ifnet_report_lim_stats(void) err = ifnet_get_netsignature(ifp, AF_INET6, (u_int8_t *)&st->ifnet_siglen, NULL, st->ifnet_signature); - if (err != 0) + if (err != 0) { continue; + } } ifnet_lock_shared(ifp); if (IFNET_IS_CELLULAR(ifp)) { @@ -2651,20 +2620,22 @@ nstat_ifnet_report_lim_stats(void) static errno_t nstat_ifnet_copy_descriptor( - nstat_provider_cookie_t cookie, - void *data, - u_int32_t len) + nstat_provider_cookie_t cookie, + void *data, + u_int32_t len) { nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data; struct nstat_ifnet_cookie *ifcookie = (struct nstat_ifnet_cookie *)cookie; struct ifnet *ifp = ifcookie->ifp; - if (len < sizeof(nstat_ifnet_descriptor)) + if (len < sizeof(nstat_ifnet_descriptor)) { return EINVAL; + } - if (nstat_ifnet_gone(cookie)) + if (nstat_ifnet_gone(cookie)) { return EINVAL; + } bzero(desc, sizeof(*desc)); ifnet_lock_shared(ifp); @@ -2672,9 +2643,10 @@ nstat_ifnet_copy_descriptor( desc->ifindex = ifp->if_index; desc->threshold = ifp->if_data_threshold; desc->type = ifp->if_type; - if (ifp->if_desc.ifd_len < sizeof(desc->description)) + if 
(ifp->if_desc.ifd_len < sizeof(desc->description)) { memcpy(desc->description, ifp->if_desc.ifd_desc, - sizeof(desc->description)); + sizeof(desc->description)); + } nstat_ifnet_copy_link_status(ifp, desc); ifnet_lock_done(ifp); return 0; @@ -2706,17 +2678,18 @@ nstat_ifnet_threshold_reached(unsigned int ifindex) struct nstat_ifnet_cookie *ifcookie; lck_mtx_lock(&nstat_mtx); - for (state = nstat_controls; state; state = state->ncs_next) - { + for (state = nstat_controls; state; state = state->ncs_next) { lck_mtx_lock(&state->ncs_mtx); TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { - if (src->provider != &nstat_ifnet_provider) + if (src->provider != &nstat_ifnet_provider) { continue; + } ifcookie = (struct nstat_ifnet_cookie *)src->cookie; ifp = ifcookie->ifp; - if (ifp->if_index != ifindex) + if (ifp->if_index != ifindex) { continue; + } nstat_control_send_counts(state, src, 0, 0, NULL); } lck_mtx_unlock(&state->ncs_mtx); @@ -2761,628 +2734,626 @@ nstat_sysinfo_send_data_internal( finalsize = allocsize; /* get number of key-vals for each kind of stat */ - switch (data->flags) - { - case NSTAT_SYSINFO_MBUF_STATS: - nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) / - sizeof(u_int32_t); - break; - case NSTAT_SYSINFO_TCP_STATS: - nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT; - break; - case NSTAT_SYSINFO_IFNET_ECN_STATS: - nkeyvals = (sizeof(struct if_tcp_ecn_stat) / - sizeof(u_int64_t)); + switch (data->flags) { + case NSTAT_SYSINFO_MBUF_STATS: + nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) / + sizeof(u_int32_t); + break; + case NSTAT_SYSINFO_TCP_STATS: + nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT; + break; + case NSTAT_SYSINFO_IFNET_ECN_STATS: + nkeyvals = (sizeof(struct if_tcp_ecn_stat) / + sizeof(u_int64_t)); - /* Two more keys for ifnet type and proto */ - nkeyvals += 2; + /* Two more keys for ifnet type and proto */ + nkeyvals += 2; - /* One key for unsent data. */ - nkeyvals++; - break; - case NSTAT_SYSINFO_LIM_STATS: - nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT; - break; - case NSTAT_SYSINFO_NET_API_STATS: - nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT; - break; - default: - return; + /* One key for unsent data. 
*/ + nkeyvals++; + break; + case NSTAT_SYSINFO_LIM_STATS: + nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT; + break; + case NSTAT_SYSINFO_NET_API_STATS: + nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT; + break; + default: + return; } countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals; allocsize += countsize; syscnt = OSMalloc(allocsize, nstat_malloc_tag); - if (syscnt == NULL) + if (syscnt == NULL) { return; + } bzero(syscnt, allocsize); kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals; - switch (data->flags) - { - case NSTAT_SYSINFO_MBUF_STATS: - { - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL, - data->u.mb_stats.total_256b); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL, - data->u.mb_stats.total_2kb); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL, - data->u.mb_stats.total_4kb); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MBUF_16KB_TOTAL, - data->u.mb_stats.total_16kb); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_SOCK_MBCNT, - data->u.mb_stats.sbmb_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT, - data->u.mb_stats.sb_atmbuflimit); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MBUF_DRAIN_CNT, - data->u.mb_stats.draincnt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MBUF_MEM_RELEASED, - data->u.mb_stats.memreleased); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_SOCK_MBFLOOR, - data->u.mb_stats.sbmb_floor); - VERIFY(i == nkeyvals); - break; - } - case NSTAT_SYSINFO_TCP_STATS: - { - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_IPV4_AVGRTT, - data->u.tcp_stats.ipv4_avgrtt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_IPV6_AVGRTT, - data->u.tcp_stats.ipv6_avgrtt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_SEND_PLR, - data->u.tcp_stats.send_plr); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_RECV_PLR, - data->u.tcp_stats.recv_plr); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_SEND_TLRTO, - data->u.tcp_stats.send_tlrto_rate); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_KEY_SEND_REORDERRATE, - data->u.tcp_stats.send_reorder_rate); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_CONNECTION_ATTEMPTS, - data->u.tcp_stats.connection_attempts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_CONNECTION_ACCEPTS, - data->u.tcp_stats.connection_accepts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_CLIENT_ENABLED, - data->u.tcp_stats.ecn_client_enabled); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_SERVER_ENABLED, - data->u.tcp_stats.ecn_server_enabled); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_CLIENT_SETUP, - data->u.tcp_stats.ecn_client_setup); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_SERVER_SETUP, - data->u.tcp_stats.ecn_server_setup); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_CLIENT_SUCCESS, - data->u.tcp_stats.ecn_client_success); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_SERVER_SUCCESS, - data->u.tcp_stats.ecn_server_success); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_NOT_SUPPORTED, - data->u.tcp_stats.ecn_not_supported); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_LOST_SYN, - data->u.tcp_stats.ecn_lost_syn); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_LOST_SYNACK, - data->u.tcp_stats.ecn_lost_synack); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_RECV_CE, - data->u.tcp_stats.ecn_recv_ce); - nstat_set_keyval_scalar(&kv[i++], - 
NSTAT_SYSINFO_ECN_RECV_ECE, - data->u.tcp_stats.ecn_recv_ece); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_SENT_ECE, - data->u.tcp_stats.ecn_sent_ece); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_CONN_RECV_CE, - data->u.tcp_stats.ecn_conn_recv_ce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_CONN_RECV_ECE, - data->u.tcp_stats.ecn_conn_recv_ece); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_CONN_PLNOCE, - data->u.tcp_stats.ecn_conn_plnoce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_CONN_PL_CE, - data->u.tcp_stats.ecn_conn_pl_ce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_CONN_NOPL_CE, - data->u.tcp_stats.ecn_conn_nopl_ce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS, - data->u.tcp_stats.ecn_fallback_synloss); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_FALLBACK_REORDER, - data->u.tcp_stats.ecn_fallback_reorder); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_FALLBACK_CE, - data->u.tcp_stats.ecn_fallback_ce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_SYN_DATA_RCV, - data->u.tcp_stats.tfo_syn_data_rcv); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV, - data->u.tcp_stats.tfo_cookie_req_rcv); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_COOKIE_SENT, - data->u.tcp_stats.tfo_cookie_sent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_COOKIE_INVALID, - data->u.tcp_stats.tfo_cookie_invalid); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_COOKIE_REQ, - data->u.tcp_stats.tfo_cookie_req); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_COOKIE_RCV, - data->u.tcp_stats.tfo_cookie_rcv); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_SYN_DATA_SENT, - data->u.tcp_stats.tfo_syn_data_sent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_SYN_DATA_ACKED, - data->u.tcp_stats.tfo_syn_data_acked); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_SYN_LOSS, - data->u.tcp_stats.tfo_syn_loss); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_BLACKHOLE, - data->u.tcp_stats.tfo_blackhole); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_COOKIE_WRONG, - data->u.tcp_stats.tfo_cookie_wrong); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_NO_COOKIE_RCV, - data->u.tcp_stats.tfo_no_cookie_rcv); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE, - data->u.tcp_stats.tfo_heuristics_disable); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_TFO_SEND_BLACKHOLE, - data->u.tcp_stats.tfo_sndblackhole); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT, - data->u.tcp_stats.mptcp_handover_attempt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT, - data->u.tcp_stats.mptcp_interactive_attempt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT, - data->u.tcp_stats.mptcp_aggregate_attempt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT, - data->u.tcp_stats.mptcp_fp_handover_attempt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT, - data->u.tcp_stats.mptcp_fp_interactive_attempt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT, - data->u.tcp_stats.mptcp_fp_aggregate_attempt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK, - data->u.tcp_stats.mptcp_heuristic_fallback); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK, - 
data->u.tcp_stats.mptcp_fp_heuristic_fallback); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI, - data->u.tcp_stats.mptcp_handover_success_wifi); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL, - data->u.tcp_stats.mptcp_handover_success_cell); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS, - data->u.tcp_stats.mptcp_interactive_success); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS, - data->u.tcp_stats.mptcp_aggregate_success); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI, - data->u.tcp_stats.mptcp_fp_handover_success_wifi); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL, - data->u.tcp_stats.mptcp_fp_handover_success_cell); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS, - data->u.tcp_stats.mptcp_fp_interactive_success); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS, - data->u.tcp_stats.mptcp_fp_aggregate_success); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI, - data->u.tcp_stats.mptcp_handover_cell_from_wifi); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL, - data->u.tcp_stats.mptcp_handover_wifi_from_cell); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI, - data->u.tcp_stats.mptcp_interactive_cell_from_wifi); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES, - data->u.tcp_stats.mptcp_handover_cell_bytes); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES, - data->u.tcp_stats.mptcp_interactive_cell_bytes); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES, - data->u.tcp_stats.mptcp_aggregate_cell_bytes); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES, - data->u.tcp_stats.mptcp_handover_all_bytes); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES, - data->u.tcp_stats.mptcp_interactive_all_bytes); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES, - data->u.tcp_stats.mptcp_aggregate_all_bytes); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI, - data->u.tcp_stats.mptcp_back_to_wifi); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_WIFI_PROXY, - data->u.tcp_stats.mptcp_wifi_proxy); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_CELL_PROXY, - data->u.tcp_stats.mptcp_cell_proxy); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL, - data->u.tcp_stats.mptcp_triggered_cell); - VERIFY(i == nkeyvals); - break; - } - case NSTAT_SYSINFO_IFNET_ECN_STATS: - { - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_TYPE, - data->u.ifnet_ecn_stats.ifnet_type); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_PROTO, - data->u.ifnet_ecn_stats.ifnet_proto); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP, - data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP, - data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success); - 
nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT, - data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_SYN_LOST, - data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST, - data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_RECV_CE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_RECV_ECE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER, - data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop); - 
nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST, - data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST, - data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN, - data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_IFNET_UNSENT_DATA, - data->unsent_data_cnt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST, - data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT, - data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST, - data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst); - break; - } - case NSTAT_SYSINFO_LIM_STATS: - { - nstat_set_keyval_string(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_SIGNATURE, - data->u.lim_stats.ifnet_signature, - data->u.lim_stats.ifnet_siglen); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH, - data->u.lim_stats.lim_stat.lim_dl_max_bandwidth); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH, - data->u.lim_stats.lim_stat.lim_ul_max_bandwidth); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT, - data->u.lim_stats.lim_stat.lim_packet_loss_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT, - data->u.lim_stats.lim_stat.lim_packet_ooo_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE, - data->u.lim_stats.lim_stat.lim_rtt_variance); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_RTT_MIN, - data->u.lim_stats.lim_stat.lim_rtt_min); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_RTT_AVG, - data->u.lim_stats.lim_stat.lim_rtt_average); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT, - data->u.lim_stats.lim_stat.lim_conn_timeout_percent); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED, - data->u.lim_stats.lim_stat.lim_dl_detected); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED, - data->u.lim_stats.lim_stat.lim_ul_detected); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_LIM_IFNET_TYPE, - 
data->u.lim_stats.ifnet_type); - break; - } - case NSTAT_SYSINFO_NET_API_STATS: - { - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_IF_FLTR_ATTACH, - data->u.net_api_stats.net_api_stats.nas_iflt_attach_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS, - data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_IP_FLTR_ADD, - data->u.net_api_stats.net_api_stats.nas_ipf_add_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_IP_FLTR_ADD_OS, - data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH, - data->u.net_api_stats.net_api_stats.nas_sfltr_register_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS, - data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total); - - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL, - data->u.net_api_stats.net_api_stats.nas_socket_alloc_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL, - data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS, - data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID, - data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total); - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL, - data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE, - data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_INET, - data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6, - data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM, - data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH, - data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY, - data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV, - data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER, - data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total); - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET_STREAM, - data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET_DGRAM, - data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED, - data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS, - data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA, - data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data); - - 
nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET6_STREAM, - data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET6_DGRAM, - data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED, - data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS, - data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA, - data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data); - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN, - data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS, - data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total); - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM, - data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM, - data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total); - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM, - data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM, - data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total); - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_IFNET_ALLOC, - data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_IFNET_ALLOC_OS, - data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total); - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_PF_ADDRULE, - data->u.net_api_stats.net_api_stats.nas_pf_addrule_total); - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_PF_ADDRULE_OS, - data->u.net_api_stats.net_api_stats.nas_pf_addrule_os); - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_VMNET_START, - data->u.net_api_stats.net_api_stats.nas_vmnet_total); - - - nstat_set_keyval_scalar(&kv[i++], - NSTAT_SYSINFO_API_REPORT_INTERVAL, - data->u.net_api_stats.report_interval); + switch (data->flags) { + case NSTAT_SYSINFO_MBUF_STATS: + { + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL, + data->u.mb_stats.total_256b); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL, + data->u.mb_stats.total_2kb); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL, + data->u.mb_stats.total_4kb); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MBUF_16KB_TOTAL, + data->u.mb_stats.total_16kb); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_SOCK_MBCNT, + data->u.mb_stats.sbmb_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT, + data->u.mb_stats.sb_atmbuflimit); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MBUF_DRAIN_CNT, + data->u.mb_stats.draincnt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MBUF_MEM_RELEASED, + data->u.mb_stats.memreleased); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_SOCK_MBFLOOR, + data->u.mb_stats.sbmb_floor); + VERIFY(i == nkeyvals); + break; + } + case NSTAT_SYSINFO_TCP_STATS: + { + nstat_set_keyval_scalar(&kv[i++], + 
NSTAT_SYSINFO_KEY_IPV4_AVGRTT, + data->u.tcp_stats.ipv4_avgrtt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_IPV6_AVGRTT, + data->u.tcp_stats.ipv6_avgrtt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_SEND_PLR, + data->u.tcp_stats.send_plr); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_RECV_PLR, + data->u.tcp_stats.recv_plr); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_SEND_TLRTO, + data->u.tcp_stats.send_tlrto_rate); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_KEY_SEND_REORDERRATE, + data->u.tcp_stats.send_reorder_rate); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_CONNECTION_ATTEMPTS, + data->u.tcp_stats.connection_attempts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_CONNECTION_ACCEPTS, + data->u.tcp_stats.connection_accepts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_CLIENT_ENABLED, + data->u.tcp_stats.ecn_client_enabled); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_SERVER_ENABLED, + data->u.tcp_stats.ecn_server_enabled); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_CLIENT_SETUP, + data->u.tcp_stats.ecn_client_setup); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_SERVER_SETUP, + data->u.tcp_stats.ecn_server_setup); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_CLIENT_SUCCESS, + data->u.tcp_stats.ecn_client_success); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_SERVER_SUCCESS, + data->u.tcp_stats.ecn_server_success); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_NOT_SUPPORTED, + data->u.tcp_stats.ecn_not_supported); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_LOST_SYN, + data->u.tcp_stats.ecn_lost_syn); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_LOST_SYNACK, + data->u.tcp_stats.ecn_lost_synack); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_RECV_CE, + data->u.tcp_stats.ecn_recv_ce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_RECV_ECE, + data->u.tcp_stats.ecn_recv_ece); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_SENT_ECE, + data->u.tcp_stats.ecn_sent_ece); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_CONN_RECV_CE, + data->u.tcp_stats.ecn_conn_recv_ce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_CONN_RECV_ECE, + data->u.tcp_stats.ecn_conn_recv_ece); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_CONN_PLNOCE, + data->u.tcp_stats.ecn_conn_plnoce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_CONN_PL_CE, + data->u.tcp_stats.ecn_conn_pl_ce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_CONN_NOPL_CE, + data->u.tcp_stats.ecn_conn_nopl_ce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS, + data->u.tcp_stats.ecn_fallback_synloss); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_FALLBACK_REORDER, + data->u.tcp_stats.ecn_fallback_reorder); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_FALLBACK_CE, + data->u.tcp_stats.ecn_fallback_ce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_SYN_DATA_RCV, + data->u.tcp_stats.tfo_syn_data_rcv); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV, + data->u.tcp_stats.tfo_cookie_req_rcv); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_COOKIE_SENT, + data->u.tcp_stats.tfo_cookie_sent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_COOKIE_INVALID, + data->u.tcp_stats.tfo_cookie_invalid); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_COOKIE_REQ, + data->u.tcp_stats.tfo_cookie_req); + 
nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_COOKIE_RCV, + data->u.tcp_stats.tfo_cookie_rcv); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_SYN_DATA_SENT, + data->u.tcp_stats.tfo_syn_data_sent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_SYN_DATA_ACKED, + data->u.tcp_stats.tfo_syn_data_acked); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_SYN_LOSS, + data->u.tcp_stats.tfo_syn_loss); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_BLACKHOLE, + data->u.tcp_stats.tfo_blackhole); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_COOKIE_WRONG, + data->u.tcp_stats.tfo_cookie_wrong); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_NO_COOKIE_RCV, + data->u.tcp_stats.tfo_no_cookie_rcv); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE, + data->u.tcp_stats.tfo_heuristics_disable); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_TFO_SEND_BLACKHOLE, + data->u.tcp_stats.tfo_sndblackhole); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT, + data->u.tcp_stats.mptcp_handover_attempt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT, + data->u.tcp_stats.mptcp_interactive_attempt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT, + data->u.tcp_stats.mptcp_aggregate_attempt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT, + data->u.tcp_stats.mptcp_fp_handover_attempt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT, + data->u.tcp_stats.mptcp_fp_interactive_attempt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT, + data->u.tcp_stats.mptcp_fp_aggregate_attempt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK, + data->u.tcp_stats.mptcp_heuristic_fallback); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK, + data->u.tcp_stats.mptcp_fp_heuristic_fallback); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI, + data->u.tcp_stats.mptcp_handover_success_wifi); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL, + data->u.tcp_stats.mptcp_handover_success_cell); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS, + data->u.tcp_stats.mptcp_interactive_success); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS, + data->u.tcp_stats.mptcp_aggregate_success); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI, + data->u.tcp_stats.mptcp_fp_handover_success_wifi); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL, + data->u.tcp_stats.mptcp_fp_handover_success_cell); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS, + data->u.tcp_stats.mptcp_fp_interactive_success); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS, + data->u.tcp_stats.mptcp_fp_aggregate_success); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI, + data->u.tcp_stats.mptcp_handover_cell_from_wifi); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL, + data->u.tcp_stats.mptcp_handover_wifi_from_cell); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI, + data->u.tcp_stats.mptcp_interactive_cell_from_wifi); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES, + 
data->u.tcp_stats.mptcp_handover_cell_bytes); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES, + data->u.tcp_stats.mptcp_interactive_cell_bytes); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES, + data->u.tcp_stats.mptcp_aggregate_cell_bytes); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES, + data->u.tcp_stats.mptcp_handover_all_bytes); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES, + data->u.tcp_stats.mptcp_interactive_all_bytes); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES, + data->u.tcp_stats.mptcp_aggregate_all_bytes); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI, + data->u.tcp_stats.mptcp_back_to_wifi); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_WIFI_PROXY, + data->u.tcp_stats.mptcp_wifi_proxy); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_CELL_PROXY, + data->u.tcp_stats.mptcp_cell_proxy); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL, + data->u.tcp_stats.mptcp_triggered_cell); + VERIFY(i == nkeyvals); + break; + } + case NSTAT_SYSINFO_IFNET_ECN_STATS: + { + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_TYPE, + data->u.ifnet_ecn_stats.ifnet_type); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_PROTO, + data->u.ifnet_ecn_stats.ifnet_proto); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP, + data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP, + data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT, + data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_SYN_LOST, + data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST, + data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_RECV_CE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_RECV_ECE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER, + data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder); + nstat_set_keyval_scalar(&kv[i++], + 
NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST, + data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST, + data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN, + data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_IFNET_UNSENT_DATA, + data->unsent_data_cnt); 
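These sysinfo cases all follow one mechanical pattern: write kv[i], post-increment i, and (where the case does so) VERIFY at the end that exactly nkeyvals entries were produced. A minimal standalone sketch of that pattern, using hypothetical key/value types rather than the kernel's real nstat_sysinfo_keyval layout:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-ins for the kernel's key/value types. */
typedef struct {
    uint32_t key;
    uint64_t value;
} kv_entry_t;

static void set_keyval_scalar(kv_entry_t *kv, uint32_t key, uint64_t value)
{
    kv->key = key;
    kv->value = value;
}

#define NKEYVALS 3

static void fill_report(kv_entry_t kv[NKEYVALS], const uint64_t stats[NKEYVALS])
{
    int i = 0;
    /* Same shape as the kernel code: one call per counter, i post-incremented. */
    set_keyval_scalar(&kv[i++], /* KEY_RX */ 1, stats[0]);
    set_keyval_scalar(&kv[i++], /* KEY_TX */ 2, stats[1]);
    set_keyval_scalar(&kv[i++], /* KEY_DROPS */ 3, stats[2]);
    assert(i == NKEYVALS); /* mirrors VERIFY(i == nkeyvals) above */
}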
+ nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST, + data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT, + data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST, + data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst); + break; + } + case NSTAT_SYSINFO_LIM_STATS: + { + nstat_set_keyval_string(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_SIGNATURE, + data->u.lim_stats.ifnet_signature, + data->u.lim_stats.ifnet_siglen); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH, + data->u.lim_stats.lim_stat.lim_dl_max_bandwidth); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH, + data->u.lim_stats.lim_stat.lim_ul_max_bandwidth); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT, + data->u.lim_stats.lim_stat.lim_packet_loss_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT, + data->u.lim_stats.lim_stat.lim_packet_ooo_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE, + data->u.lim_stats.lim_stat.lim_rtt_variance); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_RTT_MIN, + data->u.lim_stats.lim_stat.lim_rtt_min); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_RTT_AVG, + data->u.lim_stats.lim_stat.lim_rtt_average); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT, + data->u.lim_stats.lim_stat.lim_conn_timeout_percent); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED, + data->u.lim_stats.lim_stat.lim_dl_detected); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED, + data->u.lim_stats.lim_stat.lim_ul_detected); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_LIM_IFNET_TYPE, + data->u.lim_stats.ifnet_type); + break; + } + case NSTAT_SYSINFO_NET_API_STATS: + { + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_IF_FLTR_ATTACH, + data->u.net_api_stats.net_api_stats.nas_iflt_attach_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS, + data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_IP_FLTR_ADD, + data->u.net_api_stats.net_api_stats.nas_ipf_add_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_IP_FLTR_ADD_OS, + data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH, + data->u.net_api_stats.net_api_stats.nas_sfltr_register_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS, + data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total); + + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL, + data->u.net_api_stats.net_api_stats.nas_socket_alloc_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL, + data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS, + data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID, + data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL, + 
data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE, + data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_INET, + data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6, + data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM, + data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH, + data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY, + data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV, + data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER, + data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET_STREAM, + data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET_DGRAM, + data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED, + data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS, + data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA, + data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET6_STREAM, + data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET6_DGRAM, + data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED, + data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS, + data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA, + data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN, + data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS, + data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM, + data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM, + data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM, + data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM, + 
data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_IFNET_ALLOC, + data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_IFNET_ALLOC_OS, + data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_PF_ADDRULE, + data->u.net_api_stats.net_api_stats.nas_pf_addrule_total); + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_PF_ADDRULE_OS, + data->u.net_api_stats.net_api_stats.nas_pf_addrule_os); + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_VMNET_START, + data->u.net_api_stats.net_api_stats.nas_vmnet_total); + + + nstat_set_keyval_scalar(&kv[i++], + NSTAT_SYSINFO_API_REPORT_INTERVAL, + data->u.net_api_stats.report_interval); - break; - } + break; } - if (syscnt != NULL) - { + } + if (syscnt != NULL) { VERIFY(i > 0 && i <= nkeyvals); countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals) + @@ -3394,8 +3365,7 @@ nstat_sysinfo_send_data_internal( result = ctl_enqueuedata(control->ncs_kctl, control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR); - if (result != 0) - { + if (result != 0) { nstat_stats.nstat_sysinfofailures += 1; } OSFree(syscnt, allocsize, nstat_malloc_tag); @@ -3445,8 +3415,9 @@ nstat_net_api_report_stats(void) uptime = net_uptime(); if ((u_int32_t)(uptime - net_api_stats_last_report_time) < - net_api_stats_report_interval) + net_api_stats_report_interval) { return; + } st->report_interval = uptime - net_api_stats_last_report_time; net_api_stats_last_report_time = uptime; @@ -3462,9 +3433,9 @@ nstat_net_api_report_stats(void) * - Report current value for other counters as they tend not to change * much with time */ -#define STATCOPY(f) \ +#define STATCOPY(f) \ (st->net_api_stats.f = net_api_stats.f) -#define STATDIFF(f) \ +#define STATDIFF(f) \ (st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f) STATCOPY(nas_iflt_attach_count); @@ -3540,24 +3511,24 @@ nstat_net_api_report_stats(void) */ memcpy(&net_api_stats_before, &net_api_stats, sizeof(struct net_api_stats)); - _CASSERT(sizeof (net_api_stats_before) == sizeof (net_api_stats)); + _CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats)); } #pragma mark -- Kernel Control Socket -- -static kern_ctl_ref nstat_ctlref = NULL; -static lck_grp_t *nstat_lck_grp = NULL; +static kern_ctl_ref nstat_ctlref = NULL; +static lck_grp_t *nstat_lck_grp = NULL; -static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo); -static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo); -static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags); +static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo); +static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo); +static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags); static errno_t nstat_enqueue_success( - uint64_t context, - nstat_control_state *state, - u_int16_t flags) + uint64_t context, + nstat_control_state *state, + u_int16_t flags) { nstat_msg_hdr success; errno_t result; @@ -3570,9 +3541,10 @@ nstat_enqueue_success( result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success, sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT); if (result != 0) { - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s: could not enqueue 
success message %d\n", __func__, result); + } nstat_stats.nstat_successmsgfailures += 1; } return result; @@ -3580,57 +3552,54 @@ nstat_enqueue_success( static errno_t nstat_control_send_goodbye( - nstat_control_state *state, - nstat_src *src) + nstat_control_state *state, + nstat_src *src) { errno_t result = 0; int failed = 0; - if (nstat_control_reporting_allowed(state, src)) - { - if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) - { + if (nstat_control_reporting_allowed(state, src)) { + if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) { result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL); - if (result != 0) - { + if (result != 0) { failed = 1; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s - nstat_control_send_update() %d\n", __func__, result); + } } - } - else - { + } else { // send one last counts notification result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL); - if (result != 0) - { + if (result != 0) { failed = 1; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s - nstat_control_send_counts() %d\n", __func__, result); + } } // send a last description result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING); - if (result != 0) - { + if (result != 0) { failed = 1; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s - nstat_control_send_description() %d\n", __func__, result); + } } } } // send the source removed notification result = nstat_control_send_removed(state, src); - if (result != 0 && nstat_debug) - { + if (result != 0 && nstat_debug) { failed = 1; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s - nstat_control_send_removed() %d\n", __func__, result); + } } - if (failed != 0) + if (failed != 0) { nstat_stats.nstat_control_send_goodbye_failures++; + } return result; @@ -3638,18 +3607,17 @@ nstat_control_send_goodbye( static errno_t nstat_flush_accumulated_msgs( - nstat_control_state *state) + nstat_control_state *state) { errno_t result = 0; - if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) - { + if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) { mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated)); result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR); - if (result != 0) - { + if (result != 0) { nstat_stats.nstat_flush_accumulated_msgs_failures++; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result); + } mbuf_freem(state->ncs_accumulated); } state->ncs_accumulated = NULL; @@ -3659,50 +3627,46 @@ nstat_flush_accumulated_msgs( static errno_t nstat_accumulate_msg( - nstat_control_state *state, - nstat_msg_hdr *hdr, - size_t length) + nstat_control_state *state, + nstat_msg_hdr *hdr, + size_t length) { - if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) - { + if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) { // Will send the current mbuf nstat_flush_accumulated_msgs(state); } errno_t result = 0; - if (state->ncs_accumulated == NULL) - { + if (state->ncs_accumulated == NULL) { unsigned int one = 1; - if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) - { - if (nstat_debug != 0) + if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) { + if (nstat_debug != 0) { printf("%s - mbuf_allocpacket failed\n", 
__func__); + } result = ENOMEM; - } - else - { + } else { mbuf_setlen(state->ncs_accumulated, 0); } } - if (result == 0) - { + if (result == 0) { hdr->length = length; result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated), - length, hdr, MBUF_DONTWAIT); + length, hdr, MBUF_DONTWAIT); } - if (result != 0) - { + if (result != 0) { nstat_flush_accumulated_msgs(state); - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s - resorting to ctl_enqueuedata\n", __func__); + } result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR); } - if (result != 0) + if (result != 0) { nstat_stats.nstat_accumulate_msg_failures++; + } return result; } @@ -3713,7 +3677,7 @@ nstat_idle_check( __unused thread_call_param_t p1) { nstat_control_state *control; - nstat_src *src, *tmpsrc; + nstat_src *src, *tmpsrc; tailq_head_nstat_src dead_list; TAILQ_INIT(&dead_list); @@ -3721,15 +3685,12 @@ nstat_idle_check( nstat_idle_time = 0; - for (control = nstat_controls; control; control = control->ncs_next) - { + for (control = nstat_controls; control; control = control->ncs_next) { lck_mtx_lock(&control->ncs_mtx); - if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) - { + if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) { TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc) { - if (src->provider->nstat_gone(src->cookie)) - { + if (src->provider->nstat_gone(src->cookie)) { errno_t result; // Pull it off the list @@ -3746,8 +3707,7 @@ nstat_idle_check( lck_mtx_unlock(&control->ncs_mtx); } - if (nstat_controls) - { + if (nstat_controls) { clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time); thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time); } @@ -3758,8 +3718,7 @@ nstat_idle_check( nstat_sysinfo_generate_report(); // Release the sources now that we aren't holding lots of locks - while ((src = TAILQ_FIRST(&dead_list))) - { + while ((src = TAILQ_FIRST(&dead_list))) { TAILQ_REMOVE(&dead_list, src, ns_control_link); nstat_control_cleanup_source(NULL, src, FALSE); } @@ -3772,7 +3731,7 @@ static void nstat_control_register(void) { // Create our lock group first - lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init(); + lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init(); lck_grp_attr_setdefault(grp_attr); nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr); lck_grp_attr_free(grp_attr); @@ -3780,7 +3739,7 @@ nstat_control_register(void) lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL); // Register the control - struct kern_ctl_reg nstat_control; + struct kern_ctl_reg nstat_control; bzero(&nstat_control, sizeof(nstat_control)); strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name)); nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT; @@ -3795,21 +3754,20 @@ nstat_control_register(void) static void nstat_control_cleanup_source( - nstat_control_state *state, - struct nstat_src *src, - boolean_t locked) + nstat_control_state *state, + struct nstat_src *src, + boolean_t locked) { errno_t result; - if (state) - { + if (state) { result = nstat_control_send_removed(state, src); - if (result != 0) - { + if (result != 0) { nstat_stats.nstat_control_cleanup_source_failures++; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s - nstat_control_send_removed() %d\n", __func__, result); + } } } // Cleanup the source if we found it. 
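nstat_control_register() above publishes a kernel control under NET_STAT_CONTROL_NAME, and the nstat_control_connect()/nstat_control_disconnect() callbacks in the following hunks run as clients attach and detach. A minimal userspace sketch of attaching to such a control socket, using only the public <sys/kern_control.h> and <sys/sys_domain.h> API and assuming the name is NET_STAT_CONTROL_NAME ("com.apple.network.statistics" in net/ntstat.h); error handling is trimmed and later requests may still be gated by PRIV_NET_PRIVILEGED_NETWORK_STATISTICS checks like those in the handlers below:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>

int main(void)
{
    int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
    if (fd < 0) { perror("socket"); return 1; }

    /* Resolve the control name to its dynamically assigned id. */
    struct ctl_info info;
    memset(&info, 0, sizeof(info));
    strlcpy(info.ctl_name, "com.apple.network.statistics", sizeof(info.ctl_name));
    if (ioctl(fd, CTLIOCGINFO, &info) == -1) { perror("CTLIOCGINFO"); close(fd); return 1; }

    /* Attach; in the kernel this is what drives nstat_control_connect(). */
    struct sockaddr_ctl sc;
    memset(&sc, 0, sizeof(sc));
    sc.sc_len = sizeof(sc);
    sc.sc_family = AF_SYSTEM;
    sc.ss_sysaddr = AF_SYS_CONTROL;
    sc.sc_id = info.ctl_id;
    sc.sc_unit = 0; /* let the kernel pick a unit */
    if (connect(fd, (struct sockaddr *)&sc, sizeof(sc)) == -1) { perror("connect"); close(fd); return 1; }

    /* Messages written here arrive in nstat_control_send(); close() triggers
     * nstat_control_disconnect(). */
    close(fd);
    return 0;
}

From here, a client would write nstat_msg_hdr-framed requests (ADD_SRC, QUERY_SRC, GET_UPDATE, and so on) and read the enqueued replies from the same descriptor.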
@@ -3823,24 +3781,27 @@ nstat_control_reporting_allowed( nstat_control_state *state, nstat_src *src) { - if (src->provider->nstat_reporting_allowed == NULL) + if (src->provider->nstat_reporting_allowed == NULL) { return TRUE; + } - return ( - src->provider->nstat_reporting_allowed(src->cookie, - &state->ncs_provider_filters[src->provider->nstat_provider_id]) - ); + return + src->provider->nstat_reporting_allowed(src->cookie, + &state->ncs_provider_filters[src->provider->nstat_provider_id]) + ; } static errno_t nstat_control_connect( - kern_ctl_ref kctl, - struct sockaddr_ctl *sac, - void **uinfo) + kern_ctl_ref kctl, + struct sockaddr_ctl *sac, + void **uinfo) { - nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag); - if (state == NULL) return ENOMEM; + nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag); + if (state == NULL) { + return ENOMEM; + } bzero(state, sizeof(*state)); lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL); @@ -3853,8 +3814,7 @@ nstat_control_connect( state->ncs_next = nstat_controls; nstat_controls = state; - if (nstat_idle_time == 0) - { + if (nstat_idle_time == 0) { clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time); thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time); } @@ -3866,12 +3826,12 @@ nstat_control_connect( static errno_t nstat_control_disconnect( - __unused kern_ctl_ref kctl, - __unused u_int32_t unit, - void *uinfo) + __unused kern_ctl_ref kctl, + __unused u_int32_t unit, + void *uinfo) { - u_int32_t watching; - nstat_control_state *state = (nstat_control_state*)uinfo; + u_int32_t watching; + nstat_control_state *state = (nstat_control_state*)uinfo; tailq_head_nstat_src cleanup_list; nstat_src *src; @@ -3879,11 +3839,9 @@ nstat_control_disconnect( // pull it out of the global list of states lck_mtx_lock(&nstat_mtx); - nstat_control_state **statepp; - for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) - { - if (*statepp == state) - { + nstat_control_state **statepp; + for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) { + if (*statepp == state) { *statepp = state->ncs_next; break; } @@ -3892,13 +3850,11 @@ nstat_control_disconnect( lck_mtx_lock(&state->ncs_mtx); // Stop watching for sources - nstat_provider *provider; + nstat_provider *provider; watching = state->ncs_watching; state->ncs_watching = 0; - for (provider = nstat_providers; provider && watching; provider = provider->next) - { - if ((watching & (1 << provider->nstat_provider_id)) != 0) - { + for (provider = nstat_providers; provider && watching; provider = provider->next) { + if ((watching & (1 << provider->nstat_provider_id)) != 0) { watching &= ~(1 << provider->nstat_provider_id); provider->nstat_watcher_remove(state); } @@ -3907,8 +3863,7 @@ nstat_control_disconnect( // set cleanup flags state->ncs_flags |= NSTAT_FLAG_CLEANUP; - if (state->ncs_accumulated) - { + if (state->ncs_accumulated) { mbuf_freem(state->ncs_accumulated); state->ncs_accumulated = NULL; } @@ -3917,8 +3872,7 @@ nstat_control_disconnect( TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link); lck_mtx_unlock(&state->ncs_mtx); - while ((src = TAILQ_FIRST(&cleanup_list))) - { + while ((src = TAILQ_FIRST(&cleanup_list))) { TAILQ_REMOVE(&cleanup_list, src, ns_control_link); nstat_control_cleanup_source(NULL, src, FALSE); } @@ -3931,16 +3885,16 @@ nstat_control_disconnect( static nstat_src_ref_t nstat_control_next_src_ref( - nstat_control_state *state) + nstat_control_state *state) { 
return ++state->ncs_next_srcref; } static errno_t nstat_control_send_counts( - nstat_control_state *state, - nstat_src *src, - unsigned long long context, + nstat_control_state *state, + nstat_src *src, + unsigned long long context, u_int16_t hdr_flags, int *gone) { @@ -3948,8 +3902,9 @@ nstat_control_send_counts( errno_t result = 0; /* Some providers may not have any counts to send */ - if (src->provider->nstat_counts == NULL) - return (0); + if (src->provider->nstat_counts == NULL) { + return 0; + } bzero(&counts, sizeof(counts)); counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS; @@ -3959,21 +3914,18 @@ nstat_control_send_counts( counts.srcref = src->srcref; counts.event_flags = 0; - if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) - { + if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) { if ((src->filter & NSTAT_FILTER_NOZEROBYTES) && counts.counts.nstat_rxbytes == 0 && - counts.counts.nstat_txbytes == 0) - { + counts.counts.nstat_txbytes == 0) { result = EAGAIN; - } - else - { + } else { result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &counts, sizeof(counts), CTL_DATA_EOR); - if (result != 0) + if (result != 0) { nstat_stats.nstat_sendcountfailures += 1; + } } } return result; @@ -3981,12 +3933,14 @@ nstat_control_send_counts( static errno_t nstat_control_append_counts( - nstat_control_state *state, - nstat_src *src, - int *gone) + nstat_control_state *state, + nstat_src *src, + int *gone) { /* Some providers may not have any counts to send */ - if (!src->provider->nstat_counts) return 0; + if (!src->provider->nstat_counts) { + return 0; + } nstat_msg_src_counts counts; bzero(&counts, sizeof(counts)); @@ -3995,16 +3949,14 @@ nstat_control_append_counts( counts.srcref = src->srcref; counts.event_flags = 0; - errno_t result = 0; + errno_t result = 0; result = src->provider->nstat_counts(src->cookie, &counts.counts, gone); - if (result != 0) - { + if (result != 0) { return result; } if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES && - counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) - { + counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) { return EAGAIN; } @@ -4013,37 +3965,34 @@ nstat_control_append_counts( static int nstat_control_send_description( - nstat_control_state *state, - nstat_src *src, - u_int64_t context, - u_int16_t hdr_flags) + nstat_control_state *state, + nstat_src *src, + u_int64_t context, + u_int16_t hdr_flags) { // Provider doesn't support getting the descriptor? Done. 
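Both count paths above treat NSTAT_FILTER_NOZEROBYTES the same way: a source with zero rx and zero tx bytes is skipped by returning EAGAIN, which callers take as "nothing to report" rather than a hard failure, while ENOMEM/ENOBUFS abort the walk. A condensed sketch of that convention, with hypothetical types and EAGAIN doubling as the skip sentinel exactly as in the code above:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t rxbytes;
    uint64_t txbytes;
} counts_t;

/* Returns 0 on success, EAGAIN to skip quietly, other errno on hard failure. */
static int report_counts(const counts_t *c, int filter_nozerobytes)
{
    if (filter_nozerobytes && c->rxbytes == 0 && c->txbytes == 0) {
        return EAGAIN; /* filtered out, as with NSTAT_FILTER_NOZEROBYTES */
    }
    printf("rx=%llu tx=%llu\n",
        (unsigned long long)c->rxbytes, (unsigned long long)c->txbytes);
    return 0;
}

int main(void)
{
    counts_t srcs[] = { { 0, 0 }, { 10, 4 } };
    for (size_t i = 0; i < sizeof(srcs) / sizeof(srcs[0]); i++) {
        int err = report_counts(&srcs[i], 1);
        if (err == EAGAIN) {
            continue; /* skipped source; keep iterating, like the query loop */
        }
        if (err != 0) {
            break;    /* hard error: stop, as ENOMEM/ENOBUFS do in the kernel */
        }
    }
    return 0;
}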
if (src->provider->nstat_descriptor_length == 0 || - src->provider->nstat_copy_descriptor == NULL) - { + src->provider->nstat_copy_descriptor == NULL) { return EOPNOTSUPP; } // Allocate storage for the descriptor message - mbuf_t msg; - unsigned int one = 1; - u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length; - if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) - { + mbuf_t msg; + unsigned int one = 1; + u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length; + if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) { return ENOMEM; } - nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg); + nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg); bzero(desc, size); mbuf_setlen(msg, size); mbuf_pkthdr_setlen(msg, mbuf_len(msg)); // Query the provider for the provider specific bits - errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length); + errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length); - if (result != 0) - { + if (result != 0) { mbuf_freem(msg); return result; } @@ -4057,8 +4006,7 @@ nstat_control_send_description( desc->provider = src->provider->nstat_provider_id; result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR); - if (result != 0) - { + if (result != 0) { nstat_stats.nstat_descriptionfailures += 1; mbuf_freem(msg); } @@ -4068,34 +4016,32 @@ nstat_control_send_description( static errno_t nstat_control_append_description( - nstat_control_state *state, - nstat_src *src) + nstat_control_state *state, + nstat_src *src) { - size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length; + size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length; if (size > 512 || src->provider->nstat_descriptor_length == 0 || - src->provider->nstat_copy_descriptor == NULL) - { + src->provider->nstat_copy_descriptor == NULL) { return EOPNOTSUPP; } // Fill out a buffer on the stack, we will copy to the mbuf later - u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment + u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment bzero(buffer, size); - nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer; + nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer; desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC; desc->hdr.length = size; desc->srcref = src->srcref; desc->event_flags = 0; desc->provider = src->provider->nstat_provider_id; - errno_t result = 0; + errno_t result = 0; // Fill in the description // Query the provider for the provider specific bits result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, - src->provider->nstat_descriptor_length); - if (result != 0) - { + src->provider->nstat_descriptor_length); + if (result != 0) { return result; } @@ -4104,31 +4050,29 @@ nstat_control_append_description( static int nstat_control_send_update( - nstat_control_state *state, - nstat_src *src, - u_int64_t context, - u_int16_t hdr_flags, - int *gone) + nstat_control_state *state, + nstat_src *src, + u_int64_t context, + u_int16_t hdr_flags, + int *gone) { // Provider doesn't support getting the descriptor or counts? Done. 
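The description and update builders size their variable-length messages as offsetof(type, data) plus the provider's descriptor length, and the append_* variants stage the result in a stack array of u_int64_t so the header is naturally aligned. That sizing idiom in isolation, with a hypothetical message type standing in for nstat_msg_src_description:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical header plus variable-length payload, shaped like the
 * kernel's descriptor messages. */
struct msg_desc {
    uint32_t type;
    uint32_t length;
    uint8_t  data[]; /* provider-specific bytes follow the fixed header */
};

size_t build_desc(uint64_t *buffer, size_t bufbytes,
    const void *payload, size_t payload_len)
{
    /* offsetof(), not sizeof(), so trailing padding after the header is
     * never counted twice. */
    size_t size = offsetof(struct msg_desc, data) + payload_len;
    if (size > bufbytes) {
        return 0;
    }

    /* uint64_t backing storage keeps the header aligned, matching the
     * kernel's stack-buffer variant. */
    struct msg_desc *desc = (struct msg_desc *)buffer;
    memset(desc, 0, size);
    desc->type = 1;
    desc->length = (uint32_t)size;
    memcpy(desc->data, payload, payload_len);
    return size;
}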
if ((src->provider->nstat_descriptor_length == 0 || - src->provider->nstat_copy_descriptor == NULL) && - src->provider->nstat_counts == NULL) - { + src->provider->nstat_copy_descriptor == NULL) && + src->provider->nstat_counts == NULL) { return EOPNOTSUPP; } // Allocate storage for the descriptor message - mbuf_t msg; - unsigned int one = 1; - u_int32_t size = offsetof(nstat_msg_src_update, data) + - src->provider->nstat_descriptor_length; - if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) - { + mbuf_t msg; + unsigned int one = 1; + u_int32_t size = offsetof(nstat_msg_src_update, data) + + src->provider->nstat_descriptor_length; + if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) { return ENOMEM; } - nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg); + nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg); bzero(desc, size); desc->hdr.context = context; desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE; @@ -4141,38 +4085,30 @@ nstat_control_send_update( mbuf_setlen(msg, size); mbuf_pkthdr_setlen(msg, mbuf_len(msg)); - errno_t result = 0; - if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) - { + errno_t result = 0; + if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) { // Query the provider for the provider specific bits result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, - src->provider->nstat_descriptor_length); - if (result != 0) - { + src->provider->nstat_descriptor_length); + if (result != 0) { mbuf_freem(msg); return result; } } - if (src->provider->nstat_counts) - { + if (src->provider->nstat_counts) { result = src->provider->nstat_counts(src->cookie, &desc->counts, gone); - if (result == 0) - { + if (result == 0) { if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES && - desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) - { + desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) { result = EAGAIN; - } - else - { + } else { result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR); } } } - if (result != 0) - { + if (result != 0) { nstat_stats.nstat_srcupatefailures += 1; mbuf_freem(msg); } @@ -4182,59 +4118,55 @@ nstat_control_send_update( static errno_t nstat_control_append_update( - nstat_control_state *state, - nstat_src *src, - int *gone) + nstat_control_state *state, + nstat_src *src, + int *gone) { - size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length; + size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length; if (size > 512 || ((src->provider->nstat_descriptor_length == 0 || - src->provider->nstat_copy_descriptor == NULL) && - src->provider->nstat_counts == NULL)) - { + src->provider->nstat_copy_descriptor == NULL) && + src->provider->nstat_counts == NULL)) { return EOPNOTSUPP; } // Fill out a buffer on the stack, we will copy to the mbuf later - u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment + u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment bzero(buffer, size); - nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer; + nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer; desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE; desc->hdr.length = size; desc->srcref = src->srcref; desc->event_flags = 0; desc->provider = src->provider->nstat_provider_id; - errno_t result = 0; + errno_t result = 0; // Fill in the description - if 
(src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) - { + if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) { // Query the provider for the provider specific bits result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, - src->provider->nstat_descriptor_length); - if (result != 0) - { + src->provider->nstat_descriptor_length); + if (result != 0) { nstat_stats.nstat_copy_descriptor_failures++; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result); + } return result; } } - if (src->provider->nstat_counts) - { + if (src->provider->nstat_counts) { result = src->provider->nstat_counts(src->cookie, &desc->counts, gone); - if (result != 0) - { + if (result != 0) { nstat_stats.nstat_provider_counts_failures++; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s: src->provider->nstat_counts: %d\n", __func__, result); + } return result; } if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES && - desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) - { + desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) { return EAGAIN; } } @@ -4244,8 +4176,8 @@ nstat_control_append_update( static errno_t nstat_control_send_removed( - nstat_control_state *state, - nstat_src *src) + nstat_control_state *state, + nstat_src *src) { nstat_msg_src_removed removed; errno_t result; @@ -4257,74 +4189,74 @@ nstat_control_send_removed( removed.srcref = src->srcref; result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed, sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT); - if (result != 0) + if (result != 0) { nstat_stats.nstat_msgremovedfailures += 1; + } return result; } static errno_t nstat_control_handle_add_request( - nstat_control_state *state, - mbuf_t m) + nstat_control_state *state, + mbuf_t m) { - errno_t result; + errno_t result; // Verify the header fits in the first mbuf - if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) - { + if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) { return EINVAL; } // Calculate the length of the parameter field - int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param); - if (paramlength < 0 || paramlength > 2 * 1024) - { + int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param); + if (paramlength < 0 || paramlength > 2 * 1024) { return EINVAL; } - nstat_provider *provider = NULL; - nstat_provider_cookie_t cookie = NULL; - nstat_msg_add_src_req *req = mbuf_data(m); - if (mbuf_pkthdr_len(m) > mbuf_len(m)) - { + nstat_provider *provider = NULL; + nstat_provider_cookie_t cookie = NULL; + nstat_msg_add_src_req *req = mbuf_data(m); + if (mbuf_pkthdr_len(m) > mbuf_len(m)) { // parameter is too large, we need to make a contiguous copy - void *data = OSMalloc(paramlength, nstat_malloc_tag); + void *data = OSMalloc(paramlength, nstat_malloc_tag); - if (!data) return ENOMEM; + if (!data) { + return ENOMEM; + } result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data); - if (result == 0) + if (result == 0) { result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie); + } OSFree(data, paramlength, nstat_malloc_tag); - } - else - { + } else { result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie); } - if (result != 0) - { + if (result != 0) { return result; } result = nstat_control_source_add(req->hdr.context, state, 
provider, cookie); - if (result != 0) + if (result != 0) { provider->nstat_release(cookie, 0); + } return result; } static errno_t nstat_set_provider_filter( - nstat_control_state *state, + nstat_control_state *state, nstat_msg_add_all_srcs *req) { nstat_provider_id_t provider_id = req->provider; u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id)); - if ((prev_ncs_watching & (1 << provider_id)) != 0) + if ((prev_ncs_watching & (1 << provider_id)) != 0) { return EALREADY; + } state->ncs_watching |= (1 << provider_id); state->ncs_provider_filters[provider_id].npf_flags = req->filter; @@ -4336,35 +4268,40 @@ nstat_set_provider_filter( static errno_t nstat_control_handle_add_all( - nstat_control_state *state, - mbuf_t m) + nstat_control_state *state, + mbuf_t m) { - errno_t result = 0; + errno_t result = 0; // Verify the header fits in the first mbuf - if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) - { + if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) { return EINVAL; } - nstat_msg_add_all_srcs *req = mbuf_data(m); - if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT; + nstat_msg_add_all_srcs *req = mbuf_data(m); + if (req->provider > NSTAT_PROVIDER_LAST) { + return ENOENT; + } - nstat_provider *provider = nstat_find_provider_by_id(req->provider); + nstat_provider *provider = nstat_find_provider_by_id(req->provider); - if (!provider) return ENOENT; - if (provider->nstat_watcher_add == NULL) return ENOTSUP; + if (!provider) { + return ENOENT; + } + if (provider->nstat_watcher_add == NULL) { + return ENOTSUP; + } if (nstat_privcheck != 0) { result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0); - if (result != 0) + if (result != 0) { return result; + } } lck_mtx_lock(&state->ncs_mtx); - if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) - { + if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) { // Suppression of source messages implicitly requires the use of update messages state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES; } @@ -4377,47 +4314,47 @@ nstat_control_handle_add_all( result = provider->nstat_watcher_add(state, req); - if (result == 0) + if (result == 0) { nstat_enqueue_success(req->hdr.context, state, 0); + } return result; } static errno_t nstat_control_source_add( - u_int64_t context, - nstat_control_state *state, - nstat_provider *provider, - nstat_provider_cookie_t cookie) + u_int64_t context, + nstat_control_state *state, + nstat_provider *provider, + nstat_provider_cookie_t cookie) { // Fill out source added message if appropriate - mbuf_t msg = NULL; - nstat_src_ref_t *srcrefp = NULL; + mbuf_t msg = NULL; + nstat_src_ref_t *srcrefp = NULL; - u_int64_t provider_filter_flagss = + u_int64_t provider_filter_flagss = state->ncs_provider_filters[provider->nstat_provider_id].npf_flags; - boolean_t tell_user = + boolean_t tell_user = ((provider_filter_flagss & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0); - u_int32_t src_filter = + u_int32_t src_filter = (provider_filter_flagss & NSTAT_FILTER_PROVIDER_NOZEROBYTES) - ? NSTAT_FILTER_NOZEROBYTES : 0; + ? 
NSTAT_FILTER_NOZEROBYTES : 0; - if (provider_filter_flagss & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) - { + if (provider_filter_flagss & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) { src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE; } - if (tell_user) - { + if (tell_user) { unsigned int one = 1; if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added), - &one, &msg) != 0) + &one, &msg) != 0) { return ENOMEM; + } mbuf_setlen(msg, sizeof(nstat_msg_src_added)); mbuf_pkthdr_setlen(msg, mbuf_len(msg)); - nstat_msg_src_added *add = mbuf_data(msg); + nstat_msg_src_added *add = mbuf_data(msg); bzero(add, sizeof(*add)); add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED; add->hdr.length = mbuf_len(msg); @@ -4427,10 +4364,11 @@ nstat_control_source_add( } // Allocate storage for the source - nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag); - if (src == NULL) - { - if (msg) mbuf_freem(msg); + nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag); + if (src == NULL) { + if (msg) { + mbuf_freem(msg); + } return ENOMEM; } @@ -4438,14 +4376,16 @@ nstat_control_source_add( lck_mtx_lock(&state->ncs_mtx); src->srcref = nstat_control_next_src_ref(state); - if (srcrefp) + if (srcrefp) { *srcrefp = src->srcref; + } - if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) - { + if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) { lck_mtx_unlock(&state->ncs_mtx); OSFree(src, sizeof(*src), nstat_malloc_tag); - if (msg) mbuf_freem(msg); + if (msg) { + mbuf_freem(msg); + } return EINVAL; } src->provider = provider; @@ -4453,13 +4393,11 @@ nstat_control_source_add( src->filter = src_filter; src->seq = 0; - if (msg) - { + if (msg) { // send the source added message if appropriate errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, - CTL_DATA_EOR); - if (result != 0) - { + CTL_DATA_EOR); + if (result != 0) { nstat_stats.nstat_srcaddedfailures += 1; lck_mtx_unlock(&state->ncs_mtx); OSFree(src, sizeof(*src), nstat_malloc_tag); @@ -4478,14 +4416,13 @@ nstat_control_source_add( static errno_t nstat_control_handle_remove_request( - nstat_control_state *state, - mbuf_t m) + nstat_control_state *state, + mbuf_t m) { - nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID; + nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID; nstat_src *src; - if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) - { + if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) { return EINVAL; } @@ -4494,27 +4431,27 @@ nstat_control_handle_remove_request( // Remove this source as we look for it TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { - if (src->srcref == srcref) - { + if (src->srcref == srcref) { break; } } - if (src) - { + if (src) { TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); } lck_mtx_unlock(&state->ncs_mtx); - if (src) nstat_control_cleanup_source(state, src, FALSE); + if (src) { + nstat_control_cleanup_source(state, src, FALSE); + } return src ? 0 : ENOENT; } static errno_t nstat_control_handle_query_request( - nstat_control_state *state, - mbuf_t m) + nstat_control_state *state, + mbuf_t m) { // TBD: handle this from another thread so we can enqueue a lot of data // As written, if a client requests query all, this function will be @@ -4524,12 +4461,11 @@ nstat_control_handle_query_request( // using this socket, one for read and one for write. Two threads probably // won't work with this code anyhow since we don't have proper locking in // place yet. 
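The query-all path that follows reports in batches: each pass emits at most QUERY_CONTINUATION_SRC_COUNT sources, stamps each reported source with the state's current sequence number so it is skipped on the next pass, and nstat_control_end_query() tells the client whether another partial request is needed. A minimal model of that stamping scheme, with hypothetical names and batch size:

#include <stdbool.h>
#include <stddef.h>

#define BATCH 2 /* stand-in for QUERY_CONTINUATION_SRC_COUNT */

struct src { unsigned seq; };

/* One partial pass: report unstamped sources, up to BATCH of them.
 * Returns true when another pass may be needed, i.e. when the reply
 * should carry the continuation flag. */
static bool query_pass(struct src *srcs, size_t n, unsigned cur_seq)
{
    size_t reported = 0;
    for (size_t i = 0; i < n; i++) {
        if (srcs[i].seq == cur_seq) {
            continue;          /* already reported in this query-all sequence */
        }
        srcs[i].seq = cur_seq; /* stamp, like src->seq = state->ncs_seq */
        if (++reported >= BATCH) {
            return i + 1 < n;  /* batch full; more sources may remain */
        }
    }
    return false;              /* walked the whole list: query is complete */
}

A client keeps re-sending the query with the same context until the continuation flag stops coming back, which is what the ncs_context/ncs_seq bookkeeping in nstat_control_begin_query() and nstat_control_end_query() below enforces.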
- tailq_head_nstat_src dead_list; - errno_t result = ENOENT; - nstat_msg_query_src_req req; + tailq_head_nstat_src dead_list; + errno_t result = ENOENT; + nstat_msg_query_src_req req; - if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) - { + if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) { return EINVAL; } @@ -4538,13 +4474,12 @@ nstat_control_handle_query_request( lck_mtx_lock(&state->ncs_mtx); - if (all_srcs) - { + if (all_srcs) { state->ncs_flags |= NSTAT_FLAG_REQCOUNTS; } - nstat_src *src, *tmpsrc; - u_int64_t src_count = 0; - boolean_t partial = FALSE; + nstat_src *src, *tmpsrc; + u_int64_t src_count = 0; + boolean_t partial = FALSE; /* * Error handling policy and sequence number generation is folded into @@ -4555,26 +4490,20 @@ nstat_control_handle_query_request( TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) { - int gone = 0; + int gone = 0; // XXX ignore IFACE types? - if (all_srcs || src->srcref == req.srcref) - { + if (all_srcs || src->srcref == req.srcref) { if (nstat_control_reporting_allowed(state, src) - && (!partial || !all_srcs || src->seq != state->ncs_seq)) - { + && (!partial || !all_srcs || src->seq != state->ncs_seq)) { if (all_srcs && - (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) - { + (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) { result = nstat_control_append_counts(state, src, &gone); - } - else - { + } else { result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone); } - if (ENOMEM == result || ENOBUFS == result) - { + if (ENOMEM == result || ENOBUFS == result) { /* * If the counts message failed to * enqueue then we should clear our flag so @@ -4586,8 +4515,7 @@ nstat_control_handle_query_request( state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS; break; } - if (partial) - { + if (partial) { /* * We skip over hard errors and * filtered sources. @@ -4598,17 +4526,16 @@ nstat_control_handle_query_request( } } - if (gone) - { + if (gone) { // send one last descriptor message so client may see last state // If we can't send the notification now, it // will be sent in the idle cleanup. result = nstat_control_send_description(state, src, 0, 0); - if (result != 0) - { + if (result != 0) { nstat_stats.nstat_control_send_description_failures++; - if (nstat_debug != 0) + if (nstat_debug != 0) { printf("%s - nstat_control_send_description() %d\n", __func__, result); + } state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS; break; } @@ -4618,15 +4545,11 @@ nstat_control_handle_query_request( TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); } - if (all_srcs) - { - if (src_count >= QUERY_CONTINUATION_SRC_COUNT) - { + if (all_srcs) { + if (src_count >= QUERY_CONTINUATION_SRC_COUNT) { break; } - } - else if (req.srcref == src->srcref) - { + } else if (req.srcref == src->srcref) { break; } } @@ -4634,8 +4557,9 @@ nstat_control_handle_query_request( nstat_flush_accumulated_msgs(state); u_int16_t flags = 0; - if (req.srcref == NSTAT_SRC_REF_ALL) + if (req.srcref == NSTAT_SRC_REF_ALL) { flags = nstat_control_end_query(state, src, partial); + } lck_mtx_unlock(&state->ncs_mtx); @@ -4644,14 +4568,12 @@ nstat_control_handle_query_request( * propagate to nstat_control_send. This way, the error is sent to * user-level. 
*/ - if (all_srcs && ENOMEM != result && ENOBUFS != result) - { + if (all_srcs && ENOMEM != result && ENOBUFS != result) { nstat_enqueue_success(req.hdr.context, state, flags); result = 0; } - while ((src = TAILQ_FIRST(&dead_list))) - { + while ((src = TAILQ_FIRST(&dead_list))) { TAILQ_REMOVE(&dead_list, src, ns_control_link); nstat_control_cleanup_source(state, src, FALSE); } @@ -4661,15 +4583,14 @@ nstat_control_handle_query_request( static errno_t nstat_control_handle_get_src_description( - nstat_control_state *state, - mbuf_t m) + nstat_control_state *state, + mbuf_t m) { - nstat_msg_get_src_description req; + nstat_msg_get_src_description req; errno_t result = ENOENT; nstat_src *src; - if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) - { + if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) { return EINVAL; } @@ -4686,45 +4607,36 @@ nstat_control_handle_get_src_description( TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { - if (all_srcs || src->srcref == req.srcref) - { + if (all_srcs || src->srcref == req.srcref) { if (nstat_control_reporting_allowed(state, src) - && (!all_srcs || !partial || src->seq != state->ncs_seq)) - { - if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) - { + && (!all_srcs || !partial || src->seq != state->ncs_seq)) { + if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) { result = nstat_control_append_description(state, src); - } - else - { + } else { result = nstat_control_send_description(state, src, req.hdr.context, 0); } - if (ENOMEM == result || ENOBUFS == result) - { + if (ENOMEM == result || ENOBUFS == result) { /* * If the description message failed to * enqueue then we give up for now. */ break; } - if (partial) - { + if (partial) { /* * Note, we skip over hard errors and * filtered sources. */ src->seq = state->ncs_seq; src_count++; - if (src_count >= QUERY_CONTINUATION_SRC_COUNT) - { + if (src_count >= QUERY_CONTINUATION_SRC_COUNT) { break; } } } - if (!all_srcs) - { + if (!all_srcs) { break; } } @@ -4732,8 +4644,9 @@ nstat_control_handle_get_src_description( nstat_flush_accumulated_msgs(state); u_int16_t flags = 0; - if (req.srcref == NSTAT_SRC_REF_ALL) + if (req.srcref == NSTAT_SRC_REF_ALL) { flags = nstat_control_end_query(state, src, partial); + } lck_mtx_unlock(&state->ncs_mtx); /* @@ -4741,8 +4654,7 @@ nstat_control_handle_get_src_description( * propagate to nstat_control_send. This way, the error is sent to * user-level. 
*/ - if (all_srcs && ENOMEM != result && ENOBUFS != result) - { + if (all_srcs && ENOMEM != result && ENOBUFS != result) { nstat_enqueue_success(req.hdr.context, state, flags); result = 0; } @@ -4752,42 +4664,44 @@ nstat_control_handle_get_src_description( static errno_t nstat_control_handle_set_filter( - nstat_control_state *state, - mbuf_t m) + nstat_control_state *state, + mbuf_t m) { nstat_msg_set_filter req; nstat_src *src; - if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) + if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) { return EINVAL; + } if (req.srcref == NSTAT_SRC_REF_ALL || - req.srcref == NSTAT_SRC_REF_INVALID) + req.srcref == NSTAT_SRC_REF_INVALID) { return EINVAL; + } lck_mtx_lock(&state->ncs_mtx); TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { - if (req.srcref == src->srcref) - { + if (req.srcref == src->srcref) { src->filter = req.filter; break; } } lck_mtx_unlock(&state->ncs_mtx); - if (src == NULL) + if (src == NULL) { return ENOENT; + } return 0; } static void nstat_send_error( - nstat_control_state *state, - u_int64_t context, - u_int32_t error) + nstat_control_state *state, + u_int64_t context, + u_int32_t error) { errno_t result; - struct nstat_msg_error err; + struct nstat_msg_error err; bzero(&err, sizeof(err)); err.hdr.type = NSTAT_MSG_TYPE_ERROR; @@ -4796,27 +4710,27 @@ nstat_send_error( err.error = error; result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err, - sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT); - if (result != 0) + sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT); + if (result != 0) { nstat_stats.nstat_msgerrorfailures++; + } } static boolean_t nstat_control_begin_query( - nstat_control_state *state, - const nstat_msg_hdr *hdrp) + nstat_control_state *state, + const nstat_msg_hdr *hdrp) { boolean_t partial = FALSE; - if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) - { + if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) { /* A partial query all has been requested. */ partial = TRUE; - if (state->ncs_context != hdrp->context) - { - if (state->ncs_context != 0) + if (state->ncs_context != hdrp->context) { + if (state->ncs_context != 0) { nstat_send_error(state, state->ncs_context, EAGAIN); + } /* Initialize state for a partial query all. */ state->ncs_context = hdrp->context; @@ -4829,14 +4743,13 @@ nstat_control_begin_query( static u_int16_t nstat_control_end_query( - nstat_control_state *state, - nstat_src *last_src, - boolean_t partial) + nstat_control_state *state, + nstat_src *last_src, + boolean_t partial) { u_int16_t flags = 0; - if (last_src == NULL || !partial) - { + if (last_src == NULL || !partial) { /* * We iterated through the entire srcs list or exited early * from the loop when a partial update was not requested (an @@ -4844,9 +4757,7 @@ nstat_control_end_query( * that the query is finished. */ state->ncs_context = 0; - } - else - { + } else { /* * Indicate to userlevel to make another partial request as * there are still sources left to be reported. 
@@ -4859,13 +4770,12 @@ nstat_control_end_query( static errno_t nstat_control_handle_get_update( - nstat_control_state *state, - mbuf_t m) + nstat_control_state *state, + mbuf_t m) { - nstat_msg_query_src_req req; + nstat_msg_query_src_req req; - if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) - { + if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) { return EINVAL; } @@ -4873,8 +4783,8 @@ nstat_control_handle_get_update( state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES; - errno_t result = ENOENT; - nstat_src *src, *tmpsrc; + errno_t result = ENOENT; + nstat_src *src, *tmpsrc; tailq_head_nstat_src dead_list; u_int64_t src_count = 0; boolean_t partial = FALSE; @@ -4888,28 +4798,24 @@ nstat_control_handle_get_update( TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) { - int gone; + int gone; gone = 0; - if (nstat_control_reporting_allowed(state, src)) - { + if (nstat_control_reporting_allowed(state, src)) { /* skip this source if it has the current state * sequence number as it's already been reported in * this query-all partial sequence. */ if (req.srcref == NSTAT_SRC_REF_ALL - && (FALSE == partial || src->seq != state->ncs_seq)) - { + && (FALSE == partial || src->seq != state->ncs_seq)) { result = nstat_control_append_update(state, src, &gone); - if (ENOMEM == result || ENOBUFS == result) - { + if (ENOMEM == result || ENOBUFS == result) { /* * If the update message failed to * enqueue then give up. */ break; } - if (partial) - { + if (partial) { /* * We skip over hard errors and * filtered sources. @@ -4917,26 +4823,21 @@ nstat_control_handle_get_update( src->seq = state->ncs_seq; src_count++; } - } - else if (src->srcref == req.srcref) - { + } else if (src->srcref == req.srcref) { result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone); } } - if (gone) - { + if (gone) { // pull src out of the list TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); } - if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref) - { + if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref) { break; } - if (src_count >= QUERY_CONTINUATION_SRC_COUNT) - { + if (src_count >= QUERY_CONTINUATION_SRC_COUNT) { break; } } @@ -4945,8 +4846,9 @@ nstat_control_handle_get_update( u_int16_t flags = 0; - if (req.srcref == NSTAT_SRC_REF_ALL) + if (req.srcref == NSTAT_SRC_REF_ALL) { flags = nstat_control_end_query(state, src, partial); + } lck_mtx_unlock(&state->ncs_mtx); /* @@ -4954,14 +4856,12 @@ nstat_control_handle_get_update( * propagate to nstat_control_send. This way, the error is sent to * user-level. 
*/ - if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result) - { + if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result) { nstat_enqueue_success(req.hdr.context, state, flags); result = 0; } - while ((src = TAILQ_FIRST(&dead_list))) - { + while ((src = TAILQ_FIRST(&dead_list))) { TAILQ_REMOVE(&dead_list, src, ns_control_link); // release src and send notification nstat_control_cleanup_source(state, src, FALSE); @@ -4972,12 +4872,11 @@ nstat_control_handle_get_update( static errno_t nstat_control_handle_subscribe_sysinfo( - nstat_control_state *state) + nstat_control_state *state) { errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0); - if (result != 0) - { + if (result != 0) { return result; } @@ -4990,30 +4889,26 @@ nstat_control_handle_subscribe_sysinfo( static errno_t nstat_control_send( - kern_ctl_ref kctl, - u_int32_t unit, - void *uinfo, - mbuf_t m, - __unused int flags) + kern_ctl_ref kctl, + u_int32_t unit, + void *uinfo, + mbuf_t m, + __unused int flags) { - nstat_control_state *state = (nstat_control_state*)uinfo; - struct nstat_msg_hdr *hdr; - struct nstat_msg_hdr storage; - errno_t result = 0; + nstat_control_state *state = (nstat_control_state*)uinfo; + struct nstat_msg_hdr *hdr; + struct nstat_msg_hdr storage; + errno_t result = 0; - if (mbuf_pkthdr_len(m) < sizeof(*hdr)) - { + if (mbuf_pkthdr_len(m) < sizeof(*hdr)) { // Is this the right thing to do? mbuf_freem(m); return EINVAL; } - if (mbuf_len(m) >= sizeof(*hdr)) - { + if (mbuf_len(m) >= sizeof(*hdr)) { hdr = mbuf_data(m); - } - else - { + } else { mbuf_copydata(m, 0, sizeof(storage), &storage); hdr = &storage; } @@ -5021,58 +4916,54 @@ nstat_control_send( // Legacy clients may not set the length // Those clients are likely not setting the flags either // Fix everything up so old clients continue to work - if (hdr->length != mbuf_pkthdr_len(m)) - { + if (hdr->length != mbuf_pkthdr_len(m)) { hdr->flags = 0; hdr->length = mbuf_pkthdr_len(m); - if (hdr == &storage) - { + if (hdr == &storage) { mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT); } } - switch (hdr->type) - { - case NSTAT_MSG_TYPE_ADD_SRC: - result = nstat_control_handle_add_request(state, m); - break; + switch (hdr->type) { + case NSTAT_MSG_TYPE_ADD_SRC: + result = nstat_control_handle_add_request(state, m); + break; - case NSTAT_MSG_TYPE_ADD_ALL_SRCS: - result = nstat_control_handle_add_all(state, m); - break; + case NSTAT_MSG_TYPE_ADD_ALL_SRCS: + result = nstat_control_handle_add_all(state, m); + break; - case NSTAT_MSG_TYPE_REM_SRC: - result = nstat_control_handle_remove_request(state, m); - break; + case NSTAT_MSG_TYPE_REM_SRC: + result = nstat_control_handle_remove_request(state, m); + break; - case NSTAT_MSG_TYPE_QUERY_SRC: - result = nstat_control_handle_query_request(state, m); - break; + case NSTAT_MSG_TYPE_QUERY_SRC: + result = nstat_control_handle_query_request(state, m); + break; - case NSTAT_MSG_TYPE_GET_SRC_DESC: - result = nstat_control_handle_get_src_description(state, m); - break; + case NSTAT_MSG_TYPE_GET_SRC_DESC: + result = nstat_control_handle_get_src_description(state, m); + break; - case NSTAT_MSG_TYPE_SET_FILTER: - result = nstat_control_handle_set_filter(state, m); - break; + case NSTAT_MSG_TYPE_SET_FILTER: + result = nstat_control_handle_set_filter(state, m); + break; - case NSTAT_MSG_TYPE_GET_UPDATE: - result = nstat_control_handle_get_update(state, m); - break; + case NSTAT_MSG_TYPE_GET_UPDATE: + result = nstat_control_handle_get_update(state, 
m); + break; - case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO: - result = nstat_control_handle_subscribe_sysinfo(state); - break; + case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO: + result = nstat_control_handle_subscribe_sysinfo(state); + break; - default: - result = EINVAL; - break; + default: + result = EINVAL; + break; } - if (result != 0) - { - struct nstat_msg_error err; + if (result != 0) { + struct nstat_msg_error err; bzero(&err, sizeof(err)); err.hdr.type = NSTAT_MSG_TYPE_ERROR; @@ -5081,29 +4972,29 @@ nstat_control_send( err.error = result; if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 && - mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) - { + mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) { result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT); - if (result != 0) - { + if (result != 0) { mbuf_freem(m); } m = NULL; } - if (result != 0) - { + if (result != 0) { // Unable to prepend the error to the request - just send the error err.hdr.length = sizeof(err); result = ctl_enqueuedata(kctl, unit, &err, sizeof(err), - CTL_DATA_EOR | CTL_DATA_CRIT); - if (result != 0) + CTL_DATA_EOR | CTL_DATA_CRIT); + if (result != 0) { nstat_stats.nstat_msgerrorfailures += 1; + } } nstat_stats.nstat_handle_msg_failures += 1; } - if (m) mbuf_freem(m); + if (m) { + mbuf_freem(m); + } return result; } @@ -5127,21 +5018,22 @@ tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_ { struct tcpcb *tp = intotcpcb(inp); if (tp && inp->inp_last_outifp && - inp->inp_last_outifp->if_index == ifindex && - inp->inp_state != INPCB_STATE_DEAD && - !(tp->t_flags & TF_LOCAL)) - { + inp->inp_last_outifp->if_index == ifindex && + inp->inp_state != INPCB_STATE_DEAD && + !(tp->t_flags & TF_LOCAL)) { struct tcp_conn_status connstatus; indicators->xp_numflows++; tcp_get_connectivity_status(tp, &connstatus); - if (connstatus.write_probe_failed) + if (connstatus.write_probe_failed) { indicators->xp_write_probe_fails++; - if (connstatus.read_probe_failed) + } + if (connstatus.read_probe_failed) { indicators->xp_read_probe_fails++; - if (connstatus.conn_probe_failed) + } + if (connstatus.conn_probe_failed) { indicators->xp_conn_probe_fails++; - if (inp->inp_start_timestamp > min_recent_start_time) - { + } + if (inp->inp_start_timestamp > min_recent_start_time) { uint64_t flow_count; indicators->xp_recentflows++; @@ -5153,8 +5045,7 @@ tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_ indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes; indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes; indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes; - if (tp->snd_max - tp->snd_una) - { + if (tp->snd_max - tp->snd_una) { indicators->xp_recentflows_unacked++; } } @@ -5162,7 +5053,7 @@ tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_ } lck_rw_done(tcbinfo.ipi_lock); - return (error); + return error; } @@ -5173,31 +5064,26 @@ ntstat_tcp_progress_indicators(struct sysctl_req *req) int error = 0; struct tcpprogressreq requested; - if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) - { + if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) { return EACCES; } - if (req->newptr == USER_ADDR_NULL) - { + if (req->newptr == USER_ADDR_NULL) { return EINVAL; } - if (req->newlen < sizeof(req)) - { + if (req->newlen < sizeof(req)) { return EINVAL; } error = SYSCTL_IN(req, &requested, sizeof(requested)); - if (error != 0) - { + if 
(error != 0) { return error; } error = tcp_progress_indicators_for_interface(requested.ifindex, requested.recentflow_maxduration, &indicators); - if (error != 0) - { + if (error != 0) { return error; } error = SYSCTL_OUT(req, &indicators, sizeof(indicators)); - return (error); + return error; } diff --git a/bsd/net/ntstat.h b/bsd/net/ntstat.h index 82577499f..7f204fcf3 100644 --- a/bsd/net/ntstat.h +++ b/bsd/net/ntstat.h @@ -37,293 +37,288 @@ #ifdef PRIVATE #pragma mark -- Common Data Structures -- -#define __NSTAT_REVISION__ 9 +#define __NSTAT_REVISION__ 9 -typedef u_int32_t nstat_provider_id_t; -typedef u_int64_t nstat_src_ref_t; -typedef u_int64_t nstat_event_flags_t; +typedef u_int32_t nstat_provider_id_t; +typedef u_int64_t nstat_src_ref_t; +typedef u_int64_t nstat_event_flags_t; // The following event definitions are very provisional.. -enum -{ - NSTAT_EVENT_SRC_ADDED = 0x00000001 - ,NSTAT_EVENT_SRC_REMOVED = 0x00000002 - ,NSTAT_EVENT_SRC_QUERIED = 0x00000004 - ,NSTAT_EVENT_SRC_QUERIED_ALL = 0x00000008 - ,NSTAT_EVENT_SRC_WILL_CHANGE_STATE = 0x00000010 - ,NSTAT_EVENT_SRC_DID_CHANGE_STATE = 0x00000020 - ,NSTAT_EVENT_SRC_WILL_CHANGE_OWNER = 0x00000040 - ,NSTAT_EVENT_SRC_DID_CHANGE_OWNER = 0x00000080 - ,NSTAT_EVENT_SRC_WILL_CHANGE_PROPERTY = 0x00000100 - ,NSTAT_EVENT_SRC_DID_CHANGE_PROPERTY = 0x00000200 +enum{ + NSTAT_EVENT_SRC_ADDED = 0x00000001 + , NSTAT_EVENT_SRC_REMOVED = 0x00000002 + , NSTAT_EVENT_SRC_QUERIED = 0x00000004 + , NSTAT_EVENT_SRC_QUERIED_ALL = 0x00000008 + , NSTAT_EVENT_SRC_WILL_CHANGE_STATE = 0x00000010 + , NSTAT_EVENT_SRC_DID_CHANGE_STATE = 0x00000020 + , NSTAT_EVENT_SRC_WILL_CHANGE_OWNER = 0x00000040 + , NSTAT_EVENT_SRC_DID_CHANGE_OWNER = 0x00000080 + , NSTAT_EVENT_SRC_WILL_CHANGE_PROPERTY = 0x00000100 + , NSTAT_EVENT_SRC_DID_CHANGE_PROPERTY = 0x00000200 }; -typedef struct nstat_counts -{ +typedef struct nstat_counts { /* Counters */ - u_int64_t nstat_rxpackets __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t nstat_rxbytes __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t nstat_txpackets __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t nstat_txbytes __attribute__((aligned(sizeof(u_int64_t)))); - - u_int64_t nstat_cell_rxbytes __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t nstat_cell_txbytes __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t nstat_wifi_rxbytes __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t nstat_wifi_txbytes __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t nstat_wired_rxbytes __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t nstat_wired_txbytes __attribute__((aligned(sizeof(u_int64_t)))); - - u_int32_t nstat_rxduplicatebytes; - u_int32_t nstat_rxoutoforderbytes; - u_int32_t nstat_txretransmit; - - u_int32_t nstat_connectattempts; - u_int32_t nstat_connectsuccesses; - - u_int32_t nstat_min_rtt; - u_int32_t nstat_avg_rtt; - u_int32_t nstat_var_rtt; + u_int64_t nstat_rxpackets __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t nstat_rxbytes __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t nstat_txpackets __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t nstat_txbytes __attribute__((aligned(sizeof(u_int64_t)))); + + u_int64_t nstat_cell_rxbytes __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t nstat_cell_txbytes __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t nstat_wifi_rxbytes __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t nstat_wifi_txbytes __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t nstat_wired_rxbytes 
__attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t nstat_wired_txbytes __attribute__((aligned(sizeof(u_int64_t)))); + + u_int32_t nstat_rxduplicatebytes; + u_int32_t nstat_rxoutoforderbytes; + u_int32_t nstat_txretransmit; + + u_int32_t nstat_connectattempts; + u_int32_t nstat_connectsuccesses; + + u_int32_t nstat_min_rtt; + u_int32_t nstat_avg_rtt; + u_int32_t nstat_var_rtt; } nstat_counts; -#define NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE 24 -typedef struct nstat_sysinfo_keyval -{ - u_int32_t nstat_sysinfo_key; - u_int32_t nstat_sysinfo_flags; +#define NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE 24 +typedef struct nstat_sysinfo_keyval { + u_int32_t nstat_sysinfo_key; + u_int32_t nstat_sysinfo_flags; union { - int64_t nstat_sysinfo_scalar; - double nstat_sysinfo_distribution; - u_int8_t nstat_sysinfo_string[NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE]; + int64_t nstat_sysinfo_scalar; + double nstat_sysinfo_distribution; + u_int8_t nstat_sysinfo_string[NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE]; } u; - u_int32_t nstat_sysinfo_valsize; - u_int8_t reserved[4]; + u_int32_t nstat_sysinfo_valsize; + u_int8_t reserved[4]; } nstat_sysinfo_keyval; -#define NSTAT_SYSINFO_FLAG_SCALAR 0x0001 -#define NSTAT_SYSINFO_FLAG_DISTRIBUTION 0x0002 -#define NSTAT_SYSINFO_FLAG_STRING 0x0004 +#define NSTAT_SYSINFO_FLAG_SCALAR 0x0001 +#define NSTAT_SYSINFO_FLAG_DISTRIBUTION 0x0002 +#define NSTAT_SYSINFO_FLAG_STRING 0x0004 -#define NSTAT_MAX_MSG_SIZE 4096 +#define NSTAT_MAX_MSG_SIZE 4096 -typedef struct nstat_sysinfo_counts -{ +typedef struct nstat_sysinfo_counts { /* Counters */ - u_int32_t nstat_sysinfo_len; - u_int32_t pad; - u_int8_t nstat_sysinfo_keyvals[]; + u_int32_t nstat_sysinfo_len; + u_int32_t pad; + u_int8_t nstat_sysinfo_keyvals[]; } nstat_sysinfo_counts; -enum -{ - NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL = 1 - ,NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL = 2 - ,NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL = 3 - ,NSTAT_SYSINFO_KEY_SOCK_MBCNT = 4 - ,NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT = 5 - ,NSTAT_SYSINFO_KEY_IPV4_AVGRTT = 6 - ,NSTAT_SYSINFO_KEY_IPV6_AVGRTT = 7 - ,NSTAT_SYSINFO_KEY_SEND_PLR = 8 - ,NSTAT_SYSINFO_KEY_RECV_PLR = 9 - ,NSTAT_SYSINFO_KEY_SEND_TLRTO = 10 - ,NSTAT_SYSINFO_KEY_SEND_REORDERRATE = 11 - ,NSTAT_SYSINFO_CONNECTION_ATTEMPTS = 12 - ,NSTAT_SYSINFO_CONNECTION_ACCEPTS = 13 - ,NSTAT_SYSINFO_ECN_CLIENT_SETUP = 14 - ,NSTAT_SYSINFO_ECN_SERVER_SETUP = 15 - ,NSTAT_SYSINFO_ECN_CLIENT_SUCCESS = 16 - ,NSTAT_SYSINFO_ECN_SERVER_SUCCESS = 17 - ,NSTAT_SYSINFO_ECN_NOT_SUPPORTED = 18 - ,NSTAT_SYSINFO_ECN_LOST_SYN = 19 - ,NSTAT_SYSINFO_ECN_LOST_SYNACK = 20 - ,NSTAT_SYSINFO_ECN_RECV_CE = 21 - ,NSTAT_SYSINFO_ECN_RECV_ECE = 22 - ,NSTAT_SYSINFO_ECN_SENT_ECE = 23 - ,NSTAT_SYSINFO_ECN_CONN_RECV_CE = 24 - ,NSTAT_SYSINFO_ECN_CONN_PLNOCE = 25 - ,NSTAT_SYSINFO_ECN_CONN_PL_CE = 26 - ,NSTAT_SYSINFO_ECN_CONN_NOPL_CE = 27 - ,NSTAT_SYSINFO_MBUF_16KB_TOTAL = 28 - ,NSTAT_SYSINFO_ECN_CLIENT_ENABLED = 29 - ,NSTAT_SYSINFO_ECN_SERVER_ENABLED = 30 - ,NSTAT_SYSINFO_ECN_CONN_RECV_ECE = 31 - ,NSTAT_SYSINFO_MBUF_MEM_RELEASED = 32 - ,NSTAT_SYSINFO_MBUF_DRAIN_CNT = 33 - ,NSTAT_SYSINFO_TFO_SYN_DATA_RCV = 34 - ,NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV = 35 - ,NSTAT_SYSINFO_TFO_COOKIE_SENT = 36 - ,NSTAT_SYSINFO_TFO_COOKIE_INVALID = 37 - ,NSTAT_SYSINFO_TFO_COOKIE_REQ = 38 - ,NSTAT_SYSINFO_TFO_COOKIE_RCV = 39 - ,NSTAT_SYSINFO_TFO_SYN_DATA_SENT = 40 - ,NSTAT_SYSINFO_TFO_SYN_DATA_ACKED = 41 - ,NSTAT_SYSINFO_TFO_SYN_LOSS = 42 - ,NSTAT_SYSINFO_TFO_BLACKHOLE = 43 - ,NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS = 44 - ,NSTAT_SYSINFO_ECN_FALLBACK_REORDER = 45 - ,NSTAT_SYSINFO_ECN_FALLBACK_CE = 46 - 
,NSTAT_SYSINFO_ECN_IFNET_TYPE = 47 - ,NSTAT_SYSINFO_ECN_IFNET_PROTO = 48 - ,NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP = 49 - ,NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP = 50 - ,NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS = 51 - ,NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS = 52 - ,NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT = 53 - ,NSTAT_SYSINFO_ECN_IFNET_SYN_LOST = 54 - ,NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST = 55 - ,NSTAT_SYSINFO_ECN_IFNET_RECV_CE = 56 - ,NSTAT_SYSINFO_ECN_IFNET_RECV_ECE = 57 - ,NSTAT_SYSINFO_ECN_IFNET_SENT_ECE = 58 - ,NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE = 59 - ,NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE = 60 - ,NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE = 61 - ,NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE = 62 - ,NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE = 63 - ,NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS = 64 - ,NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER = 65 - ,NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE = 66 - ,NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG = 67 - ,NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR = 68 - ,NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT = 69 - ,NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE = 70 - ,NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT = 71 - ,NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT = 72 - ,NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP = 73 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG = 74 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR = 75 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT = 76 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE = 77 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT = 78 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT = 79 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP = 80 - ,NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS = 81 - ,NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS = 82 - ,NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS = 83 - ,NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS = 84 - ,NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST = 85 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS = 86 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS = 87 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS = 88 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS = 89 - ,NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST = 90 - ,NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN = 91 - ,NSTAT_SYSINFO_TFO_COOKIE_WRONG = 92 - ,NSTAT_SYSINFO_TFO_NO_COOKIE_RCV = 93 - ,NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE = 94 - ,NSTAT_SYSINFO_TFO_SEND_BLACKHOLE = 95 - ,NSTAT_SYSINFO_KEY_SOCK_MBFLOOR = 96 - ,NSTAT_SYSINFO_IFNET_UNSENT_DATA = 97 - ,NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST = 98 - ,NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT = 99 - ,NSTAT_SYSINFO_LIM_IFNET_SIGNATURE = 100 - ,NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH = 101 - ,NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH = 102 - ,NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT = 103 - ,NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT = 104 - ,NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE = 105 - ,NSTAT_SYSINFO_LIM_IFNET_RTT_MIN = 106 - ,NSTAT_SYSINFO_LIM_IFNET_RTT_AVG = 107 - ,NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT = 108 - ,NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED = 109 - ,NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED = 110 - ,NSTAT_SYSINFO_LIM_IFNET_TYPE = 111 - - ,NSTAT_SYSINFO_API_IF_FLTR_ATTACH = 112 - ,NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS = 113 - ,NSTAT_SYSINFO_API_IP_FLTR_ADD = 114 - ,NSTAT_SYSINFO_API_IP_FLTR_ADD_OS = 115 - ,NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH = 116 - ,NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS = 117 - - ,NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL = 118 - ,NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL = 119 - ,NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS = 120 - ,NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID = 121 - - ,NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL = 122 - ,NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE = 123 - ,NSTAT_SYSINFO_API_SOCK_DOMAIN_INET = 124 - 
,NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6 = 125 - ,NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM = 126 - ,NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH = 127 - ,NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY = 128 - ,NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV = 129 - ,NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER = 130 - - ,NSTAT_SYSINFO_API_SOCK_INET_STREAM= 131 - ,NSTAT_SYSINFO_API_SOCK_INET_DGRAM = 132 - ,NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED = 133 - ,NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS = 134 - ,NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA = 135 - - ,NSTAT_SYSINFO_API_SOCK_INET6_STREAM= 136 - ,NSTAT_SYSINFO_API_SOCK_INET6_DGRAM = 137 - ,NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED = 138 - ,NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS = 139 - ,NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA = 140 - - ,NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN = 141 - ,NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS = 142 - - ,NSTAT_SYSINFO_API_SOCK_INET6_STREAM_EXTHDR_IN = 143 - ,NSTAT_SYSINFO_API_SOCK_INET6_STREAM_EXTHDR_OUT = 144 - ,NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_EXTHDR_IN = 145 - ,NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_EXTHDR_OUT = 146 - - ,NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM = 147 - ,NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM = 148 - - ,NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM = 149 - ,NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM = 150 - - ,NSTAT_SYSINFO_API_IFNET_ALLOC = 151 - ,NSTAT_SYSINFO_API_IFNET_ALLOC_OS = 152 - - ,NSTAT_SYSINFO_API_PF_ADDRULE = 153 - ,NSTAT_SYSINFO_API_PF_ADDRULE_OS = 154 - - ,NSTAT_SYSINFO_API_VMNET_START = 155 - - ,NSTAT_SYSINFO_API_IF_NETAGENT_ENABLED = 156 - - ,NSTAT_SYSINFO_API_REPORT_INTERVAL = 157 - - ,NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT = 158 - ,NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT = 159 - ,NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT = 160 - ,NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT = 161 /* _FP_ stands for first-party */ - ,NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT = 162 - ,NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT = 163 - ,NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK = 164 - ,NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK = 165 - ,NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI = 166 - ,NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL = 167 - ,NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS = 168 - ,NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS = 169 - ,NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI = 170 - ,NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL = 171 - ,NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS = 172 - ,NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS = 173 - ,NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI = 174 - ,NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL = 175 - ,NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI = 176 - ,NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES = 177 - ,NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES = 178 - ,NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES = 179 - ,NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES = 180 - ,NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES = 181 - ,NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES = 182 - ,NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI = 183 - ,NSTAT_SYSINFO_MPTCP_WIFI_PROXY = 184 - ,NSTAT_SYSINFO_MPTCP_CELL_PROXY = 185 - ,NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST = 186 - ,NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL = 187 +enum{ + NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL = 1 + , NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL = 2 + , NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL = 3 + , NSTAT_SYSINFO_KEY_SOCK_MBCNT = 4 + , NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT = 5 + , NSTAT_SYSINFO_KEY_IPV4_AVGRTT = 6 + , NSTAT_SYSINFO_KEY_IPV6_AVGRTT = 7 + , NSTAT_SYSINFO_KEY_SEND_PLR = 8 + , NSTAT_SYSINFO_KEY_RECV_PLR = 9 + , NSTAT_SYSINFO_KEY_SEND_TLRTO = 10 + , NSTAT_SYSINFO_KEY_SEND_REORDERRATE = 11 + , 
NSTAT_SYSINFO_CONNECTION_ATTEMPTS = 12 + , NSTAT_SYSINFO_CONNECTION_ACCEPTS = 13 + , NSTAT_SYSINFO_ECN_CLIENT_SETUP = 14 + , NSTAT_SYSINFO_ECN_SERVER_SETUP = 15 + , NSTAT_SYSINFO_ECN_CLIENT_SUCCESS = 16 + , NSTAT_SYSINFO_ECN_SERVER_SUCCESS = 17 + , NSTAT_SYSINFO_ECN_NOT_SUPPORTED = 18 + , NSTAT_SYSINFO_ECN_LOST_SYN = 19 + , NSTAT_SYSINFO_ECN_LOST_SYNACK = 20 + , NSTAT_SYSINFO_ECN_RECV_CE = 21 + , NSTAT_SYSINFO_ECN_RECV_ECE = 22 + , NSTAT_SYSINFO_ECN_SENT_ECE = 23 + , NSTAT_SYSINFO_ECN_CONN_RECV_CE = 24 + , NSTAT_SYSINFO_ECN_CONN_PLNOCE = 25 + , NSTAT_SYSINFO_ECN_CONN_PL_CE = 26 + , NSTAT_SYSINFO_ECN_CONN_NOPL_CE = 27 + , NSTAT_SYSINFO_MBUF_16KB_TOTAL = 28 + , NSTAT_SYSINFO_ECN_CLIENT_ENABLED = 29 + , NSTAT_SYSINFO_ECN_SERVER_ENABLED = 30 + , NSTAT_SYSINFO_ECN_CONN_RECV_ECE = 31 + , NSTAT_SYSINFO_MBUF_MEM_RELEASED = 32 + , NSTAT_SYSINFO_MBUF_DRAIN_CNT = 33 + , NSTAT_SYSINFO_TFO_SYN_DATA_RCV = 34 + , NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV = 35 + , NSTAT_SYSINFO_TFO_COOKIE_SENT = 36 + , NSTAT_SYSINFO_TFO_COOKIE_INVALID = 37 + , NSTAT_SYSINFO_TFO_COOKIE_REQ = 38 + , NSTAT_SYSINFO_TFO_COOKIE_RCV = 39 + , NSTAT_SYSINFO_TFO_SYN_DATA_SENT = 40 + , NSTAT_SYSINFO_TFO_SYN_DATA_ACKED = 41 + , NSTAT_SYSINFO_TFO_SYN_LOSS = 42 + , NSTAT_SYSINFO_TFO_BLACKHOLE = 43 + , NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS = 44 + , NSTAT_SYSINFO_ECN_FALLBACK_REORDER = 45 + , NSTAT_SYSINFO_ECN_FALLBACK_CE = 46 + , NSTAT_SYSINFO_ECN_IFNET_TYPE = 47 + , NSTAT_SYSINFO_ECN_IFNET_PROTO = 48 + , NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP = 49 + , NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP = 50 + , NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS = 51 + , NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS = 52 + , NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT = 53 + , NSTAT_SYSINFO_ECN_IFNET_SYN_LOST = 54 + , NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST = 55 + , NSTAT_SYSINFO_ECN_IFNET_RECV_CE = 56 + , NSTAT_SYSINFO_ECN_IFNET_RECV_ECE = 57 + , NSTAT_SYSINFO_ECN_IFNET_SENT_ECE = 58 + , NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE = 59 + , NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE = 60 + , NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE = 61 + , NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE = 62 + , NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE = 63 + , NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS = 64 + , NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER = 65 + , NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE = 66 + , NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG = 67 + , NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR = 68 + , NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT = 69 + , NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE = 70 + , NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT = 71 + , NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT = 72 + , NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP = 73 + , NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG = 74 + , NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR = 75 + , NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT = 76 + , NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE = 77 + , NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT = 78 + , NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT = 79 + , NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP = 80 + , NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS = 81 + , NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS = 82 + , NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS = 83 + , NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS = 84 + , NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST = 85 + , NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS = 86 + , NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS = 87 + , NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS = 88 + , NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS = 89 + , NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST = 90 + , NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN = 91 + , NSTAT_SYSINFO_TFO_COOKIE_WRONG = 92 + , 
NSTAT_SYSINFO_TFO_NO_COOKIE_RCV = 93 + , NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE = 94 + , NSTAT_SYSINFO_TFO_SEND_BLACKHOLE = 95 + , NSTAT_SYSINFO_KEY_SOCK_MBFLOOR = 96 + , NSTAT_SYSINFO_IFNET_UNSENT_DATA = 97 + , NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST = 98 + , NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT = 99 + , NSTAT_SYSINFO_LIM_IFNET_SIGNATURE = 100 + , NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH = 101 + , NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH = 102 + , NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT = 103 + , NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT = 104 + , NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE = 105 + , NSTAT_SYSINFO_LIM_IFNET_RTT_MIN = 106 + , NSTAT_SYSINFO_LIM_IFNET_RTT_AVG = 107 + , NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT = 108 + , NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED = 109 + , NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED = 110 + , NSTAT_SYSINFO_LIM_IFNET_TYPE = 111 + + , NSTAT_SYSINFO_API_IF_FLTR_ATTACH = 112 + , NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS = 113 + , NSTAT_SYSINFO_API_IP_FLTR_ADD = 114 + , NSTAT_SYSINFO_API_IP_FLTR_ADD_OS = 115 + , NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH = 116 + , NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS = 117 + + , NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL = 118 + , NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL = 119 + , NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS = 120 + , NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID = 121 + + , NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL = 122 + , NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE = 123 + , NSTAT_SYSINFO_API_SOCK_DOMAIN_INET = 124 + , NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6 = 125 + , NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM = 126 + , NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH = 127 + , NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY = 128 + , NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV = 129 + , NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER = 130 + + , NSTAT_SYSINFO_API_SOCK_INET_STREAM= 131 + , NSTAT_SYSINFO_API_SOCK_INET_DGRAM = 132 + , NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED = 133 + , NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS = 134 + , NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA = 135 + + , NSTAT_SYSINFO_API_SOCK_INET6_STREAM= 136 + , NSTAT_SYSINFO_API_SOCK_INET6_DGRAM = 137 + , NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED = 138 + , NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS = 139 + , NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA = 140 + + , NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN = 141 + , NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS = 142 + + , NSTAT_SYSINFO_API_SOCK_INET6_STREAM_EXTHDR_IN = 143 + , NSTAT_SYSINFO_API_SOCK_INET6_STREAM_EXTHDR_OUT = 144 + , NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_EXTHDR_IN = 145 + , NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_EXTHDR_OUT = 146 + + , NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM = 147 + , NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM = 148 + + , NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM = 149 + , NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM = 150 + + , NSTAT_SYSINFO_API_IFNET_ALLOC = 151 + , NSTAT_SYSINFO_API_IFNET_ALLOC_OS = 152 + + , NSTAT_SYSINFO_API_PF_ADDRULE = 153 + , NSTAT_SYSINFO_API_PF_ADDRULE_OS = 154 + + , NSTAT_SYSINFO_API_VMNET_START = 155 + + , NSTAT_SYSINFO_API_IF_NETAGENT_ENABLED = 156 + + , NSTAT_SYSINFO_API_REPORT_INTERVAL = 157 + + , NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT = 158 + , NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT = 159 + , NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT = 160 + , NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT = 161 /* _FP_ stands for first-party */ + , NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT = 162 + , NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT = 163 + , NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK = 164 + , NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK = 165 + , 
NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI = 166 + , NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL = 167 + , NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS = 168 + , NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS = 169 + , NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI = 170 + , NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL = 171 + , NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS = 172 + , NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS = 173 + , NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI = 174 + , NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL = 175 + , NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI = 176 + , NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES = 177 + , NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES = 178 + , NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES = 179 + , NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES = 180 + , NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES = 181 + , NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES = 182 + , NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI = 183 + , NSTAT_SYSINFO_MPTCP_WIFI_PROXY = 184 + , NSTAT_SYSINFO_MPTCP_CELL_PROXY = 185 + , NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST = 186 + , NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL = 187 // NSTAT_SYSINFO_ENUM_VERSION must be updated any time a value is added -#define NSTAT_SYSINFO_ENUM_VERSION 20180416 +#define NSTAT_SYSINFO_ENUM_VERSION 20180416 }; -#define NSTAT_SYSINFO_API_FIRST NSTAT_SYSINFO_API_IF_FLTR_ATTACH -#define NSTAT_SYSINFO_API_LAST NSTAT_SYSINFO_API_REPORT_INTERVAL +#define NSTAT_SYSINFO_API_FIRST NSTAT_SYSINFO_API_IF_FLTR_ATTACH +#define NSTAT_SYSINFO_API_LAST NSTAT_SYSINFO_API_REPORT_INTERVAL #pragma mark -- Network Statistics Providers -- @@ -344,284 +339,265 @@ enum #define NSTAT_IFNET_FLOWSWITCH_VALUE_UNOBTAINABLE 0x2000 -enum -{ - NSTAT_PROVIDER_NONE = 0 - ,NSTAT_PROVIDER_ROUTE = 1 - ,NSTAT_PROVIDER_TCP_KERNEL = 2 - ,NSTAT_PROVIDER_TCP_USERLAND = 3 - ,NSTAT_PROVIDER_UDP_KERNEL = 4 - ,NSTAT_PROVIDER_UDP_USERLAND = 5 - ,NSTAT_PROVIDER_IFNET = 6 - ,NSTAT_PROVIDER_SYSINFO = 7 +enum{ + NSTAT_PROVIDER_NONE = 0 + , NSTAT_PROVIDER_ROUTE = 1 + , NSTAT_PROVIDER_TCP_KERNEL = 2 + , NSTAT_PROVIDER_TCP_USERLAND = 3 + , NSTAT_PROVIDER_UDP_KERNEL = 4 + , NSTAT_PROVIDER_UDP_USERLAND = 5 + , NSTAT_PROVIDER_IFNET = 6 + , NSTAT_PROVIDER_SYSINFO = 7 }; #define NSTAT_PROVIDER_LAST NSTAT_PROVIDER_SYSINFO #define NSTAT_PROVIDER_COUNT (NSTAT_PROVIDER_LAST+1) -typedef struct nstat_route_add_param -{ - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; +typedef struct nstat_route_add_param { + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } dst; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } mask; - u_int32_t ifindex; + u_int32_t ifindex; } nstat_route_add_param; -typedef struct nstat_tcp_add_param -{ - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; +typedef struct nstat_tcp_add_param { + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } local; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } remote; } nstat_tcp_add_param; -typedef struct nstat_tcp_descriptor -{ - u_int64_t upid __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t eupid __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t start_timestamp __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t timestamp __attribute__((aligned(sizeof(u_int64_t)))); +typedef struct nstat_tcp_descriptor { + u_int64_t upid __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t eupid __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t 
start_timestamp __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t timestamp __attribute__((aligned(sizeof(u_int64_t)))); activity_bitmap_t activity_bitmap; - u_int32_t ifindex; - u_int32_t state; - - u_int32_t sndbufsize; - u_int32_t sndbufused; - u_int32_t rcvbufsize; - u_int32_t rcvbufused; - u_int32_t txunacked; - u_int32_t txwindow; - u_int32_t txcwindow; - u_int32_t traffic_class; - u_int32_t traffic_mgt_flags; - - u_int32_t pid; - u_int32_t epid; - - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; + u_int32_t ifindex; + u_int32_t state; + + u_int32_t sndbufsize; + u_int32_t sndbufused; + u_int32_t rcvbufsize; + u_int32_t rcvbufused; + u_int32_t txunacked; + u_int32_t txwindow; + u_int32_t txcwindow; + u_int32_t traffic_class; + u_int32_t traffic_mgt_flags; + + u_int32_t pid; + u_int32_t epid; + + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } local; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } remote; - char cc_algo[16]; - char pname[64]; + char cc_algo[16]; + char pname[64]; - uuid_t uuid; - uuid_t euuid; - uuid_t vuuid; + uuid_t uuid; + uuid_t euuid; + uuid_t vuuid; union { struct tcp_conn_status connstatus; // On armv7k, tcp_conn_status is 1 byte instead of 4 - uint8_t __pad_connstatus[4]; + uint8_t __pad_connstatus[4]; }; - uint16_t ifnet_properties __attribute__((aligned(4))); + uint16_t ifnet_properties __attribute__((aligned(4))); - u_int8_t reserved[6]; + u_int8_t reserved[6]; } nstat_tcp_descriptor; -typedef struct nstat_tcp_add_param nstat_udp_add_param; +typedef struct nstat_tcp_add_param nstat_udp_add_param; -typedef struct nstat_udp_descriptor -{ - u_int64_t upid __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t eupid __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t start_timestamp __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t timestamp __attribute__((aligned(sizeof(u_int64_t)))); +typedef struct nstat_udp_descriptor { + u_int64_t upid __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t eupid __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t start_timestamp __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t timestamp __attribute__((aligned(sizeof(u_int64_t)))); activity_bitmap_t activity_bitmap; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } local; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; } remote; - u_int32_t ifindex; + u_int32_t ifindex; - u_int32_t rcvbufsize; - u_int32_t rcvbufused; - u_int32_t traffic_class; + u_int32_t rcvbufsize; + u_int32_t rcvbufused; + u_int32_t traffic_class; - u_int32_t pid; - char pname[64]; - u_int32_t epid; + u_int32_t pid; + char pname[64]; + u_int32_t epid; - uuid_t uuid; - uuid_t euuid; - uuid_t vuuid; - uint16_t ifnet_properties; + uuid_t uuid; + uuid_t euuid; + uuid_t vuuid; + uint16_t ifnet_properties; - u_int8_t reserved[6]; + u_int8_t reserved[6]; } nstat_udp_descriptor; -typedef struct nstat_route_descriptor -{ - u_int64_t id __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t parent_id __attribute__((aligned(sizeof(u_int64_t)))); - u_int64_t gateway_id __attribute__((aligned(sizeof(u_int64_t)))); - - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; - struct sockaddr sa; +typedef struct nstat_route_descriptor { + u_int64_t id __attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t parent_id 
__attribute__((aligned(sizeof(u_int64_t)))); + u_int64_t gateway_id __attribute__((aligned(sizeof(u_int64_t)))); + + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; + struct sockaddr sa; } dst; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; - struct sockaddr sa; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; + struct sockaddr sa; } mask; - union - { - struct sockaddr_in v4; - struct sockaddr_in6 v6; - struct sockaddr sa; + union{ + struct sockaddr_in v4; + struct sockaddr_in6 v6; + struct sockaddr sa; } gateway; - u_int32_t ifindex; - u_int32_t flags; + u_int32_t ifindex; + u_int32_t flags; - u_int8_t reserved[4]; + u_int8_t reserved[4]; } nstat_route_descriptor; -typedef struct nstat_ifnet_add_param -{ - u_int64_t threshold __attribute__((aligned(sizeof(u_int64_t)))); - u_int32_t ifindex; +typedef struct nstat_ifnet_add_param { + u_int64_t threshold __attribute__((aligned(sizeof(u_int64_t)))); + u_int32_t ifindex; - u_int8_t reserved[4]; + u_int8_t reserved[4]; } nstat_ifnet_add_param; -typedef struct nstat_ifnet_desc_cellular_status -{ +typedef struct nstat_ifnet_desc_cellular_status { u_int32_t valid_bitmask; /* indicates which fields are valid */ -#define NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID 0x1 -#define NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID 0x2 -#define NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID 0x4 -#define NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID 0x8 -#define NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID 0x10 -#define NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID 0x20 -#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID 0x40 -#define NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID 0x80 -#define NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID 0x100 -#define NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID 0x200 -#define NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID 0x400 -#define NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID 0x800 -#define NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID 0x1000 -#define NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID 0x2000 -#define NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID 0x4000 -#define NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID 0x8000 +#define NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID 0x1 +#define NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID 0x2 +#define NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID 0x4 +#define NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID 0x8 +#define NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID 0x10 +#define NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID 0x20 +#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID 0x40 +#define NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID 0x80 +#define NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID 0x100 +#define NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID 0x200 +#define NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID 0x400 +#define NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID 0x800 +#define NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID 0x1000 +#define NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID 0x2000 +#define NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID 0x4000 +#define NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID 0x8000 u_int32_t link_quality_metric; u_int32_t ul_effective_bandwidth; /* Measured uplink bandwidth based on - current activity (bps) */ + * current activity (bps) */ u_int32_t ul_max_bandwidth; /* Maximum supported uplink bandwidth - (bps) */ + * (bps) */ u_int32_t ul_min_latency; /* min expected uplink latency for first hop - (ms) */ + * (ms) */ u_int32_t ul_effective_latency; /* current expected 
uplink latency for - first hop (ms) */ + * first hop (ms) */ u_int32_t ul_max_latency; /* max expected uplink latency first hop - (ms) */ + * (ms) */ u_int32_t ul_retxt_level; /* Retransmission metric */ -#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE 1 -#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW 2 -#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM 3 -#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH 4 +#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE 1 +#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW 2 +#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM 3 +#define NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH 4 u_int32_t ul_bytes_lost; /* % of total bytes lost on uplink in Q10 - format */ + * format */ u_int32_t ul_min_queue_size; /* minimum bytes in queue */ u_int32_t ul_avg_queue_size; /* average bytes in queue */ u_int32_t ul_max_queue_size; /* maximum bytes in queue */ u_int32_t dl_effective_bandwidth; /* Measured downlink bandwidth based - on current activity (bps) */ + * on current activity (bps) */ u_int32_t dl_max_bandwidth; /* Maximum supported downlink bandwidth - (bps) */ + * (bps) */ u_int32_t config_inactivity_time; /* ms */ u_int32_t config_backoff_time; /* new connections backoff time in ms */ -#define NSTAT_IFNET_DESC_MSS_RECOMMENDED_NONE 0x0 -#define NSTAT_IFNET_DESC_MSS_RECOMMENDED_MEDIUM 0x1 -#define NSTAT_IFNET_DESC_MSS_RECOMMENDED_LOW 0x2 +#define NSTAT_IFNET_DESC_MSS_RECOMMENDED_NONE 0x0 +#define NSTAT_IFNET_DESC_MSS_RECOMMENDED_MEDIUM 0x1 +#define NSTAT_IFNET_DESC_MSS_RECOMMENDED_LOW 0x2 u_int16_t mss_recommended; /* recommended MSS */ - u_int8_t reserved[2]; + u_int8_t reserved[2]; } nstat_ifnet_desc_cellular_status; typedef struct nstat_ifnet_desc_wifi_status { u_int32_t valid_bitmask; -#define NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID 0x1 -#define NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID 0x2 -#define NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID 0x4 -#define NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID 0x8 -#define NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID 0x10 -#define NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID 0x20 -#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID 0x40 -#define NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID 0x80 -#define NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID 0x100 -#define NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID 0x200 -#define NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID 0x400 -#define NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID 0x800 -#define NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID 0x1000 -#define NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID 0x2000 -#define NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID 0x4000 -#define NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID 0x8000 -#define NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID 0x10000 -#define NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID 0x20000 -#define NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID 0x40000 +#define NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID 0x1 +#define NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID 0x2 +#define NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID 0x4 +#define NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID 0x8 +#define NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID 0x10 +#define NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID 0x20 +#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID 0x40 +#define NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID 0x80 +#define NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID 0x100 +#define NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID 0x200 +#define NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID 
0x400 +#define NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID 0x800 +#define NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID 0x1000 +#define NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID 0x2000 +#define NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID 0x4000 +#define NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID 0x8000 +#define NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID 0x10000 +#define NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID 0x20000 +#define NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID 0x40000 u_int32_t link_quality_metric; /* link quality metric */ u_int32_t ul_effective_bandwidth; /* Measured uplink bandwidth based on - current activity (bps) */ + * current activity (bps) */ u_int32_t ul_max_bandwidth; /* Maximum supported uplink bandwidth - (bps) */ + * (bps) */ u_int32_t ul_min_latency; /* min expected uplink latency for first hop - (ms) */ + * (ms) */ u_int32_t ul_effective_latency; /* current expected uplink latency for - first hop (ms) */ + * first hop (ms) */ u_int32_t ul_max_latency; /* max expected uplink latency for first hop - (ms) */ + * (ms) */ u_int32_t ul_retxt_level; /* Retransmission metric */ -#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE 1 -#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW 2 -#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM 3 -#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH 4 +#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE 1 +#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW 2 +#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM 3 +#define NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH 4 u_int32_t ul_bytes_lost; /* % of total bytes lost on uplink in Q10 - format */ + * format */ u_int32_t ul_error_rate; /* % of bytes dropped on uplink after many - retransmissions in Q10 format */ + * retransmissions in Q10 format */ u_int32_t dl_effective_bandwidth; /* Measured downlink bandwidth based - on current activity (bps) */ + * on current activity (bps) */ u_int32_t dl_max_bandwidth; /* Maximum supported downlink bandwidth - (bps) */ + * (bps) */ /* * The download latency values indicate the time AP may have to wait * for the driver to receive the packet. 
These values give the range @@ -630,319 +606,289 @@ typedef struct nstat_ifnet_desc_wifi_status { */ u_int32_t dl_min_latency; /* min expected latency for first hop in ms */ u_int32_t dl_effective_latency; /* current expected latency for first - hop in ms */ + * hop in ms */ u_int32_t dl_max_latency; /* max expected latency for first hop in ms */ u_int32_t dl_error_rate; /* % of CRC or other errors in Q10 format */ u_int32_t config_frequency; /* 2.4 or 5 GHz */ -#define NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ 1 -#define NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ 2 +#define NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ 1 +#define NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ 2 u_int32_t config_multicast_rate; /* bps */ u_int32_t scan_count; /* scan count during the previous period */ u_int32_t scan_duration; /* scan duration in ms */ } nstat_ifnet_desc_wifi_status; -enum -{ +enum{ NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE = 0 - ,NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR = 1 - ,NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI = 2 - ,NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET = 3 + , NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR = 1 + , NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI = 2 + , NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET = 3 }; -typedef struct nstat_ifnet_desc_link_status -{ - u_int32_t link_status_type; +typedef struct nstat_ifnet_desc_link_status { + u_int32_t link_status_type; union { - nstat_ifnet_desc_cellular_status cellular; - nstat_ifnet_desc_wifi_status wifi; + nstat_ifnet_desc_cellular_status cellular; + nstat_ifnet_desc_wifi_status wifi; } u; } nstat_ifnet_desc_link_status; -#ifndef IF_DESCSIZE -#define IF_DESCSIZE 128 +#ifndef IF_DESCSIZE +#define IF_DESCSIZE 128 #endif -typedef struct nstat_ifnet_descriptor -{ - u_int64_t threshold __attribute__((aligned(sizeof(u_int64_t)))); - u_int32_t ifindex; - nstat_ifnet_desc_link_status link_status; - unsigned int type; - char description[IF_DESCSIZE]; - char name[IFNAMSIZ+1]; - u_int8_t reserved[3]; +typedef struct nstat_ifnet_descriptor { + u_int64_t threshold __attribute__((aligned(sizeof(u_int64_t)))); + u_int32_t ifindex; + nstat_ifnet_desc_link_status link_status; + unsigned int type; + char description[IF_DESCSIZE]; + char name[IFNAMSIZ + 1]; + u_int8_t reserved[3]; } nstat_ifnet_descriptor; -typedef struct nstat_sysinfo_descriptor -{ - u_int32_t flags; +typedef struct nstat_sysinfo_descriptor { + u_int32_t flags; } nstat_sysinfo_descriptor; -typedef struct nstat_sysinfo_add_param -{ +typedef struct nstat_sysinfo_add_param { /* To indicate which system level information should be collected */ - u_int32_t flags; + u_int32_t flags; } nstat_sysinfo_add_param; -#define NSTAT_SYSINFO_MBUF_STATS 0x0001 -#define NSTAT_SYSINFO_TCP_STATS 0x0002 -#define NSTAT_SYSINFO_IFNET_ECN_STATS 0x0003 -#define NSTAT_SYSINFO_LIM_STATS 0x0004 /* Low Internet mode stats */ -#define NSTAT_SYSINFO_NET_API_STATS 0x0005 /* API and KPI stats */ +#define NSTAT_SYSINFO_MBUF_STATS 0x0001 +#define NSTAT_SYSINFO_TCP_STATS 0x0002 +#define NSTAT_SYSINFO_IFNET_ECN_STATS 0x0003 +#define NSTAT_SYSINFO_LIM_STATS 0x0004 /* Low Internet mode stats */ +#define NSTAT_SYSINFO_NET_API_STATS 0x0005 /* API and KPI stats */ #pragma mark -- Network Statistics User Client -- -#define NET_STAT_CONTROL_NAME "com.apple.network.statistics" +#define NET_STAT_CONTROL_NAME "com.apple.network.statistics" -enum -{ +enum{ // generic response messages - NSTAT_MSG_TYPE_SUCCESS = 0 - ,NSTAT_MSG_TYPE_ERROR = 1 - - // Requests - ,NSTAT_MSG_TYPE_ADD_SRC = 1001 - ,NSTAT_MSG_TYPE_ADD_ALL_SRCS = 
1002 - ,NSTAT_MSG_TYPE_REM_SRC = 1003 - ,NSTAT_MSG_TYPE_QUERY_SRC = 1004 - ,NSTAT_MSG_TYPE_GET_SRC_DESC = 1005 - ,NSTAT_MSG_TYPE_SET_FILTER = 1006 - ,NSTAT_MSG_TYPE_GET_UPDATE = 1007 - ,NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO = 1008 - - // Responses/Notfications - ,NSTAT_MSG_TYPE_SRC_ADDED = 10001 - ,NSTAT_MSG_TYPE_SRC_REMOVED = 10002 - ,NSTAT_MSG_TYPE_SRC_DESC = 10003 - ,NSTAT_MSG_TYPE_SRC_COUNTS = 10004 - ,NSTAT_MSG_TYPE_SYSINFO_COUNTS = 10005 - ,NSTAT_MSG_TYPE_SRC_UPDATE = 10006 + NSTAT_MSG_TYPE_SUCCESS = 0 + , NSTAT_MSG_TYPE_ERROR = 1 + + // Requests + , NSTAT_MSG_TYPE_ADD_SRC = 1001 + , NSTAT_MSG_TYPE_ADD_ALL_SRCS = 1002 + , NSTAT_MSG_TYPE_REM_SRC = 1003 + , NSTAT_MSG_TYPE_QUERY_SRC = 1004 + , NSTAT_MSG_TYPE_GET_SRC_DESC = 1005 + , NSTAT_MSG_TYPE_SET_FILTER = 1006 + , NSTAT_MSG_TYPE_GET_UPDATE = 1007 + , NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO = 1008 + + // Responses/Notifications + , NSTAT_MSG_TYPE_SRC_ADDED = 10001 + , NSTAT_MSG_TYPE_SRC_REMOVED = 10002 + , NSTAT_MSG_TYPE_SRC_DESC = 10003 + , NSTAT_MSG_TYPE_SRC_COUNTS = 10004 + , NSTAT_MSG_TYPE_SYSINFO_COUNTS = 10005 + , NSTAT_MSG_TYPE_SRC_UPDATE = 10006 }; -enum -{ - NSTAT_SRC_REF_ALL = 0xffffffffffffffffULL - ,NSTAT_SRC_REF_INVALID = 0 +enum{ + NSTAT_SRC_REF_ALL = 0xffffffffffffffffULL + , NSTAT_SRC_REF_INVALID = 0 }; /* Source-level filters */ -enum -{ +enum{ NSTAT_FILTER_NOZEROBYTES = 0x00000001 }; /* Provider-level filters */ -enum -{ +enum{ NSTAT_FILTER_ACCEPT_UNKNOWN = 0x00000001 - ,NSTAT_FILTER_ACCEPT_LOOPBACK = 0x00000002 - ,NSTAT_FILTER_ACCEPT_CELLULAR = 0x00000004 - ,NSTAT_FILTER_ACCEPT_WIFI = 0x00000008 - ,NSTAT_FILTER_ACCEPT_WIRED = 0x00000010 - ,NSTAT_FILTER_ACCEPT_AWDL = 0x00000020 - ,NSTAT_FILTER_ACCEPT_EXPENSIVE = 0x00000040 - ,NSTAT_FILTER_ACCEPT_CELLFALLBACK = 0x00000100 - ,NSTAT_FILTER_IFNET_FLAGS = 0x00000FFF - - ,NSTAT_FILTER_TCP_NO_LISTENER = 0x00001000 - ,NSTAT_FILTER_TCP_ONLY_LISTENER = 0x00002000 - ,NSTAT_FILTER_TCP_INTERFACE_ATTACH = 0x00004000 - ,NSTAT_FILTER_TCP_NO_EARLY_CLOSE = 0x00008000 - ,NSTAT_FILTER_TCP_FLAGS = 0x0000F000 - - ,NSTAT_FILTER_UDP_INTERFACE_ATTACH = 0x00010000 - ,NSTAT_FILTER_UDP_FLAGS = 0x000F0000 - - ,NSTAT_FILTER_SUPPRESS_SRC_ADDED = 0x00100000 - ,NSTAT_FILTER_REQUIRE_SRC_ADDED = 0x00200000 - ,NSTAT_FILTER_PROVIDER_NOZEROBYTES = 0x00400000 - - ,NSTAT_FILTER_SPECIFIC_USER_BY_PID = 0x01000000 - ,NSTAT_FILTER_SPECIFIC_USER_BY_EPID = 0x02000000 - ,NSTAT_FILTER_SPECIFIC_USER_BY_UUID = 0x04000000 - ,NSTAT_FILTER_SPECIFIC_USER_BY_EUUID = 0x08000000 - ,NSTAT_FILTER_SPECIFIC_USER = 0x0F000000 + , NSTAT_FILTER_ACCEPT_LOOPBACK = 0x00000002 + , NSTAT_FILTER_ACCEPT_CELLULAR = 0x00000004 + , NSTAT_FILTER_ACCEPT_WIFI = 0x00000008 + , NSTAT_FILTER_ACCEPT_WIRED = 0x00000010 + , NSTAT_FILTER_ACCEPT_AWDL = 0x00000020 + , NSTAT_FILTER_ACCEPT_EXPENSIVE = 0x00000040 + , NSTAT_FILTER_ACCEPT_CELLFALLBACK = 0x00000100 + , NSTAT_FILTER_IFNET_FLAGS = 0x00000FFF + + , NSTAT_FILTER_TCP_NO_LISTENER = 0x00001000 + , NSTAT_FILTER_TCP_ONLY_LISTENER = 0x00002000 + , NSTAT_FILTER_TCP_INTERFACE_ATTACH = 0x00004000 + , NSTAT_FILTER_TCP_NO_EARLY_CLOSE = 0x00008000 + , NSTAT_FILTER_TCP_FLAGS = 0x0000F000 + + , NSTAT_FILTER_UDP_INTERFACE_ATTACH = 0x00010000 + , NSTAT_FILTER_UDP_FLAGS = 0x000F0000 + + , NSTAT_FILTER_SUPPRESS_SRC_ADDED = 0x00100000 + , NSTAT_FILTER_REQUIRE_SRC_ADDED = 0x00200000 + , NSTAT_FILTER_PROVIDER_NOZEROBYTES = 0x00400000 + + , NSTAT_FILTER_SPECIFIC_USER_BY_PID = 0x01000000 + , NSTAT_FILTER_SPECIFIC_USER_BY_EPID = 0x02000000 + , NSTAT_FILTER_SPECIFIC_USER_BY_UUID = 0x04000000 + , NSTAT_FILTER_SPECIFIC_USER_BY_EUUID
= 0x08000000 + , NSTAT_FILTER_SPECIFIC_USER = 0x0F000000 }; -enum -{ - NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE = 1 << 0, - NSTAT_MSG_HDR_FLAG_CONTINUATION = 1 << 1, - NSTAT_MSG_HDR_FLAG_CLOSING = 1 << 2, +enum{ + NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE = 1 << 0, + NSTAT_MSG_HDR_FLAG_CONTINUATION = 1 << 1, + NSTAT_MSG_HDR_FLAG_CLOSING = 1 << 2, }; -typedef struct nstat_msg_hdr -{ - u_int64_t context __attribute__((aligned(sizeof(u_int64_t)))); - u_int32_t type; - u_int16_t length; - u_int16_t flags; +typedef struct nstat_msg_hdr { + u_int64_t context __attribute__((aligned(sizeof(u_int64_t)))); + u_int32_t type; + u_int16_t length; + u_int16_t flags; } nstat_msg_hdr; -typedef struct nstat_msg_error -{ - nstat_msg_hdr hdr; - u_int32_t error; // errno error - u_int8_t reserved[4]; +typedef struct nstat_msg_error { + nstat_msg_hdr hdr; + u_int32_t error; // errno error + u_int8_t reserved[4]; } nstat_msg_error; -#define NSTAT_ADD_SRC_FIELDS \ - nstat_msg_hdr hdr; \ - nstat_provider_id_t provider; \ - u_int8_t reserved[4] \ +#define NSTAT_ADD_SRC_FIELDS \ + nstat_msg_hdr hdr; \ + nstat_provider_id_t provider; \ + u_int8_t reserved[4] \ -typedef struct nstat_msg_add_src -{ +typedef struct nstat_msg_add_src { NSTAT_ADD_SRC_FIELDS; - u_int8_t param[]; + u_int8_t param[]; } nstat_msg_add_src_req; -typedef struct nstat_msg_add_src_header -{ +typedef struct nstat_msg_add_src_header { NSTAT_ADD_SRC_FIELDS; } nstat_msg_add_src_header; -typedef struct nstat_msg_add_src_convenient -{ - nstat_msg_add_src_header hdr; +typedef struct nstat_msg_add_src_convenient { + nstat_msg_add_src_header hdr; union { - nstat_route_add_param route; - nstat_tcp_add_param tcp; - nstat_udp_add_param udp; - nstat_ifnet_add_param ifnet; - nstat_sysinfo_add_param sysinfo; + nstat_route_add_param route; + nstat_tcp_add_param tcp; + nstat_udp_add_param udp; + nstat_ifnet_add_param ifnet; + nstat_sysinfo_add_param sysinfo; }; } nstat_msg_add_src_convenient; #undef NSTAT_ADD_SRC_FIELDS -typedef struct nstat_msg_add_all_srcs -{ - nstat_msg_hdr hdr; - u_int64_t filter __attribute__((aligned(sizeof(u_int64_t)))); - nstat_event_flags_t events __attribute__((aligned(sizeof(u_int64_t)))); - nstat_provider_id_t provider; - pid_t target_pid; - uuid_t target_uuid; +typedef struct nstat_msg_add_all_srcs { + nstat_msg_hdr hdr; + u_int64_t filter __attribute__((aligned(sizeof(u_int64_t)))); + nstat_event_flags_t events __attribute__((aligned(sizeof(u_int64_t)))); + nstat_provider_id_t provider; + pid_t target_pid; + uuid_t target_uuid; } nstat_msg_add_all_srcs; -typedef struct nstat_msg_src_added -{ - nstat_msg_hdr hdr; - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); - nstat_provider_id_t provider; - u_int8_t reserved[4]; +typedef struct nstat_msg_src_added { + nstat_msg_hdr hdr; + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); + nstat_provider_id_t provider; + u_int8_t reserved[4]; } nstat_msg_src_added; -typedef struct nstat_msg_rem_src -{ - nstat_msg_hdr hdr; - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); +typedef struct nstat_msg_rem_src { + nstat_msg_hdr hdr; + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); } nstat_msg_rem_src_req; -typedef struct nstat_msg_get_src_description -{ - nstat_msg_hdr hdr; - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); +typedef struct nstat_msg_get_src_description { + nstat_msg_hdr hdr; + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); } nstat_msg_get_src_description; -typedef struct 
nstat_msg_set_filter -{ - nstat_msg_hdr hdr; - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); - u_int32_t filter; - u_int8_t reserved[4]; +typedef struct nstat_msg_set_filter { + nstat_msg_hdr hdr; + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); + u_int32_t filter; + u_int8_t reserved[4]; } nstat_msg_set_filter; -#define NSTAT_SRC_DESCRIPTION_FIELDS \ - nstat_msg_hdr hdr; \ - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); \ - nstat_event_flags_t event_flags __attribute__((aligned(sizeof(u_int64_t)))); \ - nstat_provider_id_t provider; \ +#define NSTAT_SRC_DESCRIPTION_FIELDS \ + nstat_msg_hdr hdr; \ + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); \ + nstat_event_flags_t event_flags __attribute__((aligned(sizeof(u_int64_t)))); \ + nstat_provider_id_t provider; \ u_int8_t reserved[4] -typedef struct nstat_msg_src_description -{ +typedef struct nstat_msg_src_description { NSTAT_SRC_DESCRIPTION_FIELDS; - u_int8_t data[]; + u_int8_t data[]; } nstat_msg_src_description; -typedef struct nstat_msg_src_description_header -{ +typedef struct nstat_msg_src_description_header { NSTAT_SRC_DESCRIPTION_FIELDS; } nstat_msg_src_description_header; -typedef struct nstat_msg_src_description_convenient -{ - nstat_msg_src_description_header hdr; +typedef struct nstat_msg_src_description_convenient { + nstat_msg_src_description_header hdr; union { - nstat_tcp_descriptor tcp; - nstat_udp_descriptor udp; - nstat_route_descriptor route; - nstat_ifnet_descriptor ifnet; - nstat_sysinfo_descriptor sysinfo; + nstat_tcp_descriptor tcp; + nstat_udp_descriptor udp; + nstat_route_descriptor route; + nstat_ifnet_descriptor ifnet; + nstat_sysinfo_descriptor sysinfo; }; } nstat_msg_src_description_convenient; #undef NSTAT_SRC_DESCRIPTION_FIELDS -typedef struct nstat_msg_query_src -{ - nstat_msg_hdr hdr; - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); +typedef struct nstat_msg_query_src { + nstat_msg_hdr hdr; + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); } nstat_msg_query_src_req; -typedef struct nstat_msg_src_counts -{ - nstat_msg_hdr hdr; - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); - nstat_event_flags_t event_flags __attribute__((aligned(sizeof(u_int64_t)))); - nstat_counts counts; +typedef struct nstat_msg_src_counts { + nstat_msg_hdr hdr; + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); + nstat_event_flags_t event_flags __attribute__((aligned(sizeof(u_int64_t)))); + nstat_counts counts; } nstat_msg_src_counts; -#define NSTAT_SRC_UPDATE_FIELDS \ - nstat_msg_hdr hdr; \ - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); \ - nstat_event_flags_t event_flags __attribute__((aligned(sizeof(u_int64_t)))); \ - nstat_counts counts; \ - nstat_provider_id_t provider; \ +#define NSTAT_SRC_UPDATE_FIELDS \ + nstat_msg_hdr hdr; \ + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); \ + nstat_event_flags_t event_flags __attribute__((aligned(sizeof(u_int64_t)))); \ + nstat_counts counts; \ + nstat_provider_id_t provider; \ u_int8_t reserved[4] -typedef struct nstat_msg_src_update -{ +typedef struct nstat_msg_src_update { NSTAT_SRC_UPDATE_FIELDS; - u_int8_t data[]; + u_int8_t data[]; } nstat_msg_src_update; -typedef struct nstat_msg_src_update_hdr -{ +typedef struct nstat_msg_src_update_hdr { NSTAT_SRC_UPDATE_FIELDS; } nstat_msg_src_update_hdr; -typedef struct nstat_msg_src_update_convenient -{ - nstat_msg_src_update_hdr hdr; 
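/*
 * For illustration, a minimal userland sketch built only from the message
 * layouts defined in this header: open the kernel control published as
 * NET_STAT_CONTROL_NAME and subscribe to every kernel TCP source with
 * NSTAT_MSG_TYPE_ADD_ALL_SRCS.  The function name and the *_sketch structs
 * are assumptions made for the example (this header is PRIVATE, so a client
 * mirrors the layouts); the kernel may also reject ADD_ALL_SRCS with EPERM
 * unless the caller holds PRIV_NET_PRIVILEGED_NETWORK_STATISTICS.
 */
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <uuid/uuid.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>

typedef struct nstat_msg_hdr_sketch {           /* mirrors nstat_msg_hdr */
	uint64_t context;
	uint32_t type;
	uint16_t length;
	uint16_t flags;
} nstat_msg_hdr_sketch;

typedef struct nstat_msg_add_all_srcs_sketch {  /* mirrors nstat_msg_add_all_srcs */
	nstat_msg_hdr_sketch hdr;
	uint64_t filter;
	uint64_t events;
	uint32_t provider;
	pid_t    target_pid;
	uuid_t   target_uuid;
} nstat_msg_add_all_srcs_sketch;

static int
nstat_subscribe_all_tcp(void)
{
	struct ctl_info info;
	struct sockaddr_ctl sc;
	nstat_msg_add_all_srcs_sketch req;

	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
	if (fd < 0) {
		return -1;
	}

	/* Resolve the control name to a kernel control id. */
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, "com.apple.network.statistics", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
		close(fd);
		return -1;
	}

	memset(&sc, 0, sizeof(sc));
	sc.sc_len = sizeof(sc);
	sc.sc_family = AF_SYSTEM;
	sc.ss_sysaddr = AF_SYS_CONTROL;
	sc.sc_id = info.ctl_id;
	sc.sc_unit = 0;                 /* 0 lets the kernel pick the unit */
	if (connect(fd, (struct sockaddr *)&sc, sizeof(sc)) == -1) {
		close(fd);
		return -1;
	}

	/* One request; the kernel answers with one SRC_ADDED per live source. */
	memset(&req, 0, sizeof(req));
	req.hdr.context = 1;            /* opaque value echoed back in responses */
	req.hdr.type    = 1002;         /* NSTAT_MSG_TYPE_ADD_ALL_SRCS */
	req.hdr.length  = sizeof(req);  /* nstat_control_send() repairs stale lengths */
	req.provider    = 2;            /* NSTAT_PROVIDER_TCP_KERNEL */
	if (write(fd, &req, sizeof(req)) != (ssize_t)sizeof(req)) {
		close(fd);
		return -1;
	}
	return fd;                      /* read() responses from this descriptor */
}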
+typedef struct nstat_msg_src_update_convenient { + nstat_msg_src_update_hdr hdr; union { - nstat_tcp_descriptor tcp; - nstat_udp_descriptor udp; - nstat_route_descriptor route; - nstat_ifnet_descriptor ifnet; - nstat_sysinfo_descriptor sysinfo; + nstat_tcp_descriptor tcp; + nstat_udp_descriptor udp; + nstat_route_descriptor route; + nstat_ifnet_descriptor ifnet; + nstat_sysinfo_descriptor sysinfo; }; } nstat_msg_src_update_convenient; #undef NSTAT_SRC_UPDATE_FIELDS -typedef struct nstat_msg_src_removed -{ - nstat_msg_hdr hdr; - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); +typedef struct nstat_msg_src_removed { + nstat_msg_hdr hdr; + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); } nstat_msg_src_removed; -typedef struct nstat_msg_sysinfo_counts -{ - nstat_msg_hdr hdr; - nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); - nstat_sysinfo_counts counts; +typedef struct nstat_msg_sysinfo_counts { + nstat_msg_hdr hdr; + nstat_src_ref_t srcref __attribute__((aligned(sizeof(u_int64_t)))); + nstat_sysinfo_counts counts; } nstat_msg_sysinfo_counts; #pragma mark -- Statitiscs about Network Statistics -- @@ -973,135 +919,132 @@ struct nstat_stats { #pragma mark -- System Information Internal Support -- -typedef struct nstat_sysinfo_mbuf_stats -{ - u_int32_t total_256b; /* Peak usage, 256B pool */ - u_int32_t total_2kb; /* Peak usage, 2KB pool */ - u_int32_t total_4kb; /* Peak usage, 4KB pool */ - u_int32_t total_16kb; /* Peak usage, 16KB pool */ - u_int32_t sbmb_total; /* Total mbufs in sock buffer pool */ - u_int32_t sb_atmbuflimit; /* Memory limit reached for socket buffer autoscaling */ - u_int32_t draincnt; /* Number of times mbuf pool has been drained under memory pressure */ - u_int32_t memreleased; /* Memory (bytes) released from mbuf pool to VM */ - u_int32_t sbmb_floor; /* Lowest mbufs in sock buffer pool */ +typedef struct nstat_sysinfo_mbuf_stats { + u_int32_t total_256b; /* Peak usage, 256B pool */ + u_int32_t total_2kb; /* Peak usage, 2KB pool */ + u_int32_t total_4kb; /* Peak usage, 4KB pool */ + u_int32_t total_16kb; /* Peak usage, 16KB pool */ + u_int32_t sbmb_total; /* Total mbufs in sock buffer pool */ + u_int32_t sb_atmbuflimit; /* Memory limit reached for socket buffer autoscaling */ + u_int32_t draincnt; /* Number of times mbuf pool has been drained under memory pressure */ + u_int32_t memreleased; /* Memory (bytes) released from mbuf pool to VM */ + u_int32_t sbmb_floor; /* Lowest mbufs in sock buffer pool */ } nstat_sysinfo_mbuf_stats; -typedef struct nstat_sysinfo_tcp_stats -{ +typedef struct nstat_sysinfo_tcp_stats { /* When adding/removing here, also adjust NSTAT_SYSINFO_TCP_STATS_COUNT */ - u_int32_t ipv4_avgrtt; /* Average RTT for IPv4 */ - u_int32_t ipv6_avgrtt; /* Average RTT for IPv6 */ - u_int32_t send_plr; /* Average uplink packet loss rate */ - u_int32_t recv_plr; /* Average downlink packet loss rate */ - u_int32_t send_tlrto_rate; /* Average rxt timeout after tail loss */ - u_int32_t send_reorder_rate; /* Average packet reordering rate */ - u_int32_t connection_attempts; /* TCP client connection attempts */ - u_int32_t connection_accepts; /* TCP server connection accepts */ - u_int32_t ecn_client_enabled; /* Global setting for ECN client side */ - u_int32_t ecn_server_enabled; /* Global setting for ECN server side */ - u_int32_t ecn_client_setup; /* Attempts to setup TCP client connection with ECN */ - u_int32_t ecn_server_setup; /* Attempts to setup TCP server connection with ECN */ - u_int32_t 
ecn_client_success; /* Number of successful negotiations of ECN for a client connection */ - u_int32_t ecn_server_success; /* Number of successful negotiations of ECN for a server connection */ - u_int32_t ecn_not_supported; /* Number of falbacks to Non-ECN, no support from peer */ - u_int32_t ecn_lost_syn; /* Number of SYNs lost with ECN bits */ - u_int32_t ecn_lost_synack; /* Number of SYN-ACKs lost with ECN bits */ - u_int32_t ecn_recv_ce; /* Number of CEs received from network */ - u_int32_t ecn_recv_ece; /* Number of ECEs received from receiver */ - u_int32_t ecn_sent_ece; /* Number of ECEs sent in response to CE */ - u_int32_t ecn_conn_recv_ce; /* Number of connections using ECN received CE at least once */ - u_int32_t ecn_conn_recv_ece; /* Number of connections using ECN received ECE at least once */ - u_int32_t ecn_conn_plnoce; /* Number of connections using ECN seen packet loss but never received CE */ - u_int32_t ecn_conn_pl_ce; /* Number of connections using ECN seen packet loss and CE */ - u_int32_t ecn_conn_nopl_ce; /* Number of connections using ECN with no packet loss but received CE */ - u_int32_t ecn_fallback_synloss; /* Number of times we did fall back due to SYN-Loss */ - u_int32_t ecn_fallback_reorder; /* Number of times we fallback because we detected the PAWS-issue */ - u_int32_t ecn_fallback_ce; /* Number of times we fallback because we received too many CEs */ - u_int32_t tfo_syn_data_rcv; /* Number of SYN+data received with valid cookie */ - u_int32_t tfo_cookie_req_rcv;/* Number of TFO cookie-requests received */ - u_int32_t tfo_cookie_sent; /* Number of TFO-cookies offered to the client */ - u_int32_t tfo_cookie_invalid;/* Number of invalid TFO-cookies received */ - u_int32_t tfo_cookie_req; /* Number of SYNs with cookie request received*/ - u_int32_t tfo_cookie_rcv; /* Number of SYN/ACKs with Cookie received */ - u_int32_t tfo_syn_data_sent; /* Number of SYNs+data+cookie sent */ - u_int32_t tfo_syn_data_acked;/* Number of times our SYN+data has been acknowledged */ - u_int32_t tfo_syn_loss; /* Number of times SYN+TFO has been lost and we fallback */ - u_int32_t tfo_blackhole; /* Number of times SYN+TFO has been lost and we fallback */ - u_int32_t tfo_cookie_wrong; /* TFO-cookie we sent was wrong */ - u_int32_t tfo_no_cookie_rcv; /* We asked for a cookie but didn't get one */ - u_int32_t tfo_heuristics_disable; /* TFO got disabled due to heuristics */ - u_int32_t tfo_sndblackhole; /* TFO got blackholed in the sending direction */ - u_int32_t mptcp_handover_attempt; /* Total number of MPTCP-attempts using handover mode */ - u_int32_t mptcp_interactive_attempt; /* Total number of MPTCP-attempts using interactive mode */ - u_int32_t mptcp_aggregate_attempt; /* Total number of MPTCP-attempts using aggregate mode */ - u_int32_t mptcp_fp_handover_attempt; /* Same as previous three but only for first-party apps */ - u_int32_t mptcp_fp_interactive_attempt; - u_int32_t mptcp_fp_aggregate_attempt; - u_int32_t mptcp_heuristic_fallback; /* Total number of MPTCP-connections that fell back due to heuristics */ - u_int32_t mptcp_fp_heuristic_fallback; /* Same as previous but for first-party apps */ - u_int32_t mptcp_handover_success_wifi; /* Total number of successfull handover-mode connections that *started* on WiFi */ - u_int32_t mptcp_handover_success_cell; /* Total number of successfull handover-mode connections that *started* on Cell */ - u_int32_t mptcp_interactive_success; /* Total number of interactive-mode connections that negotiated MPTCP */ - u_int32_t 
mptcp_aggregate_success; /* Same as previous but for aggregate */ - u_int32_t mptcp_fp_handover_success_wifi; /* Same as previous four, but for first-party apps */ - u_int32_t mptcp_fp_handover_success_cell; - u_int32_t mptcp_fp_interactive_success; - u_int32_t mptcp_fp_aggregate_success; - u_int32_t mptcp_handover_cell_from_wifi; /* Total number of connections that use cell in handover-mode (coming from WiFi) */ - u_int32_t mptcp_handover_wifi_from_cell; /* Total number of connections that use WiFi in handover-mode (coming from cell) */ - u_int32_t mptcp_interactive_cell_from_wifi; /* Total number of connections that use cell in interactive mode (coming from WiFi) */ - u_int32_t mptcp_back_to_wifi; /* Total number of connections that succeed to move traffic away from cell (when starting on cell) */ - u_int64_t mptcp_handover_cell_bytes; /* Total number of bytes sent on cell in handover-mode (on new subflows, ignoring initial one) */ - u_int64_t mptcp_interactive_cell_bytes; /* Same as previous but for interactive */ - u_int64_t mptcp_aggregate_cell_bytes; - u_int64_t mptcp_handover_all_bytes; /* Total number of bytes sent in handover */ - u_int64_t mptcp_interactive_all_bytes; - u_int64_t mptcp_aggregate_all_bytes; - u_int32_t mptcp_wifi_proxy; /* Total number of new subflows that fell back to regular TCP on cell */ - u_int32_t mptcp_cell_proxy; /* Total number of new subflows that fell back to regular TCP on WiFi */ - u_int32_t mptcp_triggered_cell; /* Total number of times an MPTCP-connection triggered cell bringup */ - u_int32_t _padding; + u_int32_t ipv4_avgrtt; /* Average RTT for IPv4 */ + u_int32_t ipv6_avgrtt; /* Average RTT for IPv6 */ + u_int32_t send_plr; /* Average uplink packet loss rate */ + u_int32_t recv_plr; /* Average downlink packet loss rate */ + u_int32_t send_tlrto_rate; /* Average rxt timeout after tail loss */ + u_int32_t send_reorder_rate; /* Average packet reordering rate */ + u_int32_t connection_attempts; /* TCP client connection attempts */ + u_int32_t connection_accepts; /* TCP server connection accepts */ + u_int32_t ecn_client_enabled; /* Global setting for ECN client side */ + u_int32_t ecn_server_enabled; /* Global setting for ECN server side */ + u_int32_t ecn_client_setup; /* Attempts to setup TCP client connection with ECN */ + u_int32_t ecn_server_setup; /* Attempts to setup TCP server connection with ECN */ + u_int32_t ecn_client_success; /* Number of successful negotiations of ECN for a client connection */ + u_int32_t ecn_server_success; /* Number of successful negotiations of ECN for a server connection */ + u_int32_t ecn_not_supported; /* Number of falbacks to Non-ECN, no support from peer */ + u_int32_t ecn_lost_syn; /* Number of SYNs lost with ECN bits */ + u_int32_t ecn_lost_synack; /* Number of SYN-ACKs lost with ECN bits */ + u_int32_t ecn_recv_ce; /* Number of CEs received from network */ + u_int32_t ecn_recv_ece; /* Number of ECEs received from receiver */ + u_int32_t ecn_sent_ece; /* Number of ECEs sent in response to CE */ + u_int32_t ecn_conn_recv_ce; /* Number of connections using ECN received CE at least once */ + u_int32_t ecn_conn_recv_ece; /* Number of connections using ECN received ECE at least once */ + u_int32_t ecn_conn_plnoce; /* Number of connections using ECN seen packet loss but never received CE */ + u_int32_t ecn_conn_pl_ce; /* Number of connections using ECN seen packet loss and CE */ + u_int32_t ecn_conn_nopl_ce; /* Number of connections using ECN with no packet loss but received CE */ + u_int32_t ecn_fallback_synloss; 
/* Number of times we did fall back due to SYN-Loss */ + u_int32_t ecn_fallback_reorder; /* Number of times we fallback because we detected the PAWS-issue */ + u_int32_t ecn_fallback_ce; /* Number of times we fallback because we received too many CEs */ + u_int32_t tfo_syn_data_rcv; /* Number of SYN+data received with valid cookie */ + u_int32_t tfo_cookie_req_rcv;/* Number of TFO cookie-requests received */ + u_int32_t tfo_cookie_sent; /* Number of TFO-cookies offered to the client */ + u_int32_t tfo_cookie_invalid;/* Number of invalid TFO-cookies received */ + u_int32_t tfo_cookie_req; /* Number of SYNs with cookie request received*/ + u_int32_t tfo_cookie_rcv; /* Number of SYN/ACKs with Cookie received */ + u_int32_t tfo_syn_data_sent; /* Number of SYNs+data+cookie sent */ + u_int32_t tfo_syn_data_acked;/* Number of times our SYN+data has been acknowledged */ + u_int32_t tfo_syn_loss; /* Number of times SYN+TFO has been lost and we fallback */ + u_int32_t tfo_blackhole; /* Number of times SYN+TFO has been lost and we fallback */ + u_int32_t tfo_cookie_wrong; /* TFO-cookie we sent was wrong */ + u_int32_t tfo_no_cookie_rcv; /* We asked for a cookie but didn't get one */ + u_int32_t tfo_heuristics_disable; /* TFO got disabled due to heuristics */ + u_int32_t tfo_sndblackhole; /* TFO got blackholed in the sending direction */ + u_int32_t mptcp_handover_attempt; /* Total number of MPTCP-attempts using handover mode */ + u_int32_t mptcp_interactive_attempt; /* Total number of MPTCP-attempts using interactive mode */ + u_int32_t mptcp_aggregate_attempt; /* Total number of MPTCP-attempts using aggregate mode */ + u_int32_t mptcp_fp_handover_attempt; /* Same as previous three but only for first-party apps */ + u_int32_t mptcp_fp_interactive_attempt; + u_int32_t mptcp_fp_aggregate_attempt; + u_int32_t mptcp_heuristic_fallback; /* Total number of MPTCP-connections that fell back due to heuristics */ + u_int32_t mptcp_fp_heuristic_fallback; /* Same as previous but for first-party apps */ + u_int32_t mptcp_handover_success_wifi; /* Total number of successfull handover-mode connections that *started* on WiFi */ + u_int32_t mptcp_handover_success_cell; /* Total number of successfull handover-mode connections that *started* on Cell */ + u_int32_t mptcp_interactive_success; /* Total number of interactive-mode connections that negotiated MPTCP */ + u_int32_t mptcp_aggregate_success; /* Same as previous but for aggregate */ + u_int32_t mptcp_fp_handover_success_wifi; /* Same as previous four, but for first-party apps */ + u_int32_t mptcp_fp_handover_success_cell; + u_int32_t mptcp_fp_interactive_success; + u_int32_t mptcp_fp_aggregate_success; + u_int32_t mptcp_handover_cell_from_wifi; /* Total number of connections that use cell in handover-mode (coming from WiFi) */ + u_int32_t mptcp_handover_wifi_from_cell; /* Total number of connections that use WiFi in handover-mode (coming from cell) */ + u_int32_t mptcp_interactive_cell_from_wifi; /* Total number of connections that use cell in interactive mode (coming from WiFi) */ + u_int32_t mptcp_back_to_wifi; /* Total number of connections that succeed to move traffic away from cell (when starting on cell) */ + u_int64_t mptcp_handover_cell_bytes; /* Total number of bytes sent on cell in handover-mode (on new subflows, ignoring initial one) */ + u_int64_t mptcp_interactive_cell_bytes; /* Same as previous but for interactive */ + u_int64_t mptcp_aggregate_cell_bytes; + u_int64_t mptcp_handover_all_bytes; /* Total number of bytes sent in handover */ + u_int64_t 
mptcp_interactive_all_bytes; + u_int64_t mptcp_aggregate_all_bytes; + u_int32_t mptcp_wifi_proxy; /* Total number of new subflows that fell back to regular TCP on cell */ + u_int32_t mptcp_cell_proxy; /* Total number of new subflows that fell back to regular TCP on WiFi */ + u_int32_t mptcp_triggered_cell; /* Total number of times an MPTCP-connection triggered cell bringup */ + u_int32_t _padding; /* When adding/removing here, also adjust NSTAT_SYSINFO_TCP_STATS_COUNT */ } nstat_sysinfo_tcp_stats; -#define NSTAT_SYSINFO_TCP_STATS_COUNT 71 +#define NSTAT_SYSINFO_TCP_STATS_COUNT 71 enum { NSTAT_IFNET_ECN_PROTO_IPV4 = 1 - ,NSTAT_IFNET_ECN_PROTO_IPV6 + , NSTAT_IFNET_ECN_PROTO_IPV6 }; enum { NSTAT_IFNET_ECN_TYPE_CELLULAR = 1 - ,NSTAT_IFNET_ECN_TYPE_WIFI - ,NSTAT_IFNET_ECN_TYPE_ETHERNET + , NSTAT_IFNET_ECN_TYPE_WIFI + , NSTAT_IFNET_ECN_TYPE_ETHERNET }; typedef struct nstat_sysinfo_ifnet_ecn_stats { - u_int32_t ifnet_proto; - u_int32_t ifnet_type; - struct if_tcp_ecn_stat ecn_stat; + u_int32_t ifnet_proto; + u_int32_t ifnet_type; + struct if_tcp_ecn_stat ecn_stat; } nstat_sysinfo_ifnet_ecn_stats; /* Total number of Low Internet stats that will be reported */ -#define NSTAT_LIM_STAT_KEYVAL_COUNT 12 +#define NSTAT_LIM_STAT_KEYVAL_COUNT 12 typedef struct nstat_sysinfo_lim_stats { - u_int8_t ifnet_signature[NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE]; - u_int32_t ifnet_siglen; - u_int32_t ifnet_type; - struct if_lim_perf_stat lim_stat; + u_int8_t ifnet_signature[NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE]; + u_int32_t ifnet_siglen; + u_int32_t ifnet_type; + struct if_lim_perf_stat lim_stat; } nstat_sysinfo_lim_stats; -#define NSTAT_NET_API_STAT_KEYVAL_COUNT (NSTAT_SYSINFO_API_LAST - NSTAT_SYSINFO_API_FIRST + 1) +#define NSTAT_NET_API_STAT_KEYVAL_COUNT (NSTAT_SYSINFO_API_LAST - NSTAT_SYSINFO_API_FIRST + 1) typedef struct nstat_sysinfo_net_api_stats { - u_int32_t report_interval; - u_int32_t _padding; - struct net_api_stats net_api_stats; + u_int32_t report_interval; + u_int32_t _padding; + struct net_api_stats net_api_stats; } nstat_sysinfo_net_api_stats; -typedef struct nstat_sysinfo_data -{ - uint32_t flags; - uint32_t unsent_data_cnt; /* Before sleeping */ +typedef struct nstat_sysinfo_data { + uint32_t flags; + uint32_t unsent_data_cnt; /* Before sleeping */ union { nstat_sysinfo_mbuf_stats mb_stats; nstat_sysinfo_tcp_stats tcp_stats; @@ -1113,24 +1056,22 @@ typedef struct nstat_sysinfo_data #pragma mark -- Generic Network Statistics Provider -- -typedef void * nstat_provider_cookie_t; +typedef void * nstat_provider_cookie_t; #pragma mark -- Route Statistics Gathering Functions -- struct rtentry; -enum -{ - NSTAT_TX_FLAG_RETRANSMIT = 1 +enum{ + NSTAT_TX_FLAG_RETRANSMIT = 1 }; -enum -{ - NSTAT_RX_FLAG_DUPLICATE = 1, - NSTAT_RX_FLAG_OUT_OF_ORDER = 2 +enum{ + NSTAT_RX_FLAG_DUPLICATE = 1, + NSTAT_RX_FLAG_OUT_OF_ORDER = 2 }; // indicates whether or not collection of statistics is enabled -extern int nstat_collect; +extern int nstat_collect; void nstat_init(void); @@ -1141,10 +1082,10 @@ void nstat_route_tx(struct rtentry *rte, u_int32_t packets, u_int32_t bytes, u_i void nstat_route_rx(struct rtentry *rte, u_int32_t packets, u_int32_t bytes, u_int32_t flags); void nstat_route_rtt(struct rtentry *rte, u_int32_t rtt, u_int32_t rtt_var); void nstat_route_update(struct rtentry *rte, uint32_t connect_attempts, uint32_t connect_successes, - uint32_t rx_packets, uint32_t rx_bytes, uint32_t rx_duplicatebytes, uint32_t rx_outoforderbytes, - uint32_t tx_packets, uint32_t tx_bytes, uint32_t tx_retransmit, - uint32_t rtt, 
uint32_t rtt_var); -struct nstat_counts* nstat_route_attach(struct rtentry *rte); + uint32_t rx_packets, uint32_t rx_bytes, uint32_t rx_duplicatebytes, uint32_t rx_outoforderbytes, + uint32_t tx_packets, uint32_t tx_bytes, uint32_t tx_retransmit, + uint32_t rtt, uint32_t rtt_var); +struct nstat_counts* nstat_route_attach(struct rtentry *rte); void nstat_route_detach(struct rtentry *rte); // watcher support @@ -1172,11 +1113,11 @@ u_int16_t nstat_ifnet_to_flags(struct ifnet *ifp); // while under the socket lock, so on 64bit we don't actually need // atomic operations to increment. #if defined(__LP64__) -#define locked_add_64(__addr, __count) do { \ +#define locked_add_64(__addr, __count) do { \ *(__addr) += (__count); \ } while (0) #else -#define locked_add_64(__addr, __count) do { \ +#define locked_add_64(__addr, __count) do { \ atomic_add_64((__addr), (__count)); \ } while (0) #endif diff --git a/bsd/net/nwk_wq.c b/bsd/net/nwk_wq.c index 400fd1312..9eaa778e5 100644 --- a/bsd/net/nwk_wq.c +++ b/bsd/net/nwk_wq.c @@ -56,7 +56,7 @@ static int nwk_wq_thread_cont(int err); static void nwk_wq_thread_func(void *v, wait_result_t w); void -nwk_wq_init (void) +nwk_wq_init(void) { thread_t nwk_wq_thread = THREAD_NULL; @@ -103,8 +103,9 @@ nwk_wq_thread_cont(int err) VERIFY(TAILQ_FIRST(&temp_nwk_wq_head) != NULL); TAILQ_FOREACH_SAFE(nwk_item, &temp_nwk_wq_head, nwk_wq_link, nwk_item_next) { nwk_item->func(nwk_item->arg); - if (nwk_item->is_arg_managed == FALSE) + if (nwk_item->is_arg_managed == FALSE) { FREE(nwk_item->arg, M_NWKWQ); + } FREE(nwk_item, M_NWKWQ); } lck_mtx_lock(&nwk_wq_lock); @@ -134,4 +135,3 @@ nwk_wq_enqueue(struct nwk_wq_entry *nwk_item) lck_mtx_unlock(&nwk_wq_lock); wakeup((caddr_t)&nwk_wq_waitch); } - diff --git a/bsd/net/nwk_wq.h b/bsd/net/nwk_wq.h index 80d8e4851..22eec5a75 100644 --- a/bsd/net/nwk_wq.h +++ b/bsd/net/nwk_wq.h @@ -38,8 +38,7 @@ struct nwk_wq_entry { TAILQ_ENTRY(nwk_wq_entry) nwk_wq_link; }; -void nwk_wq_init (void); +void nwk_wq_init(void); void nwk_wq_enqueue(struct nwk_wq_entry *nwk_item); #endif /* BSD_KERNEL_PRIVATE */ #endif /* NWK_WQ_H */ - diff --git a/bsd/net/packet_mangler.c b/bsd/net/packet_mangler.c index 24d18870a..db7b4e643 100644 --- a/bsd/net/packet_mangler.c +++ b/bsd/net/packet_mangler.c @@ -54,44 +54,44 @@ #include #include -#define MAX_PACKET_MANGLER 1 +#define MAX_PACKET_MANGLER 1 -#define PKT_MNGLR_FLG_IPFILTER_ATTACHED 0x00000001 +#define PKT_MNGLR_FLG_IPFILTER_ATTACHED 0x00000001 -SYSCTL_NODE(_net, OID_AUTO, pktmnglr, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "pktmnglr"); -SYSCTL_INT(_net_pktmnglr, OID_AUTO, log, CTLFLAG_RW|CTLFLAG_LOCKED, - &pkt_mnglr_log_level, 0, ""); +SYSCTL_NODE(_net, OID_AUTO, pktmnglr, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "pktmnglr"); +SYSCTL_INT(_net_pktmnglr, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED, + &pkt_mnglr_log_level, 0, ""); /* * The structure packet_mangler represents a user space packet filter * It's created and associated with a kernel control socket instance */ struct packet_mangler { - kern_ctl_ref pkt_mnglr_kcref; - uint32_t pkt_mnglr_kcunit; - uint32_t pkt_mnglr_flags; + kern_ctl_ref pkt_mnglr_kcref; + uint32_t pkt_mnglr_kcunit; + uint32_t pkt_mnglr_flags; /* IP filter related params */ - ipfilter_t pkt_mnglr_ipfref; - ipfilter_t pkt_mnglr_ipfrefv6; - struct ipf_filter pkt_mnglr_ipfilter; + ipfilter_t pkt_mnglr_ipfref; + ipfilter_t pkt_mnglr_ipfrefv6; + struct ipf_filter pkt_mnglr_ipfilter; /* Options */ - uint8_t activate; - Pkt_Mnglr_Flow dir; - struct sockaddr_storage lsaddr; - struct sockaddr_storage rsaddr; - 
struct sockaddr_storage swap_lsaddr; - struct sockaddr_storage swap_rsaddr; - uint32_t ip_action_mask; - uint16_t lport; - uint16_t rport; - uint32_t proto; - uint32_t proto_action_mask; + uint8_t activate; + Pkt_Mnglr_Flow dir; + struct sockaddr_storage lsaddr; + struct sockaddr_storage rsaddr; + struct sockaddr_storage swap_lsaddr; + struct sockaddr_storage swap_rsaddr; + uint32_t ip_action_mask; + uint16_t lport; + uint16_t rport; + uint32_t proto; + uint32_t proto_action_mask; }; /* Array of all the packet mangler instancesi */ struct packet_mangler **packet_manglers = NULL; -uint32_t pkt_mnglr_active_count = 0; /* Number of active packet filters */ +uint32_t pkt_mnglr_active_count = 0; /* Number of active packet filters */ uint32_t pkt_mnglr_close_wait_timeout = 1000; /* in milliseconds */ static kern_ctl_ref pkt_mnglr_kctlref = NULL; @@ -103,7 +103,7 @@ static lck_grp_t *pkt_mnglr_lck_grp = NULL; /* The lock below protects packet_manglers DS, packet_mangler DS */ decl_lck_rw_data(static, pkt_mnglr_lck_rw); -#define PKT_MNGLR_RW_LCK_MAX 8 +#define PKT_MNGLR_RW_LCK_MAX 8 int pkt_mnglr_rw_nxt_lck = 0; void* pkt_mnglr_rw_lock_history[PKT_MNGLR_RW_LCK_MAX]; @@ -112,9 +112,9 @@ int pkt_mnglr_rw_nxt_unlck = 0; void* pkt_mnglr_rw_unlock_history[PKT_MNGLR_RW_LCK_MAX]; -#define PACKET_MANGLER_ZONE_NAME "packet_mangler" -#define PACKET_MANGLER_ZONE_MAX 10 -static struct zone *packet_mangler_zone = NULL; /* zone for packet_mangler */ +#define PACKET_MANGLER_ZONE_NAME "packet_mangler" +#define PACKET_MANGLER_ZONE_MAX 10 +static struct zone *packet_mangler_zone = NULL; /* zone for packet_mangler */ /* * For troubleshooting @@ -131,24 +131,24 @@ static void pkt_mnglr_rw_lock_shared(lck_rw_t *); static void pkt_mnglr_rw_unlock_shared(lck_rw_t *); static errno_t pktmnglr_ipfilter_output(void *cookie, mbuf_t *data, - ipf_pktopts_t options); + ipf_pktopts_t options); static errno_t pktmnglr_ipfilter_input(void *cookie, mbuf_t *data, - int offset, u_int8_t protocol); + int offset, u_int8_t protocol); static void pktmnglr_ipfilter_detach(void *cookie); static void chksm_update(mbuf_t data); -#define TCP_OPT_MULTIPATH_TCP 30 -#define MPTCP_SBT_VER_OFFSET 2 +#define TCP_OPT_MULTIPATH_TCP 30 +#define MPTCP_SBT_VER_OFFSET 2 -#define MPTCP_SUBTYPE_MPCAPABLE 0x0 -#define MPTCP_SUBTYPE_MPJOIN 0x1 -#define MPTCP_SUBTYPE_DSS 0x2 -#define MPTCP_SUBTYPE_ADD_ADDR 0x3 -#define MPTCP_SUBTYPE_REM_ADDR 0x4 -#define MPTCP_SUBTYPE_MP_PRIO 0x5 -#define MPTCP_SUBTYPE_MP_FAIL 0x6 -#define MPTCP_SUBTYPE_MP_FASTCLOSE 0x7 +#define MPTCP_SUBTYPE_MPCAPABLE 0x0 +#define MPTCP_SUBTYPE_MPJOIN 0x1 +#define MPTCP_SUBTYPE_DSS 0x2 +#define MPTCP_SUBTYPE_ADD_ADDR 0x3 +#define MPTCP_SUBTYPE_REM_ADDR 0x4 +#define MPTCP_SUBTYPE_MP_PRIO 0x5 +#define MPTCP_SUBTYPE_MP_FAIL 0x6 +#define MPTCP_SUBTYPE_MP_FASTCLOSE 0x7 /* * packet filter global read write lock @@ -213,9 +213,9 @@ pkt_mnglr_rw_unlock_shared(lck_rw_t *lck) */ static errno_t pkt_mnglr_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, - void **unitinfo) + void **unitinfo) { - errno_t error = 0; + errno_t error = 0; struct packet_mangler *p_pkt_mnglr = NULL; PKT_MNGLR_LOG(LOG_NOTICE, "Connecting packet mangler filter."); @@ -249,10 +249,11 @@ pkt_mnglr_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, goto done; } /* Another thread may have won the race */ - if (packet_manglers != NULL) + if (packet_manglers != NULL) { FREE(tmp, M_TEMP); - else + } else { packet_manglers = tmp; + } } if (sac->sc_unit == 0 || sac->sc_unit > MAX_PACKET_MANGLER) { @@ -292,24 +293,25 
@@ pkt_mnglr_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, } PKT_MNGLR_LOG(LOG_INFO, "Registered packet mangler's IP Filters"); - p_pkt_mnglr->pkt_mnglr_flags |= PKT_MNGLR_FLG_IPFILTER_ATTACHED; + p_pkt_mnglr->pkt_mnglr_flags |= PKT_MNGLR_FLG_IPFILTER_ATTACHED; pkt_mnglr_rw_unlock_exclusive(&pkt_mnglr_lck_rw); done: - if (error != 0 && p_pkt_mnglr != NULL) + if (error != 0 && p_pkt_mnglr != NULL) { zfree(packet_mangler_zone, p_pkt_mnglr); + } PKT_MNGLR_LOG(LOG_INFO, "return %d pkt_mnglr_active_count %u kcunit %u", error, pkt_mnglr_active_count, sac->sc_unit); - return (error); + return error; } static errno_t pkt_mnglr_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo) { #pragma unused(kctlref) - errno_t error = 0; + errno_t error = 0; struct packet_mangler *p_pkt_mnglr; PKT_MNGLR_LOG(LOG_INFO, "Disconnecting packet mangler kernel control"); @@ -355,15 +357,15 @@ done: PKT_MNGLR_LOG(LOG_INFO, "return %d pkt_mnglr_active_count %u kcunit %u", error, pkt_mnglr_active_count, kcunit); - return (error); + return error; } static errno_t pkt_mnglr_ctl_getopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, - int opt, void *data, size_t *len) + int opt, void *data, size_t *len) { #pragma unused(kctlref, opt) - errno_t error = 0; + errno_t error = 0; struct packet_mangler *p_pkt_mnglr = (struct packet_mangler *)unitinfo; PKT_MNGLR_LOG(LOG_NOTICE, ""); @@ -388,128 +390,128 @@ pkt_mnglr_ctl_getopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, goto done; } switch (opt) { - case PKT_MNGLR_OPT_PROTO_ACT_MASK: - if (*len < sizeof(uint32_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTO_ACT_MASK " - "len too small %lu", *len); - error = EINVAL; - goto done; - } + case PKT_MNGLR_OPT_PROTO_ACT_MASK: + if (*len < sizeof(uint32_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTO_ACT_MASK " + "len too small %lu", *len); + error = EINVAL; + goto done; + } - if (data != NULL) { - *(uint32_t *)data = p_pkt_mnglr->proto_action_mask; - } - break; - case PKT_MNGLR_OPT_IP_ACT_MASK: - if (*len < sizeof(uint32_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_IP_ACT_MASK " - "len too small %lu", *len); - error = EINVAL; - goto done; - } + if (data != NULL) { + *(uint32_t *)data = p_pkt_mnglr->proto_action_mask; + } + break; + case PKT_MNGLR_OPT_IP_ACT_MASK: + if (*len < sizeof(uint32_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_IP_ACT_MASK " + "len too small %lu", *len); + error = EINVAL; + goto done; + } - if (data != NULL) { - *(uint32_t *)data = p_pkt_mnglr->ip_action_mask; - } - break; - case PKT_MNGLR_OPT_LOCAL_IP: - if (*len < sizeof(struct sockaddr_storage)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_IP " - "len too small %lu", *len); - error = EINVAL; - goto done; - } + if (data != NULL) { + *(uint32_t *)data = p_pkt_mnglr->ip_action_mask; + } + break; + case PKT_MNGLR_OPT_LOCAL_IP: + if (*len < sizeof(struct sockaddr_storage)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_IP " + "len too small %lu", *len); + error = EINVAL; + goto done; + } - if (data != NULL) { - *(struct sockaddr_storage *)data = p_pkt_mnglr->lsaddr; - } - break; - case PKT_MNGLR_OPT_REMOTE_IP: - if (*len < sizeof(struct sockaddr_storage)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_IP " - "len too small %lu", *len); - error = EINVAL; - goto done; - } + if (data != NULL) { + *(struct sockaddr_storage *)data = p_pkt_mnglr->lsaddr; + } + break; + case PKT_MNGLR_OPT_REMOTE_IP: + if (*len < sizeof(struct sockaddr_storage)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_IP " + "len 
too small %lu", *len); + error = EINVAL; + goto done; + } - if (data != NULL) { - *(struct sockaddr_storage *)data = p_pkt_mnglr->rsaddr; - } - break; - case PKT_MNGLR_OPT_LOCAL_PORT: - if (*len < sizeof(uint16_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_PORT " - "len too small %lu", *len); - error = EINVAL; - goto done; - } + if (data != NULL) { + *(struct sockaddr_storage *)data = p_pkt_mnglr->rsaddr; + } + break; + case PKT_MNGLR_OPT_LOCAL_PORT: + if (*len < sizeof(uint16_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_PORT " + "len too small %lu", *len); + error = EINVAL; + goto done; + } - if (data != NULL) { - *(uint16_t *)data = p_pkt_mnglr->lport; - } - break; - case PKT_MNGLR_OPT_REMOTE_PORT: - if (*len < sizeof(uint16_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_PORT " - "len too small %lu", *len); - error = EINVAL; - goto done; - } + if (data != NULL) { + *(uint16_t *)data = p_pkt_mnglr->lport; + } + break; + case PKT_MNGLR_OPT_REMOTE_PORT: + if (*len < sizeof(uint16_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_PORT " + "len too small %lu", *len); + error = EINVAL; + goto done; + } - if (data != NULL) { - *(uint16_t *)data = p_pkt_mnglr->rport; - } - break; - case PKT_MNGLR_OPT_DIRECTION: - if (*len < sizeof(uint32_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_DIRECTION " - "len too small %lu", *len); - error = EINVAL; - goto done; - } - if (data != NULL) { - *(uint32_t *)data = p_pkt_mnglr->dir; - } - break; - case PKT_MNGLR_OPT_PROTOCOL: - if (*len < sizeof(uint32_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTOCOL " - "len too small %lu", *len); - error = EINVAL; - goto done; - } - if (data != NULL) { - *(uint32_t *)data = p_pkt_mnglr->proto; - } - break; - case PKT_MNGLR_OPT_ACTIVATE: - if (*len < sizeof(uint8_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_ACTIVATE " - "len too small %lu", *len); - error = EINVAL; - goto done; - } + if (data != NULL) { + *(uint16_t *)data = p_pkt_mnglr->rport; + } + break; + case PKT_MNGLR_OPT_DIRECTION: + if (*len < sizeof(uint32_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_DIRECTION " + "len too small %lu", *len); + error = EINVAL; + goto done; + } + if (data != NULL) { + *(uint32_t *)data = p_pkt_mnglr->dir; + } + break; + case PKT_MNGLR_OPT_PROTOCOL: + if (*len < sizeof(uint32_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTOCOL " + "len too small %lu", *len); + error = EINVAL; + goto done; + } + if (data != NULL) { + *(uint32_t *)data = p_pkt_mnglr->proto; + } + break; + case PKT_MNGLR_OPT_ACTIVATE: + if (*len < sizeof(uint8_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_ACTIVATE " + "len too small %lu", *len); + error = EINVAL; + goto done; + } - if (data != NULL) { - *(uint8_t *)data = p_pkt_mnglr->activate; - } - break; - default: - error = ENOPROTOOPT; - break; + if (data != NULL) { + *(uint8_t *)data = p_pkt_mnglr->activate; + } + break; + default: + error = ENOPROTOOPT; + break; } done: pkt_mnglr_rw_unlock_shared(&pkt_mnglr_lck_rw); - return (error); + return error; } static errno_t pkt_mnglr_ctl_setopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, - int opt, void *data, size_t len) + int opt, void *data, size_t len) { #pragma unused(kctlref, opt) - errno_t error = 0; + errno_t error = 0; struct packet_mangler *p_pkt_mnglr = (struct packet_mangler *)unitinfo; PKT_MNGLR_LOG(LOG_NOTICE, ""); @@ -534,170 +536,170 @@ pkt_mnglr_ctl_setopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, goto done; } switch (opt) { - case PKT_MNGLR_OPT_PROTO_ACT_MASK: - if (len < sizeof(uint32_t)) { - 
PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTO_ACT_MASK " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->proto_action_mask != 0) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTO_ACT_MASK " - "already set %u", - p_pkt_mnglr->proto_action_mask); - error = EINVAL; - goto done; - } - p_pkt_mnglr->proto_action_mask = *(uint32_t *)data; - PKT_MNGLR_LOG(LOG_INFO, "p_pkt_mnglr->proto_action_mask set to :%d", p_pkt_mnglr->proto_action_mask); - break; - case PKT_MNGLR_OPT_IP_ACT_MASK: - if (len < sizeof(uint32_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_IP_ACT_MASK " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->ip_action_mask != 0) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_IP_ACT_MASK " - "already set %u", - p_pkt_mnglr->ip_action_mask); - error = EINVAL; - goto done; - } - p_pkt_mnglr->ip_action_mask = *(uint32_t *)data; - break; - case PKT_MNGLR_OPT_LOCAL_IP: - if (len < sizeof(struct sockaddr_storage)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_IP " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->lsaddr.ss_family) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_IP " - "already set"); - error = EINVAL; - goto done; - } - p_pkt_mnglr->lsaddr = *(struct sockaddr_storage *)data; - break; - case PKT_MNGLR_OPT_REMOTE_IP: - if (len < sizeof(struct sockaddr_storage)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_IP " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->rsaddr.ss_family) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_IP " - "already set"); - error = EINVAL; - goto done; - } + case PKT_MNGLR_OPT_PROTO_ACT_MASK: + if (len < sizeof(uint32_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTO_ACT_MASK " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (p_pkt_mnglr->proto_action_mask != 0) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTO_ACT_MASK " + "already set %u", + p_pkt_mnglr->proto_action_mask); + error = EINVAL; + goto done; + } + p_pkt_mnglr->proto_action_mask = *(uint32_t *)data; + PKT_MNGLR_LOG(LOG_INFO, "p_pkt_mnglr->proto_action_mask set to :%d", p_pkt_mnglr->proto_action_mask); + break; + case PKT_MNGLR_OPT_IP_ACT_MASK: + if (len < sizeof(uint32_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_IP_ACT_MASK " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (p_pkt_mnglr->ip_action_mask != 0) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_IP_ACT_MASK " + "already set %u", + p_pkt_mnglr->ip_action_mask); + error = EINVAL; + goto done; + } + p_pkt_mnglr->ip_action_mask = *(uint32_t *)data; + break; + case PKT_MNGLR_OPT_LOCAL_IP: + if (len < sizeof(struct sockaddr_storage)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_IP " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (p_pkt_mnglr->lsaddr.ss_family) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_IP " + "already set"); + error = EINVAL; + goto done; + } + p_pkt_mnglr->lsaddr = *(struct sockaddr_storage *)data; + break; + case PKT_MNGLR_OPT_REMOTE_IP: + if (len < sizeof(struct sockaddr_storage)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_IP " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (p_pkt_mnglr->rsaddr.ss_family) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_IP " + "already set"); + error = EINVAL; + goto done; + } - p_pkt_mnglr->rsaddr = *(struct sockaddr_storage *)data; - PKT_MNGLR_LOG(LOG_INFO, - "Remote IP registered for address family: %d", - p_pkt_mnglr->rsaddr.ss_family); - break; - case 
PKT_MNGLR_OPT_LOCAL_PORT: - if (len < sizeof(uint16_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_PORT " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->lport != 0) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_PORT " - "already set %d", - p_pkt_mnglr->lport); - error = EINVAL; - goto done; - } - p_pkt_mnglr->lport = *(uint16_t *)data; - break; - case PKT_MNGLR_OPT_REMOTE_PORT: - if (len < sizeof(uint16_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_PORT " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->rport != 0) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_PORT " - "already set %d", - p_pkt_mnglr->rport); - error = EINVAL; - goto done; - } - p_pkt_mnglr->rport = *(uint16_t *)data; - break; - case PKT_MNGLR_OPT_DIRECTION: - if (len < sizeof(uint32_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_DIRECTION " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->dir != 0) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_DIRECTION " - "already set %u", - p_pkt_mnglr->dir); - error = EINVAL; - goto done; - } - p_pkt_mnglr->dir = *(uint32_t *)data; - break; - case PKT_MNGLR_OPT_PROTOCOL: - if (len < sizeof(uint32_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTOCOL " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->proto != 0) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTOCOL " - "already set %u", - p_pkt_mnglr->proto); - error = EINVAL; - goto done; - } - p_pkt_mnglr->proto = *(uint32_t *)data; - break; - case PKT_MNGLR_OPT_ACTIVATE: - if (len < sizeof(uint8_t)) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_ACTIVATE " - "len too small %lu", len); - error = EINVAL; - goto done; - } - if (p_pkt_mnglr->activate != 0) { - PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_ACTIVATE " - "already set %u", - p_pkt_mnglr->activate); - error = EINVAL; - goto done; - } - p_pkt_mnglr->activate = *(uint8_t *)data; - PKT_MNGLR_LOG(LOG_ERR, "p_pkt_mnglr->activate set to :%d", + p_pkt_mnglr->rsaddr = *(struct sockaddr_storage *)data; + PKT_MNGLR_LOG(LOG_INFO, + "Remote IP registered for address family: %d", + p_pkt_mnglr->rsaddr.ss_family); + break; + case PKT_MNGLR_OPT_LOCAL_PORT: + if (len < sizeof(uint16_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_PORT " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (p_pkt_mnglr->lport != 0) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_LOCAL_PORT " + "already set %d", + p_pkt_mnglr->lport); + error = EINVAL; + goto done; + } + p_pkt_mnglr->lport = *(uint16_t *)data; + break; + case PKT_MNGLR_OPT_REMOTE_PORT: + if (len < sizeof(uint16_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_PORT " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (p_pkt_mnglr->rport != 0) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_REMOTE_PORT " + "already set %d", + p_pkt_mnglr->rport); + error = EINVAL; + goto done; + } + p_pkt_mnglr->rport = *(uint16_t *)data; + break; + case PKT_MNGLR_OPT_DIRECTION: + if (len < sizeof(uint32_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_DIRECTION " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (p_pkt_mnglr->dir != 0) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_DIRECTION " + "already set %u", + p_pkt_mnglr->dir); + error = EINVAL; + goto done; + } + p_pkt_mnglr->dir = *(uint32_t *)data; + break; + case PKT_MNGLR_OPT_PROTOCOL: + if (len < sizeof(uint32_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTOCOL " + "len too small %lu", len); + error = EINVAL; + goto done; 
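/*
 * [Editorial sketch; not part of the patch.] Every PKT_MNGLR_OPT_* case in
 * this switch is deliberately write-once: a second set of the same option
 * fails with EINVAL, so a filter is configured exactly once and then armed
 * via PKT_MNGLR_OPT_ACTIVATE. From userland the flow is the standard
 * kernel-control dance -- resolve PACKET_MANGLER_CONTROL_NAME to a control
 * ID, connect(2) with sc_unit in 1..MAX_PACKET_MANGLER, then setsockopt()
 * at the SYSPROTO_CONTROL level. Error handling trimmed for brevity:
 *
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *	struct ctl_info info = { 0 };
 *	struct sockaddr_ctl sc = { 0 };
 *	uint32_t proto = IPPROTO_TCP;
 *	uint32_t tcp_mask = PKT_MNGLR_TCP_ACT_NOP_MPTCP;
 *	uint8_t on = 1;
 *
 *	strlcpy(info.ctl_name, PACKET_MANGLER_CONTROL_NAME,
 *	    sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);
 *
 *	sc.sc_len = sizeof(sc);
 *	sc.sc_family = AF_SYSTEM;
 *	sc.ss_sysaddr = AF_SYS_CONTROL;
 *	sc.sc_id = info.ctl_id;    // per the header's connect(2) note
 *	sc.sc_unit = 1;            // 0 or > MAX_PACKET_MANGLER is rejected
 *	connect(fd, (struct sockaddr *)&sc, sizeof(sc));
 *
 *	setsockopt(fd, SYSPROTO_CONTROL, PKT_MNGLR_OPT_PROTOCOL,
 *	    &proto, sizeof(proto));
 *	setsockopt(fd, SYSPROTO_CONTROL, PKT_MNGLR_OPT_PROTO_ACT_MASK,
 *	    &tcp_mask, sizeof(tcp_mask));
 *	setsockopt(fd, SYSPROTO_CONTROL, PKT_MNGLR_OPT_ACTIVATE,
 *	    &on, sizeof(on));      // arms the filter; sensibly done last
 */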
+ } + if (p_pkt_mnglr->proto != 0) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_PROTOCOL " + "already set %u", + p_pkt_mnglr->proto); + error = EINVAL; + goto done; + } + p_pkt_mnglr->proto = *(uint32_t *)data; + break; + case PKT_MNGLR_OPT_ACTIVATE: + if (len < sizeof(uint8_t)) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_ACTIVATE " + "len too small %lu", len); + error = EINVAL; + goto done; + } + if (p_pkt_mnglr->activate != 0) { + PKT_MNGLR_LOG(LOG_ERR, "PKT_MNGLR_OPT_ACTIVATE " + "already set %u", p_pkt_mnglr->activate); - break; - default: - error = ENOPROTOOPT; - break; + error = EINVAL; + goto done; + } + p_pkt_mnglr->activate = *(uint8_t *)data; + PKT_MNGLR_LOG(LOG_ERR, "p_pkt_mnglr->activate set to :%d", + p_pkt_mnglr->activate); + break; + default: + error = ENOPROTOOPT; + break; } done: pkt_mnglr_rw_unlock_exclusive(&pkt_mnglr_lck_rw); - return (error); + return error; } void pkt_mnglr_init(void) { struct kern_ctl_reg kern_ctl; - errno_t error = 0; + errno_t error = 0; vm_size_t pkt_mnglr_size = 0; PKT_MNGLR_LOG(LOG_NOTICE, ""); @@ -764,7 +766,8 @@ pkt_mnglr_init(void) } } -static errno_t pktmnglr_ipfilter_output(void *cookie, mbuf_t *data, ipf_pktopts_t options) +static errno_t +pktmnglr_ipfilter_output(void *cookie, mbuf_t *data, ipf_pktopts_t options) { struct packet_mangler *p_pkt_mnglr = (struct packet_mangler *)cookie; struct ip ip; @@ -831,9 +834,10 @@ output_done: return 0; } -#define TCP_MAX_OPTLEN 40 +#define TCP_MAX_OPTLEN 40 -static errno_t pktmnglr_ipfilter_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol) +static errno_t +pktmnglr_ipfilter_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol) { struct packet_mangler *p_pkt_mnglr = (struct packet_mangler *)cookie; struct ip ip; @@ -908,36 +912,36 @@ static errno_t pktmnglr_ipfilter_input(void *cookie, mbuf_t *data, int offset, u } switch (protocol) { - case IPPROTO_TCP: - if (ip_pld_len < (int) sizeof(tcp)) { - PKT_MNGLR_LOG(LOG_ERR, "IP total len not big enough for TCP: %d", ip_pld_len); - goto drop_it; - } - - error = mbuf_copydata(*data, offset, sizeof(tcp), &tcp); - if (error) { - PKT_MNGLR_LOG(LOG_ERR, "Could not make local TCP header copy"); - goto input_done; - } - - if (p_pkt_mnglr->lport && (p_pkt_mnglr->lport != tcp.th_dport)) { - PKT_MNGLR_LOG(LOG_INFO, "Local port and IP des port do not match"); - goto input_done; - } + case IPPROTO_TCP: + if (ip_pld_len < (int) sizeof(tcp)) { + PKT_MNGLR_LOG(LOG_ERR, "IP total len not big enough for TCP: %d", ip_pld_len); + goto drop_it; + } - if (p_pkt_mnglr->rport && (p_pkt_mnglr->rport != tcp.th_sport)) { - PKT_MNGLR_LOG(LOG_INFO, "Remote port and IP src port do not match"); - goto input_done; - } - break; - case IPPROTO_UDP: + error = mbuf_copydata(*data, offset, sizeof(tcp), &tcp); + if (error) { + PKT_MNGLR_LOG(LOG_ERR, "Could not make local TCP header copy"); goto input_done; - case IPPROTO_ICMP: - goto input_done; - case IPPROTO_ICMPV6: + } + + if (p_pkt_mnglr->lport && (p_pkt_mnglr->lport != tcp.th_dport)) { + PKT_MNGLR_LOG(LOG_INFO, "Local port and IP des port do not match"); goto input_done; - default: + } + + if (p_pkt_mnglr->rport && (p_pkt_mnglr->rport != tcp.th_sport)) { + PKT_MNGLR_LOG(LOG_INFO, "Remote port and IP src port do not match"); goto input_done; + } + break; + case IPPROTO_UDP: + goto input_done; + case IPPROTO_ICMP: + goto input_done; + case IPPROTO_ICMPV6: + goto input_done; + default: + goto input_done; } /* XXX Do IP actions here */ @@ -945,106 +949,106 @@ static errno_t pktmnglr_ipfilter_input(void *cookie, mbuf_t 
*data, int offset, u /* Protocol actions */ switch (protocol) { - case IPPROTO_TCP: - if (p_pkt_mnglr->proto_action_mask) { - char tcp_opt_buf[TCP_MAX_OPTLEN] = {0}; - int orig_tcp_optlen; - int tcp_optlen = 0; - int i = 0, off; - - off = (tcp.th_off << 2); - - if (off < (int) sizeof(struct tcphdr) || off > ip_pld_len) { - PKT_MNGLR_LOG(LOG_ERR, "TCP header offset is wrong: %d", off); - goto drop_it; - } + case IPPROTO_TCP: + if (p_pkt_mnglr->proto_action_mask) { + char tcp_opt_buf[TCP_MAX_OPTLEN] = {0}; + int orig_tcp_optlen; + int tcp_optlen = 0; + int i = 0, off; + off = (tcp.th_off << 2); - tcp_optlen = off - sizeof(struct tcphdr); + if (off < (int) sizeof(struct tcphdr) || off > ip_pld_len) { + PKT_MNGLR_LOG(LOG_ERR, "TCP header offset is wrong: %d", off); + goto drop_it; + } - PKT_MNGLR_LOG(LOG_INFO, "Packet from F5 is TCP\n"); - PKT_MNGLR_LOG(LOG_INFO, "Optlen: %d\n", tcp_optlen); - orig_tcp_optlen = tcp_optlen; - if (orig_tcp_optlen) { - error = mbuf_copydata(*data, offset+sizeof(struct tcphdr), orig_tcp_optlen, tcp_opt_buf); - if (error) { - PKT_MNGLR_LOG(LOG_ERR, "Failed to copy tcp options: error %d offset %d optlen %d", error, offset, orig_tcp_optlen); - goto input_done; - } + + tcp_optlen = off - sizeof(struct tcphdr); + + PKT_MNGLR_LOG(LOG_INFO, "Packet from F5 is TCP\n"); + PKT_MNGLR_LOG(LOG_INFO, "Optlen: %d\n", tcp_optlen); + orig_tcp_optlen = tcp_optlen; + if (orig_tcp_optlen) { + error = mbuf_copydata(*data, offset + sizeof(struct tcphdr), orig_tcp_optlen, tcp_opt_buf); + if (error) { + PKT_MNGLR_LOG(LOG_ERR, "Failed to copy tcp options: error %d offset %d optlen %d", error, offset, orig_tcp_optlen); + goto input_done; } + } - while (tcp_optlen > 0) { - if (tcp_opt_buf[i] == 0x1) { - PKT_MNGLR_LOG(LOG_INFO, "Skipping NOP\n"); - tcp_optlen--; - i++; - continue; - } else if ((tcp_opt_buf[i] != 0) && (tcp_opt_buf[i] != TCP_OPT_MULTIPATH_TCP)) { - PKT_MNGLR_LOG(LOG_INFO, "Skipping option %x\n", tcp_opt_buf[i]); - - /* Minimum TCP option size is 2 */ - if (tcp_opt_buf[i+1] < 2) { - PKT_MNGLR_LOG(LOG_ERR, "Received suspicious TCP option"); + while (tcp_optlen > 0) { + if (tcp_opt_buf[i] == 0x1) { + PKT_MNGLR_LOG(LOG_INFO, "Skipping NOP\n"); + tcp_optlen--; + i++; + continue; + } else if ((tcp_opt_buf[i] != 0) && (tcp_opt_buf[i] != TCP_OPT_MULTIPATH_TCP)) { + PKT_MNGLR_LOG(LOG_INFO, "Skipping option %x\n", tcp_opt_buf[i]); + + /* Minimum TCP option size is 2 */ + if (tcp_opt_buf[i + 1] < 2) { + PKT_MNGLR_LOG(LOG_ERR, "Received suspicious TCP option"); + goto drop_it; + } + tcp_optlen -= tcp_opt_buf[i + 1]; + i += tcp_opt_buf[i + 1]; + continue; + } else if (tcp_opt_buf[i] == TCP_OPT_MULTIPATH_TCP) { + int j = 0; + unsigned char mptcpoptlen = tcp_opt_buf[i + 1]; + uint8_t sbtver = tcp_opt_buf[i + MPTCP_SBT_VER_OFFSET]; + uint8_t subtype = sbtver >> 4; + + PKT_MNGLR_LOG(LOG_INFO, "Got MPTCP option %x\n", tcp_opt_buf[i]); + PKT_MNGLR_LOG(LOG_INFO, "Got MPTCP subtype %x\n", subtype); + if (subtype == MPTCP_SUBTYPE_DSS) { + PKT_MNGLR_LOG(LOG_INFO, "Got DSS option\n"); + PKT_MNGLR_LOG(LOG_INFO, "Protocol option mask: %d\n", p_pkt_mnglr->proto_action_mask); + if (p_pkt_mnglr->proto_action_mask & + PKT_MNGLR_TCP_ACT_DSS_DROP) { goto drop_it; } - tcp_optlen -= tcp_opt_buf[i+1]; - i += tcp_opt_buf[i+1]; - continue; - } else if (tcp_opt_buf[i] == TCP_OPT_MULTIPATH_TCP) { - int j = 0; - unsigned char mptcpoptlen = tcp_opt_buf[i+1]; - uint8_t sbtver = tcp_opt_buf[i+MPTCP_SBT_VER_OFFSET]; - uint8_t subtype = sbtver >> 4; - - PKT_MNGLR_LOG(LOG_INFO, "Got MPTCP option %x\n", tcp_opt_buf[i]); - 
PKT_MNGLR_LOG(LOG_INFO, "Got MPTCP subtype %x\n", subtype); - if (subtype == MPTCP_SUBTYPE_DSS) { - PKT_MNGLR_LOG(LOG_INFO, "Got DSS option\n"); - PKT_MNGLR_LOG(LOG_INFO, "Protocol option mask: %d\n", p_pkt_mnglr->proto_action_mask); - if (p_pkt_mnglr->proto_action_mask & - PKT_MNGLR_TCP_ACT_DSS_DROP) { - goto drop_it; - } - } + } - PKT_MNGLR_LOG(LOG_INFO, "Got MPTCP option %x\n", tcp_opt_buf[i]); - for (; j < mptcpoptlen && j < tcp_optlen; j++) { - if (p_pkt_mnglr->proto_action_mask & - PKT_MNGLR_TCP_ACT_NOP_MPTCP) { - tcp_opt_buf[i+j] = 0x1; - } + PKT_MNGLR_LOG(LOG_INFO, "Got MPTCP option %x\n", tcp_opt_buf[i]); + for (; j < mptcpoptlen && j < tcp_optlen; j++) { + if (p_pkt_mnglr->proto_action_mask & + PKT_MNGLR_TCP_ACT_NOP_MPTCP) { + tcp_opt_buf[i + j] = 0x1; } - tcp_optlen -= mptcpoptlen; - i += mptcpoptlen; - } else { - tcp_optlen--; - i++; } + tcp_optlen -= mptcpoptlen; + i += mptcpoptlen; + } else { + tcp_optlen--; + i++; } + } - if (orig_tcp_optlen) { - error = mbuf_copyback(*data, - offset+sizeof(struct tcphdr), - orig_tcp_optlen, tcp_opt_buf, MBUF_WAITOK); + if (orig_tcp_optlen) { + error = mbuf_copyback(*data, + offset + sizeof(struct tcphdr), + orig_tcp_optlen, tcp_opt_buf, MBUF_WAITOK); - if (error) { - PKT_MNGLR_LOG(LOG_ERR, - "Failed to copy tcp options back: error %d offset %d optlen %d", - error, offset, orig_tcp_optlen); - goto input_done; - } + if (error) { + PKT_MNGLR_LOG(LOG_ERR, + "Failed to copy tcp options back: error %d offset %d optlen %d", + error, offset, orig_tcp_optlen); + goto input_done; } } - break; - case IPPROTO_UDP: - /* Don't handle UDP */ - break; - case IPPROTO_ICMP: - break; - case IPPROTO_ICMPV6: - break; - default: - break; + } + break; + case IPPROTO_UDP: + /* Don't handle UDP */ + break; + case IPPROTO_ICMP: + break; + case IPPROTO_ICMPV6: + break; + default: + break; } chksm_update(*data); input_done: @@ -1056,14 +1060,16 @@ drop_it: return EJUSTRETURN; } -static void pktmnglr_ipfilter_detach(void *cookie) +static void +pktmnglr_ipfilter_detach(void *cookie) { #pragma unused(cookie) return; } /* XXX Still need to modify this to use mbuf_copy* macros */ -static void chksm_update(mbuf_t data) +static void +chksm_update(mbuf_t data) { u_int16_t ip_sum; u_int16_t tsum; @@ -1078,26 +1084,28 @@ static void chksm_update(mbuf_t data) ip->ip_sum = 0; err = mbuf_inet_cksum(data, 0, 0, ip->ip_hl << 2, &ip_sum); // ip sum - if (err == 0) + if (err == 0) { ip->ip_sum = ip_sum; + } switch (ip->ip_p) { - case IPPROTO_TCP: - tcp = (struct tcphdr *)(void *)(ptr + (ip->ip_hl << 2)); - tcp->th_sum = 0; - err = mbuf_inet_cksum(data, IPPROTO_TCP, ip->ip_hl << 2, - ntohs(ip->ip_len) - (ip->ip_hl << 2), &tsum); - if (err == 0) - tcp->th_sum = tsum; - break; - case IPPROTO_UDP: - /* Don't handle UDP */ - break; - case IPPROTO_ICMP: - break; - case IPPROTO_ICMPV6: - break; - default: - break; + case IPPROTO_TCP: + tcp = (struct tcphdr *)(void *)(ptr + (ip->ip_hl << 2)); + tcp->th_sum = 0; + err = mbuf_inet_cksum(data, IPPROTO_TCP, ip->ip_hl << 2, + ntohs(ip->ip_len) - (ip->ip_hl << 2), &tsum); + if (err == 0) { + tcp->th_sum = tsum; + } + break; + case IPPROTO_UDP: + /* Don't handle UDP */ + break; + case IPPROTO_ICMP: + break; + case IPPROTO_ICMPV6: + break; + default: + break; } mbuf_clear_csum_performed(data); diff --git a/bsd/net/packet_mangler.h b/bsd/net/packet_mangler.h index b23849910..68d7cca31 100644 --- a/bsd/net/packet_mangler.h +++ b/bsd/net/packet_mangler.h @@ -22,7 +22,7 @@ */ #ifndef __PACKET_MANGLER_H__ -#define __PACKET_MANGLER_H__ +#define 
__PACKET_MANGLER_H__ #include #include @@ -52,40 +52,40 @@ typedef enum { * to be set in the sc_id field of sockaddr_ctl for connect(2) * Note: the sc_unit is ephemeral */ -#define PACKET_MANGLER_CONTROL_NAME "com.apple.packet-mangler" - -#define PKT_MNGLR_OPT_PROTO_ACT_MASK 1 -#define PKT_MNGLR_OPT_IP_ACT_MASK 2 -#define PKT_MNGLR_OPT_LOCAL_IP 3 -#define PKT_MNGLR_OPT_REMOTE_IP 4 -#define PKT_MNGLR_OPT_LOCAL_PORT 5 -#define PKT_MNGLR_OPT_REMOTE_PORT 6 -#define PKT_MNGLR_OPT_DIRECTION 7 -#define PKT_MNGLR_OPT_PROTOCOL 8 -#define PKT_MNGLR_OPT_ACTIVATE 0xFFFFFFFF +#define PACKET_MANGLER_CONTROL_NAME "com.apple.packet-mangler" + +#define PKT_MNGLR_OPT_PROTO_ACT_MASK 1 +#define PKT_MNGLR_OPT_IP_ACT_MASK 2 +#define PKT_MNGLR_OPT_LOCAL_IP 3 +#define PKT_MNGLR_OPT_REMOTE_IP 4 +#define PKT_MNGLR_OPT_LOCAL_PORT 5 +#define PKT_MNGLR_OPT_REMOTE_PORT 6 +#define PKT_MNGLR_OPT_DIRECTION 7 +#define PKT_MNGLR_OPT_PROTOCOL 8 +#define PKT_MNGLR_OPT_ACTIVATE 0xFFFFFFFF /* Packet mangler action masks */ /* Packet Mangler TCP action mask */ -#define PKT_MNGLR_TCP_ACT_NOP_MPTCP 0x00000001 -#define PKT_MNGLR_TCP_ACT_SWAP_L_PORT 0x00000002 -#define PKT_MNGLR_TCP_ACT_SWAP_R_PORT 0x00000004 -#define PKT_MNGLR_TCP_ACT_DSS_DROP 0x00000008 -#define PKT_MNGLR_TCP_ACT_CHK_EXTENDED 0x80000000 +#define PKT_MNGLR_TCP_ACT_NOP_MPTCP 0x00000001 +#define PKT_MNGLR_TCP_ACT_SWAP_L_PORT 0x00000002 +#define PKT_MNGLR_TCP_ACT_SWAP_R_PORT 0x00000004 +#define PKT_MNGLR_TCP_ACT_DSS_DROP 0x00000008 +#define PKT_MNGLR_TCP_ACT_CHK_EXTENDED 0x80000000 /* Packet Mangler IP action mask */ -#define PKT_MNGLR_IP_ACT_FLT_L_IP 0x00000001 -#define PKT_MNGLR_IP_ACT_FLT_R_IP 0x00000002 -#define PKT_MNGLR_IP_ACT_SWAP_L_IP 0x00000004 -#define PKT_MNGLR_IP_ACT_SWAP_R_IP 0x00000008 -#define PKT_MNGLR_IP_ACT_DROP_PACKET 0x00000010 -#define PKT_MNGLR_IP_ACT_CHK_EXTENDED 0x80000000 +#define PKT_MNGLR_IP_ACT_FLT_L_IP 0x00000001 +#define PKT_MNGLR_IP_ACT_FLT_R_IP 0x00000002 +#define PKT_MNGLR_IP_ACT_SWAP_L_IP 0x00000004 +#define PKT_MNGLR_IP_ACT_SWAP_R_IP 0x00000008 +#define PKT_MNGLR_IP_ACT_DROP_PACKET 0x00000010 +#define PKT_MNGLR_IP_ACT_CHK_EXTENDED 0x80000000 /* * How many filter may be active simultaneously */ -#define PKT_MNGLR_MAX_FILTER_COUNT 1 +#define PKT_MNGLR_MAX_FILTER_COUNT 1 -#define PKT_MNGLR_VERSION_CURRENT 1 +#define PKT_MNGLR_VERSION_CURRENT 1 #endif /* PRIVATE */ @@ -93,11 +93,11 @@ typedef enum { extern int pkt_mnglr_log_level; -#define PKT_MNGLR_LOG(level, fmt, ...) \ +#define PKT_MNGLR_LOG(level, fmt, ...) \ do { \ if (pkt_mnglr_log_level >= level) \ - printf("%s:%d " fmt "\n",\ - __FUNCTION__, __LINE__, ##__VA_ARGS__); \ + printf("%s:%d " fmt "\n",\ + __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while (0) diff --git a/bsd/net/pf.c b/bsd/net/pf.c index 70f1f906d..e9fa2a37d 100644 --- a/bsd/net/pf.c +++ b/bsd/net/pf.c @@ -133,7 +133,7 @@ */ #include -#define DPFPRINTF(n, x) (pf_status.debug >= (n) ? printf x : ((void)0)) +#define DPFPRINTF(n, x) (pf_status.debug >= (n) ? printf x : ((void)0)) /* * On Mac OS X, the rtableid value is treated as the interface scope @@ -144,40 +144,40 @@ * the test against INT_MAX to handle userland apps which initialize * the field with a negative number. 
*/ -#define PF_RTABLEID_IS_VALID(r) \ +#define PF_RTABLEID_IS_VALID(r) \ ((r) > IFSCOPE_NONE && (r) <= INT_MAX) /* * Global variables */ -decl_lck_mtx_data(,pf_lock_data); -decl_lck_rw_data(,pf_perim_lock_data); +decl_lck_mtx_data(, pf_lock_data); +decl_lck_rw_data(, pf_perim_lock_data); lck_mtx_t *pf_lock = &pf_lock_data; lck_rw_t *pf_perim_lock = &pf_perim_lock_data; /* state tables */ -struct pf_state_tree_lan_ext pf_statetbl_lan_ext; -struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy; +struct pf_state_tree_lan_ext pf_statetbl_lan_ext; +struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy; -struct pf_palist pf_pabuf; -struct pf_status pf_status; +struct pf_palist pf_pabuf; +struct pf_status pf_status; -u_int32_t ticket_pabuf; +u_int32_t ticket_pabuf; -static MD5_CTX pf_tcp_secret_ctx; -static u_char pf_tcp_secret[16]; -static int pf_tcp_secret_init; -static int pf_tcp_iss_off; +static MD5_CTX pf_tcp_secret_ctx; +static u_char pf_tcp_secret[16]; +static int pf_tcp_secret_init; +static int pf_tcp_iss_off; static struct pf_anchor_stackframe { - struct pf_ruleset *rs; - struct pf_rule *r; - struct pf_anchor_node *parent; - struct pf_anchor *child; + struct pf_ruleset *rs; + struct pf_rule *r; + struct pf_anchor_node *parent; + struct pf_anchor *child; } pf_anchor_stack[64]; -struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl; -struct pool pf_state_pl, pf_state_key_pl; +struct pool pf_src_tree_pl, pf_rule_pl, pf_pooladdr_pl; +struct pool pf_state_pl, pf_state_key_pl; typedef void (*hook_fn_t)(void *); @@ -187,141 +187,141 @@ struct hook_desc { void *hd_arg; }; -#define HOOK_REMOVE 0x01 -#define HOOK_FREE 0x02 -#define HOOK_ABORT 0x04 +#define HOOK_REMOVE 0x01 +#define HOOK_FREE 0x02 +#define HOOK_ABORT 0x04 -static void *hook_establish(struct hook_desc_head *, int, - hook_fn_t, void *); -static void hook_runloop(struct hook_desc_head *, int flags); +static void *hook_establish(struct hook_desc_head *, int, + hook_fn_t, void *); +static void hook_runloop(struct hook_desc_head *, int flags); -struct pool pf_app_state_pl; -static void pf_print_addr(struct pf_addr *addr, sa_family_t af); -static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int, - u_int8_t); +struct pool pf_app_state_pl; +static void pf_print_addr(struct pf_addr *addr, sa_family_t af); +static void pf_print_sk_host(struct pf_state_host *, u_int8_t, int, + u_int8_t); -static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t); +static void pf_print_host(struct pf_addr *, u_int16_t, u_int8_t); -static void pf_init_threshold(struct pf_threshold *, u_int32_t, - u_int32_t); -static void pf_add_threshold(struct pf_threshold *); -static int pf_check_threshold(struct pf_threshold *); +static void pf_init_threshold(struct pf_threshold *, u_int32_t, + u_int32_t); +static void pf_add_threshold(struct pf_threshold *); +static int pf_check_threshold(struct pf_threshold *); -static void pf_change_ap(int, pbuf_t *, struct pf_addr *, - u_int16_t *, u_int16_t *, u_int16_t *, - struct pf_addr *, u_int16_t, u_int8_t, sa_family_t, - sa_family_t, int); -static int pf_modulate_sack(pbuf_t *, int, struct pf_pdesc *, - struct tcphdr *, struct pf_state_peer *); +static void pf_change_ap(int, pbuf_t *, struct pf_addr *, + u_int16_t *, u_int16_t *, u_int16_t *, + struct pf_addr *, u_int16_t, u_int8_t, sa_family_t, + sa_family_t, int); +static int pf_modulate_sack(pbuf_t *, int, struct pf_pdesc *, + struct tcphdr *, struct pf_state_peer *); #if INET6 -static void pf_change_a6(struct pf_addr *, u_int16_t *, - struct pf_addr *, u_int8_t); 
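/*
 * [Editorial sketch; not part of the patch.] pf_change_a6()/pf_change_addr()
 * here, like pf_change_ap() and pf_change_icmp() in this block of
 * prototypes, rewrite packet addresses in place and repair the Internet
 * checksum incrementally rather than recomputing it over the whole packet,
 * which is why these declarations thread u_int16_t checksum pointers
 * through. Per 16-bit word of a changed address the update is the RFC 1624
 * form HC' = ~(~HC + ~m + m'); a self-contained helper (hypothetical name)
 * would be:
 *
 *	static u_int16_t
 *	cksum_adjust16(u_int16_t cksum, u_int16_t old_w, u_int16_t new_w)
 *	{
 *		u_int32_t sum;
 *
 *		sum  = (u_int16_t)~cksum;   // ~HC
 *		sum += (u_int16_t)~old_w;   // + ~m (old field word)
 *		sum += new_w;               // + m' (new field word)
 *		sum  = (sum >> 16) + (sum & 0xffff);
 *		sum += sum >> 16;           // fold carries back in
 *		return (u_int16_t)~sum;
 *	}
 *
 * An IPv6 address rewrite applies this once per 16-bit word of the
 * address; the extra u_int8_t argument in pf_change_addr()'s signature is,
 * in pf convention, the UDP flag covering the quirk that a transmitted
 * UDP checksum of zero means "no checksum".
 */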
-void pf_change_addr(struct pf_addr *a, u_int16_t *c, - struct pf_addr *an, u_int8_t u, - sa_family_t af, sa_family_t afn); +static void pf_change_a6(struct pf_addr *, u_int16_t *, + struct pf_addr *, u_int8_t); +void pf_change_addr(struct pf_addr *a, u_int16_t *c, + struct pf_addr *an, u_int8_t u, + sa_family_t af, sa_family_t afn); #endif /* INET6 */ -static void pf_change_icmp(struct pf_addr *, u_int16_t *, - struct pf_addr *, struct pf_addr *, u_int16_t, - u_int16_t *, u_int16_t *, u_int16_t *, - u_int16_t *, u_int8_t, sa_family_t); -static void pf_send_tcp(const struct pf_rule *, sa_family_t, - const struct pf_addr *, const struct pf_addr *, - u_int16_t, u_int16_t, u_int32_t, u_int32_t, - u_int8_t, u_int16_t, u_int16_t, u_int8_t, int, - u_int16_t, struct ether_header *, struct ifnet *); -static void pf_send_icmp(pbuf_t *, u_int8_t, u_int8_t, - sa_family_t, struct pf_rule *); -static struct pf_rule *pf_match_translation(struct pf_pdesc *, pbuf_t *, - int, int, struct pfi_kif *, struct pf_addr *, - union pf_state_xport *, struct pf_addr *, - union pf_state_xport *, int); -static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *, - pbuf_t *, int, int, struct pfi_kif *, - struct pf_src_node **, struct pf_addr *, - union pf_state_xport *, struct pf_addr *, - union pf_state_xport *, union pf_state_xport * - ); -static void pf_attach_state(struct pf_state_key *, - struct pf_state *, int); -static void pf_detach_state(struct pf_state *, int); -static u_int32_t pf_tcp_iss(struct pf_pdesc *); -static int pf_test_rule(struct pf_rule **, struct pf_state **, - int, struct pfi_kif *, pbuf_t *, int, - void *, struct pf_pdesc *, struct pf_rule **, - struct pf_ruleset **, struct ifqueue *); +static void pf_change_icmp(struct pf_addr *, u_int16_t *, + struct pf_addr *, struct pf_addr *, u_int16_t, + u_int16_t *, u_int16_t *, u_int16_t *, + u_int16_t *, u_int8_t, sa_family_t); +static void pf_send_tcp(const struct pf_rule *, sa_family_t, + const struct pf_addr *, const struct pf_addr *, + u_int16_t, u_int16_t, u_int32_t, u_int32_t, + u_int8_t, u_int16_t, u_int16_t, u_int8_t, int, + u_int16_t, struct ether_header *, struct ifnet *); +static void pf_send_icmp(pbuf_t *, u_int8_t, u_int8_t, + sa_family_t, struct pf_rule *); +static struct pf_rule *pf_match_translation(struct pf_pdesc *, pbuf_t *, + int, int, struct pfi_kif *, struct pf_addr *, + union pf_state_xport *, struct pf_addr *, + union pf_state_xport *, int); +static struct pf_rule *pf_get_translation_aux(struct pf_pdesc *, + pbuf_t *, int, int, struct pfi_kif *, + struct pf_src_node **, struct pf_addr *, + union pf_state_xport *, struct pf_addr *, + union pf_state_xport *, union pf_state_xport * + ); +static void pf_attach_state(struct pf_state_key *, + struct pf_state *, int); +static void pf_detach_state(struct pf_state *, int); +static u_int32_t pf_tcp_iss(struct pf_pdesc *); +static int pf_test_rule(struct pf_rule **, struct pf_state **, + int, struct pfi_kif *, pbuf_t *, int, + void *, struct pf_pdesc *, struct pf_rule **, + struct pf_ruleset **, struct ifqueue *); #if DUMMYNET -static int pf_test_dummynet(struct pf_rule **, int, - struct pfi_kif *, pbuf_t **, - struct pf_pdesc *, struct ip_fw_args *); +static int pf_test_dummynet(struct pf_rule **, int, + struct pfi_kif *, pbuf_t **, + struct pf_pdesc *, struct ip_fw_args *); #endif /* DUMMYNET */ -static int pf_test_fragment(struct pf_rule **, int, - struct pfi_kif *, pbuf_t *, void *, - struct pf_pdesc *, struct pf_rule **, - struct pf_ruleset **); -static int 
pf_test_state_tcp(struct pf_state **, int, - struct pfi_kif *, pbuf_t *, int, - void *, struct pf_pdesc *, u_short *); -static int pf_test_state_udp(struct pf_state **, int, - struct pfi_kif *, pbuf_t *, int, - void *, struct pf_pdesc *, u_short *); -static int pf_test_state_icmp(struct pf_state **, int, - struct pfi_kif *, pbuf_t *, int, - void *, struct pf_pdesc *, u_short *); -static int pf_test_state_other(struct pf_state **, int, - struct pfi_kif *, struct pf_pdesc *); -static int pf_match_tag(struct pf_rule *, - struct pf_mtag *, int *); -static void pf_hash(struct pf_addr *, struct pf_addr *, - struct pf_poolhashkey *, sa_family_t); -static int pf_map_addr(u_int8_t, struct pf_rule *, - struct pf_addr *, struct pf_addr *, - struct pf_addr *, struct pf_src_node **); -static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *, - struct pf_rule *, struct pf_addr *, - union pf_state_xport *, struct pf_addr *, - union pf_state_xport *, struct pf_addr *, - union pf_state_xport *, struct pf_src_node ** - ); -static void pf_route(pbuf_t **, struct pf_rule *, int, - struct ifnet *, struct pf_state *, - struct pf_pdesc *); +static int pf_test_fragment(struct pf_rule **, int, + struct pfi_kif *, pbuf_t *, void *, + struct pf_pdesc *, struct pf_rule **, + struct pf_ruleset **); +static int pf_test_state_tcp(struct pf_state **, int, + struct pfi_kif *, pbuf_t *, int, + void *, struct pf_pdesc *, u_short *); +static int pf_test_state_udp(struct pf_state **, int, + struct pfi_kif *, pbuf_t *, int, + void *, struct pf_pdesc *, u_short *); +static int pf_test_state_icmp(struct pf_state **, int, + struct pfi_kif *, pbuf_t *, int, + void *, struct pf_pdesc *, u_short *); +static int pf_test_state_other(struct pf_state **, int, + struct pfi_kif *, struct pf_pdesc *); +static int pf_match_tag(struct pf_rule *, + struct pf_mtag *, int *); +static void pf_hash(struct pf_addr *, struct pf_addr *, + struct pf_poolhashkey *, sa_family_t); +static int pf_map_addr(u_int8_t, struct pf_rule *, + struct pf_addr *, struct pf_addr *, + struct pf_addr *, struct pf_src_node **); +static int pf_get_sport(struct pf_pdesc *, struct pfi_kif *, + struct pf_rule *, struct pf_addr *, + union pf_state_xport *, struct pf_addr *, + union pf_state_xport *, struct pf_addr *, + union pf_state_xport *, struct pf_src_node ** + ); +static void pf_route(pbuf_t **, struct pf_rule *, int, + struct ifnet *, struct pf_state *, + struct pf_pdesc *); #if INET6 -static void pf_route6(pbuf_t **, struct pf_rule *, int, - struct ifnet *, struct pf_state *, - struct pf_pdesc *); +static void pf_route6(pbuf_t **, struct pf_rule *, int, + struct ifnet *, struct pf_state *, + struct pf_pdesc *); #endif /* INET6 */ -static u_int8_t pf_get_wscale(pbuf_t *, int, u_int16_t, - sa_family_t); -static u_int16_t pf_get_mss(pbuf_t *, int, u_int16_t, - sa_family_t); -static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, - u_int16_t); -static void pf_set_rt_ifp(struct pf_state *, - struct pf_addr *, sa_family_t af); -static int pf_check_proto_cksum(pbuf_t *, int, int, - u_int8_t, sa_family_t); -static int pf_addr_wrap_neq(struct pf_addr_wrap *, - struct pf_addr_wrap *); -static struct pf_state *pf_find_state(struct pfi_kif *, - struct pf_state_key_cmp *, u_int); -static int pf_src_connlimit(struct pf_state **); -static void pf_stateins_err(const char *, struct pf_state *, - struct pfi_kif *); -static int pf_check_congestion(struct ifqueue *); +static u_int8_t pf_get_wscale(pbuf_t *, int, u_int16_t, + sa_family_t); +static u_int16_t pf_get_mss(pbuf_t *, 
int, u_int16_t, + sa_family_t); +static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, + u_int16_t); +static void pf_set_rt_ifp(struct pf_state *, + struct pf_addr *, sa_family_t af); +static int pf_check_proto_cksum(pbuf_t *, int, int, + u_int8_t, sa_family_t); +static int pf_addr_wrap_neq(struct pf_addr_wrap *, + struct pf_addr_wrap *); +static struct pf_state *pf_find_state(struct pfi_kif *, + struct pf_state_key_cmp *, u_int); +static int pf_src_connlimit(struct pf_state **); +static void pf_stateins_err(const char *, struct pf_state *, + struct pfi_kif *); +static int pf_check_congestion(struct ifqueue *); #if 0 static const char *pf_pptp_ctrl_type_name(u_int16_t code); #endif -static void pf_pptp_handler(struct pf_state *, int, int, - struct pf_pdesc *, struct pfi_kif *); -static void pf_pptp_unlink(struct pf_state *); -static void pf_grev1_unlink(struct pf_state *); -static int pf_test_state_grev1(struct pf_state **, int, - struct pfi_kif *, int, struct pf_pdesc *); -static int pf_ike_compare(struct pf_app_state *, - struct pf_app_state *); -static int pf_test_state_esp(struct pf_state **, int, - struct pfi_kif *, int, struct pf_pdesc *); +static void pf_pptp_handler(struct pf_state *, int, int, + struct pf_pdesc *, struct pfi_kif *); +static void pf_pptp_unlink(struct pf_state *); +static void pf_grev1_unlink(struct pf_state *); +static int pf_test_state_grev1(struct pf_state **, int, + struct pfi_kif *, int, struct pf_pdesc *); +static int pf_ike_compare(struct pf_app_state *, + struct pf_app_state *); +static int pf_test_state_esp(struct pf_state **, int, + struct pfi_kif *, int, struct pf_pdesc *); extern struct pool pfr_ktable_pl; extern struct pool pfr_kentry_pl; @@ -341,15 +341,17 @@ pf_lazy_makewritable(struct pf_pdesc *pd, pbuf_t *pbuf, int len) { void *p; - if (pd->lmw < 0) - return (NULL); + if (pd->lmw < 0) { + return NULL; + } VERIFY(pbuf == pd->mp); p = pbuf->pb_data; if (len > pd->lmw) { - if ((p = pbuf_ensure_writable(pbuf, len)) == NULL) + if ((p = pbuf_ensure_writable(pbuf, len)) == NULL) { len = -1; + } pd->lmw = len; if (len >= 0) { pd->pf_mtag = pf_find_mtag_pbuf(pbuf); @@ -374,16 +376,16 @@ pf_lazy_makewritable(struct pf_pdesc *pd, pbuf_t *pbuf, int len) } } - return (len < 0 ? NULL : p); + return len < 0 ? 
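/*
 * pf_lazy_makewritable() is a lazy copy-on-write helper: pd->lmw caches
 * the longest prefix of the packet made writable so far, and a negative
 * value records an earlier pbuf_ensure_writable() failure so that later
 * callers fail fast; on success, pd->pf_mtag and the related cached
 * pointers are refreshed because the underlying buffer may have moved.
 */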
NULL : p; } static const int * pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif, - int direction, int *action) + int direction, int *action) { if (*state == NULL || (*state)->timeout == PFTM_PURGE) { *action = PF_DROP; - return (action); + return action; } if (direction == PF_OUT && @@ -393,83 +395,83 @@ pf_state_lookup_aux(struct pf_state **state, struct pfi_kif *kif, (*state)->rule.ptr->direction == PF_IN)) && (*state)->rt_kif != NULL && (*state)->rt_kif != kif) { *action = PF_PASS; - return (action); + return action; } - return (0); + return 0; } -#define STATE_LOOKUP() \ - do { \ - int action; \ - *state = pf_find_state(kif, &key, direction); \ - if (*state != NULL && pd != NULL && \ - !(pd->pktflags & PKTF_FLOW_ID)) { \ - pd->flowsrc = (*state)->state_key->flowsrc; \ - pd->flowhash = (*state)->state_key->flowhash; \ - if (pd->flowhash != 0) { \ - pd->pktflags |= PKTF_FLOW_ID; \ - pd->pktflags &= ~PKTF_FLOW_ADV; \ - } \ - } \ - if (pf_state_lookup_aux(state, kif, direction, &action)) \ - return (action); \ +#define STATE_LOOKUP() \ + do { \ + int action; \ + *state = pf_find_state(kif, &key, direction); \ + if (*state != NULL && pd != NULL && \ + !(pd->pktflags & PKTF_FLOW_ID)) { \ + pd->flowsrc = (*state)->state_key->flowsrc; \ + pd->flowhash = (*state)->state_key->flowhash; \ + if (pd->flowhash != 0) { \ + pd->pktflags |= PKTF_FLOW_ID; \ + pd->pktflags &= ~PKTF_FLOW_ADV; \ + } \ + } \ + if (pf_state_lookup_aux(state, kif, direction, &action)) \ + return (action); \ } while (0) -#define STATE_ADDR_TRANSLATE(sk) \ - (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \ - ((sk)->af_lan == AF_INET6 && \ - ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \ - (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \ +#define STATE_ADDR_TRANSLATE(sk) \ + (sk)->lan.addr.addr32[0] != (sk)->gwy.addr.addr32[0] || \ + ((sk)->af_lan == AF_INET6 && \ + ((sk)->lan.addr.addr32[1] != (sk)->gwy.addr.addr32[1] || \ + (sk)->lan.addr.addr32[2] != (sk)->gwy.addr.addr32[2] || \ (sk)->lan.addr.addr32[3] != (sk)->gwy.addr.addr32[3])) -#define STATE_TRANSLATE(sk) \ - ((sk)->af_lan != (sk)->af_gwy || \ - STATE_ADDR_TRANSLATE(sk) || \ +#define STATE_TRANSLATE(sk) \ + ((sk)->af_lan != (sk)->af_gwy || \ + STATE_ADDR_TRANSLATE(sk) || \ (sk)->lan.xport.port != (sk)->gwy.xport.port) -#define STATE_GRE_TRANSLATE(sk) \ - (STATE_ADDR_TRANSLATE(sk) || \ +#define STATE_GRE_TRANSLATE(sk) \ + (STATE_ADDR_TRANSLATE(sk) || \ (sk)->lan.xport.call_id != (sk)->gwy.xport.call_id) #define BOUND_IFACE(r, k) \ ((r)->rule_flag & PFRULE_IFBOUND) ? 
(k) : pfi_all -#define STATE_INC_COUNTERS(s) \ - do { \ - s->rule.ptr->states++; \ - VERIFY(s->rule.ptr->states != 0); \ - if (s->anchor.ptr != NULL) { \ - s->anchor.ptr->states++; \ - VERIFY(s->anchor.ptr->states != 0); \ - } \ - if (s->nat_rule.ptr != NULL) { \ - s->nat_rule.ptr->states++; \ - VERIFY(s->nat_rule.ptr->states != 0); \ - } \ +#define STATE_INC_COUNTERS(s) \ + do { \ + s->rule.ptr->states++; \ + VERIFY(s->rule.ptr->states != 0); \ + if (s->anchor.ptr != NULL) { \ + s->anchor.ptr->states++; \ + VERIFY(s->anchor.ptr->states != 0); \ + } \ + if (s->nat_rule.ptr != NULL) { \ + s->nat_rule.ptr->states++; \ + VERIFY(s->nat_rule.ptr->states != 0); \ + } \ } while (0) -#define STATE_DEC_COUNTERS(s) \ - do { \ - if (s->nat_rule.ptr != NULL) { \ - VERIFY(s->nat_rule.ptr->states > 0); \ - s->nat_rule.ptr->states--; \ - } \ - if (s->anchor.ptr != NULL) { \ - VERIFY(s->anchor.ptr->states > 0); \ - s->anchor.ptr->states--; \ - } \ - VERIFY(s->rule.ptr->states > 0); \ - s->rule.ptr->states--; \ +#define STATE_DEC_COUNTERS(s) \ + do { \ + if (s->nat_rule.ptr != NULL) { \ + VERIFY(s->nat_rule.ptr->states > 0); \ + s->nat_rule.ptr->states--; \ + } \ + if (s->anchor.ptr != NULL) { \ + VERIFY(s->anchor.ptr->states > 0); \ + s->anchor.ptr->states--; \ + } \ + VERIFY(s->rule.ptr->states > 0); \ + s->rule.ptr->states--; \ } while (0) static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *); static __inline int pf_state_compare_lan_ext(struct pf_state_key *, - struct pf_state_key *); + struct pf_state_key *); static __inline int pf_state_compare_ext_gwy(struct pf_state_key *, - struct pf_state_key *); + struct pf_state_key *); static __inline int pf_state_compare_id(struct pf_state *, - struct pf_state *); + struct pf_state *); struct pf_src_tree tree_src_tracking; @@ -484,177 +486,178 @@ RB_GENERATE(pf_state_tree_ext_gwy, pf_state_key, RB_GENERATE(pf_state_tree_id, pf_state, entry_id, pf_state_compare_id); -#define PF_DT_SKIP_LANEXT 0x01 -#define PF_DT_SKIP_EXTGWY 0x02 +#define PF_DT_SKIP_LANEXT 0x01 +#define PF_DT_SKIP_EXTGWY 0x02 static const u_int16_t PF_PPTP_PORT = 1723; static const u_int32_t PF_PPTP_MAGIC_NUMBER = 0x1A2B3C4D; struct pf_pptp_hdr { - u_int16_t length; - u_int16_t type; - u_int32_t magic; + u_int16_t length; + u_int16_t type; + u_int32_t magic; }; struct pf_pptp_ctrl_hdr { - u_int16_t type; - u_int16_t reserved_0; + u_int16_t type; + u_int16_t reserved_0; }; struct pf_pptp_ctrl_generic { - u_int16_t data[0]; + u_int16_t data[0]; }; -#define PF_PPTP_CTRL_TYPE_START_REQ 1 +#define PF_PPTP_CTRL_TYPE_START_REQ 1 struct pf_pptp_ctrl_start_req { - u_int16_t protocol_version; - u_int16_t reserved_1; - u_int32_t framing_capabilities; - u_int32_t bearer_capabilities; - u_int16_t maximum_channels; - u_int16_t firmware_revision; - u_int8_t host_name[64]; - u_int8_t vendor_string[64]; + u_int16_t protocol_version; + u_int16_t reserved_1; + u_int32_t framing_capabilities; + u_int32_t bearer_capabilities; + u_int16_t maximum_channels; + u_int16_t firmware_revision; + u_int8_t host_name[64]; + u_int8_t vendor_string[64]; }; -#define PF_PPTP_CTRL_TYPE_START_RPY 2 +#define PF_PPTP_CTRL_TYPE_START_RPY 2 struct pf_pptp_ctrl_start_rpy { - u_int16_t protocol_version; - u_int8_t result_code; - u_int8_t error_code; - u_int32_t framing_capabilities; - u_int32_t bearer_capabilities; - u_int16_t maximum_channels; - u_int16_t firmware_revision; - u_int8_t host_name[64]; - u_int8_t vendor_string[64]; + u_int16_t protocol_version; + u_int8_t result_code; + u_int8_t error_code; + 
u_int32_t framing_capabilities; + u_int32_t bearer_capabilities; + u_int16_t maximum_channels; + u_int16_t firmware_revision; + u_int8_t host_name[64]; + u_int8_t vendor_string[64]; }; -#define PF_PPTP_CTRL_TYPE_STOP_REQ 3 +#define PF_PPTP_CTRL_TYPE_STOP_REQ 3 struct pf_pptp_ctrl_stop_req { - u_int8_t reason; - u_int8_t reserved_1; - u_int16_t reserved_2; + u_int8_t reason; + u_int8_t reserved_1; + u_int16_t reserved_2; }; -#define PF_PPTP_CTRL_TYPE_STOP_RPY 4 +#define PF_PPTP_CTRL_TYPE_STOP_RPY 4 struct pf_pptp_ctrl_stop_rpy { - u_int8_t reason; - u_int8_t error_code; - u_int16_t reserved_1; + u_int8_t reason; + u_int8_t error_code; + u_int16_t reserved_1; }; -#define PF_PPTP_CTRL_TYPE_ECHO_REQ 5 +#define PF_PPTP_CTRL_TYPE_ECHO_REQ 5 struct pf_pptp_ctrl_echo_req { - u_int32_t identifier; + u_int32_t identifier; }; -#define PF_PPTP_CTRL_TYPE_ECHO_RPY 6 +#define PF_PPTP_CTRL_TYPE_ECHO_RPY 6 struct pf_pptp_ctrl_echo_rpy { - u_int32_t identifier; - u_int8_t result_code; - u_int8_t error_code; - u_int16_t reserved_1; + u_int32_t identifier; + u_int8_t result_code; + u_int8_t error_code; + u_int16_t reserved_1; }; -#define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ 7 +#define PF_PPTP_CTRL_TYPE_CALL_OUT_REQ 7 struct pf_pptp_ctrl_call_out_req { - u_int16_t call_id; - u_int16_t call_sernum; - u_int32_t min_bps; - u_int32_t bearer_type; - u_int32_t framing_type; - u_int16_t rxwindow_size; - u_int16_t proc_delay; - u_int8_t phone_num[64]; - u_int8_t sub_addr[64]; + u_int16_t call_id; + u_int16_t call_sernum; + u_int32_t min_bps; + u_int32_t bearer_type; + u_int32_t framing_type; + u_int16_t rxwindow_size; + u_int16_t proc_delay; + u_int8_t phone_num[64]; + u_int8_t sub_addr[64]; }; -#define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY 8 +#define PF_PPTP_CTRL_TYPE_CALL_OUT_RPY 8 struct pf_pptp_ctrl_call_out_rpy { - u_int16_t call_id; - u_int16_t peer_call_id; - u_int8_t result_code; - u_int8_t error_code; - u_int16_t cause_code; - u_int32_t connect_speed; - u_int16_t rxwindow_size; - u_int16_t proc_delay; - u_int32_t phy_channel_id; + u_int16_t call_id; + u_int16_t peer_call_id; + u_int8_t result_code; + u_int8_t error_code; + u_int16_t cause_code; + u_int32_t connect_speed; + u_int16_t rxwindow_size; + u_int16_t proc_delay; + u_int32_t phy_channel_id; }; -#define PF_PPTP_CTRL_TYPE_CALL_IN_1ST 9 +#define PF_PPTP_CTRL_TYPE_CALL_IN_1ST 9 struct pf_pptp_ctrl_call_in_1st { - u_int16_t call_id; - u_int16_t call_sernum; - u_int32_t bearer_type; - u_int32_t phy_channel_id; - u_int16_t dialed_number_len; - u_int16_t dialing_number_len; - u_int8_t dialed_num[64]; - u_int8_t dialing_num[64]; - u_int8_t sub_addr[64]; + u_int16_t call_id; + u_int16_t call_sernum; + u_int32_t bearer_type; + u_int32_t phy_channel_id; + u_int16_t dialed_number_len; + u_int16_t dialing_number_len; + u_int8_t dialed_num[64]; + u_int8_t dialing_num[64]; + u_int8_t sub_addr[64]; }; -#define PF_PPTP_CTRL_TYPE_CALL_IN_2ND 10 +#define PF_PPTP_CTRL_TYPE_CALL_IN_2ND 10 struct pf_pptp_ctrl_call_in_2nd { - u_int16_t call_id; - u_int16_t peer_call_id; - u_int8_t result_code; - u_int8_t error_code; - u_int16_t rxwindow_size; - u_int16_t txdelay; - u_int16_t reserved_1; + u_int16_t call_id; + u_int16_t peer_call_id; + u_int8_t result_code; + u_int8_t error_code; + u_int16_t rxwindow_size; + u_int16_t txdelay; + u_int16_t reserved_1; }; -#define PF_PPTP_CTRL_TYPE_CALL_IN_3RD 11 +#define PF_PPTP_CTRL_TYPE_CALL_IN_3RD 11 struct pf_pptp_ctrl_call_in_3rd { - u_int16_t call_id; - u_int16_t reserved_1; - u_int32_t connect_speed; - u_int16_t rxwindow_size; - u_int16_t txdelay; - 
u_int32_t framing_type; + u_int16_t call_id; + u_int16_t reserved_1; + u_int32_t connect_speed; + u_int16_t rxwindow_size; + u_int16_t txdelay; + u_int32_t framing_type; }; -#define PF_PPTP_CTRL_TYPE_CALL_CLR 12 +#define PF_PPTP_CTRL_TYPE_CALL_CLR 12 struct pf_pptp_ctrl_call_clr { - u_int16_t call_id; - u_int16_t reserved_1; + u_int16_t call_id; + u_int16_t reserved_1; }; -#define PF_PPTP_CTRL_TYPE_CALL_DISC 13 +#define PF_PPTP_CTRL_TYPE_CALL_DISC 13 struct pf_pptp_ctrl_call_disc { - u_int16_t call_id; - u_int8_t result_code; - u_int8_t error_code; - u_int16_t cause_code; - u_int16_t reserved_1; - u_int8_t statistics[128]; + u_int16_t call_id; + u_int8_t result_code; + u_int8_t error_code; + u_int16_t cause_code; + u_int16_t reserved_1; + u_int8_t statistics[128]; }; -#define PF_PPTP_CTRL_TYPE_ERROR 14 +#define PF_PPTP_CTRL_TYPE_ERROR 14 struct pf_pptp_ctrl_error { - u_int16_t peer_call_id; - u_int16_t reserved_1; - u_int32_t crc_errors; - u_int32_t fr_errors; - u_int32_t hw_errors; - u_int32_t buf_errors; - u_int32_t tim_errors; - u_int32_t align_errors; + u_int16_t peer_call_id; + u_int16_t reserved_1; + u_int32_t crc_errors; + u_int32_t fr_errors; + u_int32_t hw_errors; + u_int32_t buf_errors; + u_int32_t tim_errors; + u_int32_t align_errors; }; -#define PF_PPTP_CTRL_TYPE_SET_LINKINFO 15 +#define PF_PPTP_CTRL_TYPE_SET_LINKINFO 15 struct pf_pptp_ctrl_set_linkinfo { - u_int16_t peer_call_id; - u_int16_t reserved_1; - u_int32_t tx_accm; - u_int32_t rx_accm; + u_int16_t peer_call_id; + u_int16_t reserved_1; + u_int32_t tx_accm; + u_int32_t rx_accm; }; #if 0 -static const char *pf_pptp_ctrl_type_name(u_int16_t code) +static const char * +pf_pptp_ctrl_type_name(u_int16_t code) { code = ntohs(code); @@ -663,7 +666,7 @@ static const char *pf_pptp_ctrl_type_name(u_int16_t code) static char reserved[] = "reserved-00"; sprintf(&reserved[9], "%02x", code); - return (reserved); + return reserved; } else { static const char *name[] = { "start_req", "start_rpy", "stop_req", "stop_rpy", @@ -672,42 +675,42 @@ static const char *pf_pptp_ctrl_type_name(u_int16_t code) "call_clr", "call_disc", "error", "set_linkinfo" }; - return (name[code - 1]); + return name[code - 1]; } }; #endif static const size_t PF_PPTP_CTRL_MSG_MINSIZE = - sizeof (struct pf_pptp_hdr) + sizeof (struct pf_pptp_ctrl_hdr); + sizeof(struct pf_pptp_hdr) + sizeof(struct pf_pptp_ctrl_hdr); union pf_pptp_ctrl_msg_union { - struct pf_pptp_ctrl_start_req start_req; - struct pf_pptp_ctrl_start_rpy start_rpy; - struct pf_pptp_ctrl_stop_req stop_req; - struct pf_pptp_ctrl_stop_rpy stop_rpy; - struct pf_pptp_ctrl_echo_req echo_req; - struct pf_pptp_ctrl_echo_rpy echo_rpy; - struct pf_pptp_ctrl_call_out_req call_out_req; - struct pf_pptp_ctrl_call_out_rpy call_out_rpy; - struct pf_pptp_ctrl_call_in_1st call_in_1st; - struct pf_pptp_ctrl_call_in_2nd call_in_2nd; - struct pf_pptp_ctrl_call_in_3rd call_in_3rd; - struct pf_pptp_ctrl_call_clr call_clr; - struct pf_pptp_ctrl_call_disc call_disc; - struct pf_pptp_ctrl_error error; - struct pf_pptp_ctrl_set_linkinfo set_linkinfo; - u_int8_t data[0]; + struct pf_pptp_ctrl_start_req start_req; + struct pf_pptp_ctrl_start_rpy start_rpy; + struct pf_pptp_ctrl_stop_req stop_req; + struct pf_pptp_ctrl_stop_rpy stop_rpy; + struct pf_pptp_ctrl_echo_req echo_req; + struct pf_pptp_ctrl_echo_rpy echo_rpy; + struct pf_pptp_ctrl_call_out_req call_out_req; + struct pf_pptp_ctrl_call_out_rpy call_out_rpy; + struct pf_pptp_ctrl_call_in_1st call_in_1st; + struct pf_pptp_ctrl_call_in_2nd call_in_2nd; + struct 
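/*
 * NB: the structs above mirror the PPTP (RFC 2637) control-message wire
 * format field for field; pf_pptp_ctrl_msg_union overlays all fifteen
 * payload variants, and the zero-length data[0] member gives raw byte
 * access to whichever variant is present.
 */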
pf_pptp_ctrl_call_in_3rd call_in_3rd; + struct pf_pptp_ctrl_call_clr call_clr; + struct pf_pptp_ctrl_call_disc call_disc; + struct pf_pptp_ctrl_error error; + struct pf_pptp_ctrl_set_linkinfo set_linkinfo; + u_int8_t data[0]; }; struct pf_pptp_ctrl_msg { - struct pf_pptp_hdr hdr; - struct pf_pptp_ctrl_hdr ctrl; - union pf_pptp_ctrl_msg_union msg; + struct pf_pptp_hdr hdr; + struct pf_pptp_ctrl_hdr ctrl; + union pf_pptp_ctrl_msg_union msg; }; -#define PF_GRE_FLAG_CHECKSUM_PRESENT 0x8000 -#define PF_GRE_FLAG_VERSION_MASK 0x0007 -#define PF_GRE_PPP_ETHERTYPE 0x880B +#define PF_GRE_FLAG_CHECKSUM_PRESENT 0x8000 +#define PF_GRE_FLAG_VERSION_MASK 0x0007 +#define PF_GRE_PPP_ETHERTYPE 0x880B struct pf_grev1_hdr { u_int16_t flags; @@ -715,9 +718,9 @@ struct pf_grev1_hdr { u_int16_t payload_length; u_int16_t call_id; /* - u_int32_t seqno; - u_int32_t ackno; - */ + * u_int32_t seqno; + * u_int32_t ackno; + */ }; static const u_int16_t PF_IKE_PORT = 500; @@ -728,24 +731,24 @@ struct pf_ike_hdr { u_int32_t message_id, length; }; -#define PF_IKE_PACKET_MINSIZE (sizeof (struct pf_ike_hdr)) - -#define PF_IKEv1_EXCHTYPE_BASE 1 -#define PF_IKEv1_EXCHTYPE_ID_PROTECT 2 -#define PF_IKEv1_EXCHTYPE_AUTH_ONLY 3 -#define PF_IKEv1_EXCHTYPE_AGGRESSIVE 4 -#define PF_IKEv1_EXCHTYPE_INFORMATIONAL 5 -#define PF_IKEv2_EXCHTYPE_SA_INIT 34 -#define PF_IKEv2_EXCHTYPE_AUTH 35 -#define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA 36 -#define PF_IKEv2_EXCHTYPE_INFORMATIONAL 37 - -#define PF_IKEv1_FLAG_E 0x01 -#define PF_IKEv1_FLAG_C 0x02 -#define PF_IKEv1_FLAG_A 0x04 -#define PF_IKEv2_FLAG_I 0x08 -#define PF_IKEv2_FLAG_V 0x10 -#define PF_IKEv2_FLAG_R 0x20 +#define PF_IKE_PACKET_MINSIZE (sizeof (struct pf_ike_hdr)) + +#define PF_IKEv1_EXCHTYPE_BASE 1 +#define PF_IKEv1_EXCHTYPE_ID_PROTECT 2 +#define PF_IKEv1_EXCHTYPE_AUTH_ONLY 3 +#define PF_IKEv1_EXCHTYPE_AGGRESSIVE 4 +#define PF_IKEv1_EXCHTYPE_INFORMATIONAL 5 +#define PF_IKEv2_EXCHTYPE_SA_INIT 34 +#define PF_IKEv2_EXCHTYPE_AUTH 35 +#define PF_IKEv2_EXCHTYPE_CREATE_CHILD_SA 36 +#define PF_IKEv2_EXCHTYPE_INFORMATIONAL 37 + +#define PF_IKEv1_FLAG_E 0x01 +#define PF_IKEv1_FLAG_C 0x02 +#define PF_IKEv1_FLAG_A 0x04 +#define PF_IKEv2_FLAG_I 0x08 +#define PF_IKEv2_FLAG_V 0x10 +#define PF_IKEv2_FLAG_R 0x20 struct pf_esp_hdr { u_int32_t spi; @@ -759,102 +762,126 @@ pf_addr_compare(struct pf_addr *a, struct pf_addr *b, sa_family_t af) switch (af) { #ifdef INET case AF_INET: - if (a->addr32[0] > b->addr32[0]) - return (1); - if (a->addr32[0] < b->addr32[0]) - return (-1); + if (a->addr32[0] > b->addr32[0]) { + return 1; + } + if (a->addr32[0] < b->addr32[0]) { + return -1; + } break; #endif /* INET */ #ifdef INET6 case AF_INET6: - if (a->addr32[3] > b->addr32[3]) - return (1); - if (a->addr32[3] < b->addr32[3]) - return (-1); - if (a->addr32[2] > b->addr32[2]) - return (1); - if (a->addr32[2] < b->addr32[2]) - return (-1); - if (a->addr32[1] > b->addr32[1]) - return (1); - if (a->addr32[1] < b->addr32[1]) - return (-1); - if (a->addr32[0] > b->addr32[0]) - return (1); - if (a->addr32[0] < b->addr32[0]) - return (-1); + if (a->addr32[3] > b->addr32[3]) { + return 1; + } + if (a->addr32[3] < b->addr32[3]) { + return -1; + } + if (a->addr32[2] > b->addr32[2]) { + return 1; + } + if (a->addr32[2] < b->addr32[2]) { + return -1; + } + if (a->addr32[1] > b->addr32[1]) { + return 1; + } + if (a->addr32[1] < b->addr32[1]) { + return -1; + } + if (a->addr32[0] > b->addr32[0]) { + return 1; + } + if (a->addr32[0] < b->addr32[0]) { + return -1; + } break; #endif /* INET6 */ } - return (0); + return 0; } static 
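/*
 * pf_addr_compare() yields a memcmp-style -1/0/1; note that IPv6 is
 * compared word by word starting from addr32[3]. The RB-tree comparators
 * that follow only need a stable total order, so the word ordering is
 * arbitrary but must stay consistent.
 */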
__inline int pf_src_compare(struct pf_src_node *a, struct pf_src_node *b) { - int diff; - - if (a->rule.ptr > b->rule.ptr) - return (1); - if (a->rule.ptr < b->rule.ptr) - return (-1); - if ((diff = a->af - b->af) != 0) - return (diff); - if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0) - return (diff); - return (0); + int diff; + + if (a->rule.ptr > b->rule.ptr) { + return 1; + } + if (a->rule.ptr < b->rule.ptr) { + return -1; + } + if ((diff = a->af - b->af) != 0) { + return diff; + } + if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0) { + return diff; + } + return 0; } static __inline int pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b) { - int diff; - int extfilter; + int diff; + int extfilter; - if ((diff = a->proto - b->proto) != 0) - return (diff); - if ((diff = a->af_lan - b->af_lan) != 0) - return (diff); + if ((diff = a->proto - b->proto) != 0) { + return diff; + } + if ((diff = a->af_lan - b->af_lan) != 0) { + return diff; + } extfilter = PF_EXTFILTER_APD; switch (a->proto) { case IPPROTO_ICMP: case IPPROTO_ICMPV6: - if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) - return (diff); + if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) { + return diff; + } break; case IPPROTO_TCP: - if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) - return (diff); - if ((diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0) - return (diff); + if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) { + return diff; + } + if ((diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0) { + return diff; + } break; case IPPROTO_UDP: - if ((diff = a->proto_variant - b->proto_variant)) - return (diff); + if ((diff = a->proto_variant - b->proto_variant)) { + return diff; + } extfilter = a->proto_variant; - if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) - return (diff); + if ((diff = a->lan.xport.port - b->lan.xport.port) != 0) { + return diff; + } if ((extfilter < PF_EXTFILTER_AD) && - (diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0) - return (diff); + (diff = a->ext_lan.xport.port - b->ext_lan.xport.port) != 0) { + return diff; + } break; case IPPROTO_GRE: if (a->proto_variant == PF_GRE_PPTP_VARIANT && a->proto_variant == b->proto_variant) { if (!!(diff = a->ext_lan.xport.call_id - - b->ext_lan.xport.call_id)) - return (diff); + b->ext_lan.xport.call_id)) { + return diff; + } } break; case IPPROTO_ESP: - if (!!(diff = a->ext_lan.xport.spi - b->ext_lan.xport.spi)) - return (diff); + if (!!(diff = a->ext_lan.xport.spi - b->ext_lan.xport.spi)) { + return diff; + } break; default: @@ -865,29 +892,33 @@ pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b) #if INET case AF_INET: if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr, - a->af_lan)) != 0) - return (diff); + a->af_lan)) != 0) { + return diff; + } if (extfilter < PF_EXTFILTER_EI) { if ((diff = pf_addr_compare(&a->ext_lan.addr, - &b->ext_lan.addr, - a->af_lan)) != 0) - return (diff); + &b->ext_lan.addr, + a->af_lan)) != 0) { + return diff; + } } break; #endif /* INET */ #if INET6 case AF_INET6: if ((diff = pf_addr_compare(&a->lan.addr, &b->lan.addr, - a->af_lan)) != 0) - return (diff); + a->af_lan)) != 0) { + return diff; + } if (extfilter < PF_EXTFILTER_EI || !PF_AZERO(&b->ext_lan.addr, AF_INET6)) { if ((diff = pf_addr_compare(&a->ext_lan.addr, - &b->ext_lan.addr, - a->af_lan)) != 0) - return (diff); + &b->ext_lan.addr, + a->af_lan)) != 0) { + return diff; + } } break; #endif /* INET6 */ @@ -898,69 +929,81 @@ 
pf_state_compare_lan_ext(struct pf_state_key *a, struct pf_state_key *b) b->app_state->compare_lan_ext) { diff = (const char *)b->app_state->compare_lan_ext - (const char *)a->app_state->compare_lan_ext; - if (diff != 0) - return (diff); + if (diff != 0) { + return diff; + } diff = a->app_state->compare_lan_ext(a->app_state, b->app_state); - if (diff != 0) - return (diff); + if (diff != 0) { + return diff; + } } } - return (0); + return 0; } static __inline int pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b) { - int diff; - int extfilter; + int diff; + int extfilter; - if ((diff = a->proto - b->proto) != 0) - return (diff); + if ((diff = a->proto - b->proto) != 0) { + return diff; + } - if ((diff = a->af_gwy - b->af_gwy) != 0) - return (diff); + if ((diff = a->af_gwy - b->af_gwy) != 0) { + return diff; + } extfilter = PF_EXTFILTER_APD; switch (a->proto) { case IPPROTO_ICMP: case IPPROTO_ICMPV6: - if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) - return (diff); + if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) { + return diff; + } break; case IPPROTO_TCP: - if ((diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0) - return (diff); - if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) - return (diff); + if ((diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0) { + return diff; + } + if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) { + return diff; + } break; case IPPROTO_UDP: - if ((diff = a->proto_variant - b->proto_variant)) - return (diff); + if ((diff = a->proto_variant - b->proto_variant)) { + return diff; + } extfilter = a->proto_variant; - if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) - return (diff); + if ((diff = a->gwy.xport.port - b->gwy.xport.port) != 0) { + return diff; + } if ((extfilter < PF_EXTFILTER_AD) && - (diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0) - return (diff); + (diff = a->ext_gwy.xport.port - b->ext_gwy.xport.port) != 0) { + return diff; + } break; case IPPROTO_GRE: if (a->proto_variant == PF_GRE_PPTP_VARIANT && a->proto_variant == b->proto_variant) { if (!!(diff = a->gwy.xport.call_id - - b->gwy.xport.call_id)) - return (diff); + b->gwy.xport.call_id)) { + return diff; + } } break; case IPPROTO_ESP: - if (!!(diff = a->gwy.xport.spi - b->gwy.xport.spi)) - return (diff); + if (!!(diff = a->gwy.xport.spi - b->gwy.xport.spi)) { + return diff; + } break; default: @@ -971,27 +1014,31 @@ pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b) #if INET case AF_INET: if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr, - a->af_gwy)) != 0) - return (diff); + a->af_gwy)) != 0) { + return diff; + } if (extfilter < PF_EXTFILTER_EI) { if ((diff = pf_addr_compare(&a->ext_gwy.addr, &b->ext_gwy.addr, - a->af_gwy)) != 0) - return (diff); + a->af_gwy)) != 0) { + return diff; + } } break; #endif /* INET */ #if INET6 case AF_INET6: if ((diff = pf_addr_compare(&a->gwy.addr, &b->gwy.addr, - a->af_gwy)) != 0) - return (diff); + a->af_gwy)) != 0) { + return diff; + } if (extfilter < PF_EXTFILTER_EI || !PF_AZERO(&b->ext_gwy.addr, AF_INET6)) { if ((diff = pf_addr_compare(&a->ext_gwy.addr, &b->ext_gwy.addr, - a->af_gwy)) != 0) - return (diff); + a->af_gwy)) != 0) { + return diff; + } } break; #endif /* INET6 */ @@ -1002,31 +1049,37 @@ pf_state_compare_ext_gwy(struct pf_state_key *a, struct pf_state_key *b) b->app_state->compare_ext_gwy) { diff = (const char *)b->app_state->compare_ext_gwy - (const char *)a->app_state->compare_ext_gwy; - if (diff != 0) - return (diff); + if 
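/*
 * When both state keys carry an app_state, the difference between the
 * addresses of their compare callbacks is used first as a cheap
 * discriminator (grouping states by application type) before the
 * app-specific comparator itself runs.
 */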
(diff != 0) { + return diff; + } diff = a->app_state->compare_ext_gwy(a->app_state, b->app_state); - if (diff != 0) - return (diff); + if (diff != 0) { + return diff; + } } } - return (0); + return 0; } static __inline int pf_state_compare_id(struct pf_state *a, struct pf_state *b) { - if (a->id > b->id) - return (1); - if (a->id < b->id) - return (-1); - if (a->creatorid > b->creatorid) - return (1); - if (a->creatorid < b->creatorid) - return (-1); - - return (0); + if (a->id > b->id) { + return 1; + } + if (a->id < b->id) { + return -1; + } + if (a->creatorid > b->creatorid) { + return 1; + } + if (a->creatorid < b->creatorid) { + return -1; + } + + return 0; } #if INET6 @@ -1054,15 +1107,15 @@ pf_find_state_byid(struct pf_state_cmp *key) { pf_status.fcounters[FCNT_STATE_SEARCH]++; - return (RB_FIND(pf_state_tree_id, &tree_id, - (struct pf_state *)(void *)key)); + return RB_FIND(pf_state_tree_id, &tree_id, + (struct pf_state *)(void *)key); } static struct pf_state * pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir) { - struct pf_state_key *sk = NULL; - struct pf_state *s; + struct pf_state_key *sk = NULL; + struct pf_state *s; pf_status.fcounters[FCNT_STATE_SEARCH]++; @@ -1080,10 +1133,11 @@ pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir) */ if (sk == NULL) { sk = RB_FIND(pf_state_tree_lan_ext, - &pf_statetbl_lan_ext, - (struct pf_state_key *)key); - if (sk && sk->af_lan == sk->af_gwy) + &pf_statetbl_lan_ext, + (struct pf_state_key *)key); + if (sk && sk->af_lan == sk->af_gwy) { sk = NULL; + } } break; default: @@ -1091,19 +1145,21 @@ pf_find_state(struct pfi_kif *kif, struct pf_state_key_cmp *key, u_int dir) } /* list is sorted, if-bound states before floating ones */ - if (sk != NULL) + if (sk != NULL) { TAILQ_FOREACH(s, &sk->states, next) - if (s->kif == pfi_all || s->kif == kif) - return (s); + if (s->kif == pfi_all || s->kif == kif) { + return s; + } + } - return (NULL); + return NULL; } struct pf_state * pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more) { - struct pf_state_key *sk = NULL; - struct pf_state *s, *ret = NULL; + struct pf_state_key *sk = NULL; + struct pf_state *s, *ret = NULL; pf_status.fcounters[FCNT_STATE_SEARCH]++; @@ -1121,10 +1177,11 @@ pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more) */ if ((sk == NULL) && pf_nat64_configured) { sk = RB_FIND(pf_state_tree_lan_ext, - &pf_statetbl_lan_ext, - (struct pf_state_key *)key); - if (sk && sk->af_lan == sk->af_gwy) + &pf_statetbl_lan_ext, + (struct pf_state_key *)key); + if (sk && sk->af_lan == sk->af_gwy) { sk = NULL; + } } break; default: @@ -1133,14 +1190,15 @@ pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more) if (sk != NULL) { ret = TAILQ_FIRST(&sk->states); - if (more == NULL) - return (ret); + if (more == NULL) { + return ret; + } TAILQ_FOREACH(s, &sk->states, next) - (*more)++; + (*more)++; } - return (ret); + return ret; } static void @@ -1158,11 +1216,12 @@ pf_add_threshold(struct pf_threshold *threshold) { u_int32_t t = pf_time_second(), diff = t - threshold->last; - if (diff >= threshold->seconds) + if (diff >= threshold->seconds) { threshold->count = 0; - else + } else { threshold->count -= threshold->count * diff / threshold->seconds; + } threshold->count += PF_THRESHOLD_MULT; threshold->last = t; } @@ -1170,7 +1229,7 @@ pf_add_threshold(struct pf_threshold *threshold) static int pf_check_threshold(struct pf_threshold *threshold) { - return (threshold->count > threshold->limit); + return 
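/*
 * pf_add_threshold() ages the counter linearly over the configured window
 * (count -= count * elapsed / seconds, zeroed once a full window has
 * passed) and adds PF_THRESHOLD_MULT per event; the check below trips
 * once the scaled count exceeds the limit, yielding an approximate
 * connections-per-interval rate limit.
 */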
threshold->count > threshold->limit; } static int @@ -1195,21 +1254,22 @@ pf_src_connlimit(struct pf_state **state) bad++; } - if (!bad) - return (0); + if (!bad) { + return 0; + } if ((*state)->rule.ptr->overload_tbl) { struct pfr_addr p; - u_int32_t killed = 0; + u_int32_t killed = 0; pf_status.lcounters[LCNT_OVERLOAD_TABLE]++; if (pf_status.debug >= PF_DEBUG_MISC) { printf("pf_src_connlimit: blocking address "); pf_print_host(&(*state)->src_node->addr, 0, - (*state)->state_key->af_lan); + (*state)->state_key->af_lan); } - bzero(&p, sizeof (p)); + bzero(&p, sizeof(p)); p.pfra_af = (*state)->state_key->af_lan; switch ((*state)->state_key->af_lan) { #if INET @@ -1245,12 +1305,12 @@ pf_src_connlimit(struct pf_state **state) if (sk->af_lan == (*state)->state_key->af_lan && (((*state)->state_key->direction == - PF_OUT && + PF_OUT && PF_AEQ(&(*state)->src_node->addr, - &sk->lan.addr, sk->af_lan)) || + &sk->lan.addr, sk->af_lan)) || ((*state)->state_key->direction == PF_IN && PF_AEQ(&(*state)->src_node->addr, - &sk->ext_lan.addr, sk->af_lan))) && + &sk->ext_lan.addr, sk->af_lan))) && ((*state)->rule.ptr->flush & PF_FLUSH_GLOBAL || (*state)->rule.ptr == st->rule.ptr)) { @@ -1260,45 +1320,50 @@ pf_src_connlimit(struct pf_state **state) killed++; } } - if (pf_status.debug >= PF_DEBUG_MISC) + if (pf_status.debug >= PF_DEBUG_MISC) { printf(", %u states killed", killed); + } } - if (pf_status.debug >= PF_DEBUG_MISC) + if (pf_status.debug >= PF_DEBUG_MISC) { printf("\n"); + } } /* kill this state */ (*state)->timeout = PFTM_PURGE; (*state)->src.state = (*state)->dst.state = TCPS_CLOSED; - return (1); + return 1; } int pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, struct pf_addr *src, sa_family_t af) { - struct pf_src_node k; + struct pf_src_node k; if (*sn == NULL) { k.af = af; PF_ACPY(&k.addr, src, af); if (rule->rule_flag & PFRULE_RULESRCTRACK || - rule->rpool.opts & PF_POOL_STICKYADDR) + rule->rpool.opts & PF_POOL_STICKYADDR) { k.rule.ptr = rule; - else + } else { k.rule.ptr = NULL; + } pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k); } if (*sn == NULL) { if (!rule->max_src_nodes || - rule->src_nodes < rule->max_src_nodes) + rule->src_nodes < rule->max_src_nodes) { (*sn) = pool_get(&pf_src_tree_pl, PR_WAITOK); - else + } else { pf_status.lcounters[LCNT_SRCNODES]++; - if ((*sn) == NULL) - return (-1); - bzero(*sn, sizeof (struct pf_src_node)); + } + if ((*sn) == NULL) { + return -1; + } + bzero(*sn, sizeof(struct pf_src_node)); pf_init_threshold(&(*sn)->conn_rate, rule->max_src_conn_rate.limit, @@ -1306,10 +1371,11 @@ pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, (*sn)->af = af; if (rule->rule_flag & PFRULE_RULESRCTRACK || - rule->rpool.opts & PF_POOL_STICKYADDR) + rule->rpool.opts & PF_POOL_STICKYADDR) { (*sn)->rule.ptr = rule; - else + } else { (*sn)->rule.ptr = NULL; + } PF_ACPY(&(*sn)->addr, src, af); if (RB_INSERT(pf_src_tree, &tree_src_tracking, *sn) != NULL) { @@ -1319,28 +1385,29 @@ pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule, printf("\n"); } pool_put(&pf_src_tree_pl, *sn); - return (-1); + return -1; } (*sn)->creation = pf_time_second(); (*sn)->ruletype = rule->action; - if ((*sn)->rule.ptr != NULL) + if ((*sn)->rule.ptr != NULL) { (*sn)->rule.ptr->src_nodes++; + } pf_status.scounters[SCNT_SRC_NODE_INSERT]++; pf_status.src_nodes++; } else { if (rule->max_src_states && (*sn)->states >= rule->max_src_states) { pf_status.lcounters[LCNT_SRCSTATES]++; - return (-1); + return -1; } } - 
return (0); + return 0; } static void pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif) { - struct pf_state_key *sk = s->state_key; + struct pf_state_key *sk = s->state_key; if (pf_status.debug >= PF_DEBUG_MISC) { printf("pf: state insert failed: %s %s ", tree, kif->pfik_name); @@ -1373,8 +1440,9 @@ pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif) printf(" ext_gwy: "); pf_print_sk_host(&sk->ext_gwy, sk->af_gwy, sk->proto, sk->proto_variant); - if (s->sync_flags & PFSTATE_FROMSYNC) + if (s->sync_flags & PFSTATE_FROMSYNC) { printf(" (from sync)"); + } printf("\n"); } } @@ -1382,8 +1450,8 @@ pf_stateins_err(const char *tree, struct pf_state *s, struct pfi_kif *kif) int pf_insert_state(struct pfi_kif *kif, struct pf_state *s) { - struct pf_state_key *cur; - struct pf_state *sp; + struct pf_state_key *cur; + struct pf_state *sp; VERIFY(s->state_key != NULL); s->kif = kif; @@ -1392,13 +1460,13 @@ pf_insert_state(struct pfi_kif *kif, struct pf_state *s) s->state_key)) != NULL) { /* key exists. check for same kif, if none, add to key */ TAILQ_FOREACH(sp, &cur->states, next) - if (sp->kif == kif) { /* collision! */ - pf_stateins_err("tree_lan_ext", s, kif); - pf_detach_state(s, - PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY); - return (-1); - } - pf_detach_state(s, PF_DT_SKIP_LANEXT|PF_DT_SKIP_EXTGWY); + if (sp->kif == kif) { /* collision! */ + pf_stateins_err("tree_lan_ext", s, kif); + pf_detach_state(s, + PF_DT_SKIP_LANEXT | PF_DT_SKIP_EXTGWY); + return -1; + } + pf_detach_state(s, PF_DT_SKIP_LANEXT | PF_DT_SKIP_EXTGWY); pf_attach_state(cur, s, kif == pfi_all ? 1 : 0); } @@ -1408,7 +1476,7 @@ pf_insert_state(struct pfi_kif *kif, struct pf_state *s) /* must not happen. we must have found the sk above! */ pf_stateins_err("tree_ext_gwy", s, kif); pf_detach_state(s, PF_DT_SKIP_EXTGWY); - return (-1); + return -1; } if (s->id == 0 && s->creatorid == 0) { @@ -1420,12 +1488,13 @@ pf_insert_state(struct pfi_kif *kif, struct pf_state *s) printf("pf: state insert failed: " "id: %016llx creatorid: %08x", be64toh(s->id), ntohl(s->creatorid)); - if (s->sync_flags & PFSTATE_FROMSYNC) + if (s->sync_flags & PFSTATE_FROMSYNC) { printf(" (from sync)"); + } printf("\n"); } pf_detach_state(s, 0); - return (-1); + return -1; } TAILQ_INSERT_TAIL(&state_list, s, entry_list); pf_status.fcounters[FCNT_STATE_INSERT]++; @@ -1435,7 +1504,7 @@ pf_insert_state(struct pfi_kif *kif, struct pf_state *s) #if NPFSYNC pfsync_insert_state(s); #endif - return (0); + return 0; } static int @@ -1443,7 +1512,7 @@ pf_purge_thread_cont(int err) { #pragma unused(err) static u_int32_t nloops = 0; - int t = 1; /* 1 second */ + int t = 1; /* 1 second */ /* * Update coarse-grained networking timestamp (in sec.); the idea @@ -1469,7 +1538,7 @@ pf_purge_thread_cont(int err) thread_deallocate(current_thread()); thread_terminate(current_thread()); /* NOTREACHED */ - return (0); + return 0; } else { /* if there's nothing left, sleep w/o timeout */ if (pf_status.states == 0 && @@ -1501,7 +1570,7 @@ done: /* NOTREACHED */ VERIFY(0); - return (0); + return 0; } void @@ -1520,22 +1589,24 @@ pf_purge_thread_fn(void *v, wait_result_t w) u_int64_t pf_state_expires(const struct pf_state *state) { - u_int32_t t; - u_int32_t start; - u_int32_t end; - u_int32_t states; + u_int32_t t; + u_int32_t start; + u_int32_t end; + u_int32_t states; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); /* handle all PFTM_* > PFTM_MAX here */ - if (state->timeout == PFTM_PURGE) - return (pf_time_second()); + if (state->timeout == 
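/*
 * Below, adaptive timeouts: with the state count between ADAPTIVE_START
 * and ADAPTIVE_END, the base timeout t is scaled by
 * (end - states) / (end - start), so lifetimes shrink as the state table
 * fills; at or beyond the end mark the state expires immediately
 * (pf_time_second()).
 */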
PFTM_PURGE) { + return pf_time_second(); + } VERIFY(state->timeout != PFTM_UNLINKED); VERIFY(state->timeout < PFTM_MAX); t = state->rule.ptr->timeout[state->timeout]; - if (!t) + if (!t) { t = pf_default_rule.timeout[state->timeout]; + } start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START]; if (start) { end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END]; @@ -1546,19 +1617,20 @@ pf_state_expires(const struct pf_state *state) states = pf_status.states; } if (end && states > start && start < end) { - if (states < end) - return (state->expire + t * (end - states) / - (end - start)); - else - return (pf_time_second()); + if (states < end) { + return state->expire + t * (end - states) / + (end - start); + } else { + return pf_time_second(); + } } - return (state->expire + t); + return state->expire + t; } void pf_purge_expired_src_nodes(void) { - struct pf_src_node *cur, *next; + struct pf_src_node *cur, *next; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); @@ -1569,8 +1641,9 @@ pf_purge_expired_src_nodes(void) if (cur->rule.ptr != NULL) { cur->rule.ptr->src_nodes--; if (cur->rule.ptr->states <= 0 && - cur->rule.ptr->max_src_nodes <= 0) + cur->rule.ptr->max_src_nodes <= 0) { pf_rm_rule(NULL, cur->rule.ptr); + } } RB_REMOVE(pf_src_tree, &tree_src_tracking, cur); pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++; @@ -1595,8 +1668,9 @@ pf_src_tree_remove_state(struct pf_state *s) VERIFY(s->src_node->states > 0); if (--s->src_node->states <= 0) { t = s->rule.ptr->timeout[PFTM_SRC_NODE]; - if (!t) + if (!t) { t = pf_default_rule.timeout[PFTM_SRC_NODE]; + } s->src_node->expire = pf_time_second() + t; } } @@ -1604,8 +1678,9 @@ pf_src_tree_remove_state(struct pf_state *s) VERIFY(s->nat_src_node->states > 0); if (--s->nat_src_node->states <= 0) { t = s->rule.ptr->timeout[PFTM_SRC_NODE]; - if (!t) + if (!t) { t = pf_default_rule.timeout[PFTM_SRC_NODE]; + } s->nat_src_node->expire = pf_time_second() + t; } } @@ -1623,14 +1698,15 @@ pf_unlink_state(struct pf_state *cur) cur->state_key->ext_lan.xport.port, cur->state_key->lan.xport.port, cur->src.seqhi, cur->src.seqlo + 1, - TH_RST|TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL); + TH_RST | TH_ACK, 0, 0, 0, 1, cur->tag, NULL, NULL); } - hook_runloop(&cur->unlink_hooks, HOOK_REMOVE|HOOK_FREE); + hook_runloop(&cur->unlink_hooks, HOOK_REMOVE | HOOK_FREE); RB_REMOVE(pf_state_tree_id, &tree_id, cur); #if NPFSYNC - if (cur->creatorid == pf_status.hostid) + if (cur->creatorid == pf_status.hostid) { pfsync_delete_state(cur); + } #endif cur->timeout = PFTM_UNLINKED; pf_src_tree_remove_state(cur); @@ -1646,30 +1722,35 @@ pf_free_state(struct pf_state *cur) #if NPFSYNC if (pfsyncif != NULL && (pfsyncif->sc_bulk_send_next == cur || - pfsyncif->sc_bulk_terminator == cur)) + pfsyncif->sc_bulk_terminator == cur)) { return; + } #endif VERIFY(cur->timeout == PFTM_UNLINKED); VERIFY(cur->rule.ptr->states > 0); if (--cur->rule.ptr->states <= 0 && - cur->rule.ptr->src_nodes <= 0) + cur->rule.ptr->src_nodes <= 0) { pf_rm_rule(NULL, cur->rule.ptr); + } if (cur->nat_rule.ptr != NULL) { VERIFY(cur->nat_rule.ptr->states > 0); if (--cur->nat_rule.ptr->states <= 0 && - cur->nat_rule.ptr->src_nodes <= 0) + cur->nat_rule.ptr->src_nodes <= 0) { pf_rm_rule(NULL, cur->nat_rule.ptr); + } } if (cur->anchor.ptr != NULL) { VERIFY(cur->anchor.ptr->states > 0); - if (--cur->anchor.ptr->states <= 0) + if (--cur->anchor.ptr->states <= 0) { pf_rm_rule(NULL, cur->anchor.ptr); + } } pf_normalize_tcp_cleanup(cur); pfi_kif_unref(cur->kif, PFI_KIF_REF_STATE); TAILQ_REMOVE(&state_list, cur, entry_list); - if 
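/*
 * Rule teardown is deferred: pf_free_state() drops the per-rule state
 * count and only calls pf_rm_rule() once both the states and src_nodes
 * references of a rule (and of its nat/anchor rules) have drained, so a
 * rule can outlive its removal from the ruleset.
 */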
(cur->tag) + if (cur->tag) { pf_tag_unref(cur->tag); + } pool_put(&pf_state_pl, cur); pf_status.fcounters[FCNT_STATE_REMOVALS]++; VERIFY(pf_status.states > 0); @@ -1679,8 +1760,8 @@ pf_free_state(struct pf_state *cur) void pf_purge_expired_states(u_int32_t maxcheck) { - static struct pf_state *cur = NULL; - struct pf_state *next; + static struct pf_state *cur = NULL; + struct pf_state *next; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); @@ -1688,8 +1769,9 @@ pf_purge_expired_states(u_int32_t maxcheck) /* wrap to start of list when we hit the end */ if (cur == NULL) { cur = TAILQ_FIRST(&state_list); - if (cur == NULL) - break; /* list empty */ + if (cur == NULL) { + break; /* list empty */ + } } /* get next state, as cur may get deleted */ @@ -1711,11 +1793,13 @@ pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw) { LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (aw->type != PF_ADDR_TABLE) - return (0); - if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL) - return (1); - return (0); + if (aw->type != PF_ADDR_TABLE) { + return 0; + } + if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname)) == NULL) { + return 1; + } + return 0; } void @@ -1723,8 +1807,9 @@ pf_tbladdr_remove(struct pf_addr_wrap *aw) { LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL) + if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL) { return; + } pfr_detach_table(aw->p.tbl); aw->p.tbl = NULL; } @@ -1736,10 +1821,12 @@ pf_tbladdr_copyout(struct pf_addr_wrap *aw) LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (aw->type != PF_ADDR_TABLE || kt == NULL) + if (aw->type != PF_ADDR_TABLE || kt == NULL) { return; - if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) + } + if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) { kt = kt->pfrkt_root; + } aw->p.tbl = NULL; aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ? 
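/*
 * On copyout the kernel table pointer is scrubbed and replaced with the
 * table's entry count, or -1 when the table is not active; userland never
 * sees a kernel address here.
 */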
kt->pfrkt_cnt : -1; @@ -1752,8 +1839,8 @@ pf_print_addr(struct pf_addr *addr, sa_family_t af) #if INET case AF_INET: { u_int32_t a = ntohl(addr->addr32[0]); - printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255, - (a>>8)&255, a&255); + printf("%u.%u.%u.%u", (a >> 24) & 255, (a >> 16) & 255, + (a >> 8) & 255, a & 255); break; } #endif /* INET */ @@ -1764,10 +1851,11 @@ pf_print_addr(struct pf_addr *addr, sa_family_t af) maxstart = 0, maxend = 0; for (i = 0; i < 8; i++) { if (!addr->addr16[i]) { - if (curstart == 255) + if (curstart == 255) { curstart = i; - else + } else { curend = i; + } } else { if (curstart) { if ((curend - curstart) > @@ -1782,17 +1870,20 @@ pf_print_addr(struct pf_addr *addr, sa_family_t af) for (i = 0; i < 8; i++) { if (i >= maxstart && i <= maxend) { if (maxend != 7) { - if (i == maxstart) + if (i == maxstart) { printf(":"); + } } else { - if (i == maxend) + if (i == maxend) { printf(":"); + } } } else { b = ntohs(addr->addr16[i]); printf("%x", b); - if (i < 7) + if (i < 7) { printf(":"); + } } } break; @@ -1803,19 +1894,21 @@ pf_print_addr(struct pf_addr *addr, sa_family_t af) static void pf_print_sk_host(struct pf_state_host *sh, sa_family_t af, int proto, - u_int8_t proto_variant) + u_int8_t proto_variant) { pf_print_addr(&sh->addr, af); switch (proto) { case IPPROTO_ESP: - if (sh->xport.spi) + if (sh->xport.spi) { printf("[%08x]", ntohl(sh->xport.spi)); + } break; case IPPROTO_GRE: - if (proto_variant == PF_GRE_PPTP_VARIANT) + if (proto_variant == PF_GRE_PPTP_VARIANT) { printf("[%u]", ntohs(sh->xport.call_id)); + } break; case IPPROTO_TCP: @@ -1832,8 +1925,9 @@ static void pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af) { pf_print_addr(addr, af); - if (p) + if (p) { printf("[%u]", ntohs(p)); + } } void @@ -1868,19 +1962,21 @@ pf_print_state(struct pf_state *s) pf_print_sk_host(&sk->gwy, sk->af_gwy, sk->proto, sk->proto_variant); printf(" "); pf_print_sk_host(&sk->ext_lan, sk->af_lan, sk->proto, - sk->proto_variant); + sk->proto_variant); printf(" "); pf_print_sk_host(&sk->ext_gwy, sk->af_gwy, sk->proto, - sk->proto_variant); + sk->proto_variant); printf(" [lo=%u high=%u win=%u modulator=%u", s->src.seqlo, s->src.seqhi, s->src.max_win, s->src.seqdiff); - if (s->src.wscale && s->dst.wscale) + if (s->src.wscale && s->dst.wscale) { printf(" wscale=%u", s->src.wscale & PF_WSCALE_MASK); + } printf("]"); printf(" [lo=%u high=%u win=%u modulator=%u", s->dst.seqlo, s->dst.seqhi, s->dst.max_win, s->dst.seqdiff); - if (s->src.wscale && s->dst.wscale) + if (s->src.wscale && s->dst.wscale) { printf(" wscale=%u", s->dst.wscale & PF_WSCALE_MASK); + } printf("]"); printf(" %u:%u", s->src.state, s->dst.state); } @@ -1888,32 +1984,41 @@ pf_print_state(struct pf_state *s) void pf_print_flags(u_int8_t f) { - if (f) + if (f) { printf(" "); - if (f & TH_FIN) + } + if (f & TH_FIN) { printf("F"); - if (f & TH_SYN) + } + if (f & TH_SYN) { printf("S"); - if (f & TH_RST) + } + if (f & TH_RST) { printf("R"); - if (f & TH_PUSH) + } + if (f & TH_PUSH) { printf("P"); - if (f & TH_ACK) + } + if (f & TH_ACK) { printf("A"); - if (f & TH_URG) + } + if (f & TH_URG) { printf("U"); - if (f & TH_ECE) + } + if (f & TH_ECE) { printf("E"); - if (f & TH_CWR) + } + if (f & TH_CWR) { printf("W"); + } } -#define PF_SET_SKIP_STEPS(i) \ - do { \ - while (head[i] != cur) { \ - head[i]->skip[i].ptr = cur; \ - head[i] = TAILQ_NEXT(head[i], entries); \ - } \ +#define PF_SET_SKIP_STEPS(i) \ + do { \ + while (head[i] != cur) { \ + head[i]->skip[i].ptr = cur; \ + head[i] = TAILQ_NEXT(head[i], entries); \ 
+ } \ } while (0) void @@ -1924,21 +2029,26 @@ pf_calc_skip_steps(struct pf_rulequeue *rules) cur = TAILQ_FIRST(rules); prev = cur; - for (i = 0; i < PF_SKIP_COUNT; ++i) + for (i = 0; i < PF_SKIP_COUNT; ++i) { head[i] = cur; + } while (cur != NULL) { - - if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) + if (cur->kif != prev->kif || cur->ifnot != prev->ifnot) { PF_SET_SKIP_STEPS(PF_SKIP_IFP); - if (cur->direction != prev->direction) + } + if (cur->direction != prev->direction) { PF_SET_SKIP_STEPS(PF_SKIP_DIR); - if (cur->af != prev->af) + } + if (cur->af != prev->af) { PF_SET_SKIP_STEPS(PF_SKIP_AF); - if (cur->proto != prev->proto) + } + if (cur->proto != prev->proto) { PF_SET_SKIP_STEPS(PF_SKIP_PROTO); + } if (cur->src.neg != prev->src.neg || - pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr)) + pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr)) { PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR); + } { union pf_rule_xport *cx = &cur->src.xport; union pf_rule_xport *px = &prev->src.xport; @@ -1953,14 +2063,16 @@ pf_calc_skip_steps(struct pf_rulequeue *rules) prev->proto == IPPROTO_ESP || cx->range.op != px->range.op || cx->range.port[0] != px->range.port[0] || - cx->range.port[1] != px->range.port[1]) + cx->range.port[1] != px->range.port[1]) { PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT); + } break; } } if (cur->dst.neg != prev->dst.neg || - pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr)) + pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr)) { PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR); + } { union pf_rule_xport *cx = &cur->dst.xport; union pf_rule_xport *px = &prev->dst.xport; @@ -1968,21 +2080,24 @@ pf_calc_skip_steps(struct pf_rulequeue *rules) switch (cur->proto) { case IPPROTO_GRE: if (cur->proto != prev->proto || - cx->call_id != px->call_id) + cx->call_id != px->call_id) { PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT); + } break; case IPPROTO_ESP: if (cur->proto != prev->proto || - cx->spi != px->spi) + cx->spi != px->spi) { PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT); + } break; default: if (prev->proto == IPPROTO_GRE || prev->proto == IPPROTO_ESP || cx->range.op != px->range.op || cx->range.port[0] != px->range.port[0] || - cx->range.port[1] != px->range.port[1]) + cx->range.port[1] != px->range.port[1]) { PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT); + } break; } } @@ -1990,8 +2105,9 @@ pf_calc_skip_steps(struct pf_rulequeue *rules) prev = cur; cur = TAILQ_NEXT(cur, entries); } - for (i = 0; i < PF_SKIP_COUNT; ++i) + for (i = 0; i < PF_SKIP_COUNT; ++i) { PF_SET_SKIP_STEPS(i); + } } u_int32_t @@ -2000,13 +2116,13 @@ pf_calc_state_key_flowhash(struct pf_state_key *sk) struct pf_flowhash_key fh __attribute__((aligned(8))); uint32_t flowhash = 0; - bzero(&fh, sizeof (fh)); + bzero(&fh, sizeof(fh)); if (PF_ALEQ(&sk->lan.addr, &sk->ext_lan.addr, sk->af_lan)) { - bcopy(&sk->lan.addr, &fh.ap1.addr, sizeof (fh.ap1.addr)); - bcopy(&sk->ext_lan.addr, &fh.ap2.addr, sizeof (fh.ap2.addr)); + bcopy(&sk->lan.addr, &fh.ap1.addr, sizeof(fh.ap1.addr)); + bcopy(&sk->ext_lan.addr, &fh.ap2.addr, sizeof(fh.ap2.addr)); } else { - bcopy(&sk->ext_lan.addr, &fh.ap1.addr, sizeof (fh.ap1.addr)); - bcopy(&sk->lan.addr, &fh.ap2.addr, sizeof (fh.ap2.addr)); + bcopy(&sk->ext_lan.addr, &fh.ap1.addr, sizeof(fh.ap1.addr)); + bcopy(&sk->lan.addr, &fh.ap2.addr, sizeof(fh.ap2.addr)); } if (sk->lan.xport.spi <= sk->ext_lan.xport.spi) { fh.ap1.xport.spi = sk->lan.xport.spi; @@ -2019,49 +2135,52 @@ pf_calc_state_key_flowhash(struct pf_state_key *sk) fh.proto = sk->proto; try_again: - flowhash = net_flowhash(&fh, sizeof (fh), pf_hash_seed); + flowhash = 
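/*
 * The flow hash must be direction-agnostic: addresses and SPIs are each
 * stored in canonical (lowest-first) order so both directions of a
 * connection hash identically. Zero is reserved to mean "no flowhash",
 * hence the reseed-and-retry loop when net_flowhash() returns 0.
 */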
net_flowhash(&fh, sizeof(fh), pf_hash_seed); if (flowhash == 0) { /* try to get a non-zero flowhash */ pf_hash_seed = RandomULong(); goto try_again; } - return (flowhash); + return flowhash; } static int pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2) { - if (aw1->type != aw2->type) - return (1); + if (aw1->type != aw2->type) { + return 1; + } switch (aw1->type) { case PF_ADDR_ADDRMASK: case PF_ADDR_RANGE: - if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0)) - return (1); - if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0)) - return (1); - return (0); + if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, 0)) { + return 1; + } + if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, 0)) { + return 1; + } + return 0; case PF_ADDR_DYNIFTL: - return (aw1->p.dyn == NULL || aw2->p.dyn == NULL || - aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt); + return aw1->p.dyn == NULL || aw2->p.dyn == NULL || + aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt; case PF_ADDR_NOROUTE: case PF_ADDR_URPFFAILED: - return (0); + return 0; case PF_ADDR_TABLE: - return (aw1->p.tbl != aw2->p.tbl); + return aw1->p.tbl != aw2->p.tbl; case PF_ADDR_RTLABEL: - return (aw1->v.rtlabel != aw2->v.rtlabel); + return aw1->v.rtlabel != aw2->v.rtlabel; default: printf("invalid address type: %d\n", aw1->type); - return (1); + return 1; } } u_int16_t pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp) { - return (nat464_cksum_fixup(cksum, old, new, udp)); + return nat464_cksum_fixup(cksum, old, new, udp); } /* @@ -2084,12 +2203,13 @@ pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p, u_int16_t *ic, u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u, sa_family_t af, sa_family_t afn, int ua) { - struct pf_addr ao; - u_int16_t po = *p; + struct pf_addr ao; + u_int16_t po = *p; PF_ACPY(&ao, a, af); - if (ua) + if (ua) { PF_ACPY(a, an, afn); + } *p = pn; @@ -2099,8 +2219,8 @@ pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p, switch (afn) { case AF_INET: *ic = pf_cksum_fixup(pf_cksum_fixup(*ic, - ao.addr16[0], an->addr16[0], 0), - ao.addr16[1], an->addr16[1], 0); + ao.addr16[0], an->addr16[0], 0), + ao.addr16[1], an->addr16[1], 0); *p = pn; /* * If the packet is originated from an ALG on the NAT gateway @@ -2120,31 +2240,31 @@ pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p, (*pbuf->pb_csum_flags & (CSUM_TCP | CSUM_UDP))) { /* Pseudo-header checksum does not include ports */ *pc = ~pf_cksum_fixup(pf_cksum_fixup(~*pc, - ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u); + ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u); } else { *pc = - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - *pc, ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u), - po, pn, u); + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + *pc, ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u), + po, pn, u); } break; #ifdef INET6 case AF_INET6: *p = pn; *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, - ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u), - 0, an->addr16[2], u), - 0, an->addr16[3], u), - 0, an->addr16[4], u), - 0, an->addr16[5], u), - 0, an->addr16[6], u), - 0, an->addr16[7], u), + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, + ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u), + 0, an->addr16[2], u), + 0, an->addr16[3], u), + 
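/*
 * These nested calls are the incremental checksum update of RFC 1624,
 * HC' = ~(~HC + ~m + m'), applied one 16-bit word at a time by
 * pf_cksum_fixup() via nat464_cksum_fixup(). For an AF_INET to AF_INET6
 * rewrite, the two overlapping address words are replaced and the six
 * extra IPv6 words are folded in from zero, so the chain covers all
 * eight address words plus the port.
 */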
0, an->addr16[4], u), + 0, an->addr16[5], u), + 0, an->addr16[6], u), + 0, an->addr16[7], u), po, pn, u); break; #endif /* INET6 */ @@ -2155,62 +2275,62 @@ pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p, case AF_INET6: switch (afn) { case AF_INET6: - /* - * If the packet is originated from an ALG on the NAT gateway - * (source address is loopback or local), in which case the - * TCP/UDP checksum field contains the pseudo header checksum - * that's not yet complemented. - * A packet generated locally - * will have UDP/TCP CSUM flag set (gets set in protocol - * output). - */ + /* + * If the packet is originated from an ALG on the NAT gateway + * (source address is loopback or local), in which case the + * TCP/UDP checksum field contains the pseudo header checksum + * that's not yet complemented. + * A packet generated locally + * will have UDP/TCP CSUM flag set (gets set in protocol + * output). + */ if (dir == PF_OUT && pbuf != NULL && (*pbuf->pb_csum_flags & (CSUM_TCPIPV6 | - CSUM_UDPIPV6))) { - /* Pseudo-header checksum does not include ports */ + CSUM_UDPIPV6))) { + /* Pseudo-header checksum does not include ports */ *pc = - ~pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - ~*pc, - ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u), - ao.addr16[2], an->addr16[2], u), - ao.addr16[3], an->addr16[3], u), - ao.addr16[4], an->addr16[4], u), - ao.addr16[5], an->addr16[5], u), - ao.addr16[6], an->addr16[6], u), - ao.addr16[7], an->addr16[7], u); + ~pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + ~*pc, + ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u), + ao.addr16[2], an->addr16[2], u), + ao.addr16[3], an->addr16[3], u), + ao.addr16[4], an->addr16[4], u), + ao.addr16[5], an->addr16[5], u), + ao.addr16[6], an->addr16[6], u), + ao.addr16[7], an->addr16[7], u); } else { *pc = - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - *pc, - ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u), - ao.addr16[2], an->addr16[2], u), - ao.addr16[3], an->addr16[3], u), - ao.addr16[4], an->addr16[4], u), - ao.addr16[5], an->addr16[5], u), - ao.addr16[6], an->addr16[6], u), - ao.addr16[7], an->addr16[7], u), - po, pn, u); + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + *pc, + ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u), + ao.addr16[2], an->addr16[2], u), + ao.addr16[3], an->addr16[3], u), + ao.addr16[4], an->addr16[4], u), + ao.addr16[5], an->addr16[5], u), + ao.addr16[6], an->addr16[6], u), + ao.addr16[7], an->addr16[7], u), + po, pn, u); } break; #ifdef INET case AF_INET: *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, - ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u), - ao.addr16[2], 0, u), - ao.addr16[3], 0, u), - ao.addr16[4], 0, u), - ao.addr16[5], 0, u), - ao.addr16[6], 0, u), - ao.addr16[7], 0, u), + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(*pc, + ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u), + ao.addr16[2], 0, u), + ao.addr16[3], 0, u), + ao.addr16[4], 0, u), + 
ao.addr16[5], 0, u), + ao.addr16[6], 0, u), + ao.addr16[7], 0, u), po, pn, u); break; #endif /* INET */ @@ -2225,10 +2345,10 @@ pf_change_ap(int dir, pbuf_t *pbuf, struct pf_addr *a, u_int16_t *p, void pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u) { - u_int32_t ao; + u_int32_t ao; - memcpy(&ao, a, sizeof (ao)); - memcpy(a, &an, sizeof (u_int32_t)); + memcpy(&ao, a, sizeof(ao)); + memcpy(a, &an, sizeof(u_int32_t)); *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u), ao % 65536, an % 65536, u); } @@ -2237,29 +2357,29 @@ pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u) static void pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u) { - struct pf_addr ao; + struct pf_addr ao; PF_ACPY(&ao, a, AF_INET6); PF_ACPY(a, an, AF_INET6); *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(*c, - ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u), - ao.addr16[2], an->addr16[2], u), - ao.addr16[3], an->addr16[3], u), - ao.addr16[4], an->addr16[4], u), - ao.addr16[5], an->addr16[5], u), + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(*c, + ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u), + ao.addr16[2], an->addr16[2], u), + ao.addr16[3], an->addr16[3], u), + ao.addr16[4], an->addr16[4], u), + ao.addr16[5], an->addr16[5], u), ao.addr16[6], an->addr16[6], u), ao.addr16[7], an->addr16[7], u); } void pf_change_addr(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u, - sa_family_t af, sa_family_t afn) + sa_family_t af, sa_family_t afn) { - struct pf_addr ao; + struct pf_addr ao; PF_ACPY(&ao, a, af); PF_ACPY(a, an, afn); @@ -2272,16 +2392,16 @@ pf_change_addr(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u, break; case AF_INET6: *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(*c, - ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u), - 0, an->addr16[2], u), - 0, an->addr16[3], u), - 0, an->addr16[4], u), - 0, an->addr16[5], u), - 0, an->addr16[6], u), - 0, an->addr16[7], u); + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(*c, + ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u), + 0, an->addr16[2], u), + 0, an->addr16[3], u), + 0, an->addr16[4], u), + 0, an->addr16[5], u), + 0, an->addr16[6], u), + 0, an->addr16[7], u); break; } break; @@ -2289,16 +2409,16 @@ pf_change_addr(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u, switch (afn) { case AF_INET: *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(*c, - ao.addr16[0], an->addr16[0], u), - ao.addr16[1], an->addr16[1], u), - ao.addr16[2], 0, u), - ao.addr16[3], 0, u), - ao.addr16[4], 0, u), - ao.addr16[5], 0, u), - ao.addr16[6], 0, u), - ao.addr16[7], 0, u); + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(*c, + ao.addr16[0], an->addr16[0], u), + ao.addr16[1], an->addr16[1], u), + ao.addr16[2], 0, u), + ao.addr16[3], 0, u), + ao.addr16[4], 0, u), + ao.addr16[5], 0, u), + ao.addr16[6], 0, u), + ao.addr16[7], 0, u); break; case AF_INET6: pf_change_a6(a, c, an, u); @@ -2315,31 +2435,34 @@ pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c, u_int16_t *ic, u_int16_t *hc, u_int8_t 
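/*
 * pf_change_icmp() keeps several checksums consistent at once when
 * rewriting the packet quoted inside an ICMP error: the inner transport
 * checksum (pc), the inner IP header checksum (h2c), the ICMP checksum
 * (ic) covering both, and, per the signature, hc for the outer header.
 */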
u, sa_family_t af) { - struct pf_addr oia, ooa; + struct pf_addr oia, ooa; PF_ACPY(&oia, ia, af); PF_ACPY(&ooa, oa, af); /* Change inner protocol port, fix inner protocol checksum. */ if (ip != NULL) { - u_int16_t oip = *ip; - u_int32_t opc = 0; + u_int16_t oip = *ip; + u_int32_t opc = 0; - if (pc != NULL) + if (pc != NULL) { opc = *pc; + } *ip = np; - if (pc != NULL) + if (pc != NULL) { *pc = pf_cksum_fixup(*pc, oip, *ip, u); + } *ic = pf_cksum_fixup(*ic, oip, *ip, 0); - if (pc != NULL) + if (pc != NULL) { *ic = pf_cksum_fixup(*ic, opc, *pc, 0); + } } /* Change inner ip address, fix inner ip and icmp checksums. */ PF_ACPY(ia, na, af); switch (af) { #if INET case AF_INET: { - u_int32_t oh2c = *h2c; + u_int32_t oh2c = *h2c; *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c, oia.addr16[0], ia->addr16[0], 0), @@ -2354,14 +2477,14 @@ pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, #if INET6 case AF_INET6: *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(*ic, - oia.addr16[0], ia->addr16[0], u), - oia.addr16[1], ia->addr16[1], u), - oia.addr16[2], ia->addr16[2], u), - oia.addr16[3], ia->addr16[3], u), - oia.addr16[4], ia->addr16[4], u), - oia.addr16[5], ia->addr16[5], u), + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(*ic, + oia.addr16[0], ia->addr16[0], u), + oia.addr16[1], ia->addr16[1], u), + oia.addr16[2], ia->addr16[2], u), + oia.addr16[3], ia->addr16[3], u), + oia.addr16[4], ia->addr16[4], u), + oia.addr16[5], ia->addr16[5], u), oia.addr16[6], ia->addr16[6], u), oia.addr16[7], ia->addr16[7], u); break; @@ -2380,14 +2503,14 @@ pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa, #if INET6 case AF_INET6: *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( - pf_cksum_fixup(pf_cksum_fixup(*ic, - ooa.addr16[0], oa->addr16[0], u), - ooa.addr16[1], oa->addr16[1], u), - ooa.addr16[2], oa->addr16[2], u), - ooa.addr16[3], oa->addr16[3], u), - ooa.addr16[4], oa->addr16[4], u), - ooa.addr16[5], oa->addr16[5], u), + pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup( + pf_cksum_fixup(pf_cksum_fixup(*ic, + ooa.addr16[0], oa->addr16[0], u), + ooa.addr16[1], oa->addr16[1], u), + ooa.addr16[2], oa->addr16[2], u), + ooa.addr16[3], oa->addr16[3], u), + ooa.addr16[4], oa->addr16[4], u), + ooa.addr16[5], oa->addr16[5], u), ooa.addr16[6], oa->addr16[6], u), ooa.addr16[7], oa->addr16[7], u); break; @@ -2404,56 +2527,60 @@ static int pf_modulate_sack(pbuf_t *pbuf, int off, struct pf_pdesc *pd, struct tcphdr *th, struct pf_state_peer *dst) { - int hlen = (th->th_off << 2) - sizeof (*th), thoptlen = hlen; + int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen; u_int8_t opts[MAX_TCPOPTLEN], *opt = opts; int copyback = 0, i, olen; struct sackblk sack; -#define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2) +#define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2) if (hlen < TCPOLEN_SACKLEN || - !pf_pull_hdr(pbuf, off + sizeof (*th), opts, hlen, NULL, NULL, pd->af)) - return (0); + !pf_pull_hdr(pbuf, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af)) { + return 0; + } while (hlen >= TCPOLEN_SACKLEN) { olen = opt[1]; switch (*opt) { - case TCPOPT_EOL: /* FALLTHROUGH */ + case TCPOPT_EOL: /* FALLTHROUGH */ case TCPOPT_NOP: opt++; hlen--; break; case TCPOPT_SACK: - if (olen > hlen) + if (olen > hlen) { olen = hlen; + } if (olen >= TCPOLEN_SACKLEN) { for (i = 2; i + TCPOLEN_SACK <= olen; i += TCPOLEN_SACK) { - memcpy(&sack, &opt[i], sizeof (sack)); + 
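For context on the pf_change_a/pf_change_a6/pf_change_addr/pf_change_icmp hunks above: all of that churn is whitespace and braces around one primitive, pf_cksum_fixup(), the RFC 1624 incremental one's-complement update that patches a checksum when a single 16-bit word changes rather than recomputing it over the whole packet. A minimal standalone sketch of that arithmetic (cksum_fixup is an illustrative name, not the exact kernel routine):

    #include <stdint.h>

    /*
     * Patch a one's-complement checksum when one 16-bit word changes
     * from 'old' to 'new' (RFC 1624). UDP treats a zero checksum as
     * "absent", so zero is preserved on input and avoided on output.
     */
    static uint16_t
    cksum_fixup(uint16_t cksum, uint16_t old, uint16_t new, int udp)
    {
        uint32_t l;

        if (udp && cksum == 0)
            return 0;                   /* no UDP checksum in use */
        l = (uint32_t)cksum + old - new;
        l = (l >> 16) + (l & 0xffff);   /* fold the carry back in */
        l &= 0xffff;
        if (udp && l == 0)
            return 0xffff;              /* zero is reserved for "absent" */
        return (uint16_t)l;
    }

The eight-deep pf_cksum_fixup() pyramids in pf_change_a6() are exactly this, applied once per addr16[] word of an IPv6 address.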
memcpy(&sack, &opt[i], sizeof(sack)); pf_change_a(&sack.start, &th->th_sum, htonl(ntohl(sack.start) - dst->seqdiff), 0); pf_change_a(&sack.end, &th->th_sum, htonl(ntohl(sack.end) - dst->seqdiff), 0); - memcpy(&opt[i], &sack, sizeof (sack)); + memcpy(&opt[i], &sack, sizeof(sack)); } - copyback = off + sizeof (*th) + thoptlen; + copyback = off + sizeof(*th) + thoptlen; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ default: - if (olen < 2) + if (olen < 2) { olen = 2; + } hlen -= olen; opt += olen; } } if (copyback) { - if (pf_lazy_makewritable(pd, pbuf, copyback) == NULL) - return (-1); - pbuf_copy_back(pbuf, off + sizeof (*th), thoptlen, opts); + if (pf_lazy_makewritable(pd, pbuf, copyback) == NULL) { + return -1; + } + pbuf_copy_back(pbuf, off + sizeof(*th), thoptlen, opts); } - return (copyback); + return copyback; } /* @@ -2476,32 +2603,33 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, u_int16_t rtag, struct ether_header *eh, struct ifnet *ifp) { #pragma unused(eh, ifp) - struct mbuf *m; - int len, tlen; + struct mbuf *m; + int len, tlen; #if INET - struct ip *h = NULL; + struct ip *h = NULL; #endif /* INET */ #if INET6 - struct ip6_hdr *h6 = NULL; + struct ip6_hdr *h6 = NULL; #endif /* INET6 */ - struct tcphdr *th = NULL; - char *opt; - struct pf_mtag *pf_mtag; + struct tcphdr *th = NULL; + char *opt; + struct pf_mtag *pf_mtag; /* maximum segment size tcp option */ - tlen = sizeof (struct tcphdr); - if (mss) + tlen = sizeof(struct tcphdr); + if (mss) { tlen += 4; + } switch (af) { #if INET case AF_INET: - len = sizeof (struct ip) + tlen; + len = sizeof(struct ip) + tlen; break; #endif /* INET */ #if INET6 case AF_INET6: - len = sizeof (struct ip6_hdr) + tlen; + len = sizeof(struct ip6_hdr) + tlen; break; #endif /* INET6 */ default: @@ -2511,18 +2639,22 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, /* create outgoing mbuf */ m = m_gethdr(M_DONTWAIT, MT_HEADER); - if (m == NULL) + if (m == NULL) { return; + } - if ((pf_mtag = pf_get_mtag(m)) == NULL) + if ((pf_mtag = pf_get_mtag(m)) == NULL) { return; + } - if (tag) + if (tag) { pf_mtag->pftag_flags |= PF_TAG_GENERATED; + } pf_mtag->pftag_tag = rtag; - if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid)) + if (r != NULL && PF_RTABLEID_IS_VALID(r->rtableid)) { pf_mtag->pftag_rtableid = r->rtableid; + } #if PF_ECN /* add hints for ecn */ @@ -2562,7 +2694,7 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, h->ip_src.s_addr = saddr->v4addr.s_addr; h->ip_dst.s_addr = daddr->v4addr.s_addr; - th = (struct tcphdr *)(void *)((caddr_t)h + sizeof (struct ip)); + th = (struct tcphdr *)(void *)((caddr_t)h + sizeof(struct ip)); break; #endif /* INET */ #if INET6 @@ -2572,11 +2704,11 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, /* IP header fields included in the TCP checksum */ h6->ip6_nxt = IPPROTO_TCP; h6->ip6_plen = htons(tlen); - memcpy(&h6->ip6_src, &saddr->v6addr, sizeof (struct in6_addr)); - memcpy(&h6->ip6_dst, &daddr->v6addr, sizeof (struct in6_addr)); + memcpy(&h6->ip6_src, &saddr->v6addr, sizeof(struct in6_addr)); + memcpy(&h6->ip6_dst, &daddr->v6addr, sizeof(struct in6_addr)); th = (struct tcphdr *)(void *) - ((caddr_t)h6 + sizeof (struct ip6_hdr)); + ((caddr_t)h6 + sizeof(struct ip6_hdr)); break; #endif /* INET6 */ } @@ -2610,7 +2742,7 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, /* Finish the IP header */ h->ip_v = 4; - h->ip_hl = sizeof (*h) >> 2; + h->ip_hl = sizeof(*h) >> 2; h->ip_tos = IPTOS_LOWDELAY; /* * ip_output() expects ip_len and ip_off to be in host order. 
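A note on the pf_modulate_sack() hunk just above: when pf modulates a state's TCP sequence numbers, SACK blocks echoed by the peer still carry the modulated values, so each block edge has to be shifted back by the peer's seqdiff before the packet continues. The code does this through pf_change_a() so th_sum stays valid; the rebasing step itself reduces to (rebase_sack_edge is an illustrative name):

    #include <stdint.h>
    #include <arpa/inet.h>

    /*
     * Rebase one SACK block edge by the per-peer sequence offset.
     * Edges travel in network byte order; the subtraction happens in
     * host order, then the result is swapped back.
     */
    static uint32_t
    rebase_sack_edge(uint32_t edge_net, uint32_t seqdiff)
    {
        return htonl(ntohl(edge_net) - seqdiff);
    }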
@@ -2620,7 +2752,7 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, h->ip_ttl = ttl ? ttl : ip_defttl; h->ip_sum = 0; - bzero(&ro, sizeof (ro)); + bzero(&ro, sizeof(ro)); ip_output(m, NULL, &ro, 0, NULL, NULL); ROUTE_RELEASE(&ro); break; @@ -2632,12 +2764,12 @@ pf_send_tcp(const struct pf_rule *r, sa_family_t af, /* TCP checksum */ th->th_sum = in6_cksum(m, IPPROTO_TCP, - sizeof (struct ip6_hdr), tlen); + sizeof(struct ip6_hdr), tlen); h6->ip6_vfc |= IPV6_VERSION; h6->ip6_hlim = IPV6_DEFHLIM; - bzero(&ro6, sizeof (ro6)); + bzero(&ro6, sizeof(ro6)); ip6_output(m, NULL, &ro6, 0, NULL, NULL, NULL); ROUTE_RELEASE(&ro6); break; @@ -2650,20 +2782,23 @@ static void pf_send_icmp(pbuf_t *pbuf, u_int8_t type, u_int8_t code, sa_family_t af, struct pf_rule *r) { - struct mbuf *m0; - struct pf_mtag *pf_mtag; + struct mbuf *m0; + struct pf_mtag *pf_mtag; m0 = pbuf_clone_to_mbuf(pbuf); - if (m0 == NULL) + if (m0 == NULL) { return; + } - if ((pf_mtag = pf_get_mtag(m0)) == NULL) + if ((pf_mtag = pf_get_mtag(m0)) == NULL) { return; + } pf_mtag->pftag_flags |= PF_TAG_GENERATED; - if (PF_RTABLEID_IS_VALID(r->rtableid)) + if (PF_RTABLEID_IS_VALID(r->rtableid)) { pf_mtag->pftag_rtableid = r->rtableid; + } #if PF_ECN /* add hints for ecn */ @@ -2709,40 +2844,44 @@ int pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m, struct pf_addr *b, sa_family_t af) { - int match = 0; + int match = 0; switch (af) { #if INET case AF_INET: if ((a->addr32[0] & m->addr32[0]) == - (b->addr32[0] & m->addr32[0])) + (b->addr32[0] & m->addr32[0])) { match++; + } break; #endif /* INET */ #if INET6 case AF_INET6: if (((a->addr32[0] & m->addr32[0]) == - (b->addr32[0] & m->addr32[0])) && + (b->addr32[0] & m->addr32[0])) && ((a->addr32[1] & m->addr32[1]) == - (b->addr32[1] & m->addr32[1])) && + (b->addr32[1] & m->addr32[1])) && ((a->addr32[2] & m->addr32[2]) == - (b->addr32[2] & m->addr32[2])) && + (b->addr32[2] & m->addr32[2])) && ((a->addr32[3] & m->addr32[3]) == - (b->addr32[3] & m->addr32[3]))) + (b->addr32[3] & m->addr32[3]))) { match++; + } break; #endif /* INET6 */ } if (match) { - if (n) - return (0); - else - return (1); + if (n) { + return 0; + } else { + return 1; + } } else { - if (n) - return (1); - else - return (0); + if (n) { + return 1; + } else { + return 0; + } } } @@ -2757,31 +2896,36 @@ pf_match_addr_range(struct pf_addr *b, struct pf_addr *e, #if INET case AF_INET: if ((a->addr32[0] < b->addr32[0]) || - (a->addr32[0] > e->addr32[0])) - return (0); + (a->addr32[0] > e->addr32[0])) { + return 0; + } break; #endif /* INET */ #if INET6 case AF_INET6: { - int i; + int i; /* check a >= b */ - for (i = 0; i < 4; ++i) - if (a->addr32[i] > b->addr32[i]) + for (i = 0; i < 4; ++i) { + if (a->addr32[i] > b->addr32[i]) { break; - else if (a->addr32[i] < b->addr32[i]) - return (0); + } else if (a->addr32[i] < b->addr32[i]) { + return 0; + } + } /* check a <= e */ - for (i = 0; i < 4; ++i) - if (a->addr32[i] < e->addr32[i]) + for (i = 0; i < 4; ++i) { + if (a->addr32[i] < e->addr32[i]) { break; - else if (a->addr32[i] > e->addr32[i]) - return (0); + } else if (a->addr32[i] > e->addr32[i]) { + return 0; + } + } break; } #endif /* INET6 */ } - return (1); + return 1; } int @@ -2789,25 +2933,25 @@ pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p) { switch (op) { case PF_OP_IRG: - return ((p > a1) && (p < a2)); + return (p > a1) && (p < a2); case PF_OP_XRG: - return ((p < a1) || (p > a2)); + return (p < a1) || (p > a2); case PF_OP_RRG: - return ((p >= a1) && (p <= a2)); + return (p >= a1) && (p <= a2); 
case PF_OP_EQ: - return (p == a1); + return p == a1; case PF_OP_NE: - return (p != a1); + return p != a1; case PF_OP_LT: - return (p < a1); + return p < a1; case PF_OP_LE: - return (p <= a1); + return p <= a1; case PF_OP_GT: - return (p > a1); + return p > a1; case PF_OP_GE: - return (p >= a1); + return p >= a1; } - return (0); /* never reached */ + return 0; /* never reached */ } int @@ -2818,7 +2962,7 @@ pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p) NTOHS(a2); NTOHS(p); #endif - return (pf_match(op, a1, a2, p)); + return pf_match(op, a1, a2, p); } int @@ -2830,8 +2974,9 @@ pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx, if (sx) { switch (proto) { case IPPROTO_GRE: - if (proto_variant == PF_GRE_PPTP_VARIANT) + if (proto_variant == PF_GRE_PPTP_VARIANT) { d = (rx->call_id == sx->call_id); + } break; case IPPROTO_ESP: @@ -2842,10 +2987,11 @@ pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx, case IPPROTO_UDP: case IPPROTO_ICMP: case IPPROTO_ICMPV6: - if (rx->range.op) + if (rx->range.op) { d = pf_match_port(rx->range.op, rx->range.port[0], rx->range.port[1], sx->port); + } break; default: @@ -2853,34 +2999,37 @@ pf_match_xport(u_int8_t proto, u_int8_t proto_variant, union pf_rule_xport *rx, } } - return (d); + return d; } int pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u) { - if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) - return (0); - return (pf_match(op, a1, a2, u)); + if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE) { + return 0; + } + return pf_match(op, a1, a2, u); } int pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g) { - if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) - return (0); - return (pf_match(op, a1, a2, g)); + if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE) { + return 0; + } + return pf_match(op, a1, a2, g); } static int pf_match_tag(struct pf_rule *r, struct pf_mtag *pf_mtag, int *tag) { - if (*tag == -1) + if (*tag == -1) { *tag = pf_mtag->pftag_tag; + } - return ((!r->match_tag_not && r->match_tag == *tag) || - (r->match_tag_not && r->match_tag != *tag)); + return (!r->match_tag_not && r->match_tag == *tag) || + (r->match_tag_not && r->match_tag != *tag); } int @@ -2888,16 +3037,20 @@ pf_tag_packet(pbuf_t *pbuf, struct pf_mtag *pf_mtag, int tag, unsigned int rtableid, struct pf_pdesc *pd) { if (tag <= 0 && !PF_RTABLEID_IS_VALID(rtableid) && - (pd == NULL || !(pd->pktflags & PKTF_FLOW_ID))) - return (0); + (pd == NULL || !(pd->pktflags & PKTF_FLOW_ID))) { + return 0; + } - if (pf_mtag == NULL && (pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) - return (1); + if (pf_mtag == NULL && (pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) { + return 1; + } - if (tag > 0) + if (tag > 0) { pf_mtag->pftag_tag = tag; - if (PF_RTABLEID_IS_VALID(rtableid)) + } + if (PF_RTABLEID_IS_VALID(rtableid)) { pf_mtag->pftag_rtableid = rtableid; + } if (pd != NULL && (pd->pktflags & PKTF_FLOW_ID)) { *pbuf->pb_flowsrc = pd->flowsrc; *pbuf->pb_flowid = pd->flowhash; @@ -2905,25 +3058,27 @@ pf_tag_packet(pbuf_t *pbuf, struct pf_mtag *pf_mtag, int tag, *pbuf->pb_proto = pd->proto; } - return (0); + return 0; } void pf_step_into_anchor(int *depth, struct pf_ruleset **rs, int n, - struct pf_rule **r, struct pf_rule **a, int *match) + struct pf_rule **r, struct pf_rule **a, int *match) { - struct pf_anchor_stackframe *f; + struct pf_anchor_stackframe *f; (*r)->anchor->match = 0; - if (match) + if (match) { *match = 0; - if (*depth >= (int)sizeof (pf_anchor_stack) / - (int)sizeof 
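Two details worth keeping in mind around the pf_match() family above: the PF_OP_* comparisons run on host-order values, which is why pf_match_port() byte-swaps its arguments on little-endian builds before delegating, and pf_match_uid()/pf_match_gid() treat UID_MAX/GID_MAX as "unknown", so only equality tests are meaningful there. A small illustrative equivalent of the inclusive-range case on wire-format inputs (port_in_range is a made-up helper):

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Inclusive port-range test (PF_OP_RRG) on network-order inputs. */
    static int
    port_in_range(uint16_t lo_net, uint16_t hi_net, uint16_t p_net)
    {
        uint16_t lo = ntohs(lo_net), hi = ntohs(hi_net), p = ntohs(p_net);

        return p >= lo && p <= hi;
    }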
(pf_anchor_stack[0])) { + } + if (*depth >= (int)sizeof(pf_anchor_stack) / + (int)sizeof(pf_anchor_stack[0])) { printf("pf_step_into_anchor: stack overflow\n"); *r = TAILQ_NEXT(*r, entries); return; - } else if (*depth == 0 && a != NULL) + } else if (*depth == 0 && a != NULL) { *a = *r; + } f = pf_anchor_stack + (*depth)++; f->rs = *rs; f->r = *r; @@ -2947,40 +3102,45 @@ int pf_step_out_of_anchor(int *depth, struct pf_ruleset **rs, int n, struct pf_rule **r, struct pf_rule **a, int *match) { - struct pf_anchor_stackframe *f; + struct pf_anchor_stackframe *f; int quick = 0; do { - if (*depth <= 0) + if (*depth <= 0) { break; + } f = pf_anchor_stack + *depth - 1; if (f->parent != NULL && f->child != NULL) { if (f->child->match || (match != NULL && *match)) { f->r->anchor->match = 1; - if (match) + if (match) { *match = 0; + } } f->child = RB_NEXT(pf_anchor_node, f->parent, f->child); if (f->child != NULL) { *rs = &f->child->ruleset; *r = TAILQ_FIRST((*rs)->rules[n].active.ptr); - if (*r == NULL) + if (*r == NULL) { continue; - else + } else { break; + } } } (*depth)--; - if (*depth == 0 && a != NULL) + if (*depth == 0 && a != NULL) { *a = NULL; + } *rs = f->rs; - if (f->r->anchor->match || (match != NULL && *match)) + if (f->r->anchor->match || (match != NULL && *match)) { quick = f->r->quick; + } *r = TAILQ_NEXT(f->r, entries); } while (*r == NULL); - return (quick); + return quick; } #if INET6 @@ -3026,31 +3186,34 @@ pf_addr_inc(struct pf_addr *addr, sa_family_t af) addr->addr32[1] = 0; addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1); - } else + } else { addr->addr32[1] = htonl(ntohl(addr->addr32[1]) + 1); - } else + } + } else { addr->addr32[2] = htonl(ntohl(addr->addr32[2]) + 1); - } else + } + } else { addr->addr32[3] = htonl(ntohl(addr->addr32[3]) + 1); + } break; } } #endif /* INET6 */ #define mix(a, b, c) \ - do { \ - a -= b; a -= c; a ^= (c >> 13); \ - b -= c; b -= a; b ^= (a << 8); \ - c -= a; c -= b; c ^= (b >> 13); \ - a -= b; a -= c; a ^= (c >> 12); \ - b -= c; b -= a; b ^= (a << 16); \ - c -= a; c -= b; c ^= (b >> 5); \ - a -= b; a -= c; a ^= (c >> 3); \ - b -= c; b -= a; b ^= (a << 10); \ - c -= a; c -= b; c ^= (b >> 15); \ + do { \ + a -= b; a -= c; a ^= (c >> 13); \ + b -= c; b -= a; b ^= (a << 8); \ + c -= a; c -= b; c ^= (b >> 13); \ + a -= b; a -= c; a ^= (c >> 12); \ + b -= c; b -= a; b ^= (a << 16); \ + c -= a; c -= b; c ^= (b >> 5); \ + a -= b; a -= c; a ^= (c >> 3); \ + b -= c; b -= a; b ^= (a << 10); \ + c -= a; c -= b; c ^= (b >> 15); \ } while (0) /* @@ -3060,7 +3223,7 @@ static void pf_hash(struct pf_addr *inaddr, struct pf_addr *hash, struct pf_poolhashkey *key, sa_family_t af) { - u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0]; + u_int32_t a = 0x9e3779b9, b = 0x9e3779b9, c = key->key32[0]; switch (af) { #if INET @@ -3101,22 +3264,23 @@ static int pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, struct pf_addr *naddr, struct pf_addr *init_addr, struct pf_src_node **sn) { - unsigned char hash[16]; - struct pf_pool *rpool = &r->rpool; - struct pf_addr *raddr = &rpool->cur->addr.v.a.addr; - struct pf_addr *rmask = &rpool->cur->addr.v.a.mask; - struct pf_pooladdr *acur = rpool->cur; - struct pf_src_node k; + unsigned char hash[16]; + struct pf_pool *rpool = &r->rpool; + struct pf_addr *raddr = &rpool->cur->addr.v.a.addr; + struct pf_addr *rmask = &rpool->cur->addr.v.a.mask; + struct pf_pooladdr *acur = rpool->cur; + struct pf_src_node k; if (*sn == NULL && r->rpool.opts & PF_POOL_STICKYADDR && (r->rpool.opts & PF_POOL_TYPEMASK) 
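The mix() macro being re-indented above is one round of Bob Jenkins' classic lookup hash; pf_hash() seeds a and b with the golden-ratio constant 0x9e3779b9 and mixes in a secret pool key so PF_POOL_SRCHASH pools map a source address to a stable but unpredictable pool member. The same round, written as a function purely for readability:

    #include <stdint.h>

    /* One Jenkins mixing round, equivalent to the mix() macro. */
    static void
    jenkins_mix(uint32_t *a, uint32_t *b, uint32_t *c)
    {
        *a -= *b; *a -= *c; *a ^= (*c >> 13);
        *b -= *c; *b -= *a; *b ^= (*a << 8);
        *c -= *a; *c -= *b; *c ^= (*b >> 13);
        *a -= *b; *a -= *c; *a ^= (*c >> 12);
        *b -= *c; *b -= *a; *b ^= (*a << 16);
        *c -= *a; *c -= *b; *c ^= (*b >> 5);
        *a -= *b; *a -= *c; *a ^= (*c >> 3);
        *b -= *c; *b -= *a; *b ^= (*a << 10);
        *c -= *a; *c -= *b; *c ^= (*b >> 15);
    }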
!= PF_POOL_NONE) { k.af = af; PF_ACPY(&k.addr, saddr, af); if (r->rule_flag & PFRULE_RULESRCTRACK || - r->rpool.opts & PF_POOL_STICKYADDR) + r->rpool.opts & PF_POOL_STICKYADDR) { k.rule.ptr = r; - else + } else { k.rule.ptr = NULL; + } pf_status.scounters[SCNT_SRC_NODE_SEARCH]++; *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k); if (*sn != NULL && !PF_AZERO(&(*sn)->raddr, rpool->af)) { @@ -3128,22 +3292,25 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, pf_print_host(naddr, 0, rpool->af); printf("\n"); } - return (0); + return 0; } } - if (rpool->cur->addr.type == PF_ADDR_NOROUTE) - return (1); + if (rpool->cur->addr.type == PF_ADDR_NOROUTE) { + return 1; + } if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { - if (rpool->cur->addr.p.dyn == NULL) - return (1); + if (rpool->cur->addr.p.dyn == NULL) { + return 1; + } switch (rpool->af) { #if INET case AF_INET: if (rpool->cur->addr.p.dyn->pfid_acnt4 < 1 && (rpool->opts & PF_POOL_TYPEMASK) != - PF_POOL_ROUNDROBIN) - return (1); + PF_POOL_ROUNDROBIN) { + return 1; + } raddr = &rpool->cur->addr.p.dyn->pfid_addr4; rmask = &rpool->cur->addr.p.dyn->pfid_mask4; break; @@ -3152,16 +3319,18 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, case AF_INET6: if (rpool->cur->addr.p.dyn->pfid_acnt6 < 1 && (rpool->opts & PF_POOL_TYPEMASK) != - PF_POOL_ROUNDROBIN) - return (1); + PF_POOL_ROUNDROBIN) { + return 1; + } raddr = &rpool->cur->addr.p.dyn->pfid_addr6; rmask = &rpool->cur->addr.p.dyn->pfid_mask6; break; #endif /* INET6 */ } } else if (rpool->cur->addr.type == PF_ADDR_TABLE) { - if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) - return (1); /* unsupported */ + if ((rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_ROUNDROBIN) { + return 1; /* unsupported */ + } } else { raddr = &rpool->cur->addr.v.a.addr; rmask = &rpool->cur->addr.v.a.mask; @@ -3185,35 +3354,38 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, #endif /* INET */ #if INET6 case AF_INET6: - if (rmask->addr32[3] != 0xffffffff) + if (rmask->addr32[3] != 0xffffffff) { rpool->counter.addr32[3] = RandomULong(); - else + } else { break; - if (rmask->addr32[2] != 0xffffffff) + } + if (rmask->addr32[2] != 0xffffffff) { rpool->counter.addr32[2] = RandomULong(); - else + } else { break; - if (rmask->addr32[1] != 0xffffffff) + } + if (rmask->addr32[1] != 0xffffffff) { rpool->counter.addr32[1] = RandomULong(); - else + } else { break; - if (rmask->addr32[0] != 0xffffffff) + } + if (rmask->addr32[0] != 0xffffffff) { rpool->counter.addr32[0] = RandomULong(); + } break; #endif /* INET6 */ } PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, - rpool->af); + rpool->af); PF_ACPY(init_addr, naddr, rpool->af); - } else { PF_AINC(&rpool->counter, rpool->af); PF_POOLMASK(naddr, raddr, rmask, &rpool->counter, - rpool->af); + rpool->af); } break; case PF_POOL_SRCHASH: @@ -3228,21 +3400,25 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, if (rpool->cur->addr.type == PF_ADDR_TABLE) { if (!pfr_pool_get(rpool->cur->addr.p.tbl, &rpool->tblidx, &rpool->counter, - &raddr, &rmask, rpool->af)) + &raddr, &rmask, rpool->af)) { goto get_addr; + } } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { if (rpool->cur->addr.p.dyn != NULL && !pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, &rpool->tblidx, &rpool->counter, - &raddr, &rmask, af)) + &raddr, &rmask, af)) { goto get_addr; + } } else if (pf_match_addr(0, raddr, rmask, &rpool->counter, - rpool->af)) + rpool->af)) { goto get_addr; + } - try_next: - if ((rpool->cur = 
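On the PF_POOL_RANDOM branch above: only the address words whose mask is not all-ones get a fresh RandomULong(), and PF_POOLMASK() then splices those random bits into the pool address. For IPv4 the net effect reduces to the following sketch (random_pool_addr_v4 is illustrative; the caller-supplied rnd stands in for RandomULong()):

    #include <stdint.h>

    /*
     * Keep the network bits of the pool address and substitute the
     * random value for the host bits the mask leaves open; this is
     * the single-word IPv4 reduction of PF_POOLMASK() with a random
     * counter.
     */
    static uint32_t
    random_pool_addr_v4(uint32_t raddr, uint32_t rmask, uint32_t rnd)
    {
        return (raddr & rmask) | (rnd & ~rmask);
    }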
TAILQ_NEXT(rpool->cur, entries)) == NULL) +try_next: + if ((rpool->cur = TAILQ_NEXT(rpool->cur, entries)) == NULL) { rpool->cur = TAILQ_FIRST(&rpool->list); + } if (rpool->cur->addr.type == PF_ADDR_TABLE) { rpool->tblidx = -1; if (pfr_pool_get(rpool->cur->addr.p.tbl, @@ -3250,22 +3426,25 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, &raddr, &rmask, rpool->af)) { /* table contains no address of type * 'rpool->af' */ - if (rpool->cur != acur) + if (rpool->cur != acur) { goto try_next; - return (1); + } + return 1; } } else if (rpool->cur->addr.type == PF_ADDR_DYNIFTL) { rpool->tblidx = -1; - if (rpool->cur->addr.p.dyn == NULL) - return (1); + if (rpool->cur->addr.p.dyn == NULL) { + return 1; + } if (pfr_pool_get(rpool->cur->addr.p.dyn->pfid_kt, &rpool->tblidx, &rpool->counter, &raddr, &rmask, rpool->af)) { /* table contains no address of type * 'rpool->af' */ - if (rpool->cur != acur) + if (rpool->cur != acur) { goto try_next; - return (1); + } + return 1; } } else { raddr = &rpool->cur->addr.v.a.addr; @@ -3273,15 +3452,17 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, PF_ACPY(&rpool->counter, raddr, rpool->af); } - get_addr: +get_addr: PF_ACPY(naddr, &rpool->counter, rpool->af); - if (init_addr != NULL && PF_AZERO(init_addr, rpool->af)) + if (init_addr != NULL && PF_AZERO(init_addr, rpool->af)) { PF_ACPY(init_addr, naddr, rpool->af); + } PF_AINC(&rpool->counter, rpool->af); break; } - if (*sn != NULL) + if (*sn != NULL) { PF_ACPY(&(*sn)->raddr, naddr, rpool->af); + } if (pf_status.debug >= PF_DEBUG_MISC && (rpool->opts & PF_POOL_TYPEMASK) != PF_POOL_NONE) { @@ -3290,7 +3471,7 @@ pf_map_addr(sa_family_t af, struct pf_rule *r, struct pf_addr *saddr, printf("\n"); } - return (0); + return 0; } static int @@ -3301,33 +3482,33 @@ pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r, ) { #pragma unused(kif) - struct pf_state_key_cmp key; - struct pf_addr init_addr; + struct pf_state_key_cmp key; + struct pf_addr init_addr; unsigned int cut; sa_family_t af = pd->af; u_int8_t proto = pd->proto; unsigned int low = r->rpool.proxy_port[0]; unsigned int high = r->rpool.proxy_port[1]; - bzero(&init_addr, sizeof (init_addr)); - if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) - return (1); + bzero(&init_addr, sizeof(init_addr)); + if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) { + return 1; + } if (proto == IPPROTO_ICMP) { low = 1; high = 65535; } - if (!nxport) - return (0); /* No output necessary. */ - + if (!nxport) { + return 0; /* No output necessary. 
*/ + } /*--- Special mapping rules for UDP ---*/ if (proto == IPPROTO_UDP) { - /*--- Never float IKE source port ---*/ if (ntohs(sxport->port) == PF_IKE_PORT) { nxport->port = sxport->port; - return (0); + return 0; } /*--- Apply exterior mapping options ---*/ @@ -3336,23 +3517,29 @@ pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r, TAILQ_FOREACH(s, &state_list, entry_list) { struct pf_state_key *sk = s->state_key; - if (!sk) + if (!sk) { continue; - if (s->nat_rule.ptr != r) + } + if (s->nat_rule.ptr != r) { continue; + } if (sk->proto != IPPROTO_UDP || - sk->af_lan != af) + sk->af_lan != af) { continue; - if (sk->lan.xport.port != sxport->port) + } + if (sk->lan.xport.port != sxport->port) { continue; - if (PF_ANEQ(&sk->lan.addr, saddr, af)) + } + if (PF_ANEQ(&sk->lan.addr, saddr, af)) { continue; + } if (r->extmap < PF_EXTMAP_EI && - PF_ANEQ(&sk->ext_lan.addr, daddr, af)) + PF_ANEQ(&sk->ext_lan.addr, daddr, af)) { continue; + } nxport->port = sk->gwy.xport.port; - return (0); + return 0; } } } else if (proto == IPPROTO_TCP) { @@ -3364,18 +3551,23 @@ pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r, */ TAILQ_FOREACH(s, &state_list, entry_list) { struct pf_state_key* sk = s->state_key; - if (!sk) + if (!sk) { continue; - if (s->nat_rule.ptr != r) + } + if (s->nat_rule.ptr != r) { continue; - if (sk->proto != IPPROTO_TCP || sk->af_lan != af) - continue; - if (sk->lan.xport.port != sxport->port) + } + if (sk->proto != IPPROTO_TCP || sk->af_lan != af) { + continue; + } + if (sk->lan.xport.port != sxport->port) { continue; - if (!(PF_AEQ(&sk->lan.addr, saddr, af))) + } + if (!(PF_AEQ(&sk->lan.addr, saddr, af))) { continue; + } nxport->port = sk->gwy.xport.port; - return (0); + return 0; } } do { @@ -3384,43 +3576,46 @@ pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r, PF_ACPY(&key.ext_gwy.addr, daddr, key.af_gwy); PF_ACPY(&key.gwy.addr, naddr, key.af_gwy); switch (proto) { - case IPPROTO_UDP: - key.proto_variant = r->extfilter; - break; - default: - key.proto_variant = 0; - break; + case IPPROTO_UDP: + key.proto_variant = r->extfilter; + break; + default: + key.proto_variant = 0; + break; } - if (dxport) + if (dxport) { key.ext_gwy.xport = *dxport; - else + } else { memset(&key.ext_gwy.xport, 0, - sizeof (key.ext_gwy.xport)); + sizeof(key.ext_gwy.xport)); + } /* * port search; start random, step; * similar 2 portloop in in_pcbbind */ if (!(proto == IPPROTO_TCP || proto == IPPROTO_UDP || proto == IPPROTO_ICMP)) { - if (dxport) + if (dxport) { key.gwy.xport = *dxport; - else + } else { memset(&key.gwy.xport, 0, - sizeof (key.gwy.xport)); - if (pf_find_state_all(&key, PF_IN, NULL) == NULL) - return (0); + sizeof(key.gwy.xport)); + } + if (pf_find_state_all(&key, PF_IN, NULL) == NULL) { + return 0; + } } else if (low == 0 && high == 0) { key.gwy.xport = *nxport; if (pf_find_state_all(&key, PF_IN, NULL) == NULL ) { - return (0); + return 0; } } else if (low == high) { key.gwy.xport.port = htons(low); if (pf_find_state_all(&key, PF_IN, NULL) == NULL ) { nxport->port = htons(low); - return (0); + return 0; } } else { unsigned int tmp; @@ -3435,17 +3630,17 @@ pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r, for (tmp = cut; tmp <= high; ++(tmp)) { key.gwy.xport.port = htons(tmp); if (pf_find_state_all(&key, PF_IN, NULL) == NULL - ) { + ) { nxport->port = htons(tmp); - return (0); + return 0; } } for (tmp = cut - 1; tmp >= low; --(tmp)) { key.gwy.xport.port = htons(tmp); if (pf_find_state_all(&key, PF_IN, 
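The do/while being re-braced here is pf_get_sport()'s probe order: pick a random "cut" in [low, high], scan upward to high, then drop below the cut and scan down to low, taking the first port with no conflicting state. A self-contained sketch of that order (in_use() stands in for the pf_find_state_all() lookup, and an extra tmp <= high guard protects the unsigned downward loop when low is 0):

    #include <stdint.h>
    #include <stdlib.h>
    #include <arpa/inet.h>

    /* Returns 0 and a network-order port on success, -1 if the range
     * is exhausted. */
    static int
    pick_proxy_port(unsigned low, unsigned high,
        int (*in_use)(uint16_t port_net), uint16_t *out_net)
    {
        unsigned cut = low + (unsigned)rand() % (high - low + 1), tmp;

        for (tmp = cut; tmp <= high; ++tmp) {       /* scan up */
            if (!in_use(htons((uint16_t)tmp))) {
                *out_net = htons((uint16_t)tmp);
                return 0;
            }
        }
        for (tmp = cut - 1; tmp >= low && tmp <= high; --tmp) {
            if (!in_use(htons((uint16_t)tmp))) {    /* scan down */
                *out_net = htons((uint16_t)tmp);
                return 0;
            }
        }
        return -1;    /* none available */
    }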
NULL) == NULL - ) { + ) { nxport->port = htons(tmp); - return (0); + return 0; } } } @@ -3453,18 +3648,19 @@ pf_get_sport(struct pf_pdesc *pd, struct pfi_kif *kif, struct pf_rule *r, switch (r->rpool.opts & PF_POOL_TYPEMASK) { case PF_POOL_RANDOM: case PF_POOL_ROUNDROBIN: - if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) - return (1); + if (pf_map_addr(af, r, saddr, naddr, &init_addr, sn)) { + return 1; + } break; case PF_POOL_NONE: case PF_POOL_SRCHASH: case PF_POOL_BITMASK: default: - return (1); + return 1; } } while (!PF_AEQ(&init_addr, naddr, af)); - return (1); /* none available */ + return 1; /* none available */ } static struct pf_rule * @@ -3473,23 +3669,24 @@ pf_match_translation(struct pf_pdesc *pd, pbuf_t *pbuf, int off, union pf_state_xport *sxport, struct pf_addr *daddr, union pf_state_xport *dxport, int rs_num) { - struct pf_rule *r, *rm = NULL; - struct pf_ruleset *ruleset = NULL; - int tag = -1; - unsigned int rtableid = IFSCOPE_NONE; - int asd = 0; + struct pf_rule *r, *rm = NULL; + struct pf_ruleset *ruleset = NULL; + int tag = -1; + unsigned int rtableid = IFSCOPE_NONE; + int asd = 0; r = TAILQ_FIRST(pf_main_ruleset.rules[rs_num].active.ptr); while (r && rm == NULL) { - struct pf_rule_addr *src = NULL, *dst = NULL; - struct pf_addr_wrap *xdst = NULL; - struct pf_addr_wrap *xsrc = NULL; - union pf_rule_xport rdrxport; + struct pf_rule_addr *src = NULL, *dst = NULL; + struct pf_addr_wrap *xdst = NULL; + struct pf_addr_wrap *xsrc = NULL; + union pf_rule_xport rdrxport; if (r->action == PF_BINAT && direction == PF_IN) { src = &r->dst; - if (r->rpool.cur != NULL) + if (r->rpool.cur != NULL) { xdst = &r->rpool.cur->addr; + } } else if (r->action == PF_RDR && direction == PF_OUT) { dst = &r->src; src = &r->dst; @@ -3505,64 +3702,70 @@ pf_match_translation(struct pf_pdesc *pd, pbuf_t *pbuf, int off, } r->evaluations++; - if (pfi_kif_match(r->kif, kif) == r->ifnot) + if (pfi_kif_match(r->kif, kif) == r->ifnot) { r = r->skip[PF_SKIP_IFP].ptr; - else if (r->direction && r->direction != direction) + } else if (r->direction && r->direction != direction) { r = r->skip[PF_SKIP_DIR].ptr; - else if (r->af && r->af != pd->af) + } else if (r->af && r->af != pd->af) { r = r->skip[PF_SKIP_AF].ptr; - else if (r->proto && r->proto != pd->proto) + } else if (r->proto && r->proto != pd->proto) { r = r->skip[PF_SKIP_PROTO].ptr; - else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL)) + } else if (xsrc && PF_MISMATCHAW(xsrc, saddr, pd->af, 0, NULL)) { r = TAILQ_NEXT(r, entries); - else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af, - src->neg, kif)) + } else if (!xsrc && PF_MISMATCHAW(&src->addr, saddr, pd->af, + src->neg, kif)) { r = TAILQ_NEXT(r, entries); - else if (xsrc && (!rdrxport.range.port[0] || + } else if (xsrc && (!rdrxport.range.port[0] || !pf_match_xport(r->proto, r->proto_variant, &rdrxport, - sxport))) + sxport))) { r = TAILQ_NEXT(r, entries); - else if (!xsrc && !pf_match_xport(r->proto, - r->proto_variant, &src->xport, sxport)) + } else if (!xsrc && !pf_match_xport(r->proto, + r->proto_variant, &src->xport, sxport)) { r = r->skip[src == &r->src ? 
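The long if/else ladder in pf_match_translation() above relies on pf's "skip steps": consecutive rules sharing the same value for a field are pre-linked, so one failed comparison jumps over the entire run instead of re-testing each rule in it. A toy version of the idea for the address-family field alone (the struct and names here are hypothetical, not the kernel's):

    #include <stddef.h>

    /* Toy rule carrying one criterion and its matching skip pointer. */
    struct rule {
        int af;                    /* 0 means "any family" */
        struct rule *skip_af;      /* first rule with a different af */
        struct rule *next;         /* plain successor */
    };

    /*
     * Pick the next candidate after rule 'r' failed to match: if the
     * failure was the af criterion, every rule in the pre-computed run
     * fails the same way, so jump via the skip pointer; otherwise the
     * rule failed on some later criterion and we advance normally.
     */
    static struct rule *
    next_candidate(struct rule *r, int pkt_af)
    {
        if (r->af && r->af != pkt_af)
            return r->skip_af;
        return r->next;
    }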
PF_SKIP_SRC_PORT : PF_SKIP_DST_PORT].ptr; - else if (dst != NULL && - PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL)) + } else if (dst != NULL && + PF_MISMATCHAW(&dst->addr, daddr, pd->af, dst->neg, NULL)) { r = r->skip[PF_SKIP_DST_ADDR].ptr; - else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af, - 0, NULL)) + } else if (xdst != NULL && PF_MISMATCHAW(xdst, daddr, pd->af, + 0, NULL)) { r = TAILQ_NEXT(r, entries); - else if (dst && !pf_match_xport(r->proto, r->proto_variant, - &dst->xport, dxport)) + } else if (dst && !pf_match_xport(r->proto, r->proto_variant, + &dst->xport, dxport)) { r = r->skip[PF_SKIP_DST_PORT].ptr; - else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) + } else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) { r = TAILQ_NEXT(r, entries); - else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto != + } else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto != IPPROTO_TCP || !pf_osfp_match(pf_osfp_fingerprint(pd, pbuf, - off, pd->hdr.tcp), r->os_fingerprint))) + off, pd->hdr.tcp), r->os_fingerprint))) { r = TAILQ_NEXT(r, entries); - else { - if (r->tag) + } else { + if (r->tag) { tag = r->tag; - if (PF_RTABLEID_IS_VALID(r->rtableid)) + } + if (PF_RTABLEID_IS_VALID(r->rtableid)) { rtableid = r->rtableid; + } if (r->anchor == NULL) { rm = r; - } else + } else { pf_step_into_anchor(&asd, &ruleset, rs_num, &r, NULL, NULL); + } } - if (r == NULL) + if (r == NULL) { pf_step_out_of_anchor(&asd, &ruleset, rs_num, &r, NULL, NULL); + } + } + if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, NULL)) { + return NULL; } - if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, NULL)) - return (NULL); if (rm != NULL && (rm->action == PF_NONAT || rm->action == PF_NORDR || rm->action == PF_NOBINAT || - rm->action == PF_NONAT64)) - return (NULL); - return (rm); + rm->action == PF_NONAT64)) { + return NULL; + } + return rm; } /* @@ -3589,24 +3792,27 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, union pf_state_xport *dxport, union pf_state_xport *nsxport ) { - struct pf_rule *r = NULL; + struct pf_rule *r = NULL; pd->naf = pd->af; if (direction == PF_OUT) { r = pf_match_translation(pd, pbuf, off, direction, kif, saddr, sxport, daddr, dxport, PF_RULESET_BINAT); - if (r == NULL) + if (r == NULL) { r = pf_match_translation(pd, pbuf, off, direction, kif, saddr, sxport, daddr, dxport, PF_RULESET_RDR); - if (r == NULL) + } + if (r == NULL) { r = pf_match_translation(pd, pbuf, off, direction, kif, saddr, sxport, daddr, dxport, PF_RULESET_NAT); + } } else { r = pf_match_translation(pd, pbuf, off, direction, kif, saddr, sxport, daddr, dxport, PF_RULESET_RDR); - if (r == NULL) + if (r == NULL) { r = pf_match_translation(pd, pbuf, off, direction, kif, saddr, sxport, daddr, dxport, PF_RULESET_BINAT); + } } if (r != NULL) { @@ -3621,7 +3827,7 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, case PF_NONAT64: case PF_NOBINAT: case PF_NORDR: - return (NULL); + return NULL; case PF_NAT: case PF_NAT64: /* @@ -3631,19 +3837,19 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, * packet generated by local entity using synthesized * IPv6 address. 
*/ - if ((r->action == PF_NAT64) && (direction == PF_OUT)) - return (NULL); + if ((r->action == PF_NAT64) && (direction == PF_OUT)) { + return NULL; + } if (pf_get_sport(pd, kif, r, saddr, sxport, daddr, dxport, nsaddr, nsxport, sn - )) - { + )) { DPFPRINTF(PF_DEBUG_MISC, ("pf: NAT proxy port allocation " "(%u-%u) failed\n", r->rpool.proxy_port[0], r->rpool.proxy_port[1])); - return (NULL); + return NULL; } /* * For NAT64 the destination IPv4 address is derived @@ -3659,14 +3865,16 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, case PF_OUT: if (r->rpool.cur->addr.type == PF_ADDR_DYNIFTL) { - if (r->rpool.cur->addr.p.dyn == NULL) - return (NULL); + if (r->rpool.cur->addr.p.dyn == NULL) { + return NULL; + } switch (pd->af) { #if INET case AF_INET: if (r->rpool.cur->addr.p.dyn-> - pfid_acnt4 < 1) - return (NULL); + pfid_acnt4 < 1) { + return NULL; + } PF_POOLMASK(nsaddr, &r->rpool.cur->addr.p.dyn-> pfid_addr4, @@ -3678,8 +3886,9 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, #if INET6 case AF_INET6: if (r->rpool.cur->addr.p.dyn-> - pfid_acnt6 < 1) - return (NULL); + pfid_acnt6 < 1) { + return NULL; + } PF_POOLMASK(nsaddr, &r->rpool.cur->addr.p.dyn-> pfid_addr6, @@ -3698,14 +3907,16 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, break; case PF_IN: if (r->src.addr.type == PF_ADDR_DYNIFTL) { - if (r->src.addr.p.dyn == NULL) - return (NULL); + if (r->src.addr.p.dyn == NULL) { + return NULL; + } switch (pd->af) { #if INET case AF_INET: if (r->src.addr.p.dyn-> - pfid_acnt4 < 1) - return (NULL); + pfid_acnt4 < 1) { + return NULL; + } PF_POOLMASK(ndaddr, &r->src.addr.p.dyn-> pfid_addr4, @@ -3717,8 +3928,9 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, #if INET6 case AF_INET6: if (r->src.addr.p.dyn-> - pfid_acnt6 < 1) - return (NULL); + pfid_acnt6 < 1) { + return NULL; + } PF_POOLMASK(ndaddr, &r->src.addr.p.dyn-> pfid_addr6, @@ -3728,11 +3940,12 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, break; #endif /* INET6 */ } - } else + } else { PF_POOLMASK(ndaddr, &r->src.addr.v.a.addr, &r->src.addr.v.a.mask, daddr, pd->af); + } break; } break; @@ -3740,14 +3953,16 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, switch (direction) { case PF_OUT: if (r->dst.addr.type == PF_ADDR_DYNIFTL) { - if (r->dst.addr.p.dyn == NULL) - return (NULL); + if (r->dst.addr.p.dyn == NULL) { + return NULL; + } switch (pd->af) { #if INET case AF_INET: if (r->dst.addr.p.dyn-> - pfid_acnt4 < 1) - return (NULL); + pfid_acnt4 < 1) { + return NULL; + } PF_POOLMASK(nsaddr, &r->dst.addr.p.dyn-> pfid_addr4, @@ -3759,8 +3974,9 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, #if INET6 case AF_INET6: if (r->dst.addr.p.dyn-> - pfid_acnt6 < 1) - return (NULL); + pfid_acnt6 < 1) { + return NULL; + } PF_POOLMASK(nsaddr, &r->dst.addr.p.dyn-> pfid_addr6, @@ -3776,23 +3992,26 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, &r->dst.addr.v.a.mask, daddr, pd->af); } - if (nsxport && r->dst.xport.range.port[0]) + if (nsxport && r->dst.xport.range.port[0]) { nsxport->port = r->dst.xport.range.port[0]; + } break; case PF_IN: if (pf_map_addr(pd->af, r, saddr, - ndaddr, NULL, sn)) - return (NULL); + ndaddr, NULL, sn)) { + return NULL; + } if ((r->rpool.opts & PF_POOL_TYPEMASK) == - PF_POOL_BITMASK) + PF_POOL_BITMASK) { PF_POOLMASK(ndaddr, ndaddr, &r->rpool.cur->addr.v.a.mask, daddr, pd->af); + } if (nsxport && dxport) { if (r->rpool.proxy_port[1]) { - u_int32_t tmp_nport; + 
u_int32_t tmp_nport; tmp_nport = ((ntohs(dxport->port) - @@ -3803,8 +4022,9 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, 1)) + r->rpool.proxy_port[0]; /* wrap around if necessary */ - if (tmp_nport > 65535) + if (tmp_nport > 65535) { tmp_nport -= 65535; + } nsxport->port = htons((u_int16_t)tmp_nport); } else if (r->rpool.proxy_port[0]) { @@ -3817,50 +4037,53 @@ pf_get_translation_aux(struct pf_pdesc *pd, pbuf_t *pbuf, int off, break; } default: - return (NULL); + return NULL; } } - return (r); + return r; } int pf_socket_lookup(int direction, struct pf_pdesc *pd) { - struct pf_addr *saddr, *daddr; - u_int16_t sport, dport; - struct inpcbinfo *pi; - int inp = 0; + struct pf_addr *saddr, *daddr; + u_int16_t sport, dport; + struct inpcbinfo *pi; + int inp = 0; - if (pd == NULL) - return (-1); + if (pd == NULL) { + return -1; + } pd->lookup.uid = UID_MAX; pd->lookup.gid = GID_MAX; pd->lookup.pid = NO_PID; switch (pd->proto) { case IPPROTO_TCP: - if (pd->hdr.tcp == NULL) - return (-1); + if (pd->hdr.tcp == NULL) { + return -1; + } sport = pd->hdr.tcp->th_sport; dport = pd->hdr.tcp->th_dport; pi = &tcbinfo; break; case IPPROTO_UDP: - if (pd->hdr.udp == NULL) - return (-1); + if (pd->hdr.udp == NULL) { + return -1; + } sport = pd->hdr.udp->uh_sport; dport = pd->hdr.udp->uh_dport; pi = &udbinfo; break; default: - return (-1); + return -1; } if (direction == PF_IN) { saddr = pd->src; daddr = pd->dst; } else { - u_int16_t p; + u_int16_t p; p = sport; sport = dport; @@ -3877,15 +4100,15 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) if (inp == 0) { struct in6_addr s6, d6; - memset(&s6, 0, sizeof (s6)); + memset(&s6, 0, sizeof(s6)); s6.s6_addr16[5] = htons(0xffff); memcpy(&s6.s6_addr32[3], &saddr->v4addr, - sizeof (saddr->v4addr)); + sizeof(saddr->v4addr)); - memset(&d6, 0, sizeof (d6)); + memset(&d6, 0, sizeof(d6)); d6.s6_addr16[5] = htons(0xffff); memcpy(&d6.s6_addr32[3], &daddr->v4addr, - sizeof (daddr->v4addr)); + sizeof(daddr->v4addr)); inp = in6_pcblookup_hash_exists(pi, &s6, sport, &d6, dport, 0, &pd->lookup.uid, &pd->lookup.gid, NULL); @@ -3896,8 +4119,9 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) inp = in6_pcblookup_hash_exists(pi, &s6, sport, &d6, dport, INPLOOKUP_WILDCARD, &pd->lookup.uid, &pd->lookup.gid, NULL); - if (inp == 0) - return (-1); + if (inp == 0) { + return -1; + } } } } @@ -3906,8 +4130,9 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) inp = in_pcblookup_hash_exists(pi, saddr->v4addr, sport, daddr->v4addr, dport, INPLOOKUP_WILDCARD, &pd->lookup.uid, &pd->lookup.gid, NULL); - if (inp == 0) - return (-1); + if (inp == 0) { + return -1; + } } #endif /* !INET6 */ break; @@ -3920,34 +4145,37 @@ pf_socket_lookup(int direction, struct pf_pdesc *pd) inp = in6_pcblookup_hash_exists(pi, &saddr->v6addr, sport, &daddr->v6addr, dport, INPLOOKUP_WILDCARD, &pd->lookup.uid, &pd->lookup.gid, NULL); - if (inp == 0) - return (-1); + if (inp == 0) { + return -1; + } } break; #endif /* INET6 */ default: - return (-1); + return -1; } - return (1); + return 1; } static u_int8_t pf_get_wscale(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af) { - int hlen; - u_int8_t hdr[60]; - u_int8_t *opt, optlen; - u_int8_t wscale = 0; - - hlen = th_off << 2; /* hlen <= sizeof (hdr) */ - if (hlen <= (int)sizeof (struct tcphdr)) - return (0); - if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af)) - return (0); - opt = hdr + sizeof (struct tcphdr); - hlen -= sizeof (struct tcphdr); + int hlen; + u_int8_t hdr[60]; + u_int8_t *opt, optlen; + u_int8_t 
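The tmp_nport arithmetic just re-braced above maps an inbound destination port into an RDR rule's redirect range: the port keeps its offset within the rule's destination range, modulo the width of the proxy range, and anything past 65535 wraps. A sketch of the same computation, assuming the packet's port really falls inside the rule's range so the offset is non-negative (rdr_map_port and its parameter names are illustrative):

    #include <stdint.h>
    #include <arpa/inet.h>

    /*
     * dport_net / rule_lo_net: network-order destination port and the
     * low end of the rule's port range; proxy_lo / proxy_hi: the
     * redirect range in host order, as the rule stores it.
     */
    static uint16_t
    rdr_map_port(uint16_t dport_net, uint16_t rule_lo_net,
        uint16_t proxy_lo, uint16_t proxy_hi)
    {
        uint32_t span = (uint32_t)proxy_hi - proxy_lo + 1;
        uint32_t n = ((uint32_t)(ntohs(dport_net) - ntohs(rule_lo_net))
            % span) + proxy_lo;

        if (n > 65535)            /* wrap around, as the kernel does */
            n -= 65535;
        return htons((uint16_t)n);
    }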
wscale = 0; + + hlen = th_off << 2; /* hlen <= sizeof (hdr) */ + if (hlen <= (int)sizeof(struct tcphdr)) { + return 0; + } + if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af)) { + return 0; + } + opt = hdr + sizeof(struct tcphdr); + hlen -= sizeof(struct tcphdr); while (hlen >= 3) { switch (*opt) { case TCPOPT_EOL: @@ -3957,37 +4185,41 @@ pf_get_wscale(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af) break; case TCPOPT_WINDOW: wscale = opt[2]; - if (wscale > TCP_MAX_WINSHIFT) + if (wscale > TCP_MAX_WINSHIFT) { wscale = TCP_MAX_WINSHIFT; + } wscale |= PF_WSCALE_FLAG; - /* FALLTHROUGH */ + /* FALLTHROUGH */ default: optlen = opt[1]; - if (optlen < 2) + if (optlen < 2) { optlen = 2; + } hlen -= optlen; opt += optlen; break; } } - return (wscale); + return wscale; } static u_int16_t pf_get_mss(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af) { - int hlen; - u_int8_t hdr[60]; - u_int8_t *opt, optlen; - u_int16_t mss = tcp_mssdflt; - - hlen = th_off << 2; /* hlen <= sizeof (hdr) */ - if (hlen <= (int)sizeof (struct tcphdr)) - return (0); - if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af)) - return (0); - opt = hdr + sizeof (struct tcphdr); - hlen -= sizeof (struct tcphdr); + int hlen; + u_int8_t hdr[60]; + u_int8_t *opt, optlen; + u_int16_t mss = tcp_mssdflt; + + hlen = th_off << 2; /* hlen <= sizeof (hdr) */ + if (hlen <= (int)sizeof(struct tcphdr)) { + return 0; + } + if (!pf_pull_hdr(pbuf, off, hdr, hlen, NULL, NULL, af)) { + return 0; + } + opt = hdr + sizeof(struct tcphdr); + hlen -= sizeof(struct tcphdr); while (hlen >= TCPOLEN_MAXSEG) { switch (*opt) { case TCPOPT_EOL: @@ -4000,42 +4232,43 @@ pf_get_mss(pbuf_t *pbuf, int off, u_int16_t th_off, sa_family_t af) #if BYTE_ORDER != BIG_ENDIAN NTOHS(mss); #endif - /* FALLTHROUGH */ + /* FALLTHROUGH */ default: optlen = opt[1]; - if (optlen < 2) + if (optlen < 2) { optlen = 2; + } hlen -= optlen; opt += optlen; break; } } - return (mss); + return mss; } static u_int16_t pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) { #if INET - struct sockaddr_in *dst; - struct route ro; + struct sockaddr_in *dst; + struct route ro; #endif /* INET */ #if INET6 - struct sockaddr_in6 *dst6; - struct route_in6 ro6; + struct sockaddr_in6 *dst6; + struct route_in6 ro6; #endif /* INET6 */ - struct rtentry *rt = NULL; - int hlen; - u_int16_t mss = tcp_mssdflt; + struct rtentry *rt = NULL; + int hlen; + u_int16_t mss = tcp_mssdflt; switch (af) { #if INET case AF_INET: - hlen = sizeof (struct ip); - bzero(&ro, sizeof (ro)); + hlen = sizeof(struct ip); + bzero(&ro, sizeof(ro)); dst = (struct sockaddr_in *)(void *)&ro.ro_dst; dst->sin_family = AF_INET; - dst->sin_len = sizeof (*dst); + dst->sin_len = sizeof(*dst); dst->sin_addr = addr->v4addr; rtalloc(&ro); rt = ro.ro_rt; @@ -4043,11 +4276,11 @@ pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) #endif /* INET */ #if INET6 case AF_INET6: - hlen = sizeof (struct ip6_hdr); - bzero(&ro6, sizeof (ro6)); + hlen = sizeof(struct ip6_hdr); + bzero(&ro6, sizeof(ro6)); dst6 = (struct sockaddr_in6 *)(void *)&ro6.ro_dst; dst6->sin6_family = AF_INET6; - dst6->sin6_len = sizeof (*dst6); + dst6->sin6_len = sizeof(*dst6); dst6->sin6_addr = addr->v6addr; rtalloc((struct route *)&ro); rt = ro6.ro_rt; @@ -4055,11 +4288,11 @@ pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) #endif /* INET6 */ default: panic("pf_calc_mss: not AF_INET or AF_INET6!"); - return (0); + return 0; } if (rt && rt->rt_ifp) { - /* This is relevant only for PF SYN Proxy */ + /* This is 
relevant only for PF SYN Proxy */ int interface_mtu = rt->rt_ifp->if_mtu; if (af == AF_INET && @@ -4068,13 +4301,13 @@ pf_calc_mss(struct pf_addr *addr, sa_family_t af, u_int16_t offer) /* Further adjust the size for CLAT46 expansion */ interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD; } - mss = interface_mtu - hlen - sizeof (struct tcphdr); + mss = interface_mtu - hlen - sizeof(struct tcphdr); mss = max(tcp_mssdflt, mss); rtfree(rt); } mss = min(mss, offer); - mss = max(mss, 64); /* sanity - at least max opt space */ - return (mss); + mss = max(mss, 64); /* sanity - at least max opt space */ + return mss; } static void @@ -4084,8 +4317,9 @@ pf_set_rt_ifp(struct pf_state *s, struct pf_addr *saddr, sa_family_t af) s->rt_kif = NULL; - if (!r->rt || r->rt == PF_FASTROUTE) + if (!r->rt || r->rt == PF_FASTROUTE) { return; + } if ((af == AF_INET) || (af == AF_INET6)) { pf_map_addr(af, r, saddr, &s->rt_addr, NULL, &s->nat_src_node); @@ -4102,31 +4336,36 @@ pf_attach_state(struct pf_state_key *sk, struct pf_state *s, int tail) sk->refcnt++; /* list is sorted, if-bound states before floating */ - if (tail) + if (tail) { TAILQ_INSERT_TAIL(&sk->states, s, next); - else + } else { TAILQ_INSERT_HEAD(&sk->states, s, next); + } } static void pf_detach_state(struct pf_state *s, int flags) { - struct pf_state_key *sk = s->state_key; + struct pf_state_key *sk = s->state_key; - if (sk == NULL) + if (sk == NULL) { return; + } s->state_key = NULL; TAILQ_REMOVE(&sk->states, s, next); if (--sk->refcnt == 0) { - if (!(flags & PF_DT_SKIP_EXTGWY)) + if (!(flags & PF_DT_SKIP_EXTGWY)) { RB_REMOVE(pf_state_tree_ext_gwy, &pf_statetbl_ext_gwy, sk); - if (!(flags & PF_DT_SKIP_LANEXT)) + } + if (!(flags & PF_DT_SKIP_LANEXT)) { RB_REMOVE(pf_state_tree_lan_ext, &pf_statetbl_lan_ext, sk); - if (sk->app_state) + } + if (sk->app_state) { pool_put(&pf_app_state_pl, sk->app_state); + } pool_put(&pf_state_key_pl, sk); } } @@ -4134,20 +4373,21 @@ pf_detach_state(struct pf_state *s, int flags) struct pf_state_key * pf_alloc_state_key(struct pf_state *s, struct pf_state_key *psk) { - struct pf_state_key *sk; + struct pf_state_key *sk; - if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL) - return (NULL); - bzero(sk, sizeof (*sk)); + if ((sk = pool_get(&pf_state_key_pl, PR_WAITOK)) == NULL) { + return NULL; + } + bzero(sk, sizeof(*sk)); TAILQ_INIT(&sk->states); pf_attach_state(sk, s, 0); /* initialize state key from psk, if provided */ if (psk != NULL) { - bcopy(&psk->lan, &sk->lan, sizeof (sk->lan)); - bcopy(&psk->gwy, &sk->gwy, sizeof (sk->gwy)); - bcopy(&psk->ext_lan, &sk->ext_lan, sizeof (sk->ext_lan)); - bcopy(&psk->ext_gwy, &sk->ext_gwy, sizeof (sk->ext_gwy)); + bcopy(&psk->lan, &sk->lan, sizeof(sk->lan)); + bcopy(&psk->gwy, &sk->gwy, sizeof(sk->gwy)); + bcopy(&psk->ext_lan, &sk->ext_lan, sizeof(sk->ext_lan)); + bcopy(&psk->ext_gwy, &sk->ext_gwy, sizeof(sk->ext_gwy)); sk->af_lan = psk->af_lan; sk->af_gwy = psk->af_gwy; sk->proto = psk->proto; @@ -4159,7 +4399,7 @@ pf_alloc_state_key(struct pf_state *s, struct pf_state_key *psk) /* don't touch tree entries, states and refcnt on sk */ } - return (sk); + return sk; } static u_int32_t @@ -4169,26 +4409,26 @@ pf_tcp_iss(struct pf_pdesc *pd) u_int32_t digest[4]; if (pf_tcp_secret_init == 0) { - read_frandom(pf_tcp_secret, sizeof (pf_tcp_secret)); + read_frandom(pf_tcp_secret, sizeof(pf_tcp_secret)); MD5Init(&pf_tcp_secret_ctx); MD5Update(&pf_tcp_secret_ctx, pf_tcp_secret, - sizeof (pf_tcp_secret)); + sizeof(pf_tcp_secret)); pf_tcp_secret_init = 1; } ctx = pf_tcp_secret_ctx; - 
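pf_calc_mss(), touched above, feeds the SYN proxy: it derives an MSS from the route's interface MTU minus network and TCP header overhead, then clamps against the system default MSS, the peer's offer, and a 64-byte floor. The clamp order matters and is preserved in this sketch (syn_proxy_mss is illustrative; mssdflt is parameterized where xnu reads tcp_mssdflt):

    #include <stdint.h>

    /*
     * hlen: network-layer header size (20 for IPv4, 40 for IPv6);
     * offer: the MSS the peer advertised; mssdflt: system default MSS.
     */
    static uint16_t
    syn_proxy_mss(uint32_t if_mtu, uint16_t hlen, uint16_t offer,
        uint16_t mssdflt)
    {
        uint32_t mss = if_mtu - hlen - 20;  /* 20 == sizeof(struct tcphdr) */

        if (mss < mssdflt)
            mss = mssdflt;
        if (mss > offer)
            mss = offer;
        if (mss < 64)             /* sanity - at least max opt space */
            mss = 64;
        return (uint16_t)mss;
    }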
MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof (u_short)); - MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof (u_short)); + MD5Update(&ctx, (char *)&pd->hdr.tcp->th_sport, sizeof(u_short)); + MD5Update(&ctx, (char *)&pd->hdr.tcp->th_dport, sizeof(u_short)); if (pd->af == AF_INET6) { - MD5Update(&ctx, (char *)&pd->src->v6addr, sizeof (struct in6_addr)); - MD5Update(&ctx, (char *)&pd->dst->v6addr, sizeof (struct in6_addr)); + MD5Update(&ctx, (char *)&pd->src->v6addr, sizeof(struct in6_addr)); + MD5Update(&ctx, (char *)&pd->dst->v6addr, sizeof(struct in6_addr)); } else { - MD5Update(&ctx, (char *)&pd->src->v4addr, sizeof (struct in_addr)); - MD5Update(&ctx, (char *)&pd->dst->v4addr, sizeof (struct in_addr)); + MD5Update(&ctx, (char *)&pd->src->v4addr, sizeof(struct in_addr)); + MD5Update(&ctx, (char *)&pd->dst->v4addr, sizeof(struct in_addr)); } MD5Final((u_char *)digest, &ctx); pf_tcp_iss_off += 4096; - return (digest[0] + random() + pf_tcp_iss_off); + return digest[0] + random() + pf_tcp_iss_off; } /* @@ -4198,17 +4438,18 @@ pf_tcp_iss(struct pf_pdesc *pd) */ static int pf_change_icmp_af(pbuf_t *pbuf, int off, - struct pf_pdesc *pd, struct pf_pdesc *pd2, struct pf_addr *src, - struct pf_addr *dst, sa_family_t af, sa_family_t naf) + struct pf_pdesc *pd, struct pf_pdesc *pd2, struct pf_addr *src, + struct pf_addr *dst, sa_family_t af, sa_family_t naf) { - struct ip *ip4 = NULL; - struct ip6_hdr *ip6 = NULL; - void *hdr; - int hlen, olen; + struct ip *ip4 = NULL; + struct ip6_hdr *ip6 = NULL; + void *hdr; + int hlen, olen; if (af == naf || (af != AF_INET && af != AF_INET6) || - (naf != AF_INET && naf != AF_INET6)) - return (-1); + (naf != AF_INET && naf != AF_INET6)) { + return -1; + } /* old header */ olen = pd2->off - off; @@ -4217,8 +4458,9 @@ pf_change_icmp_af(pbuf_t *pbuf, int off, /* Modify the pbuf to accommodate the new header */ hdr = pbuf_resize_segment(pbuf, off, olen, hlen); - if (hdr == NULL) - return (-1); + if (hdr == NULL) { + return -1; + } /* translate inner ip/ip6 header */ switch (naf) { @@ -4231,10 +4473,11 @@ pf_change_icmp_af(pbuf_t *pbuf, int off, ip4->ip_id = rfc6864 ? 
0 : htons(ip_randomid()); ip4->ip_off = htons(IP_DF); ip4->ip_ttl = pd2->ttl; - if (pd2->proto == IPPROTO_ICMPV6) + if (pd2->proto == IPPROTO_ICMPV6) { ip4->ip_p = IPPROTO_ICMP; - else + } else { ip4->ip_p = pd2->proto; + } ip4->ip_src = src->v4addr; ip4->ip_dst = dst->v4addr; ip4->ip_sum = pbuf_inet_cksum(pbuf, 0, 0, ip4->ip_hl << 2); @@ -4244,14 +4487,16 @@ pf_change_icmp_af(pbuf_t *pbuf, int off, bzero(ip6, sizeof(*ip6)); ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_plen = htons(pd2->tot_len - olen); - if (pd2->proto == IPPROTO_ICMP) + if (pd2->proto == IPPROTO_ICMP) { ip6->ip6_nxt = IPPROTO_ICMPV6; - else + } else { ip6->ip6_nxt = pd2->proto; - if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM) + } + if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM) { ip6->ip6_hlim = IPV6_DEFHLIM; - else + } else { ip6->ip6_hlim = pd2->ttl; + } ip6->ip6_src = src->v6addr; ip6->ip6_dst = dst->v6addr; break; @@ -4261,21 +4506,21 @@ pf_change_icmp_af(pbuf_t *pbuf, int off, pd2->off += hlen - olen; pd->tot_len += hlen - olen; - return (0); + return 0; } -#define PTR_IP(field) ((int32_t)offsetof(struct ip, field)) -#define PTR_IP6(field) ((int32_t)offsetof(struct ip6_hdr, field)) +#define PTR_IP(field) ((int32_t)offsetof(struct ip, field)) +#define PTR_IP6(field) ((int32_t)offsetof(struct ip6_hdr, field)) static int pf_translate_icmp_af(int af, void *arg) { - struct icmp *icmp4; - struct icmp6_hdr *icmp6; - u_int32_t mtu; - int32_t ptr = -1; - u_int8_t type; - u_int8_t code; + struct icmp *icmp4; + struct icmp6_hdr *icmp6; + u_int32_t mtu; + int32_t ptr = -1; + u_int8_t type; + u_int8_t code; switch (af) { case AF_INET: @@ -4306,7 +4551,7 @@ pf_translate_icmp_af(int af, void *arg) code = ICMP_UNREACH_PORT; break; default: - return (-1); + return -1; } break; case ICMP6_PACKET_TOO_BIG: @@ -4324,25 +4569,25 @@ pf_translate_icmp_af(int af, void *arg) code = ICMP_PARAMPROB_ERRATPTR; ptr = ntohl(icmp6->icmp6_pptr); - if (ptr == PTR_IP6(ip6_vfc)) + if (ptr == PTR_IP6(ip6_vfc)) { ; /* preserve */ - else if (ptr == PTR_IP6(ip6_vfc) + 1) + } else if (ptr == PTR_IP6(ip6_vfc) + 1) { ptr = PTR_IP(ip_tos); - else if (ptr == PTR_IP6(ip6_plen) || - ptr == PTR_IP6(ip6_plen) + 1) + } else if (ptr == PTR_IP6(ip6_plen) || + ptr == PTR_IP6(ip6_plen) + 1) { ptr = PTR_IP(ip_len); - else if (ptr == PTR_IP6(ip6_nxt)) + } else if (ptr == PTR_IP6(ip6_nxt)) { ptr = PTR_IP(ip_p); - else if (ptr == PTR_IP6(ip6_hlim)) + } else if (ptr == PTR_IP6(ip6_hlim)) { ptr = PTR_IP(ip_ttl); - else if (ptr >= PTR_IP6(ip6_src) && - ptr < PTR_IP6(ip6_dst)) + } else if (ptr >= PTR_IP6(ip6_src) && + ptr < PTR_IP6(ip6_dst)) { ptr = PTR_IP(ip_src); - else if (ptr >= PTR_IP6(ip6_dst) && - ptr < (int32_t)sizeof(struct ip6_hdr)) + } else if (ptr >= PTR_IP6(ip6_dst) && + ptr < (int32_t)sizeof(struct ip6_hdr)) { ptr = PTR_IP(ip_dst); - else { - return (-1); + } else { + return -1; } break; case ICMP6_PARAMPROB_NEXTHEADER: @@ -4350,19 +4595,20 @@ pf_translate_icmp_af(int af, void *arg) code = ICMP_UNREACH_PROTOCOL; break; default: - return (-1); + return -1; } break; default: - return (-1); + return -1; } icmp6->icmp6_type = type; icmp6->icmp6_code = code; /* aligns well with a icmpv4 nextmtu */ icmp6->icmp6_mtu = htonl(mtu); /* icmpv4 pptr is a one most significant byte */ - if (ptr >= 0) + if (ptr >= 0) { icmp6->icmp6_pptr = htonl(ptr << 24); + } break; case AF_INET6: @@ -4410,7 +4656,7 @@ pf_translate_icmp_af(int af, void *arg) mtu += 20; break; default: - return (-1); + return -1; } break; case ICMP_TIMXCEED: @@ -4426,48 +4672,49 @@ pf_translate_icmp_af(int af, void *arg) code = 
ICMP6_PARAMPROB_HEADER; break; default: - return (-1); + return -1; } ptr = icmp4->icmp_pptr; - if (ptr == 0 || ptr == PTR_IP(ip_tos)) + if (ptr == 0 || ptr == PTR_IP(ip_tos)) { ; /* preserve */ - else if (ptr == PTR_IP(ip_len) || - ptr == PTR_IP(ip_len) + 1) + } else if (ptr == PTR_IP(ip_len) || + ptr == PTR_IP(ip_len) + 1) { ptr = PTR_IP6(ip6_plen); - else if (ptr == PTR_IP(ip_ttl)) + } else if (ptr == PTR_IP(ip_ttl)) { ptr = PTR_IP6(ip6_hlim); - else if (ptr == PTR_IP(ip_p)) + } else if (ptr == PTR_IP(ip_p)) { ptr = PTR_IP6(ip6_nxt); - else if (ptr >= PTR_IP(ip_src) && - ptr < PTR_IP(ip_dst)) + } else if (ptr >= PTR_IP(ip_src) && + ptr < PTR_IP(ip_dst)) { ptr = PTR_IP6(ip6_src); - else if (ptr >= PTR_IP(ip_dst) && - ptr < (int32_t)sizeof(struct ip)) + } else if (ptr >= PTR_IP(ip_dst) && + ptr < (int32_t)sizeof(struct ip)) { ptr = PTR_IP6(ip6_dst); - else { - return (-1); + } else { + return -1; } break; default: - return (-1); + return -1; } icmp4->icmp_type = type; icmp4->icmp_code = code; icmp4->icmp_nextmtu = htons(mtu); - if (ptr >= 0) + if (ptr >= 0) { icmp4->icmp_void = htonl(ptr); + } break; } - return (0); + return 0; } /* Note: frees pbuf if PF_NAT64 is returned */ static int pf_nat64_ipv6(pbuf_t *pbuf, int off, struct pf_pdesc *pd) { - struct ip *ip4; + struct ip *ip4; struct mbuf *m; /* @@ -4478,12 +4725,14 @@ pf_nat64_ipv6(pbuf_t *pbuf, int off, struct pf_pdesc *pd) * 2. If IPv6 stack in kernel internally generates a * message destined for a synthesized IPv6 end-point. */ - if (pbuf->pb_ifp == NULL) - return (PF_DROP); + if (pbuf->pb_ifp == NULL) { + return PF_DROP; + } ip4 = (struct ip *)pbuf_resize_segment(pbuf, 0, off, sizeof(*ip4)); - if (ip4 == NULL) - return (PF_DROP); + if (ip4 == NULL) { + return PF_DROP; + } ip4->ip_v = 4; ip4->ip_hl = 5; @@ -4505,32 +4754,36 @@ pf_nat64_ipv6(pbuf_t *pbuf, int off, struct pf_pdesc *pd) icmp = (struct icmp *)pbuf_contig_segment(pbuf, hlen, ICMP_MINLEN); - if (icmp == NULL) - return (PF_DROP); + if (icmp == NULL) { + return PF_DROP; + } icmp->icmp_cksum = 0; icmp->icmp_cksum = pbuf_inet_cksum(pbuf, 0, hlen, - ntohs(ip4->ip_len) - hlen); + ntohs(ip4->ip_len) - hlen); } - if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL) + if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL) { ip_input(m); + } - return (PF_NAT64); + return PF_NAT64; } static int pf_nat64_ipv4(pbuf_t *pbuf, int off, struct pf_pdesc *pd) { - struct ip6_hdr *ip6; + struct ip6_hdr *ip6; struct mbuf *m; - if (pbuf->pb_ifp == NULL) - return (PF_DROP); + if (pbuf->pb_ifp == NULL) { + return PF_DROP; + } ip6 = (struct ip6_hdr *)pbuf_resize_segment(pbuf, 0, off, sizeof(*ip6)); - if (ip6 == NULL) - return (PF_DROP); + if (ip6 == NULL) { + return PF_DROP; + } ip6->ip6_vfc = htonl((6 << 28) | (pd->tos << 20)); ip6->ip6_plen = htons(pd->tot_len - off); @@ -4546,31 +4799,35 @@ pf_nat64_ipv4(pbuf_t *pbuf, int off, struct pf_pdesc *pd) icmp6 = (struct icmp6_hdr *)pbuf_contig_segment(pbuf, hlen, sizeof(*icmp6)); - if (icmp6 == NULL) - return (PF_DROP); + if (icmp6 == NULL) { + return PF_DROP; + } icmp6->icmp6_cksum = 0; icmp6->icmp6_cksum = pbuf_inet6_cksum(pbuf, - IPPROTO_ICMPV6, hlen, - ntohs(ip6->ip6_plen)); + IPPROTO_ICMPV6, hlen, + ntohs(ip6->ip6_plen)); } else if (pd->proto == IPPROTO_UDP) { struct udphdr *uh; int hlen = sizeof(*ip6); uh = (struct udphdr *)pbuf_contig_segment(pbuf, hlen, sizeof(*uh)); - if (uh == NULL) - return (PF_DROP); + if (uh == NULL) { + return PF_DROP; + } - if (uh->uh_sum == 0) + if (uh->uh_sum == 0) { uh->uh_sum = pbuf_inet6_cksum(pbuf, IPPROTO_UDP, - hlen, 
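For the pf_nat64_ipv6()/pf_nat64_ipv4() hunks: once the address family flips, the transport checksum is no longer valid, so the code zeroes the ICMP/ICMPv6 checksum field and recomputes it over the message via pbuf_inet_cksum()/pbuf_inet6_cksum(). The underlying fold is the standard Internet checksum; a flat-buffer reference version for comparison (inet_cksum is illustrative, result is host order and would be stored with htons(), and the kernel's variant walks pbuf segments instead):

    #include <stdint.h>
    #include <stddef.h>

    /* One's-complement sum over a buffer, big-endian accumulation. */
    static uint16_t
    inet_cksum(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t sum = 0;

        while (len > 1) {
            sum += (uint32_t)p[0] << 8 | p[1];
            p += 2;
            len -= 2;
        }
        if (len)                       /* odd trailing byte */
            sum += (uint32_t)p[0] << 8;
        while (sum >> 16)              /* fold carries */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }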
ntohs(ip6->ip6_plen)); + hlen, ntohs(ip6->ip6_plen)); + } } - if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL) + if ((m = pbuf_to_mbuf(pbuf, TRUE)) != NULL) { ip6_input(m); + } - return (PF_NAT64); + return PF_NAT64; } static int @@ -4580,33 +4837,33 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, struct ifqueue *ifq) { #pragma unused(h) - struct pf_rule *nr = NULL; - struct pf_addr *saddr = pd->src, *daddr = pd->dst; - sa_family_t af = pd->af; - struct pf_rule *r, *a = NULL; - struct pf_ruleset *ruleset = NULL; - struct pf_src_node *nsn = NULL; - struct tcphdr *th = pd->hdr.tcp; - struct udphdr *uh = pd->hdr.udp; - u_short reason; - int rewrite = 0, hdrlen = 0; - int tag = -1; - unsigned int rtableid = IFSCOPE_NONE; - int asd = 0; - int match = 0; - int state_icmp = 0; - u_int16_t mss = tcp_mssdflt; - u_int8_t icmptype = 0, icmpcode = 0; - - struct pf_grev1_hdr *grev1 = pd->hdr.grev1; + struct pf_rule *nr = NULL; + struct pf_addr *saddr = pd->src, *daddr = pd->dst; + sa_family_t af = pd->af; + struct pf_rule *r, *a = NULL; + struct pf_ruleset *ruleset = NULL; + struct pf_src_node *nsn = NULL; + struct tcphdr *th = pd->hdr.tcp; + struct udphdr *uh = pd->hdr.udp; + u_short reason; + int rewrite = 0, hdrlen = 0; + int tag = -1; + unsigned int rtableid = IFSCOPE_NONE; + int asd = 0; + int match = 0; + int state_icmp = 0; + u_int16_t mss = tcp_mssdflt; + u_int8_t icmptype = 0, icmpcode = 0; + + struct pf_grev1_hdr *grev1 = pd->hdr.grev1; union pf_state_xport bxport, bdxport, nxport, sxport, dxport; - struct pf_state_key psk; + struct pf_state_key psk; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); if (direction == PF_IN && pf_check_congestion(ifq)) { REASON_SET(&reason, PFRES_CONGEST); - return (PF_DROP); + return PF_DROP; } hdrlen = 0; @@ -4618,50 +4875,54 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, case IPPROTO_TCP: sxport.port = th->th_sport; dxport.port = th->th_dport; - hdrlen = sizeof (*th); + hdrlen = sizeof(*th); break; case IPPROTO_UDP: sxport.port = uh->uh_sport; dxport.port = uh->uh_dport; - hdrlen = sizeof (*uh); + hdrlen = sizeof(*uh); break; #if INET case IPPROTO_ICMP: - if (pd->af != AF_INET) + if (pd->af != AF_INET) { break; + } sxport.port = dxport.port = pd->hdr.icmp->icmp_id; hdrlen = ICMP_MINLEN; icmptype = pd->hdr.icmp->icmp_type; icmpcode = pd->hdr.icmp->icmp_code; - if (ICMP_ERRORTYPE(icmptype)) + if (ICMP_ERRORTYPE(icmptype)) { state_icmp++; + } break; #endif /* INET */ #if INET6 case IPPROTO_ICMPV6: - if (pd->af != AF_INET6) + if (pd->af != AF_INET6) { break; + } sxport.port = dxport.port = pd->hdr.icmp6->icmp6_id; - hdrlen = sizeof (*pd->hdr.icmp6); + hdrlen = sizeof(*pd->hdr.icmp6); icmptype = pd->hdr.icmp6->icmp6_type; icmpcode = pd->hdr.icmp6->icmp6_code; - if (ICMP6_ERRORTYPE(icmptype)) + if (ICMP6_ERRORTYPE(icmptype)) { state_icmp++; + } break; #endif /* INET6 */ case IPPROTO_GRE: if (pd->proto_variant == PF_GRE_PPTP_VARIANT) { sxport.call_id = dxport.call_id = pd->hdr.grev1->call_id; - hdrlen = sizeof (*pd->hdr.grev1); + hdrlen = sizeof(*pd->hdr.grev1); } break; case IPPROTO_ESP: sxport.spi = 0; dxport.spi = pd->hdr.esp->spi; - hdrlen = sizeof (*pd->hdr.esp); + hdrlen = sizeof(*pd->hdr.esp); break; } @@ -4670,22 +4931,24 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, bxport = sxport; bdxport = dxport; - if (direction == PF_OUT) + if (direction == PF_OUT) { nxport = sxport; - else + } else { nxport = dxport; + } /* check packet for BINAT/NAT/RDR */ if ((nr = pf_get_translation_aux(pd, 
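
pf_nat64_ipv4() above fills in a UDP checksum only when the datagram arrived with uh_sum == 0: IPv4 allows a disabled (all-zero) UDP checksum, IPv6 does not, so the translator has to compute one over the IPv6 pseudo-header. A standalone sketch of what the kernel's pbuf_inet6_cksum() call produces in that case; it assumes the checksum field inside udp[] is already zeroed, and all names are mine:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* One's-complement sum of big-endian 16-bit words. */
static uint64_t
sum16(uint64_t sum, const uint8_t *p, size_t len)
{
	while (len > 1) {
		sum += (uint16_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint16_t)(p[0] << 8);	/* odd trailing byte */
	return sum;
}

/* UDP checksum over the IPv6 pseudo-header of RFC 8200 section 8.1:
 * source, destination, 32-bit upper-layer length, next header 17. */
static uint16_t
udp6_cksum(const uint8_t src[16], const uint8_t dst[16],
    const uint8_t *udp, uint32_t udplen)
{
	uint8_t ph[40] = { 0 };
	uint64_t sum;
	uint16_t folded;

	memcpy(ph, src, 16);
	memcpy(ph + 16, dst, 16);
	ph[32] = (uint8_t)(udplen >> 24);
	ph[33] = (uint8_t)(udplen >> 16);
	ph[34] = (uint8_t)(udplen >> 8);
	ph[35] = (uint8_t)udplen;
	ph[39] = 17;				/* next header: UDP */

	sum = sum16(0, ph, sizeof(ph));
	sum = sum16(sum, udp, udplen);
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	folded = (uint16_t)~sum;
	return folded ? folded : 0xffff;	/* 0 means "no checksum" */
}
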
pbuf, off, direction, kif, &nsn, - saddr, &sxport, daddr, &dxport, &nxport - )) != NULL) { + saddr, &sxport, daddr, &dxport, &nxport + )) != NULL) { int ua; u_int16_t dport; - if (pd->af != pd->naf) + if (pd->af != pd->naf) { ua = 0; - else + } else { ua = 1; + } PF_ACPY(&pd->baddr, saddr, af); PF_ACPY(&pd->bdaddr, daddr, af); @@ -4695,24 +4958,25 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, if (pd->af != pd->naf || PF_ANEQ(saddr, &pd->naddr, pd->af)) { pf_change_ap(direction, pd->mp, saddr, - &th->th_sport, pd->ip_sum, &th->th_sum, - &pd->naddr, nxport.port, 0, af, - pd->naf, ua); + &th->th_sport, pd->ip_sum, &th->th_sum, + &pd->naddr, nxport.port, 0, af, + pd->naf, ua); sxport.port = th->th_sport; } if (pd->af != pd->naf || PF_ANEQ(daddr, &pd->ndaddr, pd->af) || (nr && (nr->action == PF_RDR) && - (th->th_dport != nxport.port))) { - if (nr && nr->action == PF_RDR) + (th->th_dport != nxport.port))) { + if (nr && nr->action == PF_RDR) { dport = nxport.port; - else + } else { dport = th->th_dport; + } pf_change_ap(direction, pd->mp, daddr, - &th->th_dport, pd->ip_sum, - &th->th_sum, &pd->ndaddr, - dport, 0, af, pd->naf, ua); + &th->th_dport, pd->ip_sum, + &th->th_sum, &pd->ndaddr, + dport, 0, af, pd->naf, ua); dxport.port = th->th_dport; } rewrite++; @@ -4722,32 +4986,34 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, if (pd->af != pd->naf || PF_ANEQ(saddr, &pd->naddr, pd->af)) { pf_change_ap(direction, pd->mp, saddr, - &uh->uh_sport, pd->ip_sum, - &uh->uh_sum, &pd->naddr, - nxport.port, 1, af, pd->naf, ua); + &uh->uh_sport, pd->ip_sum, + &uh->uh_sum, &pd->naddr, + nxport.port, 1, af, pd->naf, ua); sxport.port = uh->uh_sport; } if (pd->af != pd->naf || PF_ANEQ(daddr, &pd->ndaddr, pd->af) || (nr && (nr->action == PF_RDR) && - (uh->uh_dport != nxport.port))) { - if (nr && nr->action == PF_RDR) + (uh->uh_dport != nxport.port))) { + if (nr && nr->action == PF_RDR) { dport = nxport.port; - else + } else { dport = uh->uh_dport; + } pf_change_ap(direction, pd->mp, daddr, - &uh->uh_dport, pd->ip_sum, - &uh->uh_sum, &pd->ndaddr, - dport, 0, af, pd->naf, ua); + &uh->uh_dport, pd->ip_sum, + &uh->uh_sum, &pd->ndaddr, + dport, 0, af, pd->naf, ua); dxport.port = uh->uh_dport; } rewrite++; break; #if INET case IPPROTO_ICMP: - if (pd->af != AF_INET) + if (pd->af != AF_INET) { break; + } /* * TODO: * pd->af != pd->naf not handled yet here and would be @@ -4756,43 +5022,45 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, */ if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { pf_change_a(&saddr->v4addr.s_addr, pd->ip_sum, - pd->naddr.v4addr.s_addr, 0); + pd->naddr.v4addr.s_addr, 0); pd->hdr.icmp->icmp_cksum = pf_cksum_fixup( - pd->hdr.icmp->icmp_cksum, sxport.port, - nxport.port, 0); + pd->hdr.icmp->icmp_cksum, sxport.port, + nxport.port, 0); pd->hdr.icmp->icmp_id = nxport.port; } if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) { pf_change_a(&daddr->v4addr.s_addr, pd->ip_sum, - pd->ndaddr.v4addr.s_addr, 0); + pd->ndaddr.v4addr.s_addr, 0); } ++rewrite; break; #endif /* INET */ #if INET6 case IPPROTO_ICMPV6: - if (pd->af != AF_INET6) + if (pd->af != AF_INET6) { break; + } if (pd->af != pd->naf || PF_ANEQ(saddr, &pd->naddr, pd->af)) { pf_change_addr(saddr, - &pd->hdr.icmp6->icmp6_cksum, - &pd->naddr, 0, pd->af, pd->naf); + &pd->hdr.icmp6->icmp6_cksum, + &pd->naddr, 0, pd->af, pd->naf); } if (pd->af != pd->naf || PF_ANEQ(daddr, &pd->ndaddr, pd->af)) { pf_change_addr(daddr, - &pd->hdr.icmp6->icmp6_cksum, - &pd->ndaddr, 0, pd->af, pd->naf); + 
&pd->hdr.icmp6->icmp6_cksum, + &pd->ndaddr, 0, pd->af, pd->naf); } if (pd->af != pd->naf) { if (pf_translate_icmp_af(AF_INET, - pd->hdr.icmp6)) - return (PF_DROP); + pd->hdr.icmp6)) { + return PF_DROP; + } pd->proto = IPPROTO_ICMP; } rewrite++; @@ -4800,59 +5068,65 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, #endif /* INET */ case IPPROTO_GRE: if ((direction == PF_IN) && - (pd->proto_variant == PF_GRE_PPTP_VARIANT)) - grev1->call_id = nxport.call_id; + (pd->proto_variant == PF_GRE_PPTP_VARIANT)) { + grev1->call_id = nxport.call_id; + } switch (pd->af) { #if INET case AF_INET: if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { pf_change_a(&saddr->v4addr.s_addr, - pd->ip_sum, - pd->naddr.v4addr.s_addr, 0); + pd->ip_sum, + pd->naddr.v4addr.s_addr, 0); } if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) { pf_change_a(&daddr->v4addr.s_addr, - pd->ip_sum, - pd->ndaddr.v4addr.s_addr, 0); + pd->ip_sum, + pd->ndaddr.v4addr.s_addr, 0); } break; #endif /* INET */ #if INET6 case AF_INET6: - if (PF_ANEQ(saddr, &pd->naddr, pd->af)) + if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { PF_ACPY(saddr, &pd->naddr, AF_INET6); - if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) + } + if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) { PF_ACPY(daddr, &pd->ndaddr, AF_INET6); + } break; #endif /* INET6 */ } ++rewrite; break; case IPPROTO_ESP: - if (direction == PF_OUT) + if (direction == PF_OUT) { bxport.spi = 0; + } switch (pd->af) { #if INET case AF_INET: if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { pf_change_a(&saddr->v4addr.s_addr, - pd->ip_sum, pd->naddr.v4addr.s_addr, 0); + pd->ip_sum, pd->naddr.v4addr.s_addr, 0); } if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) { pf_change_a(&daddr->v4addr.s_addr, - pd->ip_sum, - pd->ndaddr.v4addr.s_addr, 0); + pd->ip_sum, + pd->ndaddr.v4addr.s_addr, 0); } break; #endif /* INET */ #if INET6 case AF_INET6: - if (PF_ANEQ(saddr, &pd->naddr, pd->af)) + if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { PF_ACPY(saddr, &pd->naddr, AF_INET6); - if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) + } + if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) { PF_ACPY(daddr, &pd->ndaddr, AF_INET6); + } break; #endif /* INET6 */ } @@ -4864,131 +5138,146 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, if ((pd->naf != AF_INET) || (PF_ANEQ(saddr, &pd->naddr, pd->af))) { pf_change_addr(saddr, pd->ip_sum, - &pd->naddr, 0, af, pd->naf); + &pd->naddr, 0, af, pd->naf); } if ((pd->naf != AF_INET) || (PF_ANEQ(daddr, &pd->ndaddr, pd->af))) { pf_change_addr(daddr, pd->ip_sum, - &pd->ndaddr, 0, af, pd->naf); + &pd->ndaddr, 0, af, pd->naf); } break; #endif /* INET */ #if INET6 case AF_INET6: - if (PF_ANEQ(saddr, &pd->naddr, pd->af)) + if (PF_ANEQ(saddr, &pd->naddr, pd->af)) { PF_ACPY(saddr, &pd->naddr, af); - if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) + } + if (PF_ANEQ(daddr, &pd->ndaddr, pd->af)) { PF_ACPY(daddr, &pd->ndaddr, af); + } break; #endif /* INET */ } break; } - if (nr->natpass) + if (nr->natpass) { r = NULL; + } pd->nat_rule = nr; pd->af = pd->naf; } else { } - if (nr && nr->tag > 0) + if (nr && nr->tag > 0) { tag = nr->tag; + } while (r != NULL) { r->evaluations++; - if (pfi_kif_match(r->kif, kif) == r->ifnot) + if (pfi_kif_match(r->kif, kif) == r->ifnot) { r = r->skip[PF_SKIP_IFP].ptr; - else if (r->direction && r->direction != direction) + } else if (r->direction && r->direction != direction) { r = r->skip[PF_SKIP_DIR].ptr; - else if (r->af && r->af != pd->af) + } else if (r->af && r->af != pd->af) { r = r->skip[PF_SKIP_AF].ptr; - else if (r->proto && r->proto != pd->proto) + } else if (r->proto && r->proto != 
pd->proto) { r = r->skip[PF_SKIP_PROTO].ptr; - else if (PF_MISMATCHAW(&r->src.addr, saddr, pd->af, - r->src.neg, kif)) + } else if (PF_MISMATCHAW(&r->src.addr, saddr, pd->af, + r->src.neg, kif)) { r = r->skip[PF_SKIP_SRC_ADDR].ptr; + } /* tcp/udp only. port_op always 0 in other cases */ else if (r->proto == pd->proto && (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) && r->src.xport.range.op && !pf_match_port(r->src.xport.range.op, r->src.xport.range.port[0], r->src.xport.range.port[1], - th->th_sport)) + th->th_sport)) { r = r->skip[PF_SKIP_SRC_PORT].ptr; - else if (PF_MISMATCHAW(&r->dst.addr, daddr, pd->af, - r->dst.neg, NULL)) + } else if (PF_MISMATCHAW(&r->dst.addr, daddr, pd->af, + r->dst.neg, NULL)) { r = r->skip[PF_SKIP_DST_ADDR].ptr; + } /* tcp/udp only. port_op always 0 in other cases */ else if (r->proto == pd->proto && (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) && r->dst.xport.range.op && !pf_match_port(r->dst.xport.range.op, r->dst.xport.range.port[0], r->dst.xport.range.port[1], - th->th_dport)) + th->th_dport)) { r = r->skip[PF_SKIP_DST_PORT].ptr; + } /* icmp only. type always 0 in other cases */ - else if (r->type && r->type != icmptype + 1) + else if (r->type && r->type != icmptype + 1) { r = TAILQ_NEXT(r, entries); + } /* icmp only. type always 0 in other cases */ - else if (r->code && r->code != icmpcode + 1) + else if (r->code && r->code != icmpcode + 1) { r = TAILQ_NEXT(r, entries); - else if ((r->rule_flag & PFRULE_TOS) && r->tos && - !(r->tos & pd->tos)) + } else if ((r->rule_flag & PFRULE_TOS) && r->tos && + !(r->tos & pd->tos)) { r = TAILQ_NEXT(r, entries); - else if ((r->rule_flag & PFRULE_DSCP) && r->tos && - !(r->tos & (pd->tos & DSCP_MASK))) + } else if ((r->rule_flag & PFRULE_DSCP) && r->tos && + !(r->tos & (pd->tos & DSCP_MASK))) { r = TAILQ_NEXT(r, entries); - else if ((r->rule_flag & PFRULE_SC) && r->tos && - ((r->tos & SCIDX_MASK) != pd->sc)) + } else if ((r->rule_flag & PFRULE_SC) && r->tos && + ((r->tos & SCIDX_MASK) != pd->sc)) { r = TAILQ_NEXT(r, entries); - else if (r->rule_flag & PFRULE_FRAGMENT) + } else if (r->rule_flag & PFRULE_FRAGMENT) { r = TAILQ_NEXT(r, entries); - else if (pd->proto == IPPROTO_TCP && - (r->flagset & th->th_flags) != r->flags) + } else if (pd->proto == IPPROTO_TCP && + (r->flagset & th->th_flags) != r->flags) { r = TAILQ_NEXT(r, entries); + } /* tcp/udp only. uid.op always 0 in other cases */ else if (r->uid.op && (pd->lookup.done || ((void)(pd->lookup.done = pf_socket_lookup(direction, pd)), 1)) && !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1], - pd->lookup.uid)) + pd->lookup.uid)) { r = TAILQ_NEXT(r, entries); + } /* tcp/udp only. 
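
The r->skip[PF_SKIP_*] jumps in the loop above are pf's skip steps: rules stay in definition order, and each rule caches, for every match field, a pointer to the first later rule whose value for that field differs, so one mismatch skips the whole run of rules that would fail for the same reason. A toy model with the field set cut down to two (all names mine):

#include <stdio.h>
#include <stddef.h>

struct rule {
	int proto, dir;			/* 0 = wildcard */
	struct rule *skip_proto;	/* first later rule with another proto */
	struct rule *skip_dir;		/* first later rule with another dir */
	struct rule *next;
};

static struct rule *
eval(struct rule *r, int proto, int dir)
{
	struct rule *match = NULL;

	while (r != NULL) {
		if (r->proto && r->proto != proto)
			r = r->skip_proto;
		else if (r->dir && r->dir != dir)
			r = r->skip_dir;
		else {
			match = r;	/* pf: last matching rule wins */
			r = r->next;
		}
	}
	return match;			/* NULL if nothing matched */
}

int
main(void)
{
	struct rule r2 = { 0, 0, NULL, NULL, NULL };	/* wildcard */
	struct rule r1 = { 6, 2, &r2, &r2, &r2 };	/* tcp, dir 2 */
	struct rule r0 = { 6, 1, &r2, &r1, &r1 };	/* tcp, dir 1 */

	/* a UDP packet jumps straight from r0 to the wildcard rule */
	printf("matched proto=%d\n", eval(&r0, 17, 1)->proto);
	return 0;
}
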
gid.op always 0 in other cases */ else if (r->gid.op && (pd->lookup.done || ((void)(pd->lookup.done = pf_socket_lookup(direction, pd)), 1)) && !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1], - pd->lookup.gid)) + pd->lookup.gid)) { r = TAILQ_NEXT(r, entries); - else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1)) + } else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1)) { r = TAILQ_NEXT(r, entries); - else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) + } else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) { r = TAILQ_NEXT(r, entries); - else if (r->os_fingerprint != PF_OSFP_ANY && + } else if (r->os_fingerprint != PF_OSFP_ANY && (pd->proto != IPPROTO_TCP || !pf_osfp_match( - pf_osfp_fingerprint(pd, pbuf, off, th), - r->os_fingerprint))) + pf_osfp_fingerprint(pd, pbuf, off, th), + r->os_fingerprint))) { r = TAILQ_NEXT(r, entries); - else { - if (r->tag) + } else { + if (r->tag) { tag = r->tag; - if (PF_RTABLEID_IS_VALID(r->rtableid)) + } + if (PF_RTABLEID_IS_VALID(r->rtableid)) { rtableid = r->rtableid; + } if (r->anchor == NULL) { match = 1; *rm = r; *am = a; *rsm = ruleset; - if ((*rm)->quick) + if ((*rm)->quick) { break; + } r = TAILQ_NEXT(r, entries); - } else + } else { pf_step_into_anchor(&asd, &ruleset, PF_RULESET_FILTER, &r, &a, &match); + } } if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, - PF_RULESET_FILTER, &r, &a, &match)) + PF_RULESET_FILTER, &r, &a, &match)) { break; + } } r = *rm; a = *am; @@ -4998,18 +5287,19 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, if (r->log || (nr != NULL && nr->log)) { if (rewrite > 0) { - if (rewrite < off + hdrlen) + if (rewrite < off + hdrlen) { rewrite = off + hdrlen; + } if (pf_lazy_makewritable(pd, pbuf, rewrite) == NULL) { REASON_SET(&reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } pbuf_copy_back(pbuf, off, hdrlen, pd->hdr.any); } PFLOG_PACKET(kif, h, pbuf, pd->af, direction, reason, - r->log ? r : nr, a, ruleset, pd); + r->log ? 
r : nr, a, ruleset, pd); } if ((r->action == PF_DROP) && @@ -5119,9 +5409,10 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, break; case IPPROTO_GRE: if (pd->proto_variant == - PF_GRE_PPTP_VARIANT) + PF_GRE_PPTP_VARIANT) { grev1->call_id = - bdxport.call_id; + bdxport.call_id; + } ++rewrite; switch (af) { #if INET @@ -5176,11 +5467,11 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, ((r->rule_flag & PFRULE_RETURNRST) || (r->rule_flag & PFRULE_RETURN)) && !(th->th_flags & TH_RST)) { - u_int32_t ack = ntohl(th->th_seq) + pd->p_len; - int len = 0; - struct ip *h4; + u_int32_t ack = ntohl(th->th_seq) + pd->p_len; + int len = 0; + struct ip *h4; #if INET6 - struct ip6_hdr *h6; + struct ip6_hdr *h6; #endif /* INET6 */ switch (pd->af) { @@ -5192,42 +5483,45 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, case AF_INET6: h6 = pbuf->pb_data; len = ntohs(h6->ip6_plen) - - (off - sizeof (*h6)); + (off - sizeof(*h6)); break; #endif /* INET6 */ } if (pf_check_proto_cksum(pbuf, off, len, IPPROTO_TCP, - pd->af)) + pd->af)) { REASON_SET(&reason, PFRES_PROTCKSUM); - else { - if (th->th_flags & TH_SYN) + } else { + if (th->th_flags & TH_SYN) { ack++; - if (th->th_flags & TH_FIN) + } + if (th->th_flags & TH_FIN) { ack++; + } pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, - ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0, + ntohl(th->th_ack), ack, TH_RST | TH_ACK, 0, 0, r->return_ttl, 1, 0, pd->eh, kif->pfik_ifp); } } else if (pd->proto != IPPROTO_ICMP && pd->af == AF_INET && pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH && - r->return_icmp) + r->return_icmp) { pf_send_icmp(pbuf, r->return_icmp >> 8, r->return_icmp & 255, pd->af, r); - else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && + } else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 && pd->proto != IPPROTO_ESP && pd->proto != IPPROTO_AH && - r->return_icmp6) + r->return_icmp6) { pf_send_icmp(pbuf, r->return_icmp6 >> 8, r->return_icmp6 & 255, pd->af, r); + } } if (r->action == PF_DROP) { - return (PF_DROP); + return PF_DROP; } /* prepare state key, for flowhash and/or the state (if created) */ - bzero(&psk, sizeof (psk)); + bzero(&psk, sizeof(psk)); psk.proto = pd->proto; psk.direction = direction; if (pd->proto == IPPROTO_UDP) { @@ -5236,8 +5530,9 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, psk.proto_variant = PF_EXTFILTER_APD; } else { psk.proto_variant = nr ? 
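
For blocked TCP with return-rst, the hunk above derives the RST's acknowledgment number from the rejected segment: the payload length, plus one sequence number each for SYN and FIN, per RFC 793's rules on consumed sequence space. Reduced to a helper (names mine):

#include <stdint.h>

#define TH_FIN	0x01
#define TH_SYN	0x02

static uint32_t
rst_ack(uint32_t th_seq, uint32_t payload_len, uint8_t th_flags)
{
	uint32_t ack = th_seq + payload_len;	/* mod 2^32 */

	if (th_flags & TH_SYN)
		ack++;
	if (th_flags & TH_FIN)
		ack++;
	return ack;
}
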
nr->extfilter : r->extfilter; - if (psk.proto_variant < PF_EXTFILTER_APD) + if (psk.proto_variant < PF_EXTFILTER_APD) { psk.proto_variant = PF_EXTFILTER_APD; + } } } else if (pd->proto == IPPROTO_GRE) { psk.proto_variant = pd->proto_variant; @@ -5337,7 +5632,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, if (nr->action == PF_NAT64) { PF_ACPY(&psk.gwy.addr, &pd->naddr, pd->naf); PF_ACPY(&psk.ext_gwy.addr, &pd->ndaddr, - pd->naf); + pd->naf); if ((pd->proto == IPPROTO_ICMPV6) || (pd->proto == IPPROTO_ICMP)) { psk.gwy.xport = nxport; @@ -5375,13 +5670,13 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, pd)) { REASON_SET(&reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } if (!state_icmp && (r->keep_state || nr != NULL || (pd->flags & PFDESC_TCP_NORM))) { /* create new state */ - struct pf_state *s = NULL; + struct pf_state *s = NULL; struct pf_state_key *sk = NULL; struct pf_src_node *sn = NULL; struct pf_ike_hdr ike; @@ -5392,16 +5687,17 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, if (ntohs(uh->uh_sport) == PF_IKE_PORT && ntohs(uh->uh_dport) == PF_IKE_PORT && plen >= PF_IKE_PACKET_MINSIZE) { - if (plen > PF_IKE_PACKET_MINSIZE) + if (plen > PF_IKE_PACKET_MINSIZE) { plen = PF_IKE_PACKET_MINSIZE; - pbuf_copy_data(pbuf, off + sizeof (*uh), plen, + } + pbuf_copy_data(pbuf, off + sizeof(*uh), plen, &ike); } } if (nr != NULL && pd->proto == IPPROTO_ESP && direction == PF_OUT) { - struct pf_state_key_cmp sk0; + struct pf_state_key_cmp sk0; struct pf_state *s0; /* @@ -5411,7 +5707,7 @@ pf_test_rule(struct pf_rule **rm, struct pf_state **sm, int direction, * different internal address. Only one 'blocking' * partial state is allowed for each external address. 
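
The PF_IKE_PORT block above snoops the first PF_IKE_PACKET_MINSIZE bytes of UDP/500 traffic so that ESP state can later be keyed by IKE cookie (pf_ike_compare, further down, compares exactly one field). A sketch of extracting that field, assuming the standard ISAKMP header layout of RFC 2408; struct and function names are mine:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* An ISAKMP/IKE header begins with an 8-byte initiator cookie and
 * an 8-byte responder cookie; pf treats the initiator cookie as an
 * opaque 64-bit key and never byte-swaps it. */
struct ike_hdr_min {
	uint64_t initiator_cookie;
	uint64_t responder_cookie;
};

static int
ike_initiator_cookie(const uint8_t *payload, size_t len, uint64_t *out)
{
	struct ike_hdr_min h;

	if (len < sizeof(h))
		return -1;		/* "IKE message too small" */
	memcpy(&h, payload, sizeof(h));
	*out = h.initiator_cookie;
	return 0;
}
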
*/ - memset(&sk0, 0, sizeof (sk0)); + memset(&sk0, 0, sizeof(sk0)); sk0.af_gwy = pd->af; sk0.proto = IPPROTO_ESP; PF_ACPY(&sk0.gwy.addr, saddr, sk0.af_gwy); @@ -5465,14 +5761,15 @@ cleanup: pool_put(&pf_src_tree_pl, nsn); } if (sk != NULL) { - if (sk->app_state) + if (sk->app_state) { pool_put(&pf_app_state_pl, sk->app_state); + } pool_put(&pf_state_key_pl, sk); } - return (PF_DROP); + return PF_DROP; } - bzero(s, sizeof (*s)); + bzero(s, sizeof(*s)); TAILQ_INIT(&s->unlink_hooks); s->rule.ptr = r; s->nat_rule.ptr = nr; @@ -5480,23 +5777,26 @@ cleanup: STATE_INC_COUNTERS(s); s->allow_opts = r->allow_opts; s->log = r->log & PF_LOG_ALL; - if (nr != NULL) + if (nr != NULL) { s->log |= nr->log & PF_LOG_ALL; + } switch (pd->proto) { case IPPROTO_TCP: s->src.seqlo = ntohl(th->th_seq); s->src.seqhi = s->src.seqlo + pd->p_len + 1; - if ((th->th_flags & (TH_SYN|TH_ACK)) == + if ((th->th_flags & (TH_SYN | TH_ACK)) == TH_SYN && r->keep_state == PF_STATE_MODULATE) { /* Generate sequence number modulator */ if ((s->src.seqdiff = pf_tcp_iss(pd) - - s->src.seqlo) == 0) + s->src.seqlo) == 0) { s->src.seqdiff = 1; + } pf_change_a(&th->th_seq, &th->th_sum, htonl(s->src.seqlo + s->src.seqdiff), 0); - rewrite = off + sizeof (*th); - } else + rewrite = off + sizeof(*th); + } else { s->src.seqdiff = 0; + } if (th->th_flags & TH_SYN) { s->src.seqhi++; s->src.wscale = pf_get_wscale(pbuf, off, @@ -5510,8 +5810,9 @@ cleanup: s->src.max_win = (win - 1) >> (s->src.wscale & PF_WSCALE_MASK); } - if (th->th_flags & TH_FIN) + if (th->th_flags & TH_FIN) { s->src.seqhi++; + } s->dst.seqhi = 1; s->dst.max_win = 1; s->src.state = TCPS_SYN_SENT; @@ -5567,7 +5868,7 @@ cleanup: pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); pool_put(&pf_state_pl, s); - return (PF_DROP); + return PF_DROP; } if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub && pf_normalize_tcp_stateful(pbuf, off, pd, &reason, @@ -5580,7 +5881,7 @@ cleanup: pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); pool_put(&pf_state_pl, s); - return (PF_DROP); + return PF_DROP; } } @@ -5595,7 +5896,7 @@ cleanup: goto cleanup; } - pf_set_rt_ifp(s, saddr, af); /* needs s->state_key set */ + pf_set_rt_ifp(s, saddr, af); /* needs s->state_key set */ pbuf = pd->mp; // XXXSCW: Why? @@ -5617,7 +5918,7 @@ cleanup: goto cleanup; } - bzero(as, sizeof (*as)); + bzero(as, sizeof(*as)); as->handler = pf_pptp_handler; as->compare_lan_ext = 0; as->compare_ext_gwy = 0; @@ -5643,7 +5944,7 @@ cleanup: goto cleanup; } - bzero(as, sizeof (*as)); + bzero(as, sizeof(*as)); as->compare_lan_ext = pf_ike_compare; as->compare_ext_gwy = pf_ike_compare; as->u.ike.cookie = ike.initiator_cookie; @@ -5658,13 +5959,14 @@ cleanup: } if (pf_insert_state(BOUND_IFACE(r, kif), s)) { - if (pd->proto == IPPROTO_TCP) + if (pd->proto == IPPROTO_TCP) { pf_normalize_tcp_cleanup(s); + } REASON_SET(&reason, PFRES_STATEINS); pf_src_tree_remove_state(s); STATE_DEC_COUNTERS(s); pool_put(&pf_state_pl, s); - return (PF_DROP); + return PF_DROP; } else { *sm = s; } @@ -5673,7 +5975,7 @@ cleanup: s->tag = tag; } if (pd->proto == IPPROTO_TCP && - (th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN && + (th->th_flags & (TH_SYN | TH_ACK)) == TH_SYN && r->keep_state == PF_STATE_SYNPROXY) { int ua = (sk->af_lan == sk->af_gwy) ? 
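
PF_STATE_MODULATE above hides a host's initial sequence number: seqdiff = pf_tcp_iss(pd) - seqlo is chosen once per state (bumped to 1 if the subtraction happens to yield 0) and then applied symmetrically for the life of the connection. The arithmetic in isolation (struct and names mine):

#include <stdint.h>

struct seq_mod {
	uint32_t seqdiff;	/* randomized ISS minus the real ISS */
};

/* Host to network: present the modulated sequence space. */
static uint32_t
modulate_seq_out(const struct seq_mod *m, uint32_t th_seq)
{
	return th_seq + m->seqdiff;	/* wraps mod 2^32, as intended */
}

/* Network to host: translate acknowledgments back. */
static uint32_t
modulate_ack_in(const struct seq_mod *m, uint32_t th_ack)
{
	return th_ack - m->seqdiff;
}
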
1 : 0; s->src.state = PF_TCPS_PROXY_SRC; @@ -5700,9 +6002,9 @@ cleanup: s->src.mss = mss; pf_send_tcp(r, af, daddr, saddr, th->th_dport, th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1, - TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL); + TH_SYN | TH_ACK, 0, s->src.mss, 0, 1, 0, NULL, NULL); REASON_SET(&reason, PFRES_SYNPROXY); - return (PF_SYNPROXY_DROP); + return PF_SYNPROXY_DROP; } if (sk->app_state && sk->app_state->handler) { @@ -5725,32 +6027,33 @@ cleanup: pd, kif); if (pd->lmw < 0) { REASON_SET(&reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } - pbuf = pd->mp; // XXXSCW: Why? + pbuf = pd->mp; // XXXSCW: Why? } } } /* copy back packet headers if we performed NAT operations */ if (rewrite) { - if (rewrite < off + hdrlen) + if (rewrite < off + hdrlen) { rewrite = off + hdrlen; + } if (pf_lazy_makewritable(pd, pd->mp, rewrite) == NULL) { REASON_SET(&reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } pbuf_copy_back(pbuf, off, hdrlen, pd->hdr.any); - if (af == AF_INET6 && pd->naf == AF_INET) + if (af == AF_INET6 && pd->naf == AF_INET) { return pf_nat64_ipv6(pbuf, off, pd); - else if (af == AF_INET && pd->naf == AF_INET6) + } else if (af == AF_INET && pd->naf == AF_INET6) { return pf_nat64_ipv4(pbuf, off, pd); - + } } - return (PF_PASS); + return PF_PASS; } boolean_t is_nlc_enabled_glb = FALSE; @@ -5759,19 +6062,22 @@ static inline boolean_t pf_is_dummynet_enabled(void) { #if DUMMYNET - if (__probable(!PF_IS_ENABLED)) - return (FALSE); + if (__probable(!PF_IS_ENABLED)) { + return FALSE; + } - if (__probable(!DUMMYNET_LOADED)) - return (FALSE); + if (__probable(!DUMMYNET_LOADED)) { + return FALSE; + } if (__probable(TAILQ_EMPTY(pf_main_ruleset. - rules[PF_RULESET_DUMMYNET].active.ptr))) - return (FALSE); + rules[PF_RULESET_DUMMYNET].active.ptr))) { + return FALSE; + } - return (TRUE); + return TRUE; #else - return (FALSE); + return FALSE; #endif /* DUMMYNET */ } @@ -5779,15 +6085,17 @@ boolean_t pf_is_nlc_enabled(void) { #if DUMMYNET - if (__probable(!pf_is_dummynet_enabled())) - return (FALSE); + if (__probable(!pf_is_dummynet_enabled())) { + return FALSE; + } - if (__probable(!is_nlc_enabled_glb)) - return (FALSE); + if (__probable(!is_nlc_enabled_glb)) { + return FALSE; + } - return (TRUE); + return TRUE; #else - return (FALSE); + return FALSE; #endif /* DUMMYNET */ } @@ -5803,52 +6111,54 @@ static int pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, pbuf_t **pbuf0, struct pf_pdesc *pd, struct ip_fw_args *fwa) { - pbuf_t *pbuf = *pbuf0; - struct pf_rule *am = NULL; - struct pf_ruleset *rsm = NULL; - struct pf_addr *saddr = pd->src, *daddr = pd->dst; - sa_family_t af = pd->af; - struct pf_rule *r, *a = NULL; - struct pf_ruleset *ruleset = NULL; - struct tcphdr *th = pd->hdr.tcp; - u_short reason; - int hdrlen = 0; - int tag = -1; - unsigned int rtableid = IFSCOPE_NONE; - int asd = 0; - int match = 0; - u_int8_t icmptype = 0, icmpcode = 0; - struct ip_fw_args dnflow; - struct pf_rule *prev_matching_rule = fwa ? fwa->fwa_pf_rule : NULL; - int found_prev_rule = (prev_matching_rule) ? 
0 : 1; + pbuf_t *pbuf = *pbuf0; + struct pf_rule *am = NULL; + struct pf_ruleset *rsm = NULL; + struct pf_addr *saddr = pd->src, *daddr = pd->dst; + sa_family_t af = pd->af; + struct pf_rule *r, *a = NULL; + struct pf_ruleset *ruleset = NULL; + struct tcphdr *th = pd->hdr.tcp; + u_short reason; + int hdrlen = 0; + int tag = -1; + unsigned int rtableid = IFSCOPE_NONE; + int asd = 0; + int match = 0; + u_int8_t icmptype = 0, icmpcode = 0; + struct ip_fw_args dnflow; + struct pf_rule *prev_matching_rule = fwa ? fwa->fwa_pf_rule : NULL; + int found_prev_rule = (prev_matching_rule) ? 0 : 1; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (!pf_is_dummynet_enabled()) - return (PF_PASS); + if (!pf_is_dummynet_enabled()) { + return PF_PASS; + } bzero(&dnflow, sizeof(dnflow)); hdrlen = 0; /* Fragments don't gave protocol headers */ - if (!(pd->flags & PFDESC_IP_FRAG)) + if (!(pd->flags & PFDESC_IP_FRAG)) { switch (pd->proto) { case IPPROTO_TCP: dnflow.fwa_id.flags = pd->hdr.tcp->th_flags; dnflow.fwa_id.dst_port = ntohs(pd->hdr.tcp->th_dport); dnflow.fwa_id.src_port = ntohs(pd->hdr.tcp->th_sport); - hdrlen = sizeof (*th); + hdrlen = sizeof(*th); break; case IPPROTO_UDP: dnflow.fwa_id.dst_port = ntohs(pd->hdr.udp->uh_dport); dnflow.fwa_id.src_port = ntohs(pd->hdr.udp->uh_sport); - hdrlen = sizeof (*pd->hdr.udp); + hdrlen = sizeof(*pd->hdr.udp); break; #if INET case IPPROTO_ICMP: - if (af != AF_INET) + if (af != AF_INET) { break; + } hdrlen = ICMP_MINLEN; icmptype = pd->hdr.icmp->icmp_type; icmpcode = pd->hdr.icmp->icmp_code; @@ -5856,37 +6166,41 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, #endif /* INET */ #if INET6 case IPPROTO_ICMPV6: - if (af != AF_INET6) + if (af != AF_INET6) { break; - hdrlen = sizeof (*pd->hdr.icmp6); + } + hdrlen = sizeof(*pd->hdr.icmp6); icmptype = pd->hdr.icmp6->icmp6_type; icmpcode = pd->hdr.icmp6->icmp6_code; break; #endif /* INET6 */ case IPPROTO_GRE: - if (pd->proto_variant == PF_GRE_PPTP_VARIANT) - hdrlen = sizeof (*pd->hdr.grev1); + if (pd->proto_variant == PF_GRE_PPTP_VARIANT) { + hdrlen = sizeof(*pd->hdr.grev1); + } break; case IPPROTO_ESP: - hdrlen = sizeof (*pd->hdr.esp); + hdrlen = sizeof(*pd->hdr.esp); break; } + } r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_DUMMYNET].active.ptr); while (r != NULL) { r->evaluations++; - if (pfi_kif_match(r->kif, kif) == r->ifnot) + if (pfi_kif_match(r->kif, kif) == r->ifnot) { r = r->skip[PF_SKIP_IFP].ptr; - else if (r->direction && r->direction != direction) + } else if (r->direction && r->direction != direction) { r = r->skip[PF_SKIP_DIR].ptr; - else if (r->af && r->af != af) + } else if (r->af && r->af != af) { r = r->skip[PF_SKIP_AF].ptr; - else if (r->proto && r->proto != pd->proto) + } else if (r->proto && r->proto != pd->proto) { r = r->skip[PF_SKIP_PROTO].ptr; - else if (PF_MISMATCHAW(&r->src.addr, saddr, af, - r->src.neg, kif)) + } else if (PF_MISMATCHAW(&r->src.addr, saddr, af, + r->src.neg, kif)) { r = r->skip[PF_SKIP_SRC_ADDR].ptr; + } /* tcp/udp only. 
port_op always 0 in other cases */ else if (r->proto == pd->proto && (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) && @@ -5894,11 +6208,12 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, ((r->src.xport.range.op && !pf_match_port(r->src.xport.range.op, r->src.xport.range.port[0], r->src.xport.range.port[1], - th->th_sport))))) + th->th_sport))))) { r = r->skip[PF_SKIP_SRC_PORT].ptr; - else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, - r->dst.neg, NULL)) + } else if (PF_MISMATCHAW(&r->dst.addr, daddr, af, + r->dst.neg, NULL)) { r = r->skip[PF_SKIP_DST_ADDR].ptr; + } /* tcp/udp only. port_op always 0 in other cases */ else if (r->proto == pd->proto && (r->proto == IPPROTO_TCP || r->proto == IPPROTO_UDP) && @@ -5906,46 +6221,51 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, ((pd->flags & PFDESC_IP_FRAG) || !pf_match_port(r->dst.xport.range.op, r->dst.xport.range.port[0], r->dst.xport.range.port[1], - th->th_dport))) + th->th_dport))) { r = r->skip[PF_SKIP_DST_PORT].ptr; + } /* icmp only. type always 0 in other cases */ else if (r->type && - ((pd->flags & PFDESC_IP_FRAG) || - r->type != icmptype + 1)) + ((pd->flags & PFDESC_IP_FRAG) || + r->type != icmptype + 1)) { r = TAILQ_NEXT(r, entries); + } /* icmp only. type always 0 in other cases */ else if (r->code && - ((pd->flags & PFDESC_IP_FRAG) || - r->code != icmpcode + 1)) + ((pd->flags & PFDESC_IP_FRAG) || + r->code != icmpcode + 1)) { r = TAILQ_NEXT(r, entries); - else if (r->tos && !(r->tos == pd->tos)) + } else if (r->tos && !(r->tos == pd->tos)) { r = TAILQ_NEXT(r, entries); - else if (r->rule_flag & PFRULE_FRAGMENT) + } else if (r->rule_flag & PFRULE_FRAGMENT) { r = TAILQ_NEXT(r, entries); - else if (pd->proto == IPPROTO_TCP && + } else if (pd->proto == IPPROTO_TCP && ((pd->flags & PFDESC_IP_FRAG) || - (r->flagset & th->th_flags) != r->flags)) + (r->flagset & th->th_flags) != r->flags)) { r = TAILQ_NEXT(r, entries); - else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1)) + } else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1)) { r = TAILQ_NEXT(r, entries); - else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) + } else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) { r = TAILQ_NEXT(r, entries); - else { + } else { /* * Need to go past the previous dummynet matching rule */ if (r->anchor == NULL) { if (found_prev_rule) { - if (r->tag) + if (r->tag) { tag = r->tag; - if (PF_RTABLEID_IS_VALID(r->rtableid)) + } + if (PF_RTABLEID_IS_VALID(r->rtableid)) { rtableid = r->rtableid; + } match = 1; *rm = r; am = a; rsm = ruleset; - if ((*rm)->quick) + if ((*rm)->quick) { break; + } } else if (r == prev_matching_rule) { found_prev_rule = 1; } @@ -5956,15 +6276,17 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, } } if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, - PF_RULESET_DUMMYNET, &r, &a, &match)) + PF_RULESET_DUMMYNET, &r, &a, &match)) { break; + } } r = *rm; a = am; ruleset = rsm; - if (!match) - return (PF_PASS); + if (!match) { + return PF_PASS; + } REASON_SET(&reason, PFRES_DUMMYNET); @@ -5979,12 +6301,12 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, r->packets[dirndx]++; r->bytes[dirndx] += pd->tot_len; - return (PF_PASS); + return PF_PASS; } if (pf_tag_packet(pbuf, pd->pf_mtag, tag, rtableid, pd)) { REASON_SET(&reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } if (r->dnpipe && ip_dn_io_ptr != NULL) { @@ -5999,17 +6321,17 @@ pf_test_dummynet(struct 
pf_rule **rm, int direction, struct pfi_kif *kif, dnflow.fwa_id.proto = pd->proto; dnflow.fwa_flags = r->dntype; switch (af) { - case AF_INET: - dnflow.fwa_id.addr_type = 4; - dnflow.fwa_id.src_ip = ntohl(saddr->v4addr.s_addr); - dnflow.fwa_id.dst_ip = ntohl(daddr->v4addr.s_addr); - break; - case AF_INET6: - dnflow.fwa_id.addr_type = 6; - dnflow.fwa_id.src_ip6 = saddr->v6addr; - dnflow.fwa_id.dst_ip6 = saddr->v6addr; - break; - } + case AF_INET: + dnflow.fwa_id.addr_type = 4; + dnflow.fwa_id.src_ip = ntohl(saddr->v4addr.s_addr); + dnflow.fwa_id.dst_ip = ntohl(daddr->v4addr.s_addr); + break; + case AF_INET6: + dnflow.fwa_id.addr_type = 6; + dnflow.fwa_id.src_ip6 = saddr->v6addr; + dnflow.fwa_id.dst_ip6 = saddr->v6addr; + break; + } if (fwa != NULL) { dnflow.fwa_oif = fwa->fwa_oif; @@ -6043,8 +6365,8 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, if (m != NULL) { ip_dn_io_ptr(m, dnflow.fwa_cookie, (af == AF_INET) ? - ((direction==PF_IN) ? DN_TO_IP_IN : DN_TO_IP_OUT) : - ((direction==PF_IN) ? DN_TO_IP6_IN : DN_TO_IP6_OUT), + ((direction == PF_IN) ? DN_TO_IP_IN : DN_TO_IP_OUT) : + ((direction == PF_IN) ? DN_TO_IP6_IN : DN_TO_IP6_OUT), &dnflow, DN_CLIENT_PF); } @@ -6054,10 +6376,10 @@ pf_test_dummynet(struct pf_rule **rm, int direction, struct pfi_kif *kif, */ *pbuf0 = NULL; - return (PF_PASS); + return PF_PASS; } - return (PF_PASS); + return PF_PASS; } #endif /* DUMMYNET */ @@ -6067,73 +6389,76 @@ pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, struct pf_ruleset **rsm) { #pragma unused(h) - struct pf_rule *r, *a = NULL; - struct pf_ruleset *ruleset = NULL; - sa_family_t af = pd->af; - u_short reason; - int tag = -1; - int asd = 0; - int match = 0; + struct pf_rule *r, *a = NULL; + struct pf_ruleset *ruleset = NULL; + sa_family_t af = pd->af; + u_short reason; + int tag = -1; + int asd = 0; + int match = 0; r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr); while (r != NULL) { r->evaluations++; - if (pfi_kif_match(r->kif, kif) == r->ifnot) + if (pfi_kif_match(r->kif, kif) == r->ifnot) { r = r->skip[PF_SKIP_IFP].ptr; - else if (r->direction && r->direction != direction) + } else if (r->direction && r->direction != direction) { r = r->skip[PF_SKIP_DIR].ptr; - else if (r->af && r->af != af) + } else if (r->af && r->af != af) { r = r->skip[PF_SKIP_AF].ptr; - else if (r->proto && r->proto != pd->proto) + } else if (r->proto && r->proto != pd->proto) { r = r->skip[PF_SKIP_PROTO].ptr; - else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, - r->src.neg, kif)) + } else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, + r->src.neg, kif)) { r = r->skip[PF_SKIP_SRC_ADDR].ptr; - else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, - r->dst.neg, NULL)) + } else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, + r->dst.neg, NULL)) { r = r->skip[PF_SKIP_DST_ADDR].ptr; - else if ((r->rule_flag & PFRULE_TOS) && r->tos && - !(r->tos & pd->tos)) + } else if ((r->rule_flag & PFRULE_TOS) && r->tos && + !(r->tos & pd->tos)) { r = TAILQ_NEXT(r, entries); - else if ((r->rule_flag & PFRULE_DSCP) && r->tos && - !(r->tos & (pd->tos & DSCP_MASK))) + } else if ((r->rule_flag & PFRULE_DSCP) && r->tos && + !(r->tos & (pd->tos & DSCP_MASK))) { r = TAILQ_NEXT(r, entries); - else if ((r->rule_flag & PFRULE_SC) && r->tos && - ((r->tos & SCIDX_MASK) != pd->sc)) + } else if ((r->rule_flag & PFRULE_SC) && r->tos && + ((r->tos & SCIDX_MASK) != pd->sc)) { r = TAILQ_NEXT(r, entries); - else if (r->os_fingerprint != PF_OSFP_ANY) + } else if (r->os_fingerprint != PF_OSFP_ANY) { 
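
One thing worth flagging in the reindented AF_INET6 arm above: it copies saddr->v6addr into both fwa_id.src_ip6 and fwa_id.dst_ip6, which reads like a copy/paste slip carried over unchanged from the pre-reindent code, since the AF_INET arm uses daddr for dst_ip. A sketch of the fill with the destination actually used; flow_id is a reduced stand-in, not the kernel's type:

#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

struct flow_id {
	int addr_type;			/* 4 or 6 */
	uint32_t src_ip, dst_ip;	/* host order, IPv4 only */
	struct in6_addr src_ip6, dst_ip6;
};

static void
fill_flow_addrs(struct flow_id *id, int af, const void *src, const void *dst)
{
	if (af == AF_INET) {
		id->addr_type = 4;
		id->src_ip = ntohl(((const struct in_addr *)src)->s_addr);
		id->dst_ip = ntohl(((const struct in_addr *)dst)->s_addr);
	} else {
		id->addr_type = 6;
		id->src_ip6 = *(const struct in6_addr *)src;
		id->dst_ip6 = *(const struct in6_addr *)dst; /* dst, not src */
	}
}
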
r = TAILQ_NEXT(r, entries); - else if (pd->proto == IPPROTO_UDP && - (r->src.xport.range.op || r->dst.xport.range.op)) + } else if (pd->proto == IPPROTO_UDP && + (r->src.xport.range.op || r->dst.xport.range.op)) { r = TAILQ_NEXT(r, entries); - else if (pd->proto == IPPROTO_TCP && + } else if (pd->proto == IPPROTO_TCP && (r->src.xport.range.op || r->dst.xport.range.op || - r->flagset)) + r->flagset)) { r = TAILQ_NEXT(r, entries); - else if ((pd->proto == IPPROTO_ICMP || + } else if ((pd->proto == IPPROTO_ICMP || pd->proto == IPPROTO_ICMPV6) && - (r->type || r->code)) + (r->type || r->code)) { r = TAILQ_NEXT(r, entries); - else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1)) + } else if (r->prob && r->prob <= (RandomULong() % (UINT_MAX - 1) + 1)) { r = TAILQ_NEXT(r, entries); - else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) + } else if (r->match_tag && !pf_match_tag(r, pd->pf_mtag, &tag)) { r = TAILQ_NEXT(r, entries); - else { + } else { if (r->anchor == NULL) { match = 1; *rm = r; *am = a; *rsm = ruleset; - if ((*rm)->quick) + if ((*rm)->quick) { break; + } r = TAILQ_NEXT(r, entries); - } else + } else { pf_step_into_anchor(&asd, &ruleset, PF_RULESET_FILTER, &r, &a, &match); + } } if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, - PF_RULESET_FILTER, &r, &a, &match)) + PF_RULESET_FILTER, &r, &a, &match)) { break; + } } r = *rm; a = *am; @@ -6141,19 +6466,21 @@ pf_test_fragment(struct pf_rule **rm, int direction, struct pfi_kif *kif, REASON_SET(&reason, PFRES_MATCH); - if (r->log) + if (r->log) { PFLOG_PACKET(kif, h, pbuf, af, direction, reason, r, a, ruleset, pd); + } - if (r->action != PF_PASS) - return (PF_DROP); + if (r->action != PF_PASS) { + return PF_DROP; + } if (pf_tag_packet(pbuf, pd->pf_mtag, tag, -1, NULL)) { REASON_SET(&reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } - return (PF_PASS); + return PF_PASS; } static void @@ -6182,43 +6509,47 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, pptps = &sk->app_state->u.pptp; gs = pptps->grev1_state; - if (gs) + if (gs) { gs->expire = pf_time_second(); + } pbuf = pd->mp; - plen = min(sizeof (cm), pbuf->pb_packet_len - off); - if (plen < PF_PPTP_CTRL_MSG_MINSIZE) + plen = min(sizeof(cm), pbuf->pb_packet_len - off); + if (plen < PF_PPTP_CTRL_MSG_MINSIZE) { return; + } tlen = plen - PF_PPTP_CTRL_MSG_MINSIZE; pbuf_copy_data(pbuf, off, plen, &cm); - if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER) + if (ntohl(cm.hdr.magic) != PF_PPTP_MAGIC_NUMBER) { return; - if (ntohs(cm.hdr.type) != 1) + } + if (ntohs(cm.hdr.type) != 1) { return; + } -#define TYPE_LEN_CHECK(_type, _name) \ - case PF_PPTP_CTRL_TYPE_##_type: \ - if (tlen < sizeof(struct pf_pptp_ctrl_##_name)) \ - return; \ - break; +#define TYPE_LEN_CHECK(_type, _name) \ + case PF_PPTP_CTRL_TYPE_##_type: \ + if (tlen < sizeof(struct pf_pptp_ctrl_##_name)) \ + return; \ + break; switch (cm.ctrl.type) { - TYPE_LEN_CHECK(START_REQ, start_req); - TYPE_LEN_CHECK(START_RPY, start_rpy); - TYPE_LEN_CHECK(STOP_REQ, stop_req); - TYPE_LEN_CHECK(STOP_RPY, stop_rpy); - TYPE_LEN_CHECK(ECHO_REQ, echo_req); - TYPE_LEN_CHECK(ECHO_RPY, echo_rpy); - TYPE_LEN_CHECK(CALL_OUT_REQ, call_out_req); - TYPE_LEN_CHECK(CALL_OUT_RPY, call_out_rpy); - TYPE_LEN_CHECK(CALL_IN_1ST, call_in_1st); - TYPE_LEN_CHECK(CALL_IN_2ND, call_in_2nd); - TYPE_LEN_CHECK(CALL_IN_3RD, call_in_3rd); - TYPE_LEN_CHECK(CALL_CLR, call_clr); - TYPE_LEN_CHECK(CALL_DISC, call_disc); - TYPE_LEN_CHECK(ERROR, error); - TYPE_LEN_CHECK(SET_LINKINFO, set_linkinfo); + 
TYPE_LEN_CHECK(START_REQ, start_req); + TYPE_LEN_CHECK(START_RPY, start_rpy); + TYPE_LEN_CHECK(STOP_REQ, stop_req); + TYPE_LEN_CHECK(STOP_RPY, stop_rpy); + TYPE_LEN_CHECK(ECHO_REQ, echo_req); + TYPE_LEN_CHECK(ECHO_RPY, echo_rpy); + TYPE_LEN_CHECK(CALL_OUT_REQ, call_out_req); + TYPE_LEN_CHECK(CALL_OUT_RPY, call_out_rpy); + TYPE_LEN_CHECK(CALL_IN_1ST, call_in_1st); + TYPE_LEN_CHECK(CALL_IN_2ND, call_in_2nd); + TYPE_LEN_CHECK(CALL_IN_3RD, call_in_3rd); + TYPE_LEN_CHECK(CALL_CLR, call_clr); + TYPE_LEN_CHECK(CALL_DISC, call_disc); + TYPE_LEN_CHECK(ERROR, error); + TYPE_LEN_CHECK(SET_LINKINFO, set_linkinfo); default: return; } @@ -6226,13 +6557,14 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, if (!gs) { gs = pool_get(&pf_state_pl, PR_WAITOK); - if (!gs) + if (!gs) { return; + } - memcpy(gs, s, sizeof (*gs)); + memcpy(gs, s, sizeof(*gs)); - memset(&gs->entry_id, 0, sizeof (gs->entry_id)); - memset(&gs->entry_list, 0, sizeof (gs->entry_list)); + memset(&gs->entry_id, 0, sizeof(gs->entry_id)); + memset(&gs->entry_list, 0, sizeof(gs->entry_list)); TAILQ_INIT(&gs->unlink_hooks); gs->rt_kif = NULL; @@ -6258,10 +6590,10 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, return; } - memcpy(&gsk->lan, &sk->lan, sizeof (gsk->lan)); - memcpy(&gsk->gwy, &sk->gwy, sizeof (gsk->gwy)); - memcpy(&gsk->ext_lan, &sk->ext_lan, sizeof (gsk->ext_lan)); - memcpy(&gsk->ext_gwy, &sk->ext_gwy, sizeof (gsk->ext_gwy)); + memcpy(&gsk->lan, &sk->lan, sizeof(gsk->lan)); + memcpy(&gsk->gwy, &sk->gwy, sizeof(gsk->gwy)); + memcpy(&gsk->ext_lan, &sk->ext_lan, sizeof(gsk->ext_lan)); + memcpy(&gsk->ext_gwy, &sk->ext_gwy, sizeof(gsk->ext_gwy)); gsk->af_lan = sk->af_lan; gsk->af_gwy = sk->af_gwy; gsk->proto = IPPROTO_GRE; @@ -6273,7 +6605,7 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, gsk->ext_gwy.xport.call_id = 0; gsk->flowsrc = FLOWSRC_PF; gsk->flowhash = pf_calc_state_key_flowhash(gsk); - memset(gas, 0, sizeof (*gas)); + memset(gas, 0, sizeof(*gas)); gas->u.grev1.pptp_state = s; STATE_INC_COUNTERS(gs); pptps->grev1_state = gs; @@ -6313,17 +6645,19 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, case PF_PPTP_CTRL_TYPE_CALL_OUT_REQ: *pns_call_id = cm.msg.call_out_req.call_id; *pns_state = PFGRE1S_INITIATING; - if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id) + if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id) { spoof_call_id = &cm.msg.call_out_req.call_id; + } break; case PF_PPTP_CTRL_TYPE_CALL_OUT_RPY: *pac_call_id = cm.msg.call_out_rpy.call_id; - if (s->nat_rule.ptr) + if (s->nat_rule.ptr) { spoof_call_id = (pac_call_id == &gsk->lan.xport.call_id) ? &cm.msg.call_out_rpy.call_id : &cm.msg.call_out_rpy.peer_call_id; + } if (gs->timeout == PFTM_UNLINKED) { *pac_state = PFGRE1S_INITIATING; op = PF_PPTP_INSERT_GRE; @@ -6333,48 +6667,56 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, case PF_PPTP_CTRL_TYPE_CALL_IN_1ST: *pns_call_id = cm.msg.call_in_1st.call_id; *pns_state = PFGRE1S_INITIATING; - if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id) + if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id) { spoof_call_id = &cm.msg.call_in_1st.call_id; + } break; case PF_PPTP_CTRL_TYPE_CALL_IN_2ND: *pac_call_id = cm.msg.call_in_2nd.call_id; *pac_state = PFGRE1S_INITIATING; - if (s->nat_rule.ptr) + if (s->nat_rule.ptr) { spoof_call_id = (pac_call_id == &gsk->lan.xport.call_id) ? 
&cm.msg.call_in_2nd.call_id : &cm.msg.call_in_2nd.peer_call_id; + } break; case PF_PPTP_CTRL_TYPE_CALL_IN_3RD: - if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id) + if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id) { spoof_call_id = &cm.msg.call_in_3rd.call_id; + } if (cm.msg.call_in_3rd.call_id != *pns_call_id) { break; } - if (gs->timeout == PFTM_UNLINKED) + if (gs->timeout == PFTM_UNLINKED) { op = PF_PPTP_INSERT_GRE; + } break; case PF_PPTP_CTRL_TYPE_CALL_CLR: - if (cm.msg.call_clr.call_id != *pns_call_id) + if (cm.msg.call_clr.call_id != *pns_call_id) { op = PF_PPTP_REMOVE_GRE; + } break; case PF_PPTP_CTRL_TYPE_CALL_DISC: - if (cm.msg.call_clr.call_id != *pac_call_id) + if (cm.msg.call_clr.call_id != *pac_call_id) { op = PF_PPTP_REMOVE_GRE; + } break; case PF_PPTP_CTRL_TYPE_ERROR: - if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id) + if (s->nat_rule.ptr && pns_call_id == &gsk->lan.xport.call_id) { spoof_call_id = &cm.msg.error.peer_call_id; + } break; case PF_PPTP_CTRL_TYPE_SET_LINKINFO: - if (s->nat_rule.ptr && pac_call_id == &gsk->lan.xport.call_id) + if (s->nat_rule.ptr && pac_call_id == &gsk->lan.xport.call_id) { spoof_call_id = &cm.msg.set_linkinfo.peer_call_id; + } break; default: @@ -6403,7 +6745,9 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, while (pf_find_state_all(&key, PF_IN, 0)) { call_id = ntohs(call_id); --call_id; - if (--call_id == 0) call_id = 0xffff; + if (--call_id == 0) { + call_id = 0xffff; + } call_id = htons(call_id); key.gwy.xport.call_id = call_id; @@ -6468,7 +6812,6 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, } pf_set_rt_ifp(gs, &sk->lan.addr, sk->af_lan); if (pf_insert_state(BOUND_IFACE(s->rule.ptr, kif), gs)) { - /* * * FIX ME: insertion can fail when multiple PNS @@ -6481,7 +6824,7 @@ pf_pptp_handler(struct pf_state *s, int direction, int off, * that fixing this is a low priority. */ pptps->grev1_state = NULL; - pd->lmw = -1; /* Force PF_DROP on PFRES_MEMORY */ + pd->lmw = -1; /* Force PF_DROP on PFRES_MEMORY */ pf_src_tree_remove_state(gs); STATE_DEC_COUNTERS(gs); pool_put(&pf_state_pl, gs); @@ -6504,8 +6847,9 @@ pf_pptp_unlink(struct pf_state *s) if (grev1s) { struct pf_app_state *gas = grev1s->state_key->app_state; - if (grev1s->timeout < PFTM_MAX) + if (grev1s->timeout < PFTM_MAX) { grev1s->timeout = PFTM_PURGE; + } gas->u.grev1.pptp_state = NULL; as->u.pptp.grev1_state = NULL; } @@ -6529,12 +6873,12 @@ static int pf_ike_compare(struct pf_app_state *a, struct pf_app_state *b) { int64_t d = a->u.ike.cookie - b->u.ike.cookie; - return ((d > 0) ? 1 : ((d < 0) ? -1 : 0)); + return (d > 0) ? 1 : ((d < 0) ? 
-1 : 0); } static int pf_do_nat64(struct pf_state_key *sk, struct pf_pdesc *pd, pbuf_t *pbuf, - int off) + int off) { if (pd->af == AF_INET) { if (pd->af != sk->af_lan) { @@ -6544,19 +6888,18 @@ pf_do_nat64(struct pf_state_key *sk, struct pf_pdesc *pd, pbuf_t *pbuf, pd->naddr = sk->gwy.addr; pd->ndaddr = sk->ext_gwy.addr; } - return (pf_nat64_ipv4(pbuf, off, pd)); - } - else if (pd->af == AF_INET6) { + return pf_nat64_ipv4(pbuf, off, pd); + } else if (pd->af == AF_INET6) { if (pd->af != sk->af_lan) { pd->ndaddr = sk->lan.addr; pd->naddr = sk->ext_lan.addr; - } else { - pd->naddr = sk->gwy.addr; - pd->ndaddr = sk->ext_gwy.addr; - } - return (pf_nat64_ipv6(pbuf, off, pd)); + } else { + pd->naddr = sk->gwy.addr; + pd->ndaddr = sk->ext_gwy.addr; + } + return pf_nat64_ipv6(pbuf, off, pd); } - return (PF_DROP); + return PF_DROP; } static int @@ -6565,15 +6908,15 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, u_short *reason) { #pragma unused(h) - struct pf_state_key_cmp key; - struct tcphdr *th = pd->hdr.tcp; - u_int16_t win = ntohs(th->th_win); - u_int32_t ack, end, seq, orig_seq; - u_int8_t sws, dws; - int ackskew; - int copyback = 0; - struct pf_state_peer *src, *dst; - struct pf_state_key *sk; + struct pf_state_key_cmp key; + struct tcphdr *th = pd->hdr.tcp; + u_int16_t win = ntohs(th->th_win); + u_int32_t ack, end, seq, orig_seq; + u_int8_t sws, dws; + int ackskew; + int copyback = 0; + struct pf_state_peer *src, *dst; + struct pf_state_key *sk; key.app_state = 0; key.proto = IPPROTO_TCP; @@ -6621,31 +6964,32 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, if (src->state == PF_TCPS_PROXY_SRC) { if (direction != sk->direction) { REASON_SET(reason, PFRES_SYNPROXY); - return (PF_SYNPROXY_DROP); + return PF_SYNPROXY_DROP; } if (th->th_flags & TH_SYN) { if (ntohl(th->th_seq) != src->seqlo) { REASON_SET(reason, PFRES_SYNPROXY); - return (PF_DROP); + return PF_DROP; } pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, src->seqhi, ntohl(th->th_seq) + 1, - TH_SYN|TH_ACK, 0, src->mss, 0, 1, + TH_SYN | TH_ACK, 0, src->mss, 0, 1, 0, NULL, NULL); REASON_SET(reason, PFRES_SYNPROXY); - return (PF_SYNPROXY_DROP); + return PF_SYNPROXY_DROP; } else if (!(th->th_flags & TH_ACK) || (ntohl(th->th_ack) != src->seqhi + 1) || (ntohl(th->th_seq) != src->seqlo + 1)) { REASON_SET(reason, PFRES_SYNPROXY); - return (PF_DROP); + return PF_DROP; } else if ((*state)->src_node != NULL && pf_src_connlimit(state)) { REASON_SET(reason, PFRES_SRCLIMIT); - return (PF_DROP); - } else + return PF_DROP; + } else { src->state = PF_TCPS_PROXY_DST; + } } if (src->state == PF_TCPS_PROXY_DST) { struct pf_state_host *psrc, *pdst; @@ -6658,26 +7002,27 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, pdst = &sk->lan; } if (direction == sk->direction) { - if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) || + if (((th->th_flags & (TH_SYN | TH_ACK)) != TH_ACK) || (ntohl(th->th_ack) != src->seqhi + 1) || (ntohl(th->th_seq) != src->seqlo + 1)) { REASON_SET(reason, PFRES_SYNPROXY); - return (PF_DROP); + return PF_DROP; } src->max_win = MAX(ntohs(th->th_win), 1); - if (dst->seqhi == 1) + if (dst->seqhi == 1) { dst->seqhi = htonl(random()); + } pf_send_tcp((*state)->rule.ptr, pd->af, &psrc->addr, &pdst->addr, psrc->xport.port, pdst->xport.port, dst->seqhi, 0, TH_SYN, 0, src->mss, 0, 0, (*state)->tag, NULL, NULL); REASON_SET(reason, PFRES_SYNPROXY); - return (PF_SYNPROXY_DROP); - } else if (((th->th_flags & 
(TH_SYN|TH_ACK)) != - (TH_SYN|TH_ACK)) || + return PF_SYNPROXY_DROP; + } else if (((th->th_flags & (TH_SYN | TH_ACK)) != + (TH_SYN | TH_ACK)) || (ntohl(th->th_ack) != dst->seqhi + 1)) { REASON_SET(reason, PFRES_SYNPROXY); - return (PF_DROP); + return PF_DROP; } else { dst->max_win = MAX(ntohs(th->th_win), 1); dst->seqlo = ntohl(th->th_seq); @@ -6703,11 +7048,11 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, src->state = dst->state = TCPS_ESTABLISHED; REASON_SET(reason, PFRES_SYNPROXY); - return (PF_SYNPROXY_DROP); + return PF_SYNPROXY_DROP; } } - if (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) && + if (((th->th_flags & (TH_SYN | TH_ACK)) == TH_SYN) && dst->state >= TCPS_FIN_WAIT_2 && src->state >= TCPS_FIN_WAIT_2) { if (pf_status.debug >= PF_DEBUG_MISC) { @@ -6720,7 +7065,7 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, src->state = dst->state = TCPS_CLOSED; pf_unlink_state(*state); *state = NULL; - return (PF_DROP); + return PF_DROP; } if ((th->th_flags & TH_SYN) == 0) { @@ -6728,9 +7073,9 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, (src->wscale & PF_WSCALE_MASK) : TCP_MAX_WINSHIFT; dws = (dst->wscale & PF_WSCALE_FLAG) ? (dst->wscale & PF_WSCALE_MASK) : TCP_MAX_WINSHIFT; - } - else + } else { sws = dws = 0; + } /* * Sequence tracking algorithm from Guido van Rooij's paper: @@ -6746,20 +7091,21 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, src->scrub == NULL) { if (pf_normalize_tcp_init(pbuf, off, pd, th, src, dst)) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } } /* Deferred generation of sequence number modulator */ if (dst->seqdiff && !src->seqdiff) { /* use random iss for the TCP server */ - while ((src->seqdiff = random() - seq) == 0) + while ((src->seqdiff = random() - seq) == 0) { ; + } ack = ntohl(th->th_ack) - dst->seqdiff; pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + src->seqdiff), 0); pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); - copyback = off + sizeof (*th); + copyback = off + sizeof(*th); } else { ack = ntohl(th->th_ack); } @@ -6799,12 +7145,14 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, } } } - if (th->th_flags & TH_FIN) + if (th->th_flags & TH_FIN) { end++; + } src->seqlo = seq; - if (src->state < TCPS_SYN_SENT) + if (src->state < TCPS_SYN_SENT) { src->state = TCPS_SYN_SENT; + } /* * May need to slide the window (seqhi may have been set by @@ -6813,11 +7161,12 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, */ if (src->seqhi == 1 || SEQ_GEQ(end + MAX(1, (u_int32_t)dst->max_win << dws), - src->seqhi)) + src->seqhi)) { src->seqhi = end + MAX(1, (u_int32_t)dst->max_win << dws); - if (win > src->max_win) + } + if (win > src->max_win) { src->max_win = win; - + } } else { ack = ntohl(th->th_ack) - dst->seqdiff; if (src->seqdiff) { @@ -6825,20 +7174,22 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, pf_change_a(&th->th_seq, &th->th_sum, htonl(seq + src->seqdiff), 0); pf_change_a(&th->th_ack, &th->th_sum, htonl(ack), 0); - copyback = off+ sizeof (*th); + copyback = off + sizeof(*th); } end = seq + pd->p_len; - if (th->th_flags & TH_SYN) + if (th->th_flags & TH_SYN) { end++; - if (th->th_flags & TH_FIN) + } + if (th->th_flags & TH_FIN) { end++; + } } if ((th->th_flags & TH_ACK) == 0) { /* Let it pass through the ack skew check */ ack = dst->seqlo; } else if ((ack == 0 && - (th->th_flags & 
(TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) || + (th->th_flags & (TH_ACK | TH_RST)) == (TH_ACK | TH_RST)) || /* broken tcp stacks do not set ack */ (dst->state < TCPS_SYN_SENT)) { /* @@ -6869,18 +7220,18 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, * spoof into a TCP connection won't bother blindly sending SACK * options anyway. */ - if (dst->seqdiff && (th->th_off << 2) > (int)sizeof (struct tcphdr)) { + if (dst->seqdiff && (th->th_off << 2) > (int)sizeof(struct tcphdr)) { copyback = pf_modulate_sack(pbuf, off, pd, th, dst); if (copyback == -1) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } - pbuf = pd->mp; // XXXSCW: Why? + pbuf = pd->mp; // XXXSCW: Why? } -#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ +#define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */ if (SEQ_GEQ(src->seqhi, end) && /* Last octet inside other's window space */ SEQ_GEQ(seq, src->seqlo - ((u_int32_t)dst->max_win << dws)) && @@ -6892,33 +7243,41 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo || (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) || (pd->flags & PFDESC_IP_REAS) == 0)) { - /* Require an exact/+1 sequence match on resets when possible */ + /* Require an exact/+1 sequence match on resets when possible */ if (dst->scrub || src->scrub) { if (pf_normalize_tcp_stateful(pbuf, off, pd, reason, th, - *state, src, dst, ©back)) - return (PF_DROP); + *state, src, dst, ©back)) { + return PF_DROP; + } - pbuf = pd->mp; // XXXSCW: Why? + pbuf = pd->mp; // XXXSCW: Why? } /* update max window */ - if (src->max_win < win) + if (src->max_win < win) { src->max_win = win; + } /* synchronize sequencing */ - if (SEQ_GT(end, src->seqlo)) + if (SEQ_GT(end, src->seqlo)) { src->seqlo = end; + } /* slide the window of what the other end can send */ - if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi)) + if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi)) { dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1); + } /* update states */ - if (th->th_flags & TH_SYN) - if (src->state < TCPS_SYN_SENT) + if (th->th_flags & TH_SYN) { + if (src->state < TCPS_SYN_SENT) { src->state = TCPS_SYN_SENT; - if (th->th_flags & TH_FIN) - if (src->state < TCPS_CLOSING) + } + } + if (th->th_flags & TH_FIN) { + if (src->state < TCPS_CLOSING) { src->state = TCPS_CLOSING; + } + } if (th->th_flags & TH_ACK) { if (dst->state == TCPS_SYN_SENT) { dst->state = TCPS_ESTABLISHED; @@ -6926,39 +7285,41 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, (*state)->src_node != NULL && pf_src_connlimit(state)) { REASON_SET(reason, PFRES_SRCLIMIT); - return (PF_DROP); + return PF_DROP; } - } else if (dst->state == TCPS_CLOSING) + } else if (dst->state == TCPS_CLOSING) { dst->state = TCPS_FIN_WAIT_2; + } } - if (th->th_flags & TH_RST) + if (th->th_flags & TH_RST) { src->state = dst->state = TCPS_TIME_WAIT; + } /* update expire time */ (*state)->expire = pf_time_second(); if (src->state >= TCPS_FIN_WAIT_2 && - dst->state >= TCPS_FIN_WAIT_2) + dst->state >= TCPS_FIN_WAIT_2) { (*state)->timeout = PFTM_TCP_CLOSED; - else if (src->state >= TCPS_CLOSING && - dst->state >= TCPS_CLOSING) + } else if (src->state >= TCPS_CLOSING && + dst->state >= TCPS_CLOSING) { (*state)->timeout = PFTM_TCP_FIN_WAIT; - else if (src->state < TCPS_ESTABLISHED || - dst->state < TCPS_ESTABLISHED) + } else if (src->state < TCPS_ESTABLISHED || + dst->state < 
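
The MAXACKWINDOW test above is the core of the sequence tracker from Guido van Rooij's paper: a segment passes only if it ends inside the window the peer advertised, reaches back no more than one full window, and carries an ACK within a bounded skew of what the other side has actually sent. The predicate extracted into one function (struct and names mine; the 1500-byte fudge is the one the hunk itself calls arbitrary):

#include <stdint.h>

#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)	/* mod 2^32 */
#define MAXACKWINDOW	(0xffff + 1500)

struct peer_track {
	uint32_t seqlo;		/* oldest sequence number in flight */
	uint32_t seqhi;		/* highest seq the peer may send */
	uint32_t max_win;	/* largest window it has advertised */
	uint8_t  wscale;	/* negotiated window-scale shift */
};

static int
segment_acceptable(const struct peer_track *src,
    const struct peer_track *dst, uint32_t seq, uint32_t end, uint32_t ack)
{
	int32_t ackskew = (int32_t)(dst->seqlo - ack);

	return SEQ_GEQ(src->seqhi, end) &&
	    SEQ_GEQ(seq, src->seqlo - (dst->max_win << dst->wscale)) &&
	    ackskew >= -MAXACKWINDOW &&
	    ackskew <= (MAXACKWINDOW << src->wscale);
}
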
TCPS_ESTABLISHED) { (*state)->timeout = PFTM_TCP_OPENING; - else if (src->state >= TCPS_CLOSING || - dst->state >= TCPS_CLOSING) + } else if (src->state >= TCPS_CLOSING || + dst->state >= TCPS_CLOSING) { (*state)->timeout = PFTM_TCP_CLOSING; - else + } else { (*state)->timeout = PFTM_TCP_ESTABLISHED; + } /* Fall through to PASS packet */ - } else if ((dst->state < TCPS_SYN_SENT || dst->state >= TCPS_FIN_WAIT_2 || src->state >= TCPS_FIN_WAIT_2) && SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) && /* Within a window forward of the originating packet */ SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) { - /* Within a window backward of the originating packet */ + /* Within a window backward of the originating packet */ /* * This currently handles three situations: @@ -6996,45 +7357,52 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, if (dst->scrub || src->scrub) { if (pf_normalize_tcp_stateful(pbuf, off, pd, reason, th, - *state, src, dst, ©back)) - return (PF_DROP); - pbuf = pd->mp; // XXXSCW: Why? + *state, src, dst, ©back)) { + return PF_DROP; + } + pbuf = pd->mp; // XXXSCW: Why? } /* update max window */ - if (src->max_win < win) + if (src->max_win < win) { src->max_win = win; + } /* synchronize sequencing */ - if (SEQ_GT(end, src->seqlo)) + if (SEQ_GT(end, src->seqlo)) { src->seqlo = end; + } /* slide the window of what the other end can send */ - if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi)) + if (SEQ_GEQ(ack + ((u_int32_t)win << sws), dst->seqhi)) { dst->seqhi = ack + MAX(((u_int32_t)win << sws), 1); + } /* * Cannot set dst->seqhi here since this could be a shotgunned * SYN and not an already established connection. */ - if (th->th_flags & TH_FIN) - if (src->state < TCPS_CLOSING) + if (th->th_flags & TH_FIN) { + if (src->state < TCPS_CLOSING) { src->state = TCPS_CLOSING; - if (th->th_flags & TH_RST) + } + } + if (th->th_flags & TH_RST) { src->state = dst->state = TCPS_TIME_WAIT; + } /* Fall through to PASS packet */ - } else { if (dst->state == TCPS_SYN_SENT && src->state == TCPS_SYN_SENT) { /* Send RST for state mismatches during handshake */ - if (!(th->th_flags & TH_RST)) + if (!(th->th_flags & TH_RST)) { pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst, pd->src, th->th_dport, th->th_sport, ntohl(th->th_ack), 0, TH_RST, 0, 0, (*state)->rule.ptr->return_ttl, 1, 0, pd->eh, kif->pfik_ifp); + } src->seqlo = 0; src->seqhi = 1; src->max_win = 1; @@ -7061,7 +7429,7 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6'); } REASON_SET(reason, PFRES_BADSTATE); - return (PF_DROP); + return PF_DROP; } /* Any packets which have gotten here are to be passed */ @@ -7072,9 +7440,9 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, off + (th->th_off << 2), pd, kif); if (pd->lmw < 0) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } - pbuf = pd->mp; // XXXSCW: Why? + pbuf = pd->mp; // XXXSCW: Why? 
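
Once both peers' states have been advanced, the chain above picks the state's idle timeout from whichever side is furthest behind: both past FIN_WAIT_2 means the connection is closed, both closing means FIN-wait, either side not yet established means opening, and so on. As a standalone function (TCPS_* values follow the standard BSD ordering; PFTM_* names mirror the hunk):

enum { TCPS_ESTABLISHED = 4, TCPS_CLOSING = 7, TCPS_FIN_WAIT_2 = 9 };
enum pf_timeout { PFTM_TCP_OPENING, PFTM_TCP_ESTABLISHED,
	PFTM_TCP_CLOSING, PFTM_TCP_FIN_WAIT, PFTM_TCP_CLOSED };

static enum pf_timeout
tcp_timeout_for(int src_state, int dst_state)
{
	if (src_state >= TCPS_FIN_WAIT_2 && dst_state >= TCPS_FIN_WAIT_2)
		return PFTM_TCP_CLOSED;
	if (src_state >= TCPS_CLOSING && dst_state >= TCPS_CLOSING)
		return PFTM_TCP_FIN_WAIT;
	if (src_state < TCPS_ESTABLISHED || dst_state < TCPS_ESTABLISHED)
		return PFTM_TCP_OPENING;
	if (src_state >= TCPS_CLOSING || dst_state >= TCPS_CLOSING)
		return PFTM_TCP_CLOSING;
	return PFTM_TCP_ESTABLISHED;
}
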
} /* translate source/destination address, if necessary */ @@ -7083,61 +7451,61 @@ pf_test_state_tcp(struct pf_state **state, int direction, struct pfi_kif *kif, if (direction == PF_OUT) { pf_change_ap(direction, pd->mp, pd->src, &th->th_sport, - pd->ip_sum, &th->th_sum, &sk->gwy.addr, - sk->gwy.xport.port, 0, pd->af, pd->naf, 1); + pd->ip_sum, &th->th_sum, &sk->gwy.addr, + sk->gwy.xport.port, 0, pd->af, pd->naf, 1); } else { if (pd->af != pd->naf) { if (pd->af == sk->af_gwy) { pf_change_ap(direction, pd->mp, pd->dst, - &th->th_dport, pd->ip_sum, - &th->th_sum, &sk->lan.addr, - sk->lan.xport.port, 0, - pd->af, pd->naf, 0); + &th->th_dport, pd->ip_sum, + &th->th_sum, &sk->lan.addr, + sk->lan.xport.port, 0, + pd->af, pd->naf, 0); pf_change_ap(direction, pd->mp, pd->src, - &th->th_sport, pd->ip_sum, - &th->th_sum, &sk->ext_lan.addr, - th->th_sport, 0, pd->af, - pd->naf, 0); - + &th->th_sport, pd->ip_sum, + &th->th_sum, &sk->ext_lan.addr, + th->th_sport, 0, pd->af, + pd->naf, 0); } else { pf_change_ap(direction, pd->mp, pd->dst, - &th->th_dport, pd->ip_sum, - &th->th_sum, &sk->ext_gwy.addr, - th->th_dport, 0, pd->af, - pd->naf, 0); + &th->th_dport, pd->ip_sum, + &th->th_sum, &sk->ext_gwy.addr, + th->th_dport, 0, pd->af, + pd->naf, 0); pf_change_ap(direction, pd->mp, pd->src, - &th->th_sport, pd->ip_sum, - &th->th_sum, &sk->gwy.addr, - sk->gwy.xport.port, 0, pd->af, - pd->naf, 0); + &th->th_sport, pd->ip_sum, + &th->th_sum, &sk->gwy.addr, + sk->gwy.xport.port, 0, pd->af, + pd->naf, 0); } } else { pf_change_ap(direction, pd->mp, pd->dst, - &th->th_dport, pd->ip_sum, - &th->th_sum, &sk->lan.addr, - sk->lan.xport.port, 0, pd->af, - pd->naf, 1); + &th->th_dport, pd->ip_sum, + &th->th_sum, &sk->lan.addr, + sk->lan.xport.port, 0, pd->af, + pd->naf, 1); } } - copyback = off + sizeof (*th); + copyback = off + sizeof(*th); } if (copyback) { if (pf_lazy_makewritable(pd, pbuf, copyback) == NULL) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } /* Copyback sequence modulation or stateful scrub changes */ - pbuf_copy_back(pbuf, off, sizeof (*th), th); + pbuf_copy_back(pbuf, off, sizeof(*th), th); - if (sk->af_lan != sk->af_gwy) - return (pf_do_nat64(sk, pd, pbuf, off)); + if (sk->af_lan != sk->af_gwy) { + return pf_do_nat64(sk, pd, pbuf, off); + } } - return (PF_PASS); + return PF_PASS; } static int @@ -7145,10 +7513,10 @@ pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd, u_short *reason) { #pragma unused(h) - struct pf_state_peer *src, *dst; - struct pf_state_key_cmp key; - struct pf_state_key *sk; - struct udphdr *uh = pd->hdr.udp; + struct pf_state_peer *src, *dst; + struct pf_state_key_cmp key; + struct pf_state_key *sk; + struct udphdr *uh = pd->hdr.udp; struct pf_app_state as; int action, extfilter; key.app_state = 0; @@ -7183,16 +7551,17 @@ pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, if (ntohs(uh->uh_sport) == PF_IKE_PORT && ntohs(uh->uh_dport) == PF_IKE_PORT) { struct pf_ike_hdr ike; - size_t plen = pbuf->pb_packet_len - off - sizeof (*uh); + size_t plen = pbuf->pb_packet_len - off - sizeof(*uh); if (plen < PF_IKE_PACKET_MINSIZE) { DPFPRINTF(PF_DEBUG_MISC, ("pf: IKE message too small.\n")); - return (PF_DROP); + return PF_DROP; } - if (plen > sizeof (ike)) - plen = sizeof (ike); - pbuf_copy_data(pbuf, off + sizeof (*uh), plen, &ike); + if (plen > sizeof(ike)) { + plen = sizeof(ike); + } + pbuf_copy_data(pbuf, off + sizeof(*uh), plen, &ike); if 
(ike.initiator_cookie) { key.app_state = &as; @@ -7234,8 +7603,9 @@ pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, } } - if (pf_state_lookup_aux(state, kif, direction, &action)) - return (action); + if (pf_state_lookup_aux(state, kif, direction, &action)) { + return action; + } sk = (*state)->state_key; @@ -7253,99 +7623,103 @@ pf_test_state_udp(struct pf_state **state, int direction, struct pfi_kif *kif, } /* update states */ - if (src->state < PFUDPS_SINGLE) + if (src->state < PFUDPS_SINGLE) { src->state = PFUDPS_SINGLE; - if (dst->state == PFUDPS_SINGLE) + } + if (dst->state == PFUDPS_SINGLE) { dst->state = PFUDPS_MULTIPLE; + } /* update expire time */ (*state)->expire = pf_time_second(); - if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) + if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE) { (*state)->timeout = PFTM_UDP_MULTIPLE; - else + } else { (*state)->timeout = PFTM_UDP_SINGLE; + } extfilter = sk->proto_variant; if (extfilter > PF_EXTFILTER_APD) { if (direction == PF_OUT) { sk->ext_lan.xport.port = key.ext_lan.xport.port; - if (extfilter > PF_EXTFILTER_AD) + if (extfilter > PF_EXTFILTER_AD) { PF_ACPY(&sk->ext_lan.addr, &key.ext_lan.addr, - key.af_lan); + key.af_lan); + } } else { sk->ext_gwy.xport.port = key.ext_gwy.xport.port; - if (extfilter > PF_EXTFILTER_AD) + if (extfilter > PF_EXTFILTER_AD) { PF_ACPY(&sk->ext_gwy.addr, &key.ext_gwy.addr, - key.af_gwy); + key.af_gwy); + } } } if (sk->app_state && sk->app_state->handler) { sk->app_state->handler(*state, direction, off + uh->uh_ulen, - pd, kif); + pd, kif); if (pd->lmw < 0) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } - pbuf = pd->mp; // XXXSCW: Why? + pbuf = pd->mp; // XXXSCW: Why? } /* translate source/destination address, if necessary */ if (STATE_TRANSLATE(sk)) { - if (pf_lazy_makewritable(pd, pbuf, off + sizeof (*uh)) == NULL) { + if (pf_lazy_makewritable(pd, pbuf, off + sizeof(*uh)) == NULL) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } pd->naf = (pd->af == sk->af_lan) ? 
sk->af_gwy : sk->af_lan; if (direction == PF_OUT) { pf_change_ap(direction, pd->mp, pd->src, &uh->uh_sport, - pd->ip_sum, &uh->uh_sum, &sk->gwy.addr, - sk->gwy.xport.port, 1, pd->af, pd->naf, 1); + pd->ip_sum, &uh->uh_sum, &sk->gwy.addr, + sk->gwy.xport.port, 1, pd->af, pd->naf, 1); } else { if (pd->af != pd->naf) { - if (pd->af == sk->af_gwy) { pf_change_ap(direction, pd->mp, pd->dst, - &uh->uh_dport, pd->ip_sum, - &uh->uh_sum, &sk->lan.addr, - sk->lan.xport.port, 1, - pd->af, pd->naf, 0); + &uh->uh_dport, pd->ip_sum, + &uh->uh_sum, &sk->lan.addr, + sk->lan.xport.port, 1, + pd->af, pd->naf, 0); pf_change_ap(direction, pd->mp, pd->src, - &uh->uh_sport, pd->ip_sum, - &uh->uh_sum, &sk->ext_lan.addr, - uh->uh_sport, 1, pd->af, - pd->naf, 0); - + &uh->uh_sport, pd->ip_sum, + &uh->uh_sum, &sk->ext_lan.addr, + uh->uh_sport, 1, pd->af, + pd->naf, 0); } else { pf_change_ap(direction, pd->mp, pd->dst, - &uh->uh_dport, pd->ip_sum, - &uh->uh_sum, &sk->ext_gwy.addr, - uh->uh_dport, 1, pd->af, - pd->naf, 0); + &uh->uh_dport, pd->ip_sum, + &uh->uh_sum, &sk->ext_gwy.addr, + uh->uh_dport, 1, pd->af, + pd->naf, 0); pf_change_ap(direction, pd->mp, pd->src, - &uh->uh_sport, pd->ip_sum, - &uh->uh_sum, &sk->gwy.addr, - sk->gwy.xport.port, 1, pd->af, - pd->naf, 0); + &uh->uh_sport, pd->ip_sum, + &uh->uh_sum, &sk->gwy.addr, + sk->gwy.xport.port, 1, pd->af, + pd->naf, 0); } } else { pf_change_ap(direction, pd->mp, pd->dst, - &uh->uh_dport, pd->ip_sum, - &uh->uh_sum, &sk->lan.addr, - sk->lan.xport.port, 1, - pd->af, pd->naf, 1); + &uh->uh_dport, pd->ip_sum, + &uh->uh_sum, &sk->lan.addr, + sk->lan.xport.port, 1, + pd->af, pd->naf, 1); } } - pbuf_copy_back(pbuf, off, sizeof (*uh), uh); - if (sk->af_lan != sk->af_gwy) - return (pf_do_nat64(sk, pd, pbuf, off)); + pbuf_copy_back(pbuf, off, sizeof(*uh), uh); + if (sk->af_lan != sk->af_gwy) { + return pf_do_nat64(sk, pd, pbuf, off); + } } - return (PF_PASS); + return PF_PASS; } static int @@ -7353,13 +7727,13 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pbuf_t *pbuf, int off, void *h, struct pf_pdesc *pd, u_short *reason) { #pragma unused(h) - struct pf_addr *saddr = pd->src, *daddr = pd->dst; - struct in_addr srcv4_inaddr = saddr->v4addr; - u_int16_t icmpid = 0, *icmpsum = NULL; - u_int8_t icmptype = 0; - int state_icmp = 0; + struct pf_addr *saddr = pd->src, *daddr = pd->dst; + struct in_addr srcv4_inaddr = saddr->v4addr; + u_int16_t icmpid = 0, *icmpsum = NULL; + u_int8_t icmptype = 0; + int state_icmp = 0; struct pf_state_key_cmp key; - struct pf_state_key *sk; + struct pf_state_key *sk; struct pf_app_state as; key.app_state = 0; @@ -7373,8 +7747,9 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, icmpid = pd->hdr.icmp->icmp_id; icmpsum = &pd->hdr.icmp->icmp_cksum; - if (ICMP_ERRORTYPE(icmptype)) + if (ICMP_ERRORTYPE(icmptype)) { state_icmp++; + } break; #endif /* INET */ #if INET6 @@ -7383,14 +7758,14 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, icmpid = pd->hdr.icmp6->icmp6_id; icmpsum = &pd->hdr.icmp6->icmp6_cksum; - if (ICMP6_ERRORTYPE(icmptype)) + if (ICMP6_ERRORTYPE(icmptype)) { state_icmp++; + } break; #endif /* INET6 */ } if (!state_icmp) { - /* * ICMP query/reply message not related to a TCP/UDP packet. * Search for an ICMP state. @@ -7424,7 +7799,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, /* translate source/destination address, if necessary */ if (STATE_TRANSLATE(sk)) { pd->naf = (pd->af == sk->af_lan) ? 
- sk->af_gwy : sk->af_lan; + sk->af_gwy : sk->af_lan; if (direction == PF_OUT) { switch (pd->af) { #if INET @@ -7434,13 +7809,14 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, sk->gwy.addr.v4addr.s_addr, 0); pd->hdr.icmp->icmp_cksum = pf_cksum_fixup( - pd->hdr.icmp->icmp_cksum, icmpid, - sk->gwy.xport.port, 0); + pd->hdr.icmp->icmp_cksum, icmpid, + sk->gwy.xport.port, 0); pd->hdr.icmp->icmp_id = - sk->gwy.xport.port; + sk->gwy.xport.port; if (pf_lazy_makewritable(pd, pbuf, - off + ICMP_MINLEN) == NULL) - return (PF_DROP); + off + ICMP_MINLEN) == NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, off, ICMP_MINLEN, pd->hdr.icmp); break; @@ -7451,11 +7827,12 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, &pd->hdr.icmp6->icmp6_cksum, &sk->gwy.addr, 0); if (pf_lazy_makewritable(pd, NULL, - off + sizeof (struct icmp6_hdr)) == - NULL) - return (PF_DROP); + off + sizeof(struct icmp6_hdr)) == + NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, off, - sizeof (struct icmp6_hdr), + sizeof(struct icmp6_hdr), pd->hdr.icmp6); break; #endif /* INET6 */ @@ -7466,84 +7843,87 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, case AF_INET: if (pd->naf != AF_INET) { if (pf_translate_icmp_af( - AF_INET6, pd->hdr.icmp)) - return (PF_DROP); + AF_INET6, pd->hdr.icmp)) { + return PF_DROP; + } pd->proto = IPPROTO_ICMPV6; - } else { - pf_change_a(&daddr->v4addr.s_addr, - pd->ip_sum, - sk->lan.addr.v4addr.s_addr, 0); + pd->ip_sum, + sk->lan.addr.v4addr.s_addr, 0); pd->hdr.icmp->icmp_cksum = - pf_cksum_fixup( - pd->hdr.icmp->icmp_cksum, - icmpid, sk->lan.xport.port, 0); + pf_cksum_fixup( + pd->hdr.icmp->icmp_cksum, + icmpid, sk->lan.xport.port, 0); pd->hdr.icmp->icmp_id = - sk->lan.xport.port; + sk->lan.xport.port; } if (pf_lazy_makewritable(pd, pbuf, - off + ICMP_MINLEN) == NULL) - return (PF_DROP); + off + ICMP_MINLEN) == NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, off, ICMP_MINLEN, - pd->hdr.icmp); - if (sk->af_lan != sk->af_gwy) - return (pf_do_nat64(sk, pd, - pbuf, off)); + pd->hdr.icmp); + if (sk->af_lan != sk->af_gwy) { + return pf_do_nat64(sk, pd, + pbuf, off); + } break; #endif /* INET */ #if INET6 case AF_INET6: if (pd->naf != AF_INET6) { if (pf_translate_icmp_af( - AF_INET, pd->hdr.icmp6)) - return (PF_DROP); + AF_INET, pd->hdr.icmp6)) { + return PF_DROP; + } pd->proto = IPPROTO_ICMP; } else { pf_change_a6(daddr, - &pd->hdr.icmp6->icmp6_cksum, - &sk->lan.addr, 0); + &pd->hdr.icmp6->icmp6_cksum, + &sk->lan.addr, 0); } if (pf_lazy_makewritable(pd, pbuf, - off + sizeof (struct icmp6_hdr)) == - NULL) - return (PF_DROP); + off + sizeof(struct icmp6_hdr)) == + NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, off, - sizeof (struct icmp6_hdr), - pd->hdr.icmp6); - if (sk->af_lan != sk->af_gwy) - return (pf_do_nat64(sk, pd, - pbuf, off)); + sizeof(struct icmp6_hdr), + pd->hdr.icmp6); + if (sk->af_lan != sk->af_gwy) { + return pf_do_nat64(sk, pd, + pbuf, off); + } break; #endif /* INET6 */ } } } - return (PF_PASS); - + return PF_PASS; } else { /* * ICMP error message in response to a TCP/UDP packet. * Extract the inner TCP/UDP header and search for that state. 
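(Aside, not part of the patch: before the error branch below can re-do the state lookup, it must pull the offending inner IP header out of the ICMP payload, as h2/pd2. A userland sketch of that extraction, assuming a flat packet buffer; icmp_inner_ip is a made-up helper, whereas pf's real code pulls from a pbuf via pf_pull_hdr():

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/ip.h>
    #include <netinet/ip_icmp.h>
    #include <string.h>

    static int
    icmp_inner_ip(const unsigned char *pkt, size_t len, size_t off, struct ip *h2)
    {
        size_t ipoff2 = off + ICMP_MINLEN; /* inner IP header follows the 8-byte ICMP header */

        if (len < ipoff2 + sizeof(*h2))
            return 0;                      /* "ICMP error message too short (ip)" */
        memcpy(h2, pkt + ipoff2, sizeof(*h2));
        if (h2->ip_off & htons(IP_OFFMASK))
            return 0;                      /* errors never refer to non-first fragments */
        return 1;
    }

End of aside.)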
*/ - struct pf_pdesc pd2; /* For inner (original) header */ + struct pf_pdesc pd2; /* For inner (original) header */ #if INET - struct ip h2; + struct ip h2; #endif /* INET */ #if INET6 - struct ip6_hdr h2_6; - int terminal = 0; + struct ip6_hdr h2_6; + int terminal = 0; #endif /* INET6 */ - int ipoff2 = 0; - int off2 = 0; + int ipoff2 = 0; + int off2 = 0; - memset(&pd2, 0, sizeof (pd2)); + memset(&pd2, 0, sizeof(pd2)); pd2.af = pd->af; switch (pd->af) { @@ -7552,12 +7932,12 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, /* offset of h2 in mbuf chain */ ipoff2 = off + ICMP_MINLEN; - if (!pf_pull_hdr(pbuf, ipoff2, &h2, sizeof (h2), + if (!pf_pull_hdr(pbuf, ipoff2, &h2, sizeof(h2), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(ip)\n")); - return (PF_DROP); + return PF_DROP; } /* * ICMP error messages don't refer to non-first @@ -7565,7 +7945,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, */ if (h2.ip_off & htons(IP_OFFMASK)) { REASON_SET(reason, PFRES_FRAG); - return (PF_DROP); + return PF_DROP; } /* offset of protocol header that follows h2 */ @@ -7581,20 +7961,20 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, #endif /* INET */ #if INET6 case AF_INET6: - ipoff2 = off + sizeof (struct icmp6_hdr); + ipoff2 = off + sizeof(struct icmp6_hdr); - if (!pf_pull_hdr(pbuf, ipoff2, &h2_6, sizeof (h2_6), + if (!pf_pull_hdr(pbuf, ipoff2, &h2_6, sizeof(h2_6), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(ip6)\n")); - return (PF_DROP); + return PF_DROP; } pd2.proto = h2_6.ip6_nxt; pd2.src = (struct pf_addr *)(uintptr_t)&h2_6.ip6_src; pd2.dst = (struct pf_addr *)(uintptr_t)&h2_6.ip6_dst; pd2.ip_sum = NULL; - off2 = ipoff2 + sizeof (h2_6); + off2 = ipoff2 + sizeof(h2_6); do { switch (pd2.proto) { case IPPROTO_FRAGMENT: @@ -7603,7 +7983,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, * non-first fragments */ REASON_SET(reason, PFRES_FRAG); - return (PF_DROP); + return PF_DROP; case IPPROTO_AH: case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: @@ -7612,16 +7992,17 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, struct ip6_ext opt6; if (!pf_pull_hdr(pbuf, off2, &opt6, - sizeof (opt6), NULL, reason, + sizeof(opt6), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMPv6 short opt\n")); - return (PF_DROP); + return PF_DROP; } - if (pd2.proto == IPPROTO_AH) + if (pd2.proto == IPPROTO_AH) { off2 += (opt6.ip6e_len + 2) * 4; - else + } else { off2 += (opt6.ip6e_len + 1) * 8; + } pd2.proto = opt6.ip6e_nxt; /* goto the next header */ break; @@ -7639,11 +8020,11 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, switch (pd2.proto) { case IPPROTO_TCP: { - struct tcphdr th; - u_int32_t seq; - struct pf_state_peer *src, *dst; - u_int8_t dws; - int copyback = 0; + struct tcphdr th; + u_int32_t seq; + struct pf_state_peer *src, *dst; + u_int8_t dws; + int copyback = 0; /* * Only the first 8 bytes of the TCP header can be @@ -7655,7 +8036,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(tcp)\n")); - return (PF_DROP); + return PF_DROP; } key.proto = IPPROTO_TCP; @@ -7676,7 +8057,7 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, sk = (*state)->state_key; if ((direction == sk->direction) && 
((sk->af_lan == sk->af_gwy) || - (pd2.af == sk->af_lan))) { + (pd2.af == sk->af_lan))) { src = &(*state)->dst; dst = &(*state)->src; } else { @@ -7684,10 +8065,11 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, dst = &(*state)->dst; } - if (src->wscale && (dst->wscale & PF_WSCALE_FLAG)) + if (src->wscale && (dst->wscale & PF_WSCALE_FLAG)) { dws = dst->wscale & PF_WSCALE_MASK; - else + } else { dws = TCP_MAX_WINSHIFT; + } /* Demodulate sequence number */ seq = ntohl(th.th_seq) - src->seqdiff; @@ -7711,11 +8093,11 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, printf(" seq=%u\n", seq); } REASON_SET(reason, PFRES_BADSTATE); - return (PF_DROP); + return PF_DROP; } pd->naf = pd2.naf = (pd2.af == sk->af_lan) ? - sk->af_gwy : sk->af_lan; + sk->af_gwy : sk->af_lan; if (STATE_TRANSLATE(sk)) { /* NAT64 case */ @@ -7732,16 +8114,18 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, /* translate ICMP message types and codes */ if (pf_translate_icmp_af(pd->naf, - pd->hdr.icmp)) - return (PF_DROP); + pd->hdr.icmp)) { + return PF_DROP; + } if (pf_lazy_makewritable(pd, pbuf, - off2 + 8) == NULL) - return (PF_DROP); + off2 + 8) == NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, pd->off, - sizeof(struct icmp6_hdr), - pd->hdr.icmp6); + sizeof(struct icmp6_hdr), + pd->hdr.icmp6); /* * translate inner ip header within the @@ -7749,46 +8133,48 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, */ if (pf_change_icmp_af(pbuf, ipoff2, pd, &pd2, &saddr2->addr, &daddr2->addr, - pd->af, pd->naf)) - return (PF_DROP); + pd->af, pd->naf)) { + return PF_DROP; + } - if (pd->naf == AF_INET) + if (pd->naf == AF_INET) { pd->proto = IPPROTO_ICMP; - else + } else { pd->proto = IPPROTO_ICMPV6; + } /* * translate inner tcp header within * the ICMP message */ pf_change_ap(direction, NULL, pd2.src, - &th.th_sport, pd2.ip_sum, - &th.th_sum, &daddr2->addr, - saddr2->xport.port, 0, pd2.af, - pd2.naf, 0); + &th.th_sport, pd2.ip_sum, + &th.th_sum, &daddr2->addr, + saddr2->xport.port, 0, pd2.af, + pd2.naf, 0); pf_change_ap(direction, NULL, pd2.dst, - &th.th_dport, pd2.ip_sum, - &th.th_sum, &saddr2->addr, - daddr2->xport.port, 0, pd2.af, - pd2.naf, 0); + &th.th_dport, pd2.ip_sum, + &th.th_sum, &saddr2->addr, + daddr2->xport.port, 0, pd2.af, + pd2.naf, 0); pbuf_copy_back(pbuf, pd2.off, 8, &th); /* translate outer ip header */ PF_ACPY(&pd->naddr, &daddr2->addr, - pd->naf); + pd->naf); PF_ACPY(&pd->ndaddr, &saddr2->addr, - pd->naf); + pd->naf); if (pd->af == AF_INET) { memcpy(&pd->naddr.addr32[3], &srcv4_inaddr, sizeof(pd->naddr.addr32[3])); - return (pf_nat64_ipv4(pbuf, off, - pd)); + return pf_nat64_ipv4(pbuf, off, + pd); } else { - return (pf_nat64_ipv6(pbuf, off, - pd)); + return pf_nat64_ipv6(pbuf, off, + pd); } } if (direction == PF_IN) { @@ -7809,8 +8195,9 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, if (copyback) { if (pf_lazy_makewritable(pd, pbuf, off2 + 8) == - NULL) - return (PF_DROP); + NULL) { + return PF_DROP; + } switch (pd2.af) { #if INET case AF_INET: @@ -7823,27 +8210,27 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, #if INET6 case AF_INET6: pbuf_copy_back(pbuf, off, - sizeof (struct icmp6_hdr), + sizeof(struct icmp6_hdr), pd->hdr.icmp6); pbuf_copy_back(pbuf, ipoff2, - sizeof (h2_6), &h2_6); + sizeof(h2_6), &h2_6); break; #endif /* INET6 */ } pbuf_copy_back(pbuf, off2, 8, &th); } - return (PF_PASS); + return 
PF_PASS; } case IPPROTO_UDP: { struct udphdr uh; int dx, action; - if (!pf_pull_hdr(pbuf, off2, &uh, sizeof (uh), + if (!pf_pull_hdr(pbuf, off2, &uh, sizeof(uh), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(udp)\n")); - return (PF_DROP); + return PF_DROP; } key.af_gwy = pd2.af; @@ -7866,18 +8253,19 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, ntohs(uh.uh_dport) == PF_IKE_PORT) { struct pf_ike_hdr ike; size_t plen = pbuf->pb_packet_len - off2 - - sizeof (uh); + sizeof(uh); if (direction == PF_IN && plen < 8 /* PF_IKE_PACKET_MINSIZE */) { DPFPRINTF(PF_DEBUG_MISC, ("pf: " "ICMP error, embedded IKE message " "too small.\n")); - return (PF_DROP); + return PF_DROP; } - if (plen > sizeof (ike)) - plen = sizeof (ike); - pbuf_copy_data(pbuf, off + sizeof (uh), plen, + if (plen > sizeof(ike)) { + plen = sizeof(ike); + } + pbuf_copy_data(pbuf, off + sizeof(uh), plen, &ike); key.app_state = &as; @@ -7914,12 +8302,13 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, } } - if (pf_state_lookup_aux(state, kif, direction, &action)) - return (action); + if (pf_state_lookup_aux(state, kif, direction, &action)) { + return action; + } sk = (*state)->state_key; pd->naf = pd2.naf = (pd2.af == sk->af_lan) ? - sk->af_gwy : sk->af_lan; + sk->af_gwy : sk->af_lan; if (STATE_TRANSLATE(sk)) { /* NAT64 case */ @@ -7936,15 +8325,17 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, /* translate ICMP message */ if (pf_translate_icmp_af(pd->naf, - pd->hdr.icmp)) - return (PF_DROP); + pd->hdr.icmp)) { + return PF_DROP; + } if (pf_lazy_makewritable(pd, pbuf, - off2 + 8) == NULL) - return (PF_DROP); + off2 + 8) == NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, pd->off, - sizeof(struct icmp6_hdr), - pd->hdr.icmp6); + sizeof(struct icmp6_hdr), + pd->hdr.icmp6); /* * translate inner ip header within the @@ -7952,47 +8343,49 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, */ if (pf_change_icmp_af(pbuf, ipoff2, pd, &pd2, &saddr2->addr, &daddr2->addr, - pd->af, pd->naf)) - return (PF_DROP); + pd->af, pd->naf)) { + return PF_DROP; + } - if (pd->naf == AF_INET) + if (pd->naf == AF_INET) { pd->proto = IPPROTO_ICMP; - else + } else { pd->proto = IPPROTO_ICMPV6; + } /* * translate inner udp header within * the ICMP message */ pf_change_ap(direction, NULL, pd2.src, - &uh.uh_sport, pd2.ip_sum, - &uh.uh_sum, &daddr2->addr, - saddr2->xport.port, 0, pd2.af, - pd2.naf, 0); + &uh.uh_sport, pd2.ip_sum, + &uh.uh_sum, &daddr2->addr, + saddr2->xport.port, 0, pd2.af, + pd2.naf, 0); pf_change_ap(direction, NULL, pd2.dst, - &uh.uh_dport, pd2.ip_sum, - &uh.uh_sum, &saddr2->addr, - daddr2->xport.port, 0, pd2.af, - pd2.naf, 0); + &uh.uh_dport, pd2.ip_sum, + &uh.uh_sum, &saddr2->addr, + daddr2->xport.port, 0, pd2.af, + pd2.naf, 0); pbuf_copy_back(pbuf, pd2.off, sizeof(uh), &uh); /* translate outer ip header */ PF_ACPY(&pd->naddr, &daddr2->addr, - pd->naf); + pd->naf); PF_ACPY(&pd->ndaddr, &saddr2->addr, - pd->naf); + pd->naf); if (pd->af == AF_INET) { memcpy(&pd->naddr.addr32[3], &srcv4_inaddr, sizeof(pd->naddr.addr32[3])); - return (pf_nat64_ipv4(pbuf, off, - pd)); + return pf_nat64_ipv4(pbuf, off, + pd); } else { - return (pf_nat64_ipv6(pbuf, off, - pd)); + return pf_nat64_ipv6(pbuf, off, + pd); } } if (direction == PF_IN) { @@ -8009,46 +8402,47 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pd->ip_sum, 1, pd2.af); } if 
(pf_lazy_makewritable(pd, pbuf, - off2 + sizeof (uh)) == NULL) - return (PF_DROP); + off2 + sizeof(uh)) == NULL) { + return PF_DROP; + } switch (pd2.af) { #if INET case AF_INET: pbuf_copy_back(pbuf, off, ICMP_MINLEN, pd->hdr.icmp); pbuf_copy_back(pbuf, ipoff2, - sizeof (h2), &h2); + sizeof(h2), &h2); break; #endif /* INET */ #if INET6 case AF_INET6: pbuf_copy_back(pbuf, off, - sizeof (struct icmp6_hdr), + sizeof(struct icmp6_hdr), pd->hdr.icmp6); pbuf_copy_back(pbuf, ipoff2, - sizeof (h2_6), &h2_6); + sizeof(h2_6), &h2_6); break; #endif /* INET6 */ } - pbuf_copy_back(pbuf, off2, sizeof (uh), &uh); + pbuf_copy_back(pbuf, off2, sizeof(uh), &uh); } - return (PF_PASS); + return PF_PASS; } #if INET case IPPROTO_ICMP: { - struct icmp iih; + struct icmp iih; if (!pf_pull_hdr(pbuf, off2, &iih, ICMP_MINLEN, NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short i" "(icmp)\n")); - return (PF_DROP); + return PF_DROP; } key.proto = IPPROTO_ICMP; - if (direction == PF_IN) { + if (direction == PF_IN) { key.af_gwy = pd2.af; PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy); PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy); @@ -8080,31 +8474,32 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pd->ip_sum, 0, AF_INET); } if (pf_lazy_makewritable(pd, pbuf, - off2 + ICMP_MINLEN) == NULL) - return (PF_DROP); + off2 + ICMP_MINLEN) == NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, off, ICMP_MINLEN, pd->hdr.icmp); - pbuf_copy_back(pbuf, ipoff2, sizeof (h2), &h2); + pbuf_copy_back(pbuf, ipoff2, sizeof(h2), &h2); pbuf_copy_back(pbuf, off2, ICMP_MINLEN, &iih); } - return (PF_PASS); + return PF_PASS; } #endif /* INET */ #if INET6 case IPPROTO_ICMPV6: { - struct icmp6_hdr iih; + struct icmp6_hdr iih; if (!pf_pull_hdr(pbuf, off2, &iih, - sizeof (struct icmp6_hdr), NULL, reason, pd2.af)) { + sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) { DPFPRINTF(PF_DEBUG_MISC, ("pf: ICMP error message too short " "(icmp6)\n")); - return (PF_DROP); + return PF_DROP; } key.proto = IPPROTO_ICMPV6; - if (direction == PF_IN) { + if (direction == PF_IN) { key.af_gwy = pd2.af; PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy); PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy); @@ -8136,22 +8531,23 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, pd->ip_sum, 0, AF_INET6); } if (pf_lazy_makewritable(pd, pbuf, off2 + - sizeof (struct icmp6_hdr)) == NULL) - return (PF_DROP); + sizeof(struct icmp6_hdr)) == NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, off, - sizeof (struct icmp6_hdr), pd->hdr.icmp6); - pbuf_copy_back(pbuf, ipoff2, sizeof (h2_6), + sizeof(struct icmp6_hdr), pd->hdr.icmp6); + pbuf_copy_back(pbuf, ipoff2, sizeof(h2_6), &h2_6); pbuf_copy_back(pbuf, off2, - sizeof (struct icmp6_hdr), &iih); + sizeof(struct icmp6_hdr), &iih); } - return (PF_PASS); + return PF_PASS; } #endif /* INET6 */ default: { key.proto = pd2.proto; - if (direction == PF_IN) { + if (direction == PF_IN) { key.af_gwy = pd2.af; PF_ACPY(&key.ext_gwy.addr, pd2.dst, key.af_gwy); PF_ACPY(&key.gwy.addr, pd2.src, key.af_gwy); @@ -8171,21 +8567,22 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, if (STATE_TRANSLATE(sk)) { if (direction == PF_IN) { pf_change_icmp(pd2.src, NULL, daddr, - &sk->lan.addr, 0, NULL, - pd2.ip_sum, icmpsum, - pd->ip_sum, 0, pd2.af); + &sk->lan.addr, 0, NULL, + pd2.ip_sum, icmpsum, + pd->ip_sum, 0, pd2.af); } else { pf_change_icmp(pd2.dst, NULL, saddr, - &sk->gwy.addr, 0, NULL, - pd2.ip_sum, icmpsum, - pd->ip_sum, 0, 
pd2.af); + &sk->gwy.addr, 0, NULL, + pd2.ip_sum, icmpsum, + pd->ip_sum, 0, pd2.af); } switch (pd2.af) { #if INET case AF_INET: if (pf_lazy_makewritable(pd, pbuf, - ipoff2 + sizeof (h2)) == NULL) - return (PF_DROP); + ipoff2 + sizeof(h2)) == NULL) { + return PF_DROP; + } /* * * Xnu was missing the following... @@ -8202,19 +8599,20 @@ pf_test_state_icmp(struct pf_state **state, int direction, struct pfi_kif *kif, #if INET6 case AF_INET6: if (pf_lazy_makewritable(pd, pbuf, - ipoff2 + sizeof (h2_6)) == NULL) - return (PF_DROP); + ipoff2 + sizeof(h2_6)) == NULL) { + return PF_DROP; + } pbuf_copy_back(pbuf, off, - sizeof (struct icmp6_hdr), + sizeof(struct icmp6_hdr), pd->hdr.icmp6); pbuf_copy_back(pbuf, ipoff2, - sizeof (h2_6), &h2_6); + sizeof(h2_6), &h2_6); break; #endif /* INET6 */ } } - return (PF_PASS); + return PF_PASS; } } } @@ -8232,7 +8630,7 @@ pf_test_state_grev1(struct pf_state **state, int direction, key.app_state = 0; key.proto = IPPROTO_GRE; key.proto_variant = PF_GRE_PPTP_VARIANT; - if (direction == PF_IN) { + if (direction == PF_IN) { key.af_gwy = pd->af; PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy); PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy); @@ -8255,24 +8653,27 @@ pf_test_state_grev1(struct pf_state **state, int direction, } /* update states */ - if (src->state < PFGRE1S_INITIATING) + if (src->state < PFGRE1S_INITIATING) { src->state = PFGRE1S_INITIATING; + } /* update expire time */ (*state)->expire = pf_time_second(); if (src->state >= PFGRE1S_INITIATING && dst->state >= PFGRE1S_INITIATING) { - if ((*state)->timeout != PFTM_TCP_ESTABLISHED) + if ((*state)->timeout != PFTM_TCP_ESTABLISHED) { (*state)->timeout = PFTM_GREv1_ESTABLISHED; + } src->state = PFGRE1S_ESTABLISHED; dst->state = PFGRE1S_ESTABLISHED; } else { (*state)->timeout = PFTM_GREv1_INITIATING; } - if ((*state)->state_key->app_state) + if ((*state)->state_key->app_state) { (*state)->state_key->app_state->u.grev1.pptp_state->expire = pf_time_second(); + } /* translate source/destination address, if necessary */ if (STATE_GRE_TRANSLATE((*state)->state_key)) { @@ -8312,13 +8713,14 @@ pf_test_state_grev1(struct pf_state **state, int direction, } } - if (pf_lazy_makewritable(pd, pd->mp, off + sizeof (*grev1)) == - NULL) - return (PF_DROP); - pbuf_copy_back(pd->mp, off, sizeof (*grev1), grev1); + if (pf_lazy_makewritable(pd, pd->mp, off + sizeof(*grev1)) == + NULL) { + return PF_DROP; + } + pbuf_copy_back(pd->mp, off, sizeof(*grev1), grev1); } - return (PF_PASS); + return PF_PASS; } static int @@ -8332,9 +8734,9 @@ pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif, struct pf_esp_hdr *esp = pd->hdr.esp; int action; - memset(&key, 0, sizeof (key)); + memset(&key, 0, sizeof(key)); key.proto = IPPROTO_ESP; - if (direction == PF_IN) { + if (direction == PF_IN) { key.af_gwy = pd->af; PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy); PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy); @@ -8370,10 +8772,11 @@ pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif, esp->spi; if (RB_INSERT(pf_state_tree_ext_gwy, - &pf_statetbl_ext_gwy, sk)) + &pf_statetbl_ext_gwy, sk)) { pf_detach_state(s, PF_DT_SKIP_EXTGWY); - else + } else { *state = s; + } } } else { key.ext_lan.xport.spi = 0; @@ -8387,25 +8790,27 @@ pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif, sk->ext_lan.xport.spi = esp->spi; if (RB_INSERT(pf_state_tree_lan_ext, - &pf_statetbl_lan_ext, sk)) + &pf_statetbl_lan_ext, sk)) { pf_detach_state(s, PF_DT_SKIP_LANEXT); - else + } else { *state = s; + } } 
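Note (not part of the patch): ESP carries no ports, which is why the hunks above key the state on the SPI and re-insert it into the RB trees once the SPI is learned. A sketch of pulling that 32-bit SPI out of a flat buffer, where esp_hdr mirrors the layout of pf's struct pf_esp_hdr and esp_spi is a hypothetical helper:

    #include <stdint.h>
    #include <string.h>

    struct esp_hdr {
        uint32_t spi;  /* Security Parameters Index */
        uint32_t seq;  /* anti-replay sequence number */
    };

    static int
    esp_spi(const unsigned char *pkt, size_t len, size_t off, uint32_t *spi)
    {
        struct esp_hdr esp;

        if (len < off + sizeof(esp))
            return 0;              /* too short to carry an ESP header */
        memcpy(&esp, pkt + off, sizeof(esp));
        *spi = esp.spi;            /* left in network order, as pf stores it */
        return 1;
    }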
} if (s) { if (*state == 0) { #if NPFSYNC - if (s->creatorid == pf_status.hostid) + if (s->creatorid == pf_status.hostid) { pfsync_delete_state(s); + } #endif s->timeout = PFTM_UNLINKED; hook_runloop(&s->unlink_hooks, - HOOK_REMOVE|HOOK_FREE); + HOOK_REMOVE | HOOK_FREE); pf_src_tree_remove_state(s); pf_free_state(s); - return (PF_DROP); + return PF_DROP; } } } @@ -8420,8 +8825,9 @@ pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif, } } - if (pf_state_lookup_aux(state, kif, direction, &action)) - return (action); + if (pf_state_lookup_aux(state, kif, direction, &action)) { + return action; + } if (direction == (*state)->state_key->direction) { src = &(*state)->src; @@ -8432,8 +8838,9 @@ pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif, } /* update states */ - if (src->state < PFESPS_INITIATING) + if (src->state < PFESPS_INITIATING) { src->state = PFESPS_INITIATING; + } /* update expire time */ (*state)->expire = pf_time_second(); @@ -8482,19 +8889,19 @@ pf_test_state_esp(struct pf_state **state, int direction, struct pfi_kif *kif, } } - return (PF_PASS); + return PF_PASS; } static int pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, struct pf_pdesc *pd) { - struct pf_state_peer *src, *dst; - struct pf_state_key_cmp key; + struct pf_state_peer *src, *dst; + struct pf_state_key_cmp key; key.app_state = 0; key.proto = pd->proto; - if (direction == PF_IN) { + if (direction == PF_IN) { key.af_gwy = pd->af; PF_ACPY(&key.ext_gwy.addr, pd->src, key.af_gwy); PF_ACPY(&key.gwy.addr, pd->dst, key.af_gwy); @@ -8519,17 +8926,20 @@ pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, } /* update states */ - if (src->state < PFOTHERS_SINGLE) + if (src->state < PFOTHERS_SINGLE) { src->state = PFOTHERS_SINGLE; - if (dst->state == PFOTHERS_SINGLE) + } + if (dst->state == PFOTHERS_SINGLE) { dst->state = PFOTHERS_MULTIPLE; + } /* update expire time */ (*state)->expire = pf_time_second(); - if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) + if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE) { (*state)->timeout = PFTM_OTHER_MULTIPLE; - else + } else { (*state)->timeout = PFTM_OTHER_SINGLE; + } /* translate source/destination address, if necessary */ if (STATE_ADDR_TRANSLATE((*state)->state_key)) { @@ -8570,7 +8980,7 @@ pf_test_state_other(struct pf_state **state, int direction, struct pfi_kif *kif, } } - return (PF_PASS); + return PF_PASS; } /* @@ -8584,8 +8994,8 @@ pf_pull_hdr(pbuf_t *pbuf, int off, void *p, int len, switch (af) { #if INET case AF_INET: { - struct ip *h = pbuf->pb_data; - u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; + struct ip *h = pbuf->pb_data; + u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; if (fragoff) { if (fragoff >= len) { @@ -8594,113 +9004,114 @@ pf_pull_hdr(pbuf_t *pbuf, int off, void *p, int len, ACTION_SET(actionp, PF_DROP); REASON_SET(reasonp, PFRES_FRAG); } - return (NULL); + return NULL; } if (pbuf->pb_packet_len < (unsigned)(off + len) || ntohs(h->ip_len) < off + len) { ACTION_SET(actionp, PF_DROP); REASON_SET(reasonp, PFRES_SHORT); - return (NULL); + return NULL; } break; } #endif /* INET */ #if INET6 case AF_INET6: { - struct ip6_hdr *h = pbuf->pb_data; + struct ip6_hdr *h = pbuf->pb_data; if (pbuf->pb_packet_len < (unsigned)(off + len) || - (ntohs(h->ip6_plen) + sizeof (struct ip6_hdr)) < + (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) < (unsigned)(off + len)) { ACTION_SET(actionp, PF_DROP); 
REASON_SET(reasonp, PFRES_SHORT); - return (NULL); + return NULL; } break; } #endif /* INET6 */ } pbuf_copy_data(pbuf, off, len, p); - return (p); + return p; } int pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif) { #pragma unused(kif) - struct sockaddr_in *dst; - int ret = 1; + struct sockaddr_in *dst; + int ret = 1; #if INET6 - struct sockaddr_in6 *dst6; - struct route_in6 ro; + struct sockaddr_in6 *dst6; + struct route_in6 ro; #else - struct route ro; + struct route ro; #endif - bzero(&ro, sizeof (ro)); + bzero(&ro, sizeof(ro)); switch (af) { case AF_INET: dst = satosin(&ro.ro_dst); dst->sin_family = AF_INET; - dst->sin_len = sizeof (*dst); + dst->sin_len = sizeof(*dst); dst->sin_addr = addr->v4addr; break; #if INET6 case AF_INET6: dst6 = (struct sockaddr_in6 *)&ro.ro_dst; dst6->sin6_family = AF_INET6; - dst6->sin6_len = sizeof (*dst6); + dst6->sin6_len = sizeof(*dst6); dst6->sin6_addr = addr->v6addr; break; #endif /* INET6 */ default: - return (0); + return 0; } /* XXX: IFT_ENC is not currently used by anything*/ /* Skip checks for ipsec interfaces */ - if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) + if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC) { goto out; + } /* XXX: what is the point of this? */ rtalloc((struct route *)&ro); out: ROUTE_RELEASE(&ro); - return (ret); + return ret; } int pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw) { #pragma unused(aw) - struct sockaddr_in *dst; + struct sockaddr_in *dst; #if INET6 - struct sockaddr_in6 *dst6; - struct route_in6 ro; + struct sockaddr_in6 *dst6; + struct route_in6 ro; #else - struct route ro; + struct route ro; #endif - int ret = 0; + int ret = 0; - bzero(&ro, sizeof (ro)); + bzero(&ro, sizeof(ro)); switch (af) { case AF_INET: dst = satosin(&ro.ro_dst); dst->sin_family = AF_INET; - dst->sin_len = sizeof (*dst); + dst->sin_len = sizeof(*dst); dst->sin_addr = addr->v4addr; break; #if INET6 case AF_INET6: dst6 = (struct sockaddr_in6 *)&ro.ro_dst; dst6->sin6_family = AF_INET6; - dst6->sin6_len = sizeof (*dst6); + dst6->sin6_len = sizeof(*dst6); dst6->sin6_addr = addr->v6addr; break; #endif /* INET6 */ default: - return (0); + return 0; } /* XXX: what is the point of this? 
*/ @@ -8708,7 +9119,7 @@ pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw) ROUTE_RELEASE(&ro); - return (ret); + return ret; } #if INET @@ -8717,22 +9128,23 @@ pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, struct pf_state *s, struct pf_pdesc *pd) { #pragma unused(pd) - struct mbuf *m0, *m1; - struct route iproute; - struct route *ro = &iproute; - struct sockaddr_in *dst; - struct ip *ip; - struct ifnet *ifp = NULL; - struct pf_addr naddr; - struct pf_src_node *sn = NULL; - int error = 0; - uint32_t sw_csum; - int interface_mtu = 0; - bzero(&iproute, sizeof (iproute)); + struct mbuf *m0, *m1; + struct route iproute; + struct route *ro = &iproute; + struct sockaddr_in *dst; + struct ip *ip; + struct ifnet *ifp = NULL; + struct pf_addr naddr; + struct pf_src_node *sn = NULL; + int error = 0; + uint32_t sw_csum; + int interface_mtu = 0; + bzero(&iproute, sizeof(iproute)); if (pbufp == NULL || !pbuf_is_valid(*pbufp) || r == NULL || - (dir != PF_IN && dir != PF_OUT) || oifp == NULL) + (dir != PF_IN && dir != PF_OUT) || oifp == NULL) { panic("pf_route: invalid parameters"); + } if (pd->pf_mtag->pftag_routed++ > 3) { pbuf_destroy(*pbufp); @@ -8746,23 +9158,23 @@ pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, * host stack (for routing, at least for now), we convert the * incoming pbuf into an mbuf. */ - if (r->rt == PF_DUPTO) + if (r->rt == PF_DUPTO) { m0 = pbuf_clone_to_mbuf(*pbufp); - else - if ((r->rt == PF_REPLYTO) == (r->direction == dir)) + } else if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { return; - else { + } else { /* We're going to consume this packet */ m0 = pbuf_to_mbuf(*pbufp, TRUE); *pbufp = NULL; } - if (m0 == NULL) + if (m0 == NULL) { goto bad; + } /* We now have the packet in an mbuf (m0) */ - if (m0->m_len < (int)sizeof (struct ip)) { + if (m0->m_len < (int)sizeof(struct ip)) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_route: packet length < sizeof (struct ip)\n")); goto bad; @@ -8772,7 +9184,7 @@ pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, dst = satosin((void *)&ro->ro_dst); dst->sin_family = AF_INET; - dst->sin_len = sizeof (*dst); + dst->sin_len = sizeof(*dst); dst->sin_addr = ip->ip_dst; if (r->rt == PF_FASTROUTE) { @@ -8786,8 +9198,9 @@ pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, RT_LOCK(ro->ro_rt); ro->ro_rt->rt_use++; - if (ro->ro_rt->rt_flags & RTF_GATEWAY) + if (ro->ro_rt->rt_flags & RTF_GATEWAY) { dst = satosin((void *)ro->ro_rt->rt_gateway); + } RT_UNLOCK(ro->ro_rt); } else { if (TAILQ_EMPTY(&r->rpool.list)) { @@ -8798,26 +9211,30 @@ pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, if (s == NULL) { pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src, &naddr, NULL, &sn); - if (!PF_AZERO(&naddr, AF_INET)) + if (!PF_AZERO(&naddr, AF_INET)) { dst->sin_addr.s_addr = naddr.v4addr.s_addr; + } ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; } else { - if (!PF_AZERO(&s->rt_addr, AF_INET)) + if (!PF_AZERO(&s->rt_addr, AF_INET)) { dst->sin_addr.s_addr = s->rt_addr.v4addr.s_addr; + } ifp = s->rt_kif ? 
s->rt_kif->pfik_ifp : NULL; } } - if (ifp == NULL) + if (ifp == NULL) { goto bad; + } if (oifp != ifp) { - if (pf_test_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) + if (pf_test_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { goto bad; - else if (m0 == NULL) + } else if (m0 == NULL) { goto done; - if (m0->m_len < (int)sizeof (struct ip)) { + } + if (m0->m_len < (int)sizeof(struct ip)) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_route: packet length < sizeof (struct ip)\n")); goto bad; @@ -8862,8 +9279,9 @@ pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, interface_mtu); goto done; - } else + } else { goto bad; + } } m1 = m0; @@ -8883,23 +9301,26 @@ pf_route(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, for (m0 = m1; m0; m0 = m1) { m1 = m0->m_nextpkt; m0->m_nextpkt = 0; - if (error == 0) + if (error == 0) { error = ifnet_output(ifp, PF_INET, m0, ro->ro_rt, sintosa(dst)); - else + } else { m_freem(m0); + } } - if (error == 0) + if (error == 0) { ipstat.ips_fragmented++; + } done: ROUTE_RELEASE(&iproute); return; bad: - if (m0) + if (m0) { m_freem(m0); + } goto done; } #endif /* INET */ @@ -8910,19 +9331,20 @@ pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, struct pf_state *s, struct pf_pdesc *pd) { #pragma unused(pd) - struct mbuf *m0; - struct route_in6 ip6route; - struct route_in6 *ro; - struct sockaddr_in6 *dst; - struct ip6_hdr *ip6; - struct ifnet *ifp = NULL; - struct pf_addr naddr; - struct pf_src_node *sn = NULL; - int error = 0; + struct mbuf *m0; + struct route_in6 ip6route; + struct route_in6 *ro; + struct sockaddr_in6 *dst; + struct ip6_hdr *ip6; + struct ifnet *ifp = NULL; + struct pf_addr naddr; + struct pf_src_node *sn = NULL; + int error = 0; if (pbufp == NULL || !pbuf_is_valid(*pbufp) || r == NULL || - (dir != PF_IN && dir != PF_OUT) || oifp == NULL) + (dir != PF_IN && dir != PF_OUT) || oifp == NULL) { panic("pf_route6: invalid parameters"); + } if (pd->pf_mtag->pftag_routed++ > 3) { pbuf_destroy(*pbufp); @@ -8938,19 +9360,19 @@ pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, */ if (r->rt == PF_DUPTO) { m0 = pbuf_clone_to_mbuf(*pbufp); - } else - if ((r->rt == PF_REPLYTO) == (r->direction == dir)) + } else if ((r->rt == PF_REPLYTO) == (r->direction == dir)) { return; - else { + } else { /* We're about to consume this packet */ m0 = pbuf_to_mbuf(*pbufp, TRUE); *pbufp = NULL; } - if (m0 == NULL) + if (m0 == NULL) { goto bad; + } - if (m0->m_len < (int)sizeof (struct ip6_hdr)) { + if (m0->m_len < (int)sizeof(struct ip6_hdr)) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len < sizeof (struct ip6_hdr)\n")); goto bad; @@ -8958,18 +9380,19 @@ pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, ip6 = mtod(m0, struct ip6_hdr *); ro = &ip6route; - bzero((caddr_t)ro, sizeof (*ro)); + bzero((caddr_t)ro, sizeof(*ro)); dst = (struct sockaddr_in6 *)&ro->ro_dst; dst->sin6_family = AF_INET6; - dst->sin6_len = sizeof (*dst); + dst->sin6_len = sizeof(*dst); dst->sin6_addr = ip6->ip6_dst; /* Cheat. XXX why only in the v6addr case??? 
*/ if (r->rt == PF_FASTROUTE) { struct pf_mtag *pf_mtag; - if ((pf_mtag = pf_get_mtag(m0)) == NULL) + if ((pf_mtag = pf_get_mtag(m0)) == NULL) { goto bad; + } pf_mtag->pftag_flags |= PF_TAG_GENERATED; ip6_output(m0, NULL, NULL, 0, NULL, NULL, NULL); return; @@ -8983,25 +9406,29 @@ pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, if (s == NULL) { pf_map_addr(AF_INET6, r, (struct pf_addr *)(uintptr_t)&ip6->ip6_src, &naddr, NULL, &sn); - if (!PF_AZERO(&naddr, AF_INET6)) + if (!PF_AZERO(&naddr, AF_INET6)) { PF_ACPY((struct pf_addr *)&dst->sin6_addr, &naddr, AF_INET6); + } ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL; } else { - if (!PF_AZERO(&s->rt_addr, AF_INET6)) + if (!PF_AZERO(&s->rt_addr, AF_INET6)) { PF_ACPY((struct pf_addr *)&dst->sin6_addr, &s->rt_addr, AF_INET6); + } ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL; } - if (ifp == NULL) + if (ifp == NULL) { goto bad; + } if (oifp != ifp) { - if (pf_test6_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) + if (pf_test6_mbuf(PF_OUT, ifp, &m0, NULL, NULL) != PF_PASS) { goto bad; - else if (m0 == NULL) + } else if (m0 == NULL) { goto done; - if (m0->m_len < (int)sizeof (struct ip6_hdr)) { + } + if (m0->m_len < (int)sizeof(struct ip6_hdr)) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_route6: m0->m_len " "< sizeof (struct ip6_hdr)\n")); goto bad; @@ -9013,24 +9440,27 @@ pf_route6(pbuf_t **pbufp, struct pf_rule *r, int dir, struct ifnet *oifp, * If the packet is too large for the outgoing interface, * send back an icmp6 error. */ - if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr)) + if (IN6_IS_SCOPE_EMBED(&dst->sin6_addr)) { dst->sin6_addr.s6_addr16[1] = htons(ifp->if_index); + } if ((unsigned)m0->m_pkthdr.len <= ifp->if_mtu) { error = nd6_output(ifp, ifp, m0, dst, NULL, NULL); } else { in6_ifstat_inc(ifp, ifs6_in_toobig); - if (r->rt != PF_DUPTO) + if (r->rt != PF_DUPTO) { icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu); - else + } else { goto bad; + } } done: return; bad: - if (m0) + if (m0) { m_freem(m0); + } goto done; } #endif /* INET6 */ @@ -9061,7 +9491,7 @@ pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p, (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PSEUDO_HDR) && (*pbuf->pb_csum_data ^ 0xffff) == 0) { - return (0); + return 0; } break; case IPPROTO_ICMP: @@ -9070,45 +9500,51 @@ pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p, #endif /* INET6 */ break; default: - return (1); + return 1; + } + if (off < (int)sizeof(struct ip) || len < (int)sizeof(struct udphdr)) { + return 1; + } + if (pbuf->pb_packet_len < (unsigned)(off + len)) { + return 1; } - if (off < (int)sizeof (struct ip) || len < (int)sizeof (struct udphdr)) - return (1); - if (pbuf->pb_packet_len < (unsigned)(off + len)) - return (1); switch (af) { #if INET case AF_INET: if (p == IPPROTO_ICMP) { #if 0 - if (m->m_len < off) - return (1); + if (m->m_len < off) { + return 1; + } m->m_data += off; m->m_len -= off; sum = in_cksum(m, len); m->m_data -= off; m->m_len += off; #else - if (pbuf->pb_contig_len < (unsigned)off) - return (1); + if (pbuf->pb_contig_len < (unsigned)off) { + return 1; + } sum = pbuf_inet_cksum(pbuf, 0, off, len); #endif } else { - if (pbuf->pb_contig_len < (int)sizeof (struct ip)) - return (1); + if (pbuf->pb_contig_len < (int)sizeof(struct ip)) { + return 1; + } sum = pbuf_inet_cksum(pbuf, p, off, len); } break; #endif /* INET */ #if INET6 case AF_INET6: - if (pbuf->pb_contig_len < (int)sizeof (struct ip6_hdr)) - return (1); + if (pbuf->pb_contig_len < (int)sizeof(struct ip6_hdr)) { + 
return 1; + } sum = pbuf_inet6_cksum(pbuf, p, off, len); break; #endif /* INET6 */ default: - return (1); + return 1; } if (sum) { switch (p) { @@ -9127,19 +9563,19 @@ pf_check_proto_cksum(pbuf_t *pbuf, int off, int len, u_int8_t p, break; #endif /* INET6 */ } - return (1); + return 1; } - return (0); + return 0; } #if INET -#define PF_APPLE_UPDATE_PDESC_IPv4() \ - do { \ - if (pbuf && pd.mp && pbuf != pd.mp) { \ - pbuf = pd.mp; \ - h = pbuf->pb_data; \ - pd.pf_mtag = pf_get_mtag_pbuf(pbuf); \ - } \ +#define PF_APPLE_UPDATE_PDESC_IPv4() \ + do { \ + if (pbuf && pd.mp && pbuf != pd.mp) { \ + pbuf = pd.mp; \ + h = pbuf->pb_data; \ + pd.pf_mtag = pf_get_mtag_pbuf(pbuf); \ + } \ } while (0) int @@ -9158,10 +9594,11 @@ pf_test_mbuf(int dir, struct ifnet *ifp, struct mbuf **m0, *m0 = pbuf->pb_mbuf; pbuf->pb_mbuf = NULL; pbuf_destroy(pbuf); - } else + } else { *m0 = NULL; + } - return (rv); + return rv; } int @@ -9171,42 +9608,45 @@ pf_test(int dir, struct ifnet *ifp, pbuf_t **pbufp, #if !DUMMYNET #pragma unused(fwa) #endif - struct pfi_kif *kif; - u_short action = PF_PASS, reason = 0, log = 0; - pbuf_t *pbuf = *pbufp; - struct ip *h = 0; - struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; - struct pf_state *s = NULL; - struct pf_state_key *sk = NULL; - struct pf_ruleset *ruleset = NULL; - struct pf_pdesc pd; - int off, dirndx, pqid = 0; + struct pfi_kif *kif; + u_short action = PF_PASS, reason = 0, log = 0; + pbuf_t *pbuf = *pbufp; + struct ip *h = 0; + struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; + struct pf_state *s = NULL; + struct pf_state_key *sk = NULL; + struct pf_ruleset *ruleset = NULL; + struct pf_pdesc pd; + int off, dirndx, pqid = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (!pf_status.running) - return (PF_PASS); + if (!pf_status.running) { + return PF_PASS; + } - memset(&pd, 0, sizeof (pd)); + memset(&pd, 0, sizeof(pd)); if ((pd.pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test: pf_get_mtag_pbuf returned NULL\n")); - return (PF_DROP); + return PF_DROP; } - if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED) - return (PF_PASS); + if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED) { + return PF_PASS; + } kif = (struct pfi_kif *)ifp->if_pf_kif; if (kif == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test: kif == NULL, if_name %s\n", ifp->if_name)); - return (PF_DROP); + return PF_DROP; + } + if (kif->pfik_flags & PFI_IFLAG_SKIP) { + return PF_PASS; } - if (kif->pfik_flags & PFI_IFLAG_SKIP) - return (PF_PASS); /* initialize enough of pd for the done label */ h = pbuf->pb_data; @@ -9226,7 +9666,7 @@ pf_test(int dir, struct ifnet *ifp, pbuf_t **pbufp, pd.tot_len = ntohs(h->ip_len); pd.eh = eh; - if (pbuf->pb_packet_len < (int)sizeof (*h)) { + if (pbuf->pb_packet_len < (int)sizeof(*h)) { action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); log = 1; @@ -9234,8 +9674,9 @@ pf_test(int dir, struct ifnet *ifp, pbuf_t **pbufp, } #if DUMMYNET - if (fwa != NULL && fwa->fwa_pf_rule != NULL) + if (fwa != NULL && fwa->fwa_pf_rule != NULL) { goto nonormalize; + } #endif /* DUMMYNET */ /* We do IP header normalization and packet reassembly here */ @@ -9252,7 +9693,7 @@ nonormalize: h = pbuf->pb_data; off = h->ip_hl << 2; - if (off < (int)sizeof (*h)) { + if (off < (int)sizeof(*h)) { action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); log = 1; @@ -9290,7 +9731,7 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ 
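Note (not part of the patch): the off = h->ip_hl << 2 computation and the off < sizeof(*h) test above are the standard IHL sanity check pf_test() performs before dispatching on h->ip_p. A sketch of the same check as a hypothetical helper, not pf's API:

    #include <netinet/in.h>
    #include <netinet/ip.h>
    #include <stddef.h>

    static int
    l4_offset(const struct ip *h, size_t pktlen)
    {
        int off = h->ip_hl << 2;     /* IHL counts 32-bit words */

        if (off < (int)sizeof(*h) || /* IHL below the 20-byte minimum */
            pktlen < (size_t)off)    /* options run past the packet */
            return -1;               /* caller sets PFRES_SHORT and drops */
        return off;
    }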
action = pf_test_fragment(&r, dir, kif, pbuf, h, @@ -9299,38 +9740,42 @@ nonormalize: } switch (h->ip_p) { - case IPPROTO_TCP: { - struct tcphdr th; + struct tcphdr th; pd.hdr.tcp = &th; - if (!pf_pull_hdr(pbuf, off, &th, sizeof (th), + if (!pf_pull_hdr(pbuf, off, &th, sizeof(th), &action, &reason, AF_INET)) { log = action != PF_PASS; goto done; } pd.p_len = pd.tot_len - off - (th.th_off << 2); - if ((th.th_flags & TH_ACK) && pd.p_len == 0) + if ((th.th_flags & TH_ACK) && pd.p_len == 0) { pqid = 1; + } #if DUMMYNET /* Traffic goes through dummynet first */ action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_normalize_tcp(dir, kif, pbuf, 0, off, h, &pd); - if (pd.lmw < 0) + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv4(); - if (action == PF_DROP) + if (action == PF_DROP) { goto done; + } action = pf_test_state_tcp(&s, dir, kif, pbuf, off, h, &pd, &reason); - if (action == PF_NAT64) + if (action == PF_NAT64) { goto done; - if (pd.lmw < 0) + } + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv4(); if (action == PF_PASS) { #if NPFSYNC @@ -9339,24 +9784,25 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } case IPPROTO_UDP: { - struct udphdr uh; + struct udphdr uh; pd.hdr.udp = &uh; - if (!pf_pull_hdr(pbuf, off, &uh, sizeof (uh), + if (!pf_pull_hdr(pbuf, off, &uh, sizeof(uh), &action, &reason, AF_INET)) { log = action != PF_PASS; goto done; } if (uh.uh_dport == 0 || ntohs(uh.uh_ulen) > pbuf->pb_packet_len - off || - ntohs(uh.uh_ulen) < sizeof (struct udphdr)) { + ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); goto done; @@ -9366,15 +9812,17 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_state_udp(&s, dir, kif, pbuf, off, h, &pd, &reason); - if (action == PF_NAT64) + if (action == PF_NAT64) { goto done; - if (pd.lmw < 0) + } + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv4(); if (action == PF_PASS) { #if NPFSYNC @@ -9383,14 +9831,15 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } case IPPROTO_ICMP: { - struct icmp ih; + struct icmp ih; pd.hdr.icmp = &ih; if (!pf_pull_hdr(pbuf, off, &ih, ICMP_MINLEN, @@ -9403,15 +9852,17 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_state_icmp(&s, dir, kif, pbuf, off, h, &pd, &reason); - if (action == PF_NAT64) + if (action == PF_NAT64) { goto done; - if (pd.lmw < 0) + } + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv4(); if (action == PF_PASS) { #if NPFSYNC @@ -9420,17 +9871,18 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } case IPPROTO_ESP: { - struct pf_esp_hdr esp; + struct pf_esp_hdr esp; pd.hdr.esp = &esp; - if (!pf_pull_hdr(pbuf, off, &esp, 
sizeof (esp), &action, &reason, + if (!pf_pull_hdr(pbuf, off, &esp, sizeof(esp), &action, &reason, AF_INET)) { log = action != PF_PASS; goto done; @@ -9440,12 +9892,13 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_state_esp(&s, dir, kif, off, &pd); - if (pd.lmw < 0) + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv4(); if (action == PF_PASS) { #if NPFSYNC @@ -9454,16 +9907,17 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } case IPPROTO_GRE: { - struct pf_grev1_hdr grev1; + struct pf_grev1_hdr grev1; pd.hdr.grev1 = &grev1; - if (!pf_pull_hdr(pbuf, off, &grev1, sizeof (grev1), &action, + if (!pf_pull_hdr(pbuf, off, &grev1, sizeof(grev1), &action, &reason, AF_INET)) { log = (action != PF_PASS); goto done; @@ -9473,7 +9927,7 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 && @@ -9486,7 +9940,9 @@ nonormalize: } pd.proto_variant = PF_GRE_PPTP_VARIANT; action = pf_test_state_grev1(&s, dir, kif, off, &pd); - if (pd.lmw < 0) goto done; + if (pd.lmw < 0) { + goto done; + } PF_APPLE_UPDATE_PDESC_IPv4(); if (action == PF_PASS) { #if NPFSYNC @@ -9499,8 +9955,9 @@ nonormalize: } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); - if (action == PF_PASS) + if (action == PF_PASS) { break; + } } } @@ -9513,12 +9970,13 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_state_other(&s, dir, kif, &pd); - if (pd.lmw < 0) + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv4(); if (action == PF_PASS) { #if NPFSYNC @@ -9527,16 +9985,17 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } done: if (action == PF_NAT64) { *pbufp = NULL; - return (action); + return action; } *pbufp = pd.mp; @@ -9554,9 +10013,10 @@ done: } if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) || - (pd.pktflags & PKTF_FLOW_ID)) + (pd.pktflags & PKTF_FLOW_ID)) { (void) pf_tag_packet(pbuf, pd.pf_mtag, s ? s->tag : 0, r->rtableid, &pd); + } if (action == PF_PASS) { #if PF_ECN @@ -9575,13 +10035,14 @@ done: * see tcp_input() and in_pcblookup_listen(). 
*/ if (dir == PF_IN && (pd.proto == IPPROTO_TCP || - pd.proto == IPPROTO_UDP) && s != NULL && - s->nat_rule.ptr != NULL && - (s->nat_rule.ptr->action == PF_RDR || - s->nat_rule.ptr->action == PF_BINAT) && - (ntohl(pd.dst->v4addr.s_addr) >> IN_CLASSA_NSHIFT) - == IN_LOOPBACKNET) + pd.proto == IPPROTO_UDP) && s != NULL && + s->nat_rule.ptr != NULL && + (s->nat_rule.ptr->action == PF_RDR || + s->nat_rule.ptr->action == PF_BINAT) && + (ntohl(pd.dst->v4addr.s_addr) >> IN_CLASSA_NSHIFT) + == IN_LOOPBACKNET) { pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST; + } } } @@ -9589,10 +10050,11 @@ done: struct pf_rule *lr; if (s != NULL && s->nat_rule.ptr != NULL && - s->nat_rule.ptr->log & PF_LOG_ALL) + s->nat_rule.ptr->log & PF_LOG_ALL) { lr = s->nat_rule.ptr; - else + } else { lr = r; + } PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, reason, lr, a, ruleset, &pd); } @@ -9639,28 +10101,32 @@ done: tr = nr; x = (sk == NULL || sk->direction == dir) ? &pd.baddr : &pd.naddr; - } else + } else { x = (sk == NULL || sk->direction == dir) ? &pd.naddr : &pd.baddr; + } if (x == &pd.baddr || s == NULL) { /* we need to change the address */ - if (dir == PF_OUT) + if (dir == PF_OUT) { pd.src = x; - else + } else { pd.dst = x; + } } } - if (tr->src.addr.type == PF_ADDR_TABLE) + if (tr->src.addr.type == PF_ADDR_TABLE) { pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL || sk->direction == dir) ? pd.src : pd.dst, pd.af, pd.tot_len, dir == PF_OUT, r->action == PF_PASS, tr->src.neg); - if (tr->dst.addr.type == PF_ADDR_TABLE) + } + if (tr->dst.addr.type == PF_ADDR_TABLE) { pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL || sk->direction == dir) ? pd.dst : pd.src, pd.af, pd.tot_len, dir == PF_OUT, r->action == PF_PASS, tr->dst.neg); + } } VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf); @@ -9674,7 +10140,7 @@ done: if (action == PF_DROP) { pbuf_destroy(*pbufp); *pbufp = NULL; - return (PF_DROP); + return PF_DROP; } *pbufp = pbuf; @@ -9684,21 +10150,22 @@ done: pbuf_destroy(*pbufp); *pbufp = NULL; action = PF_PASS; - } else if (r->rt) + } else if (r->rt) { /* pf_route can free the pbuf causing *pbufp to become NULL */ pf_route(pbufp, r, dir, kif->pfik_ifp, s, &pd); + } - return (action); + return action; } #endif /* INET */ #if INET6 -#define PF_APPLE_UPDATE_PDESC_IPv6() \ - do { \ - if (pbuf && pd.mp && pbuf != pd.mp) { \ - pbuf = pd.mp; \ - } \ - h = pbuf->pb_data; \ +#define PF_APPLE_UPDATE_PDESC_IPv6() \ + do { \ + if (pbuf && pd.mp && pbuf != pd.mp) { \ + pbuf = pd.mp; \ + } \ + h = pbuf->pb_data; \ } while (0) int @@ -9717,10 +10184,11 @@ pf_test6_mbuf(int dir, struct ifnet *ifp, struct mbuf **m0, *m0 = pbuf->pb_mbuf; pbuf->pb_mbuf = NULL; pbuf_destroy(pbuf); - } else + } else { *m0 = NULL; + } - return (rv); + return rv; } int @@ -9730,43 +10198,46 @@ pf_test6(int dir, struct ifnet *ifp, pbuf_t **pbufp, #if !DUMMYNET #pragma unused(fwa) #endif - struct pfi_kif *kif; - u_short action = PF_PASS, reason = 0, log = 0; - pbuf_t *pbuf = *pbufp; - struct ip6_hdr *h; - struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; - struct pf_state *s = NULL; - struct pf_state_key *sk = NULL; - struct pf_ruleset *ruleset = NULL; - struct pf_pdesc pd; - int off, terminal = 0, dirndx, rh_cnt = 0; - u_int8_t nxt; + struct pfi_kif *kif; + u_short action = PF_PASS, reason = 0, log = 0; + pbuf_t *pbuf = *pbufp; + struct ip6_hdr *h; + struct pf_rule *a = NULL, *r = &pf_default_rule, *tr, *nr; + struct pf_state *s = NULL; + struct pf_state_key *sk = NULL; + struct pf_ruleset *ruleset = NULL; + struct pf_pdesc pd; + int off, 
terminal = 0, dirndx, rh_cnt = 0; + u_int8_t nxt; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (!pf_status.running) - return (PF_PASS); + if (!pf_status.running) { + return PF_PASS; + } - memset(&pd, 0, sizeof (pd)); + memset(&pd, 0, sizeof(pd)); if ((pd.pf_mtag = pf_get_mtag_pbuf(pbuf)) == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test6: pf_get_mtag_pbuf returned NULL\n")); - return (PF_DROP); + return PF_DROP; } - if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED) - return (PF_PASS); + if (pd.pf_mtag->pftag_flags & PF_TAG_GENERATED) { + return PF_PASS; + } kif = (struct pfi_kif *)ifp->if_pf_kif; if (kif == NULL) { DPFPRINTF(PF_DEBUG_URGENT, ("pf_test6: kif == NULL, if_name %s\n", ifp->if_name)); - return (PF_DROP); + return PF_DROP; + } + if (kif->pfik_flags & PFI_IFLAG_SKIP) { + return PF_PASS; } - if (kif->pfik_flags & PFI_IFLAG_SKIP) - return (PF_PASS); h = pbuf->pb_data; @@ -9795,7 +10266,7 @@ pf_test6(int dir, struct ifnet *ifp, pbuf_t **pbufp, pd.pktflags = (*pbuf->pb_flags & PKTF_FLOW_MASK); } - if (pbuf->pb_packet_len < (int)sizeof (*h)) { + if (pbuf->pb_packet_len < (int)sizeof(*h)) { action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); log = 1; @@ -9803,8 +10274,9 @@ pf_test6(int dir, struct ifnet *ifp, pbuf_t **pbufp, } #if DUMMYNET - if (fwa != NULL && fwa->fwa_pf_rule != NULL) + if (fwa != NULL && fwa->fwa_pf_rule != NULL) { goto nonormalize; + } #endif /* DUMMYNET */ /* We do IP header normalization and packet reassembly here */ @@ -9826,7 +10298,7 @@ nonormalize: */ if (htons(h->ip6_plen) == 0) { action = PF_DROP; - REASON_SET(&reason, PFRES_NORM); /*XXX*/ + REASON_SET(&reason, PFRES_NORM); /*XXX*/ goto done; } #endif @@ -9839,10 +10311,10 @@ nonormalize: pd.af = AF_INET6; pd.tos = 0; pd.ttl = h->ip6_hlim; - pd.tot_len = ntohs(h->ip6_plen) + sizeof (struct ip6_hdr); + pd.tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr); pd.eh = eh; - off = ((caddr_t)h - (caddr_t)pbuf->pb_data) + sizeof (struct ip6_hdr); + off = ((caddr_t)h - (caddr_t)pbuf->pb_data) + sizeof(struct ip6_hdr); pd.proto = h->ip6_nxt; pd.proto_variant = 0; pd.mp = pbuf; @@ -9871,7 +10343,7 @@ nonormalize: fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_fragment(&r, dir, kif, pbuf, h, &pd, @@ -9884,13 +10356,13 @@ nonormalize: } case IPPROTO_ROUTING: ++rh_cnt; - /* FALL THROUGH */ + /* FALL THROUGH */ case IPPROTO_AH: case IPPROTO_HOPOPTS: case IPPROTO_DSTOPTS: { /* get next header and header length */ - struct ip6_ext opt6; + struct ip6_ext opt6; if (!pf_pull_hdr(pbuf, off, &opt6, sizeof(opt6), NULL, &reason, pd.af)) { @@ -9900,10 +10372,11 @@ nonormalize: log = 1; goto done; } - if (pd.proto == IPPROTO_AH) + if (pd.proto == IPPROTO_AH) { off += (opt6.ip6e_len + 2) * 4; - else + } else { off += (opt6.ip6e_len + 1) * 8; + } nxt = opt6.ip6e_nxt; /* goto the next header */ break; @@ -9916,12 +10389,11 @@ nonormalize: switch (pd.proto) { - case IPPROTO_TCP: { - struct tcphdr th; + struct tcphdr th; pd.hdr.tcp = &th; - if (!pf_pull_hdr(pbuf, off, &th, sizeof (th), + if (!pf_pull_hdr(pbuf, off, &th, sizeof(th), &action, &reason, AF_INET6)) { log = action != PF_PASS; goto done; @@ -9932,21 +10404,25 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_normalize_tcp(dir, kif, pbuf, 0, off, h, &pd); - if (pd.lmw < 0) + if (pd.lmw < 0) { goto done; + } 
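/*
 * [Editorial annotation -- not part of the original patch.] The bulk of
 * the hunks in this file are a mechanical style sweep rather than a
 * behavioral change: `return (x);` loses its parentheses, `sizeof (x)`
 * loses its space, and every single-statement `if`/`else` body gains
 * braces. The usual motivation for mandatory braces is to rule out
 * misleading-indentation bugs of the "goto fail" variety. A toy
 * before/after sketch of one function under both conventions (names
 * are illustrative, not from xnu):
 */
static int
classify_old(int v)
{
	if (v < 0)
		return (-1);
	return (v);
}

static int
classify_new(int v)
{
	if (v < 0) {
		return -1;
	}
	return v;
}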
PF_APPLE_UPDATE_PDESC_IPv6(); - if (action == PF_DROP) + if (action == PF_DROP) { goto done; + } action = pf_test_state_tcp(&s, dir, kif, pbuf, off, h, &pd, &reason); - if (action == PF_NAT64) + if (action == PF_NAT64) { goto done; - if (pd.lmw < 0) + } + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv6(); if (action == PF_PASS) { #if NPFSYNC @@ -9955,24 +10431,25 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } case IPPROTO_UDP: { - struct udphdr uh; + struct udphdr uh; pd.hdr.udp = &uh; - if (!pf_pull_hdr(pbuf, off, &uh, sizeof (uh), + if (!pf_pull_hdr(pbuf, off, &uh, sizeof(uh), &action, &reason, AF_INET6)) { log = action != PF_PASS; goto done; } if (uh.uh_dport == 0 || ntohs(uh.uh_ulen) > pbuf->pb_packet_len - off || - ntohs(uh.uh_ulen) < sizeof (struct udphdr)) { + ntohs(uh.uh_ulen) < sizeof(struct udphdr)) { action = PF_DROP; REASON_SET(&reason, PFRES_SHORT); goto done; @@ -9982,15 +10459,17 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_state_udp(&s, dir, kif, pbuf, off, h, &pd, &reason); - if (action == PF_NAT64) + if (action == PF_NAT64) { goto done; - if (pd.lmw < 0) + } + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv6(); if (action == PF_PASS) { #if NPFSYNC @@ -9999,17 +10478,18 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } case IPPROTO_ICMPV6: { - struct icmp6_hdr ih; + struct icmp6_hdr ih; pd.hdr.icmp6 = &ih; - if (!pf_pull_hdr(pbuf, off, &ih, sizeof (ih), + if (!pf_pull_hdr(pbuf, off, &ih, sizeof(ih), &action, &reason, AF_INET6)) { log = action != PF_PASS; goto done; @@ -10019,15 +10499,17 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_state_icmp(&s, dir, kif, pbuf, off, h, &pd, &reason); - if (action == PF_NAT64) + if (action == PF_NAT64) { goto done; - if (pd.lmw < 0) + } + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv6(); if (action == PF_PASS) { #if NPFSYNC @@ -10036,17 +10518,18 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } case IPPROTO_ESP: { - struct pf_esp_hdr esp; + struct pf_esp_hdr esp; pd.hdr.esp = &esp; - if (!pf_pull_hdr(pbuf, off, &esp, sizeof (esp), &action, + if (!pf_pull_hdr(pbuf, off, &esp, sizeof(esp), &action, &reason, AF_INET6)) { log = action != PF_PASS; goto done; @@ -10056,12 +10539,13 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_state_esp(&s, dir, kif, off, &pd); - if (pd.lmw < 0) + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv6(); if (action == PF_PASS) { #if NPFSYNC @@ -10070,17 +10554,18 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, 
off, h, &pd, &a, &ruleset, NULL); + } break; } case IPPROTO_GRE: { - struct pf_grev1_hdr grev1; + struct pf_grev1_hdr grev1; pd.hdr.grev1 = &grev1; - if (!pf_pull_hdr(pbuf, off, &grev1, sizeof (grev1), &action, + if (!pf_pull_hdr(pbuf, off, &grev1, sizeof(grev1), &action, &reason, AF_INET6)) { log = (action != PF_PASS); goto done; @@ -10090,7 +10575,7 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ if ((ntohs(grev1.flags) & PF_GRE_FLAG_VERSION_MASK) == 1 && @@ -10102,8 +10587,9 @@ nonormalize: goto done; } action = pf_test_state_grev1(&s, dir, kif, off, &pd); - if (pd.lmw < 0) + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv6(); if (action == PF_PASS) { #if NPFSYNC @@ -10116,8 +10602,9 @@ nonormalize: } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); - if (action == PF_PASS) + if (action == PF_PASS) { break; + } } } @@ -10130,12 +10617,13 @@ nonormalize: action = pf_test_dummynet(&r, dir, kif, &pbuf, &pd, fwa); if (action == PF_DROP || pbuf == NULL) { *pbufp = NULL; - return (action); + return action; } #endif /* DUMMYNET */ action = pf_test_state_other(&s, dir, kif, &pd); - if (pd.lmw < 0) + if (pd.lmw < 0) { goto done; + } PF_APPLE_UPDATE_PDESC_IPv6(); if (action == PF_PASS) { #if NPFSYNC @@ -10144,16 +10632,17 @@ nonormalize: r = s->rule.ptr; a = s->anchor.ptr; log = s->log; - } else if (s == NULL) + } else if (s == NULL) { action = pf_test_rule(&r, &s, dir, kif, pbuf, off, h, &pd, &a, &ruleset, NULL); + } break; } done: if (action == PF_NAT64) { *pbufp = NULL; - return (action); + return action; } *pbufp = pd.mp; @@ -10171,9 +10660,10 @@ done: } if ((s && s->tag) || PF_RTABLEID_IS_VALID(r->rtableid) || - (pd.pktflags & PKTF_FLOW_ID)) + (pd.pktflags & PKTF_FLOW_ID)) { (void) pf_tag_packet(pbuf, pd.pf_mtag, s ? s->tag : 0, r->rtableid, &pd); + } if (action == PF_PASS) { #if PF_ECN @@ -10189,9 +10679,10 @@ done: pd.proto == IPPROTO_UDP) && s != NULL && s->nat_rule.ptr != NULL && (s->nat_rule.ptr->action == PF_RDR || - s->nat_rule.ptr->action == PF_BINAT) && - IN6_IS_ADDR_LOOPBACK(&pd.dst->v6addr)) + s->nat_rule.ptr->action == PF_BINAT) && + IN6_IS_ADDR_LOOPBACK(&pd.dst->v6addr)) { pd.pf_mtag->pftag_flags |= PF_TAG_TRANSLATE_LOCALHOST; + } } } @@ -10200,10 +10691,11 @@ done: struct pf_rule *lr; if (s != NULL && s->nat_rule.ptr != NULL && - s->nat_rule.ptr->log & PF_LOG_ALL) + s->nat_rule.ptr->log & PF_LOG_ALL) { lr = s->nat_rule.ptr; - else + } else { lr = r; + } PFLOG_PACKET(kif, h, pbuf, AF_INET6, dir, reason, lr, a, ruleset, &pd); } @@ -10255,22 +10747,25 @@ done: &pd.naddr : &pd.baddr; } if (x == &pd.baddr || s == NULL) { - if (dir == PF_OUT) + if (dir == PF_OUT) { pd.src = x; - else + } else { pd.dst = x; + } } } - if (tr->src.addr.type == PF_ADDR_TABLE) + if (tr->src.addr.type == PF_ADDR_TABLE) { pfr_update_stats(tr->src.addr.p.tbl, (sk == NULL || sk->direction == dir) ? pd.src : pd.dst, pd.af, pd.tot_len, dir == PF_OUT, r->action == PF_PASS, tr->src.neg); - if (tr->dst.addr.type == PF_ADDR_TABLE) + } + if (tr->dst.addr.type == PF_ADDR_TABLE) { pfr_update_stats(tr->dst.addr.p.tbl, (sk == NULL || sk->direction == dir) ? 
pd.dst : pd.src, pd.af, pd.tot_len, dir == PF_OUT, r->action == PF_PASS, tr->dst.neg); + } } #if 0 @@ -10278,9 +10773,10 @@ done: m_freem(*m0); *m0 = NULL; action = PF_PASS; - } else if (r->rt) + } else if (r->rt) { /* pf_route6 can free the mbuf causing *m0 to become NULL */ pf_route6(m0, r, dir, kif->pfik_ifp, s, &pd); + } #else VERIFY(pbuf == NULL || pd.mp == NULL || pd.mp == pbuf); @@ -10293,7 +10789,7 @@ done: if (action == PF_DROP) { pbuf_destroy(*pbufp); *pbufp = NULL; - return (PF_DROP); + return PF_DROP; } *pbufp = pbuf; @@ -10309,7 +10805,7 @@ done: } #endif /* 0 */ - return (action); + return action; } #endif /* INET6 */ @@ -10317,7 +10813,7 @@ static int pf_check_congestion(struct ifqueue *ifq) { #pragma unused(ifq) - return (0); + return 0; } void @@ -10325,7 +10821,7 @@ pool_init(struct pool *pp, size_t size, unsigned int align, unsigned int ioff, int flags, const char *wchan, void *palloc) { #pragma unused(align, ioff, flags, palloc) - bzero(pp, sizeof (*pp)); + bzero(pp, sizeof(*pp)); pp->pool_zone = zinit(size, 1024 * size, PAGE_SIZE, wchan); if (pp->pool_zone != NULL) { zone_change(pp->pool_zone, Z_EXPAND, TRUE); @@ -10345,7 +10841,7 @@ pool_destroy(struct pool *pp) void pool_sethiwat(struct pool *pp, int n) { - pp->pool_hiwat = n; /* Currently unused */ + pp->pool_hiwat = n; /* Currently unused */ } void @@ -10368,7 +10864,7 @@ pool_get(struct pool *pp, int flags) pp->pool_name != NULL ? pp->pool_name : "unknown", pp->pool_limit)); pp->pool_fails++; - return (NULL); + return NULL; } buf = zalloc_canblock(pp->pool_zone, (flags & (PR_NOWAIT | PR_WAITOK))); @@ -10376,7 +10872,7 @@ pool_get(struct pool *pp, int flags) pp->pool_count++; VERIFY(pp->pool_count != 0); } - return (buf); + return buf; } void @@ -10392,27 +10888,25 @@ pool_put(struct pool *pp, void *v) struct pf_mtag * pf_find_mtag_pbuf(pbuf_t *pbuf) { - - return (pbuf->pb_pftag); + return pbuf->pb_pftag; } struct pf_mtag * pf_find_mtag(struct mbuf *m) { - - return (m_pftag(m)); + return m_pftag(m); } struct pf_mtag * pf_get_mtag(struct mbuf *m) { - return (pf_find_mtag(m)); + return pf_find_mtag(m); } struct pf_mtag * pf_get_mtag_pbuf(pbuf_t *pbuf) { - return (pf_find_mtag_pbuf(pbuf)); + return pf_find_mtag_pbuf(pbuf); } uint64_t @@ -10421,7 +10915,7 @@ pf_time_second(void) struct timeval t; microuptime(&t); - return (t.tv_sec); + return t.tv_sec; } uint64_t @@ -10430,7 +10924,7 @@ pf_calendar_time_second(void) struct timeval t; getmicrotime(&t); - return (t.tv_sec); + return t.tv_sec; } static void * @@ -10439,17 +10933,19 @@ hook_establish(struct hook_desc_head *head, int tail, hook_fn_t fn, void *arg) struct hook_desc *hd; hd = _MALLOC(sizeof(*hd), M_DEVBUF, M_WAITOK); - if (hd == NULL) - return (NULL); + if (hd == NULL) { + return NULL; + } hd->hd_fn = fn; hd->hd_arg = arg; - if (tail) + if (tail) { TAILQ_INSERT_TAIL(head, hd, hd_list); - else + } else { TAILQ_INSERT_HEAD(head, hd, hd_list); + } - return (hd); + return hd; } static void @@ -10458,16 +10954,19 @@ hook_runloop(struct hook_desc_head *head, int flags) struct hook_desc *hd; if (!(flags & HOOK_REMOVE)) { - if (!(flags & HOOK_ABORT)) + if (!(flags & HOOK_ABORT)) { TAILQ_FOREACH(hd, head, hd_list) - hd->hd_fn(hd->hd_arg); + hd->hd_fn(hd->hd_arg); + } } else { while (!!(hd = TAILQ_FIRST(head))) { TAILQ_REMOVE(head, hd, hd_list); - if (!(flags & HOOK_ABORT)) + if (!(flags & HOOK_ABORT)) { hd->hd_fn(hd->hd_arg); - if (flags & HOOK_FREE) + } + if (flags & HOOK_FREE) { _FREE(hd, M_DEVBUF); + } } } } diff --git a/bsd/net/pf_if.c b/bsd/net/pf_if.c index 
f67d06a5d..fad147b5a 100644 --- a/bsd/net/pf_if.c +++ b/bsd/net/pf_if.c @@ -87,14 +87,14 @@ #include -struct pfi_kif *pfi_all = NULL; +struct pfi_kif *pfi_all = NULL; -static struct pool pfi_addr_pl; -static struct pfi_ifhead pfi_ifs; -static u_int32_t pfi_update = 1; -static struct pfr_addr *pfi_buffer; -static int pfi_buffer_cnt; -static int pfi_buffer_max; +static struct pool pfi_addr_pl; +static struct pfi_ifhead pfi_ifs; +static u_int32_t pfi_update = 1; +static struct pfr_addr *pfi_buffer; +static int pfi_buffer_cnt; +static int pfi_buffer_max; __private_extern__ void pfi_kifaddr_update(void *); @@ -110,25 +110,27 @@ static int pfi_unmask(void *); RB_PROTOTYPE_SC(static, pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare); RB_GENERATE(pfi_ifhead, pfi_kif, pfik_tree, pfi_if_compare); -#define PFI_BUFFER_MAX 0x10000 -#define PFI_MTYPE M_IFADDR +#define PFI_BUFFER_MAX 0x10000 +#define PFI_MTYPE M_IFADDR -#define IFG_ALL "ALL" +#define IFG_ALL "ALL" void pfi_initialize(void) { - if (pfi_all != NULL) /* already initialized */ + if (pfi_all != NULL) { /* already initialized */ return; + } - pool_init(&pfi_addr_pl, sizeof (struct pfi_dynaddr), 0, 0, 0, + pool_init(&pfi_addr_pl, sizeof(struct pfi_dynaddr), 0, 0, 0, "pfiaddrpl", NULL); pfi_buffer_max = 64; - pfi_buffer = _MALLOC(pfi_buffer_max * sizeof (*pfi_buffer), + pfi_buffer = _MALLOC(pfi_buffer_max * sizeof(*pfi_buffer), PFI_MTYPE, M_WAITOK); - if ((pfi_all = pfi_kif_get(IFG_ALL)) == NULL) + if ((pfi_all = pfi_kif_get(IFG_ALL)) == NULL) { panic("pfi_kif_get for pfi_all failed"); + } } #if 0 @@ -143,25 +145,27 @@ pfi_destroy(void) struct pfi_kif * pfi_kif_get(const char *kif_name) { - struct pfi_kif *kif; - struct pfi_kif_cmp s; + struct pfi_kif *kif; + struct pfi_kif_cmp s; - bzero(&s, sizeof (s)); - strlcpy(s.pfik_name, kif_name, sizeof (s.pfik_name)); + bzero(&s, sizeof(s)); + strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name)); if ((kif = RB_FIND(pfi_ifhead, &pfi_ifs, - (struct pfi_kif *)(void *)&s)) != NULL) - return (kif); + (struct pfi_kif *)(void *)&s)) != NULL) { + return kif; + } /* create new one */ - if ((kif = _MALLOC(sizeof (*kif), PFI_MTYPE, M_WAITOK|M_ZERO)) == NULL) - return (NULL); + if ((kif = _MALLOC(sizeof(*kif), PFI_MTYPE, M_WAITOK | M_ZERO)) == NULL) { + return NULL; + } - strlcpy(kif->pfik_name, kif_name, sizeof (kif->pfik_name)); + strlcpy(kif->pfik_name, kif_name, sizeof(kif->pfik_name)); kif->pfik_tzero = pf_calendar_time_second(); TAILQ_INIT(&kif->pfik_dynaddrs); RB_INSERT(pfi_ifhead, &pfi_ifs, kif); - return (kif); + return kif; } void @@ -182,8 +186,9 @@ pfi_kif_ref(struct pfi_kif *kif, enum pfi_kif_refs what) void pfi_kif_unref(struct pfi_kif *kif, enum pfi_kif_refs what) { - if (kif == NULL) + if (kif == NULL) { return; + } switch (what) { case PFI_KIF_REF_NONE: @@ -206,11 +211,13 @@ pfi_kif_unref(struct pfi_kif *kif, enum pfi_kif_refs what) panic("pfi_kif_unref with unknown type"); } - if (kif->pfik_ifp != NULL || kif == pfi_all) + if (kif->pfik_ifp != NULL || kif == pfi_all) { return; + } - if (kif->pfik_rules || kif->pfik_states) + if (kif->pfik_rules || kif->pfik_states) { return; + } RB_REMOVE(pfi_ifhead, &pfi_ifs, kif); _FREE(kif, PFI_MTYPE); @@ -219,11 +226,11 @@ pfi_kif_unref(struct pfi_kif *kif, enum pfi_kif_refs what) int pfi_kif_match(struct pfi_kif *rule_kif, struct pfi_kif *packet_kif) { + if (rule_kif == NULL || rule_kif == packet_kif) { + return 1; + } - if (rule_kif == NULL || rule_kif == packet_kif) - return (1); - - return (0); + return 0; } void @@ -234,8 +241,9 @@ pfi_attach_ifnet(struct 
ifnet *ifp) LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); pfi_update++; - if ((kif = pfi_kif_get(if_name(ifp))) == NULL) + if ((kif = pfi_kif_get(if_name(ifp))) == NULL) { panic("pfi_kif_get failed"); + } ifnet_lock_exclusive(ifp); kif->pfik_ifp = ifp; @@ -251,12 +259,13 @@ pfi_attach_ifnet(struct ifnet *ifp) void pfi_detach_ifnet(struct ifnet *ifp) { - struct pfi_kif *kif; + struct pfi_kif *kif; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if ((kif = (struct pfi_kif *)ifp->if_pf_kif) == NULL) + if ((kif = (struct pfi_kif *)ifp->if_pf_kif) == NULL) { return; + } pfi_update++; pfi_kif_update(kif); @@ -277,51 +286,54 @@ pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af) case AF_INET: switch (dyn->pfid_acnt4) { case 0: - return (0); + return 0; case 1: - return (PF_MATCHA(0, &dyn->pfid_addr4, - &dyn->pfid_mask4, a, AF_INET)); + return PF_MATCHA(0, &dyn->pfid_addr4, + &dyn->pfid_mask4, a, AF_INET); default: - return (pfr_match_addr(dyn->pfid_kt, a, AF_INET)); + return pfr_match_addr(dyn->pfid_kt, a, AF_INET); } #endif /* INET */ #if INET6 case AF_INET6: switch (dyn->pfid_acnt6) { case 0: - return (0); + return 0; case 1: - return (PF_MATCHA(0, &dyn->pfid_addr6, - &dyn->pfid_mask6, a, AF_INET6)); + return PF_MATCHA(0, &dyn->pfid_addr6, + &dyn->pfid_mask6, a, AF_INET6); default: - return (pfr_match_addr(dyn->pfid_kt, a, AF_INET6)); + return pfr_match_addr(dyn->pfid_kt, a, AF_INET6); } #endif /* INET6 */ default: - return (0); + return 0; } } int pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af) { - struct pfi_dynaddr *dyn; - char tblname[PF_TABLE_NAME_SIZE]; - struct pf_ruleset *ruleset = NULL; - int rv = 0; + struct pfi_dynaddr *dyn; + char tblname[PF_TABLE_NAME_SIZE]; + struct pf_ruleset *ruleset = NULL; + int rv = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (aw->type != PF_ADDR_DYNIFTL) - return (0); - if ((dyn = pool_get(&pfi_addr_pl, PR_WAITOK)) == NULL) - return (1); - bzero(dyn, sizeof (*dyn)); + if (aw->type != PF_ADDR_DYNIFTL) { + return 0; + } + if ((dyn = pool_get(&pfi_addr_pl, PR_WAITOK)) == NULL) { + return 1; + } + bzero(dyn, sizeof(*dyn)); - if (strcmp(aw->v.ifname, "self") == 0) + if (strcmp(aw->v.ifname, "self") == 0) { dyn->pfid_kif = pfi_kif_get(IFG_ALL); - else + } else { dyn->pfid_kif = pfi_kif_get(aw->v.ifname); + } if (dyn->pfid_kif == NULL) { rv = 1; goto _bad; @@ -329,20 +341,26 @@ pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af) pfi_kif_ref(dyn->pfid_kif, PFI_KIF_REF_RULE); dyn->pfid_net = pfi_unmask(&aw->v.a.mask); - if (af == AF_INET && dyn->pfid_net == 32) + if (af == AF_INET && dyn->pfid_net == 32) { dyn->pfid_net = 128; - strlcpy(tblname, aw->v.ifname, sizeof (tblname)); - if (aw->iflags & PFI_AFLAG_NETWORK) - strlcat(tblname, ":network", sizeof (tblname)); - if (aw->iflags & PFI_AFLAG_BROADCAST) - strlcat(tblname, ":broadcast", sizeof (tblname)); - if (aw->iflags & PFI_AFLAG_PEER) - strlcat(tblname, ":peer", sizeof (tblname)); - if (aw->iflags & PFI_AFLAG_NOALIAS) - strlcat(tblname, ":0", sizeof (tblname)); - if (dyn->pfid_net != 128) + } + strlcpy(tblname, aw->v.ifname, sizeof(tblname)); + if (aw->iflags & PFI_AFLAG_NETWORK) { + strlcat(tblname, ":network", sizeof(tblname)); + } + if (aw->iflags & PFI_AFLAG_BROADCAST) { + strlcat(tblname, ":broadcast", sizeof(tblname)); + } + if (aw->iflags & PFI_AFLAG_PEER) { + strlcat(tblname, ":peer", sizeof(tblname)); + } + if (aw->iflags & PFI_AFLAG_NOALIAS) { + strlcat(tblname, ":0", sizeof(tblname)); + } + if (dyn->pfid_net != 128) { snprintf(tblname + 
strlen(tblname), - sizeof (tblname) - strlen(tblname), "/%d", dyn->pfid_net); + sizeof(tblname) - strlen(tblname), "/%d", dyn->pfid_net); + } if ((ruleset = pf_find_or_create_ruleset(PF_RESERVED_ANCHOR)) == NULL) { rv = 1; goto _bad; @@ -360,39 +378,43 @@ pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af) TAILQ_INSERT_TAIL(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry); aw->p.dyn = dyn; pfi_kif_update(dyn->pfid_kif); - return (0); + return 0; _bad: - if (dyn->pfid_kt != NULL) + if (dyn->pfid_kt != NULL) { pfr_detach_table(dyn->pfid_kt); - if (ruleset != NULL) + } + if (ruleset != NULL) { pf_remove_if_empty_ruleset(ruleset); - if (dyn->pfid_kif != NULL) + } + if (dyn->pfid_kif != NULL) { pfi_kif_unref(dyn->pfid_kif, PFI_KIF_REF_RULE); + } pool_put(&pfi_addr_pl, dyn); - return (rv); + return rv; } void pfi_kif_update(struct pfi_kif *kif) { - struct pfi_dynaddr *p; + struct pfi_dynaddr *p; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); /* update all dynaddr */ TAILQ_FOREACH(p, &kif->pfik_dynaddrs, entry) - pfi_dynaddr_update(p); + pfi_dynaddr_update(p); } void pfi_dynaddr_update(struct pfi_dynaddr *dyn) { - struct pfi_kif *kif; - struct pfr_ktable *kt; + struct pfi_kif *kif; + struct pfr_ktable *kt; - if (dyn == NULL || dyn->pfid_kif == NULL || dyn->pfid_kt == NULL) + if (dyn == NULL || dyn->pfid_kif == NULL || dyn->pfid_kt == NULL) { panic("pfi_dynaddr_update"); + } kif = dyn->pfid_kif; kt = dyn->pfid_kt; @@ -408,28 +430,31 @@ pfi_dynaddr_update(struct pfi_dynaddr *dyn) void pfi_table_update(struct pfr_ktable *kt, struct pfi_kif *kif, int net, int flags) { - int e, size2 = 0; + int e, size2 = 0; pfi_buffer_cnt = 0; - if (kif->pfik_ifp != NULL) + if (kif->pfik_ifp != NULL) { pfi_instance_add(kif->pfik_ifp, net, flags); + } if ((e = pfr_set_addrs(&kt->pfrkt_t, CAST_USER_ADDR_T(pfi_buffer), - pfi_buffer_cnt, &size2, NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK))) + pfi_buffer_cnt, &size2, NULL, NULL, NULL, 0, PFR_TFLAG_ALLMASK))) { printf("pfi_table_update: cannot set %d new addresses " "into table %s: %d\n", pfi_buffer_cnt, kt->pfrkt_name, e); + } } void pfi_instance_add(struct ifnet *ifp, int net, int flags) { - struct ifaddr *ia; - int got4 = 0, got6 = 0; - int net2, af; + struct ifaddr *ia; + int got4 = 0, got6 = 0; + int net2, af; - if (ifp == NULL) + if (ifp == NULL) { return; + } ifnet_lock_shared(ifp); TAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) { IFA_LOCK(ia); @@ -472,27 +497,31 @@ pfi_instance_add(struct ifnet *ifp, int net, int flags) continue; } } - if (af == AF_INET) + if (af == AF_INET) { got4 = 1; - else if (af == AF_INET6) + } else if (af == AF_INET6) { got6 = 1; + } net2 = net; if (net2 == 128 && (flags & PFI_AFLAG_NETWORK)) { - if (af == AF_INET) + if (af == AF_INET) { net2 = pfi_unmask(&((struct sockaddr_in *) (void *)ia->ifa_netmask)->sin_addr); - else if (af == AF_INET6) + } else if (af == AF_INET6) { net2 = pfi_unmask(&((struct sockaddr_in6 *) (void *)ia->ifa_netmask)->sin6_addr); + } } - if (af == AF_INET && net2 > 32) + if (af == AF_INET && net2 > 32) { net2 = 32; - if (flags & PFI_AFLAG_BROADCAST) + } + if (flags & PFI_AFLAG_BROADCAST) { pfi_address_add(ia->ifa_broadaddr, af, net2); - else if (flags & PFI_AFLAG_PEER) + } else if (flags & PFI_AFLAG_PEER) { pfi_address_add(ia->ifa_dstaddr, af, net2); - else + } else { pfi_address_add(ia->ifa_addr, af, net2); + } IFA_UNLOCK(ia); } ifnet_lock_done(ifp); @@ -501,57 +530,62 @@ pfi_instance_add(struct ifnet *ifp, int net, int flags) void pfi_address_add(struct sockaddr *sa, int af, int net) { - struct pfr_addr *p; - int i; + 
struct pfr_addr *p; + int i; if (pfi_buffer_cnt >= pfi_buffer_max) { - int new_max = pfi_buffer_max * 2; + int new_max = pfi_buffer_max * 2; if (new_max > PFI_BUFFER_MAX) { printf("pfi_address_add: address buffer full (%d/%d)\n", pfi_buffer_cnt, PFI_BUFFER_MAX); return; } - p = _MALLOC(new_max * sizeof (*pfi_buffer), PFI_MTYPE, + p = _MALLOC(new_max * sizeof(*pfi_buffer), PFI_MTYPE, M_WAITOK); if (p == NULL) { printf("pfi_address_add: no memory to grow buffer " "(%d/%d)\n", pfi_buffer_cnt, PFI_BUFFER_MAX); return; } - memcpy(pfi_buffer, p, pfi_buffer_cnt * sizeof (*pfi_buffer)); + memcpy(pfi_buffer, p, pfi_buffer_cnt * sizeof(*pfi_buffer)); /* no need to zero buffer */ _FREE(pfi_buffer, PFI_MTYPE); pfi_buffer = p; pfi_buffer_max = new_max; } - if (af == AF_INET && net > 32) + if (af == AF_INET && net > 32) { net = 128; + } p = pfi_buffer + pfi_buffer_cnt++; - bzero(p, sizeof (*p)); + bzero(p, sizeof(*p)); p->pfra_af = af; p->pfra_net = net; - if (af == AF_INET) + if (af == AF_INET) { p->pfra_ip4addr = ((struct sockaddr_in *)(void *)sa)->sin_addr; - else if (af == AF_INET6) { + } else if (af == AF_INET6) { p->pfra_ip6addr = ((struct sockaddr_in6 *)(void *)sa)->sin6_addr; - if (IN6_IS_SCOPE_EMBED(&p->pfra_ip6addr)) + if (IN6_IS_SCOPE_EMBED(&p->pfra_ip6addr)) { p->pfra_ip6addr.s6_addr16[1] = 0; + } } /* mask network address bits */ - if (net < 128) - ((caddr_t)p)[p->pfra_net/8] &= ~(0xFF >> (p->pfra_net%8)); - for (i = (p->pfra_net+7)/8; i < (int)sizeof (p->pfra_u); i++) + if (net < 128) { + ((caddr_t)p)[p->pfra_net / 8] &= ~(0xFF >> (p->pfra_net % 8)); + } + for (i = (p->pfra_net + 7) / 8; i < (int)sizeof(p->pfra_u); i++) { ((caddr_t)p)[i] = 0; + } } void pfi_dynaddr_remove(struct pf_addr_wrap *aw) { if (aw->type != PF_ADDR_DYNIFTL || aw->p.dyn == NULL || - aw->p.dyn->pfid_kif == NULL || aw->p.dyn->pfid_kt == NULL) + aw->p.dyn->pfid_kif == NULL || aw->p.dyn->pfid_kt == NULL) { return; + } TAILQ_REMOVE(&aw->p.dyn->pfid_kif->pfik_dynaddrs, aw->p.dyn, entry); pfi_kif_unref(aw->p.dyn->pfid_kif, PFI_KIF_REF_RULE); @@ -566,15 +600,16 @@ void pfi_dynaddr_copyout(struct pf_addr_wrap *aw) { if (aw->type != PF_ADDR_DYNIFTL || aw->p.dyn == NULL || - aw->p.dyn->pfid_kif == NULL) + aw->p.dyn->pfid_kif == NULL) { return; + } aw->p.dyncnt = aw->p.dyn->pfid_acnt4 + aw->p.dyn->pfid_acnt6; } void pfi_kifaddr_update(void *v) { - struct pfi_kif *kif = (struct pfi_kif *)v; + struct pfi_kif *kif = (struct pfi_kif *)v; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); @@ -585,38 +620,41 @@ pfi_kifaddr_update(void *v) int pfi_if_compare(struct pfi_kif *p, struct pfi_kif *q) { - return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ)); + return strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ); } void pfi_update_status(const char *name, struct pf_status *pfs) { - struct pfi_kif *p; - struct pfi_kif_cmp key; - int i, j, k; + struct pfi_kif *p; + struct pfi_kif_cmp key; + int i, j, k; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - strlcpy(key.pfik_name, name, sizeof (key.pfik_name)); + strlcpy(key.pfik_name, name, sizeof(key.pfik_name)); p = RB_FIND(pfi_ifhead, &pfi_ifs, (struct pfi_kif *)(void *)&key); - if (p == NULL) + if (p == NULL) { return; + } if (pfs != NULL) { - bzero(pfs->pcounters, sizeof (pfs->pcounters)); - bzero(pfs->bcounters, sizeof (pfs->bcounters)); - for (i = 0; i < 2; i++) - for (j = 0; j < 2; j++) + bzero(pfs->pcounters, sizeof(pfs->pcounters)); + bzero(pfs->bcounters, sizeof(pfs->bcounters)); + for (i = 0; i < 2; i++) { + for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { pfs->pcounters[i][j][k] += - 
p->pfik_packets[i][j][k]; + p->pfik_packets[i][j][k]; pfs->bcounters[i][j] += - p->pfik_bytes[i][j][k]; + p->pfik_bytes[i][j][k]; } + } + } } else { /* just clear statistics */ - bzero(p->pfik_packets, sizeof (p->pfik_packets)); - bzero(p->pfik_bytes, sizeof (p->pfik_bytes)); + bzero(p->pfik_packets, sizeof(p->pfik_packets)); + bzero(p->pfik_bytes, sizeof(p->pfik_bytes)); p->pfik_tzero = pf_calendar_time_second(); } } @@ -624,94 +662,103 @@ pfi_update_status(const char *name, struct pf_status *pfs) int pfi_get_ifaces(const char *name, user_addr_t buf, int *size) { - struct pfi_kif *p, *nextp; - int n = 0; + struct pfi_kif *p, *nextp; + int n = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); for (p = RB_MIN(pfi_ifhead, &pfi_ifs); p; p = nextp) { nextp = RB_NEXT(pfi_ifhead, &pfi_ifs, p); - if (pfi_skip_if(name, p)) + if (pfi_skip_if(name, p)) { continue; + } if (*size > n++) { struct pfi_uif u; - if (!p->pfik_tzero) + if (!p->pfik_tzero) { p->pfik_tzero = pf_calendar_time_second(); + } pfi_kif_ref(p, PFI_KIF_REF_RULE); /* return the user space version of pfi_kif */ - bzero(&u, sizeof (u)); - bcopy(p->pfik_name, &u.pfik_name, sizeof (u.pfik_name)); + bzero(&u, sizeof(u)); + bcopy(p->pfik_name, &u.pfik_name, sizeof(u.pfik_name)); bcopy(p->pfik_packets, &u.pfik_packets, - sizeof (u.pfik_packets)); + sizeof(u.pfik_packets)); bcopy(p->pfik_bytes, &u.pfik_bytes, - sizeof (u.pfik_bytes)); + sizeof(u.pfik_bytes)); u.pfik_tzero = p->pfik_tzero; u.pfik_flags = p->pfik_flags; u.pfik_states = p->pfik_states; u.pfik_rules = p->pfik_rules; - if (copyout(&u, buf, sizeof (u))) { + if (copyout(&u, buf, sizeof(u))) { pfi_kif_unref(p, PFI_KIF_REF_RULE); - return (EFAULT); + return EFAULT; } - buf += sizeof (u); + buf += sizeof(u); nextp = RB_NEXT(pfi_ifhead, &pfi_ifs, p); pfi_kif_unref(p, PFI_KIF_REF_RULE); } } *size = n; - return (0); + return 0; } int pfi_skip_if(const char *filter, struct pfi_kif *p) { - int n; + int n; - if (filter == NULL || !*filter) - return (0); - if (strcmp(p->pfik_name, filter) == 0) - return (0); /* exact match */ + if (filter == NULL || !*filter) { + return 0; + } + if (strcmp(p->pfik_name, filter) == 0) { + return 0; /* exact match */ + } n = strlen(filter); - if (n < 1 || n >= IFNAMSIZ) - return (1); /* sanity check */ - if (filter[n-1] >= '0' && filter[n-1] <= '9') - return (1); /* only do exact match in that case */ - if (strncmp(p->pfik_name, filter, n)) - return (1); /* prefix doesn't match */ - return (p->pfik_name[n] < '0' || p->pfik_name[n] > '9'); + if (n < 1 || n >= IFNAMSIZ) { + return 1; /* sanity check */ + } + if (filter[n - 1] >= '0' && filter[n - 1] <= '9') { + return 1; /* only do exact match in that case */ + } + if (strncmp(p->pfik_name, filter, n)) { + return 1; /* prefix doesn't match */ + } + return p->pfik_name[n] < '0' || p->pfik_name[n] > '9'; } int pfi_set_flags(const char *name, int flags) { - struct pfi_kif *p; + struct pfi_kif *p; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); RB_FOREACH(p, pfi_ifhead, &pfi_ifs) { - if (pfi_skip_if(name, p)) + if (pfi_skip_if(name, p)) { continue; + } p->pfik_flags |= flags; } - return (0); + return 0; } int pfi_clear_flags(const char *name, int flags) { - struct pfi_kif *p; + struct pfi_kif *p; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); RB_FOREACH(p, pfi_ifhead, &pfi_ifs) { - if (pfi_skip_if(name, p)) + if (pfi_skip_if(name, p)) { continue; + } p->pfik_flags &= ~flags; } - return (0); + return 0; } /* from pf_print_state.c */ @@ -728,8 +775,9 @@ pfi_unmask(void *addr) } if (j < 4) { tmp = ntohl(m->addr32[j]); - 
for (i = 31; tmp & (1 << i); --i) + for (i = 31; tmp & (1 << i); --i) { b++; + } } - return (b); + return b; } diff --git a/bsd/net/pf_ioctl.c b/bsd/net/pf_ioctl.c index 395568c48..43a8e23c7 100644 --- a/bsd/net/pf_ioctl.c +++ b/bsd/net/pf_ioctl.c @@ -186,23 +186,23 @@ static void pf_ruleset_cleanup(struct pf_ruleset *, int); static void pf_deleterule_anchor_step_out(struct pf_ruleset **, int, struct pf_rule **); -#define PF_CDEV_MAJOR (-1) +#define PF_CDEV_MAJOR (-1) static struct cdevsw pf_cdevsw = { - /* open */ pfopen, - /* close */ pfclose, - /* read */ eno_rdwrt, - /* write */ eno_rdwrt, - /* ioctl */ pfioctl, - /* stop */ eno_stop, - /* reset */ eno_reset, - /* tty */ NULL, - /* select */ eno_select, - /* mmap */ eno_mmap, - /* strategy */ eno_strat, - /* getc */ eno_getc, - /* putc */ eno_putc, - /* type */ 0 + /* open */ pfopen, + /* close */ pfclose, + /* read */ eno_rdwrt, + /* write */ eno_rdwrt, + /* ioctl */ pfioctl, + /* stop */ eno_stop, + /* reset */ eno_reset, + /* tty */ NULL, + /* select */ eno_select, + /* mmap */ eno_mmap, + /* strategy */ eno_strat, + /* getc */ eno_getc, + /* putc */ eno_putc, + /* type */ 0 }; static void pf_attach_hooks(void); @@ -232,21 +232,21 @@ static u_int32_t pfdevcnt; SLIST_HEAD(list_head, pfioc_kernel_token); static struct list_head token_list_head; -struct pf_rule pf_default_rule; +struct pf_rule pf_default_rule; -#define TAGID_MAX 50000 -static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = +#define TAGID_MAX 50000 +static TAILQ_HEAD(pf_tags, pf_tagname) pf_tags = TAILQ_HEAD_INITIALIZER(pf_tags); #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE) #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE #endif -static u_int16_t tagname2tag(struct pf_tags *, char *); -static void tag2tagname(struct pf_tags *, u_int16_t, char *); -static void tag_unref(struct pf_tags *, u_int16_t); -static int pf_rtlabel_add(struct pf_addr_wrap *); -static void pf_rtlabel_remove(struct pf_addr_wrap *); -static void pf_rtlabel_copyout(struct pf_addr_wrap *); +static u_int16_t tagname2tag(struct pf_tags *, char *); +static void tag2tagname(struct pf_tags *, u_int16_t, char *); +static void tag_unref(struct pf_tags *, u_int16_t); +static int pf_rtlabel_add(struct pf_addr_wrap *); +static void pf_rtlabel_remove(struct pf_addr_wrap *); +static void pf_rtlabel_copyout(struct pf_addr_wrap *); #if INET static int pf_inet_hook(struct ifnet *, struct mbuf **, int, @@ -257,69 +257,69 @@ static int pf_inet6_hook(struct ifnet *, struct mbuf **, int, struct ip_fw_args *); #endif /* INET6 */ -#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x +#define DPFPRINTF(n, x) if (pf_status.debug >= (n)) printf x /* * Helper macros for ioctl structures which vary in size (32-bit vs. 
64-bit) */ -#define PFIOCX_STRUCT_DECL(s) \ -struct { \ - union { \ - struct s##_32 _s##_32; \ - struct s##_64 _s##_64; \ - } _u; \ -} *s##_un = NULL \ - -#define PFIOCX_STRUCT_BEGIN(a, s, _action) { \ - VERIFY(s##_un == NULL); \ - s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \ - if (s##_un == NULL) { \ - _action \ - } else { \ - if (p64) \ - bcopy(a, &s##_un->_u._s##_64, \ - sizeof (struct s##_64)); \ - else \ - bcopy(a, &s##_un->_u._s##_32, \ - sizeof (struct s##_32)); \ - } \ +#define PFIOCX_STRUCT_DECL(s) \ +struct { \ + union { \ + struct s##_32 _s##_32; \ + struct s##_64 _s##_64; \ + } _u; \ +} *s##_un = NULL \ + +#define PFIOCX_STRUCT_BEGIN(a, s, _action) { \ + VERIFY(s##_un == NULL); \ + s##_un = _MALLOC(sizeof (*s##_un), M_TEMP, M_WAITOK|M_ZERO); \ + if (s##_un == NULL) { \ + _action \ + } else { \ + if (p64) \ + bcopy(a, &s##_un->_u._s##_64, \ + sizeof (struct s##_64)); \ + else \ + bcopy(a, &s##_un->_u._s##_32, \ + sizeof (struct s##_32)); \ + } \ } -#define PFIOCX_STRUCT_END(s, a) { \ - VERIFY(s##_un != NULL); \ - if (p64) \ - bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \ - else \ - bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \ - _FREE(s##_un, M_TEMP); \ - s##_un = NULL; \ +#define PFIOCX_STRUCT_END(s, a) { \ + VERIFY(s##_un != NULL); \ + if (p64) \ + bcopy(&s##_un->_u._s##_64, a, sizeof (struct s##_64)); \ + else \ + bcopy(&s##_un->_u._s##_32, a, sizeof (struct s##_32)); \ + _FREE(s##_un, M_TEMP); \ + s##_un = NULL; \ } -#define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32) -#define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64) +#define PFIOCX_STRUCT_ADDR32(s) (&s##_un->_u._s##_32) +#define PFIOCX_STRUCT_ADDR64(s) (&s##_un->_u._s##_64) /* * Helper macros for regular ioctl structures. */ -#define PFIOC_STRUCT_BEGIN(a, v, _action) { \ - VERIFY((v) == NULL); \ - (v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \ - if ((v) == NULL) { \ - _action \ - } else { \ - bcopy(a, v, sizeof (*(v))); \ - } \ +#define PFIOC_STRUCT_BEGIN(a, v, _action) { \ + VERIFY((v) == NULL); \ + (v) = _MALLOC(sizeof (*(v)), M_TEMP, M_WAITOK|M_ZERO); \ + if ((v) == NULL) { \ + _action \ + } else { \ + bcopy(a, v, sizeof (*(v))); \ + } \ } -#define PFIOC_STRUCT_END(v, a) { \ - VERIFY((v) != NULL); \ - bcopy(v, a, sizeof (*(v))); \ - _FREE(v, M_TEMP); \ - (v) = NULL; \ +#define PFIOC_STRUCT_END(v, a) { \ + VERIFY((v) != NULL); \ + bcopy(v, a, sizeof (*(v))); \ + _FREE(v, M_TEMP); \ + (v) = NULL; \ } -#define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32) -#define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64) +#define PFIOC_STRUCT_ADDR32(s) (&s##_un->_u._s##_32) +#define PFIOC_STRUCT_ADDR64(s) (&s##_un->_u._s##_64) static lck_attr_t *pf_perim_lock_attr; static lck_grp_t *pf_perim_lock_grp; @@ -334,9 +334,9 @@ struct thread *pf_purge_thread; extern void pfi_kifaddr_update(void *); /* pf enable ref-counting helper functions */ -static u_int64_t generate_token(struct proc *); -static int remove_token(struct pfioc_remove_token *); -static void invalidate_all_tokens(void); +static u_int64_t generate_token(struct proc *); +static int remove_token(struct pfioc_remove_token *); +static void invalidate_all_tokens(void); static u_int64_t generate_token(struct proc *p) @@ -344,15 +344,15 @@ generate_token(struct proc *p) u_int64_t token_value; struct pfioc_kernel_token *new_token; - new_token = _MALLOC(sizeof (struct pfioc_kernel_token), M_TEMP, - M_WAITOK|M_ZERO); + new_token = _MALLOC(sizeof(struct pfioc_kernel_token), M_TEMP, + M_WAITOK | M_ZERO); LCK_MTX_ASSERT(pf_lock, 
LCK_MTX_ASSERT_OWNED); if (new_token == NULL) { /* malloc failed! bail! */ printf("%s: unable to allocate pf token structure!", __func__); - return (0); + return 0; } token_value = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)new_token); @@ -360,13 +360,13 @@ generate_token(struct proc *p) new_token->token.token_value = token_value; new_token->token.pid = proc_pid(p); proc_name(new_token->token.pid, new_token->token.proc_name, - sizeof (new_token->token.proc_name)); + sizeof(new_token->token.proc_name)); new_token->token.timestamp = pf_calendar_time_second(); SLIST_INSERT_HEAD(&token_list_head, new_token, next); nr_tokens++; - return (token_value); + return token_value; } static int @@ -382,12 +382,12 @@ remove_token(struct pfioc_remove_token *tok) pfioc_kernel_token, next); _FREE(entry, M_TEMP); nr_tokens--; - return (0); /* success */ + return 0; /* success */ } } printf("pf : remove failure\n"); - return (ESRCH); /* failure */ + return ESRCH; /* failure */ } static void @@ -422,17 +422,17 @@ pfinit(void) pf_lock_attr = lck_attr_alloc_init(); lck_mtx_init(pf_lock, pf_lock_grp, pf_lock_attr); - pool_init(&pf_rule_pl, sizeof (struct pf_rule), 0, 0, 0, "pfrulepl", + pool_init(&pf_rule_pl, sizeof(struct pf_rule), 0, 0, 0, "pfrulepl", NULL); - pool_init(&pf_src_tree_pl, sizeof (struct pf_src_node), 0, 0, 0, + pool_init(&pf_src_tree_pl, sizeof(struct pf_src_node), 0, 0, 0, "pfsrctrpl", NULL); - pool_init(&pf_state_pl, sizeof (struct pf_state), 0, 0, 0, "pfstatepl", + pool_init(&pf_state_pl, sizeof(struct pf_state), 0, 0, 0, "pfstatepl", NULL); - pool_init(&pf_state_key_pl, sizeof (struct pf_state_key), 0, 0, 0, + pool_init(&pf_state_key_pl, sizeof(struct pf_state_key), 0, 0, 0, "pfstatekeypl", NULL); - pool_init(&pf_app_state_pl, sizeof (struct pf_app_state), 0, 0, 0, + pool_init(&pf_app_state_pl, sizeof(struct pf_app_state), 0, 0, 0, "pfappstatepl", NULL); - pool_init(&pf_pooladdr_pl, sizeof (struct pf_pooladdr), 0, 0, 0, + pool_init(&pf_pooladdr_pl, sizeof(struct pf_pooladdr), 0, 0, 0, "pfpooladdrpl", NULL); pfr_initialize(); pfi_initialize(); @@ -441,9 +441,10 @@ pfinit(void) pool_sethardlimit(pf_pool_limits[PF_LIMIT_STATES].pp, pf_pool_limits[PF_LIMIT_STATES].limit, NULL, 0); - if (max_mem <= 256*1024*1024) + if (max_mem <= 256 * 1024 * 1024) { pf_pool_limits[PF_LIMIT_TABLE_ENTRIES].limit = PFR_KENTRY_HIWAT_SMALL; + } RB_INIT(&tree_src_tracking); RB_INIT(&pf_anchors); @@ -498,7 +499,7 @@ pfinit(void) t[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END; pf_normalize_init(); - bzero(&pf_status, sizeof (pf_status)); + bzero(&pf_status, sizeof(pf_status)); pf_status.debug = PF_DEBUG_URGENT; pf_hash_seed = RandomULong(); @@ -532,13 +533,13 @@ pfinit(void) static void pfdetach(void) { - struct pf_anchor *anchor; - struct pf_state *state; - struct pf_src_node *node; - struct pfioc_table pt; - u_int32_t ticket; - int i; - char r = '\0'; + struct pf_anchor *anchor; + struct pf_state *state; + struct pf_src_node *node; + struct pfioc_table pt; + u_int32_t ticket; + int i; + char r = '\0'; pf_detach_hooks(); @@ -546,9 +547,11 @@ pfdetach(void) wakeup(pf_purge_thread_fn); /* clear the rulesets */ - for (i = 0; i < PF_RULESET_MAX; i++) - if (pf_begin_rules(&ticket, i, &r) == 0) - pf_commit_rules(ticket, i, &r); + for (i = 0; i < PF_RULESET_MAX; i++) { + if (pf_begin_rules(&ticket, i, &r) == 0) { + pf_commit_rules(ticket, i, &r); + } + } /* clear states */ RB_FOREACH(state, pf_state_tree_id, &tree_id) { @@ -575,14 +578,16 @@ pfdetach(void) pf_purge_expired_src_nodes(); /* clear tables */ - memset(&pt, '\0', sizeof 
(pt)); + memset(&pt, '\0', sizeof(pt)); pfr_clr_tables(&pt.pfrio_table, &pt.pfrio_ndel, pt.pfrio_flags); /* destroy anchors */ while ((anchor = RB_MIN(pf_anchor_global, &pf_anchors)) != NULL) { - for (i = 0; i < PF_RULESET_MAX; i++) - if (pf_begin_rules(&ticket, i, anchor->name) == 0) + for (i = 0; i < PF_RULESET_MAX; i++) { + if (pf_begin_rules(&ticket, i, anchor->name) == 0) { pf_commit_rules(ticket, i, anchor->name); + } + } } /* destroy main ruleset */ @@ -606,27 +611,29 @@ static int pfopen(dev_t dev, int flags, int fmt, struct proc *p) { #pragma unused(flags, fmt, p) - if (minor(dev) >= PFDEV_MAX) - return (ENXIO); + if (minor(dev) >= PFDEV_MAX) { + return ENXIO; + } if (minor(dev) == PFDEV_PFM) { lck_mtx_lock(pf_lock); if (pfdevcnt != 0) { lck_mtx_unlock(pf_lock); - return (EBUSY); + return EBUSY; } pfdevcnt++; lck_mtx_unlock(pf_lock); } - return (0); + return 0; } static int pfclose(dev_t dev, int flags, int fmt, struct proc *p) { #pragma unused(flags, fmt, p) - if (minor(dev) >= PFDEV_MAX) - return (ENXIO); + if (minor(dev) >= PFDEV_MAX) { + return ENXIO; + } if (minor(dev) == PFDEV_PFM) { lck_mtx_lock(pf_lock); @@ -634,7 +641,7 @@ pfclose(dev_t dev, int flags, int fmt, struct proc *p) pfdevcnt--; lck_mtx_unlock(pf_lock); } - return (0); + return 0; } static struct pf_pool * @@ -642,49 +649,57 @@ pf_get_pool(char *anchor, u_int32_t ticket, u_int8_t rule_action, u_int32_t rule_number, u_int8_t r_last, u_int8_t active, u_int8_t check_ticket) { - struct pf_ruleset *ruleset; - struct pf_rule *rule; - int rs_num; + struct pf_ruleset *ruleset; + struct pf_rule *rule; + int rs_num; ruleset = pf_find_ruleset(anchor); - if (ruleset == NULL) - return (NULL); + if (ruleset == NULL) { + return NULL; + } rs_num = pf_get_ruleset_number(rule_action); - if (rs_num >= PF_RULESET_MAX) - return (NULL); + if (rs_num >= PF_RULESET_MAX) { + return NULL; + } if (active) { if (check_ticket && ticket != - ruleset->rules[rs_num].active.ticket) - return (NULL); - if (r_last) + ruleset->rules[rs_num].active.ticket) { + return NULL; + } + if (r_last) { rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, pf_rulequeue); - else + } else { rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); + } } else { if (check_ticket && ticket != - ruleset->rules[rs_num].inactive.ticket) - return (NULL); - if (r_last) + ruleset->rules[rs_num].inactive.ticket) { + return NULL; + } + if (r_last) { rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, pf_rulequeue); - else + } else { rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr); + } } if (!r_last) { - while ((rule != NULL) && (rule->nr != rule_number)) + while ((rule != NULL) && (rule->nr != rule_number)) { rule = TAILQ_NEXT(rule, entries); + } + } + if (rule == NULL) { + return NULL; } - if (rule == NULL) - return (NULL); - return (&rule->rpool); + return &rule->rpool; } static void pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) { - struct pf_pooladdr *mv_pool_pa; + struct pf_pooladdr *mv_pool_pa; while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) { TAILQ_REMOVE(poola, mv_pool_pa, entries); @@ -695,7 +710,7 @@ pf_mv_pool(struct pf_palist *poola, struct pf_palist *poolb) static void pf_empty_pool(struct pf_palist *poola) { - struct pf_pooladdr *empty_pool_pa; + struct pf_pooladdr *empty_pool_pa; while ((empty_pool_pa = TAILQ_FIRST(poola)) != NULL) { pfi_dynaddr_remove(&empty_pool_pa->addr); @@ -718,8 +733,9 @@ pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) */ pf_tbladdr_remove(&rule->src.addr); pf_tbladdr_remove(&rule->dst.addr); - 
if (rule->overload_tbl) + if (rule->overload_tbl) { pfr_detach_table(rule->overload_tbl); + } } TAILQ_REMOVE(rulequeue, rule, entries); rule->entries.tqe_prev = NULL; @@ -727,8 +743,9 @@ pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) } if (rule->states > 0 || rule->src_nodes > 0 || - rule->entries.tqe_prev != NULL) + rule->entries.tqe_prev != NULL) { return; + } pf_tag_unref(rule->tag); pf_tag_unref(rule->match_tag); pf_rtlabel_remove(&rule->src.addr); @@ -738,8 +755,9 @@ pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) if (rulequeue == NULL) { pf_tbladdr_remove(&rule->src.addr); pf_tbladdr_remove(&rule->dst.addr); - if (rule->overload_tbl) + if (rule->overload_tbl) { pfr_detach_table(rule->overload_tbl); + } } pfi_kif_unref(rule->kif, PFI_KIF_REF_RULE); pf_anchor_remove(rule); @@ -750,14 +768,14 @@ pf_rm_rule(struct pf_rulequeue *rulequeue, struct pf_rule *rule) static u_int16_t tagname2tag(struct pf_tags *head, char *tagname) { - struct pf_tagname *tag, *p = NULL; - u_int16_t new_tagid = 1; + struct pf_tagname *tag, *p = NULL; + u_int16_t new_tagid = 1; TAILQ_FOREACH(tag, head, entries) - if (strcmp(tagname, tag->name) == 0) { - tag->ref++; - return (tag->tag); - } + if (strcmp(tagname, tag->name) == 0) { + tag->ref++; + return tag->tag; + } /* * to avoid fragmentation, we do a linear search from the beginning @@ -766,49 +784,55 @@ tagname2tag(struct pf_tags *head, char *tagname) */ /* new entry */ - if (!TAILQ_EMPTY(head)) + if (!TAILQ_EMPTY(head)) { for (p = TAILQ_FIRST(head); p != NULL && - p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) + p->tag == new_tagid; p = TAILQ_NEXT(p, entries)) { new_tagid = p->tag + 1; + } + } - if (new_tagid > TAGID_MAX) - return (0); + if (new_tagid > TAGID_MAX) { + return 0; + } /* allocate and fill new struct pf_tagname */ - tag = _MALLOC(sizeof (*tag), M_TEMP, M_WAITOK|M_ZERO); - if (tag == NULL) - return (0); - strlcpy(tag->name, tagname, sizeof (tag->name)); + tag = _MALLOC(sizeof(*tag), M_TEMP, M_WAITOK | M_ZERO); + if (tag == NULL) { + return 0; + } + strlcpy(tag->name, tagname, sizeof(tag->name)); tag->tag = new_tagid; tag->ref++; - if (p != NULL) /* insert new entry before p */ + if (p != NULL) { /* insert new entry before p */ TAILQ_INSERT_BEFORE(p, tag, entries); - else /* either list empty or no free slot in between */ + } else { /* either list empty or no free slot in between */ TAILQ_INSERT_TAIL(head, tag, entries); + } - return (tag->tag); + return tag->tag; } static void tag2tagname(struct pf_tags *head, u_int16_t tagid, char *p) { - struct pf_tagname *tag; + struct pf_tagname *tag; TAILQ_FOREACH(tag, head, entries) - if (tag->tag == tagid) { - strlcpy(p, tag->name, PF_TAG_NAME_SIZE); - return; - } + if (tag->tag == tagid) { + strlcpy(p, tag->name, PF_TAG_NAME_SIZE); + return; + } } static void tag_unref(struct pf_tags *head, u_int16_t tag) { - struct pf_tagname *p, *next; + struct pf_tagname *p, *next; - if (tag == 0) + if (tag == 0) { return; + } for (p = TAILQ_FIRST(head); p != NULL; p = next) { next = TAILQ_NEXT(p, entries); @@ -825,7 +849,7 @@ tag_unref(struct pf_tags *head, u_int16_t tag) u_int16_t pf_tagname2tag(char *tagname) { - return (tagname2tag(&pf_tags, tagname)); + return tagname2tag(&pf_tags, tagname); } void @@ -840,10 +864,12 @@ pf_tag_ref(u_int16_t tag) struct pf_tagname *t; TAILQ_FOREACH(t, &pf_tags, entries) - if (t->tag == tag) - break; - if (t != NULL) + if (t->tag == tag) { + break; + } + if (t != NULL) { t->ref++; + } } void @@ -856,7 +882,7 @@ static int pf_rtlabel_add(struct 
pf_addr_wrap *a) { #pragma unused(a) - return (0); + return 0; } static void @@ -874,57 +900,61 @@ pf_rtlabel_copyout(struct pf_addr_wrap *a) static int pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor) { - struct pf_ruleset *rs; - struct pf_rule *rule; + struct pf_ruleset *rs; + struct pf_rule *rule; - if (rs_num < 0 || rs_num >= PF_RULESET_MAX) - return (EINVAL); + if (rs_num < 0 || rs_num >= PF_RULESET_MAX) { + return EINVAL; + } rs = pf_find_or_create_ruleset(anchor); - if (rs == NULL) - return (EINVAL); + if (rs == NULL) { + return EINVAL; + } while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); rs->rules[rs_num].inactive.rcount--; } *ticket = ++rs->rules[rs_num].inactive.ticket; rs->rules[rs_num].inactive.open = 1; - return (0); + return 0; } static int pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor) { - struct pf_ruleset *rs; - struct pf_rule *rule; + struct pf_ruleset *rs; + struct pf_rule *rule; - if (rs_num < 0 || rs_num >= PF_RULESET_MAX) - return (EINVAL); + if (rs_num < 0 || rs_num >= PF_RULESET_MAX) { + return EINVAL; + } rs = pf_find_ruleset(anchor); if (rs == NULL || !rs->rules[rs_num].inactive.open || - rs->rules[rs_num].inactive.ticket != ticket) - return (0); + rs->rules[rs_num].inactive.ticket != ticket) { + return 0; + } while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) { pf_rm_rule(rs->rules[rs_num].inactive.ptr, rule); rs->rules[rs_num].inactive.rcount--; } rs->rules[rs_num].inactive.open = 0; - return (0); + return 0; } -#define PF_MD5_UPD(st, elm) \ +#define PF_MD5_UPD(st, elm) \ MD5Update(ctx, (u_int8_t *)&(st)->elm, sizeof ((st)->elm)) -#define PF_MD5_UPD_STR(st, elm) \ +#define PF_MD5_UPD_STR(st, elm) \ MD5Update(ctx, (u_int8_t *)(st)->elm, strlen((st)->elm)) -#define PF_MD5_UPD_HTONL(st, elm, stor) do { \ - (stor) = htonl((st)->elm); \ - MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \ +#define PF_MD5_UPD_HTONL(st, elm, stor) do { \ + (stor) = htonl((st)->elm); \ + MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int32_t)); \ } while (0) -#define PF_MD5_UPD_HTONS(st, elm, stor) do { \ - (stor) = htons((st)->elm); \ - MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \ +#define PF_MD5_UPD_HTONS(st, elm, stor) do { \ + (stor) = htons((st)->elm); \ + MD5Update(ctx, (u_int8_t *)&(stor), sizeof (u_int16_t)); \ } while (0) static void @@ -1006,26 +1036,29 @@ pf_hash_rule(MD5_CTX *ctx, struct pf_rule *rule) static int pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) { - struct pf_ruleset *rs; - struct pf_rule *rule, **old_array, *r; - struct pf_rulequeue *old_rules; - int error; - u_int32_t old_rcount; + struct pf_ruleset *rs; + struct pf_rule *rule, **old_array, *r; + struct pf_rulequeue *old_rules; + int error; + u_int32_t old_rcount; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (rs_num < 0 || rs_num >= PF_RULESET_MAX) - return (EINVAL); + if (rs_num < 0 || rs_num >= PF_RULESET_MAX) { + return EINVAL; + } rs = pf_find_ruleset(anchor); if (rs == NULL || !rs->rules[rs_num].inactive.open || - ticket != rs->rules[rs_num].inactive.ticket) - return (EBUSY); + ticket != rs->rules[rs_num].inactive.ticket) { + return EBUSY; + } /* Calculate checksum for the main ruleset */ if (rs == &pf_main_ruleset) { error = pf_setup_pfsync_matching(rs); - if (error != 0) - return (error); + if (error != 0) { + return error; + } } /* Swap rules, keep the old. 
*/ @@ -1033,11 +1066,12 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) old_rcount = rs->rules[rs_num].active.rcount; old_array = rs->rules[rs_num].active.ptr_array; - if(old_rcount != 0) { + if (old_rcount != 0) { r = TAILQ_FIRST(rs->rules[rs_num].active.ptr); while (r) { - if (r->rule_flag & PFRULE_PFM) + if (r->rule_flag & PFRULE_PFM) { pffwrules--; + } r = TAILQ_NEXT(r, entries); } } @@ -1059,30 +1093,32 @@ pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor) /* Purge the old rule list. */ - while ((rule = TAILQ_FIRST(old_rules)) != NULL) + while ((rule = TAILQ_FIRST(old_rules)) != NULL) { pf_rm_rule(old_rules, rule); - if (rs->rules[rs_num].inactive.ptr_array) + } + if (rs->rules[rs_num].inactive.ptr_array) { _FREE(rs->rules[rs_num].inactive.ptr_array, M_TEMP); + } rs->rules[rs_num].inactive.ptr_array = NULL; rs->rules[rs_num].inactive.rcount = 0; rs->rules[rs_num].inactive.open = 0; pf_remove_if_empty_ruleset(rs); - return (0); + return 0; } static void pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p, int minordev) { - bcopy(src, dst, sizeof (struct pf_rule)); + bcopy(src, dst, sizeof(struct pf_rule)); - dst->label[sizeof (dst->label) - 1] = '\0'; - dst->ifname[sizeof (dst->ifname) - 1] = '\0'; - dst->qname[sizeof (dst->qname) - 1] = '\0'; - dst->pqname[sizeof (dst->pqname) - 1] = '\0'; - dst->tagname[sizeof (dst->tagname) - 1] = '\0'; - dst->match_tagname[sizeof (dst->match_tagname) - 1] = '\0'; - dst->overload_tblname[sizeof (dst->overload_tblname) - 1] = '\0'; + dst->label[sizeof(dst->label) - 1] = '\0'; + dst->ifname[sizeof(dst->ifname) - 1] = '\0'; + dst->qname[sizeof(dst->qname) - 1] = '\0'; + dst->pqname[sizeof(dst->pqname) - 1] = '\0'; + dst->tagname[sizeof(dst->tagname) - 1] = '\0'; + dst->match_tagname[sizeof(dst->match_tagname) - 1] = '\0'; + dst->overload_tblname[sizeof(dst->overload_tblname) - 1] = '\0'; dst->cuid = kauth_cred_getuid(p->p_ucred); dst->cpid = p->p_pid; @@ -1100,14 +1136,15 @@ pf_rule_copyin(struct pf_rule *src, struct pf_rule *dst, struct proc *p, dst->entries.tqe_prev = NULL; dst->entries.tqe_next = NULL; - if ((uint8_t)minordev == PFDEV_PFM) + if ((uint8_t)minordev == PFDEV_PFM) { dst->rule_flag |= PFRULE_PFM; + } } static void pf_rule_copyout(struct pf_rule *src, struct pf_rule *dst) { - bcopy(src, dst, sizeof (struct pf_rule)); + bcopy(src, dst, sizeof(struct pf_rule)); dst->anchor = NULL; dst->kif = NULL; @@ -1125,7 +1162,7 @@ pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk, struct pf_state *s) { uint64_t secs = pf_time_second(); - bzero(sp, sizeof (struct pfsync_state)); + bzero(sp, sizeof(struct pfsync_state)); /* copy from state key */ sp->lan.addr = sk->lan.addr; @@ -1145,9 +1182,9 @@ pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk, sp->flowhash = sk->flowhash; /* copy from state */ - memcpy(&sp->id, &s->id, sizeof (sp->id)); + memcpy(&sp->id, &s->id, sizeof(sp->id)); sp->creatorid = s->creatorid; - strlcpy(sp->ifname, s->kif->pfik_name, sizeof (sp->ifname)); + strlcpy(sp->ifname, s->kif->pfik_name, sizeof(sp->ifname)); pf_state_peer_to_pfsync(&s->src, &sp->src); pf_state_peer_to_pfsync(&s->dst, &sp->dst); @@ -1167,16 +1204,18 @@ pf_state_export(struct pfsync_state *sp, struct pf_state_key *sk, sp->allow_opts = s->allow_opts; sp->timeout = s->timeout; - if (s->src_node) + if (s->src_node) { sp->sync_flags |= PFSYNC_FLAG_SRCNODE; - if (s->nat_src_node) + } + if (s->nat_src_node) { sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE; + } - if (sp->expire > secs) + if (sp->expire > 
secs) { sp->expire -= secs; - else + } else { sp->expire = 0; - + } } static void @@ -1201,7 +1240,7 @@ pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk, sk->flowhash = pf_calc_state_key_flowhash(sk); /* copy to state */ - memcpy(&s->id, &sp->id, sizeof (sp->id)); + memcpy(&s->id, &sp->id, sizeof(sp->id)); s->creatorid = sp->creatorid; pf_state_peer_from_pfsync(&sp->src, &s->src); pf_state_peer_from_pfsync(&sp->dst, &s->dst); @@ -1212,8 +1251,9 @@ pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk, s->rt_kif = NULL; s->creation = pf_time_second(); s->expire = pf_time_second(); - if (sp->expire > 0) + if (sp->expire > 0) { s->expire -= pf_default_rule.timeout[sp->timeout] - sp->expire; + } s->pfsync_time = 0; s->packets[0] = s->packets[1] = 0; s->bytes[0] = s->bytes[1] = 0; @@ -1222,18 +1262,18 @@ pf_state_import(struct pfsync_state *sp, struct pf_state_key *sk, static void pf_pooladdr_copyin(struct pf_pooladdr *src, struct pf_pooladdr *dst) { - bcopy(src, dst, sizeof (struct pf_pooladdr)); + bcopy(src, dst, sizeof(struct pf_pooladdr)); dst->entries.tqe_prev = NULL; dst->entries.tqe_next = NULL; - dst->ifname[sizeof (dst->ifname) - 1] = '\0'; + dst->ifname[sizeof(dst->ifname) - 1] = '\0'; dst->kif = NULL; } static void pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst) { - bcopy(src, dst, sizeof (struct pf_pooladdr)); + bcopy(src, dst, sizeof(struct pf_pooladdr)); dst->entries.tqe_prev = NULL; dst->entries.tqe_next = NULL; @@ -1243,29 +1283,32 @@ pf_pooladdr_copyout(struct pf_pooladdr *src, struct pf_pooladdr *dst) static int pf_setup_pfsync_matching(struct pf_ruleset *rs) { - MD5_CTX ctx; - struct pf_rule *rule; - int rs_cnt; - u_int8_t digest[PF_MD5_DIGEST_LENGTH]; + MD5_CTX ctx; + struct pf_rule *rule; + int rs_cnt; + u_int8_t digest[PF_MD5_DIGEST_LENGTH]; MD5Init(&ctx); for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) { /* XXX PF_RULESET_SCRUB as well? 
*/ - if (rs_cnt == PF_RULESET_SCRUB) + if (rs_cnt == PF_RULESET_SCRUB) { continue; + } - if (rs->rules[rs_cnt].inactive.ptr_array) + if (rs->rules[rs_cnt].inactive.ptr_array) { _FREE(rs->rules[rs_cnt].inactive.ptr_array, M_TEMP); + } rs->rules[rs_cnt].inactive.ptr_array = NULL; if (rs->rules[rs_cnt].inactive.rcount) { rs->rules[rs_cnt].inactive.ptr_array = - _MALLOC(sizeof (caddr_t) * + _MALLOC(sizeof(caddr_t) * rs->rules[rs_cnt].inactive.rcount, M_TEMP, M_WAITOK); - if (!rs->rules[rs_cnt].inactive.ptr_array) - return (ENOMEM); + if (!rs->rules[rs_cnt].inactive.ptr_array) { + return ENOMEM; + } } TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr, @@ -1276,8 +1319,8 @@ pf_setup_pfsync_matching(struct pf_ruleset *rs) } MD5Final(digest, &ctx); - memcpy(pf_status.pf_chksum, digest, sizeof (pf_status.pf_chksum)); - return (0); + memcpy(pf_status.pf_chksum, digest, sizeof(pf_status.pf_chksum)); + return 0; } static void @@ -1320,11 +1363,12 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) int error = 0; int minordev = minor(dev); - if (kauth_cred_issuser(kauth_cred_get()) == 0) - return (EPERM); + if (kauth_cred_issuser(kauth_cred_get()) == 0) { + return EPERM; + } /* XXX keep in sync with switch() below */ - if (securelevel > 1) + if (securelevel > 1) { switch (cmd) { case DIOCGETRULES: case DIOCGETRULE: @@ -1373,17 +1417,19 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) int pfrio_flags; bcopy(&((struct pfioc_table *)(void *)addr)-> - pfrio_flags, &pfrio_flags, sizeof (pfrio_flags)); + pfrio_flags, &pfrio_flags, sizeof(pfrio_flags)); - if (pfrio_flags & PFR_FLAG_DUMMY) + if (pfrio_flags & PFR_FLAG_DUMMY) { break; /* dummy operation ok */ - return (EPERM); + } + return EPERM; } default: - return (EPERM); + return EPERM; } + } - if (!(flags & FWRITE)) + if (!(flags & FWRITE)) { switch (cmd) { case DIOCSTART: case DIOCSTARTREF: @@ -1428,37 +1474,39 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) int pfrio_flags; bcopy(&((struct pfioc_table *)(void *)addr)-> - pfrio_flags, &pfrio_flags, sizeof (pfrio_flags)); + pfrio_flags, &pfrio_flags, sizeof(pfrio_flags)); if (pfrio_flags & PFR_FLAG_DUMMY) { flags |= FWRITE; /* need write lock for dummy */ break; /* dummy operation ok */ } - return (EACCES); + return EACCES; } case DIOCGETRULE: { u_int32_t action; bcopy(&((struct pfioc_rule *)(void *)addr)->action, - &action, sizeof (action)); + &action, sizeof(action)); - if (action == PF_GET_CLR_CNTR) - return (EACCES); + if (action == PF_GET_CLR_CNTR) { + return EACCES; + } break; } default: - return (EACCES); + return EACCES; } + } - if (flags & FWRITE) + if (flags & FWRITE) { lck_rw_lock_exclusive(pf_perim_lock); - else + } else { lck_rw_lock_shared(pf_perim_lock); + } lck_mtx_lock(pf_lock); switch (cmd) { - case DIOCSTART: if (pf_status.running) { /* @@ -1481,7 +1529,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) } break; - case DIOCSTARTREF: /* u_int64_t */ + case DIOCSTARTREF: /* u_int64_t */ if (pf_purge_thread == NULL) { error = ENOMEM; } else { @@ -1499,7 +1547,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) DPFPRINTF(PF_DEBUG_URGENT, ("pf: unable to generate token\n")); } - bcopy(&token, addr, sizeof (token)); + bcopy(&token, addr, sizeof(token)); } break; @@ -1513,14 +1561,14 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) } break; - case DIOCSTOPREF: /* struct pfioc_remove_token */ + case DIOCSTOPREF: /* struct pfioc_remove_token */ 
if (!pf_status.running) { error = ENOENT; } else { struct pfioc_remove_token pfrt; /* small enough to be on stack */ - bcopy(addr, &pfrt, sizeof (pfrt)); + bcopy(addr, &pfrt, sizeof(pfrt)); if ((error = remove_token(&pfrt)) == 0) { VERIFY(pf_enabled_ref_count != 0); pf_enabled_ref_count--; @@ -1533,17 +1581,18 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) DPFPRINTF(PF_DEBUG_URGENT, ("pf: token mismatch\n")); } - bcopy(&pfrt, addr, sizeof (pfrt)); + bcopy(&pfrt, addr, sizeof(pfrt)); - if (error == 0 && pf_enabled_ref_count == 0) + if (error == 0 && pf_enabled_ref_count == 0) { pf_stop(); + } } break; - case DIOCGETSTARTERS: { /* struct pfioc_tokens */ + case DIOCGETSTARTERS: { /* struct pfioc_tokens */ PFIOCX_STRUCT_DECL(pfioc_tokens); - PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break;); + PFIOCX_STRUCT_BEGIN(addr, pfioc_tokens, error = ENOMEM; break; ); error = pfioctl_ioc_tokens(cmd, PFIOCX_STRUCT_ADDR32(pfioc_tokens), PFIOCX_STRUCT_ADDR64(pfioc_tokens), p); @@ -1551,44 +1600,44 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) break; } - case DIOCADDRULE: /* struct pfioc_rule */ - case DIOCGETRULES: /* struct pfioc_rule */ - case DIOCGETRULE: /* struct pfioc_rule */ - case DIOCCHANGERULE: /* struct pfioc_rule */ - case DIOCINSERTRULE: /* struct pfioc_rule */ - case DIOCDELETERULE: { /* struct pfioc_rule */ + case DIOCADDRULE: /* struct pfioc_rule */ + case DIOCGETRULES: /* struct pfioc_rule */ + case DIOCGETRULE: /* struct pfioc_rule */ + case DIOCCHANGERULE: /* struct pfioc_rule */ + case DIOCINSERTRULE: /* struct pfioc_rule */ + case DIOCDELETERULE: { /* struct pfioc_rule */ struct pfioc_rule *pr = NULL; - PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;); + PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; ); error = pfioctl_ioc_rule(cmd, minordev, pr, p); PFIOC_STRUCT_END(pr, addr); break; } - case DIOCCLRSTATES: /* struct pfioc_state_kill */ - case DIOCKILLSTATES: { /* struct pfioc_state_kill */ + case DIOCCLRSTATES: /* struct pfioc_state_kill */ + case DIOCKILLSTATES: { /* struct pfioc_state_kill */ struct pfioc_state_kill *psk = NULL; - PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break;); + PFIOC_STRUCT_BEGIN(addr, psk, error = ENOMEM; break; ); error = pfioctl_ioc_state_kill(cmd, psk, p); PFIOC_STRUCT_END(psk, addr); break; } - case DIOCADDSTATE: /* struct pfioc_state */ - case DIOCGETSTATE: { /* struct pfioc_state */ + case DIOCADDSTATE: /* struct pfioc_state */ + case DIOCGETSTATE: { /* struct pfioc_state */ struct pfioc_state *ps = NULL; - PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break;); + PFIOC_STRUCT_BEGIN(addr, ps, error = ENOMEM; break; ); error = pfioctl_ioc_state(cmd, ps, p); PFIOC_STRUCT_END(ps, addr); break; } - case DIOCGETSTATES: { /* struct pfioc_states */ + case DIOCGETSTATES: { /* struct pfioc_states */ PFIOCX_STRUCT_DECL(pfioc_states); - PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break;); + PFIOCX_STRUCT_BEGIN(addr, pfioc_states, error = ENOMEM; break; ); error = pfioctl_ioc_states(cmd, PFIOCX_STRUCT_ADDR32(pfioc_states), PFIOCX_STRUCT_ADDR64(pfioc_states), p); @@ -1596,17 +1645,17 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) break; } - case DIOCGETSTATUS: { /* struct pf_status */ + case DIOCGETSTATUS: { /* struct pf_status */ struct pf_status *s = NULL; - PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break;); + PFIOC_STRUCT_BEGIN(&pf_status, s, error = ENOMEM; break; ); pfi_update_status(s->ifname, s); PFIOC_STRUCT_END(s, addr); break; } 
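/*
 * Illustrative sketch of the style sweep applied throughout this file:
 * "sizeof (x)" becomes "sizeof(x)", "return (val);" becomes "return val;",
 * and every single-statement if/else/while body gains braces. The
 * before/after pair below is hypothetical standalone code, not drawn
 * from pf_ioctl.c itself:
 */
#if 0
/* before */
if (!pf_status.running)
        error = ENOENT;
return (error);

/* after */
if (!pf_status.running) {
        error = ENOENT;
}
return error;
#endif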
- case DIOCSETSTATUSIF: { /* struct pfioc_if */ - struct pfioc_if *pi = (struct pfioc_if *)(void *)addr; + case DIOCSETSTATUSIF: { /* struct pfioc_if */ + struct pfioc_if *pi = (struct pfioc_if *)(void *)addr; /* OK for unaligned accesses */ if (pi->ifname[0] == 0) { @@ -1618,55 +1667,56 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) } case DIOCCLRSTATUS: { - bzero(pf_status.counters, sizeof (pf_status.counters)); - bzero(pf_status.fcounters, sizeof (pf_status.fcounters)); - bzero(pf_status.scounters, sizeof (pf_status.scounters)); + bzero(pf_status.counters, sizeof(pf_status.counters)); + bzero(pf_status.fcounters, sizeof(pf_status.fcounters)); + bzero(pf_status.scounters, sizeof(pf_status.scounters)); pf_status.since = pf_calendar_time_second(); - if (*pf_status.ifname) + if (*pf_status.ifname) { pfi_update_status(pf_status.ifname, NULL); + } break; } - case DIOCNATLOOK: { /* struct pfioc_natlook */ + case DIOCNATLOOK: { /* struct pfioc_natlook */ struct pfioc_natlook *pnl = NULL; - PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break;); + PFIOC_STRUCT_BEGIN(addr, pnl, error = ENOMEM; break; ); error = pfioctl_ioc_natlook(cmd, pnl, p); PFIOC_STRUCT_END(pnl, addr); break; } - case DIOCSETTIMEOUT: /* struct pfioc_tm */ - case DIOCGETTIMEOUT: { /* struct pfioc_tm */ - struct pfioc_tm pt; + case DIOCSETTIMEOUT: /* struct pfioc_tm */ + case DIOCGETTIMEOUT: { /* struct pfioc_tm */ + struct pfioc_tm pt; /* small enough to be on stack */ - bcopy(addr, &pt, sizeof (pt)); + bcopy(addr, &pt, sizeof(pt)); error = pfioctl_ioc_tm(cmd, &pt, p); - bcopy(&pt, addr, sizeof (pt)); + bcopy(&pt, addr, sizeof(pt)); break; } - case DIOCGETLIMIT: /* struct pfioc_limit */ - case DIOCSETLIMIT: { /* struct pfioc_limit */ + case DIOCGETLIMIT: /* struct pfioc_limit */ + case DIOCSETLIMIT: { /* struct pfioc_limit */ struct pfioc_limit pl; /* small enough to be on stack */ - bcopy(addr, &pl, sizeof (pl)); + bcopy(addr, &pl, sizeof(pl)); error = pfioctl_ioc_limit(cmd, &pl, p); - bcopy(&pl, addr, sizeof (pl)); + bcopy(&pl, addr, sizeof(pl)); break; } - case DIOCSETDEBUG: { /* u_int32_t */ - bcopy(addr, &pf_status.debug, sizeof (u_int32_t)); + case DIOCSETDEBUG: { /* u_int32_t */ + bcopy(addr, &pf_status.debug, sizeof(u_int32_t)); break; } case DIOCCLRRULECTRS: { /* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */ - struct pf_ruleset *ruleset = &pf_main_ruleset; - struct pf_rule *rule; + struct pf_ruleset *ruleset = &pf_main_ruleset; + struct pf_rule *rule; TAILQ_FOREACH(rule, ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) { @@ -1691,7 +1741,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) if (ifp != NULL) { baudrate = ifp->if_output_bw.max_bw; bcopy(&baudrate, &psp->baudrate, - sizeof (baudrate)); + sizeof(baudrate)); } else { error = EINVAL; } @@ -1701,48 +1751,48 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) break; } - case DIOCBEGINADDRS: /* struct pfioc_pooladdr */ - case DIOCADDADDR: /* struct pfioc_pooladdr */ - case DIOCGETADDRS: /* struct pfioc_pooladdr */ - case DIOCGETADDR: /* struct pfioc_pooladdr */ - case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */ + case DIOCBEGINADDRS: /* struct pfioc_pooladdr */ + case DIOCADDADDR: /* struct pfioc_pooladdr */ + case DIOCGETADDRS: /* struct pfioc_pooladdr */ + case DIOCGETADDR: /* struct pfioc_pooladdr */ + case DIOCCHANGEADDR: { /* struct pfioc_pooladdr */ struct pfioc_pooladdr *pp = NULL; - PFIOC_STRUCT_BEGIN(addr, pp, error = ENOMEM; break;) + PFIOC_STRUCT_BEGIN(addr, 
pp, error = ENOMEM; break; ) error = pfioctl_ioc_pooladdr(cmd, pp, p); PFIOC_STRUCT_END(pp, addr); break; } - case DIOCGETRULESETS: /* struct pfioc_ruleset */ - case DIOCGETRULESET: { /* struct pfioc_ruleset */ + case DIOCGETRULESETS: /* struct pfioc_ruleset */ + case DIOCGETRULESET: { /* struct pfioc_ruleset */ struct pfioc_ruleset *pr = NULL; - PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break;); + PFIOC_STRUCT_BEGIN(addr, pr, error = ENOMEM; break; ); error = pfioctl_ioc_ruleset(cmd, pr, p); PFIOC_STRUCT_END(pr, addr); break; } - case DIOCRCLRTABLES: /* struct pfioc_table */ - case DIOCRADDTABLES: /* struct pfioc_table */ - case DIOCRDELTABLES: /* struct pfioc_table */ - case DIOCRGETTABLES: /* struct pfioc_table */ - case DIOCRGETTSTATS: /* struct pfioc_table */ - case DIOCRCLRTSTATS: /* struct pfioc_table */ - case DIOCRSETTFLAGS: /* struct pfioc_table */ - case DIOCRCLRADDRS: /* struct pfioc_table */ - case DIOCRADDADDRS: /* struct pfioc_table */ - case DIOCRDELADDRS: /* struct pfioc_table */ - case DIOCRSETADDRS: /* struct pfioc_table */ - case DIOCRGETADDRS: /* struct pfioc_table */ - case DIOCRGETASTATS: /* struct pfioc_table */ - case DIOCRCLRASTATS: /* struct pfioc_table */ - case DIOCRTSTADDRS: /* struct pfioc_table */ - case DIOCRINADEFINE: { /* struct pfioc_table */ + case DIOCRCLRTABLES: /* struct pfioc_table */ + case DIOCRADDTABLES: /* struct pfioc_table */ + case DIOCRDELTABLES: /* struct pfioc_table */ + case DIOCRGETTABLES: /* struct pfioc_table */ + case DIOCRGETTSTATS: /* struct pfioc_table */ + case DIOCRCLRTSTATS: /* struct pfioc_table */ + case DIOCRSETTFLAGS: /* struct pfioc_table */ + case DIOCRCLRADDRS: /* struct pfioc_table */ + case DIOCRADDADDRS: /* struct pfioc_table */ + case DIOCRDELADDRS: /* struct pfioc_table */ + case DIOCRSETADDRS: /* struct pfioc_table */ + case DIOCRGETADDRS: /* struct pfioc_table */ + case DIOCRGETASTATS: /* struct pfioc_table */ + case DIOCRCLRASTATS: /* struct pfioc_table */ + case DIOCRTSTADDRS: /* struct pfioc_table */ + case DIOCRINADEFINE: { /* struct pfioc_table */ PFIOCX_STRUCT_DECL(pfioc_table); - PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break;); + PFIOCX_STRUCT_BEGIN(addr, pfioc_table, error = ENOMEM; break; ); error = pfioctl_ioc_table(cmd, PFIOCX_STRUCT_ADDR32(pfioc_table), PFIOCX_STRUCT_ADDR64(pfioc_table), p); @@ -1750,11 +1800,11 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) break; } - case DIOCOSFPADD: /* struct pf_osfp_ioctl */ - case DIOCOSFPGET: { /* struct pf_osfp_ioctl */ + case DIOCOSFPADD: /* struct pf_osfp_ioctl */ + case DIOCOSFPGET: { /* struct pf_osfp_ioctl */ struct pf_osfp_ioctl *io = NULL; - PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break;); + PFIOC_STRUCT_BEGIN(addr, io, error = ENOMEM; break; ); if (cmd == DIOCOSFPADD) { error = pf_osfp_add(io); } else { @@ -1765,12 +1815,12 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) break; } - case DIOCXBEGIN: /* struct pfioc_trans */ - case DIOCXROLLBACK: /* struct pfioc_trans */ - case DIOCXCOMMIT: { /* struct pfioc_trans */ + case DIOCXBEGIN: /* struct pfioc_trans */ + case DIOCXROLLBACK: /* struct pfioc_trans */ + case DIOCXCOMMIT: { /* struct pfioc_trans */ PFIOCX_STRUCT_DECL(pfioc_trans); - PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break;); + PFIOCX_STRUCT_BEGIN(addr, pfioc_trans, error = ENOMEM; break; ); error = pfioctl_ioc_trans(cmd, PFIOCX_STRUCT_ADDR32(pfioc_trans), PFIOCX_STRUCT_ADDR64(pfioc_trans), p); @@ -1778,11 +1828,11 @@ pfioctl(dev_t dev, u_long cmd, caddr_t 
addr, int flags, struct proc *p) break; } - case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */ + case DIOCGETSRCNODES: { /* struct pfioc_src_nodes */ PFIOCX_STRUCT_DECL(pfioc_src_nodes); PFIOCX_STRUCT_BEGIN(addr, pfioc_src_nodes, - error = ENOMEM; break;); + error = ENOMEM; break; ); error = pfioctl_ioc_src_nodes(cmd, PFIOCX_STRUCT_ADDR32(pfioc_src_nodes), PFIOCX_STRUCT_ADDR64(pfioc_src_nodes), p); @@ -1791,8 +1841,8 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) } case DIOCCLRSRCNODES: { - struct pf_src_node *n; - struct pf_state *state; + struct pf_src_node *n; + struct pf_state *state; RB_FOREACH(state, pf_state_tree_id, &tree_id) { state->src_node = NULL; @@ -1807,24 +1857,25 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) break; } - case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */ + case DIOCKILLSRCNODES: { /* struct pfioc_src_node_kill */ struct pfioc_src_node_kill *psnk = NULL; - PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break;); + PFIOC_STRUCT_BEGIN(addr, psnk, error = ENOMEM; break; ); error = pfioctl_ioc_src_node_kill(cmd, psnk, p); PFIOC_STRUCT_END(psnk, addr); break; } - case DIOCSETHOSTID: { /* u_int32_t */ + case DIOCSETHOSTID: { /* u_int32_t */ u_int32_t hid; /* small enough to be on stack */ - bcopy(addr, &hid, sizeof (hid)); - if (hid == 0) + bcopy(addr, &hid, sizeof(hid)); + if (hid == 0) { pf_status.hostid = random(); - else + } else { pf_status.hostid = hid; + } break; } @@ -1832,12 +1883,12 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) pf_osfp_flush(); break; - case DIOCIGETIFACES: /* struct pfioc_iface */ - case DIOCSETIFFLAG: /* struct pfioc_iface */ - case DIOCCLRIFFLAG: { /* struct pfioc_iface */ + case DIOCIGETIFACES: /* struct pfioc_iface */ + case DIOCSETIFFLAG: /* struct pfioc_iface */ + case DIOCCLRIFFLAG: { /* struct pfioc_iface */ PFIOCX_STRUCT_DECL(pfioc_iface); - PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break;); + PFIOCX_STRUCT_BEGIN(addr, pfioc_iface, error = ENOMEM; break; ); error = pfioctl_ioc_iface(cmd, PFIOCX_STRUCT_ADDR32(pfioc_iface), PFIOCX_STRUCT_ADDR64(pfioc_iface), p); @@ -1853,7 +1904,7 @@ pfioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct proc *p) lck_mtx_unlock(pf_lock); lck_rw_done(pf_perim_lock); - return (error); + return error; } static int @@ -1863,8 +1914,9 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, int p64 = proc_is64bit(p); int error = 0; - if (!p64) + if (!p64) { goto struct32; + } /* * 64-bit structure processing @@ -1881,7 +1933,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRADDTABLES: - if (io64->pfrio_esize != sizeof (struct pfr_table)) { + if (io64->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -1890,7 +1942,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRDELTABLES: - if (io64->pfrio_esize != sizeof (struct pfr_table)) { + if (io64->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -1899,7 +1951,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRGETTABLES: - if (io64->pfrio_esize != sizeof (struct pfr_table)) { + if (io64->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -1909,7 +1961,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRGETTSTATS: - if (io64->pfrio_esize != sizeof (struct pfr_tstats)) { + if (io64->pfrio_esize != sizeof(struct pfr_tstats)) { error = ENODEV; break; } @@ -1919,7 
+1971,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRCLRTSTATS: - if (io64->pfrio_esize != sizeof (struct pfr_table)) { + if (io64->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -1928,7 +1980,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRSETTFLAGS: - if (io64->pfrio_esize != sizeof (struct pfr_table)) { + if (io64->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -1949,7 +2001,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRADDADDRS: - if (io64->pfrio_esize != sizeof (struct pfr_addr)) { + if (io64->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -1960,7 +2012,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRDELADDRS: - if (io64->pfrio_esize != sizeof (struct pfr_addr)) { + if (io64->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -1971,7 +2023,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRSETADDRS: - if (io64->pfrio_esize != sizeof (struct pfr_addr)) { + if (io64->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -1983,7 +2035,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRGETADDRS: - if (io64->pfrio_esize != sizeof (struct pfr_addr)) { + if (io64->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -1993,7 +2045,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRGETASTATS: - if (io64->pfrio_esize != sizeof (struct pfr_astats)) { + if (io64->pfrio_esize != sizeof(struct pfr_astats)) { error = ENODEV; break; } @@ -2003,7 +2055,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRCLRASTATS: - if (io64->pfrio_esize != sizeof (struct pfr_addr)) { + if (io64->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2014,7 +2066,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRTSTADDRS: - if (io64->pfrio_esize != sizeof (struct pfr_addr)) { + if (io64->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2025,7 +2077,7 @@ pfioctl_ioc_table(u_long cmd, struct pfioc_table_32 *io32, break; case DIOCRINADEFINE: - if (io64->pfrio_esize != sizeof (struct pfr_addr)) { + if (io64->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2057,7 +2109,7 @@ struct32: break; case DIOCRADDTABLES: - if (io32->pfrio_esize != sizeof (struct pfr_table)) { + if (io32->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -2066,7 +2118,7 @@ struct32: break; case DIOCRDELTABLES: - if (io32->pfrio_esize != sizeof (struct pfr_table)) { + if (io32->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -2075,7 +2127,7 @@ struct32: break; case DIOCRGETTABLES: - if (io32->pfrio_esize != sizeof (struct pfr_table)) { + if (io32->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -2085,7 +2137,7 @@ struct32: break; case DIOCRGETTSTATS: - if (io32->pfrio_esize != sizeof (struct pfr_tstats)) { + if (io32->pfrio_esize != sizeof(struct pfr_tstats)) { error = ENODEV; break; } @@ -2095,7 +2147,7 @@ struct32: break; case DIOCRCLRTSTATS: - if (io32->pfrio_esize != sizeof (struct pfr_table)) { + if (io32->pfrio_esize != sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -2104,7 +2156,7 @@ struct32: break; case DIOCRSETTFLAGS: - if (io32->pfrio_esize != sizeof (struct pfr_table)) { + if (io32->pfrio_esize != 
sizeof(struct pfr_table)) { error = ENODEV; break; } @@ -2125,7 +2177,7 @@ struct32: break; case DIOCRADDADDRS: - if (io32->pfrio_esize != sizeof (struct pfr_addr)) { + if (io32->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2136,7 +2188,7 @@ struct32: break; case DIOCRDELADDRS: - if (io32->pfrio_esize != sizeof (struct pfr_addr)) { + if (io32->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2147,7 +2199,7 @@ struct32: break; case DIOCRSETADDRS: - if (io32->pfrio_esize != sizeof (struct pfr_addr)) { + if (io32->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2159,7 +2211,7 @@ struct32: break; case DIOCRGETADDRS: - if (io32->pfrio_esize != sizeof (struct pfr_addr)) { + if (io32->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2169,7 +2221,7 @@ struct32: break; case DIOCRGETASTATS: - if (io32->pfrio_esize != sizeof (struct pfr_astats)) { + if (io32->pfrio_esize != sizeof(struct pfr_astats)) { error = ENODEV; break; } @@ -2179,7 +2231,7 @@ struct32: break; case DIOCRCLRASTATS: - if (io32->pfrio_esize != sizeof (struct pfr_addr)) { + if (io32->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2190,7 +2242,7 @@ struct32: break; case DIOCRTSTADDRS: - if (io32->pfrio_esize != sizeof (struct pfr_addr)) { + if (io32->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2201,7 +2253,7 @@ struct32: break; case DIOCRINADEFINE: - if (io32->pfrio_esize != sizeof (struct pfr_addr)) { + if (io32->pfrio_esize != sizeof(struct pfr_addr)) { error = ENODEV; break; } @@ -2217,7 +2269,7 @@ struct32: } done: - return (error); + return error; } static int @@ -2239,18 +2291,19 @@ pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32, break; } - size = sizeof (struct pfioc_token) * nr_tokens; + size = sizeof(struct pfioc_token) * nr_tokens; ocnt = cnt = (p64 ? tok64->size : tok32->size); if (cnt == 0) { - if (p64) + if (p64) { tok64->size = size; - else + } else { tok32->size = size; + } break; } token_buf = (p64 ? 
tok64->pgt_buf : tok32->pgt_buf); - tokens = _MALLOC(size, M_TEMP, M_WAITOK|M_ZERO); + tokens = _MALLOC(size, M_TEMP, M_WAITOK | M_ZERO); if (tokens == NULL) { error = ENOMEM; break; @@ -2260,27 +2313,29 @@ pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32, SLIST_FOREACH_SAFE(entry, &token_list_head, next, tmp) { struct pfioc_token *t; - if ((unsigned)cnt < sizeof (*tokens)) + if ((unsigned)cnt < sizeof(*tokens)) { break; /* no more buffer space left */ - + } t = (struct pfioc_token *)(void *)ptr; - t->token_value = entry->token.token_value; - t->timestamp = entry->token.timestamp; - t->pid = entry->token.pid; + t->token_value = entry->token.token_value; + t->timestamp = entry->token.timestamp; + t->pid = entry->token.pid; bcopy(entry->token.proc_name, t->proc_name, PFTOK_PROCNAME_LEN); - ptr += sizeof (struct pfioc_token); + ptr += sizeof(struct pfioc_token); - cnt -= sizeof (struct pfioc_token); + cnt -= sizeof(struct pfioc_token); } - if (cnt < ocnt) + if (cnt < ocnt) { error = copyout(tokens, token_buf, ocnt - cnt); + } - if (p64) + if (p64) { tok64->size = ocnt - cnt; - else + } else { tok32->size = ocnt - cnt; + } _FREE(tokens, M_TEMP); break; @@ -2291,44 +2346,49 @@ pfioctl_ioc_tokens(u_long cmd, struct pfioc_tokens_32 *tok32, /* NOTREACHED */ } - return (error); + return error; } static void pf_expire_states_and_src_nodes(struct pf_rule *rule) { - struct pf_state *state; - struct pf_src_node *sn; - int killed = 0; + struct pf_state *state; + struct pf_src_node *sn; + int killed = 0; /* expire the states */ state = TAILQ_FIRST(&state_list); while (state) { - if (state->rule.ptr == rule) + if (state->rule.ptr == rule) { state->timeout = PFTM_PURGE; + } state = TAILQ_NEXT(state, entry_list); } pf_purge_expired_states(pf_status.states); /* expire the src_nodes */ RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { - if (sn->rule.ptr != rule) + if (sn->rule.ptr != rule) { continue; + } if (sn->states != 0) { RB_FOREACH(state, pf_state_tree_id, &tree_id) { - if (state->src_node == sn) + if (state->src_node == sn) { state->src_node = NULL; - if (state->nat_src_node == sn) + } + if (state->nat_src_node == sn) { state->nat_src_node = NULL; + } } sn->states = 0; } sn->expire = 1; killed++; } - if (killed) + if (killed) { pf_purge_expired_src_nodes(); + } } static void @@ -2341,8 +2401,9 @@ pf_delete_rule_from_ruleset(struct pf_ruleset *ruleset, int rs_num, pf_expire_states_and_src_nodes(rule); pf_rm_rule(ruleset->rules[rs_num].active.ptr, rule); - if (ruleset->rules[rs_num].active.rcount-- == 0) + if (ruleset->rules[rs_num].active.rcount-- == 0) { panic("%s: rcount value broken!", __func__); + } r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); while (r) { @@ -2367,29 +2428,33 @@ pf_ruleset_cleanup(struct pf_ruleset *ruleset, int rs) static int pf_delete_rule_by_ticket(struct pfioc_rule *pr, u_int32_t req_dev) { - struct pf_ruleset *ruleset; - struct pf_rule *rule = NULL; - int is_anchor; - int error; - int i; + struct pf_ruleset *ruleset; + struct pf_rule *rule = NULL; + int is_anchor; + int error; + int i; is_anchor = (pr->anchor_call[0] != '\0'); if ((ruleset = pf_find_ruleset_with_owner(pr->anchor, - pr->rule.owner, is_anchor, &error)) == NULL) - return (error); + pr->rule.owner, is_anchor, &error)) == NULL) { + return error; + } for (i = 0; i < PF_RULESET_MAX && rule == NULL; i++) { rule = TAILQ_FIRST(ruleset->rules[i].active.ptr); - while (rule && (rule->ticket != pr->rule.ticket)) + while (rule && (rule->ticket != pr->rule.ticket)) { rule = TAILQ_NEXT(rule, entries); + } } 
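/*
 * Illustrative sketch of the dual-ABI shape shared by the
 * pfioctl_ioc_table() and pfioctl_ioc_tokens() helpers above: each ioctl
 * payload has a 32-bit and a 64-bit userland layout, proc_is64bit(p)
 * selects the live one, and the element size advertised by userland
 * (pfrio_esize and friends) is checked against the kernel's sizeof
 * before anything is copied. The example_* names below are hypothetical;
 * proc_is64bit() is the real KPI used in the hunks above.
 */
#if 0
struct example_rec  { int v; };
struct example_io32 { u_int32_t esize; user32_addr_t buf; };
struct example_io64 { u_int32_t esize; user64_addr_t buf; };

static int
example_ioc(struct example_io32 *io32, struct example_io64 *io64,
    struct proc *p)
{
        int error = 0;

        if (proc_is64bit(p)) {
                /* validate the 64-bit layout before touching io64->buf */
                if (io64->esize != sizeof(struct example_rec)) {
                        error = ENODEV;
                }
        } else {
                /* same check against the 32-bit layout */
                if (io32->esize != sizeof(struct example_rec)) {
                        error = ENODEV;
                }
        }
        return error;
}
#endif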
- if (rule == NULL) - return (ENOENT); - else + if (rule == NULL) { + return ENOENT; + } else { i--; + } - if (strcmp(rule->owner, pr->rule.owner)) - return (EACCES); + if (strcmp(rule->owner, pr->rule.owner)) { + return EACCES; + } delete_rule: if (rule->anchor && (ruleset != &pf_main_ruleset) && @@ -2399,18 +2464,21 @@ delete_rule: struct pf_rule *delete_rule = rule; struct pf_ruleset *delete_ruleset = ruleset; -#define parent_ruleset ruleset->anchor->parent->ruleset - if (ruleset->anchor->parent == NULL) +#define parent_ruleset ruleset->anchor->parent->ruleset + if (ruleset->anchor->parent == NULL) { ruleset = &pf_main_ruleset; - else + } else { ruleset = &parent_ruleset; + } rule = TAILQ_FIRST(ruleset->rules[i].active.ptr); while (rule && - (rule->anchor != delete_ruleset->anchor)) + (rule->anchor != delete_ruleset->anchor)) { rule = TAILQ_NEXT(rule, entries); - if (rule == NULL) + } + if (rule == NULL) { panic("%s: rule not found!", __func__); + } /* * if reqest device != rule's device, bail : @@ -2419,7 +2487,7 @@ delete_rule: */ if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) { if (rule->ticket != pr->rule.ticket) { - return (0); + return 0; } else { return EACCES; } @@ -2439,16 +2507,18 @@ delete_rule: * process deleting rule only if device that added the * rule matches device that issued the request */ - if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) + if ((rule->rule_flag & PFRULE_PFM) ^ req_dev) { return EACCES; - if (rule->rule_flag & PFRULE_PFM) + } + if (rule->rule_flag & PFRULE_PFM) { pffwrules--; + } pf_delete_rule_from_ruleset(ruleset, i, rule); pf_ruleset_cleanup(ruleset, i); } - return (0); + return 0; } /* @@ -2458,9 +2528,9 @@ delete_rule: static void pf_delete_rule_by_owner(char *owner, u_int32_t req_dev) { - struct pf_ruleset *ruleset; - struct pf_rule *rule, *next; - int deleted = 0; + struct pf_ruleset *ruleset; + struct pf_rule *rule, *next; + int deleted = 0; for (int rs = 0; rs < PF_RULESET_MAX; rs++) { rule = TAILQ_FIRST(pf_main_ruleset.rules[rs].active.ptr); @@ -2490,19 +2560,22 @@ pf_delete_rule_by_owner(char *owner, u_int32_t req_dev) continue; } else { if (rule->rule_flag & - PFRULE_PFM) + PFRULE_PFM) { pffwrules--; + } pf_delete_rule_from_ruleset(ruleset, rs, rule); deleted = 1; rule = next; } - } else + } else { rule = next; + } } else { if (((strcmp(rule->owner, owner)) == 0)) { /* delete rule */ - if (rule->rule_flag & PFRULE_PFM) + if (rule->rule_flag & PFRULE_PFM) { pffwrules--; + } pf_delete_rule_from_ruleset(ruleset, rs, rule); deleted = 1; @@ -2514,9 +2587,10 @@ pf_delete_rule_by_owner(char *owner, u_int32_t req_dev) pf_ruleset_cleanup(ruleset, rs); deleted = 0; } - if (ruleset != &pf_main_ruleset) + if (ruleset != &pf_main_ruleset) { pf_deleterule_anchor_step_out(&ruleset, rs, &rule); + } } } } @@ -2535,12 +2609,15 @@ pf_deleterule_anchor_step_out(struct pf_ruleset **ruleset_ptr, &ruleset->anchor->parent->ruleset:&pf_main_ruleset; rule = TAILQ_FIRST(ruleset->rules[rs].active.ptr); - while (rule && (rule->anchor != rs_copy->anchor)) + while (rule && (rule->anchor != rs_copy->anchor)) { rule = TAILQ_NEXT(rule, entries); - if (rule == NULL) + } + if (rule == NULL) { panic("%s: parent rule of anchor not found!", __func__); - if (rule->anchor->ruleset.rules[rs].active.rcount > 0) + } + if (rule->anchor->ruleset.rules[rs].active.rcount > 0) { rule = TAILQ_NEXT(rule, entries); + } *ruleset_ptr = ruleset; *rule_ptr = rule; @@ -2555,59 +2632,75 @@ pf_addrwrap_setup(struct pf_addr_wrap *aw) static int pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule, 
- struct pf_ruleset *ruleset) { - struct pf_pooladdr *apa; - int error = 0; + struct pf_ruleset *ruleset) +{ + struct pf_pooladdr *apa; + int error = 0; if (rule->ifname[0]) { rule->kif = pfi_kif_get(rule->ifname); if (rule->kif == NULL) { pool_put(&pf_rule_pl, rule); - return (EINVAL); + return EINVAL; } pfi_kif_ref(rule->kif, PFI_KIF_REF_RULE); } - if (rule->tagname[0]) - if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) + if (rule->tagname[0]) { + if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0) { error = EBUSY; - if (rule->match_tagname[0]) + } + } + if (rule->match_tagname[0]) { if ((rule->match_tag = - pf_tagname2tag(rule->match_tagname)) == 0) + pf_tagname2tag(rule->match_tagname)) == 0) { error = EBUSY; - if (rule->rt && !rule->direction) + } + } + if (rule->rt && !rule->direction) { error = EINVAL; + } #if PFLOG - if (!rule->log) + if (!rule->log) { rule->logif = 0; - if (rule->logif >= PFLOGIFS_MAX) + } + if (rule->logif >= PFLOGIFS_MAX) { error = EINVAL; + } #endif /* PFLOG */ pf_addrwrap_setup(&rule->src.addr); pf_addrwrap_setup(&rule->dst.addr); if (pf_rtlabel_add(&rule->src.addr) || - pf_rtlabel_add(&rule->dst.addr)) + pf_rtlabel_add(&rule->dst.addr)) { error = EBUSY; - if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) + } + if (pfi_dynaddr_setup(&rule->src.addr, rule->af)) { error = EINVAL; - if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) + } + if (pfi_dynaddr_setup(&rule->dst.addr, rule->af)) { error = EINVAL; - if (pf_tbladdr_setup(ruleset, &rule->src.addr)) + } + if (pf_tbladdr_setup(ruleset, &rule->src.addr)) { error = EINVAL; - if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) + } + if (pf_tbladdr_setup(ruleset, &rule->dst.addr)) { error = EINVAL; - if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) + } + if (pf_anchor_setup(rule, ruleset, pr->anchor_call)) { error = EINVAL; + } TAILQ_FOREACH(apa, &pf_pabuf, entries) - if (pf_tbladdr_setup(ruleset, &apa->addr)) - error = EINVAL; + if (pf_tbladdr_setup(ruleset, &apa->addr)) { + error = EINVAL; + } if (rule->overload_tblname[0]) { if ((rule->overload_tbl = pfr_attach_table(ruleset, - rule->overload_tblname)) == NULL) + rule->overload_tblname)) == NULL) { error = EINVAL; - else + } else { rule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; + } } pf_mv_pool(&pf_pabuf, &rule->rpool.list); @@ -2616,12 +2709,13 @@ pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule, (rule->action == PF_BINAT) || (rule->action == PF_NAT64)) && rule->anchor == NULL) || (rule->rt > PF_FASTROUTE)) && - (TAILQ_FIRST(&rule->rpool.list) == NULL)) + (TAILQ_FIRST(&rule->rpool.list) == NULL)) { error = EINVAL; + } if (error) { pf_rm_rule(NULL, rule); - return (error); + return error; } /* For a NAT64 rule the rule's address family is AF_INET6 whereas * the address pool's family will be AF_INET @@ -2631,7 +2725,7 @@ pf_rule_setup(struct pfioc_rule *pr, struct pf_rule *rule, rule->evaluations = rule->packets[0] = rule->packets[1] = rule->bytes[0] = rule->bytes[1] = 0; - return (0); + return 0; } static int @@ -2642,12 +2736,12 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p switch (cmd) { case DIOCADDRULE: { - struct pf_ruleset *ruleset; - struct pf_rule *rule, *tail; - int rs_num; + struct pf_ruleset *ruleset; + struct pf_rule *rule, *tail; + int rs_num; - pr->anchor[sizeof (pr->anchor) - 1] = '\0'; - pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; + pr->anchor[sizeof(pr->anchor) - 1] = '\0'; + pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0'; ruleset = pf_find_ruleset(pr->anchor); if 
(ruleset == NULL) { error = EINVAL; @@ -2692,22 +2786,26 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p #endif /* INET6 */ tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr, pf_rulequeue); - if (tail) + if (tail) { rule->nr = tail->nr + 1; - else + } else { rule->nr = 0; + } - if ((error = pf_rule_setup(pr, rule, ruleset))) + if ((error = pf_rule_setup(pr, rule, ruleset))) { break; + } TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr, rule, entries); ruleset->rules[rs_num].inactive.rcount++; - if (rule->rule_flag & PFRULE_PFM) + if (rule->rule_flag & PFRULE_PFM) { pffwrules++; + } - if (rule->action == PF_NAT64) + if (rule->action == PF_NAT64) { atomic_add_16(&pf_nat64_configured, 1); + } if (pr->anchor_call[0] == '\0') { INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total); @@ -2724,10 +2822,11 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p dn_event.dn_event_code = DUMMYNET_RULE_CONFIG; - if (rule->direction == PF_IN) + if (rule->direction == PF_IN) { direction = DN_IN; - else if (rule->direction == PF_OUT) + } else if (rule->direction == PF_OUT) { direction = DN_OUT; + } dn_event.dn_event_rule_config.dir = direction; dn_event.dn_event_rule_config.af = rule->af; @@ -2744,12 +2843,12 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p } case DIOCGETRULES: { - struct pf_ruleset *ruleset; - struct pf_rule *tail; - int rs_num; + struct pf_ruleset *ruleset; + struct pf_rule *tail; + int rs_num; - pr->anchor[sizeof (pr->anchor) - 1] = '\0'; - pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; + pr->anchor[sizeof(pr->anchor) - 1] = '\0'; + pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0'; ruleset = pf_find_ruleset(pr->anchor); if (ruleset == NULL) { error = EINVAL; @@ -2762,21 +2861,22 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p } tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr, pf_rulequeue); - if (tail) + if (tail) { pr->nr = tail->nr + 1; - else + } else { pr->nr = 0; + } pr->ticket = ruleset->rules[rs_num].active.ticket; break; } case DIOCGETRULE: { - struct pf_ruleset *ruleset; - struct pf_rule *rule; - int rs_num, i; + struct pf_ruleset *ruleset; + struct pf_rule *rule; + int rs_num, i; - pr->anchor[sizeof (pr->anchor) - 1] = '\0'; - pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; + pr->anchor[sizeof(pr->anchor) - 1] = '\0'; + pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0'; ruleset = pf_find_ruleset(pr->anchor); if (ruleset == NULL) { error = EINVAL; @@ -2792,8 +2892,9 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p break; } rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); - while ((rule != NULL) && (rule->nr != pr->nr)) + while ((rule != NULL) && (rule->nr != pr->nr)) { rule = TAILQ_NEXT(rule, entries); + } if (rule == NULL) { error = EBUSY; break; @@ -2809,12 +2910,14 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p pf_tbladdr_copyout(&pr->rule.dst.addr); pf_rtlabel_copyout(&pr->rule.src.addr); pf_rtlabel_copyout(&pr->rule.dst.addr); - for (i = 0; i < PF_SKIP_COUNT; ++i) - if (rule->skip[i].ptr == NULL) + for (i = 0; i < PF_SKIP_COUNT; ++i) { + if (rule->skip[i].ptr == NULL) { pr->rule.skip[i].nr = -1; - else + } else { pr->rule.skip[i].nr = rule->skip[i].ptr->nr; + } + } if (pr->action == PF_GET_CLR_CNTR) { rule->evaluations = 0; @@ -2825,12 +2928,12 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p } 
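/*
 * Illustrative sketch of the defensive idiom repeated at the top of the
 * rule ioctls above, e.g.
 *
 *     pr->anchor[sizeof(pr->anchor) - 1] = '\0';
 *
 * pfioc_* structures arrive from userland as raw bytes, so their
 * fixed-size name fields may lack a terminator; clamping the final byte
 * makes later strcmp()/strlcpy() use safe. The helper name below is
 * hypothetical.
 */
#if 0
static void
terminate_user_name(char name[], size_t namelen)
{
        /* force NUL termination of an untrusted fixed-size buffer */
        name[namelen - 1] = '\0';
}
#endif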
case DIOCCHANGERULE: { - struct pfioc_rule *pcr = pr; - struct pf_ruleset *ruleset; - struct pf_rule *oldrule = NULL, *newrule = NULL; - struct pf_pooladdr *pa; - u_int32_t nr = 0; - int rs_num; + struct pfioc_rule *pcr = pr; + struct pf_ruleset *ruleset; + struct pf_rule *oldrule = NULL, *newrule = NULL; + struct pf_pooladdr *pa; + u_int32_t nr = 0; + int rs_num; if (!(pcr->action == PF_CHANGE_REMOVE || pcr->action == PF_CHANGE_GET_TICKET) && @@ -2844,8 +2947,8 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p error = EINVAL; break; } - pcr->anchor[sizeof (pcr->anchor) - 1] = '\0'; - pcr->anchor_call[sizeof (pcr->anchor_call) - 1] = '\0'; + pcr->anchor[sizeof(pcr->anchor) - 1] = '\0'; + pcr->anchor_call[sizeof(pcr->anchor_call) - 1] = '\0'; ruleset = pf_find_ruleset(pcr->anchor); if (ruleset == NULL) { error = EINVAL; @@ -2901,52 +3004,68 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p break; } pfi_kif_ref(newrule->kif, PFI_KIF_REF_RULE); - } else + } else { newrule->kif = NULL; + } - if (newrule->tagname[0]) + if (newrule->tagname[0]) { if ((newrule->tag = - pf_tagname2tag(newrule->tagname)) == 0) + pf_tagname2tag(newrule->tagname)) == 0) { error = EBUSY; - if (newrule->match_tagname[0]) + } + } + if (newrule->match_tagname[0]) { if ((newrule->match_tag = pf_tagname2tag( - newrule->match_tagname)) == 0) + newrule->match_tagname)) == 0) { error = EBUSY; - if (newrule->rt && !newrule->direction) + } + } + if (newrule->rt && !newrule->direction) { error = EINVAL; + } #if PFLOG - if (!newrule->log) + if (!newrule->log) { newrule->logif = 0; - if (newrule->logif >= PFLOGIFS_MAX) + } + if (newrule->logif >= PFLOGIFS_MAX) { error = EINVAL; + } #endif /* PFLOG */ pf_addrwrap_setup(&newrule->src.addr); pf_addrwrap_setup(&newrule->dst.addr); if (pf_rtlabel_add(&newrule->src.addr) || - pf_rtlabel_add(&newrule->dst.addr)) + pf_rtlabel_add(&newrule->dst.addr)) { error = EBUSY; - if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) + } + if (pfi_dynaddr_setup(&newrule->src.addr, newrule->af)) { error = EINVAL; - if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) + } + if (pfi_dynaddr_setup(&newrule->dst.addr, newrule->af)) { error = EINVAL; - if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) + } + if (pf_tbladdr_setup(ruleset, &newrule->src.addr)) { error = EINVAL; - if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) + } + if (pf_tbladdr_setup(ruleset, &newrule->dst.addr)) { error = EINVAL; - if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) + } + if (pf_anchor_setup(newrule, ruleset, pcr->anchor_call)) { error = EINVAL; + } TAILQ_FOREACH(pa, &pf_pabuf, entries) - if (pf_tbladdr_setup(ruleset, &pa->addr)) - error = EINVAL; + if (pf_tbladdr_setup(ruleset, &pa->addr)) { + error = EINVAL; + } if (newrule->overload_tblname[0]) { if ((newrule->overload_tbl = pfr_attach_table( - ruleset, newrule->overload_tblname)) == - NULL) + ruleset, newrule->overload_tblname)) == + NULL) { error = EINVAL; - else + } else { newrule->overload_tbl->pfrkt_flags |= PFR_TFLAG_ACTIVE; + } } pf_mv_pool(&pf_pabuf, &newrule->rpool.list); @@ -2955,8 +3074,9 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p (newrule->action == PF_BINAT) || (newrule->rt > PF_FASTROUTE)) && !newrule->anchor)) && - (TAILQ_FIRST(&newrule->rpool.list) == NULL)) + (TAILQ_FIRST(&newrule->rpool.list) == NULL)) { error = EINVAL; + } if (error) { pf_rm_rule(NULL, newrule); @@ -2969,20 +3089,22 @@ pfioctl_ioc_rule(u_long cmd, int minordev, 
struct pfioc_rule *pr, struct proc *p } pf_empty_pool(&pf_pabuf); - if (pcr->action == PF_CHANGE_ADD_HEAD) + if (pcr->action == PF_CHANGE_ADD_HEAD) { oldrule = TAILQ_FIRST( - ruleset->rules[rs_num].active.ptr); - else if (pcr->action == PF_CHANGE_ADD_TAIL) + ruleset->rules[rs_num].active.ptr); + } else if (pcr->action == PF_CHANGE_ADD_TAIL) { oldrule = TAILQ_LAST( - ruleset->rules[rs_num].active.ptr, pf_rulequeue); - else { + ruleset->rules[rs_num].active.ptr, pf_rulequeue); + } else { oldrule = TAILQ_FIRST( - ruleset->rules[rs_num].active.ptr); - while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) + ruleset->rules[rs_num].active.ptr); + while ((oldrule != NULL) && (oldrule->nr != pcr->nr)) { oldrule = TAILQ_NEXT(oldrule, entries); + } if (oldrule == NULL) { - if (newrule != NULL) + if (newrule != NULL) { pf_rm_rule(NULL, newrule); + } error = EINVAL; break; } @@ -2992,24 +3114,25 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p pf_rm_rule(ruleset->rules[rs_num].active.ptr, oldrule); ruleset->rules[rs_num].active.rcount--; } else { - if (oldrule == NULL) + if (oldrule == NULL) { TAILQ_INSERT_TAIL( - ruleset->rules[rs_num].active.ptr, - newrule, entries); - else if (pcr->action == PF_CHANGE_ADD_HEAD || - pcr->action == PF_CHANGE_ADD_BEFORE) + ruleset->rules[rs_num].active.ptr, + newrule, entries); + } else if (pcr->action == PF_CHANGE_ADD_HEAD || + pcr->action == PF_CHANGE_ADD_BEFORE) { TAILQ_INSERT_BEFORE(oldrule, newrule, entries); - else + } else { TAILQ_INSERT_AFTER( - ruleset->rules[rs_num].active.ptr, - oldrule, newrule, entries); + ruleset->rules[rs_num].active.ptr, + oldrule, newrule, entries); + } ruleset->rules[rs_num].active.rcount++; } nr = 0; TAILQ_FOREACH(oldrule, ruleset->rules[rs_num].active.ptr, entries) - oldrule->nr = nr++; + oldrule->nr = nr++; ruleset->rules[rs_num].active.ticket++; @@ -3020,18 +3143,19 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p } case DIOCINSERTRULE: { - struct pf_ruleset *ruleset; - struct pf_rule *rule, *tail, *r; - int rs_num; - int is_anchor; + struct pf_ruleset *ruleset; + struct pf_rule *rule, *tail, *r; + int rs_num; + int is_anchor; - pr->anchor[sizeof (pr->anchor) - 1] = '\0'; - pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; + pr->anchor[sizeof(pr->anchor) - 1] = '\0'; + pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0'; is_anchor = (pr->anchor_call[0] != '\0'); if ((ruleset = pf_find_ruleset_with_owner(pr->anchor, - pr->rule.owner, is_anchor, &error)) == NULL) + pr->rule.owner, is_anchor, &error)) == NULL) { break; + } rs_num = pf_get_ruleset_number(pr->rule.action); if (rs_num >= PF_RULESET_MAX) { @@ -3052,16 +3176,18 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p pr->anchor_call)) == 0)) { if (((strcmp(pr->rule.owner, r->owner)) == 0) || - ((strcmp(r->owner, "")) == 0)) + ((strcmp(r->owner, "")) == 0)) { error = EEXIST; - else + } else { error = EPERM; + } break; } r = TAILQ_NEXT(r, entries); } - if (error != 0) - return (error); + if (error != 0) { + return error; + } } rule = pool_get(&pf_rule_pl, PR_WAITOK); @@ -3086,48 +3212,57 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p #endif /* INET6 */ r = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr); - while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) + while ((r != NULL) && (rule->priority >= (unsigned)r->priority)) { r = TAILQ_NEXT(r, entries); + } if (r == NULL) { if ((tail = 
TAILQ_LAST(ruleset->rules[rs_num].active.ptr, - pf_rulequeue)) != NULL) + pf_rulequeue)) != NULL) { rule->nr = tail->nr + 1; - else + } else { rule->nr = 0; + } } else { rule->nr = r->nr; } - if ((error = pf_rule_setup(pr, rule, ruleset))) + if ((error = pf_rule_setup(pr, rule, ruleset))) { break; + } - if (rule->anchor != NULL) + if (rule->anchor != NULL) { strlcpy(rule->anchor->owner, rule->owner, PF_OWNER_NAME_SIZE); + } if (r) { TAILQ_INSERT_BEFORE(r, rule, entries); - while (r && ++r->nr) + while (r && ++r->nr) { r = TAILQ_NEXT(r, entries); - } else + } + } else { TAILQ_INSERT_TAIL(ruleset->rules[rs_num].active.ptr, rule, entries); + } ruleset->rules[rs_num].active.rcount++; /* Calculate checksum for the main ruleset */ - if (ruleset == &pf_main_ruleset) + if (ruleset == &pf_main_ruleset) { error = pf_setup_pfsync_matching(ruleset); + } pf_ruleset_cleanup(ruleset, rs_num); rule->ticket = VM_KERNEL_ADDRPERM((u_int64_t)(uintptr_t)rule); pr->rule.ticket = rule->ticket; pf_rule_copyout(rule, &pr->rule); - if (rule->rule_flag & PFRULE_PFM) + if (rule->rule_flag & PFRULE_PFM) { pffwrules++; - if (rule->action == PF_NAT64) + } + if (rule->action == PF_NAT64) { atomic_add_16(&pf_nat64_configured, 1); + } if (pr->anchor_call[0] == '\0') { INC_ATOMIC_INT64_LIM(net_api_stats.nas_pf_addrule_total); @@ -3139,8 +3274,8 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p } case DIOCDELETERULE: { - pr->anchor[sizeof (pr->anchor) - 1] = '\0'; - pr->anchor_call[sizeof (pr->anchor_call) - 1] = '\0'; + pr->anchor[sizeof(pr->anchor) - 1] = '\0'; + pr->anchor_call[sizeof(pr->anchor_call) - 1] = '\0'; if (pr->rule.return_icmp >> 8 > ICMP_MAXTYPE) { error = EINVAL; @@ -3148,17 +3283,21 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p } /* get device through which request is made */ - if ((uint8_t)minordev == PFDEV_PFM) + if ((uint8_t)minordev == PFDEV_PFM) { req_dev |= PFRULE_PFM; + } if (pr->rule.ticket) { - if ((error = pf_delete_rule_by_ticket(pr, req_dev))) + if ((error = pf_delete_rule_by_ticket(pr, req_dev))) { break; - } else + } + } else { pf_delete_rule_by_owner(pr->rule.owner, req_dev); + } pr->nr = pffwrules; - if (pr->rule.action == PF_NAT64) + if (pr->rule.action == PF_NAT64) { atomic_add_16(&pf_nat64_configured, -1); + } break; } @@ -3167,7 +3306,7 @@ pfioctl_ioc_rule(u_long cmd, int minordev, struct pfioc_rule *pr, struct proc *p /* NOTREACHED */ } - return (error); + return error; } static int @@ -3176,7 +3315,7 @@ pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p) #pragma unused(p) int error = 0; - psk->psk_ifname[sizeof (psk->psk_ifname) - 1] = '\0'; + psk->psk_ifname[sizeof(psk->psk_ifname) - 1] = '\0'; psk->psk_ownername[sizeof(psk->psk_ownername) - 1] = '\0'; bool ifname_matched = true; @@ -3184,8 +3323,8 @@ pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p) switch (cmd) { case DIOCCLRSTATES: { - struct pf_state *s, *nexts; - int killed = 0; + struct pf_state *s, *nexts; + int killed = 0; for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { nexts = RB_NEXT(pf_state_tree_id, &tree_id, s); @@ -3205,7 +3344,7 @@ pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p) if (psk->psk_ownername[0] && ((NULL == s->rule.ptr) || - strcmp(psk->psk_ownername, s->rule.ptr->owner))) { + strcmp(psk->psk_ownername, s->rule.ptr->owner))) { owner_matched = false; } @@ -3228,10 +3367,10 @@ pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill 
*psk, struct proc *p) } case DIOCKILLSTATES: { - struct pf_state *s, *nexts; - struct pf_state_key *sk; - struct pf_state_host *src, *dst; - int killed = 0; + struct pf_state *s, *nexts; + struct pf_state_key *sk; + struct pf_state_host *src, *dst; + int killed = 0; for (s = RB_MIN(pf_state_tree_id, &tree_id); s; s = nexts) { @@ -3247,7 +3386,7 @@ pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p) if (psk->psk_ownername[0] && ((NULL == s->rule.ptr) || - strcmp(psk->psk_ownername, s->rule.ptr->owner))) { + strcmp(psk->psk_ownername, s->rule.ptr->owner))) { owner_matched = false; } @@ -3294,7 +3433,7 @@ pfioctl_ioc_state_kill(u_long cmd, struct pfioc_state_kill *psk, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } static int @@ -3305,10 +3444,10 @@ pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p) switch (cmd) { case DIOCADDSTATE: { - struct pfsync_state *sp = &ps->state; - struct pf_state *s; - struct pf_state_key *sk; - struct pfi_kif *kif; + struct pfsync_state *sp = &ps->state; + struct pf_state *s; + struct pf_state_key *sk; + struct pfi_kif *kif; if (sp->timeout >= PFTM_MAX) { error = EINVAL; @@ -3319,7 +3458,7 @@ pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p) error = ENOMEM; break; } - bzero(s, sizeof (struct pf_state)); + bzero(s, sizeof(struct pf_state)); if ((sk = pf_alloc_state_key(s, NULL)) == NULL) { pool_put(&pf_state_pl, s); error = ENOMEM; @@ -3347,10 +3486,10 @@ pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p) } case DIOCGETSTATE: { - struct pf_state *s; - struct pf_state_cmp id_key; + struct pf_state *s; + struct pf_state_cmp id_key; - bcopy(ps->state.id, &id_key.id, sizeof (id_key.id)); + bcopy(ps->state.id, &id_key.id, sizeof(id_key.id)); id_key.creatorid = ps->state.creatorid; s = pf_find_state_byid(&id_key); @@ -3368,7 +3507,7 @@ pfioctl_ioc_state(u_long cmd, struct pfioc_state *ps, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } static int @@ -3379,24 +3518,25 @@ pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32, int error = 0; switch (cmd) { - case DIOCGETSTATES: { /* struct pfioc_states */ - struct pf_state *state; - struct pfsync_state *pstore; - user_addr_t buf; - u_int32_t nr = 0; - int len, size; + case DIOCGETSTATES: { /* struct pfioc_states */ + struct pf_state *state; + struct pfsync_state *pstore; + user_addr_t buf; + u_int32_t nr = 0; + int len, size; len = (p64 ? 
ps64->ps_len : ps32->ps_len); if (len == 0) { - size = sizeof (struct pfsync_state) * pf_status.states; - if (p64) + size = sizeof(struct pfsync_state) * pf_status.states; + if (p64) { ps64->ps_len = size; - else + } else { ps32->ps_len = size; + } break; } - pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK | M_ZERO); + pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK | M_ZERO); if (pstore == NULL) { error = ENOMEM; break; @@ -3406,27 +3546,29 @@ pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32, state = TAILQ_FIRST(&state_list); while (state) { if (state->timeout != PFTM_UNLINKED) { - if ((nr + 1) * sizeof (*pstore) > (unsigned)len) + if ((nr + 1) * sizeof(*pstore) > (unsigned)len) { break; + } pf_state_export(pstore, state->state_key, state); - error = copyout(pstore, buf, sizeof (*pstore)); + error = copyout(pstore, buf, sizeof(*pstore)); if (error) { _FREE(pstore, M_TEMP); goto fail; } - buf += sizeof (*pstore); + buf += sizeof(*pstore); nr++; } state = TAILQ_NEXT(state, entry_list); } - size = sizeof (struct pfsync_state) * nr; - if (p64) + size = sizeof(struct pfsync_state) * nr; + if (p64) { ps64->ps_len = size; - else + } else { ps32->ps_len = size; + } _FREE(pstore, M_TEMP); break; @@ -3437,7 +3579,7 @@ pfioctl_ioc_states(u_long cmd, struct pfioc_states_32 *ps32, /* NOTREACHED */ } fail: - return (error); + return error; } static int @@ -3448,10 +3590,10 @@ pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p) switch (cmd) { case DIOCNATLOOK: { - struct pf_state_key *sk; - struct pf_state *state; - struct pf_state_key_cmp key; - int m = 0, direction = pnl->direction; + struct pf_state_key *sk; + struct pf_state *state; + struct pf_state_key_cmp key; + int m = 0, direction = pnl->direction; key.proto = pnl->proto; key.proto_variant = pnl->proto_variant; @@ -3461,9 +3603,9 @@ pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p) PF_AZERO(&pnl->daddr, pnl->af) || ((pnl->proto == IPPROTO_TCP || pnl->proto == IPPROTO_UDP) && - (!pnl->dxport.port || !pnl->sxport.port))) + (!pnl->dxport.port || !pnl->sxport.port))) { error = EINVAL; - else { + } else { /* * userland gives us source and dest of connection, * reverse the lookup so we ask for what happens with @@ -3473,49 +3615,50 @@ pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p) if (direction == PF_IN) { key.af_gwy = pnl->af; PF_ACPY(&key.ext_gwy.addr, &pnl->daddr, - pnl->af); + pnl->af); memcpy(&key.ext_gwy.xport, &pnl->dxport, - sizeof (key.ext_gwy.xport)); + sizeof(key.ext_gwy.xport)); PF_ACPY(&key.gwy.addr, &pnl->saddr, pnl->af); memcpy(&key.gwy.xport, &pnl->sxport, - sizeof (key.gwy.xport)); + sizeof(key.gwy.xport)); state = pf_find_state_all(&key, PF_IN, &m); } else { key.af_lan = pnl->af; PF_ACPY(&key.lan.addr, &pnl->daddr, pnl->af); memcpy(&key.lan.xport, &pnl->dxport, - sizeof (key.lan.xport)); + sizeof(key.lan.xport)); PF_ACPY(&key.ext_lan.addr, &pnl->saddr, - pnl->af); + pnl->af); memcpy(&key.ext_lan.xport, &pnl->sxport, - sizeof (key.ext_lan.xport)); + sizeof(key.ext_lan.xport)); state = pf_find_state_all(&key, PF_OUT, &m); } - if (m > 1) - error = E2BIG; /* more than one state */ - else if (state != NULL) { + if (m > 1) { + error = E2BIG; /* more than one state */ + } else if (state != NULL) { sk = state->state_key; if (direction == PF_IN) { PF_ACPY(&pnl->rsaddr, &sk->lan.addr, sk->af_lan); memcpy(&pnl->rsxport, &sk->lan.xport, - sizeof (pnl->rsxport)); + sizeof(pnl->rsxport)); PF_ACPY(&pnl->rdaddr, &pnl->daddr, pnl->af); memcpy(&pnl->rdxport, 
&pnl->dxport, - sizeof (pnl->rdxport)); + sizeof(pnl->rdxport)); } else { PF_ACPY(&pnl->rdaddr, &sk->gwy.addr, sk->af_gwy); memcpy(&pnl->rdxport, &sk->gwy.xport, - sizeof (pnl->rdxport)); + sizeof(pnl->rdxport)); PF_ACPY(&pnl->rsaddr, &pnl->saddr, pnl->af); memcpy(&pnl->rsxport, &pnl->sxport, - sizeof (pnl->rsxport)); + sizeof(pnl->rsxport)); } - } else + } else { error = ENOENT; + } } break; } @@ -3525,7 +3668,7 @@ pfioctl_ioc_natlook(u_long cmd, struct pfioc_natlook *pnl, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } static int @@ -3544,11 +3687,13 @@ pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p) goto fail; } old = pf_default_rule.timeout[pt->timeout]; - if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) + if (pt->timeout == PFTM_INTERVAL && pt->seconds == 0) { pt->seconds = 1; + } pf_default_rule.timeout[pt->timeout] = pt->seconds; - if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) + if (pt->timeout == PFTM_INTERVAL && pt->seconds < old) { wakeup(pf_purge_thread_fn); + } pt->seconds = old; break; } @@ -3567,7 +3712,7 @@ pfioctl_ioc_tm(u_long cmd, struct pfioc_tm *pt, struct proc *p) /* NOTREACHED */ } fail: - return (error); + return error; } static int @@ -3578,7 +3723,6 @@ pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p) switch (cmd) { case DIOCGETLIMIT: { - if (pl->index < 0 || pl->index >= PF_LIMIT_MAX) { error = EINVAL; goto fail; @@ -3608,7 +3752,7 @@ pfioctl_ioc_limit(u_long cmd, struct pfioc_limit *pl, struct proc *p) /* NOTREACHED */ } fail: - return (error); + return error; } static int @@ -3627,7 +3771,7 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) } case DIOCADDADDR: { - pp->anchor[sizeof (pp->anchor) - 1] = '\0'; + pp->anchor[sizeof(pp->anchor) - 1] = '\0'; if (pp->ticket != ticket_pabuf) { error = EBUSY; break; @@ -3679,7 +3823,7 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) case DIOCGETADDRS: { pp->nr = 0; - pp->anchor[sizeof (pp->anchor) - 1] = '\0'; + pp->anchor[sizeof(pp->anchor) - 1] = '\0'; pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, pp->r_num, 0, 1, 0); if (pool == NULL) { @@ -3687,14 +3831,14 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) break; } TAILQ_FOREACH(pa, &pool->list, entries) - pp->nr++; + pp->nr++; break; } case DIOCGETADDR: { - u_int32_t nr = 0; + u_int32_t nr = 0; - pp->anchor[sizeof (pp->anchor) - 1] = '\0'; + pp->anchor[sizeof(pp->anchor) - 1] = '\0'; pool = pf_get_pool(pp->anchor, pp->ticket, pp->r_action, pp->r_num, 0, 1, 1); if (pool == NULL) { @@ -3718,9 +3862,9 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) } case DIOCCHANGEADDR: { - struct pfioc_pooladdr *pca = pp; - struct pf_pooladdr *oldpa = NULL, *newpa = NULL; - struct pf_ruleset *ruleset; + struct pfioc_pooladdr *pca = pp; + struct pf_pooladdr *oldpa = NULL, *newpa = NULL; + struct pf_ruleset *ruleset; if (pca->action < PF_CHANGE_ADD_HEAD || pca->action > PF_CHANGE_REMOVE) { @@ -3734,7 +3878,7 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) break; } - pca->anchor[sizeof (pca->anchor) - 1] = '\0'; + pca->anchor[sizeof(pca->anchor) - 1] = '\0'; ruleset = pf_find_ruleset(pca->anchor); if (ruleset == NULL) { error = EBUSY; @@ -3775,8 +3919,9 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) break; } pfi_kif_ref(newpa->kif, PFI_KIF_REF_RULE); - } else + } else { newpa->kif = NULL; + } pf_addrwrap_setup(&newpa->addr); if 
(pfi_dynaddr_setup(&newpa->addr, pca->af) || pf_tbladdr_setup(ruleset, &newpa->addr)) { @@ -3788,12 +3933,12 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) } } - if (pca->action == PF_CHANGE_ADD_HEAD) + if (pca->action == PF_CHANGE_ADD_HEAD) { oldpa = TAILQ_FIRST(&pool->list); - else if (pca->action == PF_CHANGE_ADD_TAIL) + } else if (pca->action == PF_CHANGE_ADD_TAIL) { oldpa = TAILQ_LAST(&pool->list, pf_palist); - else { - int i = 0; + } else { + int i = 0; oldpa = TAILQ_FIRST(&pool->list); while ((oldpa != NULL) && (i < (int)pca->nr)) { @@ -3813,14 +3958,15 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) pfi_kif_unref(oldpa->kif, PFI_KIF_REF_RULE); pool_put(&pf_pooladdr_pl, oldpa); } else { - if (oldpa == NULL) + if (oldpa == NULL) { TAILQ_INSERT_TAIL(&pool->list, newpa, entries); - else if (pca->action == PF_CHANGE_ADD_HEAD || - pca->action == PF_CHANGE_ADD_BEFORE) + } else if (pca->action == PF_CHANGE_ADD_HEAD || + pca->action == PF_CHANGE_ADD_BEFORE) { TAILQ_INSERT_BEFORE(oldpa, newpa, entries); - else + } else { TAILQ_INSERT_AFTER(&pool->list, oldpa, newpa, entries); + } } pool->cur = TAILQ_FIRST(&pool->list); @@ -3834,7 +3980,7 @@ pfioctl_ioc_pooladdr(u_long cmd, struct pfioc_pooladdr *pp, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } static int @@ -3845,11 +3991,11 @@ pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p) switch (cmd) { case DIOCGETRULESETS: { - struct pf_ruleset *ruleset; - struct pf_anchor *anchor; + struct pf_ruleset *ruleset; + struct pf_anchor *anchor; - pr->path[sizeof (pr->path) - 1] = '\0'; - pr->name[sizeof (pr->name) - 1] = '\0'; + pr->path[sizeof(pr->path) - 1] = '\0'; + pr->name[sizeof(pr->name) - 1] = '\0'; if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { error = EINVAL; break; @@ -3858,22 +4004,23 @@ pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p) if (ruleset->anchor == NULL) { /* XXX kludge for pf_main_ruleset */ RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) - if (anchor->parent == NULL) - pr->nr++; + if (anchor->parent == NULL) { + pr->nr++; + } } else { RB_FOREACH(anchor, pf_anchor_node, &ruleset->anchor->children) - pr->nr++; + pr->nr++; } break; } case DIOCGETRULESET: { - struct pf_ruleset *ruleset; - struct pf_anchor *anchor; - u_int32_t nr = 0; + struct pf_ruleset *ruleset; + struct pf_anchor *anchor; + u_int32_t nr = 0; - pr->path[sizeof (pr->path) - 1] = '\0'; + pr->path[sizeof(pr->path) - 1] = '\0'; if ((ruleset = pf_find_ruleset(pr->path)) == NULL) { error = EINVAL; break; @@ -3882,22 +4029,23 @@ pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p) if (ruleset->anchor == NULL) { /* XXX kludge for pf_main_ruleset */ RB_FOREACH(anchor, pf_anchor_global, &pf_anchors) - if (anchor->parent == NULL && nr++ == pr->nr) { - strlcpy(pr->name, anchor->name, - sizeof (pr->name)); - break; - } + if (anchor->parent == NULL && nr++ == pr->nr) { + strlcpy(pr->name, anchor->name, + sizeof(pr->name)); + break; + } } else { RB_FOREACH(anchor, pf_anchor_node, &ruleset->anchor->children) - if (nr++ == pr->nr) { - strlcpy(pr->name, anchor->name, - sizeof (pr->name)); - break; - } + if (nr++ == pr->nr) { + strlcpy(pr->name, anchor->name, + sizeof(pr->name)); + break; + } } - if (!pr->name[0]) + if (!pr->name[0]) { error = EBUSY; + } break; } @@ -3906,7 +4054,7 @@ pfioctl_ioc_ruleset(u_long cmd, struct pfioc_ruleset *pr, struct proc *p) /* NOTREACHED */ } - return (error); + return error; } static int @@ 
-3923,31 +4071,31 @@ pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32, switch (cmd) { case DIOCXBEGIN: { - struct pfioc_trans_e *ioe; - struct pfr_table *table; - int i; + struct pfioc_trans_e *ioe; + struct pfr_table *table; + int i; - if (esize != sizeof (*ioe)) { + if (esize != sizeof(*ioe)) { error = ENODEV; goto fail; } - ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); - table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); - for (i = 0; i < size; i++, buf += sizeof (*ioe)) { - if (copyin(buf, ioe, sizeof (*ioe))) { + ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK); + table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK); + for (i = 0; i < size; i++, buf += sizeof(*ioe)) { + if (copyin(buf, ioe, sizeof(*ioe))) { _FREE(table, M_TEMP); _FREE(ioe, M_TEMP); error = EFAULT; goto fail; } - ioe->anchor[sizeof (ioe->anchor) - 1] = '\0'; + ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; switch (ioe->rs_num) { case PF_RULESET_ALTQ: break; case PF_RULESET_TABLE: - bzero(table, sizeof (*table)); + bzero(table, sizeof(*table)); strlcpy(table->pfrt_anchor, ioe->anchor, - sizeof (table->pfrt_anchor)); + sizeof(table->pfrt_anchor)); if ((error = pfr_ina_begin(table, &ioe->ticket, NULL, 0))) { _FREE(table, M_TEMP); @@ -3964,7 +4112,7 @@ pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32, } break; } - if (copyout(ioe, buf, sizeof (*ioe))) { + if (copyout(ioe, buf, sizeof(*ioe))) { _FREE(table, M_TEMP); _FREE(ioe, M_TEMP); error = EFAULT; @@ -3977,31 +4125,31 @@ pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32, } case DIOCXROLLBACK: { - struct pfioc_trans_e *ioe; - struct pfr_table *table; - int i; + struct pfioc_trans_e *ioe; + struct pfr_table *table; + int i; - if (esize != sizeof (*ioe)) { + if (esize != sizeof(*ioe)) { error = ENODEV; goto fail; } - ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); - table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); - for (i = 0; i < size; i++, buf += sizeof (*ioe)) { - if (copyin(buf, ioe, sizeof (*ioe))) { + ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK); + table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK); + for (i = 0; i < size; i++, buf += sizeof(*ioe)) { + if (copyin(buf, ioe, sizeof(*ioe))) { _FREE(table, M_TEMP); _FREE(ioe, M_TEMP); error = EFAULT; goto fail; } - ioe->anchor[sizeof (ioe->anchor) - 1] = '\0'; + ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; switch (ioe->rs_num) { case PF_RULESET_ALTQ: break; case PF_RULESET_TABLE: - bzero(table, sizeof (*table)); + bzero(table, sizeof(*table)); strlcpy(table->pfrt_anchor, ioe->anchor, - sizeof (table->pfrt_anchor)); + sizeof(table->pfrt_anchor)); if ((error = pfr_ina_rollback(table, ioe->ticket, NULL, 0))) { _FREE(table, M_TEMP); @@ -4025,27 +4173,27 @@ pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32, } case DIOCXCOMMIT: { - struct pfioc_trans_e *ioe; - struct pfr_table *table; - struct pf_ruleset *rs; - user_addr_t _buf = buf; - int i; + struct pfioc_trans_e *ioe; + struct pfr_table *table; + struct pf_ruleset *rs; + user_addr_t _buf = buf; + int i; - if (esize != sizeof (*ioe)) { + if (esize != sizeof(*ioe)) { error = ENODEV; goto fail; } - ioe = _MALLOC(sizeof (*ioe), M_TEMP, M_WAITOK); - table = _MALLOC(sizeof (*table), M_TEMP, M_WAITOK); + ioe = _MALLOC(sizeof(*ioe), M_TEMP, M_WAITOK); + table = _MALLOC(sizeof(*table), M_TEMP, M_WAITOK); /* first makes sure everything will succeed */ - for (i = 0; i < size; i++, buf += sizeof (*ioe)) { - if (copyin(buf, ioe, sizeof (*ioe))) { + for (i = 0; i < size; i++, buf += sizeof(*ioe)) { + if (copyin(buf, ioe, sizeof(*ioe))) { _FREE(table, 
M_TEMP); _FREE(ioe, M_TEMP); error = EFAULT; goto fail; } - ioe->anchor[sizeof (ioe->anchor) - 1] = '\0'; + ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; switch (ioe->rs_num) { case PF_RULESET_ALTQ: break; @@ -4082,21 +4230,21 @@ pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32, } buf = _buf; /* now do the commit - no errors should happen here */ - for (i = 0; i < size; i++, buf += sizeof (*ioe)) { - if (copyin(buf, ioe, sizeof (*ioe))) { + for (i = 0; i < size; i++, buf += sizeof(*ioe)) { + if (copyin(buf, ioe, sizeof(*ioe))) { _FREE(table, M_TEMP); _FREE(ioe, M_TEMP); error = EFAULT; goto fail; } - ioe->anchor[sizeof (ioe->anchor) - 1] = '\0'; + ioe->anchor[sizeof(ioe->anchor) - 1] = '\0'; switch (ioe->rs_num) { case PF_RULESET_ALTQ: break; case PF_RULESET_TABLE: - bzero(table, sizeof (*table)); + bzero(table, sizeof(*table)); strlcpy(table->pfrt_anchor, ioe->anchor, - sizeof (table->pfrt_anchor)); + sizeof(table->pfrt_anchor)); if ((error = pfr_ina_commit(table, ioe->ticket, NULL, NULL, 0))) { _FREE(table, M_TEMP); @@ -4124,7 +4272,7 @@ pfioctl_ioc_trans(u_long cmd, struct pfioc_trans_32 *io32, /* NOTREACHED */ } fail: - return (error); + return error; } static int @@ -4136,25 +4284,26 @@ pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32, switch (cmd) { case DIOCGETSRCNODES: { - struct pf_src_node *n, *pstore; - user_addr_t buf; - u_int32_t nr = 0; - int space, size; + struct pf_src_node *n, *pstore; + user_addr_t buf; + u_int32_t nr = 0; + int space, size; space = (p64 ? psn64->psn_len : psn32->psn_len); if (space == 0) { RB_FOREACH(n, pf_src_tree, &tree_src_tracking) - nr++; + nr++; - size = sizeof (struct pf_src_node) * nr; - if (p64) + size = sizeof(struct pf_src_node) * nr; + if (p64) { psn64->psn_len = size; - else + } else { psn32->psn_len = size; + } break; } - pstore = _MALLOC(sizeof (*pstore), M_TEMP, M_WAITOK); + pstore = _MALLOC(sizeof(*pstore), M_TEMP, M_WAITOK); if (pstore == NULL) { error = ENOMEM; break; @@ -4164,45 +4313,50 @@ pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32, RB_FOREACH(n, pf_src_tree, &tree_src_tracking) { uint64_t secs = pf_time_second(), diff; - if ((nr + 1) * sizeof (*pstore) > (unsigned)space) + if ((nr + 1) * sizeof(*pstore) > (unsigned)space) { break; + } - bcopy(n, pstore, sizeof (*pstore)); - if (n->rule.ptr != NULL) + bcopy(n, pstore, sizeof(*pstore)); + if (n->rule.ptr != NULL) { pstore->rule.nr = n->rule.ptr->nr; + } pstore->creation = secs - pstore->creation; - if (pstore->expire > secs) + if (pstore->expire > secs) { pstore->expire -= secs; - else + } else { pstore->expire = 0; + } /* adjust the connection rate estimate */ diff = secs - n->conn_rate.last; - if (diff >= n->conn_rate.seconds) + if (diff >= n->conn_rate.seconds) { pstore->conn_rate.count = 0; - else + } else { pstore->conn_rate.count -= n->conn_rate.count * diff / n->conn_rate.seconds; + } _RB_PARENT(pstore, entry) = NULL; RB_LEFT(pstore, entry) = RB_RIGHT(pstore, entry) = NULL; pstore->kif = NULL; - error = copyout(pstore, buf, sizeof (*pstore)); + error = copyout(pstore, buf, sizeof(*pstore)); if (error) { _FREE(pstore, M_TEMP); goto fail; } - buf += sizeof (*pstore); + buf += sizeof(*pstore); nr++; } - size = sizeof (struct pf_src_node) * nr; - if (p64) + size = sizeof(struct pf_src_node) * nr; + if (p64) { psn64->psn_len = size; - else + } else { psn32->psn_len = size; + } _FREE(pstore, M_TEMP); break; @@ -4213,8 +4367,7 @@ pfioctl_ioc_src_nodes(u_long cmd, struct pfioc_src_nodes_32 *psn32, /* NOTREACHED */ } fail: - return 
(error); - + return error; } static int @@ -4226,9 +4379,9 @@ pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk, switch (cmd) { case DIOCKILLSRCNODES: { - struct pf_src_node *sn; - struct pf_state *s; - int killed = 0; + struct pf_src_node *sn; + struct pf_state *s; + int killed = 0; RB_FOREACH(sn, pf_src_tree, &tree_src_tracking) { if (PF_MATCHA(psnk->psnk_src.neg, @@ -4243,10 +4396,12 @@ pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk, if (sn->states != 0) { RB_FOREACH(s, pf_state_tree_id, &tree_id) { - if (s->src_node == sn) + if (s->src_node == sn) { s->src_node = NULL; - if (s->nat_src_node == sn) + } + if (s->nat_src_node == sn) { s->nat_src_node = NULL; + } } sn->states = 0; } @@ -4255,8 +4410,9 @@ pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk, } } - if (killed > 0) + if (killed > 0) { pf_purge_expired_src_nodes(); + } psnk->psnk_af = killed; break; @@ -4267,7 +4423,7 @@ pfioctl_ioc_src_node_kill(u_long cmd, struct pfioc_src_node_kill *psnk, /* NOTREACHED */ } - return (error); + return error; } static int @@ -4286,41 +4442,44 @@ pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32, esize = (p64 ? io64->pfiio_esize : io32->pfiio_esize); /* esize must be that of the user space version of pfi_kif */ - if (esize != sizeof (struct pfi_uif)) { + if (esize != sizeof(struct pfi_uif)) { error = ENODEV; break; } - if (p64) - io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0'; - else - io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0'; + if (p64) { + io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0'; + } else { + io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0'; + } error = pfi_get_ifaces( - p64 ? io64->pfiio_name : io32->pfiio_name, buf, - p64 ? &io64->pfiio_size : &io32->pfiio_size); + p64 ? io64->pfiio_name : io32->pfiio_name, buf, + p64 ? &io64->pfiio_size : &io32->pfiio_size); break; } case DIOCSETIFFLAG: { - if (p64) - io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0'; - else - io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0'; + if (p64) { + io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0'; + } else { + io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0'; + } error = pfi_set_flags( - p64 ? io64->pfiio_name : io32->pfiio_name, - p64 ? io64->pfiio_flags : io32->pfiio_flags); + p64 ? io64->pfiio_name : io32->pfiio_name, + p64 ? io64->pfiio_flags : io32->pfiio_flags); break; } case DIOCCLRIFFLAG: { - if (p64) - io64->pfiio_name[sizeof (io64->pfiio_name) - 1] = '\0'; - else - io32->pfiio_name[sizeof (io32->pfiio_name) - 1] = '\0'; + if (p64) { + io64->pfiio_name[sizeof(io64->pfiio_name) - 1] = '\0'; + } else { + io32->pfiio_name[sizeof(io32->pfiio_name) - 1] = '\0'; + } error = pfi_clear_flags( - p64 ? io64->pfiio_name : io32->pfiio_name, - p64 ? io64->pfiio_flags : io32->pfiio_flags); + p64 ? io64->pfiio_name : io32->pfiio_name, + p64 ? io64->pfiio_flags : io32->pfiio_flags); break; } @@ -4329,7 +4488,7 @@ pfioctl_ioc_iface(u_long cmd, struct pfioc_iface_32 *io32, /* NOTREACHED */ } - return (error); + return error; } int @@ -4342,35 +4501,39 @@ pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp, struct ifnet * pf_ifp = ifp; /* Always allow traffic on co-processor interfaces. 
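 * As an illustration only (restating the condition on the line below,
 * not part of the original change): the bypass fires when
 *
 *   !intcoproc_unrestricted && ifp != NULL && IFNET_IS_INTCOPROC(ifp)
 *
 * i.e. the early return is taken only while intcoproc_unrestricted is
 * clear; setting it subjects co-processor interfaces to pf like any
 * other interface.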
*/ - if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) - return (0); + if (!intcoproc_unrestricted && ifp && IFNET_IS_INTCOPROC(ifp)) { + return 0; + } marks = net_thread_marks_push(NET_THREAD_HELD_PF); if (marks != net_thread_marks_none) { lck_rw_lock_shared(pf_perim_lock); - if (!pf_is_enabled) + if (!pf_is_enabled) { goto done; + } lck_mtx_lock(pf_lock); } - if (mppn != NULL && *mppn != NULL) + if (mppn != NULL && *mppn != NULL) { VERIFY(*mppn == *mp); - if ((nextpkt = (*mp)->m_nextpkt) != NULL) + } + if ((nextpkt = (*mp)->m_nextpkt) != NULL) { (*mp)->m_nextpkt = NULL; + } - /* - * For packets destined to locally hosted IP address - * ip_output_list sets Mbuf's pkt header's rcvif to - * the interface hosting the IP address. - * While on the output path ifp passed to pf_af_hook - * to such local communication is the loopback interface, - * the input path derives ifp from mbuf packet header's - * rcvif. - * This asymmetry caues issues with PF. - * To handle that case, we have a limited change here to - * pass interface as loopback if packets are looped in. - */ + /* + * For packets destined to locally hosted IP address + * ip_output_list sets Mbuf's pkt header's rcvif to + * the interface hosting the IP address. + * While on the output path ifp passed to pf_af_hook + * to such local communication is the loopback interface, + * the input path derives ifp from mbuf packet header's + * rcvif. + * This asymmetry causes issues with PF. + * To handle that case, we have a limited change here to + * pass interface as loopback if packets are looped in. + */ if (input && ((*mp)->m_pkthdr.pkt_flags & PKTF_LOOP)) { pf_ifp = lo_ifp; } @@ -4394,27 +4557,31 @@ pf_af_hook(struct ifnet *ifp, struct mbuf **mppn, struct mbuf **mp, /* When packet valid, link to the next packet */ if (*mp != NULL && nextpkt != NULL) { struct mbuf *m = *mp; - while (m->m_nextpkt != NULL) + while (m->m_nextpkt != NULL) { m = m->m_nextpkt; + } m->m_nextpkt = nextpkt; } /* Fix up linkage of previous packet in the chain */ if (mppn != NULL) { - if (*mp != NULL) + if (*mp != NULL) { *mppn = *mp; - else + } else { *mppn = nextpkt; + } } - if (marks != net_thread_marks_none) + if (marks != net_thread_marks_none) { lck_mtx_unlock(pf_lock); + } done: - if (marks != net_thread_marks_none) + if (marks != net_thread_marks_none) { lck_rw_done(pf_perim_lock); + } net_thread_marks_pop(marks); - return (error); + return error; } @@ -4469,7 +4636,7 @@ pf_inet_hook(struct ifnet *ifp, struct mbuf **mp, int input, } } #endif - return (error); + return error; } #endif /* INET */ @@ -4511,7 +4678,7 @@ pf_inet6_hook(struct ifnet *ifp, struct mbuf **mp, int input, error = ENOBUFS; } } - return (error); + return error; } #endif /* INET6 */ @@ -4529,7 +4696,7 @@ pf_ifaddr_hook(struct ifnet *ifp) lck_mtx_unlock(pf_lock); lck_rw_done(pf_perim_lock); } - return (0); + return 0; } /* @@ -4540,10 +4707,11 @@ pf_ifnet_hook(struct ifnet *ifp, int attach) { lck_rw_lock_shared(pf_perim_lock); lck_mtx_lock(pf_lock); - if (attach) + if (attach) { pfi_attach_ifnet(ifp); - else + } else { pfi_detach_ifnet(ifp); + } lck_mtx_unlock(pf_lock); lck_rw_done(pf_perim_lock); } diff --git a/bsd/net/pf_norm.c b/bsd/net/pf_norm.c index 3cb22e9ce..9c28415de 100644 --- a/bsd/net/pf_norm.c +++ b/bsd/net/pf_norm.c @@ -89,61 +89,61 @@ #include struct pf_frent { - LIST_ENTRY(pf_frent) fr_next; - struct mbuf *fr_m; -#define fr_ip fr_u.fru_ipv4 -#define fr_ip6 fr_u.fru_ipv6 + LIST_ENTRY(pf_frent) fr_next; + struct mbuf *fr_m; +#define fr_ip fr_u.fru_ipv4 +#define fr_ip6
fr_u.fru_ipv6 union { - struct ip *fru_ipv4; - struct ip6_hdr *fru_ipv6; + struct ip *fru_ipv4; + struct ip6_hdr *fru_ipv6; } fr_u; - struct ip6_frag fr_ip6f_opt; - int fr_ip6f_hlen; + struct ip6_frag fr_ip6f_opt; + int fr_ip6f_hlen; }; struct pf_frcache { LIST_ENTRY(pf_frcache) fr_next; - uint16_t fr_off; - uint16_t fr_end; + uint16_t fr_off; + uint16_t fr_end; }; -#define PFFRAG_SEENLAST 0x0001 /* Seen the last fragment for this */ -#define PFFRAG_NOBUFFER 0x0002 /* Non-buffering fragment cache */ -#define PFFRAG_DROP 0x0004 /* Drop all fragments */ -#define BUFFER_FRAGMENTS(fr) (!((fr)->fr_flags & PFFRAG_NOBUFFER)) +#define PFFRAG_SEENLAST 0x0001 /* Seen the last fragment for this */ +#define PFFRAG_NOBUFFER 0x0002 /* Non-buffering fragment cache */ +#define PFFRAG_DROP 0x0004 /* Drop all fragments */ +#define BUFFER_FRAGMENTS(fr) (!((fr)->fr_flags & PFFRAG_NOBUFFER)) struct pf_fragment { RB_ENTRY(pf_fragment) fr_entry; TAILQ_ENTRY(pf_fragment) frag_next; - struct pf_addr fr_srcx; - struct pf_addr fr_dstx; - u_int8_t fr_p; /* protocol of this fragment */ - u_int8_t fr_flags; /* status flags */ - u_int16_t fr_max; /* fragment data max */ -#define fr_id fr_uid.fru_id4 -#define fr_id6 fr_uid.fru_id6 + struct pf_addr fr_srcx; + struct pf_addr fr_dstx; + u_int8_t fr_p; /* protocol of this fragment */ + u_int8_t fr_flags; /* status flags */ + u_int16_t fr_max; /* fragment data max */ +#define fr_id fr_uid.fru_id4 +#define fr_id6 fr_uid.fru_id6 union { - u_int16_t fru_id4; - u_int32_t fru_id6; + u_int16_t fru_id4; + u_int32_t fru_id6; } fr_uid; - int fr_af; - u_int32_t fr_timeout; -#define fr_queue fr_u.fru_queue -#define fr_cache fr_u.fru_cache + int fr_af; + u_int32_t fr_timeout; +#define fr_queue fr_u.fru_queue +#define fr_cache fr_u.fru_cache union { - LIST_HEAD(pf_fragq, pf_frent) fru_queue; /* buffering */ - LIST_HEAD(pf_cacheq, pf_frcache) fru_cache; /* non-buf */ + LIST_HEAD(pf_fragq, pf_frent) fru_queue; /* buffering */ + LIST_HEAD(pf_cacheq, pf_frcache) fru_cache; /* non-buf */ } fr_u; - uint32_t fr_csum_flags; /* checksum flags */ - uint32_t fr_csum; /* partial checksum value */ + uint32_t fr_csum_flags; /* checksum flags */ + uint32_t fr_csum; /* partial checksum value */ }; -static TAILQ_HEAD(pf_fragqueue, pf_fragment) pf_fragqueue; -static TAILQ_HEAD(pf_cachequeue, pf_fragment) pf_cachequeue; +static TAILQ_HEAD(pf_fragqueue, pf_fragment) pf_fragqueue; +static TAILQ_HEAD(pf_cachequeue, pf_fragment) pf_cachequeue; static __inline int pf_frag_compare(struct pf_fragment *, struct pf_fragment *); -static RB_HEAD(pf_frag_tree, pf_fragment) pf_frag_tree, pf_cache_tree; +static RB_HEAD(pf_frag_tree, pf_fragment) pf_frag_tree, pf_cache_tree; RB_PROTOTYPE_SC(__private_extern__, pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare); RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare); @@ -158,9 +158,9 @@ static void pf_free_fragment(struct pf_fragment *); static struct pf_fragment *pf_find_fragment_by_key(struct pf_fragment *, struct pf_frag_tree *); static __inline struct pf_fragment * - pf_find_fragment_by_ipv4_header(struct ip *, struct pf_frag_tree *); +pf_find_fragment_by_ipv4_header(struct ip *, struct pf_frag_tree *); static __inline struct pf_fragment * - pf_find_fragment_by_ipv6_header(struct ip6_hdr *, struct ip6_frag *, +pf_find_fragment_by_ipv6_header(struct ip6_hdr *, struct ip6_frag *, struct pf_frag_tree *); static struct mbuf *pf_reassemble(struct mbuf *, struct pf_fragment **, struct pf_frent *, int); @@ -173,32 +173,32 @@ static struct mbuf 
*pf_frag6cache(struct mbuf **, struct ip6_hdr*, static int pf_normalize_tcpopt(struct pf_rule *, int, struct pfi_kif *, struct pf_pdesc *, pbuf_t *, struct tcphdr *, int, int *); -#define DPFPRINTF(x) do { \ - if (pf_status.debug >= PF_DEBUG_MISC) { \ - printf("%s: ", __func__); \ - printf x ; \ - } \ +#define DPFPRINTF(x) do { \ + if (pf_status.debug >= PF_DEBUG_MISC) { \ + printf("%s: ", __func__); \ + printf x ; \ + } \ } while (0) /* Globals */ -struct pool pf_frent_pl, pf_frag_pl; -static struct pool pf_cache_pl, pf_cent_pl; -struct pool pf_state_scrub_pl; +struct pool pf_frent_pl, pf_frag_pl; +static struct pool pf_cache_pl, pf_cent_pl; +struct pool pf_state_scrub_pl; -static int pf_nfrents, pf_ncache; +static int pf_nfrents, pf_ncache; void pf_normalize_init(void) { - pool_init(&pf_frent_pl, sizeof (struct pf_frent), 0, 0, 0, "pffrent", + pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent", NULL); - pool_init(&pf_frag_pl, sizeof (struct pf_fragment), 0, 0, 0, "pffrag", + pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag", NULL); - pool_init(&pf_cache_pl, sizeof (struct pf_fragment), 0, 0, 0, + pool_init(&pf_cache_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrcache", NULL); - pool_init(&pf_cent_pl, sizeof (struct pf_frcache), 0, 0, 0, "pffrcent", + pool_init(&pf_cent_pl, sizeof(struct pf_frcache), 0, 0, 0, "pffrcent", NULL); - pool_init(&pf_state_scrub_pl, sizeof (struct pf_state_scrub), 0, 0, 0, + pool_init(&pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0, 0, 0, "pfstscr", NULL); pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT); @@ -225,75 +225,77 @@ pf_normalize_destroy(void) int pf_normalize_isempty(void) { - return (TAILQ_EMPTY(&pf_fragqueue) && TAILQ_EMPTY(&pf_cachequeue)); + return TAILQ_EMPTY(&pf_fragqueue) && TAILQ_EMPTY(&pf_cachequeue); } static __inline int pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b) { - int diff; + int diff; - if ((diff = a->fr_af - b->fr_af)) - return (diff); - else if ((diff = a->fr_p - b->fr_p)) - return (diff); - else { + if ((diff = a->fr_af - b->fr_af)) { + return diff; + } else if ((diff = a->fr_p - b->fr_p)) { + return diff; + } else { struct pf_addr *sa = &a->fr_srcx; struct pf_addr *sb = &b->fr_srcx; struct pf_addr *da = &a->fr_dstx; struct pf_addr *db = &b->fr_dstx; - + switch (a->fr_af) { #ifdef INET case AF_INET: - if ((diff = a->fr_id - b->fr_id)) - return (diff); - else if (sa->v4addr.s_addr < sb->v4addr.s_addr) - return (-1); - else if (sa->v4addr.s_addr > sb->v4addr.s_addr) - return (1); - else if (da->v4addr.s_addr < db->v4addr.s_addr) - return (-1); - else if (da->v4addr.s_addr > db->v4addr.s_addr) - return (1); + if ((diff = a->fr_id - b->fr_id)) { + return diff; + } else if (sa->v4addr.s_addr < sb->v4addr.s_addr) { + return -1; + } else if (sa->v4addr.s_addr > sb->v4addr.s_addr) { + return 1; + } else if (da->v4addr.s_addr < db->v4addr.s_addr) { + return -1; + } else if (da->v4addr.s_addr > db->v4addr.s_addr) { + return 1; + } break; #endif #ifdef INET6 case AF_INET6: - if ((diff = a->fr_id6 - b->fr_id6)) - return (diff); - else if (sa->addr32[3] < sb->addr32[3]) - return (-1); - else if (sa->addr32[3] > sb->addr32[3]) - return (1); - else if (sa->addr32[2] < sb->addr32[2]) - return (-1); - else if (sa->addr32[2] > sb->addr32[2]) - return (1); - else if (sa->addr32[1] < sb->addr32[1]) - return (-1); - else if (sa->addr32[1] > sb->addr32[1]) - return (1); - else if (sa->addr32[0] < sb->addr32[0]) - return (-1); - else if (sa->addr32[0] > sb->addr32[0]) - return (1); - else if 
(da->addr32[3] < db->addr32[3]) - return (-1); - else if (da->addr32[3] > db->addr32[3]) - return (1); - else if (da->addr32[2] < db->addr32[2]) - return (-1); - else if (da->addr32[2] > db->addr32[2]) - return (1); - else if (da->addr32[1] < db->addr32[1]) - return (-1); - else if (da->addr32[1] > db->addr32[1]) - return (1); - else if (da->addr32[0] < db->addr32[0]) - return (-1); - else if (da->addr32[0] > db->addr32[0]) - return (1); + if ((diff = a->fr_id6 - b->fr_id6)) { + return diff; + } else if (sa->addr32[3] < sb->addr32[3]) { + return -1; + } else if (sa->addr32[3] > sb->addr32[3]) { + return 1; + } else if (sa->addr32[2] < sb->addr32[2]) { + return -1; + } else if (sa->addr32[2] > sb->addr32[2]) { + return 1; + } else if (sa->addr32[1] < sb->addr32[1]) { + return -1; + } else if (sa->addr32[1] > sb->addr32[1]) { + return 1; + } else if (sa->addr32[0] < sb->addr32[0]) { + return -1; + } else if (sa->addr32[0] > sb->addr32[0]) { + return 1; + } else if (da->addr32[3] < db->addr32[3]) { + return -1; + } else if (da->addr32[3] > db->addr32[3]) { + return 1; + } else if (da->addr32[2] < db->addr32[2]) { + return -1; + } else if (da->addr32[2] > db->addr32[2]) { + return 1; + } else if (da->addr32[1] < db->addr32[1]) { + return -1; + } else if (da->addr32[1] > db->addr32[1]) { + return 1; + } else if (da->addr32[0] < db->addr32[0]) { + return -1; + } else if (da->addr32[0] > db->addr32[0]) { + return 1; + } break; #endif default: @@ -301,7 +303,7 @@ pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b) break; } } - return (0); + return 0; } void @@ -313,46 +315,48 @@ pf_purge_expired_fragments(void) while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) { VERIFY(BUFFER_FRAGMENTS(frag)); - if (frag->fr_timeout > expire) + if (frag->fr_timeout > expire) { break; + } switch (frag->fr_af) { case AF_INET: - DPFPRINTF(("expiring IPv4 %d(0x%llx) from queue.\n", - ntohs(frag->fr_id), - (uint64_t)VM_KERNEL_ADDRPERM(frag))); - break; + DPFPRINTF(("expiring IPv4 %d(0x%llx) from queue.\n", + ntohs(frag->fr_id), + (uint64_t)VM_KERNEL_ADDRPERM(frag))); + break; case AF_INET6: - DPFPRINTF(("expiring IPv6 %d(0x%llx) from queue.\n", - ntohl(frag->fr_id6), - (uint64_t)VM_KERNEL_ADDRPERM(frag))); - break; + DPFPRINTF(("expiring IPv6 %d(0x%llx) from queue.\n", + ntohl(frag->fr_id6), + (uint64_t)VM_KERNEL_ADDRPERM(frag))); + break; default: - VERIFY(0 && "only IPv4 and IPv6 supported"); - break; + VERIFY(0 && "only IPv4 and IPv6 supported"); + break; } pf_free_fragment(frag); } while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) { VERIFY(!BUFFER_FRAGMENTS(frag)); - if (frag->fr_timeout > expire) + if (frag->fr_timeout > expire) { break; + } switch (frag->fr_af) { case AF_INET: - DPFPRINTF(("expiring IPv4 %d(0x%llx) from cache.\n", - ntohs(frag->fr_id), - (uint64_t)VM_KERNEL_ADDRPERM(frag))); - break; + DPFPRINTF(("expiring IPv4 %d(0x%llx) from cache.\n", + ntohs(frag->fr_id), + (uint64_t)VM_KERNEL_ADDRPERM(frag))); + break; case AF_INET6: - DPFPRINTF(("expiring IPv6 %d(0x%llx) from cache.\n", - ntohl(frag->fr_id6), - (uint64_t)VM_KERNEL_ADDRPERM(frag))); - break; + DPFPRINTF(("expiring IPv6 %d(0x%llx) from cache.\n", + ntohl(frag->fr_id6), + (uint64_t)VM_KERNEL_ADDRPERM(frag))); + break; default: - VERIFY(0 && "only IPv4 and IPv6 supported"); - break; + VERIFY(0 && "only IPv4 and IPv6 supported"); + break; } pf_free_fragment(frag); VERIFY(TAILQ_EMPTY(&pf_cachequeue) || @@ -367,16 +371,17 @@ pf_purge_expired_fragments(void) static void pf_flush_fragments(void) { - struct 
pf_fragment *frag; - int goal; + struct pf_fragment *frag; + int goal; goal = pf_nfrents * 9 / 10; DPFPRINTF(("trying to free > %d frents\n", pf_nfrents - goal)); while (goal < pf_nfrents) { frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue); - if (frag == NULL) + if (frag == NULL) { break; + } pf_free_fragment(frag); } @@ -386,8 +391,9 @@ pf_flush_fragments(void) pf_ncache - goal)); while (goal < pf_ncache) { frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue); - if (frag == NULL) + if (frag == NULL) { break; + } pf_free_fragment(frag); } } @@ -397,8 +403,8 @@ pf_flush_fragments(void) static void pf_free_fragment(struct pf_fragment *frag) { - struct pf_frent *frent; - struct pf_frcache *frcache; + struct pf_frent *frent; + struct pf_frcache *frcache; /* Free all fragments */ if (BUFFER_FRAGMENTS(frag)) { @@ -437,7 +443,7 @@ pf_ip6hdr2key(struct pf_fragment *key, struct ip6_hdr *ip6, key->fr_srcx.v6addr = ip6->ip6_src; key->fr_dstx.v6addr = ip6->ip6_dst; } - + static void pf_ip2key(struct pf_fragment *key, struct ip *ip) { @@ -452,7 +458,7 @@ static struct pf_fragment * pf_find_fragment_by_key(struct pf_fragment *key, struct pf_frag_tree *tree) { struct pf_fragment *frag; - + frag = RB_FIND(pf_frag_tree, tree, key); if (frag != NULL) { /* XXX Are we sure we want to update the timeout? */ @@ -465,10 +471,10 @@ pf_find_fragment_by_key(struct pf_fragment *key, struct pf_frag_tree *tree) TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next); } } - - return (frag); + + return frag; } - + static __inline struct pf_fragment * pf_find_fragment_by_ipv4_header(struct ip *ip, struct pf_frag_tree *tree) { @@ -481,9 +487,9 @@ static __inline struct pf_fragment * pf_find_fragment_by_ipv6_header(struct ip6_hdr *ip6, struct ip6_frag *fh, struct pf_frag_tree *tree) { - struct pf_fragment key; - pf_ip6hdr2key(&key, ip6, fh); - return pf_find_fragment_by_key(&key, tree); + struct pf_fragment key; + pf_ip6hdr2key(&key, ip6, fh); + return pf_find_fragment_by_key(&key, tree); } /* Removes a fragment from the fragment queue and frees the fragment */ @@ -502,20 +508,20 @@ pf_remove_fragment(struct pf_fragment *frag) } } -#define FR_IP_OFF(fr) ((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3) +#define FR_IP_OFF(fr) ((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3) static struct mbuf * pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, struct pf_frent *frent, int mff) { - struct mbuf *m = m0, *m2; - struct pf_frent *frea, *next; - struct pf_frent *frep = NULL; - struct ip *ip = frent->fr_ip; - uint32_t hlen = ip->ip_hl << 2; - u_int16_t off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3; - u_int16_t ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4; - u_int16_t fr_max = ip_len + off; - uint32_t csum, csum_flags; + struct mbuf *m = m0, *m2; + struct pf_frent *frea, *next; + struct pf_frent *frep = NULL; + struct ip *ip = frent->fr_ip; + uint32_t hlen = ip->ip_hl << 2; + u_int16_t off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3; + u_int16_t ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4; + u_int16_t fr_max = ip_len + off; + uint32_t csum, csum_flags; VERIFY(*frag == NULL || BUFFER_FRAGMENTS(*frag)); @@ -531,7 +537,7 @@ pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, * has already performed its header checksum validation. Also take * care of any trailing bytes and subtract out their partial sum. 
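 * For illustration: the partial sum accumulated here is a plain
 * one's-complement running total, and one common folding idiom (the
 * role ADDCARRY() plays further below) is
 *
 *   while (csum > 0xffff)
 *           csum = (csum & 0xffff) + (csum >> 16);
 *
 * This is a sketch of the arithmetic only, not the exact helper used.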
*/ - if (ip->ip_p == IPPROTO_UDP && hlen == sizeof (struct ip) && + if (ip->ip_p == IPPROTO_UDP && hlen == sizeof(struct ip) && (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PARTIAL)) { @@ -552,10 +558,11 @@ pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, /* callee folds in sum */ csum = m_adj_sum16(m, start, hlen, (ip->ip_len - hlen), csum); - if (hlen > start) + if (hlen > start) { swbytes += (hlen - start); - else + } else { swbytes += (start - hlen); + } #if BYTE_ORDER != BIG_ENDIAN if (start < hlen) { NTOHS(ip->ip_off); @@ -565,10 +572,12 @@ pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, } csum_flags = m->m_pkthdr.csum_flags; - if (swbytes != 0) + if (swbytes != 0) { udp_in_cksum_stats(swbytes); - if (trailer != 0) + } + if (trailer != 0) { m_adj(m, -trailer); + } } else { csum = 0; csum_flags = 0; @@ -587,8 +596,9 @@ pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, if (*frag == NULL) { pf_flush_fragments(); *frag = pool_get(&pf_frag_pl, PR_NOWAIT); - if (*frag == NULL) + if (*frag == NULL) { goto drop_fragment; + } } (*frag)->fr_flags = 0; @@ -618,18 +628,20 @@ pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, * as that of the existing ones, accumulate checksum. Otherwise, * invalidate checksum offload info for the entire datagram. */ - if (csum_flags != 0 && csum_flags == (*frag)->fr_csum_flags) + if (csum_flags != 0 && csum_flags == (*frag)->fr_csum_flags) { (*frag)->fr_csum += csum; - else if ((*frag)->fr_csum_flags != 0) + } else if ((*frag)->fr_csum_flags != 0) { (*frag)->fr_csum_flags = 0; + } /* * Find a fragment after the current one: * - off contains the real shifted offset. */ LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) { - if (FR_IP_OFF(frea) > off) + if (FR_IP_OFF(frea) > off) { break; + } frep = frea; } @@ -638,12 +650,13 @@ pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, if (frep != NULL && FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4 > off) { - u_int16_t precut; + u_int16_t precut; precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4 - off; - if (precut >= ip_len) + if (precut >= ip_len) { goto drop_fragment; + } m_adj(frent->fr_m, precut); DPFPRINTF(("overlap -%d\n", precut)); /* Enforce 8 byte boundaries */ @@ -655,7 +668,7 @@ pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, for (; frea != NULL && ip_len + off > FR_IP_OFF(frea); frea = next) { - u_int16_t aftercut; + u_int16_t aftercut; aftercut = ip_len + off - FR_IP_OFF(frea); DPFPRINTF(("adjust overlap %d\n", aftercut)); @@ -679,20 +692,24 @@ pf_reassemble(struct mbuf *m0, struct pf_fragment **frag, insert: /* Update maximum data size */ - if ((*frag)->fr_max < fr_max) + if ((*frag)->fr_max < fr_max) { (*frag)->fr_max = fr_max; + } /* This is the last segment */ - if (!mff) + if (!mff) { (*frag)->fr_flags |= PFFRAG_SEENLAST; + } - if (frep == NULL) + if (frep == NULL) { LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next); - else + } else { LIST_INSERT_AFTER(frep, frent, fr_next); + } /* Check if we are completely reassembled */ - if (!((*frag)->fr_flags & PFFRAG_SEENLAST)) - return (NULL); + if (!((*frag)->fr_flags & PFFRAG_SEENLAST)) { + return NULL; + } /* Check if we have all the data */ off = 0; @@ -705,12 +722,13 @@ insert: DPFPRINTF(("missing fragment at %d, next %d, max %d\n", off, next == NULL ? 
-1 : FR_IP_OFF(next), (*frag)->fr_max)); - return (NULL); + return NULL; } } DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max)); - if (off < (*frag)->fr_max) - return (NULL); + if (off < (*frag)->fr_max) { + return NULL; + } /* We have all the data */ frent = LIST_FIRST(&(*frag)->fr_queue); @@ -719,7 +737,7 @@ insert: DPFPRINTF(("drop: too big: %d\n", off)); pf_free_fragment(*frag); *frag = NULL; - return (NULL); + return NULL; } next = LIST_NEXT(frent, fr_next); @@ -749,7 +767,7 @@ insert: ADDCARRY(csum); m->m_pkthdr.csum_rx_val = csum; - m->m_pkthdr.csum_rx_start = sizeof (struct ip); + m->m_pkthdr.csum_rx_start = sizeof(struct ip); m->m_pkthdr.csum_flags = (*frag)->fr_csum_flags; } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { @@ -774,33 +792,34 @@ insert: /* XXX this should be done elsewhere */ if (m->m_flags & M_PKTHDR) { int plen = 0; - for (m2 = m; m2; m2 = m2->m_next) + for (m2 = m; m2; m2 = m2->m_next) { plen += m2->m_len; + } m->m_pkthdr.len = plen; } DPFPRINTF(("complete: 0x%llx(%d)\n", (uint64_t)VM_KERNEL_ADDRPERM(m), ntohs(ip->ip_len))); - return (m); + return m; drop_fragment: /* Oops - fail safe - drop packet */ pool_put(&pf_frent_pl, frent); pf_nfrents--; m_freem(m); - return (NULL); + return NULL; } static struct mbuf * pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, int drop, int *nomem) { - struct mbuf *m = *m0; - struct pf_frcache *frp, *fra, *cur = NULL; - int ip_len = ntohs(h->ip_len) - (h->ip_hl << 2); - u_int16_t off = ntohs(h->ip_off) << 3; - u_int16_t fr_max = ip_len + off; - int hosed = 0; + struct mbuf *m = *m0; + struct pf_frcache *frp, *fra, *cur = NULL; + int ip_len = ntohs(h->ip_len) - (h->ip_hl << 2); + u_int16_t off = ntohs(h->ip_off) << 3; + u_int16_t fr_max = ip_len + off; + int hosed = 0; VERIFY(*frag == NULL || !BUFFER_FRAGMENTS(*frag)); @@ -810,8 +829,9 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, if (*frag == NULL) { pf_flush_fragments(); *frag = pool_get(&pf_cache_pl, PR_NOWAIT); - if (*frag == NULL) + if (*frag == NULL) { goto no_mem; + } } /* Get an entry for the queue */ @@ -852,15 +872,16 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, */ frp = NULL; LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) { - if (fra->fr_off > off) + if (fra->fr_off > off) { break; + } frp = fra; } VERIFY(frp != NULL || fra != NULL); if (frp != NULL) { - int precut; + int precut; precut = frp->fr_end - off; if (precut >= ip_len) { @@ -902,8 +923,9 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, * I'll pull a rabbit out of my laptop. 
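 * In outline, the trim below does (hdrlen standing in for
 * h->ip_hl << 2, shown for illustration):
 *
 *   *m0 = m_copym(m, 0, hdrlen, M_NOWAIT);  // duplicate the IP header
 *   m_adj(m, precut + hdrlen);              // drop dup payload + header
 *   m_cat(*m0, m);                          // header copy + trimmed rest
 *
 * hence the "heavy" remark: roughly one allocation plus two chain
 * walks per overlapping fragment.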
*/ *m0 = m_copym(m, 0, h->ip_hl << 2, M_NOWAIT); - if (*m0 == NULL) + if (*m0 == NULL) { goto no_mem; + } VERIFY((*m0)->m_next == NULL); m_adj(m, precut + (h->ip_hl << 2)); m_cat(*m0, m); @@ -911,8 +933,9 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, if (m->m_flags & M_PKTHDR) { int plen = 0; struct mbuf *t; - for (t = m; t; t = t->m_next) + for (t = m; t; t = t->m_next) { plen += t->m_len; + } m->m_pkthdr.len = plen; } @@ -936,8 +959,9 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, fr_max)); cur = pool_get(&pf_cent_pl, PR_NOWAIT); - if (cur == NULL) + if (cur == NULL) { goto no_mem; + } pf_ncache++; cur->fr_off = off; @@ -947,8 +971,8 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, } if (fra != NULL) { - int aftercut; - int merge = 0; + int aftercut; + int merge = 0; aftercut = fr_max - fra->fr_off; if (aftercut == 0) { @@ -972,8 +996,9 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, if (m->m_flags & M_PKTHDR) { int plen = 0; struct mbuf *t; - for (t = m; t; t = t->m_next) + for (t = m; t; t = t->m_next) { plen += t->m_len; + } m->m_pkthdr.len = plen; } h = mtod(m, struct ip *); @@ -990,8 +1015,9 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, fra->fr_end)); cur = pool_get(&pf_cent_pl, PR_NOWAIT); - if (cur == NULL) + if (cur == NULL) { goto no_mem; + } pf_ncache++; cur->fr_off = off; @@ -1013,7 +1039,6 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, pool_put(&pf_cent_pl, cur); pf_ncache--; cur = NULL; - } else if (frp && fra->fr_off <= frp->fr_end) { /* Need to merge in a modified 'frp' */ VERIFY(cur == NULL); @@ -1026,7 +1051,6 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, pool_put(&pf_cent_pl, frp); pf_ncache--; frp = NULL; - } } } @@ -1043,12 +1067,14 @@ pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment **frag, int mff, pass: /* Update maximum data size */ - if ((*frag)->fr_max < fr_max) + if ((*frag)->fr_max < fr_max) { (*frag)->fr_max = fr_max; + } /* This is the last segment */ - if (!mff) + if (!mff) { (*frag)->fr_flags |= PFFRAG_SEENLAST; + } /* Check if we are completely reassembled */ if (((*frag)->fr_flags & PFFRAG_SEENLAST) && @@ -1061,34 +1087,37 @@ pass: *frag = NULL; } - return (m); + return m; no_mem: *nomem = 1; /* Still need to pay attention to !IP_MF */ - if (!mff && *frag != NULL) + if (!mff && *frag != NULL) { (*frag)->fr_flags |= PFFRAG_SEENLAST; + } m_freem(m); - return (NULL); + return NULL; drop_fragment: /* Still need to pay attention to !IP_MF */ - if (!mff && *frag != NULL) + if (!mff && *frag != NULL) { (*frag)->fr_flags |= PFFRAG_SEENLAST; + } if (drop) { /* This fragment has been deemed bad. Don't reass */ - if (((*frag)->fr_flags & PFFRAG_DROP) == 0) + if (((*frag)->fr_flags & PFFRAG_DROP) == 0) { DPFPRINTF(("fragcache[%d]: dropping overall fragment\n", h->ip_id)); + } (*frag)->fr_flags |= PFFRAG_DROP; } m_freem(m); - return (NULL); + return NULL; } #define FR_IP6_OFF(fr) \ @@ -1104,7 +1133,7 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, struct ip6_frag *ip6f; int plen, off, fr_max; uint32_t uoff, csum, csum_flags; - + VERIFY(*frag == NULL || BUFFER_FRAGMENTS(*frag)); m = *m0; frep = NULL; @@ -1128,12 +1157,12 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, * care of any trailing bytes and subtract out their partial sum. 
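 * Note (restating the code that follows): for scope-embedded v6
 * addresses the zone index kept in s6_addr16[1] is stashed in the
 * local s/d temporaries, zeroed for the duration of the sum
 * adjustment, and restored afterwards, so the embedded scope never
 * leaks into the checksum.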
*/ if (ip6f->ip6f_nxt == IPPROTO_UDP && - uoff == (sizeof (*ip6) + sizeof (*ip6f)) && + uoff == (sizeof(*ip6) + sizeof(*ip6f)) && (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PARTIAL)) { uint32_t start = m->m_pkthdr.csum_rx_start; - uint32_t ip_len = (sizeof (*ip6) + ntohs(ip6->ip6_plen)); + uint32_t ip_len = (sizeof(*ip6) + ntohs(ip6->ip6_plen)); int32_t trailer = (m_pktlen(m) - ip_len); uint32_t swbytes = (uint32_t)trailer; @@ -1145,7 +1174,7 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { s = ip6->ip6_src.s6_addr16[1]; - ip6->ip6_src.s6_addr16[1] = 0 ; + ip6->ip6_src.s6_addr16[1] = 0; } if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { d = ip6->ip6_dst.s6_addr16[1]; @@ -1155,23 +1184,27 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, /* callee folds in sum */ csum = m_adj_sum16(m, start, uoff, (ip_len - uoff), csum); - if (uoff > start) + if (uoff > start) { swbytes += (uoff - start); - else + } else { swbytes += (start - uoff); + } - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { ip6->ip6_src.s6_addr16[1] = s; - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) + } + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { ip6->ip6_dst.s6_addr16[1] = d; - + } } csum_flags = m->m_pkthdr.csum_flags; - if (swbytes != 0) + if (swbytes != 0) { udp_in6_cksum_stats(swbytes); - if (trailer != 0) + } + if (trailer != 0) { m_adj(m, -trailer); + } } else { csum = 0; csum_flags = 0; @@ -1179,21 +1212,22 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, /* Invalidate checksum */ m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID; - + /* strip off headers up to the fragment payload */ m->m_data += frent->fr_ip6f_hlen; m->m_len -= frent->fr_ip6f_hlen; - + /* Create a new reassembly queue for this packet */ if (*frag == NULL) { *frag = pool_get(&pf_frag_pl, PR_NOWAIT); if (*frag == NULL) { pf_flush_fragments(); *frag = pool_get(&pf_frag_pl, PR_NOWAIT); - if (*frag == NULL) + if (*frag == NULL) { goto drop_fragment; + } } - + (*frag)->fr_flags = 0; (*frag)->fr_max = 0; (*frag)->fr_af = AF_INET6; @@ -1207,10 +1241,10 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, (*frag)->fr_csum = csum; } LIST_INIT(&(*frag)->fr_queue); - + RB_INSERT(pf_frag_tree, &pf_frag_tree, *frag); TAILQ_INSERT_HEAD(&pf_fragqueue, *frag, frag_next); - + /* We do not have a previous fragment */ frep = NULL; goto insert; @@ -1221,32 +1255,34 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, * as that of the existing ones, accumulate checksum. Otherwise, * invalidate checksum offload info for the entire datagram. */ - if (csum_flags != 0 && csum_flags == (*frag)->fr_csum_flags) + if (csum_flags != 0 && csum_flags == (*frag)->fr_csum_flags) { (*frag)->fr_csum += csum; - else if ((*frag)->fr_csum_flags != 0) + } else if ((*frag)->fr_csum_flags != 0) { (*frag)->fr_csum_flags = 0; - + } + /* * Find a fragment after the current one: * - off contains the real shifted offset. 
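 * The loop below is the usual insertion-point walk over an
 * offset-sorted list; schematically (off_of() standing in for the
 * FR_IP6_OFF() macro, illustration only):
 *
 *   frep = NULL;
 *   LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) {
 *           if (off_of(frea) > off)
 *                   break;
 *           frep = frea;
 *   }
 *
 * leaving frep at the last entry at or before off (or NULL if the new
 * fragment sorts first).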
*/ LIST_FOREACH(frea, &(*frag)->fr_queue, fr_next) { - if (FR_IP6_OFF(frea) > off) + if (FR_IP6_OFF(frea) > off) { break; + } frep = frea; } - + VERIFY(frep != NULL || frea != NULL); - + if (frep != NULL && - FR_IP6_OFF(frep) + FR_IP6_PLEN(frep) - frep->fr_ip6f_hlen > off) - { + FR_IP6_OFF(frep) + FR_IP6_PLEN(frep) - frep->fr_ip6f_hlen > off) { u_int16_t precut; - + precut = FR_IP6_OFF(frep) + FR_IP6_PLEN(frep) - frep->fr_ip6f_hlen - off; - if (precut >= plen) + if (precut >= plen) { goto drop_fragment; + } m_adj(frent->fr_m, precut); DPFPRINTF(("overlap -%d\n", precut)); /* Enforce 8 byte boundaries */ @@ -1257,22 +1293,22 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, plen -= precut; ip6->ip6_plen = htons(plen); } - + for (; frea != NULL && plen + off > FR_IP6_OFF(frea); frea = next) { - u_int16_t aftercut; - + u_int16_t aftercut; + aftercut = plen + off - FR_IP6_OFF(frea); DPFPRINTF(("adjust overlap %d\n", aftercut)); if (aftercut < FR_IP6_PLEN(frea) - frea->fr_ip6f_hlen) { frea->fr_ip6->ip6_plen = htons(FR_IP6_PLEN(frea) - - aftercut); + aftercut); frea->fr_ip6f_opt.ip6f_offlg = htons(ntohs(frea->fr_ip6f_opt.ip6f_offlg) + (aftercut >> 3)); m_adj(frea->fr_m, aftercut); break; } - + /* This fragment is completely overlapped, lose it */ next = LIST_NEXT(frea, fr_next); m_freem(frea->fr_m); @@ -1280,44 +1316,49 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, pool_put(&pf_frent_pl, frea); pf_nfrents--; } - - insert: + +insert: /* Update maximum data size */ - if ((*frag)->fr_max < fr_max) + if ((*frag)->fr_max < fr_max) { (*frag)->fr_max = fr_max; + } /* This is the last segment */ - if (!mff) + if (!mff) { (*frag)->fr_flags |= PFFRAG_SEENLAST; - - if (frep == NULL) + } + + if (frep == NULL) { LIST_INSERT_HEAD(&(*frag)->fr_queue, frent, fr_next); - else + } else { LIST_INSERT_AFTER(frep, frent, fr_next); - + } + /* Check if we are completely reassembled */ - if (!((*frag)->fr_flags & PFFRAG_SEENLAST)) - return (NULL); - + if (!((*frag)->fr_flags & PFFRAG_SEENLAST)) { + return NULL; + } + /* Check if we have all the data */ off = 0; for (frep = LIST_FIRST(&(*frag)->fr_queue); frep; frep = next) { next = LIST_NEXT(frep, fr_next); off += FR_IP6_PLEN(frep) - (frent->fr_ip6f_hlen - sizeof *ip6); DPFPRINTF(("frep at %d, next %d, max %d\n", - off, next == NULL ? -1 : FR_IP6_OFF(next), - (*frag)->fr_max)); + off, next == NULL ? -1 : FR_IP6_OFF(next), + (*frag)->fr_max)); if (off < (*frag)->fr_max && (next == NULL || FR_IP6_OFF(next) != off)) { DPFPRINTF(("missing fragment at %d, next %d, max %d\n", off, next == NULL ? 
-1 : FR_IP6_OFF(next), (*frag)->fr_max)); - return (NULL); + return NULL; } } DPFPRINTF(("%d < %d?\n", off, (*frag)->fr_max)); - if (off < (*frag)->fr_max) - return (NULL); - + if (off < (*frag)->fr_max) { + return NULL; + } + /* We have all the data */ frent = LIST_FIRST(&(*frag)->fr_queue); VERIFY(frent != NULL); @@ -1325,9 +1366,9 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, DPFPRINTF(("drop: too big: %d\n", off)); pf_free_fragment(*frag); *frag = NULL; - return (NULL); + return NULL; } - + ip6 = frent->fr_ip6; ip6->ip6_nxt = (*frag)->fr_p; ip6->ip6_plen = htons(off); @@ -1340,7 +1381,7 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, ADDCARRY(csum); m->m_pkthdr.csum_rx_val = csum; - m->m_pkthdr.csum_rx_start = sizeof (struct ip6_hdr); + m->m_pkthdr.csum_rx_start = sizeof(struct ip6_hdr); m->m_pkthdr.csum_flags = (*frag)->fr_csum_flags; } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { @@ -1349,16 +1390,16 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, m->m_pkthdr.csum_flags &= ~CSUM_PARTIAL; m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } - + /* Remove from fragment queue */ pf_remove_fragment(*frag); *frag = NULL; - + m = frent->fr_m; m->m_len += sizeof(struct ip6_hdr); m->m_data -= sizeof(struct ip6_hdr); memmove(m->m_data, ip6, sizeof(struct ip6_hdr)); - + next = LIST_NEXT(frent, fr_next); pool_put(&pf_frent_pl, frent); pf_nfrents--; @@ -1370,22 +1411,23 @@ pf_reassemble6(struct mbuf **m0, struct pf_fragment **frag, pool_put(&pf_frent_pl, frent); pf_nfrents--; } - + /* XXX this should be done elsewhere */ if (m->m_flags & M_PKTHDR) { int pktlen = 0; - for (m2 = m; m2; m2 = m2->m_next) + for (m2 = m; m2; m2 = m2->m_next) { pktlen += m2->m_len; + } m->m_pkthdr.len = pktlen; } - + DPFPRINTF(("complete: 0x%llx ip6_plen %d m_pkthdr.len %d\n", (uint64_t)VM_KERNEL_ADDRPERM(m), ntohs(ip6->ip6_plen), m->m_pkthdr.len)); return m; - - drop_fragment: + +drop_fragment: /* Oops - fail safe - drop packet */ pool_put(&pf_frent_pl, frent); --pf_nfrents; @@ -1401,7 +1443,7 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, u_int16_t plen, off, fr_max; struct pf_frcache *frp, *fra, *cur = NULL; int hosed = 0; - + VERIFY(*frag == NULL || !BUFFER_FRAGMENTS(*frag)); m = *m0; off = ntohs(fh->ip6f_offlg & IP6F_OFF_MASK); @@ -1409,7 +1451,7 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, /* * Apple Modification: dimambro@apple.com. The hlen, being passed - * into this function Includes all the headers associated with + * into this function Includes all the headers associated with * the packet, and may include routing headers, so to get to * the data payload as stored in the original IPv6 header we need * to subtract al those headers and the IP header. @@ -1425,7 +1467,7 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, * of the IPv6 packet to the beginning of the data. 
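 * In other words (illustrative arithmetic): a fragment carrying
 * payload bytes [off, off + plen) of the original datagram gives
 *
 *   fr_max = off + plen
 *
 * the exclusive upper bound that the overlap and completion checks
 * below compare against (*frag)->fr_max.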
*/ fr_max = off + plen; - + DPFPRINTF(("0x%llx plen %u off %u fr_max %u\n", (uint64_t)VM_KERNEL_ADDRPERM(m), plen, off, fr_max)); @@ -1435,10 +1477,11 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, if (*frag == NULL) { pf_flush_fragments(); *frag = pool_get(&pf_cache_pl, PR_NOWAIT); - if (*frag == NULL) + if (*frag == NULL) { goto no_mem; + } } - + /* Get an entry for the queue */ cur = pool_get(&pf_cent_pl, PR_NOWAIT); if (cur == NULL) { @@ -1447,7 +1490,7 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, goto no_mem; } pf_ncache++; - + (*frag)->fr_flags = PFFRAG_NOBUFFER; (*frag)->fr_max = 0; (*frag)->fr_af = AF_INET6; @@ -1456,37 +1499,38 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, (*frag)->fr_p = fh->ip6f_nxt; (*frag)->fr_id6 = fh->ip6f_ident; (*frag)->fr_timeout = pf_time_second(); - + cur->fr_off = off; cur->fr_end = fr_max; LIST_INIT(&(*frag)->fr_cache); LIST_INSERT_HEAD(&(*frag)->fr_cache, cur, fr_next); - + RB_INSERT(pf_frag_tree, &pf_cache_tree, *frag); TAILQ_INSERT_HEAD(&pf_cachequeue, *frag, frag_next); - + DPFPRINTF(("frag6cache[%d]: new %d-%d\n", ntohl(fh->ip6f_ident), off, fr_max)); - + goto pass; } - + /* * Find a fragment after the current one: * - off contains the real shifted offset. */ frp = NULL; LIST_FOREACH(fra, &(*frag)->fr_cache, fr_next) { - if (fra->fr_off > off) + if (fra->fr_off > off) { break; + } frp = fra; } - + VERIFY(frp != NULL || fra != NULL); - + if (frp != NULL) { int precut; - + precut = frp->fr_end - off; if (precut >= plen) { /* Fragment is entirely a duplicate */ @@ -1508,16 +1552,16 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, * But to do so easily, we need to create another * mbuf to throw the original header into. */ - + DPFPRINTF(("frag6cache[%u]: chop %d (%d-%d) %d-%d\n", ntohl(fh->ip6f_ident), precut, frp->fr_off, frp->fr_end, off, fr_max)); - + off += precut; fr_max -= precut; /* Update the previous frag to encompass this one */ frp->fr_end = fr_max; - + if (!drop) { /* XXX Optimization opportunity * This is a very heavy way to trim the payload. @@ -1527,8 +1571,9 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, * I'll pull a rabbit out of my laptop. 
*/ *m0 = m_copym(m, 0, hlen, M_NOWAIT); - if (*m0 == NULL) + if (*m0 == NULL) { goto no_mem; + } VERIFY((*m0)->m_next == NULL); m_adj(m, precut + hlen); m_cat(*m0, m); @@ -1536,13 +1581,14 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, if (m->m_flags & M_PKTHDR) { int pktlen = 0; struct mbuf *t; - for (t = m; t; t = t->m_next) + for (t = m; t; t = t->m_next) { pktlen += t->m_len; + } m->m_pkthdr.len = pktlen; } - + h = mtod(m, struct ip6_hdr *); - + VERIFY((int)m->m_len == ntohs(h->ip6_plen) - precut); fh->ip6f_offlg &= ~IP6F_OFF_MASK; @@ -1556,26 +1602,27 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, } } else { /* There is a gap between fragments */ - + DPFPRINTF(("frag6cache[%u]: gap %d (%d-%d) %d-%d\n", ntohl(fh->ip6f_ident), -precut, frp->fr_off, frp->fr_end, off, fr_max)); - + cur = pool_get(&pf_cent_pl, PR_NOWAIT); - if (cur == NULL) + if (cur == NULL) { goto no_mem; + } pf_ncache++; - + cur->fr_off = off; cur->fr_end = fr_max; LIST_INSERT_AFTER(frp, cur, fr_next); } } - + if (fra != NULL) { - int aftercut; - int merge = 0; - + int aftercut; + int merge = 0; + aftercut = fr_max - fra->fr_off; if (aftercut == 0) { /* Adjacent fragments */ @@ -1591,16 +1638,17 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, fra->fr_off, fra->fr_end)); fra->fr_off = off; fr_max -= aftercut; - + merge = 1; - + if (!drop) { m_adj(m, -aftercut); if (m->m_flags & M_PKTHDR) { int pktlen = 0; struct mbuf *t; - for (t = m; t; t = t->m_next) + for (t = m; t; t = t->m_next) { pktlen += t->m_len; + } m->m_pkthdr.len = pktlen; } h = mtod(m, struct ip6_hdr *); @@ -1616,17 +1664,18 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, DPFPRINTF(("frag6cache[%u]: gap %d %d-%d (%d-%d)\n", ntohl(fh->ip6f_ident), -aftercut, off, fr_max, fra->fr_off, fra->fr_end)); - + cur = pool_get(&pf_cent_pl, PR_NOWAIT); - if (cur == NULL) + if (cur == NULL) { goto no_mem; + } pf_ncache++; - + cur->fr_off = off; cur->fr_end = fr_max; LIST_INSERT_BEFORE(fra, cur, fr_next); } - + /* Need to glue together two separate fragment descriptors */ if (merge) { if (cur && fra->fr_off <= cur->fr_end) { @@ -1657,7 +1706,7 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, } } } - + if (hosed) { /* * We must keep tracking the overall fragment even when @@ -1666,16 +1715,18 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, */ goto drop_fragment; } - - pass: + +pass: /* Update maximum data size */ - if ((*frag)->fr_max < fr_max) + if ((*frag)->fr_max < fr_max) { (*frag)->fr_max = fr_max; - + } + /* This is the last segment */ - if (!mff) + if (!mff) { (*frag)->fr_flags |= PFFRAG_SEENLAST; - + } + /* Check if we are completely reassembled */ if (((*frag)->fr_flags & PFFRAG_SEENLAST) && LIST_FIRST(&(*frag)->fr_cache)->fr_off == 0 && @@ -1686,100 +1737,107 @@ pf_frag6cache(struct mbuf **m0, struct ip6_hdr *h, struct ip6_frag *fh, pf_free_fragment(*frag); *frag = NULL; } - - return (m); - - no_mem: + + return m; + +no_mem: *nomem = 1; - + /* Still need to pay attention to !IP_MF */ - if (!mff && *frag != NULL) + if (!mff && *frag != NULL) { (*frag)->fr_flags |= PFFRAG_SEENLAST; - + } + m_freem(m); - return (NULL); - - drop_fragment: - + return NULL; + +drop_fragment: + /* Still need to pay attention to !IP_MF */ - if (!mff && *frag != NULL) + if (!mff && *frag != NULL) { (*frag)->fr_flags |= PFFRAG_SEENLAST; - + } + if (drop) { /* This fragment has been deemed bad. 
Don't reass */ - if (((*frag)->fr_flags & PFFRAG_DROP) == 0) + if (((*frag)->fr_flags & PFFRAG_DROP) == 0) { DPFPRINTF(("frag6cache[%u]: dropping overall fragment\n", ntohl(fh->ip6f_ident))); + } (*frag)->fr_flags |= PFFRAG_DROP; } - + m_freem(m); - return (NULL); + return NULL; } int pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, struct pf_pdesc *pd) { - struct mbuf *m; - struct pf_rule *r; - struct pf_frent *frent; - struct pf_fragment *frag = NULL; - struct ip *h = pbuf->pb_data; - int mff = (ntohs(h->ip_off) & IP_MF); - int hlen = h->ip_hl << 2; - u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; - u_int16_t fr_max; - int ip_len; - int ip_off; - int asd = 0; - struct pf_ruleset *ruleset = NULL; - struct ifnet *ifp = pbuf->pb_ifp; + struct mbuf *m; + struct pf_rule *r; + struct pf_frent *frent; + struct pf_fragment *frag = NULL; + struct ip *h = pbuf->pb_data; + int mff = (ntohs(h->ip_off) & IP_MF); + int hlen = h->ip_hl << 2; + u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3; + u_int16_t fr_max; + int ip_len; + int ip_off; + int asd = 0; + struct pf_ruleset *ruleset = NULL; + struct ifnet *ifp = pbuf->pb_ifp; r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); while (r != NULL) { r->evaluations++; - if (pfi_kif_match(r->kif, kif) == r->ifnot) + if (pfi_kif_match(r->kif, kif) == r->ifnot) { r = r->skip[PF_SKIP_IFP].ptr; - else if (r->direction && r->direction != dir) + } else if (r->direction && r->direction != dir) { r = r->skip[PF_SKIP_DIR].ptr; - else if (r->af && r->af != AF_INET) + } else if (r->af && r->af != AF_INET) { r = r->skip[PF_SKIP_AF].ptr; - else if (r->proto && r->proto != h->ip_p) + } else if (r->proto && r->proto != h->ip_p) { r = r->skip[PF_SKIP_PROTO].ptr; - else if (PF_MISMATCHAW(&r->src.addr, + } else if (PF_MISMATCHAW(&r->src.addr, (struct pf_addr *)&h->ip_src.s_addr, AF_INET, - r->src.neg, kif)) + r->src.neg, kif)) { r = r->skip[PF_SKIP_SRC_ADDR].ptr; - else if (PF_MISMATCHAW(&r->dst.addr, + } else if (PF_MISMATCHAW(&r->dst.addr, (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, - r->dst.neg, NULL)) + r->dst.neg, NULL)) { r = r->skip[PF_SKIP_DST_ADDR].ptr; - else { - if (r->anchor == NULL) + } else { + if (r->anchor == NULL) { break; - else + } else { pf_step_into_anchor(&asd, &ruleset, PF_RULESET_SCRUB, &r, NULL, NULL); + } } if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, - PF_RULESET_SCRUB, &r, NULL, NULL)) + PF_RULESET_SCRUB, &r, NULL, NULL)) { break; + } } - if (r == NULL || r->action == PF_NOSCRUB) - return (PF_PASS); - else { + if (r == NULL || r->action == PF_NOSCRUB) { + return PF_PASS; + } else { r->packets[dir == PF_OUT]++; r->bytes[dir == PF_OUT] += pd->tot_len; } /* Check for illegal packets */ - if (hlen < (int)sizeof (struct ip)) + if (hlen < (int)sizeof(struct ip)) { goto drop; + } - if (hlen > ntohs(h->ip_len)) + if (hlen > ntohs(h->ip_len)) { goto drop; + } /* Clear IP_DF if the rule uses the no-df option */ if (r->rule_flag & PFRULE_NODF && h->ip_off & htons(IP_DF)) { @@ -1790,8 +1848,9 @@ pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, } /* We will need other tests here */ - if (!fragoff && !mff) + if (!fragoff && !mff) { goto no_fragment; + } /* * We're dealing with a fragment now. 
Don't allow fragments @@ -1819,18 +1878,19 @@ pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, } fr_max = fragoff + ip_len; - if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) { + if ((r->rule_flag & (PFRULE_FRAGCROP | PFRULE_FRAGDROP)) == 0) { /* Fully buffer all of the fragments */ frag = pf_find_fragment_by_ipv4_header(h, &pf_frag_tree); /* Check if we saw the last fragment already */ if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) && - fr_max > frag->fr_max) + fr_max > frag->fr_max) { goto bad; + } if ((m = pbuf_to_mbuf(pbuf, TRUE)) == NULL) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } VERIFY(!pbuf_is_valid(pbuf)); @@ -1843,7 +1903,7 @@ pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, if (frent == NULL) { REASON_SET(reason, PFRES_MEMORY); m_freem(m); - return (PF_DROP); + return PF_DROP; } pf_nfrents++; frent->fr_ip = h; @@ -1854,8 +1914,9 @@ pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, fragoff, fr_max)); m = pf_reassemble(m, &frag, frent, mff); - if (m == NULL) - return (PF_DROP); + if (m == NULL) { + return PF_DROP; + } VERIFY(m->m_flags & M_PKTHDR); pbuf_init_mbuf(pbuf, m, ifp); @@ -1878,11 +1939,12 @@ pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, h = mtod(m, struct ip *); - if (frag != NULL && (frag->fr_flags & PFFRAG_DROP)) + if (frag != NULL && (frag->fr_flags & PFFRAG_DROP)) { goto drop; + } } else { /* non-buffering fragment cache (drops or masks overlaps) */ - int nomem = 0; + int nomem = 0; if (dir == PF_OUT && (pd->pf_mtag->pftag_flags & PF_TAG_FRAGCACHE)) { /* @@ -1898,8 +1960,9 @@ pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, /* Check if we saw the last fragment already */ if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) && fr_max > frag->fr_max) { - if (r->rule_flag & PFRULE_FRAGDROP) + if (r->rule_flag & PFRULE_FRAGDROP) { frag->fr_flags |= PFFRAG_DROP; + } goto bad; } @@ -1917,8 +1980,9 @@ pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, (r->rule_flag & PFRULE_FRAGDROP) ? 
1 : 0, &nomem); if (m == NULL) { // Note: pf_fragcache() has already m_freem'd the mbuf - if (nomem) + if (nomem) { goto no_mem; + } goto drop; } @@ -1940,11 +2004,13 @@ pf_normalize_ip(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, } #endif #endif - if (dir == PF_IN) + if (dir == PF_IN) { pd->pf_mtag->pftag_flags |= PF_TAG_FRAGCACHE; + } - if (frag != NULL && (frag->fr_flags & PFFRAG_DROP)) + if (frag != NULL && (frag->fr_flags & PFFRAG_DROP)) { goto drop; + } goto fragment_pass; } @@ -1975,10 +2041,11 @@ no_fragment: } h->ip_sum = pf_cksum_fixup(h->ip_sum, oip_id, h->ip_id, 0); } - if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) + if ((r->rule_flag & (PFRULE_FRAGCROP | PFRULE_FRAGDROP)) == 0) { pd->flags |= PFDESC_IP_REAS; + } - return (PF_PASS); + return PF_PASS; fragment_pass: /* Enforce a minimum ttl, may cause endless packet loops */ @@ -1988,36 +2055,41 @@ fragment_pass: h->ip_ttl = r->min_ttl; h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0); } - if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) + if ((r->rule_flag & (PFRULE_FRAGCROP | PFRULE_FRAGDROP)) == 0) { pd->flags |= PFDESC_IP_REAS; - return (PF_PASS); + } + return PF_PASS; no_mem: REASON_SET(reason, PFRES_MEMORY); - if (r != NULL && r->log && pbuf_is_valid(pbuf)) + if (r != NULL && r->log && pbuf_is_valid(pbuf)) { PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, *reason, r, NULL, NULL, pd); - return (PF_DROP); + } + return PF_DROP; drop: REASON_SET(reason, PFRES_NORM); - if (r != NULL && r->log && pbuf_is_valid(pbuf)) + if (r != NULL && r->log && pbuf_is_valid(pbuf)) { PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, *reason, r, NULL, NULL, pd); - return (PF_DROP); + } + return PF_DROP; bad: DPFPRINTF(("dropping bad IPv4 fragment\n")); /* Free associated fragments */ - if (frag != NULL) + if (frag != NULL) { pf_free_fragment(frag); + } REASON_SET(reason, PFRES_FRAG); - if (r != NULL && r->log && pbuf_is_valid(pbuf)) + if (r != NULL && r->log && pbuf_is_valid(pbuf)) { PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, *reason, r, NULL, NULL, pd); + } - return (PF_DROP); + return PF_DROP; } #if INET6 @@ -2025,77 +2097,82 @@ int pf_normalize_ip6(pbuf_t *pbuf, int dir, struct pfi_kif *kif, u_short *reason, struct pf_pdesc *pd) { - struct mbuf *m; - struct pf_rule *r; - struct ip6_hdr *h = pbuf->pb_data; - int off; - struct ip6_ext ext; + struct mbuf *m; + struct pf_rule *r; + struct ip6_hdr *h = pbuf->pb_data; + int off; + struct ip6_ext ext; /* adi XXX */ #if 0 - struct ip6_opt opt; - struct ip6_opt_jumbo jumbo; - int optend; - int ooff; + struct ip6_opt opt; + struct ip6_opt_jumbo jumbo; + int optend; + int ooff; #endif - struct ip6_frag frag; - u_int32_t jumbolen = 0, plen; - u_int16_t fragoff = 0; - u_int8_t proto; - int terminal; - struct pf_frent *frent; - struct pf_fragment *pff = NULL; - int mff = 0, rh_cnt = 0; - u_int16_t fr_max; - int asd = 0; - struct pf_ruleset *ruleset = NULL; - struct ifnet *ifp = pbuf->pb_ifp; + struct ip6_frag frag; + u_int32_t jumbolen = 0, plen; + u_int16_t fragoff = 0; + u_int8_t proto; + int terminal; + struct pf_frent *frent; + struct pf_fragment *pff = NULL; + int mff = 0, rh_cnt = 0; + u_int16_t fr_max; + int asd = 0; + struct pf_ruleset *ruleset = NULL; + struct ifnet *ifp = pbuf->pb_ifp; r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); while (r != NULL) { r->evaluations++; - if (pfi_kif_match(r->kif, kif) == r->ifnot) + if (pfi_kif_match(r->kif, kif) == r->ifnot) { r = r->skip[PF_SKIP_IFP].ptr; - else if (r->direction && r->direction != dir) + 
} else if (r->direction && r->direction != dir) { r = r->skip[PF_SKIP_DIR].ptr; - else if (r->af && r->af != AF_INET6) + } else if (r->af && r->af != AF_INET6) { r = r->skip[PF_SKIP_AF].ptr; + } #if 0 /* header chain! */ - else if (r->proto && r->proto != h->ip6_nxt) + else if (r->proto && r->proto != h->ip6_nxt) { r = r->skip[PF_SKIP_PROTO].ptr; + } #endif else if (PF_MISMATCHAW(&r->src.addr, (struct pf_addr *)(uintptr_t)&h->ip6_src, AF_INET6, - r->src.neg, kif)) + r->src.neg, kif)) { r = r->skip[PF_SKIP_SRC_ADDR].ptr; - else if (PF_MISMATCHAW(&r->dst.addr, + } else if (PF_MISMATCHAW(&r->dst.addr, (struct pf_addr *)(uintptr_t)&h->ip6_dst, AF_INET6, - r->dst.neg, NULL)) + r->dst.neg, NULL)) { r = r->skip[PF_SKIP_DST_ADDR].ptr; - else { - if (r->anchor == NULL) + } else { + if (r->anchor == NULL) { break; - else + } else { pf_step_into_anchor(&asd, &ruleset, PF_RULESET_SCRUB, &r, NULL, NULL); + } } if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, - PF_RULESET_SCRUB, &r, NULL, NULL)) + PF_RULESET_SCRUB, &r, NULL, NULL)) { break; + } } - if (r == NULL || r->action == PF_NOSCRUB) - return (PF_PASS); - else { + if (r == NULL || r->action == PF_NOSCRUB) { + return PF_PASS; + } else { r->packets[dir == PF_OUT]++; r->bytes[dir == PF_OUT] += pd->tot_len; } /* Check for illegal packets */ - if ((uint32_t)(sizeof (struct ip6_hdr) + IPV6_MAXPACKET) < - pbuf->pb_packet_len) + if ((uint32_t)(sizeof(struct ip6_hdr) + IPV6_MAXPACKET) < + pbuf->pb_packet_len) { goto drop; + } - off = sizeof (struct ip6_hdr); + off = sizeof(struct ip6_hdr); proto = h->ip6_nxt; terminal = 0; do { @@ -2106,9 +2183,10 @@ pf_normalize_ip6(pbuf_t *pbuf, int dir, struct pfi_kif *kif, case IPPROTO_AH: case IPPROTO_ROUTING: case IPPROTO_DSTOPTS: - if (!pf_pull_hdr(pbuf, off, &ext, sizeof (ext), NULL, - NULL, AF_INET6)) + if (!pf_pull_hdr(pbuf, off, &ext, sizeof(ext), NULL, + NULL, AF_INET6)) { goto shortpkt; + } /* * * Multiple routing headers not allowed. 
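* Type 0 routing headers are rejected as well: the checks in the next hunk drop any header chain carrying more than one routing header (rh_cnt) and any IPV6_RTHDR_TYPE_0 header, which RFC 5095 deprecated because RT0 can be abused for traffic amplification.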
@@ -2117,61 +2195,70 @@ pf_normalize_ip6(pbuf_t *pbuf, int dir, struct pfi_kif *kif, if (proto == IPPROTO_ROUTING) { const struct ip6_rthdr *rh = (const struct ip6_rthdr *)&ext; - if (rh_cnt++) + if (rh_cnt++) { goto drop; - if (rh->ip6r_type == IPV6_RTHDR_TYPE_0) + } + if (rh->ip6r_type == IPV6_RTHDR_TYPE_0) { goto drop; - } - else - if (proto == IPPROTO_AH) + } + } else if (proto == IPPROTO_AH) { off += (ext.ip6e_len + 2) * 4; - else + } else { off += (ext.ip6e_len + 1) * 8; + } proto = ext.ip6e_nxt; break; case IPPROTO_HOPOPTS: /* adi XXX */ #if 0 - if (!pf_pull_hdr(m, off, &ext, sizeof (ext), NULL, - NULL, AF_INET6)) + if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL, + NULL, AF_INET6)) { goto shortpkt; + } optend = off + (ext.ip6e_len + 1) * 8; - ooff = off + sizeof (ext); + ooff = off + sizeof(ext); do { if (!pf_pull_hdr(m, ooff, &opt.ip6o_type, - sizeof (opt.ip6o_type), NULL, NULL, - AF_INET6)) + sizeof(opt.ip6o_type), NULL, NULL, + AF_INET6)) { goto shortpkt; + } if (opt.ip6o_type == IP6OPT_PAD1) { ooff++; continue; } - if (!pf_pull_hdr(m, ooff, &opt, sizeof (opt), - NULL, NULL, AF_INET6)) + if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt), + NULL, NULL, AF_INET6)) { goto shortpkt; - if (ooff + sizeof (opt) + opt.ip6o_len > optend) + } + if (ooff + sizeof(opt) + opt.ip6o_len > optend) { goto drop; + } switch (opt.ip6o_type) { case IP6OPT_JUMBO: - if (h->ip6_plen != 0) + if (h->ip6_plen != 0) { goto drop; + } if (!pf_pull_hdr(m, ooff, &jumbo, - sizeof (jumbo), NULL, NULL, - AF_INET6)) + sizeof(jumbo), NULL, NULL, + AF_INET6)) { goto shortpkt; + } memcpy(&jumbolen, jumbo.ip6oj_jumbo_len, - sizeof (jumbolen)); + sizeof(jumbolen)); jumbolen = ntohl(jumbolen); - if (jumbolen <= IPV6_MAXPACKET) + if (jumbolen <= IPV6_MAXPACKET) { goto drop; - if (sizeof (struct ip6_hdr) + - jumbolen != m->m_pkthdr.len) + } + if (sizeof(struct ip6_hdr) + + jumbolen != m->m_pkthdr.len) { goto drop; + } break; default: break; } - ooff += sizeof (opt) + opt.ip6o_len; + ooff += sizeof(opt) + opt.ip6o_len; } while (ooff < optend); off = optend; @@ -2185,58 +2272,66 @@ pf_normalize_ip6(pbuf_t *pbuf, int dir, struct pfi_kif *kif, } while (!terminal); /* jumbo payload option must be present, or plen > 0 */ - if (ntohs(h->ip6_plen) == 0) + if (ntohs(h->ip6_plen) == 0) { plen = jumbolen; - else + } else { plen = ntohs(h->ip6_plen); - if (plen == 0) + } + if (plen == 0) { goto drop; - if ((uint32_t)(sizeof (struct ip6_hdr) + plen) > pbuf->pb_packet_len) + } + if ((uint32_t)(sizeof(struct ip6_hdr) + plen) > pbuf->pb_packet_len) { goto shortpkt; + } /* Enforce a minimum ttl, may cause endless packet loops */ - if (r->min_ttl && h->ip6_hlim < r->min_ttl) + if (r->min_ttl && h->ip6_hlim < r->min_ttl) { h->ip6_hlim = r->min_ttl; + } - return (PF_PASS); + return PF_PASS; fragment: - if (ntohs(h->ip6_plen) == 0 || jumbolen) + if (ntohs(h->ip6_plen) == 0 || jumbolen) { goto drop; + } plen = ntohs(h->ip6_plen); - if (!pf_pull_hdr(pbuf, off, &frag, sizeof (frag), NULL, NULL, AF_INET6)) + if (!pf_pull_hdr(pbuf, off, &frag, sizeof(frag), NULL, NULL, AF_INET6)) { goto shortpkt; + } fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK); pd->proto = frag.ip6f_nxt; mff = ntohs(frag.ip6f_offlg & IP6F_MORE_FRAG); off += sizeof frag; - if (fragoff + (plen - off) > IPV6_MAXPACKET) - goto badfrag; - + if (fragoff + (plen - off) > IPV6_MAXPACKET) { + goto badfrag; + } + fr_max = fragoff + plen - (off - sizeof(struct ip6_hdr)); // XXX SCW: mbuf-specific // DPFPRINTF(("0x%llx IPv6 frag plen %u mff %d off %u fragoff %u " // "fr_max %u\n", 
(uint64_t)VM_KERNEL_ADDRPERM(m), plen, mff, off, // fragoff, fr_max)); - - if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) { + + if ((r->rule_flag & (PFRULE_FRAGCROP | PFRULE_FRAGDROP)) == 0) { /* Fully buffer all of the fragments */ pd->flags |= PFDESC_IP_REAS; - + pff = pf_find_fragment_by_ipv6_header(h, &frag, - &pf_frag_tree); - + &pf_frag_tree); + /* Check if we saw the last fragment already */ if (pff != NULL && (pff->fr_flags & PFFRAG_SEENLAST) && - fr_max > pff->fr_max) + fr_max > pff->fr_max) { goto badfrag; + } if ((m = pbuf_to_mbuf(pbuf, TRUE)) == NULL) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } - + /* Restore iph pointer after pbuf_to_mbuf() */ h = mtod(m, struct ip6_hdr *); @@ -2244,7 +2339,7 @@ fragment: frent = pool_get(&pf_frent_pl, PR_NOWAIT); if (frent == NULL) { REASON_SET(reason, PFRES_MEMORY); - return (PF_DROP); + return PF_DROP; } pf_nfrents++; @@ -2252,36 +2347,38 @@ fragment: frent->fr_m = m; frent->fr_ip6f_opt = frag; frent->fr_ip6f_hlen = off; - + /* Might return a completely reassembled mbuf, or NULL */ DPFPRINTF(("reass IPv6 frag %d @ %d-%d\n", - ntohl(frag.ip6f_ident), fragoff, fr_max)); + ntohl(frag.ip6f_ident), fragoff, fr_max)); m = pf_reassemble6(&m, &pff, frent, mff); - - if (m == NULL) - return (PF_DROP); + + if (m == NULL) { + return PF_DROP; + } pbuf_init_mbuf(pbuf, m, ifp); h = pbuf->pb_data; - - if (pff != NULL && (pff->fr_flags & PFFRAG_DROP)) + + if (pff != NULL && (pff->fr_flags & PFFRAG_DROP)) { goto drop; - } - else if (dir == PF_IN || !(pd->pf_mtag->pftag_flags & PF_TAG_FRAGCACHE)) { + } + } else if (dir == PF_IN || !(pd->pf_mtag->pftag_flags & PF_TAG_FRAGCACHE)) { /* non-buffering fragment cache (overlaps: see RFC 5722) */ int nomem = 0; - + pff = pf_find_fragment_by_ipv6_header(h, &frag, &pf_cache_tree); - + /* Check if we saw the last fragment already */ if (pff != NULL && (pff->fr_flags & PFFRAG_SEENLAST) && fr_max > pff->fr_max) { - if (r->rule_flag & PFRULE_FRAGDROP) + if (r->rule_flag & PFRULE_FRAGDROP) { pff->fr_flags |= PFFRAG_DROP; - goto badfrag; + } + goto badfrag; } - + if ((m = pbuf_to_mbuf(pbuf, TRUE)) == NULL) { goto no_mem; } @@ -2290,53 +2387,59 @@ fragment: h = mtod(m, struct ip6_hdr *); m = pf_frag6cache(&m, h, &frag, &pff, off, mff, - (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem); + (r->rule_flag & PFRULE_FRAGDROP) ? 
1 : 0, &nomem); if (m == NULL) { // Note: pf_frag6cache() has already m_freem'd the mbuf - if (nomem) + if (nomem) { goto no_mem; + } goto drop; } - + pbuf_init_mbuf(pbuf, m, ifp); pd->pf_mtag = pf_find_mtag_pbuf(pbuf); h = pbuf->pb_data; - if (dir == PF_IN) + if (dir == PF_IN) { pd->pf_mtag->pftag_flags |= PF_TAG_FRAGCACHE; - - if (pff != NULL && (pff->fr_flags & PFFRAG_DROP)) + } + + if (pff != NULL && (pff->fr_flags & PFFRAG_DROP)) { goto drop; + } } - + /* Enforce a minimum ttl, may cause endless packet loops */ - if (r->min_ttl && h->ip6_hlim < r->min_ttl) + if (r->min_ttl && h->ip6_hlim < r->min_ttl) { h->ip6_hlim = r->min_ttl; - return (PF_PASS); + } + return PF_PASS; - no_mem: +no_mem: REASON_SET(reason, PFRES_MEMORY); goto dropout; - - shortpkt: + +shortpkt: REASON_SET(reason, PFRES_SHORT); goto dropout; - - drop: + +drop: REASON_SET(reason, PFRES_NORM); goto dropout; - - badfrag: + +badfrag: DPFPRINTF(("dropping bad IPv6 fragment\n")); REASON_SET(reason, PFRES_FRAG); goto dropout; - - dropout: - if (pff != NULL) + +dropout: + if (pff != NULL) { pf_free_fragment(pff); - if (r != NULL && r->log && pbuf_is_valid(pbuf)) + } + if (r != NULL && r->log && pbuf_is_valid(pbuf)) { PFLOG_PACKET(kif, h, pbuf, AF_INET6, dir, *reason, r, NULL, NULL, pd); - return (PF_DROP); + } + return PF_DROP; } #endif /* INET6 */ @@ -2345,13 +2448,13 @@ pf_normalize_tcp(int dir, struct pfi_kif *kif, pbuf_t *pbuf, int ipoff, int off, void *h, struct pf_pdesc *pd) { #pragma unused(ipoff, h) - struct pf_rule *r, *rm = NULL; - struct tcphdr *th = pd->hdr.tcp; - int rewrite = 0; - int asd = 0; - u_short reason; - u_int8_t flags; - sa_family_t af = pd->af; + struct pf_rule *r, *rm = NULL; + struct tcphdr *th = pd->hdr.tcp; + int rewrite = 0; + int asd = 0; + u_short reason; + u_int8_t flags; + sa_family_t af = pd->af; struct pf_ruleset *ruleset = NULL; union pf_state_xport sxport, dxport; @@ -2361,33 +2464,33 @@ pf_normalize_tcp(int dir, struct pfi_kif *kif, pbuf_t *pbuf, int ipoff, r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr); while (r != NULL) { r->evaluations++; - if (pfi_kif_match(r->kif, kif) == r->ifnot) + if (pfi_kif_match(r->kif, kif) == r->ifnot) { r = r->skip[PF_SKIP_IFP].ptr; - else if (r->direction && r->direction != dir) + } else if (r->direction && r->direction != dir) { r = r->skip[PF_SKIP_DIR].ptr; - else if (r->af && r->af != af) + } else if (r->af && r->af != af) { r = r->skip[PF_SKIP_AF].ptr; - else if (r->proto && r->proto != pd->proto) + } else if (r->proto && r->proto != pd->proto) { r = r->skip[PF_SKIP_PROTO].ptr; - else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, - r->src.neg, kif)) + } else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, + r->src.neg, kif)) { r = r->skip[PF_SKIP_SRC_ADDR].ptr; - else if (r->src.xport.range.op && + } else if (r->src.xport.range.op && !pf_match_xport(r->src.xport.range.op, r->proto_variant, - &r->src.xport, &sxport)) + &r->src.xport, &sxport)) { r = r->skip[PF_SKIP_SRC_PORT].ptr; - else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, - r->dst.neg, NULL)) + } else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, + r->dst.neg, NULL)) { r = r->skip[PF_SKIP_DST_ADDR].ptr; - else if (r->dst.xport.range.op && + } else if (r->dst.xport.range.op && !pf_match_xport(r->dst.xport.range.op, r->proto_variant, - &r->dst.xport, &dxport)) + &r->dst.xport, &dxport)) { r = r->skip[PF_SKIP_DST_PORT].ptr; - else if (r->os_fingerprint != PF_OSFP_ANY && + } else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(pf_osfp_fingerprint(pd, pbuf, off, th), - 
r->os_fingerprint)) + r->os_fingerprint)) { r = TAILQ_NEXT(r, entries); - else { + } else { if (r->anchor == NULL) { rm = r; break; @@ -2397,47 +2500,54 @@ pf_normalize_tcp(int dir, struct pfi_kif *kif, pbuf_t *pbuf, int ipoff, } } if (r == NULL && pf_step_out_of_anchor(&asd, &ruleset, - PF_RULESET_SCRUB, &r, NULL, NULL)) + PF_RULESET_SCRUB, &r, NULL, NULL)) { break; + } } - if (rm == NULL || rm->action == PF_NOSCRUB) - return (PF_PASS); - else { + if (rm == NULL || rm->action == PF_NOSCRUB) { + return PF_PASS; + } else { r->packets[dir == PF_OUT]++; r->bytes[dir == PF_OUT] += pd->tot_len; } - if (rm->rule_flag & PFRULE_REASSEMBLE_TCP) + if (rm->rule_flag & PFRULE_REASSEMBLE_TCP) { pd->flags |= PFDESC_TCP_NORM; + } flags = th->th_flags; if (flags & TH_SYN) { /* Illegal packet */ - if (flags & TH_RST) + if (flags & TH_RST) { goto tcp_drop; + } - if (flags & TH_FIN) + if (flags & TH_FIN) { flags &= ~TH_FIN; + } } else { /* Illegal packet */ - if (!(flags & (TH_ACK|TH_RST))) + if (!(flags & (TH_ACK | TH_RST))) { goto tcp_drop; + } } if (!(flags & TH_ACK)) { /* These flags are only valid if ACK is set */ - if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG)) + if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG)) { goto tcp_drop; + } } /* Check for illegal header length */ - if (th->th_off < (sizeof (struct tcphdr) >> 2)) + if (th->th_off < (sizeof(struct tcphdr) >> 2)) { goto tcp_drop; + } /* If flags changed, or reserved data set, then adjust */ if (flags != th->th_flags || th->th_x2 != 0) { - u_int16_t ov, nv; + u_int16_t ov, nv; ov = *(u_int16_t *)(&th->th_ack + 1); th->th_flags = flags; @@ -2460,31 +2570,34 @@ pf_normalize_tcp(int dir, struct pfi_kif *kif, pbuf_t *pbuf, int ipoff, if (r->max_mss) { int rv = pf_normalize_tcpopt(r, dir, kif, pd, pbuf, th, off, &rewrite); - if (rv == PF_DROP) + if (rv == PF_DROP) { return rv; + } pbuf = pd->mp; } if (rewrite) { if (pf_lazy_makewritable(pd, pbuf, - off + sizeof (*th)) == NULL) { + off + sizeof(*th)) == NULL) { REASON_SET(&reason, PFRES_MEMORY); - if (r->log) + if (r->log) { PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, reason, r, 0, 0, pd); + } return PF_DROP; } - pbuf_copy_back(pbuf, off, sizeof (*th), th); + pbuf_copy_back(pbuf, off, sizeof(*th), th); } - return (PF_PASS); + return PF_PASS; tcp_drop: REASON_SET(&reason, PFRES_NORM); - if (rm != NULL && r->log) + if (rm != NULL && r->log) { PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, reason, r, NULL, NULL, pd); - return (PF_DROP); + } + return PF_DROP; } int @@ -2499,9 +2612,10 @@ pf_normalize_tcp_init(pbuf_t *pbuf, int off, struct pf_pdesc *pd, VERIFY(src->scrub == NULL); src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT); - if (src->scrub == NULL) - return (1); - bzero(src->scrub, sizeof (*src->scrub)); + if (src->scrub == NULL) { + return 1; + } + bzero(src->scrub, sizeof(*src->scrub)); switch (pd->af) { #if INET @@ -2525,19 +2639,20 @@ pf_normalize_tcp_init(pbuf_t *pbuf, int off, struct pf_pdesc *pd, * All normalizations below are only begun if we see the start of * the connections. 
They must all set an enabled bit in pfss_flags */ - if ((th->th_flags & TH_SYN) == 0) - return (0); + if ((th->th_flags & TH_SYN) == 0) { + return 0; + } - if (th->th_off > (sizeof (struct tcphdr) >> 2) && src->scrub && + if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub && pf_pull_hdr(pbuf, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) { /* Diddle with TCP options */ int hlen; - opt = hdr + sizeof (struct tcphdr); - hlen = (th->th_off << 2) - sizeof (struct tcphdr); + opt = hdr + sizeof(struct tcphdr); + hlen = (th->th_off << 2) - sizeof(struct tcphdr); while (hlen >= TCPOLEN_TIMESTAMP) { switch (*opt) { - case TCPOPT_EOL: /* FALLTHROUGH */ + case TCPOPT_EOL: /* FALLTHROUGH */ case TCPOPT_NOP: opt++; hlen--; @@ -2551,15 +2666,15 @@ pf_normalize_tcp_init(pbuf_t *pbuf, int off, struct pf_pdesc *pd, /* note PFSS_PAWS not set yet */ memcpy(&tsval, &opt[2], - sizeof (u_int32_t)); + sizeof(u_int32_t)); memcpy(&tsecr, &opt[6], - sizeof (u_int32_t)); + sizeof(u_int32_t)); src->scrub->pfss_tsval0 = ntohl(tsval); src->scrub->pfss_tsval = ntohl(tsval); src->scrub->pfss_tsecr = ntohl(tsecr); getmicrouptime(&src->scrub->pfss_last); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ default: hlen -= MAX(opt[1], 2); opt += MAX(opt[1], 2); @@ -2568,16 +2683,18 @@ pf_normalize_tcp_init(pbuf_t *pbuf, int off, struct pf_pdesc *pd, } } - return (0); + return 0; } void pf_normalize_tcp_cleanup(struct pf_state *state) { - if (state->src.scrub) + if (state->src.scrub) { pool_put(&pf_state_scrub_pl, state->src.scrub); - if (state->dst.scrub) + } + if (state->dst.scrub) { pool_put(&pf_state_scrub_pl, state->dst.scrub); + } /* Someday... flush the TCP segment reassembly descriptors. */ } @@ -2607,8 +2724,9 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, case AF_INET: { if (src->scrub) { struct ip *h = pbuf->pb_data; - if (h->ip_ttl > src->scrub->pfss_ttl) + if (h->ip_ttl > src->scrub->pfss_ttl) { src->scrub->pfss_ttl = h->ip_ttl; + } h->ip_ttl = src->scrub->pfss_ttl; } break; @@ -2618,8 +2736,9 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, case AF_INET6: { if (src->scrub) { struct ip6_hdr *h = pbuf->pb_data; - if (h->ip6_hlim > src->scrub->pfss_ttl) + if (h->ip6_hlim > src->scrub->pfss_ttl) { src->scrub->pfss_ttl = h->ip6_hlim; + } h->ip6_hlim = src->scrub->pfss_ttl; } break; @@ -2627,17 +2746,17 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, #endif /* INET6 */ } - if (th->th_off > (sizeof (struct tcphdr) >> 2) && + if (th->th_off > (sizeof(struct tcphdr) >> 2) && ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) || (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) && pf_pull_hdr(pbuf, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) { /* Diddle with TCP options */ int hlen; - opt = hdr + sizeof (struct tcphdr); - hlen = (th->th_off << 2) - sizeof (struct tcphdr); + opt = hdr + sizeof(struct tcphdr); + hlen = (th->th_off << 2) - sizeof(struct tcphdr); while (hlen >= TCPOLEN_TIMESTAMP) { switch (*opt) { - case TCPOPT_EOL: /* FALLTHROUGH */ + case TCPOPT_EOL: /* FALLTHROUGH */ case TCPOPT_NOP: opt++; hlen--; @@ -2657,11 +2776,11 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, printf("\n"); } REASON_SET(reason, PFRES_TS); - return (PF_DROP); + return PF_DROP; } if (opt[1] >= TCPOLEN_TIMESTAMP) { memcpy(&tsval, &opt[2], - sizeof (u_int32_t)); + sizeof(u_int32_t)); if (tsval && src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) { @@ -2676,7 +2795,7 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, 
int off, struct pf_pdesc *pd, /* Modulate TS reply iff valid (!0) */ memcpy(&tsecr, &opt[6], - sizeof (u_int32_t)); + sizeof(u_int32_t)); if (tsecr && dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) { @@ -2689,7 +2808,7 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, } got_ts = 1; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ default: hlen -= MAX(opt[1], 2); opt += MAX(opt[1], 2); @@ -2698,8 +2817,8 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, } if (copyback) { /* Copyback the options, caller copys back header */ - int optoff = off + sizeof (*th); - int optlen = (th->th_off << 2) - sizeof (*th); + int optoff = off + sizeof(*th); + int optlen = (th->th_off << 2) - sizeof(*th); if (pf_lazy_makewritable(pd, pbuf, optoff + optlen) == NULL) { REASON_SET(reason, PFRES_MEMORY); @@ -2718,13 +2837,13 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, * TS echo check only works for the first 12 days of a connection * when the TS has exhausted half its 32bit space */ -#define TS_MAX_IDLE (24*24*60*60) -#define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */ +#define TS_MAX_IDLE (24*24*60*60) +#define TS_MAX_CONN (12*24*60*60) /* XXX remove when better tsecr check */ getmicrouptime(&uptime); if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) && (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE || - pf_time_second() - state->creation > TS_MAX_CONN)) { + pf_time_second() - state->creation > TS_MAX_CONN)) { if (pf_status.debug >= PF_DEBUG_MISC) { DPFPRINTF(("src idled out of PAWS\n")); pf_print_state(state); @@ -2819,16 +2938,17 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, * packet got delayed in transit for much longer than * this packet. */ - if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0) + if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0) { ts_fudge = pf_default_rule.timeout[PFTM_TS_DIFF]; + } /* Calculate max ticks since the last timestamp */ -#define TS_MAXFREQ 1100 /* RFC max TS freq of 1Khz + 10% skew */ -#define TS_MICROSECS 1000000 /* microseconds per second */ +#define TS_MAXFREQ 1100 /* RFC max TS freq of 1Khz + 10% skew */ +#define TS_MICROSECS 1000000 /* microseconds per second */ timersub(&uptime, &src->scrub->pfss_last, &delta_ts); tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ; - tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ); + tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS / TS_MAXFREQ); if ((src->state >= TCPS_ESTABLISHED && @@ -2866,11 +2986,10 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, printf("\n"); } REASON_SET(reason, PFRES_TS); - return (PF_DROP); + return PF_DROP; } /* XXX I'd really like to require tsecr but it's optional */ - } else if (!got_ts && (th->th_flags & TH_RST) == 0 && ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED) || pd->p_len > 0 || (th->th_flags & TH_SYN)) && @@ -2917,7 +3036,7 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, printf("\n"); } REASON_SET(reason, PFRES_TS); - return (PF_DROP); + return PF_DROP; } } @@ -2931,10 +3050,10 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, * packets (seen in a WWW accelerator or cache). 
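* The code below only latches what the first data segment did: PFSS_DATA_TS when it carried a timestamp option, PFSS_DATA_NOTS when it did not.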
*/ if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags & - (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) { - if (got_ts) + (PFSS_TIMESTAMP | PFSS_DATA_TS | PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) { + if (got_ts) { src->scrub->pfss_flags |= PFSS_DATA_TS; - else { + } else { src->scrub->pfss_flags |= PFSS_DATA_NOTS; if (pf_status.debug >= PF_DEBUG_MISC && dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) { @@ -2954,16 +3073,18 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, * Update PAWS values */ if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags & - (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) { + (PFSS_PAWS_IDLED | PFSS_TIMESTAMP))) { getmicrouptime(&src->scrub->pfss_last); if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) || - (src->scrub->pfss_flags & PFSS_PAWS) == 0) + (src->scrub->pfss_flags & PFSS_PAWS) == 0) { src->scrub->pfss_tsval = tsval; + } if (tsecr) { if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) || - (src->scrub->pfss_flags & PFSS_PAWS) == 0) + (src->scrub->pfss_flags & PFSS_PAWS) == 0) { src->scrub->pfss_tsecr = tsecr; + } if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 && (SEQ_LT(tsval, src->scrub->pfss_tsval0) || @@ -2973,13 +3094,14 @@ pf_normalize_tcp_stateful(pbuf_t *pbuf, int off, struct pf_pdesc *pd, } /* Only fully initialized after a TS gets echoed */ - if ((src->scrub->pfss_flags & PFSS_PAWS) == 0) + if ((src->scrub->pfss_flags & PFSS_PAWS) == 0) { src->scrub->pfss_flags |= PFSS_PAWS; + } } } /* I have a dream.... TCP segment reassembly.... */ - return (0); + return 0; } static int @@ -2989,32 +3111,36 @@ pf_normalize_tcpopt(struct pf_rule *r, int dir, struct pfi_kif *kif, { #pragma unused(dir, kif) sa_family_t af = pd->af; - u_int16_t *mss; - int thoff; - int opt, cnt, optlen = 0; - int rewrite = 0; - u_char opts[MAX_TCPOPTLEN]; - u_char *optp = opts; + u_int16_t *mss; + int thoff; + int opt, cnt, optlen = 0; + int rewrite = 0; + u_char opts[MAX_TCPOPTLEN]; + u_char *optp = opts; thoff = th->th_off << 2; - cnt = thoff - sizeof (struct tcphdr); + cnt = thoff - sizeof(struct tcphdr); - if (cnt > 0 && !pf_pull_hdr(pbuf, off + sizeof (*th), opts, cnt, - NULL, NULL, af)) + if (cnt > 0 && !pf_pull_hdr(pbuf, off + sizeof(*th), opts, cnt, + NULL, NULL, af)) { return PF_DROP; + } for (; cnt > 0; cnt -= optlen, optp += optlen) { opt = optp[0]; - if (opt == TCPOPT_EOL) + if (opt == TCPOPT_EOL) { break; - if (opt == TCPOPT_NOP) + } + if (opt == TCPOPT_NOP) { optlen = 1; - else { - if (cnt < 2) + } else { + if (cnt < 2) { break; + } optlen = optp[1]; - if (optlen < 2 || optlen > cnt) + if (optlen < 2 || optlen > cnt) { break; + } } switch (opt) { case TCPOPT_MAXSEG: @@ -3026,9 +3152,10 @@ pf_normalize_tcpopt(struct pf_rule *r, int dir, struct pfi_kif *kif, * checksum calculation will not be performed. 
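* Concretely: th_sum is patched with pf_cksum_fixup() only when the packet is bound to an interface (pb_ifp != NULL) or no delayed TCP checksum (CSUM_TCP) is pending; otherwise the offload path recomputes the checksum over the rewritten MSS anyway.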
*/ if (pbuf->pb_ifp || - !(*pbuf->pb_csum_flags & CSUM_TCP)) + !(*pbuf->pb_csum_flags & CSUM_TCP)) { th->th_sum = pf_cksum_fixup(th->th_sum, *mss, htons(r->max_mss), 0); + } *mss = htons(r->max_mss); rewrite = 1; } @@ -3044,16 +3171,17 @@ pf_normalize_tcpopt(struct pf_rule *r, int dir, struct pfi_kif *kif, VERIFY(pbuf == pd->mp); if (pf_lazy_makewritable(pd, pd->mp, - off + sizeof (*th) + thoff) == NULL) { + off + sizeof(*th) + thoff) == NULL) { REASON_SET(&reason, PFRES_MEMORY); - if (r->log) + if (r->log) { PFLOG_PACKET(kif, h, pbuf, AF_INET, dir, reason, r, 0, 0, pd); + } return PF_DROP; } *rewrptr = 1; - pbuf_copy_back(pd->mp, off + sizeof (*th), thoff - sizeof (*th), opts); + pbuf_copy_back(pd->mp, off + sizeof(*th), thoff - sizeof(*th), opts); } return PF_PASS; diff --git a/bsd/net/pf_osfp.c b/bsd/net/pf_osfp.c index 2dc1e241c..20b523d1a 100644 --- a/bsd/net/pf_osfp.c +++ b/bsd/net/pf_osfp.c @@ -66,17 +66,17 @@ #include #endif /* INET6 */ -#define DPFPRINTF(format, x...) \ - if (pf_status.debug >= PF_DEBUG_NOISY) \ - printf(format, ##x) +#define DPFPRINTF(format, x...) \ + if (pf_status.debug >= PF_DEBUG_NOISY) \ + printf(format, ##x) static SLIST_HEAD(pf_osfp_list, pf_os_fingerprint) pf_osfp_list; static struct pool pf_osfp_entry_pl; static struct pool pf_osfp_pl; -static struct pf_os_fingerprint *pf_osfp_find(struct pf_osfp_list *, +static struct pf_os_fingerprint *pf_osfp_find(struct pf_osfp_list *, struct pf_os_fingerprint *, u_int8_t); -static struct pf_os_fingerprint *pf_osfp_find_exact(struct pf_osfp_list *, +static struct pf_os_fingerprint *pf_osfp_find_exact(struct pf_osfp_list *, struct pf_os_fingerprint *); static void pf_osfp_insert(struct pf_osfp_list *, struct pf_os_fingerprint *); @@ -95,8 +95,9 @@ pf_osfp_fingerprint(struct pf_pdesc *pd, pbuf_t *pbuf, int off, if ((pd->af != PF_INET && pd->af != PF_INET6) || pd->proto != IPPROTO_TCP || - (tcp->th_off << 2) < (int)sizeof (*tcp)) - return (NULL); + (tcp->th_off << 2) < (int)sizeof(*tcp)) { + return NULL; + } if (pd->af == PF_INET) { ip = pbuf->pb_data; @@ -106,10 +107,11 @@ pf_osfp_fingerprint(struct pf_pdesc *pd, pbuf_t *pbuf, int off, ip6 = pbuf->pb_data; } if (!pf_pull_hdr(pbuf, off, hdr, tcp->th_off << 2, NULL, NULL, - pd->af)) - return (NULL); + pd->af)) { + return NULL; + } - return (pf_osfp_fingerprint_hdr(ip, ip6, (struct tcphdr *)(void *)hdr)); + return pf_osfp_fingerprint_hdr(ip, ip6, (struct tcphdr *)(void *)hdr); } struct pf_osfp_enlist * @@ -124,44 +126,49 @@ pf_osfp_fingerprint_hdr(const struct ip *ip, const struct ip6_hdr *ip6, const u_int8_t *optp; char srcname[128]; - if ((tcp->th_flags & (TH_SYN|TH_ACK)) != TH_SYN) - return (NULL); + if ((tcp->th_flags & (TH_SYN | TH_ACK)) != TH_SYN) { + return NULL; + } if (ip) { - if ((ip->ip_off & htons(IP_OFFMASK)) != 0) - return (NULL); + if ((ip->ip_off & htons(IP_OFFMASK)) != 0) { + return NULL; + } } - memset(&fp, 0, sizeof (fp)); + memset(&fp, 0, sizeof(fp)); if (ip) { fp.fp_psize = ntohs(ip->ip_len); fp.fp_ttl = ip->ip_ttl; - if (ip->ip_off & htons(IP_DF)) + if (ip->ip_off & htons(IP_DF)) { fp.fp_flags |= PF_OSFP_DF; + } (void) inet_ntop(AF_INET, &ip->ip_src, srcname, - (socklen_t)sizeof (srcname)); + (socklen_t)sizeof(srcname)); } #if INET6 else if (ip6) { /* jumbo payload? 
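(an RFC 2675 jumbogram carries ip6_plen == 0 with its true length in a hop-by-hop option, so the fp_psize computed below would cover only the fixed header)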
*/ - fp.fp_psize = sizeof (struct ip6_hdr) + ntohs(ip6->ip6_plen); + fp.fp_psize = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen); fp.fp_ttl = ip6->ip6_hlim; fp.fp_flags |= PF_OSFP_DF; fp.fp_flags |= PF_OSFP_INET6; (void) inet_ntop(AF_INET6, &ip6->ip6_src, srcname, - (socklen_t)sizeof (srcname)); + (socklen_t)sizeof(srcname)); } #endif - else - return (NULL); + else { + return NULL; + } fp.fp_wsize = ntohs(tcp->th_win); - cnt = (tcp->th_off << 2) - sizeof (*tcp); - optp = (const u_int8_t *)((const char *)tcp + sizeof (*tcp)); + cnt = (tcp->th_off << 2) - sizeof(*tcp); + optp = (const u_int8_t *)((const char *)tcp + sizeof(*tcp)); for (; cnt > 0; cnt -= optlen, optp += optlen) { - if (*optp == TCPOPT_EOL) + if (*optp == TCPOPT_EOL) { break; + } fp.fp_optcnt++; if (*optp == TCPOPT_NOP) { @@ -169,53 +176,57 @@ pf_osfp_fingerprint_hdr(const struct ip *ip, const struct ip6_hdr *ip6, PF_OSFP_TCPOPT_NOP; optlen = 1; } else { - if (cnt < 2) - return (NULL); + if (cnt < 2) { + return NULL; + } optlen = optp[1]; - if (optlen > cnt || optlen < 2) - return (NULL); + if (optlen > cnt || optlen < 2) { + return NULL; + } switch (*optp) { case TCPOPT_MAXSEG: - if (optlen >= TCPOLEN_MAXSEG) + if (optlen >= TCPOLEN_MAXSEG) { memcpy(&fp.fp_mss, &optp[2], - sizeof (fp.fp_mss)); + sizeof(fp.fp_mss)); + } fp.fp_tcpopts = (fp.fp_tcpopts << - PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_MSS; + PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_MSS; #if BYTE_ORDER != BIG_ENDIAN NTOHS(fp.fp_mss); #endif break; case TCPOPT_WINDOW: - if (optlen >= TCPOLEN_WINDOW) + if (optlen >= TCPOLEN_WINDOW) { memcpy(&fp.fp_wscale, &optp[2], - sizeof (fp.fp_wscale)); + sizeof(fp.fp_wscale)); + } #if BYTE_ORDER != BIG_ENDIAN NTOHS(fp.fp_wscale); #endif fp.fp_tcpopts = (fp.fp_tcpopts << - PF_OSFP_TCPOPT_BITS) | + PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_WSCALE; break; case TCPOPT_SACK_PERMITTED: fp.fp_tcpopts = (fp.fp_tcpopts << - PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_SACK; + PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_SACK; break; case TCPOPT_TIMESTAMP: if (optlen >= TCPOLEN_TIMESTAMP) { u_int32_t ts; - memcpy(&ts, &optp[2], sizeof (ts)); - if (ts == 0) + memcpy(&ts, &optp[2], sizeof(ts)); + if (ts == 0) { fp.fp_flags |= PF_OSFP_TS0; - + } } fp.fp_tcpopts = (fp.fp_tcpopts << - PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_TS; + PF_OSFP_TCPOPT_BITS) | PF_OSFP_TCPOPT_TS; break; default: - return (NULL); + return NULL; } } - optlen = MAX(optlen, 1); /* paranoia */ + optlen = MAX(optlen, 1); /* paranoia */ } DPFPRINTF("fingerprinted %s:%d %d:%d:%d:%d:%llx (%d) " @@ -232,9 +243,10 @@ pf_osfp_fingerprint_hdr(const struct ip *ip, const struct ip6_hdr *ip6, fp.fp_wscale); if ((fpresult = pf_osfp_find(&pf_osfp_list, &fp, - PF_OSFP_MAXTTL_OFFSET))) - return (&fpresult->fp_oses); - return (NULL); + PF_OSFP_MAXTTL_OFFSET))) { + return &fpresult->fp_oses; + } + return NULL; } /* Match a fingerprint ID against a list of OSes */ @@ -245,11 +257,12 @@ pf_osfp_match(struct pf_osfp_enlist *list, pf_osfp_t os) int os_class, os_version, os_subtype; int en_class, en_version, en_subtype; - if (os == PF_OSFP_ANY) - return (1); + if (os == PF_OSFP_ANY) { + return 1; + } if (list == NULL) { DPFPRINTF("osfp no match against %x\n", os); - return (os == PF_OSFP_UNKNOWN); + return os == PF_OSFP_UNKNOWN; } PF_OSFP_UNPACK(os, os_class, os_version, os_subtype); SLIST_FOREACH(entry, list, fp_entry) { @@ -260,20 +273,20 @@ pf_osfp_match(struct pf_osfp_enlist *list, pf_osfp_t os) DPFPRINTF("osfp matched %s %s %s %x==%x\n", entry->fp_class_nm, entry->fp_version_nm, entry->fp_subtype_nm, os, 
entry->fp_os); - return (1); + return 1; } } DPFPRINTF("fingerprint 0x%x didn't match\n", os); - return (0); + return 0; } /* Initialize the OS fingerprint system */ void pf_osfp_initialize(void) { - pool_init(&pf_osfp_entry_pl, sizeof (struct pf_osfp_entry), 0, 0, 0, + pool_init(&pf_osfp_entry_pl, sizeof(struct pf_osfp_entry), 0, 0, 0, "pfosfpen", NULL); - pool_init(&pf_osfp_pl, sizeof (struct pf_os_fingerprint), 0, 0, 0, + pool_init(&pf_osfp_pl, sizeof(struct pf_os_fingerprint), 0, 0, 0, "pfosfp", NULL); SLIST_INIT(&pf_osfp_list); } @@ -314,7 +327,7 @@ pf_osfp_add(struct pf_osfp_ioctl *fpioc) struct pf_os_fingerprint *fp, fpadd; struct pf_osfp_entry *entry, *uentry; - memset(&fpadd, 0, sizeof (fpadd)); + memset(&fpadd, 0, sizeof(fpadd)); fpadd.fp_tcpopts = fpioc->fp_tcpopts; fpadd.fp_wsize = fpioc->fp_wsize; fpadd.fp_psize = fpioc->fp_psize; @@ -326,9 +339,9 @@ pf_osfp_add(struct pf_osfp_ioctl *fpioc) uentry = &fpioc->fp_os; uentry->fp_entry.sle_next = NULL; - uentry->fp_class_nm[sizeof (uentry->fp_class_nm) - 1] = '\0'; - uentry->fp_version_nm[sizeof (uentry->fp_version_nm) - 1] = '\0'; - uentry->fp_subtype_nm[sizeof (uentry->fp_subtype_nm) - 1] = '\0'; + uentry->fp_class_nm[sizeof(uentry->fp_class_nm) - 1] = '\0'; + uentry->fp_version_nm[sizeof(uentry->fp_version_nm) - 1] = '\0'; + uentry->fp_subtype_nm[sizeof(uentry->fp_subtype_nm) - 1] = '\0'; DPFPRINTF("adding osfp %s %s %s = %s%d:%d:%d:%s%d:0x%llx %d " "(TS=%s,M=%s%d,W=%s%d) %x\n", @@ -357,15 +370,18 @@ pf_osfp_add(struct pf_osfp_ioctl *fpioc) if ((fp = pf_osfp_find_exact(&pf_osfp_list, &fpadd))) { SLIST_FOREACH(entry, &fp->fp_oses, fp_entry) { - if (PF_OSFP_ENTRY_EQ(entry, &fpioc->fp_os)) - return (EEXIST); + if (PF_OSFP_ENTRY_EQ(entry, &fpioc->fp_os)) { + return EEXIST; + } + } + if ((entry = pool_get(&pf_osfp_entry_pl, PR_WAITOK)) == NULL) { + return ENOMEM; } - if ((entry = pool_get(&pf_osfp_entry_pl, PR_WAITOK)) == NULL) - return (ENOMEM); } else { - if ((fp = pool_get(&pf_osfp_pl, PR_WAITOK)) == NULL) - return (ENOMEM); - memset(fp, 0, sizeof (*fp)); + if ((fp = pool_get(&pf_osfp_pl, PR_WAITOK)) == NULL) { + return ENOMEM; + } + memset(fp, 0, sizeof(*fp)); fp->fp_tcpopts = fpioc->fp_tcpopts; fp->fp_wsize = fpioc->fp_wsize; fp->fp_psize = fpioc->fp_psize; @@ -377,24 +393,25 @@ pf_osfp_add(struct pf_osfp_ioctl *fpioc) SLIST_INIT(&fp->fp_oses); if ((entry = pool_get(&pf_osfp_entry_pl, PR_WAITOK)) == NULL) { pool_put(&pf_osfp_pl, fp); - return (ENOMEM); + return ENOMEM; } pf_osfp_insert(&pf_osfp_list, fp); } - memcpy(entry, &fpioc->fp_os, sizeof (*entry)); + memcpy(entry, &fpioc->fp_os, sizeof(*entry)); /* Make sure the strings are NUL terminated */ - entry->fp_class_nm[sizeof (entry->fp_class_nm)-1] = '\0'; - entry->fp_version_nm[sizeof (entry->fp_version_nm)-1] = '\0'; - entry->fp_subtype_nm[sizeof (entry->fp_subtype_nm)-1] = '\0'; + entry->fp_class_nm[sizeof(entry->fp_class_nm) - 1] = '\0'; + entry->fp_version_nm[sizeof(entry->fp_version_nm) - 1] = '\0'; + entry->fp_subtype_nm[sizeof(entry->fp_subtype_nm) - 1] = '\0'; SLIST_INSERT_HEAD(&fp->fp_oses, entry, fp_entry); #ifdef PFDEBUG - if ((fp = pf_osfp_validate())) + if ((fp = pf_osfp_validate())) { printf("Invalid fingerprint list\n"); + } #endif /* PFDEBUG */ - return (0); + return 0; } @@ -405,15 +422,15 @@ pf_osfp_find(struct pf_osfp_list *list, struct pf_os_fingerprint *find, { struct pf_os_fingerprint *f; -#define MATCH_INT(_MOD, _DC, _field) \ - if ((f->fp_flags & _DC) == 0) { \ - if ((f->fp_flags & _MOD) == 0) { \ - if (f->_field != find->_field) \ - continue; \ - } 
else { \ - if (f->_field == 0 || find->_field % f->_field) \ - continue; \ - } \ +#define MATCH_INT(_MOD, _DC, _field) \ + if ((f->fp_flags & _DC) == 0) { \ + if ((f->fp_flags & _MOD) == 0) { \ + if (f->_field != find->_field) \ + continue; \ + } else { \ + if (f->_field == 0 || find->_field % f->_field) \ + continue; \ + } \ } SLIST_FOREACH(f, list, fp_next) { @@ -421,56 +438,63 @@ pf_osfp_find(struct pf_osfp_list *list, struct pf_os_fingerprint *find, f->fp_optcnt != find->fp_optcnt || f->fp_ttl < find->fp_ttl || f->fp_ttl - find->fp_ttl > ttldiff || - (f->fp_flags & (PF_OSFP_DF|PF_OSFP_TS0)) != - (find->fp_flags & (PF_OSFP_DF|PF_OSFP_TS0))) + (f->fp_flags & (PF_OSFP_DF | PF_OSFP_TS0)) != + (find->fp_flags & (PF_OSFP_DF | PF_OSFP_TS0))) { continue; + } MATCH_INT(PF_OSFP_PSIZE_MOD, PF_OSFP_PSIZE_DC, fp_psize) MATCH_INT(PF_OSFP_MSS_MOD, PF_OSFP_MSS_DC, fp_mss) MATCH_INT(PF_OSFP_WSCALE_MOD, PF_OSFP_WSCALE_DC, fp_wscale) if ((f->fp_flags & PF_OSFP_WSIZE_DC) == 0) { if (f->fp_flags & PF_OSFP_WSIZE_MSS) { - if (find->fp_mss == 0) + if (find->fp_mss == 0) { continue; + } /* * Some "smart" NAT devices and DSL routers will tweak the MSS size and * will set it to whatever is suitable for the link type. */ -#define SMART_MSS 1460 +#define SMART_MSS 1460 if ((find->fp_wsize % find->fp_mss || find->fp_wsize / find->fp_mss != f->fp_wsize) && (find->fp_wsize % SMART_MSS || find->fp_wsize / SMART_MSS != - f->fp_wsize)) + f->fp_wsize)) { continue; + } } else if (f->fp_flags & PF_OSFP_WSIZE_MTU) { - if (find->fp_mss == 0) + if (find->fp_mss == 0) { continue; + } -#define MTUOFF (sizeof (struct ip) + sizeof (struct tcphdr)) -#define SMART_MTU (SMART_MSS + MTUOFF) +#define MTUOFF (sizeof (struct ip) + sizeof (struct tcphdr)) +#define SMART_MTU (SMART_MSS + MTUOFF) if ((find->fp_wsize % (find->fp_mss + MTUOFF) || find->fp_wsize / (find->fp_mss + MTUOFF) != f->fp_wsize) && (find->fp_wsize % SMART_MTU || find->fp_wsize / SMART_MTU != - f->fp_wsize)) + f->fp_wsize)) { continue; + } } else if (f->fp_flags & PF_OSFP_WSIZE_MOD) { if (f->fp_wsize == 0 || find->fp_wsize % - f->fp_wsize) + f->fp_wsize) { continue; + } } else { - if (f->fp_wsize != find->fp_wsize) + if (f->fp_wsize != find->fp_wsize) { continue; + } } } - return (f); + return f; } - return (NULL); + return NULL; } /* Find an exact fingerprint in the list */ @@ -487,11 +511,12 @@ pf_osfp_find_exact(struct pf_osfp_list *list, struct pf_os_fingerprint *find) f->fp_flags == find->fp_flags && f->fp_optcnt == find->fp_optcnt && f->fp_wscale == find->fp_wscale && - f->fp_ttl == find->fp_ttl) - return (f); + f->fp_ttl == find->fp_ttl) { + return f; + } } - return (NULL); + return NULL; } /* Insert a fingerprint into the list */ @@ -503,11 +528,12 @@ pf_osfp_insert(struct pf_osfp_list *list, struct pf_os_fingerprint *ins) /* XXX need to go semi tree based. 
can key on tcp options */ SLIST_FOREACH(f, list, fp_next) - prev = f; - if (prev) + prev = f; + if (prev) { SLIST_INSERT_AFTER(prev, ins, fp_next); - else + } else { SLIST_INSERT_HEAD(list, ins, fp_next); + } } /* Fill a fingerprint by its number (from an ioctl) */ @@ -520,7 +546,7 @@ pf_osfp_get(struct pf_osfp_ioctl *fpioc) int i = 0; - memset(fpioc, 0, sizeof (*fpioc)); + memset(fpioc, 0, sizeof(*fpioc)); SLIST_FOREACH(fp, &pf_osfp_list, fp_next) { SLIST_FOREACH(entry, &fp->fp_oses, fp_entry) { if (i++ == num) { @@ -532,14 +558,14 @@ pf_osfp_get(struct pf_osfp_ioctl *fpioc) fpioc->fp_wscale = fp->fp_wscale; fpioc->fp_getnum = num; memcpy(&fpioc->fp_os, entry, - sizeof (fpioc->fp_os)); + sizeof(fpioc->fp_os)); fpioc->fp_os.fp_entry.sle_next = NULL; - return (0); + return 0; } } } - return (EBUSY); + return EBUSY; } @@ -550,19 +576,21 @@ pf_osfp_validate(void) struct pf_os_fingerprint *f, *f2, find; SLIST_FOREACH(f, &pf_osfp_list, fp_next) { - memcpy(&find, f, sizeof (find)); + memcpy(&find, f, sizeof(find)); /* We do a few MSS/th_win percolations to make things unique */ - if (find.fp_mss == 0) + if (find.fp_mss == 0) { find.fp_mss = 128; - if (f->fp_flags & PF_OSFP_WSIZE_MSS) + } + if (f->fp_flags & PF_OSFP_WSIZE_MSS) { find.fp_wsize *= find.fp_mss; - else if (f->fp_flags & PF_OSFP_WSIZE_MTU) + } else if (f->fp_flags & PF_OSFP_WSIZE_MTU) { find.fp_wsize *= (find.fp_mss + 40); - else if (f->fp_flags & PF_OSFP_WSIZE_MOD) + } else if (f->fp_flags & PF_OSFP_WSIZE_MOD) { find.fp_wsize *= 2; + } if (f != (f2 = pf_osfp_find(&pf_osfp_list, &find, 0))) { - if (f2) + if (f2) { printf("Found \"%s %s %s\" instead of " "\"%s %s %s\"\n", SLIST_FIRST(&f2->fp_oses)->fp_class_nm, @@ -571,13 +599,14 @@ pf_osfp_validate(void) SLIST_FIRST(&f->fp_oses)->fp_class_nm, SLIST_FIRST(&f->fp_oses)->fp_version_nm, SLIST_FIRST(&f->fp_oses)->fp_subtype_nm); - else + } else { printf("Couldn't find \"%s %s %s\"\n", SLIST_FIRST(&f->fp_oses)->fp_class_nm, SLIST_FIRST(&f->fp_oses)->fp_version_nm, SLIST_FIRST(&f->fp_oses)->fp_subtype_nm); - return (f); + } + return f; } } - return (NULL); + return NULL; } diff --git a/bsd/net/pf_pbuf.c b/bsd/net/pf_pbuf.c index 86cc47c3b..be08224fa 100644 --- a/bsd/net/pf_pbuf.c +++ b/bsd/net/pf_pbuf.c @@ -33,7 +33,6 @@ void pbuf_init_mbuf(pbuf_t *pbuf, struct mbuf *m, struct ifnet *ifp) { - VERIFY((m->m_flags & M_PKTHDR) != 0); pbuf->pb_type = PBUF_TYPE_MBUF; @@ -46,7 +45,6 @@ pbuf_init_mbuf(pbuf_t *pbuf, struct mbuf *m, struct ifnet *ifp) void pbuf_init_memory(pbuf_t *pbuf, const struct pbuf_memory *mp, struct ifnet *ifp) { - pbuf->pb_type = PBUF_TYPE_MEMORY; pbuf->pb_memory = *mp; pbuf->pb_ifp = ifp; @@ -57,14 +55,12 @@ pbuf_init_memory(pbuf_t *pbuf, const struct pbuf_memory *mp, struct ifnet *ifp) void pbuf_destroy(pbuf_t *pbuf) { - if (pbuf->pb_type == PBUF_TYPE_MBUF) { if (pbuf->pb_mbuf) { m_freem(pbuf->pb_mbuf); pbuf->pb_mbuf = NULL; } - } else - if (pbuf->pb_type == PBUF_TYPE_MEMORY) { + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { VERIFY(pbuf->pb_memory.pm_buffer != NULL); (void) (pbuf->pb_memory.pm_action)(&pbuf->pb_memory, PBUF_ACTION_DESTROY); @@ -78,7 +74,6 @@ pbuf_destroy(pbuf_t *pbuf) void pbuf_sync(pbuf_t *pbuf) { - if (pbuf->pb_type == PBUF_TYPE_MBUF) { struct mbuf *m = pbuf->pb_mbuf; @@ -95,8 +90,7 @@ pbuf_sync(pbuf_t *pbuf) pbuf->pb_flowid = &m->m_pkthdr.pkt_flowid; pbuf->pb_flags = &m->m_pkthdr.pkt_flags; pbuf->pb_pftag = m_pftag(m); - } else - if (pbuf->pb_type == PBUF_TYPE_MEMORY) { + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { struct pbuf_memory *nm = 
&pbuf->pb_memory; VERIFY(nm->pm_buffer != NULL); @@ -115,8 +109,9 @@ pbuf_sync(pbuf_t *pbuf) pbuf->pb_flowid = &nm->pm_flowid; pbuf->pb_flags = &nm->pm_flags; pbuf->pb_pftag = &nm->pm_pftag; - } else + } else { panic("%s: bad pb_type: %d", __func__, pbuf->pb_type); + } } struct mbuf * @@ -132,20 +127,20 @@ pbuf_to_mbuf(pbuf_t *pbuf, boolean_t release_ptr) pbuf->pb_mbuf = NULL; pbuf_destroy(pbuf); } - } else - if (pbuf->pb_type == PBUF_TYPE_MEMORY) { + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { if (pbuf->pb_packet_len > (u_int)MHLEN) { if (pbuf->pb_packet_len > (u_int)MCLBYTES) { printf("%s: packet too big for cluster (%u)\n", __func__, pbuf->pb_packet_len); - return (NULL); + return NULL; } m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR); } else { m = m_gethdr(M_DONTWAIT, MT_DATA); } - if (m == NULL) - return (NULL); + if (m == NULL) { + return NULL; + } m_copyback(m, 0, pbuf->pb_packet_len, pbuf->pb_data); m->m_pkthdr.csum_flags = *pbuf->pb_csum_flags; @@ -158,15 +153,17 @@ pbuf_to_mbuf(pbuf_t *pbuf, boolean_t release_ptr) if (pbuf->pb_pftag != NULL) { struct pf_mtag *pftag = m_pftag(m); - if (pftag != NULL) + if (pftag != NULL) { *pftag = *pbuf->pb_pftag; + } } - if (release_ptr) + if (release_ptr) { pbuf_destroy(pbuf); + } } - return (m); + return m; } struct mbuf * @@ -176,40 +173,40 @@ pbuf_clone_to_mbuf(pbuf_t *pbuf) pbuf_sync(pbuf); - if (pbuf->pb_type == PBUF_TYPE_MBUF) + if (pbuf->pb_type == PBUF_TYPE_MBUF) { m = m_copy(pbuf->pb_mbuf, 0, M_COPYALL); - else - if (pbuf->pb_type == PBUF_TYPE_MEMORY) + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { m = pbuf_to_mbuf(pbuf, FALSE); - else + } else { panic("%s: bad pb_type: %d", __func__, pbuf->pb_type); + } - return (m); + return m; } void * pbuf_ensure_writable(pbuf_t *pbuf, size_t len) { - if (pbuf->pb_type == PBUF_TYPE_MBUF) { struct mbuf *m = pbuf->pb_mbuf; - if (m_makewritable(&pbuf->pb_mbuf, 0, len, M_DONTWAIT)) - return (NULL); + if (m_makewritable(&pbuf->pb_mbuf, 0, len, M_DONTWAIT)) { + return NULL; + } if (pbuf->pb_mbuf == NULL) { pbuf_destroy(pbuf); - return (NULL); + return NULL; } - if (m != pbuf->pb_mbuf) + if (m != pbuf->pb_mbuf) { pbuf_sync(pbuf); - - } else - if (pbuf->pb_type != PBUF_TYPE_MEMORY) + } + } else if (pbuf->pb_type != PBUF_TYPE_MEMORY) { panic("%s: bad pb_type: %d", __func__, pbuf->pb_type); + } - return (pbuf->pb_data); + return pbuf->pb_data; } void * @@ -229,8 +226,9 @@ pbuf_resize_segment(pbuf_t *pbuf, int off, int olen, int nlen) if (off > 0) { /* Split the mbuf chain at the specified boundary */ - if ((n = m_split(m, off, M_DONTWAIT)) == NULL) - return (NULL); + if ((n = m_split(m, off, M_DONTWAIT)) == NULL) { + return NULL; + } } else { n = m; } @@ -239,8 +237,9 @@ pbuf_resize_segment(pbuf_t *pbuf, int off, int olen, int nlen) m_adj(n, olen); /* Prepend new length */ - if (M_PREPEND(n, nlen, M_DONTWAIT, 0) == NULL) - return (NULL); + if (M_PREPEND(n, nlen, M_DONTWAIT, 0) == NULL) { + return NULL; + } rv = mtod(n, void *); @@ -282,7 +281,7 @@ pbuf_resize_segment(pbuf_t *pbuf, int off, int olen, int nlen) } else { panic("pbuf_csum_flags_get: bad pb_type: %d", pbuf->pb_type); } - return (rv); + return rv; } void * @@ -309,58 +308,58 @@ pbuf_contig_segment(pbuf_t *pbuf, int off, int len) /* mbuf is freed by m_pulldown() in this case */ pbuf->pb_mbuf = NULL; pbuf_destroy(pbuf); - return (NULL); + return NULL; } pbuf_sync(pbuf); rv = (void *)(mtod(n, uint8_t *) + moff); - } else - if (pbuf->pb_type == PBUF_TYPE_MEMORY) { + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { /* * This always succeeds since memory 
pbufs are fully contig. */ rv = (void *)(uintptr_t)(((uint8_t *)pbuf->pb_data)[off]); - } else + } else { panic("%s: bad pb_type: %d", __func__, pbuf->pb_type); + } - return (rv); + return rv; } void pbuf_copy_back(pbuf_t *pbuf, int off, int len, void *src) { - VERIFY(off >= 0); VERIFY(len >= 0); VERIFY((u_int)(off + len) <= pbuf->pb_packet_len); - if (pbuf->pb_type == PBUF_TYPE_MBUF) - m_copyback(pbuf->pb_mbuf, off, len, src); - else if (pbuf->pb_type == PBUF_TYPE_MBUF) { - if (len) + if (pbuf->pb_type == PBUF_TYPE_MBUF) { + m_copyback(pbuf->pb_mbuf, off, len, src); + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { + if (len) { memcpy(&((uint8_t *)pbuf->pb_data)[off], src, len); - } else + } + } else { panic("%s: bad pb_type: %d", __func__, pbuf->pb_type); + } } void pbuf_copy_data(pbuf_t *pbuf, int off, int len, void *dst) { - VERIFY(off >= 0); VERIFY(len >= 0); VERIFY((u_int)(off + len) <= pbuf->pb_packet_len); - if (pbuf->pb_type == PBUF_TYPE_MBUF) - m_copydata(pbuf->pb_mbuf, off, len, dst); - else if (pbuf->pb_type == PBUF_TYPE_MBUF) { - if (len) + if (pbuf->pb_type == PBUF_TYPE_MBUF) { + m_copydata(pbuf->pb_mbuf, off, len, dst); + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { + if (len) { memcpy(dst, &((uint8_t *)pbuf->pb_data)[off], len); - } else + } + } else { panic("%s: bad pb_type: %d", __func__, pbuf->pb_type); + } } uint16_t @@ -368,15 +367,15 @@ pbuf_inet_cksum(const pbuf_t *pbuf, uint32_t nxt, uint32_t off, uint32_t len) { uint16_t sum = 0; - if (pbuf->pb_type == PBUF_TYPE_MBUF) + if (pbuf->pb_type == PBUF_TYPE_MBUF) { sum = inet_cksum(pbuf->pb_mbuf, nxt, off, len); - else - if (pbuf->pb_type == PBUF_TYPE_MEMORY) + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { sum = inet_cksum_buffer(pbuf->pb_data, nxt, off, len); - else + } else { panic("%s: bad pb_type: %d", __func__, pbuf->pb_type); + } - return (sum); + return sum; } uint16_t @@ -384,25 +383,25 @@ pbuf_inet6_cksum(const pbuf_t *pbuf, uint32_t nxt, uint32_t off, uint32_t len) { uint16_t sum = 0; - if (pbuf->pb_type == PBUF_TYPE_MBUF) + if (pbuf->pb_type == PBUF_TYPE_MBUF) { sum = inet6_cksum(pbuf->pb_mbuf, nxt, off, len); - else - if (pbuf->pb_type == PBUF_TYPE_MEMORY) + } else if (pbuf->pb_type == PBUF_TYPE_MEMORY) { sum = inet6_cksum_buffer(pbuf->pb_data, nxt, off, len); - else + } else { panic("%s: bad pb_type: %d", __func__, pbuf->pb_type); + } - return (sum); + return sum; } mbuf_svc_class_t pbuf_get_service_class(const pbuf_t *pbuf) { - - if (pbuf->pb_type == PBUF_TYPE_MBUF) + if (pbuf->pb_type == PBUF_TYPE_MBUF) { return m_get_service_class(pbuf->pb_mbuf); + } VERIFY(pbuf->pb_type == PBUF_TYPE_MEMORY); - return (MBUF_SC_BE); + return MBUF_SC_BE; } diff --git a/bsd/net/pf_pbuf.h b/bsd/net/pf_pbuf.h index ec6d0333a..fd8f7dd57 100644 --- a/bsd/net/pf_pbuf.h +++ b/bsd/net/pf_pbuf.h @@ -22,7 +22,7 @@ */ #ifndef __PBUF_H__ -#define __PBUF_H__ +#define __PBUF_H__ #include @@ -36,14 +36,14 @@ enum pbuf_action { PBUF_ACTION_DESTROY }; -#define PBUF_ACTION_RV_SUCCESS 0 -#define PBUF_ACTION_RV_FAILURE (-1) +#define PBUF_ACTION_RV_SUCCESS 0 +#define PBUF_ACTION_RV_FAILURE (-1) struct pbuf_memory { - uint8_t *pm_buffer; // Pointer to start of buffer - u_int pm_buffer_len; // Total length of buffer - u_int pm_offset; // Offset to start of payload - u_int pm_len; // Length of payload + uint8_t *pm_buffer; // Pointer to start of buffer + u_int pm_buffer_len; // Total length of buffer + u_int pm_offset; // Offset to start of payload + u_int pm_len; // Length of payload uint32_t pm_csum_flags; uint32_t pm_csum_data; uint8_t pm_proto; @@ -56,51 +56,50 @@ struct pbuf_memory { }; typedef struct pbuf { - enum 
pbuf_type pb_type; + enum pbuf_type pb_type; union { struct mbuf *pbu_mbuf; struct pbuf_memory pbu_memory; } pb_u; -#define pb_mbuf pb_u.pbu_mbuf -#define pb_memory pb_u.pbu_memory - - void *pb_data; - uint32_t pb_packet_len; - uint32_t pb_contig_len; - uint32_t *pb_csum_flags; - uint32_t *pb_csum_data; /* data field used by csum routines */ - uint8_t *pb_proto; - uint8_t *pb_flowsrc; - uint32_t *pb_flowid; - uint32_t *pb_flags; - struct pf_mtag *pb_pftag; - struct ifnet *pb_ifp; - struct pbuf *pb_next; - +#define pb_mbuf pb_u.pbu_mbuf +#define pb_memory pb_u.pbu_memory + + void *pb_data; + uint32_t pb_packet_len; + uint32_t pb_contig_len; + uint32_t *pb_csum_flags; + uint32_t *pb_csum_data; /* data field used by csum routines */ + uint8_t *pb_proto; + uint8_t *pb_flowsrc; + uint32_t *pb_flowid; + uint32_t *pb_flags; + struct pf_mtag *pb_pftag; + struct ifnet *pb_ifp; + struct pbuf *pb_next; } pbuf_t; #define pbuf_is_valid(pb) (!((pb) == NULL || (pb)->pb_type == PBUF_TYPE_ZOMBIE)) -void pbuf_init_mbuf(pbuf_t *, struct mbuf *, struct ifnet *); -void pbuf_init_memory(pbuf_t *, const struct pbuf_memory *, - struct ifnet *); -void pbuf_destroy(pbuf_t *); -void pbuf_sync(pbuf_t *); +void pbuf_init_mbuf(pbuf_t *, struct mbuf *, struct ifnet *); +void pbuf_init_memory(pbuf_t *, const struct pbuf_memory *, + struct ifnet *); +void pbuf_destroy(pbuf_t *); +void pbuf_sync(pbuf_t *); -struct mbuf *pbuf_to_mbuf(pbuf_t *, boolean_t); -struct mbuf *pbuf_clone_to_mbuf(pbuf_t *); +struct mbuf *pbuf_to_mbuf(pbuf_t *, boolean_t); +struct mbuf *pbuf_clone_to_mbuf(pbuf_t *); -void * pbuf_ensure_contig(pbuf_t *, size_t); -void * pbuf_ensure_writable(pbuf_t *, size_t); +void * pbuf_ensure_contig(pbuf_t *, size_t); +void * pbuf_ensure_writable(pbuf_t *, size_t); -void * pbuf_resize_segment(pbuf_t *, int off, int olen, int nlen); -void * pbuf_contig_segment(pbuf_t *, int off, int len); +void * pbuf_resize_segment(pbuf_t *, int off, int olen, int nlen); +void * pbuf_contig_segment(pbuf_t *, int off, int len); -void pbuf_copy_data(pbuf_t *, int, int, void *); -void pbuf_copy_back(pbuf_t *, int, int, void *); +void pbuf_copy_data(pbuf_t *, int, int, void *); +void pbuf_copy_back(pbuf_t *, int, int, void *); -uint16_t pbuf_inet_cksum(const pbuf_t *, uint32_t, uint32_t, uint32_t); -uint16_t pbuf_inet6_cksum(const pbuf_t *, uint32_t, uint32_t, uint32_t); +uint16_t pbuf_inet_cksum(const pbuf_t *, uint32_t, uint32_t, uint32_t); +uint16_t pbuf_inet6_cksum(const pbuf_t *, uint32_t, uint32_t, uint32_t); mbuf_svc_class_t pbuf_get_service_class(const pbuf_t *); diff --git a/bsd/net/pf_ruleset.c b/bsd/net/pf_ruleset.c index ff4c3f904..4b3be609a 100644 --- a/bsd/net/pf_ruleset.c +++ b/bsd/net/pf_ruleset.c @@ -89,23 +89,25 @@ #ifdef KERNEL -#define DPFPRINTF(format, x...) \ - if (pf_status.debug >= PF_DEBUG_NOISY) \ - printf(format, ##x) -#define rs_malloc(x) _MALLOC(x, M_TEMP, M_WAITOK) -#define rs_free(x) _FREE(x, M_TEMP) -#define strrchr _strrchr +#define DPFPRINTF(format, x ...) 
\ + if (pf_status.debug >= PF_DEBUG_NOISY) \ + printf(format, ##x) +#define rs_malloc(x) _MALLOC(x, M_TEMP, M_WAITOK) +#define rs_free(x) _FREE(x, M_TEMP) +#define strrchr _strrchr static char * _strrchr(const char *c, int ch) { char *p = (char *)(size_t)c, *save; - for (save = NULL; ; ++p) { - if (*p == ch) + for (save = NULL;; ++p) { + if (*p == ch) { save = (char *)p; - if (*p == '\0') - return (save); + } + if (*p == '\0') { + return save; + } } /* NOTREACHED */ } @@ -118,20 +120,20 @@ _strrchr(const char *c, int ch) #include #include #include -#define rs_malloc(x) malloc(x) -#define rs_free(x) free(x) +#define rs_malloc(x) malloc(x) +#define rs_free(x) free(x) #ifdef PFDEBUG #include -#define DPFPRINTF(format, x...) fprintf(stderr, format, ##x) +#define DPFPRINTF(format, x...) fprintf(stderr, format, ##x) #else -#define DPFPRINTF(format, x...) ((void)0) +#define DPFPRINTF(format, x...) ((void)0) #endif /* PFDEBUG */ #endif /* KERNEL */ -struct pf_anchor_global pf_anchors; -struct pf_anchor pf_main_anchor; +struct pf_anchor_global pf_anchors; +struct pf_anchor pf_main_anchor; static __inline int pf_anchor_compare(struct pf_anchor *, struct pf_anchor *); @@ -143,7 +145,7 @@ pf_anchor_compare(struct pf_anchor *a, struct pf_anchor *b) { int c = strcmp(a->path, b->path); - return (c ? (c < 0 ? -1 : 1) : 0); + return c ? (c < 0 ? -1 : 1) : 0; } int @@ -152,37 +154,37 @@ pf_get_ruleset_number(u_int8_t action) switch (action) { case PF_SCRUB: case PF_NOSCRUB: - return (PF_RULESET_SCRUB); + return PF_RULESET_SCRUB; case PF_PASS: case PF_DROP: - return (PF_RULESET_FILTER); + return PF_RULESET_FILTER; case PF_NAT: case PF_NONAT: - return (PF_RULESET_NAT); + return PF_RULESET_NAT; case PF_BINAT: case PF_NOBINAT: - return (PF_RULESET_BINAT); + return PF_RULESET_BINAT; case PF_RDR: case PF_NORDR: case PF_NAT64: case PF_NONAT64: - return (PF_RULESET_RDR); + return PF_RULESET_RDR; #if DUMMYNET case PF_DUMMYNET: case PF_NODUMMYNET: - return (PF_RULESET_DUMMYNET); + return PF_RULESET_DUMMYNET; #endif /* DUMMYNET */ default: - return (PF_RULESET_MAX); + return PF_RULESET_MAX; } } void pf_init_ruleset(struct pf_ruleset *ruleset) { - int i; + int i; - memset(ruleset, 0, sizeof (struct pf_ruleset)); + memset(ruleset, 0, sizeof(struct pf_ruleset)); for (i = 0; i < PF_RULESET_MAX; i++) { TAILQ_INIT(&ruleset->rules[i].queues[0]); TAILQ_INIT(&ruleset->rules[i].queues[1]); @@ -194,50 +196,56 @@ pf_init_ruleset(struct pf_ruleset *ruleset) struct pf_anchor * pf_find_anchor(const char *path) { - struct pf_anchor *key, *found; + struct pf_anchor *key, *found; - key = (struct pf_anchor *)rs_malloc(sizeof (*key)); - memset(key, 0, sizeof (*key)); - strlcpy(key->path, path, sizeof (key->path)); + key = (struct pf_anchor *)rs_malloc(sizeof(*key)); + memset(key, 0, sizeof(*key)); + strlcpy(key->path, path, sizeof(key->path)); found = RB_FIND(pf_anchor_global, &pf_anchors, key); rs_free(key); - return (found); + return found; } struct pf_ruleset * pf_find_ruleset(const char *path) { - struct pf_anchor *anchor; + struct pf_anchor *anchor; - while (*path == '/') + while (*path == '/') { path++; - if (!*path) - return (&pf_main_ruleset); + } + if (!*path) { + return &pf_main_ruleset; + } anchor = pf_find_anchor(path); - if (anchor == NULL) - return (NULL); - else - return (&anchor->ruleset); + if (anchor == NULL) { + return NULL; + } else { + return &anchor->ruleset; + } } struct pf_ruleset * pf_find_ruleset_with_owner(const char *path, const char *owner, int is_anchor, int *error) { - struct pf_anchor *anchor; + struct 
pf_anchor *anchor; - while (*path == '/') + while (*path == '/') { path++; - if (!*path) - return (&pf_main_ruleset); + } + if (!*path) { + return &pf_main_ruleset; + } anchor = pf_find_anchor(path); if (anchor == NULL) { *error = EINVAL; - return (NULL); + return NULL; } else { if ((owner && (!strcmp(owner, anchor->owner))) - || (is_anchor && !strcmp(anchor->owner, ""))) - return (&anchor->ruleset); + || (is_anchor && !strcmp(anchor->owner, ""))) { + return &anchor->ruleset; + } *error = EPERM; return NULL; } @@ -246,17 +254,20 @@ pf_find_ruleset_with_owner(const char *path, const char *owner, int is_anchor, struct pf_ruleset * pf_find_or_create_ruleset(const char *path) { - char *p, *q = NULL, *r; - struct pf_ruleset *ruleset; - struct pf_anchor *anchor = 0, *dup, *parent = NULL; + char *p, *q = NULL, *r; + struct pf_ruleset *ruleset; + struct pf_anchor *anchor = 0, *dup, *parent = NULL; - if (path[0] == 0) - return (&pf_main_ruleset); - while (*path == '/') + if (path[0] == 0) { + return &pf_main_ruleset; + } + while (*path == '/') { path++; + } ruleset = pf_find_ruleset(path); - if (ruleset != NULL) - return (ruleset); + if (ruleset != NULL) { + return ruleset; + } p = (char *)rs_malloc(MAXPATHLEN); bzero(p, MAXPATHLEN); strlcpy(p, path, MAXPATHLEN); @@ -267,38 +278,40 @@ pf_find_or_create_ruleset(const char *path) break; } } - if (q == NULL) + if (q == NULL) { q = p; - else + } else { q++; + } strlcpy(p, path, MAXPATHLEN); if (!*q) { rs_free(p); - return (NULL); + return NULL; } while ((r = strchr(q, '/')) != NULL || *q) { - if (r != NULL) + if (r != NULL) { *r = 0; + } if (!*q || strlen(q) >= PF_ANCHOR_NAME_SIZE || (parent != NULL && strlen(parent->path) >= MAXPATHLEN - PF_ANCHOR_NAME_SIZE - 1)) { rs_free(p); - return (NULL); + return NULL; } - anchor = (struct pf_anchor *)rs_malloc(sizeof (*anchor)); + anchor = (struct pf_anchor *)rs_malloc(sizeof(*anchor)); if (anchor == NULL) { rs_free(p); - return (NULL); + return NULL; } - memset(anchor, 0, sizeof (*anchor)); + memset(anchor, 0, sizeof(*anchor)); RB_INIT(&anchor->children); - strlcpy(anchor->name, q, sizeof (anchor->name)); + strlcpy(anchor->name, q, sizeof(anchor->name)); if (parent != NULL) { strlcpy(anchor->path, parent->path, - sizeof (anchor->path)); - strlcat(anchor->path, "/", sizeof (anchor->path)); + sizeof(anchor->path)); + strlcat(anchor->path, "/", sizeof(anchor->path)); } - strlcat(anchor->path, anchor->name, sizeof (anchor->path)); + strlcat(anchor->path, anchor->name, sizeof(anchor->path)); if ((dup = RB_INSERT(pf_anchor_global, &pf_anchors, anchor)) != NULL) { printf("pf_find_or_create_ruleset: RB_INSERT1 " @@ -306,7 +319,7 @@ pf_find_or_create_ruleset(const char *path) anchor->path, anchor->name, dup->path, dup->name); rs_free(anchor); rs_free(p); - return (NULL); + return NULL; } if (parent != NULL) { anchor->parent = parent; @@ -320,46 +333,51 @@ pf_find_or_create_ruleset(const char *path) anchor); rs_free(anchor); rs_free(p); - return (NULL); + return NULL; } } pf_init_ruleset(&anchor->ruleset); anchor->ruleset.anchor = anchor; parent = anchor; - if (r != NULL) + if (r != NULL) { q = r + 1; - else + } else { *q = 0; + } #if DUMMYNET - if(strncmp("com.apple.nlc", anchor->name, - sizeof("com.apple.nlc")) == 0) + if (strncmp("com.apple.nlc", anchor->name, + sizeof("com.apple.nlc")) == 0) { is_nlc_enabled_glb = TRUE; + } #endif } rs_free(p); - return (anchor ? &anchor->ruleset : 0); + return anchor ? 
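/*
 * A minimal sketch (hypothetical names) of the in-place path walk in
 * pf_find_or_create_ruleset() above: each '/' separator is overwritten
 * with NUL so q always names exactly one component, and consuming the
 * final component clears *q to end the loop. (The kernel code
 * additionally rejects empty or oversized components.)
 */
static void
walk_components(char *q, void (*visit)(const char *))
{
	char *r;

	while ((r = strchr(q, '/')) != NULL || *q) {
		if (r != NULL) {
			*r = '\0';      /* terminate the current component */
		}
		visit(q);               /* e.g. create the anchor for it */
		if (r != NULL) {
			q = r + 1;      /* step past the separator */
		} else {
			*q = '\0';      /* last component consumed */
		}
	}
}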
&anchor->ruleset : 0; } void pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset) { - struct pf_anchor *parent; - int i; + struct pf_anchor *parent; + int i; while (ruleset != NULL) { if (ruleset == &pf_main_ruleset || ruleset->anchor == NULL || !RB_EMPTY(&ruleset->anchor->children) || ruleset->anchor->refcnt > 0 || ruleset->tables > 0 || - ruleset->topen) + ruleset->topen) { return; - for (i = 0; i < PF_RULESET_MAX; ++i) + } + for (i = 0; i < PF_RULESET_MAX; ++i) { if (!TAILQ_EMPTY(ruleset->rules[i].active.ptr) || !TAILQ_EMPTY(ruleset->rules[i].inactive.ptr) || - ruleset->rules[i].inactive.open) + ruleset->rules[i].inactive.open) { return; + } + } RB_REMOVE(pf_anchor_global, &pf_anchors, ruleset->anchor); #if DUMMYNET - if(strncmp("com.apple.nlc", ruleset->anchor->name, + if (strncmp("com.apple.nlc", ruleset->anchor->name, sizeof("com.apple.nlc")) == 0) { struct dummynet_event dn_event; bzero(&dn_event, sizeof(dn_event)); @@ -368,12 +386,14 @@ pf_remove_if_empty_ruleset(struct pf_ruleset *ruleset) is_nlc_enabled_glb = FALSE; } #endif - if ((parent = ruleset->anchor->parent) != NULL) + if ((parent = ruleset->anchor->parent) != NULL) { RB_REMOVE(pf_anchor_node, &parent->children, ruleset->anchor); + } rs_free(ruleset->anchor); - if (parent == NULL) + if (parent == NULL) { return; + } ruleset = &parent->ruleset; } } @@ -382,40 +402,44 @@ int pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s, const char *name) { - char *p, *path; - struct pf_ruleset *ruleset; + char *p, *path; + struct pf_ruleset *ruleset; r->anchor = NULL; r->anchor_relative = 0; r->anchor_wildcard = 0; - if (!name[0]) - return (0); + if (!name[0]) { + return 0; + } path = (char *)rs_malloc(MAXPATHLEN); bzero(path, MAXPATHLEN); - if (name[0] == '/') + if (name[0] == '/') { strlcpy(path, name + 1, MAXPATHLEN); - else { + } else { /* relative path */ r->anchor_relative = 1; - if (s->anchor == NULL || !s->anchor->path[0]) + if (s->anchor == NULL || !s->anchor->path[0]) { path[0] = 0; - else + } else { strlcpy(path, s->anchor->path, MAXPATHLEN); + } while (name[0] == '.' && name[1] == '.' && name[2] == '/') { if (!path[0]) { printf("pf_anchor_setup: .. 
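/*
 * A minimal sketch (hypothetical name) of the "../" handling in
 * pf_anchor_setup() nearby: each "../" in a relative anchor path pops
 * one trailing component by truncating at the last '/', and popping
 * past the root is an error.
 */
static int
pop_component(char *path)
{
	char *p;

	if (!path[0]) {
		return 1;               /* ".. beyond root" */
	}
	if ((p = strrchr(path, '/')) != NULL) {
		*p = '\0';              /* drop the last component */
	} else {
		path[0] = '\0';         /* single component: now at root */
	}
	return 0;
}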
beyond root\n"); rs_free(path); - return (1); + return 1; } - if ((p = strrchr(path, '/')) != NULL) + if ((p = strrchr(path, '/')) != NULL) { *p = 0; - else + } else { path[0] = 0; + } r->anchor_relative++; name += 3; } - if (path[0]) + if (path[0]) { strlcat(path, "/", MAXPATHLEN); + } strlcat(path, name, MAXPATHLEN); } if ((p = strrchr(path, '/')) != NULL && strcmp(p, "/*") == 0) { @@ -426,11 +450,11 @@ pf_anchor_setup(struct pf_rule *r, const struct pf_ruleset *s, rs_free(path); if (ruleset == NULL || ruleset->anchor == NULL) { printf("pf_anchor_setup: ruleset\n"); - return (1); + return 1; } r->anchor = ruleset->anchor; r->anchor->refcnt++; - return (0); + return 0; } int @@ -438,57 +462,64 @@ pf_anchor_copyout(const struct pf_ruleset *rs, const struct pf_rule *r, struct pfioc_rule *pr) { pr->anchor_call[0] = 0; - if (r->anchor == NULL) - return (0); + if (r->anchor == NULL) { + return 0; + } if (!r->anchor_relative) { - strlcpy(pr->anchor_call, "/", sizeof (pr->anchor_call)); + strlcpy(pr->anchor_call, "/", sizeof(pr->anchor_call)); strlcat(pr->anchor_call, r->anchor->path, - sizeof (pr->anchor_call)); + sizeof(pr->anchor_call)); } else { - char *a, *p; - int i; + char *a, *p; + int i; a = (char *)rs_malloc(MAXPATHLEN); bzero(a, MAXPATHLEN); - if (rs->anchor == NULL) + if (rs->anchor == NULL) { a[0] = 0; - else + } else { strlcpy(a, rs->anchor->path, MAXPATHLEN); + } for (i = 1; i < r->anchor_relative; ++i) { - if ((p = strrchr(a, '/')) == NULL) + if ((p = strrchr(a, '/')) == NULL) { p = a; + } *p = 0; strlcat(pr->anchor_call, "../", - sizeof (pr->anchor_call)); + sizeof(pr->anchor_call)); } if (strncmp(a, r->anchor->path, strlen(a))) { printf("pf_anchor_copyout: '%s' '%s'\n", a, r->anchor->path); rs_free(a); - return (1); + return 1; } - if (strlen(r->anchor->path) > strlen(a)) + if (strlen(r->anchor->path) > strlen(a)) { strlcat(pr->anchor_call, r->anchor->path + (a[0] ? - strlen(a) + 1 : 0), sizeof (pr->anchor_call)); + strlen(a) + 1 : 0), sizeof(pr->anchor_call)); + } rs_free(a); } - if (r->anchor_wildcard) + if (r->anchor_wildcard) { strlcat(pr->anchor_call, pr->anchor_call[0] ? "/*" : "*", - sizeof (pr->anchor_call)); - return (0); + sizeof(pr->anchor_call)); + } + return 0; } void pf_anchor_remove(struct pf_rule *r) { - if (r->anchor == NULL) + if (r->anchor == NULL) { return; + } if (r->anchor->refcnt <= 0) { printf("pf_anchor_remove: broken refcount\n"); r->anchor = NULL; return; } - if (!--r->anchor->refcnt) + if (!--r->anchor->refcnt) { pf_remove_if_empty_ruleset(&r->anchor->ruleset); + } r->anchor = NULL; } diff --git a/bsd/net/pf_table.c b/bsd/net/pf_table.c index 5ccaf0426..c66741be2 100644 --- a/bsd/net/pf_table.c +++ b/bsd/net/pf_table.c @@ -72,57 +72,57 @@ #include #include -#define ACCEPT_FLAGS(flags, oklist) \ - do { \ - if ((flags & ~(oklist)) & \ - PFR_FLAG_ALLMASK) \ - return (EINVAL); \ +#define ACCEPT_FLAGS(flags, oklist) \ + do { \ + if ((flags & ~(oklist)) & \ + PFR_FLAG_ALLMASK) \ + return (EINVAL); \ } while (0) -#define COPYIN(from, to, size, flags) \ - ((flags & PFR_FLAG_USERIOCTL) ? \ - copyin((from), (to), (size)) : \ +#define COPYIN(from, to, size, flags) \ + ((flags & PFR_FLAG_USERIOCTL) ? \ + copyin((from), (to), (size)) : \ (bcopy((void *)(uintptr_t)(from), (to), (size)), 0)) -#define COPYOUT(from, to, size, flags) \ - ((flags & PFR_FLAG_USERIOCTL) ? \ - copyout((from), (to), (size)) : \ +#define COPYOUT(from, to, size, flags) \ + ((flags & PFR_FLAG_USERIOCTL) ? 
\ + copyout((from), (to), (size)) : \ (bcopy((from), (void *)(uintptr_t)(to), (size)), 0)) -#define FILLIN_SIN(sin, addr) \ - do { \ - (sin).sin_len = sizeof (sin); \ - (sin).sin_family = AF_INET; \ - (sin).sin_addr = (addr); \ +#define FILLIN_SIN(sin, addr) \ + do { \ + (sin).sin_len = sizeof (sin); \ + (sin).sin_family = AF_INET; \ + (sin).sin_addr = (addr); \ } while (0) -#define FILLIN_SIN6(sin6, addr) \ - do { \ - (sin6).sin6_len = sizeof (sin6); \ - (sin6).sin6_family = AF_INET6; \ - (sin6).sin6_addr = (addr); \ +#define FILLIN_SIN6(sin6, addr) \ + do { \ + (sin6).sin6_len = sizeof (sin6); \ + (sin6).sin6_family = AF_INET6; \ + (sin6).sin6_addr = (addr); \ } while (0) -#define SWAP(type, a1, a2) \ - do { \ - type tmp = a1; \ - a1 = a2; \ - a2 = tmp; \ +#define SWAP(type, a1, a2) \ + do { \ + type tmp = a1; \ + a1 = a2; \ + a2 = tmp; \ } while (0) -#define SUNION2PF(su, af) (((af) == AF_INET) ? \ - (struct pf_addr *)&(su)->sin.sin_addr : \ +#define SUNION2PF(su, af) (((af) == AF_INET) ? \ + (struct pf_addr *)&(su)->sin.sin_addr : \ (struct pf_addr *)&(su)->sin6.sin6_addr) -#define AF_BITS(af) (((af) == AF_INET) ? 32 : 128) -#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) -#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) +#define AF_BITS(af) (((af) == AF_INET) ? 32 : 128) +#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af)) +#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af)) #define KENTRY_RNF_ROOT(ke) \ - ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) + ((((struct radix_node *)(ke))->rn_flags & RNF_ROOT) != 0) -#define NO_ADDRESSES (-1) -#define ENQUEUE_UNMARKED_ONLY (1) -#define INVERT_NEG_FLAG (1) +#define NO_ADDRESSES (-1) +#define ENQUEUE_UNMARKED_ONLY (1) +#define INVERT_NEG_FLAG (1) struct pfr_walktree { enum pfrw_op { @@ -133,34 +133,34 @@ struct pfr_walktree { PFRW_GET_ASTATS, PFRW_POOL_GET, PFRW_DYNADDR_UPDATE - } pfrw_op; + } pfrw_op; union { - user_addr_t pfrw1_addr; - user_addr_t pfrw1_astats; - struct pfr_kentryworkq *pfrw1_workq; - struct pfr_kentry *pfrw1_kentry; - struct pfi_dynaddr *pfrw1_dyn; - } pfrw_1; - int pfrw_free; - int pfrw_flags; + user_addr_t pfrw1_addr; + user_addr_t pfrw1_astats; + struct pfr_kentryworkq *pfrw1_workq; + struct pfr_kentry *pfrw1_kentry; + struct pfi_dynaddr *pfrw1_dyn; + } pfrw_1; + int pfrw_free; + int pfrw_flags; }; -#define pfrw_addr pfrw_1.pfrw1_addr -#define pfrw_astats pfrw_1.pfrw1_astats -#define pfrw_workq pfrw_1.pfrw1_workq -#define pfrw_kentry pfrw_1.pfrw1_kentry -#define pfrw_dyn pfrw_1.pfrw1_dyn -#define pfrw_cnt pfrw_free +#define pfrw_addr pfrw_1.pfrw1_addr +#define pfrw_astats pfrw_1.pfrw1_astats +#define pfrw_workq pfrw_1.pfrw1_workq +#define pfrw_kentry pfrw_1.pfrw1_kentry +#define pfrw_dyn pfrw_1.pfrw1_dyn +#define pfrw_cnt pfrw_free -#define senderr(e) do { rv = (e); goto _bad; } while (0) +#define senderr(e) do { rv = (e); goto _bad; } while (0) -struct pool pfr_ktable_pl; -struct pool pfr_kentry_pl; +struct pool pfr_ktable_pl; +struct pool pfr_kentry_pl; -static struct pool pfr_kentry_pl2; -static struct sockaddr_in pfr_sin; -static struct sockaddr_in6 pfr_sin6; -static union sockaddr_union pfr_mask; -static struct pf_addr pfr_ffaddr; +static struct pool pfr_kentry_pl2; +static struct sockaddr_in pfr_sin; +static struct sockaddr_in6 pfr_sin6; +static union sockaddr_union pfr_mask; +static struct pf_addr pfr_ffaddr; static void pfr_copyout_addr(struct pfr_addr *, struct pfr_kentry *ke); static int pfr_validate_addr(struct pfr_addr *); 
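/*
 * A minimal sketch of what the COPYIN()/COPYOUT() macros above expand
 * to, written as a function for readability (the function name is
 * hypothetical): requests arriving through a user ioctl get a faulting
 * copyin(), while kernel-internal callers pass kernel pointers carried
 * in a user_addr_t and are served by a plain bcopy().
 */
static int
pfr_copyin_sketch(user_addr_t from, void *to, size_t size, int flags)
{
	if (flags & PFR_FLAG_USERIOCTL) {
		return copyin(from, to, size);  /* 0, or EFAULT on fault */
	}
	bcopy((void *)(uintptr_t)from, to, size);
	return 0;
}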
@@ -204,26 +204,26 @@ RB_PROTOTYPE_SC(static, pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare); -static struct pfr_ktablehead pfr_ktables; -static struct pfr_table pfr_nulltable; -static int pfr_ktable_cnt; +static struct pfr_ktablehead pfr_ktables; +static struct pfr_table pfr_nulltable; +static int pfr_ktable_cnt; void pfr_initialize(void) { - pool_init(&pfr_ktable_pl, sizeof (struct pfr_ktable), 0, 0, 0, + pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable), 0, 0, 0, "pfrktable", NULL); - pool_init(&pfr_kentry_pl, sizeof (struct pfr_kentry), 0, 0, 0, + pool_init(&pfr_kentry_pl, sizeof(struct pfr_kentry), 0, 0, 0, "pfrkentry", NULL); - pool_init(&pfr_kentry_pl2, sizeof (struct pfr_kentry), 0, 0, 0, + pool_init(&pfr_kentry_pl2, sizeof(struct pfr_kentry), 0, 0, 0, "pfrkentry2", NULL); - pfr_sin.sin_len = sizeof (pfr_sin); + pfr_sin.sin_len = sizeof(pfr_sin); pfr_sin.sin_family = AF_INET; - pfr_sin6.sin6_len = sizeof (pfr_sin6); + pfr_sin6.sin6_len = sizeof(pfr_sin6); pfr_sin6.sin6_family = AF_INET6; - memset(&pfr_ffaddr, 0xff, sizeof (pfr_ffaddr)); + memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr)); } #if 0 @@ -239,17 +239,20 @@ pfr_destroy(void) int pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) { - struct pfr_ktable *kt; - struct pfr_kentryworkq workq; + struct pfr_ktable *kt; + struct pfr_kentryworkq workq; ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY); - if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) - return (EINVAL); + if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) { + return EINVAL; + } kt = pfr_lookup_table(tbl); - if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (ESRCH); - if (kt->pfrkt_flags & PFR_TFLAG_CONST) - return (EPERM); + if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return ESRCH; + } + if (kt->pfrkt_flags & PFR_TFLAG_CONST) { + return EPERM; + } pfr_enqueue_addrs(kt, &workq, ndel, 0); if (!(flags & PFR_FLAG_DUMMY)) { @@ -260,56 +263,64 @@ pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags) kt->pfrkt_cnt = 0; } } - return (0); + return 0; } int pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size, int *nadd, int flags) { - struct pfr_ktable *kt, *tmpkt; - struct pfr_kentryworkq workq; - struct pfr_kentry *p, *q; - struct pfr_addr ad; - int i, rv, xadd = 0; - user_addr_t addr = _addr; - u_int64_t tzero = pf_calendar_time_second(); + struct pfr_ktable *kt, *tmpkt; + struct pfr_kentryworkq workq; + struct pfr_kentry *p, *q; + struct pfr_addr ad; + int i, rv, xadd = 0; + user_addr_t addr = _addr; + u_int64_t tzero = pf_calendar_time_second(); ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); - if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) - return (EINVAL); + if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) { + return EINVAL; + } kt = pfr_lookup_table(tbl); - if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (ESRCH); - if (kt->pfrkt_flags & PFR_TFLAG_CONST) - return (EPERM); + if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return ESRCH; + } + if (kt->pfrkt_flags & PFR_TFLAG_CONST) { + return EPERM; + } tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0); - if (tmpkt == NULL) - return (ENOMEM); + if (tmpkt == NULL) { + return ENOMEM; + } SLIST_INIT(&workq); - for (i = 0; i < size; i++, addr += sizeof (ad)) { - if (COPYIN(addr, &ad, sizeof (ad), flags)) + for (i = 0; i < size; i++, addr += sizeof(ad)) { + if (COPYIN(addr, &ad, 
sizeof(ad), flags)) { senderr(EFAULT); - if (pfr_validate_addr(&ad)) + } + if (pfr_validate_addr(&ad)) { senderr(EINVAL); + } p = pfr_lookup_addr(kt, &ad, 1); q = pfr_lookup_addr(tmpkt, &ad, 1); if (flags & PFR_FLAG_FEEDBACK) { - if (q != NULL) + if (q != NULL) { ad.pfra_fback = PFR_FB_DUPLICATE; - else if (p == NULL) + } else if (p == NULL) { ad.pfra_fback = PFR_FB_ADDED; - else if (p->pfrke_not != ad.pfra_not) + } else if (p->pfrke_not != ad.pfra_not) { ad.pfra_fback = PFR_FB_CONFLICT; - else + } else { ad.pfra_fback = PFR_FB_NONE; + } } if (p == NULL && q == NULL) { p = pfr_create_kentry(&ad, !(flags & PFR_FLAG_USERIOCTL)); - if (p == NULL) + if (p == NULL) { senderr(ENOMEM); + } if (pfr_route_kentry(tmpkt, p)) { pfr_destroy_kentry(p); ad.pfra_fback = PFR_FB_NONE; @@ -318,48 +329,56 @@ pfr_add_addrs(struct pfr_table *tbl, user_addr_t _addr, int size, xadd++; } } - if (flags & PFR_FLAG_FEEDBACK) - if (COPYOUT(&ad, addr, sizeof (ad), flags)) + if (flags & PFR_FLAG_FEEDBACK) { + if (COPYOUT(&ad, addr, sizeof(ad), flags)) { senderr(EFAULT); + } + } } pfr_clean_node_mask(tmpkt, &workq); if (!(flags & PFR_FLAG_DUMMY)) { pfr_insert_kentries(kt, &workq, tzero); - } else + } else { pfr_destroy_kentries(&workq); - if (nadd != NULL) + } + if (nadd != NULL) { *nadd = xadd; + } pfr_destroy_ktable(tmpkt, 0); - return (0); + return 0; _bad: pfr_clean_node_mask(tmpkt, &workq); pfr_destroy_kentries(&workq); - if (flags & PFR_FLAG_FEEDBACK) + if (flags & PFR_FLAG_FEEDBACK) { pfr_reset_feedback(_addr, size, flags); + } pfr_destroy_ktable(tmpkt, 0); - return (rv); + return rv; } int pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size, int *ndel, int flags) { - struct pfr_ktable *kt; - struct pfr_kentryworkq workq; - struct pfr_kentry *p; - struct pfr_addr ad; - user_addr_t addr = _addr; - int i, rv, xdel = 0, log = 1; + struct pfr_ktable *kt; + struct pfr_kentryworkq workq; + struct pfr_kentry *p; + struct pfr_addr ad; + user_addr_t addr = _addr; + int i, rv, xdel = 0, log = 1; ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); - if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) - return (EINVAL); + if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL)) { + return EINVAL; + } kt = pfr_lookup_table(tbl); - if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (ESRCH); - if (kt->pfrkt_flags & PFR_TFLAG_CONST) - return (EPERM); + if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return ESRCH; + } + if (kt->pfrkt_flags & PFR_TFLAG_CONST) { + return EPERM; + } /* * there are two algorithms to choose from here. * with: @@ -371,39 +390,46 @@ pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size, * * following code try to decide which one is best. 
*/ - for (i = kt->pfrkt_cnt; i > 0; i >>= 1) + for (i = kt->pfrkt_cnt; i > 0; i >>= 1) { log++; - if (size > kt->pfrkt_cnt/log) { + } + if (size > kt->pfrkt_cnt / log) { /* full table scan */ pfr_mark_addrs(kt); } else { /* iterate over addresses to delete */ - for (i = 0; i < size; i++, addr += sizeof (ad)) { - if (COPYIN(addr, &ad, sizeof (ad), flags)) - return (EFAULT); - if (pfr_validate_addr(&ad)) - return (EINVAL); + for (i = 0; i < size; i++, addr += sizeof(ad)) { + if (COPYIN(addr, &ad, sizeof(ad), flags)) { + return EFAULT; + } + if (pfr_validate_addr(&ad)) { + return EINVAL; + } p = pfr_lookup_addr(kt, &ad, 1); - if (p != NULL) + if (p != NULL) { p->pfrke_mark = 0; + } } } SLIST_INIT(&workq); - for (addr = _addr, i = 0; i < size; i++, addr += sizeof (ad)) { - if (COPYIN(addr, &ad, sizeof (ad), flags)) + for (addr = _addr, i = 0; i < size; i++, addr += sizeof(ad)) { + if (COPYIN(addr, &ad, sizeof(ad), flags)) { senderr(EFAULT); - if (pfr_validate_addr(&ad)) + } + if (pfr_validate_addr(&ad)) { senderr(EINVAL); + } p = pfr_lookup_addr(kt, &ad, 1); if (flags & PFR_FLAG_FEEDBACK) { - if (p == NULL) + if (p == NULL) { ad.pfra_fback = PFR_FB_NONE; - else if (p->pfrke_not != ad.pfra_not) + } else if (p->pfrke_not != ad.pfra_not) { ad.pfra_fback = PFR_FB_CONFLICT; - else if (p->pfrke_mark) + } else if (p->pfrke_mark) { ad.pfra_fback = PFR_FB_DUPLICATE; - else + } else { ad.pfra_fback = PFR_FB_DELETED; + } } if (p != NULL && p->pfrke_not == ad.pfra_not && !p->pfrke_mark) { @@ -411,20 +437,24 @@ pfr_del_addrs(struct pfr_table *tbl, user_addr_t _addr, int size, SLIST_INSERT_HEAD(&workq, p, pfrke_workq); xdel++; } - if (flags & PFR_FLAG_FEEDBACK) - if (COPYOUT(&ad, addr, sizeof (ad), flags)) + if (flags & PFR_FLAG_FEEDBACK) { + if (COPYOUT(&ad, addr, sizeof(ad), flags)) { senderr(EFAULT); + } + } } if (!(flags & PFR_FLAG_DUMMY)) { pfr_remove_kentries(kt, &workq); } - if (ndel != NULL) + if (ndel != NULL) { *ndel = xdel; - return (0); + } + return 0; _bad: - if (flags & PFR_FLAG_FEEDBACK) + if (flags & PFR_FLAG_FEEDBACK) { pfr_reset_feedback(_addr, size, flags); - return (rv); + } + return rv; } int @@ -432,36 +462,42 @@ pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size, int *size2, int *nadd, int *ndel, int *nchange, int flags, u_int32_t ignore_pfrt_flags) { - struct pfr_ktable *kt, *tmpkt; - struct pfr_kentryworkq addq, delq, changeq; - struct pfr_kentry *p, *q; - struct pfr_addr ad; - user_addr_t addr = _addr; - int i, rv, xadd = 0, xdel = 0, xchange = 0; - u_int64_t tzero = pf_calendar_time_second(); + struct pfr_ktable *kt, *tmpkt; + struct pfr_kentryworkq addq, delq, changeq; + struct pfr_kentry *p, *q; + struct pfr_addr ad; + user_addr_t addr = _addr; + int i, rv, xadd = 0, xdel = 0, xchange = 0; + u_int64_t tzero = pf_calendar_time_second(); ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); if (pfr_validate_table(tbl, ignore_pfrt_flags, flags & - PFR_FLAG_USERIOCTL)) - return (EINVAL); + PFR_FLAG_USERIOCTL)) { + return EINVAL; + } kt = pfr_lookup_table(tbl); - if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (ESRCH); - if (kt->pfrkt_flags & PFR_TFLAG_CONST) - return (EPERM); + if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return ESRCH; + } + if (kt->pfrkt_flags & PFR_TFLAG_CONST) { + return EPERM; + } tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0); - if (tmpkt == NULL) - return (ENOMEM); + if (tmpkt == NULL) { + return ENOMEM; + } pfr_mark_addrs(kt); SLIST_INIT(&addq); SLIST_INIT(&delq); SLIST_INIT(&changeq); - 
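/*
 * A minimal sketch (hypothetical name) of the deletion-strategy
 * heuristic in pfr_del_addrs() above: log ends up as roughly
 * log2(table size), so the full mark-and-sweep pass (O(cnt)) is
 * chosen once the request holds more than cnt/log2(cnt) addresses,
 * i.e. once per-address radix lookups would cost more.
 */
static int
prefer_full_scan(int cnt, int size)
{
	int log = 1, i;

	for (i = cnt; i > 0; i >>= 1) {
		log++;                  /* ~log2(cnt), at least 1 */
	}
	return size > cnt / log;
}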
for (i = 0; i < size; i++, addr += sizeof (ad)) { - if (COPYIN(addr, &ad, sizeof (ad), flags)) + for (i = 0; i < size; i++, addr += sizeof(ad)) { + if (COPYIN(addr, &ad, sizeof(ad), flags)) { senderr(EFAULT); - if (pfr_validate_addr(&ad)) + } + if (pfr_validate_addr(&ad)) { senderr(EINVAL); + } ad.pfra_fback = PFR_FB_NONE; p = pfr_lookup_addr(kt, &ad, 1); if (p != NULL) { @@ -483,8 +519,9 @@ pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size, } p = pfr_create_kentry(&ad, !(flags & PFR_FLAG_USERIOCTL)); - if (p == NULL) + if (p == NULL) { senderr(ENOMEM); + } if (pfr_route_kentry(tmpkt, p)) { pfr_destroy_kentry(p); ad.pfra_fback = PFR_FB_NONE; @@ -495,14 +532,16 @@ pfr_set_addrs(struct pfr_table *tbl, user_addr_t _addr, int size, } } _skip: - if (flags & PFR_FLAG_FEEDBACK) - if (COPYOUT(&ad, addr, sizeof (ad), flags)) + if (flags & PFR_FLAG_FEEDBACK) { + if (COPYOUT(&ad, addr, sizeof(ad), flags)) { senderr(EFAULT); + } + } } pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY); if ((flags & PFR_FLAG_FEEDBACK) && *size2) { - if (*size2 < size+xdel) { - *size2 = size+xdel; + if (*size2 < size + xdel) { + *size2 = size + xdel; senderr(0); } i = 0; @@ -510,9 +549,10 @@ _skip: SLIST_FOREACH(p, &delq, pfrke_workq) { pfr_copyout_addr(&ad, p); ad.pfra_fback = PFR_FB_DELETED; - if (COPYOUT(&ad, addr, sizeof (ad), flags)) + if (COPYOUT(&ad, addr, sizeof(ad), flags)) { senderr(EFAULT); - addr += sizeof (ad); + } + addr += sizeof(ad); i++; } } @@ -521,182 +561,210 @@ _skip: pfr_insert_kentries(kt, &addq, tzero); pfr_remove_kentries(kt, &delq); pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG); - } else + } else { pfr_destroy_kentries(&addq); - if (nadd != NULL) + } + if (nadd != NULL) { *nadd = xadd; - if (ndel != NULL) + } + if (ndel != NULL) { *ndel = xdel; - if (nchange != NULL) + } + if (nchange != NULL) { *nchange = xchange; - if ((flags & PFR_FLAG_FEEDBACK) && size2) - *size2 = size+xdel; + } + if ((flags & PFR_FLAG_FEEDBACK) && size2) { + *size2 = size + xdel; + } pfr_destroy_ktable(tmpkt, 0); - return (0); + return 0; _bad: pfr_clean_node_mask(tmpkt, &addq); pfr_destroy_kentries(&addq); - if (flags & PFR_FLAG_FEEDBACK) + if (flags & PFR_FLAG_FEEDBACK) { pfr_reset_feedback(_addr, size, flags); + } pfr_destroy_ktable(tmpkt, 0); - return (rv); + return rv; } int pfr_tst_addrs(struct pfr_table *tbl, user_addr_t addr, int size, - int *nmatch, int flags) + int *nmatch, int flags) { - struct pfr_ktable *kt; - struct pfr_kentry *p; - struct pfr_addr ad; - int i, xmatch = 0; + struct pfr_ktable *kt; + struct pfr_kentry *p; + struct pfr_addr ad; + int i, xmatch = 0; ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE); - if (pfr_validate_table(tbl, 0, 0)) - return (EINVAL); + if (pfr_validate_table(tbl, 0, 0)) { + return EINVAL; + } kt = pfr_lookup_table(tbl); - if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (ESRCH); - - for (i = 0; i < size; i++, addr += sizeof (ad)) { - if (COPYIN(addr, &ad, sizeof (ad), flags)) - return (EFAULT); - if (pfr_validate_addr(&ad)) - return (EINVAL); - if (ADDR_NETWORK(&ad)) - return (EINVAL); + if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return ESRCH; + } + + for (i = 0; i < size; i++, addr += sizeof(ad)) { + if (COPYIN(addr, &ad, sizeof(ad), flags)) { + return EFAULT; + } + if (pfr_validate_addr(&ad)) { + return EINVAL; + } + if (ADDR_NETWORK(&ad)) { + return EINVAL; + } p = pfr_lookup_addr(kt, &ad, 0); - if (flags & PFR_FLAG_REPLACE) + if (flags & PFR_FLAG_REPLACE) { pfr_copyout_addr(&ad, p); + } ad.pfra_fback = (p == NULL) ? 
PFR_FB_NONE : (p->pfrke_not ? PFR_FB_NOTMATCH : PFR_FB_MATCH); - if (p != NULL && !p->pfrke_not) + if (p != NULL && !p->pfrke_not) { xmatch++; - if (COPYOUT(&ad, addr, sizeof (ad), flags)) - return (EFAULT); + } + if (COPYOUT(&ad, addr, sizeof(ad), flags)) { + return EFAULT; + } } - if (nmatch != NULL) + if (nmatch != NULL) { *nmatch = xmatch; - return (0); + } + return 0; } int pfr_get_addrs(struct pfr_table *tbl, user_addr_t addr, int *size, - int flags) + int flags) { - struct pfr_ktable *kt; - struct pfr_walktree w; - int rv; + struct pfr_ktable *kt; + struct pfr_walktree w; + int rv; ACCEPT_FLAGS(flags, 0); - if (pfr_validate_table(tbl, 0, 0)) - return (EINVAL); + if (pfr_validate_table(tbl, 0, 0)) { + return EINVAL; + } kt = pfr_lookup_table(tbl); - if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (ESRCH); + if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return ESRCH; + } if (kt->pfrkt_cnt > *size) { *size = kt->pfrkt_cnt; - return (0); + return 0; } - bzero(&w, sizeof (w)); + bzero(&w, sizeof(w)); w.pfrw_op = PFRW_GET_ADDRS; w.pfrw_addr = addr; w.pfrw_free = kt->pfrkt_cnt; w.pfrw_flags = flags; rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); - if (!rv) + if (!rv) { rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w); - if (rv) - return (rv); + } + if (rv) { + return rv; + } if (w.pfrw_free) { printf("pfr_get_addrs: corruption detected (%d).\n", w.pfrw_free); - return (ENOTTY); + return ENOTTY; } *size = kt->pfrkt_cnt; - return (0); + return 0; } int pfr_get_astats(struct pfr_table *tbl, user_addr_t addr, int *size, - int flags) + int flags) { - struct pfr_ktable *kt; - struct pfr_walktree w; - struct pfr_kentryworkq workq; - int rv; - u_int64_t tzero = pf_calendar_time_second(); + struct pfr_ktable *kt; + struct pfr_walktree w; + struct pfr_kentryworkq workq; + int rv; + u_int64_t tzero = pf_calendar_time_second(); /* XXX PFR_FLAG_CLSTATS disabled */ ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC); - if (pfr_validate_table(tbl, 0, 0)) - return (EINVAL); + if (pfr_validate_table(tbl, 0, 0)) { + return EINVAL; + } kt = pfr_lookup_table(tbl); - if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (ESRCH); + if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return ESRCH; + } if (kt->pfrkt_cnt > *size) { *size = kt->pfrkt_cnt; - return (0); + return 0; } - bzero(&w, sizeof (w)); + bzero(&w, sizeof(w)); w.pfrw_op = PFRW_GET_ASTATS; w.pfrw_astats = addr; w.pfrw_free = kt->pfrkt_cnt; w.pfrw_flags = flags; rv = kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); - if (!rv) + if (!rv) { rv = kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w); + } if (!rv && (flags & PFR_FLAG_CLSTATS)) { pfr_enqueue_addrs(kt, &workq, NULL, 0); pfr_clstats_kentries(&workq, tzero, 0); } - if (rv) - return (rv); + if (rv) { + return rv; + } if (w.pfrw_free) { printf("pfr_get_astats: corruption detected (%d).\n", w.pfrw_free); - return (ENOTTY); + return ENOTTY; } *size = kt->pfrkt_cnt; - return (0); + return 0; } int pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size, int *nzero, int flags) { - struct pfr_ktable *kt; - struct pfr_kentryworkq workq; - struct pfr_kentry *p; - struct pfr_addr ad; - user_addr_t addr = _addr; - int i, rv, xzero = 0; + struct pfr_ktable *kt; + struct pfr_kentryworkq workq; + struct pfr_kentry *p; + struct pfr_addr ad; + user_addr_t addr = _addr; + int i, rv, xzero = 0; ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK); - if (pfr_validate_table(tbl, 0, 
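/*
 * A minimal sketch (hypothetical name) of the two-call sizing protocol
 * pfr_get_addrs()/pfr_get_astats() above implement: when the caller's
 * buffer is too small, the required element count is written back and
 * the call still returns 0, so userland can retry with a larger buffer.
 */
static int
get_count_or_copy(int have, int *size /* in: capacity, out: count */)
{
	if (have > *size) {
		*size = have;           /* report required capacity */
		return 0;               /* not an error: caller retries */
	}
	/* ...walk the table and copy out 'have' entries here... */
	*size = have;
	return 0;
}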
0)) - return (EINVAL); + if (pfr_validate_table(tbl, 0, 0)) { + return EINVAL; + } kt = pfr_lookup_table(tbl); - if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (ESRCH); + if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return ESRCH; + } SLIST_INIT(&workq); - for (i = 0; i < size; i++, addr += sizeof (ad)) { - if (COPYIN(addr, &ad, sizeof (ad), flags)) + for (i = 0; i < size; i++, addr += sizeof(ad)) { + if (COPYIN(addr, &ad, sizeof(ad), flags)) { senderr(EFAULT); - if (pfr_validate_addr(&ad)) + } + if (pfr_validate_addr(&ad)) { senderr(EINVAL); + } p = pfr_lookup_addr(kt, &ad, 1); if (flags & PFR_FLAG_FEEDBACK) { ad.pfra_fback = (p != NULL) ? PFR_FB_CLEARED : PFR_FB_NONE; - if (COPYOUT(&ad, addr, sizeof (ad), flags)) + if (COPYOUT(&ad, addr, sizeof(ad), flags)) { senderr(EFAULT); + } } if (p != NULL) { SLIST_INSERT_HEAD(&workq, p, pfrke_workq); @@ -707,13 +775,15 @@ pfr_clr_astats(struct pfr_table *tbl, user_addr_t _addr, int size, if (!(flags & PFR_FLAG_DUMMY)) { pfr_clstats_kentries(&workq, 0, 0); } - if (nzero != NULL) + if (nzero != NULL) { *nzero = xzero; - return (0); + } + return 0; _bad: - if (flags & PFR_FLAG_FEEDBACK) + if (flags & PFR_FLAG_FEEDBACK) { pfr_reset_feedback(_addr, size, flags); - return (rv); + } + return rv; } static int @@ -724,130 +794,150 @@ pfr_validate_addr(struct pfr_addr *ad) switch (ad->pfra_af) { #if INET case AF_INET: - if (ad->pfra_net > 32) - return (-1); + if (ad->pfra_net > 32) { + return -1; + } break; #endif /* INET */ #if INET6 case AF_INET6: - if (ad->pfra_net > 128) - return (-1); + if (ad->pfra_net > 128) { + return -1; + } break; #endif /* INET6 */ default: - return (-1); + return -1; } if (ad->pfra_net < 128 && - (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8)))) - return (-1); - for (i = (ad->pfra_net+7)/8; i < (int)sizeof (ad->pfra_u); i++) - if (((caddr_t)ad)[i]) - return (-1); - if (ad->pfra_not && ad->pfra_not != 1) - return (-1); - if (ad->pfra_fback) - return (-1); - return (0); + (((caddr_t)ad)[ad->pfra_net / 8] & (0xFF >> (ad->pfra_net % 8)))) { + return -1; + } + for (i = (ad->pfra_net + 7) / 8; i < (int)sizeof(ad->pfra_u); i++) { + if (((caddr_t)ad)[i]) { + return -1; + } + } + if (ad->pfra_not && ad->pfra_not != 1) { + return -1; + } + if (ad->pfra_fback) { + return -1; + } + return 0; } static void pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, - int *naddr, int sweep) + int *naddr, int sweep) { - struct pfr_walktree w; + struct pfr_walktree w; SLIST_INIT(workq); - bzero(&w, sizeof (w)); + bzero(&w, sizeof(w)); w.pfrw_op = sweep ? 
PFRW_SWEEP : PFRW_ENQUEUE; w.pfrw_workq = workq; - if (kt->pfrkt_ip4 != NULL) + if (kt->pfrkt_ip4 != NULL) { if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, - pfr_walktree, &w)) + pfr_walktree, &w)) { printf("pfr_enqueue_addrs: IPv4 walktree failed.\n"); - if (kt->pfrkt_ip6 != NULL) + } + } + if (kt->pfrkt_ip6 != NULL) { if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, - pfr_walktree, &w)) + pfr_walktree, &w)) { printf("pfr_enqueue_addrs: IPv6 walktree failed.\n"); - if (naddr != NULL) + } + } + if (naddr != NULL) { *naddr = w.pfrw_cnt; + } } static void pfr_mark_addrs(struct pfr_ktable *kt) { - struct pfr_walktree w; + struct pfr_walktree w; - bzero(&w, sizeof (w)); + bzero(&w, sizeof(w)); w.pfrw_op = PFRW_MARK; - if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) + if (kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w)) { printf("pfr_mark_addrs: IPv4 walktree failed.\n"); - if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) + } + if (kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w)) { printf("pfr_mark_addrs: IPv6 walktree failed.\n"); + } } static struct pfr_kentry * pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact) { - union sockaddr_union sa, mask; - struct radix_node_head *head; - struct pfr_kentry *ke; + union sockaddr_union sa, mask; + struct radix_node_head *head; + struct pfr_kentry *ke; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - bzero(&sa, sizeof (sa)); + bzero(&sa, sizeof(sa)); if (ad->pfra_af == AF_INET) { FILLIN_SIN(sa.sin, ad->pfra_ip4addr); head = kt->pfrkt_ip4; } else if (ad->pfra_af == AF_INET6) { FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr); head = kt->pfrkt_ip6; - } - else + } else { return NULL; + } if (ADDR_NETWORK(ad)) { pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net); ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head); - if (ke && KENTRY_RNF_ROOT(ke)) + if (ke && KENTRY_RNF_ROOT(ke)) { ke = NULL; + } } else { ke = (struct pfr_kentry *)rn_match(&sa, head); - if (ke && KENTRY_RNF_ROOT(ke)) + if (ke && KENTRY_RNF_ROOT(ke)) { ke = NULL; - if (exact && ke && KENTRY_NETWORK(ke)) + } + if (exact && ke && KENTRY_NETWORK(ke)) { ke = NULL; + } } - return (ke); + return ke; } static struct pfr_kentry * pfr_create_kentry(struct pfr_addr *ad, int intr) { - struct pfr_kentry *ke; + struct pfr_kentry *ke; - if (intr) + if (intr) { ke = pool_get(&pfr_kentry_pl2, PR_WAITOK); - else + } else { ke = pool_get(&pfr_kentry_pl, PR_WAITOK); - if (ke == NULL) - return (NULL); - bzero(ke, sizeof (*ke)); + } + if (ke == NULL) { + return NULL; + } + bzero(ke, sizeof(*ke)); - if (ad->pfra_af == AF_INET) + if (ad->pfra_af == AF_INET) { FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr); - else if (ad->pfra_af == AF_INET6) + } else if (ad->pfra_af == AF_INET6) { FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr); + } ke->pfrke_af = ad->pfra_af; ke->pfrke_net = ad->pfra_net; ke->pfrke_not = ad->pfra_not; ke->pfrke_intrpool = intr; - return (ke); + return ke; } static void pfr_destroy_kentries(struct pfr_kentryworkq *workq) { - struct pfr_kentry *p, *q; + struct pfr_kentry *p, *q; for (p = SLIST_FIRST(workq); p != NULL; p = q) { q = SLIST_NEXT(p, pfrke_workq); @@ -858,18 +948,19 @@ pfr_destroy_kentries(struct pfr_kentryworkq *workq) static void pfr_destroy_kentry(struct pfr_kentry *ke) { - if (ke->pfrke_intrpool) + if (ke->pfrke_intrpool) { pool_put(&pfr_kentry_pl2, ke); - else + } else { pool_put(&pfr_kentry_pl, ke); + } } static void pfr_insert_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq, u_int64_t tzero) { 
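/*
 * A minimal sketch (hypothetical name) of the two-pool allocation
 * pattern pfr_create_kentry() above uses: entries created from
 * interrupt context come from a dedicated pool, and pfrke_intrpool
 * records the owner so pfr_destroy_kentry() can return the entry to
 * the right pool later.
 */
static struct pfr_kentry *
kentry_alloc_sketch(int intr)
{
	struct pfr_kentry *ke;

	ke = pool_get(intr ? &pfr_kentry_pl2 : &pfr_kentry_pl, PR_WAITOK);
	if (ke == NULL) {
		return NULL;
	}
	bzero(ke, sizeof(*ke));
	ke->pfrke_intrpool = intr;      /* remembered for pool_put() */
	return ke;
}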
- struct pfr_kentry *p; - int rv, n = 0; + struct pfr_kentry *p; + int rv, n = 0; SLIST_FOREACH(p, workq, pfrke_workq) { rv = pfr_route_kentry(kt, p); @@ -887,32 +978,35 @@ pfr_insert_kentries(struct pfr_ktable *kt, int pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, u_int64_t tzero) { - struct pfr_kentry *p; - int rv; + struct pfr_kentry *p; + int rv; p = pfr_lookup_addr(kt, ad, 1); - if (p != NULL) - return (0); + if (p != NULL) { + return 0; + } p = pfr_create_kentry(ad, 1); - if (p == NULL) - return (EINVAL); + if (p == NULL) { + return EINVAL; + } rv = pfr_route_kentry(kt, p); - if (rv) - return (rv); + if (rv) { + return rv; + } p->pfrke_tzero = tzero; kt->pfrkt_cnt++; - return (0); + return 0; } static void pfr_remove_kentries(struct pfr_ktable *kt, struct pfr_kentryworkq *workq) { - struct pfr_kentry *p; - int n = 0; + struct pfr_kentry *p; + int n = 0; SLIST_FOREACH(p, workq, pfrke_workq) { pfr_unroute_kentry(kt, p); @@ -926,25 +1020,26 @@ static void pfr_clean_node_mask(struct pfr_ktable *kt, struct pfr_kentryworkq *workq) { - struct pfr_kentry *p; + struct pfr_kentry *p; SLIST_FOREACH(p, workq, pfrke_workq) - pfr_unroute_kentry(kt, p); + pfr_unroute_kentry(kt, p); } static void pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero, int negchange) { - struct pfr_kentry *p; + struct pfr_kentry *p; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); SLIST_FOREACH(p, workq, pfrke_workq) { - if (negchange) + if (negchange) { p->pfrke_not = !p->pfrke_not; - bzero(p->pfrke_packets, sizeof (p->pfrke_packets)); - bzero(p->pfrke_bytes, sizeof (p->pfrke_bytes)); + } + bzero(p->pfrke_packets, sizeof(p->pfrke_packets)); + bzero(p->pfrke_bytes, sizeof(p->pfrke_bytes)); p->pfrke_tzero = tzero; } } @@ -952,35 +1047,37 @@ pfr_clstats_kentries(struct pfr_kentryworkq *workq, u_int64_t tzero, static void pfr_reset_feedback(user_addr_t addr, int size, int flags) { - struct pfr_addr ad; - int i; + struct pfr_addr ad; + int i; - for (i = 0; i < size; i++, addr += sizeof (ad)) { - if (COPYIN(addr, &ad, sizeof (ad), flags)) + for (i = 0; i < size; i++, addr += sizeof(ad)) { + if (COPYIN(addr, &ad, sizeof(ad), flags)) { break; + } ad.pfra_fback = PFR_FB_NONE; - if (COPYOUT(&ad, addr, sizeof (ad), flags)) + if (COPYOUT(&ad, addr, sizeof(ad), flags)) { break; + } } } static void pfr_prepare_network(union sockaddr_union *sa, int af, int net) { - int i; + int i; - bzero(sa, sizeof (*sa)); + bzero(sa, sizeof(*sa)); if (af == AF_INET) { - sa->sin.sin_len = sizeof (sa->sin); + sa->sin.sin_len = sizeof(sa->sin); sa->sin.sin_family = AF_INET; - sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0; + sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32 - net)) : 0; } else if (af == AF_INET6) { - sa->sin6.sin6_len = sizeof (sa->sin6); + sa->sin6.sin6_len = sizeof(sa->sin6); sa->sin6.sin6_family = AF_INET6; for (i = 0; i < 4; i++) { if (net <= 32) { sa->sin6.sin6_addr.s6_addr32[i] = - net ? htonl(-1 << (32-net)) : 0; + net ? 
htonl(-1 << (32 - net)) : 0; break; } sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF; @@ -992,79 +1089,85 @@ pfr_prepare_network(union sockaddr_union *sa, int af, int net) static int pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) { - union sockaddr_union mask; - struct radix_node *rn; - struct radix_node_head *head; + union sockaddr_union mask; + struct radix_node *rn; + struct radix_node_head *head; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - bzero(ke->pfrke_node, sizeof (ke->pfrke_node)); - if (ke->pfrke_af == AF_INET) + bzero(ke->pfrke_node, sizeof(ke->pfrke_node)); + if (ke->pfrke_af == AF_INET) { head = kt->pfrkt_ip4; - else if (ke->pfrke_af == AF_INET6) + } else if (ke->pfrke_af == AF_INET6) { head = kt->pfrkt_ip6; - else - return (-1); + } else { + return -1; + } if (KENTRY_NETWORK(ke)) { pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node); - } else + } else { rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node); + } - return (rn == NULL ? -1 : 0); + return rn == NULL ? -1 : 0; } static int pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke) { - union sockaddr_union mask; - struct radix_node *rn; - struct radix_node_head *head; + union sockaddr_union mask; + struct radix_node *rn; + struct radix_node_head *head; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (ke->pfrke_af == AF_INET) + if (ke->pfrke_af == AF_INET) { head = kt->pfrkt_ip4; - else if (ke->pfrke_af == AF_INET6) + } else if (ke->pfrke_af == AF_INET6) { head = kt->pfrkt_ip6; - else - return (-1); + } else { + return -1; + } if (KENTRY_NETWORK(ke)) { pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net); rn = rn_delete(&ke->pfrke_sa, &mask, head); - } else + } else { rn = rn_delete(&ke->pfrke_sa, NULL, head); + } if (rn == NULL) { printf("pfr_unroute_kentry: delete failed.\n"); - return (-1); + return -1; } - return (0); + return 0; } static void pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke) { - bzero(ad, sizeof (*ad)); - if (ke == NULL) + bzero(ad, sizeof(*ad)); + if (ke == NULL) { return; + } ad->pfra_af = ke->pfrke_af; ad->pfra_net = ke->pfrke_net; ad->pfra_not = ke->pfrke_not; - if (ad->pfra_af == AF_INET) + if (ad->pfra_af == AF_INET) { ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr; - else if (ad->pfra_af == AF_INET6) + } else if (ad->pfra_af == AF_INET6) { ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr; + } } static int pfr_walktree(struct radix_node *rn, void *arg) { - struct pfr_kentry *ke = (struct pfr_kentry *)rn; - struct pfr_walktree *w = arg; - int flags = w->pfrw_flags; + struct pfr_kentry *ke = (struct pfr_kentry *)rn; + struct pfr_walktree *w = arg; + int flags = w->pfrw_flags; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); @@ -1073,9 +1176,10 @@ pfr_walktree(struct radix_node *rn, void *arg) ke->pfrke_mark = 0; break; case PFRW_SWEEP: - if (ke->pfrke_mark) + if (ke->pfrke_mark) { break; - /* FALLTHROUGH */ + } + /* FALLTHROUGH */ case PFRW_ENQUEUE: SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq); w->pfrw_cnt++; @@ -1085,9 +1189,10 @@ pfr_walktree(struct radix_node *rn, void *arg) struct pfr_addr ad; pfr_copyout_addr(&ad, ke); - if (copyout(&ad, w->pfrw_addr, sizeof (ad))) - return (EFAULT); - w->pfrw_addr += sizeof (ad); + if (copyout(&ad, w->pfrw_addr, sizeof(ad))) { + return EFAULT; + } + w->pfrw_addr += sizeof(ad); } break; case PFRW_GET_ASTATS: @@ -1102,71 +1207,80 @@ pfr_walktree(struct radix_node *rn, void *arg) as._pad = 0; #endif bcopy(ke->pfrke_packets, as.pfras_packets, 
- sizeof (as.pfras_packets)); + sizeof(as.pfras_packets)); bcopy(ke->pfrke_bytes, as.pfras_bytes, - sizeof (as.pfras_bytes)); + sizeof(as.pfras_bytes)); as.pfras_tzero = ke->pfrke_tzero; - if (COPYOUT(&as, w->pfrw_astats, sizeof (as), flags)) - return (EFAULT); - w->pfrw_astats += sizeof (as); + if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags)) { + return EFAULT; + } + w->pfrw_astats += sizeof(as); } break; case PFRW_POOL_GET: - if (ke->pfrke_not) + if (ke->pfrke_not) { break; /* negative entries are ignored */ + } if (!w->pfrw_cnt--) { w->pfrw_kentry = ke; - return (1); /* finish search */ + return 1; /* finish search */ } break; case PFRW_DYNADDR_UPDATE: if (ke->pfrke_af == AF_INET) { - if (w->pfrw_dyn->pfid_acnt4++ > 0) + if (w->pfrw_dyn->pfid_acnt4++ > 0) { break; + } pfr_prepare_network(&pfr_mask, AF_INET, ke->pfrke_net); w->pfrw_dyn->pfid_addr4 = *SUNION2PF( - &ke->pfrke_sa, AF_INET); + &ke->pfrke_sa, AF_INET); w->pfrw_dyn->pfid_mask4 = *SUNION2PF( - &pfr_mask, AF_INET); + &pfr_mask, AF_INET); } else if (ke->pfrke_af == AF_INET6) { - if (w->pfrw_dyn->pfid_acnt6++ > 0) + if (w->pfrw_dyn->pfid_acnt6++ > 0) { break; + } pfr_prepare_network(&pfr_mask, AF_INET6, ke->pfrke_net); w->pfrw_dyn->pfid_addr6 = *SUNION2PF( - &ke->pfrke_sa, AF_INET6); + &ke->pfrke_sa, AF_INET6); w->pfrw_dyn->pfid_mask6 = *SUNION2PF( - &pfr_mask, AF_INET6); + &pfr_mask, AF_INET6); } break; } - return (0); + return 0; } int pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) { - struct pfr_ktableworkq workq; - struct pfr_ktable *p; - int xdel = 0; + struct pfr_ktableworkq workq; + struct pfr_ktable *p; + int xdel = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS); - if (pfr_fix_anchor(filter->pfrt_anchor)) - return (EINVAL); - if (pfr_table_count(filter, flags) < 0) - return (ENOENT); + if (pfr_fix_anchor(filter->pfrt_anchor)) { + return EINVAL; + } + if (pfr_table_count(filter, flags) < 0) { + return ENOENT; + } SLIST_INIT(&workq); RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { - if (pfr_skip_table(filter, p, flags)) + if (pfr_skip_table(filter, p, flags)) { continue; - if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0) + } + if (strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR) == 0) { continue; - if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) + } + if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { continue; + } p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); xdel++; @@ -1174,48 +1288,54 @@ pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags) if (!(flags & PFR_FLAG_DUMMY)) { pfr_setflags_ktables(&workq); } - if (ndel != NULL) + if (ndel != NULL) { *ndel = xdel; - return (0); + } + return 0; } int pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags) { - struct pfr_ktableworkq addq, changeq; - struct pfr_ktable *p, *q, *r, key; - int i, rv, xadd = 0; - u_int64_t tzero = pf_calendar_time_second(); + struct pfr_ktableworkq addq, changeq; + struct pfr_ktable *p, *q, *r, key; + int i, rv, xadd = 0; + u_int64_t tzero = pf_calendar_time_second(); LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY); SLIST_INIT(&addq); SLIST_INIT(&changeq); - for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) { - if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags)) + for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) { + if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) { senderr(EFAULT); + } 
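/*
 * A minimal sketch (hypothetical name) of the senderr() unwind pattern
 * used throughout pf_table.c, including the pfr_add_tables() loop
 * above: senderr(e) is "rv = (e); goto _bad", so every exit after the
 * first allocation funnels through a single cleanup label.
 */
static int
unwind_sketch(user_addr_t uaddr, size_t len)
{
	void *buf;
	int rv;

	buf = _MALLOC(len, M_TEMP, M_WAITOK);
	if (buf == NULL) {
		return ENOMEM;
	}
	if (copyin(uaddr, buf, len)) {
		senderr(EFAULT);        /* rv = EFAULT; goto _bad */
	}
	/* ...work with buf... */
	_FREE(buf, M_TEMP);
	return 0;
_bad:
	_FREE(buf, M_TEMP);
	return rv;
}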
pfr_table_copyin_cleanup(&key.pfrkt_t); if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK, - flags & PFR_FLAG_USERIOCTL)) + flags & PFR_FLAG_USERIOCTL)) { senderr(EINVAL); + } key.pfrkt_flags |= PFR_TFLAG_ACTIVE; p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); if (p == NULL) { p = pfr_create_ktable(&key.pfrkt_t, tzero, 1); - if (p == NULL) + if (p == NULL) { senderr(ENOMEM); + } SLIST_FOREACH(q, &addq, pfrkt_workq) { - if (!pfr_ktable_compare(p, q)) + if (!pfr_ktable_compare(p, q)) { goto _skip; + } } SLIST_INSERT_HEAD(&addq, p, pfrkt_workq); xadd++; - if (!key.pfrkt_anchor[0]) + if (!key.pfrkt_anchor[0]) { goto _skip; + } /* find or create root table */ - bzero(key.pfrkt_anchor, sizeof (key.pfrkt_anchor)); + bzero(key.pfrkt_anchor, sizeof(key.pfrkt_anchor)); r = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); if (r != NULL) { p->pfrkt_root = r; @@ -1229,172 +1349,192 @@ pfr_add_tables(user_addr_t tbl, int size, int *nadd, int flags) } key.pfrkt_flags = 0; r = pfr_create_ktable(&key.pfrkt_t, 0, 1); - if (r == NULL) + if (r == NULL) { senderr(ENOMEM); + } SLIST_INSERT_HEAD(&addq, r, pfrkt_workq); p->pfrkt_root = r; } else if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { SLIST_FOREACH(q, &changeq, pfrkt_workq) - if (!pfr_ktable_compare(&key, q)) - goto _skip; + if (!pfr_ktable_compare(&key, q)) { + goto _skip; + } p->pfrkt_nflags = (p->pfrkt_flags & ~PFR_TFLAG_USRMASK) | key.pfrkt_flags; SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq); xadd++; } _skip: - ; + ; } if (!(flags & PFR_FLAG_DUMMY)) { pfr_insert_ktables(&addq); pfr_setflags_ktables(&changeq); - } else + } else { pfr_destroy_ktables(&addq, 0); - if (nadd != NULL) + } + if (nadd != NULL) { *nadd = xadd; - return (0); + } + return 0; _bad: pfr_destroy_ktables(&addq, 0); - return (rv); + return rv; } int pfr_del_tables(user_addr_t tbl, int size, int *ndel, int flags) { - struct pfr_ktableworkq workq; - struct pfr_ktable *p, *q, key; - int i, xdel = 0; + struct pfr_ktableworkq workq; + struct pfr_ktable *p, *q, key; + int i, xdel = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY); SLIST_INIT(&workq); - for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) { - if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags)) - return (EFAULT); + for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) { + if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) { + return EFAULT; + } pfr_table_copyin_cleanup(&key.pfrkt_t); if (pfr_validate_table(&key.pfrkt_t, 0, - flags & PFR_FLAG_USERIOCTL)) - return (EINVAL); + flags & PFR_FLAG_USERIOCTL)) { + return EINVAL; + } p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { SLIST_FOREACH(q, &workq, pfrkt_workq) - if (!pfr_ktable_compare(p, q)) - goto _skip; + if (!pfr_ktable_compare(p, q)) { + goto _skip; + } p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE; SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); xdel++; } _skip: - ; + ; } if (!(flags & PFR_FLAG_DUMMY)) { pfr_setflags_ktables(&workq); } - if (ndel != NULL) + if (ndel != NULL) { *ndel = xdel; - return (0); + } + return 0; } int pfr_get_tables(struct pfr_table *filter, user_addr_t tbl, int *size, - int flags) + int flags) { - struct pfr_ktable *p; - int n, nn; + struct pfr_ktable *p; + int n, nn; ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS); - if (pfr_fix_anchor(filter->pfrt_anchor)) - return (EINVAL); + if (pfr_fix_anchor(filter->pfrt_anchor)) { + return EINVAL; + } n = nn = pfr_table_count(filter, flags); - if (n < 0) - return (ENOENT); + if (n < 
0) { + return ENOENT; + } if (n > *size) { *size = n; - return (0); + return 0; } RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { - if (pfr_skip_table(filter, p, flags)) + if (pfr_skip_table(filter, p, flags)) { continue; - if (n-- <= 0) + } + if (n-- <= 0) { continue; - if (COPYOUT(&p->pfrkt_t, tbl, sizeof (p->pfrkt_t), flags)) - return (EFAULT); - tbl += sizeof (p->pfrkt_t); + } + if (COPYOUT(&p->pfrkt_t, tbl, sizeof(p->pfrkt_t), flags)) { + return EFAULT; + } + tbl += sizeof(p->pfrkt_t); } if (n) { printf("pfr_get_tables: corruption detected (%d).\n", n); - return (ENOTTY); + return ENOTTY; } *size = nn; - return (0); + return 0; } int pfr_get_tstats(struct pfr_table *filter, user_addr_t tbl, int *size, - int flags) + int flags) { - struct pfr_ktable *p; - struct pfr_ktableworkq workq; - int n, nn; - u_int64_t tzero = pf_calendar_time_second(); + struct pfr_ktable *p; + struct pfr_ktableworkq workq; + int n, nn; + u_int64_t tzero = pf_calendar_time_second(); LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); /* XXX PFR_FLAG_CLSTATS disabled */ ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_ALLRSETS); - if (pfr_fix_anchor(filter->pfrt_anchor)) - return (EINVAL); + if (pfr_fix_anchor(filter->pfrt_anchor)) { + return EINVAL; + } n = nn = pfr_table_count(filter, flags); - if (n < 0) - return (ENOENT); + if (n < 0) { + return ENOENT; + } if (n > *size) { *size = n; - return (0); + return 0; } SLIST_INIT(&workq); RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { - if (pfr_skip_table(filter, p, flags)) + if (pfr_skip_table(filter, p, flags)) { continue; - if (n-- <= 0) + } + if (n-- <= 0) { continue; - if (COPYOUT(&p->pfrkt_ts, tbl, sizeof (p->pfrkt_ts), flags)) { - return (EFAULT); } - tbl += sizeof (p->pfrkt_ts); + if (COPYOUT(&p->pfrkt_ts, tbl, sizeof(p->pfrkt_ts), flags)) { + return EFAULT; + } + tbl += sizeof(p->pfrkt_ts); SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); } - if (flags & PFR_FLAG_CLSTATS) + if (flags & PFR_FLAG_CLSTATS) { pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); + } if (n) { printf("pfr_get_tstats: corruption detected (%d).\n", n); - return (ENOTTY); + return ENOTTY; } *size = nn; - return (0); + return 0; } int pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags) { - struct pfr_ktableworkq workq; - struct pfr_ktable *p, key; - int i, xzero = 0; - u_int64_t tzero = pf_calendar_time_second(); + struct pfr_ktableworkq workq; + struct pfr_ktable *p, key; + int i, xzero = 0; + u_int64_t tzero = pf_calendar_time_second(); LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); SLIST_INIT(&workq); - for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) { - if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags)) - return (EFAULT); + for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) { + if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) { + return EFAULT; + } pfr_table_copyin_cleanup(&key.pfrkt_t); - if (pfr_validate_table(&key.pfrkt_t, 0, 0)) - return (EINVAL); + if (pfr_validate_table(&key.pfrkt_t, 0, 0)) { + return EINVAL; + } p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); if (p != NULL) { SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); @@ -1404,138 +1544,157 @@ pfr_clr_tstats(user_addr_t tbl, int size, int *nzero, int flags) if (!(flags & PFR_FLAG_DUMMY)) { pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO); } - if (nzero != NULL) + if (nzero != NULL) { *nzero = xzero; - return (0); + } + return 0; } int pfr_set_tflags(user_addr_t tbl, int size, int setflag, int 
clrflag, - int *nchange, int *ndel, int flags) + int *nchange, int *ndel, int flags) { - struct pfr_ktableworkq workq; - struct pfr_ktable *p, *q, key; - int i, xchange = 0, xdel = 0; + struct pfr_ktableworkq workq; + struct pfr_ktable *p, *q, key; + int i, xchange = 0, xdel = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY); if ((setflag & ~PFR_TFLAG_USRMASK) || (clrflag & ~PFR_TFLAG_USRMASK) || - (setflag & clrflag)) - return (EINVAL); + (setflag & clrflag)) { + return EINVAL; + } SLIST_INIT(&workq); - for (i = 0; i < size; i++, tbl += sizeof (key.pfrkt_t)) { - if (COPYIN(tbl, &key.pfrkt_t, sizeof (key.pfrkt_t), flags)) - return (EFAULT); + for (i = 0; i < size; i++, tbl += sizeof(key.pfrkt_t)) { + if (COPYIN(tbl, &key.pfrkt_t, sizeof(key.pfrkt_t), flags)) { + return EFAULT; + } pfr_table_copyin_cleanup(&key.pfrkt_t); if (pfr_validate_table(&key.pfrkt_t, 0, - flags & PFR_FLAG_USERIOCTL)) - return (EINVAL); + flags & PFR_FLAG_USERIOCTL)) { + return EINVAL; + } p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) { p->pfrkt_nflags = (p->pfrkt_flags | setflag) & ~clrflag; - if (p->pfrkt_nflags == p->pfrkt_flags) + if (p->pfrkt_nflags == p->pfrkt_flags) { goto _skip; + } SLIST_FOREACH(q, &workq, pfrkt_workq) - if (!pfr_ktable_compare(p, q)) - goto _skip; + if (!pfr_ktable_compare(p, q)) { + goto _skip; + } SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) && (clrflag & PFR_TFLAG_PERSIST) && - !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) + !(p->pfrkt_flags & PFR_TFLAG_REFERENCED)) { xdel++; - else + } else { xchange++; + } } _skip: - ; + ; } if (!(flags & PFR_FLAG_DUMMY)) { pfr_setflags_ktables(&workq); } - if (nchange != NULL) + if (nchange != NULL) { *nchange = xchange; - if (ndel != NULL) + } + if (ndel != NULL) { *ndel = xdel; - return (0); + } + return 0; } int pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags) { - struct pfr_ktableworkq workq; - struct pfr_ktable *p; - struct pf_ruleset *rs; - int xdel = 0; + struct pfr_ktableworkq workq; + struct pfr_ktable *p; + struct pf_ruleset *rs; + int xdel = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); rs = pf_find_or_create_ruleset(trs->pfrt_anchor); - if (rs == NULL) - return (ENOMEM); + if (rs == NULL) { + return ENOMEM; + } SLIST_INIT(&workq); RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || - pfr_skip_table(trs, p, 0)) + pfr_skip_table(trs, p, 0)) { continue; + } p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); xdel++; } if (!(flags & PFR_FLAG_DUMMY)) { pfr_setflags_ktables(&workq); - if (ticket != NULL) + if (ticket != NULL) { *ticket = ++rs->tticket; + } rs->topen = 1; - } else + } else { pf_remove_if_empty_ruleset(rs); - if (ndel != NULL) + } + if (ndel != NULL) { *ndel = xdel; - return (0); + } + return 0; } int pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size, int *nadd, int *naddr, u_int32_t ticket, int flags) { - struct pfr_ktableworkq tableq; - struct pfr_kentryworkq addrq; - struct pfr_ktable *kt, *rt, *shadow, key; - struct pfr_kentry *p; - struct pfr_addr ad; - struct pf_ruleset *rs; - int i, rv, xadd = 0, xaddr = 0; + struct pfr_ktableworkq tableq; + struct pfr_kentryworkq addrq; + struct pfr_ktable *kt, *rt, *shadow, key; + struct pfr_kentry *p; + struct pfr_addr ad; + struct pf_ruleset *rs; + int i, rv, xadd = 0, xaddr 
= 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO); - if (size && !(flags & PFR_FLAG_ADDRSTOO)) - return (EINVAL); + if (size && !(flags & PFR_FLAG_ADDRSTOO)) { + return EINVAL; + } if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK, - flags & PFR_FLAG_USERIOCTL)) - return (EINVAL); + flags & PFR_FLAG_USERIOCTL)) { + return EINVAL; + } rs = pf_find_ruleset(tbl->pfrt_anchor); - if (rs == NULL || !rs->topen || ticket != rs->tticket) - return (EBUSY); + if (rs == NULL || !rs->topen || ticket != rs->tticket) { + return EBUSY; + } tbl->pfrt_flags |= PFR_TFLAG_INACTIVE; SLIST_INIT(&tableq); kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)(void *)tbl); if (kt == NULL) { kt = pfr_create_ktable(tbl, 0, 1); - if (kt == NULL) - return (ENOMEM); + if (kt == NULL) { + return ENOMEM; + } SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq); xadd++; - if (!tbl->pfrt_anchor[0]) + if (!tbl->pfrt_anchor[0]) { goto _skip; + } /* find or create root table */ - bzero(&key, sizeof (key)); + bzero(&key, sizeof(key)); strlcpy(key.pfrkt_name, tbl->pfrt_name, - sizeof (key.pfrkt_name)); + sizeof(key.pfrkt_name)); rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key); if (rt != NULL) { kt->pfrkt_root = rt; @@ -1544,29 +1703,34 @@ pfr_ina_define(struct pfr_table *tbl, user_addr_t addr, int size, rt = pfr_create_ktable(&key.pfrkt_t, 0, 1); if (rt == NULL) { pfr_destroy_ktables(&tableq, 0); - return (ENOMEM); + return ENOMEM; } SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq); kt->pfrkt_root = rt; - } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) + } else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE)) { xadd++; + } _skip: shadow = pfr_create_ktable(tbl, 0, 0); if (shadow == NULL) { pfr_destroy_ktables(&tableq, 0); - return (ENOMEM); + return ENOMEM; } SLIST_INIT(&addrq); - for (i = 0; i < size; i++, addr += sizeof (ad)) { - if (COPYIN(addr, &ad, sizeof (ad), flags)) + for (i = 0; i < size; i++, addr += sizeof(ad)) { + if (COPYIN(addr, &ad, sizeof(ad), flags)) { senderr(EFAULT); - if (pfr_validate_addr(&ad)) + } + if (pfr_validate_addr(&ad)) { senderr(EINVAL); - if (pfr_lookup_addr(shadow, &ad, 1) != NULL) + } + if (pfr_lookup_addr(shadow, &ad, 1) != NULL) { continue; + } p = pfr_create_kentry(&ad, 0); - if (p == NULL) + if (p == NULL) { senderr(ENOMEM); + } if (pfr_route_kentry(shadow, p)) { pfr_destroy_kentry(p); continue; @@ -1575,8 +1739,9 @@ _skip: xaddr++; } if (!(flags & PFR_FLAG_DUMMY)) { - if (kt->pfrkt_shadow != NULL) + if (kt->pfrkt_shadow != NULL) { pfr_destroy_ktable(kt->pfrkt_shadow, 1); + } kt->pfrkt_flags |= PFR_TFLAG_INACTIVE; pfr_insert_ktables(&tableq); shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ? 
@@ -1588,37 +1753,41 @@ _skip: pfr_destroy_ktables(&tableq, 0); pfr_destroy_kentries(&addrq); } - if (nadd != NULL) + if (nadd != NULL) { *nadd = xadd; - if (naddr != NULL) + } + if (naddr != NULL) { *naddr = xaddr; - return (0); + } + return 0; _bad: pfr_destroy_ktable(shadow, 0); pfr_destroy_ktables(&tableq, 0); pfr_destroy_kentries(&addrq); - return (rv); + return rv; } int pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags) { - struct pfr_ktableworkq workq; - struct pfr_ktable *p; - struct pf_ruleset *rs; - int xdel = 0; + struct pfr_ktableworkq workq; + struct pfr_ktable *p; + struct pf_ruleset *rs; + int xdel = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY); rs = pf_find_ruleset(trs->pfrt_anchor); - if (rs == NULL || !rs->topen || ticket != rs->tticket) - return (0); + if (rs == NULL || !rs->topen || ticket != rs->tticket) { + return 0; + } SLIST_INIT(&workq); RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || - pfr_skip_table(trs, p, 0)) + pfr_skip_table(trs, p, 0)) { continue; + } p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE; SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); xdel++; @@ -1628,38 +1797,42 @@ pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags) rs->topen = 0; pf_remove_if_empty_ruleset(rs); } - if (ndel != NULL) + if (ndel != NULL) { *ndel = xdel; - return (0); + } + return 0; } int pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, int *nchange, int flags) { - struct pfr_ktable *p, *q; - struct pfr_ktableworkq workq; - struct pf_ruleset *rs; - int xadd = 0, xchange = 0; - u_int64_t tzero = pf_calendar_time_second(); + struct pfr_ktable *p, *q; + struct pfr_ktableworkq workq; + struct pf_ruleset *rs; + int xadd = 0, xchange = 0; + u_int64_t tzero = pf_calendar_time_second(); LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); ACCEPT_FLAGS(flags, PFR_FLAG_ATOMIC | PFR_FLAG_DUMMY); rs = pf_find_ruleset(trs->pfrt_anchor); - if (rs == NULL || !rs->topen || ticket != rs->tticket) - return (EBUSY); + if (rs == NULL || !rs->topen || ticket != rs->tticket) { + return EBUSY; + } SLIST_INIT(&workq); RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) { if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) || - pfr_skip_table(trs, p, 0)) + pfr_skip_table(trs, p, 0)) { continue; + } SLIST_INSERT_HEAD(&workq, p, pfrkt_workq); - if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) + if (p->pfrkt_flags & PFR_TFLAG_ACTIVE) { xchange++; - else + } else { xadd++; + } } if (!(flags & PFR_FLAG_DUMMY)) { @@ -1670,30 +1843,33 @@ pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd, rs->topen = 0; pf_remove_if_empty_ruleset(rs); } - if (nadd != NULL) + if (nadd != NULL) { *nadd = xadd; - if (nchange != NULL) + } + if (nchange != NULL) { *nchange = xchange; + } - return (0); + return 0; } static void pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero) { - struct pfr_ktable *shadow = kt->pfrkt_shadow; - int nflags; + struct pfr_ktable *shadow = kt->pfrkt_shadow; + int nflags; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); if (shadow->pfrkt_cnt == NO_ADDRESSES) { - if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) + if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { pfr_clstats_ktable(kt, tzero, 1); + } } else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) { /* kt might contain addresses */ - struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; - struct pfr_kentry *p, *q, *next; - struct pfr_addr ad; + struct pfr_kentryworkq addrq, addq, changeq, delq, garbageq; + struct 
pfr_kentry *p, *q, *next; + struct pfr_addr ad; pfr_enqueue_addrs(shadow, &addrq, NULL, 0); pfr_mark_addrs(kt); @@ -1703,13 +1879,14 @@ pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero) SLIST_INIT(&garbageq); pfr_clean_node_mask(shadow, &addrq); for (p = SLIST_FIRST(&addrq); p != NULL; p = next) { - next = SLIST_NEXT(p, pfrke_workq); /* XXX */ + next = SLIST_NEXT(p, pfrke_workq); /* XXX */ pfr_copyout_addr(&ad, p); q = pfr_lookup_addr(kt, &ad, 1); if (q != NULL) { - if (q->pfrke_not != p->pfrke_not) + if (q->pfrke_not != p->pfrke_not) { SLIST_INSERT_HEAD(&changeq, q, pfrke_workq); + } q->pfrke_mark = 1; SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq); } else { @@ -1742,8 +1919,8 @@ pfr_commit_ktable(struct pfr_ktable *kt, u_int64_t tzero) void pfr_table_copyin_cleanup(struct pfr_table *tbl) { - tbl->pfrt_anchor[sizeof (tbl->pfrt_anchor) - 1] = '\0'; - tbl->pfrt_name[sizeof (tbl->pfrt_name) - 1] = '\0'; + tbl->pfrt_anchor[sizeof(tbl->pfrt_anchor) - 1] = '\0'; + tbl->pfrt_name[sizeof(tbl->pfrt_name) - 1] = '\0'; } static int @@ -1751,20 +1928,27 @@ pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved) { int i; - if (!tbl->pfrt_name[0]) - return (-1); - if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0) - return (-1); - if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1]) - return (-1); - for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) - if (tbl->pfrt_name[i]) - return (-1); - if (pfr_fix_anchor(tbl->pfrt_anchor)) - return (-1); - if (tbl->pfrt_flags & ~allowedflags) - return (-1); - return (0); + if (!tbl->pfrt_name[0]) { + return -1; + } + if (no_reserved && strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR) == 0) { + return -1; + } + if (tbl->pfrt_name[PF_TABLE_NAME_SIZE - 1]) { + return -1; + } + for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++) { + if (tbl->pfrt_name[i]) { + return -1; + } + } + if (pfr_fix_anchor(tbl->pfrt_anchor)) { + return -1; + } + if (tbl->pfrt_flags & ~allowedflags) { + return -1; + } + return 0; } /* @@ -1783,17 +1967,21 @@ pfr_fix_anchor(char *anchor) path = anchor; off = 1; - while (*++path == '/') + while (*++path == '/') { off++; + } bcopy(path, anchor, siz - off); memset(anchor + siz - off, 0, off); } - if (anchor[siz - 1]) - return (-1); - for (i = strlen(anchor); i < (int)siz; i++) - if (anchor[i]) - return (-1); - return (0); + if (anchor[siz - 1]) { + return -1; + } + for (i = strlen(anchor); i < (int)siz; i++) { + if (anchor[i]) { + return -1; + } + } + return 0; } static int @@ -1801,34 +1989,37 @@ pfr_table_count(struct pfr_table *filter, int flags) { struct pf_ruleset *rs; - if (flags & PFR_FLAG_ALLRSETS) - return (pfr_ktable_cnt); + if (flags & PFR_FLAG_ALLRSETS) { + return pfr_ktable_cnt; + } if (filter->pfrt_anchor[0]) { rs = pf_find_ruleset(filter->pfrt_anchor); - return ((rs != NULL) ? rs->tables : -1); + return (rs != NULL) ? 
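[Annotation] The pfr_validate_table changes above are brace/return-style only; the invariant it enforces is that a table name must be non-empty, NUL-terminated within PF_TABLE_NAME_SIZE, and zero-padded through the end of the fixed buffer (so uninitialized tail bytes cannot leak into comparisons). A stand-alone sketch of that check, with a hypothetical helper name:

#include <string.h>

#define PF_TABLE_NAME_SIZE 32

/* Returns 0 if name is a valid fixed-width pf table name, -1 otherwise. */
static int
table_name_ok(const char name[PF_TABLE_NAME_SIZE])
{
	size_t i;

	if (!name[0]) {                       /* must be non-empty */
		return -1;
	}
	if (name[PF_TABLE_NAME_SIZE - 1]) {   /* must be NUL-terminated */
		return -1;
	}
	for (i = strlen(name); i < PF_TABLE_NAME_SIZE; i++) {
		if (name[i]) {                /* tail must be all zeros */
			return -1;
		}
	}
	return 0;
}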
rs->tables : -1; } - return (pf_main_ruleset.tables); + return pf_main_ruleset.tables; } static int pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags) { - if (flags & PFR_FLAG_ALLRSETS) - return (0); - if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) - return (1); - return (0); + if (flags & PFR_FLAG_ALLRSETS) { + return 0; + } + if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor)) { + return 1; + } + return 0; } static void pfr_insert_ktables(struct pfr_ktableworkq *workq) { - struct pfr_ktable *p; + struct pfr_ktable *p; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); SLIST_FOREACH(p, workq, pfrkt_workq) - pfr_insert_ktable(p); + pfr_insert_ktable(p); } static void @@ -1838,16 +2029,18 @@ pfr_insert_ktable(struct pfr_ktable *kt) RB_INSERT(pfr_ktablehead, &pfr_ktables, kt); pfr_ktable_cnt++; - if (kt->pfrkt_root != NULL) - if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) + if (kt->pfrkt_root != NULL) { + if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++) { pfr_setflags_ktable(kt->pfrkt_root, - kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR); + kt->pfrkt_root->pfrkt_flags | PFR_TFLAG_REFDANCHOR); + } + } } static void pfr_setflags_ktables(struct pfr_ktableworkq *workq) { - struct pfr_ktable *p, *q; + struct pfr_ktable *p, *q; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); @@ -1860,22 +2053,26 @@ pfr_setflags_ktables(struct pfr_ktableworkq *workq) static void pfr_setflags_ktable(struct pfr_ktable *kt, int newf) { - struct pfr_kentryworkq addrq; + struct pfr_kentryworkq addrq; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); if (!(newf & PFR_TFLAG_REFERENCED) && - !(newf & PFR_TFLAG_PERSIST)) + !(newf & PFR_TFLAG_PERSIST)) { newf &= ~PFR_TFLAG_ACTIVE; - if (!(newf & PFR_TFLAG_ACTIVE)) + } + if (!(newf & PFR_TFLAG_ACTIVE)) { newf &= ~PFR_TFLAG_USRMASK; + } if (!(newf & PFR_TFLAG_SETMASK)) { RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt); - if (kt->pfrkt_root != NULL) - if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) + if (kt->pfrkt_root != NULL) { + if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]) { pfr_setflags_ktable(kt->pfrkt_root, kt->pfrkt_root->pfrkt_flags & ~PFR_TFLAG_REFDANCHOR); + } + } pfr_destroy_ktable(kt, 1); pfr_ktable_cnt--; return; @@ -1894,18 +2091,18 @@ pfr_setflags_ktable(struct pfr_ktable *kt, int newf) static void pfr_clstats_ktables(struct pfr_ktableworkq *workq, u_int64_t tzero, int recurse) { - struct pfr_ktable *p; + struct pfr_ktable *p; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); SLIST_FOREACH(p, workq, pfrkt_workq) - pfr_clstats_ktable(p, tzero, recurse); + pfr_clstats_ktable(p, tzero, recurse); } static void pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse) { - struct pfr_kentryworkq addrq; + struct pfr_kentryworkq addrq; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); @@ -1913,8 +2110,8 @@ pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse) pfr_enqueue_addrs(kt, &addrq, NULL, 0); pfr_clstats_kentries(&addrq, tzero, 0); } - bzero(kt->pfrkt_packets, sizeof (kt->pfrkt_packets)); - bzero(kt->pfrkt_bytes, sizeof (kt->pfrkt_bytes)); + bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets)); + bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes)); kt->pfrkt_match = kt->pfrkt_nomatch = 0; kt->pfrkt_tzero = tzero; } @@ -1922,22 +2119,23 @@ pfr_clstats_ktable(struct pfr_ktable *kt, u_int64_t tzero, int recurse) static struct pfr_ktable * pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset) { - struct pfr_ktable *kt; - struct pf_ruleset *rs; + struct pfr_ktable *kt; + 
struct pf_ruleset *rs; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); kt = pool_get(&pfr_ktable_pl, PR_WAITOK); - if (kt == NULL) - return (NULL); - bzero(kt, sizeof (*kt)); + if (kt == NULL) { + return NULL; + } + bzero(kt, sizeof(*kt)); kt->pfrkt_t = *tbl; if (attachruleset) { rs = pf_find_or_create_ruleset(tbl->pfrt_anchor); if (!rs) { pfr_destroy_ktable(kt, 0); - return (NULL); + return NULL; } kt->pfrkt_rs = rs; rs->tables++; @@ -1948,17 +2146,17 @@ pfr_create_ktable(struct pfr_table *tbl, u_int64_t tzero, int attachruleset) !rn_inithead((void **)&kt->pfrkt_ip6, offsetof(struct sockaddr_in6, sin6_addr) * 8)) { pfr_destroy_ktable(kt, 0); - return (NULL); + return NULL; } kt->pfrkt_tzero = tzero; - return (kt); + return kt; } static void pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) { - struct pfr_ktable *p, *q; + struct pfr_ktable *p, *q; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); @@ -1971,7 +2169,7 @@ pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr) static void pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) { - struct pfr_kentryworkq addrq; + struct pfr_kentryworkq addrq; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); @@ -1980,12 +2178,15 @@ pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr) pfr_clean_node_mask(kt, &addrq); pfr_destroy_kentries(&addrq); } - if (kt->pfrkt_ip4 != NULL) + if (kt->pfrkt_ip4 != NULL) { _FREE((caddr_t)kt->pfrkt_ip4, M_RTABLE); - if (kt->pfrkt_ip6 != NULL) + } + if (kt->pfrkt_ip6 != NULL) { _FREE((caddr_t)kt->pfrkt_ip6, M_RTABLE); - if (kt->pfrkt_shadow != NULL) + } + if (kt->pfrkt_shadow != NULL) { pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr); + } if (kt->pfrkt_rs != NULL) { kt->pfrkt_rs->tables--; pf_remove_if_empty_ruleset(kt->pfrkt_rs); @@ -1998,9 +2199,10 @@ pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q) { int d; - if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) - return (d); - return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor)); + if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE))) { + return d; + } + return strcmp(p->pfrkt_anchor, q->pfrkt_anchor); } static struct pfr_ktable * @@ -2009,85 +2211,95 @@ pfr_lookup_table(struct pfr_table *tbl) LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); /* struct pfr_ktable start like a struct pfr_table */ - return (RB_FIND(pfr_ktablehead, &pfr_ktables, - (struct pfr_ktable *)(void *)tbl)); + return RB_FIND(pfr_ktablehead, &pfr_ktables, + (struct pfr_ktable *)(void *)tbl); } int pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af) { - struct pfr_kentry *ke = NULL; - int match; + struct pfr_kentry *ke = NULL; + int match; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) + if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) { kt = kt->pfrkt_root; - if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (0); + } + if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return 0; + } switch (af) { #if INET case AF_INET: pfr_sin.sin_addr.s_addr = a->addr32[0]; ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4); - if (ke && KENTRY_RNF_ROOT(ke)) + if (ke && KENTRY_RNF_ROOT(ke)) { ke = NULL; + } break; #endif /* INET */ #if INET6 case AF_INET6: - bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr)); + bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr)); ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); - if (ke && KENTRY_RNF_ROOT(ke)) + if (ke && KENTRY_RNF_ROOT(ke)) { ke = NULL; + } break; 
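[Annotation] pfr_ktable_compare above orders tables first by name (a bounded strncmp over the fixed-width buffer) and then by anchor path; this two-level key is what lets RB_FIND locate a ktable starting from a plain struct pfr_table, since a pfr_ktable begins with one. A user-space sketch of the same ordering, assuming only the two string fields (the anchor buffer size here is a stand-in, not the kernel's actual width):

#include <string.h>

#define PF_TABLE_NAME_SIZE 32

struct table_key {
	char name[PF_TABLE_NAME_SIZE];
	char anchor[1024];   /* placeholder size for the anchor path */
};

static int
table_key_compare(const struct table_key *p, const struct table_key *q)
{
	int d;

	/* Primary key: table name, bounded to the fixed buffer width. */
	if ((d = strncmp(p->name, q->name, PF_TABLE_NAME_SIZE))) {
		return d;
	}
	/* Secondary key: anchor path. */
	return strcmp(p->anchor, q->anchor);
}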
#endif /* INET6 */ } match = (ke && !ke->pfrke_not); - if (match) + if (match) { kt->pfrkt_match++; - else + } else { kt->pfrkt_nomatch++; - return (match); + } + return match; } void pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, u_int64_t len, int dir_out, int op_pass, int notrule) { - struct pfr_kentry *ke = NULL; + struct pfr_kentry *ke = NULL; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) + if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) { kt = kt->pfrkt_root; - if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) + } + if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { return; + } switch (af) { #if INET case AF_INET: pfr_sin.sin_addr.s_addr = a->addr32[0]; ke = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4); - if (ke && KENTRY_RNF_ROOT(ke)) + if (ke && KENTRY_RNF_ROOT(ke)) { ke = NULL; + } break; #endif /* INET */ #if INET6 case AF_INET6: - bcopy(a, &pfr_sin6.sin6_addr, sizeof (pfr_sin6.sin6_addr)); + bcopy(a, &pfr_sin6.sin6_addr, sizeof(pfr_sin6.sin6_addr)); ke = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); - if (ke && KENTRY_RNF_ROOT(ke)) + if (ke && KENTRY_RNF_ROOT(ke)) { ke = NULL; + } break; #endif /* INET6 */ default: ; } if ((ke == NULL || ke->pfrke_not) != notrule) { - if (op_pass != PFR_OP_PASS) + if (op_pass != PFR_OP_PASS) { printf("pfr_update_stats: assertion failed.\n"); + } op_pass = PFR_OP_XPASS; } kt->pfrkt_packets[dir_out][op_pass]++; @@ -2101,29 +2313,31 @@ pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af, struct pfr_ktable * pfr_attach_table(struct pf_ruleset *rs, char *name) { - struct pfr_ktable *kt, *rt; - struct pfr_table tbl; - struct pf_anchor *ac = rs->anchor; + struct pfr_ktable *kt, *rt; + struct pfr_table tbl; + struct pf_anchor *ac = rs->anchor; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - bzero(&tbl, sizeof (tbl)); - strlcpy(tbl.pfrt_name, name, sizeof (tbl.pfrt_name)); - if (ac != NULL) - strlcpy(tbl.pfrt_anchor, ac->path, sizeof (tbl.pfrt_anchor)); + bzero(&tbl, sizeof(tbl)); + strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name)); + if (ac != NULL) { + strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor)); + } kt = pfr_lookup_table(&tbl); if (kt == NULL) { kt = pfr_create_ktable(&tbl, pf_calendar_time_second(), 1); - if (kt == NULL) - return (NULL); + if (kt == NULL) { + return NULL; + } if (ac != NULL) { - bzero(tbl.pfrt_anchor, sizeof (tbl.pfrt_anchor)); + bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor)); rt = pfr_lookup_table(&tbl); if (rt == NULL) { rt = pfr_create_ktable(&tbl, 0, 1); if (rt == NULL) { pfr_destroy_ktable(kt, 0); - return (NULL); + return NULL; } pfr_insert_ktable(rt); } @@ -2131,9 +2345,10 @@ pfr_attach_table(struct pf_ruleset *rs, char *name) } pfr_insert_ktable(kt); } - if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) - pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED); - return (kt); + if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++) { + pfr_setflags_ktable(kt, kt->pfrkt_flags | PFR_TFLAG_REFERENCED); + } + return kt; } void @@ -2141,48 +2356,55 @@ pfr_detach_table(struct pfr_ktable *kt) { LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) + if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0) { printf("pfr_detach_table: refcount = %d.\n", kt->pfrkt_refcnt[PFR_REFCNT_RULE]); - else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) - pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED); + } else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE]) { + 
pfr_setflags_ktable(kt, kt->pfrkt_flags & ~PFR_TFLAG_REFERENCED); + } } int pfr_pool_get(struct pfr_ktable *kt, int *pidx, struct pf_addr *counter, struct pf_addr **raddr, struct pf_addr **rmask, sa_family_t af) { - struct pfr_kentry *ke, *ke2; - struct pf_addr *addr; - union sockaddr_union mask; - int idx = -1, use_counter = 0; + struct pfr_kentry *ke, *ke2; + struct pf_addr *addr; + union sockaddr_union mask; + int idx = -1, use_counter = 0; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - if (af == AF_INET) + if (af == AF_INET) { addr = (struct pf_addr *)&pfr_sin.sin_addr; - else if (af == AF_INET6) + } else if (af == AF_INET6) { addr = (struct pf_addr *)&pfr_sin6.sin6_addr; - else - return (-1); + } else { + return -1; + } - if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) + if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL) { kt = kt->pfrkt_root; - if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) - return (-1); + } + if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) { + return -1; + } - if (pidx != NULL) + if (pidx != NULL) { idx = *pidx; - if (counter != NULL && idx >= 0) + } + if (counter != NULL && idx >= 0) { use_counter = 1; - if (idx < 0) + } + if (idx < 0) { idx = 0; + } _next_block: ke = pfr_kentry_byidx(kt, idx, af); if (ke == NULL) { kt->pfrkt_nomatch++; - return (1); + return 1; } pfr_prepare_network(&pfr_mask, af, ke->pfrke_net); *raddr = SUNION2PF(&ke->pfrke_sa, af); @@ -2207,25 +2429,26 @@ _next_block: PF_ACPY(counter, addr, af); *pidx = idx; kt->pfrkt_match++; - return (0); + return 0; } for (;;) { /* we don't want to use a nested block */ - if (af == AF_INET) + if (af == AF_INET) { ke2 = (struct pfr_kentry *)rn_match(&pfr_sin, kt->pfrkt_ip4); - else if (af == AF_INET6) + } else if (af == AF_INET6) { ke2 = (struct pfr_kentry *)rn_match(&pfr_sin6, kt->pfrkt_ip6); - else - return (-1); /* never happens */ + } else { + return -1; /* never happens */ + } /* no need to check KENTRY_RNF_ROOT() here */ if (ke2 == ke) { /* lookup return the same block - perfect */ PF_ACPY(counter, addr, af); *pidx = idx; kt->pfrkt_match++; - return (0); + return 0; } /* we need to increase the counter past the nested block */ @@ -2245,11 +2468,11 @@ _next_block: static struct pfr_kentry * pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) { - struct pfr_walktree w; + struct pfr_walktree w; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - bzero(&w, sizeof (w)); + bzero(&w, sizeof(w)); w.pfrw_op = PFRW_POOL_GET; w.pfrw_cnt = idx; @@ -2258,36 +2481,38 @@ pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af) case AF_INET: (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); - return (w.pfrw_kentry); + return w.pfrw_kentry; #endif /* INET */ #if INET6 case AF_INET6: (void) kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w); - return (w.pfrw_kentry); + return w.pfrw_kentry; #endif /* INET6 */ default: - return (NULL); + return NULL; } } void pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn) { - struct pfr_walktree w; + struct pfr_walktree w; LCK_MTX_ASSERT(pf_lock, LCK_MTX_ASSERT_OWNED); - bzero(&w, sizeof (w)); + bzero(&w, sizeof(w)); w.pfrw_op = PFRW_DYNADDR_UPDATE; w.pfrw_dyn = dyn; dyn->pfid_acnt4 = 0; dyn->pfid_acnt6 = 0; - if (!dyn->pfid_af || dyn->pfid_af == AF_INET) + if (!dyn->pfid_af || dyn->pfid_af == AF_INET) { (void) kt->pfrkt_ip4->rnh_walktree(kt->pfrkt_ip4, pfr_walktree, &w); - if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) + } + if (!dyn->pfid_af || dyn->pfid_af == AF_INET6) { (void) 
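[Annotation] pfr_kentry_byidx above implements indexed lookup by walking the radix tree with a countdown cursor (w.pfrw_cnt = idx) rather than keeping any per-table index; the walker stops when the counter reaches zero and records the hit. A minimal illustration of that cursor pattern over a plain linked list (hypothetical types; the kernel drives it through rnh_walktree with a struct pfr_walktree):

#include <stddef.h>

struct entry {
	struct entry *next;
	/* payload ... */
};

struct walker {
	int           remaining;  /* countdown cursor, as in pfrw_cnt */
	struct entry *hit;        /* result slot, as in pfrw_kentry */
};

/* Visit callback: returns nonzero to stop the walk early. */
static int
visit(struct entry *e, struct walker *w)
{
	if (w->remaining-- == 0) {
		w->hit = e;
		return 1;
	}
	return 0;
}

static struct entry *
entry_byidx(struct entry *head, int idx)
{
	struct walker w = { .remaining = idx, .hit = NULL };
	struct entry *e;

	for (e = head; e != NULL; e = e->next) {
		if (visit(e, &w)) {
			break;
		}
	}
	return w.hit;
}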
kt->pfrkt_ip6->rnh_walktree(kt->pfrkt_ip6, pfr_walktree, &w); + } } diff --git a/bsd/net/pfkeyv2.h b/bsd/net/pfkeyv2.h index 74ec44859..d10af5141 100644 --- a/bsd/net/pfkeyv2.h +++ b/bsd/net/pfkeyv2.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $KAME: pfkeyv2.h,v 1.10 2000/03/22 07:04:20 sakane Exp $ */ @@ -68,11 +68,11 @@ #include /* -This file defines structures and symbols for the PF_KEY Version 2 -key management interface. It was written at the U.S. Naval Research -Laboratory. This file is in the public domain. The authors ask that -you leave this credit intact on any copies of this file. -*/ + * This file defines structures and symbols for the PF_KEY Version 2 + * key management interface. It was written at the U.S. Naval Research + * Laboratory. This file is in the public domain. The authors ask that + * you leave this credit intact on any copies of this file. + */ #ifndef __PFKEY_V2_H #define __PFKEY_V2_H 1 @@ -95,158 +95,158 @@ you leave this credit intact on any copies of this file. 
#define SADB_X_SPDUPDATE 13 #define SADB_X_SPDADD 14 -#define SADB_X_SPDDELETE 15 /* by policy index */ +#define SADB_X_SPDDELETE 15 /* by policy index */ #define SADB_X_SPDGET 16 #define SADB_X_SPDACQUIRE 17 #define SADB_X_SPDDUMP 18 #define SADB_X_SPDFLUSH 19 #define SADB_X_SPDSETIDX 20 #define SADB_X_SPDEXPIRE 21 -#define SADB_X_SPDDELETE2 22 /* by policy id */ +#define SADB_X_SPDDELETE2 22 /* by policy id */ #define SADB_GETSASTAT 23 -#define SADB_X_SPDENABLE 24 /* by policy id */ -#define SADB_X_SPDDISABLE 25 /* by policy id */ +#define SADB_X_SPDENABLE 24 /* by policy id */ +#define SADB_X_SPDDISABLE 25 /* by policy id */ #define SADB_MIGRATE 26 #define SADB_MAX 26 struct sadb_msg { - u_int8_t sadb_msg_version; - u_int8_t sadb_msg_type; - u_int8_t sadb_msg_errno; - u_int8_t sadb_msg_satype; - u_int16_t sadb_msg_len; - u_int16_t sadb_msg_reserved; - u_int32_t sadb_msg_seq; - u_int32_t sadb_msg_pid; + u_int8_t sadb_msg_version; + u_int8_t sadb_msg_type; + u_int8_t sadb_msg_errno; + u_int8_t sadb_msg_satype; + u_int16_t sadb_msg_len; + u_int16_t sadb_msg_reserved; + u_int32_t sadb_msg_seq; + u_int32_t sadb_msg_pid; }; struct sadb_ext { - u_int16_t sadb_ext_len; - u_int16_t sadb_ext_type; + u_int16_t sadb_ext_len; + u_int16_t sadb_ext_type; }; struct sadb_sa { - u_int16_t sadb_sa_len; - u_int16_t sadb_sa_exttype; - u_int32_t sadb_sa_spi; - u_int8_t sadb_sa_replay; - u_int8_t sadb_sa_state; - u_int8_t sadb_sa_auth; - u_int8_t sadb_sa_encrypt; - u_int32_t sadb_sa_flags; + u_int16_t sadb_sa_len; + u_int16_t sadb_sa_exttype; + u_int32_t sadb_sa_spi; + u_int8_t sadb_sa_replay; + u_int8_t sadb_sa_state; + u_int8_t sadb_sa_auth; + u_int8_t sadb_sa_encrypt; + u_int32_t sadb_sa_flags; }; #ifdef PRIVATE struct sadb_sa_2 { - struct sadb_sa sa; - u_int16_t sadb_sa_natt_port; + struct sadb_sa sa; + u_int16_t sadb_sa_natt_port; union { - u_int16_t sadb_reserved0; - u_int16_t sadb_sa_natt_interval; + u_int16_t sadb_reserved0; + u_int16_t sadb_sa_natt_interval; }; union { - u_int32_t sadb_reserved1; - u_int16_t sadb_sa_natt_offload_interval; + u_int32_t sadb_reserved1; + u_int16_t sadb_sa_natt_offload_interval; }; }; #endif /* PRIVATE */ struct sadb_lifetime { - u_int16_t sadb_lifetime_len; - u_int16_t sadb_lifetime_exttype; - u_int32_t sadb_lifetime_allocations; - u_int64_t sadb_lifetime_bytes; - u_int64_t sadb_lifetime_addtime; - u_int64_t sadb_lifetime_usetime; + u_int16_t sadb_lifetime_len; + u_int16_t sadb_lifetime_exttype; + u_int32_t sadb_lifetime_allocations; + u_int64_t sadb_lifetime_bytes; + u_int64_t sadb_lifetime_addtime; + u_int64_t sadb_lifetime_usetime; }; struct sadb_address { - u_int16_t sadb_address_len; - u_int16_t sadb_address_exttype; - u_int8_t sadb_address_proto; - u_int8_t sadb_address_prefixlen; - u_int16_t sadb_address_reserved; + u_int16_t sadb_address_len; + u_int16_t sadb_address_exttype; + u_int8_t sadb_address_proto; + u_int8_t sadb_address_prefixlen; + u_int16_t sadb_address_reserved; }; struct sadb_key { - u_int16_t sadb_key_len; - u_int16_t sadb_key_exttype; - u_int16_t sadb_key_bits; - u_int16_t sadb_key_reserved; + u_int16_t sadb_key_len; + u_int16_t sadb_key_exttype; + u_int16_t sadb_key_bits; + u_int16_t sadb_key_reserved; }; struct sadb_ident { - u_int16_t sadb_ident_len; - u_int16_t sadb_ident_exttype; - u_int16_t sadb_ident_type; - u_int16_t sadb_ident_reserved; - u_int64_t sadb_ident_id; + u_int16_t sadb_ident_len; + u_int16_t sadb_ident_exttype; + u_int16_t sadb_ident_type; + u_int16_t sadb_ident_reserved; + u_int64_t sadb_ident_id; }; struct sadb_sens { - 
u_int16_t sadb_sens_len; - u_int16_t sadb_sens_exttype; - u_int32_t sadb_sens_dpd; - u_int8_t sadb_sens_sens_level; - u_int8_t sadb_sens_sens_len; - u_int8_t sadb_sens_integ_level; - u_int8_t sadb_sens_integ_len; - u_int32_t sadb_sens_reserved; + u_int16_t sadb_sens_len; + u_int16_t sadb_sens_exttype; + u_int32_t sadb_sens_dpd; + u_int8_t sadb_sens_sens_level; + u_int8_t sadb_sens_sens_len; + u_int8_t sadb_sens_integ_level; + u_int8_t sadb_sens_integ_len; + u_int32_t sadb_sens_reserved; }; struct sadb_prop { - u_int16_t sadb_prop_len; - u_int16_t sadb_prop_exttype; - u_int8_t sadb_prop_replay; - u_int8_t sadb_prop_reserved[3]; + u_int16_t sadb_prop_len; + u_int16_t sadb_prop_exttype; + u_int8_t sadb_prop_replay; + u_int8_t sadb_prop_reserved[3]; }; struct sadb_comb { - u_int8_t sadb_comb_auth; - u_int8_t sadb_comb_encrypt; - u_int16_t sadb_comb_flags; - u_int16_t sadb_comb_auth_minbits; - u_int16_t sadb_comb_auth_maxbits; - u_int16_t sadb_comb_encrypt_minbits; - u_int16_t sadb_comb_encrypt_maxbits; - u_int32_t sadb_comb_reserved; - u_int32_t sadb_comb_soft_allocations; - u_int32_t sadb_comb_hard_allocations; - u_int64_t sadb_comb_soft_bytes; - u_int64_t sadb_comb_hard_bytes; - u_int64_t sadb_comb_soft_addtime; - u_int64_t sadb_comb_hard_addtime; - u_int64_t sadb_comb_soft_usetime; - u_int64_t sadb_comb_hard_usetime; + u_int8_t sadb_comb_auth; + u_int8_t sadb_comb_encrypt; + u_int16_t sadb_comb_flags; + u_int16_t sadb_comb_auth_minbits; + u_int16_t sadb_comb_auth_maxbits; + u_int16_t sadb_comb_encrypt_minbits; + u_int16_t sadb_comb_encrypt_maxbits; + u_int32_t sadb_comb_reserved; + u_int32_t sadb_comb_soft_allocations; + u_int32_t sadb_comb_hard_allocations; + u_int64_t sadb_comb_soft_bytes; + u_int64_t sadb_comb_hard_bytes; + u_int64_t sadb_comb_soft_addtime; + u_int64_t sadb_comb_hard_addtime; + u_int64_t sadb_comb_soft_usetime; + u_int64_t sadb_comb_hard_usetime; }; struct sadb_supported { - u_int16_t sadb_supported_len; - u_int16_t sadb_supported_exttype; - u_int32_t sadb_supported_reserved; + u_int16_t sadb_supported_len; + u_int16_t sadb_supported_exttype; + u_int32_t sadb_supported_reserved; }; struct sadb_alg { - u_int8_t sadb_alg_id; - u_int8_t sadb_alg_ivlen; - u_int16_t sadb_alg_minbits; - u_int16_t sadb_alg_maxbits; - u_int16_t sadb_alg_reserved; + u_int8_t sadb_alg_id; + u_int8_t sadb_alg_ivlen; + u_int16_t sadb_alg_minbits; + u_int16_t sadb_alg_maxbits; + u_int16_t sadb_alg_reserved; }; struct sadb_spirange { - u_int16_t sadb_spirange_len; - u_int16_t sadb_spirange_exttype; - u_int32_t sadb_spirange_min; - u_int32_t sadb_spirange_max; - u_int32_t sadb_spirange_reserved; + u_int16_t sadb_spirange_len; + u_int16_t sadb_spirange_exttype; + u_int32_t sadb_spirange_min; + u_int32_t sadb_spirange_max; + u_int32_t sadb_spirange_reserved; }; struct sadb_x_kmprivate { - u_int16_t sadb_x_kmprivate_len; - u_int16_t sadb_x_kmprivate_exttype; - u_int32_t sadb_x_kmprivate_reserved; + u_int16_t sadb_x_kmprivate_len; + u_int16_t sadb_x_kmprivate_exttype; + u_int32_t sadb_x_kmprivate_reserved; }; /* @@ -256,35 +256,35 @@ struct sadb_x_kmprivate { * Mainly it's for VPN. 
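[Annotation] Every PF_KEY exchange begins with the fixed struct sadb_msg header shown above; sadb_msg_len travels in 64-bit units (see PFKEY_UNIT64 further down) and the kernel echoes seq/pid back in replies. A hedged user-space sketch of filling that header for an SPD dump request, assuming the standard socket(PF_KEY, SOCK_RAW, PF_KEY_V2) transport from RFC 2367 and an installed <net/pfkeyv2.h>; error paths are minimal and the socket requires root:

#include <sys/socket.h>
#include <net/pfkeyv2.h>
#include <string.h>
#include <unistd.h>

static int
spd_dump_request(void)
{
	struct sadb_msg msg;
	int s = socket(PF_KEY, SOCK_RAW, PF_KEY_V2);

	if (s < 0) {
		return -1;
	}
	memset(&msg, 0, sizeof(msg));
	msg.sadb_msg_version = PF_KEY_V2;
	msg.sadb_msg_type    = SADB_X_SPDDUMP;        /* 18, per the header above */
	msg.sadb_msg_satype  = SADB_SATYPE_UNSPEC;
	msg.sadb_msg_len     = PFKEY_UNIT64(sizeof(msg)); /* length in 64-bit units */
	msg.sadb_msg_seq     = 1;                     /* echoed back in replies */
	msg.sadb_msg_pid     = (u_int32_t)getpid();
	if (write(s, &msg, sizeof(msg)) != (ssize_t)sizeof(msg)) {
		close(s);
		return -1;
	}
	close(s);   /* a real client would read the dump replies first */
	return 0;
}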
*/ struct sadb_x_sa2 { - u_int16_t sadb_x_sa2_len; - u_int16_t sadb_x_sa2_exttype; - u_int8_t sadb_x_sa2_mode; - union { - u_int8_t sadb_x_sa2_reserved1; + u_int16_t sadb_x_sa2_len; + u_int16_t sadb_x_sa2_exttype; + u_int8_t sadb_x_sa2_mode; + union { + u_int8_t sadb_x_sa2_reserved1; #ifdef PRIVATE - u_int8_t sadb_x_sa2_alwaysexpire; + u_int8_t sadb_x_sa2_alwaysexpire; #endif - }; - union { - u_int16_t sadb_x_sa2_reserved2; + }; + union { + u_int16_t sadb_x_sa2_reserved2; #ifdef PRIVATE - u_int16_t sadb_x_sa2_flags; + u_int16_t sadb_x_sa2_flags; #endif - }; - u_int32_t sadb_x_sa2_sequence; - u_int32_t sadb_x_sa2_reqid; + }; + u_int32_t sadb_x_sa2_sequence; + u_int32_t sadb_x_sa2_reqid; }; /* XXX Policy Extension */ /* sizeof(struct sadb_x_policy) == 16 */ struct sadb_x_policy { - u_int16_t sadb_x_policy_len; - u_int16_t sadb_x_policy_exttype; - u_int16_t sadb_x_policy_type; /* See policy type of ipsec.h */ - u_int8_t sadb_x_policy_dir; /* direction, see ipsec.h */ - u_int8_t sadb_x_policy_reserved; - u_int32_t sadb_x_policy_id; - u_int32_t sadb_x_policy_reserved2; + u_int16_t sadb_x_policy_len; + u_int16_t sadb_x_policy_exttype; + u_int16_t sadb_x_policy_type; /* See policy type of ipsec.h */ + u_int8_t sadb_x_policy_dir; /* direction, see ipsec.h */ + u_int8_t sadb_x_policy_reserved; + u_int32_t sadb_x_policy_id; + u_int32_t sadb_x_policy_reserved2; }; /* * When policy_type == IPSEC, it is followed by some of @@ -313,45 +313,45 @@ struct sadb_x_ipsecif { * This structure is aligned 8 bytes. */ struct sadb_x_ipsecrequest { - u_int16_t sadb_x_ipsecrequest_len; /* structure length aligned to 8 bytes. - * This value is true length of bytes. - * Not in units of 64 bits. */ - u_int16_t sadb_x_ipsecrequest_proto; /* See ipsec.h */ - u_int8_t sadb_x_ipsecrequest_mode; /* See IPSEC_MODE_XX in ipsec.h. */ - u_int8_t sadb_x_ipsecrequest_level; /* See IPSEC_LEVEL_XX in ipsec.h */ - u_int16_t sadb_x_ipsecrequest_reqid; /* See ipsec.h */ - - /* - * followed by source IP address of SA, and immediately followed by - * destination IP address of SA. These encoded into two of sockaddr - * structure without any padding. Must set each sa_len exactly. - * Each of length of the sockaddr structure are not aligned to 64bits, - * but sum of x_request and addresses is aligned to 64bits. - */ + u_int16_t sadb_x_ipsecrequest_len; /* structure length aligned to 8 bytes. + * This value is true length of bytes. + * Not in units of 64 bits. */ + u_int16_t sadb_x_ipsecrequest_proto; /* See ipsec.h */ + u_int8_t sadb_x_ipsecrequest_mode; /* See IPSEC_MODE_XX in ipsec.h. */ + u_int8_t sadb_x_ipsecrequest_level; /* See IPSEC_LEVEL_XX in ipsec.h */ + u_int16_t sadb_x_ipsecrequest_reqid; /* See ipsec.h */ + + /* + * followed by source IP address of SA, and immediately followed by + * destination IP address of SA. These encoded into two of sockaddr + * structure without any padding. Must set each sa_len exactly. + * Each of length of the sockaddr structure are not aligned to 64bits, + * but sum of x_request and addresses is aligned to 64bits. + */ }; struct sadb_session_id { - u_int16_t sadb_session_id_len; - u_int16_t sadb_session_id_exttype; - /* [0] is an arbitrary handle that means something only for requester - * [1] is a global session id for lookups in the kernel and racoon. - */ - u_int64_t sadb_session_id_v[2]; + u_int16_t sadb_session_id_len; + u_int16_t sadb_session_id_exttype; + /* [0] is an arbitrary handle that means something only for requester + * [1] is a global session id for lookups in the kernel and racoon. 
+ */ + u_int64_t sadb_session_id_v[2]; } __attribute__ ((aligned(8))); struct sastat { - u_int32_t spi; /* SPI Value, network byte order */ - u_int32_t created; /* for lifetime */ - struct sadb_lifetime lft_c; /* CURRENT lifetime. */ + u_int32_t spi; /* SPI Value, network byte order */ + u_int32_t created; /* for lifetime */ + struct sadb_lifetime lft_c; /* CURRENT lifetime. */ }; // no need to align struct sadb_sastat { - u_int16_t sadb_sastat_len; - u_int16_t sadb_sastat_exttype; - u_int32_t sadb_sastat_dir; - u_int32_t sadb_sastat_reserved; - u_int32_t sadb_sastat_list_len; - /* list of struct sastat comes after */ + u_int16_t sadb_sastat_len; + u_int16_t sadb_sastat_exttype; + u_int32_t sadb_sastat_dir; + u_int32_t sadb_sastat_reserved; + u_int32_t sadb_sastat_list_len; + /* list of struct sastat comes after */ } __attribute__ ((aligned(8))); #define SADB_EXT_RESERVED 0 @@ -386,16 +386,16 @@ struct sadb_sastat { #define SADB_X_EXT_MIGRATE_IPSECIF 29 #define SADB_EXT_MAX 29 -#define SADB_SATYPE_UNSPEC 0 -#define SADB_SATYPE_AH 2 -#define SADB_SATYPE_ESP 3 -#define SADB_SATYPE_RSVP 5 -#define SADB_SATYPE_OSPFV2 6 -#define SADB_SATYPE_RIPV2 7 -#define SADB_SATYPE_MIP 8 -#define SADB_X_SATYPE_IPCOMP 9 -#define SADB_X_SATYPE_POLICY 10 -#define SADB_SATYPE_MAX 11 +#define SADB_SATYPE_UNSPEC 0 +#define SADB_SATYPE_AH 2 +#define SADB_SATYPE_ESP 3 +#define SADB_SATYPE_RSVP 5 +#define SADB_SATYPE_OSPFV2 6 +#define SADB_SATYPE_RIPV2 7 +#define SADB_SATYPE_MIP 8 +#define SADB_X_SATYPE_IPCOMP 9 +#define SADB_X_SATYPE_POLICY 10 +#define SADB_SATYPE_MAX 11 #define SADB_SASTATE_LARVAL 0 #define SADB_SASTATE_MATURE 1 @@ -406,41 +406,41 @@ struct sadb_sastat { #define SADB_SAFLAGS_PFS 1 /* RFC2367 numbers - meets RFC2407 */ -#define SADB_AALG_NONE 0 -#define SADB_AALG_MD5HMAC 1 /*2*/ -#define SADB_AALG_SHA1HMAC 2 /*3*/ -#define SADB_AALG_MAX 8 +#define SADB_AALG_NONE 0 +#define SADB_AALG_MD5HMAC 1 /*2*/ +#define SADB_AALG_SHA1HMAC 2 /*3*/ +#define SADB_AALG_MAX 8 /* private allocations - based on RFC2407/IANA assignment */ -#define SADB_X_AALG_SHA2_256 6 /*5*/ -#define SADB_X_AALG_SHA2_384 7 /*6*/ -#define SADB_X_AALG_SHA2_512 8 /*7*/ +#define SADB_X_AALG_SHA2_256 6 /*5*/ +#define SADB_X_AALG_SHA2_384 7 /*6*/ +#define SADB_X_AALG_SHA2_512 8 /*7*/ /* private allocations should use 249-255 (RFC2407) */ -#define SADB_X_AALG_MD5 3 /*249*/ /* Keyed MD5 */ -#define SADB_X_AALG_SHA 4 /*250*/ /* Keyed SHA */ -#define SADB_X_AALG_NULL 5 /*251*/ /* null authentication */ +#define SADB_X_AALG_MD5 3 /*249*/ /* Keyed MD5 */ +#define SADB_X_AALG_SHA 4 /*250*/ /* Keyed SHA */ +#define SADB_X_AALG_NULL 5 /*251*/ /* null authentication */ /* RFC2367 numbers - meets RFC2407 */ -#define SADB_EALG_NONE 0 -#define SADB_EALG_DESCBC 1 /*2*/ -#define SADB_EALG_3DESCBC 2 /*3*/ -#define SADB_EALG_NULL 3 /*11*/ -#define SADB_EALG_MAX 12 +#define SADB_EALG_NONE 0 +#define SADB_EALG_DESCBC 1 /*2*/ +#define SADB_EALG_3DESCBC 2 /*3*/ +#define SADB_EALG_NULL 3 /*11*/ +#define SADB_EALG_MAX 12 /* private allocations - based on RFC2407/IANA assignment */ -#define SADB_X_EALG_CAST128CBC 5 /*6*/ -#define SADB_X_EALG_BLOWFISHCBC 4 /*7*/ -#define SADB_X_EALG_RIJNDAELCBC 12 +#define SADB_X_EALG_CAST128CBC 5 /*6*/ +#define SADB_X_EALG_BLOWFISHCBC 4 /*7*/ +#define SADB_X_EALG_RIJNDAELCBC 12 #define SADB_X_EALG_AESCBC 12 -#define SADB_X_EALG_AES 12 +#define SADB_X_EALG_AES 12 #define SADB_X_EALG_AES_GCM 13 #define SADB_X_EALG_CHACHA20POLY1305 14 /* private allocations should use 249-255 (RFC2407) */ -#if 1 /*nonstandard */ -#define 
SADB_X_CALG_NONE 0 -#define SADB_X_CALG_OUI 1 -#define SADB_X_CALG_DEFLATE 2 -#define SADB_X_CALG_LZS 3 -#define SADB_X_CALG_MAX 4 +#if 1 /*nonstandard */ +#define SADB_X_CALG_NONE 0 +#define SADB_X_CALG_OUI 1 +#define SADB_X_CALG_DEFLATE 2 +#define SADB_X_CALG_LZS 3 +#define SADB_X_CALG_MAX 4 #endif #define SADB_IDENTTYPE_RESERVED 0 @@ -451,64 +451,64 @@ struct sadb_sastat { #define SADB_IDENTTYPE_MAX 4 /* `flags' in sadb_sa structure holds followings */ -#define SADB_X_EXT_NONE 0x0000 /* i.e. new format. */ -#define SADB_X_EXT_OLD 0x0001 /* old format. */ +#define SADB_X_EXT_NONE 0x0000 /* i.e. new format. */ +#define SADB_X_EXT_OLD 0x0001 /* old format. */ #ifdef PRIVATE -#define SADB_X_EXT_NATT 0x0002 /* Use UDP encapsulation to traverse NAT */ -#define SADB_X_EXT_NATT_KEEPALIVE 0x0004 /* Local node is behind NAT, send keepalives */ - /* Should only be set for outbound SAs */ -#define SADB_X_EXT_NATT_MULTIPLEUSERS 0x0008 /* For use on VPN server - support multiple users */ +#define SADB_X_EXT_NATT 0x0002 /* Use UDP encapsulation to traverse NAT */ +#define SADB_X_EXT_NATT_KEEPALIVE 0x0004 /* Local node is behind NAT, send keepalives */ +/* Should only be set for outbound SAs */ +#define SADB_X_EXT_NATT_MULTIPLEUSERS 0x0008 /* For use on VPN server - support multiple users */ -#endif /* PRIVATE */ +#endif /* PRIVATE */ -#define SADB_X_EXT_IV4B 0x0010 /* IV length of 4 bytes in use */ -#define SADB_X_EXT_DERIV 0x0020 /* DES derived */ -#define SADB_X_EXT_CYCSEQ 0x0040 /* allowing to cyclic sequence. */ +#define SADB_X_EXT_IV4B 0x0010 /* IV length of 4 bytes in use */ +#define SADB_X_EXT_DERIV 0x0020 /* DES derived */ +#define SADB_X_EXT_CYCSEQ 0x0040 /* allowing to cyclic sequence. */ - /* three of followings are exclusive flags each them */ -#define SADB_X_EXT_PSEQ 0x0000 /* sequencial padding for ESP */ -#define SADB_X_EXT_PRAND 0x0100 /* random padding for ESP */ -#define SADB_X_EXT_PZERO 0x0200 /* zero padding for ESP */ -#define SADB_X_EXT_PMASK 0x0300 /* mask for padding flag */ +/* three of followings are exclusive flags each them */ +#define SADB_X_EXT_PSEQ 0x0000 /* sequencial padding for ESP */ +#define SADB_X_EXT_PRAND 0x0100 /* random padding for ESP */ +#define SADB_X_EXT_PZERO 0x0200 /* zero padding for ESP */ +#define SADB_X_EXT_PMASK 0x0300 /* mask for padding flag */ -#define SADB_X_EXT_IIV 0x0400 /* Implicit IV */ +#define SADB_X_EXT_IIV 0x0400 /* Implicit IV */ #ifdef PRIVATE #define SADB_X_EXT_NATT_DETECTED_PEER 0x1000 #define SADB_X_EXT_ESP_KEEPALIVE 0x2000 #define SADB_X_EXT_PUNT_RX_KEEPALIVE 0x4000 #define SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD 0x8000 -#endif /* PRIVATE */ +#endif /* PRIVATE */ #ifdef PRIVATE -#define NATT_KEEPALIVE_OFFLOAD_INTERVAL 0x1 +#define NATT_KEEPALIVE_OFFLOAD_INTERVAL 0x1 #endif #if 1 -#define SADB_X_EXT_RAWCPI 0x0080 /* use well known CPI (IPComp) */ +#define SADB_X_EXT_RAWCPI 0x0080 /* use well known CPI (IPComp) */ #endif -#define SADB_KEY_FLAGS_MAX 0x7fff +#define SADB_KEY_FLAGS_MAX 0x7fff #ifdef PRIVATE #define SADB_X_EXT_SA2_DELETE_ON_DETACH 0x0001 #endif /* SPI size for PF_KEYv2 */ -#define PFKEY_SPI_SIZE sizeof(u_int32_t) +#define PFKEY_SPI_SIZE sizeof(u_int32_t) /* Identifier for menber of lifetime structure */ -#define SADB_X_LIFETIME_ALLOCATIONS 0 -#define SADB_X_LIFETIME_BYTES 1 -#define SADB_X_LIFETIME_ADDTIME 2 -#define SADB_X_LIFETIME_USETIME 3 +#define SADB_X_LIFETIME_ALLOCATIONS 0 +#define SADB_X_LIFETIME_BYTES 1 +#define SADB_X_LIFETIME_ADDTIME 2 +#define SADB_X_LIFETIME_USETIME 3 /* The rate for SOFT lifetime against HARD 
one. */ -#define PFKEY_SOFT_LIFETIME_RATE 80 +#define PFKEY_SOFT_LIFETIME_RATE 80 /* Utilities */ #define PFKEY_ALIGN8(a) (1 + (((a) - 1) | (8 - 1))) -#define PFKEY_EXTLEN(msg) \ +#define PFKEY_EXTLEN(msg) \ PFKEY_UNUNIT64(((struct sadb_ext *)(msg))->sadb_ext_len) #define PFKEY_ADDR_PREFIX(ext) \ (((struct sadb_address *)(ext))->sadb_address_prefixlen) @@ -518,8 +518,8 @@ struct sadb_sastat { ((struct sockaddr *)((caddr_t)(ext) + sizeof(struct sadb_address))) /* in 64bits */ -#define PFKEY_UNUNIT64(a) ((a) << 3) -#define PFKEY_UNIT64(a) ((a) >> 3) +#define PFKEY_UNUNIT64(a) ((a) << 3) +#define PFKEY_UNIT64(a) ((a) >> 3) #endif /* __PFKEY_V2_H */ diff --git a/bsd/net/pfvar.h b/bsd/net/pfvar.h index 8b6f61ced..2eed3f3c3 100644 --- a/bsd/net/pfvar.h +++ b/bsd/net/pfvar.h @@ -95,27 +95,27 @@ extern "C" { #if BYTE_ORDER == BIG_ENDIAN -#define htobe64(x) (x) +#define htobe64(x) (x) #else /* LITTLE ENDIAN */ -#define htobe64(x) __DARWIN_OSSwapInt64(x) +#define htobe64(x) __DARWIN_OSSwapInt64(x) #endif /* LITTLE_ENDIAN */ -#define be64toh(x) htobe64(x) +#define be64toh(x) htobe64(x) extern lck_rw_t *pf_perim_lock; extern lck_mtx_t *pf_lock; struct pool { - struct zone *pool_zone; /* pointer to backend zone */ - const char *pool_name; /* name of pool */ - unsigned int pool_count; /* # of outstanding elements */ - unsigned int pool_hiwat; /* high watermark */ - unsigned int pool_limit; /* hard limit */ - unsigned int pool_fails; /* # of failed allocs due to limit */ + struct zone *pool_zone; /* pointer to backend zone */ + const char *pool_name; /* name of pool */ + unsigned int pool_count; /* # of outstanding elements */ + unsigned int pool_hiwat; /* high watermark */ + unsigned int pool_limit; /* hard limit */ + unsigned int pool_fails; /* # of failed allocs due to limit */ }; -#define PR_NOWAIT FALSE -#define PR_WAITOK TRUE +#define PR_NOWAIT FALSE +#define PR_WAITOK TRUE __private_extern__ void pool_init(struct pool *, size_t, unsigned int, unsigned int, int, const char *, void *); @@ -130,15 +130,15 @@ __private_extern__ u_int64_t pf_calendar_time_second(void); #endif /* KERNEL */ union sockaddr_union { - struct sockaddr sa; - struct sockaddr_in sin; - struct sockaddr_in6 sin6; + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; }; -#define PF_TCPS_PROXY_SRC ((TCP_NSTATES)+0) -#define PF_TCPS_PROXY_DST ((TCP_NSTATES)+1) +#define PF_TCPS_PROXY_SRC ((TCP_NSTATES)+0) +#define PF_TCPS_PROXY_DST ((TCP_NSTATES)+1) -#define PF_MD5_DIGEST_LENGTH 16 +#define PF_MD5_DIGEST_LENGTH 16 #ifdef MD5_DIGEST_LENGTH #if PF_MD5_DIGEST_LENGTH != MD5_DIGEST_LENGTH #error @@ -153,28 +153,28 @@ struct pf_grev1_hdr; struct pf_esp_hdr; #endif /* KERNEL */ -#define PF_GRE_PPTP_VARIANT 0x01 +#define PF_GRE_PPTP_VARIANT 0x01 -enum { PF_INOUT, PF_IN, PF_OUT }; -enum { PF_PASS, PF_DROP, PF_SCRUB, PF_NOSCRUB, PF_NAT, PF_NONAT, +enum { PF_INOUT, PF_IN, PF_OUT }; +enum { PF_PASS, PF_DROP, PF_SCRUB, PF_NOSCRUB, PF_NAT, PF_NONAT, PF_BINAT, PF_NOBINAT, PF_RDR, PF_NORDR, PF_SYNPROXY_DROP, PF_DUMMYNET, PF_NODUMMYNET, PF_NAT64, PF_NONAT64 }; -enum { PF_RULESET_SCRUB, PF_RULESET_FILTER, PF_RULESET_NAT, - PF_RULESET_BINAT, PF_RULESET_RDR, PF_RULESET_DUMMYNET, +enum { PF_RULESET_SCRUB, PF_RULESET_FILTER, PF_RULESET_NAT, + PF_RULESET_BINAT, PF_RULESET_RDR, PF_RULESET_DUMMYNET, PF_RULESET_MAX }; -enum { PF_OP_NONE, PF_OP_IRG, PF_OP_EQ, PF_OP_NE, PF_OP_LT, +enum { PF_OP_NONE, PF_OP_IRG, PF_OP_EQ, PF_OP_NE, PF_OP_LT, PF_OP_LE, PF_OP_GT, PF_OP_GE, PF_OP_XRG, PF_OP_RRG }; -enum { PF_DEBUG_NONE, PF_DEBUG_URGENT, 
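[Annotation] PFKEY_ALIGN8 above rounds a byte count up to the next multiple of 8 with (1 + (((a) - 1) | (8 - 1))): OR-ing (a - 1) with 7 forces the low three bits to all ones, and adding 1 carries into the next multiple of 8. PFKEY_UNIT64/PFKEY_UNUNIT64 then convert between bytes and the 64-bit units used on the wire. A quick self-contained check of that arithmetic:

#include <assert.h>

#define PFKEY_ALIGN8(a)   (1 + (((a) - 1) | (8 - 1)))
#define PFKEY_UNIT64(a)   ((a) >> 3)   /* bytes -> 64-bit units */
#define PFKEY_UNUNIT64(a) ((a) << 3)   /* 64-bit units -> bytes */

int
main(void)
{
	assert(PFKEY_ALIGN8(1) == 8);
	assert(PFKEY_ALIGN8(8) == 8);
	assert(PFKEY_ALIGN8(9) == 16);
	/* Extension lengths travel in 64-bit units on the wire. */
	assert(PFKEY_UNIT64(PFKEY_ALIGN8(20)) == 3);
	assert(PFKEY_UNUNIT64(3) == 24);
	return 0;
}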
PF_DEBUG_MISC, PF_DEBUG_NOISY }; -enum { PF_CHANGE_NONE, PF_CHANGE_ADD_HEAD, PF_CHANGE_ADD_TAIL, +enum { PF_DEBUG_NONE, PF_DEBUG_URGENT, PF_DEBUG_MISC, PF_DEBUG_NOISY }; +enum { PF_CHANGE_NONE, PF_CHANGE_ADD_HEAD, PF_CHANGE_ADD_TAIL, PF_CHANGE_ADD_BEFORE, PF_CHANGE_ADD_AFTER, PF_CHANGE_REMOVE, PF_CHANGE_GET_TICKET }; -enum { PF_GET_NONE, PF_GET_CLR_CNTR }; +enum { PF_GET_NONE, PF_GET_CLR_CNTR }; /* * Note about PFTM_*: real indices into pf_rule.timeout[] come before * PFTM_MAX, special cases afterwards. See pf_state_expires(). */ -enum { PFTM_TCP_FIRST_PACKET, PFTM_TCP_OPENING, PFTM_TCP_ESTABLISHED, +enum { PFTM_TCP_FIRST_PACKET, PFTM_TCP_OPENING, PFTM_TCP_ESTABLISHED, PFTM_TCP_CLOSING, PFTM_TCP_FIN_WAIT, PFTM_TCP_CLOSED, PFTM_UDP_FIRST_PACKET, PFTM_UDP_SINGLE, PFTM_UDP_MULTIPLE, PFTM_ICMP_FIRST_PACKET, PFTM_ICMP_ERROR_REPLY, @@ -186,73 +186,73 @@ enum { PFTM_TCP_FIRST_PACKET, PFTM_TCP_OPENING, PFTM_TCP_ESTABLISHED, PFTM_TS_DIFF, PFTM_MAX, PFTM_PURGE, PFTM_UNLINKED }; /* PFTM default values */ -#define PFTM_TCP_FIRST_PACKET_VAL 120 /* First TCP packet */ -#define PFTM_TCP_OPENING_VAL 30 /* No response yet */ -#define PFTM_TCP_ESTABLISHED_VAL (24 * 60 * 60) /* Established */ -#define PFTM_TCP_CLOSING_VAL (15 * 60) /* Half closed */ -#define PFTM_TCP_FIN_WAIT_VAL 45 /* Got both FINs */ -#define PFTM_TCP_CLOSED_VAL 90 /* Got a RST */ -#define PFTM_UDP_FIRST_PACKET_VAL 60 /* First UDP packet */ -#define PFTM_UDP_SINGLE_VAL 30 /* Unidirectional */ -#define PFTM_UDP_MULTIPLE_VAL 60 /* Bidirectional */ -#define PFTM_ICMP_FIRST_PACKET_VAL 20 /* First ICMP packet */ -#define PFTM_ICMP_ERROR_REPLY_VAL 10 /* Got error response */ -#define PFTM_GREv1_FIRST_PACKET_VAL 120 -#define PFTM_GREv1_INITIATING_VAL 30 -#define PFTM_GREv1_ESTABLISHED_VAL 1800 -#define PFTM_ESP_FIRST_PACKET_VAL 120 -#define PFTM_ESP_INITIATING_VAL 30 -#define PFTM_ESP_ESTABLISHED_VAL 900 -#define PFTM_OTHER_FIRST_PACKET_VAL 60 /* First packet */ -#define PFTM_OTHER_SINGLE_VAL 30 /* Unidirectional */ -#define PFTM_OTHER_MULTIPLE_VAL 60 /* Bidirectional */ -#define PFTM_FRAG_VAL 30 /* Fragment expire */ -#define PFTM_INTERVAL_VAL 10 /* Expire interval */ -#define PFTM_SRC_NODE_VAL 0 /* Source tracking */ -#define PFTM_TS_DIFF_VAL 30 /* Allowed TS diff */ - -enum { PF_NOPFROUTE, PF_FASTROUTE, PF_ROUTETO, PF_DUPTO, PF_REPLYTO }; -enum { PF_LIMIT_STATES, +#define PFTM_TCP_FIRST_PACKET_VAL 120 /* First TCP packet */ +#define PFTM_TCP_OPENING_VAL 30 /* No response yet */ +#define PFTM_TCP_ESTABLISHED_VAL (24 * 60 * 60) /* Established */ +#define PFTM_TCP_CLOSING_VAL (15 * 60) /* Half closed */ +#define PFTM_TCP_FIN_WAIT_VAL 45 /* Got both FINs */ +#define PFTM_TCP_CLOSED_VAL 90 /* Got a RST */ +#define PFTM_UDP_FIRST_PACKET_VAL 60 /* First UDP packet */ +#define PFTM_UDP_SINGLE_VAL 30 /* Unidirectional */ +#define PFTM_UDP_MULTIPLE_VAL 60 /* Bidirectional */ +#define PFTM_ICMP_FIRST_PACKET_VAL 20 /* First ICMP packet */ +#define PFTM_ICMP_ERROR_REPLY_VAL 10 /* Got error response */ +#define PFTM_GREv1_FIRST_PACKET_VAL 120 +#define PFTM_GREv1_INITIATING_VAL 30 +#define PFTM_GREv1_ESTABLISHED_VAL 1800 +#define PFTM_ESP_FIRST_PACKET_VAL 120 +#define PFTM_ESP_INITIATING_VAL 30 +#define PFTM_ESP_ESTABLISHED_VAL 900 +#define PFTM_OTHER_FIRST_PACKET_VAL 60 /* First packet */ +#define PFTM_OTHER_SINGLE_VAL 30 /* Unidirectional */ +#define PFTM_OTHER_MULTIPLE_VAL 60 /* Bidirectional */ +#define PFTM_FRAG_VAL 30 /* Fragment expire */ +#define PFTM_INTERVAL_VAL 10 /* Expire interval */ +#define PFTM_SRC_NODE_VAL 0 /* Source tracking */ +#define 
PFTM_TS_DIFF_VAL 30 /* Allowed TS diff */ + +enum { PF_NOPFROUTE, PF_FASTROUTE, PF_ROUTETO, PF_DUPTO, PF_REPLYTO }; +enum { PF_LIMIT_STATES, PF_LIMIT_APP_STATES, PF_LIMIT_SRC_NODES, PF_LIMIT_FRAGS, PF_LIMIT_TABLES, PF_LIMIT_TABLE_ENTRIES, PF_LIMIT_MAX }; -#define PF_POOL_IDMASK 0x0f -enum { PF_POOL_NONE, PF_POOL_BITMASK, PF_POOL_RANDOM, +#define PF_POOL_IDMASK 0x0f +enum { PF_POOL_NONE, PF_POOL_BITMASK, PF_POOL_RANDOM, PF_POOL_SRCHASH, PF_POOL_ROUNDROBIN }; -enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL, +enum { PF_ADDR_ADDRMASK, PF_ADDR_NOROUTE, PF_ADDR_DYNIFTL, PF_ADDR_TABLE, PF_ADDR_RTLABEL, PF_ADDR_URPFFAILED, PF_ADDR_RANGE }; -#define PF_POOL_TYPEMASK 0x0f -#define PF_POOL_STICKYADDR 0x20 -#define PF_WSCALE_FLAG 0x80 -#define PF_WSCALE_MASK 0x0f +#define PF_POOL_TYPEMASK 0x0f +#define PF_POOL_STICKYADDR 0x20 +#define PF_WSCALE_FLAG 0x80 +#define PF_WSCALE_MASK 0x0f -#define PF_LOG 0x01 -#define PF_LOG_ALL 0x02 -#define PF_LOG_SOCKET_LOOKUP 0x04 +#define PF_LOG 0x01 +#define PF_LOG_ALL 0x02 +#define PF_LOG_SOCKET_LOOKUP 0x04 struct pf_addr { union { - struct in_addr _v4addr; - struct in6_addr _v6addr; - u_int8_t _addr8[16]; - u_int16_t _addr16[8]; - u_int32_t _addr32[4]; - } pfa; /* 128-bit address */ -#define v4addr pfa._v4addr -#define v6addr pfa._v6addr -#define addr8 pfa._addr8 -#define addr16 pfa._addr16 -#define addr32 pfa._addr32 -}; - -#define PF_TABLE_NAME_SIZE 32 - -#define PFI_AFLAG_NETWORK 0x01 -#define PFI_AFLAG_BROADCAST 0x02 -#define PFI_AFLAG_PEER 0x04 -#define PFI_AFLAG_MODEMASK 0x07 -#define PFI_AFLAG_NOALIAS 0x08 + struct in_addr _v4addr; + struct in6_addr _v6addr; + u_int8_t _addr8[16]; + u_int16_t _addr16[8]; + u_int32_t _addr32[4]; + } pfa; /* 128-bit address */ +#define v4addr pfa._v4addr +#define v6addr pfa._v6addr +#define addr8 pfa._addr8 +#define addr16 pfa._addr16 +#define addr32 pfa._addr32 +}; + +#define PF_TABLE_NAME_SIZE 32 + +#define PFI_AFLAG_NETWORK 0x01 +#define PFI_AFLAG_BROADCAST 0x02 +#define PFI_AFLAG_PEER 0x04 +#define PFI_AFLAG_MODEMASK 0x07 +#define PFI_AFLAG_NOALIAS 0x08 #ifndef RTLABEL_LEN #define RTLABEL_LEN 32 @@ -261,55 +261,55 @@ struct pf_addr { struct pf_addr_wrap { union { struct { - struct pf_addr addr; - struct pf_addr mask; - } a; - char ifname[IFNAMSIZ]; - char tblname[PF_TABLE_NAME_SIZE]; - char rtlabelname[RTLABEL_LEN]; - u_int32_t rtlabel; - } v; + struct pf_addr addr; + struct pf_addr mask; + } a; + char ifname[IFNAMSIZ]; + char tblname[PF_TABLE_NAME_SIZE]; + char rtlabelname[RTLABEL_LEN]; + u_int32_t rtlabel; + } v; union { #ifdef KERNEL - struct pfi_dynaddr *dyn __attribute__((aligned(8))); - struct pfr_ktable *tbl __attribute__((aligned(8))); + struct pfi_dynaddr *dyn __attribute__((aligned(8))); + struct pfr_ktable *tbl __attribute__((aligned(8))); #else /* !KERNEL */ - void *dyn __attribute__((aligned(8))); - void *tbl __attribute__((aligned(8))); + void *dyn __attribute__((aligned(8))); + void *tbl __attribute__((aligned(8))); #endif /* !KERNEL */ - int dyncnt __attribute__((aligned(8))); - int tblcnt __attribute__((aligned(8))); - } p __attribute__((aligned(8))); - u_int8_t type; /* PF_ADDR_* */ - u_int8_t iflags; /* PFI_AFLAG_* */ + int dyncnt __attribute__((aligned(8))); + int tblcnt __attribute__((aligned(8))); + } p __attribute__((aligned(8))); + u_int8_t type; /* PF_ADDR_* */ + u_int8_t iflags; /* PFI_AFLAG_* */ }; struct pf_port_range { - u_int16_t port[2]; - u_int8_t op; + u_int16_t port[2]; + u_int8_t op; }; union pf_rule_xport { - struct pf_port_range range; - u_int16_t call_id; - u_int32_t spi; + 
struct pf_port_range range; + u_int16_t call_id; + u_int32_t spi; }; #ifdef KERNEL struct pfi_dynaddr { - TAILQ_ENTRY(pfi_dynaddr) entry; - struct pf_addr pfid_addr4; - struct pf_addr pfid_mask4; - struct pf_addr pfid_addr6; - struct pf_addr pfid_mask6; - struct pfr_ktable *pfid_kt; - struct pfi_kif *pfid_kif; - void *pfid_hook_cookie; - int pfid_net; /* mask or 128 */ - int pfid_acnt4; /* address count IPv4 */ - int pfid_acnt6; /* address count IPv6 */ - sa_family_t pfid_af; /* rule af */ - u_int8_t pfid_iflags; /* PFI_AFLAG_* */ + TAILQ_ENTRY(pfi_dynaddr) entry; + struct pf_addr pfid_addr4; + struct pf_addr pfid_mask4; + struct pf_addr pfid_addr6; + struct pf_addr pfid_mask6; + struct pfr_ktable *pfid_kt; + struct pfi_kif *pfid_kif; + void *pfid_hook_cookie; + int pfid_net; /* mask or 128 */ + int pfid_acnt4; /* address count IPv4 */ + int pfid_acnt6; /* address count IPv6 */ + sa_family_t pfid_af; /* rule af */ + u_int8_t pfid_iflags; /* PFI_AFLAG_* */ }; /* @@ -448,13 +448,13 @@ struct pfi_dynaddr { #define PF_AINC(a, f) \ do { \ - (a)->addr32[0] = htonl(ntohl((a)->addr32[0]) + 1); \ + (a)->addr32[0] = htonl(ntohl((a)->addr32[0]) + 1); \ } while (0) #define PF_POOLMASK(a, b, c, d, f) \ do { \ - (a)->addr32[0] = ((b)->addr32[0] & (c)->addr32[0]) | \ - (((c)->addr32[0] ^ 0xffffffff) & (d)->addr32[0]); \ + (a)->addr32[0] = ((b)->addr32[0] & (c)->addr32[0]) | \ + (((c)->addr32[0] ^ 0xffffffff) & (d)->addr32[0]); \ } while (0) #endif /* PF_INET_ONLY */ @@ -462,58 +462,58 @@ struct pfi_dynaddr { #endif /* PF_INET_INET6 */ #ifdef KERNEL -#define PF_MISMATCHAW(aw, x, af, neg, ifp) \ - ( \ - (((aw)->type == PF_ADDR_NOROUTE && \ - pf_routable((x), (af), NULL)) || \ - (((aw)->type == PF_ADDR_URPFFAILED && (ifp) != NULL && \ - pf_routable((x), (af), (ifp))) || \ - ((aw)->type == PF_ADDR_RTLABEL && \ - !pf_rtlabel_match((x), (af), (aw))) || \ - ((aw)->type == PF_ADDR_TABLE && \ - !pfr_match_addr((aw)->p.tbl, (x), (af))) || \ - ((aw)->type == PF_ADDR_DYNIFTL && \ - !pfi_match_addr((aw)->p.dyn, (x), (af))) || \ - ((aw)->type == PF_ADDR_RANGE && \ - !pf_match_addr_range(&(aw)->v.a.addr, \ - &(aw)->v.a.mask, (x), (af))) || \ - ((aw)->type == PF_ADDR_ADDRMASK && \ - !PF_AZERO(&(aw)->v.a.mask, (af)) && \ - !PF_MATCHA(0, &(aw)->v.a.addr, \ - &(aw)->v.a.mask, (x), (af))))) != \ - (neg) \ +#define PF_MISMATCHAW(aw, x, af, neg, ifp) \ + ( \ + (((aw)->type == PF_ADDR_NOROUTE && \ + pf_routable((x), (af), NULL)) || \ + (((aw)->type == PF_ADDR_URPFFAILED && (ifp) != NULL && \ + pf_routable((x), (af), (ifp))) || \ + ((aw)->type == PF_ADDR_RTLABEL && \ + !pf_rtlabel_match((x), (af), (aw))) || \ + ((aw)->type == PF_ADDR_TABLE && \ + !pfr_match_addr((aw)->p.tbl, (x), (af))) || \ + ((aw)->type == PF_ADDR_DYNIFTL && \ + !pfi_match_addr((aw)->p.dyn, (x), (af))) || \ + ((aw)->type == PF_ADDR_RANGE && \ + !pf_match_addr_range(&(aw)->v.a.addr, \ + &(aw)->v.a.mask, (x), (af))) || \ + ((aw)->type == PF_ADDR_ADDRMASK && \ + !PF_AZERO(&(aw)->v.a.mask, (af)) && \ + !PF_MATCHA(0, &(aw)->v.a.addr, \ + &(aw)->v.a.mask, (x), (af))))) != \ + (neg) \ ) #endif /* KERNEL */ struct pf_rule_uid { - uid_t uid[2]; - u_int8_t op; - u_int8_t _pad[3]; + uid_t uid[2]; + u_int8_t op; + u_int8_t _pad[3]; }; struct pf_rule_gid { - uid_t gid[2]; - u_int8_t op; - u_int8_t _pad[3]; + uid_t gid[2]; + u_int8_t op; + u_int8_t _pad[3]; }; struct pf_rule_addr { - struct pf_addr_wrap addr; - union pf_rule_xport xport; - u_int8_t neg; + struct pf_addr_wrap addr; + union pf_rule_xport xport; + u_int8_t neg; }; struct pf_pooladdr { - struct pf_addr_wrap 
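[Annotation] struct pf_addr above stores a 128-bit address with overlapping 8/16/32-bit views (addr8/addr16/addr32), which is what macros like PF_AINC rely on: the IPv4 variant shown above converts addr32[0] to host order, increments it, and stores it back in network order so the wire representation stays consistent. A stand-alone sketch of that increment, using a reduced union (only the IPv4 low word is touched, as in the PF_INET_ONLY case):

#include <arpa/inet.h>   /* htonl, ntohl */
#include <stdint.h>

struct pf_addr_sketch {
	union {
		uint8_t  addr8[16];
		uint16_t addr16[8];
		uint32_t addr32[4];
	} u;                 /* 128-bit address, three views */
};

/* IPv4 case of PF_AINC: bump the address by one, keeping wire byte order. */
static void
addr_inc_v4(struct pf_addr_sketch *a)
{
	a->u.addr32[0] = htonl(ntohl(a->u.addr32[0]) + 1);
}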
addr; - TAILQ_ENTRY(pf_pooladdr) entries; + struct pf_addr_wrap addr; + TAILQ_ENTRY(pf_pooladdr) entries; #if !defined(__LP64__) - u_int32_t _pad[2]; + u_int32_t _pad[2]; #endif /* !__LP64__ */ - char ifname[IFNAMSIZ]; + char ifname[IFNAMSIZ]; #ifdef KERNEL - struct pfi_kif *kif __attribute__((aligned(8))); + struct pfi_kif *kif __attribute__((aligned(8))); #else /* !KERNEL */ - void *kif __attribute__((aligned(8))); + void *kif __attribute__((aligned(8))); #endif /* !KERNEL */ }; @@ -521,55 +521,55 @@ TAILQ_HEAD(pf_palist, pf_pooladdr); struct pf_poolhashkey { union { - u_int8_t key8[16]; - u_int16_t key16[8]; - u_int32_t key32[4]; - } pfk; /* 128-bit hash key */ -#define key8 pfk.key8 -#define key16 pfk.key16 -#define key32 pfk.key32 + u_int8_t key8[16]; + u_int16_t key16[8]; + u_int32_t key32[4]; + } pfk; /* 128-bit hash key */ +#define key8 pfk.key8 +#define key16 pfk.key16 +#define key32 pfk.key32 }; struct pf_pool { - struct pf_palist list; + struct pf_palist list; #if !defined(__LP64__) - u_int32_t _pad[2]; + u_int32_t _pad[2]; #endif /* !__LP64__ */ #ifdef KERNEL - struct pf_pooladdr *cur __attribute__((aligned(8))); + struct pf_pooladdr *cur __attribute__((aligned(8))); #else /* !KERNEL */ - void *cur __attribute__((aligned(8))); + void *cur __attribute__((aligned(8))); #endif /* !KERNEL */ - struct pf_poolhashkey key __attribute__((aligned(8))); - struct pf_addr counter; - int tblidx; - u_int16_t proxy_port[2]; - u_int8_t port_op; - u_int8_t opts; - sa_family_t af; + struct pf_poolhashkey key __attribute__((aligned(8))); + struct pf_addr counter; + int tblidx; + u_int16_t proxy_port[2]; + u_int8_t port_op; + u_int8_t opts; + sa_family_t af; }; /* A packed Operating System description for fingerprinting */ typedef u_int32_t pf_osfp_t; -#define PF_OSFP_ANY ((pf_osfp_t)0) -#define PF_OSFP_UNKNOWN ((pf_osfp_t)-1) -#define PF_OSFP_NOMATCH ((pf_osfp_t)-2) +#define PF_OSFP_ANY ((pf_osfp_t)0) +#define PF_OSFP_UNKNOWN ((pf_osfp_t)-1) +#define PF_OSFP_NOMATCH ((pf_osfp_t)-2) struct pf_osfp_entry { SLIST_ENTRY(pf_osfp_entry) fp_entry; #if !defined(__LP64__) - u_int32_t _pad; + u_int32_t _pad; #endif /* !__LP64__ */ - pf_osfp_t fp_os; - int fp_enflags; -#define PF_OSFP_EXPANDED 0x001 /* expanded entry */ -#define PF_OSFP_GENERIC 0x002 /* generic signature */ -#define PF_OSFP_NODETAIL 0x004 /* no p0f details */ -#define PF_OSFP_LEN 32 - char fp_class_nm[PF_OSFP_LEN]; - char fp_version_nm[PF_OSFP_LEN]; - char fp_subtype_nm[PF_OSFP_LEN]; + pf_osfp_t fp_os; + int fp_enflags; +#define PF_OSFP_EXPANDED 0x001 /* expanded entry */ +#define PF_OSFP_GENERIC 0x002 /* generic signature */ +#define PF_OSFP_NODETAIL 0x004 /* no p0f details */ +#define PF_OSFP_LEN 32 + char fp_class_nm[PF_OSFP_LEN]; + char fp_version_nm[PF_OSFP_LEN]; + char fp_subtype_nm[PF_OSFP_LEN]; }; #define PF_OSFP_ENTRY_EQ(a, b) \ ((a)->fp_os == (b)->fp_os && \ @@ -578,11 +578,11 @@ struct pf_osfp_entry { memcmp((a)->fp_subtype_nm, (b)->fp_subtype_nm, PF_OSFP_LEN) == 0) /* handle pf_osfp_t packing */ -#define _FP_RESERVED_BIT 1 /* For the special negative #defines */ -#define _FP_UNUSED_BITS 1 -#define _FP_CLASS_BITS 10 /* OS Class (Windows, Linux) */ -#define _FP_VERSION_BITS 10 /* OS version (95, 98, NT, 2.4.54, 3.2) */ -#define _FP_SUBTYPE_BITS 10 /* patch level (NT SP4, SP3, ECN patch) */ +#define _FP_RESERVED_BIT 1 /* For the special negative #defines */ +#define _FP_UNUSED_BITS 1 +#define _FP_CLASS_BITS 10 /* OS Class (Windows, Linux) */ +#define _FP_VERSION_BITS 10 /* OS version (95, 98, NT, 2.4.54, 3.2) */ +#define 
_FP_SUBTYPE_BITS 10 /* patch level (NT SP4, SP3, ECN patch) */ #define PF_OSFP_UNPACK(osfp, class, version, subtype) do { \ (class) = ((osfp) >> (_FP_VERSION_BITS+_FP_SUBTYPE_BITS)) & \ ((1 << _FP_CLASS_BITS) - 1); \ @@ -599,346 +599,346 @@ struct pf_osfp_entry { } while (0) /* the fingerprint of an OSes TCP SYN packet */ -typedef u_int64_t pf_tcpopts_t; +typedef u_int64_t pf_tcpopts_t; struct pf_os_fingerprint { SLIST_HEAD(pf_osfp_enlist, pf_osfp_entry) fp_oses; /* list of matches */ - pf_tcpopts_t fp_tcpopts; /* packed TCP options */ - u_int16_t fp_wsize; /* TCP window size */ - u_int16_t fp_psize; /* ip->ip_len */ - u_int16_t fp_mss; /* TCP MSS */ - u_int16_t fp_flags; -#define PF_OSFP_WSIZE_MOD 0x0001 /* Window modulus */ -#define PF_OSFP_WSIZE_DC 0x0002 /* Window don't care */ -#define PF_OSFP_WSIZE_MSS 0x0004 /* Window multiple of MSS */ -#define PF_OSFP_WSIZE_MTU 0x0008 /* Window multiple of MTU */ -#define PF_OSFP_PSIZE_MOD 0x0010 /* packet size modulus */ -#define PF_OSFP_PSIZE_DC 0x0020 /* packet size don't care */ -#define PF_OSFP_WSCALE 0x0040 /* TCP window scaling */ -#define PF_OSFP_WSCALE_MOD 0x0080 /* TCP window scale modulus */ -#define PF_OSFP_WSCALE_DC 0x0100 /* TCP window scale dont-care */ -#define PF_OSFP_MSS 0x0200 /* TCP MSS */ -#define PF_OSFP_MSS_MOD 0x0400 /* TCP MSS modulus */ -#define PF_OSFP_MSS_DC 0x0800 /* TCP MSS dont-care */ -#define PF_OSFP_DF 0x1000 /* IPv4 don't fragment bit */ -#define PF_OSFP_TS0 0x2000 /* Zero timestamp */ -#define PF_OSFP_INET6 0x4000 /* IPv6 */ - u_int8_t fp_optcnt; /* TCP option count */ - u_int8_t fp_wscale; /* TCP window scaling */ - u_int8_t fp_ttl; /* IPv4 TTL */ -#define PF_OSFP_MAXTTL_OFFSET 40 + pf_tcpopts_t fp_tcpopts; /* packed TCP options */ + u_int16_t fp_wsize; /* TCP window size */ + u_int16_t fp_psize; /* ip->ip_len */ + u_int16_t fp_mss; /* TCP MSS */ + u_int16_t fp_flags; +#define PF_OSFP_WSIZE_MOD 0x0001 /* Window modulus */ +#define PF_OSFP_WSIZE_DC 0x0002 /* Window don't care */ +#define PF_OSFP_WSIZE_MSS 0x0004 /* Window multiple of MSS */ +#define PF_OSFP_WSIZE_MTU 0x0008 /* Window multiple of MTU */ +#define PF_OSFP_PSIZE_MOD 0x0010 /* packet size modulus */ +#define PF_OSFP_PSIZE_DC 0x0020 /* packet size don't care */ +#define PF_OSFP_WSCALE 0x0040 /* TCP window scaling */ +#define PF_OSFP_WSCALE_MOD 0x0080 /* TCP window scale modulus */ +#define PF_OSFP_WSCALE_DC 0x0100 /* TCP window scale dont-care */ +#define PF_OSFP_MSS 0x0200 /* TCP MSS */ +#define PF_OSFP_MSS_MOD 0x0400 /* TCP MSS modulus */ +#define PF_OSFP_MSS_DC 0x0800 /* TCP MSS dont-care */ +#define PF_OSFP_DF 0x1000 /* IPv4 don't fragment bit */ +#define PF_OSFP_TS0 0x2000 /* Zero timestamp */ +#define PF_OSFP_INET6 0x4000 /* IPv6 */ + u_int8_t fp_optcnt; /* TCP option count */ + u_int8_t fp_wscale; /* TCP window scaling */ + u_int8_t fp_ttl; /* IPv4 TTL */ +#define PF_OSFP_MAXTTL_OFFSET 40 /* TCP options packing */ -#define PF_OSFP_TCPOPT_NOP 0x0 /* TCP NOP option */ -#define PF_OSFP_TCPOPT_WSCALE 0x1 /* TCP window scaling option */ -#define PF_OSFP_TCPOPT_MSS 0x2 /* TCP max segment size opt */ -#define PF_OSFP_TCPOPT_SACK 0x3 /* TCP SACK OK option */ -#define PF_OSFP_TCPOPT_TS 0x4 /* TCP timestamp option */ -#define PF_OSFP_TCPOPT_BITS 3 /* bits used by each option */ +#define PF_OSFP_TCPOPT_NOP 0x0 /* TCP NOP option */ +#define PF_OSFP_TCPOPT_WSCALE 0x1 /* TCP window scaling option */ +#define PF_OSFP_TCPOPT_MSS 0x2 /* TCP max segment size opt */ +#define PF_OSFP_TCPOPT_SACK 0x3 /* TCP SACK OK option */ +#define PF_OSFP_TCPOPT_TS 0x4 /* TCP 
timestamp option */ +#define PF_OSFP_TCPOPT_BITS 3 /* bits used by each option */ #define PF_OSFP_MAX_OPTS \ (sizeof(((struct pf_os_fingerprint *)0)->fp_tcpopts) * 8) \ / PF_OSFP_TCPOPT_BITS - SLIST_ENTRY(pf_os_fingerprint) fp_next; + SLIST_ENTRY(pf_os_fingerprint) fp_next; }; struct pf_osfp_ioctl { - struct pf_osfp_entry fp_os; - pf_tcpopts_t fp_tcpopts; /* packed TCP options */ - u_int16_t fp_wsize; /* TCP window size */ - u_int16_t fp_psize; /* ip->ip_len */ - u_int16_t fp_mss; /* TCP MSS */ - u_int16_t fp_flags; - u_int8_t fp_optcnt; /* TCP option count */ - u_int8_t fp_wscale; /* TCP window scaling */ - u_int8_t fp_ttl; /* IPv4 TTL */ + struct pf_osfp_entry fp_os; + pf_tcpopts_t fp_tcpopts; /* packed TCP options */ + u_int16_t fp_wsize; /* TCP window size */ + u_int16_t fp_psize; /* ip->ip_len */ + u_int16_t fp_mss; /* TCP MSS */ + u_int16_t fp_flags; + u_int8_t fp_optcnt; /* TCP option count */ + u_int8_t fp_wscale; /* TCP window scaling */ + u_int8_t fp_ttl; /* IPv4 TTL */ - int fp_getnum; /* DIOCOSFPGET number */ + int fp_getnum; /* DIOCOSFPGET number */ }; union pf_rule_ptr { - struct pf_rule *ptr __attribute__((aligned(8))); - u_int32_t nr __attribute__((aligned(8))); + struct pf_rule *ptr __attribute__((aligned(8))); + u_int32_t nr __attribute__((aligned(8))); } __attribute__((aligned(8))); -#define PF_ANCHOR_NAME_SIZE 64 +#define PF_ANCHOR_NAME_SIZE 64 struct pf_rule { - struct pf_rule_addr src; - struct pf_rule_addr dst; -#define PF_SKIP_IFP 0 -#define PF_SKIP_DIR 1 -#define PF_SKIP_AF 2 -#define PF_SKIP_PROTO 3 -#define PF_SKIP_SRC_ADDR 4 -#define PF_SKIP_SRC_PORT 5 -#define PF_SKIP_DST_ADDR 6 -#define PF_SKIP_DST_PORT 7 -#define PF_SKIP_COUNT 8 - union pf_rule_ptr skip[PF_SKIP_COUNT]; -#define PF_RULE_LABEL_SIZE 64 - char label[PF_RULE_LABEL_SIZE]; -#define PF_QNAME_SIZE 64 - char ifname[IFNAMSIZ]; - char qname[PF_QNAME_SIZE]; - char pqname[PF_QNAME_SIZE]; -#define PF_TAG_NAME_SIZE 64 - char tagname[PF_TAG_NAME_SIZE]; - char match_tagname[PF_TAG_NAME_SIZE]; - - char overload_tblname[PF_TABLE_NAME_SIZE]; - - TAILQ_ENTRY(pf_rule) entries; + struct pf_rule_addr src; + struct pf_rule_addr dst; +#define PF_SKIP_IFP 0 +#define PF_SKIP_DIR 1 +#define PF_SKIP_AF 2 +#define PF_SKIP_PROTO 3 +#define PF_SKIP_SRC_ADDR 4 +#define PF_SKIP_SRC_PORT 5 +#define PF_SKIP_DST_ADDR 6 +#define PF_SKIP_DST_PORT 7 +#define PF_SKIP_COUNT 8 + union pf_rule_ptr skip[PF_SKIP_COUNT]; +#define PF_RULE_LABEL_SIZE 64 + char label[PF_RULE_LABEL_SIZE]; +#define PF_QNAME_SIZE 64 + char ifname[IFNAMSIZ]; + char qname[PF_QNAME_SIZE]; + char pqname[PF_QNAME_SIZE]; +#define PF_TAG_NAME_SIZE 64 + char tagname[PF_TAG_NAME_SIZE]; + char match_tagname[PF_TAG_NAME_SIZE]; + + char overload_tblname[PF_TABLE_NAME_SIZE]; + + TAILQ_ENTRY(pf_rule) entries; #if !defined(__LP64__) - u_int32_t _pad[2]; + u_int32_t _pad[2]; #endif /* !__LP64__ */ - struct pf_pool rpool; + struct pf_pool rpool; - u_int64_t evaluations; - u_int64_t packets[2]; - u_int64_t bytes[2]; + u_int64_t evaluations; + u_int64_t packets[2]; + u_int64_t bytes[2]; - u_int64_t ticket; -#define PF_OWNER_NAME_SIZE 64 - char owner[PF_OWNER_NAME_SIZE]; - u_int32_t priority; + u_int64_t ticket; +#define PF_OWNER_NAME_SIZE 64 + char owner[PF_OWNER_NAME_SIZE]; + u_int32_t priority; #ifdef KERNEL - struct pfi_kif *kif __attribute__((aligned(8))); + struct pfi_kif *kif __attribute__((aligned(8))); #else /* !KERNEL */ - void *kif __attribute__((aligned(8))); + void *kif __attribute__((aligned(8))); #endif /* !KERNEL */ - struct pf_anchor *anchor 
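Given the 10/10/10 bit widths defined above, PF_OSFP_UNPACK simply shifts the class, version and subtype fields out of a single 32-bit pf_osfp_t, class in the most significant field. A stand-alone equivalent of that unpacking (illustrative sketch only, assuming the same layout):

#include <stdint.h>

#define FP_CLASS_BITS	10
#define FP_VERSION_BITS	10
#define FP_SUBTYPE_BITS	10

static void
osfp_unpack(uint32_t osfp, unsigned *osclass, unsigned *version,
    unsigned *subtype)
{
	*osclass = (osfp >> (FP_VERSION_BITS + FP_SUBTYPE_BITS)) &
	    ((1u << FP_CLASS_BITS) - 1);
	*version = (osfp >> FP_SUBTYPE_BITS) & ((1u << FP_VERSION_BITS) - 1);
	*subtype = osfp & ((1u << FP_SUBTYPE_BITS) - 1);
}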
__attribute__((aligned(8))); + struct pf_anchor *anchor __attribute__((aligned(8))); #ifdef KERNEL - struct pfr_ktable *overload_tbl __attribute__((aligned(8))); + struct pfr_ktable *overload_tbl __attribute__((aligned(8))); #else /* !KERNEL */ - void *overload_tbl __attribute__((aligned(8))); + void *overload_tbl __attribute__((aligned(8))); #endif /* !KERNEL */ - pf_osfp_t os_fingerprint __attribute__((aligned(8))); + pf_osfp_t os_fingerprint __attribute__((aligned(8))); - unsigned int rtableid; - u_int32_t timeout[PFTM_MAX]; - u_int32_t states; - u_int32_t max_states; - u_int32_t src_nodes; - u_int32_t max_src_nodes; - u_int32_t max_src_states; - u_int32_t max_src_conn; + unsigned int rtableid; + u_int32_t timeout[PFTM_MAX]; + u_int32_t states; + u_int32_t max_states; + u_int32_t src_nodes; + u_int32_t max_src_nodes; + u_int32_t max_src_states; + u_int32_t max_src_conn; struct { - u_int32_t limit; - u_int32_t seconds; - } max_src_conn_rate; - u_int32_t qid; - u_int32_t pqid; - u_int32_t rt_listid; - u_int32_t nr; - u_int32_t prob; - uid_t cuid; - pid_t cpid; - - u_int16_t return_icmp; - u_int16_t return_icmp6; - u_int16_t max_mss; - u_int16_t tag; - u_int16_t match_tag; - - struct pf_rule_uid uid; - struct pf_rule_gid gid; - - u_int32_t rule_flag; - u_int8_t action; - u_int8_t direction; - u_int8_t log; - u_int8_t logif; - u_int8_t quick; - u_int8_t ifnot; - u_int8_t match_tag_not; - u_int8_t natpass; - -#define PF_STATE_NORMAL 0x1 -#define PF_STATE_MODULATE 0x2 -#define PF_STATE_SYNPROXY 0x3 - u_int8_t keep_state; - sa_family_t af; - u_int8_t proto; - u_int8_t type; - u_int8_t code; - u_int8_t flags; - u_int8_t flagset; - u_int8_t min_ttl; - u_int8_t allow_opts; - u_int8_t rt; - u_int8_t return_ttl; + u_int32_t limit; + u_int32_t seconds; + } max_src_conn_rate; + u_int32_t qid; + u_int32_t pqid; + u_int32_t rt_listid; + u_int32_t nr; + u_int32_t prob; + uid_t cuid; + pid_t cpid; + + u_int16_t return_icmp; + u_int16_t return_icmp6; + u_int16_t max_mss; + u_int16_t tag; + u_int16_t match_tag; + + struct pf_rule_uid uid; + struct pf_rule_gid gid; + + u_int32_t rule_flag; + u_int8_t action; + u_int8_t direction; + u_int8_t log; + u_int8_t logif; + u_int8_t quick; + u_int8_t ifnot; + u_int8_t match_tag_not; + u_int8_t natpass; + +#define PF_STATE_NORMAL 0x1 +#define PF_STATE_MODULATE 0x2 +#define PF_STATE_SYNPROXY 0x3 + u_int8_t keep_state; + sa_family_t af; + u_int8_t proto; + u_int8_t type; + u_int8_t code; + u_int8_t flags; + u_int8_t flagset; + u_int8_t min_ttl; + u_int8_t allow_opts; + u_int8_t rt; + u_int8_t return_ttl; /* service class categories */ -#define SCIDX_MASK 0x0f -#define SC_BE 0x10 -#define SC_BK_SYS 0x11 -#define SC_BK 0x12 -#define SC_RD 0x13 -#define SC_OAM 0x14 -#define SC_AV 0x15 -#define SC_RV 0x16 -#define SC_VI 0x17 -#define SC_SIG 0x17 -#define SC_VO 0x18 -#define SC_CTL 0x19 +#define SCIDX_MASK 0x0f +#define SC_BE 0x10 +#define SC_BK_SYS 0x11 +#define SC_BK 0x12 +#define SC_RD 0x13 +#define SC_OAM 0x14 +#define SC_AV 0x15 +#define SC_RV 0x16 +#define SC_VI 0x17 +#define SC_SIG 0x17 +#define SC_VO 0x18 +#define SC_CTL 0x19 /* diffserve code points */ -#define DSCP_MASK 0xfc -#define DSCP_CUMASK 0x03 -#define DSCP_EF 0xb8 -#define DSCP_AF11 0x28 -#define DSCP_AF12 0x30 -#define DSCP_AF13 0x38 -#define DSCP_AF21 0x48 -#define DSCP_AF22 0x50 -#define DSCP_AF23 0x58 -#define DSCP_AF31 0x68 -#define DSCP_AF32 0x70 -#define DSCP_AF33 0x78 -#define DSCP_AF41 0x88 -#define DSCP_AF42 0x90 -#define DSCP_AF43 0x98 -#define AF_CLASSMASK 0xe0 -#define AF_DROPPRECMASK 0x18 - 
u_int8_t tos; - u_int8_t anchor_relative; - u_int8_t anchor_wildcard; - -#define PF_FLUSH 0x01 -#define PF_FLUSH_GLOBAL 0x02 - u_int8_t flush; - - u_int8_t proto_variant; - u_int8_t extfilter; /* Filter mode [PF_EXTFILTER_xxx] */ - u_int8_t extmap; /* Mapping mode [PF_EXTMAP_xxx] */ +#define DSCP_MASK 0xfc +#define DSCP_CUMASK 0x03 +#define DSCP_EF 0xb8 +#define DSCP_AF11 0x28 +#define DSCP_AF12 0x30 +#define DSCP_AF13 0x38 +#define DSCP_AF21 0x48 +#define DSCP_AF22 0x50 +#define DSCP_AF23 0x58 +#define DSCP_AF31 0x68 +#define DSCP_AF32 0x70 +#define DSCP_AF33 0x78 +#define DSCP_AF41 0x88 +#define DSCP_AF42 0x90 +#define DSCP_AF43 0x98 +#define AF_CLASSMASK 0xe0 +#define AF_DROPPRECMASK 0x18 + u_int8_t tos; + u_int8_t anchor_relative; + u_int8_t anchor_wildcard; + +#define PF_FLUSH 0x01 +#define PF_FLUSH_GLOBAL 0x02 + u_int8_t flush; + + u_int8_t proto_variant; + u_int8_t extfilter; /* Filter mode [PF_EXTFILTER_xxx] */ + u_int8_t extmap; /* Mapping mode [PF_EXTMAP_xxx] */ u_int32_t dnpipe; u_int32_t dntype; }; /* pf device identifiers */ -#define PFDEV_PF 0 -#define PFDEV_PFM 1 -#define PFDEV_MAX 2 +#define PFDEV_PF 0 +#define PFDEV_PFM 1 +#define PFDEV_MAX 2 /* rule flags */ -#define PFRULE_DROP 0x0000 -#define PFRULE_RETURNRST 0x0001 -#define PFRULE_FRAGMENT 0x0002 -#define PFRULE_RETURNICMP 0x0004 -#define PFRULE_RETURN 0x0008 -#define PFRULE_NOSYNC 0x0010 -#define PFRULE_SRCTRACK 0x0020 /* track source states */ -#define PFRULE_RULESRCTRACK 0x0040 /* per rule */ +#define PFRULE_DROP 0x0000 +#define PFRULE_RETURNRST 0x0001 +#define PFRULE_FRAGMENT 0x0002 +#define PFRULE_RETURNICMP 0x0004 +#define PFRULE_RETURN 0x0008 +#define PFRULE_NOSYNC 0x0010 +#define PFRULE_SRCTRACK 0x0020 /* track source states */ +#define PFRULE_RULESRCTRACK 0x0040 /* per rule */ /* scrub flags */ -#define PFRULE_NODF 0x0100 -#define PFRULE_FRAGCROP 0x0200 /* non-buffering frag cache */ -#define PFRULE_FRAGDROP 0x0400 /* drop funny fragments */ -#define PFRULE_RANDOMID 0x0800 -#define PFRULE_REASSEMBLE_TCP 0x1000 +#define PFRULE_NODF 0x0100 +#define PFRULE_FRAGCROP 0x0200 /* non-buffering frag cache */ +#define PFRULE_FRAGDROP 0x0400 /* drop funny fragments */ +#define PFRULE_RANDOMID 0x0800 +#define PFRULE_REASSEMBLE_TCP 0x1000 /* rule flags for TOS/DSCP/service class differentiation */ -#define PFRULE_TOS 0x2000 -#define PFRULE_DSCP 0x4000 -#define PFRULE_SC 0x8000 +#define PFRULE_TOS 0x2000 +#define PFRULE_DSCP 0x4000 +#define PFRULE_SC 0x8000 /* rule flags again */ -#define PFRULE_IFBOUND 0x00010000 /* if-bound */ -#define PFRULE_PFM 0x00020000 /* created by pfm device */ +#define PFRULE_IFBOUND 0x00010000 /* if-bound */ +#define PFRULE_PFM 0x00020000 /* created by pfm device */ -#define PFSTATE_HIWAT 10000 /* default state table size */ -#define PFSTATE_ADAPT_START 6000 /* default adaptive timeout start */ -#define PFSTATE_ADAPT_END 12000 /* default adaptive timeout end */ +#define PFSTATE_HIWAT 10000 /* default state table size */ +#define PFSTATE_ADAPT_START 6000 /* default adaptive timeout start */ +#define PFSTATE_ADAPT_END 12000 /* default adaptive timeout end */ -#define PFAPPSTATE_HIWAT 10000 /* default same as state table */ +#define PFAPPSTATE_HIWAT 10000 /* default same as state table */ enum pf_extmap { - PF_EXTMAP_APD = 1, /* Address-port-dependent mapping */ - PF_EXTMAP_AD, /* Address-dependent mapping */ - PF_EXTMAP_EI /* Endpoint-independent mapping */ + PF_EXTMAP_APD = 1, /* Address-port-dependent mapping */ + PF_EXTMAP_AD, /* Address-dependent mapping */ + PF_EXTMAP_EI /* 
Endpoint-independent mapping */ }; enum pf_extfilter { - PF_EXTFILTER_APD = 1, /* Address-port-dependent filtering */ - PF_EXTFILTER_AD, /* Address-dependent filtering */ - PF_EXTFILTER_EI /* Endpoint-independent filtering */ + PF_EXTFILTER_APD = 1, /* Address-port-dependent filtering */ + PF_EXTFILTER_AD, /* Address-dependent filtering */ + PF_EXTFILTER_EI /* Endpoint-independent filtering */ }; struct pf_threshold { - u_int32_t limit; -#define PF_THRESHOLD_MULT 1000 -#define PF_THRESHOLD_MAX 0xffffffff / PF_THRESHOLD_MULT - u_int32_t seconds; - u_int32_t count; - u_int32_t last; + u_int32_t limit; +#define PF_THRESHOLD_MULT 1000 +#define PF_THRESHOLD_MAX 0xffffffff / PF_THRESHOLD_MULT + u_int32_t seconds; + u_int32_t count; + u_int32_t last; }; struct pf_src_node { RB_ENTRY(pf_src_node) entry; - struct pf_addr addr; - struct pf_addr raddr; + struct pf_addr addr; + struct pf_addr raddr; union pf_rule_ptr rule; #ifdef KERNEL - struct pfi_kif *kif; + struct pfi_kif *kif; #else /* !KERNEL */ - void *kif; + void *kif; #endif /* !KERNEL */ - u_int64_t bytes[2]; - u_int64_t packets[2]; - u_int32_t states; - u_int32_t conn; - struct pf_threshold conn_rate; - u_int64_t creation; - u_int64_t expire; - sa_family_t af; - u_int8_t ruletype; + u_int64_t bytes[2]; + u_int64_t packets[2]; + u_int32_t states; + u_int32_t conn; + struct pf_threshold conn_rate; + u_int64_t creation; + u_int64_t expire; + sa_family_t af; + u_int8_t ruletype; }; -#define PFSNODE_HIWAT 10000 /* default source node table size */ +#define PFSNODE_HIWAT 10000 /* default source node table size */ #ifdef KERNEL struct pf_state_scrub { - struct timeval pfss_last; /* time received last packet */ - u_int32_t pfss_tsecr; /* last echoed timestamp */ - u_int32_t pfss_tsval; /* largest timestamp */ - u_int32_t pfss_tsval0; /* original timestamp */ - u_int16_t pfss_flags; -#define PFSS_TIMESTAMP 0x0001 /* modulate timestamp */ -#define PFSS_PAWS 0x0010 /* stricter PAWS checks */ -#define PFSS_PAWS_IDLED 0x0020 /* was idle too long. no PAWS */ -#define PFSS_DATA_TS 0x0040 /* timestamp on data packets */ -#define PFSS_DATA_NOTS 0x0080 /* no timestamp on data packets */ - u_int8_t pfss_ttl; /* stashed TTL */ - u_int8_t pad; - u_int32_t pfss_ts_mod; /* timestamp modulation */ + struct timeval pfss_last; /* time received last packet */ + u_int32_t pfss_tsecr; /* last echoed timestamp */ + u_int32_t pfss_tsval; /* largest timestamp */ + u_int32_t pfss_tsval0; /* original timestamp */ + u_int16_t pfss_flags; +#define PFSS_TIMESTAMP 0x0001 /* modulate timestamp */ +#define PFSS_PAWS 0x0010 /* stricter PAWS checks */ +#define PFSS_PAWS_IDLED 0x0020 /* was idle too long. 
no PAWS */ +#define PFSS_DATA_TS 0x0040 /* timestamp on data packets */ +#define PFSS_DATA_NOTS 0x0080 /* no timestamp on data packets */ + u_int8_t pfss_ttl; /* stashed TTL */ + u_int8_t pad; + u_int32_t pfss_ts_mod; /* timestamp modulation */ }; #endif /* KERNEL */ union pf_state_xport { - u_int16_t port; - u_int16_t call_id; - u_int32_t spi; + u_int16_t port; + u_int16_t call_id; + u_int32_t spi; }; struct pf_state_host { - struct pf_addr addr; - union pf_state_xport xport; + struct pf_addr addr; + union pf_state_xport xport; }; #ifdef KERNEL struct pf_state_peer { - u_int32_t seqlo; /* Max sequence number sent */ - u_int32_t seqhi; /* Max the other end ACKd + win */ - u_int32_t seqdiff; /* Sequence number modulator */ - u_int16_t max_win; /* largest window (pre scaling) */ - u_int8_t state; /* active state level */ - u_int8_t wscale; /* window scaling factor */ - u_int16_t mss; /* Maximum segment size option */ - u_int8_t tcp_est; /* Did we reach TCPS_ESTABLISHED */ - struct pf_state_scrub *scrub; /* state is scrubbed */ - u_int8_t pad[3]; + u_int32_t seqlo; /* Max sequence number sent */ + u_int32_t seqhi; /* Max the other end ACKd + win */ + u_int32_t seqdiff; /* Sequence number modulator */ + u_int16_t max_win; /* largest window (pre scaling) */ + u_int8_t state; /* active state level */ + u_int8_t wscale; /* window scaling factor */ + u_int16_t mss; /* Maximum segment size option */ + u_int8_t tcp_est; /* Did we reach TCPS_ESTABLISHED */ + struct pf_state_scrub *scrub; /* state is scrubbed */ + u_int8_t pad[3]; }; TAILQ_HEAD(pf_state_queue, pf_state); @@ -948,7 +948,7 @@ struct pf_pdesc; struct pf_app_state; typedef void (*pf_app_handler)(struct pf_state *, int, int, struct pf_pdesc *, - struct pfi_kif *); + struct pfi_kif *); typedef int (*pf_app_compare)(struct pf_app_state *, struct pf_app_state *); @@ -965,9 +965,9 @@ struct pf_ike_state { }; struct pf_app_state { - pf_app_handler handler; - pf_app_compare compare_lan_ext; - pf_app_compare compare_ext_gwy; + pf_app_handler handler; + pf_app_compare compare_lan_ext; + pf_app_compare compare_ext_gwy; union { struct pf_pptp_state pptp; struct pf_grev1_state grev1; @@ -981,12 +981,12 @@ struct pf_state_key_cmp { struct pf_state_host gwy; struct pf_state_host ext_lan; struct pf_state_host ext_gwy; - sa_family_t af_lan; - sa_family_t af_gwy; - u_int8_t proto; - u_int8_t direction; - u_int8_t proto_variant; - struct pf_app_state *app_state; + sa_family_t af_lan; + sa_family_t af_gwy; + u_int8_t proto; + u_int8_t direction; + u_int8_t proto_variant; + struct pf_app_state *app_state; }; TAILQ_HEAD(pf_statelist, pf_state); @@ -996,35 +996,35 @@ struct pf_state_key { struct pf_state_host gwy; struct pf_state_host ext_lan; struct pf_state_host ext_gwy; - sa_family_t af_lan; - sa_family_t af_gwy; - u_int8_t proto; - u_int8_t direction; - u_int8_t proto_variant; - struct pf_app_state *app_state; - u_int32_t flowsrc; - u_int32_t flowhash; + sa_family_t af_lan; + sa_family_t af_gwy; + u_int8_t proto; + u_int8_t direction; + u_int8_t proto_variant; + struct pf_app_state *app_state; + u_int32_t flowsrc; + u_int32_t flowhash; - RB_ENTRY(pf_state_key) entry_lan_ext; - RB_ENTRY(pf_state_key) entry_ext_gwy; - struct pf_statelist states; - u_int32_t refcnt; + RB_ENTRY(pf_state_key) entry_lan_ext; + RB_ENTRY(pf_state_key) entry_ext_gwy; + struct pf_statelist states; + u_int32_t refcnt; }; /* keep synced with struct pf_state, used in RB_FIND */ struct pf_state_cmp { - u_int64_t id; - u_int32_t creatorid; - u_int32_t pad; + u_int64_t id; + u_int32_t 
creatorid; + u_int32_t pad; }; /* flowhash key (12-bytes multiple for performance) */ struct pf_flowhash_key { - struct pf_state_host ap1; /* address+port blob 1 */ - struct pf_state_host ap2; /* address+port blob 2 */ - u_int32_t af; - u_int32_t proto; + struct pf_state_host ap1; /* address+port blob 1 */ + struct pf_state_host ap2; /* address+port blob 2 */ + u_int32_t af; + u_int32_t proto; }; #endif /* KERNEL */ @@ -1033,159 +1033,159 @@ TAILQ_HEAD(hook_desc_head, hook_desc); #ifdef KERNEL struct pf_state { - u_int64_t id; - u_int32_t creatorid; - u_int32_t pad; - - TAILQ_ENTRY(pf_state) entry_list; - TAILQ_ENTRY(pf_state) next; - RB_ENTRY(pf_state) entry_id; - struct pf_state_peer src; - struct pf_state_peer dst; - union pf_rule_ptr rule; - union pf_rule_ptr anchor; - union pf_rule_ptr nat_rule; - struct pf_addr rt_addr; - struct hook_desc_head unlink_hooks; - struct pf_state_key *state_key; - struct pfi_kif *kif; - struct pfi_kif *rt_kif; - struct pf_src_node *src_node; - struct pf_src_node *nat_src_node; - u_int64_t packets[2]; - u_int64_t bytes[2]; - u_int64_t creation; - u_int64_t expire; - u_int64_t pfsync_time; - u_int16_t tag; - u_int8_t log; - u_int8_t allow_opts; - u_int8_t timeout; - u_int8_t sync_flags; + u_int64_t id; + u_int32_t creatorid; + u_int32_t pad; + + TAILQ_ENTRY(pf_state) entry_list; + TAILQ_ENTRY(pf_state) next; + RB_ENTRY(pf_state) entry_id; + struct pf_state_peer src; + struct pf_state_peer dst; + union pf_rule_ptr rule; + union pf_rule_ptr anchor; + union pf_rule_ptr nat_rule; + struct pf_addr rt_addr; + struct hook_desc_head unlink_hooks; + struct pf_state_key *state_key; + struct pfi_kif *kif; + struct pfi_kif *rt_kif; + struct pf_src_node *src_node; + struct pf_src_node *nat_src_node; + u_int64_t packets[2]; + u_int64_t bytes[2]; + u_int64_t creation; + u_int64_t expire; + u_int64_t pfsync_time; + u_int16_t tag; + u_int8_t log; + u_int8_t allow_opts; + u_int8_t timeout; + u_int8_t sync_flags; }; #endif /* KERNEL */ -#define PFSTATE_NOSYNC 0x01 -#define PFSTATE_FROMSYNC 0x02 -#define PFSTATE_STALE 0x04 +#define PFSTATE_NOSYNC 0x01 +#define PFSTATE_FROMSYNC 0x02 +#define PFSTATE_STALE 0x04 -#define __packed __attribute__((__packed__)) +#define __packed __attribute__((__packed__)) /* * Unified state structures for pulling states out of the kernel * used by pfsync(4) and the pf(4) ioctl. 
*/ struct pfsync_state_scrub { - u_int16_t pfss_flags; - u_int8_t pfss_ttl; /* stashed TTL */ -#define PFSYNC_SCRUB_FLAG_VALID 0x01 - u_int8_t scrub_flag; - u_int32_t pfss_ts_mod; /* timestamp modulation */ + u_int16_t pfss_flags; + u_int8_t pfss_ttl; /* stashed TTL */ +#define PFSYNC_SCRUB_FLAG_VALID 0x01 + u_int8_t scrub_flag; + u_int32_t pfss_ts_mod; /* timestamp modulation */ } __packed; struct pfsync_state_host { - struct pf_addr addr; - union pf_state_xport xport; - u_int16_t pad[2]; + struct pf_addr addr; + union pf_state_xport xport; + u_int16_t pad[2]; } __packed; struct pfsync_state_peer { - struct pfsync_state_scrub scrub; /* state is scrubbed */ - u_int32_t seqlo; /* Max sequence number sent */ - u_int32_t seqhi; /* Max the other end ACKd + win */ - u_int32_t seqdiff; /* Sequence number modulator */ - u_int16_t max_win; /* largest window (pre scaling) */ - u_int16_t mss; /* Maximum segment size option */ - u_int8_t state; /* active state level */ - u_int8_t wscale; /* window scaling factor */ - u_int8_t pad[6]; + struct pfsync_state_scrub scrub; /* state is scrubbed */ + u_int32_t seqlo; /* Max sequence number sent */ + u_int32_t seqhi; /* Max the other end ACKd + win */ + u_int32_t seqdiff; /* Sequence number modulator */ + u_int16_t max_win; /* largest window (pre scaling) */ + u_int16_t mss; /* Maximum segment size option */ + u_int8_t state; /* active state level */ + u_int8_t wscale; /* window scaling factor */ + u_int8_t pad[6]; } __packed; struct pfsync_state { - u_int32_t id[2]; - char ifname[IFNAMSIZ]; + u_int32_t id[2]; + char ifname[IFNAMSIZ]; struct pfsync_state_host lan; struct pfsync_state_host gwy; struct pfsync_state_host ext_lan; struct pfsync_state_host ext_gwy; struct pfsync_state_peer src; struct pfsync_state_peer dst; - struct pf_addr rt_addr; + struct pf_addr rt_addr; struct hook_desc_head unlink_hooks; #if !defined(__LP64__) - u_int32_t _pad[2]; + u_int32_t _pad[2]; #endif /* !__LP64__ */ - u_int32_t rule; - u_int32_t anchor; - u_int32_t nat_rule; - u_int64_t creation; - u_int64_t expire; - u_int32_t packets[2][2]; - u_int32_t bytes[2][2]; - u_int32_t creatorid; - u_int16_t tag; - sa_family_t af_lan; - sa_family_t af_gwy; - u_int8_t proto; - u_int8_t direction; - u_int8_t log; - u_int8_t allow_opts; - u_int8_t timeout; - u_int8_t sync_flags; - u_int8_t updates; - u_int8_t proto_variant; - u_int8_t __pad; - u_int32_t flowhash; + u_int32_t rule; + u_int32_t anchor; + u_int32_t nat_rule; + u_int64_t creation; + u_int64_t expire; + u_int32_t packets[2][2]; + u_int32_t bytes[2][2]; + u_int32_t creatorid; + u_int16_t tag; + sa_family_t af_lan; + sa_family_t af_gwy; + u_int8_t proto; + u_int8_t direction; + u_int8_t log; + u_int8_t allow_opts; + u_int8_t timeout; + u_int8_t sync_flags; + u_int8_t updates; + u_int8_t proto_variant; + u_int8_t __pad; + u_int32_t flowhash; } __packed; -#define PFSYNC_FLAG_COMPRESS 0x01 -#define PFSYNC_FLAG_STALE 0x02 -#define PFSYNC_FLAG_SRCNODE 0x04 -#define PFSYNC_FLAG_NATSRCNODE 0x08 +#define PFSYNC_FLAG_COMPRESS 0x01 +#define PFSYNC_FLAG_STALE 0x02 +#define PFSYNC_FLAG_SRCNODE 0x04 +#define PFSYNC_FLAG_NATSRCNODE 0x08 #ifdef KERNEL /* for copies to/from userland via pf_ioctl() */ -#define pf_state_peer_to_pfsync(s, d) do { \ - (d)->seqlo = (s)->seqlo; \ - (d)->seqhi = (s)->seqhi; \ - (d)->seqdiff = (s)->seqdiff; \ - (d)->max_win = (s)->max_win; \ - (d)->mss = (s)->mss; \ - (d)->state = (s)->state; \ - (d)->wscale = (s)->wscale; \ - if ((s)->scrub) { \ - (d)->scrub.pfss_flags = \ - (s)->scrub->pfss_flags & PFSS_TIMESTAMP; \ - 
(d)->scrub.pfss_ttl = (s)->scrub->pfss_ttl; \ - (d)->scrub.pfss_ts_mod = (s)->scrub->pfss_ts_mod; \ - (d)->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID; \ - } \ +#define pf_state_peer_to_pfsync(s, d) do { \ + (d)->seqlo = (s)->seqlo; \ + (d)->seqhi = (s)->seqhi; \ + (d)->seqdiff = (s)->seqdiff; \ + (d)->max_win = (s)->max_win; \ + (d)->mss = (s)->mss; \ + (d)->state = (s)->state; \ + (d)->wscale = (s)->wscale; \ + if ((s)->scrub) { \ + (d)->scrub.pfss_flags = \ + (s)->scrub->pfss_flags & PFSS_TIMESTAMP; \ + (d)->scrub.pfss_ttl = (s)->scrub->pfss_ttl; \ + (d)->scrub.pfss_ts_mod = (s)->scrub->pfss_ts_mod; \ + (d)->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID; \ + } \ } while (0) -#define pf_state_peer_from_pfsync(s, d) do { \ - (d)->seqlo = (s)->seqlo; \ - (d)->seqhi = (s)->seqhi; \ - (d)->seqdiff = (s)->seqdiff; \ - (d)->max_win = (s)->max_win; \ - (d)->mss = ntohs((s)->mss); \ - (d)->state = (s)->state; \ - (d)->wscale = (s)->wscale; \ - if ((s)->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID && \ - (d)->scrub != NULL) { \ - (d)->scrub->pfss_flags = \ - ntohs((s)->scrub.pfss_flags) & PFSS_TIMESTAMP; \ - (d)->scrub->pfss_ttl = (s)->scrub.pfss_ttl; \ - (d)->scrub->pfss_ts_mod = (s)->scrub.pfss_ts_mod; \ - } \ +#define pf_state_peer_from_pfsync(s, d) do { \ + (d)->seqlo = (s)->seqlo; \ + (d)->seqhi = (s)->seqhi; \ + (d)->seqdiff = (s)->seqdiff; \ + (d)->max_win = (s)->max_win; \ + (d)->mss = ntohs((s)->mss); \ + (d)->state = (s)->state; \ + (d)->wscale = (s)->wscale; \ + if ((s)->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID && \ + (d)->scrub != NULL) { \ + (d)->scrub->pfss_flags = \ + ntohs((s)->scrub.pfss_flags) & PFSS_TIMESTAMP; \ + (d)->scrub->pfss_ttl = (s)->scrub.pfss_ttl; \ + (d)->scrub->pfss_ts_mod = (s)->scrub.pfss_ts_mod; \ + } \ } while (0) #endif /* KERNEL */ -#define pf_state_counter_to_pfsync(s, d) do { \ - d[0] = (s>>32)&0xffffffff; \ - d[1] = s&0xffffffff; \ +#define pf_state_counter_to_pfsync(s, d) do { \ + d[0] = (s>>32)&0xffffffff; \ + d[1] = s&0xffffffff; \ } while (0) -#define pf_state_counter_from_pfsync(s) \ +#define pf_state_counter_from_pfsync(s) \ (((u_int64_t)(s[0])<<32) | (u_int64_t)(s[1])) @@ -1196,34 +1196,34 @@ struct pf_anchor; struct pf_ruleset { struct { - struct pf_rulequeue queues[2]; + struct pf_rulequeue queues[2]; struct { - struct pf_rulequeue *ptr; - struct pf_rule **ptr_array; - u_int32_t rcount; - u_int32_t ticket; - int open; - } active, inactive; - } rules[PF_RULESET_MAX]; - struct pf_anchor *anchor; - u_int32_t tticket; - int tables; - int topen; + struct pf_rulequeue *ptr; + struct pf_rule **ptr_array; + u_int32_t rcount; + u_int32_t ticket; + int open; + } active, inactive; + } rules[PF_RULESET_MAX]; + struct pf_anchor *anchor; + u_int32_t tticket; + int tables; + int topen; }; RB_HEAD(pf_anchor_global, pf_anchor); RB_HEAD(pf_anchor_node, pf_anchor); struct pf_anchor { - RB_ENTRY(pf_anchor) entry_global; - RB_ENTRY(pf_anchor) entry_node; - struct pf_anchor *parent; - struct pf_anchor_node children; - char name[PF_ANCHOR_NAME_SIZE]; - char path[MAXPATHLEN]; - struct pf_ruleset ruleset; - int refcnt; /* anchor rules */ - int match; - char owner[PF_OWNER_NAME_SIZE]; + RB_ENTRY(pf_anchor) entry_global; + RB_ENTRY(pf_anchor) entry_node; + struct pf_anchor *parent; + struct pf_anchor_node children; + char name[PF_ANCHOR_NAME_SIZE]; + char path[MAXPATHLEN]; + struct pf_ruleset ruleset; + int refcnt; /* anchor rules */ + int match; + char owner[PF_OWNER_NAME_SIZE]; }; #ifdef KERNEL RB_PROTOTYPE_SC(__private_extern__, pf_anchor_global, pf_anchor, entry_global, @@ 
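The pf_state_counter_to_pfsync()/..._from_pfsync() macros above split each 64-bit counter into two 32-bit words, high word first, to match the packed pfsync_state wire format. A stand-alone equivalent (illustrative sketch only):

#include <stdint.h>

static void
counter_to_pfsync(uint64_t s, uint32_t d[2])
{
	d[0] = (uint32_t)(s >> 32);		/* high word first */
	d[1] = (uint32_t)(s & 0xffffffff);
}

static uint64_t
counter_from_pfsync(const uint32_t s[2])
{
	return ((uint64_t)s[0] << 32) | (uint64_t)s[1];
}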
-1235,116 +1235,116 @@ RB_PROTOTYPE(pf_anchor_global, pf_anchor, entry_global, pf_anchor_compare); RB_PROTOTYPE(pf_anchor_node, pf_anchor, entry_node, pf_anchor_compare); #endif /* !KERNEL */ -#define PF_RESERVED_ANCHOR "_pf" +#define PF_RESERVED_ANCHOR "_pf" -#define PFR_TFLAG_PERSIST 0x00000001 -#define PFR_TFLAG_CONST 0x00000002 -#define PFR_TFLAG_ACTIVE 0x00000004 -#define PFR_TFLAG_INACTIVE 0x00000008 -#define PFR_TFLAG_REFERENCED 0x00000010 -#define PFR_TFLAG_REFDANCHOR 0x00000020 -#define PFR_TFLAG_USRMASK 0x00000003 -#define PFR_TFLAG_SETMASK 0x0000003C -#define PFR_TFLAG_ALLMASK 0x0000003F +#define PFR_TFLAG_PERSIST 0x00000001 +#define PFR_TFLAG_CONST 0x00000002 +#define PFR_TFLAG_ACTIVE 0x00000004 +#define PFR_TFLAG_INACTIVE 0x00000008 +#define PFR_TFLAG_REFERENCED 0x00000010 +#define PFR_TFLAG_REFDANCHOR 0x00000020 +#define PFR_TFLAG_USRMASK 0x00000003 +#define PFR_TFLAG_SETMASK 0x0000003C +#define PFR_TFLAG_ALLMASK 0x0000003F struct pfr_table { - char pfrt_anchor[MAXPATHLEN]; - char pfrt_name[PF_TABLE_NAME_SIZE]; - u_int32_t pfrt_flags; - u_int8_t pfrt_fback; + char pfrt_anchor[MAXPATHLEN]; + char pfrt_name[PF_TABLE_NAME_SIZE]; + u_int32_t pfrt_flags; + u_int8_t pfrt_fback; }; enum { PFR_FB_NONE, PFR_FB_MATCH, PFR_FB_ADDED, PFR_FB_DELETED, - PFR_FB_CHANGED, PFR_FB_CLEARED, PFR_FB_DUPLICATE, - PFR_FB_NOTMATCH, PFR_FB_CONFLICT, PFR_FB_MAX }; + PFR_FB_CHANGED, PFR_FB_CLEARED, PFR_FB_DUPLICATE, + PFR_FB_NOTMATCH, PFR_FB_CONFLICT, PFR_FB_MAX }; struct pfr_addr { union { - struct in_addr _pfra_ip4addr; - struct in6_addr _pfra_ip6addr; - } pfra_u; - u_int8_t pfra_af; - u_int8_t pfra_net; - u_int8_t pfra_not; - u_int8_t pfra_fback; + struct in_addr _pfra_ip4addr; + struct in6_addr _pfra_ip6addr; + } pfra_u; + u_int8_t pfra_af; + u_int8_t pfra_net; + u_int8_t pfra_not; + u_int8_t pfra_fback; }; -#define pfra_ip4addr pfra_u._pfra_ip4addr -#define pfra_ip6addr pfra_u._pfra_ip6addr +#define pfra_ip4addr pfra_u._pfra_ip4addr +#define pfra_ip6addr pfra_u._pfra_ip6addr enum { PFR_DIR_IN, PFR_DIR_OUT, PFR_DIR_MAX }; enum { PFR_OP_BLOCK, PFR_OP_PASS, PFR_OP_ADDR_MAX, PFR_OP_TABLE_MAX }; -#define PFR_OP_XPASS PFR_OP_ADDR_MAX +#define PFR_OP_XPASS PFR_OP_ADDR_MAX struct pfr_astats { - struct pfr_addr pfras_a; + struct pfr_addr pfras_a; #if !defined(__LP64__) - u_int32_t _pad; + u_int32_t _pad; #endif /* !__LP64__ */ - u_int64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; - u_int64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; - u_int64_t pfras_tzero; + u_int64_t pfras_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + u_int64_t pfras_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + u_int64_t pfras_tzero; }; enum { PFR_REFCNT_RULE, PFR_REFCNT_ANCHOR, PFR_REFCNT_MAX }; struct pfr_tstats { struct pfr_table pfrts_t; - u_int64_t pfrts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; - u_int64_t pfrts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; - u_int64_t pfrts_match; - u_int64_t pfrts_nomatch; - u_int64_t pfrts_tzero; - int pfrts_cnt; - int pfrts_refcnt[PFR_REFCNT_MAX]; + u_int64_t pfrts_packets[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; + u_int64_t pfrts_bytes[PFR_DIR_MAX][PFR_OP_TABLE_MAX]; + u_int64_t pfrts_match; + u_int64_t pfrts_nomatch; + u_int64_t pfrts_tzero; + int pfrts_cnt; + int pfrts_refcnt[PFR_REFCNT_MAX]; #if !defined(__LP64__) - u_int32_t _pad; + u_int32_t _pad; #endif /* !__LP64__ */ }; -#define pfrts_name pfrts_t.pfrt_name -#define pfrts_flags pfrts_t.pfrt_flags +#define pfrts_name pfrts_t.pfrt_name +#define pfrts_flags pfrts_t.pfrt_flags #ifdef KERNEL SLIST_HEAD(pfr_kentryworkq, pfr_kentry); struct pfr_kentry { - struct 
radix_node pfrke_node[2]; - union sockaddr_union pfrke_sa; - u_int64_t pfrke_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; - u_int64_t pfrke_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; - SLIST_ENTRY(pfr_kentry) pfrke_workq; - u_int64_t pfrke_tzero; - u_int8_t pfrke_af; - u_int8_t pfrke_net; - u_int8_t pfrke_not; - u_int8_t pfrke_mark; - u_int8_t pfrke_intrpool; + struct radix_node pfrke_node[2]; + union sockaddr_union pfrke_sa; + u_int64_t pfrke_packets[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + u_int64_t pfrke_bytes[PFR_DIR_MAX][PFR_OP_ADDR_MAX]; + SLIST_ENTRY(pfr_kentry) pfrke_workq; + u_int64_t pfrke_tzero; + u_int8_t pfrke_af; + u_int8_t pfrke_net; + u_int8_t pfrke_not; + u_int8_t pfrke_mark; + u_int8_t pfrke_intrpool; }; SLIST_HEAD(pfr_ktableworkq, pfr_ktable); RB_HEAD(pfr_ktablehead, pfr_ktable); struct pfr_ktable { - struct pfr_tstats pfrkt_ts; - RB_ENTRY(pfr_ktable) pfrkt_tree; - SLIST_ENTRY(pfr_ktable) pfrkt_workq; - struct radix_node_head *pfrkt_ip4; - struct radix_node_head *pfrkt_ip6; - struct pfr_ktable *pfrkt_shadow; - struct pfr_ktable *pfrkt_root; - struct pf_ruleset *pfrkt_rs; - u_int64_t pfrkt_larg; - u_int32_t pfrkt_nflags; -}; -#define pfrkt_t pfrkt_ts.pfrts_t -#define pfrkt_name pfrkt_t.pfrt_name -#define pfrkt_anchor pfrkt_t.pfrt_anchor -#define pfrkt_ruleset pfrkt_t.pfrt_ruleset -#define pfrkt_flags pfrkt_t.pfrt_flags -#define pfrkt_cnt pfrkt_ts.pfrts_cnt -#define pfrkt_refcnt pfrkt_ts.pfrts_refcnt -#define pfrkt_packets pfrkt_ts.pfrts_packets -#define pfrkt_bytes pfrkt_ts.pfrts_bytes -#define pfrkt_match pfrkt_ts.pfrts_match -#define pfrkt_nomatch pfrkt_ts.pfrts_nomatch -#define pfrkt_tzero pfrkt_ts.pfrts_tzero + struct pfr_tstats pfrkt_ts; + RB_ENTRY(pfr_ktable) pfrkt_tree; + SLIST_ENTRY(pfr_ktable) pfrkt_workq; + struct radix_node_head *pfrkt_ip4; + struct radix_node_head *pfrkt_ip6; + struct pfr_ktable *pfrkt_shadow; + struct pfr_ktable *pfrkt_root; + struct pf_ruleset *pfrkt_rs; + u_int64_t pfrkt_larg; + u_int32_t pfrkt_nflags; +}; +#define pfrkt_t pfrkt_ts.pfrts_t +#define pfrkt_name pfrkt_t.pfrt_name +#define pfrkt_anchor pfrkt_t.pfrt_anchor +#define pfrkt_ruleset pfrkt_t.pfrt_ruleset +#define pfrkt_flags pfrkt_t.pfrt_flags +#define pfrkt_cnt pfrkt_ts.pfrts_cnt +#define pfrkt_refcnt pfrkt_ts.pfrts_refcnt +#define pfrkt_packets pfrkt_ts.pfrts_packets +#define pfrkt_bytes pfrkt_ts.pfrts_bytes +#define pfrkt_match pfrkt_ts.pfrts_match +#define pfrkt_nomatch pfrkt_ts.pfrts_nomatch +#define pfrkt_tzero pfrkt_ts.pfrts_tzero RB_HEAD(pf_state_tree_lan_ext, pf_state_key); RB_PROTOTYPE_SC(__private_extern__, pf_state_tree_lan_ext, pf_state_key, @@ -1357,26 +1357,26 @@ RB_PROTOTYPE_SC(__private_extern__, pf_state_tree_ext_gwy, pf_state_key, RB_HEAD(pfi_ifhead, pfi_kif); /* state tables */ -extern struct pf_state_tree_lan_ext pf_statetbl_lan_ext; -extern struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy; +extern struct pf_state_tree_lan_ext pf_statetbl_lan_ext; +extern struct pf_state_tree_ext_gwy pf_statetbl_ext_gwy; /* keep synced with pfi_kif, used in RB_FIND */ struct pfi_kif_cmp { - char pfik_name[IFNAMSIZ]; + char pfik_name[IFNAMSIZ]; }; struct pfi_kif { - char pfik_name[IFNAMSIZ]; - RB_ENTRY(pfi_kif) pfik_tree; - u_int64_t pfik_packets[2][2][2]; - u_int64_t pfik_bytes[2][2][2]; - u_int64_t pfik_tzero; - int pfik_flags; - void *pfik_ah_cookie; - struct ifnet *pfik_ifp; - int pfik_states; - int pfik_rules; - TAILQ_HEAD(, pfi_dynaddr) pfik_dynaddrs; + char pfik_name[IFNAMSIZ]; + RB_ENTRY(pfi_kif) pfik_tree; + u_int64_t pfik_packets[2][2][2]; + u_int64_t pfik_bytes[2][2][2]; + u_int64_t 
pfik_tzero; + int pfik_flags; + void *pfik_ah_cookie; + struct ifnet *pfik_ifp; + int pfik_states; + int pfik_rules; + TAILQ_HEAD(, pfi_dynaddr) pfik_dynaddrs; }; enum pfi_kif_refs { @@ -1389,98 +1389,98 @@ struct pfi_uif { #else /* !KERNEL */ struct pfi_kif { #endif /* !KERNEL */ - char pfik_name[IFNAMSIZ]; - u_int64_t pfik_packets[2][2][2]; - u_int64_t pfik_bytes[2][2][2]; - u_int64_t pfik_tzero; - int pfik_flags; - int pfik_states; - int pfik_rules; + char pfik_name[IFNAMSIZ]; + u_int64_t pfik_packets[2][2][2]; + u_int64_t pfik_bytes[2][2][2]; + u_int64_t pfik_tzero; + int pfik_flags; + int pfik_states; + int pfik_rules; #if !defined(__LP64__) - u_int32_t _pad; + u_int32_t _pad; #endif /* !__LP64__ */ }; -#define PFI_IFLAG_SKIP 0x0100 /* skip filtering on interface */ +#define PFI_IFLAG_SKIP 0x0100 /* skip filtering on interface */ #ifdef KERNEL struct pf_pdesc { struct { - int done; - uid_t uid; - gid_t gid; - pid_t pid; - } lookup; - u_int64_t tot_len; /* Make Mickey money */ + int done; + uid_t uid; + gid_t gid; + pid_t pid; + } lookup; + u_int64_t tot_len; /* Make Mickey money */ union { - struct tcphdr *tcp; - struct udphdr *udp; - struct icmp *icmp; + struct tcphdr *tcp; + struct udphdr *udp; + struct icmp *icmp; #if INET6 - struct icmp6_hdr *icmp6; + struct icmp6_hdr *icmp6; #endif /* INET6 */ - struct pf_grev1_hdr *grev1; - struct pf_esp_hdr *esp; - void *any; + struct pf_grev1_hdr *grev1; + struct pf_esp_hdr *esp; + void *any; } hdr; /* XXX TODO: Change baddr and naddr to *saddr */ - struct pf_addr baddr; /* src address before translation */ - struct pf_addr bdaddr; /* dst address before translation */ - struct pf_addr naddr; /* src address after translation */ - struct pf_addr ndaddr; /* dst address after translation */ - struct pf_rule *nat_rule; /* nat/rdr rule applied to packet */ - struct pf_addr *src; - struct pf_addr *dst; + struct pf_addr baddr; /* src address before translation */ + struct pf_addr bdaddr; /* dst address before translation */ + struct pf_addr naddr; /* src address after translation */ + struct pf_addr ndaddr; /* dst address after translation */ + struct pf_rule *nat_rule; /* nat/rdr rule applied to packet */ + struct pf_addr *src; + struct pf_addr *dst; struct ether_header - *eh; - pbuf_t *mp; - int lmw; /* lazy writable offset */ - struct pf_mtag *pf_mtag; - u_int16_t *ip_sum; - u_int32_t off; /* protocol header offset */ - u_int32_t hdrlen; /* protocol header length */ - u_int32_t p_len; /* total length of payload */ - u_int16_t flags; /* Let SCRUB trigger behavior in */ - /* state code. Easier than tags */ -#define PFDESC_TCP_NORM 0x0001 /* TCP shall be statefully scrubbed */ -#define PFDESC_IP_REAS 0x0002 /* IP frags would've been reassembled */ -#define PFDESC_IP_FRAG 0x0004 /* This is a fragment */ - sa_family_t af; - sa_family_t naf; /* address family after translation */ - u_int8_t proto; - u_int8_t tos; - u_int8_t ttl; - u_int8_t proto_variant; - mbuf_svc_class_t sc; /* mbuf service class (MBUF_SVC) */ - u_int32_t pktflags; /* mbuf packet flags (PKTF) */ - u_int32_t flowsrc; /* flow source (FLOWSRC) */ - u_int32_t flowhash; /* flow hash to identify the sender */ + *eh; + pbuf_t *mp; + int lmw; /* lazy writable offset */ + struct pf_mtag *pf_mtag; + u_int16_t *ip_sum; + u_int32_t off; /* protocol header offset */ + u_int32_t hdrlen; /* protocol header length */ + u_int32_t p_len; /* total length of payload */ + u_int16_t flags; /* Let SCRUB trigger behavior in */ + /* state code. 
Easier than tags */ +#define PFDESC_TCP_NORM 0x0001 /* TCP shall be statefully scrubbed */ +#define PFDESC_IP_REAS 0x0002 /* IP frags would've been reassembled */ +#define PFDESC_IP_FRAG 0x0004 /* This is a fragment */ + sa_family_t af; + sa_family_t naf; /* address family after translation */ + u_int8_t proto; + u_int8_t tos; + u_int8_t ttl; + u_int8_t proto_variant; + mbuf_svc_class_t sc; /* mbuf service class (MBUF_SVC) */ + u_int32_t pktflags; /* mbuf packet flags (PKTF) */ + u_int32_t flowsrc; /* flow source (FLOWSRC) */ + u_int32_t flowhash; /* flow hash to identify the sender */ }; #endif /* KERNEL */ /* flags for RDR options */ -#define PF_DPORT_RANGE 0x01 /* Dest port uses range */ -#define PF_RPORT_RANGE 0x02 /* RDR'ed port uses range */ +#define PF_DPORT_RANGE 0x01 /* Dest port uses range */ +#define PF_RPORT_RANGE 0x02 /* RDR'ed port uses range */ /* Reasons code for passing/dropping a packet */ -#define PFRES_MATCH 0 /* Explicit match of a rule */ -#define PFRES_BADOFF 1 /* Bad offset for pull_hdr */ -#define PFRES_FRAG 2 /* Dropping following fragment */ -#define PFRES_SHORT 3 /* Dropping short packet */ -#define PFRES_NORM 4 /* Dropping by normalizer */ -#define PFRES_MEMORY 5 /* Dropped due to lacking mem */ -#define PFRES_TS 6 /* Bad TCP Timestamp (RFC1323) */ -#define PFRES_CONGEST 7 /* Congestion (of ipintrq) */ -#define PFRES_IPOPTIONS 8 /* IP option */ -#define PFRES_PROTCKSUM 9 /* Protocol checksum invalid */ -#define PFRES_BADSTATE 10 /* State mismatch */ -#define PFRES_STATEINS 11 /* State insertion failure */ -#define PFRES_MAXSTATES 12 /* State limit */ -#define PFRES_SRCLIMIT 13 /* Source node/conn limit */ -#define PFRES_SYNPROXY 14 /* SYN proxy */ -#define PFRES_DUMMYNET 15 /* Dummynet */ -#define PFRES_MAX 16 /* total+1 */ +#define PFRES_MATCH 0 /* Explicit match of a rule */ +#define PFRES_BADOFF 1 /* Bad offset for pull_hdr */ +#define PFRES_FRAG 2 /* Dropping following fragment */ +#define PFRES_SHORT 3 /* Dropping short packet */ +#define PFRES_NORM 4 /* Dropping by normalizer */ +#define PFRES_MEMORY 5 /* Dropped due to lacking mem */ +#define PFRES_TS 6 /* Bad TCP Timestamp (RFC1323) */ +#define PFRES_CONGEST 7 /* Congestion (of ipintrq) */ +#define PFRES_IPOPTIONS 8 /* IP option */ +#define PFRES_PROTCKSUM 9 /* Protocol checksum invalid */ +#define PFRES_BADSTATE 10 /* State mismatch */ +#define PFRES_STATEINS 11 /* State insertion failure */ +#define PFRES_MAXSTATES 12 /* State limit */ +#define PFRES_SRCLIMIT 13 /* Source node/conn limit */ +#define PFRES_SYNPROXY 14 /* SYN proxy */ +#define PFRES_DUMMYNET 15 /* Dummynet */ +#define PFRES_MAX 16 /* total+1 */ #define PFRES_NAMES { \ "match", \ @@ -1503,14 +1503,14 @@ struct pf_pdesc { } /* Counters for other things we want to keep track of */ -#define LCNT_STATES 0 /* states */ -#define LCNT_SRCSTATES 1 /* max-src-states */ -#define LCNT_SRCNODES 2 /* max-src-nodes */ -#define LCNT_SRCCONN 3 /* max-src-conn */ -#define LCNT_SRCCONNRATE 4 /* max-src-conn-rate */ -#define LCNT_OVERLOAD_TABLE 5 /* entry added to overload table */ -#define LCNT_OVERLOAD_FLUSH 6 /* state entries flushed */ -#define LCNT_MAX 7 /* total+1 */ +#define LCNT_STATES 0 /* states */ +#define LCNT_SRCSTATES 1 /* max-src-states */ +#define LCNT_SRCNODES 2 /* max-src-nodes */ +#define LCNT_SRCCONN 3 /* max-src-conn */ +#define LCNT_SRCCONNRATE 4 /* max-src-conn-rate */ +#define LCNT_OVERLOAD_TABLE 5 /* entry added to overload table */ +#define LCNT_OVERLOAD_FLUSH 6 /* state entries flushed */ +#define LCNT_MAX 7 /* total+1 */ #define 
LCNT_NAMES { \ "max states per rule", \ @@ -1524,11 +1524,11 @@ struct pf_pdesc { } /* UDP state enumeration */ -#define PFUDPS_NO_TRAFFIC 0 -#define PFUDPS_SINGLE 1 -#define PFUDPS_MULTIPLE 2 +#define PFUDPS_NO_TRAFFIC 0 +#define PFUDPS_SINGLE 1 +#define PFUDPS_MULTIPLE 2 -#define PFUDPS_NSTATES 3 /* number of state levels */ +#define PFUDPS_NSTATES 3 /* number of state levels */ #define PFUDPS_NAMES { \ "NO_TRAFFIC", \ @@ -1538,11 +1538,11 @@ struct pf_pdesc { } /* GREv1 protocol state enumeration */ -#define PFGRE1S_NO_TRAFFIC 0 -#define PFGRE1S_INITIATING 1 -#define PFGRE1S_ESTABLISHED 2 +#define PFGRE1S_NO_TRAFFIC 0 +#define PFGRE1S_INITIATING 1 +#define PFGRE1S_ESTABLISHED 2 -#define PFGRE1S_NSTATES 3 /* number of state levels */ +#define PFGRE1S_NSTATES 3 /* number of state levels */ #define PFGRE1S_NAMES { \ "NO_TRAFFIC", \ @@ -1551,20 +1551,20 @@ struct pf_pdesc { NULL \ } -#define PFESPS_NO_TRAFFIC 0 -#define PFESPS_INITIATING 1 -#define PFESPS_ESTABLISHED 2 +#define PFESPS_NO_TRAFFIC 0 +#define PFESPS_INITIATING 1 +#define PFESPS_ESTABLISHED 2 -#define PFESPS_NSTATES 3 /* number of state levels */ +#define PFESPS_NSTATES 3 /* number of state levels */ #define PFESPS_NAMES { "NO_TRAFFIC", "INITIATING", "ESTABLISHED", NULL } /* Other protocol state enumeration */ -#define PFOTHERS_NO_TRAFFIC 0 -#define PFOTHERS_SINGLE 1 -#define PFOTHERS_MULTIPLE 2 +#define PFOTHERS_NO_TRAFFIC 0 +#define PFOTHERS_SINGLE 1 +#define PFOTHERS_MULTIPLE 2 -#define PFOTHERS_NSTATES 3 /* number of state levels */ +#define PFOTHERS_NSTATES 3 /* number of state levels */ #define PFOTHERS_NAMES { \ "NO_TRAFFIC", \ @@ -1573,220 +1573,220 @@ struct pf_pdesc { NULL \ } -#define FCNT_STATE_SEARCH 0 -#define FCNT_STATE_INSERT 1 -#define FCNT_STATE_REMOVALS 2 -#define FCNT_MAX 3 +#define FCNT_STATE_SEARCH 0 +#define FCNT_STATE_INSERT 1 +#define FCNT_STATE_REMOVALS 2 +#define FCNT_MAX 3 -#define SCNT_SRC_NODE_SEARCH 0 -#define SCNT_SRC_NODE_INSERT 1 -#define SCNT_SRC_NODE_REMOVALS 2 -#define SCNT_MAX 3 +#define SCNT_SRC_NODE_SEARCH 0 +#define SCNT_SRC_NODE_INSERT 1 +#define SCNT_SRC_NODE_REMOVALS 2 +#define SCNT_MAX 3 #ifdef KERNEL #define ACTION_SET(a, x) \ do { \ - if ((a) != NULL) \ - *(a) = (x); \ + if ((a) != NULL) \ + *(a) = (x); \ } while (0) #define REASON_SET(a, x) \ do { \ - if ((a) != NULL) \ - *(a) = (x); \ - if (x < PFRES_MAX) \ - pf_status.counters[x]++; \ + if ((a) != NULL) \ + *(a) = (x); \ + if (x < PFRES_MAX) \ + pf_status.counters[x]++; \ } while (0) #endif /* KERNEL */ struct pf_status { - u_int64_t counters[PFRES_MAX]; - u_int64_t lcounters[LCNT_MAX]; /* limit counters */ - u_int64_t fcounters[FCNT_MAX]; - u_int64_t scounters[SCNT_MAX]; - u_int64_t pcounters[2][2][3]; - u_int64_t bcounters[2][2]; - u_int64_t stateid; - u_int32_t running; - u_int32_t states; - u_int32_t src_nodes; - u_int64_t since __attribute__((aligned(8))); - u_int32_t debug; - u_int32_t hostid; - char ifname[IFNAMSIZ]; - u_int8_t pf_chksum[PF_MD5_DIGEST_LENGTH]; + u_int64_t counters[PFRES_MAX]; + u_int64_t lcounters[LCNT_MAX]; /* limit counters */ + u_int64_t fcounters[FCNT_MAX]; + u_int64_t scounters[SCNT_MAX]; + u_int64_t pcounters[2][2][3]; + u_int64_t bcounters[2][2]; + u_int64_t stateid; + u_int32_t running; + u_int32_t states; + u_int32_t src_nodes; + u_int64_t since __attribute__((aligned(8))); + u_int32_t debug; + u_int32_t hostid; + char ifname[IFNAMSIZ]; + u_int8_t pf_chksum[PF_MD5_DIGEST_LENGTH]; }; struct cbq_opts { - u_int32_t minburst; - u_int32_t maxburst; - u_int32_t pktsize; - u_int32_t maxpktsize; - 
u_int32_t ns_per_byte; - u_int32_t maxidle; - int32_t minidle; - u_int32_t offtime; - u_int32_t flags; + u_int32_t minburst; + u_int32_t maxburst; + u_int32_t pktsize; + u_int32_t maxpktsize; + u_int32_t ns_per_byte; + u_int32_t maxidle; + int32_t minidle; + u_int32_t offtime; + u_int32_t flags; }; struct priq_opts { - u_int32_t flags; + u_int32_t flags; }; struct qfq_opts { - u_int32_t flags; - u_int32_t lmax; + u_int32_t flags; + u_int32_t lmax; }; struct hfsc_opts { /* real-time service curve */ - u_int64_t rtsc_m1; /* slope of the 1st segment in bps */ - u_int64_t rtsc_d; /* the x-projection of m1 in msec */ - u_int64_t rtsc_m2; /* slope of the 2nd segment in bps */ - u_int32_t rtsc_fl; /* service curve flags */ + u_int64_t rtsc_m1; /* slope of the 1st segment in bps */ + u_int64_t rtsc_d; /* the x-projection of m1 in msec */ + u_int64_t rtsc_m2; /* slope of the 2nd segment in bps */ + u_int32_t rtsc_fl; /* service curve flags */ #if !defined(__LP64__) - u_int32_t _pad; + u_int32_t _pad; #endif /* !__LP64__ */ /* link-sharing service curve */ - u_int64_t lssc_m1; - u_int64_t lssc_d; - u_int64_t lssc_m2; - u_int32_t lssc_fl; + u_int64_t lssc_m1; + u_int64_t lssc_d; + u_int64_t lssc_m2; + u_int32_t lssc_fl; #if !defined(__LP64__) - u_int32_t __pad; + u_int32_t __pad; #endif /* !__LP64__ */ /* upper-limit service curve */ - u_int64_t ulsc_m1; - u_int64_t ulsc_d; - u_int64_t ulsc_m2; - u_int32_t ulsc_fl; - u_int32_t flags; /* scheduler flags */ + u_int64_t ulsc_m1; + u_int64_t ulsc_d; + u_int64_t ulsc_m2; + u_int32_t ulsc_fl; + u_int32_t flags; /* scheduler flags */ }; struct fairq_opts { - u_int32_t nbuckets; /* hash buckets */ - u_int32_t flags; - u_int64_t hogs_m1; /* hog detection bandwidth */ + u_int32_t nbuckets; /* hash buckets */ + u_int32_t flags; + u_int64_t hogs_m1; /* hog detection bandwidth */ /* link-sharing service curve */ - u_int64_t lssc_m1; - u_int64_t lssc_d; - u_int64_t lssc_m2; + u_int64_t lssc_m1; + u_int64_t lssc_d; + u_int64_t lssc_m2; }; /* bandwidth types */ -#define PF_ALTQ_BW_ABSOLUTE 1 /* bw in absolute value (bps) */ -#define PF_ALTQ_BW_PERCENT 2 /* bandwidth in percentage */ +#define PF_ALTQ_BW_ABSOLUTE 1 /* bw in absolute value (bps) */ +#define PF_ALTQ_BW_PERCENT 2 /* bandwidth in percentage */ /* ALTQ rule flags */ -#define PF_ALTQF_TBR 0x1 /* enable Token Bucket Regulator */ +#define PF_ALTQF_TBR 0x1 /* enable Token Bucket Regulator */ /* queue rule flags */ -#define PF_ALTQ_QRF_WEIGHT 0x1 /* weight instead of priority */ +#define PF_ALTQ_QRF_WEIGHT 0x1 /* weight instead of priority */ struct pf_altq { - char ifname[IFNAMSIZ]; + char ifname[IFNAMSIZ]; /* discipline-specific state */ - void *altq_disc __attribute__((aligned(8))); - TAILQ_ENTRY(pf_altq) entries __attribute__((aligned(8))); + void *altq_disc __attribute__((aligned(8))); + TAILQ_ENTRY(pf_altq) entries __attribute__((aligned(8))); #if !defined(__LP64__) - u_int32_t _pad[2]; + u_int32_t _pad[2]; #endif /* !__LP64__ */ - u_int32_t aflags; /* ALTQ rule flags */ - u_int32_t bwtype; /* bandwidth type */ + u_int32_t aflags; /* ALTQ rule flags */ + u_int32_t bwtype; /* bandwidth type */ /* scheduler spec */ - u_int32_t scheduler; /* scheduler type */ - u_int32_t tbrsize; /* tokenbucket regulator size */ - u_int64_t ifbandwidth; /* interface bandwidth */ + u_int32_t scheduler; /* scheduler type */ + u_int32_t tbrsize; /* tokenbucket regulator size */ + u_int64_t ifbandwidth; /* interface bandwidth */ /* queue spec */ - char qname[PF_QNAME_SIZE]; /* queue name */ - char parent[PF_QNAME_SIZE]; /* 
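Each hfsc_opts service curve above is two-piece linear, per the field comments: slope m1 (bps) for the first d milliseconds, slope m2 (bps) afterwards. A sketch of the service accumulated by time t under that reading (illustrative only; ignores intermediate overflow for very large rates):

#include <stdint.h>

static uint64_t
sc_bits_by(uint64_t m1, uint64_t d_msec, uint64_t m2, uint64_t t_msec)
{
	if (t_msec <= d_msec)
		return m1 * t_msec / 1000;	/* still on the first segment */
	return m1 * d_msec / 1000 + m2 * (t_msec - d_msec) / 1000;
}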
parent name */ - u_int32_t parent_qid; /* parent queue id */ - u_int32_t qrflags; /* queue rule flags */ + char qname[PF_QNAME_SIZE]; /* queue name */ + char parent[PF_QNAME_SIZE]; /* parent name */ + u_int32_t parent_qid; /* parent queue id */ + u_int32_t qrflags; /* queue rule flags */ union { - u_int32_t priority; /* priority */ - u_int32_t weight; /* weight */ + u_int32_t priority; /* priority */ + u_int32_t weight; /* weight */ }; - u_int32_t qlimit; /* queue size limit */ - u_int32_t flags; /* misc flags */ + u_int32_t qlimit; /* queue size limit */ + u_int32_t flags; /* misc flags */ #if !defined(__LP64__) - u_int32_t __pad; + u_int32_t __pad; #endif /* !__LP64__ */ - u_int64_t bandwidth; /* queue bandwidth */ + u_int64_t bandwidth; /* queue bandwidth */ union { - struct cbq_opts cbq_opts; - struct priq_opts priq_opts; - struct hfsc_opts hfsc_opts; - struct fairq_opts fairq_opts; - struct qfq_opts qfq_opts; + struct cbq_opts cbq_opts; + struct priq_opts priq_opts; + struct hfsc_opts hfsc_opts; + struct fairq_opts fairq_opts; + struct qfq_opts qfq_opts; } pq_u; - u_int32_t qid; /* return value */ + u_int32_t qid; /* return value */ }; struct pf_tagname { - TAILQ_ENTRY(pf_tagname) entries; - char name[PF_TAG_NAME_SIZE]; - u_int16_t tag; - int ref; + TAILQ_ENTRY(pf_tagname) entries; + char name[PF_TAG_NAME_SIZE]; + u_int16_t tag; + int ref; }; -#define PFFRAG_FRENT_HIWAT 5000 /* Number of fragment entries */ -#define PFFRAG_FRAG_HIWAT 1000 /* Number of fragmented packets */ -#define PFFRAG_FRCENT_HIWAT 50000 /* Number of fragment cache entries */ -#define PFFRAG_FRCACHE_HIWAT 10000 /* Number of fragment descriptors */ +#define PFFRAG_FRENT_HIWAT 5000 /* Number of fragment entries */ +#define PFFRAG_FRAG_HIWAT 1000 /* Number of fragmented packets */ +#define PFFRAG_FRCENT_HIWAT 50000 /* Number of fragment cache entries */ +#define PFFRAG_FRCACHE_HIWAT 10000 /* Number of fragment descriptors */ -#define PFR_KTABLE_HIWAT 1000 /* Number of tables */ -#define PFR_KENTRY_HIWAT 200000 /* Number of table entries */ -#define PFR_KENTRY_HIWAT_SMALL 100000 /* Number of table entries (tiny hosts) */ +#define PFR_KTABLE_HIWAT 1000 /* Number of tables */ +#define PFR_KENTRY_HIWAT 200000 /* Number of table entries */ +#define PFR_KENTRY_HIWAT_SMALL 100000 /* Number of table entries (tiny hosts) */ /* * ioctl parameter structures */ struct pfioc_pooladdr { - u_int32_t action; - u_int32_t ticket; - u_int32_t nr; - u_int32_t r_num; - u_int8_t r_action; - u_int8_t r_last; - u_int8_t af; - char anchor[MAXPATHLEN]; - struct pf_pooladdr addr; + u_int32_t action; + u_int32_t ticket; + u_int32_t nr; + u_int32_t r_num; + u_int8_t r_action; + u_int8_t r_last; + u_int8_t af; + char anchor[MAXPATHLEN]; + struct pf_pooladdr addr; }; struct pfioc_rule { - u_int32_t action; - u_int32_t ticket; - u_int32_t pool_ticket; - u_int32_t nr; - char anchor[MAXPATHLEN]; - char anchor_call[MAXPATHLEN]; - struct pf_rule rule; + u_int32_t action; + u_int32_t ticket; + u_int32_t pool_ticket; + u_int32_t nr; + char anchor[MAXPATHLEN]; + char anchor_call[MAXPATHLEN]; + struct pf_rule rule; }; struct pfioc_natlook { - struct pf_addr saddr; - struct pf_addr daddr; - struct pf_addr rsaddr; - struct pf_addr rdaddr; - union pf_state_xport sxport; - union pf_state_xport dxport; - union pf_state_xport rsxport; - union pf_state_xport rdxport; - sa_family_t af; - u_int8_t proto; - u_int8_t proto_variant; - u_int8_t direction; + struct pf_addr saddr; + struct pf_addr daddr; + struct pf_addr rsaddr; + struct pf_addr rdaddr; + union 
pf_state_xport sxport; + union pf_state_xport dxport; + union pf_state_xport rsxport; + union pf_state_xport rdxport; + sa_family_t af; + u_int8_t proto; + u_int8_t proto_variant; + u_int8_t direction; }; struct pfioc_state { - struct pfsync_state state; + struct pfsync_state state; }; struct pfioc_src_node_kill { @@ -1797,48 +1797,48 @@ struct pfioc_src_node_kill { }; struct pfioc_state_addr_kill { - struct pf_addr_wrap addr; - u_int8_t reserved_[3]; - u_int8_t neg; - union pf_rule_xport xport; + struct pf_addr_wrap addr; + u_int8_t reserved_[3]; + u_int8_t neg; + union pf_rule_xport xport; }; struct pfioc_state_kill { /* XXX returns the number of states killed in psk_af */ - sa_family_t psk_af; - u_int8_t psk_proto; - u_int8_t psk_proto_variant; - u_int8_t _pad; - struct pfioc_state_addr_kill psk_src; - struct pfioc_state_addr_kill psk_dst; - char psk_ifname[IFNAMSIZ]; - char psk_ownername[PF_OWNER_NAME_SIZE]; + sa_family_t psk_af; + u_int8_t psk_proto; + u_int8_t psk_proto_variant; + u_int8_t _pad; + struct pfioc_state_addr_kill psk_src; + struct pfioc_state_addr_kill psk_dst; + char psk_ifname[IFNAMSIZ]; + char psk_ownername[PF_OWNER_NAME_SIZE]; }; struct pfioc_states { - int ps_len; + int ps_len; union { - caddr_t psu_buf; - struct pfsync_state *psu_states; + caddr_t psu_buf; + struct pfsync_state *psu_states; } ps_u __attribute__((aligned(8))); -#define ps_buf ps_u.psu_buf -#define ps_states ps_u.psu_states +#define ps_buf ps_u.psu_buf +#define ps_states ps_u.psu_states }; #ifdef KERNEL struct pfioc_states_32 { - int ps_len; + int ps_len; union { - user32_addr_t psu_buf; - user32_addr_t psu_states; + user32_addr_t psu_buf; + user32_addr_t psu_states; } ps_u __attribute__((aligned(8))); }; struct pfioc_states_64 { - int ps_len; + int ps_len; union { - user64_addr_t psu_buf; - user64_addr_t psu_states; + user64_addr_t psu_buf; + user64_addr_t psu_states; } ps_u __attribute__((aligned(8))); }; #endif /* KERNEL */ @@ -1846,16 +1846,16 @@ struct pfioc_states_64 { #define PFTOK_PROCNAME_LEN 64 #pragma pack(1) struct pfioc_token { - u_int64_t token_value; - u_int64_t timestamp; - pid_t pid; - char proc_name[PFTOK_PROCNAME_LEN]; + u_int64_t token_value; + u_int64_t timestamp; + pid_t pid; + char proc_name[PFTOK_PROCNAME_LEN]; }; #pragma pack() struct pfioc_kernel_token { - SLIST_ENTRY(pfioc_kernel_token) next; - struct pfioc_token token; + SLIST_ENTRY(pfioc_kernel_token) next; + struct pfioc_token token; }; struct pfioc_remove_token { @@ -1864,293 +1864,293 @@ struct pfioc_remove_token { }; struct pfioc_tokens { - int size; + int size; union { - caddr_t pgtu_buf; - struct pfioc_token *pgtu_tokens; + caddr_t pgtu_buf; + struct pfioc_token *pgtu_tokens; } pgt_u __attribute__((aligned(8))); -#define pgt_buf pgt_u.pgtu_buf -#define pgt_tokens pgt_u.pgtu_tokens +#define pgt_buf pgt_u.pgtu_buf +#define pgt_tokens pgt_u.pgtu_tokens }; #ifdef KERNEL struct pfioc_tokens_32 { - int size; + int size; union { - user32_addr_t pgtu_buf; - user32_addr_t pgtu_tokens; + user32_addr_t pgtu_buf; + user32_addr_t pgtu_tokens; } pgt_u __attribute__((aligned(8))); }; struct pfioc_tokens_64 { - int size; + int size; union { - user64_addr_t pgtu_buf; - user64_addr_t pgtu_tokens; + user64_addr_t pgtu_buf; + user64_addr_t pgtu_tokens; } pgt_u __attribute__((aligned(8))); }; #endif /* KERNEL */ struct pfioc_src_nodes { - int psn_len; + int psn_len; union { - caddr_t psu_buf; - struct pf_src_node *psu_src_nodes; + caddr_t psu_buf; + struct pf_src_node *psu_src_nodes; } psn_u __attribute__((aligned(8))); -#define psn_buf 
psn_u.psu_buf -#define psn_src_nodes psn_u.psu_src_nodes +#define psn_buf psn_u.psu_buf +#define psn_src_nodes psn_u.psu_src_nodes }; #ifdef KERNEL struct pfioc_src_nodes_32 { - int psn_len; + int psn_len; union { - user32_addr_t psu_buf; - user32_addr_t psu_src_nodes; + user32_addr_t psu_buf; + user32_addr_t psu_src_nodes; } psn_u __attribute__((aligned(8))); }; struct pfioc_src_nodes_64 { - int psn_len; + int psn_len; union { - user64_addr_t psu_buf; - user64_addr_t psu_src_nodes; + user64_addr_t psu_buf; + user64_addr_t psu_src_nodes; } psn_u __attribute__((aligned(8))); }; #endif /* KERNEL */ struct pfioc_if { - char ifname[IFNAMSIZ]; + char ifname[IFNAMSIZ]; }; struct pfioc_tm { - int timeout; - int seconds; + int timeout; + int seconds; }; struct pfioc_limit { - int index; - unsigned limit; + int index; + unsigned limit; }; struct pfioc_altq { - u_int32_t action; - u_int32_t ticket; - u_int32_t nr; - struct pf_altq altq __attribute__((aligned(8))); + u_int32_t action; + u_int32_t ticket; + u_int32_t nr; + struct pf_altq altq __attribute__((aligned(8))); }; struct pfioc_qstats { - u_int32_t ticket; - u_int32_t nr; - void *buf __attribute__((aligned(8))); - int nbytes __attribute__((aligned(8))); - u_int8_t scheduler; + u_int32_t ticket; + u_int32_t nr; + void *buf __attribute__((aligned(8))); + int nbytes __attribute__((aligned(8))); + u_int8_t scheduler; }; struct pfioc_ruleset { - u_int32_t nr; - char path[MAXPATHLEN]; - char name[PF_ANCHOR_NAME_SIZE]; + u_int32_t nr; + char path[MAXPATHLEN]; + char name[PF_ANCHOR_NAME_SIZE]; }; -#define PF_RULESET_ALTQ (PF_RULESET_MAX) -#define PF_RULESET_TABLE (PF_RULESET_MAX+1) +#define PF_RULESET_ALTQ (PF_RULESET_MAX) +#define PF_RULESET_TABLE (PF_RULESET_MAX+1) struct pfioc_trans { - int size; /* number of elements */ - int esize; /* size of each element in bytes */ + int size; /* number of elements */ + int esize; /* size of each element in bytes */ struct pfioc_trans_e { - int rs_num; - char anchor[MAXPATHLEN]; - u_int32_t ticket; + int rs_num; + char anchor[MAXPATHLEN]; + u_int32_t ticket; } *array __attribute__((aligned(8))); }; #ifdef KERNEL struct pfioc_trans_32 { - int size; /* number of elements */ - int esize; /* size of each element in bytes */ - user32_addr_t array __attribute__((aligned(8))); + int size; /* number of elements */ + int esize; /* size of each element in bytes */ + user32_addr_t array __attribute__((aligned(8))); }; struct pfioc_trans_64 { - int size; /* number of elements */ - int esize; /* size of each element in bytes */ - user64_addr_t array __attribute__((aligned(8))); + int size; /* number of elements */ + int esize; /* size of each element in bytes */ + user64_addr_t array __attribute__((aligned(8))); }; #endif /* KERNEL */ -#define PFR_FLAG_ATOMIC 0x00000001 -#define PFR_FLAG_DUMMY 0x00000002 -#define PFR_FLAG_FEEDBACK 0x00000004 -#define PFR_FLAG_CLSTATS 0x00000008 -#define PFR_FLAG_ADDRSTOO 0x00000010 -#define PFR_FLAG_REPLACE 0x00000020 -#define PFR_FLAG_ALLRSETS 0x00000040 -#define PFR_FLAG_ALLMASK 0x0000007F +#define PFR_FLAG_ATOMIC 0x00000001 +#define PFR_FLAG_DUMMY 0x00000002 +#define PFR_FLAG_FEEDBACK 0x00000004 +#define PFR_FLAG_CLSTATS 0x00000008 +#define PFR_FLAG_ADDRSTOO 0x00000010 +#define PFR_FLAG_REPLACE 0x00000020 +#define PFR_FLAG_ALLRSETS 0x00000040 +#define PFR_FLAG_ALLMASK 0x0000007F #ifdef KERNEL -#define PFR_FLAG_USERIOCTL 0x10000000 +#define PFR_FLAG_USERIOCTL 0x10000000 #endif /* KERNEL */ struct pfioc_table { - struct pfr_table pfrio_table; - void *pfrio_buffer 
__attribute__((aligned(8))); - int pfrio_esize __attribute__((aligned(8))); - int pfrio_size; - int pfrio_size2; - int pfrio_nadd; - int pfrio_ndel; - int pfrio_nchange; - int pfrio_flags; - u_int32_t pfrio_ticket; -}; -#define pfrio_exists pfrio_nadd -#define pfrio_nzero pfrio_nadd -#define pfrio_nmatch pfrio_nadd -#define pfrio_naddr pfrio_size2 -#define pfrio_setflag pfrio_size2 -#define pfrio_clrflag pfrio_nadd + struct pfr_table pfrio_table; + void *pfrio_buffer __attribute__((aligned(8))); + int pfrio_esize __attribute__((aligned(8))); + int pfrio_size; + int pfrio_size2; + int pfrio_nadd; + int pfrio_ndel; + int pfrio_nchange; + int pfrio_flags; + u_int32_t pfrio_ticket; +}; +#define pfrio_exists pfrio_nadd +#define pfrio_nzero pfrio_nadd +#define pfrio_nmatch pfrio_nadd +#define pfrio_naddr pfrio_size2 +#define pfrio_setflag pfrio_size2 +#define pfrio_clrflag pfrio_nadd #ifdef KERNEL struct pfioc_table_32 { - struct pfr_table pfrio_table; - user32_addr_t pfrio_buffer __attribute__((aligned(8))); - int pfrio_esize __attribute__((aligned(8))); - int pfrio_size; - int pfrio_size2; - int pfrio_nadd; - int pfrio_ndel; - int pfrio_nchange; - int pfrio_flags; - u_int32_t pfrio_ticket; + struct pfr_table pfrio_table; + user32_addr_t pfrio_buffer __attribute__((aligned(8))); + int pfrio_esize __attribute__((aligned(8))); + int pfrio_size; + int pfrio_size2; + int pfrio_nadd; + int pfrio_ndel; + int pfrio_nchange; + int pfrio_flags; + u_int32_t pfrio_ticket; }; struct pfioc_table_64 { - struct pfr_table pfrio_table; - user64_addr_t pfrio_buffer __attribute__((aligned(8))); - int pfrio_esize __attribute__((aligned(8))); - int pfrio_size; - int pfrio_size2; - int pfrio_nadd; - int pfrio_ndel; - int pfrio_nchange; - int pfrio_flags; - u_int32_t pfrio_ticket; + struct pfr_table pfrio_table; + user64_addr_t pfrio_buffer __attribute__((aligned(8))); + int pfrio_esize __attribute__((aligned(8))); + int pfrio_size; + int pfrio_size2; + int pfrio_nadd; + int pfrio_ndel; + int pfrio_nchange; + int pfrio_flags; + u_int32_t pfrio_ticket; }; #endif /* KERNEL */ struct pfioc_iface { - char pfiio_name[IFNAMSIZ]; - void *pfiio_buffer __attribute__((aligned(8))); - int pfiio_esize __attribute__((aligned(8))); - int pfiio_size; - int pfiio_nzero; - int pfiio_flags; + char pfiio_name[IFNAMSIZ]; + void *pfiio_buffer __attribute__((aligned(8))); + int pfiio_esize __attribute__((aligned(8))); + int pfiio_size; + int pfiio_nzero; + int pfiio_flags; }; #ifdef KERNEL struct pfioc_iface_32 { - char pfiio_name[IFNAMSIZ]; - user32_addr_t pfiio_buffer __attribute__((aligned(8))); - int pfiio_esize __attribute__((aligned(8))); - int pfiio_size; - int pfiio_nzero; - int pfiio_flags; + char pfiio_name[IFNAMSIZ]; + user32_addr_t pfiio_buffer __attribute__((aligned(8))); + int pfiio_esize __attribute__((aligned(8))); + int pfiio_size; + int pfiio_nzero; + int pfiio_flags; }; struct pfioc_iface_64 { - char pfiio_name[IFNAMSIZ]; - user64_addr_t pfiio_buffer __attribute__((aligned(8))); - int pfiio_esize __attribute__((aligned(8))); - int pfiio_size; - int pfiio_nzero; - int pfiio_flags; + char pfiio_name[IFNAMSIZ]; + user64_addr_t pfiio_buffer __attribute__((aligned(8))); + int pfiio_esize __attribute__((aligned(8))); + int pfiio_size; + int pfiio_nzero; + int pfiio_flags; }; #endif /* KERNEL */ struct pf_ifspeed { - char ifname[IFNAMSIZ]; - u_int64_t baudrate; + char ifname[IFNAMSIZ]; + u_int64_t baudrate; }; /* * ioctl operations */ -#define DIOCSTART _IO ('D', 1) -#define DIOCSTOP _IO ('D', 2) -#define DIOCADDRULE 
_IOWR('D', 4, struct pfioc_rule) -#define DIOCGETSTARTERS _IOWR('D', 5, struct pfioc_tokens) -#define DIOCGETRULES _IOWR('D', 6, struct pfioc_rule) -#define DIOCGETRULE _IOWR('D', 7, struct pfioc_rule) -#define DIOCSTARTREF _IOR ('D', 8, u_int64_t) -#define DIOCSTOPREF _IOWR('D', 9, struct pfioc_remove_token) +#define DIOCSTART _IO ('D', 1) +#define DIOCSTOP _IO ('D', 2) +#define DIOCADDRULE _IOWR('D', 4, struct pfioc_rule) +#define DIOCGETSTARTERS _IOWR('D', 5, struct pfioc_tokens) +#define DIOCGETRULES _IOWR('D', 6, struct pfioc_rule) +#define DIOCGETRULE _IOWR('D', 7, struct pfioc_rule) +#define DIOCSTARTREF _IOR ('D', 8, u_int64_t) +#define DIOCSTOPREF _IOWR('D', 9, struct pfioc_remove_token) /* XXX cut 10 - 17 */ -#define DIOCCLRSTATES _IOWR('D', 18, struct pfioc_state_kill) -#define DIOCGETSTATE _IOWR('D', 19, struct pfioc_state) -#define DIOCSETSTATUSIF _IOWR('D', 20, struct pfioc_if) -#define DIOCGETSTATUS _IOWR('D', 21, struct pf_status) -#define DIOCCLRSTATUS _IO ('D', 22) -#define DIOCNATLOOK _IOWR('D', 23, struct pfioc_natlook) -#define DIOCSETDEBUG _IOWR('D', 24, u_int32_t) -#define DIOCGETSTATES _IOWR('D', 25, struct pfioc_states) -#define DIOCCHANGERULE _IOWR('D', 26, struct pfioc_rule) -#define DIOCINSERTRULE _IOWR('D', 27, struct pfioc_rule) -#define DIOCDELETERULE _IOWR('D', 28, struct pfioc_rule) -#define DIOCSETTIMEOUT _IOWR('D', 29, struct pfioc_tm) -#define DIOCGETTIMEOUT _IOWR('D', 30, struct pfioc_tm) -#define DIOCADDSTATE _IOWR('D', 37, struct pfioc_state) -#define DIOCCLRRULECTRS _IO ('D', 38) -#define DIOCGETLIMIT _IOWR('D', 39, struct pfioc_limit) -#define DIOCSETLIMIT _IOWR('D', 40, struct pfioc_limit) -#define DIOCKILLSTATES _IOWR('D', 41, struct pfioc_state_kill) -#define DIOCSTARTALTQ _IO ('D', 42) -#define DIOCSTOPALTQ _IO ('D', 43) -#define DIOCADDALTQ _IOWR('D', 45, struct pfioc_altq) -#define DIOCGETALTQS _IOWR('D', 47, struct pfioc_altq) -#define DIOCGETALTQ _IOWR('D', 48, struct pfioc_altq) -#define DIOCCHANGEALTQ _IOWR('D', 49, struct pfioc_altq) -#define DIOCGETQSTATS _IOWR('D', 50, struct pfioc_qstats) -#define DIOCBEGINADDRS _IOWR('D', 51, struct pfioc_pooladdr) -#define DIOCADDADDR _IOWR('D', 52, struct pfioc_pooladdr) -#define DIOCGETADDRS _IOWR('D', 53, struct pfioc_pooladdr) -#define DIOCGETADDR _IOWR('D', 54, struct pfioc_pooladdr) -#define DIOCCHANGEADDR _IOWR('D', 55, struct pfioc_pooladdr) +#define DIOCCLRSTATES _IOWR('D', 18, struct pfioc_state_kill) +#define DIOCGETSTATE _IOWR('D', 19, struct pfioc_state) +#define DIOCSETSTATUSIF _IOWR('D', 20, struct pfioc_if) +#define DIOCGETSTATUS _IOWR('D', 21, struct pf_status) +#define DIOCCLRSTATUS _IO ('D', 22) +#define DIOCNATLOOK _IOWR('D', 23, struct pfioc_natlook) +#define DIOCSETDEBUG _IOWR('D', 24, u_int32_t) +#define DIOCGETSTATES _IOWR('D', 25, struct pfioc_states) +#define DIOCCHANGERULE _IOWR('D', 26, struct pfioc_rule) +#define DIOCINSERTRULE _IOWR('D', 27, struct pfioc_rule) +#define DIOCDELETERULE _IOWR('D', 28, struct pfioc_rule) +#define DIOCSETTIMEOUT _IOWR('D', 29, struct pfioc_tm) +#define DIOCGETTIMEOUT _IOWR('D', 30, struct pfioc_tm) +#define DIOCADDSTATE _IOWR('D', 37, struct pfioc_state) +#define DIOCCLRRULECTRS _IO ('D', 38) +#define DIOCGETLIMIT _IOWR('D', 39, struct pfioc_limit) +#define DIOCSETLIMIT _IOWR('D', 40, struct pfioc_limit) +#define DIOCKILLSTATES _IOWR('D', 41, struct pfioc_state_kill) +#define DIOCSTARTALTQ _IO ('D', 42) +#define DIOCSTOPALTQ _IO ('D', 43) +#define DIOCADDALTQ _IOWR('D', 45, struct pfioc_altq) +#define DIOCGETALTQS _IOWR('D', 47, struct 
pfioc_altq) +#define DIOCGETALTQ _IOWR('D', 48, struct pfioc_altq) +#define DIOCCHANGEALTQ _IOWR('D', 49, struct pfioc_altq) +#define DIOCGETQSTATS _IOWR('D', 50, struct pfioc_qstats) +#define DIOCBEGINADDRS _IOWR('D', 51, struct pfioc_pooladdr) +#define DIOCADDADDR _IOWR('D', 52, struct pfioc_pooladdr) +#define DIOCGETADDRS _IOWR('D', 53, struct pfioc_pooladdr) +#define DIOCGETADDR _IOWR('D', 54, struct pfioc_pooladdr) +#define DIOCCHANGEADDR _IOWR('D', 55, struct pfioc_pooladdr) /* XXX cut 55 - 57 */ -#define DIOCGETRULESETS _IOWR('D', 58, struct pfioc_ruleset) -#define DIOCGETRULESET _IOWR('D', 59, struct pfioc_ruleset) -#define DIOCRCLRTABLES _IOWR('D', 60, struct pfioc_table) -#define DIOCRADDTABLES _IOWR('D', 61, struct pfioc_table) -#define DIOCRDELTABLES _IOWR('D', 62, struct pfioc_table) -#define DIOCRGETTABLES _IOWR('D', 63, struct pfioc_table) -#define DIOCRGETTSTATS _IOWR('D', 64, struct pfioc_table) -#define DIOCRCLRTSTATS _IOWR('D', 65, struct pfioc_table) -#define DIOCRCLRADDRS _IOWR('D', 66, struct pfioc_table) -#define DIOCRADDADDRS _IOWR('D', 67, struct pfioc_table) -#define DIOCRDELADDRS _IOWR('D', 68, struct pfioc_table) -#define DIOCRSETADDRS _IOWR('D', 69, struct pfioc_table) -#define DIOCRGETADDRS _IOWR('D', 70, struct pfioc_table) -#define DIOCRGETASTATS _IOWR('D', 71, struct pfioc_table) -#define DIOCRCLRASTATS _IOWR('D', 72, struct pfioc_table) -#define DIOCRTSTADDRS _IOWR('D', 73, struct pfioc_table) -#define DIOCRSETTFLAGS _IOWR('D', 74, struct pfioc_table) -#define DIOCRINADEFINE _IOWR('D', 77, struct pfioc_table) -#define DIOCOSFPFLUSH _IO('D', 78) -#define DIOCOSFPADD _IOWR('D', 79, struct pf_osfp_ioctl) -#define DIOCOSFPGET _IOWR('D', 80, struct pf_osfp_ioctl) -#define DIOCXBEGIN _IOWR('D', 81, struct pfioc_trans) -#define DIOCXCOMMIT _IOWR('D', 82, struct pfioc_trans) -#define DIOCXROLLBACK _IOWR('D', 83, struct pfioc_trans) -#define DIOCGETSRCNODES _IOWR('D', 84, struct pfioc_src_nodes) -#define DIOCCLRSRCNODES _IO('D', 85) -#define DIOCSETHOSTID _IOWR('D', 86, u_int32_t) -#define DIOCIGETIFACES _IOWR('D', 87, struct pfioc_iface) -#define DIOCSETIFFLAG _IOWR('D', 89, struct pfioc_iface) -#define DIOCCLRIFFLAG _IOWR('D', 90, struct pfioc_iface) +#define DIOCGETRULESETS _IOWR('D', 58, struct pfioc_ruleset) +#define DIOCGETRULESET _IOWR('D', 59, struct pfioc_ruleset) +#define DIOCRCLRTABLES _IOWR('D', 60, struct pfioc_table) +#define DIOCRADDTABLES _IOWR('D', 61, struct pfioc_table) +#define DIOCRDELTABLES _IOWR('D', 62, struct pfioc_table) +#define DIOCRGETTABLES _IOWR('D', 63, struct pfioc_table) +#define DIOCRGETTSTATS _IOWR('D', 64, struct pfioc_table) +#define DIOCRCLRTSTATS _IOWR('D', 65, struct pfioc_table) +#define DIOCRCLRADDRS _IOWR('D', 66, struct pfioc_table) +#define DIOCRADDADDRS _IOWR('D', 67, struct pfioc_table) +#define DIOCRDELADDRS _IOWR('D', 68, struct pfioc_table) +#define DIOCRSETADDRS _IOWR('D', 69, struct pfioc_table) +#define DIOCRGETADDRS _IOWR('D', 70, struct pfioc_table) +#define DIOCRGETASTATS _IOWR('D', 71, struct pfioc_table) +#define DIOCRCLRASTATS _IOWR('D', 72, struct pfioc_table) +#define DIOCRTSTADDRS _IOWR('D', 73, struct pfioc_table) +#define DIOCRSETTFLAGS _IOWR('D', 74, struct pfioc_table) +#define DIOCRINADEFINE _IOWR('D', 77, struct pfioc_table) +#define DIOCOSFPFLUSH _IO('D', 78) +#define DIOCOSFPADD _IOWR('D', 79, struct pf_osfp_ioctl) +#define DIOCOSFPGET _IOWR('D', 80, struct pf_osfp_ioctl) +#define DIOCXBEGIN _IOWR('D', 81, struct pfioc_trans) +#define DIOCXCOMMIT _IOWR('D', 82, struct pfioc_trans) +#define 
DIOCXROLLBACK _IOWR('D', 83, struct pfioc_trans) +#define DIOCGETSRCNODES _IOWR('D', 84, struct pfioc_src_nodes) +#define DIOCCLRSRCNODES _IO('D', 85) +#define DIOCSETHOSTID _IOWR('D', 86, u_int32_t) +#define DIOCIGETIFACES _IOWR('D', 87, struct pfioc_iface) +#define DIOCSETIFFLAG _IOWR('D', 89, struct pfioc_iface) +#define DIOCCLRIFFLAG _IOWR('D', 90, struct pfioc_iface) #define DIOCKILLSRCNODES _IOWR('D', 91, struct pfioc_src_node_kill) -#define DIOCGIFSPEED _IOWR('D', 92, struct pf_ifspeed) +#define DIOCGIFSPEED _IOWR('D', 92, struct pf_ifspeed) #ifdef KERNEL RB_HEAD(pf_src_tree, pf_src_node); @@ -2165,11 +2165,11 @@ extern struct pf_state_tree_id tree_id; extern struct pf_state_queue state_list; TAILQ_HEAD(pf_poolqueue, pf_pool); -extern struct pf_poolqueue pf_pools[2]; -extern struct pf_palist pf_pabuf; -extern u_int32_t ticket_pabuf; -extern struct pf_poolqueue *pf_pools_active; -extern struct pf_poolqueue *pf_pools_inactive; +extern struct pf_poolqueue pf_pools[2]; +extern struct pf_palist pf_pabuf; +extern u_int32_t ticket_pabuf; +extern struct pf_poolqueue *pf_pools_active; +extern struct pf_poolqueue *pf_pools_inactive; __private_extern__ int pf_tbladdr_setup(struct pf_ruleset *, struct pf_addr_wrap *); @@ -2349,7 +2349,7 @@ __private_extern__ void pf_tag_unref(u_int16_t); __private_extern__ int pf_tag_packet(pbuf_t *, struct pf_mtag *, int, unsigned int, struct pf_pdesc *); __private_extern__ void pf_step_into_anchor(int *, struct pf_ruleset **, int, - struct pf_rule **, struct pf_rule **, int *); + struct pf_rule **, struct pf_rule **, int *); __private_extern__ int pf_step_out_of_anchor(int *, struct pf_ruleset **, int, struct pf_rule **, struct pf_rule **, int *); __private_extern__ u_int32_t pf_qname2qid(char *); @@ -2360,10 +2360,10 @@ extern struct pf_status pf_status; extern struct pool pf_frent_pl, pf_frag_pl; struct pf_pool_limit { - void *pp; - unsigned limit; + void *pp; + unsigned limit; }; -extern struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX]; +extern struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX]; __private_extern__ int pf_af_hook(struct ifnet *, struct mbuf **, struct mbuf **, unsigned int, int, struct ip_fw_args *); @@ -2376,7 +2376,7 @@ __private_extern__ void pf_ifnet_hook(struct ifnet *, int); */ extern struct pf_anchor_global pf_anchors; extern struct pf_anchor pf_main_anchor; -#define pf_main_ruleset pf_main_anchor.ruleset +#define pf_main_ruleset pf_main_anchor.ruleset extern int pf_is_enabled; extern int16_t pf_nat64_configured; @@ -2403,7 +2403,7 @@ __private_extern__ int pf_osfp_add(struct pf_osfp_ioctl *); __private_extern__ struct pf_osfp_enlist *pf_osfp_fingerprint(struct pf_pdesc *, pbuf_t *, int, const struct tcphdr *); __private_extern__ struct pf_osfp_enlist *pf_osfp_fingerprint_hdr( - const struct ip *, const struct ip6_hdr *, const struct tcphdr *); + const struct ip *, const struct ip6_hdr *, const struct tcphdr *); __private_extern__ void pf_osfp_flush(void); __private_extern__ int pf_osfp_get(struct pf_osfp_ioctl *); __private_extern__ void pf_osfp_initialize(void); @@ -2416,7 +2416,7 @@ __private_extern__ struct pf_mtag *pf_get_mtag_pbuf(pbuf_t *); #else /* !KERNEL */ extern struct pf_anchor_global pf_anchors; extern struct pf_anchor pf_main_anchor; -#define pf_main_ruleset pf_main_anchor.ruleset +#define pf_main_ruleset pf_main_anchor.ruleset /* these ruleset functions can be linked into userland programs (pfctl) */ extern int pf_get_ruleset_number(u_int8_t); diff --git a/bsd/net/pktap.c b/bsd/net/pktap.c index 41da2471d..da700024f 
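(Editor's aside, not part of the patch: the DIOC* commands above are driven from userland via ioctl(2) on the pf control device. A minimal sketch of reading the global pf status follows — it assumes the private <net/pfvar.h> header and root privileges, and abbreviates error handling; pfctl uses essentially this sequence for its status output.)

#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <net/pfvar.h>

int
main(void)
{
	struct pf_status st;
	int dev = open("/dev/pf", O_RDWR);	/* pf control device; needs root */

	if (dev < 0) {
		perror("open(/dev/pf)");
		return 1;
	}
	/* DIOCGETSTATUS is _IOWR('D', 21, struct pf_status), per the list above */
	if (ioctl(dev, DIOCGETSTATUS, &st) < 0) {
		perror("DIOCGETSTATUS");
		return 1;
	}
	printf("pf is %s, %u states\n",
	    st.running ? "enabled" : "disabled", st.states);
	return 0;
}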
100644 --- a/bsd/net/pktap.c +++ b/bsd/net/pktap.c @@ -48,7 +48,7 @@ #include #include #include -#define _IP_VHL +#define _IP_VHL #include #include #include @@ -68,26 +68,26 @@ extern struct inpcbinfo ripcbinfo; struct pktap_softc { - LIST_ENTRY(pktap_softc) pktp_link; - uint32_t pktp_unit; - uint32_t pktp_dlt_raw_count; - uint32_t pktp_dlt_pkttap_count; - struct ifnet *pktp_ifp; - struct pktap_filter pktp_filters[PKTAP_MAX_FILTERS]; + LIST_ENTRY(pktap_softc) pktp_link; + uint32_t pktp_unit; + uint32_t pktp_dlt_raw_count; + uint32_t pktp_dlt_pkttap_count; + struct ifnet *pktp_ifp; + struct pktap_filter pktp_filters[PKTAP_MAX_FILTERS]; }; #ifndef PKTAP_DEBUG -#define PKTAP_DEBUG 0 +#define PKTAP_DEBUG 0 #endif /* PKTAP_DEBUG */ -#define PKTAP_FILTER_OK 0 /* Packet passes filter checks */ -#define PKTAP_FILTER_SKIP 1 /* Do not tap this packet */ +#define PKTAP_FILTER_OK 0 /* Packet passes filter checks */ +#define PKTAP_FILTER_SKIP 1 /* Do not tap this packet */ static int pktap_inited = 0; SYSCTL_DECL(_net_link); SYSCTL_NODE(_net_link, IFT_PKTAP, pktap, - CTLFLAG_RW |CTLFLAG_LOCKED, 0, "pktap virtual interface"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "pktap virtual interface"); uint32_t pktap_total_tap_count = 0; SYSCTL_UINT(_net_link_pktap, OID_AUTO, total_tap_count, @@ -101,18 +101,18 @@ static int pktap_log = 0; SYSCTL_INT(_net_link_pktap, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED, &pktap_log, 0, ""); -#define PKTAP_LOG(mask, fmt, ...) \ +#define PKTAP_LOG(mask, fmt, ...) \ do { \ if ((pktap_log & mask)) \ - printf("%s:%d " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \ + printf("%s:%d " fmt, __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while (false) -#define PKTP_LOG_FUNC 0x01 -#define PKTP_LOG_FILTER 0x02 -#define PKTP_LOG_INPUT 0x04 -#define PKTP_LOG_OUTPUT 0x08 -#define PKTP_LOG_ERROR 0x10 -#define PKTP_LOG_NOPCB 0x20 +#define PKTP_LOG_FUNC 0x01 +#define PKTP_LOG_FILTER 0x02 +#define PKTP_LOG_INPUT 0x04 +#define PKTP_LOG_OUTPUT 0x08 +#define PKTP_LOG_ERROR 0x10 +#define PKTP_LOG_NOPCB 0x20 /* * pktap_lck_rw protects the global list of pktap interfaces @@ -128,22 +128,22 @@ static LIST_HEAD(pktap_list, pktap_softc) pktap_list = int pktap_clone_create(struct if_clone *, u_int32_t, void *); int pktap_clone_destroy(struct ifnet *); -#define PKTAP_MAXUNIT IF_MAXUNIT -#define PKTAP_ZONE_MAX_ELEM MIN(IFNETS_MAX, PKTAP_MAXUNIT) +#define PKTAP_MAXUNIT IF_MAXUNIT +#define PKTAP_ZONE_MAX_ELEM MIN(IFNETS_MAX, PKTAP_MAXUNIT) static struct if_clone pktap_cloner = - IF_CLONE_INITIALIZER(PKTAP_IFNAME, - pktap_clone_create, - pktap_clone_destroy, - 0, - PKTAP_MAXUNIT, - PKTAP_ZONE_MAX_ELEM, - sizeof(struct pktap_softc)); + IF_CLONE_INITIALIZER(PKTAP_IFNAME, + pktap_clone_create, + pktap_clone_destroy, + 0, + PKTAP_MAXUNIT, + PKTAP_ZONE_MAX_ELEM, + sizeof(struct pktap_softc)); errno_t pktap_if_output(ifnet_t, mbuf_t); errno_t pktap_demux(ifnet_t, mbuf_t, char *, protocol_family_t *); errno_t pktap_add_proto(ifnet_t, protocol_family_t, - const struct ifnet_demux_desc *, u_int32_t); + const struct ifnet_demux_desc *, u_int32_t); errno_t pktap_del_proto(ifnet_t, protocol_family_t); errno_t pktap_getdrvspec(ifnet_t, struct ifdrv64 *); errno_t pktap_setdrvspec(ifnet_t, struct ifdrv64 *); @@ -160,25 +160,28 @@ pktap_hexdump(int mask, void *addr, size_t len) unsigned char *buf = addr; size_t i; - if (!(pktap_log & mask)) + if (!(pktap_log & mask)) { return; + } for (i = 0; i < len; i++) { unsigned char h = (buf[i] & 0xf0) >> 4; unsigned char l = buf[i] & 0x0f; if (i != 0) { - if (i % 32 == 0) + if (i % 32 == 0) 
{ printf("\n"); - else if (i % 4 == 0) + } else if (i % 4 == 0) { printf(" "); + } } printf("%c%c", - h < 10 ? h + '0' : h - 10 + 'a', - l < 10 ? l + '0' : l - 10 + 'a'); + h < 10 ? h + '0' : h - 10 + 'a', + l < 10 ? l + '0' : l - 10 + 'a'); } - if (i % 32 != 0) + if (i % 32 != 0) { printf("\n"); + } } #define _CASSERT_OFFFSETOF_FIELD(s1, s2, f) \ @@ -209,9 +212,10 @@ pktap_init(void) LIST_INIT(&pktap_list); error = if_clone_attach(&pktap_cloner); - if (error != 0) + if (error != 0) { panic("%s: if_clone_attach() failed, error %d\n", __func__, error); + } } __private_extern__ int @@ -260,7 +264,7 @@ pktap_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) */ bzero(&if_init, sizeof(if_init)); if_init.ver = IFNET_INIT_CURRENT_VERSION; - if_init.len = sizeof (if_init); + if_init.len = sizeof(if_init); if_init.flags = IFNET_INIT_LEGACY; if_init.name = ifc->ifc_name; if_init.unit = unit; @@ -301,9 +305,10 @@ pktap_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params) LIST_INSERT_HEAD(&pktap_list, pktap, pktp_link); lck_rw_done(pktap_lck_rw); done: - if (error != 0 && pktap != NULL) + if (error != 0 && pktap != NULL) { if_clone_softc_deallocate(&pktap_cloner, pktap); - return (error); + } + return error; } __private_extern__ int @@ -315,7 +320,7 @@ pktap_clone_destroy(struct ifnet *ifp) (void) ifnet_detach(ifp); - return (error); + return error; } /* @@ -337,29 +342,28 @@ pktap_tap_callback(ifnet_t ifp, u_int32_t dlt, bpf_tap_mode direction) goto done; } switch (dlt) { - case DLT_RAW: - if (direction == 0) { - if (pktap->pktp_dlt_raw_count > 0) { - pktap->pktp_dlt_raw_count--; - OSAddAtomic(-1, &pktap_total_tap_count); - - } - } else { - pktap->pktp_dlt_raw_count++; - OSAddAtomic(1, &pktap_total_tap_count); + case DLT_RAW: + if (direction == 0) { + if (pktap->pktp_dlt_raw_count > 0) { + pktap->pktp_dlt_raw_count--; + OSAddAtomic(-1, &pktap_total_tap_count); } - break; - case DLT_PKTAP: - if (direction == 0) { - if (pktap->pktp_dlt_pkttap_count > 0) { - pktap->pktp_dlt_pkttap_count--; - OSAddAtomic(-1, &pktap_total_tap_count); - } - } else { - pktap->pktp_dlt_pkttap_count++; - OSAddAtomic(1, &pktap_total_tap_count); + } else { + pktap->pktp_dlt_raw_count++; + OSAddAtomic(1, &pktap_total_tap_count); + } + break; + case DLT_PKTAP: + if (direction == 0) { + if (pktap->pktp_dlt_pkttap_count > 0) { + pktap->pktp_dlt_pkttap_count--; + OSAddAtomic(-1, &pktap_total_tap_count); } - break; + } else { + pktap->pktp_dlt_pkttap_count++; + OSAddAtomic(1, &pktap_total_tap_count); + } + break; } done: /* @@ -368,7 +372,7 @@ done: */ VERIFY(pktap_total_tap_count >= 0); - return (0); + return 0; } __private_extern__ errno_t @@ -376,15 +380,15 @@ pktap_if_output(ifnet_t ifp, mbuf_t m) { PKTAP_LOG(PKTP_LOG_FUNC, "%s\n", ifp->if_xname); mbuf_freem(m); - return (ENOTSUP); + return ENOTSUP; } __private_extern__ errno_t pktap_demux(ifnet_t ifp, __unused mbuf_t m, __unused char *header, - __unused protocol_family_t *ppf) + __unused protocol_family_t *ppf) { PKTAP_LOG(PKTP_LOG_FUNC, "%s\n", ifp->if_xname); - return (ENOTSUP); + return ENOTSUP; } __private_extern__ errno_t @@ -392,14 +396,14 @@ pktap_add_proto(__unused ifnet_t ifp, protocol_family_t pf, __unused const struct ifnet_demux_desc *dmx, __unused u_int32_t cnt) { PKTAP_LOG(PKTP_LOG_FUNC, "%s pf %u\n", ifp->if_xname, pf); - return (0); + return 0; } __private_extern__ errno_t pktap_del_proto(__unused ifnet_t ifp, __unused protocol_family_t pf) { PKTAP_LOG(PKTP_LOG_FUNC, "%s pf %u\n", ifp->if_xname, pf); - return (0); + 
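/*
 * (Editor's note, not in the original source: pktap interfaces never
 * bind real protocols, so the add_proto/del_proto handlers required by
 * the ifnet KPI are deliberate no-ops that log the call and report
 * success.)
 */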
return 0; } __private_extern__ errno_t @@ -426,7 +430,7 @@ pktap_getdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) if (ifd->ifd_len < PKTAP_MAX_FILTERS * sizeof(struct x_pktap_filter)) { printf("%s: PKTP_CMD_FILTER_GET ifd_len %llu too small - error %d\n", - __func__, ifd->ifd_len, error); + __func__, ifd->ifd_len, error); error = EINVAL; break; } @@ -437,15 +441,16 @@ pktap_getdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) x_filter->filter_op = pktap_filter->filter_op; x_filter->filter_param = pktap_filter->filter_param; - if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_TYPE) + if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_TYPE) { x_filter->filter_param_if_type = pktap_filter->filter_param_if_type; - else if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_NAME) + } else if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_NAME) { strlcpy(x_filter->filter_param_if_name, - pktap_filter->filter_param_if_name, - sizeof(x_filter->filter_param_if_name)); + pktap_filter->filter_param_if_name, + sizeof(x_filter->filter_param_if_name)); + } } error = copyout(x_filters, ifd->ifd_data, - PKTAP_MAX_FILTERS * sizeof(struct x_pktap_filter)); + PKTAP_MAX_FILTERS * sizeof(struct x_pktap_filter)); if (error) { printf("%s: PKTP_CMD_FILTER_GET copyout - error %d\n", __func__, error); goto done; @@ -457,7 +462,7 @@ pktap_getdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) if (ifd->ifd_len < sizeof(tap_count)) { printf("%s: PKTP_CMD_TAP_COUNT ifd_len %llu too small - error %d\n", - __func__, ifd->ifd_len, error); + __func__, ifd->ifd_len, error); error = EINVAL; break; } @@ -474,7 +479,7 @@ pktap_getdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) } done: - return (error); + return error; } __private_extern__ errno_t @@ -500,7 +505,7 @@ pktap_setdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) if (ifd->ifd_len != PKTAP_MAX_FILTERS * sizeof(struct x_pktap_filter)) { printf("%s: PKTP_CMD_FILTER_SET bad ifd_len %llu - error %d\n", - __func__, ifd->ifd_len, error); + __func__, ifd->ifd_len, error); error = EINVAL; break; } @@ -516,61 +521,64 @@ pktap_setdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) struct x_pktap_filter *x_filter = user_filters + i; switch (x_filter->filter_op) { - case PKTAP_FILTER_OP_NONE: - /* Following entries must be PKTAP_FILTER_OP_NONE */ - got_op_none = 1; - break; - case PKTAP_FILTER_OP_PASS: - case PKTAP_FILTER_OP_SKIP: - /* Invalid after PKTAP_FILTER_OP_NONE */ - if (got_op_none) { - error = EINVAL; - break; - } - break; - default: + case PKTAP_FILTER_OP_NONE: + /* Following entries must be PKTAP_FILTER_OP_NONE */ + got_op_none = 1; + break; + case PKTAP_FILTER_OP_PASS: + case PKTAP_FILTER_OP_SKIP: + /* Invalid after PKTAP_FILTER_OP_NONE */ + if (got_op_none) { error = EINVAL; break; + } + break; + default: + error = EINVAL; + break; } - if (error != 0) + if (error != 0) { break; + } switch (x_filter->filter_param) { - case PKTAP_FILTER_OP_NONE: - if (x_filter->filter_op != PKTAP_FILTER_OP_NONE) { - error = EINVAL; - break; - } - break; - - /* - * Do not allow to tap a pktap from a pktap - */ - case PKTAP_FILTER_PARAM_IF_TYPE: - if (x_filter->filter_param_if_type == IFT_PKTAP || - x_filter->filter_param_if_type > 0xff) { - error = EINVAL; - break; - } + case PKTAP_FILTER_OP_NONE: + if (x_filter->filter_op != PKTAP_FILTER_OP_NONE) { + error = EINVAL; break; + } + break; - case PKTAP_FILTER_PARAM_IF_NAME: - if (strncmp(x_filter->filter_param_if_name, PKTAP_IFNAME, - strlen(PKTAP_IFNAME)) == 0) { - error = EINVAL; - break; - } + /* + * Do not allow to tap a pktap from a pktap + */ + case 
PKTAP_FILTER_PARAM_IF_TYPE: + if (x_filter->filter_param_if_type == IFT_PKTAP || + x_filter->filter_param_if_type > 0xff) { + error = EINVAL; break; + } + break; - default: + case PKTAP_FILTER_PARAM_IF_NAME: + if (strncmp(x_filter->filter_param_if_name, PKTAP_IFNAME, + strlen(PKTAP_IFNAME)) == 0) { error = EINVAL; break; + } + break; + + default: + error = EINVAL; + break; } - if (error != 0) + if (error != 0) { break; + } } - if (error != 0) + if (error != 0) { break; + } for (i = 0; i < PKTAP_MAX_FILTERS; i++) { struct pktap_filter *pktap_filter = pktap->pktp_filters + i; struct x_pktap_filter *x_filter = user_filters + i; @@ -578,22 +586,23 @@ pktap_setdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) pktap_filter->filter_op = x_filter->filter_op; pktap_filter->filter_param = x_filter->filter_param; - if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_TYPE) + if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_TYPE) { pktap_filter->filter_param_if_type = x_filter->filter_param_if_type; - else if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_NAME) { + } else if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_NAME) { size_t len; strlcpy(pktap_filter->filter_param_if_name, - x_filter->filter_param_if_name, - sizeof(pktap_filter->filter_param_if_name)); + x_filter->filter_param_if_name, + sizeof(pktap_filter->filter_param_if_name)); /* * If name does not end with a number then it's a "wildcard" match * where we compare the prefix of the interface name */ len = strlen(pktap_filter->filter_param_if_name); if (pktap_filter->filter_param_if_name[len] < '0' || - pktap_filter->filter_param_if_name[len] > '9') + pktap_filter->filter_param_if_name[len] > '9') { pktap_filter->filter_ifname_prefix_len = len; + } } } break; @@ -604,7 +613,7 @@ pktap_setdrvspec(ifnet_t ifp, struct ifdrv64 *ifd) } done: - return (error); + return error; } __private_extern__ errno_t @@ -618,8 +627,8 @@ pktap_ioctl(ifnet_t ifp, unsigned long cmd, void *data) error = kauth_authorize_generic(kauth_cred_get(), KAUTH_GENERIC_ISSUSER); if (error) { PKTAP_LOG(PKTP_LOG_ERROR, - "%s: kauth_authorize_generic(KAUTH_GENERIC_ISSUSER) - error %d\n", - __func__, error); + "%s: kauth_authorize_generic(KAUTH_GENERIC_ISSUSER) - error %d\n", + __func__, error); goto done; } } @@ -669,7 +678,7 @@ pktap_ioctl(ifnet_t ifp, unsigned long cmd, void *data) break; } done: - return (error); + return error; } __private_extern__ void @@ -705,84 +714,88 @@ pktap_filter_evaluate(struct pktap_softc *pktap, struct ifnet *ifp) for (i = 0; i < PKTAP_MAX_FILTERS; i++) { struct pktap_filter *pktap_filter = pktap->pktp_filters + i; size_t len = pktap_filter->filter_ifname_prefix_len != 0 ? 
- pktap_filter->filter_ifname_prefix_len : PKTAP_IFXNAMESIZE; + pktap_filter->filter_ifname_prefix_len : PKTAP_IFXNAMESIZE; switch (pktap_filter->filter_op) { - case PKTAP_FILTER_OP_NONE: - match = 1; - break; + case PKTAP_FILTER_OP_NONE: + match = 1; + break; - case PKTAP_FILTER_OP_PASS: - if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_TYPE) { - if (pktap_filter->filter_param_if_type == 0 || - ifp->if_type == pktap_filter->filter_param_if_type) { - result = PKTAP_FILTER_OK; - match = 1; - PKTAP_LOG(PKTP_LOG_FILTER, "pass %s match type %u\n", - ifp->if_xname, pktap_filter->filter_param_if_type); - break; - } + case PKTAP_FILTER_OP_PASS: + if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_TYPE) { + if (pktap_filter->filter_param_if_type == 0 || + ifp->if_type == pktap_filter->filter_param_if_type) { + result = PKTAP_FILTER_OK; + match = 1; + PKTAP_LOG(PKTP_LOG_FILTER, "pass %s match type %u\n", + ifp->if_xname, pktap_filter->filter_param_if_type); + break; } - if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_NAME) { - if (strncmp(ifp->if_xname, pktap_filter->filter_param_if_name, - len) == 0) { - result = PKTAP_FILTER_OK; - match = 1; - PKTAP_LOG(PKTP_LOG_FILTER, "pass %s match name %s\n", - ifp->if_xname, pktap_filter->filter_param_if_name); - break; - } + } + if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_NAME) { + if (strncmp(ifp->if_xname, pktap_filter->filter_param_if_name, + len) == 0) { + result = PKTAP_FILTER_OK; + match = 1; + PKTAP_LOG(PKTP_LOG_FILTER, "pass %s match name %s\n", + ifp->if_xname, pktap_filter->filter_param_if_name); + break; } - break; + } + break; - case PKTAP_FILTER_OP_SKIP: - if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_TYPE) { - if (pktap_filter->filter_param_if_type == 0 || - ifp->if_type == pktap_filter->filter_param_if_type) { - result = PKTAP_FILTER_SKIP; - match = 1; - PKTAP_LOG(PKTP_LOG_FILTER, "skip %s match type %u\n", - ifp->if_xname, pktap_filter->filter_param_if_type); - break; - } + case PKTAP_FILTER_OP_SKIP: + if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_TYPE) { + if (pktap_filter->filter_param_if_type == 0 || + ifp->if_type == pktap_filter->filter_param_if_type) { + result = PKTAP_FILTER_SKIP; + match = 1; + PKTAP_LOG(PKTP_LOG_FILTER, "skip %s match type %u\n", + ifp->if_xname, pktap_filter->filter_param_if_type); + break; } - if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_NAME) { - if (strncmp(ifp->if_xname, pktap_filter->filter_param_if_name, - len) == 0) { - result = PKTAP_FILTER_SKIP; - match = 1; - PKTAP_LOG(PKTP_LOG_FILTER, "skip %s match name %s\n", - ifp->if_xname, pktap_filter->filter_param_if_name); - break; - } + } + if (pktap_filter->filter_param == PKTAP_FILTER_PARAM_IF_NAME) { + if (strncmp(ifp->if_xname, pktap_filter->filter_param_if_name, + len) == 0) { + result = PKTAP_FILTER_SKIP; + match = 1; + PKTAP_LOG(PKTP_LOG_FILTER, "skip %s match name %s\n", + ifp->if_xname, pktap_filter->filter_param_if_name); + break; } - break; + } + break; } - if (match) + if (match) { break; + } } if (match == 0) { PKTAP_LOG(PKTP_LOG_FILTER, "%s no match\n", - ifp->if_xname); + ifp->if_xname); } - return (result); + return result; } static void pktap_set_procinfo(struct pktap_header *hdr, struct so_procinfo *soprocinfo) { hdr->pth_pid = soprocinfo->spi_pid; - if (hdr->pth_comm[0] == 0) + if (hdr->pth_comm[0] == 0) { proc_name(soprocinfo->spi_pid, hdr->pth_comm, MAXCOMLEN); - if (soprocinfo->spi_pid != 0) + } + if (soprocinfo->spi_pid != 0) { uuid_copy(hdr->pth_uuid, soprocinfo->spi_uuid); + 
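/*
 * (Editor's note, not in the original source: spi_pid/spi_uuid identify
 * the process owning the socket; when the flow is delegated, the
 * PTH_FLAG_PROC_DELEGATED branch just below records the effective
 * pid/uuid the traffic should be attributed to.)
 */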
} if (soprocinfo->spi_delegated != 0) { hdr->pth_flags |= PTH_FLAG_PROC_DELEGATED; hdr->pth_epid = soprocinfo->spi_epid; - if (hdr->pth_ecomm[0] == 0) - proc_name(soprocinfo->spi_epid, hdr->pth_ecomm, MAXCOMLEN); + if (hdr->pth_ecomm[0] == 0) { + proc_name(soprocinfo->spi_epid, hdr->pth_ecomm, MAXCOMLEN); + } uuid_copy(hdr->pth_euuid, soprocinfo->spi_euuid); } } @@ -793,21 +806,24 @@ pktap_finalize_proc_info(struct pktap_header *hdr) int found; struct so_procinfo soprocinfo; - if (!(hdr->pth_flags & PTH_FLAG_DELAY_PKTAP)) + if (!(hdr->pth_flags & PTH_FLAG_DELAY_PKTAP)) { return; + } - if (hdr->pth_ipproto == IPPROTO_TCP) + if (hdr->pth_ipproto == IPPROTO_TCP) { found = inp_findinpcb_procinfo(&tcbinfo, hdr->pth_flowid, &soprocinfo); - else if (hdr->pth_ipproto == IPPROTO_UDP) + } else if (hdr->pth_ipproto == IPPROTO_UDP) { found = inp_findinpcb_procinfo(&udbinfo, hdr->pth_flowid, &soprocinfo); - else + } else { found = inp_findinpcb_procinfo(&ripcbinfo, hdr->pth_flowid, &soprocinfo); + } - if (found == 1) + if (found == 1) { pktap_set_procinfo(hdr, &soprocinfo); + } } static void @@ -832,8 +848,9 @@ pktap_v2_set_procinfo(struct pktap_v2_hdr *pktap_v2_hdr, } } - if (!(pktap_v2_hdr->pth_flags & PTH_FLAG_PROC_DELEGATED)) + if (!(pktap_v2_hdr->pth_flags & PTH_FLAG_PROC_DELEGATED)) { return; + } /* * The effective UUID may be set independently from the effective pid @@ -865,8 +882,9 @@ pktap_v2_finalize_proc_info(struct pktap_v2_hdr *pktap_v2_hdr) int found; struct so_procinfo soprocinfo; - if (!(pktap_v2_hdr->pth_flags & PTH_FLAG_DELAY_PKTAP)) + if (!(pktap_v2_hdr->pth_flags & PTH_FLAG_DELAY_PKTAP)) { return; + } if (pktap_v2_hdr->pth_ipproto == IPPROTO_TCP) { found = inp_findinpcb_procinfo(&tcbinfo, @@ -885,7 +903,7 @@ pktap_v2_finalize_proc_info(struct pktap_v2_hdr *pktap_v2_hdr) __private_extern__ void pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, - struct mbuf *m, u_int32_t pre, int outgoing, struct ifnet *ifp) + struct mbuf *m, u_int32_t pre, int outgoing, struct ifnet *ifp) { /* * Getting the pid and procname is expensive @@ -926,7 +944,7 @@ pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, } if (hdr->pth_epid != 0 && hdr->pth_epid != -1) { - hdr->pth_flags|= PTH_FLAG_PROC_DELEGATED; + hdr->pth_flags |= PTH_FLAG_PROC_DELEGATED; proc_name(hdr->pth_epid, hdr->pth_ecomm, MAXCOMLEN); } else { hdr->pth_epid = -1; @@ -967,9 +985,10 @@ pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, struct tcphdr th; error = mbuf_copydata(m, pre + hlen, - sizeof(struct tcphdr), &th); - if (error != 0) + sizeof(struct tcphdr), &th); + if (error != 0) { goto done; + } fport = th.th_sport; lport = th.th_dport; @@ -979,7 +998,7 @@ pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, struct udphdr uh; error = mbuf_copydata(m, pre + hlen, - sizeof(struct udphdr), &uh); + sizeof(struct udphdr), &uh); if (error != 0) { PKTAP_LOG(PKTP_LOG_ERROR, "mbuf_copydata udp v4 failed for %s\n", @@ -994,12 +1013,13 @@ pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, } if (pcbinfo != NULL) { inp = in_pcblookup_hash(pcbinfo, faddr, fport, - laddr, lport, wildcard, outgoing ? NULL : ifp); + laddr, lport, wildcard, outgoing ? 
NULL : ifp); - if (inp == NULL && hdr->pth_iftype != IFT_LOOP) + if (inp == NULL && hdr->pth_iftype != IFT_LOOP) { PKTAP_LOG(PKTP_LOG_NOPCB, "in_pcblookup_hash no pcb %s\n", hdr->pth_ifname); + } } else { PKTAP_LOG(PKTP_LOG_NOPCB, "unknown ip_p %u on %s\n", @@ -1016,8 +1036,9 @@ pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, int wildcard = 0; error = mbuf_copydata(m, pre, sizeof(struct ip6_hdr), &ip6); - if (error != 0) + if (error != 0) { goto done; + } faddr = &ip6.ip6_src; laddr = &ip6.ip6_dst; @@ -1026,7 +1047,7 @@ pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, struct tcphdr th; error = mbuf_copydata(m, pre + sizeof(struct ip6_hdr), - sizeof(struct tcphdr), &th); + sizeof(struct tcphdr), &th); if (error != 0) { PKTAP_LOG(PKTP_LOG_ERROR, "mbuf_copydata tcp v6 failed for %s\n", @@ -1042,7 +1063,7 @@ pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, struct udphdr uh; error = mbuf_copydata(m, pre + sizeof(struct ip6_hdr), - sizeof(struct udphdr), &uh); + sizeof(struct udphdr), &uh); if (error != 0) { PKTAP_LOG(PKTP_LOG_ERROR, "mbuf_copydata udp v6 failed for %s\n", @@ -1058,12 +1079,13 @@ pktap_fill_proc_info(struct pktap_header *hdr, protocol_family_t proto, } if (pcbinfo != NULL) { inp = in6_pcblookup_hash(pcbinfo, faddr, fport, - laddr, lport, wildcard, outgoing ? NULL : ifp); + laddr, lport, wildcard, outgoing ? NULL : ifp); - if (inp == NULL && hdr->pth_iftype != IFT_LOOP) + if (inp == NULL && hdr->pth_iftype != IFT_LOOP) { PKTAP_LOG(PKTP_LOG_NOPCB, "in6_pcblookup_hash no pcb %s\n", hdr->pth_ifname); + } } else { PKTAP_LOG(PKTP_LOG_NOPCB, "unknown ip6.ip6_nxt %u on %s\n", @@ -1086,9 +1108,10 @@ done: hdr->pth_pid = -1; hdr->pth_epid = -1; - if (found != 0) - pktap_set_procinfo(hdr, &soprocinfo); -} + if (found != 0) { + pktap_set_procinfo(hdr, &soprocinfo); + } + } } __private_extern__ void @@ -1097,13 +1120,14 @@ pktap_bpf_tap(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, { struct pktap_softc *pktap; void (*bpf_tap_func)(ifnet_t, u_int32_t, mbuf_t, void *, size_t) = - outgoing ? bpf_tap_out : bpf_tap_in; + outgoing ? bpf_tap_out : bpf_tap_in; /* * Skip the coprocessor interface */ - if (!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) + if (!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) { return; + } lck_rw_lock_shared(pktap_lck_rw); @@ -1115,19 +1139,20 @@ pktap_bpf_tap(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, int filter_result; filter_result = pktap_filter_evaluate(pktap, ifp); - if (filter_result == PKTAP_FILTER_SKIP) + if (filter_result == PKTAP_FILTER_SKIP) { continue; + } if (pktap->pktp_dlt_raw_count > 0) { /* We accept only IPv4 and IPv6 packets for the raw DLT */ - if ((proto == AF_INET ||proto == AF_INET6) && - !(m->m_pkthdr.pkt_flags & PKTF_INET_RESOLVE)) { + if ((proto == AF_INET || proto == AF_INET6) && + !(m->m_pkthdr.pkt_flags & PKTF_INET_RESOLVE)) { /* * We can play just with the length of the first mbuf in the * chain because bpf_tap_imp() disregard the packet length * of the mbuf packet header. 
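 * (Editor's note, added for clarity: concretely, the call below slides
 * m_data forward by `pre` bytes so the DLT_RAW consumer sees a bare
 * IPv4/IPv6 packet without its link-layer framing, taps it, then slides
 * m_data back so the mbuf is unchanged for the rest of the stack.)
 */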
*/ - if (mbuf_setdata(m, m->m_data + pre, m->m_len - pre) == 0) { + if (mbuf_setdata(m, m->m_data + pre, m->m_len - pre) == 0) { bpf_tap_func(pktap->pktp_ifp, DLT_RAW, m, NULL, 0); mbuf_setdata(m, m->m_data - pre, m->m_len + pre); } @@ -1156,80 +1181,85 @@ pktap_bpf_tap(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, * Set DLT of packet based on interface type */ switch (ifp->if_type) { - case IFT_LOOP: - case IFT_GIF: - case IFT_STF: - case IFT_CELLULAR: + case IFT_LOOP: + case IFT_GIF: + case IFT_STF: + case IFT_CELLULAR: + /* + * Packets from pdp interfaces have no loopback + * header that contain the protocol number. + * As BPF just concatenate the header and the + * packet content in a single buffer, + * stash the protocol after the pktap header + * and adjust the size of the header accordingly + */ + hdr->pth_dlt = DLT_NULL; + if (pre == 0) { + hdr_buffer.proto = proto; + hdr_size = sizeof(hdr_buffer); + pre_adjust = sizeof(hdr_buffer.proto); + } + break; + case IFT_ETHER: + case IFT_BRIDGE: + case IFT_L2VLAN: + case IFT_IEEE8023ADLAG: + hdr->pth_dlt = DLT_EN10MB; + break; + case IFT_PPP: + hdr->pth_dlt = DLT_PPP; + break; + case IFT_IEEE1394: + hdr->pth_dlt = DLT_APPLE_IP_OVER_IEEE1394; + break; + case IFT_OTHER: + if (ifp->if_subfamily == IFNET_SUBFAMILY_IPSEC || + ifp->if_subfamily == IFNET_SUBFAMILY_UTUN) { /* - * Packets from pdp interfaces have no loopback - * header that contain the protocol number. - * As BPF just concatenate the header and the - * packet content in a single buffer, - * stash the protocol after the pktap header - * and adjust the size of the header accordingly + * For utun: + * - incoming packets do not have the prefix set to four + * - some packets are as small as two bytes! */ - hdr->pth_dlt = DLT_NULL; - if (pre == 0) { - hdr_buffer.proto = proto; - hdr_size = sizeof(hdr_buffer); - pre_adjust = sizeof(hdr_buffer.proto); + if (m_pktlen(m) < 4) { + goto done; + } + if (proto != AF_INET && proto != AF_INET6) { + goto done; + } + if (proto == AF_INET && (size_t) m_pktlen(m) - 4 < sizeof(struct ip)) { + goto done; + } + if (proto == AF_INET6 && (size_t) m_pktlen(m) - 4 < sizeof(struct ip6_hdr)) { + goto done; } - break; - case IFT_ETHER: - case IFT_BRIDGE: - case IFT_L2VLAN: - case IFT_IEEE8023ADLAG: - hdr->pth_dlt = DLT_EN10MB; - break; - case IFT_PPP: - hdr->pth_dlt = DLT_PPP; - break; - case IFT_IEEE1394: - hdr->pth_dlt = DLT_APPLE_IP_OVER_IEEE1394; - break; - case IFT_OTHER: - if (ifp->if_subfamily == IFNET_SUBFAMILY_IPSEC || - ifp->if_subfamily == IFNET_SUBFAMILY_UTUN) { - /* - * For utun: - * - incoming packets do not have the prefix set to four - * - some packets are as small as two bytes! 
- */ - if (m_pktlen(m) < 4) - goto done; - if (proto != AF_INET && proto != AF_INET6) - goto done; - if (proto == AF_INET && (size_t) m_pktlen(m) - 4 < sizeof(struct ip)) - goto done; - if (proto == AF_INET6 && (size_t) m_pktlen(m) - 4 < sizeof(struct ip6_hdr)) - goto done; + /* + * Handle two cases: + * - The old utun encapsulation with the protocol family in network order + * - A raw IPv4 or IPv6 packet + */ + uint8_t data = *(uint8_t *)mbuf_data(m); + if ((data >> 4) == 4 || (data >> 4) == 6) { + pre = 4; + } else { /* - * Handle two cases: - * - The old utun encapsulation with the protocol family in network order - * - A raw IPv4 or IPv6 packet + * Skip the protocol in the mbuf as it's in network order */ - uint8_t data = *(uint8_t *)mbuf_data(m); - if ((data >> 4) == 4 || (data >> 4) == 6) { - pre = 4; - } else { - /* - * Skip the protocol in the mbuf as it's in network order - */ - pre = 4; - data_adjust = 4; - } + pre = 4; + data_adjust = 4; } - hdr->pth_dlt = DLT_NULL; - hdr_buffer.proto = proto; - hdr_size = sizeof(hdr_buffer); - break; - default: - if (pre == 0) - hdr->pth_dlt = DLT_RAW; - else - unknown_if_type = 1; - break; + } + hdr->pth_dlt = DLT_NULL; + hdr_buffer.proto = proto; + hdr_size = sizeof(hdr_buffer); + break; + default: + if (pre == 0) { + hdr->pth_dlt = DLT_RAW; + } else { + unknown_if_type = 1; + } + break; } if (unknown_if_type) { PKTAP_LOG(PKTP_LOG_FUNC, @@ -1246,10 +1276,12 @@ pktap_bpf_tap(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, hdr->pth_iftype = ifp->if_type; hdr->pth_ifunit = ifp->if_unit; - if (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE) + if (m->m_pkthdr.pkt_flags & PKTF_KEEPALIVE) { hdr->pth_flags |= PTH_FLAG_KEEP_ALIVE; - if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT) + } + if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT) { hdr->pth_flags |= PTH_FLAG_REXMIT; + } pktap_fill_proc_info(hdr, proto, m, pre, outgoing, ifp); @@ -1263,7 +1295,7 @@ pktap_bpf_tap(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, * chain because bpf_tap_imp() disregard the packet length * of the mbuf packet header. 
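 * (Editor's note, added for clarity: here the hidden span is
 * data_adjust, the 4-byte protocol-family prefix of the legacy utun
 * encapsulation detected above, so the DLT_PKTAP payload starts at the
 * raw IP header; m_data is restored right after the tap call.)
 */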
*/ - if (mbuf_setdata(m, m->m_data + data_adjust, m->m_len - data_adjust) == 0) { + if (mbuf_setdata(m, m->m_data + data_adjust, m->m_len - data_adjust) == 0) { bpf_tap_func(pktap->pktp_ifp, DLT_PKTAP, m, hdr, hdr_size); mbuf_setdata(m, m->m_data - data_adjust, m->m_len + data_adjust); } @@ -1283,8 +1315,9 @@ pktap_input(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, char *start; /* Fast path */ - if (pktap_total_tap_count == 0) + if (pktap_total_tap_count == 0) { return; + } hdr = (char *)mbuf_data(m); start = (char *)mbuf_datastart(m); @@ -1295,14 +1328,14 @@ pktap_input(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, if (mbuf_setdata(m, frame_header, o_len + pre) == 0) { PKTAP_LOG(PKTP_LOG_INPUT, "ifp %s proto %u pre %u post %u\n", - ifp->if_xname, proto, pre, 0); + ifp->if_xname, proto, pre, 0); - pktap_bpf_tap(ifp, proto, m, pre, 0, 0); + pktap_bpf_tap(ifp, proto, m, pre, 0, 0); mbuf_setdata(m, hdr, o_len); } } else { PKTAP_LOG(PKTP_LOG_INPUT, "ifp %s proto %u pre %u post %u\n", - ifp->if_xname, proto, 0, 0); + ifp->if_xname, proto, 0, 0); pktap_bpf_tap(ifp, proto, m, 0, 0, 0); } @@ -1313,11 +1346,12 @@ pktap_output(struct ifnet *ifp, protocol_family_t proto, struct mbuf *m, u_int32_t pre, u_int32_t post) { /* Fast path */ - if (pktap_total_tap_count == 0) + if (pktap_total_tap_count == 0) { return; + } PKTAP_LOG(PKTP_LOG_OUTPUT, "ifp %s proto %u pre %u post %u\n", - ifp->if_xname, proto, pre, post); + ifp->if_xname, proto, pre, post); pktap_bpf_tap(ifp, proto, m, pre, post, 1); } @@ -1381,7 +1415,7 @@ convert_to_pktap_header_to_v2(struct bpf_packet *bpf_pkt, bool truncate) ptr += sizeof(uuid_t); VERIFY((void *)ptr < (void *)(pktap_v2_hdr_space + 1)); } - } else if(!uuid_is_null(pktap_header->pth_euuid)) { + } else if (!uuid_is_null(pktap_header->pth_euuid)) { pktap_v2_hdr->pth_e_uuid_offset = pktap_v2_hdr->pth_length; uuid_copy(*(uuid_t *)ptr, pktap_header->pth_euuid); pktap_v2_hdr->pth_length += sizeof(uuid_t); @@ -1425,7 +1459,7 @@ convert_to_pktap_header_to_v2(struct bpf_packet *bpf_pkt, bool truncate) pktap_v2_hdr->pth_comm_offset = pktap_v2_hdr->pth_length; - *ptr = 0; /* empty string by default */ + *ptr = 0; /* empty string by default */ pktap_v2_hdr->pth_length += strsize; ptr += strsize; VERIFY((void *)ptr < (void *)(pktap_v2_hdr_space + 1)); @@ -1452,7 +1486,7 @@ convert_to_pktap_header_to_v2(struct bpf_packet *bpf_pkt, bool truncate) size_t strsize = sizeof(pktap_v2_hdr_space->pth_e_comm); pktap_v2_hdr->pth_e_comm_offset = pktap_v2_hdr->pth_length; - *ptr = 0; /* empty string by default */ + *ptr = 0; /* empty string by default */ pktap_v2_hdr->pth_length += strsize; ptr += strsize; VERIFY((void *)ptr < (void *)(pktap_v2_hdr_space + 1)); @@ -1481,4 +1515,3 @@ convert_to_pktap_header_to_v2(struct bpf_packet *bpf_pkt, bool truncate) bpf_pkt->bpfp_header_length += pktap_v2_hdr->pth_length - sizeof(struct pktap_header); } - diff --git a/bsd/net/pktap.h b/bsd/net/pktap.h index 25ed642fd..6305b2131 100644 --- a/bsd/net/pktap.h +++ b/bsd/net/pktap.h @@ -45,49 +45,49 @@ /* * Commands via SIOCGDRVSPEC/SIOCSDRVSPEC */ -#define PKTP_CMD_FILTER_GET 1 /* array of PKTAP_MAX_FILTERS * struct pktap_filter */ -#define PKTP_CMD_FILTER_SET 3 /* array of PKTAP_MAX_FILTERS * struct pktap_filter */ -#define PKTP_CMD_TAP_COUNT 4 /* uint32_t number of active bpf tap on the interface */ +#define PKTP_CMD_FILTER_GET 1 /* array of PKTAP_MAX_FILTERS * struct pktap_filter */ +#define PKTP_CMD_FILTER_SET 3 /* array of PKTAP_MAX_FILTERS * struct pktap_filter */ +#define 
PKTP_CMD_TAP_COUNT 4 /* uint32_t number of active bpf tap on the interface */ /* * Filtering is currently based on network interface properties -- - * the interface type and the interface name -- and has two types of + * the interface type and the interface name -- and has two types of * operations -- pass and skip. * By default only interfaces of type IFT_ETHER and IFT_CELLULAR pass * the filter. * It's possible to include other interfaces by type or by name * The interface type is evaluated before the interface name - * The first matching rule stops the evaluation. + * The first matching rule stops the evaluation. * A rule with interface type 0 (zero) matches any interfaces */ -#define PKTAP_FILTER_OP_NONE 0 /* For inactive entries at the end of the list */ -#define PKTAP_FILTER_OP_PASS 1 -#define PKTAP_FILTER_OP_SKIP 2 +#define PKTAP_FILTER_OP_NONE 0 /* For inactive entries at the end of the list */ +#define PKTAP_FILTER_OP_PASS 1 +#define PKTAP_FILTER_OP_SKIP 2 -#define PKTAP_FILTER_PARAM_NONE 0 -#define PKTAP_FILTER_PARAM_IF_TYPE 1 -#define PKTAP_FILTER_PARAM_IF_NAME 2 +#define PKTAP_FILTER_PARAM_NONE 0 +#define PKTAP_FILTER_PARAM_IF_TYPE 1 +#define PKTAP_FILTER_PARAM_IF_NAME 2 #ifdef BSD_KERNEL_PRIVATE struct pktap_filter { - uint32_t filter_op; - uint32_t filter_param; + uint32_t filter_op; + uint32_t filter_param; union { - uint32_t _filter_if_type; - char _filter_if_name[PKTAP_IFXNAMESIZE]; + uint32_t _filter_if_type; + char _filter_if_name[PKTAP_IFXNAMESIZE]; } param_; - size_t filter_ifname_prefix_len; + size_t filter_ifname_prefix_len; }; struct x_pktap_filter { #else struct pktap_filter { #endif /* BSD_KERNEL_PRIVATE */ - uint32_t filter_op; - uint32_t filter_param; + uint32_t filter_op; + uint32_t filter_param; union { - uint32_t _filter_if_type; - char _filter_if_name[PKTAP_IFXNAMESIZE]; + uint32_t _filter_if_type; + char _filter_if_name[PKTAP_IFXNAMESIZE]; } param_; }; #define filter_param_if_type param_._filter_if_type @@ -101,34 +101,34 @@ struct pktap_filter { * In theory, there could be several types of blocks in a chain before the actual packet */ struct pktap_header { - uint32_t pth_length; /* length of this header */ - uint32_t pth_type_next; /* type of data following */ - uint32_t pth_dlt; /* DLT of packet */ - char pth_ifname[PKTAP_IFXNAMESIZE]; /* interface name */ - uint32_t pth_flags; /* flags */ - uint32_t pth_protocol_family; - uint32_t pth_frame_pre_length; - uint32_t pth_frame_post_length; - pid_t pth_pid; /* process ID */ - char pth_comm[MAXCOMLEN+1]; /* process name */ - uint32_t pth_svc; /* service class */ - uint16_t pth_iftype; - uint16_t pth_ifunit; - pid_t pth_epid; /* effective process ID */ - char pth_ecomm[MAXCOMLEN+1]; /* effective command name */ - uint32_t pth_flowid; - uint32_t pth_ipproto; - struct timeval32 pth_tstamp; - uuid_t pth_uuid; - uuid_t pth_euuid; + uint32_t pth_length; /* length of this header */ + uint32_t pth_type_next; /* type of data following */ + uint32_t pth_dlt; /* DLT of packet */ + char pth_ifname[PKTAP_IFXNAMESIZE]; /* interface name */ + uint32_t pth_flags; /* flags */ + uint32_t pth_protocol_family; + uint32_t pth_frame_pre_length; + uint32_t pth_frame_post_length; + pid_t pth_pid; /* process ID */ + char pth_comm[MAXCOMLEN + 1]; /* process name */ + uint32_t pth_svc; /* service class */ + uint16_t pth_iftype; + uint16_t pth_ifunit; + pid_t pth_epid; /* effective process ID */ + char pth_ecomm[MAXCOMLEN + 1]; /* effective command name */ + uint32_t pth_flowid; + uint32_t pth_ipproto; + struct timeval32 pth_tstamp; + 
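/*
 * (Editor's note, not in the original source: the two UUID fields below
 * are filled from the socket's so_procinfo by pktap_set_procinfo() in
 * pktap.c; pth_euuid is only meaningful when PTH_FLAG_PROC_DELEGATED is
 * set in pth_flags.)
 */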
uuid_t pth_uuid; + uuid_t pth_euuid; }; /* * The original version 1 of the pktap_header structure always had the field * pth_type_next set to PTH_TYPE_PACKET */ -#define PTH_TYPE_NONE 0 /* No more data following */ -#define PTH_TYPE_PACKET 1 /* Actual captured packet data */ +#define PTH_TYPE_NONE 0 /* No more data following */ +#define PTH_TYPE_PACKET 1 /* Actual captured packet data */ /* * Size of buffer that can contain any pktap header @@ -136,8 +136,8 @@ struct pktap_header { * or 16 bytes link layer header */ union pktap_header_extra { - uint8_t llhdr[16]; - uint32_t proto; + uint8_t llhdr[16]; + uint32_t proto; }; /* @@ -150,23 +150,23 @@ union pktap_header_extra { #define PKTAP_MAX_COMM_SIZE (MAXCOMLEN + 1) struct pktap_v2_hdr { - uint8_t pth_length; /* length of this header */ - uint8_t pth_uuid_offset; /* max size: sizeof(uuid_t) */ - uint8_t pth_e_uuid_offset; /* max size: sizeof(uuid_t) */ - uint8_t pth_ifname_offset; /* max size: PKTAP_IFXNAMESIZE*/ - uint8_t pth_comm_offset; /* max size: PKTAP_MAX_COMM_SIZE */ - uint8_t pth_e_comm_offset; /* max size: PKTAP_MAX_COMM_SIZE */ - uint16_t pth_dlt; /* DLT of packet */ - uint16_t pth_frame_pre_length; - uint16_t pth_frame_post_length; - uint16_t pth_iftype; - uint16_t pth_ipproto; - uint32_t pth_protocol_family; - uint32_t pth_svc; /* service class */ - uint32_t pth_flowid; - pid_t pth_pid; /* process ID */ - pid_t pth_e_pid; /* effective process ID */ - uint32_t pth_flags; /* flags */ + uint8_t pth_length; /* length of this header */ + uint8_t pth_uuid_offset; /* max size: sizeof(uuid_t) */ + uint8_t pth_e_uuid_offset; /* max size: sizeof(uuid_t) */ + uint8_t pth_ifname_offset; /* max size: PKTAP_IFXNAMESIZE*/ + uint8_t pth_comm_offset; /* max size: PKTAP_MAX_COMM_SIZE */ + uint8_t pth_e_comm_offset; /* max size: PKTAP_MAX_COMM_SIZE */ + uint16_t pth_dlt; /* DLT of packet */ + uint16_t pth_frame_pre_length; + uint16_t pth_frame_post_length; + uint16_t pth_iftype; + uint16_t pth_ipproto; + uint32_t pth_protocol_family; + uint32_t pth_svc; /* service class */ + uint32_t pth_flowid; + pid_t pth_pid; /* process ID */ + pid_t pth_e_pid; /* effective process ID */ + uint32_t pth_flags; /* flags */ }; struct pktap_v2_hdr_space { @@ -207,38 +207,38 @@ struct pktap_buffer_v2_hdr_extra { /* * Values for field pth_flags */ -#define PTH_FLAG_DIR_IN 0x00000001 /* Outgoing packet */ -#define PTH_FLAG_DIR_OUT 0x00000002 /* Incoming packet */ -#define PTH_FLAG_PROC_DELEGATED 0x00000004 /* Process delegated */ -#define PTH_FLAG_IF_DELEGATED 0x00000008 /* Interface delegated */ +#define PTH_FLAG_DIR_IN 0x00000001 /* Outgoing packet */ +#define PTH_FLAG_DIR_OUT 0x00000002 /* Incoming packet */ +#define PTH_FLAG_PROC_DELEGATED 0x00000004 /* Process delegated */ +#define PTH_FLAG_IF_DELEGATED 0x00000008 /* Interface delegated */ #ifdef BSD_KERNEL_PRIVATE -#define PTH_FLAG_DELAY_PKTAP 0x00001000 /* Finalize pktap header on read */ +#define PTH_FLAG_DELAY_PKTAP 0x00001000 /* Finalize pktap header on read */ #endif /* BSD_KERNEL_PRIVATE */ -#define PTH_FLAG_TSTAMP 0x00002000 /* Has time stamp */ -#define PTH_FLAG_NEW_FLOW 0x00004000 /* Packet from a new flow */ -#define PTH_FLAG_REXMIT 0x00008000 /* Packet is a retransmission */ -#define PTH_FLAG_KEEP_ALIVE 0x00010000 /* Is keep alive packet */ -#define PTH_FLAG_SOCKET 0x00020000 /* Packet on a Socket */ -#define PTH_FLAG_NEXUS_CHAN 0x00040000 /* Packet on a nexus channel */ -#define PTH_FLAG_V2_HDR 0x00080000 /* Version 2 of pktap */ +#define PTH_FLAG_TSTAMP 0x00002000 /* Has time stamp */ +#define 
PTH_FLAG_NEW_FLOW 0x00004000 /* Packet from a new flow */ +#define PTH_FLAG_REXMIT 0x00008000 /* Packet is a retransmission */ +#define PTH_FLAG_KEEP_ALIVE 0x00010000 /* Is keep alive packet */ +#define PTH_FLAG_SOCKET 0x00020000 /* Packet on a Socket */ +#define PTH_FLAG_NEXUS_CHAN 0x00040000 /* Packet on a nexus channel */ +#define PTH_FLAG_V2_HDR 0x00080000 /* Version 2 of pktap */ #ifdef BSD_KERNEL_PRIVATE #include struct pktap_header_buffer { - struct pktap_header pkth; - union pktap_header_extra extra; -} ; + struct pktap_header pkth; + union pktap_header_extra extra; +}; extern uint32_t pktap_total_tap_count; extern void pktap_init(void); extern void pktap_input(struct ifnet *, protocol_family_t, struct mbuf *, char *); -extern void pktap_output(struct ifnet *, protocol_family_t, struct mbuf *, - u_int32_t, u_int32_t); -extern void pktap_fill_proc_info(struct pktap_header *, protocol_family_t , - struct mbuf *, u_int32_t , int , struct ifnet *); +extern void pktap_output(struct ifnet *, protocol_family_t, struct mbuf *, + u_int32_t, u_int32_t); +extern void pktap_fill_proc_info(struct pktap_header *, protocol_family_t, + struct mbuf *, u_int32_t, int, struct ifnet *); extern void pktap_finalize_proc_info(struct pktap_header *); extern void pktap_v2_finalize_proc_info(struct pktap_v2_hdr *); extern void convert_to_pktap_header_to_v2(struct bpf_packet *bpf_pkt, bool truncate); diff --git a/bsd/net/pktsched/pktsched.c b/bsd/net/pktsched/pktsched.c index e757dc893..4c0d3b7fe 100644 --- a/bsd/net/pktsched/pktsched.c +++ b/bsd/net/pktsched/pktsched.c @@ -52,14 +52,14 @@ u_int32_t machclk_freq = 0; u_int64_t machclk_per_sec = 0; -u_int32_t pktsched_verbose; /* more noise if greater than 1 */ +u_int32_t pktsched_verbose; /* more noise if greater than 1 */ static void init_machclk(void); -SYSCTL_NODE(_net, OID_AUTO, pktsched, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "pktsched"); +SYSCTL_NODE(_net, OID_AUTO, pktsched, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "pktsched"); -SYSCTL_UINT(_net_pktsched, OID_AUTO, verbose, CTLFLAG_RW|CTLFLAG_LOCKED, - &pktsched_verbose, 0, "Packet scheduler verbosity level"); +SYSCTL_UINT(_net_pktsched, OID_AUTO, verbose, CTLFLAG_RW | CTLFLAG_LOCKED, + &pktsched_verbose, 0, "Packet scheduler verbosity level"); void pktsched_init(void) @@ -93,7 +93,7 @@ pktsched_abs_to_nsecs(u_int64_t abstime) u_int64_t nsecs; absolutetime_to_nanoseconds(abstime, &nsecs); - return (nsecs); + return nsecs; } u_int64_t @@ -102,7 +102,7 @@ pktsched_nsecs_to_abstime(u_int64_t nsecs) u_int64_t abstime; nanoseconds_to_absolutetime(nsecs, &abstime); - return (abstime); + return abstime; } int @@ -117,8 +117,9 @@ pktsched_setup(struct ifclassq *ifq, u_int32_t scheduler, u_int32_t sflags, VERIFY(machclk_freq != 0); /* Nothing to do unless the scheduler type changes */ - if (ifq->ifcq_type == scheduler) - return (0); + if (ifq->ifcq_type == scheduler) { + return 0; + } /* * Remember the flags that need to be restored upon success, as @@ -154,10 +155,11 @@ pktsched_setup(struct ifclassq *ifq, u_int32_t scheduler, u_int32_t sflags, break; } - if (error == 0) + if (error == 0) { ifq->ifcq_flags |= rflags; + } - return (error); + return error; } int @@ -191,7 +193,7 @@ pktsched_teardown(struct ifclassq *ifq) error = ENXIO; break; } - return (error); + return error; } int @@ -219,7 +221,7 @@ pktsched_getqstats(struct ifclassq *ifq, u_int32_t qid, break; } - return (error); + return error; } void @@ -263,7 +265,7 @@ pktsched_free_pkt(pktsched_pkt_t *pkt) uint32_t pktsched_get_pkt_len(pktsched_pkt_t *pkt) { - return 
(pkt->pktsched_plen); + return pkt->pktsched_plen; } mbuf_svc_class_t @@ -282,7 +284,7 @@ pktsched_get_pkt_svc(pktsched_pkt_t *pkt) /* NOTREACHED */ } - return (svc); + return svc; } void @@ -295,22 +297,28 @@ pktsched_get_pkt_vars(pktsched_pkt_t *pkt, uint32_t **flags, struct mbuf *m = (struct mbuf *)pkt->pktsched_pkt; struct pkthdr *pkth = &m->m_pkthdr; - if (flags != NULL) + if (flags != NULL) { *flags = &pkth->pkt_flags; - if (timestamp != NULL) + } + if (timestamp != NULL) { *timestamp = &pkth->pkt_timestamp; - if (flowid != NULL) + } + if (flowid != NULL) { *flowid = pkth->pkt_flowid; - if (flowsrc != NULL) + } + if (flowsrc != NULL) { *flowsrc = pkth->pkt_flowsrc; - if (proto != NULL) + } + if (proto != NULL) { *proto = pkth->pkt_proto; + } /* * caller should use this value only if PKTF_START_SEQ * is set in the mbuf packet flags */ - if (tcp_start_seq != NULL) + if (tcp_start_seq != NULL) { *tcp_start_seq = pkth->tx_start_seq; + } break; } @@ -333,11 +341,12 @@ pktsched_alloc_fcentry(pktsched_pkt_t *pkt, struct ifnet *ifp, int how) struct mbuf *m = (struct mbuf *)pkt->pktsched_pkt; fce = flowadv_alloc_entry(how); - if (fce == NULL) + if (fce == NULL) { break; + } - _CASSERT(sizeof (m->m_pkthdr.pkt_flowid) == - sizeof (fce->fce_flowid)); + _CASSERT(sizeof(m->m_pkthdr.pkt_flowid) == + sizeof(fce->fce_flowid)); fce->fce_flowsrc_type = m->m_pkthdr.pkt_flowsrc; fce->fce_flowid = m->m_pkthdr.pkt_flowid; @@ -350,7 +359,7 @@ pktsched_alloc_fcentry(pktsched_pkt_t *pkt, struct ifnet *ifp, int how) /* NOTREACHED */ } - return (fce); + return fce; } uint32_t * @@ -363,8 +372,8 @@ pktsched_get_pkt_sfb_vars(pktsched_pkt_t *pkt, uint32_t **sfb_flags) struct mbuf *m = (struct mbuf *)pkt->pktsched_pkt; struct pkthdr *pkth = &m->m_pkthdr; - _CASSERT(sizeof (pkth->pkt_mpriv_hash) == sizeof (uint32_t)); - _CASSERT(sizeof (pkth->pkt_mpriv_flags) == sizeof (uint32_t)); + _CASSERT(sizeof(pkth->pkt_mpriv_hash) == sizeof(uint32_t)); + _CASSERT(sizeof(pkth->pkt_mpriv_flags) == sizeof(uint32_t)); *sfb_flags = &pkth->pkt_mpriv_flags; hashp = &pkth->pkt_mpriv_hash; @@ -377,5 +386,5 @@ pktsched_get_pkt_sfb_vars(pktsched_pkt_t *pkt, uint32_t **sfb_flags) /* NOTREACHED */ } - return (hashp); + return hashp; } diff --git a/bsd/net/pktsched/pktsched.h b/bsd/net/pktsched/pktsched.h index 6c2a8eaae..b094eb623 100644 --- a/bsd/net/pktsched/pktsched.h +++ b/bsd/net/pktsched/pktsched.h @@ -27,7 +27,7 @@ */ #ifndef _PKTSCHED_PKTSCHED_H_ -#define _PKTSCHED_PKTSCHED_H_ +#define _PKTSCHED_PKTSCHED_H_ #ifdef PRIVATE #ifdef __cplusplus @@ -35,15 +35,15 @@ extern "C" { #endif /* packet scheduler type */ -#define PKTSCHEDT_NONE 0 /* reserved */ -#define PKTSCHEDT_CBQ 1 /* cbq */ -#define PKTSCHEDT_HFSC 2 /* hfsc */ -#define PKTSCHEDT_PRIQ 3 /* priority queue */ -#define PKTSCHEDT_FAIRQ 4 /* fairq */ -#define PKTSCHEDT_TCQ 5 /* traffic class queue */ -#define PKTSCHEDT_QFQ 6 /* quick fair queueing */ -#define PKTSCHEDT_FQ_CODEL 7 /* Flow queues with CoDel */ -#define PKTSCHEDT_MAX 8 /* should be max sched type + 1 */ +#define PKTSCHEDT_NONE 0 /* reserved */ +#define PKTSCHEDT_CBQ 1 /* cbq */ +#define PKTSCHEDT_HFSC 2 /* hfsc */ +#define PKTSCHEDT_PRIQ 3 /* priority queue */ +#define PKTSCHEDT_FAIRQ 4 /* fairq */ +#define PKTSCHEDT_TCQ 5 /* traffic class queue */ +#define PKTSCHEDT_QFQ 6 /* quick fair queueing */ +#define PKTSCHEDT_FQ_CODEL 7 /* Flow queues with CoDel */ +#define PKTSCHEDT_MAX 8 /* should be max sched type + 1 */ #ifdef BSD_KERNEL_PRIVATE #include @@ -51,59 +51,59 @@ extern "C" { #include /* flags for 
pktsched_setup */ -#define PKTSCHEDF_QALG_SFB 0x01 /* use SFB */ -#define PKTSCHEDF_QALG_ECN 0x02 /* enable ECN */ -#define PKTSCHEDF_QALG_FLOWCTL 0x04 /* enable flow control advisories */ -#define PKTSCHEDF_QALG_DELAYBASED 0x08 /* Delay based queueing */ -#define PKTSCHEDF_QALG_DRIVER_MANAGED 0x10 /* driver managed */ +#define PKTSCHEDF_QALG_SFB 0x01 /* use SFB */ +#define PKTSCHEDF_QALG_ECN 0x02 /* enable ECN */ +#define PKTSCHEDF_QALG_FLOWCTL 0x04 /* enable flow control advisories */ +#define PKTSCHEDF_QALG_DELAYBASED 0x08 /* Delay based queueing */ +#define PKTSCHEDF_QALG_DRIVER_MANAGED 0x10 /* driver managed */ typedef struct _pktsched_pkt_ { - classq_pkt_type_t __ptype; - uint32_t __plen; - void *__pkt; -#define pktsched_ptype __ptype -#define pktsched_plen __plen -#define pktsched_pkt __pkt + classq_pkt_type_t __ptype; + uint32_t __plen; + void *__pkt; +#define pktsched_ptype __ptype +#define pktsched_plen __plen +#define pktsched_pkt __pkt } pktsched_pkt_t; -#define _PKTSCHED_PKT_INIT(_p) do { \ - (_p)->pktsched_ptype = QP_INVALID; \ - (_p)->pktsched_plen = 0; \ - (_p)->pktsched_pkt = NULL; \ +#define _PKTSCHED_PKT_INIT(_p) do { \ + (_p)->pktsched_ptype = QP_INVALID; \ + (_p)->pktsched_plen = 0; \ + (_p)->pktsched_pkt = NULL; \ } while (0) /* macro for timeout/untimeout */ /* use old-style timeout/untimeout */ /* dummy callout structure */ struct callout { - void *c_arg; /* function argument */ - void (*c_func)(void *); /* function to call */ + void *c_arg; /* function argument */ + void (*c_func)(void *); /* function to call */ }; -#define CALLOUT_INIT(c) do { \ - (void) memset((c), 0, sizeof (*(c))); \ -} while (/*CONSTCOND*/ 0) +#define CALLOUT_INIT(c) do { \ + (void) memset((c), 0, sizeof (*(c))); \ +} while ( /*CONSTCOND*/ 0) -#define CALLOUT_RESET(c, t, f, a) do { \ - (c)->c_arg = (a); \ - (c)->c_func = (f); \ - timeout((f), (a), (t)); \ -} while (/*CONSTCOND*/ 0) +#define CALLOUT_RESET(c, t, f, a) do { \ + (c)->c_arg = (a); \ + (c)->c_func = (f); \ + timeout((f), (a), (t)); \ +} while ( /*CONSTCOND*/ 0) -#define CALLOUT_STOP(c) untimeout((c)->c_func, (c)->c_arg) -#define CALLOUT_INITIALIZER { NULL, NULL } +#define CALLOUT_STOP(c) untimeout((c)->c_func, (c)->c_arg) +#define CALLOUT_INITIALIZER { NULL, NULL } typedef void (timeout_t)(void *); /* * Bitmap operations */ -typedef u_int32_t pktsched_bitmap_t; +typedef u_int32_t pktsched_bitmap_t; static inline boolean_t pktsched_bit_tst(u_int32_t ix, pktsched_bitmap_t *pData) { - return (*pData & (1 << ix)); + return *pData & (1 << ix); } static inline void @@ -121,27 +121,27 @@ pktsched_bit_clr(u_int32_t ix, pktsched_bitmap_t *pData) static inline pktsched_bitmap_t pktsched_ffs(pktsched_bitmap_t pData) { - return (ffs(pData)); + return ffs(pData); } static inline pktsched_bitmap_t pktsched_fls(pktsched_bitmap_t pData) { - return ((sizeof (pktsched_bitmap_t) << 3) - clz(pData)); + return (sizeof(pktsched_bitmap_t) << 3) - clz(pData); } static inline pktsched_bitmap_t __fls(pktsched_bitmap_t word) { VERIFY(word != 0); - return (pktsched_fls(word) - 1); + return pktsched_fls(word) - 1; } /* * We can use mach_absolute_time which returns a 64-bit value with * granularity less than a microsecond even on the slowest processor. 
*/ -#define read_machclk() mach_absolute_time() +#define read_machclk() mach_absolute_time() /* * machine dependent clock diff --git a/bsd/net/pktsched/pktsched_cbq.h b/bsd/net/pktsched/pktsched_cbq.h index 0553397d7..58736250f 100644 --- a/bsd/net/pktsched/pktsched_cbq.h +++ b/bsd/net/pktsched/pktsched_cbq.h @@ -60,7 +60,7 @@ */ #ifndef _NET_PKTSCHED_PKTSCHED_CBQ_H_ -#define _NET_PKTSCHED_PKTSCHED_CBQ_H_ +#define _NET_PKTSCHED_PKTSCHED_CBQ_H_ #ifdef PRIVATE #include @@ -75,81 +75,81 @@ extern "C" { #endif /* class flags should be same as class flags in rm_class.h */ -#define CBQCLF_RED RMCF_RED /* use RED */ -#define CBQCLF_ECN RMCF_ECN /* use ECN with RED/BLUE/SFB */ -#define CBQCLF_RIO RMCF_RIO /* use RIO */ -#define CBQCLF_FLOWVALVE RMCF_FLOWVALVE /* use flowvalve/penalty-box */ -#define CBQCLF_CLEARDSCP RMCF_CLEARDSCP /* clear diffserv codepoint */ -#define CBQCLF_BORROW 0x0020 /* borrow from parent */ +#define CBQCLF_RED RMCF_RED /* use RED */ +#define CBQCLF_ECN RMCF_ECN /* use ECN with RED/BLUE/SFB */ +#define CBQCLF_RIO RMCF_RIO /* use RIO */ +#define CBQCLF_FLOWVALVE RMCF_FLOWVALVE /* use flowvalve/penalty-box */ +#define CBQCLF_CLEARDSCP RMCF_CLEARDSCP /* clear diffserv codepoint */ +#define CBQCLF_BORROW 0x0020 /* borrow from parent */ /* class flags only for root class */ -#define CBQCLF_WRR RMCF_WRR /* weighted-round robin */ -#define CBQCLF_EFFICIENT RMCF_EFFICIENT /* work-conserving */ +#define CBQCLF_WRR RMCF_WRR /* weighted-round robin */ +#define CBQCLF_EFFICIENT RMCF_EFFICIENT /* work-conserving */ /* class flags for special classes */ -#define CBQCLF_ROOTCLASS 0x1000 /* root class */ -#define CBQCLF_DEFCLASS 0x2000 /* default class */ -#define CBQCLF_CLASSMASK 0xf000 /* class mask */ +#define CBQCLF_ROOTCLASS 0x1000 /* root class */ +#define CBQCLF_DEFCLASS 0x2000 /* default class */ +#define CBQCLF_CLASSMASK 0xf000 /* class mask */ -#define CBQCLF_BLUE RMCF_BLUE /* use BLUE */ -#define CBQCLF_SFB RMCF_SFB /* use SFB */ -#define CBQCLF_FLOWCTL RMCF_FLOWCTL /* enable flow ctl advisories */ +#define CBQCLF_BLUE RMCF_BLUE /* use BLUE */ +#define CBQCLF_SFB RMCF_SFB /* use SFB */ +#define CBQCLF_FLOWCTL RMCF_FLOWCTL /* enable flow ctl advisories */ #ifdef BSD_KERNEL_PRIVATE -#define CBQCLF_LAZY 0x10000000 /* on-demand resource allocation */ +#define CBQCLF_LAZY 0x10000000 /* on-demand resource allocation */ #endif /* BSD_KERNEL_PRIVATE */ -#define CBQCLF_USERFLAGS \ - (CBQCLF_RED | CBQCLF_ECN | CBQCLF_RIO | CBQCLF_FLOWVALVE | \ +#define CBQCLF_USERFLAGS \ + (CBQCLF_RED | CBQCLF_ECN | CBQCLF_RIO | CBQCLF_FLOWVALVE | \ CBQCLF_CLEARDSCP | CBQCLF_BORROW | CBQCLF_WRR | CBQCLF_EFFICIENT | \ - CBQCLF_ROOTCLASS | CBQCLF_DEFCLASS | CBQCLF_BLUE | CBQCLF_SFB | \ + CBQCLF_ROOTCLASS | CBQCLF_DEFCLASS | CBQCLF_BLUE | CBQCLF_SFB | \ CBQCLF_FLOWCTL) #ifdef BSD_KERNEL_PRIVATE -#define CBQCLF_BITS \ +#define CBQCLF_BITS \ "\020\1RED\2ECN\3RIO\4FLOWVALVE\5CLEARDSCP\6BORROW" \ "\11WRR\12EFFICIENT\15ROOT\16DEFAULT\21BLUE\22SFB\23FLOWCTL\35LAZY" #else -#define CBQCLF_BITS \ +#define CBQCLF_BITS \ "\020\1RED\2ECN\3RIO\4FLOWVALVE\5CLEARDSCP\6BORROW" \ "\11WRR\12EFFICIENT\15ROOT\16DEFAULT\21BLUE\22SFB\23FLOWCTL" #endif /* !BSD_KERNEL_PRIVATE */ -#define CBQ_MAXQSIZE 200 -#define CBQ_MAXPRI RM_MAXPRIO +#define CBQ_MAXQSIZE 200 +#define CBQ_MAXPRI RM_MAXPRIO typedef struct cbq_classstats { - u_int32_t handle; - u_int32_t depth; + u_int32_t handle; + u_int32_t depth; - struct pktcntr xmit_cnt; /* packets sent in this class */ - struct pktcntr drop_cnt; /* dropped packets */ - u_int32_t over; /* # 
times went over limit */ - u_int32_t borrows; /* # times tried to borrow */ - u_int32_t overactions; /* # times invoked overlimit action */ - u_int32_t delays; /* # times invoked delay actions */ + struct pktcntr xmit_cnt; /* packets sent in this class */ + struct pktcntr drop_cnt; /* dropped packets */ + u_int32_t over; /* # times went over limit */ + u_int32_t borrows; /* # times tried to borrow */ + u_int32_t overactions; /* # times invoked overlimit action */ + u_int32_t delays; /* # times invoked delay actions */ /* other static class parameters useful for debugging */ - int priority; - int maxidle; - int minidle; - int offtime; - int qmax; - int ns_per_byte; - int wrr_allot; + int priority; + int maxidle; + int minidle; + int offtime; + int qmax; + int ns_per_byte; + int wrr_allot; - int qcnt; /* # packets in queue */ - int avgidle; + int qcnt; /* # packets in queue */ + int avgidle; /* RED, RIO, BLUE, SFB related info */ - classq_type_t qtype; + classq_type_t qtype; union { /* RIO has 3 red stats */ - struct red_stats red[RIO_NDROPPREC]; - struct blue_stats blue; - struct sfb_stats sfb; + struct red_stats red[RIO_NDROPPREC]; + struct blue_stats blue; + struct sfb_stats sfb; }; - classq_state_t qstate; + classq_state_t qstate; } class_stats_t; #ifdef __cplusplus diff --git a/bsd/net/pktsched/pktsched_fairq.h b/bsd/net/pktsched/pktsched_fairq.h index bbe88c451..63c7c34d7 100644 --- a/bsd/net/pktsched/pktsched_fairq.h +++ b/bsd/net/pktsched/pktsched_fairq.h @@ -28,14 +28,14 @@ /* * Copyright (c) 2008 The DragonFly Project. All rights reserved. - * + * * This code is derived from software contributed to The DragonFly Project * by Matthew Dillon - * + * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * + * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright @@ -45,7 +45,7 @@ * 3. Neither the name of The DragonFly Project nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific, prior written permission. - * + * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS @@ -58,12 +58,12 @@ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
- * + * * $DragonFly: src/sys/net/altq/altq_fairq.h,v 1.1 2008/04/06 18:58:15 dillon Exp $ */ #ifndef _NET_PKTSCHED_PKTSCHED_FAIRQ_H_ -#define _NET_PKTSCHED_PKTSCHED_FAIRQ_H_ +#define _NET_PKTSCHED_PKTSCHED_FAIRQ_H_ #ifdef PRIVATE #include @@ -78,59 +78,59 @@ extern "C" { #endif -#define FAIRQ_MAX_BUCKETS 2048 /* maximum number of sorting buckets */ -#define FAIRQ_MAXPRI RM_MAXPRIO -#define FAIRQ_BITMAP_WIDTH (sizeof (fairq_bitmap_t) * 8) -#define FAIRQ_BITMAP_MASK (FAIRQ_BITMAP_WIDTH - 1) +#define FAIRQ_MAX_BUCKETS 2048 /* maximum number of sorting buckets */ +#define FAIRQ_MAXPRI RM_MAXPRIO +#define FAIRQ_BITMAP_WIDTH (sizeof (fairq_bitmap_t) * 8) +#define FAIRQ_BITMAP_MASK (FAIRQ_BITMAP_WIDTH - 1) /* fairq class flags */ -#define FARF_RED 0x0001 /* use RED */ -#define FARF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ -#define FARF_RIO 0x0004 /* use RIO */ -#define FARF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ -#define FARF_BLUE 0x0100 /* use BLUE */ -#define FARF_SFB 0x0200 /* use SFB */ -#define FARF_FLOWCTL 0x0400 /* enable flow control advisories */ -#define FARF_DEFAULTCLASS 0x1000 /* default class */ +#define FARF_RED 0x0001 /* use RED */ +#define FARF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ +#define FARF_RIO 0x0004 /* use RIO */ +#define FARF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ +#define FARF_BLUE 0x0100 /* use BLUE */ +#define FARF_SFB 0x0200 /* use SFB */ +#define FARF_FLOWCTL 0x0400 /* enable flow control advisories */ +#define FARF_DEFAULTCLASS 0x1000 /* default class */ #ifdef BSD_KERNEL_PRIVATE -#define FARF_HAS_PACKETS 0x2000 /* might have queued packets */ -#define FARF_LAZY 0x10000000 /* on-demand resource allocation */ +#define FARF_HAS_PACKETS 0x2000 /* might have queued packets */ +#define FARF_LAZY 0x10000000 /* on-demand resource allocation */ #endif /* BSD_KERNEL_PRIVATE */ -#define FARF_USERFLAGS \ - (FARF_RED | FARF_ECN | FARF_RIO | FARF_CLEARDSCP | \ +#define FARF_USERFLAGS \ + (FARF_RED | FARF_ECN | FARF_RIO | FARF_CLEARDSCP | \ FARF_BLUE | FARF_SFB | FARF_FLOWCTL | FARF_DEFAULTCLASS) #ifdef BSD_KERNEL_PRIVATE -#define FARF_BITS \ +#define FARF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" \ "\16HASPKTS\35LAZY" #else -#define FARF_BITS \ +#define FARF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" \ "\16HASPKTS" #endif /* !BSD_KERNEL_PRIVATE */ -typedef u_int32_t fairq_bitmap_t; +typedef u_int32_t fairq_bitmap_t; struct fairq_classstats { - u_int32_t class_handle; - u_int32_t priority; + u_int32_t class_handle; + u_int32_t priority; - u_int32_t qlength; - u_int32_t qlimit; - struct pktcntr xmit_cnt; /* transmitted packet counter */ - struct pktcntr drop_cnt; /* dropped packet counter */ + u_int32_t qlength; + u_int32_t qlimit; + struct pktcntr xmit_cnt; /* transmitted packet counter */ + struct pktcntr drop_cnt; /* dropped packet counter */ /* RED, RIO, BLUE, SFB related info */ - classq_type_t qtype; + classq_type_t qtype; union { /* RIO has 3 red stats */ - struct red_stats red[RIO_NDROPPREC]; - struct blue_stats blue; - struct sfb_stats sfb; + struct red_stats red[RIO_NDROPPREC]; + struct blue_stats blue; + struct sfb_stats sfb; }; - classq_state_t qstate; + classq_state_t qstate; }; #ifdef __cplusplus diff --git a/bsd/net/pktsched/pktsched_fq_codel.c b/bsd/net/pktsched/pktsched_fq_codel.c index 425173a5f..b6cd0c67f 100644 --- a/bsd/net/pktsched/pktsched_fq_codel.c +++ b/bsd/net/pktsched/pktsched_fq_codel.c @@ -65,13 +65,13 @@ static void fq_if_empty_new_flow(fq_t *fq, fq_if_classq_t 
*fq_cl, static void fq_if_empty_old_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq, bool remove_hash); -#define FQ_IF_ZONE_MAX 32 /* Maximum elements in zone */ -#define FQ_IF_ZONE_NAME "pktsched_fq_if" /* zone for fq_if class */ +#define FQ_IF_ZONE_MAX 32 /* Maximum elements in zone */ +#define FQ_IF_ZONE_NAME "pktsched_fq_if" /* zone for fq_if class */ -#define FQ_IF_FLOW_HASH_ID(_flowid_) \ +#define FQ_IF_FLOW_HASH_ID(_flowid_) \ (((_flowid_) >> FQ_IF_HASH_TAG_SHIFT) & FQ_IF_HASH_TAG_MASK) -#define FQ_IF_CLASSQ_IDLE(_fcl_) \ +#define FQ_IF_CLASSQ_IDLE(_fcl_) \ (STAILQ_EMPTY(&(_fcl_)->fcl_new_flows) && \ STAILQ_EMPTY(&(_fcl_)->fcl_old_flows)) @@ -103,7 +103,6 @@ fq_getq_flow_mbuf(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq, while (fq->fq_deficit > 0 && limit_reached == FALSE && !MBUFQ_EMPTY(&fq->fq_mbufq)) { - _PKTSCHED_PKT_INIT(&pkt); m = fq_getq_flow(fqs, fq, &pkt); ASSERT(pkt.pktsched_ptype == QP_MBUF); @@ -129,12 +128,13 @@ fq_getq_flow_mbuf(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq, ifclassq_set_packet_metadata(ifq, ifp, m, QP_MBUF); /* Check if the limit is reached */ - if (*pkt_cnt >= pkt_limit || *byte_cnt >= byte_limit) + if (*pkt_cnt >= pkt_limit || *byte_cnt >= byte_limit) { limit_reached = TRUE; + } } *qempty = MBUFQ_EMPTY(&fq->fq_mbufq); - return (limit_reached); + return limit_reached; } void @@ -143,7 +143,7 @@ fq_codel_scheduler_init(void) /* Initialize the zone for flow queue structures */ fq_codel_init(); - fq_if_size = sizeof (fq_if_t); + fq_if_size = sizeof(fq_if_t); fq_if_zone = zinit(fq_if_size, (FQ_IF_ZONE_MAX * fq_if_size), 0, FQ_IF_ZONE_NAME); if (fq_if_zone == NULL) { @@ -152,7 +152,6 @@ fq_codel_scheduler_init(void) } zone_change(fq_if_zone, Z_EXPAND, TRUE); zone_change(fq_if_zone, Z_CALLERACCT, TRUE); - } fq_if_t * @@ -160,8 +159,9 @@ fq_if_alloc(struct ifnet *ifp, classq_pkt_type_t ptype) { fq_if_t *fqs; fqs = zalloc(fq_if_zone); - if (fqs == NULL) - return (NULL); + if (fqs == NULL) { + return NULL; + } bzero(fqs, fq_if_size); fqs->fqs_ifq = &ifp->if_snd; @@ -176,7 +176,7 @@ fq_if_alloc(struct ifnet *ifp, classq_pkt_type_t ptype) /* Configure packet drop limit across all queues */ fqs->fqs_pkt_droplimit = IFCQ_PKT_DROP_LIMIT(&ifp->if_snd); STAILQ_INIT(&fqs->fqs_fclist); - return (fqs); + return fqs; } void @@ -217,7 +217,7 @@ fq_if_service_to_priority(fq_if_t *fqs, mbuf_svc_class_t svc) pri = FQ_IF_BE_INDEX; /* Use best effort by default */ break; } - return (pri); + return pri; } /* scheduler is not managed by the driver */ @@ -259,7 +259,7 @@ fq_if_service_to_priority(fq_if_t *fqs, mbuf_svc_class_t svc) pri = FQ_IF_BE_INDEX; /* Use best effort by default */ break; } - return (pri); + return pri; } void @@ -296,7 +296,7 @@ fq_if_enqueue_classq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, IFCQ_CONVERT_LOCK(ifq); m_freem((mbuf_t)p); *pdrop = TRUE; - return (ENOBUFS); + return ENOBUFS; } pktsched_pkt_encap(&pkt, ptype, p); @@ -312,7 +312,7 @@ fq_if_enqueue_classq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, IFCQ_CONVERT_LOCK(ifq); pktsched_free_pkt(&pkt); *pdrop = TRUE; - return (EQSUSPENDED); + return EQSUSPENDED; } len = pktsched_get_pkt_len(&pkt); @@ -342,11 +342,11 @@ fq_if_enqueue_classq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, pktsched_free_pkt(&pkt); switch (ret) { case CLASSQEQ_DROP: - return (ENOBUFS); + return ENOBUFS; case CLASSQEQ_DROP_FC: - return (EQFULL); + return EQFULL; case CLASSQEQ_DROP_SP: - return (EQSUSPENDED); + return EQSUSPENDED; } } } else { @@ -354,7 +354,7 @@ fq_if_enqueue_classq(struct 
ifclassq *ifq, void *p, classq_pkt_type_t ptype, } IFCQ_INC_LEN(ifq); IFCQ_INC_BYTES(ifq, len); - return (ret); + return ret; } static void * @@ -364,7 +364,7 @@ fq_if_dequeue_classq(struct ifclassq *ifq, classq_pkt_type_t *ptype) (void) fq_if_dequeue_classq_multi(ifq, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, &top, NULL, NULL, NULL, ptype); - return (top); + return top; } static void * @@ -381,7 +381,7 @@ fq_if_dequeue_sc_classq(struct ifclassq *ifq, mbuf_svc_class_t svc, fq_if_dequeue(fqs, fq_cl, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, &top, NULL, NULL, NULL, TRUE, ptype); - return (top); + return top; } int @@ -422,8 +422,9 @@ fq_if_dequeue_classq_multi(struct ifclassq *ifq, u_int32_t maxpktcnt, fqs->fqs_bitmaps[FQ_IF_EB] == 0) { fqs->fqs_bitmaps[FQ_IF_EB] = fqs->fqs_bitmaps[FQ_IF_IB]; fqs->fqs_bitmaps[FQ_IF_IB] = 0; - if (fqs->fqs_bitmaps[FQ_IF_EB] == 0) + if (fqs->fqs_bitmaps[FQ_IF_EB] == 0) { break; + } } pri = pktsched_ffs(fqs->fqs_bitmaps[FQ_IF_ER]); if (pri == 0) { @@ -447,8 +448,9 @@ fq_if_dequeue_classq_multi(struct ifclassq *ifq, u_int32_t maxpktcnt, fq_cl->fcl_budget += (min(fq_cl->fcl_drr_max, fq_cl->fcl_stat.fcl_flows_cnt) * fq_cl->fcl_quantum); - if (fq_cl->fcl_budget <= 0) + if (fq_cl->fcl_budget <= 0) { goto state_change; + } } fq_if_dequeue(fqs, fq_cl, (maxpktcnt - total_pktcnt), (maxbytecnt - total_bytecnt), &top, &tail, &pktcnt, @@ -489,33 +491,42 @@ state_change: pktsched_bit_clr(pri, &fqs->fqs_bitmaps[FQ_IF_ER]); VERIFY(((fqs->fqs_bitmaps[FQ_IF_ER] | fqs->fqs_bitmaps[FQ_IF_EB] | - fqs->fqs_bitmaps[FQ_IF_IB])&(1 << pri)) == 0); + fqs->fqs_bitmaps[FQ_IF_IB]) & (1 << pri)) == 0); fq_cl->fcl_budget = 0; } - if (total_pktcnt >= maxpktcnt || total_bytecnt >= maxbytecnt) + if (total_pktcnt >= maxpktcnt || total_bytecnt >= maxbytecnt) { break; + } } if (first != NULL) { - if (first_packet != NULL) + if (first_packet != NULL) { *first_packet = first; - if (last_packet != NULL) + } + if (last_packet != NULL) { *last_packet = last; - if (retpktcnt != NULL) + } + if (retpktcnt != NULL) { *retpktcnt = total_pktcnt; - if (retbytecnt != NULL) + } + if (retbytecnt != NULL) { *retbytecnt = total_bytecnt; + } IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt); } else { - if (first_packet != NULL) + if (first_packet != NULL) { *first_packet = NULL; - if (last_packet != NULL) + } + if (last_packet != NULL) { *last_packet = NULL; - if (retpktcnt != NULL) + } + if (retpktcnt != NULL) { *retpktcnt = 0; - if (retbytecnt != NULL) + } + if (retbytecnt != NULL) { *retbytecnt = 0; + } } - return (0); + return 0; } int @@ -570,25 +581,33 @@ fq_if_dequeue_sc_classq_multi(struct ifclassq *ifq, mbuf_svc_class_t svc, last = tail; } if (first != NULL) { - if (first_packet != NULL) + if (first_packet != NULL) { *first_packet = first; - if (last_packet != NULL) + } + if (last_packet != NULL) { *last_packet = last; - if (retpktcnt != NULL) + } + if (retpktcnt != NULL) { *retpktcnt = total_pktcnt; - if (retbytecnt != NULL) + } + if (retbytecnt != NULL) { *retbytecnt = total_bytecnt; + } } else { - if (first_packet != NULL) + if (first_packet != NULL) { *first_packet = NULL; - if (last_packet != NULL) + } + if (last_packet != NULL) { *last_packet = NULL; - if (retpktcnt != NULL) + } + if (retpktcnt != NULL) { *retpktcnt = 0; - if (retbytecnt != NULL) + } + if (retbytecnt != NULL) { *retbytecnt = 0; + } } - return (0); + return 0; } static void @@ -624,10 +643,12 @@ fq_if_purge_flow(fq_if_t *fqs, fq_t *fq, u_int32_t *pktsp, &fqs->fqs_bitmaps[i]); } } - if (pktsp != NULL) + if (pktsp != NULL) { *pktsp = pkts; - if (bytesp 
!= NULL) + } + if (bytesp != NULL) { *bytesp = bytes; + } } static void @@ -669,7 +690,7 @@ fq_if_purge(fq_if_t *fqs) VERIFY(SLIST_EMPTY(&fqs->fqs_flows[i])); } - bzero(&fqs->fqs_bitmaps, sizeof (fqs->fqs_bitmaps)); + bzero(&fqs->fqs_bitmaps, sizeof(fqs->fqs_bitmaps)); IFCQ_LEN(fqs->fqs_ifq) = 0; IFCQ_BYTES(fqs->fqs_ifq) = 0; @@ -687,8 +708,9 @@ fq_if_purge_sc(fq_if_t *fqs, cqrq_purge_sc_t *req) /* packet type is needed only if we want to create a flow queue */ fq = fq_if_hash_pkt(fqs, req->flow, req->sc, 0, FALSE, QP_INVALID); - if (fq != NULL) + if (fq != NULL) { fq_if_purge_flow(fqs, fq, &req->packets, &req->bytes); + } } static void @@ -735,11 +757,12 @@ fq_if_throttle(fq_if_t *fqs, cqrq_throttle_t *tr) if (!tr->set) { tr->level = fqs->fqs_throttle; - return (0); + return 0; } - if (tr->level == fqs->fqs_throttle) - return (EALREADY); + if (tr->level == fqs->fqs_throttle) { + return EALREADY; + } /* Throttling is allowed on BK_SYS class only */ index = fq_if_service_to_priority(fqs, MBUF_SC_BK_SYS); @@ -753,7 +776,7 @@ fq_if_throttle(fq_if_t *fqs, cqrq_throttle_t *tr) default: break; } - return (0); + return 0; } void @@ -762,8 +785,9 @@ fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat) u_int32_t pri; fq_if_classq_t *fq_cl; - if (stat == NULL) + if (stat == NULL) { return; + } pri = fq_if_service_to_priority(fqs, stat->sc); fq_cl = &fqs->fqs_classq[pri]; @@ -800,7 +824,7 @@ fq_if_request_classq(struct ifclassq *ifq, cqrq_t rq, void *arg) fq_if_stat_sc(fqs, (cqrq_stat_sc_t *)arg); break; } - return (err); + return err; } int @@ -817,8 +841,9 @@ fq_if_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE); fqs = fq_if_alloc(ifp, ptype); - if (fqs == NULL) - return (ENOMEM); + if (fqs == NULL) { + return ENOMEM; + } if (flags & PKTSCHEDF_QALG_DRIVER_MANAGED) { fqs->fqs_flags |= FQS_DRIVER_MANAGED; @@ -867,7 +892,7 @@ fq_if_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, "failed to attach fq_if: %d\n", __func__, err); fq_if_destroy(fqs); } - return (err); + return err; } fq_t * @@ -888,8 +913,9 @@ fq_if_hash_pkt(fq_if_t *fqs, u_int32_t flowid, mbuf_svc_class_t svc_class, SLIST_FOREACH(fq, fq_list, fq_hashlink) { if (fq->fq_flowhash == flowid && - fq->fq_sc_index == scidx) + fq->fq_sc_index == scidx) { break; + } } if (fq == NULL && create == TRUE) { ASSERT(ptype == QP_MBUF); @@ -912,10 +938,11 @@ fq_if_hash_pkt(fq_if_t *fqs, u_int32_t flowid, mbuf_svc_class_t svc_class, * If getq time is not set because this is the first packet or after * idle time, set it now so that we can detect a stall. */ - if (fq != NULL && fq->fq_getqtime == 0) + if (fq != NULL && fq->fq_getqtime == 0) { fq->fq_getqtime = now; + } - return (fq); + return fq; } void @@ -928,14 +955,13 @@ fq_if_destroy_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq) fq_cl->fcl_stat.fcl_flows_cnt--; IFCQ_CONVERT_LOCK(fqs->fqs_ifq); fq_destroy(fq); - } inline boolean_t fq_if_at_drop_limit(fq_if_t *fqs) { - return (((IFCQ_LEN(fqs->fqs_ifq) >= fqs->fqs_pkt_droplimit) ? - TRUE : FALSE)); + return (IFCQ_LEN(fqs->fqs_ifq) >= fqs->fqs_pkt_droplimit) ? 
+ TRUE : FALSE; } static void @@ -984,8 +1010,9 @@ fq_if_drop_packet(fq_if_t *fqs) uint32_t *pkt_flags; uint64_t *pkt_timestamp; - if (fq == NULL) + if (fq == NULL) { return; + } /* queue can not be empty on the largest flow */ VERIFY(!fq_empty(fq)); @@ -998,8 +1025,9 @@ fq_if_drop_packet(fq_if_t *fqs) IFCQ_CONVERT_LOCK(fqs->fqs_ifq); *pkt_timestamp = 0; - if (pkt.pktsched_ptype == QP_MBUF) + if (pkt.pktsched_ptype == QP_MBUF) { *pkt_flags &= ~PKTF_PRIV_GUARDED; + } if (fq_empty(fq)) { fqs->fqs_large_flow = NULL; @@ -1022,16 +1050,19 @@ fq_if_is_flow_heavy(fq_if_t *fqs, fq_t *fq) fq_t *prev_fq; if (fqs->fqs_large_flow != NULL && - fqs->fqs_large_flow->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) + fqs->fqs_large_flow->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) { fqs->fqs_large_flow = NULL; + } - if (fq == NULL || fq->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) + if (fq == NULL || fq->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) { return; + } prev_fq = fqs->fqs_large_flow; if (prev_fq == NULL) { - if (!fq_empty(fq)) + if (!fq_empty(fq)) { fqs->fqs_large_flow = fq; + } return; } else if (fq->fq_bytes > prev_fq->fq_bytes) { fqs->fqs_large_flow = fq; @@ -1048,7 +1079,7 @@ fq_if_add_fcentry(fq_if_t *fqs, pktsched_pkt_t *pkt, uint32_t flowid, if ((uint8_t)fce->fce_flowsrc_type == flowsrc && fce->fce_flowid == flowid) { /* Already on flowcontrol list */ - return (TRUE); + return TRUE; } } IFCQ_CONVERT_LOCK(fqs->fqs_ifq); @@ -1058,7 +1089,7 @@ fq_if_add_fcentry(fq_if_t *fqs, pktsched_pkt_t *pkt, uint32_t flowid, STAILQ_INSERT_TAIL(&fqs->fqs_fclist, fce, fce_link); fq_cl->fcl_stat.fcl_flow_control++; } - return ((fce != NULL) ? TRUE : FALSE); + return (fce != NULL) ? TRUE : FALSE; } void @@ -1068,8 +1099,9 @@ fq_if_flow_feedback(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl) IFCQ_CONVERT_LOCK(fqs->fqs_ifq); STAILQ_FOREACH(fce, &fqs->fqs_fclist, fce_link) { - if (fce->fce_flowid == fq->fq_flowhash) + if (fce->fce_flowid == fq->fq_flowhash) { break; + } } if (fce != NULL) { STAILQ_REMOVE(&fqs->fqs_fclist, fce, flowadv_fcentry, @@ -1109,8 +1141,9 @@ fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, u_int32_t pktlimit, * maximum byte limit should not be greater than the budget for * this class */ - if ((int32_t)bytelimit > fq_cl->fcl_budget && !drvmgmt) + if ((int32_t)bytelimit > fq_cl->fcl_budget && !drvmgmt) { bytelimit = fq_cl->fcl_budget; + } VERIFY(pktlimit > 0 && bytelimit > 0 && top != NULL); @@ -1120,22 +1153,24 @@ fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, u_int32_t pktlimit, STAILQ_INIT(&temp_stailq); STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_new_flows, fq_actlink, tfq) { - ASSERT((fq->fq_flags & (FQF_NEW_FLOW|FQF_OLD_FLOW)) == + ASSERT((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == FQF_NEW_FLOW); limit_reached = fq_getq_flow_fn(fqs, fq_cl, fq, bytelimit, pktlimit, top, &last, &bytecnt, &pktcnt, &qempty, PKTF_NEW_FLOW); - if (fq->fq_deficit <= 0 || qempty) + if (fq->fq_deficit <= 0 || qempty) { fq_if_empty_new_flow(fq, fq_cl, true); + } fq->fq_deficit += fq_cl->fcl_quantum; - if (limit_reached) + if (limit_reached) { goto done; + } } STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_old_flows, fq_actlink, tfq) { - VERIFY((fq->fq_flags & (FQF_NEW_FLOW|FQF_OLD_FLOW)) == + VERIFY((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == FQF_OLD_FLOW); limit_reached = fq_getq_flow_fn(fqs, fq_cl, fq, bytelimit, @@ -1154,8 +1189,9 @@ fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, u_int32_t pktlimit, STAILQ_INSERT_TAIL(&temp_stailq, fq, fq_actlink); fq->fq_deficit += fq_cl->fcl_quantum; } - if (limit_reached) + if 
(limit_reached) { break; + } } done: @@ -1167,12 +1203,15 @@ done: if (last != NULL) { VERIFY(*top != NULL); - if (tail != NULL) + if (tail != NULL) { *tail = last; - if (retpktcnt != NULL) + } + if (retpktcnt != NULL) { *retpktcnt = pktcnt; - if (retbytecnt != NULL) + } + if (retbytecnt != NULL) { *retbytecnt = bytecnt; + } } } @@ -1186,27 +1225,32 @@ fq_if_teardown_ifclassq(struct ifclassq *ifq) fq_if_destroy(fqs); ifq->ifcq_disc = NULL; - return (ifclassq_detach(ifq)); + return ifclassq_detach(ifq); } static void fq_export_flowstats(fq_if_t *fqs, fq_t *fq, struct fq_codel_flowstats *flowstat) { - bzero(flowstat, sizeof (*flowstat)); + bzero(flowstat, sizeof(*flowstat)); flowstat->fqst_min_qdelay = fq->fq_min_qdelay; flowstat->fqst_bytes = fq->fq_bytes; flowstat->fqst_flowhash = fq->fq_flowhash; - if (fq->fq_flags & FQF_NEW_FLOW) + if (fq->fq_flags & FQF_NEW_FLOW) { flowstat->fqst_flags |= FQ_FLOWSTATS_NEW_FLOW; - if (fq->fq_flags & FQF_OLD_FLOW) + } + if (fq->fq_flags & FQF_OLD_FLOW) { flowstat->fqst_flags |= FQ_FLOWSTATS_OLD_FLOW; - if (fq->fq_flags & FQF_DELAY_HIGH) + } + if (fq->fq_flags & FQF_DELAY_HIGH) { flowstat->fqst_flags |= FQ_FLOWSTATS_DELAY_HIGH; - if (fq->fq_flags & FQF_FLOWCTL_ON) + } + if (fq->fq_flags & FQF_FLOWCTL_ON) { flowstat->fqst_flags |= FQ_FLOWSTATS_FLOWCTL_ON; - if (fqs->fqs_large_flow == fq) + } + if (fqs->fqs_large_flow == fq) { flowstat->fqst_flags |= FQ_FLOWSTATS_LARGE_FLOW; + } } int @@ -1219,8 +1263,9 @@ fq_if_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t qid, fq_t *fq = NULL; u_int32_t i, flowstat_cnt; - if (qid >= FQ_IF_MAX_CLASSES) - return (EINVAL); + if (qid >= FQ_IF_MAX_CLASSES) { + return EINVAL; + } fqs = (fq_if_t *)ifq->ifcq_disc; fcls = &ifqs->ifqs_fq_codel_stats; @@ -1259,23 +1304,26 @@ fq_if_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t qid, fcls->fcls_oldflows_cnt), FQ_IF_MAX_FLOWSTATS); i = 0; STAILQ_FOREACH(fq, &fq_cl->fcl_new_flows, fq_actlink) { - if (i >= fcls->fcls_newflows_cnt || i >= flowstat_cnt) + if (i >= fcls->fcls_newflows_cnt || i >= flowstat_cnt) { break; + } /* leave space for a few old flows */ if ((flowstat_cnt - i) < fcls->fcls_oldflows_cnt && - i >= (FQ_IF_MAX_FLOWSTATS >> 1)) + i >= (FQ_IF_MAX_FLOWSTATS >> 1)) { break; + } fq_export_flowstats(fqs, fq, &fcls->fcls_flowstats[i]); i++; } STAILQ_FOREACH(fq, &fq_cl->fcl_old_flows, fq_actlink) { - if (i >= flowstat_cnt) + if (i >= flowstat_cnt) { break; + } fq_export_flowstats(fqs, fq, &fcls->fcls_flowstats[i]); i++; } VERIFY(i <= flowstat_cnt); fcls->fcls_flowstats_cnt = i; - return (0); + return 0; } diff --git a/bsd/net/pktsched/pktsched_fq_codel.h b/bsd/net/pktsched/pktsched_fq_codel.h index 0929882a6..7d55fa5bf 100644 --- a/bsd/net/pktsched/pktsched_fq_codel.h +++ b/bsd/net/pktsched/pktsched_fq_codel.h @@ -27,7 +27,7 @@ */ #ifndef _NET_PKTSCHED_FQ_CODEL_H_ -#define _NET_PKTSCHED_FQ_CODEL_H_ +#define _NET_PKTSCHED_FQ_CODEL_H_ #ifdef PRIVATE #include @@ -68,28 +68,28 @@ struct fcl_stat { * hashing */ -#define FQ_IF_HASH_TAG_SIZE 8 -#define FQ_IF_HASH_TAG_SHIFT 24 -#define FQ_IF_HASH_TAG_MASK 0xFF -#define FQ_IF_HASH_TABLE_SIZE (1 << FQ_IF_HASH_TAG_SIZE) +#define FQ_IF_HASH_TAG_SIZE 8 +#define FQ_IF_HASH_TAG_SHIFT 24 +#define FQ_IF_HASH_TAG_MASK 0xFF +#define FQ_IF_HASH_TABLE_SIZE (1 << FQ_IF_HASH_TAG_SIZE) /* Set the quantum to be one MTU */ -#define FQ_IF_DEFAULT_QUANTUM 1500 +#define FQ_IF_DEFAULT_QUANTUM 1500 /* Max number of service classes currently supported */ -#define FQ_IF_MAX_CLASSES 10 +#define FQ_IF_MAX_CLASSES 10 -#define FQ_IF_LARGE_FLOW_BYTE_LIMIT 
15000 +#define FQ_IF_LARGE_FLOW_BYTE_LIMIT 15000 struct flowq; typedef u_int32_t pktsched_bitmap_t; struct if_ifclassq_stats; enum fq_if_state { - FQ_IF_ER = 0, /* eligible, ready */ - FQ_IF_IR = 1, /* ineligible, ready */ - FQ_IF_EB = 2, /* eligible blocked */ - FQ_IF_IB = 3, /* ineligible, blocked */ + FQ_IF_ER = 0, /* eligible, ready */ + FQ_IF_IR = 1, /* ineligible, ready */ + FQ_IF_EB = 2, /* eligible blocked */ + FQ_IF_IB = 3, /* ineligible, blocked */ FQ_IF_MAX_STATE }; @@ -97,90 +97,90 @@ enum fq_if_state { * This priority index is used for QFQ state bitmaps, lower index gets * higher priority */ -#define FQ_IF_BK_SYS_INDEX 9 -#define FQ_IF_BK_INDEX 8 -#define FQ_IF_BE_INDEX 7 -#define FQ_IF_RD_INDEX 6 -#define FQ_IF_OAM_INDEX 5 -#define FQ_IF_AV_INDEX 4 -#define FQ_IF_RV_INDEX 3 -#define FQ_IF_VI_INDEX 2 -#define FQ_IF_SIG_INDEX 2 -#define FQ_IF_VO_INDEX 1 -#define FQ_IF_CTL_INDEX 0 +#define FQ_IF_BK_SYS_INDEX 9 +#define FQ_IF_BK_INDEX 8 +#define FQ_IF_BE_INDEX 7 +#define FQ_IF_RD_INDEX 6 +#define FQ_IF_OAM_INDEX 5 +#define FQ_IF_AV_INDEX 4 +#define FQ_IF_RV_INDEX 3 +#define FQ_IF_VI_INDEX 2 +#define FQ_IF_SIG_INDEX 2 +#define FQ_IF_VO_INDEX 1 +#define FQ_IF_CTL_INDEX 0 typedef SLIST_HEAD(, flowq) flowq_list_t; typedef STAILQ_HEAD(, flowq) flowq_stailq_t; typedef struct fq_if_classq { - u_int32_t fcl_pri; /* class priority, lower the better */ - u_int32_t fcl_service_class; /* service class */ - u_int32_t fcl_quantum; /* quantum in bytes */ - u_int32_t fcl_drr_max; /* max flows per class for DRR */ - int64_t fcl_budget; /* budget for this classq */ - flowq_stailq_t fcl_new_flows; /* List of new flows */ - flowq_stailq_t fcl_old_flows; /* List of old flows */ + u_int32_t fcl_pri; /* class priority, lower the better */ + u_int32_t fcl_service_class; /* service class */ + u_int32_t fcl_quantum; /* quantum in bytes */ + u_int32_t fcl_drr_max; /* max flows per class for DRR */ + int64_t fcl_budget; /* budget for this classq */ + flowq_stailq_t fcl_new_flows; /* List of new flows */ + flowq_stailq_t fcl_old_flows; /* List of old flows */ struct fcl_stat fcl_stat; } fq_if_classq_t; typedef struct fq_codel_sched_data { - struct ifclassq *fqs_ifq; /* back pointer to ifclassq */ - u_int64_t fqs_target_qdelay; /* Target queue delay (ns) */ - u_int64_t fqs_update_interval; /* update interval (ns) */ - flowq_list_t fqs_flows[FQ_IF_HASH_TABLE_SIZE]; /* flows table */ - pktsched_bitmap_t fqs_bitmaps[FQ_IF_MAX_STATE]; - u_int32_t fqs_pkt_droplimit; /* drop limit */ - u_int8_t fqs_throttle; /* throttle on or off */ - u_int8_t fqs_flags; /* flags */ -#define FQS_DRIVER_MANAGED 0x1 - fq_if_classq_t fqs_classq[FQ_IF_MAX_CLASSES]; /* class queues */ - struct flowadv_fclist fqs_fclist; /* flow control state */ - struct flowq *fqs_large_flow; /* flow has highest number of bytes */ - classq_pkt_type_t fqs_ptype; + struct ifclassq *fqs_ifq; /* back pointer to ifclassq */ + u_int64_t fqs_target_qdelay; /* Target queue delay (ns) */ + u_int64_t fqs_update_interval; /* update interval (ns) */ + flowq_list_t fqs_flows[FQ_IF_HASH_TABLE_SIZE]; /* flows table */ + pktsched_bitmap_t fqs_bitmaps[FQ_IF_MAX_STATE]; + u_int32_t fqs_pkt_droplimit; /* drop limit */ + u_int8_t fqs_throttle; /* throttle on or off */ + u_int8_t fqs_flags; /* flags */ +#define FQS_DRIVER_MANAGED 0x1 + fq_if_classq_t fqs_classq[FQ_IF_MAX_CLASSES]; /* class queues */ + struct flowadv_fclist fqs_fclist; /* flow control state */ + struct flowq *fqs_large_flow; /* flow has highest number of bytes */ + classq_pkt_type_t fqs_ptype; } fq_if_t; 
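The fq_if_t instance above resolves packets to flow queues through the fqs_flows table: FQ_IF_FLOW_HASH_ID keeps FQ_IF_HASH_TAG_SIZE (8) tag bits starting at bit FQ_IF_HASH_TAG_SHIFT of the flow id, selecting one of the 256 buckets, and fq_if_hash_pkt (in pktsched_fq_codel.c, earlier in this patch) then walks the bucket comparing both fq_flowhash and fq_sc_index, so flows with colliding hash tags or equal hashes in different service classes still get distinct queues. A minimal standalone sketch of that lookup, using simplified stand-ins for struct flowq and flowq_list_t rather than the kernel types:

#include <stdint.h>
#include <stddef.h>

#define HASH_TAG_SHIFT  24                      /* FQ_IF_HASH_TAG_SHIFT */
#define HASH_TAG_MASK   0xFF                    /* FQ_IF_HASH_TAG_MASK */
#define HASH_TABLE_SIZE 256                     /* 1 << FQ_IF_HASH_TAG_SIZE */

struct flow {                                   /* stand-in for struct flowq */
	uint32_t flowhash;                      /* full 32-bit flow id */
	uint32_t sc_index;                      /* service class index */
	struct flow *next;                      /* bucket chain (SLIST in xnu) */
};

static struct flow *flow_table[HASH_TABLE_SIZE];

/* Mirrors the lookup half of fq_if_hash_pkt(); creation is left out. */
static struct flow *
flow_lookup(uint32_t flowid, uint32_t scidx)
{
	uint32_t slot = (flowid >> HASH_TAG_SHIFT) & HASH_TAG_MASK;
	struct flow *fq;

	for (fq = flow_table[slot]; fq != NULL; fq = fq->next) {
		if (fq->flowhash == flowid && fq->sc_index == scidx) {
			return fq;      /* same flow, same service class */
		}
	}
	return NULL;                    /* caller may create the flow queue */
}

Because only the top 8 bits pick the bucket, the full flowhash comparison inside the loop is what actually disambiguates flows; the tag bits exist purely to bound the chain walk.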
#endif /* BSD_KERNEL_PRIVATE */ struct fq_codel_flowstats { - u_int32_t fqst_min_qdelay; -#define FQ_FLOWSTATS_OLD_FLOW 0x1 -#define FQ_FLOWSTATS_NEW_FLOW 0x2 -#define FQ_FLOWSTATS_LARGE_FLOW 0x4 -#define FQ_FLOWSTATS_DELAY_HIGH 0x8 -#define FQ_FLOWSTATS_FLOWCTL_ON 0x10 - u_int32_t fqst_flags; - u_int32_t fqst_bytes; - u_int32_t fqst_flowhash; + u_int32_t fqst_min_qdelay; +#define FQ_FLOWSTATS_OLD_FLOW 0x1 +#define FQ_FLOWSTATS_NEW_FLOW 0x2 +#define FQ_FLOWSTATS_LARGE_FLOW 0x4 +#define FQ_FLOWSTATS_DELAY_HIGH 0x8 +#define FQ_FLOWSTATS_FLOWCTL_ON 0x10 + u_int32_t fqst_flags; + u_int32_t fqst_bytes; + u_int32_t fqst_flowhash; }; -#define FQ_IF_MAX_FLOWSTATS 20 +#define FQ_IF_MAX_FLOWSTATS 20 struct fq_codel_classstats { - u_int32_t fcls_pri; - u_int32_t fcls_service_class; - u_int32_t fcls_quantum; - u_int32_t fcls_drr_max; - int64_t fcls_budget; - u_int64_t fcls_target_qdelay; - u_int64_t fcls_update_interval; - u_int32_t fcls_flow_control; - u_int32_t fcls_flow_feedback; - u_int32_t fcls_dequeue_stall; - u_int32_t fcls_flow_control_fail; - u_int64_t fcls_drop_overflow; - u_int64_t fcls_drop_early; - u_int32_t fcls_drop_memfailure; - u_int32_t fcls_flows_cnt; - u_int32_t fcls_newflows_cnt; - u_int32_t fcls_oldflows_cnt; - u_int64_t fcls_pkt_cnt; - u_int64_t fcls_dequeue; - u_int64_t fcls_dequeue_bytes; - u_int64_t fcls_byte_cnt; - u_int32_t fcls_throttle_on; - u_int32_t fcls_throttle_off; - u_int32_t fcls_throttle_drops; - u_int32_t fcls_dup_rexmts; - u_int32_t fcls_flowstats_cnt; + u_int32_t fcls_pri; + u_int32_t fcls_service_class; + u_int32_t fcls_quantum; + u_int32_t fcls_drr_max; + int64_t fcls_budget; + u_int64_t fcls_target_qdelay; + u_int64_t fcls_update_interval; + u_int32_t fcls_flow_control; + u_int32_t fcls_flow_feedback; + u_int32_t fcls_dequeue_stall; + u_int32_t fcls_flow_control_fail; + u_int64_t fcls_drop_overflow; + u_int64_t fcls_drop_early; + u_int32_t fcls_drop_memfailure; + u_int32_t fcls_flows_cnt; + u_int32_t fcls_newflows_cnt; + u_int32_t fcls_oldflows_cnt; + u_int64_t fcls_pkt_cnt; + u_int64_t fcls_dequeue; + u_int64_t fcls_dequeue_bytes; + u_int64_t fcls_byte_cnt; + u_int32_t fcls_throttle_on; + u_int32_t fcls_throttle_off; + u_int32_t fcls_throttle_drops; + u_int32_t fcls_dup_rexmts; + u_int32_t fcls_flowstats_cnt; struct fq_codel_flowstats fcls_flowstats[FQ_IF_MAX_FLOWSTATS]; }; diff --git a/bsd/net/pktsched/pktsched_hfsc.h b/bsd/net/pktsched/pktsched_hfsc.h index 7f14cdcb9..7b452789d 100644 --- a/bsd/net/pktsched/pktsched_hfsc.h +++ b/bsd/net/pktsched/pktsched_hfsc.h @@ -59,7 +59,7 @@ * changes without encumbrance. 
*/ #ifndef _NET_PKTSCHED_PKTSCHED_HFSC_H_ -#define _NET_PKTSCHED_PKTSCHED_HFSC_H_ +#define _NET_PKTSCHED_PKTSCHED_HFSC_H_ #ifdef PRIVATE #include @@ -74,103 +74,103 @@ extern "C" { #endif struct service_curve { - u_int32_t fl; /* service curve flags */ - u_int64_t m1; /* slope of the first segment in bits/sec */ - u_int32_t d; /* the x-projection of the first segment in msec */ - u_int64_t m2; /* slope of the second segment in bits/sec */ + u_int32_t fl; /* service curve flags */ + u_int64_t m1; /* slope of the first segment in bits/sec */ + u_int32_t d; /* the x-projection of the first segment in msec */ + u_int64_t m2; /* slope of the second segment in bits/sec */ }; /* valid values for service curve flags */ -#define HFSCF_M1_PCT 0x1 /* m1 is in percentage */ -#define HFSCF_M2_PCT 0x10 /* m2 is in percentage */ +#define HFSCF_M1_PCT 0x1 /* m1 is in percentage */ +#define HFSCF_M2_PCT 0x10 /* m2 is in percentage */ -#define HFSCF_USERFLAGS (HFSCF_M1_PCT | HFSCF_M2_PCT) +#define HFSCF_USERFLAGS (HFSCF_M1_PCT | HFSCF_M2_PCT) /* special class handles */ -#define HFSC_NULLCLASS_HANDLE 0 -#define HFSC_MAX_CLASSES 64 +#define HFSC_NULLCLASS_HANDLE 0 +#define HFSC_MAX_CLASSES 64 /* hfsc class flags */ -#define HFCF_RED 0x0001 /* use RED */ -#define HFCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ -#define HFCF_RIO 0x0004 /* use RIO */ -#define HFCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ -#define HFCF_BLUE 0x0100 /* use BLUE */ -#define HFCF_SFB 0x0200 /* use SFB */ -#define HFCF_FLOWCTL 0x0400 /* enable flow control advisories */ -#define HFCF_DEFAULTCLASS 0x1000 /* default class */ +#define HFCF_RED 0x0001 /* use RED */ +#define HFCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ +#define HFCF_RIO 0x0004 /* use RIO */ +#define HFCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ +#define HFCF_BLUE 0x0100 /* use BLUE */ +#define HFCF_SFB 0x0200 /* use SFB */ +#define HFCF_FLOWCTL 0x0400 /* enable flow control advisories */ +#define HFCF_DEFAULTCLASS 0x1000 /* default class */ #ifdef BSD_KERNEL_PRIVATE -#define HFCF_RSC 0x10000 /* has realtime sc */ -#define HFCF_FSC 0x20000 /* has linkshare sc */ -#define HFCF_USC 0x40000 /* has upperlimit sc */ -#define HFCF_LAZY 0x10000000 /* on-demand resource allocation */ +#define HFCF_RSC 0x10000 /* has realtime sc */ +#define HFCF_FSC 0x20000 /* has linkshare sc */ +#define HFCF_USC 0x40000 /* has upperlimit sc */ +#define HFCF_LAZY 0x10000000 /* on-demand resource allocation */ #endif /* BSD_KERNEL_PRIVATE */ -#define HFCF_USERFLAGS \ - (HFCF_RED | HFCF_ECN | HFCF_RIO | HFCF_CLEARDSCP | HFCF_BLUE | \ +#define HFCF_USERFLAGS \ + (HFCF_RED | HFCF_ECN | HFCF_RIO | HFCF_CLEARDSCP | HFCF_BLUE | \ HFCF_SFB | HFCF_FLOWCTL | HFCF_DEFAULTCLASS) #ifdef BSD_KERNEL_PRIVATE -#define HFCF_BITS \ +#define HFCF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" \ "\21RSC\22FSC\23USC\35LAZY" #else -#define HFCF_BITS \ +#define HFCF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" #endif /* !BSD_KERNEL_PRIVATE */ /* service curve types */ -#define HFSC_REALTIMESC 1 -#define HFSC_LINKSHARINGSC 2 -#define HFSC_UPPERLIMITSC 4 -#define HFSC_DEFAULTSC (HFSC_REALTIMESC|HFSC_LINKSHARINGSC) +#define HFSC_REALTIMESC 1 +#define HFSC_LINKSHARINGSC 2 +#define HFSC_UPPERLIMITSC 4 +#define HFSC_DEFAULTSC (HFSC_REALTIMESC|HFSC_LINKSHARINGSC) struct hfsc_classstats { - u_int32_t class_id; - u_int32_t class_handle; - struct service_curve rsc; - struct service_curve fsc; - struct service_curve usc; /* upper limit service curve */ - - 
u_int64_t total; /* total work in bytes */ - u_int64_t cumul; /* cumulative work in bytes */ - /* done by real-time criteria */ - u_int64_t d; /* deadline */ - u_int64_t e; /* eligible time */ - u_int64_t vt; /* virtual time */ - u_int64_t f; /* fit time for upper-limit */ + u_int32_t class_id; + u_int32_t class_handle; + struct service_curve rsc; + struct service_curve fsc; + struct service_curve usc; /* upper limit service curve */ + + u_int64_t total; /* total work in bytes */ + u_int64_t cumul; /* cumulative work in bytes */ + /* done by real-time criteria */ + u_int64_t d; /* deadline */ + u_int64_t e; /* eligible time */ + u_int64_t vt; /* virtual time */ + u_int64_t f; /* fit time for upper-limit */ /* info helpful for debugging */ - u_int64_t initvt; /* init virtual time */ - u_int64_t vtoff; /* cl_vt_ipoff */ - u_int64_t cvtmax; /* cl_maxvt */ - u_int64_t myf; /* cl_myf */ - u_int64_t cfmin; /* cl_mincf */ - u_int64_t cvtmin; /* cl_mincvt */ - u_int64_t myfadj; /* cl_myfadj */ - u_int64_t vtadj; /* cl_vtadj */ - u_int64_t cur_time; - u_int32_t machclk_freq; - - u_int32_t qlength; - u_int32_t qlimit; - struct pktcntr xmit_cnt; - struct pktcntr drop_cnt; - u_int32_t period; - - u_int32_t vtperiod; /* vt period sequence no */ - u_int32_t parentperiod; /* parent's vt period seqno */ - int nactive; /* number of active children */ + u_int64_t initvt; /* init virtual time */ + u_int64_t vtoff; /* cl_vt_ipoff */ + u_int64_t cvtmax; /* cl_maxvt */ + u_int64_t myf; /* cl_myf */ + u_int64_t cfmin; /* cl_mincf */ + u_int64_t cvtmin; /* cl_mincvt */ + u_int64_t myfadj; /* cl_myfadj */ + u_int64_t vtadj; /* cl_vtadj */ + u_int64_t cur_time; + u_int32_t machclk_freq; + + u_int32_t qlength; + u_int32_t qlimit; + struct pktcntr xmit_cnt; + struct pktcntr drop_cnt; + u_int32_t period; + + u_int32_t vtperiod; /* vt period sequence no */ + u_int32_t parentperiod; /* parent's vt period seqno */ + int nactive; /* number of active children */ /* RED, RIO, BLUE, SFB related info */ - classq_type_t qtype; + classq_type_t qtype; union { /* RIO has 3 red stats */ - struct red_stats red[RIO_NDROPPREC]; - struct blue_stats blue; - struct sfb_stats sfb; + struct red_stats red[RIO_NDROPPREC]; + struct blue_stats blue; + struct sfb_stats sfb; }; - classq_state_t qstate; + classq_state_t qstate; }; #ifdef __cplusplus diff --git a/bsd/net/pktsched/pktsched_priq.h b/bsd/net/pktsched/pktsched_priq.h index 858cf9ef3..cb30eb90a 100644 --- a/bsd/net/pktsched/pktsched_priq.h +++ b/bsd/net/pktsched/pktsched_priq.h @@ -55,7 +55,7 @@ */ #ifndef _NET_PKTSCHED_PKTSCHED_PRIQ_H_ -#define _NET_PKTSCHED_PKTSCHED_PRIQ_H_ +#define _NET_PKTSCHED_PKTSCHED_PRIQ_H_ #ifdef PRIVATE #include @@ -69,53 +69,53 @@ extern "C" { #endif -#define PRIQ_MAXPRI 16 /* upper limit of the number of priorities */ +#define PRIQ_MAXPRI 16 /* upper limit of the number of priorities */ /* priq class flags */ -#define PRCF_RED 0x0001 /* use RED */ -#define PRCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ -#define PRCF_RIO 0x0004 /* use RIO */ -#define PRCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ -#define PRCF_BLUE 0x0100 /* use BLUE */ -#define PRCF_SFB 0x0200 /* use SFB */ -#define PRCF_FLOWCTL 0x0400 /* enable flow control advisories */ -#define PRCF_DEFAULTCLASS 0x1000 /* default class */ +#define PRCF_RED 0x0001 /* use RED */ +#define PRCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ +#define PRCF_RIO 0x0004 /* use RIO */ +#define PRCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ +#define PRCF_BLUE 0x0100 /* use BLUE */ +#define PRCF_SFB 0x0200 
/* use SFB */ +#define PRCF_FLOWCTL 0x0400 /* enable flow control advisories */ +#define PRCF_DEFAULTCLASS 0x1000 /* default class */ #ifdef BSD_KERNEL_PRIVATE -#define PRCF_LAZY 0x10000000 /* on-demand resource allocation */ +#define PRCF_LAZY 0x10000000 /* on-demand resource allocation */ #endif /* BSD_KERNEL_PRIVATE */ -#define PRCF_USERFLAGS \ - (PRCF_RED | PRCF_ECN | PRCF_RIO | PRCF_CLEARDSCP | PRCF_BLUE | \ +#define PRCF_USERFLAGS \ + (PRCF_RED | PRCF_ECN | PRCF_RIO | PRCF_CLEARDSCP | PRCF_BLUE | \ PRCF_SFB | PRCF_FLOWCTL | PRCF_DEFAULTCLASS) #ifdef BSD_KERNEL_PRIVATE -#define PRCF_BITS \ +#define PRCF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" \ "\35LAZY" #else -#define PRCF_BITS \ +#define PRCF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" #endif /* !BSD_KERNEL_PRIVATE */ struct priq_classstats { - u_int32_t class_handle; - u_int32_t priority; + u_int32_t class_handle; + u_int32_t priority; - u_int32_t qlength; - u_int32_t qlimit; - u_int32_t period; - struct pktcntr xmitcnt; /* transmitted packet counter */ - struct pktcntr dropcnt; /* dropped packet counter */ + u_int32_t qlength; + u_int32_t qlimit; + u_int32_t period; + struct pktcntr xmitcnt; /* transmitted packet counter */ + struct pktcntr dropcnt; /* dropped packet counter */ /* RED, RIO, BLUE, SFB related info */ - classq_type_t qtype; + classq_type_t qtype; union { /* RIO has 3 red stats */ - struct red_stats red[RIO_NDROPPREC]; - struct blue_stats blue; - struct sfb_stats sfb; + struct red_stats red[RIO_NDROPPREC]; + struct blue_stats blue; + struct sfb_stats sfb; }; - classq_state_t qstate; + classq_state_t qstate; }; #ifdef __cplusplus diff --git a/bsd/net/pktsched/pktsched_qfq.c b/bsd/net/pktsched/pktsched_qfq.c index 862ee8711..e4c6c2d8c 100644 --- a/bsd/net/pktsched/pktsched_qfq.c +++ b/bsd/net/pktsched/pktsched_qfq.c @@ -135,17 +135,17 @@ static void qfq_dump_groups(struct qfq_if *, u_int32_t); static void qfq_dump_sched(struct qfq_if *, const char *); #endif /* QFQ_DEBUG */ -#define QFQ_ZONE_MAX 32 /* maximum elements in zone */ -#define QFQ_ZONE_NAME "pktsched_qfq" /* zone name */ +#define QFQ_ZONE_MAX 32 /* maximum elements in zone */ +#define QFQ_ZONE_NAME "pktsched_qfq" /* zone name */ -static unsigned int qfq_size; /* size of zone element */ -static struct zone *qfq_zone; /* zone for qfq */ +static unsigned int qfq_size; /* size of zone element */ +static struct zone *qfq_zone; /* zone for qfq */ -#define QFQ_CL_ZONE_MAX 32 /* maximum elements in zone */ -#define QFQ_CL_ZONE_NAME "pktsched_qfq_cl" /* zone name */ +#define QFQ_CL_ZONE_MAX 32 /* maximum elements in zone */ +#define QFQ_CL_ZONE_NAME "pktsched_qfq_cl" /* zone name */ -static unsigned int qfq_cl_size; /* size of zone element */ -static struct zone *qfq_cl_zone; /* zone for qfq_class */ +static unsigned int qfq_cl_size; /* size of zone element */ +static struct zone *qfq_cl_zone; /* zone for qfq_class */ /* * Maximum number of consecutive slots occupied by backlogged classes @@ -154,12 +154,12 @@ static struct zone *qfq_cl_zone; /* zone for qfq_class */ * * XXX check because it poses constraints on MAX_INDEX */ -#define QFQ_MAX_SLOTS 32 /* default when ALTQ is available */ +#define QFQ_MAX_SLOTS 32 /* default when ALTQ is available */ void qfq_init(void) { - qfq_size = sizeof (struct qfq_if); + qfq_size = sizeof(struct qfq_if); qfq_zone = zinit(qfq_size, QFQ_ZONE_MAX * qfq_size, 0, QFQ_ZONE_NAME); if (qfq_zone == NULL) { @@ -169,7 +169,7 @@ qfq_init(void) zone_change(qfq_zone, Z_EXPAND, 
TRUE); zone_change(qfq_zone, Z_CALLERACCT, TRUE); - qfq_cl_size = sizeof (struct qfq_class); + qfq_cl_size = sizeof(struct qfq_class); qfq_cl_zone = zinit(qfq_cl_size, QFQ_CL_ZONE_MAX * qfq_cl_size, 0, QFQ_CL_ZONE_NAME); if (qfq_cl_zone == NULL) { @@ -183,11 +183,12 @@ qfq_init(void) struct qfq_if * qfq_alloc(struct ifnet *ifp, int how) { - struct qfq_if *qif; + struct qfq_if *qif; qif = (how == M_WAITOK) ? zalloc(qfq_zone) : zalloc_noblock(qfq_zone); - if (qif == NULL) - return (NULL); + if (qif == NULL) { + return NULL; + } bzero(qif, qfq_size); qif->qif_ifq = &ifp->if_snd; @@ -203,17 +204,17 @@ qfq_alloc(struct ifnet *ifp, int how) */ qif->qif_maxslots = QFQ_MAX_SLOTS; - if ((qif->qif_class_tbl = _MALLOC(sizeof (struct qfq_class *) * - qif->qif_maxclasses, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) { + if ((qif->qif_class_tbl = _MALLOC(sizeof(struct qfq_class *) * + qif->qif_maxclasses, M_DEVBUF, M_WAITOK | M_ZERO)) == NULL) { log(LOG_ERR, "%s: %s unable to allocate class table array\n", if_name(ifp), qfq_style(qif)); goto error; } - if ((qif->qif_groups = _MALLOC(sizeof (struct qfq_group *) * - (QFQ_MAX_INDEX + 1), M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) { + if ((qif->qif_groups = _MALLOC(sizeof(struct qfq_group *) * + (QFQ_MAX_INDEX + 1), M_DEVBUF, M_WAITOK | M_ZERO)) == NULL) { log(LOG_ERR, "%s: %s unable to allocate group array\n", - if_name(ifp), qfq_style(qif)); + if_name(ifp), qfq_style(qif)); goto error; } @@ -222,7 +223,7 @@ qfq_alloc(struct ifnet *ifp, int how) if_name(ifp), qfq_style(qif)); } - return (qif); + return qif; error: if (qif->qif_class_tbl != NULL) { @@ -235,7 +236,7 @@ error: } zfree(qfq_zone, qif); - return (NULL); + return NULL; } int @@ -248,7 +249,7 @@ qfq_destroy(struct qfq_if *qif) err = qfq_destroy_locked(qif); IFCQ_UNLOCK(ifq); - return (err); + return err; } static int @@ -286,7 +287,7 @@ qfq_destroy_locked(struct qfq_if *qif) zfree(qfq_zone, qif); - return (0); + return 0; } /* @@ -302,11 +303,13 @@ qfq_clear_interface(struct qfq_if *qif) IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); /* clear out the classes */ - for (i = 0; i < qif->qif_maxclasses; i++) - if ((cl = qif->qif_class_tbl[i]) != NULL) + for (i = 0; i < qif->qif_maxclasses; i++) { + if ((cl = qif->qif_class_tbl[i]) != NULL) { qfq_class_destroy(qif, cl); + } + } - return (0); + return 0; } /* discard all the queued packets on the interface */ @@ -319,8 +322,9 @@ qfq_purge(struct qfq_if *qif) IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); for (i = 0; i < qif->qif_maxclasses; i++) { - if ((cl = qif->qif_class_tbl[i]) != NULL) + if ((cl = qif->qif_class_tbl[i]) != NULL) { qfq_purgeq(qif, cl, 0, NULL, NULL); + } } VERIFY(IFCQ_LEN(qif->qif_ifq) == 0); } @@ -365,9 +369,11 @@ qfq_event(struct qfq_if *qif, cqev_t ev) IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - for (i = 0; i < qif->qif_maxclasses; i++) - if ((cl = qif->qif_class_tbl[i]) != NULL) + for (i = 0; i < qif->qif_maxclasses; i++) { + if ((cl = qif->qif_class_tbl[i]) != NULL) { qfq_updateq(qif, cl, ev); + } + } } int @@ -380,28 +386,34 @@ qfq_add_queue(struct qfq_if *qif, u_int32_t qlimit, u_int32_t weight, IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - if (qfq_clh_to_clp(qif, qid) != NULL) - return (EBUSY); + if (qfq_clh_to_clp(qif, qid) != NULL) { + return EBUSY; + } /* check parameters */ - if (weight == 0 || weight > QFQ_MAX_WEIGHT) - return (EINVAL); + if (weight == 0 || weight > QFQ_MAX_WEIGHT) { + return EINVAL; + } w = (QFQ_ONE_FP / (QFQ_ONE_FP / weight)); - if (qif->qif_wsum + w > QFQ_MAX_WSUM) - return (EINVAL); + if (qif->qif_wsum + w > QFQ_MAX_WSUM) { + return EINVAL; + } 
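The double division above (w = QFQ_ONE_FP / (QFQ_ONE_FP / weight)) is not a no-op: QFQ stores each class's inverse weight (cl_inv_w = QFQ_ONE_FP / w) in fixed point, so the requested weight is first rounded to the value its representable inverse actually yields, and that rounded w, not the raw request, is what gets charged against QFQ_MAX_WSUM. A small demonstration; QFQ_ONE_FP is assumed here to be 1 << 30, as in other QFQ implementations (the real constant is defined in pktsched_qfq.h, not shown in this hunk):

#include <stdint.h>
#include <stdio.h>

#define ONE_FP (1u << 30)       /* assumed value of QFQ_ONE_FP */

int
main(void)
{
	uint32_t weights[] = { 1, 3, 10, 100, 1000 };
	unsigned i;

	for (i = 0; i < 5; i++) {
		uint32_t inv_w = ONE_FP / weights[i]; /* what QFQ stores per class */
		uint32_t w = ONE_FP / inv_w;          /* effective weight after rounding */
		printf("requested %4u -> effective %4u (inv_w %u)\n",
		    weights[i], w, inv_w);
	}
	return 0;
}

With 30 fraction bits the round-trip is exact for any weight up to QFQ_MAX_WEIGHT, which is why the subsequent wsum check can safely use w.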
- if (maxsz == 0 || maxsz > (1 << QFQ_MTU_SHIFT)) - return (EINVAL); + if (maxsz == 0 || maxsz > (1 << QFQ_MTU_SHIFT)) { + return EINVAL; + } cl = qfq_class_create(qif, weight, qlimit, flags, maxsz, qid, ptype); - if (cl == NULL) - return (ENOMEM); + if (cl == NULL) { + return ENOMEM; + } - if (clp != NULL) + if (clp != NULL) { *clp = cl; + } - return (0); + return 0; } static struct qfq_class * @@ -412,7 +424,7 @@ qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, struct ifclassq *ifq; struct qfq_group *grp; struct qfq_class *cl; - u_int32_t w; /* approximated weight */ + u_int32_t w; /* approximated weight */ int i; IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); @@ -421,22 +433,24 @@ qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, log(LOG_ERR, "%s: %s out of classes! (max %d)\n", if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_maxclasses); - return (NULL); + return NULL; } ifq = qif->qif_ifq; ifp = QFQIF_IFP(qif); cl = zalloc(qfq_cl_zone); - if (cl == NULL) - return (NULL); + if (cl == NULL) { + return NULL; + } bzero(cl, qfq_cl_size); if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) { qlimit = IFCQ_MAXLEN(ifq); - if (qlimit == 0) + if (qlimit == 0) { qlimit = DEFAULT_QLIMIT; /* use default */ + } } _qinit(&cl->cl_q, Q_DROPTAIL, qlimit, ptype); cl->cl_qif = qif; @@ -460,7 +474,7 @@ qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, } if (i == qif->qif_maxclasses) { zfree(qfq_cl_zone, cl); - return (NULL); + return NULL; } } @@ -475,13 +489,13 @@ qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, VERIFY(i <= QFQ_MAX_INDEX); grp = qif->qif_groups[i]; if (grp == NULL) { - grp = _MALLOC(sizeof (*grp), M_DEVBUF, M_WAITOK|M_ZERO); + grp = _MALLOC(sizeof(*grp), M_DEVBUF, M_WAITOK | M_ZERO); if (grp != NULL) { grp->qfg_index = i; grp->qfg_slot_shift = QFQ_MTU_SHIFT + QFQ_FRAC_BITS - (QFQ_MAX_INDEX - i); - grp->qfg_slots = _MALLOC(sizeof (struct qfq_class *) * - qif->qif_maxslots, M_DEVBUF, M_WAITOK|M_ZERO); + grp->qfg_slots = _MALLOC(sizeof(struct qfq_class *) * + qif->qif_maxslots, M_DEVBUF, M_WAITOK | M_ZERO); if (grp->qfg_slots == NULL) { log(LOG_ERR, "%s: %s unable to allocate group " "slots for index %d\n", if_name(ifp), @@ -494,10 +508,11 @@ qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, } if (grp == NULL || grp->qfg_slots == NULL) { qif->qif_class_tbl[qid % qif->qif_maxclasses] = NULL; - if (grp != NULL) + if (grp != NULL) { _FREE(grp, M_DEVBUF); + } zfree(qfq_cl_zone, cl); - return (NULL); + return NULL; } else { qif->qif_groups[i] = grp; } @@ -509,8 +524,9 @@ qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, qif->qif_classes++; - if (flags & QFCF_DEFAULTCLASS) + if (flags & QFCF_DEFAULTCLASS) { qif->qif_default = cl; + } if (flags & QFCF_SFB) { cl->cl_qflags = 0; @@ -523,11 +539,13 @@ qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, if (flags & QFCF_DELAYBASED) { cl->cl_qflags |= SFBF_DELAYBASED; } - if (!(cl->cl_flags & QFCF_LAZY)) + if (!(cl->cl_flags & QFCF_LAZY)) { cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle, qlimit(&cl->cl_q), cl->cl_qflags); - if (cl->cl_sfb != NULL || (cl->cl_flags & QFCF_LAZY)) + } + if (cl->cl_sfb != NULL || (cl->cl_flags & QFCF_LAZY)) { qtype(&cl->cl_q) = Q_SFB; + } } if (pktsched_verbose) { @@ -537,7 +555,7 @@ qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit, flags, QFCF_BITS); } - return (cl); + return cl; } int @@ -547,10 +565,11 @@ qfq_remove_queue(struct qfq_if *qif, u_int32_t qid) 
IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - if ((cl = qfq_clh_to_clp(qif, qid)) == NULL) - return (EINVAL); + if ((cl = qfq_clh_to_clp(qif, qid)) == NULL) { + return EINVAL; + } - return (qfq_class_destroy(qif, cl)); + return qfq_class_destroy(qif, cl); } static int @@ -568,7 +587,7 @@ qfq_class_destroy(struct qfq_if *qif, struct qfq_class *cl) if (cl->cl_inv_w != 0) { qif->qif_wsum -= (QFQ_ONE_FP / cl->cl_inv_w); - cl->cl_inv_w = 0; /* reset weight to avoid run twice */ + cl->cl_inv_w = 0; /* reset weight to avoid run twice */ } for (i = 0; i < qif->qif_maxclasses; i++) { @@ -580,15 +599,17 @@ qfq_class_destroy(struct qfq_if *qif, struct qfq_class *cl) qif->qif_classes--; if (cl->cl_qalg.ptr != NULL) { - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { sfb_destroy(cl->cl_sfb); + } cl->cl_qalg.ptr = NULL; qtype(&cl->cl_q) = Q_DROPTAIL; qstate(&cl->cl_q) = QS_RUNNING; } - if (qif->qif_default == cl) + if (qif->qif_default == cl) { qif->qif_default = NULL; + } if (pktsched_verbose) { log(LOG_DEBUG, "%s: %s destroyed qid=%d\n", @@ -597,7 +618,7 @@ qfq_class_destroy(struct qfq_if *qif, struct qfq_class *cl) zfree(qfq_cl_zone, cl); - return (0); + return 0; } /* @@ -606,7 +627,7 @@ qfq_class_destroy(struct qfq_if *qif, struct qfq_class *cl) static inline pktsched_bitmap_t mask_from(pktsched_bitmap_t bitmap, int from) { - return (bitmap & ~((1UL << from) - 1)); + return bitmap & ~((1UL << from) - 1); } /* @@ -625,11 +646,12 @@ qfq_calc_state(struct qfq_if *qif, struct qfq_group *grp) if (mask) { next = qfq_ffs(qif, mask); - if (qfq_gt(grp->qfg_F, next->qfg_F)) + if (qfq_gt(grp->qfg_F, next->qfg_F)) { state |= EB; + } } - return (state); + return state; } /* @@ -653,8 +675,9 @@ qfq_unblock_groups(struct qfq_if *qif, int index, u_int64_t old_finish) if (mask) { next = qfq_ffs(qif, mask); - if (!qfq_gt(next->qfg_F, old_finish)) + if (!qfq_gt(next->qfg_F, old_finish)) { return; + } } mask = (1UL << index) - 1; @@ -712,8 +735,9 @@ qfq_front_slot_remove(struct qfq_group *grp) struct qfq_class **h = &grp->qfg_slots[grp->qfg_front]; *h = (*h)->cl_next; - if (!*h) + if (!*h) { pktsched_bit_clr(0, &grp->qfg_full_slots); + } } /* @@ -732,8 +756,9 @@ qfq_slot_scan(struct qfq_if *qif, struct qfq_group *grp) grp->qfg_full_slots); } - if (grp->qfg_full_slots == 0) - return (NULL); + if (grp->qfg_full_slots == 0) { + return NULL; + } i = pktsched_ffs(grp->qfg_full_slots) - 1; /* zero-based */ if (i > 0) { @@ -741,7 +766,7 @@ qfq_slot_scan(struct qfq_if *qif, struct qfq_group *grp) grp->qfg_full_slots >>= i; } - return (grp->qfg_slots[grp->qfg_front]); + return grp->qfg_slots[grp->qfg_front]; } /* @@ -773,8 +798,9 @@ qfq_update_eligible(struct qfq_if *qif, u_int64_t old_V) if (!qif->qif_bitmaps[ER]) { struct qfq_group *grp; grp = qfq_ffs(qif, ineligible); - if (qfq_gt(grp->qfg_S, qif->qif_V)) + if (qfq_gt(grp->qfg_S, qif->qif_V)) { qif->qif_V = grp->qfg_S; + } } qfq_make_eligible(qif, old_V); } @@ -789,7 +815,7 @@ qfq_update_class(struct qfq_if *qif, struct qfq_group *grp, { #pragma unused(qif) cl->cl_S = cl->cl_F; - if (qempty(&cl->cl_q)) { + if (qempty(&cl->cl_q)) { qfq_front_slot_remove(grp); } else { u_int32_t len; @@ -798,13 +824,14 @@ qfq_update_class(struct qfq_if *qif, struct qfq_group *grp, len = m_pktlen((struct mbuf *)qhead(&cl->cl_q)); cl->cl_F = cl->cl_S + (u_int64_t)len * cl->cl_inv_w; roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); - if (roundedS == grp->qfg_S) - return (0); + if (roundedS == grp->qfg_S) { + return 0; + } 
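/*
 * Illustrative aside: qfq_round_down() clears the low qfg_slot_shift
 * bits of the timestamp, so start times within the same
 * 2^qfg_slot_shift window share a bucket.  E.g. with slot_shift = 20,
 * S = 0x12345678 rounds to 0x12300000; the class is only moved to a
 * new slot below when this rounded value differs from the group's S.
 */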
qfq_front_slot_remove(grp); qfq_slot_insert(qif, grp, cl, roundedS); } - return (1); + return 1; } /* @@ -830,16 +857,18 @@ qfq_dequeue(struct qfq_if *qif, pktsched_pkt_t *pkt) for (;;) { if (er_bits == 0) { #if QFQ_DEBUG - if (qif->qif_queued && pktsched_verbose > 1) + if (qif->qif_queued && pktsched_verbose > 1) { qfq_dump_sched(qif, "start dequeue"); + } #endif /* QFQ_DEBUG */ /* no eligible and ready packet */ return; } grp = qfq_ffs(qif, er_bits); /* if group is non-empty, use it */ - if (grp->qfg_full_slots != 0) + if (grp->qfg_full_slots != 0) { break; + } pktsched_bit_clr(grp->qfg_index, &er_bits); #if QFQ_DEBUG qif->qif_emptygrp++; @@ -860,8 +889,9 @@ qfq_dequeue(struct qfq_if *qif, pktsched_pkt_t *pkt) IFCQ_DEC_LEN(ifq); IFCQ_DEC_BYTES(ifq, len); - if (qempty(&cl->cl_q)) + if (qempty(&cl->cl_q)) { cl->cl_period++; + } PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len); IFCQ_XMIT_ADD(ifq, 1, len); @@ -887,8 +917,9 @@ qfq_dequeue(struct qfq_if *qif, pktsched_pkt_t *pkt) u_int64_t roundedS = qfq_round_down(cl->cl_S, grp->qfg_slot_shift); - if (grp->qfg_S == roundedS) + if (grp->qfg_S == roundedS) { goto skip_unblock; + } grp->qfg_S = roundedS; grp->qfg_F = roundedS + (2ULL << grp->qfg_slot_shift); @@ -906,8 +937,9 @@ skip_unblock: qfq_update_eligible(qif, old_V); #if QFQ_DEBUG - if (!qif->qif_bitmaps[ER] && qif->qif_queued && pktsched_verbose > 1) + if (!qif->qif_bitmaps[ER] && qif->qif_queued && pktsched_verbose > 1) { qfq_dump_sched(qif, "end dequeue"); + } #endif /* QFQ_DEBUG */ } @@ -968,7 +1000,7 @@ qfq_enqueue(struct qfq_if *qif, struct qfq_class *cl, pktsched_pkt_t *pkt, cl = qif->qif_default; if (cl == NULL) { IFCQ_CONVERT_LOCK(ifq); - return (CLASSQEQ_DROP); + return CLASSQEQ_DROP; } } } @@ -983,7 +1015,7 @@ qfq_enqueue(struct qfq_if *qif, struct qfq_class *cl, pktsched_pkt_t *pkt, ret == CLASSQEQ_DROP_SP); PKTCNTR_ADD(&cl->cl_dropcnt, 1, len); IFCQ_DROP_ADD(ifq, 1, len); - return (ret); + return ret; } IFCQ_INC_LEN(ifq); IFCQ_INC_BYTES(ifq, len); @@ -993,12 +1025,13 @@ qfq_enqueue(struct qfq_if *qif, struct qfq_class *cl, pktsched_pkt_t *pkt, #endif /* QFQ_DEBUG */ /* queue was not idle, we're done */ - if (qlen(&cl->cl_q) > 1) + if (qlen(&cl->cl_q) > 1) { goto done; + } /* queue was idle */ grp = cl->cl_grp; - qfq_update_start(qif, cl); /* adjust start time */ + qfq_update_start(qif, cl); /* adjust start time */ /* compute new finish time and rounded start */ cl->cl_F = cl->cl_S + (u_int64_t)len * cl->cl_inv_w; @@ -1014,8 +1047,9 @@ qfq_enqueue(struct qfq_if *qif, struct qfq_class *cl, pktsched_pkt_t *pkt, * in this group and nobody was in ER make sure to adjust V. */ if (grp->qfg_full_slots != 0) { - if (!qfq_gt(grp->qfg_S, cl->cl_S)) + if (!qfq_gt(grp->qfg_S, cl->cl_S)) { goto skip_update; + } /* create a slot for this cl->cl_S */ qfq_slot_rotate(qif, grp, roundedS); @@ -1047,7 +1081,7 @@ skip_update: done: /* successfully queued. 
*/ - return (ret); + return ret; } static inline void @@ -1064,12 +1098,14 @@ qfq_slot_remove(struct qfq_if *qif, struct qfq_group *grp, i = (grp->qfg_front + offset) % qif->qif_maxslots; pprev = &grp->qfg_slots[i]; - while (*pprev && *pprev != cl) + while (*pprev && *pprev != cl) { pprev = &(*pprev)->cl_next; + } *pprev = cl->cl_next; - if (!grp->qfg_slots[i]) + if (!grp->qfg_slots[i]) { pktsched_bit_clr(offset, &grp->qfg_full_slots); + } } /* @@ -1097,12 +1133,13 @@ qfq_deactivate_class(struct qfq_if *qif, struct qfq_class *cl) grp->qfg_front, qif->qif_bitmaps[ER], qif->qif_bitmaps[EB], qif->qif_bitmaps[IR], qif->qif_bitmaps[IB]); #if QFQ_DEBUG - if (pktsched_verbose > 1) + if (pktsched_verbose > 1) { qfq_dump_sched(qif, "start deactivate"); + } #endif /* QFQ_DEBUG */ } - cl->cl_F = cl->cl_S; /* not needed if the class goes away */ + cl->cl_F = cl->cl_S; /* not needed if the class goes away */ qfq_slot_remove(qif, grp, cl); if (grp->qfg_full_slots == 0) { @@ -1119,10 +1156,11 @@ qfq_deactivate_class(struct qfq_if *qif, struct qfq_class *cl) !(qif->qif_bitmaps[ER] & ~((1UL << grp->qfg_index) - 1))) { mask = qif->qif_bitmaps[ER] & ((1UL << grp->qfg_index) - 1); - if (mask) + if (mask) { mask = ~((1UL << __fls(mask)) - 1); - else + } else { mask = (pktsched_bitmap_t)~0UL; + } qfq_move_groups(qif, mask, EB, ER); qfq_move_groups(qif, mask, IB, IR); } @@ -1144,8 +1182,9 @@ qfq_deactivate_class(struct qfq_if *qif, struct qfq_class *cl) qfq_update_eligible(qif, qif->qif_V); #if QFQ_DEBUG - if (pktsched_verbose > 1) + if (pktsched_verbose > 1) { qfq_dump_sched(qif, "end deactivate"); + } #endif /* QFQ_DEBUG */ } @@ -1171,13 +1210,13 @@ qfq_state2str(int s) c = "?"; break; } - return (c); + return c; } static inline int qfq_addq(struct qfq_class *cl, pktsched_pkt_t *pkt, struct pf_mtag *t) { - struct qfq_if *qif = cl->cl_qif; + struct qfq_if *qif = cl->cl_qif; struct ifclassq *ifq = qif->qif_ifq; IFCQ_LOCK_ASSERT_HELD(ifq); @@ -1208,19 +1247,21 @@ qfq_addq(struct qfq_class *cl, pktsched_pkt_t *pkt, struct pf_mtag *t) cqrq_throttle_t tr = { 1, qif->qif_throttle }; int err = qfq_throttle(qif, &tr); - if (err == EALREADY) + if (err == EALREADY) { err = 0; + } if (err != 0) { tr.level = IFNET_THROTTLE_OFF; (void) qfq_throttle(qif, &tr); } } } - if (cl->cl_sfb != NULL) - return (sfb_addq(cl->cl_sfb, &cl->cl_q, pkt, t)); + if (cl->cl_sfb != NULL) { + return sfb_addq(cl->cl_sfb, &cl->cl_q, pkt, t); + } } else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) { IFCQ_CONVERT_LOCK(ifq); - return (CLASSQEQ_DROP); + return CLASSQEQ_DROP; } #if PF_ECN @@ -1233,7 +1274,7 @@ qfq_addq(struct qfq_class *cl, pktsched_pkt_t *pkt, struct pf_mtag *t) VERIFY(pkt->pktsched_ptype == qptype(&cl->cl_q)); _addq(&cl->cl_q, pkt->pktsched_pkt); - return (0); + return 0; } static inline void @@ -1241,10 +1282,11 @@ qfq_getq(struct qfq_class *cl, pktsched_pkt_t *pkt) { IFCQ_LOCK_ASSERT_HELD(cl->cl_qif->qif_ifq); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) - return (sfb_getq(cl->cl_sfb, &cl->cl_q, pkt)); + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { + return sfb_getq(cl->cl_sfb, &cl->cl_q, pkt); + } - return (pktsched_pkt_encap(pkt, qptype(&cl->cl_q), _getq(&cl->cl_q))); + return pktsched_pkt_encap(pkt, qptype(&cl->cl_q), _getq(&cl->cl_q)); } static void @@ -1256,14 +1298,16 @@ qfq_purgeq(struct qfq_if *qif, struct qfq_class *cl, u_int32_t flow, IFCQ_LOCK_ASSERT_HELD(ifq); - if ((qlen = qlen(&cl->cl_q)) == 0) + if ((qlen = qlen(&cl->cl_q)) == 0) { goto done; + } IFCQ_CONVERT_LOCK(ifq); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != 
NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len); - else + } else { _flushq_flow(&cl->cl_q, flow, &cnt, &len); + } if (cnt > 0) { VERIFY(qlen(&cl->cl_q) == (qlen - cnt)); @@ -1278,8 +1322,9 @@ qfq_purgeq(struct qfq_if *qif, struct qfq_class *cl, u_int32_t flow, VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0); IFCQ_LEN(ifq) -= cnt; - if (qempty(&cl->cl_q)) + if (qempty(&cl->cl_q)) { qfq_deactivate_class(qif, cl); + } if (pktsched_verbose) { log(LOG_DEBUG, "%s: %s purge qid=%d weight=%d " @@ -1291,10 +1336,12 @@ qfq_purgeq(struct qfq_if *qif, struct qfq_class *cl, u_int32_t flow, } } done: - if (packets != NULL) + if (packets != NULL) { *packets = cnt; - if (bytes != NULL) + } + if (bytes != NULL) { *bytes = len; + } } static void @@ -1309,8 +1356,9 @@ qfq_updateq(struct qfq_if *qif, struct qfq_class *cl, cqev_t ev) ifclassq_ev2str(ev)); } - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) - return (sfb_updateq(cl->cl_sfb, ev)); + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { + return sfb_updateq(cl->cl_sfb, ev); + } } int @@ -1321,8 +1369,9 @@ qfq_get_class_stats(struct qfq_if *qif, u_int32_t qid, IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq); - if ((cl = qfq_clh_to_clp(qif, qid)) == NULL) - return (EINVAL); + if ((cl = qfq_clh_to_clp(qif, qid)) == NULL) { + return EINVAL; + } sp->class_handle = cl->cl_handle; sp->index = cl->cl_grp->qfg_index; @@ -1337,10 +1386,11 @@ qfq_get_class_stats(struct qfq_if *qif, u_int32_t qid, sp->qtype = qtype(&cl->cl_q); sp->qstate = qstate(&cl->cl_q); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { sfb_getstats(cl->cl_sfb, &sp->sfb); + } - return (0); + return 0; } static int @@ -1361,7 +1411,7 @@ qfq_stat_sc(struct qfq_if *qif, cqrq_stat_sc_t *sr) sr->packets = qlen(&cl->cl_q); sr->bytes = qsize(&cl->cl_q); - return (0); + return 0; } /* convert a class handle to the corresponding class pointer */ @@ -1378,21 +1428,24 @@ qfq_clh_to_clp(struct qfq_if *qif, u_int32_t chandle) * the handle. If it fails, do the linear table search. 
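 * (Illustrative: with qif_maxclasses = 32, handle 0x25 probes slot
 * 0x25 % 32 = 5 first, so a lookup normally costs one table access;
 * the O(n) scan below only runs on a miss.)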
*/ i = chandle % qif->qif_maxclasses; - if ((cl = qif->qif_class_tbl[i]) != NULL && cl->cl_handle == chandle) - return (cl); - for (i = 0; i < qif->qif_maxclasses; i++) + if ((cl = qif->qif_class_tbl[i]) != NULL && cl->cl_handle == chandle) { + return cl; + } + for (i = 0; i < qif->qif_maxclasses; i++) { if ((cl = qif->qif_class_tbl[i]) != NULL && - cl->cl_handle == chandle) - return (cl); + cl->cl_handle == chandle) { + return cl; + } + } - return (NULL); + return NULL; } static const char * qfq_style(struct qfq_if *qif) { #pragma unused(qif) - return ("QFQ"); + return "QFQ"; } /* @@ -1401,7 +1454,7 @@ qfq_style(struct qfq_if *qif) static inline int qfq_gt(u_int64_t a, u_int64_t b) { - return ((int64_t)(a - b) > 0); + return (int64_t)(a - b) > 0; } /* @@ -1410,7 +1463,7 @@ qfq_gt(u_int64_t a, u_int64_t b) static inline u_int64_t qfq_round_down(u_int64_t ts, u_int32_t shift) { - return (ts & ~((1ULL << shift) - 1)); + return ts & ~((1ULL << shift) - 1); } /* @@ -1419,10 +1472,10 @@ qfq_round_down(u_int64_t ts, u_int32_t shift) static inline struct qfq_group * qfq_ffs(struct qfq_if *qif, pktsched_bitmap_t bitmap) { - int index = pktsched_ffs(bitmap) - 1; /* zero-based */ + int index = pktsched_ffs(bitmap) - 1; /* zero-based */ VERIFY(index >= 0 && index <= QFQ_MAX_INDEX && qif->qif_groups[index] != NULL); - return (qif->qif_groups[index]); + return qif->qif_groups[index]; } /* @@ -1433,27 +1486,29 @@ qfq_ffs(struct qfq_if *qif, pktsched_bitmap_t bitmap) static int qfq_calc_index(struct qfq_class *cl, u_int32_t inv_w, u_int32_t maxlen) { - u_int64_t slot_size = (u_int64_t)maxlen *inv_w; + u_int64_t slot_size = (u_int64_t)maxlen * inv_w; pktsched_bitmap_t size_map; int index = 0; size_map = (pktsched_bitmap_t)(slot_size >> QFQ_MIN_SLOT_SHIFT); - if (!size_map) + if (!size_map) { goto out; + } - index = __fls(size_map) + 1; /* basically a log_2() */ + index = __fls(size_map) + 1; /* basically a log_2() */ index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1))); - if (index < 0) + if (index < 0) { index = 0; + } out: if (pktsched_verbose) { log(LOG_DEBUG, "%s: %s qid=%d grp=%d W=%u, L=%u, I=%d\n", if_name(QFQIF_IFP(cl->cl_qif)), qfq_style(cl->cl_qif), - cl->cl_handle, index, (u_int32_t)(QFQ_ONE_FP/inv_w), + cl->cl_handle, index, (u_int32_t)(QFQ_ONE_FP / inv_w), maxlen, index); } - return (index); + return index; } #if QFQ_DEBUG @@ -1465,10 +1520,12 @@ qfq_dump_groups(struct qfq_if *qif, u_int32_t mask) for (i = 0; i < QFQ_MAX_INDEX + 1; i++) { struct qfq_group *g = qif->qif_groups[i]; - if (0 == (mask & (1 << i))) + if (0 == (mask & (1 << i))) { continue; - if (g == NULL) + } + if (g == NULL) { continue; + } log(LOG_DEBUG, "%s: %s [%2d] full_slots 0x%x\n", if_name(QFQIF_IFP(qif)), qfq_style(qif), i, @@ -1483,7 +1540,7 @@ qfq_dump_groups(struct qfq_if *qif, u_int32_t mask) "qid %d\n", if_name(QFQIF_IFP(qif)), qfq_style(qif), j, (uint64_t)VM_KERNEL_ADDRPERM( - g->qfg_slots[j]), + g->qfg_slots[j]), g->qfg_slots[j]->cl_handle); } } @@ -1535,7 +1592,7 @@ qfq_enqueue_ifclassq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, IFCQ_CONVERT_LOCK(ifq); m_freem(m); *pdrop = TRUE; - return (ENOBUFS); + return ENOBUFS; } i = MBUF_SCIDX(mbuf_get_service_class(m)); t = m_pftag(m); @@ -1581,7 +1638,7 @@ qfq_enqueue_ifclassq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, default: VERIFY(0); } - return (ret); + return ret; } /* @@ -1597,10 +1654,10 @@ static void * qfq_dequeue_ifclassq(struct ifclassq *ifq, classq_pkt_type_t *ptype) { pktsched_pkt_t pkt; - bzero(&pkt, sizeof (pkt)); + 
bzero(&pkt, sizeof(pkt)); qfq_dequeue(ifq->ifcq_disc, &pkt); *ptype = pkt.pktsched_ptype; - return (pkt.pktsched_pkt); + return pkt.pktsched_pkt; } static int @@ -1631,7 +1688,7 @@ qfq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg) err = qfq_stat_sc(qif, (cqrq_stat_sc_t *)arg); break; } - return (err); + return err; } int @@ -1649,61 +1706,77 @@ qfq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, VERIFY(ifq->ifcq_disc == NULL); VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE); - if (flags & PKTSCHEDF_QALG_SFB) + if (flags & PKTSCHEDF_QALG_SFB) { qflags |= QFCF_SFB; - if (flags & PKTSCHEDF_QALG_ECN) + } + if (flags & PKTSCHEDF_QALG_ECN) { qflags |= QFCF_ECN; - if (flags & PKTSCHEDF_QALG_FLOWCTL) + } + if (flags & PKTSCHEDF_QALG_FLOWCTL) { qflags |= QFCF_FLOWCTL; - if (flags & PKTSCHEDF_QALG_DELAYBASED) + } + if (flags & PKTSCHEDF_QALG_DELAYBASED) { qflags |= QFCF_DELAYBASED; + } qif = qfq_alloc(ifp, M_WAITOK); - if (qif == NULL) - return (ENOMEM); + if (qif == NULL) { + return ENOMEM; + } - if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) + if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) { maxlen = if_sndq_maxlen; + } if ((err = qfq_add_queue(qif, maxlen, 300, 1200, - qflags | QFCF_LAZY, SCIDX_BK_SYS, &cl0, ptype)) != 0) + qflags | QFCF_LAZY, SCIDX_BK_SYS, &cl0, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 600, 1400, - qflags | QFCF_LAZY, SCIDX_BK, &cl1, ptype)) != 0) + qflags | QFCF_LAZY, SCIDX_BK, &cl1, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 2400, 600, - qflags | QFCF_DEFAULTCLASS, SCIDX_BE, &cl2, ptype)) != 0) + qflags | QFCF_DEFAULTCLASS, SCIDX_BE, &cl2, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 2700, 600, - qflags | QFCF_LAZY, SCIDX_RD, &cl3, ptype)) != 0) + qflags | QFCF_LAZY, SCIDX_RD, &cl3, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 3000, 400, - qflags | QFCF_LAZY, SCIDX_OAM, &cl4, ptype)) != 0) + qflags | QFCF_LAZY, SCIDX_OAM, &cl4, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 8000, 1000, - qflags | QFCF_LAZY, SCIDX_AV, &cl5, ptype)) != 0) + qflags | QFCF_LAZY, SCIDX_AV, &cl5, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 15000, 1200, - qflags | QFCF_LAZY, SCIDX_RV, &cl6, ptype)) != 0) + qflags | QFCF_LAZY, SCIDX_RV, &cl6, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 20000, 1400, - qflags | QFCF_LAZY, SCIDX_VI, &cl7, ptype)) != 0) + qflags | QFCF_LAZY, SCIDX_VI, &cl7, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 23000, 200, - qflags | QFCF_LAZY, SCIDX_VO, &cl8, ptype)) != 0) + qflags | QFCF_LAZY, SCIDX_VO, &cl8, ptype)) != 0) { goto cleanup; + } if ((err = qfq_add_queue(qif, maxlen, 25000, 200, - qflags, SCIDX_CTL, &cl9, ptype)) != 0) + qflags, SCIDX_CTL, &cl9, ptype)) != 0) { goto cleanup; + } err = ifclassq_attach(ifq, PKTSCHEDT_QFQ, qif, qfq_enqueue_ifclassq, qfq_dequeue_ifclassq, NULL, @@ -1743,10 +1816,11 @@ qfq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, } cleanup: - if (err != 0) + if (err != 0) { (void) qfq_destroy_locked(qif); + } - return (err); + return err; } int @@ -1766,7 +1840,7 @@ qfq_teardown_ifclassq(struct ifclassq *ifq) ifq->ifcq_disc_slots[i].cl = NULL; } - return (ifclassq_detach(ifq)); + return ifclassq_detach(ifq); } int @@ -1778,11 +1852,12 @@ qfq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot, IFCQ_LOCK_ASSERT_HELD(ifq); VERIFY(ifq->ifcq_type == PKTSCHEDT_QFQ); - if (slot >= IFCQ_SC_MAX) - return (EINVAL); + if (slot >= 
IFCQ_SC_MAX) { + return EINVAL; + } - return (qfq_get_class_stats(qif, ifq->ifcq_disc_slots[slot].qid, - &ifqs->ifqs_qfq_stats)); + return qfq_get_class_stats(qif, ifq->ifcq_disc_slots[slot].qid, + &ifqs->ifqs_qfq_stats); } static int @@ -1796,11 +1871,12 @@ qfq_throttle(struct qfq_if *qif, cqrq_throttle_t *tr) if (!tr->set) { tr->level = qif->qif_throttle; - return (0); + return 0; } - if (tr->level == qif->qif_throttle) - return (EALREADY); + if (tr->level == qif->qif_throttle) { + return EALREADY; + } /* Current throttling levels only involve BK_SYS class */ cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl; @@ -1827,17 +1903,18 @@ qfq_throttle(struct qfq_if *qif, cqrq_throttle_t *tr) tr->level); } qif->qif_throttle = tr->level; - if (err != 0) + if (err != 0) { err = 0; - else + } else { qfq_purgeq(qif, cl, 0, NULL, NULL); + } } else { log(LOG_ERR, "%s: %s unable to set throttling level " "%d->%d [error=%d]\n", if_name(QFQIF_IFP(qif)), qfq_style(qif), qif->qif_throttle, tr->level, err); } - return (err); + return err; } static int @@ -1850,13 +1927,15 @@ qfq_resumeq(struct qfq_if *qif, struct qfq_class *cl) #endif IFCQ_LOCK_ASSERT_HELD(ifq); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE); + } - if (err == 0) + if (err == 0) { qstate(&cl->cl_q) = QS_RUNNING; + } - return (err); + return err; } static int @@ -1874,12 +1953,13 @@ qfq_suspendq(struct qfq_if *qif, struct qfq_class *cl) err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE); } else { VERIFY(cl->cl_flags & QFCF_LAZY); - err = ENXIO; /* delayed throttling */ + err = ENXIO; /* delayed throttling */ } } - if (err == 0 || err == ENXIO) + if (err == 0 || err == ENXIO) { qstate(&cl->cl_q) = QS_SUSPENDED; + } - return (err); + return err; } diff --git a/bsd/net/pktsched/pktsched_qfq.h b/bsd/net/pktsched/pktsched_qfq.h index 15ce5a323..475e16fc1 100644 --- a/bsd/net/pktsched/pktsched_qfq.h +++ b/bsd/net/pktsched/pktsched_qfq.h @@ -53,7 +53,7 @@ */ #ifndef _NET_PKTSCHED_PKTSCHED_QFQ_H_ -#define _NET_PKTSCHED_PKTSCHED_QFQ_H_ +#define _NET_PKTSCHED_PKTSCHED_QFQ_H_ #ifdef PRIVATE #include @@ -68,61 +68,61 @@ extern "C" { #endif /* qfq class flags */ -#define QFCF_RED 0x0001 /* use RED */ -#define QFCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ -#define QFCF_RIO 0x0004 /* use RIO */ -#define QFCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ -#define QFCF_BLUE 0x0100 /* use BLUE */ -#define QFCF_SFB 0x0200 /* use SFB */ -#define QFCF_FLOWCTL 0x0400 /* enable flow control advisories */ -#define QFCF_DEFAULTCLASS 0x1000 /* default class */ -#define QFCF_DELAYBASED 0x2000 /* queue sizing is delay based */ +#define QFCF_RED 0x0001 /* use RED */ +#define QFCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ +#define QFCF_RIO 0x0004 /* use RIO */ +#define QFCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ +#define QFCF_BLUE 0x0100 /* use BLUE */ +#define QFCF_SFB 0x0200 /* use SFB */ +#define QFCF_FLOWCTL 0x0400 /* enable flow control advisories */ +#define QFCF_DEFAULTCLASS 0x1000 /* default class */ +#define QFCF_DELAYBASED 0x2000 /* queue sizing is delay based */ #ifdef BSD_KERNEL_PRIVATE -#define QFCF_LAZY 0x10000000 /* on-demand resource allocation */ +#define QFCF_LAZY 0x10000000 /* on-demand resource allocation */ #endif /* BSD_KERNEL_PRIVATE */ -#define QFCF_USERFLAGS \ - (QFCF_RED | QFCF_ECN | QFCF_RIO | QFCF_CLEARDSCP | QFCF_BLUE | \ +#define QFCF_USERFLAGS \ + (QFCF_RED | QFCF_ECN | QFCF_RIO | QFCF_CLEARDSCP | QFCF_BLUE | \ QFCF_SFB | QFCF_FLOWCTL | 
QFCF_DEFAULTCLASS) #ifdef BSD_KERNEL_PRIVATE -#define QFCF_BITS \ +#define QFCF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" \ "\35LAZY" #else -#define QFCF_BITS \ +#define QFCF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" #endif /* !BSD_KERNEL_PRIVATE */ -#define QFQ_MAX_CLASSES 32 -#define QFQ_MAX_WSHIFT 16 /* log2(max_weight) */ -#define QFQ_MAX_WEIGHT (1 << QFQ_MAX_WSHIFT) +#define QFQ_MAX_CLASSES 32 +#define QFQ_MAX_WSHIFT 16 /* log2(max_weight) */ +#define QFQ_MAX_WEIGHT (1 << QFQ_MAX_WSHIFT) struct qfq_classstats { - u_int32_t class_handle; - u_int32_t index; - u_int32_t weight; - u_int32_t lmax; + u_int32_t class_handle; + u_int32_t index; + u_int32_t weight; + u_int32_t lmax; - u_int32_t qlength; - u_int32_t qlimit; - u_int32_t period; - struct pktcntr xmitcnt; /* transmitted packet counter */ - struct pktcntr dropcnt; /* dropped packet counter */ + u_int32_t qlength; + u_int32_t qlimit; + u_int32_t period; + struct pktcntr xmitcnt; /* transmitted packet counter */ + struct pktcntr dropcnt; /* dropped packet counter */ /* RED, RIO, BLUE, SFB related info */ - classq_type_t qtype; + classq_type_t qtype; union { /* RIO has 3 red stats */ - struct red_stats red[RIO_NDROPPREC]; - struct blue_stats blue; - struct sfb_stats sfb; + struct red_stats red[RIO_NDROPPREC]; + struct blue_stats blue; + struct sfb_stats sfb; }; - classq_state_t qstate; + classq_state_t qstate; }; #ifdef BSD_KERNEL_PRIVATE -#define QFQ_DEBUG 1 /* enable extra debugging */ +#define QFQ_DEBUG 1 /* enable extra debugging */ /* * Virtual time computations. @@ -174,69 +174,69 @@ struct qfq_classstats { * is below the MAX_INDEX region we use 0 (which is the same as * using a larger len). */ -#define QFQ_MAX_INDEX 19 -#define QFQ_MAX_WSUM (2 * QFQ_MAX_WEIGHT) +#define QFQ_MAX_INDEX 19 +#define QFQ_MAX_WSUM (2 * QFQ_MAX_WEIGHT) -#define QFQ_FRAC_BITS 30 /* fixed point arithmetic */ -#define QFQ_ONE_FP (1UL << QFQ_FRAC_BITS) -#define QFQ_IWSUM (QFQ_ONE_FP / QFQ_MAX_WSUM) +#define QFQ_FRAC_BITS 30 /* fixed point arithmetic */ +#define QFQ_ONE_FP (1UL << QFQ_FRAC_BITS) +#define QFQ_IWSUM (QFQ_ONE_FP / QFQ_MAX_WSUM) -#define QFQ_MTU_SHIFT 11 /* log2(max_len) */ -#define QFQ_MIN_SLOT_SHIFT (QFQ_FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX) +#define QFQ_MTU_SHIFT 11 /* log2(max_len) */ +#define QFQ_MIN_SLOT_SHIFT (QFQ_FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX) /* * Possible group states, also indexes for the bitmaps array in * struct qfq_if. 
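 * (Roughly: "eligible" means the group's start timestamp S does not
 * exceed the scheduler's virtual time V, and "backlogged" means the
 * group is blocked behind an eligible group with a smaller finish
 * time; see qfq_calc_state() in pktsched_qfq.c.)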
We rely on ER, IR, EB, IB being numbered 0..3 */ enum qfq_state { - ER = 0, /* eligible, ready */ - IR = 1, /* ineligible, ready */ - EB = 2, /* eligible, backlogged */ - IB = 3, /* ineligible, backlogged */ + ER = 0, /* eligible, ready */ + IR = 1, /* ineligible, ready */ + EB = 2, /* eligible, backlogged */ + IB = 3, /* ineligible, backlogged */ QFQ_MAX_STATE }; struct qfq_group; struct qfq_class { - u_int32_t cl_handle; /* class handle */ - class_queue_t cl_q; /* class queue structure */ - u_int32_t cl_qflags; /* class queue flags */ + u_int32_t cl_handle; /* class handle */ + class_queue_t cl_q; /* class queue structure */ + u_int32_t cl_qflags; /* class queue flags */ union { - void *ptr; - struct sfb *sfb; /* SFB state */ + void *ptr; + struct sfb *sfb; /* SFB state */ } cl_qalg; - struct qfq_if *cl_qif; /* back pointer to qif */ - u_int32_t cl_flags; /* class flags */ + struct qfq_if *cl_qif; /* back pointer to qif */ + u_int32_t cl_flags; /* class flags */ - u_int64_t cl_S, cl_F; /* flow timestamps (exact) */ - struct qfq_class *cl_next; /* link for the slot list */ + u_int64_t cl_S, cl_F; /* flow timestamps (exact) */ + struct qfq_class *cl_next; /* link for the slot list */ /* * Group we belong to. In principle we would need the index, * which is log_2(lmax/weight), but we never reference it * directly, only the group. */ struct qfq_group *cl_grp; - u_int32_t cl_inv_w; /* QFQ_ONE_FP/weight */ - u_int32_t cl_lmax; /* max packet size for this flow */ + u_int32_t cl_inv_w; /* QFQ_ONE_FP/weight */ + u_int32_t cl_lmax; /* max packet size for this flow */ /* statistics */ - u_int32_t cl_period; /* backlog period */ - struct pktcntr cl_xmitcnt; /* transmitted packet counter */ - struct pktcntr cl_dropcnt; /* dropped packet counter */ + u_int32_t cl_period; /* backlog period */ + struct pktcntr cl_xmitcnt; /* transmitted packet counter */ + struct pktcntr cl_dropcnt; /* dropped packet counter */ }; -#define cl_sfb cl_qalg.sfb +#define cl_sfb cl_qalg.sfb /* * Group descriptor, see the paper for details. * Basically this contains the bucket lists. 
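 * (In brief: qfg_slots is a circular array of qif_maxslots buckets of
 * classes, qfg_front indexes the head bucket, qfg_full_slots is a
 * bitmap of the non-empty buckets, and each bucket spans
 * 2^qfg_slot_shift units of virtual time.)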
*/ struct qfq_group { - u_int64_t qfg_S, qfg_F; /* group timestamps (approx) */ - u_int8_t qfg_slot_shift; /* slot shift */ - u_int8_t qfg_index; /* group index */ - u_int8_t qfg_front; /* index of the front slot */ + u_int64_t qfg_S, qfg_F; /* group timestamps (approx) */ + u_int8_t qfg_slot_shift; /* slot shift */ + u_int8_t qfg_index; /* group index */ + u_int8_t qfg_front; /* index of the front slot */ pktsched_bitmap_t qfg_full_slots; /* non-empty slots */ /* array of lists of active classes */ @@ -247,26 +247,26 @@ struct qfq_group { * qfq interface state */ struct qfq_if { - struct ifclassq *qif_ifq; /* backpointer to ifclassq */ - u_int32_t qif_throttle; /* throttling level */ - u_int8_t qif_classes; /* # of classes in table */ - u_int8_t qif_maxclasses; /* max # of classes in table */ - u_int8_t qif_maxslots; /* max # of slots */ - struct qfq_class *qif_default; /* default class */ - struct qfq_class **qif_class_tbl; + struct ifclassq *qif_ifq; /* backpointer to ifclassq */ + u_int32_t qif_throttle; /* throttling level */ + u_int8_t qif_classes; /* # of classes in table */ + u_int8_t qif_maxclasses; /* max # of classes in table */ + u_int8_t qif_maxslots; /* max # of slots */ + struct qfq_class *qif_default; /* default class */ + struct qfq_class **qif_class_tbl; - u_int64_t qif_V; /* precise virtual time */ - u_int32_t qif_wsum; /* weight sum */ + u_int64_t qif_V; /* precise virtual time */ + u_int32_t qif_wsum; /* weight sum */ #if QFQ_DEBUG - u_int32_t qif_i_wsum; /* QFQ_ONE_FP/w_sum */ - u_int32_t qif_queued; /* debugging */ - u_int32_t qif_emptygrp; /* debugging */ + u_int32_t qif_i_wsum; /* QFQ_ONE_FP/w_sum */ + u_int32_t qif_queued; /* debugging */ + u_int32_t qif_emptygrp; /* debugging */ #endif /* QFQ_DEBUG */ - pktsched_bitmap_t qif_bitmaps[QFQ_MAX_STATE]; /* group bitmaps */ - struct qfq_group **qif_groups; /* the groups */ + pktsched_bitmap_t qif_bitmaps[QFQ_MAX_STATE]; /* group bitmaps */ + struct qfq_group **qif_groups; /* the groups */ }; -#define QFQIF_IFP(_qif) ((_qif)->qif_ifq->ifcq_ifp) +#define QFQIF_IFP(_qif) ((_qif)->qif_ifq->ifcq_ifp) struct if_ifclassq_stats; diff --git a/bsd/net/pktsched/pktsched_rmclass.h b/bsd/net/pktsched/pktsched_rmclass.h index b467fa835..cb1c30ce4 100644 --- a/bsd/net/pktsched/pktsched_rmclass.h +++ b/bsd/net/pktsched/pktsched_rmclass.h @@ -63,7 +63,7 @@ */ #ifndef _NET_PKTSCHED_PKTSCHED_RMCLASS_H_ -#define _NET_PKTSCHED_PKTSCHED_RMCLASS_H_ +#define _NET_PKTSCHED_PKTSCHED_RMCLASS_H_ #ifdef PRIVATE #include @@ -73,23 +73,23 @@ extern "C" { #endif -#define RM_MAXPRIO 8 /* Max priority */ +#define RM_MAXPRIO 8 /* Max priority */ /* flags for rmc_init and rmc_newclass */ /* class flags */ -#define RMCF_RED 0x0001 /* use RED */ -#define RMCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ -#define RMCF_RIO 0x0004 /* use RIO */ -#define RMCF_FLOWVALVE 0x0008 /* use flowvalve (aka penalty-box) */ -#define RMCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ +#define RMCF_RED 0x0001 /* use RED */ +#define RMCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ +#define RMCF_RIO 0x0004 /* use RIO */ +#define RMCF_FLOWVALVE 0x0008 /* use flowvalve (aka penalty-box) */ +#define RMCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ /* flags for rmc_init */ -#define RMCF_WRR 0x0100 -#define RMCF_EFFICIENT 0x0200 +#define RMCF_WRR 0x0100 +#define RMCF_EFFICIENT 0x0200 -#define RMCF_BLUE 0x10000 /* use BLUE */ -#define RMCF_SFB 0x20000 /* use SFB */ -#define RMCF_FLOWCTL 0x40000 /* enable flow control advisories */ +#define RMCF_BLUE 0x10000 /* use BLUE */ 
+#define RMCF_SFB 0x20000 /* use SFB */ +#define RMCF_FLOWCTL 0x40000 /* enable flow control advisories */ #ifdef __cplusplus } diff --git a/bsd/net/pktsched/pktsched_tcq.c b/bsd/net/pktsched/pktsched_tcq.c index 01a6c7cc9..d59bf0d5e 100644 --- a/bsd/net/pktsched/pktsched_tcq.c +++ b/bsd/net/pktsched/pktsched_tcq.c @@ -77,22 +77,22 @@ static void tcq_dequeue_cl(struct tcq_if *, struct tcq_class *, static inline struct tcq_class *tcq_clh_to_clp(struct tcq_if *, u_int32_t); static const char *tcq_style(struct tcq_if *); -#define TCQ_ZONE_MAX 32 /* maximum elements in zone */ -#define TCQ_ZONE_NAME "pktsched_tcq" /* zone name */ +#define TCQ_ZONE_MAX 32 /* maximum elements in zone */ +#define TCQ_ZONE_NAME "pktsched_tcq" /* zone name */ -static unsigned int tcq_size; /* size of zone element */ -static struct zone *tcq_zone; /* zone for tcq */ +static unsigned int tcq_size; /* size of zone element */ +static struct zone *tcq_zone; /* zone for tcq */ -#define TCQ_CL_ZONE_MAX 32 /* maximum elements in zone */ -#define TCQ_CL_ZONE_NAME "pktsched_tcq_cl" /* zone name */ +#define TCQ_CL_ZONE_MAX 32 /* maximum elements in zone */ +#define TCQ_CL_ZONE_NAME "pktsched_tcq_cl" /* zone name */ -static unsigned int tcq_cl_size; /* size of zone element */ -static struct zone *tcq_cl_zone; /* zone for tcq_class */ +static unsigned int tcq_cl_size; /* size of zone element */ +static struct zone *tcq_cl_zone; /* zone for tcq_class */ void tcq_init(void) { - tcq_size = sizeof (struct tcq_if); + tcq_size = sizeof(struct tcq_if); tcq_zone = zinit(tcq_size, TCQ_ZONE_MAX * tcq_size, 0, TCQ_ZONE_NAME); if (tcq_zone == NULL) { @@ -102,7 +102,7 @@ tcq_init(void) zone_change(tcq_zone, Z_EXPAND, TRUE); zone_change(tcq_zone, Z_CALLERACCT, TRUE); - tcq_cl_size = sizeof (struct tcq_class); + tcq_cl_size = sizeof(struct tcq_class); tcq_cl_zone = zinit(tcq_cl_size, TCQ_CL_ZONE_MAX * tcq_cl_size, 0, TCQ_CL_ZONE_NAME); if (tcq_cl_zone == NULL) { @@ -116,11 +116,12 @@ tcq_init(void) struct tcq_if * tcq_alloc(struct ifnet *ifp, int how) { - struct tcq_if *tif; + struct tcq_if *tif; tif = (how == M_WAITOK) ? 
zalloc(tcq_zone) : zalloc_noblock(tcq_zone); - if (tif == NULL) - return (NULL); + if (tif == NULL) { + return NULL; + } bzero(tif, tcq_size); tif->tif_maxpri = -1; @@ -131,7 +132,7 @@ tcq_alloc(struct ifnet *ifp, int how) if_name(ifp), tcq_style(tif)); } - return (tif); + return tif; } int @@ -144,7 +145,7 @@ tcq_destroy(struct tcq_if *tif) err = tcq_destroy_locked(tif); IFCQ_UNLOCK(ifq); - return (err); + return err; } static int @@ -161,7 +162,7 @@ tcq_destroy_locked(struct tcq_if *tif) zfree(tcq_zone, tif); - return (0); + return 0; } /* @@ -171,17 +172,19 @@ tcq_destroy_locked(struct tcq_if *tif) static int tcq_clear_interface(struct tcq_if *tif) { - struct tcq_class *cl; + struct tcq_class *cl; int pri; IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); /* clear out the classes */ - for (pri = 0; pri <= tif->tif_maxpri; pri++) - if ((cl = tif->tif_classes[pri]) != NULL) + for (pri = 0; pri <= tif->tif_maxpri; pri++) { + if ((cl = tif->tif_classes[pri]) != NULL) { tcq_class_destroy(tif, cl); + } + } - return (0); + return 0; } /* discard all the queued packets on the interface */ @@ -194,8 +197,9 @@ tcq_purge(struct tcq_if *tif) IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); for (pri = 0; pri <= tif->tif_maxpri; pri++) { - if ((cl = tif->tif_classes[pri]) != NULL && !qempty(&cl->cl_q)) + if ((cl = tif->tif_classes[pri]) != NULL && !qempty(&cl->cl_q)) { tcq_purgeq(tif, cl, 0, NULL, NULL); + } } VERIFY(IFCQ_LEN(tif->tif_ifq) == 0); } @@ -240,9 +244,11 @@ tcq_event(struct tcq_if *tif, cqev_t ev) IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - for (pri = 0; pri <= tif->tif_maxpri; pri++) - if ((cl = tif->tif_classes[pri]) != NULL) + for (pri = 0; pri <= tif->tif_maxpri; pri++) { + if ((cl = tif->tif_classes[pri]) != NULL) { tcq_updateq(tif, cl, ev); + } + } } int @@ -254,21 +260,26 @@ tcq_add_queue(struct tcq_if *tif, int priority, u_int32_t qlimit, IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); /* check parameters */ - if (priority >= TCQ_MAXPRI) - return (EINVAL); - if (tif->tif_classes[priority] != NULL) - return (EBUSY); - if (tcq_clh_to_clp(tif, qid) != NULL) - return (EBUSY); + if (priority >= TCQ_MAXPRI) { + return EINVAL; + } + if (tif->tif_classes[priority] != NULL) { + return EBUSY; + } + if (tcq_clh_to_clp(tif, qid) != NULL) { + return EBUSY; + } cl = tcq_class_create(tif, priority, qlimit, flags, qid, ptype); - if (cl == NULL) - return (ENOMEM); + if (cl == NULL) { + return ENOMEM; + } - if (clp != NULL) + if (clp != NULL) { *clp = cl; + } - return (0); + return 0; } static struct tcq_class * @@ -286,36 +297,42 @@ tcq_class_create(struct tcq_if *tif, int pri, u_int32_t qlimit, if ((cl = tif->tif_classes[pri]) != NULL) { /* modify the class instead of creating a new one */ - if (!qempty(&cl->cl_q)) + if (!qempty(&cl->cl_q)) { tcq_purgeq(tif, cl, 0, NULL, NULL); + } - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { sfb_destroy(cl->cl_sfb); + } cl->cl_qalg.ptr = NULL; qtype(&cl->cl_q) = Q_DROPTAIL; qstate(&cl->cl_q) = QS_RUNNING; VERIFY(qptype(&cl->cl_q) == ptype); } else { cl = zalloc(tcq_cl_zone); - if (cl == NULL) - return (NULL); + if (cl == NULL) { + return NULL; + } bzero(cl, tcq_cl_size); } tif->tif_classes[pri] = cl; - if (flags & TQCF_DEFAULTCLASS) + if (flags & TQCF_DEFAULTCLASS) { tif->tif_default = cl; + } if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) { qlimit = IFCQ_MAXLEN(ifq); - if (qlimit == 0) + if (qlimit == 0) { qlimit = DEFAULT_QLIMIT; /* use default */ + } } _qinit(&cl->cl_q, Q_DROPTAIL, qlimit, ptype); cl->cl_flags = flags; cl->cl_pri = pri; - if (pri 
> tif->tif_maxpri) + if (pri > tif->tif_maxpri) { tif->tif_maxpri = pri; + } cl->cl_tif = tif; cl->cl_handle = qid; @@ -330,11 +347,13 @@ tcq_class_create(struct tcq_if *tif, int pri, u_int32_t qlimit, if (flags & TQCF_DELAYBASED) { cl->cl_qflags |= SFBF_DELAYBASED; } - if (!(cl->cl_flags & TQCF_LAZY)) + if (!(cl->cl_flags & TQCF_LAZY)) { cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle, qlimit(&cl->cl_q), cl->cl_qflags); - if (cl->cl_sfb != NULL || (cl->cl_flags & TQCF_LAZY)) + } + if (cl->cl_sfb != NULL || (cl->cl_flags & TQCF_LAZY)) { qtype(&cl->cl_q) = Q_SFB; + } } if (pktsched_verbose) { @@ -343,7 +362,7 @@ tcq_class_create(struct tcq_if *tif, int pri, u_int32_t qlimit, cl->cl_handle, cl->cl_pri, qlimit, flags, TQCF_BITS); } - return (cl); + return cl; } int @@ -353,10 +372,11 @@ tcq_remove_queue(struct tcq_if *tif, u_int32_t qid) IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - if ((cl = tcq_clh_to_clp(tif, qid)) == NULL) - return (EINVAL); + if ((cl = tcq_clh_to_clp(tif, qid)) == NULL) { + return EINVAL; + } - return (tcq_class_destroy(tif, cl)); + return tcq_class_destroy(tif, cl); } static int @@ -369,26 +389,31 @@ tcq_class_destroy(struct tcq_if *tif, struct tcq_class *cl) #endif IFCQ_LOCK_ASSERT_HELD(ifq); - if (!qempty(&cl->cl_q)) + if (!qempty(&cl->cl_q)) { tcq_purgeq(tif, cl, 0, NULL, NULL); + } tif->tif_classes[cl->cl_pri] = NULL; if (tif->tif_maxpri == cl->cl_pri) { - for (pri = cl->cl_pri; pri >= 0; pri--) + for (pri = cl->cl_pri; pri >= 0; pri--) { if (tif->tif_classes[pri] != NULL) { tif->tif_maxpri = pri; break; } - if (pri < 0) + } + if (pri < 0) { tif->tif_maxpri = -1; + } } - if (tif->tif_default == cl) + if (tif->tif_default == cl) { tif->tif_default = NULL; + } if (cl->cl_qalg.ptr != NULL) { - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { sfb_destroy(cl->cl_sfb); + } cl->cl_qalg.ptr = NULL; qtype(&cl->cl_q) = Q_DROPTAIL; qstate(&cl->cl_q) = QS_RUNNING; @@ -401,7 +426,7 @@ tcq_class_destroy(struct tcq_if *tif, struct tcq_class *cl) } zfree(tcq_cl_zone, cl); - return (0); + return 0; } int @@ -420,7 +445,7 @@ tcq_enqueue(struct tcq_if *tif, struct tcq_class *cl, pktsched_pkt_t *pkt, cl = tif->tif_default; if (cl == NULL) { IFCQ_CONVERT_LOCK(ifq); - return (CLASSQEQ_DROP); + return CLASSQEQ_DROP; } } } @@ -435,13 +460,13 @@ tcq_enqueue(struct tcq_if *tif, struct tcq_class *cl, pktsched_pkt_t *pkt, ret == CLASSQEQ_DROP_SP); PKTCNTR_ADD(&cl->cl_dropcnt, 1, len); IFCQ_DROP_ADD(ifq, 1, len); - return (ret); + return ret; } IFCQ_INC_LEN(ifq); IFCQ_INC_BYTES(ifq, len); /* successfully queued. 
*/ - return (ret); + return ret; } /* @@ -485,8 +510,9 @@ tcq_dequeue_cl(struct tcq_if *tif, struct tcq_class *cl, mbuf_svc_class_t sc, len = pktsched_get_pkt_len(pkt); IFCQ_DEC_LEN(ifq); IFCQ_DEC_BYTES(ifq, len); - if (qempty(&cl->cl_q)) + if (qempty(&cl->cl_q)) { cl->cl_period++; + } PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len); IFCQ_XMIT_ADD(ifq, 1, len); } @@ -526,32 +552,35 @@ tcq_addq(struct tcq_class *cl, pktsched_pkt_t *pkt, struct pf_mtag *t) cqrq_throttle_t tr = { 1, tif->tif_throttle }; int err = tcq_throttle(tif, &tr); - if (err == EALREADY) + if (err == EALREADY) { err = 0; + } if (err != 0) { tr.level = IFNET_THROTTLE_OFF; (void) tcq_throttle(tif, &tr); } } } - if (cl->cl_sfb != NULL) - return (sfb_addq(cl->cl_sfb, &cl->cl_q, pkt, t)); + if (cl->cl_sfb != NULL) { + return sfb_addq(cl->cl_sfb, &cl->cl_q, pkt, t); + } } else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) { IFCQ_CONVERT_LOCK(ifq); - return (CLASSQEQ_DROP); + return CLASSQEQ_DROP; } #if PF_ECN - if (cl->cl_flags & TQCF_CLEARDSCP) + if (cl->cl_flags & TQCF_CLEARDSCP) { /* not supported for non-BSD stack packets */ VERIFY(pkt->pktsched_ptype == QP_MBUF); - write_dsfield(m, t, 0); + } + write_dsfield(m, t, 0); #endif /* PF_ECN */ VERIFY(pkt->pktsched_ptype == qptype(&cl->cl_q)); _addq(&cl->cl_q, pkt->pktsched_pkt); - return (0); + return 0; } static inline void @@ -560,10 +589,10 @@ tcq_getq(struct tcq_class *cl, pktsched_pkt_t *pkt) IFCQ_LOCK_ASSERT_HELD(cl->cl_tif->tif_ifq); if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { - return (sfb_getq(cl->cl_sfb, &cl->cl_q, pkt)); + return sfb_getq(cl->cl_sfb, &cl->cl_q, pkt); } - return (pktsched_pkt_encap(pkt, qptype(&cl->cl_q), _getq(&cl->cl_q))); + return pktsched_pkt_encap(pkt, qptype(&cl->cl_q), _getq(&cl->cl_q)); } static void @@ -575,14 +604,16 @@ tcq_purgeq(struct tcq_if *tif, struct tcq_class *cl, u_int32_t flow, IFCQ_LOCK_ASSERT_HELD(ifq); - if ((qlen = qlen(&cl->cl_q)) == 0) + if ((qlen = qlen(&cl->cl_q)) == 0) { goto done; + } IFCQ_CONVERT_LOCK(ifq); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len); - else + } else { _flushq_flow(&cl->cl_q, flow, &cnt, &len); + } if (cnt > 0) { VERIFY(qlen(&cl->cl_q) == (qlen - cnt)); @@ -602,10 +633,12 @@ tcq_purgeq(struct tcq_if *tif, struct tcq_class *cl, u_int32_t flow, } } done: - if (packets != NULL) + if (packets != NULL) { *packets = cnt; - if (bytes != NULL) + } + if (bytes != NULL) { *bytes = len; + } } static void @@ -619,8 +652,9 @@ tcq_updateq(struct tcq_if *tif, struct tcq_class *cl, cqev_t ev) cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev)); } - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) - return (sfb_updateq(cl->cl_sfb, ev)); + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { + return sfb_updateq(cl->cl_sfb, ev); + } } int @@ -631,8 +665,9 @@ tcq_get_class_stats(struct tcq_if *tif, u_int32_t qid, IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - if ((cl = tcq_clh_to_clp(tif, qid)) == NULL) - return (EINVAL); + if ((cl = tcq_clh_to_clp(tif, qid)) == NULL) { + return EINVAL; + } sp->class_handle = cl->cl_handle; sp->priority = cl->cl_pri; @@ -645,10 +680,11 @@ tcq_get_class_stats(struct tcq_if *tif, u_int32_t qid, sp->qtype = qtype(&cl->cl_q); sp->qstate = qstate(&cl->cl_q); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { sfb_getstats(cl->cl_sfb, &sp->sfb); + } - return (0); + return 0; } static int @@ -669,7 +705,7 @@ tcq_stat_sc(struct tcq_if *tif, cqrq_stat_sc_t *sr) sr->packets 
= qlen(&cl->cl_q); sr->bytes = qsize(&cl->cl_q); - return (0); + return 0; } /* convert a class handle to the corresponding class pointer */ @@ -681,19 +717,21 @@ tcq_clh_to_clp(struct tcq_if *tif, u_int32_t chandle) IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq); - for (idx = tif->tif_maxpri; idx >= 0; idx--) + for (idx = tif->tif_maxpri; idx >= 0; idx--) { if ((cl = tif->tif_classes[idx]) != NULL && - cl->cl_handle == chandle) - return (cl); + cl->cl_handle == chandle) { + return cl; + } + } - return (NULL); + return NULL; } static const char * tcq_style(struct tcq_if *tif) { #pragma unused(tif) - return ("TCQ"); + return "TCQ"; } /* @@ -720,7 +758,7 @@ tcq_enqueue_ifclassq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, IFCQ_CONVERT_LOCK(ifq); m_freem(m); *pdrop = TRUE; - return (ENOBUFS); + return ENOBUFS; } t = m_pftag(m); i = MBUF_SCIDX(mbuf_get_service_class(m)); @@ -758,7 +796,7 @@ tcq_enqueue_ifclassq(struct ifclassq *ifq, void *p, classq_pkt_type_t ptype, default: VERIFY(0); } - return (ret); + return ret; } /* @@ -779,16 +817,16 @@ tcq_dequeue_tc_ifclassq(struct ifclassq *ifq, mbuf_svc_class_t sc, VERIFY((u_int32_t)i < IFCQ_SC_MAX); - bzero(&pkt, sizeof (pkt)); + bzero(&pkt, sizeof(pkt)); (tcq_dequeue_cl(ifq->ifcq_disc, ifq->ifcq_disc_slots[i].cl, sc, &pkt)); *ptype = pkt.pktsched_ptype; - return (pkt.pktsched_pkt); + return pkt.pktsched_pkt; } static int tcq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg) { - struct tcq_if *tif = (struct tcq_if *)ifq->ifcq_disc; + struct tcq_if *tif = (struct tcq_if *)ifq->ifcq_disc; int err = 0; IFCQ_LOCK_ASSERT_HELD(ifq); @@ -814,7 +852,7 @@ tcq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg) err = tcq_stat_sc(tif, (cqrq_stat_sc_t *)arg); break; } - return (err); + return err; } int @@ -831,37 +869,47 @@ tcq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, VERIFY(ifq->ifcq_disc == NULL); VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE); - if (flags & PKTSCHEDF_QALG_SFB) + if (flags & PKTSCHEDF_QALG_SFB) { qflags |= TQCF_SFB; - if (flags & PKTSCHEDF_QALG_ECN) + } + if (flags & PKTSCHEDF_QALG_ECN) { qflags |= TQCF_ECN; - if (flags & PKTSCHEDF_QALG_FLOWCTL) + } + if (flags & PKTSCHEDF_QALG_FLOWCTL) { qflags |= TQCF_FLOWCTL; - if (flags & PKTSCHEDF_QALG_DELAYBASED) + } + if (flags & PKTSCHEDF_QALG_DELAYBASED) { qflags |= TQCF_DELAYBASED; + } tif = tcq_alloc(ifp, M_WAITOK); - if (tif == NULL) - return (ENOMEM); + if (tif == NULL) { + return ENOMEM; + } - if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) + if ((maxlen = IFCQ_MAXLEN(ifq)) == 0) { maxlen = if_sndq_maxlen; + } if ((err = tcq_add_queue(tif, 0, maxlen, - qflags | TQCF_LAZY, SCIDX_BK, &cl0, ptype)) != 0) + qflags | TQCF_LAZY, SCIDX_BK, &cl0, ptype)) != 0) { goto cleanup; + } if ((err = tcq_add_queue(tif, 1, maxlen, - qflags | TQCF_DEFAULTCLASS, SCIDX_BE, &cl1, ptype)) != 0) + qflags | TQCF_DEFAULTCLASS, SCIDX_BE, &cl1, ptype)) != 0) { goto cleanup; + } if ((err = tcq_add_queue(tif, 2, maxlen, - qflags | TQCF_LAZY, SCIDX_VI, &cl2, ptype)) != 0) + qflags | TQCF_LAZY, SCIDX_VI, &cl2, ptype)) != 0) { goto cleanup; + } if ((err = tcq_add_queue(tif, 3, maxlen, - qflags, SCIDX_VO, &cl3, ptype)) != 0) + qflags, SCIDX_VO, &cl3, ptype)) != 0) { goto cleanup; + } err = ifclassq_attach(ifq, PKTSCHEDT_TCQ, tif, tcq_enqueue_ifclassq, NULL, tcq_dequeue_tc_ifclassq, @@ -905,10 +953,11 @@ tcq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags, } cleanup: - if (err != 0) + if (err != 0) { (void) tcq_destroy_locked(tif); + } - return (err); + return err; } int @@ -928,7 +977,7 @@ 
tcq_teardown_ifclassq(struct ifclassq *ifq) ifq->ifcq_disc_slots[i].cl = NULL; } - return (ifclassq_detach(ifq)); + return ifclassq_detach(ifq); } int @@ -940,11 +989,12 @@ tcq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot, IFCQ_LOCK_ASSERT_HELD(ifq); VERIFY(ifq->ifcq_type == PKTSCHEDT_TCQ); - if (slot >= IFCQ_SC_MAX) - return (EINVAL); + if (slot >= IFCQ_SC_MAX) { + return EINVAL; + } - return (tcq_get_class_stats(tif, ifq->ifcq_disc_slots[slot].qid, - &ifqs->ifqs_tcq_stats)); + return tcq_get_class_stats(tif, ifq->ifcq_disc_slots[slot].qid, + &ifqs->ifqs_tcq_stats); } static int @@ -958,11 +1008,12 @@ tcq_throttle(struct tcq_if *tif, cqrq_throttle_t *tr) if (!tr->set) { tr->level = tif->tif_throttle; - return (0); + return 0; } - if (tr->level == tif->tif_throttle) - return (EALREADY); + if (tr->level == tif->tif_throttle) { + return EALREADY; + } /* Current throttling levels only involve BK_SYS class */ cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl; @@ -989,17 +1040,18 @@ tcq_throttle(struct tcq_if *tif, cqrq_throttle_t *tr) tr->level); } tif->tif_throttle = tr->level; - if (err != 0) + if (err != 0) { err = 0; - else + } else { tcq_purgeq(tif, cl, 0, NULL, NULL); + } } else { log(LOG_ERR, "%s: %s unable to set throttling level " "%d->%d [error=%d]\n", if_name(TCQIF_IFP(tif)), tcq_style(tif), tif->tif_throttle, tr->level, err); } - return (err); + return err; } static int @@ -1012,13 +1064,15 @@ tcq_resumeq(struct tcq_if *tif, struct tcq_class *cl) #endif IFCQ_LOCK_ASSERT_HELD(ifq); - if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) + if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL) { err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE); + } - if (err == 0) + if (err == 0) { qstate(&cl->cl_q) = QS_RUNNING; + } - return (err); + return err; } static int @@ -1036,12 +1090,13 @@ tcq_suspendq(struct tcq_if *tif, struct tcq_class *cl) err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE); } else { VERIFY(cl->cl_flags & TQCF_LAZY); - err = ENXIO; /* delayed throttling */ + err = ENXIO; /* delayed throttling */ } } - if (err == 0 || err == ENXIO) + if (err == 0 || err == ENXIO) { qstate(&cl->cl_q) = QS_SUSPENDED; + } - return (err); + return err; } diff --git a/bsd/net/pktsched/pktsched_tcq.h b/bsd/net/pktsched/pktsched_tcq.h index 91c2f71e8..1939b72a4 100644 --- a/bsd/net/pktsched/pktsched_tcq.h +++ b/bsd/net/pktsched/pktsched_tcq.h @@ -27,7 +27,7 @@ */ #ifndef _NET_PKTSCHED_PKTSCHED_TCQ_H_ -#define _NET_PKTSCHED_PKTSCHED_TCQ_H_ +#define _NET_PKTSCHED_PKTSCHED_TCQ_H_ #ifdef PRIVATE #include @@ -41,89 +41,89 @@ extern "C" { #endif -#define TCQ_MAXPRI 4 /* upper limit of the number of priorities */ +#define TCQ_MAXPRI 4 /* upper limit of the number of priorities */ /* tcq class flags */ -#define TQCF_RED 0x0001 /* use RED */ -#define TQCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ -#define TQCF_RIO 0x0004 /* use RIO */ -#define TQCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ -#define TQCF_BLUE 0x0100 /* use BLUE */ -#define TQCF_SFB 0x0200 /* use SFB */ -#define TQCF_FLOWCTL 0x0400 /* enable flow control advisories */ -#define TQCF_DEFAULTCLASS 0x1000 /* default class */ -#define TQCF_DELAYBASED 0x2000 /* queue sizing is delay based */ +#define TQCF_RED 0x0001 /* use RED */ +#define TQCF_ECN 0x0002 /* use ECN with RED/BLUE/SFB */ +#define TQCF_RIO 0x0004 /* use RIO */ +#define TQCF_CLEARDSCP 0x0010 /* clear diffserv codepoint */ +#define TQCF_BLUE 0x0100 /* use BLUE */ +#define TQCF_SFB 0x0200 /* use SFB */ +#define TQCF_FLOWCTL 0x0400 /* enable flow control advisories */ +#define 
TQCF_DEFAULTCLASS 0x1000 /* default class */ +#define TQCF_DELAYBASED 0x2000 /* queue sizing is delay based */ #ifdef BSD_KERNEL_PRIVATE -#define TQCF_LAZY 0x10000000 /* on-demand resource allocation */ +#define TQCF_LAZY 0x10000000 /* on-demand resource allocation */ #endif /* BSD_KERNEL_PRIVATE */ -#define TQCF_USERFLAGS \ - (TQCF_RED | TQCF_ECN | TQCF_RIO | TQCF_CLEARDSCP | TQCF_BLUE | \ +#define TQCF_USERFLAGS \ + (TQCF_RED | TQCF_ECN | TQCF_RIO | TQCF_CLEARDSCP | TQCF_BLUE | \ TQCF_SFB | TQCF_FLOWCTL | TQCF_DEFAULTCLASS) #ifdef BSD_KERNEL_PRIVATE -#define TQCF_BITS \ +#define TQCF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL\15DEFAULT" \ "\35LAZY" #else -#define TQCF_BITS \ +#define TQCF_BITS \ "\020\1RED\2ECN\3RIO\5CLEARDSCP\11BLUE\12SFB\13FLOWCTL" #endif /* !BSD_KERNEL_PRIVATE */ struct tcq_classstats { - u_int32_t class_handle; - u_int32_t priority; + u_int32_t class_handle; + u_int32_t priority; - u_int32_t qlength; - u_int32_t qlimit; - u_int32_t period; - struct pktcntr xmitcnt; /* transmitted packet counter */ - struct pktcntr dropcnt; /* dropped packet counter */ + u_int32_t qlength; + u_int32_t qlimit; + u_int32_t period; + struct pktcntr xmitcnt; /* transmitted packet counter */ + struct pktcntr dropcnt; /* dropped packet counter */ /* RED, RIO, BLUE, SFB related info */ - classq_type_t qtype; + classq_type_t qtype; union { /* RIO has 3 red stats */ - struct red_stats red[RIO_NDROPPREC]; - struct blue_stats blue; - struct sfb_stats sfb; + struct red_stats red[RIO_NDROPPREC]; + struct blue_stats blue; + struct sfb_stats sfb; }; - classq_state_t qstate; + classq_state_t qstate; }; #ifdef BSD_KERNEL_PRIVATE struct tcq_class { - u_int32_t cl_handle; /* class handle */ - class_queue_t cl_q; /* class queue structure */ - u_int32_t cl_qflags; /* class queue flags */ + u_int32_t cl_handle; /* class handle */ + class_queue_t cl_q; /* class queue structure */ + u_int32_t cl_qflags; /* class queue flags */ union { - void *ptr; - struct sfb *sfb; /* SFB state */ + void *ptr; + struct sfb *sfb; /* SFB state */ } cl_qalg; - int32_t cl_pri; /* priority */ - u_int32_t cl_flags; /* class flags */ - struct tcq_if *cl_tif; /* back pointer to tif */ + int32_t cl_pri; /* priority */ + u_int32_t cl_flags; /* class flags */ + struct tcq_if *cl_tif; /* back pointer to tif */ /* statistics */ - u_int32_t cl_period; /* backlog period */ - struct pktcntr cl_xmitcnt; /* transmitted packet counter */ - struct pktcntr cl_dropcnt; /* dropped packet counter */ + u_int32_t cl_period; /* backlog period */ + struct pktcntr cl_xmitcnt; /* transmitted packet counter */ + struct pktcntr cl_dropcnt; /* dropped packet counter */ }; -#define cl_sfb cl_qalg.sfb +#define cl_sfb cl_qalg.sfb /* * tcq interface state */ struct tcq_if { - struct ifclassq *tif_ifq; /* backpointer to ifclassq */ - int tif_maxpri; /* max priority in use */ - u_int32_t tif_throttle; /* throttling level */ - struct tcq_class *tif_default; /* default class */ - struct tcq_class *tif_classes[TCQ_MAXPRI]; /* classes */ + struct ifclassq *tif_ifq; /* backpointer to ifclassq */ + int tif_maxpri; /* max priority in use */ + u_int32_t tif_throttle; /* throttling level */ + struct tcq_class *tif_default; /* default class */ + struct tcq_class *tif_classes[TCQ_MAXPRI]; /* classes */ }; -#define TCQIF_IFP(_tif) ((_tif)->tif_ifq->ifcq_ifp) +#define TCQIF_IFP(_tif) ((_tif)->tif_ifq->ifcq_ifp) struct if_ifclassq_stats; diff --git a/bsd/net/ppp_comp.h b/bsd/net/ppp_comp.h index 67af45e90..f9f7ca6d9 100644 --- a/bsd/net/ppp_comp.h +++ 
b/bsd/net/ppp_comp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -61,51 +61,51 @@ * various compression methods. */ #ifndef DO_BSD_COMPRESS -#define DO_BSD_COMPRESS 1 /* by default, include BSD-Compress */ +#define DO_BSD_COMPRESS 1 /* by default, include BSD-Compress */ #endif #ifndef DO_DEFLATE -#define DO_DEFLATE 1 /* by default, include Deflate */ +#define DO_DEFLATE 1 /* by default, include Deflate */ #endif -#define DO_PREDICTOR_1 0 -#define DO_PREDICTOR_2 0 +#define DO_PREDICTOR_1 0 +#define DO_PREDICTOR_2 0 /* * Structure giving methods for compression/decompression. */ #if PACKETPTR struct compressor { - int compress_proto; /* CCP compression protocol number */ + int compress_proto; /* CCP compression protocol number */ /* Allocate space for a compressor (transmit side) */ - void *(*comp_alloc)(u_char *options, int opt_len); + void *(*comp_alloc)(u_char *options, int opt_len); /* Free space used by a compressor */ - void (*comp_free)(void *state); + void (*comp_free)(void *state); /* Initialize a compressor */ - int (*comp_init)(void *state, u_char *options, int opt_len, - int unit, int hdrlen, int debug); + int (*comp_init)(void *state, u_char *options, int opt_len, + int unit, int hdrlen, int debug); /* Reset a compressor */ - void (*comp_reset)(void *state); + void (*comp_reset)(void *state); /* Compress a packet */ - int (*compress)(void *state, PACKETPTR *mret, - PACKETPTR mp, int orig_len, int max_len); + int (*compress)(void *state, PACKETPTR *mret, + PACKETPTR mp, int orig_len, int max_len); /* Return compression statistics */ - void (*comp_stat)(void *state, struct compstat *stats); + void (*comp_stat)(void *state, struct compstat *stats); /* Allocate space for a decompressor (receive side) */ - void *(*decomp_alloc)(u_char *options, int opt_len); + void *(*decomp_alloc)(u_char *options, int opt_len); /* Free space used by a decompressor */ - void (*decomp_free)(void *state); + void (*decomp_free)(void *state); /* Initialize a decompressor */ - int (*decomp_init)(void *state, u_char *options, int opt_len, - int unit, int hdrlen, int mru, int debug); + int (*decomp_init)(void *state, u_char *options, int opt_len, + int unit, int hdrlen, int mru, int debug); /* Reset a decompressor */ - void (*decomp_reset)(void *state); + void (*decomp_reset)(void *state); /* Decompress a packet. 
*/ - int (*decompress)(void *state, PACKETPTR mp, PACKETPTR *dmpp); + int (*decompress)(void *state, PACKETPTR mp, PACKETPTR *dmpp); /* Update state for an incompressible packet received */ - void (*incomp)(void *state, PACKETPTR mp); + void (*incomp)(void *state, PACKETPTR mp); /* Return decompression statistics */ - void (*decomp_stat)(void *state, struct compstat *stats); + void (*decomp_stat)(void *state, struct compstat *stats); }; #endif /* PACKETPTR */ @@ -117,75 +117,75 @@ struct compressor { * a patent held by Motorola. * Don't you just lurve software patents. */ -#define DECOMP_OK 0 /* everything went OK */ -#define DECOMP_ERROR 1 /* error detected before decomp. */ -#define DECOMP_FATALERROR 2 /* error detected after decomp. */ +#define DECOMP_OK 0 /* everything went OK */ +#define DECOMP_ERROR 1 /* error detected before decomp. */ +#define DECOMP_FATALERROR 2 /* error detected after decomp. */ /* * CCP codes. */ -#define CCP_CONFREQ 1 -#define CCP_CONFACK 2 -#define CCP_TERMREQ 5 -#define CCP_TERMACK 6 -#define CCP_RESETREQ 14 -#define CCP_RESETACK 15 +#define CCP_CONFREQ 1 +#define CCP_CONFACK 2 +#define CCP_TERMREQ 5 +#define CCP_TERMACK 6 +#define CCP_RESETREQ 14 +#define CCP_RESETACK 15 /* * Max # bytes for a CCP option */ -#define CCP_MAX_OPTION_LENGTH 32 +#define CCP_MAX_OPTION_LENGTH 32 /* * Parts of a CCP packet. */ -#define CCP_CODE(dp) ((dp)[0]) -#define CCP_ID(dp) ((dp)[1]) -#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3]) -#define CCP_HDRLEN 4 +#define CCP_CODE(dp) ((dp)[0]) +#define CCP_ID(dp) ((dp)[1]) +#define CCP_LENGTH(dp) (((dp)[2] << 8) + (dp)[3]) +#define CCP_HDRLEN 4 -#define CCP_OPT_CODE(dp) ((dp)[0]) -#define CCP_OPT_LENGTH(dp) ((dp)[1]) -#define CCP_OPT_MINLEN 2 +#define CCP_OPT_CODE(dp) ((dp)[0]) +#define CCP_OPT_LENGTH(dp) ((dp)[1]) +#define CCP_OPT_MINLEN 2 /* * Definitions for BSD-Compress. */ -#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */ -#define CILEN_BSD_COMPRESS 3 /* length of config. option */ +#define CI_BSD_COMPRESS 21 /* config. option for BSD-Compress */ +#define CILEN_BSD_COMPRESS 3 /* length of config. option */ /* Macros for handling the 3rd byte of the BSD-Compress config option. */ -#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */ -#define BSD_VERSION(x) ((x) >> 5) /* version of option format */ -#define BSD_CURRENT_VERSION 1 /* current version number */ -#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) +#define BSD_NBITS(x) ((x) & 0x1F) /* number of bits requested */ +#define BSD_VERSION(x) ((x) >> 5) /* version of option format */ +#define BSD_CURRENT_VERSION 1 /* current version number */ +#define BSD_MAKE_OPT(v, n) (((v) << 5) | (n)) -#define BSD_MIN_BITS 9 /* smallest code size supported */ -#define BSD_MAX_BITS 15 /* largest code size supported */ +#define BSD_MIN_BITS 9 /* smallest code size supported */ +#define BSD_MAX_BITS 15 /* largest code size supported */ /* * Definitions for Deflate. 
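 * (Worked example: the option byte packs window size and method,
 * so DEFLATE_MAKE_OPT(15) = ((15 - 8) << 4) + 8 = 0x78, and
 * DEFLATE_SIZE(0x78) and DEFLATE_METHOD(0x78) below recover 15 and 8.)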
*/ -#define CI_DEFLATE 26 /* config option for Deflate */ -#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */ -#define CILEN_DEFLATE 4 /* length of its config option */ - -#define DEFLATE_MIN_SIZE 8 -#define DEFLATE_MAX_SIZE 15 -#define DEFLATE_METHOD_VAL 8 -#define DEFLATE_SIZE(x) (((x) >> 4) + DEFLATE_MIN_SIZE) -#define DEFLATE_METHOD(x) ((x) & 0x0F) -#define DEFLATE_MAKE_OPT(w) ((((w) - DEFLATE_MIN_SIZE) << 4) \ - + DEFLATE_METHOD_VAL) -#define DEFLATE_CHK_SEQUENCE 0 +#define CI_DEFLATE 26 /* config option for Deflate */ +#define CI_DEFLATE_DRAFT 24 /* value used in original draft RFC */ +#define CILEN_DEFLATE 4 /* length of its config option */ + +#define DEFLATE_MIN_SIZE 8 +#define DEFLATE_MAX_SIZE 15 +#define DEFLATE_METHOD_VAL 8 +#define DEFLATE_SIZE(x) (((x) >> 4) + DEFLATE_MIN_SIZE) +#define DEFLATE_METHOD(x) ((x) & 0x0F) +#define DEFLATE_MAKE_OPT(w) ((((w) - DEFLATE_MIN_SIZE) << 4) \ + + DEFLATE_METHOD_VAL) +#define DEFLATE_CHK_SEQUENCE 0 /* * Definitions for other, as yet unsupported, compression methods. */ -#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */ -#define CILEN_PREDICTOR_1 2 /* length of its config option */ -#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */ -#define CILEN_PREDICTOR_2 2 /* length of its config option */ +#define CI_PREDICTOR_1 1 /* config option for Predictor-1 */ +#define CILEN_PREDICTOR_1 2 /* length of its config option */ +#define CI_PREDICTOR_2 2 /* config option for Predictor-2 */ +#define CILEN_PREDICTOR_2 2 /* length of its config option */ #endif /* KERNEL_PRIVATE */ #endif /* _NET_PPP_COMP_H */ diff --git a/bsd/net/ppp_defs.h b/bsd/net/ppp_defs.h index 2cfd6ac01..83c5e3647 100644 --- a/bsd/net/ppp_defs.h +++ b/bsd/net/ppp_defs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -60,108 +60,108 @@ /* * The basic PPP frame. 
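[Editor's sketch] DEFLATE_MAKE_OPT packs the window size into the high nibble and the method value into the low one, and the arithmetic round-trips. A self-contained check, with the macros copied verbatim from above: DEFLATE_MAKE_OPT(15) = ((15-8)<<4)+8 = 0x78, and decoding 0x78 recovers size 15 and method 8.

#include <assert.h>

#define DEFLATE_MIN_SIZE   8
#define DEFLATE_METHOD_VAL 8
#define DEFLATE_SIZE(x)    (((x) >> 4) + DEFLATE_MIN_SIZE)
#define DEFLATE_METHOD(x)  ((x) & 0x0F)
#define DEFLATE_MAKE_OPT(w) ((((w) - DEFLATE_MIN_SIZE) << 4) + DEFLATE_METHOD_VAL)

int
main(void)
{
	int w = 15;                     /* largest supported window, 2^15 */
	int b = DEFLATE_MAKE_OPT(w);    /* 0x78 */

	assert(DEFLATE_SIZE(b) == w);
	assert(DEFLATE_METHOD(b) == DEFLATE_METHOD_VAL);
	return 0;
}
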
*/ -#define PPP_HDRLEN 4 /* octets for standard ppp header */ -#define PPP_FCSLEN 2 /* octets for FCS */ -#define PPP_MRU 1500 /* default MRU = max length of info field */ +#define PPP_HDRLEN 4 /* octets for standard ppp header */ +#define PPP_FCSLEN 2 /* octets for FCS */ +#define PPP_MRU 1500 /* default MRU = max length of info field */ -#define PPP_ADDRESS(p) (((u_char *)(p))[0]) -#define PPP_CONTROL(p) (((u_char *)(p))[1]) -#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3]) +#define PPP_ADDRESS(p) (((u_char *)(p))[0]) +#define PPP_CONTROL(p) (((u_char *)(p))[1]) +#define PPP_PROTOCOL(p) ((((u_char *)(p))[2] << 8) + ((u_char *)(p))[3]) /* * Significant octet values. */ -#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ -#define PPP_UI 0x03 /* Unnumbered Information */ -#define PPP_FLAG 0x7e /* Flag Sequence */ -#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */ -#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */ +#define PPP_ALLSTATIONS 0xff /* All-Stations broadcast address */ +#define PPP_UI 0x03 /* Unnumbered Information */ +#define PPP_FLAG 0x7e /* Flag Sequence */ +#define PPP_ESCAPE 0x7d /* Asynchronous Control Escape */ +#define PPP_TRANS 0x20 /* Asynchronous transparency modifier */ /* * Protocol field values. */ -#define PPP_IP 0x21 /* Internet Protocol */ -#define PPP_XNS 0x25 /* Xerox NS */ -#define PPP_AT 0x29 /* AppleTalk Protocol */ -#define PPP_IPX 0x2b /* IPX Datagram (RFC1552) */ -#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */ -#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */ -#define PPP_COMP 0xfd /* compressed packet */ -#define PPP_IPCP 0x8021 /* IP Control Protocol */ -#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */ -#define PPP_IPXCP 0x802b /* IPX Control Protocol (RFC1552) */ -#define PPP_CCP 0x80fd /* Compression Control Protocol */ -#define PPP_LCP 0xc021 /* Link Control Protocol */ -#define PPP_PAP 0xc023 /* Password Authentication Protocol */ -#define PPP_LQR 0xc025 /* Link Quality Report protocol */ -#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */ -#define PPP_CBCP 0xc029 /* Callback Control Protocol */ -#define PPP_IPV6 0x57 /* Internet Protocol version 6*/ -#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */ +#define PPP_IP 0x21 /* Internet Protocol */ +#define PPP_XNS 0x25 /* Xerox NS */ +#define PPP_AT 0x29 /* AppleTalk Protocol */ +#define PPP_IPX 0x2b /* IPX Datagram (RFC1552) */ +#define PPP_VJC_COMP 0x2d /* VJ compressed TCP */ +#define PPP_VJC_UNCOMP 0x2f /* VJ uncompressed TCP */ +#define PPP_COMP 0xfd /* compressed packet */ +#define PPP_IPCP 0x8021 /* IP Control Protocol */ +#define PPP_ATCP 0x8029 /* AppleTalk Control Protocol */ +#define PPP_IPXCP 0x802b /* IPX Control Protocol (RFC1552) */ +#define PPP_CCP 0x80fd /* Compression Control Protocol */ +#define PPP_LCP 0xc021 /* Link Control Protocol */ +#define PPP_PAP 0xc023 /* Password Authentication Protocol */ +#define PPP_LQR 0xc025 /* Link Quality Report protocol */ +#define PPP_CHAP 0xc223 /* Cryptographic Handshake Auth. Protocol */ +#define PPP_CBCP 0xc029 /* Callback Control Protocol */ +#define PPP_IPV6 0x57 /* Internet Protocol version 6*/ +#define PPP_IPV6CP 0x8057 /* IPv6 Control Protocol */ /* * Values for FCS calculations. 
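[Editor's sketch] With the framing macros and protocol values above, classifying a frame is a three-field check. A standalone sketch using an illustrative ff 03 00 21 header; the macros are restated with u_char spelled out as unsigned char so the example compiles on its own.

#include <stdio.h>

#define PPP_HDRLEN      4
#define PPP_ALLSTATIONS 0xff
#define PPP_UI          0x03
#define PPP_IP          0x21
#define PPP_ADDRESS(p)  (((unsigned char *)(p))[0])
#define PPP_CONTROL(p)  (((unsigned char *)(p))[1])
#define PPP_PROTOCOL(p) ((((unsigned char *)(p))[2] << 8) + ((unsigned char *)(p))[3])

int
main(void)
{
	unsigned char frame[] = { 0xff, 0x03, 0x00, 0x21 }; /* then the IP datagram */

	if (PPP_ADDRESS(frame) == PPP_ALLSTATIONS &&
	    PPP_CONTROL(frame) == PPP_UI &&
	    PPP_PROTOCOL(frame) == PPP_IP)
		printf("IPv4 datagram follows the %d-byte header\n", PPP_HDRLEN);
	return 0;
}
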
*/ -#define PPP_INITFCS 0xffff /* Initial FCS value */ -#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */ -#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) +#define PPP_INITFCS 0xffff /* Initial FCS value */ +#define PPP_GOODFCS 0xf0b8 /* Good final FCS value */ +#define PPP_FCS(fcs, c) (((fcs) >> 8) ^ fcstab[((fcs) ^ (c)) & 0xff]) /* * Extended asyncmap - allows any character to be escaped. */ -typedef u_int32_t ext_accm[8]; +typedef u_int32_t ext_accm[8]; /* * What to do with network protocol (NP) packets. */ enum NPmode { - NPMODE_PASS, /* pass the packet through */ - NPMODE_DROP, /* silently drop the packet */ - NPMODE_ERROR, /* return an error */ - NPMODE_QUEUE /* save it up for later. */ + NPMODE_PASS, /* pass the packet through */ + NPMODE_DROP, /* silently drop the packet */ + NPMODE_ERROR, /* return an error */ + NPMODE_QUEUE /* save it up for later. */ }; /* * Statistics. */ -struct pppstat { - unsigned int ppp_ibytes; /* bytes received */ - unsigned int ppp_ipackets; /* packets received */ - unsigned int ppp_ierrors; /* receive errors */ - unsigned int ppp_obytes; /* bytes sent */ - unsigned int ppp_opackets; /* packets sent */ - unsigned int ppp_oerrors; /* transmit errors */ +struct pppstat { + unsigned int ppp_ibytes; /* bytes received */ + unsigned int ppp_ipackets; /* packets received */ + unsigned int ppp_ierrors; /* receive errors */ + unsigned int ppp_obytes; /* bytes sent */ + unsigned int ppp_opackets; /* packets sent */ + unsigned int ppp_oerrors; /* transmit errors */ }; struct vjstat { - unsigned int vjs_packets; /* outbound packets */ - unsigned int vjs_compressed; /* outbound compressed packets */ - unsigned int vjs_searches; /* searches for connection state */ - unsigned int vjs_misses; /* times couldn't find conn. state */ - unsigned int vjs_uncompressedin; /* inbound uncompressed packets */ - unsigned int vjs_compressedin; /* inbound compressed packets */ - unsigned int vjs_errorin; /* inbound unknown type packets */ - unsigned int vjs_tossed; /* inbound packets tossed because of error */ + unsigned int vjs_packets; /* outbound packets */ + unsigned int vjs_compressed; /* outbound compressed packets */ + unsigned int vjs_searches; /* searches for connection state */ + unsigned int vjs_misses; /* times couldn't find conn. 
state */ + unsigned int vjs_uncompressedin; /* inbound uncompressed packets */ + unsigned int vjs_compressedin; /* inbound compressed packets */ + unsigned int vjs_errorin; /* inbound unknown type packets */ + unsigned int vjs_tossed; /* inbound packets tossed because of error */ }; struct ppp_stats { - struct pppstat p; /* basic PPP statistics */ - struct vjstat vj; /* VJ header compression statistics */ + struct pppstat p; /* basic PPP statistics */ + struct vjstat vj; /* VJ header compression statistics */ }; struct compstat { - unsigned int unc_bytes; /* total uncompressed bytes */ - unsigned int unc_packets; /* total uncompressed packets */ - unsigned int comp_bytes; /* compressed bytes */ - unsigned int comp_packets; /* compressed packets */ - unsigned int inc_bytes; /* incompressible bytes */ - unsigned int inc_packets; /* incompressible packets */ - unsigned int ratio; /* recent compression ratio << 8 */ + unsigned int unc_bytes; /* total uncompressed bytes */ + unsigned int unc_packets; /* total uncompressed packets */ + unsigned int comp_bytes; /* compressed bytes */ + unsigned int comp_packets; /* compressed packets */ + unsigned int inc_bytes; /* incompressible bytes */ + unsigned int inc_packets; /* incompressible packets */ + unsigned int ratio; /* recent compression ratio << 8 */ }; struct ppp_comp_stats { - struct compstat c; /* packet compression statistics */ - struct compstat d; /* packet decompression statistics */ + struct compstat c; /* packet compression statistics */ + struct compstat d; /* packet decompression statistics */ }; /* @@ -169,8 +169,8 @@ struct ppp_comp_stats { * the last NP packet was sent or received. */ struct ppp_idle { - time_t xmit_idle; /* time since last NP packet sent */ - time_t recv_idle; /* time since last NP packet received */ + time_t xmit_idle; /* time since last NP packet sent */ + time_t recv_idle; /* time since last NP packet received */ }; #endif /* _PPP_DEFS_H_ */ diff --git a/bsd/net/radix.c b/bsd/net/radix.c index fdc7058b3..c9ea3960c 100644 --- a/bsd/net/radix.c +++ b/bsd/net/radix.c @@ -68,7 +68,7 @@ #include #include #include -#define M_DONTWAIT M_NOWAIT +#define M_DONTWAIT M_NOWAIT #include #include #include @@ -77,17 +77,17 @@ #include #endif -static int rn_walktree_from(struct radix_node_head *h, void *a, - void *m, walktree_f_t *f, void *w); +static int rn_walktree_from(struct radix_node_head *h, void *a, + void *m, walktree_f_t *f, void *w); static int rn_walktree(struct radix_node_head *, walktree_f_t *, void *); static struct radix_node - *rn_insert(void *, struct radix_node_head *, int *, - struct radix_node [2]), - *rn_newpair(void *, int, struct radix_node[2]), - *rn_search(void *, struct radix_node *), - *rn_search_m(void *, struct radix_node *, void *); +*rn_insert(void *, struct radix_node_head *, int *, + struct radix_node[2]), +*rn_newpair(void *, int, struct radix_node[2]), +*rn_search(void *, struct radix_node *), +*rn_search_m(void *, struct radix_node *, void *); -static int max_keylen; +static int max_keylen; static struct radix_mask *rn_mkfreelist; static struct radix_node_head *mask_rnhead; static char *addmask_key; @@ -95,22 +95,22 @@ static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1}; static char *rn_zeros, *rn_ones; -extern lck_grp_t *domain_proto_mtx_grp; -extern lck_attr_t *domain_proto_mtx_attr; +extern lck_grp_t *domain_proto_mtx_grp; +extern lck_attr_t *domain_proto_mtx_attr; #define rn_masktop (mask_rnhead->rnh_treetop) #undef Bcmp #define Bcmp(a, b, l) \ (l == 0 ? 
0 : bcmp((caddr_t)(a), (caddr_t)(b), (uint32_t)l)) -static int rn_lexobetter(void *m_arg, void *n_arg); +static int rn_lexobetter(void *m_arg, void *n_arg); static struct radix_mask * - rn_new_radix_mask(struct radix_node *tt, - struct radix_mask *next); +rn_new_radix_mask(struct radix_node *tt, + struct radix_mask *next); static int rn_satisfies_leaf(char *trial, struct radix_node *leaf, int skip, rn_matchf_t *f, void *w); -#define RN_MATCHF(rn, f, arg) (f == NULL || (*f)((rn), arg)) +#define RN_MATCHF(rn, f, arg) (f == NULL || (*f)((rn), arg)) /* * The data structure for the keys is a radix tree with one way @@ -153,12 +153,13 @@ rn_search(void *v_arg, struct radix_node *head) caddr_t v; for (x = head, v = v_arg; x->rn_bit >= 0;) { - if (x->rn_bmask & v[x->rn_offset]) + if (x->rn_bmask & v[x->rn_offset]) { x = x->rn_right; - else + } else { x = x->rn_left; + } } - return (x); + return x; } static struct radix_node * @@ -169,10 +170,11 @@ rn_search_m(void *v_arg, struct radix_node *head, void *m_arg) for (x = head; x->rn_bit >= 0;) { if ((x->rn_bmask & m[x->rn_offset]) && - (x->rn_bmask & v[x->rn_offset])) + (x->rn_bmask & v[x->rn_offset])) { x = x->rn_right; - else + } else { x = x->rn_left; + } } return x; } @@ -185,28 +187,36 @@ rn_refines(void *m_arg, void *n_arg) int longer = (*(u_char *)n++) - (int)(*(u_char *)m++); int masks_are_equal = 1; - if (longer > 0) + if (longer > 0) { lim -= longer; + } while (n < lim) { - if (*n & ~(*m)) + if (*n & ~(*m)) { return 0; - if (*n++ != *m++) + } + if (*n++ != *m++) { masks_are_equal = 0; + } } - while (n < lim2) - if (*n++) + while (n < lim2) { + if (*n++) { return 0; - if (masks_are_equal && (longer < 0)) - for (lim2 = m - longer; m < lim2; ) - if (*m++) + } + } + if (masks_are_equal && (longer < 0)) { + for (lim2 = m - longer; m < lim2;) { + if (*m++) { return 1; - return (!masks_are_equal); + } + } + } + return !masks_are_equal; } struct radix_node * rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head) { - return (rn_lookup_args(v_arg, m_arg, head, NULL, NULL)); + return rn_lookup_args(v_arg, m_arg, head, NULL, NULL); } struct radix_node * @@ -218,14 +228,16 @@ rn_lookup_args(void *v_arg, void *m_arg, struct radix_node_head *head, if (m_arg) { x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_offset); - if (x == 0) - return (NULL); + if (x == 0) { + return NULL; + } netmask = x->rn_key; } x = rn_match_args(v_arg, head, f, w); if (x && netmask) { - while (x && x->rn_mask != netmask) + while (x && x->rn_mask != netmask) { x = x->rn_dupedkey; + } } return x; } @@ -245,22 +257,25 @@ rn_satisfies_leaf(char *trial, struct radix_node *leaf, int skip, char *cplim; int length = min(*(u_char *)cp, *(u_char *)cp2); - if (cp3 == 0) + if (cp3 == 0) { cp3 = rn_ones; - else + } else { length = min(length, *(u_char *)cp3); + } cplim = cp + length; cp3 += skip; cp2 += skip; - for (cp += skip; cp < cplim; cp++, cp2++, cp3++) - if ((*cp ^ *cp2) & *cp3) + for (cp += skip; cp < cplim; cp++, cp2++, cp3++) { + if ((*cp ^ *cp2) & *cp3) { return 0; + } + } - return (RN_MATCHF(leaf, f, w)); + return RN_MATCHF(leaf, f, w); } struct radix_node * rn_match(void *v_arg, struct radix_node_head *head) { - return (rn_match_args(v_arg, head, NULL, NULL)); + return rn_match_args(v_arg, head, NULL, NULL); } struct radix_node * @@ -279,11 +294,12 @@ rn_match_args(void *v_arg, struct radix_node_head *head, * Open code rn_search(v, top) to avoid overhead of extra * subroutine call. 
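[Editor's sketch] Each internal node tests exactly one bit of the byte-string key: rn_offset names the byte and rn_bmask the bit within it (rn_newpair fills them as b >> 3 and 0x80 >> (b & 7)). The same test in isolation:

#include <stdio.h>

/* The single-bit test an internal radix node performs; descent goes
 * right when the bit is set, left otherwise. */
static int
key_bit_set(const unsigned char *key, int b)
{
	int offset = b >> 3;                    /* which byte (rn_offset) */
	unsigned char bmask = 0x80 >> (b & 7);  /* which bit (rn_bmask) */

	return (key[offset] & bmask) != 0;
}

int
main(void)
{
	unsigned char key[] = { 0x10, 0x0a, 0x01, 0x02 }; /* length-prefixed key */

	printf("bit 15 is %s\n", key_bit_set(key, 15) ? "set" : "clear");
	return 0;
}
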
*/ - for (; t->rn_bit >= 0; ) { - if (t->rn_bmask & cp[t->rn_offset]) + for (; t->rn_bit >= 0;) { + if (t->rn_bmask & cp[t->rn_offset]) { t = t->rn_right; - else + } else { t = t->rn_left; + } } /* * See if we match exactly as a host destination @@ -296,12 +312,15 @@ rn_match_args(void *v_arg, struct radix_node_head *head, * with a long one. This wins big for class B&C netmasks which * are probably the most common case... */ - if (t->rn_mask) + if (t->rn_mask) { vlen = *(u_char *)t->rn_mask; + } cp += off; cp2 = t->rn_key + off; cplim = v + vlen; - for (; cp < cplim; cp++, cp2++) - if (*cp != *cp2) + for (; cp < cplim; cp++, cp2++) { + if (*cp != *cp2) { goto on1; + } + } /* * This extra grot is in case we are explicitly asked * to look up the default. Ugh! @@ -309,10 +328,11 @@ rn_match_args(void *v_arg, struct radix_node_head *head, * Never return the root node itself, it seems to cause a * lot of confusion. */ - if (t->rn_flags & RNF_ROOT) + if (t->rn_flags & RNF_ROOT) { t = t->rn_dupedkey; + } if (t == NULL || RN_MATCHF(t, f, w)) { - return (t); + return t; } else { /* * Although we found an exact match on the key, @@ -321,15 +341,16 @@ rn_match_args(void *v_arg, struct radix_node_head *head, */ if (t->rn_parent->rn_flags & RNF_ROOT) { /* Hit the top; have to give up */ - return (NULL); + return NULL; } b = 0; goto keeplooking; } on1: test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */ - for (b = 7; (test >>= 1) > 0;) + for (b = 7; (test >>= 1) > 0;) { b--; + } keeplooking: matched_off = cp - v; b += matched_off << 3; @@ -337,8 +358,9 @@ keeplooking: /* * If there is a host route in a duped-key chain, it will be first. */ - if ((saved_t = t)->rn_mask == 0) + if ((saved_t = t)->rn_mask == 0) { t = t->rn_dupedkey; + } for (; t; t = t->rn_dupedkey) { /* * Even if we don't match exactly as a host, @@ -346,10 +368,11 @@ keeplooking: * a route to a net. 
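[Editor's sketch] The duped-key chains and rm_mklist backtracking exist to deliver longest-prefix matching. The semantics, modeled as a linear scan over illustrative IPv4 routes; the kernel arrives at the same answer via tree backtracking, so this is only a model of the result, not of the algorithm.

#include <stdio.h>

struct toy_route { unsigned key, mask; const char *name; };

int
main(void)
{
	struct toy_route tab[] = {
		{ 0x0a000000, 0xff000000, "10/8" },
		{ 0x0a010000, 0xffff0000, "10.1/16" },
		{ 0x00000000, 0x00000000, "default" },
	};
	unsigned dst = 0x0a010203;      /* 10.1.2.3 */
	struct toy_route *best = NULL;

	/* among routes whose (dst & mask) == key, prefer the longest mask;
	 * comparing contiguous masks as unsigned ints orders them by length */
	for (unsigned i = 0; i < 3; i++)
		if ((dst & tab[i].mask) == tab[i].key &&
		    (best == NULL || tab[i].mask > best->mask))
			best = &tab[i];
	printf("best match: %s\n", best->name); /* prints 10.1/16 */
	return 0;
}
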
*/ if (t->rn_flags & RNF_NORMAL) { - if ((rn_bit <= t->rn_bit) && RN_MATCHF(t, f, w)) - return (t); + if ((rn_bit <= t->rn_bit) && RN_MATCHF(t, f, w)) { + return t; + } } else if (rn_satisfies_leaf(v, t, matched_off, f, w)) { - return (t); + return t; } } t = saved_t; @@ -367,27 +390,30 @@ keeplooking: while (m) { if (m->rm_flags & RNF_NORMAL) { if ((rn_bit <= m->rm_bit) && - RN_MATCHF(m->rm_leaf, f, w)) - return (m->rm_leaf); + RN_MATCHF(m->rm_leaf, f, w)) { + return m->rm_leaf; + } } else { off = min(t->rn_offset, matched_off); x = rn_search_m(v, t, m->rm_mask); - while (x && x->rn_mask != m->rm_mask) + while (x && x->rn_mask != m->rm_mask) { x = x->rn_dupedkey; - if (x && rn_satisfies_leaf(v, x, off, f, w)) - return (x); + } + if (x && rn_satisfies_leaf(v, x, off, f, w)) { + return x; + } } m = m->rm_mklist; } } while (t != top); - return (NULL); + return NULL; } #ifdef RN_DEBUG -int rn_nodenum; -struct radix_node *rn_clist; -int rn_saveinfo; -int rn_debug = 1; +int rn_nodenum; +struct radix_node *rn_clist; +int rn_saveinfo; +int rn_debug = 1; #endif static struct radix_node * @@ -414,7 +440,7 @@ rn_newpair(void *v, int b, struct radix_node nodes[2]) static struct radix_node * rn_insert(void *v_arg, struct radix_node_head *head, int *dupentry, - struct radix_node nodes[2]) + struct radix_node nodes[2]) { caddr_t v = v_arg; struct radix_node *top = head->rnh_treetop; @@ -423,60 +449,67 @@ rn_insert(void *v_arg, struct radix_node_head *head, int *dupentry, caddr_t cp = v + head_off; int b; struct radix_node *tt; - /* + /* * Find first bit at which v and t->rn_key differ */ - { - caddr_t cp2 = t->rn_key + head_off; - int cmp_res; - caddr_t cplim = v + vlen; + { + caddr_t cp2 = t->rn_key + head_off; + int cmp_res; + caddr_t cplim = v + vlen; - while (cp < cplim) - if (*cp2++ != *cp++) - goto on1; - *dupentry = 1; - return t; + while (cp < cplim) { + if (*cp2++ != *cp++) { + goto on1; + } + } + *dupentry = 1; + return t; on1: - *dupentry = 0; - cmp_res = (cp[-1] ^ cp2[-1]) & 0xff; - for (b = (cp - v) << 3; cmp_res; b--) - cmp_res >>= 1; - } - { - struct radix_node *p, *x = top; - cp = v; - do { - p = x; - if (cp[x->rn_offset] & x->rn_bmask) - x = x->rn_right; - else - x = x->rn_left; - } while (b > (unsigned) x->rn_bit); - /* x->rn_bit < b && x->rn_bit >= 0 */ + *dupentry = 0; + cmp_res = (cp[-1] ^ cp2[-1]) & 0xff; + for (b = (cp - v) << 3; cmp_res; b--) { + cmp_res >>= 1; + } + } + { + struct radix_node *p, *x = top; + cp = v; + do { + p = x; + if (cp[x->rn_offset] & x->rn_bmask) { + x = x->rn_right; + } else { + x = x->rn_left; + } + } while (b > (unsigned) x->rn_bit); + /* x->rn_bit < b && x->rn_bit >= 0 */ #ifdef RN_DEBUG - if (rn_debug) - log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p); + if (rn_debug) { + log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p); + } #endif - t = rn_newpair(v_arg, b, nodes); - tt = t->rn_left; - if ((cp[p->rn_offset] & p->rn_bmask) == 0) - p->rn_left = t; - else - p->rn_right = t; - x->rn_parent = t; - t->rn_parent = p; /* frees x, p as temp vars below */ - if ((cp[t->rn_offset] & t->rn_bmask) == 0) { - t->rn_right = x; - } else { - t->rn_right = tt; - t->rn_left = x; - } + t = rn_newpair(v_arg, b, nodes); + tt = t->rn_left; + if ((cp[p->rn_offset] & p->rn_bmask) == 0) { + p->rn_left = t; + } else { + p->rn_right = t; + } + x->rn_parent = t; + t->rn_parent = p; /* frees x, p as temp vars below */ + if ((cp[t->rn_offset] & t->rn_bmask) == 0) { + t->rn_right = x; + } else { + t->rn_right = tt; + t->rn_left = x; + } #ifdef RN_DEBUG - if (rn_debug) - 
log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p); + if (rn_debug) { + log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p); + } #endif - } - return (tt); + } + return tt; } struct radix_node * @@ -490,78 +523,98 @@ rn_addmask(void *n_arg, int search, int skip) struct radix_node *saved_x; static int last_zeroed = 0; - if ((mlen = *(u_char *)netmask) > max_keylen) + if ((mlen = *(u_char *)netmask) > max_keylen) { mlen = max_keylen; - if (skip == 0) + } + if (skip == 0) { skip = 1; - if (mlen <= skip) - return (mask_rnhead->rnh_nodes); - if (skip > 1) + } + if (mlen <= skip) { + return mask_rnhead->rnh_nodes; + } + if (skip > 1) { Bcopy(rn_ones + 1, addmask_key + 1, skip - 1); - if ((m0 = mlen) > skip) + } + if ((m0 = mlen) > skip) { Bcopy(netmask + skip, addmask_key + skip, mlen - skip); + } /* * Trim trailing zeroes. */ - for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;) + for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;) { cp--; + } mlen = cp - addmask_key; if (mlen <= skip) { - if (m0 >= last_zeroed) + if (m0 >= last_zeroed) { last_zeroed = mlen; - return (mask_rnhead->rnh_nodes); + } + return mask_rnhead->rnh_nodes; } - if (m0 < last_zeroed) + if (m0 < last_zeroed) { Bzero(addmask_key + m0, last_zeroed - m0); + } *addmask_key = last_zeroed = mlen; x = rn_search(addmask_key, rn_masktop); - if (Bcmp(addmask_key, x->rn_key, mlen) != 0) + if (Bcmp(addmask_key, x->rn_key, mlen) != 0) { x = NULL; - if (x || search) - return (x); - R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x)); - if ((saved_x = x) == 0) - return (NULL); - Bzero(x, max_keylen + 2 * sizeof (*x)); + } + if (x || search) { + return x; + } + R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof(*x)); + if ((saved_x = x) == 0) { + return NULL; + } + Bzero(x, max_keylen + 2 * sizeof(*x)); netmask = cp = (caddr_t)(x + 2); Bcopy(addmask_key, cp, mlen); x = rn_insert(cp, mask_rnhead, &maskduplicated, x); if (maskduplicated) { log(LOG_ERR, "rn_addmask: mask impossibly already in tree"); R_Free(saved_x); - return (x); + return x; } mask_rnhead->rnh_cnt++; /* * Calculate index of mask, and check for normalcy. 
*/ cplim = netmask + mlen; isnormal = 1; - for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;) + for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;) { cp++; + } if (cp != cplim) { - for (j = 0x80; (j & *cp) != 0; j >>= 1) + for (j = 0x80; (j & *cp) != 0; j >>= 1) { b++; - if (*cp != normal_chars[b] || cp != (cplim - 1)) + } + if (*cp != normal_chars[b] || cp != (cplim - 1)) { isnormal = 0; + } } b += (cp - netmask) << 3; x->rn_bit = -1 - b; - if (isnormal) + if (isnormal) { x->rn_flags |= RNF_NORMAL; - return (x); + } + return x; } -static int /* XXX: arbitrary ordering for non-contiguous masks */ +static int +/* XXX: arbitrary ordering for non-contiguous masks */ rn_lexobetter(void *m_arg, void *n_arg) { u_char *mp = m_arg, *np = n_arg, *lim; - if (*mp > *np) + if (*mp > *np) { return 1; /* not really, but need to check longer one first */ - if (*mp == *np) - for (lim = mp + *mp; mp < lim;) - if (*mp++ > *np++) + } + if (*mp == *np) { + for (lim = mp + *mp; mp < lim;) { + if (*mp++ > *np++) { return 1; + } + } + } return 0; } @@ -573,15 +626,16 @@ rn_new_radix_mask(struct radix_node *tt, struct radix_mask *next) MKGet(m); if (m == 0) { log(LOG_ERR, "Mask for route not entered\n"); - return (NULL); + return NULL; } Bzero(m, sizeof *m); m->rm_bit = tt->rn_bit; m->rm_flags = tt->rn_flags; - if (tt->rn_flags & RNF_NORMAL) + if (tt->rn_flags & RNF_NORMAL) { m->rm_leaf = tt; - else + } else { m->rm_mask = tt->rn_mask; + } m->rm_mklist = next; tt->rn_mklist = m; return m; @@ -589,7 +643,7 @@ rn_new_radix_mask(struct radix_node *tt, struct radix_mask *next) struct radix_node * rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, - struct radix_node treenodes[2]) + struct radix_node treenodes[2]) { caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg; struct radix_node *t, *x = NULL, *tt; @@ -606,9 +660,10 @@ rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, * the mask to speed avoiding duplicate references at * nodes and possibly save time in calculating indices. */ - if (netmask) { - if ((x = rn_addmask(netmask, 0, top->rn_offset)) == 0) - return (NULL); + if (netmask) { + if ((x = rn_addmask(netmask, 0, top->rn_offset)) == 0) { + return NULL; + } b_leaf = x->rn_bit; b = -1 - x->rn_bit; netmask = x->rn_key; @@ -619,14 +674,16 @@ rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes); if (keyduplicated) { for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) { - if (tt->rn_mask == netmask) - return (NULL); + if (tt->rn_mask == netmask) { + return NULL; + } if (netmask == 0 || (tt->rn_mask && - ((b_leaf < tt->rn_bit) /* index(netmask) > node */ - || rn_refines(netmask, tt->rn_mask) - || rn_lexobetter(netmask, tt->rn_mask)))) + ((b_leaf < tt->rn_bit) /* index(netmask) > node */ + || rn_refines(netmask, tt->rn_mask) + || rn_lexobetter(netmask, tt->rn_mask)))) { break; + } } /* * If the mask is not duplicated, we wouldn't @@ -639,26 +696,28 @@ rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, * the head of the list. 
*/ if (tt == saved_tt) { - struct radix_node *xx = x; + struct radix_node *xx = x; /* link in at head of list */ (tt = treenodes)->rn_dupedkey = t; tt->rn_flags = t->rn_flags; tt->rn_parent = x = t->rn_parent; - t->rn_parent = tt; /* parent */ - if (x->rn_left == t) + t->rn_parent = tt; /* parent */ + if (x->rn_left == t) { x->rn_left = tt; - else + } else { x->rn_right = tt; + } saved_tt = tt; x = xx; } else { (tt = treenodes)->rn_dupedkey = t->rn_dupedkey; t->rn_dupedkey = tt; - tt->rn_parent = t; /* parent */ - if (tt->rn_dupedkey) /* parent */ + tt->rn_parent = t; /* parent */ + if (tt->rn_dupedkey) { /* parent */ tt->rn_dupedkey->rn_parent = tt; /* parent */ + } } #ifdef RN_DEBUG - t=tt+1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++; + t = tt + 1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++; tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt; #endif tt->rn_key = (caddr_t) v; @@ -675,34 +734,41 @@ rn_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, tt->rn_flags |= x->rn_flags & RNF_NORMAL; } t = saved_tt->rn_parent; - if (keyduplicated) + if (keyduplicated) { goto on2; + } b_leaf = -1 - t->rn_bit; - if (t->rn_right == saved_tt) + if (t->rn_right == saved_tt) { x = t->rn_left; - else + } else { x = t->rn_right; + } /* Promote general routes from below */ if (x->rn_bit < 0) { - for (mp = &t->rn_mklist; x; x = x->rn_dupedkey) - if (x->rn_mask && (x->rn_bit >= b_leaf) && x->rn_mklist == 0) { - *mp = m = rn_new_radix_mask(x, NULL); - if (m) - mp = &m->rm_mklist; + for (mp = &t->rn_mklist; x; x = x->rn_dupedkey) { + if (x->rn_mask && (x->rn_bit >= b_leaf) && x->rn_mklist == 0) { + *mp = m = rn_new_radix_mask(x, NULL); + if (m) { + mp = &m->rm_mklist; + } + } } } else if (x->rn_mklist) { /* * Skip over masks whose index is > that of new node */ - for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) - if (m->rm_bit >= b_leaf) + for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) { + if (m->rm_bit >= b_leaf) { break; + } + } t->rn_mklist = m; *mp = NULL; } on2: /* Add new route to highest possible ancestor's list */ - if ((netmask == 0) || (b > t->rn_bit )) + if ((netmask == 0) || (b > t->rn_bit)) { return tt; /* can't lift at all */ + } b_leaf = tt->rn_bit; do { x = t; @@ -715,27 +781,31 @@ on2: * double loop on deletion. */ for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) { - if (m->rm_bit < b_leaf) + if (m->rm_bit < b_leaf) { continue; - if (m->rm_bit > b_leaf) + } + if (m->rm_bit > b_leaf) { break; + } if (m->rm_flags & RNF_NORMAL) { mmask = m->rm_leaf->rn_mask; if (tt->rn_flags & RNF_NORMAL) { - log(LOG_ERR, - "Non-unique normal route, mask not entered"); + log(LOG_ERR, + "Non-unique normal route, mask not entered"); return tt; } - } else + } else { mmask = m->rm_mask; + } if (mmask == netmask) { m->rm_refs++; tt->rn_mklist = m; return tt; } if (rn_refines(netmask, mmask) - || rn_lexobetter(netmask, mmask)) + || rn_lexobetter(netmask, mmask)) { break; + } } *mp = rn_new_radix_mask(tt, *mp); return tt; @@ -759,21 +829,26 @@ rn_delete(void *v_arg, void *netmask_arg, struct radix_node_head *head) saved_tt = tt; top = x; if (tt == 0 || - Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off)) - return (NULL); + Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off)) { + return NULL; + } /* * Delete our route from mask lists. 
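[Editor's sketch] Callers do not allocate tree nodes here: they pass in the treenodes[2] array they own (the routing code embeds such a pair in struct rtentry as rt_nodes[2]). A sketch of the vtable-style call, assuming a PRIVATE build where <net/radix.h> is visible; add_route_sketch is an illustrative name, not a kernel function.

#include <net/radix.h>

static struct radix_node *
add_route_sketch(struct radix_node_head *rnh,
    void *dst, void *mask, struct radix_node nodes[2])
{
	/* dst and mask are length-prefixed byte strings; returns the leaf
	 * on success, NULL if the key+mask pair is already in the tree */
	return rnh->rnh_addaddr(dst, mask, rnh, nodes);
}
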
*/ if (netmask) { - if ((x = rn_addmask(netmask, 1, head_off)) == 0) - return (NULL); + if ((x = rn_addmask(netmask, 1, head_off)) == 0) { + return NULL; + } netmask = x->rn_key; - while (tt->rn_mask != netmask) - if ((tt = tt->rn_dupedkey) == 0) - return (NULL); + while (tt->rn_mask != netmask) { + if ((tt = tt->rn_dupedkey) == 0) { + return NULL; + } + } } - if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0) + if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0) { goto on1; + } if (tt->rn_flags & RNF_NORMAL) { if (m->rm_leaf != tt || m->rm_refs > 0) { log(LOG_ERR, "rn_delete: inconsistent annotation\n"); @@ -784,39 +859,47 @@ rn_delete(void *v_arg, void *netmask_arg, struct radix_node_head *head) log(LOG_ERR, "rn_delete: inconsistent annotation\n"); goto on1; } - if (--m->rm_refs >= 0) + if (--m->rm_refs >= 0) { goto on1; + } } b = -1 - tt->rn_bit; t = saved_tt->rn_parent; - if (b > t->rn_bit) + if (b > t->rn_bit) { goto on1; /* Wasn't lifted at all */ + } do { x = t; t = t->rn_parent; } while (b <= t->rn_bit && x != top); - for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) + for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) { if (m == saved_m) { *mp = m->rm_mklist; MKFree(m); break; } + } if (m == 0) { log(LOG_ERR, "rn_delete: couldn't find our annotation\n"); - if (tt->rn_flags & RNF_NORMAL) - return (NULL); /* Dangling ref to us */ + if (tt->rn_flags & RNF_NORMAL) { + return NULL; /* Dangling ref to us */ + } } on1: /* * Eliminate us from tree */ - if (tt->rn_flags & RNF_ROOT) - return (NULL); + if (tt->rn_flags & RNF_ROOT) { + return NULL; + } head->rnh_cnt--; #ifdef RN_DEBUG /* Get us out of the creation list */ - for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {} - if (t) t->rn_ybro = tt->rn_ybro; + for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) { + } + if (t) { + t->rn_ybro = tt->rn_ybro; + } #endif t = tt->rn_parent; dupedkey = saved_tt->rn_dupedkey; @@ -828,23 +911,28 @@ on1: if (tt == saved_tt) { /* remove from head of chain */ x = dupedkey; x->rn_parent = t; - if (t->rn_left == tt) + if (t->rn_left == tt) { t->rn_left = x; - else + } else { t->rn_right = x; + } } else { /* find node in front of tt on the chain */ - for (x = p = saved_tt; p && p->rn_dupedkey != tt;) + for (x = p = saved_tt; p && p->rn_dupedkey != tt;) { p = p->rn_dupedkey; + } if (p) { p->rn_dupedkey = tt->rn_dupedkey; - if (tt->rn_dupedkey) /* parent */ + if (tt->rn_dupedkey) { /* parent */ tt->rn_dupedkey->rn_parent = p; - /* parent */ - } else log(LOG_ERR, "rn_delete: couldn't find us\n"); + } + /* parent */ + } else { + log(LOG_ERR, "rn_delete: couldn't find us\n"); + } } t = tt + 1; - if (t->rn_flags & RNF_ACTIVE) { + if (t->rn_flags & RNF_ACTIVE) { #ifndef RN_DEBUG *++x = *t; p = t->rn_parent; @@ -854,50 +942,57 @@ on1: t->rn_info = b; p = t->rn_parent; #endif - if (p->rn_left == t) + if (p->rn_left == t) { p->rn_left = x; - else + } else { p->rn_right = x; + } x->rn_left->rn_parent = x; x->rn_right->rn_parent = x; } goto out; } - if (t->rn_left == tt) + if (t->rn_left == tt) { x = t->rn_right; - else + } else { x = t->rn_left; + } p = t->rn_parent; - if (p->rn_right == t) + if (p->rn_right == t) { p->rn_right = x; - else + } else { p->rn_left = x; + } x->rn_parent = p; /* * Demote routes attached to us. 
*/ if (t->rn_mklist) { if (x->rn_bit >= 0) { - for (mp = &x->rn_mklist; (m = *mp);) + for (mp = &x->rn_mklist; (m = *mp);) { mp = &m->rm_mklist; + } *mp = t->rn_mklist; } else { /* If there are any key,mask pairs in a sibling - duped-key chain, some subset will appear sorted - in the same order attached to our mklist */ - for (m = t->rn_mklist; m && x; x = x->rn_dupedkey) + * duped-key chain, some subset will appear sorted + * in the same order attached to our mklist */ + for (m = t->rn_mklist; m && x; x = x->rn_dupedkey) { if (m == x->rn_mklist) { struct radix_mask *mm = m->rm_mklist; x->rn_mklist = NULL; - if (--(m->rm_refs) < 0) + if (--(m->rm_refs) < 0) { MKFree(m); + } m = mm; } - if (m) + } + if (m) { log(LOG_ERR, "rn_delete: Orphaned Mask " "0x%llx at 0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(m), (uint64_t)VM_KERNEL_ADDRPERM(x)); + } } } /* @@ -915,15 +1010,16 @@ on1: t->rn_left->rn_parent = t; t->rn_right->rn_parent = t; p = x->rn_parent; - if (p->rn_left == x) + if (p->rn_left == x) { p->rn_left = t; - else + } else { p->rn_right = t; + } } out: tt->rn_flags &= ~RNF_ACTIVE; tt[1].rn_flags &= ~RNF_ACTIVE; - return (tt); + return tt; } /* @@ -962,15 +1058,17 @@ restart: /* * rn_search_m is sort-of-open-coded here. */ - for (rn = h->rnh_treetop; rn->rn_bit >= 0; ) { + for (rn = h->rnh_treetop; rn->rn_bit >= 0;) { last = rn; - if (!(rn->rn_bmask & xm[rn->rn_offset])) + if (!(rn->rn_bmask & xm[rn->rn_offset])) { break; + } - if (rn->rn_bmask & xa[rn->rn_offset]) + if (rn->rn_bmask & xa[rn->rn_offset]) { rn = rn->rn_right; - else + } else { rn = rn->rn_left; + } } /* @@ -983,14 +1081,15 @@ restart: lastb = rn->rn_bit; /* First time through node, go left */ - while (rn->rn_bit >= 0) + while (rn->rn_bit >= 0) { rn = rn->rn_left; + } while (!stopping) { base = rn; /* If at right child go back up, otherwise, go right */ while (rn->rn_parent->rn_right == rn - && !(rn->rn_flags & RNF_ROOT)) { + && !(rn->rn_flags & RNF_ROOT)) { rn = rn->rn_parent; /* if went up beyond last, stop */ @@ -1030,27 +1129,32 @@ restart: * half, prevent the traversal of the entire tree in the * case of default route. 
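[Editor's sketch] Both walkers pre-fetch the next leaf before invoking f, and restart from the top when rnh_cnt drops; that is what makes deleting the node handed to the callback safe. A minimal callback sketch (count_leaves is an illustrative name):

#include <net/radix.h>

static int
count_leaves(struct radix_node *rn, void *w)
{
	(void)rn;               /* a real callback would inspect the leaf */
	(*(int *)w)++;
	return 0;               /* nonzero aborts the walk with that error */
}

/* usage sketch: int n = 0; rnh->rnh_walktree(rnh, count_leaves, &n); */
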
*/ - if (rn->rn_parent->rn_flags & RNF_ROOT) + if (rn->rn_parent->rn_flags & RNF_ROOT) { stopping = 1; + } #endif /* Find the next *leaf* to start from */ - for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;) + for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;) { rn = rn->rn_left; + } next = rn; /* Process leaves */ while ((rn = base) != 0) { base = rn->rn_dupedkey; if (!(rn->rn_flags & RNF_ROOT) - && (error = (*f)(rn, w))) - return (error); + && (error = (*f)(rn, w))) { + return error; + } } /* If one or more nodes got deleted, restart from top */ - if (h->rnh_cnt < rnh_cnt) + if (h->rnh_cnt < rnh_cnt) { goto restart; + } rn = next; - if (rn->rn_flags & RNF_ROOT) + if (rn->rn_flags & RNF_ROOT) { stopping = 1; + } } return 0; } @@ -1079,31 +1183,37 @@ restart: rnh_cnt = h->rnh_cnt; /* First time through node, go left */ - while (rn->rn_bit >= 0) + while (rn->rn_bit >= 0) { rn = rn->rn_left; + } for (;;) { base = rn; /* If at right child go back up, otherwise, go right */ while (rn->rn_parent->rn_right == rn && - (rn->rn_flags & RNF_ROOT) == 0) + (rn->rn_flags & RNF_ROOT) == 0) { rn = rn->rn_parent; + } /* Find the next *leaf* to start from */ - for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;) + for (rn = rn->rn_parent->rn_right; rn->rn_bit >= 0;) { rn = rn->rn_left; + } next = rn; /* Process leaves */ while ((rn = base) != NULL) { base = rn->rn_dupedkey; if (!(rn->rn_flags & RNF_ROOT) - && (error = (*f)(rn, w))) - return (error); + && (error = (*f)(rn, w))) { + return error; + } } /* If one or more nodes got deleted, restart from top */ - if (h->rnh_cnt < rnh_cnt) + if (h->rnh_cnt < rnh_cnt) { goto restart; + } rn = next; - if (rn->rn_flags & RNF_ROOT) - return (0); + if (rn->rn_flags & RNF_ROOT) { + return 0; + } } /* NOTREACHED */ } @@ -1113,12 +1223,14 @@ rn_inithead(void **head, int off) { struct radix_node_head *rnh; struct radix_node *t, *tt, *ttt; - if (*head) - return (1); - R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh)); - if (rnh == 0) - return (0); - Bzero(rnh, sizeof (*rnh)); + if (*head) { + return 1; + } + R_Malloc(rnh, struct radix_node_head *, sizeof(*rnh)); + if (rnh == 0) { + return 0; + } + Bzero(rnh, sizeof(*rnh)); *head = rnh; t = rn_newpair(rn_zeros, off, rnh->rnh_nodes); ttt = rnh->rnh_nodes + 2; @@ -1139,7 +1251,7 @@ rn_inithead(void **head, int off) rnh->rnh_walktree_from = rn_walktree_from; rnh->rnh_treetop = t; rnh->rnh_cnt = 3; - return (1); + return 1; } void @@ -1150,8 +1262,9 @@ rn_init(void) /* lock already held when rn_init is called */ TAILQ_FOREACH(dom, &domains, dom_entry) { - if (dom->dom_maxrtkey > max_keylen) + if (dom->dom_maxrtkey > max_keylen) { max_keylen = dom->dom_maxrtkey; + } } if (max_keylen == 0) { log(LOG_ERR, @@ -1159,13 +1272,16 @@ rn_init(void) return; } R_Malloc(rn_zeros, char *, 3 * max_keylen); - if (rn_zeros == NULL) + if (rn_zeros == NULL) { panic("rn_init"); + } Bzero(rn_zeros, 3 * max_keylen); rn_ones = cp = rn_zeros + max_keylen; addmask_key = cplim = rn_ones + max_keylen; - while (cp < cplim) + while (cp < cplim) { *cp++ = -1; - if (rn_inithead((void **)&mask_rnhead, 0) == 0) + } + if (rn_inithead((void **)&mask_rnhead, 0) == 0) { panic("rn_init 2"); + } } diff --git a/bsd/net/radix.h b/bsd/net/radix.h index 78f251b22..1bf016010 100644 --- a/bsd/net/radix.h +++ b/bsd/net/radix.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -62,7 +62,7 @@ */ #ifndef _RADIX_H_ -#define _RADIX_H_ +#define _RADIX_H_ #include #ifdef PRIVATE @@ -76,26 +76,26 @@ MALLOC_DECLARE(M_RTABLE); */ struct radix_node { - struct radix_mask *rn_mklist; /* list of masks contained in subtree */ - struct radix_node *rn_parent; /* parent */ - short rn_bit; /* bit offset; -1-index(netmask) */ - char rn_bmask; /* node: mask for bit test*/ - u_char rn_flags; /* enumerated next */ -#define RNF_NORMAL 1 /* leaf contains normal route */ -#define RNF_ROOT 2 /* leaf is root leaf for tree */ -#define RNF_ACTIVE 4 /* This node is alive (for rtfree) */ + struct radix_mask *rn_mklist; /* list of masks contained in subtree */ + struct radix_node *rn_parent; /* parent */ + short rn_bit; /* bit offset; -1-index(netmask) */ + char rn_bmask; /* node: mask for bit test*/ + u_char rn_flags; /* enumerated next */ +#define RNF_NORMAL 1 /* leaf contains normal route */ +#define RNF_ROOT 2 /* leaf is root leaf for tree */ +#define RNF_ACTIVE 4 /* This node is alive (for rtfree) */ union { - struct { /* leaf only data: */ - caddr_t rn_Key; /* object of search */ - caddr_t rn_Mask; /* netmask, if present */ - struct radix_node *rn_Dupedkey; + struct { /* leaf only data: */ + caddr_t rn_Key; /* object of search */ + caddr_t rn_Mask; /* netmask, if present */ + struct radix_node *rn_Dupedkey; } rn_leaf; - struct { /* node only data: */ - int rn_Off; /* where to start compare */ - struct radix_node *rn_L;/* progeny */ - struct radix_node *rn_R;/* progeny */ + struct { /* node only data: */ + int rn_Off; /* where to start compare */ + struct radix_node *rn_L;/* progeny */ + struct radix_node *rn_R;/* progeny */ } rn_node; - } rn_u; + } rn_u; #ifdef RN_DEBUG int rn_info; struct radix_node *rn_twin; @@ -112,39 +112,39 @@ struct radix_node { }; #endif -#define rn_dupedkey rn_u.rn_leaf.rn_Dupedkey -#define rn_key rn_u.rn_leaf.rn_Key -#define rn_mask rn_u.rn_leaf.rn_Mask -#define rn_offset rn_u.rn_node.rn_Off -#define rn_left rn_u.rn_node.rn_L -#define rn_right rn_u.rn_node.rn_R +#define rn_dupedkey rn_u.rn_leaf.rn_Dupedkey +#define rn_key rn_u.rn_leaf.rn_Key +#define rn_mask rn_u.rn_leaf.rn_Mask +#define rn_offset rn_u.rn_node.rn_Off +#define rn_left rn_u.rn_node.rn_L +#define rn_right rn_u.rn_node.rn_R /* * Annotations to tree concerning potential routes applying to subtrees. */ struct radix_mask { - short rm_bit; /* bit offset; -1-index(netmask) */ - char rm_unused; /* cf. 
rn_bmask */ - u_char rm_flags; /* cf. rn_flags */ - struct radix_mask *rm_mklist; /* more masks to try */ - union { - caddr_t rmu_mask; /* the mask */ - struct radix_node *rmu_leaf; /* for normal routes */ - } rm_rmu; - int rm_refs; /* # of references to this struct */ + short rm_bit; /* bit offset; -1-index(netmask) */ + char rm_unused; /* cf. rn_bmask */ + u_char rm_flags; /* cf. rn_flags */ + struct radix_mask *rm_mklist; /* more masks to try */ + union { + caddr_t rmu_mask; /* the mask */ + struct radix_node *rmu_leaf; /* for normal routes */ + } rm_rmu; + int rm_refs; /* # of references to this struct */ }; -#define rm_mask rm_rmu.rmu_mask -#define rm_leaf rm_rmu.rmu_leaf /* extra field would make 32 bytes */ +#define rm_mask rm_rmu.rmu_mask +#define rm_leaf rm_rmu.rmu_leaf /* extra field would make 32 bytes */ #define MKGet(m) {\ if (rn_mkfreelist) {\ - m = rn_mkfreelist; \ - rn_mkfreelist = (m)->rm_mklist; \ + m = rn_mkfreelist; \ + rn_mkfreelist = (m)->rm_mklist; \ } else \ - R_Malloc(m, struct radix_mask *, sizeof (*(m))); }\ + R_Malloc(m, struct radix_mask *, sizeof (*(m))); }\ #define MKFree(m) { (m)->rm_mklist = rn_mkfreelist; rn_mkfreelist = (m);} @@ -152,42 +152,42 @@ typedef int walktree_f_t(struct radix_node *, void *); typedef int rn_matchf_t(struct radix_node *, void *); struct radix_node_head { - struct radix_node *rnh_treetop; - int rnh_addrsize; /* permit, but not require fixed keys */ - int rnh_pktsize; /* permit, but not require fixed keys */ - struct radix_node *(*rnh_addaddr) /* add based on sockaddr */ - (void *v, void *mask, - struct radix_node_head *head, struct radix_node nodes[]); - struct radix_node *(*rnh_addpkt) /* add based on packet hdr */ - (void *v, void *mask, - struct radix_node_head *head, struct radix_node nodes[]); - struct radix_node *(*rnh_deladdr) /* remove based on sockaddr */ - (void *v, void *mask, struct radix_node_head *head); - struct radix_node *(*rnh_delpkt) /* remove based on packet hdr */ - (void *v, void *mask, struct radix_node_head *head); - struct radix_node *(*rnh_matchaddr) /* locate based on sockaddr */ - (void *v, struct radix_node_head *head); + struct radix_node *rnh_treetop; + int rnh_addrsize; /* permit, but not require fixed keys */ + int rnh_pktsize; /* permit, but not require fixed keys */ + struct radix_node *(*rnh_addaddr) /* add based on sockaddr */ + (void *v, void *mask, + struct radix_node_head *head, struct radix_node nodes[]); + struct radix_node *(*rnh_addpkt) /* add based on packet hdr */ + (void *v, void *mask, + struct radix_node_head *head, struct radix_node nodes[]); + struct radix_node *(*rnh_deladdr) /* remove based on sockaddr */ + (void *v, void *mask, struct radix_node_head *head); + struct radix_node *(*rnh_delpkt) /* remove based on packet hdr */ + (void *v, void *mask, struct radix_node_head *head); + struct radix_node *(*rnh_matchaddr) /* locate based on sockaddr */ + (void *v, struct radix_node_head *head); /* locate based on sockaddr and rn_matchf_t() */ - struct radix_node *(*rnh_matchaddr_args) - (void *v, struct radix_node_head *head, - rn_matchf_t *f, void *w); - struct radix_node *(*rnh_lookup) /* locate based on sockaddr */ - (void *v, void *mask, struct radix_node_head *head); + struct radix_node *(*rnh_matchaddr_args) + (void *v, struct radix_node_head *head, + rn_matchf_t *f, void *w); + struct radix_node *(*rnh_lookup) /* locate based on sockaddr */ + (void *v, void *mask, struct radix_node_head *head); /* locate based on sockaddr, mask and rn_matchf_t() */ - struct radix_node 
*(*rnh_lookup_args) - (void *v, void *mask, struct radix_node_head *head, - rn_matchf_t *f, void *); - struct radix_node *(*rnh_matchpkt) /* locate based on packet hdr */ - (void *v, struct radix_node_head *head); - int (*rnh_walktree) /* traverse tree */ - (struct radix_node_head *head, walktree_f_t *f, void *w); - int (*rnh_walktree_from) /* traverse tree below a */ - (struct radix_node_head *head, void *a, void *m, - walktree_f_t *f, void *w); - void (*rnh_close) /* do something when the last ref drops */ - (struct radix_node *rn, struct radix_node_head *head); - struct radix_node rnh_nodes[3]; /* empty tree for common case */ - int rnh_cnt; /* tree dimension */ + struct radix_node *(*rnh_lookup_args) + (void *v, void *mask, struct radix_node_head *head, + rn_matchf_t *f, void *); + struct radix_node *(*rnh_matchpkt) /* locate based on packet hdr */ + (void *v, struct radix_node_head *head); + int (*rnh_walktree) /* traverse tree */ + (struct radix_node_head *head, walktree_f_t *f, void *w); + int (*rnh_walktree_from) /* traverse tree below a */ + (struct radix_node_head *head, void *a, void *m, + walktree_f_t *f, void *w); + void (*rnh_close) /* do something when the last ref drops */ + (struct radix_node *rn, struct radix_node_head *head); + struct radix_node rnh_nodes[3]; /* empty tree for common case */ + int rnh_cnt; /* tree dimension */ }; #ifndef KERNEL @@ -201,22 +201,22 @@ struct radix_node_head { #define Bcopy(a, b, n) bcopy(((caddr_t)(a)), ((caddr_t)(b)), (unsigned)(n)) #define Bzero(p, n) bzero((caddr_t)(p), (unsigned)(n)); #define R_Malloc(p, t, n) (p = (t) _MALLOC((uint32_t)(n), M_RTABLE, M_WAITOK)) -#define R_Free(p) FREE((caddr_t)p, M_RTABLE); +#define R_Free(p) _FREE((caddr_t)p, M_RTABLE); #endif /*KERNEL*/ -void rn_init(void); -int rn_inithead(void **, int); -int rn_refines(void *, void *); +void rn_init(void); +int rn_inithead(void **, int); +int rn_refines(void *, void *); struct radix_node - *rn_addmask(void *, int, int), - *rn_addroute(void *, void *, struct radix_node_head *, - struct radix_node [2]), - *rn_delete(void *, void *, struct radix_node_head *), - *rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head), - *rn_lookup_args(void *v_arg, void *m_arg, struct radix_node_head *head, - rn_matchf_t *, void *), - *rn_match(void *, struct radix_node_head *), - *rn_match_args(void *, struct radix_node_head *, rn_matchf_t *, void *); +*rn_addmask(void *, int, int), +*rn_addroute(void *, void *, struct radix_node_head *, + struct radix_node [2]), +*rn_delete(void *, void *, struct radix_node_head *), +*rn_lookup(void *v_arg, void *m_arg, struct radix_node_head *head), +*rn_lookup_args(void *v_arg, void *m_arg, struct radix_node_head *head, + rn_matchf_t *, void *), +*rn_match(void *, struct radix_node_head *), +*rn_match_args(void *, struct radix_node_head *, rn_matchf_t *, void *); #endif /* PRIVATE */ #endif /* _RADIX_H_ */ diff --git a/bsd/net/raw_cb.c b/bsd/net/raw_cb.c index 2aaaeeb4b..9fbbd01ce 100644 --- a/bsd/net/raw_cb.c +++ b/bsd/net/raw_cb.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -81,9 +81,9 @@ struct rawcb_list_head rawcb_list; -static uint32_t raw_sendspace = RAWSNDQ; -static uint32_t raw_recvspace = RAWRCVQ; -extern lck_mtx_t *raw_mtx; /*### global raw cb mutex for now */ +static uint32_t raw_sendspace = RAWSNDQ; +static uint32_t raw_recvspace = RAWRCVQ; +extern lck_mtx_t *raw_mtx; /*### global raw cb mutex for now */ /* * Allocate a control block and a nominal amount @@ -100,18 +100,20 @@ raw_attach(struct socket *so, int proto) * after space has been allocated for the * rawcb. */ - if (rp == 0) - return (ENOBUFS); + if (rp == 0) { + return ENOBUFS; + } error = soreserve(so, raw_sendspace, raw_recvspace); - if (error) - return (error); + if (error) { + return error; + } rp->rcb_socket = so; rp->rcb_proto.sp_family = SOCK_DOM(so); rp->rcb_proto.sp_protocol = proto; lck_mtx_lock(raw_mtx); LIST_INSERT_HEAD(&rawcb_list, rp, list); lck_mtx_unlock(raw_mtx); - return (0); + return 0; } /* @@ -134,12 +136,13 @@ raw_detach(struct rawcb *rp) LIST_REMOVE(rp, list); lck_mtx_unlock(raw_mtx); #ifdef notdef - if (rp->rcb_laddr) + if (rp->rcb_laddr) { m_freem(dtom(rp->rcb_laddr)); + } rp->rcb_laddr = 0; #endif rp->rcb_socket = NULL; - FREE((caddr_t)(rp), M_PCB); + FREE(rp, M_PCB); } /* @@ -151,8 +154,9 @@ raw_disconnect(struct rawcb *rp) struct socket *so = rp->rcb_socket; #ifdef notdef - if (rp->rcb_faddr) + if (rp->rcb_faddr) { m_freem(dtom(rp->rcb_faddr)); + } rp->rcb_faddr = 0; #endif /* @@ -160,8 +164,9 @@ raw_disconnect(struct rawcb *rp) * so check for SOF_MP_SUBFLOW socket flag before detaching the PCB; * when the socket is closed for real, SOF_MP_SUBFLOW would be cleared. */ - if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) + if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) { raw_detach(rp); + } } #ifdef notdef @@ -173,13 +178,15 @@ raw_bind(struct socket *so, struct mbuf *nam) struct sockaddr *addr = mtod(nam, struct sockaddr *); struct rawcb *rp; - if (ifnet == 0) - return (EADDRNOTAVAIL); + if (ifnet == 0) { + return EADDRNOTAVAIL; + } rp = sotorawcb(so); nam = m_copym(nam, 0, M_COPYALL, M_WAITOK); - if (nam == NULL) + if (nam == NULL) { return ENOBUFS; + } rp->rcb_laddr = mtod(nam, struct sockaddr *); - return (0); + return 0; } #endif diff --git a/bsd/net/raw_cb.h b/bsd/net/raw_cb.h index 39a63716c..c217a5880 100644 --- a/bsd/net/raw_cb.h +++ b/bsd/net/raw_cb.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -71,20 +71,20 @@ */ struct rawcb { LIST_ENTRY(rawcb) list; - struct socket *rcb_socket; /* back pointer to socket */ - struct sockaddr *rcb_faddr; /* destination address */ - struct sockaddr *rcb_laddr; /* socket's address */ - struct sockproto rcb_proto; /* protocol family, protocol */ - uint32_t reserved[4]; /* for future use */ + struct socket *rcb_socket; /* back pointer to socket */ + struct sockaddr *rcb_faddr; /* destination address */ + struct sockaddr *rcb_laddr; /* socket's address */ + struct sockproto rcb_proto; /* protocol family, protocol */ + uint32_t reserved[4]; /* for future use */ }; -#define sotorawcb(so) ((struct rawcb *)(so)->so_pcb) +#define sotorawcb(so) ((struct rawcb *)(so)->so_pcb) /* * Nominal space allocated to a raw socket. */ -#define RAWSNDQ 8192 -#define RAWRCVQ 8192 +#define RAWSNDQ 8192 +#define RAWRCVQ 8192 extern LIST_HEAD(rawcb_list_head, rawcb) rawcb_list; @@ -98,6 +98,6 @@ extern void raw_input(struct mbuf *, struct sockproto *, struct sockaddr *, struct sockaddr *); __END_DECLS -extern struct pr_usrreqs raw_usrreqs; +extern struct pr_usrreqs raw_usrreqs; #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NET_RAW_CB_H_ */ diff --git a/bsd/net/raw_usrreq.c b/bsd/net/raw_usrreq.c index 462842129..eb8521ed7 100644 --- a/bsd/net/raw_usrreq.c +++ b/bsd/net/raw_usrreq.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -73,11 +73,11 @@ #include -decl_lck_mtx_data(,raw_mtx_data); /*### global raw cb mutex for now */ -lck_mtx_t *raw_mtx = &raw_mtx_data; -lck_attr_t *raw_mtx_attr; -lck_grp_t *raw_mtx_grp; -lck_grp_attr_t *raw_mtx_grp_attr; +decl_lck_mtx_data(, raw_mtx_data); /*### global raw cb mutex for now */ +lck_mtx_t *raw_mtx = &raw_mtx_data; +lck_attr_t *raw_mtx_attr; +lck_grp_t *raw_mtx_grp; +lck_grp_attr_t *raw_mtx_grp_attr; /* * Initialize raw connection block q. */ @@ -111,7 +111,7 @@ raw_init(struct protosw *pp, struct domain *dp) */ void raw_input(struct mbuf *m0, struct sockproto *proto, struct sockaddr *src, - struct sockaddr *dst) + struct sockaddr *dst) { struct rawcb *rp; struct mbuf *m = m0; @@ -119,18 +119,20 @@ raw_input(struct mbuf *m0, struct sockproto *proto, struct sockaddr *src, struct socket *last; int error; -//####LD raw_input is called from many places, input & output path. We have to assume the +//####LD raw_input is called from many places, input & output path. We have to assume the //####LD socket we'll find and need to append to is unlocked. //####LD calls from the output (locked) path need to make sure the socket is not locked when //####LD we call in raw_input last = NULL; lck_mtx_lock(raw_mtx); LIST_FOREACH(rp, &rawcb_list, list) { - if (rp->rcb_proto.sp_family != proto->sp_family) + if (rp->rcb_proto.sp_family != proto->sp_family) { continue; - if (rp->rcb_proto.sp_protocol && - rp->rcb_proto.sp_protocol != proto->sp_protocol) + } + if (rp->rcb_proto.sp_protocol && + rp->rcb_proto.sp_protocol != proto->sp_protocol) { continue; + } /* * We assume the lower level routines have * placed the address in a canonical format @@ -139,12 +141,14 @@ raw_input(struct mbuf *m0, struct sockproto *proto, struct sockaddr *src, * Note that if the lengths are not the same * the comparison will fail at the first byte. 
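[Editor's sketch] The equal() macro defined just below leans on sockaddr layout: sa_len is byte 0 of every sockaddr, so comparing a1->sa_len bytes makes any length mismatch fail at the first byte. The same idea standalone:

#include <string.h>
#include <sys/socket.h>

/* Since sa_len is the first byte of a sockaddr, two addresses of
 * different length already differ at byte 0, so comparing a1->sa_len
 * bytes is sufficient for canonical-format addresses. */
static int
sockaddr_equal(const struct sockaddr *a1, const struct sockaddr *a2)
{
	return memcmp(a1, a2, a1->sa_len) == 0;
}
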
*/ -#define equal(a1, a2) \ +#define equal(a1, a2) \ (bcmp((caddr_t)(a1), (caddr_t)(a2), a1->sa_len) == 0) - if (rp->rcb_laddr && !equal(rp->rcb_laddr, dst)) + if (rp->rcb_laddr && !equal(rp->rcb_laddr, dst)) { continue; - if (rp->rcb_faddr && !equal(rp->rcb_faddr, src)) + } + if (rp->rcb_faddr && !equal(rp->rcb_faddr, src)) { continue; + } if (last) { struct mbuf *n; n = m_copy(m, 0, (int)M_COPYALL); @@ -168,8 +172,9 @@ raw_input(struct mbuf *m0, struct sockproto *proto, struct sockaddr *src, sockets++; } socket_unlock(last, 1); - } else + } else { m_freem(m); + } lck_mtx_unlock(raw_mtx); } @@ -178,9 +183,9 @@ void raw_ctlinput(int cmd, __unused struct sockaddr *arg, __unused void *dummy, __unused struct ifnet *ifp) { - - if (cmd < 0 || cmd >= PRC_NCMDS) + if (cmd < 0 || cmd >= PRC_NCMDS) { return; + } /* INCOMPLETE */ } @@ -190,14 +195,16 @@ raw_uabort(struct socket *so) struct rawcb *rp = sotorawcb(so); lck_mtx_t * mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); - if (rp == 0) + if (rp == 0) { return EINVAL; + } raw_disconnect(rp); sofree(so); soisdisconnected(so); @@ -211,10 +218,12 @@ raw_uattach(struct socket *so, int proto, __unused struct proc *p) { struct rawcb *rp = sotorawcb(so); - if (rp == 0) + if (rp == 0) { return EINVAL; - if ((so->so_state & SS_PRIV) == 0) - return (EPERM); + } + if ((so->so_state & SS_PRIV) == 0) { + return EPERM; + } return raw_attach(so, proto); } @@ -239,13 +248,15 @@ raw_udetach(struct socket *so) struct rawcb *rp = sotorawcb(so); lck_mtx_t * mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); - if (rp == 0) + if (rp == 0) { return EINVAL; + } raw_detach(rp); return 0; @@ -256,8 +267,9 @@ raw_udisconnect(struct socket *so) { struct rawcb *rp = sotorawcb(so); - if (rp == 0) + if (rp == 0) { return EINVAL; + } if (rp->rcb_faddr == 0) { return ENOTCONN; } @@ -273,8 +285,9 @@ raw_upeeraddr(struct socket *so, struct sockaddr **nam) { struct rawcb *rp = sotorawcb(so); - if (rp == 0) + if (rp == 0) { return EINVAL; + } if (rp->rcb_faddr == 0) { return ENOTCONN; } @@ -287,16 +300,17 @@ raw_upeeraddr(struct socket *so, struct sockaddr **nam) static int raw_usend(struct socket *so, int flags, struct mbuf *m, - struct sockaddr *nam, struct mbuf *control, __unused struct proc *p) + struct sockaddr *nam, struct mbuf *control, __unused struct proc *p) { int error; struct rawcb *rp = sotorawcb(so); lck_mtx_t * mutex_held; - if (so->so_proto->pr_getlock != NULL) + if (so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); if (rp == 0) { @@ -330,12 +344,14 @@ raw_usend(struct socket *so, int flags, struct mbuf *m, } error = (*so->so_proto->pr_output)(m, so); m = NULL; - if (nam) + if (nam) { rp->rcb_faddr = NULL; + } release: - if (m != NULL) + if (m != NULL) { m_freem(m); - return (error); + } + return error; } /* pru_sense is null */ @@ -345,14 +361,16 @@ raw_ushutdown(struct socket *so) { struct rawcb *rp = sotorawcb(so); lck_mtx_t * mutex_held; - if (so->so_proto->pr_getlock != NULL) + if 
(so->so_proto->pr_getlock != NULL) { mutex_held = (*so->so_proto->pr_getlock)(so, 0); - else + } else { mutex_held = so->so_proto->pr_domain->dom_mtx; + } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); - if (rp == 0) + if (rp == 0) { return EINVAL; + } socantsendmore(so); return 0; } @@ -362,25 +380,27 @@ raw_usockaddr(struct socket *so, struct sockaddr **nam) { struct rawcb *rp = sotorawcb(so); - if (rp == 0) + if (rp == 0) { return EINVAL; - if (rp->rcb_laddr == 0) + } + if (rp->rcb_laddr == 0) { return EINVAL; + } *nam = dup_sockaddr(rp->rcb_laddr, 1); return 0; } struct pr_usrreqs raw_usrreqs = { - .pru_abort = raw_uabort, - .pru_attach = raw_uattach, - .pru_bind = raw_ubind, - .pru_connect = raw_uconnect, - .pru_detach = raw_udetach, - .pru_disconnect = raw_udisconnect, - .pru_peeraddr = raw_upeeraddr, - .pru_send = raw_usend, - .pru_shutdown = raw_ushutdown, - .pru_sockaddr = raw_usockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = raw_uabort, + .pru_attach = raw_uattach, + .pru_bind = raw_ubind, + .pru_connect = raw_uconnect, + .pru_detach = raw_udetach, + .pru_disconnect = raw_udisconnect, + .pru_peeraddr = raw_upeeraddr, + .pru_send = raw_usend, + .pru_shutdown = raw_ushutdown, + .pru_sockaddr = raw_usockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; diff --git a/bsd/net/route.c b/bsd/net/route.c index 986c2f6a2..bba83ba46 100644 --- a/bsd/net/route.c +++ b/bsd/net/route.c @@ -1385,11 +1385,12 @@ rtredirect(struct ifnet *ifp, struct sockaddr *dst, struct sockaddr *gateway, * comparison against rt_gateway below. */ #if INET6 - if ((af == AF_INET) || (af == AF_INET6)) + if ((af == AF_INET) || (af == AF_INET6)) { #else - if (af == AF_INET) + if (af == AF_INET) { #endif /* !INET6 */ src = sa_copy(src, &ss, &ifscope); + } /* * Verify the gateway is directly reachable; if scoped routing diff --git a/bsd/net/route.h b/bsd/net/route.h index 141381e0a..5b4ea82ed 100644 --- a/bsd/net/route.h +++ b/bsd/net/route.h @@ -62,7 +62,7 @@ */ #ifndef _NET_ROUTE_H_ -#define _NET_ROUTE_H_ +#define _NET_ROUTE_H_ #include #include #include @@ -73,30 +73,30 @@ * retransmission behavior and are included in the routing structure. */ struct rt_metrics { - u_int32_t rmx_locks; /* Kernel leaves these values alone */ - u_int32_t rmx_mtu; /* MTU for this path */ - u_int32_t rmx_hopcount; /* max hops expected */ - int32_t rmx_expire; /* lifetime for route, e.g. redirect */ - u_int32_t rmx_recvpipe; /* inbound delay-bandwidth product */ - u_int32_t rmx_sendpipe; /* outbound delay-bandwidth product */ - u_int32_t rmx_ssthresh; /* outbound gateway buffer limit */ - u_int32_t rmx_rtt; /* estimated round trip time */ - u_int32_t rmx_rttvar; /* estimated rtt variance */ - u_int32_t rmx_pksent; /* packets sent using this route */ - u_int32_t rmx_state; /* route state */ - u_int32_t rmx_filler[3]; /* will be used for T/TCP later */ + u_int32_t rmx_locks; /* Kernel leaves these values alone */ + u_int32_t rmx_mtu; /* MTU for this path */ + u_int32_t rmx_hopcount; /* max hops expected */ + int32_t rmx_expire; /* lifetime for route, e.g. 
redirect */ + u_int32_t rmx_recvpipe; /* inbound delay-bandwidth product */ + u_int32_t rmx_sendpipe; /* outbound delay-bandwidth product */ + u_int32_t rmx_ssthresh; /* outbound gateway buffer limit */ + u_int32_t rmx_rtt; /* estimated round trip time */ + u_int32_t rmx_rttvar; /* estimated rtt variance */ + u_int32_t rmx_pksent; /* packets sent using this route */ + u_int32_t rmx_state; /* route state */ + u_int32_t rmx_filler[3]; /* will be used for T/TCP later */ }; /* * rmx_rtt and rmx_rttvar are stored as microseconds; */ -#define RTM_RTTUNIT 1000000 /* units for rtt, rttvar, as units per sec */ +#define RTM_RTTUNIT 1000000 /* units for rtt, rttvar, as units per sec */ #ifdef PRIVATE struct route_old { - void *ro_rt; - uint32_t ro_flags; - struct sockaddr ro_dst; + void *ro_rt; + uint32_t ro_flags; + struct sockaddr ro_dst; }; #endif /* PRIVATE */ @@ -131,15 +131,15 @@ struct route { * because the code does some casts of a 'struct route_in6 *' * to a 'struct route *'. */ - struct rtentry *ro_rt; - struct llentry *ro_lle; + struct rtentry *ro_rt; + struct llentry *ro_lle; - struct ifaddr *ro_srcia; - uint32_t ro_flags; /* route flags (see below) */ - struct sockaddr ro_dst; + struct ifaddr *ro_srcia; + uint32_t ro_flags; /* route flags (see below) */ + struct sockaddr ro_dst; }; -#define ROF_SRCIF_SELECTED 0x0001 /* source interface was selected */ +#define ROF_SRCIF_SELECTED 0x0001 /* source interface was selected */ #if 0 /* XXX These will be used in the changes coming in later */ #define ROF_NORTREF 0x0002 /* doesn't hold reference on ro_rt */ @@ -150,36 +150,36 @@ struct route { #define ROF_BLACKHOLE 0x0040 /* Destination is blackhole */ #define ROF_HAS_GW 0x0080 /* Destination has GW */ #endif -#define ROF_LLE_CACHE 0x0100 /* Cache link layer */ +#define ROF_LLE_CACHE 0x0100 /* Cache link layer */ -#define ROUTE_UNUSABLE(_ro) \ - ((_ro)->ro_rt == NULL || \ - ((_ro)->ro_rt->rt_flags & (RTF_UP|RTF_CONDEMNED)) != RTF_UP || \ +#define ROUTE_UNUSABLE(_ro) \ + ((_ro)->ro_rt == NULL || \ + ((_ro)->ro_rt->rt_flags & (RTF_UP|RTF_CONDEMNED)) != RTF_UP || \ RT_GENID_OUTOFSYNC((_ro)->ro_rt)) -#define _ROUTE_RELEASE_COMMON(_ro, _rnh_locked) do { \ - if ((_ro)->ro_rt != NULL) { \ - RT_LOCK_ASSERT_NOTHELD((_ro)->ro_rt); \ - if (_rnh_locked) \ - rtfree_locked((_ro)->ro_rt); \ - else \ - rtfree((_ro)->ro_rt); \ - (_ro)->ro_rt = NULL; \ - } \ - if ((_ro)->ro_srcia != NULL) { \ - IFA_REMREF((_ro)->ro_srcia); \ - (_ro)->ro_srcia = NULL; \ - (_ro)->ro_flags &= ~ROF_SRCIF_SELECTED; \ - } \ - if ((_ro)->ro_lle != NULL) { \ - LLE_REMREF((_ro)->ro_lle); \ - (_ro)->ro_lle = NULL; \ - (_ro)->ro_flags &= ~ROF_LLE_CACHE; \ - } \ +#define _ROUTE_RELEASE_COMMON(_ro, _rnh_locked) do { \ + if ((_ro)->ro_rt != NULL) { \ + RT_LOCK_ASSERT_NOTHELD((_ro)->ro_rt); \ + if (_rnh_locked) \ + rtfree_locked((_ro)->ro_rt); \ + else \ + rtfree((_ro)->ro_rt); \ + (_ro)->ro_rt = NULL; \ + } \ + if ((_ro)->ro_srcia != NULL) { \ + IFA_REMREF((_ro)->ro_srcia); \ + (_ro)->ro_srcia = NULL; \ + (_ro)->ro_flags &= ~ROF_SRCIF_SELECTED; \ + } \ + if ((_ro)->ro_lle != NULL) { \ + LLE_REMREF((_ro)->ro_lle); \ + (_ro)->ro_lle = NULL; \ + (_ro)->ro_flags &= ~ROF_LLE_CACHE; \ + } \ } while (0) -#define ROUTE_RELEASE_LOCKED(_ro) _ROUTE_RELEASE_COMMON(_ro, TRUE) -#define ROUTE_RELEASE(_ro) _ROUTE_RELEASE_COMMON(_ro, FALSE) +#define ROUTE_RELEASE_LOCKED(_ro) _ROUTE_RELEASE_COMMON(_ro, TRUE) +#define ROUTE_RELEASE(_ro) _ROUTE_RELEASE_COMMON(_ro, FALSE) /* * We distinguish between routes to hosts and routes to networks, @@ -190,48 +190,48 @@ 
struct route { * gateway rather than the ultimate destination. */ -#define NRTT_HIST 10 +#define NRTT_HIST 10 /* * Kernel routing entry structure. */ struct rtentry { - struct radix_node rt_nodes[2]; /* tree glue, and other values */ -#define rt_key(r) (SA((r)->rt_nodes->rn_key)) -#define rt_mask(r) (SA((r)->rt_nodes->rn_mask)) + struct radix_node rt_nodes[2]; /* tree glue, and other values */ +#define rt_key(r) (SA((r)->rt_nodes->rn_key)) +#define rt_mask(r) (SA((r)->rt_nodes->rn_mask)) /* * See bsd/net/route.c for synchronization notes. */ - decl_lck_mtx_data(, rt_lock); /* lock for routing entry */ - uint32_t rt_refcnt; /* # held references */ - uint32_t rt_flags; /* up/down?, host/net */ - uint32_t rt_genid; /* route generation id */ - struct sockaddr *rt_gateway; /* value */ - struct ifnet *rt_ifp; /* the answer: interface to use */ - struct ifaddr *rt_ifa; /* the answer: interface addr to use */ - struct sockaddr *rt_genmask; /* for generation of cloned routes */ - void *rt_llinfo; /* pointer to link level info cache */ - void (*rt_llinfo_get_ri) /* llinfo get reachability info fn */ - (struct rtentry *, struct rt_reach_info *); - void (*rt_llinfo_get_iflri) /* ifnet llinfo get reach. info fn */ - (struct rtentry *, struct ifnet_llreach_info *); + decl_lck_mtx_data(, rt_lock); /* lock for routing entry */ + uint32_t rt_refcnt; /* # held references */ + uint32_t rt_flags; /* up/down?, host/net */ + uint32_t rt_genid; /* route generation id */ + struct sockaddr *rt_gateway; /* value */ + struct ifnet *rt_ifp; /* the answer: interface to use */ + struct ifaddr *rt_ifa; /* the answer: interface addr to use */ + struct sockaddr *rt_genmask; /* for generation of cloned routes */ + void *rt_llinfo; /* pointer to link level info cache */ + void (*rt_llinfo_get_ri) /* llinfo get reachability info fn */ + (struct rtentry *, struct rt_reach_info *); + void (*rt_llinfo_get_iflri) /* ifnet llinfo get reach. 
info fn */ + (struct rtentry *, struct ifnet_llreach_info *); void (*rt_llinfo_purge)(struct rtentry *); /* llinfo purge fn */ void (*rt_llinfo_free)(void *); /* link level info free function */ void (*rt_llinfo_refresh) (struct rtentry *); /* expedite llinfo refresh */ - struct rt_metrics rt_rmx; /* metrics used by rx'ing protocols */ -#define rt_use rt_rmx.rmx_pksent - struct rtentry *rt_gwroute; /* implied entry for gatewayed routes */ - struct rtentry *rt_parent; /* cloning parent of this route */ - struct nstat_counts *rt_stats; /* route stats */ + struct rt_metrics rt_rmx; /* metrics used by rx'ing protocols */ +#define rt_use rt_rmx.rmx_pksent + struct rtentry *rt_gwroute; /* implied entry for gatewayed routes */ + struct rtentry *rt_parent; /* cloning parent of this route */ + struct nstat_counts *rt_stats; /* route stats */ void (*rt_if_ref_fn)(struct ifnet *, int); /* interface ref func */ - uint32_t *rt_tree_genid; /* ptr to per-tree route_genid */ - uint64_t rt_expire; /* expiration time in uptime seconds */ - uint64_t base_calendartime; /* calendar time upon entry creation */ - uint64_t base_uptime; /* uptime upon entry creation */ - u_int32_t rtt_hist[NRTT_HIST]; /* RTT history sample by TCP connections */ - u_int32_t rtt_min; /* minimum RTT computed from history */ - u_int32_t rtt_expire_ts; /* RTT history expire timestamp */ - u_int8_t rtt_index; /* Index into RTT history */ + uint32_t *rt_tree_genid; /* ptr to per-tree route_genid */ + uint64_t rt_expire; /* expiration time in uptime seconds */ + uint64_t base_calendartime; /* calendar time upon entry creation */ + uint64_t base_uptime; /* uptime upon entry creation */ + u_int32_t rtt_hist[NRTT_HIST]; /* RTT history sample by TCP connections */ + u_int32_t rtt_min; /* minimum RTT computed from history */ + u_int32_t rtt_expire_ts; /* RTT history expire timestamp */ + u_int8_t rtt_index; /* Index into RTT history */ /* Event handler context for the rtentrt */ struct eventhandler_lists_ctxt rt_evhdlr_ctxt; }; @@ -260,16 +260,16 @@ EVENTHANDLER_DECLARE(route_event, route_event_fn); /* * Synchronize route entry's generation ID with the tree's. */ -#define RT_GENID_SYNC(_rt) do { \ - if ((_rt)->rt_tree_genid != NULL) \ - (_rt)->rt_genid = *(_rt)->rt_tree_genid; \ +#define RT_GENID_SYNC(_rt) do { \ + if ((_rt)->rt_tree_genid != NULL) \ + (_rt)->rt_genid = *(_rt)->rt_tree_genid; \ } while (0) /* * Indicates whether or not the route entry's generation ID is stale. 
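
RT_GENID_SYNC above and RT_GENID_OUTOFSYNC just below implement lazy cache invalidation: each rtentry snapshots the per-tree generation counter at sync time, and bumping that one counter invalidates every cached entry at once without touching them individually. A self-contained sketch of the idiom (illustrative only; the names here are invented, not xnu's):

#include <stddef.h>
#include <stdint.h>

struct cache_entry {
	uint32_t genid;              /* snapshot of *tree_genid */
	const uint32_t *tree_genid;  /* points at the per-tree counter */
};

/* Stale when the tree counter has moved past our snapshot. */
static int
entry_out_of_sync(const struct cache_entry *e)
{
	return e->tree_genid != NULL && *e->tree_genid != e->genid;
}

/* Re-validate by taking a fresh snapshot. */
static void
entry_sync(struct cache_entry *e)
{
	if (e->tree_genid != NULL) {
		e->genid = *e->tree_genid;
	}
}
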
*/ -#define RT_GENID_OUTOFSYNC(_rt) \ - ((_rt)->rt_tree_genid != NULL && \ +#define RT_GENID_OUTOFSYNC(_rt) \ + ((_rt)->rt_tree_genid != NULL && \ *(_rt)->rt_tree_genid != (_rt)->rt_genid) enum { @@ -280,93 +280,93 @@ enum { extern int route_op_entitlement_check(struct socket *, kauth_cred_t, int, boolean_t); #endif /* BSD_KERNEL_PRIVATE */ -#define RTF_UP 0x1 /* route usable */ -#define RTF_GATEWAY 0x2 /* destination is a gateway */ -#define RTF_HOST 0x4 /* host entry (net otherwise) */ -#define RTF_REJECT 0x8 /* host or net unreachable */ -#define RTF_DYNAMIC 0x10 /* created dynamically (by redirect) */ -#define RTF_MODIFIED 0x20 /* modified dynamically (by redirect) */ -#define RTF_DONE 0x40 /* message confirmed */ -#define RTF_DELCLONE 0x80 /* delete cloned route */ -#define RTF_CLONING 0x100 /* generate new routes on use */ -#define RTF_XRESOLVE 0x200 /* external daemon resolves name */ -#define RTF_LLINFO 0x400 /* DEPRECATED - exists ONLY for backward - compatibility */ -#define RTF_LLDATA 0x400 /* used by apps to add/del L2 entries */ -#define RTF_STATIC 0x800 /* manually added */ -#define RTF_BLACKHOLE 0x1000 /* just discard pkts (during updates) */ -#define RTF_NOIFREF 0x2000 /* not eligible for RTF_IFREF */ -#define RTF_PROTO2 0x4000 /* protocol specific routing flag */ -#define RTF_PROTO1 0x8000 /* protocol specific routing flag */ - -#define RTF_PRCLONING 0x10000 /* protocol requires cloning */ -#define RTF_WASCLONED 0x20000 /* route generated through cloning */ -#define RTF_PROTO3 0x40000 /* protocol specific routing flag */ - /* 0x80000 unused */ -#define RTF_PINNED 0x100000 /* future use */ -#define RTF_LOCAL 0x200000 /* route represents a local address */ -#define RTF_BROADCAST 0x400000 /* route represents a bcast address */ -#define RTF_MULTICAST 0x800000 /* route represents a mcast address */ -#define RTF_IFSCOPE 0x1000000 /* has valid interface scope */ -#define RTF_CONDEMNED 0x2000000 /* defunct; no longer modifiable */ -#define RTF_IFREF 0x4000000 /* route holds a ref to interface */ -#define RTF_PROXY 0x8000000 /* proxying, no interface scope */ -#define RTF_ROUTER 0x10000000 /* host is a router */ -#define RTF_DEAD 0x20000000 /* Route entry is being freed */ - /* 0x40000000 and up unassigned */ - -#define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */ -#define RTF_BITS \ +#define RTF_UP 0x1 /* route usable */ +#define RTF_GATEWAY 0x2 /* destination is a gateway */ +#define RTF_HOST 0x4 /* host entry (net otherwise) */ +#define RTF_REJECT 0x8 /* host or net unreachable */ +#define RTF_DYNAMIC 0x10 /* created dynamically (by redirect) */ +#define RTF_MODIFIED 0x20 /* modified dynamically (by redirect) */ +#define RTF_DONE 0x40 /* message confirmed */ +#define RTF_DELCLONE 0x80 /* delete cloned route */ +#define RTF_CLONING 0x100 /* generate new routes on use */ +#define RTF_XRESOLVE 0x200 /* external daemon resolves name */ +#define RTF_LLINFO 0x400 /* DEPRECATED - exists ONLY for backward + * compatibility */ +#define RTF_LLDATA 0x400 /* used by apps to add/del L2 entries */ +#define RTF_STATIC 0x800 /* manually added */ +#define RTF_BLACKHOLE 0x1000 /* just discard pkts (during updates) */ +#define RTF_NOIFREF 0x2000 /* not eligible for RTF_IFREF */ +#define RTF_PROTO2 0x4000 /* protocol specific routing flag */ +#define RTF_PROTO1 0x8000 /* protocol specific routing flag */ + +#define RTF_PRCLONING 0x10000 /* protocol requires cloning */ +#define RTF_WASCLONED 0x20000 /* route generated through cloning */ +#define RTF_PROTO3 0x40000 /* protocol specific routing 
flag */ + /* 0x80000 unused */ +#define RTF_PINNED 0x100000 /* future use */ +#define RTF_LOCAL 0x200000 /* route represents a local address */ +#define RTF_BROADCAST 0x400000 /* route represents a bcast address */ +#define RTF_MULTICAST 0x800000 /* route represents a mcast address */ +#define RTF_IFSCOPE 0x1000000 /* has valid interface scope */ +#define RTF_CONDEMNED 0x2000000 /* defunct; no longer modifiable */ +#define RTF_IFREF 0x4000000 /* route holds a ref to interface */ +#define RTF_PROXY 0x8000000 /* proxying, no interface scope */ +#define RTF_ROUTER 0x10000000 /* host is a router */ +#define RTF_DEAD 0x20000000 /* Route entry is being freed */ + /* 0x40000000 and up unassigned */ + +#define RTPRF_OURS RTF_PROTO3 /* set on routes we manage */ +#define RTF_BITS \ "\020\1UP\2GATEWAY\3HOST\4REJECT\5DYNAMIC\6MODIFIED\7DONE" \ "\10DELCLONE\11CLONING\12XRESOLVE\13LLINFO\14STATIC\15BLACKHOLE" \ "\16NOIFREF\17PROTO2\20PROTO1\21PRCLONING\22WASCLONED\23PROTO3" \ "\25PINNED\26LOCAL\27BROADCAST\30MULTICAST\31IFSCOPE\32CONDEMNED" \ "\33IFREF\34PROXY\35ROUTER" -#define IS_DIRECT_HOSTROUTE(rt) \ +#define IS_DIRECT_HOSTROUTE(rt) \ (((rt)->rt_flags & (RTF_HOST | RTF_GATEWAY)) == RTF_HOST) /* * Routing statistics. */ -struct rtstat { - short rts_badredirect; /* bogus redirect calls */ - short rts_dynamic; /* routes created by redirects */ - short rts_newgateway; /* routes modified by redirects */ - short rts_unreach; /* lookups which failed */ - short rts_wildcard; /* lookups satisfied by a wildcard */ - short rts_badrtgwroute; /* route to gateway is not direct */ +struct rtstat { + short rts_badredirect; /* bogus redirect calls */ + short rts_dynamic; /* routes created by redirects */ + short rts_newgateway; /* routes modified by redirects */ + short rts_unreach; /* lookups which failed */ + short rts_wildcard; /* lookups satisfied by a wildcard */ + short rts_badrtgwroute; /* route to gateway is not direct */ }; /* * Structures for routing messages. */ struct rt_msghdr { - u_short rtm_msglen; /* to skip over non-understood messages */ - u_char rtm_version; /* future binary compatibility */ - u_char rtm_type; /* message type */ - u_short rtm_index; /* index for associated ifp */ - int rtm_flags; /* flags, incl. kern & message, e.g. DONE */ - int rtm_addrs; /* bitmask identifying sockaddrs in msg */ - pid_t rtm_pid; /* identify sender */ - int rtm_seq; /* for sender to identify action */ - int rtm_errno; /* why failed */ - int rtm_use; /* from rtentry */ - u_int32_t rtm_inits; /* which metrics we are initializing */ + u_short rtm_msglen; /* to skip over non-understood messages */ + u_char rtm_version; /* future binary compatibility */ + u_char rtm_type; /* message type */ + u_short rtm_index; /* index for associated ifp */ + int rtm_flags; /* flags, incl. kern & message, e.g. DONE */ + int rtm_addrs; /* bitmask identifying sockaddrs in msg */ + pid_t rtm_pid; /* identify sender */ + int rtm_seq; /* for sender to identify action */ + int rtm_errno; /* why failed */ + int rtm_use; /* from rtentry */ + u_int32_t rtm_inits; /* which metrics we are initializing */ struct rt_metrics rtm_rmx; /* metrics themselves */ }; struct rt_msghdr2 { - u_short rtm_msglen; /* to skip over non-understood messages */ - u_char rtm_version; /* future binary compatibility */ - u_char rtm_type; /* message type */ - u_short rtm_index; /* index for associated ifp */ - int rtm_flags; /* flags, incl. kern & message, e.g. 
DONE */ - int rtm_addrs; /* bitmask identifying sockaddrs in msg */ - int32_t rtm_refcnt; /* reference count */ - int rtm_parentflags; /* flags of the parent route */ - int rtm_reserved; /* reserved field set to 0 */ - int rtm_use; /* from rtentry */ - u_int32_t rtm_inits; /* which metrics we are initializing */ + u_short rtm_msglen; /* to skip over non-understood messages */ + u_char rtm_version; /* future binary compatibility */ + u_char rtm_type; /* message type */ + u_short rtm_index; /* index for associated ifp */ + int rtm_flags; /* flags, incl. kern & message, e.g. DONE */ + int rtm_addrs; /* bitmask identifying sockaddrs in msg */ + int32_t rtm_refcnt; /* reference count */ + int rtm_parentflags; /* flags of the parent route */ + int rtm_reserved; /* reserved field set to 0 */ + int rtm_use; /* from rtentry */ + u_int32_t rtm_inits; /* which metrics we are initializing */ struct rt_metrics rtm_rmx; /* metrics themselves */ }; @@ -380,182 +380,182 @@ struct kev_netevent_apnfallbk_data { * Route reachability info. */ struct rt_reach_info { - u_int32_t ri_refcnt; /* reference count */ - u_int32_t ri_probes; /* total # of probes */ - u_int64_t ri_snd_expire; /* tx expiration (calendar) time */ - u_int64_t ri_rcv_expire; /* rx expiration (calendar) time */ - int32_t ri_rssi; /* received signal strength */ - int32_t ri_lqm; /* link quality metric */ - int32_t ri_npm; /* node proximity metric */ + u_int32_t ri_refcnt; /* reference count */ + u_int32_t ri_probes; /* total # of probes */ + u_int64_t ri_snd_expire; /* tx expiration (calendar) time */ + u_int64_t ri_rcv_expire; /* rx expiration (calendar) time */ + int32_t ri_rssi; /* received signal strength */ + int32_t ri_lqm; /* link quality metric */ + int32_t ri_npm; /* node proximity metric */ }; /* * Extended routing message header (private). */ struct rt_msghdr_ext { - u_short rtm_msglen; /* to skip over non-understood messages */ - u_char rtm_version; /* future binary compatibility */ - u_char rtm_type; /* message type */ - u_int32_t rtm_index; /* index for associated ifp */ - u_int32_t rtm_flags; /* flags, incl. kern & message, e.g. DONE */ - u_int32_t rtm_reserved; /* for future use */ - u_int32_t rtm_addrs; /* bitmask identifying sockaddrs in msg */ - pid_t rtm_pid; /* identify sender */ - int rtm_seq; /* for sender to identify action */ - int rtm_errno; /* why failed */ - u_int32_t rtm_use; /* from rtentry */ - u_int32_t rtm_inits; /* which metrics we are initializing */ - struct rt_metrics rtm_rmx; /* metrics themselves */ - struct rt_reach_info rtm_ri; /* route reachability info */ + u_short rtm_msglen; /* to skip over non-understood messages */ + u_char rtm_version; /* future binary compatibility */ + u_char rtm_type; /* message type */ + u_int32_t rtm_index; /* index for associated ifp */ + u_int32_t rtm_flags; /* flags, incl. kern & message, e.g. DONE */ + u_int32_t rtm_reserved; /* for future use */ + u_int32_t rtm_addrs; /* bitmask identifying sockaddrs in msg */ + pid_t rtm_pid; /* identify sender */ + int rtm_seq; /* for sender to identify action */ + int rtm_errno; /* why failed */ + u_int32_t rtm_use; /* from rtentry */ + u_int32_t rtm_inits; /* which metrics we are initializing */ + struct rt_metrics rtm_rmx; /* metrics themselves */ + struct rt_reach_info rtm_ri; /* route reachability info */ }; #endif /* PRIVATE */ -#define RTM_VERSION 5 /* Up the ante and ignore older versions */ +#define RTM_VERSION 5 /* Up the ante and ignore older versions */ /* * Message types. 
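
The RTM_* message types listed below travel over a PF_ROUTE socket as an rt_msghdr followed by the sockaddrs named in rtm_addrs. As a rough user-space sketch of the wire format (not from xnu; error handling trimmed, and writing to a routing socket may require elevated privileges), an RTM_GET query for a single destination looks like:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <net/route.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int
main(void)
{
	struct {
		struct rt_msghdr hdr;
		struct sockaddr_in dst;
	} msg;
	int s = socket(PF_ROUTE, SOCK_RAW, 0);

	if (s < 0) {
		perror("socket");
		return 1;
	}
	memset(&msg, 0, sizeof(msg));
	msg.hdr.rtm_msglen = sizeof(msg);
	msg.hdr.rtm_version = RTM_VERSION;
	msg.hdr.rtm_type = RTM_GET;
	msg.hdr.rtm_addrs = RTA_DST;            /* one sockaddr follows */
	msg.hdr.rtm_pid = getpid();
	msg.hdr.rtm_seq = 1;
	msg.dst.sin_len = sizeof(msg.dst);
	msg.dst.sin_family = AF_INET;
	msg.dst.sin_addr.s_addr = htonl(0x01010101);  /* 1.1.1.1 */
	if (write(s, &msg, sizeof(msg)) < 0) {
		perror("write");
	}
	/* The kernel echoes the answer back on the same socket: an
	 * rt_msghdr with RTF_DONE set, followed by the sockaddrs named
	 * in its rtm_addrs. */
	close(s);
	return 0;
}
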
*/ -#define RTM_ADD 0x1 /* Add Route */ -#define RTM_DELETE 0x2 /* Delete Route */ -#define RTM_CHANGE 0x3 /* Change Metrics or flags */ -#define RTM_GET 0x4 /* Report Metrics */ -#define RTM_LOSING 0x5 /* RTM_LOSING is no longer generated by xnu - and is deprecated */ -#define RTM_REDIRECT 0x6 /* Told to use different route */ -#define RTM_MISS 0x7 /* Lookup failed on this address */ -#define RTM_LOCK 0x8 /* fix specified metrics */ -#define RTM_OLDADD 0x9 /* caused by SIOCADDRT */ -#define RTM_OLDDEL 0xa /* caused by SIOCDELRT */ -#define RTM_RESOLVE 0xb /* req to resolve dst to LL addr */ -#define RTM_NEWADDR 0xc /* address being added to iface */ -#define RTM_DELADDR 0xd /* address being removed from iface */ -#define RTM_IFINFO 0xe /* iface going up/down etc. */ -#define RTM_NEWMADDR 0xf /* mcast group membership being added to if */ -#define RTM_DELMADDR 0x10 /* mcast group membership being deleted */ +#define RTM_ADD 0x1 /* Add Route */ +#define RTM_DELETE 0x2 /* Delete Route */ +#define RTM_CHANGE 0x3 /* Change Metrics or flags */ +#define RTM_GET 0x4 /* Report Metrics */ +#define RTM_LOSING 0x5 /* RTM_LOSING is no longer generated by xnu + * and is deprecated */ +#define RTM_REDIRECT 0x6 /* Told to use different route */ +#define RTM_MISS 0x7 /* Lookup failed on this address */ +#define RTM_LOCK 0x8 /* fix specified metrics */ +#define RTM_OLDADD 0x9 /* caused by SIOCADDRT */ +#define RTM_OLDDEL 0xa /* caused by SIOCDELRT */ +#define RTM_RESOLVE 0xb /* req to resolve dst to LL addr */ +#define RTM_NEWADDR 0xc /* address being added to iface */ +#define RTM_DELADDR 0xd /* address being removed from iface */ +#define RTM_IFINFO 0xe /* iface going up/down etc. */ +#define RTM_NEWMADDR 0xf /* mcast group membership being added to if */ +#define RTM_DELMADDR 0x10 /* mcast group membership being deleted */ #ifdef PRIVATE -#define RTM_GET_SILENT 0x11 +#define RTM_GET_SILENT 0x11 #endif /* PRIVATE */ -#define RTM_IFINFO2 0x12 /* */ -#define RTM_NEWMADDR2 0x13 /* */ -#define RTM_GET2 0x14 /* */ +#define RTM_IFINFO2 0x12 /* */ +#define RTM_NEWMADDR2 0x13 /* */ +#define RTM_GET2 0x14 /* */ #ifdef PRIVATE -#define RTM_GET_EXT 0x15 +#define RTM_GET_EXT 0x15 #endif /* PRIVATE */ /* * Bitmask values for rtm_inits and rmx_locks. */ -#define RTV_MTU 0x1 /* init or lock _mtu */ -#define RTV_HOPCOUNT 0x2 /* init or lock _hopcount */ -#define RTV_EXPIRE 0x4 /* init or lock _expire */ -#define RTV_RPIPE 0x8 /* init or lock _recvpipe */ -#define RTV_SPIPE 0x10 /* init or lock _sendpipe */ -#define RTV_SSTHRESH 0x20 /* init or lock _ssthresh */ -#define RTV_RTT 0x40 /* init or lock _rtt */ -#define RTV_RTTVAR 0x80 /* init or lock _rttvar */ +#define RTV_MTU 0x1 /* init or lock _mtu */ +#define RTV_HOPCOUNT 0x2 /* init or lock _hopcount */ +#define RTV_EXPIRE 0x4 /* init or lock _expire */ +#define RTV_RPIPE 0x8 /* init or lock _recvpipe */ +#define RTV_SPIPE 0x10 /* init or lock _sendpipe */ +#define RTV_SSTHRESH 0x20 /* init or lock _ssthresh */ +#define RTV_RTT 0x40 /* init or lock _rtt */ +#define RTV_RTTVAR 0x80 /* init or lock _rttvar */ #ifdef PRIVATE -#define RTV_REFRESH_HOST 0x100 /* init host route to expedite refresh */ +#define RTV_REFRESH_HOST 0x100 /* init host route to expedite refresh */ #endif /* * Bitmask values for rtm_addrs. 
*/ -#define RTA_DST 0x1 /* destination sockaddr present */ -#define RTA_GATEWAY 0x2 /* gateway sockaddr present */ -#define RTA_NETMASK 0x4 /* netmask sockaddr present */ -#define RTA_GENMASK 0x8 /* cloning mask sockaddr present */ -#define RTA_IFP 0x10 /* interface name sockaddr present */ -#define RTA_IFA 0x20 /* interface addr sockaddr present */ -#define RTA_AUTHOR 0x40 /* sockaddr for author of redirect */ -#define RTA_BRD 0x80 /* for NEWADDR, broadcast or p-p dest addr */ +#define RTA_DST 0x1 /* destination sockaddr present */ +#define RTA_GATEWAY 0x2 /* gateway sockaddr present */ +#define RTA_NETMASK 0x4 /* netmask sockaddr present */ +#define RTA_GENMASK 0x8 /* cloning mask sockaddr present */ +#define RTA_IFP 0x10 /* interface name sockaddr present */ +#define RTA_IFA 0x20 /* interface addr sockaddr present */ +#define RTA_AUTHOR 0x40 /* sockaddr for author of redirect */ +#define RTA_BRD 0x80 /* for NEWADDR, broadcast or p-p dest addr */ /* * Index offsets for sockaddr array for alternate internal encoding. */ -#define RTAX_DST 0 /* destination sockaddr present */ -#define RTAX_GATEWAY 1 /* gateway sockaddr present */ -#define RTAX_NETMASK 2 /* netmask sockaddr present */ -#define RTAX_GENMASK 3 /* cloning mask sockaddr present */ -#define RTAX_IFP 4 /* interface name sockaddr present */ -#define RTAX_IFA 5 /* interface addr sockaddr present */ -#define RTAX_AUTHOR 6 /* sockaddr for author of redirect */ -#define RTAX_BRD 7 /* for NEWADDR, broadcast or p-p dest addr */ -#define RTAX_MAX 8 /* size of array to allocate */ +#define RTAX_DST 0 /* destination sockaddr present */ +#define RTAX_GATEWAY 1 /* gateway sockaddr present */ +#define RTAX_NETMASK 2 /* netmask sockaddr present */ +#define RTAX_GENMASK 3 /* cloning mask sockaddr present */ +#define RTAX_IFP 4 /* interface name sockaddr present */ +#define RTAX_IFA 5 /* interface addr sockaddr present */ +#define RTAX_AUTHOR 6 /* sockaddr for author of redirect */ +#define RTAX_BRD 7 /* for NEWADDR, broadcast or p-p dest addr */ +#define RTAX_MAX 8 /* size of array to allocate */ struct rt_addrinfo { - int rti_addrs; - struct sockaddr *rti_info[RTAX_MAX]; + int rti_addrs; + struct sockaddr *rti_info[RTAX_MAX]; }; #ifdef PRIVATE /* * For scoped routing; a zero interface scope value means nil/no scope. */ -#define IFSCOPE_NONE 0 +#define IFSCOPE_NONE 0 #endif /* PRIVATE */ #ifdef BSD_KERNEL_PRIVATE /* * Generic call trace used by some subsystems (e.g. 
route, ifaddr) */ -#define CTRACE_STACK_SIZE 8 /* depth of stack trace */ -#define CTRACE_HIST_SIZE 4 /* refcnt history size */ +#define CTRACE_STACK_SIZE 8 /* depth of stack trace */ +#define CTRACE_HIST_SIZE 4 /* refcnt history size */ typedef struct ctrace { - void *th; /* thread ptr */ - void *pc[CTRACE_STACK_SIZE]; /* PC stack trace */ + void *th; /* thread ptr */ + void *pc[CTRACE_STACK_SIZE]; /* PC stack trace */ } ctrace_t; extern void ctrace_record(ctrace_t *); -#define RT_LOCK_ASSERT_HELD(_rt) \ +#define RT_LOCK_ASSERT_HELD(_rt) \ LCK_MTX_ASSERT(&(_rt)->rt_lock, LCK_MTX_ASSERT_OWNED) -#define RT_LOCK_ASSERT_NOTHELD(_rt) \ +#define RT_LOCK_ASSERT_NOTHELD(_rt) \ LCK_MTX_ASSERT(&(_rt)->rt_lock, LCK_MTX_ASSERT_NOTOWNED) -#define RT_LOCK(_rt) do { \ - rt_lock(_rt, FALSE); \ +#define RT_LOCK(_rt) do { \ + rt_lock(_rt, FALSE); \ } while (0) -#define RT_LOCK_SPIN(_rt) do { \ - rt_lock(_rt, TRUE); \ +#define RT_LOCK_SPIN(_rt) do { \ + rt_lock(_rt, TRUE); \ } while (0) -#define RT_CONVERT_LOCK(_rt) do { \ - RT_LOCK_ASSERT_HELD(_rt); \ - lck_mtx_convert_spin(&(_rt)->rt_lock); \ +#define RT_CONVERT_LOCK(_rt) do { \ + RT_LOCK_ASSERT_HELD(_rt); \ + lck_mtx_convert_spin(&(_rt)->rt_lock); \ } while (0) -#define RT_UNLOCK(_rt) do { \ - rt_unlock(_rt); \ +#define RT_UNLOCK(_rt) do { \ + rt_unlock(_rt); \ } while (0) -#define RT_ADDREF_LOCKED(_rt) do { \ - rtref(_rt); \ +#define RT_ADDREF_LOCKED(_rt) do { \ + rtref(_rt); \ } while (0) /* * Spin variant mutex is used here; caller is responsible for * converting any previously-held similar lock to full mutex. */ -#define RT_ADDREF(_rt) do { \ - RT_LOCK_SPIN(_rt); \ - RT_ADDREF_LOCKED(_rt); \ - RT_UNLOCK(_rt); \ +#define RT_ADDREF(_rt) do { \ + RT_LOCK_SPIN(_rt); \ + RT_ADDREF_LOCKED(_rt); \ + RT_UNLOCK(_rt); \ } while (0) -#define RT_REMREF_LOCKED(_rt) do { \ - (void) rtunref(_rt); \ +#define RT_REMREF_LOCKED(_rt) do { \ + (void) rtunref(_rt); \ } while (0) /* * Spin variant mutex is used here; caller is responsible for * converting any previously-held similar lock to full mutex. */ -#define RT_REMREF(_rt) do { \ - RT_LOCK_SPIN(_rt); \ - RT_REMREF_LOCKED(_rt); \ - RT_UNLOCK(_rt); \ +#define RT_REMREF(_rt) do { \ + RT_LOCK_SPIN(_rt); \ + RT_REMREF_LOCKED(_rt); \ + RT_UNLOCK(_rt); \ } while (0) /* @@ -565,15 +565,15 @@ extern void ctrace_record(ctrace_t *); * expects expiration times in terms of calendar times. This is used when * reporting rt_expire, ln_expire, etc. values to user space. 
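
NET_CALCULATE_CLOCKSKEW, defined just below, computes how far the calendar clock has drifted relative to the uptime clock since an entry was created: skew = (calendar delta) - (uptime delta). A worked restatement (illustrative sketch, not part of the patch):

#include <stdint.h>

/* skew = (calendar_now - calendar_at_creation)
 *      - (uptime_now   - uptime_at_creation)
 * Nonzero when someone changed the system date after the entry was
 * created; the caller adds it back when reporting uptime-based
 * expirations as calendar times. */
static int64_t
clock_skew(int64_t cal_now, int64_t cal_then,
    int64_t up_now, int64_t up_then)
{
	return (cal_now - cal_then) - (up_now - up_then);
}

/* Example: entry created at calendar t=1000 with uptime 50; the date
 * is then set forward 300s. At calendar 1400, uptime 150:
 * skew = (1400 - 1000) - (150 - 50) = 300. */
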
*/ -#define NET_CALCULATE_CLOCKSKEW(cc, ic, cu, iu) \ +#define NET_CALCULATE_CLOCKSKEW(cc, ic, cu, iu) \ ((cc.tv_sec - ic) - (cu - iu)) extern unsigned int rt_verbose; -extern struct radix_node_head *rt_tables[AF_MAX+1]; +extern struct radix_node_head *rt_tables[AF_MAX + 1]; extern lck_mtx_t *rnh_lock; -extern uint32_t route_genid_inet; /* INET route generation count */ +extern uint32_t route_genid_inet; /* INET route generation count */ #if INET6 -extern uint32_t route_genid_inet6; /* INET6 route generation count */ +extern uint32_t route_genid_inet6; /* INET6 route generation count */ #endif /* INET6 */ extern int rttrash; extern unsigned int rte_debug; @@ -674,9 +674,9 @@ struct route_event { eventhandler_tag evtag; }; -#define rtev_ipaddr rt_addr._rtev_ipaddr -#define rtev_lladdr rt_addr._rtev_lladdr -#define rtev_addr_bytes rt_addr._rtev_addr_bytes +#define rtev_ipaddr rt_addr._rtev_ipaddr +#define rtev_lladdr rt_addr._rtev_lladdr +#define rtev_addr_bytes rt_addr._rtev_addr_bytes extern void route_event_init(struct route_event *p_route_ev, struct rtentry *rt, struct rtentry *gwrt, int route_ev_code); diff --git a/bsd/net/rtsock.c b/bsd/net/rtsock.c index 8ae08e206..23d7bf201 100644 --- a/bsd/net/rtsock.c +++ b/bsd/net/rtsock.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -96,20 +96,20 @@ MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables"); static struct sockaddr route_dst = { 2, PF_ROUTE, { 0, } }; static struct sockaddr route_src = { 2, PF_ROUTE, { 0, } }; -static struct sockaddr sa_zero = { sizeof (sa_zero), AF_INET, { 0, } }; +static struct sockaddr sa_zero = { sizeof(sa_zero), AF_INET, { 0, } }; struct route_cb { - u_int32_t ip_count; /* attached w/ AF_INET */ - u_int32_t ip6_count; /* attached w/ AF_INET6 */ - u_int32_t any_count; /* total attached */ + u_int32_t ip_count; /* attached w/ AF_INET */ + u_int32_t ip6_count; /* attached w/ AF_INET6 */ + u_int32_t any_count; /* total attached */ }; static struct route_cb route_cb; struct walkarg { - int w_tmemsize; - int w_op, w_arg; - caddr_t w_tmem; + int w_tmemsize; + int w_op, w_arg; + caddr_t w_tmem; struct sysctl_req *w_req; }; @@ -144,20 +144,20 @@ static int sysctl_rttrash(struct sysctl_req *); static int sysctl_rtsock SYSCTL_HANDLER_ARGS; SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD | CTLFLAG_LOCKED, - sysctl_rtsock, ""); + sysctl_rtsock, ""); -SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "routing"); +SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "routing"); /* Align x to 1024 (only power of 2) assuming x is positive */ -#define ALIGN_BYTES(x) do { \ - x = P2ALIGN(x, 1024); \ +#define ALIGN_BYTES(x) do { \ + x = P2ALIGN(x, 1024); \ } while(0) -#define ROUNDUP32(a) \ - ((a) > 0 ? (1 + (((a) - 1) | (sizeof (uint32_t) - 1))) : \ +#define ROUNDUP32(a) \ + ((a) > 0 ? 
(1 + (((a) - 1) | (sizeof (uint32_t) - 1))) : \ sizeof (uint32_t)) -#define ADVANCE32(x, n) \ +#define ADVANCE32(x, n) \ (x += ROUNDUP32((n)->sa_len)) /* @@ -167,7 +167,7 @@ SYSCTL_NODE(_net, OID_AUTO, route, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "routing"); static int rts_abort(struct socket *so) { - return (raw_usrreqs.pru_abort(so)); + return raw_usrreqs.pru_abort(so); } /* pru_accept is EOPNOTSUPP */ @@ -181,9 +181,10 @@ rts_attach(struct socket *so, int proto, struct proc *p) VERIFY(so->so_pcb == NULL); - MALLOC(rp, struct rawcb *, sizeof (*rp), M_PCB, M_WAITOK | M_ZERO); - if (rp == NULL) - return (ENOBUFS); + MALLOC(rp, struct rawcb *, sizeof(*rp), M_PCB, M_WAITOK | M_ZERO); + if (rp == NULL) { + return ENOBUFS; + } so->so_pcb = (caddr_t)rp; /* don't use raw_usrreqs.pru_attach, it checks for SS_PRIV */ @@ -193,7 +194,7 @@ rts_attach(struct socket *so, int proto, struct proc *p) FREE(rp, M_PCB); so->so_pcb = NULL; so->so_flags |= SOF_PCBCLEARING; - return (error); + return error; } switch (rp->rcb_proto.sp_protocol) { @@ -209,19 +210,19 @@ rts_attach(struct socket *so, int proto, struct proc *p) /* the socket is already locked when we enter rts_attach */ soisconnected(so); so->so_options |= SO_USELOOPBACK; - return (0); + return 0; } static int rts_bind(struct socket *so, struct sockaddr *nam, struct proc *p) { - return (raw_usrreqs.pru_bind(so, nam, p)); /* xxx just EINVAL */ + return raw_usrreqs.pru_bind(so, nam, p); /* xxx just EINVAL */ } static int rts_connect(struct socket *so, struct sockaddr *nam, struct proc *p) { - return (raw_usrreqs.pru_connect(so, nam, p)); /* XXX just EINVAL */ + return raw_usrreqs.pru_connect(so, nam, p); /* XXX just EINVAL */ } /* pru_connect2 is EOPNOTSUPP */ @@ -243,13 +244,13 @@ rts_detach(struct socket *so) break; } atomic_add_32(&route_cb.any_count, -1); - return (raw_usrreqs.pru_detach(so)); + return raw_usrreqs.pru_detach(so); } static int rts_disconnect(struct socket *so) { - return (raw_usrreqs.pru_disconnect(so)); + return raw_usrreqs.pru_disconnect(so); } /* pru_listen is EOPNOTSUPP */ @@ -257,7 +258,7 @@ rts_disconnect(struct socket *so) static int rts_peeraddr(struct socket *so, struct sockaddr **nam) { - return (raw_usrreqs.pru_peeraddr(so, nam)); + return raw_usrreqs.pru_peeraddr(so, nam); } /* pru_rcvd is EOPNOTSUPP */ @@ -267,7 +268,7 @@ static int rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct proc *p) { - return (raw_usrreqs.pru_send(so, flags, m, nam, control, p)); + return raw_usrreqs.pru_send(so, flags, m, nam, control, p); } /* pru_sense is null */ @@ -275,28 +276,28 @@ rts_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, static int rts_shutdown(struct socket *so) { - return (raw_usrreqs.pru_shutdown(so)); + return raw_usrreqs.pru_shutdown(so); } static int rts_sockaddr(struct socket *so, struct sockaddr **nam) { - return (raw_usrreqs.pru_sockaddr(so, nam)); + return raw_usrreqs.pru_sockaddr(so, nam); } static struct pr_usrreqs route_usrreqs = { - .pru_abort = rts_abort, - .pru_attach = rts_attach, - .pru_bind = rts_bind, - .pru_connect = rts_connect, - .pru_detach = rts_detach, - .pru_disconnect = rts_disconnect, - .pru_peeraddr = rts_peeraddr, - .pru_send = rts_send, - .pru_shutdown = rts_shutdown, - .pru_sockaddr = rts_sockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = rts_abort, + .pru_attach = rts_attach, + .pru_bind = rts_bind, + .pru_connect = rts_connect, + .pru_detach = rts_detach, + .pru_disconnect = rts_disconnect, + 
.pru_peeraddr = rts_peeraddr, + .pru_send = rts_send, + .pru_shutdown = rts_shutdown, + .pru_sockaddr = rts_sockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; /*ARGSUSED*/ @@ -316,10 +317,11 @@ route_output(struct mbuf *m, struct socket *so) unsigned int ifscope = IFSCOPE_NONE; struct rawcb *rp = NULL; boolean_t is_router = FALSE; -#define senderr(e) { error = (e); goto flush; } - if (m == NULL || ((m->m_len < sizeof (intptr_t)) && - (m = m_pullup(m, sizeof (intptr_t))) == NULL)) - return (ENOBUFS); +#define senderr(e) { error = (e); goto flush; } + if (m == NULL || ((m->m_len < sizeof(intptr_t)) && + (m = m_pullup(m, sizeof(intptr_t))) == NULL)) { + return ENOBUFS; + } VERIFY(m->m_flags & M_PKTHDR); /* @@ -330,7 +332,7 @@ route_output(struct mbuf *m, struct socket *so) lck_mtx_lock(rnh_lock); len = m->m_pkthdr.len; - if (len < sizeof (*rtm) || + if (len < sizeof(*rtm) || len != mtod(m, struct rt_msghdr *)->rtm_msglen) { info.rti_info[RTAX_DST] = NULL; senderr(EINVAL); @@ -351,8 +353,9 @@ route_output(struct mbuf *m, struct socket *so) * all RTM_GETs to be silent in the future, so this is private for now. */ if (rtm->rtm_type == RTM_GET_SILENT) { - if (!(so->so_options & SO_USELOOPBACK)) + if (!(so->so_options & SO_USELOOPBACK)) { senderr(EINVAL); + } sendonlytoself = 1; rtm->rtm_type = RTM_GET; } @@ -375,17 +378,19 @@ route_output(struct mbuf *m, struct socket *so) if (info.rti_info[RTAX_DST] == NULL || info.rti_info[RTAX_DST]->sa_family >= AF_MAX || (info.rti_info[RTAX_GATEWAY] != NULL && - info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX)) + info.rti_info[RTAX_GATEWAY]->sa_family >= AF_MAX)) { senderr(EINVAL); + } if (info.rti_info[RTAX_DST]->sa_family == AF_INET && - info.rti_info[RTAX_DST]->sa_len != sizeof (dst_in)) { + info.rti_info[RTAX_DST]->sa_len != sizeof(dst_in)) { /* At minimum, we need up to sin_addr */ if (info.rti_info[RTAX_DST]->sa_len < - offsetof(struct sockaddr_in, sin_zero)) + offsetof(struct sockaddr_in, sin_zero)) { senderr(EINVAL); - bzero(&dst_in, sizeof (dst_in)); - dst_in.sin_len = sizeof (dst_in); + } + bzero(&dst_in, sizeof(dst_in)); + dst_in.sin_len = sizeof(dst_in); dst_in.sin_family = AF_INET; dst_in.sin_port = SIN(info.rti_info[RTAX_DST])->sin_port; dst_in.sin_addr = SIN(info.rti_info[RTAX_DST])->sin_addr; @@ -395,13 +400,14 @@ route_output(struct mbuf *m, struct socket *so) if (info.rti_info[RTAX_GATEWAY] != NULL && info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET && - info.rti_info[RTAX_GATEWAY]->sa_len != sizeof (gate_in)) { + info.rti_info[RTAX_GATEWAY]->sa_len != sizeof(gate_in)) { /* At minimum, we need up to sin_addr */ if (info.rti_info[RTAX_GATEWAY]->sa_len < - offsetof(struct sockaddr_in, sin_zero)) + offsetof(struct sockaddr_in, sin_zero)) { senderr(EINVAL); - bzero(&gate_in, sizeof (gate_in)); - gate_in.sin_len = sizeof (gate_in); + } + bzero(&gate_in, sizeof(gate_in)); + gate_in.sin_len = sizeof(gate_in); gate_in.sin_family = AF_INET; gate_in.sin_port = SIN(info.rti_info[RTAX_GATEWAY])->sin_port; gate_in.sin_addr = SIN(info.rti_info[RTAX_GATEWAY])->sin_addr; @@ -412,11 +418,12 @@ route_output(struct mbuf *m, struct socket *so) struct radix_node *t; t = rn_addmask((caddr_t)info.rti_info[RTAX_GENMASK], 0, 1); if (t != NULL && Bcmp(info.rti_info[RTAX_GENMASK], - t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0) + t->rn_key, *(u_char *)info.rti_info[RTAX_GENMASK]) == 0) { info.rti_info[RTAX_GENMASK] = (struct sockaddr *)(t->rn_key); - else + } else { senderr(ENOBUFS); + } } /* @@ -424,8 +431,9 @@ route_output(struct mbuf 
*m, struct socket *so) */ if (rtm->rtm_flags & RTF_IFSCOPE) { if (info.rti_info[RTAX_DST]->sa_family != AF_INET && - info.rti_info[RTAX_DST]->sa_family != AF_INET6) + info.rti_info[RTAX_DST]->sa_family != AF_INET6) { senderr(EINVAL); + } ifscope = rtm->rtm_index; } /* @@ -441,15 +449,17 @@ route_output(struct mbuf *m, struct socket *so) } } ifnet_head_done(); - if (intcoproc_scope == ifscope && current_proc()->p_pid != 0) + if (intcoproc_scope == ifscope && current_proc()->p_pid != 0) { senderr(EINVAL); + } } /* * RTF_PROXY can only be set internally from within the kernel. */ - if (rtm->rtm_flags & RTF_PROXY) + if (rtm->rtm_flags & RTF_PROXY) { senderr(EINVAL); + } /* * For AF_INET, always zero out the embedded scope ID. If this is @@ -458,15 +468,18 @@ route_output(struct mbuf *m, struct socket *so) * false interpretation of the scope ID because it's using the sin_zero * field, which might not be properly cleared by the requestor. */ - if (info.rti_info[RTAX_DST]->sa_family == AF_INET) + if (info.rti_info[RTAX_DST]->sa_family == AF_INET) { sin_set_ifscope(info.rti_info[RTAX_DST], IFSCOPE_NONE); + } if (info.rti_info[RTAX_GATEWAY] != NULL && - info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET) + info.rti_info[RTAX_GATEWAY]->sa_family == AF_INET) { sin_set_ifscope(info.rti_info[RTAX_GATEWAY], IFSCOPE_NONE); + } switch (rtm->rtm_type) { case RTM_ADD: - if (info.rti_info[RTAX_GATEWAY] == NULL) + if (info.rti_info[RTAX_GATEWAY] == NULL) { senderr(EINVAL); + } error = rtrequest_scoped_locked(RTM_ADD, info.rti_info[RTAX_DST], info.rti_info[RTAX_GATEWAY], @@ -530,16 +543,18 @@ route_output(struct mbuf *m, struct socket *so) case RTM_CHANGE: case RTM_LOCK: rnh = rt_tables[info.rti_info[RTAX_DST]->sa_family]; - if (rnh == NULL) + if (rnh == NULL) { senderr(EAFNOSUPPORT); + } /* * Lookup the best match based on the key-mask pair; * callee adds a reference and checks for root node. */ rt = rt_lookup(TRUE, info.rti_info[RTAX_DST], info.rti_info[RTAX_NETMASK], rnh, ifscope); - if (rt == NULL) + if (rt == NULL) { senderr(ESRCH); + } RT_LOCK(rt); /* @@ -582,33 +597,39 @@ report: } else if ((ifp = rt->rt_ifp) != NULL) { rtm->rtm_index = ifp->if_index; } - if (ifa2 != NULL) + if (ifa2 != NULL) { IFA_LOCK(ifa2); + } len = rt_msg2(rtm->rtm_type, &info, NULL, NULL, credp); - if (ifa2 != NULL) + if (ifa2 != NULL) { IFA_UNLOCK(ifa2); + } struct rt_msghdr *out_rtm; R_Malloc(out_rtm, struct rt_msghdr *, len); if (out_rtm == NULL) { RT_UNLOCK(rt); - if (ifa2 != NULL) + if (ifa2 != NULL) { IFA_REMREF(ifa2); + } senderr(ENOBUFS); } Bcopy(rtm, out_rtm, sizeof(struct rt_msghdr)); - if (ifa2 != NULL) + if (ifa2 != NULL) { IFA_LOCK(ifa2); + } (void) rt_msg2(out_rtm->rtm_type, &info, (caddr_t)out_rtm, NULL, &cred); - if (ifa2 != NULL) + if (ifa2 != NULL) { IFA_UNLOCK(ifa2); + } R_Free(rtm); rtm = out_rtm; rtm->rtm_flags = rt->rt_flags; rt_getmetrics(rt, &rtm->rtm_rmx); rtm->rtm_addrs = info.rti_addrs; - if (ifa2 != NULL) + if (ifa2 != NULL) { IFA_REMREF(ifa2); + } kauth_cred_unref(&cred); break; @@ -632,8 +653,9 @@ report: * default gateway. Changing flags still doesn't work. 
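
Per the RTF_IFSCOPE branch earlier in route_output(), a scoped request carries its interface scope in rtm_index, and only AF_INET/AF_INET6 destinations may be scoped. A hedged user-space sketch of forming such a request (assumes RTF_IFSCOPE is visible to the build; not from xnu):

#include <sys/types.h>
#include <net/if.h>
#include <net/route.h>

/* Mark a routing request as interface-scoped: route_output() reads
 * the scope from rtm_index when RTF_IFSCOPE is set. */
static void
make_scoped(struct rt_msghdr *rtm, const char *ifname)
{
	unsigned int idx = if_nametoindex(ifname);

	if (idx != 0) {
		rtm->rtm_flags |= RTF_IFSCOPE;
		rtm->rtm_index = (u_short)idx;
	}
}
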
*/ if ((rt->rt_flags & RTF_GATEWAY) && - info.rti_info[RTAX_GATEWAY] == NULL) + info.rti_info[RTAX_GATEWAY] == NULL) { info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; + } /* * On Darwin, we call rt_setif which contains the @@ -646,12 +668,13 @@ report: if ((error = rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, rt))) { - int tmp = error; - RT_UNLOCK(rt); - senderr(tmp); + int tmp = error; + RT_UNLOCK(rt); + senderr(tmp); } - if (info.rti_info[RTAX_GENMASK]) + if (info.rti_info[RTAX_GENMASK]) { rt->rt_genmask = info.rti_info[RTAX_GENMASK]; + } /* * Enqueue work item to invoke callback for this route entry @@ -670,7 +693,7 @@ report: (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev); RT_LOCK(rt); } - /* FALLTHRU */ + /* FALLTHRU */ case RTM_LOCK: rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits); rt->rt_rmx.rmx_locks |= @@ -684,10 +707,11 @@ report: } flush: if (rtm != NULL) { - if (error) + if (error) { rtm->rtm_errno = error; - else + } else { rtm->rtm_flags |= RTF_DONE; + } } if (rt != NULL) { RT_LOCK_ASSERT_NOTHELD(rt); @@ -702,10 +726,11 @@ flush: */ if (!(so->so_options & SO_USELOOPBACK)) { if (route_cb.any_count <= 1) { - if (rtm != NULL) + if (rtm != NULL) { R_Free(rtm); + } m_freem(m); - return (error); + return error; } /* There is another listener, so construct message */ rp = sotorawcb(so); @@ -726,23 +751,27 @@ flush: NULL, &error) != 0) { sorwakeup(so); } - if (error) - return (error); + if (error) { + return error; + } } else { struct sockproto route_proto = { PF_ROUTE, 0 }; - if (rp != NULL) + if (rp != NULL) { rp->rcb_proto.sp_family = 0; /* Avoid us */ - if (dst_sa_family != 0) + } + if (dst_sa_family != 0) { route_proto.sp_protocol = dst_sa_family; + } if (m != NULL) { socket_unlock(so, 0); raw_input(m, &route_proto, &route_src, &route_dst); socket_lock(so, 0); } - if (rp != NULL) + if (rp != NULL) { rp->rcb_proto.sp_family = PF_ROUTE; + } } - return (error); + return error; } void @@ -764,7 +793,7 @@ rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out) if (!(which & RTV_REFRESH_HOST)) { struct timeval caltime; getmicrotime(&caltime); -#define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e; +#define metric(f, e) if (which & (f)) out->rt_rmx.e = in->e; metric(RTV_RPIPE, rmx_recvpipe); metric(RTV_SPIPE, rmx_sendpipe); metric(RTV_SSTHRESH, rmx_ssthresh); @@ -778,13 +807,13 @@ rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out) /* account for system time change */ getmicrotime(&caltime); out->base_calendartime += - NET_CALCULATE_CLOCKSKEW(caltime, - out->base_calendartime, - net_uptime(), out->base_uptime); + NET_CALCULATE_CLOCKSKEW(caltime, + out->base_calendartime, + net_uptime(), out->base_uptime); rt_setexpire(out, - out->rt_rmx.rmx_expire - - out->base_calendartime + - out->base_uptime); + out->rt_rmx.rmx_expire - + out->base_calendartime + + out->base_uptime); } else { rt_setexpire(out, 0); } @@ -796,16 +825,16 @@ rt_setmetrics(u_int32_t which, struct rt_metrics *in, struct rtentry *out) if ((which & ~RTV_REFRESH_HOST) || (out->rt_flags & RTF_STATIC) || !(out->rt_flags & RTF_LLINFO)) { - return (EINVAL); + return EINVAL; } if (out->rt_llinfo_refresh == NULL) { - return (ENOTSUP); + return ENOTSUP; } out->rt_llinfo_refresh(out); } - return (0); + return 0; } static void @@ -850,8 +879,9 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, RT_LOCK_ASSERT_HELD(rt); /* Don't update a defunct route */ - if (rt->rt_flags & RTF_CONDEMNED) + if (rt->rt_flags & RTF_CONDEMNED) { return; + } /* 
Add an extra ref for ourselves */ RT_ADDREF_LOCKED(rt); @@ -879,8 +909,9 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, } else { ifnet_lock_shared(ifp); ifa = TAILQ_FIRST(&ifp->if_addrhead); - if (ifa != NULL) + if (ifa != NULL) { IFA_ADDREF(ifa); + } ifnet_lock_done(ifp); } } else if (Ifaaddr && @@ -896,13 +927,15 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, */ RT_UNLOCK(rt); if ((ifa = ifa_ifwithroute_scoped_locked(rt->rt_flags, - rt_key(rt), Gate, ifscope)) != NULL) + rt_key(rt), Gate, ifscope)) != NULL) { ifp = ifa->ifa_ifp; + } RT_LOCK(rt); /* Don't update a defunct route */ if (rt->rt_flags & RTF_CONDEMNED) { - if (ifa != NULL) + if (ifa != NULL) { IFA_REMREF(ifa); + } /* Release extra ref */ RT_REMREF_LOCKED(rt); return; @@ -911,11 +944,13 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, } /* trigger route cache reevaluation */ - if (rt_key(rt)->sa_family == AF_INET) + if (rt_key(rt)->sa_family == AF_INET) { routegenid_inet_update(); + } #if INET6 - else if (rt_key(rt)->sa_family == AF_INET6) + else if (rt_key(rt)->sa_family == AF_INET6) { routegenid_inet6_update(); + } #endif /* INET6 */ if (ifa != NULL) { @@ -925,8 +960,9 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, IFA_LOCK_SPIN(oifa); ifa_rtrequest = oifa->ifa_rtrequest; IFA_UNLOCK(oifa); - if (ifa_rtrequest != NULL) + if (ifa_rtrequest != NULL) { ifa_rtrequest(RTM_DELETE, rt, Gate); + } } rtsetifa(rt, ifa); @@ -934,8 +970,9 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, /* * Purge any link-layer info caching. */ - if (rt->rt_llinfo_purge != NULL) + if (rt->rt_llinfo_purge != NULL) { rt->rt_llinfo_purge(rt); + } /* * Adjust route ref count for the interfaces. @@ -972,8 +1009,9 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, IFA_LOCK_SPIN(rt->rt_ifa); ifa_rtrequest = rt->rt_ifa->ifa_rtrequest; IFA_UNLOCK(rt->rt_ifa); - if (ifa_rtrequest != NULL) + if (ifa_rtrequest != NULL) { ifa_rtrequest(RTM_ADD, rt, Gate); + } } IFA_REMREF(ifa); /* Release extra ref */ @@ -989,8 +1027,9 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, IFA_LOCK_SPIN(rt->rt_ifa); ifa_rtrequest = rt->rt_ifa->ifa_rtrequest; IFA_UNLOCK(rt->rt_ifa); - if (ifa_rtrequest != NULL) + if (ifa_rtrequest != NULL) { ifa_rtrequest(RTM_ADD, rt, Gate); + } } /* @@ -1001,8 +1040,9 @@ rt_setif(struct rtentry *rt, struct sockaddr *Ifpaddr, struct sockaddr *Ifaaddr, (rt->rt_flags & RTF_HOST) && rt->rt_ifa->ifa_ifp == rt->rt_ifp) { ifa = ifa_ifwithaddr(rt_key(rt)); if (ifa != NULL) { - if (ifa != rt->rt_ifa) + if (ifa != rt->rt_ifa) { rtsetifa(rt, ifa); + } IFA_REMREF(ifa); } } @@ -1022,16 +1062,18 @@ rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo) struct sockaddr *sa; int i; - bzero(rtinfo->rti_info, sizeof (rtinfo->rti_info)); + bzero(rtinfo->rti_info, sizeof(rtinfo->rti_info)); for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) { - if ((rtinfo->rti_addrs & (1 << i)) == 0) + if ((rtinfo->rti_addrs & (1 << i)) == 0) { continue; + } sa = (struct sockaddr *)cp; /* * It won't fit. */ - if ((cp + sa->sa_len) > cplim) - return (EINVAL); + if ((cp + sa->sa_len) > cplim) { + return EINVAL; + } /* * there are no more.. quit now * If there are more bits, they are in error. 
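
rt_xaddrs() above walks the sockaddrs trailing a routing message: one sockaddr per bit set in rti_addrs, in RTAX_* order, each skipped past with ADVANCE32 (sa_len rounded up to a 32-bit boundary). The same walk from the user-space side of the socket, as an illustrative sketch (invented function name; the roundup mirrors ROUNDUP32 from earlier in this file):

#include <stdint.h>
#include <sys/socket.h>
#include <net/route.h>

static void
parse_addrs(int rtm_addrs, char *cp, char *cplim,
    struct sockaddr *out[RTAX_MAX])
{
	int i;

	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa;

		out[i] = NULL;
		if ((rtm_addrs & (1 << i)) == 0) {
			continue;
		}
		sa = (struct sockaddr *)cp;
		if (cp + sa->sa_len > cplim) {
			return;                 /* truncated; stop */
		}
		out[i] = sa;
		/* ADVANCE32: round sa_len up to a 32-bit boundary */
		cp += sa->sa_len > 0 ?
		    (1 + ((sa->sa_len - 1) | (sizeof(uint32_t) - 1))) :
		    sizeof(uint32_t);
	}
}
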
@@ -1041,13 +1083,13 @@ rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo) */ if (sa->sa_len == 0) { rtinfo->rti_info[i] = &sa_zero; - return (0); /* should be EINVAL but for compat */ + return 0; /* should be EINVAL but for compat */ } /* accept it */ rtinfo->rti_info[i] = sa; ADVANCE32(cp, sa); } - return (0); + return 0; } static struct mbuf * @@ -1059,23 +1101,22 @@ rt_msg1(int type, struct rt_addrinfo *rtinfo) int len, dlen, off; switch (type) { - case RTM_DELADDR: case RTM_NEWADDR: - len = sizeof (struct ifa_msghdr); + len = sizeof(struct ifa_msghdr); break; case RTM_DELMADDR: case RTM_NEWMADDR: - len = sizeof (struct ifma_msghdr); + len = sizeof(struct ifma_msghdr); break; case RTM_IFINFO: - len = sizeof (struct if_msghdr); + len = sizeof(struct if_msghdr); break; default: - len = sizeof (struct rt_msghdr); + len = sizeof(struct rt_msghdr); } m = m_gethdr(M_DONTWAIT, MT_DATA); if (m && len > MHLEN) { @@ -1085,8 +1126,9 @@ rt_msg1(int type, struct rt_addrinfo *rtinfo) m = NULL; } } - if (m == NULL) - return (NULL); + if (m == NULL) { + return NULL; + } m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = NULL; rtm = mtod(m, struct rt_msghdr *); @@ -1099,20 +1141,22 @@ rt_msg1(int type, struct rt_addrinfo *rtinfo) /* * Make sure to accomodate the largest possible size of sa_len. */ - _CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1)); + _CASSERT(sizeof(ssbuf) == (SOCK_MAXADDRLEN + 1)); - if ((sa = rtinfo->rti_info[i]) == NULL) + if ((sa = rtinfo->rti_info[i]) == NULL) { continue; + } switch (i) { case RTAX_DST: case RTAX_NETMASK: - if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL) + if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL) { hint = rtinfo->rti_info[RTAX_IFA]; + } /* Scrub away any trace of embedded interface scope */ sa = rtm_scrub(type, i, hint, sa, &ssbuf, - sizeof (ssbuf), NULL); + sizeof(ssbuf), NULL); break; default: @@ -1127,17 +1171,17 @@ rt_msg1(int type, struct rt_addrinfo *rtinfo) } if (m->m_pkthdr.len != len) { m_freem(m); - return (NULL); + return NULL; } rtm->rtm_msglen = len; rtm->rtm_version = RTM_VERSION; rtm->rtm_type = type; - return (m); + return m; } static int rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w, - kauth_cred_t* credp) + kauth_cred_t* credp) { int i; int len, dlen, rlen, second_time = 0; @@ -1146,43 +1190,43 @@ rt_msg2(int type, struct rt_addrinfo *rtinfo, caddr_t cp, struct walkarg *w, rtinfo->rti_addrs = 0; again: switch (type) { - case RTM_DELADDR: case RTM_NEWADDR: - len = sizeof (struct ifa_msghdr); + len = sizeof(struct ifa_msghdr); break; case RTM_DELMADDR: case RTM_NEWMADDR: - len = sizeof (struct ifma_msghdr); + len = sizeof(struct ifma_msghdr); break; case RTM_IFINFO: - len = sizeof (struct if_msghdr); + len = sizeof(struct if_msghdr); break; case RTM_IFINFO2: - len = sizeof (struct if_msghdr2); + len = sizeof(struct if_msghdr2); break; case RTM_NEWMADDR2: - len = sizeof (struct ifma_msghdr2); + len = sizeof(struct ifma_msghdr2); break; case RTM_GET_EXT: - len = sizeof (struct rt_msghdr_ext); + len = sizeof(struct rt_msghdr_ext); break; case RTM_GET2: - len = sizeof (struct rt_msghdr2); + len = sizeof(struct rt_msghdr2); break; default: - len = sizeof (struct rt_msghdr); + len = sizeof(struct rt_msghdr); } cp0 = cp; - if (cp0) + if (cp0) { cp += len; + } for (i = 0; i < RTAX_MAX; i++) { struct sockaddr *sa, *hint; uint8_t ssbuf[SOCK_MAXADDRLEN + 1]; @@ -1190,25 +1234,27 @@ again: /* * Make sure to accomodate the largest possible size of sa_len. 
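
rt_msg2() here uses a two-pass scheme: the first pass (cp == NULL) only totals the message length, then the caller allocates and the loop runs again to fill the buffer; note the hunk below also switches the w_tmem allocation to M_ZERO, presumably so roundup padding cannot carry stale kernel bytes to user space. The shape of the idiom as a generic sketch (invented names, not xnu code):

#include <stdlib.h>
#include <string.h>

static void *
build_msg(size_t hdrlen, const void *parts[], const size_t sizes[],
    int n, size_t *lenp)
{
	size_t len = hdrlen;
	char *buf, *cp;
	int i;

	for (i = 0; i < n; i++) {              /* pass 1: measure only */
		len += sizes[i];
	}
	if ((buf = calloc(1, len)) == NULL) {  /* zero-filled, cf. M_ZERO */
		return NULL;
	}
	cp = buf + hdrlen;
	for (i = 0; i < n; i++) {              /* pass 2: copy for real */
		memcpy(cp, parts[i], sizes[i]);
		cp += sizes[i];
	}
	*lenp = len;
	return buf;
}
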
*/ - _CASSERT(sizeof (ssbuf) == (SOCK_MAXADDRLEN + 1)); + _CASSERT(sizeof(ssbuf) == (SOCK_MAXADDRLEN + 1)); - if ((sa = rtinfo->rti_info[i]) == NULL) + if ((sa = rtinfo->rti_info[i]) == NULL) { continue; + } switch (i) { case RTAX_DST: case RTAX_NETMASK: - if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL) + if ((hint = rtinfo->rti_info[RTAX_DST]) == NULL) { hint = rtinfo->rti_info[RTAX_IFA]; + } /* Scrub away any trace of embedded interface scope */ sa = rtm_scrub(type, i, hint, sa, &ssbuf, - sizeof (ssbuf), NULL); + sizeof(ssbuf), NULL); break; case RTAX_GATEWAY: case RTAX_IFP: sa = rtm_scrub(type, i, NULL, sa, &ssbuf, - sizeof (ssbuf), credp); + sizeof(ssbuf), credp); break; default: @@ -1220,8 +1266,9 @@ again: rlen = ROUNDUP32(dlen); if (cp) { bcopy((caddr_t)sa, cp, (size_t)dlen); - if (dlen != rlen) + if (dlen != rlen) { bzero(cp + dlen, rlen - dlen); + } cp += rlen; } len += rlen; @@ -1231,11 +1278,13 @@ again: if (rw->w_req != NULL) { if (rw->w_tmemsize < len) { - if (rw->w_tmem != NULL) + if (rw->w_tmem != NULL) { FREE(rw->w_tmem, M_RTABLE); - rw->w_tmem = _MALLOC(len, M_RTABLE, M_WAITOK); - if (rw->w_tmem != NULL) + } + rw->w_tmem = _MALLOC(len, M_RTABLE, M_ZERO | M_WAITOK); + if (rw->w_tmem != NULL) { rw->w_tmemsize = len; + } } if (rw->w_tmem != NULL) { cp = rw->w_tmem; @@ -1251,7 +1300,7 @@ again: rtm->rtm_type = type; rtm->rtm_msglen = len; } - return (len); + return len; } /* @@ -1268,11 +1317,13 @@ rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error) struct sockaddr *sa = rtinfo->rti_info[RTAX_DST]; struct sockproto route_proto = { PF_ROUTE, 0 }; - if (route_cb.any_count == 0) + if (route_cb.any_count == 0) { return; + } m = rt_msg1(type, rtinfo); - if (m == NULL) + if (m == NULL) { return; + } rtm = mtod(m, struct rt_msghdr *); rtm->rtm_flags = RTF_DONE | flags; rtm->rtm_errno = error; @@ -1291,14 +1342,16 @@ rt_ifmsg(struct ifnet *ifp) struct if_msghdr *ifm; struct mbuf *m; struct rt_addrinfo info; - struct sockproto route_proto = { PF_ROUTE, 0 }; + struct sockproto route_proto = { PF_ROUTE, 0 }; - if (route_cb.any_count == 0) + if (route_cb.any_count == 0) { return; - bzero((caddr_t)&info, sizeof (info)); + } + bzero((caddr_t)&info, sizeof(info)); m = rt_msg1(RTM_IFINFO, &info); - if (m == NULL) + if (m == NULL) { return; + } ifm = mtod(m, struct if_msghdr *); ifm->ifm_index = ifp->if_index; ifm->ifm_flags = (u_short)ifp->if_flags; @@ -1331,13 +1384,14 @@ rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt) LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); RT_LOCK_ASSERT_HELD(rt); - if (route_cb.any_count == 0) + if (route_cb.any_count == 0) { return; + } /* Become a regular mutex, just in case */ RT_CONVERT_LOCK(rt); for (pass = 1; pass < 3; pass++) { - bzero((caddr_t)&info, sizeof (info)); + bzero((caddr_t)&info, sizeof(info)); if ((cmd == RTM_ADD && pass == 1) || (cmd == RTM_DELETE && pass == 2)) { struct ifa_msghdr *ifam; @@ -1375,13 +1429,15 @@ rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt) (cmd == RTM_DELETE && pass == 1)) { struct rt_msghdr *rtm; - if (rt == NULL) + if (rt == NULL) { continue; + } info.rti_info[RTAX_NETMASK] = rt_mask(rt); info.rti_info[RTAX_DST] = sa = rt_key(rt); info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; - if ((m = rt_msg1(cmd, &info)) == NULL) + if ((m = rt_msg1(cmd, &info)) == NULL) { continue; + } rtm = mtod(m, struct rt_msghdr *); rtm->rtm_index = ifp->if_index; rtm->rtm_flags |= rt->rt_flags; @@ -1407,12 +1463,13 @@ rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma) struct 
ifma_msghdr *ifmam; struct sockproto route_proto = { PF_ROUTE, 0 }; - if (route_cb.any_count == 0) + if (route_cb.any_count == 0) { return; + } /* Lock ifp for if_lladdr */ ifnet_lock_shared(ifp); - bzero((caddr_t)&info, sizeof (info)); + bzero((caddr_t)&info, sizeof(info)); IFMA_LOCK(ifma); info.rti_info[RTAX_IFA] = ifma->ifma_addr; /* lladdr doesn't need lock */ @@ -1509,7 +1566,7 @@ rtm2str(int cmd) break; } - return (c); + return c; } /* @@ -1530,8 +1587,9 @@ sysctl_dumpentry(struct radix_node *rn, void *vw) RT_LOCK(rt); if ((w->w_op == NET_RT_FLAGS || w->w_op == NET_RT_FLAGS_PRIV) && - !(rt->rt_flags & w->w_arg)) + !(rt->rt_flags & w->w_arg)) { goto done; + } /* * If the matching route has RTF_LLINFO set, then we can skip scrubbing the MAC @@ -1545,7 +1603,7 @@ sysctl_dumpentry(struct radix_node *rn, void *vw) } } - bzero((caddr_t)&info, sizeof (info)); + bzero((caddr_t)&info, sizeof(info)); info.rti_info[RTAX_DST] = rt_key(rt); info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; info.rti_info[RTAX_NETMASK] = rt_mask(rt); @@ -1578,10 +1636,11 @@ sysctl_dumpentry(struct radix_node *rn, void *vw) rt_getmetrics(rt, &rtm->rtm_rmx); rtm->rtm_index = rt->rt_ifp->if_index; rtm->rtm_refcnt = rt->rt_refcnt; - if (rt->rt_parent) + if (rt->rt_parent) { rtm->rtm_parentflags = rt->rt_parent->rt_flags; - else + } else { rtm->rtm_parentflags = 0; + } rtm->rtm_reserved = 0; rtm->rtm_addrs = info.rti_addrs; error = SYSCTL_OUT(w->w_req, (caddr_t)rtm, size); @@ -1591,7 +1650,7 @@ sysctl_dumpentry(struct radix_node *rn, void *vw) done: RT_UNLOCK(rt); kauth_cred_unref(&cred); - return (error); + return error; } /* @@ -1609,9 +1668,10 @@ sysctl_dumpentry_ext(struct radix_node *rn, void *vw) cred = kauth_cred_proc_ref(current_proc()); RT_LOCK(rt); - if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg)) + if (w->w_op == NET_RT_DUMPX_FLAGS && !(rt->rt_flags & w->w_arg)) { goto done; - bzero(&info, sizeof (info)); + } + bzero(&info, sizeof(info)); info.rti_info[RTAX_DST] = rt_key(rt); info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; info.rti_info[RTAX_NETMASK] = rt_mask(rt); @@ -1631,7 +1691,7 @@ sysctl_dumpentry_ext(struct radix_node *rn, void *vw) ertm->rtm_errno = 0; ertm->rtm_addrs = info.rti_addrs; if (rt->rt_llinfo_get_ri == NULL) { - bzero(&ertm->rtm_ri, sizeof (ertm->rtm_ri)); + bzero(&ertm->rtm_ri, sizeof(ertm->rtm_ri)); ertm->rtm_ri.ri_rssi = IFNET_RSSI_UNKNOWN; ertm->rtm_ri.ri_lqm = IFNET_LQM_THRESH_OFF; ertm->rtm_ri.ri_npm = IFNET_NPM_THRESH_UNKNOWN; @@ -1644,7 +1704,7 @@ sysctl_dumpentry_ext(struct radix_node *rn, void *vw) done: RT_UNLOCK(rt); kauth_cred_unref(&cred); - return (error); + return error; } /* @@ -1665,25 +1725,27 @@ sysctl_iflist(int af, struct walkarg *w) { struct ifnet *ifp; struct ifaddr *ifa; - struct rt_addrinfo info; - int len = 0, error = 0; - int pass = 0; - int total_len = 0, current_len = 0; - char *total_buffer = NULL, *cp = NULL; + struct rt_addrinfo info; + int len = 0, error = 0; + int pass = 0; + int total_len = 0, current_len = 0; + char *total_buffer = NULL, *cp = NULL; kauth_cred_t cred; cred = kauth_cred_proc_ref(current_proc()); - bzero((caddr_t)&info, sizeof (info)); + bzero((caddr_t)&info, sizeof(info)); for (pass = 0; pass < 2; pass++) { ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { - if (error) + if (error) { break; - if (w->w_arg && w->w_arg != ifp->if_index) + } + if (w->w_arg && w->w_arg != ifp->if_index) { continue; + } ifnet_lock_shared(ifp); /* * Holding ifnet lock here prevents the link address @@ -1718,14 +1780,14 @@ 
sysctl_iflist(int af, struct walkarg *w) /* * * Round bytes only for non-platform - */ + */ if (!csproc_get_platform_binary(w->w_req->p)) { ALIGN_BYTES(ifm->ifm_data.ifi_ibytes); ALIGN_BYTES(ifm->ifm_data.ifi_obytes); } cp += len; - VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t))); + VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t))); current_len += len; } while ((ifa = ifa->ifa_link.tqe_next) != NULL) { @@ -1736,7 +1798,7 @@ sysctl_iflist(int af, struct walkarg *w) } if (ifa->ifa_addr->sa_family == AF_INET6 && (((struct in6_ifaddr *)ifa)->ia6_flags & - IN6_IFF_CLAT46) != 0) { + IN6_IFF_CLAT46) != 0) { IFA_UNLOCK(ifa); continue; } @@ -1767,7 +1829,7 @@ sysctl_iflist(int af, struct walkarg *w) cp += len; VERIFY(IS_P2ALIGNED(cp, - sizeof (u_int32_t))); + sizeof(u_int32_t))); current_len += len; } IFA_UNLOCK(ifa); @@ -1780,17 +1842,19 @@ sysctl_iflist(int af, struct walkarg *w) ifnet_head_done(); if (error != 0) { - if (error == ENOBUFS) + if (error == ENOBUFS) { printf("%s: current_len (%d) + len (%d) > " "total_len (%d)\n", __func__, current_len, len, total_len); + } break; } if (pass == 0) { /* Better to return zero length buffer than ENOBUFS */ - if (total_len == 0) + if (total_len == 0) { total_len = 1; + } total_len += total_len >> 3; total_buffer = _MALLOC(total_len, M_RTABLE, M_ZERO | M_WAITOK); @@ -1801,19 +1865,21 @@ sysctl_iflist(int af, struct walkarg *w) break; } cp = total_buffer; - VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t))); + VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t))); } else { error = SYSCTL_OUT(w->w_req, total_buffer, current_len); - if (error) + if (error) { break; + } } } - if (total_buffer != NULL) + if (total_buffer != NULL) { _FREE(total_buffer, M_RTABLE); + } kauth_cred_unref(&cred); - return (error); + return error; } static int @@ -1821,16 +1887,16 @@ sysctl_iflist2(int af, struct walkarg *w) { struct ifnet *ifp; struct ifaddr *ifa; - struct rt_addrinfo info; - int len = 0, error = 0; - int pass = 0; - int total_len = 0, current_len = 0; - char *total_buffer = NULL, *cp = NULL; + struct rt_addrinfo info; + int len = 0, error = 0; + int pass = 0; + int total_len = 0, current_len = 0; + char *total_buffer = NULL, *cp = NULL; kauth_cred_t cred; cred = kauth_cred_proc_ref(current_proc()); - bzero((caddr_t)&info, sizeof (info)); + bzero((caddr_t)&info, sizeof(info)); for (pass = 0; pass < 2; pass++) { struct ifmultiaddr *ifma; @@ -1838,10 +1904,12 @@ sysctl_iflist2(int af, struct walkarg *w) ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { - if (error) + if (error) { break; - if (w->w_arg && w->w_arg != ifp->if_index) + } + if (w->w_arg && w->w_arg != ifp->if_index) { continue; + } ifnet_lock_shared(ifp); /* * Holding ifnet lock here prevents the link address @@ -1881,14 +1949,14 @@ sysctl_iflist2(int af, struct walkarg *w) /* * * Round bytes only for non-platform - */ + */ if (!csproc_get_platform_binary(w->w_req->p)) { ALIGN_BYTES(ifm->ifm_data.ifi_ibytes); ALIGN_BYTES(ifm->ifm_data.ifi_obytes); } cp += len; - VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t))); + VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t))); current_len += len; } while ((ifa = ifa->ifa_link.tqe_next) != NULL) { @@ -1899,7 +1967,7 @@ sysctl_iflist2(int af, struct walkarg *w) } if (ifa->ifa_addr->sa_family == AF_INET6 && (((struct in6_ifaddr *)ifa)->ia6_flags & - IN6_IFF_CLAT46) != 0) { + IN6_IFF_CLAT46) != 0) { IFA_UNLOCK(ifa); continue; } @@ -1931,7 +1999,7 @@ sysctl_iflist2(int af, struct walkarg *w) cp += len; VERIFY(IS_P2ALIGNED(cp, - sizeof (u_int32_t))); + sizeof(u_int32_t))); current_len += 
len; } IFA_UNLOCK(ifa); @@ -1950,7 +2018,7 @@ sysctl_iflist2(int af, struct walkarg *w) IFMA_UNLOCK(ifma); continue; } - bzero((caddr_t)&info, sizeof (info)); + bzero((caddr_t)&info, sizeof(info)); info.rti_info[RTAX_IFA] = ifma->ifma_addr; /* * Holding ifnet lock here prevents the link @@ -1960,9 +2028,10 @@ sysctl_iflist2(int af, struct walkarg *w) */ ifa0 = ifp->if_lladdr; info.rti_info[RTAX_IFP] = ifa0->ifa_addr; - if (ifma->ifma_ll != NULL) + if (ifma->ifma_ll != NULL) { info.rti_info[RTAX_GATEWAY] = ifma->ifma_ll->ifma_addr; + } len = rt_msg2(RTM_NEWMADDR2, &info, NULL, NULL, &cred); if (pass == 0) { @@ -1989,7 +2058,7 @@ sysctl_iflist2(int af, struct walkarg *w) cp += len; VERIFY(IS_P2ALIGNED(cp, - sizeof (u_int32_t))); + sizeof(u_int32_t))); current_len += len; } IFMA_UNLOCK(ifma); @@ -2001,17 +2070,19 @@ sysctl_iflist2(int af, struct walkarg *w) ifnet_head_done(); if (error) { - if (error == ENOBUFS) + if (error == ENOBUFS) { printf("%s: current_len (%d) + len (%d) > " "total_len (%d)\n", __func__, current_len, len, total_len); + } break; } if (pass == 0) { /* Better to return zero length buffer than ENOBUFS */ - if (total_len == 0) + if (total_len == 0) { total_len = 1; + } total_len += total_len >> 3; total_buffer = _MALLOC(total_len, M_RTABLE, M_ZERO | M_WAITOK); @@ -2022,79 +2093,86 @@ sysctl_iflist2(int af, struct walkarg *w) break; } cp = total_buffer; - VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t))); + VERIFY(IS_P2ALIGNED(cp, sizeof(u_int32_t))); } else { error = SYSCTL_OUT(w->w_req, total_buffer, current_len); - if (error) + if (error) { break; + } } } - if (total_buffer != NULL) + if (total_buffer != NULL) { _FREE(total_buffer, M_RTABLE); + } kauth_cred_unref(&cred); - return (error); + return error; } static int sysctl_rtstat(struct sysctl_req *req) { - return (SYSCTL_OUT(req, &rtstat, sizeof (struct rtstat))); + return SYSCTL_OUT(req, &rtstat, sizeof(struct rtstat)); } static int sysctl_rttrash(struct sysctl_req *req) { - return (SYSCTL_OUT(req, &rttrash, sizeof (rttrash))); + return SYSCTL_OUT(req, &rttrash, sizeof(rttrash)); } static int sysctl_rtsock SYSCTL_HANDLER_ARGS { #pragma unused(oidp) - int *name = (int *)arg1; - u_int namelen = arg2; + int *name = (int *)arg1; + u_int namelen = arg2; struct radix_node_head *rnh; - int i, error = EINVAL; + int i, error = EINVAL; u_char af; - struct walkarg w; + struct walkarg w; - name ++; + name++; namelen--; - if (req->newptr) - return (EPERM); - if (namelen != 3) - return (EINVAL); + if (req->newptr) { + return EPERM; + } + if (namelen != 3) { + return EINVAL; + } af = name[0]; - Bzero(&w, sizeof (w)); + Bzero(&w, sizeof(w)); w.w_op = name[1]; w.w_arg = name[2]; w.w_req = req; switch (w.w_op) { - case NET_RT_DUMP: case NET_RT_DUMP2: case NET_RT_FLAGS: case NET_RT_FLAGS_PRIV: lck_mtx_lock(rnh_lock); - for (i = 1; i <= AF_MAX; i++) + for (i = 1; i <= AF_MAX; i++) { if ((rnh = rt_tables[i]) && (af == 0 || af == i) && (error = rnh->rnh_walktree(rnh, - sysctl_dumpentry, &w))) + sysctl_dumpentry, &w))) { break; + } + } lck_mtx_unlock(rnh_lock); break; case NET_RT_DUMPX: case NET_RT_DUMPX_FLAGS: lck_mtx_lock(rnh_lock); - for (i = 1; i <= AF_MAX; i++) + for (i = 1; i <= AF_MAX; i++) { if ((rnh = rt_tables[i]) && (af == 0 || af == i) && (error = rnh->rnh_walktree(rnh, - sysctl_dumpentry_ext, &w))) + sysctl_dumpentry_ext, &w))) { break; + } + } lck_mtx_unlock(rnh_lock); break; case NET_RT_IFLIST: @@ -2110,32 +2188,33 @@ sysctl_rtsock SYSCTL_HANDLER_ARGS error = sysctl_rttrash(req); break; } - if (w.w_tmem != NULL) + if (w.w_tmem != 
NULL) { FREE(w.w_tmem, M_RTABLE); - return (error); + } + return error; } /* * Definitions of protocols supported in the ROUTE domain. */ static struct protosw routesw[] = { -{ - .pr_type = SOCK_RAW, - .pr_protocol = 0, - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_output = route_output, - .pr_ctlinput = raw_ctlinput, - .pr_init = raw_init, - .pr_usrreqs = &route_usrreqs, -} + { + .pr_type = SOCK_RAW, + .pr_protocol = 0, + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_output = route_output, + .pr_ctlinput = raw_ctlinput, + .pr_init = raw_init, + .pr_usrreqs = &route_usrreqs, + } }; -static int route_proto_count = (sizeof (routesw) / sizeof (struct protosw)); +static int route_proto_count = (sizeof(routesw) / sizeof(struct protosw)); struct domain routedomain_s = { - .dom_family = PF_ROUTE, - .dom_name = "route", - .dom_init = route_dinit, + .dom_family = PF_ROUTE, + .dom_name = "route", + .dom_init = route_dinit, }; static void @@ -2149,8 +2228,9 @@ route_dinit(struct domain *dp) routedomain = dp; - for (i = 0, pr = &routesw[0]; i < route_proto_count; i++, pr++) + for (i = 0, pr = &routesw[0]; i < route_proto_count; i++, pr++) { net_add_proto(pr, dp, 1); + } route_init(); } diff --git a/bsd/net/skywalk_stubs.c b/bsd/net/skywalk_stubs.c new file mode 100644 index 000000000..a4425e741 --- /dev/null +++ b/bsd/net/skywalk_stubs.c @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2015-2018 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <sys/systm.h>	/* panic() */
+
+#if !SKYWALK
+
+#define STUB(_name) \
+__attribute__((noreturn)) \
+int _name(void); \
+int \
+_name(void) \
+{ \
+	panic("stub called in a config with no SKYWALK"); \
+	/* NOTREACHED */ \
+	__builtin_unreachable(); \
+}
+
+STUB(kern_buflet_get_data_offset);
+STUB(kern_buflet_get_data_length);
+STUB(kern_buflet_get_object_address);
+STUB(kern_buflet_get_object_offset);
+STUB(kern_buflet_get_object_segment);
+STUB(kern_buflet_set_data_offset);
+STUB(kern_buflet_set_data_length);
+STUB(kern_buflet_get_data_limit);
+STUB(kern_channel_advance_slot);
+STUB(kern_channel_available_slot_count);
+STUB(kern_channel_get_context);
+STUB(kern_channel_get_next_slot);
+STUB(kern_channel_notify);
+STUB(kern_channel_reclaim);
+STUB(kern_channel_ring_get_container);
+STUB(kern_channel_ring_get_context);
+STUB(kern_channel_slot_get_context);
+STUB(kern_channel_slot_attach_packet);
+STUB(kern_channel_slot_detach_packet);
+STUB(kern_channel_slot_get_packet);
+STUB(kern_channel_increment_ring_stats);
+STUB(kern_channel_increment_ring_net_stats);
+STUB(kern_channel_tx_refill);
+STUB(kern_channel_get_service_class);
+STUB(kern_copy_and_inet_checksum);
+STUB(kern_inet_checksum);
+STUB(kern_nexus_attr_create);
+STUB(kern_nexus_attr_clone);
+STUB(kern_nexus_attr_destroy);
+STUB(kern_nexus_attr_set);
+STUB(kern_nexus_attr_get);
+STUB(kern_nexus_controller_create);
+STUB(kern_nexus_controller_destroy);
+STUB(kern_nexus_controller_alloc_provider_instance);
+STUB(kern_nexus_controller_alloc_net_provider_instance);
+STUB(kern_nexus_controller_bind_provider_instance);
+STUB(kern_nexus_controller_deregister_provider);
+STUB(kern_nexus_controller_free_provider_instance);
+STUB(kern_nexus_controller_read_provider_attr);
+STUB(kern_nexus_controller_register_provider);
+STUB(kern_nexus_controller_unbind_provider_instance);
+STUB(kern_nexus_deregister_domain_provider);
+STUB(kern_nexus_get_builtin_domain_provider);
+STUB(kern_nexus_get_context);
+STUB(kern_nexus_get_pbufpool);
+STUB(kern_nexus_register_domain_provider);
+STUB(kern_packet_clear_flow_uuid);
+STUB(kern_packet_get_euuid);
+STUB(kern_packet_finalize);
+STUB(kern_packet_get_buflet_count);
+STUB(kern_packet_get_data_length);
+STUB(kern_packet_get_flow_uuid);
+STUB(kern_packet_get_inet_checksum);
+STUB(kern_packet_get_link_broadcast);
+STUB(kern_packet_get_link_ethfcs);
+STUB(kern_packet_get_link_header_offset);
+STUB(kern_packet_get_link_multicast);
+STUB(kern_packet_get_network_header_offset);
+STUB(kern_packet_get_next_buflet);
+STUB(kern_packet_get_object_index);
+STUB(kern_packet_get_policy_id);
+STUB(kern_packet_get_service_class);
+STUB(kern_packet_get_service_class_index);
+STUB(kern_packet_get_traffic_class);
+STUB(kern_packet_get_timestamp);
+STUB(kern_packet_get_transport_header_offset);
+STUB(kern_packet_get_transport_new_flow);
+STUB(kern_packet_get_transport_retransmit);
+STUB(kern_packet_get_transport_last_packet);
+STUB(kern_packet_get_transport_traffic_background);
+STUB(kern_packet_get_transport_traffic_realtime);
+STUB(kern_packet_set_flow_uuid);
+STUB(kern_packet_set_inet_checksum);
+STUB(kern_packet_set_link_broadcast);
+STUB(kern_packet_set_link_header_offset);
+STUB(kern_packet_set_link_multicast);
+STUB(kern_packet_set_link_ethfcs);
+STUB(kern_packet_set_network_header_offset);
+STUB(kern_packet_set_policy_id);
+STUB(kern_packet_set_service_class);
+STUB(kern_packet_set_timestamp);
+STUB(kern_packet_set_traffic_class);
+STUB(kern_packet_set_transport_header_offset);
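For reference, an illustrative expansion of one stub under the STUB() macro defined above (editor's sketch; kern_pbufpool_alloc picked arbitrarily from the list):

__attribute__((noreturn))
int kern_pbufpool_alloc(void);
int
kern_pbufpool_alloc(void)
{
	panic("stub called in a config with no SKYWALK");
	/* NOTREACHED */
	__builtin_unreachable();
}

So a kernel configured without SKYWALK still exports every one of these KPI symbols, keeping callers linkable, but any call into them panics immediately.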
+STUB(kern_packet_get_timestamp_requested); +STUB(kern_packet_get_tx_completion_status); +STUB(kern_packet_set_tx_completion_status); +STUB(kern_packet_tx_completion); +STUB(kern_pbufpool_alloc); +STUB(kern_pbufpool_alloc_batch); +STUB(kern_pbufpool_alloc_nosleep); +STUB(kern_pbufpool_alloc_batch_nosleep); +STUB(kern_pbufpool_create); +STUB(kern_pbufpool_destroy); +STUB(kern_pbufpool_free); +STUB(kern_pbufpool_free_batch); +STUB(kern_pbufpool_get_context); +STUB(kern_pbufpool_get_memory_info); +STUB(kern_segment_get_index); +#undef STUB +#endif /* !SKYWALK */ diff --git a/bsd/net/zlib.h b/bsd/net/zlib.h index 1d8223213..c6596286a 100644 --- a/bsd/net/zlib.h +++ b/bsd/net/zlib.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/netinet/bootp.h b/bsd/netinet/bootp.h index 27f2581de..a423b2ad4 100644 --- a/bsd/netinet/bootp.h +++ b/bsd/netinet/bootp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -44,49 +44,49 @@ #define iaddr_t struct in_addr struct bootp { - u_char bp_op; /* packet opcode type */ -#define BOOTREQUEST 1 -#define BOOTREPLY 2 - u_char bp_htype; /* hardware addr type */ - u_char bp_hlen; /* hardware addr length */ - u_char bp_hops; /* gateway hops */ - u_int32_t bp_xid; /* transaction ID */ - u_short bp_secs; /* seconds since boot began */ - u_short bp_unused; - iaddr_t bp_ciaddr; /* client IP address */ - iaddr_t bp_yiaddr; /* 'your' IP address */ - iaddr_t bp_siaddr; /* server IP address */ - iaddr_t bp_giaddr; /* gateway IP address */ - u_char bp_chaddr[16]; /* client hardware address */ - u_char bp_sname[64]; /* server host name */ - u_char bp_file[128]; /* boot file name */ - u_char bp_vend[64]; /* vendor-specific area */ + u_char bp_op; /* packet opcode type */ +#define BOOTREQUEST 1 +#define BOOTREPLY 2 + u_char bp_htype; /* hardware addr type */ + u_char bp_hlen; /* hardware addr length */ + u_char bp_hops; /* gateway hops */ + u_int32_t bp_xid; /* transaction ID */ + u_short bp_secs; /* seconds since boot began */ + u_short bp_unused; + iaddr_t bp_ciaddr; /* client IP address */ + iaddr_t bp_yiaddr; /* 'your' IP address */ + iaddr_t bp_siaddr; /* server IP address */ + iaddr_t bp_giaddr; /* gateway IP address */ + u_char bp_chaddr[16]; /* client hardware address */ + u_char bp_sname[64]; /* server host name */ + u_char bp_file[128]; /* boot file name */ + u_char bp_vend[64]; /* vendor-specific area */ }; /* * UDP port numbers, server and client. */ -#define IPPORT_BOOTPS 67 -#define IPPORT_BOOTPC 68 +#define IPPORT_BOOTPS 67 +#define IPPORT_BOOTPC 68 /* * "vendor" data permitted for Stanford boot clients. */ struct vend { - u_char v_magic[4]; /* magic number */ - u_int32_t v_flags; /* flags/opcodes, etc. */ - u_char v_unused[56]; /* currently unused */ + u_char v_magic[4]; /* magic number */ + u_int32_t v_flags; /* flags/opcodes, etc. */ + u_char v_unused[56]; /* currently unused */ }; -#define VM_STANFORD "STAN" /* v_magic for Stanford */ +#define VM_STANFORD "STAN" /* v_magic for Stanford */ /* v_flags values */ -#define VF_PCBOOT 1 /* an IBMPC or Mac wants environment info */ -#define VF_HELP 2 /* help me, I'm not registered */ +#define VF_PCBOOT 1 /* an IBMPC or Mac wants environment info */ +#define VF_HELP 2 /* help me, I'm not registered */ -#define NVMAXTEXT 55 /* don't change this, it just fits RFC951 */ +#define NVMAXTEXT 55 /* don't change this, it just fits RFC951 */ struct nextvend { - u_char nv_magic[4]; /* Magic number for vendor specificity */ - u_char nv_version; /* NeXT protocol version */ + u_char nv_magic[4]; /* Magic number for vendor specificity */ + u_char nv_version; /* NeXT protocol version */ /* * Round the beginning * of the union to a 16 @@ -94,42 +94,41 @@ struct nextvend { * struct/union alignment * on the m68k. 
*/ - unsigned short :0; + unsigned short :0; union { u_char NV0[58]; struct { - u_char NV1_opcode; /* opcode - Version 1 */ - u_char NV1_xid; /* transcation id */ - u_char NV1_text[NVMAXTEXT]; /* text */ - u_char NV1_null; /* null terminator */ + u_char NV1_opcode; /* opcode - Version 1 */ + u_char NV1_xid; /* transcation id */ + u_char NV1_text[NVMAXTEXT]; /* text */ + u_char NV1_null; /* null terminator */ } NV1; } nv_U; }; -#define nv_unused nv_U.NV0 -#define nv_opcode nv_U.NV1.NV1_opcode -#define nv_xid nv_U.NV1.NV1_xid -#define nv_text nv_U.NV1.NV1_text -#define nv_null nv_U.NV1.NV1_null +#define nv_unused nv_U.NV0 +#define nv_opcode nv_U.NV1.NV1_opcode +#define nv_xid nv_U.NV1.NV1_xid +#define nv_text nv_U.NV1.NV1_text +#define nv_null nv_U.NV1.NV1_null /* Magic number */ -#define VM_NEXT "NeXT" /* v_magic for NeXT, Inc. */ +#define VM_NEXT "NeXT" /* v_magic for NeXT, Inc. */ /* Opcodes */ -#define BPOP_OK 0 -#define BPOP_QUERY 1 -#define BPOP_QUERY_NE 2 -#define BPOP_ERROR 3 +#define BPOP_OK 0 +#define BPOP_QUERY 1 +#define BPOP_QUERY_NE 2 +#define BPOP_ERROR 3 struct bootp_packet { - struct ip bp_ip; - struct udphdr bp_udp; - struct bootp bp_bootp; + struct ip bp_ip; + struct udphdr bp_udp; + struct bootp bp_bootp; }; -#define BOOTP_PKTSIZE (sizeof (struct bootp_packet)) +#define BOOTP_PKTSIZE (sizeof (struct bootp_packet)) /* backoffs must be masks */ -#define BOOTP_MIN_BACKOFF 0x7ff /* 2.048 sec */ -#define BOOTP_MAX_BACKOFF 0xffff /* 65.535 sec */ -#define BOOTP_RETRY 6 /* # retries */ - +#define BOOTP_MIN_BACKOFF 0x7ff /* 2.048 sec */ +#define BOOTP_MAX_BACKOFF 0xffff /* 65.535 sec */ +#define BOOTP_RETRY 6 /* # retries */ diff --git a/bsd/netinet/cbrtf.c b/bsd/netinet/cbrtf.c index 568535a68..f4f4bddf9 100644 --- a/bsd/netinet/cbrtf.c +++ b/bsd/netinet/cbrtf.c @@ -34,386 +34,394 @@ float cbrtf(float x); struct cbrt_table_entry { - double x; - double cbrt_x; - double recip_cbrt_x; - double recip_x; + double x; + double cbrt_x; + double recip_cbrt_x; + double recip_x; }; static const struct cbrt_table_entry cbrt_table[] = { - /* mantissa = 0x1.00... */ - {0x1.0000000000000p+0, 0x1.0000000000000p+0, - 0x1.0000000000000p+0, 0x1.0000000000000p+0}, /* exponent = 0 */ - {0x1.037e200000000p+1, 0x1.4400000000000p+0, - 0x1.948b0fcd6e9e0p-1, 0x1.f91bd1b62b9cfp-2}, /* exponent = 1 */ - {0x1.0315800000000p+2, 0x1.9800000000000p+0, - 0x1.4141414141414p-1, 0x1.f9e7cba5753afp-3}, /* exponent = 2 */ - - /* mantissa = 0x1.04... 
*/ - {0x1.060c080000000p+0, 0x1.0200000000000p+0, - 0x1.fc07f01fc07f0p-1, 0x1.f42f61dacddc6p-1}, /* exponent = 0 */ - {0x1.05ff4c356ff40p+1, 0x1.450a000000000p+0, - 0x1.933fff9b30002p-1, 0x1.f447b132ca3acp-2}, /* exponent = 1 */ - {0x1.06e9aa0000000p+2, 0x1.9a00000000000p+0, - 0x1.3fb013fb013fbp-1, 0x1.f289bb31fd41cp-3}, /* exponent = 2 */ - - /* mantissa = 0x1.08...*/ - {0x1.09fe97c0b2e80p+0, 0x1.034a000000000p+0, - 0x1.f9815c85b04a3p-1, 0x1.ecc3168ac46e4p-1}, // exponent = 0 - {0x1.0853ec0000000p+1, 0x1.4600000000000p+0, 0x1.920fb49d0e229p-1, 0x1.efde7dcdacefdp-2}, // exponent = 1 - {0x1.0ac7700000000p+2, 0x1.9c00000000000p+0, 0x1.3e22cbce4a902p-1, 0x1.eb501ca81bb3ep-3}, // exponent = 2 - - /* mantissa = 0x1.0c...*/ - {0x1.0c30400000000p+0, 0x1.0400000000000p+0, 0x1.f81f81f81f820p-1, 0x1.e8bb1d5b6e585p-1}, // exponent = 0 - {0x1.0d39000000000p+1, 0x1.4800000000000p+0, 0x1.8f9c18f9c18fap-1, 0x1.e6da80ced1523p-2}, // exponent = 1 - {0x1.0eaede0000000p+2, 0x1.9e00000000000p+0, 0x1.3c995a47babe7p-1, 0x1.e43a0fc24fe4bp-3}, // exponent = 2 - - /* mantissa = 0x1.10...*/ - {0x1.126cd80000000p+0, 0x1.0600000000000p+0, 0x1.f44659e4a4271p-1, 0x1.dd9fb30af3365p-1}, // exponent = 0 - {0x1.122d740000000p+1, 0x1.4a00000000000p+0, 0x1.8d3018d3018d3p-1, 0x1.de0e209af882ep-2}, // exponent = 1 - {0x1.12a0000000000p+2, 0x1.a000000000000p+0, 0x1.3b13b13b13b14p-1, 0x1.dd46baab49c24p-3}, // exponent = 2 - - /* mantissa = 0x1.14...*/ - {0x1.15f9b5b480000p+0, 0x1.0720000000000p+0, 0x1.f222c82dba316p-1, 0x1.d786108fd7a9fp-1}, // exponent = 0 - {0x1.1731600000000p+1, 0x1.4c00000000000p+0, 0x1.8acb90f6bf3aap-1, 0x1.d577b2f5c6f87p-2}, // exponent = 1 - {0x1.169ae20000000p+2, 0x1.a200000000000p+0, 0x1.3991c2c187f63p-1, 0x1.d67549c6f9b67p-3}, // exponent = 2 - - /* mantissa = 0x1.18...*/ - {0x1.18c2000000000p+0, 0x1.0800000000000p+0, 0x1.f07c1f07c1f08p-1, 0x1.d2d9cbd756afdp-1}, // exponent = 0 - {0x1.19fb2ce620540p+1, 0x1.4d1a000000000p+0, 0x1.897d564f5cf98p-1, 0x1.d0d34ccd78141p-2}, // exponent = 1 - {0x1.1a9f900000000p+2, 0x1.a400000000000p+0, 0x1.3813813813814p-1, 0x1.cfc4ef7db5bffp-3}, // exponent = 2 - - /* mantissa = 0x1.1c...*/ - {0x1.1f2fe80000000p+0, 0x1.0a00000000000p+0, 0x1.ecc07b301ecc0p-1, 0x1.c86636f753a66p-1}, // exponent = 0 - {0x1.1c44dc0000000p+1, 0x1.4e00000000000p+0, 0x1.886e5f0abb04ap-1, 0x1.cd159cdbba714p-2}, // exponent = 1 - {0x1.1eae160000000p+2, 0x1.a600000000000p+0, 0x1.3698df3de0748p-1, 0x1.c934e4095d202p-3}, // exponent = 2 - - /* mantissa = 0x1.20...*/ - {0x1.21fac7ca59c00p+0, 0x1.0adc000000000p+0, 0x1.eb2a412496abdp-1, 0x1.c40112c606d3ep-1}, // exponent = 0 - {0x1.2168000000000p+1, 0x1.5000000000000p+0, 0x1.8618618618618p-1, 0x1.c4e651e0c37d7p-2}, // exponent = 1 - {0x1.22c6800000000p+2, 0x1.a800000000000p+0, 0x1.3521cfb2b78c1p-1, 0x1.c2c46544650c1p-3}, // exponent = 2 - - /* mantissa = 0x1.24...*/ - {0x1.25b6c00000000p+0, 0x1.0c00000000000p+0, 0x1.e9131abf0b767p-1, 0x1.be41e7ee3f7edp-1}, // exponent = 0 - {0x1.269ae40000000p+1, 0x1.5200000000000p+0, 0x1.83c977ab2beddp-1, 0x1.bce853967753cp-2}, // exponent = 1 - {0x1.26e8da0000000p+2, 0x1.aa00000000000p+0, 0x1.33ae45b57bcb2p-1, 0x1.bc72b67ab9ce7p-3}, // exponent = 2 - - /* mantissa = 0x1.28...*/ - {0x1.29ff9aaaa2c00p+0, 0x1.0d4c000000000p+0, 0x1.e6b8275501adbp-1, 0x1.b7d7596e80007p-1}, // exponent = 0 - {0x1.2bdda00000000p+1, 0x1.5400000000000p+0, 0x1.8181818181818p-1, 0x1.b51a30f9739f8p-2}, // exponent = 1 - {0x1.2b15300000000p+2, 0x1.ac00000000000p+0, 0x1.323e34a2b10bfp-1, 0x1.b63f203c60c07p-3}, // exponent = 2 - - /* mantissa = 
0x1.2c...*/ - {0x1.2c56b80000000p+0, 0x1.0e00000000000p+0, 0x1.e573ac901e574p-1, 0x1.b469f4adc7794p-1}, // exponent = 0 - {0x1.2dfff74f29dc0p+1, 0x1.54ce000000000p+0, 0x1.80987c755886ap-1, 0x1.b203708429799p-2}, // exponent = 1 - {0x1.2f4b8e0000000p+2, 0x1.ae00000000000p+0, 0x1.30d190130d190p-1, 0x1.b028f031c8644p-3}, // exponent = 2 - - /* mantissa = 0x1.30...*/ - {0x1.3310000000000p+0, 0x1.1000000000000p+0, 0x1.e1e1e1e1e1e1ep-1, 0x1.aadb93d39ae9cp-1}, // exponent = 0 - {0x1.31304c0000000p+1, 0x1.5600000000000p+0, 0x1.7f405fd017f40p-1, 0x1.ad7a85e593e54p-2}, // exponent = 1 - {0x1.338c000000000p+2, 0x1.b000000000000p+0, 0x1.2f684bda12f68p-1, 0x1.aa2f78f1b4cc6p-3}, // exponent = 2 - - /* mantissa = 0x1.34... */ - {0x1.35fb6f4579c00p+0, 0x1.10dc000000000p+0, 0x1.e05d5a24448c5p-1, 0x1.a6d6548fa984dp-1}, // exponent = 0 - {0x1.3693000000000p+1, 0x1.5800000000000p+0, 0x1.7d05f417d05f4p-1, 0x1.a607fa909db1fp-2}, // exponent = 1 - {0x1.37d6920000000p+2, 0x1.b200000000000p+0, 0x1.2e025c04b8097p-1, 0x1.a45211d8b748ap-3}, // exponent = 2 + /* mantissa = 0x1.00... */ + {0x1.0000000000000p+0, 0x1.0000000000000p+0, + 0x1.0000000000000p+0, 0x1.0000000000000p+0}, /* exponent = 0 */ + {0x1.037e200000000p+1, 0x1.4400000000000p+0, + 0x1.948b0fcd6e9e0p-1, 0x1.f91bd1b62b9cfp-2}, /* exponent = 1 */ + {0x1.0315800000000p+2, 0x1.9800000000000p+0, + 0x1.4141414141414p-1, 0x1.f9e7cba5753afp-3}, /* exponent = 2 */ + + /* mantissa = 0x1.04... */ + {0x1.060c080000000p+0, 0x1.0200000000000p+0, + 0x1.fc07f01fc07f0p-1, 0x1.f42f61dacddc6p-1}, /* exponent = 0 */ + {0x1.05ff4c356ff40p+1, 0x1.450a000000000p+0, + 0x1.933fff9b30002p-1, 0x1.f447b132ca3acp-2}, /* exponent = 1 */ + {0x1.06e9aa0000000p+2, 0x1.9a00000000000p+0, + 0x1.3fb013fb013fbp-1, 0x1.f289bb31fd41cp-3}, /* exponent = 2 */ + + /* mantissa = 0x1.08...*/ + {0x1.09fe97c0b2e80p+0, 0x1.034a000000000p+0, + 0x1.f9815c85b04a3p-1, 0x1.ecc3168ac46e4p-1}, // exponent = 0 + {0x1.0853ec0000000p+1, 0x1.4600000000000p+0, 0x1.920fb49d0e229p-1, 0x1.efde7dcdacefdp-2}, // exponent = 1 + {0x1.0ac7700000000p+2, 0x1.9c00000000000p+0, 0x1.3e22cbce4a902p-1, 0x1.eb501ca81bb3ep-3}, // exponent = 2 + + /* mantissa = 0x1.0c...*/ + {0x1.0c30400000000p+0, 0x1.0400000000000p+0, 0x1.f81f81f81f820p-1, 0x1.e8bb1d5b6e585p-1}, // exponent = 0 + {0x1.0d39000000000p+1, 0x1.4800000000000p+0, 0x1.8f9c18f9c18fap-1, 0x1.e6da80ced1523p-2}, // exponent = 1 + {0x1.0eaede0000000p+2, 0x1.9e00000000000p+0, 0x1.3c995a47babe7p-1, 0x1.e43a0fc24fe4bp-3}, // exponent = 2 + + /* mantissa = 0x1.10...*/ + {0x1.126cd80000000p+0, 0x1.0600000000000p+0, 0x1.f44659e4a4271p-1, 0x1.dd9fb30af3365p-1}, // exponent = 0 + {0x1.122d740000000p+1, 0x1.4a00000000000p+0, 0x1.8d3018d3018d3p-1, 0x1.de0e209af882ep-2}, // exponent = 1 + {0x1.12a0000000000p+2, 0x1.a000000000000p+0, 0x1.3b13b13b13b14p-1, 0x1.dd46baab49c24p-3}, // exponent = 2 + + /* mantissa = 0x1.14...*/ + {0x1.15f9b5b480000p+0, 0x1.0720000000000p+0, 0x1.f222c82dba316p-1, 0x1.d786108fd7a9fp-1}, // exponent = 0 + {0x1.1731600000000p+1, 0x1.4c00000000000p+0, 0x1.8acb90f6bf3aap-1, 0x1.d577b2f5c6f87p-2}, // exponent = 1 + {0x1.169ae20000000p+2, 0x1.a200000000000p+0, 0x1.3991c2c187f63p-1, 0x1.d67549c6f9b67p-3}, // exponent = 2 + + /* mantissa = 0x1.18...*/ + {0x1.18c2000000000p+0, 0x1.0800000000000p+0, 0x1.f07c1f07c1f08p-1, 0x1.d2d9cbd756afdp-1}, // exponent = 0 + {0x1.19fb2ce620540p+1, 0x1.4d1a000000000p+0, 0x1.897d564f5cf98p-1, 0x1.d0d34ccd78141p-2}, // exponent = 1 + {0x1.1a9f900000000p+2, 0x1.a400000000000p+0, 0x1.3813813813814p-1, 0x1.cfc4ef7db5bffp-3}, // exponent = 2 
+ + /* mantissa = 0x1.1c...*/ + {0x1.1f2fe80000000p+0, 0x1.0a00000000000p+0, 0x1.ecc07b301ecc0p-1, 0x1.c86636f753a66p-1}, // exponent = 0 + {0x1.1c44dc0000000p+1, 0x1.4e00000000000p+0, 0x1.886e5f0abb04ap-1, 0x1.cd159cdbba714p-2}, // exponent = 1 + {0x1.1eae160000000p+2, 0x1.a600000000000p+0, 0x1.3698df3de0748p-1, 0x1.c934e4095d202p-3}, // exponent = 2 + + /* mantissa = 0x1.20...*/ + {0x1.21fac7ca59c00p+0, 0x1.0adc000000000p+0, 0x1.eb2a412496abdp-1, 0x1.c40112c606d3ep-1}, // exponent = 0 + {0x1.2168000000000p+1, 0x1.5000000000000p+0, 0x1.8618618618618p-1, 0x1.c4e651e0c37d7p-2}, // exponent = 1 + {0x1.22c6800000000p+2, 0x1.a800000000000p+0, 0x1.3521cfb2b78c1p-1, 0x1.c2c46544650c1p-3}, // exponent = 2 + + /* mantissa = 0x1.24...*/ + {0x1.25b6c00000000p+0, 0x1.0c00000000000p+0, 0x1.e9131abf0b767p-1, 0x1.be41e7ee3f7edp-1}, // exponent = 0 + {0x1.269ae40000000p+1, 0x1.5200000000000p+0, 0x1.83c977ab2beddp-1, 0x1.bce853967753cp-2}, // exponent = 1 + {0x1.26e8da0000000p+2, 0x1.aa00000000000p+0, 0x1.33ae45b57bcb2p-1, 0x1.bc72b67ab9ce7p-3}, // exponent = 2 + + /* mantissa = 0x1.28...*/ + {0x1.29ff9aaaa2c00p+0, 0x1.0d4c000000000p+0, 0x1.e6b8275501adbp-1, 0x1.b7d7596e80007p-1}, // exponent = 0 + {0x1.2bdda00000000p+1, 0x1.5400000000000p+0, 0x1.8181818181818p-1, 0x1.b51a30f9739f8p-2}, // exponent = 1 + {0x1.2b15300000000p+2, 0x1.ac00000000000p+0, 0x1.323e34a2b10bfp-1, 0x1.b63f203c60c07p-3}, // exponent = 2 + + /* mantissa = 0x1.2c...*/ + {0x1.2c56b80000000p+0, 0x1.0e00000000000p+0, 0x1.e573ac901e574p-1, 0x1.b469f4adc7794p-1}, // exponent = 0 + {0x1.2dfff74f29dc0p+1, 0x1.54ce000000000p+0, 0x1.80987c755886ap-1, 0x1.b203708429799p-2}, // exponent = 1 + {0x1.2f4b8e0000000p+2, 0x1.ae00000000000p+0, 0x1.30d190130d190p-1, 0x1.b028f031c8644p-3}, // exponent = 2 + + /* mantissa = 0x1.30...*/ + {0x1.3310000000000p+0, 0x1.1000000000000p+0, 0x1.e1e1e1e1e1e1ep-1, 0x1.aadb93d39ae9cp-1}, // exponent = 0 + {0x1.31304c0000000p+1, 0x1.5600000000000p+0, 0x1.7f405fd017f40p-1, 0x1.ad7a85e593e54p-2}, // exponent = 1 + {0x1.338c000000000p+2, 0x1.b000000000000p+0, 0x1.2f684bda12f68p-1, 0x1.aa2f78f1b4cc6p-3}, // exponent = 2 + + /* mantissa = 0x1.34... */ + {0x1.35fb6f4579c00p+0, 0x1.10dc000000000p+0, 0x1.e05d5a24448c5p-1, 0x1.a6d6548fa984dp-1}, // exponent = 0 + {0x1.3693000000000p+1, 0x1.5800000000000p+0, 0x1.7d05f417d05f4p-1, 0x1.a607fa909db1fp-2}, // exponent = 1 + {0x1.37d6920000000p+2, 0x1.b200000000000p+0, 0x1.2e025c04b8097p-1, 0x1.a45211d8b748ap-3}, // exponent = 2 /* mantissa = 0x1.38... */ - {0x1.39e2c80000000p+0, 0x1.1200000000000p+0, 0x1.de5d6e3f8868ap-1, 0x1.a1941b013022dp-1}, // exponent = 0 - {0x1.39fe541ac7840p+1, 0x1.5942000000000p+0, 0x1.7ba298eae8947p-1, 0x1.a16f787114257p-2}, // exponent = 1 - {0x1.39ffaac000000p+2, 0x1.b300000000000p+0, 0x1.2d50a012d50a0p-1, 0x1.a16db0ec408b2p-3}, // exponent = 2 - - /* mantissa = 0x1.3c... */ - {0x1.3dfc1312b0000p+0, 0x1.1330000000000p+0, 0x1.dc4cfaf10eb5cp-1, 0x1.9c322b87f17e8p-1}, // exponent = 0 - {0x1.3c05d40000000p+1, 0x1.5a00000000000p+0, 0x1.7ad2208e0ecc3p-1, 0x1.9ec1430b0dfc7p-2}, // exponent = 1 - {0x1.3c2b500000000p+2, 0x1.b400000000000p+0, 0x1.2c9fb4d812ca0p-1, 0x1.9e9016e2211b6p-3}, // exponent = 2 - - /* mantissa = 0x1.40... 
*/ - {0x1.40cf400000000p+0, 0x1.1400000000000p+0, 0x1.dae6076b981dbp-1, 0x1.9890fd4bf368fp-1}, // exponent = 0 - {0x1.4188e00000000p+1, 0x1.5c00000000000p+0, 0x1.78a4c8178a4c8p-1, 0x1.97a51ec6b707ep-2}, // exponent = 1 - {0x1.408a460000000p+2, 0x1.b600000000000p+0, 0x1.2b404ad012b40p-1, 0x1.98e8e88261b62p-3}, // exponent = 2 - - /* mantissa = 0x1.44... */ - {0x1.47d5980000000p+0, 0x1.1600000000000p+0, 0x1.d77b654b82c34p-1, 0x1.8fcfc9c44e2f4p-1}, // exponent = 0 - {0x1.471c3c0000000p+1, 0x1.5e00000000000p+0, 0x1.767dce434a9b1p-1, 0x1.90b25822e2a9fp-2}, // exponent = 1 - {0x1.44f3800000000p+2, 0x1.b800000000000p+0, 0x1.29e4129e4129ep-1, 0x1.935beb82c1ae7p-3}, // exponent = 2 - - /* mantissa = 0x1.48... */ - {0x1.49feb2bc0dc00p+0, 0x1.169c000000000p+0, 0x1.d67366d6ddfd0p-1, 0x1.8d31a9f2d47fbp-1}, // exponent = 0 - {0x1.49fcfb130a6c0p+1, 0x1.5f06000000000p+0, 0x1.75664a1a72c8dp-1, 0x1.8d33bb2686480p-2}, // exponent = 1 - {0x1.49670a0000000p+2, 0x1.ba00000000000p+0, 0x1.288b01288b013p-1, 0x1.8de888de6c48fp-3}, // exponent = 2 - - /* mantissa = 0x1.4c... */ - {0x1.4ef6000000000p+0, 0x1.1800000000000p+0, 0x1.d41d41d41d41dp-1, 0x1.874e2a121159fp-1}, // exponent = 0 - {0x1.4cc0000000000p+1, 0x1.6000000000000p+0, 0x1.745d1745d1746p-1, 0x1.89e7c3fdb1246p-2}, // exponent = 1 - {0x1.4de4f00000000p+2, 0x1.bc00000000000p+0, 0x1.27350b8812735p-1, 0x1.888e2da0ba19dp-3}, // exponent = 2 - - /* mantissa = 0x1.50... */ - {0x1.51ff889bc6000p+0, 0x1.18d8000000000p+0, 0x1.d2b539aeee152p-1, 0x1.83ca00a5a8f32p-1}, // exponent = 0 - {0x1.5274440000000p+1, 0x1.6200000000000p+0, 0x1.724287f46debcp-1, 0x1.8344414a70cbdp-2}, // exponent = 1 - {0x1.526d3e0000000p+2, 0x1.be00000000000p+0, 0x1.25e22708092f1p-1, 0x1.834c4ac4afd3bp-3}, // exponent = 2 - - /* mantissa = 0x1.54... */ - {0x1.5630a80000000p+0, 0x1.1a00000000000p+0, 0x1.d0cb58f6ec074p-1, 0x1.7f09e124e78b8p-1}, // exponent = 0 - {0x1.55fc05a5df140p+1, 0x1.633a000000000p+0, 0x1.70fb3e12b41c4p-1, 0x1.7f44d50c76c8ep-2}, // exponent = 1 - {0x1.5700000000000p+2, 0x1.c000000000000p+0, 0x1.2492492492492p-1, 0x1.7e225515a4f1dp-3}, // exponent = 2 - - /* mantissa = 0x1.58... */ - {0x1.59fc8db9a7e80p+0, 0x1.1b0a000000000p+0, 0x1.cf1688b3b4e6ap-1, 0x1.7ad5e68ed5f8cp-1}, // exponent = 0 - {0x1.5839200000000p+1, 0x1.6400000000000p+0, 0x1.702e05c0b8170p-1, 0x1.7cc6b8acae7cbp-2}, // exponent = 1 - {0x1.5b9d420000000p+2, 0x1.c200000000000p+0, 0x1.23456789abcdfp-1, 0x1.790fc51106751p-3}, // exponent = 2 - - /* mantissa = 0x1.5c... */ - {0x1.5d85c00000000p+0, 0x1.1c00000000000p+0, 0x1.cd85689039b0bp-1, 0x1.7700c9f78cc63p-1}, // exponent = 0 - {0x1.5e0eac0000000p+1, 0x1.6600000000000p+0, 0x1.6e1f76b4337c7p-1, 0x1.766e1c17c26ecp-2}, // exponent = 1 - {0x1.5dfdce5811360p+2, 0x1.c306000000000p+0, 0x1.229c346a04441p-1, 0x1.7680273c586edp-3}, // exponent = 2 - - /* mantissa = 0x1.60... */ - {0x1.61fbc0c515400p+0, 0x1.1d34000000000p+0, 0x1.cb92ff3a86d65p-1, 0x1.7246f92d40d4cp-1}, // exponent = 0 - {0x1.63f5000000000p+1, 0x1.6800000000000p+0, 0x1.6c16c16c16c17p-1, 0x1.70396672a04e5p-2}, // exponent = 1 - {0x1.6045100000000p+2, 0x1.c400000000000p+0, 0x1.21fb78121fb78p-1, 0x1.741416c92a70bp-3}, // exponent = 2 - - /* mantissa = 0x1.64... */ - {0x1.64f5780000000p+0, 0x1.1e00000000000p+0, 0x1.ca4b3055ee191p-1, 0x1.6f30d6649f11bp-1}, // exponent = 0 - {0x1.65fa1cdfa11c0p+1, 0x1.68ae000000000p+0, 0x1.6b671c62a2d0ap-1, 0x1.6e257c2026aefp-2}, // exponent = 1 - {0x1.64f7760000000p+2, 0x1.c600000000000p+0, 0x1.20b470c67c0d9p-1, 0x1.6f2ec9c929a29p-3}, // exponent = 2 - - /* mantissa = 0x1.68... 
*/ - {0x1.69fc04b688980p+0, 0x1.1f56000000000p+0, 0x1.c829b51036037p-1, 0x1.6a17c8a1a662ep-1}, // exponent = 0 - {0x1.69ec340000000p+1, 0x1.6a00000000000p+0, 0x1.6a13cd1537290p-1, 0x1.6a279b3fb4a4ep-2}, // exponent = 1 - {0x1.69b4800000000p+2, 0x1.c800000000000p+0, 0x1.1f7047dc11f70p-1, 0x1.6a5f60f9b4c97p-3}, // exponent = 2 - - /* mantissa = 0x1.6c... */ - {0x1.6c80000000000p+0, 0x1.2000000000000p+0, 0x1.c71c71c71c71cp-1, 0x1.67980e0bf08c7p-1}, // exponent = 0 - {0x1.6ff4600000000p+1, 0x1.6c00000000000p+0, 0x1.6816816816817p-1, 0x1.6437c6489c8e0p-2}, // exponent = 1 - {0x1.6e7c3a0000000p+2, 0x1.ca00000000000p+0, 0x1.1e2ef3b3fb874p-1, 0x1.65a56286dbe08p-3}, // exponent = 2 - - /* mantissa = 0x1.70... */ - {0x1.71fc3c5870000p+0, 0x1.2170000000000p+0, 0x1.c4d9cd40d7cfdp-1, 0x1.6243421ae7a84p-1}, // exponent = 0 - {0x1.71fef1bff2600p+1, 0x1.6cac000000000p+0, 0x1.676caae4b2e0fp-1, 0x1.6240aa2fa0dfdp-2}, // exponent = 1 - {0x1.734eb00000000p+2, 0x1.cc00000000000p+0, 0x1.1cf06ada2811dp-1, 0x1.610057c6bdd38p-3}, // exponent = 2 - - /* mantissa = 0x1.74... */ - {0x1.7425880000000p+0, 0x1.2200000000000p+0, 0x1.c3f8f01c3f8f0p-1, 0x1.60348d4756756p-1}, // exponent = 0 - {0x1.760d9c0000000p+1, 0x1.6e00000000000p+0, 0x1.661ec6a5122f9p-1, 0x1.5e68fb4d877a7p-2}, // exponent = 1 - {0x1.75fb34f0902a0p+2, 0x1.cd1a000000000p+0, 0x1.1c4227955e4f1p-1, 0x1.5e7a396f89f71p-3}, // exponent = 2 - - /* mantissa = 0x1.78... */ - {0x1.7be6400000000p+0, 0x1.2400000000000p+0, 0x1.c0e070381c0e0p-1, 0x1.5904842e0271bp-1}, // exponent = 0 - {0x1.79fec8fa79000p+1, 0x1.6f48000000000p+0, 0x1.64def50b37b22p-1, 0x1.5ac1740057116p-2}, // exponent = 1 - {0x1.782bee0000000p+2, 0x1.ce00000000000p+0, 0x1.1bb4a4046ed29p-1, 0x1.5c6fcd2117a65p-3}, // exponent = 2 - - /* mantissa = 0x1.7c... */ - {0x1.7dfa08e162000p+0, 0x1.2488000000000p+0, 0x1.c00fc08dc4fbfp-1, 0x1.57242f8b50298p-1}, // exponent = 0 - {0x1.7c38000000000p+1, 0x1.7000000000000p+0, 0x1.642c8590b2164p-1, 0x1.58ba55b815609p-2}, // exponent = 1 - {0x1.7d14000000000p+2, 0x1.d000000000000p+0, 0x1.1a7b9611a7b96p-1, 0x1.57f351f7aa6eap-3}, // exponent = 2 - - /* mantissa = 0x1.80... */ - {0x1.83c2580000000p+0, 0x1.2600000000000p+0, 0x1.bdd2b899406f7p-1, 0x1.520635a583b96p-1}, // exponent = 0 - {0x1.8273a40000000p+1, 0x1.7200000000000p+0, 0x1.623fa77016240p-1, 0x1.532af851862acp-2}, // exponent = 1 - {0x1.8206f20000000p+2, 0x1.d200000000000p+0, 0x1.19453808ca29cp-1, 0x1.538a788f6fdd6p-3}, // exponent = 2 - - /* mantissa = 0x1.84... */ - {0x1.85fd33ff90000p+0, 0x1.2690000000000p+0, 0x1.bcf8c69606a07p-1, 0x1.50176a58004f0p-1}, // exponent = 0 - {0x1.85fccde240000p+1, 0x1.7320000000000p+0, 0x1.612cc01b977f0p-1, 0x1.5017c2589970ep-2}, // exponent = 1 - {0x1.8704d00000000p+2, 0x1.d400000000000p+0, 0x1.1811811811812p-1, 0x1.4f34d5fa956d6p-3}, // exponent = 2 - - /* mantissa = 0x1.88... */ - {0x1.8bba000000000p+0, 0x1.2800000000000p+0, 0x1.bacf914c1bad0p-1, 0x1.4b37f67f9d05cp-1}, // exponent = 0 - {0x1.88c0a00000000p+1, 0x1.7400000000000p+0, 0x1.6058160581606p-1, 0x1.4dba0cfc11861p-2}, // exponent = 1 - {0x1.89fbb1ca4e0e0p+2, 0x1.d52e000000000p+0, 0x1.175d3b160af03p-1, 0x1.4caf2b205f9ddp-3}, // exponent = 2 - - /* mantissa = 0x1.8c... */ - {0x1.8dfca52590000p+0, 0x1.2890000000000p+0, 0x1.b9f88e001b9f9p-1, 0x1.495664ea7f47dp-1}, // exponent = 0 - {0x1.8f1f0c0000000p+1, 0x1.7600000000000p+0, 0x1.5e75bb8d015e7p-1, 0x1.4866c46f405dbp-2}, // exponent = 1 - {0x1.8c0da60000000p+2, 0x1.d600000000000p+0, 0x1.16e0689427379p-1, 0x1.4af2020336a59p-3}, // exponent = 2 - - /* mantissa = 0x1.90... 
*/ - {0x1.93cd680000000p+0, 0x1.2a00000000000p+0, 0x1.b7d6c3dda338bp-1, 0x1.44982ca42a2ebp-1}, // exponent = 0 - {0x1.91fabaf07d200p+1, 0x1.76e4000000000p+0, 0x1.5da09741396f7p-1, 0x1.461102bc1cb8fp-2}, // exponent = 1 - {0x1.9121800000000p+2, 0x1.d800000000000p+0, 0x1.15b1e5f75270dp-1, 0x1.46c19716cf2c0p-3}, // exponent = 2 - - /* mantissa = 0x1.94... */ - {0x1.95ff68a951e80p+0, 0x1.2a8a000000000p+0, 0x1.b70b72f76e7ddp-1, 0x1.42d6dab45c848p-1}, // exponent = 0 - {0x1.958f000000000p+1, 0x1.7800000000000p+0, 0x1.5c9882b931057p-1, 0x1.433055f7235dbp-2}, // exponent = 1 - {0x1.96406a0000000p+2, 0x1.da00000000000p+0, 0x1.1485f0e0acd3bp-1, 0x1.42a332325db6bp-3}, // exponent = 2 - - /* mantissa = 0x1.98... */ - {0x1.9bfcc00000000p+0, 0x1.2c00000000000p+0, 0x1.b4e81b4e81b4fp-1, 0x1.3e254e465d72cp-1}, // exponent = 0 - {0x1.99ffaac1ec3c0p+1, 0x1.795e000000000p+0, 0x1.5b55320eae3fdp-1, 0x1.3fb056724ebb2p-2}, // exponent = 1 - {0x1.9b6a700000000p+2, 0x1.dc00000000000p+0, 0x1.135c81135c811p-1, 0x1.3e9672cf3131dp-3}, // exponent = 2 - - /* mantissa = 0x1.9c... */ - {0x1.9dfc708557c00p+0, 0x1.2c7c000000000p+0, 0x1.b433cf4756912p-1, 0x1.3c9c1357411b6p-1}, // exponent = 0 - {0x1.9c10940000000p+1, 0x1.7a00000000000p+0, 0x1.5ac056b015ac0p-1, 0x1.3e15ff3643c49p-2}, // exponent = 1 - {0x1.9dfe6c1816fe0p+2, 0x1.dcfe000000000p+0, 0x1.12c9df926137bp-1, 0x1.3c9a8f2a1f8a5p-3}, // exponent = 2 - - /* mantissa = 0x1.a0... */ - {0x1.a1f8756df7480p+0, 0x1.2d72000000000p+0, 0x1.b2cfd6b4a2ec0p-1, 0x1.39976b1b376fbp-1}, // exponent = 0 - {0x1.a2a3e00000000p+1, 0x1.7c00000000000p+0, 0x1.58ed2308158edp-1, 0x1.391703ea2d9b9p-2}, // exponent = 1 - {0x1.a09f9e0000000p+2, 0x1.de00000000000p+0, 0x1.12358e75d3033p-1, 0x1.3a9afad059b87p-3}, // exponent = 2 - - /* mantissa = 0x1.a4... */ - {0x1.a448380000000p+0, 0x1.2e00000000000p+0, 0x1.b2036406c80d9p-1, 0x1.37dde124a87f2p-1}, // exponent = 0 - {0x1.a5fad7a3ee040p+1, 0x1.7d02000000000p+0, 0x1.580391c97b3f3p-1, 0x1.369cab16c4bb8p-2}, // exponent = 1 - {0x1.a5e0000000000p+2, 0x1.e000000000000p+0, 0x1.1111111111111p-1, 0x1.36b06e70b7421p-3}, // exponent = 2 - - /* mantissa = 0x1.a8... */ - {0x1.a9fbaa05b1c00p+0, 0x1.2f5c000000000p+0, 0x1.b01182b5ac1cep-1, 0x1.33b1676d97a5bp-1}, // exponent = 0 - {0x1.a948fc0000000p+1, 0x1.7e00000000000p+0, 0x1.571ed3c506b3ap-1, 0x1.3432adb274266p-2}, // exponent = 1 - {0x1.ab2ba20000000p+2, 0x1.e200000000000p+0, 0x1.0fef010fef011p-1, 0x1.32d67431a0280p-3}, // exponent = 2 - - /* mantissa = 0x1.ac... */ - {0x1.acb0000000000p+0, 0x1.3000000000000p+0, 0x1.af286bca1af28p-1, 0x1.31c079d2b089fp-1}, // exponent = 0 - {0x1.adffcaf535000p+1, 0x1.7f68000000000p+0, 0x1.55dca75792aa1p-1, 0x1.30d1b5accf7d2p-2}, // exponent = 1 - {0x1.adfb1053dbae0p+2, 0x1.e30e000000000p+0, 0x1.0f57023f898dcp-1, 0x1.30d50fe844fd2p-3}, // exponent = 2 - - /* mantissa = 0x1.b0... */ - {0x1.b1ff52f400000p+0, 0x1.3140000000000p+0, 0x1.ad646ddd321c2p-1, 0x1.2e02d4701d501p-1}, // exponent = 0 - {0x1.b000000000000p+1, 0x1.8000000000000p+0, 0x1.5555555555555p-1, 0x1.2f684bda12f68p-2}, // exponent = 1 - {0x1.b082900000000p+2, 0x1.e400000000000p+0, 0x1.0ecf56be69c90p-1, 0x1.2f0cb4ca19e1ep-3}, // exponent = 2 - - /* mantissa = 0x1.b4... */ - {0x1.b534480000000p+0, 0x1.3200000000000p+0, 0x1.ac5701ac5701bp-1, 0x1.2bcbbb0cb73f6p-1}, // exponent = 0 - {0x1.b6c9040000000p+1, 0x1.8200000000000p+0, 0x1.5390948f40febp-1, 0x1.2ab733230f96fp-2}, // exponent = 1 - {0x1.b5e4d60000000p+2, 0x1.e600000000000p+0, 0x1.0db20a88f4696p-1, 0x1.2b52db169e95ep-3}, // exponent = 2 - - /* mantissa = 0x1.b8... 
*/ - {0x1.b9fa0378e5c00p+0, 0x1.331c000000000p+0, 0x1.aacae5fd5e77dp-1, 0x1.288f0567537ffp-1}, // exponent = 0 - {0x1.b9fd76ec78000p+1, 0x1.82f0000000000p+0, 0x1.52bdf6a7a2620p-1, 0x1.288cb4a41a9b5p-2}, // exponent = 1 - {0x1.bb52800000000p+2, 0x1.e800000000000p+0, 0x1.0c9714fbcda3bp-1, 0x1.27a894096a4f5p-3}, // exponent = 2 - - /* mantissa = 0x1.bc... */ - {0x1.bdd5400000000p+0, 0x1.3400000000000p+0, 0x1.a98ef606a63bep-1, 0x1.25fe5513ebf45p-1}, // exponent = 0 - {0x1.bda4200000000p+1, 0x1.8400000000000p+0, 0x1.51d07eae2f815p-1, 0x1.261ebd944131ep-2}, // exponent = 1 - {0x1.bdfd332712ca0p+2, 0x1.e8fa000000000p+0, 0x1.0c0dc264ce74bp-1, 0x1.25e3ff656ec87p-3}, // exponent = 2 - - /* mantissa = 0x1.c0... */ - {0x1.c1fc1c0569400p+0, 0x1.34f4000000000p+0, 0x1.a83eded1251e7p-1, 0x1.2347ec39d66b0p-1}, // exponent = 0 - {0x1.c1fd3bf5cf840p+1, 0x1.8542000000000p+0, 0x1.50b90cb22a299p-1, 0x1.234731d751cccp-2}, // exponent = 1 - {0x1.c0cb9a0000000p+2, 0x1.ea00000000000p+0, 0x1.0b7e6ec259dc8p-1, 0x1.240d8e9b4ae5dp-3}, // exponent = 2 - - /* mantissa = 0x1.c4... */ - {0x1.c693180000000p+0, 0x1.3600000000000p+0, 0x1.a6d01a6d01a6dp-1, 0x1.2057051321929p-1}, // exponent = 0 - {0x1.c4916c0000000p+1, 0x1.8600000000000p+0, 0x1.5015015015015p-1, 0x1.219e4a4924f1fp-2}, // exponent = 1 - {0x1.c650300000000p+2, 0x1.ec00000000000p+0, 0x1.0a6810a6810a7p-1, 0x1.20817bbcedd1fp-3}, // exponent = 2 - - /* mantissa = 0x1.c8... */ - {0x1.c9fc4ad339d80p+0, 0x1.36c6000000000p+0, 0x1.a5c2b87b4e25ap-1, 0x1.1e3144d16fd97p-1}, // exponent = 0 - {0x1.cb91000000000p+1, 0x1.8800000000000p+0, 0x1.4e5e0a72f0539p-1, 0x1.1d353d43a7247p-2}, // exponent = 1 - {0x1.cbe04e0000000p+2, 0x1.ee00000000000p+0, 0x1.0953f39010954p-1, 0x1.1d040e48a75cdp-3}, // exponent = 2 - - /* mantissa = 0x1.cc... */ - {0x1.cf6e000000000p+0, 0x1.3800000000000p+0, 0x1.a41a41a41a41ap-1, 0x1.1ad4948b6e145p-1}, // exponent = 0 - {0x1.cdfd181598000p+1, 0x1.88b0000000000p+0, 0x1.4dc82df5d0542p-1, 0x1.1bb66cda74540p-2}, // exponent = 1 - {0x1.cdfeef0724420p+2, 0x1.eec2000000000p+0, 0x1.08ebe9d4e24aep-1, 0x1.1bb54ba55bb8ep-3}, // exponent = 2 - - /* mantissa = 0x1.d0... */ - {0x1.d1f9c6201cc80p+0, 0x1.3892000000000p+0, 0x1.a35607552f1cdp-1, 0x1.1948fa1f5ff30p-1}, // exponent = 0 - {0x1.d2a2f40000000p+1, 0x1.8a00000000000p+0, 0x1.4cab88725af6ep-1, 0x1.18e2ff3fca5acp-2}, // exponent = 1 - {0x1.d17c000000000p+2, 0x1.f000000000000p+0, 0x1.0842108421084p-1, 0x1.1994faf4aec92p-3}, // exponent = 2 - - /* mantissa = 0x1.d4... */ - {0x1.d5f8615bde180p+0, 0x1.3976000000000p+0, 0x1.a22504db000b7p-1, 0x1.16e4ee12da718p-1}, // exponent = 0 - {0x1.d5f9b87878000p+1, 0x1.8af0000000000p+0, 0x1.4be15f5393e98p-1, 0x1.16e4227697dbfp-2}, // exponent = 1 - {0x1.d723520000000p+2, 0x1.f200000000000p+0, 0x1.073260a47f7c6p-1, 0x1.1633f845cb3dep-3}, // exponent = 2 - - /* mantissa = 0x1.d8... */ - {0x1.d866280000000p+0, 0x1.3a00000000000p+0, 0x1.a16d3f97a4b02p-1, 0x1.1575d8c8402f4p-1}, // exponent = 0 - {0x1.d9c7600000000p+1, 0x1.8c00000000000p+0, 0x1.4afd6a052bf5bp-1, 0x1.14a6fd8916ecfp-2}, // exponent = 1 - {0x1.d9fb5ac000000p+2, 0x1.f300000000000p+0, 0x1.06ab59c7912fbp-1, 0x1.1488a6b10c148p-3}, // exponent = 2 - - /* mantissa = 0x1.dc... */ - {0x1.ddfdfe805bc00p+0, 0x1.3b3c000000000p+0, 0x1.9fcacece0b241p-1, 0x1.1236b509d4023p-1}, // exponent = 0 - {0x1.ddff55aa1e600p+1, 0x1.8d2c000000000p+0, 0x1.4a036770fd266p-1, 0x1.1235f02ce295ap-2}, // exponent = 1 - {0x1.dcd6500000000p+2, 0x1.f400000000000p+0, 0x1.0624dd2f1a9fcp-1, 0x1.12e0be826d695p-3}, // exponent = 2 - - /* mantissa = 0x1.e0... 
*/ - {0x1.e17bc00000000p+0, 0x1.3c00000000000p+0, 0x1.9ec8e951033d9p-1, 0x1.1039b25a7f122p-1}, // exponent = 0 - {0x1.e0fe5c0000000p+1, 0x1.8e00000000000p+0, 0x1.49539e3b2d067p-1, 0x1.1080a9d1be542p-2}, // exponent = 1 - {0x1.e295060000000p+2, 0x1.f600000000000p+0, 0x1.05197f7d73404p-1, 0x1.0f9b07a631f92p-3}, // exponent = 2 - - /* mantissa = 0x1.e4... */ - {0x1.e5ff3ecf6fc00p+0, 0x1.3cfc000000000p+0, 0x1.9d7f292cef9bap-1, 0x1.0db275be001a6p-1}, // exponent = 0 - {0x1.e5fefa40c0000p+1, 0x1.8f60000000000p+0, 0x1.48315b6c3fc79p-1, 0x1.0db29bc986108p-2}, // exponent = 1 - {0x1.e5fe06d9140e0p+2, 0x1.f72e000000000p+0, 0x1.047cca585fbe4p-1, 0x1.0db322dce8431p-3}, // exponent = 2 - - /* mantissa = 0x1.e8... */ - {0x1.eaaef80000000p+0, 0x1.3e00000000000p+0, 0x1.9c2d14ee4a102p-1, 0x1.0b1f0c9a4ed7cp-1}, // exponent = 0 - {0x1.e848000000000p+1, 0x1.9000000000000p+0, 0x1.47ae147ae147bp-1, 0x1.0c6f7a0b5ed8dp-2}, // exponent = 1 - {0x1.e85f800000000p+2, 0x1.f800000000000p+0, 0x1.0410410410410p-1, 0x1.0c628f55c92dep-3}, // exponent = 2 - - /* mantissa = 0x1.ec... */ - {0x1.edfb5912a5180p+0, 0x1.3eb6000000000p+0, 0x1.9b41b55ca11fcp-1, 0x1.0956733c0be03p-1}, // exponent = 0 - {0x1.efa4640000000p+1, 0x1.9200000000000p+0, 0x1.460cbc7f5cf9ap-1, 0x1.0872e8415508dp-2}, // exponent = 1 - {0x1.ee35ca0000000p+2, 0x1.fa00000000000p+0, 0x1.03091b51f5e1ap-1, 0x1.093712d33ff42p-3}, // exponent = 2 - - /* mantissa = 0x1.f0... */ - {0x1.f1fd112ab0c80p+0, 0x1.3f92000000000p+0, 0x1.9a2696dd75ba1p-1, 0x1.0733ed7907e73p-1}, // exponent = 0 - {0x1.f1fc8b255bc40p+1, 0x1.92a2000000000p+0, 0x1.45898cb57730cp-1, 0x1.0734344eaebefp-2}, // exponent = 1 - {0x1.f1ff2ff2d4ba0p+2, 0x1.fb4a000000000p+0, 0x1.02609989a73cfp-1, 0x1.0732ce999c3d1p-3}, // exponent = 2 - - /* mantissa = 0x1.f4... */ - {0x1.f400000000000p+0, 0x1.4000000000000p+0, 0x1.999999999999ap-1, 0x1.0624dd2f1a9fcp-1}, // exponent = 0 - {0x1.f713a00000000p+1, 0x1.9400000000000p+0, 0x1.446f86562d9fbp-1, 0x1.048a727489527p-2}, // exponent = 1 - {0x1.f417f00000000p+2, 0x1.fc00000000000p+0, 0x1.0204081020408p-1, 0x1.061850f2a7123p-3}, // exponent = 2 - - /* mantissa = 0x1.f8... */ - {0x1.f9fe36d7a7d80p+0, 0x1.4146000000000p+0, 0x1.97f9f956c92fdp-1, 0x1.030a055aebeddp-1}, // exponent = 0 - {0x1.f9f8b6ce70ec0p+1, 0x1.94c6000000000p+0, 0x1.43d0d2af8e146p-1, 0x1.030cd637fd65ep-2}, // exponent = 1 - {0x1.fa05fe0000000p+2, 0x1.fe00000000000p+0, 0x1.0101010101010p-1, 0x1.03060a0f151c2p-3}, // exponent = 2 - - /* mantissa = 0x1.fc... */ - {0x1.fd6f080000000p+0, 0x1.4200000000000p+0, 0x1.970e4f80cb872p-1, 0x1.014a239d8b1a9p-1}, // exponent = 0 - {0x1.fe95cc0000000p+1, 0x1.9600000000000p+0, 0x1.42d6625d51f87p-1, 0x1.00b59a78a8ffcp-2}, // exponent = 1 - {0x1.0000000000000p+3, 0x1.0000000000000p+1, 0x1.0000000000000p-1, 0x1.0000000000000p-3}, // exponent = 2 + {0x1.39e2c80000000p+0, 0x1.1200000000000p+0, 0x1.de5d6e3f8868ap-1, 0x1.a1941b013022dp-1}, // exponent = 0 + {0x1.39fe541ac7840p+1, 0x1.5942000000000p+0, 0x1.7ba298eae8947p-1, 0x1.a16f787114257p-2}, // exponent = 1 + {0x1.39ffaac000000p+2, 0x1.b300000000000p+0, 0x1.2d50a012d50a0p-1, 0x1.a16db0ec408b2p-3}, // exponent = 2 + + /* mantissa = 0x1.3c... */ + {0x1.3dfc1312b0000p+0, 0x1.1330000000000p+0, 0x1.dc4cfaf10eb5cp-1, 0x1.9c322b87f17e8p-1}, // exponent = 0 + {0x1.3c05d40000000p+1, 0x1.5a00000000000p+0, 0x1.7ad2208e0ecc3p-1, 0x1.9ec1430b0dfc7p-2}, // exponent = 1 + {0x1.3c2b500000000p+2, 0x1.b400000000000p+0, 0x1.2c9fb4d812ca0p-1, 0x1.9e9016e2211b6p-3}, // exponent = 2 + + /* mantissa = 0x1.40... 
*/ + {0x1.40cf400000000p+0, 0x1.1400000000000p+0, 0x1.dae6076b981dbp-1, 0x1.9890fd4bf368fp-1}, // exponent = 0 + {0x1.4188e00000000p+1, 0x1.5c00000000000p+0, 0x1.78a4c8178a4c8p-1, 0x1.97a51ec6b707ep-2}, // exponent = 1 + {0x1.408a460000000p+2, 0x1.b600000000000p+0, 0x1.2b404ad012b40p-1, 0x1.98e8e88261b62p-3}, // exponent = 2 + + /* mantissa = 0x1.44... */ + {0x1.47d5980000000p+0, 0x1.1600000000000p+0, 0x1.d77b654b82c34p-1, 0x1.8fcfc9c44e2f4p-1}, // exponent = 0 + {0x1.471c3c0000000p+1, 0x1.5e00000000000p+0, 0x1.767dce434a9b1p-1, 0x1.90b25822e2a9fp-2}, // exponent = 1 + {0x1.44f3800000000p+2, 0x1.b800000000000p+0, 0x1.29e4129e4129ep-1, 0x1.935beb82c1ae7p-3}, // exponent = 2 + + /* mantissa = 0x1.48... */ + {0x1.49feb2bc0dc00p+0, 0x1.169c000000000p+0, 0x1.d67366d6ddfd0p-1, 0x1.8d31a9f2d47fbp-1}, // exponent = 0 + {0x1.49fcfb130a6c0p+1, 0x1.5f06000000000p+0, 0x1.75664a1a72c8dp-1, 0x1.8d33bb2686480p-2}, // exponent = 1 + {0x1.49670a0000000p+2, 0x1.ba00000000000p+0, 0x1.288b01288b013p-1, 0x1.8de888de6c48fp-3}, // exponent = 2 + + /* mantissa = 0x1.4c... */ + {0x1.4ef6000000000p+0, 0x1.1800000000000p+0, 0x1.d41d41d41d41dp-1, 0x1.874e2a121159fp-1}, // exponent = 0 + {0x1.4cc0000000000p+1, 0x1.6000000000000p+0, 0x1.745d1745d1746p-1, 0x1.89e7c3fdb1246p-2}, // exponent = 1 + {0x1.4de4f00000000p+2, 0x1.bc00000000000p+0, 0x1.27350b8812735p-1, 0x1.888e2da0ba19dp-3}, // exponent = 2 + + /* mantissa = 0x1.50... */ + {0x1.51ff889bc6000p+0, 0x1.18d8000000000p+0, 0x1.d2b539aeee152p-1, 0x1.83ca00a5a8f32p-1}, // exponent = 0 + {0x1.5274440000000p+1, 0x1.6200000000000p+0, 0x1.724287f46debcp-1, 0x1.8344414a70cbdp-2}, // exponent = 1 + {0x1.526d3e0000000p+2, 0x1.be00000000000p+0, 0x1.25e22708092f1p-1, 0x1.834c4ac4afd3bp-3}, // exponent = 2 + + /* mantissa = 0x1.54... */ + {0x1.5630a80000000p+0, 0x1.1a00000000000p+0, 0x1.d0cb58f6ec074p-1, 0x1.7f09e124e78b8p-1}, // exponent = 0 + {0x1.55fc05a5df140p+1, 0x1.633a000000000p+0, 0x1.70fb3e12b41c4p-1, 0x1.7f44d50c76c8ep-2}, // exponent = 1 + {0x1.5700000000000p+2, 0x1.c000000000000p+0, 0x1.2492492492492p-1, 0x1.7e225515a4f1dp-3}, // exponent = 2 + + /* mantissa = 0x1.58... */ + {0x1.59fc8db9a7e80p+0, 0x1.1b0a000000000p+0, 0x1.cf1688b3b4e6ap-1, 0x1.7ad5e68ed5f8cp-1}, // exponent = 0 + {0x1.5839200000000p+1, 0x1.6400000000000p+0, 0x1.702e05c0b8170p-1, 0x1.7cc6b8acae7cbp-2}, // exponent = 1 + {0x1.5b9d420000000p+2, 0x1.c200000000000p+0, 0x1.23456789abcdfp-1, 0x1.790fc51106751p-3}, // exponent = 2 + + /* mantissa = 0x1.5c... */ + {0x1.5d85c00000000p+0, 0x1.1c00000000000p+0, 0x1.cd85689039b0bp-1, 0x1.7700c9f78cc63p-1}, // exponent = 0 + {0x1.5e0eac0000000p+1, 0x1.6600000000000p+0, 0x1.6e1f76b4337c7p-1, 0x1.766e1c17c26ecp-2}, // exponent = 1 + {0x1.5dfdce5811360p+2, 0x1.c306000000000p+0, 0x1.229c346a04441p-1, 0x1.7680273c586edp-3}, // exponent = 2 + + /* mantissa = 0x1.60... */ + {0x1.61fbc0c515400p+0, 0x1.1d34000000000p+0, 0x1.cb92ff3a86d65p-1, 0x1.7246f92d40d4cp-1}, // exponent = 0 + {0x1.63f5000000000p+1, 0x1.6800000000000p+0, 0x1.6c16c16c16c17p-1, 0x1.70396672a04e5p-2}, // exponent = 1 + {0x1.6045100000000p+2, 0x1.c400000000000p+0, 0x1.21fb78121fb78p-1, 0x1.741416c92a70bp-3}, // exponent = 2 + + /* mantissa = 0x1.64... */ + {0x1.64f5780000000p+0, 0x1.1e00000000000p+0, 0x1.ca4b3055ee191p-1, 0x1.6f30d6649f11bp-1}, // exponent = 0 + {0x1.65fa1cdfa11c0p+1, 0x1.68ae000000000p+0, 0x1.6b671c62a2d0ap-1, 0x1.6e257c2026aefp-2}, // exponent = 1 + {0x1.64f7760000000p+2, 0x1.c600000000000p+0, 0x1.20b470c67c0d9p-1, 0x1.6f2ec9c929a29p-3}, // exponent = 2 + + /* mantissa = 0x1.68... 
*/ + {0x1.69fc04b688980p+0, 0x1.1f56000000000p+0, 0x1.c829b51036037p-1, 0x1.6a17c8a1a662ep-1}, // exponent = 0 + {0x1.69ec340000000p+1, 0x1.6a00000000000p+0, 0x1.6a13cd1537290p-1, 0x1.6a279b3fb4a4ep-2}, // exponent = 1 + {0x1.69b4800000000p+2, 0x1.c800000000000p+0, 0x1.1f7047dc11f70p-1, 0x1.6a5f60f9b4c97p-3}, // exponent = 2 + + /* mantissa = 0x1.6c... */ + {0x1.6c80000000000p+0, 0x1.2000000000000p+0, 0x1.c71c71c71c71cp-1, 0x1.67980e0bf08c7p-1}, // exponent = 0 + {0x1.6ff4600000000p+1, 0x1.6c00000000000p+0, 0x1.6816816816817p-1, 0x1.6437c6489c8e0p-2}, // exponent = 1 + {0x1.6e7c3a0000000p+2, 0x1.ca00000000000p+0, 0x1.1e2ef3b3fb874p-1, 0x1.65a56286dbe08p-3}, // exponent = 2 + + /* mantissa = 0x1.70... */ + {0x1.71fc3c5870000p+0, 0x1.2170000000000p+0, 0x1.c4d9cd40d7cfdp-1, 0x1.6243421ae7a84p-1}, // exponent = 0 + {0x1.71fef1bff2600p+1, 0x1.6cac000000000p+0, 0x1.676caae4b2e0fp-1, 0x1.6240aa2fa0dfdp-2}, // exponent = 1 + {0x1.734eb00000000p+2, 0x1.cc00000000000p+0, 0x1.1cf06ada2811dp-1, 0x1.610057c6bdd38p-3}, // exponent = 2 + + /* mantissa = 0x1.74... */ + {0x1.7425880000000p+0, 0x1.2200000000000p+0, 0x1.c3f8f01c3f8f0p-1, 0x1.60348d4756756p-1}, // exponent = 0 + {0x1.760d9c0000000p+1, 0x1.6e00000000000p+0, 0x1.661ec6a5122f9p-1, 0x1.5e68fb4d877a7p-2}, // exponent = 1 + {0x1.75fb34f0902a0p+2, 0x1.cd1a000000000p+0, 0x1.1c4227955e4f1p-1, 0x1.5e7a396f89f71p-3}, // exponent = 2 + + /* mantissa = 0x1.78... */ + {0x1.7be6400000000p+0, 0x1.2400000000000p+0, 0x1.c0e070381c0e0p-1, 0x1.5904842e0271bp-1}, // exponent = 0 + {0x1.79fec8fa79000p+1, 0x1.6f48000000000p+0, 0x1.64def50b37b22p-1, 0x1.5ac1740057116p-2}, // exponent = 1 + {0x1.782bee0000000p+2, 0x1.ce00000000000p+0, 0x1.1bb4a4046ed29p-1, 0x1.5c6fcd2117a65p-3}, // exponent = 2 + + /* mantissa = 0x1.7c... */ + {0x1.7dfa08e162000p+0, 0x1.2488000000000p+0, 0x1.c00fc08dc4fbfp-1, 0x1.57242f8b50298p-1}, // exponent = 0 + {0x1.7c38000000000p+1, 0x1.7000000000000p+0, 0x1.642c8590b2164p-1, 0x1.58ba55b815609p-2}, // exponent = 1 + {0x1.7d14000000000p+2, 0x1.d000000000000p+0, 0x1.1a7b9611a7b96p-1, 0x1.57f351f7aa6eap-3}, // exponent = 2 + + /* mantissa = 0x1.80... */ + {0x1.83c2580000000p+0, 0x1.2600000000000p+0, 0x1.bdd2b899406f7p-1, 0x1.520635a583b96p-1}, // exponent = 0 + {0x1.8273a40000000p+1, 0x1.7200000000000p+0, 0x1.623fa77016240p-1, 0x1.532af851862acp-2}, // exponent = 1 + {0x1.8206f20000000p+2, 0x1.d200000000000p+0, 0x1.19453808ca29cp-1, 0x1.538a788f6fdd6p-3}, // exponent = 2 + + /* mantissa = 0x1.84... */ + {0x1.85fd33ff90000p+0, 0x1.2690000000000p+0, 0x1.bcf8c69606a07p-1, 0x1.50176a58004f0p-1}, // exponent = 0 + {0x1.85fccde240000p+1, 0x1.7320000000000p+0, 0x1.612cc01b977f0p-1, 0x1.5017c2589970ep-2}, // exponent = 1 + {0x1.8704d00000000p+2, 0x1.d400000000000p+0, 0x1.1811811811812p-1, 0x1.4f34d5fa956d6p-3}, // exponent = 2 + + /* mantissa = 0x1.88... */ + {0x1.8bba000000000p+0, 0x1.2800000000000p+0, 0x1.bacf914c1bad0p-1, 0x1.4b37f67f9d05cp-1}, // exponent = 0 + {0x1.88c0a00000000p+1, 0x1.7400000000000p+0, 0x1.6058160581606p-1, 0x1.4dba0cfc11861p-2}, // exponent = 1 + {0x1.89fbb1ca4e0e0p+2, 0x1.d52e000000000p+0, 0x1.175d3b160af03p-1, 0x1.4caf2b205f9ddp-3}, // exponent = 2 + + /* mantissa = 0x1.8c... */ + {0x1.8dfca52590000p+0, 0x1.2890000000000p+0, 0x1.b9f88e001b9f9p-1, 0x1.495664ea7f47dp-1}, // exponent = 0 + {0x1.8f1f0c0000000p+1, 0x1.7600000000000p+0, 0x1.5e75bb8d015e7p-1, 0x1.4866c46f405dbp-2}, // exponent = 1 + {0x1.8c0da60000000p+2, 0x1.d600000000000p+0, 0x1.16e0689427379p-1, 0x1.4af2020336a59p-3}, // exponent = 2 + + /* mantissa = 0x1.90... 
*/ + {0x1.93cd680000000p+0, 0x1.2a00000000000p+0, 0x1.b7d6c3dda338bp-1, 0x1.44982ca42a2ebp-1}, // exponent = 0 + {0x1.91fabaf07d200p+1, 0x1.76e4000000000p+0, 0x1.5da09741396f7p-1, 0x1.461102bc1cb8fp-2}, // exponent = 1 + {0x1.9121800000000p+2, 0x1.d800000000000p+0, 0x1.15b1e5f75270dp-1, 0x1.46c19716cf2c0p-3}, // exponent = 2 + + /* mantissa = 0x1.94... */ + {0x1.95ff68a951e80p+0, 0x1.2a8a000000000p+0, 0x1.b70b72f76e7ddp-1, 0x1.42d6dab45c848p-1}, // exponent = 0 + {0x1.958f000000000p+1, 0x1.7800000000000p+0, 0x1.5c9882b931057p-1, 0x1.433055f7235dbp-2}, // exponent = 1 + {0x1.96406a0000000p+2, 0x1.da00000000000p+0, 0x1.1485f0e0acd3bp-1, 0x1.42a332325db6bp-3}, // exponent = 2 + + /* mantissa = 0x1.98... */ + {0x1.9bfcc00000000p+0, 0x1.2c00000000000p+0, 0x1.b4e81b4e81b4fp-1, 0x1.3e254e465d72cp-1}, // exponent = 0 + {0x1.99ffaac1ec3c0p+1, 0x1.795e000000000p+0, 0x1.5b55320eae3fdp-1, 0x1.3fb056724ebb2p-2}, // exponent = 1 + {0x1.9b6a700000000p+2, 0x1.dc00000000000p+0, 0x1.135c81135c811p-1, 0x1.3e9672cf3131dp-3}, // exponent = 2 + + /* mantissa = 0x1.9c... */ + {0x1.9dfc708557c00p+0, 0x1.2c7c000000000p+0, 0x1.b433cf4756912p-1, 0x1.3c9c1357411b6p-1}, // exponent = 0 + {0x1.9c10940000000p+1, 0x1.7a00000000000p+0, 0x1.5ac056b015ac0p-1, 0x1.3e15ff3643c49p-2}, // exponent = 1 + {0x1.9dfe6c1816fe0p+2, 0x1.dcfe000000000p+0, 0x1.12c9df926137bp-1, 0x1.3c9a8f2a1f8a5p-3}, // exponent = 2 + + /* mantissa = 0x1.a0... */ + {0x1.a1f8756df7480p+0, 0x1.2d72000000000p+0, 0x1.b2cfd6b4a2ec0p-1, 0x1.39976b1b376fbp-1}, // exponent = 0 + {0x1.a2a3e00000000p+1, 0x1.7c00000000000p+0, 0x1.58ed2308158edp-1, 0x1.391703ea2d9b9p-2}, // exponent = 1 + {0x1.a09f9e0000000p+2, 0x1.de00000000000p+0, 0x1.12358e75d3033p-1, 0x1.3a9afad059b87p-3}, // exponent = 2 + + /* mantissa = 0x1.a4... */ + {0x1.a448380000000p+0, 0x1.2e00000000000p+0, 0x1.b2036406c80d9p-1, 0x1.37dde124a87f2p-1}, // exponent = 0 + {0x1.a5fad7a3ee040p+1, 0x1.7d02000000000p+0, 0x1.580391c97b3f3p-1, 0x1.369cab16c4bb8p-2}, // exponent = 1 + {0x1.a5e0000000000p+2, 0x1.e000000000000p+0, 0x1.1111111111111p-1, 0x1.36b06e70b7421p-3}, // exponent = 2 + + /* mantissa = 0x1.a8... */ + {0x1.a9fbaa05b1c00p+0, 0x1.2f5c000000000p+0, 0x1.b01182b5ac1cep-1, 0x1.33b1676d97a5bp-1}, // exponent = 0 + {0x1.a948fc0000000p+1, 0x1.7e00000000000p+0, 0x1.571ed3c506b3ap-1, 0x1.3432adb274266p-2}, // exponent = 1 + {0x1.ab2ba20000000p+2, 0x1.e200000000000p+0, 0x1.0fef010fef011p-1, 0x1.32d67431a0280p-3}, // exponent = 2 + + /* mantissa = 0x1.ac... */ + {0x1.acb0000000000p+0, 0x1.3000000000000p+0, 0x1.af286bca1af28p-1, 0x1.31c079d2b089fp-1}, // exponent = 0 + {0x1.adffcaf535000p+1, 0x1.7f68000000000p+0, 0x1.55dca75792aa1p-1, 0x1.30d1b5accf7d2p-2}, // exponent = 1 + {0x1.adfb1053dbae0p+2, 0x1.e30e000000000p+0, 0x1.0f57023f898dcp-1, 0x1.30d50fe844fd2p-3}, // exponent = 2 + + /* mantissa = 0x1.b0... */ + {0x1.b1ff52f400000p+0, 0x1.3140000000000p+0, 0x1.ad646ddd321c2p-1, 0x1.2e02d4701d501p-1}, // exponent = 0 + {0x1.b000000000000p+1, 0x1.8000000000000p+0, 0x1.5555555555555p-1, 0x1.2f684bda12f68p-2}, // exponent = 1 + {0x1.b082900000000p+2, 0x1.e400000000000p+0, 0x1.0ecf56be69c90p-1, 0x1.2f0cb4ca19e1ep-3}, // exponent = 2 + + /* mantissa = 0x1.b4... */ + {0x1.b534480000000p+0, 0x1.3200000000000p+0, 0x1.ac5701ac5701bp-1, 0x1.2bcbbb0cb73f6p-1}, // exponent = 0 + {0x1.b6c9040000000p+1, 0x1.8200000000000p+0, 0x1.5390948f40febp-1, 0x1.2ab733230f96fp-2}, // exponent = 1 + {0x1.b5e4d60000000p+2, 0x1.e600000000000p+0, 0x1.0db20a88f4696p-1, 0x1.2b52db169e95ep-3}, // exponent = 2 + + /* mantissa = 0x1.b8... 
*/ + {0x1.b9fa0378e5c00p+0, 0x1.331c000000000p+0, 0x1.aacae5fd5e77dp-1, 0x1.288f0567537ffp-1}, // exponent = 0 + {0x1.b9fd76ec78000p+1, 0x1.82f0000000000p+0, 0x1.52bdf6a7a2620p-1, 0x1.288cb4a41a9b5p-2}, // exponent = 1 + {0x1.bb52800000000p+2, 0x1.e800000000000p+0, 0x1.0c9714fbcda3bp-1, 0x1.27a894096a4f5p-3}, // exponent = 2 + + /* mantissa = 0x1.bc... */ + {0x1.bdd5400000000p+0, 0x1.3400000000000p+0, 0x1.a98ef606a63bep-1, 0x1.25fe5513ebf45p-1}, // exponent = 0 + {0x1.bda4200000000p+1, 0x1.8400000000000p+0, 0x1.51d07eae2f815p-1, 0x1.261ebd944131ep-2}, // exponent = 1 + {0x1.bdfd332712ca0p+2, 0x1.e8fa000000000p+0, 0x1.0c0dc264ce74bp-1, 0x1.25e3ff656ec87p-3}, // exponent = 2 + + /* mantissa = 0x1.c0... */ + {0x1.c1fc1c0569400p+0, 0x1.34f4000000000p+0, 0x1.a83eded1251e7p-1, 0x1.2347ec39d66b0p-1}, // exponent = 0 + {0x1.c1fd3bf5cf840p+1, 0x1.8542000000000p+0, 0x1.50b90cb22a299p-1, 0x1.234731d751cccp-2}, // exponent = 1 + {0x1.c0cb9a0000000p+2, 0x1.ea00000000000p+0, 0x1.0b7e6ec259dc8p-1, 0x1.240d8e9b4ae5dp-3}, // exponent = 2 + + /* mantissa = 0x1.c4... */ + {0x1.c693180000000p+0, 0x1.3600000000000p+0, 0x1.a6d01a6d01a6dp-1, 0x1.2057051321929p-1}, // exponent = 0 + {0x1.c4916c0000000p+1, 0x1.8600000000000p+0, 0x1.5015015015015p-1, 0x1.219e4a4924f1fp-2}, // exponent = 1 + {0x1.c650300000000p+2, 0x1.ec00000000000p+0, 0x1.0a6810a6810a7p-1, 0x1.20817bbcedd1fp-3}, // exponent = 2 + + /* mantissa = 0x1.c8... */ + {0x1.c9fc4ad339d80p+0, 0x1.36c6000000000p+0, 0x1.a5c2b87b4e25ap-1, 0x1.1e3144d16fd97p-1}, // exponent = 0 + {0x1.cb91000000000p+1, 0x1.8800000000000p+0, 0x1.4e5e0a72f0539p-1, 0x1.1d353d43a7247p-2}, // exponent = 1 + {0x1.cbe04e0000000p+2, 0x1.ee00000000000p+0, 0x1.0953f39010954p-1, 0x1.1d040e48a75cdp-3}, // exponent = 2 + + /* mantissa = 0x1.cc... */ + {0x1.cf6e000000000p+0, 0x1.3800000000000p+0, 0x1.a41a41a41a41ap-1, 0x1.1ad4948b6e145p-1}, // exponent = 0 + {0x1.cdfd181598000p+1, 0x1.88b0000000000p+0, 0x1.4dc82df5d0542p-1, 0x1.1bb66cda74540p-2}, // exponent = 1 + {0x1.cdfeef0724420p+2, 0x1.eec2000000000p+0, 0x1.08ebe9d4e24aep-1, 0x1.1bb54ba55bb8ep-3}, // exponent = 2 + + /* mantissa = 0x1.d0... */ + {0x1.d1f9c6201cc80p+0, 0x1.3892000000000p+0, 0x1.a35607552f1cdp-1, 0x1.1948fa1f5ff30p-1}, // exponent = 0 + {0x1.d2a2f40000000p+1, 0x1.8a00000000000p+0, 0x1.4cab88725af6ep-1, 0x1.18e2ff3fca5acp-2}, // exponent = 1 + {0x1.d17c000000000p+2, 0x1.f000000000000p+0, 0x1.0842108421084p-1, 0x1.1994faf4aec92p-3}, // exponent = 2 + + /* mantissa = 0x1.d4... */ + {0x1.d5f8615bde180p+0, 0x1.3976000000000p+0, 0x1.a22504db000b7p-1, 0x1.16e4ee12da718p-1}, // exponent = 0 + {0x1.d5f9b87878000p+1, 0x1.8af0000000000p+0, 0x1.4be15f5393e98p-1, 0x1.16e4227697dbfp-2}, // exponent = 1 + {0x1.d723520000000p+2, 0x1.f200000000000p+0, 0x1.073260a47f7c6p-1, 0x1.1633f845cb3dep-3}, // exponent = 2 + + /* mantissa = 0x1.d8... */ + {0x1.d866280000000p+0, 0x1.3a00000000000p+0, 0x1.a16d3f97a4b02p-1, 0x1.1575d8c8402f4p-1}, // exponent = 0 + {0x1.d9c7600000000p+1, 0x1.8c00000000000p+0, 0x1.4afd6a052bf5bp-1, 0x1.14a6fd8916ecfp-2}, // exponent = 1 + {0x1.d9fb5ac000000p+2, 0x1.f300000000000p+0, 0x1.06ab59c7912fbp-1, 0x1.1488a6b10c148p-3}, // exponent = 2 + + /* mantissa = 0x1.dc... */ + {0x1.ddfdfe805bc00p+0, 0x1.3b3c000000000p+0, 0x1.9fcacece0b241p-1, 0x1.1236b509d4023p-1}, // exponent = 0 + {0x1.ddff55aa1e600p+1, 0x1.8d2c000000000p+0, 0x1.4a036770fd266p-1, 0x1.1235f02ce295ap-2}, // exponent = 1 + {0x1.dcd6500000000p+2, 0x1.f400000000000p+0, 0x1.0624dd2f1a9fcp-1, 0x1.12e0be826d695p-3}, // exponent = 2 + + /* mantissa = 0x1.e0... 
*/ + {0x1.e17bc00000000p+0, 0x1.3c00000000000p+0, 0x1.9ec8e951033d9p-1, 0x1.1039b25a7f122p-1}, // exponent = 0 + {0x1.e0fe5c0000000p+1, 0x1.8e00000000000p+0, 0x1.49539e3b2d067p-1, 0x1.1080a9d1be542p-2}, // exponent = 1 + {0x1.e295060000000p+2, 0x1.f600000000000p+0, 0x1.05197f7d73404p-1, 0x1.0f9b07a631f92p-3}, // exponent = 2 + + /* mantissa = 0x1.e4... */ + {0x1.e5ff3ecf6fc00p+0, 0x1.3cfc000000000p+0, 0x1.9d7f292cef9bap-1, 0x1.0db275be001a6p-1}, // exponent = 0 + {0x1.e5fefa40c0000p+1, 0x1.8f60000000000p+0, 0x1.48315b6c3fc79p-1, 0x1.0db29bc986108p-2}, // exponent = 1 + {0x1.e5fe06d9140e0p+2, 0x1.f72e000000000p+0, 0x1.047cca585fbe4p-1, 0x1.0db322dce8431p-3}, // exponent = 2 + + /* mantissa = 0x1.e8... */ + {0x1.eaaef80000000p+0, 0x1.3e00000000000p+0, 0x1.9c2d14ee4a102p-1, 0x1.0b1f0c9a4ed7cp-1}, // exponent = 0 + {0x1.e848000000000p+1, 0x1.9000000000000p+0, 0x1.47ae147ae147bp-1, 0x1.0c6f7a0b5ed8dp-2}, // exponent = 1 + {0x1.e85f800000000p+2, 0x1.f800000000000p+0, 0x1.0410410410410p-1, 0x1.0c628f55c92dep-3}, // exponent = 2 + + /* mantissa = 0x1.ec... */ + {0x1.edfb5912a5180p+0, 0x1.3eb6000000000p+0, 0x1.9b41b55ca11fcp-1, 0x1.0956733c0be03p-1}, // exponent = 0 + {0x1.efa4640000000p+1, 0x1.9200000000000p+0, 0x1.460cbc7f5cf9ap-1, 0x1.0872e8415508dp-2}, // exponent = 1 + {0x1.ee35ca0000000p+2, 0x1.fa00000000000p+0, 0x1.03091b51f5e1ap-1, 0x1.093712d33ff42p-3}, // exponent = 2 + + /* mantissa = 0x1.f0... */ + {0x1.f1fd112ab0c80p+0, 0x1.3f92000000000p+0, 0x1.9a2696dd75ba1p-1, 0x1.0733ed7907e73p-1}, // exponent = 0 + {0x1.f1fc8b255bc40p+1, 0x1.92a2000000000p+0, 0x1.45898cb57730cp-1, 0x1.0734344eaebefp-2}, // exponent = 1 + {0x1.f1ff2ff2d4ba0p+2, 0x1.fb4a000000000p+0, 0x1.02609989a73cfp-1, 0x1.0732ce999c3d1p-3}, // exponent = 2 + + /* mantissa = 0x1.f4... */ + {0x1.f400000000000p+0, 0x1.4000000000000p+0, 0x1.999999999999ap-1, 0x1.0624dd2f1a9fcp-1}, // exponent = 0 + {0x1.f713a00000000p+1, 0x1.9400000000000p+0, 0x1.446f86562d9fbp-1, 0x1.048a727489527p-2}, // exponent = 1 + {0x1.f417f00000000p+2, 0x1.fc00000000000p+0, 0x1.0204081020408p-1, 0x1.061850f2a7123p-3}, // exponent = 2 + + /* mantissa = 0x1.f8... */ + {0x1.f9fe36d7a7d80p+0, 0x1.4146000000000p+0, 0x1.97f9f956c92fdp-1, 0x1.030a055aebeddp-1}, // exponent = 0 + {0x1.f9f8b6ce70ec0p+1, 0x1.94c6000000000p+0, 0x1.43d0d2af8e146p-1, 0x1.030cd637fd65ep-2}, // exponent = 1 + {0x1.fa05fe0000000p+2, 0x1.fe00000000000p+0, 0x1.0101010101010p-1, 0x1.03060a0f151c2p-3}, // exponent = 2 + + /* mantissa = 0x1.fc... 
*/ + {0x1.fd6f080000000p+0, 0x1.4200000000000p+0, 0x1.970e4f80cb872p-1, 0x1.014a239d8b1a9p-1}, // exponent = 0 + {0x1.fe95cc0000000p+1, 0x1.9600000000000p+0, 0x1.42d6625d51f87p-1, 0x1.00b59a78a8ffcp-2}, // exponent = 1 + {0x1.0000000000000p+3, 0x1.0000000000000p+1, 0x1.0000000000000p-1, 0x1.0000000000000p-3}, // exponent = 2 }; union floatdata { float f; int32_t x; }; -float cbrtf(float x) { - union floatdata xabs, result; - int32_t mantissa_key; - double r; - const struct cbrt_table_entry *table; - - if (x != x) return x + x; - - /* Reset the sign bit to get the absolute value */ - xabs.f = (float)((int32_t)x & 0x7fffffff); - if (xabs.f == __builtin_inff()) return (x); - - if (xabs.f < 0x1.0p-126f) { // denormal path - if (xabs.f == 0.0f) return x; - xabs.f *= 0x1.0p45f; - - result.x = ((xabs.x & 0x7f800000U) >> 23) - 1; - mantissa_key = ((xabs.x & 0x007e0000U) >> 17) * 3; - - table = cbrt_table + mantissa_key + result.x%3; - - xabs.x = (xabs.x & 0x007fffffU) | ((result.x%3 + 127) << 23); - r = ((double)xabs.f - table->x)*(table->recip_x); - result.x = (result.x / 3 + 70) << 23; - result.x = (result.x & 0x7fffffff) - | (*(int32_t *) &x & 0x80000000); - } else { - result.x = ((xabs.x & 0x7f800000U) >> 23) - 1; - mantissa_key = ((xabs.x & 0x007e0000U) >> 17) * 3; - - table = cbrt_table + mantissa_key + result.x%3; - - xabs.x = (xabs.x & 0x007fffffU) | ((result.x%3 + 127) << 23); - r = ((double)xabs.f - table->x)*(table->recip_x); - result.x = (result.x / 3 + 85) << 23; - result.x = (result.x & 0x7fffffff) - | (*(int32_t *) &x & 0x80000000); - } - - /* Bigger polynomial for correctly rounded cbrt. */ - double poly = 1.0 + (.333333333333341976693463092094589 + (-.111111111111154331658603135046499 + (0.617283944244925372967204212785709e-1 + (-0.411522622533364699898800342654033e-1 + (0.301852863186459692668300411679515e-1 - 0.234797653033909108182788624401527e-1*r)*r)*r)*r)*r)*r; - - poly *= table->cbrt_x; - result.f *= (float)poly; - return(result.f); +float +cbrtf(float x) +{ + union floatdata xabs, result; + int32_t mantissa_key; + double r; + const struct cbrt_table_entry *table; + + if (x != x) { + return x + x; + } + + /* Reset the sign bit to get the absolute value */ + xabs.f = (float)((int32_t)x & 0x7fffffff); + if (xabs.f == __builtin_inff()) { + return x; + } + + if (xabs.f < 0x1.0p-126f) { // denormal path + if (xabs.f == 0.0f) { + return x; + } + xabs.f *= 0x1.0p45f; + + result.x = ((xabs.x & 0x7f800000U) >> 23) - 1; + mantissa_key = ((xabs.x & 0x007e0000U) >> 17) * 3; + + table = cbrt_table + mantissa_key + result.x % 3; + + xabs.x = (xabs.x & 0x007fffffU) | ((result.x % 3 + 127) << 23); + r = ((double)xabs.f - table->x) * (table->recip_x); + result.x = (result.x / 3 + 70) << 23; + result.x = (result.x & 0x7fffffff) + | (*(int32_t *) &x & 0x80000000); + } else { + result.x = ((xabs.x & 0x7f800000U) >> 23) - 1; + mantissa_key = ((xabs.x & 0x007e0000U) >> 17) * 3; + + table = cbrt_table + mantissa_key + result.x % 3; + + xabs.x = (xabs.x & 0x007fffffU) | ((result.x % 3 + 127) << 23); + r = ((double)xabs.f - table->x) * (table->recip_x); + result.x = (result.x / 3 + 85) << 23; + result.x = (result.x & 0x7fffffff) + | (*(int32_t *) &x & 0x80000000); + } + + /* Bigger polynomial for correctly rounded cbrt. 
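+	 * r = ((double)xabs.f - table->x) * table->recip_x is the relative
+	 * residual left after the mantissa/exponent table lookup, so the
+	 * series below approximates (1 + r)^(1/3); the result is then
+	 * rescaled by table->cbrt_x and by the 2^(exponent/3) scale
+	 * already built into result.x.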
*/ + double poly = 1.0 + (.333333333333341976693463092094589 + (-.111111111111154331658603135046499 + (0.617283944244925372967204212785709e-1 + (-0.411522622533364699898800342654033e-1 + (0.301852863186459692668300411679515e-1 - 0.234797653033909108182788624401527e-1 * r) * r) * r) * r) * r) * r; + + poly *= table->cbrt_x; + result.f *= (float)poly; + return result.f; } diff --git a/bsd/netinet/cpu_in_cksum_gen.c b/bsd/netinet/cpu_in_cksum_gen.c index 3d88e15e4..2cdb63596 100644 --- a/bsd/netinet/cpu_in_cksum_gen.c +++ b/bsd/netinet/cpu_in_cksum_gen.c @@ -64,7 +64,7 @@ #include #include #include -#define CKSUM_ERR(fmt, args...) kprintf(fmt, ## args) +#define CKSUM_ERR(fmt, args...) kprintf(fmt, ## args) #else /* !KERNEL */ #ifndef LIBSYSCALL_INTERFACE #error "LIBSYSCALL_INTERFACE not defined" @@ -79,26 +79,26 @@ /* compile time assert */ #ifndef _CASSERT -#define _CASSERT(x) _Static_assert(x, "compile-time assertion failed") +#define _CASSERT(x) _Static_assert(x, "compile-time assertion failed") #endif /* !_CASSERT */ #ifndef VERIFY -#define VERIFY(EX) ((void)0) +#define VERIFY(EX) ((void)0) #endif /* !VERIFY */ #ifndef CKSUM_ERR -#define CKSUM_ERR(fmt, args...) ((void)0) +#define CKSUM_ERR(fmt, args...) ((void)0) #endif /* !CKSUM_ERR */ -#define PREDICT_TRUE(x) __builtin_expect(!!((long)(x)), 1L) -#define PREDICT_FALSE(x) __builtin_expect(!!((long)(x)), 0L) +#define PREDICT_TRUE(x) __builtin_expect(!!((long)(x)), 1L) +#define PREDICT_FALSE(x) __builtin_expect(!!((long)(x)), 0L) /* fake mbuf struct used only for calling os_cpu_in_cksum_mbuf() */ struct _mbuf { - struct _mbuf *_m_next; - void *_m_pad; - uint8_t *_m_data; - int32_t _m_len; + struct _mbuf *_m_next; + void *_m_pad; + uint8_t *_m_data; + int32_t _m_len; }; extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t); @@ -112,12 +112,12 @@ os_cpu_in_cksum(const void *data, uint32_t len, uint32_t initial_sum) * and the amount to checksum is small, this would be quicker; * this is suitable for IPv4 header. 
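	 * For the common 20-byte IPv4 header this path adds five 32-bit
	 * words into a 64-bit accumulator, so per-addition carries never
	 * need special handling; the total is folded down to 16 bits once
	 * at the end.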
*/ - if (IS_P2ALIGNED(data, sizeof (uint32_t)) && + if (IS_P2ALIGNED(data, sizeof(uint32_t)) && len <= 64 && (len & 3) == 0) { uint8_t *p = __DECONST(uint8_t *, data); uint64_t sum = initial_sum; - if (PREDICT_TRUE(len == 20)) { /* simple IPv4 header */ + if (PREDICT_TRUE(len == 20)) { /* simple IPv4 header */ sum += *(uint32_t *)(void *)p; sum += *(uint32_t *)(void *)(p + 4); sum += *(uint32_t *)(void *)(p + 8); @@ -132,12 +132,12 @@ os_cpu_in_cksum(const void *data, uint32_t len, uint32_t initial_sum) } /* fold 64-bit to 16-bit (deferred carries) */ - sum = (sum >> 32) + (sum & 0xffffffff); /* 33-bit */ - sum = (sum >> 16) + (sum & 0xffff); /* 17-bit + carry */ - sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */ - sum = (sum >> 16) + (sum & 0xffff); /* final carry */ + sum = (sum >> 32) + (sum & 0xffffffff); /* 33-bit */ + sum = (sum >> 16) + (sum & 0xffff); /* 17-bit + carry */ + sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */ + sum = (sum >> 16) + (sum & 0xffff); /* final carry */ - return (sum & 0xffff); + return sum & 0xffff; } /* @@ -169,7 +169,7 @@ os_cpu_in_cksum(const void *data, uint32_t len, uint32_t initial_sum) ._m_len = len, }; - return (os_cpu_in_cksum_mbuf(&m, len, 0, initial_sum)); + return os_cpu_in_cksum_mbuf(&m, len, 0, initial_sum); } #if defined(__i386__) || defined(__x86_64__) @@ -215,7 +215,7 @@ os_cpu_in_cksum_mbuf(struct _mbuf *m, int len, int off, uint32_t initial_sum) for (;;) { if (PREDICT_FALSE(m == NULL)) { CKSUM_ERR("%s: out of data\n", __func__); - return ((uint32_t)-1); + return (uint32_t)-1; } mlen = m->_m_len; if (mlen > off) { @@ -224,23 +224,26 @@ os_cpu_in_cksum_mbuf(struct _mbuf *m, int len, int off, uint32_t initial_sum) goto post_initial_offset; } off -= mlen; - if (len == 0) + if (len == 0) { break; + } m = m->_m_next; } for (; len > 0; m = m->_m_next) { if (PREDICT_FALSE(m == NULL)) { CKSUM_ERR("%s: out of data\n", __func__); - return ((uint32_t)-1); + return (uint32_t)-1; } mlen = m->_m_len; data = m->_m_data; post_initial_offset: - if (mlen == 0) + if (mlen == 0) { continue; - if (mlen > len) + } + if (mlen > len) { mlen = len; + } len -= mlen; partial = 0; @@ -277,9 +280,10 @@ post_initial_offset: data += 32; mlen -= 32; if (PREDICT_FALSE(partial & 0xc0000000)) { - if (needs_swap) + if (needs_swap) { partial = (partial << 8) + (partial >> 24); + } sum += (partial >> 16); sum += (partial & 0xffff); partial = 0; @@ -326,8 +330,9 @@ post_initial_offset: started_on_odd = !started_on_odd; } - if (needs_swap) + if (needs_swap) { partial = (partial << 8) + (partial >> 24); + } sum += (partial >> 16) + (partial & 0xffff); /* * Reduce sum to allow potential byte swap @@ -337,7 +342,7 @@ post_initial_offset: } final_acc = ((sum >> 16) & 0xffff) + (sum & 0xffff); final_acc = (final_acc >> 16) + (final_acc & 0xffff); - return (final_acc & 0xffff); + return final_acc & 0xffff; } #else /* __LP64__ */ @@ -361,7 +366,7 @@ os_cpu_in_cksum_mbuf(struct _mbuf *m, int len, int off, uint32_t initial_sum) for (;;) { if (PREDICT_FALSE(m == NULL)) { CKSUM_ERR("%s: out of data\n", __func__); - return ((uint32_t)-1); + return (uint32_t)-1; } mlen = m->_m_len; if (mlen > off) { @@ -370,23 +375,26 @@ os_cpu_in_cksum_mbuf(struct _mbuf *m, int len, int off, uint32_t initial_sum) goto post_initial_offset; } off -= mlen; - if (len == 0) + if (len == 0) { break; + } m = m->_m_next; } for (; len > 0; m = m->_m_next) { if (PREDICT_FALSE(m == NULL)) { CKSUM_ERR("%s: out of data\n", __func__); - return ((uint32_t)-1); + return (uint32_t)-1; } mlen = m->_m_len; 
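		/*
		 * Each mbuf segment is summed into the local 'partial'
		 * accumulator below; 'partial' is folded into the 64-bit
		 * 'sum' whenever its top bits fill up, and 'sum' itself is
		 * reduced to a 16-bit result only after the last segment.
		 */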
data = m->_m_data; post_initial_offset: - if (mlen == 0) + if (mlen == 0) { continue; - if (mlen > len) + } + if (mlen > len) { mlen = len; + } len -= mlen; partial = 0; @@ -403,8 +411,9 @@ post_initial_offset: } needs_swap = started_on_odd; if ((uintptr_t)data & 2) { - if (mlen < 2) + if (mlen < 2) { goto trailing_bytes; + } partial += *(uint16_t *)(void *)data; data += 2; mlen -= 2; @@ -431,9 +440,10 @@ post_initial_offset: data += 64; mlen -= 64; if (PREDICT_FALSE(partial & (3ULL << 62))) { - if (needs_swap) + if (needs_swap) { partial = (partial << 8) + (partial >> 56); + } sum += (partial >> 32); sum += (partial & 0xffffffff); partial = 0; @@ -484,8 +494,9 @@ trailing_bytes: started_on_odd = !started_on_odd; } - if (needs_swap) + if (needs_swap) { partial = (partial << 8) + (partial >> 56); + } sum += (partial >> 32) + (partial & 0xffffffff); /* * Reduce sum to allow potential byte swap @@ -497,7 +508,7 @@ trailing_bytes: ((sum >> 16) & 0xffff) + (sum & 0xffff); final_acc = (final_acc >> 16) + (final_acc & 0xffff); final_acc = (final_acc >> 16) + (final_acc & 0xffff); - return (final_acc & 0xffff); + return final_acc & 0xffff; } #endif /* __LP64 */ diff --git a/bsd/netinet/dhcp.h b/bsd/netinet/dhcp.h index e2868508f..29c1ee818 100644 --- a/bsd/netinet/dhcp.h +++ b/bsd/netinet/dhcp.h @@ -1,4 +1,3 @@ - #ifndef _NETINET_DHCP_H #define _NETINET_DHCP_H #include @@ -7,7 +6,7 @@ * Copyright (c) 1999-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -16,10 +15,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -27,7 +26,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -41,67 +40,67 @@ #include struct dhcp { - u_char dp_op; /* packet opcode type */ - u_char dp_htype; /* hardware addr type */ - u_char dp_hlen; /* hardware addr length */ - u_char dp_hops; /* gateway hops */ - u_int32_t dp_xid; /* transaction ID */ - u_int16_t dp_secs; /* seconds since boot began */ - u_int16_t dp_flags; /* flags */ - struct in_addr dp_ciaddr; /* client IP address */ - struct in_addr dp_yiaddr; /* 'your' IP address */ - struct in_addr dp_siaddr; /* server IP address */ - struct in_addr dp_giaddr; /* gateway IP address */ - u_char dp_chaddr[16]; /* client hardware address */ - u_char dp_sname[64]; /* server host name */ - u_char dp_file[128]; /* boot file name */ - u_char dp_options[0]; /* variable-length options field */ + u_char dp_op; /* packet opcode type */ + u_char dp_htype; /* hardware addr type */ + u_char dp_hlen; /* hardware addr length */ + u_char dp_hops; /* gateway hops */ + u_int32_t dp_xid; /* transaction ID */ + u_int16_t dp_secs; /* seconds since boot began */ + u_int16_t dp_flags; /* flags */ + struct in_addr dp_ciaddr; /* client IP address */ + struct in_addr dp_yiaddr; /* 'your' IP address */ + struct in_addr dp_siaddr; /* server IP address */ + struct in_addr dp_giaddr; /* gateway IP address */ + u_char dp_chaddr[16];/* client hardware address */ + u_char dp_sname[64];/* server host name */ + u_char dp_file[128];/* boot file name */ + u_char dp_options[0];/* variable-length options field */ }; struct dhcp_packet { - struct ip ip; - struct udphdr udp; - struct dhcp dhcp; + struct ip ip; + struct udphdr udp; + struct dhcp dhcp; }; -#define DHCP_OPTIONS_MIN 312 -#define DHCP_PACKET_MIN (sizeof(struct dhcp_packet) + DHCP_OPTIONS_MIN) -#define DHCP_PAYLOAD_MIN (sizeof(struct dhcp) + DHCP_OPTIONS_MIN) +#define DHCP_OPTIONS_MIN 312 +#define DHCP_PACKET_MIN (sizeof(struct dhcp_packet) + DHCP_OPTIONS_MIN) +#define DHCP_PAYLOAD_MIN (sizeof(struct dhcp) + DHCP_OPTIONS_MIN) /* dhcp message types */ -#define DHCPDISCOVER 1 -#define DHCPOFFER 2 -#define DHCPREQUEST 3 -#define DHCPDECLINE 4 -#define DHCPACK 5 -#define DHCPNAK 6 -#define DHCPRELEASE 7 -#define DHCPINFORM 8 +#define DHCPDISCOVER 1 +#define DHCPOFFER 2 +#define DHCPREQUEST 3 +#define DHCPDECLINE 4 +#define DHCPACK 5 +#define DHCPNAK 6 +#define DHCPRELEASE 7 +#define DHCPINFORM 8 enum { - dhcp_msgtype_none_e = 0, - dhcp_msgtype_discover_e = DHCPDISCOVER, - dhcp_msgtype_offer_e = DHCPOFFER, - dhcp_msgtype_request_e = DHCPREQUEST, - dhcp_msgtype_decline_e = DHCPDECLINE, - dhcp_msgtype_ack_e = DHCPACK, - dhcp_msgtype_nak_e = DHCPNAK, - dhcp_msgtype_release_e = DHCPRELEASE, - dhcp_msgtype_inform_e = DHCPINFORM, + dhcp_msgtype_none_e = 0, + dhcp_msgtype_discover_e = DHCPDISCOVER, + dhcp_msgtype_offer_e = DHCPOFFER, + dhcp_msgtype_request_e = DHCPREQUEST, + dhcp_msgtype_decline_e = DHCPDECLINE, + dhcp_msgtype_ack_e = DHCPACK, + dhcp_msgtype_nak_e = DHCPNAK, + dhcp_msgtype_release_e = DHCPRELEASE, + dhcp_msgtype_inform_e = DHCPINFORM, }; typedef uint8_t dhcp_msgtype_t; -typedef int32_t dhcp_time_secs_t; /* absolute time */ -typedef int32_t dhcp_lease_t; /* relative time */ -#define dhcp_time_hton htonl -#define dhcp_time_ntoh ntohl -#define dhcp_lease_hton htonl -#define dhcp_lease_ntoh ntohl +typedef int32_t dhcp_time_secs_t; /* absolute time */ +typedef int32_t dhcp_lease_t; /* relative time */ +#define dhcp_time_hton htonl +#define dhcp_time_ntoh ntohl +#define dhcp_lease_hton htonl +#define dhcp_lease_ntoh ntohl -#define DHCP_INFINITE_LEASE 
((dhcp_lease_t)-1) -#define DHCP_INFINITE_TIME ((dhcp_time_secs_t)-1) +#define DHCP_INFINITE_LEASE ((dhcp_lease_t)-1) +#define DHCP_INFINITE_TIME ((dhcp_time_secs_t)-1) -#define DHCP_FLAGS_BROADCAST ((u_short)0x0001) +#define DHCP_FLAGS_BROADCAST ((u_short)0x0001) #endif /* _NETINET_DHCP_H */ diff --git a/bsd/netinet/dhcp_options.c b/bsd/netinet/dhcp_options.c index db54e59d5..c38c6a6fa 100644 --- a/bsd/netinet/dhcp_options.c +++ b/bsd/netinet/dhcp_options.c @@ -2,7 +2,7 @@ * Copyright (c) 2002-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,7 +33,7 @@ * that are encoded using the RFC 2132 encoding */ -/* +/* * Modification History * * March 15, 2002 Dieter Siegmund (dieter@apple) @@ -51,48 +51,48 @@ #ifndef TEST_DHCP_OPTIONS #include -#ifdef DHCP_DEBUG -#define dprintf(x) printf x; -#else /* !DHCP_DEBUG */ -#define dprintf(x) -#endif /* DHCP_DEBUG */ +#ifdef DHCP_DEBUG +#define dprintf(x) printf x; +#else /* !DHCP_DEBUG */ +#define dprintf(x) +#endif /* DHCP_DEBUG */ static __inline__ void my_free(void * ptr) { - _FREE(ptr, M_TEMP); + _FREE(ptr, M_TEMP); } static __inline__ void * my_malloc(int size) { - void * data; - MALLOC(data, void *, size, M_TEMP, M_WAITOK); - return (data); + void * data; + MALLOC(data, void *, size, M_TEMP, M_WAITOK); + return data; } static __inline__ void * my_realloc(void * oldptr, int oldsize, int newsize) { - void * data; + void * data; - MALLOC(data, void *, newsize, M_TEMP, M_WAITOK); - bcopy(oldptr, data, oldsize); - my_free(oldptr); - return (data); + MALLOC(data, void *, newsize, M_TEMP, M_WAITOK); + bcopy(oldptr, data, oldsize); + my_free(oldptr); + return data; } #else /* * To build: - * xcrun -sdk macosx.internal cc -DTEST_DHCP_OPTIONS -o /tmp/dhcp_options dhcp_options.c -I .. + * xcrun -sdk macosx.internal cc -DTEST_DHCP_OPTIONS -o /tmp/dhcp_options dhcp_options.c -I .. */ #include #include #include -#define my_free free +#define my_free free #define my_malloc malloc #define my_realloc(ptr, old_size, new_size) realloc(ptr, new_size) -#define dprintf(x) printf x; +#define dprintf(x) printf x; #endif /* @@ -101,103 +101,112 @@ my_realloc(void * oldptr, int oldsize, int newsize) * A dynamically growable array of pointers. 
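 * The backing array starts at PTRLIST_NUMBER entries and doubles
 * whenever it fills (see ptrlist_grow() below), so ptrlist_add()
 * appends in amortized constant time.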
*/ -#define PTRLIST_NUMBER 16 +#define PTRLIST_NUMBER 16 static void ptrlist_init(ptrlist_t * list) { - bzero(list, sizeof(*list)); - return; + bzero(list, sizeof(*list)); + return; } static void ptrlist_free(ptrlist_t * list) { - if (list->array) - my_free(list->array); - ptrlist_init(list); - return; + if (list->array) { + my_free(list->array); + } + ptrlist_init(list); + return; } static int ptrlist_count(ptrlist_t * list) { - if (list == NULL || list->array == NULL) - return (0); + if (list == NULL || list->array == NULL) { + return 0; + } - return (list->count); + return list->count; } static const void * ptrlist_element(ptrlist_t * list, int i) { - if (list->array == NULL) - return (NULL); - if (i < list->count) - return (list->array[i]); - return (NULL); + if (list->array == NULL) { + return NULL; + } + if (i < list->count) { + return list->array[i]; + } + return NULL; } static boolean_t ptrlist_grow(ptrlist_t * list) { - if (list->array == NULL) { - if (list->size == 0) - list->size = PTRLIST_NUMBER; - list->count = 0; - list->array = my_malloc(sizeof(*list->array) * list->size); - } - else if (list->size == list->count) { - dprintf(("doubling %d to %d\n", list->size, list->size * 2)); - list->array = my_realloc(list->array, - sizeof(*list->array) * list->size, - sizeof(*list->array) * list->size * 2); - list->size *= 2; - } - if (list->array == NULL) - return (FALSE); - return (TRUE); + if (list->array == NULL) { + if (list->size == 0) { + list->size = PTRLIST_NUMBER; + } + list->count = 0; + list->array = my_malloc(sizeof(*list->array) * list->size); + } else if (list->size == list->count) { + dprintf(("doubling %d to %d\n", list->size, list->size * 2)); + list->array = my_realloc(list->array, + sizeof(*list->array) * list->size, + sizeof(*list->array) * list->size * 2); + list->size *= 2; + } + if (list->array == NULL) { + return FALSE; + } + return TRUE; } static boolean_t ptrlist_add(ptrlist_t * list, const void * element) { - if (ptrlist_grow(list) == FALSE) - return (FALSE); + if (ptrlist_grow(list) == FALSE) { + return FALSE; + } - list->array[list->count++] = element; - return (TRUE); + list->array[list->count++] = element; + return TRUE; } /* concatenates extra onto list */ static boolean_t ptrlist_concat(ptrlist_t * list, ptrlist_t * extra) { - if (extra->count == 0) - return (TRUE); - - if ((extra->count + list->count) > list->size) { - int old_size = list->size; - - list->size = extra->count + list->count; - if (list->array == NULL) - list->array = my_malloc(sizeof(*list->array) * list->size); - else - list->array = my_realloc(list->array, old_size, - sizeof(*list->array) * list->size); - } - if (list->array == NULL) - return (FALSE); - bcopy(extra->array, list->array + list->count, - extra->count * sizeof(*list->array)); - list->count += extra->count; - return (TRUE); + if (extra->count == 0) { + return TRUE; + } + + if ((extra->count + list->count) > list->size) { + int old_size = list->size; + + list->size = extra->count + list->count; + if (list->array == NULL) { + list->array = my_malloc(sizeof(*list->array) * list->size); + } else { + list->array = my_realloc(list->array, old_size, + sizeof(*list->array) * list->size); + } + } + if (list->array == NULL) { + return FALSE; + } + bcopy(extra->array, list->array + list->count, + extra->count * sizeof(*list->array)); + list->count += extra->count; + return TRUE; } /* - * Functions: dhcpol_* + * Functions: dhcpol_* * * Purpose: * Routines to parse/access existing options buffers. 
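The buffers handed to dhcpol_parse_buffer() below use the RFC 2132 encoding: pad (tag 0) and end (tag 255) are single bytes, and every other option is a tag byte, a length byte, and that many bytes of value. A minimal standalone sketch of the same tag/length/value walk, independent of the dhcpol list machinery (find_option and its bounds checks are illustrative, not part of this file):

	#include <stdint.h>
	#include <stddef.h>

	/* Return a pointer to the value of the first option with tag
	 * 'want' in a raw RFC 2132 options buffer, or NULL; writes the
	 * value length to *len_out.  Pad (0) and end (255) carry no
	 * length byte. */
	static const uint8_t *
	find_option(const uint8_t *buf, size_t buflen, uint8_t want,
	    uint8_t *len_out)
	{
		size_t i = 0;

		while (i < buflen) {
			uint8_t tag = buf[i];

			if (tag == 255) {               /* dhcptag_end_e */
				break;
			}
			if (tag == 0) {                 /* dhcptag_pad_e */
				i++;
				continue;
			}
			if (i + 1 >= buflen ||
			    i + 2 + buf[i + 1] > buflen) {
				break;                  /* truncated option */
			}
			if (tag == want) {
				*len_out = buf[i + 1];
				return buf + i + 2;
			}
			i += 2 + buf[i + 1];
		}
		return NULL;
	}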
@@ -205,37 +214,37 @@ ptrlist_concat(ptrlist_t * list, ptrlist_t * extra) boolean_t dhcpol_add(dhcpol_t * list, const void * element) { - return (ptrlist_add((ptrlist_t *)list, element)); + return ptrlist_add((ptrlist_t *)list, element); } int dhcpol_count(dhcpol_t * list) { - return (ptrlist_count((ptrlist_t *)list)); + return ptrlist_count((ptrlist_t *)list); } const void * dhcpol_element(dhcpol_t * list, int i) { - return (ptrlist_element((ptrlist_t *)list, i)); + return ptrlist_element((ptrlist_t *)list, i); } void dhcpol_init(dhcpol_t * list) { - ptrlist_init((ptrlist_t *)list); + ptrlist_init((ptrlist_t *)list); } void dhcpol_free(dhcpol_t * list) { - ptrlist_free((ptrlist_t *)list); + ptrlist_free((ptrlist_t *)list); } boolean_t dhcpol_concat(dhcpol_t * list, dhcpol_t * extra) { - return (ptrlist_concat((ptrlist_t *)list, (ptrlist_t *)extra)); + return ptrlist_concat((ptrlist_t *)list, (ptrlist_t *)extra); } /* @@ -250,52 +259,50 @@ dhcpol_concat(dhcpol_t * list, dhcpol_t * extra) boolean_t dhcpol_parse_buffer(dhcpol_t * list, const void * buffer, int length) { - int len; - const uint8_t * scan; - uint8_t tag; - - dhcpol_init(list); - - len = length; - tag = dhcptag_pad_e; - for (scan = (const uint8_t *)buffer; - tag != dhcptag_end_e && len > DHCP_TAG_OFFSET; ) { - - tag = scan[DHCP_TAG_OFFSET]; - - switch (tag) { - case dhcptag_end_e: - /* remember that it was terminated */ - dhcpol_add(list, scan); - scan++; - len--; - break; - case dhcptag_pad_e: /* ignore pad */ - scan++; - len--; - break; - default: - if (len > DHCP_LEN_OFFSET) { - uint8_t option_len; - - option_len = scan[DHCP_LEN_OFFSET]; - dhcpol_add(list, scan); - len -= (option_len + DHCP_OPTION_OFFSET); - scan += (option_len + DHCP_OPTION_OFFSET); - } - else { - len = -1; - } - break; + int len; + const uint8_t * scan; + uint8_t tag; + + dhcpol_init(list); + + len = length; + tag = dhcptag_pad_e; + for (scan = (const uint8_t *)buffer; + tag != dhcptag_end_e && len > DHCP_TAG_OFFSET;) { + tag = scan[DHCP_TAG_OFFSET]; + + switch (tag) { + case dhcptag_end_e: + /* remember that it was terminated */ + dhcpol_add(list, scan); + scan++; + len--; + break; + case dhcptag_pad_e: /* ignore pad */ + scan++; + len--; + break; + default: + if (len > DHCP_LEN_OFFSET) { + uint8_t option_len; + + option_len = scan[DHCP_LEN_OFFSET]; + dhcpol_add(list, scan); + len -= (option_len + DHCP_OPTION_OFFSET); + scan += (option_len + DHCP_OPTION_OFFSET); + } else { + len = -1; + } + break; + } + } + if (len < 0) { + /* ran off the end */ + dprintf(("dhcp_options: parse failed near tag %d\n", tag)); + dhcpol_free(list); + return FALSE; } - } - if (len < 0) { - /* ran off the end */ - dprintf(("dhcp_options: parse failed near tag %d\n", tag)); - dhcpol_free(list); - return (FALSE); - } - return (TRUE); + return TRUE; } /* @@ -305,7 +312,7 @@ dhcpol_parse_buffer(dhcpol_t * list, const void * buffer, int length) * Finds the first occurence of the given option, and returns its * length and the option data pointer. * - * The optional start parameter allows this function to + * The optional start parameter allows this function to * return the next start point so that successive * calls will retrieve the next occurence of the option. * Before the first call, *start should be set to 0. 
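Given that contract, a caller collecting every occurrence of a repeated option, such as dhcptag_router_e, keeps a persistent cursor across calls; a hypothetical sketch, assuming 'options' is a dhcpol_t already filled by dhcpol_parse_packet() and process_router() is an illustrative consumer:

	int start = 0;
	int len = 0;
	const void *opt;

	while ((opt = dhcpol_find(&options, dhcptag_router_e,
	    &len, &start)) != NULL) {
		/* 'opt' points at 'len' bytes of option data; '*start'
		 * now indexes one past the matching element, so the next
		 * call resumes the scan there. */
		process_router(opt, len);
	}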
@@ -313,26 +320,30 @@ dhcpol_parse_buffer(dhcpol_t * list, const void * buffer, int length) const void * dhcpol_find(dhcpol_t * list, int tag, int * len_p, int * start) { - int i = 0; - - if (tag == dhcptag_end_e || tag == dhcptag_pad_e) - return (NULL); - - if (start) - i = *start; - - for (; i < dhcpol_count(list); i++) { - const uint8_t * option = dhcpol_element(list, i); - - if (option[DHCP_TAG_OFFSET] == tag) { - if (len_p) - *len_p = option[DHCP_LEN_OFFSET]; - if (start) - *start = i + 1; - return (option + DHCP_OPTION_OFFSET); + int i = 0; + + if (tag == dhcptag_end_e || tag == dhcptag_pad_e) { + return NULL; + } + + if (start) { + i = *start; + } + + for (; i < dhcpol_count(list); i++) { + const uint8_t * option = dhcpol_element(list, i); + + if (option[DHCP_TAG_OFFSET] == tag) { + if (len_p) { + *len_p = option[DHCP_LEN_OFFSET]; + } + if (start) { + *start = i + 1; + } + return option + DHCP_OPTION_OFFSET; + } } - } - return (NULL); + return NULL; } /* @@ -349,109 +360,110 @@ dhcpol_find(dhcpol_t * list, int tag, int * len_p, int * start) boolean_t dhcpol_parse_packet(dhcpol_t * options, const struct dhcp * pkt, int len) { - char rfc_magic[4] = RFC_OPTIONS_MAGIC; - - dhcpol_init(options); /* make sure it's empty */ - - if (len < (sizeof(*pkt) + RFC_MAGIC_SIZE)) { - dprintf(("dhcp_options: packet is too short: %d < %d\n", - len, (int)sizeof(*pkt) + RFC_MAGIC_SIZE)); - return (FALSE); - } - if (bcmp(pkt->dp_options, rfc_magic, RFC_MAGIC_SIZE)) { - dprintf(("dhcp_options: missing magic number\n")); - return (FALSE); - } - if (dhcpol_parse_buffer(options, pkt->dp_options + RFC_MAGIC_SIZE, - len - sizeof(*pkt) - RFC_MAGIC_SIZE) == FALSE) - return (FALSE); - { /* get overloaded options */ - const uint8_t * overload; - int overload_len; - - overload = dhcpol_find(options, dhcptag_option_overload_e, - &overload_len, NULL); - if (overload && overload_len == 1) { /* has overloaded options */ - dhcpol_t extra; - - dhcpol_init(&extra); - if (*overload == DHCP_OVERLOAD_FILE - || *overload == DHCP_OVERLOAD_BOTH) { - if (dhcpol_parse_buffer(&extra, pkt->dp_file, - sizeof(pkt->dp_file))) { - dhcpol_concat(options, &extra); - dhcpol_free(&extra); - } - } - if (*overload == DHCP_OVERLOAD_SNAME - || *overload == DHCP_OVERLOAD_BOTH) { - if (dhcpol_parse_buffer(&extra, pkt->dp_sname, - sizeof(pkt->dp_sname))) { - dhcpol_concat(options, &extra); - dhcpol_free(&extra); + char rfc_magic[4] = RFC_OPTIONS_MAGIC; + + dhcpol_init(options); /* make sure it's empty */ + + if (len < (sizeof(*pkt) + RFC_MAGIC_SIZE)) { + dprintf(("dhcp_options: packet is too short: %d < %d\n", + len, (int)sizeof(*pkt) + RFC_MAGIC_SIZE)); + return FALSE; + } + if (bcmp(pkt->dp_options, rfc_magic, RFC_MAGIC_SIZE)) { + dprintf(("dhcp_options: missing magic number\n")); + return FALSE; + } + if (dhcpol_parse_buffer(options, pkt->dp_options + RFC_MAGIC_SIZE, + len - sizeof(*pkt) - RFC_MAGIC_SIZE) == FALSE) { + return FALSE; + } + { /* get overloaded options */ + const uint8_t * overload; + int overload_len; + + overload = dhcpol_find(options, dhcptag_option_overload_e, + &overload_len, NULL); + if (overload && overload_len == 1) { /* has overloaded options */ + dhcpol_t extra; + + dhcpol_init(&extra); + if (*overload == DHCP_OVERLOAD_FILE + || *overload == DHCP_OVERLOAD_BOTH) { + if (dhcpol_parse_buffer(&extra, pkt->dp_file, + sizeof(pkt->dp_file))) { + dhcpol_concat(options, &extra); + dhcpol_free(&extra); + } + } + if (*overload == DHCP_OVERLOAD_SNAME + || *overload == DHCP_OVERLOAD_BOTH) { + if (dhcpol_parse_buffer(&extra, 
pkt->dp_sname, + sizeof(pkt->dp_sname))) { + dhcpol_concat(options, &extra); + dhcpol_free(&extra); + } + } } - } } - } - return (TRUE); + return TRUE; } #ifdef TEST_DHCP_OPTIONS char test_empty[] = { - 99, 130, 83, 99, - 255, + 99, 130, 83, 99, + 255, }; char test_short[] = { - 99, 130, 83, 99, - 1, + 99, 130, 83, 99, + 1, }; char test_simple[] = { - 99, 130, 83, 99, - 1, 4, 255, 255, 252, 0, - 3, 4, 17, 202, 40, 1, - 255, + 99, 130, 83, 99, + 1, 4, 255, 255, 252, 0, + 3, 4, 17, 202, 40, 1, + 255, }; char test_vendor[] = { - 99, 130, 83, 99, - 1, 4, 255, 255, 252, 0, - 3, 4, 17, 202, 40, 1, - 43, 6, 1, 4, 1, 2, 3, 4, - 43, 6, 1, 4, 1, 2, 3, 4, - 255, + 99, 130, 83, 99, + 1, 4, 255, 255, 252, 0, + 3, 4, 17, 202, 40, 1, + 43, 6, 1, 4, 1, 2, 3, 4, + 43, 6, 1, 4, 1, 2, 3, 4, + 255, }; char test_no_end[] = { - 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x05, 0x36, - 0x04, 0xc0, 0xa8, 0x01, 0x01, 0x33, 0x04, 0x80, - 0x00, 0x80, 0x00, 0x01, 0x04, 0xff, 0xff, 0xff, - 0x00, 0x03, 0x04, 0xc0, 0xa8, 0x01, 0x01, 0x06, - 0x0c, 0x18, 0x1a, 0xa3, 0x21, 0x18, 0x1a, 0xa3, - 0x20, 0x18, 0x5e, 0xa3, 0x21, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x05, 0x36, + 0x04, 0xc0, 0xa8, 0x01, 0x01, 0x33, 0x04, 0x80, + 0x00, 0x80, 0x00, 0x01, 0x04, 0xff, 0xff, 0xff, + 0x00, 0x03, 0x04, 0xc0, 0xa8, 0x01, 0x01, 0x06, + 0x0c, 0x18, 0x1a, 0xa3, 0x21, 0x18, 0x1a, 0xa3, + 0x20, 0x18, 0x5e, 0xa3, 0x21, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; char test_no_magic[] = { - 0x1 + 0x1 }; struct test { - char * name; - char * data; - int len; - boolean_t result; + char * name; + char * data; + int len; + boolean_t result; }; struct test tests[] = { - { "empty", test_empty, sizeof(test_empty), TRUE }, - { "simple", test_simple, sizeof(test_simple), TRUE }, - { "vendor", test_vendor, sizeof(test_vendor), TRUE }, - { "no_end", test_no_end, sizeof(test_no_end), TRUE }, - { "no magic", test_no_magic, sizeof(test_no_magic), FALSE }, - { "short", test_short, sizeof(test_short), FALSE }, - { NULL, NULL, 0, FALSE }, + { "empty", test_empty, sizeof(test_empty), TRUE }, + { "simple", test_simple, sizeof(test_simple), TRUE }, + { "vendor", test_vendor, sizeof(test_vendor), TRUE }, + { "no_end", test_no_end, sizeof(test_no_end), TRUE }, + { "no magic", test_no_magic, sizeof(test_no_magic), FALSE }, + { "short", test_short, sizeof(test_short), FALSE }, + { NULL, NULL, 0, FALSE }, }; @@ -460,25 +472,24 @@ static char buf[2048]; int main() { - int i; - dhcpol_t options; - struct dhcp * pkt = (struct dhcp *)buf; - - dhcpol_init(&options); - - for (i = 0; tests[i].name; i++) { - printf("\nTest %d: ", i); - bcopy(tests[i].data, pkt->dp_options, tests[i].len); - if (dhcpol_parse_packet(&options, pkt, - sizeof(*pkt) + tests[i].len) - != tests[i].result) { - printf("test '%s' FAILED\n", tests[i].name); - } - else { - printf("test '%s' PASSED\n", tests[i].name); + int i; + dhcpol_t options; + struct dhcp * pkt = (struct dhcp *)buf; + + dhcpol_init(&options); + + for (i = 0; tests[i].name; i++) { + printf("\nTest %d: ", i); + bcopy(tests[i].data, pkt->dp_options, tests[i].len); + if (dhcpol_parse_packet(&options, pkt, + sizeof(*pkt) + tests[i].len) + != tests[i].result) { + printf("test '%s' FAILED\n", tests[i].name); + } else { + printf("test '%s' PASSED\n", tests[i].name); + } + dhcpol_free(&options); } - dhcpol_free(&options); - } - exit(0); + exit(0); } #endif /* 
TEST_DHCP_OPTIONS */ diff --git a/bsd/netinet/dhcp_options.h b/bsd/netinet/dhcp_options.h index 6986a9ce6..f99cb5393 100644 --- a/bsd/netinet/dhcp_options.h +++ b/bsd/netinet/dhcp_options.h @@ -1,4 +1,3 @@ - #ifndef _NETINET_DHCP_OPTIONS_H #define _NETINET_DHCP_OPTIONS_H #include @@ -6,7 +5,7 @@ * Copyright (c) 1999-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -15,10 +14,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -26,7 +25,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,125 +45,125 @@ /* overloaded option values */ -#define DHCP_OVERLOAD_FILE 1 -#define DHCP_OVERLOAD_SNAME 2 -#define DHCP_OVERLOAD_BOTH 3 +#define DHCP_OVERLOAD_FILE 1 +#define DHCP_OVERLOAD_SNAME 2 +#define DHCP_OVERLOAD_BOTH 3 /* * DHCP_OPTION_SIZE_MAX * - the largest size that an option can be (limited to an 8-bit quantity) */ -#define DHCP_OPTION_SIZE_MAX 255 +#define DHCP_OPTION_SIZE_MAX 255 -#define DHCP_TAG_OFFSET 0 -#define DHCP_LEN_OFFSET 1 -#define DHCP_OPTION_OFFSET 2 +#define DHCP_TAG_OFFSET 0 +#define DHCP_LEN_OFFSET 1 +#define DHCP_OPTION_OFFSET 2 -#define RFC_OPTIONS_MAGIC { 99, 130, 83, 99 } -#define RFC_MAGIC_SIZE 4 /* bytes */ +#define RFC_OPTIONS_MAGIC { 99, 130, 83, 99 } +#define RFC_MAGIC_SIZE 4 /* bytes */ enum { - /* rfc 1497 vendor extensions: 0..18, 255 */ - dhcptag_pad_e = 0, - dhcptag_end_e = 255, - dhcptag_subnet_mask_e = 1, - dhcptag_time_offset_e = 2, - dhcptag_router_e = 3, - dhcptag_time_server_e = 4, - dhcptag_name_server_e = 5, - dhcptag_domain_name_server_e = 6, - dhcptag_log_server_e = 7, - dhcptag_cookie_server_e = 8, - dhcptag_lpr_server_e = 9, - dhcptag_impress_server_e = 10, - dhcptag_resource_location_server_e = 11, - dhcptag_host_name_e = 12, - dhcptag_boot_file_size_e = 13, - dhcptag_merit_dump_file_e = 14, - dhcptag_domain_name_e = 15, - dhcptag_swap_server_e = 16, - dhcptag_root_path_e = 17, - dhcptag_extensions_path_e = 18, - - /* ip layer parameters per host: 19..25 */ - dhcptag_ip_forwarding_e = 19, - dhcptag_non_local_source_routing_e = 20, - dhcptag_policy_filter_e = 21, - dhcptag_max_dgram_reassembly_size_e = 22, - dhcptag_default_ip_time_to_live_e = 23, - dhcptag_path_mtu_aging_timeout_e = 24, - dhcptag_path_mtu_plateau_table_e = 25, - - /* ip layer parameters per interface: 26..33 */ - dhcptag_interface_mtu_e = 26, - dhcptag_all_subnets_local_e = 27, - dhcptag_broadcast_address_e = 28, - dhcptag_perform_mask_discovery_e = 29, - dhcptag_mask_supplier_e = 30, - dhcptag_perform_router_discovery_e = 31, - dhcptag_router_solicitation_address_e = 32, - dhcptag_static_route_e = 33, - dhcptag_trailer_encapsulation_e = 34, - 
dhcptag_arp_cache_timeout_e = 35, - dhcptag_ethernet_encapsulation_e = 36, - - /* tcp parameters: 37..39 */ - dhcptag_default_ttl_e = 37, - dhcptag_keepalive_interval_e = 38, - dhcptag_keepalive_garbage_e = 39, - - /* application & service parameters: 40..49, 64, 65, 68..76, 78, 79, 95 */ - dhcptag_nis_domain_e = 40, - dhcptag_nis_servers_e = 41, - dhcptag_network_time_protocol_servers_e = 42, - dhcptag_vendor_specific_e = 43, - dhcptag_nb_over_tcpip_name_server_e = 44, - dhcptag_nb_over_tcpip_dgram_dist_server_e = 45, - dhcptag_nb_over_tcpip_node_type_e = 46, - dhcptag_nb_over_tcpip_scope_e = 47, - dhcptag_x_windows_font_server_e = 48, - dhcptag_x_windows_display_manager_e = 49, - dhcptag_nis_plus_domain_e = 64, - dhcptag_nis_plus_servers_e = 65, - dhcptag_mobile_ip_home_agent_e = 68, - dhcptag_smtp_server_e = 69, - dhcptag_pop3_server_e = 70, - dhcptag_nntp_server_e = 71, - dhcptag_default_www_server_e = 72, - dhcptag_default_finger_server_e = 73, - dhcptag_default_irc_server_e = 74, - dhcptag_streettalk_server_e = 75, - dhcptag_stda_server_e = 76, - dhcptag_slp_directory_agent_e = 78, - dhcptag_slp_service_scope_e = 79, - dhcptag_ldap_url_e = 95, - - /* dhcp-specific extensions: 50..61, 66, 67 */ - dhcptag_requested_ip_address_e = 50, - dhcptag_lease_time_e = 51, - dhcptag_option_overload_e = 52, - dhcptag_dhcp_message_type_e = 53, - dhcptag_server_identifier_e = 54, - dhcptag_parameter_request_list_e = 55, - dhcptag_message_e = 56, - dhcptag_max_dhcp_message_size_e = 57, - dhcptag_renewal_t1_time_value_e = 58, - dhcptag_rebinding_t2_time_value_e = 59, - dhcptag_vendor_class_identifier_e = 60, - dhcptag_client_identifier_e = 61, - dhcptag_tftp_server_name_e = 66, - dhcptag_bootfile_name_e = 67, - - /* netinfo parent tags: 112, 113 */ - dhcptag_netinfo_server_address_e = 112, - dhcptag_netinfo_server_tag_e = 113, - - /* ad-hoc network disable option */ - dhcptag_auto_configure_e = 116, + /* rfc 1497 vendor extensions: 0..18, 255 */ + dhcptag_pad_e = 0, + dhcptag_end_e = 255, + dhcptag_subnet_mask_e = 1, + dhcptag_time_offset_e = 2, + dhcptag_router_e = 3, + dhcptag_time_server_e = 4, + dhcptag_name_server_e = 5, + dhcptag_domain_name_server_e = 6, + dhcptag_log_server_e = 7, + dhcptag_cookie_server_e = 8, + dhcptag_lpr_server_e = 9, + dhcptag_impress_server_e = 10, + dhcptag_resource_location_server_e = 11, + dhcptag_host_name_e = 12, + dhcptag_boot_file_size_e = 13, + dhcptag_merit_dump_file_e = 14, + dhcptag_domain_name_e = 15, + dhcptag_swap_server_e = 16, + dhcptag_root_path_e = 17, + dhcptag_extensions_path_e = 18, + + /* ip layer parameters per host: 19..25 */ + dhcptag_ip_forwarding_e = 19, + dhcptag_non_local_source_routing_e = 20, + dhcptag_policy_filter_e = 21, + dhcptag_max_dgram_reassembly_size_e = 22, + dhcptag_default_ip_time_to_live_e = 23, + dhcptag_path_mtu_aging_timeout_e = 24, + dhcptag_path_mtu_plateau_table_e = 25, + + /* ip layer parameters per interface: 26..33 */ + dhcptag_interface_mtu_e = 26, + dhcptag_all_subnets_local_e = 27, + dhcptag_broadcast_address_e = 28, + dhcptag_perform_mask_discovery_e = 29, + dhcptag_mask_supplier_e = 30, + dhcptag_perform_router_discovery_e = 31, + dhcptag_router_solicitation_address_e = 32, + dhcptag_static_route_e = 33, + dhcptag_trailer_encapsulation_e = 34, + dhcptag_arp_cache_timeout_e = 35, + dhcptag_ethernet_encapsulation_e = 36, + + /* tcp parameters: 37..39 */ + dhcptag_default_ttl_e = 37, + dhcptag_keepalive_interval_e = 38, + dhcptag_keepalive_garbage_e = 39, + + /* application & service parameters: 40..49, 64, 65, 
68..76, 78, 79, 95 */ + dhcptag_nis_domain_e = 40, + dhcptag_nis_servers_e = 41, + dhcptag_network_time_protocol_servers_e = 42, + dhcptag_vendor_specific_e = 43, + dhcptag_nb_over_tcpip_name_server_e = 44, + dhcptag_nb_over_tcpip_dgram_dist_server_e = 45, + dhcptag_nb_over_tcpip_node_type_e = 46, + dhcptag_nb_over_tcpip_scope_e = 47, + dhcptag_x_windows_font_server_e = 48, + dhcptag_x_windows_display_manager_e = 49, + dhcptag_nis_plus_domain_e = 64, + dhcptag_nis_plus_servers_e = 65, + dhcptag_mobile_ip_home_agent_e = 68, + dhcptag_smtp_server_e = 69, + dhcptag_pop3_server_e = 70, + dhcptag_nntp_server_e = 71, + dhcptag_default_www_server_e = 72, + dhcptag_default_finger_server_e = 73, + dhcptag_default_irc_server_e = 74, + dhcptag_streettalk_server_e = 75, + dhcptag_stda_server_e = 76, + dhcptag_slp_directory_agent_e = 78, + dhcptag_slp_service_scope_e = 79, + dhcptag_ldap_url_e = 95, + + /* dhcp-specific extensions: 50..61, 66, 67 */ + dhcptag_requested_ip_address_e = 50, + dhcptag_lease_time_e = 51, + dhcptag_option_overload_e = 52, + dhcptag_dhcp_message_type_e = 53, + dhcptag_server_identifier_e = 54, + dhcptag_parameter_request_list_e = 55, + dhcptag_message_e = 56, + dhcptag_max_dhcp_message_size_e = 57, + dhcptag_renewal_t1_time_value_e = 58, + dhcptag_rebinding_t2_time_value_e = 59, + dhcptag_vendor_class_identifier_e = 60, + dhcptag_client_identifier_e = 61, + dhcptag_tftp_server_name_e = 66, + dhcptag_bootfile_name_e = 67, + + /* netinfo parent tags: 112, 113 */ + dhcptag_netinfo_server_address_e = 112, + dhcptag_netinfo_server_tag_e = 113, + + /* ad-hoc network disable option */ + dhcptag_auto_configure_e = 116, }; -typedef uint8_t dhcptag_t; +typedef uint8_t dhcptag_t; /* * Module: dhcpol (dhcp options list) @@ -174,28 +173,28 @@ typedef uint8_t dhcptag_t; */ typedef struct { - const void * * array; /* malloc'd array of pointers */ - int size; /* number of elements in array */ - int count; /* number of occupied elements */ + const void * * array;/* malloc'd array of pointers */ + int size;/* number of elements in array */ + int count;/* number of occupied elements */ } ptrlist_t; typedef ptrlist_t dhcpol_t; #ifdef BSD_KERNEL_PRIVATE -void dhcpol_init(dhcpol_t * list); -void dhcpol_free(dhcpol_t * list); -int dhcpol_count(dhcpol_t * list); -boolean_t dhcpol_add(dhcpol_t * list, const void * element); -const void * dhcpol_element(dhcpol_t * list, int i); -boolean_t dhcpol_concat(dhcpol_t * list, dhcpol_t * extra); -boolean_t dhcpol_parse_buffer(dhcpol_t * list, - const void * buffer, - int length); -const void * dhcpol_find(dhcpol_t * list, int tag, int * len_p, - int * start); -boolean_t dhcpol_parse_packet(dhcpol_t * options, - const struct dhcp * pkt, int len); +void dhcpol_init(dhcpol_t * list); +void dhcpol_free(dhcpol_t * list); +int dhcpol_count(dhcpol_t * list); +boolean_t dhcpol_add(dhcpol_t * list, const void * element); +const void * dhcpol_element(dhcpol_t * list, int i); +boolean_t dhcpol_concat(dhcpol_t * list, dhcpol_t * extra); +boolean_t dhcpol_parse_buffer(dhcpol_t * list, + const void * buffer, + int length); +const void * dhcpol_find(dhcpol_t * list, int tag, int * len_p, + int * start); +boolean_t dhcpol_parse_packet(dhcpol_t * options, + const struct dhcp * pkt, int len); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET_DHCP_OPTIONS_H */ diff --git a/bsd/netinet/flow_divert.c b/bsd/netinet/flow_divert.c index c3bc7930c..13b9cad5d 100644 --- a/bsd/netinet/flow_divert.c +++ b/bsd/netinet/flow_divert.c @@ -2,7 +2,7 @@ * Copyright (c) 2012-2017 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -59,18 +59,18 @@ #if INET6 #include #include -#endif /* INET6 */ +#endif /* INET6 */ #include #include #include #include -#define FLOW_DIVERT_CONNECT_STARTED 0x00000001 -#define FLOW_DIVERT_READ_CLOSED 0x00000002 -#define FLOW_DIVERT_WRITE_CLOSED 0x00000004 -#define FLOW_DIVERT_TUNNEL_RD_CLOSED 0x00000008 -#define FLOW_DIVERT_TUNNEL_WR_CLOSED 0x00000010 -#define FLOW_DIVERT_TRANSFERRED 0x00000020 +#define FLOW_DIVERT_CONNECT_STARTED 0x00000001 +#define FLOW_DIVERT_READ_CLOSED 0x00000002 +#define FLOW_DIVERT_WRITE_CLOSED 0x00000004 +#define FLOW_DIVERT_TUNNEL_RD_CLOSED 0x00000008 +#define FLOW_DIVERT_TUNNEL_WR_CLOSED 0x00000010 +#define FLOW_DIVERT_TRANSFERRED 0x00000020 #define FLOW_DIVERT_HAS_HMAC 0x00000040 #define FDLOG(level, pcb, format, ...) 
\ @@ -79,68 +79,67 @@ #define FDLOG0(level, pcb, msg) \ os_log_with_type(OS_LOG_DEFAULT, flow_divert_syslog_type_to_oslog_type(level), "(%u): " msg "\n", (pcb)->hash) -#define FDRETAIN(pcb) if ((pcb) != NULL) OSIncrementAtomic(&(pcb)->ref_count) -#define FDRELEASE(pcb) \ - do { \ - if ((pcb) != NULL && 1 == OSDecrementAtomic(&(pcb)->ref_count)) { \ - flow_divert_pcb_destroy(pcb); \ - } \ +#define FDRETAIN(pcb) if ((pcb) != NULL) OSIncrementAtomic(&(pcb)->ref_count) +#define FDRELEASE(pcb) \ + do { \ + if ((pcb) != NULL && 1 == OSDecrementAtomic(&(pcb)->ref_count)) { \ + flow_divert_pcb_destroy(pcb); \ + } \ } while (0) -#define FDLOCK(pcb) lck_mtx_lock(&(pcb)->mtx) -#define FDUNLOCK(pcb) lck_mtx_unlock(&(pcb)->mtx) +#define FDLOCK(pcb) lck_mtx_lock(&(pcb)->mtx) +#define FDUNLOCK(pcb) lck_mtx_unlock(&(pcb)->mtx) -#define FD_CTL_SENDBUFF_SIZE (128 * 1024) -#define FD_CTL_RCVBUFF_SIZE (128 * 1024) +#define FD_CTL_SENDBUFF_SIZE (128 * 1024) +#define FD_CTL_RCVBUFF_SIZE (128 * 1024) -#define GROUP_BIT_CTL_ENQUEUE_BLOCKED 0 +#define GROUP_BIT_CTL_ENQUEUE_BLOCKED 0 -#define GROUP_COUNT_MAX 32 -#define FLOW_DIVERT_MAX_NAME_SIZE 4096 -#define FLOW_DIVERT_MAX_KEY_SIZE 1024 -#define FLOW_DIVERT_MAX_TRIE_MEMORY (1024 * 1024) +#define GROUP_COUNT_MAX 32 +#define FLOW_DIVERT_MAX_NAME_SIZE 4096 +#define FLOW_DIVERT_MAX_KEY_SIZE 1024 +#define FLOW_DIVERT_MAX_TRIE_MEMORY (1024 * 1024) -struct flow_divert_trie_node -{ +struct flow_divert_trie_node { uint16_t start; uint16_t length; uint16_t child_map; }; -#define CHILD_MAP_SIZE 256 -#define NULL_TRIE_IDX 0xffff -#define TRIE_NODE(t, i) ((t)->nodes[(i)]) -#define TRIE_CHILD(t, i, b) (((t)->child_maps + (CHILD_MAP_SIZE * TRIE_NODE(t, i).child_map))[(b)]) -#define TRIE_BYTE(t, i) ((t)->bytes[(i)]) +#define CHILD_MAP_SIZE 256 +#define NULL_TRIE_IDX 0xffff +#define TRIE_NODE(t, i) ((t)->nodes[(i)]) +#define TRIE_CHILD(t, i, b) (((t)->child_maps + (CHILD_MAP_SIZE * TRIE_NODE(t, i).child_map))[(b)]) +#define TRIE_BYTE(t, i) ((t)->bytes[(i)]) -static struct flow_divert_pcb nil_pcb; +static struct flow_divert_pcb nil_pcb; decl_lck_rw_data(static, g_flow_divert_group_lck); -static struct flow_divert_group **g_flow_divert_groups = NULL; -static uint32_t g_active_group_count = 0; +static struct flow_divert_group **g_flow_divert_groups = NULL; +static uint32_t g_active_group_count = 0; -static lck_grp_attr_t *flow_divert_grp_attr = NULL; -static lck_attr_t *flow_divert_mtx_attr = NULL; -static lck_grp_t *flow_divert_mtx_grp = NULL; -static errno_t g_init_result = 0; +static lck_grp_attr_t *flow_divert_grp_attr = NULL; +static lck_attr_t *flow_divert_mtx_attr = NULL; +static lck_grp_t *flow_divert_mtx_grp = NULL; +static errno_t g_init_result = 0; -static kern_ctl_ref g_flow_divert_kctl_ref = NULL; +static kern_ctl_ref g_flow_divert_kctl_ref = NULL; -static struct protosw g_flow_divert_in_protosw; -static struct pr_usrreqs g_flow_divert_in_usrreqs; -static struct protosw g_flow_divert_in_udp_protosw; -static struct pr_usrreqs g_flow_divert_in_udp_usrreqs; +static struct protosw g_flow_divert_in_protosw; +static struct pr_usrreqs g_flow_divert_in_usrreqs; +static struct protosw g_flow_divert_in_udp_protosw; +static struct pr_usrreqs g_flow_divert_in_udp_usrreqs; #if INET6 -static struct ip6protosw g_flow_divert_in6_protosw; -static struct pr_usrreqs g_flow_divert_in6_usrreqs; -static struct ip6protosw g_flow_divert_in6_udp_protosw; -static struct pr_usrreqs g_flow_divert_in6_udp_usrreqs; -#endif /* INET6 */ +static struct ip6protosw g_flow_divert_in6_protosw; +static 
struct pr_usrreqs g_flow_divert_in6_usrreqs; +static struct ip6protosw g_flow_divert_in6_udp_protosw; +static struct pr_usrreqs g_flow_divert_in6_udp_usrreqs; +#endif /* INET6 */ -static struct protosw *g_tcp_protosw = NULL; -static struct ip6protosw *g_tcp6_protosw = NULL; -static struct protosw *g_udp_protosw = NULL; -static struct ip6protosw *g_udp6_protosw = NULL; +static struct protosw *g_tcp_protosw = NULL; +static struct ip6protosw *g_tcp6_protosw = NULL; +static struct protosw *g_udp_protosw = NULL; +static struct ip6protosw *g_udp6_protosw = NULL; static errno_t flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, struct sockaddr **dup); @@ -167,10 +166,10 @@ static inline uint8_t flow_divert_syslog_type_to_oslog_type(int syslog_type) { switch (syslog_type) { - case LOG_ERR: return OS_LOG_TYPE_ERROR; - case LOG_INFO: return OS_LOG_TYPE_INFO; - case LOG_DEBUG: return OS_LOG_TYPE_DEBUG; - default: return OS_LOG_TYPE_DEFAULT; + case LOG_ERR: return OS_LOG_TYPE_ERROR; + case LOG_INFO: return OS_LOG_TYPE_INFO; + case LOG_DEBUG: return OS_LOG_TYPE_DEBUG; + default: return OS_LOG_TYPE_DEFAULT; } } @@ -187,30 +186,30 @@ static const char * flow_divert_packet_type2str(uint8_t packet_type) { switch (packet_type) { - case FLOW_DIVERT_PKT_CONNECT: - return "connect"; - case FLOW_DIVERT_PKT_CONNECT_RESULT: - return "connect result"; - case FLOW_DIVERT_PKT_DATA: - return "data"; - case FLOW_DIVERT_PKT_CLOSE: - return "close"; - case FLOW_DIVERT_PKT_READ_NOTIFY: - return "read notification"; - case FLOW_DIVERT_PKT_PROPERTIES_UPDATE: - return "properties update"; - case FLOW_DIVERT_PKT_APP_MAP_CREATE: - return "app map create"; - default: - return "unknown"; + case FLOW_DIVERT_PKT_CONNECT: + return "connect"; + case FLOW_DIVERT_PKT_CONNECT_RESULT: + return "connect result"; + case FLOW_DIVERT_PKT_DATA: + return "data"; + case FLOW_DIVERT_PKT_CLOSE: + return "close"; + case FLOW_DIVERT_PKT_READ_NOTIFY: + return "read notification"; + case FLOW_DIVERT_PKT_PROPERTIES_UPDATE: + return "properties update"; + case FLOW_DIVERT_PKT_APP_MAP_CREATE: + return "app map create"; + default: + return "unknown"; } } static struct flow_divert_pcb * flow_divert_pcb_lookup(uint32_t hash, struct flow_divert_group *group) { - struct flow_divert_pcb key_item; - struct flow_divert_pcb *fd_cb = NULL; + struct flow_divert_pcb key_item; + struct flow_divert_pcb *fd_cb = NULL; key_item.hash = hash; @@ -225,12 +224,12 @@ flow_divert_pcb_lookup(uint32_t hash, struct flow_divert_group *group) static errno_t flow_divert_pcb_insert(struct flow_divert_pcb *fd_cb, uint32_t ctl_unit) { - errno_t error = 0; - struct flow_divert_pcb *exist = NULL; - struct flow_divert_group *group; - static uint32_t g_nextkey = 1; - static uint32_t g_hash_seed = 0; - int try_count = 0; + errno_t error = 0; + struct flow_divert_pcb *exist = NULL; + struct flow_divert_group *group; + static uint32_t g_nextkey = 1; + static uint32_t g_hash_seed = 0; + int try_count = 0; if (ctl_unit == 0 || ctl_unit >= GROUP_COUNT_MAX) { return EINVAL; @@ -255,8 +254,8 @@ flow_divert_pcb_insert(struct flow_divert_pcb *fd_cb, uint32_t ctl_unit) socket_lock(fd_cb->so, 0); do { - uint32_t key[2]; - uint32_t idx; + uint32_t key[2]; + uint32_t idx; key[0] = g_nextkey++; key[1] = RandomULong(); @@ -288,7 +287,7 @@ flow_divert_pcb_insert(struct flow_divert_pcb *fd_cb, uint32_t ctl_unit) if (exist == NULL) { fd_cb->group = group; - FDRETAIN(fd_cb); /* The group now has a reference */ + FDRETAIN(fd_cb); /* The group now has a reference */ } else { fd_cb->hash = 0; 
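/*
 * Editor's note: reaching this branch means every candidate hash tried in
 * the do-while loop above — derived from {g_nextkey++, RandomULong()} for a
 * bounded number of attempts (try_count) — collided with a PCB already in
 * the group's red-black tree (RB_INSERT returned an existing node). The
 * provisional hash is cleared here so the PCB cannot be mistaken for a
 * member of the group, and the caller sees EEXIST.
 */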
error = EEXIST; @@ -306,7 +305,7 @@ done: static struct flow_divert_pcb * flow_divert_pcb_create(socket_t so) { - struct flow_divert_pcb *new_pcb = NULL; + struct flow_divert_pcb *new_pcb = NULL; MALLOC_ZONE(new_pcb, struct flow_divert_pcb *, sizeof(*new_pcb), M_FLOW_DIVERT_PCB, M_WAITOK); if (new_pcb == NULL) { @@ -320,7 +319,7 @@ flow_divert_pcb_create(socket_t so) new_pcb->so = so; new_pcb->log_level = nil_pcb.log_level; - FDRETAIN(new_pcb); /* Represents the socket's reference */ + FDRETAIN(new_pcb); /* Represents the socket's reference */ return new_pcb; } @@ -329,7 +328,7 @@ static void flow_divert_pcb_destroy(struct flow_divert_pcb *fd_cb) { FDLOG(LOG_INFO, fd_cb, "Destroying, app tx %u, app rx %u, tunnel tx %u, tunnel rx %u", - fd_cb->bytes_written_by_app, fd_cb->bytes_read_by_app, fd_cb->bytes_sent, fd_cb->bytes_received); + fd_cb->bytes_written_by_app, fd_cb->bytes_read_by_app, fd_cb->bytes_sent, fd_cb->bytes_received); if (fd_cb->local_address != NULL) { FREE(fd_cb->local_address, M_SONAME); @@ -358,7 +357,7 @@ flow_divert_pcb_remove(struct flow_divert_pcb *fd_cb) FDLOG(LOG_INFO, fd_cb, "Removing from group %d, ref count = %d", group->ctl_unit, fd_cb->ref_count); RB_REMOVE(fd_pcb_tree, &group->pcb_tree, fd_cb); fd_cb->group = NULL; - FDRELEASE(fd_cb); /* Release the group's reference */ + FDRELEASE(fd_cb); /* Release the group's reference */ lck_rw_done(&group->lck); } } @@ -366,8 +365,8 @@ flow_divert_pcb_remove(struct flow_divert_pcb *fd_cb) static int flow_divert_packet_init(struct flow_divert_pcb *fd_cb, uint8_t packet_type, mbuf_t *packet) { - struct flow_divert_packet_header hdr; - int error = 0; + struct flow_divert_packet_header hdr; + int error = 0; error = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_HEADER, packet); if (error) { @@ -393,8 +392,8 @@ flow_divert_packet_init(struct flow_divert_pcb *fd_cb, uint8_t packet_type, mbuf static int flow_divert_packet_append_tlv(mbuf_t packet, uint8_t type, uint32_t length, const void *value) { - uint32_t net_length = htonl(length); - int error = 0; + uint32_t net_length = htonl(length); + int error = 0; error = mbuf_copyback(packet, mbuf_pkthdr_len(packet), sizeof(type), &type, MBUF_DONTWAIT); if (error) { @@ -420,10 +419,10 @@ flow_divert_packet_append_tlv(mbuf_t packet, uint8_t type, uint32_t length, cons static int flow_divert_packet_find_tlv(mbuf_t packet, int offset, uint8_t type, int *err, int next) { - size_t cursor = offset; - int error = 0; - uint32_t curr_length; - uint8_t curr_type; + size_t cursor = offset; + int error = 0; + uint32_t curr_length; + uint8_t curr_type; *err = 0; @@ -457,9 +456,9 @@ flow_divert_packet_find_tlv(mbuf_t packet, int offset, uint8_t type, int *err, i static int flow_divert_packet_get_tlv(mbuf_t packet, int offset, uint8_t type, size_t buff_len, void *buff, uint32_t *val_size) { - int error = 0; - uint32_t length; - int tlv_offset; + int error = 0; + uint32_t length; + int tlv_offset; tlv_offset = flow_divert_packet_find_tlv(packet, offset, type, &error, 0); if (tlv_offset < 0) { @@ -491,7 +490,7 @@ flow_divert_packet_get_tlv(mbuf_t packet, int offset, uint8_t type, size_t buff_ static int flow_divert_packet_compute_hmac(mbuf_t packet, struct flow_divert_group *group, uint8_t *hmac) { - mbuf_t curr_mbuf = packet; + mbuf_t curr_mbuf = packet; if (g_crypto_funcs == NULL || group->token_key == NULL) { return ENOPROTOOPT; @@ -513,12 +512,12 @@ flow_divert_packet_compute_hmac(mbuf_t packet, struct flow_divert_group *group, static int flow_divert_packet_verify_hmac(mbuf_t packet, uint32_t ctl_unit) { - 
int error = 0; - struct flow_divert_group *group = NULL; - int hmac_offset; - uint8_t packet_hmac[SHA_DIGEST_LENGTH]; - uint8_t computed_hmac[SHA_DIGEST_LENGTH]; - mbuf_t tail; + int error = 0; + struct flow_divert_group *group = NULL; + int hmac_offset; + uint8_t packet_hmac[SHA_DIGEST_LENGTH]; + uint8_t computed_hmac[SHA_DIGEST_LENGTH]; + mbuf_t tail; lck_rw_lock_shared(&g_flow_divert_group_lck); @@ -581,7 +580,7 @@ flow_divert_add_data_statistics(struct flow_divert_pcb *fd_cb, int data_len, Boo Boolean cell = FALSE; Boolean wifi = FALSE; Boolean wired = FALSE; - + inp = sotoinpcb(fd_cb->so); if (inp == NULL) { return; @@ -593,7 +592,7 @@ flow_divert_add_data_statistics(struct flow_divert_pcb *fd_cb, int data_len, Boo wifi = (!cell && IFNET_IS_WIFI(ifp)); wired = (!wifi && IFNET_IS_WIRED(ifp)); } - + if (send) { INP_ADD_STAT(inp, cell, wifi, wired, txpackets, 1); INP_ADD_STAT(inp, cell, wifi, wired, txbytes, data_len); @@ -611,9 +610,10 @@ flow_divert_check_no_cellular(struct flow_divert_pcb *fd_cb) inp = sotoinpcb(fd_cb->so); if (inp && INP_NO_CELLULAR(inp) && inp->inp_last_outifp && - IFNET_IS_CELLULAR(inp->inp_last_outifp)) + IFNET_IS_CELLULAR(inp->inp_last_outifp)) { return EHOSTUNREACH; - + } + return 0; } @@ -624,9 +624,10 @@ flow_divert_check_no_expensive(struct flow_divert_pcb *fd_cb) inp = sotoinpcb(fd_cb->so); if (inp && INP_NO_EXPENSIVE(inp) && inp->inp_last_outifp && - IFNET_IS_EXPENSIVE(inp->inp_last_outifp)) + IFNET_IS_EXPENSIVE(inp->inp_last_outifp)) { return EHOSTUNREACH; - + } + return 0; } @@ -706,10 +707,12 @@ flow_divert_trie_insert(struct flow_divert_trie *trie, uint16_t string_start, si current_end = TRIE_NODE(trie, current).start + TRIE_NODE(trie, current).length; for (node_idx = TRIE_NODE(trie, current).start; - node_idx < current_end && - string_idx < string_end && - TRIE_BYTE(trie, node_idx) == TRIE_BYTE(trie, string_idx); - node_idx++, string_idx++); + node_idx < current_end && + string_idx < string_end && + TRIE_BYTE(trie, node_idx) == TRIE_BYTE(trie, string_idx); + node_idx++, string_idx++) { + ; + } string_remainder = string_end - string_idx; @@ -799,7 +802,7 @@ flow_divert_trie_insert(struct flow_divert_trie *trie, uint16_t string_start, si return current; } -#define APPLE_WEBCLIP_ID_PREFIX "com.apple.webapp" +#define APPLE_WEBCLIP_ID_PREFIX "com.apple.webapp" static uint16_t flow_divert_trie_search(struct flow_divert_trie *trie, uint8_t *string_bytes) { @@ -812,15 +815,17 @@ flow_divert_trie_search(struct flow_divert_trie *trie, uint8_t *string_bytes) uint16_t node_idx; for (node_idx = TRIE_NODE(trie, current).start; - node_idx < node_end && string_bytes[string_idx] != '\0' && string_bytes[string_idx] == TRIE_BYTE(trie, node_idx); - node_idx++, string_idx++); + node_idx < node_end && string_bytes[string_idx] != '\0' && string_bytes[string_idx] == TRIE_BYTE(trie, node_idx); + node_idx++, string_idx++) { + ; + } if (node_idx == node_end) { if (string_bytes[string_idx] == '\0') { return current; /* Got an exact match */ } else if (string_idx == strlen(APPLE_WEBCLIP_ID_PREFIX) && - 0 == strncmp((const char *)string_bytes, APPLE_WEBCLIP_ID_PREFIX, string_idx)) { - string_bytes[string_idx] = '\0'; + 0 == strncmp((const char *)string_bytes, APPLE_WEBCLIP_ID_PREFIX, string_idx)) { + string_bytes[string_idx] = '\0'; return current; /* Got an apple webclip id prefix match */ } else if (TRIE_NODE(trie, current).child_map != NULL_TRIE_IDX) { next = TRIE_CHILD(trie, current, string_bytes[string_idx]); @@ -952,7 +957,7 @@ flow_divert_get_src_proc(struct socket *so, 
proc_t *proc) static int flow_divert_send_packet(struct flow_divert_pcb *fd_cb, mbuf_t packet, Boolean enqueue) { - int error; + int error; if (fd_cb->group == NULL) { fd_cb->so->so_error = ECONNABORTED; @@ -987,13 +992,13 @@ flow_divert_send_packet(struct flow_divert_pcb *fd_cb, mbuf_t packet, Boolean en static int flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr *to, struct socket *so, proc_t p, mbuf_t *out_connect_packet) { - int error = 0; - int flow_type = 0; - char *signing_id = NULL; - int free_signing_id = 0; - mbuf_t connect_packet = NULL; - proc_t src_proc = p; - int release_proc = 0; + int error = 0; + int flow_type = 0; + char *signing_id = NULL; + int free_signing_id = 0; + mbuf_t connect_packet = NULL; + proc_t src_proc = p; + int release_proc = 0; error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CONNECT, &connect_packet); if (error) { @@ -1021,7 +1026,7 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr release_proc = flow_divert_get_src_proc(so, &src_proc); if (src_proc != PROC_NULL) { proc_lock(src_proc); - if (src_proc->p_csflags & (CS_VALID|CS_DEBUGGED)) { + if (src_proc->p_csflags & (CS_VALID | CS_DEBUGGED)) { const char * cs_id; cs_id = cs_identity_get(src_proc); signing_id = __DECONST(char *, cs_id); @@ -1092,9 +1097,9 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr } error = flow_divert_packet_append_tlv(connect_packet, - FLOW_DIVERT_TLV_TRAFFIC_CLASS, - sizeof(fd_cb->so->so_traffic_class), - &fd_cb->so->so_traffic_class); + FLOW_DIVERT_TLV_TRAFFIC_CLASS, + sizeof(fd_cb->so->so_traffic_class), + &fd_cb->so->so_traffic_class); if (error) { goto done; } @@ -1108,9 +1113,9 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr goto done; } error = flow_divert_packet_append_tlv(connect_packet, - FLOW_DIVERT_TLV_FLOW_TYPE, - sizeof(flow_type), - &flow_type); + FLOW_DIVERT_TLV_FLOW_TYPE, + sizeof(flow_type), + &flow_type); if (error) { goto done; @@ -1118,33 +1123,33 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr if (fd_cb->so->so_flags & SOF_DELEGATED) { error = flow_divert_packet_append_tlv(connect_packet, - FLOW_DIVERT_TLV_PID, - sizeof(fd_cb->so->e_pid), - &fd_cb->so->e_pid); + FLOW_DIVERT_TLV_PID, + sizeof(fd_cb->so->e_pid), + &fd_cb->so->e_pid); if (error) { goto done; } error = flow_divert_packet_append_tlv(connect_packet, - FLOW_DIVERT_TLV_UUID, - sizeof(fd_cb->so->e_uuid), - &fd_cb->so->e_uuid); + FLOW_DIVERT_TLV_UUID, + sizeof(fd_cb->so->e_uuid), + &fd_cb->so->e_uuid); if (error) { goto done; } } else { error = flow_divert_packet_append_tlv(connect_packet, - FLOW_DIVERT_TLV_PID, - sizeof(fd_cb->so->e_pid), - &fd_cb->so->last_pid); + FLOW_DIVERT_TLV_PID, + sizeof(fd_cb->so->e_pid), + &fd_cb->so->last_pid); if (error) { goto done; } error = flow_divert_packet_append_tlv(connect_packet, - FLOW_DIVERT_TLV_UUID, - sizeof(fd_cb->so->e_uuid), - &fd_cb->so->last_uuid); + FLOW_DIVERT_TLV_UUID, + sizeof(fd_cb->so->e_uuid), + &fd_cb->so->last_uuid); if (error) { goto done; } @@ -1186,7 +1191,7 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr if (fd_cb->local_address != NULL) { /* socket is bound. 
*/ error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_LOCAL_ADDR, - fd_cb->local_address->sa_len, fd_cb->local_address); + fd_cb->local_address->sa_len, fd_cb->local_address); if (error) { goto done; } @@ -1213,9 +1218,9 @@ done: static int flow_divert_send_connect_result(struct flow_divert_pcb *fd_cb) { - int error = 0; - mbuf_t packet = NULL; - int rbuff_space = 0; + int error = 0; + mbuf_t packet = NULL; + int rbuff_space = 0; error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CONNECT_RESULT, &packet); if (error) { @@ -1229,9 +1234,9 @@ flow_divert_send_connect_result(struct flow_divert_pcb *fd_cb) } rbuff_space = htonl(rbuff_space); error = flow_divert_packet_append_tlv(packet, - FLOW_DIVERT_TLV_SPACE_AVAILABLE, - sizeof(rbuff_space), - &rbuff_space); + FLOW_DIVERT_TLV_SPACE_AVAILABLE, + sizeof(rbuff_space), + &rbuff_space); if (error) { goto done; } @@ -1252,9 +1257,9 @@ done: static int flow_divert_send_close(struct flow_divert_pcb *fd_cb, int how) { - int error = 0; - mbuf_t packet = NULL; - uint32_t zero = 0; + int error = 0; + mbuf_t packet = NULL; + uint32_t zero = 0; error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CLOSE, &packet); if (error) { @@ -1291,9 +1296,8 @@ done: static int flow_divert_tunnel_how_closed(struct flow_divert_pcb *fd_cb) { - if ((fd_cb->flags & (FLOW_DIVERT_TUNNEL_RD_CLOSED|FLOW_DIVERT_TUNNEL_WR_CLOSED)) == - (FLOW_DIVERT_TUNNEL_RD_CLOSED|FLOW_DIVERT_TUNNEL_WR_CLOSED)) - { + if ((fd_cb->flags & (FLOW_DIVERT_TUNNEL_RD_CLOSED | FLOW_DIVERT_TUNNEL_WR_CLOSED)) == + (FLOW_DIVERT_TUNNEL_RD_CLOSED | FLOW_DIVERT_TUNNEL_WR_CLOSED)) { return SHUT_RDWR; } else if (fd_cb->flags & FLOW_DIVERT_TUNNEL_RD_CLOSED) { return SHUT_RD; @@ -1311,15 +1315,15 @@ flow_divert_tunnel_how_closed(struct flow_divert_pcb *fd_cb) static void flow_divert_send_close_if_needed(struct flow_divert_pcb *fd_cb) { - int how = -1; + int how = -1; /* Do not send any close messages if there is still data in the send buffer */ if (fd_cb->so->so_snd.sb_cc == 0) { - if ((fd_cb->flags & (FLOW_DIVERT_READ_CLOSED|FLOW_DIVERT_TUNNEL_RD_CLOSED)) == FLOW_DIVERT_READ_CLOSED) { + if ((fd_cb->flags & (FLOW_DIVERT_READ_CLOSED | FLOW_DIVERT_TUNNEL_RD_CLOSED)) == FLOW_DIVERT_READ_CLOSED) { /* Socket closed reads, but tunnel did not. Tell tunnel to close reads */ how = SHUT_RD; } - if ((fd_cb->flags & (FLOW_DIVERT_WRITE_CLOSED|FLOW_DIVERT_TUNNEL_WR_CLOSED)) == FLOW_DIVERT_WRITE_CLOSED) { + if ((fd_cb->flags & (FLOW_DIVERT_WRITE_CLOSED | FLOW_DIVERT_TUNNEL_WR_CLOSED)) == FLOW_DIVERT_WRITE_CLOSED) { /* Socket closed writes, but tunnel did not. 
Tell tunnel to close writes */ if (how == SHUT_RD) { how = SHUT_RDWR; @@ -1350,9 +1354,9 @@ flow_divert_send_close_if_needed(struct flow_divert_pcb *fd_cb) static errno_t flow_divert_send_data_packet(struct flow_divert_pcb *fd_cb, mbuf_t data, size_t data_len, struct sockaddr *toaddr, Boolean force) { - mbuf_t packet; - mbuf_t last; - int error = 0; + mbuf_t packet; + mbuf_t last; + int error = 0; error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_DATA, &packet); if (error) { @@ -1389,10 +1393,10 @@ flow_divert_send_data_packet(struct flow_divert_pcb *fd_cb, mbuf_t data, size_t static void flow_divert_send_buffered_data(struct flow_divert_pcb *fd_cb, Boolean force) { - size_t to_send; - size_t sent = 0; - int error = 0; - mbuf_t buffer; + size_t to_send; + size_t sent = 0; + int error = 0; + mbuf_t buffer; to_send = fd_cb->so->so_snd.sb_cc; buffer = fd_cb->so->so_snd.sb_mb; @@ -1409,8 +1413,8 @@ flow_divert_send_buffered_data(struct flow_divert_pcb *fd_cb, Boolean force) if (SOCK_TYPE(fd_cb->so) == SOCK_STREAM) { while (sent < to_send) { - mbuf_t data; - size_t data_len; + mbuf_t data; + size_t data_len; data_len = to_send - sent; if (data_len > FLOW_DIVERT_CHUNK_SIZE) { @@ -1438,7 +1442,7 @@ flow_divert_send_buffered_data(struct flow_divert_pcb *fd_cb, Boolean force) mbuf_t m; size_t data_len; - while(buffer) { + while (buffer) { struct sockaddr *toaddr = flow_divert_get_buffered_target_address(buffer); m = buffer; @@ -1449,7 +1453,7 @@ flow_divert_send_buffered_data(struct flow_divert_pcb *fd_cb, Boolean force) if (m != NULL && m->m_type == MT_DATA) { break; } - } while(m); + } while (m); if (m == NULL) { /* unexpected */ FDLOG0(LOG_ERR, fd_cb, "failed to find type MT_DATA in the mbuf chain."); @@ -1492,23 +1496,23 @@ move_on: static int flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct sockaddr *toaddr) { - size_t to_send = mbuf_pkthdr_len(data); - int error = 0; + size_t to_send = mbuf_pkthdr_len(data); + int error = 0; if (to_send > fd_cb->send_window) { to_send = fd_cb->send_window; } if (fd_cb->so->so_snd.sb_cc > 0) { - to_send = 0; /* If the send buffer is non-empty, then we can't send anything */ + to_send = 0; /* If the send buffer is non-empty, then we can't send anything */ } if (SOCK_TYPE(fd_cb->so) == SOCK_STREAM) { - size_t sent = 0; - mbuf_t remaining_data = data; - mbuf_t pkt_data = NULL; + size_t sent = 0; + mbuf_t remaining_data = data; + mbuf_t pkt_data = NULL; while (sent < to_send && remaining_data != NULL) { - size_t pkt_data_len; + size_t pkt_data_len; pkt_data = remaining_data; @@ -1547,7 +1551,7 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc if (sbspace(&fd_cb->so->so_snd) > 0) { if (!sbappendstream(&fd_cb->so->so_snd, pkt_data)) { FDLOG(LOG_ERR, fd_cb, "sbappendstream failed with pkt_data, send buffer size = %u, send_window = %u\n", - fd_cb->so->so_snd.sb_cc, fd_cb->send_window); + fd_cb->so->so_snd.sb_cc, fd_cb->send_window); } } else { error = ENOBUFS; @@ -1558,7 +1562,7 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc if (sbspace(&fd_cb->so->so_snd) > 0) { if (!sbappendstream(&fd_cb->so->so_snd, remaining_data)) { FDLOG(LOG_ERR, fd_cb, "sbappendstream failed with remaining_data, send buffer size = %u, send_window = %u\n", - fd_cb->so->so_snd.sb_cc, fd_cb->send_window); + fd_cb->so->so_snd.sb_cc, fd_cb->send_window); } } else { error = ENOBUFS; @@ -1578,14 +1582,14 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc if (toaddr 
!= NULL) { if (!sbappendaddr(&fd_cb->so->so_snd, toaddr, data, NULL, &error)) { FDLOG(LOG_ERR, fd_cb, - "sbappendaddr failed. send buffer size = %u, send_window = %u, error = %d\n", - fd_cb->so->so_snd.sb_cc, fd_cb->send_window, error); + "sbappendaddr failed. send buffer size = %u, send_window = %u, error = %d\n", + fd_cb->so->so_snd.sb_cc, fd_cb->send_window, error); } } else { if (!sbappendrecord(&fd_cb->so->so_snd, data)) { FDLOG(LOG_ERR, fd_cb, - "sbappendrecord failed. send buffer size = %u, send_window = %u, error = %d\n", - fd_cb->so->so_snd.sb_cc, fd_cb->send_window, error); + "sbappendrecord failed. send buffer size = %u, send_window = %u, error = %d\n", + fd_cb->so->so_snd.sb_cc, fd_cb->send_window, error); } } } else { @@ -1600,9 +1604,9 @@ flow_divert_send_app_data(struct flow_divert_pcb *fd_cb, mbuf_t data, struct soc static int flow_divert_send_read_notification(struct flow_divert_pcb *fd_cb, uint32_t read_count) { - int error = 0; - mbuf_t packet = NULL; - uint32_t net_read_count = htonl(read_count); + int error = 0; + mbuf_t packet = NULL; + uint32_t net_read_count = htonl(read_count); error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_READ_NOTIFY, &packet); if (error) { @@ -1632,8 +1636,8 @@ done: static int flow_divert_send_traffic_class_update(struct flow_divert_pcb *fd_cb, int traffic_class) { - int error = 0; - mbuf_t packet = NULL; + int error = 0; + mbuf_t packet = NULL; error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_PROPERTIES_UPDATE, &packet); if (error) { @@ -1663,15 +1667,15 @@ done: static void flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset) { - uint32_t connect_error; - uint32_t ctl_unit = 0; - int error = 0; - struct flow_divert_group *grp = NULL; - struct sockaddr_storage local_address; - int out_if_index = 0; - struct sockaddr_storage remote_address; - uint32_t send_window; - uint32_t app_data_length = 0; + uint32_t connect_error; + uint32_t ctl_unit = 0; + int error = 0; + struct flow_divert_group *grp = NULL; + struct sockaddr_storage local_address; + int out_if_index = 0; + struct sockaddr_storage remote_address; + uint32_t send_window; + uint32_t app_data_length = 0; memset(&local_address, 0, sizeof(local_address)); memset(&remote_address, 0, sizeof(remote_address)); @@ -1716,8 +1720,8 @@ flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, } error = 0; - connect_error = ntohl(connect_error); - ctl_unit = ntohl(ctl_unit); + connect_error = ntohl(connect_error); + ctl_unit = ntohl(ctl_unit); lck_rw_lock_shared(&g_flow_divert_group_lck); @@ -1738,9 +1742,9 @@ flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, FDLOCK(fd_cb); if (fd_cb->so != NULL) { - struct inpcb *inp = NULL; - struct ifnet *ifp = NULL; - struct flow_divert_group *old_group; + struct inpcb *inp = NULL; + struct ifnet *ifp = NULL; + struct flow_divert_group *old_group; socket_lock(fd_cb->so, 0); @@ -1776,7 +1780,7 @@ flow_divert_handle_connect_result(struct flow_divert_pcb *fd_cb, mbuf_t packet, } if (app_data_length > 0) { - uint8_t *app_data = NULL; + uint8_t *app_data = NULL; MALLOC(app_data, uint8_t *, app_data_length, M_TEMP, M_WAITOK); if (app_data != NULL) { error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_APP_DATA, app_data_length, app_data, NULL); @@ -1868,9 +1872,9 @@ done: static void flow_divert_handle_close(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset) { - uint32_t close_error; - int error = 0; - int how; + uint32_t close_error; + int 
error = 0; + int how; error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_ERROR_CODE, sizeof(close_error), &close_error, NULL); if (error) { @@ -1895,7 +1899,7 @@ flow_divert_handle_close(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offse fd_cb->so->so_error = ntohl(close_error); flow_divert_update_closed_state(fd_cb, how, TRUE); - + how = flow_divert_tunnel_how_closed(fd_cb); if (how == SHUT_RDWR) { flow_divert_disconnect_socket(fd_cb->so); @@ -1915,20 +1919,26 @@ flow_divert_get_control_mbuf(struct flow_divert_pcb *fd_cb) { if (fd_cb->local_address != NULL) { struct inpcb *inp = sotoinpcb(fd_cb->so); - if (inp->inp_vflag & INP_IPV4 && inp->inp_flags & INP_RECVDSTADDR) { + if ((inp->inp_vflag & INP_IPV4) && + (inp->inp_flags & INP_RECVDSTADDR) && + fd_cb->local_address->sa_family == AF_INET && + fd_cb->local_address->sa_len >= sizeof(struct sockaddr_in)) { struct sockaddr_in *sin = (struct sockaddr_in *)(void *)fd_cb->local_address; return sbcreatecontrol((caddr_t) &sin->sin_addr, sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP); - } else if (inp->inp_vflag & INP_IPV6 && (inp->inp_flags & IN6P_PKTINFO) != 0) { + } else if ((inp->inp_vflag & INP_IPV6) && + (inp->inp_flags & IN6P_PKTINFO) && + fd_cb->local_address->sa_family == AF_INET6 && + fd_cb->local_address->sa_len >= sizeof(struct sockaddr_in6)) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)(void *)fd_cb->local_address; struct in6_pktinfo pi6; - bcopy(&sin6->sin6_addr, &pi6.ipi6_addr, sizeof (struct in6_addr)); + bcopy(&sin6->sin6_addr, &pi6.ipi6_addr, sizeof(struct in6_addr)); pi6.ipi6_ifindex = 0; - return sbcreatecontrol((caddr_t)&pi6, sizeof (struct in6_pktinfo), IPV6_PKTINFO, IPPROTO_IPV6); + return sbcreatecontrol((caddr_t)&pi6, sizeof(struct in6_pktinfo), IPV6_PKTINFO, IPPROTO_IPV6); } } - return (NULL); + return NULL; } static void @@ -1936,9 +1946,9 @@ flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t off { FDLOCK(fd_cb); if (fd_cb->so != NULL) { - int error = 0; - mbuf_t data = NULL; - size_t data_size; + int error = 0; + mbuf_t data = NULL; + size_t data_size; struct sockaddr_storage remote_address; boolean_t got_remote_sa = FALSE; @@ -1970,9 +1980,8 @@ flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t off if (error || data == NULL) { FDLOG(LOG_ERR, fd_cb, "mbuf_split failed: %d", error); } else { - if (flow_divert_check_no_cellular(fd_cb) || - flow_divert_check_no_expensive(fd_cb)) - { + if (flow_divert_check_no_cellular(fd_cb) || + flow_divert_check_no_expensive(fd_cb)) { flow_divert_update_closed_state(fd_cb, SHUT_RDWR, TRUE); flow_divert_send_close(fd_cb, SHUT_RDWR); flow_divert_disconnect_socket(fd_cb->so); @@ -1993,10 +2002,10 @@ flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t off if (got_remote_sa == TRUE) { error = flow_divert_dup_addr(fd_cb->so->so_proto->pr_domain->dom_family, - (struct sockaddr *)&remote_address, &append_sa); + (struct sockaddr *)&remote_address, &append_sa); } else { error = flow_divert_dup_addr(fd_cb->so->so_proto->pr_domain->dom_family, - fd_cb->remote_address, &append_sa); + fd_cb->remote_address, &append_sa); } if (error) { FDLOG0(LOG_ERR, fd_cb, "failed to dup the socket address."); @@ -2026,8 +2035,8 @@ flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t off static void flow_divert_handle_read_notification(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset) { - uint32_t read_count; - int error = 0; + uint32_t read_count; + int error = 0; error = 
flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_READ_COUNT, sizeof(read_count), &read_count, NULL); if (error) { @@ -2096,11 +2105,11 @@ flow_divert_handle_group_init(struct flow_divert_group *group, mbuf_t packet, in static void flow_divert_handle_properties_update(struct flow_divert_pcb *fd_cb, mbuf_t packet, int offset) { - int error = 0; - struct sockaddr_storage local_address; - int out_if_index = 0; - struct sockaddr_storage remote_address; - uint32_t app_data_length = 0; + int error = 0; + struct sockaddr_storage local_address; + int out_if_index = 0; + struct sockaddr_storage remote_address; + uint32_t app_data_length = 0; FDLOG0(LOG_INFO, fd_cb, "received a properties update"); @@ -2171,7 +2180,7 @@ flow_divert_handle_properties_update(struct flow_divert_pcb *fd_cb, mbuf_t packe } if (app_data_length > 0) { - uint8_t *app_data = NULL; + uint8_t *app_data = NULL; MALLOC(app_data, uint8_t *, app_data_length, M_TEMP, M_WAITOK); if (app_data != NULL) { error = flow_divert_packet_get_tlv(packet, offset, FLOW_DIVERT_TLV_APP_DATA, app_data_length, app_data, NULL); @@ -2230,9 +2239,8 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet /* Compute the number of signing IDs and the total amount of bytes needed to store them */ for (cursor = flow_divert_packet_find_tlv(packet, offset, FLOW_DIVERT_TLV_SIGNING_ID, &error, 0); - cursor >= 0; - cursor = flow_divert_packet_find_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, &error, 1)) - { + cursor >= 0; + cursor = flow_divert_packet_find_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, &error, 1)) { uint32_t sid_size = 0; flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, 0, NULL, &sid_size); new_trie.bytes_count += sid_size; @@ -2248,7 +2256,7 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet new_trie.child_maps_count = (prefix_count + 1); /* + 1 for the root node */ FDLOG(LOG_INFO, &nil_pcb, "Nodes count = %lu, child maps count = %lu, bytes_count = %lu", - new_trie.nodes_count, new_trie.child_maps_count, new_trie.bytes_count); + new_trie.nodes_count, new_trie.child_maps_count, new_trie.bytes_count); nodes_mem_size = (sizeof(*new_trie.nodes) * new_trie.nodes_count); child_maps_mem_size = (sizeof(*new_trie.child_maps) * CHILD_MAP_SIZE * new_trie.child_maps_count); @@ -2264,7 +2272,7 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet MALLOC(new_trie.memory, void *, trie_memory_size, M_TEMP, M_WAITOK); if (new_trie.memory == NULL) { FDLOG(LOG_ERR, &nil_pcb, "Failed to allocate %lu bytes of memory for the signing ID trie", - nodes_mem_size + child_maps_mem_size + bytes_mem_size); + nodes_mem_size + child_maps_mem_size + bytes_mem_size); lck_rw_done(&group->lck); return; } @@ -2286,9 +2294,8 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet /* Add each signing ID to the trie */ for (cursor = flow_divert_packet_find_tlv(packet, offset, FLOW_DIVERT_TLV_SIGNING_ID, &error, 0); - cursor >= 0; - cursor = flow_divert_packet_find_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, &error, 1)) - { + cursor >= 0; + cursor = flow_divert_packet_find_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, &error, 1)) { uint32_t sid_size = 0; flow_divert_packet_get_tlv(packet, cursor, FLOW_DIVERT_TLV_SIGNING_ID, 0, NULL, &sid_size); if (new_trie.bytes_free_next + sid_size <= new_trie.bytes_count) { @@ -2318,9 +2325,9 @@ flow_divert_handle_app_map_create(struct flow_divert_group *group, mbuf_t packet static int 
flow_divert_input(mbuf_t packet, struct flow_divert_group *group) { - struct flow_divert_packet_header hdr; - int error = 0; - struct flow_divert_pcb *fd_cb; + struct flow_divert_packet_header hdr; + int error = 0; + struct flow_divert_pcb *fd_cb; if (mbuf_pkthdr_len(packet) < sizeof(hdr)) { FDLOG(LOG_ERR, &nil_pcb, "got a bad packet, length (%lu) < sizeof hdr (%lu)", mbuf_pkthdr_len(packet), sizeof(hdr)); @@ -2345,20 +2352,20 @@ flow_divert_input(mbuf_t packet, struct flow_divert_group *group) if (hdr.conn_id == 0) { switch (hdr.packet_type) { - case FLOW_DIVERT_PKT_GROUP_INIT: - flow_divert_handle_group_init(group, packet, sizeof(hdr)); - break; - case FLOW_DIVERT_PKT_APP_MAP_CREATE: - flow_divert_handle_app_map_create(group, packet, sizeof(hdr)); - break; - default: - FDLOG(LOG_WARNING, &nil_pcb, "got an unknown message type: %d", hdr.packet_type); - break; + case FLOW_DIVERT_PKT_GROUP_INIT: + flow_divert_handle_group_init(group, packet, sizeof(hdr)); + break; + case FLOW_DIVERT_PKT_APP_MAP_CREATE: + flow_divert_handle_app_map_create(group, packet, sizeof(hdr)); + break; + default: + FDLOG(LOG_WARNING, &nil_pcb, "got an unknown message type: %d", hdr.packet_type); + break; } goto done; } - fd_cb = flow_divert_pcb_lookup(hdr.conn_id, group); /* This retains the PCB */ + fd_cb = flow_divert_pcb_lookup(hdr.conn_id, group); /* This retains the PCB */ if (fd_cb == NULL) { if (hdr.packet_type != FLOW_DIVERT_PKT_CLOSE && hdr.packet_type != FLOW_DIVERT_PKT_READ_NOTIFY) { FDLOG(LOG_NOTICE, &nil_pcb, "got a %s message from group %d for an unknown pcb: %u", flow_divert_packet_type2str(hdr.packet_type), group->ctl_unit, hdr.conn_id); @@ -2367,24 +2374,24 @@ flow_divert_input(mbuf_t packet, struct flow_divert_group *group) } switch (hdr.packet_type) { - case FLOW_DIVERT_PKT_CONNECT_RESULT: - flow_divert_handle_connect_result(fd_cb, packet, sizeof(hdr)); - break; - case FLOW_DIVERT_PKT_CLOSE: - flow_divert_handle_close(fd_cb, packet, sizeof(hdr)); - break; - case FLOW_DIVERT_PKT_DATA: - flow_divert_handle_data(fd_cb, packet, sizeof(hdr)); - break; - case FLOW_DIVERT_PKT_READ_NOTIFY: - flow_divert_handle_read_notification(fd_cb, packet, sizeof(hdr)); - break; - case FLOW_DIVERT_PKT_PROPERTIES_UPDATE: - flow_divert_handle_properties_update(fd_cb, packet, sizeof(hdr)); - break; - default: - FDLOG(LOG_WARNING, fd_cb, "got an unknown message type: %d", hdr.packet_type); - break; + case FLOW_DIVERT_PKT_CONNECT_RESULT: + flow_divert_handle_connect_result(fd_cb, packet, sizeof(hdr)); + break; + case FLOW_DIVERT_PKT_CLOSE: + flow_divert_handle_close(fd_cb, packet, sizeof(hdr)); + break; + case FLOW_DIVERT_PKT_DATA: + flow_divert_handle_data(fd_cb, packet, sizeof(hdr)); + break; + case FLOW_DIVERT_PKT_READ_NOTIFY: + flow_divert_handle_read_notification(fd_cb, packet, sizeof(hdr)); + break; + case FLOW_DIVERT_PKT_PROPERTIES_UPDATE: + flow_divert_handle_properties_update(fd_cb, packet, sizeof(hdr)); + break; + default: + FDLOG(LOG_WARNING, fd_cb, "got an unknown message type: %d", hdr.packet_type); + break; } FDRELEASE(fd_cb); @@ -2397,8 +2404,8 @@ done: static void flow_divert_close_all(struct flow_divert_group *group) { - struct flow_divert_pcb *fd_cb; - SLIST_HEAD(, flow_divert_pcb) tmp_list; + struct flow_divert_pcb *fd_cb; + SLIST_HEAD(, flow_divert_pcb) tmp_list; SLIST_INIT(&tmp_list); @@ -2433,7 +2440,7 @@ flow_divert_close_all(struct flow_divert_group *group) void flow_divert_detach(struct socket *so) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; 
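	/*
	 * Editor's note on the lifetime rules used throughout this file:
	 * FDRETAIN() bumps ref_count atomically, and FDRELEASE() destroys the
	 * PCB when OSDecrementAtomic() returns 1, i.e. when the caller drops
	 * the last reference (OSDecrementAtomic yields the pre-decrement
	 * value). The owning socket and the group's RB tree each hold one
	 * reference, and lookups such as flow_divert_pcb_lookup() take a
	 * transient one; detach finishes by releasing the socket's reference.
	 */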
VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); @@ -2458,13 +2465,13 @@ flow_divert_detach(struct socket *so) FDUNLOCK(fd_cb); socket_lock(so, 0); - FDRELEASE(fd_cb); /* Release the socket's reference */ + FDRELEASE(fd_cb); /* Release the socket's reference */ } static int flow_divert_close(struct socket *so) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); @@ -2490,16 +2497,16 @@ flow_divert_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid __unused) { if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) { - return (EINVAL); + return EINVAL; } - return (flow_divert_close(so)); + return flow_divert_close(so); } static int flow_divert_shutdown(struct socket *so) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); @@ -2516,9 +2523,9 @@ flow_divert_shutdown(struct socket *so) static int flow_divert_rcvd(struct socket *so, int flags __unused) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - uint32_t latest_sb_size; - uint32_t read_count; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + uint32_t latest_sb_size; + uint32_t read_count; VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); @@ -2526,7 +2533,7 @@ flow_divert_rcvd(struct socket *so, int flags __unused) if (fd_cb->sb_size < latest_sb_size) { panic("flow divert rcvd event handler (%u): saved rcv buffer size (%u) is less than latest rcv buffer size (%u)", - fd_cb->hash, fd_cb->sb_size, latest_sb_size); + fd_cb->hash, fd_cb->sb_size, latest_sb_size); } read_count = fd_cb->sb_size - latest_sb_size; @@ -2585,22 +2592,21 @@ flow_divert_get_buffered_target_address(mbuf_t buffer) static boolean_t flow_divert_is_sockaddr_valid(struct sockaddr *addr) { - switch(addr->sa_family) - { - case AF_INET: - if (addr->sa_len != sizeof(struct sockaddr_in)) { - return FALSE; - } - break; + switch (addr->sa_family) { + case AF_INET: + if (addr->sa_len != sizeof(struct sockaddr_in)) { + return FALSE; + } + break; #if INET6 - case AF_INET6: - if (addr->sa_len != sizeof(struct sockaddr_in6)) { - return FALSE; - } - break; -#endif /* INET6 */ - default: + case AF_INET6: + if (addr->sa_len != sizeof(struct sockaddr_in6)) { return FALSE; + } + break; +#endif /* INET6 */ + default: + return FALSE; } return TRUE; } @@ -2631,23 +2637,23 @@ flow_divert_inp_to_sockaddr(const struct inpcb *inp, struct sockaddr **local_soc if (*local_socket == NULL) { error = ENOBUFS; } - return (error); + return error; } static boolean_t flow_divert_has_pcb_local_address(const struct inpcb *inp) { - return (inp->inp_lport != 0 - && (inp->inp_laddr.s_addr != INADDR_ANY || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))); + return inp->inp_lport != 0 + && (inp->inp_laddr.s_addr != INADDR_ANY || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)); } static errno_t flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, - struct sockaddr **dup) + struct sockaddr **dup) { - int error = 0; - struct sockaddr *result; - struct sockaddr_storage ss; + int error = 0; + struct sockaddr *result; + struct sockaddr_storage ss; if (addr != NULL) { result = addr; @@ -2661,7 +2667,7 @@ flow_divert_dup_addr(sa_family_t family, struct sockaddr *addr, else if (ss.ss_family == AF_INET6) { ss.ss_len = sizeof(struct sockaddr_in6); } -#endif /* INET6 */ +#endif /* INET6 */ else { error = EINVAL; } @@ -2688,11 +2694,11 @@ 
flow_divert_disconnect_socket(struct socket *so) inp = sotoinpcb(so); if (inp != NULL) { #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - else + } else #endif /* INET6 */ - in_pcbdetach(inp); + in_pcbdetach(inp); } } } @@ -2700,31 +2706,31 @@ flow_divert_disconnect_socket(struct socket *so) static errno_t flow_divert_getpeername(struct socket *so, struct sockaddr **sa) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); - return flow_divert_dup_addr(so->so_proto->pr_domain->dom_family, - fd_cb->remote_address, - sa); + return flow_divert_dup_addr(so->so_proto->pr_domain->dom_family, + fd_cb->remote_address, + sa); } static errno_t flow_divert_getsockaddr(struct socket *so, struct sockaddr **sa) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); - return flow_divert_dup_addr(so->so_proto->pr_domain->dom_family, - fd_cb->local_address, - sa); + return flow_divert_dup_addr(so->so_proto->pr_domain->dom_family, + fd_cb->local_address, + sa); } static errno_t flow_divert_ctloutput(struct socket *so, struct sockopt *sopt) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); @@ -2748,12 +2754,12 @@ flow_divert_ctloutput(struct socket *so, struct sockopt *sopt) errno_t flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - int error = 0; - struct inpcb *inp = sotoinpcb(so); - struct sockaddr_in *sinp; - mbuf_t connect_packet = NULL; - int do_send = 1; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + int error = 0; + struct inpcb *inp = sotoinpcb(so); + struct sockaddr_in *sinp; + mbuf_t connect_packet = NULL; + int do_send = 1; VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); @@ -2848,7 +2854,7 @@ flow_divert_connectx_out_common(struct socket *so, struct sockaddr *dst, int error; if (inp == NULL) { - return (EINVAL); + return EINVAL; } VERIFY(dst != NULL); @@ -2889,10 +2895,10 @@ flow_divert_connectx_out_common(struct socket *so, struct sockaddr *dst, } if (error == 0 && pcid != NULL) { - *pcid = 1; /* there is only 1 connection for a TCP */ + *pcid = 1; /* there is only 1 connection for a TCP */ } - return (error); + return error; } static int @@ -2901,7 +2907,7 @@ flow_divert_connectx_out(struct socket *so, struct sockaddr *src __unused, sae_associd_t aid __unused, sae_connid_t *pcid, uint32_t flags __unused, void *arg __unused, uint32_t arglen __unused, struct uio *uio, user_ssize_t *bytes_written) { - return (flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written)); + return flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written); } #if INET6 @@ -2911,20 +2917,20 @@ flow_divert_connectx6_out(struct socket *so, struct sockaddr *src __unused, sae_associd_t aid __unused, sae_connid_t *pcid, uint32_t flags __unused, void *arg __unused, uint32_t arglen __unused, struct uio *uio, user_ssize_t *bytes_written) { - return (flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written)); + return flow_divert_connectx_out_common(so, dst, p, pcid, uio, bytes_written); } #endif /* INET6 */ static int flow_divert_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags, - uint32_t 
*ifindex, int32_t *soerror, user_addr_t src, socklen_t *src_len, - user_addr_t dst, socklen_t *dst_len, uint32_t *aux_type, - user_addr_t aux_data __unused, uint32_t *aux_len) + uint32_t *ifindex, int32_t *soerror, user_addr_t src, socklen_t *src_len, + user_addr_t dst, socklen_t *dst_len, uint32_t *aux_type, + user_addr_t aux_data __unused, uint32_t *aux_len) { - int error = 0; - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - struct ifnet *ifp = NULL; - struct inpcb *inp = sotoinpcb(so); + int error = 0; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct ifnet *ifp = NULL; + struct inpcb *inp = sotoinpcb(so); VERIFY((so->so_flags & SOF_FLOW_DIVERT)); @@ -3004,36 +3010,36 @@ flow_divert_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *i int error = 0; switch (cmd) { - case SIOCGCONNINFO32: { - struct so_cinforeq32 cifr; - bcopy(data, &cifr, sizeof (cifr)); - error = flow_divert_getconninfo(so, cifr.scir_cid, &cifr.scir_flags, - &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src, - &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len, - &cifr.scir_aux_type, cifr.scir_aux_data, - &cifr.scir_aux_len); - if (error == 0) { - bcopy(&cifr, data, sizeof (cifr)); - } - break; + case SIOCGCONNINFO32: { + struct so_cinforeq32 cifr; + bcopy(data, &cifr, sizeof(cifr)); + error = flow_divert_getconninfo(so, cifr.scir_cid, &cifr.scir_flags, + &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src, + &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len, + &cifr.scir_aux_type, cifr.scir_aux_data, + &cifr.scir_aux_len); + if (error == 0) { + bcopy(&cifr, data, sizeof(cifr)); } + break; + } - case SIOCGCONNINFO64: { - struct so_cinforeq64 cifr; - bcopy(data, &cifr, sizeof (cifr)); - error = flow_divert_getconninfo(so, cifr.scir_cid, &cifr.scir_flags, - &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src, - &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len, - &cifr.scir_aux_type, cifr.scir_aux_data, - &cifr.scir_aux_len); - if (error == 0) { - bcopy(&cifr, data, sizeof (cifr)); - } - break; + case SIOCGCONNINFO64: { + struct so_cinforeq64 cifr; + bcopy(data, &cifr, sizeof(cifr)); + error = flow_divert_getconninfo(so, cifr.scir_cid, &cifr.scir_flags, + &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src, + &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len, + &cifr.scir_aux_type, cifr.scir_aux_data, + &cifr.scir_aux_len); + if (error == 0) { + bcopy(&cifr, data, sizeof(cifr)); } + break; + } - default: - error = EOPNOTSUPP; + default: + error = EOPNOTSUPP; } return error; @@ -3066,8 +3072,8 @@ flow_divert_in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifne static errno_t flow_divert_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - int error = 0; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + int error = 0; struct inpcb *inp; VERIFY((so->so_flags & SOF_FLOW_DIVERT) && so->so_fd_pcb != NULL); @@ -3087,8 +3093,8 @@ flow_divert_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr error = EINVAL; goto done; /* We don't support OOB data */ } - - error = flow_divert_check_no_cellular(fd_cb) || + + error = flow_divert_check_no_cellular(fd_cb) || flow_divert_check_no_expensive(fd_cb); if (error) { goto done; @@ -3135,7 +3141,7 @@ done: static int flow_divert_preconnect(struct socket *so) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; int error = 0; if (!(fd_cb->flags & 
FLOW_DIVERT_CONNECT_STARTED) && fd_cb->connect_packet != NULL) { @@ -3167,32 +3173,32 @@ flow_divert_set_protosw(struct socket *so) else { so->so_proto = (struct protosw *)&g_flow_divert_in6_protosw; } -#endif /* INET6 */ +#endif /* INET6 */ } static void flow_divert_set_udp_protosw(struct socket *so) { - so->so_flags |= SOF_FLOW_DIVERT; - if (SOCK_DOM(so) == PF_INET) { - so->so_proto = &g_flow_divert_in_udp_protosw; - } + so->so_flags |= SOF_FLOW_DIVERT; + if (SOCK_DOM(so) == PF_INET) { + so->so_proto = &g_flow_divert_in_udp_protosw; + } #if INET6 - else { - so->so_proto = (struct protosw *)&g_flow_divert_in6_udp_protosw; - } + else { + so->so_proto = (struct protosw *)&g_flow_divert_in6_udp_protosw; + } #endif /* INET6 */ } static errno_t flow_divert_attach(struct socket *so, uint32_t flow_id, uint32_t ctl_unit) { - int error = 0; - struct flow_divert_pcb *fd_cb = NULL; - struct ifnet *ifp = NULL; - struct inpcb *inp = NULL; - struct socket *old_so; - mbuf_t recv_data = NULL; + int error = 0; + struct flow_divert_pcb *fd_cb = NULL; + struct ifnet *ifp = NULL; + struct inpcb *inp = NULL; + struct socket *old_so; + mbuf_t recv_data = NULL; socket_unlock(so, 0); @@ -3218,7 +3224,7 @@ flow_divert_attach(struct socket *so, uint32_t flow_id, uint32_t ctl_unit) /* Dis-associate the flow divert control block from its current socket */ old_so = fd_cb->so; - inp = sotoinpcb(old_so); + inp = sotoinpcb(old_so); VERIFY(inp != NULL); @@ -3262,7 +3268,7 @@ done: socket_lock(so, 0); if (fd_cb != NULL) { - FDRELEASE(fd_cb); /* Release the reference obtained via flow_divert_pcb_lookup */ + FDRELEASE(fd_cb); /* Release the reference obtained via flow_divert_pcb_lookup */ } return error; @@ -3271,39 +3277,39 @@ done: errno_t flow_divert_implicit_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p) { - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; struct inpcb *inp; - int error = 0; + int error = 0; inp = sotoinpcb(so); if (inp == NULL) { - return (EINVAL); - } - - if (fd_cb == NULL) { - uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp); - if (fd_ctl_unit > 0) { - error = flow_divert_pcb_init(so, fd_ctl_unit); - fd_cb = so->so_fd_pcb; - if (error != 0 || fd_cb == NULL) { - goto done; - } - } else { - error = ENETDOWN; - goto done; - } - } - return flow_divert_data_out(so, flags, data, to, control, p); + return EINVAL; + } + + if (fd_cb == NULL) { + uint32_t fd_ctl_unit = necp_socket_get_flow_divert_control_unit(inp); + if (fd_ctl_unit > 0) { + error = flow_divert_pcb_init(so, fd_ctl_unit); + fd_cb = so->so_fd_pcb; + if (error != 0 || fd_cb == NULL) { + goto done; + } + } else { + error = ENETDOWN; + goto done; + } + } + return flow_divert_data_out(so, flags, data, to, control, p); done: - if (data) { - mbuf_freem(data); - } - if (control) { - mbuf_free(control); - } + if (data) { + mbuf_freem(data); + } + if (control) { + mbuf_free(control); + } - return error; + return error; } errno_t @@ -3315,7 +3321,7 @@ flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit) if (so->so_flags & SOF_FLOW_DIVERT) { return EALREADY; } - + fd_cb = flow_divert_pcb_create(so); if (fd_cb != NULL) { error = flow_divert_pcb_insert(fd_cb, ctl_unit); @@ -3344,12 +3350,12 @@ flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit) errno_t flow_divert_token_set(struct socket *so, struct sockopt *sopt) { - uint32_t ctl_unit = 0; - uint32_t key_unit = 0; - uint32_t flow_id = 0; - int error = 0; - int hmac_error 
= 0; - mbuf_t token = NULL; + uint32_t ctl_unit = 0; + uint32_t key_unit = 0; + uint32_t flow_id = 0; + int error = 0; + int hmac_error = 0; + mbuf_t token = NULL; if (so->so_flags & SOF_FLOW_DIVERT) { error = EALREADY; @@ -3366,10 +3372,9 @@ flow_divert_token_set(struct socket *so, struct sockopt *sopt) (SOCK_PROTO(so) != IPPROTO_TCP && SOCK_PROTO(so) != IPPROTO_UDP) || (SOCK_DOM(so) != PF_INET #if INET6 - && SOCK_DOM(so) != PF_INET6 + && SOCK_DOM(so) != PF_INET6 #endif - )) - { + )) { error = EINVAL; goto done; } else { @@ -3444,7 +3449,7 @@ flow_divert_token_set(struct socket *so, struct sockopt *sopt) int log_level = LOG_NOTICE; error = flow_divert_packet_get_tlv(token, 0, FLOW_DIVERT_TLV_LOG_LEVEL, - sizeof(log_level), &log_level, NULL); + sizeof(log_level), &log_level, NULL); if (error == 0) { fd_cb->log_level = log_level; } @@ -3475,12 +3480,12 @@ done: errno_t flow_divert_token_get(struct socket *so, struct sockopt *sopt) { - uint32_t ctl_unit; - int error = 0; - uint8_t hmac[SHA_DIGEST_LENGTH]; - struct flow_divert_pcb *fd_cb = so->so_fd_pcb; - mbuf_t token = NULL; - struct flow_divert_group *control_group = NULL; + uint32_t ctl_unit; + int error = 0; + uint8_t hmac[SHA_DIGEST_LENGTH]; + struct flow_divert_pcb *fd_cb = so->so_fd_pcb; + mbuf_t token = NULL; + struct flow_divert_group *control_group = NULL; if (!(so->so_flags & SOF_FLOW_DIVERT)) { error = EINVAL; @@ -3523,8 +3528,7 @@ flow_divert_token_get(struct socket *so, struct sockopt *sopt) lck_rw_lock_shared(&g_flow_divert_group_lck); if (g_flow_divert_groups != NULL && g_active_group_count > 0 && - fd_cb->control_group_unit > 0 && fd_cb->control_group_unit < GROUP_COUNT_MAX) - { + fd_cb->control_group_unit > 0 && fd_cb->control_group_unit < GROUP_COUNT_MAX) { control_group = g_flow_divert_groups[fd_cb->control_group_unit]; } @@ -3560,7 +3564,7 @@ flow_divert_token_get(struct socket *so, struct sockopt *sopt) error = soopt_mcopyout(sopt, token); if (error) { - token = NULL; /* For some reason, soopt_mcopyout() frees the mbuf if it fails */ + token = NULL; /* For some reason, soopt_mcopyout() frees the mbuf if it fails */ goto done; } @@ -3575,8 +3579,8 @@ done: static errno_t flow_divert_kctl_connect(kern_ctl_ref kctlref __unused, struct sockaddr_ctl *sac, void **unitinfo) { - struct flow_divert_group *new_group = NULL; - int error = 0; + struct flow_divert_group *new_group = NULL; + int error = 0; if (sac->sc_unit >= GROUP_COUNT_MAX) { error = EINVAL; @@ -3603,10 +3607,10 @@ flow_divert_kctl_connect(kern_ctl_ref kctlref __unused, struct sockaddr_ctl *sac if (g_flow_divert_groups == NULL) { MALLOC(g_flow_divert_groups, - struct flow_divert_group **, - GROUP_COUNT_MAX * sizeof(struct flow_divert_group *), - M_TEMP, - M_WAITOK | M_ZERO); + struct flow_divert_group **, + GROUP_COUNT_MAX * sizeof(struct flow_divert_group *), + M_TEMP, + M_WAITOK | M_ZERO); } if (g_flow_divert_groups == NULL) { @@ -3632,8 +3636,8 @@ done: static errno_t flow_divert_kctl_disconnect(kern_ctl_ref kctlref __unused, uint32_t unit, void *unitinfo) { - struct flow_divert_group *group = NULL; - errno_t error = 0; + struct flow_divert_group *group = NULL; + errno_t error = 0; if (unit >= GROUP_COUNT_MAX) { return EINVAL; @@ -3645,7 +3649,7 @@ flow_divert_kctl_disconnect(kern_ctl_ref kctlref __unused, uint32_t unit, void * if (g_flow_divert_groups == NULL || g_active_group_count == 0) { panic("flow divert group %u is disconnecting, but no groups are active (groups = %p, active count = %u", unit, - g_flow_divert_groups, g_active_group_count); + 
g_flow_divert_groups, g_active_group_count); } group = g_flow_divert_groups[unit]; @@ -3696,11 +3700,11 @@ flow_divert_kctl_send(kern_ctl_ref kctlref __unused, uint32_t unit __unused, voi static void flow_divert_kctl_rcvd(kern_ctl_ref kctlref __unused, uint32_t unit __unused, void *unitinfo, int flags __unused) { - struct flow_divert_group *group = (struct flow_divert_group *)unitinfo; + struct flow_divert_group *group = (struct flow_divert_group *)unitinfo; if (!OSTestAndClear(GROUP_BIT_CTL_ENQUEUE_BLOCKED, &group->atomic_bits)) { - struct flow_divert_pcb *fd_cb; - SLIST_HEAD(, flow_divert_pcb) tmp_list; + struct flow_divert_pcb *fd_cb; + SLIST_HEAD(, flow_divert_pcb) tmp_list; lck_rw_lock_shared(&g_flow_divert_group_lck); lck_rw_lock_exclusive(&group->lck); @@ -3749,13 +3753,13 @@ flow_divert_kctl_rcvd(kern_ctl_ref kctlref __unused, uint32_t unit __unused, voi static int flow_divert_kctl_init(void) { - struct kern_ctl_reg ctl_reg; - int result; + struct kern_ctl_reg ctl_reg; + int result; memset(&ctl_reg, 0, sizeof(ctl_reg)); strlcpy(ctl_reg.ctl_name, FLOW_DIVERT_CONTROL_NAME, sizeof(ctl_reg.ctl_name)); - ctl_reg.ctl_name[sizeof(ctl_reg.ctl_name)-1] = '\0'; + ctl_reg.ctl_name[sizeof(ctl_reg.ctl_name) - 1] = '\0'; ctl_reg.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_EXTENDED; ctl_reg.ctl_sendsize = FD_CTL_SENDBUFF_SIZE; ctl_reg.ctl_recvsize = FD_CTL_RCVBUFF_SIZE; @@ -3838,10 +3842,10 @@ flow_divert_init(void) g_flow_divert_in_udp_protosw.pr_ctloutput = flow_divert_ctloutput; /* - * Socket filters shouldn't attach/detach to/from this protosw - * since pr_protosw is to be used instead, which points to the - * real protocol; if they do, it is a bug and we should panic. - */ + * Socket filters shouldn't attach/detach to/from this protosw + * since pr_protosw is to be used instead, which points to the + * real protocol; if they do, it is a bug and we should panic. + */ g_flow_divert_in_udp_protosw.pr_filter_head.tqh_first = (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef; g_flow_divert_in_udp_protosw.pr_filter_head.tqh_last = @@ -3904,15 +3908,15 @@ flow_divert_init(void) g_flow_divert_in6_udp_protosw.pr_usrreqs = &g_flow_divert_in6_udp_usrreqs; g_flow_divert_in6_udp_protosw.pr_ctloutput = flow_divert_ctloutput; /* - * Socket filters shouldn't attach/detach to/from this protosw - * since pr_protosw is to be used instead, which points to the - * real protocol; if they do, it is a bug and we should panic. - */ + * Socket filters shouldn't attach/detach to/from this protosw + * since pr_protosw is to be used instead, which points to the + * real protocol; if they do, it is a bug and we should panic. + */ g_flow_divert_in6_udp_protosw.pr_filter_head.tqh_first = (struct socket_filter *)(uintptr_t)0xdeadbeefdeadbeef; g_flow_divert_in6_udp_protosw.pr_filter_head.tqh_last = (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef; -#endif /* INET6 */ +#endif /* INET6 */ flow_divert_grp_attr = lck_grp_attr_alloc_init(); if (flow_divert_grp_attr == NULL) { diff --git a/bsd/netinet/flow_divert.h b/bsd/netinet/flow_divert.h index c430a3935..bc1b636a7 100644 --- a/bsd/netinet/flow_divert.h +++ b/bsd/netinet/flow_divert.h @@ -2,7 +2,7 @@ * Copyright (c) 2012-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
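The flow_divert_kctl_init() hunk above fills in a kern_ctl_reg and registers the com.apple.flow-divert control; user-space agents reach a kernel control like this through a PF_SYSTEM socket. A minimal sketch of that client side, using the standard <sys/kern_control.h> interface (connecting to this particular control also needs the privilege implied by CTL_FLAG_PRIVILEGED):

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    struct ctl_info info;
    struct sockaddr_ctl addr;
    int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

    if (fd < 0) {
        perror("socket");
        return 1;
    }

    /* Resolve the control name (FLOW_DIVERT_CONTROL_NAME) to its id */
    memset(&info, 0, sizeof(info));
    strlcpy(info.ctl_name, "com.apple.flow-divert", sizeof(info.ctl_name));
    if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
        perror("CTLIOCGINFO");
        return 1;
    }

    /* sc_unit selects the flow divert group (must be < GROUP_COUNT_MAX) */
    memset(&addr, 0, sizeof(addr));
    addr.sc_len = sizeof(addr);
    addr.sc_family = AF_SYSTEM;
    addr.ss_sysaddr = AF_SYS_CONTROL;
    addr.sc_id = info.ctl_id;
    addr.sc_unit = 1;

    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
        perror("connect");   /* expect EPERM without the needed privilege */
        return 1;
    }

    close(fd);
    return 0;
}

Once connected, send()/recv() on this descriptor carry the packet-format messages that flow_divert_kctl_send() and flow_divert_kctl_rcvd() service on the kernel side.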
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,34 +35,33 @@ struct flow_divert_group; struct flow_divert_trie_node; struct flow_divert_pcb { - decl_lck_mtx_data(, mtx); - socket_t so; - RB_ENTRY(flow_divert_pcb) rb_link; - uint32_t hash; - mbuf_t connect_token; - struct sockaddr *local_address; - struct sockaddr *remote_address; - uint32_t flags; - uint32_t send_window; - uint32_t sb_size; - struct flow_divert_group *group; - uint32_t control_group_unit; - int32_t ref_count; - uint32_t bytes_written_by_app; - uint32_t bytes_read_by_app; - uint32_t bytes_sent; - uint32_t bytes_received; - uint8_t log_level; - SLIST_ENTRY(flow_divert_pcb) tmp_list_entry; - mbuf_t connect_packet; - uint8_t *app_data; - size_t app_data_length; + decl_lck_mtx_data(, mtx); + socket_t so; + RB_ENTRY(flow_divert_pcb) rb_link; + uint32_t hash; + mbuf_t connect_token; + struct sockaddr *local_address; + struct sockaddr *remote_address; + uint32_t flags; + uint32_t send_window; + uint32_t sb_size; + struct flow_divert_group *group; + uint32_t control_group_unit; + int32_t ref_count; + uint32_t bytes_written_by_app; + uint32_t bytes_read_by_app; + uint32_t bytes_sent; + uint32_t bytes_received; + uint8_t log_level; + SLIST_ENTRY(flow_divert_pcb) tmp_list_entry; + mbuf_t connect_packet; + uint8_t *app_data; + size_t app_data_length; }; RB_HEAD(fd_pcb_tree, flow_divert_pcb); -struct flow_divert_trie -{ +struct flow_divert_trie { struct flow_divert_trie_node *nodes; uint16_t *child_maps; uint8_t *bytes; @@ -77,23 +76,23 @@ struct flow_divert_trie }; struct flow_divert_group { - decl_lck_rw_data(, lck); - struct fd_pcb_tree pcb_tree; - uint32_t ctl_unit; - uint8_t atomic_bits; - MBUFQ_HEAD(send_queue_head) send_queue; - uint8_t *token_key; - size_t token_key_size; - uint32_t flags; - struct flow_divert_trie signing_id_trie; + decl_lck_rw_data(, lck); + struct fd_pcb_tree pcb_tree; + uint32_t ctl_unit; + uint8_t atomic_bits; + MBUFQ_HEAD(send_queue_head) send_queue; + uint8_t *token_key; + size_t token_key_size; + uint32_t flags; + struct flow_divert_trie signing_id_trie; }; -void flow_divert_init(void); -void flow_divert_detach(struct socket *so); -errno_t flow_divert_token_set(struct socket *so, struct sockopt *sopt); -errno_t flow_divert_token_get(struct socket *so, struct sockopt *sopt); -errno_t flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit); -errno_t flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p); -errno_t flow_divert_implicit_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p); +void flow_divert_init(void); +void flow_divert_detach(struct socket *so); +errno_t flow_divert_token_set(struct socket *so, struct sockopt *sopt); 
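Each flow_divert_pcb above carries an RB_ENTRY link and a hash key, and a group keeps its PCBs in the fd_pcb_tree red-black tree. A small user-space sketch of the same <sys/tree.h> pattern, assuming those BSD macros are available outside the kernel; the node type and comparator here are hypothetical stand-ins (the real comparator lives in flow_divert.c):

#include <sys/tree.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
    RB_ENTRY(node) link;   /* mirrors rb_link in flow_divert_pcb */
    uint32_t hash;         /* tree key, like flow_divert_pcb.hash */
};

static int
node_cmp(struct node *a, struct node *b)
{
    return (a->hash > b->hash) - (a->hash < b->hash);
}

RB_HEAD(node_tree, node);
RB_PROTOTYPE(node_tree, node, link, node_cmp);
RB_GENERATE(node_tree, node, link, node_cmp);

int
main(void)
{
    struct node_tree head = RB_INITIALIZER(&head);
    struct node *n, key;

    for (uint32_t h = 1; h <= 3; h++) {
        n = calloc(1, sizeof(*n));
        n->hash = h;
        RB_INSERT(node_tree, &head, n);  /* returns an existing dup, if any */
    }

    key.hash = 2;                        /* key-only element for lookup */
    n = RB_FIND(node_tree, &head, &key);
    printf("found hash %u\n", n ? n->hash : 0);
    return 0;
}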
+errno_t flow_divert_token_get(struct socket *so, struct sockopt *sopt); +errno_t flow_divert_pcb_init(struct socket *so, uint32_t ctl_unit); +errno_t flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p); +errno_t flow_divert_implicit_data_out(struct socket *so, int flags, mbuf_t data, struct sockaddr *to, mbuf_t control, struct proc *p); #endif /* __FLOW_DIVERT_H__ */ diff --git a/bsd/netinet/flow_divert_proto.h b/bsd/netinet/flow_divert_proto.h index 675444b77..84c39eb66 100644 --- a/bsd/netinet/flow_divert_proto.h +++ b/bsd/netinet/flow_divert_proto.h @@ -2,7 +2,7 @@ * Copyright (c) 2012-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,70 +22,70 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef __FLOW_DIVERT_PROTO_H__ #define __FLOW_DIVERT_PROTO_H__ -#define FLOW_DIVERT_CONTROL_NAME "com.apple.flow-divert" +#define FLOW_DIVERT_CONTROL_NAME "com.apple.flow-divert" -#define FLOW_DIVERT_TLV_LENGTH_UINT32 1 +#define FLOW_DIVERT_TLV_LENGTH_UINT32 1 -#define FLOW_DIVERT_PKT_CONNECT 1 -#define FLOW_DIVERT_PKT_CONNECT_RESULT 2 -#define FLOW_DIVERT_PKT_DATA 3 -#define FLOW_DIVERT_PKT_CLOSE 4 -#define FLOW_DIVERT_PKT_READ_NOTIFY 5 -#define FLOW_DIVERT_PKT_GROUP_INIT 6 -#define FLOW_DIVERT_PKT_PROPERTIES_UPDATE 7 -#define FLOW_DIVERT_PKT_APP_MAP_CREATE 9 +#define FLOW_DIVERT_PKT_CONNECT 1 +#define FLOW_DIVERT_PKT_CONNECT_RESULT 2 +#define FLOW_DIVERT_PKT_DATA 3 +#define FLOW_DIVERT_PKT_CLOSE 4 +#define FLOW_DIVERT_PKT_READ_NOTIFY 5 +#define FLOW_DIVERT_PKT_GROUP_INIT 6 +#define FLOW_DIVERT_PKT_PROPERTIES_UPDATE 7 +#define FLOW_DIVERT_PKT_APP_MAP_CREATE 9 -#define FLOW_DIVERT_TLV_NIL 0 -#define FLOW_DIVERT_TLV_ERROR_CODE 5 -#define FLOW_DIVERT_TLV_HOW 7 -#define FLOW_DIVERT_TLV_READ_COUNT 8 -#define FLOW_DIVERT_TLV_SPACE_AVAILABLE 9 -#define FLOW_DIVERT_TLV_CTL_UNIT 10 -#define FLOW_DIVERT_TLV_LOCAL_ADDR 11 -#define FLOW_DIVERT_TLV_REMOTE_ADDR 12 -#define FLOW_DIVERT_TLV_OUT_IF_INDEX 13 -#define FLOW_DIVERT_TLV_TRAFFIC_CLASS 14 -#define FLOW_DIVERT_TLV_NO_CELLULAR 15 -#define FLOW_DIVERT_TLV_FLOW_ID 16 -#define FLOW_DIVERT_TLV_TOKEN_KEY 17 -#define FLOW_DIVERT_TLV_HMAC 18 -#define FLOW_DIVERT_TLV_KEY_UNIT 19 -#define FLOW_DIVERT_TLV_LOG_LEVEL 20 -#define FLOW_DIVERT_TLV_TARGET_HOSTNAME 21 -#define FLOW_DIVERT_TLV_TARGET_ADDRESS 22 -#define FLOW_DIVERT_TLV_TARGET_PORT 23 -#define FLOW_DIVERT_TLV_CDHASH 24 -#define FLOW_DIVERT_TLV_SIGNING_ID 25 -#define FLOW_DIVERT_TLV_PID 26 -#define FLOW_DIVERT_TLV_UUID 27 -#define FLOW_DIVERT_TLV_PREFIX_COUNT 28 -#define 
FLOW_DIVERT_TLV_FLAGS 29 +#define FLOW_DIVERT_TLV_NIL 0 +#define FLOW_DIVERT_TLV_ERROR_CODE 5 +#define FLOW_DIVERT_TLV_HOW 7 +#define FLOW_DIVERT_TLV_READ_COUNT 8 +#define FLOW_DIVERT_TLV_SPACE_AVAILABLE 9 +#define FLOW_DIVERT_TLV_CTL_UNIT 10 +#define FLOW_DIVERT_TLV_LOCAL_ADDR 11 +#define FLOW_DIVERT_TLV_REMOTE_ADDR 12 +#define FLOW_DIVERT_TLV_OUT_IF_INDEX 13 +#define FLOW_DIVERT_TLV_TRAFFIC_CLASS 14 +#define FLOW_DIVERT_TLV_NO_CELLULAR 15 +#define FLOW_DIVERT_TLV_FLOW_ID 16 +#define FLOW_DIVERT_TLV_TOKEN_KEY 17 +#define FLOW_DIVERT_TLV_HMAC 18 +#define FLOW_DIVERT_TLV_KEY_UNIT 19 +#define FLOW_DIVERT_TLV_LOG_LEVEL 20 +#define FLOW_DIVERT_TLV_TARGET_HOSTNAME 21 +#define FLOW_DIVERT_TLV_TARGET_ADDRESS 22 +#define FLOW_DIVERT_TLV_TARGET_PORT 23 +#define FLOW_DIVERT_TLV_CDHASH 24 +#define FLOW_DIVERT_TLV_SIGNING_ID 25 +#define FLOW_DIVERT_TLV_PID 26 +#define FLOW_DIVERT_TLV_UUID 27 +#define FLOW_DIVERT_TLV_PREFIX_COUNT 28 +#define FLOW_DIVERT_TLV_FLAGS 29 #define FLOW_DIVERT_TLV_FLOW_TYPE 30 -#define FLOW_DIVERT_TLV_APP_DATA 31 +#define FLOW_DIVERT_TLV_APP_DATA 31 #define FLOW_DIVERT_FLOW_TYPE_TCP 1 #define FLOW_DIVERT_FLOW_TYPE_UDP 3 -#define FLOW_DIVERT_CHUNK_SIZE 4096 +#define FLOW_DIVERT_CHUNK_SIZE 4096 -#define FLOW_DIVERT_TOKEN_GETOPT_MAX_SIZE 128 +#define FLOW_DIVERT_TOKEN_GETOPT_MAX_SIZE 128 -#define FLOW_DIVERT_TOKEN_FLAG_VALIDATED 0x0000001 -#define FLOW_DIVERT_TOKEN_FLAG_TFO 0x0000002 -#define FLOW_DIVERT_TOKEN_FLAG_MPTCP 0x0000004 +#define FLOW_DIVERT_TOKEN_FLAG_VALIDATED 0x0000001 +#define FLOW_DIVERT_TOKEN_FLAG_TFO 0x0000002 +#define FLOW_DIVERT_TOKEN_FLAG_MPTCP 0x0000004 -#define FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP 0x0000001 +#define FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP 0x0000001 struct flow_divert_packet_header { - uint8_t packet_type; - uint32_t conn_id; + uint8_t packet_type; + uint32_t conn_id; }; #endif /* __FLOW_DIVERT_PROTO_H__ */ diff --git a/bsd/netinet/icmp6.h b/bsd/netinet/icmp6.h index 7e19edab7..786869ff1 100644 --- a/bsd/netinet/icmp6.h +++ b/bsd/netinet/icmp6.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
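Messages on the control channel start with the flow_divert_packet_header above and carry their arguments as type-length-value records tagged with the FLOW_DIVERT_TLV_* codes. A hedged sketch of scanning such a buffer, assuming a one-byte type followed by a four-byte network-order length, which is the layout the TLV helpers in flow_divert.c appear to use; treat that file as authoritative for the wire format:

#include <arpa/inet.h>   /* ntohl */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed layout per record: [u8 type][u32 length, network order][value] */
static const uint8_t *
tlv_find(const uint8_t *buf, size_t buf_len, uint8_t want, uint32_t *out_len)
{
    size_t off = 0;

    while (off + 5 <= buf_len) {        /* 5 = 1 type byte + 4 length bytes */
        uint8_t type = buf[off];
        uint32_t len;

        memcpy(&len, buf + off + 1, sizeof(len));
        len = ntohl(len);
        if (off + 5 + len > buf_len) {
            return NULL;                /* truncated record */
        }
        if (type == want) {
            *out_len = len;
            return buf + off + 5;
        }
        off += 5 + len;                 /* skip to the next TLV */
    }
    return NULL;
}

int
main(void)
{
    /* One record: type 20 (FLOW_DIVERT_TLV_LOG_LEVEL), length 4 */
    uint8_t msg[] = { 20, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x07 };
    uint32_t len = 0;
    const uint8_t *val = tlv_find(msg, sizeof(msg), 20, &len);

    printf("found=%d len=%u\n", val != NULL, len);
    return 0;
}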
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $KAME: icmp6.h,v 1.46 2001/04/27 15:09:48 itojun Exp $ */ @@ -95,139 +95,139 @@ #define _NETINET_ICMP6_H_ #include -#define ICMPV6_PLD_MAXLEN 1232 /* IPV6_MMTU - sizeof(struct ip6_hdr) - - sizeof(struct icmp6_hdr) */ +#define ICMPV6_PLD_MAXLEN 1232 /* IPV6_MMTU - sizeof(struct ip6_hdr) + * - sizeof(struct icmp6_hdr) */ struct icmp6_hdr { - u_int8_t icmp6_type; /* type field */ - u_int8_t icmp6_code; /* code field */ - u_int16_t icmp6_cksum; /* checksum field */ + u_int8_t icmp6_type; /* type field */ + u_int8_t icmp6_code; /* code field */ + u_int16_t icmp6_cksum; /* checksum field */ union { - u_int32_t icmp6_un_data32[1]; /* type-specific field */ - u_int16_t icmp6_un_data16[2]; /* type-specific field */ - u_int8_t icmp6_un_data8[4]; /* type-specific field */ + u_int32_t icmp6_un_data32[1]; /* type-specific field */ + u_int16_t icmp6_un_data16[2]; /* type-specific field */ + u_int8_t icmp6_un_data8[4]; /* type-specific field */ } icmp6_dataun; } __attribute__((__packed__)); -#define icmp6_data32 icmp6_dataun.icmp6_un_data32 -#define icmp6_data16 icmp6_dataun.icmp6_un_data16 -#define icmp6_data8 icmp6_dataun.icmp6_un_data8 -#define icmp6_pptr icmp6_data32[0] /* parameter prob */ -#define icmp6_mtu icmp6_data32[0] /* packet too big */ -#define icmp6_id icmp6_data16[0] /* echo request/reply */ -#define icmp6_seq icmp6_data16[1] /* echo request/reply */ -#define icmp6_maxdelay icmp6_data16[0] /* mcast group membership */ - -#define ICMP6_DST_UNREACH 1 /* dest unreachable, codes: */ -#define ICMP6_PACKET_TOO_BIG 2 /* packet too big */ -#define ICMP6_TIME_EXCEEDED 3 /* time exceeded, code: */ -#define ICMP6_PARAM_PROB 4 /* ip6 header bad */ - -#define ICMP6_ECHO_REQUEST 128 /* echo service */ -#define ICMP6_ECHO_REPLY 129 /* echo reply */ -#define MLD_LISTENER_QUERY 130 /* multicast listener query */ -#define MLD_LISTENER_REPORT 131 /* multicast listener report */ -#define MLD_LISTENER_DONE 132 /* multicast listener done */ +#define icmp6_data32 icmp6_dataun.icmp6_un_data32 +#define icmp6_data16 icmp6_dataun.icmp6_un_data16 +#define icmp6_data8 icmp6_dataun.icmp6_un_data8 +#define icmp6_pptr icmp6_data32[0] /* parameter prob */ +#define icmp6_mtu icmp6_data32[0] /* packet too big */ +#define icmp6_id icmp6_data16[0] /* echo request/reply */ +#define icmp6_seq icmp6_data16[1] /* echo request/reply */ +#define icmp6_maxdelay icmp6_data16[0] /* mcast group membership */ + +#define ICMP6_DST_UNREACH 1 /* dest unreachable, codes: */ +#define ICMP6_PACKET_TOO_BIG 2 /* packet too big */ +#define ICMP6_TIME_EXCEEDED 3 /* time exceeded, code: */ +#define ICMP6_PARAM_PROB 4 /* ip6 header bad */ + +#define ICMP6_ECHO_REQUEST 128 /* echo service */ +#define ICMP6_ECHO_REPLY 129 /* echo reply */ +#define MLD_LISTENER_QUERY 130 /* multicast listener query */ +#define MLD_LISTENER_REPORT 131 /* multicast listener report */ +#define MLD_LISTENER_DONE 132 /* multicast listener done */ #define MLD_LISTENER_REDUCTION MLD_LISTENER_DONE /* RFC3542 definition */ /* RFC2292 decls */ -#define ICMP6_MEMBERSHIP_QUERY 130 /* group membership query */ -#define ICMP6_MEMBERSHIP_REPORT 131 /* group membership report */ -#define ICMP6_MEMBERSHIP_REDUCTION 132 /* group membership termination */ +#define ICMP6_MEMBERSHIP_QUERY 130 /* group membership query */ +#define ICMP6_MEMBERSHIP_REPORT 131 /* group membership report */ +#define ICMP6_MEMBERSHIP_REDUCTION 132 /* group membership termination */ #ifndef KERNEL /* the followings are for backward compatibility to old 
KAME apps. */ -#define MLD6_LISTENER_QUERY MLD_LISTENER_QUERY -#define MLD6_LISTENER_REPORT MLD_LISTENER_REPORT -#define MLD6_LISTENER_DONE MLD_LISTENER_DONE +#define MLD6_LISTENER_QUERY MLD_LISTENER_QUERY +#define MLD6_LISTENER_REPORT MLD_LISTENER_REPORT +#define MLD6_LISTENER_DONE MLD_LISTENER_DONE #endif -#define ND_ROUTER_SOLICIT 133 /* router solicitation */ -#define ND_ROUTER_ADVERT 134 /* router advertisement */ -#define ND_NEIGHBOR_SOLICIT 135 /* neighbor solicitation */ -#define ND_NEIGHBOR_ADVERT 136 /* neighbor advertisement */ -#define ND_REDIRECT 137 /* redirect */ +#define ND_ROUTER_SOLICIT 133 /* router solicitation */ +#define ND_ROUTER_ADVERT 134 /* router advertisement */ +#define ND_NEIGHBOR_SOLICIT 135 /* neighbor solicitation */ +#define ND_NEIGHBOR_ADVERT 136 /* neighbor advertisement */ +#define ND_REDIRECT 137 /* redirect */ -#define ICMP6_ROUTER_RENUMBERING 138 /* router renumbering */ +#define ICMP6_ROUTER_RENUMBERING 138 /* router renumbering */ -#define ICMP6_WRUREQUEST 139 /* who are you request */ -#define ICMP6_WRUREPLY 140 /* who are you reply */ -#define ICMP6_FQDN_QUERY 139 /* FQDN query */ -#define ICMP6_FQDN_REPLY 140 /* FQDN reply */ -#define ICMP6_NI_QUERY 139 /* node information request */ -#define ICMP6_NI_REPLY 140 /* node information reply */ -#define MLDV2_LISTENER_REPORT 143 /* RFC3810 listener report */ +#define ICMP6_WRUREQUEST 139 /* who are you request */ +#define ICMP6_WRUREPLY 140 /* who are you reply */ +#define ICMP6_FQDN_QUERY 139 /* FQDN query */ +#define ICMP6_FQDN_REPLY 140 /* FQDN reply */ +#define ICMP6_NI_QUERY 139 /* node information request */ +#define ICMP6_NI_REPLY 140 /* node information reply */ +#define MLDV2_LISTENER_REPORT 143 /* RFC3810 listener report */ /* The definitions below are experimental. 
TBA */ -#define MLD_MTRACE_RESP 200 /* mtrace resp (to sender) */ -#define MLD_MTRACE 201 /* mtrace messages */ +#define MLD_MTRACE_RESP 200 /* mtrace resp (to sender) */ +#define MLD_MTRACE 201 /* mtrace messages */ #ifndef KERNEL -#define MLD6_MTRACE_RESP MLD_MTRACE_RESP -#define MLD6_MTRACE MLD_MTRACE +#define MLD6_MTRACE_RESP MLD_MTRACE_RESP +#define MLD6_MTRACE MLD_MTRACE #endif -#define ICMP6_MAXTYPE 201 +#define ICMP6_MAXTYPE 201 -#define ICMP6_DST_UNREACH_NOROUTE 0 /* no route to destination */ -#define ICMP6_DST_UNREACH_ADMIN 1 /* administratively prohibited */ -#define ICMP6_DST_UNREACH_NOTNEIGHBOR 2 /* not a neighbor(obsolete) */ -#define ICMP6_DST_UNREACH_BEYONDSCOPE 2 /* beyond scope of source address */ -#define ICMP6_DST_UNREACH_ADDR 3 /* address unreachable */ -#define ICMP6_DST_UNREACH_NOPORT 4 /* port unreachable */ +#define ICMP6_DST_UNREACH_NOROUTE 0 /* no route to destination */ +#define ICMP6_DST_UNREACH_ADMIN 1 /* administratively prohibited */ +#define ICMP6_DST_UNREACH_NOTNEIGHBOR 2 /* not a neighbor(obsolete) */ +#define ICMP6_DST_UNREACH_BEYONDSCOPE 2 /* beyond scope of source address */ +#define ICMP6_DST_UNREACH_ADDR 3 /* address unreachable */ +#define ICMP6_DST_UNREACH_NOPORT 4 /* port unreachable */ -#define ICMP6_TIME_EXCEED_TRANSIT 0 /* ttl==0 in transit */ -#define ICMP6_TIME_EXCEED_REASSEMBLY 1 /* ttl==0 in reass */ +#define ICMP6_TIME_EXCEED_TRANSIT 0 /* ttl==0 in transit */ +#define ICMP6_TIME_EXCEED_REASSEMBLY 1 /* ttl==0 in reass */ -#define ICMP6_PARAMPROB_HEADER 0 /* erroneous header field */ -#define ICMP6_PARAMPROB_NEXTHEADER 1 /* unrecognized next header */ -#define ICMP6_PARAMPROB_OPTION 2 /* unrecognized option */ +#define ICMP6_PARAMPROB_HEADER 0 /* erroneous header field */ +#define ICMP6_PARAMPROB_NEXTHEADER 1 /* unrecognized next header */ +#define ICMP6_PARAMPROB_OPTION 2 /* unrecognized option */ -#define ICMP6_INFOMSG_MASK 0x80 /* all informational messages */ +#define ICMP6_INFOMSG_MASK 0x80 /* all informational messages */ -#define ICMP6_NI_SUBJ_IPV6 0 /* Query Subject is an IPv6 address */ -#define ICMP6_NI_SUBJ_FQDN 1 /* Query Subject is a Domain name */ -#define ICMP6_NI_SUBJ_IPV4 2 /* Query Subject is an IPv4 address */ +#define ICMP6_NI_SUBJ_IPV6 0 /* Query Subject is an IPv6 address */ +#define ICMP6_NI_SUBJ_FQDN 1 /* Query Subject is a Domain name */ +#define ICMP6_NI_SUBJ_IPV4 2 /* Query Subject is an IPv4 address */ -#define ICMP6_NI_SUCCESS 0 /* node information successful reply */ -#define ICMP6_NI_REFUSED 1 /* node information request is refused */ -#define ICMP6_NI_UNKNOWN 2 /* unknown Qtype */ +#define ICMP6_NI_SUCCESS 0 /* node information successful reply */ +#define ICMP6_NI_REFUSED 1 /* node information request is refused */ +#define ICMP6_NI_UNKNOWN 2 /* unknown Qtype */ -#define ICMP6_ROUTER_RENUMBERING_COMMAND 0 /* rr command */ -#define ICMP6_ROUTER_RENUMBERING_RESULT 1 /* rr result */ -#define ICMP6_ROUTER_RENUMBERING_SEQNUM_RESET 255 /* rr seq num reset */ +#define ICMP6_ROUTER_RENUMBERING_COMMAND 0 /* rr command */ +#define ICMP6_ROUTER_RENUMBERING_RESULT 1 /* rr result */ +#define ICMP6_ROUTER_RENUMBERING_SEQNUM_RESET 255 /* rr seq num reset */ /* Used in kernel only */ -#define ND_REDIRECT_ONLINK 0 /* redirect to an on-link node */ -#define ND_REDIRECT_ROUTER 1 /* redirect to a better router */ +#define ND_REDIRECT_ONLINK 0 /* redirect to an on-link node */ +#define ND_REDIRECT_ROUTER 1 /* redirect to a better router */ /* * Multicast Listener Discovery */ struct mld_hdr { - struct icmp6_hdr mld_icmp6_hdr; - 
struct in6_addr mld_addr; /* multicast address */ + struct icmp6_hdr mld_icmp6_hdr; + struct in6_addr mld_addr; /* multicast address */ } __attribute__((__packed__)); /* definitions to provide backward compatibility to old KAME applications */ #ifndef KERNEL -#define mld6_hdr mld_hdr -#define mld6_type mld_type -#define mld6_code mld_code -#define mld6_cksum mld_cksum -#define mld6_maxdelay mld_maxdelay -#define mld6_reserved mld_reserved -#define mld6_addr mld_addr +#define mld6_hdr mld_hdr +#define mld6_type mld_type +#define mld6_code mld_code +#define mld6_cksum mld_cksum +#define mld6_maxdelay mld_maxdelay +#define mld6_reserved mld_reserved +#define mld6_addr mld_addr #endif /* shortcut macro definitions */ -#define mld_type mld_icmp6_hdr.icmp6_type -#define mld_code mld_icmp6_hdr.icmp6_code -#define mld_cksum mld_icmp6_hdr.icmp6_cksum -#define mld_maxdelay mld_icmp6_hdr.icmp6_data16[0] -#define mld_reserved mld_icmp6_hdr.icmp6_data16[1] -#define mld_v2_reserved mld_icmp6_hdr.icmp6_data16[0] -#define mld_v2_numrecs mld_icmp6_hdr.icmp6_data16[1] +#define mld_type mld_icmp6_hdr.icmp6_type +#define mld_code mld_icmp6_hdr.icmp6_code +#define mld_cksum mld_icmp6_hdr.icmp6_cksum +#define mld_maxdelay mld_icmp6_hdr.icmp6_data16[0] +#define mld_reserved mld_icmp6_hdr.icmp6_data16[1] +#define mld_v2_reserved mld_icmp6_hdr.icmp6_data16[0] +#define mld_v2_numrecs mld_icmp6_hdr.icmp6_data16[1] #define ICMP6_ERRORTYPE(type) \ @@ -237,170 +237,170 @@ struct mld_hdr { * Neighbor Discovery */ -struct nd_router_solicit { /* router solicitation */ - struct icmp6_hdr nd_rs_hdr; +struct nd_router_solicit { /* router solicitation */ + struct icmp6_hdr nd_rs_hdr; /* could be followed by options */ }__attribute__((__packed__)); -#define nd_rs_type nd_rs_hdr.icmp6_type -#define nd_rs_code nd_rs_hdr.icmp6_code -#define nd_rs_cksum nd_rs_hdr.icmp6_cksum -#define nd_rs_reserved nd_rs_hdr.icmp6_data32[0] +#define nd_rs_type nd_rs_hdr.icmp6_type +#define nd_rs_code nd_rs_hdr.icmp6_code +#define nd_rs_cksum nd_rs_hdr.icmp6_cksum +#define nd_rs_reserved nd_rs_hdr.icmp6_data32[0] -struct nd_router_advert { /* router advertisement */ - struct icmp6_hdr nd_ra_hdr; - u_int32_t nd_ra_reachable; /* reachable time */ - u_int32_t nd_ra_retransmit; /* retransmit timer */ +struct nd_router_advert { /* router advertisement */ + struct icmp6_hdr nd_ra_hdr; + u_int32_t nd_ra_reachable; /* reachable time */ + u_int32_t nd_ra_retransmit; /* retransmit timer */ /* could be followed by options */ } __attribute__((__packed__)); -#define nd_ra_type nd_ra_hdr.icmp6_type -#define nd_ra_code nd_ra_hdr.icmp6_code -#define nd_ra_cksum nd_ra_hdr.icmp6_cksum -#define nd_ra_curhoplimit nd_ra_hdr.icmp6_data8[0] -#define nd_ra_flags_reserved nd_ra_hdr.icmp6_data8[1] -#define ND_RA_FLAG_MANAGED 0x80 -#define ND_RA_FLAG_OTHER 0x40 -#define ND_RA_FLAG_HA 0x20 +#define nd_ra_type nd_ra_hdr.icmp6_type +#define nd_ra_code nd_ra_hdr.icmp6_code +#define nd_ra_cksum nd_ra_hdr.icmp6_cksum +#define nd_ra_curhoplimit nd_ra_hdr.icmp6_data8[0] +#define nd_ra_flags_reserved nd_ra_hdr.icmp6_data8[1] +#define ND_RA_FLAG_MANAGED 0x80 +#define ND_RA_FLAG_OTHER 0x40 +#define ND_RA_FLAG_HA 0x20 /* * Router preference values based on draft-draves-ipngwg-router-selection-01. * These are non-standard definitions. 
*/ -#define ND_RA_FLAG_RTPREF_MASK 0x18 /* 00011000 */ +#define ND_RA_FLAG_RTPREF_MASK 0x18 /* 00011000 */ -#define ND_RA_FLAG_RTPREF_HIGH 0x08 /* 00001000 */ -#define ND_RA_FLAG_RTPREF_MEDIUM 0x00 /* 00000000 */ -#define ND_RA_FLAG_RTPREF_LOW 0x18 /* 00011000 */ -#define ND_RA_FLAG_RTPREF_RSV 0x10 /* 00010000 */ +#define ND_RA_FLAG_RTPREF_HIGH 0x08 /* 00001000 */ +#define ND_RA_FLAG_RTPREF_MEDIUM 0x00 /* 00000000 */ +#define ND_RA_FLAG_RTPREF_LOW 0x18 /* 00011000 */ +#define ND_RA_FLAG_RTPREF_RSV 0x10 /* 00010000 */ -#define nd_ra_router_lifetime nd_ra_hdr.icmp6_data16[1] +#define nd_ra_router_lifetime nd_ra_hdr.icmp6_data16[1] -struct nd_neighbor_solicit { /* neighbor solicitation */ - struct icmp6_hdr nd_ns_hdr; - struct in6_addr nd_ns_target; /*target address */ +struct nd_neighbor_solicit { /* neighbor solicitation */ + struct icmp6_hdr nd_ns_hdr; + struct in6_addr nd_ns_target; /*target address */ /* could be followed by options */ }__attribute__((__packed__)); -#define nd_ns_type nd_ns_hdr.icmp6_type -#define nd_ns_code nd_ns_hdr.icmp6_code -#define nd_ns_cksum nd_ns_hdr.icmp6_cksum -#define nd_ns_reserved nd_ns_hdr.icmp6_data32[0] +#define nd_ns_type nd_ns_hdr.icmp6_type +#define nd_ns_code nd_ns_hdr.icmp6_code +#define nd_ns_cksum nd_ns_hdr.icmp6_cksum +#define nd_ns_reserved nd_ns_hdr.icmp6_data32[0] -struct nd_neighbor_advert { /* neighbor advertisement */ - struct icmp6_hdr nd_na_hdr; - struct in6_addr nd_na_target; /* target address */ +struct nd_neighbor_advert { /* neighbor advertisement */ + struct icmp6_hdr nd_na_hdr; + struct in6_addr nd_na_target; /* target address */ /* could be followed by options */ }__attribute__((__packed__)); -#define nd_na_type nd_na_hdr.icmp6_type -#define nd_na_code nd_na_hdr.icmp6_code -#define nd_na_cksum nd_na_hdr.icmp6_cksum -#define nd_na_flags_reserved nd_na_hdr.icmp6_data32[0] +#define nd_na_type nd_na_hdr.icmp6_type +#define nd_na_code nd_na_hdr.icmp6_code +#define nd_na_cksum nd_na_hdr.icmp6_cksum +#define nd_na_flags_reserved nd_na_hdr.icmp6_data32[0] #if BYTE_ORDER == BIG_ENDIAN -#define ND_NA_FLAG_ROUTER 0x80000000 -#define ND_NA_FLAG_SOLICITED 0x40000000 -#define ND_NA_FLAG_OVERRIDE 0x20000000 +#define ND_NA_FLAG_ROUTER 0x80000000 +#define ND_NA_FLAG_SOLICITED 0x40000000 +#define ND_NA_FLAG_OVERRIDE 0x20000000 #else #if BYTE_ORDER == LITTLE_ENDIAN -#define ND_NA_FLAG_ROUTER 0x80 -#define ND_NA_FLAG_SOLICITED 0x40 -#define ND_NA_FLAG_OVERRIDE 0x20 +#define ND_NA_FLAG_ROUTER 0x80 +#define ND_NA_FLAG_SOLICITED 0x40 +#define ND_NA_FLAG_OVERRIDE 0x20 #endif #endif -struct nd_redirect { /* redirect */ - struct icmp6_hdr nd_rd_hdr; - struct in6_addr nd_rd_target; /* target address */ - struct in6_addr nd_rd_dst; /* destination address */ +struct nd_redirect { /* redirect */ + struct icmp6_hdr nd_rd_hdr; + struct in6_addr nd_rd_target; /* target address */ + struct in6_addr nd_rd_dst; /* destination address */ /* could be followed by options */ }__attribute__((__packed__)); -#define nd_rd_type nd_rd_hdr.icmp6_type -#define nd_rd_code nd_rd_hdr.icmp6_code -#define nd_rd_cksum nd_rd_hdr.icmp6_cksum -#define nd_rd_reserved nd_rd_hdr.icmp6_data32[0] +#define nd_rd_type nd_rd_hdr.icmp6_type +#define nd_rd_code nd_rd_hdr.icmp6_code +#define nd_rd_cksum nd_rd_hdr.icmp6_cksum +#define nd_rd_reserved nd_rd_hdr.icmp6_data32[0] -struct nd_opt_hdr { /* Neighbor discovery option header */ - u_int8_t nd_opt_type; - u_int8_t nd_opt_len; +struct nd_opt_hdr { /* Neighbor discovery option header */ + u_int8_t nd_opt_type; + u_int8_t nd_opt_len; /* followed 
by option specific data*/ }__attribute__((__packed__)); -#define ND_OPT_SOURCE_LINKADDR 1 -#define ND_OPT_TARGET_LINKADDR 2 -#define ND_OPT_PREFIX_INFORMATION 3 -#define ND_OPT_REDIRECTED_HEADER 4 -#define ND_OPT_MTU 5 -#define ND_OPT_NONCE 14 /* RFC 3971 */ -#define ND_OPT_RDNSS 25 /* RFC 6106 */ -#define ND_OPT_DNSSL 31 /* RFC 6106 */ - -#define ND_OPT_ROUTE_INFO 200 /* draft-ietf-ipngwg-router-preference, not officially assigned yet */ - -struct nd_opt_prefix_info { /* prefix information */ - u_int8_t nd_opt_pi_type; - u_int8_t nd_opt_pi_len; - u_int8_t nd_opt_pi_prefix_len; - u_int8_t nd_opt_pi_flags_reserved; - u_int32_t nd_opt_pi_valid_time; - u_int32_t nd_opt_pi_preferred_time; - u_int32_t nd_opt_pi_reserved2; - struct in6_addr nd_opt_pi_prefix; +#define ND_OPT_SOURCE_LINKADDR 1 +#define ND_OPT_TARGET_LINKADDR 2 +#define ND_OPT_PREFIX_INFORMATION 3 +#define ND_OPT_REDIRECTED_HEADER 4 +#define ND_OPT_MTU 5 +#define ND_OPT_NONCE 14 /* RFC 3971 */ +#define ND_OPT_RDNSS 25 /* RFC 6106 */ +#define ND_OPT_DNSSL 31 /* RFC 6106 */ + +#define ND_OPT_ROUTE_INFO 200 /* draft-ietf-ipngwg-router-preference, not officially assigned yet */ + +struct nd_opt_prefix_info { /* prefix information */ + u_int8_t nd_opt_pi_type; + u_int8_t nd_opt_pi_len; + u_int8_t nd_opt_pi_prefix_len; + u_int8_t nd_opt_pi_flags_reserved; + u_int32_t nd_opt_pi_valid_time; + u_int32_t nd_opt_pi_preferred_time; + u_int32_t nd_opt_pi_reserved2; + struct in6_addr nd_opt_pi_prefix; }__attribute__((__packed__)); -#define ND_OPT_PI_FLAG_ONLINK 0x80 -#define ND_OPT_PI_FLAG_AUTO 0x40 +#define ND_OPT_PI_FLAG_ONLINK 0x80 +#define ND_OPT_PI_FLAG_AUTO 0x40 -#define ND_OPT_NONCE_LEN ((1 * 8) - 2) +#define ND_OPT_NONCE_LEN ((1 * 8) - 2) #if ((ND_OPT_NONCE_LEN + 2) % 8) != 0 #error "(ND_OPT_NONCE_LEN + 2) must be a multiple of 8." 
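Every neighbor-discovery option begins with the nd_opt_hdr shown above, and nd_opt_len counts units of 8 octets, which is exactly the invariant the #error about ND_OPT_NONCE_LEN enforces. A minimal sketch of walking the options that follow an RA/NS/NA message body (standard RFC 4861/3542 layout; the sample buffer is made up):

#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk ND options; opts points just past the fixed ND message header. */
static void
walk_nd_options(const uint8_t *opts, size_t len)
{
    size_t off = 0;

    while (off + sizeof(struct nd_opt_hdr) <= len) {
        struct nd_opt_hdr oh;

        memcpy(&oh, opts + off, sizeof(oh));
        if (oh.nd_opt_len == 0) {
            return;                 /* malformed: zero length is forbidden */
        }
        size_t opt_bytes = (size_t)oh.nd_opt_len << 3; /* units of 8 */
        if (off + opt_bytes > len) {
            return;                 /* truncated option */
        }
        printf("option type %u, %zu bytes\n", oh.nd_opt_type, opt_bytes);
        off += opt_bytes;
    }
}

int
main(void)
{
    /* A source link-layer address option: type 1, len 1 (8 bytes total) */
    uint8_t buf[8] = {
        ND_OPT_SOURCE_LINKADDR, 1, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff
    };

    walk_nd_options(buf, sizeof(buf));
    return 0;
}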
#endif -struct nd_opt_nonce { /* nonce option */ - u_int8_t nd_opt_nonce_type; - u_int8_t nd_opt_nonce_len; - u_int8_t nd_opt_nonce[ND_OPT_NONCE_LEN]; +struct nd_opt_nonce { /* nonce option */ + u_int8_t nd_opt_nonce_type; + u_int8_t nd_opt_nonce_len; + u_int8_t nd_opt_nonce[ND_OPT_NONCE_LEN]; } __attribute__((__packed__)); -struct nd_opt_rd_hdr { /* redirected header */ - u_int8_t nd_opt_rh_type; - u_int8_t nd_opt_rh_len; - u_int16_t nd_opt_rh_reserved1; - u_int32_t nd_opt_rh_reserved2; +struct nd_opt_rd_hdr { /* redirected header */ + u_int8_t nd_opt_rh_type; + u_int8_t nd_opt_rh_len; + u_int16_t nd_opt_rh_reserved1; + u_int32_t nd_opt_rh_reserved2; /* followed by IP header and data */ } __attribute__((__packed__)); -struct nd_opt_mtu { /* MTU option */ - u_int8_t nd_opt_mtu_type; - u_int8_t nd_opt_mtu_len; - u_int16_t nd_opt_mtu_reserved; - u_int32_t nd_opt_mtu_mtu; +struct nd_opt_mtu { /* MTU option */ + u_int8_t nd_opt_mtu_type; + u_int8_t nd_opt_mtu_len; + u_int16_t nd_opt_mtu_reserved; + u_int32_t nd_opt_mtu_mtu; }__attribute__((__packed__)); -struct nd_opt_route_info { /* route info */ - u_int8_t nd_opt_rti_type; - u_int8_t nd_opt_rti_len; - u_int8_t nd_opt_rti_prefixlen; - u_int8_t nd_opt_rti_flags; - u_int32_t nd_opt_rti_lifetime; +struct nd_opt_route_info { /* route info */ + u_int8_t nd_opt_rti_type; + u_int8_t nd_opt_rti_len; + u_int8_t nd_opt_rti_prefixlen; + u_int8_t nd_opt_rti_flags; + u_int32_t nd_opt_rti_lifetime; /* prefix follows */ }__attribute__((__packed__)); -struct nd_opt_rdnss { /* recursive domain name system servers */ - u_int8_t nd_opt_rdnss_type; - u_int8_t nd_opt_rdnss_len; - u_int16_t nd_opt_rdnss_reserved; - u_int32_t nd_opt_rdnss_lifetime; - struct in6_addr nd_opt_rdnss_addr[1]; +struct nd_opt_rdnss { /* recursive domain name system servers */ + u_int8_t nd_opt_rdnss_type; + u_int8_t nd_opt_rdnss_len; + u_int16_t nd_opt_rdnss_reserved; + u_int32_t nd_opt_rdnss_lifetime; + struct in6_addr nd_opt_rdnss_addr[1]; } __attribute__((__packed__)); -struct nd_opt_dnssl { /* domain name search list */ - u_int8_t nd_opt_dnssl_type; - u_int8_t nd_opt_dnssl_len; - u_int16_t nd_opt_dnssl_reserved; - u_int32_t nd_opt_dnssl_lifetime; - u_int8_t nd_opt_dnssl_domains[8]; +struct nd_opt_dnssl { /* domain name search list */ + u_int8_t nd_opt_dnssl_type; + u_int8_t nd_opt_dnssl_len; + u_int16_t nd_opt_dnssl_reserved; + u_int32_t nd_opt_dnssl_lifetime; + u_int8_t nd_opt_dnssl_domains[8]; } __attribute__((__packed__)); /* @@ -408,12 +408,12 @@ struct nd_opt_dnssl { /* domain name search list */ */ struct icmp6_namelookup { - struct icmp6_hdr icmp6_nl_hdr; - u_int8_t icmp6_nl_nonce[8]; - int32_t icmp6_nl_ttl; + struct icmp6_hdr icmp6_nl_hdr; + u_int8_t icmp6_nl_nonce[8]; + int32_t icmp6_nl_ttl; #if 0 - u_int8_t icmp6_nl_len; - u_int8_t icmp6_nl_name[3]; + u_int8_t icmp6_nl_len; + u_int8_t icmp6_nl_name[3]; #endif /* could be followed by options */ }__attribute__((__packed__)); @@ -427,65 +427,65 @@ struct icmp6_nodeinfo { /* could be followed by reply data */ }__attribute__((__packed__)); -#define ni_type icmp6_ni_hdr.icmp6_type -#define ni_code icmp6_ni_hdr.icmp6_code -#define ni_cksum icmp6_ni_hdr.icmp6_cksum -#define ni_qtype icmp6_ni_hdr.icmp6_data16[0] -#define ni_flags icmp6_ni_hdr.icmp6_data16[1] +#define ni_type icmp6_ni_hdr.icmp6_type +#define ni_code icmp6_ni_hdr.icmp6_code +#define ni_cksum icmp6_ni_hdr.icmp6_cksum +#define ni_qtype icmp6_ni_hdr.icmp6_data16[0] +#define ni_flags icmp6_ni_hdr.icmp6_data16[1] -#define NI_QTYPE_NOOP 0 /* NOOP */ -#define 
NI_QTYPE_SUPTYPES 1 /* Supported Qtypes */ -#define NI_QTYPE_FQDN 2 /* FQDN (draft 04) */ -#define NI_QTYPE_DNSNAME 2 /* DNS Name */ -#define NI_QTYPE_NODEADDR 3 /* Node Addresses */ -#define NI_QTYPE_IPV4ADDR 4 /* IPv4 Addresses */ +#define NI_QTYPE_NOOP 0 /* NOOP */ +#define NI_QTYPE_SUPTYPES 1 /* Supported Qtypes */ +#define NI_QTYPE_FQDN 2 /* FQDN (draft 04) */ +#define NI_QTYPE_DNSNAME 2 /* DNS Name */ +#define NI_QTYPE_NODEADDR 3 /* Node Addresses */ +#define NI_QTYPE_IPV4ADDR 4 /* IPv4 Addresses */ #if BYTE_ORDER == BIG_ENDIAN -#define NI_SUPTYPE_FLAG_COMPRESS 0x1 -#define NI_FQDN_FLAG_VALIDTTL 0x1 +#define NI_SUPTYPE_FLAG_COMPRESS 0x1 +#define NI_FQDN_FLAG_VALIDTTL 0x1 #elif BYTE_ORDER == LITTLE_ENDIAN -#define NI_SUPTYPE_FLAG_COMPRESS 0x0100 -#define NI_FQDN_FLAG_VALIDTTL 0x0100 +#define NI_SUPTYPE_FLAG_COMPRESS 0x0100 +#define NI_FQDN_FLAG_VALIDTTL 0x0100 #endif #ifdef NAME_LOOKUPS_04 #if BYTE_ORDER == BIG_ENDIAN -#define NI_NODEADDR_FLAG_LINKLOCAL 0x1 -#define NI_NODEADDR_FLAG_SITELOCAL 0x2 -#define NI_NODEADDR_FLAG_GLOBAL 0x4 -#define NI_NODEADDR_FLAG_ALL 0x8 -#define NI_NODEADDR_FLAG_TRUNCATE 0x10 -#define NI_NODEADDR_FLAG_ANYCAST 0x20 /* just experimental. not in spec */ +#define NI_NODEADDR_FLAG_LINKLOCAL 0x1 +#define NI_NODEADDR_FLAG_SITELOCAL 0x2 +#define NI_NODEADDR_FLAG_GLOBAL 0x4 +#define NI_NODEADDR_FLAG_ALL 0x8 +#define NI_NODEADDR_FLAG_TRUNCATE 0x10 +#define NI_NODEADDR_FLAG_ANYCAST 0x20 /* just experimental. not in spec */ #elif BYTE_ORDER == LITTLE_ENDIAN -#define NI_NODEADDR_FLAG_LINKLOCAL 0x0100 -#define NI_NODEADDR_FLAG_SITELOCAL 0x0200 -#define NI_NODEADDR_FLAG_GLOBAL 0x0400 -#define NI_NODEADDR_FLAG_ALL 0x0800 -#define NI_NODEADDR_FLAG_TRUNCATE 0x1000 -#define NI_NODEADDR_FLAG_ANYCAST 0x2000 /* just experimental. not in spec */ +#define NI_NODEADDR_FLAG_LINKLOCAL 0x0100 +#define NI_NODEADDR_FLAG_SITELOCAL 0x0200 +#define NI_NODEADDR_FLAG_GLOBAL 0x0400 +#define NI_NODEADDR_FLAG_ALL 0x0800 +#define NI_NODEADDR_FLAG_TRUNCATE 0x1000 +#define NI_NODEADDR_FLAG_ANYCAST 0x2000 /* just experimental. not in spec */ #endif #else /* draft-ietf-ipngwg-icmp-name-lookups-05 (and later?) */ #if BYTE_ORDER == BIG_ENDIAN -#define NI_NODEADDR_FLAG_TRUNCATE 0x1 -#define NI_NODEADDR_FLAG_ALL 0x2 -#define NI_NODEADDR_FLAG_COMPAT 0x4 -#define NI_NODEADDR_FLAG_LINKLOCAL 0x8 -#define NI_NODEADDR_FLAG_SITELOCAL 0x10 -#define NI_NODEADDR_FLAG_GLOBAL 0x20 -#define NI_NODEADDR_FLAG_ANYCAST 0x40 /* just experimental. not in spec */ +#define NI_NODEADDR_FLAG_TRUNCATE 0x1 +#define NI_NODEADDR_FLAG_ALL 0x2 +#define NI_NODEADDR_FLAG_COMPAT 0x4 +#define NI_NODEADDR_FLAG_LINKLOCAL 0x8 +#define NI_NODEADDR_FLAG_SITELOCAL 0x10 +#define NI_NODEADDR_FLAG_GLOBAL 0x20 +#define NI_NODEADDR_FLAG_ANYCAST 0x40 /* just experimental. not in spec */ #elif BYTE_ORDER == LITTLE_ENDIAN -#define NI_NODEADDR_FLAG_TRUNCATE 0x0100 -#define NI_NODEADDR_FLAG_ALL 0x0200 -#define NI_NODEADDR_FLAG_COMPAT 0x0400 -#define NI_NODEADDR_FLAG_LINKLOCAL 0x0800 -#define NI_NODEADDR_FLAG_SITELOCAL 0x1000 -#define NI_NODEADDR_FLAG_GLOBAL 0x2000 -#define NI_NODEADDR_FLAG_ANYCAST 0x4000 /* just experimental. not in spec */ +#define NI_NODEADDR_FLAG_TRUNCATE 0x0100 +#define NI_NODEADDR_FLAG_ALL 0x0200 +#define NI_NODEADDR_FLAG_COMPAT 0x0400 +#define NI_NODEADDR_FLAG_LINKLOCAL 0x0800 +#define NI_NODEADDR_FLAG_SITELOCAL 0x1000 +#define NI_NODEADDR_FLAG_GLOBAL 0x2000 +#define NI_NODEADDR_FLAG_ANYCAST 0x4000 /* just experimental. 
not in spec */ #endif #endif struct ni_reply_fqdn { - u_int32_t ni_fqdn_ttl; /* TTL */ + u_int32_t ni_fqdn_ttl; /* TTL */ u_int8_t ni_fqdn_namelen; /* length in octets of the FQDN */ u_int8_t ni_fqdn_name[3]; /* XXX: alignment */ }__attribute__((__packed__)); @@ -493,53 +493,53 @@ struct ni_reply_fqdn { /* * Router Renumbering. as router-renum-08.txt */ -struct icmp6_router_renum { /* router renumbering header */ - struct icmp6_hdr rr_hdr; - u_int8_t rr_segnum; - u_int8_t rr_flags; - u_int16_t rr_maxdelay; - u_int32_t rr_reserved; +struct icmp6_router_renum { /* router renumbering header */ + struct icmp6_hdr rr_hdr; + u_int8_t rr_segnum; + u_int8_t rr_flags; + u_int16_t rr_maxdelay; + u_int32_t rr_reserved; } __attribute__((__packed__)); -#define ICMP6_RR_FLAGS_TEST 0x80 -#define ICMP6_RR_FLAGS_REQRESULT 0x40 -#define ICMP6_RR_FLAGS_FORCEAPPLY 0x20 -#define ICMP6_RR_FLAGS_SPECSITE 0x10 -#define ICMP6_RR_FLAGS_PREVDONE 0x08 - -#define rr_type rr_hdr.icmp6_type -#define rr_code rr_hdr.icmp6_code -#define rr_cksum rr_hdr.icmp6_cksum -#define rr_seqnum rr_hdr.icmp6_data32[0] - -struct rr_pco_match { /* match prefix part */ - u_int8_t rpm_code; - u_int8_t rpm_len; - u_int8_t rpm_ordinal; - u_int8_t rpm_matchlen; - u_int8_t rpm_minlen; - u_int8_t rpm_maxlen; - u_int16_t rpm_reserved; - struct in6_addr rpm_prefix; +#define ICMP6_RR_FLAGS_TEST 0x80 +#define ICMP6_RR_FLAGS_REQRESULT 0x40 +#define ICMP6_RR_FLAGS_FORCEAPPLY 0x20 +#define ICMP6_RR_FLAGS_SPECSITE 0x10 +#define ICMP6_RR_FLAGS_PREVDONE 0x08 + +#define rr_type rr_hdr.icmp6_type +#define rr_code rr_hdr.icmp6_code +#define rr_cksum rr_hdr.icmp6_cksum +#define rr_seqnum rr_hdr.icmp6_data32[0] + +struct rr_pco_match { /* match prefix part */ + u_int8_t rpm_code; + u_int8_t rpm_len; + u_int8_t rpm_ordinal; + u_int8_t rpm_matchlen; + u_int8_t rpm_minlen; + u_int8_t rpm_maxlen; + u_int16_t rpm_reserved; + struct in6_addr rpm_prefix; } __attribute__((__packed__)); -#define RPM_PCO_ADD 1 -#define RPM_PCO_CHANGE 2 -#define RPM_PCO_SETGLOBAL 3 -#define RPM_PCO_MAX 4 - -struct rr_pco_use { /* use prefix part */ - u_int8_t rpu_uselen; - u_int8_t rpu_keeplen; - u_int8_t rpu_ramask; - u_int8_t rpu_raflags; - u_int32_t rpu_vltime; - u_int32_t rpu_pltime; - u_int32_t rpu_flags; - struct in6_addr rpu_prefix; +#define RPM_PCO_ADD 1 +#define RPM_PCO_CHANGE 2 +#define RPM_PCO_SETGLOBAL 3 +#define RPM_PCO_MAX 4 + +struct rr_pco_use { /* use prefix part */ + u_int8_t rpu_uselen; + u_int8_t rpu_keeplen; + u_int8_t rpu_ramask; + u_int8_t rpu_raflags; + u_int32_t rpu_vltime; + u_int32_t rpu_pltime; + u_int32_t rpu_flags; + struct in6_addr rpu_prefix; } __attribute__((__packed__)); -#define ICMP6_RR_PCOUSE_RAFLAGS_ONLINK 0x80 -#define ICMP6_RR_PCOUSE_RAFLAGS_AUTO 0x40 +#define ICMP6_RR_PCOUSE_RAFLAGS_ONLINK 0x80 +#define ICMP6_RR_PCOUSE_RAFLAGS_AUTO 0x40 #if BYTE_ORDER == BIG_ENDIAN #define ICMP6_RR_PCOUSE_FLAGS_DECRVLTIME 0x80000000 @@ -549,19 +549,19 @@ struct rr_pco_use { /* use prefix part */ #define ICMP6_RR_PCOUSE_FLAGS_DECRPLTIME 0x40 #endif -struct rr_result { /* router renumbering result message */ - u_int16_t rrr_flags; - u_int8_t rrr_ordinal; - u_int8_t rrr_matchedlen; - u_int32_t rrr_ifid; - struct in6_addr rrr_prefix; +struct rr_result { /* router renumbering result message */ + u_int16_t rrr_flags; + u_int8_t rrr_ordinal; + u_int8_t rrr_matchedlen; + u_int32_t rrr_ifid; + struct in6_addr rrr_prefix; } __attribute__((__packed__)); #if BYTE_ORDER == BIG_ENDIAN -#define ICMP6_RR_RESULT_FLAGS_OOB 0x0002 -#define ICMP6_RR_RESULT_FLAGS_FORBIDDEN 0x0001 
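The paired BIG_ENDIAN/LITTLE_ENDIAN constants here, like the NI_NODEADDR_* and ND_NA_* groups above, exist because these 16- and 32-bit flag words are tested in network byte order without an ntohs()/ntohl(): each host gets a pre-swapped constant instead. A small check of that equivalence for the 0x0002 result flag, assuming a little-endian host:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* Wire value with the RFC's 0x0002 flag set; on a little-endian host
     * the header defines ICMP6_RR_RESULT_FLAGS_OOB as 0x0200 for this. */
    uint16_t wire = htons(0x0002);
    uint16_t flags;

    memcpy(&flags, &wire, sizeof(flags)); /* as read straight off a packet */

    printf("LE constant 0x0200 matches: %d\n", (flags & 0x0200) != 0);
    printf("unswapped 0x0002 matches:   %d\n", (flags & 0x0002) != 0);
    return 0;
}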
+#define ICMP6_RR_RESULT_FLAGS_OOB 0x0002 +#define ICMP6_RR_RESULT_FLAGS_FORBIDDEN 0x0001 #elif BYTE_ORDER == LITTLE_ENDIAN -#define ICMP6_RR_RESULT_FLAGS_OOB 0x0200 -#define ICMP6_RR_RESULT_FLAGS_FORBIDDEN 0x0100 +#define ICMP6_RR_RESULT_FLAGS_OOB 0x0200 +#define ICMP6_RR_RESULT_FLAGS_FORBIDDEN 0x0100 #endif /* @@ -573,29 +573,29 @@ struct icmp6_filter { }; #ifdef KERNEL -#define ICMP6_FILTER_SETPASSALL(filterp) \ -do { \ - int i; u_char *ptr; \ - ptr = (u_char *)filterp; \ - for (i = 0; i < sizeof(struct icmp6_filter); i++) \ - ptr[i] = 0xff; \ +#define ICMP6_FILTER_SETPASSALL(filterp) \ +do { \ + int i; u_char *ptr; \ + ptr = (u_char *)filterp; \ + for (i = 0; i < sizeof(struct icmp6_filter); i++) \ + ptr[i] = 0xff; \ } while (0) -#define ICMP6_FILTER_SETBLOCKALL(filterp) \ +#define ICMP6_FILTER_SETBLOCKALL(filterp) \ bzero(filterp, sizeof(struct icmp6_filter)) #else /* KERNEL */ -#define ICMP6_FILTER_SETPASSALL(filterp) \ +#define ICMP6_FILTER_SETPASSALL(filterp) \ memset(filterp, 0xff, sizeof(struct icmp6_filter)) -#define ICMP6_FILTER_SETBLOCKALL(filterp) \ +#define ICMP6_FILTER_SETBLOCKALL(filterp) \ memset(filterp, 0x00, sizeof(struct icmp6_filter)) #endif /* KERNEL */ -#define ICMP6_FILTER_SETPASS(type, filterp) \ +#define ICMP6_FILTER_SETPASS(type, filterp) \ (((filterp)->icmp6_filt[(type) >> 5]) |= (1 << ((type) & 31))) -#define ICMP6_FILTER_SETBLOCK(type, filterp) \ +#define ICMP6_FILTER_SETBLOCK(type, filterp) \ (((filterp)->icmp6_filt[(type) >> 5]) &= ~(1 << ((type) & 31))) -#define ICMP6_FILTER_WILLPASS(type, filterp) \ +#define ICMP6_FILTER_WILLPASS(type, filterp) \ ((((filterp)->icmp6_filt[(type) >> 5]) & (1 << ((type) & 31))) != 0) -#define ICMP6_FILTER_WILLBLOCK(type, filterp) \ +#define ICMP6_FILTER_WILLBLOCK(type, filterp) \ ((((filterp)->icmp6_filt[(type) >> 5]) & (1 << ((type) & 31))) == 0) /* @@ -620,18 +620,18 @@ struct icmp6errstat { struct icmp6stat { /* statistics related to icmp6 packets generated */ - u_quad_t icp6s_error; /* # of calls to icmp6_error */ - u_quad_t icp6s_canterror; /* no error 'cuz old was icmp */ - u_quad_t icp6s_toofreq; /* no error 'cuz rate limitation */ + u_quad_t icp6s_error; /* # of calls to icmp6_error */ + u_quad_t icp6s_canterror; /* no error 'cuz old was icmp */ + u_quad_t icp6s_toofreq; /* no error 'cuz rate limitation */ u_quad_t icp6s_outhist[256]; /* statistics related to input message processed */ - u_quad_t icp6s_badcode; /* icmp6_code out of range */ - u_quad_t icp6s_tooshort; /* packet < sizeof(struct icmp6_hdr) */ - u_quad_t icp6s_checksum; /* bad checksum */ - u_quad_t icp6s_badlen; /* calculated bound mismatch */ - u_quad_t icp6s_reflect; /* number of responses */ - u_quad_t icp6s_inhist[256]; - u_quad_t icp6s_nd_toomanyopt; /* too many ND options */ + u_quad_t icp6s_badcode; /* icmp6_code out of range */ + u_quad_t icp6s_tooshort; /* packet < sizeof(struct icmp6_hdr) */ + u_quad_t icp6s_checksum; /* bad checksum */ + u_quad_t icp6s_badlen; /* calculated bound mismatch */ + u_quad_t icp6s_reflect; /* number of responses */ + u_quad_t icp6s_inhist[256]; + u_quad_t icp6s_nd_toomanyopt; /* too many ND options */ struct icmp6errstat icp6s_outerrhist; #define icp6s_odst_unreach_noroute \ icp6s_outerrhist.icp6errs_dst_unreach_noroute @@ -651,46 +651,46 @@ struct icmp6stat { #define icp6s_oparamprob_option icp6s_outerrhist.icp6errs_paramprob_option #define icp6s_oredirect icp6s_outerrhist.icp6errs_redirect #define icp6s_ounknown icp6s_outerrhist.icp6errs_unknown - u_quad_t icp6s_pmtuchg; /* path MTU changes */ - u_quad_t 
icp6s_nd_badopt; /* bad ND options */ - u_quad_t icp6s_badns; /* bad neighbor solicitation */ - u_quad_t icp6s_badna; /* bad neighbor advertisement */ - u_quad_t icp6s_badrs; /* bad router advertisement */ - u_quad_t icp6s_badra; /* bad router advertisement */ - u_quad_t icp6s_badredirect; /* bad redirect message */ + u_quad_t icp6s_pmtuchg; /* path MTU changes */ + u_quad_t icp6s_nd_badopt; /* bad ND options */ + u_quad_t icp6s_badns; /* bad neighbor solicitation */ + u_quad_t icp6s_badna; /* bad neighbor advertisement */ + u_quad_t icp6s_badrs; /* bad router advertisement */ + u_quad_t icp6s_badra; /* bad router advertisement */ + u_quad_t icp6s_badredirect; /* bad redirect message */ u_quad_t icp6s_rfc6980_drop; /* NDP packet dropped based on RFC 6980 */ }; /* * Names for ICMP sysctl objects */ -#define ICMPV6CTL_STATS 1 -#define ICMPV6CTL_REDIRACCEPT 2 /* accept/process redirects */ -#define ICMPV6CTL_REDIRTIMEOUT 3 /* redirect cache time */ -#if 0 /*obsoleted*/ -#define ICMPV6CTL_ERRRATELIMIT 5 /* ICMPv6 error rate limitation */ +#define ICMPV6CTL_STATS 1 +#define ICMPV6CTL_REDIRACCEPT 2 /* accept/process redirects */ +#define ICMPV6CTL_REDIRTIMEOUT 3 /* redirect cache time */ +#if 0 /*obsoleted*/ +#define ICMPV6CTL_ERRRATELIMIT 5 /* ICMPv6 error rate limitation */ #endif -#define ICMPV6CTL_ND6_PRUNE 6 -#define ICMPV6CTL_ND6_DELAY 8 -#define ICMPV6CTL_ND6_UMAXTRIES 9 -#define ICMPV6CTL_ND6_MMAXTRIES 10 -#define ICMPV6CTL_ND6_USELOOPBACK 11 +#define ICMPV6CTL_ND6_PRUNE 6 +#define ICMPV6CTL_ND6_DELAY 8 +#define ICMPV6CTL_ND6_UMAXTRIES 9 +#define ICMPV6CTL_ND6_MMAXTRIES 10 +#define ICMPV6CTL_ND6_USELOOPBACK 11 /*#define ICMPV6CTL_ND6_PROXYALL 12 obsoleted, do not reuse here */ -#define ICMPV6CTL_NODEINFO 13 -#define ICMPV6CTL_ERRPPSLIMIT 14 /* ICMPv6 error pps limitation */ -#define ICMPV6CTL_ND6_MAXNUDHINT 15 -#define ICMPV6CTL_MTUDISC_HIWAT 16 -#define ICMPV6CTL_MTUDISC_LOWAT 17 -#define ICMPV6CTL_ND6_DEBUG 18 -#define ICMPV6CTL_ND6_DRLIST 19 -#define ICMPV6CTL_ND6_PRLIST 20 -#define ICMPV6CTL_MLD_MAXSRCFILTER 21 -#define ICMPV6CTL_MLD_SOMAXSRC 22 -#define ICMPV6CTL_MLD_VERSION 23 -#define ICMPV6CTL_ND6_MAXQLEN 24 -#define ICMPV6CTL_ND6_ACCEPT_6TO4 25 -#define ICMPV6CTL_ND6_OPTIMISTIC_DAD 26 /* RFC 4429 */ -#define ICMPV6CTL_MAXID 27 +#define ICMPV6CTL_NODEINFO 13 +#define ICMPV6CTL_ERRPPSLIMIT 14 /* ICMPv6 error pps limitation */ +#define ICMPV6CTL_ND6_MAXNUDHINT 15 +#define ICMPV6CTL_MTUDISC_HIWAT 16 +#define ICMPV6CTL_MTUDISC_LOWAT 17 +#define ICMPV6CTL_ND6_DEBUG 18 +#define ICMPV6CTL_ND6_DRLIST 19 +#define ICMPV6CTL_ND6_PRLIST 20 +#define ICMPV6CTL_MLD_MAXSRCFILTER 21 +#define ICMPV6CTL_MLD_SOMAXSRC 22 +#define ICMPV6CTL_MLD_VERSION 23 +#define ICMPV6CTL_ND6_MAXQLEN 24 +#define ICMPV6CTL_ND6_ACCEPT_6TO4 25 +#define ICMPV6CTL_ND6_OPTIMISTIC_DAD 26 /* RFC 4429 */ +#define ICMPV6CTL_MAXID 27 #ifdef BSD_KERNEL_PRIVATE #define ICMPV6CTL_NAMES { \ @@ -724,96 +724,96 @@ struct icmp6stat { } # ifdef __STDC__ -struct rtentry; -struct rttimer; -struct in6_multi; +struct rtentry; +struct rttimer; +struct in6_multi; # endif struct ip6protosw; -void icmp6_init(struct ip6protosw *, struct domain *); -void icmp6_paramerror(struct mbuf *, int); +void icmp6_init(struct ip6protosw *, struct domain *); +void icmp6_paramerror(struct mbuf *, int); -void icmp6_error_flag(struct mbuf *, int, int, int, int); -#define ICMP6_ERROR_RST_MRCVIF 0x1 +void icmp6_error_flag(struct mbuf *, int, int, int, int); +#define ICMP6_ERROR_RST_MRCVIF 0x1 -void icmp6_error(struct mbuf *, int, int, int); -void 
icmp6_error2(struct mbuf *, int, int, int, struct ifnet *); -int icmp6_input(struct mbuf **, int *, int); -void icmp6_reflect(struct mbuf *, size_t); -void icmp6_prepare(struct mbuf *); -void icmp6_redirect_input(struct mbuf *, int); -void icmp6_redirect_output(struct mbuf *, struct rtentry *); +void icmp6_error(struct mbuf *, int, int, int); +void icmp6_error2(struct mbuf *, int, int, int, struct ifnet *); +int icmp6_input(struct mbuf **, int *, int); +void icmp6_reflect(struct mbuf *, size_t); +void icmp6_prepare(struct mbuf *); +void icmp6_redirect_input(struct mbuf *, int); +void icmp6_redirect_output(struct mbuf *, struct rtentry *); -struct ip6ctlparam; -void icmp6_mtudisc_update(struct ip6ctlparam *, int); +struct ip6ctlparam; +void icmp6_mtudisc_update(struct ip6ctlparam *, int); extern lck_rw_t icmp6_ifs_rwlock; /* XXX: is this the right place for these macros? */ /* N.B.: if_inet6data is never freed once set, so we don't need to lock */ -#define icmp6_ifstat_inc(_ifp, _tag) do { \ - if (_ifp != NULL && IN6_IFEXTRA(_ifp) != NULL) { \ - IN6_IFEXTRA(_ifp)->icmp6_ifstat._tag++; \ - } \ +#define icmp6_ifstat_inc(_ifp, _tag) do { \ + if (_ifp != NULL && IN6_IFEXTRA(_ifp) != NULL) { \ + IN6_IFEXTRA(_ifp)->icmp6_ifstat._tag++; \ + } \ } while (0) -#define icmp6_ifoutstat_inc(ifp, type, code) do { \ - icmp6_ifstat_inc(ifp, ifs6_out_msg); \ - if (type < ICMP6_INFOMSG_MASK) \ - icmp6_ifstat_inc(ifp, ifs6_out_error); \ - switch (type) { \ - case ICMP6_DST_UNREACH: \ - icmp6_ifstat_inc(ifp, ifs6_out_dstunreach); \ - if (code == ICMP6_DST_UNREACH_ADMIN) \ - icmp6_ifstat_inc(ifp, ifs6_out_adminprohib);\ - break; \ - case ICMP6_PACKET_TOO_BIG: \ - icmp6_ifstat_inc(ifp, ifs6_out_pkttoobig); \ - break; \ - case ICMP6_TIME_EXCEEDED: \ - icmp6_ifstat_inc(ifp, ifs6_out_timeexceed); \ - break; \ - case ICMP6_PARAM_PROB: \ - icmp6_ifstat_inc(ifp, ifs6_out_paramprob); \ - break; \ - case ICMP6_ECHO_REQUEST: \ - icmp6_ifstat_inc(ifp, ifs6_out_echo); \ - break; \ - case ICMP6_ECHO_REPLY: \ - icmp6_ifstat_inc(ifp, ifs6_out_echoreply); \ - break; \ - case MLD_LISTENER_QUERY: \ - icmp6_ifstat_inc(ifp, ifs6_out_mldquery); \ - break; \ - case MLD_LISTENER_REPORT: \ - icmp6_ifstat_inc(ifp, ifs6_out_mldreport); \ - break; \ - case MLD_LISTENER_DONE: \ - icmp6_ifstat_inc(ifp, ifs6_out_mlddone); \ - break; \ - case ND_ROUTER_SOLICIT: \ - icmp6_ifstat_inc(ifp, ifs6_out_routersolicit); \ - break; \ - case ND_ROUTER_ADVERT: \ - icmp6_ifstat_inc(ifp, ifs6_out_routeradvert); \ - break; \ - case ND_NEIGHBOR_SOLICIT: \ - icmp6_ifstat_inc(ifp, ifs6_out_neighborsolicit);\ - break; \ - case ND_NEIGHBOR_ADVERT: \ - icmp6_ifstat_inc(ifp, ifs6_out_neighboradvert); \ - break; \ - case ND_REDIRECT: \ - icmp6_ifstat_inc(ifp, ifs6_out_redirect); \ - break; \ - } \ +#define icmp6_ifoutstat_inc(ifp, type, code) do { \ + icmp6_ifstat_inc(ifp, ifs6_out_msg); \ + if (type < ICMP6_INFOMSG_MASK) \ + icmp6_ifstat_inc(ifp, ifs6_out_error); \ + switch (type) { \ + case ICMP6_DST_UNREACH: \ + icmp6_ifstat_inc(ifp, ifs6_out_dstunreach); \ + if (code == ICMP6_DST_UNREACH_ADMIN) \ + icmp6_ifstat_inc(ifp, ifs6_out_adminprohib);\ + break; \ + case ICMP6_PACKET_TOO_BIG: \ + icmp6_ifstat_inc(ifp, ifs6_out_pkttoobig); \ + break; \ + case ICMP6_TIME_EXCEEDED: \ + icmp6_ifstat_inc(ifp, ifs6_out_timeexceed); \ + break; \ + case ICMP6_PARAM_PROB: \ + icmp6_ifstat_inc(ifp, ifs6_out_paramprob); \ + break; \ + case ICMP6_ECHO_REQUEST: \ + icmp6_ifstat_inc(ifp, ifs6_out_echo); \ + break; \ + case ICMP6_ECHO_REPLY: \ + icmp6_ifstat_inc(ifp, 
ifs6_out_echoreply); \ + break; \ + case MLD_LISTENER_QUERY: \ + icmp6_ifstat_inc(ifp, ifs6_out_mldquery); \ + break; \ + case MLD_LISTENER_REPORT: \ + icmp6_ifstat_inc(ifp, ifs6_out_mldreport); \ + break; \ + case MLD_LISTENER_DONE: \ + icmp6_ifstat_inc(ifp, ifs6_out_mlddone); \ + break; \ + case ND_ROUTER_SOLICIT: \ + icmp6_ifstat_inc(ifp, ifs6_out_routersolicit); \ + break; \ + case ND_ROUTER_ADVERT: \ + icmp6_ifstat_inc(ifp, ifs6_out_routeradvert); \ + break; \ + case ND_NEIGHBOR_SOLICIT: \ + icmp6_ifstat_inc(ifp, ifs6_out_neighborsolicit);\ + break; \ + case ND_NEIGHBOR_ADVERT: \ + icmp6_ifstat_inc(ifp, ifs6_out_neighboradvert); \ + break; \ + case ND_REDIRECT: \ + icmp6_ifstat_inc(ifp, ifs6_out_redirect); \ + break; \ + } \ } while (0) -extern int icmp6_rediraccept; /* accept/process redirects */ -extern int icmp6_redirtimeout; /* cache time for redirect routes */ +extern int icmp6_rediraccept; /* accept/process redirects */ +extern int icmp6_redirtimeout; /* cache time for redirect routes */ -#define ICMP6_NODEINFO_FQDNOK 0x1 -#define ICMP6_NODEINFO_NODEADDROK 0x2 -#define ICMP6_NODEINFO_TMPADDROK 0x4 -#define ICMP6_NODEINFO_GLOBALOK 0x8 +#define ICMP6_NODEINFO_FQDNOK 0x1 +#define ICMP6_NODEINFO_NODEADDROK 0x2 +#define ICMP6_NODEINFO_TMPADDROK 0x4 +#define ICMP6_NODEINFO_GLOBALOK 0x8 #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/icmp_var.h b/bsd/netinet/icmp_var.h index bb3021d02..c1cb82595 100644 --- a/bsd/netinet/icmp_var.h +++ b/bsd/netinet/icmp_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -69,31 +69,31 @@ * Variables related to this implementation * of the internet control message protocol. 
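The ICMP6_FILTER_* macros above implement the RFC 3542 per-socket type filter, a 256-bit pass/block bitmap that input processing consults before delivering to a raw ICMPv6 socket. A minimal user-space sketch that blocks every type except echo replies (opening the raw socket typically requires root):

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct icmp6_filter filt;
    int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

    if (fd < 0) {
        perror("socket");   /* typically needs root */
        return 1;
    }

    ICMP6_FILTER_SETBLOCKALL(&filt);
    ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &filt);

    if (setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER,
        &filt, sizeof(filt)) == -1) {
        perror("setsockopt");
        close(fd);
        return 1;
    }

    /* recvfrom() on fd now only sees echo replies */
    close(fd);
    return 0;
}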
*/ -struct icmpstat { +struct icmpstat { /* statistics related to icmp packets generated */ - u_int32_t icps_error; /* # of calls to icmp_error */ - u_int32_t icps_oldshort; /* no error 'cuz old ip too short */ - u_int32_t icps_oldicmp; /* no error 'cuz old was icmp */ - u_int32_t icps_outhist[ICMP_MAXTYPE + 1]; + u_int32_t icps_error; /* # of calls to icmp_error */ + u_int32_t icps_oldshort; /* no error 'cuz old ip too short */ + u_int32_t icps_oldicmp; /* no error 'cuz old was icmp */ + u_int32_t icps_outhist[ICMP_MAXTYPE + 1]; /* statistics related to input messages processed */ - u_int32_t icps_badcode; /* icmp_code out of range */ - u_int32_t icps_tooshort; /* packet < ICMP_MINLEN */ - u_int32_t icps_checksum; /* bad checksum */ - u_int32_t icps_badlen; /* calculated bound mismatch */ - u_int32_t icps_reflect; /* number of responses */ - u_int32_t icps_inhist[ICMP_MAXTYPE + 1]; - u_int32_t icps_bmcastecho;/* b/mcast echo requests dropped */ - u_int32_t icps_bmcasttstamp; /* b/mcast tstamp requests dropped */ + u_int32_t icps_badcode; /* icmp_code out of range */ + u_int32_t icps_tooshort; /* packet < ICMP_MINLEN */ + u_int32_t icps_checksum; /* bad checksum */ + u_int32_t icps_badlen; /* calculated bound mismatch */ + u_int32_t icps_reflect; /* number of responses */ + u_int32_t icps_inhist[ICMP_MAXTYPE + 1]; + u_int32_t icps_bmcastecho;/* b/mcast echo requests dropped */ + u_int32_t icps_bmcasttstamp; /* b/mcast tstamp requests dropped */ }; /* * Names for ICMP sysctl objects */ -#define ICMPCTL_MASKREPL 1 /* allow replies to netmask requests */ -#define ICMPCTL_STATS 2 /* statistics (read-only) */ -#define ICMPCTL_ICMPLIM 3 -#define ICMPCTL_TIMESTAMP 4 /* allow replies to time stamp requests */ -#define ICMPCTL_MAXID 5 +#define ICMPCTL_MASKREPL 1 /* allow replies to netmask requests */ +#define ICMPCTL_STATS 2 /* statistics (read-only) */ +#define ICMPCTL_ICMPLIM 3 +#define ICMPCTL_TIMESTAMP 4 /* allow replies to time stamp requests */ +#define ICMPCTL_MAXID 5 #ifdef BSD_KERNEL_PRIVATE #define ICMPCTL_NAMES { \ @@ -116,6 +116,6 @@ extern int badport_bandlim(int); #define BANDLIM_RST_OPENPORT 4 /* No connection, listener */ #define BANDLIM_MAX 4 -extern struct icmpstat icmpstat; +extern struct icmpstat icmpstat; #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET_ICMP_VAR_H_ */ diff --git a/bsd/netinet/if_ether.h b/bsd/netinet/if_ether.h index 9ed719225..71043fd1e 100644 --- a/bsd/netinet/if_ether.h +++ b/bsd/netinet/if_ether.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
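The icmpstat counters above are exported read-only through ICMPCTL_STATS, and netstat-style tools fetch the whole structure in a single sysctl call. A sketch of that, assuming the usual net.inet.icmp.stats MIB name maps onto this object:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>   /* ICMP_MAXTYPE, needed by icmp_var.h */
#include <netinet/icmp_var.h>
#include <stdio.h>

int
main(void)
{
    struct icmpstat st;
    size_t len = sizeof(st);

    /* Assumed MIB name; it mirrors ICMPCTL_STATS under net.inet.icmp */
    if (sysctlbyname("net.inet.icmp.stats", &st, &len, NULL, 0) == -1) {
        perror("sysctlbyname");
        return 1;
    }
    printf("icmp_error calls: %u, bad checksums: %u\n",
        st.icps_error, st.icps_checksum);
    return 0;
}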
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -69,7 +69,7 @@ #include #include -#define ea_byte ether_addr_octet +#define ea_byte ether_addr_octet /* * Macro to map an IP multicast address to an Ethernet multicast address. @@ -92,16 +92,16 @@ * The high-order 16 bits of the Ethernet address are statically assigned, * and the low-order 32 bits are taken from the low end of the IP6 address. */ -#define ETHER_MAP_IPV6_MULTICAST(ip6addr, enaddr) \ -/* struct in6_addr *ip6addr; */ \ -/* u_char enaddr[ETHER_ADDR_LEN]; */ \ +#define ETHER_MAP_IPV6_MULTICAST(ip6addr, enaddr) \ +/* struct in6_addr *ip6addr; */ \ +/* u_char enaddr[ETHER_ADDR_LEN]; */ \ { \ - (enaddr)[0] = 0x33; \ - (enaddr)[1] = 0x33; \ - (enaddr)[2] = ((const u_char *)ip6addr)[12]; \ - (enaddr)[3] = ((const u_char *)ip6addr)[13]; \ - (enaddr)[4] = ((const u_char *)ip6addr)[14]; \ - (enaddr)[5] = ((const u_char *)ip6addr)[15]; \ + (enaddr)[0] = 0x33; \ + (enaddr)[1] = 0x33; \ + (enaddr)[2] = ((const u_char *)ip6addr)[12]; \ + (enaddr)[3] = ((const u_char *)ip6addr)[13]; \ + (enaddr)[4] = ((const u_char *)ip6addr)[14]; \ + (enaddr)[5] = ((const u_char *)ip6addr)[15]; \ } /* @@ -111,44 +111,44 @@ * to resolving internet addresses. Field names used correspond to * RFC 826. */ -struct ether_arp { - struct arphdr ea_hdr; /* fixed-size header */ - u_char arp_sha[ETHER_ADDR_LEN]; /* sender hardware address */ - u_char arp_spa[4]; /* sender protocol address */ - u_char arp_tha[ETHER_ADDR_LEN]; /* target hardware address */ - u_char arp_tpa[4]; /* target protocol address */ +struct ether_arp { + struct arphdr ea_hdr; /* fixed-size header */ + u_char arp_sha[ETHER_ADDR_LEN]; /* sender hardware address */ + u_char arp_spa[4]; /* sender protocol address */ + u_char arp_tha[ETHER_ADDR_LEN]; /* target hardware address */ + u_char arp_tpa[4]; /* target protocol address */ }; -#define arp_hrd ea_hdr.ar_hrd -#define arp_pro ea_hdr.ar_pro -#define arp_hln ea_hdr.ar_hln -#define arp_pln ea_hdr.ar_pln -#define arp_op ea_hdr.ar_op +#define arp_hrd ea_hdr.ar_hrd +#define arp_pro ea_hdr.ar_pro +#define arp_hln ea_hdr.ar_hln +#define arp_pln ea_hdr.ar_pln +#define arp_op ea_hdr.ar_op struct sockaddr_inarp { - u_char sin_len; - u_char sin_family; + u_char sin_len; + u_char sin_family; u_short sin_port; - struct in_addr sin_addr; - struct in_addr sin_srcaddr; - u_short sin_tos; - u_short sin_other; -#define SIN_PROXY 0x1 -#define SIN_ROUTER 0x2 + struct in_addr sin_addr; + struct in_addr sin_srcaddr; + u_short sin_tos; + u_short sin_other; +#define SIN_PROXY 0x1 +#define SIN_ROUTER 0x2 }; /* * IP and ethernet specific routing flags */ -#define RTF_USETRAILERS RTF_PROTO1 /* use trailers */ -#define RTF_ANNOUNCE RTF_PROTO2 /* announce new arp entry */ +#define RTF_USETRAILERS RTF_PROTO1 /* use trailers */ +#define RTF_ANNOUNCE RTF_PROTO2 /* announce new arp entry */ #ifdef BSD_KERNEL_PRIVATE -extern u_char ether_ipmulticast_min[ETHER_ADDR_LEN]; -extern u_char ether_ipmulticast_max[ETHER_ADDR_LEN]; -extern struct ifqueue arpintrq; +extern u_char ether_ipmulticast_min[ETHER_ADDR_LEN]; +extern u_char ether_ipmulticast_max[ETHER_ADDR_LEN]; +extern struct ifqueue arpintrq; -int arpresolve(struct ifnet *, struct rtentry *, struct mbuf *, - struct sockaddr *, u_char *, struct rtentry *); -void arp_ifinit(struct ifnet *, struct ifaddr *); +int arpresolve(struct ifnet *, struct rtentry *, struct mbuf *, + struct sockaddr *, u_char *, struct rtentry *); 
+void arp_ifinit(struct ifnet *, struct ifaddr *); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET_IF_ETHER_H_ */ diff --git a/bsd/netinet/if_tun.h b/bsd/netinet/if_tun.h index a5a54e238..71623501e 100644 --- a/bsd/netinet/if_tun.h +++ b/bsd/netinet/if_tun.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ @@ -47,34 +47,34 @@ #ifdef KERNEL_PRIVATE struct tun_softc { - u_short tun_flags; /* misc flags */ -#define TUN_OPEN 0x0001 -#define TUN_INITED 0x0002 -#define TUN_RCOLL 0x0004 -#define TUN_IASET 0x0008 -#define TUN_DSTADDR 0x0010 -#define TUN_RWAIT 0x0040 -#define TUN_ASYNC 0x0080 -#define TUN_NBIO 0x0100 + u_short tun_flags; /* misc flags */ +#define TUN_OPEN 0x0001 +#define TUN_INITED 0x0002 +#define TUN_RCOLL 0x0004 +#define TUN_IASET 0x0008 +#define TUN_DSTADDR 0x0010 +#define TUN_RWAIT 0x0040 +#define TUN_ASYNC 0x0080 +#define TUN_NBIO 0x0100 -#define TUN_READY (TUN_OPEN | TUN_INITED | TUN_IASET) +#define TUN_READY (TUN_OPEN | TUN_INITED | TUN_IASET) - struct ifnet tun_if; /* the interface */ - int tun_pgrp; /* the process group - if any */ - struct selinfo tun_rsel; /* read select */ - struct selinfo tun_wsel; /* write select (not used) */ + struct ifnet tun_if; /* the interface */ + int tun_pgrp; /* the process group - if any */ + struct selinfo tun_rsel; /* read select */ + struct selinfo tun_wsel; /* write select (not used) */ #if NBPFILTER > 0 - caddr_t tun_bpf; + caddr_t tun_bpf; #endif }; #endif /* KERNEL_PRIVATE */ /* ioctl's for get/set debug */ -#define TUNSDEBUG _IOW('t', 90, int) -#define TUNGDEBUG _IOR('t', 89, int) +#define TUNSDEBUG _IOW('t', 90, int) +#define TUNGDEBUG _IOR('t', 89, int) /* Maximum packet size */ -#define TUNMTU 1500 +#define TUNMTU 1500 #endif /* !_NET_IF_TUN_H_ */ diff --git a/bsd/netinet/igmp.c b/bsd/netinet/igmp.c index b96b869fa..6f4372935 100644 --- a/bsd/netinet/igmp.c +++ b/bsd/netinet/igmp.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
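The ETHER_MAP_IPV6_MULTICAST macro in the if_ether.h hunk above implements the RFC 2464 rule: a fixed 33:33 prefix followed by the low 32 bits of the IPv6 group address. A standalone rendering of the same mapping (the function name here is illustrative):

    #include <stdio.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>

    /* Mirror of ETHER_MAP_IPV6_MULTICAST: 33:33 + low 4 bytes of the
     * IPv6 group address. */
    static void map_ipv6_mcast(const struct in6_addr *ip6,
        unsigned char enaddr[6])
    {
        const unsigned char *p = (const unsigned char *)ip6;
        enaddr[0] = 0x33;
        enaddr[1] = 0x33;
        enaddr[2] = p[12];
        enaddr[3] = p[13];
        enaddr[4] = p[14];
        enaddr[5] = p[15];
    }

    int main(void) {
        struct in6_addr a;
        unsigned char mac[6];
        if (inet_pton(AF_INET6, "ff02::1:ff00:1234", &a) != 1)
            return 1;
        map_ipv6_mcast(&a, mac);
        /* Prints 33:33:ff:00:12:34, the solicited-node group's MAC. */
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
            mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }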
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -113,87 +113,87 @@ SLIST_HEAD(igmp_inm_relhead, in_multi); -static void igi_initvar(struct igmp_ifinfo *, struct ifnet *, int); +static void igi_initvar(struct igmp_ifinfo *, struct ifnet *, int); static struct igmp_ifinfo *igi_alloc(int); -static void igi_free(struct igmp_ifinfo *); -static void igi_delete(const struct ifnet *, struct igmp_inm_relhead *); -static void igmp_dispatch_queue(struct igmp_ifinfo *, struct ifqueue *, +static void igi_free(struct igmp_ifinfo *); +static void igi_delete(const struct ifnet *, struct igmp_inm_relhead *); +static void igmp_dispatch_queue(struct igmp_ifinfo *, struct ifqueue *, int, const int); -static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *, - struct igmp_tparams *); -static int igmp_handle_state_change(struct in_multi *, - struct igmp_ifinfo *, struct igmp_tparams *); -static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *, - struct igmp_tparams *); -static int igmp_input_v1_query(struct ifnet *, const struct ip *, - const struct igmp *); -static int igmp_input_v2_query(struct ifnet *, const struct ip *, - const struct igmp *); -static int igmp_input_v3_query(struct ifnet *, const struct ip *, - /*const*/ struct igmpv3 *); -static int igmp_input_v3_group_query(struct in_multi *, - int, /*const*/ struct igmpv3 *); -static int igmp_input_v1_report(struct ifnet *, struct mbuf *, - /*const*/ struct ip *, /*const*/ struct igmp *); -static int igmp_input_v2_report(struct ifnet *, struct mbuf *, - /*const*/ struct ip *, /*const*/ struct igmp *); -static void igmp_sendpkt(struct mbuf *); -static __inline__ int igmp_isgroupreported(const struct in_addr); +static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *, + struct igmp_tparams *); +static int igmp_handle_state_change(struct in_multi *, + struct igmp_ifinfo *, struct igmp_tparams *); +static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *, + struct igmp_tparams *); +static int igmp_input_v1_query(struct ifnet *, const struct ip *, + const struct igmp *); +static int igmp_input_v2_query(struct ifnet *, const struct ip *, + const struct igmp *); +static int igmp_input_v3_query(struct ifnet *, const struct ip *, + /*const*/ struct igmpv3 *); +static int igmp_input_v3_group_query(struct in_multi *, + int, /*const*/ struct igmpv3 *); +static int igmp_input_v1_report(struct ifnet *, struct mbuf *, + /*const*/ struct ip *, /*const*/ struct igmp *); +static int igmp_input_v2_report(struct ifnet *, struct mbuf *, + /*const*/ struct ip *, /*const*/ struct igmp *); +static void igmp_sendpkt(struct mbuf *); +static __inline__ int igmp_isgroupreported(const struct in_addr); static struct mbuf 
*igmp_ra_alloc(void); #ifdef IGMP_DEBUG static const char *igmp_rec_type_to_str(const int); #endif -static uint32_t igmp_set_version(struct igmp_ifinfo *, const int); -static void igmp_flush_relq(struct igmp_ifinfo *, +static uint32_t igmp_set_version(struct igmp_ifinfo *, const int); +static void igmp_flush_relq(struct igmp_ifinfo *, struct igmp_inm_relhead *); -static int igmp_v1v2_queue_report(struct in_multi *, const int); -static void igmp_v1v2_process_group_timer(struct in_multi *, const int); -static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *); -static uint32_t igmp_v2_update_group(struct in_multi *, const int); -static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *); -static uint32_t igmp_v3_dispatch_general_query(struct igmp_ifinfo *); +static int igmp_v1v2_queue_report(struct in_multi *, const int); +static void igmp_v1v2_process_group_timer(struct in_multi *, const int); +static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *); +static uint32_t igmp_v2_update_group(struct in_multi *, const int); +static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *); +static uint32_t igmp_v3_dispatch_general_query(struct igmp_ifinfo *); static struct mbuf * - igmp_v3_encap_report(struct ifnet *, struct mbuf *); -static int igmp_v3_enqueue_group_record(struct ifqueue *, - struct in_multi *, const int, const int, const int); -static int igmp_v3_enqueue_filter_change(struct ifqueue *, - struct in_multi *); -static void igmp_v3_process_group_timers(struct igmp_ifinfo *, - struct ifqueue *, struct ifqueue *, struct in_multi *, - const int); -static int igmp_v3_merge_state_changes(struct in_multi *, - struct ifqueue *); -static void igmp_v3_suppress_group_record(struct in_multi *); -static int sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS; -static int sysctl_igmp_gsr SYSCTL_HANDLER_ARGS; -static int sysctl_igmp_default_version SYSCTL_HANDLER_ARGS; - -static int igmp_timeout_run; /* IGMP timer is scheduled to run */ +igmp_v3_encap_report(struct ifnet *, struct mbuf *); +static int igmp_v3_enqueue_group_record(struct ifqueue *, + struct in_multi *, const int, const int, const int); +static int igmp_v3_enqueue_filter_change(struct ifqueue *, + struct in_multi *); +static void igmp_v3_process_group_timers(struct igmp_ifinfo *, + struct ifqueue *, struct ifqueue *, struct in_multi *, + const int); +static int igmp_v3_merge_state_changes(struct in_multi *, + struct ifqueue *); +static void igmp_v3_suppress_group_record(struct in_multi *); +static int sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS; +static int sysctl_igmp_gsr SYSCTL_HANDLER_ARGS; +static int sysctl_igmp_default_version SYSCTL_HANDLER_ARGS; + +static int igmp_timeout_run; /* IGMP timer is scheduled to run */ static void igmp_timeout(void *); static void igmp_sched_timeout(void); -static struct mbuf *m_raopt; /* Router Alert option */ +static struct mbuf *m_raopt; /* Router Alert option */ -static int querier_present_timers_running; /* IGMPv1/v2 older version - * querier present */ -static int interface_timers_running; /* IGMPv3 general - * query response */ -static int state_change_timers_running; /* IGMPv3 state-change - * retransmit */ -static int current_state_timers_running; /* IGMPv1/v2 host - * report; IGMPv3 g/sg - * query response */ +static int querier_present_timers_running; /* IGMPv1/v2 older version + * querier present */ +static int interface_timers_running; /* IGMPv3 general + * query response */ +static int state_change_timers_running; /* IGMPv3 state-change + * retransmit */ +static int 
current_state_timers_running; /* IGMPv1/v2 host + * report; IGMPv3 g/sg + * query response */ /* * Subsystem lock macros. */ -#define IGMP_LOCK() \ +#define IGMP_LOCK() \ lck_mtx_lock(&igmp_mtx) -#define IGMP_LOCK_ASSERT_HELD() \ +#define IGMP_LOCK_ASSERT_HELD() \ LCK_MTX_ASSERT(&igmp_mtx, LCK_MTX_ASSERT_OWNED) -#define IGMP_LOCK_ASSERT_NOTHELD() \ +#define IGMP_LOCK_ASSERT_NOTHELD() \ LCK_MTX_ASSERT(&igmp_mtx, LCK_MTX_ASSERT_NOTOWNED) -#define IGMP_UNLOCK() \ +#define IGMP_UNLOCK() \ lck_mtx_unlock(&igmp_mtx) static LIST_HEAD(, igmp_ifinfo) igi_head; @@ -235,8 +235,8 @@ SYSCTL_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_RW | CTLFLAG_LOCKED, &igmp_legacysupp, 0, "Allow v1/v2 reports to suppress v3 group responses"); SYSCTL_PROC(_net_inet_igmp, OID_AUTO, default_version, - CTLTYPE_INT | CTLFLAG_RW, - &igmp_default_version, 0, sysctl_igmp_default_version, "I", + CTLTYPE_INT | CTLFLAG_RW, + &igmp_default_version, 0, sysctl_igmp_default_version, "I", "Default version of IGMP to run on each interface"); SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay, CTLTYPE_INT | CTLFLAG_RW, @@ -245,16 +245,16 @@ SYSCTL_PROC(_net_inet_igmp, OID_AUTO, gsrdelay, #ifdef IGMP_DEBUG int igmp_debug = 0; SYSCTL_INT(_net_inet_igmp, OID_AUTO, - debug, CTLFLAG_RW | CTLFLAG_LOCKED, &igmp_debug, 0, ""); + debug, CTLFLAG_RW | CTLFLAG_LOCKED, &igmp_debug, 0, ""); #endif SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED, sysctl_igmp_ifinfo, "Per-interface IGMPv3 state"); /* Lock group and attribute for igmp_mtx */ -static lck_attr_t *igmp_mtx_attr; -static lck_grp_t *igmp_mtx_grp; -static lck_grp_attr_t *igmp_mtx_grp_attr; +static lck_attr_t *igmp_mtx_attr; +static lck_grp_t *igmp_mtx_grp; +static lck_grp_attr_t *igmp_mtx_grp_attr; /* * Locking and reference counting: @@ -289,38 +289,38 @@ static lck_grp_attr_t *igmp_mtx_grp_attr; static decl_lck_mtx_data(, igmp_mtx); static int igmp_timers_are_running; -#define IGMP_ADD_DETACHED_INM(_head, _inm) { \ - SLIST_INSERT_HEAD(_head, _inm, inm_dtle); \ +#define IGMP_ADD_DETACHED_INM(_head, _inm) { \ + SLIST_INSERT_HEAD(_head, _inm, inm_dtle); \ } -#define IGMP_REMOVE_DETACHED_INM(_head) { \ - struct in_multi *_inm, *_inm_tmp; \ - SLIST_FOREACH_SAFE(_inm, _head, inm_dtle, _inm_tmp) { \ - SLIST_REMOVE(_head, _inm, in_multi, inm_dtle); \ - INM_REMREF(_inm); \ - } \ - VERIFY(SLIST_EMPTY(_head)); \ +#define IGMP_REMOVE_DETACHED_INM(_head) { \ + struct in_multi *_inm, *_inm_tmp; \ + SLIST_FOREACH_SAFE(_inm, _head, inm_dtle, _inm_tmp) { \ + SLIST_REMOVE(_head, _inm, in_multi, inm_dtle); \ + INM_REMREF(_inm); \ + } \ + VERIFY(SLIST_EMPTY(_head)); \ } -#define IGI_ZONE_MAX 64 /* maximum elements in zone */ -#define IGI_ZONE_NAME "igmp_ifinfo" /* zone name */ +#define IGI_ZONE_MAX 64 /* maximum elements in zone */ +#define IGI_ZONE_NAME "igmp_ifinfo" /* zone name */ -static unsigned int igi_size; /* size of zone element */ -static struct zone *igi_zone; /* zone for igmp_ifinfo */ +static unsigned int igi_size; /* size of zone element */ +static struct zone *igi_zone; /* zone for igmp_ifinfo */ /* Store IGMPv3 record count in the module private scratch space */ -#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0] +#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0] static __inline void igmp_save_context(struct mbuf *m, struct ifnet *ifp) { - m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.rcvif = ifp; } static __inline void igmp_scrub_context(struct mbuf *m) { - m->m_pkthdr.rcvif = NULL; + m->m_pkthdr.rcvif = NULL; } #ifdef 
IGMP_DEBUG @@ -330,7 +330,7 @@ inet_ntop_haddr(in_addr_t haddr, char *buf, socklen_t size) struct in_addr ia; ia.s_addr = htonl(haddr); - return (inet_ntop(AF_INET, &ia, buf, size)); + return inet_ntop(AF_INET, &ia, buf, size); } #endif @@ -341,7 +341,7 @@ inet_ntop_haddr(in_addr_t haddr, char *buf, socklen_t size) static __inline struct ifnet * igmp_restore_context(struct mbuf *m) { - return (m->m_pkthdr.rcvif); + return m->m_pkthdr.rcvif; } /* @@ -351,20 +351,22 @@ static int sysctl_igmp_default_version SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg2) - int error; - int new; + int error; + int new; IGMP_LOCK(); error = SYSCTL_OUT(req, arg1, sizeof(int)); - if (error || !req->newptr) + if (error || !req->newptr) { goto out_locked; + } new = igmp_default_version; error = SYSCTL_IN(req, &new, sizeof(int)); - if (error) + if (error) { goto out_locked; + } if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) { error = EINVAL; @@ -378,7 +380,7 @@ sysctl_igmp_default_version SYSCTL_HANDLER_ARGS out_locked: IGMP_UNLOCK(); - return (error); + return error; } /* @@ -397,8 +399,9 @@ sysctl_igmp_gsr SYSCTL_HANDLER_ARGS i = igmp_gsrdelay.tv_sec; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || !req->newptr) + if (error || !req->newptr) { goto out_locked; + } if (i < -1 || i >= 60) { error = EINVAL; @@ -409,7 +412,7 @@ sysctl_igmp_gsr SYSCTL_HANDLER_ARGS out_locked: IGMP_UNLOCK(); - return (error); + return error; } /* @@ -421,21 +424,23 @@ static int sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS { #pragma unused(oidp) - int *name; - int error; - u_int namelen; - struct ifnet *ifp; - struct igmp_ifinfo *igi; - struct igmp_ifinfo_u igi_u; + int *name; + int error; + u_int namelen; + struct ifnet *ifp; + struct igmp_ifinfo *igi; + struct igmp_ifinfo_u igi_u; name = (int *)arg1; namelen = arg2; - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } - if (namelen != 1) - return (EINVAL); + if (namelen != 1) { + return EINVAL; + } IGMP_LOCK(); @@ -449,10 +454,11 @@ sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS ifnet_head_lock_shared(); ifp = ifindex2ifnet[name[0]]; ifnet_head_done(); - if (ifp == NULL) + if (ifp == NULL) { goto out_locked; + } - bzero(&igi_u, sizeof (igi_u)); + bzero(&igi_u, sizeof(igi_u)); LIST_FOREACH(igi, &igi_head, igi_link) { IGI_LOCK(igi); @@ -472,13 +478,13 @@ sysctl_igmp_ifinfo SYSCTL_HANDLER_ARGS igi_u.igi_uri = igi->igi_uri; IGI_UNLOCK(igi); - error = SYSCTL_OUT(req, &igi_u, sizeof (igi_u)); + error = SYSCTL_OUT(req, &igi_u, sizeof(igi_u)); break; } out_locked: IGMP_UNLOCK(); - return (error); + return error; } /* @@ -493,30 +499,37 @@ igmp_dispatch_queue(struct igmp_ifinfo *igi, struct ifqueue *ifq, int limit, struct mbuf *m; struct ip *ip; - if (igi != NULL) + if (igi != NULL) { IGI_LOCK_ASSERT_HELD(igi); + } for (;;) { IF_DEQUEUE(ifq, m); - if (m == NULL) + if (m == NULL) { break; + } IGMP_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifq), (uint64_t)VM_KERNEL_ADDRPERM(m))); ip = mtod(m, struct ip *); - if (loop) + if (loop) { m->m_flags |= M_IGMP_LOOP; - if (igi != NULL) + } + if (igi != NULL) { IGI_UNLOCK(igi); + } igmp_sendpkt(m); - if (igi != NULL) + if (igi != NULL) { IGI_LOCK(igi); - if (--limit == 0) + } + if (--limit == 0) { break; + } } - if (igi != NULL) + if (igi != NULL) { IGI_LOCK_ASSERT_HELD(igi); + } } /* @@ -533,14 +546,15 @@ igmp_dispatch_queue(struct igmp_ifinfo *igi, struct ifqueue *ifq, int limit, */ static __inline__ -int igmp_isgroupreported(const struct in_addr 
addr) +int +igmp_isgroupreported(const struct in_addr addr) { - if (in_allhosts(addr) || - ((!igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr))))) - return (0); + ((!igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr))))) { + return 0; + } - return (1); + return 1; } /* @@ -549,19 +563,19 @@ int igmp_isgroupreported(const struct in_addr addr) static struct mbuf * igmp_ra_alloc(void) { - struct mbuf *m; - struct ipoption *p; + struct mbuf *m; + struct ipoption *p; MGET(m, M_WAITOK, MT_DATA); p = mtod(m, struct ipoption *); p->ipopt_dst.s_addr = INADDR_ANY; - p->ipopt_list[0] = (char)IPOPT_RA; /* Router Alert Option */ - p->ipopt_list[1] = 0x04; /* 4 bytes long */ - p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */ - p->ipopt_list[3] = 0x00; /* pad byte */ + p->ipopt_list[0] = (char)IPOPT_RA; /* Router Alert Option */ + p->ipopt_list[1] = 0x04; /* 4 bytes long */ + p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */ + p->ipopt_list[3] = 0x00; /* pad byte */ m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1]; - return (m); + return m; } /* @@ -576,8 +590,9 @@ igmp_domifattach(struct ifnet *ifp, int how) __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name)); igi = igi_alloc(how); - if (igi == NULL) - return (NULL); + if (igi == NULL) { + return NULL; + } IGMP_LOCK(); @@ -596,9 +611,9 @@ igmp_domifattach(struct ifnet *ifp, int how) IGMP_UNLOCK(); IGMP_PRINTF(("%s: allocate igmp_ifinfo for ifp 0x%llx(%s)\n", __func__, - (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name)); + (uint64_t)VM_KERNEL_ADDRPERM(ifp), ifp->if_name)); - return (igi); + return igi; } /* @@ -696,10 +711,11 @@ igmp_initsilent(struct ifnet *ifp, struct igmp_ifinfo *igi) IGI_LOCK_ASSERT_NOTHELD(igi); IGI_LOCK(igi); - if (!(ifp->if_flags & IFF_MULTICAST)) + if (!(ifp->if_flags & IFF_MULTICAST)) { igi->igi_flags |= IGIF_SILENT; - else + } else { igi->igi_flags &= ~IGIF_SILENT; + } IGI_UNLOCK(igi); } @@ -716,8 +732,9 @@ igi_initvar(struct igmp_ifinfo *igi, struct ifnet *ifp, int reattach) igi->igi_qri = IGMP_QRI_INIT; igi->igi_uri = IGMP_URI_INIT; - if (!reattach) + if (!reattach) { SLIST_INIT(&igi->igi_relinmhead); + } /* * Responses to general queries are subject to bounds. 
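igmp_ra_alloc() above pre-builds the RFC 2113 Router Alert option that IGMP packets carry: type IPOPT_RA (148), length 4, and two zero value octets, with IPOPT_EOL (0) doubling as the first value byte. Userland can attach the same four bytes with IP_OPTIONS; a sketch (whether the stack accepts an RA option on an ordinary UDP socket is platform-dependent):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/in_systm.h>
    #include <netinet/ip.h>      /* IPOPT_RA, IPOPT_EOL */

    int main(void) {
        /* RFC 2113 layout: type 148, length 4, 2-octet value 0. */
        unsigned char ra[4] = { IPOPT_RA, 4, 0, 0 };
        int s = socket(AF_INET, SOCK_DGRAM, 0);
        if (s < 0 ||
            setsockopt(s, IPPROTO_IP, IP_OPTIONS, ra, sizeof(ra)) != 0) {
            perror("IP_OPTIONS");
            return 1;
        }
        puts("Router Alert option attached");
        return 0;
    }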
@@ -737,7 +754,7 @@ igi_alloc(int how) lck_mtx_init(&igi->igi_lock, igmp_mtx_grp, igmp_mtx_attr); igi->igi_debug |= IFD_ALLOC; } - return (igi); + return igi; } static void @@ -767,17 +784,19 @@ igi_free(struct igmp_ifinfo *igi) void igi_addref(struct igmp_ifinfo *igi, int locked) { - if (!locked) + if (!locked) { IGI_LOCK_SPIN(igi); - else + } else { IGI_LOCK_ASSERT_HELD(igi); + } if (++igi->igi_refcnt == 0) { panic("%s: igi=%p wraparound refcnt", __func__, igi); /* NOTREACHED */ } - if (!locked) + if (!locked) { IGI_UNLOCK(igi); + } } void @@ -825,10 +844,10 @@ static int igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip, const struct igmp *igmp) { - struct igmp_ifinfo *igi; - struct in_multi *inm; - struct in_multistep step; - struct igmp_tparams itp = { 0, 0, 0, 0 }; + struct igmp_ifinfo *igi; + struct in_multi *inm; + struct in_multistep step; + struct igmp_tparams itp = { 0, 0, 0, 0 }; IGMP_LOCK_ASSERT_NOTHELD(); @@ -874,8 +893,9 @@ igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip, IN_FIRST_MULTI(step, inm); while (inm != NULL) { INM_LOCK(inm); - if (inm->inm_ifp != ifp || inm->inm_timer != 0) + if (inm->inm_ifp != ifp || inm->inm_timer != 0) { goto next; + } switch (inm->inm_state) { case IGMP_NOT_MEMBER: @@ -903,7 +923,7 @@ next: done: igmp_set_timeout(&itp); - return (0); + return 0; } /* @@ -913,11 +933,11 @@ static int igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip, const struct igmp *igmp) { - struct igmp_ifinfo *igi; - struct in_multi *inm; - int is_general_query; - uint16_t timer; - struct igmp_tparams itp = { 0, 0, 0, 0 }; + struct igmp_ifinfo *igi; + struct in_multi *inm; + int is_general_query; + uint16_t timer; + struct igmp_tparams itp = { 0, 0, 0, 0 }; IGMP_LOCK_ASSERT_NOTHELD(); @@ -931,8 +951,9 @@ igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip, * IGMPv2 General Query. * If this was not sent to the all-hosts group, ignore it. */ - if (!in_allhosts(ip->ip_dst)) + if (!in_allhosts(ip->ip_dst)) { goto done; + } IGMPSTAT_INC(igps_rcv_gen_queries); is_general_query = 1; } else { @@ -962,8 +983,9 @@ igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip, IGI_UNLOCK(igi); timer = igmp->igmp_code / IGMP_TIMER_SCALE; - if (timer == 0) + if (timer == 0) { timer = 1; + } if (is_general_query) { struct in_multistep step; @@ -978,8 +1000,9 @@ igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip, IN_FIRST_MULTI(step, inm); while (inm != NULL) { INM_LOCK(inm); - if (inm->inm_ifp == ifp) + if (inm->inm_ifp == ifp) { itp.cst += igmp_v2_update_group(inm, timer); + } INM_UNLOCK(inm); IN_NEXT_MULTI(step, inm); } @@ -1006,7 +1029,7 @@ igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip, done: igmp_set_timeout(&itp); - return (0); + return 0; } /* @@ -1019,7 +1042,7 @@ done: * We may be updating the group for the first time since we switched * to IGMPv3. If we are, then we must clear any recorded source lists, * and transition to REPORTING state; the group timer is overloaded - * for group and group-source query responses. + * for group and group-source query responses. * * Unlike IGMPv3, the delay per group should be jittered * to avoid bursts of IGMPv2 reports. 
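The comment above notes that per-group IGMPv2 delays are jittered to avoid report bursts. The classic BSD IGMP_RANDOM_DELAY picks uniformly in [1, max]; a sketch of that arithmetic (the kernel draws from its own RNG, not random(3)):

    #include <stdio.h>
    #include <stdlib.h>

    /* Uniform delay in [1, max_ticks]: hosts sharing a link spread their
     * reports across the querier's Max Resp Time window. */
    static unsigned igmp_random_delay(unsigned max_ticks)
    {
        return (unsigned)(random() % max_ticks) + 1;
    }

    int main(void) {
        srandom(1234);           /* any seed; illustration only */
        unsigned max = 100;      /* 10 s Max Resp Time in 1/10 s ticks */
        for (int i = 0; i < 4; i++)
            printf("delay = %u ticks\n", igmp_random_delay(max));
        return 0;
    }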
@@ -1027,7 +1050,6 @@ done: static uint32_t igmp_v2_update_group(struct in_multi *inm, const int timer) { - IGMP_INET_PRINTF(inm->inm_addr, ("%s: %s/%s timer=%d\n", __func__, _igmp_inet_buf, if_name(inm->inm_ifp), timer)); @@ -1045,7 +1067,7 @@ igmp_v2_update_group(struct in_multi *inm, const int timer) "skipping.\n", __func__)); break; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case IGMP_SG_QUERY_PENDING_MEMBER: case IGMP_G_QUERY_PENDING_MEMBER: case IGMP_IDLE_MEMBER: @@ -1063,7 +1085,7 @@ igmp_v2_update_group(struct in_multi *inm, const int timer) break; } - return (inm->inm_timer); + return inm->inm_timer; } /* @@ -1076,13 +1098,13 @@ static int igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, /*const*/ struct igmpv3 *igmpv3) { - struct igmp_ifinfo *igi; - struct in_multi *inm; - int is_general_query; - uint32_t maxresp, nsrc, qqi; - uint16_t timer; - uint8_t qrv; - struct igmp_tparams itp = { 0, 0, 0, 0 }; + struct igmp_ifinfo *igi; + struct in_multi *inm; + int is_general_query; + uint32_t maxresp, nsrc, qqi; + uint16_t timer; + uint8_t qrv; + struct igmp_tparams itp = { 0, 0, 0, 0 }; IGMP_LOCK_ASSERT_NOTHELD(); @@ -1091,10 +1113,10 @@ igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, IGMP_PRINTF(("%s: process v3 query on ifp 0x%llx(%s)\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */ + maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */ if (maxresp >= 128) { maxresp = IGMP_MANT(igmpv3->igmp_code) << - (IGMP_EXP(igmpv3->igmp_code) + 3); + (IGMP_EXP(igmpv3->igmp_code) + 3); } /* @@ -1113,12 +1135,13 @@ igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, qqi = igmpv3->igmp_qqi; if (qqi >= 128) { qqi = IGMP_MANT(igmpv3->igmp_qqi) << - (IGMP_EXP(igmpv3->igmp_qqi) + 3); + (IGMP_EXP(igmpv3->igmp_qqi) + 3); } timer = maxresp / IGMP_TIMER_SCALE; - if (timer == 0) + if (timer == 0) { timer = 1; + } nsrc = ntohs(igmpv3->igmp_numsrc); @@ -1143,10 +1166,11 @@ igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, is_general_query = 1; } else { /* Group or group-source specific query. 
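The maxresp and qqi decoding in the v3 query hunk above — IGMP_MANT(code) << (IGMP_EXP(code) + 3) when code >= 128 — is RFC 3376 §4.1.1's floating-point encoding: codes below 128 are literal, larger codes pack |1|exp|mant| into one byte. Written out with the implicit leading bit made explicit (assuming IGMP_MANT folds in that 0x10 bit, as the BSD headers do):

    #include <stdio.h>
    #include <stdint.h>

    /* RFC 3376 §4.1.1: Max Resp Code / QQIC decoding. */
    static uint32_t igmpv3_decode_code(uint8_t code)
    {
        if (code < 128)
            return code;                        /* literal value */
        uint32_t mant = (code & 0x0f) | 0x10;   /* implicit leading 1 */
        uint32_t exp  = (code >> 4) & 0x07;
        return mant << (exp + 3);
    }

    int main(void) {
        printf("0x0a -> %u\n", igmpv3_decode_code(0x0a));  /* 10    */
        printf("0xff -> %u\n", igmpv3_decode_code(0xff));  /* 31744 */
        return 0;
    }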
*/ - if (nsrc == 0) + if (nsrc == 0) { IGMPSTAT_INC(igps_rcv_group_queries); - else + } else { IGMPSTAT_INC(igps_rcv_gsr_queries); + } } igi = IGMP_IFINFO(ifp); @@ -1209,8 +1233,9 @@ igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, in_multihead_lock_shared(); IN_LOOKUP_MULTI(&igmpv3->igmp_group, ifp, inm); in_multihead_lock_done(); - if (inm == NULL) + if (inm == NULL) { goto done; + } INM_LOCK(inm); if (nsrc > 0) { @@ -1252,7 +1277,7 @@ done: } igmp_set_timeout(&itp); - return (0); + return 0; } /* @@ -1264,8 +1289,8 @@ static int igmp_input_v3_group_query(struct in_multi *inm, int timer, /*const*/ struct igmpv3 *igmpv3) { - int retval; - uint16_t nsrc; + int retval; + uint16_t nsrc; INM_LOCK_ASSERT_HELD(inm); @@ -1279,7 +1304,7 @@ igmp_input_v3_group_query(struct in_multi *inm, case IGMP_AWAKENING_MEMBER: case IGMP_IDLE_MEMBER: case IGMP_LEAVING_MEMBER: - return (retval); + return retval; case IGMP_REPORTING_MEMBER: case IGMP_G_QUERY_PENDING_MEMBER: case IGMP_SG_QUERY_PENDING_MEMBER: @@ -1302,7 +1327,7 @@ igmp_input_v3_group_query(struct in_multi *inm, } inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER; inm->inm_timer = IGMP_RANDOM_DELAY(timer); - return (retval); + return retval; } /* @@ -1312,7 +1337,7 @@ igmp_input_v3_group_query(struct in_multi *inm, if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) { timer = min(inm->inm_timer, timer); inm->inm_timer = IGMP_RANDOM_DELAY(timer); - return (retval); + return retval; } /* @@ -1330,15 +1355,16 @@ igmp_input_v3_group_query(struct in_multi *inm, * m_getptr() to walk the chain. */ if (inm->inm_nsrc > 0) { - const struct in_addr *ap; - int i, nrecorded; + const struct in_addr *ap; + int i, nrecorded; ap = (const struct in_addr *)(igmpv3 + 1); nrecorded = 0; for (i = 0; i < nsrc; i++, ap++) { retval = inm_record_source(inm, ap->s_addr); - if (retval < 0) + if (retval < 0) { break; + } nrecorded += retval; } if (nrecorded > 0) { @@ -1349,7 +1375,7 @@ igmp_input_v3_group_query(struct in_multi *inm, } } - return (retval); + return retval; } /* @@ -1368,14 +1394,15 @@ igmp_input_v1_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, OIGMPSTAT_INC(igps_rcv_reports); if ((ifp->if_flags & IFF_LOOPBACK) || - (m->m_pkthdr.pkt_flags & PKTF_LOOP)) - return (0); + (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { + return 0; + } if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr) || !in_hosteq(igmp->igmp_group, ip->ip_dst))) { IGMPSTAT_INC(igps_rcv_badreports); OIGMPSTAT_INC(igps_rcv_badreports); - return (EINVAL); + return EINVAL; } /* @@ -1425,12 +1452,13 @@ igmp_input_v1_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, */ IGI_LOCK(igi); if (igi->igi_version == IGMP_VERSION_3) { - if (igmp_legacysupp) + if (igmp_legacysupp) { igmp_v3_suppress_group_record(inm); + } IGI_UNLOCK(igi); INM_UNLOCK(inm); INM_REMREF(inm); /* from IN_LOOKUP_MULTI */ - return (0); + return 0; } INM_LOCK_ASSERT_HELD(inm); @@ -1455,10 +1483,11 @@ igmp_input_v1_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, ("report suppressed for %s on ifp 0x%llx(%s)\n", _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - if (igi->igi_version == IGMP_VERSION_1) + if (igi->igi_version == IGMP_VERSION_1) { inm->inm_state = IGMP_LAZY_MEMBER; - else if (igi->igi_version == IGMP_VERSION_2) + } else if (igi->igi_version == IGMP_VERSION_2) { inm->inm_state = IGMP_SLEEPING_MEMBER; + } break; case IGMP_G_QUERY_PENDING_MEMBER: case IGMP_SG_QUERY_PENDING_MEMBER: @@ -1470,7 +1499,7 @@ igmp_input_v1_report(struct ifnet *ifp, struct mbuf *m, 
/*const*/ struct ip *ip, INM_REMREF(inm); /* from IN_LOOKUP_MULTI */ } - return (0); + return 0; } /* @@ -1496,7 +1525,7 @@ igmp_input_v2_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, if (in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) { IFA_UNLOCK(&ia->ia_ifa); IFA_REMREF(&ia->ia_ifa); - return (0); + return 0; } IFA_UNLOCK(&ia->ia_ifa); } @@ -1506,18 +1535,20 @@ igmp_input_v2_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, if ((ifp->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); - return (0); + } + return 0; } if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) || !in_hosteq(igmp->igmp_group, ip->ip_dst)) { - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } IGMPSTAT_INC(igps_rcv_badreports); OIGMPSTAT_INC(igps_rcv_badreports); - return (EINVAL); + return EINVAL; } /* @@ -1534,8 +1565,9 @@ igmp_input_v2_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, IFA_UNLOCK(&ia->ia_ifa); } } - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } IGMP_INET_PRINTF(igmp->igmp_group, ("process v2 report %s on ifp 0x%llx(%s)\n", _igmp_inet_buf, @@ -1567,12 +1599,13 @@ igmp_input_v2_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, */ IGI_LOCK(igi); if (igi->igi_version == IGMP_VERSION_3) { - if (igmp_legacysupp) + if (igmp_legacysupp) { igmp_v3_suppress_group_record(inm); + } IGI_UNLOCK(igi); INM_UNLOCK(inm); INM_REMREF(inm); - return (0); + return 0; } inm->inm_timer = 0; @@ -1602,7 +1635,7 @@ igmp_input_v2_report(struct ifnet *ifp, struct mbuf *m, /*const*/ struct ip *ip, INM_REMREF(inm); } - return (0); + return 0; } void @@ -1647,10 +1680,11 @@ igmp_input(struct mbuf *m, int off) * Always pullup to the minimum size for v1/v2 or v3 * to amortize calls to m_pulldown(). */ - if (igmplen >= IGMP_V3_QUERY_MINLEN) + if (igmplen >= IGMP_V3_QUERY_MINLEN) { minlen = IGMP_V3_QUERY_MINLEN; - else + } else { minlen = IGMP_MINLEN; + } /* A bit more expensive than M_STRUCT_GET, but ensures alignment */ M_STRUCT_GET0(igmp, struct igmp *, m, off, minlen); @@ -1689,10 +1723,11 @@ igmp_input(struct mbuf *m, int off) switch (igmp->igmp_type) { case IGMP_HOST_MEMBERSHIP_QUERY: if (igmplen == IGMP_MINLEN) { - if (igmp->igmp_code == 0) + if (igmp->igmp_code == 0) { queryver = IGMP_VERSION_1; - else + } else { queryver = IGMP_VERSION_2; + } } else if (igmplen >= IGMP_V3_QUERY_MINLEN) { queryver = IGMP_VERSION_3; } else { @@ -1707,8 +1742,9 @@ igmp_input(struct mbuf *m, int off) switch (queryver) { case IGMP_VERSION_1: IGMPSTAT_INC(igps_rcv_v1v2_queries); - if (!igmp_v1enable) + if (!igmp_v1enable) { break; + } if (igmp_input_v1_query(ifp, ip, igmp) != 0) { m_freem(m); return; @@ -1717,8 +1753,9 @@ igmp_input(struct mbuf *m, int off) case IGMP_VERSION_2: IGMPSTAT_INC(igps_rcv_v1v2_queries); - if (!igmp_v2enable) + if (!igmp_v2enable) { break; + } if (igmp_input_v2_query(ifp, ip, igmp) != 0) { m_freem(m); return; @@ -1726,63 +1763,64 @@ igmp_input(struct mbuf *m, int off) break; case IGMP_VERSION_3: { - struct igmpv3 *igmpv3; - uint16_t igmpv3len; - uint16_t srclen; - int nsrc; + struct igmpv3 *igmpv3; + uint16_t igmpv3len; + uint16_t srclen; + int nsrc; - IGMPSTAT_INC(igps_rcv_v3_queries); - igmpv3 = (struct igmpv3 *)igmp; - /* - * Validate length based on source count. 
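igmp_input() above distinguishes query versions purely by length and code: an 8-byte query with Max Resp Code 0 is IGMPv1, 8 bytes with a nonzero code is IGMPv2, and 12 bytes or more is IGMPv3. The rule in isolation (constants match IGMP_MINLEN and IGMP_V3_QUERY_MINLEN):

    #include <stdio.h>
    #include <stdint.h>

    #define IGMP_MINLEN           8
    #define IGMP_V3_QUERY_MINLEN 12

    /* Returns the inferred query version, or 0 for a malformed length. */
    static int igmp_query_version(uint16_t igmplen, uint8_t max_resp_code)
    {
        if (igmplen == IGMP_MINLEN)
            return max_resp_code == 0 ? 1 : 2;
        if (igmplen >= IGMP_V3_QUERY_MINLEN)
            return 3;
        return 0;
    }

    int main(void) {
        printf("len=8,  code=0   -> v%d\n", igmp_query_version(8, 0));
        printf("len=8,  code=100 -> v%d\n", igmp_query_version(8, 100));
        printf("len=16, code=10  -> v%d\n", igmp_query_version(16, 10));
        return 0;
    }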
- */ - nsrc = ntohs(igmpv3->igmp_numsrc); - /* - * The max vaue of nsrc is limited by the - * MTU of the network on which the datagram - * is received - */ - if (nsrc < 0 || nsrc > IGMP_V3_QUERY_MAX_SRCS) { - IGMPSTAT_INC(igps_rcv_tooshort); - OIGMPSTAT_INC(igps_rcv_tooshort); - m_freem(m); - return; - } - srclen = sizeof(struct in_addr) * nsrc; - if (igmplen < (IGMP_V3_QUERY_MINLEN + srclen)) { - IGMPSTAT_INC(igps_rcv_tooshort); - OIGMPSTAT_INC(igps_rcv_tooshort); - m_freem(m); - return; - } - igmpv3len = IGMP_V3_QUERY_MINLEN + srclen; - /* - * A bit more expensive than M_STRUCT_GET, - * but ensures alignment. - */ - M_STRUCT_GET0(igmpv3, struct igmpv3 *, m, - off, igmpv3len); - if (igmpv3 == NULL) { - IGMPSTAT_INC(igps_rcv_tooshort); - OIGMPSTAT_INC(igps_rcv_tooshort); - return; - } - /* - * N.B.: we assume the packet was correctly - * aligned in ip_input. - */ - if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) { - m_freem(m); - return; - } + IGMPSTAT_INC(igps_rcv_v3_queries); + igmpv3 = (struct igmpv3 *)igmp; + /* + * Validate length based on source count. + */ + nsrc = ntohs(igmpv3->igmp_numsrc); + /* + * The max value of nsrc is limited by the + * MTU of the network on which the datagram + * is received + */ + if (nsrc < 0 || nsrc > IGMP_V3_QUERY_MAX_SRCS) { + IGMPSTAT_INC(igps_rcv_tooshort); + OIGMPSTAT_INC(igps_rcv_tooshort); + m_freem(m); + return; } - break; + srclen = sizeof(struct in_addr) * nsrc; + if (igmplen < (IGMP_V3_QUERY_MINLEN + srclen)) { + IGMPSTAT_INC(igps_rcv_tooshort); + OIGMPSTAT_INC(igps_rcv_tooshort); + m_freem(m); + return; + } + igmpv3len = IGMP_V3_QUERY_MINLEN + srclen; + /* + * A bit more expensive than M_STRUCT_GET, + * but ensures alignment. + */ + M_STRUCT_GET0(igmpv3, struct igmpv3 *, m, + off, igmpv3len); + if (igmpv3 == NULL) { + IGMPSTAT_INC(igps_rcv_tooshort); + OIGMPSTAT_INC(igps_rcv_tooshort); + return; + } + /* + * N.B.: we assume the packet was correctly + * aligned in ip_input. + */ + if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) { + m_freem(m); + return; + } + } + break; } break; case IGMP_v1_HOST_MEMBERSHIP_REPORT: - if (!igmp_v1enable) + if (!igmp_v1enable) { break; + } if (igmp_input_v1_report(ifp, m, ip, igmp) != 0) { m_freem(m); return; @@ -1790,10 +1828,12 @@ igmp_input(struct mbuf *m, int off) break; case IGMP_v2_HOST_MEMBERSHIP_REPORT: - if (!igmp_v2enable) + if (!igmp_v2enable) { break; - if (!ip_checkrouteralert(m)) + } + if (!ip_checkrouteralert(m)) { IGMPSTAT_INC(igps_rcv_nora); + } if (igmp_input_v2_report(ifp, m, ip, igmp) != 0) { m_freem(m); return; @@ -1805,8 +1845,9 @@ igmp_input(struct mbuf *m, int off) * Hosts do not need to process IGMPv3 membership reports, * as report suppression is no longer required.
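The v3 branch above checks the advertised source count twice: once against an absolute cap (a malicious nsrc would otherwise drive a huge pull-up) and once against the actual packet length. The same arithmetic, with the cap written in as an assumed value — the BSD headers use 366, roughly what fits in an Ethernet MTU:

    #include <stdio.h>
    #include <stdint.h>

    #define IGMP_V3_QUERY_MINLEN   12
    #define IGMP_V3_QUERY_MAX_SRCS 366   /* assumed cap, MTU-bounded */

    /* Nonzero iff the query's source list is internally consistent. */
    static int igmpv3_query_len_ok(uint32_t igmplen, uint16_t nsrc)
    {
        if (nsrc > IGMP_V3_QUERY_MAX_SRCS)
            return 0;
        uint32_t srclen = 4u * nsrc;     /* sizeof(struct in_addr) each */
        return igmplen >= IGMP_V3_QUERY_MINLEN + srclen;
    }

    int main(void) {
        printf("%d\n", igmpv3_query_len_ok(12, 0));  /* 1: general query */
        printf("%d\n", igmpv3_query_len_ok(20, 2));  /* 1: two sources  */
        printf("%d\n", igmpv3_query_len_ok(16, 2));  /* 0: truncated    */
        return 0;
    }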
*/ - if (!ip_checkrouteralert(m)) + if (!ip_checkrouteralert(m)) { IGMPSTAT_INC(igps_rcv_nora); + } break; default: @@ -1833,14 +1874,18 @@ igmp_set_timeout(struct igmp_tparams *itp) if (itp->qpt != 0 || itp->it != 0 || itp->cst != 0 || itp->sct != 0) { IGMP_LOCK(); - if (itp->qpt != 0) + if (itp->qpt != 0) { querier_present_timers_running = 1; - if (itp->it != 0) + } + if (itp->it != 0) { interface_timers_running = 1; - if (itp->cst != 0) + } + if (itp->cst != 0) { current_state_timers_running = 1; - if (itp->sct != 0) + } + if (itp->sct != 0) { state_change_timers_running = 1; + } igmp_sched_timeout(); IGMP_UNLOCK(); } @@ -1853,13 +1898,13 @@ static void igmp_timeout(void *arg) { #pragma unused(arg) - struct ifqueue scq; /* State-change packets */ - struct ifqueue qrq; /* Query response packets */ - struct ifnet *ifp; - struct igmp_ifinfo *igi; - struct in_multi *inm; - int loop = 0, uri_sec = 0; - SLIST_HEAD(, in_multi) inm_dthead; + struct ifqueue scq; /* State-change packets */ + struct ifqueue qrq; /* Query response packets */ + struct ifnet *ifp; + struct igmp_ifinfo *igi; + struct in_multi *inm; + int loop = 0, uri_sec = 0; + SLIST_HEAD(, in_multi) inm_dthead; SLIST_INIT(&inm_dthead); @@ -1884,8 +1929,9 @@ igmp_timeout(void *arg) LIST_FOREACH(igi, &igi_head, igi_link) { IGI_LOCK(igi); igmp_v1v2_process_querier_timers(igi); - if (igi->igi_v1_timer > 0 || igi->igi_v2_timer > 0) + if (igi->igi_v1_timer > 0 || igi->igi_v2_timer > 0) { querier_present_timers_running = 1; + } IGI_UNLOCK(igi); } } @@ -1905,8 +1951,9 @@ igmp_timeout(void *arg) if (igi->igi_v3_timer == 0) { /* Do nothing. */ } else if (--igi->igi_v3_timer == 0) { - if (igmp_v3_dispatch_general_query(igi) > 0) + if (igmp_v3_dispatch_general_query(igi) > 0) { interface_timers_running = 1; + } } else { interface_timers_running = 1; } @@ -1915,8 +1962,9 @@ igmp_timeout(void *arg) } if (!current_state_timers_running && - !state_change_timers_running) + !state_change_timers_running) { goto out_locked; + } current_state_timers_running = 0; state_change_timers_running = 0; @@ -1946,20 +1994,21 @@ igmp_timeout(void *arg) IN_FIRST_MULTI(step, inm); while (inm != NULL) { INM_LOCK(inm); - if (inm->inm_ifp != ifp) + if (inm->inm_ifp != ifp) { goto next; + } IGI_LOCK(igi); switch (igi->igi_version) { - case IGMP_VERSION_1: - case IGMP_VERSION_2: - igmp_v1v2_process_group_timer(inm, - igi->igi_version); - break; - case IGMP_VERSION_3: - igmp_v3_process_group_timers(igi, &qrq, - &scq, inm, uri_sec); - break; + case IGMP_VERSION_1: + case IGMP_VERSION_2: + igmp_v1v2_process_group_timer(inm, + igi->igi_version); + break; + case IGMP_VERSION_3: + igmp_v3_process_group_timers(igi, &qrq, + &scq, inm, uri_sec); + break; } IGI_UNLOCK(igi); next: @@ -2105,8 +2154,8 @@ igmp_v1v2_process_group_timer(struct in_multi *inm, const int igmp_version) inm->inm_state = IGMP_IDLE_MEMBER; (void) igmp_v1v2_queue_report(inm, (igmp_version == IGMP_VERSION_2) ? - IGMP_v2_HOST_MEMBERSHIP_REPORT : - IGMP_v1_HOST_MEMBERSHIP_REPORT); + IGMP_v2_HOST_MEMBERSHIP_REPORT : + IGMP_v1_HOST_MEMBERSHIP_REPORT); INM_LOCK_ASSERT_HELD(inm); IGI_LOCK_ASSERT_HELD(inm->inm_igi); } @@ -2165,8 +2214,9 @@ igmp_v3_process_group_timers(struct igmp_ifinfo *igi, /* We are in timer callback, so be quick about it. 
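igmp_set_timeout() above coalesces work: callers latch one flag per timer class and a single timeout services them all. A sketch of the latching half, with locking elided and the callout arm stubbed out (field names mirror struct igmp_tparams in the hunk):

    #include <stdio.h>

    struct igmp_tparams { int qpt, it, cst, sct; };

    static int querier_present_timers_running;
    static int interface_timers_running;
    static int current_state_timers_running;
    static int state_change_timers_running;
    static int timeout_pending;

    /* Any nonzero request latches its class flag; the real code then
     * arms a one-shot kernel callout under IGMP_LOCK(). */
    static void set_timeout_sketch(const struct igmp_tparams *itp)
    {
        if (itp->qpt) querier_present_timers_running = 1;
        if (itp->it)  interface_timers_running = 1;
        if (itp->cst) current_state_timers_running = 1;
        if (itp->sct) state_change_timers_running = 1;
        if (!timeout_pending &&
            (querier_present_timers_running | interface_timers_running |
             current_state_timers_running | state_change_timers_running))
            timeout_pending = 1;   /* stand-in for scheduling the callout */
    }

    int main(void) {
        struct igmp_tparams itp = { 0, 1, 1, 0 };
        set_timeout_sketch(&itp);
        printf("pending=%d it=%d cst=%d\n", timeout_pending,
            interface_timers_running, current_state_timers_running);
        return 0;
    }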
*/ if (!state_change_retransmit_timer_expired && - !query_response_timer_expired) + !query_response_timer_expired) { return; + } switch (inm->inm_state) { case IGMP_NOT_MEMBER: @@ -2195,7 +2245,7 @@ igmp_v3_process_group_timers(struct igmp_ifinfo *igi, /* XXX Clear recorded sources for next time. */ inm_clear_recorded(inm); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case IGMP_REPORTING_MEMBER: case IGMP_LEAVING_MEMBER: if (state_change_retransmit_timer_expired) { @@ -2264,18 +2314,19 @@ igmp_v3_process_group_timers(struct igmp_ifinfo *igi, static void igmp_v3_suppress_group_record(struct in_multi *inm) { - INM_LOCK_ASSERT_HELD(inm); IGI_LOCK_ASSERT_HELD(inm->inm_igi); VERIFY(inm->inm_igi->igi_version == IGMP_VERSION_3); if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER || - inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER) + inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER) { return; + } - if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) + if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) { inm_clear_recorded(inm); + } inm->inm_timer = 0; inm->inm_state = IGMP_REPORTING_MEMBER; @@ -2326,7 +2377,7 @@ igmp_set_version(struct igmp_ifinfo *igi, const int igmp_version) IGI_LOCK_ASSERT_HELD(igi); - return (MAX(igi->igi_v1_timer, igi->igi_v2_timer)); + return MAX(igi->igi_v1_timer, igi->igi_v2_timer); } /* @@ -2341,9 +2392,9 @@ igmp_set_version(struct igmp_ifinfo *igi, const int igmp_version) static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi) { - struct ifnet *ifp; - struct in_multi *inm; - struct in_multistep step; + struct ifnet *ifp; + struct in_multi *inm; + struct in_multistep step; IGI_LOCK_ASSERT_HELD(igi); @@ -2368,8 +2419,9 @@ igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi) IN_FIRST_MULTI(step, inm); while (inm != NULL) { INM_LOCK(inm); - if (inm->inm_ifp != ifp) + if (inm->inm_ifp != ifp) { goto next; + } switch (inm->inm_state) { case IGMP_NOT_MEMBER: @@ -2400,11 +2452,11 @@ igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi) IGI_LOCK(igi); SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele); IGI_UNLOCK(igi); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case IGMP_G_QUERY_PENDING_MEMBER: case IGMP_SG_QUERY_PENDING_MEMBER: inm_clear_recorded(inm); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case IGMP_REPORTING_MEMBER: inm->inm_state = IGMP_REPORTING_MEMBER; break; @@ -2517,11 +2569,11 @@ igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi) static int igmp_v1v2_queue_report(struct in_multi *inm, const int type) { - struct ifnet *ifp; - struct igmp *igmp; - struct ip *ip; - struct mbuf *m; - int error = 0; + struct ifnet *ifp; + struct igmp *igmp; + struct ip *ip; + struct mbuf *m; + int error = 0; INM_LOCK_ASSERT_HELD(inm); IGI_LOCK_ASSERT_HELD(inm->inm_igi); @@ -2529,8 +2581,9 @@ igmp_v1v2_queue_report(struct in_multi *inm, const int type) ifp = inm->inm_ifp; MGETHDR(m, M_DONTWAIT, MT_DATA); - if (m == NULL) - return (ENOMEM); + if (m == NULL) { + return ENOMEM; + } MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp)); m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp); @@ -2555,16 +2608,18 @@ igmp_v1v2_queue_report(struct in_multi *inm, const int type) ip->ip_p = IPPROTO_IGMP; ip->ip_src.s_addr = INADDR_ANY; - if (type == IGMP_HOST_LEAVE_MESSAGE) + if (type == IGMP_HOST_LEAVE_MESSAGE) { ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP); - else + } else { ip->ip_dst = inm->inm_addr; + } igmp_save_context(m, ifp); m->m_flags |= M_IGMPV2; - if (inm->inm_igi->igi_flags & IGIF_LOOPBACK) + if (inm->inm_igi->igi_flags & IGIF_LOOPBACK) { m->m_flags |= M_IGMP_LOOP; + } /* * 
Due to the fact that at this point we are possibly holding @@ -2582,7 +2637,7 @@ igmp_v1v2_queue_report(struct in_multi *inm, const int type) IF_ENQUEUE(&inm->inm_igi->igi_v2q, m); VERIFY(error == 0); } - return (error); + return error; } /* @@ -2611,7 +2666,7 @@ igmp_change_state(struct in_multi *inm, struct igmp_tparams *itp) int error = 0; VERIFY(itp != NULL); - bzero(itp, sizeof (*itp)); + bzero(itp, sizeof(*itp)); INM_LOCK_ASSERT_HELD(inm); VERIFY(inm->inm_igi != NULL); @@ -2654,7 +2709,7 @@ igmp_change_state(struct in_multi *inm, struct igmp_tparams *itp) error = igmp_handle_state_change(inm, igi, itp); out: - return (error); + return error; } /* @@ -2671,9 +2726,9 @@ static int igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi, struct igmp_tparams *itp) { - struct ifnet *ifp; - struct ifqueue *ifq; - int error, retval, syncstates; + struct ifnet *ifp; + struct ifqueue *ifq; + int error, retval, syncstates; INM_LOCK_ASSERT_HELD(inm); IGI_LOCK_ASSERT_NOTHELD(igi); @@ -2731,8 +2786,8 @@ igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi, inm->inm_state = IGMP_IDLE_MEMBER; error = igmp_v1v2_queue_report(inm, (igi->igi_version == IGMP_VERSION_2) ? - IGMP_v2_HOST_MEMBERSHIP_REPORT : - IGMP_v1_HOST_MEMBERSHIP_REPORT); + IGMP_v2_HOST_MEMBERSHIP_REPORT : + IGMP_v1_HOST_MEMBERSHIP_REPORT); INM_LOCK_ASSERT_HELD(inm); IGI_LOCK_ASSERT_HELD(igi); @@ -2802,7 +2857,7 @@ igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi, _igmp_inet_buf, if_name(inm->inm_ifp))); } - return (error); + return error; } /* @@ -2812,8 +2867,8 @@ static int igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi, struct igmp_tparams *itp) { - struct ifnet *ifp; - int retval = 0; + struct ifnet *ifp; + int retval = 0; INM_LOCK_ASSERT_HELD(inm); IGI_LOCK_ASSERT_NOTHELD(igi); @@ -2865,7 +2920,7 @@ igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi, itp->sct = 1; IGI_UNLOCK(igi); done: - return (retval); + return retval; } /* @@ -2958,11 +3013,11 @@ igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi, VERIFY(inm->inm_nrelecnt != 0); retval = igmp_v3_enqueue_group_record( - &inm->inm_scq, inm, 1, 0, 0); + &inm->inm_scq, inm, 1, 0, 0); itp->cst = (inm->inm_scq.ifq_len > 0); KASSERT(retval != 0, ("%s: enqueue record = %d\n", __func__, - retval)); + retval)); inm->inm_state = IGMP_LEAVING_MEMBER; inm->inm_sctimer = 1; @@ -3023,19 +3078,19 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, const int is_state_change, const int is_group_query, const int is_source_query) { - struct igmp_grouprec ig; - struct igmp_grouprec *pig; - struct ifnet *ifp; - struct ip_msource *ims, *nims; - struct mbuf *m0, *m, *md; - int error, is_filter_list_change; - int minrec0len, m0srcs, msrcs, nbytes, off; - int record_has_sources; - int now; - int type; - in_addr_t naddr; - uint8_t mode; - u_int16_t ig_numsrc; + struct igmp_grouprec ig; + struct igmp_grouprec *pig; + struct ifnet *ifp; + struct ip_msource *ims, *nims; + struct mbuf *m0, *m, *md; + int error, is_filter_list_change; + int minrec0len, m0srcs, msrcs, nbytes, off; + int record_has_sources; + int now; + int type; + in_addr_t naddr; + uint8_t mode; + u_int16_t ig_numsrc; INM_LOCK_ASSERT_HELD(inm); IGI_LOCK_ASSERT_HELD(inm->inm_igi); @@ -3060,8 +3115,9 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, * the generation of source records. 
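igmp_v3_enqueue_group_record() below budgets sources per packet as (MTU - IGMP_LEADINGSPACE - sizeof(struct igmp_grouprec)) / sizeof(in_addr_t). The arithmetic, with the headroom constant assumed to be IP header + Router Alert + report header as in the BSD sources:

    #include <stdio.h>

    #define IGMP_LEADINGSPACE (20 + 4 + 8)  /* assumed: ip + RA + report hdr */

    /* Same shape as struct igmp_grouprec: type, aux len, source count,
     * group address = 8 bytes before the source list begins. */
    struct grouprec_hdr {
        unsigned char  type, auxlen;
        unsigned short nsrc;
        unsigned int   group;
    };

    static int sources_per_record(int mtu)
    {
        return (mtu - IGMP_LEADINGSPACE - (int)sizeof(struct grouprec_hdr)) / 4;
    }

    int main(void) {
        /* 365 for Ethernet: one reason the per-query source cap sits
         * in the mid-300s. */
        printf("MTU 1500 -> %d sources per record\n", sources_per_record(1500));
        return 0;
    }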
*/ if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 && - inm->inm_nsrc == 0) + inm->inm_nsrc == 0) { record_has_sources = 0; + } if (is_state_change) { /* @@ -3083,8 +3139,9 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, IGMP_PRINTF(("%s: change to INCLUDE\n", __func__)); type = IGMP_CHANGE_TO_INCLUDE_MODE; - if (mode == MCAST_UNDEFINED) + if (mode == MCAST_UNDEFINED) { record_has_sources = 0; + } } } else { if (record_has_sources) { @@ -3108,15 +3165,16 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, /* * Generate the filter list changes using a separate function. */ - if (is_filter_list_change) - return (igmp_v3_enqueue_filter_change(ifq, inm)); + if (is_filter_list_change) { + return igmp_v3_enqueue_filter_change(ifq, inm); + } if (type == IGMP_DO_NOTHING) { IGMP_INET_PRINTF(inm->inm_addr, ("%s: nothing to do for %s/%s\n", __func__, _igmp_inet_buf, if_name(inm->inm_ifp))); - return (0); + return 0; } /* @@ -3125,8 +3183,9 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, * ideally more. */ minrec0len = sizeof(struct igmp_grouprec); - if (record_has_sources) + if (record_has_sources) { minrec0len += sizeof(in_addr_t); + } IGMP_INET_PRINTF(inm->inm_addr, ("%s: queueing %s for %s/%s\n", __func__, @@ -3146,31 +3205,34 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, m0 != NULL && (m0->m_pkthdr.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) && (m0->m_pkthdr.len + minrec0len) < - (ifp->if_mtu - IGMP_LEADINGSPACE)) { + (ifp->if_mtu - IGMP_LEADINGSPACE)) { m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - - sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); + sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); m = m0; IGMP_PRINTF(("%s: use existing packet\n", __func__)); } else { if (IF_QFULL(ifq)) { IGMP_PRINTF(("%s: outbound queue full\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m = NULL; m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); if (!is_state_change && !is_group_query) { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); - if (m) + if (m) { m->m_data += IGMP_LEADINGSPACE; + } } if (m == NULL) { m = m_gethdr(M_DONTWAIT, MT_DATA); - if (m) + if (m) { MH_ALIGN(m, IGMP_LEADINGSPACE); + } + } + if (m == NULL) { + return -ENOMEM; } - if (m == NULL) - return (-ENOMEM); igmp_save_context(m, ifp); @@ -3186,10 +3248,11 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, ig.ig_numsrc = 0; ig.ig_group = inm->inm_addr; if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { - if (m != m0) + if (m != m0) { m_freem(m); + } IGMP_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } nbytes += sizeof(struct igmp_grouprec); @@ -3239,29 +3302,32 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, IGMP_PRINTF(("%s: append node\n", __func__)); naddr = htonl(ims->ims_haddr); if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } IGMP_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } nbytes += sizeof(in_addr_t); ++msrcs; - if (msrcs == m0srcs) + if (msrcs == m0srcs) { break; + } } IGMP_PRINTF(("%s: msrcs is %d this packet\n", __func__, msrcs)); ig_numsrc = htons(msrcs); - bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof (ig_numsrc)); + bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof(ig_numsrc)); nbytes += (msrcs * sizeof(in_addr_t)); } if (is_source_query && msrcs == 0) { IGMP_PRINTF(("%s: no 
recorded sources to report\n", __func__)); - if (m != m0) + if (m != m0) { m_freem(m); - return (0); + } + return 0; } /* @@ -3277,8 +3343,9 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, /* * No further work needed if no source list in packet(s). */ - if (!record_has_sources) - return (nbytes); + if (!record_has_sources) { + return nbytes; + } /* * Whilst sources remain to be announced, we need to allocate @@ -3288,18 +3355,21 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, while (nims != NULL) { if (IF_QFULL(ifq)) { IGMP_PRINTF(("%s: outbound queue full\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); - if (m) + if (m) { m->m_data += IGMP_LEADINGSPACE; + } if (m == NULL) { m = m_gethdr(M_DONTWAIT, MT_DATA); - if (m) + if (m) { MH_ALIGN(m, IGMP_LEADINGSPACE); + } + } + if (m == NULL) { + return -ENOMEM; } - if (m == NULL) - return (-ENOMEM); igmp_save_context(m, ifp); md = m_getptr(m, 0, &off); pig = (struct igmp_grouprec *)(void *) @@ -3307,10 +3377,11 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, IGMP_PRINTF(("%s: allocated next packet\n", __func__)); if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { - if (m != m0) + if (m != m0) { m_freem(m); + } IGMP_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m->m_pkthdr.vt_nrecs = 1; nbytes += sizeof(struct igmp_grouprec); @@ -3340,25 +3411,27 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, IGMP_PRINTF(("%s: append node\n", __func__)); naddr = htonl(ims->ims_haddr); if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } IGMP_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } ++msrcs; - if (msrcs == m0srcs) + if (msrcs == m0srcs) { break; + } } ig_numsrc = htons(msrcs); - bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof (ig_numsrc)); + bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof(ig_numsrc)); nbytes += (msrcs * sizeof(in_addr_t)); IGMP_PRINTF(("%s: enqueueing next packet\n", __func__)); IF_ENQUEUE(ifq, m); } - return (nbytes); + return nbytes; } /* @@ -3367,9 +3440,9 @@ igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, * current filter modes on each ip_msource node. 
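Both enqueue paths above store the record's source count with htons() into a local and then bcopy() into the packet, rather than assigning through pig->ig_numsrc directly: the group record can start at any byte offset inside an mbuf, and the copy is the portable unaligned-safe store. The idiom in isolation:

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>

    /* Store a 16-bit value big-endian at an arbitrary (possibly odd)
     * offset without an unaligned direct write. */
    static void store_u16be(unsigned char *dst, unsigned short host_val)
    {
        unsigned short be = htons(host_val);
        memcpy(dst, &be, sizeof(be));
    }

    int main(void) {
        unsigned char buf[5] = {0};
        store_u16be(buf + 1, 366);               /* odd offset on purpose */
        printf("%02x %02x\n", buf[1], buf[2]);   /* 01 6e */
        return 0;
    }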
*/ typedef enum { - REC_NONE = 0x00, /* MCAST_UNDEFINED */ - REC_ALLOW = 0x01, /* MCAST_INCLUDE */ - REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ + REC_NONE = 0x00, /* MCAST_UNDEFINED */ + REC_ALLOW = 0x01, /* MCAST_INCLUDE */ + REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ REC_FULL = REC_ALLOW | REC_BLOCK } rectype_t; @@ -3399,37 +3472,38 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) { static const int MINRECLEN = sizeof(struct igmp_grouprec) + sizeof(in_addr_t); - struct ifnet *ifp; - struct igmp_grouprec ig; - struct igmp_grouprec *pig; - struct ip_msource *ims, *nims; - struct mbuf *m, *m0, *md; - in_addr_t naddr; - int m0srcs, nbytes, npbytes, off, rsrcs, schanged; - int nallow, nblock; - uint8_t mode, now, then; - rectype_t crt, drt, nrt; - u_int16_t ig_numsrc; + struct ifnet *ifp; + struct igmp_grouprec ig; + struct igmp_grouprec *pig; + struct ip_msource *ims, *nims; + struct mbuf *m, *m0, *md; + in_addr_t naddr; + int m0srcs, nbytes, npbytes, off, rsrcs, schanged; + int nallow, nblock; + uint8_t mode, now, then; + rectype_t crt, drt, nrt; + u_int16_t ig_numsrc; INM_LOCK_ASSERT_HELD(inm); if (inm->inm_nsrc == 0 || - (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0)) - return (0); - - ifp = inm->inm_ifp; /* interface */ - mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */ - crt = REC_NONE; /* current group record type */ - drt = REC_NONE; /* mask of completed group record types */ - nrt = REC_NONE; /* record type for current node */ - m0srcs = 0; /* # source which will fit in current mbuf chain */ - nbytes = 0; /* # of bytes appended to group's state-change queue */ - npbytes = 0; /* # of bytes appended this packet */ - rsrcs = 0; /* # sources encoded in current record */ - schanged = 0; /* # nodes encoded in overall filter change */ - nallow = 0; /* # of source entries in ALLOW_NEW */ - nblock = 0; /* # of source entries in BLOCK_OLD */ - nims = NULL; /* next tree node pointer */ + (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0)) { + return 0; + } + + ifp = inm->inm_ifp; /* interface */ + mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */ + crt = REC_NONE; /* current group record type */ + drt = REC_NONE; /* mask of completed group record types */ + nrt = REC_NONE; /* record type for current node */ + m0srcs = 0; /* # source which will fit in current mbuf chain */ + nbytes = 0; /* # of bytes appended to group's state-change queue */ + npbytes = 0; /* # of bytes appended this packet */ + rsrcs = 0; /* # sources encoded in current record */ + schanged = 0; /* # nodes encoded in overall filter change */ + nallow = 0; /* # of source entries in ALLOW_NEW */ + nblock = 0; /* # of source entries in BLOCK_OLD */ + nims = NULL; /* next tree node pointer */ /* * For each possible filter record mode. 
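The rectype_t walk above emits one record type per pass: sources present only in the new filter state become ALLOW_NEW_SOURCES, sources present only in the old state become BLOCK_OLD_SOURCES, and unchanged sources are skipped. The per-source classification reduces to:

    #include <stdio.h>

    enum rec { REC_NONE, REC_ALLOW, REC_BLOCK };

    /* in_old/in_new: whether the source passes the filter at t0/t1. */
    static enum rec classify(int in_old, int in_new)
    {
        if (!in_old && in_new)  return REC_ALLOW;  /* ALLOW_NEW_SOURCES */
        if (in_old && !in_new)  return REC_BLOCK;  /* BLOCK_OLD_SOURCES */
        return REC_NONE;                           /* no change: skip   */
    }

    int main(void) {
        printf("added   -> %d\n", classify(0, 1));  /* 1 */
        printf("removed -> %d\n", classify(1, 0));  /* 2 */
        printf("kept    -> %d\n", classify(1, 1));  /* 0 */
        return 0;
    }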
@@ -3443,28 +3517,30 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) m0 = ifq->ifq_tail; if (m0 != NULL && (m0->m_pkthdr.vt_nrecs + 1 <= - IGMP_V3_REPORT_MAXRECS) && + IGMP_V3_REPORT_MAXRECS) && (m0->m_pkthdr.len + MINRECLEN) < - (ifp->if_mtu - IGMP_LEADINGSPACE)) { + (ifp->if_mtu - IGMP_LEADINGSPACE)) { m = m0; m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - - sizeof(struct igmp_grouprec)) / + sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); IGMP_PRINTF(("%s: use previous packet\n", __func__)); } else { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); - if (m) + if (m) { m->m_data += IGMP_LEADINGSPACE; + } if (m == NULL) { m = m_gethdr(M_DONTWAIT, MT_DATA); - if (m) + if (m) { MH_ALIGN(m, IGMP_LEADINGSPACE); + } } if (m == NULL) { IGMP_PRINTF(("%s: m_get*() failed\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m->m_pkthdr.vt_nrecs = 0; igmp_save_context(m, ifp); @@ -3485,11 +3561,12 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) memset(&ig, 0, sizeof(ig)); ig.ig_group = inm->inm_addr; if (!m_append(m, sizeof(ig), (void *)&ig)) { - if (m != m0) + if (m != m0) { m_freem(m); + } IGMP_PRINTF(("%s: m_append() failed\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } npbytes += sizeof(struct igmp_grouprec); if (m != m0) { @@ -3515,8 +3592,9 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) * however the converse is not true. */ rsrcs = 0; - if (nims == NULL) + if (nims == NULL) { nims = RB_MIN(ip_msource_tree, &inm->inm_srcs); + } RB_FOREACH_FROM(ims, ip_msource_tree, nims) { #ifdef IGMP_DEBUG char buf[MAX_IPv4_STR_LEN]; @@ -3540,25 +3618,29 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) continue; } nrt = (rectype_t)now; - if (nrt == REC_NONE) + if (nrt == REC_NONE) { nrt = (rectype_t)(~mode & REC_FULL); + } if (schanged++ == 0) { crt = nrt; - } else if (crt != nrt) + } else if (crt != nrt) { continue; + } naddr = htonl(ims->ims_haddr); if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } IGMP_PRINTF(("%s: m_append() failed\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } nallow += !!(crt == REC_ALLOW); nblock += !!(crt == REC_BLOCK); - if (++rsrcs == m0srcs) + if (++rsrcs == m0srcs) { break; + } } /* * If we did not append any tree nodes on this @@ -3574,24 +3656,26 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) IGMP_PRINTF(("%s: m_adj(m, -ig)\n", __func__)); m_adj(m, -((int)sizeof( - struct igmp_grouprec))); + struct igmp_grouprec))); } continue; } npbytes += (rsrcs * sizeof(in_addr_t)); - if (crt == REC_ALLOW) + if (crt == REC_ALLOW) { pig->ig_type = IGMP_ALLOW_NEW_SOURCES; - else if (crt == REC_BLOCK) + } else if (crt == REC_BLOCK) { pig->ig_type = IGMP_BLOCK_OLD_SOURCES; + } ig_numsrc = htons(rsrcs); - bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof (ig_numsrc)); + bcopy(&ig_numsrc, &pig->ig_numsrc, sizeof(ig_numsrc)); /* * Count the new group record, and enqueue this * packet if it wasn't already queued. 
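Two budgets gate the reuse of the queue tail in the hunk above: the per-packet record counter must stay within IGMP_V3_REPORT_MAXRECS, and at least one minimum-length record must still fit under the MTU less the reserved leading space. The same test restated as a standalone predicate (parameter names are stand-ins for the mbuf fields the kernel checks):

/* Reuse the tail packet only if one more record of 'minreclen' bytes
 * fits and the IGMPv3 report record counter would not overflow. */
static int
can_reuse_tail(int tail_recs, int max_recs,
    int tail_len, int minreclen, int mtu, int leadingspace)
{
        return (tail_recs + 1 <= max_recs) &&
            (tail_len + minreclen < mtu - leadingspace);
}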
*/ m->m_pkthdr.vt_nrecs++; - if (m != m0) + if (m != m0) { IF_ENQUEUE(ifq, m); + } nbytes += npbytes; } while (nims != NULL); drt |= crt; @@ -3601,19 +3685,19 @@ igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) IGMP_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__, nallow, nblock)); - return (nbytes); + return nbytes; } static int igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) { - struct ifqueue *gq; - struct mbuf *m; /* pending state-change */ - struct mbuf *m0; /* copy of pending state-change */ - struct mbuf *mt; /* last state-change in packet */ - struct mbuf *n; - int docopy, domerge; - u_int recslen; + struct ifqueue *gq; + struct mbuf *m; /* pending state-change */ + struct mbuf *m0; /* copy of pending state-change */ + struct mbuf *mt; /* last state-change in packet */ + struct mbuf *n; + int docopy, domerge; + u_int recslen; INM_LOCK_ASSERT_HELD(inm); @@ -3625,8 +3709,9 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) * If there are further pending retransmissions, make a writable * copy of each queued state-change message before merging. */ - if (inm->inm_scrv > 0) + if (inm->inm_scrv > 0) { docopy = 1; + } gq = &inm->inm_scq; #ifdef IGMP_DEBUG @@ -3659,8 +3744,9 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) m->m_pkthdr.vt_nrecs <= IGMP_V3_REPORT_MAXRECS) && (mt->m_pkthdr.len + recslen <= - (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE))) + (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE))) { domerge = 1; + } } if (!domerge && IF_QFULL(gq)) { @@ -3687,8 +3773,9 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) IGMP_PRINTF(("%s: copying 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(m))); m0 = m_dup(m, M_NOWAIT); - if (m0 == NULL) - return (ENOMEM); + if (m0 == NULL) { + return ENOMEM; + } m0->m_nextpkt = NULL; m = m->m_nextpkt; } @@ -3699,7 +3786,7 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) (uint64_t)VM_KERNEL_ADDRPERM(ifscq))); IF_ENQUEUE(ifscq, m0); } else { - struct mbuf *mtl; /* last mbuf of packet mt */ + struct mbuf *mtl; /* last mbuf of packet mt */ IGMP_PRINTF(("%s: merging 0x%llx with ifscq tail " "0x%llx)\n", __func__, @@ -3716,7 +3803,7 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) } } - return (0); + return 0; } /* @@ -3725,10 +3812,10 @@ igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) static uint32_t igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi) { - struct ifnet *ifp; - struct in_multi *inm; - struct in_multistep step; - int retval, loop; + struct ifnet *ifp; + struct in_multi *inm; + struct in_multistep step; + int retval, loop; IGI_LOCK_ASSERT_HELD(igi); @@ -3741,8 +3828,9 @@ igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi) IN_FIRST_MULTI(step, inm); while (inm != NULL) { INM_LOCK(inm); - if (inm->inm_ifp != ifp) + if (inm->inm_ifp != ifp) { goto next; + } switch (inm->inm_state) { case IGMP_NOT_MEMBER: @@ -3782,10 +3870,10 @@ next: */ if (igi->igi_gq.ifq_head != NULL) { igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY( - IGMP_RESPONSE_BURST_INTERVAL); + IGMP_RESPONSE_BURST_INTERVAL); } - return (igi->igi_v3_timer); + return igi->igi_v3_timer; } /* @@ -3796,11 +3884,11 @@ next: static void igmp_sendpkt(struct mbuf *m) { - struct ip_moptions *imo; - struct mbuf *ipopts, *m0; - int error; - struct route ro; - struct ifnet *ifp; + struct ip_moptions *imo; + struct mbuf *ipopts, *m0; + int error; + struct route ro; + struct ifnet *ifp; 
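The docopy branch in igmp_v3_merge_state_changes() above exists because a state-change message may still have to be retransmitted (inm_scrv > 0): the original must stay on the per-group queue, so only a duplicate is handed to the dispatch queue. A plain-buffer stand-in for what m_dup() provides here:

#include <stdlib.h>
#include <string.h>

/* Deep-copy a pending record; the caller enqueues the copy and keeps
 * the original for later retransmission (cf. m_dup(m, M_NOWAIT)). */
static void *
dup_for_merge(const void *rec, size_t len)
{
        void *copy = malloc(len);
        if (copy != NULL)
                memcpy(copy, rec, len);
        return copy;            /* NULL corresponds to the ENOMEM path */
}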
IGMP_PRINTF(("%s: transmit 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(m))); @@ -3835,10 +3923,11 @@ igmp_sendpkt(struct mbuf *m) * MANET interface and the routing protocol needs to see the * updates), handle this now. */ - if (m->m_flags & M_IGMP_LOOP) + if (m->m_flags & M_IGMP_LOOP) { imo->imo_multicast_ifp = lo_ifp; - else + } else { imo->imo_multicast_ifp = ifp; + } if (m->m_flags & M_IGMPV2) { m0 = m; @@ -3872,7 +3961,7 @@ igmp_sendpkt(struct mbuf *m) */ (void) m_set_service_class(m0, MBUF_SC_CTL); } - bzero(&ro, sizeof (ro)); + bzero(&ro, sizeof(ro)); error = ip_output(m0, ipopts, &ro, 0, imo, NULL); ROUTE_RELEASE(&ro); @@ -3901,9 +3990,9 @@ igmp_sendpkt(struct mbuf *m) static struct mbuf * igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) { - struct igmp_report *igmp; - struct ip *ip; - int hdrlen, igmpreclen; + struct igmp_report *igmp; + struct ip *ip; + int hdrlen, igmpreclen; VERIFY((m->m_flags & M_PKTHDR)); @@ -3914,8 +4003,9 @@ igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) igmpreclen -= hdrlen; } else { M_PREPEND(m, hdrlen, M_DONTWAIT, 1); - if (m == NULL) - return (NULL); + if (m == NULL) { + return NULL; + } m->m_flags |= M_IGMPV3_HDR; } @@ -3959,7 +4049,7 @@ igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP); - return (m); + return m; } #ifdef IGMP_DEBUG @@ -3967,20 +4057,20 @@ static const char * igmp_rec_type_to_str(const int type) { switch (type) { - case IGMP_CHANGE_TO_EXCLUDE_MODE: - return "TO_EX"; - case IGMP_CHANGE_TO_INCLUDE_MODE: - return "TO_IN"; - case IGMP_MODE_IS_EXCLUDE: - return "MODE_EX"; - case IGMP_MODE_IS_INCLUDE: - return "MODE_IN"; - case IGMP_ALLOW_NEW_SOURCES: - return "ALLOW_NEW"; - case IGMP_BLOCK_OLD_SOURCES: - return "BLOCK_OLD"; - default: - break; + case IGMP_CHANGE_TO_EXCLUDE_MODE: + return "TO_EX"; + case IGMP_CHANGE_TO_INCLUDE_MODE: + return "TO_IN"; + case IGMP_MODE_IS_EXCLUDE: + return "MODE_EX"; + case IGMP_MODE_IS_INCLUDE: + return "MODE_IN"; + case IGMP_ALLOW_NEW_SOURCES: + return "ALLOW_NEW"; + case IGMP_BLOCK_OLD_SOURCES: + return "BLOCK_OLD"; + default: + break; } return "unknown"; } @@ -3992,10 +4082,11 @@ igmp_init(struct protosw *pp, struct domain *dp) #pragma unused(dp) static int igmp_initialized = 0; - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); - if (igmp_initialized) + if (igmp_initialized) { return; + } igmp_initialized = 1; IGMP_PRINTF(("%s: initializing\n", __func__)); @@ -4011,7 +4102,7 @@ igmp_init(struct protosw *pp, struct domain *dp) LIST_INIT(&igi_head); m_raopt = igmp_ra_alloc(); - igi_size = sizeof (struct igmp_ifinfo); + igi_size = sizeof(struct igmp_ifinfo); igi_zone = zinit(igi_size, IGI_ZONE_MAX * igi_size, 0, IGI_ZONE_NAME); if (igi_zone == NULL) { diff --git a/bsd/netinet/igmp.h b/bsd/netinet/igmp.h index 271b9855c..8c3d71a71 100644 --- a/bsd/netinet/igmp.h +++ b/bsd/netinet/igmp.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -78,104 +78,104 @@ */ /* Minimum length of any IGMP protocol message. */ -#define IGMP_MINLEN 8 +#define IGMP_MINLEN 8 /* * IGMPv1/v2 query and host report format. */ struct igmp { - u_char igmp_type; /* version & type of IGMP message */ - u_char igmp_code; /* subtype for routing msgs */ - u_short igmp_cksum; /* IP-style checksum */ - struct in_addr igmp_group; /* group address being reported */ -}; /* (zero for queries) */ + u_char igmp_type; /* version & type of IGMP message */ + u_char igmp_code; /* subtype for routing msgs */ + u_short igmp_cksum; /* IP-style checksum */ + struct in_addr igmp_group; /* group address being reported */ +}; /* (zero for queries) */ /* * IGMP v3 query format. */ struct igmpv3 { - u_char igmp_type; /* version & type of IGMP message */ - u_char igmp_code; /* subtype for routing msgs */ - u_short igmp_cksum; /* IP-style checksum */ - struct in_addr igmp_group; /* group address being reported */ - /* (zero for queries) */ - u_char igmp_misc; /* reserved/suppress/robustness */ - u_char igmp_qqi; /* querier's query interval */ - u_short igmp_numsrc; /* number of sources */ + u_char igmp_type; /* version & type of IGMP message */ + u_char igmp_code; /* subtype for routing msgs */ + u_short igmp_cksum; /* IP-style checksum */ + struct in_addr igmp_group; /* group address being reported */ + /* (zero for queries) */ + u_char igmp_misc; /* reserved/suppress/robustness */ + u_char igmp_qqi; /* querier's query interval */ + u_short igmp_numsrc; /* number of sources */ /*struct in_addr igmp_sources[1];*/ /* source addresses */ }; -#define IGMP_V3_QUERY_MINLEN 12 -#define IGMP_V3_QUERY_MAX_SRCS 366 /* From RFC 3376, section 4.1.8 */ -#define IGMP_EXP(x) (((x) >> 4) & 0x07) -#define IGMP_MANT(x) ((x) & 0x0f) -#define IGMP_QRESV(x) (((x) >> 4) & 0x0f) -#define IGMP_SFLAG(x) (((x) >> 3) & 0x01) -#define IGMP_QRV(x) ((x) & 0x07) +#define IGMP_V3_QUERY_MINLEN 12 +#define IGMP_V3_QUERY_MAX_SRCS 366 /* From RFC 3376, section 4.1.8 */ +#define IGMP_EXP(x) (((x) >> 4) & 0x07) +#define IGMP_MANT(x) ((x) & 0x0f) +#define IGMP_QRESV(x) (((x) >> 4) & 0x0f) +#define IGMP_SFLAG(x) (((x) >> 3) & 0x01) +#define IGMP_QRV(x) ((x) & 0x07) struct igmp_grouprec { - u_char ig_type; /* record type */ - u_char ig_datalen; /* length of auxiliary data */ - u_short ig_numsrc; /* number of sources */ - struct in_addr ig_group; /* group address being reported */ + u_char ig_type; /* record type */ + u_char ig_datalen; /* length of auxiliary data */ + u_short ig_numsrc; /* number of sources */ + struct in_addr ig_group; /* group address being reported */ /*struct in_addr ig_sources[1];*/ /* source addresses */ }; -#define IGMP_GRPREC_HDRLEN 8 +#define IGMP_GRPREC_HDRLEN 8 /* * IGMPv3 host membership report header. 
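IGMP_EXP and IGMP_MANT above decode the "floating point" encoding RFC 3376 shares between the Max Resp Code and QQIC fields: codes below 128 are taken literally, larger codes pack a 3-bit exponent and a 4-bit mantissa. A self-contained decoder built from the same macros:

#include <stdint.h>

#define EXP(x)  (((x) >> 4) & 0x07)     /* IGMP_EXP */
#define MANT(x) ((x) & 0x0f)            /* IGMP_MANT */

/* RFC 3376, section 4.1.1: once the high bit is set, the value is
 * (mant | 0x10) << (exp + 3). */
static uint32_t
decode_code(uint8_t code)
{
        if (code < 128)
                return code;
        return (uint32_t)(MANT(code) | 0x10) << (EXP(code) + 3);
}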
*/ struct igmp_report { - u_char ir_type; /* IGMP_v3_HOST_MEMBERSHIP_REPORT */ - u_char ir_rsv1; /* must be zero */ - u_short ir_cksum; /* checksum */ - u_short ir_rsv2; /* must be zero */ - u_short ir_numgrps; /* number of group records */ + u_char ir_type; /* IGMP_v3_HOST_MEMBERSHIP_REPORT */ + u_char ir_rsv1; /* must be zero */ + u_short ir_cksum; /* checksum */ + u_short ir_rsv2; /* must be zero */ + u_short ir_numgrps; /* number of group records */ /*struct igmp_grouprec ir_groups[1];*/ /* group records */ }; -#define IGMP_V3_REPORT_MINLEN 8 -#define IGMP_V3_REPORT_MAXRECS 65535 +#define IGMP_V3_REPORT_MINLEN 8 +#define IGMP_V3_REPORT_MAXRECS 65535 /* * Message types, including version number. */ -#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* membership query */ -#define IGMP_v1_HOST_MEMBERSHIP_REPORT 0x12 /* Ver. 1 membership report */ -#define IGMP_DVMRP 0x13 /* DVMRP routing message */ -#define IGMP_PIM 0x14 /* PIMv1 message (historic) */ -#define IGMP_v2_HOST_MEMBERSHIP_REPORT 0x16 /* Ver. 2 membership report */ -#define IGMP_HOST_LEAVE_MESSAGE 0x17 /* Leave-group message */ -#define IGMP_MTRACE_REPLY 0x1e /* mtrace(8) reply */ -#define IGMP_MTRACE_QUERY 0x1f /* mtrace(8) probe */ -#define IGMP_v3_HOST_MEMBERSHIP_REPORT 0x22 /* Ver. 3 membership report */ +#define IGMP_HOST_MEMBERSHIP_QUERY 0x11 /* membership query */ +#define IGMP_v1_HOST_MEMBERSHIP_REPORT 0x12 /* Ver. 1 membership report */ +#define IGMP_DVMRP 0x13 /* DVMRP routing message */ +#define IGMP_PIM 0x14 /* PIMv1 message (historic) */ +#define IGMP_v2_HOST_MEMBERSHIP_REPORT 0x16 /* Ver. 2 membership report */ +#define IGMP_HOST_LEAVE_MESSAGE 0x17 /* Leave-group message */ +#define IGMP_MTRACE_REPLY 0x1e /* mtrace(8) reply */ +#define IGMP_MTRACE_QUERY 0x1f /* mtrace(8) probe */ +#define IGMP_v3_HOST_MEMBERSHIP_REPORT 0x22 /* Ver. 3 membership report */ /* * IGMPv3 report modes. */ -#define IGMP_DO_NOTHING 0 /* don't send a record */ -#define IGMP_MODE_IS_INCLUDE 1 /* MODE_IN */ -#define IGMP_MODE_IS_EXCLUDE 2 /* MODE_EX */ -#define IGMP_CHANGE_TO_INCLUDE_MODE 3 /* TO_IN */ -#define IGMP_CHANGE_TO_EXCLUDE_MODE 4 /* TO_EX */ -#define IGMP_ALLOW_NEW_SOURCES 5 /* ALLOW_NEW */ -#define IGMP_BLOCK_OLD_SOURCES 6 /* BLOCK_OLD */ +#define IGMP_DO_NOTHING 0 /* don't send a record */ +#define IGMP_MODE_IS_INCLUDE 1 /* MODE_IN */ +#define IGMP_MODE_IS_EXCLUDE 2 /* MODE_EX */ +#define IGMP_CHANGE_TO_INCLUDE_MODE 3 /* TO_IN */ +#define IGMP_CHANGE_TO_EXCLUDE_MODE 4 /* TO_EX */ +#define IGMP_ALLOW_NEW_SOURCES 5 /* ALLOW_NEW */ +#define IGMP_BLOCK_OLD_SOURCES 6 /* BLOCK_OLD */ /* * IGMPv3 query types. */ -#define IGMP_V3_GENERAL_QUERY 1 -#define IGMP_V3_GROUP_QUERY 2 -#define IGMP_V3_GROUP_SOURCE_QUERY 3 +#define IGMP_V3_GENERAL_QUERY 1 +#define IGMP_V3_GROUP_QUERY 2 +#define IGMP_V3_GROUP_SOURCE_QUERY 3 /* * Maximum report interval for IGMP v1/v2 host membership reports [RFC 1112] */ -#define IGMP_V1V2_MAX_RI 10 -#define IGMP_MAX_HOST_REPORT_DELAY IGMP_V1V2_MAX_RI +#define IGMP_V1V2_MAX_RI 10 +#define IGMP_MAX_HOST_REPORT_DELAY IGMP_V1V2_MAX_RI /* * IGMP_TIMER_SCALE denotes that the igmp code field specifies * time in tenths of a second. */ -#define IGMP_TIMER_SCALE 10 +#define IGMP_TIMER_SCALE 10 #endif /* _NETINET_IGMP_H_ */ diff --git a/bsd/netinet/igmp_var.h b/bsd/netinet/igmp_var.h index 22181c9e3..5a592101d 100644 --- a/bsd/netinet/igmp_var.h +++ b/bsd/netinet/igmp_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. 
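On how the report modes above divide the work in practice: a change of the group's filter mode is announced with a TO_IN or TO_EX record, while source-list edits under an unchanged mode travel as ALLOW_NEW/BLOCK_OLD deltas (the actual selection lives in igmp_v3_enqueue_group_record()). A compressed sketch, under the assumption that filter modes follow MCAST_INCLUDE == 1 and MCAST_EXCLUDE == 2:

#define DO_NOTHING 0    /* IGMP_DO_NOTHING */
#define TO_IN      3    /* IGMP_CHANGE_TO_INCLUDE_MODE */
#define TO_EX      4    /* IGMP_CHANGE_TO_EXCLUDE_MODE */

static int
change_rectype(int mode_t0, int mode_t1)
{
        if (mode_t0 == mode_t1)
                return DO_NOTHING;      /* sources go as ALLOW/BLOCK */
        return (mode_t1 == 2 /* EXCLUDE */) ? TO_EX : TO_IN;
}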
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -81,33 +81,33 @@ struct igmpstat_v3 { /* * Structure header (to insulate ABI changes). */ - uint32_t igps_version; /* version of this structure */ - uint32_t igps_len; /* length of this structure */ + uint32_t igps_version; /* version of this structure */ + uint32_t igps_len; /* length of this structure */ /* * Message statistics. */ - uint64_t igps_rcv_total; /* total IGMP messages received */ - uint64_t igps_rcv_tooshort; /* received with too few bytes */ - uint64_t igps_rcv_badttl; /* received with ttl other than 1 */ - uint64_t igps_rcv_badsum; /* received with bad checksum */ + uint64_t igps_rcv_total; /* total IGMP messages received */ + uint64_t igps_rcv_tooshort; /* received with too few bytes */ + uint64_t igps_rcv_badttl; /* received with ttl other than 1 */ + uint64_t igps_rcv_badsum; /* received with bad checksum */ /* * Query statistics. */ - uint64_t igps_rcv_v1v2_queries; /* received IGMPv1/IGMPv2 queries */ - uint64_t igps_rcv_v3_queries; /* received IGMPv3 queries */ - uint64_t igps_rcv_badqueries; /* received invalid queries */ - uint64_t igps_rcv_gen_queries; /* received general queries */ + uint64_t igps_rcv_v1v2_queries; /* received IGMPv1/IGMPv2 queries */ + uint64_t igps_rcv_v3_queries; /* received IGMPv3 queries */ + uint64_t igps_rcv_badqueries; /* received invalid queries */ + uint64_t igps_rcv_gen_queries; /* received general queries */ uint64_t igps_rcv_group_queries;/* received group queries */ - uint64_t igps_rcv_gsr_queries; /* received group-source queries */ - uint64_t igps_drop_gsr_queries; /* dropped group-source queries */ + uint64_t igps_rcv_gsr_queries; /* received group-source queries */ + uint64_t igps_drop_gsr_queries; /* dropped group-source queries */ /* * Report statistics. */ - uint64_t igps_rcv_reports; /* received membership reports */ - uint64_t igps_rcv_badreports; /* received invalid reports */ - uint64_t igps_rcv_ourreports; /* received reports for our groups */ - uint64_t igps_rcv_nora; /* received w/o Router Alert option */ - uint64_t igps_snd_reports; /* sent membership reports */ + uint64_t igps_rcv_reports; /* received membership reports */ + uint64_t igps_rcv_badreports; /* received invalid reports */ + uint64_t igps_rcv_ourreports; /* received reports for our groups */ + uint64_t igps_rcv_nora; /* received w/o Router Alert option */ + uint64_t igps_snd_reports; /* sent membership reports */ /* * Padding for future additions. 
*/ @@ -120,19 +120,19 @@ struct igmpstat_v3 { */ struct igmpstat { - u_int igps_rcv_total; /* total IGMP messages received */ - u_int igps_rcv_tooshort; /* received with too few bytes */ - u_int igps_rcv_badsum; /* received with bad checksum */ - u_int igps_rcv_queries; /* received membership queries */ - u_int igps_rcv_badqueries; /* received invalid queries */ - u_int igps_rcv_reports; /* received membership reports */ - u_int igps_rcv_badreports; /* received invalid reports */ - u_int igps_rcv_ourreports; /* received reports for our groups */ - u_int igps_snd_reports; /* sent membership reports */ + u_int igps_rcv_total; /* total IGMP messages received */ + u_int igps_rcv_tooshort; /* received with too few bytes */ + u_int igps_rcv_badsum; /* received with bad checksum */ + u_int igps_rcv_queries; /* received membership queries */ + u_int igps_rcv_badqueries; /* received invalid queries */ + u_int igps_rcv_reports; /* received membership reports */ + u_int igps_rcv_badreports; /* received invalid reports */ + u_int igps_rcv_ourreports; /* received reports for our groups */ + u_int igps_snd_reports; /* sent membership reports */ }; -#define IGPS_VERSION_3 3 -#define IGPS_VERSION3_LEN 168 +#define IGPS_VERSION_3 3 +#define IGPS_VERSION3_LEN 168 #ifdef PRIVATE /* @@ -143,28 +143,28 @@ struct igmp_ifinfo { #else struct igmp_ifinfo_u { #endif /* XNU_KERNEL_PRIVATE */ - uint32_t igi_ifindex; /* interface this instance belongs to */ - uint32_t igi_version; /* IGMPv3 Host Compatibility Mode */ - uint32_t igi_v1_timer; /* IGMPv1 Querier Present timer (s) */ - uint32_t igi_v2_timer; /* IGMPv2 Querier Present timer (s) */ - uint32_t igi_v3_timer; /* IGMPv3 General Query (interface) timer (s)*/ - uint32_t igi_flags; /* IGMP per-interface flags */ - uint32_t igi_rv; /* IGMPv3 Robustness Variable */ - uint32_t igi_qi; /* IGMPv3 Query Interval (s) */ - uint32_t igi_qri; /* IGMPv3 Query Response Interval (s) */ - uint32_t igi_uri; /* IGMPv3 Unsolicited Report Interval (s) */ + uint32_t igi_ifindex; /* interface this instance belongs to */ + uint32_t igi_version; /* IGMPv3 Host Compatibility Mode */ + uint32_t igi_v1_timer; /* IGMPv1 Querier Present timer (s) */ + uint32_t igi_v2_timer; /* IGMPv2 Querier Present timer (s) */ + uint32_t igi_v3_timer; /* IGMPv3 General Query (interface) timer (s)*/ + uint32_t igi_flags; /* IGMP per-interface flags */ + uint32_t igi_rv; /* IGMPv3 Robustness Variable */ + uint32_t igi_qi; /* IGMPv3 Query Interval (s) */ + uint32_t igi_qri; /* IGMPv3 Query Response Interval (s) */ + uint32_t igi_uri; /* IGMPv3 Unsolicited Report Interval (s) */ }; -#define IGIF_SILENT 0x00000001 /* Do not use IGMP on this ifp */ -#define IGIF_LOOPBACK 0x00000002 /* Send IGMP reports to loopback */ +#define IGIF_SILENT 0x00000001 /* Do not use IGMP on this ifp */ +#define IGIF_LOOPBACK 0x00000002 /* Send IGMP reports to loopback */ /* * IGMP version tag. 
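The igps_version/igps_len header is what lets IGPS_VERSION_3 and IGPS_VERSION3_LEN insulate the ABI: a consumer of the statistics (say, over sysctl) can verify the advertised layout before trusting any counter. An assumed userland check, with the header fields mirroring the start of struct igmpstat_v3:

#include <stdint.h>
#include <stddef.h>

struct stat_hdr { uint32_t version; uint32_t len; };

static int
stats_layout_ok(const void *buf, size_t buflen)
{
        const struct stat_hdr *h = (const struct stat_hdr *)buf;

        return buflen >= sizeof(*h) &&
            h->version == 3 &&          /* IGPS_VERSION_3 */
            h->len == 168 &&            /* IGPS_VERSION3_LEN */
            buflen >= h->len;
}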
*/ -#define IGMP_VERSION_NONE 0 /* Invalid */ -#define IGMP_VERSION_1 1 -#define IGMP_VERSION_2 2 -#define IGMP_VERSION_3 3 /* Default */ +#define IGMP_VERSION_NONE 0 /* Invalid */ +#define IGMP_VERSION_1 1 +#define IGMP_VERSION_2 2 +#define IGMP_VERSION_3 3 /* Default */ #endif /* PRIVATE */ #ifdef BSD_KERNEL_PRIVATE @@ -173,147 +173,147 @@ struct igmp_ifinfo_u { #ifdef IGMP_DEBUG extern int igmp_debug; -#define IGMP_PRINTF(x) do { if (igmp_debug) printf x; } while (0) -#define IGMP_INET_PRINTF(addr, x) do { \ - if (igmp_debug) { \ - char _igmp_inet_buf[MAX_IPv4_STR_LEN]; \ - inet_ntop(AF_INET, &(addr), _igmp_inet_buf, \ - sizeof(_igmp_inet_buf)); \ - printf x; \ - } \ +#define IGMP_PRINTF(x) do { if (igmp_debug) printf x; } while (0) +#define IGMP_INET_PRINTF(addr, x) do { \ + if (igmp_debug) { \ + char _igmp_inet_buf[MAX_IPv4_STR_LEN]; \ + inet_ntop(AF_INET, &(addr), _igmp_inet_buf, \ + sizeof(_igmp_inet_buf)); \ + printf x; \ + } \ } while (0) #else -#define IGMP_PRINTF(x) +#define IGMP_PRINTF(x) #endif -#define OIGMPSTAT_ADD(name, val) atomic_add_32(&igmpstat.name , (val)) -#define OIGMPSTAT_INC(name) OIGMPSTAT_ADD(name, 1) +#define OIGMPSTAT_ADD(name, val) atomic_add_32(&igmpstat.name , (val)) +#define OIGMPSTAT_INC(name) OIGMPSTAT_ADD(name, 1) -#define IGMPSTAT_ADD(name, val) atomic_add_64(&igmpstat_v3.name , (val)) -#define IGMPSTAT_INC(name) IGMPSTAT_ADD(name, 1) +#define IGMPSTAT_ADD(name, val) atomic_add_64(&igmpstat_v3.name , (val)) +#define IGMPSTAT_INC(name) IGMPSTAT_ADD(name, 1) #define IGMP_RANDOM_DELAY(X) (random() % (X) + 1) -#define IGMP_MAX_STATE_CHANGES 24 /* Max pending changes per group */ +#define IGMP_MAX_STATE_CHANGES 24 /* Max pending changes per group */ /* * IGMP per-group states. */ -#define IGMP_NOT_MEMBER 0 /* Can garbage collect in_multi */ -#define IGMP_SILENT_MEMBER 1 /* Do not perform IGMP for group */ -#define IGMP_REPORTING_MEMBER 2 /* IGMPv1/2/3 we are reporter */ -#define IGMP_IDLE_MEMBER 3 /* IGMPv1/2 we reported last */ -#define IGMP_LAZY_MEMBER 4 /* IGMPv1/2 other member reporting */ -#define IGMP_SLEEPING_MEMBER 5 /* IGMPv1/2 start query response */ -#define IGMP_AWAKENING_MEMBER 6 /* IGMPv1/2 group timer will start */ -#define IGMP_G_QUERY_PENDING_MEMBER 7 /* IGMPv3 group query pending */ -#define IGMP_SG_QUERY_PENDING_MEMBER 8 /* IGMPv3 source query pending */ -#define IGMP_LEAVING_MEMBER 9 /* IGMPv3 dying gasp (pending last */ - /* retransmission of INCLUDE {}) */ +#define IGMP_NOT_MEMBER 0 /* Can garbage collect in_multi */ +#define IGMP_SILENT_MEMBER 1 /* Do not perform IGMP for group */ +#define IGMP_REPORTING_MEMBER 2 /* IGMPv1/2/3 we are reporter */ +#define IGMP_IDLE_MEMBER 3 /* IGMPv1/2 we reported last */ +#define IGMP_LAZY_MEMBER 4 /* IGMPv1/2 other member reporting */ +#define IGMP_SLEEPING_MEMBER 5 /* IGMPv1/2 start query response */ +#define IGMP_AWAKENING_MEMBER 6 /* IGMPv1/2 group timer will start */ +#define IGMP_G_QUERY_PENDING_MEMBER 7 /* IGMPv3 group query pending */ +#define IGMP_SG_QUERY_PENDING_MEMBER 8 /* IGMPv3 source query pending */ +#define IGMP_LEAVING_MEMBER 9 /* IGMPv3 dying gasp (pending last */ + /* retransmission of INCLUDE {}) */ /* * IGMPv3 protocol control variables. 
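IGMP_RANDOM_DELAY(X) above evaluates to a value uniform over [1, X]; the point, as in the IGMP RFCs, is that group members jitter their responses so a single query does not produce a synchronized burst of reports. A userland stand-in with the kernel's random() swapped for rand():

#include <stdlib.h>

/* Uniform over [1, max], matching random() % (X) + 1. */
static int
igmp_random_delay(int max)
{
        return rand() % max + 1;
}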
*/ -#define IGMP_RV_INIT 2 /* Robustness Variable */ -#define IGMP_RV_MIN 1 -#define IGMP_RV_MAX 7 +#define IGMP_RV_INIT 2 /* Robustness Variable */ +#define IGMP_RV_MIN 1 +#define IGMP_RV_MAX 7 -#define IGMP_QI_INIT 125 /* Query Interval (s) */ -#define IGMP_QI_MIN 1 -#define IGMP_QI_MAX 255 +#define IGMP_QI_INIT 125 /* Query Interval (s) */ +#define IGMP_QI_MIN 1 +#define IGMP_QI_MAX 255 -#define IGMP_QRI_INIT 10 /* Query Response Interval (s) */ -#define IGMP_QRI_MIN 1 -#define IGMP_QRI_MAX 255 +#define IGMP_QRI_INIT 10 /* Query Response Interval (s) */ +#define IGMP_QRI_MIN 1 +#define IGMP_QRI_MAX 255 -#define IGMP_URI_INIT 3 /* Unsolicited Report Interval (s) */ -#define IGMP_URI_MIN 0 -#define IGMP_URI_MAX 10 +#define IGMP_URI_INIT 3 /* Unsolicited Report Interval (s) */ +#define IGMP_URI_MIN 0 +#define IGMP_URI_MAX 10 -#define IGMP_MAX_G_GS_PACKETS 8 /* # of packets to answer G/GS */ -#define IGMP_MAX_STATE_CHANGE_PACKETS 8 /* # of packets per state change */ -#define IGMP_MAX_RESPONSE_PACKETS 16 /* # of packets for general query */ -#define IGMP_MAX_RESPONSE_BURST 4 /* # of responses to send at once */ -#define IGMP_RESPONSE_BURST_INTERVAL 1 /* 1 second */ +#define IGMP_MAX_G_GS_PACKETS 8 /* # of packets to answer G/GS */ +#define IGMP_MAX_STATE_CHANGE_PACKETS 8 /* # of packets per state change */ +#define IGMP_MAX_RESPONSE_PACKETS 16 /* # of packets for general query */ +#define IGMP_MAX_RESPONSE_BURST 4 /* # of responses to send at once */ +#define IGMP_RESPONSE_BURST_INTERVAL 1 /* 1 second */ /* * IGMP-specific mbuf flags. */ -#define M_IGMPV2 M_PROTO1 /* Packet is IGMPv2 */ -#define M_IGMPV3_HDR M_PROTO2 /* Packet has IGMPv3 headers */ -#define M_GROUPREC M_PROTO3 /* mbuf chain is a group record */ -#define M_IGMP_LOOP M_LOOP /* transmit on loif, not real ifp */ +#define M_IGMPV2 M_PROTO1 /* Packet is IGMPv2 */ +#define M_IGMPV3_HDR M_PROTO2 /* Packet has IGMPv3 headers */ +#define M_GROUPREC M_PROTO3 /* mbuf chain is a group record */ +#define M_IGMP_LOOP M_LOOP /* transmit on loif, not real ifp */ /* * Default amount of leading space for IGMPv3 to allocate at the * beginning of its mbuf packet chains, to avoid fragmentation and * unnecessary allocation of leading mbufs. 
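The *_MIN/*_MAX pairs above bound what a remote querier may impose on the host; an advertised Robustness Variable or Query Interval gets clamped into its legal range before being adopted. A generic sketch of that clamp (the exact policy the kernel applies when parsing v3 queries is an assumption here):

static unsigned
clamp(unsigned v, unsigned lo, unsigned hi)
{
        return (v < lo) ? lo : (v > hi) ? hi : v;
}

/* usage: rv = clamp(advertised_qrv, 1, 7);   IGMP_RV_MIN..IGMP_RV_MAX */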
*/ -#define RAOPT_LEN 4 /* Length of IP Router Alert option */ -#define IGMP_LEADINGSPACE \ +#define RAOPT_LEN 4 /* Length of IP Router Alert option */ +#define IGMP_LEADINGSPACE \ (sizeof(struct ip) + RAOPT_LEN + sizeof(struct igmp_report)) struct igmp_ifinfo { decl_lck_mtx_data(, igi_lock); - uint32_t igi_refcnt; /* reference count */ - uint32_t igi_debug; /* see ifa_debug flags */ + uint32_t igi_refcnt; /* reference count */ + uint32_t igi_debug; /* see ifa_debug flags */ LIST_ENTRY(igmp_ifinfo) igi_link; - struct ifnet *igi_ifp; /* interface this instance belongs to */ - uint32_t igi_version; /* IGMPv3 Host Compatibility Mode */ - uint32_t igi_v1_timer; /* IGMPv1 Querier Present timer (s) */ - uint32_t igi_v2_timer; /* IGMPv2 Querier Present timer (s) */ - uint32_t igi_v3_timer; /* IGMPv3 General Query (interface) timer (s)*/ - uint32_t igi_flags; /* IGMP per-interface flags */ - uint32_t igi_rv; /* IGMPv3 Robustness Variable */ - uint32_t igi_qi; /* IGMPv3 Query Interval (s) */ - uint32_t igi_qri; /* IGMPv3 Query Response Interval (s) */ - uint32_t igi_uri; /* IGMPv3 Unsolicited Report Interval (s) */ - SLIST_HEAD(,in_multi) igi_relinmhead; /* released groups */ - struct ifqueue igi_gq; /* queue of general query responses */ + struct ifnet *igi_ifp; /* interface this instance belongs to */ + uint32_t igi_version; /* IGMPv3 Host Compatibility Mode */ + uint32_t igi_v1_timer; /* IGMPv1 Querier Present timer (s) */ + uint32_t igi_v2_timer; /* IGMPv2 Querier Present timer (s) */ + uint32_t igi_v3_timer; /* IGMPv3 General Query (interface) timer (s)*/ + uint32_t igi_flags; /* IGMP per-interface flags */ + uint32_t igi_rv; /* IGMPv3 Robustness Variable */ + uint32_t igi_qi; /* IGMPv3 Query Interval (s) */ + uint32_t igi_qri; /* IGMPv3 Query Response Interval (s) */ + uint32_t igi_uri; /* IGMPv3 Unsolicited Report Interval (s) */ + SLIST_HEAD(, in_multi) igi_relinmhead; /* released groups */ + struct ifqueue igi_gq; /* queue of general query responses */ struct ifqueue igi_v2q; /* queue of v1/v2 packets */ }; -#define IGI_LOCK_ASSERT_HELD(_igi) \ +#define IGI_LOCK_ASSERT_HELD(_igi) \ LCK_MTX_ASSERT(&(_igi)->igi_lock, LCK_MTX_ASSERT_OWNED) -#define IGI_LOCK_ASSERT_NOTHELD(_igi) \ +#define IGI_LOCK_ASSERT_NOTHELD(_igi) \ LCK_MTX_ASSERT(&(_igi)->igi_lock, LCK_MTX_ASSERT_NOTOWNED) -#define IGI_LOCK(_igi) \ +#define IGI_LOCK(_igi) \ lck_mtx_lock(&(_igi)->igi_lock) -#define IGI_LOCK_SPIN(_igi) \ +#define IGI_LOCK_SPIN(_igi) \ lck_mtx_lock_spin(&(_igi)->igi_lock) -#define IGI_CONVERT_LOCK(_igi) do { \ - IGI_LOCK_ASSERT_HELD(_igi); \ - lck_mtx_convert_spin(&(_igi)->igi_lock); \ +#define IGI_CONVERT_LOCK(_igi) do { \ + IGI_LOCK_ASSERT_HELD(_igi); \ + lck_mtx_convert_spin(&(_igi)->igi_lock); \ } while (0) -#define IGI_UNLOCK(_igi) \ +#define IGI_UNLOCK(_igi) \ lck_mtx_unlock(&(_igi)->igi_lock) -#define IGI_ADDREF(_igi) \ +#define IGI_ADDREF(_igi) \ igi_addref(_igi, 0) -#define IGI_ADDREF_LOCKED(_igi) \ +#define IGI_ADDREF_LOCKED(_igi) \ igi_addref(_igi, 1) -#define IGI_REMREF(_igi) \ +#define IGI_REMREF(_igi) \ igi_remref(_igi) /* * Per-link IGMP context. 
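Concretely, IGMP_LEADINGSPACE above works out to 32 bytes: a 20-byte IPv4 header, the 4-byte Router Alert option (RAOPT_LEN), and the 8-byte v3 report header (IGMP_V3_REPORT_MINLEN). Reserving it when each mbuf chain is allocated lets the encapsulation step prepend headers without reallocating:

enum {
        IP_HDRLEN    = 20,      /* sizeof (struct ip), no options */
        RA_OPTLEN    = 4,       /* RAOPT_LEN */
        V3_REPORT    = 8,       /* IGMP_V3_REPORT_MINLEN */
        LEADINGSPACE = IP_HDRLEN + RA_OPTLEN + V3_REPORT        /* 32 */
};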
*/ -#define IGMP_IFINFO(ifp) ((ifp)->if_igi) +#define IGMP_IFINFO(ifp) ((ifp)->if_igi) /* * IGMP timer schedule parameters */ struct igmp_tparams { - int qpt; /* querier_present_timers_running */ - int it; /* interface_timers_running */ - int cst; /* current_state_timers_running */ - int sct; /* state_change_timers_running */ + int qpt; /* querier_present_timers_running */ + int it; /* interface_timers_running */ + int cst; /* current_state_timers_running */ + int sct; /* state_change_timers_running */ }; extern void igmp_init(struct protosw *, struct domain *); @@ -336,8 +336,8 @@ SYSCTL_DECL(_net_inet_igmp); /* * Names for IGMP sysctl objects */ -#define IGMPCTL_STATS 1 /* statistics (read-only) */ -#define IGMPCTL_MAXID 2 +#define IGMPCTL_STATS 1 /* statistics (read-only) */ +#define IGMPCTL_MAXID 2 #ifdef BSD_KERNEL_PRIVATE #define IGMPCTL_NAMES { \ diff --git a/bsd/netinet/in.c b/bsd/netinet/in.c index 61de1526d..5f464c325 100644 --- a/bsd/netinet/in.c +++ b/bsd/netinet/in.c @@ -118,13 +118,13 @@ static void in_socktrim(struct sockaddr_in *); static int in_ifinit(struct ifnet *, struct in_ifaddr *, struct sockaddr_in *, int); -#define IA_HASH_INIT(ia) { \ - (ia)->ia_hash.tqe_next = (void *)(uintptr_t)-1; \ - (ia)->ia_hash.tqe_prev = (void *)(uintptr_t)-1; \ +#define IA_HASH_INIT(ia) { \ + (ia)->ia_hash.tqe_next = (void *)(uintptr_t)-1; \ + (ia)->ia_hash.tqe_prev = (void *)(uintptr_t)-1; \ } -#define IA_IS_HASHED(ia) \ - (!((ia)->ia_hash.tqe_next == (void *)(uintptr_t)-1 || \ +#define IA_IS_HASHED(ia) \ + (!((ia)->ia_hash.tqe_next == (void *)(uintptr_t)-1 || \ (ia)->ia_hash.tqe_prev == (void *)(uintptr_t)-1)) static void in_iahash_remove(struct in_ifaddr *); @@ -159,31 +159,31 @@ static struct lltable * in_lltattach(struct ifnet *ifp); static int subnetsarelocal = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, subnets_are_local, - CTLFLAG_RW | CTLFLAG_LOCKED, &subnetsarelocal, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &subnetsarelocal, 0, ""); /* Track whether or not the SIOCARPIPLL ioctl has been called */ u_int32_t ipv4_ll_arp_aware = 0; -#define INIFA_TRACE_HIST_SIZE 32 /* size of trace history */ +#define INIFA_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ __private_extern__ unsigned int inifa_trace_hist_size = INIFA_TRACE_HIST_SIZE; struct in_ifaddr_dbg { - struct in_ifaddr inifa; /* in_ifaddr */ - struct in_ifaddr inifa_old; /* saved in_ifaddr */ - u_int16_t inifa_refhold_cnt; /* # of IFA_ADDREF */ - u_int16_t inifa_refrele_cnt; /* # of IFA_REMREF */ + struct in_ifaddr inifa; /* in_ifaddr */ + struct in_ifaddr inifa_old; /* saved in_ifaddr */ + u_int16_t inifa_refhold_cnt; /* # of IFA_ADDREF */ + u_int16_t inifa_refrele_cnt; /* # of IFA_REMREF */ /* * Alloc and free callers. */ - ctrace_t inifa_alloc; - ctrace_t inifa_free; + ctrace_t inifa_alloc; + ctrace_t inifa_free; /* * Circular lists of IFA_ADDREF and IFA_REMREF callers. 
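IA_HASH_INIT and IA_IS_HASHED above ride on a sentinel: both TAILQ linkage pointers are set to (void *)(uintptr_t)-1, a value no live list entry can hold, so "is this address on a hash chain?" needs no separate flag bit. The same trick in miniature:

#include <stdint.h>

struct ent { struct ent *tqe_next; struct ent **tqe_prev; };

#define HASH_INIT(e) do {                               \
        (e)->tqe_next = (void *)(uintptr_t)-1;          \
        (e)->tqe_prev = (void *)(uintptr_t)-1;          \
} while (0)

#define IS_HASHED(e)                                    \
        (!((e)->tqe_next == (void *)(uintptr_t)-1 ||    \
           (e)->tqe_prev == (void *)(uintptr_t)-1))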
*/ - ctrace_t inifa_refhold[INIFA_TRACE_HIST_SIZE]; - ctrace_t inifa_refrele[INIFA_TRACE_HIST_SIZE]; + ctrace_t inifa_refhold[INIFA_TRACE_HIST_SIZE]; + ctrace_t inifa_refrele[INIFA_TRACE_HIST_SIZE]; /* * Trash list linkage */ @@ -195,19 +195,19 @@ static TAILQ_HEAD(, in_ifaddr_dbg) inifa_trash_head; static decl_lck_mtx_data(, inifa_trash_lock); #if DEBUG -static unsigned int inifa_debug = 1; /* debugging (enabled) */ +static unsigned int inifa_debug = 1; /* debugging (enabled) */ #else -static unsigned int inifa_debug; /* debugging (disabled) */ +static unsigned int inifa_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int inifa_size; /* size of zone element */ -static struct zone *inifa_zone; /* zone for in_ifaddr */ +static unsigned int inifa_size; /* size of zone element */ +static struct zone *inifa_zone; /* zone for in_ifaddr */ -#define INIFA_ZONE_MAX 64 /* maximum elements in zone */ -#define INIFA_ZONE_NAME "in_ifaddr" /* zone name */ +#define INIFA_ZONE_MAX 64 /* maximum elements in zone */ +#define INIFA_ZONE_NAME "in_ifaddr" /* zone name */ -static const unsigned int in_extra_size = sizeof (struct in_ifextra); +static const unsigned int in_extra_size = sizeof(struct in_ifextra); static const unsigned int in_extra_bufsize = in_extra_size + - sizeof (void *) + sizeof (uint64_t); + sizeof(void *) + sizeof(uint64_t); /* * Return 1 if the address is @@ -228,25 +228,26 @@ inaddr_local(struct in_addr in) local = 1; } else if (ntohl(in.s_addr) >= INADDR_UNSPEC_GROUP && ntohl(in.s_addr) <= INADDR_MAX_LOCAL_GROUP) { - local = 1; + local = 1; } else { sin.sin_family = AF_INET; - sin.sin_len = sizeof (sin); + sin.sin_len = sizeof(sin); sin.sin_addr = in; rt = rtalloc1((struct sockaddr *)&sin, 0, 0); if (rt != NULL) { RT_LOCK_SPIN(rt); if (rt->rt_gateway->sa_family == AF_LINK || - (rt->rt_ifp->if_flags & IFF_LOOPBACK)) + (rt->rt_ifp->if_flags & IFF_LOOPBACK)) { local = 1; + } RT_UNLOCK(rt); rtfree(rt); } else { local = in_localaddr(in); } } - return (local); + return local; } /* @@ -262,8 +263,9 @@ in_localaddr(struct in_addr in) u_int32_t i = ntohl(in.s_addr); struct in_ifaddr *ia; - if (IN_LINKLOCAL(i)) - return (1); + if (IN_LINKLOCAL(i)) { + return 1; + } if (subnetsarelocal) { lck_rw_lock_shared(in_ifaddr_rwlock); @@ -273,7 +275,7 @@ in_localaddr(struct in_addr in) if ((i & ia->ia_netmask) == ia->ia_net) { IFA_UNLOCK(&ia->ia_ifa); lck_rw_done(in_ifaddr_rwlock); - return (1); + return 1; } IFA_UNLOCK(&ia->ia_ifa); } @@ -286,13 +288,13 @@ in_localaddr(struct in_addr in) if ((i & ia->ia_subnetmask) == ia->ia_subnet) { IFA_UNLOCK(&ia->ia_ifa); lck_rw_done(in_ifaddr_rwlock); - return (1); + return 1; } IFA_UNLOCK(&ia->ia_ifa); } lck_rw_done(in_ifaddr_rwlock); } - return (0); + return 0; } /* @@ -306,14 +308,16 @@ in_canforward(struct in_addr in) u_int32_t i = ntohl(in.s_addr); u_int32_t net; - if (IN_EXPERIMENTAL(i) || IN_MULTICAST(i)) - return (FALSE); + if (IN_EXPERIMENTAL(i) || IN_MULTICAST(i)) { + return FALSE; + } if (IN_CLASSA(i)) { net = i & IN_CLASSA_NET; - if (net == 0 || net == (IN_LOOPBACKNET << IN_CLASSA_NSHIFT)) - return (FALSE); + if (net == 0 || net == (IN_LOOPBACKNET << IN_CLASSA_NSHIFT)) { + return FALSE; + } } - return (TRUE); + return TRUE; } /* @@ -326,14 +330,15 @@ in_socktrim(struct sockaddr_in *ap) char *cp = (char *)(&ap->sin_addr + 1); ap->sin_len = 0; - while (--cp >= cplim) + while (--cp >= cplim) { if (*cp) { (ap)->sin_len = cp - (char *)(ap) + 1; break; } + } } -static int in_interfaces; /* number of external internet interfaces */ +static 
int in_interfaces; /* number of external internet interfaces */ static int in_domifattach(struct ifnet *ifp) @@ -351,7 +356,7 @@ in_domifattach(struct ifnet *ifp) int errorx; if ((ext = (struct in_ifextra *)_MALLOC(in_extra_bufsize, - M_IFADDR, M_WAITOK|M_ZERO)) == NULL) { + M_IFADDR, M_WAITOK | M_ZERO)) == NULL) { error = ENOMEM; errorx = proto_unplumb(PF_INET, ifp); if (errorx != 0) { @@ -364,15 +369,15 @@ in_domifattach(struct ifnet *ifp) } /* Align on 64-bit boundary */ - base = (void *)P2ROUNDUP((intptr_t)ext + sizeof (uint64_t), - sizeof (uint64_t)); + base = (void *)P2ROUNDUP((intptr_t)ext + sizeof(uint64_t), + sizeof(uint64_t)); VERIFY(((intptr_t)base + in_extra_size) <= ((intptr_t)ext + in_extra_bufsize)); - pbuf = (void **)((intptr_t)base - sizeof (void *)); + pbuf = (void **)((intptr_t)base - sizeof(void *)); *pbuf = ext; ifp->if_inetdata = base; IN_IFEXTRA(ifp)->ii_llt = in_lltattach(ifp); - VERIFY(IS_P2ALIGNED(ifp->if_inetdata, sizeof (uint64_t))); + VERIFY(IS_P2ALIGNED(ifp->if_inetdata, sizeof(uint64_t))); } done: if (error == 0 && ifp->if_inetdata != NULL) { @@ -384,7 +389,7 @@ done: */ bzero(ifp->if_inetdata, in_extra_size); } - return (error); + return error; } static __attribute__((noinline)) int @@ -399,18 +404,20 @@ inctl_associd(struct socket *so, u_long cmd, caddr_t data) VERIFY(so != NULL); switch (cmd) { - case SIOCGASSOCIDS32: /* struct so_aidreq32 */ - bcopy(data, &u.a32, sizeof (u.a32)); + case SIOCGASSOCIDS32: /* struct so_aidreq32 */ + bcopy(data, &u.a32, sizeof(u.a32)); error = in_getassocids(so, &u.a32.sar_cnt, u.a32.sar_aidp); - if (error == 0) - bcopy(&u.a32, data, sizeof (u.a32)); + if (error == 0) { + bcopy(&u.a32, data, sizeof(u.a32)); + } break; - case SIOCGASSOCIDS64: /* struct so_aidreq64 */ - bcopy(data, &u.a64, sizeof (u.a64)); + case SIOCGASSOCIDS64: /* struct so_aidreq64 */ + bcopy(data, &u.a64, sizeof(u.a64)); error = in_getassocids(so, &u.a64.sar_cnt, u.a64.sar_aidp); - if (error == 0) - bcopy(&u.a64, data, sizeof (u.a64)); + if (error == 0) { + bcopy(&u.a64, data, sizeof(u.a64)); + } break; default: @@ -418,7 +425,7 @@ inctl_associd(struct socket *so, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -433,20 +440,22 @@ inctl_connid(struct socket *so, u_long cmd, caddr_t data) VERIFY(so != NULL); switch (cmd) { - case SIOCGCONNIDS32: /* struct so_cidreq32 */ - bcopy(data, &u.c32, sizeof (u.c32)); + case SIOCGCONNIDS32: /* struct so_cidreq32 */ + bcopy(data, &u.c32, sizeof(u.c32)); error = in_getconnids(so, u.c32.scr_aid, &u.c32.scr_cnt, u.c32.scr_cidp); - if (error == 0) - bcopy(&u.c32, data, sizeof (u.c32)); + if (error == 0) { + bcopy(&u.c32, data, sizeof(u.c32)); + } break; - case SIOCGCONNIDS64: /* struct so_cidreq64 */ - bcopy(data, &u.c64, sizeof (u.c64)); + case SIOCGCONNIDS64: /* struct so_cidreq64 */ + bcopy(data, &u.c64, sizeof(u.c64)); error = in_getconnids(so, u.c64.scr_aid, &u.c64.scr_cnt, u.c64.scr_cidp); - if (error == 0) - bcopy(&u.c64, data, sizeof (u.c64)); + if (error == 0) { + bcopy(&u.c64, data, sizeof(u.c64)); + } break; default: @@ -454,7 +463,7 @@ inctl_connid(struct socket *so, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -469,26 +478,28 @@ inctl_conninfo(struct socket *so, u_long cmd, caddr_t data) VERIFY(so != NULL); switch (cmd) { - case SIOCGCONNINFO32: /* struct so_cinforeq32 */ - bcopy(data, &u.ci32, sizeof (u.ci32)); + case SIOCGCONNINFO32: /* struct so_cinforeq32 */ + 
bcopy(data, &u.ci32, sizeof(u.ci32)); error = in_getconninfo(so, u.ci32.scir_cid, &u.ci32.scir_flags, &u.ci32.scir_ifindex, &u.ci32.scir_error, u.ci32.scir_src, &u.ci32.scir_src_len, u.ci32.scir_dst, &u.ci32.scir_dst_len, &u.ci32.scir_aux_type, u.ci32.scir_aux_data, &u.ci32.scir_aux_len); - if (error == 0) - bcopy(&u.ci32, data, sizeof (u.ci32)); + if (error == 0) { + bcopy(&u.ci32, data, sizeof(u.ci32)); + } break; - case SIOCGCONNINFO64: /* struct so_cinforeq64 */ - bcopy(data, &u.ci64, sizeof (u.ci64)); + case SIOCGCONNINFO64: /* struct so_cinforeq64 */ + bcopy(data, &u.ci64, sizeof(u.ci64)); error = in_getconninfo(so, u.ci64.scir_cid, &u.ci64.scir_flags, &u.ci64.scir_ifindex, &u.ci64.scir_error, u.ci64.scir_src, &u.ci64.scir_src_len, u.ci64.scir_dst, &u.ci64.scir_dst_len, &u.ci64.scir_aux_type, u.ci64.scir_aux_data, &u.ci64.scir_aux_len); - if (error == 0) - bcopy(&u.ci64, data, sizeof (u.ci64)); + if (error == 0) { + bcopy(&u.ci64, data, sizeof(u.ci64)); + } break; default: @@ -496,7 +507,7 @@ inctl_conninfo(struct socket *so, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } /* @@ -511,7 +522,7 @@ inctl_autoaddr(struct ifnet *ifp, struct ifreq *ifr) VERIFY(ifp != NULL); - bcopy(&ifr->ifr_intval, &intval, sizeof (intval)); + bcopy(&ifr->ifr_intval, &intval, sizeof(intval)); ifnet_lock_exclusive(ifp); if (intval) { @@ -522,17 +533,18 @@ inctl_autoaddr(struct ifnet *ifp, struct ifreq *ifr) * being set in that mode. */ if (ifp->if_eflags & IFEF_IPV4_ROUTER) { - intval = 0; /* be safe; clear flag if set */ + intval = 0; /* be safe; clear flag if set */ error = EBUSY; } else { ifp->if_eflags |= IFEF_AUTOCONFIGURING; } } - if (!intval) + if (!intval) { ifp->if_eflags &= ~IFEF_AUTOCONFIGURING; + } ifnet_lock_done(ifp); - return (error); + return error; } /* @@ -547,7 +559,7 @@ inctl_arpipll(struct ifnet *ifp, struct ifreq *ifr) VERIFY(ifp != NULL); - bcopy(&ifr->ifr_intval, &intval, sizeof (intval)); + bcopy(&ifr->ifr_intval, &intval, sizeof(intval)); ipv4_ll_arp_aware = 1; ifnet_lock_exclusive(ifp); @@ -559,17 +571,18 @@ inctl_arpipll(struct ifnet *ifp, struct ifreq *ifr) * prevent SIOCARPIPLL from being set in that mode. 
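in_canforward(), earlier in this file's diff, is pure classful arithmetic: class D (multicast) and class E (experimental) addresses are never forwardable, and neither are class A net 0 or the 127/8 loopback net. Restated in userland form, with the mask values expanding the IN_* macros:

#include <stdint.h>

static int
can_forward(uint32_t i)         /* address in host byte order */
{
        if ((i & 0xf0000000u) == 0xe0000000u || /* 224/4: multicast  */
            (i & 0xf0000000u) == 0xf0000000u)   /* 240/4: class E    */
                return 0;
        if ((i >> 24) < 128) {                  /* class A           */
                uint32_t net = i & 0xff000000u;
                if (net == 0 || net == (127u << 24))
                        return 0;               /* net 0 or loopback */
        }
        return 1;
}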
*/ if (ifp->if_eflags & IFEF_IPV4_ROUTER) { - intval = 0; /* be safe; clear flag if set */ + intval = 0; /* be safe; clear flag if set */ error = EBUSY; } else { ifp->if_eflags |= IFEF_ARPLL; } } - if (!intval) + if (!intval) { ifp->if_eflags &= ~IFEF_ARPLL; + } ifnet_lock_done(ifp); - return (error); + return error; } /* @@ -592,10 +605,11 @@ inctl_setrouter(struct ifnet *ifp, struct ifreq *ifr) VERIFY(ifp != NULL); /* Router mode isn't valid for loopback */ - if (ifp->if_flags & IFF_LOOPBACK) - return (ENODEV); + if (ifp->if_flags & IFF_LOOPBACK) { + return ENODEV; + } - bcopy(&ifr->ifr_intval, &intval, sizeof (intval)); + bcopy(&ifr->ifr_intval, &intval, sizeof(intval)); ifnet_lock_exclusive(ifp); if (intval) { @@ -609,7 +623,7 @@ inctl_setrouter(struct ifnet *ifp, struct ifreq *ifr) /* purge all IPv4 addresses configured on this interface */ in_purgeaddrs(ifp); - return (error); + return error; } /* @@ -629,23 +643,23 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, VERIFY(ifp != NULL); - bzero(&in_event_data, sizeof (struct kev_in_data)); - bzero(&ev_msg, sizeof (struct kev_msg)); + bzero(&in_event_data, sizeof(struct kev_in_data)); + bzero(&ev_msg, sizeof(struct kev_msg)); switch (cmd) { - case SIOCGIFADDR: /* struct ifreq */ + case SIOCGIFADDR: /* struct ifreq */ if (ia == NULL) { error = EADDRNOTAVAIL; break; } IFA_LOCK(&ia->ia_ifa); - bcopy(&ia->ia_addr, &ifr->ifr_addr, sizeof (addr)); + bcopy(&ia->ia_addr, &ifr->ifr_addr, sizeof(addr)); IFA_UNLOCK(&ia->ia_ifa); break; - case SIOCSIFADDR: /* struct ifreq */ + case SIOCSIFADDR: /* struct ifreq */ VERIFY(ia != NULL); - bcopy(&ifr->ifr_addr, &addr, sizeof (addr)); + bcopy(&ifr->ifr_addr, &addr, sizeof(addr)); /* * If this is a new address, the reference count for the * hash table has been taken at creation time above. @@ -656,15 +670,15 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, } break; - case SIOCAIFADDR: { /* struct {if,in_}aliasreq */ + case SIOCAIFADDR: { /* struct {if,in_}aliasreq */ struct in_aliasreq *ifra = (struct in_aliasreq *)ifr; struct sockaddr_in broadaddr, mask; int hostIsNew, maskIsNew; VERIFY(ia != NULL); - bcopy(&ifra->ifra_addr, &addr, sizeof (addr)); - bcopy(&ifra->ifra_broadaddr, &broadaddr, sizeof (broadaddr)); - bcopy(&ifra->ifra_mask, &mask, sizeof (mask)); + bcopy(&ifra->ifra_addr, &addr, sizeof(addr)); + bcopy(&ifra->ifra_broadaddr, &broadaddr, sizeof(broadaddr)); + bcopy(&ifra->ifra_mask, &mask, sizeof(mask)); maskIsNew = 0; hostIsNew = 1; @@ -695,7 +709,7 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, in_ifscrub(ifp, ia, 0); IFA_LOCK(&ia->ia_ifa); ia->ia_dstaddr = broadaddr; - ia->ia_dstaddr.sin_len = sizeof (struct sockaddr_in); + ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in); maskIsNew = 1; /* We lie; but the effect's the same */ } if (addr.sin_family == AF_INET && (hostIsNew || maskIsNew)) { @@ -709,21 +723,23 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, } IFA_LOCK(&ia->ia_ifa); if ((ifp->if_flags & IFF_BROADCAST) && - (broadaddr.sin_family == AF_INET)) + (broadaddr.sin_family == AF_INET)) { ia->ia_broadaddr = broadaddr; + } /* * Report event. 
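The buffer setup in in_domifattach(), a few hunks back, is a small alignment dance: over-allocate, round the usable base up to a 64-bit boundary with P2ROUNDUP, and stash the raw allocation pointer in the slot just below the base so the buffer can be freed later. A portable stand-in for the same shape:

#include <stdlib.h>
#include <stdint.h>

static void *
alloc_aligned8(size_t size)
{
        /* Same slop the kernel budgets: room for a pointer plus the
         * worst-case rounding. */
        void *raw = calloc(1, size + 2 * sizeof(uint64_t));
        uintptr_t base;

        if (raw == NULL)
                return NULL;
        base = ((uintptr_t)raw + sizeof(uint64_t) + sizeof(uint64_t) - 1) &
            ~(uintptr_t)(sizeof(uint64_t) - 1);
        ((void **)base)[-1] = raw;      /* remember calloc's block */
        return (void *)base;
}

static void
free_aligned8(void *base)
{
        free(((void **)base)[-1]);
}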
*/ if ((error == 0) || (error == EEXIST)) { - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_INET_SUBCLASS; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; - if (hostIsNew) + if (hostIsNew) { ev_msg.event_code = KEV_INET_NEW_ADDR; - else + } else { ev_msg.event_code = KEV_INET_CHANGED_ADDR; + } if (ia->ia_ifa.ifa_dstaddr) { in_event_data.ia_dstaddr = @@ -732,20 +748,20 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, } else { in_event_data.ia_dstaddr.s_addr = INADDR_ANY; } - in_event_data.ia_addr = ia->ia_addr.sin_addr; - in_event_data.ia_net = ia->ia_net; - in_event_data.ia_netmask = ia->ia_netmask; - in_event_data.ia_subnet = ia->ia_subnet; - in_event_data.ia_subnetmask = ia->ia_subnetmask; - in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; IFA_UNLOCK(&ia->ia_ifa); (void) strlcpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); in_event_data.link_data.if_family = ifp->if_family; in_event_data.link_data.if_unit = ifp->if_unit; - ev_msg.dv[0].data_ptr = &in_event_data; - ev_msg.dv[0].data_length = sizeof (struct kev_in_data); + ev_msg.dv[0].data_ptr = &in_event_data; + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(ifp, &ev_msg); @@ -755,11 +771,12 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, break; } - case SIOCDIFADDR: /* struct ifreq */ + case SIOCDIFADDR: /* struct ifreq */ VERIFY(ia != NULL); error = ifnet_ioctl(ifp, PF_INET, SIOCDIFADDR, ia); - if (error == EOPNOTSUPP) + if (error == EOPNOTSUPP) { error = 0; + } if (error != 0) { /* Reset the detaching flag */ IFA_LOCK(&ia->ia_ifa); @@ -769,11 +786,11 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, } /* Fill out the kernel event information */ - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_INET_SUBCLASS; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; - ev_msg.event_code = KEV_INET_ADDR_DELETED; + ev_msg.event_code = KEV_INET_ADDR_DELETED; IFA_LOCK(&ia->ia_ifa); if (ia->ia_ifa.ifa_dstaddr) { @@ -782,12 +799,12 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, } else { in_event_data.ia_dstaddr.s_addr = INADDR_ANY; } - in_event_data.ia_addr = ia->ia_addr.sin_addr; - in_event_data.ia_net = ia->ia_net; - in_event_data.ia_netmask = ia->ia_netmask; - in_event_data.ia_subnet = ia->ia_subnet; - in_event_data.ia_subnetmask = ia->ia_subnetmask; - in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; IFA_UNLOCK(&ia->ia_ifa); (void) strlcpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); @@ -804,8 +821,9 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, IFA_REMREF(ifa); TAILQ_REMOVE(&in_ifaddrhead, ia, ia_link); IFA_LOCK(ifa); - if 
(IA_IS_HASHED(ia)) + if (IA_IS_HASHED(ia)) { in_iahash_remove(ia); + } IFA_UNLOCK(ifa); lck_rw_done(in_ifaddr_rwlock); @@ -829,7 +847,6 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, */ if ((ifp->if_flags & IFF_MULTICAST) || ifp->if_allhostsinm != NULL) { - TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { IFA_LOCK(ifa); if (ifa->ifa_addr->sa_family == AF_INET) { @@ -870,8 +887,9 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, * for legacy reasons. */ error = ifnet_ioctl(ifp, PF_INET, SIOCSIFADDR, ifa); - if (error == EOPNOTSUPP) + if (error == EOPNOTSUPP) { error = 0; + } /* Release reference from ifa_ifpgetprimary() */ IFA_REMREF(ifa); @@ -884,7 +902,7 @@ inctl_ifaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, /* NOTREACHED */ } - return (error); + return error; } /* @@ -903,30 +921,32 @@ inctl_ifdstaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, VERIFY(ifp != NULL); - if (!(ifp->if_flags & IFF_POINTOPOINT)) - return (EINVAL); + if (!(ifp->if_flags & IFF_POINTOPOINT)) { + return EINVAL; + } - bzero(&in_event_data, sizeof (struct kev_in_data)); - bzero(&ev_msg, sizeof (struct kev_msg)); + bzero(&in_event_data, sizeof(struct kev_in_data)); + bzero(&ev_msg, sizeof(struct kev_msg)); switch (cmd) { - case SIOCGIFDSTADDR: /* struct ifreq */ + case SIOCGIFDSTADDR: /* struct ifreq */ if (ia == NULL) { error = EADDRNOTAVAIL; break; } IFA_LOCK(&ia->ia_ifa); - bcopy(&ia->ia_dstaddr, &ifr->ifr_dstaddr, sizeof (dstaddr)); + bcopy(&ia->ia_dstaddr, &ifr->ifr_dstaddr, sizeof(dstaddr)); IFA_UNLOCK(&ia->ia_ifa); break; - case SIOCSIFDSTADDR: /* struct ifreq */ + case SIOCSIFDSTADDR: /* struct ifreq */ VERIFY(ia != NULL); IFA_LOCK(&ia->ia_ifa); dstaddr = ia->ia_dstaddr; - bcopy(&ifr->ifr_dstaddr, &ia->ia_dstaddr, sizeof (dstaddr)); - if (ia->ia_dstaddr.sin_family == AF_INET) - ia->ia_dstaddr.sin_len = sizeof (struct sockaddr_in); + bcopy(&ifr->ifr_dstaddr, &ia->ia_dstaddr, sizeof(dstaddr)); + if (ia->ia_dstaddr.sin_family == AF_INET) { + ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in); + } IFA_UNLOCK(&ia->ia_ifa); /* * NOTE: SIOCSIFDSTADDR is defined with struct ifreq @@ -936,8 +956,9 @@ inctl_ifdstaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, */ error = ifnet_ioctl(ifp, PF_INET, SIOCSIFDSTADDR, ia); IFA_LOCK(&ia->ia_ifa); - if (error == EOPNOTSUPP) + if (error == EOPNOTSUPP) { error = 0; + } if (error != 0) { ia->ia_dstaddr = dstaddr; IFA_UNLOCK(&ia->ia_ifa); @@ -945,11 +966,11 @@ inctl_ifdstaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, } IFA_LOCK_ASSERT_HELD(&ia->ia_ifa); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_INET_SUBCLASS; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; - ev_msg.event_code = KEV_INET_SIFDSTADDR; + ev_msg.event_code = KEV_INET_SIFDSTADDR; if (ia->ia_ifa.ifa_dstaddr) { in_event_data.ia_dstaddr = ((struct sockaddr_in *) @@ -958,12 +979,12 @@ inctl_ifdstaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, in_event_data.ia_dstaddr.s_addr = INADDR_ANY; } - in_event_data.ia_addr = ia->ia_addr.sin_addr; - in_event_data.ia_net = ia->ia_net; - in_event_data.ia_netmask = ia->ia_netmask; - in_event_data.ia_subnet = ia->ia_subnet; - in_event_data.ia_subnetmask = ia->ia_subnetmask; - in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = 
ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; IFA_UNLOCK(&ia->ia_ifa); (void) strlcpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); @@ -971,7 +992,7 @@ inctl_ifdstaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, in_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit; ev_msg.dv[0].data_ptr = &in_event_data; - ev_msg.dv[0].data_length = sizeof (struct kev_in_data); + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(ifp, &ev_msg); @@ -987,7 +1008,7 @@ inctl_ifdstaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, (struct sockaddr *)&ia->ia_dstaddr; IFA_UNLOCK(&ia->ia_ifa); rtinit_locked(&(ia->ia_ifa), (int)RTM_ADD, - RTF_HOST|RTF_UP); + RTF_HOST | RTF_UP); } else { IFA_UNLOCK(&ia->ia_ifa); } @@ -1001,7 +1022,7 @@ inctl_ifdstaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, /* NOTREACHED */ } - return (error); + return error; } /* @@ -1019,31 +1040,33 @@ inctl_ifbrdaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, VERIFY(ifp != NULL); - if (ia == NULL) - return (EADDRNOTAVAIL); + if (ia == NULL) { + return EADDRNOTAVAIL; + } - if (!(ifp->if_flags & IFF_BROADCAST)) - return (EINVAL); + if (!(ifp->if_flags & IFF_BROADCAST)) { + return EINVAL; + } - bzero(&in_event_data, sizeof (struct kev_in_data)); - bzero(&ev_msg, sizeof (struct kev_msg)); + bzero(&in_event_data, sizeof(struct kev_in_data)); + bzero(&ev_msg, sizeof(struct kev_msg)); switch (cmd) { - case SIOCGIFBRDADDR: /* struct ifreq */ + case SIOCGIFBRDADDR: /* struct ifreq */ IFA_LOCK(&ia->ia_ifa); bcopy(&ia->ia_broadaddr, &ifr->ifr_broadaddr, - sizeof (struct sockaddr_in)); + sizeof(struct sockaddr_in)); IFA_UNLOCK(&ia->ia_ifa); break; - case SIOCSIFBRDADDR: /* struct ifreq */ + case SIOCSIFBRDADDR: /* struct ifreq */ IFA_LOCK(&ia->ia_ifa); bcopy(&ifr->ifr_broadaddr, &ia->ia_broadaddr, - sizeof (struct sockaddr_in)); + sizeof(struct sockaddr_in)); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_INET_SUBCLASS; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; ev_msg.event_code = KEV_INET_SIFBRDADDR; @@ -1053,12 +1076,12 @@ inctl_ifbrdaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, } else { in_event_data.ia_dstaddr.s_addr = INADDR_ANY; } - in_event_data.ia_addr = ia->ia_addr.sin_addr; - in_event_data.ia_net = ia->ia_net; - in_event_data.ia_netmask = ia->ia_netmask; - in_event_data.ia_subnet = ia->ia_subnet; - in_event_data.ia_subnetmask = ia->ia_subnetmask; - in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; IFA_UNLOCK(&ia->ia_ifa); (void) strlcpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); @@ -1066,7 +1089,7 @@ inctl_ifbrdaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, in_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit; ev_msg.dv[0].data_ptr = &in_event_data; - ev_msg.dv[0].data_length = sizeof (struct kev_in_data); + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(ifp, &ev_msg); @@ -1077,7 +1100,7 @@ 
inctl_ifbrdaddr(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, /* NOTREACHED */ } - return (error); + return error; } /* @@ -1096,32 +1119,32 @@ inctl_ifnetmask(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, VERIFY(ifp != NULL); - bzero(&in_event_data, sizeof (struct kev_in_data)); - bzero(&ev_msg, sizeof (struct kev_msg)); + bzero(&in_event_data, sizeof(struct kev_in_data)); + bzero(&ev_msg, sizeof(struct kev_msg)); switch (cmd) { - case SIOCGIFNETMASK: /* struct ifreq */ + case SIOCGIFNETMASK: /* struct ifreq */ if (ia == NULL) { error = EADDRNOTAVAIL; break; } IFA_LOCK(&ia->ia_ifa); - bcopy(&ia->ia_sockmask, &ifr->ifr_addr, sizeof (mask)); + bcopy(&ia->ia_sockmask, &ifr->ifr_addr, sizeof(mask)); IFA_UNLOCK(&ia->ia_ifa); break; - case SIOCSIFNETMASK: { /* struct ifreq */ + case SIOCSIFNETMASK: { /* struct ifreq */ in_addr_t i; - bcopy(&ifr->ifr_addr, &mask, sizeof (mask)); + bcopy(&ifr->ifr_addr, &mask, sizeof(mask)); i = mask.sin_addr.s_addr; VERIFY(ia != NULL); IFA_LOCK(&ia->ia_ifa); ia->ia_subnetmask = ntohl(ia->ia_sockmask.sin_addr.s_addr = i); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_INET_SUBCLASS; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_INET_SUBCLASS; ev_msg.event_code = KEV_INET_SIFNETMASK; @@ -1131,12 +1154,12 @@ inctl_ifnetmask(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, } else { in_event_data.ia_dstaddr.s_addr = INADDR_ANY; } - in_event_data.ia_addr = ia->ia_addr.sin_addr; - in_event_data.ia_net = ia->ia_net; - in_event_data.ia_netmask = ia->ia_netmask; - in_event_data.ia_subnet = ia->ia_subnet; - in_event_data.ia_subnetmask = ia->ia_subnetmask; - in_event_data.ia_netbroadcast = ia->ia_netbroadcast; + in_event_data.ia_addr = ia->ia_addr.sin_addr; + in_event_data.ia_net = ia->ia_net; + in_event_data.ia_netmask = ia->ia_netmask; + in_event_data.ia_subnet = ia->ia_subnet; + in_event_data.ia_subnetmask = ia->ia_subnetmask; + in_event_data.ia_netbroadcast = ia->ia_netbroadcast; IFA_UNLOCK(&ia->ia_ifa); (void) strlcpy(&in_event_data.link_data.if_name[0], ifp->if_name, IFNAMSIZ); @@ -1144,7 +1167,7 @@ inctl_ifnetmask(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, in_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit; ev_msg.dv[0].data_ptr = &in_event_data; - ev_msg.dv[0].data_length = sizeof (struct kev_in_data); + ev_msg.dv[0].data_length = sizeof(struct kev_in_data); ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(ifp, &ev_msg); @@ -1156,7 +1179,7 @@ inctl_ifnetmask(struct ifnet *ifp, struct in_ifaddr *ia, u_long cmd, /* NOTREACHED */ } - return (error); + return error; } /* @@ -1192,19 +1215,19 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * ioctls which don't require ifp, but require socket. 
*/ switch (cmd) { - case SIOCGASSOCIDS32: /* struct so_aidreq32 */ - case SIOCGASSOCIDS64: /* struct so_aidreq64 */ - return (inctl_associd(so, cmd, data)); - /* NOTREACHED */ - - case SIOCGCONNIDS32: /* struct so_cidreq32 */ - case SIOCGCONNIDS64: /* struct so_cidreq64 */ - return (inctl_connid(so, cmd, data)); - /* NOTREACHED */ - - case SIOCGCONNINFO32: /* struct so_cinforeq32 */ - case SIOCGCONNINFO64: /* struct so_cinforeq64 */ - return (inctl_conninfo(so, cmd, data)); + case SIOCGASSOCIDS32: /* struct so_aidreq32 */ + case SIOCGASSOCIDS64: /* struct so_aidreq64 */ + return inctl_associd(so, cmd, data); + /* NOTREACHED */ + + case SIOCGCONNIDS32: /* struct so_cidreq32 */ + case SIOCGCONNIDS64: /* struct so_cidreq64 */ + return inctl_connid(so, cmd, data); + /* NOTREACHED */ + + case SIOCGCONNINFO32: /* struct so_cinforeq32 */ + case SIOCGCONNINFO64: /* struct so_cinforeq64 */ + return inctl_conninfo(so, cmd, data); /* NOTREACHED */ } @@ -1212,40 +1235,46 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * The rest of ioctls require ifp; reject if we don't have one; * return ENXIO to be consistent with ifioctl(). */ - if (ifp == NULL) - return (ENXIO); + if (ifp == NULL) { + return ENXIO; + } /* * ioctls which require ifp but not interface address. */ switch (cmd) { - case SIOCAUTOADDR: /* struct ifreq */ - if (!privileged) - return (EPERM); - return (inctl_autoaddr(ifp, ifr)); - /* NOTREACHED */ + case SIOCAUTOADDR: /* struct ifreq */ + if (!privileged) { + return EPERM; + } + return inctl_autoaddr(ifp, ifr); + /* NOTREACHED */ - case SIOCARPIPLL: /* struct ifreq */ - if (!privileged) - return (EPERM); - return (inctl_arpipll(ifp, ifr)); - /* NOTREACHED */ + case SIOCARPIPLL: /* struct ifreq */ + if (!privileged) { + return EPERM; + } + return inctl_arpipll(ifp, ifr); + /* NOTREACHED */ - case SIOCSETROUTERMODE: /* struct ifreq */ - if (!privileged) - return (EPERM); - return (inctl_setrouter(ifp, ifr)); - /* NOTREACHED */ + case SIOCSETROUTERMODE: /* struct ifreq */ + if (!privileged) { + return EPERM; + } + return inctl_setrouter(ifp, ifr); + /* NOTREACHED */ - case SIOCPROTOATTACH: /* struct ifreq */ - if (!privileged) - return (EPERM); - return (in_domifattach(ifp)); - /* NOTREACHED */ + case SIOCPROTOATTACH: /* struct ifreq */ + if (!privileged) { + return EPERM; + } + return in_domifattach(ifp); + /* NOTREACHED */ - case SIOCPROTODETACH: /* struct ifreq */ - if (!privileged) - return (EPERM); + case SIOCPROTODETACH: /* struct ifreq */ + if (!privileged) { + return EPERM; + } /* * If an IPv4 address is still present, refuse to detach. @@ -1260,7 +1289,7 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, IFA_UNLOCK(ifa); } ifnet_lock_done(ifp); - return ((ifa == NULL) ? proto_unplumb(PF_INET, ifp) : EBUSY); + return (ifa == NULL) ? proto_unplumb(PF_INET, ifp) : EBUSY; /* NOTREACHED */ } @@ -1268,27 +1297,29 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * ioctls which require interface address; obtain sockaddr_in. 
*/ switch (cmd) { - case SIOCAIFADDR: /* struct {if,in_}aliasreq */ - if (!privileged) - return (EPERM); + case SIOCAIFADDR: /* struct {if,in_}aliasreq */ + if (!privileged) { + return EPERM; + } bcopy(&((struct in_aliasreq *)(void *)data)->ifra_addr, - &sin, sizeof (sin)); + &sin, sizeof(sin)); sa = &sin; break; - case SIOCDIFADDR: /* struct ifreq */ - case SIOCSIFADDR: /* struct ifreq */ - case SIOCSIFDSTADDR: /* struct ifreq */ - case SIOCSIFNETMASK: /* struct ifreq */ - case SIOCSIFBRDADDR: /* struct ifreq */ - if (!privileged) - return (EPERM); - /* FALLTHRU */ - case SIOCGIFADDR: /* struct ifreq */ - case SIOCGIFDSTADDR: /* struct ifreq */ - case SIOCGIFNETMASK: /* struct ifreq */ - case SIOCGIFBRDADDR: /* struct ifreq */ - bcopy(&ifr->ifr_addr, &sin, sizeof (sin)); + case SIOCDIFADDR: /* struct ifreq */ + case SIOCSIFADDR: /* struct ifreq */ + case SIOCSIFDSTADDR: /* struct ifreq */ + case SIOCSIFNETMASK: /* struct ifreq */ + case SIOCSIFBRDADDR: /* struct ifreq */ + if (!privileged) { + return EPERM; + } + /* FALLTHRU */ + case SIOCGIFADDR: /* struct ifreq */ + case SIOCGIFDSTADDR: /* struct ifreq */ + case SIOCGIFNETMASK: /* struct ifreq */ + case SIOCGIFBRDADDR: /* struct ifreq */ + bcopy(&ifr->ifr_addr, &sin, sizeof(sin)); sa = &sin; break; } @@ -1352,8 +1383,9 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, IFA_UNLOCK(&iap->ia_ifa); } /* take a reference on ia before releasing lock */ - if (ia != NULL) + if (ia != NULL) { IFA_ADDREF(&ia->ia_ifa); + } ifnet_lock_done(ifp); } } @@ -1369,18 +1401,18 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, } switch (cmd) { - case SIOCAIFADDR: /* struct {if,in_}aliasreq */ - case SIOCDIFADDR: /* struct ifreq */ + case SIOCAIFADDR: /* struct {if,in_}aliasreq */ + case SIOCDIFADDR: /* struct ifreq */ if (cmd == SIOCAIFADDR) { bcopy(&((struct in_aliasreq *)(void *)data)-> - ifra_addr, &addr, sizeof (addr)); + ifra_addr, &addr, sizeof(addr)); bcopy(&((struct in_aliasreq *)(void *)data)-> - ifra_dstaddr, &dstaddr, sizeof (dstaddr)); + ifra_dstaddr, &dstaddr, sizeof(dstaddr)); } else { VERIFY(cmd == SIOCDIFADDR); bcopy(&((struct ifreq *)(void *)data)->ifr_addr, - &addr, sizeof (addr)); - bzero(&dstaddr, sizeof (dstaddr)); + &addr, sizeof(addr)); + bzero(&dstaddr, sizeof(dstaddr)); } if (addr.sin_family == AF_INET) { @@ -1399,8 +1431,9 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, IFA_UNLOCK(&ia->ia_ifa); } lck_rw_done(in_ifaddr_rwlock); - if (oia != NULL) + if (oia != NULL) { IFA_REMREF(&oia->ia_ifa); + } if ((ifp->if_flags & IFF_POINTOPOINT) && (cmd == SIOCAIFADDR) && (dstaddr.sin_addr.s_addr == INADDR_ANY)) { @@ -1415,19 +1448,19 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = EADDRNOTAVAIL; goto done; } - /* FALLTHROUGH */ - case SIOCSIFADDR: /* struct ifreq */ - case SIOCSIFDSTADDR: /* struct ifreq */ - case SIOCSIFNETMASK: /* struct ifreq */ + /* FALLTHROUGH */ + case SIOCSIFADDR: /* struct ifreq */ + case SIOCSIFDSTADDR: /* struct ifreq */ + case SIOCSIFNETMASK: /* struct ifreq */ if (cmd == SIOCAIFADDR) { /* fell thru from above; just repeat it */ bcopy(&((struct in_aliasreq *)(void *)data)-> - ifra_addr, &addr, sizeof (addr)); + ifra_addr, &addr, sizeof(addr)); } else { VERIFY(cmd == SIOCDIFADDR || cmd == SIOCSIFADDR || cmd == SIOCSIFNETMASK || cmd == SIOCSIFDSTADDR); bcopy(&((struct ifreq *)(void *)data)->ifr_addr, - &addr, sizeof (addr)); + &addr, sizeof(addr)); } if (addr.sin_family != AF_INET && cmd == 
SIOCSIFADDR) { @@ -1451,12 +1484,13 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, ifa->ifa_netmask = (struct sockaddr *)&ia->ia_sockmask; ia->ia_sockmask.sin_len = 8; if (ifp->if_flags & IFF_BROADCAST) { - ia->ia_broadaddr.sin_len = sizeof (ia->ia_addr); + ia->ia_broadaddr.sin_len = sizeof(ia->ia_addr); ia->ia_broadaddr.sin_family = AF_INET; } ia->ia_ifp = ifp; - if (!(ifp->if_flags & IFF_LOOPBACK)) + if (!(ifp->if_flags & IFF_LOOPBACK)) { in_interfaces++; + } /* if_attach_ifa() holds a reference for ifa_link */ if_attach_ifa(ifp, ifa); /* @@ -1465,8 +1499,9 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * via PFC_IFUP event, before the link resolver (ARP) * initializes it. */ - if (cmd == SIOCAIFADDR || cmd == SIOCSIFADDR) + if (cmd == SIOCAIFADDR || cmd == SIOCSIFADDR) { ifa->ifa_debug |= IFD_NOTREADY; + } IFA_UNLOCK(ifa); ifnet_lock_done(ifp); lck_rw_lock_exclusive(in_ifaddr_rwlock); @@ -1482,25 +1517,25 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, } switch (cmd) { - case SIOCGIFDSTADDR: /* struct ifreq */ - case SIOCSIFDSTADDR: /* struct ifreq */ + case SIOCGIFDSTADDR: /* struct ifreq */ + case SIOCSIFDSTADDR: /* struct ifreq */ error = inctl_ifdstaddr(ifp, ia, cmd, ifr); break; - case SIOCGIFBRDADDR: /* struct ifreq */ - case SIOCSIFBRDADDR: /* struct ifreq */ + case SIOCGIFBRDADDR: /* struct ifreq */ + case SIOCSIFBRDADDR: /* struct ifreq */ error = inctl_ifbrdaddr(ifp, ia, cmd, ifr); break; - case SIOCGIFNETMASK: /* struct ifreq */ - case SIOCSIFNETMASK: /* struct ifreq */ + case SIOCGIFNETMASK: /* struct ifreq */ + case SIOCSIFNETMASK: /* struct ifreq */ error = inctl_ifnetmask(ifp, ia, cmd, ifr); break; - case SIOCGIFADDR: /* struct ifreq */ - case SIOCSIFADDR: /* struct ifreq */ - case SIOCAIFADDR: /* struct {if,in_}aliasreq */ - case SIOCDIFADDR: /* struct ifreq */ + case SIOCGIFADDR: /* struct ifreq */ + case SIOCSIFADDR: /* struct ifreq */ + case SIOCAIFADDR: /* struct {if,in_}aliasreq */ + case SIOCDIFADDR: /* struct ifreq */ error = inctl_ifaddr(ifp, ia, cmd, ifr); break; @@ -1509,12 +1544,14 @@ in_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, break; } done: - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); - if (so_unlocked) + } + if (so_unlocked) { socket_lock(so, 0); + } - return (error); + return error; } /* @@ -1529,17 +1566,20 @@ in_ifscrub(struct ifnet *ifp, struct in_ifaddr *ia, int locked) return; } IFA_UNLOCK(&ia->ia_ifa); - if (!locked) + if (!locked) { lck_mtx_lock(rnh_lock); - if (ifp->if_flags & (IFF_LOOPBACK|IFF_POINTOPOINT)) + } + if (ifp->if_flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) { rtinit_locked(&(ia->ia_ifa), (int)RTM_DELETE, RTF_HOST); - else + } else { rtinit_locked(&(ia->ia_ifa), (int)RTM_DELETE, 0); + } IFA_LOCK(&ia->ia_ifa); ia->ia_flags &= ~IFA_ROUTE; IFA_UNLOCK(&ia->ia_ifa); - if (!locked) + if (!locked) { lck_mtx_unlock(rnh_lock); + } } /* @@ -1665,13 +1705,14 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, * Interface addresses should not contain port or sin_zero information. 
*/ SIN(&ia->ia_addr)->sin_family = AF_INET; - SIN(&ia->ia_addr)->sin_len = sizeof (struct sockaddr_in); + SIN(&ia->ia_addr)->sin_len = sizeof(struct sockaddr_in); SIN(&ia->ia_addr)->sin_port = 0; - bzero(&SIN(&ia->ia_addr)->sin_zero, sizeof (sin->sin_zero)); - if ((ifp->if_flags & IFF_POINTOPOINT)) + bzero(&SIN(&ia->ia_addr)->sin_zero, sizeof(sin->sin_zero)); + if ((ifp->if_flags & IFF_POINTOPOINT)) { in_iahash_insert_ptp(ia); - else + } else { in_iahash_insert(ia); + } IFA_UNLOCK(&ia->ia_ifa); lck_rw_done(in_ifaddr_rwlock); @@ -1685,8 +1726,9 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, ifa0 = ifa_ifpgetprimary(ifp, AF_INET); cmd = (&ia->ia_ifa == ifa0) ? SIOCSIFADDR : SIOCAIFADDR; error = ifnet_ioctl(ifp, PF_INET, cmd, ia); - if (error == EOPNOTSUPP) + if (error == EOPNOTSUPP) { error = 0; + } /* * If we've just sent down SIOCAIFADDR, send another ioctl down * for SIOCSIFADDR for the first IPV4 address of the interface, @@ -1702,8 +1744,9 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, * for legacy reasons. */ error = ifnet_ioctl(ifp, PF_INET, SIOCSIFADDR, ifa0); - if (error == EOPNOTSUPP) + if (error == EOPNOTSUPP) { error = 0; + } } /* Release reference from ifa_ifpgetprimary() */ @@ -1712,20 +1755,22 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, if (error) { lck_rw_lock_exclusive(in_ifaddr_rwlock); IFA_LOCK(&ia->ia_ifa); - if (IA_IS_HASHED(ia)) + if (IA_IS_HASHED(ia)) { in_iahash_remove(ia); + } ia->ia_addr = oldaddr; if (oldremoved) { - if ((ifp->if_flags & IFF_POINTOPOINT)) + if ((ifp->if_flags & IFF_POINTOPOINT)) { in_iahash_insert_ptp(ia); - else + } else { in_iahash_insert(ia); + } } IFA_UNLOCK(&ia->ia_ifa); lck_rw_done(in_ifaddr_rwlock); /* Release extra reference taken above */ IFA_REMREF(&ia->ia_ifa); - return (error); + return error; } lck_mtx_lock(rnh_lock); IFA_LOCK(&ia->ia_ifa); @@ -1742,12 +1787,13 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, ia->ia_ifa.ifa_addr = (struct sockaddr *)&ia->ia_addr; } IFA_LOCK_ASSERT_HELD(&ia->ia_ifa); - if (IN_CLASSA(i)) + if (IN_CLASSA(i)) { ia->ia_netmask = IN_CLASSA_NET; - else if (IN_CLASSB(i)) + } else if (IN_CLASSB(i)) { ia->ia_netmask = IN_CLASSB_NET; - else + } else { ia->ia_netmask = IN_CLASSC_NET; + } /* * The subnet mask usually includes at least the standard network part, * but may be smaller in the case of supernetting.
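The hunks above and below rebrace in_ifinit()'s classful-address logic without changing it: pick a netmask from the address class, fold in the caller's subnet mask (which supernetting may leave shorter than the classful mask), then derive both broadcast addresses from the masks. As a standalone illustration, here is a hedged user-space sketch of the same derivation; it is not part of the patch, the helper name classful_derive is hypothetical, and the IN_CLASS* macros are the ones this patch reformats in netinet/in.h further down.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>	/* IN_CLASSA()/IN_CLASSB() and the *_NET masks */

/* Mirrors the mask/broadcast derivation in in_ifinit(); 'i' and
 * 'subnetmask' are in host byte order, as in the kernel code. */
static void
classful_derive(uint32_t i, uint32_t subnetmask)
{
	uint32_t netmask;

	if (IN_CLASSA(i)) {
		netmask = IN_CLASSA_NET;
	} else if (IN_CLASSB(i)) {
		netmask = IN_CLASSB_NET;
	} else {
		netmask = IN_CLASSC_NET;
	}
	if (subnetmask == 0) {
		subnetmask = netmask;	/* default to the classful mask */
	} else {
		netmask &= subnetmask;	/* supernetting can shrink the net part */
	}
	printf("net 0x%08" PRIx32 " subnet 0x%08" PRIx32
	    " broadcast 0x%08" PRIx32 " netbroadcast 0x%08" PRIx32 "\n",
	    i & netmask, i & subnetmask,
	    (i & subnetmask) | ~subnetmask, (i & netmask) | ~netmask);
}

For example, classful_derive(0xc0a80101, 0xffffff00) reports broadcast 0xc0a801ff, i.e. 192.168.1.255, matching what the kernel stores in ia_broadaddr.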
@@ -1756,8 +1802,9 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, if (ia->ia_subnetmask == 0) { ia->ia_subnetmask = ia->ia_netmask; ia->ia_sockmask.sin_addr.s_addr = htonl(ia->ia_subnetmask); - } else + } else { ia->ia_netmask &= ia->ia_subnetmask; + } ia->ia_net = i & ia->ia_netmask; ia->ia_subnet = i & ia->ia_subnetmask; in_socktrim(&ia->ia_sockmask); @@ -1769,7 +1816,7 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, ia->ia_broadaddr.sin_addr.s_addr = htonl(ia->ia_subnet | ~ia->ia_subnetmask); ia->ia_netbroadcast.s_addr = - htonl(ia->ia_net | ~ ia->ia_netmask); + htonl(ia->ia_net | ~ia->ia_netmask); } else if (ifp->if_flags & IFF_LOOPBACK) { ia->ia_ifa.ifa_dstaddr = ia->ia_ifa.ifa_addr; flags |= RTF_HOST; @@ -1779,9 +1826,9 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, lck_mtx_unlock(rnh_lock); /* Release extra reference taken above */ IFA_REMREF(&ia->ia_ifa); - return (0); + return 0; } - ia->ia_dstaddr.sin_len = sizeof (struct sockaddr_in); + ia->ia_dstaddr.sin_len = sizeof(struct sockaddr_in); flags |= RTF_HOST; } IFA_UNLOCK(&ia->ia_ifa); @@ -1794,8 +1841,9 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, lck_mtx_unlock(rnh_lock); /* XXX check if the subnet route points to the same interface */ - if (error == EEXIST) + if (error == EEXIST) { error = 0; + } /* * If the interface supports multicast, join the "all hosts" @@ -1834,7 +1882,7 @@ in_ifinit(struct ifnet *ifp, struct in_ifaddr *ia, struct sockaddr_in *sin, routegenid_inet_update(); } - return (error); + return error; } /* @@ -1846,41 +1894,43 @@ in_broadcast(struct in_addr in, struct ifnet *ifp) struct ifaddr *ifa; u_int32_t t; - if (in.s_addr == INADDR_BROADCAST || in.s_addr == INADDR_ANY) - return (TRUE); - if (!(ifp->if_flags & IFF_BROADCAST)) - return (FALSE); + if (in.s_addr == INADDR_BROADCAST || in.s_addr == INADDR_ANY) { + return TRUE; + } + if (!(ifp->if_flags & IFF_BROADCAST)) { + return FALSE; + } t = ntohl(in.s_addr); /* * Look through the list of addresses for a match * with a broadcast address. */ -#define ia ((struct in_ifaddr *)ifa) +#define ia ((struct in_ifaddr *)ifa) ifnet_lock_shared(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { IFA_LOCK(ifa); if (ifa->ifa_addr->sa_family == AF_INET && (in.s_addr == ia->ia_broadaddr.sin_addr.s_addr || - in.s_addr == ia->ia_netbroadcast.s_addr || - /* - * Check for old-style (host 0) broadcast. - */ - t == ia->ia_subnet || t == ia->ia_net) && - /* - * Check for an all one subnetmask. These - * only exist when an interface gets a secondary - * address. - */ - ia->ia_subnetmask != (u_int32_t)0xffffffff) { + in.s_addr == ia->ia_netbroadcast.s_addr || + /* + * Check for old-style (host 0) broadcast. + */ + t == ia->ia_subnet || t == ia->ia_net) && + /* + * Check for an all one subnetmask. These + * only exist when an interface gets a secondary + * address. 
+ */ + ia->ia_subnetmask != (u_int32_t)0xffffffff) { IFA_UNLOCK(ifa); ifnet_lock_done(ifp); - return (TRUE); + return TRUE; } IFA_UNLOCK(ifa); } ifnet_lock_done(ifp); - return (FALSE); + return FALSE; #undef ia } @@ -1903,8 +1953,8 @@ in_purgeaddrs(struct ifnet *ifp) if (err == 0 && ifap != NULL) { struct ifreq ifr; - bzero(&ifr, sizeof (ifr)); - (void) snprintf(ifr.ifr_name, sizeof (ifr.ifr_name), + bzero(&ifr, sizeof(ifr)); + (void) snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", if_name(ifp)); for (i = 0; ifap[i] != NULL; i++) { @@ -1913,13 +1963,14 @@ in_purgeaddrs(struct ifnet *ifp) ifa = ifap[i]; IFA_LOCK(ifa); bcopy(ifa->ifa_addr, &ifr.ifr_addr, - sizeof (struct sockaddr_in)); + sizeof(struct sockaddr_in)); IFA_UNLOCK(ifa); err = in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp, kernproc); /* if we lost the race, ignore it */ - if (err == EADDRNOTAVAIL) + if (err == EADDRNOTAVAIL) { err = 0; + } if (err != 0) { char s_addr[MAX_IPv4_STR_LEN]; char s_dstaddr[MAX_IPv4_STR_LEN]; @@ -1931,9 +1982,9 @@ in_purgeaddrs(struct ifnet *ifp) d = &((struct sockaddr_in *) (void *)ifa->ifa_dstaddr)->sin_addr; (void) inet_ntop(AF_INET, &s->s_addr, s_addr, - sizeof (s_addr)); + sizeof(s_addr)); (void) inet_ntop(AF_INET, &d->s_addr, s_dstaddr, - sizeof (s_dstaddr)); + sizeof(s_dstaddr)); IFA_UNLOCK(ifa); printf("%s: SIOCDIFADDR ifp=%s ifa_addr=%s " @@ -1956,10 +2007,10 @@ in_ifaddr_init(void) { in_multi_init(); - PE_parse_boot_argn("ifa_debug", &inifa_debug, sizeof (inifa_debug)); + PE_parse_boot_argn("ifa_debug", &inifa_debug, sizeof(inifa_debug)); - inifa_size = (inifa_debug == 0) ? sizeof (struct in_ifaddr) : - sizeof (struct in_ifaddr_dbg); + inifa_size = (inifa_debug == 0) ? sizeof(struct in_ifaddr) : + sizeof(struct in_ifaddr_dbg); inifa_zone = zinit(inifa_size, INIFA_ZONE_MAX * inifa_size, 0, INIFA_ZONE_NAME); @@ -1996,7 +2047,7 @@ in_ifaddr_alloc(int how) ctrace_record(&inifa_dbg->inifa_alloc); } } - return (inifa); + return inifa; } static void @@ -2007,7 +2058,8 @@ in_ifaddr_free(struct ifaddr *ifa) if (ifa->ifa_refcnt != 0) { panic("%s: ifa %p bad ref cnt", __func__, ifa); /* NOTREACHED */ - } if (!(ifa->ifa_debug & IFD_ALLOC)) { + } + if (!(ifa->ifa_debug & IFD_ALLOC)) { panic("%s: ifa %p cannot be freed", __func__, ifa); /* NOTREACHED */ } @@ -2015,7 +2067,7 @@ in_ifaddr_free(struct ifaddr *ifa) struct in_ifaddr_dbg *inifa_dbg = (struct in_ifaddr_dbg *)ifa; ctrace_record(&inifa_dbg->inifa_free); bcopy(&inifa_dbg->inifa, &inifa_dbg->inifa_old, - sizeof (struct in_ifaddr)); + sizeof(struct in_ifaddr)); if (ifa->ifa_debug & IFD_TRASHED) { /* Become a regular mutex, just in case */ IFA_CONVERT_LOCK(ifa); @@ -2028,7 +2080,7 @@ in_ifaddr_free(struct ifaddr *ifa) } IFA_UNLOCK(ifa); ifa_lock_destroy(ifa); - bzero(ifa, sizeof (struct in_ifaddr)); + bzero(ifa, sizeof(struct in_ifaddr)); zfree(inifa_zone, ifa); } @@ -2108,18 +2160,20 @@ in_getassocids(struct socket *so, uint32_t *cnt, user_addr_t aidp) struct inpcb *inp = sotoinpcb(so); sae_associd_t aid; - if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) - return (EINVAL); + if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) { + return EINVAL; + } /* INPCB has no concept of association */ aid = SAE_ASSOCID_ANY; *cnt = 0; /* just asking how many there are? 
*/ - if (aidp == USER_ADDR_NULL) - return (0); + if (aidp == USER_ADDR_NULL) { + return 0; + } - return (copyout(&aid, aidp, sizeof (aid))); + return copyout(&aid, aidp, sizeof(aid)); } /* @@ -2132,23 +2186,26 @@ in_getconnids(struct socket *so, sae_associd_t aid, uint32_t *cnt, struct inpcb *inp = sotoinpcb(so); sae_connid_t cid; - if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) - return (EINVAL); + if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) { + return EINVAL; + } - if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) - return (EINVAL); + if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) { + return EINVAL; + } /* if connected, return 1 connection count */ *cnt = ((so->so_state & SS_ISCONNECTED) ? 1 : 0); /* just asking how many there are? */ - if (cidp == USER_ADDR_NULL) - return (0); + if (cidp == USER_ADDR_NULL) { + return 0; + } /* if INPCB is connected, assign it connid 1 */ cid = ((*cnt != 0) ? 1 : SAE_CONNID_ANY); - return (copyout(&cid, cidp, sizeof (cid))); + return copyout(&cid, cidp, sizeof(cid)); } /* @@ -2184,17 +2241,21 @@ in_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags, *ifindex = ((ifp != NULL) ? ifp->if_index : 0); *soerror = so->so_error; *flags = 0; - if (so->so_state & SS_ISCONNECTED) + if (so->so_state & SS_ISCONNECTED) { *flags |= (CIF_CONNECTED | CIF_PREFERRED); - if (inp->inp_flags & INP_BOUND_IF) + } + if (inp->inp_flags & INP_BOUND_IF) { *flags |= CIF_BOUND_IF; - if (!(inp->inp_flags & INP_INADDR_ANY)) + } + if (!(inp->inp_flags & INP_INADDR_ANY)) { *flags |= CIF_BOUND_IP; - if (!(inp->inp_flags & INP_ANONPORT)) + } + if (!(inp->inp_flags & INP_ANONPORT)) { *flags |= CIF_BOUND_PORT; + } - bzero(&sin, sizeof (sin)); - sin.sin_len = sizeof (sin); + bzero(&sin, sizeof(sin)); + sin.sin_len = sizeof(sin); sin.sin_family = AF_INET; /* source address and port */ @@ -2204,10 +2265,11 @@ in_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags, *src_len = sin.sin_len; } else { if (src != USER_ADDR_NULL) { - copy_len = min(*src_len, sizeof (sin)); + copy_len = min(*src_len, sizeof(sin)); error = copyout(&sin, src, copy_len); - if (error != 0) + if (error != 0) { goto out; + } *src_len = copy_len; } } @@ -2219,10 +2281,11 @@ in_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags, *dst_len = sin.sin_len; } else { if (dst != USER_ADDR_NULL) { - copy_len = min(*dst_len, sizeof (sin)); + copy_len = min(*dst_len, sizeof(sin)); error = copyout(&sin, dst, copy_len); - if (error != 0) + if (error != 0) { goto out; + } *dst_len = copy_len; } } @@ -2232,15 +2295,16 @@ in_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags, *aux_type = CIAUX_TCP; if (*aux_len == 0) { - *aux_len = sizeof (tcp_ci); + *aux_len = sizeof(tcp_ci); } else { if (aux_data != USER_ADDR_NULL) { - copy_len = min(*aux_len, sizeof (tcp_ci)); - bzero(&tcp_ci, sizeof (tcp_ci)); + copy_len = min(*aux_len, sizeof(tcp_ci)); + bzero(&tcp_ci, sizeof(tcp_ci)); tcp_getconninfo(so, &tcp_ci); error = copyout(&tcp_ci, aux_data, copy_len); - if (error != 0) + if (error != 0) { goto out; + } *aux_len = copy_len; } } @@ -2250,7 +2314,7 @@ in_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags, } out: - return (error); + return error; } struct in_llentry { @@ -2290,8 +2354,9 @@ in_lltable_new(struct in_addr addr4, u_int flags) struct in_llentry *lle; MALLOC(lle, struct in_llentry *, sizeof(struct in_llentry), M_LLTABLE, M_NOWAIT | M_ZERO); - if (lle == NULL) /* NB: caller generates msg */ + if (lle == NULL) { /* NB: caller generates msg */ 
return NULL; + } /* * For IPv4 this will trigger "arpresolve" to generate @@ -2306,7 +2371,7 @@ in_lltable_new(struct in_addr addr4, u_int flags) LLE_REQ_INIT(&lle->base); //callout_init(&lle->base.lle_timer, 1); - return (&lle->base); + return &lle->base; } #define IN_ARE_MASKED_ADDR_EQUAL(d, a, m) ( \ @@ -2322,8 +2387,9 @@ in_lltable_match_prefix(const struct sockaddr *saddr, mask = ((const struct sockaddr_in *)(const void *)smask)->sin_addr; lle_addr.s_addr = ntohl(lle->r_l3addr.addr4.s_addr); - if (IN_ARE_MASKED_ADDR_EQUAL(lle_addr, addr, mask) == 0) - return (0); + if (IN_ARE_MASKED_ADDR_EQUAL(lle_addr, addr, mask) == 0) { + return 0; + } if (lle->la_flags & LLE_IFADDR) { /* @@ -2333,16 +2399,18 @@ in_lltable_match_prefix(const struct sockaddr *saddr, * Note also we should handle 'ifdown' cases without removing * ifaddr macs. */ - if (addr.s_addr == lle_addr.s_addr && (flags & LLE_STATIC) != 0) - return (1); - return (0); + if (addr.s_addr == lle_addr.s_addr && (flags & LLE_STATIC) != 0) { + return 1; + } + return 0; } /* flags & LLE_STATIC means deleting both dynamic and static entries */ - if ((flags & LLE_STATIC) || !(lle->la_flags & LLE_STATIC)) - return (1); + if ((flags & LLE_STATIC) || !(lle->la_flags & LLE_STATIC)) { + return 1; + } - return (0); + return 0; } static void @@ -2363,8 +2431,9 @@ in_lltable_free_entry(struct lltable *llt, struct llentry *lle) #if 0 /* cancel timer */ - if (callout_stop(&lle->lle_timer) > 0) + if (callout_stop(&lle->lle_timer) > 0) { LLE_REMREF(lle); + } #endif /* Drop hold queue */ pkts_dropped = llentry_free(lle); @@ -2379,16 +2448,17 @@ in_lltable_rtcheck(struct ifnet *ifp, u_int flags, const struct sockaddr *l3addr struct rtentry *rt; KASSERT(l3addr->sa_family == AF_INET, - ("sin_family %d", l3addr->sa_family)); + ("sin_family %d", l3addr->sa_family)); /* XXX rtalloc1 should take a const param */ rt = rtalloc1(__DECONST(struct sockaddr *, l3addr), 0, 0); if (rt == NULL || (rt->rt_flags & RTF_GATEWAY) || rt->rt_ifp != ifp) { log(LOG_INFO, "IPv4 address: \"%s\" is not on the network\n", - inet_ntoa(((const struct sockaddr_in *)(const void *)l3addr)->sin_addr)); - if (rt != NULL) + inet_ntoa(((const struct sockaddr_in *)(const void *)l3addr)->sin_addr)); + if (rt != NULL) { rtfree_locked(rt); - return (EINVAL); + } + return EINVAL; } rtfree_locked(rt); return 0; @@ -2397,13 +2467,13 @@ in_lltable_rtcheck(struct ifnet *ifp, u_int flags, const struct sockaddr *l3addr static inline uint32_t in_lltable_hash_dst(const struct in_addr dst, uint32_t hsize) { - return (IN_LLTBL_HASH(dst.s_addr, hsize)); + return IN_LLTBL_HASH(dst.s_addr, hsize); } static uint32_t in_lltable_hash(const struct llentry *lle, uint32_t hsize) { - return (in_lltable_hash_dst(lle->r_l3addr.addr4, hsize)); + return in_lltable_hash_dst(lle->r_l3addr.addr4, hsize); } @@ -2429,13 +2499,15 @@ in_lltable_find_dst(struct lltable *llt, struct in_addr dst) hashidx = in_lltable_hash_dst(dst, llt->llt_hsize); lleh = &llt->lle_head[hashidx]; LIST_FOREACH(lle, lleh, lle_next) { - if (lle->la_flags & LLE_DELETED) + if (lle->la_flags & LLE_DELETED) { continue; - if (lle->r_l3addr.addr4.s_addr == dst.s_addr) + } + if (lle->r_l3addr.addr4.s_addr == dst.s_addr) { break; + } } - return (lle); + return lle; } static void @@ -2458,7 +2530,7 @@ in_lltable_alloc(struct lltable *llt, u_int flags, const struct sockaddr *l3addr struct llentry *lle; KASSERT(l3addr->sa_family == AF_INET, - ("sin_family %d", l3addr->sa_family)); + ("sin_family %d", l3addr->sa_family)); /* * A route that covers the given 
address must have @@ -2466,23 +2538,25 @@ in_lltable_alloc(struct lltable *llt, u_int flags, const struct sockaddr *l3addr * verify this. */ if (!(flags & LLE_IFADDR) && - in_lltable_rtcheck(ifp, flags, l3addr) != 0) - return (NULL); + in_lltable_rtcheck(ifp, flags, l3addr) != 0) { + return NULL; + } lle = in_lltable_new(sin->sin_addr, flags); if (lle == NULL) { log(LOG_INFO, "lla_lookup: new lle malloc failed\n"); - return (NULL); + return NULL; } lle->la_flags = flags & ~LLE_CREATE; - if (flags & LLE_STATIC) + if (flags & LLE_STATIC) { lle->r_flags |= RLLE_VALID; + } if ((flags & LLE_IFADDR) == LLE_IFADDR) { lltable_set_entry_addr(ifp, lle, LLADDR(SDL(ifp->if_lladdr->ifa_addr))); lle->la_flags |= LLE_STATIC; lle->r_flags |= (RLLE_VALID | RLLE_IFADDR); } - return (lle); + return lle; } /* @@ -2498,25 +2572,28 @@ in_lltable_lookup(struct lltable *llt, u_int flags, const struct sockaddr *l3add IF_AFDATA_WLOCK_ASSERT(llt->llt_ifp, llt->llt_af); KASSERT(l3addr->sa_family == AF_INET, - ("sin_family %d", l3addr->sa_family)); + ("sin_family %d", l3addr->sa_family)); lle = in_lltable_find_dst(llt, sin->sin_addr); - if (lle == NULL) - return (NULL); + if (lle == NULL) { + return NULL; + } - KASSERT((flags & (LLE_UNLOCKED|LLE_EXCLUSIVE)) != - (LLE_UNLOCKED|LLE_EXCLUSIVE),("wrong lle request flags: 0x%X", - flags)); + KASSERT((flags & (LLE_UNLOCKED | LLE_EXCLUSIVE)) != + (LLE_UNLOCKED | LLE_EXCLUSIVE), ("wrong lle request flags: 0x%X", + flags)); - if (flags & LLE_UNLOCKED) - return (lle); + if (flags & LLE_UNLOCKED) { + return lle; + } - if (flags & LLE_EXCLUSIVE) + if (flags & LLE_EXCLUSIVE) { LLE_WLOCK(lle); - else + } else { LLE_RLOCK(lle); + } - return (lle); + return lle; } static int @@ -2535,10 +2612,11 @@ in_lltable_dump_entry(struct lltable *llt, struct llentry *lle, bzero(&arpc, sizeof(arpc)); /* skip deleted entries */ - if ((lle->la_flags & LLE_DELETED) == LLE_DELETED) - return (0); + if ((lle->la_flags & LLE_DELETED) == LLE_DELETED) { + return 0; + } /* Skip if jailed and not a valid IP of the prison. */ - lltable_fill_sa_entry(lle,(struct sockaddr *)&arpc.sin); + lltable_fill_sa_entry(lle, (struct sockaddr *)&arpc.sin); /* * produce a msg made of: * struct rt_msghdr; @@ -2552,8 +2630,9 @@ in_lltable_dump_entry(struct lltable *llt, struct llentry *lle, arpc.rtm.rtm_addrs = RTA_DST | RTA_GATEWAY; /* publish */ - if (lle->la_flags & LLE_PUB) + if (lle->la_flags & LLE_PUB) { arpc.rtm.rtm_flags |= RTF_ANNOUNCE; + } sdl = &arpc.sdl; sdl->sdl_family = AF_LINK; @@ -2569,17 +2648,19 @@ in_lltable_dump_entry(struct lltable *llt, struct llentry *lle, } arpc.rtm.rtm_rmx.rmx_expire = - lle->la_flags & LLE_STATIC ? 0 : lle->la_expire; + lle->la_flags & LLE_STATIC ? 
0 : lle->la_expire; arpc.rtm.rtm_flags |= (RTF_HOST | RTF_LLDATA); - if (lle->la_flags & LLE_STATIC) + if (lle->la_flags & LLE_STATIC) { arpc.rtm.rtm_flags |= RTF_STATIC; - if (lle->la_flags & LLE_IFADDR) + } + if (lle->la_flags & LLE_IFADDR) { arpc.rtm.rtm_flags |= RTF_PINNED; + } arpc.rtm.rtm_flags |= RTF_PINNED; arpc.rtm.rtm_index = ifp->if_index; error = SYSCTL_OUT(wr, &arpc, sizeof(arpc)); - return (error); + return error; } static struct lltable * @@ -2601,7 +2682,7 @@ in_lltattach(struct ifnet *ifp) llt->llt_match_prefix = in_lltable_match_prefix; lltable_link(llt); - return (llt); + return llt; } struct in_ifaddr* @@ -2626,7 +2707,7 @@ inifa_ifpwithflag(struct ifnet * ifp, uint32_t flag) } ifnet_lock_done(ifp); - return ((struct in_ifaddr *)ifa); + return (struct in_ifaddr *)ifa; } struct in_ifaddr * @@ -2655,5 +2736,5 @@ inifa_ifpclatv4(struct ifnet * ifp) } ifnet_lock_done(ifp); - return ((struct in_ifaddr *)ifa); + return (struct in_ifaddr *)ifa; } diff --git a/bsd/netinet/in.h b/bsd/netinet/in.h index 5a8400e22..6be1d8272 100644 --- a/bsd/netinet/in.h +++ b/bsd/netinet/in.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -65,7 +65,7 @@ #define _NETINET_IN_H_ #include <sys/appleapiopts.h> #include <sys/_types.h> -#include <stdint.h> /* uint(8|16|32)_t */ +#include <stdint.h> /* uint(8|16|32)_t */ #ifndef KERNEL #include <Availability.h> @@ -96,131 +96,131 @@ /* * Protocols (RFC 1700) */ -#define IPPROTO_IP 0 /* dummy for IP */ +#define IPPROTO_IP 0 /* dummy for IP */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IPPROTO_HOPOPTS 0 /* IP6 hop-by-hop options */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define IPPROTO_ICMP 1 /* control message protocol */ +#define IPPROTO_HOPOPTS 0 /* IP6 hop-by-hop options */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define IPPROTO_ICMP 1 /* control message protocol */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IPPROTO_IGMP 2 /* group mgmt protocol */ -#define IPPROTO_GGP 3 /* gateway^2 (deprecated) */ -#define IPPROTO_IPV4 4 /* IPv4 encapsulation */ -#define IPPROTO_IPIP IPPROTO_IPV4 /* for compatibility */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define IPPROTO_TCP 6 /* tcp */ +#define IPPROTO_IGMP 2 /* group mgmt protocol */ +#define IPPROTO_GGP 3 /* gateway^2 (deprecated) */ +#define IPPROTO_IPV4 4 /* IPv4 encapsulation */ +#define IPPROTO_IPIP IPPROTO_IPV4 /* for compatibility */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define IPPROTO_TCP 6 /* tcp */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IPPROTO_ST 7 /* Stream protocol II */ -#define IPPROTO_EGP 8 /* exterior gateway protocol */ -#define IPPROTO_PIGP 9 /* private interior gateway */ -#define IPPROTO_RCCMON 10 /* BBN RCC Monitoring */ -#define IPPROTO_NVPII 11 /* network voice protocol*/ -#define IPPROTO_PUP 12 /* pup */ -#define IPPROTO_ARGUS 13 /* Argus */ -#define IPPROTO_EMCON 14 /* EMCON */ -#define IPPROTO_XNET 15 /* Cross Net Debugger */ -#define IPPROTO_CHAOS 16 /* Chaos*/ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define IPPROTO_UDP 17 /* user datagram protocol */ +#define IPPROTO_ST 7 /* Stream protocol II */ +#define IPPROTO_EGP 8 /* exterior gateway protocol */ +#define IPPROTO_PIGP 9 /* private interior gateway */ +#define IPPROTO_RCCMON 10 /* BBN RCC Monitoring */ +#define IPPROTO_NVPII 11 /* network voice protocol*/ +#define IPPROTO_PUP 12 /* pup */ +#define IPPROTO_ARGUS 13 /* Argus */ +#define IPPROTO_EMCON 14 /* EMCON */ +#define IPPROTO_XNET 15 /* Cross Net Debugger */ +#define IPPROTO_CHAOS 16 /* Chaos*/ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define IPPROTO_UDP 17 /* user datagram protocol */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IPPROTO_MUX 18 /* Multiplexing */ -#define IPPROTO_MEAS 19 /* DCN Measurement Subsystems */ -#define IPPROTO_HMP 20 /* Host Monitoring */ -#define IPPROTO_PRM 21 /* Packet Radio Measurement */ -#define IPPROTO_IDP 22 /* xns idp */ -#define IPPROTO_TRUNK1 23 /* Trunk-1 */ -#define IPPROTO_TRUNK2 24 /* Trunk-2 */ -#define IPPROTO_LEAF1 25 /* Leaf-1 */ -#define IPPROTO_LEAF2 26 /* Leaf-2 */ -#define IPPROTO_RDP 27 /* Reliable Data */ -#define IPPROTO_IRTP 28 /* Reliable Transaction */ -#define IPPROTO_TP 29 /* tp-4 w/ class negotiation */ -#define IPPROTO_BLT 30 /* Bulk Data Transfer */ -#define IPPROTO_NSP 31 /* Network Services */ -#define IPPROTO_INP 32 /* Merit Internodal */ -#define IPPROTO_SEP 33 /* Sequential Exchange */ -#define IPPROTO_3PC 34 /* Third Party Connect */ -#define IPPROTO_IDPR 35 /* InterDomain Policy Routing */ -#define IPPROTO_XTP 36 /* XTP */ -#define IPPROTO_DDP 37 /* Datagram Delivery
*/ -#define IPPROTO_CMTP 38 /* Control Message Transport */ -#define IPPROTO_TPXX 39 /* TP++ Transport */ -#define IPPROTO_IL 40 /* IL transport protocol */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define IPPROTO_IPV6 41 /* IP6 header */ +#define IPPROTO_MUX 18 /* Multiplexing */ +#define IPPROTO_MEAS 19 /* DCN Measurement Subsystems */ +#define IPPROTO_HMP 20 /* Host Monitoring */ +#define IPPROTO_PRM 21 /* Packet Radio Measurement */ +#define IPPROTO_IDP 22 /* xns idp */ +#define IPPROTO_TRUNK1 23 /* Trunk-1 */ +#define IPPROTO_TRUNK2 24 /* Trunk-2 */ +#define IPPROTO_LEAF1 25 /* Leaf-1 */ +#define IPPROTO_LEAF2 26 /* Leaf-2 */ +#define IPPROTO_RDP 27 /* Reliable Data */ +#define IPPROTO_IRTP 28 /* Reliable Transaction */ +#define IPPROTO_TP 29 /* tp-4 w/ class negotiation */ +#define IPPROTO_BLT 30 /* Bulk Data Transfer */ +#define IPPROTO_NSP 31 /* Network Services */ +#define IPPROTO_INP 32 /* Merit Internodal */ +#define IPPROTO_SEP 33 /* Sequential Exchange */ +#define IPPROTO_3PC 34 /* Third Party Connect */ +#define IPPROTO_IDPR 35 /* InterDomain Policy Routing */ +#define IPPROTO_XTP 36 /* XTP */ +#define IPPROTO_DDP 37 /* Datagram Delivery */ +#define IPPROTO_CMTP 38 /* Control Message Transport */ +#define IPPROTO_TPXX 39 /* TP++ Transport */ +#define IPPROTO_IL 40 /* IL transport protocol */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define IPPROTO_IPV6 41 /* IP6 header */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IPPROTO_SDRP 42 /* Source Demand Routing */ -#define IPPROTO_ROUTING 43 /* IP6 routing header */ -#define IPPROTO_FRAGMENT 44 /* IP6 fragmentation header */ -#define IPPROTO_IDRP 45 /* InterDomain Routing*/ -#define IPPROTO_RSVP 46 /* resource reservation */ -#define IPPROTO_GRE 47 /* General Routing Encap. */ -#define IPPROTO_MHRP 48 /* Mobile Host Routing */ -#define IPPROTO_BHA 49 /* BHA */ -#define IPPROTO_ESP 50 /* IP6 Encap Sec. Payload */ -#define IPPROTO_AH 51 /* IP6 Auth Header */ -#define IPPROTO_INLSP 52 /* Integ. Net Layer Security */ -#define IPPROTO_SWIPE 53 /* IP with encryption */ -#define IPPROTO_NHRP 54 /* Next Hop Resolution */ +#define IPPROTO_SDRP 42 /* Source Demand Routing */ +#define IPPROTO_ROUTING 43 /* IP6 routing header */ +#define IPPROTO_FRAGMENT 44 /* IP6 fragmentation header */ +#define IPPROTO_IDRP 45 /* InterDomain Routing*/ +#define IPPROTO_RSVP 46 /* resource reservation */ +#define IPPROTO_GRE 47 /* General Routing Encap. */ +#define IPPROTO_MHRP 48 /* Mobile Host Routing */ +#define IPPROTO_BHA 49 /* BHA */ +#define IPPROTO_ESP 50 /* IP6 Encap Sec. Payload */ +#define IPPROTO_AH 51 /* IP6 Auth Header */ +#define IPPROTO_INLSP 52 /* Integ. 
Net Layer Security */ +#define IPPROTO_SWIPE 53 /* IP with encryption */ +#define IPPROTO_NHRP 54 /* Next Hop Resolution */ /* 55-57: Unassigned */ -#define IPPROTO_ICMPV6 58 /* ICMP6 */ -#define IPPROTO_NONE 59 /* IP6 no next header */ -#define IPPROTO_DSTOPTS 60 /* IP6 destination option */ -#define IPPROTO_AHIP 61 /* any host internal protocol */ -#define IPPROTO_CFTP 62 /* CFTP */ -#define IPPROTO_HELLO 63 /* "hello" routing protocol */ -#define IPPROTO_SATEXPAK 64 /* SATNET/Backroom EXPAK */ -#define IPPROTO_KRYPTOLAN 65 /* Kryptolan */ -#define IPPROTO_RVD 66 /* Remote Virtual Disk */ -#define IPPROTO_IPPC 67 /* Pluribus Packet Core */ -#define IPPROTO_ADFS 68 /* Any distributed FS */ -#define IPPROTO_SATMON 69 /* Satnet Monitoring */ -#define IPPROTO_VISA 70 /* VISA Protocol */ -#define IPPROTO_IPCV 71 /* Packet Core Utility */ -#define IPPROTO_CPNX 72 /* Comp. Prot. Net. Executive */ -#define IPPROTO_CPHB 73 /* Comp. Prot. HeartBeat */ -#define IPPROTO_WSN 74 /* Wang Span Network */ -#define IPPROTO_PVP 75 /* Packet Video Protocol */ -#define IPPROTO_BRSATMON 76 /* BackRoom SATNET Monitoring */ -#define IPPROTO_ND 77 /* Sun net disk proto (temp.) */ -#define IPPROTO_WBMON 78 /* WIDEBAND Monitoring */ -#define IPPROTO_WBEXPAK 79 /* WIDEBAND EXPAK */ -#define IPPROTO_EON 80 /* ISO cnlp */ -#define IPPROTO_VMTP 81 /* VMTP */ -#define IPPROTO_SVMTP 82 /* Secure VMTP */ -#define IPPROTO_VINES 83 /* Banyon VINES */ -#define IPPROTO_TTP 84 /* TTP */ -#define IPPROTO_IGP 85 /* NSFNET-IGP */ -#define IPPROTO_DGP 86 /* dissimilar gateway prot. */ -#define IPPROTO_TCF 87 /* TCF */ -#define IPPROTO_IGRP 88 /* Cisco/GXS IGRP */ -#define IPPROTO_OSPFIGP 89 /* OSPFIGP */ -#define IPPROTO_SRPC 90 /* Strite RPC protocol */ -#define IPPROTO_LARP 91 /* Locus Address Resoloution */ -#define IPPROTO_MTP 92 /* Multicast Transport */ -#define IPPROTO_AX25 93 /* AX.25 Frames */ -#define IPPROTO_IPEIP 94 /* IP encapsulated in IP */ -#define IPPROTO_MICP 95 /* Mobile Int.ing control */ -#define IPPROTO_SCCSP 96 /* Semaphore Comm. security */ -#define IPPROTO_ETHERIP 97 /* Ethernet IP encapsulation */ -#define IPPROTO_ENCAP 98 /* encapsulation header */ -#define IPPROTO_APES 99 /* any private encr. scheme */ -#define IPPROTO_GMTP 100 /* GMTP*/ +#define IPPROTO_ICMPV6 58 /* ICMP6 */ +#define IPPROTO_NONE 59 /* IP6 no next header */ +#define IPPROTO_DSTOPTS 60 /* IP6 destination option */ +#define IPPROTO_AHIP 61 /* any host internal protocol */ +#define IPPROTO_CFTP 62 /* CFTP */ +#define IPPROTO_HELLO 63 /* "hello" routing protocol */ +#define IPPROTO_SATEXPAK 64 /* SATNET/Backroom EXPAK */ +#define IPPROTO_KRYPTOLAN 65 /* Kryptolan */ +#define IPPROTO_RVD 66 /* Remote Virtual Disk */ +#define IPPROTO_IPPC 67 /* Pluribus Packet Core */ +#define IPPROTO_ADFS 68 /* Any distributed FS */ +#define IPPROTO_SATMON 69 /* Satnet Monitoring */ +#define IPPROTO_VISA 70 /* VISA Protocol */ +#define IPPROTO_IPCV 71 /* Packet Core Utility */ +#define IPPROTO_CPNX 72 /* Comp. Prot. Net. Executive */ +#define IPPROTO_CPHB 73 /* Comp. Prot. HeartBeat */ +#define IPPROTO_WSN 74 /* Wang Span Network */ +#define IPPROTO_PVP 75 /* Packet Video Protocol */ +#define IPPROTO_BRSATMON 76 /* BackRoom SATNET Monitoring */ +#define IPPROTO_ND 77 /* Sun net disk proto (temp.) 
*/ +#define IPPROTO_WBMON 78 /* WIDEBAND Monitoring */ +#define IPPROTO_WBEXPAK 79 /* WIDEBAND EXPAK */ +#define IPPROTO_EON 80 /* ISO cnlp */ +#define IPPROTO_VMTP 81 /* VMTP */ +#define IPPROTO_SVMTP 82 /* Secure VMTP */ +#define IPPROTO_VINES 83 /* Banyon VINES */ +#define IPPROTO_TTP 84 /* TTP */ +#define IPPROTO_IGP 85 /* NSFNET-IGP */ +#define IPPROTO_DGP 86 /* dissimilar gateway prot. */ +#define IPPROTO_TCF 87 /* TCF */ +#define IPPROTO_IGRP 88 /* Cisco/GXS IGRP */ +#define IPPROTO_OSPFIGP 89 /* OSPFIGP */ +#define IPPROTO_SRPC 90 /* Strite RPC protocol */ +#define IPPROTO_LARP 91 /* Locus Address Resoloution */ +#define IPPROTO_MTP 92 /* Multicast Transport */ +#define IPPROTO_AX25 93 /* AX.25 Frames */ +#define IPPROTO_IPEIP 94 /* IP encapsulated in IP */ +#define IPPROTO_MICP 95 /* Mobile Int.ing control */ +#define IPPROTO_SCCSP 96 /* Semaphore Comm. security */ +#define IPPROTO_ETHERIP 97 /* Ethernet IP encapsulation */ +#define IPPROTO_ENCAP 98 /* encapsulation header */ +#define IPPROTO_APES 99 /* any private encr. scheme */ +#define IPPROTO_GMTP 100 /* GMTP*/ /* 101-254: Partly Unassigned */ -#define IPPROTO_PIM 103 /* Protocol Independent Mcast */ -#define IPPROTO_IPCOMP 108 /* payload compression (IPComp) */ -#define IPPROTO_PGM 113 /* PGM */ -#define IPPROTO_SCTP 132 /* SCTP */ +#define IPPROTO_PIM 103 /* Protocol Independent Mcast */ +#define IPPROTO_IPCOMP 108 /* payload compression (IPComp) */ +#define IPPROTO_PGM 113 /* PGM */ +#define IPPROTO_SCTP 132 /* SCTP */ /* 255: Reserved */ /* BSD Private, local use, namespace incursion */ -#define IPPROTO_DIVERT 254 /* divert pseudo-protocol */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define IPPROTO_RAW 255 /* raw IP packet */ +#define IPPROTO_DIVERT 254 /* divert pseudo-protocol */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define IPPROTO_RAW 255 /* raw IP packet */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IPPROTO_MAX 256 +#define IPPROTO_MAX 256 /* last return value of *_input(), meaning "all job for this pkt is done". */ -#define IPPROTO_DONE 257 +#define IPPROTO_DONE 257 #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ /* @@ -268,7 +268,7 @@ * */ -#define __DARWIN_IPPORT_RESERVED 1024 +#define __DARWIN_IPPORT_RESERVED 1024 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* @@ -278,15 +278,15 @@ * for servers, not necessarily privileged. (IP_PORTRANGE_DEFAULT) */ #ifndef IPPORT_RESERVED -#define IPPORT_RESERVED __DARWIN_IPPORT_RESERVED +#define IPPORT_RESERVED __DARWIN_IPPORT_RESERVED #endif -#define IPPORT_USERRESERVED 5000 +#define IPPORT_USERRESERVED 5000 /* * Default local port range to use by setting IP_PORTRANGE_HIGH */ -#define IPPORT_HIFIRSTAUTO 49152 -#define IPPORT_HILASTAUTO 65535 +#define IPPORT_HIFIRSTAUTO 49152 +#define IPPORT_HILASTAUTO 65535 /* * Scanning for a free reserved port return a value below IPPORT_RESERVED, @@ -294,8 +294,8 @@ * 512, but that conflicts with some well-known-services that firewalls may * have a fit if we use. */ -#define IPPORT_RESERVEDSTART 600 -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define IPPORT_RESERVEDSTART 600 +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Internet address (a structure for historical reasons) @@ -309,92 +309,92 @@ struct in_addr { * On subnets, the decomposition of addresses to host and net parts * is done according to subnet mask, not the masks here. 
*/ -#define INADDR_ANY (u_int32_t)0x00000000 -#define INADDR_BROADCAST (u_int32_t)0xffffffff /* must be masked */ +#define INADDR_ANY (u_int32_t)0x00000000 +#define INADDR_BROADCAST (u_int32_t)0xffffffff /* must be masked */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IN_CLASSA(i) (((u_int32_t)(i) & 0x80000000) == 0) -#define IN_CLASSA_NET 0xff000000 -#define IN_CLASSA_NSHIFT 24 -#define IN_CLASSA_HOST 0x00ffffff -#define IN_CLASSA_MAX 128 - -#define IN_CLASSB(i) (((u_int32_t)(i) & 0xc0000000) == 0x80000000) -#define IN_CLASSB_NET 0xffff0000 -#define IN_CLASSB_NSHIFT 16 -#define IN_CLASSB_HOST 0x0000ffff -#define IN_CLASSB_MAX 65536 - -#define IN_CLASSC(i) (((u_int32_t)(i) & 0xe0000000) == 0xc0000000) -#define IN_CLASSC_NET 0xffffff00 -#define IN_CLASSC_NSHIFT 8 -#define IN_CLASSC_HOST 0x000000ff - -#define IN_CLASSD(i) (((u_int32_t)(i) & 0xf0000000) == 0xe0000000) -#define IN_CLASSD_NET 0xf0000000 /* These ones aren't really */ -#define IN_CLASSD_NSHIFT 28 /* net and host fields, but */ -#define IN_CLASSD_HOST 0x0fffffff /* routing needn't know. */ -#define IN_MULTICAST(i) IN_CLASSD(i) - -#define IN_EXPERIMENTAL(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000) -#define IN_BADCLASS(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000) - -#define INADDR_LOOPBACK (u_int32_t)0x7f000001 +#define IN_CLASSA(i) (((u_int32_t)(i) & 0x80000000) == 0) +#define IN_CLASSA_NET 0xff000000 +#define IN_CLASSA_NSHIFT 24 +#define IN_CLASSA_HOST 0x00ffffff +#define IN_CLASSA_MAX 128 + +#define IN_CLASSB(i) (((u_int32_t)(i) & 0xc0000000) == 0x80000000) +#define IN_CLASSB_NET 0xffff0000 +#define IN_CLASSB_NSHIFT 16 +#define IN_CLASSB_HOST 0x0000ffff +#define IN_CLASSB_MAX 65536 + +#define IN_CLASSC(i) (((u_int32_t)(i) & 0xe0000000) == 0xc0000000) +#define IN_CLASSC_NET 0xffffff00 +#define IN_CLASSC_NSHIFT 8 +#define IN_CLASSC_HOST 0x000000ff + +#define IN_CLASSD(i) (((u_int32_t)(i) & 0xf0000000) == 0xe0000000) +#define IN_CLASSD_NET 0xf0000000 /* These ones aren't really */ +#define IN_CLASSD_NSHIFT 28 /* net and host fields, but */ +#define IN_CLASSD_HOST 0x0fffffff /* routing needn't know. 
*/ +#define IN_MULTICAST(i) IN_CLASSD(i) + +#define IN_EXPERIMENTAL(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000) +#define IN_BADCLASS(i) (((u_int32_t)(i) & 0xf0000000) == 0xf0000000) + +#define INADDR_LOOPBACK (u_int32_t)0x7f000001 #ifndef KERNEL -#define INADDR_NONE 0xffffffff /* -1 return */ +#define INADDR_NONE 0xffffffff /* -1 return */ #endif -#define INADDR_UNSPEC_GROUP (u_int32_t)0xe0000000 /* 224.0.0.0 */ -#define INADDR_ALLHOSTS_GROUP (u_int32_t)0xe0000001 /* 224.0.0.1 */ -#define INADDR_ALLRTRS_GROUP (u_int32_t)0xe0000002 /* 224.0.0.2 */ -#define INADDR_ALLRPTS_GROUP (u_int32_t)0xe0000016 /* 224.0.0.22, IGMPv3 */ -#define INADDR_CARP_GROUP (u_int32_t)0xe0000012 /* 224.0.0.18 */ -#define INADDR_PFSYNC_GROUP (u_int32_t)0xe00000f0 /* 224.0.0.240 */ -#define INADDR_ALLMDNS_GROUP (u_int32_t)0xe00000fb /* 224.0.0.251 */ -#define INADDR_MAX_LOCAL_GROUP (u_int32_t)0xe00000ff /* 224.0.0.255 */ +#define INADDR_UNSPEC_GROUP (u_int32_t)0xe0000000 /* 224.0.0.0 */ +#define INADDR_ALLHOSTS_GROUP (u_int32_t)0xe0000001 /* 224.0.0.1 */ +#define INADDR_ALLRTRS_GROUP (u_int32_t)0xe0000002 /* 224.0.0.2 */ +#define INADDR_ALLRPTS_GROUP (u_int32_t)0xe0000016 /* 224.0.0.22, IGMPv3 */ +#define INADDR_CARP_GROUP (u_int32_t)0xe0000012 /* 224.0.0.18 */ +#define INADDR_PFSYNC_GROUP (u_int32_t)0xe00000f0 /* 224.0.0.240 */ +#define INADDR_ALLMDNS_GROUP (u_int32_t)0xe00000fb /* 224.0.0.251 */ +#define INADDR_MAX_LOCAL_GROUP (u_int32_t)0xe00000ff /* 224.0.0.255 */ #ifdef __APPLE__ -#define IN_LINKLOCALNETNUM (u_int32_t)0xA9FE0000 /* 169.254.0.0 */ -#define IN_LINKLOCAL(i) (((u_int32_t)(i) & IN_CLASSB_NET) == IN_LINKLOCALNETNUM) -#define IN_LOOPBACK(i) (((u_int32_t)(i) & 0xff000000) == 0x7f000000) -#define IN_ZERONET(i) (((u_int32_t)(i) & 0xff000000) == 0) +#define IN_LINKLOCALNETNUM (u_int32_t)0xA9FE0000 /* 169.254.0.0 */ +#define IN_LINKLOCAL(i) (((u_int32_t)(i) & IN_CLASSB_NET) == IN_LINKLOCALNETNUM) +#define IN_LOOPBACK(i) (((u_int32_t)(i) & 0xff000000) == 0x7f000000) +#define IN_ZERONET(i) (((u_int32_t)(i) & 0xff000000) == 0) -#define IN_PRIVATE(i) ((((u_int32_t)(i) & 0xff000000) == 0x0a000000) || \ - (((u_int32_t)(i) & 0xfff00000) == 0xac100000) || \ - (((u_int32_t)(i) & 0xffff0000) == 0xc0a80000)) +#define IN_PRIVATE(i) ((((u_int32_t)(i) & 0xff000000) == 0x0a000000) || \ + (((u_int32_t)(i) & 0xfff00000) == 0xac100000) || \ + (((u_int32_t)(i) & 0xffff0000) == 0xc0a80000)) #ifdef PRIVATE -#define IN_SHARED_ADDRESS_SPACE(i) ((((u_int32_t)(i)) & (u_int32_t)0xffc00000) \ - == (u_int32_t)0x64400000) +#define IN_SHARED_ADDRESS_SPACE(i) ((((u_int32_t)(i)) & (u_int32_t)0xffc00000) \ + == (u_int32_t)0x64400000) -#define IN_DS_LITE(i) ((((u_int32_t)(i)) & (u_int32_t)0xfffffff8) == (u_int32_t)0xc0000000) +#define IN_DS_LITE(i) ((((u_int32_t)(i)) & (u_int32_t)0xfffffff8) == (u_int32_t)0xc0000000) -#define IN_6TO4_RELAY_ANYCAST(i) ((((u_int32_t)(i)) & (u_int32_t)IN_CLASSC_NET) == (u_int32_t)0xc0586300) +#define IN_6TO4_RELAY_ANYCAST(i) ((((u_int32_t)(i)) & (u_int32_t)IN_CLASSC_NET) == (u_int32_t)0xc0586300) #endif -#define IN_LOCAL_GROUP(i) (((u_int32_t)(i) & 0xffffff00) == 0xe0000000) +#define IN_LOCAL_GROUP(i) (((u_int32_t)(i) & 0xffffff00) == 0xe0000000) -#define IN_ANY_LOCAL(i) (IN_LINKLOCAL(i) || IN_LOCAL_GROUP(i)) +#define IN_ANY_LOCAL(i) (IN_LINKLOCAL(i) || IN_LOCAL_GROUP(i)) #endif /* __APPLE__ */ -#define IN_LOOPBACKNET 127 /* official! */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define IN_LOOPBACKNET 127 /* official! 
*/ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Socket address, internet style. */ struct sockaddr_in { - __uint8_t sin_len; - sa_family_t sin_family; - in_port_t sin_port; - struct in_addr sin_addr; - char sin_zero[8]; + __uint8_t sin_len; + sa_family_t sin_family; + in_port_t sin_port; + struct in_addr sin_addr; + char sin_zero[8]; }; #define IN_ARE_ADDR_EQUAL(a, b) \ (bcmp(&(a)->s_addr, &(b)->s_addr, \ - sizeof (struct in_addr)) == 0) + sizeof (struct in_addr)) == 0) #ifdef PRIVATE /* @@ -404,10 +404,10 @@ struct sockaddr_in { * current implementation which could change in future. */ struct sockaddr_inifscope { - __uint8_t sin_len; - sa_family_t sin_family; - in_port_t sin_port; - struct in_addr sin_addr; + __uint8_t sin_len; + sa_family_t sin_family; + in_port_t sin_port; + struct in_addr sin_addr; /* * To avoid possible conflict with an overlaid sockaddr_inarp * having sin_other set to SIN_PROXY, we use the first 4-bytes @@ -415,12 +415,12 @@ struct sockaddr_inifscope { * in sockaddr_inarp. */ union { - char sin_zero[8]; + char sin_zero[8]; struct { - __uint32_t ifscope; + __uint32_t ifscope; } _in_index; } un; -#define sin_scope_id un._in_index.ifscope +#define sin_scope_id un._in_index.ifscope }; #endif /* PRIVATE */ @@ -436,122 +436,122 @@ struct sockaddr_inifscope { * (this gets put into the header proper). */ struct ip_opts { - struct in_addr ip_dst; /* first hop, 0 w/o src rt */ - char ip_opts[40]; /* actually variable in size */ + struct in_addr ip_dst; /* first hop, 0 w/o src rt */ + char ip_opts[40]; /* actually variable in size */ }; /* * Options for use with [gs]etsockopt at the IP level. * First word of comment is data type; bool is stored in int. */ -#define IP_OPTIONS 1 /* buf/ip_opts; set/get IP options */ -#define IP_HDRINCL 2 /* int; header is included with data */ -#define IP_TOS 3 /* int; IP type of service and preced. */ -#define IP_TTL 4 /* int; IP time to live */ -#define IP_RECVOPTS 5 /* bool; receive all IP opts w/dgram */ -#define IP_RECVRETOPTS 6 /* bool; receive IP opts for response */ -#define IP_RECVDSTADDR 7 /* bool; receive IP dst addr w/dgram */ -#define IP_RETOPTS 8 /* ip_opts; set/get IP options */ -#define IP_MULTICAST_IF 9 /* u_char; set/get IP multicast i/f */ -#define IP_MULTICAST_TTL 10 /* u_char; set/get IP multicast ttl */ -#define IP_MULTICAST_LOOP 11 /* u_char; set/get IP multicast loopback */ -#define IP_ADD_MEMBERSHIP 12 /* ip_mreq; add an IP group membership */ -#define IP_DROP_MEMBERSHIP 13 /* ip_mreq; drop an IP group membership */ -#define IP_MULTICAST_VIF 14 /* set/get IP mcast virt. iface */ -#define IP_RSVP_ON 15 /* enable RSVP in kernel */ -#define IP_RSVP_OFF 16 /* disable RSVP in kernel */ -#define IP_RSVP_VIF_ON 17 /* set RSVP per-vif socket */ -#define IP_RSVP_VIF_OFF 18 /* unset RSVP per-vif socket */ -#define IP_PORTRANGE 19 /* int; range to choose for unspec port */ -#define IP_RECVIF 20 /* bool; receive reception if w/dgram */ +#define IP_OPTIONS 1 /* buf/ip_opts; set/get IP options */ +#define IP_HDRINCL 2 /* int; header is included with data */ +#define IP_TOS 3 /* int; IP type of service and preced. 
*/ +#define IP_TTL 4 /* int; IP time to live */ +#define IP_RECVOPTS 5 /* bool; receive all IP opts w/dgram */ +#define IP_RECVRETOPTS 6 /* bool; receive IP opts for response */ +#define IP_RECVDSTADDR 7 /* bool; receive IP dst addr w/dgram */ +#define IP_RETOPTS 8 /* ip_opts; set/get IP options */ +#define IP_MULTICAST_IF 9 /* u_char; set/get IP multicast i/f */ +#define IP_MULTICAST_TTL 10 /* u_char; set/get IP multicast ttl */ +#define IP_MULTICAST_LOOP 11 /* u_char; set/get IP multicast loopback */ +#define IP_ADD_MEMBERSHIP 12 /* ip_mreq; add an IP group membership */ +#define IP_DROP_MEMBERSHIP 13 /* ip_mreq; drop an IP group membership */ +#define IP_MULTICAST_VIF 14 /* set/get IP mcast virt. iface */ +#define IP_RSVP_ON 15 /* enable RSVP in kernel */ +#define IP_RSVP_OFF 16 /* disable RSVP in kernel */ +#define IP_RSVP_VIF_ON 17 /* set RSVP per-vif socket */ +#define IP_RSVP_VIF_OFF 18 /* unset RSVP per-vif socket */ +#define IP_PORTRANGE 19 /* int; range to choose for unspec port */ +#define IP_RECVIF 20 /* bool; receive reception if w/dgram */ /* for IPSEC */ -#define IP_IPSEC_POLICY 21 /* int; set/get security policy */ -#define IP_FAITH 22 /* deprecated */ +#define IP_IPSEC_POLICY 21 /* int; set/get security policy */ +#define IP_FAITH 22 /* deprecated */ #ifdef __APPLE__ -#define IP_STRIPHDR 23 /* bool: drop receive of raw IP header */ +#define IP_STRIPHDR 23 /* bool: drop receive of raw IP header */ #endif -#define IP_RECVTTL 24 /* bool; receive reception TTL w/dgram */ -#define IP_BOUND_IF 25 /* int; set/get bound interface */ -#define IP_PKTINFO 26 /* get pktinfo on recv socket, set src on sent dgram */ -#define IP_RECVPKTINFO IP_PKTINFO /* receive pktinfo w/dgram */ -#define IP_RECVTOS 27 /* bool; receive IP TOS w/dgram */ - -#define IP_FW_ADD 40 /* add a firewall rule to chain */ -#define IP_FW_DEL 41 /* delete a firewall rule from chain */ -#define IP_FW_FLUSH 42 /* flush firewall rule chain */ -#define IP_FW_ZERO 43 /* clear single/all firewall counter(s) */ -#define IP_FW_GET 44 /* get entire firewall rule chain */ -#define IP_FW_RESETLOG 45 /* reset logging counters */ +#define IP_RECVTTL 24 /* bool; receive reception TTL w/dgram */ +#define IP_BOUND_IF 25 /* int; set/get bound interface */ +#define IP_PKTINFO 26 /* get pktinfo on recv socket, set src on sent dgram */ +#define IP_RECVPKTINFO IP_PKTINFO /* receive pktinfo w/dgram */ +#define IP_RECVTOS 27 /* bool; receive IP TOS w/dgram */ + +#define IP_FW_ADD 40 /* add a firewall rule to chain */ +#define IP_FW_DEL 41 /* delete a firewall rule from chain */ +#define IP_FW_FLUSH 42 /* flush firewall rule chain */ +#define IP_FW_ZERO 43 /* clear single/all firewall counter(s) */ +#define IP_FW_GET 44 /* get entire firewall rule chain */ +#define IP_FW_RESETLOG 45 /* reset logging counters */ /* These older firewall socket option codes are maintained for backward compatibility. 
*/ -#define IP_OLD_FW_ADD 50 /* add a firewall rule to chain */ -#define IP_OLD_FW_DEL 51 /* delete a firewall rule from chain */ -#define IP_OLD_FW_FLUSH 52 /* flush firewall rule chain */ -#define IP_OLD_FW_ZERO 53 /* clear single/all firewall counter(s) */ -#define IP_OLD_FW_GET 54 /* get entire firewall rule chain */ -#define IP_NAT__XXX 55 /* set/get NAT opts XXX Deprecated, do not use */ -#define IP_OLD_FW_RESETLOG 56 /* reset logging counters */ - -#define IP_DUMMYNET_CONFIGURE 60 /* add/configure a dummynet pipe */ -#define IP_DUMMYNET_DEL 61 /* delete a dummynet pipe from chain */ -#define IP_DUMMYNET_FLUSH 62 /* flush dummynet */ -#define IP_DUMMYNET_GET 64 /* get entire dummynet pipes */ - -#define IP_TRAFFIC_MGT_BACKGROUND 65 /* int*; get background IO flags; set background IO */ -#define IP_MULTICAST_IFINDEX 66 /* int*; set/get IP multicast i/f index */ +#define IP_OLD_FW_ADD 50 /* add a firewall rule to chain */ +#define IP_OLD_FW_DEL 51 /* delete a firewall rule from chain */ +#define IP_OLD_FW_FLUSH 52 /* flush firewall rule chain */ +#define IP_OLD_FW_ZERO 53 /* clear single/all firewall counter(s) */ +#define IP_OLD_FW_GET 54 /* get entire firewall rule chain */ +#define IP_NAT__XXX 55 /* set/get NAT opts XXX Deprecated, do not use */ +#define IP_OLD_FW_RESETLOG 56 /* reset logging counters */ + +#define IP_DUMMYNET_CONFIGURE 60 /* add/configure a dummynet pipe */ +#define IP_DUMMYNET_DEL 61 /* delete a dummynet pipe from chain */ +#define IP_DUMMYNET_FLUSH 62 /* flush dummynet */ +#define IP_DUMMYNET_GET 64 /* get entire dummynet pipes */ + +#define IP_TRAFFIC_MGT_BACKGROUND 65 /* int*; get background IO flags; set background IO */ +#define IP_MULTICAST_IFINDEX 66 /* int*; set/get IP multicast i/f index */ /* IPv4 Source Filter Multicast API [RFC3678] */ -#define IP_ADD_SOURCE_MEMBERSHIP 70 /* join a source-specific group */ -#define IP_DROP_SOURCE_MEMBERSHIP 71 /* drop a single source */ -#define IP_BLOCK_SOURCE 72 /* block a source */ -#define IP_UNBLOCK_SOURCE 73 /* unblock a source */ +#define IP_ADD_SOURCE_MEMBERSHIP 70 /* join a source-specific group */ +#define IP_DROP_SOURCE_MEMBERSHIP 71 /* drop a single source */ +#define IP_BLOCK_SOURCE 72 /* block a source */ +#define IP_UNBLOCK_SOURCE 73 /* unblock a source */ /* The following option is private; do not use it from user applications. 
*/ -#define IP_MSFILTER 74 /* set/get filter list */ +#define IP_MSFILTER 74 /* set/get filter list */ /* Protocol Independent Multicast API [RFC3678] */ -#define MCAST_JOIN_GROUP 80 /* join an any-source group */ -#define MCAST_LEAVE_GROUP 81 /* leave all sources for group */ -#define MCAST_JOIN_SOURCE_GROUP 82 /* join a source-specific group */ -#define MCAST_LEAVE_SOURCE_GROUP 83 /* leave a single source */ -#define MCAST_BLOCK_SOURCE 84 /* block a source */ -#define MCAST_UNBLOCK_SOURCE 85 /* unblock a source */ +#define MCAST_JOIN_GROUP 80 /* join an any-source group */ +#define MCAST_LEAVE_GROUP 81 /* leave all sources for group */ +#define MCAST_JOIN_SOURCE_GROUP 82 /* join a source-specific group */ +#define MCAST_LEAVE_SOURCE_GROUP 83 /* leave a single source */ +#define MCAST_BLOCK_SOURCE 84 /* block a source */ +#define MCAST_UNBLOCK_SOURCE 85 /* unblock a source */ #ifdef PRIVATE -#define IP_FORCE_OUT_IFP 69 /* not implemented; use IP_BOUND_IF instead */ -#define IP_NO_IFT_CELLULAR 6969 /* for internal use only */ -#define IP_NO_IFT_PDP IP_NO_IFT_CELLULAR /* deprecated */ -#define IP_OUT_IF 9696 /* for internal use only */ +#define IP_FORCE_OUT_IFP 69 /* not implemented; use IP_BOUND_IF instead */ +#define IP_NO_IFT_CELLULAR 6969 /* for internal use only */ +#define IP_NO_IFT_PDP IP_NO_IFT_CELLULAR /* deprecated */ +#define IP_OUT_IF 9696 /* for internal use only */ #endif /* PRIVATE */ /* * Defaults and limits for options */ -#define IP_DEFAULT_MULTICAST_TTL 1 /* normally limit m'casts to 1 hop */ -#define IP_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */ +#define IP_DEFAULT_MULTICAST_TTL 1 /* normally limit m'casts to 1 hop */ +#define IP_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */ /* * The imo_membership vector for each socket is now dynamically allocated at * run-time, bounded by USHRT_MAX, and is reallocated when needed, sized * according to a power-of-two increment. */ -#define IP_MIN_MEMBERSHIPS 31 -#define IP_MAX_MEMBERSHIPS 4095 +#define IP_MIN_MEMBERSHIPS 31 +#define IP_MAX_MEMBERSHIPS 4095 /* * Default resource limits for IPv4 multicast source filtering. * These may be modified by sysctl. */ -#define IP_MAX_GROUP_SRC_FILTER 512 /* sources per group */ -#define IP_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */ -#define IP_MAX_SOCK_MUTE_FILTER 128 /* XXX no longer used */ +#define IP_MAX_GROUP_SRC_FILTER 512 /* sources per group */ +#define IP_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */ +#define IP_MAX_SOCK_MUTE_FILTER 128 /* XXX no longer used */ /* * Argument structure for IP_ADD_MEMBERSHIP and IP_DROP_MEMBERSHIP. */ struct ip_mreq { - struct in_addr imr_multiaddr; /* IP multicast address of group */ - struct in_addr imr_interface; /* local IP address of interface */ + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_interface; /* local IP address of interface */ }; /* @@ -560,9 +560,9 @@ struct ip_mreq { * the IPv4 legacy APIs do not support this (unless IP_SENDIF is available). 
*/ struct ip_mreqn { - struct in_addr imr_multiaddr; /* IP multicast address of group */ - struct in_addr imr_address; /* local IP address of interface */ - int imr_ifindex; /* Interface index; cast to uint32_t */ + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_address; /* local IP address of interface */ + int imr_ifindex; /* Interface index; cast to uint32_t */ }; #pragma pack(4) @@ -570,9 +570,9 @@ struct ip_mreqn { * Argument structure for IPv4 Multicast Source Filter APIs. [RFC3678] */ struct ip_mreq_source { - struct in_addr imr_multiaddr; /* IP multicast address of group */ - struct in_addr imr_sourceaddr; /* IP address of source */ - struct in_addr imr_interface; /* local IP address of interface */ + struct in_addr imr_multiaddr; /* IP multicast address of group */ + struct in_addr imr_sourceaddr; /* IP address of source */ + struct in_addr imr_interface; /* local IP address of interface */ }; /* @@ -580,14 +580,14 @@ struct ip_mreq_source { * Filter APIs. [RFC3678] */ struct group_req { - uint32_t gr_interface; /* interface index */ - struct sockaddr_storage gr_group; /* group address */ + uint32_t gr_interface; /* interface index */ + struct sockaddr_storage gr_group; /* group address */ }; struct group_source_req { - uint32_t gsr_interface; /* interface index */ - struct sockaddr_storage gsr_group; /* group address */ - struct sockaddr_storage gsr_source; /* source address */ + uint32_t gsr_interface; /* interface index */ + struct sockaddr_storage gsr_group; /* group address */ + struct sockaddr_storage gsr_source; /* source address */ }; #ifndef __MSFILTERREQ_DEFINED @@ -598,31 +598,31 @@ struct group_source_req { * the RFC 3678 libc functions and the kernel. */ struct __msfilterreq { - uint32_t msfr_ifindex; /* interface index */ - uint32_t msfr_fmode; /* filter mode for group */ - uint32_t msfr_nsrcs; /* # of sources in msfr_srcs */ - uint32_t __msfr_align; - struct sockaddr_storage msfr_group; /* group address */ - struct sockaddr_storage *msfr_srcs; + uint32_t msfr_ifindex; /* interface index */ + uint32_t msfr_fmode; /* filter mode for group */ + uint32_t msfr_nsrcs; /* # of sources in msfr_srcs */ + uint32_t __msfr_align; + struct sockaddr_storage msfr_group; /* group address */ + struct sockaddr_storage *msfr_srcs; }; #ifdef BSD_KERNEL_PRIVATE struct __msfilterreq32 { - uint32_t msfr_ifindex; /* interface index */ - uint32_t msfr_fmode; /* filter mode for group */ - uint32_t msfr_nsrcs; /* # of sources in msfr_srcs */ - uint32_t __msfr_align; - struct sockaddr_storage msfr_group; /* group address */ - user32_addr_t msfr_srcs; + uint32_t msfr_ifindex; /* interface index */ + uint32_t msfr_fmode; /* filter mode for group */ + uint32_t msfr_nsrcs; /* # of sources in msfr_srcs */ + uint32_t __msfr_align; + struct sockaddr_storage msfr_group; /* group address */ + user32_addr_t msfr_srcs; }; struct __msfilterreq64 { - uint32_t msfr_ifindex; /* interface index */ - uint32_t msfr_fmode; /* filter mode for group */ - uint32_t msfr_nsrcs; /* # of sources in msfr_srcs */ - uint32_t __msfr_align; - struct sockaddr_storage msfr_group; /* group address */ - user64_addr_t msfr_srcs; + uint32_t msfr_ifindex; /* interface index */ + uint32_t msfr_fmode; /* filter mode for group */ + uint32_t msfr_nsrcs; /* # of sources in msfr_srcs */ + uint32_t __msfr_align; + struct sockaddr_storage msfr_group; /* group address */ + user64_addr_t msfr_srcs; }; #endif /* BSD_KERNEL_PRIVATE */ #endif /* __MSFILTERREQ_DEFINED */ @@ -636,43 +636,43 @@ struct 
sockaddr; * The RFC specifies uint_t for the 6th argument to [sg]etsourcefilter(). * We use uint32_t here to be consistent. */ -int setipv4sourcefilter(int, struct in_addr, struct in_addr, uint32_t, - uint32_t, struct in_addr *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); -int getipv4sourcefilter(int, struct in_addr, struct in_addr, uint32_t *, - uint32_t *, struct in_addr *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); -int setsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, - uint32_t, uint32_t, struct sockaddr_storage *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); -int getsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, - uint32_t *, uint32_t *, struct sockaddr_storage *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +int setipv4sourcefilter(int, struct in_addr, struct in_addr, uint32_t, + uint32_t, struct in_addr *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +int getipv4sourcefilter(int, struct in_addr, struct in_addr, uint32_t *, + uint32_t *, struct in_addr *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +int setsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, + uint32_t, uint32_t, struct sockaddr_storage *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +int getsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, + uint32_t *, uint32_t *, struct sockaddr_storage *) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); #endif /* * Filter modes; also used to represent per-socket filter mode internally. */ -#define MCAST_UNDEFINED 0 /* fmode: not yet defined */ -#define MCAST_INCLUDE 1 /* fmode: include these source(s) */ -#define MCAST_EXCLUDE 2 /* fmode: exclude these source(s) */ +#define MCAST_UNDEFINED 0 /* fmode: not yet defined */ +#define MCAST_INCLUDE 1 /* fmode: include these source(s) */ +#define MCAST_EXCLUDE 2 /* fmode: exclude these source(s) */ /* * Argument for IP_PORTRANGE: * - which range to search when port is unspecified at bind() or connect() */ -#define IP_PORTRANGE_DEFAULT 0 /* default range */ -#define IP_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */ -#define IP_PORTRANGE_LOW 2 /* "low" - vouchsafe security */ +#define IP_PORTRANGE_DEFAULT 0 /* default range */ +#define IP_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */ +#define IP_PORTRANGE_LOW 2 /* "low" - vouchsafe security */ /* * IP_PKTINFO: Packet information (equivalent to RFC2292 sec 5 for IPv4) - * This structure is used for + * This structure is used for * - * 1) Receiving ancilliary data about the datagram if IP_PKTINFO sockopt is + * 1) Receiving ancilliary data about the datagram if IP_PKTINFO sockopt is * set on the socket. In this case ipi_ifindex will contain the interface - * index the datagram was received on, ipi_addr is the IP address the + * index the datagram was received on, ipi_addr is the IP address the * datagram was received to. * * 2) Sending a datagram using a specific interface or IP source address. - * if ipi_ifindex is set to non-zero when in_pktinfo is passed as + * if ipi_ifindex is set to non-zero when in_pktinfo is passed as * ancilliary data of type IP_PKTINFO, this will be used as the source * interface to send the datagram from. If ipi_ifindex is null, ip_spec_dst * will be used for the source address. @@ -682,9 +682,9 @@ int getsourcefilter(int, uint32_t, struct sockaddr *, socklen_t, * specified during send time. 
*/ struct in_pktinfo { - unsigned int ipi_ifindex; /* send/recv interface index */ - struct in_addr ipi_spec_dst; /* Local address */ - struct in_addr ipi_addr; /* IP Header dst address */ + unsigned int ipi_ifindex; /* send/recv interface index */ + struct in_addr ipi_spec_dst; /* Local address */ + struct in_addr ipi_addr; /* IP Header dst address */ }; /* @@ -693,10 +693,10 @@ struct in_pktinfo { * Third level is protocol number. * Fourth level is desired variable within that protocol. */ -#define IPPROTO_MAXID (IPPROTO_AH + 1) /* don't list to IPPROTO_MAX */ +#define IPPROTO_MAXID (IPPROTO_AH + 1) /* don't list to IPPROTO_MAX */ #ifdef BSD_KERNEL_PRIVATE -#define CTL_IPPROTO_NAMES { \ +#define CTL_IPPROTO_NAMES { \ { "ip", CTLTYPE_NODE }, \ { "icmp", CTLTYPE_NODE }, \ { "igmp", CTLTYPE_NODE }, \ @@ -755,29 +755,29 @@ struct in_pktinfo { /* * Names for IP sysctl objects */ -#define IPCTL_FORWARDING 1 /* act as router */ -#define IPCTL_SENDREDIRECTS 2 /* may send redirects when forwarding */ -#define IPCTL_DEFTTL 3 /* default TTL */ +#define IPCTL_FORWARDING 1 /* act as router */ +#define IPCTL_SENDREDIRECTS 2 /* may send redirects when forwarding */ +#define IPCTL_DEFTTL 3 /* default TTL */ #ifdef notyet -#define IPCTL_DEFMTU 4 /* default MTU */ +#define IPCTL_DEFMTU 4 /* default MTU */ #endif -#define IPCTL_RTEXPIRE 5 /* cloned route expiration time */ -#define IPCTL_RTMINEXPIRE 6 /* min value for expiration time */ -#define IPCTL_RTMAXCACHE 7 /* trigger level for dynamic expire */ -#define IPCTL_SOURCEROUTE 8 /* may perform source routes */ -#define IPCTL_DIRECTEDBROADCAST 9 /* may re-broadcast received packets */ -#define IPCTL_INTRQMAXLEN 10 /* max length of netisr queue */ -#define IPCTL_INTRQDROPS 11 /* number of netisr q drops */ -#define IPCTL_STATS 12 /* ipstat structure */ -#define IPCTL_ACCEPTSOURCEROUTE 13 /* may accept source routed packets */ -#define IPCTL_FASTFORWARDING 14 /* use fast IP forwarding code */ -#define IPCTL_KEEPFAITH 15 /* deprecated */ -#define IPCTL_GIF_TTL 16 /* default TTL for gif encap packet */ -#define IPCTL_MAXID 17 +#define IPCTL_RTEXPIRE 5 /* cloned route expiration time */ +#define IPCTL_RTMINEXPIRE 6 /* min value for expiration time */ +#define IPCTL_RTMAXCACHE 7 /* trigger level for dynamic expire */ +#define IPCTL_SOURCEROUTE 8 /* may perform source routes */ +#define IPCTL_DIRECTEDBROADCAST 9 /* may re-broadcast received packets */ +#define IPCTL_INTRQMAXLEN 10 /* max length of netisr queue */ +#define IPCTL_INTRQDROPS 11 /* number of netisr q drops */ +#define IPCTL_STATS 12 /* ipstat structure */ +#define IPCTL_ACCEPTSOURCEROUTE 13 /* may accept source routed packets */ +#define IPCTL_FASTFORWARDING 14 /* use fast IP forwarding code */ +#define IPCTL_KEEPFAITH 15 /* deprecated */ +#define IPCTL_GIF_TTL 16 /* default TTL for gif encap packet */ +#define IPCTL_MAXID 17 #ifdef BSD_KERNEL_PRIVATE -#define IPCTL_NAMES { \ +#define IPCTL_NAMES { \ { 0, 0 }, \ { "forwarding", CTLTYPE_INT }, \ { "redirect", CTLTYPE_INT }, \ @@ -787,7 +787,7 @@ struct in_pktinfo { { "rtminexpire", CTLTYPE_INT }, \ { "rtmaxcache", CTLTYPE_INT }, \ { "sourceroute", CTLTYPE_INT }, \ - { "directed-broadcast", CTLTYPE_INT }, \ + { "directed-broadcast", CTLTYPE_INT }, \ { "intr-queue-maxlen", CTLTYPE_INT }, \ { "intr-queue-drops", CTLTYPE_INT }, \ { "stats", CTLTYPE_STRUCT }, \ @@ -797,7 +797,7 @@ struct in_pktinfo { { "gifttl", CTLTYPE_INT }, \ } #endif /* BSD_KERNEL_PRIVATE */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || 
_DARWIN_C_SOURCE) */ /* INET6 stuff */ #define __KAME_NETINET_IN_H_INCLUDED_ @@ -805,7 +805,7 @@ struct in_pktinfo { #undef __KAME_NETINET_IN_H_INCLUDED_ #ifdef PRIVATE -/* +/* * Minimal sized structure to hold an IPv4 or IPv6 socket address * as sockaddr_storage can waste memory */ @@ -815,42 +815,42 @@ union sockaddr_in_4_6 { struct sockaddr_in6 sin6; }; -#define CLAT46_HDR_EXPANSION_OVERHD (sizeof(struct ip6_hdr) - sizeof(struct ip)) +#define CLAT46_HDR_EXPANSION_OVERHD (sizeof(struct ip6_hdr) - sizeof(struct ip)) /* * Recommended DiffServ Code Point values */ -#define _DSCP_DF 0 /* RFC 2474 */ +#define _DSCP_DF 0 /* RFC 2474 */ -#define _DSCP_CS0 0 /* RFC 2474 */ -#define _DSCP_CS1 8 /* RFC 2474 */ -#define _DSCP_CS2 16 /* RFC 2474 */ -#define _DSCP_CS3 24 /* RFC 2474 */ -#define _DSCP_CS4 32 /* RFC 2474 */ -#define _DSCP_CS5 40 /* RFC 2474 */ -#define _DSCP_CS6 48 /* RFC 2474 */ -#define _DSCP_CS7 56 /* RFC 2474 */ +#define _DSCP_CS0 0 /* RFC 2474 */ +#define _DSCP_CS1 8 /* RFC 2474 */ +#define _DSCP_CS2 16 /* RFC 2474 */ +#define _DSCP_CS3 24 /* RFC 2474 */ +#define _DSCP_CS4 32 /* RFC 2474 */ +#define _DSCP_CS5 40 /* RFC 2474 */ +#define _DSCP_CS6 48 /* RFC 2474 */ +#define _DSCP_CS7 56 /* RFC 2474 */ -#define _DSCP_EF 46 /* RFC 2474 */ -#define _DSCP_VA 44 /* RFC 5865 */ +#define _DSCP_EF 46 /* RFC 2474 */ +#define _DSCP_VA 44 /* RFC 5865 */ -#define _DSCP_AF11 10 /* RFC 2597 */ -#define _DSCP_AF12 12 /* RFC 2597 */ -#define _DSCP_AF13 14 /* RFC 2597 */ -#define _DSCP_AF21 18 /* RFC 2597 */ -#define _DSCP_AF22 20 /* RFC 2597 */ -#define _DSCP_AF23 22 /* RFC 2597 */ -#define _DSCP_AF31 26 /* RFC 2597 */ -#define _DSCP_AF32 28 /* RFC 2597 */ -#define _DSCP_AF33 30 /* RFC 2597 */ -#define _DSCP_AF41 34 /* RFC 2597 */ -#define _DSCP_AF42 36 /* RFC 2597 */ -#define _DSCP_AF43 38 /* RFC 2597 */ +#define _DSCP_AF11 10 /* RFC 2597 */ +#define _DSCP_AF12 12 /* RFC 2597 */ +#define _DSCP_AF13 14 /* RFC 2597 */ +#define _DSCP_AF21 18 /* RFC 2597 */ +#define _DSCP_AF22 20 /* RFC 2597 */ +#define _DSCP_AF23 22 /* RFC 2597 */ +#define _DSCP_AF31 26 /* RFC 2597 */ +#define _DSCP_AF32 28 /* RFC 2597 */ +#define _DSCP_AF33 30 /* RFC 2597 */ +#define _DSCP_AF41 34 /* RFC 2597 */ +#define _DSCP_AF42 36 /* RFC 2597 */ +#define _DSCP_AF43 38 /* RFC 2597 */ -#define _DSCP_52 52 /* Wi-Fi WMM Certification: Sigma */ +#define _DSCP_52 52 /* Wi-Fi WMM Certification: Sigma */ -#define _MAX_DSCP 63 /* coded on 6 bits */ +#define _MAX_DSCP 63 /* coded on 6 bits */ #endif /* PRIVATE */ @@ -889,33 +889,33 @@ extern int in_getconninfo(struct socket *, sae_connid_t, uint32_t *, extern struct in_ifaddr * inifa_ifpwithflag(struct ifnet *, uint32_t); extern struct in_ifaddr * inifa_ifpclatv4(struct ifnet *); -#define in_cksum(_m, _l) \ +#define in_cksum(_m, _l) \ inet_cksum(_m, 0, 0, _l) -#define in_cksum_buffer(_b, _l) \ +#define in_cksum_buffer(_b, _l) \ inet_cksum_buffer(_b, 0, 0, _l) -#define ip_cksum_hdr_in(_m, _l) \ +#define ip_cksum_hdr_in(_m, _l) \ ip_cksum_hdr_dir(_m, _l, 0) -#define ip_cksum_hdr_out(_m, _l) \ +#define ip_cksum_hdr_out(_m, _l) \ ip_cksum_hdr_dir(_m, _l, 1) -#define in_cksum_hdr(_ip) \ +#define in_cksum_hdr(_ip) \ (~b_sum16(_ip, sizeof (struct ip)) & 0xffff) -#define in_cksum_offset(_m, _o) \ +#define in_cksum_offset(_m, _o) \ ((void) in_finalize_cksum(_m, _o, CSUM_DELAY_IP)) -#define in_delayed_cksum(_m) \ +#define in_delayed_cksum(_m) \ ((void) in_finalize_cksum(_m, 0, CSUM_DELAY_DATA)) -#define in_delayed_cksum_offset(_m, _o) \ +#define in_delayed_cksum_offset(_m, _o) \ ((void) 
in_finalize_cksum(_m, _o, CSUM_DELAY_DATA)) -#define in_hosteq(s, t) ((s).s_addr == (t).s_addr) -#define in_nullhost(x) ((x).s_addr == INADDR_ANY) -#define in_allhosts(x) ((x).s_addr == htonl(INADDR_ALLHOSTS_GROUP)) +#define in_hosteq(s, t) ((s).s_addr == (t).s_addr) +#define in_nullhost(x) ((x).s_addr == INADDR_ANY) +#define in_allhosts(x) ((x).s_addr == htonl(INADDR_ALLHOSTS_GROUP)) -#define SIN(s) ((struct sockaddr_in *)(void *)s) -#define satosin(sa) SIN(sa) -#define sintosa(sin) ((struct sockaddr *)(void *)(sin)) -#define SINIFSCOPE(s) ((struct sockaddr_inifscope *)(void *)(s)) +#define SIN(s) ((struct sockaddr_in *)(void *)s) +#define satosin(sa) SIN(sa) +#define sintosa(sin) ((struct sockaddr *)(void *)(sin)) +#define SINIFSCOPE(s) ((struct sockaddr_inifscope *)(void *)(s)) #endif /* BSD_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE @@ -923,16 +923,16 @@ extern struct in_ifaddr * inifa_ifpclatv4(struct ifnet *); extern int in_localaddr(struct in_addr); extern int inaddr_local(struct in_addr); -extern char *inet_ntoa(struct in_addr); -extern char *inet_ntoa_r(struct in_addr ina, char *buf, +extern char *inet_ntoa(struct in_addr); +extern char *inet_ntoa_r(struct in_addr ina, char *buf, size_t buflen); -extern int inet_pton(int af, const char *, void *); +extern int inet_pton(int af, const char *, void *); #endif /* KERNEL_PRIVATE */ -#define MAX_IPv4_STR_LEN 16 -#define MAX_IPv6_STR_LEN 64 +#define MAX_IPv4_STR_LEN 16 +#define MAX_IPv6_STR_LEN 64 -extern int inet_aton(const char *, struct in_addr *); /* in libkern */ +extern int inet_aton(const char *, struct in_addr *); /* in libkern */ extern const char *inet_ntop(int, const void *, char *, socklen_t); /* in libkern*/ #endif /* KERNEL */ diff --git a/bsd/netinet/in_arp.c b/bsd/netinet/in_arp.c index 2b717a5d9..1aec999a6 100644 --- a/bsd/netinet/in_arp.c +++ b/bsd/netinet/in_arp.c @@ -90,7 +90,7 @@ #include #include -#define CONST_LLADDR(s) ((const u_char*)((s)->sdl_data + (s)->sdl_nlen)) +#define CONST_LLADDR(s) ((const u_char*)((s)->sdl_data + (s)->sdl_nlen)) static const size_t MAX_HW_LEN = 10; @@ -143,12 +143,12 @@ struct llinfo_arp { static LIST_HEAD(, llinfo_arp) llinfo_arp; static thread_call_t arp_timeout_tcall; -static int arp_timeout_run; /* arp_timeout is scheduled to run */ +static int arp_timeout_run; /* arp_timeout is scheduled to run */ static void arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1); static void arp_sched_timeout(struct timeval *); static thread_call_t arp_probe_tcall; -static int arp_probe_run; /* arp_probe is scheduled to run */ +static int arp_probe_run; /* arp_probe is scheduled to run */ static void arp_probe(thread_call_param_t arg0, thread_call_param_t arg1); static void arp_sched_probe(struct timeval *); @@ -175,91 +175,91 @@ extern int tvtohz(struct timeval *); static int arpinit_done; SYSCTL_DECL(_net_link_ether); -SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW|CTLFLAG_LOCKED, 0, ""); +SYSCTL_NODE(_net_link_ether, PF_INET, inet, CTLFLAG_RW | CTLFLAG_LOCKED, 0, ""); -static int arpt_prune = (5*60*1); /* walk list every 5 minutes */ +static int arpt_prune = (5 * 60 * 1); /* walk list every 5 minutes */ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, prune_intvl, - CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_prune, 0, ""); -#define ARP_PROBE_TIME 7 /* seconds */ +#define ARP_PROBE_TIME 7 /* seconds */ static u_int32_t arpt_probe = ARP_PROBE_TIME; SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, probe_intvl, - CTLFLAG_RW | CTLFLAG_LOCKED, 
&arpt_probe, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_probe, 0, ""); -static int arpt_keep = (20*60); /* once resolved, good for 20 more minutes */ +static int arpt_keep = (20 * 60); /* once resolved, good for 20 more minutes */ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, max_age, - CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_keep, 0, ""); -static int arpt_down = 20; /* once declared down, don't send for 20 sec */ +static int arpt_down = 20; /* once declared down, don't send for 20 sec */ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, host_down_time, - CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arpt_down, 0, ""); -static int arp_llreach_base = 120; /* seconds */ +static int arp_llreach_base = 120; /* seconds */ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_llreach_base, - CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, 0, - "default ARP link-layer reachability max lifetime (in seconds)"); + CTLFLAG_RW | CTLFLAG_LOCKED, &arp_llreach_base, 0, + "default ARP link-layer reachability max lifetime (in seconds)"); -#define ARP_UNICAST_LIMIT 3 /* # of probes until ARP refresh broadcast */ +#define ARP_UNICAST_LIMIT 3 /* # of probes until ARP refresh broadcast */ static u_int32_t arp_unicast_lim = ARP_UNICAST_LIMIT; SYSCTL_INT(_net_link_ether_inet, OID_AUTO, arp_unicast_lim, - CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT, - "number of unicast ARP refresh probes before using broadcast"); + CTLFLAG_RW | CTLFLAG_LOCKED, &arp_unicast_lim, ARP_UNICAST_LIMIT, + "number of unicast ARP refresh probes before using broadcast"); static u_int32_t arp_maxtries = 5; SYSCTL_INT(_net_link_ether_inet, OID_AUTO, maxtries, - CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxtries, 0, ""); static u_int32_t arp_maxhold = 16; SYSCTL_UINT(_net_link_ether_inet, OID_AUTO, maxhold, - CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arp_maxhold, 0, ""); -static int useloopback = 1; /* use loopback interface for local traffic */ +static int useloopback = 1; /* use loopback interface for local traffic */ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, useloopback, - CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &useloopback, 0, ""); static int arp_proxyall = 0; SYSCTL_INT(_net_link_ether_inet, OID_AUTO, proxyall, - CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arp_proxyall, 0, ""); static int arp_sendllconflict = 0; SYSCTL_INT(_net_link_ether_inet, OID_AUTO, sendllconflict, - CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arp_sendllconflict, 0, ""); -static int log_arp_warnings = 0; /* Thread safe: no accumulated state */ +static int log_arp_warnings = 0; /* Thread safe: no accumulated state */ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, log_arp_warnings, - CTLFLAG_RW | CTLFLAG_LOCKED, - &log_arp_warnings, 0, - "log arp warning messages"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &log_arp_warnings, 0, + "log arp warning messages"); -static int keep_announcements = 1; /* Thread safe: no aging of state */ +static int keep_announcements = 1; /* Thread safe: no aging of state */ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, keep_announcements, - CTLFLAG_RW | CTLFLAG_LOCKED, - &keep_announcements, 0, - "keep arp announcements"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &keep_announcements, 0, + "keep arp announcements"); -static int send_conflicting_probes = 1; /* 
Thread safe: no accumulated state */ +static int send_conflicting_probes = 1; /* Thread safe: no accumulated state */ SYSCTL_INT(_net_link_ether_inet, OID_AUTO, send_conflicting_probes, - CTLFLAG_RW | CTLFLAG_LOCKED, - &send_conflicting_probes, 0, - "send conflicting link-local arp probes"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &send_conflicting_probes, 0, + "send conflicting link-local arp probes"); static int arp_verbose; SYSCTL_INT(_net_link_ether_inet, OID_AUTO, verbose, - CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &arp_verbose, 0, ""); /* * Generally protected by rnh_lock; use atomic operations on fields * that are also modified outside of that lock (if needed). */ -struct arpstat arpstat __attribute__((aligned(sizeof (uint64_t)))); +struct arpstat arpstat __attribute__((aligned(sizeof(uint64_t)))); SYSCTL_PROC(_net_link_ether_inet, OID_AUTO, stats, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, arp_getstat, "S,arpstat", - "ARP statistics (struct arpstat, net/if_arp.h)"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, arp_getstat, "S,arpstat", + "ARP statistics (struct arpstat, net/if_arp.h)"); static struct zone *llinfo_arp_zone; -#define LLINFO_ARP_ZONE_MAX 256 /* maximum elements in zone */ -#define LLINFO_ARP_ZONE_NAME "llinfo_arp" /* name for zone */ +#define LLINFO_ARP_ZONE_MAX 256 /* maximum elements in zone */ +#define LLINFO_ARP_ZONE_NAME "llinfo_arp" /* name for zone */ void arp_init(void) @@ -268,11 +268,12 @@ arp_init(void) LIST_INIT(&llinfo_arp); - llinfo_arp_zone = zinit(sizeof (struct llinfo_arp), - LLINFO_ARP_ZONE_MAX * sizeof (struct llinfo_arp), 0, + llinfo_arp_zone = zinit(sizeof(struct llinfo_arp), + LLINFO_ARP_ZONE_MAX * sizeof(struct llinfo_arp), 0, LLINFO_ARP_ZONE_NAME); - if (llinfo_arp_zone == NULL) + if (llinfo_arp_zone == NULL) { panic("%s: failed allocating llinfo_arp_zone", __func__); + } zone_change(llinfo_arp_zone, Z_EXPAND, TRUE); zone_change(llinfo_arp_zone, Z_CALLERACCT, FALSE); @@ -288,7 +289,7 @@ arp_llinfo_alloc(int how) la = (how == M_WAITOK) ? 
zalloc(llinfo_arp_zone) : zalloc_noblock(llinfo_arp_zone); if (la != NULL) { - bzero(la, sizeof (*la)); + bzero(la, sizeof(*la)); /* * The type of queue (Q_DROPHEAD) here is just a hint; * the actual logic that works on this queue performs @@ -298,7 +299,7 @@ arp_llinfo_alloc(int how) (uint32_t)-1 : arp_maxhold, QP_MBUF); } - return (la); + return la; } static void @@ -316,8 +317,9 @@ arp_llinfo_free(void *arg) /* Purge any link-layer info caching */ VERIFY(la->la_rt->rt_llinfo == la); - if (la->la_rt->rt_llinfo_purge != NULL) + if (la->la_rt->rt_llinfo_purge != NULL) { la->la_rt->rt_llinfo_purge(la->la_rt); + } zfree(llinfo_arp_zone, la); } @@ -329,8 +331,9 @@ arp_llinfo_addq(struct llinfo_arp *la, struct mbuf *m) struct mbuf *_m; /* prune less than CTL, else take what's at the head */ _m = _getq_scidx_lt(&la->la_holdq, SCIDX_CTL); - if (_m == NULL) + if (_m == NULL) { _m = _getq(&la->la_holdq); + } VERIFY(_m != NULL); if (arp_verbose) { log(LOG_DEBUG, "%s: dropping packet (scidx %u)\n", @@ -361,7 +364,7 @@ arp_llinfo_flushq(struct llinfo_arp *la) } la->la_prbreq_cnt = 0; VERIFY(qempty(&la->la_holdq)); - return (held); + return held; } static void @@ -387,7 +390,7 @@ arp_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri) struct if_llreach *lr = la->la_llreach; if (lr == NULL) { - bzero(ri, sizeof (*ri)); + bzero(ri, sizeof(*ri)); ri->ri_rssi = IFNET_RSSI_UNKNOWN; ri->ri_lqm = IFNET_LQM_THRESH_OFF; ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN; @@ -409,7 +412,7 @@ arp_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri) struct if_llreach *lr = la->la_llreach; if (lr == NULL) { - bzero(iflri, sizeof (*iflri)); + bzero(iflri, sizeof(*iflri)); iflri->iflri_rssi = IFNET_RSSI_UNKNOWN; iflri->iflri_lqm = IFNET_LQM_THRESH_OFF; iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN; @@ -439,8 +442,9 @@ arp_llinfo_refresh(struct rtentry *rt) return; } - if (rt->rt_expire > timenow) + if (rt->rt_expire > timenow) { rt->rt_expire = timenow; + } return; } @@ -448,8 +452,9 @@ void arp_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen) { /* Nothing more to do if it's disabled */ - if (arp_llreach_base == 0) + if (arp_llreach_base == 0) { return; + } ifnet_llreach_set_reachable(ifp, ETHERTYPE_IP, addr, alen); } @@ -457,8 +462,9 @@ arp_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen) static __inline void arp_llreach_use(struct llinfo_arp *la) { - if (la->la_llreach != NULL) + if (la->la_llreach != NULL) { la->la_lastused = net_uptime(); + } } static __inline int @@ -468,23 +474,25 @@ arp_llreach_reachable(struct llinfo_arp *la) const char *why = NULL; /* Nothing more to do if it's disabled; pretend it's reachable */ - if (arp_llreach_base == 0) - return (1); + if (arp_llreach_base == 0) { + return 1; + } if ((lr = la->la_llreach) == NULL) { /* * Link-layer reachability record isn't present for this * ARP entry; pretend it's reachable and use it as is. */ - return (1); + return 1; } else if (ifnet_llreach_reachable(lr)) { /* * Record is present, it's not shared with other ARP * entries and a packet has recently been received * from the remote host; consider it reachable. */ - if (lr->lr_reqcnt == 1) - return (1); + if (lr->lr_reqcnt == 1) { + return 1; + } /* Prime it up, if this is the first time */ if (la->la_lastused == 0) { @@ -500,8 +508,9 @@ arp_llreach_reachable(struct llinfo_arp *la) * layer reachability alone; consider it reachable if * this ARP entry has been used "recently." 
*/ - if (ifnet_llreach_reachable_delta(lr, la->la_lastused)) - return (1); + if (ifnet_llreach_reachable_delta(lr, la->la_lastused)) { + return 1; + } why = "has alias(es) and hasn't been used in a while"; } else { @@ -515,12 +524,11 @@ arp_llreach_reachable(struct llinfo_arp *la) log(LOG_DEBUG, "%s: ARP probe(s) needed for %s; " "%s [lastused %lld, lastrcvd %lld] secs ago\n", if_name(lr->lr_ifp), inet_ntop(AF_INET, - &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof (tmp)), why, + &SIN(rt_key(la->la_rt))->sin_addr, tmp, sizeof(tmp)), why, (la->la_lastused ? (int64_t)(now - la->la_lastused) : -1), (lr->lr_lastrcvd ? (int64_t)(now - lr->lr_lastrcvd) : -1)); - } - return (0); + return 0; } /* @@ -537,7 +545,7 @@ arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, if (arp_llreach_base != 0 && rt->rt_expire != 0 && !(rt->rt_ifp->if_flags & IFF_LOOPBACK) && - ifp->if_addrlen == IF_LLREACH_MAXLEN && /* Ethernet */ + ifp->if_addrlen == IF_LLREACH_MAXLEN && /* Ethernet */ alen == ifp->if_addrlen) { struct llinfo_arp *la = rt->rt_llinfo; struct if_llreach *lr; @@ -567,10 +575,11 @@ arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, * If we were doing unicast probing, we need to * deliver an event for neighbor cache resolution */ - if (lr->lr_probes != 0) + if (lr->lr_probes != 0) { *p_rt_event_code = ROUTE_LLENTRY_RESOLVED; + } - lr->lr_probes = 0; /* reset probe count */ + lr->lr_probes = 0; /* reset probe count */ IFLR_UNLOCK(lr); if (solicited) { why = " for same target HW address; " @@ -583,9 +592,10 @@ arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, lr = la->la_llreach = ifnet_llreach_alloc(ifp, ETHERTYPE_IP, addr, alen, arp_llreach_base); if (lr != NULL) { - lr->lr_probes = 0; /* reset probe count */ - if (why == NULL) + lr->lr_probes = 0; /* reset probe count */ + if (why == NULL) { why = "creating new llreach record"; + } } *p_rt_event_code = ROUTE_LLENTRY_RESOLVED; } @@ -595,7 +605,7 @@ arp_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, log(LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp), type, why, inet_ntop(AF_INET, - &SIN(rt_key(rt))->sin_addr, tmp, sizeof (tmp))); + &SIN(rt_key(rt))->sin_addr, tmp, sizeof(tmp))); } } } @@ -636,8 +646,9 @@ arptfree(struct llinfo_arp *la, void *arg) if (ap->probing && (la->la_flags & LLINFO_PROBING) && la->la_probeexp <= timenow) { struct sockaddr_dl *sdl = SDL(rt->rt_gateway); - if (sdl != NULL) + if (sdl != NULL) { sdl->sdl_alen = 0; + } (void) arp_llinfo_flushq(la); /* * Enqueue work item to invoke callback for this route entry @@ -681,8 +692,9 @@ arptfree(struct llinfo_arp *la, void *arg) */ if (!ap->draining && !ap->probing) { struct sockaddr_dl *sdl = SDL(rt->rt_gateway); - if (sdl != NULL) + if (sdl != NULL) { sdl->sdl_alen = 0; + } la->la_asked = 0; rt->rt_flags &= ~RTF_REJECT; } @@ -713,12 +725,13 @@ in_arpdrain(void *arg) struct llinfo_arp *la, *ola; struct arptf_arg farg; - if (arp_verbose) + if (arp_verbose) { log(LOG_DEBUG, "%s: draining ARP entries\n", __func__); + } lck_mtx_lock(rnh_lock); la = llinfo_arp.lh_first; - bzero(&farg, sizeof (farg)); + bzero(&farg, sizeof(farg)); farg.draining = TRUE; while ((ola = la) != NULL) { la = la->la_le.le_next; @@ -746,7 +759,7 @@ arp_timeout(thread_call_param_t arg0, thread_call_param_t arg1) lck_mtx_lock(rnh_lock); la = llinfo_arp.lh_first; - bzero(&farg, sizeof (farg)); + bzero(&farg, sizeof(farg)); while ((ola = la) != NULL) { la = la->la_le.le_next; arptfree(ola, &farg); @@ -761,10 +774,11 @@ arp_timeout(thread_call_param_t arg0, 
thread_call_param_t arg1) atv.tv_sec = MAX(arpt_prune, 5); /* re-arm the timer if there's work to do */ arp_timeout_run = 0; - if (farg.aging > 0) + if (farg.aging > 0) { arp_sched_timeout(&atv); - else if (arp_verbose) + } else if (arp_verbose) { log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__); + } lck_mtx_unlock(rnh_lock); } @@ -814,7 +828,7 @@ arp_probe(thread_call_param_t arg0, thread_call_param_t arg1) lck_mtx_lock(rnh_lock); la = llinfo_arp.lh_first; - bzero(&farg, sizeof (farg)); + bzero(&farg, sizeof(farg)); farg.probing = TRUE; while ((ola = la) != NULL) { la = la->la_le.le_next; @@ -830,10 +844,11 @@ arp_probe(thread_call_param_t arg0, thread_call_param_t arg1) atv.tv_sec = MAX(arpt_probe, ARP_PROBE_TIME); /* re-arm the probe if there's work to do */ arp_probe_run = 0; - if (farg.qlen > 0) + if (farg.qlen > 0) { arp_sched_probe(&atv); - else if (arp_verbose) + } else if (arp_verbose) { log(LOG_DEBUG, "%s: not rescheduling probe\n", __func__); + } lck_mtx_unlock(rnh_lock); } @@ -880,7 +895,7 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) struct sockaddr *gate = rt->rt_gateway; struct llinfo_arp *la = rt->rt_llinfo; static struct sockaddr_dl null_sdl = - { .sdl_len = sizeof (null_sdl), .sdl_family = AF_LINK }; + { .sdl_len = sizeof(null_sdl), .sdl_family = AF_LINK }; uint64_t timenow; char buf[MAX_IPv4_STR_LEN]; @@ -888,8 +903,9 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); RT_LOCK_ASSERT_HELD(rt); - if (rt->rt_flags & RTF_GATEWAY) + if (rt->rt_flags & RTF_GATEWAY) { return; + } timenow = net_uptime(); switch (req) { @@ -900,8 +916,9 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * restore cloning bit. */ if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != NULL && - SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST) + SIN(rt_mask(rt))->sin_addr.s_addr != INADDR_BROADCAST) { rt->rt_flags |= RTF_CLONING; + } if (rt->rt_flags & RTF_CLONING) { /* @@ -921,33 +938,34 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) } /* Announce a new entry if requested. */ if (rt->rt_flags & RTF_ANNOUNCE) { - if (la != NULL) + if (la != NULL) { arp_llreach_use(la); /* Mark use timestamp */ + } RT_UNLOCK(rt); dlil_send_arp(rt->rt_ifp, ARPOP_REQUEST, SDL(gate), rt_key(rt), NULL, rt_key(rt), 0); RT_LOCK(rt); arpstat.txannounces++; } - /* FALLTHRU */ + /* FALLTHRU */ case RTM_RESOLVE: if (gate->sa_family != AF_LINK || - gate->sa_len < sizeof (null_sdl)) { + gate->sa_len < sizeof(null_sdl)) { arpstat.invalidreqs++; log(LOG_ERR, "%s: route to %s has bad gateway address " "(sa_family %u sa_len %u) on %s\n", __func__, inet_ntop(AF_INET, &SIN(rt_key(rt))->sin_addr.s_addr, buf, - sizeof (buf)), gate->sa_family, gate->sa_len, + sizeof(buf)), gate->sa_family, gate->sa_len, if_name(rt->rt_ifp)); break; } SDL(gate)->sdl_type = rt->rt_ifp->if_type; SDL(gate)->sdl_index = rt->rt_ifp->if_index; - if (la != NULL) + if (la != NULL) { break; /* This happens on a route change */ - + } /* * Case 2: This route may come from cloning, or a manual route * add with a LL address. 
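Most of the churn in this file, as in the hunks above, is a mechanical style normalization rather than a behavioral change: sizeof (x) becomes sizeof(x), return (x); loses its parentheses, single-statement if bodies gain mandatory braces, and comment columns are re-aligned with spaces instead of tabs. A minimal before/after sketch of the pattern, with hypothetical function names, assuming nothing beyond a hosted C compiler:

#include <stddef.h>

/* before the normalization pass */
static size_t
old_style(const int *p)
{
	if (p == NULL)
		return (0);
	return (sizeof (*p));
}

/* after: mandatory braces, bare return expressions, sizeof(x) */
static size_t
new_style(const int *p)
{
	if (p == NULL) {
		return 0;
	}
	return sizeof(*p);
}

Both forms compile to identical code; the pass only makes the diff noisy, which is why the hunks with real changes deserve a close read.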
@@ -957,10 +975,10 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) arpstat.reqnobufs++; break; } - rt->rt_llinfo_get_ri = arp_llinfo_get_ri; - rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri; - rt->rt_llinfo_purge = arp_llinfo_purge; - rt->rt_llinfo_free = arp_llinfo_free; + rt->rt_llinfo_get_ri = arp_llinfo_get_ri; + rt->rt_llinfo_get_iflri = arp_llinfo_get_iflri; + rt->rt_llinfo_purge = arp_llinfo_purge; + rt->rt_llinfo_free = arp_llinfo_free; rt->rt_llinfo_refresh = arp_llinfo_refresh; rt->rt_flags |= RTF_LLINFO; la->la_rt = rt; @@ -980,7 +998,7 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) if (IN_MULTICAST(ntohl(SIN(rt_key(rt))->sin_addr.s_addr))) { RT_UNLOCK(rt); dlil_resolve_multi(rt->rt_ifp, rt_key(rt), gate, - sizeof (struct sockaddr_dl)); + sizeof(struct sockaddr_dl)); RT_LOCK(rt); rt_setexpire(rt, 0); } else if (in_broadcast(SIN(rt_key(rt))->sin_addr, @@ -988,11 +1006,11 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) struct sockaddr_dl *gate_ll = SDL(gate); size_t broadcast_len; ifnet_llbroadcast_copy_bytes(rt->rt_ifp, - LLADDR(gate_ll), sizeof (gate_ll->sdl_data), + LLADDR(gate_ll), sizeof(gate_ll->sdl_data), &broadcast_len); gate_ll->sdl_alen = broadcast_len; gate_ll->sdl_family = AF_LINK; - gate_ll->sdl_len = sizeof (struct sockaddr_dl); + gate_ll->sdl_len = sizeof(struct sockaddr_dl); /* In case we're called before 1.0 sec. has elapsed */ rt_setexpire(rt, MAX(timenow, 1)); } else if (IN_LINKLOCAL(ntohl(SIN(rt_key(rt))-> @@ -1028,8 +1046,9 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) /* * Purge any link-layer info caching. */ - if (rt->rt_llinfo_purge != NULL) + if (rt->rt_llinfo_purge != NULL) { rt->rt_llinfo_purge(rt); + } /* * Adjust route ref count for the @@ -1045,8 +1064,9 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * If rmx_mtu is not locked, update it * to the MTU used by the new interface. */ - if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) + if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) { rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; + } } } else { IFA_UNLOCK(rt->rt_ifa); @@ -1054,8 +1074,9 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) break; case RTM_DELETE: - if (la == NULL) + if (la == NULL) { break; + } /* * Unchain it but defer the actual freeing until the route * itself is to be freed. rt->rt_llinfo still points to @@ -1070,8 +1091,9 @@ arp_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) /* * Purge any link-layer info caching. */ - if (rt->rt_llinfo_purge != NULL) + if (rt->rt_llinfo_purge != NULL) { rt->rt_llinfo_purge(rt); + } rt->rt_flags &= ~RTF_LLINFO; (void) arp_llinfo_flushq(la); @@ -1099,7 +1121,7 @@ sdl_addr_to_hex(const struct sockaddr_dl *sdl, char *orig_buf, int buflen) *buf = (i == maxbytes - 1) ? '\0' : ':'; buf++; } - return (orig_buf); + return orig_buf; } /* @@ -1114,9 +1136,9 @@ arp_lookup_route(const struct in_addr *addr, int create, int proxy, route_t *route, unsigned int ifscope) { struct sockaddr_inarp sin = - { sizeof (sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 }; + { sizeof(sin), AF_INET, 0, { 0 }, { 0 }, 0, 0 }; const char *why = NULL; - errno_t error = 0; + errno_t error = 0; route_t rt; *route = NULL; @@ -1128,12 +1150,14 @@ arp_lookup_route(const struct in_addr *addr, int create, int proxy, * If the destination is a link-local address, don't * constrain the lookup (don't scope it). 
*/ - if (IN_LINKLOCAL(ntohl(addr->s_addr))) + if (IN_LINKLOCAL(ntohl(addr->s_addr))) { ifscope = IFSCOPE_NONE; + } rt = rtalloc1_scoped((struct sockaddr *)&sin, create, 0, ifscope); - if (rt == NULL) - return (ENETUNREACH); + if (rt == NULL) { + return ENETUNREACH; + } RT_LOCK(rt); @@ -1153,7 +1177,7 @@ arp_lookup_route(const struct in_addr *addr, int create, int proxy, char tmp[MAX_IPv4_STR_LEN]; log(LOG_DEBUG, "%s: link#%d %s failed: %s\n", __func__, ifscope, inet_ntop(AF_INET, addr, tmp, - sizeof (tmp)), why); + sizeof(tmp)), why); } /* @@ -1179,27 +1203,28 @@ arp_lookup_route(const struct in_addr *addr, int create, int proxy, RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); } - return (error); + return error; } /* * Caller releases reference and does RT_UNLOCK(rt). */ *route = rt; - return (0); + return 0; } boolean_t -arp_is_entry_probing (route_t p_route) +arp_is_entry_probing(route_t p_route) { struct llinfo_arp *llinfo = p_route->rt_llinfo; if (llinfo != NULL && llinfo->la_llreach != NULL && - llinfo->la_llreach->lr_probes != 0) - return (TRUE); + llinfo->la_llreach->lr_probes != 0) { + return TRUE; + } - return (FALSE); + return FALSE; } /* @@ -1210,11 +1235,11 @@ arp_is_entry_probing (route_t p_route) */ errno_t arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, - struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint, + struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint, mbuf_t packet) { - route_t route = NULL; /* output route */ - errno_t result = 0; + route_t route = NULL; /* output route */ + errno_t result = 0; struct sockaddr_dl *gateway; struct llinfo_arp *llinfo = NULL; boolean_t usable, probing = FALSE; @@ -1226,14 +1251,17 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, struct sockaddr_dl sdl; boolean_t send_probe_notif = FALSE; - if (ifp == NULL || net_dest == NULL) - return (EINVAL); + if (ifp == NULL || net_dest == NULL) { + return EINVAL; + } - if (net_dest->sin_family != AF_INET) - return (EAFNOSUPPORT); + if (net_dest->sin_family != AF_INET) { + return EAFNOSUPPORT; + } - if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) - return (ENETDOWN); + if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) { + return ENETDOWN; + } /* * If we were given a route, verify the route and grab the gateway @@ -1245,10 +1273,12 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, */ result = route_to_gwroute((const struct sockaddr *) net_dest, hint, &route); - if (result != 0) - return (result); - if (route != NULL) + if (result != 0) { + return result; + } + if (route != NULL) { RT_LOCK_ASSERT_HELD(route); + } } if ((packet != NULL && (packet->m_flags & M_BCAST)) || @@ -1261,20 +1291,22 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, if (result == 0) { ll_dest->sdl_alen = broadcast_len; ll_dest->sdl_family = AF_LINK; - ll_dest->sdl_len = sizeof (struct sockaddr_dl); + ll_dest->sdl_len = sizeof(struct sockaddr_dl); } goto release; } if ((packet != NULL && (packet->m_flags & M_MCAST)) || ((ifp->if_flags & IFF_MULTICAST) && IN_MULTICAST(ntohl(net_dest->sin_addr.s_addr)))) { - if (route != NULL) + if (route != NULL) { RT_UNLOCK(route); + } result = dlil_resolve_multi(ifp, (const struct sockaddr *)net_dest, (struct sockaddr *)ll_dest, ll_dest_len); - if (route != NULL) + if (route != NULL) { RT_LOCK(route); + } goto release; } @@ -1300,20 +1332,22 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, */ result = arp_lookup_route(&net_dest->sin_addr, 1, 0, &route, 
ifp->if_index); - if (result == 0) + if (result == 0) { RT_LOCK_ASSERT_HELD(route); + } } if (result || route == NULL || (llinfo = route->rt_llinfo) == NULL) { /* In case result is 0 but no route, return an error */ - if (result == 0) + if (result == 0) { result = EHOSTUNREACH; + } if (route != NULL && route->rt_llinfo == NULL) { char tmp[MAX_IPv4_STR_LEN]; log(LOG_ERR, "%s: can't allocate llinfo for %s\n", __func__, inet_ntop(AF_INET, &net_dest->sin_addr, - tmp, sizeof (tmp))); + tmp, sizeof(tmp))); } goto release; } @@ -1336,11 +1370,12 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, /* Entry is usable, so fill in info for caller */ bcopy(gateway, ll_dest, MIN(gateway->sdl_len, ll_dest_len)); result = 0; - arp_llreach_use(llinfo); /* Mark use timestamp */ + arp_llreach_use(llinfo); /* Mark use timestamp */ lr = llinfo->la_llreach; - if (lr == NULL) + if (lr == NULL) { goto release; + } rt_ifa = route->rt_ifa; /* Become a regular mutex, just in case */ @@ -1375,7 +1410,7 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, * we'll broadcast ARP next time around. */ lr->lr_probes++; - bzero(&sdl, sizeof (sdl)); + bzero(&sdl, sizeof(sdl)); sdl.sdl_alen = ifp->if_addrlen; bcopy(&lr->lr_key.addr, LLADDR(&sdl), ifp->if_addrlen); @@ -1419,10 +1454,11 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, * caller free the packet instead. It's safe to do that since * we still hold the route's rt_lock. */ - if (packet != NULL) + if (packet != NULL) { arp_llinfo_addq(llinfo, packet); - else + } else { llinfo->la_prbreq_cnt++; + } /* * Regardless of permanent vs. expirable entry, we need to * avoid having packets sit in la_holdq forever; thus mark the @@ -1490,7 +1526,7 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, ev_msg.dv[0].data_ptr = &in_arpfailure; ev_msg.dv[0].data_length = sizeof(struct - kev_in_arpfailure); + kev_in_arpfailure); dlil_post_complete_msg(NULL, &ev_msg); } result = EJUSTRETURN; @@ -1531,8 +1567,9 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, result = EJUSTRETURN; release: - if (result == EHOSTUNREACH) + if (result == EHOSTUNREACH) { atomic_add_32(&arpstat.dropped, 1); + } if (route != NULL) { if (send_probe_notif) { @@ -1551,9 +1588,10 @@ release: lck_mtx_lock(rnh_lock); rnh = rt_tables[AF_INET]; - if (rnh != NULL) + if (rnh != NULL) { (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev); + } lck_mtx_unlock(rnh_lock); RT_LOCK(route); } @@ -1573,7 +1611,7 @@ release: arp_sched_probe(NULL); lck_mtx_unlock(rnh_lock); } - return (result); + return result; } errno_t @@ -1588,10 +1626,10 @@ arp_ip_handle_input(ifnet_t ifp, u_short arpop, struct in_ifaddr *ia; struct in_ifaddr *best_ia = NULL; struct sockaddr_in best_ia_sin; - route_t route = NULL; + route_t route = NULL; char buf[3 * MAX_HW_LEN]; /* enough for MAX_HW_LEN byte hw address */ struct llinfo_arp *llinfo; - errno_t error; + errno_t error; int created_announcement = 0; int bridged = 0, is_bridge = 0; uint32_t rt_evcode = 0; @@ -1604,16 +1642,20 @@ arp_ip_handle_input(ifnet_t ifp, u_short arpop, arpstat.received++; /* Do not respond to requests for 0.0.0.0 */ - if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST) + if (target_ip->sin_addr.s_addr == INADDR_ANY && arpop == ARPOP_REQUEST) { goto done; + } - if (ifp->if_bridge) + if (ifp->if_bridge) { bridged = 1; - if (ifp->if_type == IFT_BRIDGE) + } + if (ifp->if_type == IFT_BRIDGE) { is_bridge = 1; + } - if (arpop == ARPOP_REPLY) + if (arpop == ARPOP_REPLY) { 
arpstat.rxreplies++; + } /* * Determine if this ARP is for us @@ -1651,8 +1693,8 @@ arp_ip_handle_input(ifnet_t ifp, u_short arpop, IFA_UNLOCK(&ia->ia_ifa); } -#define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia) \ - (ia->ia_ifp->if_bridge == ifp->if_softc && \ +#define BDG_MEMBER_MATCHES_ARP(addr, ifp, ia) \ + (ia->ia_ifp->if_bridge == ifp->if_softc && \ bcmp(IF_LLADDR(ia->ia_ifp), IF_LLADDR(ifp), ifp->if_addrlen) == 0 && \ addr == ia->ia_addr.sin_addr.s_addr) /* @@ -1706,30 +1748,32 @@ arp_ip_handle_input(ifnet_t ifp, u_short arpop, * If we're not a bridge member, or if we are but there's no * IPv4 address to use for the interface, drop the packet. */ - if (!bridged || best_ia == NULL) + if (!bridged || best_ia == NULL) { goto done; + } match: /* If the packet is from this interface, ignore the packet */ if (bcmp(CONST_LLADDR(sender_hw), IF_LLADDR(ifp), - sender_hw->sdl_alen) == 0) + sender_hw->sdl_alen) == 0) { goto done; + } /* Check for a conflict */ if (!bridged && sender_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr) { struct kev_msg ev_msg; - struct kev_in_collision *in_collision; - u_char storage[sizeof (struct kev_in_collision) + MAX_HW_LEN]; + struct kev_in_collision *in_collision; + u_char storage[sizeof(struct kev_in_collision) + MAX_HW_LEN]; - bzero(&ev_msg, sizeof (struct kev_msg)); - bzero(storage, (sizeof (struct kev_in_collision) + MAX_HW_LEN)); + bzero(&ev_msg, sizeof(struct kev_msg)); + bzero(storage, (sizeof(struct kev_in_collision) + MAX_HW_LEN)); in_collision = (struct kev_in_collision *)(void *)storage; log(LOG_ERR, "%s duplicate IP address %s sent from " "address %s\n", if_name(ifp), inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str, - sizeof (ipv4str)), sdl_addr_to_hex(sender_hw, buf, - sizeof (buf))); + sizeof(ipv4str)), sdl_addr_to_hex(sender_hw, buf, + sizeof(buf))); /* Send a kernel event so anyone can learn of the conflict */ in_collision->link_data.if_family = ifp->if_family; @@ -1747,7 +1791,7 @@ match: ev_msg.event_code = KEV_INET_ARPCOLLISION; ev_msg.dv[0].data_ptr = in_collision; ev_msg.dv[0].data_length = - sizeof (struct kev_in_collision) + in_collision->hw_len; + sizeof(struct kev_in_collision) + in_collision->hw_len; ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(NULL, &ev_msg); atomic_add_32(&arpstat.dupips, 1); @@ -1764,12 +1808,14 @@ match: (target_ip->sin_addr.s_addr == best_ia_sin.sin_addr.s_addr && sender_ip->sin_addr.s_addr != 0), 0, &route, ifp->if_index); - if (error == 0) + if (error == 0) { RT_LOCK_ASSERT_HELD(route); + } if (error || route == NULL || route->rt_gateway == NULL) { - if (arpop != ARPOP_REQUEST) + if (arpop != ARPOP_REQUEST) { goto respond; + } if (arp_sendllconflict && send_conflicting_probes != 0 && (ifp->if_eflags & IFEF_ARPLL) && @@ -1792,8 +1838,9 @@ match: &route, ifp->if_index); if (error != 0 || route == NULL || - route->rt_gateway == NULL) + route->rt_gateway == NULL) { goto respond; + } RT_LOCK_ASSERT_HELD(route); @@ -1812,19 +1859,20 @@ match: log(LOG_INFO, "arp: %s on %s sent " "probe for %s, already on %s\n", sdl_addr_to_hex(sender_hw, buf, - sizeof (buf)), if_name(ifp), + sizeof(buf)), if_name(ifp), inet_ntop(AF_INET, &target_ip->sin_addr, ipv4str, - sizeof (ipv4str)), + sizeof(ipv4str)), if_name(route->rt_ifp)); log(LOG_INFO, "arp: sending " "conflicting probe to %s on %s\n", sdl_addr_to_hex(sender_hw, buf, - sizeof (buf)), if_name(ifp)); + sizeof(buf)), if_name(ifp)); } /* Mark use timestamp */ - if (route->rt_llinfo != NULL) + if (route->rt_llinfo != NULL) { arp_llreach_use(route->rt_llinfo); + } /* We're 
done with the route */ RT_REMREF_LOCKED(route); RT_UNLOCK(route); @@ -1874,15 +1922,18 @@ match: error = arp_lookup_route(&sender_ip->sin_addr, 1, 0, &route, ifp->if_index); - if (error == 0) + if (error == 0) { RT_LOCK_ASSERT_HELD(route); + } if (error == 0 && route != NULL && - route->rt_gateway != NULL) + route->rt_gateway != NULL) { created_announcement = 1; + } } - if (created_announcement == 0) + if (created_announcement == 0) { goto respond; + } } else { goto respond; } @@ -1896,19 +1947,21 @@ match: if (!bridged && route->rt_ifp != ifp) { if (!IN_LINKLOCAL(ntohl(sender_ip->sin_addr.s_addr)) || !(ifp->if_eflags & IFEF_ARPLL)) { - if (arp_verbose || log_arp_warnings) + if (arp_verbose || log_arp_warnings) { log(LOG_ERR, "arp: %s is on %s but got " "reply from %s on %s\n", inet_ntop(AF_INET, &sender_ip->sin_addr, - ipv4str, sizeof (ipv4str)), + ipv4str, sizeof(ipv4str)), if_name(route->rt_ifp), sdl_addr_to_hex(sender_hw, buf, - sizeof (buf)), if_name(ifp)); + sizeof(buf)), if_name(ifp)); + } goto respond; } else { /* Don't change a permanent address */ - if (route->rt_expire == 0) + if (route->rt_expire == 0) { goto respond; + } /* * We're about to check and/or change the route's ifp @@ -1942,8 +1995,9 @@ match: /* * Purge any link-layer info caching. */ - if (route->rt_llinfo_purge != NULL) + if (route->rt_llinfo_purge != NULL) { route->rt_llinfo_purge(route); + } /* Adjust route ref count for the interfaces */ if (route->rt_if_ref_fn != NULL) { @@ -1972,8 +2026,9 @@ match: lck_mtx_unlock(rnh_lock); RT_LOCK(route); /* Don't bother if the route is down */ - if (!(route->rt_flags & RTF_UP)) + if (!(route->rt_flags & RTF_UP)) { goto respond; + } /* Refresh gateway pointer */ gateway = SDL(route->rt_gateway); } @@ -1987,18 +2042,18 @@ match: char buf2[3 * MAX_HW_LEN]; log(LOG_INFO, "arp: %s moved from %s to %s on %s\n", inet_ntop(AF_INET, &sender_ip->sin_addr, ipv4str, - sizeof (ipv4str)), - sdl_addr_to_hex(gateway, buf, sizeof (buf)), - sdl_addr_to_hex(sender_hw, buf2, sizeof (buf2)), + sizeof(ipv4str)), + sdl_addr_to_hex(gateway, buf, sizeof(buf)), + sdl_addr_to_hex(sender_hw, buf2, sizeof(buf2)), if_name(ifp)); } else if (route->rt_expire == 0) { if (arp_verbose || log_arp_warnings) { log(LOG_ERR, "arp: %s attempts to modify " "permanent entry for %s on %s\n", sdl_addr_to_hex(sender_hw, buf, - sizeof (buf)), + sizeof(buf)), inet_ntop(AF_INET, &sender_ip->sin_addr, - ipv4str, sizeof (ipv4str)), + ipv4str, sizeof(ipv4str)), if_name(ifp)); } goto respond; @@ -2010,8 +2065,9 @@ match: bcopy(CONST_LLADDR(sender_hw), LLADDR(gateway), gateway->sdl_alen); /* Update the expire time for the route and clear the reject flag */ - if (route->rt_expire != 0) + if (route->rt_expire != 0) { rt_setexpire(route, net_uptime() + arpt_keep); + } route->rt_flags &= ~RTF_REJECT; /* cache the gateway (sender HW) address */ @@ -2065,9 +2121,10 @@ match: lck_mtx_lock(rnh_lock); rnh = rt_tables[AF_INET]; - if (rnh != NULL) + if (rnh != NULL) { (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev); + } lck_mtx_unlock(rnh_lock); RT_LOCK(route); } @@ -2093,15 +2150,17 @@ match: respond: if (route != NULL) { /* Mark use timestamp if we're going to send a reply */ - if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL) + if (arpop == ARPOP_REQUEST && route->rt_llinfo != NULL) { arp_llreach_use(route->rt_llinfo); + } RT_REMREF_LOCKED(route); RT_UNLOCK(route); route = NULL; } - if (arpop != ARPOP_REQUEST) + if (arpop != ARPOP_REQUEST) { goto done; + } /* See comments at the beginning of this routine */ 
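	/*
	 * From this point on only ARPOP_REQUEST frames remain; replies
	 * were consumed above and everything else bailed out to done:.
	 * Below, a request for one of our own addresses is answered with
	 * our link-layer address; a request for a foreign address is
	 * answered only when net.link.ether.inet.proxyall is set and we
	 * hold a route to the target that does not point back at the
	 * receiving interface.
	 */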
arpstat.rxrequests++; @@ -2139,8 +2198,9 @@ respond: * use proxy. If we aren't supposed to proxy all, * we are done. */ - if (!arp_proxyall) + if (!arp_proxyall) { goto done; + } /* * See if we have a route to the target ip before @@ -2148,8 +2208,9 @@ respond: */ route = rtalloc1_scoped((struct sockaddr *) (size_t)target_ip, 0, 0, ifp->if_index); - if (!route) + if (!route) { goto done; + } /* * Don't proxy for hosts already on the same interface. @@ -2162,8 +2223,9 @@ respond: } } /* Mark use timestamp */ - if (route->rt_llinfo != NULL) + if (route->rt_llinfo != NULL) { arp_llreach_use(route->rt_llinfo); + } RT_REMREF_LOCKED(route); RT_UNLOCK(route); } @@ -2173,9 +2235,10 @@ respond: sender_hw, (const struct sockaddr *)sender_ip, 0); done: - if (best_ia != NULL) + if (best_ia != NULL) { IFA_REMREF(&best_ia->ia_ifa); - return (0); + } + return 0; } void @@ -2195,8 +2258,9 @@ static int arp_getstat SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = (size_t)sizeof (struct arpstat); + if (req->oldptr == USER_ADDR_NULL) { + req->oldlen = (size_t)sizeof(struct arpstat); + } - return (SYSCTL_OUT(req, &arpstat, MIN(sizeof (arpstat), req->oldlen))); + return SYSCTL_OUT(req, &arpstat, MIN(sizeof(arpstat), req->oldlen)); } diff --git a/bsd/netinet/in_arp.h b/bsd/netinet/in_arp.h index 51310486a..cc1d08d77 100644 --- a/bsd/netinet/in_arp.h +++ b/bsd/netinet/in_arp.h @@ -27,7 +27,7 @@ */ #ifndef _NETINET_IN_ARP_H_ -#define _NETINET_IN_ARP_H_ +#define _NETINET_IN_ARP_H_ #ifdef KERNEL #include @@ -61,7 +61,7 @@ struct sockaddr_in; * the packet. */ #ifdef BSD_KERNEL_PRIVATE -extern boolean_t arp_is_entry_probing (route_t p_route); +extern boolean_t arp_is_entry_probing(route_t p_route); extern errno_t arp_lookup_ip(ifnet_t interface, const struct sockaddr_in *ip_dest, struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint, mbuf_t packet); @@ -124,7 +124,7 @@ extern errno_t inet_arp_handle_input(ifnet_t ifp, u_int16_t arpop, */ #ifdef BSD_KERNEL_PRIVATE /* inet_arp_init_ifaddr is aliased to arp_ifinit (if_ether.h) */ -#define inet_arp_init_ifaddr arp_ifinit +#define inet_arp_init_ifaddr arp_ifinit #else extern void inet_arp_init_ifaddr(ifnet_t interface, ifaddr_t ipaddr); #endif /* !BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/in_cksum.c b/bsd/netinet/in_cksum.c index 538794d8d..b4cd509ff 100644 --- a/bsd/netinet/in_cksum.c +++ b/bsd/netinet/in_cksum.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -66,7 +66,7 @@ #include #include #include -#define _IP_VHL +#define _IP_VHL #include #include @@ -76,22 +76,22 @@ * This routine is very heavily used in the network * code and should be modified for each CPU to be as fast as possible. */ -#define REDUCE16 { \ - q_util.q = sum; \ +#define REDUCE16 { \ + q_util.q = sum; \ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \ - sum = l_util.s[0] + l_util.s[1]; \ - ADDCARRY(sum); \ + sum = l_util.s[0] + l_util.s[1]; \ + ADDCARRY(sum); \ } union l_util { - uint16_t s[2]; - uint32_t l; + uint16_t s[2]; + uint32_t l; }; union q_util { - uint16_t s[4]; - uint32_t l[2]; - uint64_t q; + uint16_t s[4]; + uint32_t l[2]; + uint64_t q; }; extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t); @@ -102,7 +102,7 @@ extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t); uint16_t b_sum16(const void *buf, int len) { - return (os_cpu_in_cksum(buf, len, 0)); + return os_cpu_in_cksum(buf, len, 0); } uint16_t inet_cksum_simple(struct mbuf *, int); @@ -112,7 +112,7 @@ uint16_t inet_cksum_simple(struct mbuf *, int); uint16_t inet_cksum_simple(struct mbuf *m, int len) { - return (inet_cksum(m, 0, 0, len)); + return inet_cksum(m, 0, 0, len); } uint16_t @@ -121,19 +121,19 @@ in_addword(uint16_t a, uint16_t b) uint64_t sum = a + b; ADDCARRY(sum); - return (sum); + return sum; } uint16_t in_pseudo(uint32_t a, uint32_t b, uint32_t c) { - uint64_t sum; - union q_util q_util; - union l_util l_util; + uint64_t sum; + union q_util q_util; + union l_util l_util; - sum = (uint64_t)a + b + c; - REDUCE16; - return (sum); + sum = (uint64_t)a + b + c; + REDUCE16; + return sum; } uint16_t @@ -145,7 +145,7 @@ in_pseudo64(uint64_t a, uint64_t b, uint64_t c) sum = a + b + c; REDUCE16; - return (sum); + return sum; } /* @@ -154,7 +154,7 @@ in_pseudo64(uint64_t a, uint64_t b, uint64_t c) uint16_t in_cksum_hdr_opt(const struct ip *ip) { - return (~b_sum16(ip, (IP_VHL_HL(ip->ip_vhl) << 2)) & 0xffff); + return ~b_sum16(ip, (IP_VHL_HL(ip->ip_vhl) << 2)) & 0xffff; } /* @@ -175,11 +175,12 @@ ip_cksum_hdr_dir(struct mbuf *m, uint32_t hlen, int out) ipstat.ips_rcv_swcsum_bytes += hlen; } - if (hlen == sizeof (*ip) && - m->m_len >= sizeof (*ip) && IP_HDR_ALIGNED_P(ip)) - return (in_cksum_hdr(ip)); + if (hlen == sizeof(*ip) && + m->m_len >= sizeof(*ip) && IP_HDR_ALIGNED_P(ip)) { + return in_cksum_hdr(ip); + } - return (inet_cksum(m, 0, 0, hlen)); + return inet_cksum(m, 0, 0, hlen); } uint16_t @@ -196,11 +197,12 @@ ip_cksum_hdr_dir_buffer(const void *buffer, uint32_t hlen, uint32_t len, ipstat.ips_rcv_swcsum_bytes += hlen; } - if (hlen == sizeof (*ip) && - len >= sizeof (*ip) && IP_HDR_ALIGNED_P(ip)) - return (in_cksum_hdr(ip)); + if (hlen == sizeof(*ip) && + len >= sizeof(*ip) && IP_HDR_ALIGNED_P(ip)) { + return in_cksum_hdr(ip); + } - return (inet_cksum_buffer(buffer, 0, 0, hlen)); + return inet_cksum_buffer(buffer, 0, 0, hlen); } /* @@ -219,7 +221,7 @@ inet_cksum(struct mbuf *m, uint32_t nxt, uint32_t off, uint32_t len) /* include pseudo header checksum? */ if (nxt != 0) { struct ip *ip; - unsigned char buf[sizeof ((*ip))] __attribute__((aligned(8))); + unsigned char buf[sizeof((*ip))] __attribute__((aligned(8))); uint32_t mlen; /* @@ -229,7 +231,7 @@ inet_cksum(struct mbuf *m, uint32_t nxt, uint32_t off, uint32_t len) * the caller setting m_pkthdr.len correctly, if the mbuf is * a M_PKTHDR one. 
*/ - if ((mlen = m_length2(m, NULL)) < sizeof (*ip)) { + if ((mlen = m_length2(m, NULL)) < sizeof(*ip)) { panic("%s: mbuf %p too short (%d) for IPv4 header", __func__, m, mlen); /* NOTREACHED */ @@ -240,9 +242,9 @@ inet_cksum(struct mbuf *m, uint32_t nxt, uint32_t off, uint32_t len) * aligned, copy it to a local buffer. Note here that we * expect the data pointer to point to the IP header. */ - if ((sizeof (*ip) > m->m_len) || + if ((sizeof(*ip) > m->m_len) || !IP_HDR_ALIGNED_P(mtod(m, caddr_t))) { - m_copydata(m, 0, sizeof (*ip), (caddr_t)buf); + m_copydata(m, 0, sizeof(*ip), (caddr_t)buf); ip = (struct ip *)(void *)buf; } else { ip = (struct ip *)(void *)(m->m_data); @@ -256,7 +258,7 @@ inet_cksum(struct mbuf *m, uint32_t nxt, uint32_t off, uint32_t len) ADDCARRY(sum); } - return (~sum & 0xffff); + return ~sum & 0xffff; } /* @@ -271,15 +273,16 @@ inet_cksum_buffer(const void *buffer, uint32_t nxt, uint32_t off, { uint32_t sum; - if (off >= len) + if (off >= len) { panic("%s: off (%d) >= len (%d)", __func__, off, len); + } sum = b_sum16(&((const uint8_t *)buffer)[off], len); /* include pseudo header checksum? */ if (nxt != 0) { const struct ip *ip; - unsigned char buf[sizeof ((*ip))] __attribute__((aligned(8))); + unsigned char buf[sizeof((*ip))] __attribute__((aligned(8))); /* * In case the IP header is not contiguous, or not 32-bit @@ -287,7 +290,7 @@ inet_cksum_buffer(const void *buffer, uint32_t nxt, uint32_t off, * expect the data pointer to point to the IP header. */ if (!IP_HDR_ALIGNED_P(buffer)) { - memcpy(buf, buffer, sizeof (*ip)); + memcpy(buf, buffer, sizeof(*ip)); ip = (const struct ip *)(const void *)buf; } else { ip = (const struct ip *)buffer; @@ -301,13 +304,13 @@ inet_cksum_buffer(const void *buffer, uint32_t nxt, uint32_t off, ADDCARRY(sum); } - return (~sum & 0xffff); + return ~sum & 0xffff; } #if DEBUG || DEVELOPMENT #include -#define CKSUM_ERR kprintf +#define CKSUM_ERR kprintf /* * The following routines implement the portable, reference implementation @@ -337,7 +340,7 @@ in_cksum_mbuf_ref(struct mbuf *m, int len, int off, uint32_t initial_sum) for (;;) { if (__improbable(m == NULL)) { CKSUM_ERR("%s: out of data\n", __func__); - return ((uint32_t)-1); + return (uint32_t)-1; } mlen = m->m_len; if (mlen > off) { @@ -346,23 +349,26 @@ in_cksum_mbuf_ref(struct mbuf *m, int len, int off, uint32_t initial_sum) goto post_initial_offset; } off -= mlen; - if (len == 0) + if (len == 0) { break; + } m = m->m_next; } for (; len > 0; m = m->m_next) { if (__improbable(m == NULL)) { CKSUM_ERR("%s: out of data\n", __func__); - return ((uint32_t)-1); + return (uint32_t)-1; } mlen = m->m_len; data = mtod(m, uint8_t *); post_initial_offset: - if (mlen == 0) + if (mlen == 0) { continue; - if (mlen > len) + } + if (mlen > len) { mlen = len; + } len -= mlen; partial = 0; @@ -399,9 +405,10 @@ post_initial_offset: data += 32; mlen -= 32; if (__improbable(partial & 0xc0000000)) { - if (needs_swap) + if (needs_swap) { partial = (partial << 8) + (partial >> 24); + } sum += (partial >> 16); sum += (partial & 0xffff); partial = 0; @@ -448,8 +455,9 @@ post_initial_offset: started_on_odd = !started_on_odd; } - if (needs_swap) + if (needs_swap) { partial = (partial << 8) + (partial >> 24); + } sum += (partial >> 16) + (partial & 0xffff); /* * Reduce sum to allow potential byte swap @@ -459,7 +467,7 @@ post_initial_offset: } final_acc = ((sum >> 16) & 0xffff) + (sum & 0xffff); final_acc = (final_acc >> 16) + (final_acc & 0xffff); - return (final_acc & 0xffff); + return final_acc & 0xffff; } #else 
/* __LP64__ */ @@ -483,7 +491,7 @@ in_cksum_mbuf_ref(struct mbuf *m, int len, int off, uint32_t initial_sum) for (;;) { if (__improbable(m == NULL)) { CKSUM_ERR("%s: out of data\n", __func__); - return ((uint32_t)-1); + return (uint32_t)-1; } mlen = m->m_len; if (mlen > off) { @@ -492,23 +500,26 @@ in_cksum_mbuf_ref(struct mbuf *m, int len, int off, uint32_t initial_sum) goto post_initial_offset; } off -= mlen; - if (len == 0) + if (len == 0) { break; + } m = m->m_next; } for (; len > 0; m = m->m_next) { if (__improbable(m == NULL)) { CKSUM_ERR("%s: out of data\n", __func__); - return ((uint32_t)-1); + return (uint32_t)-1; } mlen = m->m_len; data = mtod(m, uint8_t *); post_initial_offset: - if (mlen == 0) + if (mlen == 0) { continue; - if (mlen > len) + } + if (mlen > len) { mlen = len; + } len -= mlen; partial = 0; @@ -525,8 +536,9 @@ post_initial_offset: } needs_swap = started_on_odd; if ((uintptr_t)data & 2) { - if (mlen < 2) + if (mlen < 2) { goto trailing_bytes; + } partial += *(uint16_t *)(void *)data; data += 2; mlen -= 2; @@ -553,9 +565,10 @@ post_initial_offset: data += 64; mlen -= 64; if (__improbable(partial & (3ULL << 62))) { - if (needs_swap) + if (needs_swap) { partial = (partial << 8) + (partial >> 56); + } sum += (partial >> 32); sum += (partial & 0xffffffff); partial = 0; @@ -606,8 +619,9 @@ trailing_bytes: started_on_odd = !started_on_odd; } - if (needs_swap) + if (needs_swap) { partial = (partial << 8) + (partial >> 56); + } sum += (partial >> 32) + (partial & 0xffffffff); /* * Reduce sum to allow potential byte swap @@ -619,7 +633,7 @@ trailing_bytes: ((sum >> 16) & 0xffff) + (sum & 0xffff); final_acc = (final_acc >> 16) + (final_acc & 0xffff); final_acc = (final_acc >> 16) + (final_acc & 0xffff); - return (final_acc & 0xffff); + return final_acc & 0xffff; } #endif /* __LP64 */ #endif /* DEBUG || DEVELOPMENT */ diff --git a/bsd/netinet/in_gif.c b/bsd/netinet/in_gif.c index 86a2b9920..390bdf4e2 100644 --- a/bsd/netinet/in_gif.c +++ b/bsd/netinet/in_gif.c @@ -91,13 +91,13 @@ int ip_gif_ttl = GIF_TTL; SYSCTL_INT(_net_inet_ip, IPCTL_GIF_TTL, gifttl, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_gif_ttl, 0, ""); + &ip_gif_ttl, 0, ""); int in_gif_output( - struct ifnet *ifp, - int family, - struct mbuf *m, + struct ifnet *ifp, + int family, + struct mbuf *m, __unused struct rtentry *rt) { struct gif_softc *sc = ifnet_softc(ifp); @@ -107,7 +107,7 @@ in_gif_output( (void *)sc->gif_psrc; struct sockaddr_in *sin_dst = (struct sockaddr_in *) (void *)sc->gif_pdst; - struct ip iphdr; /* capsule IP header, host byte ordered */ + struct ip iphdr; /* capsule IP header, host byte ordered */ int proto, error; u_int8_t tos; struct ip_out_args ipoa; @@ -124,77 +124,81 @@ in_gif_output( sin_src->sin_family != AF_INET || sin_dst->sin_family != AF_INET) { m_freem(m); - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } switch (family) { #if INET case AF_INET: - { + { struct ip *ip; proto = IPPROTO_IPV4; - if (mbuf_len(m) < sizeof (*ip)) { - m = m_pullup(m, sizeof (*ip)); - if (!m) - return (ENOBUFS); + if (mbuf_len(m) < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (!m) { + return ENOBUFS; + } } ip = mtod(m, struct ip *); tos = ip->ip_tos; break; - } + } #endif /* INET */ #if INET6 case AF_INET6: - { + { struct ip6_hdr *ip6; proto = IPPROTO_IPV6; - if (mbuf_len(m) < sizeof (*ip6)) { - m = m_pullup(m, sizeof (*ip6)); - if (!m) - return (ENOBUFS); + if (mbuf_len(m) < sizeof(*ip6)) { + m = m_pullup(m, sizeof(*ip6)); + if (!m) { + return ENOBUFS; + } } ip6 = mtod(m, struct ip6_hdr *); tos = 
(ntohl(ip6->ip6_flow) >> 20) & 0xff; break; - } + } #endif /* INET6 */ default: #if DEBUG printf("in_gif_output: warning: unknown family %d passed\n", - family); + family); #endif m_freem(m); - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } - bzero(&iphdr, sizeof (iphdr)); + bzero(&iphdr, sizeof(iphdr)); iphdr.ip_src = sin_src->sin_addr; /* bidirectional configured tunnel mode */ - if (sin_dst->sin_addr.s_addr != INADDR_ANY) + if (sin_dst->sin_addr.s_addr != INADDR_ANY) { iphdr.ip_dst = sin_dst->sin_addr; - else { + } else { m_freem(m); - return (ENETUNREACH); + return ENETUNREACH; } iphdr.ip_p = proto; /* version will be set in ip_output() */ iphdr.ip_ttl = ip_gif_ttl; - iphdr.ip_len = m->m_pkthdr.len + sizeof (struct ip); - if (ifp->if_flags & IFF_LINK1) + iphdr.ip_len = m->m_pkthdr.len + sizeof(struct ip); + if (ifp->if_flags & IFF_LINK1) { ip_ecn_ingress(ECN_NORMAL, &iphdr.ip_tos, &tos); - else + } else { ip_ecn_ingress(ECN_NOCARE, &iphdr.ip_tos, &tos); + } /* prepend new IP header */ - M_PREPEND(m, sizeof (struct ip), M_DONTWAIT, 0); - if (m && mbuf_len(m) < sizeof (struct ip)) - m = m_pullup(m, sizeof (struct ip)); + M_PREPEND(m, sizeof(struct ip), M_DONTWAIT, 0); + if (m && mbuf_len(m) < sizeof(struct ip)) { + m = m_pullup(m, sizeof(struct ip)); + } if (m == NULL) { printf("ENOBUFS in in_gif_output %d\n", __LINE__); - return (ENOBUFS); + return ENOBUFS; } - bcopy(&iphdr, mtod(m, struct ip *), sizeof (struct ip)); + bcopy(&iphdr, mtod(m, struct ip *), sizeof(struct ip)); if (ROUTE_UNUSABLE(&sc->gif_ro) || dst->sin_family != sin_dst->sin_family || @@ -202,7 +206,7 @@ in_gif_output( (sc->gif_ro.ro_rt != NULL && sc->gif_ro.ro_rt->rt_ifp == ifp)) { /* cache route doesn't match or recursive route */ dst->sin_family = sin_dst->sin_family; - dst->sin_len = sizeof (struct sockaddr_in); + dst->sin_len = sizeof(struct sockaddr_in); dst->sin_addr = sin_dst->sin_addr; ROUTE_RELEASE(&sc->gif_ro); #if 0 @@ -214,7 +218,7 @@ in_gif_output( rtalloc(&sc->gif_ro); if (sc->gif_ro.ro_rt == NULL) { m_freem(m); - return (ENETUNREACH); + return ENETUNREACH; } /* if it constitutes infinite encapsulation, punt. 
*/ @@ -222,18 +226,18 @@ in_gif_output( if (sc->gif_ro.ro_rt->rt_ifp == ifp) { RT_UNLOCK(sc->gif_ro.ro_rt); m_freem(m); - return (ENETUNREACH); /* XXX */ + return ENETUNREACH; /* XXX */ } #if 0 ifp->if_mtu = sc->gif_ro.ro_rt->rt_ifp->if_mtu - - sizeof (struct ip); + - sizeof(struct ip); #endif RT_UNLOCK(sc->gif_ro.ro_rt); } error = ip_output(m, NULL, &sc->gif_ro, IP_OUTARGS, NULL, &ipoa); - return (error); + return error; } void @@ -264,50 +268,54 @@ in_gif_input(struct mbuf *m, int off) switch (proto) { #if INET case IPPROTO_IPV4: - { + { af = AF_INET; - if (mbuf_len(m) < sizeof (*ip)) { - m = m_pullup(m, sizeof (*ip)); - if (!m) + if (mbuf_len(m) < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (!m) { return; + } } ip = mtod(m, struct ip *); if (gifp->if_flags & IFF_LINK1) { old_tos = ip->ip_tos; egress_success = ip_ecn_egress(ECN_NORMAL, &otos, &ip->ip_tos); if (old_tos != ip->ip_tos) { - sum = ~ntohs(ip->ip_sum) & 0xffff; - sum += (~otos & 0xffff) + ip->ip_tos; - sum = (sum >> 16) + (sum & 0xffff); - sum += (sum >> 16); /* add carry */ - ip->ip_sum = htons(~sum & 0xffff); + sum = ~ntohs(ip->ip_sum) & 0xffff; + sum += (~otos & 0xffff) + ip->ip_tos; + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); /* add carry */ + ip->ip_sum = htons(~sum & 0xffff); } - } else + } else { egress_success = ip_ecn_egress(ECN_NOCARE, &otos, &ip->ip_tos); + } break; - } + } #endif #if INET6 case IPPROTO_IPV6: - { + { struct ip6_hdr *ip6; u_int8_t itos; af = AF_INET6; - if (mbuf_len(m) < sizeof (*ip6)) { - m = m_pullup(m, sizeof (*ip6)); - if (!m) + if (mbuf_len(m) < sizeof(*ip6)) { + m = m_pullup(m, sizeof(*ip6)); + if (!m) { return; + } } ip6 = mtod(m, struct ip6_hdr *); itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; - if (gifp->if_flags & IFF_LINK1) + if (gifp->if_flags & IFF_LINK1) { egress_success = ip_ecn_egress(ECN_NORMAL, &otos, &itos); - else + } else { egress_success = ip_ecn_egress(ECN_NOCARE, &otos, &itos); + } ip6->ip6_flow &= ~htonl(0xff << 20); ip6->ip6_flow |= htonl((u_int32_t)itos << 20); break; - } + } #endif /* INET6 */ default: OSAddAtomic(1, &ipstat.ips_nogif); @@ -323,8 +331,9 @@ in_gif_input(struct mbuf *m, int off) #ifdef __APPLE__ /* Replace the rcvif by gifp for dlil to route it correctly */ - if (m->m_pkthdr.rcvif) + if (m->m_pkthdr.rcvif) { m->m_pkthdr.rcvif = gifp; + } ifnet_input(gifp, m, NULL); #else gif_input(m, af, gifp); @@ -355,35 +364,40 @@ gif_encapcheck4( GIF_LOCK_ASSERT(sc); - mbuf_copydata((struct mbuf *)(size_t)m, 0, sizeof (ip), &ip); + mbuf_copydata((struct mbuf *)(size_t)m, 0, sizeof(ip), &ip); /* check for address match */ addrmatch = 0; - if (src->sin_addr.s_addr == ip.ip_dst.s_addr) + if (src->sin_addr.s_addr == ip.ip_dst.s_addr) { addrmatch |= 1; - if (dst->sin_addr.s_addr == ip.ip_src.s_addr) + } + if (dst->sin_addr.s_addr == ip.ip_src.s_addr) { addrmatch |= 2; - if (addrmatch != 3) - return (0); + } + if (addrmatch != 3) { + return 0; + } /* martian filters on outer source - NOT done in ip_input! 
*/ - if (IN_MULTICAST(ntohl(ip.ip_src.s_addr))) - return (0); + if (IN_MULTICAST(ntohl(ip.ip_src.s_addr))) { + return 0; + } switch ((ntohl(ip.ip_src.s_addr) & 0xff000000) >> 24) { case 0: case 127: case 255: - return (0); + return 0; } /* reject packets with broadcast on source */ lck_rw_lock_shared(in_ifaddr_rwlock); for (ia4 = TAILQ_FIRST(&in_ifaddrhead); ia4; ia4 = TAILQ_NEXT(ia4, ia_link)) { - if ((ifnet_flags(ia4->ia_ifa.ifa_ifp) & IFF_BROADCAST) == 0) + if ((ifnet_flags(ia4->ia_ifa.ifa_ifp) & IFF_BROADCAST) == 0) { continue; + } IFA_LOCK(&ia4->ia_ifa); if (ip.ip_src.s_addr == ia4->ia_broadaddr.sin_addr.s_addr) { IFA_UNLOCK(&ia4->ia_ifa); lck_rw_done(in_ifaddr_rwlock); - return (0); + return 0; } IFA_UNLOCK(&ia4->ia_ifa); } @@ -395,24 +409,25 @@ gif_encapcheck4( struct sockaddr_in sin; struct rtentry *rt; - bzero(&sin, sizeof (sin)); + bzero(&sin, sizeof(sin)); sin.sin_family = AF_INET; - sin.sin_len = sizeof (struct sockaddr_in); + sin.sin_len = sizeof(struct sockaddr_in); sin.sin_addr = ip.ip_src; rt = rtalloc1_scoped((struct sockaddr *)&sin, 0, 0, m->m_pkthdr.rcvif->if_index); - if (rt != NULL) + if (rt != NULL) { RT_LOCK(rt); + } if (rt == NULL || rt->rt_ifp != m->m_pkthdr.rcvif) { if (rt != NULL) { RT_UNLOCK(rt); rtfree(rt); } - return (0); + return 0; } RT_UNLOCK(rt); rtfree(rt); } - return (32 * 2); + return 32 * 2; } diff --git a/bsd/netinet/in_gif.h b/bsd/netinet/in_gif.h index 69247eed8..de4a30469 100644 --- a/bsd/netinet/in_gif.h +++ b/bsd/netinet/in_gif.h @@ -57,11 +57,11 @@ */ #ifndef _NETINET_IN_GIF_H_ -#define _NETINET_IN_GIF_H_ +#define _NETINET_IN_GIF_H_ #include #ifdef BSD_KERNEL_PRIVATE -#define GIF_TTL 30 +#define GIF_TTL 30 extern int ip_gif_ttl; diff --git a/bsd/netinet/in_mcast.c b/bsd/netinet/in_mcast.c index d772cc806..03532eaa3 100644 --- a/bsd/netinet/in_mcast.c +++ b/bsd/netinet/in_mcast.c @@ -2,7 +2,7 @@ * Copyright (c) 2010-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -106,43 +106,43 @@ * XXX: Both carp and pf need to use the legacy (*,G) KPIs in_addmulti() * and in_delmulti(). 
*/ -static void imf_commit(struct in_mfilter *); -static int imf_get_source(struct in_mfilter *imf, - const struct sockaddr_in *psin, - struct in_msource **); +static void imf_commit(struct in_mfilter *); +static int imf_get_source(struct in_mfilter *imf, + const struct sockaddr_in *psin, + struct in_msource **); static struct in_msource * - imf_graft(struct in_mfilter *, const uint8_t, - const struct sockaddr_in *); -static int imf_prune(struct in_mfilter *, const struct sockaddr_in *); -static void imf_rollback(struct in_mfilter *); -static void imf_reap(struct in_mfilter *); -static int imo_grow(struct ip_moptions *, size_t); -static size_t imo_match_group(const struct ip_moptions *, - const struct ifnet *, const struct sockaddr_in *); +imf_graft(struct in_mfilter *, const uint8_t, + const struct sockaddr_in *); +static int imf_prune(struct in_mfilter *, const struct sockaddr_in *); +static void imf_rollback(struct in_mfilter *); +static void imf_reap(struct in_mfilter *); +static int imo_grow(struct ip_moptions *, size_t); +static size_t imo_match_group(const struct ip_moptions *, + const struct ifnet *, const struct sockaddr_in *); static struct in_msource * - imo_match_source(const struct ip_moptions *, const size_t, - const struct sockaddr_in *); -static void ims_merge(struct ip_msource *ims, - const struct in_msource *lims, const int rollback); -static int in_getmulti(struct ifnet *, const struct in_addr *, - struct in_multi **); -static int in_joingroup(struct ifnet *, const struct in_addr *, - struct in_mfilter *, struct in_multi **); -static int inm_get_source(struct in_multi *inm, const in_addr_t haddr, - const int noalloc, struct ip_msource **pims); -static int inm_is_ifp_detached(const struct in_multi *); -static int inm_merge(struct in_multi *, /*const*/ struct in_mfilter *); -static void inm_reap(struct in_multi *); +imo_match_source(const struct ip_moptions *, const size_t, + const struct sockaddr_in *); +static void ims_merge(struct ip_msource *ims, + const struct in_msource *lims, const int rollback); +static int in_getmulti(struct ifnet *, const struct in_addr *, + struct in_multi **); +static int in_joingroup(struct ifnet *, const struct in_addr *, + struct in_mfilter *, struct in_multi **); +static int inm_get_source(struct in_multi *inm, const in_addr_t haddr, + const int noalloc, struct ip_msource **pims); +static int inm_is_ifp_detached(const struct in_multi *); +static int inm_merge(struct in_multi *, /*const*/ struct in_mfilter *); +static void inm_reap(struct in_multi *); static struct ip_moptions * - inp_findmoptions(struct inpcb *); -static int inp_get_source_filters(struct inpcb *, struct sockopt *); +inp_findmoptions(struct inpcb *); +static int inp_get_source_filters(struct inpcb *, struct sockopt *); static struct ifnet * - inp_lookup_mcast_ifp(const struct inpcb *, - const struct sockaddr_in *, const struct in_addr); -static int inp_block_unblock_source(struct inpcb *, struct sockopt *); -static int inp_set_multicast_if(struct inpcb *, struct sockopt *); -static int inp_set_source_filters(struct inpcb *, struct sockopt *); -static int sysctl_ip_mcast_filters SYSCTL_HANDLER_ARGS; +inp_lookup_mcast_ifp(const struct inpcb *, + const struct sockaddr_in *, const struct in_addr); +static int inp_block_unblock_source(struct inpcb *, struct sockopt *); +static int inp_set_multicast_if(struct inpcb *, struct sockopt *); +static int inp_set_source_filters(struct inpcb *, struct sockopt *); +static int sysctl_ip_mcast_filters SYSCTL_HANDLER_ARGS; static struct ifnet 
* ip_multicast_if(struct in_addr *, unsigned int *); static __inline__ int ip_msource_cmp(const struct ip_msource *, const struct ip_msource *); @@ -155,11 +155,11 @@ SYSCTL_LONG(_net_inet_ip_mcast, OID_AUTO, maxgrpsrc, static u_long in_mcast_maxsocksrc = IP_MAX_SOCK_SRC_FILTER; SYSCTL_LONG(_net_inet_ip_mcast, OID_AUTO, maxsocksrc, - CTLFLAG_RW | CTLFLAG_LOCKED, &in_mcast_maxsocksrc, + CTLFLAG_RW | CTLFLAG_LOCKED, &in_mcast_maxsocksrc, "Max source filters per socket"); int in_mcast_loop = IP_DEFAULT_MULTICAST_LOOP; -SYSCTL_INT(_net_inet_ip_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_LOCKED, +SYSCTL_INT(_net_inet_ip_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_LOCKED, &in_mcast_loop, 0, "Loopback multicast datagrams by default"); SYSCTL_NODE(_net_inet_ip_mcast, OID_AUTO, filters, @@ -168,20 +168,20 @@ SYSCTL_NODE(_net_inet_ip_mcast, OID_AUTO, filters, RB_GENERATE_PREV(ip_msource_tree, ip_msource, ims_link, ip_msource_cmp); -#define INM_TRACE_HIST_SIZE 32 /* size of trace history */ +#define INM_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ __private_extern__ unsigned int inm_trace_hist_size = INM_TRACE_HIST_SIZE; struct in_multi_dbg { - struct in_multi inm; /* in_multi */ - u_int16_t inm_refhold_cnt; /* # of ref */ - u_int16_t inm_refrele_cnt; /* # of rele */ + struct in_multi inm; /* in_multi */ + u_int16_t inm_refhold_cnt; /* # of ref */ + u_int16_t inm_refrele_cnt; /* # of rele */ /* * Circular lists of inm_addref and inm_remref callers. */ - ctrace_t inm_refhold[INM_TRACE_HIST_SIZE]; - ctrace_t inm_refrele[INM_TRACE_HIST_SIZE]; + ctrace_t inm_refhold[INM_TRACE_HIST_SIZE]; + ctrace_t inm_refrele[INM_TRACE_HIST_SIZE]; /* * Trash list linkage */ @@ -192,33 +192,33 @@ struct in_multi_dbg { static TAILQ_HEAD(, in_multi_dbg) inm_trash_head; static decl_lck_mtx_data(, inm_trash_lock); -#define INM_ZONE_MAX 64 /* maximum elements in zone */ -#define INM_ZONE_NAME "in_multi" /* zone name */ +#define INM_ZONE_MAX 64 /* maximum elements in zone */ +#define INM_ZONE_NAME "in_multi" /* zone name */ #if DEBUG -static unsigned int inm_debug = 1; /* debugging (enabled) */ +static unsigned int inm_debug = 1; /* debugging (enabled) */ #else -static unsigned int inm_debug; /* debugging (disabled) */ +static unsigned int inm_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int inm_size; /* size of zone element */ -static struct zone *inm_zone; /* zone for in_multi */ +static unsigned int inm_size; /* size of zone element */ +static struct zone *inm_zone; /* zone for in_multi */ -#define IPMS_ZONE_MAX 64 /* maximum elements in zone */ -#define IPMS_ZONE_NAME "ip_msource" /* zone name */ +#define IPMS_ZONE_MAX 64 /* maximum elements in zone */ +#define IPMS_ZONE_NAME "ip_msource" /* zone name */ -static unsigned int ipms_size; /* size of zone element */ -static struct zone *ipms_zone; /* zone for ip_msource */ +static unsigned int ipms_size; /* size of zone element */ +static struct zone *ipms_zone; /* zone for ip_msource */ -#define INMS_ZONE_MAX 64 /* maximum elements in zone */ -#define INMS_ZONE_NAME "in_msource" /* zone name */ +#define INMS_ZONE_MAX 64 /* maximum elements in zone */ +#define INMS_ZONE_NAME "in_msource" /* zone name */ -static unsigned int inms_size; /* size of zone element */ -static struct zone *inms_zone; /* zone for in_msource */ +static unsigned int inms_size; /* size of zone element */ +static struct zone *inms_zone; /* zone for in_msource */ /* Lock group and attribute for in_multihead_lock lock */ -static lck_attr_t *in_multihead_lock_attr; 
-static lck_grp_t *in_multihead_lock_grp; -static lck_grp_attr_t *in_multihead_lock_grp_attr; +static lck_attr_t *in_multihead_lock_attr; +static lck_grp_t *in_multihead_lock_grp; +static lck_grp_attr_t *in_multihead_lock_grp_attr; static decl_lck_rw_data(, in_multihead_lock); struct in_multihead in_multihead; @@ -236,12 +236,13 @@ static void inms_free(struct in_msource *); static __inline int ip_msource_cmp(const struct ip_msource *a, const struct ip_msource *b) { - - if (a->ims_haddr < b->ims_haddr) - return (-1); - if (a->ims_haddr == b->ims_haddr) - return (0); - return (1); + if (a->ims_haddr < b->ims_haddr) { + return -1; + } + if (a->ims_haddr == b->ims_haddr) { + return 0; + } + return 1; } /* @@ -253,7 +254,7 @@ inm_is_ifp_detached(const struct in_multi *inm) VERIFY(inm->inm_ifma != NULL); VERIFY(inm->inm_ifp == inm->inm_ifma->ifma_ifp); - return (!ifnet_is_attached(inm->inm_ifp, 0)); + return !ifnet_is_attached(inm->inm_ifp, 0); } /* @@ -275,12 +276,12 @@ imf_init(struct in_mfilter *imf, const int st0, const int st1) static int imo_grow(struct ip_moptions *imo, size_t newmax) { - struct in_multi **nmships; - struct in_multi **omships; - struct in_mfilter *nmfilters; - struct in_mfilter *omfilters; - size_t idx; - size_t oldmax; + struct in_multi **nmships; + struct in_multi **omships; + struct in_mfilter *nmfilters; + struct in_mfilter *omfilters; + size_t idx; + size_t oldmax; IMO_LOCK_ASSERT_HELD(imo); @@ -289,33 +290,38 @@ imo_grow(struct ip_moptions *imo, size_t newmax) omships = imo->imo_membership; omfilters = imo->imo_mfilters; oldmax = imo->imo_max_memberships; - if (newmax == 0) + if (newmax == 0) { newmax = ((oldmax + 1) * 2) - 1; + } - if (newmax > IP_MAX_MEMBERSHIPS) - return (ETOOMANYREFS); + if (newmax > IP_MAX_MEMBERSHIPS) { + return ETOOMANYREFS; + } if ((nmships = (struct in_multi **)_REALLOC(omships, - sizeof (struct in_multi *) * newmax, M_IPMOPTS, - M_WAITOK | M_ZERO)) == NULL) - return (ENOMEM); + sizeof(struct in_multi *) * newmax, M_IPMOPTS, + M_WAITOK | M_ZERO)) == NULL) { + return ENOMEM; + } imo->imo_membership = nmships; if ((nmfilters = (struct in_mfilter *)_REALLOC(omfilters, - sizeof (struct in_mfilter) * newmax, M_INMFILTER, - M_WAITOK | M_ZERO)) == NULL) - return (ENOMEM); + sizeof(struct in_mfilter) * newmax, M_INMFILTER, + M_WAITOK | M_ZERO)) == NULL) { + return ENOMEM; + } imo->imo_mfilters = nmfilters; /* Initialize newly allocated source filter heads. */ - for (idx = oldmax; idx < newmax; idx++) + for (idx = oldmax; idx < newmax; idx++) { imf_init(&nmfilters[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); + } imo->imo_max_memberships = newmax; - return (0); + return 0; } /* @@ -327,22 +333,24 @@ static size_t imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp, const struct sockaddr_in *group) { - struct in_multi *pinm; - int idx; - int nmships; + struct in_multi *pinm; + int idx; + int nmships; IMO_LOCK_ASSERT_HELD(__DECONST(struct ip_moptions *, imo)); /* The imo_membership array may be lazy allocated. 
*/ - if (imo->imo_membership == NULL || imo->imo_num_memberships == 0) - return (-1); + if (imo->imo_membership == NULL || imo->imo_num_memberships == 0) { + return -1; + } nmships = imo->imo_num_memberships; for (idx = 0; idx < nmships; idx++) { pinm = imo->imo_membership[idx]; - if (pinm == NULL) + if (pinm == NULL) { continue; + } INM_LOCK(pinm); if ((ifp == NULL || (pinm->inm_ifp == ifp)) && in_hosteq(pinm->inm_addr, group->sin_addr)) { @@ -351,10 +359,11 @@ imo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp, } INM_UNLOCK(pinm); } - if (idx >= nmships) + if (idx >= nmships) { idx = -1; + } - return (idx); + return idx; } /* @@ -368,9 +377,9 @@ static struct in_msource * imo_match_source(const struct ip_moptions *imo, const size_t gidx, const struct sockaddr_in *src) { - struct ip_msource find; - struct in_mfilter *imf; - struct ip_msource *ims; + struct ip_msource find; + struct in_mfilter *imf; + struct ip_msource *ims; IMO_LOCK_ASSERT_HELD(__DECONST(struct ip_moptions *, imo)); @@ -378,15 +387,16 @@ imo_match_source(const struct ip_moptions *imo, const size_t gidx, VERIFY(gidx != (size_t)-1 && gidx < imo->imo_num_memberships); /* The imo_mfilters array may be lazy allocated. */ - if (imo->imo_mfilters == NULL) - return (NULL); + if (imo->imo_mfilters == NULL) { + return NULL; + } imf = &imo->imo_mfilters[gidx]; /* Source trees are keyed in host byte order. */ find.ims_haddr = ntohl(src->sin_addr.s_addr); ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); - return ((struct in_msource *)ims); + return (struct in_msource *)ims; } /* @@ -407,8 +417,9 @@ imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp, VERIFY(ifp != NULL); gidx = imo_match_group(imo, ifp, group); - if (gidx == (size_t)-1) - return (MCAST_NOTGMEMBER); + if (gidx == (size_t)-1) { + return MCAST_NOTGMEMBER; + } /* * Check if the source was included in an (S,G) join. @@ -424,10 +435,10 @@ imo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp, if ((ims == NULL && mode == MCAST_INCLUDE) || (ims != NULL && ims->imsl_st[0] != mode)) { - return (MCAST_NOTSMEMBER); + return MCAST_NOTSMEMBER; } - return (MCAST_PASS); + return MCAST_PASS; } int @@ -438,22 +449,23 @@ imo_clone(struct inpcb *from_inp, struct inpcb *to_inp) struct ip_moptions *to; from = inp_findmoptions(from_inp); - if (from == NULL) - return (ENOMEM); + if (from == NULL) { + return ENOMEM; + } to = inp_findmoptions(to_inp); if (to == NULL) { IMO_REMREF(from); - return (ENOMEM); + return ENOMEM; } IMO_LOCK(from); IMO_LOCK(to); - to->imo_multicast_ifp = from->imo_multicast_ifp; - to->imo_multicast_vif = from->imo_multicast_vif; - to->imo_multicast_ttl = from->imo_multicast_ttl; - to->imo_multicast_loop = from->imo_multicast_loop; + to->imo_multicast_ifp = from->imo_multicast_ifp; + to->imo_multicast_vif = from->imo_multicast_vif; + to->imo_multicast_ttl = from->imo_multicast_ttl; + to->imo_multicast_loop = from->imo_multicast_loop; /* * We're cloning, so drop any existing memberships and source @@ -463,13 +475,15 @@ imo_clone(struct inpcb *from_inp, struct inpcb *to_inp) struct in_mfilter *imf; imf = to->imo_mfilters ? 
&to->imo_mfilters[i] : NULL; - if (imf != NULL) + if (imf != NULL) { imf_leave(imf); + } (void) in_leavegroup(to->imo_membership[i], imf); - if (imf != NULL) + if (imf != NULL) { imf_purge(imf); + } INM_REMREF(to->imo_membership[i]); to->imo_membership[i] = NULL; @@ -483,8 +497,9 @@ imo_clone(struct inpcb *from_inp, struct inpcb *to_inp) * and source filters arrays are at least equal in size. */ err = imo_grow(to, from->imo_max_memberships); - if (err != 0) + if (err != 0) { goto done; + } } VERIFY(to->imo_max_memberships >= from->imo_max_memberships); @@ -493,13 +508,14 @@ imo_clone(struct inpcb *from_inp, struct inpcb *to_inp) * so simply hold additional reference count per membership. */ for (i = 0; i < from->imo_num_memberships; i++) { - to->imo_membership[i] = - in_addmulti(&from->imo_membership[i]->inm_addr, - from->imo_membership[i]->inm_ifp); - if (to->imo_membership[i] == NULL) + to->imo_membership[i] = + in_addmulti(&from->imo_membership[i]->inm_addr, + from->imo_membership[i]->inm_ifp); + if (to->imo_membership[i] == NULL) { break; + } to->imo_num_memberships++; - } + } VERIFY(to->imo_num_memberships == from->imo_num_memberships); done: @@ -508,7 +524,7 @@ done: IMO_UNLOCK(from); IMO_REMREF(from); - return (err); + return err; } /* @@ -522,10 +538,10 @@ static int in_getmulti(struct ifnet *ifp, const struct in_addr *group, struct in_multi **pinm) { - struct sockaddr_in gsin; - struct ifmultiaddr *ifma; - struct in_multi *inm; - int error; + struct sockaddr_in gsin; + struct ifmultiaddr *ifma; + struct in_multi *inm; + int error; in_multihead_lock_shared(); IN_LOOKUP_MULTI(group, ifp, inm); @@ -541,7 +557,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, * We already joined this group; return the inm * with a refcount held (via lookup) for caller. */ - return (0); + return 0; } in_multihead_lock_done(); @@ -555,8 +571,9 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, * with this network-layer group on the given ifnet. */ error = if_addmulti(ifp, (struct sockaddr *)&gsin, &ifma); - if (error != 0) - return (error); + if (error != 0) { + return error; + } /* * See comments in inm_remref() for access to ifma_protospec. @@ -566,7 +583,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, if ((inm = ifma->ifma_protospec) != NULL) { VERIFY(ifma->ifma_addr != NULL); VERIFY(ifma->ifma_addr->sa_family == AF_INET); - INM_ADDREF(inm); /* for caller */ + INM_ADDREF(inm); /* for caller */ IFMA_UNLOCK(ifma); INM_LOCK(inm); VERIFY(inm->inm_ifma == ifma); @@ -586,7 +603,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, * been joined; return the inm with a refcount * held for caller. 
*/ - return (0); + return 0; } /* * We lost the race with another thread doing in_delmulti(); @@ -601,7 +618,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, INM_UNLOCK(inm); in_multihead_lock_done(); IFMA_REMREF(ifma); - return (0); + return 0; } IFMA_UNLOCK(ifma); @@ -616,7 +633,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, if (inm == NULL) { in_multihead_lock_done(); IFMA_REMREF(ifma); - return (ENOMEM); + return ENOMEM; } INM_LOCK(inm); inm->inm_addr = *group; @@ -624,7 +641,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, inm->inm_igi = IGMP_IFINFO(ifp); VERIFY(inm->inm_igi != NULL); IGI_ADDREF(inm->inm_igi); - inm->inm_ifma = ifma; /* keep refcount from if_addmulti() */ + inm->inm_ifma = ifma; /* keep refcount from if_addmulti() */ inm->inm_state = IGMP_NOT_MEMBER; /* * Pending state-changes per group are subject to a bounds check. @@ -636,7 +653,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, *pinm = inm; in_multi_attach(inm); VERIFY((inm->inm_debug & (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED); - INM_ADDREF_LOCKED(inm); /* for caller */ + INM_ADDREF_LOCKED(inm); /* for caller */ INM_UNLOCK(inm); IFMA_LOCK(ifma); @@ -645,7 +662,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, IFMA_UNLOCK(ifma); in_multihead_lock_done(); - return (0); + return 0; } /* @@ -656,7 +673,7 @@ in_getmulti(struct ifnet *ifp, const struct in_addr *group, void inm_clear_recorded(struct in_multi *inm) { - struct ip_msource *ims; + struct ip_msource *ims; INM_LOCK_ASSERT_HELD(inm); @@ -690,21 +707,24 @@ inm_clear_recorded(struct in_multi *inm) int inm_record_source(struct in_multi *inm, const in_addr_t naddr) { - struct ip_msource find; - struct ip_msource *ims, *nims; + struct ip_msource find; + struct ip_msource *ims, *nims; INM_LOCK_ASSERT_HELD(inm); find.ims_haddr = ntohl(naddr); ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find); - if (ims && ims->ims_stp) - return (0); + if (ims && ims->ims_stp) { + return 0; + } if (ims == NULL) { - if (inm->inm_nsrc == in_mcast_maxgrpsrc) - return (-ENOSPC); + if (inm->inm_nsrc == in_mcast_maxgrpsrc) { + return -ENOSPC; + } nims = ipms_alloc(M_WAITOK); - if (nims == NULL) - return (-ENOMEM); + if (nims == NULL) { + return -ENOMEM; + } nims->ims_haddr = find.ims_haddr; RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); ++inm->inm_nsrc; @@ -718,7 +738,7 @@ inm_record_source(struct in_multi *inm, const in_addr_t naddr) ++ims->ims_stp; ++inm->inm_st[1].iss_rec; - return (1); + return 1; } /* @@ -736,10 +756,10 @@ static int imf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin, struct in_msource **plims) { - struct ip_msource find; - struct ip_msource *ims; - struct in_msource *lims; - int error; + struct ip_msource find; + struct ip_msource *ims; + struct in_msource *lims; + int error; error = 0; ims = NULL; @@ -750,11 +770,13 @@ imf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin, ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); lims = (struct in_msource *)ims; if (lims == NULL) { - if (imf->imf_nsrc == in_mcast_maxsocksrc) - return (ENOSPC); + if (imf->imf_nsrc == in_mcast_maxsocksrc) { + return ENOSPC; + } lims = inms_alloc(M_WAITOK); - if (lims == NULL) - return (ENOMEM); + if (lims == NULL) { + return ENOMEM; + } lims->ims_haddr = find.ims_haddr; lims->imsl_st[0] = MCAST_UNDEFINED; RB_INSERT(ip_msource_tree, &imf->imf_sources, @@ -764,7 +786,7 @@ imf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin, *plims = lims; - 
return (error); + return error; } /* @@ -781,11 +803,12 @@ static struct in_msource * imf_graft(struct in_mfilter *imf, const uint8_t st1, const struct sockaddr_in *psin) { - struct in_msource *lims; + struct in_msource *lims; lims = inms_alloc(M_WAITOK); - if (lims == NULL) - return (NULL); + if (lims == NULL) { + return NULL; + } lims->ims_haddr = ntohl(psin->sin_addr.s_addr); lims->imsl_st[0] = MCAST_UNDEFINED; lims->imsl_st[1] = st1; @@ -793,7 +816,7 @@ imf_graft(struct in_mfilter *imf, const uint8_t st1, (struct ip_msource *)lims); ++imf->imf_nsrc; - return (lims); + return lims; } /* @@ -809,18 +832,19 @@ imf_graft(struct in_mfilter *imf, const uint8_t st1, static int imf_prune(struct in_mfilter *imf, const struct sockaddr_in *psin) { - struct ip_msource find; - struct ip_msource *ims; - struct in_msource *lims; + struct ip_msource find; + struct ip_msource *ims; + struct in_msource *lims; /* key is host byte order */ find.ims_haddr = ntohl(psin->sin_addr.s_addr); ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); - if (ims == NULL) - return (ENOENT); + if (ims == NULL) { + return ENOENT; + } lims = (struct in_msource *)ims; lims->imsl_st[1] = MCAST_UNDEFINED; - return (0); + return 0; } /* @@ -831,8 +855,8 @@ imf_prune(struct in_mfilter *imf, const struct sockaddr_in *psin) static void imf_rollback(struct in_mfilter *imf) { - struct ip_msource *ims, *tims; - struct in_msource *lims; + struct ip_msource *ims, *tims; + struct in_msource *lims; RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { lims = (struct in_msource *)ims; @@ -862,8 +886,8 @@ imf_rollback(struct in_mfilter *imf) void imf_leave(struct in_mfilter *imf) { - struct ip_msource *ims; - struct in_msource *lims; + struct ip_msource *ims; + struct in_msource *lims; RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { lims = (struct in_msource *)ims; @@ -880,8 +904,8 @@ imf_leave(struct in_mfilter *imf) static void imf_commit(struct in_mfilter *imf) { - struct ip_msource *ims; - struct in_msource *lims; + struct ip_msource *ims; + struct in_msource *lims; RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { lims = (struct in_msource *)ims; @@ -898,8 +922,8 @@ imf_commit(struct in_mfilter *imf) static void imf_reap(struct in_mfilter *imf) { - struct ip_msource *ims, *tims; - struct in_msource *lims; + struct ip_msource *ims, *tims; + struct in_msource *lims; RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { lims = (struct in_msource *)ims; @@ -922,8 +946,8 @@ imf_reap(struct in_mfilter *imf) void imf_purge(struct in_mfilter *imf) { - struct ip_msource *ims, *tims; - struct in_msource *lims; + struct ip_msource *ims, *tims; + struct in_msource *lims; RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { lims = (struct in_msource *)ims; @@ -951,8 +975,8 @@ static int inm_get_source(struct in_multi *inm, const in_addr_t haddr, const int noalloc, struct ip_msource **pims) { - struct ip_msource find; - struct ip_msource *ims, *nims; + struct ip_msource find; + struct ip_msource *ims, *nims; #ifdef IGMP_DEBUG struct in_addr ia; char buf[MAX_IPv4_STR_LEN]; @@ -962,11 +986,13 @@ inm_get_source(struct in_multi *inm, const in_addr_t haddr, find.ims_haddr = haddr; ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find); if (ims == NULL && !noalloc) { - if (inm->inm_nsrc == in_mcast_maxgrpsrc) - return (ENOSPC); + if (inm->inm_nsrc == in_mcast_maxgrpsrc) { + return ENOSPC; + } nims = ipms_alloc(M_WAITOK); - if (nims == NULL) - return (ENOMEM); + if (nims == NULL) { + return ENOMEM; + } 
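imf_get_source() and inm_get_source() in these hunks share one shape: RB_FIND on a red-black tree keyed by the source address in host byte order, then allocate-and-RB_INSERT on a miss. A compilable sketch of that shape using the BSD <sys/tree.h> macros (available on macOS and the BSDs; struct msrc, msrc_cmp and msrc_get are hypothetical names, and the kernel's zone allocator is stood in for by calloc):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/tree.h>

    struct msrc {
            RB_ENTRY(msrc) link;
            uint32_t haddr;         /* source address, host order */
    };

    static int
    msrc_cmp(const struct msrc *a, const struct msrc *b)
    {
            if (a->haddr < b->haddr) {
                    return -1;
            }
            return a->haddr == b->haddr ? 0 : 1;
    }

    RB_HEAD(msrc_tree, msrc);
    RB_GENERATE(msrc_tree, msrc, link, msrc_cmp);

    /* Find-or-create, mirroring the find/alloc/insert sequence in
     * inm_get_source(); returns NULL when allocation fails. */
    static struct msrc *
    msrc_get(struct msrc_tree *t, uint32_t haddr)
    {
            struct msrc find = { .haddr = haddr };
            struct msrc *s = RB_FIND(msrc_tree, t, &find);

            if (s != NULL) {
                    return s;
            }
            if ((s = calloc(1, sizeof(*s))) == NULL) {
                    return NULL;
            }
            s->haddr = haddr;
            RB_INSERT(msrc_tree, t, s);
            return s;
    }

Keying in host byte order is what makes the comparator's `<` meaningful as an address ordering, which is why imf_get_source() and imo_match_source() both convert with ntohl() before the lookup.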
nims->ims_haddr = haddr; RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); ++inm->inm_nsrc; @@ -980,7 +1006,7 @@ inm_get_source(struct in_multi *inm, const in_addr_t haddr, } *pims = ims; - return (0); + return 0; } /* @@ -999,11 +1025,12 @@ ims_get_mode(const struct in_multi *inm, const struct ip_msource *ims, t = !!t; if (inm->inm_st[t].iss_ex > 0 && - inm->inm_st[t].iss_ex == ims->ims_st[t].ex) - return (MCAST_EXCLUDE); - else if (ims->ims_st[t].in > 0 && ims->ims_st[t].ex == 0) - return (MCAST_INCLUDE); - return (MCAST_UNDEFINED); + inm->inm_st[t].iss_ex == ims->ims_st[t].ex) { + return MCAST_EXCLUDE; + } else if (ims->ims_st[t].in > 0 && ims->ims_st[t].ex == 0) { + return MCAST_INCLUDE; + } + return MCAST_UNDEFINED; } /* @@ -1063,10 +1090,10 @@ ims_merge(struct ip_msource *ims, const struct in_msource *lims, static int inm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf) { - struct ip_msource *ims, *nims = NULL; - struct in_msource *lims; - int schanged, error; - int nsrc0, nsrc1; + struct ip_msource *ims, *nims = NULL; + struct in_msource *lims; + int schanged, error; + int nsrc0, nsrc1; INM_LOCK_ASSERT_HELD(inm); @@ -1083,13 +1110,20 @@ inm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf) */ RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { lims = (struct in_msource *)ims; - if (lims->imsl_st[0] == imf->imf_st[0]) nsrc0++; - if (lims->imsl_st[1] == imf->imf_st[1]) nsrc1++; - if (lims->imsl_st[0] == lims->imsl_st[1]) continue; + if (lims->imsl_st[0] == imf->imf_st[0]) { + nsrc0++; + } + if (lims->imsl_st[1] == imf->imf_st[1]) { + nsrc1++; + } + if (lims->imsl_st[0] == lims->imsl_st[1]) { + continue; + } error = inm_get_source(inm, lims->ims_haddr, 0, &nims); ++schanged; - if (error) + if (error) { break; + } ims_merge(nims, lims, 0); } if (error) { @@ -1097,11 +1131,13 @@ inm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf) RB_FOREACH_REVERSE_FROM(ims, ip_msource_tree, nims) { lims = (struct in_msource *)ims; - if (lims->imsl_st[0] == lims->imsl_st[1]) + if (lims->imsl_st[0] == lims->imsl_st[1]) { continue; + } (void) inm_get_source(inm, lims->ims_haddr, 1, &bims); - if (bims == NULL) + if (bims == NULL) { continue; + } ims_merge(bims, lims, 1); } goto out_reap; @@ -1185,7 +1221,7 @@ out_reap: IGMP_PRINTF(("%s: sources changed; reaping\n", __func__)); inm_reap(inm); } - return (error); + return error; } /* @@ -1195,7 +1231,7 @@ out_reap: void inm_commit(struct in_multi *inm) { - struct ip_msource *ims; + struct ip_msource *ims; INM_LOCK_ASSERT_HELD(inm); @@ -1216,15 +1252,16 @@ inm_commit(struct in_multi *inm) static void inm_reap(struct in_multi *inm) { - struct ip_msource *ims, *tims; + struct ip_msource *ims, *tims; INM_LOCK_ASSERT_HELD(inm); RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) { if (ims->ims_st[0].ex > 0 || ims->ims_st[0].in > 0 || ims->ims_st[1].ex > 0 || ims->ims_st[1].in > 0 || - ims->ims_stp != 0) + ims->ims_stp != 0) { continue; + } IGMP_PRINTF(("%s: free ims 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(ims))); RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims); @@ -1239,7 +1276,7 @@ inm_reap(struct in_multi *inm) void inm_purge(struct in_multi *inm) { - struct ip_msource *ims, *tims; + struct ip_msource *ims, *tims; INM_LOCK_ASSERT_HELD(inm); @@ -1265,15 +1302,15 @@ static int in_joingroup(struct ifnet *ifp, const struct in_addr *gina, /*const*/ struct in_mfilter *imf, struct in_multi **pinm) { - struct in_mfilter timf; - struct in_multi *inm = NULL; - int error = 0; - struct igmp_tparams itp; + struct 
in_mfilter timf; + struct in_multi *inm = NULL; + int error = 0; + struct igmp_tparams itp; IGMP_INET_PRINTF(*gina, ("%s: join %s on 0x%llx(%s))\n", __func__, _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - bzero(&itp, sizeof (itp)); + bzero(&itp, sizeof(itp)); *pinm = NULL; /* @@ -1288,7 +1325,7 @@ in_joingroup(struct ifnet *ifp, const struct in_addr *gina, error = in_getmulti(ifp, gina, &inm); if (error) { IGMP_PRINTF(("%s: in_getmulti() failure\n", __func__)); - return (error); + return error; } IGMP_PRINTF(("%s: merge inm state\n", __func__)); @@ -1316,13 +1353,13 @@ out_inm_release: INM_REMREF(inm); } else { INM_UNLOCK(inm); - *pinm = inm; /* keep refcount from in_getmulti() */ + *pinm = inm; /* keep refcount from in_getmulti() */ } /* schedule timer now that we've dropped the lock(s) */ igmp_set_timeout(&itp); - return (error); + return error; } /* @@ -1337,17 +1374,17 @@ out_inm_release: int in_leavegroup(struct in_multi *inm, /*const*/ struct in_mfilter *imf) { - struct in_mfilter timf; - int error, lastref; - struct igmp_tparams itp; + struct in_mfilter timf; + int error, lastref; + struct igmp_tparams itp; - bzero(&itp, sizeof (itp)); + bzero(&itp, sizeof(itp)); error = 0; INM_LOCK_ASSERT_NOTHELD(inm); - in_multihead_lock_exclusive(); - INM_LOCK(inm); + in_multihead_lock_exclusive(); + INM_LOCK(inm); IGMP_INET_PRINTF(inm->inm_addr, ("%s: leave inm 0x%llx, %s/%s%d, imf 0x%llx\n", __func__, @@ -1379,22 +1416,23 @@ in_leavegroup(struct in_multi *inm, /*const*/ struct in_mfilter *imf) IGMP_PRINTF(("%s: doing igmp downcall\n", __func__)); error = igmp_change_state(inm, &itp); #if IGMP_DEBUG - if (error) + if (error) { IGMP_PRINTF(("%s: failed igmp downcall\n", __func__)); + } #endif - lastref = in_multi_detach(inm); - VERIFY(!lastref || (!(inm->inm_debug & IFD_ATTACHED) && - inm->inm_reqcnt == 0)); + lastref = in_multi_detach(inm); + VERIFY(!lastref || (!(inm->inm_debug & IFD_ATTACHED) && + inm->inm_reqcnt == 0)); INM_UNLOCK(inm); - in_multihead_lock_done(); - - if (lastref) - INM_REMREF(inm); /* for in_multihead list */ + in_multihead_lock_done(); + if (lastref) { + INM_REMREF(inm); /* for in_multihead list */ + } /* schedule timer now that we've dropped the lock(s) */ igmp_set_timeout(&itp); - return (error); + return error; } /* @@ -1414,7 +1452,7 @@ in_addmulti(struct in_addr *ap, struct ifnet *ifp) error = in_joingroup(ifp, ap, NULL, &pinm); VERIFY(pinm != NULL || error != 0); - return (pinm); + return pinm; } /* @@ -1424,7 +1462,6 @@ in_addmulti(struct in_addr *ap, struct ifnet *ifp) void in_delmulti(struct in_multi *inm) { - (void) in_leavegroup(inm, NULL); } @@ -1440,20 +1477,20 @@ in_delmulti(struct in_multi *inm) static int inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) { - struct group_source_req gsr; - struct sockaddr_in *gsa, *ssa; - struct ifnet *ifp; - struct in_mfilter *imf; - struct ip_moptions *imo; - struct in_msource *ims; - struct in_multi *inm; - size_t idx; - uint16_t fmode; - int error, doblock; - unsigned int ifindex = 0; - struct igmp_tparams itp; - - bzero(&itp, sizeof (itp)); + struct group_source_req gsr; + struct sockaddr_in *gsa, *ssa; + struct ifnet *ifp; + struct in_mfilter *imf; + struct ip_moptions *imo; + struct in_msource *ims; + struct in_multi *inm; + size_t idx; + uint16_t fmode; + int error, doblock; + unsigned int ifindex = 0; + struct igmp_tparams itp; + + bzero(&itp, sizeof(itp)); ifp = NULL; error = 0; doblock = 0; @@ -1465,13 +1502,14 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt 
*sopt) switch (sopt->sopt_name) { case IP_BLOCK_SOURCE: case IP_UNBLOCK_SOURCE: { - struct ip_mreq_source mreqs; + struct ip_mreq_source mreqs; error = sooptcopyin(sopt, &mreqs, sizeof(struct ip_mreq_source), sizeof(struct ip_mreq_source)); - if (error) - return (error); + if (error) { + return error; + } gsa->sin_family = AF_INET; gsa->sin_len = sizeof(struct sockaddr_in); @@ -1481,66 +1519,75 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) ssa->sin_len = sizeof(struct sockaddr_in); ssa->sin_addr = mreqs.imr_sourceaddr; - if (!in_nullhost(mreqs.imr_interface)) + if (!in_nullhost(mreqs.imr_interface)) { ifp = ip_multicast_if(&mreqs.imr_interface, &ifindex); + } - if (sopt->sopt_name == IP_BLOCK_SOURCE) + if (sopt->sopt_name == IP_BLOCK_SOURCE) { doblock = 1; + } IGMP_INET_PRINTF(mreqs.imr_interface, ("%s: imr_interface = %s, ifp = 0x%llx\n", __func__, _igmp_inet_buf, (uint64_t)VM_KERNEL_ADDRPERM(ifp))); break; - } + } case MCAST_BLOCK_SOURCE: case MCAST_UNBLOCK_SOURCE: error = sooptcopyin(sopt, &gsr, sizeof(struct group_source_req), sizeof(struct group_source_req)); - if (error) - return (error); + if (error) { + return error; + } if (gsa->sin_family != AF_INET || - gsa->sin_len != sizeof(struct sockaddr_in)) - return (EINVAL); + gsa->sin_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } if (ssa->sin_family != AF_INET || - ssa->sin_len != sizeof(struct sockaddr_in)) - return (EINVAL); + ssa->sin_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } ifnet_head_lock_shared(); if (gsr.gsr_interface == 0 || (u_int)if_index < gsr.gsr_interface) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[gsr.gsr_interface]; ifnet_head_done(); - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } - if (sopt->sopt_name == MCAST_BLOCK_SOURCE) + if (sopt->sopt_name == MCAST_BLOCK_SOURCE) { doblock = 1; + } break; default: IGMP_PRINTF(("%s: unknown sopt_name %d\n", __func__, sopt->sopt_name)); - return (EOPNOTSUPP); + return EOPNOTSUPP; } - if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) - return (EINVAL); + if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) { + return EINVAL; + } /* * Check if we are actually a member of this group. 
*/ imo = inp_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IMO_LOCK(imo); idx = imo_match_group(imo, ifp, gsa); @@ -1584,8 +1631,9 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) if (doblock) { IGMP_PRINTF(("%s: %s source\n", __func__, "block")); ims = imf_graft(imf, fmode, ssa); - if (ims == NULL) + if (ims == NULL) { error = ENOMEM; + } } else { IGMP_PRINTF(("%s: %s source\n", __func__, "allow")); error = imf_prune(imf, ssa); @@ -1612,26 +1660,28 @@ inp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) error = igmp_change_state(inm, &itp); INM_UNLOCK(inm); #if IGMP_DEBUG - if (error) + if (error) { IGMP_PRINTF(("%s: failed igmp downcall\n", __func__)); + } #endif out_imf_rollback: - if (error) + if (error) { imf_rollback(imf); - else + } else { imf_commit(imf); + } imf_reap(imf); out_imo_locked: IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ /* schedule timer now that we've dropped the lock(s) */ igmp_set_timeout(&itp); - return (error); + return error; } /* @@ -1643,33 +1693,34 @@ out_imo_locked: static struct ip_moptions * inp_findmoptions(struct inpcb *inp) { - struct ip_moptions *imo; - struct in_multi **immp; - struct in_mfilter *imfp; - size_t idx; + struct ip_moptions *imo; + struct in_multi **immp; + struct in_mfilter *imfp; + size_t idx; if ((imo = inp->inp_moptions) != NULL) { - IMO_ADDREF(imo); /* for caller */ - return (imo); + IMO_ADDREF(imo); /* for caller */ + return imo; } imo = ip_allocmoptions(M_WAITOK); - if (imo == NULL) - return (NULL); + if (imo == NULL) { + return NULL; + } - immp = _MALLOC(sizeof (*immp) * IP_MIN_MEMBERSHIPS, M_IPMOPTS, + immp = _MALLOC(sizeof(*immp) * IP_MIN_MEMBERSHIPS, M_IPMOPTS, M_WAITOK | M_ZERO); if (immp == NULL) { IMO_REMREF(imo); - return (NULL); + return NULL; } - imfp = _MALLOC(sizeof (struct in_mfilter) * IP_MIN_MEMBERSHIPS, + imfp = _MALLOC(sizeof(struct in_mfilter) * IP_MIN_MEMBERSHIPS, M_INMFILTER, M_WAITOK | M_ZERO); if (imfp == NULL) { _FREE(immp, M_IPMOPTS); IMO_REMREF(imo); - return (NULL); + return NULL; } imo->imo_multicast_ifp = NULL; @@ -1682,14 +1733,15 @@ inp_findmoptions(struct inpcb *inp) imo->imo_membership = immp; /* Initialize per-group source filters. */ - for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++) + for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++) { imf_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); + } imo->imo_mfilters = imfp; inp->inp_moptions = imo; /* keep reference from ip_allocmoptions() */ - IMO_ADDREF(imo); /* for caller */ + IMO_ADDREF(imo); /* for caller */ - return (imo); + return imo; } /* * Atomically get source filters on a socket for an IPv4 multicast group. 
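All of the source-filter plumbing in these in_mcast.c hunks funnels into one acceptance test on the receive path (imo_multi_filter(), earlier in this file): a datagram is rejected when no per-source entry exists and the group is in include mode, or when an entry exists whose committed state differs from the group's current filter mode. Restated as a small predicate (enum fmode, struct src_entry and source_passes are hypothetical names):

    #include <stdbool.h>
    #include <stdint.h>

    enum fmode { FM_UNDEFINED, FM_INCLUDE, FM_EXCLUDE };

    struct src_entry {
            uint32_t   haddr;  /* source address, host byte order */
            enum fmode st;     /* committed state, cf. imsl_st[0] */
    };

    /*
     * Mirror of the test in imo_multi_filter(): s is the matching
     * per-source entry for the arriving datagram, or NULL. Reject
     * when (s == NULL && mode == FM_INCLUDE) or when
     * (s != NULL && s->st != mode); accept otherwise.
     */
    static bool
    source_passes(enum fmode mode, const struct src_entry *s)
    {
            if (s == NULL) {
                    return mode == FM_EXCLUDE;
            }
            return s->st == mode;
    }

So a source with no entry passes only an exclusive-mode membership, which is the include/exclude default the IGMPv3 socket options are built around.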
@@ -1697,20 +1749,20 @@ inp_findmoptions(struct inpcb *inp) static int inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) { - struct __msfilterreq64 msfr = {}, msfr64; - struct __msfilterreq32 msfr32; - struct sockaddr_in *gsa; - struct ifnet *ifp; - struct ip_moptions *imo; - struct in_mfilter *imf; - struct ip_msource *ims; - struct in_msource *lims; - struct sockaddr_in *psin; - struct sockaddr_storage *ptss; - struct sockaddr_storage *tss; - int error; - size_t idx, nsrcs, ncsrcs; - user_addr_t tmp_ptr; + struct __msfilterreq64 msfr = {}, msfr64; + struct __msfilterreq32 msfr32; + struct sockaddr_in *gsa; + struct ifnet *ifp; + struct ip_moptions *imo; + struct in_mfilter *imf; + struct ip_msource *ims; + struct in_msource *lims; + struct sockaddr_in *psin; + struct sockaddr_storage *ptss; + struct sockaddr_storage *tss; + int error; + size_t idx, nsrcs, ncsrcs; + user_addr_t tmp_ptr; imo = inp->inp_moptions; VERIFY(imo != NULL); @@ -1719,16 +1771,18 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) error = sooptcopyin(sopt, &msfr64, sizeof(struct __msfilterreq64), sizeof(struct __msfilterreq64)); - if (error) - return (error); + if (error) { + return error; + } /* we never use msfr.msfr_srcs; */ memcpy(&msfr, &msfr64, sizeof(msfr64)); } else { error = sooptcopyin(sopt, &msfr32, sizeof(struct __msfilterreq32), sizeof(struct __msfilterreq32)); - if (error) - return (error); + if (error) { + return error; + } /* we never use msfr.msfr_srcs; */ memcpy(&msfr, &msfr32, sizeof(msfr32)); } @@ -1736,21 +1790,24 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) ifnet_head_lock_shared(); if (msfr.msfr_ifindex == 0 || (u_int)if_index < msfr.msfr_ifindex) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[msfr.msfr_ifindex]; ifnet_head_done(); - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } if ((size_t) msfr.msfr_nsrcs > - UINT32_MAX / sizeof(struct sockaddr_storage)) + UINT32_MAX / sizeof(struct sockaddr_storage)) { msfr.msfr_nsrcs = UINT32_MAX / sizeof(struct sockaddr_storage); + } - if (msfr.msfr_nsrcs > in_mcast_maxsocksrc) + if (msfr.msfr_nsrcs > in_mcast_maxsocksrc) { msfr.msfr_nsrcs = in_mcast_maxsocksrc; + } IMO_LOCK(imo); /* @@ -1761,7 +1818,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) idx = imo_match_group(imo, ifp, gsa); if (idx == (size_t)-1 || imo->imo_mfilters == NULL) { IMO_UNLOCK(imo); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } imf = &imo->imo_mfilters[idx]; @@ -1770,7 +1827,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) */ if (imf->imf_st[1] == MCAST_UNDEFINED) { IMO_UNLOCK(imo); - return (EAGAIN); + return EAGAIN; } msfr.msfr_fmode = imf->imf_st[1]; @@ -1782,10 +1839,11 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) * buffer really needs to be. 
*/ - if (IS_64BIT_PROCESS(current_proc())) + if (IS_64BIT_PROCESS(current_proc())) { tmp_ptr = msfr64.msfr_srcs; - else + } else { tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs); + } tss = NULL; if (tmp_ptr != USER_ADDR_NULL && msfr.msfr_nsrcs > 0) { @@ -1793,7 +1851,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) M_TEMP, M_WAITOK | M_ZERO); if (tss == NULL) { IMO_UNLOCK(imo); - return (ENOBUFS); + return ENOBUFS; } } @@ -1807,8 +1865,9 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { lims = (struct in_msource *)ims; if (lims->imsl_st[0] == MCAST_UNDEFINED || - lims->imsl_st[0] != imf->imf_st[0]) + lims->imsl_st[0] != imf->imf_st[0]) { continue; + } if (tss != NULL && nsrcs > 0) { psin = (struct sockaddr_in *)ptss; psin->sin_family = AF_INET; @@ -1826,8 +1885,9 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) if (tss != NULL) { error = copyout(tss, tmp_ptr, ncsrcs * sizeof(*tss)); FREE(tss, M_TEMP); - if (error) - return (error); + if (error) { + return error; + } } msfr.msfr_nsrcs = ncsrcs; @@ -1849,7 +1909,7 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) sizeof(struct __msfilterreq32)); } - return (error); + return error; } /* @@ -1858,13 +1918,13 @@ inp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) int inp_getmoptions(struct inpcb *inp, struct sockopt *sopt) { - struct ip_mreqn mreqn; - struct ip_moptions *imo; - struct ifnet *ifp; - struct in_ifaddr *ia; - int error, optval; - unsigned int ifindex; - u_char coptval; + struct ip_mreqn mreqn; + struct ip_moptions *imo; + struct ifnet *ifp; + struct in_ifaddr *ia; + int error, optval; + unsigned int ifindex; + u_char coptval; imo = inp->inp_moptions; /* @@ -1874,7 +1934,7 @@ inp_getmoptions(struct inpcb *inp, struct sockopt *sopt) if (SOCK_PROTO(inp->inp_socket) == IPPROTO_DIVERT || (SOCK_TYPE(inp->inp_socket) != SOCK_RAW && SOCK_TYPE(inp->inp_socket) != SOCK_DGRAM)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } error = 0; @@ -1909,44 +1969,48 @@ inp_getmoptions(struct inpcb *inp, struct sockopt *sopt) break; case IP_MULTICAST_IFINDEX: - if (imo != NULL) + if (imo != NULL) { IMO_LOCK(imo); + } if (imo == NULL || imo->imo_multicast_ifp == NULL) { ifindex = 0; } else { ifindex = imo->imo_multicast_ifp->if_index; } - if (imo != NULL) + if (imo != NULL) { IMO_UNLOCK(imo); - error = sooptcopyout(sopt, &ifindex, sizeof (ifindex)); + } + error = sooptcopyout(sopt, &ifindex, sizeof(ifindex)); break; case IP_MULTICAST_TTL: - if (imo == NULL) + if (imo == NULL) { optval = coptval = IP_DEFAULT_MULTICAST_TTL; - else { + } else { IMO_LOCK(imo); optval = coptval = imo->imo_multicast_ttl; IMO_UNLOCK(imo); } - if (sopt->sopt_valsize == sizeof(u_char)) + if (sopt->sopt_valsize == sizeof(u_char)) { error = sooptcopyout(sopt, &coptval, sizeof(u_char)); - else + } else { error = sooptcopyout(sopt, &optval, sizeof(int)); + } break; case IP_MULTICAST_LOOP: - if (imo == 0) + if (imo == 0) { optval = coptval = IP_DEFAULT_MULTICAST_LOOP; - else { + } else { IMO_LOCK(imo); optval = coptval = imo->imo_multicast_loop; IMO_UNLOCK(imo); } - if (sopt->sopt_valsize == sizeof(u_char)) + if (sopt->sopt_valsize == sizeof(u_char)) { error = sooptcopyout(sopt, &coptval, sizeof(u_char)); - else + } else { error = sooptcopyout(sopt, &optval, sizeof(int)); + } break; case IP_MSFILTER: @@ -1962,7 +2026,7 @@ inp_getmoptions(struct inpcb *inp, struct sockopt *sopt) break; } - return (error); + return error; } /* @@ -1990,8 +2054,8 @@ 
static struct ifnet * inp_lookup_mcast_ifp(const struct inpcb *inp, const struct sockaddr_in *gsin, const struct in_addr ina) { - struct ifnet *ifp; - unsigned int ifindex = 0; + struct ifnet *ifp; + unsigned int ifindex = 0; VERIFY(gsin->sin_family == AF_INET); VERIFY(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr))); @@ -2005,10 +2069,11 @@ inp_lookup_mcast_ifp(const struct inpcb *inp, struct route ro; unsigned int ifscope = IFSCOPE_NONE; - if (inp != NULL && (inp->inp_flags & INP_BOUND_IF)) + if (inp != NULL && (inp->inp_flags & INP_BOUND_IF)) { ifscope = inp->inp_boundifp->if_index; + } - bzero(&ro, sizeof (ro)); + bzero(&ro, sizeof(ro)); memcpy(&ro.ro_dst, gsin, sizeof(struct sockaddr_in)); rtalloc_scoped_ign(&ro, 0, ifscope); if (ro.ro_rt != NULL) { @@ -2025,7 +2090,7 @@ inp_lookup_mcast_ifp(const struct inpcb *inp, mifp = ia->ia_ifp; IFA_UNLOCK(&ia->ia_ifa); if (!(mifp->if_flags & IFF_LOOPBACK) && - (mifp->if_flags & IFF_MULTICAST)) { + (mifp->if_flags & IFF_MULTICAST)) { ifp = mifp; break; } @@ -2035,7 +2100,7 @@ inp_lookup_mcast_ifp(const struct inpcb *inp, ROUTE_RELEASE(&ro); } - return (ifp); + return ifp; } /* @@ -2049,18 +2114,18 @@ inp_lookup_mcast_ifp(const struct inpcb *inp, int inp_join_group(struct inpcb *inp, struct sockopt *sopt) { - struct group_source_req gsr; - struct sockaddr_in *gsa, *ssa; - struct ifnet *ifp; - struct in_mfilter *imf; - struct ip_moptions *imo; - struct in_multi *inm = NULL; - struct in_msource *lims; - size_t idx; - int error, is_new; - struct igmp_tparams itp; - - bzero(&itp, sizeof (itp)); + struct group_source_req gsr; + struct sockaddr_in *gsa, *ssa; + struct ifnet *ifp; + struct in_mfilter *imf; + struct ip_moptions *imo; + struct in_multi *inm = NULL; + struct in_msource *lims; + size_t idx; + int error, is_new; + struct igmp_tparams itp; + + bzero(&itp, sizeof(itp)); ifp = NULL; imf = NULL; error = 0; @@ -2075,7 +2140,7 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) switch (sopt->sopt_name) { case IP_ADD_MEMBERSHIP: case IP_ADD_SOURCE_MEMBERSHIP: { - struct ip_mreq_source mreqs; + struct ip_mreq_source mreqs; if (sopt->sopt_name == IP_ADD_MEMBERSHIP) { error = sooptcopyin(sopt, &mreqs, @@ -2096,7 +2161,7 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) IGMP_PRINTF(("%s: error copyin IP_ADD_MEMBERSHIP/" "IP_ADD_SOURCE_MEMBERSHIP %d err=%d\n", __func__, sopt->sopt_name, error)); - return (error); + return error; } gsa->sin_family = AF_INET; @@ -2109,8 +2174,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) ssa->sin_addr = mreqs.imr_sourceaddr; } - if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) - return (EINVAL); + if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) { + return EINVAL; + } ifp = inp_lookup_mcast_ifp(inp, gsa, mreqs.imr_interface); IGMP_INET_PRINTF(mreqs.imr_interface, @@ -2130,12 +2196,14 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) sizeof(struct group_source_req), sizeof(struct group_source_req)); } - if (error) - return (error); + if (error) { + return error; + } if (gsa->sin_family != AF_INET || - gsa->sin_len != sizeof(struct sockaddr_in)) - return (EINVAL); + gsa->sin_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } /* * Overwrite the port field if present, as the sockaddr @@ -2144,19 +2212,21 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) gsa->sin_port = 0; if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { if (ssa->sin_family != AF_INET || - ssa->sin_len != sizeof(struct sockaddr_in)) - return (EINVAL); + ssa->sin_len != sizeof(struct sockaddr_in)) { + return 
EINVAL; + } ssa->sin_port = 0; } - if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) - return (EINVAL); + if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) { + return EINVAL; + } ifnet_head_lock_shared(); if (gsr.gsr_interface == 0 || (u_int)if_index < gsr.gsr_interface) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[gsr.gsr_interface]; ifnet_head_done(); @@ -2166,11 +2236,12 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) default: IGMP_PRINTF(("%s: unknown sopt_name %d\n", __func__, sopt->sopt_name)); - return (EOPNOTSUPP); + return EOPNOTSUPP; } - if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) - return (EADDRNOTAVAIL); + if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { + return EADDRNOTAVAIL; + } INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_mcast_join_total); /* @@ -2181,8 +2252,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) } imo = inp_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IMO_LOCK(imo); idx = imo_match_group(imo, ifp, gsa); @@ -2219,7 +2291,7 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) */ lims = imo_match_source(imo, idx, ssa); if (lims != NULL /*&& - lims->imsl_st[1] == MCAST_INCLUDE*/) { + * lims->imsl_st[1] == MCAST_INCLUDE*/) { error = EADDRNOTAVAIL; goto out_imo_locked; } @@ -2240,8 +2312,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) */ error = EINVAL; /* See comments above for EADDRINUSE */ - if (imf->imf_st[1] == MCAST_EXCLUDE) + if (imf->imf_st[1] == MCAST_EXCLUDE) { error = EADDRINUSE; + } goto out_imo_locked; } } @@ -2253,8 +2326,9 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) if (is_new) { if (imo->imo_num_memberships == imo->imo_max_memberships) { error = imo_grow(imo, 0); - if (error) + if (error) { goto out_imo_locked; + } } /* * Allocate the new slot upfront so we can deal with @@ -2320,9 +2394,10 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) IMO_LOCK(imo); VERIFY(inm != NULL || error != 0); - if (error) + if (error) { goto out_imo_free; - imo->imo_membership[idx] = inm; /* from in_joingroup() */ + } + imo->imo_membership[idx] = inm; /* from in_joingroup() */ } else { IGMP_PRINTF(("%s: merge inm state\n", __func__)); INM_LOCK(inm); @@ -2346,10 +2421,11 @@ inp_join_group(struct inpcb *inp, struct sockopt *sopt) out_imf_rollback: if (error) { imf_rollback(imf); - if (is_new) + if (is_new) { imf_purge(imf); - else + } else { imf_reap(imf); + } } else { imf_commit(imf); } @@ -2363,12 +2439,12 @@ out_imo_free: out_imo_locked: IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ /* schedule timer now that we've dropped the lock(s) */ igmp_set_timeout(&itp); - return (error); + return error; } /* @@ -2380,20 +2456,20 @@ out_imo_locked: int inp_leave_group(struct inpcb *inp, struct sockopt *sopt) { - struct group_source_req gsr; - struct ip_mreq_source mreqs; - struct sockaddr_in *gsa, *ssa; - struct ifnet *ifp; - struct in_mfilter *imf; - struct ip_moptions *imo; - struct in_msource *ims; - struct in_multi *inm = NULL; - size_t idx; - int error, is_final; - unsigned int ifindex = 0; - struct igmp_tparams itp; - - bzero(&itp, sizeof (itp)); + struct group_source_req gsr; + struct ip_mreq_source mreqs; + struct sockaddr_in *gsa, *ssa; + struct ifnet *ifp; + struct in_mfilter *imf; + struct ip_moptions *imo; + struct in_msource *ims; + struct in_multi *inm = NULL; + size_t idx; + int error, is_final; + unsigned int 
ifindex = 0; + struct igmp_tparams itp; + + bzero(&itp, sizeof(itp)); ifp = NULL; error = 0; is_final = 1; @@ -2421,8 +2497,9 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt) sizeof(struct ip_mreq_source), sizeof(struct ip_mreq_source)); } - if (error) - return (error); + if (error) { + return error; + } gsa->sin_family = AF_INET; gsa->sin_len = sizeof(struct sockaddr_in); @@ -2440,8 +2517,9 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt) * XXX NOTE WELL: The RFC 3678 API is preferred because * using an IPv4 address as a key is racy. */ - if (!in_nullhost(mreqs.imr_interface)) + if (!in_nullhost(mreqs.imr_interface)) { ifp = ip_multicast_if(&mreqs.imr_interface, &ifindex); + } IGMP_INET_PRINTF(mreqs.imr_interface, ("%s: imr_interface = %s, ifp = 0x%llx\n", __func__, @@ -2460,24 +2538,27 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt) sizeof(struct group_source_req), sizeof(struct group_source_req)); } - if (error) - return (error); + if (error) { + return error; + } if (gsa->sin_family != AF_INET || - gsa->sin_len != sizeof(struct sockaddr_in)) - return (EINVAL); + gsa->sin_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { if (ssa->sin_family != AF_INET || - ssa->sin_len != sizeof(struct sockaddr_in)) - return (EINVAL); + ssa->sin_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } } ifnet_head_lock_shared(); if (gsr.gsr_interface == 0 || (u_int)if_index < gsr.gsr_interface) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[gsr.gsr_interface]; @@ -2487,18 +2568,20 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt) default: IGMP_PRINTF(("%s: unknown sopt_name %d\n", __func__, sopt->sopt_name)); - return (EOPNOTSUPP); + return EOPNOTSUPP; } - if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) - return (EINVAL); + if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) { + return EINVAL; + } /* * Find the membership in the membership array. 
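* imo_match_group() yields an index into the imo_membership[] array,
* or (size_t)-1 when the socket is not a member; the same index also
* addresses the parallel imo_mfilters[] slot used for the leave below.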
*/ imo = inp_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IMO_LOCK(imo); idx = imo_match_group(imo, ifp, gsa); @@ -2579,10 +2662,11 @@ inp_leave_group(struct inpcb *inp, struct sockopt *sopt) } out_imf_rollback: - if (error) + if (error) { imf_rollback(imf); - else + } else { imf_commit(imf); + } imf_reap(imf); @@ -2605,20 +2689,20 @@ out_imf_rollback: IMO_LOCK(imo); for (++idx; idx < imo->imo_num_memberships; ++idx) { - imo->imo_membership[idx-1] = imo->imo_membership[idx]; - imo->imo_mfilters[idx-1] = imo->imo_mfilters[idx]; + imo->imo_membership[idx - 1] = imo->imo_membership[idx]; + imo->imo_mfilters[idx - 1] = imo->imo_mfilters[idx]; } imo->imo_num_memberships--; } out_locked: IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ /* schedule timer now that we've dropped the lock(s) */ igmp_set_timeout(&itp); - return (error); + return error; } /* @@ -2632,12 +2716,12 @@ out_locked: static int inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) { - struct in_addr addr; - struct ip_mreqn mreqn; - struct ifnet *ifp; - struct ip_moptions *imo; - int error = 0 ; - unsigned int ifindex = 0; + struct in_addr addr; + struct ip_mreqn mreqn; + struct ifnet *ifp; + struct ip_moptions *imo; + int error = 0; + unsigned int ifindex = 0; bzero(&addr, sizeof(addr)); if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) { @@ -2647,13 +2731,14 @@ inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) */ error = sooptcopyin(sopt, &mreqn, sizeof(struct ip_mreqn), sizeof(struct ip_mreqn)); - if (error) - return (error); + if (error) { + return error; + } ifnet_head_lock_shared(); if (mreqn.imr_ifindex < 0 || if_index < mreqn.imr_ifindex) { ifnet_head_done(); - return (EINVAL); + return EINVAL; } if (mreqn.imr_ifindex == 0) { @@ -2662,7 +2747,7 @@ inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) ifp = ifindex2ifnet[mreqn.imr_ifindex]; if (ifp == NULL) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } } ifnet_head_done(); @@ -2673,8 +2758,9 @@ inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) */ error = sooptcopyin(sopt, &addr, sizeof(struct in_addr), sizeof(struct in_addr)); - if (error) - return (error); + if (error) { + return error; + } if (in_nullhost(addr)) { ifp = NULL; } else { @@ -2683,7 +2769,7 @@ inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) IGMP_INET_PRINTF(addr, ("%s: can't find ifp for addr=%s\n", __func__, _igmp_inet_buf)); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } } /* XXX remove? */ @@ -2694,23 +2780,26 @@ inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) } /* Reject interfaces which do not support multicast. 
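* (An interface without IFF_MULTICAST in if_flags fails here with
* EOPNOTSUPP, before any ip_moptions state is allocated for the
* socket.)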
*/ - if (ifp != NULL && (ifp->if_flags & IFF_MULTICAST) == 0) - return (EOPNOTSUPP); + if (ifp != NULL && (ifp->if_flags & IFF_MULTICAST) == 0) { + return EOPNOTSUPP; + } imo = inp_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IMO_LOCK(imo); imo->imo_multicast_ifp = ifp; - if (ifindex) + if (ifindex) { imo->imo_multicast_addr = addr; - else + } else { imo->imo_multicast_addr.s_addr = INADDR_ANY; + } IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ - return (0); + return 0; } /* @@ -2719,76 +2808,85 @@ inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) static int inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) { - struct __msfilterreq64 msfr = {}, msfr64; - struct __msfilterreq32 msfr32; - struct sockaddr_in *gsa; - struct ifnet *ifp; - struct in_mfilter *imf; - struct ip_moptions *imo; - struct in_multi *inm; - size_t idx; - int error; - user_addr_t tmp_ptr; - struct igmp_tparams itp; - - bzero(&itp, sizeof (itp)); + struct __msfilterreq64 msfr = {}, msfr64; + struct __msfilterreq32 msfr32; + struct sockaddr_in *gsa; + struct ifnet *ifp; + struct in_mfilter *imf; + struct ip_moptions *imo; + struct in_multi *inm; + size_t idx; + int error; + user_addr_t tmp_ptr; + struct igmp_tparams itp; + + bzero(&itp, sizeof(itp)); if (IS_64BIT_PROCESS(current_proc())) { error = sooptcopyin(sopt, &msfr64, sizeof(struct __msfilterreq64), sizeof(struct __msfilterreq64)); - if (error) - return (error); + if (error) { + return error; + } /* we never use msfr.msfr_srcs; */ memcpy(&msfr, &msfr64, sizeof(msfr64)); } else { error = sooptcopyin(sopt, &msfr32, sizeof(struct __msfilterreq32), sizeof(struct __msfilterreq32)); - if (error) - return (error); + if (error) { + return error; + } /* we never use msfr.msfr_srcs; */ memcpy(&msfr, &msfr32, sizeof(msfr32)); } if ((size_t) msfr.msfr_nsrcs > - UINT32_MAX / sizeof(struct sockaddr_storage)) + UINT32_MAX / sizeof(struct sockaddr_storage)) { msfr.msfr_nsrcs = UINT32_MAX / sizeof(struct sockaddr_storage); + } - if (msfr.msfr_nsrcs > in_mcast_maxsocksrc) - return (ENOBUFS); + if (msfr.msfr_nsrcs > in_mcast_maxsocksrc) { + return ENOBUFS; + } if ((msfr.msfr_fmode != MCAST_EXCLUDE && - msfr.msfr_fmode != MCAST_INCLUDE)) - return (EINVAL); + msfr.msfr_fmode != MCAST_INCLUDE)) { + return EINVAL; + } if (msfr.msfr_group.ss_family != AF_INET || - msfr.msfr_group.ss_len != sizeof(struct sockaddr_in)) - return (EINVAL); + msfr.msfr_group.ss_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } gsa = (struct sockaddr_in *)&msfr.msfr_group; - if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) - return (EINVAL); + if (!IN_MULTICAST(ntohl(gsa->sin_addr.s_addr))) { + return EINVAL; + } - gsa->sin_port = 0; /* ignore port */ + gsa->sin_port = 0; /* ignore port */ ifnet_head_lock_shared(); if (msfr.msfr_ifindex == 0 || (u_int)if_index < msfr.msfr_ifindex) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[msfr.msfr_ifindex]; ifnet_head_done(); - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } /* * Check if this socket is a member of this group. */ imo = inp_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IMO_LOCK(imo); idx = imo_match_group(imo, ifp, gsa); @@ -2812,15 +2910,16 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) * allows us to deal with page faults up-front. 
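* The whole user-supplied array is brought in with a single copyin
* into a temporary kernel buffer (kss); each entry is then validated
* and grafted onto the filter with imf_get_source(), and the first
* failure rolls the entire transaction back via imf_rollback().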
*/ if (msfr.msfr_nsrcs > 0) { - struct in_msource *lims; - struct sockaddr_in *psin; - struct sockaddr_storage *kss, *pkss; - int i; + struct in_msource *lims; + struct sockaddr_in *psin; + struct sockaddr_storage *kss, *pkss; + int i; - if (IS_64BIT_PROCESS(current_proc())) + if (IS_64BIT_PROCESS(current_proc())) { tmp_ptr = msfr64.msfr_srcs; - else + } else { tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs); + } IGMP_PRINTF(("%s: loading %lu source list entries\n", __func__, (unsigned long)msfr.msfr_nsrcs)); @@ -2868,15 +2967,17 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) break; } error = imf_get_source(imf, psin, &lims); - if (error) + if (error) { break; + } lims->imsl_st[1] = imf->imf_st[1]; } FREE(kss, M_TEMP); } - if (error) + if (error) { goto out_imf_rollback; + } /* * Begin state merge transaction at IGMP layer. @@ -2894,26 +2995,28 @@ inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt) error = igmp_change_state(inm, &itp); INM_UNLOCK(inm); #ifdef IGMP_DEBUG - if (error) + if (error) { IGMP_PRINTF(("%s: failed igmp downcall\n", __func__)); + } #endif out_imf_rollback: - if (error) + if (error) { imf_rollback(imf); - else + } else { imf_commit(imf); + } imf_reap(imf); out_imo_locked: IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ /* schedule timer now that we've dropped the lock(s) */ igmp_set_timeout(&itp); - return (error); + return error; } /* @@ -2928,10 +3031,10 @@ out_imo_locked: int inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) { - struct ip_moptions *imo; - int error; - unsigned int ifindex; - struct ifnet *ifp; + struct ip_moptions *imo; + int error; + unsigned int ifindex; + struct ifnet *ifp; error = 0; @@ -2941,8 +3044,9 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) */ if (SOCK_PROTO(inp->inp_socket) == IPPROTO_DIVERT || (SOCK_TYPE(inp->inp_socket) != SOCK_RAW && - SOCK_TYPE(inp->inp_socket) != SOCK_DGRAM)) - return (EOPNOTSUPP); + SOCK_TYPE(inp->inp_socket) != SOCK_DGRAM)) { + return EOPNOTSUPP; + } switch (sopt->sopt_name) { case IP_MULTICAST_IF: @@ -2953,10 +3057,11 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) /* * Select the interface for outgoing multicast packets. 
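* Unlike the legacy IP_MULTICAST_IF form, which names the interface
* by one of its IPv4 addresses, this variant takes a bare interface
* index; an ifindex of 0 clears the selection back to the default.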
*/ - error = sooptcopyin(sopt, &ifindex, sizeof (ifindex), - sizeof (ifindex)); - if (error) + error = sooptcopyin(sopt, &ifindex, sizeof(ifindex), + sizeof(ifindex)); + if (error) { break; + } imo = inp_findmoptions(inp); if (imo == NULL) { @@ -2972,7 +3077,7 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) IMO_LOCK(imo); imo->imo_multicast_ifp = NULL; IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ break; } @@ -2980,8 +3085,8 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) /* Don't need to check is ifindex is < 0 since it's unsigned */ if ((unsigned int)if_index < ifindex) { ifnet_head_done(); - IMO_REMREF(imo); /* from inp_findmoptions() */ - error = ENXIO; /* per IPV6_MULTICAST_IF */ + IMO_REMREF(imo); /* from inp_findmoptions() */ + error = ENXIO; /* per IPV6_MULTICAST_IF */ break; } ifp = ifindex2ifnet[ifindex]; @@ -2989,7 +3094,7 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) /* If it's detached or isn't a multicast interface, bail out */ if (ifp == NULL || !(ifp->if_flags & IFF_MULTICAST)) { - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ error = EADDRNOTAVAIL; break; } @@ -3004,7 +3109,7 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) */ imo->imo_multicast_addr.s_addr = INADDR_ANY; IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ break; case IP_MULTICAST_TTL: { @@ -3019,15 +3124,17 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) if (sopt->sopt_valsize == sizeof(u_char)) { error = sooptcopyin(sopt, &ttl, sizeof(u_char), sizeof(u_char)); - if (error) + if (error) { break; + } } else { u_int ittl; error = sooptcopyin(sopt, &ittl, sizeof(u_int), sizeof(u_int)); - if (error) + if (error) { break; + } if (ittl > 255) { error = EINVAL; break; @@ -3042,7 +3149,7 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) IMO_LOCK(imo); imo->imo_multicast_ttl = ttl; IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ break; } @@ -3058,15 +3165,17 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) if (sopt->sopt_valsize == sizeof(u_char)) { error = sooptcopyin(sopt, &loop, sizeof(u_char), sizeof(u_char)); - if (error) + if (error) { break; + } } else { u_int iloop; error = sooptcopyin(sopt, &iloop, sizeof(u_int), - sizeof(u_int)); - if (error) + sizeof(u_int)); + if (error) { break; + } loop = (u_char)iloop; } imo = inp_findmoptions(inp); @@ -3077,7 +3186,7 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) IMO_LOCK(imo); imo->imo_multicast_loop = !!loop; IMO_UNLOCK(imo); - IMO_REMREF(imo); /* from inp_findmoptions() */ + IMO_REMREF(imo); /* from inp_findmoptions() */ break; } @@ -3111,7 +3220,7 @@ inp_setmoptions(struct inpcb *inp, struct sockopt *sopt) break; } - return (error); + return error; } /* @@ -3126,24 +3235,26 @@ sysctl_ip_mcast_filters SYSCTL_HANDLER_ARGS { #pragma unused(oidp) - struct in_addr src = {}, group; - struct ifnet *ifp; - struct in_multi *inm; - struct in_multistep step; - struct ip_msource *ims; - int *name; - int retval = 0; - u_int namelen; - uint32_t fmode, ifindex; + struct in_addr src = {}, group; + struct ifnet *ifp; + struct in_multi *inm; + struct in_multistep step; + struct ip_msource *ims; + int *name; + int retval = 0; + u_int namelen; + uint32_t fmode, ifindex; name = (int *)arg1; namelen = (u_int)arg2; - if (req->newptr != 
USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } - if (namelen != 2) - return (EINVAL); + if (namelen != 2) { + return EINVAL; + } ifindex = name[0]; ifnet_head_lock_shared(); @@ -3151,7 +3262,7 @@ sysctl_ip_mcast_filters SYSCTL_HANDLER_ARGS IGMP_PRINTF(("%s: ifindex %u out of range\n", __func__, ifindex)); ifnet_head_done(); - return (ENOENT); + return ENOENT; } group.s_addr = name[1]; @@ -3160,31 +3271,33 @@ sysctl_ip_mcast_filters SYSCTL_HANDLER_ARGS ("%s: group %s is not multicast\n", __func__, _igmp_inet_buf)); ifnet_head_done(); - return (EINVAL); + return EINVAL; } ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); if (ifp == NULL) { IGMP_PRINTF(("%s: no ifp for ifindex %u\n", __func__, ifindex)); - return (ENOENT); + return ENOENT; } in_multihead_lock_shared(); IN_FIRST_MULTI(step, inm); while (inm != NULL) { INM_LOCK(inm); - if (inm->inm_ifp != ifp) + if (inm->inm_ifp != ifp) { goto next; + } - if (!in_hosteq(inm->inm_addr, group)) + if (!in_hosteq(inm->inm_addr, group)) { goto next; + } fmode = inm->inm_st[1].iss_fmode; retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); if (retval != 0) { INM_UNLOCK(inm); - break; /* abort */ + break; /* abort */ } RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) { #ifdef IGMP_DEBUG @@ -3203,8 +3316,9 @@ sysctl_ip_mcast_filters SYSCTL_HANDLER_ARGS } src.s_addr = htonl(ims->ims_haddr); retval = SYSCTL_OUT(req, &src, sizeof(struct in_addr)); - if (retval != 0) - break; /* process next inm */ + if (retval != 0) { + break; /* process next inm */ + } } next: INM_UNLOCK(inm); @@ -3212,7 +3326,7 @@ next: } in_multihead_lock_done(); - return (retval); + return retval; } /* @@ -3231,30 +3345,32 @@ ip_multicast_if(struct in_addr *a, unsigned int *ifindexp) unsigned int ifindex; struct ifnet *ifp; - if (ifindexp != NULL) + if (ifindexp != NULL) { *ifindexp = 0; + } if (ntohl(a->s_addr) >> 24 == 0) { ifindex = ntohl(a->s_addr) & 0xffffff; ifnet_head_lock_shared(); /* Don't need to check is ifindex is < 0 since it's unsigned */ if ((unsigned int)if_index < ifindex) { ifnet_head_done(); - return (NULL); + return NULL; } ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); - if (ifp != NULL && ifindexp != NULL) + if (ifp != NULL && ifindexp != NULL) { *ifindexp = ifindex; + } } else { INADDR_TO_IFP(*a, ifp); } - return (ifp); + return ifp; } void in_multi_init(void) { - PE_parse_boot_argn("ifa_debug", &inm_debug, sizeof (inm_debug)); + PE_parse_boot_argn("ifa_debug", &inm_debug, sizeof(inm_debug)); /* Setup lock group and attribute for in_multihead */ in_multihead_lock_grp_attr = lck_grp_attr_alloc_init(); @@ -3268,8 +3384,8 @@ in_multi_init(void) in_multihead_lock_attr); TAILQ_INIT(&inm_trash_head); - inm_size = (inm_debug == 0) ? sizeof (struct in_multi) : - sizeof (struct in_multi_dbg); + inm_size = (inm_debug == 0) ? 
sizeof(struct in_multi) : + sizeof(struct in_multi_dbg); inm_zone = zinit(inm_size, INM_ZONE_MAX * inm_size, 0, INM_ZONE_NAME); if (inm_zone == NULL) { @@ -3278,7 +3394,7 @@ in_multi_init(void) } zone_change(inm_zone, Z_EXPAND, TRUE); - ipms_size = sizeof (struct ip_msource); + ipms_size = sizeof(struct ip_msource); ipms_zone = zinit(ipms_size, IPMS_ZONE_MAX * ipms_size, 0, IPMS_ZONE_NAME); if (ipms_zone == NULL) { @@ -3287,7 +3403,7 @@ in_multi_init(void) } zone_change(ipms_zone, Z_EXPAND, TRUE); - inms_size = sizeof (struct in_msource); + inms_size = sizeof(struct in_msource); inms_zone = zinit(inms_size, INMS_ZONE_MAX * inms_size, 0, INMS_ZONE_NAME); if (inms_zone == NULL) { @@ -3313,7 +3429,7 @@ in_multi_alloc(int how) inm->inm_trace = inm_trace; } } - return (inm); + return inm; } static void @@ -3405,8 +3521,9 @@ in_multi_detach(struct in_multi *inm) } --inm->inm_reqcnt; - if (inm->inm_reqcnt > 0) - return (0); + if (inm->inm_reqcnt > 0) { + return 0; + } if (!(inm->inm_debug & IFD_ATTACHED)) { panic("%s: Attempt to detach an unattached record inm=%p", @@ -3433,16 +3550,17 @@ in_multi_detach(struct in_multi *inm) inm->inm_debug |= IFD_TRASHED; } - return (1); + return 1; } void inm_addref(struct in_multi *inm, int locked) { - if (!locked) + if (!locked) { INM_LOCK_SPIN(inm); - else + } else { INM_LOCK_ASSERT_HELD(inm); + } if (++inm->inm_refcount == 0) { panic("%s: inm=%p wraparound refcnt", __func__, inm); @@ -3450,8 +3568,9 @@ inm_addref(struct in_multi *inm, int locked) } else if (inm->inm_trace != NULL) { (*inm->inm_trace)(inm, TRUE); } - if (!locked) + if (!locked) { INM_UNLOCK(inm); + } } void @@ -3460,10 +3579,11 @@ inm_remref(struct in_multi *inm, int locked) struct ifmultiaddr *ifma; struct igmp_ifinfo *igi; - if (!locked) + if (!locked) { INM_LOCK_SPIN(inm); - else + } else { INM_LOCK_ASSERT_HELD(inm); + } if (inm->inm_refcount == 0 || (inm->inm_refcount == 1 && locked)) { panic("%s: inm=%p negative/missing refcnt", __func__, inm); @@ -3474,8 +3594,9 @@ inm_remref(struct in_multi *inm, int locked) --inm->inm_refcount; if (inm->inm_refcount > 0) { - if (!locked) + if (!locked) { INM_UNLOCK(inm); + } return; } @@ -3500,8 +3621,9 @@ inm_remref(struct in_multi *inm, int locked) INM_UNLOCK(inm); in_multihead_lock_done(); /* If it was locked, return it as such */ - if (locked) + if (locked) { INM_LOCK(inm); + } return; } inm_purge(inm); @@ -3521,8 +3643,9 @@ inm_remref(struct in_multi *inm, int locked) /* Release reference held to the underlying ifmultiaddr */ IFMA_REMREF(ifma); - if (igi != NULL) + if (igi != NULL) { IGI_REMREF(igi); + } } static void @@ -3582,10 +3705,11 @@ ipms_alloc(int how) struct ip_msource *ims; ims = (how == M_WAITOK) ? zalloc(ipms_zone) : zalloc_noblock(ipms_zone); - if (ims != NULL) + if (ims != NULL) { bzero(ims, ipms_size); + } - return (ims); + return ims; } static void @@ -3601,10 +3725,11 @@ inms_alloc(int how) inms = (how == M_WAITOK) ? 
zalloc(inms_zone) : zalloc_noblock(inms_zone); - if (inms != NULL) + if (inms != NULL) { bzero(inms, inms_size); + } - return (inms); + return inms; } static void @@ -3620,9 +3745,10 @@ static const char *inm_modestrs[] = { "un\n", "in", "ex" }; static const char * inm_mode_str(const int mode) { - if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) - return (inm_modestrs[mode]); - return ("??"); + if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) { + return inm_modestrs[mode]; + } + return "??"; } static const char *inm_statestrs[] = { @@ -3641,9 +3767,10 @@ static const char *inm_statestrs[] = { static const char * inm_state_str(const int state) { - if (state >= IGMP_NOT_MEMBER && state <= IGMP_LEAVING_MEMBER) - return (inm_statestrs[state]); - return ("??"); + if (state >= IGMP_NOT_MEMBER && state <= IGMP_LEAVING_MEMBER) { + return inm_statestrs[state]; + } + return "??"; } /* @@ -3657,8 +3784,9 @@ inm_print(const struct in_multi *inm) INM_LOCK_ASSERT_HELD(__DECONST(struct in_multi *, inm)); - if (igmp_debug == 0) + if (igmp_debug == 0) { return; + } inet_ntop(AF_INET, &inm->inm_addr, buf, sizeof(buf)); printf("%s: --- begin inm 0x%llx ---\n", __func__, @@ -3695,7 +3823,6 @@ inm_print(const struct in_multi *inm) void inm_print(__unused const struct in_multi *inm) { - } #endif diff --git a/bsd/netinet/in_pcb.c b/bsd/netinet/in_pcb.c index 3d2e8c91d..6f2754373 100644 --- a/bsd/netinet/in_pcb.c +++ b/bsd/netinet/in_pcb.c @@ -120,27 +120,27 @@ #include #include -static lck_grp_t *inpcb_lock_grp; -static lck_attr_t *inpcb_lock_attr; -static lck_grp_attr_t *inpcb_lock_grp_attr; -decl_lck_mtx_data(static, inpcb_lock); /* global INPCB lock */ +static lck_grp_t *inpcb_lock_grp; +static lck_attr_t *inpcb_lock_attr; +static lck_grp_attr_t *inpcb_lock_grp_attr; +decl_lck_mtx_data(static, inpcb_lock); /* global INPCB lock */ decl_lck_mtx_data(static, inpcb_timeout_lock); static TAILQ_HEAD(, inpcbinfo) inpcb_head = TAILQ_HEAD_INITIALIZER(inpcb_head); -static u_int16_t inpcb_timeout_run = 0; /* INPCB timer is scheduled to run */ +static u_int16_t inpcb_timeout_run = 0; /* INPCB timer is scheduled to run */ static boolean_t inpcb_garbage_collecting = FALSE; /* gc timer is scheduled */ -static boolean_t inpcb_ticking = FALSE; /* "slow" timer is scheduled */ +static boolean_t inpcb_ticking = FALSE; /* "slow" timer is scheduled */ static boolean_t inpcb_fast_timer_on = FALSE; -#define INPCB_GCREQ_THRESHOLD 50000 +#define INPCB_GCREQ_THRESHOLD 50000 static thread_call_t inpcb_thread_call, inpcb_fast_thread_call; static void inpcb_sched_timeout(void); static void inpcb_sched_lazy_timeout(void); static void _inpcb_sched_timeout(unsigned int); static void inpcb_timeout(void *, void *); -const int inpcb_timeout_lazy = 10; /* 10 seconds leeway for lazy timers */ +const int inpcb_timeout_lazy = 10; /* 10 seconds leeway for lazy timers */ extern int tvtohz(struct timeval *); #if CONFIG_PROC_UUID_POLICY @@ -150,21 +150,21 @@ static void inp_update_necp_want_app_policy(struct inpcb *, boolean_t); #endif /* NECP */ #endif /* !CONFIG_PROC_UUID_POLICY */ -#define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8)) -#define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1)) +#define DBG_FNC_PCB_LOOKUP NETDBG_CODE(DBG_NETTCP, (6 << 8)) +#define DBG_FNC_PCB_HLOOKUP NETDBG_CODE(DBG_NETTCP, ((6 << 8) | 1)) /* * These configure the range of local port addresses assigned to * "unspecified" outgoing connections/packets/whatever. 
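* The defaults below give a privileged "low" band walked downward
* from 1023 to 600, plus the IANA dynamic range 49152..65535 serving
* as both the ordinary and the "high" band; all six bounds are
* runtime-tunable through the net.inet.ip.portrange sysctls that
* follow.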
*/ -int ipport_lowfirstauto = IPPORT_RESERVED - 1; /* 1023 */ -int ipport_lowlastauto = IPPORT_RESERVEDSTART; /* 600 */ -int ipport_firstauto = IPPORT_HIFIRSTAUTO; /* 49152 */ -int ipport_lastauto = IPPORT_HILASTAUTO; /* 65535 */ -int ipport_hifirstauto = IPPORT_HIFIRSTAUTO; /* 49152 */ -int ipport_hilastauto = IPPORT_HILASTAUTO; /* 65535 */ - -#define RANGECHK(var, min, max) \ +int ipport_lowfirstauto = IPPORT_RESERVED - 1; /* 1023 */ +int ipport_lowlastauto = IPPORT_RESERVEDSTART; /* 600 */ +int ipport_firstauto = IPPORT_HIFIRSTAUTO; /* 49152 */ +int ipport_lastauto = IPPORT_HILASTAUTO; /* 65535 */ +int ipport_hifirstauto = IPPORT_HIFIRSTAUTO; /* 49152 */ +int ipport_hilastauto = IPPORT_HILASTAUTO; /* 65535 */ + +#define RANGECHK(var, min, max) \ if ((var) < (min)) { (var) = (min); } \ else if ((var) > (max)) { (var) = (max); } @@ -183,32 +183,32 @@ sysctl_net_ipport_check SYSCTL_HANDLER_ARGS RANGECHK(ipport_hifirstauto, IPPORT_RESERVED, USHRT_MAX); RANGECHK(ipport_hilastauto, IPPORT_RESERVED, USHRT_MAX); } - return (error); + return error; } #undef RANGECHK SYSCTL_NODE(_net_inet_ip, IPPROTO_IP, portrange, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IP Ports"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP Ports"); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowfirst, - CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, - &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ipport_lowfirstauto, 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, lowlast, - CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, - &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ipport_lowlastauto, 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, first, - CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, - &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ipport_firstauto, 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, last, - CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, - &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ipport_lastauto, 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hifirst, - CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, - &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ipport_hifirstauto, 0, &sysctl_net_ipport_check, "I", ""); SYSCTL_PROC(_net_inet_ip_portrange, OID_AUTO, hilast, - CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, - &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ipport_hilastauto, 0, &sysctl_net_ipport_check, "I", ""); static uint32_t apn_fallbk_debug = 0; #define apn_fallbk_log(x) do { if (apn_fallbk_debug >= 1) log x; } while (0) @@ -217,7 +217,7 @@ static uint32_t apn_fallbk_debug = 0; static boolean_t apn_fallbk_enabled = TRUE; SYSCTL_DECL(_net_inet); -SYSCTL_NODE(_net_inet, OID_AUTO, apn_fallback, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "APN Fallback"); +SYSCTL_NODE(_net_inet, OID_AUTO, apn_fallback, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "APN Fallback"); SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED, &apn_fallbk_enabled, 0, "APN fallback enable"); SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, @@ -226,29 +226,29 @@ SYSCTL_UINT(_net_inet_apn_fallback, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED 
static boolean_t apn_fallbk_enabled = FALSE; #endif -extern int udp_use_randomport; -extern int tcp_use_randomport; +extern int udp_use_randomport; +extern int tcp_use_randomport; /* Structs used for flowhash computation */ struct inp_flowhash_key_addr { union { - struct in_addr v4; + struct in_addr v4; struct in6_addr v6; - u_int8_t addr8[16]; - u_int16_t addr16[8]; - u_int32_t addr32[4]; + u_int8_t addr8[16]; + u_int16_t addr16[8]; + u_int32_t addr32[4]; } infha; }; struct inp_flowhash_key { - struct inp_flowhash_key_addr infh_laddr; - struct inp_flowhash_key_addr infh_faddr; - u_int32_t infh_lport; - u_int32_t infh_fport; - u_int32_t infh_af; - u_int32_t infh_proto; - u_int32_t infh_rand1; - u_int32_t infh_rand2; + struct inp_flowhash_key_addr infh_laddr; + struct inp_flowhash_key_addr infh_faddr; + u_int32_t infh_lport; + u_int32_t infh_fport; + u_int32_t infh_af; + u_int32_t infh_proto; + u_int32_t infh_rand1; + u_int32_t infh_rand2; }; static u_int32_t inp_hash_seed = 0; @@ -256,8 +256,8 @@ static u_int32_t inp_hash_seed = 0; static int infc_cmp(const struct inpcb *, const struct inpcb *); /* Flags used by inp_fc_getinp */ -#define INPFC_SOLOCKED 0x1 -#define INPFC_REMOVE 0x2 +#define INPFC_SOLOCKED 0x1 +#define INPFC_REMOVE 0x2 static struct inpcb *inp_fc_getinp(u_int32_t, u_int32_t); static void inp_fc_feedback(struct inpcb *); @@ -295,9 +295,10 @@ in_pcbinit(void) inpcb_thread_call = thread_call_allocate_with_priority(inpcb_timeout, NULL, THREAD_CALL_PRIORITY_KERNEL); inpcb_fast_thread_call = thread_call_allocate_with_priority( - inpcb_timeout, NULL, THREAD_CALL_PRIORITY_KERNEL); - if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL) + inpcb_timeout, NULL, THREAD_CALL_PRIORITY_KERNEL); + if (inpcb_thread_call == NULL || inpcb_fast_thread_call == NULL) { panic("unable to alloc the inpcb thread call"); + } /* * Initialize data structures required to deliver @@ -310,7 +311,7 @@ in_pcbinit(void) lck_mtx_unlock(&inp_fc_lck); } -#define INPCB_HAVE_TIMER_REQ(req) (((req).intimer_lazy > 0) || \ +#define INPCB_HAVE_TIMER_REQ(req) (((req).intimer_lazy > 0) || \ ((req).intimer_fast > 0) || ((req).intimer_nodelay > 0)) static void inpcb_timeout(void *arg0, void *arg1) @@ -344,7 +345,7 @@ inpcb_timeout(void *arg0, void *arg1) TAILQ_FOREACH(ipi, &inpcb_head, ipi_entry) { if (INPCB_HAVE_TIMER_REQ(ipi->ipi_gc_req)) { bzero(&ipi->ipi_gc_req, - sizeof(ipi->ipi_gc_req)); + sizeof(ipi->ipi_gc_req)); if (gc && ipi->ipi_gc != NULL) { ipi->ipi_gc(ipi); gccnt.intimer_lazy += @@ -357,7 +358,7 @@ inpcb_timeout(void *arg0, void *arg1) } if (INPCB_HAVE_TIMER_REQ(ipi->ipi_timer_req)) { bzero(&ipi->ipi_timer_req, - sizeof(ipi->ipi_timer_req)); + sizeof(ipi->ipi_timer_req)); if (t && ipi->ipi_timer != NULL) { ipi->ipi_timer(ipi); tmcnt.intimer_lazy += @@ -374,22 +375,25 @@ inpcb_timeout(void *arg0, void *arg1) } /* lock was dropped above, so check first before overriding */ - if (!inpcb_garbage_collecting) + if (!inpcb_garbage_collecting) { inpcb_garbage_collecting = INPCB_HAVE_TIMER_REQ(gccnt); - if (!inpcb_ticking) + } + if (!inpcb_ticking) { inpcb_ticking = INPCB_HAVE_TIMER_REQ(tmcnt); + } /* re-arm the timer if there's work to do */ inpcb_timeout_run--; VERIFY(inpcb_timeout_run >= 0 && inpcb_timeout_run < 2); - if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0) + if (gccnt.intimer_nodelay > 0 || tmcnt.intimer_nodelay > 0) { inpcb_sched_timeout(); - else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5) + } else if ((gccnt.intimer_fast + tmcnt.intimer_fast) <= 5) { /* be lazy when idle 
with little activity */ inpcb_sched_lazy_timeout(); - else + } else { inpcb_sched_timeout(); + } lck_mtx_unlock(&inpcb_timeout_lock); } @@ -426,8 +430,8 @@ _inpcb_sched_timeout(unsigned int offset) clock_interval_to_absolutetime_interval(offset, NSEC_PER_SEC, &leeway); thread_call_enter_delayed_with_leeway( - inpcb_thread_call, NULL, deadline, leeway, - THREAD_CALL_DELAY_LEEWAY); + inpcb_thread_call, NULL, deadline, leeway, + THREAD_CALL_DELAY_LEEWAY); } } else if (inpcb_timeout_run == 1 && offset == 0 && !inpcb_fast_timer_on) { @@ -451,7 +455,7 @@ inpcb_gc_sched(struct inpcbinfo *ipi, u_int32_t type) lck_mtx_lock_spin(&inpcb_timeout_lock); inpcb_garbage_collecting = TRUE; gccnt = ipi->ipi_gc_req.intimer_nodelay + - ipi->ipi_gc_req.intimer_fast; + ipi->ipi_gc_req.intimer_fast; if (gccnt > INPCB_GCREQ_THRESHOLD) { type = INPCB_TIMER_FAST; @@ -477,7 +481,6 @@ inpcb_gc_sched(struct inpcbinfo *ipi, u_int32_t type) void inpcb_timer_sched(struct inpcbinfo *ipi, u_int32_t type) { - lck_mtx_lock_spin(&inpcb_timeout_lock); inpcb_ticking = TRUE; switch (type) { @@ -522,16 +525,18 @@ in_pcbinfo_detach(struct inpcbinfo *ipi) lck_mtx_lock(&inpcb_lock); TAILQ_FOREACH(ipi0, &inpcb_head, ipi_entry) { - if (ipi0 == ipi) + if (ipi0 == ipi) { break; + } } - if (ipi0 != NULL) + if (ipi0 != NULL) { TAILQ_REMOVE(&inpcb_head, ipi0, ipi_entry); - else + } else { error = ENXIO; + } lck_mtx_unlock(&inpcb_lock); - return (error); + return error; } /* @@ -546,20 +551,21 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p) { #pragma unused(p) struct inpcb *inp; - caddr_t temp; + caddr_t temp; #if CONFIG_MACF_NET int mac_error; #endif /* CONFIG_MACF_NET */ if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) { inp = (struct inpcb *)zalloc(pcbinfo->ipi_zone); - if (inp == NULL) - return (ENOBUFS); - bzero((caddr_t)inp, sizeof (*inp)); + if (inp == NULL) { + return ENOBUFS; + } + bzero((caddr_t)inp, sizeof(*inp)); } else { inp = (struct inpcb *)(void *)so->so_saved_pcb; temp = inp->inp_saved_ppcb; - bzero((caddr_t)inp, sizeof (*inp)); + bzero((caddr_t)inp, sizeof(*inp)); inp->inp_saved_ppcb = temp; } @@ -569,44 +575,45 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p) #if CONFIG_MACF_NET mac_error = mac_inpcb_label_init(inp, M_WAITOK); if (mac_error != 0) { - if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) + if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) { zfree(pcbinfo->ipi_zone, inp); - return (mac_error); + } + return mac_error; } mac_inpcb_label_associate(so, inp); #endif /* CONFIG_MACF_NET */ /* make sure inp_stat is always 64-bit aligned */ inp->inp_stat = (struct inp_stat *)P2ROUNDUP(inp->inp_stat_store, - sizeof (u_int64_t)); + sizeof(u_int64_t)); if (((uintptr_t)inp->inp_stat - (uintptr_t)inp->inp_stat_store) + - sizeof (*inp->inp_stat) > sizeof (inp->inp_stat_store)) { + sizeof(*inp->inp_stat) > sizeof(inp->inp_stat_store)) { panic("%s: insufficient space to align inp_stat", __func__); /* NOTREACHED */ } /* make sure inp_cstat is always 64-bit aligned */ inp->inp_cstat = (struct inp_stat *)P2ROUNDUP(inp->inp_cstat_store, - sizeof (u_int64_t)); + sizeof(u_int64_t)); if (((uintptr_t)inp->inp_cstat - (uintptr_t)inp->inp_cstat_store) + - sizeof (*inp->inp_cstat) > sizeof (inp->inp_cstat_store)) { + sizeof(*inp->inp_cstat) > sizeof(inp->inp_cstat_store)) { panic("%s: insufficient space to align inp_cstat", __func__); /* NOTREACHED */ } /* make sure inp_wstat is always 64-bit aligned */ inp->inp_wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_wstat_store, 
- sizeof (u_int64_t)); + sizeof(u_int64_t)); if (((uintptr_t)inp->inp_wstat - (uintptr_t)inp->inp_wstat_store) + - sizeof (*inp->inp_wstat) > sizeof (inp->inp_wstat_store)) { + sizeof(*inp->inp_wstat) > sizeof(inp->inp_wstat_store)) { panic("%s: insufficient space to align inp_wstat", __func__); /* NOTREACHED */ } /* make sure inp_Wstat is always 64-bit aligned */ inp->inp_Wstat = (struct inp_stat *)P2ROUNDUP(inp->inp_Wstat_store, - sizeof (u_int64_t)); + sizeof(u_int64_t)); if (((uintptr_t)inp->inp_Wstat - (uintptr_t)inp->inp_Wstat_store) + - sizeof (*inp->inp_Wstat) > sizeof (inp->inp_Wstat_store)) { + sizeof(*inp->inp_Wstat) > sizeof(inp->inp_Wstat_store)) { panic("%s: insufficient space to align inp_Wstat", __func__); /* NOTREACHED */ } @@ -619,14 +626,17 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p) } #if INET6 - if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on) + if (SOCK_DOM(so) == PF_INET6 && !ip6_mapped_addr_on) { inp->inp_flags |= IN6P_IPV6_V6ONLY; + } - if (ip6_auto_flowlabel) + if (ip6_auto_flowlabel) { inp->inp_flags |= IN6P_AUTOFLOWLABEL; + } #endif /* INET6 */ - if (intcoproc_unrestricted) + if (intcoproc_unrestricted) { inp->inp_flags2 |= INP2_INTCOPROC_ALLOWED; + } (void) inp_update_policy(inp); @@ -635,7 +645,7 @@ in_pcballoc(struct socket *so, struct inpcbinfo *pcbinfo, struct proc *p) LIST_INSERT_HEAD(pcbinfo->ipi_listhead, inp, inp_list); pcbinfo->ipi_count++; lck_rw_done(pcbinfo->ipi_lock); - return (0); + return 0; } /* @@ -663,16 +673,17 @@ in_pcblookup_local_and_cleanup(struct inpcbinfo *pcbinfo, struct in_addr laddr, socket_lock(so, 0); if (so->so_usecount == 0) { - if (inp->inp_state != INPCB_STATE_DEAD) + if (inp->inp_state != INPCB_STATE_DEAD) { in_pcbdetach(inp); - in_pcbdispose(inp); /* will unlock & destroy */ + } + in_pcbdispose(inp); /* will unlock & destroy */ inp = NULL; } else { socket_unlock(so, 0); } } - return (inp); + return inp; } static void @@ -684,18 +695,18 @@ in_pcb_conflict_post_msg(u_int16_t port) * who has set SOF_NOTIFYCONFLICT owns. */ struct kev_msg ev_msg; - struct kev_in_portinuse in_portinuse; + struct kev_in_portinuse in_portinuse; - bzero(&in_portinuse, sizeof (struct kev_in_portinuse)); - bzero(&ev_msg, sizeof (struct kev_msg)); - in_portinuse.port = ntohs(port); /* port in host order */ + bzero(&in_portinuse, sizeof(struct kev_in_portinuse)); + bzero(&ev_msg, sizeof(struct kev_msg)); + in_portinuse.port = ntohs(port); /* port in host order */ in_portinuse.req_pid = proc_selfpid(); ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_NETWORK_CLASS; ev_msg.kev_subclass = KEV_INET_SUBCLASS; ev_msg.event_code = KEV_INET_PORTINUSE; ev_msg.dv[0].data_ptr = &in_portinuse; - ev_msg.dv[0].data_length = sizeof (struct kev_in_portinuse); + ev_msg.dv[0].data_length = sizeof(struct kev_in_portinuse); ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(NULL, &ev_msg); } @@ -727,10 +738,12 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) struct in_addr laddr; struct ifnet *outif = NULL; - if (TAILQ_EMPTY(&in_ifaddrhead)) /* XXX broken! */ - return (EADDRNOTAVAIL); - if (!(so->so_options & (SO_REUSEADDR|SO_REUSEPORT))) + if (TAILQ_EMPTY(&in_ifaddrhead)) { /* XXX broken! 
*/ + return EADDRNOTAVAIL; + } + if (!(so->so_options & (SO_REUSEADDR | SO_REUSEPORT))) { wild = 1; + } bzero(&laddr, sizeof(laddr)); @@ -740,14 +753,14 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) /* another thread completed the bind */ lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EINVAL); + return EINVAL; } if (nam != NULL) { - if (nam->sa_len != sizeof (struct sockaddr_in)) { + if (nam->sa_len != sizeof(struct sockaddr_in)) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EINVAL); + return EINVAL; } #if 0 /* @@ -757,7 +770,7 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) if (nam->sa_family != AF_INET) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } #endif /* 0 */ lport = SIN(nam)->sin_port; @@ -770,23 +783,24 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) * and a multicast address is bound on both * new and duplicated sockets. */ - if (so->so_options & SO_REUSEADDR) - reuseport = SO_REUSEADDR|SO_REUSEPORT; + if (so->so_options & SO_REUSEADDR) { + reuseport = SO_REUSEADDR | SO_REUSEPORT; + } } else if (SIN(nam)->sin_addr.s_addr != INADDR_ANY) { struct sockaddr_in sin; struct ifaddr *ifa; /* Sanitized for interface address searches */ - bzero(&sin, sizeof (sin)); + bzero(&sin, sizeof(sin)); sin.sin_family = AF_INET; - sin.sin_len = sizeof (struct sockaddr_in); + sin.sin_len = sizeof(struct sockaddr_in); sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr; ifa = ifa_ifwithaddr(SA(&sin)); if (ifa == NULL) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } else { /* * Opportunistically determine the outbound @@ -808,7 +822,7 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) #if !CONFIG_EMBEDDED if (ntohs(lport) < IPPORT_RESERVED && - SIN(nam)->sin_addr.s_addr != 0) { + SIN(nam)->sin_addr.s_addr != 0) { cred = kauth_cred_proc_ref(p); error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0); @@ -816,15 +830,15 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) if (error != 0) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EACCES); + return EACCES; } } #endif /* !CONFIG_EMBEDDED */ if (!IN_MULTICAST(ntohl(SIN(nam)->sin_addr.s_addr)) && (u = kauth_cred_getuid(so->so_cred)) != 0 && (t = in_pcblookup_local_and_cleanup( - inp->inp_pcbinfo, SIN(nam)->sin_addr, lport, - INPLOOKUP_WILDCARD)) != NULL && + inp->inp_pcbinfo, SIN(nam)->sin_addr, lport, + INPLOOKUP_WILDCARD)) != NULL && (SIN(nam)->sin_addr.s_addr != INADDR_ANY || t->inp_laddr.s_addr != INADDR_ANY || !(t->inp_socket->so_options & SO_REUSEPORT)) && @@ -834,16 +848,18 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) t->inp_laddr.s_addr != INADDR_ANY)) { if ((t->inp_socket->so_flags & SOF_NOTIFYCONFLICT) && - !(so->so_flags & SOF_NOTIFYCONFLICT)) + !(so->so_flags & SOF_NOTIFYCONFLICT)) { conflict = 1; + } lck_rw_done(pcbinfo->ipi_lock); - if (conflict) + if (conflict) { in_pcb_conflict_post_msg(lport); + } socket_lock(so, 0); - return (EADDRINUSE); + return EADDRINUSE; } t = in_pcblookup_local_and_cleanup(pcbinfo, SIN(nam)->sin_addr, lport, wild); @@ -856,18 +872,19 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) SOCK_DOM(t->inp_socket) != PF_INET6) #endif /* INET6 */ { - if ((t->inp_socket->so_flags & SOF_NOTIFYCONFLICT) && - !(so->so_flags & SOF_NOTIFYCONFLICT)) + !(so->so_flags & SOF_NOTIFYCONFLICT)) { conflict = 1; + } 
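/*
 * A minimal userland sketch (not part of this patch; port 5000 and
 * the helper name are illustrative only) of the bind(2) semantics the
 * conflict checks above enforce: a second bind to an in-use port gets
 * EADDRINUSE unless the sockets opted into sharing.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

static int
bind_udp_port(int share)
{
	struct sockaddr_in sin;
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	if (s < 0) {
		return -1;
	}
	if (share) {
		/* opt into port sharing before bind(2) */
		(void) setsockopt(s, SOL_SOCKET, SO_REUSEPORT,
		    &share, sizeof(share));
	}
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_len = sizeof(sin);
	sin.sin_port = htons(5000);
	sin.sin_addr.s_addr = htonl(INADDR_ANY);
	/* without `share`, a second caller fails here with EADDRINUSE */
	return bind(s, (struct sockaddr *)&sin, sizeof(sin));
}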
lck_rw_done(pcbinfo->ipi_lock); - if (conflict) + if (conflict) { in_pcb_conflict_post_msg(lport); + } socket_lock(so, 0); - return (EADDRINUSE); + return EADDRINUSE; } } } @@ -888,7 +905,7 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) */ anonport = TRUE; if (inp->inp_flags & INP_HIGHPORT) { - first = ipport_hifirstauto; /* sysctl */ + first = ipport_hifirstauto; /* sysctl */ last = ipport_hilastauto; lastport = &pcbinfo->ipi_lasthi; } else if (inp->inp_flags & INP_LOWPORT) { @@ -899,20 +916,21 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) if (error != 0) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (error); + return error; } - first = ipport_lowfirstauto; /* 1023 */ - last = ipport_lowlastauto; /* 600 */ + first = ipport_lowfirstauto; /* 1023 */ + last = ipport_lowlastauto; /* 600 */ lastport = &pcbinfo->ipi_lastlow; } else { - first = ipport_firstauto; /* sysctl */ + first = ipport_firstauto; /* sysctl */ last = ipport_lastauto; lastport = &pcbinfo->ipi_lastport; } /* No point in randomizing if only one port is available */ - if (first == last) + if (first == last) { randomport = 0; + } /* * Simple check to ensure all ports are not used up causing * a deadlock here. @@ -927,7 +945,7 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) * counting down */ if (randomport) { - read_frandom(&rand_port, sizeof (rand_port)); + read_frandom(&rand_port, sizeof(rand_port)); *lastport = first - (rand_port % (first - last)); } @@ -938,14 +956,15 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) found = false; do { - if (count-- < 0) { /* completely used? */ + if (count-- < 0) { /* completely used? */ lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } --*lastport; - if (*lastport > first || *lastport < last) + if (*lastport > first || *lastport < last) { *lastport = first; + } lport = htons(*lastport); found = in_pcblookup_local_and_cleanup(pcbinfo, @@ -958,7 +977,7 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) * counting up */ if (randomport) { - read_frandom(&rand_port, sizeof (rand_port)); + read_frandom(&rand_port, sizeof(rand_port)); *lastport = first + (rand_port % (first - last)); } @@ -969,14 +988,15 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) found = false; do { - if (count-- < 0) { /* completely used? */ + if (count-- < 0) { /* completely used? 
*/ lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ++*lastport; - if (*lastport < first || *lastport > last) + if (*lastport < first || *lastport > last) { *lastport = first; + } lport = htons(*lastport); found = in_pcblookup_local_and_cleanup(pcbinfo, @@ -993,12 +1013,12 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) */ if (inp->inp_state == INPCB_STATE_DEAD) { lck_rw_done(pcbinfo->ipi_lock); - return (ECONNABORTED); + return ECONNABORTED; } if (inp->inp_lport != 0 || inp->inp_laddr.s_addr != INADDR_ANY) { lck_rw_done(pcbinfo->ipi_lock); - return (EINVAL); + return EINVAL; } if (laddr.s_addr != INADDR_ANY) { @@ -1006,36 +1026,38 @@ in_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) inp->inp_last_outifp = outif; } inp->inp_lport = lport; - if (anonport) + if (anonport) { inp->inp_flags |= INP_ANONPORT; + } if (in_pcbinshash(inp, 1) != 0) { inp->inp_laddr.s_addr = INADDR_ANY; inp->inp_last_outifp = NULL; inp->inp_lport = 0; - if (anonport) + if (anonport) { inp->inp_flags &= ~INP_ANONPORT; + } lck_rw_done(pcbinfo->ipi_lock); - return (EAGAIN); + return EAGAIN; } lck_rw_done(pcbinfo->ipi_lock); sflt_notify(so, sock_evt_bound, NULL); - return (0); + return 0; } -#define APN_FALLBACK_IP_FILTER(a) \ +#define APN_FALLBACK_IP_FILTER(a) \ (IN_LINKLOCAL(ntohl((a)->sin_addr.s_addr)) || \ IN_LOOPBACK(ntohl((a)->sin_addr.s_addr)) || \ IN_ZERONET(ntohl((a)->sin_addr.s_addr)) || \ IN_MULTICAST(ntohl((a)->sin_addr.s_addr)) || \ IN_PRIVATE(ntohl((a)->sin_addr.s_addr))) -#define APN_FALLBACK_NOTIF_INTERVAL 2 /* Magic Number */ +#define APN_FALLBACK_NOTIF_INTERVAL 2 /* Magic Number */ static uint64_t last_apn_fallback = 0; static boolean_t -apn_fallback_required (proc_t proc, struct socket *so, struct sockaddr_in *p_dstv4) +apn_fallback_required(proc_t proc, struct socket *so, struct sockaddr_in *p_dstv4) { uint64_t timenow; struct sockaddr_storage lookup_default_addr; @@ -1043,14 +1065,17 @@ apn_fallback_required (proc_t proc, struct socket *so, struct sockaddr_in *p_dst VERIFY(proc != NULL); - if (apn_fallbk_enabled == FALSE) + if (apn_fallbk_enabled == FALSE) { return FALSE; + } - if (proc == kernproc) + if (proc == kernproc) { return FALSE; + } - if (so && (so->so_options & SO_NOAPNFALLBK)) + if (so && (so->so_options & SO_NOAPNFALLBK)) { return FALSE; + } timenow = net_uptime(); if ((timenow - last_apn_fallback) < APN_FALLBACK_NOTIF_INTERVAL) { @@ -1058,8 +1083,9 @@ apn_fallback_required (proc_t proc, struct socket *so, struct sockaddr_in *p_dst return FALSE; } - if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4)) + if (p_dstv4 && APN_FALLBACK_IP_FILTER(p_dstv4)) { return FALSE; + } /* Check if we have unscoped IPv6 default route through cellular */ bzero(&lookup_default_addr, sizeof(lookup_default_addr)); @@ -1163,7 +1189,7 @@ apn_fallback_trigger(proc_t proc, struct socket *so) proc_getexecutableuuid(proc, application_uuid, sizeof(application_uuid)); - bzero(&ev_msg, sizeof (struct kev_msg)); + bzero(&ev_msg, sizeof(struct kev_msg)); ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_NETWORK_CLASS; ev_msg.kev_subclass = KEV_NETEVENT_SUBCLASS; @@ -1215,14 +1241,18 @@ in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr, int error = 0; boolean_t restricted = FALSE; - if (outif != NULL) + if (outif != NULL) { *outif = NULL; - if (nam->sa_len != sizeof (struct sockaddr_in)) - return (EINVAL); - if (SIN(nam)->sin_family != AF_INET) - return (EAFNOSUPPORT); - if (raw == 0 && 
SIN(nam)->sin_port == 0) - return (EADDRNOTAVAIL); + } + if (nam->sa_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } + if (SIN(nam)->sin_family != AF_INET) { + return EAFNOSUPPORT; + } + if (raw == 0 && SIN(nam)->sin_port == 0) { + return EADDRNOTAVAIL; + } /* * If the destination address is INADDR_ANY, @@ -1254,15 +1284,16 @@ in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr, if (inp->inp_laddr.s_addr != INADDR_ANY) { VERIFY(ia == NULL); *laddr = inp->inp_laddr; - return (0); + return 0; } /* * If the ifscope is specified by the caller (e.g. IP_PKTINFO) * then it overrides the sticky ifscope set for the socket. */ - if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF)) + if (ifscope == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF)) { ifscope = inp->inp_boundifp->if_index; + } /* * If route is known or can be allocated now, @@ -1270,33 +1301,37 @@ in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr, * Note that we should check the address family of the cached * destination, in case of sharing the cache with IPv6. */ - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_LOCK_SPIN(ro->ro_rt); + } if (ROUTE_UNUSABLE(ro) || ro->ro_dst.sa_family != AF_INET || SIN(&ro->ro_dst)->sin_addr.s_addr != SIN(nam)->sin_addr.s_addr || (inp->inp_socket->so_options & SO_DONTROUTE)) { - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_UNLOCK(ro->ro_rt); + } ROUTE_RELEASE(ro); } if (!(inp->inp_socket->so_options & SO_DONTROUTE) && (ro->ro_rt == NULL || ro->ro_rt->rt_ifp == NULL)) { - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_UNLOCK(ro->ro_rt); + } ROUTE_RELEASE(ro); /* No route yet, so try to acquire one */ - bzero(&ro->ro_dst, sizeof (struct sockaddr_in)); + bzero(&ro->ro_dst, sizeof(struct sockaddr_in)); ro->ro_dst.sa_family = AF_INET; - ro->ro_dst.sa_len = sizeof (struct sockaddr_in); + ro->ro_dst.sa_len = sizeof(struct sockaddr_in); SIN(&ro->ro_dst)->sin_addr = SIN(nam)->sin_addr; rtalloc_scoped(ro, ifscope); - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_LOCK_SPIN(ro->ro_rt); + } } /* Sanitized local copy for interface address searches */ - bzero(&sin, sizeof (sin)); + bzero(&sin, sizeof(sin)); sin.sin_family = AF_INET; - sin.sin_len = sizeof (struct sockaddr_in); + sin.sin_len = sizeof(struct sockaddr_in); sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr; /* * If we did not find (or use) a route, assume dest is reachable @@ -1308,13 +1343,15 @@ in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr, VERIFY(ia == NULL); ia = ifatoia(ifa_ifwithdstaddr(SA(&sin))); - if (ia == NULL) + if (ia == NULL) { ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope)); + } error = ((ia == NULL) ? ENETUNREACH : 0); if (apn_fallback_required(proc, inp->inp_socket, - (void *)nam)) + (void *)nam)) { apn_fallback_trigger(proc, inp->inp_socket); + } goto done; } @@ -1352,8 +1389,9 @@ in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr, * The reason is that we only want to send notification * if the flow was ever used to send data. 
*/ - if (IS_INTF_CLAT46(ro->ro_rt->rt_ifp)) + if (IS_INTF_CLAT46(ro->ro_rt->rt_ifp)) { inp->inp_flags2 |= INP2_CLAT46_FLOW; + } RT_UNLOCK(ro->ro_rt); error = 0; @@ -1371,15 +1409,18 @@ in_pcbladdr(struct inpcb *inp, struct sockaddr *nam, struct in_addr *laddr, */ VERIFY(ia == NULL); ia = ifatoia(ifa_ifwithdstaddr(SA(&sin))); - if (ia == NULL) + if (ia == NULL) { ia = ifatoia(ifa_ifwithaddr_scoped(SA(&sin), ifscope)); - if (ia == NULL) + } + if (ia == NULL) { ia = ifatoia(ifa_ifwithnet_scoped(SA(&sin), ifscope)); + } if (ia == NULL) { RT_LOCK(ro->ro_rt); ia = ifatoia(ro->ro_rt->rt_ifa); - if (ia != NULL) + if (ia != NULL) { IFA_ADDREF(&ia->ia_ifa); + } RT_UNLOCK(ro->ro_rt); } error = ((ia == NULL) ? ENETUNREACH : 0); @@ -1400,20 +1441,24 @@ done: if (imo->imo_multicast_ifp != NULL && (ia == NULL || ia->ia_ifp != imo->imo_multicast_ifp)) { ifp = imo->imo_multicast_ifp; - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } lck_rw_lock_shared(in_ifaddr_rwlock); TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) { - if (ia->ia_ifp == ifp) + if (ia->ia_ifp == ifp) { break; + } } - if (ia != NULL) + if (ia != NULL) { IFA_ADDREF(&ia->ia_ifa); + } lck_rw_done(in_ifaddr_rwlock); - if (ia == NULL) + if (ia == NULL) { error = EADDRNOTAVAIL; - else + } else { error = 0; + } } IMO_UNLOCK(imo); } @@ -1438,16 +1483,18 @@ done: if (outif != NULL) { struct ifnet *ifp; - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { ifp = ro->ro_rt->rt_ifp; - else + } else { ifp = ia->ia_ifp; + } VERIFY(ifp != NULL); IFA_CONVERT_LOCK(&ia->ia_ifa); - ifnet_reference(ifp); /* for caller */ - if (*outif != NULL) + ifnet_reference(ifp); /* for caller */ + if (*outif != NULL) { ifnet_release(*outif); + } *outif = ifp; } IFA_UNLOCK(&ia->ia_ifa); @@ -1463,7 +1510,7 @@ done: SO_FILT_HINT_IFDENIED)); } - return (error); + return error; } /* @@ -1487,15 +1534,17 @@ in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p, struct socket *so = inp->inp_socket; #if CONTENT_FILTER - if (so) + if (so) { so->so_state_change_cnt++; + } #endif /* * Call inner routine, to assign local interface address. */ - if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0) - return (error); + if ((error = in_pcbladdr(inp, nam, &laddr, ifscope, outif, 0)) != 0) { + return error; + } socket_unlock(so, 0); pcb = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port, @@ -1508,18 +1557,20 @@ in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p, * embryonic socket, it can get aborted if another thread is closing * the listener (radar 7947600). */ - if ((so->so_flags & SOF_ABORTED) != 0) - return (ECONNREFUSED); + if ((so->so_flags & SOF_ABORTED) != 0) { + return ECONNREFUSED; + } if (pcb != NULL) { in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0); - return (EADDRINUSE); + return EADDRINUSE; } if (inp->inp_laddr.s_addr == INADDR_ANY) { if (inp->inp_lport == 0) { error = in_pcbbind(inp, NULL, p); - if (error) - return (error); + if (error) { + return error; + } } if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) { /* @@ -1543,8 +1594,9 @@ in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p, * This routines can be refactored and handle this better * in future. 
*/ - if (inp->inp_lport == 0) - return (EINVAL); + if (inp->inp_lport == 0) { + return EINVAL; + } if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) { /* * Lock inversion issue, mostly with udp @@ -1557,11 +1609,12 @@ in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p, } inp->inp_faddr = sin->sin_addr; inp->inp_fport = sin->sin_port; - if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) + if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) { nstat_pcb_invalidate_cache(inp); + } in_pcbrehash(inp); lck_rw_done(inp->inp_pcbinfo->ipi_lock); - return (0); + return 0; } void @@ -1569,15 +1622,17 @@ in_pcbdisconnect(struct inpcb *inp) { struct socket *so = inp->inp_socket; - if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) + if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) { nstat_pcb_cache(inp); + } inp->inp_faddr.s_addr = INADDR_ANY; inp->inp_fport = 0; #if CONTENT_FILTER - if (so) + if (so) { so->so_state_change_cnt++; + } #endif if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) { @@ -1594,8 +1649,9 @@ in_pcbdisconnect(struct inpcb *inp) * so check for SOF_MP_SUBFLOW socket flag before detaching the PCB; * when the socket is closed for real, SOF_MP_SUBFLOW would be cleared. */ - if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) + if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) { in_pcbdetach(inp); + } } void @@ -1627,8 +1683,9 @@ in_pcbdetach(struct inpcb *inp) * before we detach it. */ if (nstat_collect && - (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) + (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) { nstat_pcb_detach(inp); + } /* Free memory buffer held for generating keep alives */ if (inp->inp_keepalive_data != NULL) { @@ -1670,17 +1727,17 @@ in_pcbdetach(struct inpcb *inp) */ if (inp->inp_stat != NULL && (inp->inp_stat->txbytes != 0 || - inp->inp_stat->rxbytes !=0)) { + inp->inp_stat->rxbytes != 0)) { if (so->so_flags & SOF_DELEGATED) { in6_clat46_event_enqueue_nwk_wq_entry( - IN6_CLAT46_EVENT_V4_FLOW, - so->e_pid, - so->e_uuid); + IN6_CLAT46_EVENT_V4_FLOW, + so->e_pid, + so->e_uuid); } else { in6_clat46_event_enqueue_nwk_wq_entry( - IN6_CLAT46_EVENT_V4_FLOW, - so->last_pid, - so->last_uuid); + IN6_CLAT46_EVENT_V4_FLOW, + so->last_pid, + so->last_uuid); } } } @@ -1798,22 +1855,23 @@ in_getsockaddr(struct socket *so, struct sockaddr **nam) /* * Do the malloc first in case it blocks. 
*/ - MALLOC(sin, struct sockaddr_in *, sizeof (*sin), M_SONAME, M_WAITOK); - if (sin == NULL) - return (ENOBUFS); - bzero(sin, sizeof (*sin)); + MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK); + if (sin == NULL) { + return ENOBUFS; + } + bzero(sin, sizeof(*sin)); sin->sin_family = AF_INET; - sin->sin_len = sizeof (*sin); + sin->sin_len = sizeof(*sin); if ((inp = sotoinpcb(so)) == NULL) { FREE(sin, M_SONAME); - return (EINVAL); + return EINVAL; } sin->sin_port = inp->inp_lport; sin->sin_addr = inp->inp_laddr; *nam = (struct sockaddr *)sin; - return (0); + return 0; } int @@ -1823,17 +1881,18 @@ in_getsockaddr_s(struct socket *so, struct sockaddr_in *ss) struct inpcb *inp; VERIFY(ss != NULL); - bzero(ss, sizeof (*ss)); + bzero(ss, sizeof(*ss)); sin->sin_family = AF_INET; - sin->sin_len = sizeof (*sin); + sin->sin_len = sizeof(*sin); - if ((inp = sotoinpcb(so)) == NULL) - return (EINVAL); + if ((inp = sotoinpcb(so)) == NULL) { + return EINVAL; + } sin->sin_port = inp->inp_lport; sin->sin_addr = inp->inp_laddr; - return (0); + return 0; } int @@ -1845,22 +1904,23 @@ in_getpeeraddr(struct socket *so, struct sockaddr **nam) /* * Do the malloc first in case it blocks. */ - MALLOC(sin, struct sockaddr_in *, sizeof (*sin), M_SONAME, M_WAITOK); - if (sin == NULL) - return (ENOBUFS); - bzero((caddr_t)sin, sizeof (*sin)); + MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK); + if (sin == NULL) { + return ENOBUFS; + } + bzero((caddr_t)sin, sizeof(*sin)); sin->sin_family = AF_INET; - sin->sin_len = sizeof (*sin); + sin->sin_len = sizeof(*sin); if ((inp = sotoinpcb(so)) == NULL) { FREE(sin, M_SONAME); - return (EINVAL); + return EINVAL; } sin->sin_port = inp->inp_fport; sin->sin_addr = inp->inp_faddr; *nam = (struct sockaddr *)sin; - return (0); + return 0; } void @@ -1873,14 +1933,17 @@ in_pcbnotifyall(struct inpcbinfo *pcbinfo, struct in_addr faddr, LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { #if INET6 - if (!(inp->inp_vflag & INP_IPV4)) + if (!(inp->inp_vflag & INP_IPV4)) { continue; + } #endif /* INET6 */ if (inp->inp_faddr.s_addr != faddr.s_addr || - inp->inp_socket == NULL) + inp->inp_socket == NULL) { continue; - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) + } + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { continue; + } socket_lock(inp->inp_socket, 1); (*notify)(inp, errno); (void) in_pcb_checkstate(inp, WNT_RELEASE, 1); @@ -1927,11 +1990,13 @@ in_losing(struct inpcb *inp) */ release = TRUE; } - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } } - if (rt == NULL || release) + if (rt == NULL || release) { ROUTE_RELEASE(&inp->inp_route); + } } /* @@ -1957,11 +2022,13 @@ in_rtchange(struct inpcb *inp, int errno) */ release = TRUE; } - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } } - if (rt == NULL || release) + if (rt == NULL || release) { ROUTE_RELEASE(&inp->inp_route); + } } /* @@ -1987,8 +2054,9 @@ in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { #if INET6 - if (!(inp->inp_vflag & INP_IPV4)) + if (!(inp->inp_vflag & INP_IPV4)) { continue; + } #endif /* INET6 */ if (inp->inp_faddr.s_addr == INADDR_ANY && inp->inp_laddr.s_addr == laddr.s_addr && @@ -1996,14 +2064,14 @@ in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, /* * Found. */ - return (inp); + return inp; } } /* * Not found. 
*/ KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, 0, 0, 0, 0, 0); - return (NULL); + return NULL; } else { struct inpcbporthead *porthash; struct inpcbport *phd; @@ -2017,8 +2085,9 @@ in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport, pcbinfo->ipi_porthashmask)]; LIST_FOREACH(phd, porthash, phd_hash) { - if (phd->phd_port == lport) + if (phd->phd_port == lport) { break; + } } if (phd != NULL) { /* @@ -2028,20 +2097,24 @@ in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) { wildcard = 0; #if INET6 - if (!(inp->inp_vflag & INP_IPV4)) + if (!(inp->inp_vflag & INP_IPV4)) { continue; + } #endif /* INET6 */ - if (inp->inp_faddr.s_addr != INADDR_ANY) + if (inp->inp_faddr.s_addr != INADDR_ANY) { wildcard++; + } if (inp->inp_laddr.s_addr != INADDR_ANY) { - if (laddr.s_addr == INADDR_ANY) + if (laddr.s_addr == INADDR_ANY) { wildcard++; - else if (inp->inp_laddr.s_addr != - laddr.s_addr) + } else if (inp->inp_laddr.s_addr != + laddr.s_addr) { continue; + } } else { - if (laddr.s_addr != INADDR_ANY) + if (laddr.s_addr != INADDR_ANY) { wildcard++; + } } if (wildcard < matchwild) { match = inp; @@ -2054,7 +2127,7 @@ in_pcblookup_local(struct inpcbinfo *pcbinfo, struct in_addr laddr, } KERNEL_DEBUG(DBG_FNC_PCB_LOOKUP | DBG_FUNC_END, match, 0, 0, 0, 0); - return (match); + return match; } } @@ -2091,11 +2164,13 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { #if INET6 - if (!(inp->inp_vflag & INP_IPV4)) + if (!(inp->inp_vflag & INP_IPV4)) { continue; + } #endif /* INET6 */ - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if (inp->inp_faddr.s_addr == faddr.s_addr && inp->inp_laddr.s_addr == laddr.s_addr && @@ -2106,12 +2181,12 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, * Found. */ *uid = kauth_cred_getuid( - inp->inp_socket->so_cred); + inp->inp_socket->so_cred); *gid = kauth_cred_getgid( - inp->inp_socket->so_cred); + inp->inp_socket->so_cred); } lck_rw_done(pcbinfo->ipi_lock); - return (found); + return found; } } @@ -2120,38 +2195,40 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, * Not found. 
*/ lck_rw_done(pcbinfo->ipi_lock); - return (0); + return 0; } head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { #if INET6 - if (!(inp->inp_vflag & INP_IPV4)) + if (!(inp->inp_vflag & INP_IPV4)) { continue; + } #endif /* INET6 */ - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if (inp->inp_faddr.s_addr == INADDR_ANY && inp->inp_lport == lport) { if (inp->inp_laddr.s_addr == laddr.s_addr) { if ((found = (inp->inp_socket != NULL))) { *uid = kauth_cred_getuid( - inp->inp_socket->so_cred); + inp->inp_socket->so_cred); *gid = kauth_cred_getgid( - inp->inp_socket->so_cred); + inp->inp_socket->so_cred); } lck_rw_done(pcbinfo->ipi_lock); - return (found); + return found; } else if (inp->inp_laddr.s_addr == INADDR_ANY) { #if INET6 if (inp->inp_socket && - SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) + SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) { local_wild_mapped = inp; - else + } else #endif /* INET6 */ - local_wild = inp; + local_wild = inp; } } } @@ -2160,25 +2237,25 @@ in_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in_addr faddr, if (local_wild_mapped != NULL) { if ((found = (local_wild_mapped->inp_socket != NULL))) { *uid = kauth_cred_getuid( - local_wild_mapped->inp_socket->so_cred); + local_wild_mapped->inp_socket->so_cred); *gid = kauth_cred_getgid( - local_wild_mapped->inp_socket->so_cred); + local_wild_mapped->inp_socket->so_cred); } lck_rw_done(pcbinfo->ipi_lock); - return (found); + return found; } #endif /* INET6 */ lck_rw_done(pcbinfo->ipi_lock); - return (0); + return 0; } if ((found = (local_wild->inp_socket != NULL))) { *uid = kauth_cred_getuid( - local_wild->inp_socket->so_cred); + local_wild->inp_socket->so_cred); *gid = kauth_cred_getgid( - local_wild->inp_socket->so_cred); + local_wild->inp_socket->so_cred); } lck_rw_done(pcbinfo->ipi_lock); - return (found); + return found; } /* @@ -2210,11 +2287,13 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { #if INET6 - if (!(inp->inp_vflag & INP_IPV4)) + if (!(inp->inp_vflag & INP_IPV4)) { continue; + } #endif /* INET6 */ - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if (inp->inp_faddr.s_addr == faddr.s_addr && inp->inp_laddr.s_addr == laddr.s_addr && @@ -2226,11 +2305,11 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) { lck_rw_done(pcbinfo->ipi_lock); - return (inp); + return inp; } else { /* it's there but dead, say it isn't found */ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } } } @@ -2240,18 +2319,20 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, * Not found. 
*/ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { #if INET6 - if (!(inp->inp_vflag & INP_IPV4)) + if (!(inp->inp_vflag & INP_IPV4)) { continue; + } #endif /* INET6 */ - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if (inp->inp_faddr.s_addr == INADDR_ANY && inp->inp_lport == lport) { @@ -2259,19 +2340,19 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) { lck_rw_done(pcbinfo->ipi_lock); - return (inp); + return inp; } else { /* it's dead; say it isn't found */ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } } else if (inp->inp_laddr.s_addr == INADDR_ANY) { #if INET6 - if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) + if (SOCK_CHECK_DOM(inp->inp_socket, PF_INET6)) { local_wild_mapped = inp; - else + } else #endif /* INET6 */ - local_wild = inp; + local_wild = inp; } } } @@ -2281,26 +2362,26 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, if (in_pcb_checkstate(local_wild_mapped, WNT_ACQUIRE, 0) != WNT_STOPUSING) { lck_rw_done(pcbinfo->ipi_lock); - return (local_wild_mapped); + return local_wild_mapped; } else { /* it's dead; say it isn't found */ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } } #endif /* INET6 */ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } if (in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) { lck_rw_done(pcbinfo->ipi_lock); - return (local_wild); + return local_wild; } /* * It's either not found or is already dead. */ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } /* @@ -2308,7 +2389,7 @@ in_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in_addr faddr, * * @param inp Pointer to internet protocol control block * @param locked Implies if ipi_lock (protecting pcb list) - * is already locked or not. + * is already locked or not. * * @return int error on failure and 0 on success */ @@ -2343,16 +2424,16 @@ in_pcbinshash(struct inpcb *inp, int locked) if (!locked) { lck_rw_done(pcbinfo->ipi_lock); } - return (ECONNABORTED); + return ECONNABORTED; } #if INET6 - if (inp->inp_vflag & INP_IPV6) + if (inp->inp_vflag & INP_IPV6) { hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */; - else + } else #endif /* INET6 */ - hashkey_faddr = inp->inp_faddr.s_addr; + hashkey_faddr = inp->inp_faddr.s_addr; inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport, inp->inp_fport, pcbinfo->ipi_hashmask); @@ -2366,20 +2447,22 @@ in_pcbinshash(struct inpcb *inp, int locked) * Go through port list and look for a head for this lport. */ LIST_FOREACH(phd, pcbporthash, phd_hash) { - if (phd->phd_port == inp->inp_lport) + if (phd->phd_port == inp->inp_lport) { break; + } } /* * If none exists, malloc one and tack it on. 
*/ if (phd == NULL) { - MALLOC(phd, struct inpcbport *, sizeof (struct inpcbport), + MALLOC(phd, struct inpcbport *, sizeof(struct inpcbport), M_PCB, M_WAITOK); if (phd == NULL) { - if (!locked) + if (!locked) { lck_rw_done(pcbinfo->ipi_lock); - return (ENOBUFS); /* XXX */ + } + return ENOBUFS; /* XXX */ } phd->phd_port = inp->inp_lport; LIST_INIT(&phd->phd_pcblist); @@ -2394,15 +2477,16 @@ in_pcbinshash(struct inpcb *inp, int locked) LIST_INSERT_HEAD(pcbhash, inp, inp_hash); inp->inp_flags2 |= INP2_INHASHLIST; - if (!locked) + if (!locked) { lck_rw_done(pcbinfo->ipi_lock); + } #if NECP // This call catches the original setting of the local address inp_update_necp_policy(inp, NULL, NULL, 0); #endif /* NECP */ - return (0); + return 0; } /* @@ -2418,11 +2502,11 @@ in_pcbrehash(struct inpcb *inp) u_int32_t hashkey_faddr; #if INET6 - if (inp->inp_vflag & INP_IPV6) + if (inp->inp_vflag & INP_IPV6) { hashkey_faddr = inp->in6p_faddr.s6_addr32[3] /* XXX */; - else + } else #endif /* INET6 */ - hashkey_faddr = inp->inp_faddr.s_addr; + hashkey_faddr = inp->inp_faddr.s_addr; inp->inp_hash_element = INP_PCBHASH(hashkey_faddr, inp->inp_lport, inp->inp_fport, inp->inp_pcbinfo->ipi_hashmask); @@ -2490,7 +2574,7 @@ in_pcbremlists(struct inpcb *inp) } if (inp->inp_flags2 & INP2_IN_FCTREE) { - inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED|INPFC_REMOVE)); + inp_fc_getinp(inp->inp_flowhash, (INPFC_SOLOCKED | INPFC_REMOVE)); VERIFY(!(inp->inp_flags2 & INP2_IN_FCTREE)); } @@ -2520,8 +2604,9 @@ in_pcb_checkstate(struct inpcb *pcb, int mode, int locked) * STOPUSING, if success we're good, if it's in use, will * be marked later */ - if (locked == 0) + if (locked == 0) { socket_lock(pcb->inp_socket, 1); + } pcb->inp_state = INPCB_STATE_DEAD; stopusing: @@ -2530,20 +2615,22 @@ stopusing: __func__, pcb, pcb->inp_socket); /* NOTREACHED */ } - if (locked == 0) + if (locked == 0) { socket_unlock(pcb->inp_socket, 1); + } inpcb_gc_sched(pcb->inp_pcbinfo, INPCB_TIMER_FAST); origwant = *wantcnt; - if ((UInt16) origwant == 0xffff) /* should stop using */ - return (WNT_STOPUSING); + if ((UInt16) origwant == 0xffff) { /* should stop using */ + return WNT_STOPUSING; + } newwant = 0xffff; if ((UInt16) origwant == 0) { /* try to mark it as unsuable now */ OSCompareAndSwap(origwant, newwant, wantcnt); } - return (WNT_STOPUSING); + return WNT_STOPUSING; case WNT_ACQUIRE: /* @@ -2555,19 +2642,20 @@ stopusing: origwant = *wantcnt; if ((UInt16) origwant == 0xffff) { /* should stop using */ - return (WNT_STOPUSING); + return WNT_STOPUSING; } newwant = origwant + 1; } while (!OSCompareAndSwap(origwant, newwant, wantcnt)); - return (WNT_ACQUIRE); + return WNT_ACQUIRE; case WNT_RELEASE: /* * Release reference. 
If result is null and pcb state * is DEAD, set wanted bit to STOPUSING */ - if (locked == 0) + if (locked == 0) { socket_lock(pcb->inp_socket, 1); + } do { origwant = *wantcnt; @@ -2578,24 +2666,27 @@ stopusing: } if ((UInt16) origwant == 0xffff) { /* should stop using */ - if (locked == 0) + if (locked == 0) { socket_unlock(pcb->inp_socket, 1); - return (WNT_STOPUSING); + } + return WNT_STOPUSING; } newwant = origwant - 1; } while (!OSCompareAndSwap(origwant, newwant, wantcnt)); - if (pcb->inp_state == INPCB_STATE_DEAD) + if (pcb->inp_state == INPCB_STATE_DEAD) { goto stopusing; + } if (pcb->inp_socket->so_usecount < 0) { panic("%s: RELEASE pcb=%p so=%p usecount is negative\n", __func__, pcb, pcb->inp_socket); /* NOTREACHED */ } - if (locked == 0) + if (locked == 0) { socket_unlock(pcb->inp_socket, 1); - return (WNT_RELEASE); + } + return WNT_RELEASE; default: panic("%s: so=%p not a valid state =%x\n", __func__, @@ -2604,7 +2695,7 @@ stopusing: } /* NOTREACHED */ - return (mode); + return mode; } /* @@ -2615,7 +2706,7 @@ stopusing: void inpcb_to_compat(struct inpcb *inp, struct inpcb_compat *inp_compat) { - bzero(inp_compat, sizeof (*inp_compat)); + bzero(inp_compat, sizeof(*inp_compat)); inp_compat->inp_fport = inp->inp_fport; inp_compat->inp_lport = inp->inp_lport; inp_compat->nat_owner = 0; @@ -2696,10 +2787,11 @@ inp_route_copyout(struct inpcb *inp, struct route *dst) * If the route in the PCB is stale or not for IPv4, blow it away; * this is possible in the case of IPv4-mapped address case. */ - if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET) + if (ROUTE_UNUSABLE(src) || rt_key(src->ro_rt)->sa_family != AF_INET) { ROUTE_RELEASE(src); + } - route_copyout(dst, src, sizeof (*dst)); + route_copyout(dst, src, sizeof(*dst)); } void @@ -2710,10 +2802,11 @@ inp_route_copyin(struct inpcb *inp, struct route *src) socket_lock_assert_owned(inp->inp_socket); /* Minor sanity check */ - if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) + if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) { panic("%s: wrong or corrupted route: %p", __func__, src); + } - route_copyin(src, dst, sizeof (*src)); + route_copyin(src, dst, sizeof(*src)); } /* @@ -2728,7 +2821,7 @@ inp_bindif(struct inpcb *inp, unsigned int ifscope, struct ifnet **pifp) if ((ifscope > (unsigned)if_index) || (ifscope != IFSCOPE_NONE && (ifp = ifindex2ifnet[ifscope]) == NULL)) { ifnet_head_done(); - return (ENXIO); + return ENXIO; } ifnet_head_done(); @@ -2744,18 +2837,20 @@ inp_bindif(struct inpcb *inp, unsigned int ifscope, struct ifnet **pifp) * exact match for the embedded interface scope. 
*/ inp->inp_boundifp = ifp; - if (inp->inp_boundifp == NULL) + if (inp->inp_boundifp == NULL) { inp->inp_flags &= ~INP_BOUND_IF; - else + } else { inp->inp_flags |= INP_BOUND_IF; + } /* Blow away any cached route in the PCB */ ROUTE_RELEASE(&inp->inp_route); - if (pifp != NULL) + if (pifp != NULL) { *pifp = ifp; + } - return (0); + return 0; } /* @@ -2882,13 +2977,14 @@ inp_calc_flowhash(struct inpcb *inp) u_int32_t flowhash = 0; struct inpcb *tmp_inp = NULL; - if (inp_hash_seed == 0) + if (inp_hash_seed == 0) { inp_hash_seed = RandomULong(); + } - bzero(&fh, sizeof (fh)); + bzero(&fh, sizeof(fh)); - bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof (fh.infh_laddr)); - bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof (fh.infh_faddr)); + bcopy(&inp->inp_dependladdr, &fh.infh_laddr, sizeof(fh.infh_laddr)); + bcopy(&inp->inp_dependfaddr, &fh.infh_faddr, sizeof(fh.infh_faddr)); fh.infh_lport = inp->inp_lport; fh.infh_fport = inp->inp_fport; @@ -2898,7 +2994,7 @@ inp_calc_flowhash(struct inpcb *inp) fh.infh_rand2 = RandomULong(); try_again: - flowhash = net_flowhash(&fh, sizeof (fh), inp_hash_seed); + flowhash = net_flowhash(&fh, sizeof(fh), inp_hash_seed); if (flowhash == 0) { /* try to get a non-zero flowhash */ inp_hash_seed = RandomULong(); @@ -2927,7 +3023,7 @@ try_again: inp->inp_flags2 |= INP2_IN_FCTREE; lck_mtx_unlock(&inp_fc_lck); - return (flowhash); + return flowhash; } void @@ -2937,8 +3033,9 @@ inp_flowadv(uint32_t flowhash) inp = inp_fc_getinp(flowhash, 0); - if (inp == NULL) + if (inp == NULL) { return; + } inp_fc_feedback(inp); } @@ -2948,8 +3045,8 @@ inp_flowadv(uint32_t flowhash) static inline int infc_cmp(const struct inpcb *inp1, const struct inpcb *inp2) { - return (memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash), - sizeof(inp1->inp_flowhash))); + return memcmp(&(inp1->inp_flowhash), &(inp2->inp_flowhash), + sizeof(inp1->inp_flowhash)); } static struct inpcb * @@ -2964,23 +3061,24 @@ inp_fc_getinp(u_int32_t flowhash, u_int32_t flags) if (inp == NULL) { /* inp is not present, return */ lck_mtx_unlock(&inp_fc_lck); - return (NULL); + return NULL; } if (flags & INPFC_REMOVE) { RB_REMOVE(inp_fc_tree, &inp_fc_tree, inp); lck_mtx_unlock(&inp_fc_lck); - bzero(&(inp->infc_link), sizeof (inp->infc_link)); + bzero(&(inp->infc_link), sizeof(inp->infc_link)); inp->inp_flags2 &= ~INP2_IN_FCTREE; - return (NULL); + return NULL; } - if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) + if (in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) { inp = NULL; + } lck_mtx_unlock(&inp_fc_lck); - return (inp); + return inp; } static void @@ -2997,8 +3095,9 @@ inp_fc_feedback(struct inpcb *inp) return; } - if (inp->inp_sndinprog_cnt > 0) + if (inp->inp_sndinprog_cnt > 0) { inp->inp_flags |= INP_FC_FEEDBACK; + } /* * Return if the connection is not in flow-controlled state. 
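Side note on the inp_calc_flowhash() hunk just above: beyond the sizeof() spacing fix, the scheme it touches deserves a gloss. The hash is taken over the local/foreign address pair, the two ports, and two random salts, and a value of 0 is reserved to mean "no flowhash", so the code reseeds and recomputes until the result is non-zero. Below is a minimal user-space sketch of that retry pattern; a seeded FNV-1a stands in for xnu's net_flowhash(), arc4random() stands in for RandomULong(), and every name is illustrative, not kernel API.

    #include <stdint.h>
    #include <stdlib.h>     /* arc4random(), stand-in for RandomULong() */

    /* Stand-in 32-bit hash; any well-mixed keyed hash works here. */
    static uint32_t
    hash32(const void *buf, size_t len, uint32_t seed)
    {
        const uint8_t *p = buf;
        uint32_t h = 2166136261u ^ seed;    /* FNV-1a basis, seeded */

        while (len-- != 0) {
            h = (h ^ *p++) * 16777619u;
        }
        return h;
    }

    /* Loosely modeled on the fields inp_calc_flowhash() copies into fh. */
    struct flow_key {
        uint32_t laddr, faddr;              /* local/foreign address */
        uint16_t lport, fport;              /* local/foreign port */
        uint32_t rand1, rand2;              /* per-flow random salts */
    };

    /* 0 is the "no flowhash" sentinel: reseed and retry until non-zero. */
    static uint32_t
    calc_flowhash(const struct flow_key *fk, uint32_t *seed)
    {
        uint32_t fh;

        if (*seed == 0) {
            *seed = arc4random();
        }
        while ((fh = hash32(fk, sizeof(*fk), *seed)) == 0) {
            *seed = arc4random();           /* mirror the try_again: loop */
        }
        return fh;
    }

The zero sentinel is what lets callers such as inp_flush() treat "flowhash == 0" as "no flow to identify" without a separate validity flag.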
@@ -3011,8 +3110,9 @@ inp_fc_feedback(struct inpcb *inp) } inp_reset_fc_state(inp); - if (SOCK_TYPE(so) == SOCK_STREAM) + if (SOCK_TYPE(so) == SOCK_STREAM) { inp_fc_unthrottle_tcp(inp); + } socket_unlock(so, 1); } @@ -3032,8 +3132,9 @@ inp_reset_fc_state(struct inpcb *inp) } /* Give a write wakeup to unblock the socket */ - if (needwakeup) + if (needwakeup) { sowwakeup(so); + } } int @@ -3047,14 +3148,16 @@ inp_set_fc_state(struct inpcb *inp, int advcode) * flow controlled state and receiving feedback from * the interface */ - if (inp->inp_flags & INP_FC_FEEDBACK) - return (0); + if (inp->inp_flags & INP_FC_FEEDBACK) { + return 0; + } inp->inp_flags &= ~(INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED); if ((tmp_inp = inp_fc_getinp(inp->inp_flowhash, INPFC_SOLOCKED)) != NULL) { - if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING) - return (0); + if (in_pcb_checkstate(tmp_inp, WNT_RELEASE, 1) == WNT_STOPUSING) { + return 0; + } VERIFY(tmp_inp == inp); switch (advcode) { case FADV_FLOW_CONTROLLED: @@ -3069,9 +3172,9 @@ inp_set_fc_state(struct inpcb *inp, int advcode) inp->inp_socket->so_flags |= SOF_SUSPENDED; break; } - return (1); + return 1; } - return (0); + return 0; } /* @@ -3084,24 +3187,28 @@ inp_flush(struct inpcb *inp, int optval) struct ifnet *rtifp, *oifp; /* Either all classes or one of the valid ones */ - if (optval != SO_TC_ALL && !SO_VALID_TC(optval)) - return (EINVAL); + if (optval != SO_TC_ALL && !SO_VALID_TC(optval)) { + return EINVAL; + } /* We need a flow hash for identification */ - if (flowhash == 0) - return (0); + if (flowhash == 0) { + return 0; + } /* Grab the interfaces from the route and pcb */ rtifp = ((inp->inp_route.ro_rt != NULL) ? inp->inp_route.ro_rt->rt_ifp : NULL); oifp = inp->inp_last_outifp; - if (rtifp != NULL) + if (rtifp != NULL) { if_qflush_sc(rtifp, so_tc2msc(optval), flowhash, NULL, NULL, 0); - if (oifp != NULL && oifp != rtifp) + } + if (oifp != NULL && oifp != rtifp) { if_qflush_sc(oifp, so_tc2msc(optval), flowhash, NULL, NULL, 0); + } - return (0); + return 0; } /* @@ -3126,8 +3233,9 @@ inp_get_soprocinfo(struct inpcb *inp, struct so_procinfo *soprocinfo) struct socket *so = inp->inp_socket; soprocinfo->spi_pid = so->last_pid; - if (so->last_pid != 0) + if (so->last_pid != 0) { uuid_copy(soprocinfo->spi_uuid, so->last_uuid); + } /* * When not delegated, the effective pid is the same as the real pid */ @@ -3148,10 +3256,11 @@ inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash, struct inpcb *inp = NULL; int found = 0; - bzero(soprocinfo, sizeof (struct so_procinfo)); + bzero(soprocinfo, sizeof(struct so_procinfo)); - if (!flowhash) - return (-1); + if (!flowhash) { + return -1; + } lck_rw_lock_shared(pcbinfo->ipi_lock); LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { @@ -3165,7 +3274,7 @@ inp_findinpcb_procinfo(struct inpcbinfo *pcbinfo, uint32_t flowhash, } lck_rw_done(pcbinfo->ipi_lock); - return (found); + return found; } #if CONFIG_PROC_UUID_POLICY @@ -3263,9 +3372,9 @@ inp_update_necp_policy(struct inpcb *inp, struct sockaddr *override_local_addr, { necp_socket_find_policy_match(inp, override_local_addr, override_remote_addr, override_bound_interface); if (necp_socket_should_rescope(inp) && - inp->inp_lport == 0 && - inp->inp_laddr.s_addr == INADDR_ANY && - IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { + inp->inp_lport == 0 && + inp->inp_laddr.s_addr == INADDR_ANY && + IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { // If we should rescope, and the socket is not yet bound inp_bindif(inp, 
necp_socket_get_rescope_if_index(inp), NULL); } @@ -3282,15 +3391,17 @@ inp_update_policy(struct inpcb *inp) int err = 0; if (!net_io_policy_uuid || - so == NULL || inp->inp_state == INPCB_STATE_DEAD) - return (0); + so == NULL || inp->inp_state == INPCB_STATE_DEAD) { + return 0; + } /* * Kernel-created sockets that aren't delegating other sockets * are currently exempted from UUID policy checks. */ - if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED)) - return (0); + if (so->last_pid == 0 && !(so->so_flags & SOF_DELEGATED)) { + return 0; + } ogencnt = so->so_policy_gencnt; err = proc_uuid_policy_lookup(((so->so_flags & SOF_DELEGATED) ? @@ -3300,8 +3411,9 @@ inp_update_policy(struct inpcb *inp) * Discard cached generation count if the entry is gone (ENOENT), * so that we go thru the checks below. */ - if (err == ENOENT && ogencnt != 0) + if (err == ENOENT && ogencnt != 0) { so->so_policy_gencnt = 0; + } /* * If the generation count has changed, inspect the policy flags @@ -3326,10 +3438,10 @@ inp_update_policy(struct inpcb *inp) #endif /* NECP */ } - return ((err == ENOENT) ? 0 : err); + return (err == ENOENT) ? 0 : err; #else /* !CONFIG_PROC_UUID_POLICY */ #pragma unused(inp) - return (0); + return 0; #endif /* !CONFIG_PROC_UUID_POLICY */ } @@ -3351,34 +3463,43 @@ _inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp) /* * Inbound restrictions. */ - if (!sorestrictrecv) - return (FALSE); + if (!sorestrictrecv) { + return FALSE; + } - if (ifp == NULL) - return (FALSE); + if (ifp == NULL) { + return FALSE; + } - if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) - return (TRUE); + if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) { + return TRUE; + } - if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) - return (TRUE); + if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) { + return TRUE; + } - if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) - return (TRUE); + if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) { + return TRUE; + } - if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV)) - return (FALSE); + if (!(ifp->if_eflags & IFEF_RESTRICTED_RECV)) { + return FALSE; + } - if (inp->inp_flags & INP_RECV_ANYIF) - return (FALSE); + if (inp->inp_flags & INP_RECV_ANYIF) { + return FALSE; + } - if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp) - return (FALSE); + if ((inp->inp_flags & INP_BOUND_IF) && inp->inp_boundifp == ifp) { + return FALSE; + } - if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) - return (TRUE); + if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) { + return TRUE; + } - return (TRUE); + return TRUE; } boolean_t @@ -3392,7 +3513,7 @@ inp_restricted_recv(struct inpcb *inp, struct ifnet *ifp) current_proc()->p_pid, proc_best_name(current_proc()), ifp->if_xname); } - return (ret); + return ret; } /* @@ -3408,25 +3529,31 @@ _inp_restricted_send(struct inpcb *inp, struct ifnet *ifp) /* * Outbound restrictions. 
*/ - if (!sorestrictsend) - return (FALSE); + if (!sorestrictsend) { + return FALSE; + } - if (ifp == NULL) - return (FALSE); + if (ifp == NULL) { + return FALSE; + } - if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) - return (TRUE); + if (IFNET_IS_CELLULAR(ifp) && INP_NO_CELLULAR(inp)) { + return TRUE; + } - if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) - return (TRUE); + if (IFNET_IS_EXPENSIVE(ifp) && INP_NO_EXPENSIVE(inp)) { + return TRUE; + } - if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) - return (TRUE); + if (IFNET_IS_AWDL_RESTRICTED(ifp) && !INP_AWDL_UNRESTRICTED(inp)) { + return TRUE; + } - if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) - return (TRUE); + if (IFNET_IS_INTCOPROC(ifp) && !INP_INTCOPROC_ALLOWED(inp)) { + return TRUE; + } - return (FALSE); + return FALSE; } boolean_t @@ -3440,7 +3567,7 @@ inp_restricted_send(struct inpcb *inp, struct ifnet *ifp) current_proc()->p_pid, proc_best_name(current_proc()), ifp->if_xname); } - return (ret); + return ret; } inline void @@ -3459,12 +3586,14 @@ inp_count_sndbytes(struct inpcb *inp, u_int32_t th_ack) * There can be data outstanding before the connection * becomes established -- TFO case */ - if (so->so_snd.sb_cc > 0) + if (so->so_snd.sb_cc > 0) { inp_incr_sndbytes_total(so, so->so_snd.sb_cc); + } unsent = inp_get_sndbytes_allunsent(so, th_ack); - if (unsent > 0) + if (unsent > 0) { inp_incr_sndbytes_unsent(so, unsent); + } } } @@ -3510,14 +3639,16 @@ inp_decr_sndbytes_unsent(struct socket *so, int32_t len) struct inpcb *inp = (struct inpcb *)so->so_pcb; struct ifnet *ifp = inp->inp_last_outifp; - if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) + if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) { return; + } if (ifp != NULL) { - if (ifp->if_sndbyte_unsent >= len) + if (ifp->if_sndbyte_unsent >= len) { OSAddAtomic64(-len, &ifp->if_sndbyte_unsent); - else + } else { ifp->if_sndbyte_unsent = 0; + } } } @@ -3526,8 +3657,9 @@ inp_decr_sndbytes_allunsent(struct socket *so, u_int32_t th_ack) { int32_t len; - if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) + if (so == NULL || !(so->so_snd.sb_flags & SB_SNDBYTE_CNT)) { return; + } len = inp_get_sndbytes_allunsent(so, th_ack); inp_decr_sndbytes_unsent(so, len); @@ -3543,5 +3675,5 @@ inp_set_activity_bitmap(struct inpcb *inp) inline void inp_get_activity_bitmap(struct inpcb *inp, activity_bitmap_t *ab) { - bcopy(&inp->inp_nw_activity, ab, sizeof (*ab)); + bcopy(&inp->inp_nw_activity, ab, sizeof(*ab)); } diff --git a/bsd/netinet/in_pcb.h b/bsd/netinet/in_pcb.h index e1a7c9941..90e0e0769 100644 --- a/bsd/netinet/in_pcb.h +++ b/bsd/netinet/in_pcb.h @@ -68,7 +68,7 @@ */ #ifndef _NETINET_IN_PCB_H_ -#define _NETINET_IN_PCB_H_ +#define _NETINET_IN_PCB_H_ #include #include @@ -102,7 +102,7 @@ LIST_HEAD(inpcbhead, inpcb); LIST_HEAD(inpcbporthead, inpcbport); #endif /* BSD_KERNEL_PRIVATE */ -typedef u_quad_t inp_gen_t; +typedef u_quad_t inp_gen_t; /* * PCB with AF_INET6 null bind'ed laddr can receive AF_INET input packet. @@ -110,8 +110,8 @@ typedef u_quad_t inp_gen_t; * the following structure. */ struct in_addr_4in6 { - u_int32_t ia46_pad32[3]; - struct in_addr ia46_addr4; + u_int32_t ia46_pad32[3]; + struct in_addr ia46_addr4; }; #ifdef BSD_KERNEL_PRIVATE @@ -121,17 +121,17 @@ struct in_addr_4in6 { * that position not contain any information which is required to be * stable. 
*/ -struct icmp6_filter; +struct icmp6_filter; #if CONFIG_MACF_NET -struct label; +struct label; #endif struct ifnet; struct inp_stat { - u_int64_t rxpackets; - u_int64_t rxbytes; - u_int64_t txpackets; - u_int64_t txbytes; + u_int64_t rxpackets; + u_int64_t rxbytes; + u_int64_t txpackets; + u_int64_t txbytes; }; /* @@ -140,34 +140,34 @@ struct inp_stat { * hung off of inp_ppcb most of the time. */ struct inpcb { - decl_lck_mtx_data(, inpcb_mtx); /* inpcb per-socket mutex */ - LIST_ENTRY(inpcb) inp_hash; /* hash list */ - LIST_ENTRY(inpcb) inp_list; /* list for all PCBs of this proto */ - void *inp_ppcb; /* pointer to per-protocol pcb */ - struct inpcbinfo *inp_pcbinfo; /* PCB list info */ - struct socket *inp_socket; /* back pointer to socket */ - LIST_ENTRY(inpcb) inp_portlist; /* list for this PCB's local port */ - RB_ENTRY(inpcb) infc_link; /* link for flowhash RB tree */ - struct inpcbport *inp_phd; /* head of this list */ - inp_gen_t inp_gencnt; /* generation count of this instance */ - int inp_hash_element; /* array index of pcb's hash list */ - int inp_wantcnt; /* wanted count; atomically updated */ - int inp_state; /* state (INUSE/CACHED/DEAD) */ - u_short inp_fport; /* foreign port */ - u_short inp_lport; /* local port */ - u_int32_t inp_flags; /* generic IP/datagram flags */ - u_int32_t inp_flags2; /* generic IP/datagram flags #2 */ - u_int32_t inp_flow; /* IPv6 flow information */ - - u_char inp_sndinprog_cnt; /* outstanding send operations */ - u_char inp_vflag; /* INP_IPV4 or INP_IPV6 */ - - u_char inp_ip_ttl; /* time to live proto */ - u_char inp_ip_p; /* protocol proto */ - - struct ifnet *inp_boundifp; /* interface for INP_BOUND_IF */ - struct ifnet *inp_last_outifp; /* last known outgoing interface */ - u_int32_t inp_flowhash; /* flow hash */ + decl_lck_mtx_data(, inpcb_mtx); /* inpcb per-socket mutex */ + LIST_ENTRY(inpcb) inp_hash; /* hash list */ + LIST_ENTRY(inpcb) inp_list; /* list for all PCBs of this proto */ + void *inp_ppcb; /* pointer to per-protocol pcb */ + struct inpcbinfo *inp_pcbinfo; /* PCB list info */ + struct socket *inp_socket; /* back pointer to socket */ + LIST_ENTRY(inpcb) inp_portlist; /* list for this PCB's local port */ + RB_ENTRY(inpcb) infc_link; /* link for flowhash RB tree */ + struct inpcbport *inp_phd; /* head of this list */ + inp_gen_t inp_gencnt; /* generation count of this instance */ + int inp_hash_element; /* array index of pcb's hash list */ + int inp_wantcnt; /* wanted count; atomically updated */ + int inp_state; /* state (INUSE/CACHED/DEAD) */ + u_short inp_fport; /* foreign port */ + u_short inp_lport; /* local port */ + u_int32_t inp_flags; /* generic IP/datagram flags */ + u_int32_t inp_flags2; /* generic IP/datagram flags #2 */ + u_int32_t inp_flow; /* IPv6 flow information */ + + u_char inp_sndinprog_cnt; /* outstanding send operations */ + u_char inp_vflag; /* INP_IPV4 or INP_IPV6 */ + + u_char inp_ip_ttl; /* time to live proto */ + u_char inp_ip_p; /* protocol proto */ + + struct ifnet *inp_boundifp; /* interface for INP_BOUND_IF */ + struct ifnet *inp_last_outifp; /* last known outgoing interface */ + u_int32_t inp_flowhash; /* flow hash */ /* Protocol-dependent part */ union { @@ -197,22 +197,22 @@ struct inpcb { /* IP options */ struct mbuf *inp6_options; /* IP6 options for outgoing packets */ - struct ip6_pktopts *inp6_outputopts; + struct ip6_pktopts *inp6_outputopts; /* IP multicast options */ - struct ip6_moptions *inp6_moptions; + struct ip6_moptions *inp6_moptions; /* ICMPv6 code type filter */ - struct 
icmp6_filter *inp6_icmp6filt; + struct icmp6_filter *inp6_icmp6filt; /* IPV6_CHECKSUM setsockopt */ - int inp6_cksum; - short inp6_hops; + int inp6_cksum; + short inp6_hops; } inp_depend6; - caddr_t inp_saved_ppcb; /* place to save pointer while cached */ + caddr_t inp_saved_ppcb; /* place to save pointer while cached */ #if CONFIG_MACF_NET - struct label *inp_label; /* MAC label */ + struct label *inp_label; /* MAC label */ #endif #if IPSEC - struct inpcbpolicy *inp_sp; /* for IPSec */ + struct inpcbpolicy *inp_sp; /* for IPSec */ #endif /* IPSEC */ #if NECP struct { @@ -223,32 +223,32 @@ struct inpcb { uuid_t necp_client_uuid; necp_client_flow_cb necp_cb; #endif - u_char *inp_keepalive_data; /* for keepalive offload */ + u_char *inp_keepalive_data; /* for keepalive offload */ u_int8_t inp_keepalive_datalen; /* keepalive data length */ - u_int8_t inp_keepalive_type; /* type of application */ + u_int8_t inp_keepalive_type; /* type of application */ u_int16_t inp_keepalive_interval; /* keepalive interval */ uint32_t inp_nstat_refcnt __attribute__((aligned(4))); - struct inp_stat *inp_stat; - struct inp_stat *inp_cstat; /* cellular data */ - struct inp_stat *inp_wstat; /* Wi-Fi data */ - struct inp_stat *inp_Wstat; /* Wired data */ - u_int8_t inp_stat_store[sizeof (struct inp_stat) + sizeof (u_int64_t)]; - u_int8_t inp_cstat_store[sizeof (struct inp_stat) + sizeof (u_int64_t)]; - u_int8_t inp_wstat_store[sizeof (struct inp_stat) + sizeof (u_int64_t)]; - u_int8_t inp_Wstat_store[sizeof (struct inp_stat) + sizeof (u_int64_t)]; + struct inp_stat *inp_stat; + struct inp_stat *inp_cstat; /* cellular data */ + struct inp_stat *inp_wstat; /* Wi-Fi data */ + struct inp_stat *inp_Wstat; /* Wired data */ + u_int8_t inp_stat_store[sizeof(struct inp_stat) + sizeof(u_int64_t)]; + u_int8_t inp_cstat_store[sizeof(struct inp_stat) + sizeof(u_int64_t)]; + u_int8_t inp_wstat_store[sizeof(struct inp_stat) + sizeof(u_int64_t)]; + u_int8_t inp_Wstat_store[sizeof(struct inp_stat) + sizeof(u_int64_t)]; activity_bitmap_t inp_nw_activity; u_int64_t inp_start_timestamp; }; -#define INP_ADD_STAT(_inp, _cnt_cellular, _cnt_wifi, _cnt_wired, _a, _n)\ -do { \ - locked_add_64(&((_inp)->inp_stat->_a), (_n)); \ - if (_cnt_cellular) \ - locked_add_64(&((_inp)->inp_cstat->_a), (_n)); \ - if (_cnt_wifi) \ - locked_add_64(&((_inp)->inp_wstat->_a), (_n)); \ - if (_cnt_wired) \ - locked_add_64(&((_inp)->inp_Wstat->_a), (_n)); \ +#define INP_ADD_STAT(_inp, _cnt_cellular, _cnt_wifi, _cnt_wired, _a, _n) \ +do { \ + locked_add_64(&((_inp)->inp_stat->_a), (_n)); \ + if (_cnt_cellular) \ + locked_add_64(&((_inp)->inp_cstat->_a), (_n)); \ + if (_cnt_wifi) \ + locked_add_64(&((_inp)->inp_wstat->_a), (_n)); \ + if (_cnt_wired) \ + locked_add_64(&((_inp)->inp_Wstat->_a), (_n)); \ } while (0); #endif /* BSD_KERNEL_PRIVATE */ @@ -260,14 +260,14 @@ do { \ #if defined(__LP64__) struct _inpcb_list_entry { - u_int32_t le_next; - u_int32_t le_prev; + u_int32_t le_next; + u_int32_t le_prev; }; -#define _INPCB_PTR(x) u_int32_t -#define _INPCB_LIST_ENTRY(x) struct _inpcb_list_entry +#define _INPCB_PTR(x) u_int32_t +#define _INPCB_LIST_ENTRY(x) struct _inpcb_list_entry #else /* !__LP64__ */ -#define _INPCB_PTR(x) x -#define _INPCB_LIST_ENTRY(x) LIST_ENTRY(x) +#define _INPCB_PTR(x) x +#define _INPCB_LIST_ENTRY(x) LIST_ENTRY(x) #endif /* !__LP64__ */ #ifdef XNU_KERNEL_PRIVATE @@ -291,27 +291,27 @@ struct inpcbpolicy; struct inpcb { #endif /* KERNEL_PRIVATE */ - _INPCB_LIST_ENTRY(inpcb) inp_hash; /* hash list */ - struct in_addr reserved1; /* reserved */ 
- struct in_addr reserved2; /* reserved */ - u_short inp_fport; /* foreign port */ - u_short inp_lport; /* local port */ - _INPCB_LIST_ENTRY(inpcb) inp_list; /* list for all peer PCBs */ - _INPCB_PTR(caddr_t) inp_ppcb; /* per-protocol pcb */ - _INPCB_PTR(struct inpcbinfo *) inp_pcbinfo; /* PCB list info */ - _INPCB_PTR(void *) inp_socket; /* back pointer to socket */ - u_char nat_owner; /* Used to NAT TCP/UDP traffic */ - u_int32_t nat_cookie; /* Cookie stored and returned to NAT */ - _INPCB_LIST_ENTRY(inpcb) inp_portlist; /* this PCB's local port list */ + _INPCB_LIST_ENTRY(inpcb) inp_hash; /* hash list */ + struct in_addr reserved1; /* reserved */ + struct in_addr reserved2; /* reserved */ + u_short inp_fport; /* foreign port */ + u_short inp_lport; /* local port */ + _INPCB_LIST_ENTRY(inpcb) inp_list; /* list for all peer PCBs */ + _INPCB_PTR(caddr_t) inp_ppcb; /* per-protocol pcb */ + _INPCB_PTR(struct inpcbinfo *) inp_pcbinfo; /* PCB list info */ + _INPCB_PTR(void *) inp_socket; /* back pointer to socket */ + u_char nat_owner; /* Used to NAT TCP/UDP traffic */ + u_int32_t nat_cookie; /* Cookie stored and returned to NAT */ + _INPCB_LIST_ENTRY(inpcb) inp_portlist; /* this PCB's local port list */ _INPCB_PTR(struct inpcbport *) inp_phd; /* head of this list */ - inp_gen_t inp_gencnt; /* generation count of this instance */ - int inp_flags; /* generic IP/datagram flags */ + inp_gen_t inp_gencnt; /* generation count of this instance */ + int inp_flags; /* generic IP/datagram flags */ u_int32_t inp_flow; u_char inp_vflag; - u_char inp_ip_ttl; /* time to live proto */ - u_char inp_ip_p; /* protocol proto */ + u_char inp_ip_ttl; /* time to live proto */ + u_char inp_ip_p; /* protocol proto */ /* protocol dependent part */ union { /* foreign host table entry */ @@ -350,118 +350,118 @@ struct inpcb { /* ICMPv6 code type filter */ _INPCB_PTR(struct icmp6_filter *) inp6_icmp6filt; /* IPV6_CHECKSUM setsockopt */ - int inp6_cksum; - u_short inp6_ifindex; - short inp6_hops; + int inp6_cksum; + u_short inp6_ifindex; + short inp6_hops; } inp_depend6; - int hash_element; /* Array index of pcb's hash list */ + int hash_element; /* Array index of pcb's hash list */ _INPCB_PTR(caddr_t) inp_saved_ppcb; /* pointer while cached */ _INPCB_PTR(struct inpcbpolicy *) inp_sp; - u_int32_t reserved[3]; /* reserved */ + u_int32_t reserved[3]; /* reserved */ }; -struct xinpcb { - u_int32_t xi_len; /* length of this structure */ +struct xinpcb { + u_int32_t xi_len; /* length of this structure */ #ifdef XNU_KERNEL_PRIVATE - struct inpcb_compat xi_inp; + struct inpcb_compat xi_inp; #else - struct inpcb xi_inp; + struct inpcb xi_inp; #endif - struct xsocket xi_socket; - u_quad_t xi_alignment_hack; + struct xsocket xi_socket; + u_quad_t xi_alignment_hack; }; #if !CONFIG_EMBEDDED struct inpcb64_list_entry { - u_int64_t le_next; - u_int64_t le_prev; + u_int64_t le_next; + u_int64_t le_prev; }; -struct xinpcb64 { - u_int64_t xi_len; /* length of this structure */ - u_int64_t xi_inpp; - u_short inp_fport; /* foreign port */ - u_short inp_lport; /* local port */ +struct xinpcb64 { + u_int64_t xi_len; /* length of this structure */ + u_int64_t xi_inpp; + u_short inp_fport; /* foreign port */ + u_short inp_lport; /* local port */ struct inpcb64_list_entry inp_list; /* list for all PCBs */ - u_int64_t inp_ppcb; /* ptr to per-protocol PCB */ - u_int64_t inp_pcbinfo; /* PCB list info */ - struct inpcb64_list_entry inp_portlist; /* this PCB's local port list */ - u_int64_t inp_phd; /* head of this list */ - inp_gen_t inp_gencnt; /* 
current generation count */ - int inp_flags; /* generic IP/datagram flags */ - u_int32_t inp_flow; - u_char inp_vflag; - u_char inp_ip_ttl; /* time to live */ - u_char inp_ip_p; /* protocol */ - union { /* foreign host table entry */ - struct in_addr_4in6 inp46_foreign; - struct in6_addr inp6_foreign; + u_int64_t inp_ppcb; /* ptr to per-protocol PCB */ + u_int64_t inp_pcbinfo; /* PCB list info */ + struct inpcb64_list_entry inp_portlist; /* this PCB's local port list */ + u_int64_t inp_phd; /* head of this list */ + inp_gen_t inp_gencnt; /* current generation count */ + int inp_flags; /* generic IP/datagram flags */ + u_int32_t inp_flow; + u_char inp_vflag; + u_char inp_ip_ttl; /* time to live */ + u_char inp_ip_p; /* protocol */ + union { /* foreign host table entry */ + struct in_addr_4in6 inp46_foreign; + struct in6_addr inp6_foreign; } inp_dependfaddr; - union { /* local host table entry */ - struct in_addr_4in6 inp46_local; - struct in6_addr inp6_local; + union { /* local host table entry */ + struct in_addr_4in6 inp46_local; + struct in6_addr inp6_local; } inp_dependladdr; struct { - u_char inp4_ip_tos; /* type of service */ + u_char inp4_ip_tos; /* type of service */ } inp_depend4; struct { u_int8_t inp6_hlim; - int inp6_cksum; - u_short inp6_ifindex; - short inp6_hops; + int inp6_cksum; + u_short inp6_ifindex; + short inp6_hops; } inp_depend6; struct xsocket64 xi_socket; - u_quad_t xi_alignment_hack; + u_quad_t xi_alignment_hack; }; #endif /* !CONFIG_EMBEDDED */ #ifdef PRIVATE struct xinpcb_list_entry { - u_int64_t le_next; - u_int64_t le_prev; + u_int64_t le_next; + u_int64_t le_prev; }; -struct xinpcb_n { - u_int32_t xi_len; /* length of this structure */ - u_int32_t xi_kind; /* XSO_INPCB */ - u_int64_t xi_inpp; - u_short inp_fport; /* foreign port */ - u_short inp_lport; /* local port */ - u_int64_t inp_ppcb; /* pointer to per-protocol pcb */ - inp_gen_t inp_gencnt; /* generation count of this instance */ - int inp_flags; /* generic IP/datagram flags */ - u_int32_t inp_flow; - u_char inp_vflag; - u_char inp_ip_ttl; /* time to live */ - u_char inp_ip_p; /* protocol */ - union { /* foreign host table entry */ - struct in_addr_4in6 inp46_foreign; - struct in6_addr inp6_foreign; +struct xinpcb_n { + u_int32_t xi_len; /* length of this structure */ + u_int32_t xi_kind; /* XSO_INPCB */ + u_int64_t xi_inpp; + u_short inp_fport; /* foreign port */ + u_short inp_lport; /* local port */ + u_int64_t inp_ppcb; /* pointer to per-protocol pcb */ + inp_gen_t inp_gencnt; /* generation count of this instance */ + int inp_flags; /* generic IP/datagram flags */ + u_int32_t inp_flow; + u_char inp_vflag; + u_char inp_ip_ttl; /* time to live */ + u_char inp_ip_p; /* protocol */ + union { /* foreign host table entry */ + struct in_addr_4in6 inp46_foreign; + struct in6_addr inp6_foreign; } inp_dependfaddr; - union { /* local host table entry */ - struct in_addr_4in6 inp46_local; - struct in6_addr inp6_local; + union { /* local host table entry */ + struct in_addr_4in6 inp46_local; + struct in6_addr inp6_local; } inp_dependladdr; struct { - u_char inp4_ip_tos; /* type of service */ + u_char inp4_ip_tos; /* type of service */ } inp_depend4; struct { u_int8_t inp6_hlim; - int inp6_cksum; - u_short inp6_ifindex; - short inp6_hops; + int inp6_cksum; + u_short inp6_ifindex; + short inp6_hops; } inp_depend6; - u_int32_t inp_flowhash; - u_int32_t inp_flags2; + u_int32_t inp_flowhash; + u_int32_t inp_flags2; }; #endif /* PRIVATE */ -struct xinpgen { - u_int32_t xig_len; /* length of this structure */ - u_int 
xig_count; /* number of PCBs at this time */ - inp_gen_t xig_gen; /* generation count at this time */ - so_gen_t xig_sogen; /* current socket generation count */ +struct xinpgen { + u_int32_t xig_len; /* length of this structure */ + u_int xig_count; /* number of PCBs at this time */ + inp_gen_t xig_gen; /* generation count at this time */ + so_gen_t xig_sogen; /* current socket generation count */ }; #pragma pack() @@ -469,49 +469,49 @@ struct xinpgen { /* * These defines are for use with the inpcb. */ -#define INP_IPV4 0x1 -#define INP_IPV6 0x2 -#define inp_faddr inp_dependfaddr.inp46_foreign.ia46_addr4 -#define inp_laddr inp_dependladdr.inp46_local.ia46_addr4 -#define in6p_faddr inp_dependfaddr.inp6_foreign -#define in6p_laddr inp_dependladdr.inp6_local +#define INP_IPV4 0x1 +#define INP_IPV6 0x2 +#define inp_faddr inp_dependfaddr.inp46_foreign.ia46_addr4 +#define inp_laddr inp_dependladdr.inp46_local.ia46_addr4 +#define in6p_faddr inp_dependfaddr.inp6_foreign +#define in6p_laddr inp_dependladdr.inp6_local #ifdef BSD_KERNEL_PRIVATE -#define inp_route inp_dependroute.inp4_route -#define inp_ip_tos inp_depend4.inp4_ip_tos -#define inp_options inp_depend4.inp4_options -#define inp_moptions inp_depend4.inp4_moptions -#define in6p_route inp_dependroute.inp6_route -#define in6p_ip6_hlim inp_depend6.inp6_hlim -#define in6p_hops inp_depend6.inp6_hops /* default hop limit */ -#define in6p_ip6_nxt inp_ip_p -#define in6p_vflag inp_vflag -#define in6p_options inp_depend6.inp6_options -#define in6p_outputopts inp_depend6.inp6_outputopts -#define in6p_moptions inp_depend6.inp6_moptions -#define in6p_icmp6filt inp_depend6.inp6_icmp6filt -#define in6p_cksum inp_depend6.inp6_cksum -#define in6p_ifindex inp_depend6.inp6_ifindex -#define in6p_flags inp_flags -#define in6p_flags2 inp_flags2 -#define in6p_socket inp_socket -#define in6p_lport inp_lport -#define in6p_fport inp_fport -#define in6p_ppcb inp_ppcb -#define in6p_state inp_state -#define in6p_wantcnt inp_wantcnt -#define in6p_last_outifp inp_last_outifp -#define in6pcb inpcb +#define inp_route inp_dependroute.inp4_route +#define inp_ip_tos inp_depend4.inp4_ip_tos +#define inp_options inp_depend4.inp4_options +#define inp_moptions inp_depend4.inp4_moptions +#define in6p_route inp_dependroute.inp6_route +#define in6p_ip6_hlim inp_depend6.inp6_hlim +#define in6p_hops inp_depend6.inp6_hops /* default hop limit */ +#define in6p_ip6_nxt inp_ip_p +#define in6p_vflag inp_vflag +#define in6p_options inp_depend6.inp6_options +#define in6p_outputopts inp_depend6.inp6_outputopts +#define in6p_moptions inp_depend6.inp6_moptions +#define in6p_icmp6filt inp_depend6.inp6_icmp6filt +#define in6p_cksum inp_depend6.inp6_cksum +#define in6p_ifindex inp_depend6.inp6_ifindex +#define in6p_flags inp_flags +#define in6p_flags2 inp_flags2 +#define in6p_socket inp_socket +#define in6p_lport inp_lport +#define in6p_fport inp_fport +#define in6p_ppcb inp_ppcb +#define in6p_state inp_state +#define in6p_wantcnt inp_wantcnt +#define in6p_last_outifp inp_last_outifp +#define in6pcb inpcb #if IPSEC -#define in6p_sp inp_sp +#define in6p_sp inp_sp #endif /* IPSEC */ #define INP_INC_IFNET_STAT(_inp_, _stat_) { \ if ((_inp_)->inp_last_outifp != NULL) { \ - if ((_inp_)->inp_vflag & INP_IPV6) { \ - (_inp_)->inp_last_outifp->if_ipv6_stat->_stat_++;\ - } else { \ - (_inp_)->inp_last_outifp->if_ipv4_stat->_stat_++;\ - }\ + if ((_inp_)->inp_vflag & INP_IPV6) { \ + (_inp_)->inp_last_outifp->if_ipv6_stat->_stat_++;\ + } else { \ + (_inp_)->inp_last_outifp->if_ipv4_stat->_stat_++;\ + }\ }\ } 
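The INP_ADD_STAT and INP_INC_IFNET_STAT hunks above are whitespace-only, but they are a good excuse to spell out why this patch insists on fully braced bodies elsewhere: a multi-statement macro is only safe when it expands to a single statement, which is exactly what the do { ... } while (0) wrapper in INP_ADD_STAT buys. A toy user-space demonstration of the failure mode follows; the macro and struct names are hypothetical.

    #include <stdio.h>

    /* Unsafe: expands to two statements, so only the first is guarded. */
    #define BUMP_BAD(ctr, n)    (ctr).calls++; (ctr).bytes += (n)

    /* Safe: do { } while (0) makes the expansion one statement that
     * still takes a trailing semicolon, so if/else chains parse right. */
    #define BUMP_OK(ctr, n) do {        \
            (ctr).calls++;              \
            (ctr).bytes += (n);         \
    } while (0)

    struct counter { unsigned calls; unsigned long bytes; };

    int
    main(void)
    {
        struct counter c = { 0, 0 };
        int enabled = 0;

        if (enabled)
            BUMP_BAD(c, 100);   /* bytes += 100 runs unconditionally! */

        if (enabled) {
            BUMP_OK(c, 100);    /* nothing runs, as expected */
        }
        printf("calls=%u bytes=%lu\n", c.calls, c.bytes);
        return 0;
    }

Compiled as-is, this prints calls=0 bytes=100: the unguarded half of BUMP_BAD ran even though enabled was false, which is the hazard the brace-everything style in the rest of this patch removes.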
@@ -522,7 +522,7 @@ struct inpcbport { }; struct intimercount { - u_int32_t intimer_lazy; /* lazy requests for timer scheduling */ + u_int32_t intimer_lazy; /* lazy requests for timer scheduling */ u_int32_t intimer_fast; /* fast requests, can be coalesced */ u_int32_t intimer_nodelay; /* fast requests, never coalesced */ }; @@ -542,91 +542,91 @@ struct inpcbinfo { * timer callbacks, protected by inpcb_lock. Callout request * counts are atomically updated. */ - TAILQ_ENTRY(inpcbinfo) ipi_entry; - inpcb_timer_func_t ipi_gc; - inpcb_timer_func_t ipi_timer; - struct intimercount ipi_gc_req; - struct intimercount ipi_timer_req; + TAILQ_ENTRY(inpcbinfo) ipi_entry; + inpcb_timer_func_t ipi_gc; + inpcb_timer_func_t ipi_timer; + struct intimercount ipi_gc_req; + struct intimercount ipi_timer_req; /* * Per-protocol lock protecting pcb list, pcb count, etc. */ - lck_rw_t *ipi_lock; + lck_rw_t *ipi_lock; /* * List and count of pcbs on the protocol. */ - struct inpcbhead *ipi_listhead; - uint32_t ipi_count; + struct inpcbhead *ipi_listhead; + uint32_t ipi_count; /* * Count of pcbs marked with INP2_TIMEWAIT flag. */ - uint32_t ipi_twcount; + uint32_t ipi_twcount; /* * Generation count -- incremented each time a connection is * allocated or freed. */ - uint64_t ipi_gencnt; + uint64_t ipi_gencnt; /* * Fields associated with port lookup and allocation. */ - uint16_t ipi_lastport; - uint16_t ipi_lastlow; - uint16_t ipi_lasthi; + uint16_t ipi_lastport; + uint16_t ipi_lastlow; + uint16_t ipi_lasthi; /* * Zone from which inpcbs are allocated for this protocol. */ - struct zone *ipi_zone; + struct zone *ipi_zone; /* * Per-protocol hash of pcbs, hashed by local and foreign * addresses and port numbers. */ - struct inpcbhead *ipi_hashbase; - u_long ipi_hashmask; + struct inpcbhead *ipi_hashbase; + u_long ipi_hashmask; /* * Per-protocol hash of pcbs, hashed by only local port number. */ - struct inpcbporthead *ipi_porthashbase; - u_long ipi_porthashmask; + struct inpcbporthead *ipi_porthashbase; + u_long ipi_porthashmask; /* * Misc. 
*/ - lck_attr_t *ipi_lock_attr; - lck_grp_t *ipi_lock_grp; - lck_grp_attr_t *ipi_lock_grp_attr; + lck_attr_t *ipi_lock_attr; + lck_grp_t *ipi_lock_grp; + lck_grp_attr_t *ipi_lock_grp_attr; -#define INPCBINFO_UPDATE_MSS 0x1 -#define INPCBINFO_HANDLE_LQM_ABORT 0x2 - u_int32_t ipi_flags; +#define INPCBINFO_UPDATE_MSS 0x1 +#define INPCBINFO_HANDLE_LQM_ABORT 0x2 + u_int32_t ipi_flags; }; -#define INP_PCBHASH(faddr, lport, fport, mask) \ +#define INP_PCBHASH(faddr, lport, fport, mask) \ (((faddr) ^ ((faddr) >> 16) ^ ntohs((lport) ^ (fport))) & (mask)) -#define INP_PCBPORTHASH(lport, mask) \ +#define INP_PCBPORTHASH(lport, mask) \ (ntohs((lport)) & (mask)) -#define INP_IS_FLOW_CONTROLLED(_inp_) \ +#define INP_IS_FLOW_CONTROLLED(_inp_) \ ((_inp_)->inp_flags & INP_FLOW_CONTROLLED) -#define INP_IS_FLOW_SUSPENDED(_inp_) \ - (((_inp_)->inp_flags & INP_FLOW_SUSPENDED) || \ +#define INP_IS_FLOW_SUSPENDED(_inp_) \ + (((_inp_)->inp_flags & INP_FLOW_SUSPENDED) || \ ((_inp_)->inp_socket->so_flags & SOF_SUSPENDED)) -#define INP_WAIT_FOR_IF_FEEDBACK(_inp_) \ +#define INP_WAIT_FOR_IF_FEEDBACK(_inp_) \ (((_inp_)->inp_flags & (INP_FLOW_CONTROLLED | INP_FLOW_SUSPENDED)) != 0) #define INP_NO_CELLULAR(_inp) \ ((_inp)->inp_flags & INP_NO_IFT_CELLULAR) #define INP_NO_EXPENSIVE(_inp) \ ((_inp)->inp_flags2 & INP2_NO_IFF_EXPENSIVE) -#define INP_AWDL_UNRESTRICTED(_inp) \ +#define INP_AWDL_UNRESTRICTED(_inp) \ ((_inp)->inp_flags2 & INP2_AWDL_UNRESTRICTED) -#define INP_INTCOPROC_ALLOWED(_inp) \ +#define INP_INTCOPROC_ALLOWED(_inp) \ ((_inp)->inp_flags2 & INP2_INTCOPROC_ALLOWED) #endif /* BSD_KERNEL_PRIVATE */ @@ -639,59 +639,59 @@ struct inpcbinfo { * time, whether or not the OS supports certain features. */ #ifdef BSD_KERNEL_PRIVATE -#define INP_RECVOPTS 0x00000001 /* receive incoming IP options */ -#define INP_RECVRETOPTS 0x00000002 /* receive IP options for reply */ -#define INP_RECVDSTADDR 0x00000004 /* receive IP dst address */ -#define INP_HDRINCL 0x00000008 /* user supplies entire IP header */ -#define INP_HIGHPORT 0x00000010 /* user wants "high" port binding */ -#define INP_LOWPORT 0x00000020 /* user wants "low" port binding */ +#define INP_RECVOPTS 0x00000001 /* receive incoming IP options */ +#define INP_RECVRETOPTS 0x00000002 /* receive IP options for reply */ +#define INP_RECVDSTADDR 0x00000004 /* receive IP dst address */ +#define INP_HDRINCL 0x00000008 /* user supplies entire IP header */ +#define INP_HIGHPORT 0x00000010 /* user wants "high" port binding */ +#define INP_LOWPORT 0x00000020 /* user wants "low" port binding */ #endif /* BSD_KERNEL_PRIVATE */ -#define INP_ANONPORT 0x00000040 /* port chosen for user */ +#define INP_ANONPORT 0x00000040 /* port chosen for user */ #ifdef BSD_KERNEL_PRIVATE -#define INP_RECVIF 0x00000080 /* receive incoming interface */ -#define INP_MTUDISC 0x00000100 /* unused */ -#define INP_STRIPHDR 0x00000200 /* strip hdrs in raw_ip (for OT) */ -#define INP_RECV_ANYIF 0x00000400 /* don't restrict inbound iface */ -#define INP_INADDR_ANY 0x00000800 /* local address wasn't specified */ -#define INP_IN6ADDR_ANY INP_INADDR_ANY -#define INP_RECVTTL 0x00001000 /* receive incoming IP TTL */ -#define INP_UDP_NOCKSUM 0x00002000 /* turn off outbound UDP checksum */ -#define INP_BOUND_IF 0x00004000 /* bind socket to an interface */ +#define INP_RECVIF 0x00000080 /* receive incoming interface */ +#define INP_MTUDISC 0x00000100 /* unused */ +#define INP_STRIPHDR 0x00000200 /* strip hdrs in raw_ip (for OT) */ +#define INP_RECV_ANYIF 0x00000400 /* don't restrict inbound iface */ +#define 
INP_INADDR_ANY 0x00000800 /* local address wasn't specified */ +#define INP_IN6ADDR_ANY INP_INADDR_ANY +#define INP_RECVTTL 0x00001000 /* receive incoming IP TTL */ +#define INP_UDP_NOCKSUM 0x00002000 /* turn off outbound UDP checksum */ +#define INP_BOUND_IF 0x00004000 /* bind socket to an interface */ #endif /* BSD_KERNEL_PRIVATE */ -#define IN6P_IPV6_V6ONLY 0x00008000 /* restrict AF_INET6 socket for v6 */ +#define IN6P_IPV6_V6ONLY 0x00008000 /* restrict AF_INET6 socket for v6 */ #ifdef BSD_KERNEL_PRIVATE -#define IN6P_PKTINFO 0x00010000 /* receive IP6 dst and I/F */ -#define IN6P_HOPLIMIT 0x00020000 /* receive hoplimit */ -#define IN6P_HOPOPTS 0x00040000 /* receive hop-by-hop options */ -#define IN6P_DSTOPTS 0x00080000 /* receive dst options after rthdr */ -#define IN6P_RTHDR 0x00100000 /* receive routing header */ -#define IN6P_RTHDRDSTOPTS 0x00200000 /* receive dstoptions before rthdr */ -#define IN6P_TCLASS 0x00400000 /* receive traffic class value */ -#define INP_RECVTOS IN6P_TCLASS /* receive incoming IP TOS */ -#define IN6P_AUTOFLOWLABEL 0x00800000 /* attach flowlabel automatically */ +#define IN6P_PKTINFO 0x00010000 /* receive IP6 dst and I/F */ +#define IN6P_HOPLIMIT 0x00020000 /* receive hoplimit */ +#define IN6P_HOPOPTS 0x00040000 /* receive hop-by-hop options */ +#define IN6P_DSTOPTS 0x00080000 /* receive dst options after rthdr */ +#define IN6P_RTHDR 0x00100000 /* receive routing header */ +#define IN6P_RTHDRDSTOPTS 0x00200000 /* receive dstoptions before rthdr */ +#define IN6P_TCLASS 0x00400000 /* receive traffic class value */ +#define INP_RECVTOS IN6P_TCLASS /* receive incoming IP TOS */ +#define IN6P_AUTOFLOWLABEL 0x00800000 /* attach flowlabel automatically */ #endif /* BSD_KERNEL_PRIVATE */ -#define IN6P_BINDV6ONLY 0x01000000 /* do not grab IPv4 traffic */ +#define IN6P_BINDV6ONLY 0x01000000 /* do not grab IPv4 traffic */ #ifdef BSD_KERNEL_PRIVATE -#define IN6P_RFC2292 0x02000000 /* used RFC2292 API on the socket */ -#define IN6P_MTU 0x04000000 /* receive path MTU */ -#define INP_PKTINFO 0x08000000 /* rcv and snd PKTINFO for IPv4 */ -#define INP_FLOW_SUSPENDED 0x10000000 /* flow suspended */ -#define INP_NO_IFT_CELLULAR 0x20000000 /* do not use cellular interface */ -#define INP_FLOW_CONTROLLED 0x40000000 /* flow controlled */ -#define INP_FC_FEEDBACK 0x80000000 /* got interface flow adv feedback */ - -#define INP_CONTROLOPTS \ +#define IN6P_RFC2292 0x02000000 /* used RFC2292 API on the socket */ +#define IN6P_MTU 0x04000000 /* receive path MTU */ +#define INP_PKTINFO 0x08000000 /* rcv and snd PKTINFO for IPv4 */ +#define INP_FLOW_SUSPENDED 0x10000000 /* flow suspended */ +#define INP_NO_IFT_CELLULAR 0x20000000 /* do not use cellular interface */ +#define INP_FLOW_CONTROLLED 0x40000000 /* flow controlled */ +#define INP_FC_FEEDBACK 0x80000000 /* got interface flow adv feedback */ + +#define INP_CONTROLOPTS \ (INP_RECVOPTS|INP_RECVRETOPTS|INP_RECVDSTADDR|INP_RECVIF|INP_RECVTTL| \ INP_PKTINFO|IN6P_PKTINFO|IN6P_HOPLIMIT|IN6P_HOPOPTS|IN6P_DSTOPTS| \ IN6P_RTHDR|IN6P_RTHDRDSTOPTS|IN6P_TCLASS|IN6P_RFC2292|IN6P_MTU) -#define INP_UNMAPPABLEOPTS \ +#define INP_UNMAPPABLEOPTS \ (IN6P_HOPOPTS|IN6P_DSTOPTS|IN6P_RTHDR|IN6P_AUTOFLOWLABEL) /* @@ -699,24 +699,24 @@ struct inpcbinfo { * * Overflowed INP flags; use INP2 prefix to avoid misuse. 
*/ -#define INP2_TIMEWAIT 0x00000001 /* in TIMEWAIT */ -#define INP2_IN_FCTREE 0x00000002 /* in inp_fc_tree */ -#define INP2_WANT_APP_POLICY 0x00000004 /* necp app policy check is desired */ -#define INP2_NO_IFF_EXPENSIVE 0x00000008 /* do not use expensive interface */ -#define INP2_INHASHLIST 0x00000010 /* pcb is in inp_hash list */ -#define INP2_AWDL_UNRESTRICTED 0x00000020 /* AWDL restricted mode allowed */ -#define INP2_KEEPALIVE_OFFLOAD 0x00000040 /* Enable UDP or TCP keepalive offload */ -#define INP2_INTCOPROC_ALLOWED 0x00000080 /* Allow communication via internal co-processor interfaces */ -#define INP2_CONNECT_IN_PROGRESS 0x00000100 /* A connect call is in progress, so binds are intermediate steps */ -#define INP2_CLAT46_FLOW 0x00000200 /* The flow is going to use CLAT46 path */ +#define INP2_TIMEWAIT 0x00000001 /* in TIMEWAIT */ +#define INP2_IN_FCTREE 0x00000002 /* in inp_fc_tree */ +#define INP2_WANT_APP_POLICY 0x00000004 /* necp app policy check is desired */ +#define INP2_NO_IFF_EXPENSIVE 0x00000008 /* do not use expensive interface */ +#define INP2_INHASHLIST 0x00000010 /* pcb is in inp_hash list */ +#define INP2_AWDL_UNRESTRICTED 0x00000020 /* AWDL restricted mode allowed */ +#define INP2_KEEPALIVE_OFFLOAD 0x00000040 /* Enable UDP or TCP keepalive offload */ +#define INP2_INTCOPROC_ALLOWED 0x00000080 /* Allow communication via internal co-processor interfaces */ +#define INP2_CONNECT_IN_PROGRESS 0x00000100 /* A connect call is in progress, so binds are intermediate steps */ +#define INP2_CLAT46_FLOW 0x00000200 /* The flow is going to use CLAT46 path */ /* * Flags passed to in_pcblookup*() functions. */ -#define INPLOOKUP_WILDCARD 1 +#define INPLOOKUP_WILDCARD 1 -#define sotoinpcb(so) ((struct inpcb *)(so)->so_pcb) -#define sotoin6pcb(so) sotoinpcb(so) +#define sotoinpcb(so) ((struct inpcb *)(so)->so_pcb) +#define sotoin6pcb(so) sotoinpcb(so) struct sysctl_req; @@ -728,18 +728,18 @@ extern int ipport_hifirstauto; extern int ipport_hilastauto; /* freshly allocated PCB, it's in use */ -#define INPCB_STATE_INUSE 0x1 +#define INPCB_STATE_INUSE 0x1 /* this pcb is sitting in a cache */ -#define INPCB_STATE_CACHED 0x2 +#define INPCB_STATE_CACHED 0x2 /* should treat as gone, will be garbage collected and freed */ -#define INPCB_STATE_DEAD 0x3 +#define INPCB_STATE_DEAD 0x3 /* marked as ready to be garbage collected, should be treated as not found */ -#define WNT_STOPUSING 0xffff +#define WNT_STOPUSING 0xffff /* that pcb is being acquired, do not recycle this time */ -#define WNT_ACQUIRE 0x1 +#define WNT_ACQUIRE 0x1 /* release acquired mode, can be garbage collected when wantcnt is null */ -#define WNT_RELEASE 0x2 +#define WNT_RELEASE 0x2 extern void in_pcbinit(void); extern void in_pcbinfo_attach(struct inpcbinfo *); @@ -788,16 +788,16 @@ extern void inpcb_to_xinpcb64(struct inpcb *, struct xinpcb64 *); #endif extern int get_pcblist_n(short, struct sysctl_req *, struct inpcbinfo *); -#define INPCB_GET_PORTS_USED_WILDCARDOK 0x01 -#define INPCB_GET_PORTS_USED_NOWAKEUPOK 0x02 -#define INPCB_GET_PORTS_USED_RECVANYIFONLY 0x04 -#define INPCB_GET_PORTS_USED_EXTBGIDLEONLY 0x08 -#define INPCB_GET_PORTS_USED_ACTIVEONLY 0x10 +#define INPCB_GET_PORTS_USED_WILDCARDOK 0x01 +#define INPCB_GET_PORTS_USED_NOWAKEUPOK 0x02 +#define INPCB_GET_PORTS_USED_RECVANYIFONLY 0x04 +#define INPCB_GET_PORTS_USED_EXTBGIDLEONLY 0x08 +#define INPCB_GET_PORTS_USED_ACTIVEONLY 0x10 extern void inpcb_get_ports_used(u_int32_t, int, u_int32_t, bitstr_t *, struct inpcbinfo *); -#define INPCB_OPPORTUNISTIC_THROTTLEON
0x0001 -#define INPCB_OPPORTUNISTIC_SETCMD 0x0002 +#define INPCB_OPPORTUNISTIC_THROTTLEON 0x0001 +#define INPCB_OPPORTUNISTIC_SETCMD 0x0002 extern uint32_t inpcb_count_opportunistic(unsigned int, struct inpcbinfo *, u_int32_t); extern uint32_t inpcb_find_anypcb_byaddr(struct ifaddr *, struct inpcbinfo *); diff --git a/bsd/netinet/in_pcblist.c b/bsd/netinet/in_pcblist.c index 7865d6a1e..73b55db69 100644 --- a/bsd/netinet/in_pcblist.c +++ b/bsd/netinet/in_pcblist.c @@ -94,11 +94,11 @@ #include #ifndef ROUNDUP64 -#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) +#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) #endif #ifndef ADVANCE64 -#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) +#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) #endif static void inpcb_to_xinpcb_n(struct inpcb *, struct xinpcb_n *); @@ -109,7 +109,7 @@ void shutdown_sockets_on_interface(struct ifnet *ifp); __private_extern__ void sotoxsocket_n(struct socket *so, struct xsocket_n *xso) { - xso->xso_len = sizeof (struct xsocket_n); + xso->xso_len = sizeof(struct xsocket_n); xso->xso_kind = XSO_SOCKET; if (so != NULL) { @@ -141,7 +141,7 @@ sotoxsocket_n(struct socket *so, struct xsocket_n *xso) __private_extern__ void sbtoxsockbuf_n(struct sockbuf *sb, struct xsockbuf_n *xsb) { - xsb->xsb_len = sizeof (struct xsockbuf_n); + xsb->xsb_len = sizeof(struct xsockbuf_n); xsb->xsb_kind = (sb->sb_flags & SB_RECV) ? XSO_RCVBUF : XSO_SNDBUF; if (sb != NULL) { @@ -153,8 +153,9 @@ sbtoxsockbuf_n(struct sockbuf *sb, struct xsockbuf_n *xsb) xsb->sb_flags = sb->sb_flags; xsb->sb_timeo = (short)(sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick; - if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0) + if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0) { xsb->sb_timeo = 1; + } } } @@ -163,7 +164,7 @@ sbtoxsockstat_n(struct socket *so, struct xsockstat_n *xst) { int i; - xst->xst_len = sizeof (struct xsockstat_n); + xst->xst_len = sizeof(struct xsockstat_n); xst->xst_kind = XSO_STATS; for (i = 0; i < SO_TC_STATS_MAX; i++) { @@ -177,7 +178,7 @@ sbtoxsockstat_n(struct socket *so, struct xsockstat_n *xst) static void inpcb_to_xinpcb_n(struct inpcb *inp, struct xinpcb_n *xinp) { - xinp->xi_len = sizeof (struct xinpcb_n); + xinp->xi_len = sizeof(struct xinpcb_n); xinp->xi_kind = XSO_INPCB; xinp->xi_inpp = (uint64_t)VM_KERNEL_ADDRPERM(inp); xinp->inp_fport = inp->inp_fport; @@ -203,7 +204,7 @@ inpcb_to_xinpcb_n(struct inpcb *inp, struct xinpcb_n *xinp) __private_extern__ void tcpcb_to_xtcpcb_n(struct tcpcb *tp, struct xtcpcb_n *xt) { - xt->xt_len = sizeof (struct xtcpcb_n); + xt->xt_len = sizeof(struct xtcpcb_n); xt->xt_kind = XSO_TCPCB; xt->t_segq = (uint32_t)VM_KERNEL_ADDRPERM(tp->t_segq.lh_first); @@ -269,17 +270,18 @@ get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo) inp_gen_t gencnt; struct xinpgen xig; void *buf = NULL; - size_t item_size = ROUNDUP64(sizeof (struct xinpcb_n)) + - ROUNDUP64(sizeof (struct xsocket_n)) + - 2 * ROUNDUP64(sizeof (struct xsockbuf_n)) + - ROUNDUP64(sizeof (struct xsockstat_n)); + size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) + + ROUNDUP64(sizeof(struct xsocket_n)) + + 2 * ROUNDUP64(sizeof(struct xsockbuf_n)) + + ROUNDUP64(sizeof(struct xsockstat_n)); - if (proto == IPPROTO_TCP) - item_size += ROUNDUP64(sizeof (struct xtcpcb_n)); + if (proto == IPPROTO_TCP) { + item_size += ROUNDUP64(sizeof(struct xtcpcb_n)); + } if (req->oldptr == USER_ADDR_NULL) { n = pcbinfo->ipi_count; - req->oldidx = 2 * (sizeof (xig)) + (n + n/8 + 1) * item_size; + 
req->oldidx = 2 * (sizeof(xig)) + (n + n / 8 + 1) * item_size; return 0; } @@ -299,12 +301,12 @@ get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo) gencnt = pcbinfo->ipi_gencnt; n = pcbinfo->ipi_count; - bzero(&xig, sizeof (xig)); - xig.xig_len = sizeof (xig); + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof(xig); xig.xig_count = n; xig.xig_gen = gencnt; xig.xig_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xig, sizeof (xig)); + error = SYSCTL_OUT(req, &xig, sizeof(xig)); if (error) { goto done; } @@ -321,7 +323,7 @@ get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo) goto done; } - inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK); + inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK); if (inp_list == NULL) { error = ENOMEM; goto done; @@ -334,10 +336,11 @@ get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo) n = get_tcp_inp_list(inp_list, n, gencnt); } else { for (inp = pcbinfo->ipi_listhead->lh_first, i = 0; inp && i < n; - inp = inp->inp_list.le_next) { + inp = inp->inp_list.le_next) { if (inp->inp_gencnt <= gencnt && - inp->inp_state != INPCB_STATE_DEAD) + inp->inp_state != INPCB_STATE_DEAD) { inp_list[i++] = inp; + } } n = i; } @@ -350,13 +353,13 @@ get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo) inp->inp_state != INPCB_STATE_DEAD) { struct xinpcb_n *xi = (struct xinpcb_n *)buf; struct xsocket_n *xso = (struct xsocket_n *) - ADVANCE64(xi, sizeof (*xi)); + ADVANCE64(xi, sizeof(*xi)); struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *) - ADVANCE64(xso, sizeof (*xso)); + ADVANCE64(xso, sizeof(*xso)); struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *) - ADVANCE64(xsbrcv, sizeof (*xsbrcv)); + ADVANCE64(xsbrcv, sizeof(*xsbrcv)); struct xsockstat_n *xsostats = (struct xsockstat_n *) - ADVANCE64(xsbsnd, sizeof (*xsbsnd)); + ADVANCE64(xsbsnd, sizeof(*xsbsnd)); bzero(buf, item_size); @@ -369,15 +372,16 @@ get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo) sbtoxsockstat_n(inp->inp_socket, xsostats); if (proto == IPPROTO_TCP) { struct xtcpcb_n *xt = (struct xtcpcb_n *) - ADVANCE64(xsostats, sizeof (*xsostats)); + ADVANCE64(xsostats, sizeof(*xsostats)); /* * inp->inp_ppcb, can only be NULL on * an initialization race window. * No need to lock. */ - if (inp->inp_ppcb == NULL) + if (inp->inp_ppcb == NULL) { continue; + } tcpcb_to_xtcpcb_n((struct tcpcb *) inp->inp_ppcb, xt); @@ -397,21 +401,23 @@ get_pcblist_n(short proto, struct sysctl_req *req, struct inpcbinfo *pcbinfo) * while we were processing this request, and it * might be necessary to retry. 
*/ - bzero(&xig, sizeof (xig)); - xig.xig_len = sizeof (xig); + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof(xig); xig.xig_gen = pcbinfo->ipi_gencnt; xig.xig_sogen = so_gencnt; xig.xig_count = pcbinfo->ipi_count; - error = SYSCTL_OUT(req, &xig, sizeof (xig)); + error = SYSCTL_OUT(req, &xig, sizeof(xig)); } done: lck_rw_done(pcbinfo->ipi_lock); - if (inp_list != NULL) + if (inp_list != NULL) { FREE(inp_list, M_TEMP); - if (buf != NULL) + } + if (buf != NULL) { FREE(buf, M_TEMP); - return (error); + } + return error; } __private_extern__ void @@ -440,13 +446,15 @@ inpcb_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags, if (inp->inp_gencnt > gencnt || inp->inp_state == INPCB_STATE_DEAD || - inp->inp_wantcnt == WNT_STOPUSING) + inp->inp_wantcnt == WNT_STOPUSING) { continue; + } if ((so = inp->inp_socket) == NULL || (so->so_state & SS_DEFUNCT) || - (so->so_state & SS_ISDISCONNECTED)) + (so->so_state & SS_ISDISCONNECTED)) { continue; + } /* * If protocol is specified, filter out inpcbs that @@ -483,37 +491,44 @@ inpcb_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags, } if (SOCK_PROTO(inp->inp_socket) != IPPROTO_UDP && - SOCK_PROTO(inp->inp_socket) != IPPROTO_TCP) + SOCK_PROTO(inp->inp_socket) != IPPROTO_TCP) { continue; + } iswildcard = (((inp->inp_vflag & INP_IPV4) && inp->inp_laddr.s_addr == INADDR_ANY) || ((inp->inp_vflag & INP_IPV6) && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))); - if (!wildcardok && iswildcard) + if (!wildcardok && iswildcard) { continue; + } if ((so->so_options & SO_NOWAKEFROMSLEEP) && - !nowakeok) + !nowakeok) { continue; + } if (!(inp->inp_flags & INP_RECV_ANYIF) && - recvanyifonly) + recvanyifonly) { continue; + } if (!(so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) && - extbgidleok) + extbgidleok) { continue; + } if (!iswildcard && !(ifindex == 0 || inp->inp_last_outifp == NULL || - ifindex == inp->inp_last_outifp->if_index)) + ifindex == inp->inp_last_outifp->if_index)) { continue; + } if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP && - so->so_state & SS_CANTRCVMORE) + so->so_state & SS_CANTRCVMORE) { continue; + } if (SOCK_PROTO(inp->inp_socket) == IPPROTO_TCP) { struct tcpcb *tp = sototcpcb(inp->inp_socket); @@ -522,47 +537,50 @@ inpcb_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags, * Workaround race where inp_ppcb is NULL during * socket initialization */ - if (tp == NULL) + if (tp == NULL) { continue; + } switch (tp->t_state) { - case TCPS_CLOSED: - continue; - /* NOT REACHED */ - case TCPS_LISTEN: - case TCPS_SYN_SENT: - case TCPS_SYN_RECEIVED: - case TCPS_ESTABLISHED: - case TCPS_FIN_WAIT_1: - /* - * Note: FIN_WAIT_1 is an active state - * because we need our FIN to be - * acknowledged - */ - break; - case TCPS_CLOSE_WAIT: - case TCPS_CLOSING: - case TCPS_LAST_ACK: - case TCPS_FIN_WAIT_2: - /* - * In the closing states, the connection - * is not idle when there is outgoing - * data having to be acknowledged - */ - if (activeonly && so->so_snd.sb_cc == 0) - continue; - break; - case TCPS_TIME_WAIT: + case TCPS_CLOSED: + continue; + /* NOT REACHED */ + case TCPS_LISTEN: + case TCPS_SYN_SENT: + case TCPS_SYN_RECEIVED: + case TCPS_ESTABLISHED: + case TCPS_FIN_WAIT_1: + /* + * Note: FIN_WAIT_1 is an active state + * because we need our FIN to be + * acknowledged + */ + break; + case TCPS_CLOSE_WAIT: + case TCPS_CLOSING: + case TCPS_LAST_ACK: + case TCPS_FIN_WAIT_2: + /* + * In the closing states, the connection + * is not idle when there is outgoing + * data having to be acknowledged + */ + if (activeonly && so->so_snd.sb_cc 
== 0) { continue; - /* NOT REACHED */ + } + break; + case TCPS_TIME_WAIT: + continue; + /* NOT REACHED */ } } /* * Final safeguard to exclude unspecified local port */ port = ntohs(inp->inp_lport); - if (port == 0) + if (port == 0) { continue; + } bitstr_set(bitfield, port); if_ports_used_add_inpcb(ifindex, inp); @@ -617,7 +635,7 @@ inpcb_count_opportunistic(unsigned int ifindex, struct inpcbinfo *pcbinfo, lck_rw_done(pcbinfo->ipi_lock); - return (opportunistic); + return opportunistic; } __private_extern__ uint32_t @@ -630,41 +648,42 @@ inpcb_find_anypcb_byaddr(struct ifaddr *ifa, struct inpcbinfo *pcbinfo) if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6)) { - return (0); + return 0; } lck_rw_lock_shared(pcbinfo->ipi_lock); for (inp = LIST_FIRST(pcbinfo->ipi_listhead); inp != NULL; inp = LIST_NEXT(inp, inp_list)) { - if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD && inp->inp_socket != NULL) { so = inp->inp_socket; af = SOCK_DOM(so); - if (af != ifa->ifa_addr->sa_family) + if (af != ifa->ifa_addr->sa_family) { continue; - if (inp->inp_last_outifp != ifa->ifa_ifp) + } + if (inp->inp_last_outifp != ifa->ifa_ifp) { continue; + } if (af == AF_INET) { if (inp->inp_laddr.s_addr == (satosin(ifa->ifa_addr))->sin_addr.s_addr) { lck_rw_done(pcbinfo->ipi_lock); - return (1); + return 1; } } if (af == AF_INET6) { if (IN6_ARE_ADDR_EQUAL(IFA_IN6(ifa), &inp->in6p_laddr)) { lck_rw_done(pcbinfo->ipi_lock); - return (1); + return 1; } } } } lck_rw_done(pcbinfo->ipi_lock); - return (0); + return 0; } static int @@ -674,13 +693,14 @@ shutdown_sockets_on_interface_proc_callout(proc_t p, void *arg) int i; struct ifnet *ifp = (struct ifnet *)arg; - if (ifp == NULL) - return (PROC_RETURNED); + if (ifp == NULL) { + return PROC_RETURNED; + } proc_fdlock(p); fdp = p->p_fd; for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp = fdp->fd_ofiles[i]; + struct fileproc *fp = fdp->fd_ofiles[i]; struct fileglob *fg; struct socket *so; struct inpcb *inp; @@ -692,17 +712,20 @@ shutdown_sockets_on_interface_proc_callout(proc_t p, void *arg) } fg = fp->f_fglob; - if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) + if (FILEGLOB_DTYPE(fg) != DTYPE_SOCKET) { continue; + } so = (struct socket *)fp->f_fglob->fg_data; - if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) + if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) { continue; + } inp = (struct inpcb *)so->so_pcb; - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { continue; + } socket_lock(so, 1); @@ -740,13 +763,13 @@ shutdown_sockets_on_interface_proc_callout(proc_t p, void *arg) } proc_fdunlock(p); - return (PROC_RETURNED); + return PROC_RETURNED; } void shutdown_sockets_on_interface(struct ifnet *ifp) { proc_iterate(PROC_ALLPROCLIST, - shutdown_sockets_on_interface_proc_callout, - ifp, NULL, NULL); + shutdown_sockets_on_interface_proc_callout, + ifp, NULL, NULL); } diff --git a/bsd/netinet/in_proto.c b/bsd/netinet/in_proto.c index 4a9d19819..bc3ce2a5e 100644 --- a/bsd/netinet/in_proto.c +++ b/bsd/netinet/in_proto.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -116,178 +116,179 @@ extern int icmp_dgram_ctloutput(struct socket *, struct sockopt *); struct domain *inetdomain = NULL; /* Thanks to PPP, this still needs to be exported */ -lck_mtx_t *inet_domain_mutex; +lck_mtx_t *inet_domain_mutex; static struct protosw inetsw[] = { -{ - .pr_type = 0, - .pr_protocol = 0, - .pr_init = ip_init, - .pr_drain = ip_drain, - .pr_usrreqs = &nousrreqs, -}, -{ - .pr_type = SOCK_DGRAM, - .pr_protocol = IPPROTO_UDP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK|PR_PCBLOCK| - PR_EVCONNINFO|PR_PRECONN_WRITE, - .pr_input = udp_input, - .pr_ctlinput = udp_ctlinput, - .pr_ctloutput = udp_ctloutput, - .pr_init = udp_init, - .pr_usrreqs = &udp_usrreqs, - .pr_lock = udp_lock, - .pr_unlock = udp_unlock, - .pr_getlock = udp_getlock, -}, -{ - .pr_type = SOCK_STREAM, - .pr_protocol = IPPROTO_TCP, - .pr_flags = PR_CONNREQUIRED|PR_WANTRCVD|PR_PCBLOCK| - PR_PROTOLOCK|PR_DISPOSE|PR_EVCONNINFO| - PR_PRECONN_WRITE|PR_DATA_IDEMPOTENT, - .pr_input = tcp_input, - .pr_ctlinput = tcp_ctlinput, - .pr_ctloutput = tcp_ctloutput, - .pr_init = tcp_init, - .pr_drain = tcp_drain, - .pr_usrreqs = &tcp_usrreqs, - .pr_lock = tcp_lock, - .pr_unlock = tcp_unlock, - .pr_getlock = tcp_getlock, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_RAW, - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_input = rip_input, - .pr_ctlinput = rip_ctlinput, - .pr_ctloutput = rip_ctloutput, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_ICMP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = icmp_input, - .pr_ctloutput = rip_ctloutput, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, -}, -{ - .pr_type = SOCK_DGRAM, - .pr_protocol = IPPROTO_ICMP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = icmp_input, - .pr_ctloutput = icmp_dgram_ctloutput, - .pr_usrreqs = &icmp_dgram_usrreqs, - .pr_unlock = rip_unlock, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_IGMP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = igmp_input, - .pr_ctloutput = rip_ctloutput, - .pr_init = igmp_init, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_GRE, - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_input = gre_input, - .pr_ctlinput = rip_ctlinput, - .pr_ctloutput = rip_ctloutput, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, -}, + { + .pr_type = 0, + .pr_protocol = 0, + .pr_init = ip_init, + .pr_drain = ip_drain, + .pr_usrreqs = &nousrreqs, + }, + { + .pr_type = SOCK_DGRAM, + .pr_protocol = IPPROTO_UDP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK | PR_PCBLOCK | + PR_EVCONNINFO | PR_PRECONN_WRITE, + .pr_input = udp_input, + 
.pr_ctlinput = udp_ctlinput, + .pr_ctloutput = udp_ctloutput, + .pr_init = udp_init, + .pr_usrreqs = &udp_usrreqs, + .pr_lock = udp_lock, + .pr_unlock = udp_unlock, + .pr_getlock = udp_getlock, + }, + { + .pr_type = SOCK_STREAM, + .pr_protocol = IPPROTO_TCP, + .pr_flags = PR_CONNREQUIRED | PR_WANTRCVD | PR_PCBLOCK | + PR_PROTOLOCK | PR_DISPOSE | PR_EVCONNINFO | + PR_PRECONN_WRITE | PR_DATA_IDEMPOTENT, + .pr_input = tcp_input, + .pr_ctlinput = tcp_ctlinput, + .pr_ctloutput = tcp_ctloutput, + .pr_init = tcp_init, + .pr_drain = tcp_drain, + .pr_usrreqs = &tcp_usrreqs, + .pr_lock = tcp_lock, + .pr_unlock = tcp_unlock, + .pr_getlock = tcp_getlock, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_RAW, + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_input = rip_input, + .pr_ctlinput = rip_ctlinput, + .pr_ctloutput = rip_ctloutput, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_ICMP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = icmp_input, + .pr_ctloutput = rip_ctloutput, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, + }, + { + .pr_type = SOCK_DGRAM, + .pr_protocol = IPPROTO_ICMP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = icmp_input, + .pr_ctloutput = icmp_dgram_ctloutput, + .pr_usrreqs = &icmp_dgram_usrreqs, + .pr_unlock = rip_unlock, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_IGMP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = igmp_input, + .pr_ctloutput = rip_ctloutput, + .pr_init = igmp_init, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_GRE, + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_input = gre_input, + .pr_ctlinput = rip_ctlinput, + .pr_ctloutput = rip_ctloutput, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, + }, #if IPSEC -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_AH, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK, - .pr_input = ah4_input, - .pr_usrreqs = &nousrreqs, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_AH, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK, + .pr_input = ah4_input, + .pr_usrreqs = &nousrreqs, + }, #if IPSEC_ESP -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_ESP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK, - .pr_input = esp4_input, - .pr_usrreqs = &nousrreqs, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_ESP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK, + .pr_input = esp4_input, + .pr_usrreqs = &nousrreqs, + }, #endif /* IPSEC_ESP */ -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_IPCOMP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK, - .pr_input = ipcomp4_input, - .pr_usrreqs = &nousrreqs, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_IPCOMP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK, + .pr_input = ipcomp4_input, + .pr_init = ipcomp_init, + .pr_usrreqs = &nousrreqs, + }, #endif /* IPSEC */ -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_IPV4, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = encap4_input, - .pr_ctloutput = rip_ctloutput, - .pr_init = encap4_init, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_IPV4, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = encap4_input, + .pr_ctloutput = rip_ctloutput, + .pr_init = encap4_init, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, + }, #if INET6 -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_IPV6, - .pr_flags = 
PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = encap4_input, - .pr_ctloutput = rip_ctloutput, - .pr_init = encap4_init, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_IPV6, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = encap4_input, + .pr_ctloutput = rip_ctloutput, + .pr_init = encap4_init, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, + }, #endif /* INET6 */ #if IPDIVERT -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_DIVERT, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PCBLOCK, - .pr_input = div_input, - .pr_ctloutput = ip_ctloutput, - .pr_init = div_init, - .pr_usrreqs = &div_usrreqs, - .pr_lock = div_lock, - .pr_unlock = div_unlock, - .pr_getlock = div_getlock, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_DIVERT, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PCBLOCK, + .pr_input = div_input, + .pr_ctloutput = ip_ctloutput, + .pr_init = div_init, + .pr_usrreqs = &div_usrreqs, + .pr_lock = div_lock, + .pr_unlock = div_unlock, + .pr_getlock = div_getlock, + }, #endif /* IPDIVERT */ /* raw wildcard */ -{ - .pr_type = SOCK_RAW, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = rip_input, - .pr_ctloutput = rip_ctloutput, - .pr_init = rip_init, - .pr_usrreqs = &rip_usrreqs, - .pr_unlock = rip_unlock, -}, + { + .pr_type = SOCK_RAW, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = rip_input, + .pr_ctloutput = rip_ctloutput, + .pr_init = rip_init, + .pr_usrreqs = &rip_usrreqs, + .pr_unlock = rip_unlock, + }, }; -static int in_proto_count = (sizeof (inetsw) / sizeof (struct protosw)); +static int in_proto_count = (sizeof(inetsw) / sizeof(struct protosw)); struct domain inetdomain_s = { - .dom_family = PF_INET, - .dom_flags = DOM_REENTRANT, - .dom_name = "internet", - .dom_init = in_dinit, - .dom_rtattach = in_inithead, - .dom_rtoffset = 32, - .dom_maxrtkey = sizeof (struct sockaddr_in), - .dom_protohdrlen = sizeof (struct tcpiphdr), + .dom_family = PF_INET, + .dom_flags = DOM_REENTRANT, + .dom_name = "internet", + .dom_init = in_dinit, + .dom_rtattach = in_inithead, + .dom_rtoffset = 32, + .dom_maxrtkey = sizeof(struct sockaddr_in), + .dom_protohdrlen = sizeof(struct tcpiphdr), }; /* Initialize the PF_INET domain, and add in the pre-defined protos */ @@ -313,10 +314,12 @@ in_dinit(struct domain *dp) /* * Attach first, then initialize; ip_init() needs raw IP handler. 
*/ - for (i = 0, pr = &inetsw[0]; i < in_proto_count; i++, pr++) + for (i = 0, pr = &inetsw[0]; i < in_proto_count; i++, pr++) { net_add_proto(pr, dp, 0); - for (i = 0, pr = &inetsw[0]; i < in_proto_count; i++, pr++) + } + for (i = 0, pr = &inetsw[0]; i < in_proto_count; i++, pr++) { net_init_proto(pr, dp); + } inet_domain_mutex = dp->dom_mtx; @@ -347,25 +350,25 @@ ip_proto_input(protocol_family_t protocol, mbuf_t packet_list) } SYSCTL_NODE(_net, PF_INET, inet, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Internet Family"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Internet Family"); SYSCTL_NODE(_net_inet, IPPROTO_IP, ip, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IP"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP"); SYSCTL_NODE(_net_inet, IPPROTO_ICMP, icmp, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "ICMP"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "ICMP"); SYSCTL_NODE(_net_inet, IPPROTO_UDP, udp, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "UDP"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "UDP"); SYSCTL_NODE(_net_inet, IPPROTO_TCP, tcp, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "TCP"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "TCP"); SYSCTL_NODE(_net_inet, IPPROTO_IGMP, igmp, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IGMP"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IGMP"); #if IPSEC SYSCTL_NODE(_net_inet, IPPROTO_AH, ipsec, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IPSEC"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPSEC"); #endif /* IPSEC */ SYSCTL_NODE(_net_inet, IPPROTO_RAW, raw, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "RAW"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "RAW"); #if IPDIVERT SYSCTL_NODE(_net_inet, IPPROTO_DIVERT, div, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "DIVERT"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "DIVERT"); #endif /* IPDIVERT */ diff --git a/bsd/netinet/in_rmx.c b/bsd/netinet/in_rmx.c index 4aa686402..074008870 100644 --- a/bsd/netinet/in_rmx.c +++ b/bsd/netinet/in_rmx.c @@ -90,7 +90,7 @@ extern int tvtohz(struct timeval *); -static int in_rtqtimo_run; /* in_rtqtimo is scheduled to run */ +static int in_rtqtimo_run; /* in_rtqtimo is scheduled to run */ static void in_rtqtimo(void *); static void in_sched_rtqtimo(struct timeval *); @@ -123,17 +123,20 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); RT_LOCK_ASSERT_HELD(rt); - if (verbose) - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + if (verbose) { + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); + } /* * For IP, all unicast non-host routes are automatically cloning. 
*/ - if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) + if (IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { rt->rt_flags |= RTF_MULTICAST; + } - if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) + if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) { rt->rt_flags |= RTF_PRCLONING; + } /* * A little bit of help for both IP output and input: @@ -159,8 +162,9 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, RT_CONVERT_LOCK(rt); IFA_LOCK_SPIN(rt->rt_ifa); if (satosin(rt->rt_ifa->ifa_addr)->sin_addr.s_addr == - sin->sin_addr.s_addr) + sin->sin_addr.s_addr) { rt->rt_flags |= RTF_LOCAL; + } IFA_UNLOCK(rt->rt_ifa); } } @@ -189,8 +193,9 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, char dbufc[MAX_IPv4_STR_LEN]; RT_LOCK(rt2); - if (verbose) - rt_str(rt2, dbufc, sizeof (dbufc), NULL, 0); + if (verbose) { + rt_str(rt2, dbufc, sizeof(dbufc), NULL, 0); + } if ((rt2->rt_flags & RTF_LLINFO) && (rt2->rt_flags & RTF_HOST) && @@ -228,8 +233,9 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, } } - if (!verbose) + if (!verbose) { goto done; + } if (ret != NULL) { if (flags != rt->rt_flags) { @@ -251,7 +257,7 @@ in_addroute(void *v_arg, void *n_arg, struct radix_node_head *head, rt->rt_flags, RTF_BITS); } done: - return (ret); + return ret; } static struct radix_node * @@ -267,13 +273,13 @@ in_deleteroute(void *v_arg, void *netmask_arg, struct radix_node_head *head) struct rtentry *rt = (struct rtentry *)rn; RT_LOCK(rt); - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); log(LOG_DEBUG, "%s: route to %s->%s->%s deleted, " "flags=%b\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "", rt->rt_flags, RTF_BITS); RT_UNLOCK(rt); } - return (rn); + return rn; } /* @@ -291,7 +297,7 @@ in_validate(struct radix_node *rn) if (rt_verbose > 2) { char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN]; - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); log(LOG_DEBUG, "%s: route to %s->%s->%s validated, " "flags=%b\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ? 
rt->rt_ifp->if_xname : "", @@ -308,7 +314,7 @@ in_validate(struct radix_node *rn) rt_setexpire(rt, 0); } } - return (rn); + return rn; } /* @@ -317,7 +323,7 @@ in_validate(struct radix_node *rn) static struct radix_node * in_matroute(void *v_arg, struct radix_node_head *head) { - return (in_matroute_args(v_arg, head, NULL, NULL)); + return in_matroute_args(v_arg, head, NULL, NULL); } /* @@ -336,26 +342,26 @@ in_matroute_args(void *v_arg, struct radix_node_head *head, in_validate(rn); RT_UNLOCK((struct rtentry *)rn); } - return (rn); + return rn; } /* one hour is ``really old'' */ -static uint32_t rtq_reallyold = 60*60; +static uint32_t rtq_reallyold = 60 * 60; SYSCTL_UINT(_net_inet_ip, IPCTL_RTEXPIRE, rtexpire, - CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_reallyold, 0, - "Default expiration time on dynamically learned routes"); + CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_reallyold, 0, + "Default expiration time on dynamically learned routes"); /* never automatically crank down to less */ static uint32_t rtq_minreallyold = 10; SYSCTL_UINT(_net_inet_ip, IPCTL_RTMINEXPIRE, rtminexpire, - CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_minreallyold, 0, - "Minimum time to attempt to hold onto dynamically learned routes"); + CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_minreallyold, 0, + "Minimum time to attempt to hold onto dynamically learned routes"); /* 128 cached routes is ``too many'' */ static uint32_t rtq_toomany = 128; SYSCTL_UINT(_net_inet_ip, IPCTL_RTMAXCACHE, rtmaxcache, - CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_toomany, 0, - "Upper limit on dynamically learned routes"); + CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_toomany, 0, + "Upper limit on dynamically learned routes"); /* * On last reference drop, mark the route as belonging to us so that it can be @@ -372,20 +378,24 @@ in_clsroute(struct radix_node *rn, struct radix_node_head *head) LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); RT_LOCK_ASSERT_HELD(rt); - if (!(rt->rt_flags & RTF_UP)) + if (!(rt->rt_flags & RTF_UP)) { return; /* prophylactic measures */ - - if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST) + } + if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST) { return; + } - if (rt->rt_flags & RTPRF_OURS) + if (rt->rt_flags & RTPRF_OURS) { return; + } - if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC))) + if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC))) { return; + } - if (verbose) - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + if (verbose) { + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); + } /* * Delete the route immediately if RTF_DELCLONE is set or @@ -419,9 +429,10 @@ in_clsroute(struct radix_node *rn, struct radix_node_head *head) RT_REMREF_LOCKED(rt); } else { RT_LOCK(rt); - if (!verbose) - rt_str(rt, dbuf, sizeof (dbuf), - gbuf, sizeof (gbuf)); + if (!verbose) { + rt_str(rt, dbuf, sizeof(dbuf), + gbuf, sizeof(gbuf)); + } log(LOG_ERR, "%s: error deleting route to " "%s->%s->%s, flags=%b, err=%d\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ?
@@ -477,8 +488,9 @@ in_rtqkill(struct radix_node *rn, void *rock) if (rt->rt_flags & RTPRF_OURS) { char dbuf[MAX_IPv4_STR_LEN], gbuf[MAX_IPv4_STR_LEN]; - if (verbose) - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + if (verbose) { + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); + } ap->found++; VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0); @@ -498,7 +510,7 @@ in_rtqkill(struct radix_node *rn, void *rock) rt->rt_ifp->if_xname : "", rt->rt_flags, RTF_BITS, ap->draining); } - RT_ADDREF_LOCKED(rt); /* for us to free below */ + RT_ADDREF_LOCKED(rt); /* for us to free below */ /* * Delete this route since we're done with it; * the route may be freed afterwards, so we @@ -513,9 +525,10 @@ in_rtqkill(struct radix_node *rn, void *rock) rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL); if (err != 0) { RT_LOCK(rt); - if (!verbose) - rt_str(rt, dbuf, sizeof (dbuf), - gbuf, sizeof (gbuf)); + if (!verbose) { + rt_str(rt, dbuf, sizeof(dbuf), + gbuf, sizeof(gbuf)); + } log(LOG_ERR, "%s: error deleting route to " "%s->%s->%s, flags=%b, err=%d\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ? @@ -549,10 +562,10 @@ in_rtqkill(struct radix_node *rn, void *rock) RT_UNLOCK(rt); } - return (0); + return 0; } -#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */ +#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */ static int rtq_timeout = RTQ_TIMEOUT; static void @@ -577,7 +590,7 @@ in_rtqtimo(void *targ) log(LOG_DEBUG, "%s: initial nextstop is T+%u seconds\n", __func__, rtq_timeout); } - bzero(&arg, sizeof (arg)); + bzero(&arg, sizeof(arg)); arg.rnh = rnh; arg.nextstop = timenow + rtq_timeout; rnh->rnh_walktree(rnh, in_rtqkill, &arg); @@ -598,8 +611,9 @@ in_rtqtimo(void *targ) ((timenow - last_adjusted_timeout) >= (uint64_t)rtq_timeout) && rtq_reallyold > rtq_minreallyold) { rtq_reallyold = 2 * rtq_reallyold / 3; - if (rtq_reallyold < rtq_minreallyold) + if (rtq_reallyold < rtq_minreallyold) { rtq_reallyold = rtq_minreallyold; + } last_adjusted_timeout = timenow; if (verbose) { @@ -615,10 +629,11 @@ in_rtqtimo(void *targ) atv.tv_sec = arg.nextstop - timenow; /* re-arm the timer only if there's work to do */ in_rtqtimo_run = 0; - if (ours > 0) + if (ours > 0) { in_sched_rtqtimo(&atv); - else if (verbose) + } else if (verbose) { log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__); + } lck_mtx_unlock(rnh_lock); } @@ -651,13 +666,14 @@ in_rtqdrain(void) struct radix_node_head *rnh; struct rtqk_arg arg; - if (rt_verbose > 1) + if (rt_verbose > 1) { log(LOG_DEBUG, "%s: draining routes\n", __func__); + } lck_mtx_lock(rnh_lock); rnh = rt_tables[AF_INET]; VERIFY(rnh != NULL); - bzero(&arg, sizeof (arg)); + bzero(&arg, sizeof(arg)); arg.rnh = rnh; arg.draining = 1; rnh->rnh_walktree(rnh, in_rtqkill, &arg); @@ -675,8 +691,9 @@ in_inithead(void **head, int off) /* If called from route_init(), make sure it is exactly once */ VERIFY(head != (void **)&rt_tables[AF_INET] || *head == NULL); - if (!rn_inithead(head, off)) - return (0); + if (!rn_inithead(head, off)) { + return 0; + } /* * We can get here from nfs_subs.c as well, in which case this @@ -684,16 +701,16 @@ in_inithead(void **head, int off) * this also takes care of the case when we're called more than * once from anywhere but route_init(). 
*/ - if (head != (void **)&rt_tables[AF_INET]) - return (1); /* only do this for the real routing table */ - + if (head != (void **)&rt_tables[AF_INET]) { + return 1; /* only do this for the real routing table */ + } rnh = *head; rnh->rnh_addaddr = in_addroute; rnh->rnh_deladdr = in_deleteroute; rnh->rnh_matchaddr = in_matroute; rnh->rnh_matchaddr_args = in_matroute_args; rnh->rnh_close = in_clsroute; - return (1); + return 1; } /* @@ -725,14 +742,14 @@ in_ifadownkill(struct radix_node *rn, void *xap) RT_LOCK(rt); if (rt->rt_ifa == ap->ifa && (ap->del || !(rt->rt_flags & RTF_STATIC))) { - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); if (verbose) { log(LOG_DEBUG, "%s: deleting route to %s->%s->%s, " "flags=%b\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "", rt->rt_flags, RTF_BITS); } - RT_ADDREF_LOCKED(rt); /* for us to free below */ + RT_ADDREF_LOCKED(rt); /* for us to free below */ /* * We need to disable the automatic prune that happens * in this case in rtrequest() because it will blow @@ -750,9 +767,10 @@ in_ifadownkill(struct radix_node *rn, void *xap) rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL); if (err != 0) { RT_LOCK(rt); - if (!verbose) - rt_str(rt, dbuf, sizeof (dbuf), - gbuf, sizeof (gbuf)); + if (!verbose) { + rt_str(rt, dbuf, sizeof(dbuf), + gbuf, sizeof(gbuf)); + } log(LOG_ERR, "%s: error deleting route to " "%s->%s->%s, flags=%b, err=%d\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ? @@ -764,7 +782,7 @@ in_ifadownkill(struct radix_node *rn, void *xap) } else { RT_UNLOCK(rt); } - return (0); + return 0; } int @@ -780,8 +798,9 @@ in_ifadown(struct ifaddr *ifa, int delete) * ifa from changing (e.g. in_ifinit), so it is safe * to access its ifa_addr without locking. 
*/ - if (ifa->ifa_addr->sa_family != AF_INET) - return (1); + if (ifa->ifa_addr->sa_family != AF_INET) { + return 1; + } /* trigger route cache reevaluation */ routegenid_inet_update(); @@ -793,5 +812,5 @@ in_ifadown(struct ifaddr *ifa, int delete) IFA_LOCK_SPIN(ifa); ifa->ifa_flags &= ~IFA_ROUTE; IFA_UNLOCK(ifa); - return (0); + return 0; } diff --git a/bsd/netinet/in_stat.c b/bsd/netinet/in_stat.c index c7f36defb..522bf5184 100644 --- a/bsd/netinet/in_stat.c +++ b/bsd/netinet/in_stat.c @@ -28,20 +28,21 @@ #include -#define IN_STAT_ACTIVITY_GRANULARITY 8 /* 8 sec granularity */ -#define IN_STAT_ACTIVITY_TIME_SEC_SHIFT 3 /* 8 sec per bit */ -#define IN_STAT_ACTIVITY_BITMAP_TOTAL_SIZE ((uint64_t) 128) -#define IN_STAT_ACTIVITY_BITMAP_FIELD_SIZE ((uint64_t) 64) -#define IN_STAT_ACTIVITY_TOTAL_TIME ((uint64_t) (8 * 128)) -#define IN_STAT_SET_MOST_SIGNIFICANT_BIT ((u_int64_t )0x8000000000000000) +#define IN_STAT_ACTIVITY_GRANULARITY 8 /* 8 sec granularity */ +#define IN_STAT_ACTIVITY_TIME_SEC_SHIFT 3 /* 8 sec per bit */ +#define IN_STAT_ACTIVITY_BITMAP_TOTAL_SIZE ((uint64_t) 128) +#define IN_STAT_ACTIVITY_BITMAP_FIELD_SIZE ((uint64_t) 64) +#define IN_STAT_ACTIVITY_TOTAL_TIME ((uint64_t) (8 * 128)) +#define IN_STAT_SET_MOST_SIGNIFICANT_BIT ((u_int64_t )0x8000000000000000) void in_stat_set_activity_bitmap(activity_bitmap_t *activity, uint64_t now) { uint64_t elapsed_time, slot; uint64_t *bitmap; - if (activity->start == 0) + if (activity->start == 0) { activity->start = now; + } elapsed_time = now - activity->start; slot = elapsed_time >> IN_STAT_ACTIVITY_TIME_SEC_SHIFT; @@ -70,10 +71,11 @@ in_stat_set_activity_bitmap(activity_bitmap_t *activity, uint64_t now) activity->bitmap[0] = activity->bitmap[1]; activity->bitmap[1] = 0; shift -= IN_STAT_ACTIVITY_BITMAP_FIELD_SIZE; - if (shift == IN_STAT_ACTIVITY_BITMAP_FIELD_SIZE) + if (shift == IN_STAT_ACTIVITY_BITMAP_FIELD_SIZE) { activity->bitmap[0] = 0; - else + } else { activity->bitmap[0] >>= shift; + } } else { uint64_t mask_lower, tmp; uint64_t b1_low, b0_high; @@ -89,7 +91,7 @@ in_stat_set_activity_bitmap(activity_bitmap_t *activity, uint64_t now) b0_high = (b1_low << (IN_STAT_ACTIVITY_BITMAP_FIELD_SIZE - - shift)); + shift)); activity->bitmap[0] |= b0_high; activity->bitmap[1] >>= shift; } diff --git a/bsd/netinet/in_stat.h b/bsd/netinet/in_stat.h index 0c31148c7..6c8e7529d 100644 --- a/bsd/netinet/in_stat.h +++ b/bsd/netinet/in_stat.h @@ -34,8 +34,8 @@ #include typedef struct activity_bitmap { - uint64_t start; /* Start timestamp using uptime */ - uint64_t bitmap[2]; /* 128 bit map, each bit == 8 sec */ + uint64_t start; /* Start timestamp using uptime */ + uint64_t bitmap[2]; /* 128 bit map, each bit == 8 sec */ } activity_bitmap_t; #endif /* PRIVATE */ diff --git a/bsd/netinet/in_systm.h b/bsd/netinet/in_systm.h index 1eb03c11b..638f14bbe 100644 --- a/bsd/netinet/in_systm.h +++ b/bsd/netinet/in_systm.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -79,10 +79,10 @@ * the bytes before transmission at each protocol level. The n_ types * represent the types with the bytes in ``high-ender'' order. */ -typedef __uint16_t n_short; /* short as received from the net */ -typedef __uint32_t n_long; /* long as received from the net */ +typedef __uint16_t n_short; /* short as received from the net */ +typedef __uint32_t n_long; /* long as received from the net */ -typedef __uint32_t n_time; /* ms since 00:00 GMT, byte rev */ +typedef __uint32_t n_time; /* ms since 00:00 GMT, byte rev */ #ifdef BSD_KERNEL_PRIVATE u_int32_t iptime(void); diff --git a/bsd/netinet/in_tclass.c b/bsd/netinet/in_tclass.c index 530161345..7d8f336bd 100644 --- a/bsd/netinet/in_tclass.c +++ b/bsd/netinet/in_tclass.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009-2018 Apple Inc. All rights reserved. + * Copyright (c) 2009-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -62,8 +62,8 @@ #include struct dcsp_msc_map { - u_int8_t dscp; - mbuf_svc_class_t msc; + u_int8_t dscp; + mbuf_svc_class_t msc; }; static inline int so_throttle_best_effort(struct socket *, struct ifnet *); static void set_dscp_to_wifi_ac_map(const struct dcsp_msc_map *, int); @@ -71,13 +71,13 @@ static errno_t dscp_msc_map_from_netsvctype_dscp_map(struct netsvctype_dscp_map struct dcsp_msc_map *); static lck_grp_attr_t *tclass_lck_grp_attr = NULL; /* mutex group attributes */ -static lck_grp_t *tclass_lck_grp = NULL; /* mutex group definition */ -static lck_attr_t *tclass_lck_attr = NULL; /* mutex attributes */ +static lck_grp_t *tclass_lck_grp = NULL; /* mutex group definition */ +static lck_attr_t *tclass_lck_attr = NULL; /* mutex attributes */ decl_lck_mtx_data(static, tclass_lock_data); static lck_mtx_t *tclass_lock = &tclass_lock_data; SYSCTL_NODE(_net, OID_AUTO, qos, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "QoS"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "QoS"); static int sysctl_default_netsvctype_to_dscp_map SYSCTL_HANDLER_ARGS; SYSCTL_PROC(_net_qos, OID_AUTO, default_netsvctype_to_dscp_map, @@ -103,7 +103,7 @@ SYSCTL_INT(_net_qos, OID_AUTO, verbose, * By Default allow all apps to get traffic class to DSCP mapping */ SYSCTL_NODE(_net_qos, OID_AUTO, policy, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, ""); int net_qos_policy_restricted = 0; SYSCTL_INT(_net_qos_policy, OID_AUTO, restricted, @@ -129,15 +129,15 @@ SYSCTL_INT(_net_qos_policy, OID_AUTO, capable_enabled, * Socket traffic class from network service type */ const int sotc_by_netservicetype[_NET_SERVICE_TYPE_COUNT] = { - SO_TC_BE, /* NET_SERVICE_TYPE_BE */ - SO_TC_BK_SYS, /* NET_SERVICE_TYPE_BK */ - SO_TC_VI, /* NET_SERVICE_TYPE_SIG */ - SO_TC_VI, /* NET_SERVICE_TYPE_VI */ - SO_TC_VO, /* NET_SERVICE_TYPE_VO */ - SO_TC_RV, /* NET_SERVICE_TYPE_RV */ - SO_TC_AV, /* NET_SERVICE_TYPE_AV */ - SO_TC_OAM, /* NET_SERVICE_TYPE_OAM */ - SO_TC_RD /* NET_SERVICE_TYPE_RD */ + SO_TC_BE, /* NET_SERVICE_TYPE_BE */ + 
SO_TC_BK_SYS, /* NET_SERVICE_TYPE_BK */ + SO_TC_VI, /* NET_SERVICE_TYPE_SIG */ + SO_TC_VI, /* NET_SERVICE_TYPE_VI */ + SO_TC_VO, /* NET_SERVICE_TYPE_VO */ + SO_TC_RV, /* NET_SERVICE_TYPE_RV */ + SO_TC_AV, /* NET_SERVICE_TYPE_AV */ + SO_TC_OAM, /* NET_SERVICE_TYPE_OAM */ + SO_TC_RD /* NET_SERVICE_TYPE_RD */ }; /* @@ -145,15 +145,15 @@ const int sotc_by_netservicetype[_NET_SERVICE_TYPE_COUNT] = { */ static const struct netsvctype_dscp_map fastlane_netsvctype_dscp_map[_NET_SERVICE_TYPE_COUNT] = { - { NET_SERVICE_TYPE_BE, _DSCP_DF }, - { NET_SERVICE_TYPE_BK, _DSCP_AF11 }, - { NET_SERVICE_TYPE_SIG, _DSCP_CS3 }, - { NET_SERVICE_TYPE_VI, _DSCP_AF41 }, - { NET_SERVICE_TYPE_VO, _DSCP_EF }, - { NET_SERVICE_TYPE_RV, _DSCP_CS4 }, - { NET_SERVICE_TYPE_AV, _DSCP_AF31 }, - { NET_SERVICE_TYPE_OAM, _DSCP_CS2 }, - { NET_SERVICE_TYPE_RD, _DSCP_AF21 }, + { NET_SERVICE_TYPE_BE, _DSCP_DF }, + { NET_SERVICE_TYPE_BK, _DSCP_AF11 }, + { NET_SERVICE_TYPE_SIG, _DSCP_CS3 }, + { NET_SERVICE_TYPE_VI, _DSCP_AF41 }, + { NET_SERVICE_TYPE_VO, _DSCP_EF }, + { NET_SERVICE_TYPE_RV, _DSCP_CS4 }, + { NET_SERVICE_TYPE_AV, _DSCP_AF31 }, + { NET_SERVICE_TYPE_OAM, _DSCP_CS2 }, + { NET_SERVICE_TYPE_RD, _DSCP_AF21 }, }; static struct net_qos_dscp_map default_net_qos_dscp_map; @@ -161,7 +161,7 @@ static struct net_qos_dscp_map default_net_qos_dscp_map; /* * The size is one more than the max because DSCP start at zero */ -#define DSCP_ARRAY_SIZE (_MAX_DSCP + 1) +#define DSCP_ARRAY_SIZE (_MAX_DSCP + 1) /* * The DSCP to UP mapping (via mbuf service class) for WiFi follows is the mapping @@ -174,79 +174,79 @@ static struct net_qos_dscp_map default_net_qos_dscp_map; * option instead to select L2 QoS marking instead of IP_TOS or IPV6_TCLASS. */ static const struct dcsp_msc_map default_dscp_to_wifi_ac_map[] = { - { _DSCP_DF, MBUF_SC_BE }, /* RFC 2474 Standard */ - { 1, MBUF_SC_BE }, /* */ - { 2, MBUF_SC_BE }, /* */ - { 3, MBUF_SC_BE }, /* */ - { 4, MBUF_SC_BE }, /* */ - { 5, MBUF_SC_BE }, /* */ - { 6, MBUF_SC_BE }, /* */ - { 7, MBUF_SC_BE }, /* */ - - { _DSCP_CS1, MBUF_SC_BK }, /* RFC 3662 Low-Priority Data */ - { 9, MBUF_SC_BK }, /* */ - { _DSCP_AF11, MBUF_SC_BK }, /* RFC 2597 High-Throughput Data */ - { 11, MBUF_SC_BK }, /* */ - { _DSCP_AF12, MBUF_SC_BK }, /* RFC 2597 High-Throughput Data */ - { 13, MBUF_SC_BK }, /* */ - { _DSCP_AF13, MBUF_SC_BK }, /* RFC 2597 High-Throughput Data */ - { 15, MBUF_SC_BK }, /* */ - - { _DSCP_CS2, MBUF_SC_BK }, /* RFC 4594 OAM */ - { 17, MBUF_SC_BK }, /* */ - { _DSCP_AF21, MBUF_SC_BK }, /* RFC 2597 Low-Latency Data */ - { 19, MBUF_SC_BK }, /* */ - { _DSCP_AF22, MBUF_SC_BK }, /* RFC 2597 Low-Latency Data */ - { 21, MBUF_SC_BK }, /* */ - { _DSCP_AF23, MBUF_SC_BK }, /* RFC 2597 Low-Latency Data */ - { 23, MBUF_SC_BK }, /* */ - - { _DSCP_CS3, MBUF_SC_BE }, /* RFC 2474 Broadcast Video */ - { 25, MBUF_SC_BE }, /* */ - { _DSCP_AF31, MBUF_SC_BE }, /* RFC 2597 Multimedia Streaming */ - { 27, MBUF_SC_BE }, /* */ - { _DSCP_AF32, MBUF_SC_BE }, /* RFC 2597 Multimedia Streaming */ - { 29, MBUF_SC_BE }, /* */ - { _DSCP_AF33, MBUF_SC_BE }, /* RFC 2597 Multimedia Streaming */ - { 31, MBUF_SC_BE }, /* */ - - { _DSCP_CS4, MBUF_SC_VI }, /* RFC 2474 Real-Time Interactive */ - { 33, MBUF_SC_VI }, /* */ - { _DSCP_AF41, MBUF_SC_VI }, /* RFC 2597 Multimedia Conferencing */ - { 35, MBUF_SC_VI }, /* */ - { _DSCP_AF42, MBUF_SC_VI }, /* RFC 2597 Multimedia Conferencing */ - { 37, MBUF_SC_VI }, /* */ - { _DSCP_AF43, MBUF_SC_VI }, /* RFC 2597 Multimedia Conferencing */ - { 39, MBUF_SC_VI }, /* */ - - { _DSCP_CS5, MBUF_SC_VI }, /* RFC 2474 
Signaling */ - { 41, MBUF_SC_VI }, /* */ - { 42, MBUF_SC_VI }, /* */ - { 43, MBUF_SC_VI }, /* */ - { _DSCP_VA, MBUF_SC_VI }, /* RFC 5865 VOICE-ADMIT */ - { 45, MBUF_SC_VI }, /* */ - { _DSCP_EF, MBUF_SC_VI }, /* RFC 3246 Telephony */ - { 47, MBUF_SC_VI }, /* */ - - { _DSCP_CS6, MBUF_SC_VO }, /* Wi-Fi WMM Certification: Chariot */ - { 49, MBUF_SC_VO }, /* */ - { 50, MBUF_SC_VO }, /* */ - { 51, MBUF_SC_VO }, /* */ - { 52, MBUF_SC_VO }, /* Wi-Fi WMM Certification: Sigma */ - { 53, MBUF_SC_VO }, /* */ - { 54, MBUF_SC_VO }, /* */ - { 55, MBUF_SC_VO }, /* */ - - { _DSCP_CS7, MBUF_SC_VO }, /* Wi-Fi WMM Certification: Chariot */ - { 57, MBUF_SC_VO }, /* */ - { 58, MBUF_SC_VO }, /* */ - { 59, MBUF_SC_VO }, /* */ - { 60, MBUF_SC_VO }, /* */ - { 61, MBUF_SC_VO }, /* */ - { 62, MBUF_SC_VO }, /* */ - { 63, MBUF_SC_VO }, /* */ - - { 255, MBUF_SC_UNSPEC } /* invalid DSCP to mark last entry */ + { _DSCP_DF, MBUF_SC_BE }, /* RFC 2474 Standard */ + { 1, MBUF_SC_BE }, /* */ + { 2, MBUF_SC_BE }, /* */ + { 3, MBUF_SC_BE }, /* */ + { 4, MBUF_SC_BE }, /* */ + { 5, MBUF_SC_BE }, /* */ + { 6, MBUF_SC_BE }, /* */ + { 7, MBUF_SC_BE }, /* */ + + { _DSCP_CS1, MBUF_SC_BK }, /* RFC 3662 Low-Priority Data */ + { 9, MBUF_SC_BK }, /* */ + { _DSCP_AF11, MBUF_SC_BK }, /* RFC 2597 High-Throughput Data */ + { 11, MBUF_SC_BK }, /* */ + { _DSCP_AF12, MBUF_SC_BK }, /* RFC 2597 High-Throughput Data */ + { 13, MBUF_SC_BK }, /* */ + { _DSCP_AF13, MBUF_SC_BK }, /* RFC 2597 High-Throughput Data */ + { 15, MBUF_SC_BK }, /* */ + + { _DSCP_CS2, MBUF_SC_BK }, /* RFC 4594 OAM */ + { 17, MBUF_SC_BK }, /* */ + { _DSCP_AF21, MBUF_SC_BK }, /* RFC 2597 Low-Latency Data */ + { 19, MBUF_SC_BK }, /* */ + { _DSCP_AF22, MBUF_SC_BK }, /* RFC 2597 Low-Latency Data */ + { 21, MBUF_SC_BK }, /* */ + { _DSCP_AF23, MBUF_SC_BK }, /* RFC 2597 Low-Latency Data */ + { 23, MBUF_SC_BK }, /* */ + + { _DSCP_CS3, MBUF_SC_BE }, /* RFC 2474 Broadcast Video */ + { 25, MBUF_SC_BE }, /* */ + { _DSCP_AF31, MBUF_SC_BE }, /* RFC 2597 Multimedia Streaming */ + { 27, MBUF_SC_BE }, /* */ + { _DSCP_AF32, MBUF_SC_BE }, /* RFC 2597 Multimedia Streaming */ + { 29, MBUF_SC_BE }, /* */ + { _DSCP_AF33, MBUF_SC_BE }, /* RFC 2597 Multimedia Streaming */ + { 31, MBUF_SC_BE }, /* */ + + { _DSCP_CS4, MBUF_SC_VI }, /* RFC 2474 Real-Time Interactive */ + { 33, MBUF_SC_VI }, /* */ + { _DSCP_AF41, MBUF_SC_VI }, /* RFC 2597 Multimedia Conferencing */ + { 35, MBUF_SC_VI }, /* */ + { _DSCP_AF42, MBUF_SC_VI }, /* RFC 2597 Multimedia Conferencing */ + { 37, MBUF_SC_VI }, /* */ + { _DSCP_AF43, MBUF_SC_VI }, /* RFC 2597 Multimedia Conferencing */ + { 39, MBUF_SC_VI }, /* */ + + { _DSCP_CS5, MBUF_SC_VI }, /* RFC 2474 Signaling */ + { 41, MBUF_SC_VI }, /* */ + { 42, MBUF_SC_VI }, /* */ + { 43, MBUF_SC_VI }, /* */ + { _DSCP_VA, MBUF_SC_VI }, /* RFC 5865 VOICE-ADMIT */ + { 45, MBUF_SC_VI }, /* */ + { _DSCP_EF, MBUF_SC_VI }, /* RFC 3246 Telephony */ + { 47, MBUF_SC_VI }, /* */ + + { _DSCP_CS6, MBUF_SC_VO }, /* Wi-Fi WMM Certification: Chariot */ + { 49, MBUF_SC_VO }, /* */ + { 50, MBUF_SC_VO }, /* */ + { 51, MBUF_SC_VO }, /* */ + { 52, MBUF_SC_VO }, /* Wi-Fi WMM Certification: Sigma */ + { 53, MBUF_SC_VO }, /* */ + { 54, MBUF_SC_VO }, /* */ + { 55, MBUF_SC_VO }, /* */ + + { _DSCP_CS7, MBUF_SC_VO }, /* Wi-Fi WMM Certification: Chariot */ + { 57, MBUF_SC_VO }, /* */ + { 58, MBUF_SC_VO }, /* */ + { 59, MBUF_SC_VO }, /* */ + { 60, MBUF_SC_VO }, /* */ + { 61, MBUF_SC_VO }, /* */ + { 62, MBUF_SC_VO }, /* */ + { 63, MBUF_SC_VO }, /* */ + + { 255, MBUF_SC_UNSPEC } /* invalid DSCP to mark last entry */ }; 
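The default_dscp_to_wifi_ac_map table above ends with a { 255, MBUF_SC_UNSPEC } sentinel (255 is not a valid 6-bit DSCP), which is how set_dscp_to_wifi_ac_map() — declared earlier but not shown in this hunk — knows where the list stops when expanding it into wifi_dscp_to_msc_array. A hedged sketch of that expansion pattern, with a stub enum standing in for the real mbuf_svc_class_t since only the map layout appears here and the kernel's own loop is an assumption:

/*
 * Hedged sketch of expanding a sentinel-terminated dcsp_msc_map into a
 * direct-index table. set_dscp_to_wifi_ac_map() itself is not in this
 * hunk, so the loop below is an assumption; the stub enum stands in
 * for the real mbuf_svc_class_t.
 */
#include <stdint.h>

#define MAX_DSCP        63              /* stands in for _MAX_DSCP */
#define DSCP_ARRAY_SIZE (MAX_DSCP + 1)  /* DSCP values start at zero */

typedef enum {
	MBUF_SC_UNSPEC, MBUF_SC_BK, MBUF_SC_BE, MBUF_SC_VI, MBUF_SC_VO
} mbuf_svc_class_t;

struct dcsp_msc_map {
	uint8_t          dscp;
	mbuf_svc_class_t msc;
};

static mbuf_svc_class_t dscp_to_msc[DSCP_ARRAY_SIZE];

static void
build_dscp_table(const struct dcsp_msc_map *map)
{
	/* Default every code point to best effort, then apply entries. */
	for (int i = 0; i < DSCP_ARRAY_SIZE; i++)
		dscp_to_msc[i] = MBUF_SC_BE;
	/* An out-of-range dscp (the 255 sentinel) terminates the map. */
	for (; map->dscp <= MAX_DSCP; map++)
		dscp_to_msc[map->dscp] = map->msc;
}

After expansion a lookup is a single array index: dscp_to_msc[46] (_DSCP_EF) yields MBUF_SC_VI, matching the RFC 3246 Telephony row in the table above.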
mbuf_svc_class_t wifi_dscp_to_msc_array[DSCP_ARRAY_SIZE]; @@ -256,7 +256,7 @@ mbuf_svc_class_t wifi_dscp_to_msc_array[DSCP_ARRAY_SIZE]; * seconds, the background connections can switch to foreground TCP * congestion control. */ -#define TCP_BG_SWITCH_TIME 2 /* seconds */ +#define TCP_BG_SWITCH_TIME 2 /* seconds */ #if (DEVELOPMENT || DEBUG) @@ -266,11 +266,11 @@ static TAILQ_HEAD(, tclass_for_proc) tfp_head = TAILQ_HEAD_INITIALIZER(tfp_head); struct tclass_for_proc { - TAILQ_ENTRY(tclass_for_proc) tfp_link; - int tfp_class; - pid_t tfp_pid; - char tfp_pname[(2 * MAXCOMLEN) + 1]; - u_int32_t tfp_qos_mode; + TAILQ_ENTRY(tclass_for_proc) tfp_link; + int tfp_class; + pid_t tfp_pid; + char tfp_pname[(2 * MAXCOMLEN) + 1]; + u_int32_t tfp_qos_mode; }; static int get_pid_tclass(struct so_tcdbg *); @@ -291,10 +291,11 @@ find_tfp_by_pid(pid_t pid) struct tclass_for_proc *tfp; TAILQ_FOREACH(tfp, &tfp_head, tfp_link) { - if (tfp->tfp_pid == pid) + if (tfp->tfp_pid == pid) { break; + } } - return (tfp); + return tfp; } /* @@ -307,17 +308,18 @@ find_tfp_by_pname(const char *pname) TAILQ_FOREACH(tfp, &tfp_head, tfp_link) { if (strncmp(pname, tfp->tfp_pname, - sizeof (tfp->tfp_pname)) == 0) + sizeof(tfp->tfp_pname)) == 0) { break; + } } - return (tfp); + return tfp; } __private_extern__ void set_tclass_for_curr_proc(struct socket *so) { struct tclass_for_proc *tfp = NULL; - proc_t p = current_proc(); /* Not ref counted */ + proc_t p = current_proc(); /* Not ref counted */ pid_t pid = proc_pid(p); char *pname = proc_best_name(p); @@ -326,14 +328,16 @@ set_tclass_for_curr_proc(struct socket *so) TAILQ_FOREACH(tfp, &tfp_head, tfp_link) { if ((tfp->tfp_pid == pid) || (tfp->tfp_pid == -1 && strncmp(pname, tfp->tfp_pname, - sizeof (tfp->tfp_pname)) == 0)) { - if (tfp->tfp_class != SO_TC_UNSPEC) + sizeof(tfp->tfp_pname)) == 0)) { + if (tfp->tfp_class != SO_TC_UNSPEC) { so->so_traffic_class = tfp->tfp_class; + } - if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_ENABLE) + if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_ENABLE) { so->so_flags1 |= SOF1_QOSMARKING_ALLOWED; - else if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_DISABLE) + } else if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_DISABLE) { so->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED; + } break; } } @@ -355,8 +359,9 @@ purge_tclass_for_proc(void) TAILQ_FOREACH_SAFE(tfp, &tfp_head, tfp_link, tvar) { proc_t p; - if (tfp->tfp_pid == -1) + if (tfp->tfp_pid == -1) { continue; + } if ((p = proc_find(tfp->tfp_pid)) == NULL) { tfp_count--; TAILQ_REMOVE(&tfp_head, tfp, tfp_link); @@ -369,7 +374,7 @@ purge_tclass_for_proc(void) lck_mtx_unlock(tclass_lock); - return (error); + return error; } /* @@ -379,8 +384,9 @@ purge_tclass_for_proc(void) static void free_tclass_for_proc(struct tclass_for_proc *tfp) { - if (tfp == NULL) + if (tfp == NULL) { return; + } tfp_count--; TAILQ_REMOVE(&tfp_head, tfp, tfp_link); _FREE(tfp, M_TEMP); @@ -403,8 +409,7 @@ flush_tclass_for_proc(void) lck_mtx_unlock(tclass_lock); - return (error); - + return error; } /* @@ -415,12 +420,14 @@ alloc_tclass_for_proc(pid_t pid, const char *pname) { struct tclass_for_proc *tfp; - if (pid == -1 && pname == NULL) - return (NULL); + if (pid == -1 && pname == NULL) { + return NULL; + } - tfp = _MALLOC(sizeof (struct tclass_for_proc), M_TEMP, M_NOWAIT|M_ZERO); - if (tfp == NULL) - return (NULL); + tfp = _MALLOC(sizeof(struct tclass_for_proc), M_TEMP, M_NOWAIT | M_ZERO); + if (tfp == NULL) { + return NULL; + } tfp->tfp_pid = pid; /* @@ -430,13 +437,13 @@ alloc_tclass_for_proc(pid_t pid, const char 
*pname) if (pid != -1) { TAILQ_INSERT_HEAD(&tfp_head, tfp, tfp_link); } else { - strlcpy(tfp->tfp_pname, pname, sizeof (tfp->tfp_pname)); + strlcpy(tfp->tfp_pname, pname, sizeof(tfp->tfp_pname)); TAILQ_INSERT_TAIL(&tfp_head, tfp, tfp_link); } tfp_count++; - return (tfp); + return tfp; } /* @@ -488,27 +495,31 @@ set_pid_tclass(struct so_tcdbg *so_tcdbg) fp = fdp->fd_ofiles[i]; if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || - FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) + FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) { continue; + } so = (struct socket *)fp->f_fglob->fg_data; - if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) + if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) { continue; + } socket_lock(so, 1); - if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_ENABLE) + if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_ENABLE) { so->so_flags1 |= SOF1_QOSMARKING_ALLOWED; - else if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_DISABLE) + } else if (tfp->tfp_qos_mode == QOS_MODE_MARKING_POLICY_DISABLE) { so->so_flags1 &= ~SOF1_QOSMARKING_ALLOWED; + } socket_unlock(so, 1); - if (netsvctype != _NET_SERVICE_TYPE_UNSPEC) + if (netsvctype != _NET_SERVICE_TYPE_UNSPEC) { error = sock_setsockopt(so, SOL_SOCKET, SO_NET_SERVICE_TYPE, &netsvctype, sizeof(int)); - if (tclass != SO_TC_UNSPEC) + } + if (tclass != SO_TC_UNSPEC) { error = sock_setsockopt(so, SOL_SOCKET, SO_TRAFFIC_CLASS, &tclass, sizeof(int)); - + } } proc_fdunlock(p); @@ -516,10 +527,11 @@ set_pid_tclass(struct so_tcdbg *so_tcdbg) error = 0; done: - if (p != NULL) + if (p != NULL) { proc_rele(p); + } - return (error); + return error; } int @@ -547,7 +559,7 @@ set_pname_tclass(struct so_tcdbg *so_tcdbg) error = 0; done: - return (error); + return error; } static int @@ -575,12 +587,13 @@ flush_pid_tclass(struct so_tcdbg *so_tcdbg) fp = fdp->fd_ofiles[i]; if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 || - FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) + FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) { continue; + } so = (struct socket *)fp->f_fglob->fg_data; error = sock_setsockopt(so, SOL_SOCKET, SO_FLUSH, &tclass, - sizeof (tclass)); + sizeof(tclass)); if (error != 0) { printf("%s: setsockopt(SO_FLUSH) (so=0x%llx, fd=%d, " "tclass=%d) failed %d\n", __func__, @@ -593,10 +606,11 @@ flush_pid_tclass(struct so_tcdbg *so_tcdbg) error = 0; done: - if (p != PROC_NULL) + if (p != PROC_NULL) { proc_rele(p); + } - return (error); + return error; } int @@ -626,10 +640,11 @@ get_pid_tclass(struct so_tcdbg *so_tcdbg) } lck_mtx_unlock(tclass_lock); done: - if (p != NULL) + if (p != NULL) { proc_rele(p); + } - return (error); + return error; } int @@ -651,7 +666,7 @@ get_pname_tclass(struct so_tcdbg *so_tcdbg) } lck_mtx_unlock(tclass_lock); - return (error); + return error; } static int @@ -663,10 +678,11 @@ delete_tclass_for_pid_pname(struct so_tcdbg *so_tcdbg) lck_mtx_lock(tclass_lock); - if (pid != -1) + if (pid != -1) { tfp = find_tfp_by_pid(pid); - else + } else { tfp = find_tfp_by_pname(so_tcdbg->so_tcdbg_pname); + } if (tfp != NULL) { free_tclass_for_proc(tfp); @@ -675,7 +691,7 @@ delete_tclass_for_pid_pname(struct so_tcdbg *so_tcdbg) lck_mtx_unlock(tclass_lock); - return (error); + return error; } /* @@ -686,44 +702,45 @@ so_set_tcdbg(struct socket *so, struct so_tcdbg *so_tcdbg) { int error = 0; - if ((so->so_state & SS_PRIV) == 0) - return (EPERM); + if ((so->so_state & SS_PRIV) == 0) { + return EPERM; + } socket_unlock(so, 0); switch (so_tcdbg->so_tcdbg_cmd) { - case SO_TCDBG_PID: - error = 
set_pid_tclass(so_tcdbg); - break; + case SO_TCDBG_PID: + error = set_pid_tclass(so_tcdbg); + break; - case SO_TCDBG_PNAME: - error = set_pname_tclass(so_tcdbg); - break; + case SO_TCDBG_PNAME: + error = set_pname_tclass(so_tcdbg); + break; - case SO_TCDBG_PURGE: - error = purge_tclass_for_proc(); - break; + case SO_TCDBG_PURGE: + error = purge_tclass_for_proc(); + break; - case SO_TCDBG_FLUSH: - error = flush_tclass_for_proc(); - break; + case SO_TCDBG_FLUSH: + error = flush_tclass_for_proc(); + break; - case SO_TCDBG_DELETE: - error = delete_tclass_for_pid_pname(so_tcdbg); - break; + case SO_TCDBG_DELETE: + error = delete_tclass_for_pid_pname(so_tcdbg); + break; - case SO_TCDBG_TCFLUSH_PID: - error = flush_pid_tclass(so_tcdbg); - break; + case SO_TCDBG_TCFLUSH_PID: + error = flush_pid_tclass(so_tcdbg); + break; - default: - error = EINVAL; - break; + default: + error = EINVAL; + break; } socket_lock(so, 0); - return (error); + return error; } /* @@ -737,78 +754,80 @@ sogetopt_tcdbg(struct socket *so, struct sockopt *sopt) void *buf = NULL; size_t len = sopt->sopt_valsize; - error = sooptcopyin(sopt, &so_tcdbg, sizeof (struct so_tcdbg), - sizeof (struct so_tcdbg)); - if (error != 0) - return (error); + error = sooptcopyin(sopt, &so_tcdbg, sizeof(struct so_tcdbg), + sizeof(struct so_tcdbg)); + if (error != 0) { + return error; + } sopt->sopt_valsize = len; socket_unlock(so, 0); switch (so_tcdbg.so_tcdbg_cmd) { - case SO_TCDBG_PID: - error = get_pid_tclass(&so_tcdbg); - break; + case SO_TCDBG_PID: + error = get_pid_tclass(&so_tcdbg); + break; - case SO_TCDBG_PNAME: - error = get_pname_tclass(&so_tcdbg); - break; + case SO_TCDBG_PNAME: + error = get_pname_tclass(&so_tcdbg); + break; - case SO_TCDBG_COUNT: - lck_mtx_lock(tclass_lock); - so_tcdbg.so_tcdbg_count = tfp_count; - lck_mtx_unlock(tclass_lock); - break; + case SO_TCDBG_COUNT: + lck_mtx_lock(tclass_lock); + so_tcdbg.so_tcdbg_count = tfp_count; + lck_mtx_unlock(tclass_lock); + break; - case SO_TCDBG_LIST: { - struct tclass_for_proc *tfp; - int n, alloc_count; - struct so_tcdbg *ptr; + case SO_TCDBG_LIST: { + struct tclass_for_proc *tfp; + int n, alloc_count; + struct so_tcdbg *ptr; - lck_mtx_lock(tclass_lock); - if ((alloc_count = tfp_count) == 0) { - lck_mtx_unlock(tclass_lock); - error = EINVAL; - break; - } - len = alloc_count * sizeof (struct so_tcdbg); + lck_mtx_lock(tclass_lock); + if ((alloc_count = tfp_count) == 0) { lck_mtx_unlock(tclass_lock); + error = EINVAL; + break; + } + len = alloc_count * sizeof(struct so_tcdbg); + lck_mtx_unlock(tclass_lock); - buf = _MALLOC(len, M_TEMP, M_WAITOK | M_ZERO); - if (buf == NULL) { - error = ENOBUFS; + buf = _MALLOC(len, M_TEMP, M_WAITOK | M_ZERO); + if (buf == NULL) { + error = ENOBUFS; + break; + } + + lck_mtx_lock(tclass_lock); + n = 0; + ptr = (struct so_tcdbg *)buf; + TAILQ_FOREACH(tfp, &tfp_head, tfp_link) { + if (++n > alloc_count) { break; } - - lck_mtx_lock(tclass_lock); - n = 0; - ptr = (struct so_tcdbg *)buf; - TAILQ_FOREACH(tfp, &tfp_head, tfp_link) { - if (++n > alloc_count) - break; - if (tfp->tfp_pid != -1) { - ptr->so_tcdbg_cmd = SO_TCDBG_PID; - ptr->so_tcdbg_pid = tfp->tfp_pid; - } else { - ptr->so_tcdbg_cmd = SO_TCDBG_PNAME; - ptr->so_tcdbg_pid = -1; - strlcpy(ptr->so_tcdbg_pname, - tfp->tfp_pname, - sizeof (ptr->so_tcdbg_pname)); - } - ptr->so_tcdbg_tclass = tfp->tfp_class; - ptr->so_tcbbg_qos_mode = tfp->tfp_qos_mode; - ptr++; + if (tfp->tfp_pid != -1) { + ptr->so_tcdbg_cmd = SO_TCDBG_PID; + ptr->so_tcdbg_pid = tfp->tfp_pid; + } else { + ptr->so_tcdbg_cmd = 
SO_TCDBG_PNAME; + ptr->so_tcdbg_pid = -1; + strlcpy(ptr->so_tcdbg_pname, + tfp->tfp_pname, + sizeof(ptr->so_tcdbg_pname)); } + ptr->so_tcdbg_tclass = tfp->tfp_class; + ptr->so_tcbbg_qos_mode = tfp->tfp_qos_mode; + ptr++; + } - lck_mtx_unlock(tclass_lock); - } - break; + lck_mtx_unlock(tclass_lock); + } + break; - default: - error = EINVAL; - break; + default: + error = EINVAL; + break; } socket_lock(so, 0); @@ -816,13 +835,13 @@ sogetopt_tcdbg(struct socket *so, struct sockopt *sopt) if (error == 0) { if (buf == NULL) { error = sooptcopyout(sopt, &so_tcdbg, - sizeof (struct so_tcdbg)); + sizeof(struct so_tcdbg)); } else { error = sooptcopyout(sopt, buf, len); _FREE(buf, M_TEMP); } } - return (error); + return error; } #endif /* (DEVELOPMENT || DEBUG) */ @@ -834,36 +853,39 @@ so_get_netsvc_marking_level(struct socket *so) struct ifnet *ifp = NULL; switch (SOCK_DOM(so)) { - case PF_INET: { - struct inpcb *inp = sotoinpcb(so); + case PF_INET: { + struct inpcb *inp = sotoinpcb(so); - if (inp != NULL) - ifp = inp->inp_last_outifp; - break; + if (inp != NULL) { + ifp = inp->inp_last_outifp; } - case PF_INET6: { - struct in6pcb *in6p = sotoin6pcb(so); + break; + } + case PF_INET6: { + struct in6pcb *in6p = sotoin6pcb(so); - if (in6p != NULL) - ifp = in6p->in6p_last_outifp; - break; + if (in6p != NULL) { + ifp = in6p->in6p_last_outifp; } - default: - break; + break; + } + default: + break; } if (ifp != NULL) { if ((ifp->if_eflags & (IFEF_QOSMARKING_ENABLED | IFEF_QOSMARKING_CAPABLE)) == (IFEF_QOSMARKING_ENABLED | IFEF_QOSMARKING_CAPABLE)) { - if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) + if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { marking_level = NETSVC_MRKNG_LVL_L3L2_ALL; - else + } else { marking_level = NETSVC_MRKNG_LVL_L3L2_BK; + } } else { marking_level = NETSVC_MRKNG_LVL_L2; } } - return (marking_level); + return marking_level; } __private_extern__ int @@ -885,8 +907,9 @@ so_set_traffic_class(struct socket *so, int optval) optval = SO_TC_VO; break; default: - if (!SO_VALID_TC(optval)) + if (!SO_VALID_TC(optval)) { error = EINVAL; + } break; } @@ -898,8 +921,9 @@ so_set_traffic_class(struct socket *so, int optval) if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) && - SOCK_TYPE(so) == SOCK_STREAM) + SOCK_TYPE(so) == SOCK_STREAM) { set_tcp_stream_priority(so); + } if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6) && @@ -909,8 +933,9 @@ so_set_traffic_class(struct socket *so, int optval) * If the app switches from BK_SYS to something * else, resume the socket if it was suspended. 
*/ - if (oldval == SO_TC_BK_SYS) + if (oldval == SO_TC_BK_SYS) { inp_reset_fc_state(so->so_pcb); + } SOTHROTTLELOG("throttle[%d]: so 0x%llx " "[%d,%d] opportunistic %s\n", so->last_pid, @@ -920,7 +945,7 @@ so_set_traffic_class(struct socket *so, int optval) } } } - return (error); + return error; } __private_extern__ int @@ -929,17 +954,19 @@ so_set_net_service_type(struct socket *so, int netsvctype) int sotc; int error; - if (!IS_VALID_NET_SERVICE_TYPE(netsvctype)) - return (EINVAL); + if (!IS_VALID_NET_SERVICE_TYPE(netsvctype)) { + return EINVAL; + } sotc = sotc_by_netservicetype[netsvctype]; error = so_set_traffic_class(so, sotc); - if (error != 0) - return (error); + if (error != 0) { + return error; + } so->so_netsvctype = netsvctype; so->so_flags1 |= SOF1_TC_NET_SERV_TYPE; - return (0); + return 0; } __private_extern__ void @@ -948,11 +975,13 @@ so_set_default_traffic_class(struct socket *so) so->so_traffic_class = SO_TC_BE; if ((SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6)) { - if (net_qos_policy_restricted == 0) + if (net_qos_policy_restricted == 0) { so->so_flags1 |= SOF1_QOSMARKING_ALLOWED; + } #if (DEVELOPMENT || DEBUG) - if (tfp_count > 0) + if (tfp_count > 0) { set_tclass_for_curr_proc(so); + } #endif /* (DEVELOPMENT || DEBUG) */ } } @@ -960,14 +989,14 @@ so_set_default_traffic_class(struct socket *so) __private_extern__ int so_set_opportunistic(struct socket *so, int optval) { - return (so_set_traffic_class(so, (optval == 0) ? - SO_TC_BE : SO_TC_BK_SYS)); + return so_set_traffic_class(so, (optval == 0) ? + SO_TC_BE : SO_TC_BK_SYS); } __private_extern__ int so_get_opportunistic(struct socket *so) { - return (so->so_traffic_class == SO_TC_BK_SYS); + return so->so_traffic_class == SO_TC_BK_SYS; } __private_extern__ int @@ -978,47 +1007,48 @@ so_tc_from_control(struct mbuf *control, int *out_netsvctype) *out_netsvctype = _NET_SERVICE_TYPE_UNSPEC; - for (cm = M_FIRST_CMSGHDR(control); cm != NULL; + for (cm = M_FIRST_CMSGHDR(control); + is_cmsg_valid(control, cm); cm = M_NXT_CMSGHDR(control, cm)) { - int val; + int val; - if (cm->cmsg_len < sizeof (struct cmsghdr)) - break; if (cm->cmsg_level != SOL_SOCKET || - cm->cmsg_len != CMSG_LEN(sizeof(int))) - continue; + cm->cmsg_len != CMSG_LEN(sizeof(int))) { + continue; + } val = *(int *)(void *)CMSG_DATA(cm); /* * The first valid option wins */ switch (cm->cmsg_type) { - case SO_TRAFFIC_CLASS: - if (SO_VALID_TC(val)) { - sotc = val; - return (sotc); - /* NOT REACHED */ - } else if (val < SO_TC_NET_SERVICE_OFFSET) { - break; - } - /* - * Handle the case SO_NET_SERVICE_TYPE values are - * passed using SO_TRAFFIC_CLASS - */ - val = val - SO_TC_NET_SERVICE_OFFSET; - /* FALLTHROUGH */ - case SO_NET_SERVICE_TYPE: - if (!IS_VALID_NET_SERVICE_TYPE(val)) - break; - *out_netsvctype = val; - sotc = sotc_by_netservicetype[val]; - return (sotc); + case SO_TRAFFIC_CLASS: + if (SO_VALID_TC(val)) { + sotc = val; + return sotc; /* NOT REACHED */ - default: + } else if (val < SO_TC_NET_SERVICE_OFFSET) { break; + } + /* + * Handle the case SO_NET_SERVICE_TYPE values are + * passed using SO_TRAFFIC_CLASS + */ + val = val - SO_TC_NET_SERVICE_OFFSET; + /* FALLTHROUGH */ + case SO_NET_SERVICE_TYPE: + if (!IS_VALID_NET_SERVICE_TYPE(val)) { + break; + } + *out_netsvctype = val; + sotc = sotc_by_netservicetype[val]; + return sotc; + /* NOT REACHED */ + default: + break; } } - return (sotc); + return sotc; } __private_extern__ void @@ -1026,8 +1056,9 @@ so_recv_data_stat(struct socket *so, struct mbuf *m, size_t off) { uint32_t mtc = 
m_get_traffic_class(m); - if (mtc >= SO_TC_STATS_MAX) + if (mtc >= SO_TC_STATS_MAX) { mtc = MBUF_TC_BE; + } so->so_tc_stats[mtc].rxpackets += 1; so->so_tc_stats[mtc].rxbytes += @@ -1038,8 +1069,9 @@ __private_extern__ void so_inc_recv_data_stat(struct socket *so, size_t pkts, size_t bytes, uint32_t mtc) { - if (mtc >= SO_TC_STATS_MAX) + if (mtc >= SO_TC_STATS_MAX) { mtc = MBUF_TC_BE; + } so->so_tc_stats[mtc].rxpackets += pkts; so->so_tc_stats[mtc].rxbytes += bytes; @@ -1049,10 +1081,10 @@ static inline int so_throttle_best_effort(struct socket *so, struct ifnet *ifp) { u_int32_t uptime = net_uptime(); - return (soissrcbesteffort(so) && - net_io_policy_throttle_best_effort == 1 && - ifp->if_rt_sendts > 0 && - (int)(uptime - ifp->if_rt_sendts) <= TCP_BG_SWITCH_TIME); + return soissrcbesteffort(so) && + net_io_policy_throttle_best_effort == 1 && + ifp->if_rt_sendts > 0 && + (int)(uptime - ifp->if_rt_sendts) <= TCP_BG_SWITCH_TIME; } __private_extern__ void @@ -1072,8 +1104,9 @@ set_tcp_stream_priority(struct socket *so) SOCK_CHECK_PROTO(so, IPPROTO_TCP)); /* Return if the socket is in a terminal state */ - if (inp->inp_state == INPCB_STATE_DEAD) + if (inp->inp_state == INPCB_STATE_DEAD) { return; + } outifp = inp->inp_last_outifp; uptime = net_uptime(); @@ -1085,8 +1118,9 @@ set_tcp_stream_priority(struct socket *so) * background. The variable sotcdb which can be set with sysctl * is used to disable these settings for testing. */ - if (outifp == NULL || (outifp->if_flags & IFF_LOOPBACK)) + if (outifp == NULL || (outifp->if_flags & IFF_LOOPBACK)) { is_local = true; + } /* Check if there has been recent foreground activity */ if (outifp != NULL) { @@ -1097,8 +1131,9 @@ set_tcp_stream_priority(struct socket *so) * activity. */ if (soissrcbackground(so) && outifp->if_fg_sendts > 0 && - (int)(uptime - outifp->if_fg_sendts) <= TCP_BG_SWITCH_TIME) + (int)(uptime - outifp->if_fg_sendts) <= TCP_BG_SWITCH_TIME) { fg_active = true; + } /* * The traffic source is best-effort -- check if @@ -1108,8 +1143,9 @@ set_tcp_stream_priority(struct socket *so) * algorithms that respond to increased latency * on best-effort traffic. 
*/ - if (so_throttle_best_effort(so, outifp)) + if (so_throttle_best_effort(so, outifp)) { fg_active = true; + } } /* @@ -1133,11 +1169,13 @@ set_tcp_stream_priority(struct socket *so) */ if ((sotcdb & SOTCDB_NO_SENDTCPBG) != 0 || is_local || !IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class)) { - if (old_cc == TCP_CC_ALGO_BACKGROUND_INDEX) + if (old_cc == TCP_CC_ALGO_BACKGROUND_INDEX) { tcp_set_foreground_cc(so); + } } else { - if (old_cc != TCP_CC_ALGO_BACKGROUND_INDEX) + if (old_cc != TCP_CC_ALGO_BACKGROUND_INDEX) { tcp_set_background_cc(so); + } } /* Set receive side background flags */ @@ -1149,8 +1187,9 @@ set_tcp_stream_priority(struct socket *so) } } else { tcp_clear_recv_bg(so); - if (old_cc == TCP_CC_ALGO_BACKGROUND_INDEX) + if (old_cc == TCP_CC_ALGO_BACKGROUND_INDEX) { tcp_set_foreground_cc(so); + } } if (old_cc != tp->tcp_cc_index || recvbg != IS_TCP_RECV_BG(so)) { @@ -1173,11 +1212,12 @@ __private_extern__ void set_packet_service_class(struct mbuf *m, struct socket *so, int sotc, u_int32_t flags) { - mbuf_svc_class_t msc = MBUF_SC_BE; /* Best effort by default */ + mbuf_svc_class_t msc = MBUF_SC_BE; /* Best effort by default */ struct inpcb *inp = sotoinpcb(so); /* in6pcb and inpcb are the same */ - if (!(m->m_flags & M_PKTHDR)) + if (!(m->m_flags & M_PKTHDR)) { return; + } /* * Here is the precedence: @@ -1196,23 +1236,28 @@ set_packet_service_class(struct mbuf *m, struct socket *so, * If TRAFFIC_MGT_SO_BACKGROUND is set or policy to throttle * best effort is set, depress the priority. */ - if (!IS_MBUF_SC_BACKGROUND(msc) && soisthrottled(so)) + if (!IS_MBUF_SC_BACKGROUND(msc) && soisthrottled(so)) { msc = MBUF_SC_BK; + } if (IS_MBUF_SC_BESTEFFORT(msc) && inp->inp_last_outifp != NULL && - so_throttle_best_effort(so, inp->inp_last_outifp)) + so_throttle_best_effort(so, inp->inp_last_outifp)) { msc = MBUF_SC_BK; + } - if (soissrcbackground(so)) + if (soissrcbackground(so)) { m->m_pkthdr.pkt_flags |= PKTF_SO_BACKGROUND; + } - if (soissrcrealtime(so) || IS_MBUF_SC_REALTIME(msc)) + if (soissrcrealtime(so) || IS_MBUF_SC_REALTIME(msc)) { m->m_pkthdr.pkt_flags |= PKTF_SO_REALTIME; + } /* * Set the traffic class in the mbuf packet header svc field */ - if (sotcdb & SOTCDB_NO_MTC) + if (sotcdb & SOTCDB_NO_MTC) { goto no_mbtc; + } /* * Elevate service class if the packet is a pure TCP ACK. @@ -1221,8 +1266,9 @@ set_packet_service_class(struct mbuf *m, struct socket *so, * transmit-start model. */ if (!IS_MBUF_SC_BACKGROUND(msc) && - (flags & (PKT_SCF_TCP_ACK | PKT_SCF_TCP_SYN)) != 0) + (flags & (PKT_SCF_TCP_ACK | PKT_SCF_TCP_SYN)) != 0) { msc = MBUF_SC_CTL; + } (void) m_set_service_class(m, msc); @@ -1231,17 +1277,19 @@ set_packet_service_class(struct mbuf *m, struct socket *so, * or clear it. 
*/ if (!(sotcdb & SOTCDB_NO_PRIVILEGED) && soisprivilegedtraffic(so) && - msc != MBUF_SC_UNSPEC) + msc != MBUF_SC_UNSPEC) { m->m_pkthdr.pkt_flags |= PKTF_PRIO_PRIVILEGED; - else + } else { m->m_pkthdr.pkt_flags &= ~PKTF_PRIO_PRIVILEGED; + } no_mbtc: /* * For TCP with background traffic class switch CC algo based on sysctl */ - if (so->so_type == SOCK_STREAM) + if (so->so_type == SOCK_STREAM) { set_tcp_stream_priority(so); + } so_tc_update_stats(m, so, msc); } @@ -1322,7 +1370,7 @@ so_tc2msc(int tc) break; } - return (msc); + return msc; } __private_extern__ int @@ -1330,30 +1378,30 @@ so_svc2tc(mbuf_svc_class_t svc) { switch (svc) { case MBUF_SC_BK_SYS: - return (SO_TC_BK_SYS); + return SO_TC_BK_SYS; case MBUF_SC_BK: - return (SO_TC_BK); + return SO_TC_BK; case MBUF_SC_BE: - return (SO_TC_BE); + return SO_TC_BE; case MBUF_SC_RD: - return (SO_TC_RD); + return SO_TC_RD; case MBUF_SC_OAM: - return (SO_TC_OAM); + return SO_TC_OAM; case MBUF_SC_AV: - return (SO_TC_AV); + return SO_TC_AV; case MBUF_SC_RV: - return (SO_TC_RV); + return SO_TC_RV; case MBUF_SC_VI: - return (SO_TC_VI); + return SO_TC_VI; case MBUF_SC_SIG: - return (SO_TC_NETSVC_SIG); + return SO_TC_NETSVC_SIG; case MBUF_SC_VO: - return (SO_TC_VO); + return SO_TC_VO; case MBUF_SC_CTL: - return (SO_TC_CTL); + return SO_TC_CTL; case MBUF_SC_UNSPEC: default: - return (SO_TC_BE); + return SO_TC_BE; } } @@ -1375,9 +1423,9 @@ so_set_lro(struct socket *so, int optval) tp = intotcpcb(inp); if (tp && (tp->t_flagsext & TF_LRO_OFFLOADED)) { tcp_lro_remove_state(inp->inp_laddr, - inp->inp_faddr, - inp->inp_lport, - inp->inp_fport); + inp->inp_faddr, + inp->inp_lport, + inp->inp_fport); tp->t_flagsext &= ~TF_LRO_OFFLOADED; } } @@ -1389,40 +1437,40 @@ static size_t sotc_index(int sotc) { switch (sotc) { - case SO_TC_BK_SYS: - return (SOTCIX_BK_SYS); - case _SO_TC_BK: - case SO_TC_BK: - return (SOTCIX_BK); - - case SO_TC_BE: - return (SOTCIX_BE); - case SO_TC_RD: - return (SOTCIX_RD); - case SO_TC_OAM: - return (SOTCIX_OAM); - - case SO_TC_AV: - return (SOTCIX_AV); - case SO_TC_RV: - return (SOTCIX_RV); - case _SO_TC_VI: - case SO_TC_VI: - return (SOTCIX_VI); + case SO_TC_BK_SYS: + return SOTCIX_BK_SYS; + case _SO_TC_BK: + case SO_TC_BK: + return SOTCIX_BK; - case _SO_TC_VO: - case SO_TC_VO: - return (SOTCIX_VO); - case SO_TC_CTL: - return (SOTCIX_CTL); + case SO_TC_BE: + return SOTCIX_BE; + case SO_TC_RD: + return SOTCIX_RD; + case SO_TC_OAM: + return SOTCIX_OAM; - default: - break; + case SO_TC_AV: + return SOTCIX_AV; + case SO_TC_RV: + return SOTCIX_RV; + case _SO_TC_VI: + case SO_TC_VI: + return SOTCIX_VI; + + case _SO_TC_VO: + case SO_TC_VO: + return SOTCIX_VO; + case SO_TC_CTL: + return SOTCIX_CTL; + + default: + break; } /* * Unknown traffic class value */ - return (SIZE_T_MAX); + return SIZE_T_MAX; } /* @@ -1439,17 +1487,20 @@ set_netsvctype_dscp_map(size_t in_count, /* * Do not accept more that max number of distinct DSCPs */ - if (in_count > _MAX_DSCP || netsvctype_dscp_map == NULL) - return (EINVAL); + if (in_count > _MAX_DSCP || netsvctype_dscp_map == NULL) { + return EINVAL; + } /* * Validate input parameters */ for (i = 0; i < in_count; i++) { - if (!IS_VALID_NET_SERVICE_TYPE(netsvctype_dscp_map[i].netsvctype)) - return (EINVAL); - if (netsvctype_dscp_map[i].dscp > _MAX_DSCP) - return (EINVAL); + if (!IS_VALID_NET_SERVICE_TYPE(netsvctype_dscp_map[i].netsvctype)) { + return EINVAL; + } + if (netsvctype_dscp_map[i].dscp > _MAX_DSCP) { + return EINVAL; + } } net_qos_dscp_map = &default_net_qos_dscp_map; @@ -1462,29 +1513,29 @@ 
set_netsvctype_dscp_map(size_t in_count, } for (netsvctype = 0; netsvctype < _NET_SERVICE_TYPE_COUNT; netsvctype++) { switch (netsvctype) { - case NET_SERVICE_TYPE_BE: - case NET_SERVICE_TYPE_BK: - case NET_SERVICE_TYPE_VI: - case NET_SERVICE_TYPE_VO: - case NET_SERVICE_TYPE_RV: - case NET_SERVICE_TYPE_AV: - case NET_SERVICE_TYPE_OAM: - case NET_SERVICE_TYPE_RD: { - size_t sotcix; - - sotcix = sotc_index(sotc_by_netservicetype[netsvctype]); - if (sotcix != SIZE_T_MAX) { - net_qos_dscp_map->sotc_to_dscp[sotcix] = - netsvctype_dscp_map[netsvctype].dscp; - } - break; + case NET_SERVICE_TYPE_BE: + case NET_SERVICE_TYPE_BK: + case NET_SERVICE_TYPE_VI: + case NET_SERVICE_TYPE_VO: + case NET_SERVICE_TYPE_RV: + case NET_SERVICE_TYPE_AV: + case NET_SERVICE_TYPE_OAM: + case NET_SERVICE_TYPE_RD: { + size_t sotcix; + + sotcix = sotc_index(sotc_by_netservicetype[netsvctype]); + if (sotcix != SIZE_T_MAX) { + net_qos_dscp_map->sotc_to_dscp[sotcix] = + netsvctype_dscp_map[netsvctype].dscp; } - case NET_SERVICE_TYPE_SIG: - /* Signaling does not have its own traffic class */ - break; - default: - /* We should not be here */ - ASSERT(0); + break; + } + case NET_SERVICE_TYPE_SIG: + /* Signaling does not have its own traffic class */ + break; + default: + /* We should not be here */ + ASSERT(0); } } /* Network control socket traffic class is always best effort */ @@ -1492,9 +1543,9 @@ set_netsvctype_dscp_map(size_t in_count, /* Backround socket traffic class DSCP same as backround system */ net_qos_dscp_map->sotc_to_dscp[SOTCIX_BK] = - net_qos_dscp_map->sotc_to_dscp[SOTCIX_BK_SYS]; + net_qos_dscp_map->sotc_to_dscp[SOTCIX_BK_SYS]; - return (0); + return 0; } /* @@ -1510,21 +1561,22 @@ get_netsvctype_dscp_map(size_t *out_count, /* * Do not accept more that max number of distinct DSCPs */ - if (out_count == NULL || netsvctype_dscp_map == NULL) - return (EINVAL); - if (*out_count > _MAX_DSCP) - return (EINVAL); + if (out_count == NULL || netsvctype_dscp_map == NULL) { + return EINVAL; + } + if (*out_count > _MAX_DSCP) { + return EINVAL; + } net_qos_dscp_map = &default_net_qos_dscp_map; for (i = 0; i < MIN(_NET_SERVICE_TYPE_COUNT, *out_count); i++) { netsvctype_dscp_map[i].netsvctype = i; netsvctype_dscp_map[i].dscp = net_qos_dscp_map->netsvctype_to_dscp[i]; - } *out_count = i; - return (0); + return 0; } void @@ -1536,7 +1588,7 @@ net_qos_map_init() * By default use the Fastlane DSCP mappngs */ error = set_netsvctype_dscp_map(_NET_SERVICE_TYPE_COUNT, - fastlane_netsvctype_dscp_map); + fastlane_netsvctype_dscp_map); ASSERT(error == 0); /* @@ -1564,21 +1616,25 @@ sysctl_default_netsvctype_to_dscp_map SYSCTL_HANDLER_ARGS } else if (req->oldlen > 0) { count = _NET_SERVICE_TYPE_COUNT; error = get_netsvctype_dscp_map(&count, netsvctype_dscp_map); - if (error != 0) + if (error != 0) { goto done; + } len = count * sizeof(struct netsvctype_dscp_map); error = SYSCTL_OUT(req, netsvctype_dscp_map, - MIN(len, req->oldlen)); - if (error != 0) + MIN(len, req->oldlen)); + if (error != 0) { goto done; + } } - if (req->newptr == USER_ADDR_NULL) + if (req->newptr == USER_ADDR_NULL) { goto done; + } error = proc_suser(current_proc()); - if (error != 0) + if (error != 0) { goto done; + } /* * Check input length @@ -1591,21 +1647,23 @@ sysctl_default_netsvctype_to_dscp_map SYSCTL_HANDLER_ARGS * Cap the number of entries to copy from input buffer */ error = SYSCTL_IN(req, netsvctype_dscp_map, req->newlen); - if (error != 0) + if (error != 0) { goto done; + } count = req->newlen / sizeof(struct netsvctype_dscp_map); error = 
set_netsvctype_dscp_map(count, netsvctype_dscp_map); done: - return (error); + return error; } __private_extern__ errno_t set_packet_qos(struct mbuf *m, struct ifnet *ifp, boolean_t qos_allowed, int sotc, int netsvctype, u_int8_t *dscp_inout) { - if (ifp == NULL || dscp_inout == NULL) - return (EINVAL); + if (ifp == NULL || dscp_inout == NULL) { + return EINVAL; + } if ((ifp->if_eflags & (IFEF_QOSMARKING_ENABLED | IFEF_QOSMARKING_CAPABLE)) == @@ -1636,8 +1694,9 @@ set_packet_qos(struct mbuf *m, struct ifnet *ifp, boolean_t qos_allowed, netsvctype != NET_SERVICE_TYPE_BE && netsvctype != NET_SERVICE_TYPE_BK) { dscp = _DSCP_DF; - if (sotc != SO_TC_CTL) + if (sotc != SO_TC_CTL) { m_set_service_class(m, MBUF_SC_BE); + } } } else if (sotc != SO_TC_UNSPEC) { size_t sotcix = sotc_index(sotc); @@ -1648,14 +1707,16 @@ set_packet_qos(struct mbuf *m, struct ifnet *ifp, boolean_t qos_allowed, sotc != SO_TC_BK && sotc != SO_TC_BK_SYS && sotc != SO_TC_CTL) { dscp = _DSCP_DF; - if (sotc != SO_TC_CTL) + if (sotc != SO_TC_CTL) { m_set_service_class(m, MBUF_SC_BE); + } } } } - if (net_qos_verbose != 0) + if (net_qos_verbose != 0) { printf("%s qos_allowed %d sotc %u netsvctype %u dscp %u\n", __func__, qos_allowed, sotc, netsvctype, dscp); + } if (*dscp_inout != dscp) { *dscp_inout = dscp; @@ -1674,14 +1735,15 @@ set_packet_qos(struct mbuf *m, struct ifnet *ifp, boolean_t qos_allowed, if (msc != MBUF_SC_BE) { m_set_service_class(m, msc); - if (net_qos_verbose != 0) + if (net_qos_verbose != 0) { printf("%s set msc %u for dscp %u\n", __func__, msc, *dscp_inout); + } } } } - return (0); + return 0; } static void @@ -1689,34 +1751,36 @@ set_dscp_to_wifi_ac_map(const struct dcsp_msc_map *map, int clear) { int i; - if (clear) + if (clear) { bzero(wifi_dscp_to_msc_array, sizeof(wifi_dscp_to_msc_array)); + } for (i = 0; i < DSCP_ARRAY_SIZE; i++) { const struct dcsp_msc_map *elem = map + i; - if (elem->dscp > _MAX_DSCP || elem->msc == MBUF_SC_UNSPEC) + if (elem->dscp > _MAX_DSCP || elem->msc == MBUF_SC_UNSPEC) { break; + } switch (elem->msc) { - case MBUF_SC_BK_SYS: - case MBUF_SC_BK: - wifi_dscp_to_msc_array[elem->dscp] = MBUF_SC_BK; - break; - default: - case MBUF_SC_BE: - case MBUF_SC_RD: - case MBUF_SC_OAM: - wifi_dscp_to_msc_array[elem->dscp] = MBUF_SC_BE; - break; - case MBUF_SC_AV: - case MBUF_SC_RV: - case MBUF_SC_VI: - wifi_dscp_to_msc_array[elem->dscp] = MBUF_SC_VI; - break; - case MBUF_SC_VO: - case MBUF_SC_CTL: - wifi_dscp_to_msc_array[elem->dscp] = MBUF_SC_VO; - break; + case MBUF_SC_BK_SYS: + case MBUF_SC_BK: + wifi_dscp_to_msc_array[elem->dscp] = MBUF_SC_BK; + break; + default: + case MBUF_SC_BE: + case MBUF_SC_RD: + case MBUF_SC_OAM: + wifi_dscp_to_msc_array[elem->dscp] = MBUF_SC_BE; + break; + case MBUF_SC_AV: + case MBUF_SC_RV: + case MBUF_SC_VI: + wifi_dscp_to_msc_array[elem->dscp] = MBUF_SC_VI; + break; + case MBUF_SC_VO: + case MBUF_SC_CTL: + wifi_dscp_to_msc_array[elem->dscp] = MBUF_SC_VO; + break; } } } @@ -1749,7 +1813,7 @@ dscp_msc_map_from_netsvctype_dscp_map(struct netsvctype_dscp_map *netsvctype_dsc dcsp_msc_map[i].msc = so_tc2msc(netsvctype_dscp_map[i].netsvctype); } done: - return (error); + return error; } int @@ -1772,17 +1836,20 @@ sysctl_dscp_to_wifi_ac_map SYSCTL_HANDLER_ARGS so_svc2tc(wifi_dscp_to_msc_array[i]); } error = SYSCTL_OUT(req, netsvctype_dscp_map, - MIN(len, req->oldlen)); - if (error != 0) + MIN(len, req->oldlen)); + if (error != 0) { goto done; + } } - if (req->newptr == USER_ADDR_NULL) + if (req->newptr == USER_ADDR_NULL) { goto done; + } error = 
proc_suser(current_proc()); - if (error != 0) + if (error != 0) { goto done; + } /* * Check input length @@ -1794,8 +1861,9 @@ sysctl_dscp_to_wifi_ac_map SYSCTL_HANDLER_ARGS /* * Cap the number of entries to copy from input buffer */ - if (len > req->newlen) + if (len > req->newlen) { len = req->newlen; + } error = SYSCTL_IN(req, netsvctype_dscp_map, len); if (error != 0) { goto done; @@ -1809,7 +1877,7 @@ sysctl_dscp_to_wifi_ac_map SYSCTL_HANDLER_ARGS } set_dscp_to_wifi_ac_map(dcsp_msc_map, 0); done: - return (error); + return error; } int @@ -1820,12 +1888,13 @@ sysctl_reset_dscp_to_wifi_ac_map SYSCTL_HANDLER_ARGS int val = 0; error = sysctl_handle_int(oidp, &val, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } set_dscp_to_wifi_ac_map(default_dscp_to_wifi_ac_map, 1); - return (0); + return 0; } /* @@ -1842,19 +1911,20 @@ net_qos_guideline(struct proc *p, struct net_qos_guideline_args *arg, int *retval) { #pragma unused(p) -#define RETURN_USE_BK 1 -#define RETURN_USE_DEFAULT 0 +#define RETURN_USE_BK 1 +#define RETURN_USE_DEFAULT 0 struct net_qos_param qos_arg; struct ifnet *ipv4_primary, *ipv6_primary; int err = 0; if (arg->param == USER_ADDR_NULL || retval == NULL || - arg->param_len != sizeof (qos_arg)) { - return (EINVAL); + arg->param_len != sizeof(qos_arg)) { + return EINVAL; + } + err = copyin(arg->param, (caddr_t) &qos_arg, sizeof(qos_arg)); + if (err != 0) { + return err; } - err = copyin(arg->param, (caddr_t) &qos_arg, sizeof (qos_arg)); - if (err != 0) - return (err); *retval = RETURN_USE_DEFAULT; ipv4_primary = ifindex2ifnet[get_primary_ifscope(AF_INET)]; @@ -1870,7 +1940,7 @@ net_qos_guideline(struct proc *p, struct net_qos_guideline_args *arg, (ipv6_primary != NULL && (ipv6_primary->if_xflags & IFXF_LOW_INTERNET_UL))) { *retval = RETURN_USE_BK; - return (0); + return 0; } } else { if ((ipv4_primary != NULL && @@ -1878,7 +1948,7 @@ net_qos_guideline(struct proc *p, struct net_qos_guideline_args *arg, (ipv6_primary != NULL && (ipv6_primary->if_xflags & IFXF_LOW_INTERNET_DL))) { *retval = RETURN_USE_BK; - return (0); + return 0; } } @@ -1891,19 +1961,19 @@ net_qos_guideline(struct proc *p, struct net_qos_guideline_args *arg, if (ipv4_primary != NULL && IFNET_IS_EXPENSIVE(ipv4_primary) && ipv6_primary != NULL && IFNET_IS_EXPENSIVE(ipv6_primary)) { if (qos_arg.nq_use_expensive) { - return (0); + return 0; } else { *retval = RETURN_USE_BK; - return (0); + return 0; } } if (qos_arg.nq_transfer_size >= 5 * 1024 * 1024) { *retval = RETURN_USE_BK; - return (0); + return 0; } -#undef RETURN_USE_BK -#undef RETURN_USE_DEFAULT - return (0); +#undef RETURN_USE_BK +#undef RETURN_USE_DEFAULT + return 0; } diff --git a/bsd/netinet/in_tclass.h b/bsd/netinet/in_tclass.h index a62203024..1d8493b57 100644 --- a/bsd/netinet/in_tclass.h +++ b/bsd/netinet/in_tclass.h @@ -38,37 +38,37 @@ #include #include -#define SO_TCDBG_PID 0x01 /* Set/get traffic class policy for PID */ -#define SO_TCDBG_PNAME 0x02 /* Set/get traffic class policy for processes of that name */ -#define SO_TCDBG_PURGE 0x04 /* Purge entries for unused PIDs */ -#define SO_TCDBG_FLUSH 0x08 /* Flush all entries */ -#define SO_TCDBG_COUNT 0x10 /* Get count of entries */ -#define SO_TCDBG_LIST 0x20 /* List entries */ -#define SO_TCDBG_DELETE 0x40 /* Delete a process entry */ -#define SO_TCDBG_TCFLUSH_PID 0x80 /* Flush traffic class for PID */ +#define SO_TCDBG_PID 0x01 /* Set/get traffic class policy for PID */ +#define SO_TCDBG_PNAME 0x02 /* Set/get traffic class policy for 
processes of that name */ +#define SO_TCDBG_PURGE 0x04 /* Purge entries for unused PIDs */ +#define SO_TCDBG_FLUSH 0x08 /* Flush all entries */ +#define SO_TCDBG_COUNT 0x10 /* Get count of entries */ +#define SO_TCDBG_LIST 0x20 /* List entries */ +#define SO_TCDBG_DELETE 0x40 /* Delete a process entry */ +#define SO_TCDBG_TCFLUSH_PID 0x80 /* Flush traffic class for PID */ struct so_tcdbg { - u_int32_t so_tcdbg_cmd; - int32_t so_tcdbg_tclass; - int32_t so_tcdbg_netsvctype; - u_int32_t so_tcdbg_count; - pid_t so_tcdbg_pid; - u_int32_t so_tcbbg_qos_mode; - char so_tcdbg_pname[(2 * MAXCOMLEN) + 1]; + u_int32_t so_tcdbg_cmd; + int32_t so_tcdbg_tclass; + int32_t so_tcdbg_netsvctype; + u_int32_t so_tcdbg_count; + pid_t so_tcdbg_pid; + u_int32_t so_tcbbg_qos_mode; + char so_tcdbg_pname[(2 * MAXCOMLEN) + 1]; }; -#define QOS_MODE_MARKING_POLICY_DEFAULT 0 -#define QOS_MODE_MARKING_POLICY_ENABLE 1 -#define QOS_MODE_MARKING_POLICY_DISABLE 2 +#define QOS_MODE_MARKING_POLICY_DEFAULT 0 +#define QOS_MODE_MARKING_POLICY_ENABLE 1 +#define QOS_MODE_MARKING_POLICY_DISABLE 2 -#define NET_QOS_MARKING_POLICY_DEFAULT QOS_MODE_MARKING_POLICY_DEFAULT /* obsolete, to be removed */ -#define NET_QOS_MARKING_POLICY_ENABLE QOS_MODE_MARKING_POLICY_ENABLE /* obsolete, to be removed */ -#define NET_QOS_MARKING_POLICY_DISABLE QOS_MODE_MARKING_POLICY_DISABLE /* obsolete, to be removed */ +#define NET_QOS_MARKING_POLICY_DEFAULT QOS_MODE_MARKING_POLICY_DEFAULT /* obsolete, to be removed */ +#define NET_QOS_MARKING_POLICY_ENABLE QOS_MODE_MARKING_POLICY_ENABLE /* obsolete, to be removed */ +#define NET_QOS_MARKING_POLICY_DISABLE QOS_MODE_MARKING_POLICY_DISABLE /* obsolete, to be removed */ struct net_qos_param { - u_int64_t nq_transfer_size; /* transfer size in bytes */ - u_int32_t nq_use_expensive:1, /* allowed = 1 otherwise 0 */ - nq_uplink:1; /* uplink = 1 otherwise 0 */ - u_int32_t nq_unused; /* for future expansion */ + u_int64_t nq_transfer_size; /* transfer size in bytes */ + u_int32_t nq_use_expensive:1, /* allowed = 1 otherwise 0 */ + nq_uplink:1; /* uplink = 1 otherwise 0 */ + u_int32_t nq_unused; /* for future expansion */ }; #ifndef KERNEL diff --git a/bsd/netinet/in_var.h b/bsd/netinet/in_var.h index 5b0506120..18c55a17a 100644 --- a/bsd/netinet/in_var.h +++ b/bsd/netinet/in_var.h @@ -61,7 +61,7 @@ */ #ifndef _NETINET_IN_VAR_H_ -#define _NETINET_IN_VAR_H_ +#define _NETINET_IN_VAR_H_ #include #include @@ -78,31 +78,31 @@ * of the structure and is assumed to be first. 
*/ struct in_ifaddr { - struct ifaddr ia_ifa; /* protocol-independent info */ -#define ia_ifp ia_ifa.ifa_ifp -#define ia_flags ia_ifa.ifa_flags - /* ia_{,sub}net{,mask} in host order */ - u_int32_t ia_net; /* network number of interface */ - u_int32_t ia_netmask; /* mask of net part */ - u_int32_t ia_subnet; /* subnet number, including net */ - u_int32_t ia_subnetmask; /* mask of subnet part */ - struct in_addr ia_netbroadcast; /* to recognize net broadcasts */ - TAILQ_ENTRY(in_ifaddr) ia_link; /* tailq macro glue */ - struct sockaddr_in ia_addr; /* reserve space for interface name */ - struct sockaddr_in ia_dstaddr; /* reserve space for broadcast addr */ -#define ia_broadaddr ia_dstaddr - struct sockaddr_in ia_sockmask; /* reserve space for general netmask */ - TAILQ_ENTRY(in_ifaddr) ia_hash; /* hash bucket entry */ + struct ifaddr ia_ifa; /* protocol-independent info */ +#define ia_ifp ia_ifa.ifa_ifp +#define ia_flags ia_ifa.ifa_flags + /* ia_{,sub}net{,mask} in host order */ + u_int32_t ia_net; /* network number of interface */ + u_int32_t ia_netmask; /* mask of net part */ + u_int32_t ia_subnet; /* subnet number, including net */ + u_int32_t ia_subnetmask; /* mask of subnet part */ + struct in_addr ia_netbroadcast; /* to recognize net broadcasts */ + TAILQ_ENTRY(in_ifaddr) ia_link; /* tailq macro glue */ + struct sockaddr_in ia_addr; /* reserve space for interface name */ + struct sockaddr_in ia_dstaddr; /* reserve space for broadcast addr */ +#define ia_broadaddr ia_dstaddr + struct sockaddr_in ia_sockmask; /* reserve space for general netmask */ + TAILQ_ENTRY(in_ifaddr) ia_hash; /* hash bucket entry */ }; -#define ifatoia(ifa) ((struct in_ifaddr *)(void *)(ifa)) +#define ifatoia(ifa) ((struct in_ifaddr *)(void *)(ifa)) #endif /* BSD_KERNEL_PRIVATE */ struct in_aliasreq { - char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */ + char ifra_name[IFNAMSIZ]; /* if name, e.g. 
"en0" */ struct sockaddr_in ifra_addr; struct sockaddr_in ifra_broadaddr; -#define ifra_dstaddr ifra_broadaddr +#define ifra_dstaddr ifra_broadaddr struct sockaddr_in ifra_mask; }; @@ -111,20 +111,20 @@ struct in_aliasreq { */ struct kev_in_data { struct net_event_data link_data; - struct in_addr ia_addr; /* interface address */ - u_int32_t ia_net; /* network number of interface */ - u_int32_t ia_netmask; /* mask of net part */ - u_int32_t ia_subnet; /* subnet number, including net */ - u_int32_t ia_subnetmask; /* mask of subnet part */ - struct in_addr ia_netbroadcast; /* to recognize net broadcasts */ + struct in_addr ia_addr; /* interface address */ + u_int32_t ia_net; /* network number of interface */ + u_int32_t ia_netmask; /* mask of net part */ + u_int32_t ia_subnet; /* subnet number, including net */ + u_int32_t ia_subnetmask; /* mask of subnet part */ + struct in_addr ia_netbroadcast; /* to recognize net broadcasts */ struct in_addr ia_dstaddr; }; struct kev_in_collision { struct net_event_data link_data; /* link where ARP was received on */ - struct in_addr ia_ipaddr; /* conflicting IP address */ - u_char hw_len; /* length of hardware address */ - u_char hw_addr[0]; /* variable length hardware address */ + struct in_addr ia_ipaddr; /* conflicting IP address */ + u_char hw_len; /* length of hardware address */ + u_char hw_addr[0]; /* variable length hardware address */ }; struct kev_in_arpfailure { @@ -138,8 +138,8 @@ struct kev_in_arpalive { #ifdef __APPLE_API_PRIVATE struct kev_in_portinuse { - u_int16_t port; /* conflicting port number in host order */ - u_int32_t req_pid; /* PID port requestor */ + u_int16_t port; /* conflicting port number in host order */ + u_int32_t req_pid; /* PID port requestor */ u_int32_t reserved[2]; }; #endif /* __APPLE_API_PRIVATE */ @@ -154,44 +154,44 @@ struct kev_in_portinuse { * Given a pointer to an in_ifaddr (ifaddr), * return a pointer to the addr as a sockaddr_in. */ -#define IA_SIN(ia) (&(((struct in_ifaddr *)(ia))->ia_addr)) -#define IA_DSTSIN(ia) (&(((struct in_ifaddr *)(ia))->ia_dstaddr)) +#define IA_SIN(ia) (&(((struct in_ifaddr *)(ia))->ia_addr)) +#define IA_DSTSIN(ia) (&(((struct in_ifaddr *)(ia))->ia_dstaddr)) -#define IN_LNAOF(in, ifa) \ +#define IN_LNAOF(in, ifa) \ ((ntohl((in).s_addr) & ~((struct in_ifaddr *)(ifa)->ia_subnetmask)) /* * Hash table for IPv4 addresses. */ extern TAILQ_HEAD(in_ifaddrhead, in_ifaddr) in_ifaddrhead; -extern TAILQ_HEAD(in_ifaddrhashhead, in_ifaddr) *in_ifaddrhashtbl; +extern TAILQ_HEAD(in_ifaddrhashhead, in_ifaddr) * in_ifaddrhashtbl; extern lck_rw_t *in_ifaddr_rwlock; -#define INADDR_HASH(x) (&in_ifaddrhashtbl[inaddr_hashval(x)]) +#define INADDR_HASH(x) (&in_ifaddrhashtbl[inaddr_hashval(x)]) -extern u_char inetctlerrmap[]; +extern u_char inetctlerrmap[]; /* * Macro for finding the interface (ifnet structure) corresponding to one * of our IP addresses. */ -#define INADDR_TO_IFP(addr, ifp) \ - /* struct in_addr addr; */ \ - /* struct ifnet *ifp; */ \ -{ \ - struct in_ifaddr *ia; \ - \ - lck_rw_lock_shared(in_ifaddr_rwlock); \ - TAILQ_FOREACH(ia, INADDR_HASH((addr).s_addr), ia_hash) { \ - IFA_LOCK_SPIN(&ia->ia_ifa); \ - if (IA_SIN(ia)->sin_addr.s_addr == (addr).s_addr) { \ - IFA_UNLOCK(&ia->ia_ifa); \ - break; \ - } \ - IFA_UNLOCK(&ia->ia_ifa); \ - } \ - (ifp) = (ia == NULL) ? 
NULL : ia->ia_ifp; \ - lck_rw_done(in_ifaddr_rwlock); \ +#define INADDR_TO_IFP(addr, ifp) \ + /* struct in_addr addr; */ \ + /* struct ifnet *ifp; */ \ +{ \ + struct in_ifaddr *ia; \ + \ + lck_rw_lock_shared(in_ifaddr_rwlock); \ + TAILQ_FOREACH(ia, INADDR_HASH((addr).s_addr), ia_hash) { \ + IFA_LOCK_SPIN(&ia->ia_ifa); \ + if (IA_SIN(ia)->sin_addr.s_addr == (addr).s_addr) { \ + IFA_UNLOCK(&ia->ia_ifa); \ + break; \ + } \ + IFA_UNLOCK(&ia->ia_ifa); \ + } \ + (ifp) = (ia == NULL) ? NULL : ia->ia_ifp; \ + lck_rw_done(in_ifaddr_rwlock); \ } /* @@ -199,18 +199,18 @@ extern u_char inetctlerrmap[]; * to a given interface (ifnet structure). Caller is responsible for freeing * the reference. */ -#define IFP_TO_IA(ifp, ia) \ - /* struct ifnet *ifp; */ \ - /* struct in_ifaddr *ia; */ \ -{ \ - lck_rw_lock_shared(in_ifaddr_rwlock); \ - for ((ia) = TAILQ_FIRST(&in_ifaddrhead); \ - (ia) != NULL && (ia)->ia_ifp != (ifp); \ - (ia) = TAILQ_NEXT((ia), ia_link)) \ - continue; \ - if ((ia) != NULL) \ - IFA_ADDREF(&(ia)->ia_ifa); \ - lck_rw_done(in_ifaddr_rwlock); \ +#define IFP_TO_IA(ifp, ia) \ + /* struct ifnet *ifp; */ \ + /* struct in_ifaddr *ia; */ \ +{ \ + lck_rw_lock_shared(in_ifaddr_rwlock); \ + for ((ia) = TAILQ_FIRST(&in_ifaddrhead); \ + (ia) != NULL && (ia)->ia_ifp != (ifp); \ + (ia) = TAILQ_NEXT((ia), ia_link)) \ + continue; \ + if ((ia) != NULL) \ + IFA_ADDREF(&(ia)->ia_ifa); \ + lck_rw_done(in_ifaddr_rwlock); \ } /* @@ -232,25 +232,25 @@ struct router_info { * IPv4 multicast IGMP-layer source entry. */ struct ip_msource { - RB_ENTRY(ip_msource) ims_link; /* RB tree links */ - in_addr_t ims_haddr; /* host byte order */ + RB_ENTRY(ip_msource) ims_link; /* RB tree links */ + in_addr_t ims_haddr; /* host byte order */ struct ims_st { - uint16_t ex; /* # of exclusive members */ - uint16_t in; /* # of inclusive members */ - } ims_st[2]; /* state at t0, t1 */ - uint8_t ims_stp; /* pending query */ + uint16_t ex; /* # of exclusive members */ + uint16_t in; /* # of inclusive members */ + } ims_st[2]; /* state at t0, t1 */ + uint8_t ims_stp; /* pending query */ }; /* * IPv4 multicast PCB-layer source entry. */ struct in_msource { - RB_ENTRY(ip_msource) ims_link; /* RB tree links */ - in_addr_t ims_haddr; /* host byte order */ - uint8_t imsl_st[2]; /* state before/at commit */ + RB_ENTRY(ip_msource) ims_link; /* RB tree links */ + in_addr_t ims_haddr; /* host byte order */ + uint8_t imsl_st[2]; /* state before/at commit */ }; -RB_HEAD(ip_msource_tree, ip_msource); /* define struct ip_msource_tree */ +RB_HEAD(ip_msource_tree, ip_msource); /* define struct ip_msource_tree */ RB_PROTOTYPE_SC_PREV(__private_extern__, ip_msource_tree, ip_msource, ims_link, ip_msource_cmp); @@ -259,9 +259,9 @@ RB_PROTOTYPE_SC_PREV(__private_extern__, ip_msource_tree, ip_msource, * IPv4 multicast PCB-layer group filter descriptor. 
*/ struct in_mfilter { - struct ip_msource_tree imf_sources; /* source list for (S,G) */ - u_long imf_nsrc; /* # of source entries */ - uint8_t imf_st[2]; /* state before/at commit */ + struct ip_msource_tree imf_sources; /* source list for (S,G) */ + u_long imf_nsrc; /* # of source entries */ + uint8_t imf_st[2]; /* state before/at commit */ }; struct igmp_ifinfo; @@ -291,30 +291,30 @@ struct igmp_ifinfo; */ struct in_multi { decl_lck_mtx_data(, inm_lock); - u_int32_t inm_refcount; /* reference count */ - u_int32_t inm_reqcnt; /* request count for this address */ - u_int32_t inm_debug; /* see ifa_debug flags */ - LIST_ENTRY(in_multi) inm_link; /* queue macro glue */ - struct in_addr inm_addr; /* IP multicast address, convenience */ - struct ifnet *inm_ifp; /* back pointer to ifnet */ - struct ifmultiaddr *inm_ifma; /* back pointer to ifmultiaddr */ - u_int inm_timer; /* IGMPv1/v2 group / v3 query timer */ - u_int inm_state; /* state of the membership */ - void *inm_rti; /* unused, legacy field */ + u_int32_t inm_refcount; /* reference count */ + u_int32_t inm_reqcnt; /* request count for this address */ + u_int32_t inm_debug; /* see ifa_debug flags */ + LIST_ENTRY(in_multi) inm_link; /* queue macro glue */ + struct in_addr inm_addr; /* IP multicast address, convenience */ + struct ifnet *inm_ifp; /* back pointer to ifnet */ + struct ifmultiaddr *inm_ifma; /* back pointer to ifmultiaddr */ + u_int inm_timer; /* IGMPv1/v2 group / v3 query timer */ + u_int inm_state; /* state of the membership */ + void *inm_rti; /* unused, legacy field */ /* New fields for IGMPv3 follow. */ - struct igmp_ifinfo *inm_igi; /* IGMP info */ - SLIST_ENTRY(in_multi) inm_dtle; /* detached waiting for rele */ - SLIST_ENTRY(in_multi) inm_nrele; /* to-be-released by IGMP */ - u_int32_t inm_nrelecnt; /* deferred release count */ - struct ip_msource_tree inm_srcs; /* tree of sources */ - u_long inm_nsrc; /* # of tree entries */ - - struct ifqueue inm_scq; /* queue of pending - * state-change packets */ - struct timeval inm_lastgsrtv; /* Time of last G-S-R query */ - uint16_t inm_sctimer; /* state-change timer */ - uint16_t inm_scrv; /* state-change rexmit count */ + struct igmp_ifinfo *inm_igi; /* IGMP info */ + SLIST_ENTRY(in_multi) inm_dtle; /* detached waiting for rele */ + SLIST_ENTRY(in_multi) inm_nrele; /* to-be-released by IGMP */ + u_int32_t inm_nrelecnt; /* deferred release count */ + struct ip_msource_tree inm_srcs; /* tree of sources */ + u_long inm_nsrc; /* # of tree entries */ + + struct ifqueue inm_scq; /* queue of pending + * state-change packets */ + struct timeval inm_lastgsrtv; /* Time of last G-S-R query */ + uint16_t inm_sctimer; /* state-change timer */ + uint16_t inm_scrv; /* state-change rexmit count */ /* * SSM state counters which track state at T0 (the time the last @@ -324,47 +324,47 @@ struct in_multi { * are maintained here to optimize for common use-cases. 
*/ struct inm_st { - uint16_t iss_fmode; /* IGMP filter mode */ - uint16_t iss_asm; /* # of ASM listeners */ - uint16_t iss_ex; /* # of exclusive members */ - uint16_t iss_in; /* # of inclusive members */ - uint16_t iss_rec; /* # of recorded sources */ - } inm_st[2]; /* state at t0, t1 */ - - void (*inm_trace) /* callback fn for tracing refs */ - (struct in_multi *, int); + uint16_t iss_fmode; /* IGMP filter mode */ + uint16_t iss_asm; /* # of ASM listeners */ + uint16_t iss_ex; /* # of exclusive members */ + uint16_t iss_in; /* # of inclusive members */ + uint16_t iss_rec; /* # of recorded sources */ + } inm_st[2]; /* state at t0, t1 */ + + void (*inm_trace) /* callback fn for tracing refs */ + (struct in_multi *, int); }; -#define INM_LOCK_ASSERT_HELD(_inm) \ +#define INM_LOCK_ASSERT_HELD(_inm) \ LCK_MTX_ASSERT(&(_inm)->inm_lock, LCK_MTX_ASSERT_OWNED) -#define INM_LOCK_ASSERT_NOTHELD(_inm) \ +#define INM_LOCK_ASSERT_NOTHELD(_inm) \ LCK_MTX_ASSERT(&(_inm)->inm_lock, LCK_MTX_ASSERT_NOTOWNED) -#define INM_LOCK(_inm) \ +#define INM_LOCK(_inm) \ lck_mtx_lock(&(_inm)->inm_lock) -#define INM_LOCK_SPIN(_inm) \ +#define INM_LOCK_SPIN(_inm) \ lck_mtx_lock_spin(&(_inm)->inm_lock) -#define INM_CONVERT_LOCK(_inm) do { \ - INM_LOCK_ASSERT_HELD(_inm); \ - lck_mtx_convert_spin(&(_inm)->inm_lock); \ +#define INM_CONVERT_LOCK(_inm) do { \ + INM_LOCK_ASSERT_HELD(_inm); \ + lck_mtx_convert_spin(&(_inm)->inm_lock); \ } while (0) -#define INM_UNLOCK(_inm) \ +#define INM_UNLOCK(_inm) \ lck_mtx_unlock(&(_inm)->inm_lock) -#define INM_ADDREF(_inm) \ +#define INM_ADDREF(_inm) \ inm_addref(_inm, 0) -#define INM_ADDREF_LOCKED(_inm) \ +#define INM_ADDREF_LOCKED(_inm) \ inm_addref(_inm, 1) -#define INM_REMREF(_inm) \ +#define INM_REMREF(_inm) \ inm_remref(_inm, 0) -#define INM_REMREF_LOCKED(_inm) \ +#define INM_REMREF_LOCKED(_inm) \ inm_remref(_inm, 1) #ifdef SYSCTL_DECL @@ -396,24 +396,24 @@ struct in_multistep { * * Must be called with in_multihead_lock held. */ -#define IN_LOOKUP_MULTI(addr, ifp, inm) \ - /* struct in_addr *addr; */ \ - /* struct ifnet *ifp; */ \ - /* struct in_multi *inm; */ \ -do { \ - struct in_multistep _step; \ - IN_FIRST_MULTI(_step, inm); \ - while ((inm) != NULL) { \ - INM_LOCK_SPIN(inm); \ - if ((inm)->inm_ifp == (ifp) && \ - (inm)->inm_addr.s_addr == (addr)->s_addr) { \ - INM_ADDREF_LOCKED(inm); \ - INM_UNLOCK(inm); \ - break; \ - } \ - INM_UNLOCK(inm); \ - IN_NEXT_MULTI(_step, inm); \ - } \ +#define IN_LOOKUP_MULTI(addr, ifp, inm) \ + /* struct in_addr *addr; */ \ + /* struct ifnet *ifp; */ \ + /* struct in_multi *inm; */ \ +do { \ + struct in_multistep _step; \ + IN_FIRST_MULTI(_step, inm); \ + while ((inm) != NULL) { \ + INM_LOCK_SPIN(inm); \ + if ((inm)->inm_ifp == (ifp) && \ + (inm)->inm_addr.s_addr == (addr)->s_addr) { \ + INM_ADDREF_LOCKED(inm); \ + INM_UNLOCK(inm); \ + break; \ + } \ + INM_UNLOCK(inm); \ + IN_NEXT_MULTI(_step, inm); \ + } \ } while (0) /* @@ -425,22 +425,22 @@ do { \ * * Must be called with in_multihead_lock held. 
*/ -#define IN_NEXT_MULTI(step, inm) \ - /* struct in_multistep step; */ \ - /* struct in_multi *inm; */ \ -do { \ - in_multihead_lock_assert(LCK_RW_ASSERT_HELD); \ - if (((inm) = (step).i_inm) != NULL) \ - (step).i_inm = LIST_NEXT((step).i_inm, inm_link); \ +#define IN_NEXT_MULTI(step, inm) \ + /* struct in_multistep step; */ \ + /* struct in_multi *inm; */ \ +do { \ + in_multihead_lock_assert(LCK_RW_ASSERT_HELD); \ + if (((inm) = (step).i_inm) != NULL) \ + (step).i_inm = LIST_NEXT((step).i_inm, inm_link); \ } while (0) -#define IN_FIRST_MULTI(step, inm) \ - /* struct in_multistep step; */ \ - /* struct in_multi *inm; */ \ -do { \ - in_multihead_lock_assert(LCK_RW_ASSERT_HELD); \ - (step).i_inm = LIST_FIRST(&in_multihead); \ - IN_NEXT_MULTI((step), (inm)); \ +#define IN_FIRST_MULTI(step, inm) \ + /* struct in_multistep step; */ \ + /* struct in_multi *inm; */ \ +do { \ + in_multihead_lock_assert(LCK_RW_ASSERT_HELD); \ + (step).i_inm = LIST_FIRST(&in_multihead); \ + IN_NEXT_MULTI((step), (inm)); \ } while (0) extern lck_mtx_t *inet_domain_mutex; @@ -452,21 +452,21 @@ struct inpcb; /* * Return values for imo_multi_filter(). */ -#define MCAST_PASS 0 /* Pass */ -#define MCAST_NOTGMEMBER 1 /* This host not a member of group */ -#define MCAST_NOTSMEMBER 2 /* This host excluded source */ -#define MCAST_MUTED 3 /* [deprecated] */ +#define MCAST_PASS 0 /* Pass */ +#define MCAST_NOTGMEMBER 1 /* This host not a member of group */ +#define MCAST_NOTSMEMBER 2 /* This host excluded source */ +#define MCAST_MUTED 3 /* [deprecated] */ /* * Per-interface IPv4 structures. */ struct in_ifextra { - uint32_t netsig_len; - u_int8_t netsig[IFNET_SIGNATURELEN]; - struct lltable *ii_llt; /* ARP state */ + uint32_t netsig_len; + u_int8_t netsig[IFNET_SIGNATURELEN]; + struct lltable *ii_llt; /* ARP state */ }; -#define IN_IFEXTRA(_ifp) ((struct in_ifextra *)(_ifp->if_inetdata)) -#define LLTABLE(ifp) ((IN_IFEXTRA(ifp) == NULL) ? NULL : IN_IFEXTRA(ifp)->ii_llt) +#define IN_IFEXTRA(_ifp) ((struct in_ifextra *)(_ifp->if_inetdata)) +#define LLTABLE(ifp) ((IN_IFEXTRA(ifp) == NULL) ? NULL : IN_IFEXTRA(ifp)->ii_llt) extern u_int32_t ipv4_ll_arp_aware; diff --git a/bsd/netinet/ip.h b/bsd/netinet/ip.h index 0574f5c23..823c337db 100644 --- a/bsd/netinet/ip.h +++ b/bsd/netinet/ip.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -64,7 +64,7 @@ #ifndef _NETINET_IP_H_ #define _NETINET_IP_H_ #include -#include /* XXX temporary hack to get u_ types */ +#include /* XXX temporary hack to get u_ types */ #include #include @@ -73,144 +73,144 @@ * Definitions for internet protocol version 4. * Per RFC 791, September 1981. */ -#define IPVERSION 4 +#define IPVERSION 4 /* * Structure of an internet header, naked of options. */ struct ip { #ifdef _IP_VHL - u_char ip_vhl; /* version << 4 | header length >> 2 */ + u_char ip_vhl; /* version << 4 | header length >> 2 */ #else #if BYTE_ORDER == LITTLE_ENDIAN - u_int ip_hl:4, /* header length */ - ip_v:4; /* version */ + u_int ip_hl:4, /* header length */ + ip_v:4; /* version */ #endif #if BYTE_ORDER == BIG_ENDIAN - u_int ip_v:4, /* version */ - ip_hl:4; /* header length */ + u_int ip_v:4, /* version */ + ip_hl:4; /* header length */ #endif #endif /* not _IP_VHL */ - u_char ip_tos; /* type of service */ - u_short ip_len; /* total length */ - u_short ip_id; /* identification */ - u_short ip_off; /* fragment offset field */ -#define IP_RF 0x8000 /* reserved fragment flag */ -#define IP_DF 0x4000 /* dont fragment flag */ -#define IP_MF 0x2000 /* more fragments flag */ -#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */ - u_char ip_ttl; /* time to live */ - u_char ip_p; /* protocol */ - u_short ip_sum; /* checksum */ - struct in_addr ip_src,ip_dst; /* source and dest address */ + u_char ip_tos; /* type of service */ + u_short ip_len; /* total length */ + u_short ip_id; /* identification */ + u_short ip_off; /* fragment offset field */ +#define IP_RF 0x8000 /* reserved fragment flag */ +#define IP_DF 0x4000 /* dont fragment flag */ +#define IP_MF 0x2000 /* more fragments flag */ +#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */ + u_char ip_ttl; /* time to live */ + u_char ip_p; /* protocol */ + u_short ip_sum; /* checksum */ + struct in_addr ip_src, ip_dst; /* source and dest address */ }; #ifdef _IP_VHL -#define IP_MAKE_VHL(v, hl) ((v) << 4 | (hl)) -#define IP_VHL_HL(vhl) ((vhl) & 0x0f) -#define IP_VHL_V(vhl) ((vhl) >> 4) -#define IP_VHL_BORING 0x45 +#define IP_MAKE_VHL(v, hl) ((v) << 4 | (hl)) +#define IP_VHL_HL(vhl) ((vhl) & 0x0f) +#define IP_VHL_V(vhl) ((vhl) >> 4) +#define IP_VHL_BORING 0x45 #endif -#define IP_MAXPACKET 65535 /* maximum packet size */ +#define IP_MAXPACKET 65535 /* maximum packet size */ /* * Definitions for IP type of service (ip_tos) */ -#define IPTOS_LOWDELAY 0x10 -#define IPTOS_THROUGHPUT 0x08 -#define IPTOS_RELIABILITY 0x04 -#define IPTOS_MINCOST 0x02 +#define IPTOS_LOWDELAY 0x10 +#define IPTOS_THROUGHPUT 0x08 +#define IPTOS_RELIABILITY 0x04 +#define IPTOS_MINCOST 0x02 #if 1 /* ECN RFC3168 obsoletes RFC2481, and these will be deprecated soon. */ -#define IPTOS_CE 0x01 -#define IPTOS_ECT 0x02 +#define IPTOS_CE 0x01 +#define IPTOS_ECT 0x02 #endif -#define IPTOS_DSCP_SHIFT 2 +#define IPTOS_DSCP_SHIFT 2 /* * ECN (Explicit Congestion Notification) codepoints in RFC3168 * mapped to the lower 2 bits of the TOS field. 
*/ -#define IPTOS_ECN_NOTECT 0x00 /* not-ECT */ -#define IPTOS_ECN_ECT1 0x01 /* ECN-capable transport (1) */ -#define IPTOS_ECN_ECT0 0x02 /* ECN-capable transport (0) */ -#define IPTOS_ECN_CE 0x03 /* congestion experienced */ -#define IPTOS_ECN_MASK 0x03 /* ECN field mask */ +#define IPTOS_ECN_NOTECT 0x00 /* not-ECT */ +#define IPTOS_ECN_ECT1 0x01 /* ECN-capable transport (1) */ +#define IPTOS_ECN_ECT0 0x02 /* ECN-capable transport (0) */ +#define IPTOS_ECN_CE 0x03 /* congestion experienced */ +#define IPTOS_ECN_MASK 0x03 /* ECN field mask */ /* * Definitions for IP precedence (also in ip_tos) (hopefully unused) */ -#define IPTOS_PREC_NETCONTROL 0xe0 -#define IPTOS_PREC_INTERNETCONTROL 0xc0 -#define IPTOS_PREC_CRITIC_ECP 0xa0 -#define IPTOS_PREC_FLASHOVERRIDE 0x80 -#define IPTOS_PREC_FLASH 0x60 -#define IPTOS_PREC_IMMEDIATE 0x40 -#define IPTOS_PREC_PRIORITY 0x20 -#define IPTOS_PREC_ROUTINE 0x00 +#define IPTOS_PREC_NETCONTROL 0xe0 +#define IPTOS_PREC_INTERNETCONTROL 0xc0 +#define IPTOS_PREC_CRITIC_ECP 0xa0 +#define IPTOS_PREC_FLASHOVERRIDE 0x80 +#define IPTOS_PREC_FLASH 0x60 +#define IPTOS_PREC_IMMEDIATE 0x40 +#define IPTOS_PREC_PRIORITY 0x20 +#define IPTOS_PREC_ROUTINE 0x00 #ifdef PRIVATE /* - * Definitions of traffic class for use within wireless LAN. + * Definitions of traffic class for use within wireless LAN. * Mainly used by AFP for backup. Not recommended for general use. */ -#define IP_TCLASS_BE 0x00 /* standard, best effort */ -#define IP_TCLASS_BK 0x20 /* Background, low priority */ -#define IP_TCLASS_VI 0x80 /* Interactive */ -#define IP_TCLASS_VO 0xc0 /* Signalling */ +#define IP_TCLASS_BE 0x00 /* standard, best effort */ +#define IP_TCLASS_BK 0x20 /* Background, low priority */ +#define IP_TCLASS_VI 0x80 /* Interactive */ +#define IP_TCLASS_VO 0xc0 /* Signalling */ #endif /* * Definitions for options. */ -#define IPOPT_COPIED(o) ((o)&0x80) -#define IPOPT_CLASS(o) ((o)&0x60) -#define IPOPT_NUMBER(o) ((o)&0x1f) - -#define IPOPT_CONTROL 0x00 -#define IPOPT_RESERVED1 0x20 -#define IPOPT_DEBMEAS 0x40 -#define IPOPT_RESERVED2 0x60 - -#define IPOPT_EOL 0 /* end of option list */ -#define IPOPT_NOP 1 /* no operation */ - -#define IPOPT_RR 7 /* record packet route */ -#define IPOPT_TS 68 /* timestamp */ -#define IPOPT_SECURITY 130 /* provide s,c,h,tcc */ -#define IPOPT_LSRR 131 /* loose source route */ -#define IPOPT_SATID 136 /* satnet id */ -#define IPOPT_SSRR 137 /* strict source route */ -#define IPOPT_RA 148 /* router alert */ +#define IPOPT_COPIED(o) ((o)&0x80) +#define IPOPT_CLASS(o) ((o)&0x60) +#define IPOPT_NUMBER(o) ((o)&0x1f) + +#define IPOPT_CONTROL 0x00 +#define IPOPT_RESERVED1 0x20 +#define IPOPT_DEBMEAS 0x40 +#define IPOPT_RESERVED2 0x60 + +#define IPOPT_EOL 0 /* end of option list */ +#define IPOPT_NOP 1 /* no operation */ + +#define IPOPT_RR 7 /* record packet route */ +#define IPOPT_TS 68 /* timestamp */ +#define IPOPT_SECURITY 130 /* provide s,c,h,tcc */ +#define IPOPT_LSRR 131 /* loose source route */ +#define IPOPT_SATID 136 /* satnet id */ +#define IPOPT_SSRR 137 /* strict source route */ +#define IPOPT_RA 148 /* router alert */ /* * Offsets to fields in options other than EOL and NOP. 
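Illustrative sketch, not part of the patch, of the RFC 3168 codepoints above: only packets already marked ECT(0) or ECT(1) may be re-marked CE; an ECN-aware queue drops not-ECT traffic instead.

	static int
	ip_ecn_mark_ce(struct ip *ip)
	{
		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT) {
			return 0;               /* not ECN-capable: drop, don't mark */
		}
		ip->ip_tos |= IPTOS_ECN_CE;     /* ECT(0)/ECT(1) -> CE */
		return 1;                       /* caller must recompute ip_sum */
	}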
*/ -#define IPOPT_OPTVAL 0 /* option ID */ -#define IPOPT_OLEN 1 /* option length */ -#define IPOPT_OFFSET 2 /* offset within option */ -#define IPOPT_MINOFF 4 /* min value of above */ +#define IPOPT_OPTVAL 0 /* option ID */ +#define IPOPT_OLEN 1 /* option length */ +#define IPOPT_OFFSET 2 /* offset within option */ +#define IPOPT_MINOFF 4 /* min value of above */ /* * Time stamp option structure. */ -struct ip_timestamp { - u_char ipt_code; /* IPOPT_TS */ - u_char ipt_len; /* size of structure (variable) */ - u_char ipt_ptr; /* index of current entry */ +struct ip_timestamp { + u_char ipt_code; /* IPOPT_TS */ + u_char ipt_len; /* size of structure (variable) */ + u_char ipt_ptr; /* index of current entry */ #if BYTE_ORDER == LITTLE_ENDIAN - u_int ipt_flg:4, /* flags, see below */ - ipt_oflw:4; /* overflow counter */ + u_int ipt_flg:4, /* flags, see below */ + ipt_oflw:4; /* overflow counter */ #endif #if BYTE_ORDER == BIG_ENDIAN - u_int ipt_oflw:4, /* overflow counter */ - ipt_flg:4; /* flags, see below */ + u_int ipt_oflw:4, /* overflow counter */ + ipt_flg:4; /* flags, see below */ #endif union ipt_timestamp { - n_long ipt_time[1]; - struct ipt_ta { + n_long ipt_time[1]; + struct ipt_ta { struct in_addr ipt_addr; n_long ipt_time; } ipt_ta[1]; @@ -218,27 +218,27 @@ struct ip_timestamp { }; /* flag bits for ipt_flg */ -#define IPOPT_TS_TSONLY 0 /* timestamps only */ -#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */ -#define IPOPT_TS_PRESPEC 3 /* specified modules only */ +#define IPOPT_TS_TSONLY 0 /* timestamps only */ +#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */ +#define IPOPT_TS_PRESPEC 3 /* specified modules only */ /* bits for security (not byte swapped) */ -#define IPOPT_SECUR_UNCLASS 0x0000 -#define IPOPT_SECUR_CONFID 0xf135 -#define IPOPT_SECUR_EFTO 0x789a -#define IPOPT_SECUR_MMMM 0xbc4d -#define IPOPT_SECUR_RESTR 0xaf13 -#define IPOPT_SECUR_SECRET 0xd788 -#define IPOPT_SECUR_TOPSECRET 0x6bc5 +#define IPOPT_SECUR_UNCLASS 0x0000 +#define IPOPT_SECUR_CONFID 0xf135 +#define IPOPT_SECUR_EFTO 0x789a +#define IPOPT_SECUR_MMMM 0xbc4d +#define IPOPT_SECUR_RESTR 0xaf13 +#define IPOPT_SECUR_SECRET 0xd788 +#define IPOPT_SECUR_TOPSECRET 0x6bc5 /* * Internet implementation parameters. */ -#define MAXTTL 255 /* maximum time to live (seconds) */ -#define IPDEFTTL 64 /* default ttl, from RFC 1340 */ -#define IPFRAGTTL 30 /* time to live for frags (seconds) */ -#define IPTTLDEC 1 /* subtracted when forwarding */ +#define MAXTTL 255 /* maximum time to live (seconds) */ +#define IPDEFTTL 64 /* default ttl, from RFC 1340 */ +#define IPFRAGTTL 30 /* time to live for frags (seconds) */ +#define IPTTLDEC 1 /* subtracted when forwarding */ -#define IP_MSS 576 /* default maximum segment size */ +#define IP_MSS 576 /* default maximum segment size */ #endif diff --git a/bsd/netinet/ip6.h b/bsd/netinet/ip6.h index 7d181e66c..3bee96451 100644 --- a/bsd/netinet/ip6.h +++ b/bsd/netinet/ip6.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2016 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
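Illustrative sketch, not part of the patch: the canonical option walk the offsets above support, modelled loosely on ip_dooptions(). cp and cnt are hypothetical here (the first option byte and the size of the options area); the IPOPT_OLEN bounds checks are what keep a malformed option from running past the end.

	u_char *cp;     /* -> first option byte (hypothetical) */
	int cnt;        /* length of the options area (hypothetical) */
	int opt, optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL) {
			break;
		}
		if (opt == IPOPT_NOP) {
			optlen = 1;
			continue;
		}
		if (cnt < IPOPT_OLEN + 1) {
			break;          /* no room for a length byte */
		}
		optlen = cp[IPOPT_OLEN];
		if (optlen < IPOPT_OLEN + 1 || optlen > cnt) {
			break;          /* malformed length */
		}
		/* dispatch on opt: IPOPT_RR, IPOPT_TS, IPOPT_LSRR, ... */
	}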
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $KAME: ip6.h,v 1.18 2001/03/29 05:34:30 itojun Exp $*/ @@ -103,55 +103,55 @@ struct ip6_hdr { union { struct ip6_hdrctl { - u_int32_t ip6_un1_flow; /* 20 bits of flow-ID */ - u_int16_t ip6_un1_plen; /* payload length */ - u_int8_t ip6_un1_nxt; /* next header */ - u_int8_t ip6_un1_hlim; /* hop limit */ + u_int32_t ip6_un1_flow; /* 20 bits of flow-ID */ + u_int16_t ip6_un1_plen; /* payload length */ + u_int8_t ip6_un1_nxt; /* next header */ + u_int8_t ip6_un1_hlim; /* hop limit */ } ip6_un1; - u_int8_t ip6_un2_vfc; /* 4 bits version, top 4 bits class */ + u_int8_t ip6_un2_vfc; /* 4 bits version, top 4 bits class */ } ip6_ctlun; - struct in6_addr ip6_src; /* source address */ - struct in6_addr ip6_dst; /* destination address */ + struct in6_addr ip6_src; /* source address */ + struct in6_addr ip6_dst; /* destination address */ } __attribute__((__packed__)); -#define ip6_vfc ip6_ctlun.ip6_un2_vfc -#define ip6_flow ip6_ctlun.ip6_un1.ip6_un1_flow -#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen -#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt -#define ip6_hlim ip6_ctlun.ip6_un1.ip6_un1_hlim -#define ip6_hops ip6_ctlun.ip6_un1.ip6_un1_hlim +#define ip6_vfc ip6_ctlun.ip6_un2_vfc +#define ip6_flow ip6_ctlun.ip6_un1.ip6_un1_flow +#define ip6_plen ip6_ctlun.ip6_un1.ip6_un1_plen +#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt +#define ip6_hlim ip6_ctlun.ip6_un1.ip6_un1_hlim +#define ip6_hops ip6_ctlun.ip6_un1.ip6_un1_hlim -#define IPV6_VERSION 0x60 -#define IPV6_VERSION_MASK 0xf0 +#define IPV6_VERSION 0x60 +#define IPV6_VERSION_MASK 0xf0 #if BYTE_ORDER == BIG_ENDIAN -#define IPV6_FLOWINFO_MASK 0x0fffffff /* flow info (28 bits) */ -#define IPV6_FLOWLABEL_MASK 0x000fffff /* flow label (20 bits) */ -#define IPV6_FLOW_ECN_MASK 0x00300000 /* the 2 ECN bits */ +#define IPV6_FLOWINFO_MASK 0x0fffffff /* flow info (28 bits) */ +#define IPV6_FLOWLABEL_MASK 0x000fffff /* flow label (20 bits) */ +#define IPV6_FLOW_ECN_MASK 0x00300000 /* the 2 ECN bits */ #else #if BYTE_ORDER == LITTLE_ENDIAN -#define IPV6_FLOWINFO_MASK 0xffffff0f /* flow info (28 bits) */ -#define IPV6_FLOWLABEL_MASK 0xffff0f00 /* flow label (20 bits) */ -#define IPV6_FLOW_ECN_MASK 0x00003000 /* the 2 ECN bits */ +#define IPV6_FLOWINFO_MASK 0xffffff0f /* flow info (28 bits) */ +#define IPV6_FLOWLABEL_MASK 0xffff0f00 /* flow label (20 bits) */ +#define IPV6_FLOW_ECN_MASK 0x00003000 /* the 2 ECN bits */ #endif /* LITTLE_ENDIAN */ #endif #if 1 /* ECN bits proposed by Sally Floyd */ -#define IP6TOS_CE 0x01 /* congestion experienced */ -#define IP6TOS_ECT 0x02 /* ECN-capable transport */ +#define IP6TOS_CE 0x01 /* congestion experienced */ +#define IP6TOS_ECT 0x02 /* ECN-capable transport */ #endif /* * To access 
the 6 bits of the DSCP value in the 32 bits ip6_flow field */ -#define IP6FLOW_DSCP_MASK 0x0fc00000 -#define IP6FLOW_DSCP_SHIFT 22 +#define IP6FLOW_DSCP_MASK 0x0fc00000 +#define IP6FLOW_DSCP_SHIFT 22 /* * Extension Headers */ -struct ip6_ext { +struct ip6_ext { u_int8_t ip6e_nxt; u_int8_t ip6e_len; } __attribute__((__packed__)); @@ -159,45 +159,45 @@ struct ip6_ext { /* Hop-by-Hop options header */ /* XXX should we pad it to force alignment on an 8-byte boundary? */ struct ip6_hbh { - u_int8_t ip6h_nxt; /* next header */ - u_int8_t ip6h_len; /* length in units of 8 octets */ + u_int8_t ip6h_nxt; /* next header */ + u_int8_t ip6h_len; /* length in units of 8 octets */ /* followed by options */ } __attribute__((__packed__)); /* Destination options header */ /* XXX should we pad it to force alignment on an 8-byte boundary? */ struct ip6_dest { - u_int8_t ip6d_nxt; /* next header */ - u_int8_t ip6d_len; /* length in units of 8 octets */ + u_int8_t ip6d_nxt; /* next header */ + u_int8_t ip6d_len; /* length in units of 8 octets */ /* followed by options */ } __attribute__((__packed__)); /* Option types and related macros */ -#define IP6OPT_PAD1 0x00 /* 00 0 00000 */ -#define IP6OPT_PADN 0x01 /* 00 0 00001 */ -#define IP6OPT_JUMBO 0xC2 /* 11 0 00010 = 194 */ -#define IP6OPT_NSAP_ADDR 0xC3 /* 11 0 00011 */ -#define IP6OPT_TUNNEL_LIMIT 0x04 /* 00 0 00100 */ +#define IP6OPT_PAD1 0x00 /* 00 0 00000 */ +#define IP6OPT_PADN 0x01 /* 00 0 00001 */ +#define IP6OPT_JUMBO 0xC2 /* 11 0 00010 = 194 */ +#define IP6OPT_NSAP_ADDR 0xC3 /* 11 0 00011 */ +#define IP6OPT_TUNNEL_LIMIT 0x04 /* 00 0 00100 */ #ifndef KERNEL_PRIVATE -#define IP6OPT_RTALERT 0x05 /* 00 0 00101 (KAME definition) */ +#define IP6OPT_RTALERT 0x05 /* 00 0 00101 (KAME definition) */ #endif -#define IP6OPT_ROUTER_ALERT 0x05 /* 00 0 00101 (RFC3542, recommended) */ +#define IP6OPT_ROUTER_ALERT 0x05 /* 00 0 00101 (RFC3542, recommended) */ -#define IP6OPT_RTALERT_LEN 4 -#define IP6OPT_RTALERT_MLD 0 /* Datagram contains an MLD message */ -#define IP6OPT_RTALERT_RSVP 1 /* Datagram contains an RSVP message */ -#define IP6OPT_RTALERT_ACTNET 2 /* contains an Active Networks msg */ -#define IP6OPT_MINLEN 2 +#define IP6OPT_RTALERT_LEN 4 +#define IP6OPT_RTALERT_MLD 0 /* Datagram contains an MLD message */ +#define IP6OPT_RTALERT_RSVP 1 /* Datagram contains an RSVP message */ +#define IP6OPT_RTALERT_ACTNET 2 /* contains an Active Networks msg */ +#define IP6OPT_MINLEN 2 -#define IP6OPT_EID 0x8a /* 10 0 01010 */ +#define IP6OPT_EID 0x8a /* 10 0 01010 */ -#define IP6OPT_TYPE(o) ((o) & 0xC0) -#define IP6OPT_TYPE_SKIP 0x00 -#define IP6OPT_TYPE_DISCARD 0x40 -#define IP6OPT_TYPE_FORCEICMP 0x80 -#define IP6OPT_TYPE_ICMP 0xC0 +#define IP6OPT_TYPE(o) ((o) & 0xC0) +#define IP6OPT_TYPE_SKIP 0x00 +#define IP6OPT_TYPE_DISCARD 0x40 +#define IP6OPT_TYPE_FORCEICMP 0x80 +#define IP6OPT_TYPE_ICMP 0xC0 -#define IP6OPT_MUTABLE 0x20 +#define IP6OPT_MUTABLE 0x20 /* IPv6 options: common part */ struct ip6_opt { @@ -211,7 +211,7 @@ struct ip6_opt_jumbo { u_int8_t ip6oj_len; u_int8_t ip6oj_jumbo_len[4]; } __attribute__((__packed__)); -#define IP6OPT_JUMBO_LEN 6 +#define IP6OPT_JUMBO_LEN 6 /* NSAP Address Option */ struct ip6_opt_nsap { @@ -238,66 +238,65 @@ struct ip6_opt_router { }__attribute__((__packed__)); /* Router alert values (in network byte order) */ #if BYTE_ORDER == BIG_ENDIAN -#define IP6_ALERT_MLD 0x0000 -#define IP6_ALERT_RSVP 0x0001 -#define IP6_ALERT_AN 0x0002 +#define IP6_ALERT_MLD 0x0000 +#define IP6_ALERT_RSVP 0x0001 +#define IP6_ALERT_AN 0x0002 #else #if 
BYTE_ORDER == LITTLE_ENDIAN -#define IP6_ALERT_MLD 0x0000 -#define IP6_ALERT_RSVP 0x0100 -#define IP6_ALERT_AN 0x0200 +#define IP6_ALERT_MLD 0x0000 +#define IP6_ALERT_RSVP 0x0100 +#define IP6_ALERT_AN 0x0200 #endif /* LITTLE_ENDIAN */ #endif /* Routing header */ struct ip6_rthdr { - u_int8_t ip6r_nxt; /* next header */ - u_int8_t ip6r_len; /* length in units of 8 octets */ - u_int8_t ip6r_type; /* routing type */ - u_int8_t ip6r_segleft; /* segments left */ + u_int8_t ip6r_nxt; /* next header */ + u_int8_t ip6r_len; /* length in units of 8 octets */ + u_int8_t ip6r_type; /* routing type */ + u_int8_t ip6r_segleft; /* segments left */ /* followed by routing type specific data */ } __attribute__((__packed__)); -/* Type 0 Routing header */ +/* Type 0 Routing header, deprecated by RFC 5095. */ struct ip6_rthdr0 { - u_int8_t ip6r0_nxt; /* next header */ - u_int8_t ip6r0_len; /* length in units of 8 octets */ - u_int8_t ip6r0_type; /* always zero */ - u_int8_t ip6r0_segleft; /* segments left */ - u_int8_t ip6r0_reserved; /* reserved field */ - u_int8_t ip6r0_slmap[3]; /* strict/loose bit map */ - struct in6_addr ip6r0_addr[1]; /* up to 23 addresses */ + u_int8_t ip6r0_nxt; /* next header */ + u_int8_t ip6r0_len; /* length in units of 8 octets */ + u_int8_t ip6r0_type; /* always zero */ + u_int8_t ip6r0_segleft; /* segments left */ + u_int32_t ip6r0_reserved; /* reserved field */ + /* followed by up to 127 struct in6_addr */ } __attribute__((__packed__)); /* Fragment header */ struct ip6_frag { - u_int8_t ip6f_nxt; /* next header */ - u_int8_t ip6f_reserved; /* reserved field */ - u_int16_t ip6f_offlg; /* offset, reserved, and flag */ - u_int32_t ip6f_ident; /* identification */ + u_int8_t ip6f_nxt; /* next header */ + u_int8_t ip6f_reserved; /* reserved field */ + u_int16_t ip6f_offlg; /* offset, reserved, and flag */ + u_int32_t ip6f_ident; /* identification */ } __attribute__((__packed__)); #if BYTE_ORDER == BIG_ENDIAN -#define IP6F_OFF_MASK 0xfff8 /* mask out offset from _offlg */ -#define IP6F_RESERVED_MASK 0x0006 /* reserved bits in ip6f_offlg */ -#define IP6F_MORE_FRAG 0x0001 /* more-fragments flag */ +#define IP6F_OFF_MASK 0xfff8 /* mask out offset from _offlg */ +#define IP6F_RESERVED_MASK 0x0006 /* reserved bits in ip6f_offlg */ +#define IP6F_MORE_FRAG 0x0001 /* more-fragments flag */ #else /* BYTE_ORDER == LITTLE_ENDIAN */ -#define IP6F_OFF_MASK 0xf8ff /* mask out offset from _offlg */ -#define IP6F_RESERVED_MASK 0x0600 /* reserved bits in ip6f_offlg */ -#define IP6F_MORE_FRAG 0x0100 /* more-fragments flag */ +#define IP6F_OFF_MASK 0xf8ff /* mask out offset from _offlg */ +#define IP6F_RESERVED_MASK 0x0600 /* reserved bits in ip6f_offlg */ +#define IP6F_MORE_FRAG 0x0100 /* more-fragments flag */ #endif /* BYTE_ORDER == LITTLE_ENDIAN */ /* * Internet implementation parameters. */ -#define IPV6_MAXHLIM 255 /* maximum hoplimit */ -#define IPV6_DEFHLIM 64 /* default hlim */ -#define IPV6_FRAGTTL 60 /* ttl for fragment packets (seconds) */ -#define IPV6_HLIMDEC 1 /* subtracted when forwarding */ +#define IPV6_MAXHLIM 255 /* maximum hoplimit */ +#define IPV6_DEFHLIM 64 /* default hlim */ +#define IPV6_FRAGTTL 60 /* ttl for fragment packets (seconds) */ +#define IPV6_HLIMDEC 1 /* subtracted when forwarding */ -#define IPV6_MMTU 1280 /* minimal MTU and reassembly. 1024 + 256 */ -#define IPV6_MAXPACKET 65535 /* ip6 max packet size without Jumbo payload*/ -#define IPV6_MAXOPTHDR 2048 /* max option header size, 256 64-bit words */ +#define IPV6_MMTU 1280 /* minimal MTU and reassembly. 
1024 + 256 */ +#define IPV6_MAXPACKET 65535 /* ip6 max packet size without Jumbo payload*/ +#define IPV6_MAXOPTHDR 2048 /* max option header size, 256 64-bit words */ #ifdef BSD_KERNEL_PRIVATE /* @@ -309,38 +308,38 @@ struct ip6_frag { * supposed to never be matched but is prepared just in case. */ -#define IP6_EXTHDR_CHECK(m, off, hlen, action) \ -do { \ - if ((m)->m_next != NULL) { \ - if (((m)->m_flags & M_LOOP) && \ - ((m)->m_len < (off) + (hlen)) && \ - (((m) = m_pullup((m), (off) + (hlen))) == NULL)) { \ - ip6stat.ip6s_exthdrtoolong++; \ - action; \ - } else if ((m)->m_flags & M_EXT) { \ - if ((m)->m_len < (off) + (hlen)) { \ - ip6stat.ip6s_exthdrtoolong++; \ - m_freem(m); \ - (m) = NULL; \ - action; \ - } \ - } else { \ - if ((m)->m_len < (off) + (hlen)) { \ - ip6stat.ip6s_exthdrtoolong++; \ - m_freem(m); \ - (m) = NULL; \ - action; \ - } \ - } \ - } else { \ - if ((m)->m_len < (off) + (hlen)) { \ - ip6stat.ip6s_tooshort++; \ - in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); \ - m_freem(m); \ - (m) = NULL; \ - action; \ - } \ - } \ +#define IP6_EXTHDR_CHECK(m, off, hlen, action) \ +do { \ + if ((m)->m_next != NULL) { \ + if (((m)->m_flags & M_LOOP) && \ + ((m)->m_len < (off) + (hlen)) && \ + (((m) = m_pullup((m), (off) + (hlen))) == NULL)) { \ + ip6stat.ip6s_exthdrtoolong++; \ + action; \ + } else if ((m)->m_flags & M_EXT) { \ + if ((m)->m_len < (off) + (hlen)) { \ + ip6stat.ip6s_exthdrtoolong++; \ + m_freem(m); \ + (m) = NULL; \ + action; \ + } \ + } else { \ + if ((m)->m_len < (off) + (hlen)) { \ + ip6stat.ip6s_exthdrtoolong++; \ + m_freem(m); \ + (m) = NULL; \ + action; \ + } \ + } \ + } else { \ + if ((m)->m_len < (off) + (hlen)) { \ + ip6stat.ip6s_tooshort++; \ + in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated); \ + m_freem(m); \ + (m) = NULL; \ + action; \ + } \ + } \ } while (0) /* @@ -351,10 +350,10 @@ do { \ * IP6_EXTHDR_GET0 does the same, except that it aligns the structure at the * very top of mbuf. GET0 is likely to make memory copy than GET. */ -#define IP6_EXTHDR_GET(val, typ, m, off, len) \ +#define IP6_EXTHDR_GET(val, typ, m, off, len) \ M_STRUCT_GET(val, typ, m, off, len) -#define IP6_EXTHDR_GET0(val, typ, m, off, len) \ +#define IP6_EXTHDR_GET0(val, typ, m, off, len) \ M_STRUCT_GET0(val, typ, m, off, len) #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/ip_compat.h b/bsd/netinet/ip_compat.h index 5fb846683..d8de311b8 100644 --- a/bsd/netinet/ip_compat.h +++ b/bsd/netinet/ip_compat.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
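Illustrative sketch, not part of the patch, of a typical caller of the IP6_EXTHDR_CHECK/IP6_EXTHDR_GET macros above, modelled on the KAME input paths: CHECK bails out through its action argument (freeing the mbuf first) when the requested span is unavailable, and GET yields NULL once the underlying pulldown has failed and freed the chain.

	struct ip6_hbh *hbh;
	int hbhlen;

	IP6_EXTHDR_CHECK(m, off, sizeof(*hbh), return IPPROTO_DONE);
	IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, off, sizeof(*hbh));
	if (hbh == NULL) {
		return IPPROTO_DONE;    /* chain already freed on failure */
	}
	/* ip6h_len counts 8-octet units beyond the first 8 octets */
	hbhlen = (hbh->ip6h_len + 1) << 3;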
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -38,89 +38,89 @@ #if 0 -#ifndef __IP_COMPAT_H__ -#define __IP_COMPAT_H__ +#ifndef __IP_COMPAT_H__ +#define __IP_COMPAT_H__ -#ifndef __STDC__ -# define const +#ifndef __STDC__ +# define const #endif -#ifndef SOLARIS -#define SOLARIS (defined(sun) && (defined(__svr4__) || defined(__SVR4))) +#ifndef SOLARIS +#define SOLARIS (defined(sun) && (defined(__svr4__) || defined(__SVR4))) #endif #if defined(_KERNEL) && !defined(KERNEL) -# define KERNEL +# define KERNEL #endif #if defined(KERNEL) && !defined(_KERNEL) -# define _KERNEL +# define _KERNEL #endif #if!defined(__KERNEL__) && defined(KERNEL) -# define __KERNEL__ +# define __KERNEL__ #endif #if defined(__SVR4) || defined(__svr4__) || defined(__sgi) #define index strchr # if !defined(_KERNEL) -# define bzero(a,b) memset(a,0,b) -# define bcmp memcmp -# define bcopy(a,b,c) memmove(b,a,c) +# define bzero(a, b) memset(a,0,b) +# define bcmp memcmp +# define bcopy(a, b, c) memmove(b,a,c) # endif #endif struct ether_addr { - u_char ether_addr_octet[6]; + u_char ether_addr_octet[6]; }; -#ifdef linux +#ifdef linux # include #endif -#if SOLARIS -# define MTYPE(m) ((m)->b_datap->db_type) -# include -# include -# include +#if SOLARIS +# define MTYPE(m) ((m)->b_datap->db_type) +# include +# include +# include /* * because Solaris 2 defines these in two places :-/ */ -# undef IPOPT_EOL -# undef IPOPT_NOP -# undef IPOPT_LSRR -# undef IPOPT_RR -# undef IPOPT_SSRR -# ifndef _KERNEL -# define _KERNEL -# undef RES_INIT +# undef IPOPT_EOL +# undef IPOPT_NOP +# undef IPOPT_LSRR +# undef IPOPT_RR +# undef IPOPT_SSRR +# ifndef _KERNEL +# define _KERNEL +# undef RES_INIT # include # include # include -# undef _KERNEL +# undef _KERNEL # else /* _KERNEL */ # include # include # include # endif /* _KERNEL */ #endif /* SOLARIS */ -#define IPMINLEN(i, h) ((i)->ip_len >= ((i)->ip_hl * 4 + sizeof(struct h))) +#define IPMINLEN(i, h) ((i)->ip_len >= ((i)->ip_hl * 4 + sizeof(struct h))) -#ifndef IP_OFFMASK -#define IP_OFFMASK 0x1fff +#ifndef IP_OFFMASK +#define IP_OFFMASK 0x1fff #endif -#if BSD > 199306 -# define USE_QUAD_T -# define U_QUAD_T u_quad_t -# define QUAD_T quad_t +#if BSD > 199306 +# define USE_QUAD_T +# define U_QUAD_T u_quad_t +# define QUAD_T quad_t #else /* BSD > 199306 */ -# define U_QUAD_T u_int32_t -# define QUAD_T long +# define U_QUAD_T u_int32_t +# define QUAD_T long #endif /* BSD > 199306 */ /* * These operating systems already take care of the problem for us. */ #if defined(__NetBSD__) || defined(__OpenBSD__) || defined(__FreeBSD__) || \ - defined(__sgi) + defined(__sgi) typedef u_int32_t u_32_t; #else /* @@ -133,8 +133,8 @@ typedef u_int32_t u_32_t; # endif #endif /* __NetBSD__ || __OpenBSD__ || __FreeBSD__ || __sgi */ -#ifndef MAX -#define MAX(a,b) (((a) > (b)) ? (a) : (b)) +#ifndef MAX +#define MAX(a, b) (((a) > (b)) ? 
(a) : (b)) #endif /* @@ -151,47 +151,47 @@ typedef u_int32_t u_32_t; * 10101011 - Unclassified * 11110001 - (Reserved 1) */ -#define IPSO_CLASS_RES4 0x01 -#define IPSO_CLASS_TOPS 0x3d -#define IPSO_CLASS_SECR 0x5a -#define IPSO_CLASS_CONF 0x96 -#define IPSO_CLASS_RES3 0x66 -#define IPSO_CLASS_RES2 0xcc -#define IPSO_CLASS_UNCL 0xab -#define IPSO_CLASS_RES1 0xf1 - -#define IPSO_AUTH_GENSER 0x80 -#define IPSO_AUTH_ESI 0x40 -#define IPSO_AUTH_SCI 0x20 -#define IPSO_AUTH_NSA 0x10 -#define IPSO_AUTH_DOE 0x08 -#define IPSO_AUTH_UN 0x06 -#define IPSO_AUTH_FTE 0x01 +#define IPSO_CLASS_RES4 0x01 +#define IPSO_CLASS_TOPS 0x3d +#define IPSO_CLASS_SECR 0x5a +#define IPSO_CLASS_CONF 0x96 +#define IPSO_CLASS_RES3 0x66 +#define IPSO_CLASS_RES2 0xcc +#define IPSO_CLASS_UNCL 0xab +#define IPSO_CLASS_RES1 0xf1 + +#define IPSO_AUTH_GENSER 0x80 +#define IPSO_AUTH_ESI 0x40 +#define IPSO_AUTH_SCI 0x20 +#define IPSO_AUTH_NSA 0x10 +#define IPSO_AUTH_DOE 0x08 +#define IPSO_AUTH_UN 0x06 +#define IPSO_AUTH_FTE 0x01 /* * IP option #defines */ /*#define IPOPT_RR 7 */ -#define IPOPT_ZSU 10 /* ZSU */ -#define IPOPT_MTUP 11 /* MTUP */ -#define IPOPT_MTUR 12 /* MTUR */ -#define IPOPT_ENCODE 15 /* ENCODE */ +#define IPOPT_ZSU 10 /* ZSU */ +#define IPOPT_MTUP 11 /* MTUP */ +#define IPOPT_MTUR 12 /* MTUR */ +#define IPOPT_ENCODE 15 /* ENCODE */ /*#define IPOPT_TS 68 */ -#define IPOPT_TR 82 /* TR */ +#define IPOPT_TR 82 /* TR */ /*#define IPOPT_SECURITY 130 */ /*#define IPOPT_LSRR 131 */ -#define IPOPT_E_SEC 133 /* E-SEC */ -#define IPOPT_CIPSO 134 /* CIPSO */ +#define IPOPT_E_SEC 133 /* E-SEC */ +#define IPOPT_CIPSO 134 /* CIPSO */ /*#define IPOPT_SATID 136 */ -#ifndef IPOPT_SID -# define IPOPT_SID IPOPT_SATID +#ifndef IPOPT_SID +# define IPOPT_SID IPOPT_SATID #endif /*#define IPOPT_SSRR 137 */ -#define IPOPT_ADDEXT 147 /* ADDEXT */ -#define IPOPT_VISA 142 /* VISA */ -#define IPOPT_IMITD 144 /* IMITD */ -#define IPOPT_EIP 145 /* EIP */ -#define IPOPT_FINN 205 /* FINN */ +#define IPOPT_ADDEXT 147 /* ADDEXT */ +#define IPOPT_VISA 142 /* VISA */ +#define IPOPT_IMITD 144 /* IMITD */ +#define IPOPT_EIP 145 /* EIP */ +#define IPOPT_FINN 205 /* FINN */ #if defined(__FreeBSD__) && defined(KERNEL) @@ -206,162 +206,162 @@ typedef u_int32_t u_32_t; */ #ifdef KERNEL # if SOLARIS -# define MUTEX_ENTER(x) mutex_enter(x) -# define MUTEX_EXIT(x) mutex_exit(x) -# define MTOD(m,t) (t)((m)->b_rptr) -# define IRCOPY(a,b,c) copyin((a), (b), (c)) -# define IWCOPY(a,b,c) copyout((a), (b), (c)) -# define FREE_MB_T(m) freemsg(m) -# define SPL_NET(x) ; -# define SPL_IMP(x) ; -# undef SPL_X -# define SPL_X(x) ; +# define MUTEX_ENTER(x) mutex_enter(x) +# define MUTEX_EXIT(x) mutex_exit(x) +# define MTOD(m, t) (t)((m)->b_rptr) +# define IRCOPY(a, b, c) copyin((a), (b), (c)) +# define IWCOPY(a, b, c) copyout((a), (b), (c)) +# define FREE_MB_T(m) freemsg(m) +# define SPL_NET(x) ; +# define SPL_IMP(x) ; +# undef SPL_X +# define SPL_X(x) ; # ifdef sparc -# define ntohs(x) (x) -# define ntohl(x) (x) -# define htons(x) (x) -# define htonl(x) (x) +# define ntohs(x) (x) +# define ntohl(x) (x) +# define htons(x) (x) +# define htonl(x) (x) # endif /* sparc */ -# define KMALLOC(a,b,c) (a) = (b)kmem_alloc((c), KM_NOSLEEP) -# define GET_MINOR(x) getminor(x) -typedef struct qif { - struct qif *qf_next; - ill_t *qf_ill; - kmutex_t qf_lock; - void *qf_iptr; - void *qf_optr; - queue_t *qf_in; - queue_t *qf_out; - struct qinit *qf_wqinfo; - struct qinit *qf_rqinfo; - struct qinit qf_wqinit; - struct qinit qf_rqinit; - mblk_t *qf_m; /* These three fields are for passing 
data up from */ - queue_t *qf_q; /* fr_qin and fr_qout to the packet processing. */ - int qf_off; - int qf_len; /* this field is used for in ipfr_fastroute */ - char qf_name[8]; +# define KMALLOC(a, b, c) (a) = (b)kmem_alloc((c), KM_NOSLEEP) +# define GET_MINOR(x) getminor(x) +typedef struct qif { + struct qif *qf_next; + ill_t *qf_ill; + kmutex_t qf_lock; + void *qf_iptr; + void *qf_optr; + queue_t *qf_in; + queue_t *qf_out; + struct qinit *qf_wqinfo; + struct qinit *qf_rqinfo; + struct qinit qf_wqinit; + struct qinit qf_rqinit; + mblk_t *qf_m; /* These three fields are for passing data up from */ + queue_t *qf_q; /* fr_qin and fr_qout to the packet processing. */ + int qf_off; + int qf_len; /* this field is used for in ipfr_fastroute */ + char qf_name[8]; /* * in case the ILL has disappeared... */ - int qf_hl; /* header length */ + int qf_hl; /* header length */ } qif_t; -extern ill_t *get_unit(char *); -# define GETUNIT(n) get_unit((n)) +extern ill_t *get_unit(char *); +# define GETUNIT(n) get_unit((n)) # else /* SOLARIS */ # if defined(__sgi) # include -# define IPF_LOCK_PL plhi +# define IPF_LOCK_PL plhi # include #undef kmutex_t typedef struct { lock_t *l; int pl; } kmutex_t; -# define MUTEX_ENTER(x) (x)->pl = LOCK((x)->l, IPF_LOCK_PL); -# define MUTEX_EXIT(x) UNLOCK((x)->l, (x)->pl); +# define MUTEX_ENTER(x) (x)->pl = LOCK((x)->l, IPF_LOCK_PL); +# define MUTEX_EXIT(x) UNLOCK((x)->l, (x)->pl); # else /* __sgi */ -# define MUTEX_ENTER(x) ; -# define MUTEX_EXIT(x) ; +# define MUTEX_ENTER(x) ; +# define MUTEX_EXIT(x) ; # endif /* __sgi */ # ifndef linux -# define FREE_MB_T(m) m_freem(m) -# define MTOD(m,t) mtod(m,t) -# define IRCOPY(a,b,c) bcopy((a), (b), (c)) -# define IWCOPY(a,b,c) bcopy((a), (b), (c)) +# define FREE_MB_T(m) m_freem(m) +# define MTOD(m, t) mtod(m,t) +# define IRCOPY(a, b, c) bcopy((a), (b), (c)) +# define IWCOPY(a, b, c) bcopy((a), (b), (c)) # endif /* !linux */ # endif /* SOLARIS */ # ifdef sun # if !SOLARIS -# include -# define GETUNIT(n) ifunit((n), IFNAMSIZ) +# include +# define GETUNIT(n) ifunit((n), IFNAMSIZ) # endif # else -# ifndef linux -# define GETUNIT(n) ifunit((n)) +# ifndef linux +# define GETUNIT(n) ifunit((n)) # endif # endif /* sun */ # if defined(sun) && !defined(linux) || defined(__sgi) -# define UIOMOVE(a,b,c,d) uiomove((caddr_t)a,b,c,d) -# define SLEEP(id, n) sleep((id), PZERO+1) -# define WAKEUP(id) wakeup(id) -# define KFREE(x) kmem_free((char *)(x), sizeof(*(x))) -# define KFREES(x,s) kmem_free((char *)(x), (s)) +# define UIOMOVE(a, b, c, d) uiomove((caddr_t)a,b,c,d) +# define SLEEP(id, n) sleep((id), PZERO+1) +# define WAKEUP(id) wakeup(id) +# define KFREE(x) kmem_free((char *)(x), sizeof(*(x))) +# define KFREES(x, s) kmem_free((char *)(x), (s)) # if !SOLARIS -extern void m_copydata(struct mbuf *, int, int, caddr_t); -extern void m_copyback(struct mbuf *, int, int, caddr_t); +extern void m_copydata(struct mbuf *, int, int, caddr_t); +extern void m_copyback(struct mbuf *, int, int, caddr_t); # endif # ifdef __sgi # include # include -# define KMALLOC(a,b,c) (a) = (b)kmem_alloc((c), KM_NOSLEEP) -# define GET_MINOR(x) getminor(x) +# define KMALLOC(a, b, c) (a) = (b)kmem_alloc((c), KM_NOSLEEP) +# define GET_MINOR(x) getminor(x) # else # if !SOLARIS -# define KMALLOC(a,b,c) (a) = (b)new_kmem_alloc((c), KMEM_NOSLEEP) +# define KMALLOC(a, b, c) (a) = (b)new_kmem_alloc((c), KMEM_NOSLEEP) # endif /* SOLARIS */ # endif /* __sgi */ # endif /* sun && !linux */ -# ifndef GET_MINOR -# define GET_MINOR(x) minor(x) +# ifndef GET_MINOR +# define GET_MINOR(x) 
minor(x) # endif # if (BSD >= 199306) || defined(__FreeBSD__) # include -# if !defined(__FreeBSD__) || (defined (__FreeBSD__) && __FreeBSD__>=3) +# if !defined(__FreeBSD__) || (defined (__FreeBSD__) && __FreeBSD__ >= 3) # include # include -extern vm_map_t kmem_map; +extern vm_map_t kmem_map; # else /* !__FreeBSD__ || (__FreeBSD__ && __FreeBSD__>=3) */ # include # endif /* !__FreeBSD__ || (__FreeBSD__ && __FreeBSD__>=3) */ -# ifdef M_PFIL -# define KMALLOC(a, b, c) MALLOC((a), b, (c), M_PFIL, M_NOWAIT) -# define KFREE(x) FREE((x), M_PFIL) -# define KFREES(x,s) FREE((x), M_PFIL) +# ifdef M_PFIL +# define KMALLOC(a, b, c) MALLOC((a), b, (c), M_PFIL, M_NOWAIT) +# define KFREE(x) FREE((x), M_PFIL) +# define KFREES(x, s) FREE((x), M_PFIL) # else -# define KMALLOC(a, b, c) MALLOC((a), b, (c), M_TEMP, M_NOWAIT) -# define KFREE(x) FREE((x), M_TEMP) -# define KFREES(x,s) FREE((x), M_TEMP) +# define KMALLOC(a, b, c) MALLOC((a), b, (c), M_TEMP, M_NOWAIT) +# define KFREE(x) FREE((x), M_TEMP) +# define KFREES(x, s) FREE((x), M_TEMP) # endif /* M_PFIL */ -# define UIOMOVE(a,b,c,d) uiomove(a,b,d) -# define SLEEP(id, n) tsleep((id), PPAUSE|PCATCH, n, 0) -# define WAKEUP(id) wakeup(id) +# define UIOMOVE(a, b, c, d) uiomove(a,b,d) +# define SLEEP(id, n) tsleep((id), PPAUSE|PCATCH, n, 0) +# define WAKEUP(id) wakeup(id) # endif /* BSD */ # if defined(NetBSD) && NetBSD <= 1991011 && NetBSD >= 199407 -# define SPL_NET(x) x = splsoftnet() -# define SPL_X(x) (void) splx(x) +# define SPL_NET(x) x = splsoftnet() +# define SPL_X(x) (void) splx(x) # else # if !SOLARIS && !defined(linux) -# define SPL_IMP(x) ; -# define SPL_NET(x) ; -# define SPL_X(x) ; +# define SPL_IMP(x) ; +# define SPL_NET(x) ; +# define SPL_X(x) ; # endif # endif /* NetBSD && NetBSD <= 1991011 && NetBSD >= 199407 */ -# define PANIC(x,y) if (x) panic y +# define PANIC(x, y) if (x) panic y #else /* KERNEL */ -# define SLEEP(x,y) ; -# define WAKEUP(x) ; -# define PANIC(x,y) ; -# define MUTEX_ENTER(x) ; -# define MUTEX_EXIT(x) ; -# define SPL_NET(x) ; -# define SPL_IMP(x) ; -# undef SPL_X -# define SPL_X(x) ; +# define SLEEP(x, y) ; +# define WAKEUP(x) ; +# define PANIC(x, y) ; +# define MUTEX_ENTER(x) ; +# define MUTEX_EXIT(x) ; +# define SPL_NET(x) ; +# define SPL_IMP(x) ; +# undef SPL_X +# define SPL_X(x) ; /*# define KMALLOC(a,b,c) (a) = (b)malloc(c) */ -# define KFREE(x) FREE(x) -# define KFREES(x,s) FREE(x) -# define GETUNIT(x) get_unit(x) -# define IRCOPY(a,b,c) bcopy((a), (b), (c)) -# define IWCOPY(a,b,c) bcopy((a), (b), (c)) +# define KFREE(x) FREE(x) +# define KFREES(x, s) FREE(x) +# define GETUNIT(x) get_unit(x) +# define IRCOPY(a, b, c) bcopy((a), (b), (c)) +# define IWCOPY(a, b, c) bcopy((a), (b), (c)) #endif /* KERNEL */ #if SOLARIS typedef mblk_t mb_t; #else -# ifdef linux +# ifdef linux typedef struct sk_buff mb_t; # else typedef struct mbuf mb_t; @@ -374,228 +374,228 @@ typedef struct mbuf mb_t; * not be in other places or maybe one day linux will grow up and some * of these will turn up there too. 
*/ -#ifndef ICMP_MINLEN -# define ICMP_MINLEN 8 +#ifndef ICMP_MINLEN +# define ICMP_MINLEN 8 #endif -#ifndef ICMP_UNREACH -# define ICMP_UNREACH ICMP_DEST_UNREACH +#ifndef ICMP_UNREACH +# define ICMP_UNREACH ICMP_DEST_UNREACH #endif -#ifndef ICMP_SOURCEQUENCH -# define ICMP_SOURCEQUENCH ICMP_SOURCE_QUENCH +#ifndef ICMP_SOURCEQUENCH +# define ICMP_SOURCEQUENCH ICMP_SOURCE_QUENCH #endif -#ifndef ICMP_TIMXCEED -# define ICMP_TIMXCEED ICMP_TIME_EXCEEDED +#ifndef ICMP_TIMXCEED +# define ICMP_TIMXCEED ICMP_TIME_EXCEEDED #endif -#ifndef ICMP_PARAMPROB -# define ICMP_PARAMPROB ICMP_PARAMETERPROB +#ifndef ICMP_PARAMPROB +# define ICMP_PARAMPROB ICMP_PARAMETERPROB #endif #ifndef ICMP_TSTAMP -# define ICMP_TSTAMP ICMP_TIMESTAMP +# define ICMP_TSTAMP ICMP_TIMESTAMP #endif #ifndef ICMP_TSTAMPREPLY -# define ICMP_TSTAMPREPLY ICMP_TIMESTAMPREPLY +# define ICMP_TSTAMPREPLY ICMP_TIMESTAMPREPLY #endif #ifndef ICMP_IREQ -# define ICMP_IREQ ICMP_INFO_REQUEST +# define ICMP_IREQ ICMP_INFO_REQUEST #endif #ifndef ICMP_IREQREPLY -# define ICMP_IREQREPLY ICMP_INFO_REPLY +# define ICMP_IREQREPLY ICMP_INFO_REPLY #endif -#ifndef ICMP_MASKREQ -# define ICMP_MASKREQ ICMP_ADDRESS +#ifndef ICMP_MASKREQ +# define ICMP_MASKREQ ICMP_ADDRESS #endif #ifndef ICMP_MASKREPLY -# define ICMP_MASKREPLY ICMP_ADDRESSREPLY +# define ICMP_MASKREPLY ICMP_ADDRESSREPLY #endif -#ifndef IPVERSION -# define IPVERSION 4 +#ifndef IPVERSION +# define IPVERSION 4 #endif -#ifndef IPOPT_MINOFF -# define IPOPT_MINOFF 4 +#ifndef IPOPT_MINOFF +# define IPOPT_MINOFF 4 #endif -#ifndef IPOPT_COPIED -# define IPOPT_COPIED(x) ((x)&0x80) +#ifndef IPOPT_COPIED +# define IPOPT_COPIED(x) ((x)&0x80) #endif -#ifndef IPOPT_EOL -# define IPOPT_EOL 0 +#ifndef IPOPT_EOL +# define IPOPT_EOL 0 #endif -#ifndef IPOPT_NOP -# define IPOPT_NOP 1 +#ifndef IPOPT_NOP +# define IPOPT_NOP 1 #endif -#ifndef IP_MF -# define IP_MF ((u_short)0x2000) +#ifndef IP_MF +# define IP_MF ((u_short)0x2000) #endif -#ifndef ETHERTYPE_IP -# define ETHERTYPE_IP ((u_short)0x0800) +#ifndef ETHERTYPE_IP +# define ETHERTYPE_IP ((u_short)0x0800) #endif -#ifndef TH_FIN -# define TH_FIN 0x01 +#ifndef TH_FIN +# define TH_FIN 0x01 #endif -#ifndef TH_SYN -# define TH_SYN 0x02 +#ifndef TH_SYN +# define TH_SYN 0x02 #endif -#ifndef TH_RST -# define TH_RST 0x04 +#ifndef TH_RST +# define TH_RST 0x04 #endif -#ifndef TH_PUSH -# define TH_PUSH 0x08 +#ifndef TH_PUSH +# define TH_PUSH 0x08 #endif -#ifndef TH_ACK -# define TH_ACK 0x10 +#ifndef TH_ACK +# define TH_ACK 0x10 #endif -#ifndef TH_URG -# define TH_URG 0x20 +#ifndef TH_URG +# define TH_URG 0x20 #endif -#ifndef IPOPT_EOL -# define IPOPT_EOL 0 +#ifndef IPOPT_EOL +# define IPOPT_EOL 0 #endif -#ifndef IPOPT_NOP -# define IPOPT_NOP 1 +#ifndef IPOPT_NOP +# define IPOPT_NOP 1 #endif -#ifndef IPOPT_RR -# define IPOPT_RR 7 +#ifndef IPOPT_RR +# define IPOPT_RR 7 #endif -#ifndef IPOPT_TS -# define IPOPT_TS 68 +#ifndef IPOPT_TS +# define IPOPT_TS 68 #endif -#ifndef IPOPT_SECURITY -# define IPOPT_SECURITY 130 +#ifndef IPOPT_SECURITY +# define IPOPT_SECURITY 130 #endif -#ifndef IPOPT_LSRR -# define IPOPT_LSRR 131 +#ifndef IPOPT_LSRR +# define IPOPT_LSRR 131 #endif -#ifndef IPOPT_SATID -# define IPOPT_SATID 136 +#ifndef IPOPT_SATID +# define IPOPT_SATID 136 #endif -#ifndef IPOPT_SSRR -# define IPOPT_SSRR 137 +#ifndef IPOPT_SSRR +# define IPOPT_SSRR 137 #endif -#ifndef IPOPT_SECUR_UNCLASS -# define IPOPT_SECUR_UNCLASS ((u_short)0x0000) +#ifndef IPOPT_SECUR_UNCLASS +# define IPOPT_SECUR_UNCLASS ((u_short)0x0000) #endif -#ifndef IPOPT_SECUR_CONFID -# define 
IPOPT_SECUR_CONFID ((u_short)0xf135) +#ifndef IPOPT_SECUR_CONFID +# define IPOPT_SECUR_CONFID ((u_short)0xf135) #endif -#ifndef IPOPT_SECUR_EFTO -# define IPOPT_SECUR_EFTO ((u_short)0x789a) +#ifndef IPOPT_SECUR_EFTO +# define IPOPT_SECUR_EFTO ((u_short)0x789a) #endif -#ifndef IPOPT_SECUR_MMMM -# define IPOPT_SECUR_MMMM ((u_short)0xbc4d) +#ifndef IPOPT_SECUR_MMMM +# define IPOPT_SECUR_MMMM ((u_short)0xbc4d) #endif -#ifndef IPOPT_SECUR_RESTR -# define IPOPT_SECUR_RESTR ((u_short)0xaf13) +#ifndef IPOPT_SECUR_RESTR +# define IPOPT_SECUR_RESTR ((u_short)0xaf13) #endif -#ifndef IPOPT_SECUR_SECRET -# define IPOPT_SECUR_SECRET ((u_short)0xd788) +#ifndef IPOPT_SECUR_SECRET +# define IPOPT_SECUR_SECRET ((u_short)0xd788) #endif #ifndef IPOPT_SECUR_TOPSECRET -# define IPOPT_SECUR_TOPSECRET ((u_short)0x6bc5) +# define IPOPT_SECUR_TOPSECRET ((u_short)0x6bc5) #endif #ifndef IPOPT_OLEN -# define IPOPT_OLEN 1 +# define IPOPT_OLEN 1 #endif #endif /* linux || __sgi */ -#ifdef linux +#ifdef linux /* * TCP States */ -#define TCPS_CLOSED 0 /* closed */ -#define TCPS_LISTEN 1 /* listening for connection */ -#define TCPS_SYN_SENT 2 /* active, have sent syn */ -#define TCPS_SYN_RECEIVED 3 /* have send and received syn */ +#define TCPS_CLOSED 0 /* closed */ +#define TCPS_LISTEN 1 /* listening for connection */ +#define TCPS_SYN_SENT 2 /* active, have sent syn */ +#define TCPS_SYN_RECEIVED 3 /* have send and received syn */ /* states < TCPS_ESTABLISHED are those where connections not established */ -#define TCPS_ESTABLISHED 4 /* established */ -#define TCPS_CLOSE_WAIT 5 /* rcvd fin, waiting for close */ +#define TCPS_ESTABLISHED 4 /* established */ +#define TCPS_CLOSE_WAIT 5 /* rcvd fin, waiting for close */ /* states > TCPS_CLOSE_WAIT are those where user has closed */ -#define TCPS_FIN_WAIT_1 6 /* have closed, sent fin */ -#define TCPS_CLOSING 7 /* closed xchd FIN; await FIN ACK */ -#define TCPS_LAST_ACK 8 /* had fin and close; await FIN ACK */ +#define TCPS_FIN_WAIT_1 6 /* have closed, sent fin */ +#define TCPS_CLOSING 7 /* closed xchd FIN; await FIN ACK */ +#define TCPS_LAST_ACK 8 /* had fin and close; await FIN ACK */ /* states > TCPS_CLOSE_WAIT && < TCPS_FIN_WAIT_2 await ACK of FIN */ -#define TCPS_FIN_WAIT_2 9 /* have closed, fin is acked */ -#define TCPS_TIME_WAIT 10 /* in 2*msl quiet wait after close */ +#define TCPS_FIN_WAIT_2 9 /* have closed, fin is acked */ +#define TCPS_TIME_WAIT 10 /* in 2*msl quiet wait after close */ /* * file flags. */ -#define FWRITE WRITE -#define FREAD READ +#define FWRITE WRITE +#define FREAD READ /* * mbuf related problems. 
*/ -#define mtod(m,t) (t)((m)->data) -#define m_len len -#define m_next next - -#define IP_DF 0x8000 - -typedef struct { - __u16 th_sport; - __u16 th_dport; - __u32 th_seq; - __u32 th_ack; -# if defined(__i386__) || defined(__MIPSEL__) || defined(__alpha__) ||\ - defined(vax) - __u8 th_res:4; - __u8 th_off:4; +#define mtod(m, t) (t)((m)->data) +#define m_len len +#define m_next next + +#define IP_DF 0x8000 + +typedef struct { + __u16 th_sport; + __u16 th_dport; + __u32 th_seq; + __u32 th_ack; +# if defined(__i386__) || defined(__MIPSEL__) || defined(__alpha__) || \ + defined(vax) + __u8 th_res:4; + __u8 th_off:4; #else - __u8 th_off:4; - __u8 th_res:4; + __u8 th_off:4; + __u8 th_res:4; #endif - __u8 th_flags; - __u16 th_win; - __u16 th_sum; - __u16 th_urp; + __u8 th_flags; + __u16 th_win; + __u16 th_sum; + __u16 th_urp; } tcphdr_t; -typedef struct { - __u16 uh_sport; - __u16 uh_dport; - __u16 uh_ulen; - __u16 uh_sum; +typedef struct { + __u16 uh_sport; + __u16 uh_dport; + __u16 uh_ulen; + __u16 uh_sum; } udphdr_t; -typedef struct { -# if defined(__i386__) || defined(__MIPSEL__) || defined(__alpha__) ||\ - defined(vax) - __u8 ip_hl:4; - __u8 ip_v:4; +typedef struct { +# if defined(__i386__) || defined(__MIPSEL__) || defined(__alpha__) || \ + defined(vax) + __u8 ip_hl:4; + __u8 ip_v:4; # else - __u8 ip_hl:4; - __u8 ip_v:4; + __u8 ip_hl:4; + __u8 ip_v:4; # endif - __u8 ip_tos; - __u16 ip_len; - __u16 ip_id; - __u16 ip_off; - __u8 ip_ttl; - __u8 ip_p; - __u16 ip_sum; - struct in_addr ip_src; - struct in_addr ip_dst; + __u8 ip_tos; + __u16 ip_len; + __u16 ip_id; + __u16 ip_off; + __u8 ip_ttl; + __u8 ip_p; + __u16 ip_sum; + struct in_addr ip_src; + struct in_addr ip_dst; } ip_t; /* * Structure of an icmp header. */ typedef struct icmp { - u_char icmp_type; /* type of message, see below */ - u_char icmp_code; /* type sub code */ - u_short icmp_cksum; /* ones complement cksum of struct */ + u_char icmp_type; /* type of message, see below */ + u_char icmp_code; /* type sub code */ + u_short icmp_cksum; /* ones complement cksum of struct */ union { - u_char ih_pptr; /* ICMP_PARAMPROB */ - struct in_addr ih_gwaddr; /* ICMP_REDIRECT */ + u_char ih_pptr; /* ICMP_PARAMPROB */ + struct in_addr ih_gwaddr; /* ICMP_REDIRECT */ struct ih_idseq { - n_short icd_id; - n_short icd_seq; + n_short icd_id; + n_short icd_seq; } ih_idseq; int ih_void; } icmp_hun; -# define icmp_pptr icmp_hun.ih_pptr -# define icmp_gwaddr icmp_hun.ih_gwaddr -# define icmp_id icmp_hun.ih_idseq.icd_id -# define icmp_seq icmp_hun.ih_idseq.icd_seq -# define icmp_void icmp_hun.ih_void +# define icmp_pptr icmp_hun.ih_pptr +# define icmp_gwaddr icmp_hun.ih_gwaddr +# define icmp_id icmp_hun.ih_idseq.icd_id +# define icmp_seq icmp_hun.ih_idseq.icd_seq +# define icmp_void icmp_hun.ih_void union { struct id_ts { n_time its_otime; @@ -606,129 +606,129 @@ typedef struct icmp { ip_t idi_ip; /* options and then 64 bits of data */ } id_ip; - u_int32_t id_mask; - char id_data[1]; + u_int32_t id_mask; + char id_data[1]; } icmp_dun; -# define icmp_otime icmp_dun.id_ts.its_otime -# define icmp_rtime icmp_dun.id_ts.its_rtime -# define icmp_ttime icmp_dun.id_ts.its_ttime -# define icmp_ip icmp_dun.id_ip.idi_ip -# define icmp_mask icmp_dun.id_mask -# define icmp_data icmp_dun.id_data +# define icmp_otime icmp_dun.id_ts.its_otime +# define icmp_rtime icmp_dun.id_ts.its_rtime +# define icmp_ttime icmp_dun.id_ts.its_ttime +# define icmp_ip icmp_dun.id_ip.idi_ip +# define icmp_mask icmp_dun.id_mask +# define icmp_data icmp_dun.id_data } icmphdr_t; # ifndef 
LINUX_IPOVLY # define LINUX_IPOVLY struct ipovly { - caddr_t ih_next, ih_prev; /* for protocol sequence q's */ - u_char ih_x1; /* (unused) */ - u_char ih_pr; /* protocol */ - short ih_len; /* protocol length */ - struct in_addr ih_src; /* source internet address */ - struct in_addr ih_dst; /* destination internet address */ + caddr_t ih_next, ih_prev; /* for protocol sequence q's */ + u_char ih_x1; /* (unused) */ + u_char ih_pr; /* protocol */ + short ih_len; /* protocol length */ + struct in_addr ih_src; /* source internet address */ + struct in_addr ih_dst; /* destination internet address */ }; # endif typedef struct { - __u8 ether_dhost[6]; - __u8 ether_shost[6]; - __u16 ether_type; + __u8 ether_dhost[6]; + __u8 ether_shost[6]; + __u16 ether_type; } ether_header_t; -typedef struct uio { - int uio_resid; - int uio_rw; - caddr_t uio_buf; +typedef struct uio { + int uio_resid; + int uio_rw; + caddr_t uio_buf; } uio_t; -# define UIO_READ 0 -# define UIO_WRITE 1 -# define UIOMOVE(a, b, c, d) uiomove(a,b,c,d) +# define UIO_READ 0 +# define UIO_WRITE 1 +# define UIOMOVE(a, b, c, d) uiomove(a,b,c,d) /* * For masking struct ifnet onto struct device */ -# define if_name name +# define if_name name -# ifdef KERNEL -# define GETUNIT(x) dev_get(x) -# define FREE_MB_T(m) kfree_skb(m, FREE_WRITE) -# define uniqtime do_gettimeofday +# ifdef KERNEL +# define GETUNIT(x) dev_get(x) +# define FREE_MB_T(m) kfree_skb(m, FREE_WRITE) +# define uniqtime do_gettimeofday # undef INT_MAX # undef UINT_MAX # undef LONG_MAX # undef ULONG_MAX # include -# define SPL_X(x) -# define SPL_NET(x) -# define SPL_IMP(x) - -# define bcmp(a,b,c) memcmp(a,b,c) -# define bcopy(a,b,c) memcpy(b,a,c) -# define bzero(a,c) memset(a,0,c) - -# define UNITNAME(n) dev_get((n)) - -# define KMALLOC(a,b,c) (a) = (b)kmalloc((c), GFP_ATOMIC) -# define KFREE(x) kfree_s((x), sizeof(*(x))) -# define KFREES(x,s) kfree_s((x), (s)) -# define IRCOPY(a,b,c) { \ - error = verify_area(VERIFY_READ, (a) ,(c)); \ - if (!error) \ - memcpy_fromfs((b), (a), (c)); \ - } -# define IWCOPY(a,b,c) { \ - error = verify_area(VERIFY_WRITE, (b), (c)); \ - if (!error) \ - memcpy_tofs((b), (a), (c)); \ - } +# define SPL_X(x) +# define SPL_NET(x) +# define SPL_IMP(x) + +# define bcmp(a, b, c) memcmp(a,b,c) +# define bcopy(a, b, c) memcpy(b,a,c) +# define bzero(a, c) memset(a,0,c) + +# define UNITNAME(n) dev_get((n)) + +# define KMALLOC(a, b, c) (a) = (b)kmalloc((c), GFP_ATOMIC) +# define KFREE(x) kfree_s((x), sizeof(*(x))) +# define KFREES(x, s) kfree_s((x), (s)) +# define IRCOPY(a, b, c) {\ + error = verify_area(VERIFY_READ, (a) ,(c)); \ + if (!error) \ + memcpy_fromfs((b), (a), (c)); \ + } +# define IWCOPY(a, b, c) {\ + error = verify_area(VERIFY_WRITE, (b), (c)); \ + if (!error) \ + memcpy_tofs((b), (a), (c)); \ + } # else -# define __KERNEL__ +# define __KERNEL__ # undef INT_MAX # undef UINT_MAX # undef LONG_MAX # undef ULONG_MAX -# define s8 __s8 -# define u8 __u8 -# define s16 __s16 -# define u16 __u16 -# define s32 __s32 -# define u32 __u32 +# define s8 __s8 +# define u8 __u8 +# define s16 __s16 +# define u16 __u16 +# define s32 __s32 +# define u32 __u32 # include -# undef __KERNEL__ +# undef __KERNEL__ # endif -# define ifnet device +# define ifnet device #else -typedef struct tcphdr tcphdr_t; -typedef struct udphdr udphdr_t; -typedef struct icmp icmphdr_t; -typedef struct ip ip_t; -typedef struct ether_header ether_header_t; +typedef struct tcphdr tcphdr_t; +typedef struct udphdr udphdr_t; +typedef struct icmp icmphdr_t; +typedef struct ip ip_t; +typedef struct 
ether_header ether_header_t; #endif /* linux */ -typedef struct tcpiphdr tcpiphdr_t; +typedef struct tcpiphdr tcpiphdr_t; #if defined(hpux) || defined(linux) -struct ether_addr { - char ether_addr_octet[6]; +struct ether_addr { + char ether_addr_octet[6]; }; #endif /* * XXX - This is one of those *awful* hacks which nobody likes */ -#ifdef ultrix -#define A_A +#ifdef ultrix +#define A_A #else -#define A_A & +#define A_A & #endif -#ifndef ICMP_ROUTERADVERT -# define ICMP_ROUTERADVERT 9 +#ifndef ICMP_ROUTERADVERT +# define ICMP_ROUTERADVERT 9 #endif -#ifndef ICMP_ROUTERSOLICIT -# define ICMP_ROUTERSOLICIT 10 +#ifndef ICMP_ROUTERSOLICIT +# define ICMP_ROUTERSOLICIT 10 #endif -#endif /* __IP_COMPAT_H__ */ +#endif /* __IP_COMPAT_H__ */ #endif /* #if 0 */ diff --git a/bsd/netinet/ip_divert.c b/bsd/netinet/ip_divert.c index 74efc18df..723b49961 100644 --- a/bsd/netinet/ip_divert.c +++ b/bsd/netinet/ip_divert.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -101,8 +101,8 @@ /* * Allocate enough space to hold a full IP packet */ -#define DIVSNDQ (65536 + 100) -#define DIVRCVQ (65536 + 100) +#define DIVSNDQ (65536 + 100) +#define DIVRCVQ (65536 + 100) /* * Divert sockets work in conjunction with ipfw, see the divert(4) @@ -132,15 +132,15 @@ static struct inpcbhead divcb; static struct inpcbinfo divcbinfo; -static u_int32_t div_sendspace = DIVSNDQ; /* XXX sysctl ? */ -static u_int32_t div_recvspace = DIVRCVQ; /* XXX sysctl ? */ +static u_int32_t div_sendspace = DIVSNDQ; /* XXX sysctl ? */ +static u_int32_t div_recvspace = DIVRCVQ; /* XXX sysctl ? 
*/ /* Optimization: have this preinitialized */ -static struct sockaddr_in divsrc = { sizeof(divsrc), AF_INET, 0, { 0 }, { 0,0,0,0,0,0,0,0 } }; +static struct sockaddr_in divsrc = { sizeof(divsrc), AF_INET, 0, { 0 }, { 0, 0, 0, 0, 0, 0, 0, 0 } }; /* Internal functions */ static int div_output(struct socket *so, - struct mbuf *m, struct sockaddr_in *addr, struct mbuf *control); + struct mbuf *m, struct sockaddr_in *addr, struct mbuf *control); extern int load_ipfw(void); /* @@ -153,10 +153,11 @@ div_init(struct protosw *pp, struct domain *dp) static int div_initialized = 0; struct inpcbinfo *pcbinfo; - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); - if (div_initialized) + if (div_initialized) { return; + } div_initialized = 1; LIST_INIT(&divcb); @@ -168,10 +169,10 @@ div_init(struct protosw *pp, struct domain *dp) */ divcbinfo.ipi_hashbase = hashinit(1, M_PCB, &divcbinfo.ipi_hashmask); divcbinfo.ipi_porthashbase = hashinit(1, M_PCB, &divcbinfo.ipi_porthashmask); - divcbinfo.ipi_zone = zinit(sizeof(struct inpcb),(512 * sizeof(struct inpcb)), - 4096, "divzone"); + divcbinfo.ipi_zone = zinit(sizeof(struct inpcb), (512 * sizeof(struct inpcb)), + 4096, "divzone"); pcbinfo = &divcbinfo; - /* + /* * allocate lock group attribute and group for udp pcb mutexes */ pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init(); @@ -226,7 +227,7 @@ divert_packet(struct mbuf *m, int incoming, int port, int rule) /* Sanity check */ KASSERT(port != 0, ("%s: port=0", __FUNCTION__)); - divsrc.sin_port = rule; /* record matching rule */ + divsrc.sin_port = rule; /* record matching rule */ /* Assure header */ if (m->m_len < sizeof(struct ip) && @@ -267,10 +268,10 @@ divert_packet(struct mbuf *m, int incoming, int port, int rule) bzero(&divsrc.sin_zero, sizeof(divsrc.sin_zero)); if (m->m_pkthdr.rcvif) { /* - * Hide the actual interface name in there in the + * Hide the actual interface name in there in the * sin_zero array. XXX This needs to be moved to a * different sockaddr type for divert, e.g. - * sockaddr_div with multiple fields like + * sockaddr_div with multiple fields like * sockaddr_dl. Presently we have only 7 bytes * but that will do for now as most interfaces * are 4 or less + 2 or less bytes for unit. @@ -283,33 +284,35 @@ divert_packet(struct mbuf *m, int incoming, int port, int rule) * and re-uses the sockaddr_in as suggested in the man pages, * this iface name will come along for the ride. * (see div_output for the other half of this.) 
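The comments above describe the divert(4) contract only in prose; a hedged userland sketch of the peer side follows (illustrative: the divert port 8668 is arbitrary and must match an ipfw divert rule, and opening an IPPROTO_DIVERT socket requires root).

#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <unistd.h>

static void
divert_loop(void)
{
	char buf[65535 + 100];          /* cf. DIVSNDQ/DIVRCVQ above */
	struct sockaddr_in sin;
	socklen_t slen;
	ssize_t n;
	int fd;

	fd = socket(PF_INET, SOCK_RAW, IPPROTO_DIVERT);
	if (fd < 0) {
		return;
	}
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(8668);     /* must match the ipfw divert rule */
	if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) == 0) {
		for (;;) {
			slen = sizeof(sin);
			n = recvfrom(fd, buf, sizeof(buf), 0,
			    (struct sockaddr *)&sin, &slen);
			if (n < 0) {
				break;
			}
			/* sin_port now carries the matching rule number and
			 * sin_zero the receive interface name, exactly as
			 * divert_packet() fills them in above. */
			sendto(fd, buf, n, 0, (struct sockaddr *)&sin, slen);
		}
	}
	close(fd);
}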
- */ + */ snprintf(divsrc.sin_zero, sizeof(divsrc.sin_zero), - "%s", if_name(m->m_pkthdr.rcvif)); + "%s", if_name(m->m_pkthdr.rcvif)); } /* Put packet on socket queue, if any */ sa = NULL; nport = htons((u_int16_t)port); - lck_rw_lock_shared(divcbinfo.ipi_lock); + lck_rw_lock_shared(divcbinfo.ipi_lock); LIST_FOREACH(inp, &divcb, inp_list) { - if (inp->inp_lport == nport) + if (inp->inp_lport == nport) { sa = inp->inp_socket; + } } if (sa) { int error = 0; - + socket_lock(sa, 1); if (sbappendaddr(&sa->so_rcv, (struct sockaddr *)&divsrc, - m, (struct mbuf *)0, &error) != 0) + m, (struct mbuf *)0, &error) != 0) { sorwakeup(sa); + } socket_unlock(sa, 1); } else { m_freem(m); OSAddAtomic(1, &ipstat.ips_noproto); OSAddAtomic(-1, &ipstat.ips_delivered); - } - lck_rw_done(divcbinfo.ipi_lock); + } + lck_rw_done(divcbinfo.ipi_lock); } /* @@ -318,11 +321,11 @@ divert_packet(struct mbuf *m, int incoming, int port, int rule) * If no address specified, or address is 0.0.0.0, send to ip_output(); * otherwise, send to ip_input() and mark as having been received on * the interface with that address. - * ###LOCK called in inet_proto mutex when from div_send. + * ###LOCK called in inet_proto mutex when from div_send. */ static int div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin, - struct mbuf *control) + struct mbuf *control) { struct inpcb *const inp = sotoinpcb(so); struct ip *const ip = mtod(m, struct ip *); @@ -334,42 +337,46 @@ div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin, (void) so_tc_from_control(control, &sotc, &ignored); - m_freem(control); /* XXX */ + m_freem(control); /* XXX */ control = NULL; } - if (sotc == SO_TC_UNSPEC) + if (sotc == SO_TC_UNSPEC) { sotc = so->so_traffic_class; + } /* Loopback avoidance and state recovery */ if (sin) { struct m_tag *mtag; struct divert_tag *dt; - int len = 0; - char *c = sin->sin_zero; + int len = 0; + char *c = sin->sin_zero; mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DIVERT, - sizeof(struct divert_tag), M_NOWAIT, m); + sizeof(struct divert_tag), M_NOWAIT, m); if (mtag == NULL) { error = ENOBUFS; goto cantsend; } - dt = (struct divert_tag *)(mtag+1); + dt = (struct divert_tag *)(mtag + 1); dt->info = 0; dt->cookie = sin->sin_port; m_tag_prepend(m, mtag); /* * Find receive interface with the given name or IP address. - * The name is user supplied data so don't trust it's size or + * The name is user supplied data so don't trust it's size or * that it is zero terminated. The name has priority. - * We are presently assuming that the sockaddr_in + * We are presently assuming that the sockaddr_in * has not been replaced by a sockaddr_div, so we limit it * to 16 bytes in total. the name is stuffed (if it exists) * in the sin_zero[] field.
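Continuing the userland sketch: the dispatch in div_output() means the sockaddr handed to sendto() steers reinjection, as the comment above explains. Illustrative only; "en0" is an arbitrary interface name, and fd, buf, n carry over from the previous sketch.

	struct sockaddr_in to;

	memset(&to, 0, sizeof(to));
	to.sin_family = AF_INET;
	to.sin_len = sizeof(to);
	/* All-zero sin_addr and sin_zero: reinject as *outgoing*, via
	 * ip_output().  An interface name stuffed into sin_zero (at most
	 * 7 bytes plus NUL, per the comment above): reinject as
	 * *incoming* on that interface, via ip_input(). */
	strlcpy(to.sin_zero, "en0", sizeof(to.sin_zero));
	sendto(fd, buf, n, 0, (struct sockaddr *)&to, sizeof(to));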
*/ - while (*c++ && (len++ < sizeof(sin->sin_zero))); - if ((len > 0) && (len < sizeof(sin->sin_zero))) + while (*c++ && (len++ < sizeof(sin->sin_zero))) { + ; + } + if ((len > 0) && (len < sizeof(sin->sin_zero))) { m->m_pkthdr.rcvif = ifunit(sin->sin_zero); + } } /* Reinject packet into the system as incoming or outgoing */ @@ -388,8 +395,8 @@ div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin, * Don't allow both user specified and setsockopt options, * and don't allow packet length sizes that will crash */ - if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options) || - ((u_short)ntohs(ip->ip_len) > m->m_pkthdr.len)) { + if (((ip->ip_hl != (sizeof(*ip) >> 2)) && inp->inp_options) || + ((u_short)ntohs(ip->ip_len) > m->m_pkthdr.len)) { error = EINVAL; goto cantsend; } @@ -411,25 +418,27 @@ div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin, set_packet_service_class(m, so, sotc, 0); imo = inp->inp_moptions; - if (imo != NULL) + if (imo != NULL) { IMO_ADDREF(imo); + } socket_unlock(so, 0); #if CONFIG_MACF_NET mac_mbuf_label_associate_inpcb(inp, m); #endif /* Send packet to output processing */ error = ip_output(m, inp->inp_options, &ro, - (so->so_options & SO_DONTROUTE) | - IP_ALLOWBROADCAST | IP_RAWOUTPUT | IP_OUTARGS, - imo, &ipoa); + (so->so_options & SO_DONTROUTE) | + IP_ALLOWBROADCAST | IP_RAWOUTPUT | IP_OUTARGS, + imo, &ipoa); socket_lock(so, 0); - if (imo != NULL) + if (imo != NULL) { IMO_REMREF(imo); + } /* Synchronize cached PCB route */ inp_route_copyin(inp, &ro); } else { - struct ifaddr *ifa; + struct ifaddr *ifa; /* If no luck with the name above. check by IP address. */ if (m->m_pkthdr.rcvif == NULL) { @@ -438,9 +447,9 @@ div_output(struct socket *so, struct mbuf *m, struct sockaddr_in *sin, * Make sure there are no distractions for * ifa_ifwithaddr; use sanitized version. 
*/ - bzero(&_sin, sizeof (_sin)); + bzero(&_sin, sizeof(_sin)); _sin.sin_family = AF_INET; - _sin.sin_len = sizeof (struct sockaddr_in); + _sin.sin_len = sizeof(struct sockaddr_in); _sin.sin_addr.s_addr = sin->sin_addr.s_addr; if (!(ifa = ifa_ifwithaddr(SA(&_sin)))) { error = EADDRNOTAVAIL; @@ -471,23 +480,27 @@ div_attach(struct socket *so, int proto, struct proc *p) inp = sotoinpcb(so); - if (inp) + if (inp) { panic("div_attach"); - if ((error = proc_suser(p)) != 0) + } + if ((error = proc_suser(p)) != 0) { return error; + } error = soreserve(so, div_sendspace, div_recvspace); - if (error) + if (error) { return error; + } error = in_pcballoc(so, &divcbinfo, p); - if (error) + if (error) { return error; + } inp = (struct inpcb *)so->so_pcb; inp->inp_ip_p = proto; inp->inp_vflag |= INP_IPV4; inp->inp_flags |= INP_HDRINCL; /* The socket is always "connected" because - we always know "where" to send the packet */ + * we always know "where" to send the packet */ so->so_state |= SS_ISCONNECTED; #ifdef MORE_DICVLOCK_DEBUG @@ -513,8 +526,9 @@ div_detach(struct socket *so) so->so_usecount); #endif inp = sotoinpcb(so); - if (inp == 0) + if (inp == 0) { panic("div_detach: so=%p null inp\n", so); + } in_pcbdetach(inp); inp->inp_state = INPCB_STATE_DEAD; return 0; @@ -530,8 +544,9 @@ div_abort(struct socket *so) static int div_disconnect(struct socket *so) { - if ((so->so_state & SS_ISCONNECTED) == 0) + if ((so->so_state & SS_ISCONNECTED) == 0) { return ENOTCONN; + } return div_abort(so); } @@ -543,16 +558,16 @@ div_bind(struct socket *so, struct sockaddr *nam, struct proc *p) inp = sotoinpcb(so); /* in_pcbbind assumes that the socket is a sockaddr_in - * and in_pcbbind requires a valid address. Since divert - * sockets don't we need to make sure the address is - * filled in properly. - * XXX -- divert should not be abusing in_pcbind - * and should probably have its own family. - */ + * and in_pcbbind requires a valid address. Since divert + * sockets don't we need to make sure the address is + * filled in properly. + * XXX -- divert should not be abusing in_pcbind + * and should probably have its own family. 
+ */ if (nam->sa_family != AF_INET) { error = EAFNOSUPPORT; } else { - ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr = INADDR_ANY; + ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr = INADDR_ANY; error = in_pcbbind(inp, nam, p); } return error; @@ -567,11 +582,11 @@ div_shutdown(struct socket *so) static int div_send(struct socket *so, __unused int flags, struct mbuf *m, struct sockaddr *nam, - struct mbuf *control, __unused struct proc *p) + struct mbuf *control, __unused struct proc *p) { /* Packet must have a header (but that's about it) */ - if (m->m_len < sizeof (struct ip) && - (m = m_pullup(m, sizeof (struct ip))) == 0) { + if (m->m_len < sizeof(struct ip) && + (m = m_pullup(m, sizeof(struct ip))) == 0) { OSAddAtomic(1, &ipstat.ips_toosmall); m_freem(m); return EINVAL; @@ -599,7 +614,7 @@ div_pcblist SYSCTL_HANDLER_ARGS if (req->oldptr == USER_ADDR_NULL) { n = divcbinfo.ipi_count; req->oldidx = 2 * (sizeof xig) - + (n + n/8) * sizeof(struct xinpcb); + + (n + n / 8) * sizeof(struct xinpcb); lck_rw_done(divcbinfo.ipi_lock); return 0; } @@ -631,15 +646,15 @@ div_pcblist SYSCTL_HANDLER_ARGS lck_rw_done(divcbinfo.ipi_lock); return ENOMEM; } - + for (inp = LIST_FIRST(divcbinfo.ipi_listhead), i = 0; inp && i < n; - inp = LIST_NEXT(inp, inp_list)) { + inp = LIST_NEXT(inp, inp_list)) { #ifdef __APPLE__ if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) #else if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp)) #endif - inp_list[i++] = inp; + { inp_list[i++] = inp;} } n = i; @@ -653,8 +668,9 @@ div_pcblist SYSCTL_HANDLER_ARGS xi.xi_len = sizeof xi; /* XXX should avoid extra copy */ inpcb_to_compat(inp, &xi.xi_inp); - if (inp->inp_socket) + if (inp->inp_socket) { sotoxsocket(inp->inp_socket, &xi.xi_socket); + } error = SYSCTL_OUT(req, &xi, sizeof xi); } } @@ -684,10 +700,11 @@ div_lock(struct socket *so, int refcount, void *lr) { void *lr_saved; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } #ifdef MORE_DICVLOCK_DEBUG printf("div_lock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x " @@ -698,8 +715,8 @@ div_lock(struct socket *so, int refcount, void *lr) #endif if (so->so_pcb) { lck_mtx_lock(&((struct inpcb *)so->so_pcb)->inpcb_mtx); - } else { - panic("div_lock: so=%p NO PCB! lr=%p lrh= lrh= %s\n", + } else { + panic("div_lock: so=%p NO PCB! 
lr=%p lrh= lrh= %s\n", so, lr_saved, solockhistory_nr(so)); /* NOTREACHED */ } @@ -711,12 +728,13 @@ div_lock(struct socket *so, int refcount, void *lr) /* NOTREACHED */ } - if (refcount) + if (refcount) { so->so_usecount++; + } so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; + so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; - return (0); + return 0; } __private_extern__ int @@ -726,10 +744,11 @@ div_unlock(struct socket *so, int refcount, void *lr) lck_mtx_t * mutex_held; struct inpcb *inp = sotoinpcb(so); - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } #ifdef MORE_DICVLOCK_DEBUG printf("div_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x " @@ -738,11 +757,12 @@ div_unlock(struct socket *so, int refcount, void *lr) (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)) : NULL, so->so_usecount, lr_saved); #endif - if (refcount) + if (refcount) { so->so_usecount--; + } if (so->so_usecount < 0) { - panic("div_unlock: so=%p usecount=%x lrh= %s\n", + panic("div_unlock: so=%p usecount=%x lrh= %s\n", so, so->so_usecount, solockhistory_nr(so)); /* NOTREACHED */ } @@ -755,17 +775,18 @@ div_unlock(struct socket *so, int refcount, void *lr) if (so->so_usecount == 0 && (inp->inp_wantcnt == WNT_STOPUSING)) { lck_rw_lock_exclusive(divcbinfo.ipi_lock); - if (inp->inp_state != INPCB_STATE_DEAD) + if (inp->inp_state != INPCB_STATE_DEAD) { in_pcbdetach(inp); + } in_pcbdispose(inp); lck_rw_done(divcbinfo.ipi_lock); - return (0); + return 0; } LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; lck_mtx_unlock(mutex_held); - return (0); + return 0; } __private_extern__ lck_mtx_t * @@ -773,30 +794,30 @@ div_getlock(struct socket *so, __unused int flags) { struct inpcb *inpcb = (struct inpcb *)so->so_pcb; - if (so->so_pcb) { - if (so->so_usecount < 0) + if (so->so_pcb) { + if (so->so_usecount < 0) { panic("div_getlock: so=%p usecount=%x lrh= %s\n", so, so->so_usecount, solockhistory_nr(so)); - return(&inpcb->inpcb_mtx); + } + return &inpcb->inpcb_mtx; } else { panic("div_getlock: so=%p NULL NO PCB lrh= %s\n", so, solockhistory_nr(so)); - return (so->so_proto->pr_domain->dom_mtx); + return so->so_proto->pr_domain->dom_mtx; } } struct pr_usrreqs div_usrreqs = { - .pru_abort = div_abort, - .pru_attach = div_attach, - .pru_bind = div_bind, - .pru_control = in_control, - .pru_detach = div_detach, - .pru_disconnect = div_disconnect, - .pru_peeraddr = in_getpeeraddr, - .pru_send = div_send, - .pru_shutdown = div_shutdown, - .pru_sockaddr = in_getsockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = div_abort, + .pru_attach = div_attach, + .pru_bind = div_bind, + .pru_control = in_control, + .pru_detach = div_detach, + .pru_disconnect = div_disconnect, + .pru_peeraddr = in_getpeeraddr, + .pru_send = div_send, + .pru_shutdown = div_shutdown, + .pru_sockaddr = in_getsockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; - diff --git a/bsd/netinet/ip_divert.h b/bsd/netinet/ip_divert.h index e4a89ef6f..1536f0416 100644 --- a/bsd/netinet/ip_divert.h +++ b/bsd/netinet/ip_divert.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
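For readers tracing the div_* entry points above: a divert socket receives packets that matched a firewall divert rule and reinjects them after userland processing. The sketch below is illustrative only; it assumes the classic BSD divert API surface that this file implements (an IPPROTO_DIVERT raw socket, a bind to the divert port as enforced by div_bind(), and reinjection metadata carried in the sockaddr_in as parsed by div_output()). The port number and error handling are placeholder choices, and opening the socket requires root, per the proc_suser() check in div_attach().

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <string.h>
	#include <unistd.h>

	#define DIVERT_PORT 8668	/* hypothetical; must match the divert rule */

	int
	divert_loop(void)
	{
		struct sockaddr_in sin;
		socklen_t sinlen;
		char pkt[65535];
		ssize_t n;
		int fd;

		/* IPPROTO_DIVERT raw sockets are what div_attach() serves. */
		fd = socket(PF_INET, SOCK_RAW, IPPROTO_DIVERT);
		if (fd < 0) {
			return -1;
		}

		/* div_bind() forces sin_addr to INADDR_ANY; only the port matters. */
		memset(&sin, 0, sizeof(sin));
		sin.sin_family = AF_INET;
		sin.sin_port = htons(DIVERT_PORT);
		if (bind(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
			close(fd);
			return -1;
		}

		for (;;) {
			/*
			 * The kernel fills sin in on receive: sin_addr and the
			 * interface name smuggled in sin_zero describe where the
			 * packet was diverted.
			 */
			sinlen = sizeof(sin);
			n = recvfrom(fd, pkt, sizeof(pkt), 0,
			    (struct sockaddr *)&sin, &sinlen);
			if (n < 0) {
				break;
			}

			/* ... inspect or rewrite pkt here ... */

			/*
			 * Reinjection: as div_output() reads it, a non-zero
			 * sin_addr (or an interface name in sin_zero) re-delivers
			 * the packet as incoming; zeroing sin_addr instead sends
			 * it out through ip_output().
			 */
			if (sendto(fd, pkt, n, 0,
			    (struct sockaddr *)&sin, sinlen) < 0) {
				break;
			}
		}
		close(fd);
		return 0;
	}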
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -61,7 +61,7 @@ */ #ifndef _NETINET_IP_DIVERT_H_ -#define _NETINET_IP_DIVERT_H_ +#define _NETINET_IP_DIVERT_H_ #if IPDIVERT #ifdef BSD_KERNEL_PRIVATE @@ -72,8 +72,8 @@ /* 32-bit unique unsigned value used to identify a module */ struct divert_tag { - u_int32_t info; /* port & flags */ - u_int16_t cookie; /* ipfw rule number */ + u_int32_t info; /* port & flags */ + u_int16_t cookie; /* ipfw rule number */ }; /* @@ -82,13 +82,13 @@ struct divert_tag { static __inline u_int16_t divert_cookie(struct m_tag *mtag) { - return ((struct divert_tag *)(mtag+1))->cookie; + return ((struct divert_tag *)(mtag + 1))->cookie; } static __inline u_int16_t divert_find_cookie(struct mbuf *m) { struct m_tag *mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DIVERT, NULL); + KERNEL_TAG_TYPE_DIVERT, NULL); return mtag ? divert_cookie(mtag) : 0; } @@ -98,23 +98,23 @@ divert_find_cookie(struct mbuf *m) static __inline u_int32_t divert_info(struct m_tag *mtag) { - return ((struct divert_tag *)(mtag+1))->info; + return ((struct divert_tag *)(mtag + 1))->info; } static __inline u_int32_t divert_find_info(struct mbuf *m) { struct m_tag *mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DIVERT, NULL); + KERNEL_TAG_TYPE_DIVERT, NULL); return mtag ? divert_info(mtag) : 0; } -extern void div_init(struct protosw *, struct domain *); -extern void div_input(struct mbuf *, int); -lck_mtx_t * - div_getlock(struct socket *, int ); -int div_unlock(struct socket *, int, void *); -int div_lock(struct socket *, int , void *); -extern void divert_packet(struct mbuf *m, int incoming, int port, int rule); +extern void div_init(struct protosw *, struct domain *); +extern void div_input(struct mbuf *, int); +lck_mtx_t * +div_getlock(struct socket *, int ); +int div_unlock(struct socket *, int, void *); +int div_lock(struct socket *, int, void *); +extern void divert_packet(struct mbuf *m, int incoming, int port, int rule); extern struct pr_usrreqs div_usrreqs; #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/ip_dummynet.h b/bsd/netinet/ip_dummynet.h index 7472e958c..884ce05da 100644 --- a/bsd/netinet/ip_dummynet.h +++ b/bsd/netinet/ip_dummynet.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. 
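The divert_find_cookie()/divert_find_info() inlines reindented in this header are thin wrappers over m_tag_locate() on the KERNEL_TAG_TYPE_DIVERT tag. A kernel-side consumer in the firewall path would read the tag back roughly as follows; this is a sketch only, and the surrounding function and its policy are invented:

	/*
	 * Sketch: deciding whether a packet re-entering the input path came
	 * from a divert socket, and which ipfw rule diverted it.
	 */
	static void
	example_divert_check(struct mbuf *m)
	{
		/* Both helpers return 0 when no divert tag is present. */
		u_int32_t info = divert_find_info(m);	/* port & flags */
		u_int16_t rule = divert_find_cookie(m);	/* ipfw rule number */

		if (info != 0) {
			/* ... resume firewall processing after rule `rule` ... */
		}
	}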
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -63,7 +63,7 @@ #include /* Apply ipv6 mask on ipv6 addr */ -#define APPLY_MASK(addr,mask) \ +#define APPLY_MASK(addr, mask) \ (addr)->__u6_addr.__u6_addr32[0] &= (mask)->__u6_addr.__u6_addr32[0]; \ (addr)->__u6_addr.__u6_addr32[1] &= (mask)->__u6_addr.__u6_addr32[1]; \ (addr)->__u6_addr.__u6_addr32[2] &= (mask)->__u6_addr.__u6_addr32[2]; \ @@ -96,13 +96,13 @@ * MY_M is used as a shift count when doing fixed point arithmetic * (a better name would be useful...). */ -typedef u_int64_t dn_key ; /* sorting key */ -#define DN_KEY_LT(a,b) ((int64_t)((a)-(b)) < 0) -#define DN_KEY_LEQ(a,b) ((int64_t)((a)-(b)) <= 0) -#define DN_KEY_GT(a,b) ((int64_t)((a)-(b)) > 0) -#define DN_KEY_GEQ(a,b) ((int64_t)((a)-(b)) >= 0) -#define MAX64(x,y) (( (int64_t) ( (y)-(x) )) > 0 ) ? (y) : (x) -#define MY_M 16 /* number of left shift to obtain a larger precision */ +typedef u_int64_t dn_key; /* sorting key */ +#define DN_KEY_LT(a, b) ((int64_t)((a)-(b)) < 0) +#define DN_KEY_LEQ(a, b) ((int64_t)((a)-(b)) <= 0) +#define DN_KEY_GT(a, b) ((int64_t)((a)-(b)) > 0) +#define DN_KEY_GEQ(a, b) ((int64_t)((a)-(b)) >= 0) +#define MAX64(x, y) (( (int64_t) ( (y)-(x) )) > 0 ) ? (y) : (x) +#define MY_M 16 /* number of left shift to obtain a larger precision */ /* * XXX With this scaling, max 1000 flows, max weight 100, 1Gbit/s, the @@ -137,16 +137,16 @@ typedef u_int64_t dn_key ; /* sorting key */ * is non-zero if we want to support extract from the middle. */ struct dn_heap_entry { - dn_key key ; /* sorting key. Topmost element is smallest one */ - void *object ; /* object pointer */ -} ; + dn_key key; /* sorting key. 
Topmost element is smallest one */ + void *object; /* object pointer */ +}; struct dn_heap { - int size ; - int elements ; - int offset ; /* XXX if > 0 this is the offset of direct ptr to obj */ - struct dn_heap_entry *p ; /* really an array of "size" entries */ -} ; + int size; + int elements; + int offset; /* XXX if > 0 this is the offset of direct ptr to obj */ + struct dn_heap_entry *p; /* really an array of "size" entries */ +}; /* * Packets processed by dummynet have an mbuf tag associated with @@ -157,48 +157,48 @@ struct dn_heap { #ifdef KERNEL #include #include -#include /* for ip_out_args */ -#include /* for ip6_out_args */ +#include /* for ip_out_args */ +#include /* for ip6_out_args */ #include -#include /* for ip6_out_args */ +#include /* for ip6_out_args */ struct dn_pkt_tag { - struct ip_fw *dn_ipfw_rule; /* matching IPFW rule */ - void *dn_pf_rule; /* matching PF rule */ - int dn_dir; /* action when packet comes out. */ -#define DN_TO_IP_OUT 1 -#define DN_TO_IP_IN 2 -#define DN_TO_BDG_FWD 3 + struct ip_fw *dn_ipfw_rule; /* matching IPFW rule */ + void *dn_pf_rule; /* matching PF rule */ + int dn_dir; /* action when packet comes out. */ +#define DN_TO_IP_OUT 1 +#define DN_TO_IP_IN 2 +#define DN_TO_BDG_FWD 3 #define DN_TO_IP6_IN 4 #define DN_TO_IP6_OUT 5 - dn_key dn_output_time; /* when the pkt is due for delivery */ - struct ifnet *dn_ifp; /* interface, for ip[6]_output */ - union { - struct sockaddr_in _dn_dst; - struct sockaddr_in6 _dn_dst6 ; - } dn_dst_; + dn_key dn_output_time; /* when the pkt is due for delivery */ + struct ifnet *dn_ifp; /* interface, for ip[6]_output */ + union { + struct sockaddr_in _dn_dst; + struct sockaddr_in6 _dn_dst6; + } dn_dst_; #define dn_dst dn_dst_._dn_dst #define dn_dst6 dn_dst_._dn_dst6 - union { - struct route _dn_ro; /* route, for ip_output. MUST COPY */ - struct route_in6 _dn_ro6; /* route, for ip6_output. MUST COPY */ - } dn_ro_; + union { + struct route _dn_ro; /* route, for ip_output. MUST COPY */ + struct route_in6 _dn_ro6;/* route, for ip6_output. MUST COPY */ + } dn_ro_; #define dn_ro dn_ro_._dn_ro #define dn_ro6 dn_ro_._dn_ro6 - struct route_in6 dn_ro6_pmtu; /* for ip6_output */ - struct ifnet *dn_origifp; /* for ip6_output */ - u_int32_t dn_mtu; /* for ip6_output */ - int dn_alwaysfrag; /* for ip6_output */ - u_int32_t dn_unfragpartlen; /* for ip6_output */ - struct ip6_exthdrs dn_exthdrs; /* for ip6_output */ - int dn_flags ; /* flags, for ip[6]_output */ - int dn_client; -#define DN_CLIENT_IPFW 1 -#define DN_CLIENT_PF 2 - union { - struct ip_out_args _dn_ipoa; /* output args, for ip_output. MUST COPY */ - struct ip6_out_args _dn_ip6oa; /* output args, for ip_output. MUST COPY */ - } dn_ipoa_; + struct route_in6 dn_ro6_pmtu; /* for ip6_output */ + struct ifnet *dn_origifp; /* for ip6_output */ + u_int32_t dn_mtu; /* for ip6_output */ + int dn_alwaysfrag; /* for ip6_output */ + u_int32_t dn_unfragpartlen; /* for ip6_output */ + struct ip6_exthdrs dn_exthdrs; /* for ip6_output */ + int dn_flags; /* flags, for ip[6]_output */ + int dn_client; +#define DN_CLIENT_IPFW 1 +#define DN_CLIENT_PF 2 + union { + struct ip_out_args _dn_ipoa;/* output args, for ip_output. MUST COPY */ + struct ip6_out_args _dn_ip6oa;/* output args, for ip_output. 
MUST COPY */ + } dn_ipoa_; #define dn_ipoa dn_ipoa_._dn_ipoa #define dn_ip6oa dn_ipoa_._dn_ip6oa }; @@ -208,61 +208,61 @@ struct dn_pkt; /* * Overall structure of dummynet (with WF2Q+): - -In dummynet, packets are selected with the firewall rules, and passed -to two different objects: PIPE or QUEUE. - -A QUEUE is just a queue with configurable size and queue management -policy. It is also associated with a mask (to discriminate among -different flows), a weight (used to give different shares of the -bandwidth to different flows) and a "pipe", which essentially -supplies the transmit clock for all queues associated with that -pipe. - -A PIPE emulates a fixed-bandwidth link, whose bandwidth is -configurable. The "clock" for a pipe can come from either an -internal timer, or from the transmit interrupt of an interface. -A pipe is also associated with one (or more, if masks are used) -queue, where all packets for that pipe are stored. - -The bandwidth available on the pipe is shared by the queues -associated with that pipe (only one in case the packet is sent -to a PIPE) according to the WF2Q+ scheduling algorithm and the -configured weights. - -In general, incoming packets are stored in the appropriate queue, -which is then placed into one of a few heaps managed by a scheduler -to decide when the packet should be extracted. -The scheduler (a function called dummynet()) is run at every timer -tick, and grabs queues from the head of the heaps when they are -ready for processing. - -There are three data structures definining a pipe and associated queues: - + * + * In dummynet, packets are selected with the firewall rules, and passed + * to two different objects: PIPE or QUEUE. + * + * A QUEUE is just a queue with configurable size and queue management + * policy. It is also associated with a mask (to discriminate among + * different flows), a weight (used to give different shares of the + * bandwidth to different flows) and a "pipe", which essentially + * supplies the transmit clock for all queues associated with that + * pipe. + * + * A PIPE emulates a fixed-bandwidth link, whose bandwidth is + * configurable. The "clock" for a pipe can come from either an + * internal timer, or from the transmit interrupt of an interface. + * A pipe is also associated with one (or more, if masks are used) + * queue, where all packets for that pipe are stored. + * + * The bandwidth available on the pipe is shared by the queues + * associated with that pipe (only one in case the packet is sent + * to a PIPE) according to the WF2Q+ scheduling algorithm and the + * configured weights. + * + * In general, incoming packets are stored in the appropriate queue, + * which is then placed into one of a few heaps managed by a scheduler + * to decide when the packet should be extracted. + * The scheduler (a function called dummynet()) is run at every timer + * tick, and grabs queues from the head of the heaps when they are + * ready for processing. + * + * There are three data structures definining a pipe and associated queues: + * + dn_pipe, which contains the main configuration parameters related - to delay and bandwidth; + + to delay and bandwidth; + dn_flow_set, which contains WF2Q+ configuration, flow - masks, plr and RED configuration; + + masks, plr and RED configuration; + dn_flow_queue, which is the per-flow queue (containing the packets) - -Multiple dn_flow_set can be linked to the same pipe, and multiple -dn_flow_queue can be linked to the same dn_flow_set. 
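A note on the DN_KEY_* comparison macros reformatted earlier in this header: they compare via a signed 64-bit difference rather than a plain relational operator, so the ordering of scheduling keys stays correct even if the virtual clock wraps. A self-contained illustration (not dummynet code; the macro is restated locally):

	/*
	 * Illustration only: dn_key comparisons use the signed difference so
	 * ordering survives wraparound of the 64-bit virtual clock.
	 */
	#include <stdint.h>
	#include <assert.h>

	typedef uint64_t dn_key;
	#define DN_KEY_LT(a, b) ((int64_t)((a)-(b)) < 0)

	int
	main(void)
	{
		dn_key near_wrap = UINT64_MAX - 1;	/* just before wrap */
		dn_key wrapped   = 2;			/* just after wrap */

		/*
		 * Plain '<' would claim wrapped < near_wrap; the
		 * signed-difference form keeps near_wrap ordered first.
		 */
		assert(DN_KEY_LT(near_wrap, wrapped));
		assert(!(near_wrap < wrapped));
		return 0;
	}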
-All data structures are linked in a linear list which is used for -housekeeping purposes. - -During configuration, we create and initialize the dn_flow_set -and dn_pipe structures (a dn_pipe also contains a dn_flow_set). - -At runtime: packets are sent to the appropriate dn_flow_set (either -WFQ ones, or the one embedded in the dn_pipe for fixed-rate flows), -which in turn dispatches them to the appropriate dn_flow_queue -(created dynamically according to the masks). - -The transmit clock for fixed rate flows (ready_event()) selects the -dn_flow_queue to be used to transmit the next packet. For WF2Q, -wfq_ready_event() extract a pipe which in turn selects the right -flow using a number of heaps defined into the pipe itself. - + + + + Multiple dn_flow_set can be linked to the same pipe, and multiple + + dn_flow_queue can be linked to the same dn_flow_set. + + All data structures are linked in a linear list which is used for + + housekeeping purposes. + + + + During configuration, we create and initialize the dn_flow_set + + and dn_pipe structures (a dn_pipe also contains a dn_flow_set). + + + + At runtime: packets are sent to the appropriate dn_flow_set (either + + WFQ ones, or the one embedded in the dn_pipe for fixed-rate flows), + + which in turn dispatches them to the appropriate dn_flow_queue + + (created dynamically according to the masks). + + + + The transmit clock for fixed rate flows (ready_event()) selects the + + dn_flow_queue to be used to transmit the next packet. For WF2Q, + + wfq_ready_event() extract a pipe which in turn selects the right + + flow using a number of heaps defined into the pipe itself. + + * */ @@ -275,37 +275,37 @@ flow using a number of heaps defined into the pipe itself. * a new flow arrives. */ struct dn_flow_queue { - struct dn_flow_queue *next ; - struct ip_flow_id id ; - - struct mbuf *head, *tail ; /* queue of packets */ - u_int len ; - u_int len_bytes ; - u_int32_t numbytes ; /* credit for transmission (dynamic queues) */ - - u_int64_t tot_pkts ; /* statistics counters */ - u_int64_t tot_bytes ; - u_int32_t drops ; - - int hash_slot ; /* debugging/diagnostic */ - - /* RED parameters */ - int avg ; /* average queue length est. (scaled) */ - int count ; /* arrivals since last RED drop */ - int random ; /* random value (scaled) */ - u_int32_t q_time ; /* start of queue idle time */ - - /* WF2Q+ support */ - struct dn_flow_set *fs ; /* parent flow set */ - int heap_pos ; /* position (index) of struct in heap */ - dn_key sched_time ; /* current time when queue enters ready_heap */ - - dn_key S,F ; /* start time, finish time */ - /* - * Setting F < S means the timestamp is invalid. We only need - * to test this when the queue is empty. - */ -} ; + struct dn_flow_queue *next; + struct ip_flow_id id; + + struct mbuf *head, *tail; /* queue of packets */ + u_int len; + u_int len_bytes; + u_int32_t numbytes; /* credit for transmission (dynamic queues) */ + + u_int64_t tot_pkts; /* statistics counters */ + u_int64_t tot_bytes; + u_int32_t drops; + + int hash_slot; /* debugging/diagnostic */ + + /* RED parameters */ + int avg; /* average queue length est. 
(scaled) */ + int count; /* arrivals since last RED drop */ + int random; /* random value (scaled) */ + u_int32_t q_time; /* start of queue idle time */ + + /* WF2Q+ support */ + struct dn_flow_set *fs; /* parent flow set */ + int heap_pos; /* position (index) of struct in heap */ + dn_key sched_time; /* current time when queue enters ready_heap */ + + dn_key S, F; /* start time, finish time */ + /* + * Setting F < S means the timestamp is invalid. We only need + * to test this when the queue is empty. + */ +}; /* * flow_set descriptor. Contains the "template" parameters for the @@ -320,55 +320,55 @@ struct dn_flow_queue { * latter case, the structure is located inside the struct dn_pipe). */ struct dn_flow_set { - SLIST_ENTRY(dn_flow_set) next; /* linked list in a hash slot */ + SLIST_ENTRY(dn_flow_set) next;/* linked list in a hash slot */ - u_short fs_nr ; /* flow_set number */ - u_short flags_fs; -#define DN_HAVE_FLOW_MASK 0x0001 -#define DN_IS_RED 0x0002 -#define DN_IS_GENTLE_RED 0x0004 -#define DN_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */ -#define DN_NOERROR 0x0010 /* do not report ENOBUFS on drops */ -#define DN_IS_PIPE 0x4000 -#define DN_IS_QUEUE 0x8000 + u_short fs_nr; /* flow_set number */ + u_short flags_fs; +#define DN_HAVE_FLOW_MASK 0x0001 +#define DN_IS_RED 0x0002 +#define DN_IS_GENTLE_RED 0x0004 +#define DN_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */ +#define DN_NOERROR 0x0010 /* do not report ENOBUFS on drops */ +#define DN_IS_PIPE 0x4000 +#define DN_IS_QUEUE 0x8000 - struct dn_pipe *pipe ; /* pointer to parent pipe */ - u_short parent_nr ; /* parent pipe#, 0 if local to a pipe */ + struct dn_pipe *pipe; /* pointer to parent pipe */ + u_short parent_nr; /* parent pipe#, 0 if local to a pipe */ - int weight ; /* WFQ queue weight */ - int qsize ; /* queue size in slots or bytes */ - int plr ; /* pkt loss rate (2^31-1 means 100%) */ + int weight; /* WFQ queue weight */ + int qsize; /* queue size in slots or bytes */ + int plr; /* pkt loss rate (2^31-1 means 100%) */ - struct ip_flow_id flow_mask ; + struct ip_flow_id flow_mask; - /* hash table of queues onto this flow_set */ - int rq_size ; /* number of slots */ - int rq_elements ; /* active elements */ - struct dn_flow_queue **rq; /* array of rq_size entries */ + /* hash table of queues onto this flow_set */ + int rq_size; /* number of slots */ + int rq_elements; /* active elements */ + struct dn_flow_queue **rq; /* array of rq_size entries */ - u_int32_t last_expired ; /* do not expire too frequently */ - int backlogged ; /* #active queues for this flowset */ + u_int32_t last_expired; /* do not expire too frequently */ + int backlogged; /* #active queues for this flowset */ - /* RED parameters */ + /* RED parameters */ #define SCALE_RED 16 #define SCALE(x) ( (x) << SCALE_RED ) #define SCALE_VAL(x) ( (x) >> SCALE_RED ) -#define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED ) - int w_q ; /* queue weight (scaled) */ - int max_th ; /* maximum threshold for queue (scaled) */ - int min_th ; /* minimum threshold for queue (scaled) */ - int max_p ; /* maximum value for p_b (scaled) */ - u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */ - u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */ - u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */ - u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */ - u_int * w_q_lookup ; /* lookup table for computing (1-w_q)^t */ - u_int lookup_depth ; /* depth of lookup table */ - int lookup_step ; /* granularity inside the lookup table */ - int lookup_weight ; /* 
equal to (1-w_q)^t / (1-w_q)^(t+1) */ - int avg_pkt_size ; /* medium packet size */ - int max_pkt_size ; /* max packet size */ -} ; +#define SCALE_MUL(x, y) ( ( (x) * (y) ) >> SCALE_RED ) + int w_q; /* queue weight (scaled) */ + int max_th; /* maximum threshold for queue (scaled) */ + int min_th; /* minimum threshold for queue (scaled) */ + int max_p; /* maximum value for p_b (scaled) */ + u_int c_1; /* max_p/(max_th-min_th) (scaled) */ + u_int c_2; /* max_p*min_th/(max_th-min_th) (scaled) */ + u_int c_3; /* for GRED, (1-max_p)/max_th (scaled) */ + u_int c_4; /* for GRED, 1 - 2*max_p (scaled) */ + u_int * w_q_lookup; /* lookup table for computing (1-w_q)^t */ + u_int lookup_depth; /* depth of lookup table */ + int lookup_step; /* granularity inside the lookup table */ + int lookup_weight; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */ + int avg_pkt_size; /* medium packet size */ + int max_pkt_size; /* max packet size */ +}; SLIST_HEAD(dn_flow_set_head, dn_flow_set); @@ -386,35 +386,35 @@ SLIST_HEAD(dn_flow_set_head, dn_flow_set); * operations during forwarding. * */ -struct dn_pipe { /* a pipe */ - SLIST_ENTRY(dn_pipe) next; /* linked list in a hash slot */ +struct dn_pipe { /* a pipe */ + SLIST_ENTRY(dn_pipe) next;/* linked list in a hash slot */ - int pipe_nr ; /* number */ - int bandwidth; /* really, bytes/tick. */ - int delay ; /* really, ticks */ + int pipe_nr; /* number */ + int bandwidth; /* really, bytes/tick. */ + int delay; /* really, ticks */ - struct mbuf *head, *tail ; /* packets in delay line */ + struct mbuf *head, *tail; /* packets in delay line */ - /* WF2Q+ */ - struct dn_heap scheduler_heap ; /* top extract - key Finish time*/ - struct dn_heap not_eligible_heap; /* top extract- key Start time */ - struct dn_heap idle_heap ; /* random extract - key Start=Finish time */ + /* WF2Q+ */ + struct dn_heap scheduler_heap; /* top extract - key Finish time*/ + struct dn_heap not_eligible_heap; /* top extract- key Start time */ + struct dn_heap idle_heap; /* random extract - key Start=Finish time */ - dn_key V ; /* virtual time */ - int sum; /* sum of weights of all active sessions */ - int numbytes; /* bits I can transmit (more or less). */ + dn_key V; /* virtual time */ + int sum; /* sum of weights of all active sessions */ + int numbytes; /* bits I can transmit (more or less). */ - dn_key sched_time ; /* time pipe was scheduled in ready_heap */ + dn_key sched_time; /* time pipe was scheduled in ready_heap */ - /* - * When the tx clock come from an interface (if_name[0] != '\0'), its name - * is stored below, whereas the ifp is filled when the rule is configured. - */ - char if_name[IFNAMSIZ]; - struct ifnet *ifp ; - int ready ; /* set if ifp != NULL and we got a signal from it */ + /* + * When the tx clock come from an interface (if_name[0] != '\0'), its name + * is stored below, whereas the ifp is filled when the rule is configured. 
+ */ + char if_name[IFNAMSIZ]; + struct ifnet *ifp; + int ready; /* set if ifp != NULL and we got a signal from it */ - struct dn_flow_set fs ; /* used with fixed-rate flows */ + struct dn_flow_set fs; /* used with fixed-rate flows */ }; SLIST_HEAD(dn_pipe_head, dn_pipe); @@ -423,261 +423,261 @@ SLIST_HEAD(dn_pipe_head, dn_pipe); extern uint32_t my_random(void); void ip_dn_init(void); /* called from raw_ip.c:load_ipfw() */ -typedef int ip_dn_ctl_t(struct sockopt *); /* raw_ip.c */ -typedef int ip_dn_io_t(struct mbuf *m, int pipe_nr, int dir, - struct ip_fw_args *fwa, int ); -extern ip_dn_ctl_t *ip_dn_ctl_ptr; -extern ip_dn_io_t *ip_dn_io_ptr; +typedef int ip_dn_ctl_t(struct sockopt *); /* raw_ip.c */ +typedef int ip_dn_io_t(struct mbuf *m, int pipe_nr, int dir, + struct ip_fw_args *fwa, int ); +extern ip_dn_ctl_t *ip_dn_ctl_ptr; +extern ip_dn_io_t *ip_dn_io_ptr; void dn_ipfw_rule_delete(void *); -#define DUMMYNET_LOADED (ip_dn_io_ptr != NULL) +#define DUMMYNET_LOADED (ip_dn_io_ptr != NULL) #pragma pack(4) struct dn_heap_32 { - int size ; - int elements ; - int offset ; /* XXX if > 0 this is the offset of direct ptr to obj */ - user32_addr_t p ; /* really an array of "size" entries */ -} ; + int size; + int elements; + int offset; /* XXX if > 0 this is the offset of direct ptr to obj */ + user32_addr_t p; /* really an array of "size" entries */ +}; struct dn_flow_queue_32 { - user32_addr_t next ; - struct ip_flow_id id ; - - user32_addr_t head, tail ; /* queue of packets */ - u_int len ; - u_int len_bytes ; - u_int32_t numbytes ; /* credit for transmission (dynamic queues) */ - - u_int64_t tot_pkts ; /* statistics counters */ - u_int64_t tot_bytes ; - u_int32_t drops ; - - int hash_slot ; /* debugging/diagnostic */ - - /* RED parameters */ - int avg ; /* average queue length est. (scaled) */ - int count ; /* arrivals since last RED drop */ - int random ; /* random value (scaled) */ - u_int32_t q_time ; /* start of queue idle time */ - - /* WF2Q+ support */ - user32_addr_t fs ; /* parent flow set */ - int heap_pos ; /* position (index) of struct in heap */ - dn_key sched_time ; /* current time when queue enters ready_heap */ - - dn_key S,F ; /* start time, finish time */ - /* - * Setting F < S means the timestamp is invalid. We only need - * to test this when the queue is empty. - */ -} ; + user32_addr_t next; + struct ip_flow_id id; + + user32_addr_t head, tail; /* queue of packets */ + u_int len; + u_int len_bytes; + u_int32_t numbytes; /* credit for transmission (dynamic queues) */ + + u_int64_t tot_pkts; /* statistics counters */ + u_int64_t tot_bytes; + u_int32_t drops; + + int hash_slot; /* debugging/diagnostic */ + + /* RED parameters */ + int avg; /* average queue length est. (scaled) */ + int count; /* arrivals since last RED drop */ + int random; /* random value (scaled) */ + u_int32_t q_time; /* start of queue idle time */ + + /* WF2Q+ support */ + user32_addr_t fs; /* parent flow set */ + int heap_pos; /* position (index) of struct in heap */ + dn_key sched_time; /* current time when queue enters ready_heap */ + + dn_key S, F; /* start time, finish time */ + /* + * Setting F < S means the timestamp is invalid. We only need + * to test this when the queue is empty. 
+ */ +}; struct dn_flow_set_32 { - user32_addr_t next; /* next flow set in all_flow_sets list */ - - u_short fs_nr ; /* flow_set number */ - u_short flags_fs; -#define DN_HAVE_FLOW_MASK 0x0001 -#define DN_IS_RED 0x0002 -#define DN_IS_GENTLE_RED 0x0004 -#define DN_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */ -#define DN_NOERROR 0x0010 /* do not report ENOBUFS on drops */ -#define DN_IS_PIPE 0x4000 -#define DN_IS_QUEUE 0x8000 - - user32_addr_t pipe ; /* pointer to parent pipe */ - u_short parent_nr ; /* parent pipe#, 0 if local to a pipe */ - - int weight ; /* WFQ queue weight */ - int qsize ; /* queue size in slots or bytes */ - int plr ; /* pkt loss rate (2^31-1 means 100%) */ - - struct ip_flow_id flow_mask ; - - /* hash table of queues onto this flow_set */ - int rq_size ; /* number of slots */ - int rq_elements ; /* active elements */ - user32_addr_t rq; /* array of rq_size entries */ - - u_int32_t last_expired ; /* do not expire too frequently */ - int backlogged ; /* #active queues for this flowset */ - + user32_addr_t next;/* next flow set in all_flow_sets list */ + + u_short fs_nr; /* flow_set number */ + u_short flags_fs; +#define DN_HAVE_FLOW_MASK 0x0001 +#define DN_IS_RED 0x0002 +#define DN_IS_GENTLE_RED 0x0004 +#define DN_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */ +#define DN_NOERROR 0x0010 /* do not report ENOBUFS on drops */ +#define DN_IS_PIPE 0x4000 +#define DN_IS_QUEUE 0x8000 + + user32_addr_t pipe; /* pointer to parent pipe */ + u_short parent_nr; /* parent pipe#, 0 if local to a pipe */ + + int weight; /* WFQ queue weight */ + int qsize; /* queue size in slots or bytes */ + int plr; /* pkt loss rate (2^31-1 means 100%) */ + + struct ip_flow_id flow_mask; + + /* hash table of queues onto this flow_set */ + int rq_size; /* number of slots */ + int rq_elements; /* active elements */ + user32_addr_t rq; /* array of rq_size entries */ + + u_int32_t last_expired; /* do not expire too frequently */ + int backlogged; /* #active queues for this flowset */ + /* RED parameters */ #define SCALE_RED 16 #define SCALE(x) ( (x) << SCALE_RED ) #define SCALE_VAL(x) ( (x) >> SCALE_RED ) -#define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED ) - int w_q ; /* queue weight (scaled) */ - int max_th ; /* maximum threshold for queue (scaled) */ - int min_th ; /* minimum threshold for queue (scaled) */ - int max_p ; /* maximum value for p_b (scaled) */ - u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */ - u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */ - u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */ - u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */ - user32_addr_t w_q_lookup ; /* lookup table for computing (1-w_q)^t */ - u_int lookup_depth ; /* depth of lookup table */ - int lookup_step ; /* granularity inside the lookup table */ - int lookup_weight ; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */ - int avg_pkt_size ; /* medium packet size */ - int max_pkt_size ; /* max packet size */ -} ; - -struct dn_pipe_32 { /* a pipe */ - user32_addr_t next ; - - int pipe_nr ; /* number */ - int bandwidth; /* really, bytes/tick. 
*/ - int delay ; /* really, ticks */ - - user32_addr_t head, tail ; /* packets in delay line */ - - /* WF2Q+ */ - struct dn_heap_32 scheduler_heap ; /* top extract - key Finish time*/ - struct dn_heap_32 not_eligible_heap; /* top extract- key Start time */ - struct dn_heap_32 idle_heap ; /* random extract - key Start=Finish time */ - - dn_key V ; /* virtual time */ - int sum; /* sum of weights of all active sessions */ - int numbytes; /* bits I can transmit (more or less). */ - - dn_key sched_time ; /* time pipe was scheduled in ready_heap */ - - /* - * When the tx clock come from an interface (if_name[0] != '\0'), its name - * is stored below, whereas the ifp is filled when the rule is configured. - */ - char if_name[IFNAMSIZ]; - user32_addr_t ifp ; - int ready ; /* set if ifp != NULL and we got a signal from it */ - - struct dn_flow_set_32 fs ; /* used with fixed-rate flows */ +#define SCALE_MUL(x, y) ( ( (x) * (y) ) >> SCALE_RED ) + int w_q; /* queue weight (scaled) */ + int max_th; /* maximum threshold for queue (scaled) */ + int min_th; /* minimum threshold for queue (scaled) */ + int max_p; /* maximum value for p_b (scaled) */ + u_int c_1; /* max_p/(max_th-min_th) (scaled) */ + u_int c_2; /* max_p*min_th/(max_th-min_th) (scaled) */ + u_int c_3; /* for GRED, (1-max_p)/max_th (scaled) */ + u_int c_4; /* for GRED, 1 - 2*max_p (scaled) */ + user32_addr_t w_q_lookup; /* lookup table for computing (1-w_q)^t */ + u_int lookup_depth; /* depth of lookup table */ + int lookup_step; /* granularity inside the lookup table */ + int lookup_weight; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */ + int avg_pkt_size; /* medium packet size */ + int max_pkt_size; /* max packet size */ +}; + +struct dn_pipe_32 { /* a pipe */ + user32_addr_t next; + + int pipe_nr; /* number */ + int bandwidth; /* really, bytes/tick. */ + int delay; /* really, ticks */ + + user32_addr_t head, tail; /* packets in delay line */ + + /* WF2Q+ */ + struct dn_heap_32 scheduler_heap; /* top extract - key Finish time*/ + struct dn_heap_32 not_eligible_heap; /* top extract- key Start time */ + struct dn_heap_32 idle_heap; /* random extract - key Start=Finish time */ + + dn_key V; /* virtual time */ + int sum; /* sum of weights of all active sessions */ + int numbytes; /* bits I can transmit (more or less). */ + + dn_key sched_time; /* time pipe was scheduled in ready_heap */ + + /* + * When the tx clock come from an interface (if_name[0] != '\0'), its name + * is stored below, whereas the ifp is filled when the rule is configured. + */ + char if_name[IFNAMSIZ]; + user32_addr_t ifp; + int ready; /* set if ifp != NULL and we got a signal from it */ + + struct dn_flow_set_32 fs; /* used with fixed-rate flows */ }; #pragma pack() struct dn_heap_64 { - int size ; - int elements ; - int offset ; /* XXX if > 0 this is the offset of direct ptr to obj */ - user64_addr_t p ; /* really an array of "size" entries */ -} ; + int size; + int elements; + int offset; /* XXX if > 0 this is the offset of direct ptr to obj */ + user64_addr_t p; /* really an array of "size" entries */ +}; struct dn_flow_queue_64 { - user64_addr_t next ; - struct ip_flow_id id ; - - user64_addr_t head, tail ; /* queue of packets */ - u_int len ; - u_int len_bytes ; - u_int32_t numbytes ; /* credit for transmission (dynamic queues) */ - - u_int64_t tot_pkts ; /* statistics counters */ - u_int64_t tot_bytes ; - u_int32_t drops ; - - int hash_slot ; /* debugging/diagnostic */ - - /* RED parameters */ - int avg ; /* average queue length est. 
(scaled) */ - int count ; /* arrivals since last RED drop */ - int random ; /* random value (scaled) */ - u_int32_t q_time ; /* start of queue idle time */ - - /* WF2Q+ support */ - user64_addr_t fs ; /* parent flow set */ - int heap_pos ; /* position (index) of struct in heap */ - dn_key sched_time ; /* current time when queue enters ready_heap */ - - dn_key S,F ; /* start time, finish time */ - /* - * Setting F < S means the timestamp is invalid. We only need - * to test this when the queue is empty. - */ -} ; + user64_addr_t next; + struct ip_flow_id id; + + user64_addr_t head, tail; /* queue of packets */ + u_int len; + u_int len_bytes; + u_int32_t numbytes; /* credit for transmission (dynamic queues) */ + + u_int64_t tot_pkts; /* statistics counters */ + u_int64_t tot_bytes; + u_int32_t drops; + + int hash_slot; /* debugging/diagnostic */ + + /* RED parameters */ + int avg; /* average queue length est. (scaled) */ + int count; /* arrivals since last RED drop */ + int random; /* random value (scaled) */ + u_int32_t q_time; /* start of queue idle time */ + + /* WF2Q+ support */ + user64_addr_t fs; /* parent flow set */ + int heap_pos; /* position (index) of struct in heap */ + dn_key sched_time; /* current time when queue enters ready_heap */ + + dn_key S, F; /* start time, finish time */ + /* + * Setting F < S means the timestamp is invalid. We only need + * to test this when the queue is empty. + */ +}; struct dn_flow_set_64 { - user64_addr_t next; /* next flow set in all_flow_sets list */ - - u_short fs_nr ; /* flow_set number */ - u_short flags_fs; -#define DN_HAVE_FLOW_MASK 0x0001 -#define DN_IS_RED 0x0002 -#define DN_IS_GENTLE_RED 0x0004 -#define DN_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */ -#define DN_NOERROR 0x0010 /* do not report ENOBUFS on drops */ -#define DN_IS_PIPE 0x4000 -#define DN_IS_QUEUE 0x8000 - - user64_addr_t pipe ; /* pointer to parent pipe */ - u_short parent_nr ; /* parent pipe#, 0 if local to a pipe */ - - int weight ; /* WFQ queue weight */ - int qsize ; /* queue size in slots or bytes */ - int plr ; /* pkt loss rate (2^31-1 means 100%) */ - - struct ip_flow_id flow_mask ; - - /* hash table of queues onto this flow_set */ - int rq_size ; /* number of slots */ - int rq_elements ; /* active elements */ - user64_addr_t rq; /* array of rq_size entries */ - - u_int32_t last_expired ; /* do not expire too frequently */ - int backlogged ; /* #active queues for this flowset */ - + user64_addr_t next; /* next flow set in all_flow_sets list */ + + u_short fs_nr; /* flow_set number */ + u_short flags_fs; +#define DN_HAVE_FLOW_MASK 0x0001 +#define DN_IS_RED 0x0002 +#define DN_IS_GENTLE_RED 0x0004 +#define DN_QSIZE_IS_BYTES 0x0008 /* queue size is measured in bytes */ +#define DN_NOERROR 0x0010 /* do not report ENOBUFS on drops */ +#define DN_IS_PIPE 0x4000 +#define DN_IS_QUEUE 0x8000 + + user64_addr_t pipe; /* pointer to parent pipe */ + u_short parent_nr; /* parent pipe#, 0 if local to a pipe */ + + int weight; /* WFQ queue weight */ + int qsize; /* queue size in slots or bytes */ + int plr; /* pkt loss rate (2^31-1 means 100%) */ + + struct ip_flow_id flow_mask; + + /* hash table of queues onto this flow_set */ + int rq_size; /* number of slots */ + int rq_elements; /* active elements */ + user64_addr_t rq; /* array of rq_size entries */ + + u_int32_t last_expired; /* do not expire too frequently */ + int backlogged; /* #active queues for this flowset */ + /* RED parameters */ #define SCALE_RED 16 #define SCALE(x) ( (x) << SCALE_RED ) #define 
SCALE_VAL(x) ( (x) >> SCALE_RED ) -#define SCALE_MUL(x,y) ( ( (x) * (y) ) >> SCALE_RED ) - int w_q ; /* queue weight (scaled) */ - int max_th ; /* maximum threshold for queue (scaled) */ - int min_th ; /* minimum threshold for queue (scaled) */ - int max_p ; /* maximum value for p_b (scaled) */ - u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */ - u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */ - u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */ - u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */ - user64_addr_t w_q_lookup ; /* lookup table for computing (1-w_q)^t */ - u_int lookup_depth ; /* depth of lookup table */ - int lookup_step ; /* granularity inside the lookup table */ - int lookup_weight ; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */ - int avg_pkt_size ; /* medium packet size */ - int max_pkt_size ; /* max packet size */ -} ; - -struct dn_pipe_64 { /* a pipe */ - user64_addr_t next ; - - int pipe_nr ; /* number */ - int bandwidth; /* really, bytes/tick. */ - int delay ; /* really, ticks */ - - user64_addr_t head, tail ; /* packets in delay line */ - - /* WF2Q+ */ - struct dn_heap_64 scheduler_heap ; /* top extract - key Finish time*/ - struct dn_heap_64 not_eligible_heap; /* top extract- key Start time */ - struct dn_heap_64 idle_heap ; /* random extract - key Start=Finish time */ - - dn_key V ; /* virtual time */ - int sum; /* sum of weights of all active sessions */ - int numbytes; /* bits I can transmit (more or less). */ - - dn_key sched_time ; /* time pipe was scheduled in ready_heap */ - - /* - * When the tx clock come from an interface (if_name[0] != '\0'), its name - * is stored below, whereas the ifp is filled when the rule is configured. - */ - char if_name[IFNAMSIZ]; - user64_addr_t ifp ; - int ready ; /* set if ifp != NULL and we got a signal from it */ - - struct dn_flow_set_64 fs ; /* used with fixed-rate flows */ +#define SCALE_MUL(x, y) ( ( (x) * (y) ) >> SCALE_RED ) + int w_q; /* queue weight (scaled) */ + int max_th; /* maximum threshold for queue (scaled) */ + int min_th; /* minimum threshold for queue (scaled) */ + int max_p; /* maximum value for p_b (scaled) */ + u_int c_1; /* max_p/(max_th-min_th) (scaled) */ + u_int c_2; /* max_p*min_th/(max_th-min_th) (scaled) */ + u_int c_3; /* for GRED, (1-max_p)/max_th (scaled) */ + u_int c_4; /* for GRED, 1 - 2*max_p (scaled) */ + user64_addr_t w_q_lookup; /* lookup table for computing (1-w_q)^t */ + u_int lookup_depth; /* depth of lookup table */ + int lookup_step; /* granularity inside the lookup table */ + int lookup_weight; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */ + int avg_pkt_size; /* medium packet size */ + int max_pkt_size; /* max packet size */ +}; + +struct dn_pipe_64 { /* a pipe */ + user64_addr_t next; + + int pipe_nr; /* number */ + int bandwidth; /* really, bytes/tick. */ + int delay; /* really, ticks */ + + user64_addr_t head, tail; /* packets in delay line */ + + /* WF2Q+ */ + struct dn_heap_64 scheduler_heap; /* top extract - key Finish time*/ + struct dn_heap_64 not_eligible_heap; /* top extract- key Start time */ + struct dn_heap_64 idle_heap; /* random extract - key Start=Finish time */ + + dn_key V; /* virtual time */ + int sum; /* sum of weights of all active sessions */ + int numbytes; /* bits I can transmit (more or less). */ + + dn_key sched_time; /* time pipe was scheduled in ready_heap */ + + /* + * When the tx clock come from an interface (if_name[0] != '\0'), its name + * is stored below, whereas the ifp is filled when the rule is configured. 
+ */ + char if_name[IFNAMSIZ]; + user64_addr_t ifp; + int ready; /* set if ifp != NULL and we got a signal from it */ + + struct dn_flow_set_64 fs; /* used with fixed-rate flows */ }; /* @@ -688,12 +688,13 @@ static __inline struct ip_fw * ip_dn_claim_rule(struct mbuf *m) { struct m_tag *mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_DUMMYNET, NULL); + KERNEL_TAG_TYPE_DUMMYNET, NULL); if (mtag != NULL) { mtag->m_tag_type = KERNEL_TAG_TYPE_NONE; - return (((struct dn_pkt_tag *)(mtag+1))->dn_ipfw_rule); - } else - return (NULL); + return ((struct dn_pkt_tag *)(mtag + 1))->dn_ipfw_rule; + } else { + return NULL; + } } #include @@ -732,8 +733,8 @@ struct dummynet_event { } dn_event; }; -#define dn_event_pipe_config dn_event._dnev_pipe_config -#define dn_event_rule_config dn_event._dnev_rule_config +#define dn_event_pipe_config dn_event._dnev_pipe_config +#define dn_event_rule_config dn_event._dnev_rule_config extern void dummynet_event_enqueue_nwk_wq_entry(struct dummynet_event *); diff --git a/bsd/netinet/ip_ecn.c b/bsd/netinet/ip_ecn.c index da773e9e7..c7023fa4d 100644 --- a/bsd/netinet/ip_ecn.c +++ b/bsd/netinet/ip_ecn.c @@ -83,17 +83,18 @@ void ip_ecn_ingress(int mode, u_int8_t *outer, const u_int8_t *inner) { - if (!outer || !inner) + if (!outer || !inner) { panic("NULL pointer passed to ip_ecn_ingress"); + } *outer = *inner; switch (mode) { - case ECN_NORMAL: /* ECN normal mode, copy flags */ + case ECN_NORMAL: /* ECN normal mode, copy flags */ break; - case ECN_COMPATIBILITY: /* ECN compatibility mode */ + case ECN_COMPATIBILITY: /* ECN compatibility mode */ *outer &= ~IPTOS_ECN_MASK; break; - case ECN_NOCARE: /* no consideration to ECN */ + case ECN_NOCARE: /* no consideration to ECN */ break; } } @@ -104,8 +105,9 @@ ip_ecn_ingress(int mode, u_int8_t *outer, const u_int8_t *inner) int ip_ecn_egress(int mode, const u_int8_t *outer, u_int8_t *inner) { - if (!outer || !inner) + if (!outer || !inner) { panic("NULL pointer passed to ip_ecn_egress"); + } switch (mode) { /* Process ECN for both normal and compatibility modes */ @@ -115,14 +117,14 @@ ip_ecn_egress(int mode, const u_int8_t *outer, u_int8_t *inner) ((*inner & IPTOS_ECN_MASK) != IPTOS_ECN_NOTECT)) { *inner |= IPTOS_ECN_CE; } else if ((*outer & IPTOS_ECN_MASK) == IPTOS_ECN_ECT1 && - (*inner & IPTOS_ECN_MASK) == IPTOS_ECN_ECT0) { + (*inner & IPTOS_ECN_MASK) == IPTOS_ECN_ECT0) { *inner = *outer; } break; - case ECN_NOCARE: /* no consideration to ECN */ + case ECN_NOCARE: /* no consideration to ECN */ break; } - return (1); + return 1; } #if INET6 @@ -131,8 +133,9 @@ ip6_ecn_ingress(int mode, u_int32_t *outer, const u_int32_t *inner) { u_int8_t outer8, inner8; - if (!outer || !inner) + if (!outer || !inner) { panic("NULL pointer passed to ip6_ecn_ingress"); + } inner8 = (ntohl(*inner) >> 20) & 0xff; ip_ecn_ingress(mode, &outer8, &inner8); @@ -145,17 +148,18 @@ ip6_ecn_egress(int mode, const u_int32_t *outer, u_int32_t *inner) { u_int8_t outer8, inner8; - if (!outer || !inner) + if (!outer || !inner) { panic("NULL pointer passed to ip6_ecn_egress"); + } outer8 = (ntohl(*outer) >> 20) & 0xff; inner8 = (ntohl(*inner) >> 20) & 0xff; if (ip_ecn_egress(mode, &outer8, &inner8) == 0) { - return (0); + return 0; } *inner &= ~htonl(0xff << 20); *inner |= htonl((u_int32_t)inner8 << 20); - return (1); + return 1; } /* @@ -167,8 +171,9 @@ ip46_ecn_ingress(int mode, u_int32_t *outer, const u_int8_t *tos) { u_int8_t outer8; - if (!outer || !tos) + if (!outer || !tos) { panic("NULL pointer passed to ip46_ecn_ingress"); + } 
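As background for the ECN helpers being reformatted here: tunnel code drives them in pairs, ip_ecn_ingress() when building the outer header and ip_ecn_egress() when stripping it. A sketch against the prototypes in ip_ecn.h; the tunnel_* wrappers are invented, and the drop-on-zero return convention is inferred from the callers above that check for 0:

	/*
	 * Sketch: ECN handling when wrapping an inner IPv4 packet in an
	 * outer IPv4 header, normal mode per RFC 3168.
	 */
	void
	tunnel_encap_tos(u_int8_t *outer_tos, const u_int8_t *inner_tos)
	{
		/* Copies the inner TOS outward; ECN_COMPATIBILITY would
		 * clear the outer ECN bits instead. */
		ip_ecn_ingress(ECN_NORMAL, outer_tos, inner_tos);
	}

	int
	tunnel_decap_tos(const u_int8_t *outer_tos, u_int8_t *inner_tos)
	{
		/*
		 * Folds an outer CE mark into the inner header; a 0 return
		 * (as the ip46/ip64 callers treat it) means the packet
		 * should be dropped.
		 */
		return ip_ecn_egress(ECN_NORMAL, outer_tos, inner_tos);
	}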
ip_ecn_ingress(mode, &outer8, tos); *outer &= ~htonl(0xff << 20); @@ -184,8 +189,9 @@ ip46_ecn_egress(int mode, const u_int32_t *outer, u_int8_t *tos) { u_int8_t outer8; - if (!outer || !tos) + if (!outer || !tos) { panic("NULL pointer passed to ip46_ecn_egress"); + } outer8 = (ntohl(*outer) >> 20) & 0xff; return ip_ecn_egress(mode, &outer8, tos); @@ -200,8 +206,9 @@ ip64_ecn_ingress(int mode, u_int8_t *outer, const u_int32_t *inner) { u_int8_t inner8; - if (!outer || ! inner) + if (!outer || !inner) { panic("NULL pointer passed to ip64_ecn_ingress"); + } inner8 = (ntohl(*inner) >> 20) & 0xff; ip_ecn_ingress(mode, outer, &inner8); @@ -216,17 +223,18 @@ ip64_ecn_egress(int mode, const u_int8_t *outer, u_int32_t *inner) { u_int8_t inner8; - if (!outer || !inner) + if (!outer || !inner) { panic("NULL pointer passed to ip64_ecn_egress"); + } inner8 = (ntohl(*inner) >> 20) & 0xff; if (ip_ecn_egress(mode, outer, &inner8) == 0) { - return (0); + return 0; } *inner &= ~htonl(0xff << 20); *inner |= htonl((u_int32_t)inner8 << 20); - return (1); + return 1; } #endif diff --git a/bsd/netinet/ip_ecn.h b/bsd/netinet/ip_ecn.h index 959a8e24d..846f47860 100644 --- a/bsd/netinet/ip_ecn.h +++ b/bsd/netinet/ip_ecn.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013, 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -61,9 +61,9 @@ #include #ifdef BSD_KERNEL_PRIVATE -#define ECN_NORMAL 1 /* ECN normal mode */ -#define ECN_COMPATIBILITY 0 /* ECN comptability mode */ -#define ECN_NOCARE (-1) /* Ignore ECN. Use caution with this mode. */ +#define ECN_NORMAL 1 /* ECN normal mode */ +#define ECN_COMPATIBILITY 0 /* ECN comptability mode */ +#define ECN_NOCARE (-1) /* Ignore ECN. Use caution with this mode. 
*/ extern void ip_ecn_ingress(int, u_int8_t *, const u_int8_t *); extern int ip_ecn_egress(int, const u_int8_t *, u_int8_t *); diff --git a/bsd/netinet/ip_encap.c b/bsd/netinet/ip_encap.c index 5276504a9..02c4e8141 100644 --- a/bsd/netinet/ip_encap.c +++ b/bsd/netinet/ip_encap.c @@ -119,10 +119,10 @@ MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure"); #endif static void encap_init(struct protosw *, struct domain *); -static void encap_add(struct encaptab *); +static void encap_add_locked(struct encaptab *); static int mask_match(const struct encaptab *, const struct sockaddr *, - const struct sockaddr *); -static void encap_fillarg(struct mbuf *, const struct encaptab *); + const struct sockaddr *); +static void encap_fillarg(struct mbuf *, void *arg); #ifndef LIST_HEAD_INITIALIZER /* rely upon BSS initialization */ @@ -131,17 +131,34 @@ LIST_HEAD(, encaptab) encaptab; LIST_HEAD(, encaptab) encaptab = LIST_HEAD_INITIALIZER(&encaptab); #endif +decl_lck_rw_data(static, encaptab_lock); + static void encap_init(struct protosw *pp, struct domain *dp) { #pragma unused(dp) static int encap_initialized = 0; + lck_grp_attr_t *encaptab_grp_attrib = NULL; + lck_attr_t *encaptab_lck_attrib = NULL; + lck_grp_t *encaptab_lck_group = NULL; - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); /* This gets called by more than one protocols, so initialize once */ - if (encap_initialized) + if (encap_initialized) { return; + } + + encaptab_grp_attrib = lck_grp_attr_alloc_init(); + encaptab_lck_group = lck_grp_alloc_init("encaptab lock", encaptab_grp_attrib); + lck_grp_attr_free(encaptab_grp_attrib); + + encaptab_lck_attrib = lck_attr_alloc_init(); + lck_rw_init(&encaptab_lock, encaptab_lck_group, encaptab_lck_attrib); + + lck_grp_free(encaptab_lck_group); + lck_attr_free(encaptab_lck_attrib); + encap_initialized = 1; #if 0 /* @@ -177,6 +194,7 @@ encap4_input(struct mbuf *m, int off) const struct protosw *psw; struct encaptab *ep, *match; int prio, matchprio; + void *match_arg = NULL; #ifndef __APPLE__ va_start(ap, m); @@ -204,14 +222,18 @@ encap4_input(struct mbuf *m, int off) match = NULL; matchprio = 0; + + lck_rw_lock_shared(&encaptab_lock); for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) { - if (ep->af != AF_INET) + if (ep->af != AF_INET) { continue; - if (ep->proto >= 0 && ep->proto != proto) + } + if (ep->proto >= 0 && ep->proto != proto) { continue; - if (ep->func) + } + if (ep->func) { prio = (*ep->func)(m, off, proto, ep->arg); - else { + } else { /* * it's inbound traffic, we need to match in reverse * order @@ -238,22 +260,26 @@ encap4_input(struct mbuf *m, int off) * to get the best match - the search takes O(n) for * n attachments (i.e. interfaces). 
*/ - if (prio <= 0) + if (prio <= 0) { continue; + } if (prio > matchprio) { matchprio = prio; match = ep; + psw = (const struct protosw *)match->psw; + match_arg = ep->arg; } } + lck_rw_unlock_shared(&encaptab_lock); if (match) { /* found a match, "match" has the best one */ - psw = (const struct protosw *)match->psw; if (psw && psw->pr_input) { - encap_fillarg(m, match); + encap_fillarg(m, match_arg); (*psw->pr_input)(m, off); - } else + } else { m_freem(m); + } return; } @@ -272,6 +298,7 @@ encap6_input(struct mbuf **mp, int *offp, int proto) const struct ip6protosw *psw; struct encaptab *ep, *match; int prio, matchprio; + void *match_arg = NULL; /* Expect 32-bit aligned data pointer on strict-align platforms */ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); @@ -288,14 +315,18 @@ encap6_input(struct mbuf **mp, int *offp, int proto) match = NULL; matchprio = 0; + + lck_rw_lock_shared(&encaptab_lock); for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) { - if (ep->af != AF_INET6) + if (ep->af != AF_INET6) { continue; - if (ep->proto >= 0 && ep->proto != proto) + } + if (ep->proto >= 0 && ep->proto != proto) { continue; - if (ep->func) + } + if (ep->func) { prio = (*ep->func)(m, *offp, proto, ep->arg); - else { + } else { /* * it's inbound traffic, we need to match in reverse * order @@ -305,19 +336,22 @@ encap6_input(struct mbuf **mp, int *offp, int proto) } /* see encap4_input() for issues here */ - if (prio <= 0) + if (prio <= 0) { continue; + } if (prio > matchprio) { matchprio = prio; match = ep; + psw = (const struct ip6protosw *)match->psw; + match_arg = ep->arg; } } + lck_rw_unlock_shared(&encaptab_lock); if (match) { /* found a match */ - psw = (const struct ip6protosw *)match->psw; if (psw && psw->pr_input) { - encap_fillarg(m, match); + encap_fillarg(m, match_arg); return (*psw->pr_input)(mp, offp, proto); } else { m_freem(m); @@ -331,8 +365,9 @@ encap6_input(struct mbuf **mp, int *offp, int proto) #endif static void -encap_add(struct encaptab *ep) +encap_add_locked(struct encaptab *ep) { + LCK_RW_ASSERT(&encaptab_lock, LCK_RW_ASSERT_EXCLUSIVE); LIST_INSERT_HEAD(&encaptab, ep, chain); } @@ -343,14 +378,15 @@ encap_add(struct encaptab *ep) */ const struct encaptab * encap_attach(int af, int proto, const struct sockaddr *sp, - const struct sockaddr *sm, const struct sockaddr *dp, - const struct sockaddr *dm, const struct protosw *psw, void *arg) + const struct sockaddr *sm, const struct sockaddr *dp, + const struct sockaddr *dm, const struct protosw *psw, void *arg) { - struct encaptab *ep; + struct encaptab *ep = NULL; + struct encaptab *new_ep = NULL; int error; /* sanity check on args */ - if (sp->sa_len > sizeof(ep->src) || dp->sa_len > sizeof(ep->dst)) { + if (sp->sa_len > sizeof(new_ep->src) || dp->sa_len > sizeof(new_ep->dst)) { error = EINVAL; goto fail; } @@ -363,53 +399,64 @@ encap_attach(int af, int proto, const struct sockaddr *sp, goto fail; } + new_ep = _MALLOC(sizeof(*new_ep), M_NETADDR, M_WAITOK | M_ZERO); + if (new_ep == NULL) { + error = ENOBUFS; + goto fail; + } + /* check if anyone have already attached with exactly same config */ + lck_rw_lock_exclusive(&encaptab_lock); for (ep = LIST_FIRST(&encaptab); ep; ep = LIST_NEXT(ep, chain)) { - if (ep->af != af) + if (ep->af != af) { continue; - if (ep->proto != proto) + } + if (ep->proto != proto) { continue; + } if (ep->src.ss_len != sp->sa_len || bcmp(&ep->src, sp, sp->sa_len) != 0 || - bcmp(&ep->srcmask, sm, sp->sa_len) != 0) + bcmp(&ep->srcmask, sm, sp->sa_len) != 0) { continue; + } if (ep->dst.ss_len != 
dp->sa_len || bcmp(&ep->dst, dp, dp->sa_len) != 0 || - bcmp(&ep->dstmask, dm, dp->sa_len) != 0) + bcmp(&ep->dstmask, dm, dp->sa_len) != 0) { continue; + } error = EEXIST; - goto fail; - } - - ep = _MALLOC(sizeof(*ep), M_NETADDR, M_WAITOK | M_ZERO); /* XXX */ - if (ep == NULL) { - error = ENOBUFS; - goto fail; + goto fail_locked; } - ep->af = af; - ep->proto = proto; - bcopy(sp, &ep->src, sp->sa_len); - bcopy(sm, &ep->srcmask, sp->sa_len); - bcopy(dp, &ep->dst, dp->sa_len); - bcopy(dm, &ep->dstmask, dp->sa_len); - ep->psw = psw; - ep->arg = arg; + new_ep->af = af; + new_ep->proto = proto; + bcopy(sp, &new_ep->src, sp->sa_len); + bcopy(sm, &new_ep->srcmask, sp->sa_len); + bcopy(dp, &new_ep->dst, dp->sa_len); + bcopy(dm, &new_ep->dstmask, dp->sa_len); + new_ep->psw = psw; + new_ep->arg = arg; - encap_add(ep); + encap_add_locked(new_ep); + lck_rw_unlock_exclusive(&encaptab_lock); error = 0; - return ep; + return new_ep; +fail_locked: + lck_rw_unlock_exclusive(&encaptab_lock); + if (new_ep != NULL) { + _FREE(new_ep, M_NETADDR); + } fail: return NULL; } const struct encaptab * encap_attach_func( int af, int proto, - int (*func)(const struct mbuf *, int, int, void *), - const struct protosw *psw, void *arg) + int (*func)(const struct mbuf *, int, int, void *), + const struct protosw *psw, void *arg) { struct encaptab *ep; int error; @@ -432,7 +479,9 @@ encap_attach_func( int af, int proto, ep->psw = psw; ep->arg = arg; - encap_add(ep); + lck_rw_lock_exclusive(&encaptab_lock); + encap_add_locked(ep); + lck_rw_unlock_exclusive(&encaptab_lock); error = 0; return ep; @@ -447,20 +496,23 @@ encap_detach(const struct encaptab *cookie) const struct encaptab *ep = cookie; struct encaptab *p; + lck_rw_lock_exclusive(&encaptab_lock); for (p = LIST_FIRST(&encaptab); p; p = LIST_NEXT(p, chain)) { if (p == ep) { LIST_REMOVE(p, chain); - _FREE(p, M_NETADDR); /*XXX*/ + lck_rw_unlock_exclusive(&encaptab_lock); + _FREE(p, M_NETADDR); /*XXX*/ return 0; } } + lck_rw_unlock_exclusive(&encaptab_lock); return EINVAL; } static int mask_match(const struct encaptab *ep, const struct sockaddr *sp, - const struct sockaddr *dp) + const struct sockaddr *dp) { struct sockaddr_storage s; struct sockaddr_storage d; @@ -469,19 +521,22 @@ mask_match(const struct encaptab *ep, const struct sockaddr *sp, u_int8_t *r; int matchlen; - if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d)) + if (sp->sa_len > sizeof(s) || dp->sa_len > sizeof(d)) { return 0; - if (sp->sa_family != ep->af || dp->sa_family != ep->af) + } + if (sp->sa_family != ep->af || dp->sa_family != ep->af) { return 0; - if (sp->sa_len != ep->src.ss_len || dp->sa_len != ep->dst.ss_len) + } + if (sp->sa_len != ep->src.ss_len || dp->sa_len != ep->dst.ss_len) { return 0; + } matchlen = 0; p = (const u_int8_t *)sp; q = (const u_int8_t *)&ep->srcmask; r = (u_int8_t *)&s; - for (i = 0 ; i < sp->sa_len; i++) { + for (i = 0; i < sp->sa_len; i++) { r[i] = p[i] & q[i]; /* XXX estimate */ matchlen += (q[i] ? 8 : 0); @@ -490,7 +545,7 @@ mask_match(const struct encaptab *ep, const struct sockaddr *sp, p = (const u_int8_t *)dp; q = (const u_int8_t *)&ep->dstmask; r = (u_int8_t *)&d; - for (i = 0 ; i < dp->sa_len; i++) { + for (i = 0; i < dp->sa_len; i++) { r[i] = p[i] & q[i]; /* XXX rough estimate */ matchlen += (q[i] ? 
8 : 0); @@ -505,28 +560,29 @@ mask_match(const struct encaptab *ep, const struct sockaddr *sp, if (bcmp(&s, &ep->src, ep->src.ss_len) == 0 && bcmp(&d, &ep->dst, ep->dst.ss_len) == 0) { return matchlen; - } else + } else { return 0; + } } struct encaptabtag { - void* *arg; + void* *arg; }; static void encap_fillarg( struct mbuf *m, - const struct encaptab *ep) + void *arg) { - struct m_tag *tag; + struct m_tag *tag; struct encaptabtag *et; - + tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_ENCAP, - sizeof(struct encaptabtag), M_WAITOK, m); - + sizeof(struct encaptabtag), M_WAITOK, m); + if (tag != NULL) { et = (struct encaptabtag*)(tag + 1); - et->arg = ep->arg; + et->arg = arg; m_tag_prepend(m, tag); } } @@ -534,16 +590,16 @@ encap_fillarg( void * encap_getarg(struct mbuf *m) { - struct m_tag *tag; + struct m_tag *tag; struct encaptabtag *et; void *p = NULL; - + tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_ENCAP, NULL); if (tag) { et = (struct encaptabtag*)(tag + 1); p = et->arg; m_tag_delete(m, tag); } - + return p; } diff --git a/bsd/netinet/ip_encap.h b/bsd/netinet/ip_encap.h index 34f39d29d..3db5269fb 100644 --- a/bsd/netinet/ip_encap.h +++ b/bsd/netinet/ip_encap.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $KAME: ip_encap.h,v 1.6 2000/03/06 04:34:21 itojun Exp $ */ @@ -63,14 +63,14 @@ struct encaptab { LIST_ENTRY(encaptab) chain; int af; - int proto; /* -1: don't care, I'll check myself */ - struct sockaddr_storage src; /* my addr */ + int proto; /* -1: don't care, I'll check myself */ + struct sockaddr_storage src; /* my addr */ struct sockaddr_storage srcmask; - struct sockaddr_storage dst; /* remote addr */ + struct sockaddr_storage dst; /* remote addr */ struct sockaddr_storage dstmask; int (*func)(const struct mbuf *, int, int, void *); - const struct protosw *psw; /* only pr_input will be used */ - void *arg; /* passed via m->m_pkthdr.aux */ + const struct protosw *psw; /* only pr_input will be used */ + void *arg; /* passed via m->m_pkthdr.aux */ }; struct protosw; @@ -78,18 +78,18 @@ struct ip6protosw; struct domain; __BEGIN_DECLS -void encap4_init(struct protosw *, struct domain *); -void encap6_init(struct ip6protosw *, struct domain *); -void encap4_input(struct mbuf *, int); -int encap6_input(struct mbuf **, int *, int); +void encap4_init(struct protosw *, struct domain *); +void encap6_init(struct ip6protosw *, struct domain *); +void encap4_input(struct mbuf *, int); +int encap6_input(struct mbuf **, int *, int); const struct encaptab *encap_attach(int, int, const struct sockaddr *, - const struct sockaddr *, const struct sockaddr *, - const struct sockaddr *, const struct protosw *, void *); + const struct sockaddr *, const struct sockaddr *, + const struct sockaddr *, const struct protosw *, void *); const struct encaptab *encap_attach_func(int, int, - int (*)(const struct mbuf *, int, int, void *), - const struct protosw *, void *); -int encap_detach(const struct encaptab *); -void *encap_getarg(struct mbuf *); + int (*)(const struct mbuf *, int, int, void *), + const struct protosw *, void *); +int encap_detach(const struct encaptab *); +void *encap_getarg(struct mbuf *); __END_DECLS #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/ip_flowid.h b/bsd/netinet/ip_flowid.h index 8e84eec1f..4c7f8f371 100644 --- a/bsd/netinet/ip_flowid.h +++ b/bsd/netinet/ip_flowid.h @@ -96,8 +96,8 @@ struct ip_fw_args { struct ip_fw *fwa_ipfw_rule; /* matching IPFW rule */ struct pf_rule *fwa_pf_rule; /* matching PF rule */ struct ether_header *fwa_eh; /* for bridged packets */ - int fwa_flags; /* for dummynet */ - int fwa_oflags; /* for dummynet */ + int fwa_flags; /* for dummynet */ + int fwa_oflags; /* for dummynet */ union { struct ip_out_args *_fwa_ipoa; /* for dummynet */ struct ip6_out_args *_fwa_ip6oa; /* for dummynet */ @@ -111,11 +111,11 @@ struct ip_fw_args { struct sockaddr_in6 *_fwa_dst6; /* for IPv6 dummynet */ } fwa_dst_; struct route_in6 *fwa_ro6_pmtu; /* for IPv6 output */ - struct ifnet *fwa_origifp; /* for IPv6 output */ - u_int32_t fwa_mtu; /* for IPv6 output */ - int fwa_alwaysfrag; /* for IPv6 output */ - u_int32_t fwa_unfragpartlen; /* for IPv6 output */ - struct ip6_exthdrs *fwa_exthdrs; /* for IPv6 output */ + struct ifnet *fwa_origifp; /* for IPv6 output */ + u_int32_t fwa_mtu; /* for IPv6 output */ + int fwa_alwaysfrag; /* for IPv6 output */ + u_int32_t fwa_unfragpartlen; /* for IPv6 output */ + struct ip6_exthdrs *fwa_exthdrs; /* for IPv6 output */ struct ip_flow_id fwa_id; /* grabbed from IP header */ u_int16_t fwa_divert_rule;/* divert cookie */ u_int32_t fwa_cookie; @@ -129,10 +129,10 @@ struct ip_fw_args { /* Allocate a separate structure for inputs args to save space and bzero time */ struct 
ip_fw_in_args { - struct sockaddr_in *fwai_next_hop; /* forward address */ - struct ip_fw *fwai_ipfw_rule;/* matching IPFW rule */ - struct pf_rule *fwai_pf_rule; /* matching PF rule */ - u_int16_t fwai_divert_rule;/* divert cookie */ + struct sockaddr_in *fwai_next_hop; /* forward address */ + struct ip_fw *fwai_ipfw_rule;/* matching IPFW rule */ + struct pf_rule *fwai_pf_rule; /* matching PF rule */ + u_int16_t fwai_divert_rule;/* divert cookie */ }; #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/ip_fw.h b/bsd/netinet/ip_fw.h index 1eb861d3b..75f519064 100644 --- a/bsd/netinet/ip_fw.h +++ b/bsd/netinet/ip_fw.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -51,9 +51,9 @@ #else /* !IPFW2, good old ipfw */ #include -#include /* u_ types */ +#include /* u_ types */ -#define IP_FW_CURRENT_API_VERSION 20 /* Version of this API */ +#define IP_FW_CURRENT_API_VERSION 20 /* Version of this API */ /* @@ -70,12 +70,12 @@ */ union ip_fw_if { - struct in_addr fu_via_ip; /* Specified by IP address */ - struct { /* Specified by interface name */ + struct in_addr fu_via_ip; /* Specified by IP address */ + struct { /* Specified by interface name */ #define FW_IFNLEN 10 /* need room ! was IFNAMSIZ */ - char name[FW_IFNLEN]; - short unit; /* -1 means match any unit */ - } fu_via_if; + char name[FW_IFNLEN]; + short unit; /* -1 means match any unit */ + } fu_via_if; }; /* @@ -87,81 +87,81 @@ union ip_fw_if { */ struct ip_fw { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION by clients. */ - void *context; /* Context that is usable by user processes to */ - /* identify this rule. */ - u_int64_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_FW_MAX_PORTS 10 /* A reasonable maximum */ + u_int32_t version; /* Version of this structure. Should always be */ + /* set to IP_FW_CURRENT_API_VERSION by clients. */ + void *context; /* Context that is usable by user processes to */ + /* identify this rule. 
*/ + u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ + struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ + struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ + u_short fw_number; /* Rule number */ + u_int fw_flg; /* Flags word */ +#define IP_FW_MAX_PORTS 10 /* A reasonable maximum */ union { - u_short fw_pts[IP_FW_MAX_PORTS]; /* Array of port numbers to match */ -#define IP_FW_ICMPTYPES_MAX 128 -#define IP_FW_ICMPTYPES_DIM (IP_FW_ICMPTYPES_MAX / (sizeof(unsigned) * 8)) - unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ + u_short fw_pts[IP_FW_MAX_PORTS]; /* Array of port numbers to match */ +#define IP_FW_ICMPTYPES_MAX 128 +#define IP_FW_ICMPTYPES_DIM (IP_FW_ICMPTYPES_MAX / (sizeof(unsigned) * 8)) + unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ } fw_uar; - u_int fw_ipflg; /* IP flags word */ - u_char fw_ipopt,fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpopt,fw_tcpnopt; /* TCP options set/unset */ - u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ - long timestamp; /* timestamp (tv_sec) of last match */ - union ip_fw_if fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - struct sockaddr_in fu_fwd_ip; - } fw_un; - u_char fw_prot; /* IP protocol */ + u_int fw_ipflg; /* IP flags word */ + u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ + u_char fw_tcpopt, fw_tcpnopt; /* TCP options set/unset */ + u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ + long timestamp; /* timestamp (tv_sec) of last match */ + union ip_fw_if fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ + u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ + struct sockaddr_in fu_fwd_ip; + } fw_un; + u_char fw_prot; /* IP protocol */ /* * N'of src ports and # of dst ports in ports array (dst ports * follow src ports; max of 10 ports in all; count of 0 means * match all ports) */ - u_char fw_nports; - void *pipe_ptr; /* flow_set ptr for dummynet pipe */ - void *next_rule_ptr ; /* next rule in case of match */ - uid_t fw_uid; /* uid to match */ - int fw_logamount; /* amount to log */ - u_int64_t fw_loghighest; /* highest number packet to log */ + u_char fw_nports; + void *pipe_ptr; /* flow_set ptr for dummynet pipe */ + void *next_rule_ptr; /* next rule in case of match */ + uid_t fw_uid; /* uid to match */ + int fw_logamount; /* amount to log */ + u_int64_t fw_loghighest; /* highest number packet to log */ }; /* * extended ipfw structure... some fields in the original struct * can be used to pass parameters up/down, namely pointers * void *pipe_ptr - * void *next_rule_ptr + * void *next_rule_ptr * some others can be used to pass parameters down, namely counters etc. 
* u_int64_t fw_pcnt,fw_bcnt; * long timestamp; */ struct ip_fw_ext { /* extended structure */ - struct ip_fw rule; /* must be at offset 0 */ - long dont_match_prob; /* 0x7fffffff means 1.0, always fail */ - u_int dyn_type; /* type for dynamic rule */ + struct ip_fw rule; /* must be at offset 0 */ + long dont_match_prob; /* 0x7fffffff means 1.0, always fail */ + u_int dyn_type;/* type for dynamic rule */ }; -#define IP_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) -#define IP_FW_SETNSRCP(rule, n) do { \ - (rule)->fw_nports &= ~0x0f; \ - (rule)->fw_nports |= (n); \ - } while (0) -#define IP_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) -#define IP_FW_SETNDSTP(rule, n) do { \ - (rule)->fw_nports &= ~0xf0; \ - (rule)->fw_nports |= (n) << 4;\ - } while (0) - -#define fw_divert_port fw_un.fu_divert_port -#define fw_skipto_rule fw_un.fu_skipto_rule -#define fw_reject_code fw_un.fu_reject_code -#define fw_pipe_nr fw_un.fu_pipe_nr -#define fw_fwd_ip fw_un.fu_fwd_ip +#define IP_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) +#define IP_FW_SETNSRCP(rule, n) do { \ + (rule)->fw_nports &= ~0x0f; \ + (rule)->fw_nports |= (n); \ + } while (0) +#define IP_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) +#define IP_FW_SETNDSTP(rule, n) do { \ + (rule)->fw_nports &= ~0xf0; \ + (rule)->fw_nports |= (n) << 4;\ + } while (0) + +#define fw_divert_port fw_un.fu_divert_port +#define fw_skipto_rule fw_un.fu_skipto_rule +#define fw_reject_code fw_un.fu_reject_code +#define fw_pipe_nr fw_un.fu_pipe_nr +#define fw_fwd_ip fw_un.fu_fwd_ip struct ip_fw_chain { LIST_ENTRY(ip_fw_chain) next; @@ -172,88 +172,88 @@ struct ip_fw_chain { * Flow mask/flow id for each queue. */ struct ipfw_flow_id { - u_int32_t dst_ip, src_ip ; - u_int16_t dst_port, src_port ; - u_int8_t proto ; - u_int8_t flags ; /* protocol-specific flags */ -} ; + u_int32_t dst_ip, src_ip; + u_int16_t dst_port, src_port; + u_int8_t proto; + u_int8_t flags; /* protocol-specific flags */ +}; /* * dynamic ipfw rule */ struct ipfw_dyn_rule { - struct ipfw_dyn_rule *next ; - - struct ipfw_flow_id id ; - struct ipfw_flow_id mask ; - struct ip_fw_chain *chain ; /* pointer to parent rule */ - u_int32_t type ; /* rule type */ - u_int32_t expire ; /* expire time */ - u_int64_t pcnt, bcnt; /* match counters */ - u_int32_t bucket ; /* which bucket in hash table */ - u_int32_t state ; /* state of this rule (typ. a */ - /* combination of TCP flags) */ -} ; + struct ipfw_dyn_rule *next; + + struct ipfw_flow_id id; + struct ipfw_flow_id mask; + struct ip_fw_chain *chain; /* pointer to parent rule */ + u_int32_t type; /* rule type */ + u_int32_t expire; /* expire time */ + u_int64_t pcnt, bcnt; /* match counters */ + u_int32_t bucket; /* which bucket in hash table */ + u_int32_t state; /* state of this rule (typ. a */ + /* combination of TCP flags) */ +}; /* * Values for "flags" field . 
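 *
 * As an illustration of how these values compose (a hypothetical
 * user-space sketch, not text from this header), a minimal rule that
 * accepts inbound TCP would be built roughly as follows, with the
 * version field set as the struct ip_fw comments require:
 *
 *	struct ip_fw rule;
 *	bzero(&rule, sizeof(rule));
 *	rule.version = IP_FW_CURRENT_API_VERSION;	// mandatory
 *	rule.fw_number = 100;				// position in the chain
 *	rule.fw_flg = IP_FW_F_ACCEPT | IP_FW_F_IN;	// accept inbound packets
 *	rule.fw_prot = IPPROTO_TCP;			// match TCP only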
*/ -#define IP_FW_F_COMMAND 0x000000ff /* Mask for type of chain entry: */ -#define IP_FW_F_DENY 0x00000000 /* This is a deny rule */ -#define IP_FW_F_REJECT 0x00000001 /* Deny and send a response packet */ -#define IP_FW_F_ACCEPT 0x00000002 /* This is an accept rule */ -#define IP_FW_F_COUNT 0x00000003 /* This is a count rule */ -#define IP_FW_F_DIVERT 0x00000004 /* This is a divert rule */ -#define IP_FW_F_TEE 0x00000005 /* This is a tee rule */ -#define IP_FW_F_SKIPTO 0x00000006 /* This is a skipto rule */ -#define IP_FW_F_FWD 0x00000007 /* This is a "change forwarding address" rule */ -#define IP_FW_F_PIPE 0x00000008 /* This is a dummynet rule */ -#define IP_FW_F_QUEUE 0x00000009 /* This is a dummynet queue */ +#define IP_FW_F_COMMAND 0x000000ff /* Mask for type of chain entry: */ +#define IP_FW_F_DENY 0x00000000 /* This is a deny rule */ +#define IP_FW_F_REJECT 0x00000001 /* Deny and send a response packet */ +#define IP_FW_F_ACCEPT 0x00000002 /* This is an accept rule */ +#define IP_FW_F_COUNT 0x00000003 /* This is a count rule */ +#define IP_FW_F_DIVERT 0x00000004 /* This is a divert rule */ +#define IP_FW_F_TEE 0x00000005 /* This is a tee rule */ +#define IP_FW_F_SKIPTO 0x00000006 /* This is a skipto rule */ +#define IP_FW_F_FWD 0x00000007 /* This is a "change forwarding address" rule */ +#define IP_FW_F_PIPE 0x00000008 /* This is a dummynet rule */ +#define IP_FW_F_QUEUE 0x00000009 /* This is a dummynet queue */ -#define IP_FW_F_IN 0x00000100 /* Check inbound packets */ -#define IP_FW_F_OUT 0x00000200 /* Check outbound packets */ -#define IP_FW_F_IIFACE 0x00000400 /* Apply inbound interface test */ -#define IP_FW_F_OIFACE 0x00000800 /* Apply outbound interface test */ +#define IP_FW_F_IN 0x00000100 /* Check inbound packets */ +#define IP_FW_F_OUT 0x00000200 /* Check outbound packets */ +#define IP_FW_F_IIFACE 0x00000400 /* Apply inbound interface test */ +#define IP_FW_F_OIFACE 0x00000800 /* Apply outbound interface test */ -#define IP_FW_F_PRN 0x00001000 /* Print if this rule matches */ +#define IP_FW_F_PRN 0x00001000 /* Print if this rule matches */ -#define IP_FW_F_SRNG 0x00002000 /* The first two src ports are a min * - * and max range (stored in host byte * - * order). */ +#define IP_FW_F_SRNG 0x00002000 /* The first two src ports are a min * + * and max range (stored in host byte * + * order). */ -#define IP_FW_F_DRNG 0x00004000 /* The first two dst ports are a min * - * and max range (stored in host byte * - * order). */ +#define IP_FW_F_DRNG 0x00004000 /* The first two dst ports are a min * + * and max range (stored in host byte * + * order). 
*/ -#define IP_FW_F_FRAG 0x00008000 /* Fragment */ +#define IP_FW_F_FRAG 0x00008000 /* Fragment */ -#define IP_FW_F_IIFNAME 0x00010000 /* In interface by name/unit (not IP) */ -#define IP_FW_F_OIFNAME 0x00020000 /* Out interface by name/unit (not IP) */ +#define IP_FW_F_IIFNAME 0x00010000 /* In interface by name/unit (not IP) */ +#define IP_FW_F_OIFNAME 0x00020000 /* Out interface by name/unit (not IP) */ -#define IP_FW_F_INVSRC 0x00040000 /* Invert sense of src check */ -#define IP_FW_F_INVDST 0x00080000 /* Invert sense of dst check */ +#define IP_FW_F_INVSRC 0x00040000 /* Invert sense of src check */ +#define IP_FW_F_INVDST 0x00080000 /* Invert sense of dst check */ -#define IP_FW_F_ICMPBIT 0x00100000 /* ICMP type bitmap is valid */ +#define IP_FW_F_ICMPBIT 0x00100000 /* ICMP type bitmap is valid */ -#define IP_FW_F_UID 0x00200000 /* filter by uid */ +#define IP_FW_F_UID 0x00200000 /* filter by uid */ -#define IP_FW_F_RND_MATCH 0x00800000 /* probabilistic rule match */ -#define IP_FW_F_SMSK 0x01000000 /* src-port + mask */ -#define IP_FW_F_DMSK 0x02000000 /* dst-port + mask */ -#define IP_FW_BRIDGED 0x04000000 /* only match bridged packets */ -#define IP_FW_F_KEEP_S 0x08000000 /* keep state */ -#define IP_FW_F_CHECK_S 0x10000000 /* check state */ +#define IP_FW_F_RND_MATCH 0x00800000 /* probabilistic rule match */ +#define IP_FW_F_SMSK 0x01000000 /* src-port + mask */ +#define IP_FW_F_DMSK 0x02000000 /* dst-port + mask */ +#define IP_FW_BRIDGED 0x04000000 /* only match bridged packets */ +#define IP_FW_F_KEEP_S 0x08000000 /* keep state */ +#define IP_FW_F_CHECK_S 0x10000000 /* check state */ -#define IP_FW_F_SME 0x20000000 /* source = me */ -#define IP_FW_F_DME 0x40000000 /* destination = me */ +#define IP_FW_F_SME 0x20000000 /* source = me */ +#define IP_FW_F_DME 0x40000000 /* destination = me */ -#define IP_FW_F_MASK 0x7FFFFFFF /* All possible flag bits mask */ +#define IP_FW_F_MASK 0x7FFFFFFF /* All possible flag bits mask */ /* * Flags for the 'fw_ipflg' field, for comparing values of ip and its protocols. */ -#define IP_FW_IF_TCPEST 0x00000020 /* established TCP connection */ -#define IP_FW_IF_TCPMSK 0x00000020 /* mask of all TCP values */ +#define IP_FW_IF_TCPEST 0x00000020 /* established TCP connection */ +#define IP_FW_IF_TCPMSK 0x00000020 /* mask of all TCP values */ /* * For backwards compatibility with rules specifying "via iface" but @@ -261,49 +261,49 @@ struct ipfw_dyn_rule { * of bits to represent this configuration. */ -#define IF_FW_F_VIAHACK (IP_FW_F_IN|IP_FW_F_OUT|IP_FW_F_IIFACE|IP_FW_F_OIFACE) +#define IF_FW_F_VIAHACK (IP_FW_F_IN|IP_FW_F_OUT|IP_FW_F_IIFACE|IP_FW_F_OIFACE) /* * Definitions for REJECT response codes. * Values less than 256 correspond to ICMP unreachable codes. */ -#define IP_FW_REJECT_RST 0x0100 /* TCP packets: send RST */ +#define IP_FW_REJECT_RST 0x0100 /* TCP packets: send RST */ /* * Definitions for IP option names. */ -#define IP_FW_IPOPT_LSRR 0x01 -#define IP_FW_IPOPT_SSRR 0x02 -#define IP_FW_IPOPT_RR 0x04 -#define IP_FW_IPOPT_TS 0x08 +#define IP_FW_IPOPT_LSRR 0x01 +#define IP_FW_IPOPT_SSRR 0x02 +#define IP_FW_IPOPT_RR 0x04 +#define IP_FW_IPOPT_TS 0x08 /* * Definitions for TCP option names. */ -#define IP_FW_TCPOPT_MSS 0x01 -#define IP_FW_TCPOPT_WINDOW 0x02 -#define IP_FW_TCPOPT_SACK 0x04 -#define IP_FW_TCPOPT_TS 0x08 -#define IP_FW_TCPOPT_CC 0x10 +#define IP_FW_TCPOPT_MSS 0x01 +#define IP_FW_TCPOPT_WINDOW 0x02 +#define IP_FW_TCPOPT_SACK 0x04 +#define IP_FW_TCPOPT_TS 0x08 +#define IP_FW_TCPOPT_CC 0x10 /* * Definitions for TCP flags. 
*/ -#define IP_FW_TCPF_FIN TH_FIN -#define IP_FW_TCPF_SYN TH_SYN -#define IP_FW_TCPF_RST TH_RST -#define IP_FW_TCPF_PSH TH_PUSH -#define IP_FW_TCPF_ACK TH_ACK -#define IP_FW_TCPF_URG TH_URG +#define IP_FW_TCPF_FIN TH_FIN +#define IP_FW_TCPF_SYN TH_SYN +#define IP_FW_TCPF_RST TH_RST +#define IP_FW_TCPF_PSH TH_PUSH +#define IP_FW_TCPF_ACK TH_ACK +#define IP_FW_TCPF_URG TH_URG /* * Main firewall chains definitions and global var's definitions. */ #ifdef BSD_KERNEL_PRIVATE -#define IP_FW_PORT_DYNT_FLAG 0x10000 -#define IP_FW_PORT_TEE_FLAG 0x20000 -#define IP_FW_PORT_DENY_FLAG 0x40000 +#define IP_FW_PORT_DYNT_FLAG 0x10000 +#define IP_FW_PORT_TEE_FLAG 0x20000 +#define IP_FW_PORT_DENY_FLAG 0x40000 /* * Function definitions. @@ -313,15 +313,15 @@ void ip_fw_init(void); /* Firewall hooks */ struct ip; struct sockopt; -typedef int ip_fw_chk_t(struct ip **, int, struct ifnet *, u_int16_t *, - struct mbuf **, struct ip_fw_chain **, struct sockaddr_in **); -typedef int ip_fw_ctl_t(struct sockopt *); -extern ip_fw_chk_t *ip_fw_chk_ptr; -extern ip_fw_ctl_t *ip_fw_ctl_ptr; +typedef int ip_fw_chk_t(struct ip **, int, struct ifnet *, u_int16_t *, + struct mbuf **, struct ip_fw_chain **, struct sockaddr_in **); +typedef int ip_fw_ctl_t(struct sockopt *); +extern ip_fw_chk_t *ip_fw_chk_ptr; +extern ip_fw_ctl_t *ip_fw_ctl_ptr; extern int fw_one_pass; extern int fw_enable; #define IPFW_LOADED (ip_fw_chk_ptr != NULL) -extern struct ipfw_flow_id last_pkt ; +extern struct ipfw_flow_id last_pkt; #endif /* BSD_KERNEL_PRIVATE */ #endif /* !IPFW2 */ diff --git a/bsd/netinet/ip_fw2.c b/bsd/netinet/ip_fw2.c index 9b365b2c0..acbb060cb 100644 --- a/bsd/netinet/ip_fw2.c +++ b/bsd/netinet/ip_fw2.c @@ -117,14 +117,14 @@ #include /* -#include -*/ /* XXX for in_cksum */ + #include + */ /* XXX for in_cksum */ /* * XXX This one should go in sys/mbuf.h. It is used to avoid that * a firewall-generated packet loops forever through the firewall. */ -#ifndef M_SKIP_FIREWALL +#ifndef M_SKIP_FIREWALL #define M_SKIP_FIREWALL 0x4000 #endif @@ -163,7 +163,7 @@ static int Get64static_len(void); static int ipfw_sysctl SYSCTL_HANDLER_ARGS; -SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Firewall"); +SYSCTL_NODE(_net_inet_ip, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Firewall"); SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &fw_enable, 0, ipfw_sysctl, "I", "Enable ipfw"); @@ -186,22 +186,22 @@ SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, verbose_limit, CTLFLAG_RW | CTLFLAG_LOCKED * IP FW Stealth Logging: */ typedef enum ipfw_stealth_stats_type { - IPFW_STEALTH_STATS_UDP, - IPFW_STEALTH_STATS_TCP, - IPFW_STEALTH_STATS_UDPv6, - IPFW_STEALTH_STATS_TCPv6, - IPFW_STEALTH_STATS_MAX, + IPFW_STEALTH_STATS_UDP, + IPFW_STEALTH_STATS_TCP, + IPFW_STEALTH_STATS_UDPv6, + IPFW_STEALTH_STATS_TCPv6, + IPFW_STEALTH_STATS_MAX, } ipfw_stealth_stats_type_t; #define IPFW_STEALTH_TIMEOUT_SEC 30 -#define DYN_KEEPALIVE_LEEWAY 15 +#define DYN_KEEPALIVE_LEEWAY 15 // Piggybagging Stealth stats with ipfw_tick(). 
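/*
 * Worked example (annotation): assuming ipfw_tick() fires once every
 * dyn_keepalive_period seconds -- 5 by default, see below -- the macro
 * underneath evaluates to 30 / 5 = 6, so the stealth counters are
 * flushed on every 6th tick, i.e. roughly once per
 * IPFW_STEALTH_TIMEOUT_SEC (30 s).
 */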
#define IPFW_STEALTH_TIMEOUT_FREQUENCY (30 / dyn_keepalive_period) -static const char* ipfw_stealth_stats_str [IPFW_STEALTH_STATS_MAX] = { - "UDP", "TCP", "UDP v6", "TCP v6", +static const char* ipfw_stealth_stats_str[IPFW_STEALTH_STATS_MAX] = { + "UDP", "TCP", "UDP v6", "TCP v6", }; static uint32_t ipfw_stealth_stats_needs_flush = FALSE; @@ -273,14 +273,14 @@ static u_int32_t dyn_short_lifetime = 5; static u_int32_t dyn_keepalive_interval = 25; static u_int32_t dyn_keepalive_period = 5; -static u_int32_t dyn_keepalive = 1; /* do send keepalives */ +static u_int32_t dyn_keepalive = 1; /* do send keepalives */ -static u_int32_t static_count; /* # of static rules */ -static u_int32_t static_len; /* size in bytes of static rules */ -static u_int32_t static_len_32; /* size in bytes of static rules for 32 bit client */ -static u_int32_t static_len_64; /* size in bytes of static rules for 64 bit client */ -static u_int32_t dyn_count; /* # of dynamic rules */ -static u_int32_t dyn_max = 4096; /* max # of dynamic rules */ +static u_int32_t static_count; /* # of static rules */ +static u_int32_t static_len; /* size in bytes of static rules */ +static u_int32_t static_len_32; /* size in bytes of static rules for 32 bit client */ +static u_int32_t static_len_64; /* size in bytes of static rules for 64 bit client */ +static u_int32_t dyn_count; /* # of dynamic rules */ +static u_int32_t dyn_max = 4096; /* max # of dynamic rules */ SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, dyn_buckets, CTLFLAG_RW | CTLFLAG_LOCKED, &dyn_buckets, 0, "Number of dyn. buckets"); @@ -313,177 +313,188 @@ ipfw_sysctl SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2) int error; - + error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); - if (error || !req->newptr) - return (error); - + if (error || !req->newptr) { + return error; + } + ipfw_kev_post_msg(KEV_IPFW_ENABLE); - + return error; } #endif /* SYSCTL_NODE */ -static ip_fw_chk_t ipfw_chk; +static ip_fw_chk_t ipfw_chk; /* firewall lock */ lck_grp_t *ipfw_mutex_grp; lck_grp_attr_t *ipfw_mutex_grp_attr; lck_attr_t *ipfw_mutex_attr; -decl_lck_mtx_data(,ipfw_mutex_data); +decl_lck_mtx_data(, ipfw_mutex_data); lck_mtx_t *ipfw_mutex = &ipfw_mutex_data; -extern void ipfwsyslog( int level, const char *format,...); +extern void ipfwsyslog( int level, const char *format, ...); #define ipfwstring "ipfw:" -static size_t ipfwstringlen; +static size_t ipfwstringlen; -#define dolog( a ) { \ - if ( fw_verbose == 2 ) /* Apple logging, log to ipfw.log */ \ - ipfwsyslog a ; \ - else log a ; \ +#define dolog( a ) { \ + if ( fw_verbose == 2 ) /* Apple logging, log to ipfw.log */ \ + ipfwsyslog a ; \ + else log a ; \ } #define RULESIZE64(rule) (sizeof(struct ip_fw_64) + \ - ((struct ip_fw *)(rule))->cmd_len * 4 - 4) + ((struct ip_fw *)(rule))->cmd_len * 4 - 4) #define RULESIZE32(rule) (sizeof(struct ip_fw_32) + \ - ((struct ip_fw *)(rule))->cmd_len * 4 - 4) + ((struct ip_fw *)(rule))->cmd_len * 4 - 4) -void ipfwsyslog( int level, const char *format,...) +void +ipfwsyslog( int level, const char *format, ...) 
{ -#define msgsize 100 +#define msgsize 100 - struct kev_msg ev_msg; - va_list ap; - char msgBuf[msgsize]; - char *dptr = msgBuf; - unsigned char pri; - int loglen; + struct kev_msg ev_msg; + va_list ap; + char msgBuf[msgsize]; + char *dptr = msgBuf; + unsigned char pri; + int loglen; bzero(msgBuf, msgsize); bzero(&ev_msg, sizeof(struct kev_msg)); va_start( ap, format ); - loglen = vsnprintf(msgBuf, msgsize, format, ap); - va_end( ap ); + loglen = vsnprintf(msgBuf, msgsize, format, ap); + va_end( ap ); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_LOG_SUBCLASS; - ev_msg.event_code = IPFWLOGEVENT; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_LOG_SUBCLASS; + ev_msg.event_code = IPFWLOGEVENT; /* get rid of the trailing \n */ - if (loglen < msgsize) - dptr[loglen-1] = 0; - else - dptr[msgsize-1] = 0; - - pri = LOG_PRI(level); - - /* remove "ipfw:" prefix if logging to ipfw log */ - if ( !(strncmp( ipfwstring, msgBuf, ipfwstringlen))){ - dptr = msgBuf+ipfwstringlen; - } - - ev_msg.dv[0].data_ptr = &pri; - ev_msg.dv[0].data_length = 1; - ev_msg.dv[1].data_ptr = dptr; - ev_msg.dv[1].data_length = 100; /* bug in kern_post_msg, it can't handle size > 256-msghdr */ - ev_msg.dv[2].data_length = 0; - - kev_post_msg(&ev_msg); + if (loglen < msgsize) { + dptr[loglen - 1] = 0; + } else { + dptr[msgsize - 1] = 0; + } + + pri = LOG_PRI(level); + + /* remove "ipfw:" prefix if logging to ipfw log */ + if (!(strncmp( ipfwstring, msgBuf, ipfwstringlen))) { + dptr = msgBuf + ipfwstringlen; + } + + ev_msg.dv[0].data_ptr = &pri; + ev_msg.dv[0].data_length = 1; + ev_msg.dv[1].data_ptr = dptr; + ev_msg.dv[1].data_length = 100; /* bug in kern_post_msg, it can't handle size > 256-msghdr */ + ev_msg.dv[2].data_length = 0; + + kev_post_msg(&ev_msg); } -static inline void ipfw_stealth_stats_incr(uint32_t type) +static inline void +ipfw_stealth_stats_incr(uint32_t type) { - if (type >= IPFW_STEALTH_STATS_MAX) - return; + if (type >= IPFW_STEALTH_STATS_MAX) { + return; + } - ipfw_stealth_stats[type]++; + ipfw_stealth_stats[type]++; - if (!ipfw_stealth_stats_needs_flush) { - ipfw_stealth_stats_needs_flush = TRUE; - } + if (!ipfw_stealth_stats_needs_flush) { + ipfw_stealth_stats_needs_flush = TRUE; + } } -void ipfw_stealth_stats_incr_udp(void) +void +ipfw_stealth_stats_incr_udp(void) { - ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_UDP); + ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_UDP); } -void ipfw_stealth_stats_incr_tcp(void) +void +ipfw_stealth_stats_incr_tcp(void) { - ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_TCP); + ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_TCP); } -void ipfw_stealth_stats_incr_udpv6(void) +void +ipfw_stealth_stats_incr_udpv6(void) { - ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_UDPv6); + ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_UDPv6); } -void ipfw_stealth_stats_incr_tcpv6(void) +void +ipfw_stealth_stats_incr_tcpv6(void) { - ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_TCPv6); + ipfw_stealth_stats_incr(IPFW_STEALTH_STATS_TCPv6); } -static void ipfw_stealth_flush_stats(void) +static void +ipfw_stealth_flush_stats(void) { - int i; - - for (i = 0; i < IPFW_STEALTH_STATS_MAX; i++) { - if (ipfw_stealth_stats[i]) { - ipfwsyslog (LOG_INFO, "Stealth Mode connection attempt to %s %d times", - ipfw_stealth_stats_str[i], ipfw_stealth_stats[i]); - ipfw_stealth_stats[i] = 0; - } - } - ipfw_stealth_stats_needs_flush = FALSE; + int i; + + for (i = 0; i < IPFW_STEALTH_STATS_MAX; i++) { + if 
(ipfw_stealth_stats[i]) { ipfwsyslog(LOG_INFO, "Stealth Mode connection attempt to %s %d times", ipfw_stealth_stats_str[i], ipfw_stealth_stats[i]); ipfw_stealth_stats[i] = 0; } } ipfw_stealth_stats_needs_flush = FALSE; } /* * This macro maps an ip pointer into a layer3 header pointer of type T */ -#define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl)) +#define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl)) static __inline int icmptype_match(struct ip *ip, ipfw_insn_u32 *cmd) { - int type = L3HDR(struct icmp,ip)->icmp_type; + int type = L3HDR(struct icmp, ip)->icmp_type; - return (type <= ICMP_MAXTYPE && (cmd->d[0] & (1<<type)) ); + return type <= ICMP_MAXTYPE && (cmd->d[0] & (1 << type)); } -#define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \ +#define TT ( (1 << ICMP_ECHO) | (1 << ICMP_ROUTERSOLICIT) | \ (1 << ICMP_TSTAMP) | (1 << ICMP_IREQ) | (1 << ICMP_MASKREQ) ) static int is_icmp_query(struct ip *ip) { int type = L3HDR(struct icmp, ip)->icmp_type; - return (type <= ICMP_MAXTYPE && (TT & (1<<type)) ); + return type <= ICMP_MAXTYPE && (TT & (1 << type)); } #undef TT static int Get32static_len(void) { - int diff; + int diff; int len = static_len_32; struct ip_fw *rule; - char *useraction; + char *useraction; - for (rule = layer3_chain; rule ; rule = rule->next) { + for (rule = layer3_chain; rule; rule = rule->next) { if (rule->reserved_1 == IPFW_RULE_INACTIVE) { continue; } - if ( rule->act_ofs ){ - useraction = (char*)ACTION_PTR( rule ); - if ( ((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE){ + if (rule->act_ofs) { + useraction = (char*)ACTION_PTR( rule ); + if (((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE) { diff = sizeof(ipfw_insn_pipe) - sizeof(ipfw_insn_pipe_32); - if (diff) + if (diff) { len -= diff; + } } } } @@ -493,100 +504,103 @@ Get32static_len(void) static int Get64static_len(void) { - int diff; + int diff; int len = static_len_64; struct ip_fw *rule; - char *useraction; + char *useraction; - for (rule = layer3_chain; rule ; rule = rule->next) { + for (rule = layer3_chain; rule; rule = rule->next) { if (rule->reserved_1 == IPFW_RULE_INACTIVE) { continue; } - if ( rule->act_ofs ){ - useraction = (char *)ACTION_PTR( rule ); - if ( ((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE){ + if (rule->act_ofs) { + useraction = (char *)ACTION_PTR( rule ); + if (((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE) { diff = sizeof(ipfw_insn_pipe_64) - sizeof(ipfw_insn_pipe); - if (diff) + if (diff) { len += diff; + } } } } return len; } -static void -copyto32fw_insn( struct ip_fw_32 *fw32 , struct ip_fw *user_ip_fw, int cmdsize) +static void +copyto32fw_insn( struct ip_fw_32 *fw32, struct ip_fw *user_ip_fw, int cmdsize) { - char *end; - char *fw32action; - char *useraction; - int justcmdsize; - int diff=0; - int actioncopysize; + char *end; + char *fw32action; + char *useraction; + int justcmdsize; + int diff = 0; + int actioncopysize; end = ((char*)user_ip_fw->cmd) + cmdsize; useraction = (char*)ACTION_PTR( user_ip_fw ); fw32action = (char*)fw32->cmd + (user_ip_fw->act_ofs * sizeof(uint32_t)); - if ( ( justcmdsize = ( fw32action - (char*)fw32->cmd))) - bcopy( user_ip_fw->cmd, fw32->cmd, justcmdsize); - while ( useraction < end ){ - if ( ((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE){ + if ((justcmdsize = (fw32action - (char*)fw32->cmd))) { + bcopy( user_ip_fw->cmd, fw32->cmd, justcmdsize); + } + while (useraction < end) { + if (((ipfw_insn*)useraction)->opcode == O_QUEUE || ((ipfw_insn*)useraction)->opcode == O_PIPE) { actioncopysize = sizeof(ipfw_insn_pipe_32); ((ipfw_insn*)fw32action)->opcode = ((ipfw_insn*)useraction)->opcode;
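/*
 * O_QUEUE and O_PIPE instructions embed a pointer-sized field, so their
 * size differs between the kernel and user32 rule layouts; the header
 * fields are therefore copied one at a time and cmd_len is re-adjusted
 * by the size difference below, instead of the plain bcopy() used for
 * every other instruction.
 */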
((ipfw_insn*)fw32action)->arg1 = ((ipfw_insn*)useraction)->arg1; ((ipfw_insn*)fw32action)->len = F_INSN_SIZE(ipfw_insn_pipe_32); diff = ((ipfw_insn*)useraction)->len - ((ipfw_insn*)fw32action)->len; - if ( diff ){ + if (diff) { fw32->cmd_len -= diff; } - } else{ - actioncopysize = (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1 ) * sizeof(uint32_t); + } else { + actioncopysize = (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1) * sizeof(uint32_t); bcopy( useraction, fw32action, actioncopysize ); } - useraction += (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1 ) * sizeof(uint32_t); + useraction += (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1) * sizeof(uint32_t); fw32action += actioncopysize; } } static void -copyto64fw_insn( struct ip_fw_64 *fw64 , struct ip_fw *user_ip_fw, int cmdsize) +copyto64fw_insn( struct ip_fw_64 *fw64, struct ip_fw *user_ip_fw, int cmdsize) { - char *end; - char *fw64action; - char *useraction; - int justcmdsize; - int diff; - int actioncopysize; + char *end; + char *fw64action; + char *useraction; + int justcmdsize; + int diff; + int actioncopysize; end = ((char *)user_ip_fw->cmd) + cmdsize; useraction = (char*)ACTION_PTR( user_ip_fw ); - if ( (justcmdsize = (useraction - (char*)user_ip_fw->cmd))) - bcopy( user_ip_fw->cmd, fw64->cmd, justcmdsize); + if ((justcmdsize = (useraction - (char*)user_ip_fw->cmd))) { + bcopy( user_ip_fw->cmd, fw64->cmd, justcmdsize); + } fw64action = (char*)fw64->cmd + justcmdsize; - while ( useraction < end ){ - if ( ((ipfw_insn*)user_ip_fw)->opcode == O_QUEUE || ((ipfw_insn*)user_ip_fw)->opcode == O_PIPE){ + while (useraction < end) { + if (((ipfw_insn*)user_ip_fw)->opcode == O_QUEUE || ((ipfw_insn*)user_ip_fw)->opcode == O_PIPE) { actioncopysize = sizeof(ipfw_insn_pipe_64); ((ipfw_insn*)fw64action)->opcode = ((ipfw_insn*)useraction)->opcode; ((ipfw_insn*)fw64action)->arg1 = ((ipfw_insn*)useraction)->arg1; ((ipfw_insn*)fw64action)->len = F_INSN_SIZE(ipfw_insn_pipe_64); diff = ((ipfw_insn*)fw64action)->len - ((ipfw_insn*)useraction)->len; - if (diff) + if (diff) { fw64->cmd_len += diff; - - } else{ - actioncopysize = (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1 ) * sizeof(uint32_t); + } + } else { + actioncopysize = (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1) * sizeof(uint32_t); bcopy( useraction, fw64action, actioncopysize ); } - useraction += (F_LEN((ipfw_insn*)useraction) ? (F_LEN((ipfw_insn*)useraction)) : 1 ) * sizeof(uint32_t); + useraction += (F_LEN((ipfw_insn*)useraction) ? 
(F_LEN((ipfw_insn*)useraction)) : 1) * sizeof(uint32_t); fw64action += actioncopysize; } } -static void -copyto32fw( struct ip_fw *user_ip_fw, struct ip_fw_32 *fw32 , __unused size_t copysize) +static void +copyto32fw( struct ip_fw *user_ip_fw, struct ip_fw_32 *fw32, __unused size_t copysize) { - size_t rulesize, cmdsize; - + size_t rulesize, cmdsize; + fw32->version = user_ip_fw->version; fw32->context = CAST_DOWN_EXPLICIT( user32_addr_t, user_ip_fw->context); fw32->next = CAST_DOWN_EXPLICIT(user32_addr_t, user_ip_fw->next); @@ -608,9 +622,9 @@ copyto32fw( struct ip_fw *user_ip_fw, struct ip_fw_32 *fw32 , __unused size_t co } static void -copyto64fw( struct ip_fw *user_ip_fw, struct ip_fw_64 *fw64, size_t copysize) +copyto64fw( struct ip_fw *user_ip_fw, struct ip_fw_64 *fw64, size_t copysize) { - size_t rulesize, cmdsize; + size_t rulesize, cmdsize; fw64->version = user_ip_fw->version; fw64->context = CAST_DOWN_EXPLICIT(__uint64_t, user_ip_fw->context); @@ -628,91 +642,94 @@ copyto64fw( struct ip_fw *user_ip_fw, struct ip_fw_64 *fw64, size_t copysize) fw64->reserved_1 = user_ip_fw->reserved_1; fw64->reserved_2 = user_ip_fw->reserved_2; rulesize = sizeof(struct ip_fw_64) + (user_ip_fw->cmd_len * sizeof(ipfw_insn) - 4); - if (rulesize > copysize) + if (rulesize > copysize) { cmdsize = copysize - sizeof(struct ip_fw_64) + 4; - else + } else { cmdsize = user_ip_fw->cmd_len * sizeof(u_int32_t); + } copyto64fw_insn( fw64, user_ip_fw, cmdsize); } static int -copyfrom32fw_insn( struct ip_fw_32 *fw32 , struct ip_fw *user_ip_fw, int cmdsize) +copyfrom32fw_insn( struct ip_fw_32 *fw32, struct ip_fw *user_ip_fw, int cmdsize) { - char *end; - char *fw32action; - char *useraction; - int justcmdsize; - int diff; - int actioncopysize; + char *end; + char *fw32action; + char *useraction; + int justcmdsize; + int diff; + int actioncopysize; end = ((char*)fw32->cmd) + cmdsize; fw32action = (char*)ACTION_PTR( fw32 ); - if ((justcmdsize = (fw32action - (char*)fw32->cmd))) - bcopy( fw32->cmd, user_ip_fw->cmd, justcmdsize); + if ((justcmdsize = (fw32action - (char*)fw32->cmd))) { + bcopy( fw32->cmd, user_ip_fw->cmd, justcmdsize); + } useraction = (char*)user_ip_fw->cmd + justcmdsize; - while ( fw32action < end ){ - if ( ((ipfw_insn*)fw32action)->opcode == O_QUEUE || ((ipfw_insn*)fw32action)->opcode == O_PIPE){ + while (fw32action < end) { + if (((ipfw_insn*)fw32action)->opcode == O_QUEUE || ((ipfw_insn*)fw32action)->opcode == O_PIPE) { actioncopysize = sizeof(ipfw_insn_pipe); ((ipfw_insn*)useraction)->opcode = ((ipfw_insn*)fw32action)->opcode; ((ipfw_insn*)useraction)->arg1 = ((ipfw_insn*)fw32action)->arg1; ((ipfw_insn*)useraction)->len = F_INSN_SIZE(ipfw_insn_pipe); diff = ((ipfw_insn*)useraction)->len - ((ipfw_insn*)fw32action)->len; - if (diff){ + if (diff) { /* readjust the cmd_len */ user_ip_fw->cmd_len += diff; } - } else{ - actioncopysize = (F_LEN((ipfw_insn*)fw32action) ? (F_LEN((ipfw_insn*)fw32action)) : 1 ) * sizeof(uint32_t); + } else { + actioncopysize = (F_LEN((ipfw_insn*)fw32action) ? (F_LEN((ipfw_insn*)fw32action)) : 1) * sizeof(uint32_t); bcopy( fw32action, useraction, actioncopysize ); } - fw32action += (F_LEN((ipfw_insn*)fw32action) ? (F_LEN((ipfw_insn*)fw32action)) : 1 ) * sizeof(uint32_t); + fw32action += (F_LEN((ipfw_insn*)fw32action) ? 
(F_LEN((ipfw_insn*)fw32action)) : 1) * sizeof(uint32_t); useraction += actioncopysize; } - return( useraction - (char*)user_ip_fw->cmd ); + return useraction - (char*)user_ip_fw->cmd; } static int -copyfrom64fw_insn( struct ip_fw_64 *fw64 , struct ip_fw *user_ip_fw, int cmdsize) +copyfrom64fw_insn( struct ip_fw_64 *fw64, struct ip_fw *user_ip_fw, int cmdsize) { - char *end; - char *fw64action; - char *useraction; - int justcmdsize; - int diff; - int actioncopysize; - - end = ((char *)fw64->cmd) + cmdsize ; + char *end; + char *fw64action; + char *useraction; + int justcmdsize; + int diff; + int actioncopysize; + + end = ((char *)fw64->cmd) + cmdsize; fw64action = (char*)ACTION_PTR( fw64 ); - if ( (justcmdsize = (fw64action - (char*)fw64->cmd))) - bcopy( fw64->cmd, user_ip_fw->cmd, justcmdsize); + if ((justcmdsize = (fw64action - (char*)fw64->cmd))) { + bcopy( fw64->cmd, user_ip_fw->cmd, justcmdsize); + } useraction = (char*)user_ip_fw->cmd + justcmdsize; - while ( fw64action < end ){ - if ( ((ipfw_insn*)fw64action)->opcode == O_QUEUE || ((ipfw_insn*)fw64action)->opcode == O_PIPE){ + while (fw64action < end) { + if (((ipfw_insn*)fw64action)->opcode == O_QUEUE || ((ipfw_insn*)fw64action)->opcode == O_PIPE) { actioncopysize = sizeof(ipfw_insn_pipe); ((ipfw_insn*)useraction)->opcode = ((ipfw_insn*)fw64action)->opcode; ((ipfw_insn*)useraction)->arg1 = ((ipfw_insn*)fw64action)->arg1; ((ipfw_insn*)useraction)->len = F_INSN_SIZE(ipfw_insn_pipe); - diff = ((ipfw_insn*)fw64action)->len - ((ipfw_insn*)useraction)->len; + diff = ((ipfw_insn*)fw64action)->len - ((ipfw_insn*)useraction)->len; if (diff) { /* readjust the cmd_len */ user_ip_fw->cmd_len -= diff; } - } else{ - actioncopysize = (F_LEN((ipfw_insn*)fw64action) ? (F_LEN((ipfw_insn*)fw64action)) : 1 ) * sizeof(uint32_t); + } else { + actioncopysize = (F_LEN((ipfw_insn*)fw64action) ? (F_LEN((ipfw_insn*)fw64action)) : 1) * sizeof(uint32_t); bcopy( fw64action, useraction, actioncopysize ); } - fw64action += (F_LEN((ipfw_insn*)fw64action) ? (F_LEN((ipfw_insn*)fw64action)) : 1 ) * sizeof(uint32_t); + fw64action += (F_LEN((ipfw_insn*)fw64action) ? 
(F_LEN((ipfw_insn*)fw64action)) : 1) * sizeof(uint32_t); useraction += actioncopysize; } - return( useraction - (char*)user_ip_fw->cmd ); + return useraction - (char*)user_ip_fw->cmd; } -static size_t -copyfrom32fw( struct ip_fw_32 *fw32, struct ip_fw *user_ip_fw, size_t copysize) +static size_t +copyfrom32fw( struct ip_fw_32 *fw32, struct ip_fw *user_ip_fw, size_t copysize) { size_t rulesize, cmdsize; - + user_ip_fw->version = fw32->version; user_ip_fw->context = CAST_DOWN(void *, fw32->context); user_ip_fw->next = CAST_DOWN(struct ip_fw*, fw32->next); @@ -729,19 +746,20 @@ copyfrom32fw( struct ip_fw_32 *fw32, struct ip_fw *user_ip_fw, size_t copysize) user_ip_fw->reserved_1 = fw32->reserved_1; user_ip_fw->reserved_2 = fw32->reserved_2; rulesize = sizeof(struct ip_fw_32) + (fw32->cmd_len * sizeof(ipfw_insn) - 4); - if ( rulesize > copysize ) - cmdsize = copysize - sizeof(struct ip_fw_32)-4; - else + if (rulesize > copysize) { + cmdsize = copysize - sizeof(struct ip_fw_32) - 4; + } else { cmdsize = fw32->cmd_len * sizeof(ipfw_insn); + } cmdsize = copyfrom32fw_insn( fw32, user_ip_fw, cmdsize); - return( sizeof(struct ip_fw) + cmdsize - 4); + return sizeof(struct ip_fw) + cmdsize - 4; } -static size_t +static size_t copyfrom64fw( struct ip_fw_64 *fw64, struct ip_fw *user_ip_fw, size_t copysize) { size_t rulesize, cmdsize; - + user_ip_fw->version = fw64->version; user_ip_fw->context = CAST_DOWN_EXPLICIT( void *, fw64->context); user_ip_fw->next = CAST_DOWN_EXPLICIT(struct ip_fw*, fw64->next); @@ -759,12 +777,13 @@ copyfrom64fw( struct ip_fw_64 *fw64, struct ip_fw *user_ip_fw, size_t copysize) user_ip_fw->reserved_2 = fw64->reserved_2; //bcopy( fw64->cmd, user_ip_fw->cmd, fw64->cmd_len * sizeof(ipfw_insn)); rulesize = sizeof(struct ip_fw_64) + (fw64->cmd_len * sizeof(ipfw_insn) - 4); - if ( rulesize > copysize ) - cmdsize = copysize - sizeof(struct ip_fw_64)-4; - else + if (rulesize > copysize) { + cmdsize = copysize - sizeof(struct ip_fw_64) - 4; + } else { cmdsize = fw64->cmd_len * sizeof(ipfw_insn); + } cmdsize = copyfrom64fw_insn( fw64, user_ip_fw, cmdsize); - return( sizeof(struct ip_fw) + cmdsize - 4); + return sizeof(struct ip_fw) + cmdsize - 4; } void @@ -781,15 +800,16 @@ externalize_flow_id(struct ipfw_flow_id *dst, struct ip_flow_id *src) } static -void cp_dyn_to_comp_32( struct ipfw_dyn_rule_compat_32 *dyn_rule_vers1, int *len) +void +cp_dyn_to_comp_32( struct ipfw_dyn_rule_compat_32 *dyn_rule_vers1, int *len) { - struct ipfw_dyn_rule_compat_32 *dyn_last=NULL; - ipfw_dyn_rule *p; + struct ipfw_dyn_rule_compat_32 *dyn_last = NULL; + ipfw_dyn_rule *p; int i; if (ipfw_dyn_v) { for (i = 0; i < curr_dyn_buckets; i++) { - for ( p = ipfw_dyn_v[i] ; p != NULL ; p = p->next) { + for (p = ipfw_dyn_v[i]; p != NULL; p = p->next) { dyn_rule_vers1->chain = (user32_addr_t)(p->rule->rulenum); externalize_flow_id(&dyn_rule_vers1->id, &p->id); externalize_flow_id(&dyn_rule_vers1->mask, &p->id); @@ -799,15 +819,15 @@ void cp_dyn_to_comp_32( struct ipfw_dyn_rule_compat_32 *dyn_rule_vers1, int *len dyn_rule_vers1->bcnt = p->bcnt; dyn_rule_vers1->bucket = p->bucket; dyn_rule_vers1->state = p->state; - + dyn_rule_vers1->next = CAST_DOWN_EXPLICIT( user32_addr_t, p->next); dyn_last = dyn_rule_vers1; - + *len += sizeof(*dyn_rule_vers1); dyn_rule_vers1++; } } - + if (dyn_last != NULL) { dyn_last->next = ((user32_addr_t)0); } @@ -816,15 +836,16 @@ void cp_dyn_to_comp_32( struct ipfw_dyn_rule_compat_32 *dyn_rule_vers1, int *len static -void cp_dyn_to_comp_64( struct ipfw_dyn_rule_compat_64 *dyn_rule_vers1, int 
*len) +void +cp_dyn_to_comp_64( struct ipfw_dyn_rule_compat_64 *dyn_rule_vers1, int *len) { - struct ipfw_dyn_rule_compat_64 *dyn_last=NULL; - ipfw_dyn_rule *p; + struct ipfw_dyn_rule_compat_64 *dyn_last = NULL; + ipfw_dyn_rule *p; int i; if (ipfw_dyn_v) { for (i = 0; i < curr_dyn_buckets; i++) { - for ( p = ipfw_dyn_v[i] ; p != NULL ; p = p->next) { + for (p = ipfw_dyn_v[i]; p != NULL; p = p->next) { dyn_rule_vers1->chain = (user64_addr_t) p->rule->rulenum; externalize_flow_id(&dyn_rule_vers1->id, &p->id); externalize_flow_id(&dyn_rule_vers1->mask, &p->id); @@ -834,15 +855,15 @@ void cp_dyn_to_comp_64( struct ipfw_dyn_rule_compat_64 *dyn_rule_vers1, int *len dyn_rule_vers1->bcnt = p->bcnt; dyn_rule_vers1->bucket = p->bucket; dyn_rule_vers1->state = p->state; - + dyn_rule_vers1->next = CAST_DOWN(user64_addr_t, p->next); dyn_last = dyn_rule_vers1; - + *len += sizeof(*dyn_rule_vers1); dyn_rule_vers1++; } } - + if (dyn_last != NULL) { dyn_last->next = CAST_DOWN(user64_addr_t, NULL); } @@ -852,65 +873,71 @@ void cp_dyn_to_comp_64( struct ipfw_dyn_rule_compat_64 *dyn_rule_vers1, int *len static int sooptcopyin_fw( struct sockopt *sopt, struct ip_fw *user_ip_fw, size_t *size ) { - size_t valsize, copyinsize = 0; - int error = 0; + size_t valsize, copyinsize = 0; + int error = 0; - valsize = sopt->sopt_valsize; - if ( size ) + valsize = sopt->sopt_valsize; + if (size) { copyinsize = *size; + } if (proc_is64bit(sopt->sopt_p)) { - struct ip_fw_64 *fw64=NULL; - - if ( valsize < sizeof(struct ip_fw_64) ) { - return(EINVAL); + struct ip_fw_64 *fw64 = NULL; + + if (valsize < sizeof(struct ip_fw_64)) { + return EINVAL; } - if ( !copyinsize ) + if (!copyinsize) { copyinsize = sizeof(struct ip_fw_64); - if ( valsize > copyinsize ) + } + if (valsize > copyinsize) { sopt->sopt_valsize = valsize = copyinsize; - - if ( sopt->sopt_p != 0) { + } + + if (sopt->sopt_p != 0) { fw64 = _MALLOC(copyinsize, M_TEMP, M_WAITOK); - if ( fw64 == NULL ) - return(ENOBUFS); - if ((error = copyin(sopt->sopt_val, fw64, valsize)) != 0){ + if (fw64 == NULL) { + return ENOBUFS; + } + if ((error = copyin(sopt->sopt_val, fw64, valsize)) != 0) { _FREE(fw64, M_TEMP); return error; } - } - else { + } else { bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), fw64, valsize); } valsize = copyfrom64fw( fw64, user_ip_fw, valsize ); _FREE( fw64, M_TEMP); - }else { - struct ip_fw_32 *fw32=NULL; - - if ( valsize < sizeof(struct ip_fw_32) ) { - return(EINVAL); + } else { + struct ip_fw_32 *fw32 = NULL; + + if (valsize < sizeof(struct ip_fw_32)) { + return EINVAL; } - if ( !copyinsize) + if (!copyinsize) { copyinsize = sizeof(struct ip_fw_32); - if ( valsize > copyinsize) + } + if (valsize > copyinsize) { sopt->sopt_valsize = valsize = copyinsize; - - if ( sopt->sopt_p != 0) { + } + + if (sopt->sopt_p != 0) { fw32 = _MALLOC(copyinsize, M_TEMP, M_WAITOK); - if ( fw32 == NULL ) - return(ENOBUFS); - if ( (error = copyin(sopt->sopt_val, fw32, valsize)) != 0){ + if (fw32 == NULL) { + return ENOBUFS; + } + if ((error = copyin(sopt->sopt_val, fw32, valsize)) != 0) { _FREE( fw32, M_TEMP); - return( error ); + return error; } - } - else { + } else { bcopy(CAST_DOWN(caddr_t, sopt->sopt_val), fw32, valsize); } valsize = copyfrom32fw( fw32, user_ip_fw, valsize); _FREE( fw32, M_TEMP); } - if ( size ) + if (size) { *size = valsize; + } return error; } @@ -932,11 +959,13 @@ flags_match(ipfw_insn *cmd, u_int8_t bits) u_char want_clear; bits = ~bits; - if ( ((cmd->arg1 & 0xff) & bits) != 0) + if (((cmd->arg1 & 0xff) & bits) != 0) { return 0; /* some bits we want set were 
clear */ + } want_clear = (cmd->arg1 >> 8) & 0xff; - if ( (want_clear & bits) != want_clear) + if ((want_clear & bits) != want_clear) { return 0; /* some bits we want clear were set */ + } return 1; } @@ -945,22 +974,23 @@ ipopts_match(struct ip *ip, ipfw_insn *cmd) { int optlen, bits = 0; u_char *cp = (u_char *)(ip + 1); - int x = (ip->ip_hl << 2) - sizeof (struct ip); + int x = (ip->ip_hl << 2) - sizeof(struct ip); for (; x > 0; x -= optlen, cp += optlen) { int opt = cp[IPOPT_OPTVAL]; - if (opt == IPOPT_EOL) + if (opt == IPOPT_EOL) { break; - if (opt == IPOPT_NOP) + } + if (opt == IPOPT_NOP) { optlen = 1; - else { + } else { optlen = cp[IPOPT_OLEN]; - if (optlen <= 0 || optlen > x) + if (optlen <= 0 || optlen > x) { return 0; /* invalid or truncated */ + } } switch (opt) { - default: break; @@ -981,31 +1011,32 @@ ipopts_match(struct ip *ip, ipfw_insn *cmd) break; } } - return (flags_match(cmd, bits)); + return flags_match(cmd, bits); } static int tcpopts_match(struct ip *ip, ipfw_insn *cmd) { int optlen, bits = 0; - struct tcphdr *tcp = L3HDR(struct tcphdr,ip); + struct tcphdr *tcp = L3HDR(struct tcphdr, ip); u_char *cp = (u_char *)(tcp + 1); int x = (tcp->th_off << 2) - sizeof(struct tcphdr); for (; x > 0; x -= optlen, cp += optlen) { int opt = cp[0]; - if (opt == TCPOPT_EOL) + if (opt == TCPOPT_EOL) { break; - if (opt == TCPOPT_NOP) + } + if (opt == TCPOPT_NOP) { optlen = 1; - else { + } else { optlen = cp[1]; - if (optlen <= 0) + if (optlen <= 0) { break; + } } switch (opt) { - default: break; @@ -1033,22 +1064,25 @@ tcpopts_match(struct ip *ip, ipfw_insn *cmd) break; } } - return (flags_match(cmd, bits)); + return flags_match(cmd, bits); } static int iface_match(struct ifnet *ifp, ipfw_insn_if *cmd) { - if (ifp == NULL) /* no iface with this packet, match fails */ + if (ifp == NULL) { /* no iface with this packet, match fails */ return 0; + } /* Check by name or by IP address */ if (cmd->name[0] != '\0') { /* match by name */ /* Check unit number (-1 is wildcard) */ - if (cmd->p.unit != -1 && cmd->p.unit != ifp->if_unit) - return(0); + if (cmd->p.unit != -1 && cmd->p.unit != ifp->if_unit) { + return 0; + } /* Check name */ - if (!strncmp(ifp->if_name, cmd->name, IFNAMSIZ)) - return(1); + if (!strncmp(ifp->if_name, cmd->name, IFNAMSIZ)) { + return 1; + } } else { struct ifaddr *ia; @@ -1063,13 +1097,13 @@ iface_match(struct ifnet *ifp, ipfw_insn_if *cmd) (ia->ifa_addr))->sin_addr.s_addr) { IFA_UNLOCK(ia); ifnet_lock_done(ifp); - return(1); /* match */ + return 1; /* match */ } IFA_UNLOCK(ia); } ifnet_lock_done(ifp); } - return(0); /* no match, fail ... */ + return 0; /* no match, fail ... */ } /* @@ -1092,7 +1126,7 @@ verify_rev_path(struct in_addr src, struct ifnet *ifp) static struct route ro; struct sockaddr_in *dst; - bzero(&ro, sizeof (ro)); + bzero(&ro, sizeof(ro)); dst = (struct sockaddr_in *)&(ro.ro_dst); /* Check if we've cached the route from the previous call. */ @@ -1101,27 +1135,27 @@ verify_rev_path(struct in_addr src, struct ifnet *ifp) dst->sin_len = sizeof(*dst); dst->sin_addr = src; - rtalloc_ign(&ro, RTF_CLONING|RTF_PRCLONING, false); + rtalloc_ign(&ro, RTF_CLONING | RTF_PRCLONING, false); } if (ro.ro_rt != NULL) { RT_LOCK_SPIN(ro.ro_rt); } else { ROUTE_RELEASE(&ro); - return 0; /* No route */ + return 0; /* No route */ } if ((ifp == NULL) || (ro.ro_rt->rt_ifp->if_index != ifp->if_index)) { RT_UNLOCK(ro.ro_rt); ROUTE_RELEASE(&ro); return 0; - } + } RT_UNLOCK(ro.ro_rt); ROUTE_RELEASE(&ro); return 1; } -static u_int64_t norule_counter; /* counter for ipfw_log(NULL...) 
*/ +static u_int64_t norule_counter; /* counter for ipfw_log(NULL...) */ #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0 #define SNP(buf) buf, sizeof(buf) @@ -1132,35 +1166,40 @@ static u_int64_t norule_counter; /* counter for ipfw_log(NULL...) */ */ static void ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh, - struct mbuf *m, struct ifnet *oif) + struct mbuf *m, struct ifnet *oif) { const char *action; int limit_reached = 0; char ipv4str[MAX_IPv4_STR_LEN]; char action2[40], proto[48], fragment[28]; - + fragment[0] = '\0'; proto[0] = '\0'; - if (f == NULL) { /* bogus pkt */ - if (verbose_limit != 0 && norule_counter >= verbose_limit) + if (f == NULL) { /* bogus pkt */ + if (verbose_limit != 0 && norule_counter >= verbose_limit) { return; + } norule_counter++; - if (norule_counter == verbose_limit) + if (norule_counter == verbose_limit) { limit_reached = verbose_limit; + } action = "Refuse"; - } else { /* O_LOG is the first action, find the real one */ + } else { /* O_LOG is the first action, find the real one */ ipfw_insn *cmd = ACTION_PTR(f); ipfw_insn_log *l = (ipfw_insn_log *)cmd; - if (l->max_log != 0 && l->log_left == 0) + if (l->max_log != 0 && l->log_left == 0) { return; + } l->log_left--; - if (l->log_left == 0) + if (l->log_left == 0) { limit_reached = l->max_log; - cmd += F_LEN(cmd); /* point to first action */ - if (cmd->opcode == O_PROB) + } + cmd += F_LEN(cmd); /* point to first action */ + if (cmd->opcode == O_PROB) { cmd += F_LEN(cmd); + } action = action2; switch (cmd->opcode) { @@ -1169,13 +1208,14 @@ ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh, break; case O_REJECT: - if (cmd->arg1==ICMP_REJECT_RST) + if (cmd->arg1 == ICMP_REJECT_RST) { action = "Reset"; - else if (cmd->arg1==ICMP_UNREACH_HOST) + } else if (cmd->arg1 == ICMP_UNREACH_HOST) { action = "Reject"; - else + } else { snprintf(SNPARGS(action2, 0), "Unreach %d", - cmd->arg1); + cmd->arg1); + } break; case O_ACCEPT: @@ -1186,23 +1226,23 @@ ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh, break; case O_DIVERT: snprintf(SNPARGS(action2, 0), "Divert %d", - cmd->arg1); + cmd->arg1); break; case O_TEE: snprintf(SNPARGS(action2, 0), "Tee %d", - cmd->arg1); + cmd->arg1); break; case O_SKIPTO: snprintf(SNPARGS(action2, 0), "SkipTo %d", - cmd->arg1); + cmd->arg1); break; case O_PIPE: snprintf(SNPARGS(action2, 0), "Pipe %d", - cmd->arg1); + cmd->arg1); break; case O_QUEUE: snprintf(SNPARGS(action2, 0), "Queue %d", - cmd->arg1); + cmd->arg1); break; case O_FORWARD_IP: { ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd; @@ -1212,19 +1252,20 @@ ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh, break; } len = snprintf(SNPARGS(action2, 0), "Forward to %s", - inet_ntop(AF_INET, &sa->sa.sin_addr, ipv4str, sizeof(ipv4str))); - if (sa->sa.sin_port) + inet_ntop(AF_INET, &sa->sa.sin_addr, ipv4str, sizeof(ipv4str))); + if (sa->sa.sin_port) { snprintf(SNPARGS(action2, len), ":%d", sa->sa.sin_port); } - break; + } + break; default: action = "UNKNOWN"; break; } } - if (hlen == 0) { /* non-ip */ + if (hlen == 0) { /* non-ip */ snprintf(SNPARGS(proto, 0), "MAC"); } else { struct ip *ip = mtod(m, struct ip *); @@ -1249,36 +1290,39 @@ ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh, case IPPROTO_TCP: len = snprintf(SNPARGS(proto, 0), "TCP %s", inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str))); - if (offset == 0) + if (offset == 0) { snprintf(SNPARGS(proto, len), ":%d %s:%d", ntohs(tcp->th_sport), inet_ntop(AF_INET, &ip->ip_dst, ipv4str, 
sizeof(ipv4str)), ntohs(tcp->th_dport)); - else + } else { snprintf(SNPARGS(proto, len), " %s", inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str))); + } break; case IPPROTO_UDP: len = snprintf(SNPARGS(proto, 0), "UDP %s", - inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str))); - if (offset == 0) + inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str))); + if (offset == 0) { snprintf(SNPARGS(proto, len), ":%d %s:%d", ntohs(udp->uh_sport), inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str)), ntohs(udp->uh_dport)); - else + } else { snprintf(SNPARGS(proto, len), " %s", inet_ntop(AF_INET, &ip->ip_dst, ipv4str, sizeof(ipv4str))); + } break; case IPPROTO_ICMP: - if (offset == 0) + if (offset == 0) { len = snprintf(SNPARGS(proto, 0), "ICMP:%u.%u ", icmp->icmp_type, icmp->icmp_code); - else + } else { len = snprintf(SNPARGS(proto, 0), "ICMP "); + } len += snprintf(SNPARGS(proto, len), "%s", inet_ntop(AF_INET, &ip->ip_src, ipv4str, sizeof(ipv4str))); snprintf(SNPARGS(proto, len), " %s", @@ -1293,29 +1337,28 @@ ipfw_log(struct ip_fw *f, u_int hlen, struct ether_header *eh, break; } - if (ip_off & (IP_MF | IP_OFFMASK)) + if (ip_off & (IP_MF | IP_OFFMASK)) { snprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)", - ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2), - offset << 3, - (ip_off & IP_MF) ? "+" : ""); + ntohs(ip->ip_id), ip_len - (ip->ip_hl << 2), + offset << 3, + (ip_off & IP_MF) ? "+" : ""); + } } - if (oif || m->m_pkthdr.rcvif) - { + if (oif || m->m_pkthdr.rcvif) { dolog((LOG_AUTHPRIV | LOG_INFO, "ipfw: %d %s %s %s via %s%d%s\n", f ? f->rulenum : -1, action, proto, oif ? "out" : "in", oif ? oif->if_name : m->m_pkthdr.rcvif->if_name, oif ? oif->if_unit : m->m_pkthdr.rcvif->if_unit, - fragment)); - } - else{ + fragment)); + } else { dolog((LOG_AUTHPRIV | LOG_INFO, "ipfw: %d %s %s [no if info]%s\n", f ? f->rulenum : -1, action, proto, fragment)); } - if (limit_reached){ + if (limit_reached) { dolog((LOG_AUTHPRIV | LOG_NOTICE, "ipfw: limit %d reached on entry %d\n", limit_reached, f ? f->rulenum : -1)); @@ -1343,23 +1386,23 @@ hash_packet(struct ip_flow_id *id) * head is a pointer to the head of the queue. * Modifies q and potentially also head. */ -#define UNLINK_DYN_RULE(prev, head, q) { \ - ipfw_dyn_rule *old_q = q; \ - \ - /* remove a refcount to the parent */ \ - if (q->dyn_type == O_LIMIT) \ - q->parent->count--; \ +#define UNLINK_DYN_RULE(prev, head, q) { \ + ipfw_dyn_rule *old_q = q; \ + \ + /* remove a refcount to the parent */ \ + if (q->dyn_type == O_LIMIT) \ + q->parent->count--; \ DEB(printf("ipfw: unlink entry 0x%08x %d -> 0x%08x %d, %d left\n",\ - (q->id.src_ip), (q->id.src_port), \ - (q->id.dst_ip), (q->id.dst_port), dyn_count-1 ); ) \ - if (prev != NULL) \ - prev->next = q = q->next; \ - else \ - head = q = q->next; \ - dyn_count--; \ + (q->id.src_ip), (q->id.src_port), \ + (q->id.dst_ip), (q->id.dst_port), dyn_count-1 ); ) \ + if (prev != NULL) \ + prev->next = q = q->next; \ + else \ + head = q = q->next; \ + dyn_count--; \ _FREE(old_q, M_IPFW); } -#define TIME_LEQ(a,b) ((int)((a)-(b)) <= 0) +#define TIME_LEQ(a, b) ((int)((a)-(b)) <= 0) /** * Remove dynamic rules pointing to "rule", or all of them if rule == NULL. 
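
[Editor's note, not part of the patch: the TIME_LEQ macro reformatted in the hunk above compares timestamps by signed subtraction rather than with a plain "a <= b". This is deliberate: with unsigned counters a direct comparison misorders timestamps once the counter wraps, while casting the difference to int stays correct as long as the two values are within ~2^31 ticks of each other. A minimal userspace sketch of the same idiom; the variable names are illustrative only:

#include <stdio.h>

#define TIME_LEQ(a, b)  ((int)((a)-(b)) <= 0)

int
main(void)
{
	unsigned int before_wrap = 0xfffffff0u; /* shortly before the counter wraps */
	unsigned int after_wrap  = 0x00000010u; /* shortly after the wrap */

	/* A direct unsigned comparison misorders the two timestamps... */
	printf("direct:   %d\n", before_wrap <= after_wrap);          /* prints 0 */

	/* ...while the signed-difference form still ranks them correctly. */
	printf("TIME_LEQ: %d\n", TIME_LEQ(before_wrap, after_wrap));  /* prints 1 */
	return 0;
}

The same wraparound-safe comparison reappears below as _SEQ_GE for TCP sequence/ack numbers, which wrap by design.]
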
@@ -1386,11 +1429,13 @@ remove_dyn_rule(struct ip_fw *rule, ipfw_dyn_rule *keep_me) getmicrotime(&timenow); - if (ipfw_dyn_v == NULL || dyn_count == 0) + if (ipfw_dyn_v == NULL || dyn_count == 0) { return; + } /* do not expire more than once per second, it is useless */ - if (!FORCE && last_remove == timenow.tv_sec) + if (!FORCE && last_remove == timenow.tv_sec) { return; + } last_remove = timenow.tv_sec; /* @@ -1399,44 +1444,49 @@ remove_dyn_rule(struct ip_fw *rule, ipfw_dyn_rule *keep_me) * them in a second pass. */ next_pass: - for (i = 0 ; i < curr_dyn_buckets ; i++) { - for (prev=NULL, q = ipfw_dyn_v[i] ; q ; ) { + for (i = 0; i < curr_dyn_buckets; i++) { + for (prev = NULL, q = ipfw_dyn_v[i]; q;) { /* * Logic can become complex here, so we split tests. */ - if (q == keep_me) + if (q == keep_me) { goto next; - if (rule != NULL && rule != q->rule) + } + if (rule != NULL && rule != q->rule) { goto next; /* not the one we are looking for */ + } if (q->dyn_type == O_LIMIT_PARENT) { /* * handle parent in the second pass, * record we need one. */ max_pass = 1; - if (pass == 0) + if (pass == 0) { goto next; - if (FORCE && q->count != 0 ) { + } + if (FORCE && q->count != 0) { /* XXX should not happen! */ printf("ipfw: OUCH! cannot remove rule," - " count %d\n", q->count); + " count %d\n", q->count); } } else { if (!FORCE && - !TIME_LEQ( q->expire, timenow.tv_sec )) + !TIME_LEQ( q->expire, timenow.tv_sec )) { goto next; + } } if (q->dyn_type != O_LIMIT_PARENT || !q->count) { UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q); continue; } next: - prev=q; - q=q->next; + prev = q; + q = q->next; } } - if (pass++ < max_pass) + if (pass++ < max_pass) { goto next_pass; + } } @@ -1445,59 +1495,61 @@ next: */ static ipfw_dyn_rule * lookup_dyn_rule(struct ip_flow_id *pkt, int *match_direction, - struct tcphdr *tcp) + struct tcphdr *tcp) { /* * stateful ipfw extensions. 
* Lookup into dynamic session queue */ -#define MATCH_REVERSE 0 -#define MATCH_FORWARD 1 -#define MATCH_NONE 2 -#define MATCH_UNKNOWN 3 +#define MATCH_REVERSE 0 +#define MATCH_FORWARD 1 +#define MATCH_NONE 2 +#define MATCH_UNKNOWN 3 #define BOTH_SYN (TH_SYN | (TH_SYN << 8)) #define BOTH_FIN (TH_FIN | (TH_FIN << 8)) int i, dir = MATCH_NONE; - ipfw_dyn_rule *prev, *q=NULL; + ipfw_dyn_rule *prev, *q = NULL; struct timeval timenow; getmicrotime(&timenow); - if (ipfw_dyn_v == NULL) - goto done; /* not found */ + if (ipfw_dyn_v == NULL) { + goto done; /* not found */ + } i = hash_packet( pkt ); - for (prev=NULL, q = ipfw_dyn_v[i] ; q != NULL ; ) { - if (q->dyn_type == O_LIMIT_PARENT && q->count) + for (prev = NULL, q = ipfw_dyn_v[i]; q != NULL;) { + if (q->dyn_type == O_LIMIT_PARENT && q->count) { goto next; + } if (TIME_LEQ( q->expire, timenow.tv_sec)) { /* expire entry */ - int dounlink = 1; + int dounlink = 1; /* check if entry is TCP */ - if ( q->id.proto == IPPROTO_TCP ) - { - /* do not delete an established TCP connection which hasn't been closed by both sides */ - if ( (q->state & (BOTH_SYN | BOTH_FIN)) != (BOTH_SYN | BOTH_FIN) ) - dounlink = 0; - } - if ( dounlink ){ - UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q); - continue; - } + if (q->id.proto == IPPROTO_TCP) { + /* do not delete an established TCP connection which hasn't been closed by both sides */ + if ((q->state & (BOTH_SYN | BOTH_FIN)) != (BOTH_SYN | BOTH_FIN)) { + dounlink = 0; + } + } + if (dounlink) { + UNLINK_DYN_RULE(prev, ipfw_dyn_v[i], q); + continue; + } } if (pkt->proto == q->id.proto && q->dyn_type != O_LIMIT_PARENT) { if (pkt->src_ip == q->id.src_ip && pkt->dst_ip == q->id.dst_ip && pkt->src_port == q->id.src_port && - pkt->dst_port == q->id.dst_port ) { + pkt->dst_port == q->id.dst_port) { dir = MATCH_FORWARD; break; } if (pkt->src_ip == q->id.dst_ip && pkt->dst_ip == q->id.src_ip && pkt->src_port == q->id.dst_port && - pkt->dst_port == q->id.src_port ) { + pkt->dst_port == q->id.src_port) { dir = MATCH_REVERSE; break; } @@ -1506,49 +1558,50 @@ next: prev = q; q = q->next; } - if (q == NULL) + if (q == NULL) { goto done; /* q = NULL, not found */ - - if ( prev != NULL) { /* found and not in front */ + } + if (prev != NULL) { /* found and not in front */ prev->next = q->next; q->next = ipfw_dyn_v[i]; ipfw_dyn_v[i] = q; } if (pkt->proto == IPPROTO_TCP) { /* update state according to flags */ - u_char flags = pkt->flags & (TH_FIN|TH_SYN|TH_RST); + u_char flags = pkt->flags & (TH_FIN | TH_SYN | TH_RST); - q->state |= (dir == MATCH_FORWARD ) ? flags : (flags << 8); + q->state |= (dir == MATCH_FORWARD) ? 
flags : (flags << 8); switch (q->state) { - case TH_SYN: /* opening */ + case TH_SYN: /* opening */ q->expire = timenow.tv_sec + dyn_syn_lifetime; break; - case BOTH_SYN: /* move to established */ - case BOTH_SYN | TH_FIN : /* one side tries to close */ - case BOTH_SYN | (TH_FIN << 8) : - if (tcp) { -#define _SEQ_GE(a,b) ((int)(a) - (int)(b) >= 0) - u_int32_t ack = ntohl(tcp->th_ack); - if (dir == MATCH_FORWARD) { - if (q->ack_fwd == 0 || _SEQ_GE(ack, q->ack_fwd)) - q->ack_fwd = ack; - else { /* ignore out-of-sequence */ - break; - } - } else { - if (q->ack_rev == 0 || _SEQ_GE(ack, q->ack_rev)) - q->ack_rev = ack; - else { /* ignore out-of-sequence */ - break; + case BOTH_SYN: /* move to established */ + case BOTH_SYN | TH_FIN: /* one side tries to close */ + case BOTH_SYN | (TH_FIN << 8): + if (tcp) { +#define _SEQ_GE(a, b) ((int)(a) - (int)(b) >= 0) + u_int32_t ack = ntohl(tcp->th_ack); + if (dir == MATCH_FORWARD) { + if (q->ack_fwd == 0 || _SEQ_GE(ack, q->ack_fwd)) { + q->ack_fwd = ack; + } else { /* ignore out-of-sequence */ + break; + } + } else { + if (q->ack_rev == 0 || _SEQ_GE(ack, q->ack_rev)) { + q->ack_rev = ack; + } else { /* ignore out-of-sequence */ + break; + } } - } } q->expire = timenow.tv_sec + dyn_ack_lifetime; break; - case BOTH_SYN | BOTH_FIN: /* both sides closed */ - if (dyn_fin_lifetime >= dyn_keepalive_period) + case BOTH_SYN | BOTH_FIN: /* both sides closed */ + if (dyn_fin_lifetime >= dyn_keepalive_period) { dyn_fin_lifetime = dyn_keepalive_period - 1; + } q->expire = timenow.tv_sec + dyn_fin_lifetime; break; @@ -1558,11 +1611,13 @@ next: * reset or some invalid combination, but can also * occur if we use keep-state the wrong way. */ - if ( (q->state & ((TH_RST << 8)|TH_RST)) == 0) + if ((q->state & ((TH_RST << 8) | TH_RST)) == 0) { printf("invalid state: 0x%x\n", q->state); + } #endif - if (dyn_rst_lifetime >= dyn_keepalive_period) + if (dyn_rst_lifetime >= dyn_keepalive_period) { dyn_rst_lifetime = dyn_keepalive_period - 1; + } q->expire = timenow.tv_sec + dyn_rst_lifetime; break; } @@ -1573,8 +1628,9 @@ next: q->expire = timenow.tv_sec + dyn_short_lifetime; } done: - if (match_direction) + if (match_direction) { *match_direction = dir; + } return q; } @@ -1587,20 +1643,23 @@ realloc_dynamic_table(void) * default to 1024. */ - if (dyn_buckets > 65536) + if (dyn_buckets > 65536) { dyn_buckets = 1024; - if ((dyn_buckets & (dyn_buckets-1)) != 0) { /* not a power of 2 */ + } + if ((dyn_buckets & (dyn_buckets - 1)) != 0) { /* not a power of 2 */ dyn_buckets = curr_dyn_buckets; /* reset */ return; } curr_dyn_buckets = dyn_buckets; - if (ipfw_dyn_v != NULL) + if (ipfw_dyn_v != NULL) { _FREE(ipfw_dyn_v, M_IPFW); + } for (;;) { ipfw_dyn_v = _MALLOC(curr_dyn_buckets * sizeof(ipfw_dyn_rule *), - M_IPFW, M_NOWAIT | M_ZERO); - if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2) + M_IPFW, M_NOWAIT | M_ZERO); + if (ipfw_dyn_v != NULL || curr_dyn_buckets <= 2) { break; + } curr_dyn_buckets /= 2; } } @@ -1627,15 +1686,16 @@ add_dyn_rule(struct ip_flow_id *id, u_int8_t dyn_type, struct ip_fw *rule) if (ipfw_dyn_v == NULL || (dyn_count == 0 && dyn_buckets != curr_dyn_buckets)) { realloc_dynamic_table(); - if (ipfw_dyn_v == NULL) + if (ipfw_dyn_v == NULL) { return NULL; /* failed ! 
*/ + } } i = hash_packet(id); r = _MALLOC(sizeof *r, M_IPFW, M_NOWAIT | M_ZERO); if (r == NULL) { #if IPFW_DEBUG - printf ("ipfw: sorry cannot allocate state\n"); + printf("ipfw: sorry cannot allocate state\n"); #endif return NULL; } @@ -1643,8 +1703,9 @@ add_dyn_rule(struct ip_flow_id *id, u_int8_t dyn_type, struct ip_fw *rule) /* increase refcount on parent, and set pointer */ if (dyn_type == O_LIMIT) { ipfw_dyn_rule *parent = (ipfw_dyn_rule *)rule; - if ( parent->dyn_type != O_LIMIT_PARENT) + if (parent->dyn_type != O_LIMIT_PARENT) { panic("invalid parent"); + } parent->count++; r->parent = parent; rule = parent->rule; @@ -1662,10 +1723,10 @@ add_dyn_rule(struct ip_flow_id *id, u_int8_t dyn_type, struct ip_fw *rule) ipfw_dyn_v[i] = r; dyn_count++; DEB(printf("ipfw: add dyn entry ty %d 0x%08x %d -> 0x%08x %d, total %d\n", - dyn_type, - (r->id.src_ip), (r->id.src_port), - (r->id.dst_ip), (r->id.dst_port), - dyn_count ); ) + dyn_type, + (r->id.src_ip), (r->id.src_port), + (r->id.dst_ip), (r->id.dst_port), + dyn_count ); ) return r; } @@ -1684,9 +1745,9 @@ lookup_dyn_parent(struct ip_flow_id *pkt, struct ip_fw *rule) if (ipfw_dyn_v) { i = hash_packet( pkt ); - for (q = ipfw_dyn_v[i] ; q != NULL ; q=q->next) + for (q = ipfw_dyn_v[i]; q != NULL; q = q->next) { if (q->dyn_type == O_LIMIT_PARENT && - rule== q->rule && + rule == q->rule && pkt->proto == q->id.proto && pkt->src_ip == q->id.src_ip && pkt->dst_ip == q->id.dst_ip && @@ -1694,9 +1755,10 @@ lookup_dyn_parent(struct ip_flow_id *pkt, struct ip_fw *rule) pkt->dst_port == q->id.dst_port) { q->expire = timenow.tv_sec + dyn_short_lifetime; DEB(printf("ipfw: lookup_dyn_parent found " - "0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(q));) + "0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(q)); ) return q; } + } } return add_dyn_rule(pkt, O_LIMIT_PARENT, rule); } @@ -1709,7 +1771,7 @@ lookup_dyn_parent(struct ip_flow_id *pkt, struct ip_fw *rule) */ static int install_state(struct ip_fw *rule, ipfw_insn_limit *cmd, - struct ip_fw_args *args) + struct ip_fw_args *args) { static int last_log; struct timeval timenow; @@ -1720,7 +1782,7 @@ install_state(struct ip_fw *rule, ipfw_insn_limit *cmd, DEB(printf("ipfw: install state type %d 0x%08x %u -> 0x%08x %u\n", cmd->o.opcode, (args->fwa_id.src_ip), (args->fwa_id.src_port), - (args->fwa_id.dst_ip), (args->fwa_id.dst_port) );) + (args->fwa_id.dst_ip), (args->fwa_id.dst_port)); ) q = lookup_dyn_rule(&args->fwa_id, NULL, NULL); @@ -1732,11 +1794,12 @@ install_state(struct ip_fw *rule, ipfw_insn_limit *cmd, return 0; } - if (dyn_count >= dyn_max) + if (dyn_count >= dyn_max) { /* * Run out of slots, try to remove any expired rule. 
*/ remove_dyn_rule(NULL, (ipfw_dyn_rule *)1); + } if (dyn_count >= dyn_max) { if (last_log != timenow.tv_sec) { @@ -1752,26 +1815,30 @@ install_state(struct ip_fw *rule, ipfw_insn_limit *cmd, break; case O_LIMIT: /* limit number of sessions */ - { + { u_int16_t limit_mask = cmd->limit_mask; struct ip_flow_id id; ipfw_dyn_rule *parent; DEB(printf("ipfw: installing dyn-limit rule %d\n", - cmd->conn_limit);) + cmd->conn_limit); ) id.dst_ip = id.src_ip = 0; id.dst_port = id.src_port = 0; id.proto = args->fwa_id.proto; - if (limit_mask & DYN_SRC_ADDR) + if (limit_mask & DYN_SRC_ADDR) { id.src_ip = args->fwa_id.src_ip; - if (limit_mask & DYN_DST_ADDR) + } + if (limit_mask & DYN_DST_ADDR) { id.dst_ip = args->fwa_id.dst_ip; - if (limit_mask & DYN_SRC_PORT) + } + if (limit_mask & DYN_SRC_PORT) { id.src_port = args->fwa_id.src_port; - if (limit_mask & DYN_DST_PORT) + } + if (limit_mask & DYN_DST_PORT) { id.dst_port = args->fwa_id.dst_port; + } parent = lookup_dyn_parent(&id, rule); if (parent == NULL) { printf("ipfw: add parent failed\n"); @@ -1792,8 +1859,8 @@ install_state(struct ip_fw *rule, ipfw_insn_limit *cmd, } } add_dyn_rule(&args->fwa_id, O_LIMIT, (struct ip_fw *)parent); - } - break; + } + break; default: printf("ipfw: unknown dynamic rule type %u\n", cmd->o.opcode); return 1; @@ -1815,9 +1882,10 @@ send_pkt(struct ip_flow_id *id, u_int32_t seq, u_int32_t ack, int flags) struct ip *ip; struct tcphdr *tcp; - MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ - if (m == 0) + MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + if (m == 0) { return NULL; + } m->m_pkthdr.rcvif = (struct ifnet *)0; m->m_pkthdr.len = m->m_len = sizeof(struct ip) + sizeof(struct tcphdr); m->m_data += max_linkhdr; @@ -1835,14 +1903,15 @@ send_pkt(struct ip_flow_id *id, u_int32_t seq, u_int32_t ack, int flags) ip->ip_dst.s_addr = htonl(id->src_ip); tcp->th_sport = htons(id->dst_port); tcp->th_dport = htons(id->src_port); - if (flags & TH_RST) { /* we are sending a RST */ + if (flags & TH_RST) { /* we are sending a RST */ if (flags & TH_ACK) { tcp->th_seq = htonl(ack); tcp->th_ack = htonl(0); tcp->th_flags = TH_RST; } else { - if (flags & TH_SYN) + if (flags & TH_SYN) { seq++; + } tcp->th_seq = htonl(0); tcp->th_ack = htonl(seq); tcp->th_flags = TH_RST | TH_ACK; @@ -1880,7 +1949,7 @@ send_pkt(struct ip_flow_id *id, u_int32_t seq, u_int32_t ack, int flags) ip->ip_ttl = ip_defttl; ip->ip_len = m->m_pkthdr.len; m->m_flags |= M_SKIP_FIREWALL; - + return m; } @@ -1890,7 +1959,6 @@ send_pkt(struct ip_flow_id *id, u_int32_t seq, u_int32_t ack, int flags) static void send_reject(struct ip_fw_args *args, int code, int offset, __unused int ip_len) { - if (code != ICMP_REJECT_RST) { /* Send an ICMP unreach */ /* We need the IP header in host order for icmp_error(). 
*/ if (args->fwa_eh != NULL) { @@ -1903,23 +1971,24 @@ send_reject(struct ip_fw_args *args, int code, int offset, __unused int ip_len) } else if (offset == 0 && args->fwa_id.proto == IPPROTO_TCP) { struct tcphdr *const tcp = L3HDR(struct tcphdr, mtod(args->fwa_m, struct ip *)); - if ( (tcp->th_flags & TH_RST) == 0) { + if ((tcp->th_flags & TH_RST) == 0) { struct mbuf *m; m = send_pkt(&(args->fwa_id), ntohl(tcp->th_seq), - ntohl(tcp->th_ack), - tcp->th_flags | TH_RST); + ntohl(tcp->th_ack), + tcp->th_flags | TH_RST); if (m != NULL) { - struct route sro; /* fake route */ + struct route sro; /* fake route */ - bzero (&sro, sizeof (sro)); + bzero(&sro, sizeof(sro)); ip_output(m, NULL, &sro, 0, NULL, NULL); ROUTE_RELEASE(&sro); } } m_freem(args->fwa_m); - } else + } else { m_freem(args->fwa_m); + } args->fwa_m = NULL; } @@ -1946,14 +2015,19 @@ lookup_next_rule(struct ip_fw *me) /* look for action, in case it is a skipto */ cmd = ACTION_PTR(me); - if (cmd->opcode == O_LOG) + if (cmd->opcode == O_LOG) { cmd += F_LEN(cmd); - if ( cmd->opcode == O_SKIPTO ) - for (rule = me->next; rule ; rule = rule->next) - if (rule->rulenum >= cmd->arg1) + } + if (cmd->opcode == O_SKIPTO) { + for (rule = me->next; rule; rule = rule->next) { + if (rule->rulenum >= cmd->arg1) { break; - if (rule == NULL) /* failure or not a skipto */ + } + } + } + if (rule == NULL) { /* failure or not a skipto */ rule = me->next; + } me->next_rule = rule; return rule; } @@ -1983,7 +2057,7 @@ lookup_next_rule(struct ip_fw *me) * * IP_FW_PORT_DENY_FLAG the packet must be dropped. * 0 The packet is to be accepted and routed normally OR - * the packet was denied/rejected and has been dropped; + * the packet was denied/rejected and has been dropped; * in the latter case, *m is equal to NULL upon return. * port Divert the packet to port, with these caveats: * @@ -2029,14 +2103,14 @@ ipfw_chk(struct ip_fw_args *args) */ struct ifnet *oif = args->fwa_oif; - struct ip_fw *f = NULL; /* matching rule */ + struct ip_fw *f = NULL; /* matching rule */ int retval = 0; /* * hlen The length of the IPv4 header. * hlen >0 means we have an IPv4 packet. */ - u_int hlen = 0; /* hlen >0 means we have an IP pkt */ + u_int hlen = 0; /* hlen >0 means we have an IP pkt */ /* * offset The offset of a fragment. offset != 0 means that @@ -2061,47 +2135,48 @@ ipfw_chk(struct ip_fw_args *args) * Only valid for IPv4 packets. */ u_int8_t proto; - u_int16_t src_port = 0, dst_port = 0; /* NOTE: host format */ - struct in_addr src_ip = { 0 } , dst_ip = { 0 }; /* NOTE: network format */ - u_int16_t ip_len=0; + u_int16_t src_port = 0, dst_port = 0; /* NOTE: host format */ + struct in_addr src_ip = { 0 }, dst_ip = { 0 }; /* NOTE: network format */ + u_int16_t ip_len = 0; int pktlen; int dyn_dir = MATCH_UNKNOWN; ipfw_dyn_rule *q = NULL; struct timeval timenow; if (m->m_flags & M_SKIP_FIREWALL || fw_bypass) { - return 0; /* accept */ + return 0; /* accept */ } - /* + /* * Clear packet chain if we find one here. 
*/ - + if (m->m_nextpkt != NULL) { m_freem_list(m->m_nextpkt); m->m_nextpkt = NULL; } - + lck_mtx_lock(ipfw_mutex); getmicrotime(&timenow); /* * dyn_dir = MATCH_UNKNOWN when rules unchecked, - * MATCH_NONE when checked and not matched (q = NULL), + * MATCH_NONE when checked and not matched (q = NULL), * MATCH_FORWARD or MATCH_REVERSE otherwise (q != NULL) */ pktlen = m->m_pkthdr.len; - if (args->fwa_eh == NULL || /* layer 3 packet */ - ( m->m_pkthdr.len >= sizeof(struct ip) && - ntohs(args->fwa_eh->ether_type) == ETHERTYPE_IP)) - hlen = ip->ip_hl << 2; + if (args->fwa_eh == NULL || /* layer 3 packet */ + (m->m_pkthdr.len >= sizeof(struct ip) && + ntohs(args->fwa_eh->ether_type) == ETHERTYPE_IP)) { + hlen = ip->ip_hl << 2; + } /* * Collect parameters into local variables for faster matching. */ - if (hlen == 0) { /* do not grab addresses for non-ip pkts */ - proto = args->fwa_id.proto = 0; /* mark f_id invalid */ + if (hlen == 0) { /* do not grab addresses for non-ip pkts */ + proto = args->fwa_id.proto = 0; /* mark f_id invalid */ goto after_ip_checks; } @@ -2117,20 +2192,20 @@ ipfw_chk(struct ip_fw_args *args) } pktlen = ip_len < pktlen ? ip_len : pktlen; -#define PULLUP_TO(len) \ - do { \ - if ((m)->m_len < (len)) { \ - args->fwa_m = m = m_pullup(m, (len)); \ - if (m == 0) \ - goto pullup_failed; \ - ip = mtod(m, struct ip *); \ - } \ - } while (0) +#define PULLUP_TO(len) \ + do { \ + if ((m)->m_len < (len)) { \ + args->fwa_m = m = m_pullup(m, (len)); \ + if (m == 0) \ + goto pullup_failed; \ + ip = mtod(m, struct ip *); \ + } \ + } while (0) if (offset == 0) { switch (proto) { case IPPROTO_TCP: - { + { struct tcphdr *tcp; PULLUP_TO(hlen + sizeof(struct tcphdr)); @@ -2138,22 +2213,22 @@ ipfw_chk(struct ip_fw_args *args) dst_port = tcp->th_dport; src_port = tcp->th_sport; args->fwa_id.flags = tcp->th_flags; - } - break; + } + break; case IPPROTO_UDP: - { + { struct udphdr *udp; PULLUP_TO(hlen + sizeof(struct udphdr)); udp = L3HDR(struct udphdr, ip); dst_port = udp->uh_dport; src_port = udp->uh_sport; - } - break; + } + break; case IPPROTO_ICMP: - PULLUP_TO(hlen + 4); /* type, code and checksum. */ + PULLUP_TO(hlen + 4); /* type, code and checksum. */ args->fwa_id.flags = L3HDR(struct icmp, ip)->icmp_type; break; @@ -2184,8 +2259,9 @@ after_ip_checks: } f = args->fwa_ipfw_rule->next_rule; - if (f == NULL) + if (f == NULL) { f = lookup_next_rule(args->fwa_ipfw_rule); + } } else { /* * Find the starting rule. It can be either the first @@ -2197,17 +2273,18 @@ after_ip_checks: if (args->fwa_eh == NULL && skipto != 0) { if (skipto >= IPFW_DEFAULT_RULE) { lck_mtx_unlock(ipfw_mutex); - return(IP_FW_PORT_DENY_FLAG); /* invalid */ + return IP_FW_PORT_DENY_FLAG; /* invalid */ } - while (f && f->rulenum <= skipto) + while (f && f->rulenum <= skipto) { f = f->next; - if (f == NULL) { /* drop packet */ + } + if (f == NULL) { /* drop packet */ lck_mtx_unlock(ipfw_mutex); - return(IP_FW_PORT_DENY_FLAG); + return IP_FW_PORT_DENY_FLAG; } } } - args->fwa_divert_rule = 0; /* reset to avoid confusion later */ + args->fwa_divert_rule = 0; /* reset to avoid confusion later */ /* * Now scan the rules, and parse microinstructions for each rule. 
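
[Editor's note, not part of the patch: the hunk above rebraces the "skipto" restart logic in ipfw_chk(), which walks the singly linked, rulenum-sorted chain for the first rule numbered strictly above the skipto target and drops the packet if none exists. A minimal model of that scan with plain ints; struct and function names here are illustrative, not from the source:

#include <stdio.h>
#include <stddef.h>

struct rule {
	int rulenum;
	struct rule *next;
};

/*
 * Return the first rule numbered above 'skipto', or NULL, which the
 * caller treats as "no rule to resume at" (deny the packet).
 * Assumes the chain is kept sorted by rulenum, as ipfw maintains it.
 */
static struct rule *
start_after_skipto(struct rule *head, int skipto)
{
	struct rule *f = head;

	while (f != NULL && f->rulenum <= skipto)
		f = f->next;
	return f;
}

int
main(void)
{
	struct rule r3 = { 300, NULL };
	struct rule r2 = { 200, &r3 };
	struct rule r1 = { 100, &r2 };

	struct rule *f = start_after_skipto(&r1, 150);
	printf("resume at rule %d\n", f ? f->rulenum : -1); /* prints 200 */
	return 0;
}

Because the scan is linear, ipfw_chk() caches the result in f->next_rule via lookup_next_rule(), and flush_rule_ptrs() below invalidates those caches whenever the chain changes.]
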
@@ -2221,12 +2298,13 @@ again: if (f->reserved_1 == IPFW_RULE_INACTIVE) { continue; } - - if (set_disable & (1 << f->set) ) + + if (set_disable & (1 << f->set)) { continue; + } skip_or = 0; - for (l = f->cmd_len, cmd = f->cmd ; l > 0 ; + for (l = f->cmd_len, cmd = f->cmd; l > 0; l -= cmdlen, cmd += cmdlen) { int match; @@ -2245,9 +2323,10 @@ check_body: * the following instructions to be skipped until * past the one with the F_OR bit clear. */ - if (skip_or) { /* skip this instruction */ - if ((cmd->len & F_OR) == 0) - skip_or = 0; /* next one is good */ + if (skip_or) { /* skip this instruction */ + if ((cmd->len & F_OR) == 0) { + skip_or = 0; /* next one is good */ + } continue; } match = 0; /* set to 1 if we succeed */ @@ -2278,59 +2357,62 @@ check_body: * as this ensures that we have an IPv4 * packet with the ports info. */ - if (offset!=0) - break; - - { - struct inpcbinfo *pi; - int wildcard; - struct inpcb *pcb; - - if (proto == IPPROTO_TCP) { - wildcard = 0; - pi = &tcbinfo; - } else if (proto == IPPROTO_UDP) { - wildcard = 1; - pi = &udbinfo; - } else + if (offset != 0) { break; + } + + { + struct inpcbinfo *pi; + int wildcard; + struct inpcb *pcb; + + if (proto == IPPROTO_TCP) { + wildcard = 0; + pi = &tcbinfo; + } else if (proto == IPPROTO_UDP) { + wildcard = 1; + pi = &udbinfo; + } else { + break; + } - pcb = (oif) ? - in_pcblookup_hash(pi, + pcb = (oif) ? + in_pcblookup_hash(pi, dst_ip, htons(dst_port), src_ip, htons(src_port), wildcard, oif) : - in_pcblookup_hash(pi, + in_pcblookup_hash(pi, src_ip, htons(src_port), dst_ip, htons(dst_port), wildcard, NULL); - if (pcb == NULL || pcb->inp_socket == NULL) - break; + if (pcb == NULL || pcb->inp_socket == NULL) { + break; + } #if __FreeBSD_version < 500034 -#define socheckuid(a,b) (kauth_cred_getuid((a)->so_cred) != (b)) +#define socheckuid(a, b) (kauth_cred_getuid((a)->so_cred) != (b)) #endif - if (cmd->opcode == O_UID) { - match = + if (cmd->opcode == O_UID) { + match = #ifdef __APPLE__ - (kauth_cred_getuid(pcb->inp_socket->so_cred) == (uid_t)((ipfw_insn_u32 *)cmd)->d[0]); + (kauth_cred_getuid(pcb->inp_socket->so_cred) == (uid_t)((ipfw_insn_u32 *)cmd)->d[0]); #else - !socheckuid(pcb->inp_socket, - (uid_t)((ipfw_insn_u32 *)cmd)->d[0]); + !socheckuid(pcb->inp_socket, + (uid_t)((ipfw_insn_u32 *)cmd)->d[0]); #endif - } + } #ifndef __APPLE__ - else { - match = 0; - kauth_cred_ismember_gid(pcb->inp_socket->so_cred, - (gid_t)((ipfw_insn_u32 *)cmd)->d[0], &match); - } + else { + match = 0; + kauth_cred_ismember_gid(pcb->inp_socket->so_cred, + (gid_t)((ipfw_insn_u32 *)cmd)->d[0], &match); + } #endif - /* release reference on pcb */ - in_pcb_checkstate(pcb, WNT_RELEASE, 0); + /* release reference on pcb */ + in_pcb_checkstate(pcb, WNT_RELEASE, 0); } - break; + break; case O_RECV: match = iface_match(m->m_pkthdr.rcvif, @@ -2347,17 +2429,17 @@ check_body: break; case O_MACADDR2: - if (args->fwa_eh != NULL) { /* have MAC header */ + if (args->fwa_eh != NULL) { /* have MAC header */ u_int32_t *want = (u_int32_t *) - ((ipfw_insn_mac *)cmd)->addr; + ((ipfw_insn_mac *)cmd)->addr; u_int32_t *mask = (u_int32_t *) - ((ipfw_insn_mac *)cmd)->mask; + ((ipfw_insn_mac *)cmd)->mask; u_int32_t *hdr = (u_int32_t *)args->fwa_eh; match = - ( want[0] == (hdr[0] & mask[0]) && - want[1] == (hdr[1] & mask[1]) && - want[2] == (hdr[2] & mask[2]) ); + (want[0] == (hdr[0] & mask[0]) && + want[1] == (hdr[1] & mask[1]) && + want[2] == (hdr[2] & mask[2])); } break; @@ -2369,9 +2451,10 @@ check_body: ((ipfw_insn_u16 *)cmd)->ports; int i; - for (i = cmdlen - 1; !match && 
i>0; - i--, p += 2) - match = (t>=p[0] && t<=p[1]); + for (i = cmdlen - 1; !match && i > 0; + i--, p += 2) { + match = (t >= p[0] && t <= p[1]); + } } break; @@ -2379,7 +2462,7 @@ check_body: match = (hlen > 0 && offset != 0); break; - case O_IN: /* "out" is "not in" */ + case O_IN: /* "out" is "not in" */ match = (oif == NULL); break; @@ -2404,14 +2487,15 @@ check_body: case O_IP_SRC_MASK: case O_IP_DST_MASK: if (hlen > 0) { - uint32_t a = - (cmd->opcode == O_IP_DST_MASK) ? + uint32_t a = + (cmd->opcode == O_IP_DST_MASK) ? dst_ip.s_addr : src_ip.s_addr; - uint32_t *p = ((ipfw_insn_u32 *)cmd)->d; - int i = cmdlen-1; + uint32_t *p = ((ipfw_insn_u32 *)cmd)->d; + int i = cmdlen - 1; - for (; !match && i>0; i-= 2, p+= 2) - match = (p[0] == (a & p[1])); + for (; !match && i > 0; i -= 2, p += 2) { + match = (p[0] == (a & p[1])); + } } break; @@ -2427,18 +2511,19 @@ check_body: case O_IP_DST_SET: case O_IP_SRC_SET: if (hlen > 0) { - u_int32_t *d = (u_int32_t *)(cmd+1); + u_int32_t *d = (u_int32_t *)(cmd + 1); u_int32_t addr = cmd->opcode == O_IP_DST_SET ? - args->fwa_id.dst_ip : - args->fwa_id.src_ip; - - if (addr < d[0]) - break; - addr -= d[0]; /* subtract base */ - match = (addr < cmd->arg1) && - ( d[ 1 + (addr>>5)] & - (1<<(addr & 0x1f)) ); + args->fwa_id.dst_ip : + args->fwa_id.src_ip; + + if (addr < d[0]) { + break; + } + addr -= d[0]; /* subtract base */ + match = (addr < cmd->arg1) && + (d[1 + (addr >> 5)] & + (1 << (addr & 0x1f))); } break; @@ -2464,28 +2549,29 @@ check_body: * to guarantee that we have an IPv4 * packet with port info. */ - if ((proto==IPPROTO_UDP || proto==IPPROTO_TCP) + if ((proto == IPPROTO_UDP || proto == IPPROTO_TCP) && offset == 0) { u_int16_t x = (cmd->opcode == O_IP_SRCPORT) ? - src_port : dst_port ; + src_port : dst_port; u_int16_t *p = ((ipfw_insn_u16 *)cmd)->ports; int i; - for (i = cmdlen - 1; !match && i>0; - i--, p += 2) - match = (x>=p[0] && x<=p[1]); + for (i = cmdlen - 1; !match && i > 0; + i--, p += 2) { + match = (x >= p[0] && x <= p[1]); + } } break; case O_ICMPTYPE: - match = (offset == 0 && proto==IPPROTO_ICMP && - icmptype_match(ip, (ipfw_insn_u32 *)cmd) ); + match = (offset == 0 && proto == IPPROTO_ICMP && + icmptype_match(ip, (ipfw_insn_u32 *)cmd)); break; case O_IPOPT: - match = (hlen > 0 && ipopts_match(ip, cmd) ); + match = (hlen > 0 && ipopts_match(ip, cmd)); break; case O_IPVER: @@ -2495,32 +2581,34 @@ check_body: case O_IPID: case O_IPLEN: case O_IPTTL: - if (hlen > 0) { /* only for IP packets */ - uint16_t x; - uint16_t *p; - int i; - - if (cmd->opcode == O_IPLEN) - x = ip_len; - else if (cmd->opcode == O_IPTTL) - x = ip->ip_ttl; - else /* must be IPID */ - x = ntohs(ip->ip_id); - if (cmdlen == 1) { - match = (cmd->arg1 == x); - break; - } - /* otherwise we have ranges */ - p = ((ipfw_insn_u16 *)cmd)->ports; - i = cmdlen - 1; - for (; !match && i>0; i--, p += 2) - match = (x >= p[0] && x <= p[1]); + if (hlen > 0) { /* only for IP packets */ + uint16_t x; + uint16_t *p; + int i; + + if (cmd->opcode == O_IPLEN) { + x = ip_len; + } else if (cmd->opcode == O_IPTTL) { + x = ip->ip_ttl; + } else { /* must be IPID */ + x = ntohs(ip->ip_id); + } + if (cmdlen == 1) { + match = (cmd->arg1 == x); + break; + } + /* otherwise we have ranges */ + p = ((ipfw_insn_u16 *)cmd)->ports; + i = cmdlen - 1; + for (; !match && i > 0; i--, p += 2) { + match = (x >= p[0] && x <= p[1]); + } } break; case O_IPPRECEDENCE: match = (hlen > 0 && - (cmd->arg1 == (ip->ip_tos & 0xe0)) ); + (cmd->arg1 == (ip->ip_tos & 0xe0))); break; case O_IPTOS: @@ -2531,7 +2619,7 @@ 
check_body: case O_TCPFLAGS: match = (proto == IPPROTO_TCP && offset == 0 && flags_match(cmd, - L3HDR(struct tcphdr,ip)->th_flags)); + L3HDR(struct tcphdr, ip)->th_flags)); break; case O_TCPOPTS: @@ -2542,37 +2630,38 @@ check_body: case O_TCPSEQ: match = (proto == IPPROTO_TCP && offset == 0 && ((ipfw_insn_u32 *)cmd)->d[0] == - L3HDR(struct tcphdr,ip)->th_seq); + L3HDR(struct tcphdr, ip)->th_seq); break; case O_TCPACK: match = (proto == IPPROTO_TCP && offset == 0 && ((ipfw_insn_u32 *)cmd)->d[0] == - L3HDR(struct tcphdr,ip)->th_ack); + L3HDR(struct tcphdr, ip)->th_ack); break; case O_TCPWIN: match = (proto == IPPROTO_TCP && offset == 0 && cmd->arg1 == - L3HDR(struct tcphdr,ip)->th_win); + L3HDR(struct tcphdr, ip)->th_win); break; case O_ESTAB: /* reject packets which have SYN only */ /* XXX should i also check for TH_ACK ? */ match = (proto == IPPROTO_TCP && offset == 0 && - (L3HDR(struct tcphdr,ip)->th_flags & - (TH_RST | TH_ACK | TH_SYN)) != TH_SYN); + (L3HDR(struct tcphdr, ip)->th_flags & + (TH_RST | TH_ACK | TH_SYN)) != TH_SYN); break; case O_LOG: - if (fw_verbose) + if (fw_verbose) { ipfw_log(f, hlen, args->fwa_eh, m, oif); + } match = 1; break; case O_PROB: - match = (random()<((ipfw_insn_u32 *)cmd)->d[0]); + match = (random() < ((ipfw_insn_u32 *)cmd)->d[0]); break; case O_VERREVPATH: @@ -2654,9 +2743,9 @@ check_body: */ if (dyn_dir == MATCH_UNKNOWN && (q = lookup_dyn_rule(&args->fwa_id, - &dyn_dir, proto == IPPROTO_TCP ? - L3HDR(struct tcphdr, ip) : NULL)) - != NULL) { + &dyn_dir, proto == IPPROTO_TCP ? + L3HDR(struct tcphdr, ip) : NULL)) + != NULL) { /* * Found dynamic entry, update stats * and jump to the 'action' part of @@ -2674,13 +2763,14 @@ check_body: * skip to next rule, if PROBE_STATE just * ignore and continue with next opcode. */ - if (cmd->opcode == O_CHECK_STATE) + if (cmd->opcode == O_CHECK_STATE) { goto next_rule; + } match = 1; break; case O_ACCEPT: - retval = 0; /* accept */ + retval = 0; /* accept */ goto done; case O_PIPE: @@ -2691,8 +2781,9 @@ check_body: case O_DIVERT: case O_TEE: - if (args->fwa_eh) /* not on layer 2 */ + if (args->fwa_eh) { /* not on layer 2 */ break; + } args->fwa_divert_rule = f->rulenum; retval = (cmd->opcode == O_DIVERT) ? 
cmd->arg1 : @@ -2701,14 +2792,16 @@ check_body: case O_COUNT: case O_SKIPTO: - f->pcnt++; /* update stats */ + f->pcnt++; /* update stats */ f->bcnt += pktlen; f->timestamp = timenow.tv_sec; - if (cmd->opcode == O_COUNT) + if (cmd->opcode == O_COUNT) { goto next_rule; + } /* handle skipto */ - if (f->next_rule == NULL) + if (f->next_rule == NULL) { lookup_next_rule(f); + } f = f->next_rule; goto again; @@ -2720,24 +2813,26 @@ check_body: */ if (hlen > 0 && offset == 0 && (proto != IPPROTO_ICMP || - is_icmp_query(ip)) && - !(m->m_flags & (M_BCAST|M_MCAST)) && + is_icmp_query(ip)) && + !(m->m_flags & (M_BCAST | M_MCAST)) && !IN_MULTICAST(dst_ip.s_addr)) { send_reject(args, cmd->arg1, - offset,ip_len); + offset, ip_len); m = args->fwa_m; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case O_DENY: retval = IP_FW_PORT_DENY_FLAG; goto done; case O_FORWARD_IP: - if (args->fwa_eh) /* not valid on layer2 pkts */ + if (args->fwa_eh) { /* not valid on layer2 pkts */ break; - if (!q || dyn_dir == MATCH_FORWARD) + } + if (!q || dyn_dir == MATCH_FORWARD) { args->fwa_next_hop = &((ipfw_insn_sa *)cmd)->sa; + } retval = 0; goto done; @@ -2745,25 +2840,26 @@ check_body: panic("-- unknown opcode %d\n", cmd->opcode); } /* end of switch() on opcodes */ - if (cmd->len & F_NOT) + if (cmd->len & F_NOT) { match = !match; + } if (match) { - if (cmd->len & F_OR) + if (cmd->len & F_OR) { skip_or = 1; + } } else { - if (!(cmd->len & F_OR)) /* not an OR block, */ - break; /* try next rule */ + if (!(cmd->len & F_OR)) { /* not an OR block, */ + break; /* try next rule */ + } } + } /* end of inner for, scan opcodes */ - } /* end of inner for, scan opcodes */ - -next_rule:; /* try next rule */ - - } /* end of outer for, scan rules */ +next_rule: ; /* try next rule */ + } /* end of outer for, scan rules */ printf("ipfw: ouch!, skip past end of rules, denying packet\n"); lck_mtx_unlock(ipfw_mutex); - return(IP_FW_PORT_DENY_FLAG); + return IP_FW_PORT_DENY_FLAG; done: /* Update statistics */ @@ -2774,10 +2870,11 @@ done: return retval; pullup_failed: - if (fw_verbose) + if (fw_verbose) { printf("ipfw: pullup failed\n"); + } lck_mtx_unlock(ipfw_mutex); - return(IP_FW_PORT_DENY_FLAG); + return IP_FW_PORT_DENY_FLAG; } /* @@ -2790,8 +2887,9 @@ flush_rule_ptrs(void) { struct ip_fw *rule; - for (rule = layer3_chain; rule; rule = rule->next) + for (rule = layer3_chain; rule; rule = rule->next) { rule->next_rule = NULL; + } } /* @@ -2807,8 +2905,9 @@ flush_pipe_ptrs(struct dn_flow_set *match) for (rule = layer3_chain; rule; rule = rule->next) { ipfw_insn_pipe *cmd = (ipfw_insn_pipe *)ACTION_PTR(rule); - if (cmd->o.opcode != O_PIPE && cmd->o.opcode != O_QUEUE) + if (cmd->o.opcode != O_PIPE && cmd->o.opcode != O_QUEUE) { continue; + } /* * XXX Use bcmp/bzero to handle pipe_ptr to overcome * possible alignment problems on 64-bit architectures. @@ -2816,8 +2915,9 @@ flush_pipe_ptrs(struct dn_flow_set *match) * much about efficiency. 
*/ if (match == NULL || - !bcmp(&cmd->pipe_ptr, &match, sizeof(match)) ) + !bcmp(&cmd->pipe_ptr, &match, sizeof(match))) { bzero(&cmd->pipe_ptr, sizeof(cmd->pipe_ptr)); + } } } @@ -2832,15 +2932,16 @@ add_rule(struct ip_fw **head, struct ip_fw *input_rule) struct ip_fw *rule, *f, *prev; int l = RULESIZE(input_rule); - if (*head == NULL && input_rule->rulenum != IPFW_DEFAULT_RULE) - return (EINVAL); + if (*head == NULL && input_rule->rulenum != IPFW_DEFAULT_RULE) { + return EINVAL; + } rule = _MALLOC(l, M_IPFW, M_WAIT | M_ZERO); if (rule == NULL) { printf("ipfw2: add_rule MALLOC failed\n"); - return (ENOSPC); + return ENOSPC; } - + bcopy(input_rule, rule, l); rule->next = NULL; @@ -2850,30 +2951,33 @@ add_rule(struct ip_fw **head, struct ip_fw *input_rule) rule->bcnt = 0; rule->timestamp = 0; - if (*head == NULL) { /* default rule */ + if (*head == NULL) { /* default rule */ *head = rule; goto done; - } + } /* * If rulenum is 0, find highest numbered rule before the * default rule, and add autoinc_step */ - if (autoinc_step < 1) + if (autoinc_step < 1) { autoinc_step = 1; - else if (autoinc_step > 1000) + } else if (autoinc_step > 1000) { autoinc_step = 1000; + } if (rule->rulenum == 0) { /* * locate the highest numbered rule before default */ for (f = *head; f; f = f->next) { - if (f->rulenum == IPFW_DEFAULT_RULE) + if (f->rulenum == IPFW_DEFAULT_RULE) { break; + } rule->rulenum = f->rulenum; } - if (rule->rulenum < IPFW_DEFAULT_RULE - autoinc_step) + if (rule->rulenum < IPFW_DEFAULT_RULE - autoinc_step) { rule->rulenum += autoinc_step; + } input_rule->rulenum = rule->rulenum; } @@ -2899,8 +3003,8 @@ done: static_len_32 += RULESIZE32(input_rule); static_len_64 += RULESIZE64(input_rule); DEB(printf("ipfw: installed rule %d, static count now %d\n", - rule->rulenum, static_count);) - return (0); + rule->rulenum, static_count); ) + return 0; } /** @@ -2920,18 +3024,20 @@ delete_rule(struct ip_fw **head, struct ip_fw *prev, struct ip_fw *rule) n = rule->next; remove_dyn_rule(rule, NULL /* force removal */); - if (prev == NULL) + if (prev == NULL) { *head = n; - else + } else { prev->next = n; + } static_count--; static_len -= l; static_len_32 -= RULESIZE32(rule); static_len_64 -= RULESIZE64(rule); #if DUMMYNET - if (DUMMYNET_LOADED) + if (DUMMYNET_LOADED) { dn_ipfw_rule_delete(rule); + } #endif /* DUMMYNET */ _FREE(rule, M_IPFW); return n; @@ -2942,54 +3048,55 @@ static void print_chain(struct ip_fw **chain) { struct ip_fw *rule = *chain; - + for (; rule; rule = rule->next) { - ipfw_insn *cmd = ACTION_PTR(rule); - + ipfw_insn *cmd = ACTION_PTR(rule); + printf("ipfw: rule->rulenum = %d\n", rule->rulenum); - + if (rule->reserved_1 == IPFW_RULE_INACTIVE) { printf("ipfw: rule->reserved = IPFW_RULE_INACTIVE\n"); } - + switch (cmd->opcode) { - case O_DENY: - printf("ipfw: ACTION: Deny\n"); - break; - - case O_REJECT: - if (cmd->arg1==ICMP_REJECT_RST) - printf("ipfw: ACTION: Reset\n"); - else if (cmd->arg1==ICMP_UNREACH_HOST) - printf("ipfw: ACTION: Reject\n"); - break; - - case O_ACCEPT: - printf("ipfw: ACTION: Accept\n"); - break; - case O_COUNT: - printf("ipfw: ACTION: Count\n"); - break; - case O_DIVERT: - printf("ipfw: ACTION: Divert\n"); - break; - case O_TEE: - printf("ipfw: ACTION: Tee\n"); - break; - case O_SKIPTO: - printf("ipfw: ACTION: SkipTo\n"); - break; - case O_PIPE: - printf("ipfw: ACTION: Pipe\n"); - break; - case O_QUEUE: - printf("ipfw: ACTION: Queue\n"); - break; - case O_FORWARD_IP: - printf("ipfw: ACTION: Forward\n"); - break; - default: - printf("ipfw: invalid action! 
%d\n", cmd->opcode); + case O_DENY: + printf("ipfw: ACTION: Deny\n"); + break; + + case O_REJECT: + if (cmd->arg1 == ICMP_REJECT_RST) { + printf("ipfw: ACTION: Reset\n"); + } else if (cmd->arg1 == ICMP_UNREACH_HOST) { + printf("ipfw: ACTION: Reject\n"); + } + break; + + case O_ACCEPT: + printf("ipfw: ACTION: Accept\n"); + break; + case O_COUNT: + printf("ipfw: ACTION: Count\n"); + break; + case O_DIVERT: + printf("ipfw: ACTION: Divert\n"); + break; + case O_TEE: + printf("ipfw: ACTION: Tee\n"); + break; + case O_SKIPTO: + printf("ipfw: ACTION: SkipTo\n"); + break; + case O_PIPE: + printf("ipfw: ACTION: Pipe\n"); + break; + case O_QUEUE: + printf("ipfw: ACTION: Queue\n"); + break; + case O_FORWARD_IP: + printf("ipfw: ACTION: Forward\n"); + break; + default: + printf("ipfw: invalid action! %d\n", cmd->opcode); } } } @@ -3000,28 +3107,26 @@ flush_inactive(void *param) { struct ip_fw *inactive_rule = (struct ip_fw *)param; struct ip_fw *rule, *prev; - + lck_mtx_lock(ipfw_mutex); - - for (rule = layer3_chain, prev = NULL; rule; ) { + + for (rule = layer3_chain, prev = NULL; rule;) { if (rule == inactive_rule && rule->reserved_1 == IPFW_RULE_INACTIVE) { struct ip_fw *n = rule; - + if (prev == NULL) { layer3_chain = rule->next; - } - else { + } else { prev->next = rule->next; } rule = rule->next; _FREE(n, M_IPFW); - } - else { + } else { prev = rule; rule = rule->next; } } - + #if DEBUG_INACTIVE_RULES print_chain(&layer3_chain); #endif @@ -3031,7 +3136,7 @@ flush_inactive(void *param) static void mark_inactive(struct ip_fw **prev, struct ip_fw **rule) { - int l = RULESIZE(*rule); + int l = RULESIZE(*rule); if ((*rule)->reserved_1 != IPFW_RULE_INACTIVE) { (*rule)->reserved_1 = IPFW_RULE_INACTIVE; @@ -3039,10 +3144,10 @@ mark_inactive(struct ip_fw **prev, struct ip_fw **rule) static_len -= l; static_len_32 -= RULESIZE32(*rule); static_len_64 -= RULESIZE64(*rule); - - timeout(flush_inactive, *rule, 30*hz); /* 30 sec. */ + + timeout(flush_inactive, *rule, 30 * hz); /* 30 sec. 
*/ } - + *prev = *rule; *rule = (*rule)->next; } @@ -3058,24 +3163,23 @@ free_chain(struct ip_fw **chain, int kill_default) struct ip_fw *prev, *rule; flush_rule_ptrs(); /* more efficient to do outside the loop */ - for (prev = NULL, rule = *chain; rule ; ) + for (prev = NULL, rule = *chain; rule;) { if (kill_default || rule->set != RESVD_SET) { - ipfw_insn *cmd = ACTION_PTR(rule); - - /* skip over forwarding rules so struct isn't + ipfw_insn *cmd = ACTION_PTR(rule); + + /* skip over forwarding rules so struct isn't * deleted while pointer is still in use elsewhere */ if (cmd->opcode == O_FORWARD_IP) { mark_inactive(&prev, &rule); - } - else { + } else { rule = delete_rule(chain, prev, rule); } - } - else { + } else { prev = rule; rule = rule->next; } + } } /** @@ -3095,34 +3199,40 @@ static int del_entry(struct ip_fw **chain, u_int32_t arg) { struct ip_fw *prev = NULL, *rule = *chain; - u_int16_t rulenum; /* rule or old_set */ + u_int16_t rulenum; /* rule or old_set */ u_int8_t cmd, new_set; rulenum = arg & 0xffff; cmd = (arg >> 24) & 0xff; new_set = (arg >> 16) & 0xff; - if (cmd > 4) + if (cmd > 4) { return EINVAL; - if (new_set > RESVD_SET) + } + if (new_set > RESVD_SET) { return EINVAL; + } if (cmd == 0 || cmd == 2) { - if (rulenum >= IPFW_DEFAULT_RULE) + if (rulenum >= IPFW_DEFAULT_RULE) { return EINVAL; + } } else { - if (rulenum > RESVD_SET) /* old_set */ + if (rulenum > RESVD_SET) { /* old_set */ return EINVAL; + } } switch (cmd) { - case 0: /* delete rules with given number */ + case 0: /* delete rules with given number */ /* * locate first rule to delete */ - for (; rule->rulenum < rulenum; prev = rule, rule = rule->next) + for (; rule->rulenum < rulenum; prev = rule, rule = rule->next) { ; - if (rule->rulenum != rulenum) + } + if (rule->rulenum != rulenum) { return EINVAL; + } /* * flush pointers outside the loop, then delete all matching @@ -3130,61 +3240,64 @@ del_entry(struct ip_fw **chain, u_int32_t arg) */ flush_rule_ptrs(); while (rule->rulenum == rulenum) { - ipfw_insn *insn = ACTION_PTR(rule); - - /* keep forwarding rules around so struct isn't + ipfw_insn *insn = ACTION_PTR(rule); + + /* keep forwarding rules around so struct isn't * deleted while pointer is still in use elsewhere */ if (insn->opcode == O_FORWARD_IP) { mark_inactive(&prev, &rule); - } - else { + } else { rule = delete_rule(chain, prev, rule); } } break; - case 1: /* delete all rules with given set number */ + case 1: /* delete all rules with given set number */ flush_rule_ptrs(); while (rule->rulenum < IPFW_DEFAULT_RULE) { if (rule->set == rulenum) { - ipfw_insn *insn = ACTION_PTR(rule); - - /* keep forwarding rules around so struct isn't + ipfw_insn *insn = ACTION_PTR(rule); + + /* keep forwarding rules around so struct isn't * deleted while pointer is still in use elsewhere */ if (insn->opcode == O_FORWARD_IP) { mark_inactive(&prev, &rule); - } - else { + } else { rule = delete_rule(chain, prev, rule); } - } - else { + } else { prev = rule; rule = rule->next; } } break; - case 2: /* move rules with given number to new set */ - for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next) - if (rule->rulenum == rulenum) + case 2: /* move rules with given number to new set */ + for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next) { + if (rule->rulenum == rulenum) { rule->set = new_set; + } + } break; case 3: /* move rules with given set number to new set */ - for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next) - if (rule->set == rulenum) + for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = 
rule->next) { + if (rule->set == rulenum) { rule->set = new_set; + } + } break; case 4: /* swap two sets */ - for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next) - if (rule->set == rulenum) + for (; rule->rulenum < IPFW_DEFAULT_RULE; rule = rule->next) { + if (rule->set == rulenum) { rule->set = new_set; - else if (rule->set == new_set) + } else if (rule->set == new_set) { rule->set = rulenum; + } + } break; } return 0; @@ -3202,8 +3315,9 @@ clear_counters(struct ip_fw *rule, int log_only) rule->bcnt = rule->pcnt = 0; rule->timestamp = 0; } - if (l->o.opcode == O_LOG) + if (l->o.opcode == O_LOG) { l->log_left = l->max_log; + } } /** @@ -3220,17 +3334,18 @@ zero_entry(int rulenum, int log_only) if (rulenum == 0) { norule_counter = 0; - for (rule = layer3_chain; rule; rule = rule->next) + for (rule = layer3_chain; rule; rule = rule->next) { clear_counters(rule, log_only); + } msg = log_only ? "ipfw: All logging counts reset.\n" : - "ipfw: Accounting cleared.\n"; + "ipfw: Accounting cleared.\n"; } else { int cleared = 0; /* * We can have multiple rules with the same number, so we * need to clear them all. */ - for (rule = layer3_chain; rule; rule = rule->next) + for (rule = layer3_chain; rule; rule = rule->next) { if (rule->rulenum == rulenum) { while (rule && rule->rulenum == rulenum) { clear_counters(rule, log_only); @@ -3239,16 +3354,17 @@ zero_entry(int rulenum, int log_only) cleared = 1; break; } - if (!cleared) /* we did not find any matching rules */ - return (EINVAL); + } + if (!cleared) { /* we did not find any matching rules */ + return EINVAL; + } msg = log_only ? "ipfw: Entry %d logging count reset.\n" : - "ipfw: Entry %d cleared.\n"; + "ipfw: Entry %d cleared.\n"; } - if (fw_verbose) - { + if (fw_verbose) { dolog((LOG_AUTHPRIV | LOG_NOTICE, msg, rulenum)); } - return (0); + return 0; } /* @@ -3259,32 +3375,32 @@ static int check_ipfw_struct(struct ip_fw *rule, int size) { int l, cmdlen = 0; - int have_action=0; + int have_action = 0; ipfw_insn *cmd; if (size < sizeof(*rule)) { printf("ipfw: rule too short\n"); - return (EINVAL); + return EINVAL; } /* first, check for valid size */ l = RULESIZE(rule); if (l != size) { printf("ipfw: size mismatch (have %d want %d)\n", size, l); - return (EINVAL); + return EINVAL; } /* * Now go for the individual checks. Very simple ones, basically only * instruction sizes. 
*/ - for (l = rule->cmd_len, cmd = rule->cmd ; - l > 0 ; l -= cmdlen, cmd += cmdlen) { + for (l = rule->cmd_len, cmd = rule->cmd; + l > 0; l -= cmdlen, cmd += cmdlen) { cmdlen = F_LEN(cmd); if (cmdlen > l) { printf("ipfw: opcode %d size truncated\n", cmd->opcode); return EINVAL; } - DEB(printf("ipfw: opcode %d\n", cmd->opcode);) + DEB(printf("ipfw: opcode %d\n", cmd->opcode); ) switch (cmd->opcode) { case O_PROBE_STATE: case O_KEEP_STATE: @@ -3304,8 +3420,9 @@ check_ipfw_struct(struct ip_fw *rule, int size) case O_ESTAB: case O_VERREVPATH: case O_IPSEC: - if (cmdlen != F_INSN_SIZE(ipfw_insn)) + if (cmdlen != F_INSN_SIZE(ipfw_insn)) { goto bad_size; + } break; case O_UID: #ifndef __APPLE__ @@ -3317,22 +3434,25 @@ check_ipfw_struct(struct ip_fw *rule, int size) case O_TCPACK: case O_PROB: case O_ICMPTYPE: - if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) + if (cmdlen != F_INSN_SIZE(ipfw_insn_u32)) { goto bad_size; + } break; case O_LIMIT: - if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) + if (cmdlen != F_INSN_SIZE(ipfw_insn_limit)) { goto bad_size; + } break; case O_LOG: - if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) + if (cmdlen != F_INSN_SIZE(ipfw_insn_log)) { goto bad_size; - + } + /* enforce logging limit */ if (fw_verbose && - ((ipfw_insn_log *)cmd)->max_log == 0 && verbose_limit != 0) { + ((ipfw_insn_log *)cmd)->max_log == 0 && verbose_limit != 0) { ((ipfw_insn_log *)cmd)->max_log = verbose_limit; } @@ -3344,58 +3464,66 @@ check_ipfw_struct(struct ip_fw *rule, int size) case O_IP_SRC_MASK: case O_IP_DST_MASK: /* only odd command lengths */ - if ( !(cmdlen & 1) || cmdlen > 31) + if (!(cmdlen & 1) || cmdlen > 31) { goto bad_size; + } break; case O_IP_SRC_SET: case O_IP_DST_SET: if (cmd->arg1 == 0 || cmd->arg1 > 256) { printf("ipfw: invalid set size %d\n", - cmd->arg1); + cmd->arg1); return EINVAL; } if (cmdlen != F_INSN_SIZE(ipfw_insn_u32) + - (cmd->arg1+31)/32 ) + (cmd->arg1 + 31) / 32) { goto bad_size; + } break; case O_MACADDR2: - if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) + if (cmdlen != F_INSN_SIZE(ipfw_insn_mac)) { goto bad_size; + } break; case O_NOP: case O_IPID: case O_IPTTL: case O_IPLEN: - if (cmdlen < 1 || cmdlen > 31) + if (cmdlen < 1 || cmdlen > 31) { goto bad_size; + } break; case O_MAC_TYPE: case O_IP_SRCPORT: case O_IP_DSTPORT: /* XXX artificial limit, 30 port pairs */ - if (cmdlen < 2 || cmdlen > 31) + if (cmdlen < 2 || cmdlen > 31) { goto bad_size; + } break; case O_RECV: case O_XMIT: case O_VIA: - if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) + if (cmdlen != F_INSN_SIZE(ipfw_insn_if)) { goto bad_size; + } break; case O_PIPE: case O_QUEUE: - if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe)) + if (cmdlen != F_INSN_SIZE(ipfw_insn_pipe)) { goto bad_size; + } goto check_action; case O_FORWARD_IP: - if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) + if (cmdlen != F_INSN_SIZE(ipfw_insn_sa)) { goto bad_size; + } goto check_action; case O_FORWARD_MAC: /* XXX not implemented yet */ @@ -3407,26 +3535,27 @@ check_ipfw_struct(struct ip_fw *rule, int size) case O_SKIPTO: case O_DIVERT: case O_TEE: - if (cmdlen != F_INSN_SIZE(ipfw_insn)) + if (cmdlen != F_INSN_SIZE(ipfw_insn)) { goto bad_size; + } check_action: if (have_action) { printf("ipfw: opcode %d, multiple actions" - " not allowed\n", - cmd->opcode); + " not allowed\n", + cmd->opcode); return EINVAL; } have_action = 1; if (l != cmdlen) { printf("ipfw: opcode %d, action must be" - " last opcode\n", - cmd->opcode); + " last opcode\n", + cmd->opcode); return EINVAL; } break; default: printf("ipfw: opcode %d, unknown opcode\n", - cmd->opcode); + cmd->opcode); 
return EINVAL; } } @@ -3438,7 +3567,7 @@ check_action: bad_size: printf("ipfw: opcode %d size %d wrong\n", - cmd->opcode, cmdlen); + cmd->opcode, cmdlen); return EINVAL; } @@ -3446,17 +3575,16 @@ bad_size: static void ipfw_kev_post_msg(u_int32_t event_code) { - struct kev_msg ev_msg; + struct kev_msg ev_msg; bzero(&ev_msg, sizeof(struct kev_msg)); - + ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_FIREWALL_CLASS; ev_msg.kev_subclass = KEV_IPFW_SUBCLASS; ev_msg.event_code = event_code; kev_post_msg(&ev_msg); - } /** @@ -3465,21 +3593,21 @@ ipfw_kev_post_msg(u_int32_t event_code) static int ipfw_ctl(struct sockopt *sopt) { -#define RULE_MAXSIZE (256*sizeof(u_int32_t)) +#define RULE_MAXSIZE (256*sizeof(u_int32_t)) u_int32_t api_version; int command; int error; size_t size; - size_t rulesize = RULE_MAXSIZE; - struct ip_fw *bp , *buf, *rule; - int is64user = 0; - + size_t rulesize = RULE_MAXSIZE; + struct ip_fw *bp, *buf, *rule; + int is64user = 0; + /* copy of orig sopt to send to ipfw_get_command_and_version() */ - struct sockopt tmp_sopt = *sopt; + struct sockopt tmp_sopt = *sopt; struct timeval timenow; getmicrotime(&timenow); - + /* * Disallow modifications in really-really secure mode, but still allow * the logging counters to be reset. @@ -3488,11 +3616,13 @@ ipfw_ctl(struct sockopt *sopt) (sopt->sopt_dir == SOPT_SET && sopt->sopt_name != IP_FW_RESETLOG)) { #if __FreeBSD_version >= 500034 error = securelevel_ge(sopt->sopt_td->td_ucred, 3); - if (error) - return (error); + if (error) { + return error; + } #else /* FreeBSD 4.x */ - if (securelevel >= 3) - return (EPERM); + if (securelevel >= 3) { + return EPERM; + } #endif } @@ -3502,14 +3632,15 @@ ipfw_ctl(struct sockopt *sopt) /* error getting the version */ return error; } - - if (proc_is64bit(sopt->sopt_p)) + + if (proc_is64bit(sopt->sopt_p)) { is64user = 1; + } switch (command) { case IP_FW_GET: { - size_t dynrulesize; + size_t dynrulesize; /* * pass up a copy of the current rules. Static rules * come first (the last of which has number IPFW_DEFAULT_RULE), @@ -3517,17 +3648,19 @@ ipfw_ctl(struct sockopt *sopt) * The last dynamic rule has NULL in the "next" field. 
*/ lck_mtx_lock(ipfw_mutex); - - if (is64user){ + + if (is64user) { size = Get64static_len(); dynrulesize = sizeof(ipfw_dyn_rule_64); - if (ipfw_dyn_v) + if (ipfw_dyn_v) { size += (dyn_count * dynrulesize); - }else { + } + } else { size = Get32static_len(); dynrulesize = sizeof(ipfw_dyn_rule_32); - if (ipfw_dyn_v) + if (ipfw_dyn_v) { size += (dyn_count * dynrulesize); + } } /* @@ -3543,25 +3676,24 @@ ipfw_ctl(struct sockopt *sopt) } bp = buf; - for (rule = layer3_chain; rule ; rule = rule->next) { - + for (rule = layer3_chain; rule; rule = rule->next) { if (rule->reserved_1 == IPFW_RULE_INACTIVE) { continue; } - - if (is64user){ + + if (is64user) { int rulesize_64; copyto64fw( rule, (struct ip_fw_64 *)bp, size); - bcopy(&set_disable, &(( (struct ip_fw_64*)bp)->next_rule), sizeof(set_disable)); + bcopy(&set_disable, &(((struct ip_fw_64*)bp)->next_rule), sizeof(set_disable)); /* do not use macro RULESIZE64 since we want RULESIZE for ip_fw_64 */ rulesize_64 = sizeof(struct ip_fw_64) + ((struct ip_fw_64 *)(bp))->cmd_len * 4 - 4; bp = (struct ip_fw *)((char *)bp + rulesize_64); - }else{ + } else { int rulesize_32; copyto32fw( rule, (struct ip_fw_32*)bp, size); - bcopy(&set_disable, &(( (struct ip_fw_32*)bp)->next_rule), sizeof(set_disable)); + bcopy(&set_disable, &(((struct ip_fw_32*)bp)->next_rule), sizeof(set_disable)); /* do not use macro RULESIZE32 since we want RULESIZE for ip_fw_32 */ rulesize_32 = sizeof(struct ip_fw_32) + ((struct ip_fw_32 *)(bp))->cmd_len * 4 - 4; bp = (struct ip_fw *)((char *)bp + rulesize_32); @@ -3571,14 +3703,14 @@ ipfw_ctl(struct sockopt *sopt) int i; ipfw_dyn_rule *p; char *dst, *last = NULL; - + dst = (char *)bp; - for (i = 0 ; i < curr_dyn_buckets ; i++ ) - for ( p = ipfw_dyn_v[i] ; p != NULL ; - p = p->next, dst += dynrulesize ) { - if ( is64user ){ - ipfw_dyn_rule_64 *ipfw_dyn_dst; - + for (i = 0; i < curr_dyn_buckets; i++) { + for (p = ipfw_dyn_v[i]; p != NULL; + p = p->next, dst += dynrulesize) { + if (is64user) { + ipfw_dyn_rule_64 *ipfw_dyn_dst; + ipfw_dyn_dst = (ipfw_dyn_rule_64 *)dst; /* * store a non-null value in "next". @@ -3593,8 +3725,8 @@ ipfw_ctl(struct sockopt *sopt) ipfw_dyn_dst->bcnt = p->bcnt; externalize_flow_id(&ipfw_dyn_dst->id, &p->id); ipfw_dyn_dst->expire = - TIME_LEQ(p->expire, timenow.tv_sec) ? - 0 : p->expire - timenow.tv_sec; + TIME_LEQ(p->expire, timenow.tv_sec) ? + 0 : p->expire - timenow.tv_sec; ipfw_dyn_dst->bucket = p->bucket; ipfw_dyn_dst->state = p->state; ipfw_dyn_dst->ack_fwd = p->ack_fwd; @@ -3603,8 +3735,8 @@ ipfw_ctl(struct sockopt *sopt) ipfw_dyn_dst->count = p->count; last = (char*)ipfw_dyn_dst; } else { - ipfw_dyn_rule_32 *ipfw_dyn_dst; - + ipfw_dyn_rule_32 *ipfw_dyn_dst; + ipfw_dyn_dst = (ipfw_dyn_rule_32 *)dst; /* * store a non-null value in "next". @@ -3619,8 +3751,8 @@ ipfw_ctl(struct sockopt *sopt) ipfw_dyn_dst->bcnt = p->bcnt; externalize_flow_id(&ipfw_dyn_dst->id, &p->id); ipfw_dyn_dst->expire = - TIME_LEQ(p->expire, timenow.tv_sec) ? - 0 : p->expire - timenow.tv_sec; + TIME_LEQ(p->expire, timenow.tv_sec) ? 
+ 0 : p->expire - timenow.tv_sec; ipfw_dyn_dst->bucket = p->bucket; ipfw_dyn_dst->state = p->state; ipfw_dyn_dst->ack_fwd = p->ack_fwd; @@ -3630,32 +3762,34 @@ ipfw_ctl(struct sockopt *sopt) last = (char*)ipfw_dyn_dst; } } + } /* mark last dynamic rule */ if (last != NULL) { - if (is64user) + if (is64user) { ((ipfw_dyn_rule_64 *)last)->next = 0; - else + } else { ((ipfw_dyn_rule_32 *)last)->next = 0; + } } } lck_mtx_unlock(ipfw_mutex); /* convert back if necessary and copyout */ if (api_version == IP_FW_VERSION_0) { - int i, len = 0; - struct ip_old_fw *buf2, *rule_vers0; - + int i, len = 0; + struct ip_old_fw *buf2, *rule_vers0; + lck_mtx_lock(ipfw_mutex); buf2 = _MALLOC(static_count * sizeof(struct ip_old_fw), M_TEMP, M_WAITOK | M_ZERO); if (buf2 == 0) { lck_mtx_unlock(ipfw_mutex); error = ENOBUFS; } - + if (!error) { bp = buf; rule_vers0 = buf2; - + for (i = 0; i < static_count; i++) { /* static rules have different sizes */ int j = RULESIZE(bp); @@ -3669,24 +3803,24 @@ ipfw_ctl(struct sockopt *sopt) _FREE(buf2, M_TEMP); } } else if (api_version == IP_FW_VERSION_1) { - int i, len = 0, buf_size; - struct ip_fw_compat *buf2; - size_t ipfwcompsize; - size_t ipfwdyncompsize; - char *rule_vers1; + int i, len = 0, buf_size; + struct ip_fw_compat *buf2; + size_t ipfwcompsize; + size_t ipfwdyncompsize; + char *rule_vers1; lck_mtx_lock(ipfw_mutex); - if ( is64user ){ + if (is64user) { ipfwcompsize = sizeof(struct ip_fw_compat_64); ipfwdyncompsize = sizeof(struct ipfw_dyn_rule_compat_64); } else { ipfwcompsize = sizeof(struct ip_fw_compat_32); ipfwdyncompsize = sizeof(struct ipfw_dyn_rule_compat_32); } - - buf_size = static_count * ipfwcompsize + - dyn_count * ipfwdyncompsize; - + + buf_size = static_count * ipfwcompsize + + dyn_count * ipfwdyncompsize; + buf2 = _MALLOC(buf_size, M_TEMP, M_WAITOK | M_ZERO); if (buf2 == 0) { lck_mtx_unlock(ipfw_mutex); @@ -3695,16 +3829,16 @@ ipfw_ctl(struct sockopt *sopt) if (!error) { bp = buf; rule_vers1 = (char*)buf2; - + /* first do static rules */ for (i = 0; i < static_count; i++) { /* static rules have different sizes */ - if ( is64user ){ + if (is64user) { int rulesize_64; ipfw_convert_from_latest(bp, (void *)rule_vers1, api_version, is64user); rulesize_64 = sizeof(struct ip_fw_64) + ((struct ip_fw_64 *)(bp))->cmd_len * 4 - 4; bp = (struct ip_fw *)((char *)bp + rulesize_64); - }else { + } else { int rulesize_32; ipfw_convert_from_latest(bp, (void *)rule_vers1, api_version, is64user); rulesize_32 = sizeof(struct ip_fw_32) + ((struct ip_fw_32 *)(bp))->cmd_len * 4 - 4; @@ -3714,10 +3848,11 @@ ipfw_ctl(struct sockopt *sopt) rule_vers1 += ipfwcompsize; } /* now do dynamic rules */ - if ( is64user ) - cp_dyn_to_comp_64( (struct ipfw_dyn_rule_compat_64 *)rule_vers1, &len); - else - cp_dyn_to_comp_32( (struct ipfw_dyn_rule_compat_32 *)rule_vers1, &len); + if (is64user) { + cp_dyn_to_comp_64((struct ipfw_dyn_rule_compat_64 *)rule_vers1, &len); + } else { + cp_dyn_to_comp_32((struct ipfw_dyn_rule_compat_32 *)rule_vers1, &len); + } lck_mtx_unlock(ipfw_mutex); error = sooptcopyout(sopt, buf2, len); @@ -3726,11 +3861,11 @@ ipfw_ctl(struct sockopt *sopt) } else { error = sooptcopyout(sopt, buf, size); } - + _FREE(buf, M_TEMP); break; } - + case IP_FW_FLUSH: /* * Normally we cannot release the lock on each iteration. 
@@ -3749,14 +3884,14 @@ ipfw_ctl(struct sockopt *sopt) free_chain(&layer3_chain, 0 /* keep default rule */); fw_bypass = 1; #if DEBUG_INACTIVE_RULES - print_chain(&layer3_chain); + print_chain(&layer3_chain); #endif lck_mtx_unlock(ipfw_mutex); break; case IP_FW_ADD: { - size_t savedsopt_valsize=0; + size_t savedsopt_valsize = 0; rule = _MALLOC(RULE_MAXSIZE, M_TEMP, M_WAITOK | M_ZERO); if (rule == 0) { error = ENOBUFS; @@ -3765,13 +3900,11 @@ ipfw_ctl(struct sockopt *sopt) if (api_version != IP_FW_CURRENT_API_VERSION) { error = ipfw_convert_to_latest(sopt, rule, api_version, is64user); - } - else { + } else { savedsopt_valsize = sopt->sopt_valsize; /* it might get modified in sooptcopyin_fw */ - error = sooptcopyin_fw( sopt, rule, &rulesize); - + error = sooptcopyin_fw( sopt, rule, &rulesize); } - + if (!error) { if ((api_version == IP_FW_VERSION_0) || (api_version == IP_FW_VERSION_1)) { /* the rule has already been checked so just @@ -3784,45 +3917,47 @@ ipfw_ctl(struct sockopt *sopt) if (!error) { lck_mtx_lock(ipfw_mutex); error = add_rule(&layer3_chain, rule); - if (!error && fw_bypass) + if (!error && fw_bypass) { fw_bypass = 0; + } lck_mtx_unlock(ipfw_mutex); - + size = RULESIZE(rule); if (!error && sopt->sopt_dir == SOPT_GET) { /* convert back if necessary and copyout */ if (api_version == IP_FW_VERSION_0) { - struct ip_old_fw rule_vers0 = {}; - + struct ip_old_fw rule_vers0 = {}; + ipfw_convert_from_latest(rule, &rule_vers0, api_version, is64user); sopt->sopt_valsize = sizeof(struct ip_old_fw); - + error = sooptcopyout(sopt, &rule_vers0, sizeof(struct ip_old_fw)); } else if (api_version == IP_FW_VERSION_1) { - struct ip_fw_compat rule_vers1 = {}; + struct ip_fw_compat rule_vers1 = {}; ipfw_convert_from_latest(rule, &rule_vers1, api_version, is64user); sopt->sopt_valsize = sizeof(struct ip_fw_compat); - + error = sooptcopyout(sopt, &rule_vers1, sizeof(struct ip_fw_compat)); } else { char *userrule; userrule = _MALLOC(savedsopt_valsize, M_TEMP, M_WAITOK | M_ZERO); - if ( userrule == NULL ) + if (userrule == NULL) { userrule = (char*)rule; - if (proc_is64bit(sopt->sopt_p)){ - copyto64fw( rule, (struct ip_fw_64*)userrule, savedsopt_valsize); } - else { - copyto32fw( rule, (struct ip_fw_32*)userrule, savedsopt_valsize); + if (proc_is64bit(sopt->sopt_p)) { + copyto64fw( rule, (struct ip_fw_64*)userrule, savedsopt_valsize); + } else { + copyto32fw( rule, (struct ip_fw_32*)userrule, savedsopt_valsize); } error = sooptcopyout(sopt, userrule, savedsopt_valsize); - if ( userrule ) + if (userrule) { _FREE(userrule, M_TEMP); + } } } } } - + _FREE(rule, M_TEMP); break; } @@ -3830,26 +3965,25 @@ ipfw_ctl(struct sockopt *sopt) { /* * IP_FW_DEL is used for deleting single rules or sets, - * and (ab)used to atomically manipulate sets. + * and (ab)used to atomically manipulate sets. * rule->rulenum != 0 indicates single rule delete * rule->set_masks used to manipulate sets - * rule->set_masks[0] contains info on sets to be + * rule->set_masks[0] contains info on sets to be * disabled, swapped, or moved * rule->set_masks[1] contains sets to be enabled. 
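The set_masks convention described in the comment above deserves a concrete illustration before the handler code. With rulenum zero, the top byte of set_masks[0] carries a set command; with no command byte, the two words act as disable and enable masks folded into the global set_disable word, and RESVD_SET (set 31, which holds the default rule) is never allowed to go disabled. A sketch of that arithmetic, assuming the RESVD_SET masking defined in ip_fw2.h further down in this patch:

/*
 * Sketch only, not part of the patch: the IP_FW_DEL set manipulation.
 * A set bit in set_disable means "this rule set is disabled".
 */
#include <stdint.h>

#define RESVD_SET 31	/* set for default and persistent rules */

/* Set command packed into the top byte of set_masks[0]. */
static uint8_t
set_command(uint32_t set_masks0)
{
	return (set_masks0 >> 24) & 0xff;
}

/* masks[0]: sets to disable; masks[1]: sets to enable. */
static uint32_t
fold_set_masks(uint32_t set_disable, const uint32_t masks[2])
{
	return (set_disable | masks[0]) & ~masks[1] & ~(1u << RESVD_SET);
}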
*/ - + /* there is only a simple rule passed in * (no cmds), so use a temp struct to copy */ - struct ip_fw temp_rule; - u_int32_t arg; - u_int8_t cmd; - + struct ip_fw temp_rule; + u_int32_t arg; + u_int8_t cmd; + bzero(&temp_rule, sizeof(struct ip_fw)); if (api_version != IP_FW_CURRENT_API_VERSION) { error = ipfw_convert_to_latest(sopt, &temp_rule, api_version, is64user); - } - else { + } else { error = sooptcopyin_fw(sopt, &temp_rule, 0 ); } @@ -3858,34 +3992,33 @@ ipfw_ctl(struct sockopt *sopt) * single rules or atomically manipulating sets */ lck_mtx_lock(ipfw_mutex); - + arg = temp_rule.set_masks[0]; cmd = (arg >> 24) & 0xff; - + if (temp_rule.rulenum) { /* single rule */ error = del_entry(&layer3_chain, temp_rule.rulenum); #if DEBUG_INACTIVE_RULES print_chain(&layer3_chain); #endif - } - else if (cmd) { + } else if (cmd) { /* set reassignment - see comment above del_entry() for details */ error = del_entry(&layer3_chain, temp_rule.set_masks[0]); #if DEBUG_INACTIVE_RULES print_chain(&layer3_chain); #endif - } - else if (temp_rule.set_masks[0] != 0 || - temp_rule.set_masks[1] != 0) { + } else if (temp_rule.set_masks[0] != 0 || + temp_rule.set_masks[1] != 0) { /* set enable/disable */ set_disable = - (set_disable | temp_rule.set_masks[0]) & ~temp_rule.set_masks[1] & - ~(1<<RESVD_SET); /* set RESVD_SET always enabled */ + (set_disable | temp_rule.set_masks[0]) & ~temp_rule.set_masks[1] & + ~(1 << RESVD_SET); /* set RESVD_SET always enabled */ } - if (!layer3_chain->next) + + if (!layer3_chain->next) { fw_bypass = 1; + } lck_mtx_unlock(ipfw_mutex); } break; @@ -3897,15 +4030,14 @@ ipfw_ctl(struct sockopt *sopt) * (no cmds), so use a temp struct to copy */ struct ip_fw temp_rule; - + bzero(&temp_rule, sizeof(struct ip_fw)); - + if (api_version != IP_FW_CURRENT_API_VERSION) { error = ipfw_convert_to_latest(sopt, &temp_rule, api_version, is64user); - } - else { + } else { if (sopt->sopt_val != 0) { - error = sooptcopyin_fw( sopt, &temp_rule, 0); + error = sooptcopyin_fw( sopt, &temp_rule, 0); } } @@ -3923,25 +4055,25 @@ ipfw_ctl(struct sockopt *sopt) if (error != EINVAL) { switch (command) { - case IP_FW_ADD: - case IP_OLD_FW_ADD: - ipfw_kev_post_msg(KEV_IPFW_ADD); - break; - case IP_OLD_FW_DEL: - case IP_FW_DEL: - ipfw_kev_post_msg(KEV_IPFW_DEL); - break; - case IP_FW_FLUSH: - case IP_OLD_FW_FLUSH: - ipfw_kev_post_msg(KEV_IPFW_FLUSH); - break; + case IP_FW_ADD: + case IP_OLD_FW_ADD: + ipfw_kev_post_msg(KEV_IPFW_ADD); + break; + case IP_OLD_FW_DEL: + case IP_FW_DEL: + ipfw_kev_post_msg(KEV_IPFW_DEL); + break; + case IP_FW_FLUSH: + case IP_OLD_FW_FLUSH: + ipfw_kev_post_msg(KEV_IPFW_FLUSH); + break; - default: - break; + default: + break; } } - return (error); + return error; } /** @@ -3967,14 +4099,15 @@ ipfw_tick(__unused void * unused) static int stealth_cnt = 0; if (ipfw_stealth_stats_needs_flush) { - stealth_cnt++; - if (!(stealth_cnt % IPFW_STEALTH_TIMEOUT_FREQUENCY)) { - ipfw_stealth_flush_stats(); - } + stealth_cnt++; + if (!(stealth_cnt % IPFW_STEALTH_TIMEOUT_FREQUENCY)) { + ipfw_stealth_flush_stats(); + } } - if (dyn_keepalive == 0 || ipfw_dyn_v == NULL || dyn_count == 0) + if (dyn_keepalive == 0 || ipfw_dyn_v == NULL || dyn_count == 0) { goto done; + } getmicrotime(&timenow); @@ -3986,45 +4119,51 @@ ipfw_tick(__unused void * unused) */ m0 = NULL; mtailp = &m0; - + lck_mtx_lock(ipfw_mutex); - for (i = 0 ; i < curr_dyn_buckets ; i++) { - for (q = ipfw_dyn_v[i] ; q ; q = q->next ) { - if (q->dyn_type == O_LIMIT_PARENT) + for (i = 0; i < curr_dyn_buckets; i++) { + for (q = ipfw_dyn_v[i]; q; q = q->next) { + if (q->dyn_type == O_LIMIT_PARENT) { continue; - if (q->id.proto != IPPROTO_TCP) + } + if (q->id.proto != IPPROTO_TCP) { continue; - if ( (q->state & BOTH_SYN) != BOTH_SYN)
+ } + if ((q->state & BOTH_SYN) != BOTH_SYN) { continue; - if (TIME_LEQ( timenow.tv_sec+dyn_keepalive_interval, - q->expire)) - continue; /* too early */ - if (TIME_LEQ(q->expire, timenow.tv_sec)) - continue; /* too late, rule expired */ - + } + if (TIME_LEQ( timenow.tv_sec + dyn_keepalive_interval, + q->expire)) { + continue; /* too early */ + } + if (TIME_LEQ(q->expire, timenow.tv_sec)) { + continue; /* too late, rule expired */ + } *mtailp = send_pkt(&(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN); - if (*mtailp != NULL) + if (*mtailp != NULL) { mtailp = &(*mtailp)->m_nextpkt; + } *mtailp = send_pkt(&(q->id), q->ack_fwd - 1, q->ack_rev, 0); - if (*mtailp != NULL) + if (*mtailp != NULL) { mtailp = &(*mtailp)->m_nextpkt; + } } } lck_mtx_unlock(ipfw_mutex); for (m = mnext = m0; m != NULL; m = mnext) { - struct route sro; /* fake route */ + struct route sro; /* fake route */ mnext = m->m_nextpkt; m->m_nextpkt = NULL; - bzero (&sro, sizeof (sro)); + bzero(&sro, sizeof(sro)); ip_output(m, NULL, &sro, 0, NULL, NULL); ROUTE_RELEASE(&sro); } done: - timeout_with_leeway(ipfw_tick, NULL, dyn_keepalive_period*hz, - DYN_KEEPALIVE_LEEWAY*hz); + timeout_with_leeway(ipfw_tick, NULL, dyn_keepalive_period * hz, + DYN_KEEPALIVE_LEEWAY * hz); } void @@ -4050,18 +4189,17 @@ ipfw_init(void) default_rule.cmd[0].len = 1; default_rule.cmd[0].opcode = #ifdef IPFIREWALL_DEFAULT_TO_ACCEPT - (1) ? O_ACCEPT : + (1) ? O_ACCEPT : #endif - O_DENY; + O_DENY; if (add_rule(&layer3_chain, &default_rule)) { printf("ipfw2: add_rule failed adding default rule\n"); printf("ipfw2 failed initialization!!\n"); fw_enable = 0; - } - else { + } else { ip_fw_default_rule = layer3_chain; - + #ifdef IPFIREWALL_VERBOSE fw_verbose = 1; #endif @@ -4069,18 +4207,19 @@ ipfw_init(void) verbose_limit = IPFIREWALL_VERBOSE_LIMIT; #endif if (fw_verbose) { - if (!verbose_limit) + if (!verbose_limit) { printf("ipfw2 verbose logging enabled: unlimited logging by default\n"); - else + } else { printf("ipfw2 verbose logging enabled: limited to %d packets/entry by default\n", - verbose_limit); + verbose_limit); + } } } ip_fw_chk_ptr = ipfw_chk; ip_fw_ctl_ptr = ipfw_ctl; - ipfwstringlen = strlen( ipfwstring ); + ipfwstringlen = strlen( ipfwstring ); timeout(ipfw_tick, NULL, hz); } diff --git a/bsd/netinet/ip_fw2.h b/bsd/netinet/ip_fw2.h index 5e7a3ffdf..6137ac792 100644 --- a/bsd/netinet/ip_fw2.h +++ b/bsd/netinet/ip_fw2.h @@ -62,34 +62,34 @@ */ /*! - @defined KEV_IPFW_SUBCLASS - @discussion The kernel event subclass for IP Firewall. -*/ -#define KEV_IPFW_SUBCLASS 1 + * @defined KEV_IPFW_SUBCLASS + * @discussion The kernel event subclass for IP Firewall. + */ +#define KEV_IPFW_SUBCLASS 1 /*! - @defined KEV_IPFW_ADD - @discussion The event code indicating a rule has been added. -*/ -#define KEV_IPFW_ADD 1 + * @defined KEV_IPFW_ADD + * @discussion The event code indicating a rule has been added. + */ +#define KEV_IPFW_ADD 1 /*! - @defined KEV_IPFW_DEL - @discussion The event code indicating a rule has been removed. -*/ -#define KEV_IPFW_DEL 2 + * @defined KEV_IPFW_DEL + * @discussion The event code indicating a rule has been removed. + */ +#define KEV_IPFW_DEL 2 /*! - @defined KEV_IPFW_FLUSH - @discussion The event code indicating the rule set has been flushed. -*/ -#define KEV_IPFW_FLUSH 3 + * @defined KEV_IPFW_FLUSH + * @discussion The event code indicating the rule set has been flushed. + */ +#define KEV_IPFW_FLUSH 3 /*! 
- @defined KEV_IPFW_ENABLE - @discussion The event code indicating the enable flag has been changed -*/ -#define KEV_IPFW_ENABLE 4 + * @defined KEV_IPFW_ENABLE + * @discussion The event code indicating the enable flag has been changed + */ +#define KEV_IPFW_ENABLE 4 @@ -112,86 +112,86 @@ * to 256 different opcodes. */ -enum ipfw_opcodes { /* arguments (4 byte each) */ +enum ipfw_opcodes { /* arguments (4 byte each) */ O_NOP, - O_IP_SRC, /* u32 = IP */ - O_IP_SRC_MASK, /* ip = IP/mask */ - O_IP_SRC_ME, /* none */ - O_IP_SRC_SET, /* u32=base, arg1=len, bitmap */ - - O_IP_DST, /* u32 = IP */ - O_IP_DST_MASK, /* ip = IP/mask */ - O_IP_DST_ME, /* none */ - O_IP_DST_SET, /* u32=base, arg1=len, bitmap */ - - O_IP_SRCPORT, /* (n)port list:mask 4 byte ea */ - O_IP_DSTPORT, /* (n)port list:mask 4 byte ea */ - O_PROTO, /* arg1=protocol */ - - O_MACADDR2, /* 2 mac addr:mask */ - O_MAC_TYPE, /* same as srcport */ - - O_LAYER2, /* none */ - O_IN, /* none */ - O_FRAG, /* none */ - - O_RECV, /* none */ - O_XMIT, /* none */ - O_VIA, /* none */ - - O_IPOPT, /* arg1 = 2*u8 bitmap */ - O_IPLEN, /* arg1 = len */ - O_IPID, /* arg1 = id */ - - O_IPTOS, /* arg1 = id */ - O_IPPRECEDENCE, /* arg1 = precedence << 5 */ - O_IPTTL, /* arg1 = TTL */ - - O_IPVER, /* arg1 = version */ - O_UID, /* u32 = id */ - O_GID, /* u32 = id */ - O_ESTAB, /* none (tcp established) */ - O_TCPFLAGS, /* arg1 = 2*u8 bitmap */ - O_TCPWIN, /* arg1 = desired win */ - O_TCPSEQ, /* u32 = desired seq. */ - O_TCPACK, /* u32 = desired seq. */ - O_ICMPTYPE, /* u32 = icmp bitmap */ - O_TCPOPTS, /* arg1 = 2*u8 bitmap */ - - O_VERREVPATH, /* none */ - - O_PROBE_STATE, /* none */ - O_KEEP_STATE, /* none */ - O_LIMIT, /* ipfw_insn_limit */ - O_LIMIT_PARENT, /* dyn_type, not an opcode. */ + O_IP_SRC, /* u32 = IP */ + O_IP_SRC_MASK, /* ip = IP/mask */ + O_IP_SRC_ME, /* none */ + O_IP_SRC_SET, /* u32=base, arg1=len, bitmap */ + + O_IP_DST, /* u32 = IP */ + O_IP_DST_MASK, /* ip = IP/mask */ + O_IP_DST_ME, /* none */ + O_IP_DST_SET, /* u32=base, arg1=len, bitmap */ + + O_IP_SRCPORT, /* (n)port list:mask 4 byte ea */ + O_IP_DSTPORT, /* (n)port list:mask 4 byte ea */ + O_PROTO, /* arg1=protocol */ + + O_MACADDR2, /* 2 mac addr:mask */ + O_MAC_TYPE, /* same as srcport */ + + O_LAYER2, /* none */ + O_IN, /* none */ + O_FRAG, /* none */ + + O_RECV, /* none */ + O_XMIT, /* none */ + O_VIA, /* none */ + + O_IPOPT, /* arg1 = 2*u8 bitmap */ + O_IPLEN, /* arg1 = len */ + O_IPID, /* arg1 = id */ + + O_IPTOS, /* arg1 = id */ + O_IPPRECEDENCE, /* arg1 = precedence << 5 */ + O_IPTTL, /* arg1 = TTL */ + + O_IPVER, /* arg1 = version */ + O_UID, /* u32 = id */ + O_GID, /* u32 = id */ + O_ESTAB, /* none (tcp established) */ + O_TCPFLAGS, /* arg1 = 2*u8 bitmap */ + O_TCPWIN, /* arg1 = desired win */ + O_TCPSEQ, /* u32 = desired seq. */ + O_TCPACK, /* u32 = desired seq. */ + O_ICMPTYPE, /* u32 = icmp bitmap */ + O_TCPOPTS, /* arg1 = 2*u8 bitmap */ + + O_VERREVPATH, /* none */ + + O_PROBE_STATE, /* none */ + O_KEEP_STATE, /* none */ + O_LIMIT, /* ipfw_insn_limit */ + O_LIMIT_PARENT, /* dyn_type, not an opcode. */ /* * These are really 'actions'. 
*/ - O_LOG, /* ipfw_insn_log */ - O_PROB, /* u32 = match probability */ - - O_CHECK_STATE, /* none */ - O_ACCEPT, /* none */ - O_DENY, /* none */ - O_REJECT, /* arg1=icmp arg (same as deny) */ - O_COUNT, /* none */ - O_SKIPTO, /* arg1=next rule number */ - O_PIPE, /* arg1=pipe number */ - O_QUEUE, /* arg1=queue number */ - O_DIVERT, /* arg1=port number */ - O_TEE, /* arg1=port number */ - O_FORWARD_IP, /* fwd sockaddr */ - O_FORWARD_MAC, /* fwd mac */ + O_LOG, /* ipfw_insn_log */ + O_PROB, /* u32 = match probability */ + + O_CHECK_STATE, /* none */ + O_ACCEPT, /* none */ + O_DENY, /* none */ + O_REJECT, /* arg1=icmp arg (same as deny) */ + O_COUNT, /* none */ + O_SKIPTO, /* arg1=next rule number */ + O_PIPE, /* arg1=pipe number */ + O_QUEUE, /* arg1=queue number */ + O_DIVERT, /* arg1=port number */ + O_TEE, /* arg1=port number */ + O_FORWARD_IP, /* fwd sockaddr */ + O_FORWARD_MAC, /* fwd mac */ /* * More opcodes. */ - O_IPSEC, /* has ipsec history */ + O_IPSEC, /* has ipsec history */ - O_LAST_OPCODE /* not an opcode! */ + O_LAST_OPCODE /* not an opcode! */ }; /* @@ -221,47 +221,47 @@ enum ipfw_opcodes { /* arguments (4 byte each) */ * this needs to be fixed. * */ -typedef struct _ipfw_insn { /* template for instructions */ - enum ipfw_opcodes opcode:8; - u_int8_t len; /* numer of 32-byte words */ -#define F_NOT 0x80 -#define F_OR 0x40 -#define F_LEN_MASK 0x3f -#define F_LEN(cmd) ((cmd)->len & F_LEN_MASK) - - u_int16_t arg1; +typedef struct _ipfw_insn { /* template for instructions */ + enum ipfw_opcodes opcode:8; + u_int8_t len; /* numer of 32-byte words */ +#define F_NOT 0x80 +#define F_OR 0x40 +#define F_LEN_MASK 0x3f +#define F_LEN(cmd) ((cmd)->len & F_LEN_MASK) + + u_int16_t arg1; } ipfw_insn; /* * The F_INSN_SIZE(type) computes the size, in 4-byte words, of * a given type. */ -#define F_INSN_SIZE(t) ((sizeof (t))/sizeof(u_int32_t)) +#define F_INSN_SIZE(t) ((sizeof (t))/sizeof(u_int32_t)) /* * This is used to store an array of 16-bit entries (ports etc.) */ -typedef struct _ipfw_insn_u16 { +typedef struct _ipfw_insn_u16 { ipfw_insn o; - u_int16_t ports[2]; /* there may be more */ + u_int16_t ports[2]; /* there may be more */ } ipfw_insn_u16; /* * This is used to store an array of 32-bit entries * (uid, single IPv4 addresses etc.) */ -typedef struct _ipfw_insn_u32 { +typedef struct _ipfw_insn_u32 { ipfw_insn o; - u_int32_t d[1]; /* one or more */ + u_int32_t d[1]; /* one or more */ } ipfw_insn_u32; /* * This is used to store IP addr-mask pairs. */ -typedef struct _ipfw_insn_ip { +typedef struct _ipfw_insn_ip { ipfw_insn o; - struct in_addr addr; - struct in_addr mask; + struct in_addr addr; + struct in_addr mask; } ipfw_insn_ip; /* @@ -275,16 +275,16 @@ typedef struct _ipfw_insn_sa { /* * This is used for MAC addr-mask pairs. */ -typedef struct _ipfw_insn_mac { +typedef struct _ipfw_insn_mac { ipfw_insn o; - u_char addr[12]; /* dst[6] + src[6] */ - u_char mask[12]; /* dst[6] + src[6] */ + u_char addr[12]; /* dst[6] + src[6] */ + u_char mask[12]; /* dst[6] + src[6] */ } ipfw_insn_mac; /* * This is used for interface match rules (recv xx, xmit xx). */ -typedef struct _ipfw_insn_if { +typedef struct _ipfw_insn_if { ipfw_insn o; union { struct in_addr ip; @@ -301,22 +301,22 @@ typedef struct _ipfw_insn_if { * be unaligned in the overall structure, so it needs to be * manipulated with care. 
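Because every opcode begins with the ipfw_insn header above and F_LEN() reports its length in 32-bit words, a rule's instruction stream can be walked without knowing each concrete type; the printing and conversion loops patched later in ip_fw2_compat.c all use this shape. A minimal sketch against the definitions just shown (ipfw_insn is exactly one 32-bit word, so pointer arithmetic advances by whole instructions):

/*
 * Sketch only, not part of the patch: generic traversal of the match
 * portion of a rule.  act_ofs is the offset, in 32-bit words, where
 * the action instructions begin; F_LEN() strips the F_NOT/F_OR
 * modifier bits from each instruction's length byte.
 */
static int
count_match_insns(const ipfw_insn *cmd, int act_ofs)
{
	int l, n = 0;

	for (l = act_ofs; l > 0; l -= F_LEN(cmd), cmd += F_LEN(cmd)) {
		n++;	/* cmd->opcode identifies the match here */
	}
	return n;
}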
*/ -typedef struct _ipfw_insn_pipe { - ipfw_insn o; - void *pipe_ptr; /* XXX */ +typedef struct _ipfw_insn_pipe { + ipfw_insn o; + void *pipe_ptr; /* XXX */ } ipfw_insn_pipe; /* * This is used for limit rules. */ -typedef struct _ipfw_insn_limit { +typedef struct _ipfw_insn_limit { ipfw_insn o; u_int8_t _pad; - u_int8_t limit_mask; /* combination of DYN_* below */ -#define DYN_SRC_ADDR 0x1 -#define DYN_SRC_PORT 0x2 -#define DYN_DST_ADDR 0x4 -#define DYN_DST_PORT 0x8 + u_int8_t limit_mask; /* combination of DYN_* below */ +#define DYN_SRC_ADDR 0x1 +#define DYN_SRC_PORT 0x2 +#define DYN_DST_ADDR 0x4 +#define DYN_DST_PORT 0x8 u_int16_t conn_limit; } ipfw_insn_limit; @@ -325,17 +325,17 @@ typedef struct _ipfw_insn_limit { * This is used for log instructions. */ typedef struct _ipfw_insn_log { - ipfw_insn o; - u_int32_t max_log; /* how many do we log -- 0 = all */ - u_int32_t log_left; /* how many left to log */ + ipfw_insn o; + u_int32_t max_log; /* how many do we log -- 0 = all */ + u_int32_t log_left; /* how many left to log */ } ipfw_insn_log; /* Version of this API */ -#define IP_FW_VERSION_NONE 0 -#define IP_FW_VERSION_0 10 /* old ipfw */ -#define IP_FW_VERSION_1 20 /* ipfw in Jaguar/Panther */ -#define IP_FW_VERSION_2 30 /* ipfw2 */ -#define IP_FW_CURRENT_API_VERSION IP_FW_VERSION_2 +#define IP_FW_VERSION_NONE 0 +#define IP_FW_VERSION_0 10 /* old ipfw */ +#define IP_FW_VERSION_1 20 /* ipfw in Jaguar/Panther */ +#define IP_FW_VERSION_2 30 /* ipfw2 */ +#define IP_FW_CURRENT_API_VERSION IP_FW_VERSION_2 /* * Here we have the structure representing an ipfw rule. @@ -358,40 +358,40 @@ typedef struct _ipfw_insn_log { * (at ACTION_PTR(r)) MUST be O_LOG * * NOTE: we use a simple linked list of rules because we never need - * to delete a rule without scanning the list. We do not use + * to delete a rule without scanning the list. We do not use * queue(3) macros for portability and readability. */ struct ip_fw { - u_int32_t version; /* Version of this structure. MUST be set */ - /* by clients. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION. */ - void *context; /* Context that is usable by user processes to */ - /* identify this rule. */ - struct ip_fw *next; /* linked list of rules */ - struct ip_fw *next_rule; /* ptr to next [skipto] rule */ + u_int32_t version; /* Version of this structure. MUST be set */ + /* by clients. Should always be */ + /* set to IP_FW_CURRENT_API_VERSION. */ + void *context; /* Context that is usable by user processes to */ + /* identify this rule. */ + struct ip_fw *next; /* linked list of rules */ + struct ip_fw *next_rule; /* ptr to next [skipto] rule */ /* 'next_rule' is used to pass up 'set_disable' status */ - u_int16_t act_ofs; /* offset of action in 32-bit units */ - u_int16_t cmd_len; /* # of 32-bit words in cmd */ - u_int16_t rulenum; /* rule number */ - u_int8_t set; /* rule set (0..31) */ - u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ -#define RESVD_SET 31 /* set for default and persistent rules */ - u_int8_t _pad; /* padding */ + u_int16_t act_ofs; /* offset of action in 32-bit units */ + u_int16_t cmd_len; /* # of 32-bit words in cmd */ + u_int16_t rulenum; /* rule number */ + u_int8_t set; /* rule set (0..31) */ + u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ +#define RESVD_SET 31 /* set for default and persistent rules */ + u_int8_t _pad; /* padding */ /* These fields are present in all rules. 
*/ - u_int64_t pcnt; /* Packet counter */ - u_int64_t bcnt; /* Byte counter */ - u_int32_t timestamp; /* tv_sec of last match */ - - u_int32_t reserved_1; /* reserved - set to 0 */ - u_int32_t reserved_2; /* reserved - set to 0 */ - - ipfw_insn cmd[1]; /* storage for commands */ + u_int64_t pcnt; /* Packet counter */ + u_int64_t bcnt; /* Byte counter */ + u_int32_t timestamp; /* tv_sec of last match */ + + u_int32_t reserved_1; /* reserved - set to 0 */ + u_int32_t reserved_2; /* reserved - set to 0 */ + + ipfw_insn cmd[1]; /* storage for commands */ }; -#define ACTION_PTR(rule) \ +#define ACTION_PTR(rule) \ (ipfw_insn *)( (u_int32_t *)((rule)->cmd) + ((rule)->act_ofs) ) #define RULESIZE(rule) (sizeof(struct ip_fw) + \ @@ -402,12 +402,12 @@ struct ip_fw { * parts of the code. */ struct ipfw_flow_id { - u_int32_t dst_ip; - u_int32_t src_ip; - u_int16_t dst_port; - u_int16_t src_port; - u_int8_t proto; - u_int8_t flags; /* protocol-specific flags */ + u_int32_t dst_ip; + u_int32_t src_ip; + u_int16_t dst_port; + u_int16_t src_port; + u_int8_t proto; + u_int8_t flags; /* protocol-specific flags */ }; /* @@ -420,8 +420,8 @@ typedef struct _ipfw_dyn_rule ipfw_dyn_rule; #include /* - * Note: - * The internal version of "struct _ipfw_dyn_rule" differs from + * Note: + * The internal version of "struct _ipfw_dyn_rule" differs from * its external version because the field "id" is of type * "struct ip_flow_id" in the internal version. The type of the * field "id" for the external version is "ipfw_dyn_rule for @@ -429,66 +429,66 @@ typedef struct _ipfw_dyn_rule ipfw_dyn_rule; */ struct _ipfw_dyn_rule { - ipfw_dyn_rule *next; /* linked list of rules. */ - struct ip_fw *rule; /* pointer to rule */ + ipfw_dyn_rule *next; /* linked list of rules. */ + struct ip_fw *rule; /* pointer to rule */ /* 'rule' is used to pass up the rule number (from the parent) */ - ipfw_dyn_rule *parent; /* pointer to parent rule */ - u_int64_t pcnt; /* packet match counter */ - u_int64_t bcnt; /* byte match counter */ - struct ip_flow_id id; /* (masked) flow id */ - u_int32_t expire; /* expire time */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typically a - * combination of TCP flags) - */ - u_int32_t ack_fwd; /* most recent ACKs in forward */ - u_int32_t ack_rev; /* and reverse directions (used */ - /* to generate keepalives) */ - u_int16_t dyn_type; /* rule type */ - u_int16_t count; /* refcount */ + ipfw_dyn_rule *parent; /* pointer to parent rule */ + u_int64_t pcnt; /* packet match counter */ + u_int64_t bcnt; /* byte match counter */ + struct ip_flow_id id; /* (masked) flow id */ + u_int32_t expire; /* expire time */ + u_int32_t bucket; /* which bucket in hash table */ + u_int32_t state; /* state of this rule (typically a + * combination of TCP flags) + */ + u_int32_t ack_fwd; /* most recent ACKs in forward */ + u_int32_t ack_rev; /* and reverse directions (used */ + /* to generate keepalives) */ + u_int16_t dyn_type; /* rule type */ + u_int16_t count; /* refcount */ }; #else /* XNU_KERNEL_PRIVATE */ struct _ipfw_dyn_rule { - ipfw_dyn_rule *next; /* linked list of rules. */ - struct ip_fw *rule; /* pointer to rule */ + ipfw_dyn_rule *next; /* linked list of rules. 
*/ + struct ip_fw *rule; /* pointer to rule */ /* 'rule' is used to pass up the rule number (from the parent) */ - ipfw_dyn_rule *parent; /* pointer to parent rule */ - u_int64_t pcnt; /* packet match counter */ - u_int64_t bcnt; /* byte match counter */ - struct ipfw_flow_id id; /* (masked) flow id */ - u_int32_t expire; /* expire time */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typically a - * combination of TCP flags) - */ - u_int32_t ack_fwd; /* most recent ACKs in forward */ - u_int32_t ack_rev; /* and reverse directions (used */ - /* to generate keepalives) */ - u_int16_t dyn_type; /* rule type */ - u_int16_t count; /* refcount */ + ipfw_dyn_rule *parent; /* pointer to parent rule */ + u_int64_t pcnt; /* packet match counter */ + u_int64_t bcnt; /* byte match counter */ + struct ipfw_flow_id id; /* (masked) flow id */ + u_int32_t expire; /* expire time */ + u_int32_t bucket; /* which bucket in hash table */ + u_int32_t state; /* state of this rule (typically a + * combination of TCP flags) + */ + u_int32_t ack_fwd; /* most recent ACKs in forward */ + u_int32_t ack_rev; /* and reverse directions (used */ + /* to generate keepalives) */ + u_int16_t dyn_type; /* rule type */ + u_int16_t count; /* refcount */ }; #endif /* XNU_KERNEL_PRIVATE */ /* * Definitions for IP option names. */ -#define IP_FW_IPOPT_LSRR 0x01 -#define IP_FW_IPOPT_SSRR 0x02 -#define IP_FW_IPOPT_RR 0x04 -#define IP_FW_IPOPT_TS 0x08 +#define IP_FW_IPOPT_LSRR 0x01 +#define IP_FW_IPOPT_SSRR 0x02 +#define IP_FW_IPOPT_RR 0x04 +#define IP_FW_IPOPT_TS 0x08 /* * Definitions for TCP option names. */ -#define IP_FW_TCPOPT_MSS 0x01 -#define IP_FW_TCPOPT_WINDOW 0x02 -#define IP_FW_TCPOPT_SACK 0x04 -#define IP_FW_TCPOPT_TS 0x08 -#define IP_FW_TCPOPT_CC 0x10 +#define IP_FW_TCPOPT_MSS 0x01 +#define IP_FW_TCPOPT_WINDOW 0x02 +#define IP_FW_TCPOPT_SACK 0x04 +#define IP_FW_TCPOPT_TS 0x08 +#define IP_FW_TCPOPT_CC 0x10 -#define ICMP_REJECT_RST 0x100 /* fake ICMP code (send a TCP RST) */ +#define ICMP_REJECT_RST 0x100 /* fake ICMP code (send a TCP RST) */ /* * Main firewall chains definitions and global var's definitions. @@ -496,64 +496,64 @@ struct _ipfw_dyn_rule { #ifdef BSD_KERNEL_PRIVATE #pragma pack(4) -struct ip_fw_32{ - u_int32_t version; /* Version of this structure. MUST be set */ - /* by clients. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION. */ - user32_addr_t context; /* Context that is usable by user processes to */ - /* identify this rule. */ - user32_addr_t next; /* linked list of rules */ - user32_addr_t next_rule;/* ptr to next [skipto] rule */ - /* 'next_rule' is used to pass up 'set_disable' status */ - - u_int16_t act_ofs; /* offset of action in 32-bit units */ - u_int16_t cmd_len; /* # of 32-bit words in cmd */ - u_int16_t rulenum; /* rule number */ - u_int8_t set; /* rule set (0..31) */ - u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ +struct ip_fw_32 { + u_int32_t version; /* Version of this structure. MUST be set */ + /* by clients. Should always be */ + /* set to IP_FW_CURRENT_API_VERSION. */ + user32_addr_t context; /* Context that is usable by user processes to */ + /* identify this rule. 
*/ + user32_addr_t next; /* linked list of rules */ + user32_addr_t next_rule;/* ptr to next [skipto] rule */ + /* 'next_rule' is used to pass up 'set_disable' status */ + + u_int16_t act_ofs; /* offset of action in 32-bit units */ + u_int16_t cmd_len; /* # of 32-bit words in cmd */ + u_int16_t rulenum; /* rule number */ + u_int8_t set; /* rule set (0..31) */ + u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ #define RESVD_SET 31 /* set for default and persistent rules */ - u_int8_t _pad; /* padding */ + u_int8_t _pad; /* padding */ - /* These fields are present in all rules. */ - u_int64_t pcnt; /* Packet counter */ - u_int64_t bcnt; /* Byte counter */ - u_int32_t timestamp; /* tv_sec of last match */ + /* These fields are present in all rules. */ + u_int64_t pcnt; /* Packet counter */ + u_int64_t bcnt; /* Byte counter */ + u_int32_t timestamp; /* tv_sec of last match */ - u_int32_t reserved_1; /* reserved - set to 0 */ - u_int32_t reserved_2; /* reserved - set to 0 */ + u_int32_t reserved_1; /* reserved - set to 0 */ + u_int32_t reserved_2; /* reserved - set to 0 */ - ipfw_insn cmd[1]; /* storage for commands */ + ipfw_insn cmd[1]; /* storage for commands */ }; #pragma pack() -struct ip_fw_64{ - u_int32_t version; /* Version of this structure. MUST be set */ - /* by clients. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION. */ - __uint64_t context __attribute__((aligned(8))); /* Context that is usable by user processes to */ - /* identify this rule. */ - user64_addr_t next; /* linked list of rules */ - user64_addr_t next_rule; /* ptr to next [skipto] rule */ - /* 'next_rule' is used to pass up 'set_disable' status */ - - u_int16_t act_ofs; /* offset of action in 32-bit units */ - u_int16_t cmd_len; /* # of 32-bit words in cmd */ - u_int16_t rulenum; /* rule number */ - u_int8_t set; /* rule set (0..31) */ - u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ +struct ip_fw_64 { + u_int32_t version; /* Version of this structure. MUST be set */ + /* by clients. Should always be */ + /* set to IP_FW_CURRENT_API_VERSION. */ + __uint64_t context __attribute__((aligned(8))); /* Context that is usable by user processes to */ + /* identify this rule. */ + user64_addr_t next; /* linked list of rules */ + user64_addr_t next_rule; /* ptr to next [skipto] rule */ + /* 'next_rule' is used to pass up 'set_disable' status */ + + u_int16_t act_ofs; /* offset of action in 32-bit units */ + u_int16_t cmd_len; /* # of 32-bit words in cmd */ + u_int16_t rulenum; /* rule number */ + u_int8_t set; /* rule set (0..31) */ + u_int32_t set_masks[2]; /* masks for manipulating sets atomically */ #define RESVD_SET 31 /* set for default and persistent rules */ - u_int8_t _pad; /* padding */ + u_int8_t _pad; /* padding */ - /* These fields are present in all rules. */ - u_int64_t pcnt __attribute__((aligned(8))); /* Packet counter */ - u_int64_t bcnt __attribute__((aligned(8))); /* Byte counter */ - u_int32_t timestamp; /* tv_sec of last match */ + /* These fields are present in all rules. 
*/ + u_int64_t pcnt __attribute__((aligned(8))); /* Packet counter */ + u_int64_t bcnt __attribute__((aligned(8))); /* Byte counter */ + u_int32_t timestamp; /* tv_sec of last match */ - u_int32_t reserved_1; /* reserved - set to 0 */ - u_int32_t reserved_2; /* reserved - set to 0 */ + u_int32_t reserved_1; /* reserved - set to 0 */ + u_int32_t reserved_2; /* reserved - set to 0 */ - ipfw_insn cmd[1]; /* storage for commands */ + ipfw_insn cmd[1]; /* storage for commands */ }; @@ -562,58 +562,58 @@ typedef struct _ipfw_dyn_rule_32 ipfw_dyn_rule_32; #pragma pack(4) struct _ipfw_dyn_rule_32 { - user32_addr_t next; /* linked list of rules. */ - user32_addr_t rule; /* pointer to rule */ - /* 'rule' is used to pass up the rule number (from the parent) */ - - user32_addr_t parent; /* pointer to parent rule */ - u_int64_t pcnt; /* packet match counter */ - u_int64_t bcnt; /* byte match counter */ - struct ipfw_flow_id id; /* (masked) flow id */ - u_int32_t expire; /* expire time */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typically a - * combination of TCP flags) - */ - u_int32_t ack_fwd; /* most recent ACKs in forward */ - u_int32_t ack_rev; /* and reverse directions (used */ - /* to generate keepalives) */ - u_int16_t dyn_type; /* rule type */ - u_int16_t count; /* refcount */ + user32_addr_t next; /* linked list of rules. */ + user32_addr_t rule; /* pointer to rule */ + /* 'rule' is used to pass up the rule number (from the parent) */ + + user32_addr_t parent; /* pointer to parent rule */ + u_int64_t pcnt; /* packet match counter */ + u_int64_t bcnt; /* byte match counter */ + struct ipfw_flow_id id; /* (masked) flow id */ + u_int32_t expire; /* expire time */ + u_int32_t bucket; /* which bucket in hash table */ + u_int32_t state; /* state of this rule (typically a + * combination of TCP flags) + */ + u_int32_t ack_fwd; /* most recent ACKs in forward */ + u_int32_t ack_rev; /* and reverse directions (used */ + /* to generate keepalives) */ + u_int16_t dyn_type; /* rule type */ + u_int16_t count; /* refcount */ }; #pragma pack() struct _ipfw_dyn_rule_64 { - user64_addr_t next; /* linked list of rules. */ - user64_addr_t rule; /* pointer to rule */ - /* 'rule' is used to pass up the rule number (from the parent) */ - - user64_addr_t parent; /* pointer to parent rule */ - u_int64_t pcnt; /* packet match counter */ - u_int64_t bcnt; /* byte match counter */ - struct ipfw_flow_id id; /* (masked) flow id */ - u_int32_t expire; /* expire time */ - u_int32_t bucket; /* which bucket in hash table */ - u_int32_t state; /* state of this rule (typically a - * combination of TCP flags) - */ - u_int32_t ack_fwd; /* most recent ACKs in forward */ - u_int32_t ack_rev; /* and reverse directions (used */ - /* to generate keepalives) */ - u_int16_t dyn_type; /* rule type */ - u_int16_t count; /* refcount */ + user64_addr_t next; /* linked list of rules. 
*/ + user64_addr_t rule; /* pointer to rule */ + /* 'rule' is used to pass up the rule number (from the parent) */ + + user64_addr_t parent; /* pointer to parent rule */ + u_int64_t pcnt; /* packet match counter */ + u_int64_t bcnt; /* byte match counter */ + struct ipfw_flow_id id; /* (masked) flow id */ + u_int32_t expire; /* expire time */ + u_int32_t bucket; /* which bucket in hash table */ + u_int32_t state; /* state of this rule (typically a + * combination of TCP flags) + */ + u_int32_t ack_fwd; /* most recent ACKs in forward */ + u_int32_t ack_rev; /* and reverse directions (used */ + /* to generate keepalives) */ + u_int16_t dyn_type; /* rule type */ + u_int16_t count; /* refcount */ }; typedef struct _ipfw_insn_pipe_64 { - ipfw_insn o; - user64_addr_t pipe_ptr; /* XXX */ + ipfw_insn o; + user64_addr_t pipe_ptr; /* XXX */ } ipfw_insn_pipe_64; -typedef struct _ipfw_insn_pipe_32{ - ipfw_insn o; - user32_addr_t pipe_ptr; /* XXX */ +typedef struct _ipfw_insn_pipe_32 { + ipfw_insn o; + user32_addr_t pipe_ptr; /* XXX */ } ipfw_insn_pipe_32; @@ -621,9 +621,9 @@ typedef struct _ipfw_insn_pipe_32{ #if IPFIREWALL -#define IP_FW_PORT_DYNT_FLAG 0x10000 -#define IP_FW_PORT_TEE_FLAG 0x20000 -#define IP_FW_PORT_DENY_FLAG 0x40000 +#define IP_FW_PORT_DYNT_FLAG 0x10000 +#define IP_FW_PORT_TEE_FLAG 0x20000 +#define IP_FW_PORT_DENY_FLAG 0x40000 #ifdef PRIVATE #include @@ -639,7 +639,7 @@ struct sockopt; struct dn_flow_set; void flush_pipe_ptrs(struct dn_flow_set *match); /* used by dummynet */ -void ipfw_init(void); /* called from raw_ip.c: load_ipfw() */ +void ipfw_init(void); /* called from raw_ip.c: load_ipfw() */ typedef int ip_fw_chk_t (struct ip_fw_args *args); typedef int ip_fw_ctl_t (struct sockopt *); @@ -647,7 +647,7 @@ extern ip_fw_chk_t *ip_fw_chk_ptr; extern ip_fw_ctl_t *ip_fw_ctl_ptr; extern int fw_one_pass; extern int fw_enable; -#define IPFW_LOADED (ip_fw_chk_ptr != NULL) +#define IPFW_LOADED (ip_fw_chk_ptr != NULL) #endif /* IPFIREWALL */ #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/ip_fw2_compat.c b/bsd/netinet/ip_fw2_compat.c index c4f1bf576..2965c4adc 100644 --- a/bsd/netinet/ip_fw2_compat.c +++ b/bsd/netinet/ip_fw2_compat.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IPFW2 Backward Compatibility */ @@ -77,19 +77,19 @@ static struct _s_x f_tcpflags[] = { { "rst", TH_RST }, { "urg", TH_URG }, { "tcp flag", 0 }, - { NULL, 0 } + { NULL, 0 } }; static struct _s_x f_tcpopts[] = { - { "mss", IP_FW_TCPOPT_MSS }, - { "maxseg", IP_FW_TCPOPT_MSS }, - { "window", IP_FW_TCPOPT_WINDOW }, - { "sack", IP_FW_TCPOPT_SACK }, - { "ts", IP_FW_TCPOPT_TS }, - { "timestamp", IP_FW_TCPOPT_TS }, - { "cc", IP_FW_TCPOPT_CC }, - { "tcp option", 0 }, - { NULL, 0 } + { "mss", IP_FW_TCPOPT_MSS }, + { "maxseg", IP_FW_TCPOPT_MSS }, + { "window", IP_FW_TCPOPT_WINDOW }, + { "sack", IP_FW_TCPOPT_SACK }, + { "ts", IP_FW_TCPOPT_TS }, + { "timestamp", IP_FW_TCPOPT_TS }, + { "cc", IP_FW_TCPOPT_CC }, + { "tcp option", 0 }, + { NULL, 0 } }; @@ -98,32 +98,32 @@ static struct _s_x f_tcpopts[] = { * (though in fact only the low 5 bits are significant). */ static struct _s_x f_ipopts[] = { - { "ssrr", IP_FW_IPOPT_SSRR}, - { "lsrr", IP_FW_IPOPT_LSRR}, - { "rr", IP_FW_IPOPT_RR}, - { "ts", IP_FW_IPOPT_TS}, - { "ip option", 0 }, - { NULL, 0 } + { "ssrr", IP_FW_IPOPT_SSRR}, + { "lsrr", IP_FW_IPOPT_LSRR}, + { "rr", IP_FW_IPOPT_RR}, + { "ts", IP_FW_IPOPT_TS}, + { "ip option", 0 }, + { NULL, 0 } }; static struct _s_x f_iptos[] = { - { "lowdelay", IPTOS_LOWDELAY}, - { "throughput", IPTOS_THROUGHPUT}, + { "lowdelay", IPTOS_LOWDELAY}, + { "throughput", IPTOS_THROUGHPUT}, { "reliability", IPTOS_RELIABILITY}, - { "mincost", IPTOS_MINCOST}, - { "congestion", IPTOS_CE}, + { "mincost", IPTOS_MINCOST}, + { "congestion", IPTOS_CE}, { "ecntransport", IPTOS_ECT}, { "ip tos option", 0}, - { NULL, 0 } + { NULL, 0 } }; static struct _s_x limit_masks[] = { - {"all", DYN_SRC_ADDR|DYN_SRC_PORT|DYN_DST_ADDR|DYN_DST_PORT}, - {"src-addr", DYN_SRC_ADDR}, - {"src-port", DYN_SRC_PORT}, - {"dst-addr", DYN_DST_ADDR}, - {"dst-port", DYN_DST_PORT}, - {NULL, 0} + {"all", DYN_SRC_ADDR | DYN_SRC_PORT | DYN_DST_ADDR | DYN_DST_PORT}, + {"src-addr", DYN_SRC_ADDR}, + {"src-port", DYN_SRC_PORT}, + {"dst-addr", DYN_DST_ADDR}, + {"dst-port", DYN_DST_PORT}, + {NULL, 0} }; #endif /* !FW2_DEBUG_VERBOSE */ @@ -135,42 +135,42 @@ ipfw_print_fw_flags(u_int flags) { /* print action */ switch (flags & IP_FW_F_COMMAND_COMPAT) { - case IP_FW_F_ACCEPT_COMPAT: - printf("IP_FW_F_ACCEPT_COMPAT\n"); - break; - case IP_FW_F_COUNT_COMPAT: - printf("IP_FW_F_COUNT_COMPAT\n"); - break; - case IP_FW_F_PIPE_COMPAT: - printf("IP_FW_F_PIPE_COMPAT\n"); - break; - case IP_FW_F_QUEUE_COMPAT: - printf("IP_FW_F_QUEUE_COMPAT\n"); - break; - case IP_FW_F_SKIPTO_COMPAT: - printf("IP_FW_F_SKIPTO_COMPAT\n"); - break; - case IP_FW_F_DIVERT_COMPAT: - printf("IP_FW_F_DIVERT_COMPAT\n"); - break; - case IP_FW_F_TEE_COMPAT: - printf("IP_FW_F_TEE_COMPAT\n"); - break; - case IP_FW_F_FWD_COMPAT: - printf("IP_FW_F_FWD_COMPAT\n"); - break; - case IP_FW_F_DENY_COMPAT: - printf("IP_FW_F_DENY_COMPAT\n"); - break; - case IP_FW_F_REJECT_COMPAT: - printf("IP_FW_F_REJECT_COMPAT\n"); - break; - case IP_FW_F_CHECK_S_COMPAT: - printf("IP_FW_F_CHECK_S_COMPAT\n"); - break; - default: - printf("No action given\n"); - break; + case IP_FW_F_ACCEPT_COMPAT: + printf("IP_FW_F_ACCEPT_COMPAT\n"); + break; + case IP_FW_F_COUNT_COMPAT: + printf("IP_FW_F_COUNT_COMPAT\n"); + break; + case IP_FW_F_PIPE_COMPAT: + printf("IP_FW_F_PIPE_COMPAT\n"); + break; + case IP_FW_F_QUEUE_COMPAT: + printf("IP_FW_F_QUEUE_COMPAT\n"); + break; + case IP_FW_F_SKIPTO_COMPAT: + printf("IP_FW_F_SKIPTO_COMPAT\n"); + break; + case IP_FW_F_DIVERT_COMPAT: + 
printf("IP_FW_F_DIVERT_COMPAT\n"); + break; + case IP_FW_F_TEE_COMPAT: + printf("IP_FW_F_TEE_COMPAT\n"); + break; + case IP_FW_F_FWD_COMPAT: + printf("IP_FW_F_FWD_COMPAT\n"); + break; + case IP_FW_F_DENY_COMPAT: + printf("IP_FW_F_DENY_COMPAT\n"); + break; + case IP_FW_F_REJECT_COMPAT: + printf("IP_FW_F_REJECT_COMPAT\n"); + break; + case IP_FW_F_CHECK_S_COMPAT: + printf("IP_FW_F_CHECK_S_COMPAT\n"); + break; + default: + printf("No action given\n"); + break; } /* print commands */ @@ -246,21 +246,21 @@ static void print_fw_version(u_int32_t api_version) { switch (api_version) { - case IP_FW_VERSION_0: - printf("Version: %s\n", VERSION_ZERO_STR); - break; - case IP_FW_VERSION_1: - printf("Version: %s\n", VERSION_ONE_STR); - break; - case IP_FW_CURRENT_API_VERSION: - printf("Version: %s\n", CURRENT_API_VERSION_STR); - break; - case IP_FW_VERSION_NONE: - printf("Version: %s\n", NO_VERSION_STR); - break; - default: - printf("Unrecognized version\n"); - break; + case IP_FW_VERSION_0: + printf("Version: %s\n", VERSION_ZERO_STR); + break; + case IP_FW_VERSION_1: + printf("Version: %s\n", VERSION_ONE_STR); + break; + case IP_FW_CURRENT_API_VERSION: + printf("Version: %s\n", CURRENT_API_VERSION_STR); + break; + case IP_FW_VERSION_NONE: + printf("Version: %s\n", NO_VERSION_STR); + break; + default: + printf("Unrecognized version\n"); + break; } } @@ -268,12 +268,13 @@ static void print_icmptypes(ipfw_insn_u32 *cmd) { int i; - char sep= ' '; + char sep = ' '; printf(" icmptypes"); for (i = 0; i < 32; i++) { - if ( (cmd->d[0] & (1 << (i))) == 0) + if ((cmd->d[0] & (1 << (i))) == 0) { continue; + } printf("%c%d", sep, i); sep = ','; } @@ -297,7 +298,7 @@ print_flags(char const *name, ipfw_insn *cmd, struct _s_x *list) } printf(" %s ", name); - for (i=0; list[i].x != 0; i++) { + for (i = 0; list[i].x != 0; i++) { if (set & list[i].x) { set &= ~list[i].x; printf("%s%s", comma, list[i].s); @@ -316,12 +317,16 @@ contigmask(uint8_t *p, int len) { int i, n; - for (i=0; i 0; len--, a += 2) { - int mb = /* mask length */ - (cmd->o.opcode == O_IP_SRC || cmd->o.opcode == O_IP_DST) ? - 32 : contigmask((uint8_t *)&(a[1]), 32); - if (mb == 0) { /* any */ - printf("any"); - } else { /* numeric IP followed by some kind of mask */ - printf("%s", inet_ntop(AF_INET, &a[0], ipv4str, sizeof(ipv4str))); - if (mb < 0) - printf(":%s", inet_ntop(AF_INET, &a[1], ipv4str, sizeof(ipv4str))); - else if (mb < 32) - printf("/%d", mb); - } - if (len > 1) - printf(","); - } + for (len = len / 2; len > 0; len--, a += 2) { + int mb = /* mask length */ + (cmd->o.opcode == O_IP_SRC || cmd->o.opcode == O_IP_DST) ? 
+ 32 : contigmask((uint8_t *)&(a[1]), 32); + if (mb == 0) { /* any */ + printf("any"); + } else { /* numeric IP followed by some kind of mask */ + printf("%s", inet_ntop(AF_INET, &a[0], ipv4str, sizeof(ipv4str))); + if (mb < 0) { + printf(":%s", inet_ntop(AF_INET, &a[1], ipv4str, sizeof(ipv4str))); + } else if (mb < 32) { + printf("/%d", mb); + } + } + if (len > 1) { + printf(","); + } + } } /* @@ -373,17 +380,18 @@ print_mac(uint8_t *addr, uint8_t *mask) { int l = contigmask(mask, 48); - if (l == 0) + if (l == 0) { printf(" any"); - else { + } else { printf(" %02x:%02x:%02x:%02x:%02x:%02x", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); - if (l == -1) + if (l == -1) { printf("&%02x:%02x:%02x:%02x:%02x:%02x", mask[0], mask[1], mask[2], mask[3], mask[4], mask[5]); - else if (l < 48) + } else if (l < 48) { printf("/%d", l); + } } } @@ -393,11 +401,11 @@ print_mac(uint8_t *addr, uint8_t *mask) static void ipfw_print_vers2_struct(struct ip_fw *vers2_rule) { - int l; - ipfw_insn *cmd; - ipfw_insn_log *logptr = NULL; - char ipv4str[MAX_IPv4_STR_LEN]; - + int l; + ipfw_insn *cmd; + ipfw_insn_log *logptr = NULL; + char ipv4str[MAX_IPv4_STR_LEN]; + print_fw_version(vers2_rule->version); printf("act_ofs: %d\n", vers2_rule->act_ofs); @@ -407,358 +415,366 @@ ipfw_print_vers2_struct(struct ip_fw *vers2_rule) printf("pcnt: %llu\n", vers2_rule->pcnt); printf("bcnt: %llu\n", vers2_rule->bcnt); printf("timestamp: %d\n", vers2_rule->timestamp); - + /* * first print actions */ for (l = vers2_rule->cmd_len - vers2_rule->act_ofs, cmd = ACTION_PTR(vers2_rule); - l > 0 ; l -= F_LEN(cmd), cmd += F_LEN(cmd)) { - switch(cmd->opcode) { - case O_CHECK_STATE: - printf("check-state"); - break; - - case O_ACCEPT: - printf("allow"); - break; - - case O_COUNT: - printf("count"); - break; - - case O_DENY: - printf("deny"); - break; - - case O_REJECT: - if (cmd->arg1 == ICMP_REJECT_RST) - printf("reset"); - else if (cmd->arg1 == ICMP_UNREACH_HOST) - printf("reject"); - else - printf("unreach %u", cmd->arg1); - break; - - case O_SKIPTO: - printf("skipto %u", cmd->arg1); - break; - - case O_PIPE: - printf("pipe %u", cmd->arg1); - break; - - case O_QUEUE: - printf("queue %u", cmd->arg1); - break; - - case O_DIVERT: - printf("divert %u", cmd->arg1); - break; - - case O_TEE: - printf("tee %u", cmd->arg1); - break; - - case O_FORWARD_IP: - { - ipfw_insn_sa *s = (ipfw_insn_sa *)cmd; - - printf("fwd %s", - inet_ntop(AF_INET, &s->sa.sin_addr, ipv4str, - sizeof(ipv4str))); - if (s->sa.sin_port) - printf(",%d", s->sa.sin_port); - break; + l > 0; l -= F_LEN(cmd), cmd += F_LEN(cmd)) { + switch (cmd->opcode) { + case O_CHECK_STATE: + printf("check-state"); + break; + + case O_ACCEPT: + printf("allow"); + break; + + case O_COUNT: + printf("count"); + break; + + case O_DENY: + printf("deny"); + break; + + case O_REJECT: + if (cmd->arg1 == ICMP_REJECT_RST) { + printf("reset"); + } else if (cmd->arg1 == ICMP_UNREACH_HOST) { + printf("reject"); + } else { + printf("unreach %u", cmd->arg1); } - - case O_LOG: /* O_LOG is printed last */ - logptr = (ipfw_insn_log *)cmd; - break; - - default: - printf("** unrecognized action %d len %d", - cmd->opcode, cmd->len); + break; + + case O_SKIPTO: + printf("skipto %u", cmd->arg1); + break; + + case O_PIPE: + printf("pipe %u", cmd->arg1); + break; + + case O_QUEUE: + printf("queue %u", cmd->arg1); + break; + + case O_DIVERT: + printf("divert %u", cmd->arg1); + break; + + case O_TEE: + printf("tee %u", cmd->arg1); + break; + + case O_FORWARD_IP: + { + ipfw_insn_sa *s = (ipfw_insn_sa *)cmd; + + 
printf("fwd %s", + inet_ntop(AF_INET, &s->sa.sin_addr, ipv4str, + sizeof(ipv4str))); + if (s->sa.sin_port) { + printf(",%d", s->sa.sin_port); + } + break; + } + + case O_LOG: /* O_LOG is printed last */ + logptr = (ipfw_insn_log *)cmd; + break; + + default: + printf("** unrecognized action %d len %d", + cmd->opcode, cmd->len); } } if (logptr) { - if (logptr->max_log > 0) + if (logptr->max_log > 0) { printf(" log logamount %d", logptr->max_log); - else + } else { printf(" log"); + } } /* * then print the body. */ - for (l = vers2_rule->act_ofs, cmd = vers2_rule->cmd ; - l > 0 ; l -= F_LEN(cmd) , cmd += F_LEN(cmd)) { + for (l = vers2_rule->act_ofs, cmd = vers2_rule->cmd; + l > 0; l -= F_LEN(cmd), cmd += F_LEN(cmd)) { /* useful alias */ ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; - switch(cmd->opcode) { - case O_PROB: - break; /* done already */ - - case O_PROBE_STATE: - break; /* no need to print anything here */ - - case O_MACADDR2: - { - ipfw_insn_mac *m = (ipfw_insn_mac *)cmd; - - if (cmd->len & F_NOT) - printf(" not"); - printf(" MAC"); - print_mac(m->addr, m->mask); - print_mac(m->addr + 6, m->mask + 6); - printf("\n"); - break; + switch (cmd->opcode) { + case O_PROB: + break; /* done already */ + + case O_PROBE_STATE: + break; /* no need to print anything here */ + + case O_MACADDR2: + { + ipfw_insn_mac *m = (ipfw_insn_mac *)cmd; + + if (cmd->len & F_NOT) { + printf(" not"); } - case O_MAC_TYPE: - { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); + printf(" MAC"); + print_mac(m->addr, m->mask); + print_mac(m->addr + 6, m->mask + 6); + printf("\n"); + break; + } + case O_MAC_TYPE: + { + uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; + int i; + + for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { + printf("0x%04x", p[0]); + if (p[0] != p[1]) { + printf("-"); + printf("0x%04x", p[1]); } - break; + printf(","); + } + break; + } + case O_IP_SRC: + case O_IP_SRC_MASK: + case O_IP_SRC_ME: + print_ip((ipfw_insn_ip *)cmd); + break; + + case O_IP_DST: + case O_IP_DST_MASK: + case O_IP_DST_ME: + print_ip((ipfw_insn_ip *)cmd); + break; + + case O_IP_DSTPORT: + case O_IP_SRCPORT: + { + uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; + int i; + + for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { + printf("0x%04x", p[0]); + if (p[0] != p[1]) { + printf("-"); + printf("0x%04x", p[1]); + } + printf(","); + } + break; + } + case O_PROTO: + { + printf("O_PROTO"); + + if (cmd->len & F_NOT) { + printf(" not"); + } + + printf(" %u", cmd->arg1); + + break; + } + + default: /*options ... */ + { + if (cmd->len & F_NOT && cmd->opcode != O_IN) { + printf(" not"); } - case O_IP_SRC: - case O_IP_SRC_MASK: - case O_IP_SRC_ME: - print_ip((ipfw_insn_ip *)cmd); + switch (cmd->opcode) { + case O_FRAG: + printf("O_FRAG"); break; - - case O_IP_DST: - case O_IP_DST_MASK: - case O_IP_DST_ME: - print_ip((ipfw_insn_ip *)cmd); + + case O_IN: + printf(cmd->len & F_NOT ? 
" out" : " O_IN"); break; - - case O_IP_DSTPORT: - case O_IP_SRCPORT: - { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); - } + + case O_LAYER2: + printf(" O_LAYER2"); break; - } - case O_PROTO: + case O_XMIT: + case O_RECV: + case O_VIA: { - printf("O_PROTO"); - - if (cmd->len & F_NOT) - printf(" not"); - - printf(" %u", cmd->arg1); - - break; + char const *s; + ipfw_insn_if *cmdif = (ipfw_insn_if *)cmd; + + if (cmd->opcode == O_XMIT) { + s = "O_XMIT"; + } else if (cmd->opcode == O_RECV) { + s = "O_RECV"; + } else { /* if (cmd->opcode == O_VIA) */ + s = "O_VIA"; + } + if (cmdif->name[0] == '\0') { + printf(" %s %s", s, + inet_ntop(AF_INET, &cmdif->p.ip, ipv4str, + sizeof(ipv4str))); + } else if (cmdif->p.unit == -1) { + printf(" %s %s*", s, cmdif->name); + } else { + printf(" %s %s%d", s, cmdif->name, + cmdif->p.unit); + } } - - default: /*options ... */ - { - if (cmd->len & F_NOT && cmd->opcode != O_IN) - printf(" not"); - switch(cmd->opcode) { - case O_FRAG: - printf("O_FRAG"); - break; - - case O_IN: - printf(cmd->len & F_NOT ? " out" : " O_IN"); - break; - - case O_LAYER2: - printf(" O_LAYER2"); - break; - case O_XMIT: - case O_RECV: - case O_VIA: - { - char const *s; - ipfw_insn_if *cmdif = (ipfw_insn_if *)cmd; - - if (cmd->opcode == O_XMIT) - s = "O_XMIT"; - else if (cmd->opcode == O_RECV) - s = "O_RECV"; - else /* if (cmd->opcode == O_VIA) */ - s = "O_VIA"; - if (cmdif->name[0] == '\0') { - printf(" %s %s", s, - inet_ntop(AF_INET, &cmdif->p.ip, ipv4str, - sizeof(ipv4str))); + break; + + case O_IPID: + if (F_LEN(cmd) == 1) { + printf(" ipid %u", cmd->arg1 ); + } else { + uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; + int i; + + for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { + printf("0x%04x", p[0]); + if (p[0] != p[1]) { + printf("-"); + printf("0x%04x", p[1]); } - else if (cmdif->p.unit == -1) - printf(" %s %s*", s, cmdif->name); - else - printf(" %s %s%d", s, cmdif->name, - cmdif->p.unit); + printf(","); } - break; - - case O_IPID: - if (F_LEN(cmd) == 1) - printf(" ipid %u", cmd->arg1 ); - else { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); - } - } - - break; - - case O_IPTTL: - if (F_LEN(cmd) == 1) - printf(" ipttl %u", cmd->arg1 ); - else { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - printf("0x%04x", p[1]); - } - printf(","); - } + } + + break; + + case O_IPTTL: + if (F_LEN(cmd) == 1) { + printf(" ipttl %u", cmd->arg1 ); + } else { + uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; + int i; + + for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { + printf("0x%04x", p[0]); + if (p[0] != p[1]) { + printf("-"); + printf("0x%04x", p[1]); } - - break; - - case O_IPVER: - printf(" ipver %u", cmd->arg1 ); - break; - - case O_IPPRECEDENCE: - printf(" ipprecedence %u", (cmd->arg1) >> 5 ); - break; - - case O_IPLEN: - if (F_LEN(cmd) == 1) - printf(" iplen %u", cmd->arg1 ); - else { - uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; - int i; - - for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { - printf("0x%04x", p[0]); - if (p[0] != p[1]) { - printf("-"); - 
printf("0x%04x", p[1]); - } - printf(","); - } + printf(","); + } + } + + break; + + case O_IPVER: + printf(" ipver %u", cmd->arg1 ); + break; + + case O_IPPRECEDENCE: + printf(" ipprecedence %u", (cmd->arg1) >> 5 ); + break; + + case O_IPLEN: + if (F_LEN(cmd) == 1) { + printf(" iplen %u", cmd->arg1 ); + } else { + uint16_t *p = ((ipfw_insn_u16 *)cmd)->ports; + int i; + + for (i = F_LEN((ipfw_insn *)cmd) - 1; i > 0; i--, p += 2) { + printf("0x%04x", p[0]); + if (p[0] != p[1]) { + printf("-"); + printf("0x%04x", p[1]); } - - break; - - case O_IPOPT: - print_flags("ipoptions", cmd, f_ipopts); - break; - - case O_IPTOS: - print_flags("iptos", cmd, f_iptos); - break; - - case O_ICMPTYPE: - print_icmptypes((ipfw_insn_u32 *)cmd); - break; - - case O_ESTAB: - printf(" established"); - break; - - case O_TCPFLAGS: - print_flags("tcpflags", cmd, f_tcpflags); - break; - - case O_TCPOPTS: - print_flags("tcpoptions", cmd, f_tcpopts); - break; - - case O_TCPWIN: - printf(" tcpwin %d", ntohs(cmd->arg1)); - break; - - case O_TCPACK: - printf(" tcpack %u", ntohl(cmd32->d[0])); - break; - - case O_TCPSEQ: - printf(" tcpseq %u", ntohl(cmd32->d[0])); - break; - - case O_UID: - printf(" uid %u", cmd32->d[0]); - break; - - case O_GID: - printf(" gid %u", cmd32->d[0]); - break; - - case O_VERREVPATH: - printf(" verrevpath"); - break; - - case O_IPSEC: - printf(" ipsec"); - break; - - case O_NOP: - break; - - case O_KEEP_STATE: - printf(" keep-state"); - break; - - case O_LIMIT: - { - struct _s_x *p = limit_masks; - ipfw_insn_limit *c = (ipfw_insn_limit *)cmd; - uint8_t x = c->limit_mask; - char const *comma = " "; - - printf(" limit"); - for (; p->x != 0 ; p++) - if ((x & p->x) == p->x) { - x &= ~p->x; - printf("%s%s", comma, p->s); - comma = ","; - } - printf(" %d", c->conn_limit); - - break; + printf(","); + } + } + + break; + + case O_IPOPT: + print_flags("ipoptions", cmd, f_ipopts); + break; + + case O_IPTOS: + print_flags("iptos", cmd, f_iptos); + break; + + case O_ICMPTYPE: + print_icmptypes((ipfw_insn_u32 *)cmd); + break; + + case O_ESTAB: + printf(" established"); + break; + + case O_TCPFLAGS: + print_flags("tcpflags", cmd, f_tcpflags); + break; + + case O_TCPOPTS: + print_flags("tcpoptions", cmd, f_tcpopts); + break; + + case O_TCPWIN: + printf(" tcpwin %d", ntohs(cmd->arg1)); + break; + + case O_TCPACK: + printf(" tcpack %u", ntohl(cmd32->d[0])); + break; + + case O_TCPSEQ: + printf(" tcpseq %u", ntohl(cmd32->d[0])); + break; + + case O_UID: + printf(" uid %u", cmd32->d[0]); + break; + + case O_GID: + printf(" gid %u", cmd32->d[0]); + break; + + case O_VERREVPATH: + printf(" verrevpath"); + break; + + case O_IPSEC: + printf(" ipsec"); + break; + + case O_NOP: + break; + + case O_KEEP_STATE: + printf(" keep-state"); + break; + + case O_LIMIT: + { + struct _s_x *p = limit_masks; + ipfw_insn_limit *c = (ipfw_insn_limit *)cmd; + uint8_t x = c->limit_mask; + char const *comma = " "; + + printf(" limit"); + for (; p->x != 0; p++) { + if ((x & p->x) == p->x) { + x &= ~p->x; + printf("%s%s", comma, p->s); + comma = ","; } - - default: - printf(" [opcode %d len %d]", - cmd->opcode, cmd->len); - } /* switch */ - } /* default */ + } + printf(" %d", c->conn_limit); + + break; + } + + default: + printf(" [opcode %d len %d]", + cmd->opcode, cmd->len); + } /* switch */ + } /* default */ } /* switch */ } /* for */ } @@ -795,29 +811,36 @@ fill_cmd(ipfw_insn *cmd, enum ipfw_opcodes opcode, uint16_t arg) static u_int32_t -fill_compat_tcpflags(u_int32_t flags) { - u_int32_t flags_compat = 0; - - if (flags & TH_FIN) 
+fill_compat_tcpflags(u_int32_t flags) +{ + u_int32_t flags_compat = 0; + + if (flags & TH_FIN) { flags_compat |= IP_FW_TCPF_FIN_COMPAT; - if (flags & TH_SYN) + } + if (flags & TH_SYN) { flags_compat |= IP_FW_TCPF_SYN_COMPAT; - if (flags & TH_RST) + } + if (flags & TH_RST) { flags_compat |= IP_FW_TCPF_RST_COMPAT; - if (flags & TH_PUSH) + } + if (flags & TH_PUSH) { flags_compat |= IP_FW_TCPF_PSH_COMPAT; - if (flags & TH_ACK) + } + if (flags & TH_ACK) { flags_compat |= IP_FW_TCPF_ACK_COMPAT; - if (flags & TH_URG) + } + if (flags & TH_URG) { flags_compat |= IP_FW_TCPF_URG_COMPAT; - + } + return flags_compat; } /* ******************************************** - * *********** Convert from Latest ************ - * ********************************************/ +* *********** Convert from Latest ************ +* ********************************************/ /* * Things we're actively ignoring: @@ -826,279 +849,275 @@ fill_compat_tcpflags(u_int32_t flags) { static void ipfw_map_from_cmds_32(struct ip_fw_32 *curr_rule, struct ip_fw_compat_32 *compat_rule) { - int l; - ipfw_insn *cmd; + int l; + ipfw_insn *cmd; - for (l = curr_rule->act_ofs, cmd = curr_rule->cmd ; - l > 0 ; - l -= F_LEN(cmd) , cmd += F_LEN(cmd)) { + for (l = curr_rule->act_ofs, cmd = curr_rule->cmd; + l > 0; + l -= F_LEN(cmd), cmd += F_LEN(cmd)) { /* useful alias */ ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; switch (cmd->opcode) { - case O_PROTO: - /* protocol */ - compat_rule->fw_prot = cmd->arg1; - break; - - case O_IP_SRC_ME: - compat_rule->fw_flg |= IP_FW_F_SME_COMPAT; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - - case O_IP_SRC_MASK: - { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - compat_rule->fw_src = ip->addr; - compat_rule->fw_smsk = ip->mask; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - } - - case O_IP_SRC: - /* one IP */ - /* source - - * for now we only deal with one address - * per rule and ignore sets of addresses - */ - compat_rule->fw_src.s_addr = cmd32->d[0]; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - - case O_IP_SRCPORT: - { - /* source ports */ - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, j; - - /* copy list of ports */ - for (i = F_LEN(cmd) - 1, j = 0; i > 0; i--, j++, p += 2) { - if (p[0] != p[1]) { - /* this is a range */ - compat_rule->fw_flg |= IP_FW_F_SRNG_COMPAT; - compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; - compat_rule->fw_uar_compat.fw_pts[j] = p[1]; - } else { - compat_rule->fw_uar_compat.fw_pts[j] = p[0]; - } - } - IP_FW_SETNSRCP_COMPAT(compat_rule, j); - - break; - } + case O_PROTO: + /* protocol */ + compat_rule->fw_prot = cmd->arg1; + break; - case O_IP_DST_ME: - /* destination */ - compat_rule->fw_flg |= IP_FW_F_DME_COMPAT; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; + case O_IP_SRC_ME: + compat_rule->fw_flg |= IP_FW_F_SME_COMPAT; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; + } + break; - case O_IP_DST_MASK: - { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - compat_rule->fw_dst = ip->addr; - compat_rule->fw_dmsk = ip->mask; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; + case O_IP_SRC_MASK: + { + /* addr/mask */ + ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; + + compat_rule->fw_src = ip->addr; + compat_rule->fw_smsk = ip->mask; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= 
IP_FW_F_INVSRC_COMPAT; } - case O_IP_DST: - /* one IP */ - /* dest - - * for now we only deal with one address - * per rule, and ignore sets of addresses - */ - compat_rule->fw_dst.s_addr = cmd32->d[0]; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; - - case O_IP_DSTPORT: - { - /* dest. ports */ - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, - j = IP_FW_GETNSRCP_COMPAT(compat_rule); - - /* copy list of ports */ - for (i = F_LEN(cmd) - 1; i > 0; i--, j++, p += 2) { - if (p[0] != p[1]) { - /* this is a range */ - compat_rule->fw_flg |= IP_FW_F_DRNG_COMPAT; - compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; - compat_rule->fw_uar_compat.fw_pts[j] = p[1]; - } else { - compat_rule->fw_uar_compat.fw_pts[j] = p[0]; - } - } - IP_FW_SETNDSTP_COMPAT(compat_rule, (j - IP_FW_GETNSRCP_COMPAT(compat_rule))); - - break; + break; + } + + case O_IP_SRC: + /* one IP */ + /* source - + * for now we only deal with one address + * per rule and ignore sets of addresses + */ + compat_rule->fw_src.s_addr = cmd32->d[0]; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; } - - case O_LOG: - { - ipfw_insn_log *c = (ipfw_insn_log *)cmd; - - compat_rule->fw_flg |= IP_FW_F_PRN_COMPAT; - compat_rule->fw_logamount = c->max_log; - break; - } - case O_UID: - compat_rule->fw_flg |= IP_FW_F_UID_COMPAT; - compat_rule->fw_uid = cmd32->d[0]; - break; - - case O_IN: - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_OUT_COMPAT; - } else { - compat_rule->fw_flg |= IP_FW_F_IN_COMPAT; - } - break; - - case O_KEEP_STATE: - compat_rule->fw_flg |= IP_FW_F_KEEP_S_COMPAT; - break; + break; - case O_LAYER2: - compat_rule->fw_flg |= IP_FW_BRIDGED_COMPAT; - break; - - case O_XMIT: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; - ifu.fu_via_ip.s_addr = 0; - } - else if (ifcmd->p.ip.s_addr != 0) { - compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; - ifu.fu_via_ip = ifcmd->p.ip; + case O_IP_SRCPORT: + { + /* source ports */ + ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; + uint16_t *p = ports->ports; + int i, j; + + /* copy list of ports */ + for (i = F_LEN(cmd) - 1, j = 0; i > 0; i--, j++, p += 2) { + if (p[0] != p[1]) { + /* this is a range */ + compat_rule->fw_flg |= IP_FW_F_SRNG_COMPAT; + compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; + compat_rule->fw_uar_compat.fw_pts[j] = p[1]; } else { - compat_rule->fw_flg |= IP_FW_F_OIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; + compat_rule->fw_uar_compat.fw_pts[j] = p[0]; } - compat_rule->fw_out_if = ifu; - - break; } - - case O_RECV: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; - ifu.fu_via_ip.s_addr = 0; - } - else if (ifcmd->p.ip.s_addr != 0) { - compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; - ifu.fu_via_ip = ifcmd->p.ip; - } else { - compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } - compat_rule->fw_in_if = ifu; - - break; + IP_FW_SETNSRCP_COMPAT(compat_rule, j); + + break; + } + + case O_IP_DST_ME: + /* destination */ + compat_rule->fw_flg |= IP_FW_F_DME_COMPAT; + 
if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; } - - case O_VIA: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - ifu.fu_via_ip.s_addr = 0; - } - else if (ifcmd->name[0] != '\0') { - compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; + break; + + case O_IP_DST_MASK: + { + /* addr/mask */ + ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; + + compat_rule->fw_dst = ip->addr; + compat_rule->fw_dmsk = ip->mask; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; + } + break; + } + case O_IP_DST: + /* one IP */ + /* dest - + * for now we only deal with one address + * per rule, and ignore sets of addresses + */ + compat_rule->fw_dst.s_addr = cmd32->d[0]; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; + } + break; + + case O_IP_DSTPORT: + { + /* dest. ports */ + ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; + uint16_t *p = ports->ports; + int i, + j = IP_FW_GETNSRCP_COMPAT(compat_rule); + + /* copy list of ports */ + for (i = F_LEN(cmd) - 1; i > 0; i--, j++, p += 2) { + if (p[0] != p[1]) { + /* this is a range */ + compat_rule->fw_flg |= IP_FW_F_DRNG_COMPAT; + compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; + compat_rule->fw_uar_compat.fw_pts[j] = p[1]; } else { - ifu.fu_via_ip = ifcmd->p.ip; + compat_rule->fw_uar_compat.fw_pts[j] = p[0]; } - compat_rule->fw_flg |= IF_FW_F_VIAHACK_COMPAT; - compat_rule->fw_out_if = compat_rule->fw_in_if = ifu; - - break; } + IP_FW_SETNDSTP_COMPAT(compat_rule, (j - IP_FW_GETNSRCP_COMPAT(compat_rule))); - case O_FRAG: - compat_rule->fw_flg |= IP_FW_F_FRAG_COMPAT; - break; - - case O_IPOPT: - /* IP options */ - compat_rule->fw_ipopt = (cmd->arg1 & 0xff); - compat_rule->fw_ipnopt = ((cmd->arg1 >> 8) & 0xff); - break; - - case O_TCPFLAGS: - /* check for "setup" */ - if ((cmd->arg1 & 0xff) == TH_SYN && - ((cmd->arg1 >> 8) & 0xff) == TH_ACK) { - compat_rule->fw_tcpf = IP_FW_TCPF_SYN_COMPAT; - compat_rule->fw_tcpnf = IP_FW_TCPF_ACK_COMPAT; - } - else { - compat_rule->fw_tcpf = fill_compat_tcpflags(cmd->arg1 & 0xff); - compat_rule->fw_tcpnf = fill_compat_tcpflags((cmd->arg1 >> 8) & 0xff); - } - break; - - case O_TCPOPTS: - /* TCP options */ - compat_rule->fw_tcpopt = (cmd->arg1 & 0xff); - compat_rule->fw_tcpnopt = ((cmd->arg1 >> 8) & 0xff); - break; - - case O_ESTAB: - compat_rule->fw_ipflg |= IP_FW_IF_TCPEST_COMPAT; - break; - - case O_ICMPTYPE: - { - /* ICMP */ - /* XXX: check this */ - int i, type; - - compat_rule->fw_flg |= IP_FW_F_ICMPBIT_COMPAT; - for (i = 0; i < sizeof(uint32_t) ; i++) { - type = cmd32->d[0] & i; - - compat_rule->fw_uar_compat.fw_icmptypes[type / (sizeof(unsigned) * 8)] |= - 1 << (type % (sizeof(unsigned) * 8)); - } - break; + break; + } + + case O_LOG: + { + ipfw_insn_log *c = (ipfw_insn_log *)cmd; + + compat_rule->fw_flg |= IP_FW_F_PRN_COMPAT; + compat_rule->fw_logamount = c->max_log; + break; + } + case O_UID: + compat_rule->fw_flg |= IP_FW_F_UID_COMPAT; + compat_rule->fw_uid = cmd32->d[0]; + break; + + case O_IN: + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_OUT_COMPAT; + } else { + compat_rule->fw_flg |= IP_FW_F_IN_COMPAT; } - default: - break; + break; + + case O_KEEP_STATE: + compat_rule->fw_flg |= IP_FW_F_KEEP_S_COMPAT; + break; + + case O_LAYER2: + compat_rule->fw_flg |= IP_FW_BRIDGED_COMPAT; + break; + + case O_XMIT: + { + ipfw_insn_if 
*ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu; + + if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { + /* any */ + compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; + ifu.fu_via_ip.s_addr = 0; + } else if (ifcmd->p.ip.s_addr != 0) { + compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; + ifu.fu_via_ip = ifcmd->p.ip; + } else { + compat_rule->fw_flg |= IP_FW_F_OIFNAME_COMPAT; + strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); + ifu.fu_via_if_compat.unit = ifcmd->p.unit; + } + compat_rule->fw_out_if = ifu; + + break; + } + + case O_RECV: + { + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu; + + if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { + /* any */ + compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; + ifu.fu_via_ip.s_addr = 0; + } else if (ifcmd->p.ip.s_addr != 0) { + compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; + ifu.fu_via_ip = ifcmd->p.ip; + } else { + compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; + strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); + ifu.fu_via_if_compat.unit = ifcmd->p.unit; + } + compat_rule->fw_in_if = ifu; + + break; + } + + case O_VIA: + { + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu; + + if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { + /* any */ + ifu.fu_via_ip.s_addr = 0; + } else if (ifcmd->name[0] != '\0') { + compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; + strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); + ifu.fu_via_if_compat.unit = ifcmd->p.unit; + } else { + ifu.fu_via_ip = ifcmd->p.ip; + } + compat_rule->fw_flg |= IF_FW_F_VIAHACK_COMPAT; + compat_rule->fw_out_if = compat_rule->fw_in_if = ifu; + + break; + } + + case O_FRAG: + compat_rule->fw_flg |= IP_FW_F_FRAG_COMPAT; + break; + + case O_IPOPT: + /* IP options */ + compat_rule->fw_ipopt = (cmd->arg1 & 0xff); + compat_rule->fw_ipnopt = ((cmd->arg1 >> 8) & 0xff); + break; + + case O_TCPFLAGS: + /* check for "setup" */ + if ((cmd->arg1 & 0xff) == TH_SYN && + ((cmd->arg1 >> 8) & 0xff) == TH_ACK) { + compat_rule->fw_tcpf = IP_FW_TCPF_SYN_COMPAT; + compat_rule->fw_tcpnf = IP_FW_TCPF_ACK_COMPAT; + } else { + compat_rule->fw_tcpf = fill_compat_tcpflags(cmd->arg1 & 0xff); + compat_rule->fw_tcpnf = fill_compat_tcpflags((cmd->arg1 >> 8) & 0xff); + } + break; + + case O_TCPOPTS: + /* TCP options */ + compat_rule->fw_tcpopt = (cmd->arg1 & 0xff); + compat_rule->fw_tcpnopt = ((cmd->arg1 >> 8) & 0xff); + break; + + case O_ESTAB: + compat_rule->fw_ipflg |= IP_FW_IF_TCPEST_COMPAT; + break; + + case O_ICMPTYPE: + { + /* ICMP */ + /* XXX: check this */ + int i, type; + + compat_rule->fw_flg |= IP_FW_F_ICMPBIT_COMPAT; + for (i = 0; i < sizeof(uint32_t); i++) { + type = cmd32->d[0] & i; + + compat_rule->fw_uar_compat.fw_icmptypes[type / (sizeof(unsigned) * 8)] |= + 1 << (type % (sizeof(unsigned) * 8)); + } + break; + } + default: + break; } /* switch */ } /* for */ } @@ -1106,278 +1125,274 @@ ipfw_map_from_cmds_32(struct ip_fw_32 *curr_rule, struct ip_fw_compat_32 *compat static void ipfw_map_from_cmds_64(struct ip_fw_64 *curr_rule, struct ip_fw_compat_64 *compat_rule) { - int l; - ipfw_insn *cmd; - for (l = curr_rule->act_ofs, cmd = curr_rule->cmd ; - l > 0 ; - l -= F_LEN(cmd) , cmd += F_LEN(cmd)) { + int l; + ipfw_insn *cmd; + for (l = curr_rule->act_ofs, cmd = curr_rule->cmd; + l > 0; + l -= F_LEN(cmd), cmd += F_LEN(cmd)) { /* useful alias */ ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; switch (cmd->opcode) { - case O_PROTO: - /* protocol */ - 
compat_rule->fw_prot = cmd->arg1; - break; - - case O_IP_SRC_ME: - compat_rule->fw_flg |= IP_FW_F_SME_COMPAT; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - - case O_IP_SRC_MASK: - { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - compat_rule->fw_src = ip->addr; - compat_rule->fw_smsk = ip->mask; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; + case O_PROTO: + /* protocol */ + compat_rule->fw_prot = cmd->arg1; + break; + + case O_IP_SRC_ME: + compat_rule->fw_flg |= IP_FW_F_SME_COMPAT; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; } - - case O_IP_SRC: - /* one IP */ - /* source - - * for now we only deal with one address - * per rule and ignore sets of addresses - */ - compat_rule->fw_src.s_addr = cmd32->d[0]; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; - } - break; - - case O_IP_SRCPORT: - { - /* source ports */ - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, j; - - /* copy list of ports */ - for (i = F_LEN(cmd) - 1, j = 0; i > 0; i--, j++, p += 2) { - if (p[0] != p[1]) { - /* this is a range */ - compat_rule->fw_flg |= IP_FW_F_SRNG_COMPAT; - compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; - compat_rule->fw_uar_compat.fw_pts[j] = p[1]; - } else { - compat_rule->fw_uar_compat.fw_pts[j] = p[0]; - } + break; + + case O_IP_SRC_MASK: + { + /* addr/mask */ + ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; + + compat_rule->fw_src = ip->addr; + compat_rule->fw_smsk = ip->mask; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; + } + break; + } + + case O_IP_SRC: + /* one IP */ + /* source - + * for now we only deal with one address + * per rule and ignore sets of addresses + */ + compat_rule->fw_src.s_addr = cmd32->d[0]; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVSRC_COMPAT; + } + break; + + case O_IP_SRCPORT: + { + /* source ports */ + ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; + uint16_t *p = ports->ports; + int i, j; + + /* copy list of ports */ + for (i = F_LEN(cmd) - 1, j = 0; i > 0; i--, j++, p += 2) { + if (p[0] != p[1]) { + /* this is a range */ + compat_rule->fw_flg |= IP_FW_F_SRNG_COMPAT; + compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; + compat_rule->fw_uar_compat.fw_pts[j] = p[1]; + } else { + compat_rule->fw_uar_compat.fw_pts[j] = p[0]; } - IP_FW_SETNSRCP_COMPAT(compat_rule, j); - - break; } + IP_FW_SETNSRCP_COMPAT(compat_rule, j); + + break; + } - case O_IP_DST_ME: + case O_IP_DST_ME: /* destination */ - compat_rule->fw_flg |= IP_FW_F_DME_COMPAT; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; + compat_rule->fw_flg |= IP_FW_F_DME_COMPAT; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; + } + break; - case O_IP_DST_MASK: - { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - compat_rule->fw_dst = ip->addr; - compat_rule->fw_dmsk = ip->mask; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; - } - break; + case O_IP_DST_MASK: + { + /* addr/mask */ + ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; + + compat_rule->fw_dst = ip->addr; + compat_rule->fw_dmsk = ip->mask; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; } - case O_IP_DST: - /* one IP */ - /* dest - - * for now we only deal with one address - * per rule, and ignore sets of addresses - */ - compat_rule->fw_dst.s_addr = cmd32->d[0]; - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= 
IP_FW_F_INVDST_COMPAT; - } - break; - - case O_IP_DSTPORT: - { - /* dest. ports */ - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, - j = IP_FW_GETNSRCP_COMPAT(compat_rule); - - /* copy list of ports */ - for (i = F_LEN(cmd) - 1; i > 0; i--, j++, p += 2) { - if (p[0] != p[1]) { - /* this is a range */ - compat_rule->fw_flg |= IP_FW_F_DRNG_COMPAT; - compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; - compat_rule->fw_uar_compat.fw_pts[j] = p[1]; - } else { - compat_rule->fw_uar_compat.fw_pts[j] = p[0]; - } - } - IP_FW_SETNDSTP_COMPAT(compat_rule, (j - IP_FW_GETNSRCP_COMPAT(compat_rule))); - - break; + break; + } + case O_IP_DST: + /* one IP */ + /* dest - + * for now we only deal with one address + * per rule, and ignore sets of addresses + */ + compat_rule->fw_dst.s_addr = cmd32->d[0]; + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_INVDST_COMPAT; } - - case O_LOG: - { - ipfw_insn_log *c = (ipfw_insn_log *)cmd; - - compat_rule->fw_flg |= IP_FW_F_PRN_COMPAT; - compat_rule->fw_logamount = c->max_log; - break; - } - case O_UID: - compat_rule->fw_flg |= IP_FW_F_UID_COMPAT; - compat_rule->fw_uid = cmd32->d[0]; - break; - - case O_IN: - if (cmd->len & F_NOT) { - compat_rule->fw_flg |= IP_FW_F_OUT_COMPAT; - } else { - compat_rule->fw_flg |= IP_FW_F_IN_COMPAT; - } - break; - - case O_KEEP_STATE: - compat_rule->fw_flg |= IP_FW_F_KEEP_S_COMPAT; - break; + break; - case O_LAYER2: - compat_rule->fw_flg |= IP_FW_BRIDGED_COMPAT; - break; - - case O_XMIT: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; - ifu.fu_via_ip.s_addr = 0; - } - else if (ifcmd->p.ip.s_addr != 0) { - compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; - ifu.fu_via_ip = ifcmd->p.ip; + case O_IP_DSTPORT: + { + /* dest. 
ports */ + ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; + uint16_t *p = ports->ports; + int i, + j = IP_FW_GETNSRCP_COMPAT(compat_rule); + + /* copy list of ports */ + for (i = F_LEN(cmd) - 1; i > 0; i--, j++, p += 2) { + if (p[0] != p[1]) { + /* this is a range */ + compat_rule->fw_flg |= IP_FW_F_DRNG_COMPAT; + compat_rule->fw_uar_compat.fw_pts[j++] = p[0]; + compat_rule->fw_uar_compat.fw_pts[j] = p[1]; } else { - compat_rule->fw_flg |= IP_FW_F_OIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; + compat_rule->fw_uar_compat.fw_pts[j] = p[0]; } - compat_rule->fw_out_if = ifu; - - break; } - - case O_RECV: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; - ifu.fu_via_ip.s_addr = 0; - } - else if (ifcmd->p.ip.s_addr != 0) { - compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; - ifu.fu_via_ip = ifcmd->p.ip; - } else { - compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } - compat_rule->fw_in_if = ifu; - - break; + IP_FW_SETNDSTP_COMPAT(compat_rule, (j - IP_FW_GETNSRCP_COMPAT(compat_rule))); + + break; + } + + case O_LOG: + { + ipfw_insn_log *c = (ipfw_insn_log *)cmd; + + compat_rule->fw_flg |= IP_FW_F_PRN_COMPAT; + compat_rule->fw_logamount = c->max_log; + break; + } + case O_UID: + compat_rule->fw_flg |= IP_FW_F_UID_COMPAT; + compat_rule->fw_uid = cmd32->d[0]; + break; + + case O_IN: + if (cmd->len & F_NOT) { + compat_rule->fw_flg |= IP_FW_F_OUT_COMPAT; + } else { + compat_rule->fw_flg |= IP_FW_F_IN_COMPAT; } - - case O_VIA: - { - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu; - - if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { - /* any */ - ifu.fu_via_ip.s_addr = 0; - } - else if (ifcmd->name[0] != '\0') { - compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; - strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); - ifu.fu_via_if_compat.unit = ifcmd->p.unit; - } else { - ifu.fu_via_ip = ifcmd->p.ip; - } - compat_rule->fw_flg |= IF_FW_F_VIAHACK_COMPAT; - compat_rule->fw_out_if = compat_rule->fw_in_if = ifu; - - break; + break; + + case O_KEEP_STATE: + compat_rule->fw_flg |= IP_FW_F_KEEP_S_COMPAT; + break; + + case O_LAYER2: + compat_rule->fw_flg |= IP_FW_BRIDGED_COMPAT; + break; + + case O_XMIT: + { + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu; + + if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { + /* any */ + compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; + ifu.fu_via_ip.s_addr = 0; + } else if (ifcmd->p.ip.s_addr != 0) { + compat_rule->fw_flg |= IP_FW_F_OIFACE_COMPAT; + ifu.fu_via_ip = ifcmd->p.ip; + } else { + compat_rule->fw_flg |= IP_FW_F_OIFNAME_COMPAT; + strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); + ifu.fu_via_if_compat.unit = ifcmd->p.unit; } + compat_rule->fw_out_if = ifu; - case O_FRAG: - compat_rule->fw_flg |= IP_FW_F_FRAG_COMPAT; - break; - - case O_IPOPT: - /* IP options */ - compat_rule->fw_ipopt = (cmd->arg1 & 0xff); - compat_rule->fw_ipnopt = ((cmd->arg1 >> 8) & 0xff); - break; - - case O_TCPFLAGS: - /* check for "setup" */ - if ((cmd->arg1 & 0xff) == TH_SYN && - ((cmd->arg1 >> 8) & 0xff) == TH_ACK) { - compat_rule->fw_tcpf = IP_FW_TCPF_SYN_COMPAT; - compat_rule->fw_tcpnf = 
IP_FW_TCPF_ACK_COMPAT; - } - else { - compat_rule->fw_tcpf = fill_compat_tcpflags(cmd->arg1 & 0xff); - compat_rule->fw_tcpnf = fill_compat_tcpflags((cmd->arg1 >> 8) & 0xff); - } - break; - - case O_TCPOPTS: - /* TCP options */ - compat_rule->fw_tcpopt = (cmd->arg1 & 0xff); - compat_rule->fw_tcpnopt = ((cmd->arg1 >> 8) & 0xff); - break; - - case O_ESTAB: - compat_rule->fw_ipflg |= IP_FW_IF_TCPEST_COMPAT; - break; - - case O_ICMPTYPE: - { - /* ICMP */ - /* XXX: check this */ - int i, type; - - compat_rule->fw_flg |= IP_FW_F_ICMPBIT_COMPAT; - for (i = 0; i < sizeof(uint32_t) ; i++) { - type = cmd32->d[0] & i; - - compat_rule->fw_uar_compat.fw_icmptypes[type / (sizeof(unsigned) * 8)] |= - 1 << (type % (sizeof(unsigned) * 8)); - } - break; + break; + } + + case O_RECV: + { + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu; + + if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { + /* any */ + compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; + ifu.fu_via_ip.s_addr = 0; + } else if (ifcmd->p.ip.s_addr != 0) { + compat_rule->fw_flg |= IP_FW_F_IIFACE_COMPAT; + ifu.fu_via_ip = ifcmd->p.ip; + } else { + compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; + strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); + ifu.fu_via_if_compat.unit = ifcmd->p.unit; } - default: - break; + compat_rule->fw_in_if = ifu; + + break; + } + + case O_VIA: + { + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu; + + if ((ifcmd->o.len == 0) && (ifcmd->name[0] == '\0')) { + /* any */ + ifu.fu_via_ip.s_addr = 0; + } else if (ifcmd->name[0] != '\0') { + compat_rule->fw_flg |= IP_FW_F_IIFNAME_COMPAT; + strncpy(ifu.fu_via_if_compat.name, ifcmd->name, sizeof(ifu.fu_via_if_compat.name)); + ifu.fu_via_if_compat.unit = ifcmd->p.unit; + } else { + ifu.fu_via_ip = ifcmd->p.ip; + } + compat_rule->fw_flg |= IF_FW_F_VIAHACK_COMPAT; + compat_rule->fw_out_if = compat_rule->fw_in_if = ifu; + + break; + } + + case O_FRAG: + compat_rule->fw_flg |= IP_FW_F_FRAG_COMPAT; + break; + + case O_IPOPT: + /* IP options */ + compat_rule->fw_ipopt = (cmd->arg1 & 0xff); + compat_rule->fw_ipnopt = ((cmd->arg1 >> 8) & 0xff); + break; + + case O_TCPFLAGS: + /* check for "setup" */ + if ((cmd->arg1 & 0xff) == TH_SYN && + ((cmd->arg1 >> 8) & 0xff) == TH_ACK) { + compat_rule->fw_tcpf = IP_FW_TCPF_SYN_COMPAT; + compat_rule->fw_tcpnf = IP_FW_TCPF_ACK_COMPAT; + } else { + compat_rule->fw_tcpf = fill_compat_tcpflags(cmd->arg1 & 0xff); + compat_rule->fw_tcpnf = fill_compat_tcpflags((cmd->arg1 >> 8) & 0xff); + } + break; + + case O_TCPOPTS: + /* TCP options */ + compat_rule->fw_tcpopt = (cmd->arg1 & 0xff); + compat_rule->fw_tcpnopt = ((cmd->arg1 >> 8) & 0xff); + break; + + case O_ESTAB: + compat_rule->fw_ipflg |= IP_FW_IF_TCPEST_COMPAT; + break; + + case O_ICMPTYPE: + { + /* ICMP */ + /* XXX: check this */ + int i, type; + + compat_rule->fw_flg |= IP_FW_F_ICMPBIT_COMPAT; + for (i = 0; i < sizeof(uint32_t); i++) { + type = cmd32->d[0] & i; + + compat_rule->fw_uar_compat.fw_icmptypes[type / (sizeof(unsigned) * 8)] |= + 1 << (type % (sizeof(unsigned) * 8)); + } + break; + } + default: + break; } /* switch */ } /* for */ } @@ -1386,62 +1401,62 @@ static void ipfw_map_from_actions_32(struct ip_fw_32 *curr_rule, struct ip_fw_compat_32 *compat_rule) { int l; - ipfw_insn *cmd; - + ipfw_insn *cmd; + for (l = curr_rule->cmd_len - curr_rule->act_ofs, cmd = ACTION_PTR(curr_rule); - l > 0 ; - l -= F_LEN(cmd), cmd += F_LEN(cmd)) { + l > 0; + l -= F_LEN(cmd), cmd += F_LEN(cmd)) { switch (cmd->opcode) { - case 
O_ACCEPT: - compat_rule->fw_flg |= IP_FW_F_ACCEPT_COMPAT; - break; - case O_COUNT: - compat_rule->fw_flg |= IP_FW_F_COUNT_COMPAT; - break; - case O_PIPE: - compat_rule->fw_flg |= IP_FW_F_PIPE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_QUEUE: - compat_rule->fw_flg |= IP_FW_F_QUEUE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_SKIPTO: - compat_rule->fw_flg |= IP_FW_F_SKIPTO_COMPAT; - compat_rule->fw_skipto_rule_compat = cmd->arg1; - break; - case O_DIVERT: - compat_rule->fw_flg |= IP_FW_F_DIVERT_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_TEE: - compat_rule->fw_flg |= IP_FW_F_TEE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_FORWARD_IP: - { - ipfw_insn_sa *p = (ipfw_insn_sa *)cmd; - - compat_rule->fw_flg |= IP_FW_F_FWD_COMPAT; - compat_rule->fw_fwd_ip_compat.sin_len = p->sa.sin_len; - compat_rule->fw_fwd_ip_compat.sin_family = p->sa.sin_family; - compat_rule->fw_fwd_ip_compat.sin_port = p->sa.sin_port; - compat_rule->fw_fwd_ip_compat.sin_addr = p->sa.sin_addr; + case O_ACCEPT: + compat_rule->fw_flg |= IP_FW_F_ACCEPT_COMPAT; + break; + case O_COUNT: + compat_rule->fw_flg |= IP_FW_F_COUNT_COMPAT; + break; + case O_PIPE: + compat_rule->fw_flg |= IP_FW_F_PIPE_COMPAT; + compat_rule->fw_divert_port_compat = cmd->arg1; + break; + case O_QUEUE: + compat_rule->fw_flg |= IP_FW_F_QUEUE_COMPAT; + compat_rule->fw_divert_port_compat = cmd->arg1; + break; + case O_SKIPTO: + compat_rule->fw_flg |= IP_FW_F_SKIPTO_COMPAT; + compat_rule->fw_skipto_rule_compat = cmd->arg1; + break; + case O_DIVERT: + compat_rule->fw_flg |= IP_FW_F_DIVERT_COMPAT; + compat_rule->fw_divert_port_compat = cmd->arg1; + break; + case O_TEE: + compat_rule->fw_flg |= IP_FW_F_TEE_COMPAT; + compat_rule->fw_divert_port_compat = cmd->arg1; + break; + case O_FORWARD_IP: + { + ipfw_insn_sa *p = (ipfw_insn_sa *)cmd; - break; - } - case O_DENY: - compat_rule->fw_flg |= IP_FW_F_DENY_COMPAT; - break; - case O_REJECT: - compat_rule->fw_flg |= IP_FW_F_REJECT_COMPAT; - compat_rule->fw_reject_code_compat = cmd->arg1; - break; - case O_CHECK_STATE: - compat_rule->fw_flg |= IP_FW_F_CHECK_S_COMPAT; - break; - default: - break; + compat_rule->fw_flg |= IP_FW_F_FWD_COMPAT; + compat_rule->fw_fwd_ip_compat.sin_len = p->sa.sin_len; + compat_rule->fw_fwd_ip_compat.sin_family = p->sa.sin_family; + compat_rule->fw_fwd_ip_compat.sin_port = p->sa.sin_port; + compat_rule->fw_fwd_ip_compat.sin_addr = p->sa.sin_addr; + + break; + } + case O_DENY: + compat_rule->fw_flg |= IP_FW_F_DENY_COMPAT; + break; + case O_REJECT: + compat_rule->fw_flg |= IP_FW_F_REJECT_COMPAT; + compat_rule->fw_reject_code_compat = cmd->arg1; + break; + case O_CHECK_STATE: + compat_rule->fw_flg |= IP_FW_F_CHECK_S_COMPAT; + break; + default: + break; } } } @@ -1450,61 +1465,61 @@ static void ipfw_map_from_actions_64(struct ip_fw_64 *curr_rule, struct ip_fw_compat_64 *compat_rule) { int l; - ipfw_insn *cmd; + ipfw_insn *cmd; for (l = curr_rule->cmd_len - curr_rule->act_ofs, cmd = ACTION_PTR(curr_rule); - l > 0 ; - l -= F_LEN(cmd), cmd += F_LEN(cmd)) { + l > 0; + l -= F_LEN(cmd), cmd += F_LEN(cmd)) { switch (cmd->opcode) { - case O_ACCEPT: - compat_rule->fw_flg |= IP_FW_F_ACCEPT_COMPAT; - break; - case O_COUNT: - compat_rule->fw_flg |= IP_FW_F_COUNT_COMPAT; - break; - case O_PIPE: - compat_rule->fw_flg |= IP_FW_F_PIPE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_QUEUE: - compat_rule->fw_flg |= IP_FW_F_QUEUE_COMPAT; - 
compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_SKIPTO: - compat_rule->fw_flg |= IP_FW_F_SKIPTO_COMPAT; - compat_rule->fw_skipto_rule_compat = cmd->arg1; - break; - case O_DIVERT: - compat_rule->fw_flg |= IP_FW_F_DIVERT_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_TEE: - compat_rule->fw_flg |= IP_FW_F_TEE_COMPAT; - compat_rule->fw_divert_port_compat = cmd->arg1; - break; - case O_FORWARD_IP: - { - ipfw_insn_sa *p = (ipfw_insn_sa *)cmd; - - compat_rule->fw_flg |= IP_FW_F_FWD_COMPAT; - compat_rule->fw_fwd_ip_compat.sin_len = p->sa.sin_len; - compat_rule->fw_fwd_ip_compat.sin_family = p->sa.sin_family; - compat_rule->fw_fwd_ip_compat.sin_port = p->sa.sin_port; - compat_rule->fw_fwd_ip_compat.sin_addr = p->sa.sin_addr; + case O_ACCEPT: + compat_rule->fw_flg |= IP_FW_F_ACCEPT_COMPAT; + break; + case O_COUNT: + compat_rule->fw_flg |= IP_FW_F_COUNT_COMPAT; + break; + case O_PIPE: + compat_rule->fw_flg |= IP_FW_F_PIPE_COMPAT; + compat_rule->fw_divert_port_compat = cmd->arg1; + break; + case O_QUEUE: + compat_rule->fw_flg |= IP_FW_F_QUEUE_COMPAT; + compat_rule->fw_divert_port_compat = cmd->arg1; + break; + case O_SKIPTO: + compat_rule->fw_flg |= IP_FW_F_SKIPTO_COMPAT; + compat_rule->fw_skipto_rule_compat = cmd->arg1; + break; + case O_DIVERT: + compat_rule->fw_flg |= IP_FW_F_DIVERT_COMPAT; + compat_rule->fw_divert_port_compat = cmd->arg1; + break; + case O_TEE: + compat_rule->fw_flg |= IP_FW_F_TEE_COMPAT; + compat_rule->fw_divert_port_compat = cmd->arg1; + break; + case O_FORWARD_IP: + { + ipfw_insn_sa *p = (ipfw_insn_sa *)cmd; - break; - } - case O_DENY: - compat_rule->fw_flg |= IP_FW_F_DENY_COMPAT; - break; - case O_REJECT: - compat_rule->fw_flg |= IP_FW_F_REJECT_COMPAT; - compat_rule->fw_reject_code_compat = cmd->arg1; - break; - case O_CHECK_STATE: - compat_rule->fw_flg |= IP_FW_F_CHECK_S_COMPAT; - break; - default: - break; + compat_rule->fw_flg |= IP_FW_F_FWD_COMPAT; + compat_rule->fw_fwd_ip_compat.sin_len = p->sa.sin_len; + compat_rule->fw_fwd_ip_compat.sin_family = p->sa.sin_family; + compat_rule->fw_fwd_ip_compat.sin_port = p->sa.sin_port; + compat_rule->fw_fwd_ip_compat.sin_addr = p->sa.sin_addr; + + break; + } + case O_DENY: + compat_rule->fw_flg |= IP_FW_F_DENY_COMPAT; + break; + case O_REJECT: + compat_rule->fw_flg |= IP_FW_F_REJECT_COMPAT; + compat_rule->fw_reject_code_compat = cmd->arg1; + break; + case O_CHECK_STATE: + compat_rule->fw_flg |= IP_FW_F_CHECK_S_COMPAT; + break; + default: + break; } } } @@ -1512,24 +1527,25 @@ ipfw_map_from_actions_64(struct ip_fw_64 *curr_rule, struct ip_fw_compat_64 *com static void ipfw_version_latest_to_one_32(struct ip_fw_32 *curr_rule, struct ip_fw_compat_32 *rule_vers1) { - if (!rule_vers1) + if (!rule_vers1) { return; - + } + bzero(rule_vers1, sizeof(struct ip_fw_compat_32)); - + rule_vers1->version = IP_FW_VERSION_1; - rule_vers1->context = CAST_DOWN_EXPLICIT(user32_addr_t,curr_rule->context); + rule_vers1->context = CAST_DOWN_EXPLICIT(user32_addr_t, curr_rule->context); rule_vers1->fw_number = curr_rule->rulenum; rule_vers1->fw_pcnt = curr_rule->pcnt; rule_vers1->fw_bcnt = curr_rule->bcnt; rule_vers1->timestamp = curr_rule->timestamp; - + /* convert actions */ ipfw_map_from_actions_32(curr_rule, rule_vers1); /* convert commands */ ipfw_map_from_cmds_32(curr_rule, rule_vers1); - + #if FW2_DEBUG_VERBOSE ipfw_print_vers1_struct_32(rule_vers1); #endif @@ -1538,24 +1554,25 @@ ipfw_version_latest_to_one_32(struct ip_fw_32 *curr_rule, struct ip_fw_compat_32 static void 
ipfw_version_latest_to_one_64(struct ip_fw_64 *curr_rule, struct ip_fw_compat_64 *rule_vers1) { - if (!rule_vers1) + if (!rule_vers1) { return; - + } + bzero(rule_vers1, sizeof(struct ip_fw_compat_64)); - + rule_vers1->version = IP_FW_VERSION_1; rule_vers1->context = CAST_DOWN_EXPLICIT(__uint64_t, curr_rule->context); rule_vers1->fw_number = curr_rule->rulenum; rule_vers1->fw_pcnt = curr_rule->pcnt; rule_vers1->fw_bcnt = curr_rule->bcnt; rule_vers1->timestamp = curr_rule->timestamp; - + /* convert actions */ ipfw_map_from_actions_64(curr_rule, rule_vers1); /* convert commands */ ipfw_map_from_cmds_64(curr_rule, rule_vers1); - + #if FW2_DEBUG_VERBOSE ipfw_print_vers1_struct_64(rule_vers1); #endif @@ -1565,9 +1582,8 @@ ipfw_version_latest_to_one_64(struct ip_fw_64 *curr_rule, struct ip_fw_compat_64 static void ipfw_version_latest_to_zero(struct ip_fw *curr_rule, struct ip_old_fw *rule_vers0, int is64user) { - - if ( is64user ){ - struct ip_fw_compat_64 rule_vers1; + if (is64user) { + struct ip_fw_compat_64 rule_vers1; ipfw_version_latest_to_one_64((struct ip_fw_64*)curr_rule, &rule_vers1); bzero(rule_vers0, sizeof(struct ip_old_fw)); bcopy(&rule_vers1.fw_uar_compat, &rule_vers0->fw_uar, sizeof(rule_vers1.fw_uar_compat)); @@ -1592,11 +1608,12 @@ ipfw_version_latest_to_zero(struct ip_fw *curr_rule, struct ip_old_fw *rule_vers rule_vers0->pipe_ptr = CAST_DOWN_EXPLICIT(void*, rule_vers1.pipe_ptr); rule_vers0->next_rule_ptr = CAST_DOWN_EXPLICIT(void*, rule_vers1.next_rule_ptr); - if (rule_vers1.fw_ipflg & IP_FW_IF_TCPEST_COMPAT) rule_vers0->fw_tcpf |= IP_OLD_FW_TCPF_ESTAB; - } - else { - struct ip_fw_compat_32 rule_vers1; - ipfw_version_latest_to_one_32( (struct ip_fw_32*)curr_rule, &rule_vers1); + if (rule_vers1.fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { + rule_vers0->fw_tcpf |= IP_OLD_FW_TCPF_ESTAB; + } + } else { + struct ip_fw_compat_32 rule_vers1; + ipfw_version_latest_to_one_32((struct ip_fw_32*)curr_rule, &rule_vers1); bzero(rule_vers0, sizeof(struct ip_old_fw)); bcopy(&rule_vers1.fw_uar_compat, &rule_vers0->fw_uar, sizeof(rule_vers1.fw_uar_compat)); bcopy(&rule_vers1.fw_in_if, &rule_vers0->fw_in_if, sizeof(rule_vers1.fw_in_if)); @@ -1620,45 +1637,47 @@ ipfw_version_latest_to_zero(struct ip_fw *curr_rule, struct ip_old_fw *rule_vers rule_vers0->pipe_ptr = CAST_DOWN_EXPLICIT(void*, rule_vers1.pipe_ptr); rule_vers0->next_rule_ptr = CAST_DOWN_EXPLICIT(void*, rule_vers1.next_rule_ptr); - if (rule_vers1.fw_ipflg & IP_FW_IF_TCPEST_COMPAT) rule_vers0->fw_tcpf |= IP_OLD_FW_TCPF_ESTAB; + if (rule_vers1.fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { + rule_vers0->fw_tcpf |= IP_OLD_FW_TCPF_ESTAB; + } } - } void ipfw_convert_from_latest(struct ip_fw *curr_rule, void *old_rule, u_int32_t api_version, int is64user) { switch (api_version) { - case IP_FW_VERSION_0: - { - struct ip_old_fw *rule_vers0 = old_rule; - - ipfw_version_latest_to_zero(curr_rule, rule_vers0, is64user); - break; - } - case IP_FW_VERSION_1: - { - if ( is64user ) - ipfw_version_latest_to_one_64((struct ip_fw_64*)curr_rule, (struct ip_fw_compat_64 *)old_rule); - else - ipfw_version_latest_to_one_32((struct ip_fw_32*)curr_rule, (struct ip_fw_compat_32 *)old_rule); + case IP_FW_VERSION_0: + { + struct ip_old_fw *rule_vers0 = old_rule; - break; + ipfw_version_latest_to_zero(curr_rule, rule_vers0, is64user); + break; + } + case IP_FW_VERSION_1: + { + if (is64user) { + ipfw_version_latest_to_one_64((struct ip_fw_64*)curr_rule, (struct ip_fw_compat_64 *)old_rule); + } else { + ipfw_version_latest_to_one_32((struct ip_fw_32*)curr_rule, (struct 
ip_fw_compat_32 *)old_rule); } - case IP_FW_CURRENT_API_VERSION: - /* ipfw2 for now, don't need to do anything */ - break; - - default: - /* unknown version */ - break; + + break; + } + case IP_FW_CURRENT_API_VERSION: + /* ipfw2 for now, don't need to do anything */ + break; + + default: + /* unknown version */ + break; } } /* ******************************************** - * *********** Convert to Latest ************** - * ********************************************/ +* *********** Convert to Latest ************** +* ********************************************/ /* from ip_fw.c */ static int @@ -1666,65 +1685,65 @@ ipfw_check_vers1_struct_32(struct ip_fw_compat_32 *frwl) { /* Check for invalid flag bits */ if ((frwl->fw_flg & ~IP_FW_F_MASK_COMPAT) != 0) { - /* - printf(("%s undefined flag bits set (flags=%x)\n", - err_prefix, frwl->fw_flg)); - */ - return (EINVAL); + /* + * printf(("%s undefined flag bits set (flags=%x)\n", + * err_prefix, frwl->fw_flg)); + */ + return EINVAL; } if (frwl->fw_flg == IP_FW_F_CHECK_S_COMPAT) { /* check-state */ - return 0 ; + return 0; } /* Must apply to incoming or outgoing (or both) */ if (!(frwl->fw_flg & (IP_FW_F_IN_COMPAT | IP_FW_F_OUT_COMPAT))) { /* - printf(("%s neither in nor out\n", err_prefix)); - */ - return (EINVAL); + * printf(("%s neither in nor out\n", err_prefix)); + */ + return EINVAL; } /* Empty interface name is no good */ if (((frwl->fw_flg & IP_FW_F_IIFNAME_COMPAT) - && !*frwl->fw_in_if.fu_via_if_compat.name) + && !*frwl->fw_in_if.fu_via_if_compat.name) || ((frwl->fw_flg & IP_FW_F_OIFNAME_COMPAT) - && !*frwl->fw_out_if.fu_via_if_compat.name)) { + && !*frwl->fw_out_if.fu_via_if_compat.name)) { /* - printf(("%s empty interface name\n", err_prefix)); - */ - return (EINVAL); + * printf(("%s empty interface name\n", err_prefix)); + */ + return EINVAL; } /* Sanity check interface matching */ if ((frwl->fw_flg & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { - ; /* allow "via" backwards compatibility */ + ; /* allow "via" backwards compatibility */ } else if ((frwl->fw_flg & IP_FW_F_IN_COMPAT) && (frwl->fw_flg & IP_FW_F_OIFACE_COMPAT)) { /* - printf(("%s outgoing interface check on incoming\n", - err_prefix)); - */ - return (EINVAL); + * printf(("%s outgoing interface check on incoming\n", + * err_prefix)); + */ + return EINVAL; } /* Sanity check port ranges */ if ((frwl->fw_flg & IP_FW_F_SRNG_COMPAT) && IP_FW_GETNSRCP_COMPAT(frwl) < 2) { /* - printf(("%s src range set but n_src_p=%d\n", - err_prefix, IP_FW_GETNSRCP_COMPAT(frwl))); - */ - return (EINVAL); + * printf(("%s src range set but n_src_p=%d\n", + * err_prefix, IP_FW_GETNSRCP_COMPAT(frwl))); + */ + return EINVAL; } if ((frwl->fw_flg & IP_FW_F_DRNG_COMPAT) && IP_FW_GETNDSTP_COMPAT(frwl) < 2) { /* - printf(("%s dst range set but n_dst_p=%d\n", - err_prefix, IP_FW_GETNDSTP_COMPAT(frwl))); - */ - return (EINVAL); + * printf(("%s dst range set but n_dst_p=%d\n", + * err_prefix, IP_FW_GETNDSTP_COMPAT(frwl))); + */ + return EINVAL; } if (IP_FW_GETNSRCP_COMPAT(frwl) + IP_FW_GETNDSTP_COMPAT(frwl) > IP_FW_MAX_PORTS_COMPAT) { /* - printf(("%s too many ports (%d+%d)\n", - err_prefix, IP_FW_GETNSRCP_COMPAT(frwl), IP_FW_GETNDSTP_COMPAT(frwl))); - */ - return (EINVAL); + * printf(("%s too many ports (%d+%d)\n", + * err_prefix, IP_FW_GETNSRCP_COMPAT(frwl), IP_FW_GETNDSTP_COMPAT(frwl))); + */ + return EINVAL; } /* * Protocols other than TCP/UDP don't use port range @@ -1733,64 +1752,63 @@ ipfw_check_vers1_struct_32(struct ip_fw_compat_32 *frwl) (frwl->fw_prot != IPPROTO_UDP) && 
(IP_FW_GETNSRCP_COMPAT(frwl) || IP_FW_GETNDSTP_COMPAT(frwl))) { /* - printf(("%s port(s) specified for non TCP/UDP rule\n", - err_prefix)); - */ - return (EINVAL); + * printf(("%s port(s) specified for non TCP/UDP rule\n", + * err_prefix)); + */ + return EINVAL; } /* - * Rather than modify the entry to make such entries work, + * Rather than modify the entry to make such entries work, * we reject this rule and require user level utilities * to enforce whatever policy they deem appropriate. */ - if ((frwl->fw_src.s_addr & (~frwl->fw_smsk.s_addr)) || - (frwl->fw_dst.s_addr & (~frwl->fw_dmsk.s_addr))) { + if ((frwl->fw_src.s_addr & (~frwl->fw_smsk.s_addr)) || + (frwl->fw_dst.s_addr & (~frwl->fw_dmsk.s_addr))) { /* - printf(("%s rule never matches\n", err_prefix)); - */ - return (EINVAL); + * printf(("%s rule never matches\n", err_prefix)); + */ + return EINVAL; } if ((frwl->fw_flg & IP_FW_F_FRAG_COMPAT) && - (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { + (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { if (frwl->fw_nports) { - /* - printf(("%s cannot mix 'frag' and ports\n", err_prefix)); - */ - return (EINVAL); + /* + * printf(("%s cannot mix 'frag' and ports\n", err_prefix)); + */ + return EINVAL; } if (frwl->fw_prot == IPPROTO_TCP && - frwl->fw_tcpf != frwl->fw_tcpnf) { - /* - printf(("%s cannot mix 'frag' and TCP flags\n", err_prefix)); - */ - return (EINVAL); + frwl->fw_tcpf != frwl->fw_tcpnf) { + /* + * printf(("%s cannot mix 'frag' and TCP flags\n", err_prefix)); + */ + return EINVAL; } } /* Check command specific stuff */ - switch (frwl->fw_flg & IP_FW_F_COMMAND_COMPAT) - { + switch (frwl->fw_flg & IP_FW_F_COMMAND_COMPAT) { case IP_FW_F_REJECT_COMPAT: if (frwl->fw_reject_code_compat >= 0x100 && !(frwl->fw_prot == IPPROTO_TCP - && frwl->fw_reject_code_compat == IP_FW_REJECT_RST_COMPAT)) { - /* - printf(("%s unknown reject code\n", err_prefix)); - */ - return (EINVAL); + && frwl->fw_reject_code_compat == IP_FW_REJECT_RST_COMPAT)) { + /* + * printf(("%s unknown reject code\n", err_prefix)); + */ + return EINVAL; } break; - case IP_FW_F_DIVERT_COMPAT: /* Diverting to port zero is invalid */ + case IP_FW_F_DIVERT_COMPAT: /* Diverting to port zero is invalid */ case IP_FW_F_TEE_COMPAT: case IP_FW_F_PIPE_COMPAT: /* piping through 0 is invalid */ case IP_FW_F_QUEUE_COMPAT: /* piping through 0 is invalid */ if (frwl->fw_divert_port_compat == 0) { - /* - printf(("%s can't divert to port 0\n", err_prefix)); - */ - return (EINVAL); + /* + * printf(("%s can't divert to port 0\n", err_prefix)); + */ + return EINVAL; } break; case IP_FW_F_DENY_COMPAT: @@ -1802,9 +1820,9 @@ ipfw_check_vers1_struct_32(struct ip_fw_compat_32 *frwl) break; default: /* - printf(("%s invalid command\n", err_prefix)); - */ - return (EINVAL); + * printf(("%s invalid command\n", err_prefix)); + */ + return EINVAL; } return 0; @@ -1815,72 +1833,72 @@ ipfw_check_vers1_struct_64(struct ip_fw_compat_64 *frwl) { /* Check for invalid flag bits */ if ((frwl->fw_flg & ~IP_FW_F_MASK_COMPAT) != 0) { - /* - printf(("%s undefined flag bits set (flags=%x)\n", - err_prefix, frwl->fw_flg)); - */ - - return (EINVAL); + /* + * printf(("%s undefined flag bits set (flags=%x)\n", + * err_prefix, frwl->fw_flg)); + */ + + return EINVAL; } if (frwl->fw_flg == IP_FW_F_CHECK_S_COMPAT) { /* check-state */ - return 0 ; + return 0; } /* Must apply to incoming or outgoing (or both) */ if (!(frwl->fw_flg & (IP_FW_F_IN_COMPAT | IP_FW_F_OUT_COMPAT))) { /* - printf(("%s neither in nor out\n", err_prefix)); - */ - - 
return (EINVAL); + * printf(("%s neither in nor out\n", err_prefix)); + */ + + return EINVAL; } /* Empty interface name is no good */ if (((frwl->fw_flg & IP_FW_F_IIFNAME_COMPAT) - && !*frwl->fw_in_if.fu_via_if_compat.name) + && !*frwl->fw_in_if.fu_via_if_compat.name) || ((frwl->fw_flg & IP_FW_F_OIFNAME_COMPAT) - && !*frwl->fw_out_if.fu_via_if_compat.name)) { + && !*frwl->fw_out_if.fu_via_if_compat.name)) { /* - printf(("%s empty interface name\n", err_prefix)); - */ - - return (EINVAL); + * printf(("%s empty interface name\n", err_prefix)); + */ + + return EINVAL; } /* Sanity check interface matching */ if ((frwl->fw_flg & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { - ; /* allow "via" backwards compatibility */ + ; /* allow "via" backwards compatibility */ } else if ((frwl->fw_flg & IP_FW_F_IN_COMPAT) && (frwl->fw_flg & IP_FW_F_OIFACE_COMPAT)) { /* - printf(("%s outgoing interface check on incoming\n", - err_prefix)); - */ - - return (EINVAL); + * printf(("%s outgoing interface check on incoming\n", + * err_prefix)); + */ + + return EINVAL; } /* Sanity check port ranges */ if ((frwl->fw_flg & IP_FW_F_SRNG_COMPAT) && IP_FW_GETNSRCP_COMPAT(frwl) < 2) { /* - printf(("%s src range set but n_src_p=%d\n", - err_prefix, IP_FW_GETNSRCP_COMPAT(frwl))); - */ - - return (EINVAL); + * printf(("%s src range set but n_src_p=%d\n", + * err_prefix, IP_FW_GETNSRCP_COMPAT(frwl))); + */ + + return EINVAL; } if ((frwl->fw_flg & IP_FW_F_DRNG_COMPAT) && IP_FW_GETNDSTP_COMPAT(frwl) < 2) { /* - printf(("%s dst range set but n_dst_p=%d\n", - err_prefix, IP_FW_GETNDSTP_COMPAT(frwl))); - */ + * printf(("%s dst range set but n_dst_p=%d\n", + * err_prefix, IP_FW_GETNDSTP_COMPAT(frwl))); + */ - return (EINVAL); + return EINVAL; } if (IP_FW_GETNSRCP_COMPAT(frwl) + IP_FW_GETNDSTP_COMPAT(frwl) > IP_FW_MAX_PORTS_COMPAT) { /* - printf(("%s too many ports (%d+%d)\n", - err_prefix, IP_FW_GETNSRCP_COMPAT(frwl), IP_FW_GETNDSTP_COMPAT(frwl))); - */ - - return (EINVAL); + * printf(("%s too many ports (%d+%d)\n", + * err_prefix, IP_FW_GETNSRCP_COMPAT(frwl), IP_FW_GETNDSTP_COMPAT(frwl))); + */ + + return EINVAL; } /* * Protocols other than TCP/UDP don't use port range @@ -1889,70 +1907,69 @@ ipfw_check_vers1_struct_64(struct ip_fw_compat_64 *frwl) (frwl->fw_prot != IPPROTO_UDP) && (IP_FW_GETNSRCP_COMPAT(frwl) || IP_FW_GETNDSTP_COMPAT(frwl))) { /* - printf(("%s port(s) specified for non TCP/UDP rule\n", - err_prefix)); - */ - - return (EINVAL); + * printf(("%s port(s) specified for non TCP/UDP rule\n", + * err_prefix)); + */ + + return EINVAL; } /* - * Rather than modify the entry to make such entries work, + * Rather than modify the entry to make such entries work, * we reject this rule and require user level utilities * to enforce whatever policy they deem appropriate. 
*/ - if ((frwl->fw_src.s_addr & (~frwl->fw_smsk.s_addr)) || - (frwl->fw_dst.s_addr & (~frwl->fw_dmsk.s_addr))) { + if ((frwl->fw_src.s_addr & (~frwl->fw_smsk.s_addr)) || + (frwl->fw_dst.s_addr & (~frwl->fw_dmsk.s_addr))) { /* - printf(("%s rule never matches\n", err_prefix)); - */ - - return (EINVAL); + * printf(("%s rule never matches\n", err_prefix)); + */ + + return EINVAL; } if ((frwl->fw_flg & IP_FW_F_FRAG_COMPAT) && - (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { + (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { if (frwl->fw_nports) { - /* - printf(("%s cannot mix 'frag' and ports\n", err_prefix)); - */ - - return (EINVAL); + /* + * printf(("%s cannot mix 'frag' and ports\n", err_prefix)); + */ + + return EINVAL; } if (frwl->fw_prot == IPPROTO_TCP && - frwl->fw_tcpf != frwl->fw_tcpnf) { - /* - printf(("%s cannot mix 'frag' and TCP flags\n", err_prefix)); - */ - - return (EINVAL); + frwl->fw_tcpf != frwl->fw_tcpnf) { + /* + * printf(("%s cannot mix 'frag' and TCP flags\n", err_prefix)); + */ + + return EINVAL; } } /* Check command specific stuff */ - switch (frwl->fw_flg & IP_FW_F_COMMAND_COMPAT) - { + switch (frwl->fw_flg & IP_FW_F_COMMAND_COMPAT) { case IP_FW_F_REJECT_COMPAT: if (frwl->fw_reject_code_compat >= 0x100 && !(frwl->fw_prot == IPPROTO_TCP - && frwl->fw_reject_code_compat == IP_FW_REJECT_RST_COMPAT)) { - /* - printf(("%s unknown reject code\n", err_prefix)); - */ - - return (EINVAL); + && frwl->fw_reject_code_compat == IP_FW_REJECT_RST_COMPAT)) { + /* + * printf(("%s unknown reject code\n", err_prefix)); + */ + + return EINVAL; } break; - case IP_FW_F_DIVERT_COMPAT: /* Diverting to port zero is invalid */ + case IP_FW_F_DIVERT_COMPAT: /* Diverting to port zero is invalid */ case IP_FW_F_TEE_COMPAT: case IP_FW_F_PIPE_COMPAT: /* piping through 0 is invalid */ case IP_FW_F_QUEUE_COMPAT: /* piping through 0 is invalid */ if (frwl->fw_divert_port_compat == 0) { - /* - printf(("%s can't divert to port 0\n", err_prefix)); - */ - - return (EINVAL); + /* + * printf(("%s can't divert to port 0\n", err_prefix)); + */ + + return EINVAL; } break; case IP_FW_F_DENY_COMPAT: @@ -1964,10 +1981,10 @@ ipfw_check_vers1_struct_64(struct ip_fw_compat_64 *frwl) break; default: /* - printf(("%s invalid command\n", err_prefix)); - */ - - return (EINVAL); + * printf(("%s invalid command\n", err_prefix)); + */ + + return EINVAL; } return 0; @@ -1976,11 +1993,11 @@ ipfw_check_vers1_struct_64(struct ip_fw_compat_64 *frwl) static void ipfw_convert_to_cmds_32(struct ip_fw *curr_rule, struct ip_fw_compat_32 *compat_rule) { - int k; - uint32_t actbuf[255], cmdbuf[255]; - ipfw_insn *action, *cmd, *src, *dst; - ipfw_insn *have_state = NULL; /* track check-state or keep-state */ - + int k; + uint32_t actbuf[255], cmdbuf[255]; + ipfw_insn *action, *cmd, *src, *dst; + ipfw_insn *have_state = NULL; /* track check-state or keep-state */ + if (!compat_rule || !curr_rule) { return; } @@ -1992,23 +2009,22 @@ ipfw_convert_to_cmds_32(struct ip_fw *curr_rule, struct ip_fw_compat_32 *compat_ /* bad rule */ return; } - - bzero(actbuf, sizeof(actbuf)); /* actions go here */ + + bzero(actbuf, sizeof(actbuf)); /* actions go here */ bzero(cmdbuf, sizeof(cmdbuf)); /* fill in action */ action = (ipfw_insn *)actbuf; { - u_int flag = compat_rule->fw_flg; - - action->len = 1; /* default */ - - if (flag & IP_FW_F_CHECK_S_COMPAT) { - have_state = action; - action->opcode = O_CHECK_STATE; - } - else { - switch (flag & IP_FW_F_COMMAND_COMPAT) { + u_int flag = compat_rule->fw_flg; + + action->len 
= 1; /* default */ + + if (flag & IP_FW_F_CHECK_S_COMPAT) { + have_state = action; + action->opcode = O_CHECK_STATE; + } else { + switch (flag & IP_FW_F_COMMAND_COMPAT) { case IP_FW_F_ACCEPT_COMPAT: action->opcode = O_ACCEPT; break; @@ -2040,15 +2056,15 @@ ipfw_convert_to_cmds_32(struct ip_fw *curr_rule, struct ip_fw_compat_32 *compat_ case IP_FW_F_FWD_COMPAT: { ipfw_insn_sa *p = (ipfw_insn_sa *)action; - + action->opcode = O_FORWARD_IP; action->len = F_INSN_SIZE(ipfw_insn_sa); - + p->sa.sin_len = compat_rule->fw_fwd_ip_compat.sin_len; p->sa.sin_family = compat_rule->fw_fwd_ip_compat.sin_family; p->sa.sin_port = compat_rule->fw_fwd_ip_compat.sin_port; p->sa.sin_addr = compat_rule->fw_fwd_ip_compat.sin_addr; - + break; } case IP_FW_F_DENY_COMPAT: @@ -2062,306 +2078,232 @@ ipfw_convert_to_cmds_32(struct ip_fw *curr_rule, struct ip_fw_compat_32 *compat_ default: action->opcode = O_NOP; break; + } } - } - - /* action is mandatory */ - if (action->opcode == O_NOP) { + + /* action is mandatory */ + if (action->opcode == O_NOP) { return; - } - - action = next_cmd(action); + } + + action = next_cmd(action); } /* end actions */ - + cmd = (ipfw_insn *)cmdbuf; /* this is O_CHECK_STATE, we're done */ if (have_state) { - goto done; + goto done; } { - ipfw_insn *prev = NULL; - u_int flag = compat_rule->fw_flg; - - /* logging */ - if (flag & IP_FW_F_PRN_COMPAT) { - ipfw_insn_log *c = (ipfw_insn_log *)cmd; - - cmd->opcode = O_LOG; - cmd->len |= F_INSN_SIZE(ipfw_insn_log); - c->max_log = compat_rule->fw_logamount; - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* protocol */ - if (compat_rule->fw_prot != 0) { - fill_cmd(cmd, O_PROTO, compat_rule->fw_prot); - prev = cmd; - cmd = next_cmd(cmd); - } - - /* source */ - if (flag & IP_FW_F_SME_COMPAT) { - cmd->opcode = O_IP_SRC_ME; - cmd->len |= F_INSN_SIZE(ipfw_insn); - if (flag & IP_FW_F_INVSRC_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (compat_rule->fw_smsk.s_addr != 0) { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - ip->addr = compat_rule->fw_src; - ip->mask = compat_rule->fw_smsk; - cmd->opcode = O_IP_SRC_MASK; - cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ - } else { - /* one IP */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; /* alias for cmd */ - - if (compat_rule->fw_src.s_addr == 0) { - /* any */ - cmd32->o.len &= ~F_LEN_MASK; /* zero len */ - } else { - cmd32->d[0] = compat_rule->fw_src.s_addr; - cmd32->o.opcode = O_IP_SRC; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - } - } - - if (flag & IP_FW_F_INVSRC_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ + ipfw_insn *prev = NULL; + u_int flag = compat_rule->fw_flg; + + /* logging */ + if (flag & IP_FW_F_PRN_COMPAT) { + ipfw_insn_log *c = (ipfw_insn_log *)cmd; + + cmd->opcode = O_LOG; + cmd->len |= F_INSN_SIZE(ipfw_insn_log); + c->max_log = compat_rule->fw_logamount; + + prev = cmd; + cmd = next_cmd(cmd); } - if (F_LEN(cmd) != 0) { /* !any */ + /* protocol */ + if (compat_rule->fw_prot != 0) { + fill_cmd(cmd, O_PROTO, compat_rule->fw_prot); prev = cmd; cmd = next_cmd(cmd); } - } - - /* source ports */ - { - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, j = 0, - nports = IP_FW_GETNSRCP_COMPAT(compat_rule), - have_range = 0; - - cmd->opcode = O_IP_SRCPORT; - for (i = 0; i < nports; i++) { - if (((flag & IP_FW_F_SRNG_COMPAT) || - (flag & IP_FW_F_SMSK_COMPAT)) && !have_range) { - p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; - p[1] = 
compat_rule->fw_uar_compat.fw_pts[i]; - have_range = 1; - } else { - p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + + /* source */ + if (flag & IP_FW_F_SME_COMPAT) { + cmd->opcode = O_IP_SRC_ME; + cmd->len |= F_INSN_SIZE(ipfw_insn); + if (flag & IP_FW_F_INVSRC_COMPAT) { + cmd->len ^= F_NOT; /* toggle F_NOT */ } - p += 2; - j++; - } - - if (j > 0) { - ports->o.len |= j+1; /* leave F_NOT and F_OR untouched */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* destination */ - if (flag & IP_FW_F_DME_COMPAT) { - cmd->opcode = O_IP_DST_ME; - cmd->len |= F_INSN_SIZE(ipfw_insn); - if (flag & IP_FW_F_INVDST_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (compat_rule->fw_dmsk.s_addr != 0) { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - ip->addr = compat_rule->fw_dst; - ip->mask = compat_rule->fw_dmsk; - cmd->opcode = O_IP_DST_MASK; - cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ + prev = cmd; + cmd = next_cmd(cmd); } else { - /* one IP */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; /* alias for cmd */ - - if (compat_rule->fw_dst.s_addr == 0) { - /* any */ - cmd32->o.len &= ~F_LEN_MASK; /* zero len */ + if (compat_rule->fw_smsk.s_addr != 0) { + /* addr/mask */ + ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; + + ip->addr = compat_rule->fw_src; + ip->mask = compat_rule->fw_smsk; + cmd->opcode = O_IP_SRC_MASK; + cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ } else { - cmd32->d[0] = compat_rule->fw_dst.s_addr; - cmd32->o.opcode = O_IP_DST; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + /* one IP */ + ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ + + if (compat_rule->fw_src.s_addr == 0) { + /* any */ + cmd32->o.len &= ~F_LEN_MASK; /* zero len */ + } else { + cmd32->d[0] = compat_rule->fw_src.s_addr; + cmd32->o.opcode = O_IP_SRC; + cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + } + } + + if (flag & IP_FW_F_INVSRC_COMPAT) { + cmd->len ^= F_NOT; /* toggle F_NOT */ + } + + if (F_LEN(cmd) != 0) { /* !any */ + prev = cmd; + cmd = next_cmd(cmd); } } - - if (flag & IP_FW_F_INVDST_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - if (F_LEN(cmd) != 0) { /* !any */ + /* source ports */ + { + ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; + uint16_t *p = ports->ports; + int i, j = 0, + nports = IP_FW_GETNSRCP_COMPAT(compat_rule), + have_range = 0; + + cmd->opcode = O_IP_SRCPORT; + for (i = 0; i < nports; i++) { + if (((flag & IP_FW_F_SRNG_COMPAT) || + (flag & IP_FW_F_SMSK_COMPAT)) && !have_range) { + p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; + p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + have_range = 1; + } else { + p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + } + p += 2; + j++; + } + + if (j > 0) { + ports->o.len |= j + 1; /* leave F_NOT and F_OR untouched */ + } + prev = cmd; cmd = next_cmd(cmd); } - } - - /* dest. 
ports */ - { - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i = IP_FW_GETNSRCP_COMPAT(compat_rule), - j = 0, - nports = (IP_FW_GETNDSTP_COMPAT(compat_rule) + i), - have_range = 0; - - cmd->opcode = O_IP_DSTPORT; - for (; i < nports; i++, p += 2) { - if (((flag & IP_FW_F_DRNG_COMPAT) || - (flag & IP_FW_F_DMSK_COMPAT)) && !have_range) { - /* range */ - p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; - p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - have_range = 1; - } else { - p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - } - j++; - } - - if (j > 0) { - ports->o.len |= j+1; /* leave F_NOT and F_OR untouched */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (flag & IP_FW_F_UID_COMPAT) { - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; /* alias for cmd */ - - cmd32->o.opcode = O_UID; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - cmd32->d[0] = compat_rule->fw_uid; - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (flag & IP_FW_F_KEEP_S_COMPAT) { - have_state = cmd; - fill_cmd(cmd, O_KEEP_STATE, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - if (flag & IP_FW_BRIDGED_COMPAT) { - fill_cmd(cmd, O_LAYER2, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - - if ((flag & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { - /* via */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_in_if; - - cmd->opcode = O_VIA; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; - } - else if (compat_rule->fw_flg & IP_FW_F_IIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; - } - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (flag & IP_FW_F_IN_COMPAT) { - fill_cmd(cmd, O_IN, 0); - + /* destination */ + if (flag & IP_FW_F_DME_COMPAT) { + cmd->opcode = O_IP_DST_ME; + cmd->len |= F_INSN_SIZE(ipfw_insn); + if (flag & IP_FW_F_INVDST_COMPAT) { + cmd->len ^= F_NOT; /* toggle F_NOT */ + } + prev = cmd; cmd = next_cmd(cmd); - } - if (flag & IP_FW_F_OUT_COMPAT) { - /* if the previous command was O_IN, and this - * is being set as well, it's equivalent to not - * having either command, so let's back up prev - * to the cmd before it and move cmd to prev. 
- */ - if (prev->opcode == O_IN) { - cmd = prev; - bzero(cmd, sizeof(*cmd)); + } else { + if (compat_rule->fw_dmsk.s_addr != 0) { + /* addr/mask */ + ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; + + ip->addr = compat_rule->fw_dst; + ip->mask = compat_rule->fw_dmsk; + cmd->opcode = O_IP_DST_MASK; + cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ } else { + /* one IP */ + ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ + + if (compat_rule->fw_dst.s_addr == 0) { + /* any */ + cmd32->o.len &= ~F_LEN_MASK; /* zero len */ + } else { + cmd32->d[0] = compat_rule->fw_dst.s_addr; + cmd32->o.opcode = O_IP_DST; + cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + } + } + + if (flag & IP_FW_F_INVDST_COMPAT) { cmd->len ^= F_NOT; /* toggle F_NOT */ - fill_cmd(cmd, O_IN, 0); - + } + + if (F_LEN(cmd) != 0) { /* !any */ prev = cmd; cmd = next_cmd(cmd); } } - if (flag & IP_FW_F_OIFACE_COMPAT) { - /* xmit */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_out_if; - - cmd->opcode = O_XMIT; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; + + /* dest. ports */ + { + ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; + uint16_t *p = ports->ports; + int i = IP_FW_GETNSRCP_COMPAT(compat_rule), + j = 0, + nports = (IP_FW_GETNDSTP_COMPAT(compat_rule) + i), + have_range = 0; + + cmd->opcode = O_IP_DSTPORT; + for (; i < nports; i++, p += 2) { + if (((flag & IP_FW_F_DRNG_COMPAT) || + (flag & IP_FW_F_DMSK_COMPAT)) && !have_range) { + /* range */ + p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; + p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + have_range = 1; + } else { + p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + } + j++; } - else if (flag & IP_FW_F_OIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; + + if (j > 0) { + ports->o.len |= j + 1; /* leave F_NOT and F_OR untouched */ } - + + prev = cmd; + cmd = next_cmd(cmd); + } + + if (flag & IP_FW_F_UID_COMPAT) { + ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ + + cmd32->o.opcode = O_UID; + cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + cmd32->d[0] = compat_rule->fw_uid; + + prev = cmd; + cmd = next_cmd(cmd); + } + + if (flag & IP_FW_F_KEEP_S_COMPAT) { + have_state = cmd; + fill_cmd(cmd, O_KEEP_STATE, 0); + + prev = cmd; + cmd = next_cmd(cmd); + } + if (flag & IP_FW_BRIDGED_COMPAT) { + fill_cmd(cmd, O_LAYER2, 0); + prev = cmd; cmd = next_cmd(cmd); - } - else if (flag & IP_FW_F_IIFACE_COMPAT) { - /* recv */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_in_if; - - cmd->opcode = O_RECV; + } + + if ((flag & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { + /* via */ + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu = compat_rule->fw_in_if; + + cmd->opcode = O_VIA; ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - + if (ifu.fu_via_ip.s_addr == 0) { /* "any" */ ifcmd->name[0] = '\0'; ifcmd->o.len = 0; - } - else if (flag & IP_FW_F_IIFNAME_COMPAT) { + } else if (compat_rule->fw_flg & IP_FW_F_IIFNAME_COMPAT) { /* by name */ strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); ifcmd->p.unit = ifu.fu_via_if_compat.unit; @@ -2369,100 +2311,169 @@ ipfw_convert_to_cmds_32(struct ip_fw *curr_rule, struct ip_fw_compat_32 *compat_ /* by addr */ ifcmd->p.ip = ifu.fu_via_ip; } - 
+ + prev = cmd; + cmd = next_cmd(cmd); + } else { + if (flag & IP_FW_F_IN_COMPAT) { + fill_cmd(cmd, O_IN, 0); + + prev = cmd; + cmd = next_cmd(cmd); + } + if (flag & IP_FW_F_OUT_COMPAT) { + /* if the previous command was O_IN, and this + * is being set as well, it's equivalent to not + * having either command, so let's back up prev + * to the cmd before it and move cmd to prev. + */ + if (prev->opcode == O_IN) { + cmd = prev; + bzero(cmd, sizeof(*cmd)); + } else { + cmd->len ^= F_NOT; /* toggle F_NOT */ + fill_cmd(cmd, O_IN, 0); + + prev = cmd; + cmd = next_cmd(cmd); + } + } + if (flag & IP_FW_F_OIFACE_COMPAT) { + /* xmit */ + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu = compat_rule->fw_out_if; + + cmd->opcode = O_XMIT; + ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); + + if (ifu.fu_via_ip.s_addr == 0) { + /* "any" */ + ifcmd->name[0] = '\0'; + ifcmd->o.len = 0; + } else if (flag & IP_FW_F_OIFNAME_COMPAT) { + /* by name */ + strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); + ifcmd->p.unit = ifu.fu_via_if_compat.unit; + } else { + /* by addr */ + ifcmd->p.ip = ifu.fu_via_ip; + } + + prev = cmd; + cmd = next_cmd(cmd); + } else if (flag & IP_FW_F_IIFACE_COMPAT) { + /* recv */ + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu = compat_rule->fw_in_if; + + cmd->opcode = O_RECV; + ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); + + if (ifu.fu_via_ip.s_addr == 0) { + /* "any" */ + ifcmd->name[0] = '\0'; + ifcmd->o.len = 0; + } else if (flag & IP_FW_F_IIFNAME_COMPAT) { + /* by name */ + strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); + ifcmd->p.unit = ifu.fu_via_if_compat.unit; + } else { + /* by addr */ + ifcmd->p.ip = ifu.fu_via_ip; + } + + prev = cmd; + cmd = next_cmd(cmd); + } + } + + if (flag & IP_FW_F_FRAG_COMPAT) { + fill_cmd(cmd, O_FRAG, 0); + prev = cmd; cmd = next_cmd(cmd); } - } - - if (flag & IP_FW_F_FRAG_COMPAT) { - fill_cmd(cmd, O_FRAG, 0); - prev = cmd; - cmd = next_cmd(cmd); - } - - /* IP options */ - if (compat_rule->fw_ipopt != 0 || compat_rule->fw_ipnopt != 0) { - fill_cmd(cmd, O_IPOPT, (compat_rule->fw_ipopt & 0xff) | - (compat_rule->fw_ipnopt & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (compat_rule->fw_prot == IPPROTO_TCP) { - if (compat_rule->fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { - fill_cmd(cmd, O_ESTAB, 0); - + /* IP options */ + if (compat_rule->fw_ipopt != 0 || compat_rule->fw_ipnopt != 0) { + fill_cmd(cmd, O_IPOPT, (compat_rule->fw_ipopt & 0xff) | + (compat_rule->fw_ipnopt & 0xff) << 8); + prev = cmd; cmd = next_cmd(cmd); } - - /* TCP options and flags */ - if (compat_rule->fw_tcpf != 0 || compat_rule->fw_tcpnf != 0) { - if ((compat_rule->fw_tcpf & IP_FW_TCPF_SYN_COMPAT) && - compat_rule->fw_tcpnf & IP_FW_TCPF_ACK_COMPAT) { - fill_cmd(cmd, O_TCPFLAGS, (TH_SYN) | ( (TH_ACK) & 0xff) <<8); - + + if (compat_rule->fw_prot == IPPROTO_TCP) { + if (compat_rule->fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { + fill_cmd(cmd, O_ESTAB, 0); + prev = cmd; cmd = next_cmd(cmd); } - else { - fill_cmd(cmd, O_TCPFLAGS, (compat_rule->fw_tcpf & 0xff) | - (compat_rule->fw_tcpnf & 0xff) << 8); - + + /* TCP options and flags */ + if (compat_rule->fw_tcpf != 0 || compat_rule->fw_tcpnf != 0) { + if ((compat_rule->fw_tcpf & IP_FW_TCPF_SYN_COMPAT) && + compat_rule->fw_tcpnf & IP_FW_TCPF_ACK_COMPAT) { + fill_cmd(cmd, O_TCPFLAGS, (TH_SYN) | ((TH_ACK) & 0xff) << 8); + + prev = cmd; + cmd = next_cmd(cmd); + } else { + fill_cmd(cmd, O_TCPFLAGS, (compat_rule->fw_tcpf & 0xff) | + (compat_rule->fw_tcpnf & 0xff) << 
8); + + prev = cmd; + cmd = next_cmd(cmd); + } + } + if (compat_rule->fw_tcpopt != 0 || compat_rule->fw_tcpnopt != 0) { + fill_cmd(cmd, O_TCPOPTS, (compat_rule->fw_tcpopt & 0xff) | + (compat_rule->fw_tcpnopt & 0xff) << 8); + prev = cmd; cmd = next_cmd(cmd); } } - if (compat_rule->fw_tcpopt != 0 || compat_rule->fw_tcpnopt != 0) { - fill_cmd(cmd, O_TCPOPTS, (compat_rule->fw_tcpopt & 0xff) | - (compat_rule->fw_tcpnopt & 0xff) << 8); - + + /* ICMP */ + /* XXX: check this */ + if (flag & IP_FW_F_ICMPBIT_COMPAT) { + int i; + ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ + + cmd32->o.opcode = O_ICMPTYPE; + cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + + for (i = 0; i < IP_FW_ICMPTYPES_DIM_COMPAT; i++) { + cmd32->d[0] |= compat_rule->fw_uar_compat.fw_icmptypes[i]; + } + prev = cmd; cmd = next_cmd(cmd); } - } - - /* ICMP */ - /* XXX: check this */ - if (flag & IP_FW_F_ICMPBIT_COMPAT) { - int i; - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; /* alias for cmd */ - - cmd32->o.opcode = O_ICMPTYPE; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - - for (i = 0; i < IP_FW_ICMPTYPES_DIM_COMPAT; i++) { - cmd32->d[0] |= compat_rule->fw_uar_compat.fw_icmptypes[i]; - } - - prev = cmd; - cmd = next_cmd(cmd); - } } /* end commands */ - + done: - /* finally, copy everything into the current + /* finally, copy everything into the current * rule buffer in the right order. */ dst = curr_rule->cmd; - + /* first, do match probability */ if (compat_rule->fw_flg & IP_FW_F_RND_MATCH_COMPAT) { dst->opcode = O_PROB; dst->len = 2; - *((int32_t *)(dst+1)) = compat_rule->pipe_ptr; + *((int32_t *)(dst + 1)) = compat_rule->pipe_ptr; dst += dst->len; } - + /* generate O_PROBE_STATE if necessary */ if (have_state && have_state->opcode != O_CHECK_STATE) { fill_cmd(dst, O_PROBE_STATE, 0); dst = next_cmd(dst); } - + /* * copy all commands but O_LOG, O_KEEP_STATE */ @@ -2487,7 +2498,7 @@ done: bcopy(have_state, dst, k * sizeof(uint32_t)); dst += k; } - + /* * start action section */ @@ -2502,7 +2513,7 @@ done: bcopy(src, dst, k * sizeof(uint32_t)); dst += k; } - + /* * copy all other actions */ @@ -2513,18 +2524,18 @@ done: } curr_rule->cmd_len = (uint32_t *)dst - (uint32_t *)(curr_rule->cmd); - + return; } static void ipfw_convert_to_cmds_64(struct ip_fw *curr_rule, struct ip_fw_compat_64 *compat_rule) { - int k; - uint32_t actbuf[255], cmdbuf[255]; - ipfw_insn *action, *cmd, *src, *dst; - ipfw_insn *have_state = NULL; /* track check-state or keep-state */ - + int k; + uint32_t actbuf[255], cmdbuf[255]; + ipfw_insn *action, *cmd, *src, *dst; + ipfw_insn *have_state = NULL; /* track check-state or keep-state */ + if (!compat_rule || !curr_rule) { return; } @@ -2536,22 +2547,21 @@ ipfw_convert_to_cmds_64(struct ip_fw *curr_rule, struct ip_fw_compat_64 *compat_ /* bad rule */ return; } - - bzero(actbuf, sizeof(actbuf)); /* actions go here */ + + bzero(actbuf, sizeof(actbuf)); /* actions go here */ bzero(cmdbuf, sizeof(cmdbuf)); /* fill in action */ action = (ipfw_insn *)actbuf; { - u_int flag = compat_rule->fw_flg; - - action->len = 1; /* default */ - - if (flag & IP_FW_F_CHECK_S_COMPAT) { - have_state = action; - action->opcode = O_CHECK_STATE; - } - else { - switch (flag & IP_FW_F_COMMAND_COMPAT) { + u_int flag = compat_rule->fw_flg; + + action->len = 1; /* default */ + + if (flag & IP_FW_F_CHECK_S_COMPAT) { + have_state = action; + action->opcode = O_CHECK_STATE; + } else { + switch (flag & IP_FW_F_COMMAND_COMPAT) { case IP_FW_F_ACCEPT_COMPAT: action->opcode = O_ACCEPT; break; @@ -2583,15 +2593,15 @@ 
ipfw_convert_to_cmds_64(struct ip_fw *curr_rule, struct ip_fw_compat_64 *compat_ case IP_FW_F_FWD_COMPAT: { ipfw_insn_sa *p = (ipfw_insn_sa *)action; - + action->opcode = O_FORWARD_IP; action->len = F_INSN_SIZE(ipfw_insn_sa); - + p->sa.sin_len = compat_rule->fw_fwd_ip_compat.sin_len; p->sa.sin_family = compat_rule->fw_fwd_ip_compat.sin_family; p->sa.sin_port = compat_rule->fw_fwd_ip_compat.sin_port; p->sa.sin_addr = compat_rule->fw_fwd_ip_compat.sin_addr; - + break; } case IP_FW_F_DENY_COMPAT: @@ -2605,306 +2615,232 @@ ipfw_convert_to_cmds_64(struct ip_fw *curr_rule, struct ip_fw_compat_64 *compat_ default: action->opcode = O_NOP; break; + } } - } - - /* action is mandatory */ - if (action->opcode == O_NOP) { + + /* action is mandatory */ + if (action->opcode == O_NOP) { return; - } - - action = next_cmd(action); + } + + action = next_cmd(action); } /* end actions */ - + cmd = (ipfw_insn *)cmdbuf; /* this is O_CHECK_STATE, we're done */ if (have_state) { - goto done; + goto done; } { - ipfw_insn *prev = NULL; - u_int flag = compat_rule->fw_flg; - - /* logging */ - if (flag & IP_FW_F_PRN_COMPAT) { - ipfw_insn_log *c = (ipfw_insn_log *)cmd; - - cmd->opcode = O_LOG; - cmd->len |= F_INSN_SIZE(ipfw_insn_log); - c->max_log = compat_rule->fw_logamount; - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* protocol */ - if (compat_rule->fw_prot != 0) { - fill_cmd(cmd, O_PROTO, compat_rule->fw_prot); - prev = cmd; - cmd = next_cmd(cmd); - } - - /* source */ - if (flag & IP_FW_F_SME_COMPAT) { - cmd->opcode = O_IP_SRC_ME; - cmd->len |= F_INSN_SIZE(ipfw_insn); - if (flag & IP_FW_F_INVSRC_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (compat_rule->fw_smsk.s_addr != 0) { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - ip->addr = compat_rule->fw_src; - ip->mask = compat_rule->fw_smsk; - cmd->opcode = O_IP_SRC_MASK; - cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ - } else { - /* one IP */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; /* alias for cmd */ - - if (compat_rule->fw_src.s_addr == 0) { - /* any */ - cmd32->o.len &= ~F_LEN_MASK; /* zero len */ - } else { - cmd32->d[0] = compat_rule->fw_src.s_addr; - cmd32->o.opcode = O_IP_SRC; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - } - } - - if (flag & IP_FW_F_INVSRC_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ + ipfw_insn *prev = NULL; + u_int flag = compat_rule->fw_flg; + + /* logging */ + if (flag & IP_FW_F_PRN_COMPAT) { + ipfw_insn_log *c = (ipfw_insn_log *)cmd; + + cmd->opcode = O_LOG; + cmd->len |= F_INSN_SIZE(ipfw_insn_log); + c->max_log = compat_rule->fw_logamount; + + prev = cmd; + cmd = next_cmd(cmd); } - if (F_LEN(cmd) != 0) { /* !any */ + /* protocol */ + if (compat_rule->fw_prot != 0) { + fill_cmd(cmd, O_PROTO, compat_rule->fw_prot); prev = cmd; cmd = next_cmd(cmd); } - } - - /* source ports */ - { - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i, j = 0, - nports = IP_FW_GETNSRCP_COMPAT(compat_rule), - have_range = 0; - - cmd->opcode = O_IP_SRCPORT; - for (i = 0; i < nports; i++) { - if (((flag & IP_FW_F_SRNG_COMPAT) || - (flag & IP_FW_F_SMSK_COMPAT)) && !have_range) { - p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; - p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - have_range = 1; - } else { - p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + + /* source */ + if (flag & IP_FW_F_SME_COMPAT) { + cmd->opcode = O_IP_SRC_ME; + cmd->len |= F_INSN_SIZE(ipfw_insn); + if (flag & IP_FW_F_INVSRC_COMPAT) { + cmd->len 
^= F_NOT; /* toggle F_NOT */ } - p += 2; - j++; - } - - if (j > 0) { - ports->o.len |= j+1; /* leave F_NOT and F_OR untouched */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } - - /* destination */ - if (flag & IP_FW_F_DME_COMPAT) { - cmd->opcode = O_IP_DST_ME; - cmd->len |= F_INSN_SIZE(ipfw_insn); - if (flag & IP_FW_F_INVDST_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (compat_rule->fw_dmsk.s_addr != 0) { - /* addr/mask */ - ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; - - ip->addr = compat_rule->fw_dst; - ip->mask = compat_rule->fw_dmsk; - cmd->opcode = O_IP_DST_MASK; - cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ + prev = cmd; + cmd = next_cmd(cmd); } else { - /* one IP */ - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; /* alias for cmd */ - - if (compat_rule->fw_dst.s_addr == 0) { - /* any */ - cmd32->o.len &= ~F_LEN_MASK; /* zero len */ + if (compat_rule->fw_smsk.s_addr != 0) { + /* addr/mask */ + ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; + + ip->addr = compat_rule->fw_src; + ip->mask = compat_rule->fw_smsk; + cmd->opcode = O_IP_SRC_MASK; + cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ } else { - cmd32->d[0] = compat_rule->fw_dst.s_addr; - cmd32->o.opcode = O_IP_DST; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + /* one IP */ + ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ + + if (compat_rule->fw_src.s_addr == 0) { + /* any */ + cmd32->o.len &= ~F_LEN_MASK; /* zero len */ + } else { + cmd32->d[0] = compat_rule->fw_src.s_addr; + cmd32->o.opcode = O_IP_SRC; + cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + } + } + + if (flag & IP_FW_F_INVSRC_COMPAT) { + cmd->len ^= F_NOT; /* toggle F_NOT */ + } + + if (F_LEN(cmd) != 0) { /* !any */ + prev = cmd; + cmd = next_cmd(cmd); } } - - if (flag & IP_FW_F_INVDST_COMPAT) { - cmd->len ^= F_NOT; /* toggle F_NOT */ - } - if (F_LEN(cmd) != 0) { /* !any */ + /* source ports */ + { + ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; + uint16_t *p = ports->ports; + int i, j = 0, + nports = IP_FW_GETNSRCP_COMPAT(compat_rule), + have_range = 0; + + cmd->opcode = O_IP_SRCPORT; + for (i = 0; i < nports; i++) { + if (((flag & IP_FW_F_SRNG_COMPAT) || + (flag & IP_FW_F_SMSK_COMPAT)) && !have_range) { + p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; + p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + have_range = 1; + } else { + p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + } + p += 2; + j++; + } + + if (j > 0) { + ports->o.len |= j + 1; /* leave F_NOT and F_OR untouched */ + } + prev = cmd; cmd = next_cmd(cmd); } - } - - /* dest. 
ports */ - { - ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; - uint16_t *p = ports->ports; - int i = IP_FW_GETNSRCP_COMPAT(compat_rule), - j = 0, - nports = (IP_FW_GETNDSTP_COMPAT(compat_rule) + i), - have_range = 0; - - cmd->opcode = O_IP_DSTPORT; - for (; i < nports; i++, p += 2) { - if (((flag & IP_FW_F_DRNG_COMPAT) || - (flag & IP_FW_F_DMSK_COMPAT)) && !have_range) { - /* range */ - p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; - p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - have_range = 1; - } else { - p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; - } - j++; - } - - if (j > 0) { - ports->o.len |= j+1; /* leave F_NOT and F_OR untouched */ - } - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (flag & IP_FW_F_UID_COMPAT) { - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; /* alias for cmd */ - - cmd32->o.opcode = O_UID; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - cmd32->d[0] = compat_rule->fw_uid; - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (flag & IP_FW_F_KEEP_S_COMPAT) { - have_state = cmd; - fill_cmd(cmd, O_KEEP_STATE, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - if (flag & IP_FW_BRIDGED_COMPAT) { - fill_cmd(cmd, O_LAYER2, 0); - - prev = cmd; - cmd = next_cmd(cmd); - } - - if ((flag & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { - /* via */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_in_if; - - cmd->opcode = O_VIA; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; - } - else if (compat_rule->fw_flg & IP_FW_F_IIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; - } - prev = cmd; - cmd = next_cmd(cmd); - } else { - if (flag & IP_FW_F_IN_COMPAT) { - fill_cmd(cmd, O_IN, 0); - + /* destination */ + if (flag & IP_FW_F_DME_COMPAT) { + cmd->opcode = O_IP_DST_ME; + cmd->len |= F_INSN_SIZE(ipfw_insn); + if (flag & IP_FW_F_INVDST_COMPAT) { + cmd->len ^= F_NOT; /* toggle F_NOT */ + } + prev = cmd; cmd = next_cmd(cmd); - } - if (flag & IP_FW_F_OUT_COMPAT) { - /* if the previous command was O_IN, and this - * is being set as well, it's equivalent to not - * having either command, so let's back up prev - * to the cmd before it and move cmd to prev. 
- */ - if (prev->opcode == O_IN) { - cmd = prev; - bzero(cmd, sizeof(*cmd)); + } else { + if (compat_rule->fw_dmsk.s_addr != 0) { + /* addr/mask */ + ipfw_insn_ip *ip = (ipfw_insn_ip *)cmd; + + ip->addr = compat_rule->fw_dst; + ip->mask = compat_rule->fw_dmsk; + cmd->opcode = O_IP_DST_MASK; + cmd->len |= F_INSN_SIZE(ipfw_insn_ip); /* double check this */ } else { + /* one IP */ + ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ + + if (compat_rule->fw_dst.s_addr == 0) { + /* any */ + cmd32->o.len &= ~F_LEN_MASK; /* zero len */ + } else { + cmd32->d[0] = compat_rule->fw_dst.s_addr; + cmd32->o.opcode = O_IP_DST; + cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + } + } + + if (flag & IP_FW_F_INVDST_COMPAT) { cmd->len ^= F_NOT; /* toggle F_NOT */ - fill_cmd(cmd, O_IN, 0); - + } + + if (F_LEN(cmd) != 0) { /* !any */ prev = cmd; cmd = next_cmd(cmd); } } - if (flag & IP_FW_F_OIFACE_COMPAT) { - /* xmit */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_out_if; - - cmd->opcode = O_XMIT; - ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - - if (ifu.fu_via_ip.s_addr == 0) { - /* "any" */ - ifcmd->name[0] = '\0'; - ifcmd->o.len = 0; + + /* dest. ports */ + { + ipfw_insn_u16 *ports = (ipfw_insn_u16 *)cmd; + uint16_t *p = ports->ports; + int i = IP_FW_GETNSRCP_COMPAT(compat_rule), + j = 0, + nports = (IP_FW_GETNDSTP_COMPAT(compat_rule) + i), + have_range = 0; + + cmd->opcode = O_IP_DSTPORT; + for (; i < nports; i++, p += 2) { + if (((flag & IP_FW_F_DRNG_COMPAT) || + (flag & IP_FW_F_DMSK_COMPAT)) && !have_range) { + /* range */ + p[0] = compat_rule->fw_uar_compat.fw_pts[i++]; + p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + have_range = 1; + } else { + p[0] = p[1] = compat_rule->fw_uar_compat.fw_pts[i]; + } + j++; } - else if (flag & IP_FW_F_OIFNAME_COMPAT) { - /* by name */ - strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); - ifcmd->p.unit = ifu.fu_via_if_compat.unit; - } else { - /* by addr */ - ifcmd->p.ip = ifu.fu_via_ip; + + if (j > 0) { + ports->o.len |= j + 1; /* leave F_NOT and F_OR untouched */ } - + + prev = cmd; + cmd = next_cmd(cmd); + } + + if (flag & IP_FW_F_UID_COMPAT) { + ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ + + cmd32->o.opcode = O_UID; + cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + cmd32->d[0] = compat_rule->fw_uid; + + prev = cmd; + cmd = next_cmd(cmd); + } + + if (flag & IP_FW_F_KEEP_S_COMPAT) { + have_state = cmd; + fill_cmd(cmd, O_KEEP_STATE, 0); + prev = cmd; cmd = next_cmd(cmd); - } - else if (flag & IP_FW_F_IIFACE_COMPAT) { - /* recv */ - ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; - union ip_fw_if_compat ifu = compat_rule->fw_in_if; - - cmd->opcode = O_RECV; + } + if (flag & IP_FW_BRIDGED_COMPAT) { + fill_cmd(cmd, O_LAYER2, 0); + + prev = cmd; + cmd = next_cmd(cmd); + } + + if ((flag & IF_FW_F_VIAHACK_COMPAT) == IF_FW_F_VIAHACK_COMPAT) { + /* via */ + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu = compat_rule->fw_in_if; + + cmd->opcode = O_VIA; ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); - + if (ifu.fu_via_ip.s_addr == 0) { /* "any" */ ifcmd->name[0] = '\0'; ifcmd->o.len = 0; - } - else if (flag & IP_FW_F_IIFNAME_COMPAT) { + } else if (compat_rule->fw_flg & IP_FW_F_IIFNAME_COMPAT) { /* by name */ strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); ifcmd->p.unit = ifu.fu_via_if_compat.unit; @@ -2912,98 +2848,167 @@ ipfw_convert_to_cmds_64(struct ip_fw *curr_rule, struct ip_fw_compat_64 *compat_ /* by addr */ ifcmd->p.ip = ifu.fu_via_ip; } - + 
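/*
 * A self-contained sketch (hypothetical helper, not the converter's
 * own API) of the port encoding built by the src/dst port loops
 * above: ipfw2 stores each port match as a (low, high) u16 pair, a
 * single port P as (P, P), and, when the compat range flag is set,
 * the first two fw_pts entries collapse into one (min, max) pair.
 * Each pair occupies one 32-bit word, which is why the loops finish
 * with "ports->o.len |= j + 1" (j pairs plus the opcode word).
 */
static int
encode_ports(uint16_t *p, const u_short *pts, int nports, int have_range)
{
        int i, npairs = 0;

        for (i = 0; i < nports; i++, p += 2, npairs++) {
                if (have_range && npairs == 0) {
                        p[0] = pts[i++];        /* range start */
                        p[1] = pts[i];          /* range end */
                } else {
                        p[0] = p[1] = pts[i];   /* single port */
                }
        }
        return npairs;  /* caller does ports->o.len |= npairs + 1 */
}
/*
 * For example, with the range flag set and pts = { 20, 21, 80 } this
 * yields the pairs (20, 21) and (80, 80), so o.len gains 3.
 */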
+ prev = cmd; + cmd = next_cmd(cmd); + } else { + if (flag & IP_FW_F_IN_COMPAT) { + fill_cmd(cmd, O_IN, 0); + + prev = cmd; + cmd = next_cmd(cmd); + } + if (flag & IP_FW_F_OUT_COMPAT) { + /* if the previous command was O_IN, and this + * is being set as well, it's equivalent to not + * having either command, so let's back up prev + * to the cmd before it and move cmd to prev. + */ + if (prev->opcode == O_IN) { + cmd = prev; + bzero(cmd, sizeof(*cmd)); + } else { + cmd->len ^= F_NOT; /* toggle F_NOT */ + fill_cmd(cmd, O_IN, 0); + + prev = cmd; + cmd = next_cmd(cmd); + } + } + if (flag & IP_FW_F_OIFACE_COMPAT) { + /* xmit */ + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu = compat_rule->fw_out_if; + + cmd->opcode = O_XMIT; + ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); + + if (ifu.fu_via_ip.s_addr == 0) { + /* "any" */ + ifcmd->name[0] = '\0'; + ifcmd->o.len = 0; + } else if (flag & IP_FW_F_OIFNAME_COMPAT) { + /* by name */ + strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); + ifcmd->p.unit = ifu.fu_via_if_compat.unit; + } else { + /* by addr */ + ifcmd->p.ip = ifu.fu_via_ip; + } + + prev = cmd; + cmd = next_cmd(cmd); + } else if (flag & IP_FW_F_IIFACE_COMPAT) { + /* recv */ + ipfw_insn_if *ifcmd = (ipfw_insn_if *)cmd; + union ip_fw_if_compat ifu = compat_rule->fw_in_if; + + cmd->opcode = O_RECV; + ifcmd->o.len |= F_INSN_SIZE(ipfw_insn_if); + + if (ifu.fu_via_ip.s_addr == 0) { + /* "any" */ + ifcmd->name[0] = '\0'; + ifcmd->o.len = 0; + } else if (flag & IP_FW_F_IIFNAME_COMPAT) { + /* by name */ + strncpy(ifcmd->name, ifu.fu_via_if_compat.name, sizeof(ifcmd->name)); + ifcmd->p.unit = ifu.fu_via_if_compat.unit; + } else { + /* by addr */ + ifcmd->p.ip = ifu.fu_via_ip; + } + + prev = cmd; + cmd = next_cmd(cmd); + } + } + + if (flag & IP_FW_F_FRAG_COMPAT) { + fill_cmd(cmd, O_FRAG, 0); + prev = cmd; cmd = next_cmd(cmd); } - } - - if (flag & IP_FW_F_FRAG_COMPAT) { - fill_cmd(cmd, O_FRAG, 0); - prev = cmd; - cmd = next_cmd(cmd); - } - - /* IP options */ - if (compat_rule->fw_ipopt != 0 || compat_rule->fw_ipnopt != 0) { - fill_cmd(cmd, O_IPOPT, (compat_rule->fw_ipopt & 0xff) | - (compat_rule->fw_ipnopt & 0xff) << 8); - - prev = cmd; - cmd = next_cmd(cmd); - } - - if (compat_rule->fw_prot == IPPROTO_TCP) { - if (compat_rule->fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { - fill_cmd(cmd, O_ESTAB, 0); - + /* IP options */ + if (compat_rule->fw_ipopt != 0 || compat_rule->fw_ipnopt != 0) { + fill_cmd(cmd, O_IPOPT, (compat_rule->fw_ipopt & 0xff) | + (compat_rule->fw_ipnopt & 0xff) << 8); + prev = cmd; cmd = next_cmd(cmd); } - - /* TCP options and flags */ - if (compat_rule->fw_tcpf != 0 || compat_rule->fw_tcpnf != 0) { - if ((compat_rule->fw_tcpf & IP_FW_TCPF_SYN_COMPAT) && - compat_rule->fw_tcpnf & IP_FW_TCPF_ACK_COMPAT) { - fill_cmd(cmd, O_TCPFLAGS, (TH_SYN) | ( (TH_ACK) & 0xff) <<8); - + + if (compat_rule->fw_prot == IPPROTO_TCP) { + if (compat_rule->fw_ipflg & IP_FW_IF_TCPEST_COMPAT) { + fill_cmd(cmd, O_ESTAB, 0); + prev = cmd; cmd = next_cmd(cmd); } - else { - fill_cmd(cmd, O_TCPFLAGS, (compat_rule->fw_tcpf & 0xff) | - (compat_rule->fw_tcpnf & 0xff) << 8); - + + /* TCP options and flags */ + if (compat_rule->fw_tcpf != 0 || compat_rule->fw_tcpnf != 0) { + if ((compat_rule->fw_tcpf & IP_FW_TCPF_SYN_COMPAT) && + compat_rule->fw_tcpnf & IP_FW_TCPF_ACK_COMPAT) { + fill_cmd(cmd, O_TCPFLAGS, (TH_SYN) | ((TH_ACK) & 0xff) << 8); + + prev = cmd; + cmd = next_cmd(cmd); + } else { + fill_cmd(cmd, O_TCPFLAGS, (compat_rule->fw_tcpf & 0xff) | + (compat_rule->fw_tcpnf & 0xff) << 
8); + + prev = cmd; + cmd = next_cmd(cmd); + } + } + if (compat_rule->fw_tcpopt != 0 || compat_rule->fw_tcpnopt != 0) { + fill_cmd(cmd, O_TCPOPTS, (compat_rule->fw_tcpopt & 0xff) | + (compat_rule->fw_tcpnopt & 0xff) << 8); + prev = cmd; cmd = next_cmd(cmd); } } - if (compat_rule->fw_tcpopt != 0 || compat_rule->fw_tcpnopt != 0) { - fill_cmd(cmd, O_TCPOPTS, (compat_rule->fw_tcpopt & 0xff) | - (compat_rule->fw_tcpnopt & 0xff) << 8); - + + /* ICMP */ + /* XXX: check this */ + if (flag & IP_FW_F_ICMPBIT_COMPAT) { + int i; + ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd;/* alias for cmd */ + cmd32->o.opcode = O_ICMPTYPE; + cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); + + for (i = 0; i < IP_FW_ICMPTYPES_DIM_COMPAT; i++) { + cmd32->d[0] |= compat_rule->fw_uar_compat.fw_icmptypes[i]; + } + prev = cmd; cmd = next_cmd(cmd); } - } - - /* ICMP */ - /* XXX: check this */ - if (flag & IP_FW_F_ICMPBIT_COMPAT) { - int i; - ipfw_insn_u32 *cmd32 = (ipfw_insn_u32 *)cmd; /* alias for cmd */ - cmd32->o.opcode = O_ICMPTYPE; - cmd32->o.len |= F_INSN_SIZE(ipfw_insn_u32); - - for (i = 0; i < IP_FW_ICMPTYPES_DIM_COMPAT; i++) { - cmd32->d[0] |= compat_rule->fw_uar_compat.fw_icmptypes[i]; - } - - prev = cmd; - cmd = next_cmd(cmd); - } } /* end commands */ done: - /* finally, copy everything into the current + /* finally, copy everything into the current * rule buffer in the right order. */ dst = curr_rule->cmd; - + /* first, do match probability */ if (compat_rule->fw_flg & IP_FW_F_RND_MATCH_COMPAT) { dst->opcode = O_PROB; dst->len = 2; - *((int32_t *)(dst+1)) = compat_rule->pipe_ptr; + *((int32_t *)(dst + 1)) = compat_rule->pipe_ptr; dst += dst->len; } - + /* generate O_PROBE_STATE if necessary */ if (have_state && have_state->opcode != O_CHECK_STATE) { fill_cmd(dst, O_PROBE_STATE, 0); dst = next_cmd(dst); } - + /* * copy all commands but O_LOG, O_KEEP_STATE */ @@ -3027,7 +3032,7 @@ done: bcopy(have_state, dst, k * sizeof(uint32_t)); dst += k; } - + /* * start action section */ @@ -3042,7 +3047,7 @@ done: bcopy(src, dst, k * sizeof(uint32_t)); dst += k; } - + /* * copy all other actions */ @@ -3057,26 +3062,27 @@ done: } static int -ipfw_version_one_to_version_two_32(struct sockopt *sopt, struct ip_fw *curr_rule, - struct ip_fw_compat_32 *rule_vers1) +ipfw_version_one_to_version_two_32(struct sockopt *sopt, struct ip_fw *curr_rule, + struct ip_fw_compat_32 *rule_vers1) { - int err = EINVAL; - struct ip_fw_compat_32 *rule_ptr; - struct ip_fw_compat_32 rule; - + int err = EINVAL; + struct ip_fw_compat_32 *rule_ptr; + struct ip_fw_compat_32 rule; + if (rule_vers1) { rule_ptr = rule_vers1; err = 0; } else { /* do some basic size checking here, more extensive checking later */ - if (!sopt->sopt_val || sopt->sopt_valsize < sizeof(struct ip_fw_compat_32)) + if (!sopt->sopt_val || sopt->sopt_valsize < sizeof(struct ip_fw_compat_32)) { return err; - - if ((err = sooptcopyin(sopt, &rule, sizeof(struct ip_fw_compat_32), - sizeof(struct ip_fw_compat_32)))) { + } + + if ((err = sooptcopyin(sopt, &rule, sizeof(struct ip_fw_compat_32), + sizeof(struct ip_fw_compat_32)))) { return err; } - + rule_ptr = &rule; } @@ -3090,32 +3096,33 @@ ipfw_version_one_to_version_two_32(struct sockopt *sopt, struct ip_fw *curr_rule curr_rule->bcnt = rule_ptr->fw_bcnt; curr_rule->timestamp = rule_ptr->timestamp; - + #if FW2_DEBUG_VERBOSE ipfw_print_vers2_struct(curr_rule); #endif - + return err; } static int -ipfw_version_one_to_version_two_64(struct sockopt *sopt, struct ip_fw *curr_rule, - struct ip_fw_compat_64 *rule_vers1) 
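/*
 * Sketch of the O_PROB emission in the "done:" blocks above. O_PROB
 * is a two-word instruction: the opcode word plus one 32-bit word
 * holding the match probability, which the converter takes from the
 * compat rule's pipe_ptr field when IP_FW_F_RND_MATCH_COMPAT is set.
 * emit_prob() is a hypothetical helper illustrating the layout; the
 * advance by dst->len steps over both 32-bit words.
 */
static void
emit_prob(ipfw_insn **dstp, int32_t prob)
{
        ipfw_insn *dst = *dstp;

        dst->opcode = O_PROB;
        dst->len = 2;                           /* opcode + data word */
        *((int32_t *)(dst + 1)) = prob;         /* match probability */
        *dstp = dst + dst->len;
}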
+ipfw_version_one_to_version_two_64(struct sockopt *sopt, struct ip_fw *curr_rule, + struct ip_fw_compat_64 *rule_vers1) { - int err = EINVAL; - struct ip_fw_compat_64 *rule_ptr; - struct ip_fw_compat_64 rule; - + int err = EINVAL; + struct ip_fw_compat_64 *rule_ptr; + struct ip_fw_compat_64 rule; + if (rule_vers1) { rule_ptr = rule_vers1; err = 0; } else { /* do some basic size checking here, more extensive checking later */ - if (!sopt->sopt_val || sopt->sopt_valsize < sizeof(struct ip_fw_compat_64)) + if (!sopt->sopt_val || sopt->sopt_valsize < sizeof(struct ip_fw_compat_64)) { return err; - - if ((err = sooptcopyin(sopt, &rule, sizeof(struct ip_fw_compat_64), - sizeof(struct ip_fw_compat_64)))) { + } + + if ((err = sooptcopyin(sopt, &rule, sizeof(struct ip_fw_compat_64), + sizeof(struct ip_fw_compat_64)))) { return err; } rule_ptr = &rule; @@ -3131,29 +3138,29 @@ ipfw_version_one_to_version_two_64(struct sockopt *sopt, struct ip_fw *curr_rule curr_rule->bcnt = rule_ptr->fw_bcnt; curr_rule->timestamp = rule_ptr->timestamp; - + #if FW2_DEBUG_VERBOSE ipfw_print_vers2_struct(curr_rule); #endif - + return err; } -/* This converts to whatever the latest version is. Currently the +/* This converts to whatever the latest version is. Currently the * latest version of the firewall is ipfw2. */ static int ipfw_version_one_to_latest_32(struct sockopt *sopt, struct ip_fw *curr_rule, struct ip_fw_compat_32 *rule_vers1) { int err; - + /* if rule_vers1 is not null then this is coming from * ipfw_version_zero_to_latest(), so pass that along; * otherwise let ipfw_version_one_to_version_two() * get the rule from sopt. */ err = ipfw_version_one_to_version_two_32(sopt, curr_rule, rule_vers1); - + return err; } @@ -3161,26 +3168,26 @@ static int ipfw_version_one_to_latest_64(struct sockopt *sopt, struct ip_fw *curr_rule, struct ip_fw_compat_64 *rule_vers1) { int err; - + /* if rule_vers1 is not null then this is coming from * ipfw_version_zero_to_latest(), so pass that along; * otherwise let ipfw_version_one_to_version_two() * get the rule from sopt. */ err = ipfw_version_one_to_version_two_64(sopt, curr_rule, rule_vers1); - + return err; } #if 0 -/* +/* * XXX - ipfw_version_zero_to_one - * + * * This function is only used in version #1 of ipfw, which is now deprecated. * - */ + */ static void ipfw_version_zero_to_one(struct ip_old_fw *rule_vers0, struct ip_fw_compat *rule_vers1) @@ -3214,42 +3221,43 @@ ipfw_version_zero_to_one(struct ip_old_fw *rule_vers0, struct ip_fw_compat *rule #endif /* !ipfw_version_zero_to_one */ -/* rule is a u_int32_t buffer[255] into which the converted +/* rule is a u_int32_t buffer[255] into which the converted * (if necessary) rules go. */ int ipfw_convert_to_latest(struct sockopt *sopt, struct ip_fw *curr_rule, int api_version, int is64user) { - int err = 0; - + int err = 0; + /* the following functions copy the rules passed in and * convert to latest structures based on version */ switch (api_version) { - case IP_FW_VERSION_0: - /* we're not supporting VERSION 0 */ - err = EOPNOTSUPP; - break; - - case IP_FW_VERSION_1: - /* this is the version supported in Panther */ - if ( is64user ) - err = ipfw_version_one_to_latest_64(sopt, curr_rule, NULL); - else - err = ipfw_version_one_to_latest_32(sopt, curr_rule, NULL); - break; - - case IP_FW_CURRENT_API_VERSION: - /* IPFW2 for now */ - /* do nothing here... 
*/ - break; - - default: - /* unrecognized/unsupported version */ - err = EINVAL; - break; + case IP_FW_VERSION_0: + /* we're not supporting VERSION 0 */ + err = EOPNOTSUPP; + break; + + case IP_FW_VERSION_1: + /* this is the version supported in Panther */ + if (is64user) { + err = ipfw_version_one_to_latest_64(sopt, curr_rule, NULL); + } else { + err = ipfw_version_one_to_latest_32(sopt, curr_rule, NULL); + } + break; + + case IP_FW_CURRENT_API_VERSION: + /* IPFW2 for now */ + /* do nothing here... */ + break; + + default: + /* unrecognized/unsupported version */ + err = EINVAL; + break; } - + return err; } @@ -3258,55 +3266,51 @@ ipfw_get_command_and_version(struct sockopt *sopt, int *command, u_int32_t *api_ { int cmd; int err = 0; - u_int32_t vers = IP_FW_VERSION_NONE; - + u_int32_t vers = IP_FW_VERSION_NONE; + /* first deal with the oldest version */ - if (sopt->sopt_name == IP_OLD_FW_GET) { + if (sopt->sopt_name == IP_OLD_FW_GET) { vers = IP_FW_VERSION_0; cmd = IP_FW_GET; - } - else if (sopt->sopt_name == IP_OLD_FW_FLUSH) { + } else if (sopt->sopt_name == IP_OLD_FW_FLUSH) { vers = IP_FW_VERSION_0; cmd = IP_FW_FLUSH; - } - else if (sopt->sopt_name == IP_OLD_FW_ZERO) { + } else if (sopt->sopt_name == IP_OLD_FW_ZERO) { vers = IP_FW_VERSION_0; cmd = IP_FW_ZERO; - } - else if (sopt->sopt_name == IP_OLD_FW_ADD) { + } else if (sopt->sopt_name == IP_OLD_FW_ADD) { vers = IP_FW_VERSION_0; cmd = IP_FW_ADD; - } - else if (sopt->sopt_name == IP_OLD_FW_DEL) { + } else if (sopt->sopt_name == IP_OLD_FW_DEL) { vers = IP_FW_VERSION_0; cmd = IP_FW_DEL; - } - else if (sopt->sopt_name == IP_OLD_FW_RESETLOG) { + } else if (sopt->sopt_name == IP_OLD_FW_RESETLOG) { vers = IP_FW_VERSION_0; cmd = IP_FW_RESETLOG; - } - else { + } else { cmd = sopt->sopt_name; } - + if (vers == IP_FW_VERSION_NONE) { /* working off the fact that the offset * is the same in both structs. 
*/ struct ip_fw_64 rule; - size_t copyinsize; - - if (proc_is64bit(sopt->sopt_p)) - copyinsize = sizeof(struct ip_fw_64); - else - copyinsize = sizeof(struct ip_fw_32); - - if (!sopt->sopt_val || sopt->sopt_valsize < copyinsize) + size_t copyinsize; + + if (proc_is64bit(sopt->sopt_p)) { + copyinsize = sizeof(struct ip_fw_64); + } else { + copyinsize = sizeof(struct ip_fw_32); + } + + if (!sopt->sopt_val || sopt->sopt_valsize < copyinsize) { return EINVAL; + } if ((err = sooptcopyin(sopt, &rule, copyinsize, copyinsize))) { return err; } - + vers = rule.version; } @@ -3316,7 +3320,6 @@ ipfw_get_command_and_version(struct sockopt *sopt, int *command, u_int32_t *api_ if (api_version) { *api_version = vers; } - + return err; } - diff --git a/bsd/netinet/ip_fw2_compat.h b/bsd/netinet/ip_fw2_compat.h index def315c4d..a26563d9e 100644 --- a/bsd/netinet/ip_fw2_compat.h +++ b/bsd/netinet/ip_fw2_compat.h @@ -4,9 +4,9 @@ #define _IP_FW_COMPAT_H_ /* prototypes */ -void ipfw_convert_from_latest(struct ip_fw *curr_rule, void *old_rule, u_int32_t api_version, int is64user); -int ipfw_convert_to_latest(struct sockopt *sopt, struct ip_fw *rule, int api_version, int is64user); -int ipfw_get_command_and_version(struct sockopt *sopt, int *command, u_int32_t *api_version); +void ipfw_convert_from_latest(struct ip_fw *curr_rule, void *old_rule, u_int32_t api_version, int is64user); +int ipfw_convert_to_latest(struct sockopt *sopt, struct ip_fw *rule, int api_version, int is64user); +int ipfw_get_command_and_version(struct sockopt *sopt, int *command, u_int32_t *api_version); /* @@ -29,12 +29,12 @@ int ipfw_get_command_and_version(struct sockopt *sopt, int *command, u_int32_t * */ union ip_fw_if_compat { - struct in_addr fu_via_ip; /* Specified by IP address */ - struct { /* Specified by interface name */ + struct in_addr fu_via_ip; /* Specified by IP address */ + struct { /* Specified by interface name */ #define FW_IFNLEN_COMPAT 10 /* need room ! was IFNAMSIZ */ - char name[FW_IFNLEN_COMPAT]; - short unit; /* -1 means match any unit */ - } fu_via_if_compat; + char name[FW_IFNLEN_COMPAT]; + short unit; /* -1 means match any unit */ + } fu_via_if_compat; }; /* @@ -47,63 +47,63 @@ union ip_fw_if_compat { struct ip_fw_compat { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION by clients. */ - void *context; /* Context that is usable by user processes to */ - /* identify this rule. */ - u_int64_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ + u_int32_t version; /* Version of this structure. Should always be */ + /* set to IP_FW_CURRENT_API_VERSION by clients. */ + void *context; /* Context that is usable by user processes to */ + /* identify this rule. 
*/ + u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ + struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ + struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ + u_short fw_number; /* Rule number */ + u_int fw_flg; /* Flags word */ +#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ union { - u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ -#define IP_FW_ICMPTYPES_MAX_COMPAT 128 -#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) + u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ +#define IP_FW_ICMPTYPES_MAX_COMPAT 128 +#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM_COMPAT]; /* ICMP types bitmap */ } fw_uar_compat; - u_int fw_ipflg; /* IP flags word */ - u_char fw_ipopt,fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpopt,fw_tcpnopt; /* TCP options set/unset */ - u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ - long timestamp; /* timestamp (tv_sec) of last match */ - union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ + u_int fw_ipflg; /* IP flags word */ + u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ + u_char fw_tcpopt, fw_tcpnopt; /* TCP options set/unset */ + u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ + long timestamp; /* timestamp (tv_sec) of last match */ + union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ + u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ struct sockaddr_in fu_fwd_ip; - } fw_un_compat; - u_char fw_prot; /* IP protocol */ + } fw_un_compat; + u_char fw_prot; /* IP protocol */ /* * N'of src ports and # of dst ports in ports array (dst ports * follow src ports; max of 10 ports in all; count of 0 means * match all ports) */ - u_char fw_nports; - void *pipe_ptr; /* flow_set ptr for dummynet pipe */ - void *next_rule_ptr ; /* next rule in case of match */ - uid_t fw_uid; /* uid to match */ - int fw_logamount; /* amount to log */ - u_int64_t fw_loghighest; /* highest number packet to log */ + u_char fw_nports; + void *pipe_ptr; /* flow_set ptr for dummynet pipe */ + void *next_rule_ptr; /* next rule in case of match */ + uid_t fw_uid; /* uid to match */ + int fw_logamount; /* amount to log */ + u_int64_t fw_loghighest; /* highest number packet to log */ }; /* * extended ipfw structure... some fields in the original struct * can be used to pass parameters up/down, namely pointers * void *pipe_ptr - * void *next_rule_ptr + * void *next_rule_ptr * some others can be used to pass parameters down, namely counters etc. 
* u_int64_t fw_pcnt,fw_bcnt; * long timestamp; */ struct ip_fw_ext_compat { /* extended structure */ - struct ip_fw rule; /* must be at offset 0 */ - long dont_match_prob; /* 0x7fffffff means 1.0, always fail */ - u_int dyn_type; /* type for dynamic rule */ + struct ip_fw rule; /* must be at offset 0 */ + long dont_match_prob; /* 0x7fffffff means 1.0, always fail */ + u_int dyn_type;/* type for dynamic rule */ }; struct ip_fw_chain_compat { @@ -114,229 +114,229 @@ struct ip_fw_chain_compat { /* * dynamic ipfw rule */ - + struct ipfw_dyn_rule_compat { - struct ipfw_dyn_rule *next ; - - struct ipfw_flow_id id ; - struct ipfw_flow_id mask ; - struct ip_fw_chain_compat *chain ; /* pointer to parent rule */ - u_int32_t type ; /* rule type */ - u_int32_t expire ; /* expire time */ - u_int64_t pcnt, bcnt; /* match counters */ - u_int32_t bucket ; /* which bucket in hash table */ - u_int32_t state ; /* state of this rule (typ. a */ - /* combination of TCP flags) */ -} ; + struct ipfw_dyn_rule *next; + + struct ipfw_flow_id id; + struct ipfw_flow_id mask; + struct ip_fw_chain_compat *chain; /* pointer to parent rule */ + u_int32_t type; /* rule type */ + u_int32_t expire; /* expire time */ + u_int64_t pcnt, bcnt; /* match counters */ + u_int32_t bucket; /* which bucket in hash table */ + u_int32_t state; /* state of this rule (typ. a */ + /* combination of TCP flags) */ +}; #ifdef BSD_KERNEL_PRIVATE #pragma pack(4) struct ip_fw_compat_32 { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION by clients. */ - user32_addr_t context; /* Context that is usable by user processes to */ - /* identify this rule. */ - u_int64_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk;/* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ + u_int32_t version; /* Version of this structure. Should always be */ + /* set to IP_FW_CURRENT_API_VERSION by clients. */ + user32_addr_t context; /* Context that is usable by user processes to */ + /* identify this rule. 
*/ + u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ + struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ + struct in_addr fw_smsk, fw_dmsk;/* Mask for src and dest IP addr */ + u_short fw_number; /* Rule number */ + u_int fw_flg; /* Flags word */ +#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ union { - u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ -#define IP_FW_ICMPTYPES_MAX_COMPAT 128 -#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) + u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ +#define IP_FW_ICMPTYPES_MAX_COMPAT 128 +#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM_COMPAT]; /* ICMP types bitmap */ } fw_uar_compat; - u_int fw_ipflg; /* IP flags word */ - u_char fw_ipopt,fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpopt,fw_tcpnopt; /* TCP options set/unset */ - u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ - u_int32_t timestamp; /* timestamp (tv_sec) of last match */ - union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ + u_int fw_ipflg; /* IP flags word */ + u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ + u_char fw_tcpopt, fw_tcpnopt; /* TCP options set/unset */ + u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ + u_int32_t timestamp; /* timestamp (tv_sec) of last match */ + union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ + u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ struct sockaddr_in fu_fwd_ip; - } fw_un_compat; - u_char fw_prot; /* IP protocol */ + } fw_un_compat; + u_char fw_prot; /* IP protocol */ /* * N'of src ports and # of dst ports in ports array (dst ports * follow src ports; max of 10 ports in all; count of 0 means * match all ports) */ - u_char fw_nports; - user32_addr_t pipe_ptr; /* flow_set ptr for dummynet pipe */ - user32_addr_t next_rule_ptr ; /* next rule in case of match */ - uid_t fw_uid; /* uid to match */ - int fw_logamount; /* amount to log */ - u_int64_t fw_loghighest; /* highest number packet to log */ + u_char fw_nports; + user32_addr_t pipe_ptr; /* flow_set ptr for dummynet pipe */ + user32_addr_t next_rule_ptr; /* next rule in case of match */ + uid_t fw_uid; /* uid to match */ + int fw_logamount; /* amount to log */ + u_int64_t fw_loghighest; /* highest number packet to log */ }; #pragma pack() struct ip_fw_compat_64 { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP_FW_CURRENT_API_VERSION by clients. */ - user64_addr_t context; /* Context that is usable by user processes to */ - /* identify this rule. 
*/ - u_int64_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk;/* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ + u_int32_t version; /* Version of this structure. Should always be */ + /* set to IP_FW_CURRENT_API_VERSION by clients. */ + user64_addr_t context; /* Context that is usable by user processes to */ + /* identify this rule. */ + u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ + struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ + struct in_addr fw_smsk, fw_dmsk;/* Mask for src and dest IP addr */ + u_short fw_number; /* Rule number */ + u_int fw_flg; /* Flags word */ +#define IP_FW_MAX_PORTS_COMPAT 10 /* A reasonable maximum */ union { - u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ -#define IP_FW_ICMPTYPES_MAX_COMPAT 128 -#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) + u_short fw_pts[IP_FW_MAX_PORTS_COMPAT]; /* Array of port numbers to match */ +#define IP_FW_ICMPTYPES_MAX_COMPAT 128 +#define IP_FW_ICMPTYPES_DIM_COMPAT (IP_FW_ICMPTYPES_MAX_COMPAT / (sizeof(unsigned) * 8)) unsigned fw_icmptypes[IP_FW_ICMPTYPES_DIM_COMPAT]; /* ICMP types bitmap */ } fw_uar_compat; - u_int fw_ipflg; /* IP flags word */ - u_char fw_ipopt,fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpopt,fw_tcpnopt; /* TCP options set/unset */ - u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ - u_int64_t timestamp; /* timestamp (tv_sec) of last match */ - union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ + u_int fw_ipflg; /* IP flags word */ + u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ + u_char fw_tcpopt, fw_tcpnopt; /* TCP options set/unset */ + u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ + u_int64_t timestamp; /* timestamp (tv_sec) of last match */ + union ip_fw_if_compat fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ + u_short fu_pipe_nr; /* queue number (option DUMMYNET) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ struct sockaddr_in fu_fwd_ip; - } fw_un_compat; - u_char fw_prot; /* IP protocol */ + } fw_un_compat; + u_char fw_prot; /* IP protocol */ /* * N'of src ports and # of dst ports in ports array (dst ports * follow src ports; max of 10 ports in all; count of 0 means * match all ports) */ - u_char fw_nports; - user64_addr_t pipe_ptr; /* flow_set ptr for dummynet pipe */ - user64_addr_t next_rule_ptr ; /* next rule in case of match */ - uid_t fw_uid; /* uid to match */ - int fw_logamount; /* amount to log */ - u_int64_t fw_loghighest; /* highest number packet to log */ + u_char fw_nports; + user64_addr_t pipe_ptr; /* flow_set ptr for dummynet pipe */ + user64_addr_t next_rule_ptr; /* next rule in case of match */ + uid_t fw_uid; /* uid to match */ + int fw_logamount; /* amount to log */ + u_int64_t fw_loghighest; /* highest number packet to log */ }; struct ipfw_dyn_rule_compat_32 { - user32_addr_t next ; - - struct 
ipfw_flow_id id ; - struct ipfw_flow_id mask ; - user32_addr_t chain ; /* pointer to parent rule */ - u_int32_t type ; /* rule type */ - u_int32_t expire ; /* expire time */ - u_int64_t pcnt, bcnt; /* match counters */ - u_int32_t bucket ; /* which bucket in hash table */ - u_int32_t state ; /* state of this rule (typ. a */ - /* combination of TCP flags) */ -} ; + user32_addr_t next; + + struct ipfw_flow_id id; + struct ipfw_flow_id mask; + user32_addr_t chain; /* pointer to parent rule */ + u_int32_t type; /* rule type */ + u_int32_t expire; /* expire time */ + u_int64_t pcnt, bcnt; /* match counters */ + u_int32_t bucket; /* which bucket in hash table */ + u_int32_t state; /* state of this rule (typ. a */ + /* combination of TCP flags) */ +}; struct ipfw_dyn_rule_compat_64 { - user64_addr_t next ; - - struct ipfw_flow_id id ; - struct ipfw_flow_id mask ; - user64_addr_t chain ; /* pointer to parent rule */ - u_int32_t type ; /* rule type */ - u_int32_t expire ; /* expire time */ - u_int64_t pcnt, bcnt; /* match counters */ - u_int32_t bucket ; /* which bucket in hash table */ - u_int32_t state ; /* state of this rule (typ. a */ - /* combination of TCP flags) */ -} ; + user64_addr_t next; + + struct ipfw_flow_id id; + struct ipfw_flow_id mask; + user64_addr_t chain; /* pointer to parent rule */ + u_int32_t type; /* rule type */ + u_int32_t expire; /* expire time */ + u_int64_t pcnt, bcnt; /* match counters */ + u_int32_t bucket; /* which bucket in hash table */ + u_int32_t state; /* state of this rule (typ. a */ + /* combination of TCP flags) */ +}; #endif /* BSD_KERNEL_PRIVATE */ -#define IP_FW_GETNSRCP_COMPAT(rule) ((rule)->fw_nports & 0x0f) -#define IP_FW_SETNSRCP_COMPAT(rule, n) do { \ - (rule)->fw_nports &= ~0x0f; \ - (rule)->fw_nports |= (n); \ - } while (0) -#define IP_FW_GETNDSTP_COMPAT(rule) ((rule)->fw_nports >> 4) -#define IP_FW_SETNDSTP_COMPAT(rule, n) do { \ - (rule)->fw_nports &= ~0xf0; \ - (rule)->fw_nports |= (n) << 4;\ - } while (0) +#define IP_FW_GETNSRCP_COMPAT(rule) ((rule)->fw_nports & 0x0f) +#define IP_FW_SETNSRCP_COMPAT(rule, n) do { \ + (rule)->fw_nports &= ~0x0f; \ + (rule)->fw_nports |= (n); \ + } while (0) +#define IP_FW_GETNDSTP_COMPAT(rule) ((rule)->fw_nports >> 4) +#define IP_FW_SETNDSTP_COMPAT(rule, n) do { \ + (rule)->fw_nports &= ~0xf0; \ + (rule)->fw_nports |= (n) << 4;\ + } while (0) -#define fw_divert_port_compat fw_un_compat.fu_divert_port -#define fw_skipto_rule_compat fw_un_compat.fu_skipto_rule -#define fw_reject_code_compat fw_un_compat.fu_reject_code -#define fw_pipe_nr_compat fw_un_compat.fu_pipe_nr -#define fw_fwd_ip_compat fw_un_compat.fu_fwd_ip +#define fw_divert_port_compat fw_un_compat.fu_divert_port +#define fw_skipto_rule_compat fw_un_compat.fu_skipto_rule +#define fw_reject_code_compat fw_un_compat.fu_reject_code +#define fw_pipe_nr_compat fw_un_compat.fu_pipe_nr +#define fw_fwd_ip_compat fw_un_compat.fu_fwd_ip /* * Values for "flags" field . 
*/ -#define IP_FW_F_COMMAND_COMPAT 0x000000ff /* Mask for type of chain entry: */ -#define IP_FW_F_DENY_COMPAT 0x00000000 /* This is a deny rule */ -#define IP_FW_F_REJECT_COMPAT 0x00000001 /* Deny and send a response packet */ -#define IP_FW_F_ACCEPT_COMPAT 0x00000002 /* This is an accept rule */ -#define IP_FW_F_COUNT_COMPAT 0x00000003 /* This is a count rule */ -#define IP_FW_F_DIVERT_COMPAT 0x00000004 /* This is a divert rule */ -#define IP_FW_F_TEE_COMPAT 0x00000005 /* This is a tee rule */ -#define IP_FW_F_SKIPTO_COMPAT 0x00000006 /* This is a skipto rule */ -#define IP_FW_F_FWD_COMPAT 0x00000007 /* This is a "change forwarding address" rule */ -#define IP_FW_F_PIPE_COMPAT 0x00000008 /* This is a dummynet rule */ -#define IP_FW_F_QUEUE_COMPAT 0x00000009 /* This is a dummynet queue */ +#define IP_FW_F_COMMAND_COMPAT 0x000000ff /* Mask for type of chain entry: */ +#define IP_FW_F_DENY_COMPAT 0x00000000 /* This is a deny rule */ +#define IP_FW_F_REJECT_COMPAT 0x00000001 /* Deny and send a response packet */ +#define IP_FW_F_ACCEPT_COMPAT 0x00000002 /* This is an accept rule */ +#define IP_FW_F_COUNT_COMPAT 0x00000003 /* This is a count rule */ +#define IP_FW_F_DIVERT_COMPAT 0x00000004 /* This is a divert rule */ +#define IP_FW_F_TEE_COMPAT 0x00000005 /* This is a tee rule */ +#define IP_FW_F_SKIPTO_COMPAT 0x00000006 /* This is a skipto rule */ +#define IP_FW_F_FWD_COMPAT 0x00000007 /* This is a "change forwarding address" rule */ +#define IP_FW_F_PIPE_COMPAT 0x00000008 /* This is a dummynet rule */ +#define IP_FW_F_QUEUE_COMPAT 0x00000009 /* This is a dummynet queue */ -#define IP_FW_F_IN_COMPAT 0x00000100 /* Check inbound packets */ -#define IP_FW_F_OUT_COMPAT 0x00000200 /* Check outbound packets */ -#define IP_FW_F_IIFACE_COMPAT 0x00000400 /* Apply inbound interface test */ -#define IP_FW_F_OIFACE_COMPAT 0x00000800 /* Apply outbound interface test */ +#define IP_FW_F_IN_COMPAT 0x00000100 /* Check inbound packets */ +#define IP_FW_F_OUT_COMPAT 0x00000200 /* Check outbound packets */ +#define IP_FW_F_IIFACE_COMPAT 0x00000400 /* Apply inbound interface test */ +#define IP_FW_F_OIFACE_COMPAT 0x00000800 /* Apply outbound interface test */ -#define IP_FW_F_PRN_COMPAT 0x00001000 /* Print if this rule matches */ +#define IP_FW_F_PRN_COMPAT 0x00001000 /* Print if this rule matches */ -#define IP_FW_F_SRNG_COMPAT 0x00002000 /* The first two src ports are a min * - * and max range (stored in host byte * - * order). */ +#define IP_FW_F_SRNG_COMPAT 0x00002000 /* The first two src ports are a min * + * and max range (stored in host byte * + * order). */ -#define IP_FW_F_DRNG_COMPAT 0x00004000 /* The first two dst ports are a min * - * and max range (stored in host byte * - * order). */ +#define IP_FW_F_DRNG_COMPAT 0x00004000 /* The first two dst ports are a min * + * and max range (stored in host byte * + * order). 
*/ -#define IP_FW_F_FRAG_COMPAT 0x00008000 /* Fragment */ +#define IP_FW_F_FRAG_COMPAT 0x00008000 /* Fragment */ -#define IP_FW_F_IIFNAME_COMPAT 0x00010000 /* In interface by name/unit (not IP) */ -#define IP_FW_F_OIFNAME_COMPAT 0x00020000 /* Out interface by name/unit (not IP) */ +#define IP_FW_F_IIFNAME_COMPAT 0x00010000 /* In interface by name/unit (not IP) */ +#define IP_FW_F_OIFNAME_COMPAT 0x00020000 /* Out interface by name/unit (not IP) */ -#define IP_FW_F_INVSRC_COMPAT 0x00040000 /* Invert sense of src check */ -#define IP_FW_F_INVDST_COMPAT 0x00080000 /* Invert sense of dst check */ +#define IP_FW_F_INVSRC_COMPAT 0x00040000 /* Invert sense of src check */ +#define IP_FW_F_INVDST_COMPAT 0x00080000 /* Invert sense of dst check */ -#define IP_FW_F_ICMPBIT_COMPAT 0x00100000 /* ICMP type bitmap is valid */ +#define IP_FW_F_ICMPBIT_COMPAT 0x00100000 /* ICMP type bitmap is valid */ -#define IP_FW_F_UID_COMPAT 0x00200000 /* filter by uid */ +#define IP_FW_F_UID_COMPAT 0x00200000 /* filter by uid */ -#define IP_FW_F_RND_MATCH_COMPAT 0x00800000 /* probabilistic rule match */ -#define IP_FW_F_SMSK_COMPAT 0x01000000 /* src-port + mask */ -#define IP_FW_F_DMSK_COMPAT 0x02000000 /* dst-port + mask */ -#define IP_FW_BRIDGED_COMPAT 0x04000000 /* only match bridged packets */ -#define IP_FW_F_KEEP_S_COMPAT 0x08000000 /* keep state */ -#define IP_FW_F_CHECK_S_COMPAT 0x10000000 /* check state */ +#define IP_FW_F_RND_MATCH_COMPAT 0x00800000 /* probabilistic rule match */ +#define IP_FW_F_SMSK_COMPAT 0x01000000 /* src-port + mask */ +#define IP_FW_F_DMSK_COMPAT 0x02000000 /* dst-port + mask */ +#define IP_FW_BRIDGED_COMPAT 0x04000000 /* only match bridged packets */ +#define IP_FW_F_KEEP_S_COMPAT 0x08000000 /* keep state */ +#define IP_FW_F_CHECK_S_COMPAT 0x10000000 /* check state */ -#define IP_FW_F_SME_COMPAT 0x20000000 /* source = me */ -#define IP_FW_F_DME_COMPAT 0x40000000 /* destination = me */ +#define IP_FW_F_SME_COMPAT 0x20000000 /* source = me */ +#define IP_FW_F_DME_COMPAT 0x40000000 /* destination = me */ -#define IP_FW_F_MASK_COMPAT 0x7FFFFFFF /* All possible flag bits mask */ +#define IP_FW_F_MASK_COMPAT 0x7FFFFFFF /* All possible flag bits mask */ /* * Flags for the 'fw_ipflg' field, for comparing values of ip and its protocols. */ -#define IP_FW_IF_TCPEST_COMPAT 0x00000020 /* established TCP connection */ -#define IP_FW_IF_TCPMSK_COMPAT 0x00000020 /* mask of all TCP values */ +#define IP_FW_IF_TCPEST_COMPAT 0x00000020 /* established TCP connection */ +#define IP_FW_IF_TCPMSK_COMPAT 0x00000020 /* mask of all TCP values */ /* * Definitions for TCP flags. */ -#define IP_FW_TCPF_FIN_COMPAT TH_FIN -#define IP_FW_TCPF_SYN_COMPAT TH_SYN -#define IP_FW_TCPF_RST_COMPAT TH_RST -#define IP_FW_TCPF_PSH_COMPAT TH_PUSH -#define IP_FW_TCPF_ACK_COMPAT TH_ACK -#define IP_FW_TCPF_URG_COMPAT TH_URG +#define IP_FW_TCPF_FIN_COMPAT TH_FIN +#define IP_FW_TCPF_SYN_COMPAT TH_SYN +#define IP_FW_TCPF_RST_COMPAT TH_RST +#define IP_FW_TCPF_PSH_COMPAT TH_PUSH +#define IP_FW_TCPF_ACK_COMPAT TH_ACK +#define IP_FW_TCPF_URG_COMPAT TH_URG /* * For backwards compatibility with rules specifying "via iface" but @@ -344,13 +344,13 @@ struct ipfw_dyn_rule_compat_64 { * of bits to represent this configuration. */ -#define IF_FW_F_VIAHACK_COMPAT (IP_FW_F_IN_COMPAT|IP_FW_F_OUT_COMPAT|IP_FW_F_IIFACE_COMPAT|IP_FW_F_OIFACE_COMPAT) +#define IF_FW_F_VIAHACK_COMPAT (IP_FW_F_IN_COMPAT|IP_FW_F_OUT_COMPAT|IP_FW_F_IIFACE_COMPAT|IP_FW_F_OIFACE_COMPAT) /* * Definitions for REJECT response codes. 
* Values less than 256 correspond to ICMP unreachable codes. */ -#define IP_FW_REJECT_RST_COMPAT 0x0100 /* TCP packets: send RST */ +#define IP_FW_REJECT_RST_COMPAT 0x0100 /* TCP packets: send RST */ /* @@ -373,12 +373,12 @@ struct ipfw_dyn_rule_compat_64 { */ union ip_old_fw_if { - struct in_addr fu_via_ip; /* Specified by IP address */ - struct { /* Specified by interface name */ + struct in_addr fu_via_ip; /* Specified by IP address */ + struct { /* Specified by interface name */ #define OLD_FW_IFNLEN 10 /* need room ! was IFNAMSIZ */ - char name[OLD_FW_IFNLEN]; - short unit; /* -1 means match any unit */ - } fu_via_if; + char name[OLD_FW_IFNLEN]; + short unit; /* -1 means match any unit */ + } fu_via_if; }; /* @@ -391,95 +391,95 @@ union ip_old_fw_if { */ struct ip_old_fw { - u_int64_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ - struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ - struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ - u_short fw_number; /* Rule number */ - u_int fw_flg; /* Flags word */ -#define IP_OLD_FW_MAX_PORTS 10 /* A reasonable maximum */ + u_int64_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ + struct in_addr fw_src, fw_dst; /* Source and destination IP addr */ + struct in_addr fw_smsk, fw_dmsk; /* Mask for src and dest IP addr */ + u_short fw_number; /* Rule number */ + u_int fw_flg; /* Flags word */ +#define IP_OLD_FW_MAX_PORTS 10 /* A reasonable maximum */ union { - u_short fw_pts[IP_OLD_FW_MAX_PORTS]; /* Array of port numbers to match */ -#define IP_OLD_FW_ICMPTYPES_MAX 128 -#define IP_OLD_FW_ICMPTYPES_DIM (IP_OLD_FW_ICMPTYPES_MAX / (sizeof(unsigned) * 8)) - unsigned fw_icmptypes[IP_OLD_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ + u_short fw_pts[IP_OLD_FW_MAX_PORTS]; /* Array of port numbers to match */ +#define IP_OLD_FW_ICMPTYPES_MAX 128 +#define IP_OLD_FW_ICMPTYPES_DIM (IP_OLD_FW_ICMPTYPES_MAX / (sizeof(unsigned) * 8)) + unsigned fw_icmptypes[IP_OLD_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ } fw_uar; - u_char fw_ipopt,fw_ipnopt; /* IP options set/unset */ - u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ - long timestamp; /* timestamp (tv_sec) of last match */ - union ip_old_fw_if fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ - u_short fu_pipe_nr; /* pipe number (option DUMMYNET) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - struct sockaddr_in fu_fwd_ip; - } fw_un; - u_char fw_prot; /* IP protocol */ - u_char fw_nports; /* N'of src ports and # of dst ports */ - /* in ports array (dst ports follow */ - /* src ports; max of 10 ports in all; */ - /* count of 0 means match all ports) */ - void *pipe_ptr; /* Pipe ptr in case of dummynet pipe */ - void *next_rule_ptr ; /* next rule in case of match */ + u_char fw_ipopt, fw_ipnopt; /* IP options set/unset */ + u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ + long timestamp; /* timestamp (tv_sec) of last match */ + union ip_old_fw_if fw_in_if, fw_out_if; /* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IPDIVERT) */ + u_short fu_pipe_nr; /* pipe number (option DUMMYNET) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ + struct sockaddr_in fu_fwd_ip; + } fw_un; + u_char fw_prot; /* IP protocol */ + u_char fw_nports; /* N'of src ports and # of dst ports */ + /* in ports array (dst ports 
follow */ + /* src ports; max of 10 ports in all; */ + /* count of 0 means match all ports) */ + void *pipe_ptr; /* Pipe ptr in case of dummynet pipe */ + void *next_rule_ptr; /* next rule in case of match */ }; -#define IP_OLD_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) -#define IP_OLD_FW_SETNSRCP(rule, n) do { \ - (rule)->fw_nports &= ~0x0f; \ - (rule)->fw_nports |= (n); \ - } while (0) -#define IP_OLD_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) -#define IP_OLD_FW_SETNDSTP(rule, n) do { \ - (rule)->fw_nports &= ~0xf0; \ - (rule)->fw_nports |= (n) << 4;\ - } while (0) - -#define old_fw_divert_port fw_un.fu_divert_port -#define old_fw_skipto_rule fw_un.fu_skipto_rule -#define old_fw_reject_code fw_un.fu_reject_code -#define old_fw_pipe_nr fw_un.fu_pipe_nr -#define old_fw_fwd_ip fw_un.fu_fwd_ip +#define IP_OLD_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) +#define IP_OLD_FW_SETNSRCP(rule, n) do { \ + (rule)->fw_nports &= ~0x0f; \ + (rule)->fw_nports |= (n); \ + } while (0) +#define IP_OLD_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) +#define IP_OLD_FW_SETNDSTP(rule, n) do { \ + (rule)->fw_nports &= ~0xf0; \ + (rule)->fw_nports |= (n) << 4;\ + } while (0) + +#define old_fw_divert_port fw_un.fu_divert_port +#define old_fw_skipto_rule fw_un.fu_skipto_rule +#define old_fw_reject_code fw_un.fu_reject_code +#define old_fw_pipe_nr fw_un.fu_pipe_nr +#define old_fw_fwd_ip fw_un.fu_fwd_ip /* * Values for "flags" field . */ -#define IP_OLD_FW_F_COMMAND 0x000000ff /* Mask for type of chain entry: */ -#define IP_OLD_FW_F_DENY 0x00000000 /* This is a deny rule */ -#define IP_OLD_FW_F_REJECT 0x00000001 /* Deny and send a response packet */ -#define IP_OLD_FW_F_ACCEPT 0x00000002 /* This is an accept rule */ -#define IP_OLD_FW_F_COUNT 0x00000003 /* This is a count rule */ -#define IP_OLD_FW_F_DIVERT 0x00000004 /* This is a divert rule */ -#define IP_OLD_FW_F_TEE 0x00000005 /* This is a tee rule */ -#define IP_OLD_FW_F_SKIPTO 0x00000006 /* This is a skipto rule */ -#define IP_OLD_FW_F_FWD 0x00000007 /* This is a "change forwarding address" rule */ -#define IP_OLD_FW_F_PIPE 0x00000008 /* This is a dummynet rule */ +#define IP_OLD_FW_F_COMMAND 0x000000ff /* Mask for type of chain entry: */ +#define IP_OLD_FW_F_DENY 0x00000000 /* This is a deny rule */ +#define IP_OLD_FW_F_REJECT 0x00000001 /* Deny and send a response packet */ +#define IP_OLD_FW_F_ACCEPT 0x00000002 /* This is an accept rule */ +#define IP_OLD_FW_F_COUNT 0x00000003 /* This is a count rule */ +#define IP_OLD_FW_F_DIVERT 0x00000004 /* This is a divert rule */ +#define IP_OLD_FW_F_TEE 0x00000005 /* This is a tee rule */ +#define IP_OLD_FW_F_SKIPTO 0x00000006 /* This is a skipto rule */ +#define IP_OLD_FW_F_FWD 0x00000007 /* This is a "change forwarding address" rule */ +#define IP_OLD_FW_F_PIPE 0x00000008 /* This is a dummynet rule */ -#define IP_OLD_FW_F_IN 0x00000100 /* Check inbound packets */ -#define IP_OLD_FW_F_OUT 0x00000200 /* Check outbound packets */ -#define IP_OLD_FW_F_IIFACE 0x00000400 /* Apply inbound interface test */ -#define IP_OLD_FW_F_OIFACE 0x00000800 /* Apply outbound interface test */ +#define IP_OLD_FW_F_IN 0x00000100 /* Check inbound packets */ +#define IP_OLD_FW_F_OUT 0x00000200 /* Check outbound packets */ +#define IP_OLD_FW_F_IIFACE 0x00000400 /* Apply inbound interface test */ +#define IP_OLD_FW_F_OIFACE 0x00000800 /* Apply outbound interface test */ -#define IP_OLD_FW_F_PRN 0x00001000 /* Print if this rule matches */ +#define IP_OLD_FW_F_PRN 0x00001000 /* Print if this rule matches */ -#define IP_OLD_FW_F_SRNG 0x00002000 
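A quick aside on the nibble packing above: GETNSRCP/SETNSRCP use the low four bits of fw_nports and GETNDSTP/SETNDSTP the high four, and the setters OR without masking, so the count must already fit in a nibble (guaranteed in practice, since IP_OLD_FW_MAX_PORTS caps the two counts at 10 ports total). A minimal sketch, assuming only what ip_fw_compat.h declares:

#include <assert.h>

static void
old_fw_nports_demo(void)
{
	struct ip_old_fw r = { 0 };

	IP_OLD_FW_SETNSRCP(&r, 3);	/* low nibble:  fw_nports == 0x03 */
	IP_OLD_FW_SETNDSTP(&r, 2);	/* high nibble: fw_nports == 0x23 */
	assert(IP_OLD_FW_GETNSRCP(&r) == 3);
	assert(IP_OLD_FW_GETNDSTP(&r) == 2);
}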
/* The first two src ports are a min * - * and max range (stored in host byte * - * order). */ +#define IP_OLD_FW_F_SRNG 0x00002000 /* The first two src ports are a min * + * and max range (stored in host byte * + * order). */ -#define IP_OLD_FW_F_DRNG 0x00004000 /* The first two dst ports are a min * - * and max range (stored in host byte * - * order). */ +#define IP_OLD_FW_F_DRNG 0x00004000 /* The first two dst ports are a min * + * and max range (stored in host byte * + * order). */ -#define IP_OLD_FW_F_FRAG 0x00008000 /* Fragment */ +#define IP_OLD_FW_F_FRAG 0x00008000 /* Fragment */ -#define IP_OLD_FW_F_IIFNAME 0x00010000 /* In interface by name/unit (not IP) */ -#define IP_OLD_FW_F_OIFNAME 0x00020000 /* Out interface by name/unit (not IP) */ +#define IP_OLD_FW_F_IIFNAME 0x00010000 /* In interface by name/unit (not IP) */ +#define IP_OLD_FW_F_OIFNAME 0x00020000 /* Out interface by name/unit (not IP) */ -#define IP_OLD_FW_F_INVSRC 0x00040000 /* Invert sense of src check */ -#define IP_OLD_FW_F_INVDST 0x00080000 /* Invert sense of dst check */ +#define IP_OLD_FW_F_INVSRC 0x00040000 /* Invert sense of src check */ +#define IP_OLD_FW_F_INVDST 0x00080000 /* Invert sense of dst check */ -#define IP_OLD_FW_F_ICMPBIT 0x00100000 /* ICMP type bitmap is valid */ +#define IP_OLD_FW_F_ICMPBIT 0x00100000 /* ICMP type bitmap is valid */ -#define IP_OLD_FW_F_MASK 0x001FFFFF /* All possible flag bits mask */ +#define IP_OLD_FW_F_MASK 0x001FFFFF /* All possible flag bits mask */ /* * For backwards compatibility with rules specifying "via iface" but @@ -487,11 +487,11 @@ struct ip_old_fw { * of bits to represent this configuration. */ -#define IF_OLD_FW_F_VIAHACK (IP_OLD_FW_F_IN|IP_OLD_FW_F_OUT|IP_OLD_FW_F_IIFACE|IP_OLD_FW_F_OIFACE) +#define IF_OLD_FW_F_VIAHACK (IP_OLD_FW_F_IN|IP_OLD_FW_F_OUT|IP_OLD_FW_F_IIFACE|IP_OLD_FW_F_OIFACE) /* * Definitions for TCP flags - abridged */ -#define IP_OLD_FW_TCPF_ESTAB 0x40 +#define IP_OLD_FW_TCPF_ESTAB 0x40 #endif /* _IP_FW_COMPAT_H_ */ diff --git a/bsd/netinet/ip_icmp.c b/bsd/netinet/ip_icmp.c index 260449c30..74b051aa1 100644 --- a/bsd/netinet/ip_icmp.c +++ b/bsd/netinet/ip_icmp.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -105,12 +105,12 @@ #include #endif /* NECP */ - /* XXX This one should go in sys/mbuf.h. It is used to avoid that +/* XXX This one should go in sys/mbuf.h. It is used to avoid that * a firewall-generated packet loops forever through the firewall. 
*/ #ifndef M_SKIP_FIREWALL #define M_SKIP_FIREWALL 0x4000 -#endif +#endif #if CONFIG_MACF_NET #include @@ -123,34 +123,34 @@ * host table maintenance routines. */ -struct icmpstat icmpstat; +struct icmpstat icmpstat; SYSCTL_STRUCT(_net_inet_icmp, ICMPCTL_STATS, stats, CTLFLAG_RD | CTLFLAG_LOCKED, &icmpstat, icmpstat, ""); -static int icmpmaskrepl = 0; +static int icmpmaskrepl = 0; SYSCTL_INT(_net_inet_icmp, ICMPCTL_MASKREPL, maskrepl, CTLFLAG_RW | CTLFLAG_LOCKED, &icmpmaskrepl, 0, ""); -static int icmptimestamp = 0; +static int icmptimestamp = 0; SYSCTL_INT(_net_inet_icmp, ICMPCTL_TIMESTAMP, timestamp, CTLFLAG_RW | CTLFLAG_LOCKED, &icmptimestamp, 0, ""); -static int drop_redirect = 1; +static int drop_redirect = 1; SYSCTL_INT(_net_inet_icmp, OID_AUTO, drop_redirect, CTLFLAG_RW | CTLFLAG_LOCKED, &drop_redirect, 0, ""); -static int log_redirect = 0; +static int log_redirect = 0; SYSCTL_INT(_net_inet_icmp, OID_AUTO, log_redirect, CTLFLAG_RW | CTLFLAG_LOCKED, &log_redirect, 0, ""); const static int icmp_datalen = 8; -#if ICMP_BANDLIM +#if ICMP_BANDLIM /* Default values in case CONFIG_ICMP_BANDLIM is not defined in the MASTER file */ #ifndef CONFIG_ICMP_BANDLIM @@ -161,39 +161,39 @@ const static int icmp_datalen = 8; #endif /* CONFIG_EMBEDDED */ #endif /* CONFIG_ICMP_BANDLIM */ -/* +/* * ICMP error-response bandwidth limiting sysctl. If not enabled, sysctl * variable content is -1 and read-only. - */ - + */ + static int icmplim = CONFIG_ICMP_BANDLIM; SYSCTL_INT(_net_inet_icmp, ICMPCTL_ICMPLIM, icmplim, CTLFLAG_RW | CTLFLAG_LOCKED, - &icmplim, 0, ""); + &icmplim, 0, ""); #else /* ICMP_BANDLIM */ static int icmplim = -1; SYSCTL_INT(_net_inet_icmp, ICMPCTL_ICMPLIM, icmplim, CTLFLAG_RD | CTLFLAG_LOCKED, - &icmplim, 0, ""); - + &icmplim, 0, ""); + #endif /* ICMP_BANDLIM */ /* * ICMP broadcast echo sysctl */ -static int icmpbmcastecho = 1; +static int icmpbmcastecho = 1; SYSCTL_INT(_net_inet_icmp, OID_AUTO, bmcastecho, CTLFLAG_RW | CTLFLAG_LOCKED, - &icmpbmcastecho, 0, ""); + &icmpbmcastecho, 0, ""); #if (DEBUG | DEVELOPMENT) -static int icmpprintfs = 0; +static int icmpprintfs = 0; SYSCTL_INT(_net_inet_icmp, OID_AUTO, verbose, CTLFLAG_RW | CTLFLAG_LOCKED, - &icmpprintfs, 0, ""); + &icmpprintfs, 0, ""); #endif -static void icmp_reflect(struct mbuf *); -static void icmp_send(struct mbuf *, struct mbuf *); +static void icmp_reflect(struct mbuf *); +static void icmp_send(struct mbuf *, struct mbuf *); /* * Generate an error packet of type error @@ -220,8 +220,9 @@ icmp_error( /* Expect 32-bit aligned data pointer on strict-align platforms */ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(n); - if (type != ICMP_REDIRECT) + if (type != ICMP_REDIRECT) { icmpstat.icps_error++; + } /* * Don't send error: * if not the first fragment of message @@ -229,34 +230,39 @@ icmp_error( * if the old packet protocol was ICMP * error message, only known informational types. */ - if (n->m_flags & (M_BCAST|M_MCAST)) + if (n->m_flags & (M_BCAST | M_MCAST)) { goto freeit; + } /* * Drop if IP header plus ICMP_MINLEN bytes are not contiguous * in first mbuf. 
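The sysctls declared here all surface under net.inet.icmp, and per the declarations above the icmplim node is read-only with value -1 when ICMP_BANDLIM is not configured. A minimal user-space sketch of reading the rate limit through the standard sysctlbyname(3) interface (error handling trimmed):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int lim;
	size_t len = sizeof(lim);

	if (sysctlbyname("net.inet.icmp.icmplim", &lim, &len, NULL, 0) == 0)
		printf("icmplim = %d\n", lim);	/* -1: bandwidth limiting disabled */
	return 0;
}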
*/ - if (n->m_len < sizeof(struct ip) + ICMP_MINLEN) + if (n->m_len < sizeof(struct ip) + ICMP_MINLEN) { goto freeit; + } oip = mtod(n, struct ip *); oiphlen = IP_VHL_HL(oip->ip_vhl) << 2; - if (n->m_len < oiphlen + ICMP_MINLEN) + if (n->m_len < oiphlen + ICMP_MINLEN) { goto freeit; + } #if (DEBUG | DEVELOPMENT) - if (icmpprintfs > 1) + if (icmpprintfs > 1) { printf("icmp_error(0x%llx, %x, %d)\n", (uint64_t)VM_KERNEL_ADDRPERM(oip), type, code); + } #endif - if (oip->ip_off & ~(IP_MF|IP_DF)) + if (oip->ip_off & ~(IP_MF | IP_DF)) { goto freeit; + } if (oip->ip_p == IPPROTO_ICMP && type != ICMP_REDIRECT && n->m_len >= oiphlen + ICMP_MINLEN && !ICMP_INFOTYPE(((struct icmp *)(void *)((caddr_t)oip + oiphlen))-> - icmp_type)) { + icmp_type)) { icmpstat.icps_oldicmp++; goto freeit; } @@ -278,16 +284,18 @@ icmp_error( * standard reply with only IP header as payload */ if (oiphlen + sizeof(struct tcphdr) > n->m_len && - n->m_next == NULL) + n->m_next == NULL) { goto stdreply; + } /* * Otherwise, pull up to get IP and TCP headers * together */ if (n->m_len < (oiphlen + sizeof(struct tcphdr)) && - (n = m_pullup(n, (oiphlen + sizeof(struct tcphdr)))) == NULL) + (n = m_pullup(n, (oiphlen + sizeof(struct tcphdr)))) == NULL) { goto freeit; + } /* * Reinit pointers derived from mbuf data pointer @@ -297,20 +305,25 @@ icmp_error( th = (struct tcphdr *)(void *)((caddr_t)oip + oiphlen); if (th != ((struct tcphdr *)P2ROUNDDOWN(th, - sizeof(u_int32_t)))) + sizeof(u_int32_t)))) { goto freeit; + } tcphlen = th->th_off << 2; /* Sanity checks */ - if (tcphlen < sizeof(struct tcphdr)) + if (tcphlen < sizeof(struct tcphdr)) { goto freeit; - if (oip->ip_len < (oiphlen + tcphlen)) + } + if (oip->ip_len < (oiphlen + tcphlen)) { goto freeit; - if ((oiphlen + tcphlen) > n->m_len && n->m_next == NULL) + } + if ((oiphlen + tcphlen) > n->m_len && n->m_next == NULL) { goto stdreply; + } if (n->m_len < (oiphlen + tcphlen) && - (n = m_pullup(n, (oiphlen + tcphlen))) == NULL) + (n = m_pullup(n, (oiphlen + tcphlen))) == NULL) { goto freeit; + } /* * Reinit pointers derived from mbuf data pointer @@ -321,26 +334,30 @@ icmp_error( icmpelen = max(tcphlen, min(icmp_datalen, (oip->ip_len - oiphlen))); - } else -stdreply: icmpelen = max(ICMP_MINLEN, min(icmp_datalen, + } else { +stdreply: icmpelen = max(ICMP_MINLEN, min(icmp_datalen, (oip->ip_len - oiphlen))); + } icmplen = min(oiphlen + icmpelen, nlen); - if (icmplen < sizeof(struct ip)) + if (icmplen < sizeof(struct ip)) { goto freeit; + } /* * First, formulate icmp message * Allocate enough space for the IP header, ICMP header * and the payload (part of the original message to be sent back). */ - if (MHLEN > (sizeof(struct ip) + ICMP_MINLEN + icmplen)) - m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */ - else + if (MHLEN > (sizeof(struct ip) + ICMP_MINLEN + icmplen)) { + m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */ + } else { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + } - if (m == NULL) + if (m == NULL) { goto freeit; + } #if CONFIG_MACF_NET mac_mbuf_label_associate_netlayer(n, m); @@ -358,9 +375,9 @@ stdreply: icmpelen = max(ICMP_MINLEN, min(icmp_datalen, icp = mtod(m, struct icmp *); icmpstat.icps_outhist[type]++; icp->icmp_type = type; - if (type == ICMP_REDIRECT) + if (type == ICMP_REDIRECT) { icp->icmp_gwaddr.s_addr = dest; - else { + } else { icp->icmp_void = 0; /* * The following assignments assume an overlay with the @@ -396,7 +413,7 @@ stdreply: icmpelen = max(ICMP_MINLEN, min(icmp_datalen, * in front of ICMP message. 
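To make the quoting arithmetic concrete: with icmp_datalen fixed at 8, a 100-byte UDP datagram with a 20-byte header gets icmpelen = max(ICMP_MINLEN, min(8, 100 - 20)) = 8 and icmplen = min(20 + 8, nlen) = 28, i.e. the offending IP header plus 64 bits of its payload, exactly the RFC 792 minimum; the TCP special case above instead takes max(tcphlen, ...) so the entire TCP header is echoed and the sender can match the error back to a connection.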
* If the original mbuf was meant to bypass the firewall, the error * reply should bypass as well. - */ + */ m->m_flags |= n->m_flags & M_SKIP_FIREWALL; m->m_data -= sizeof(struct ip); m->m_len += sizeof(struct ip); @@ -439,7 +456,7 @@ icmp_input(struct mbuf *m, int hlen) * that not corrupted and of at least minimum length. */ #if (DEBUG | DEVELOPMENT) - if (icmpprintfs > 2) { + if (icmpprintfs > 2) { char src_str[MAX_IPv4_STR_LEN]; char dst_str[MAX_IPv4_STR_LEN]; @@ -454,7 +471,7 @@ icmp_input(struct mbuf *m, int hlen) goto freeit; } i = hlen + min(icmplen, ICMP_ADVLENMIN); - if (m->m_len < i && (m = m_pullup(m, i)) == 0) { + if (m->m_len < i && (m = m_pullup(m, i)) == 0) { icmpstat.icps_tooshort++; return; } @@ -470,88 +487,92 @@ icmp_input(struct mbuf *m, int hlen) m->m_data -= hlen; #if (DEBUG | DEVELOPMENT) - if (icmpprintfs > 2) + if (icmpprintfs > 2) { printf("icmp_input, type %d code %d\n", icp->icmp_type, icp->icmp_code); + } #endif /* * Message type specific processing. */ - if (icp->icmp_type > ICMP_MAXTYPE) + if (icp->icmp_type > ICMP_MAXTYPE) { goto raw; + } /* Initialize */ - bzero(&icmpsrc, sizeof (icmpsrc)); - icmpsrc.sin_len = sizeof (struct sockaddr_in); + bzero(&icmpsrc, sizeof(icmpsrc)); + icmpsrc.sin_len = sizeof(struct sockaddr_in); icmpsrc.sin_family = AF_INET; - bzero(&icmpdst, sizeof (icmpdst)); - icmpdst.sin_len = sizeof (struct sockaddr_in); + bzero(&icmpdst, sizeof(icmpdst)); + icmpdst.sin_len = sizeof(struct sockaddr_in); icmpdst.sin_family = AF_INET; - bzero(&icmpgw, sizeof (icmpgw)); - icmpgw.sin_len = sizeof (struct sockaddr_in); + bzero(&icmpgw, sizeof(icmpgw)); + icmpgw.sin_len = sizeof(struct sockaddr_in); icmpgw.sin_family = AF_INET; icmpstat.icps_inhist[icp->icmp_type]++; code = icp->icmp_code; switch (icp->icmp_type) { - case ICMP_UNREACH: switch (code) { - case ICMP_UNREACH_NET: - case ICMP_UNREACH_HOST: - case ICMP_UNREACH_SRCFAIL: - case ICMP_UNREACH_NET_UNKNOWN: - case ICMP_UNREACH_HOST_UNKNOWN: - case ICMP_UNREACH_ISOLATED: - case ICMP_UNREACH_TOSNET: - case ICMP_UNREACH_TOSHOST: - case ICMP_UNREACH_HOST_PRECEDENCE: - case ICMP_UNREACH_PRECEDENCE_CUTOFF: - code = PRC_UNREACH_NET; - break; + case ICMP_UNREACH_NET: + case ICMP_UNREACH_HOST: + case ICMP_UNREACH_SRCFAIL: + case ICMP_UNREACH_NET_UNKNOWN: + case ICMP_UNREACH_HOST_UNKNOWN: + case ICMP_UNREACH_ISOLATED: + case ICMP_UNREACH_TOSNET: + case ICMP_UNREACH_TOSHOST: + case ICMP_UNREACH_HOST_PRECEDENCE: + case ICMP_UNREACH_PRECEDENCE_CUTOFF: + code = PRC_UNREACH_NET; + break; - case ICMP_UNREACH_NEEDFRAG: - code = PRC_MSGSIZE; - break; + case ICMP_UNREACH_NEEDFRAG: + code = PRC_MSGSIZE; + break; - /* - * RFC 1122, Sections 3.2.2.1 and 4.2.3.9. - * Treat subcodes 2,3 as immediate RST - */ - case ICMP_UNREACH_PROTOCOL: - case ICMP_UNREACH_PORT: - code = PRC_UNREACH_PORT; - break; + /* + * RFC 1122, Sections 3.2.2.1 and 4.2.3.9. 
+ * Treat subcodes 2,3 as immediate RST + */ + case ICMP_UNREACH_PROTOCOL: + case ICMP_UNREACH_PORT: + code = PRC_UNREACH_PORT; + break; - case ICMP_UNREACH_NET_PROHIB: - case ICMP_UNREACH_HOST_PROHIB: - case ICMP_UNREACH_FILTER_PROHIB: - code = PRC_UNREACH_ADMIN_PROHIB; - break; + case ICMP_UNREACH_NET_PROHIB: + case ICMP_UNREACH_HOST_PROHIB: + case ICMP_UNREACH_FILTER_PROHIB: + code = PRC_UNREACH_ADMIN_PROHIB; + break; - default: - goto badcode; + default: + goto badcode; } goto deliver; case ICMP_TIMXCEED: - if (code > 1) + if (code > 1) { goto badcode; + } code += PRC_TIMXCEED_INTRANS; goto deliver; case ICMP_PARAMPROB: - if (code > 1) + if (code > 1) { goto badcode; + } code = PRC_PARAMPROB; goto deliver; case ICMP_SOURCEQUENCH: - if (code) + if (code) { goto badcode; + } code = PRC_QUENCH; - deliver: +deliver: /* * Problem with datagram; advise higher level routines. */ @@ -567,12 +588,14 @@ icmp_input(struct mbuf *m, int hlen) #endif /* Discard ICMP's in response to multicast packets */ - if (IN_MULTICAST(ntohl(icp->icmp_ip.ip_dst.s_addr))) + if (IN_MULTICAST(ntohl(icp->icmp_ip.ip_dst.s_addr))) { goto badcode; + } #if (DEBUG | DEVELOPMENT) - if (icmpprintfs > 2) + if (icmpprintfs > 2) { printf("deliver to protocol %d\n", icp->icmp_ip.ip_p); + } #endif icmpsrc.sin_addr = icp->icmp_ip.ip_dst; @@ -588,24 +611,24 @@ icmp_input(struct mbuf *m, int hlen) lck_mtx_unlock(inet_domain_mutex); (*ctlfunc)(code, (struct sockaddr *)&icmpsrc, - (void *)&icp->icmp_ip, m->m_pkthdr.rcvif); + (void *)&icp->icmp_ip, m->m_pkthdr.rcvif); lck_mtx_lock(inet_domain_mutex); } break; - badcode: +badcode: icmpstat.icps_badcode++; break; case ICMP_ECHO: - if ((m->m_flags & (M_MCAST | M_BCAST))) { + if ((m->m_flags & (M_MCAST | M_BCAST))) { if (icmpbmcastecho == 0) { icmpstat.icps_bmcastecho++; break; } } - + /* * rdar://18644769 * Do not reply when the destination is link local multicast or broadcast @@ -631,15 +654,16 @@ icmp_input(struct mbuf *m, int hlen) icp->icmp_type = ICMP_ECHOREPLY; #if ICMP_BANDLIM - if (badport_bandlim(BANDLIM_ICMP_ECHO) < 0) + if (badport_bandlim(BANDLIM_ICMP_ECHO) < 0) { goto freeit; - else + } else #endif - goto reflect; + goto reflect; case ICMP_TSTAMP: - if (icmptimestamp == 0) + if (icmptimestamp == 0) { break; + } if (!icmpbmcastecho && (m->m_flags & (M_MCAST | M_BCAST)) != 0) { @@ -652,25 +676,26 @@ icmp_input(struct mbuf *m, int hlen) } icp->icmp_type = ICMP_TSTAMPREPLY; icp->icmp_rtime = iptime(); - icp->icmp_ttime = icp->icmp_rtime; /* bogus, do later! */ + icp->icmp_ttime = icp->icmp_rtime; /* bogus, do later! */ #if ICMP_BANDLIM - if (badport_bandlim(BANDLIM_ICMP_TSTAMP) < 0) + if (badport_bandlim(BANDLIM_ICMP_TSTAMP) < 0) { goto freeit; - else + } else #endif - goto reflect; + goto reflect; case ICMP_MASKREQ: - if (icmpmaskrepl == 0) + if (icmpmaskrepl == 0) { break; + } /* * We are not able to respond with all ones broadcast * unless we receive it over a point-to-point interface. 
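For the timestamp reply above: iptime(), defined later in this file, returns milliseconds since midnight UT in network byte order — at 01:00:00.250 UT that is 3600 * 1000 + 250 = 3,600,250 — and, per the "bogus, do later!" remark, the receive and transmit stamps are set to the same value rather than being sampled before and after processing.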
*/ - if (icmplen < ICMP_MASKLEN) + if (icmplen < ICMP_MASKLEN) { break; + } switch (ip->ip_dst.s_addr) { - case INADDR_BROADCAST: case INADDR_ANY: icmpdst.sin_addr = ip->ip_src; @@ -680,9 +705,10 @@ icmp_input(struct mbuf *m, int hlen) icmpdst.sin_addr = ip->ip_dst; } ia = (struct in_ifaddr *)ifaof_ifpforaddr( - (struct sockaddr *)&icmpdst, m->m_pkthdr.rcvif); - if (ia == 0) + (struct sockaddr *)&icmpdst, m->m_pkthdr.rcvif); + if (ia == 0) { break; + } IFA_LOCK(&ia->ia_ifa); if (ia->ia_ifp == 0) { IFA_UNLOCK(&ia->ia_ifa); @@ -693,15 +719,16 @@ icmp_input(struct mbuf *m, int hlen) icp->icmp_type = ICMP_MASKREPLY; icp->icmp_mask = ia->ia_sockmask.sin_addr.s_addr; if (ip->ip_src.s_addr == 0) { - if (ia->ia_ifp->if_flags & IFF_BROADCAST) - ip->ip_src = satosin(&ia->ia_broadaddr)->sin_addr; - else if (ia->ia_ifp->if_flags & IFF_POINTOPOINT) - ip->ip_src = satosin(&ia->ia_dstaddr)->sin_addr; + if (ia->ia_ifp->if_flags & IFF_BROADCAST) { + ip->ip_src = satosin(&ia->ia_broadaddr)->sin_addr; + } else if (ia->ia_ifp->if_flags & IFF_POINTOPOINT) { + ip->ip_src = satosin(&ia->ia_dstaddr)->sin_addr; + } } IFA_UNLOCK(&ia->ia_ifa); IFA_REMREF(&ia->ia_ifa); reflect: - ip->ip_len += hlen; /* since ip_input deducts this */ + ip->ip_len += hlen; /* since ip_input deducts this */ icmpstat.icps_reflect++; icmpstat.icps_outhist[icp->icmp_type]++; icmp_reflect(m); @@ -715,18 +742,20 @@ reflect: dst = ntohl(icp->icmp_ip.ip_dst.s_addr); gw = ntohl(icp->icmp_gwaddr.s_addr); printf("icmp redirect from %d.%d.%d.%d: " - "%d.%d.%d.%d => %d.%d.%d.%d\n", - (int)(src >> 24), (int)((src >> 16) & 0xff), - (int)((src >> 8) & 0xff), (int)(src & 0xff), - (int)(dst >> 24), (int)((dst >> 16) & 0xff), - (int)((dst >> 8) & 0xff), (int)(dst & 0xff), - (int)(gw >> 24), (int)((gw >> 16) & 0xff), - (int)((gw >> 8) & 0xff), (int)(gw & 0xff)); + "%d.%d.%d.%d => %d.%d.%d.%d\n", + (int)(src >> 24), (int)((src >> 16) & 0xff), + (int)((src >> 8) & 0xff), (int)(src & 0xff), + (int)(dst >> 24), (int)((dst >> 16) & 0xff), + (int)((dst >> 8) & 0xff), (int)(dst & 0xff), + (int)(gw >> 24), (int)((gw >> 16) & 0xff), + (int)((gw >> 8) & 0xff), (int)(gw & 0xff)); } - if (drop_redirect) + if (drop_redirect) { break; - if (code > 3) + } + if (code > 3) { goto badcode; + } if (icmplen < ICMP_ADVLENMIN || icmplen < ICMP_ADVLEN(icp) || IP_VHL_HL(icp->icmp_ip.ip_vhl) < (sizeof(struct ip) >> 2)) { icmpstat.icps_badlen++; @@ -753,8 +782,8 @@ reflect: #endif icmpsrc.sin_addr = icp->icmp_ip.ip_dst; rtredirect(m->m_pkthdr.rcvif, (struct sockaddr *)&icmpsrc, - (struct sockaddr *)&icmpdst, NULL, RTF_GATEWAY | RTF_HOST, - (struct sockaddr *)&icmpgw, NULL); + (struct sockaddr *)&icmpdst, NULL, RTF_GATEWAY | RTF_HOST, + (struct sockaddr *)&icmpgw, NULL); pfctlinput(PRC_REDIRECT_HOST, (struct sockaddr *)&icmpsrc); #if IPSEC key_sa_routechange((struct sockaddr *)&icmpsrc); @@ -798,9 +827,9 @@ icmp_reflect(struct mbuf *m) if (!in_canforward(ip->ip_src) && ((ntohl(ip->ip_src.s_addr) & IN_CLASSA_NET) != - (IN_LOOPBACKNET << IN_CLASSA_NSHIFT))) { - m_freem(m); /* Bad return address */ - goto done; /* Ip_output() will check for broadcast */ + (IN_LOOPBACKNET << IN_CLASSA_NSHIFT))) { + m_freem(m); /* Bad return address */ + goto done; /* Ip_output() will check for broadcast */ } t = ip->ip_dst; ip->ip_dst = ip->ip_src; @@ -839,13 +868,14 @@ match: lck_rw_done(in_ifaddr_rwlock); /* Initialize */ - bzero(&icmpdst, sizeof (icmpdst)); - icmpdst.sin_len = sizeof (struct sockaddr_in); + bzero(&icmpdst, sizeof(icmpdst)); + icmpdst.sin_len = sizeof(struct sockaddr_in); 
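Concretely, the source-address screen at the top of icmp_reflect() works out as follows: IN_CLASSA_NET is 0xff000000 and IN_LOOPBACKNET << IN_CLASSA_NSHIFT is 127 << 24 = 0x7f000000, so a source of 127.0.0.1 (0x7f000001 after ntohl) masks to 0x7f000000 and takes the loopback exemption, while a source that fails in_canforward() and is not in 127/8 is freed as a bad return address before any reply is built.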
icmpdst.sin_family = AF_INET; icmpdst.sin_addr = t; - if ((ia == (struct in_ifaddr *)0) && m->m_pkthdr.rcvif) + if ((ia == (struct in_ifaddr *)0) && m->m_pkthdr.rcvif) { ia = (struct in_ifaddr *)ifaof_ifpforaddr( (struct sockaddr *)&icmpdst, m->m_pkthdr.rcvif); + } /* * The following happens if the packet was not addressed to us, * and was received on an interface with no IP address. @@ -883,52 +913,57 @@ match: */ cp = (u_char *) (ip + 1); if ((opts = ip_srcroute()) == 0 && - (opts = m_gethdr(M_DONTWAIT, MT_HEADER))) { /* MAC-OK */ + (opts = m_gethdr(M_DONTWAIT, MT_HEADER))) { /* MAC-OK */ opts->m_len = sizeof(struct in_addr); mtod(opts, struct in_addr *)->s_addr = 0; } if (opts) { #if (DEBUG | DEVELOPMENT) - if (icmpprintfs > 1) - printf("icmp_reflect optlen %d rt %d => ", - optlen, opts->m_len); + if (icmpprintfs > 1) { + printf("icmp_reflect optlen %d rt %d => ", + optlen, opts->m_len); + } #endif - for (cnt = optlen; cnt > 0; cnt -= len, cp += len) { - opt = cp[IPOPT_OPTVAL]; - if (opt == IPOPT_EOL) - break; - if (opt == IPOPT_NOP) - len = 1; - else { - if (cnt < IPOPT_OLEN + sizeof(*cp)) - break; - len = cp[IPOPT_OLEN]; - if (len < IPOPT_OLEN + sizeof(*cp) || - len > cnt) - break; - } - /* - * Should check for overflow, but it "can't happen" - */ - if (opt == IPOPT_RR || opt == IPOPT_TS || - opt == IPOPT_SECURITY) { - bcopy((caddr_t)cp, - mtod(opts, caddr_t) + opts->m_len, len); - opts->m_len += len; - } - } - /* Terminate & pad, if necessary */ - cnt = opts->m_len % 4; - if (cnt) { - for (; cnt < 4; cnt++) { - *(mtod(opts, caddr_t) + opts->m_len) = - IPOPT_EOL; - opts->m_len++; - } - } + for (cnt = optlen; cnt > 0; cnt -= len, cp += len) { + opt = cp[IPOPT_OPTVAL]; + if (opt == IPOPT_EOL) { + break; + } + if (opt == IPOPT_NOP) { + len = 1; + } else { + if (cnt < IPOPT_OLEN + sizeof(*cp)) { + break; + } + len = cp[IPOPT_OLEN]; + if (len < IPOPT_OLEN + sizeof(*cp) || + len > cnt) { + break; + } + } + /* + * Should check for overflow, but it "can't happen" + */ + if (opt == IPOPT_RR || opt == IPOPT_TS || + opt == IPOPT_SECURITY) { + bcopy((caddr_t)cp, + mtod(opts, caddr_t) + opts->m_len, len); + opts->m_len += len; + } + } + /* Terminate & pad, if necessary */ + cnt = opts->m_len % 4; + if (cnt) { + for (; cnt < 4; cnt++) { + *(mtod(opts, caddr_t) + opts->m_len) = + IPOPT_EOL; + opts->m_len++; + } + } #if (DEBUG | DEVELOPMENT) - if (icmpprintfs > 1) - printf("%d\n", opts->m_len); + if (icmpprintfs > 1) { + printf("%d\n", opts->m_len); + } #endif } /* @@ -938,17 +973,19 @@ match: ip->ip_len -= optlen; ip->ip_vhl = IP_VHL_BORING; m->m_len -= optlen; - if (m->m_flags & M_PKTHDR) + if (m->m_flags & M_PKTHDR) { m->m_pkthdr.len -= optlen; + } optlen += sizeof(struct ip); bcopy((caddr_t)ip + optlen, (caddr_t)(ip + 1), - (unsigned)(m->m_len - sizeof(struct ip))); + (unsigned)(m->m_len - sizeof(struct ip))); } - m->m_flags &= ~(M_BCAST|M_MCAST); + m->m_flags &= ~(M_BCAST | M_MCAST); icmp_send(m, opts); done: - if (opts) + if (opts) { (void)m_free(opts); + } } /* @@ -1008,8 +1045,8 @@ iptime(void) u_int32_t t; getmicrotime(&atv); - t = (atv.tv_sec % (24*60*60)) * 1000 + atv.tv_usec / 1000; - return (htonl(t)); + t = (atv.tv_sec % (24 * 60 * 60)) * 1000 + atv.tv_usec / 1000; + return htonl(t); } #if 1 @@ -1028,8 +1065,9 @@ ip_next_mtu(int mtu, int dir) int i; for (i = 0; i < (sizeof mtutab) / (sizeof mtutab[0]); i++) { - if (mtu >= mtutab[i]) + if (mtu >= mtutab[i]) { break; + } } if (dir < 0) { @@ -1041,7 +1079,7 @@ ip_next_mtu(int mtu, int dir) } else { if (mtutab[i] == 0) { return 0; - } else 
if(mtu > mtutab[i]) { + } else if (mtu > mtutab[i]) { return mtutab[i]; } else { return mtutab[i + 1]; @@ -1056,7 +1094,7 @@ ip_next_mtu(int mtu, int dir) * badport_bandlim() - check for ICMP bandwidth limit * * Return 0 if it is ok to send an ICMP error response, -1 if we have - * hit our bandwidth limit and it is not ok. + * hit our bandwidth limit and it is not ok. * * If icmplim is <= 0, the feature is disabled and 0 is returned. * @@ -1067,7 +1105,7 @@ ip_next_mtu(int mtu, int dir) * Note that the printing of the error message is delayed so we can * properly print the icmp error rate that the system was trying to do * (i.e. 22000/100 pps, etc...). This can cause long delays in printing - * the 'final' error, but it doesn't make sense to solve the printing + * the 'final' error, but it doesn't make sense to solve the printing * delay with more complex code. */ @@ -1085,15 +1123,16 @@ badport_bandlim(int which) "Limiting icmp tstamp response", "Limiting closed port RST response", "Limiting open port RST response" - }; + }; /* * Return ok status if feature disabled or argument out of * ranage. */ - if (icmplim <= 0 || which > BANDLIM_MAX || which < 0) - return(0); + if (icmplim <= 0 || which > BANDLIM_MAX || which < 0) { + return 0; + } secs = time - lticks[which]; @@ -1104,10 +1143,10 @@ badport_bandlim(int which) if (secs > 1) { if (lpackets[which] > icmplim) { printf("%s from %d to %d packets per second\n", - bandlimittype[which], - lpackets[which], - icmplim - ); + bandlimittype[which], + lpackets[which], + icmplim + ); } lticks[which] = time; lpackets[which] = 0; @@ -1118,9 +1157,9 @@ badport_bandlim(int which) */ if (++lpackets[which] > icmplim) { - return(-1); + return -1; } - return(0); + return 0; } #endif @@ -1153,43 +1192,46 @@ __private_extern__ int icmp_dgram_attach(struct socket *so, int proto, struct pr __private_extern__ int icmp_dgram_ctloutput(struct socket *so, struct sockopt *sopt); __private_extern__ struct pr_usrreqs icmp_dgram_usrreqs = { - .pru_abort = rip_abort, - .pru_attach = icmp_dgram_attach, - .pru_bind = rip_bind, - .pru_connect = rip_connect, - .pru_control = in_control, - .pru_detach = rip_detach, - .pru_disconnect = rip_disconnect, - .pru_peeraddr = in_getpeeraddr, - .pru_send = icmp_dgram_send, - .pru_shutdown = rip_shutdown, - .pru_sockaddr = in_getsockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = rip_abort, + .pru_attach = icmp_dgram_attach, + .pru_bind = rip_bind, + .pru_connect = rip_connect, + .pru_control = in_control, + .pru_detach = rip_detach, + .pru_disconnect = rip_disconnect, + .pru_peeraddr = in_getpeeraddr, + .pru_send = icmp_dgram_send, + .pru_shutdown = rip_shutdown, + .pru_sockaddr = in_getsockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; /* Like rip_attach but without root privilege enforcement */ __private_extern__ int icmp_dgram_attach(struct socket *so, __unused int proto, struct proc *p) { - struct inpcb *inp; - int error; - - inp = sotoinpcb(so); - if (inp) - panic("icmp_dgram_attach"); - - error = soreserve(so, rip_sendspace, rip_recvspace); - if (error) - return error; - error = in_pcballoc(so, &ripcbinfo, p); - if (error) - return error; - inp = (struct inpcb *)so->so_pcb; - inp->inp_vflag |= INP_IPV4; - inp->inp_ip_p = IPPROTO_ICMP; - inp->inp_ip_ttl = ip_defttl; - return 0; + struct inpcb *inp; + int error; + + inp = sotoinpcb(so); + if (inp) { + panic("icmp_dgram_attach"); + } + + error = soreserve(so, rip_sendspace, rip_recvspace); + if (error) { + return error; + } + error = 
in_pcballoc(so, &ripcbinfo, p); + if (error) { + return error; + } + inp = (struct inpcb *)so->so_pcb; + inp->inp_vflag |= INP_IPV4; + inp->inp_ip_p = IPPROTO_ICMP; + inp->inp_ip_ttl = ip_defttl; + return 0; } /* @@ -1198,43 +1240,44 @@ icmp_dgram_attach(struct socket *so, __unused int proto, struct proc *p) __private_extern__ int icmp_dgram_ctloutput(struct socket *so, struct sockopt *sopt) { - int error; + int error; - if (sopt->sopt_level != IPPROTO_IP) - return (EINVAL); + if (sopt->sopt_level != IPPROTO_IP) { + return EINVAL; + } switch (sopt->sopt_name) { - case IP_OPTIONS: - case IP_HDRINCL: - case IP_TOS: - case IP_TTL: - case IP_RECVOPTS: - case IP_RECVRETOPTS: - case IP_RECVDSTADDR: - case IP_RETOPTS: - case IP_MULTICAST_IF: - case IP_MULTICAST_IFINDEX: - case IP_MULTICAST_TTL: - case IP_MULTICAST_LOOP: - case IP_ADD_MEMBERSHIP: - case IP_DROP_MEMBERSHIP: - case IP_MULTICAST_VIF: - case IP_PORTRANGE: - case IP_RECVIF: - case IP_IPSEC_POLICY: - case IP_STRIPHDR: - case IP_RECVTTL: - case IP_BOUND_IF: - case IP_NO_IFT_CELLULAR: - error = rip_ctloutput(so, sopt); - break; + case IP_OPTIONS: + case IP_HDRINCL: + case IP_TOS: + case IP_TTL: + case IP_RECVOPTS: + case IP_RECVRETOPTS: + case IP_RECVDSTADDR: + case IP_RETOPTS: + case IP_MULTICAST_IF: + case IP_MULTICAST_IFINDEX: + case IP_MULTICAST_TTL: + case IP_MULTICAST_LOOP: + case IP_ADD_MEMBERSHIP: + case IP_DROP_MEMBERSHIP: + case IP_MULTICAST_VIF: + case IP_PORTRANGE: + case IP_RECVIF: + case IP_IPSEC_POLICY: + case IP_STRIPHDR: + case IP_RECVTTL: + case IP_BOUND_IF: + case IP_NO_IFT_CELLULAR: + error = rip_ctloutput(so, sopt); + break; - default: - error = EINVAL; - break; + default: + error = EINVAL; + break; } - return (error); + return error; } __private_extern__ int @@ -1245,17 +1288,18 @@ icmp_dgram_send(struct socket *so, int flags, struct mbuf *m, struct inpcb *inp = sotoinpcb(so); int hlen; struct icmp *icp; - struct in_ifaddr *ia = NULL; + struct in_ifaddr *ia = NULL; int icmplen; int error = EINVAL; if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) { - if (inp != NULL) + ) { + if (inp != NULL) { error = EPROTOTYPE; + } goto bad; } @@ -1274,16 +1318,20 @@ icmp_dgram_send(struct socket *so, int flags, struct mbuf *m, goto bad; } /* Only IPv4 */ - if (IP_VHL_V(ip->ip_vhl) != 4) + if (IP_VHL_V(ip->ip_vhl) != 4) { goto bad; - if (hlen < 20 || hlen > 40 || ip->ip_len != m->m_pkthdr.len) + } + if (hlen < 20 || hlen > 40 || ip->ip_len != m->m_pkthdr.len) { goto bad; + } /* Bogus fragments can tie up peer resources */ - if ((ip->ip_off & ~IP_DF) != 0) + if ((ip->ip_off & ~IP_DF) != 0) { goto bad; + } /* Allow only ICMP even for user provided IP header */ - if (ip->ip_p != IPPROTO_ICMP) + if (ip->ip_p != IPPROTO_ICMP) { goto bad; + } /* * To prevent spoofing, specified source address must * be one of ours. 
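Taken together, icmp_dgram_attach() above and the checks in icmp_dgram_send() that follow are what let an unprivileged process ping on this platform: SOCK_DGRAM with IPPROTO_ICMP attaches without the raw-socket root requirement, and the send path then accepts only code-0 ECHO, TSTAMP, and MASKREQ messages of sane length. A minimal user-space sketch under those rules — the checksum helper and the TEST-NET-1 destination are ours, and the checksum is filled in on the assumption that the raw-IP send path leaves it to the caller:

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* RFC 1071-style ones' complement sum; name and scope are ours. */
static uint16_t
icmp_cksum16(const void *data, size_t len)
{
	const uint16_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {
		sum += *p++;
		len -= 2;
	}
	if (len > 0)
		sum += *(const uint8_t *)p;
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);
	return (uint16_t)~sum;
}

int
main(void)
{
	struct sockaddr_in dst;
	struct icmp req;
	int s;

	/* No root needed: this lands in icmp_dgram_attach(). */
	s = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	if (s < 0) {
		perror("socket");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.icmp_type = ICMP_ECHO;	/* code stays 0: anything else is rejected */
	req.icmp_id = htons((uint16_t)getpid());
	req.icmp_seq = htons(1);
	req.icmp_cksum = icmp_cksum16(&req, ICMP_MINLEN);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* TEST-NET-1 placeholder */

	if (sendto(s, &req, ICMP_MINLEN, 0,
	    (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");
	close(s);
	return 0;
}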
@@ -1327,32 +1375,37 @@ ours: /* * Allow only to send request types with code 0 */ - if (icp->icmp_code != 0) + if (icp->icmp_code != 0) { goto bad; + } switch (icp->icmp_type) { - case ICMP_ECHO: - break; - case ICMP_TSTAMP: - if (icmplen != 20) - goto bad; - break; - case ICMP_MASKREQ: - if (icmplen != 12) - goto bad; - break; - default: + case ICMP_ECHO: + break; + case ICMP_TSTAMP: + if (icmplen != 20) { + goto bad; + } + break; + case ICMP_MASKREQ: + if (icmplen != 12) { goto bad; + } + break; + default: + goto bad; } - return (rip_send(so, flags, m, nam, control, p)); + return rip_send(so, flags, m, nam, control, p); bad: VERIFY(error != 0); - if (m != NULL) + if (m != NULL) { m_freem(m); - if (control != NULL) + } + if (control != NULL) { m_freem(control); + } - return (error); + return error; } #endif /* __APPLE__ */ diff --git a/bsd/netinet/ip_icmp.h b/bsd/netinet/ip_icmp.h index 2de986d94..70cb460e8 100644 --- a/bsd/netinet/ip_icmp.h +++ b/bsd/netinet/ip_icmp.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -82,15 +82,15 @@ struct icmp_ra_addr { * Structure of an icmp header. 
*/ struct icmp { - u_char icmp_type; /* type of message, see below */ - u_char icmp_code; /* type sub code */ - u_short icmp_cksum; /* ones complement cksum of struct */ + u_char icmp_type; /* type of message, see below */ + u_char icmp_code; /* type sub code */ + u_short icmp_cksum; /* ones complement cksum of struct */ union { - u_char ih_pptr; /* ICMP_PARAMPROB */ - struct in_addr ih_gwaddr; /* ICMP_REDIRECT */ + u_char ih_pptr; /* ICMP_PARAMPROB */ + struct in_addr ih_gwaddr; /* ICMP_REDIRECT */ struct ih_idseq { - n_short icd_id; - n_short icd_seq; + n_short icd_id; + n_short icd_seq; } ih_idseq; int ih_void; @@ -106,16 +106,16 @@ struct icmp { u_int16_t irt_lifetime; } ih_rtradv; } icmp_hun; -#define icmp_pptr icmp_hun.ih_pptr -#define icmp_gwaddr icmp_hun.ih_gwaddr -#define icmp_id icmp_hun.ih_idseq.icd_id -#define icmp_seq icmp_hun.ih_idseq.icd_seq -#define icmp_void icmp_hun.ih_void -#define icmp_pmvoid icmp_hun.ih_pmtu.ipm_void -#define icmp_nextmtu icmp_hun.ih_pmtu.ipm_nextmtu -#define icmp_num_addrs icmp_hun.ih_rtradv.irt_num_addrs -#define icmp_wpa icmp_hun.ih_rtradv.irt_wpa -#define icmp_lifetime icmp_hun.ih_rtradv.irt_lifetime +#define icmp_pptr icmp_hun.ih_pptr +#define icmp_gwaddr icmp_hun.ih_gwaddr +#define icmp_id icmp_hun.ih_idseq.icd_id +#define icmp_seq icmp_hun.ih_idseq.icd_seq +#define icmp_void icmp_hun.ih_void +#define icmp_pmvoid icmp_hun.ih_pmtu.ipm_void +#define icmp_nextmtu icmp_hun.ih_pmtu.ipm_nextmtu +#define icmp_num_addrs icmp_hun.ih_rtradv.irt_num_addrs +#define icmp_wpa icmp_hun.ih_rtradv.irt_wpa +#define icmp_lifetime icmp_hun.ih_rtradv.irt_lifetime union { struct id_ts { n_time its_otime; @@ -128,15 +128,15 @@ struct icmp { } id_ip; struct icmp_ra_addr id_radv; u_int32_t id_mask; - char id_data[1]; + char id_data[1]; } icmp_dun; -#define icmp_otime icmp_dun.id_ts.its_otime -#define icmp_rtime icmp_dun.id_ts.its_rtime -#define icmp_ttime icmp_dun.id_ts.its_ttime -#define icmp_ip icmp_dun.id_ip.idi_ip -#define icmp_radv icmp_dun.id_radv -#define icmp_mask icmp_dun.id_mask -#define icmp_data icmp_dun.id_data +#define icmp_otime icmp_dun.id_ts.its_otime +#define icmp_rtime icmp_dun.id_ts.its_rtime +#define icmp_ttime icmp_dun.id_ts.its_ttime +#define icmp_ip icmp_dun.id_ip.idi_ip +#define icmp_radv icmp_dun.id_radv +#define icmp_mask icmp_dun.id_mask +#define icmp_data icmp_dun.id_data }; /* @@ -147,80 +147,80 @@ struct icmp { * data have been returned, since we need to check the returned * ip header length. */ -#define ICMP_MINLEN 8 /* abs minimum */ -#define ICMP_TSLEN (8 + 3 * sizeof (n_time)) /* timestamp */ -#define ICMP_MASKLEN 12 /* address mask */ -#define ICMP_ADVLENMIN (8 + sizeof (struct ip) + 8) /* min */ +#define ICMP_MINLEN 8 /* abs minimum */ +#define ICMP_TSLEN (8 + 3 * sizeof (n_time)) /* timestamp */ +#define ICMP_MASKLEN 12 /* address mask */ +#define ICMP_ADVLENMIN (8 + sizeof (struct ip) + 8) /* min */ #ifndef _IP_VHL -#define ICMP_ADVLEN(p) (8 + ((p)->icmp_ip.ip_hl << 2) + 8) - /* N.B.: must separately check that ip_hl >= 5 */ +#define ICMP_ADVLEN(p) (8 + ((p)->icmp_ip.ip_hl << 2) + 8) +/* N.B.: must separately check that ip_hl >= 5 */ #else -#define ICMP_ADVLEN(p) (8 + (IP_VHL_HL((p)->icmp_ip.ip_vhl) << 2) + 8) - /* N.B.: must separately check that header length >= 5 */ +#define ICMP_ADVLEN(p) (8 + (IP_VHL_HL((p)->icmp_ip.ip_vhl) << 2) + 8) +/* N.B.: must separately check that header length >= 5 */ #endif /* * Definition of type and code field values. 
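With a 4-byte n_time and a 20-byte struct ip, the length macros above evaluate to: ICMP_TSLEN = 8 + 3 * 4 = 20 (header plus originate/receive/transmit stamps), ICMP_MASKLEN = 8 + 4 = 12 (header plus one address mask), and ICMP_ADVLENMIN = 8 + 20 + 8 = 36 — an ICMP header, the smallest quoted IP header, and the 64 bits of quoted payload RFC 792 calls for. ICMP_ADVLEN(p) is the same sum using the quoted header's actual length, which is why its comments insist the header-length field be validated separately.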
*/ -#define ICMP_ECHOREPLY 0 /* echo reply */ -#define ICMP_UNREACH 3 /* dest unreachable, codes: */ -#define ICMP_UNREACH_NET 0 /* bad net */ -#define ICMP_UNREACH_HOST 1 /* bad host */ -#define ICMP_UNREACH_PROTOCOL 2 /* bad protocol */ -#define ICMP_UNREACH_PORT 3 /* bad port */ -#define ICMP_UNREACH_NEEDFRAG 4 /* IP_DF caused drop */ -#define ICMP_UNREACH_SRCFAIL 5 /* src route failed */ -#define ICMP_UNREACH_NET_UNKNOWN 6 /* unknown net */ -#define ICMP_UNREACH_HOST_UNKNOWN 7 /* unknown host */ -#define ICMP_UNREACH_ISOLATED 8 /* src host isolated */ -#define ICMP_UNREACH_NET_PROHIB 9 /* prohibited access */ -#define ICMP_UNREACH_HOST_PROHIB 10 /* ditto */ -#define ICMP_UNREACH_TOSNET 11 /* bad tos for net */ -#define ICMP_UNREACH_TOSHOST 12 /* bad tos for host */ -#define ICMP_UNREACH_FILTER_PROHIB 13 /* admin prohib */ -#define ICMP_UNREACH_HOST_PRECEDENCE 14 /* host prec vio. */ -#define ICMP_UNREACH_PRECEDENCE_CUTOFF 15 /* prec cutoff */ -#define ICMP_SOURCEQUENCH 4 /* packet lost, slow down */ -#define ICMP_REDIRECT 5 /* shorter route, codes: */ -#define ICMP_REDIRECT_NET 0 /* for network */ -#define ICMP_REDIRECT_HOST 1 /* for host */ -#define ICMP_REDIRECT_TOSNET 2 /* for tos and net */ -#define ICMP_REDIRECT_TOSHOST 3 /* for tos and host */ -#define ICMP_ALTHOSTADDR 6 /* alternate host address */ -#define ICMP_ECHO 8 /* echo service */ -#define ICMP_ROUTERADVERT 9 /* router advertisement */ -#define ICMP_ROUTERADVERT_NORMAL 0 /* normal advertisement */ -#define ICMP_ROUTERADVERT_NOROUTE_COMMON 16 /* selective routing */ -#define ICMP_ROUTERSOLICIT 10 /* router solicitation */ -#define ICMP_TIMXCEED 11 /* time exceeded, code: */ -#define ICMP_TIMXCEED_INTRANS 0 /* ttl==0 in transit */ -#define ICMP_TIMXCEED_REASS 1 /* ttl==0 in reass */ -#define ICMP_PARAMPROB 12 /* ip header bad */ -#define ICMP_PARAMPROB_ERRATPTR 0 /* error at param ptr */ -#define ICMP_PARAMPROB_OPTABSENT 1 /* req. opt. 
absent */ -#define ICMP_PARAMPROB_LENGTH 2 /* bad length */ -#define ICMP_TSTAMP 13 /* timestamp request */ -#define ICMP_TSTAMPREPLY 14 /* timestamp reply */ -#define ICMP_IREQ 15 /* information request */ -#define ICMP_IREQREPLY 16 /* information reply */ -#define ICMP_MASKREQ 17 /* address mask request */ -#define ICMP_MASKREPLY 18 /* address mask reply */ -#define ICMP_TRACEROUTE 30 /* traceroute */ -#define ICMP_DATACONVERR 31 /* data conversion error */ -#define ICMP_MOBILE_REDIRECT 32 /* mobile host redirect */ -#define ICMP_IPV6_WHEREAREYOU 33 /* IPv6 where-are-you */ -#define ICMP_IPV6_IAMHERE 34 /* IPv6 i-am-here */ -#define ICMP_MOBILE_REGREQUEST 35 /* mobile registration req */ -#define ICMP_MOBILE_REGREPLY 36 /* mobile registration reply */ -#define ICMP_SKIP 39 /* SKIP */ -#define ICMP_PHOTURIS 40 /* Photuris */ -#define ICMP_PHOTURIS_UNKNOWN_INDEX 1 /* unknown sec index */ -#define ICMP_PHOTURIS_AUTH_FAILED 2 /* auth failed */ -#define ICMP_PHOTURIS_DECRYPT_FAILED 3 /* decrypt failed */ +#define ICMP_ECHOREPLY 0 /* echo reply */ +#define ICMP_UNREACH 3 /* dest unreachable, codes: */ +#define ICMP_UNREACH_NET 0 /* bad net */ +#define ICMP_UNREACH_HOST 1 /* bad host */ +#define ICMP_UNREACH_PROTOCOL 2 /* bad protocol */ +#define ICMP_UNREACH_PORT 3 /* bad port */ +#define ICMP_UNREACH_NEEDFRAG 4 /* IP_DF caused drop */ +#define ICMP_UNREACH_SRCFAIL 5 /* src route failed */ +#define ICMP_UNREACH_NET_UNKNOWN 6 /* unknown net */ +#define ICMP_UNREACH_HOST_UNKNOWN 7 /* unknown host */ +#define ICMP_UNREACH_ISOLATED 8 /* src host isolated */ +#define ICMP_UNREACH_NET_PROHIB 9 /* prohibited access */ +#define ICMP_UNREACH_HOST_PROHIB 10 /* ditto */ +#define ICMP_UNREACH_TOSNET 11 /* bad tos for net */ +#define ICMP_UNREACH_TOSHOST 12 /* bad tos for host */ +#define ICMP_UNREACH_FILTER_PROHIB 13 /* admin prohib */ +#define ICMP_UNREACH_HOST_PRECEDENCE 14 /* host prec vio. */ +#define ICMP_UNREACH_PRECEDENCE_CUTOFF 15 /* prec cutoff */ +#define ICMP_SOURCEQUENCH 4 /* packet lost, slow down */ +#define ICMP_REDIRECT 5 /* shorter route, codes: */ +#define ICMP_REDIRECT_NET 0 /* for network */ +#define ICMP_REDIRECT_HOST 1 /* for host */ +#define ICMP_REDIRECT_TOSNET 2 /* for tos and net */ +#define ICMP_REDIRECT_TOSHOST 3 /* for tos and host */ +#define ICMP_ALTHOSTADDR 6 /* alternate host address */ +#define ICMP_ECHO 8 /* echo service */ +#define ICMP_ROUTERADVERT 9 /* router advertisement */ +#define ICMP_ROUTERADVERT_NORMAL 0 /* normal advertisement */ +#define ICMP_ROUTERADVERT_NOROUTE_COMMON 16 /* selective routing */ +#define ICMP_ROUTERSOLICIT 10 /* router solicitation */ +#define ICMP_TIMXCEED 11 /* time exceeded, code: */ +#define ICMP_TIMXCEED_INTRANS 0 /* ttl==0 in transit */ +#define ICMP_TIMXCEED_REASS 1 /* ttl==0 in reass */ +#define ICMP_PARAMPROB 12 /* ip header bad */ +#define ICMP_PARAMPROB_ERRATPTR 0 /* error at param ptr */ +#define ICMP_PARAMPROB_OPTABSENT 1 /* req. opt. 
absent */ +#define ICMP_PARAMPROB_LENGTH 2 /* bad length */ +#define ICMP_TSTAMP 13 /* timestamp request */ +#define ICMP_TSTAMPREPLY 14 /* timestamp reply */ +#define ICMP_IREQ 15 /* information request */ +#define ICMP_IREQREPLY 16 /* information reply */ +#define ICMP_MASKREQ 17 /* address mask request */ +#define ICMP_MASKREPLY 18 /* address mask reply */ +#define ICMP_TRACEROUTE 30 /* traceroute */ +#define ICMP_DATACONVERR 31 /* data conversion error */ +#define ICMP_MOBILE_REDIRECT 32 /* mobile host redirect */ +#define ICMP_IPV6_WHEREAREYOU 33 /* IPv6 where-are-you */ +#define ICMP_IPV6_IAMHERE 34 /* IPv6 i-am-here */ +#define ICMP_MOBILE_REGREQUEST 35 /* mobile registration req */ +#define ICMP_MOBILE_REGREPLY 36 /* mobile registration reply */ +#define ICMP_SKIP 39 /* SKIP */ +#define ICMP_PHOTURIS 40 /* Photuris */ +#define ICMP_PHOTURIS_UNKNOWN_INDEX 1 /* unknown sec index */ +#define ICMP_PHOTURIS_AUTH_FAILED 2 /* auth failed */ +#define ICMP_PHOTURIS_DECRYPT_FAILED 3 /* decrypt failed */ -#define ICMP_MAXTYPE 40 +#define ICMP_MAXTYPE 40 -#define ICMP_INFOTYPE(type) \ +#define ICMP_INFOTYPE(type) \ ((type) == ICMP_ECHOREPLY || (type) == ICMP_ECHO || \ (type) == ICMP_ROUTERADVERT || (type) == ICMP_ROUTERSOLICIT || \ (type) == ICMP_TSTAMP || (type) == ICMP_TSTAMPREPLY || \ @@ -233,8 +233,8 @@ struct icmp { (type) == ICMP_PARAMPROB) #ifdef BSD_KERNEL_PRIVATE -void icmp_error(struct mbuf *, int, int, n_long, u_int32_t); -void icmp_input(struct mbuf *, int); +void icmp_error(struct mbuf *, int, int, n_long, u_int32_t); +void icmp_input(struct mbuf *, int); int ip_next_mtu(int, int); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET_IP_ICMP_H_ */ diff --git a/bsd/netinet/ip_id.c b/bsd/netinet/ip_id.c index 94c791ea0..800b1dcf4 100644 --- a/bsd/netinet/ip_id.c +++ b/bsd/netinet/ip_id.c @@ -115,7 +115,7 @@ /* * Size of L (see comments above on the lower and upper limits.) */ -#define ARRAY_SIZE (4096) +#define ARRAY_SIZE (4096) static uint16_t *id_array = NULL; static bitstr_t *id_bits = NULL; @@ -130,14 +130,14 @@ static lck_grp_t *ipid_lock_grp; static lck_grp_attr_t *ipid_lock_grp_attr; SYSCTL_UINT(_net_inet_ip, OID_AUTO, random_id_statistics, - CTLFLAG_RW | CTLFLAG_LOCKED, &random_id_statistics, 0, - "Enable IP ID statistics"); + CTLFLAG_RW | CTLFLAG_LOCKED, &random_id_statistics, 0, + "Enable IP ID statistics"); SYSCTL_QUAD(_net_inet_ip, OID_AUTO, random_id_collisions, - CTLFLAG_RD | CTLFLAG_LOCKED, &random_id_collisions, - "Count of IP ID collisions"); + CTLFLAG_RD | CTLFLAG_LOCKED, &random_id_collisions, + "Count of IP ID collisions"); SYSCTL_QUAD(_net_inet_ip, OID_AUTO, random_id_total, - CTLFLAG_RD | CTLFLAG_LOCKED, &random_id_total, - "Count of IP IDs created"); + CTLFLAG_RD | CTLFLAG_LOCKED, &random_id_total, + "Count of IP IDs created"); /* * Called once from ip_init(). @@ -155,7 +155,7 @@ ip_initid(void) ipid_lock_attr = lck_attr_alloc_init(); lck_mtx_init(&ipid_lock, ipid_lock_grp, ipid_lock_attr); - id_array = (uint16_t *)_MALLOC(ARRAY_SIZE * sizeof (uint16_t), + id_array = (uint16_t *)_MALLOC(ARRAY_SIZE * sizeof(uint16_t), M_TEMP, M_WAITOK | M_ZERO); id_bits = (bitstr_t *)_MALLOC(bitstr_size(65536), M_TEMP, M_WAITOK | M_ZERO); @@ -182,8 +182,9 @@ ip_randomid(void) * Given that we don't allow the size of the array to change, accessing * id_array and id_bits prior to acquiring the lock below is safe. 
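To put numbers on the sliding window implemented below: the bitmap excludes at most the 4096 most recently issued IDs plus zero, so each read_random() draw succeeds with probability at least 1 - 4097/65536, roughly 93.7%, and the expected number of draws per ID stays under 1.07; in exchange, an issued ID cannot recur until at least 4096 further IDs have been generated, which is the point of trading the simple ip_id++ fallback for this scheme.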
*/ - if (id_array == NULL || ip_use_randomid == 0) - return (htons(ip_id++)); + if (id_array == NULL || ip_use_randomid == 0) { + return htons(ip_id++); + } /* * To avoid a conflict with the zeros that the array is initially @@ -192,9 +193,10 @@ ip_randomid(void) */ new_id = 0; do { - if (random_id_statistics && new_id != 0) + if (random_id_statistics && new_id != 0) { random_id_collisions++; - read_random(&new_id, sizeof (new_id)); + } + read_random(&new_id, sizeof(new_id)); } while (bitstr_test(id_bits, new_id) || new_id == 0); /* @@ -204,12 +206,14 @@ ip_randomid(void) bitstr_clear(id_bits, id_array[array_ptr]); bitstr_set(id_bits, new_id); id_array[array_ptr] = new_id; - if (++array_ptr == ARRAY_SIZE) + if (++array_ptr == ARRAY_SIZE) { array_ptr = 0; + } lck_mtx_unlock(&ipid_lock); - if (random_id_statistics) + if (random_id_statistics) { random_id_total++; + } - return (new_id); + return new_id; } diff --git a/bsd/netinet/ip_input.c b/bsd/netinet/ip_input.c index 70ee5fac6..35ee30666 100644 --- a/bsd/netinet/ip_input.c +++ b/bsd/netinet/ip_input.c @@ -66,7 +66,7 @@ * Version 2.0. */ -#define _IP_VHL +#define _IP_VHL #include #include @@ -137,24 +137,24 @@ #include #endif /* IPSEC */ -#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 0) -#define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 2) -#define DBG_FNC_IP_INPUT NETDBG_CODE(DBG_NETIP, (2 << 8)) +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 0) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 2) +#define DBG_FNC_IP_INPUT NETDBG_CODE(DBG_NETIP, (2 << 8)) #if IPSEC extern int ipsec_bypass; extern lck_mtx_t *sadb_mutex; -lck_grp_t *sadb_stat_mutex_grp; -lck_grp_attr_t *sadb_stat_mutex_grp_attr; -lck_attr_t *sadb_stat_mutex_attr; +lck_grp_t *sadb_stat_mutex_grp; +lck_grp_attr_t *sadb_stat_mutex_grp_attr; +lck_attr_t *sadb_stat_mutex_attr; decl_lck_mtx_data(, sadb_stat_mutex_data); -lck_mtx_t *sadb_stat_mutex = &sadb_stat_mutex_data; +lck_mtx_t *sadb_stat_mutex = &sadb_stat_mutex_data; #endif /* IPSEC */ MBUFQ_HEAD(fq_head); -static int frag_timeout_run; /* frag timer is scheduled to run */ +static int frag_timeout_run; /* frag timer is scheduled to run */ static void frag_timeout(void *); static void frag_sched_timeout(void); @@ -165,24 +165,24 @@ static void ip_input_second_pass(struct mbuf *, struct ifnet *, u_int32_t, int, int, struct ip_fw_in_args *, int); decl_lck_mtx_data(static, ipqlock); -static lck_attr_t *ipqlock_attr; -static lck_grp_t *ipqlock_grp; -static lck_grp_attr_t *ipqlock_grp_attr; +static lck_attr_t *ipqlock_attr; +static lck_grp_t *ipqlock_grp; +static lck_grp_attr_t *ipqlock_grp_attr; /* Packet reassembly stuff */ -#define IPREASS_NHASH_LOG2 6 -#define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2) -#define IPREASS_HMASK (IPREASS_NHASH - 1) -#define IPREASS_HASH(x, y) \ +#define IPREASS_NHASH_LOG2 6 +#define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2) +#define IPREASS_HMASK (IPREASS_NHASH - 1) +#define IPREASS_HASH(x, y) \ (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK) /* IP fragment reassembly queues (protected by ipqlock) */ static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH]; /* ip reassembly queues */ -static int maxnipq; /* max packets in reass queues */ -static u_int32_t maxfragsperpacket; /* max frags/packet in reass queues */ -static u_int32_t nipq; /* # of packets in reass queues */ -static u_int32_t ipq_limit; /* ipq allocation limit */ -static u_int32_t ipq_count; /* current # of allocated ipq's */ +static int maxnipq; /* max packets in reass queues */ +static u_int32_t maxfragsperpacket; /* max frags/packet 
in reass queues */ +static u_int32_t nipq; /* # of packets in reass queues */ +static u_int32_t ipq_limit; /* ipq allocation limit */ +static u_int32_t ipq_count; /* current # of allocated ipq's */ static int sysctl_ipforwarding SYSCTL_HANDLER_ARGS; static int sysctl_maxnipq SYSCTL_HANDLER_ARGS; @@ -196,54 +196,54 @@ static int sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS; int ipforwarding = 0; SYSCTL_PROC(_net_inet_ip, IPCTL_FORWARDING, forwarding, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ipforwarding, 0, - sysctl_ipforwarding, "I", "Enable IP forwarding between interfaces"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ipforwarding, 0, + sysctl_ipforwarding, "I", "Enable IP forwarding between interfaces"); static int ipsendredirects = 1; /* XXX */ SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, - CTLFLAG_RW | CTLFLAG_LOCKED, &ipsendredirects, 0, - "Enable sending IP redirects"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ipsendredirects, 0, + "Enable sending IP redirects"); int ip_defttl = IPDEFTTL; SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_defttl, 0, "Maximum TTL on IP packets"); + &ip_defttl, 0, "Maximum TTL on IP packets"); static int ip_dosourceroute = 0; SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip_dosourceroute, 0, - "Enable forwarding source routed IP packets"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip_dosourceroute, 0, + "Enable forwarding source routed IP packets"); static int ip_acceptsourceroute = 0; SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip_acceptsourceroute, 0, - "Enable accepting source routed IP packets"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip_acceptsourceroute, 0, + "Enable accepting source routed IP packets"); static int ip_sendsourcequench = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip_sendsourcequench, 0, - "Enable the transmission of source quench packets"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip_sendsourcequench, 0, + "Enable the transmission of source quench packets"); SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxnipq, 0, sysctl_maxnipq, - "I", "Maximum number of IPv4 fragment reassembly queue entries"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxnipq, 0, sysctl_maxnipq, + "I", "Maximum number of IPv4 fragment reassembly queue entries"); SYSCTL_UINT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD | CTLFLAG_LOCKED, - &nipq, 0, "Current number of IPv4 fragment reassembly queue entries"); + &nipq, 0, "Current number of IPv4 fragment reassembly queue entries"); SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragsperpacket, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxfragsperpacket, 0, - sysctl_maxfragsperpacket, "I", - "Maximum number of IPv4 fragments allowed per packet"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxfragsperpacket, 0, + sysctl_maxfragsperpacket, "I", + "Maximum number of IPv4 fragments allowed per packet"); static uint32_t ip_adj_clear_hwcksum = 0; SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_clear_hwcksum, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_clear_hwcksum, 0, - "Invalidate hwcksum info when adjusting length"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_clear_hwcksum, 0, + "Invalidate hwcksum info when adjusting length"); static uint32_t ip_adj_partial_sum = 1; SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_partial_sum, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_partial_sum, 0, - "Perform partial sum adjustment of trailing 
bytes at IP layer"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_partial_sum, 0, + "Perform partial sum adjustment of trailing bytes at IP layer"); /* * XXX - Setting ip_checkinterface mostly implements the receive side of @@ -260,33 +260,33 @@ SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_partial_sum, */ static int ip_checkinterface = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_checkinterface, 0, "Verify packet arrives on correct interface"); + &ip_checkinterface, 0, "Verify packet arrives on correct interface"); static int ip_chaining = 1; SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chaining, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_chaining, 1, "Do receive side ip address based chaining"); + &ip_chaining, 1, "Do receive side ip address based chaining"); static int ip_chainsz = 6; SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chainsz, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_chainsz, 1, "IP receive side max chaining"); + &ip_chainsz, 1, "IP receive side max chaining"); #if (DEBUG || DEVELOPMENT) static int ip_input_measure = 0; SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_input_measure, 0, sysctl_reset_ip_input_stats, "I", "Do time measurement"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ip_input_measure, 0, sysctl_reset_ip_input_stats, "I", "Do time measurement"); static uint64_t ip_input_measure_bins = 0; SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_bins, - CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_input_measure_bins, 0, - sysctl_ip_input_measure_bins, "I", - "bins for chaining performance data histogram"); + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_input_measure_bins, 0, + sysctl_ip_input_measure_bins, "I", + "bins for chaining performance data histogram"); static net_perf_t net_perf; SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_data, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_ip_input_getperf, "S,net_perf", - "IP input performance data (struct net_perf, net/net_perf.h)"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_ip_input_getperf, "S,net_perf", + "IP input performance data (struct net_perf, net/net_perf.h)"); #endif /* (DEBUG || DEVELOPMENT) */ #if DIAGNOSTIC @@ -295,36 +295,36 @@ static int ipprintfs = 0; struct protosw *ip_protox[IPPROTO_MAX]; -static lck_grp_attr_t *in_ifaddr_rwlock_grp_attr; -static lck_grp_t *in_ifaddr_rwlock_grp; -static lck_attr_t *in_ifaddr_rwlock_attr; +static lck_grp_attr_t *in_ifaddr_rwlock_grp_attr; +static lck_grp_t *in_ifaddr_rwlock_grp; +static lck_attr_t *in_ifaddr_rwlock_attr; decl_lck_rw_data(, in_ifaddr_rwlock_data); -lck_rw_t *in_ifaddr_rwlock = &in_ifaddr_rwlock_data; +lck_rw_t *in_ifaddr_rwlock = &in_ifaddr_rwlock_data; /* Protected by in_ifaddr_rwlock */ -struct in_ifaddrhead in_ifaddrhead; /* first inet address */ -struct in_ifaddrhashhead *in_ifaddrhashtbl; /* inet addr hash table */ +struct in_ifaddrhead in_ifaddrhead; /* first inet address */ +struct in_ifaddrhashhead *in_ifaddrhashtbl; /* inet addr hash table */ -#define INADDR_NHASH 61 -static u_int32_t inaddr_nhash; /* hash table size */ -static u_int32_t inaddr_hashp; /* next largest prime */ +#define INADDR_NHASH 61 +static u_int32_t inaddr_nhash; /* hash table size */ +static u_int32_t inaddr_hashp; /* next largest prime */ static int ip_getstat SYSCTL_HANDLER_ARGS; struct ipstat ipstat; SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, ip_getstat, "S,ipstat", - "IP statistics (struct ipstat, 
netinet/ip_var.h)"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, ip_getstat, "S,ipstat", + "IP statistics (struct ipstat, netinet/ip_var.h)"); #if IPCTL_DEFMTU SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_mtu, 0, "Default MTU"); + &ip_mtu, 0, "Default MTU"); #endif /* IPCTL_DEFMTU */ #if IPSTEALTH -static int ipstealth = 0; +static int ipstealth = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW | CTLFLAG_LOCKED, - &ipstealth, 0, ""); + &ipstealth, 0, ""); #endif /* IPSTEALTH */ /* Firewall hooks */ @@ -340,20 +340,20 @@ ip_dn_io_t *ip_dn_io_ptr; #endif /* DUMMYNET */ SYSCTL_NODE(_net_inet_ip, OID_AUTO, linklocal, - CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local"); struct ip_linklocal_stat ip_linklocal_stat; SYSCTL_STRUCT(_net_inet_ip_linklocal, OID_AUTO, stat, - CTLFLAG_RD | CTLFLAG_LOCKED, &ip_linklocal_stat, ip_linklocal_stat, - "Number of link local packets with TTL less than 255"); + CTLFLAG_RD | CTLFLAG_LOCKED, &ip_linklocal_stat, ip_linklocal_stat, + "Number of link local packets with TTL less than 255"); SYSCTL_NODE(_net_inet_ip_linklocal, OID_AUTO, in, - CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local input"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local input"); int ip_linklocal_in_allowbadttl = 1; SYSCTL_INT(_net_inet_ip_linklocal_in, OID_AUTO, allowbadttl, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip_linklocal_in_allowbadttl, 0, - "Allow incoming link local packets with TTL less than 255"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip_linklocal_in_allowbadttl, 0, + "Allow incoming link local packets with TTL less than 255"); /* @@ -363,12 +363,12 @@ SYSCTL_INT(_net_inet_ip_linklocal_in, OID_AUTO, allowbadttl, * maintenance when the remote end is on a network that is not known * to us. */ -static int ip_nhops = 0; -static struct ip_srcrt { - struct in_addr dst; /* final destination */ - char nop; /* one NOP to align */ - char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */ - struct in_addr route[MAX_IPOPTLEN / sizeof (struct in_addr)]; +static int ip_nhops = 0; +static struct ip_srcrt { + struct in_addr dst; /* final destination */ + char nop; /* one NOP to align */ + char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */ + struct in_addr route[MAX_IPOPTLEN / sizeof(struct in_addr)]; } ip_srcrt; static void in_ifaddrhashtbl_init(void); @@ -391,7 +391,7 @@ static inline u_short ip_cksum(struct mbuf *, int); int ip_use_randomid = 1; SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_use_randomid, 0, "Randomize IP packets IDs"); + &ip_use_randomid, 0, "Randomize IP packets IDs"); /* * On platforms which require strict alignment (currently for anything but @@ -401,27 +401,27 @@ SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW | CTLFLAG_LOCKED, * it's needed later on. 
*/ #if defined(__i386__) || defined(__x86_64__) -#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0) +#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0) #else /* !__i386__ && !__x86_64__ */ -#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { \ - if (!IP_HDR_ALIGNED_P(mtod(_m, caddr_t))) { \ - struct mbuf *_n; \ - struct ifnet *__ifp = (_ifp); \ - atomic_add_64(&(__ifp)->if_alignerrs, 1); \ - if (((_m)->m_flags & M_PKTHDR) && \ - (_m)->m_pkthdr.pkt_hdr != NULL) \ - (_m)->m_pkthdr.pkt_hdr = NULL; \ - _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT); \ - if (_n == NULL) { \ - atomic_add_32(&ipstat.ips_toosmall, 1); \ - m_freem(_m); \ - (_m) = NULL; \ - _action; \ - } else { \ - VERIFY(_n != (_m)); \ - (_m) = _n; \ - } \ - } \ +#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { \ + if (!IP_HDR_ALIGNED_P(mtod(_m, caddr_t))) { \ + struct mbuf *_n; \ + struct ifnet *__ifp = (_ifp); \ + atomic_add_64(&(__ifp)->if_alignerrs, 1); \ + if (((_m)->m_flags & M_PKTHDR) && \ + (_m)->m_pkthdr.pkt_hdr != NULL) \ + (_m)->m_pkthdr.pkt_hdr = NULL; \ + _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT); \ + if (_n == NULL) { \ + atomic_add_32(&ipstat.ips_toosmall, 1); \ + m_freem(_m); \ + (_m) = NULL; \ + _action; \ + } else { \ + VERIFY(_n != (_m)); \ + (_m) = _n; \ + } \ + } \ } while (0) #endif /* !__i386__ && !__x86_64__ */ @@ -444,9 +444,10 @@ ip_init_delayed(void) sin->sin_family = AF_INET; sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK); error = in_control(NULL, SIOCSIFADDR, (caddr_t)&ifr, lo_ifp, kernproc); - if (error) + if (error) { printf("%s: failed to initialise lo0's address, error=%d\n", __func__, error); + } } /* @@ -462,19 +463,20 @@ ip_init(struct protosw *pp, struct domain *dp) int i; domain_proto_mtx_lock_assert_held(); - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); /* ipq_alloc() uses mbufs for IP fragment queue structures */ - _CASSERT(sizeof (struct ipq) <= _MLEN); + _CASSERT(sizeof(struct ipq) <= _MLEN); /* * Some ioctls (e.g. SIOCAIFADDR) use ifaliasreq struct, which is * interchangeable with in_aliasreq; they must have the same size. */ - _CASSERT(sizeof (struct ifaliasreq) == sizeof (struct in_aliasreq)); + _CASSERT(sizeof(struct ifaliasreq) == sizeof(struct in_aliasreq)); - if (ip_initialized) + if (ip_initialized) { return; + } ip_initialized = 1; in_ifaddr_init(); @@ -499,8 +501,9 @@ ip_init(struct protosw *pp, struct domain *dp) } /* Initialize the entire ip_protox[] array to IPPROTO_RAW. */ - for (i = 0; i < IPPROTO_MAX; i++) + for (i = 0; i < IPPROTO_MAX; i++) { ip_protox[i] = pr; + } /* * Cycle through IP protocols and put them into the appropriate place * in ip_protox[], skipping protocols IPPROTO_{IP,RAW}. @@ -510,8 +513,9 @@ ip_init(struct protosw *pp, struct domain *dp) VERIFY(pr->pr_domain == dp); if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) { /* Be careful to only index valid IP protocols. */ - if (pr->pr_protocol < IPPROTO_MAX) + if (pr->pr_protocol < IPPROTO_MAX) { ip_protox[pr->pr_protocol] = pr; + } } } @@ -523,8 +527,9 @@ ip_init(struct protosw *pp, struct domain *dp) lck_mtx_lock(&ipqlock); /* Initialize IP reassembly queue. 
*/ - for (i = 0; i < IPREASS_NHASH; i++) + for (i = 0; i < IPREASS_NHASH; i++) { TAILQ_INIT(&ipq[i]); + } maxnipq = nmbclusters / 32; maxfragsperpacket = 128; /* enough for 64k in 512 byte fragments */ @@ -558,19 +563,22 @@ in_ifaddrhashtbl_init(void) { int i, k, p; - if (in_ifaddrhashtbl != NULL) + if (in_ifaddrhashtbl != NULL) { return; + } PE_parse_boot_argn("inaddr_nhash", &inaddr_nhash, - sizeof (inaddr_nhash)); - if (inaddr_nhash == 0) + sizeof(inaddr_nhash)); + if (inaddr_nhash == 0) { inaddr_nhash = INADDR_NHASH; + } MALLOC(in_ifaddrhashtbl, struct in_ifaddrhashhead *, - inaddr_nhash * sizeof (*in_ifaddrhashtbl), + inaddr_nhash * sizeof(*in_ifaddrhashtbl), M_IFADDR, M_WAITOK | M_ZERO); - if (in_ifaddrhashtbl == NULL) + if (in_ifaddrhashtbl == NULL) { panic("in_ifaddrhashtbl_init allocation failed"); + } /* * Generate the next largest prime greater than inaddr_nhash. @@ -579,11 +587,13 @@ in_ifaddrhashtbl_init(void) for (;;) { p = 1; for (i = 3; i * i <= k; i += 2) { - if (k % i == 0) + if (k % i == 0) { p = 0; + } } - if (p == 1) + if (p == 1) { break; + } k += 2; } inaddr_hashp = k; @@ -597,10 +607,11 @@ inaddr_hashval(u_int32_t key) * the hash size, as documented in "Introduction to Algorithms" * (Cormen, Leiserson, Rivest). */ - if (inaddr_nhash > 1) - return ((key * inaddr_hashp) % inaddr_nhash); - else - return (0); + if (inaddr_nhash > 1) { + return (key * inaddr_hashp) % inaddr_nhash; + } else { + return 0; + } } void @@ -615,7 +626,7 @@ ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto, { struct ipfilter *filter; int seen = (inject_ipfref == NULL); - int changed_header = 0; + int changed_header = 0; struct ip *ip; void (*pr_input)(struct mbuf *, int len); @@ -623,8 +634,9 @@ ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto, ipf_ref(); TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) { if (seen == 0) { - if ((struct ipfilter *)inject_ipfref == filter) + if ((struct ipfilter *)inject_ipfref == filter) { seen = 1; + } } else if (filter->ipf_filter.ipf_input) { errno_t result; @@ -638,8 +650,9 @@ ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto, m->m_pkthdr.rcvif, ipf_unref()); /* ipf_unref() already called */ - if (m == NULL) + if (m == NULL) { return; + } changed_header = 1; ip = mtod(m, struct ip *); @@ -649,8 +662,8 @@ ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto, ip->ip_sum = ip_cksum_hdr_in(m, hlen); } result = filter->ipf_filter.ipf_input( - filter->ipf_filter.cookie, (mbuf_t *)&m, - hlen, proto); + filter->ipf_filter.cookie, (mbuf_t *)&m, + hlen, proto); if (result == EJUSTRETURN) { ipf_unref(); return; @@ -666,7 +679,7 @@ ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto, } /* Perform IP header alignment fixup (post-filters), if needed */ - IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return); + IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return ); /* * If there isn't a specific lock for the protocol @@ -692,25 +705,25 @@ ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto, } struct pktchain_elm { - struct mbuf *pkte_head; - struct mbuf *pkte_tail; - struct in_addr pkte_saddr; - struct in_addr pkte_daddr; - uint16_t pkte_npkts; - uint16_t pkte_proto; - uint32_t pkte_nbytes; + struct mbuf *pkte_head; + struct mbuf *pkte_tail; + struct in_addr pkte_saddr; + struct in_addr pkte_daddr; + uint16_t pkte_npkts; + uint16_t pkte_proto; + uint32_t pkte_nbytes; }; typedef struct pktchain_elm pktchain_elm_t; /* Store upto PKTTBL_SZ unique flows on the stack */ -#define PKTTBL_SZ 7 +#define PKTTBL_SZ 7 
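[Reviewer note: the in_ifaddrhashtbl_init()/inaddr_hashval() hunks above implement the textbook multiplicative hash — find the smallest prime above the table size, then bucket as (key * prime) % size, per the Cormen/Leiserson/Rivest reference in the comment. A minimal userland sketch of the same idea follows; the function names and the main() driver are illustrative, and the seeding of the prime search is an assumption since the initial value of k sits outside the changed lines.]

#include <stdint.h>
#include <stdio.h>

static uint32_t
next_prime_above(uint32_t n)
{
	uint32_t k = (n % 2 == 0) ? n + 1 : n + 2;	/* first odd candidate (assumed seeding) */

	for (;;) {
		int prime = 1;
		for (uint32_t i = 3; i * i <= k; i += 2) {
			if (k % i == 0) {
				prime = 0;	/* composite; try next odd */
				break;
			}
		}
		if (prime)
			return k;
		k += 2;
	}
}

static uint32_t
addr_hashval(uint32_t key, uint32_t nhash, uint32_t hashp)
{
	/* multiplier is prime, hence coprime with the table size,
	 * so keys spread across all buckets */
	return (nhash > 1) ? (key * hashp) % nhash : 0;
}

int
main(void)
{
	uint32_t nhash = 61;	/* mirrors the INADDR_NHASH default */
	uint32_t hashp = next_prime_above(nhash);

	printf("prime=%u bucket=%u\n", hashp,
	    addr_hashval(0x0a000001, nhash, hashp));
	return 0;
}

[For nhash = 61 this finds 67, so adjacent addresses land in well-separated buckets; the modulo keeps the result a valid index even though the 32-bit multiply wraps.]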
static struct mbuf * ip_chain_insert(struct mbuf *packet, pktchain_elm_t *tbl) { - struct ip* ip; - int pkttbl_idx = 0; + struct ip* ip; + int pkttbl_idx = 0; ip = mtod(packet, struct ip*); @@ -726,16 +739,17 @@ ip_chain_insert(struct mbuf *packet, pktchain_elm_t *tbl) (ip->ip_src.s_addr == tbl[pkttbl_idx].pkte_saddr.s_addr) && (ip->ip_p == tbl[pkttbl_idx].pkte_proto)) { } else { - return (packet); + return packet; } } - if (tbl[pkttbl_idx].pkte_tail != NULL) + if (tbl[pkttbl_idx].pkte_tail != NULL) { mbuf_setnextpkt(tbl[pkttbl_idx].pkte_tail, packet); + } tbl[pkttbl_idx].pkte_tail = packet; tbl[pkttbl_idx].pkte_npkts += 1; tbl[pkttbl_idx].pkte_nbytes += packet->m_pkthdr.len; - return (NULL); + return NULL; } /* args is a dummy variable here for backward compatibility */ @@ -750,13 +764,16 @@ ip_input_second_pass_loop_tbl(pktchain_elm_t *tbl, struct ip_fw_in_args *args) ip_input_second_pass(m, m->m_pkthdr.rcvif, 0, tbl[i].pkte_npkts, tbl[i].pkte_nbytes, args, 0); - if (tbl[i].pkte_npkts > 2) + if (tbl[i].pkte_npkts > 2) { ipstat.ips_rxc_chainsz_gt2++; - if (tbl[i].pkte_npkts > 4) + } + if (tbl[i].pkte_npkts > 4) { ipstat.ips_rxc_chainsz_gt4++; + } #if (DEBUG || DEVELOPMENT) - if (ip_input_measure) + if (ip_input_measure) { net_perf_histogram(&net_perf, tbl[i].pkte_npkts); + } #endif /* (DEBUG || DEVELOPMENT) */ tbl[i].pkte_head = tbl[i].pkte_tail = NULL; tbl[i].pkte_npkts = 0; @@ -820,14 +837,16 @@ ip_input_dispatch_chain(struct mbuf *m) ip = mtod(tmp_mbuf, struct ip *); hlen = IP_VHL_HL(ip->ip_vhl) << 2; - while(tmp_mbuf) { + while (tmp_mbuf) { nxt_mbuf = mbuf_nextpkt(tmp_mbuf); mbuf_setnextpkt(tmp_mbuf, NULL); - if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) + if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) { tmp_mbuf = tcp_lro(tmp_mbuf, hlen); - if (tmp_mbuf) + } + if (tmp_mbuf) { ip_proto_dispatch_in(tmp_mbuf, hlen, ip->ip_p, 0); + } tmp_mbuf = nxt_mbuf; if (tmp_mbuf) { ip = mtod(tmp_mbuf, struct ip *); @@ -879,15 +898,15 @@ ip_input_adjust(struct mbuf *m, struct ip *ip, struct ifnet *inifp) * prepended extraneous bytes (else it will do both.) 
*/ if (ip_adj_partial_sum && - (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID|CSUM_PARTIAL)) == - (CSUM_DATA_VALID|CSUM_PARTIAL)) { + (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) == + (CSUM_DATA_VALID | CSUM_PARTIAL)) { m->m_pkthdr.csum_rx_val = m_adj_sum16(m, m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start, (ip->ip_len - m->m_pkthdr.csum_rx_start), m->m_pkthdr.csum_rx_val); } else if ((m->m_pkthdr.csum_flags & - (CSUM_DATA_VALID|CSUM_PARTIAL)) == - (CSUM_DATA_VALID|CSUM_PARTIAL)) { + (CSUM_DATA_VALID | CSUM_PARTIAL)) == + (CSUM_DATA_VALID | CSUM_PARTIAL)) { /* * If packet has partial checksum info and we decided not * to subtract the partial sum of postpended extraneous @@ -928,21 +947,21 @@ static ipinput_chain_ret_t ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, struct ip_fw_in_args *args, int *ours, struct mbuf **modm) { - struct ip *ip; - struct ifnet *inifp; - unsigned int hlen; - int retval = IPINPUT_DOCHAIN; - int len = 0; - struct in_addr src_ip; + struct ip *ip; + struct ifnet *inifp; + unsigned int hlen; + int retval = IPINPUT_DOCHAIN; + int len = 0; + struct in_addr src_ip; #if IPFIREWALL - int i; + int i; #endif #if IPFIREWALL || DUMMYNET - struct m_tag *copy; - struct m_tag *p; - boolean_t delete = FALSE; - struct ip_fw_args args1; - boolean_t init = FALSE; + struct m_tag *copy; + struct m_tag *p; + boolean_t delete = FALSE; + struct ip_fw_args args1; + boolean_t init = FALSE; #endif ipfilter_t inject_filter_ref = NULL; @@ -974,8 +993,9 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, /* * Don't bother searching for tag(s) if there's none. */ - if (SLIST_EMPTY(&m->m_pkthdr.tags)) + if (SLIST_EMPTY(&m->m_pkthdr.tags)) { goto ipfw_tags_done; + } /* Grab info from mtags prepended to the chain */ p = m_tag_first(m); @@ -985,7 +1005,7 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, if (p->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET) { struct dn_pkt_tag *dn_tag; - dn_tag = (struct dn_pkt_tag *)(p+1); + dn_tag = (struct dn_pkt_tag *)(p + 1); args->fwai_ipfw_rule = dn_tag->dn_ipfw_rule; args->fwai_pf_rule = dn_tag->dn_pf_rule; delete = TRUE; @@ -996,7 +1016,7 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, if (p->m_tag_type == KERNEL_TAG_TYPE_DIVERT) { struct divert_tag *div_tag; - div_tag = (struct divert_tag *)(p+1); + div_tag = (struct divert_tag *)(p + 1); args->fwai_divert_rule = div_tag->cookie; delete = TRUE; } @@ -1005,7 +1025,7 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, if (p->m_tag_type == KERNEL_TAG_TYPE_IPFORWARD) { struct ip_fwd_tag *ipfwd_tag; - ipfwd_tag = (struct ip_fwd_tag *)(p+1); + ipfwd_tag = (struct ip_fwd_tag *)(p + 1); args->fwai_next_hop = ipfwd_tag->next_hop; delete = TRUE; } @@ -1014,7 +1034,7 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, copy = p; p = m_tag_next(m, p); m_tag_delete(m, copy); - } else { + } else { p = m_tag_next(m, p); } } else { @@ -1023,8 +1043,9 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, } #if DIAGNOSTIC - if (m == NULL || !(m->m_flags & M_PKTHDR)) + if (m == NULL || !(m->m_flags & M_PKTHDR)) { panic("ip_input no HDR"); + } #endif #if DUMMYNET @@ -1034,11 +1055,13 @@ ip_input_first_pass(struct mbuf *m, u_int32_t *div_info, hlen = IP_VHL_HL(ip->ip_vhl) << 2; inject_filter_ref = ipf_get_inject_filter(m); #if IPFIREWALL - if (args->fwai_ipfw_rule) + if (args->fwai_ipfw_rule) { goto iphack; + } #endif /* IPFIREWALL */ - if (args->fwai_pf_rule) + if (args->fwai_pf_rule) { goto check_with_pf; + } } #endif /* DUMMYNET */ ipfw_tags_done: @@ 
-1047,8 +1070,9 @@ ipfw_tags_done: /* * No need to process packet twice if we've already seen it. */ - if (!SLIST_EMPTY(&m->m_pkthdr.tags)) + if (!SLIST_EMPTY(&m->m_pkthdr.tags)) { inject_filter_ref = ipf_get_inject_filter(m); + } if (inject_filter_ref != NULL) { ip = mtod(m, struct ip *); hlen = IP_VHL_HL(ip->ip_vhl) << 2; @@ -1060,21 +1084,21 @@ ipfw_tags_done: ip->ip_len = ntohs(ip->ip_len) - hlen; ip->ip_off = ntohs(ip->ip_off); ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref); - return (IPINPUT_DONE); + return IPINPUT_DONE; } - if (m->m_pkthdr.len < sizeof (struct ip)) { + if (m->m_pkthdr.len < sizeof(struct ip)) { OSAddAtomic(1, &ipstat.ips_total); OSAddAtomic(1, &ipstat.ips_tooshort); m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; } - if (m->m_len < sizeof (struct ip) && - (m = m_pullup(m, sizeof (struct ip))) == NULL) { + if (m->m_len < sizeof(struct ip) && + (m = m_pullup(m, sizeof(struct ip))) == NULL) { OSAddAtomic(1, &ipstat.ips_total); OSAddAtomic(1, &ipstat.ips_toosmall); - return (IPINPUT_FREED); + return IPINPUT_FREED; } ip = mtod(m, struct ip *); @@ -1088,16 +1112,16 @@ ipfw_tags_done: OSAddAtomic(1, &ipstat.ips_badvers); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; } hlen = IP_VHL_HL(ip->ip_vhl) << 2; - if (hlen < sizeof (struct ip)) { + if (hlen < sizeof(struct ip)) { OSAddAtomic(1, &ipstat.ips_total); OSAddAtomic(1, &ipstat.ips_badhlen); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; } if (hlen > m->m_len) { @@ -1105,7 +1129,7 @@ ipfw_tags_done: OSAddAtomic(1, &ipstat.ips_total); OSAddAtomic(1, &ipstat.ips_badhlen); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); - return (IPINPUT_FREED); + return IPINPUT_FREED; } ip = mtod(m, struct ip *); *modm = m; @@ -1128,12 +1152,12 @@ ipfw_tags_done: * therefore we don't care so much about PKTF_IFINFO. 
*/ if (!(inifp->if_flags & IFF_LOOPBACK) && - !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) { + !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) { OSAddAtomic(1, &ipstat.ips_total); OSAddAtomic(1, &ipstat.ips_badaddr); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; } } @@ -1148,7 +1172,7 @@ ipfw_tags_done: OSAddAtomic(1, &ipstat.ips_total); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; } } } @@ -1157,7 +1181,7 @@ ipfw_tags_done: OSAddAtomic(1, &ipstat.ips_total); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; } DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL, @@ -1176,7 +1200,7 @@ ipfw_tags_done: OSAddAtomic(1, &ipstat.ips_badlen); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; } #if BYTE_ORDER != BIG_ENDIAN @@ -1194,7 +1218,7 @@ ipfw_tags_done: OSAddAtomic(1, &ipstat.ips_tooshort); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; } if (m->m_pkthdr.len > ip->ip_len) { @@ -1234,7 +1258,7 @@ check_with_pf: ip_input_update_nstat(inifp, src_ip, 1, len); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); OSAddAtomic(1, &ipstat.ips_total); - return (IPINPUT_FREED); + return IPINPUT_FREED; } ip = mtod(m, struct ip *); hlen = IP_VHL_HL(ip->ip_vhl) << 2; @@ -1266,9 +1290,9 @@ iphack: */ if (args->fwai_next_hop) { *ours = 1; - return (IPINPUT_DONTCHAIN); + return IPINPUT_DONTCHAIN; } -#endif /* IPFIREWALL_FORWARD */ +#endif /* IPFIREWALL_FORWARD */ ip_input_cpout_args(args, &args1, &init); args1.fwa_m = m; @@ -1276,12 +1300,13 @@ iphack: m = args1.fwa_m; if ((i & IP_FW_PORT_DENY_FLAG) || m == NULL) { /* drop */ - if (m) + if (m) { m_freem(m); + } ip_input_update_nstat(inifp, src_ip, 1, len); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); OSAddAtomic(1, &ipstat.ips_total); - return (IPINPUT_FREED); + return IPINPUT_FREED; } ip = mtod(m, struct ip *); /* just in case m changed */ *modm = m; @@ -1293,12 +1318,12 @@ iphack: #if DUMMYNET if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG) != 0) { /* Send packet to the appropriate pipe */ - ip_dn_io_ptr(m, i&0xffff, DN_TO_IP_IN, &args1, + ip_dn_io_ptr(m, i & 0xffff, DN_TO_IP_IN, &args1, DN_CLIENT_IPFW); ip_input_update_nstat(inifp, src_ip, 1, len); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); OSAddAtomic(1, &ipstat.ips_total); - return (IPINPUT_FREED); + return IPINPUT_FREED; } #endif /* DUMMYNET */ #if IPDIVERT @@ -1306,7 +1331,7 @@ iphack: /* Divert or tee packet */ *div_info = i; *ours = 1; - return (IPINPUT_DONTCHAIN); + return IPINPUT_DONTCHAIN; } #endif #if IPFIREWALL_FORWARD @@ -1322,7 +1347,7 @@ iphack: KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); m_freem(m); OSAddAtomic(1, &ipstat.ips_total); - return (IPINPUT_FREED); + return IPINPUT_FREED; } #endif /* IPFIREWALL */ #if IPSEC | IPFIREWALL @@ -1334,17 +1359,17 @@ pass: * error was detected (causing an icmp message * to be sent and the original packet to be freed). 
*/ - ip_nhops = 0; /* for source routed packets */ + ip_nhops = 0; /* for source routed packets */ #if IPFIREWALL - if (hlen > sizeof (struct ip) && + if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, args->fwai_next_hop)) { #else /* !IPFIREWALL */ - if (hlen > sizeof (struct ip) && ip_dooptions(m, 0, NULL)) { + if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) { #endif /* !IPFIREWALL */ ip_input_update_nstat(inifp, src_ip, 1, len); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); OSAddAtomic(1, &ipstat.ips_total); - return (IPINPUT_FREED); + return IPINPUT_FREED; } /* @@ -1352,38 +1377,40 @@ pass: * if it is our fragment or someone else's plus the complexity of * divert and fw args makes it harder to do chaining. */ - if (ip->ip_off & ~(IP_DF | IP_RF)) - return (IPINPUT_DONTCHAIN); + if (ip->ip_off & ~(IP_DF | IP_RF)) { + return IPINPUT_DONTCHAIN; + } /* Allow DHCP/BootP responses through */ if ((inifp->if_eflags & IFEF_AUTOCONFIGURING) && - hlen == sizeof (struct ip) && ip->ip_p == IPPROTO_UDP) { + hlen == sizeof(struct ip) && ip->ip_p == IPPROTO_UDP) { struct udpiphdr *ui; - if (m->m_len < sizeof (struct udpiphdr) && - (m = m_pullup(m, sizeof (struct udpiphdr))) == NULL) { + if (m->m_len < sizeof(struct udpiphdr) && + (m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) { OSAddAtomic(1, &udpstat.udps_hdrops); KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0); OSAddAtomic(1, &ipstat.ips_total); - return (IPINPUT_FREED); + return IPINPUT_FREED; } *modm = m; ui = mtod(m, struct udpiphdr *); if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) { ip_setdstifaddr_info(m, inifp->if_index, NULL); - return (IPINPUT_DONTCHAIN); + return IPINPUT_DONTCHAIN; } } /* Avoid chaining raw sockets as ipsec checks occur later for them */ - if (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR) - return (IPINPUT_DONTCHAIN); + if (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR) { + return IPINPUT_DONTCHAIN; + } - return (retval); + return retval; #if !defined(__i386__) && !defined(__x86_64__) bad: m_freem(m); - return (IPINPUT_FREED); + return IPINPUT_FREED; #endif } @@ -1391,11 +1418,11 @@ static void ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, int npkts_in_chain, int bytes_in_chain, struct ip_fw_in_args *args, int ours) { - unsigned int checkif; - struct mbuf *tmp_mbuf = NULL; - struct in_ifaddr *ia = NULL; - struct in_addr pkt_dst; - unsigned int hlen; + unsigned int checkif; + struct mbuf *tmp_mbuf = NULL; + struct in_ifaddr *ia = NULL; + struct in_addr pkt_dst; + unsigned int hlen; #if !IPFIREWALL #pragma unused (args) @@ -1424,8 +1451,9 @@ ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, ip_input_update_nstat(inifp, ip->ip_src, npkts_in_chain, bytes_in_chain); - if (ours) + if (ours) { goto ours; + } /* * Check our list of addresses, to see if the packet is for us. 
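[Reviewer note: the first-pass return codes above (IPINPUT_DOCHAIN vs. IPINPUT_DONTCHAIN for fragments, diverted packets, and PR_LASTHDR protocols) feed the chaining machinery from ip_chain_insert() earlier in this patch: packets sharing (src, dst, proto) are linked into one of PKTTBL_SZ stack buckets and flushed as a batch, and a bucket collision ends the current chain. A simplified userland sketch of that insert step follows; struct pkt, chain_elm, and the bucket hash are invented for illustration — the kernel's actual index computation is not shown in these hunks.]

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PKTTBL_SZ	7	/* matches the table size above */

struct pkt {
	uint32_t	src, dst;
	uint8_t		proto;
	struct pkt	*nextpkt;
};

struct chain_elm {
	struct pkt	*head, *tail;
	uint32_t	src, dst;
	uint8_t		proto;
	unsigned	npkts;
};

/* Returns NULL if the packet was chained; returns the packet itself
 * on a bucket collision so the caller can flush and restart. */
static struct pkt *
chain_insert(struct pkt *p, struct chain_elm *tbl)
{
	/* bucket choice is illustrative only */
	unsigned i = (p->src ^ p->dst ^ p->proto) % PKTTBL_SZ;

	if (tbl[i].head == NULL) {		/* empty bucket: start a chain */
		tbl[i].head = tbl[i].tail = p;
		tbl[i].src = p->src;
		tbl[i].dst = p->dst;
		tbl[i].proto = p->proto;
		tbl[i].npkts = 1;
		return NULL;
	}
	if (tbl[i].src == p->src && tbl[i].dst == p->dst &&
	    tbl[i].proto == p->proto) {		/* same flow: extend chain */
		tbl[i].tail->nextpkt = p;
		tbl[i].tail = p;
		tbl[i].npkts++;
		return NULL;
	}
	return p;	/* different flow hashed to this bucket */
}

int
main(void)
{
	struct chain_elm tbl[PKTTBL_SZ] = {{0}};
	struct pkt a = { 1, 2, 6, NULL }, b = { 1, 2, 6, NULL };

	chain_insert(&a, tbl);
	chain_insert(&b, tbl);
	printf("chained %u pkts\n", tbl[(1u ^ 2u ^ 6u) % PKTTBL_SZ].npkts);
	return 0;
}

[The payoff is in the second pass: one route/address lookup amortized over every packet in the chain, which is why the stats below count ips_rxc_chained against ips_rxc_collisions.]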
@@ -1436,7 +1464,7 @@ ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, tmp_mbuf = m; if (TAILQ_EMPTY(&in_ifaddrhead)) { while (tmp_mbuf) { - if (!(tmp_mbuf->m_flags & (M_MCAST|M_BCAST))) { + if (!(tmp_mbuf->m_flags & (M_MCAST | M_BCAST))) { ip_setdstifaddr_info(tmp_mbuf, inifp->if_index, NULL); } @@ -1475,7 +1503,7 @@ ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info, #if IPFIREWALL && (args->fwai_next_hop == NULL); #else /* !IPFIREWALL */ - ; + ; #endif /* !IPFIREWALL */ /* @@ -1596,8 +1624,9 @@ ours: #else m = ip_reass(m); #endif - if (m == NULL) + if (m == NULL) { return; + } ip = mtod(m, struct ip *); /* Get the header length of the reassembled packet */ hlen = IP_VHL_HL(ip->ip_vhl) << 2; @@ -1637,8 +1666,9 @@ ours: VERIFY(npkts_in_chain == 1); /* Clone packet if we're doing a 'tee' */ - if (div_info & IP_FW_PORT_TEE_FLAG) + if (div_info & IP_FW_PORT_TEE_FLAG) { clone = m_dup(m, M_DONTWAIT); + } /* Restore packet header fields to original values */ ip->ip_len += hlen; @@ -1688,12 +1718,13 @@ ours: VERIFY(npkts_in_chain == 1); fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, sizeof (*ipfwd_tag), + KERNEL_TAG_TYPE_IPFORWARD, sizeof(*ipfwd_tag), M_NOWAIT, m); - if (fwd_tag == NULL) + if (fwd_tag == NULL) { goto bad; + } - ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1); + ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag + 1); ipfwd_tag->next_hop = args->fwai_next_hop; m_tag_prepend(m, fwd_tag); @@ -1708,7 +1739,6 @@ ours: ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len); ip_input_dispatch_chain(m); - } #else /* !IPFIREWALL */ ip_input_dispatch_chain(m); @@ -1724,25 +1754,26 @@ bad: void ip_input_process_list(struct mbuf *packet_list) { - pktchain_elm_t pktchain_tbl[PKTTBL_SZ]; + pktchain_elm_t pktchain_tbl[PKTTBL_SZ]; - struct mbuf *packet = NULL; - struct mbuf *modm = NULL; /* modified mbuf */ - int retval = 0; - u_int32_t div_info = 0; - int ours = 0; + struct mbuf *packet = NULL; + struct mbuf *modm = NULL; /* modified mbuf */ + int retval = 0; + u_int32_t div_info = 0; + int ours = 0; #if (DEBUG || DEVELOPMENT) struct timeval start_tv; #endif /* (DEBUG || DEVELOPMENT) */ - int num_pkts = 0; + int num_pkts = 0; int chain = 0; struct ip_fw_in_args args; if (ip_chaining == 0) { struct mbuf *m = packet_list; #if (DEBUG || DEVELOPMENT) - if (ip_input_measure) + if (ip_input_measure) { net_perf_start_time(&net_perf, &start_tv); + } #endif /* (DEBUG || DEVELOPMENT) */ while (m) { @@ -1753,14 +1784,16 @@ ip_input_process_list(struct mbuf *packet_list) num_pkts++; } #if (DEBUG || DEVELOPMENT) - if (ip_input_measure) + if (ip_input_measure) { net_perf_measure_time(&net_perf, &start_tv, num_pkts); + } #endif /* (DEBUG || DEVELOPMENT) */ return; } #if (DEBUG || DEVELOPMENT) - if (ip_input_measure) + if (ip_input_measure) { net_perf_start_time(&net_perf, &start_tv); + } #endif /* (DEBUG || DEVELOPMENT) */ bzero(&pktchain_tbl, sizeof(pktchain_tbl)); @@ -1773,28 +1806,31 @@ restart_list_process: num_pkts++; modm = NULL; div_info = 0; - bzero(&args, sizeof (args)); + bzero(&args, sizeof(args)); retval = ip_input_first_pass(packet, &div_info, &args, &ours, &modm); if (retval == IPINPUT_DOCHAIN) { - if (modm) + if (modm) { packet = modm; + } packet = ip_chain_insert(packet, &pktchain_tbl[0]); if (packet == NULL) { ipstat.ips_rxc_chained++; chain++; - if (chain > ip_chainsz) + if (chain > ip_chainsz) { break; + } } else { ipstat.ips_rxc_collisions++; break; } } else if (retval == IPINPUT_DONTCHAIN) { /* in order to preserve 
order, exit from chaining */ - if (modm) + if (modm) { packet = modm; + } ipstat.ips_rxc_notchain++; break; } else { @@ -1803,8 +1839,9 @@ restart_list_process: } /* do second pass here for pktchain_tbl */ - if (chain) + if (chain) { ip_input_second_pass_loop_tbl(&pktchain_tbl[0], &args); + } if (packet) { /* @@ -1812,19 +1849,22 @@ restart_list_process: * ip_input_second_pass_loop_tbl(). */ #if (DEBUG || DEVELOPMENT) - if (ip_input_measure) + if (ip_input_measure) { net_perf_histogram(&net_perf, 1); + } #endif /* (DEBUG || DEVELOPMENT) */ ip_input_second_pass(packet, packet->m_pkthdr.rcvif, div_info, 1, packet->m_pkthdr.len, &args, ours); } - if (packet_list) + if (packet_list) { goto restart_list_process; + } #if (DEBUG || DEVELOPMENT) - if (ip_input_measure) + if (ip_input_measure) { net_perf_measure_time(&net_perf, &start_tv, num_pkts); + } #endif /* (DEBUG || DEVELOPMENT) */ } /* @@ -1841,11 +1881,11 @@ ip_input(struct mbuf *m) struct in_addr pkt_dst; #if IPFIREWALL int i; - u_int32_t div_info = 0; /* packet divert/tee info */ + u_int32_t div_info = 0; /* packet divert/tee info */ #endif #if IPFIREWALL || DUMMYNET struct ip_fw_args args; - struct m_tag *tag; + struct m_tag *tag; #endif ipfilter_t inject_filter_ref = NULL; struct ifnet *inifp; @@ -1863,13 +1903,14 @@ ip_input(struct mbuf *m) m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED; #if IPFIREWALL || DUMMYNET - bzero(&args, sizeof (struct ip_fw_args)); + bzero(&args, sizeof(struct ip_fw_args)); /* * Don't bother searching for tag(s) if there's none. */ - if (SLIST_EMPTY(&m->m_pkthdr.tags)) + if (SLIST_EMPTY(&m->m_pkthdr.tags)) { goto ipfw_tags_done; + } /* Grab info from mtags prepended to the chain */ #if DUMMYNET @@ -1877,7 +1918,7 @@ ip_input(struct mbuf *m) KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) { struct dn_pkt_tag *dn_tag; - dn_tag = (struct dn_pkt_tag *)(tag+1); + dn_tag = (struct dn_pkt_tag *)(tag + 1); args.fwa_ipfw_rule = dn_tag->dn_ipfw_rule; args.fwa_pf_rule = dn_tag->dn_pf_rule; @@ -1890,7 +1931,7 @@ ip_input(struct mbuf *m) KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) { struct divert_tag *div_tag; - div_tag = (struct divert_tag *)(tag+1); + div_tag = (struct divert_tag *)(tag + 1); args.fwa_divert_rule = div_tag->cookie; m_tag_delete(m, tag); @@ -1901,15 +1942,16 @@ ip_input(struct mbuf *m) KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) { struct ip_fwd_tag *ipfwd_tag; - ipfwd_tag = (struct ip_fwd_tag *)(tag+1); + ipfwd_tag = (struct ip_fwd_tag *)(tag + 1); args.fwa_next_hop = ipfwd_tag->next_hop; m_tag_delete(m, tag); } -#if DIAGNOSTIC - if (m == NULL || !(m->m_flags & M_PKTHDR)) +#if DIAGNOSTIC + if (m == NULL || !(m->m_flags & M_PKTHDR)) { panic("ip_input no HDR"); + } #endif #if DUMMYNET @@ -1919,11 +1961,13 @@ ip_input(struct mbuf *m) hlen = IP_VHL_HL(ip->ip_vhl) << 2; inject_filter_ref = ipf_get_inject_filter(m); #if IPFIREWALL - if (args.fwa_ipfw_rule) + if (args.fwa_ipfw_rule) { goto iphack; + } #endif /* IPFIREWALL */ - if (args.fwa_pf_rule) + if (args.fwa_pf_rule) { goto check_with_pf; + } } #endif /* DUMMYNET */ ipfw_tags_done: @@ -1932,8 +1976,9 @@ ipfw_tags_done: /* * No need to process packet twice if we've already seen it. 
*/ - if (!SLIST_EMPTY(&m->m_pkthdr.tags)) + if (!SLIST_EMPTY(&m->m_pkthdr.tags)) { inject_filter_ref = ipf_get_inject_filter(m); + } if (inject_filter_ref != NULL) { ip = mtod(m, struct ip *); hlen = IP_VHL_HL(ip->ip_vhl) << 2; @@ -1949,11 +1994,12 @@ ipfw_tags_done: } OSAddAtomic(1, &ipstat.ips_total); - if (m->m_pkthdr.len < sizeof (struct ip)) + if (m->m_pkthdr.len < sizeof(struct ip)) { goto tooshort; + } - if (m->m_len < sizeof (struct ip) && - (m = m_pullup(m, sizeof (struct ip))) == NULL) { + if (m->m_len < sizeof(struct ip) && + (m = m_pullup(m, sizeof(struct ip))) == NULL) { OSAddAtomic(1, &ipstat.ips_toosmall); return; } @@ -1968,7 +2014,7 @@ ipfw_tags_done: } hlen = IP_VHL_HL(ip->ip_vhl) << 2; - if (hlen < sizeof (struct ip)) { /* minimum header length */ + if (hlen < sizeof(struct ip)) { /* minimum header length */ OSAddAtomic(1, &ipstat.ips_badhlen); goto bad; } @@ -2010,8 +2056,9 @@ ipfw_tags_done: if (ip->ip_ttl != MAXTTL) { OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl); /* Silently drop link local traffic with bad TTL */ - if (!ip_linklocal_in_allowbadttl) + if (!ip_linklocal_in_allowbadttl) { goto bad; + } } } @@ -2104,8 +2151,9 @@ check_with_pf: #endif /* PF */ #if IPSEC - if (ipsec_bypass == 0 && ipsec_gethist(m, NULL)) + if (ipsec_bypass == 0 && ipsec_gethist(m, NULL)) { goto pass; + } #endif #if IPFIREWALL @@ -2122,9 +2170,10 @@ iphack: * If we've been forwarded from the output side, then * skip the firewall a second time */ - if (args.fwa_next_hop) + if (args.fwa_next_hop) { goto ours; -#endif /* IPFIREWALL_FORWARD */ + } +#endif /* IPFIREWALL_FORWARD */ args.fwa_m = m; @@ -2132,8 +2181,9 @@ iphack: m = args.fwa_m; if ((i & IP_FW_PORT_DENY_FLAG) || m == NULL) { /* drop */ - if (m) + if (m) { m_freem(m); + } return; } ip = mtod(m, struct ip *); /* just in case m changed */ @@ -2144,7 +2194,7 @@ iphack: #if DUMMYNET if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG) != 0) { /* Send packet to the appropriate pipe */ - ip_dn_io_ptr(m, i&0xffff, DN_TO_IP_IN, &args, + ip_dn_io_ptr(m, i & 0xffff, DN_TO_IP_IN, &args, DN_CLIENT_IPFW); return; } @@ -2177,12 +2227,12 @@ pass: * error was detected (causing an icmp message * to be sent and the original packet to be freed). */ - ip_nhops = 0; /* for source routed packets */ + ip_nhops = 0; /* for source routed packets */ #if IPFIREWALL - if (hlen > sizeof (struct ip) && + if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, args.fwa_next_hop)) { #else /* !IPFIREWALL */ - if (hlen > sizeof (struct ip) && ip_dooptions(m, 0, NULL)) { + if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) { #endif /* !IPFIREWALL */ return; } @@ -2193,7 +2243,7 @@ pass: * we receive might be for us (and let the upper layers deal * with it). 
*/ - if (TAILQ_EMPTY(&in_ifaddrhead) && !(m->m_flags & (M_MCAST|M_BCAST))) { + if (TAILQ_EMPTY(&in_ifaddrhead) && !(m->m_flags & (M_MCAST | M_BCAST))) { ip_setdstifaddr_info(m, inifp->if_index, NULL); goto ours; } @@ -2229,7 +2279,7 @@ pass: #if IPFIREWALL && (args.fwa_next_hop == NULL); #else /* !IPFIREWALL */ - ; + ; #endif /* !IPFIREWALL */ /* @@ -2305,11 +2355,11 @@ pass: /* Allow DHCP/BootP responses through */ if ((inifp->if_eflags & IFEF_AUTOCONFIGURING) && - hlen == sizeof (struct ip) && ip->ip_p == IPPROTO_UDP) { + hlen == sizeof(struct ip) && ip->ip_p == IPPROTO_UDP) { struct udpiphdr *ui; - if (m->m_len < sizeof (struct udpiphdr) && - (m = m_pullup(m, sizeof (struct udpiphdr))) == NULL) { + if (m->m_len < sizeof(struct udpiphdr) && + (m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) { OSAddAtomic(1, &udpstat.udps_hdrops); return; } @@ -2350,8 +2400,9 @@ ours: #else m = ip_reass(m); #endif - if (m == NULL) + if (m == NULL) { return; + } ip = mtod(m, struct ip *); /* Get the header length of the reassembled packet */ hlen = IP_VHL_HL(ip->ip_vhl) << 2; @@ -2389,8 +2440,9 @@ ours: struct mbuf *clone = NULL; /* Clone packet if we're doing a 'tee' */ - if (div_info & IP_FW_PORT_TEE_FLAG) + if (div_info & IP_FW_PORT_TEE_FLAG) { clone = m_dup(m, M_DONTWAIT); + } /* Restore packet header fields to original values */ ip->ip_len += hlen; @@ -2438,12 +2490,13 @@ ours: struct ip_fwd_tag *ipfwd_tag; fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFORWARD, sizeof (*ipfwd_tag), + KERNEL_TAG_TYPE_IPFORWARD, sizeof(*ipfwd_tag), M_NOWAIT, m); - if (fwd_tag == NULL) + if (fwd_tag == NULL) { goto bad; + } - ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1); + ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag + 1); ipfwd_tag->next_hop = args.fwa_next_hop; m_tag_prepend(m, fwd_tag); @@ -2459,8 +2512,9 @@ ours: if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) { m = tcp_lro(m, hlen); - if (m == NULL) + if (m == NULL) { return; + } } ip_proto_dispatch_in(m, hlen, ip->ip_p, 0); @@ -2468,8 +2522,9 @@ ours: #else /* !IPFIREWALL */ if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) { m = tcp_lro(m, hlen); - if (m == NULL) + if (m == NULL) { return; + } } ip_proto_dispatch_in(m, hlen, ip->ip_p, 0); #endif /* !IPFIREWALL */ @@ -2487,20 +2542,23 @@ ipq_updateparams(void) /* * -1 for unlimited allocation. */ - if (maxnipq < 0) + if (maxnipq < 0) { ipq_limit = 0; + } /* * Positive number for specific bound. */ - if (maxnipq > 0) + if (maxnipq > 0) { ipq_limit = maxnipq; + } /* * Zero specifies no further fragment queue allocation -- set the * bound very low, but rely on implementation elsewhere to actually * prevent allocation and reclaim current queues. 
*/ - if (maxnipq == 0) + if (maxnipq == 0) { ipq_limit = 1; + } /* * Arm the purge timer if not already and if there's work to do */ @@ -2516,8 +2574,9 @@ sysctl_maxnipq SYSCTL_HANDLER_ARGS lck_mtx_lock(&ipqlock); i = maxnipq; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* impose bounds */ if (i < -1 || i > (nmbclusters / 4)) { error = EINVAL; @@ -2527,7 +2586,7 @@ sysctl_maxnipq SYSCTL_HANDLER_ARGS ipq_updateparams(); done: lck_mtx_unlock(&ipqlock); - return (error); + return error; } static int @@ -2539,13 +2598,14 @@ sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS lck_mtx_lock(&ipqlock); i = maxfragsperpacket; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } maxfragsperpacket = i; - ipq_updateparams(); /* see if we need to arm timer */ + ipq_updateparams(); /* see if we need to arm timer */ done: lck_mtx_unlock(&ipqlock); - return (error); + return error; } /* @@ -2581,7 +2641,7 @@ ip_reass(struct mbuf *m) uint16_t hash; struct fq_head dfq; - MBUFQ_INIT(&dfq); /* for deferred frees */ + MBUFQ_INIT(&dfq); /* for deferred frees */ /* If maxnipq or maxfragsperpacket is 0, never accept fragments. */ if (maxnipq == 0 || maxfragsperpacket == 0) { @@ -2590,10 +2650,10 @@ ip_reass(struct mbuf *m) m_freem(m); if (nipq > 0) { lck_mtx_lock(&ipqlock); - frag_sched_timeout(); /* purge stale fragments */ + frag_sched_timeout(); /* purge stale fragments */ lck_mtx_unlock(&ipqlock); } - return (NULL); + return NULL; } ip = mtod(m, struct ip *); @@ -2615,8 +2675,9 @@ ip_reass(struct mbuf *m) #if CONFIG_MACF_NET mac_ipq_label_compare(m, fp) && #endif - ip->ip_p == fp->ipq_p) + ip->ip_p == fp->ipq_p) { goto found; + } } fp = NULL; @@ -2659,7 +2720,7 @@ found: * has already performed its header checksum validation. Also take * care of any trailing bytes and subtract out their partial sum. 
*/ - if (ip->ip_p == IPPROTO_UDP && hlen == sizeof (struct ip) && + if (ip->ip_p == IPPROTO_UDP && hlen == sizeof(struct ip) && (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PARTIAL)) { @@ -2680,10 +2741,11 @@ found: /* callee folds in sum */ csum = m_adj_sum16(m, start, hlen, (ip->ip_len - hlen), csum); - if (hlen > start) + if (hlen > start) { swbytes += (hlen - start); - else + } else { swbytes += (start - hlen); + } #if BYTE_ORDER != BIG_ENDIAN if (start < hlen) { NTOHS(ip->ip_off); @@ -2693,10 +2755,12 @@ found: } csum_flags = m->m_pkthdr.csum_flags; - if (swbytes != 0) + if (swbytes != 0) { udp_in_cksum_stats(swbytes); - if (trailer != 0) + } + if (trailer != 0) { m_adj(m, -trailer); + } } else { csum = 0; csum_flags = 0; @@ -2751,8 +2815,9 @@ found: */ if (fp == NULL) { fp = ipq_alloc(M_DONTWAIT); - if (fp == NULL) + if (fp == NULL) { goto dropfrag; + } #if CONFIG_MACF_NET if (mac_ipq_label_init(fp, M_NOWAIT) != 0) { ipq_free(fp); @@ -2795,7 +2860,7 @@ found: *divinfo = 0; *divcookie = 0; #endif /* IPDIVERT */ - m = NULL; /* nothing to return */ + m = NULL; /* nothing to return */ goto done; } else { fp->ipq_nfrags++; @@ -2804,7 +2869,7 @@ found: #endif } -#define GETIP(m) ((struct ip *)((m)->m_pkthdr.pkt_hdr)) +#define GETIP(m) ((struct ip *)((m)->m_pkthdr.pkt_hdr)) /* * Handle ECN by comparing this segment with the first one; @@ -2814,20 +2879,25 @@ found: ecn = ip->ip_tos & IPTOS_ECN_MASK; ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK; if (ecn == IPTOS_ECN_CE) { - if (ecn0 == IPTOS_ECN_NOTECT) + if (ecn0 == IPTOS_ECN_NOTECT) { goto dropfrag; - if (ecn0 != IPTOS_ECN_CE) + } + if (ecn0 != IPTOS_ECN_CE) { GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE; + } } - if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) + if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) { goto dropfrag; + } /* * Find a segment which begins after this one does. */ - for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) - if (GETIP(q)->ip_off > ip->ip_off) + for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) { + if (GETIP(q)->ip_off > ip->ip_off) { break; + } + } /* * If there is a preceding segment, it may provide some of @@ -2841,8 +2911,9 @@ found: if (p) { i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off; if (i > 0) { - if (i >= ip->ip_len) + if (i >= ip->ip_len) { goto dropfrag; + } m_adj(m, i); fp->ipq_csum_flags = 0; ip->ip_off += i; @@ -2882,10 +2953,11 @@ found: * as that of the existing ones, accumulate checksum. Otherwise, * invalidate checksum offload info for the entire datagram. 
*/ - if (csum_flags != 0 && csum_flags == fp->ipq_csum_flags) + if (csum_flags != 0 && csum_flags == fp->ipq_csum_flags) { fp->ipq_csum += csum; - else if (fp->ipq_csum_flags != 0) + } else if (fp->ipq_csum_flags != 0) { fp->ipq_csum_flags = 0; + } #if IPDIVERT /* @@ -2921,7 +2993,7 @@ found: ipstat.ips_fragdropped += fp->ipq_nfrags; frag_freef(head, fp); } - m = NULL; /* nothing to return */ + m = NULL; /* nothing to return */ goto done; } next += GETIP(q)->ip_len; @@ -2932,7 +3004,7 @@ found: ipstat.ips_fragdropped += fp->ipq_nfrags; frag_freef(head, fp); } - m = NULL; /* nothing to return */ + m = NULL; /* nothing to return */ goto done; } @@ -2945,7 +3017,7 @@ found: ipstat.ips_toolong++; ipstat.ips_fragdropped += fp->ipq_nfrags; frag_freef(head, fp); - m = NULL; /* nothing to return */ + m = NULL; /* nothing to return */ goto done; } @@ -2975,7 +3047,7 @@ found: ADDCARRY(csum); m->m_pkthdr.csum_rx_val = csum; - m->m_pkthdr.csum_rx_start = sizeof (struct ip); + m->m_pkthdr.csum_rx_start = sizeof(struct ip); m->m_pkthdr.csum_flags = fp->ipq_csum_flags; } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { @@ -3012,25 +3084,27 @@ found: ip->ip_src = fp->ipq_src; ip->ip_dst = fp->ipq_dst; - fp->ipq_frags = NULL; /* return to caller as 'm' */ + fp->ipq_frags = NULL; /* return to caller as 'm' */ frag_freef(head, fp); fp = NULL; m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2); m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2); /* some debugging cruft by sklower, below, will go away soon */ - if (m->m_flags & M_PKTHDR) /* XXX this should be done elsewhere */ + if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */ m_fixhdr(m); + } ipstat.ips_reassembled++; /* arm the purge timer if not already and if there's work to do */ frag_sched_timeout(); lck_mtx_unlock(&ipqlock); /* perform deferred free (if needed) now that lock is dropped */ - if (!MBUFQ_EMPTY(&dfq)) + if (!MBUFQ_EMPTY(&dfq)) { MBUFQ_DRAIN(&dfq); + } VERIFY(MBUFQ_EMPTY(&dfq)); - return (m); + return m; done: VERIFY(m == NULL); @@ -3038,10 +3112,11 @@ done: frag_sched_timeout(); lck_mtx_unlock(&ipqlock); /* perform deferred free (if needed) */ - if (!MBUFQ_EMPTY(&dfq)) + if (!MBUFQ_EMPTY(&dfq)) { MBUFQ_DRAIN(&dfq); + } VERIFY(MBUFQ_EMPTY(&dfq)); - return (NULL); + return NULL; dropfrag: #if IPDIVERT @@ -3049,17 +3124,19 @@ dropfrag: *divcookie = 0; #endif /* IPDIVERT */ ipstat.ips_fragdropped++; - if (fp != NULL) + if (fp != NULL) { fp->ipq_nfrags--; + } /* arm the purge timer if not already and if there's work to do */ frag_sched_timeout(); lck_mtx_unlock(&ipqlock); m_freem(m); /* perform deferred free (if needed) */ - if (!MBUFQ_EMPTY(&dfq)) + if (!MBUFQ_EMPTY(&dfq)) { MBUFQ_DRAIN(&dfq); + } VERIFY(MBUFQ_EMPTY(&dfq)); - return (NULL); + return NULL; #undef GETIP } @@ -3101,7 +3178,7 @@ frag_timeout(void *arg) lck_mtx_lock(&ipqlock); for (i = 0; i < IPREASS_NHASH; i++) { - for (fp = TAILQ_FIRST(&ipq[i]); fp; ) { + for (fp = TAILQ_FIRST(&ipq[i]); fp;) { struct ipq *fpp; fpp = fp; @@ -3174,18 +3251,19 @@ ipq_alloc(int how) * from nipq since the latter represents the elements already * in the reassembly queues. 
*/ - if (ipq_limit > 0 && ipq_count > ipq_limit) - return (NULL); + if (ipq_limit > 0 && ipq_count > ipq_limit) { + return NULL; + } t = m_get(how, MT_FTABLE); if (t != NULL) { atomic_add_32(&ipq_count, 1); fp = mtod(t, struct ipq *); - bzero(fp, sizeof (*fp)); + bzero(fp, sizeof(*fp)); } else { fp = NULL; } - return (fp); + return fp; } static void @@ -3201,9 +3279,9 @@ ipq_free(struct ipq *fp) void ip_drain(void) { - frag_drain(); /* fragments */ - in_rtqdrain(); /* protocol cloned routes */ - in_arpdrain(NULL); /* cloned routes: ARP */ + frag_drain(); /* fragments */ + in_rtqdrain(); /* protocol cloned routes */ + in_arpdrain(NULL); /* cloned routes: ARP */ } /* @@ -3230,34 +3308,35 @@ ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop) struct in_addr *sin, dst; u_int32_t ntime; struct sockaddr_in ipaddr = { - sizeof (ipaddr), AF_INET, 0, { 0 }, { 0, } }; + sizeof(ipaddr), AF_INET, 0, { 0 }, { 0, } + }; /* Expect 32-bit aligned data pointer on strict-align platforms */ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); dst = ip->ip_dst; cp = (u_char *)(ip + 1); - cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip); + cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip); for (; cnt > 0; cnt -= optlen, cp += optlen) { opt = cp[IPOPT_OPTVAL]; - if (opt == IPOPT_EOL) + if (opt == IPOPT_EOL) { break; - if (opt == IPOPT_NOP) + } + if (opt == IPOPT_NOP) { optlen = 1; - else { - if (cnt < IPOPT_OLEN + sizeof (*cp)) { + } else { + if (cnt < IPOPT_OLEN + sizeof(*cp)) { code = &cp[IPOPT_OLEN] - (u_char *)ip; goto bad; } optlen = cp[IPOPT_OLEN]; - if (optlen < IPOPT_OLEN + sizeof (*cp) || + if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { code = &cp[IPOPT_OLEN] - (u_char *)ip; goto bad; } } switch (opt) { - default: break; @@ -3272,7 +3351,7 @@ ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop) */ case IPOPT_LSRR: case IPOPT_SSRR: - if (optlen < IPOPT_OFFSET + sizeof (*cp)) { + if (optlen < IPOPT_OFFSET + sizeof(*cp)) { code = &cp[IPOPT_OLEN] - (u_char *)ip; goto bad; } @@ -3288,8 +3367,9 @@ ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop) code = ICMP_UNREACH_SRCFAIL; goto bad; } - if (!ip_dosourceroute) + if (!ip_dosourceroute) { goto nosourcerouting; + } /* * Loose routing, and not at next destination * yet; nothing to do except forward. @@ -3299,13 +3379,14 @@ ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop) IFA_REMREF(&ia->ia_ifa); ia = NULL; } - off--; /* 0 origin */ - if (off > optlen - (int)sizeof (struct in_addr)) { + off--; /* 0 origin */ + if (off > optlen - (int)sizeof(struct in_addr)) { /* * End of source route. Should be for us. 
*/ - if (!ip_acceptsourceroute) + if (!ip_acceptsourceroute) { goto nosourcerouting; + } save_rte(cp, ip->ip_src); break; } @@ -3322,9 +3403,9 @@ nosourcerouting: "attempted source route from %s " "to %s\n", inet_ntop(AF_INET, &ip->ip_src, - buf, sizeof (buf)), + buf, sizeof(buf)), inet_ntop(AF_INET, &ip->ip_dst, - buf2, sizeof (buf2))); + buf2, sizeof(buf2))); type = ICMP_UNREACH; code = ICMP_UNREACH_SRCFAIL; goto bad; @@ -3335,7 +3416,7 @@ nosourcerouting: */ OSAddAtomic(1, &ipstat.ips_cantforward); m_freem(m); - return (1); + return 1; } } @@ -3343,12 +3424,12 @@ nosourcerouting: * locate outgoing interface */ (void) memcpy(&ipaddr.sin_addr, cp + off, - sizeof (ipaddr.sin_addr)); + sizeof(ipaddr.sin_addr)); if (opt == IPOPT_SSRR) { -#define INA struct in_ifaddr * +#define INA struct in_ifaddr * if ((ia = (INA)ifa_ifwithdstaddr( - SA(&ipaddr))) == NULL) { + SA(&ipaddr))) == NULL) { ia = (INA)ifa_ifwithnet(SA(&ipaddr)); } } else { @@ -3362,11 +3443,11 @@ nosourcerouting: ip->ip_dst = ipaddr.sin_addr; IFA_LOCK(&ia->ia_ifa); (void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr), - sizeof (struct in_addr)); + sizeof(struct in_addr)); IFA_UNLOCK(&ia->ia_ifa); IFA_REMREF(&ia->ia_ifa); ia = NULL; - cp[IPOPT_OFFSET] += sizeof (struct in_addr); + cp[IPOPT_OFFSET] += sizeof(struct in_addr); /* * Let ip_intr's mcast routing check handle mcast pkts */ @@ -3374,7 +3455,7 @@ nosourcerouting: break; case IPOPT_RR: - if (optlen < IPOPT_OFFSET + sizeof (*cp)) { + if (optlen < IPOPT_OFFSET + sizeof(*cp)) { code = &cp[IPOPT_OFFSET] - (u_char *)ip; goto bad; } @@ -3385,11 +3466,12 @@ nosourcerouting: /* * If no space remains, ignore. */ - off--; /* 0 origin */ - if (off > optlen - (int)sizeof (struct in_addr)) + off--; /* 0 origin */ + if (off > optlen - (int)sizeof(struct in_addr)) { break; + } (void) memcpy(&ipaddr.sin_addr, &ip->ip_dst, - sizeof (ipaddr.sin_addr)); + sizeof(ipaddr.sin_addr)); /* * locate outgoing interface; if we're the destination, * use the incoming interface (should be same). 
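[Reviewer note: the ip_dooptions() loop above — also repeated in ip_checkrouteralert() below — follows the classic IP-option TLV discipline: EOL terminates the list, NOP is a single byte with no length, and every other option must carry a length byte that stays within the remaining header space, else the packet is rejected with a parameter-problem pointer. A standalone sketch of just that walk follows; the constants mirror netinet/ip.h and parse_cb is a hypothetical callback.]

#include <stddef.h>
#include <stdio.h>

#define IPOPT_EOL	0	/* end of option list */
#define IPOPT_NOP	1	/* one-byte padding */
#define IPOPT_OPTVAL	0	/* offset of option type */
#define IPOPT_OLEN	1	/* offset of option length */

/* Returns 0 on a well-formed list, -1 on a malformed length. */
static int
walk_ip_options(const unsigned char *cp, int cnt,
    void (*parse_cb)(unsigned char, const unsigned char *, int))
{
	int optlen;

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		unsigned char opt = cp[IPOPT_OPTVAL];

		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP) {
			optlen = 1;	/* no length byte, no callback */
			continue;
		}
		if (cnt < IPOPT_OLEN + 1)
			return -1;	/* no room for a length byte */
		optlen = cp[IPOPT_OLEN];
		if (optlen < IPOPT_OLEN + 1 || optlen > cnt)
			return -1;	/* length escapes the buffer */
		parse_cb(opt, cp, optlen);
	}
	return 0;
}

static void
show(unsigned char opt, const unsigned char *cp, int len)
{
	(void)cp;
	printf("opt %u len %d\n", opt, len);
}

int
main(void)
{
	/* NOP, a 4-byte router-alert option (type 148), then EOL */
	const unsigned char opts[] = { 1, 0x94, 4, 0, 0, 0 };

	return walk_ip_options(opts, (int)sizeof(opts), show) ? 1 : 0;
}

[Both bounds checks matter: the first guards reading the length byte itself, the second stops an attacker-supplied length from walking the cursor past the end of the header.]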
@@ -3403,11 +3485,11 @@ nosourcerouting: } IFA_LOCK(&ia->ia_ifa); (void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr), - sizeof (struct in_addr)); + sizeof(struct in_addr)); IFA_UNLOCK(&ia->ia_ifa); IFA_REMREF(&ia->ia_ifa); ia = NULL; - cp[IPOPT_OFFSET] += sizeof (struct in_addr); + cp[IPOPT_OFFSET] += sizeof(struct in_addr); break; case IPOPT_TS: @@ -3422,7 +3504,7 @@ nosourcerouting: goto bad; } if (ipt->ipt_ptr > - ipt->ipt_len - (int)sizeof (int32_t)) { + ipt->ipt_len - (int)sizeof(int32_t)) { if (++ipt->ipt_oflw == 0) { code = (u_char *)&ipt->ipt_ptr - (u_char *)ip; @@ -3432,13 +3514,12 @@ nosourcerouting: } sin = (struct in_addr *)(void *)(cp + ipt->ipt_ptr - 1); switch (ipt->ipt_flg) { - case IPOPT_TS_TSONLY: break; case IPOPT_TS_TSANDADDR: - if (ipt->ipt_ptr - 1 + sizeof (n_time) + - sizeof (struct in_addr) > ipt->ipt_len) { + if (ipt->ipt_ptr - 1 + sizeof(n_time) + + sizeof(struct in_addr) > ipt->ipt_len) { code = (u_char *)&ipt->ipt_ptr - (u_char *)ip; goto bad; @@ -3446,32 +3527,34 @@ nosourcerouting: ipaddr.sin_addr = dst; ia = (INA)ifaof_ifpforaddr(SA(&ipaddr), m->m_pkthdr.rcvif); - if (ia == NULL) + if (ia == NULL) { continue; + } IFA_LOCK(&ia->ia_ifa); (void) memcpy(sin, &IA_SIN(ia)->sin_addr, - sizeof (struct in_addr)); + sizeof(struct in_addr)); IFA_UNLOCK(&ia->ia_ifa); - ipt->ipt_ptr += sizeof (struct in_addr); + ipt->ipt_ptr += sizeof(struct in_addr); IFA_REMREF(&ia->ia_ifa); ia = NULL; break; case IPOPT_TS_PRESPEC: - if (ipt->ipt_ptr - 1 + sizeof (n_time) + - sizeof (struct in_addr) > ipt->ipt_len) { + if (ipt->ipt_ptr - 1 + sizeof(n_time) + + sizeof(struct in_addr) > ipt->ipt_len) { code = (u_char *)&ipt->ipt_ptr - (u_char *)ip; goto bad; } (void) memcpy(&ipaddr.sin_addr, sin, - sizeof (struct in_addr)); + sizeof(struct in_addr)); if ((ia = (struct in_ifaddr *)ifa_ifwithaddr( - SA(&ipaddr))) == NULL) + SA(&ipaddr))) == NULL) { continue; + } IFA_REMREF(&ia->ia_ifa); ia = NULL; - ipt->ipt_ptr += sizeof (struct in_addr); + ipt->ipt_ptr += sizeof(struct in_addr); break; default: @@ -3482,19 +3565,19 @@ nosourcerouting: } ntime = iptime(); (void) memcpy(cp + ipt->ipt_ptr - 1, &ntime, - sizeof (n_time)); - ipt->ipt_ptr += sizeof (n_time); + sizeof(n_time)); + ipt->ipt_ptr += sizeof(n_time); } } if (forward && ipforwarding) { ip_forward(m, 1, next_hop); - return (1); + return 1; } - return (0); + return 0; bad: icmp_error(m, type, code, 0, 0); OSAddAtomic(1, &ipstat.ips_badoptions); - return (1); + return 1; } /* @@ -3522,40 +3605,43 @@ ip_checkrouteralert(struct mbuf *m) found_ra = 0; cp = (u_char *)(ip + 1); - cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip); + cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip); for (; cnt > 0; cnt -= optlen, cp += optlen) { opt = cp[IPOPT_OPTVAL]; - if (opt == IPOPT_EOL) + if (opt == IPOPT_EOL) { break; - if (opt == IPOPT_NOP) + } + if (opt == IPOPT_NOP) { optlen = 1; - else { + } else { #ifdef DIAGNOSTIC - if (cnt < IPOPT_OLEN + sizeof (*cp)) + if (cnt < IPOPT_OLEN + sizeof(*cp)) { break; + } #endif optlen = cp[IPOPT_OLEN]; #ifdef DIAGNOSTIC - if (optlen < IPOPT_OLEN + sizeof (*cp) || optlen > cnt) + if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { break; + } #endif } switch (opt) { case IPOPT_RA: #ifdef DIAGNOSTIC - if (optlen != IPOPT_OFFSET + sizeof (uint16_t) || - (*((uint16_t *)(void *)&cp[IPOPT_OFFSET]) != 0)) + if (optlen != IPOPT_OFFSET + sizeof(uint16_t) || + (*((uint16_t *)(void *)&cp[IPOPT_OFFSET]) != 0)) { break; - else + } else #endif - found_ra = 1; + found_ra = 1; break; default: break; } } - return 
(found_ra); + return found_ra; } /* @@ -3569,25 +3655,26 @@ ip_rtaddr(struct in_addr dst) struct ifaddr *rt_ifa; struct route ro; - bzero(&ro, sizeof (ro)); + bzero(&ro, sizeof(ro)); sin = SIN(&ro.ro_dst); sin->sin_family = AF_INET; - sin->sin_len = sizeof (*sin); + sin->sin_len = sizeof(*sin); sin->sin_addr = dst; rtalloc_ign(&ro, RTF_PRCLONING); if (ro.ro_rt == NULL) { ROUTE_RELEASE(&ro); - return (NULL); + return NULL; } RT_LOCK(ro.ro_rt); - if ((rt_ifa = ro.ro_rt->rt_ifa) != NULL) + if ((rt_ifa = ro.ro_rt->rt_ifa) != NULL) { IFA_ADDREF(rt_ifa); + } RT_UNLOCK(ro.ro_rt); ROUTE_RELEASE(&ro); - return ((struct in_ifaddr *)rt_ifa); + return (struct in_ifaddr *)rt_ifa; } /* @@ -3601,13 +3688,15 @@ save_rte(u_char *option, struct in_addr dst) olen = option[IPOPT_OLEN]; #if DIAGNOSTIC - if (ipprintfs) + if (ipprintfs) { printf("save_rte: olen %d\n", olen); + } #endif - if (olen > sizeof (ip_srcrt) - (1 + sizeof (dst))) + if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) { return; + } bcopy(option, ip_srcrt.srcopt, olen); - ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof (struct in_addr); + ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr); ip_srcrt.dst = dst; } @@ -3622,21 +3711,24 @@ ip_srcroute(void) struct in_addr *p, *q; struct mbuf *m; - if (ip_nhops == 0) - return (NULL); + if (ip_nhops == 0) { + return NULL; + } m = m_get(M_DONTWAIT, MT_HEADER); - if (m == NULL) - return (NULL); + if (m == NULL) { + return NULL; + } -#define OPTSIZ (sizeof (ip_srcrt.nop) + sizeof (ip_srcrt.srcopt)) +#define OPTSIZ (sizeof (ip_srcrt.nop) + sizeof (ip_srcrt.srcopt)) /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */ - m->m_len = ip_nhops * sizeof (struct in_addr) + - sizeof (struct in_addr) + OPTSIZ; + m->m_len = ip_nhops * sizeof(struct in_addr) + + sizeof(struct in_addr) + OPTSIZ; #if DIAGNOSTIC - if (ipprintfs) + if (ipprintfs) { printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len); + } #endif /* @@ -3645,9 +3737,10 @@ ip_srcroute(void) p = &ip_srcrt.route[ip_nhops - 1]; *(mtod(m, struct in_addr *)) = *p--; #if DIAGNOSTIC - if (ipprintfs) + if (ipprintfs) { printf(" hops %lx", (u_int32_t)ntohl(mtod(m, struct in_addr *)->s_addr)); + } #endif /* @@ -3655,10 +3748,10 @@ ip_srcroute(void) */ ip_srcrt.nop = IPOPT_NOP; ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF; - (void) memcpy(mtod(m, caddr_t) + sizeof (struct in_addr), + (void) memcpy(mtod(m, caddr_t) + sizeof(struct in_addr), &ip_srcrt.nop, OPTSIZ); q = (struct in_addr *)(void *)(mtod(m, caddr_t) + - sizeof (struct in_addr) + OPTSIZ); + sizeof(struct in_addr) + OPTSIZ); #undef OPTSIZ /* * Record return path as an IP source route, @@ -3666,8 +3759,9 @@ ip_srcroute(void) */ while (p >= ip_srcrt.route) { #if DIAGNOSTIC - if (ipprintfs) + if (ipprintfs) { printf(" %lx", (u_int32_t)ntohl(q->s_addr)); + } #endif *q++ = *p--; } @@ -3676,10 +3770,11 @@ ip_srcroute(void) */ *q = ip_srcrt.dst; #if DIAGNOSTIC - if (ipprintfs) + if (ipprintfs) { printf(" %lx\n", (u_int32_t)ntohl(q->s_addr)); + } #endif - return (m); + return m; } /* @@ -3697,14 +3792,15 @@ ip_stripoptions(struct mbuf *m) MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); /* use bcopy() since it supports overlapping range */ - olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip); + olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip); opts = (caddr_t)(ip + 1); - i = m->m_len - (sizeof (struct ip) + olen); + i = m->m_len - (sizeof(struct ip) + olen); bcopy(opts + olen, opts, (unsigned)i); m->m_len -= olen; - if (m->m_flags & M_PKTHDR) + if (m->m_flags & M_PKTHDR) { 
m->m_pkthdr.len -= olen; - ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof (struct ip) >> 2); + } + ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2); /* * We expect ip_{off,len} to be in host order by now, and @@ -3712,7 +3808,7 @@ ip_stripoptions(struct mbuf *m) * out from ip_len. Temporarily adjust ip_len for checksum * recalculation, and restore it afterwards. */ - ip->ip_len += sizeof (struct ip); + ip->ip_len += sizeof(struct ip); /* recompute checksum now that IP header is smaller */ #if BYTE_ORDER != BIG_ENDIAN @@ -3725,16 +3821,16 @@ ip_stripoptions(struct mbuf *m) NTOHS(ip->ip_len); #endif /* BYTE_ORDER != BIG_ENDIAN */ - ip->ip_len -= sizeof (struct ip); + ip->ip_len -= sizeof(struct ip); } u_char inetctlerrmap[PRC_NCMDS] = { - 0, 0, 0, 0, - 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, - ENETUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, - EMSGSIZE, EHOSTUNREACH, 0, 0, - 0, 0, 0, 0, - ENOPROTOOPT, ECONNREFUSED + 0, 0, 0, 0, + 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, + ENETUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, + EMSGSIZE, EHOSTUNREACH, 0, 0, + 0, 0, 0, 0, + ENOPROTOOPT, ECONNREFUSED }; static int @@ -3744,8 +3840,9 @@ sysctl_ipforwarding SYSCTL_HANDLER_ARGS int i, was_ipforwarding = ipforwarding; i = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); - if (i != 0 || req->newptr == USER_ADDR_NULL) - return (i); + if (i != 0 || req->newptr == USER_ADDR_NULL) { + return i; + } if (was_ipforwarding && !ipforwarding) { /* clean up IPv4 forwarding cached routes */ @@ -3756,14 +3853,14 @@ sysctl_ipforwarding SYSCTL_HANDLER_ARGS lck_mtx_lock(&ifp->if_cached_route_lock); ROUTE_RELEASE(&ifp->if_fwd_route); bzero(&ifp->if_fwd_route, - sizeof (ifp->if_fwd_route)); + sizeof(ifp->if_fwd_route)); lck_mtx_unlock(&ifp->if_cached_route_lock); } } ifnet_head_done(); } - return (0); + return 0; } /* @@ -3780,10 +3877,11 @@ ip_fwd_route_copyout(struct ifnet *ifp, struct route *dst) lck_mtx_convert_spin(&ifp->if_cached_route_lock); /* Minor sanity check */ - if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) + if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) { panic("%s: wrong or corrupted route: %p", __func__, src); + } - route_copyout(dst, src, sizeof (*dst)); + route_copyout(dst, src, sizeof(*dst)); lck_mtx_unlock(&ifp->if_cached_route_lock); } @@ -3797,11 +3895,13 @@ ip_fwd_route_copyin(struct ifnet *ifp, struct route *src) lck_mtx_convert_spin(&ifp->if_cached_route_lock); /* Minor sanity check */ - if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) + if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) { panic("%s: wrong or corrupted route: %p", __func__, src); + } - if (ifp->if_fwd_cacheok) - route_copyin(src, dst, sizeof (*src)); + if (ifp->if_fwd_cacheok) { + route_copyin(src, dst, sizeof(*src)); + } lck_mtx_unlock(&ifp->if_cached_route_lock); } @@ -3863,13 +3963,14 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) #endif /* !IPFIREWALL */ #if DIAGNOSTIC - if (ipprintfs) + if (ipprintfs) { printf("forward: src %lx dst %lx ttl %x\n", (u_int32_t)ip->ip_src.s_addr, (u_int32_t)pkt_dst.s_addr, ip->ip_ttl); + } #endif - if (m->m_flags & (M_BCAST|M_MCAST) || !in_canforward(pkt_dst)) { + if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) { OSAddAtomic(1, &ipstat.ips_cantforward); m_freem(m); return; @@ -3877,13 +3978,13 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) #if IPSTEALTH if (!ipstealth) { #endif /* IPSTEALTH */ - if (ip->ip_ttl <= IPTTLDEC) { - 
icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, - dest, 0); - return; - } -#if IPSTEALTH + if (ip->ip_ttl <= IPTTLDEC) { + icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS, + dest, 0); + return; } +#if IPSTEALTH +} #endif /* IPSTEALTH */ #if PF @@ -3901,7 +4002,7 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) ROUTE_RELEASE(&fwd_rt); sin->sin_family = AF_INET; - sin->sin_len = sizeof (*sin); + sin->sin_len = sizeof(*sin); sin->sin_addr = pkt_dst; rtalloc_scoped_ign(&fwd_rt, RTF_PRCLONING, ipoa.ipoa_boundif); @@ -3932,9 +4033,9 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) #if IPSTEALTH if (!ipstealth) { #endif /* IPSTEALTH */ - ip->ip_ttl -= IPTTLDEC; + ip->ip_ttl -= IPTTLDEC; #if IPSTEALTH - } +} #endif /* IPSTEALTH */ /* @@ -3947,7 +4048,7 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) */ RT_LOCK_SPIN(rt); if (rt->rt_ifp == m->m_pkthdr.rcvif && - !(rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) && + !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) && satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY && ipsendredirects && !srcrt && rt->rt_ifa != NULL) { struct in_ifaddr *ia = (struct in_ifaddr *)rt->rt_ifa; @@ -3957,10 +4058,11 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) RT_CONVERT_LOCK(rt); IFA_LOCK_SPIN(&ia->ia_ifa); if ((src & ia->ia_subnetmask) == ia->ia_subnet) { - if (rt->rt_flags & RTF_GATEWAY) + if (rt->rt_flags & RTF_GATEWAY) { dest = satosin(rt->rt_gateway)->sin_addr.s_addr; - else + } else { dest = pkt_dst.s_addr; + } /* * Router requirements says to only send * host redirects. @@ -3968,9 +4070,10 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) type = ICMP_REDIRECT; code = ICMP_REDIRECT_HOST; #if DIAGNOSTIC - if (ipprintfs) + if (ipprintfs) { printf("redirect (%d) to %lx\n", code, (u_int32_t)dest); + } #endif } IFA_UNLOCK(&ia->ia_ifa); @@ -3985,14 +4088,14 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFORWARD, - sizeof (*ipfwd_tag), M_NOWAIT, m); + sizeof(*ipfwd_tag), M_NOWAIT, m); if (tag == NULL) { error = ENOBUFS; m_freem(m); goto done; } - ipfwd_tag = (struct ip_fwd_tag *)(tag+1); + ipfwd_tag = (struct ip_fwd_tag *)(tag + 1); ipfwd_tag->next_hop = next_hop; m_tag_prepend(m, tag); @@ -4036,15 +4139,16 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) goto done; } } - if (mcopy == NULL) + if (mcopy == NULL) { goto done; + } switch (error) { - case 0: /* forwarded, but need redirect */ + case 0: /* forwarded, but need redirect */ /* type, code set above */ break; - case ENETUNREACH: /* shouldn't happen, checked above */ + case ENETUNREACH: /* shouldn't happen, checked above */ case EHOSTUNREACH: case ENETDOWN: case EHOSTDOWN: @@ -4061,13 +4165,15 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) break; } else { RT_LOCK_SPIN(rt); - if (rt->rt_ifp != NULL) + if (rt->rt_ifp != NULL) { nextmtu = rt->rt_ifp->if_mtu; + } RT_UNLOCK(rt); } #ifdef IPSEC - if (ipsec_bypass) + if (ipsec_bypass) { break; + } /* * If the packet is routed over IPsec tunnel, tell the @@ -4078,8 +4184,9 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) sp = ipsec4_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND, IP_FORWARDING, &ipsecerror); - if (sp == NULL) + if (sp == NULL) { break; + } /* * find the correct route for outer IPv4 @@ -4099,24 +4206,24 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) ipsechdr = ipsec_hdrsiz(sp); 
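The forwarding hunks above reject a packet with ICMP_TIMXCEED_INTRANS once ip_ttl <= IPTTLDEC, and otherwise decrement the TTL by IPTTLDEC before re-emitting it. A compact sketch of that hop-limit rule, braced in the style the patch enforces; the enum and function names are hypothetical, only the arithmetic mirrors the source:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_IPTTLDEC 1     /* mirrors IPTTLDEC */

    enum demo_verdict { DEMO_FORWARD, DEMO_TIME_EXCEEDED };

    /* Decide whether a packet may still be forwarded; decrement in place. */
    static enum demo_verdict
    demo_forward_ttl(uint8_t *ttl)
    {
        if (*ttl <= DEMO_IPTTLDEC) {
            return DEMO_TIME_EXCEEDED;  /* answer with ICMP time exceeded */
        }
        *ttl -= DEMO_IPTTLDEC;
        return DEMO_FORWARD;
    }

    int main(void)
    {
        uint8_t ttl = 1;

        printf("verdict=%d\n", demo_forward_ttl(&ttl));             /* 1: expired */
        ttl = 64;
        printf("verdict=%d ttl=%u\n", demo_forward_ttl(&ttl), ttl); /* 0, ttl=63 */
        return 0;
    }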
ipm = mtod(mcopy, struct ip *); - bcopy(&sp->req->saidx, &saidx, sizeof (saidx)); + bcopy(&sp->req->saidx, &saidx, sizeof(saidx)); saidx.mode = sp->req->saidx.mode; saidx.reqid = sp->req->saidx.reqid; sin = SIN(&saidx.src); if (sin->sin_len == 0) { - sin->sin_len = sizeof (*sin); + sin->sin_len = sizeof(*sin); sin->sin_family = AF_INET; sin->sin_port = IPSEC_PORT_ANY; bcopy(&ipm->ip_src, &sin->sin_addr, - sizeof (sin->sin_addr)); + sizeof(sin->sin_addr)); } sin = SIN(&saidx.dst); if (sin->sin_len == 0) { - sin->sin_len = sizeof (*sin); + sin->sin_len = sizeof(*sin); sin->sin_family = AF_INET; sin->sin_port = IPSEC_PORT_ANY; bcopy(&ipm->ip_dst, &sin->sin_addr, - sizeof (sin->sin_addr)); + sizeof(sin->sin_addr)); } sav = key_allocsa_policy(&saidx); if (sav != NULL) { @@ -4159,13 +4266,14 @@ ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop) } break; - case EACCES: /* ipfw denied packet */ + case EACCES: /* ipfw denied packet */ m_freem(mcopy); goto done; } - if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG) + if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG) { OSAddAtomic(1, &ipstat.ips_cantfrag); + } icmp_error(mcopy, type, code, dest, nextmtu); done: @@ -4181,7 +4289,7 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, struct timeval tv; getmicrotime(&tv); - mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof (tv), + mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof(tv), SCM_TIMESTAMP, SOL_SOCKET, mp); if (*mp == NULL) { goto no_mbufs; @@ -4191,7 +4299,7 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, uint64_t time; time = mach_absolute_time(); - mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof (time), + mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time), SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp); if (*mp == NULL) { goto no_mbufs; @@ -4201,15 +4309,15 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, uint64_t time; time = mach_continuous_time(); - mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof (time), - SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp); + mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time), + SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp); if (*mp == NULL) { goto no_mbufs; } } if (inp->inp_flags & INP_RECVDSTADDR) { mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_dst, - sizeof (struct in_addr), IP_RECVDSTADDR, IPPROTO_IP, mp); + sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP, mp); if (*mp == NULL) { goto no_mbufs; } @@ -4223,7 +4331,7 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, /* options were tossed already */ if (inp->inp_flags & INP_RECVOPTS) { mp = sbcreatecontrol_mbuf((caddr_t)opts_deleted_above, - sizeof (struct in_addr), IP_RECVOPTS, IPPROTO_IP, mp); + sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP, mp); if (*mp == NULL) { goto no_mbufs; } @@ -4231,7 +4339,7 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, /* ip_srcroute doesn't do what we want here, need to fix */ if (inp->inp_flags & INP_RECVRETOPTS) { mp = sbcreatecontrol_mbuf((caddr_t)ip_srcroute(), - sizeof (struct in_addr), IP_RECVRETOPTS, IPPROTO_IP, mp); + sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP, mp); if (*mp == NULL) { goto no_mbufs; } @@ -4246,7 +4354,7 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, * Make sure to accomodate the largest possible * size of SA(if_lladdr)->sa_len. 
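ip_savecontrol() above attaches SCM_TIMESTAMP (plus the monotonic and continuous variants) as control messages when the socket opts in. The userspace consumer side, sketched with the portable SO_TIMESTAMP option; bind()/connect() are elided, so as written this blocks waiting for a datagram:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/time.h>
    #include <netinet/in.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0), on = 1;
        char data[2048], cbuf[CMSG_SPACE(sizeof(struct timeval))];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = { 0 };
        struct cmsghdr *cm;

        if (fd < 0) {
            return 1;
        }
        /* ask for SCM_TIMESTAMP on every received datagram */
        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));

        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = cbuf;
        msg.msg_controllen = sizeof(cbuf);

        if (recvmsg(fd, &msg, 0) < 0) {
            return 1;
        }
        for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm)) {
            if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_TIMESTAMP) {
                struct timeval tv;

                memcpy(&tv, CMSG_DATA(cm), sizeof(tv));
                printf("rx at %ld.%06d\n", (long)tv.tv_sec, (int)tv.tv_usec);
            }
        }
        return 0;
    }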
*/ - _CASSERT(sizeof (sdlbuf) == (SOCK_MAXADDRLEN + 1)); + _CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1)); ifnet_head_lock_shared(); if ((ifp = m->m_pkthdr.rcvif) != NULL && @@ -4254,8 +4362,9 @@ ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip, struct ifaddr *ifa = ifnet_addrs[ifp->if_index - 1]; struct sockaddr_dl *sdp; - if (!ifa || !ifa->ifa_addr) + if (!ifa || !ifa->ifa_addr) { goto makedummy; + } IFA_LOCK_SPIN(ifa); sdp = SDL(ifa->ifa_addr); @@ -4286,7 +4395,7 @@ makedummy: } if (inp->inp_flags & INP_RECVTTL) { mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_ttl, - sizeof (ip->ip_ttl), IP_RECVTTL, IPPROTO_IP, mp); + sizeof(ip->ip_ttl), IP_RECVTTL, IPPROTO_IP, mp); if (*mp == NULL) { goto no_mbufs; } @@ -4294,7 +4403,7 @@ makedummy: if (inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) { int tc = m_get_traffic_class(m); - mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof (tc), + mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof(tc), SO_TRAFFIC_CLASS, SOL_SOCKET, mp); if (*mp == NULL) { goto no_mbufs; @@ -4303,13 +4412,13 @@ makedummy: if (inp->inp_flags & INP_PKTINFO) { struct in_pktinfo pi; - bzero(&pi, sizeof (struct in_pktinfo)); - bcopy(&ip->ip_dst, &pi.ipi_addr, sizeof (struct in_addr)); + bzero(&pi, sizeof(struct in_pktinfo)); + bcopy(&ip->ip_dst, &pi.ipi_addr, sizeof(struct in_addr)); pi.ipi_ifindex = (m != NULL && m->m_pkthdr.rcvif != NULL) ? m->m_pkthdr.rcvif->if_index : 0; mp = sbcreatecontrol_mbuf((caddr_t)&pi, - sizeof (struct in_pktinfo), IP_RECVPKTINFO, IPPROTO_IP, mp); + sizeof(struct in_pktinfo), IP_RECVPKTINFO, IPPROTO_IP, mp); if (*mp == NULL) { goto no_mbufs; } @@ -4321,11 +4430,11 @@ makedummy: goto no_mbufs; } } - return (0); + return 0; no_mbufs: ipstat.ips_pktdropcntrl++; - return (ENOBUFS); + return ENOBUFS; } static inline u_short @@ -4349,20 +4458,22 @@ ip_cksum(struct mbuf *m, int hlen) m->m_pkthdr.csum_data = 0xffff; } - if (sum != 0) + if (sum != 0) { OSAddAtomic(1, &ipstat.ips_badsum); + } - return (sum); + return sum; } static int ip_getstat SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = (size_t)sizeof (struct ipstat); + if (req->oldptr == USER_ADDR_NULL) { + req->oldlen = (size_t)sizeof(struct ipstat); + } - return (SYSCTL_OUT(req, &ipstat, MIN(sizeof (ipstat), req->oldlen))); + return SYSCTL_OUT(req, &ipstat, MIN(sizeof(ipstat), req->oldlen)); } void @@ -4380,8 +4491,9 @@ ip_setsrcifaddr_info(struct mbuf *m, uint32_t src_idx, struct in_ifaddr *ia) m->m_pkthdr.src_ifindex = ia->ia_ifp->if_index; } else { m->m_pkthdr.src_ifindex = src_idx; - if (src_idx != 0) + if (src_idx != 0) { m->m_pkthdr.pkt_flags |= PKTF_IFAINFO; + } } } @@ -4400,8 +4512,9 @@ ip_setdstifaddr_info(struct mbuf *m, uint32_t dst_idx, struct in_ifaddr *ia) m->m_pkthdr.dst_ifindex = ia->ia_ifp->if_index; } else { m->m_pkthdr.dst_ifindex = dst_idx; - if (dst_idx != 0) + if (dst_idx != 0) { m->m_pkthdr.pkt_flags |= PKTF_IFAINFO; + } } } @@ -4410,16 +4523,19 @@ ip_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *iaf) { VERIFY(m->m_flags & M_PKTHDR); - if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) - return (-1); + if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) { + return -1; + } - if (src_idx != NULL) + if (src_idx != NULL) { *src_idx = m->m_pkthdr.src_ifindex; + } - if (iaf != NULL) + if (iaf != NULL) { *iaf = 0; + } - return (0); + return 0; } int @@ -4427,16 +4543,19 @@ ip_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *iaf) { VERIFY(m->m_flags & M_PKTHDR); - if (!(m->m_pkthdr.pkt_flags & 
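ip_cksum() above verifies the header checksum, short-circuiting to 0xffff in csum_data when hardware already validated it. The underlying arithmetic is the RFC 1071 one's-complement sum; a flat-buffer userspace sketch, whereas the kernel's in_cksum()/ip_cksum() walk mbuf chains:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* RFC 1071 Internet checksum over a contiguous buffer. */
    static uint16_t
    demo_in_cksum(const void *buf, size_t len)
    {
        const uint8_t *p = buf;
        uint32_t sum = 0;

        while (len > 1) {
            sum += ((uint32_t)p[0] << 8) | p[1];
            p += 2;
            len -= 2;
        }
        if (len > 0) {              /* odd trailing byte */
            sum += (uint32_t)p[0] << 8;
        }
        while (sum >> 16) {         /* fold the carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        }
        return (uint16_t)~sum;
    }

    int main(void)
    {
        uint8_t hdr[20] = { 0x45, 0x00, 0x00, 0x14 };   /* toy header */

        printf("cksum=0x%04x\n", demo_in_cksum(hdr, sizeof(hdr)));
        return 0;
    }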
PKTF_IFAINFO)) - return (-1); + if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) { + return -1; + } - if (dst_idx != NULL) + if (dst_idx != NULL) { *dst_idx = m->m_pkthdr.dst_ifindex; + } - if (iaf != NULL) + if (iaf != NULL) { *iaf = 0; + } - return (0); + return 0; } /* @@ -4460,8 +4579,9 @@ gre_input(struct mbuf *m, int off) * If no matching tunnel that is up is found, we inject * the mbuf to raw ip socket to see if anyone picks it up. */ - if (m != NULL) + if (m != NULL) { rip_input(m, off); + } } /* @@ -4474,7 +4594,7 @@ ip_gre_register_input(gre_input_func_t fn) gre_input_func = fn; lck_mtx_unlock(inet_domain_mutex); - return (0); + return 0; } #if (DEBUG || DEVELOPMENT) @@ -4486,8 +4606,9 @@ sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS i = ip_input_measure; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* impose bounds */ if (i < 0 || i > 1) { error = EINVAL; @@ -4498,7 +4619,7 @@ sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS } ip_input_measure = i; done: - return (error); + return error; } static int @@ -4510,8 +4631,9 @@ sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS i = ip_input_measure_bins; error = sysctl_handle_quad(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* validate data */ if (!net_perf_validate_bins(i)) { error = EINVAL; @@ -4519,16 +4641,17 @@ sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS } ip_input_measure_bins = i; done: - return (error); + return error; } static int sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = (size_t)sizeof (struct ipstat); + if (req->oldptr == USER_ADDR_NULL) { + req->oldlen = (size_t)sizeof(struct ipstat); + } - return (SYSCTL_OUT(req, &net_perf, MIN(sizeof (net_perf), req->oldlen))); + return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen)); } #endif /* (DEBUG || DEVELOPMENT) */ diff --git a/bsd/netinet/ip_output.c b/bsd/netinet/ip_output.c index 35f778d25..ee8eef60f 100644 --- a/bsd/netinet/ip_output.c +++ b/bsd/netinet/ip_output.c @@ -66,7 +66,7 @@ * Version 2.0. 
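The handlers above (sysctl_reset_ip_input_stats, sysctl_ip_input_measure_bins, sysctl_ip_input_getperf) follow the same shape as sysctl_ipforwarding earlier: copy the value in, bounds-check, commit, and return 0 or an errno. From userspace the published OIDs are read with sysctlbyname(3); net.inet.ip.forwarding serves here as a known example OID on Darwin/BSD:

    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/sysctl.h>

    int main(void)
    {
        int fwd = 0;
        size_t len = sizeof(fwd);

        /* the OID guarded by sysctl_ipforwarding */
        if (sysctlbyname("net.inet.ip.forwarding", &fwd, &len, NULL, 0) == 0) {
            printf("ip forwarding: %d\n", fwd);
        }
        return 0;
    }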
*/ -#define _IP_VHL +#define _IP_VHL #include #include @@ -113,10 +113,10 @@ #include #endif /* CONFIG_MACF_NET */ -#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 1) -#define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 3) -#define DBG_FNC_IP_OUTPUT NETDBG_CODE(DBG_NETIP, (1 << 8) | 1) -#define DBG_FNC_IPSEC4_OUTPUT NETDBG_CODE(DBG_NETIP, (2 << 8) | 1) +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 3) +#define DBG_FNC_IP_OUTPUT NETDBG_CODE(DBG_NETIP, (1 << 8) | 1) +#define DBG_FNC_IPSEC4_OUTPUT NETDBG_CODE(DBG_NETIP, (2 << 8) | 1) #if IPSEC #include @@ -124,7 +124,7 @@ #if IPSEC_DEBUG #include #else -#define KEYDEBUG(lev, arg) +#define KEYDEBUG(lev, arg) #endif #endif /* IPSEC */ @@ -148,10 +148,10 @@ #endif /* PF */ #if IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG -#define print_ip(a) \ - printf("%ld.%ld.%ld.%ld", (ntohl(a.s_addr) >> 24) & 0xFF, \ - (ntohl(a.s_addr) >> 16) & 0xFF, \ - (ntohl(a.s_addr) >> 8) & 0xFF, \ +#define print_ip(a) \ + printf("%ld.%ld.%ld.%ld", (ntohl(a.s_addr) >> 24) & 0xFF, \ + (ntohl(a.s_addr) >> 16) & 0xFF, \ + (ntohl(a.s_addr) >> 8) & 0xFF, \ (ntohl(a.s_addr)) & 0xFF); #endif /* IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG */ @@ -178,73 +178,73 @@ extern int ipsec_bypass; static int ip_maxchainsent = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, maxchainsent, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip_maxchainsent, 0, - "use dlil_output_list"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip_maxchainsent, 0, + "use dlil_output_list"); #if DEBUG static int forge_ce = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, forge_ce, - CTLFLAG_RW | CTLFLAG_LOCKED, &forge_ce, 0, - "Forge ECN CE"); + CTLFLAG_RW | CTLFLAG_LOCKED, &forge_ce, 0, + "Forge ECN CE"); #endif /* DEBUG */ static int ip_select_srcif_debug = 0; SYSCTL_INT(_net_inet_ip, OID_AUTO, select_srcif_debug, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip_select_srcif_debug, 0, - "log source interface selection debug info"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip_select_srcif_debug, 0, + "log source interface selection debug info"); static int ip_output_measure = 0; SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &ip_output_measure, 0, sysctl_reset_ip_output_stats, "I", - "Do time measurement"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ip_output_measure, 0, sysctl_reset_ip_output_stats, "I", + "Do time measurement"); static uint64_t ip_output_measure_bins = 0; SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf_bins, - CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_output_measure_bins, 0, - sysctl_ip_output_measure_bins, "I", - "bins for chaining performance data histogram"); + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_output_measure_bins, 0, + sysctl_ip_output_measure_bins, "I", + "bins for chaining performance data histogram"); static net_perf_t net_perf; SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf_data, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_ip_output_getperf, "S,net_perf", - "IP output performance data (struct net_perf, net/net_perf.h)"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_ip_output_getperf, "S,net_perf", + "IP output performance data (struct net_perf, net/net_perf.h)"); __private_extern__ int rfc6864 = 1; SYSCTL_INT(_net_inet_ip, OID_AUTO, rfc6864, CTLFLAG_RW | CTLFLAG_LOCKED, - &rfc6864, 0, "updated ip id field behavior"); + &rfc6864, 0, "updated ip id field behavior"); -#define IMO_TRACE_HIST_SIZE 32 /* size of trace history */ +#define IMO_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ 
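The print_ip() debug macro above renders an address by shifting and masking ntohl(a.s_addr) one byte at a time. Outside of debug printf contexts the same result comes from inet_ntop(3), as in this sketch:

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        struct in_addr a = { .s_addr = htonl(0xC0A80001) }; /* 192.168.0.1 */
        char buf[INET_ADDRSTRLEN];

        /* same output as the shift-and-mask macro */
        if (inet_ntop(AF_INET, &a, buf, sizeof(buf)) != NULL) {
            printf("%s\n", buf);
        }
        return 0;
    }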
__private_extern__ unsigned int imo_trace_hist_size = IMO_TRACE_HIST_SIZE; struct ip_moptions_dbg { - struct ip_moptions imo; /* ip_moptions */ - u_int16_t imo_refhold_cnt; /* # of IMO_ADDREF */ - u_int16_t imo_refrele_cnt; /* # of IMO_REMREF */ + struct ip_moptions imo; /* ip_moptions */ + u_int16_t imo_refhold_cnt; /* # of IMO_ADDREF */ + u_int16_t imo_refrele_cnt; /* # of IMO_REMREF */ /* * Alloc and free callers. */ - ctrace_t imo_alloc; - ctrace_t imo_free; + ctrace_t imo_alloc; + ctrace_t imo_free; /* * Circular lists of IMO_ADDREF and IMO_REMREF callers. */ - ctrace_t imo_refhold[IMO_TRACE_HIST_SIZE]; - ctrace_t imo_refrele[IMO_TRACE_HIST_SIZE]; + ctrace_t imo_refhold[IMO_TRACE_HIST_SIZE]; + ctrace_t imo_refrele[IMO_TRACE_HIST_SIZE]; }; #if DEBUG -static unsigned int imo_debug = 1; /* debugging (enabled) */ +static unsigned int imo_debug = 1; /* debugging (enabled) */ #else -static unsigned int imo_debug; /* debugging (disabled) */ +static unsigned int imo_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int imo_size; /* size of zone element */ -static struct zone *imo_zone; /* zone for ip_moptions */ +static unsigned int imo_size; /* size of zone element */ +static struct zone *imo_zone; /* zone for ip_moptions */ -#define IMO_ZONE_MAX 64 /* maximum elements in zone */ -#define IMO_ZONE_NAME "ip_moptions" /* zone name */ +#define IMO_ZONE_MAX 64 /* maximum elements in zone */ +#define IMO_ZONE_NAME "ip_moptions" /* zone name */ /* * IP output. The packet in mbuf chain m contains a skeletal IP @@ -256,7 +256,7 @@ int ip_output(struct mbuf *m0, struct mbuf *opt, struct route *ro, int flags, struct ip_moptions *imo, struct ip_out_args *ipoa) { - return (ip_output_list(m0, 0, opt, ro, flags, imo, ipoa)); + return ip_output_list(m0, 0, opt, ro, flags, imo, ipoa); } /* @@ -278,9 +278,9 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, struct ip_out_args *ipoa) { struct ip *ip; - struct ifnet *ifp = NULL; /* not refcnt'd */ + struct ifnet *ifp = NULL; /* not refcnt'd */ struct mbuf *m = m0, *prevnxt = NULL, **mppn = &prevnxt; - int hlen = sizeof (struct ip); + int hlen = sizeof(struct ip); int len = 0, error = 0; struct sockaddr_in *dst = NULL; struct in_ifaddr *ia = NULL, *src_ia = NULL; @@ -331,21 +331,21 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, #endif /* DUMMYNET */ struct ipf_pktopts ipf_pktopts; } ipobz; -#define ipsec_state ipobz.ipsec_state -#define necp_route ipobz.necp_route -#define args ipobz.args -#define sro_fwd ipobz.sro_fwd -#define saved_route ipobz.saved_route -#define ipf_pktopts ipobz.ipf_pktopts +#define ipsec_state ipobz.ipsec_state +#define necp_route ipobz.necp_route +#define args ipobz.args +#define sro_fwd ipobz.sro_fwd +#define saved_route ipobz.saved_route +#define ipf_pktopts ipobz.ipf_pktopts union { struct { - boolean_t select_srcif : 1; /* set once */ - boolean_t srcbound : 1; /* set once */ - boolean_t nocell : 1; /* set once */ + boolean_t select_srcif : 1; /* set once */ + boolean_t srcbound : 1; /* set once */ + boolean_t nocell : 1; /* set once */ boolean_t isbroadcast : 1; boolean_t didfilter : 1; - boolean_t noexpensive : 1; /* set once */ - boolean_t awdl_unrestricted : 1; /* set once */ + boolean_t noexpensive : 1; /* set once */ + boolean_t awdl_unrestricted : 1; /* set once */ #if IPFIREWALL_FORWARD boolean_t fwd_rewrite_src : 1; #endif /* IPFIREWALL_FORWARD */ @@ -359,34 +359,36 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, * Here we check for 
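ip_output_list() below packs its set-once flags (select_srcif, srcbound, nocell, ...) into one-bit boolean_t bit-fields, and the source repeatedly notes that "double negation [is] needed for bool bit field" before assigning a mask test into one. A standalone illustration of both points; the struct and mask are hypothetical:

    #include <stdio.h>

    struct demo_flags {
        unsigned int select_srcif : 1;  /* set once */
        unsigned int srcbound     : 1;  /* set once */
        unsigned int nocell       : 1;  /* set once */
    };

    int main(void)
    {
        struct demo_flags f = { 0 };
        unsigned int ipoa_flags = 0x40; /* hypothetical flag mask */

        /* "double negation" collapses the mask test to 0/1 before
         * it is squeezed into the one-bit field */
        f.srcbound = !!(ipoa_flags & 0x40);

        printf("srcbound=%u sizeof=%zu\n", (unsigned)f.srcbound, sizeof(f));
        return 0;
    }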
restrictions when sending frames. * N.B.: IPv4 over internal co-processor interfaces is not allowed. */ -#define IP_CHECK_RESTRICTIONS(_ifp, _ipobf) \ - (((_ipobf).nocell && IFNET_IS_CELLULAR(_ifp)) || \ - ((_ipobf).noexpensive && IFNET_IS_EXPENSIVE(_ifp)) || \ - (IFNET_IS_INTCOPROC(_ifp)) || \ +#define IP_CHECK_RESTRICTIONS(_ifp, _ipobf) \ + (((_ipobf).nocell && IFNET_IS_CELLULAR(_ifp)) || \ + ((_ipobf).noexpensive && IFNET_IS_EXPENSIVE(_ifp)) || \ + (IFNET_IS_INTCOPROC(_ifp)) || \ (!(_ipobf).awdl_unrestricted && IFNET_IS_AWDL_RESTRICTED(_ifp))) - if (ip_output_measure) + if (ip_output_measure) { net_perf_start_time(&net_perf, &start_tv); + } KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0); VERIFY(m0->m_flags & M_PKTHDR); packetlist = m0; /* zero out {ipsec_state, args, sro_fwd, saved_route, ipf_pktops} */ - bzero(&ipobz, sizeof (ipobz)); + bzero(&ipobz, sizeof(ipobz)); ippo = &ipf_pktopts; #if IPFIREWALL || DUMMYNET - if (SLIST_EMPTY(&m0->m_pkthdr.tags)) + if (SLIST_EMPTY(&m0->m_pkthdr.tags)) { goto ipfw_tags_done; + } /* Grab info from mtags prepended to the chain */ #if DUMMYNET if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) { - struct dn_pkt_tag *dn_tag; + struct dn_pkt_tag *dn_tag; - dn_tag = (struct dn_pkt_tag *)(tag+1); + dn_tag = (struct dn_pkt_tag *)(tag + 1); args.fwa_ipfw_rule = dn_tag->dn_ipfw_rule; args.fwa_pf_rule = dn_tag->dn_pf_rule; opt = NULL; @@ -394,7 +396,7 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, ro = &saved_route; imo = NULL; - bcopy(&dn_tag->dn_dst, &dst_buf, sizeof (dst_buf)); + bcopy(&dn_tag->dn_dst, &dst_buf, sizeof(dst_buf)); dst = &dst_buf; ifp = dn_tag->dn_ifp; flags = dn_tag->dn_flags; @@ -410,9 +412,9 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, #if IPDIVERT if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) { - struct divert_tag *div_tag; + struct divert_tag *div_tag; - div_tag = (struct divert_tag *)(tag+1); + div_tag = (struct divert_tag *)(tag + 1); args.fwa_divert_rule = div_tag->cookie; m_tag_delete(m0, tag); @@ -422,9 +424,9 @@ ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt, #if IPFIREWALL if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) { - struct ip_fwd_tag *ipfwd_tag; + struct ip_fwd_tag *ipfwd_tag; - ipfwd_tag = (struct ip_fwd_tag *)(tag+1); + ipfwd_tag = (struct ip_fwd_tag *)(tag + 1); next_hop_from_ipfwd_tag = ipfwd_tag->next_hop; m_tag_delete(m0, tag); @@ -435,17 +437,18 @@ ipfw_tags_done: #endif /* IPFIREWALL || DUMMYNET */ m = m0; - m->m_pkthdr.pkt_flags &= ~(PKTF_LOOP|PKTF_IFAINFO); + m->m_pkthdr.pkt_flags &= ~(PKTF_LOOP | PKTF_IFAINFO); #if IPSEC if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) { /* If packet is bound to an interface, check bound policies */ if ((flags & IP_OUTARGS) && (ipoa != NULL) && - (ipoa->ipoa_flags & IPOAF_BOUND_IF) && - ipoa->ipoa_boundif != IFSCOPE_NONE) { + (ipoa->ipoa_flags & IPOAF_BOUND_IF) && + ipoa->ipoa_boundif != IFSCOPE_NONE) { if (ipsec4_getpolicybyinterface(m, IPSEC_DIR_OUTBOUND, - &flags, ipoa, &sp) != 0) + &flags, ipoa, &sp) != 0) { goto bad; + } } } #endif /* IPSEC */ @@ -471,8 +474,9 @@ ipfw_tags_done: /* double negation needed for bool bit field */ ipobf.srcbound = !!(ipoa->ipoa_flags & IPOAF_BOUND_SRCADDR); - if (ipobf.srcbound) + if (ipobf.srcbound) { ipf_pktopts.ippo_flags |= IPPOF_BOUND_SRCADDR; + } } else { ipobf.select_srcif = FALSE; ipobf.srcbound = FALSE; @@ -493,8 +497,9 @@ 
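The tag hunks above all recover their payload with "(tag + 1)": m_tag_create() is passed the payload size and places the payload immediately after the m_tag header, so pointer arithmetic on the header type lands on it. A malloc-based sketch of that layout with stand-in types:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for the kernel's m_tag header and a dummynet payload. */
    struct demo_tag { unsigned short type, len; };
    struct demo_payload { int rule; };

    int main(void)
    {
        /* header and payload allocated back to back, as the tag KPI does */
        struct demo_tag *tag = malloc(sizeof(*tag) + sizeof(struct demo_payload));
        struct demo_payload *pl;

        if (tag == NULL) {
            return 1;
        }
        tag->type = 1;
        tag->len = sizeof(struct demo_payload);

        /* "(tag + 1)" steps one whole header past tag: the payload */
        pl = (struct demo_payload *)(tag + 1);
        pl->rule = 42;

        printf("rule=%d\n", pl->rule);
        free(tag);
        return 0;
    }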
ipfw_tags_done: ipobf.noexpensive = TRUE; ipf_pktopts.ippo_flags |= IPPOF_NO_IFF_EXPENSIVE; } - if (ipoa->ipoa_flags & IPOAF_AWDL_UNRESTRICTED) + if (ipoa->ipoa_flags & IPOAF_AWDL_UNRESTRICTED) { ipobf.awdl_unrestricted = TRUE; + } adv = &ipoa->ipoa_flowadv; adv->code = FADV_SUCCESS; ipoa->ipoa_retflags = 0; @@ -527,11 +532,13 @@ ipfw_tags_done: } #if IPFIREWALL - if (args.fwa_ipfw_rule != NULL) + if (args.fwa_ipfw_rule != NULL) { goto skip_ipsec; + } #endif /* IPFIREWALL */ - if (args.fwa_pf_rule != NULL) + if (args.fwa_pf_rule != NULL) { goto sendit; + } } #endif /* DUMMYNET */ @@ -547,18 +554,20 @@ loopit: /* * No need to proccess packet twice if we've already seen it. */ - if (!SLIST_EMPTY(&m->m_pkthdr.tags)) + if (!SLIST_EMPTY(&m->m_pkthdr.tags)) { inject_filter_ref = ipf_get_inject_filter(m); - else + } else { inject_filter_ref = NULL; + } if (opt) { m = ip_insertoptions(m, opt, &len); hlen = len; /* Update the chain */ if (m != m0) { - if (m0 == packetlist) + if (m0 == packetlist) { packetlist = m; + } m0 = m; } } @@ -592,7 +601,7 @@ loopit: /* * Fill in IP header. */ - if (!(flags & (IP_FORWARDING|IP_RAWOUTPUT))) { + if (!(flags & (IP_FORWARDING | IP_RAWOUTPUT))) { ip->ip_vhl = IP_MAKE_VHL(IPVERSION, hlen >> 2); ip->ip_off &= IP_DF; if (rfc6864 && IP_OFF_IS_ATOMIC(ip->ip_off)) { @@ -647,8 +656,9 @@ loopit: * as a hint) or during the next transmit. */ if (ROUTE_UNUSABLE(ro) || dst->sin_family != AF_INET || - dst->sin_addr.s_addr != pkt_dst.s_addr) + dst->sin_addr.s_addr != pkt_dst.s_addr) { ROUTE_RELEASE(ro); + } /* * If we're doing source interface selection, we may not @@ -656,13 +666,14 @@ loopit: * count otherwise. */ if (!ipobf.select_srcif && ro->ro_rt != NULL && - RT_GENID_OUTOFSYNC(ro->ro_rt)) + RT_GENID_OUTOFSYNC(ro->ro_rt)) { RT_GENID_SYNC(ro->ro_rt); + } } if (ro->ro_rt == NULL) { - bzero(dst, sizeof (*dst)); + bzero(dst, sizeof(*dst)); dst->sin_family = AF_INET; - dst->sin_len = sizeof (*dst); + dst->sin_len = sizeof(*dst); dst->sin_addr = pkt_dst; } /* @@ -670,8 +681,9 @@ loopit: * short circuit routing lookup. */ if (flags & IP_ROUTETOIF) { - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == NULL) { ia = ifatoia(ifa_ifwithnet(sintosa(dst))); if (ia == NULL) { @@ -701,8 +713,9 @@ loopit: * packets if the interface is specified. */ ipobf.isbroadcast = FALSE; - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } /* Macro takes reference on ia */ IFP_TO_IA(ifp, ia); @@ -734,8 +747,9 @@ loopit: IFA_REMREF(ia0); ia0 = NULL; error = EHOSTUNREACH; - if (flags & IP_OUTARGS) + if (flags & IP_OUTARGS) { ipoa->ipoa_retflags |= IPOARF_IFDENIED; + } goto bad; } @@ -768,8 +782,9 @@ loopit: * the primary interface of the system. */ if (ia0 != NULL) { - if (ifscope == IFSCOPE_NONE) + if (ifscope == IFSCOPE_NONE) { ifscope = ia0->ifa_ifp->if_index; + } cloneok = (!(flags & IP_RAWOUTPUT) && !(IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)))); } @@ -804,8 +819,9 @@ loopit: * These exceptions will no longer be necessary when * the RTF_PRCLONING scheme is no longer present. */ - if (cloneok || dst->sin_addr.s_addr == INADDR_BROADCAST) + if (cloneok || dst->sin_addr.s_addr == INADDR_BROADCAST) { ign &= ~RTF_PRCLONING; + } /* * Loosen the route lookup criteria if the ifscope @@ -816,10 +832,11 @@ loopit: * address will be rewritten by the packet filter * prior to the RFC1122 loopback check below. 
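The header-fill hunk above consults rfc6864 and IP_OFF_IS_ATOMIC(ip->ip_off) before deciding whether the IP ID can be elided. RFC 6864 allows a zero ID only for atomic datagrams: DF set, not a fragment. A sketch under that assumed reading of the macro; the DEMO_ constants mirror the standard ip_off bit layout:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_IP_DF      0x4000  /* don't fragment */
    #define DEMO_IP_MF      0x2000  /* more fragments */
    #define DEMO_IP_OFFMASK 0x1fff  /* fragment offset */

    /* Assumed equivalent of IP_OFF_IS_ATOMIC(): DF set and not a fragment. */
    static int
    demo_ip_off_is_atomic(uint16_t off)
    {
        return (off & (DEMO_IP_DF | DEMO_IP_MF | DEMO_IP_OFFMASK)) == DEMO_IP_DF;
    }

    int main(void)
    {
        /* RFC 6864 permits ip_id == 0 only for atomic datagrams */
        printf("DF only:  atomic=%d\n", demo_ip_off_is_atomic(DEMO_IP_DF));
        printf("fragment: atomic=%d\n", demo_ip_off_is_atomic(DEMO_IP_MF));
        return 0;
    }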
*/ - if (ifscope == lo_ifp->if_index) + if (ifscope == lo_ifp->if_index) { rtalloc_ign(ro, ign); - else + } else { rtalloc_scoped_ign(ro, ign, ifscope); + } /* * If the route points to a cellular/expensive interface @@ -852,8 +869,9 @@ loopit: goto bad; } - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } RT_LOCK_SPIN(ro->ro_rt); ia = ifatoia(ro->ro_rt->rt_ifa); if (ia != NULL) { @@ -894,13 +912,14 @@ loopit: m->m_pkthdr.rcvif = ia->ia_ifa.ifa_ifp; - if (ia0 != NULL) + if (ia0 != NULL) { srcidx = ia0->ifa_ifp->if_index; - else if ((ro->ro_flags & ROF_SRCIF_SELECTED) && - ro->ro_srcia != NULL) + } else if ((ro->ro_flags & ROF_SRCIF_SELECTED) && + ro->ro_srcia != NULL) { srcidx = ro->ro_srcia->ifa_ifp->if_index; - else + } else { srcidx = 0; + } ip_setsrcifaddr_info(m, srcidx, NULL); ip_setdstifaddr_info(m, 0, ia); @@ -934,10 +953,12 @@ loopit: vif = imo->imo_multicast_vif; ttl = imo->imo_multicast_ttl; loop = imo->imo_multicast_loop; - if (!(flags & IP_RAWOUTPUT)) + if (!(flags & IP_RAWOUTPUT)) { ip->ip_ttl = ttl; - if (imo->imo_multicast_ifp != NULL) + } + if (imo->imo_multicast_ifp != NULL) { ifp = imo->imo_multicast_ifp; + } IMO_UNLOCK(imo); } else if (!(flags & IP_RAWOUTPUT)) { vif = -1; @@ -987,7 +1008,7 @@ loopit: * forbid loopback, loop back a copy. */ if (!TAILQ_EMPTY(&ipv4_filters)) { - struct ipfilter *filter; + struct ipfilter *filter; int seen = (inject_filter_ref == NULL); if (imo != NULL) { @@ -1011,8 +1032,9 @@ loopit: TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) { if (seen == 0) { if ((struct ipfilter *) - inject_filter_ref == filter) + inject_filter_ref == filter) { seen = 1; + } } else if (filter->ipf_filter. ipf_output != NULL) { errno_t result; @@ -1044,8 +1066,9 @@ loopit: } ip_mloopback(srcifp, ifp, m, dst, hlen); } - if (inm != NULL) + if (inm != NULL) { INM_REMREF(inm); + } /* * Multicasts with a time-to-live of zero may be looped- * back, above, but must not be transmitted on a network. @@ -1118,8 +1141,9 @@ sendit: args.fwa_ro = ro; args.fwa_dst = dst; args.fwa_oflags = flags; - if (flags & IP_OUTARGS) + if (flags & IP_OUTARGS) { args.fwa_ipoa = ipoa; + } rc = pf_af_hook(ifp, mppn, &m, AF_INET, FALSE, &args); #else /* DUMMYNET */ rc = pf_af_hook(ifp, mppn, &m, AF_INET, FALSE, NULL); @@ -1129,8 +1153,9 @@ sendit: m = *mppn; /* Skip ahead if first packet in list got dropped */ - if (packetlist == m0) + if (packetlist == m0) { packetlist = m; + } if (m != NULL) { m0 = m; @@ -1162,7 +1187,7 @@ sendit: } if (!ipobf.didfilter && !TAILQ_EMPTY(&ipv4_filters)) { - struct ipfilter *filter; + struct ipfilter *filter; int seen = (inject_filter_ref == NULL); ipf_pktopts.ippo_flags &= ~IPPOF_MCAST_OPTS; @@ -1186,8 +1211,9 @@ sendit: TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) { if (seen == 0) { if ((struct ipfilter *)inject_filter_ref == - filter) + filter) { seen = 1; + } } else if (filter->ipf_filter.ipf_output) { errno_t result; result = filter->ipf_filter. @@ -1214,12 +1240,29 @@ sendit: #if NECP /* Process Network Extension Policy. Will Pass, Drop, or Rebind packet. */ - necp_matched_policy_id = necp_ip_output_find_policy_match (m, - flags, (flags & IP_OUTARGS) ? ipoa : NULL, &necp_result, &necp_result_parameter); + necp_matched_policy_id = necp_ip_output_find_policy_match(m, + flags, (flags & IP_OUTARGS) ? 
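The multicast block above pulls ttl and loop out of the socket's ip_moptions (imo_multicast_ttl, imo_multicast_loop) under IMO_LOCK when deciding how to emit the packet and whether to call ip_mloopback(). Those fields are populated from userspace with the standard socket options:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        unsigned char ttl = 4, loop = 0;

        if (fd < 0) {
            return 1;
        }
        /* these feed imo_multicast_ttl / imo_multicast_loop */
        setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl));
        setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop));

        printf("multicast ttl=%u loop=%u\n", ttl, loop);
        return 0;
    }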
ipoa : NULL, &necp_result, &necp_result_parameter); if (necp_matched_policy_id) { necp_mark_packet_from_ip(m, necp_matched_policy_id); switch (necp_result) { - case NECP_KERNEL_POLICY_RESULT_PASS: + case NECP_KERNEL_POLICY_RESULT_PASS: + /* Check if the interface is allowed */ + if (!necp_packet_is_allowed_over_interface(m, ifp)) { + error = EHOSTUNREACH; + OSAddAtomic(1, &ipstat.ips_necp_policy_drop); + goto bad; + } + goto skip_ipsec; + case NECP_KERNEL_POLICY_RESULT_DROP: + case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: + /* Flow divert packets should be blocked at the IP layer */ + error = EHOSTUNREACH; + OSAddAtomic(1, &ipstat.ips_necp_policy_drop); + goto bad; + case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: { + /* Verify that the packet is being routed to the tunnel */ + struct ifnet *policy_ifp = necp_get_ifnet_from_result_parameter(&necp_result_parameter); + if (policy_ifp == ifp) { /* Check if the interface is allowed */ if (!necp_packet_is_allowed_over_interface(m, ifp)) { error = EHOSTUNREACH; @@ -1227,45 +1270,28 @@ sendit: goto bad; } goto skip_ipsec; - case NECP_KERNEL_POLICY_RESULT_DROP: - case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT: - /* Flow divert packets should be blocked at the IP layer */ - error = EHOSTUNREACH; - OSAddAtomic(1, &ipstat.ips_necp_policy_drop); - goto bad; - case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: { - /* Verify that the packet is being routed to the tunnel */ - struct ifnet *policy_ifp = necp_get_ifnet_from_result_parameter(&necp_result_parameter); - if (policy_ifp == ifp) { + } else { + if (necp_packet_can_rebind_to_ifnet(m, policy_ifp, &necp_route, AF_INET)) { /* Check if the interface is allowed */ - if (!necp_packet_is_allowed_over_interface(m, ifp)) { + if (!necp_packet_is_allowed_over_interface(m, policy_ifp)) { error = EHOSTUNREACH; OSAddAtomic(1, &ipstat.ips_necp_policy_drop); goto bad; } + + /* Set ifp to the tunnel interface, since it is compatible with the packet */ + ifp = policy_ifp; + ro = &necp_route; goto skip_ipsec; } else { - if (necp_packet_can_rebind_to_ifnet(m, policy_ifp, &necp_route, AF_INET)) { - /* Check if the interface is allowed */ - if (!necp_packet_is_allowed_over_interface(m, policy_ifp)) { - error = EHOSTUNREACH; - OSAddAtomic(1, &ipstat.ips_necp_policy_drop); - goto bad; - } - - /* Set ifp to the tunnel interface, since it is compatible with the packet */ - ifp = policy_ifp; - ro = &necp_route; - goto skip_ipsec; - } else { - error = ENETUNREACH; - OSAddAtomic(1, &ipstat.ips_necp_policy_drop); - goto bad; - } + error = ENETUNREACH; + OSAddAtomic(1, &ipstat.ips_necp_policy_drop); + goto bad; } } - default: - break; + } + default: + break; } } /* Catch-all to check if the interface is allowed */ @@ -1277,8 +1303,9 @@ sendit: #endif /* NECP */ #if IPSEC - if (ipsec_bypass != 0 || (flags & IP_NOIPSEC)) + if (ipsec_bypass != 0 || (flags & IP_NOIPSEC)) { goto skip_ipsec; + } KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0); @@ -1286,15 +1313,15 @@ sendit: /* get SP for this packet */ if (so != NULL) { sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND, - so, &error); + so, &error); } else { sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND, - flags, &error); + flags, &error); } if (sp == NULL) { IPSEC_STAT_INCREMENT(ipsecstat.out_inval); KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, - 0, 0, 0, 0, 0); + 0, 0, 0, 0, 0); goto bad; } } @@ -1342,86 +1369,88 @@ sendit: printf("ip_output: Invalid policy found. 
%d\n", sp->policy); } { - ipsec_state.m = m; - if (flags & IP_ROUTETOIF) { - bzero(&ipsec_state.ro, sizeof (ipsec_state.ro)); - } else { - route_copyout((struct route *)&ipsec_state.ro, ro, sizeof (struct route)); - } - ipsec_state.dst = SA(dst); + ipsec_state.m = m; + if (flags & IP_ROUTETOIF) { + bzero(&ipsec_state.ro, sizeof(ipsec_state.ro)); + } else { + route_copyout((struct route *)&ipsec_state.ro, ro, sizeof(struct route)); + } + ipsec_state.dst = SA(dst); - ip->ip_sum = 0; + ip->ip_sum = 0; - /* - * XXX - * delayed checksums are not currently compatible with IPsec - */ - if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) - in_delayed_cksum(m); + /* + * XXX + * delayed checksums are not currently compatible with IPsec + */ + if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { + in_delayed_cksum(m); + } #if BYTE_ORDER != BIG_ENDIAN - HTONS(ip->ip_len); - HTONS(ip->ip_off); + HTONS(ip->ip_len); + HTONS(ip->ip_off); #endif - DTRACE_IP6(send, struct mbuf *, m, struct inpcb *, NULL, - struct ip *, ip, struct ifnet *, ifp, - struct ip *, ip, struct ip6_hdr *, NULL); + DTRACE_IP6(send, struct mbuf *, m, struct inpcb *, NULL, + struct ip *, ip, struct ifnet *, ifp, + struct ip *, ip, struct ip6_hdr *, NULL); - error = ipsec4_output(&ipsec_state, sp, flags); - if (ipsec_state.tunneled == 6) { - m0 = m = NULL; - error = 0; - goto bad; - } + error = ipsec4_output(&ipsec_state, sp, flags); + if (ipsec_state.tunneled == 6) { + m0 = m = NULL; + error = 0; + goto bad; + } - m0 = m = ipsec_state.m; + m0 = m = ipsec_state.m; #if DUMMYNET - /* - * If we're about to use the route in ipsec_state - * and this came from dummynet, cleaup now. - */ - if (ro == &saved_route && - (!(flags & IP_ROUTETOIF) || ipsec_state.tunneled)) - ROUTE_RELEASE(ro); -#endif /* DUMMYNET */ - - if (flags & IP_ROUTETOIF) { /* - * if we have tunnel mode SA, we may need to ignore - * IP_ROUTETOIF. + * If we're about to use the route in ipsec_state + * and this came from dummynet, cleaup now. */ - if (ipsec_state.tunneled) { - flags &= ~IP_ROUTETOIF; + if (ro == &saved_route && + (!(flags & IP_ROUTETOIF) || ipsec_state.tunneled)) { + ROUTE_RELEASE(ro); + } +#endif /* DUMMYNET */ + + if (flags & IP_ROUTETOIF) { + /* + * if we have tunnel mode SA, we may need to ignore + * IP_ROUTETOIF. + */ + if (ipsec_state.tunneled) { + flags &= ~IP_ROUTETOIF; + ro = (struct route *)&ipsec_state.ro; + } + } else { ro = (struct route *)&ipsec_state.ro; } - } else { - ro = (struct route *)&ipsec_state.ro; - } - dst = SIN(ipsec_state.dst); - if (error) { - /* mbuf is already reclaimed in ipsec4_output. */ - m0 = NULL; - switch (error) { - case EHOSTUNREACH: - case ENETUNREACH: - case EMSGSIZE: - case ENOBUFS: - case ENOMEM: - break; - default: - printf("ip4_output (ipsec): error code %d\n", error); + dst = SIN(ipsec_state.dst); + if (error) { + /* mbuf is already reclaimed in ipsec4_output. 
*/ + m0 = NULL; + switch (error) { + case EHOSTUNREACH: + case ENETUNREACH: + case EMSGSIZE: + case ENOBUFS: + case ENOMEM: + break; + default: + printf("ip4_output (ipsec): error code %d\n", error); /* FALLTHRU */ - case ENOENT: - /* don't show these error codes to the user */ - error = 0; - break; + case ENOENT: + /* don't show these error codes to the user */ + error = 0; + break; + } + KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, + 4, 0, 0, 0, 0); + goto bad; } - KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, - 4, 0, 0, 0, 0); - goto bad; - } } /* be sure to update variables that are affected by ipsec4_output() */ @@ -1454,14 +1483,15 @@ sendit: if (!(flags & IP_ROUTETOIF)) { printf("%s: can't update route after " "IPsec processing\n", __func__); - error = EHOSTUNREACH; /* XXX */ + error = EHOSTUNREACH; /* XXX */ KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END, 6, 0, 0, 0, 0); goto bad; } } else { - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } RT_LOCK_SPIN(ro->ro_rt); ia = ifatoia(ro->ro_rt->rt_ifa); if (ia != NULL) { @@ -1483,7 +1513,7 @@ sendit: /* Pass to filters again */ if (!TAILQ_EMPTY(&ipv4_filters)) { - struct ipfilter *filter; + struct ipfilter *filter; ipf_pktopts.ippo_flags &= ~IPPOF_MCAST_OPTS; @@ -1562,14 +1592,15 @@ skip_ipsec: */ m0 = m; if ((ipfwoff & IP_FW_PORT_DENY_FLAG) || m == NULL) { - if (m) + if (m) { m_freem(m); + } error = EACCES; goto done; } ip = mtod(m, struct ip *); - if (ipfwoff == 0 && dst == old) { /* common case */ + if (ipfwoff == 0 && dst == old) { /* common case */ goto pass; } #if DUMMYNET @@ -1586,8 +1617,9 @@ skip_ipsec: args.fwa_ro = ro; args.fwa_dst = dst; args.fwa_oflags = flags; - if (flags & IP_OUTARGS) + if (flags & IP_OUTARGS) { args.fwa_ipoa = ipoa; + } error = ip_dn_io_ptr(m, ipfwoff & 0xffff, DN_TO_IP_OUT, &args, DN_CLIENT_IPFW); @@ -1599,15 +1631,17 @@ skip_ipsec: struct mbuf *clone = NULL; /* Clone packet if we're doing a 'tee' */ - if ((ipfwoff & IP_FW_PORT_TEE_FLAG) != 0) + if ((ipfwoff & IP_FW_PORT_TEE_FLAG) != 0) { clone = m_dup(m, M_DONTWAIT); + } /* * XXX * delayed checksums are not currently compatible * with divert sockets. */ - if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) + if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { in_delayed_cksum(m); + } /* Restore packet header fields to original values */ @@ -1680,24 +1714,25 @@ skip_ipsec: lck_rw_done(in_ifaddr_rwlock); if (ia_fw) { /* tell ip_input "dont filter" */ - struct m_tag *fwd_tag; - struct ip_fwd_tag *ipfwd_tag; + struct m_tag *fwd_tag; + struct ip_fwd_tag *ipfwd_tag; fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFORWARD, - sizeof (*ipfwd_tag), M_NOWAIT, m); + sizeof(*ipfwd_tag), M_NOWAIT, m); if (fwd_tag == NULL) { error = ENOBUFS; goto bad; } - ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1); + ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag + 1); ipfwd_tag->next_hop = args.fwa_next_hop; m_tag_prepend(m, fwd_tag); - if (m->m_pkthdr.rcvif == NULL) + if (m->m_pkthdr.rcvif == NULL) { m->m_pkthdr.rcvif = lo_ifp; + } #if BYTE_ORDER != BIG_ENDIAN HTONS(ip->ip_len); @@ -1725,7 +1760,7 @@ skip_ipsec: * Is this what we want to do? 
*/ ROUTE_RELEASE(ro_fwd); - bcopy(dst, &ro_fwd->ro_dst, sizeof (*dst)); + bcopy(dst, &ro_fwd->ro_dst, sizeof(*dst)); rtalloc_ign(ro_fwd, RTF_PRCLONING, false); @@ -1744,8 +1779,9 @@ skip_ipsec: } ifp = ro_fwd->ro_rt->rt_ifp; ro_fwd->ro_rt->rt_use++; - if (ro_fwd->ro_rt->rt_flags & RTF_GATEWAY) + if (ro_fwd->ro_rt->rt_flags & RTF_GATEWAY) { dst = SIN(ro_fwd->ro_rt->rt_gateway); + } if (ro_fwd->ro_rt->rt_flags & RTF_HOST) { /* double negation needed for bool bit field */ ipobf.isbroadcast = @@ -1819,7 +1855,7 @@ pass: * packets with DSCP bits set -- see radr://9331522 -- so as a * workaround we clear the DSCP bits and set the service class to BE */ - if (IN_MULTICAST(ntohl(pkt_dst.s_addr)) && IFNET_IS_WIFI_INFRA(ifp)) { + if (IN_MULTICAST(ntohl(pkt_dst.s_addr)) && IFNET_IS_WIFI_INFRA(ifp)) { ip->ip_tos &= IPTOS_ECN_MASK; mbuf_set_service_class(m, MBUF_SC_BE); } @@ -1855,25 +1891,28 @@ pass: #if IPSEC /* clean ipsec history once it goes out of the node */ - if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) + if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) { ipsec_delaux(m); + } #endif /* IPSEC */ if ((m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) && - (m->m_pkthdr.tso_segsz > 0)) + (m->m_pkthdr.tso_segsz > 0)) { scnt += m->m_pkthdr.len / m->m_pkthdr.tso_segsz; - else + } else { scnt++; + } if (packetchain == 0) { - if (ro->ro_rt != NULL && nstat_collect) + if (ro->ro_rt != NULL && nstat_collect) { nstat_route_tx(ro->ro_rt, scnt, m->m_pkthdr.len, 0); + } error = dlil_output(ifp, PF_INET, m, ro->ro_rt, SA(dst), 0, adv); if (dlil_verbose && error) { printf("dlil_output error on interface %s: %d\n", - ifp->if_xname, error); + ifp->if_xname, error); } scnt = 0; goto done; @@ -1889,23 +1928,24 @@ pass: #if PF sendchain: #endif /* PF */ - if (pktcnt > ip_maxchainsent) + if (pktcnt > ip_maxchainsent) { ip_maxchainsent = pktcnt; - if (ro->ro_rt != NULL && nstat_collect) + } + if (ro->ro_rt != NULL && nstat_collect) { nstat_route_tx(ro->ro_rt, scnt, bytecnt, 0); + } error = dlil_output(ifp, PF_INET, packetlist, ro->ro_rt, SA(dst), 0, adv); if (dlil_verbose && error) { printf("dlil_output error on interface %s: %d\n", - ifp->if_xname, error); + ifp->if_xname, error); } pktcnt = 0; scnt = 0; bytecnt = 0; goto done; - } m0 = m; pktcnt++; @@ -1960,10 +2000,10 @@ sendchain: !(m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_PARTIAL))) { struct udphdr *uh = NULL; - if (m->m_len < hlen + sizeof (struct udphdr)) { - m = m_pullup(m, hlen + sizeof (struct udphdr)); + if (m->m_len < hlen + sizeof(struct udphdr)) { + m = m_pullup(m, hlen + sizeof(struct udphdr)); if (m == NULL) { - error = ENOBUFS; + error = ENOBUFS; m0 = m; goto bad; } @@ -1978,8 +2018,9 @@ sendchain: if (uh->uh_sum == 0) { uh->uh_sum = inet_cksum(m, IPPROTO_UDP, hlen, ip->ip_len - hlen); - if (uh->uh_sum == 0) + if (uh->uh_sum == 0) { uh->uh_sum = 0xffff; + } } } } @@ -1998,8 +2039,9 @@ sendchain: m->m_nextpkt = 0; #if IPSEC /* clean ipsec history once it goes out of the node */ - if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) + if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) { ipsec_delaux(m); + } #endif /* IPSEC */ if (error == 0) { if ((packetchain != 0) && (pktcnt > 0)) { @@ -2015,15 +2057,16 @@ sendchain: SA(dst), 0, adv); if (dlil_verbose && error) { printf("dlil_output error on interface %s: %d\n", - ifp->if_xname, error); + ifp->if_xname, error); } } else { m_freem(m); } } - if (error == 0) + if (error == 0) { OSAddAtomic(1, &ipstat.ips_fragmented); + } done: if (ia != NULL) { @@ -2053,10 +2096,11 @@ done: net_perf_measure_time(&net_perf, &start_tv, 
packets_processed); net_perf_histogram(&net_perf, packets_processed); } - return (error); + return error; bad: - if (pktcnt > 0) + if (pktcnt > 0) { m0 = packetlist; + } m_freem_list(m0); goto done; @@ -2089,14 +2133,15 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) * for IPv6 fragment header if it needs to be translated * from IPv4 to IPv6. */ - if (IS_INTF_CLAT46(ifp)) + if (IS_INTF_CLAT46(ifp)) { mtu -= sizeof(struct ip6_frag); + } #endif - firstlen = len = (mtu - hlen) &~ 7; + firstlen = len = (mtu - hlen) & ~7; if (len < 8) { m_freem(m); - return (EMSGSIZE); + return EMSGSIZE; } /* @@ -2104,17 +2149,18 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) * fragmented packets, then do it here. */ if ((m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) && - !(ifp->if_hwassist & CSUM_IP_FRAGS)) + !(ifp->if_hwassist & CSUM_IP_FRAGS)) { in_delayed_cksum(m); + } /* * Loop through length of segment after first fragment, * make new header and copy data of each part and link onto chain. */ m0 = m; - mhlen = sizeof (struct ip); + mhlen = sizeof(struct ip); for (off = hlen + len; off < (u_short)ip->ip_len; off += len) { - MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (m == NULL) { error = ENOBUFS; OSAddAtomic(1, &ipstat.ips_odropped); @@ -2124,23 +2170,25 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) m->m_data += max_linkhdr; mhip = mtod(m, struct ip *); *mhip = *ip; - if (hlen > sizeof (struct ip)) { - mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip); + if (hlen > sizeof(struct ip)) { + mhlen = ip_optcopy(ip, mhip) + sizeof(struct ip); mhip->ip_vhl = IP_MAKE_VHL(IPVERSION, mhlen >> 2); } m->m_len = mhlen; mhip->ip_off = ((off - hlen) >> 3) + (ip->ip_off & ~IP_MF); - if (ip->ip_off & IP_MF) + if (ip->ip_off & IP_MF) { mhip->ip_off |= IP_MF; - if (off + len >= (u_short)ip->ip_len) + } + if (off + len >= (u_short)ip->ip_len) { len = (u_short)ip->ip_len - off; - else + } else { mhip->ip_off |= IP_MF; + } mhip->ip_len = htons((u_short)(len + mhlen)); m->m_next = m_copy(m0, off, len); if (m->m_next == NULL) { (void) m_free(m); - error = ENOBUFS; /* ??? */ + error = ENOBUFS; /* ??? */ OSAddAtomic(1, &ipstat.ips_odropped); goto sendorfree; } @@ -2195,10 +2243,11 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum) m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP; } sendorfree: - if (error) + if (error) { m_freem_list(m0); + } - return (error); + return error; } static void @@ -2233,21 +2282,22 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) uint32_t offset, _hlen, mlen, hlen, len, sw_csum; uint16_t csum, ip_len; - _CASSERT(sizeof (csum) == sizeof (uint16_t)); + _CASSERT(sizeof(csum) == sizeof(uint16_t)); VERIFY(m->m_flags & M_PKTHDR); sw_csum = (csum_flags & m->m_pkthdr.csum_flags); - if ((sw_csum &= (CSUM_DELAY_IP | CSUM_DELAY_DATA)) == 0) + if ((sw_csum &= (CSUM_DELAY_IP | CSUM_DELAY_DATA)) == 0) { goto done; + } - mlen = m->m_pkthdr.len; /* total mbuf len */ + mlen = m->m_pkthdr.len; /* total mbuf len */ /* sanity check (need at least simple IP header) */ - if (mlen < (hoff + sizeof (*ip))) { + if (mlen < (hoff + sizeof(*ip))) { panic("%s: mbuf %p pkt len (%u) < hoff+ip_hdr " "(%u+%u)\n", __func__, m, mlen, hoff, - (uint32_t)sizeof (*ip)); + (uint32_t)sizeof(*ip)); /* NOTREACHED */ } @@ -2257,17 +2307,17 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) * buffer. 
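ip_fragment() above sizes every fragment payload as (mtu - hlen) & ~7, because the ip_off field counts 8-byte units, and leaves IP_MF set on every fragment but the last. A userspace walk-through of that arithmetic for an assumed 4000-byte datagram over a 1500-byte MTU:

    #include <stdio.h>

    int main(void)
    {
        int mtu = 1500, hlen = 20, pktlen = 4000;   /* assumed sizes */
        int len = (mtu - hlen) & ~7;                /* 8-byte multiple */
        int off;

        for (off = hlen; off < pktlen; off += len) {
            int seg = (pktlen - off < len) ? (pktlen - off) : len;

            /* ip_off counts 8-byte units past the header; IP_MF is
             * set on every fragment except the last */
            printf("frag: ip_off=%d payload=%d mf=%d\n",
                (off - hlen) >> 3, seg, off + seg < pktlen);
        }
        return 0;
    }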
Copy only the simple IP header here (IP options case * is handled below.) */ - if ((sw_csum & CSUM_DELAY_IP) || (hoff + sizeof (*ip)) > m->m_len || + if ((sw_csum & CSUM_DELAY_IP) || (hoff + sizeof(*ip)) > m->m_len || !IP_HDR_ALIGNED_P(mtod(m, caddr_t) + hoff)) { - m_copydata(m, hoff, sizeof (*ip), (caddr_t)buf); + m_copydata(m, hoff, sizeof(*ip), (caddr_t)buf); ip = (struct ip *)(void *)buf; - _hlen = sizeof (*ip); + _hlen = sizeof(*ip); } else { ip = (struct ip *)(void *)(m->m_data + hoff); _hlen = 0; } - hlen = IP_VHL_HL(ip->ip_vhl) << 2; /* IP header len */ + hlen = IP_VHL_HL(ip->ip_vhl) << 2; /* IP header len */ /* sanity check */ if (mlen < (hoff + hlen)) { @@ -2299,7 +2349,7 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) } } - len = ip_len - hlen; /* csum span */ + len = ip_len - hlen; /* csum span */ if (sw_csum & CSUM_DELAY_DATA) { uint16_t ulpoff; @@ -2309,8 +2359,8 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) * which is expected to contain the ULP offset; therefore * CSUM_PARTIAL offset adjustment must be undone. */ - if ((m->m_pkthdr.csum_flags & (CSUM_PARTIAL|CSUM_DATA_VALID)) == - (CSUM_PARTIAL|CSUM_DATA_VALID)) { + if ((m->m_pkthdr.csum_flags & (CSUM_PARTIAL | CSUM_DATA_VALID)) == + (CSUM_PARTIAL | CSUM_DATA_VALID)) { /* * Get back the original ULP offset (this will * undo the CSUM_PARTIAL logic in ip_output.) @@ -2320,9 +2370,9 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) } ulpoff = (m->m_pkthdr.csum_data & 0xffff); /* ULP csum offset */ - offset = hoff + hlen; /* ULP header */ + offset = hoff + hlen; /* ULP header */ - if (mlen < (ulpoff + sizeof (csum))) { + if (mlen < (ulpoff + sizeof(csum))) { panic("%s: mbuf %p pkt len (%u) proto %d invalid ULP " "cksum offset (%u) cksum flags 0x%x\n", __func__, m, mlen, ip->ip_p, ulpoff, m->m_pkthdr.csum_flags); @@ -2336,17 +2386,18 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) /* RFC1122 4.1.3.4 */ if (csum == 0 && - (m->m_pkthdr.csum_flags & (CSUM_UDP|CSUM_ZERO_INVERT))) + (m->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_ZERO_INVERT))) { csum = 0xffff; + } /* Insert the checksum in the ULP csum field */ offset += ulpoff; - if (offset + sizeof (csum) > m->m_len) { - m_copyback(m, offset, sizeof (csum), &csum); + if (offset + sizeof(csum) > m->m_len) { + m_copyback(m, offset, sizeof(csum), &csum); } else if (IP_HDR_ALIGNED_P(mtod(m, char *) + hoff)) { *(uint16_t *)(void *)(mtod(m, char *) + offset) = csum; } else { - bcopy(&csum, (mtod(m, char *) + offset), sizeof (csum)); + bcopy(&csum, (mtod(m, char *) + offset), sizeof(csum)); } m->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_ZERO_INVERT); @@ -2354,9 +2405,9 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) if (sw_csum & CSUM_DELAY_IP) { /* IP header must be in the local buffer */ - VERIFY(_hlen == sizeof (*ip)); + VERIFY(_hlen == sizeof(*ip)); if (_hlen != hlen) { - VERIFY(hlen <= sizeof (buf)); + VERIFY(hlen <= sizeof(buf)); m_copydata(m, hoff, hlen, (caddr_t)buf); ip = (struct ip *)(void *)buf; _hlen = hlen; @@ -2381,18 +2432,18 @@ in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags) * csum field; all other fields are left unchanged. 
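The CSUM_ZERO_INVERT handling above implements RFC 1122 section 4.1.3.4: a UDP checksum that computes to zero must be transmitted as 0xffff, because zero on the wire means "no checksum". The rule in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* RFC 1122 4.1.3.4: send 0xffff when the sum computes to 0. */
    static uint16_t
    demo_udp_finalize(uint16_t computed)
    {
        return (computed == 0) ? 0xffff : computed;
    }

    int main(void)
    {
        printf("0x0000 -> 0x%04x\n", (unsigned)demo_udp_finalize(0));
        printf("0x1234 -> 0x%04x\n", (unsigned)demo_udp_finalize(0x1234));
        return 0;
    }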
*/ offset = hoff + offsetof(struct ip, ip_sum); - if (offset + sizeof (csum) > m->m_len) { - m_copyback(m, offset, sizeof (csum), &csum); + if (offset + sizeof(csum) > m->m_len) { + m_copyback(m, offset, sizeof(csum), &csum); } else if (IP_HDR_ALIGNED_P(mtod(m, char *) + hoff)) { *(uint16_t *)(void *)(mtod(m, char *) + offset) = csum; } else { - bcopy(&csum, (mtod(m, char *) + offset), sizeof (csum)); + bcopy(&csum, (mtod(m, char *) + offset), sizeof(csum)); } m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP; } done: - return (sw_csum); + return sw_csum; } /* @@ -2410,39 +2461,42 @@ ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen) struct ip *ip = mtod(m, struct ip *); unsigned optlen; - optlen = opt->m_len - sizeof (p->ipopt_dst); - if (optlen + (u_short)ip->ip_len > IP_MAXPACKET) - return (m); /* XXX should fail */ - if (p->ipopt_dst.s_addr) + optlen = opt->m_len - sizeof(p->ipopt_dst); + if (optlen + (u_short)ip->ip_len > IP_MAXPACKET) { + return m; /* XXX should fail */ + } + if (p->ipopt_dst.s_addr) { ip->ip_dst = p->ipopt_dst; + } if (m->m_flags & M_EXT || m->m_data - optlen < m->m_pktdat) { - MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */ - if (n == NULL) - return (m); + MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + if (n == NULL) { + return m; + } n->m_pkthdr.rcvif = 0; #if CONFIG_MACF_NET mac_mbuf_label_copy(m, n); #endif /* CONFIG_MACF_NET */ n->m_pkthdr.len = m->m_pkthdr.len + optlen; - m->m_len -= sizeof (struct ip); - m->m_data += sizeof (struct ip); + m->m_len -= sizeof(struct ip); + m->m_data += sizeof(struct ip); n->m_next = m; m = n; - m->m_len = optlen + sizeof (struct ip); + m->m_len = optlen + sizeof(struct ip); m->m_data += max_linkhdr; - (void) memcpy(mtod(m, void *), ip, sizeof (struct ip)); + (void) memcpy(mtod(m, void *), ip, sizeof(struct ip)); } else { m->m_data -= optlen; m->m_len += optlen; m->m_pkthdr.len += optlen; - ovbcopy((caddr_t)ip, mtod(m, caddr_t), sizeof (struct ip)); + ovbcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip)); } ip = mtod(m, struct ip *); bcopy(p->ipopt_list, ip + 1, optlen); - *phlen = sizeof (struct ip) + optlen; + *phlen = sizeof(struct ip) + optlen; ip->ip_vhl = IP_MAKE_VHL(IPVERSION, *phlen >> 2); ip->ip_len += optlen; - return (m); + return m; } /* @@ -2457,11 +2511,12 @@ ip_optcopy(struct ip *ip, struct ip *jp) cp = (u_char *)(ip + 1); dp = (u_char *)(jp + 1); - cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip); + cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip); for (; cnt > 0; cnt -= optlen, cp += optlen) { opt = cp[0]; - if (opt == IPOPT_EOL) + if (opt == IPOPT_EOL) { break; + } if (opt == IPOPT_NOP) { /* Preserve for IP mcast tunnel's LSRR alignment. 
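The ip_optcopy() loop that begins above copies into each fragment only those options whose copied bit (0x80 in the type octet) is set: source routes must repeat in every fragment, timestamps must not. A sketch of that classification; DEMO_IPOPT_COPIED mirrors the classic BSD macro:

    #include <stdio.h>

    #define DEMO_IPOPT_COPIED(o) ((o) & 0x80)   /* copy-on-fragment bit */

    int main(void)
    {
        unsigned char lsrr = 0x83;  /* loose source route: copied */
        unsigned char ts = 0x44;    /* timestamp: not copied */

        printf("LSRR copied=%d, TS copied=%d\n",
            DEMO_IPOPT_COPIED(lsrr) != 0, DEMO_IPOPT_COPIED(ts) != 0);
        return 0;
    }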
*/ *dp++ = IPOPT_NOP; @@ -2469,29 +2524,31 @@ ip_optcopy(struct ip *ip, struct ip *jp) continue; } #if DIAGNOSTIC - if (cnt < IPOPT_OLEN + sizeof (*cp)) { + if (cnt < IPOPT_OLEN + sizeof(*cp)) { panic("malformed IPv4 option passed to ip_optcopy"); /* NOTREACHED */ } #endif optlen = cp[IPOPT_OLEN]; #if DIAGNOSTIC - if (optlen < IPOPT_OLEN + sizeof (*cp) || optlen > cnt) { + if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { panic("malformed IPv4 option passed to ip_optcopy"); /* NOTREACHED */ } #endif /* bogus lengths should have been caught by ip_dooptions */ - if (optlen > cnt) + if (optlen > cnt) { optlen = cnt; + } if (IPOPT_COPIED(opt)) { bcopy(cp, dp, optlen); dp += optlen; } } - for (optlen = dp - (u_char *)(jp+1); optlen & 0x3; optlen++) + for (optlen = dp - (u_char *)(jp + 1); optlen & 0x3; optlen++) { *dp++ = IPOPT_EOL; - return (optlen); + } + return optlen; } /* @@ -2500,12 +2557,13 @@ ip_optcopy(struct ip *ip, struct ip *jp) int ip_ctloutput(struct socket *so, struct sockopt *sopt) { - struct inpcb *inp = sotoinpcb(so); - int error, optval; + struct inpcb *inp = sotoinpcb(so); + int error, optval; error = optval = 0; - if (sopt->sopt_level != IPPROTO_IP) - return (EINVAL); + if (sopt->sopt_level != IPPROTO_IP) { + return EINVAL; + } switch (sopt->sopt_dir) { case SOPT_SET: @@ -2534,8 +2592,8 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) break; } - return (ip_pcbopts(sopt->sopt_name, - &inp->inp_options, m)); + return ip_pcbopts(sopt->sopt_name, + &inp->inp_options, m); } case IP_TOS: @@ -2547,10 +2605,11 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_RECVTTL: case IP_RECVPKTINFO: case IP_RECVTOS: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error) { break; + } switch (sopt->sopt_name) { case IP_TOS: @@ -2560,11 +2619,11 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_TTL: inp->inp_ip_ttl = optval; break; -#define OPTSET(bit) \ +#define OPTSET(bit) \ if (optval) \ - inp->inp_flags |= bit; \ + inp->inp_flags |= bit; \ else \ - inp->inp_flags &= ~bit; + inp->inp_flags &= ~bit; case IP_RECVOPTS: OPTSET(INP_RECVOPTS); @@ -2622,10 +2681,11 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) break; case IP_PORTRANGE: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error) { break; + } switch (optval) { case IP_PORTRANGE_DEFAULT: @@ -2657,10 +2717,12 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) struct mbuf *m; int optname; - if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */ + if ((error = soopt_getm(sopt, &m)) != 0) { /* XXX */ break; - if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */ + } + if ((error = soopt_mcopyin(sopt, m)) != 0) { /* XXX */ break; + } priv = (proc_suser(sopt->sopt_p) == 0); if (m) { req = mtod(m, caddr_t); @@ -2678,9 +2740,10 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) unsigned background = 0; error = sooptcopyin(sopt, &background, - sizeof (background), sizeof (background)); - if (error) + sizeof(background), sizeof(background)); + if (error) { break; + } if (background) { socket_set_traffic_mgt_flags_locked(so, @@ -2719,11 +2782,12 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) break; } - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); - if 
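The tail of ip_optcopy() above pads the copied options with IPOPT_EOL until their length is a multiple of four, since the header length field counts whole 32-bit words. The padding loop in isolation, with DEMO_IPOPT_EOL standing in for the real constant (0):

    #include <stdio.h>

    #define DEMO_IPOPT_EOL 0    /* end-of-options, also the pad byte */

    int main(void)
    {
        unsigned char opts[8] = { 0x83, 0x03, 0x04 };   /* 3 copied bytes */
        int optlen = 3;

        /* pad to a 32-bit boundary, as the ip_optcopy() tail does */
        while (optlen & 0x3) {
            opts[optlen++] = DEMO_IPOPT_EOL;
        }
        printf("padded option length: %d\n", optlen);
        return 0;
    }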
(error) + if (error) { break; + } error = inp_bindif(inp, optval, NULL); break; @@ -2735,11 +2799,12 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) break; } - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); - if (error) + if (error) { break; + } /* once set, it cannot be unset */ if (!optval && INP_NO_CELLULAR(inp)) { @@ -2794,7 +2859,7 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) optval = inp->inp_ip_ttl; break; -#define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0) +#define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0) case IP_RECVOPTS: optval = OPTBIT(INP_RECVOPTS); @@ -2817,12 +2882,13 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) break; case IP_PORTRANGE: - if (inp->inp_flags & INP_HIGHPORT) + if (inp->inp_flags & INP_HIGHPORT) { optval = IP_PORTRANGE_HIGH; - else if (inp->inp_flags & INP_LOWPORT) + } else if (inp->inp_flags & INP_LOWPORT) { optval = IP_PORTRANGE_LOW; - else + } else { optval = 0; + } break; case IP_RECVPKTINFO: @@ -2833,7 +2899,7 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) optval = OPTBIT(INP_RECVTOS); break; } - error = sooptcopyout(sopt, &optval, sizeof (optval)); + error = sooptcopyout(sopt, &optval, sizeof(optval)); break; case IP_MULTICAST_IF: @@ -2856,26 +2922,27 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_TRAFFIC_MGT_BACKGROUND: { unsigned background = (so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND) ? 1 : 0; - return (sooptcopyout(sopt, &background, - sizeof (background))); + return sooptcopyout(sopt, &background, + sizeof(background)); } #endif /* TRAFFIC_MGT */ case IP_BOUND_IF: - if (inp->inp_flags & INP_BOUND_IF) + if (inp->inp_flags & INP_BOUND_IF) { optval = inp->inp_boundifp->if_index; - error = sooptcopyout(sopt, &optval, sizeof (optval)); + } + error = sooptcopyout(sopt, &optval, sizeof(optval)); break; case IP_NO_IFT_CELLULAR: optval = INP_NO_CELLULAR(inp) ? 1 : 0; - error = sooptcopyout(sopt, &optval, sizeof (optval)); + error = sooptcopyout(sopt, &optval, sizeof(optval)); break; case IP_OUT_IF: optval = (inp->inp_last_outifp != NULL) ? inp->inp_last_outifp->if_index : 0; - error = sooptcopyout(sopt, &optval, sizeof (optval)); + error = sooptcopyout(sopt, &optval, sizeof(optval)); break; default: @@ -2884,7 +2951,7 @@ ip_ctloutput(struct socket *so, struct sockopt *sopt) } break; } - return (error); + return error; } /* @@ -2901,49 +2968,55 @@ ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m) u_char opt; /* turn off any old options */ - if (*pcbopt) + if (*pcbopt) { (void) m_free(*pcbopt); + } *pcbopt = 0; if (m == (struct mbuf *)0 || m->m_len == 0) { /* * Only turning off any previous options. */ - if (m) + if (m) { (void) m_free(m); - return (0); + } + return 0; } - if (m->m_len % sizeof (int32_t)) + if (m->m_len % sizeof(int32_t)) { goto bad; + } /* * IP first-hop destination address will be stored before * actual options; move other options back * and clear it when none present. 
*/ - if (m->m_data + m->m_len + sizeof (struct in_addr) >= &m->m_dat[MLEN]) + if (m->m_data + m->m_len + sizeof(struct in_addr) >= &m->m_dat[MLEN]) { goto bad; + } cnt = m->m_len; - m->m_len += sizeof (struct in_addr); - cp = mtod(m, u_char *) + sizeof (struct in_addr); + m->m_len += sizeof(struct in_addr); + cp = mtod(m, u_char *) + sizeof(struct in_addr); ovbcopy(mtod(m, caddr_t), (caddr_t)cp, (unsigned)cnt); - bzero(mtod(m, caddr_t), sizeof (struct in_addr)); + bzero(mtod(m, caddr_t), sizeof(struct in_addr)); for (; cnt > 0; cnt -= optlen, cp += optlen) { opt = cp[IPOPT_OPTVAL]; - if (opt == IPOPT_EOL) + if (opt == IPOPT_EOL) { break; - if (opt == IPOPT_NOP) + } + if (opt == IPOPT_NOP) { optlen = 1; - else { - if (cnt < IPOPT_OLEN + sizeof (*cp)) + } else { + if (cnt < IPOPT_OLEN + sizeof(*cp)) { goto bad; + } optlen = cp[IPOPT_OLEN]; - if (optlen < IPOPT_OLEN + sizeof (*cp) || optlen > cnt) + if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) { goto bad; + } } switch (opt) { - default: break; @@ -2957,45 +3030,47 @@ ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m) * A is first hop destination, which doesn't appear in * actual IP option, but is stored before the options. */ - if (optlen < IPOPT_MINOFF - 1 + sizeof (struct in_addr)) + if (optlen < IPOPT_MINOFF - 1 + sizeof(struct in_addr)) { goto bad; - m->m_len -= sizeof (struct in_addr); - cnt -= sizeof (struct in_addr); - optlen -= sizeof (struct in_addr); + } + m->m_len -= sizeof(struct in_addr); + cnt -= sizeof(struct in_addr); + optlen -= sizeof(struct in_addr); cp[IPOPT_OLEN] = optlen; /* * Move first hop before start of options. */ - bcopy((caddr_t)&cp[IPOPT_OFFSET+1], mtod(m, caddr_t), - sizeof (struct in_addr)); + bcopy((caddr_t)&cp[IPOPT_OFFSET + 1], mtod(m, caddr_t), + sizeof(struct in_addr)); /* * Then copy rest of options back * to close up the deleted entry. */ - ovbcopy((caddr_t)(&cp[IPOPT_OFFSET+1] + - sizeof (struct in_addr)), - (caddr_t)&cp[IPOPT_OFFSET+1], - (unsigned)cnt + sizeof (struct in_addr)); + ovbcopy((caddr_t)(&cp[IPOPT_OFFSET + 1] + + sizeof(struct in_addr)), + (caddr_t)&cp[IPOPT_OFFSET + 1], + (unsigned)cnt + sizeof(struct in_addr)); break; } } - if (m->m_len > MAX_IPOPTLEN + sizeof (struct in_addr)) + if (m->m_len > MAX_IPOPTLEN + sizeof(struct in_addr)) { goto bad; + } *pcbopt = m; - return (0); + return 0; bad: (void) m_free(m); - return (EINVAL); + return EINVAL; } void ip_moptions_init(void) { - PE_parse_boot_argn("ifa_debug", &imo_debug, sizeof (imo_debug)); + PE_parse_boot_argn("ifa_debug", &imo_debug, sizeof(imo_debug)); - imo_size = (imo_debug == 0) ? sizeof (struct ip_moptions) : - sizeof (struct ip_moptions_dbg); + imo_size = (imo_debug == 0) ? sizeof(struct ip_moptions) : + sizeof(struct ip_moptions_dbg); imo_zone = zinit(imo_size, IMO_ZONE_MAX * imo_size, 0, IMO_ZONE_NAME); @@ -3009,10 +3084,11 @@ ip_moptions_init(void) void imo_addref(struct ip_moptions *imo, int locked) { - if (!locked) + if (!locked) { IMO_LOCK(imo); - else + } else { IMO_LOCK_ASSERT_HELD(imo); + } if (++imo->imo_refcnt == 0) { panic("%s: imo %p wraparound refcnt\n", __func__, imo); @@ -3021,8 +3097,9 @@ imo_addref(struct ip_moptions *imo, int locked) (*imo->imo_trace)(imo, TRUE); } - if (!locked) + if (!locked) { IMO_UNLOCK(imo); + } } void @@ -3048,13 +3125,15 @@ imo_remref(struct ip_moptions *imo) struct in_mfilter *imf; imf = imo->imo_mfilters ? 
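imo_addref()/imo_remref() above follow the refcount idiom used throughout xnu: bump under the object lock and panic on wraparound, and let the release path tear the object down once the count reaches zero (after leaving every multicast group). A generic hedged sketch of the same pattern with hypothetical names, not code from the patch:

	struct obj {
		decl_lck_mtx_data(, lock);
		uint32_t refcnt;
	};

	static void
	obj_addref(struct obj *o)
	{
		lck_mtx_lock(&o->lock);
		if (++o->refcnt == 0) {         /* wrapped past 2^32: a bug */
			panic("%s: refcnt wraparound", __func__);
		}
		lck_mtx_unlock(&o->lock);
	}

	static void
	obj_remref(struct obj *o)
	{
		lck_mtx_lock(&o->lock);
		if (o->refcnt == 0) {
			panic("%s: over-release", __func__);
		}
		if (--o->refcnt > 0) {
			lck_mtx_unlock(&o->lock);
			return;
		}
		/* last reference: drop per-member state, then free */
		lck_mtx_unlock(&o->lock);
	}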
&imo->imo_mfilters[i] : NULL; - if (imf != NULL) + if (imf != NULL) { imf_leave(imf); + } (void) in_leavegroup(imo->imo_membership[i], imf); - if (imf != NULL) + if (imf != NULL) { imf_purge(imf); + } INM_REMREF(imo->imo_membership[i]); imo->imo_membership[i] = NULL; @@ -3120,7 +3199,7 @@ ip_allocmoptions(int how) IMO_ADDREF(imo); } - return (imo); + return imo; } /* @@ -3137,8 +3216,9 @@ ip_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m, struct mbuf *copym; struct ip *ip; - if (lo_ifp == NULL) + if (lo_ifp == NULL) { return; + } /* * Copy the packet header as it's needed for the checksum @@ -3147,11 +3227,13 @@ ip_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m, * header portion later. */ copym = m_copym_mode(m, 0, M_COPYALL, M_DONTWAIT, M_COPYM_COPY_HDR); - if (copym != NULL && ((copym->m_flags & M_EXT) || copym->m_len < hlen)) + if (copym != NULL && ((copym->m_flags & M_EXT) || copym->m_len < hlen)) { copym = m_pullup(copym, hlen); + } - if (copym == NULL) + if (copym == NULL) { return; + } /* * We don't bother to fragment if the IP length is greater @@ -3171,7 +3253,7 @@ ip_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m, * interface itself is lo0, this will be overridden by if_loop. */ if (hwcksum_rx) { - copym->m_pkthdr.csum_flags &= ~(CSUM_PARTIAL|CSUM_ZERO_INVERT); + copym->m_pkthdr.csum_flags &= ~(CSUM_PARTIAL | CSUM_ZERO_INVERT); copym->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; copym->m_pkthdr.csum_data = 0xffff; @@ -3212,8 +3294,9 @@ ip_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m, } lck_rw_done(in_ifaddr_rwlock); } - if (srcifp != NULL) + if (srcifp != NULL) { ip_setsrcifaddr_info(copym, srcifp->if_index, NULL); + } ip_setdstifaddr_info(copym, origifp->if_index, NULL); dlil_output(lo_ifp, PF_INET, copym, NULL, SA(dst), 0, NULL); @@ -3241,12 +3324,13 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope) VERIFY(src.s_addr != INADDR_ANY); if (ip_select_srcif_debug) { - (void) inet_ntop(AF_INET, &src.s_addr, s_src, sizeof (s_src)); - (void) inet_ntop(AF_INET, &dst.s_addr, s_dst, sizeof (s_dst)); + (void) inet_ntop(AF_INET, &src.s_addr, s_src, sizeof(s_src)); + (void) inet_ntop(AF_INET, &dst.s_addr, s_dst, sizeof(s_dst)); } - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_LOCK(ro->ro_rt); + } rt_ifp = (ro->ro_rt != NULL) ? ro->ro_rt->rt_ifp : NULL; @@ -3272,8 +3356,9 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope) if (scope == IFSCOPE_NONE) { scope = rt_ifp->if_index; if (scope != get_primary_ifscope(AF_INET) && - ROUTE_UNUSABLE(ro)) + ROUTE_UNUSABLE(ro)) { scope = get_primary_ifscope(AF_INET); + } } ifa = (struct ifaddr *)ifa_foraddr_scoped(src.s_addr, scope); @@ -3332,9 +3417,9 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope) struct sockaddr_in sin; struct ifaddr *oifa = NULL; - bzero(&sin, sizeof (sin)); + bzero(&sin, sizeof(sin)); sin.sin_family = AF_INET; - sin.sin_len = sizeof (sin); + sin.sin_len = sizeof(sin); sin.sin_addr = dst; lck_mtx_lock(rnh_lock); @@ -3368,7 +3453,7 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope) * address of the packet. 
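The scope fallback at the top of in_selectsrcif() can be hard to see through the locking; condensed into a hedged standalone sketch (same identifiers as the kernel code, but simplified and not verbatim):

	static unsigned int
	pick_scope(struct route *ro, unsigned int ifscope)
	{
		unsigned int scope = ifscope;

		/* Unscoped lookup: borrow the cached route's interface... */
		if (scope == IFSCOPE_NONE && ro->ro_rt != NULL) {
			scope = ro->ro_rt->rt_ifp->if_index;
			/*
			 * ...but fall back to the primary interface when
			 * the cached route is no longer usable.
			 */
			if (scope != get_primary_ifscope(AF_INET) &&
			    ROUTE_UNUSABLE(ro)) {
				scope = get_primary_ifscope(AF_INET);
			}
		}
		return scope;
	}

The chosen source address is then required to actually exist on that interface (ifa_foraddr_scoped()); reconciling the cases where it does not is what the rest of the function deals with.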
*/ iifa = (struct ifaddr *)ifa_foraddr_scoped( - src.s_addr, ifa->ifa_ifp->if_index); + src.s_addr, ifa->ifa_ifp->if_index); if (iifa != NULL) { /* @@ -3424,8 +3509,9 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope) } } - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_LOCK_ASSERT_HELD(ro->ro_rt); + } /* * If there is a non-loopback route with the wrong interface, or if * there is no interface configured with such an address, blow it @@ -3484,25 +3570,28 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope) if (ro->ro_rt != NULL && (!IN_LINKLOCAL(ntohl(dst.s_addr)) || (ro->ro_rt->rt_gateway->sa_family == AF_LINK && SDL(ro->ro_rt->rt_gateway)->sdl_alen != 0))) { - if (ifa != NULL) - IFA_ADDREF(ifa); /* for route */ - if (ro->ro_srcia != NULL) + if (ifa != NULL) { + IFA_ADDREF(ifa); /* for route */ + } + if (ro->ro_srcia != NULL) { IFA_REMREF(ro->ro_srcia); + } ro->ro_srcia = ifa; ro->ro_flags |= ROF_SRCIF_SELECTED; RT_GENID_SYNC(ro->ro_rt); } - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_UNLOCK(ro->ro_rt); + } - return (ifa); + return ifa; } /* * @brief Given outgoing interface it determines what checksum needs - * to be computed in software and what needs to be offloaded to the - * interface. + * to be computed in software and what needs to be offloaded to the + * interface. * * @param ifp Pointer to the outgoing interface * @param m Pointer to the packet @@ -3531,7 +3620,7 @@ ip_output_checksum(struct ifnet *ifp, struct mbuf *m, int hlen, int ip_len, ~IF_HWASSIST_CSUM_FLAGS(hwcap); } - if (hlen != sizeof (struct ip)) { + if (hlen != sizeof(struct ip)) { *sw_csum |= ((CSUM_DELAY_DATA | CSUM_DELAY_IP) & m->m_pkthdr.csum_flags); } else if (!(*sw_csum & CSUM_DELAY_DATA) && (hwcap & CSUM_PARTIAL)) { @@ -3554,7 +3643,7 @@ ip_output_checksum(struct ifnet *ifp, struct mbuf *m, int hlen, int ip_len, ((hwcap & CSUM_ZERO_INVERT) && (m->m_pkthdr.csum_flags & CSUM_ZERO_INVERT))) && ip_len <= interface_mtu) { - uint16_t start = sizeof (struct ip); + uint16_t start = sizeof(struct ip); uint16_t ulpoff = m->m_pkthdr.csum_data & 0xffff; m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PARTIAL); @@ -3596,13 +3685,13 @@ ip_gre_output(struct mbuf *m) struct route ro; int error; - bzero(&ro, sizeof (ro)); + bzero(&ro, sizeof(ro)); error = ip_output(m, NULL, &ro, 0, NULL, NULL); ROUTE_RELEASE(&ro); - return (error); + return error; } static int @@ -3613,8 +3702,9 @@ sysctl_reset_ip_output_stats SYSCTL_HANDLER_ARGS i = ip_output_measure; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* impose bounds */ if (i < 0 || i > 1) { error = EINVAL; @@ -3625,7 +3715,7 @@ sysctl_reset_ip_output_stats SYSCTL_HANDLER_ARGS } ip_output_measure = i; done: - return (error); + return error; } static int @@ -3637,8 +3727,9 @@ sysctl_ip_output_measure_bins SYSCTL_HANDLER_ARGS i = ip_output_measure_bins; error = sysctl_handle_quad(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* validate data */ if (!net_perf_validate_bins(i)) { error = EINVAL; @@ -3646,15 +3737,16 @@ sysctl_ip_output_measure_bins SYSCTL_HANDLER_ARGS } ip_output_measure_bins = i; done: - return (error); + return error; } static int sysctl_ip_output_getperf SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = (size_t)sizeof (struct ipstat); + if (req->oldptr == 
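The three handlers above (reset, bins, getperf) share xnu's standard sysctl shape: snapshot the variable, let sysctl_handle_int()/sysctl_handle_quad() perform the copyin/copyout, and commit only after validating the new value. A hedged sketch of the idiom with a hypothetical tunable (returns directly instead of using the goto-done form above; same effect):

	static int my_tunable = 0;              /* hypothetical */

	static int
	sysctl_my_tunable SYSCTL_HANDLER_ARGS
	{
	#pragma unused(arg1, arg2)
		int error, i = my_tunable;

		error = sysctl_handle_int(oidp, &i, 0, req);
		if (error || req->newptr == USER_ADDR_NULL) {
			return error;           /* read-only access, or copy failed */
		}
		if (i < 0 || i > 1) {
			return EINVAL;          /* impose bounds */
		}
		my_tunable = i;                 /* commit only after validation */
		return 0;
	}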
USER_ADDR_NULL) { + req->oldlen = (size_t)sizeof(struct ipstat); + } - return (SYSCTL_OUT(req, &net_perf, MIN(sizeof (net_perf), req->oldlen))); + return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen)); } diff --git a/bsd/netinet/ip_var.h b/bsd/netinet/ip_var.h index 87985cc1c..9c2f73035 100644 --- a/bsd/netinet/ip_var.h +++ b/bsd/netinet/ip_var.h @@ -67,18 +67,18 @@ */ #ifndef _NETINET_IP_VAR_H_ -#define _NETINET_IP_VAR_H_ +#define _NETINET_IP_VAR_H_ #include /* * Overlay for ip header used by other protocols (tcp, udp). */ struct ipovly { - u_char ih_x1[9]; /* (unused) */ - u_char ih_pr; /* protocol */ - u_short ih_len; /* protocol length */ - struct in_addr ih_src; /* source internet address */ - struct in_addr ih_dst; /* destination internet address */ + u_char ih_x1[9]; /* (unused) */ + u_char ih_pr; /* protocol */ + u_short ih_len; /* protocol length */ + struct in_addr ih_src; /* source internet address */ + struct in_addr ih_dst; /* destination internet address */ }; #ifdef BSD_KERNEL_PRIVATE @@ -92,25 +92,25 @@ struct label; * be reclaimed if memory becomes tight. */ struct ipq { - TAILQ_ENTRY(ipq) ipq_list; /* to other reass headers */ - struct mbuf *ipq_frags; /* to ip headers of fragments */ + TAILQ_ENTRY(ipq) ipq_list; /* to other reass headers */ + struct mbuf *ipq_frags; /* to ip headers of fragments */ #if CONFIG_MACF_NET - struct label *ipq_label; /* MAC label */ + struct label *ipq_label; /* MAC label */ #endif /* CONFIG_MACF_NET */ - u_char ipq_ttl; /* time for reass q to live */ - u_char ipq_p; /* protocol of this fragment */ - u_short ipq_id; /* sequence id for reassembly */ - struct in_addr ipq_src, ipq_dst; - u_int32_t ipq_nfrags; /* # frags in this packet */ - uint32_t ipq_csum_flags; /* checksum flags */ - uint32_t ipq_csum; /* partial checksum value */ + u_char ipq_ttl; /* time for reass q to live */ + u_char ipq_p; /* protocol of this fragment */ + u_short ipq_id; /* sequence id for reassembly */ + struct in_addr ipq_src, ipq_dst; + u_int32_t ipq_nfrags; /* # frags in this packet */ + uint32_t ipq_csum_flags; /* checksum flags */ + uint32_t ipq_csum; /* partial checksum value */ #if IPDIVERT #ifdef IPDIVERT_44 - u_int32_t ipq_div_info; /* ipfw divert port & flags */ + u_int32_t ipq_div_info; /* ipfw divert port & flags */ #else /* !IPDIVERT_44 */ - u_int16_t ipq_divert; /* ipfw divert port (legacy) */ + u_int16_t ipq_divert; /* ipfw divert port (legacy) */ #endif /* !IPDIVERT_44 */ - u_int16_t ipq_div_cookie; /* ipfw divert cookie */ + u_int16_t ipq_div_cookie; /* ipfw divert cookie */ #endif /* IPDIVERT */ }; @@ -121,11 +121,11 @@ struct ipq { * is in m_len. */ #endif /* BSD_KERNEL_PRIVATE */ -#define MAX_IPOPTLEN 40 +#define MAX_IPOPTLEN 40 #ifdef BSD_KERNEL_PRIVATE struct ipoption { - struct in_addr ipopt_dst; /* first-hop dst if source routed */ - char ipopt_list[MAX_IPOPTLEN]; /* options proper */ + struct in_addr ipopt_dst; /* first-hop dst if source routed */ + char ipopt_list[MAX_IPOPTLEN]; /* options proper */ }; /* @@ -134,108 +134,108 @@ struct ipoption { */ struct ip_moptions { decl_lck_mtx_data(, imo_lock); - uint32_t imo_refcnt; /* ref count */ - uint32_t imo_debug; /* see ifa_debug flags */ - struct ifnet *imo_multicast_ifp; /* ifp for outgoing multicasts */ - u_char imo_multicast_ttl; /* TTL for outgoing multicasts */ - u_char imo_multicast_loop; /* 1 => hear sends if a member */ - u_short imo_num_memberships; /* no. 
memberships this socket */ - u_short imo_max_memberships; /* max memberships this socket */ - struct in_multi **imo_membership; /* group memberships */ - struct in_mfilter *imo_mfilters; /* source filters */ - u_int32_t imo_multicast_vif; /* vif num outgoing multicasts */ - struct in_addr imo_multicast_addr; /* ifindex/addr on MULTICAST_IF */ - void (*imo_trace) /* callback fn for tracing refs */ - (struct ip_moptions *, int); + uint32_t imo_refcnt; /* ref count */ + uint32_t imo_debug; /* see ifa_debug flags */ + struct ifnet *imo_multicast_ifp; /* ifp for outgoing multicasts */ + u_char imo_multicast_ttl; /* TTL for outgoing multicasts */ + u_char imo_multicast_loop; /* 1 => hear sends if a member */ + u_short imo_num_memberships; /* no. memberships this socket */ + u_short imo_max_memberships; /* max memberships this socket */ + struct in_multi **imo_membership; /* group memberships */ + struct in_mfilter *imo_mfilters; /* source filters */ + u_int32_t imo_multicast_vif; /* vif num outgoing multicasts */ + struct in_addr imo_multicast_addr; /* ifindex/addr on MULTICAST_IF */ + void (*imo_trace) /* callback fn for tracing refs */ + (struct ip_moptions *, int); }; -#define IMO_LOCK_ASSERT_HELD(_imo) \ +#define IMO_LOCK_ASSERT_HELD(_imo) \ LCK_MTX_ASSERT(&(_imo)->imo_lock, LCK_MTX_ASSERT_OWNED) -#define IMO_LOCK_ASSERT_NOTHELD(_imo) \ +#define IMO_LOCK_ASSERT_NOTHELD(_imo) \ LCK_MTX_ASSERT(&(_imo)->imo_lock, LCK_MTX_ASSERT_NOTOWNED) -#define IMO_LOCK(_imo) \ +#define IMO_LOCK(_imo) \ lck_mtx_lock(&(_imo)->imo_lock) -#define IMO_LOCK_SPIN(_imo) \ +#define IMO_LOCK_SPIN(_imo) \ lck_mtx_lock_spin(&(_imo)->imo_lock) -#define IMO_CONVERT_LOCK(_imo) do { \ - IMO_LOCK_ASSERT_HELD(_imo); \ - lck_mtx_convert_spin(&(_imo)->imo_lock); \ +#define IMO_CONVERT_LOCK(_imo) do { \ + IMO_LOCK_ASSERT_HELD(_imo); \ + lck_mtx_convert_spin(&(_imo)->imo_lock); \ } while (0) -#define IMO_UNLOCK(_imo) \ +#define IMO_UNLOCK(_imo) \ lck_mtx_unlock(&(_imo)->imo_lock) -#define IMO_ADDREF(_imo) \ +#define IMO_ADDREF(_imo) \ imo_addref(_imo, 0) -#define IMO_ADDREF_LOCKED(_imo) \ +#define IMO_ADDREF_LOCKED(_imo) \ imo_addref(_imo, 1) -#define IMO_REMREF(_imo) \ +#define IMO_REMREF(_imo) \ imo_remref(_imo) /* mbuf tag for ip_forwarding info */ struct ip_fwd_tag { - struct sockaddr_in *next_hop; /* next_hop */ + struct sockaddr_in *next_hop; /* next_hop */ }; #endif /* BSD_KERNEL_PRIVATE */ -struct ipstat { - u_int32_t ips_total; /* total packets received */ - u_int32_t ips_badsum; /* checksum bad */ - u_int32_t ips_tooshort; /* packet too short */ - u_int32_t ips_toosmall; /* not enough data */ - u_int32_t ips_badhlen; /* ip header length < data size */ - u_int32_t ips_badlen; /* ip length < ip header length */ - u_int32_t ips_fragments; /* fragments received */ - u_int32_t ips_fragdropped; /* frags dropped (dups, out of space) */ - u_int32_t ips_fragtimeout; /* fragments timed out */ - u_int32_t ips_forward; /* packets forwarded */ - u_int32_t ips_fastforward; /* packets fast forwarded */ - u_int32_t ips_cantforward; /* packets rcvd for unreachable dest */ - u_int32_t ips_redirectsent; /* packets forwarded on same net */ - u_int32_t ips_noproto; /* unknown or unsupported protocol */ - u_int32_t ips_delivered; /* datagrams delivered to upper level */ - u_int32_t ips_localout; /* total ip packets generated here */ - u_int32_t ips_odropped; /* lost packets due to nobufs, etc. 
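The IMO_LOCK_SPIN()/IMO_CONVERT_LOCK() pair above exists because xnu mutexes can be taken in a cheap spin mode and converted in place to a full mutex if the holder turns out to need to block. A hedged sketch of how callers typically use the pattern (the work functions are hypothetical; the macros are the ones defined above):

	IMO_LOCK_SPIN(imo);             /* cheap: expect a short hold */
	if (need_to_block) {            /* hypothetical condition */
		/* about to do something that can sleep: upgrade */
		IMO_CONVERT_LOCK(imo);  /* asserts held, then lck_mtx_convert_spin() */
		do_blocking_work(imo);  /* hypothetical */
	}
	IMO_UNLOCK(imo);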
*/ - u_int32_t ips_reassembled; /* total packets reassembled ok */ - u_int32_t ips_fragmented; /* datagrams successfully fragmented */ - u_int32_t ips_ofragments; /* output fragments created */ - u_int32_t ips_cantfrag; /* don't fragment flag was set, etc. */ - u_int32_t ips_badoptions; /* error in option processing */ - u_int32_t ips_noroute; /* packets discarded due to no route */ - u_int32_t ips_badvers; /* ip version != 4 */ - u_int32_t ips_rawout; /* total raw ip packets generated */ - u_int32_t ips_toolong; /* ip length > max ip packet size */ - u_int32_t ips_notmember; /* multicasts for unregistered grps */ - u_int32_t ips_nogif; /* no match gif found */ - u_int32_t ips_badaddr; /* invalid address on header */ - u_int32_t ips_pktdropcntrl; /* pkt dropped, no mbufs for ctl data */ - u_int32_t ips_rcv_swcsum; /* ip hdr swcksum (inbound), packets */ - u_int32_t ips_rcv_swcsum_bytes; /* ip hdr swcksum (inbound), bytes */ - u_int32_t ips_snd_swcsum; /* ip hdr swcksum (outbound), packets */ - u_int32_t ips_snd_swcsum_bytes; /* ip hdr swcksum (outbound), bytes */ - u_int32_t ips_adj; /* total packets trimmed/adjusted */ - u_int32_t ips_adj_hwcsum_clr; /* hwcksum discarded during adj */ - u_int32_t ips_rxc_collisions; /* rx chaining collisions */ - u_int32_t ips_rxc_chained; /* rx chains */ - u_int32_t ips_rxc_notchain; /* rx bypassed chaining */ - u_int32_t ips_rxc_chainsz_gt2; /* rx chain size greater than 2 */ +struct ipstat { + u_int32_t ips_total; /* total packets received */ + u_int32_t ips_badsum; /* checksum bad */ + u_int32_t ips_tooshort; /* packet too short */ + u_int32_t ips_toosmall; /* not enough data */ + u_int32_t ips_badhlen; /* ip header length < data size */ + u_int32_t ips_badlen; /* ip length < ip header length */ + u_int32_t ips_fragments; /* fragments received */ + u_int32_t ips_fragdropped; /* frags dropped (dups, out of space) */ + u_int32_t ips_fragtimeout; /* fragments timed out */ + u_int32_t ips_forward; /* packets forwarded */ + u_int32_t ips_fastforward; /* packets fast forwarded */ + u_int32_t ips_cantforward; /* packets rcvd for unreachable dest */ + u_int32_t ips_redirectsent; /* packets forwarded on same net */ + u_int32_t ips_noproto; /* unknown or unsupported protocol */ + u_int32_t ips_delivered; /* datagrams delivered to upper level */ + u_int32_t ips_localout; /* total ip packets generated here */ + u_int32_t ips_odropped; /* lost packets due to nobufs, etc. */ + u_int32_t ips_reassembled; /* total packets reassembled ok */ + u_int32_t ips_fragmented; /* datagrams successfully fragmented */ + u_int32_t ips_ofragments; /* output fragments created */ + u_int32_t ips_cantfrag; /* don't fragment flag was set, etc. 
*/ + u_int32_t ips_badoptions; /* error in option processing */ + u_int32_t ips_noroute; /* packets discarded due to no route */ + u_int32_t ips_badvers; /* ip version != 4 */ + u_int32_t ips_rawout; /* total raw ip packets generated */ + u_int32_t ips_toolong; /* ip length > max ip packet size */ + u_int32_t ips_notmember; /* multicasts for unregistered grps */ + u_int32_t ips_nogif; /* no match gif found */ + u_int32_t ips_badaddr; /* invalid address on header */ + u_int32_t ips_pktdropcntrl; /* pkt dropped, no mbufs for ctl data */ + u_int32_t ips_rcv_swcsum; /* ip hdr swcksum (inbound), packets */ + u_int32_t ips_rcv_swcsum_bytes; /* ip hdr swcksum (inbound), bytes */ + u_int32_t ips_snd_swcsum; /* ip hdr swcksum (outbound), packets */ + u_int32_t ips_snd_swcsum_bytes; /* ip hdr swcksum (outbound), bytes */ + u_int32_t ips_adj; /* total packets trimmed/adjusted */ + u_int32_t ips_adj_hwcsum_clr; /* hwcksum discarded during adj */ + u_int32_t ips_rxc_collisions; /* rx chaining collisions */ + u_int32_t ips_rxc_chained; /* rx chains */ + u_int32_t ips_rxc_notchain; /* rx bypassed chaining */ + u_int32_t ips_rxc_chainsz_gt2; /* rx chain size greater than 2 */ u_int32_t ips_rxc_chainsz_gt4; /* rx chain size greater than 4 */ - u_int32_t ips_rxc_notlist; /* count of pkts through ip_input */ - u_int32_t ips_raw_sappend_fail; /* sock append failed */ + u_int32_t ips_rxc_notlist; /* count of pkts through ip_input */ + u_int32_t ips_raw_sappend_fail; /* sock append failed */ u_int32_t ips_necp_policy_drop; /* NECP policy related drop */ }; struct ip_linklocal_stat { - u_int32_t iplls_in_total; - u_int32_t iplls_in_badttl; - u_int32_t iplls_out_total; - u_int32_t iplls_out_badttl; + u_int32_t iplls_in_total; + u_int32_t iplls_in_badttl; + u_int32_t iplls_out_total; + u_int32_t iplls_out_badttl; }; #ifdef KERNEL_PRIVATE @@ -246,15 +246,15 @@ struct ip_moptions; #ifdef BSD_KERNEL_PRIVATE /* flags passed to ip_output as last parameter */ -#define IP_FORWARDING 0x1 /* most of ip header exists */ -#define IP_RAWOUTPUT 0x2 /* raw ip header exists */ -#define IP_NOIPSEC 0x4 /* No IPSec processing */ -#define IP_ROUTETOIF SO_DONTROUTE /* bypass routing tables (0x0010) */ -#define IP_ALLOWBROADCAST SO_BROADCAST /* can send broadcast pkts (0x0020) */ -#define IP_OUTARGS 0x100 /* has ancillary output info */ +#define IP_FORWARDING 0x1 /* most of ip header exists */ +#define IP_RAWOUTPUT 0x2 /* raw ip header exists */ +#define IP_NOIPSEC 0x4 /* No IPSec processing */ +#define IP_ROUTETOIF SO_DONTROUTE /* bypass routing tables (0x0010) */ +#define IP_ALLOWBROADCAST SO_BROADCAST /* can send broadcast pkts (0x0020) */ +#define IP_OUTARGS 0x100 /* has ancillary output info */ -#define IP_HDR_ALIGNED_P(_ip) ((((uintptr_t)(_ip)) & ((uintptr_t)3)) == 0) -#define IP_OFF_IS_ATOMIC(_ip_off) ((_ip_off & (IP_DF | IP_MF | IP_OFFMASK)) == IP_DF) +#define IP_HDR_ALIGNED_P(_ip) ((((uintptr_t)(_ip)) & ((uintptr_t)3)) == 0) +#define IP_OFF_IS_ATOMIC(_ip_off) ((_ip_off & (IP_DF | IP_MF | IP_OFFMASK)) == IP_DF) /* * On platforms which require strict alignment (currently for anything but @@ -262,13 +262,13 @@ struct ip_moptions; * is 32-bit aligned, and assert otherwise. 
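struct ipstat above is the counter block that netstat(1) renders for the "ip:" section; user space reads it with sysctl. A minimal user-space sketch, assuming the conventional Darwin MIB name net.inet.ip.stats (verify on your target):

	#include <sys/types.h>
	#include <sys/socket.h>
	#include <sys/sysctl.h>
	#include <netinet/in.h>
	#include <netinet/ip_var.h>
	#include <stdio.h>

	int
	main(void)
	{
		struct ipstat ips;
		size_t len = sizeof(ips);

		if (sysctlbyname("net.inet.ip.stats", &ips, &len, NULL, 0) == -1) {
			perror("sysctlbyname");
			return 1;
		}
		printf("total %u, badsum %u, forwarded %u, delivered %u\n",
		    ips.ips_total, ips.ips_badsum, ips.ips_forward,
		    ips.ips_delivered);
		return 0;
	}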
*/ #if defined(__i386__) || defined(__x86_64__) -#define IP_HDR_STRICT_ALIGNMENT_CHECK(_ip) do { } while (0) +#define IP_HDR_STRICT_ALIGNMENT_CHECK(_ip) do { } while (0) #else /* !__i386__ && !__x86_64__ */ -#define IP_HDR_STRICT_ALIGNMENT_CHECK(_ip) do { \ - if (!IP_HDR_ALIGNED_P(_ip)) { \ - panic_plain("\n%s: Unaligned IP header %p\n", \ - __func__, _ip); \ - } \ +#define IP_HDR_STRICT_ALIGNMENT_CHECK(_ip) do { \ + if (!IP_HDR_ALIGNED_P(_ip)) { \ + panic_plain("\n%s: Unaligned IP header %p\n", \ + __func__, _ip); \ + } \ } while (0) #endif /* !__i386__ && !__x86_64__ */ @@ -286,28 +286,28 @@ struct sockopt; * ipoa_retflags any additional information regarding the error. */ struct ip_out_args { - unsigned int ipoa_boundif; /* boundif interface index */ - struct flowadv ipoa_flowadv; /* flow advisory code */ - u_int32_t ipoa_flags; /* IPOAF output flags (see below) */ -#define IPOAF_SELECT_SRCIF 0x00000001 /* src interface selection */ -#define IPOAF_BOUND_IF 0x00000002 /* boundif value is valid */ -#define IPOAF_BOUND_SRCADDR 0x00000004 /* bound to src address */ -#define IPOAF_NO_CELLULAR 0x00000010 /* skip IFT_CELLULAR */ -#define IPOAF_NO_EXPENSIVE 0x00000020 /* skip IFT_EXPENSIVE */ -#define IPOAF_AWDL_UNRESTRICTED 0x00000040 /* can send over - AWDL_RESTRICTED */ -#define IPOAF_QOSMARKING_ALLOWED 0x00000080 /* policy allows Fastlane DSCP marking */ - u_int32_t ipoa_retflags; /* IPOARF return flags (see below) */ -#define IPOARF_IFDENIED 0x00000001 /* denied access to interface */ - int ipoa_sotc; /* traffic class for Fastlane DSCP mapping */ - int ipoa_netsvctype; /* network service type */ + unsigned int ipoa_boundif; /* boundif interface index */ + struct flowadv ipoa_flowadv; /* flow advisory code */ + u_int32_t ipoa_flags; /* IPOAF output flags (see below) */ +#define IPOAF_SELECT_SRCIF 0x00000001 /* src interface selection */ +#define IPOAF_BOUND_IF 0x00000002 /* boundif value is valid */ +#define IPOAF_BOUND_SRCADDR 0x00000004 /* bound to src address */ +#define IPOAF_NO_CELLULAR 0x00000010 /* skip IFT_CELLULAR */ +#define IPOAF_NO_EXPENSIVE 0x00000020 /* skip IFT_EXPENSIVE */ +#define IPOAF_AWDL_UNRESTRICTED 0x00000040 /* can send over + * AWDL_RESTRICTED */ +#define IPOAF_QOSMARKING_ALLOWED 0x00000080 /* policy allows Fastlane DSCP marking */ + u_int32_t ipoa_retflags; /* IPOARF return flags (see below) */ +#define IPOARF_IFDENIED 0x00000001 /* denied access to interface */ + int ipoa_sotc; /* traffic class for Fastlane DSCP mapping */ + int ipoa_netsvctype; /* network service type */ }; extern struct ipstat ipstat; extern int ip_use_randomid; -extern u_short ip_id; /* ip packet ctr, for ids */ -extern int ip_defttl; /* default IP ttl */ -extern int ipforwarding; /* ip forwarding */ +extern u_short ip_id; /* ip packet ctr, for ids */ +extern int ip_defttl; /* default IP ttl */ +extern int ipforwarding; /* ip forwarding */ extern int rfc6864; extern struct protosw *ip_protox[]; extern struct pr_usrreqs rip_usrreqs; diff --git a/bsd/netinet/isakmp.h b/bsd/netinet/isakmp.h index 299e90a66..ed9f40ad9 100644 --- a/bsd/netinet/isakmp.h +++ b/bsd/netinet/isakmp.h @@ -37,40 +37,40 @@ typedef u_char cookie_t[8]; typedef u_char msgid_t[4]; /* 3.1 ISAKMP Header Format (IKEv1 and IKEv2) - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - ! Initiator ! - ! Cookie ! + * ! Initiator ! + * ! Cookie ! 
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - ! Responder ! - ! Cookie ! + * ! Responder ! + * ! Cookie ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - ! Next Payload ! MjVer ! MnVer ! Exchange Type ! Flags ! + * ! Next Payload ! MjVer ! MnVer ! Exchange Type ! Flags ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - ! Message ID ! + * ! Message ID ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - ! Length ! + * ! Length ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct isakmp { - cookie_t i_ck; /* Initiator Cookie */ - cookie_t r_ck; /* Responder Cookie */ - uint8_t np; /* Next Payload Type */ + cookie_t i_ck; /* Initiator Cookie */ + cookie_t r_ck; /* Responder Cookie */ + uint8_t np; /* Next Payload Type */ uint8_t vers; -#define ISAKMP_VERS_MAJOR 0xf0 -#define ISAKMP_VERS_MAJOR_SHIFT 4 -#define ISAKMP_VERS_MINOR 0x0f -#define ISAKMP_VERS_MINOR_SHIFT 0 - uint8_t etype; /* Exchange Type */ - uint8_t flags; /* Flags */ +#define ISAKMP_VERS_MAJOR 0xf0 +#define ISAKMP_VERS_MAJOR_SHIFT 4 +#define ISAKMP_VERS_MINOR 0x0f +#define ISAKMP_VERS_MINOR_SHIFT 0 + uint8_t etype; /* Exchange Type */ + uint8_t flags; /* Flags */ msgid_t msgid; - uint32_t len; /* Length */ + uint32_t len; /* Length */ }; /* 3.2 Payload Generic Header - 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - ! Next Payload ! RESERVED ! Payload Length ! + * ! Next Payload ! RESERVED ! Payload Length ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct isakmp_gen { diff --git a/bsd/netinet/kpi_ipfilter.c b/bsd/netinet/kpi_ipfilter.c index ad5e5f664..07ac30572 100644 --- a/bsd/netinet/kpi_ipfilter.c +++ b/bsd/netinet/kpi_ipfilter.c @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#include /* for definition of NULL */ +#include /* for definition of NULL */ #include #include #include @@ -36,7 +36,7 @@ #include -#define _IP_VHL +#define _IP_VHL #include #include #include @@ -67,9 +67,9 @@ static u_int32_t kipf_ref = 0; static u_int32_t kipf_delayed_remove = 0; u_int32_t kipf_count = 0; -__private_extern__ struct ipfilter_list ipv4_filters = TAILQ_HEAD_INITIALIZER(ipv4_filters); -__private_extern__ struct ipfilter_list ipv6_filters = TAILQ_HEAD_INITIALIZER(ipv6_filters); -__private_extern__ struct ipfilter_list tbr_filters = TAILQ_HEAD_INITIALIZER(tbr_filters); +__private_extern__ struct ipfilter_list ipv4_filters = TAILQ_HEAD_INITIALIZER(ipv4_filters); +__private_extern__ struct ipfilter_list ipv6_filters = TAILQ_HEAD_INITIALIZER(ipv6_filters); +__private_extern__ struct ipfilter_list tbr_filters = TAILQ_HEAD_INITIALIZER(tbr_filters); #undef ipf_addv4 #undef ipf_addv6 @@ -94,8 +94,9 @@ ipf_unref(void) { lck_mtx_lock(kipf_lock); - if (kipf_ref == 0) + if (kipf_ref == 0) { panic("ipf_unref: kipf_ref == 0\n"); + } kipf_ref--; if (kipf_ref == 0 && kipf_delayed_remove != 0) { @@ -116,8 +117,9 @@ ipf_unref(void) ipf_detach(cookie); lck_mtx_lock(kipf_lock); /* In case some filter got to run while we released the lock */ - if (kipf_ref != 0) + if (kipf_ref != 0) { break; + } } } } @@ -131,13 +133,15 @@ ipf_add( struct ipfilter_list *head, bool is_internal) { - struct ipfilter *new_filter; - if (filter->name == NULL || (filter->ipf_input == NULL && filter->ipf_output == NULL)) - return (EINVAL); + struct ipfilter *new_filter; + if (filter->name == 
NULL || (filter->ipf_input == NULL && filter->ipf_output == NULL)) { + return EINVAL; + } MALLOC(new_filter, struct ipfilter *, sizeof(*new_filter), M_IFADDR, M_WAITOK); - if (new_filter == NULL) - return (ENOMEM); + if (new_filter == NULL) { + return ENOMEM; + } lck_mtx_lock(kipf_lock); new_filter->ipf_filter = *filter; @@ -159,7 +163,7 @@ ipf_add( OSAddAtomic(1, &kipf_count); routegenid_update(); - return (0); + return 0; } errno_t @@ -167,7 +171,7 @@ ipf_addv4_internal( const struct ipf_filter *filter, ipfilter_t *filter_ref) { - return (ipf_add(filter, filter_ref, &ipv4_filters, true)); + return ipf_add(filter, filter_ref, &ipv4_filters, true); } errno_t @@ -175,7 +179,7 @@ ipf_addv4( const struct ipf_filter *filter, ipfilter_t *filter_ref) { - return (ipf_add(filter, filter_ref, &ipv4_filters, false)); + return ipf_add(filter, filter_ref, &ipv4_filters, false); } errno_t @@ -183,7 +187,7 @@ ipf_addv6_internal( const struct ipf_filter *filter, ipfilter_t *filter_ref) { - return (ipf_add(filter, filter_ref, &ipv6_filters, true)); + return ipf_add(filter, filter_ref, &ipv6_filters, true); } errno_t @@ -191,7 +195,7 @@ ipf_addv6( const struct ipf_filter *filter, ipfilter_t *filter_ref) { - return (ipf_add(filter, filter_ref, &ipv6_filters, false)); + return ipf_add(filter, filter_ref, &ipv6_filters, false); } static errno_t @@ -203,7 +207,7 @@ ipf_input_detached(void *cookie, mbuf_t *data, int offset, u_int8_t protocol) printf("ipf_input_detached\n"); #endif /* DEBUG */ - return (0); + return 0; } static errno_t @@ -215,18 +219,19 @@ ipf_output_detached(void *cookie, mbuf_t *data, ipf_pktopts_t options) printf("ipf_output_detached\n"); #endif /* DEBUG */ - return (0); + return 0; } errno_t ipf_remove( ipfilter_t filter_ref) { - struct ipfilter *match = (struct ipfilter *)filter_ref; + struct ipfilter *match = (struct ipfilter *)filter_ref; struct ipfilter_list *head; - if (match == 0 || (match->ipf_head != &ipv4_filters && match->ipf_head != &ipv6_filters)) - return (EINVAL); + if (match == 0 || (match->ipf_head != &ipv4_filters && match->ipf_head != &ipv6_filters)) { + return EINVAL; + } head = match->ipf_head; @@ -251,21 +256,21 @@ ipf_remove( TAILQ_REMOVE(head, match, ipf_link); lck_mtx_unlock(kipf_lock); - if (ipf_detach) + if (ipf_detach) { ipf_detach(cookie); + } FREE(match, M_IFADDR); /* This will force TCP to re-evaluate its use of TSO */ OSAddAtomic(-1, &kipf_count); routegenid_update(); - } - return (0); + return 0; } } lck_mtx_unlock(kipf_lock); - return (ENOENT); + return ENOENT; } int log_for_en1 = 0; @@ -279,7 +284,7 @@ ipf_inject_input( struct m_tag *mtag = 0; struct ip *ip = mtod(m, struct ip *); struct ip6_hdr *ip6; - u_int8_t vers; + u_int8_t vers; int hlen; errno_t error = 0; protocol_family_t proto; @@ -291,15 +296,15 @@ ipf_inject_input( vers = IP_VHL_V(ip->ip_vhl); switch (vers) { - case 4: - proto = PF_INET; - break; - case 6: - proto = PF_INET6; - break; - default: - error = ENOTSUP; - goto done; + case 4: + proto = PF_INET; + break; + case 6: + proto = PF_INET6; + break; + default: + error = ENOTSUP; + goto done; } if (filter_ref == 0 && m->m_pkthdr.rcvif == 0) { @@ -307,33 +312,33 @@ ipf_inject_input( * Search for interface with the local address */ switch (proto) { - case PF_INET: - pkt_dst = &ip->ip_dst; - lck_rw_lock_shared(in_ifaddr_rwlock); - TAILQ_FOREACH(ia, INADDR_HASH(pkt_dst->s_addr), ia_hash) { - if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst->s_addr) { - m->m_pkthdr.rcvif = ia->ia_ifp; - break; - } + case PF_INET: + pkt_dst = &ip->ip_dst; + 
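ipf_add() above backs the public ipf_addv4()/ipf_addv6() KPIs; the _internal variants differ only in marking the filter as internal. A hedged kext-side sketch of registering and later removing a filter (callback and bundle names are hypothetical; error handling trimmed):

	#include <netinet/kpi_ipfilter.h>

	static ipfilter_t g_ref;

	static errno_t
	my_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
	{
		return 0;               /* 0: let normal processing continue */
	}

	static void
	my_detach(void *cookie)
	{
		/* release per-filter state; called once after ipf_remove() */
	}

	static errno_t
	my_attach(void)
	{
		struct ipf_filter f = {
			.cookie     = NULL,
			.name       = "com.example.ipfilter",   /* hypothetical */
			.ipf_input  = my_input,
			.ipf_output = NULL,
			.ipf_detach = my_detach,
		};
		/* later: ipf_remove(g_ref) tears the filter back down */
		return ipf_addv4(&f, &g_ref);
	}

Note that ipf_add() rejects a filter with no name, or with neither an input nor an output callback, which is exactly the EINVAL check above.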
lck_rw_lock_shared(in_ifaddr_rwlock); + TAILQ_FOREACH(ia, INADDR_HASH(pkt_dst->s_addr), ia_hash) { + if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst->s_addr) { + m->m_pkthdr.rcvif = ia->ia_ifp; + break; } - lck_rw_done(in_ifaddr_rwlock); - break; - - case PF_INET6: - ip6 = mtod(m, struct ip6_hdr *); - pkt_dst6.sin6_addr = ip6->ip6_dst; - lck_rw_lock_shared(&in6_ifaddr_rwlock); - for (ia6 = in6_ifaddrs; ia6 != NULL; ia6 = ia6->ia_next) { - if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &pkt_dst6.sin6_addr)) { - m->m_pkthdr.rcvif = ia6->ia_ifp; - break; - } + } + lck_rw_done(in_ifaddr_rwlock); + break; + + case PF_INET6: + ip6 = mtod(m, struct ip6_hdr *); + pkt_dst6.sin6_addr = ip6->ip6_dst; + lck_rw_lock_shared(&in6_ifaddr_rwlock); + for (ia6 = in6_ifaddrs; ia6 != NULL; ia6 = ia6->ia_next) { + if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &pkt_dst6.sin6_addr)) { + m->m_pkthdr.rcvif = ia6->ia_ifp; + break; } - lck_rw_done(&in6_ifaddr_rwlock); - break; + } + lck_rw_done(&in6_ifaddr_rwlock); + break; - default: - break; + default: + break; } /* @@ -353,19 +358,19 @@ ipf_inject_input( } if (filter_ref != 0) { mtag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT, - sizeof (ipfilter_t), M_NOWAIT, m); + sizeof(ipfilter_t), M_NOWAIT, m); if (mtag == NULL) { error = ENOMEM; goto done; } - *(ipfilter_t *)(mtag+1) = filter_ref; + *(ipfilter_t *)(mtag + 1) = filter_ref; m_tag_prepend(m, mtag); } error = proto_inject(proto, data); done: - return (error); + return error; } static errno_t @@ -385,19 +390,20 @@ ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; /* Make the IP header contiguous in the mbuf */ - if ((size_t)m->m_len < sizeof (struct ip)) { - m = m_pullup(m, sizeof (struct ip)); - if (m == NULL) - return (ENOMEM); + if ((size_t)m->m_len < sizeof(struct ip)) { + m = m_pullup(m, sizeof(struct ip)); + if (m == NULL) { + return ENOMEM; + } } ip = (struct ip *)m_mtod(m); if (filter_ref != 0) { mtag = m_tag_create(KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFILT, sizeof (ipfilter_t), M_NOWAIT, m); + KERNEL_TAG_TYPE_IPFILT, sizeof(ipfilter_t), M_NOWAIT, m); if (mtag == NULL) { m_freem(m); - return (ENOMEM); + return ENOMEM; } *(ipfilter_t *)(mtag + 1) = filter_ref; m_tag_prepend(m, mtag); @@ -411,19 +417,23 @@ ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) } if (options != NULL) { - if (options->ippo_flags & IPPOF_SELECT_SRCIF) + if (options->ippo_flags & IPPOF_SELECT_SRCIF) { ipoa.ipoa_flags |= IPOAF_SELECT_SRCIF; + } if (options->ippo_flags & IPPOF_BOUND_IF) { ipoa.ipoa_flags |= IPOAF_BOUND_IF; ipoa.ipoa_boundif = options->ippo_flags >> IPPOF_SHIFT_IFSCOPE; } - if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) + if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) { ipoa.ipoa_flags |= IPOAF_NO_CELLULAR; - if (options->ippo_flags & IPPOF_BOUND_SRCADDR) + } + if (options->ippo_flags & IPPOF_BOUND_SRCADDR) { ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR; - if (options->ippo_flags & IPPOF_NO_IFF_EXPENSIVE) + } + if (options->ippo_flags & IPPOF_NO_IFF_EXPENSIVE) { ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE; + } } bzero(&ro, sizeof(struct route)); @@ -442,10 +452,11 @@ ipf_injectv4_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) /* Release the route */ ROUTE_RELEASE(&ro); - if (imo != NULL) + if (imo != NULL) { IMO_REMREF(imo); + } - return (error); + return error; } #if INET6 @@ -468,17 +479,18 @@ ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) /* Make the IP header 
contiguous in the mbuf */ if ((size_t)m->m_len < sizeof(struct ip6_hdr)) { m = m_pullup(m, sizeof(struct ip6_hdr)); - if (m == NULL) - return (ENOMEM); + if (m == NULL) { + return ENOMEM; + } } ip6 = (struct ip6_hdr *)m_mtod(m); if (filter_ref != 0) { mtag = m_tag_create(KERNEL_MODULE_TAG_ID, - KERNEL_TAG_TYPE_IPFILT, sizeof (ipfilter_t), M_NOWAIT, m); + KERNEL_TAG_TYPE_IPFILT, sizeof(ipfilter_t), M_NOWAIT, m); if (mtag == NULL) { m_freem(m); - return (ENOMEM); + return ENOMEM; } *(ipfilter_t *)(mtag + 1) = filter_ref; m_tag_prepend(m, mtag); @@ -492,19 +504,23 @@ ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) } if (options != NULL) { - if (options->ippo_flags & IPPOF_SELECT_SRCIF) + if (options->ippo_flags & IPPOF_SELECT_SRCIF) { ip6oa.ip6oa_flags |= IP6OAF_SELECT_SRCIF; + } if (options->ippo_flags & IPPOF_BOUND_IF) { ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; ip6oa.ip6oa_boundif = options->ippo_flags >> IPPOF_SHIFT_IFSCOPE; } - if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) + if (options->ippo_flags & IPPOF_NO_IFT_CELLULAR) { ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR; - if (options->ippo_flags & IPPOF_BOUND_SRCADDR) + } + if (options->ippo_flags & IPPOF_BOUND_SRCADDR) { ip6oa.ip6oa_flags |= IP6OAF_BOUND_SRCADDR; - if (options->ippo_flags & IPPOF_NO_IFF_EXPENSIVE) + } + if (options->ippo_flags & IPPOF_NO_IFF_EXPENSIVE) { ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE; + } } bzero(&ro, sizeof(struct route_in6)); @@ -519,10 +535,11 @@ ipf_injectv6_out(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options) /* Release the route */ ROUTE_RELEASE(&ro); - if (im6o != NULL) + if (im6o != NULL) { IM6O_REMREF(im6o); + } - return (error); + return error; } #endif /* INET6 */ @@ -532,35 +549,36 @@ ipf_inject_output( ipfilter_t filter_ref, ipf_pktopts_t options) { - struct mbuf *m = (struct mbuf *)data; - u_int8_t vers; - errno_t error = 0; + struct mbuf *m = (struct mbuf *)data; + u_int8_t vers; + errno_t error = 0; /* Make one byte of the header contiguous in the mbuf */ if (m->m_len < 1) { m = m_pullup(m, 1); - if (m == NULL) + if (m == NULL) { goto done; + } } vers = (*(u_int8_t *)m_mtod(m)) >> 4; switch (vers) { - case 4: - error = ipf_injectv4_out(data, filter_ref, options); - break; + case 4: + error = ipf_injectv4_out(data, filter_ref, options); + break; #if INET6 - case 6: - error = ipf_injectv6_out(data, filter_ref, options); - break; + case 6: + error = ipf_injectv6_out(data, filter_ref, options); + break; #endif - default: - m_freem(m); - error = ENOTSUP; - break; + default: + m_freem(m); + error = ENOTSUP; + break; } done: - return (error); + return error; } __private_extern__ ipfilter_t @@ -571,11 +589,11 @@ ipf_get_inject_filter(struct mbuf *m) mtag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFILT, NULL); if (mtag) { - filter_ref = *(ipfilter_t *)(mtag+1); + filter_ref = *(ipfilter_t *)(mtag + 1); m_tag_delete(m, mtag); } - return (filter_ref); + return filter_ref; } __private_extern__ int @@ -609,7 +627,7 @@ ipf_init(void) lck_mtx_init(kipf_lock, lck_grp, lck_attributes); - done: +done: if (lck_grp) { lck_grp_free(lck_grp); lck_grp = 0; @@ -623,5 +641,5 @@ ipf_init(void) lck_attributes = 0; } - return (error); + return error; } diff --git a/bsd/netinet/kpi_ipfilter.h b/bsd/netinet/kpi_ipfilter.h index ae9adf13d..1739bc708 100644 --- a/bsd/netinet/kpi_ipfilter.h +++ b/bsd/netinet/kpi_ipfilter.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2017 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header kpi_ipfilter.h - This header defines an API to attach IP filters. IP filters may be - attached to intercept either IPv4 or IPv6 packets. The filters can - intercept all IP packets in to and out of the host regardless of - interface. + * @header kpi_ipfilter.h + * This header defines an API to attach IP filters. IP filters may be + * attached to intercept either IPv4 or IPv6 packets. The filters can + * intercept all IP packets in to and out of the host regardless of + * interface. */ #ifndef __KPI_IPFILTER__ @@ -41,23 +41,23 @@ /* * ipf_pktopts * - * Options for outgoing packets. The options need to be preserved when + * Options for outgoing packets. The options need to be preserved when * re-injecting a packet. */ struct ipf_pktopts { - u_int32_t ippo_flags; - ifnet_t ippo_mcast_ifnet; - int ippo_mcast_loop; - u_int8_t ippo_mcast_ttl; + u_int32_t ippo_flags; + ifnet_t ippo_mcast_ifnet; + int ippo_mcast_loop; + u_int8_t ippo_mcast_ttl; }; -#define IPPOF_MCAST_OPTS 0x1 +#define IPPOF_MCAST_OPTS 0x1 #ifdef PRIVATE -#define IPPOF_BOUND_IF 0x2 -#define IPPOF_NO_IFT_CELLULAR 0x4 -#define IPPOF_SELECT_SRCIF 0x8 -#define IPPOF_BOUND_SRCADDR 0x10 -#define IPPOF_SHIFT_IFSCOPE 16 -#define IPPOF_NO_IFF_EXPENSIVE 0x20 +#define IPPOF_BOUND_IF 0x2 +#define IPPOF_NO_IFT_CELLULAR 0x4 +#define IPPOF_SELECT_SRCIF 0x8 +#define IPPOF_BOUND_SRCADDR 0x10 +#define IPPOF_SHIFT_IFSCOPE 16 +#define IPPOF_NO_IFF_EXPENSIVE 0x20 #endif /* PRIVATE */ typedef struct ipf_pktopts *ipf_pktopts_t; @@ -65,96 +65,96 @@ typedef struct ipf_pktopts *ipf_pktopts_t; __BEGIN_DECLS /*! - @typedef ipf_input_func - - @discussion ipf_input_func is used to filter incoming ip packets. - The IP filter is called for packets from all interfaces. The - filter is called between when the general IP processing is - handled and when the packet is passed up to the next layer - protocol such as udp or tcp. In the case of encapsulation, such - as UDP in ESP (IPSec), your filter will be called once for ESP - and then again for UDP. This will give your filter an - opportunity to process the ESP header as well as the decrypted - packet. Offset and protocol are used to determine where in the - packet processing is currently occuring. If you're only - interested in TCP or UDP packets, just return 0 if protocol - doesn't match TCP or UDP. - @param cookie The cookie specified when your filter was attached. 
- @param data The reassembled ip - header. - @param offset An offset to the next header - (udp/tcp/icmp/esp/etc...). - @param protocol The protocol type (udp/tcp/icmp/etc...) of the IP packet - @result Return: - 0 - The caller will continue with normal processing of the - packet. - EJUSTRETURN - The caller will stop processing the packet, - the packet will not be freed. - Anything Else - The caller will free the packet and stop - processing. -*/ -typedef errno_t (*ipf_input_func)(void *cookie, mbuf_t *data, int offset, + * @typedef ipf_input_func + * + * @discussion ipf_input_func is used to filter incoming ip packets. + * The IP filter is called for packets from all interfaces. The + * filter is called between when the general IP processing is + * handled and when the packet is passed up to the next layer + * protocol such as udp or tcp. In the case of encapsulation, such + * as UDP in ESP (IPSec), your filter will be called once for ESP + * and then again for UDP. This will give your filter an + * opportunity to process the ESP header as well as the decrypted + * packet. Offset and protocol are used to determine where in the + * packet processing is currently occurring. If you're only + * interested in TCP or UDP packets, just return 0 if protocol + * doesn't match TCP or UDP. + * @param cookie The cookie specified when your filter was attached. + * @param data The reassembled ip packet, data will start at the ip + * header. + * @param offset An offset to the next header + * (udp/tcp/icmp/esp/etc...). + * @param protocol The protocol type (udp/tcp/icmp/etc...) of the IP packet + * @result Return: + * 0 - The caller will continue with normal processing of the + * packet. + * EJUSTRETURN - The caller will stop processing the packet, + * the packet will not be freed. + * Anything Else - The caller will free the packet and stop + * processing. + */ +typedef errno_t (*ipf_input_func)(void *cookie, mbuf_t *data, int offset, u_int8_t protocol); /*! - @typedef ipf_output_func - - @discussion ipf_output_func is used to filter outbound ip packets. - The IP filter is called for packets to all interfaces. The - filter is called before fragmentation and IPSec processing. If - you need to change the destination IP address, call - ipf_inject_output and return EJUSTRETURN. - @param cookie The cookie specified when your filter was attached. - @param data The ip packet, will contain an IP header followed by the - rest of the IP packet. - @result Return: - 0 - The caller will continue with normal processing of the - packet. - EJUSTRETURN - The caller will stop processing the packet, - the packet will not be freed. - Anything Else - The caller will free the packet and stop - processing. -*/ -typedef errno_t (*ipf_output_func)(void *cookie, mbuf_t *data, + * @typedef ipf_output_func + * + * @discussion ipf_output_func is used to filter outbound ip packets. + * The IP filter is called for packets to all interfaces. The + * filter is called before fragmentation and IPSec processing. If + * you need to change the destination IP address, call + * ipf_inject_output and return EJUSTRETURN. + * @param cookie The cookie specified when your filter was attached. + * @param data The ip packet, will contain an IP header followed by the + * rest of the IP packet. + * @result Return: + * 0 - The caller will continue with normal processing of the + * packet. + * EJUSTRETURN - The caller will stop processing the packet, + * the packet will not be freed.
+ * Anything Else - The caller will free the packet and stop + * processing. + */ +typedef errno_t (*ipf_output_func)(void *cookie, mbuf_t *data, ipf_pktopts_t options); /*! - @typedef ipf_detach_func - - @discussion ipf_detach_func is called to notify your filter that it - has been detached. - @param cookie The cookie specified when your filter was attached. -*/ -typedef void (*ipf_detach_func)(void *cookie); + * @typedef ipf_detach_func + * + * @discussion ipf_detach_func is called to notify your filter that it + * has been detached. + * @param cookie The cookie specified when your filter was attached. + */ +typedef void (*ipf_detach_func)(void *cookie); /*! - @typedef ipf_filter - @discussion This structure is used to define an IP filter for - use with the ipf_addv4 or ipf_addv6 function. - @field cookie A kext defined cookie that will be passed to all - filter functions. - @field name A filter name used for debugging purposes. - @field ipf_input The filter function to handle inbound packets. - @field ipf_output The filter function to handle outbound packets. - @field ipf_detach The filter function to notify of a detach. -*/ + * @typedef ipf_filter + * @discussion This structure is used to define an IP filter for + * use with the ipf_addv4 or ipf_addv6 function. + * @field cookie A kext defined cookie that will be passed to all + * filter functions. + * @field name A filter name used for debugging purposes. + * @field ipf_input The filter function to handle inbound packets. + * @field ipf_output The filter function to handle outbound packets. + * @field ipf_detach The filter function to notify of a detach. + */ struct ipf_filter { - void *cookie; - const char *name; - ipf_input_func ipf_input; - ipf_output_func ipf_output; - ipf_detach_func ipf_detach; + void *cookie; + const char *name; + ipf_input_func ipf_input; + ipf_output_func ipf_output; + ipf_detach_func ipf_detach; }; struct opaque_ipfilter; -typedef struct opaque_ipfilter *ipfilter_t; +typedef struct opaque_ipfilter *ipfilter_t; /*! - @function ipf_addv4 - @discussion Attaches an IPv4 ip filter. - @param filter A structure defining the filter. - @param filter_ref A reference to the filter used to detach it. - @result 0 on success otherwise the errno error. + * @function ipf_addv4 + * @discussion Attaches an IPv4 ip filter. + * @param filter A structure defining the filter. + * @param filter_ref A reference to the filter used to detach it. + * @result 0 on success otherwise the errno error. */ #ifdef KERNEL_PRIVATE extern errno_t ipf_addv4_internal(const struct ipf_filter *filter, @@ -168,11 +168,11 @@ extern errno_t ipf_addv4(const struct ipf_filter *filter, #endif /* KERNEL_PRIVATE */ /*! - @function ipf_addv6 - @discussion Attaches an IPv6 ip filter. - @param filter A structure defining the filter. - @param filter_ref A reference to the filter used to detach it. - @result 0 on success otherwise the errno error. + * @function ipf_addv6 + * @discussion Attaches an IPv6 ip filter. + * @param filter A structure defining the filter. + * @param filter_ref A reference to the filter used to detach it. + * @result 0 on success otherwise the errno error. */ #ifdef KERNEL_PRIVATE extern errno_t ipf_addv6_internal(const struct ipf_filter *filter, @@ -186,49 +186,49 @@ extern errno_t ipf_addv6(const struct ipf_filter *filter, #endif /* KERNEL_PRIVATE */ /*! - @function ipf_remove - @discussion Detaches an IPv4 or IPv6 filter. - @param filter_ref The reference to the filter returned from ipf_addv4 or - ipf_addv6. 
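The return-value contract documented above (0 / EJUSTRETURN / anything else) is the whole protocol between a filter and the stack. A hedged sketch of an input callback that uses the offset and protocol arguments the way the @discussion suggests; the port number is hypothetical, and mbuf_copydata() is the standard mbuf KPI accessor:

	static errno_t
	udp_watch_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
	{
		struct udphdr uh;

		if (protocol != IPPROTO_UDP) {
			return 0;                       /* not ours: pass along */
		}
		/* offset points at the transport header within the packet */
		if (mbuf_copydata(*data, offset, sizeof(uh), &uh) != 0) {
			return 0;                       /* too short: pass along */
		}
		if (uh.uh_dport == htons(9999)) {       /* hypothetical port */
			return EPERM;                   /* caller frees and stops */
		}
		return 0;
	}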
- @result 0 on success otherwise the errno error. + * @function ipf_remove + * @discussion Detaches an IPv4 or IPv6 filter. + * @param filter_ref The reference to the filter returned from ipf_addv4 or + * ipf_addv6. + * @result 0 on success otherwise the errno error. */ extern errno_t ipf_remove(ipfilter_t filter_ref); /*! - @function ipf_inject_input - @discussion Inject an IP packet as though it had just been - reassembled in ip_input. When re-injecting a packet intercepted - by the filter's ipf_input function, an IP filter can pass its - reference to avoid processing the packet twice. This also - prevents ip filters installed before this filter from - getting a chance to process the packet. If the filter modified - the packet, it should not specify the filter ref to give other - filters a chance to process the new packet. - - Caller is responsible for freeing mbuf chain in the event that - ipf_inject_input returns an error. - @param data The complete IPv4 or IPv6 packet, receive interface must - be set. - @param filter_ref The reference to the filter injecting the data - @result 0 on success otherwise the errno error. + * @function ipf_inject_input + * @discussion Inject an IP packet as though it had just been + * reassembled in ip_input. When re-injecting a packet intercepted + * by the filter's ipf_input function, an IP filter can pass its + * reference to avoid processing the packet twice. This also + * prevents ip filters installed before this filter from + * getting a chance to process the packet. If the filter modified + * the packet, it should not specify the filter ref to give other + * filters a chance to process the new packet. + * + * Caller is responsible for freeing mbuf chain in the event that + * ipf_inject_input returns an error. + * @param data The complete IPv4 or IPv6 packet, receive interface must + * be set. + * @param filter_ref The reference to the filter injecting the data + * @result 0 on success otherwise the errno error. */ extern errno_t ipf_inject_input(mbuf_t data, ipfilter_t filter_ref); /*! - @function ipf_inject_output - @discussion Inject an IP packet as though it had just been sent to - ip_output. When re-injecting a packet intercepted by the - filter's ipf_output function, an IP filter can pass its - reference to avoid processing the packet twice. This also - prevents ip filters installed before this filter from getting a - chance to process the packet. If the filter modified the packet, - it should not specify the filter ref to give other filters a - chance to process the new packet. - @param data The complete IPv4 or IPv6 packet. - @param filter_ref The reference to the filter injecting the data - @param options Output options for the packet - @result 0 on success otherwise the errno error. ipf_inject_output - will always free the mbuf. + * @function ipf_inject_output + * @discussion Inject an IP packet as though it had just been sent to + * ip_output. When re-injecting a packet intercepted by the + * filter's ipf_output function, an IP filter can pass its + * reference to avoid processing the packet twice. This also + * prevents ip filters installed before this filter from getting a + * chance to process the packet. If the filter modified the packet, + * it should not specify the filter ref to give other filters a + * chance to process the new packet. + * @param data The complete IPv4 or IPv6 packet. 
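Putting the two inject KPIs together: a filter that steals a packet returns EJUSTRETURN (taking ownership of the mbuf) and later re-injects it, passing its own filter reference so it is not re-run on its own packet. A hedged sketch; should_defer(), enqueue(), pktopts_copy() and g_ref are hypothetical, and the pktopts are copied because, as noted earlier in this header, they must be preserved for re-injection:

	static errno_t
	my_output(void *cookie, mbuf_t *data, ipf_pktopts_t options)
	{
		if (should_defer(*data)) {                      /* hypothetical */
			enqueue(*data, pktopts_copy(options));  /* hypothetical */
			return EJUSTRETURN;     /* we own the mbuf now */
		}
		return 0;
	}

	static void
	flush_one(mbuf_t m, ipf_pktopts_t opts)
	{
		/* g_ref: the ipfilter_t returned by ipf_addv4() at attach */
		errno_t err = ipf_inject_output(m, g_ref, opts);
		/* ipf_inject_output() always consumes the mbuf, even on error */
		if (err != 0) {
			printf("re-inject failed: %d\n", err);
		}
	}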
+ * @param filter_ref The reference to the filter injecting the data + * @param options Output options for the packet + * @result 0 on success otherwise the errno error. ipf_inject_output + * will always free the mbuf. */ extern errno_t ipf_inject_output(mbuf_t data, ipfilter_t filter_ref, ipf_pktopts_t options); diff --git a/bsd/netinet/kpi_ipfilter_var.h b/bsd/netinet/kpi_ipfilter_var.h index 74f3e9bbc..d9c5f9f47 100644 --- a/bsd/netinet/kpi_ipfilter_var.h +++ b/bsd/netinet/kpi_ipfilter_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,16 +40,16 @@ #include struct ipfilter { - TAILQ_ENTRY(ipfilter) ipf_link; - struct ipf_filter ipf_filter; - struct ipfilter_list *ipf_head; - TAILQ_ENTRY(ipfilter) ipf_tbr; + TAILQ_ENTRY(ipfilter) ipf_link; + struct ipf_filter ipf_filter; + struct ipfilter_list *ipf_head; + TAILQ_ENTRY(ipfilter) ipf_tbr; }; TAILQ_HEAD(ipfilter_list, ipfilter); -extern struct ipfilter_list ipv6_filters; -extern struct ipfilter_list ipv4_filters; +extern struct ipfilter_list ipv6_filters; +extern struct ipfilter_list ipv4_filters; extern ipfilter_t ipf_get_inject_filter(struct mbuf *m); extern void ipf_ref(void); diff --git a/bsd/netinet/lro_ext.h b/bsd/netinet/lro_ext.h index 1dbf135fd..cabdfcd05 100644 --- a/bsd/netinet/lro_ext.h +++ b/bsd/netinet/lro_ext.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,11 +38,11 @@ extern int lrodebug; extern unsigned int coalesc_sz; /* flow return values */ -#define TCP_LRO_NAN 0x00 /* No flow exists */ -#define TCP_LRO_CONSUMED 0x01 /* LRO consumed the packet */ -#define TCP_LRO_EJECT_FLOW 0x02 /* LRO ejected the flow */ -#define TCP_LRO_COALESCE 0x03 /* LRO to coalesce the packet */ -#define TCP_LRO_COLLISION 0x04 /* Two flows map to the same slot */ +#define TCP_LRO_NAN 0x00 /* No flow exists */ +#define TCP_LRO_CONSUMED 0x01 /* LRO consumed the packet */ +#define TCP_LRO_EJECT_FLOW 0x02 /* LRO ejected the flow */ +#define TCP_LRO_COALESCE 0x03 /* LRO to coalesce the packet */ +#define TCP_LRO_COLLISION 0x04 /* Two flows map to the same slot */ void tcp_lro_init(void); @@ -53,12 +53,12 @@ struct mbuf* tcp_lro(struct mbuf *m, unsigned int hlen); int tcp_start_coalescing(struct ip *, struct tcphdr *, int tlen); /* TCP calls this to stop coalescing a flow */ -int tcp_lro_remove_state(struct in_addr, struct in_addr, unsigned short, - unsigned short); +int tcp_lro_remove_state(struct in_addr, struct in_addr, unsigned short, + unsigned short); /* TCP calls this to keep the seq number updated */ void tcp_update_lro_seq(__uint32_t, struct in_addr, struct in_addr, - unsigned short, unsigned short); + unsigned short, unsigned short); #endif diff --git a/bsd/netinet/mp_pcb.c b/bsd/netinet/mp_pcb.c index 254e2a167..6aabe6b8d 100644 --- a/bsd/netinet/mp_pcb.c +++ b/bsd/netinet/mp_pcb.c @@ -45,15 +45,15 @@ #include #include -static lck_grp_t *mp_lock_grp; -static lck_attr_t *mp_lock_attr; -static lck_grp_attr_t *mp_lock_grp_attr; -decl_lck_mtx_data(static, mp_lock); /* global MULTIPATH lock */ +static lck_grp_t *mp_lock_grp; +static lck_attr_t *mp_lock_attr; +static lck_grp_attr_t *mp_lock_grp_attr; +decl_lck_mtx_data(static, mp_lock); /* global MULTIPATH lock */ decl_lck_mtx_data(static, mp_timeout_lock); static TAILQ_HEAD(, mppcbinfo) mppi_head = TAILQ_HEAD_INITIALIZER(mppi_head); -static boolean_t mp_timeout_run; /* MP timer is scheduled to run */ +static boolean_t mp_timeout_run; /* MP timer is scheduled to run */ static boolean_t mp_garbage_collecting; static boolean_t mp_ticking; static void mp_sched_timeout(void); @@ -105,10 +105,12 @@ mp_timeout(void *arg) if ((gc && mppi->mppi_gc != NULL) || (t && mppi->mppi_timer != NULL)) { lck_mtx_lock(&mppi->mppi_lock); - if (gc && mppi->mppi_gc != NULL) + if (gc && mppi->mppi_gc != NULL) { gc_act += mppi->mppi_gc(mppi); - if (t && mppi->mppi_timer != NULL) + } + if (t && mppi->mppi_timer != NULL) { t_act += mppi->mppi_timer(mppi); + } lck_mtx_unlock(&mppi->mppi_lock); } } @@ -118,10 +120,12 @@ mp_timeout(void *arg) } /* lock was dropped above, so check first before overriding */ - if (!mp_garbage_collecting) + if (!mp_garbage_collecting) { mp_garbage_collecting = (gc_act != 0); - if (!mp_ticking) + } + if (!mp_ticking) { mp_ticking = (t_act != 0); + } /* re-arm the timer if there's work to do */ mp_timeout_run = FALSE; @@ -184,16 +188,18 @@ mp_pcbinfo_detach(struct mppcbinfo *mppi) lck_mtx_lock(&mp_lock); TAILQ_FOREACH(mppi0, &mppi_head, mppi_entry) { - if (mppi0 == mppi) + if (mppi0 == mppi) { break; + } } - if (mppi0 != NULL) + if (mppi0 != NULL) { TAILQ_REMOVE(&mppi_head, mppi0, mppi_entry); - else + } else { error = ENXIO; + } lck_mtx_unlock(&mp_lock); - return (error); + return error; } int @@ -206,7 +212,7 @@ mp_pcballoc(struct socket *so, struct 
mppcbinfo *mppi) mpp = zalloc(mppi->mppi_zone); if (mpp == NULL) { - return (ENOBUFS); + return ENOBUFS; } bzero(mpp, mppi->mppi_size); @@ -220,7 +226,7 @@ mp_pcballoc(struct socket *so, struct mppcbinfo *mppi) if (error) { lck_mtx_destroy(&mpp->mpp_lock, mppi->mppi_lock_grp); zfree(mppi->mppi_zone, mpp); - return (error); + return error; } lck_mtx_lock(&mppi->mppi_lock); @@ -229,7 +235,7 @@ mp_pcballoc(struct socket *so, struct mppcbinfo *mppi) mppi->mppi_count++; lck_mtx_unlock(&mppi->mppi_lock); - return (0); + return 0; } void @@ -238,8 +244,9 @@ mp_pcbdetach(struct socket *mp_so) struct mppcb *mpp = mpsotomppcb(mp_so); mpp->mpp_state = MPPCB_STATE_DEAD; - if (!(mp_so->so_flags & SOF_PCBCLEARING)) + if (!(mp_so->so_flags & SOF_PCBCLEARING)) { mp_so->so_flags |= SOF_PCBCLEARING; + } mp_gc_sched(); } @@ -287,12 +294,13 @@ mp_getaddr_v4(struct socket *mp_so, struct sockaddr **nam, boolean_t peer) /* * Do the malloc first in case it blocks. */ - MALLOC(sin, struct sockaddr_in *, sizeof (*sin), M_SONAME, M_WAITOK); - if (sin == NULL) - return (ENOBUFS); - bzero(sin, sizeof (*sin)); + MALLOC(sin, struct sockaddr_in *, sizeof(*sin), M_SONAME, M_WAITOK); + if (sin == NULL) { + return ENOBUFS; + } + bzero(sin, sizeof(*sin)); sin->sin_family = AF_INET; - sin->sin_len = sizeof (*sin); + sin->sin_len = sizeof(*sin); if (!peer) { sin->sin_port = mpte->__mpte_src_v4.sin_port; @@ -303,7 +311,7 @@ mp_getaddr_v4(struct socket *mp_so, struct sockaddr **nam, boolean_t peer) } *nam = (struct sockaddr *)sin; - return (0); + return 0; } static int @@ -322,10 +330,11 @@ mp_getaddr_v6(struct socket *mp_so, struct sockaddr **nam, boolean_t peer) } *nam = in6_sockaddr(port, &addr); - if (*nam == NULL) - return (ENOBUFS); + if (*nam == NULL) { + return ENOBUFS; + } - return (0); + return 0; } int @@ -333,12 +342,13 @@ mp_getsockaddr(struct socket *mp_so, struct sockaddr **nam) { struct mptses *mpte = mpsotompte(mp_so); - if (mpte->mpte_src.sa_family == AF_INET || mpte->mpte_src.sa_family == 0) + if (mpte->mpte_src.sa_family == AF_INET || mpte->mpte_src.sa_family == 0) { return mp_getaddr_v4(mp_so, nam, false); - else if (mpte->mpte_src.sa_family == AF_INET6) + } else if (mpte->mpte_src.sa_family == AF_INET6) { return mp_getaddr_v6(mp_so, nam, false); - else - return (EINVAL); + } else { + return EINVAL; + } } int @@ -346,10 +356,11 @@ mp_getpeeraddr(struct socket *mp_so, struct sockaddr **nam) { struct mptses *mpte = mpsotompte(mp_so); - if (mpte->mpte_src.sa_family == AF_INET || mpte->mpte_src.sa_family == 0) + if (mpte->mpte_src.sa_family == AF_INET || mpte->mpte_src.sa_family == 0) { return mp_getaddr_v4(mp_so, nam, true); - else if (mpte->mpte_src.sa_family == AF_INET6) + } else if (mpte->mpte_src.sa_family == AF_INET6) { return mp_getaddr_v6(mp_so, nam, true); - else - return (EINVAL); + } else { + return EINVAL; + } } diff --git a/bsd/netinet/mp_pcb.h b/bsd/netinet/mp_pcb.h index 5d1cd3ef0..0fc2a103d 100644 --- a/bsd/netinet/mp_pcb.h +++ b/bsd/netinet/mp_pcb.h @@ -27,7 +27,7 @@ */ #ifndef _NETINET_MP_PCB_H_ -#define _NETINET_MP_PCB_H_ +#define _NETINET_MP_PCB_H_ #ifdef BSD_KERNEL_PRIVATE #include @@ -39,21 +39,21 @@ /* Keep in sync with bsd/dev/dtrace/scripts/mptcp.d */ typedef enum mppcb_state { - MPPCB_STATE_INUSE = 1, - MPPCB_STATE_DEAD = 2, + MPPCB_STATE_INUSE = 1, + MPPCB_STATE_DEAD = 2, } mppcb_state_t; /* * Multipath Protocol Control Block */ struct mppcb { - TAILQ_ENTRY(mppcb) mpp_entry; /* glue to all PCBs */ - decl_lck_mtx_data(, mpp_lock); /* per PCB lock */ - struct mppcbinfo *mpp_pcbinfo; /* 
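A userland model of the mp_timeout() re-arm pattern in the hunks above, with invented types and the locking omitted: the handler walks every registered PCB-info block, accumulates how much garbage-collection and timer work the callbacks report, and re-arms itself only while some work remains.

    #include <stdio.h>
    #include <stdbool.h>

    struct pcbinfo {
        unsigned (*gc)(struct pcbinfo *);     /* # of PCBs still dying */
        unsigned (*timer)(struct pcbinfo *);  /* # of PCBs still timing */
    };

    /* toy callback: reports pending work twice, then none */
    static unsigned
    gc_twice(struct pcbinfo *pi)
    {
        (void)pi;
        static unsigned n = 2;
        return n ? n-- : 0;
    }

    static bool
    tick(struct pcbinfo *infos, int n)
    {
        unsigned gc_act = 0, t_act = 0;
        for (int i = 0; i < n; i++) {
            if (infos[i].gc)    gc_act += infos[i].gc(&infos[i]);
            if (infos[i].timer) t_act  += infos[i].timer(&infos[i]);
        }
        /* re-arm only if there is still work to do */
        return gc_act != 0 || t_act != 0;
    }

    int
    main(void)
    {
        struct pcbinfo infos[1] = {{ .gc = gc_twice, .timer = NULL }};
        int rearms = 0;
        while (tick(infos, 1)) {
            rearms++;   /* in the kernel this is the timeout() re-arm */
        }
        printf("timer re-armed %d time(s)\n", rearms);
        return 0;
    }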
PCB info */ - struct mptses *mpp_pcbe; /* ptr to MPTCP-session */ - struct socket *mpp_socket; /* back pointer to socket */ - uint32_t mpp_flags; /* PCB flags */ - mppcb_state_t mpp_state; /* PCB state */ + TAILQ_ENTRY(mppcb) mpp_entry; /* glue to all PCBs */ + decl_lck_mtx_data(, mpp_lock); /* per PCB lock */ + struct mppcbinfo *mpp_pcbinfo; /* PCB info */ + struct mptses *mpp_pcbe; /* ptr to MPTCP-session */ + struct socket *mpp_socket; /* back pointer to socket */ + uint32_t mpp_flags; /* PCB flags */ + mppcb_state_t mpp_state; /* PCB state */ #if NECP uuid_t necp_client_uuid; @@ -65,21 +65,21 @@ static inline struct mppcb * mpsotomppcb(struct socket *mp_so) { VERIFY(SOCK_DOM(mp_so) == PF_MULTIPATH); - return ((struct mppcb *)mp_so->so_pcb); + return (struct mppcb *)mp_so->so_pcb; } /* valid values for mpp_flags */ -#define MPP_ATTACHED 0x001 -#define MPP_INSIDE_OUTPUT 0x002 /* MPTCP-stack is inside mptcp_subflow_output */ -#define MPP_INSIDE_INPUT 0x004 /* MPTCP-stack is inside mptcp_subflow_input */ -#define MPP_RUPCALL 0x008 /* MPTCP-stack is handling a read upcall */ -#define MPP_WUPCALL 0x010 /* MPTCP-stack is handling a read upcall */ -#define MPP_SHOULD_WORKLOOP 0x020 /* MPTCP-stack should call the workloop function */ -#define MPP_SHOULD_RWAKEUP 0x040 /* MPTCP-stack should call sorwakeup */ -#define MPP_SHOULD_WWAKEUP 0x080 /* MPTCP-stack should call sowwakeup */ -#define MPP_CREATE_SUBFLOWS 0x100 /* This connection needs to create subflows */ -#define MPP_SET_CELLICON 0x200 /* Set the cellicon (deferred) */ -#define MPP_UNSET_CELLICON 0x400 /* Unset the cellicon (deferred) */ +#define MPP_ATTACHED 0x001 +#define MPP_INSIDE_OUTPUT 0x002 /* MPTCP-stack is inside mptcp_subflow_output */ +#define MPP_INSIDE_INPUT 0x004 /* MPTCP-stack is inside mptcp_subflow_input */ +#define MPP_RUPCALL 0x008 /* MPTCP-stack is handling a read upcall */ +#define MPP_WUPCALL 0x010 /* MPTCP-stack is handling a write upcall */ +#define MPP_SHOULD_WORKLOOP 0x020 /* MPTCP-stack should call the workloop function */ +#define MPP_SHOULD_RWAKEUP 0x040 /* MPTCP-stack should call sorwakeup */ +#define MPP_SHOULD_WWAKEUP 0x080 /* MPTCP-stack should call sowwakeup */ +#define MPP_CREATE_SUBFLOWS 0x100 /* This connection needs to create subflows */ +#define MPP_SET_CELLICON 0x200 /* Set the cellicon (deferred) */ +#define MPP_UNSET_CELLICON 0x400 /* Unset the cellicon (deferred) */ static inline boolean_t mptcp_should_defer_upcall(struct mppcb *mpp) @@ -91,15 +91,15 @@ mptcp_should_defer_upcall(struct mppcb *mpp) * Multipath PCB Information */ struct mppcbinfo { - TAILQ_ENTRY(mppcbinfo) mppi_entry; /* glue to all PCB info */ - TAILQ_HEAD(, mppcb) mppi_pcbs; /* list of PCBs */ - uint32_t mppi_count; /* # of PCBs in list */ - struct zone *mppi_zone; /* zone for this PCB */ - uint32_t mppi_size; /* size of PCB structure */ - lck_grp_t *mppi_lock_grp; /* lock grp */ - lck_attr_t *mppi_lock_attr; /* lock attr */ - lck_grp_attr_t *mppi_lock_grp_attr; /* lock grp attr */ - decl_lck_mtx_data(, mppi_lock); /* global PCB lock */ + TAILQ_ENTRY(mppcbinfo) mppi_entry; /* glue to all PCB info */ + TAILQ_HEAD(, mppcb) mppi_pcbs; /* list of PCBs */ + uint32_t mppi_count; /* # of PCBs in list */ + struct zone *mppi_zone; /* zone for this PCB */ + uint32_t mppi_size; /* size of PCB structure */ + lck_grp_t *mppi_lock_grp; /* lock grp */ + lck_attr_t *mppi_lock_attr; /* lock attr */ + lck_grp_attr_t *mppi_lock_grp_attr; /* lock grp attr */ + decl_lck_mtx_data(, mppi_lock); /* global PCB lock */ uint32_t (*mppi_gc)(struct mppcbinfo 
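The MPP_* flags above implement deferred wakeups. A small standalone model of that protocol, where the deferral predicate is inferred from the flag set (its body is not shown in this hunk) and the harness is invented: while the stack is inside the input or output path, a wakeup is only recorded as a MPP_SHOULD_* bit and replayed once the outer section exits.

    #include <stdint.h>
    #include <stdio.h>

    #define MPP_INSIDE_OUTPUT   0x002
    #define MPP_INSIDE_INPUT    0x004
    #define MPP_RUPCALL         0x008
    #define MPP_WUPCALL         0x010
    #define MPP_SHOULD_RWAKEUP  0x040

    static int
    should_defer(uint32_t flags)
    {
        return !!(flags & (MPP_INSIDE_OUTPUT | MPP_INSIDE_INPUT |
            MPP_RUPCALL | MPP_WUPCALL));
    }

    int
    main(void)
    {
        uint32_t flags = MPP_INSIDE_INPUT;

        /* new data arrived: don't wake the reader yet, just note it */
        if (should_defer(flags))
            flags |= MPP_SHOULD_RWAKEUP;

        /* leaving the input path: replay any recorded wakeups */
        flags &= ~MPP_INSIDE_INPUT;
        if (!should_defer(flags) && (flags & MPP_SHOULD_RWAKEUP)) {
            flags &= ~MPP_SHOULD_RWAKEUP;
            printf("sorwakeup() now\n");
        }
        return 0;
    }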
*); /* garbage collector func */ uint32_t (*mppi_timer)(struct mppcbinfo *); /* timer func */ }; diff --git a/bsd/netinet/mp_proto.c b/bsd/netinet/mp_proto.c index 1093fcc9b..ef7dd39f2 100644 --- a/bsd/netinet/mp_proto.c +++ b/bsd/netinet/mp_proto.c @@ -43,24 +43,24 @@ extern struct domain mpdomain_s; static void mp_dinit(struct domain *); static struct protosw mpsw = { - .pr_type = SOCK_STREAM, - .pr_protocol = IPPROTO_TCP, - .pr_flags = PR_CONNREQUIRED|PR_MULTICONN|PR_EVCONNINFO| - PR_WANTRCVD|PR_PCBLOCK|PR_PROTOLOCK| - PR_PRECONN_WRITE|PR_DATA_IDEMPOTENT, - .pr_ctloutput = mptcp_ctloutput, - .pr_init = mptcp_init, - .pr_usrreqs = &mptcp_usrreqs, - .pr_lock = mptcp_lock, - .pr_unlock = mptcp_unlock, - .pr_getlock = mptcp_getlock, + .pr_type = SOCK_STREAM, + .pr_protocol = IPPROTO_TCP, + .pr_flags = PR_CONNREQUIRED | PR_MULTICONN | PR_EVCONNINFO | + PR_WANTRCVD | PR_PCBLOCK | PR_PROTOLOCK | + PR_PRECONN_WRITE | PR_DATA_IDEMPOTENT, + .pr_ctloutput = mptcp_ctloutput, + .pr_init = mptcp_init, + .pr_usrreqs = &mptcp_usrreqs, + .pr_lock = mptcp_lock, + .pr_unlock = mptcp_unlock, + .pr_getlock = mptcp_getlock, }; struct domain mpdomain_s = { - .dom_family = PF_MULTIPATH, - .dom_flags = DOM_REENTRANT, - .dom_name = "multipath", - .dom_init = mp_dinit, + .dom_family = PF_MULTIPATH, + .dom_flags = DOM_REENTRANT, + .dom_name = "multipath", + .dom_init = mp_dinit, }; /* Initialize the PF_MULTIPATH domain, and add in the pre-defined protos */ diff --git a/bsd/netinet/mptcp.c b/bsd/netinet/mptcp.c index 80db1552d..c40a144a5 100644 --- a/bsd/netinet/mptcp.c +++ b/bsd/netinet/mptcp.c @@ -109,13 +109,13 @@ int mptcp_enable = 1; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, enable, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_enable, 0, "Enable Multipath TCP Support"); + &mptcp_enable, 0, "Enable Multipath TCP Support"); /* Number of times to try negotiating MPTCP on SYN retransmissions */ int mptcp_mpcap_retries = MPTCP_CAPABLE_RETRIES; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, mptcp_cap_retr, - CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_mpcap_retries, 0, "Number of MP Capable SYN Retries"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &mptcp_mpcap_retries, 0, "Number of MP Capable SYN Retries"); /* * By default, DSS checksum is turned off, revisit if we ever do @@ -123,7 +123,7 @@ SYSCTL_INT(_net_inet_mptcp, OID_AUTO, mptcp_cap_retr, */ int mptcp_dss_csum = 0; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, dss_csum, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_dss_csum, 0, "Enable DSS checksum"); + &mptcp_dss_csum, 0, "Enable DSS checksum"); /* * When mptcp_fail_thresh number of retransmissions are sent, subflow failover @@ -131,7 +131,7 @@ SYSCTL_INT(_net_inet_mptcp, OID_AUTO, dss_csum, CTLFLAG_RW | CTLFLAG_LOCKED, */ int mptcp_fail_thresh = 1; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, fail, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_fail_thresh, 0, "Failover threshold"); + &mptcp_fail_thresh, 0, "Failover threshold"); /* @@ -139,41 +139,41 @@ SYSCTL_INT(_net_inet_mptcp, OID_AUTO, fail, CTLFLAG_RW | CTLFLAG_LOCKED, * as carrier networks mostly have a 30 minute to 60 minute NAT Timeout. * Some carrier networks have a timeout of 10 or 15 minutes. 
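The SYSCTL_INT() declarations above publish these knobs under net.inet.mptcp; from userspace they can be read with the standard sysctlbyname(3) call. A short reader for three of the OIDs declared above:

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/sysctl.h>

    static void
    show(const char *name)
    {
        int value = 0;
        size_t len = sizeof(value);

        if (sysctlbyname(name, &value, &len, NULL, 0) == 0)
            printf("%-26s = %d\n", name, value);
        else
            perror(name);
    }

    int
    main(void)
    {
        show("net.inet.mptcp.enable");    /* mptcp_enable */
        show("net.inet.mptcp.dss_csum");  /* mptcp_dss_csum */
        show("net.inet.mptcp.fail");      /* mptcp_fail_thresh */
        return 0;
    }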
*/ -int mptcp_subflow_keeptime = 60*14; +int mptcp_subflow_keeptime = 60 * 14; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, keepalive, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_subflow_keeptime, 0, "Keepalive in seconds"); + &mptcp_subflow_keeptime, 0, "Keepalive in seconds"); int mptcp_rtthist_rtthresh = 600; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rtthist_thresh, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_rtthist_rtthresh, 0, "Rtt threshold"); + &mptcp_rtthist_rtthresh, 0, "Rtt threshold"); /* * Use RTO history for sending new data */ int mptcp_use_rto = 1; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, userto, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_use_rto, 0, "Disable RTO for subflow selection"); + &mptcp_use_rto, 0, "Disable RTO for subflow selection"); int mptcp_rtothresh = 1500; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rto_thresh, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_rtothresh, 0, "RTO threshold"); + &mptcp_rtothresh, 0, "RTO threshold"); /* * Probe the preferred path, when it is not in use */ uint32_t mptcp_probeto = 1000; SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, probeto, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_probeto, 0, "Disable probing by setting to 0"); + &mptcp_probeto, 0, "Disable probing by setting to 0"); uint32_t mptcp_probecnt = 5; SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, probecnt, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_probecnt, 0, "Number of probe writes"); + &mptcp_probecnt, 0, "Number of probe writes"); /* * Static declarations */ static uint16_t mptcp_input_csum(struct tcpcb *, struct mbuf *, uint64_t, - uint32_t, uint16_t, uint16_t, uint16_t); + uint32_t, uint16_t, uint16_t, uint16_t); static int mptcp_reass_present(struct socket *mp_so) @@ -187,19 +187,22 @@ mptcp_reass_present(struct socket *mp_so) * Present data to user, advancing rcv_nxt through * completed sequence space. */ - if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) - return (flags); + if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) { + return flags; + } q = LIST_FIRST(&mp_tp->mpt_segq); - if (!q || q->tqe_m->m_pkthdr.mp_dsn != mp_tp->mpt_rcvnxt) - return (flags); + if (!q || q->tqe_m->m_pkthdr.mp_dsn != mp_tp->mpt_rcvnxt) { + return flags; + } /* * If there is already another thread doing reassembly for this * connection, it is better to let it finish the job -- * (radar 16316196) */ - if (mp_tp->mpt_flags & MPTCPF_REASS_INPROG) - return (flags); + if (mp_tp->mpt_flags & MPTCPF_REASS_INPROG) { + return flags; + } mp_tp->mpt_flags |= MPTCPF_REASS_INPROG; @@ -210,8 +213,9 @@ mptcp_reass_present(struct socket *mp_so) m_freem(q->tqe_m); } else { flags = !!(q->tqe_m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN); - if (sbappendstream_rcvdemux(mp_so, q->tqe_m, 0, 0)) + if (sbappendstream_rcvdemux(mp_so, q->tqe_m, 0, 0)) { dowakeup = 1; + } } zfree(tcp_reass_zone, q); mp_tp->mpt_reassqlen--; @@ -219,10 +223,10 @@ mptcp_reass_present(struct socket *mp_so) } while (q && q->tqe_m->m_pkthdr.mp_dsn == mp_tp->mpt_rcvnxt); mp_tp->mpt_flags &= ~MPTCPF_REASS_INPROG; - if (dowakeup) + if (dowakeup) { sorwakeup(mp_so); /* done with socket lock held */ - return (flags); - + } + return flags; } static int @@ -250,7 +254,7 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * tcpstat.tcps_mptcp_rcvmemdrop++; m_freem(m); *tlenp = 0; - return (0); + return 0; } /* Allocate a new queue entry. If we can't, just drop the pkt. 
XXX */ @@ -258,7 +262,7 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * if (te == NULL) { tcpstat.tcps_mptcp_rcvmemdrop++; m_freem(m); - return (0); + return 0; } mp_tp->mpt_reassqlen++; @@ -267,8 +271,9 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * * Find a segment which begins after this one does. */ LIST_FOREACH(q, &mp_tp->mpt_segq, tqe_q) { - if (MPTCP_SEQ_GT(q->tqe_m->m_pkthdr.mp_dsn, mb_dsn)) + if (MPTCP_SEQ_GT(q->tqe_m->m_pkthdr.mp_dsn, mb_dsn)) { break; + } p = q; } @@ -310,8 +315,9 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * */ while (q) { int64_t i = (mb_dsn + *tlenp) - q->tqe_m->m_pkthdr.mp_dsn; - if (i <= 0) + if (i <= 0) { break; + } if (i < q->tqe_len) { q->tqe_m->m_pkthdr.mp_dsn += i; @@ -340,7 +346,7 @@ mptcp_reass(struct socket *mp_so, struct pkthdr *phdr, int *tlenp, struct mbuf * } out: - return (mptcp_reass_present(mp_so)); + return mptcp_reass_present(mp_so); } /* @@ -357,7 +363,7 @@ mptcp_input(struct mptses *mpte, struct mbuf *m) VERIFY(m->m_flags & M_PKTHDR); - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ mp_so = mptetoso(mpte); mp_tp = mpte->mpte_mptcb; @@ -414,8 +420,9 @@ fallback: * assume degraded flow as this may be the first packet * without DSS, and the subflow state is not updated yet. */ - if (sbappendstream_rcvdemux(mp_so, m, 0, 0)) + if (sbappendstream_rcvdemux(mp_so, m, 0, 0)) { sorwakeup(mp_so); + } DTRACE_MPTCP5(receive__degraded, struct mbuf *, m, struct socket *, mp_so, @@ -443,8 +450,9 @@ fallback: int mb_dfin = 0; /* If fallback occurs, mbufs will not have PKTF_MPTCP set */ - if (!(m->m_pkthdr.pkt_flags & PKTF_MPTCP)) + if (!(m->m_pkthdr.pkt_flags & PKTF_MPTCP)) { goto fallback; + } save = m->m_next; /* @@ -461,10 +469,11 @@ fallback: prev = save; save = save->m_next; } - if (prev) + if (prev) { prev->m_next = NULL; - else + } else { m->m_next = NULL; + } mb_dsn = m->m_pkthdr.mp_dsn; mb_datalen = m->m_pkthdr.mp_rlen; @@ -474,15 +483,17 @@ fallback: tcpstat.tcps_mptcp_rcvpackafterwin++; if (todrop >= mb_datalen) { - if (freelist == NULL) + if (freelist == NULL) { freelist = m; - else + } else { tail->m_next = m; + } - if (prev != NULL) + if (prev != NULL) { tail = prev; - else + } else { tail = m; + } m = save; prev = save = NULL; @@ -503,15 +514,17 @@ fallback: if (MPTCP_SEQ_LT(mb_dsn, mp_tp->mpt_rcvnxt)) { if (MPTCP_SEQ_LEQ((mb_dsn + mb_datalen), mp_tp->mpt_rcvnxt)) { - if (freelist == NULL) + if (freelist == NULL) { freelist = m; - else + } else { tail->m_next = m; + } - if (prev != NULL) + if (prev != NULL) { tail = prev; - else + } else { tail = m; + } m = save; prev = save = NULL; @@ -534,8 +547,9 @@ fallback: mptcp_sbrcv_grow(mp_tp); - if (sbappendstream_rcvdemux(mp_so, m, 0, 0)) + if (sbappendstream_rcvdemux(mp_so, m, 0, 0)) { wakeup = 1; + } DTRACE_MPTCP6(receive, struct mbuf *, m, struct socket *, mp_so, struct sockbuf *, &mp_so->so_rcv, @@ -560,11 +574,13 @@ next: count = mp_so->so_rcv.sb_cc; } while (m); - if (freelist) + if (freelist) { m_freem(freelist); + } - if (wakeup) + if (wakeup) { sorwakeup(mp_so); + } } boolean_t @@ -575,8 +591,9 @@ mptcp_can_send_more(struct mptcb *mp_tp, boolean_t ignore_reinject) /* * Always send if there is data in the reinject-queue. 
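A compact model of the invariant mptcp_reass() maintains above, with simplified types and overlap trimming omitted: the segment queue stays sorted by 64-bit DSN, and mptcp_reass_present() only drains segments whose DSN matches mpt_rcvnxt.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct seg {
        uint64_t dsn;       /* data sequence number */
        uint16_t len;
        struct seg *next;
    };

    /* insert in DSN order: stop at the first segment that begins after us */
    static void
    seg_insert(struct seg **head, struct seg *s)
    {
        struct seg **pp = head;
        while (*pp != NULL && (*pp)->dsn <= s->dsn)
            pp = &(*pp)->next;
        s->next = *pp;
        *pp = s;
    }

    /* drain every in-order segment, advancing rcv_nxt */
    static void
    seg_present(struct seg **head, uint64_t *rcv_nxt)
    {
        while (*head != NULL && (*head)->dsn == *rcv_nxt) {
            struct seg *s = *head;
            *head = s->next;
            *rcv_nxt += s->len;   /* "deliver" s to the socket buffer */
            free(s);
        }
    }

    int
    main(void)
    {
        struct seg *q = NULL;
        uint64_t rcv_nxt = 100;
        uint64_t dsns[] = { 140, 100, 120 };   /* out-of-order arrivals */

        for (int i = 0; i < 3; i++) {
            struct seg *s = calloc(1, sizeof(*s));
            s->dsn = dsns[i];
            s->len = 20;
            seg_insert(&q, s);
        }
        seg_present(&q, &rcv_nxt);
        printf("rcv_nxt advanced to %llu\n", (unsigned long long)rcv_nxt);
        return 0;
    }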
*/ - if (!ignore_reinject && mp_tp->mpt_mpte->mpte_reinjectq) - return (TRUE); + if (!ignore_reinject && mp_tp->mpt_mpte->mpte_reinjectq) { + return TRUE; + } /* * Don't send, if: @@ -587,19 +604,23 @@ mptcp_can_send_more(struct mptcb *mp_tp, boolean_t ignore_reinject) * 3. snd_nxt + 1 == snd_max and we are closing: A DATA_FIN is scheduled. */ - if (!(mp_so->so_flags1 & SOF1_PRECONNECT_DATA) && MPTCP_SEQ_GEQ(mp_tp->mpt_sndnxt, mp_tp->mpt_sndmax)) - return (FALSE); + if (!(mp_so->so_flags1 & SOF1_PRECONNECT_DATA) && MPTCP_SEQ_GEQ(mp_tp->mpt_sndnxt, mp_tp->mpt_sndmax)) { + return FALSE; + } - if (MPTCP_SEQ_LEQ(mp_tp->mpt_snduna + mp_tp->mpt_sndwnd, mp_tp->mpt_sndnxt)) - return (FALSE); + if (MPTCP_SEQ_LEQ(mp_tp->mpt_snduna + mp_tp->mpt_sndwnd, mp_tp->mpt_sndnxt)) { + return FALSE; + } - if (mp_tp->mpt_sndnxt + 1 == mp_tp->mpt_sndmax && mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) - return (FALSE); + if (mp_tp->mpt_sndnxt + 1 == mp_tp->mpt_sndmax && mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) { + return FALSE; + } - if (mp_tp->mpt_state >= MPTCPS_FIN_WAIT_2) - return (FALSE); + if (mp_tp->mpt_state >= MPTCPS_FIN_WAIT_2) { + return FALSE; + } - return (TRUE); + return TRUE; } /* @@ -624,11 +645,11 @@ mptcp_output(struct mptses *mpte) mpte->mpte_mppcb->mpp_flags |= MPP_WUPCALL; mptcplog((LOG_DEBUG, "%s: snxt %u sndmax %u suna %u swnd %u reinjectq %u state %u\n", - __func__, (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_sndmax, - (uint32_t)mp_tp->mpt_snduna, mp_tp->mpt_sndwnd, - mpte->mpte_reinjectq ? 1 : 0, - mp_tp->mpt_state), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_sndmax, + (uint32_t)mp_tp->mpt_snduna, mp_tp->mpt_sndwnd, + mpte->mpte_reinjectq ? 1 : 0, + mp_tp->mpt_state), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); old_snd_nxt = mp_tp->mpt_sndnxt; while (mptcp_can_send_more(mp_tp, FALSE)) { @@ -672,9 +693,9 @@ mptcp_output(struct mptses *mpte) mp_so->so_snd.sb_idealsize = mp_so->so_snd.sb_hiwat; mptcplog((LOG_DEBUG, "%s: increased snd hiwat to %u lowat %u\n", - __func__, mp_so->so_snd.sb_hiwat, - mp_so->so_snd.sb_lowat), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, mp_so->so_snd.sb_hiwat, + mp_so->so_snd.sb_lowat), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); } } } @@ -687,10 +708,11 @@ mptcp_output(struct mptses *mpte) mpts->mpts_flags |= MPTSF_FAILINGOVER; mpts->mpts_flags &= ~MPTSF_ACTIVE; mpts_tried = mpts; - if (error != ECANCELED) + if (error != ECANCELED) { mptcplog((LOG_ERR, "%s: Error = %d mpts_flags %#x\n", __func__, - error, mpts->mpts_flags), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + error, mpts->mpts_flags), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + } break; } /* The model is to have only one active flow at a time */ @@ -733,14 +755,15 @@ mptcp_output(struct mptses *mpte) if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) { if (mp_tp->mpt_sndnxt + 1 == mp_tp->mpt_sndmax && - mp_tp->mpt_snduna == mp_tp->mpt_sndnxt) + mp_tp->mpt_snduna == mp_tp->mpt_sndnxt) { mptcp_finish_usrclosed(mpte); + } } mptcp_handle_deferred_upcalls(mpte->mpte_mppcb, MPP_WUPCALL); /* subflow errors should not be percolated back up */ - return (0); + return 0; } @@ -756,9 +779,9 @@ mptcp_choose_subflow(struct mptsub *mpts, struct mptsub *curbest, int *currtt) */ if (tp->t_srtt && *currtt > tp->t_srtt && (curbest == NULL || tp->t_rxtshift == 0 || - sototcpcb(curbest->mpts_socket)->t_rxtshift)) { + sototcpcb(curbest->mpts_socket)->t_rxtshift)) { *currtt = tp->t_srtt; - return (mpts); + return mpts; } /* @@ -768,19 +791,20 @@ mptcp_choose_subflow(struct 
mptsub *mpts, struct mptsub *curbest, int *currtt) sototcpcb(curbest->mpts_socket)->t_rxtshift && tp->t_rxtshift == 0) { *currtt = tp->t_srtt; - return (mpts); + return mpts; } - return (curbest != NULL ? curbest : mpts); + return curbest != NULL ? curbest : mpts; } static struct mptsub * mptcp_return_subflow(struct mptsub *mpts) { - if (mpts && mptcp_subflow_cwnd_space(mpts->mpts_socket) <= 0) - return (NULL); + if (mpts && mptcp_subflow_cwnd_space(mpts->mpts_socket) <= 0) { + return NULL; + } - return (mpts); + return mpts; } /* @@ -807,22 +831,24 @@ mptcp_get_subflow(struct mptses *mpte, struct mptsub *ignore, struct mptsub **pr struct inpcb *inp = sotoinpcb(so); mptcplog((LOG_DEBUG, "%s mpts %u ignore %d, mpts_flags %#x, suspended %u sostate %#x tpstate %u cellular %d rtt %u rxtshift %u cheap %u exp %u cwnd %d\n", - __func__, mpts->mpts_connid, ignore ? ignore->mpts_connid : -1, mpts->mpts_flags, - INP_WAIT_FOR_IF_FEEDBACK(inp), so->so_state, tp->t_state, - inp->inp_last_outifp ? IFNET_IS_CELLULAR(inp->inp_last_outifp) : -1, - tp->t_srtt, tp->t_rxtshift, cheap_rtt, exp_rtt, - mptcp_subflow_cwnd_space(so)), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, mpts->mpts_connid, ignore ? ignore->mpts_connid : -1, mpts->mpts_flags, + INP_WAIT_FOR_IF_FEEDBACK(inp), so->so_state, tp->t_state, + inp->inp_last_outifp ? IFNET_IS_CELLULAR(inp->inp_last_outifp) : -1, + tp->t_srtt, tp->t_rxtshift, cheap_rtt, exp_rtt, + mptcp_subflow_cwnd_space(so)), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); /* * First, the hard conditions to reject subflows * (e.g., not connected,...) */ - if (mpts == ignore || inp->inp_last_outifp == NULL) + if (mpts == ignore || inp->inp_last_outifp == NULL) { continue; + } - if (INP_WAIT_FOR_IF_FEEDBACK(inp)) + if (INP_WAIT_FOR_IF_FEEDBACK(inp)) { continue; + } /* There can only be one subflow in degraded state */ if (mpts->mpts_flags & MPTSF_MP_DEGRADED) { @@ -833,50 +859,57 @@ mptcp_get_subflow(struct mptses *mpte, struct mptsub *ignore, struct mptsub **pr /* * If this subflow is waiting to finally send, do it! */ - if (so->so_flags1 & SOF1_PRECONNECT_DATA) - return (mptcp_return_subflow(mpts)); + if (so->so_flags1 & SOF1_PRECONNECT_DATA) { + return mptcp_return_subflow(mpts); + } /* * Only send if the subflow is MP_CAPABLE. The exceptions to * this rule (degraded or TFO) have been taken care of above. */ - if (!(mpts->mpts_flags & MPTSF_MP_CAPABLE)) + if (!(mpts->mpts_flags & MPTSF_MP_CAPABLE)) { continue; + } if ((so->so_state & SS_ISDISCONNECTED) || !(so->so_state & SS_ISCONNECTED) || !TCPS_HAVEESTABLISHED(tp->t_state) || - tp->t_state > TCPS_CLOSE_WAIT) + tp->t_state > TCPS_CLOSE_WAIT) { continue; + } /* * Second, the soft conditions to find the subflow with best * conditions for each set (aka cellular vs non-cellular) */ - if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) + if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) { second_best = mptcp_choose_subflow(mpts, second_best, - &exp_rtt); - else + &exp_rtt); + } else { best = mptcp_choose_subflow(mpts, best, &cheap_rtt); + } } /* * If there is no preferred or backup subflow, and there is no active * subflow use the last usable subflow. 
*/ - if (best == NULL) - return (mptcp_return_subflow(second_best)); + if (best == NULL) { + return mptcp_return_subflow(second_best); + } - if (second_best == NULL) - return (mptcp_return_subflow(best)); + if (second_best == NULL) { + return mptcp_return_subflow(best); + } besttp = sototcpcb(best->mpts_socket); bestinp = sotoinpcb(best->mpts_socket); secondtp = sototcpcb(second_best->mpts_socket); secondinp = sotoinpcb(second_best->mpts_socket); - if (preferred != NULL) + if (preferred != NULL) { *preferred = mptcp_return_subflow(best); + } /* * Second Step: Among best and second_best. Choose the one that is @@ -887,10 +920,11 @@ mptcp_get_subflow(struct mptses *mpte, struct mptsub *ignore, struct mptsub **pr * Only handover if Symptoms tells us to do so. */ if (!IFNET_IS_CELLULAR(bestinp->inp_last_outifp) && - mptcp_is_wifi_unusable(mpte) != 0 && mptcp_subflow_is_bad(mpte, best)) - return (mptcp_return_subflow(second_best)); + mptcp_is_wifi_unusable(mpte) != 0 && mptcp_subflow_is_bad(mpte, best)) { + return mptcp_return_subflow(second_best); + } - return (mptcp_return_subflow(best)); + return mptcp_return_subflow(best); } else if (mpte->mpte_svctype == MPTCP_SVCTYPE_INTERACTIVE) { int rtt_thresh = mptcp_rtthist_rtthresh << TCP_RTT_SHIFT; int rto_thresh = mptcp_rtothresh; @@ -911,12 +945,12 @@ mptcp_get_subflow(struct mptses *mpte, struct mptsub *ignore, struct mptsub **pr second_best->mpts_connid, secondtp->t_srtt >> TCP_RTT_SHIFT), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); - return (mptcp_return_subflow(second_best)); + return mptcp_return_subflow(second_best); } if (mptcp_subflow_is_bad(mpte, best) && secondtp->t_rxtshift == 0) { - return (mptcp_return_subflow(second_best)); + return mptcp_return_subflow(second_best); } /* Compare RTOs, select second_best if best's rto exceeds rtothresh */ @@ -929,7 +963,7 @@ mptcp_get_subflow(struct mptses *mpte, struct mptsub *ignore, struct mptsub **pr second_best->mpts_connid, secondtp->t_rxtcur), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); - return (mptcp_return_subflow(second_best)); + return mptcp_return_subflow(second_best); } /* @@ -937,7 +971,7 @@ mptcp_get_subflow(struct mptses *mpte, struct mptsub *ignore, struct mptsub **pr * were true. So, let's schedule on the best one, if he still * has some space in the congestion-window. */ - return (mptcp_return_subflow(best)); + return mptcp_return_subflow(best); } else if (mpte->mpte_svctype == MPTCP_SVCTYPE_AGGREGATE) { struct mptsub *tmp; @@ -956,15 +990,16 @@ mptcp_get_subflow(struct mptses *mpte, struct mptsub *ignore, struct mptsub **pr } /* Is there still space in the congestion window? 
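A condensed model of the MPTCP_SVCTYPE_INTERACTIVE branch that begins above: keep the preferred subflow unless its smoothed RTT or RTO crosses the sysctl thresholds while the backup looks healthy. Plain ints stand in for the tcpcb fields, the harness values are invented, and the source's exact predicates are more involved than this.

    #include <stdio.h>

    #define TCP_RTT_SHIFT 5

    struct flow {
        int srtt;       /* scaled smoothed RTT, like t_srtt */
        int rxtshift;   /* consecutive RTO count, like t_rxtshift */
        int rxtcur;     /* current RTO in ms, like t_rxtcur */
    };

    static const struct flow *
    pick_interactive(const struct flow *best, const struct flow *second,
        int rtthist_thresh, int rto_thresh)
    {
        int rtt_thresh = rtthist_thresh << TCP_RTT_SHIFT;

        /* best's RTT degraded while second stays under the threshold */
        if (best->srtt > rtt_thresh && second->srtt < rtt_thresh)
            return second;

        /* best is retransmitting while second is clean */
        if (best->rxtshift > 0 && second->rxtshift == 0)
            return second;

        /* best's RTO ballooned while second's stays modest */
        if (best->rxtcur > rto_thresh && second->rxtcur < rto_thresh)
            return second;

        return best;   /* otherwise keep the preferred path */
    }

    int
    main(void)
    {
        struct flow wifi = { 700 << TCP_RTT_SHIFT, 0, 2000 };
        struct flow cell = {  80 << TCP_RTT_SHIFT, 0,  300 };

        const struct flow *pick = pick_interactive(&wifi, &cell, 600, 1500);
        printf("picked %s\n", pick == &wifi ? "wifi" : "cell");
        return 0;
    }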
*/ - if (mptcp_subflow_cwnd_space(bestinp->inp_socket) <= 0) - return (mptcp_return_subflow(second_best)); + if (mptcp_subflow_cwnd_space(bestinp->inp_socket) <= 0) { + return mptcp_return_subflow(second_best); + } - return (mptcp_return_subflow(best)); + return mptcp_return_subflow(best); } else { panic("Unknown service-type configured for MPTCP"); } - return (NULL); + return NULL; } static const char * @@ -982,13 +1017,13 @@ mptcp_event_to_str(uint32_t event) c = "MPCE_RECV_DATA_FIN"; break; } - return (c); + return c; } static const char * mptcp_state_to_str(mptcp_state_t state) { - const char *c = "UNDEFINED"; + const char *c = "UNDEFINED"; switch (state) { case MPTCPS_CLOSED: c = "MPTCPS_CLOSED"; @@ -1021,7 +1056,7 @@ mptcp_state_to_str(mptcp_state_t state) c = "MPTCPS_TERMINATE"; break; } - return (c); + return c; } void @@ -1066,13 +1101,15 @@ mptcp_close_fsm(struct mptcb *mp_tp, uint32_t event) break; case MPTCPS_CLOSING: - if (event == MPCE_RECV_DATA_ACK) + if (event == MPCE_RECV_DATA_ACK) { mp_tp->mpt_state = MPTCPS_TIME_WAIT; + } break; case MPTCPS_LAST_ACK: - if (event == MPCE_RECV_DATA_ACK) + if (event == MPCE_RECV_DATA_ACK) { mptcp_close(mp_tp->mpt_mpte, mp_tp); + } break; case MPTCPS_FIN_WAIT_2: @@ -1116,7 +1153,6 @@ mptcp_update_dss_rcv_state(struct mptcp_dsn_opt *dss_info, struct tcpcb *tp, mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, dss_info->mdss_subflow_seqn, dss_info->mdss_data_len, csum); - } void @@ -1156,8 +1192,9 @@ mptcp_validate_dss_map(struct socket *so, struct tcpcb *tp, struct mbuf *m, { u_int32_t datalen; - if (!(m->m_pkthdr.pkt_flags & PKTF_MPTCP)) + if (!(m->m_pkthdr.pkt_flags & PKTF_MPTCP)) { return 0; + } datalen = m->m_pkthdr.mp_rlen; @@ -1177,12 +1214,13 @@ mptcp_validate_dss_map(struct socket *so, struct tcpcb *tp, struct mbuf *m, int mptcp_input_preproc(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, - int drop_hdrlen) + int drop_hdrlen) { mptcp_insert_rmap(tp, m, th); if (mptcp_validate_dss_map(tp->t_inpcb->inp_socket, tp, m, - drop_hdrlen) != 0) + drop_hdrlen) != 0) { return -1; + } return 0; } @@ -1196,7 +1234,7 @@ mptcp_input_preproc(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th, int mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, - uint32_t sseq, uint16_t dlen, uint16_t csum, uint16_t dfin) + uint32_t sseq, uint16_t dlen, uint16_t csum, uint16_t dfin) { uint16_t mptcp_csum; @@ -1206,38 +1244,42 @@ mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, mptcp_notify_mpfail(tp->t_inpcb->inp_socket); m_freem(m); tcpstat.tcps_mp_badcsum++; - return (-1); + return -1; } - return (0); + return 0; } static uint16_t mptcp_input_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, uint32_t sseq, - uint16_t dlen, uint16_t csum, uint16_t dfin) + uint16_t dlen, uint16_t csum, uint16_t dfin) { struct mptcb *mp_tp = tptomptp(tp); uint16_t real_len = dlen - dfin; uint32_t sum = 0; - if (mp_tp == NULL) - return (0); + if (mp_tp == NULL) { + return 0; + } - if (!(mp_tp->mpt_flags & MPTCPF_CHECKSUM)) - return (0); + if (!(mp_tp->mpt_flags & MPTCPF_CHECKSUM)) { + return 0; + } - if (tp->t_mpflags & TMPF_TCP_FALLBACK) - return (0); + if (tp->t_mpflags & TMPF_TCP_FALLBACK) { + return 0; + } /* * The remote side may send a packet with fewer bytes than the * claimed DSS checksum length. 
*/ if ((int)m_length2(m, NULL) < real_len) { - return (0xffff); + return 0xffff; } - if (real_len != 0) + if (real_len != 0) { sum = m_sum16(m, 0, real_len); + } sum += in_pseudo64(htonll(dsn), htonl(sseq), htons(dlen) + csum); ADDCARRY(sum); @@ -1246,7 +1288,7 @@ mptcp_input_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, uint32_t sseq, mptcplog((LOG_DEBUG, "%s: sum = %x \n", __func__, sum), MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_VERBOSE); - return (~sum & 0xffff); + return ~sum & 0xffff; } uint32_t @@ -1254,8 +1296,9 @@ mptcp_output_csum(struct mbuf *m, uint64_t dss_val, uint32_t sseq, uint16_t dlen { uint32_t sum = 0; - if (dlen) + if (dlen) { sum = m_sum16(m, 0, dlen); + } dss_val = mptcp_hton64(dss_val); sseq = htonl(sseq); @@ -1266,7 +1309,7 @@ mptcp_output_csum(struct mbuf *m, uint64_t dss_val, uint32_t sseq, uint16_t dlen sum = ~sum & 0xffff; DTRACE_MPTCP2(checksum__result, struct mbuf *, m, uint32_t, sum); mptcplog((LOG_DEBUG, "%s: sum = %x \n", __func__, sum), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); return sum; } @@ -1289,14 +1332,13 @@ mptcp_no_rto_spike(struct socket *so) __func__, spike, tp->t_rxtcur, tp->t_rttbest >> TCP_RTT_SHIFT, tp->t_rttcur), - (MPTCP_SOCKET_DBG|MPTCP_SENDER_DBG), MPTCP_LOGLVL_LOG); - + (MPTCP_SOCKET_DBG | MPTCP_SENDER_DBG), MPTCP_LOGLVL_LOG); } - if (spike > 0 ) { - return (FALSE); + if (spike > 0) { + return FALSE; } else { - return (TRUE); + return TRUE; } } @@ -1306,8 +1348,9 @@ mptcp_handle_deferred_upcalls(struct mppcb *mpp, uint32_t flag) VERIFY(mpp->mpp_flags & flag); mpp->mpp_flags &= ~flag; - if (mptcp_should_defer_upcall(mpp)) + if (mptcp_should_defer_upcall(mpp)) { return; + } if (mpp->mpp_flags & MPP_SHOULD_WORKLOOP) { mpp->mpp_flags &= ~MPP_SHOULD_WORKLOOP; @@ -1346,8 +1389,8 @@ mptcp_ask_for_nat64(struct ifnet *ifp) in6_post_msg(ifp, KEV_INET6_REQUEST_NAT64_PREFIX, NULL, NULL); os_log_info(mptcp_log_handle, - "%s: asked for NAT64-prefix on %s\n", __func__, - ifp->if_name); + "%s: asked for NAT64-prefix on %s\n", __func__, + ifp->if_name); } static void @@ -1361,7 +1404,7 @@ mptcp_reset_itfinfo(struct mpt_itf_info *info) void mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, - uint32_t necp_flags, __unused bool *viable) + uint32_t necp_flags, __unused bool *viable) { boolean_t has_v4 = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_HAS_IPV4); boolean_t has_v6 = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_HAS_IPV6); @@ -1378,16 +1421,18 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, VERIFY(ifindex != IFSCOPE_NONE); /* About to be garbage-collected (see note about MPTCP/NECP interactions) */ - if (mp->mpp_socket->so_usecount == 0) + if (mp->mpp_socket->so_usecount == 0) { return; + } if (action != NECP_CLIENT_CBACTION_INITIAL) { mpte_lock(mpte); locked = 1; /* Check again, because it might have changed while waiting */ - if (mp->mpp_socket->so_usecount == 0) + if (mp->mpp_socket->so_usecount == 0) { goto out; + } } mpte_lock_assert_held(mpte); @@ -1396,32 +1441,36 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, mp_so = mptetoso(mpte); os_log_info(mptcp_log_handle, "%s, action: %u ifindex %u usecount %u mpt_flags %#x state %u v4 %u v6 %u nat64 %u power %u\n", - __func__, action, ifindex, mp->mpp_socket->so_usecount, mp_tp->mpt_flags, mp_tp->mpt_state, - has_v4, has_v6, has_nat64, low_power); + __func__, action, ifindex, mp->mpp_socket->so_usecount, mp_tp->mpt_flags, mp_tp->mpt_state, + has_v4, has_v6, has_nat64, low_power); /* No 
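A standalone model of the DSS checksum that mptcp_input_csum() folds above: a 16-bit one's-complement sum over the payload plus the MPTCP pseudo-header (64-bit DSN, 32-bit subflow sequence number, 16-bit data-level length) plus the peer's checksum field, so a valid packet complements to zero. The byte-order handling of in_pseudo64() is approximated here, not copied.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    sum16(const uint8_t *p, size_t n, uint32_t sum)
    {
        for (size_t i = 0; i + 1 < n; i += 2)
            sum += (uint32_t)p[i] << 8 | p[i + 1];
        if (n & 1)
            sum += (uint32_t)p[n - 1] << 8;   /* pad odd byte with zero */
        return sum;
    }

    static uint16_t
    dss_csum(const uint8_t *payload, size_t len,
        uint64_t dsn, uint32_t sseq, uint16_t dlen, uint16_t peer_csum)
    {
        uint8_t ph[16];
        uint32_t sum;
        int i;

        /* big-endian pseudo-header: dsn | sseq | dlen | peer checksum */
        for (i = 0; i < 8; i++) ph[i]     = (uint8_t)(dsn  >> (56 - 8 * i));
        for (i = 0; i < 4; i++) ph[8 + i] = (uint8_t)(sseq >> (24 - 8 * i));
        ph[12] = dlen >> 8;      ph[13] = (uint8_t)dlen;
        ph[14] = peer_csum >> 8; ph[15] = (uint8_t)peer_csum;

        sum = sum16(ph, sizeof(ph), sum16(payload, len, 0));
        while (sum >> 16)                  /* ADDCARRY() equivalent */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;             /* 0 means the checksum verified */
    }

    int
    main(void)
    {
        const uint8_t data[] = "hello";
        printf("0x%04x\n", dss_csum(data, 5, 1ULL, 1U, 5, 0));
        return 0;
    }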
need on fallen back sockets */ - if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) + if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) { goto out; + } /* * When the interface goes in low-power mode we don't want to establish * new subflows on it. Thus, mark it internally as non-viable. */ - if (low_power) + if (low_power) { action = NECP_CLIENT_CBACTION_NONVIABLE; + } if (action == NECP_CLIENT_CBACTION_NONVIABLE) { for (i = 0; i < mpte->mpte_itfinfo_size; i++) { - if (mpte->mpte_itfinfo[i].ifindex == IFSCOPE_NONE) + if (mpte->mpte_itfinfo[i].ifindex == IFSCOPE_NONE) { continue; + } - if (mpte->mpte_itfinfo[i].ifindex == ifindex) + if (mpte->mpte_itfinfo[i].ifindex == ifindex) { mptcp_reset_itfinfo(&mpte->mpte_itfinfo[i]); + } } mptcp_sched_create_subflows(mpte); } else if (action == NECP_CLIENT_CBACTION_VIABLE || - action == NECP_CLIENT_CBACTION_INITIAL) { + action == NECP_CLIENT_CBACTION_INITIAL) { int found_slot = 0, slot_index = -1; struct ifnet *ifp; @@ -1429,19 +1478,23 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); - if (ifp == NULL) + if (ifp == NULL) { goto out; + } if (IFNET_IS_EXPENSIVE(ifp) && - (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) + (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) { goto out; + } if (IFNET_IS_CELLULAR(ifp) && - (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) + (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) { goto out; + } - if (IS_INTF_CLAT46(ifp)) + if (IS_INTF_CLAT46(ifp)) { has_v4 = FALSE; + } /* Look for the slot on where to store/update the interface-info. */ for (i = 0; i < mpte->mpte_itfinfo_size; i++) { @@ -1457,8 +1510,8 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, */ if (mpte->mpte_itfinfo[i].ifindex == ifindex && (mpte->mpte_itfinfo[i].has_v4_conn != has_v4 || - mpte->mpte_itfinfo[i].has_v6_conn != has_v6 || - mpte->mpte_itfinfo[i].has_nat64_conn != has_nat64)) { + mpte->mpte_itfinfo[i].has_v6_conn != has_v6 || + mpte->mpte_itfinfo[i].has_nat64_conn != has_nat64)) { found_slot = 1; slot_index = i; break; @@ -1490,14 +1543,15 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, if (info == NULL) { os_log_error(mptcp_log_handle, "%s malloc failed for %u\n", - __func__, new_size); + __func__, new_size); goto out; } memcpy(info, mpte->mpte_itfinfo, mpte->mpte_itfinfo_size * sizeof(*info)); - if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE) + if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE) { _FREE(mpte->mpte_itfinfo, M_TEMP); + } /* We allocated a new one, thus the first must be empty */ slot_index = mpte->mpte_itfinfo_size; @@ -1516,8 +1570,9 @@ mptcp_session_necp_cb(void *handle, int action, uint32_t interface_index, } out: - if (locked) + if (locked) { mpte_unlock(mpte); + } } void @@ -1535,22 +1590,25 @@ mptcp_set_restrictions(struct socket *mp_so) uint32_t ifindex = info->ifindex; struct ifnet *ifp; - if (ifindex == IFSCOPE_NONE) + if (ifindex == IFSCOPE_NONE) { continue; + } ifp = ifindex2ifnet[ifindex]; - if (ifp == NULL) + if (ifp == NULL) { continue; + } if (IFNET_IS_EXPENSIVE(ifp) && - (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) + (mp_so->so_restrictions & SO_RESTRICT_DENY_EXPENSIVE)) { info->ifindex = IFSCOPE_NONE; + } if (IFNET_IS_CELLULAR(ifp) && - (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) + (mp_so->so_restrictions & SO_RESTRICT_DENY_CELLULAR)) { info->ifindex = IFSCOPE_NONE; + } } ifnet_head_done(); } - diff --git a/bsd/netinet/mptcp.h 
b/bsd/netinet/mptcp.h index d0b77e6b6..d98f50e83 100644 --- a/bsd/netinet/mptcp.h +++ b/bsd/netinet/mptcp.h @@ -26,8 +26,8 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _NETINET_MPTCP_H_ -#define _NETINET_MPTCP_H_ +#ifndef _NETINET_MPTCP_H_ +#define _NETINET_MPTCP_H_ #ifdef BSD_KERNEL_PRIVATE @@ -36,27 +36,27 @@ #include #if BYTE_ORDER == BIG_ENDIAN -#define mptcp_hton64(x) (x) -#define mptcp_ntoh64(x) (x) +#define mptcp_hton64(x) (x) +#define mptcp_ntoh64(x) (x) #else /* LITTLE_ENDIAN */ -#define mptcp_hton64(x) __DARWIN_OSSwapInt64(x) -#define mptcp_ntoh64(x) __DARWIN_OSSwapInt64(x) +#define mptcp_hton64(x) __DARWIN_OSSwapInt64(x) +#define mptcp_ntoh64(x) __DARWIN_OSSwapInt64(x) #endif /* * MPTCP Option Subtype Field values */ -#define MPO_CAPABLE 0x0 -#define MPO_JOIN 0x1 -#define MPO_DSS 0x2 -#define MPO_ADD_ADDR 0x3 -#define MPO_REMOVE_ADDR 0x4 -#define MPO_PRIO 0x5 -#define MPO_FAIL 0x6 -#define MPO_FASTCLOSE 0x7 +#define MPO_CAPABLE 0x0 +#define MPO_JOIN 0x1 +#define MPO_DSS 0x2 +#define MPO_ADD_ADDR 0x3 +#define MPO_REMOVE_ADDR 0x4 +#define MPO_PRIO 0x5 +#define MPO_FAIL 0x6 +#define MPO_FASTCLOSE 0x7 /* MPTCP Protocol version */ -#define MPTCP_STD_VERSION_0 0x0 +#define MPTCP_STD_VERSION_0 0x0 /* * MPTCP MP_CAPABLE TCP Option definitions @@ -64,26 +64,26 @@ * Used to establish an MPTCP connection and first subflow. */ struct mptcp_mpcapable_opt_common { - u_int8_t mmco_kind; - u_int8_t mmco_len; + u_int8_t mmco_kind; + u_int8_t mmco_len; #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t mmco_version:4, - mmco_subtype:4; + u_int8_t mmco_version:4, + mmco_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t mmco_subtype:4, - mmco_version:4; + u_int8_t mmco_subtype:4, + mmco_version:4; #endif -#define MPCAP_PROPOSAL_SBIT 0x01 /* SHA1 Algorithm */ -#define MPCAP_HBIT 0x01 /* alias of MPCAP_PROPOSAL_SBIT */ -#define MPCAP_GBIT 0x02 /* must be 0 */ -#define MPCAP_FBIT 0x04 /* must be 0 */ -#define MPCAP_EBIT 0x08 /* must be 0 */ -#define MPCAP_DBIT 0x10 /* must be 0 */ -#define MPCAP_CBIT 0x20 /* must be 0 */ -#define MPCAP_BBIT 0x40 /* Extensibility bit, must be 0 */ -#define MPCAP_ABIT 0x80 /* alias of MPCAP_CHECKSUM_CBIT */ -#define MPCAP_CHECKSUM_CBIT 0x80 /* DSS Checksum bit */ - u_int8_t mmco_flags; +#define MPCAP_PROPOSAL_SBIT 0x01 /* SHA1 Algorithm */ +#define MPCAP_HBIT 0x01 /* alias of MPCAP_PROPOSAL_SBIT */ +#define MPCAP_GBIT 0x02 /* must be 0 */ +#define MPCAP_FBIT 0x04 /* must be 0 */ +#define MPCAP_EBIT 0x08 /* must be 0 */ +#define MPCAP_DBIT 0x10 /* must be 0 */ +#define MPCAP_CBIT 0x20 /* must be 0 */ +#define MPCAP_BBIT 0x40 /* Extensibility bit, must be 0 */ +#define MPCAP_ABIT 0x80 /* alias of MPCAP_CHECKSUM_CBIT */ +#define MPCAP_CHECKSUM_CBIT 0x80 /* DSS Checksum bit */ + u_int8_t mmco_flags; } __attribute__((__packed__)); struct mptcp_mpcapable_opt_rsp { @@ -105,39 +105,39 @@ struct mptcp_mpcapable_opt_rsp1 { /* MP_JOIN Option for SYN */ struct mptcp_mpjoin_opt_req { - u_int8_t mmjo_kind; - u_int8_t mmjo_len; -#define MPTCP_BACKUP 0x1 - u_int8_t mmjo_subtype_bkp; - u_int8_t mmjo_addr_id; - u_int32_t mmjo_peer_token; - u_int32_t mmjo_rand; + u_int8_t mmjo_kind; + u_int8_t mmjo_len; +#define MPTCP_BACKUP 0x1 + u_int8_t mmjo_subtype_bkp; + u_int8_t mmjo_addr_id; + u_int32_t mmjo_peer_token; + u_int32_t mmjo_rand; } __attribute__((__packed__)); /* MP_JOIN Option for SYN/ACK */ struct mptcp_mpjoin_opt_rsp { - u_int8_t mmjo_kind; - u_int8_t mmjo_len; -#define MPTCP_BACKUP 0x1 - u_int8_t mmjo_subtype_bkp; - u_int8_t mmjo_addr_id; - u_int64_t mmjo_mac; /* Truncated message 
auth code */ - u_int32_t mmjo_rand; + u_int8_t mmjo_kind; + u_int8_t mmjo_len; +#define MPTCP_BACKUP 0x1 + u_int8_t mmjo_subtype_bkp; + u_int8_t mmjo_addr_id; + u_int64_t mmjo_mac; /* Truncated message auth code */ + u_int32_t mmjo_rand; } __attribute__((__packed__)); /* MP_Join Option for ACK */ struct mptcp_mpjoin_opt_rsp2 { - u_int8_t mmjo_kind; - u_int8_t mmjo_len; + u_int8_t mmjo_kind; + u_int8_t mmjo_len; #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t mmjo_reserved1:4, - mmjo_subtype:4; + u_int8_t mmjo_reserved1:4, + mmjo_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t mmjo_subtype:4, - mmjo_reserved1:4; + u_int8_t mmjo_subtype:4, + mmjo_reserved1:4; #endif - u_int8_t mmjo_reserved2; - u_int8_t mmjo_mac[SHA1_RESULTLEN]; /* This is 160 bits HMAC SHA-1 per RFC */ + u_int8_t mmjo_reserved2; + u_int8_t mmjo_mac[SHA1_RESULTLEN]; /* This is 160 bits HMAC SHA-1 per RFC */ } __attribute__((__packed__)); @@ -150,41 +150,41 @@ struct mptcp_mpjoin_opt_rsp2 { /* Add Address Option */ struct mptcp_addaddr_opt { - u_int8_t ma_kind; - u_int8_t ma_len; + u_int8_t ma_kind; + u_int8_t ma_len; #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t ma_ipver:4, - ma_subtype:4; + u_int8_t ma_ipver:4, + ma_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t ma_subtype:4, - ma_ipver:4; + u_int8_t ma_subtype:4, + ma_ipver:4; #endif -#define MA_IPVer_V4 4 /* IPv4 Address tagged to the option */ -#define MA_IPVer_V6 6 /* IPv6 Address tagged to the option */ - u_int8_t ma_addr_id; +#define MA_IPVer_V4 4 /* IPv4 Address tagged to the option */ +#define MA_IPVer_V6 6 /* IPv6 Address tagged to the option */ + u_int8_t ma_addr_id; } __attribute__((__packed__)); /* Address sent in the ADD_ADDR option */ struct mptcp_addr_family_val { union { - struct in_addr ma_v4_addr; - struct in6_addr ma_v6_addr; + struct in_addr ma_v4_addr; + struct in6_addr ma_v6_addr; } ma_addr; /* u_int16_t ma_ports; */ /* optional field */ } __attribute__((__packed__)); /* Remove Address Option */ struct mptcp_remaddr_opt { - u_int8_t mr_kind; - u_int8_t mr_len; + u_int8_t mr_kind; + u_int8_t mr_len; #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t mr_rest:4, - mr_subtype:4; + u_int8_t mr_rest:4, + mr_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t mr_subtype:4, - mr_rest:4; + u_int8_t mr_subtype:4, + mr_rest:4; #endif - u_int8_t mr_addr_id; + u_int8_t mr_addr_id; } __attribute__((__packed__)); /* @@ -197,93 +197,92 @@ struct mptcp_remaddr_opt { /* * DSS Option variants coded as flags in the DSS option flags field */ -#define MDSS_A 0x01 /* Data ACK present if set */ -#define MDSS_a 0x02 /* 64-bit Data ACK present if set */ -#define MDSS_M 0x04 /* Data Sequence Number present if set */ -#define MDSS_m 0x08 /* 64-bit Data Sequence Number present if set */ -#define MDSS_F 0x10 /* Data FIN present */ +#define MDSS_A 0x01 /* Data ACK present if set */ +#define MDSS_a 0x02 /* 64-bit Data ACK present if set */ +#define MDSS_M 0x04 /* Data Sequence Number present if set */ +#define MDSS_m 0x08 /* 64-bit Data Sequence Number present if set */ +#define MDSS_F 0x10 /* Data FIN present */ /* DSS fields common to all DSS option variants */ struct mptcp_dss_copt { - u_int8_t mdss_kind; - u_int8_t mdss_len; + u_int8_t mdss_kind; + u_int8_t mdss_len; #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t mdss_reserved1:4, - mdss_subtype:4; + u_int8_t mdss_reserved1:4, + mdss_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t mdss_subtype:4, - mdss_reserved1:4; + u_int8_t mdss_subtype:4, + mdss_reserved1:4; #endif - u_int8_t mdss_flags; + u_int8_t mdss_flags; }__attribute__((__packed__)); /* 32-bit DSS option 
*/ struct mptcp_dsn_opt { - struct mptcp_dss_copt mdss_copt; - u_int32_t mdss_dsn; /* Data Sequence Number */ - u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ - u_int16_t mdss_data_len; /* Data Length */ + struct mptcp_dss_copt mdss_copt; + u_int32_t mdss_dsn; /* Data Sequence Number */ + u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ + u_int16_t mdss_data_len; /* Data Length */ /* u_int16_t mdss_xsum; */ /* Data checksum - optional */ - }__attribute__((__packed__)); /* 64-bit DSS option */ struct mptcp_dsn64_opt { struct mptcp_dss_copt mdss_copt; - u_int64_t mdss_dsn; /* Data Sequence Number */ - u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ - u_int16_t mdss_data_len; /* Data Length */ + u_int64_t mdss_dsn; /* Data Sequence Number */ + u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ + u_int16_t mdss_data_len; /* Data Length */ /* u_int16_t mdss_xsum; */ /* Data checksum - optional */ }__attribute__((__packed__)); /* 32-bit DSS Data ACK option */ struct mptcp_data_ack_opt { struct mptcp_dss_copt mdss_copt; - u_int32_t mdss_ack; + u_int32_t mdss_ack; }__attribute__((__packed__)); /* 64-bit DSS Data ACK option */ struct mptcp_data_ack64_opt { struct mptcp_dss_copt mdss_copt; - u_int64_t mdss_ack; + u_int64_t mdss_ack; }__attribute__((__packed__)); /* 32-bit DSS+Data ACK option */ struct mptcp_dss_ack_opt { struct mptcp_dss_copt mdss_copt; - u_int32_t mdss_ack; /* Data ACK */ - u_int32_t mdss_dsn; /* Data Sequence Number */ - u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ - u_int16_t mdss_data_len; /* Data Length */ + u_int32_t mdss_ack; /* Data ACK */ + u_int32_t mdss_dsn; /* Data Sequence Number */ + u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ + u_int16_t mdss_data_len; /* Data Length */ /* u_int16_t mdss_xsum; */ /* Data checksum - optional */ }__attribute__((__packed__)); /* 64-bit DSS+Data ACK option */ struct mptcp_dss64_ack64_opt { struct mptcp_dss_copt mdss_copt; - u_int64_t mdss_ack; /* Data ACK */ - u_int64_t mdss_dsn; /* Data Sequence Number */ - u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ - u_int16_t mdss_data_len; /* Data Length */ + u_int64_t mdss_ack; /* Data ACK */ + u_int64_t mdss_dsn; /* Data Sequence Number */ + u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ + u_int16_t mdss_data_len; /* Data Length */ /* u_int16_t mdss_xsum; */ /* Data checksum - optional */ }__attribute__((__packed__)); /* DSS+Data ACK mixed option variants */ struct mptcp_dss32_ack64_opt { struct mptcp_dss_copt mdss_copt; - u_int64_t mdss_ack; /* Data ACK */ - u_int32_t mdss_dsn; /* Data Sequence Number */ - u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ - u_int16_t mdss_data_len; /* Data Length */ + u_int64_t mdss_ack; /* Data ACK */ + u_int32_t mdss_dsn; /* Data Sequence Number */ + u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ + u_int16_t mdss_data_len; /* Data Length */ /* u_int16_t mdss_xsum; */ /* Data checksum - optional */ }__attribute__((__packed__)); struct mptcp_dss64_ack32_opt { struct mptcp_dss_copt mdss_copt; - u_int32_t mdss_ack; /* Data ACK */ - u_int64_t mdss_dsn; /* Data Sequence Number */ - u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ - u_int16_t mdss_data_len; /* Data Length */ + u_int32_t mdss_ack; /* Data ACK */ + u_int64_t mdss_dsn; /* Data Sequence Number */ + u_int32_t mdss_subflow_seqn; /* Relative Subflow Seq Num */ + u_int16_t mdss_data_len; /* Data Length */ /* u_int16_t mdss_xsum; */ /* Data checksum - optional */ 
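The DSS struct variants here differ only in which MDSS_* bits are set, so a receiver can derive the expected option length straight from mdss_flags. A small checker in that spirit, with the optional trailing checksum excluded to match the commented-out mdss_xsum fields:

    #include <stdint.h>
    #include <stdio.h>

    #define MDSS_A 0x01  /* data ACK present */
    #define MDSS_a 0x02  /* data ACK is 64-bit */
    #define MDSS_M 0x04  /* DSN mapping present */
    #define MDSS_m 0x08  /* DSN is 64-bit */

    static unsigned
    dss_expected_len(uint8_t flags)
    {
        unsigned len = 4;                     /* kind, len, subtype/resv, flags */

        if (flags & MDSS_A)
            len += (flags & MDSS_a) ? 8 : 4;  /* data ACK */
        if (flags & MDSS_M)
            len += ((flags & MDSS_m) ? 8 : 4) /* DSN */
                + 4                           /* subflow seq num */
                + 2;                          /* data-level length */
        return len;
    }

    int
    main(void)
    {
        /* mptcp_dss_ack_opt above: 4 + 4 + 10 = 18 bytes */
        printf("32-bit ACK + 32-bit map: %u bytes\n",
            dss_expected_len(MDSS_A | MDSS_M));
        /* mptcp_dss64_ack64_opt above: 4 + 8 + 14 = 26 bytes */
        printf("64-bit ACK + 64-bit map: %u bytes\n",
            dss_expected_len(MDSS_A | MDSS_a | MDSS_M | MDSS_m));
        return 0;
    }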
}__attribute__((__packed__)); @@ -296,17 +295,17 @@ struct mptcp_dss64_ack32_opt { * API is supported. */ struct mptcp_fastclose_opt { - u_int8_t mfast_kind; - u_int8_t mfast_len; + u_int8_t mfast_kind; + u_int8_t mfast_len; #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t mfast_reserved:4, - mfast_subtype:4; + u_int8_t mfast_reserved:4, + mfast_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t mfast_subtype:4, - mfast_reserved:4; + u_int8_t mfast_subtype:4, + mfast_reserved:4; #endif - u_int8_t mfast_reserved1; - u_int64_t mfast_key; /* Option receiver's key */ + u_int8_t mfast_reserved1; + u_int64_t mfast_key; /* Option receiver's key */ }__attribute__((__packed__)); /* @@ -317,17 +316,17 @@ struct mptcp_fastclose_opt { * option. */ struct mptcp_mpfail_opt { - u_int8_t mfail_kind; - u_int8_t mfail_len; + u_int8_t mfail_kind; + u_int8_t mfail_len; #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t mfail_reserved:4, - mfail_subtype:4; + u_int8_t mfail_reserved:4, + mfail_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t mfail_subtype:4, - mfail_reserved:4; + u_int8_t mfail_subtype:4, + mfail_reserved:4; #endif - u_int8_t mfail_reserved1:8; - u_int64_t mfail_dsn; + u_int8_t mfail_reserved1:8; + u_int64_t mfail_dsn; }__attribute__((__packed__)); @@ -341,31 +340,31 @@ struct mptcp_mpfail_opt { /* Option to change priority of self */ struct mptcp_mpprio_opt { - u_int8_t mpprio_kind; - u_int8_t mpprio_len; -#define MPTCP_MPPRIO_BKP 0x1 + u_int8_t mpprio_kind; + u_int8_t mpprio_len; +#define MPTCP_MPPRIO_BKP 0x1 #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t mpprio_flags:4, - mpprio_subtype:4; + u_int8_t mpprio_flags:4, + mpprio_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t mpprio_subtype:4, - mpprio_flags:4; + u_int8_t mpprio_subtype:4, + mpprio_flags:4; #endif }__attribute__((__packed__)); /* Option to change priority of some other subflow(s) using addr_id */ struct mptcp_mpprio_addr_opt { - u_int8_t mpprio_kind; - u_int8_t mpprio_len; -#define MPTCP_MPPRIO_BKP 0x1 + u_int8_t mpprio_kind; + u_int8_t mpprio_len; +#define MPTCP_MPPRIO_BKP 0x1 #if BYTE_ORDER == LITTLE_ENDIAN - u_int8_t mpprio_flags:4, - mpprio_subtype:4; + u_int8_t mpprio_flags:4, + mpprio_subtype:4; #else /* BIG_ENDIAN */ - u_int8_t mpprio_subtype:4, - mpprio_flags:4; + u_int8_t mpprio_subtype:4, + mpprio_flags:4; #endif - u_int8_t mpprio_addrid; + u_int8_t mpprio_addrid; }__attribute__((__packed__)); /* @@ -373,10 +372,10 @@ struct mptcp_mpprio_addr_opt { * */ struct mptcp_pseudohdr { - u_int64_t mphdr_dsn; /* Data Sequence Number */ - u_int32_t mphdr_ssn; /* Subflow Sequence Number */ - u_int16_t mphdr_len; /* Data-Level Length */ - u_int16_t mphdr_xsum; /* MPTCP Level Checksum */ + u_int64_t mphdr_dsn; /* Data Sequence Number */ + u_int32_t mphdr_ssn; /* Subflow Sequence Number */ + u_int16_t mphdr_len; /* Data-Level Length */ + u_int16_t mphdr_xsum; /* MPTCP Level Checksum */ }__attribute__((__packed__)); #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet/mptcp_opt.c b/bsd/netinet/mptcp_opt.c index 13a205586..377f0d567 100644 --- a/bsd/netinet/mptcp_opt.c +++ b/bsd/netinet/mptcp_opt.c @@ -78,31 +78,32 @@ mptcp_setup_first_subflow_syn_opts(struct socket *so, u_char *opt, unsigned optl mp_tp->mpt_flags |= MPTCPF_HEURISTIC_TRAC; tcp_heuristic_mptcp_loss(tp); } - return (optlen); + return optlen; } if (!tcp_heuristic_do_mptcp(tp)) { mp_tp->mpt_flags |= MPTCPF_FALLBACK_HEURISTIC; - return (optlen); + return optlen; } - bzero(&mptcp_opt, sizeof (struct mptcp_mpcapable_opt_common)); + bzero(&mptcp_opt, sizeof(struct mptcp_mpcapable_opt_common)); 
mptcp_opt.mmco_kind = TCPOPT_MULTIPATH; mptcp_opt.mmco_len = - sizeof (struct mptcp_mpcapable_opt_common) + - sizeof (mptcp_key_t); + sizeof(struct mptcp_mpcapable_opt_common) + + sizeof(mptcp_key_t); mptcp_opt.mmco_subtype = MPO_CAPABLE; mptcp_opt.mmco_version = mp_tp->mpt_version; mptcp_opt.mmco_flags |= MPCAP_PROPOSAL_SBIT; - if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) + if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) { mptcp_opt.mmco_flags |= MPCAP_CHECKSUM_CBIT; - memcpy(opt + optlen, &mptcp_opt, sizeof (struct mptcp_mpcapable_opt_common)); - optlen += sizeof (struct mptcp_mpcapable_opt_common); - memcpy(opt + optlen, &mp_tp->mpt_localkey, sizeof (mptcp_key_t)); - optlen += sizeof (mptcp_key_t); + } + memcpy(opt + optlen, &mptcp_opt, sizeof(struct mptcp_mpcapable_opt_common)); + optlen += sizeof(struct mptcp_mpcapable_opt_common); + memcpy(opt + optlen, &mp_tp->mpt_localkey, sizeof(mptcp_key_t)); + optlen += sizeof(mptcp_key_t); - return (optlen); + return optlen; } static unsigned @@ -113,27 +114,29 @@ mptcp_setup_join_subflow_syn_opts(struct socket *so, u_char *opt, unsigned optle struct tcpcb *tp = NULL; struct mptsub *mpts; - if (!inp) - return (optlen); + if (!inp) { + return optlen; + } tp = intotcpcb(inp); - if (!tp) - return (optlen); + if (!tp) { + return optlen; + } mpts = tp->t_mpsub; VERIFY(tptomptp(tp)); mpte_lock_assert_held(tptomptp(tp)->mpt_mpte); - bzero(&mpjoin_req, sizeof (mpjoin_req)); + bzero(&mpjoin_req, sizeof(mpjoin_req)); mpjoin_req.mmjo_kind = TCPOPT_MULTIPATH; - mpjoin_req.mmjo_len = sizeof (mpjoin_req); + mpjoin_req.mmjo_len = sizeof(mpjoin_req); mpjoin_req.mmjo_subtype_bkp = MPO_JOIN << 4; if (tp->t_mpflags & TMPF_BACKUP_PATH) { mpjoin_req.mmjo_subtype_bkp |= MPTCP_BACKUP; } else if (inp->inp_boundifp && IFNET_IS_CELLULAR(inp->inp_boundifp) && - mpts->mpts_mpte->mpte_svctype != MPTCP_SVCTYPE_AGGREGATE) { + mpts->mpts_mpte->mpte_svctype != MPTCP_SVCTYPE_AGGREGATE) { mpjoin_req.mmjo_subtype_bkp |= MPTCP_BACKUP; tp->t_mpflags |= TMPF_BACKUP_PATH; } else { @@ -144,14 +147,14 @@ mptcp_setup_join_subflow_syn_opts(struct socket *so, u_char *opt, unsigned optle mpjoin_req.mmjo_peer_token = tptomptp(tp)->mpt_remotetoken; if (mpjoin_req.mmjo_peer_token == 0) { mptcplog((LOG_DEBUG, "%s: peer token 0", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); } mptcp_get_rands(tp->t_local_aid, tptomptp(tp), &mpjoin_req.mmjo_rand, NULL); memcpy(opt + optlen, &mpjoin_req, mpjoin_req.mmjo_len); optlen += mpjoin_req.mmjo_len; - return (optlen); + return optlen; } unsigned @@ -160,20 +163,20 @@ mptcp_setup_join_ack_opts(struct tcpcb *tp, u_char *opt, unsigned optlen) unsigned new_optlen; struct mptcp_mpjoin_opt_rsp2 join_rsp2; - if ((MAX_TCPOPTLEN - optlen) < sizeof (struct mptcp_mpjoin_opt_rsp2)) { + if ((MAX_TCPOPTLEN - optlen) < sizeof(struct mptcp_mpjoin_opt_rsp2)) { printf("%s: no space left %d \n", __func__, optlen); - return (optlen); + return optlen; } - bzero(&join_rsp2, sizeof (struct mptcp_mpjoin_opt_rsp2)); + bzero(&join_rsp2, sizeof(struct mptcp_mpjoin_opt_rsp2)); join_rsp2.mmjo_kind = TCPOPT_MULTIPATH; - join_rsp2.mmjo_len = sizeof (struct mptcp_mpjoin_opt_rsp2); + join_rsp2.mmjo_len = sizeof(struct mptcp_mpjoin_opt_rsp2); join_rsp2.mmjo_subtype = MPO_JOIN; mptcp_get_hmac(tp->t_local_aid, tptomptp(tp), (u_char*)&join_rsp2.mmjo_mac); memcpy(opt + optlen, &join_rsp2, join_rsp2.mmjo_len); new_optlen = optlen + join_rsp2.mmjo_len; - return (new_optlen); + return new_optlen; } unsigned @@ -181,12 +184,13 @@ mptcp_setup_syn_opts(struct socket 
*so, u_char *opt, unsigned optlen) { unsigned new_optlen; - if (!(so->so_flags & SOF_MP_SEC_SUBFLOW)) + if (!(so->so_flags & SOF_MP_SEC_SUBFLOW)) { new_optlen = mptcp_setup_first_subflow_syn_opts(so, opt, optlen); - else + } else { new_optlen = mptcp_setup_join_subflow_syn_opts(so, opt, optlen); + } - return (new_optlen); + return new_optlen; } static int @@ -197,25 +201,25 @@ mptcp_send_mpfail(struct tcpcb *tp, u_char *opt, unsigned int optlen) struct mptcb *mp_tp = NULL; struct mptcp_mpfail_opt fail_opt; uint64_t dsn; - int len = sizeof (struct mptcp_mpfail_opt); + int len = sizeof(struct mptcp_mpfail_opt); mp_tp = tptomptp(tp); if (mp_tp == NULL) { tp->t_mpflags &= ~TMPF_SND_MPFAIL; - return (optlen); + return optlen; } mpte_lock_assert_held(mp_tp->mpt_mpte); /* if option space low give up */ - if ((MAX_TCPOPTLEN - optlen) < sizeof (struct mptcp_mpfail_opt)) { + if ((MAX_TCPOPTLEN - optlen) < sizeof(struct mptcp_mpfail_opt)) { tp->t_mpflags &= ~TMPF_SND_MPFAIL; - return (optlen); + return optlen; } dsn = mp_tp->mpt_rcvnxt; - bzero(&fail_opt, sizeof (fail_opt)); + bzero(&fail_opt, sizeof(fail_opt)); fail_opt.mfail_kind = TCPOPT_MULTIPATH; fail_opt.mfail_len = len; fail_opt.mfail_subtype = MPO_FAIL; @@ -226,7 +230,7 @@ mptcp_send_mpfail(struct tcpcb *tp, u_char *opt, unsigned int optlen) mptcplog((LOG_DEBUG, "%s: %d \n", __func__, tp->t_local_aid), (MPTCP_SOCKET_DBG | MPTCP_SENDER_DBG), MPTCP_LOGLVL_LOG); - return (optlen); + return optlen; } static int @@ -234,27 +238,31 @@ mptcp_send_infinite_mapping(struct tcpcb *tp, u_char *opt, unsigned int optlen) { struct mptcp_dsn_opt infin_opt; struct mptcb *mp_tp = NULL; - size_t len = sizeof (struct mptcp_dsn_opt); + size_t len = sizeof(struct mptcp_dsn_opt); struct socket *so = tp->t_inpcb->inp_socket; int csum_len = 0; - if (!so) - return (optlen); + if (!so) { + return optlen; + } mp_tp = tptomptp(tp); - if (mp_tp == NULL) - return (optlen); + if (mp_tp == NULL) { + return optlen; + } mpte_lock_assert_held(mp_tp->mpt_mpte); - if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) + if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) { csum_len = 2; + } /* try later */ - if ((MAX_TCPOPTLEN - optlen) < (len + csum_len)) - return (optlen); + if ((MAX_TCPOPTLEN - optlen) < (len + csum_len)) { + return optlen; + } - bzero(&infin_opt, sizeof (infin_opt)); + bzero(&infin_opt, sizeof(infin_opt)); infin_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH; infin_opt.mdss_copt.mdss_len = len + csum_len; infin_opt.mdss_copt.mdss_subtype = MPO_DSS; @@ -285,7 +293,7 @@ mptcp_send_infinite_mapping(struct tcpcb *tp, u_char *opt, unsigned int optlen) } if ((infin_opt.mdss_dsn == 0) || (infin_opt.mdss_subflow_seqn == 0)) { - return (optlen); + return optlen; } infin_opt.mdss_dsn = htonl(infin_opt.mdss_dsn); infin_opt.mdss_subflow_seqn = htonl(infin_opt.mdss_subflow_seqn); @@ -309,7 +317,7 @@ mptcp_send_infinite_mapping(struct tcpcb *tp, u_char *opt, unsigned int optlen) tp->t_mpflags |= TMPF_INFIN_SENT; tcpstat.tcps_estab_fallback++; - return (optlen); + return optlen; } @@ -321,16 +329,17 @@ mptcp_ok_to_fin(struct tcpcb *tp, u_int64_t dsn, u_int32_t datalen) mpte_lock_assert_held(mp_tp->mpt_mpte); dsn = (mp_tp->mpt_sndmax & MPTCP_DATASEQ_LOW32_MASK) | dsn; - if ((dsn + datalen) == mp_tp->mpt_sndmax) - return (1); + if ((dsn + datalen) == mp_tp->mpt_sndmax) { + return 1; + } - return (0); + return 0; } unsigned int mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, - unsigned int optlen, int flags, int len, - boolean_t *p_mptcp_acknow) + unsigned int optlen, int flags, int len, + 
boolean_t *p_mptcp_acknow) { struct inpcb *inp = (struct inpcb *)tp->t_inpcb; struct socket *so = inp->inp_socket; @@ -357,7 +366,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, } if ((MAX_TCPOPTLEN - optlen) < - sizeof (struct mptcp_mpcapable_opt_common)) { + sizeof(struct mptcp_mpcapable_opt_common)) { mptcplog((LOG_ERR, "%s: no space left %d flags %x tp->t_mpflags %x len %d\n", __func__, optlen, flags, tp->t_mpflags, len), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); @@ -365,28 +374,31 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, } if (tp->t_mpflags & TMPF_TCP_FALLBACK) { - if (tp->t_mpflags & TMPF_SND_MPFAIL) + if (tp->t_mpflags & TMPF_SND_MPFAIL) { optlen = mptcp_send_mpfail(tp, opt, optlen); - else if (!(tp->t_mpflags & TMPF_INFIN_SENT)) + } else if (!(tp->t_mpflags & TMPF_INFIN_SENT)) { optlen = mptcp_send_infinite_mapping(tp, opt, optlen); + } goto ret_optlen; } if (tp->t_mpflags & TMPF_SND_KEYS) { struct mptcp_mpcapable_opt_rsp1 mptcp_opt; if ((MAX_TCPOPTLEN - optlen) < - sizeof (struct mptcp_mpcapable_opt_rsp1)) + sizeof(struct mptcp_mpcapable_opt_rsp1)) { goto ret_optlen; - bzero(&mptcp_opt, sizeof (struct mptcp_mpcapable_opt_rsp1)); + } + bzero(&mptcp_opt, sizeof(struct mptcp_mpcapable_opt_rsp1)); mptcp_opt.mmc_common.mmco_kind = TCPOPT_MULTIPATH; mptcp_opt.mmc_common.mmco_len = - sizeof (struct mptcp_mpcapable_opt_rsp1); + sizeof(struct mptcp_mpcapable_opt_rsp1); mptcp_opt.mmc_common.mmco_subtype = MPO_CAPABLE; mptcp_opt.mmc_common.mmco_version = mp_tp->mpt_version; /* HMAC-SHA1 is the proposal */ mptcp_opt.mmc_common.mmco_flags |= MPCAP_PROPOSAL_SBIT; - if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) + if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) { mptcp_opt.mmc_common.mmco_flags |= MPCAP_CHECKSUM_CBIT; + } mptcp_opt.mmc_localkey = mp_tp->mpt_localkey; mptcp_opt.mmc_remotekey = mp_tp->mpt_remotekey; memcpy(opt + optlen, &mptcp_opt, mptcp_opt.mmc_common.mmco_len); @@ -409,14 +421,15 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, } /* Start a timer to retransmit the ACK */ tp->t_timer[TCPT_JACK_RXMT] = - OFFSET_FROM_START(tp, tcp_jack_rxmt); + OFFSET_FROM_START(tp, tcp_jack_rxmt); tp->t_mpflags &= ~TMPF_SND_JACK; goto ret_optlen; } - if (!(tp->t_mpflags & TMPF_MPTCP_TRUE)) + if (!(tp->t_mpflags & TMPF_MPTCP_TRUE)) { goto ret_optlen; + } /* * From here on, all options are sent only if MPTCP_TRUE * or when data is sent early on as in Fast Join @@ -424,7 +437,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, if ((tp->t_mpflags & TMPF_MPTCP_TRUE) && (tp->t_mpflags & TMPF_SND_REM_ADDR)) { - int rem_opt_len = sizeof (struct mptcp_remaddr_opt); + int rem_opt_len = sizeof(struct mptcp_remaddr_opt); if ((optlen + rem_opt_len) <= MAX_TCPOPTLEN) { mptcp_send_remaddr_opt(tp, (struct mptcp_remaddr_opt *)(opt + optlen)); @@ -441,39 +454,40 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, if (mp_tp->mpt_flags & MPTCPF_SND_64BITDSN) { send_64bit_dsn = TRUE; } - if (mp_tp->mpt_flags & MPTCPF_SND_64BITACK) + if (mp_tp->mpt_flags & MPTCPF_SND_64BITACK) { send_64bit_ack = TRUE; + } -#define CHECK_OPTLEN { \ - if ((MAX_TCPOPTLEN - optlen) < dssoptlen) { \ - mptcplog((LOG_ERR, "%s: dssoptlen %d optlen %d \n", __func__, \ - dssoptlen, optlen), \ - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); \ - goto ret_optlen; \ - } \ +#define CHECK_OPTLEN { \ + if ((MAX_TCPOPTLEN - optlen) < dssoptlen) { \ + mptcplog((LOG_ERR, "%s: dssoptlen %d optlen %d \n", __func__, \ + dssoptlen, optlen), \ + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); \ + goto ret_optlen; \ + } 
\ } -#define DO_FIN(dsn_opt) { \ - int sndfin = 0; \ - sndfin = mptcp_ok_to_fin(tp, dsn_opt.mdss_dsn, len); \ - if (sndfin) { \ - dsn_opt.mdss_copt.mdss_flags |= MDSS_F; \ - dsn_opt.mdss_data_len += 1; \ - if (do_csum) \ - dss_csum = in_addword(dss_csum, 1); \ - } \ +#define DO_FIN(dsn_opt) { \ + int sndfin = 0; \ + sndfin = mptcp_ok_to_fin(tp, dsn_opt.mdss_dsn, len); \ + if (sndfin) { \ + dsn_opt.mdss_copt.mdss_flags |= MDSS_F; \ + dsn_opt.mdss_data_len += 1; \ + if (do_csum) \ + dss_csum = in_addword(dss_csum, 1); \ + } \ } -#define CHECK_DATALEN { \ - /* MPTCP socket does not support IP options */ \ - if ((len + optlen + dssoptlen) > tp->t_maxopd) { \ - mptcplog((LOG_ERR, "%s: nosp %d len %d opt %d %d %d\n", \ - __func__, len, dssoptlen, optlen, \ - tp->t_maxseg, tp->t_maxopd), \ - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); \ - /* remove option length from payload len */ \ - len = tp->t_maxopd - optlen - dssoptlen; \ - } \ +#define CHECK_DATALEN { \ + /* MPTCP socket does not support IP options */ \ + if ((len + optlen + dssoptlen) > tp->t_maxopd) { \ + mptcplog((LOG_ERR, "%s: nosp %d len %d opt %d %d %d\n", \ + __func__, len, dssoptlen, optlen, \ + tp->t_maxseg, tp->t_maxopd), \ + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); \ + /* remove option length from payload len */ \ + len = tp->t_maxopd - optlen - dssoptlen; \ + } \ } if ((tp->t_mpflags & TMPF_SEND_DSN) && @@ -488,7 +502,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, * XXX If this delay causes issue, remove the 2-byte padding. */ struct mptcp_dss64_ack32_opt dsn_ack_opt; - unsigned int dssoptlen = sizeof (dsn_ack_opt); + unsigned int dssoptlen = sizeof(dsn_ack_opt); uint16_t dss_csum; if (do_csum) { @@ -497,7 +511,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, CHECK_OPTLEN; - bzero(&dsn_ack_opt, sizeof (dsn_ack_opt)); + bzero(&dsn_ack_opt, sizeof(dsn_ack_opt)); dsn_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH; dsn_ack_opt.mdss_copt.mdss_subtype = MPO_DSS; dsn_ack_opt.mdss_copt.mdss_len = dssoptlen; @@ -507,10 +521,10 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, CHECK_DATALEN; mptcp_output_getm_dsnmap64(so, off, - &dsn_ack_opt.mdss_dsn, - &dsn_ack_opt.mdss_subflow_seqn, - &dsn_ack_opt.mdss_data_len, - &dss_csum); + &dsn_ack_opt.mdss_dsn, + &dsn_ack_opt.mdss_subflow_seqn, + &dsn_ack_opt.mdss_data_len, + &dss_csum); if ((dsn_ack_opt.mdss_data_len == 0) || (dsn_ack_opt.mdss_dsn == 0)) { @@ -526,16 +540,17 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, dsn_ack_opt.mdss_dsn = mptcp_hton64(dsn_ack_opt.mdss_dsn); dsn_ack_opt.mdss_subflow_seqn = htonl( - dsn_ack_opt.mdss_subflow_seqn); + dsn_ack_opt.mdss_subflow_seqn); dsn_ack_opt.mdss_data_len = htons( - dsn_ack_opt.mdss_data_len); + dsn_ack_opt.mdss_data_len); - memcpy(opt + optlen, &dsn_ack_opt, sizeof (dsn_ack_opt)); - if (do_csum) - *((uint16_t *)(void *)(opt + optlen + sizeof (dsn_ack_opt))) = dss_csum; + memcpy(opt + optlen, &dsn_ack_opt, sizeof(dsn_ack_opt)); + if (do_csum) { + *((uint16_t *)(void *)(opt + optlen + sizeof(dsn_ack_opt))) = dss_csum; + } optlen += dssoptlen; - mptcplog((LOG_DEBUG,"%s: long DSS = %llx ACK = %llx \n", __func__, + mptcplog((LOG_DEBUG, "%s: long DSS = %llx ACK = %llx \n", __func__, mptcp_ntoh64(dsn_ack_opt.mdss_dsn), mptcp_ntoh64(dsn_ack_opt.mdss_ack)), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_LOG); @@ -546,9 +561,9 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, if ((tp->t_mpflags & TMPF_SEND_DSN) && (!send_64bit_dsn) && - !(tp->t_mpflags & TMPF_MPTCP_ACKNOW)) { + !(tp->t_mpflags & 
TMPF_MPTCP_ACKNOW)) { struct mptcp_dsn_opt dsn_opt; - unsigned int dssoptlen = sizeof (struct mptcp_dsn_opt); + unsigned int dssoptlen = sizeof(struct mptcp_dsn_opt); uint16_t dss_csum; if (do_csum) { @@ -557,7 +572,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, CHECK_OPTLEN; - bzero(&dsn_opt, sizeof (dsn_opt)); + bzero(&dsn_opt, sizeof(dsn_opt)); dsn_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH; dsn_opt.mdss_copt.mdss_subtype = MPO_DSS; dsn_opt.mdss_copt.mdss_len = dssoptlen; @@ -566,9 +581,9 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, CHECK_DATALEN; mptcp_output_getm_dsnmap32(so, off, &dsn_opt.mdss_dsn, - &dsn_opt.mdss_subflow_seqn, - &dsn_opt.mdss_data_len, - &dss_csum); + &dsn_opt.mdss_subflow_seqn, + &dsn_opt.mdss_data_len, + &dss_csum); if ((dsn_opt.mdss_data_len == 0) || (dsn_opt.mdss_dsn == 0)) { @@ -582,9 +597,10 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, dsn_opt.mdss_dsn = htonl(dsn_opt.mdss_dsn); dsn_opt.mdss_subflow_seqn = htonl(dsn_opt.mdss_subflow_seqn); dsn_opt.mdss_data_len = htons(dsn_opt.mdss_data_len); - memcpy(opt + optlen, &dsn_opt, sizeof (dsn_opt)); - if (do_csum) - *((uint16_t *)(void *)(opt + optlen + sizeof (dsn_opt))) = dss_csum; + memcpy(opt + optlen, &dsn_opt, sizeof(dsn_opt)); + if (do_csum) { + *((uint16_t *)(void *)(opt + optlen + sizeof(dsn_opt))) = dss_csum; + } optlen += dssoptlen; tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW; @@ -596,11 +612,10 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, (!send_64bit_ack) && !(tp->t_mpflags & TMPF_SEND_DSN) && !(tp->t_mpflags & TMPF_SEND_DFIN)) { - struct mptcp_data_ack_opt dack_opt; unsigned int dssoptlen = 0; do_ack32_only: - dssoptlen = sizeof (dack_opt); + dssoptlen = sizeof(dack_opt); CHECK_OPTLEN; @@ -626,7 +641,7 @@ do_ack32_only: struct mptcp_data_ack64_opt dack_opt; unsigned int dssoptlen = 0; do_ack64_only: - dssoptlen = sizeof (dack_opt); + dssoptlen = sizeof(dack_opt); CHECK_OPTLEN; @@ -654,15 +669,16 @@ do_ack64_only: (!send_64bit_ack) && (tp->t_mpflags & TMPF_MPTCP_ACKNOW)) { struct mptcp_dss_ack_opt dss_ack_opt; - unsigned int dssoptlen = sizeof (dss_ack_opt); + unsigned int dssoptlen = sizeof(dss_ack_opt); uint16_t dss_csum; - if (do_csum) + if (do_csum) { dssoptlen += 2; + } CHECK_OPTLEN; - bzero(&dss_ack_opt, sizeof (dss_ack_opt)); + bzero(&dss_ack_opt, sizeof(dss_ack_opt)); dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH; dss_ack_opt.mdss_copt.mdss_len = dssoptlen; dss_ack_opt.mdss_copt.mdss_subtype = MPO_DSS; @@ -673,9 +689,9 @@ do_ack64_only: CHECK_DATALEN; mptcp_output_getm_dsnmap32(so, off, &dss_ack_opt.mdss_dsn, - &dss_ack_opt.mdss_subflow_seqn, - &dss_ack_opt.mdss_data_len, - &dss_csum); + &dss_ack_opt.mdss_subflow_seqn, + &dss_ack_opt.mdss_data_len, + &dss_csum); if ((dss_ack_opt.mdss_data_len == 0) || (dss_ack_opt.mdss_dsn == 0)) { @@ -690,14 +706,16 @@ do_ack64_only: dss_ack_opt.mdss_subflow_seqn = htonl(dss_ack_opt.mdss_subflow_seqn); dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len); - memcpy(opt + optlen, &dss_ack_opt, sizeof (dss_ack_opt)); - if (do_csum) - *((uint16_t *)(void *)(opt + optlen + sizeof (dss_ack_opt))) = dss_csum; + memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt)); + if (do_csum) { + *((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum; + } optlen += dssoptlen; - if (optlen > MAX_TCPOPTLEN) + if (optlen > MAX_TCPOPTLEN) { panic("optlen too large"); + } tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW; goto ret_optlen; } @@ -708,15 +726,16 @@ do_ack64_only: (send_64bit_ack) && 
(tp->t_mpflags & TMPF_MPTCP_ACKNOW)) { struct mptcp_dss32_ack64_opt dss_ack_opt; - unsigned int dssoptlen = sizeof (dss_ack_opt); + unsigned int dssoptlen = sizeof(dss_ack_opt); uint16_t dss_csum; - if (do_csum) + if (do_csum) { dssoptlen += 2; + } CHECK_OPTLEN; - bzero(&dss_ack_opt, sizeof (dss_ack_opt)); + bzero(&dss_ack_opt, sizeof(dss_ack_opt)); dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH; dss_ack_opt.mdss_copt.mdss_len = dssoptlen; dss_ack_opt.mdss_copt.mdss_subtype = MPO_DSS; @@ -727,9 +746,9 @@ do_ack64_only: CHECK_DATALEN; mptcp_output_getm_dsnmap32(so, off, &dss_ack_opt.mdss_dsn, - &dss_ack_opt.mdss_subflow_seqn, - &dss_ack_opt.mdss_data_len, - &dss_csum); + &dss_ack_opt.mdss_subflow_seqn, + &dss_ack_opt.mdss_data_len, + &dss_csum); if ((dss_ack_opt.mdss_data_len == 0) || (dss_ack_opt.mdss_dsn == 0)) { @@ -744,14 +763,16 @@ do_ack64_only: dss_ack_opt.mdss_subflow_seqn = htonl(dss_ack_opt.mdss_subflow_seqn); dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len); - memcpy(opt + optlen, &dss_ack_opt, sizeof (dss_ack_opt)); - if (do_csum) - *((uint16_t *)(void *)(opt + optlen + sizeof (dss_ack_opt))) = dss_csum; + memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt)); + if (do_csum) { + *((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum; + } optlen += dssoptlen; - if (optlen > MAX_TCPOPTLEN) + if (optlen > MAX_TCPOPTLEN) { panic("optlen too large"); + } tp->t_mpflags &= ~TMPF_MPTCP_ACKNOW; goto ret_optlen; } @@ -777,15 +798,16 @@ do_ack64_only: CHECK_OPTLEN; - bzero(&dss_ack_opt, sizeof (dss_ack_opt)); + bzero(&dss_ack_opt, sizeof(dss_ack_opt)); /* * Data FIN occupies one sequence space. * Don't send it if it has been Acked. */ if ((mp_tp->mpt_sndnxt + 1 != mp_tp->mpt_sndmax) || - (mp_tp->mpt_snduna == mp_tp->mpt_sndmax)) + (mp_tp->mpt_snduna == mp_tp->mpt_sndmax)) { goto ret_optlen; + } dss_ack_opt.mdss_copt.mdss_kind = TCPOPT_MULTIPATH; dss_ack_opt.mdss_copt.mdss_len = dssoptlen; @@ -798,15 +820,16 @@ do_ack64_only: dss_ack_opt.mdss_subflow_seqn = 0; dss_ack_opt.mdss_data_len = 1; dss_ack_opt.mdss_data_len = htons(dss_ack_opt.mdss_data_len); - memcpy(opt + optlen, &dss_ack_opt, sizeof (dss_ack_opt)); - if (do_csum) - *((uint16_t *)(void *)(opt + optlen + sizeof (dss_ack_opt))) = dss_csum; + memcpy(opt + optlen, &dss_ack_opt, sizeof(dss_ack_opt)); + if (do_csum) { + *((uint16_t *)(void *)(opt + optlen + sizeof(dss_ack_opt))) = dss_csum; + } optlen += dssoptlen; } ret_optlen: - if (TRUE == *p_mptcp_acknow ) { + if (TRUE == *p_mptcp_acknow) { VERIFY(old_mpt_flags != 0); u_int32_t new_mpt_flags = tp->t_mpflags & TMPF_MPTCP_SIGNALS; @@ -829,7 +852,7 @@ ret_optlen: tp->t_mpflags &= ~TMPF_MPTCP_SIGNALS; *p_mptcp_acknow = FALSE; mptcplog((LOG_DEBUG, "%s: no action \n", __func__), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); + MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); } else { mptcplog((LOG_DEBUG, "%s: acknow set, old flags %x new flags %x \n", __func__, old_mpt_flags, new_mpt_flags), @@ -853,30 +876,31 @@ mptcp_sanitize_option(struct tcpcb *tp, int mptcp_subtype) if (mp_tp == NULL) { mptcplog((LOG_ERR, "%s: NULL mpsocket \n", __func__), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (0); + return 0; } switch (mptcp_subtype) { - case MPO_CAPABLE: - break; - case MPO_JOIN: /* fall through */ - case MPO_DSS: /* fall through */ - case MPO_FASTCLOSE: /* fall through */ - case MPO_FAIL: /* fall through */ - case MPO_REMOVE_ADDR: /* fall through */ - case MPO_ADD_ADDR: /* fall through */ - case MPO_PRIO: /* fall through */ - if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) - ret 
= 0; - break; - default: + case MPO_CAPABLE: + break; + case MPO_JOIN: /* fall through */ + case MPO_DSS: /* fall through */ + case MPO_FASTCLOSE: /* fall through */ + case MPO_FAIL: /* fall through */ + case MPO_REMOVE_ADDR: /* fall through */ + case MPO_ADD_ADDR: /* fall through */ + case MPO_PRIO: /* fall through */ + if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) { ret = 0; - mptcplog((LOG_ERR, "%s: type = %d \n", __func__, - mptcp_subtype), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - break; + } + break; + default: + ret = 0; + mptcplog((LOG_ERR, "%s: type = %d \n", __func__, + mptcp_subtype), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + break; } - return (ret); + return ret; } static int @@ -887,14 +911,16 @@ mptcp_valid_mpcapable_common_opt(u_char *cp) /* mmco_kind, mmco_len and mmco_subtype are validated before */ - if (!(rsp->mmco_flags & MPCAP_PROPOSAL_SBIT)) - return (0); + if (!(rsp->mmco_flags & MPCAP_PROPOSAL_SBIT)) { + return 0; + } if (rsp->mmco_flags & (MPCAP_BBIT | MPCAP_CBIT | MPCAP_DBIT | - MPCAP_EBIT | MPCAP_FBIT | MPCAP_GBIT)) - return (0); + MPCAP_EBIT | MPCAP_FBIT | MPCAP_GBIT)) { + return 0; + } - return (1); + return 1; } @@ -908,8 +934,9 @@ mptcp_do_mpcapable_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th, mpte_lock_assert_held(mp_tp->mpt_mpte); /* Only valid on SYN/ACK */ - if ((th->th_flags & (TH_SYN | TH_ACK)) != (TH_SYN | TH_ACK)) + if ((th->th_flags & (TH_SYN | TH_ACK)) != (TH_SYN | TH_ACK)) { return; + } /* Validate the kind, len, flags */ if (mptcp_valid_mpcapable_common_opt(cp) != 1) { @@ -918,15 +945,16 @@ mptcp_do_mpcapable_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th, } /* handle SYN/ACK retransmission by acknowledging with ACK */ - if (mp_tp->mpt_state >= MPTCPS_ESTABLISHED) + if (mp_tp->mpt_state >= MPTCPS_ESTABLISHED) { return; + } /* A SYN/ACK contains peer's key and flags */ - if (optlen != sizeof (struct mptcp_mpcapable_opt_rsp)) { + if (optlen != sizeof(struct mptcp_mpcapable_opt_rsp)) { /* complain */ mptcplog((LOG_ERR, "%s: SYN_ACK optlen = %d, sizeof mp opt = %lu \n", __func__, optlen, - sizeof (struct mptcp_mpcapable_opt_rsp)), + sizeof(struct mptcp_mpcapable_opt_rsp)), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); tcpstat.tcps_invalid_mpcap++; return; @@ -937,8 +965,9 @@ mptcp_do_mpcapable_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th, * it was not negotiated on the first SYN. 
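	 * (Hedged aside, added in editing: RFC 6824 makes the checksum
	 * option sticky in this direction -- if either end sets the C-bit
	 * during the MP_CAPABLE exchange, DSS checksums are used for the
	 * whole connection, which is why MPTCPF_CHECKSUM is OR'd into
	 * mpt_flags here rather than renegotiated.)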
*/ if (((struct mptcp_mpcapable_opt_common *)cp)->mmco_flags & - MPCAP_CHECKSUM_CBIT) + MPCAP_CHECKSUM_CBIT) { mp_tp->mpt_flags |= MPTCPF_CHECKSUM; + } rsp = (struct mptcp_mpcapable_opt_rsp *)cp; mp_tp->mpt_remotekey = rsp->mmc_localkey; @@ -960,26 +989,27 @@ mptcp_do_mpcapable_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th, static void mptcp_do_mpjoin_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th, int optlen) { -#define MPTCP_JOPT_ERROR_PATH(tp) { \ - tp->t_mpflags |= TMPF_RESET; \ - tcpstat.tcps_invalid_joins++; \ - if (tp->t_inpcb->inp_socket != NULL) { \ - soevent(tp->t_inpcb->inp_socket, \ - SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); \ - } \ +#define MPTCP_JOPT_ERROR_PATH(tp) { \ + tp->t_mpflags |= TMPF_RESET; \ + tcpstat.tcps_invalid_joins++; \ + if (tp->t_inpcb->inp_socket != NULL) { \ + soevent(tp->t_inpcb->inp_socket, \ + SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); \ + } \ } int error = 0; struct mptcp_mpjoin_opt_rsp *join_rsp = (struct mptcp_mpjoin_opt_rsp *)cp; /* Only valid on SYN/ACK */ - if ((th->th_flags & (TH_SYN | TH_ACK)) != (TH_SYN | TH_ACK)) + if ((th->th_flags & (TH_SYN | TH_ACK)) != (TH_SYN | TH_ACK)) { return; + } - if (optlen != sizeof (struct mptcp_mpjoin_opt_rsp)) { + if (optlen != sizeof(struct mptcp_mpjoin_opt_rsp)) { mptcplog((LOG_ERR, "%s: SYN_ACK: unexpected optlen = %d mp " "option = %lu\n", __func__, optlen, - sizeof (struct mptcp_mpjoin_opt_rsp)), + sizeof(struct mptcp_mpjoin_opt_rsp)), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); tp->t_mpflags &= ~TMPF_PREESTABLISHED; /* send RST and close */ @@ -1014,19 +1044,20 @@ mptcp_validate_join_hmac(struct tcpcb *tp, u_char* hmac, int mac_len) rem_rand = loc_rand = 0; mptcp_get_rands(tp->t_local_aid, mp_tp, &loc_rand, &rem_rand); - if ((rem_rand == 0) || (loc_rand == 0)) - return (-1); + if ((rem_rand == 0) || (loc_rand == 0)) { + return -1; + } mptcp_hmac_sha1(mp_tp->mpt_remotekey, mp_tp->mpt_localkey, rem_rand, loc_rand, digest); - if (bcmp(digest, hmac, mac_len) == 0) - return (0); /* matches */ - else { + if (bcmp(digest, hmac, mac_len) == 0) { + return 0; /* matches */ + } else { printf("%s: remote key %llx local key %llx remote rand %x " "local rand %x \n", __func__, mp_tp->mpt_remotekey, mp_tp->mpt_localkey, rem_rand, loc_rand); - return (-1); + return -1; } } @@ -1044,12 +1075,13 @@ mptcp_data_ack_rcvd(struct mptcb *mp_tp, struct tcpcb *tp, u_int64_t full_dack) if (acked > mp_so->so_snd.sb_cc) { if (acked > mp_so->so_snd.sb_cc + 1 || - mp_tp->mpt_state < MPTCPS_FIN_WAIT_1) + mp_tp->mpt_state < MPTCPS_FIN_WAIT_1) { mptcplog((LOG_ERR, "%s: acked %u, sb_cc %u full %u suna %u state %u\n", - __func__, (uint32_t)acked, mp_so->so_snd.sb_cc, - (uint32_t)full_dack, (uint32_t)mp_tp->mpt_snduna, - mp_tp->mpt_state), - MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_ERR); + __func__, (uint32_t)acked, mp_so->so_snd.sb_cc, + (uint32_t)full_dack, (uint32_t)mp_tp->mpt_snduna, + mp_tp->mpt_state), + MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_ERR); + } sbdrop(&mp_so->so_snd, (int)mp_so->so_snd.sb_cc); } else { @@ -1100,8 +1132,8 @@ mptcp_update_window(struct mptcb *mp_tp, u_int64_t ack, u_int64_t seq, u_int32_t { if (SEQ_LT(mp_tp->mpt_sndwl1, seq) || (mp_tp->mpt_sndwl1 == seq && - (SEQ_LT(mp_tp->mpt_sndwl2, ack) || - (mp_tp->mpt_sndwl2 == ack && tiwin > mp_tp->mpt_sndwnd)))) { + (SEQ_LT(mp_tp->mpt_sndwl2, ack) || + (mp_tp->mpt_sndwl2 == ack && tiwin > mp_tp->mpt_sndwnd)))) { mp_tp->mpt_sndwnd = tiwin; mp_tp->mpt_sndwl1 = seq; mp_tp->mpt_sndwl2 = ack; @@ -1110,7 +1142,7 @@ mptcp_update_window(struct mptcb *mp_tp, u_int64_t ack, u_int64_t seq, 
u_int32_t static void mptcp_do_dss_opt_ack_meat(u_int64_t full_dack, u_int64_t full_dsn, - struct tcpcb *tp, u_int32_t tiwin) + struct tcpcb *tp, u_int32_t tiwin) { struct mptcb *mp_tp = tptomptp(tp); int close_notify = 0; @@ -1120,21 +1152,23 @@ mptcp_do_dss_opt_ack_meat(u_int64_t full_dack, u_int64_t full_dsn, if (MPTCP_SEQ_LEQ(full_dack, mp_tp->mpt_sndmax) && MPTCP_SEQ_GEQ(full_dack, mp_tp->mpt_snduna)) { mptcp_data_ack_rcvd(mp_tp, tp, full_dack); - if (mp_tp->mpt_state > MPTCPS_FIN_WAIT_2) + if (mp_tp->mpt_state > MPTCPS_FIN_WAIT_2) { close_notify = 1; + } if (mp_tp->mpt_flags & MPTCPF_RCVD_64BITACK) { mp_tp->mpt_flags &= ~MPTCPF_RCVD_64BITACK; mp_tp->mpt_flags &= ~MPTCPF_SND_64BITDSN; } mptcp_notify_mpready(tp->t_inpcb->inp_socket); - if (close_notify) + if (close_notify) { mptcp_notify_close(tp->t_inpcb->inp_socket); + } } else { os_log_error(mptcp_log_handle, - "%s: unexpected dack %u snduna %u sndmax %u\n", - __func__, (u_int32_t)full_dack, - (u_int32_t)mp_tp->mpt_snduna, - (u_int32_t)mp_tp->mpt_sndmax); + "%s: unexpected dack %u snduna %u sndmax %u\n", + __func__, (u_int32_t)full_dack, + (u_int32_t)mp_tp->mpt_snduna, + (u_int32_t)mp_tp->mpt_sndmax); } mptcp_update_window(mp_tp, full_dack, full_dsn, tiwin); @@ -1149,226 +1183,233 @@ mptcp_do_dss_opt_meat(u_char *cp, struct tcpcb *tp, struct tcphdr *th) struct mptcb *mp_tp = tptomptp(tp); int csum_len = 0; -#define MPTCP_DSS_OPT_SZ_CHK(len, expected_len) { \ - if (len != expected_len) { \ - mptcplog((LOG_ERR, "%s: bad len = %d dss: %x \n", __func__, \ - len, dss_rsp->mdss_flags), \ - (MPTCP_SOCKET_DBG|MPTCP_RECEIVER_DBG), \ - MPTCP_LOGLVL_LOG); \ - return; \ - } \ +#define MPTCP_DSS_OPT_SZ_CHK(len, expected_len) { \ + if (len != expected_len) { \ + mptcplog((LOG_ERR, "%s: bad len = %d dss: %x \n", __func__, \ + len, dss_rsp->mdss_flags), \ + (MPTCP_SOCKET_DBG|MPTCP_RECEIVER_DBG), \ + MPTCP_LOGLVL_LOG); \ + return; \ + } \ } - if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) + if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) { csum_len = 2; + } - dss_rsp->mdss_flags &= (MDSS_A|MDSS_a|MDSS_M|MDSS_m); + dss_rsp->mdss_flags &= (MDSS_A | MDSS_a | MDSS_M | MDSS_m); switch (dss_rsp->mdss_flags) { - case (MDSS_M): - { - /* 32-bit DSS, No Data ACK */ - struct mptcp_dsn_opt *dss_rsp1; - dss_rsp1 = (struct mptcp_dsn_opt *)cp; - - MPTCP_DSS_OPT_SZ_CHK(dss_rsp1->mdss_copt.mdss_len, - sizeof (struct mptcp_dsn_opt) + csum_len); - if (csum_len == 0) - mptcp_update_dss_rcv_state(dss_rsp1, tp, 0); - else - mptcp_update_dss_rcv_state(dss_rsp1, tp, - *(uint16_t *)(void *)(cp + - (dss_rsp1->mdss_copt.mdss_len - csum_len))); - break; - } - case (MDSS_A): - { - /* 32-bit Data ACK, no DSS */ - struct mptcp_data_ack_opt *dack_opt; - dack_opt = (struct mptcp_data_ack_opt *)cp; - - MPTCP_DSS_OPT_SZ_CHK(dack_opt->mdss_copt.mdss_len, - sizeof (struct mptcp_data_ack_opt)); - - u_int32_t dack = dack_opt->mdss_ack; - NTOHL(dack); - MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack); - mptcp_do_dss_opt_ack_meat(full_dack, mp_tp->mpt_sndwl1, tp, tiwin); - break; - } - case (MDSS_M | MDSS_A): - { - /* 32-bit Data ACK + 32-bit DSS */ - struct mptcp_dss_ack_opt *dss_ack_rsp; - dss_ack_rsp = (struct mptcp_dss_ack_opt *)cp; - u_int64_t full_dsn; - uint16_t csum = 0; - - MPTCP_DSS_OPT_SZ_CHK(dss_ack_rsp->mdss_copt.mdss_len, - sizeof (struct mptcp_dss_ack_opt) + csum_len); - - u_int32_t dack = dss_ack_rsp->mdss_ack; - NTOHL(dack); - MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack); - - NTOHL(dss_ack_rsp->mdss_dsn); - NTOHL(dss_ack_rsp->mdss_subflow_seqn); - 
NTOHS(dss_ack_rsp->mdss_data_len); - MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt, dss_ack_rsp->mdss_dsn, full_dsn); - - mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin); - - if (csum_len != 0) - csum = *(uint16_t *)(void *)(cp + (dss_ack_rsp->mdss_copt.mdss_len - csum_len)); - - mptcp_update_rcv_state_meat(mp_tp, tp, - full_dsn, - dss_ack_rsp->mdss_subflow_seqn, - dss_ack_rsp->mdss_data_len, - csum); - break; + case (MDSS_M): + { + /* 32-bit DSS, No Data ACK */ + struct mptcp_dsn_opt *dss_rsp1; + dss_rsp1 = (struct mptcp_dsn_opt *)cp; + + MPTCP_DSS_OPT_SZ_CHK(dss_rsp1->mdss_copt.mdss_len, + sizeof(struct mptcp_dsn_opt) + csum_len); + if (csum_len == 0) { + mptcp_update_dss_rcv_state(dss_rsp1, tp, 0); + } else { + mptcp_update_dss_rcv_state(dss_rsp1, tp, + *(uint16_t *)(void *)(cp + + (dss_rsp1->mdss_copt.mdss_len - csum_len))); } - case (MDSS_M | MDSS_m): - { - /* 64-bit DSS , No Data ACK */ - struct mptcp_dsn64_opt *dsn64; - dsn64 = (struct mptcp_dsn64_opt *)cp; - u_int64_t full_dsn; - uint16_t csum = 0; + break; + } + case (MDSS_A): + { + /* 32-bit Data ACK, no DSS */ + struct mptcp_data_ack_opt *dack_opt; + dack_opt = (struct mptcp_data_ack_opt *)cp; + + MPTCP_DSS_OPT_SZ_CHK(dack_opt->mdss_copt.mdss_len, + sizeof(struct mptcp_data_ack_opt)); + + u_int32_t dack = dack_opt->mdss_ack; + NTOHL(dack); + MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack); + mptcp_do_dss_opt_ack_meat(full_dack, mp_tp->mpt_sndwl1, tp, tiwin); + break; + } + case (MDSS_M | MDSS_A): + { + /* 32-bit Data ACK + 32-bit DSS */ + struct mptcp_dss_ack_opt *dss_ack_rsp; + dss_ack_rsp = (struct mptcp_dss_ack_opt *)cp; + u_int64_t full_dsn; + uint16_t csum = 0; - MPTCP_DSS_OPT_SZ_CHK(dsn64->mdss_copt.mdss_len, - sizeof (struct mptcp_dsn64_opt) + csum_len); + MPTCP_DSS_OPT_SZ_CHK(dss_ack_rsp->mdss_copt.mdss_len, + sizeof(struct mptcp_dss_ack_opt) + csum_len); - mp_tp->mpt_flags |= MPTCPF_SND_64BITACK; + u_int32_t dack = dss_ack_rsp->mdss_ack; + NTOHL(dack); + MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack); - full_dsn = mptcp_ntoh64(dsn64->mdss_dsn); - NTOHL(dsn64->mdss_subflow_seqn); - NTOHS(dsn64->mdss_data_len); + NTOHL(dss_ack_rsp->mdss_dsn); + NTOHL(dss_ack_rsp->mdss_subflow_seqn); + NTOHS(dss_ack_rsp->mdss_data_len); + MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt, dss_ack_rsp->mdss_dsn, full_dsn); - if (csum_len != 0) - csum = *(uint16_t *)(void *)(cp + dsn64->mdss_copt.mdss_len - csum_len); + mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin); - mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, - dsn64->mdss_subflow_seqn, - dsn64->mdss_data_len, - csum); - break; + if (csum_len != 0) { + csum = *(uint16_t *)(void *)(cp + (dss_ack_rsp->mdss_copt.mdss_len - csum_len)); } - case (MDSS_A | MDSS_a): - { - /* 64-bit Data ACK, no DSS */ - struct mptcp_data_ack64_opt *dack64; - dack64 = (struct mptcp_data_ack64_opt *)cp; - MPTCP_DSS_OPT_SZ_CHK(dack64->mdss_copt.mdss_len, - sizeof (struct mptcp_data_ack64_opt)); + mptcp_update_rcv_state_meat(mp_tp, tp, + full_dsn, + dss_ack_rsp->mdss_subflow_seqn, + dss_ack_rsp->mdss_data_len, + csum); + break; + } + case (MDSS_M | MDSS_m): + { + /* 64-bit DSS , No Data ACK */ + struct mptcp_dsn64_opt *dsn64; + dsn64 = (struct mptcp_dsn64_opt *)cp; + u_int64_t full_dsn; + uint16_t csum = 0; + + MPTCP_DSS_OPT_SZ_CHK(dsn64->mdss_copt.mdss_len, + sizeof(struct mptcp_dsn64_opt) + csum_len); - mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK; + mp_tp->mpt_flags |= MPTCPF_SND_64BITACK; - full_dack = mptcp_ntoh64(dack64->mdss_ack); - mptcp_do_dss_opt_ack_meat(full_dack, mp_tp->mpt_sndwl1, tp, tiwin); - 
break; + full_dsn = mptcp_ntoh64(dsn64->mdss_dsn); + NTOHL(dsn64->mdss_subflow_seqn); + NTOHS(dsn64->mdss_data_len); + + if (csum_len != 0) { + csum = *(uint16_t *)(void *)(cp + dsn64->mdss_copt.mdss_len - csum_len); } - case (MDSS_M | MDSS_m | MDSS_A): - { - /* 64-bit DSS + 32-bit Data ACK */ - struct mptcp_dss64_ack32_opt *dss_ack_rsp; - dss_ack_rsp = (struct mptcp_dss64_ack32_opt *)cp; - u_int64_t full_dsn; - uint16_t csum = 0; - MPTCP_DSS_OPT_SZ_CHK(dss_ack_rsp->mdss_copt.mdss_len, - sizeof (struct mptcp_dss64_ack32_opt) + csum_len); + mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, + dsn64->mdss_subflow_seqn, + dsn64->mdss_data_len, + csum); + break; + } + case (MDSS_A | MDSS_a): + { + /* 64-bit Data ACK, no DSS */ + struct mptcp_data_ack64_opt *dack64; + dack64 = (struct mptcp_data_ack64_opt *)cp; + + MPTCP_DSS_OPT_SZ_CHK(dack64->mdss_copt.mdss_len, + sizeof(struct mptcp_data_ack64_opt)); - u_int32_t dack = dss_ack_rsp->mdss_ack; - NTOHL(dack); - mp_tp->mpt_flags |= MPTCPF_SND_64BITACK; - MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack); + mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK; - full_dsn = mptcp_ntoh64(dss_ack_rsp->mdss_dsn); - NTOHL(dss_ack_rsp->mdss_subflow_seqn); - NTOHS(dss_ack_rsp->mdss_data_len); + full_dack = mptcp_ntoh64(dack64->mdss_ack); + mptcp_do_dss_opt_ack_meat(full_dack, mp_tp->mpt_sndwl1, tp, tiwin); + break; + } + case (MDSS_M | MDSS_m | MDSS_A): + { + /* 64-bit DSS + 32-bit Data ACK */ + struct mptcp_dss64_ack32_opt *dss_ack_rsp; + dss_ack_rsp = (struct mptcp_dss64_ack32_opt *)cp; + u_int64_t full_dsn; + uint16_t csum = 0; - mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin); + MPTCP_DSS_OPT_SZ_CHK(dss_ack_rsp->mdss_copt.mdss_len, + sizeof(struct mptcp_dss64_ack32_opt) + csum_len); - if (csum_len != 0) - csum = *(uint16_t *)(void *)(cp + dss_ack_rsp->mdss_copt.mdss_len - csum_len); + u_int32_t dack = dss_ack_rsp->mdss_ack; + NTOHL(dack); + mp_tp->mpt_flags |= MPTCPF_SND_64BITACK; + MPTCP_EXTEND_DSN(mp_tp->mpt_snduna, dack, full_dack); - mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, - dss_ack_rsp->mdss_subflow_seqn, - dss_ack_rsp->mdss_data_len, - csum); + full_dsn = mptcp_ntoh64(dss_ack_rsp->mdss_dsn); + NTOHL(dss_ack_rsp->mdss_subflow_seqn); + NTOHS(dss_ack_rsp->mdss_data_len); - break; + mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin); + + if (csum_len != 0) { + csum = *(uint16_t *)(void *)(cp + dss_ack_rsp->mdss_copt.mdss_len - csum_len); } - case (MDSS_M | MDSS_A | MDSS_a): - { - /* 32-bit DSS + 64-bit Data ACK */ - struct mptcp_dss32_ack64_opt *dss32_ack64_opt; - dss32_ack64_opt = (struct mptcp_dss32_ack64_opt *)cp; - u_int64_t full_dsn; - - MPTCP_DSS_OPT_SZ_CHK( - dss32_ack64_opt->mdss_copt.mdss_len, - sizeof (struct mptcp_dss32_ack64_opt) + csum_len); - - full_dack = mptcp_ntoh64(dss32_ack64_opt->mdss_ack); - NTOHL(dss32_ack64_opt->mdss_dsn); - mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK; - MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt, - dss32_ack64_opt->mdss_dsn, full_dsn); - NTOHL(dss32_ack64_opt->mdss_subflow_seqn); - NTOHS(dss32_ack64_opt->mdss_data_len); - - mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin); - if (csum_len == 0) - mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, - dss32_ack64_opt->mdss_subflow_seqn, - dss32_ack64_opt->mdss_data_len, 0); - else - mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, - dss32_ack64_opt->mdss_subflow_seqn, - dss32_ack64_opt->mdss_data_len, - *(uint16_t *)(void *)(cp + - dss32_ack64_opt->mdss_copt.mdss_len - - csum_len)); - break; + + mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, + 
dss_ack_rsp->mdss_subflow_seqn, + dss_ack_rsp->mdss_data_len, + csum); + + break; + } + case (MDSS_M | MDSS_A | MDSS_a): + { + /* 32-bit DSS + 64-bit Data ACK */ + struct mptcp_dss32_ack64_opt *dss32_ack64_opt; + dss32_ack64_opt = (struct mptcp_dss32_ack64_opt *)cp; + u_int64_t full_dsn; + + MPTCP_DSS_OPT_SZ_CHK( + dss32_ack64_opt->mdss_copt.mdss_len, + sizeof(struct mptcp_dss32_ack64_opt) + csum_len); + + full_dack = mptcp_ntoh64(dss32_ack64_opt->mdss_ack); + NTOHL(dss32_ack64_opt->mdss_dsn); + mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK; + MPTCP_EXTEND_DSN(mp_tp->mpt_rcvnxt, + dss32_ack64_opt->mdss_dsn, full_dsn); + NTOHL(dss32_ack64_opt->mdss_subflow_seqn); + NTOHS(dss32_ack64_opt->mdss_data_len); + + mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin); + if (csum_len == 0) { + mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, + dss32_ack64_opt->mdss_subflow_seqn, + dss32_ack64_opt->mdss_data_len, 0); + } else { + mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, + dss32_ack64_opt->mdss_subflow_seqn, + dss32_ack64_opt->mdss_data_len, + *(uint16_t *)(void *)(cp + + dss32_ack64_opt->mdss_copt.mdss_len - + csum_len)); } - case (MDSS_M | MDSS_m | MDSS_A | MDSS_a): - { - /* 64-bit DSS + 64-bit Data ACK */ - struct mptcp_dss64_ack64_opt *dss64_ack64; - dss64_ack64 = (struct mptcp_dss64_ack64_opt *)cp; - u_int64_t full_dsn; - - MPTCP_DSS_OPT_SZ_CHK(dss64_ack64->mdss_copt.mdss_len, - sizeof (struct mptcp_dss64_ack64_opt) + csum_len); - - mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK; - mp_tp->mpt_flags |= MPTCPF_SND_64BITACK; - full_dsn = mptcp_ntoh64(dss64_ack64->mdss_dsn); - full_dack = mptcp_ntoh64(dss64_ack64->mdss_dsn); - mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin); - NTOHL(dss64_ack64->mdss_subflow_seqn); - NTOHS(dss64_ack64->mdss_data_len); - if (csum_len == 0) - mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, - dss64_ack64->mdss_subflow_seqn, - dss64_ack64->mdss_data_len, 0); - else - mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, - dss64_ack64->mdss_subflow_seqn, - dss64_ack64->mdss_data_len, - *(uint16_t *)(void *)(cp + - dss64_ack64->mdss_copt.mdss_len - - csum_len)); - break; + break; + } + case (MDSS_M | MDSS_m | MDSS_A | MDSS_a): + { + /* 64-bit DSS + 64-bit Data ACK */ + struct mptcp_dss64_ack64_opt *dss64_ack64; + dss64_ack64 = (struct mptcp_dss64_ack64_opt *)cp; + u_int64_t full_dsn; + + MPTCP_DSS_OPT_SZ_CHK(dss64_ack64->mdss_copt.mdss_len, + sizeof(struct mptcp_dss64_ack64_opt) + csum_len); + + mp_tp->mpt_flags |= MPTCPF_RCVD_64BITACK; + mp_tp->mpt_flags |= MPTCPF_SND_64BITACK; + full_dsn = mptcp_ntoh64(dss64_ack64->mdss_dsn); + full_dack = mptcp_ntoh64(dss64_ack64->mdss_dsn); + mptcp_do_dss_opt_ack_meat(full_dack, full_dsn, tp, tiwin); + NTOHL(dss64_ack64->mdss_subflow_seqn); + NTOHS(dss64_ack64->mdss_data_len); + if (csum_len == 0) { + mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, + dss64_ack64->mdss_subflow_seqn, + dss64_ack64->mdss_data_len, 0); + } else { + mptcp_update_rcv_state_meat(mp_tp, tp, full_dsn, + dss64_ack64->mdss_subflow_seqn, + dss64_ack64->mdss_data_len, + *(uint16_t *)(void *)(cp + + dss64_ack64->mdss_copt.mdss_len - + csum_len)); } - default: - mptcplog((LOG_DEBUG,"%s: File bug, DSS flags = %x\n", - __func__, dss_rsp->mdss_flags), - (MPTCP_SOCKET_DBG|MPTCP_RECEIVER_DBG), - MPTCP_LOGLVL_LOG); - break; + break; + } + default: + mptcplog((LOG_DEBUG, "%s: File bug, DSS flags = %x\n", + __func__, dss_rsp->mdss_flags), + (MPTCP_SOCKET_DBG | MPTCP_RECEIVER_DBG), + MPTCP_LOGLVL_LOG); + break; } } @@ -1378,8 +1419,9 @@ mptcp_do_dss_opt(struct tcpcb 
*tp, u_char *cp, struct tcphdr *th, int optlen) #pragma unused(optlen) struct mptcb *mp_tp = tptomptp(tp); - if (!mp_tp) + if (!mp_tp) { return; + } /* We may get Data ACKs just during fallback, so don't ignore those */ if ((tp->t_mpflags & TMPF_MPTCP_TRUE) || @@ -1387,8 +1429,9 @@ mptcp_do_dss_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th, int optlen) struct mptcp_dss_copt *dss_rsp = (struct mptcp_dss_copt *)cp; if (dss_rsp->mdss_subtype == MPO_DSS) { - if (dss_rsp->mdss_flags & MDSS_F) + if (dss_rsp->mdss_flags & MDSS_F) { tp->t_rcv_map.mpt_dfin = 1; + } mptcp_do_dss_opt_meat(cp, tp, th); } @@ -1401,17 +1444,19 @@ mptcp_do_fastclose_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th) struct mptcb *mp_tp = NULL; struct mptcp_fastclose_opt *fc_opt = (struct mptcp_fastclose_opt *)cp; - if (th->th_flags != TH_ACK) + if (th->th_flags != TH_ACK) { return; + } - if (fc_opt->mfast_len != sizeof (struct mptcp_fastclose_opt)) { + if (fc_opt->mfast_len != sizeof(struct mptcp_fastclose_opt)) { tcpstat.tcps_invalid_opt++; return; } mp_tp = tptomptp(tp); - if (!mp_tp) + if (!mp_tp) { return; + } if (fc_opt->mfast_key != mp_tp->mpt_localkey) { tcpstat.tcps_invalid_opt++; @@ -1455,11 +1500,13 @@ mptcp_do_mpfail_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th) } /* A packet without RST, must atleast have the ACK bit set */ - if ((th->th_flags != TH_ACK) && (th->th_flags != TH_RST)) + if ((th->th_flags != TH_ACK) && (th->th_flags != TH_RST)) { return; + } - if (fail_opt->mfail_len != sizeof (struct mptcp_mpfail_opt)) + if (fail_opt->mfail_len != sizeof(struct mptcp_mpfail_opt)) { return; + } mp_tp = tptomptp(tp); @@ -1481,43 +1528,46 @@ tcp_do_mptcp_options(struct tcpcb *tp, u_char *cp, struct tcphdr *th, int mptcp_subtype; struct mptcb *mp_tp = tptomptp(tp); - if (mp_tp == NULL) + if (mp_tp == NULL) { return; + } mpte_lock_assert_held(mp_tp->mpt_mpte); /* All MPTCP options have atleast 4 bytes */ - if (optlen < 4) + if (optlen < 4) { return; + } mptcp_subtype = (cp[2] >> 4); - if (mptcp_sanitize_option(tp, mptcp_subtype) == 0) + if (mptcp_sanitize_option(tp, mptcp_subtype) == 0) { return; + } switch (mptcp_subtype) { - case MPO_CAPABLE: - mptcp_do_mpcapable_opt(tp, cp, th, optlen); - break; - case MPO_JOIN: - mptcp_do_mpjoin_opt(tp, cp, th, optlen); - break; - case MPO_DSS: - mptcp_do_dss_opt(tp, cp, th, optlen); - break; - case MPO_FASTCLOSE: - mptcp_do_fastclose_opt(tp, cp, th); - break; - case MPO_FAIL: - mptcp_do_mpfail_opt(tp, cp, th); - break; - case MPO_ADD_ADDR: /* fall through */ - case MPO_REMOVE_ADDR: /* fall through */ - case MPO_PRIO: - to->to_flags |= TOF_MPTCP; - break; - default: - break; + case MPO_CAPABLE: + mptcp_do_mpcapable_opt(tp, cp, th, optlen); + break; + case MPO_JOIN: + mptcp_do_mpjoin_opt(tp, cp, th, optlen); + break; + case MPO_DSS: + mptcp_do_dss_opt(tp, cp, th, optlen); + break; + case MPO_FASTCLOSE: + mptcp_do_fastclose_opt(tp, cp, th); + break; + case MPO_FAIL: + mptcp_do_mpfail_opt(tp, cp, th); + break; + case MPO_ADD_ADDR: /* fall through */ + case MPO_REMOVE_ADDR: /* fall through */ + case MPO_PRIO: + to->to_flags |= TOF_MPTCP; + break; + default: + break; } return; } @@ -1526,13 +1576,13 @@ tcp_do_mptcp_options(struct tcpcb *tp, u_char *cp, struct tcphdr *th, static void mptcp_send_remaddr_opt(struct tcpcb *tp, struct mptcp_remaddr_opt *opt) { - mptcplog((LOG_DEBUG,"%s: local id %d remove id %d \n", + mptcplog((LOG_DEBUG, "%s: local id %d remove id %d \n", __func__, tp->t_local_aid, tp->t_rem_aid), - (MPTCP_SOCKET_DBG|MPTCP_SENDER_DBG), MPTCP_LOGLVL_LOG); + 
(MPTCP_SOCKET_DBG | MPTCP_SENDER_DBG), MPTCP_LOGLVL_LOG); - bzero(opt, sizeof (*opt)); + bzero(opt, sizeof(*opt)); opt->mr_kind = TCPOPT_MULTIPATH; - opt->mr_len = sizeof (*opt); + opt->mr_len = sizeof(*opt); opt->mr_subtype = MPO_REMOVE_ADDR; opt->mr_addr_id = tp->t_rem_aid; tp->t_mpflags &= ~TMPF_SND_REM_ADDR; @@ -1546,26 +1596,27 @@ mptcp_snd_mpprio(struct tcpcb *tp, u_char *cp, int optlen) if (tp->t_state != TCPS_ESTABLISHED) { tp->t_mpflags &= ~TMPF_SND_MPPRIO; - return (optlen); + return optlen; } if ((MAX_TCPOPTLEN - optlen) < - (int)sizeof (mpprio)) - return (optlen); + (int)sizeof(mpprio)) { + return optlen; + } - bzero(&mpprio, sizeof (mpprio)); + bzero(&mpprio, sizeof(mpprio)); mpprio.mpprio_kind = TCPOPT_MULTIPATH; - mpprio.mpprio_len = sizeof (mpprio); + mpprio.mpprio_len = sizeof(mpprio); mpprio.mpprio_subtype = MPO_PRIO; - if (tp->t_mpflags & TMPF_BACKUP_PATH) + if (tp->t_mpflags & TMPF_BACKUP_PATH) { mpprio.mpprio_flags |= MPTCP_MPPRIO_BKP; + } mpprio.mpprio_addrid = tp->t_local_aid; - memcpy(cp + optlen, &mpprio, sizeof (mpprio)); - optlen += sizeof (mpprio); + memcpy(cp + optlen, &mpprio, sizeof(mpprio)); + optlen += sizeof(mpprio); tp->t_mpflags &= ~TMPF_SND_MPPRIO; mptcplog((LOG_DEBUG, "%s: aid = %d \n", __func__, - tp->t_local_aid), - (MPTCP_SOCKET_DBG|MPTCP_SENDER_DBG), MPTCP_LOGLVL_LOG); - return (optlen); + tp->t_local_aid), + (MPTCP_SOCKET_DBG | MPTCP_SENDER_DBG), MPTCP_LOGLVL_LOG); + return optlen; } - diff --git a/bsd/netinet/mptcp_opt.h b/bsd/netinet/mptcp_opt.h index f00653f08..0a65d5651 100644 --- a/bsd/netinet/mptcp_opt.h +++ b/bsd/netinet/mptcp_opt.h @@ -27,7 +27,7 @@ */ #ifndef _NETINET_MPTCP_OPT_H_ -#define _NETINET_MPTCP_OPT_H_ +#define _NETINET_MPTCP_OPT_H_ #ifdef BSD_KERNEL_PRIVATE @@ -38,7 +38,7 @@ * are not the reason for retries. Generally, on weak wifi and cold start * cellular, more than 2 retries are necessary. */ -#define MPTCP_CAPABLE_RETRIES (2) +#define MPTCP_CAPABLE_RETRIES (2) __BEGIN_DECLS extern void mptcp_data_ack_rcvd(struct mptcb *mp_tp, struct tcpcb *tp, u_int64_t full_dack); @@ -48,8 +48,8 @@ extern void tcp_do_mptcp_options(struct tcpcb *, u_char *, struct tcphdr *, extern unsigned mptcp_setup_syn_opts(struct socket *, u_char*, unsigned); extern unsigned mptcp_setup_join_ack_opts(struct tcpcb *, u_char*, unsigned); extern unsigned int mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, - unsigned int optlen, int flags, int len, - boolean_t *p_mptcp_acknow); + unsigned int optlen, int flags, int len, + boolean_t *p_mptcp_acknow); extern void mptcp_update_dss_rcv_state(struct mptcp_dsn_opt *, struct tcpcb *, uint16_t); extern void mptcp_update_rcv_state_meat(struct mptcb *, struct tcpcb *, diff --git a/bsd/netinet/mptcp_seq.h b/bsd/netinet/mptcp_seq.h index c79232ee6..ab49e61c9 100644 --- a/bsd/netinet/mptcp_seq.h +++ b/bsd/netinet/mptcp_seq.h @@ -27,16 +27,16 @@ */ #ifndef _NETINET_MPTCP_SEQ_H_ -#define _NETINET_MPTCP_SEQ_H_ +#define _NETINET_MPTCP_SEQ_H_ /* * Use 64-bit modulo arithmetic for comparing * Data Sequence Numbers and Data ACKs. Implies * 2**63 space is available for sending data. 
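 * Worked example (editorial addition, not from the original comment):
 * with a data sequence number that has wrapped,
 *   MPTCP_SEQ_LT(0xfffffffffffffff0ULL, 0x10ULL)
 * evaluates true, because (int64_t)(0xfffffffffffffff0ULL - 0x10ULL)
 * is -32; the modulo comparison stays correct as long as the true
 * distance between the two values is below 2**63.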
*/ -#define MPTCP_SEQ_LT(a, b) ((int64_t)((a) - (b)) < 0) -#define MPTCP_SEQ_LEQ(a, b) ((int64_t)((a) - (b)) <= 0) -#define MPTCP_SEQ_GT(a, b) ((int64_t)((a) - (b)) > 0) -#define MPTCP_SEQ_GEQ(a, b) ((int64_t)((a) - (b)) >= 0) +#define MPTCP_SEQ_LT(a, b) ((int64_t)((a) - (b)) < 0) +#define MPTCP_SEQ_LEQ(a, b) ((int64_t)((a) - (b)) <= 0) +#define MPTCP_SEQ_GT(a, b) ((int64_t)((a) - (b)) > 0) +#define MPTCP_SEQ_GEQ(a, b) ((int64_t)((a) - (b)) >= 0) -#endif /* _NETINET_MPTCP_SEQ_H_ */ +#endif /* _NETINET_MPTCP_SEQ_H_ */ diff --git a/bsd/netinet/mptcp_subr.c b/bsd/netinet/mptcp_subr.c index c7b154796..f21312da5 100644 --- a/bsd/netinet/mptcp_subr.c +++ b/bsd/netinet/mptcp_subr.c @@ -136,10 +136,10 @@ static void mptcp_send_dfin(struct socket *so); * until the next round of events processing. */ typedef enum { - MPTS_EVRET_DELETE = 1, /* delete this subflow */ - MPTS_EVRET_OK = 2, /* OK */ - MPTS_EVRET_CONNECT_PENDING = 3, /* resume pended connects */ - MPTS_EVRET_DISCONNECT_FALLBACK = 4, /* abort all but preferred */ + MPTS_EVRET_DELETE = 1, /* delete this subflow */ + MPTS_EVRET_OK = 2, /* OK */ + MPTS_EVRET_CONNECT_PENDING = 3, /* resume pended connects */ + MPTS_EVRET_DISCONNECT_FALLBACK = 4, /* abort all but preferred */ } ev_ret_t; static ev_ret_t mptcp_subflow_events(struct mptses *, struct mptsub *, uint64_t *); @@ -160,39 +160,39 @@ static const char *mptcp_evret2str(ev_ret_t); static void mptcp_do_sha1(mptcp_key_t *, char *); static void mptcp_init_local_parms(struct mptses *); -static unsigned int mptsub_zone_size; /* size of mptsub */ -static struct zone *mptsub_zone; /* zone for mptsub */ +static unsigned int mptsub_zone_size; /* size of mptsub */ +static struct zone *mptsub_zone; /* zone for mptsub */ -static unsigned int mptopt_zone_size; /* size of mptopt */ -static struct zone *mptopt_zone; /* zone for mptopt */ +static unsigned int mptopt_zone_size; /* size of mptopt */ +static struct zone *mptopt_zone; /* zone for mptopt */ -static unsigned int mpt_subauth_entry_size; /* size of subf auth entry */ -static struct zone *mpt_subauth_zone; /* zone of subf auth entry */ +static unsigned int mpt_subauth_entry_size; /* size of subf auth entry */ +static struct zone *mpt_subauth_zone; /* zone of subf auth entry */ struct mppcbinfo mtcbinfo; -#define MPTCP_SUBFLOW_WRITELEN (8 * 1024) /* bytes to write each time */ -#define MPTCP_SUBFLOW_READLEN (8 * 1024) /* bytes to read each time */ +#define MPTCP_SUBFLOW_WRITELEN (8 * 1024) /* bytes to write each time */ +#define MPTCP_SUBFLOW_READLEN (8 * 1024) /* bytes to read each time */ SYSCTL_DECL(_net_inet); -SYSCTL_NODE(_net_inet, OID_AUTO, mptcp, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "MPTCP"); +SYSCTL_NODE(_net_inet, OID_AUTO, mptcp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "MPTCP"); -uint32_t mptcp_dbg_area = 31; /* more noise if greater than 1 */ -SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, dbg_area, CTLFLAG_RW|CTLFLAG_LOCKED, - &mptcp_dbg_area, 0, "MPTCP debug area"); +uint32_t mptcp_dbg_area = 31; /* more noise if greater than 1 */ +SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, dbg_area, CTLFLAG_RW | CTLFLAG_LOCKED, + &mptcp_dbg_area, 0, "MPTCP debug area"); uint32_t mptcp_dbg_level = 1; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, dbg_level, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_dbg_level, 0, "MPTCP debug level"); + &mptcp_dbg_level, 0, "MPTCP debug level"); -SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, pcbcount, CTLFLAG_RD|CTLFLAG_LOCKED, - &mtcbinfo.mppi_count, 0, "Number of active PCBs"); +SYSCTL_UINT(_net_inet_mptcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED, + 
&mtcbinfo.mppi_count, 0, "Number of active PCBs"); static int mptcp_alternate_port = 0; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, alternate_port, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_alternate_port, 0, "Set alternate port for MPTCP connections"); + &mptcp_alternate_port, 0, "Set alternate port for MPTCP connections"); static struct protosw mptcp_subflow_protosw; static struct pr_usrreqs mptcp_subflow_usrreqs; @@ -201,29 +201,29 @@ static struct ip6protosw mptcp_subflow_protosw6; static struct pr_usrreqs mptcp_subflow_usrreqs6; #endif /* INET6 */ -static uint8_t mptcp_create_subflows_scheduled; +static uint8_t mptcp_create_subflows_scheduled; typedef struct mptcp_subflow_event_entry { uint64_t sofilt_hint_mask; ev_ret_t (*sofilt_hint_ev_hdlr)( - struct mptses *mpte, - struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, - uint64_t event); + struct mptses *mpte, + struct mptsub *mpts, + uint64_t *p_mpsofilt_hint, + uint64_t event); } mptsub_ev_entry_t; static uint8_t mptcp_cellicon_is_set; static uint32_t mptcp_last_cellicon_set; -#define MPTCP_CELLICON_TOGGLE_RATE (5 * TCP_RETRANSHZ) /* Only toggle every 5 seconds */ +#define MPTCP_CELLICON_TOGGLE_RATE (5 * TCP_RETRANSHZ) /* Only toggle every 5 seconds */ /* * XXX The order of the event handlers below is really * really important. Think twice before changing it. */ -static mptsub_ev_entry_t mpsub_ev_entry_tbl [] = { +static mptsub_ev_entry_t mpsub_ev_entry_tbl[] = { { .sofilt_hint_mask = SO_FILT_HINT_MPCANTRCVMORE, - .sofilt_hint_ev_hdlr = mptcp_subflow_mpcantrcvmore_ev, + .sofilt_hint_ev_hdlr = mptcp_subflow_mpcantrcvmore_ev, }, { .sofilt_hint_mask = SO_FILT_HINT_MPFAILOVER, @@ -290,11 +290,12 @@ mptcp_init(struct protosw *pp, struct domain *dp) struct ip6protosw *prp6; #endif /* INET6 */ - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); /* do this only once */ - if (mptcp_initialized) + if (mptcp_initialized) { return; + } mptcp_initialized = 1; /* @@ -303,9 +304,9 @@ mptcp_init(struct protosw *pp, struct domain *dp) */ prp = pffindproto_locked(PF_INET, IPPROTO_TCP, SOCK_STREAM); VERIFY(prp != NULL); - bcopy(prp, &mptcp_subflow_protosw, sizeof (*prp)); + bcopy(prp, &mptcp_subflow_protosw, sizeof(*prp)); bcopy(prp->pr_usrreqs, &mptcp_subflow_usrreqs, - sizeof (mptcp_subflow_usrreqs)); + sizeof(mptcp_subflow_usrreqs)); mptcp_subflow_protosw.pr_entry.tqe_next = NULL; mptcp_subflow_protosw.pr_entry.tqe_prev = NULL; mptcp_subflow_protosw.pr_usrreqs = &mptcp_subflow_usrreqs; @@ -326,9 +327,9 @@ mptcp_init(struct protosw *pp, struct domain *dp) prp6 = (struct ip6protosw *)pffindproto_locked(PF_INET6, IPPROTO_TCP, SOCK_STREAM); VERIFY(prp6 != NULL); - bcopy(prp6, &mptcp_subflow_protosw6, sizeof (*prp6)); + bcopy(prp6, &mptcp_subflow_protosw6, sizeof(*prp6)); bcopy(prp6->pr_usrreqs, &mptcp_subflow_usrreqs6, - sizeof (mptcp_subflow_usrreqs6)); + sizeof(mptcp_subflow_usrreqs6)); mptcp_subflow_protosw6.pr_entry.tqe_next = NULL; mptcp_subflow_protosw6.pr_entry.tqe_prev = NULL; mptcp_subflow_protosw6.pr_usrreqs = &mptcp_subflow_usrreqs6; @@ -346,9 +347,9 @@ mptcp_init(struct protosw *pp, struct domain *dp) (struct socket_filter **)(uintptr_t)0xdeadbeefdeadbeef; #endif /* INET6 */ - bzero(&mtcbinfo, sizeof (mtcbinfo)); + bzero(&mtcbinfo, sizeof(mtcbinfo)); TAILQ_INIT(&mtcbinfo.mppi_pcbs); - mtcbinfo.mppi_size = sizeof (struct mpp_mtp); + mtcbinfo.mppi_size = sizeof(struct mpp_mtp); if ((mtcbinfo.mppi_zone = zinit(mtcbinfo.mppi_size, 1024 * mtcbinfo.mppi_size, 8192, 
"mptcb")) == NULL) { panic("%s: unable to allocate MPTCP PCB zone\n", __func__); @@ -370,7 +371,7 @@ mptcp_init(struct protosw *pp, struct domain *dp) /* attach to MP domain for garbage collection to take place */ mp_pcbinfo_attach(&mtcbinfo); - mptsub_zone_size = sizeof (struct mptsub); + mptsub_zone_size = sizeof(struct mptsub); if ((mptsub_zone = zinit(mptsub_zone_size, 1024 * mptsub_zone_size, 8192, "mptsub")) == NULL) { panic("%s: unable to allocate MPTCP subflow zone\n", __func__); @@ -379,7 +380,7 @@ mptcp_init(struct protosw *pp, struct domain *dp) zone_change(mptsub_zone, Z_CALLERACCT, FALSE); zone_change(mptsub_zone, Z_EXPAND, TRUE); - mptopt_zone_size = sizeof (struct mptopt); + mptopt_zone_size = sizeof(struct mptopt); if ((mptopt_zone = zinit(mptopt_zone_size, 128 * mptopt_zone_size, 1024, "mptopt")) == NULL) { panic("%s: unable to allocate MPTCP option zone\n", __func__); @@ -388,7 +389,7 @@ mptcp_init(struct protosw *pp, struct domain *dp) zone_change(mptopt_zone, Z_CALLERACCT, FALSE); zone_change(mptopt_zone, Z_EXPAND, TRUE); - mpt_subauth_entry_size = sizeof (struct mptcp_subf_auth_entry); + mpt_subauth_entry_size = sizeof(struct mptcp_subf_auth_entry); if ((mpt_subauth_zone = zinit(mpt_subauth_entry_size, 1024 * mpt_subauth_entry_size, 8192, "mptauth")) == NULL) { panic("%s: unable to allocate MPTCP address auth zone \n", @@ -412,30 +413,32 @@ mptcp_get_statsindex(struct mptcp_itf_stats *stats, const struct mptsub *mpts) if (ifp == NULL) { mptcplog((LOG_ERR, "%s: no ifp on subflow\n", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (-1); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + return -1; } for (i = 0; i < MPTCP_ITFSTATS_SIZE; i++) { if (stats[i].ifindex == IFSCOPE_NONE) { - if (index < 0) + if (index < 0) { index = i; + } continue; } if (stats[i].ifindex == ifp->if_index) { index = i; - return (index); + return index; } } if (index != -1) { stats[index].ifindex = ifp->if_index; - if (stats[index].is_expensive == 0) + if (stats[index].is_expensive == 0) { stats[index].is_expensive = IFNET_IS_CELLULAR(ifp); + } } - return (index); + return index; } void @@ -448,8 +451,9 @@ mptcpstats_inc_switch(struct mptses *mpte, const struct mptsub *mpts) index = mptcp_get_statsindex(mpte->mpte_itfstats, mpts); - if (index != -1) + if (index != -1) { mpte->mpte_itfstats[index].switches++; + } } /* @@ -485,7 +489,7 @@ mptcp_sescreate(struct mppcb *mpp) __IGNORE_WCASTALIGN(mp_tp = &((struct mpp_mtp *)mpp)->mtcb); /* MPTCP Multipath PCB Extension */ - bzero(mpte, sizeof (*mpte)); + bzero(mpte, sizeof(*mpte)); VERIFY(mpp->mpp_pcbe == NULL); mpp->mpp_pcbe = mpte; mpte->mpte_mppcb = mpp; @@ -499,22 +503,23 @@ mptcp_sescreate(struct mppcb *mpp) mpte->mpte_itfinfo = &mpte->_mpte_itfinfo[0]; mpte->mpte_itfinfo_size = MPTE_ITFINFO_SIZE; - if (mptcp_alternate_port) + if (mptcp_alternate_port) { mpte->mpte_alternate_port = htons(mptcp_alternate_port); + } /* MPTCP Protocol Control Block */ - bzero(mp_tp, sizeof (*mp_tp)); + bzero(mp_tp, sizeof(*mp_tp)); mp_tp->mpt_mpte = mpte; mp_tp->mpt_state = MPTCPS_CLOSED; DTRACE_MPTCP1(session__create, struct mppcb *, mpp); - return (0); + return 0; } static void mptcpstats_get_bytes(struct mptses *mpte, boolean_t initial_cell, - uint64_t *cellbytes, uint64_t *allbytes) + uint64_t *cellbytes, uint64_t *allbytes) { int64_t mycellbytes = 0; uint64_t myallbytes = 0; @@ -537,7 +542,7 @@ mptcpstats_get_bytes(struct mptses *mpte, boolean_t initial_cell, if (mycellbytes < 0) { mptcplog((LOG_ERR, "%s cellbytes is %d\n", __func__, mycellbytes), - 
MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); *cellbytes = 0; *allbytes = 0; } else { @@ -559,13 +564,15 @@ mptcpstats_session_wrapup(struct mptses *mpte) if (cell && mpte->mpte_handshake_success) { tcpstat.tcps_mptcp_fp_handover_success_cell++; - if (mpte->mpte_used_wifi) + if (mpte->mpte_used_wifi) { tcpstat.tcps_mptcp_handover_wifi_from_cell++; + } } else if (mpte->mpte_handshake_success) { tcpstat.tcps_mptcp_fp_handover_success_wifi++; - if (mpte->mpte_used_cell) + if (mpte->mpte_used_cell) { tcpstat.tcps_mptcp_handover_cell_from_wifi++; + } } } else { tcpstat.tcps_mptcp_handover_attempt++; @@ -573,13 +580,15 @@ mptcpstats_session_wrapup(struct mptses *mpte) if (cell && mpte->mpte_handshake_success) { tcpstat.tcps_mptcp_handover_success_cell++; - if (mpte->mpte_used_wifi) + if (mpte->mpte_used_wifi) { tcpstat.tcps_mptcp_handover_wifi_from_cell++; + } } else if (mpte->mpte_handshake_success) { tcpstat.tcps_mptcp_handover_success_wifi++; - if (mpte->mpte_used_cell) + if (mpte->mpte_used_cell) { tcpstat.tcps_mptcp_handover_cell_from_wifi++; + } } } @@ -600,8 +609,9 @@ mptcpstats_session_wrapup(struct mptses *mpte) if (mpte->mpte_handshake_success) { tcpstat.tcps_mptcp_fp_interactive_success++; - if (!cell && mpte->mpte_used_cell) + if (!cell && mpte->mpte_used_cell) { tcpstat.tcps_mptcp_interactive_cell_from_wifi++; + } } } else { tcpstat.tcps_mptcp_interactive_attempt++; @@ -609,8 +619,9 @@ mptcpstats_session_wrapup(struct mptses *mpte) if (mpte->mpte_handshake_success) { tcpstat.tcps_mptcp_interactive_success++; - if (!cell && mpte->mpte_used_cell) + if (!cell && mpte->mpte_used_cell) { tcpstat.tcps_mptcp_interactive_cell_from_wifi++; + } } } @@ -628,8 +639,9 @@ mptcpstats_session_wrapup(struct mptses *mpte) if (mpte->mpte_flags & MPTE_FIRSTPARTY) { tcpstat.tcps_mptcp_fp_aggregate_attempt++; - if (mpte->mpte_handshake_success) + if (mpte->mpte_handshake_success) { tcpstat.tcps_mptcp_fp_aggregate_success++; + } } else { tcpstat.tcps_mptcp_aggregate_attempt++; @@ -650,11 +662,13 @@ mptcpstats_session_wrapup(struct mptses *mpte) break; } - if (cell && mpte->mpte_handshake_success && mpte->mpte_used_wifi) + if (cell && mpte->mpte_handshake_success && mpte->mpte_used_wifi) { tcpstat.tcps_mptcp_back_to_wifi++; + } - if (mpte->mpte_triggered_cell) + if (mpte->mpte_triggered_cell) { tcpstat.tcps_mptcp_triggered_cell++; + } } /* @@ -665,7 +679,7 @@ mptcp_session_destroy(struct mptses *mpte) { struct mptcb *mp_tp; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ mp_tp = mpte->mpte_mptcb; VERIFY(mp_tp != NULL); @@ -680,8 +694,9 @@ mptcp_session_destroy(struct mptses *mpte) mptcp_flush_sopts(mpte); VERIFY(TAILQ_EMPTY(&mpte->mpte_subflows) && mpte->mpte_numflows == 0); - if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE) + if (mpte->mpte_itfinfo_size > MPTE_ITFINFO_SIZE) { _FREE(mpte->mpte_itfinfo, M_TEMP); + } mpte->mpte_itfinfo = NULL; @@ -697,9 +712,9 @@ mptcp_session_destroy(struct mptses *mpte) static boolean_t mptcp_ok_to_create_subflows(struct mptcb *mp_tp) { - return (mp_tp->mpt_state >= MPTCPS_ESTABLISHED && - mp_tp->mpt_state < MPTCPS_FIN_WAIT_1 && - !(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP)); + return mp_tp->mpt_state >= MPTCPS_ESTABLISHED && + mp_tp->mpt_state < MPTCPS_FIN_WAIT_1 && + !(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP); } static int @@ -707,8 +722,8 @@ mptcp_synthesize_nat64(struct in6_addr *addr, uint32_t len, struct in_addr *addr { static const struct in6_addr 
well_known_prefix = { .__u6_addr.__u6_addr8 = {0x00, 0x64, 0xff, 0x9b, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00}, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}, }; char buf[MAX_IPv6_STR_LEN]; char *ptrv4 = (char *)addrv4; @@ -721,48 +736,49 @@ mptcp_synthesize_nat64(struct in6_addr *addr, uint32_t len, struct in_addr *addr IN_6TO4_RELAY_ANYCAST(ntohl(addrv4->s_addr)) || // 192.88.99.0/24 6to4 Relay Anycast IN_MULTICAST(ntohl(addrv4->s_addr)) || // 224.0.0.0/4 Multicast INADDR_BROADCAST == addrv4->s_addr) { // 255.255.255.255/32 Limited Broadcast - return (-1); + return -1; } /* Check for the well-known prefix */ if (len == NAT64_PREFIX_LEN_96 && IN6_ARE_ADDR_EQUAL(addr, &well_known_prefix)) { if (IN_PRIVATE(ntohl(addrv4->s_addr)) || // 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 Private-Use - IN_SHARED_ADDRESS_SPACE(ntohl(addrv4->s_addr))) // 100.64.0.0/10 Shared Address Space - return (-1); + IN_SHARED_ADDRESS_SPACE(ntohl(addrv4->s_addr))) { // 100.64.0.0/10 Shared Address Space + return -1; + } } switch (len) { - case NAT64_PREFIX_LEN_96: - memcpy(ptr + 12, ptrv4, 4); - break; - case NAT64_PREFIX_LEN_64: - memcpy(ptr + 9, ptrv4, 4); - break; - case NAT64_PREFIX_LEN_56: - memcpy(ptr + 7, ptrv4, 1); - memcpy(ptr + 9, ptrv4 + 1, 3); - break; - case NAT64_PREFIX_LEN_48: - memcpy(ptr + 6, ptrv4, 2); - memcpy(ptr + 9, ptrv4 + 2, 2); - break; - case NAT64_PREFIX_LEN_40: - memcpy(ptr + 5, ptrv4, 3); - memcpy(ptr + 9, ptrv4 + 3, 1); - break; - case NAT64_PREFIX_LEN_32: - memcpy(ptr + 4, ptrv4, 4); - break; - default: - panic("NAT64-prefix len is wrong: %u\n", len); + case NAT64_PREFIX_LEN_96: + memcpy(ptr + 12, ptrv4, 4); + break; + case NAT64_PREFIX_LEN_64: + memcpy(ptr + 9, ptrv4, 4); + break; + case NAT64_PREFIX_LEN_56: + memcpy(ptr + 7, ptrv4, 1); + memcpy(ptr + 9, ptrv4 + 1, 3); + break; + case NAT64_PREFIX_LEN_48: + memcpy(ptr + 6, ptrv4, 2); + memcpy(ptr + 9, ptrv4 + 2, 2); + break; + case NAT64_PREFIX_LEN_40: + memcpy(ptr + 5, ptrv4, 3); + memcpy(ptr + 9, ptrv4 + 3, 1); + break; + case NAT64_PREFIX_LEN_32: + memcpy(ptr + 4, ptrv4, 4); + break; + default: + panic("NAT64-prefix len is wrong: %u\n", len); } os_log_info(mptcp_log_handle, "%s: nat64prefix-len %u synthesized %s\n", - __func__, len, - inet_ntop(AF_INET6, (void *)addr, buf, sizeof(buf))); + __func__, len, + inet_ntop(AF_INET6, (void *)addr, buf, sizeof(buf))); - return (0); + return 0; } static void @@ -776,15 +792,16 @@ mptcp_trigger_cell_bringup(struct mptses *mpte) mpte_unlock(mpte); err = necp_client_assert_bb_radio_manager(mpsotomppcb(mp_so)->necp_client_uuid, - TRUE); + TRUE); mpte_lock(mpte); - if (err == 0) + if (err == 0) { mpte->mpte_triggered_cell = 1; + } uuid_unparse_upper(mpsotomppcb(mp_so)->necp_client_uuid, uuidstr); os_log_info(mptcp_log_handle, "%s asked irat to bringup cell for uuid %s, err %d\n", - __func__, uuidstr, err); + __func__, uuidstr, err); } else { os_log_info(mptcp_log_handle, "%s UUID is already null\n", __func__); } @@ -799,8 +816,9 @@ mptcp_check_subflows_and_add(struct mptses *mpte) boolean_t want_cellular = TRUE; uint32_t i; - if (!mptcp_ok_to_create_subflows(mp_tp)) + if (!mptcp_ok_to_create_subflows(mp_tp)) { return; + } for (i = 0; i < mpte->mpte_itfinfo_size; i++) { struct mpt_itf_info *info; @@ -811,28 +829,33 @@ mptcp_check_subflows_and_add(struct mptses *mpte) info = &mpte->mpte_itfinfo[i]; - if (info->no_mptcp_support) + if (info->no_mptcp_support) { continue; + } ifindex = info->ifindex; - if (ifindex == IFSCOPE_NONE) + if (ifindex == 
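/*
 * The switch above implements RFC 6052 address synthesis: the four IPv4
 * bytes are spliced into the IPv6 prefix at an offset that depends on the
 * prefix length (for the shorter prefixes the copy skips byte 8, which
 * RFC 6052 reserves as zero).  A userspace sketch of the
 * NAT64_PREFIX_LEN_96 arm using the well-known prefix 64:ff9b::/96; the
 * example address is illustrative:
 */
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "198.51.100.7", &v4);
	inet_pton(AF_INET6, "64:ff9b::", &v6);	/* the well-known prefix */
	memcpy((char *)&v6 + 12, &v4, 4);	/* /96: v4 goes at offset 12 */

	printf("synthesized %s\n",
	    inet_ntop(AF_INET6, &v6, buf, sizeof(buf)));
	return 0;
}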
IFSCOPE_NONE) { continue; + } ifnet_head_lock_shared(); ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); - if (ifp == NULL) + if (ifp == NULL) { continue; + } - if (IFNET_IS_CELLULAR(ifp)) + if (IFNET_IS_CELLULAR(ifp)) { cellular_viable = TRUE; + } TAILQ_FOREACH(mpts, &mpte->mpte_subflows, mpts_entry) { const struct ifnet *subifp = sotoinpcb(mpts->mpts_socket)->inp_last_outifp; - if (subifp == NULL) + if (subifp == NULL) { continue; + } /* * In Handover mode, only create cell subflow if @@ -855,14 +878,14 @@ mptcp_check_subflows_and_add(struct mptses *mpte) !IFNET_IS_CELLULAR(subifp) && !(mpts->mpts_flags & (MPTSF_DISCONNECTING | MPTSF_DISCONNECTED | MPTSF_CLOSE_REQD)) && (mptcp_is_wifi_unusable(mpte) == 0 || - (sototcpcb(mpts->mpts_socket)->t_rxtshift < mptcp_fail_thresh * 2 && - ((mpte->mpte_flags & MPTE_FIRSTPARTY) || mptetoso(mpte)->so_snd.sb_cc)))) { + (sototcpcb(mpts->mpts_socket)->t_rxtshift < mptcp_fail_thresh * 2 && + ((mpte->mpte_flags & MPTE_FIRSTPARTY) || mptetoso(mpte)->so_snd.sb_cc)))) { os_log_debug(mptcp_log_handle, "%s handover, wifi state %d rxt %u first-party %u sb_cc %u ifindex %u this %u\n", - __func__, mptcp_is_wifi_unusable(mpte), - sototcpcb(mpts->mpts_socket)->t_rxtshift, - !!(mpte->mpte_flags & MPTE_FIRSTPARTY), - mptetoso(mpte)->so_snd.sb_cc, - ifindex, subifp->if_index); + __func__, mptcp_is_wifi_unusable(mpte), + sototcpcb(mpts->mpts_socket)->t_rxtshift, + !!(mpte->mpte_flags & MPTE_FIRSTPARTY), + mptetoso(mpte)->so_snd.sb_cc, + ifindex, subifp->if_index); found = 1; /* We found a proper subflow on WiFi - no need for cell */ @@ -870,10 +893,9 @@ mptcp_check_subflows_and_add(struct mptses *mpte) break; } else { os_log_debug(mptcp_log_handle, "%s svc %u cell %u flags %#x unusable %d rtx %u first %u sbcc %u\n", - __func__, mpte->mpte_svctype, IFNET_IS_CELLULAR(subifp), mpts->mpts_flags, - mptcp_is_wifi_unusable(mpte), sototcpcb(mpts->mpts_socket)->t_rxtshift, - !!(mpte->mpte_flags & MPTE_FIRSTPARTY), mptetoso(mpte)->so_snd.sb_cc); - + __func__, mpte->mpte_svctype, IFNET_IS_CELLULAR(subifp), mpts->mpts_flags, + mptcp_is_wifi_unusable(mpte), sototcpcb(mpts->mpts_socket)->t_rxtshift, + !!(mpte->mpte_flags & MPTE_FIRSTPARTY), mptetoso(mpte)->so_snd.sb_cc); } if (subifp->if_index == ifindex && @@ -909,29 +931,30 @@ mptcp_check_subflows_and_add(struct mptses *mpte) error = ifnet_get_nat64prefix(ifp, nat64prefixes); if (error) { os_log_error(mptcp_log_handle, "%s: no NAT64-prefix on itf %s, error %d\n", - __func__, ifp->if_name, error); + __func__, ifp->if_name, error); continue; } for (j = 0; j < NAT64_MAX_NUM_PREFIXES; j++) { - if (nat64prefixes[j].prefix_len != 0) + if (nat64prefixes[j].prefix_len != 0) { break; + } } VERIFY(j < NAT64_MAX_NUM_PREFIXES); error = mptcp_synthesize_nat64(&nat64prefixes[j].ipv6_prefix, - nat64prefixes[j].prefix_len, - &mpte->__mpte_dst_v4.sin_addr); + nat64prefixes[j].prefix_len, + &mpte->__mpte_dst_v4.sin_addr); if (error != 0) { os_log_info(mptcp_log_handle, "%s: cannot synthesize this addr\n", - __func__); + __func__); continue; } memcpy(&nat64pre.sin6_addr, - &nat64prefixes[j].ipv6_prefix, - sizeof(nat64pre.sin6_addr)); + &nat64prefixes[j].ipv6_prefix, + sizeof(nat64pre.sin6_addr)); nat64pre.sin6_len = sizeof(struct sockaddr_in6); nat64pre.sin6_family = AF_INET6; nat64pre.sin6_port = mpte->__mpte_dst_v6.sin6_port; @@ -947,10 +970,12 @@ mptcp_check_subflows_and_add(struct mptses *mpte) dst = (struct sockaddr *)&mpte->mpte_dst_v4_nat64; } - if (dst->sa_family == AF_INET && !info->has_v4_conn) + if (dst->sa_family == AF_INET && 
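/*
 * The handover test above treats a WiFi subflow as still good when WiFi is
 * not flagged unusable, or when its retransmission shift is below twice the
 * failover threshold and there is a reason to keep waiting (a first-party
 * flow, or data pending in the send buffer).  A sketch of just that
 * predicate; the threshold constant is illustrative, not the shipped
 * mptcp_fail_thresh default:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAIL_THRESH 3	/* stands in for mptcp_fail_thresh */

static bool
wifi_subflow_still_good(bool wifi_unusable, uint32_t rxtshift,
    bool first_party, uint32_t snd_pending)
{
	return !wifi_unusable ||
	    (rxtshift < FAIL_THRESH * 2 &&
	    (first_party || snd_pending > 0));
}

int
main(void)
{
	printf("%d\n", wifi_subflow_still_good(true, 2, false, 1400)); /* 1 */
	printf("%d\n", wifi_subflow_still_good(true, 9, false, 0));    /* 0 */
	return 0;
}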
!info->has_v4_conn) { continue; - if (dst->sa_family == AF_INET6 && !info->has_v6_conn) + } + if (dst->sa_family == AF_INET6 && !info->has_v6_conn) { continue; + } mptcp_subflow_add(mpte, NULL, dst, ifindex, NULL); } @@ -973,8 +998,9 @@ mptcp_check_subflows_and_remove(struct mptses *mpte) int found_working_subflow = 0, removed_some = 0; int wifi_unusable = mptcp_is_wifi_unusable(mpte); - if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER) + if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER) { return; + } /* * Look for a subflow that is on a non-cellular interface @@ -985,45 +1011,52 @@ mptcp_check_subflows_and_remove(struct mptses *mpte) struct socket *so; struct tcpcb *tp; - if (ifp == NULL || IFNET_IS_CELLULAR(ifp)) + if (ifp == NULL || IFNET_IS_CELLULAR(ifp)) { continue; + } so = mpts->mpts_socket; tp = sototcpcb(so); if (!(mpts->mpts_flags & MPTSF_CONNECTED) || - tp->t_state != TCPS_ESTABLISHED) + tp->t_state != TCPS_ESTABLISHED) { continue; + } /* Is this subflow in good condition? */ - if (tp->t_rxtshift == 0) + if (tp->t_rxtshift == 0) { found_working_subflow = 1; + } /* Or WiFi is fine */ - if (!wifi_unusable) + if (!wifi_unusable) { found_working_subflow = 1; + } } /* * Couldn't find a working subflow, let's not remove those on a cellular * interface. */ - if (!found_working_subflow) + if (!found_working_subflow) { return; + } TAILQ_FOREACH_SAFE(mpts, &mpte->mpte_subflows, mpts_entry, tmpts) { const struct ifnet *ifp = sotoinpcb(mpts->mpts_socket)->inp_last_outifp; /* Only remove cellular subflows */ - if (ifp == NULL || !IFNET_IS_CELLULAR(ifp)) + if (ifp == NULL || !IFNET_IS_CELLULAR(ifp)) { continue; + } soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); removed_some = 1; } - if (removed_some) + if (removed_some) { mptcp_unset_cellicon(); + } } static void @@ -1036,7 +1069,7 @@ mptcp_remove_subflows(struct mptses *mpte) mpts->mpts_flags &= ~MPTSF_CLOSE_REQD; soevent(mpts->mpts_socket, - SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOSRCADDR); + SO_FILT_HINT_LOCKED | SO_FILT_HINT_NOSRCADDR); } } } @@ -1050,9 +1083,10 @@ mptcp_create_subflows(__unused void *arg) * Start with clearing, because we might be processing connections * while a new event comes in. 
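/*
 * mptcp_check_subflows_and_remove() above is a two-pass walk: first decide
 * whether any established non-cellular subflow is healthy (no pending
 * retransmissions, or WiFi itself reported fine), and only then reset the
 * cellular subflows.  A reduced sketch of that shape; struct sub stands in
 * for the tcpcb/ifnet state the kernel consults:
 */
#include <stdbool.h>
#include <stdio.h>

struct sub {
	bool cellular;
	bool established;
	int  rxtshift;
};

static int
remove_cell_subflows(struct sub *subs, int n, bool wifi_unusable)
{
	bool found_working = false;
	int removed = 0, i;

	for (i = 0; i < n; i++) {
		if (subs[i].cellular || !subs[i].established) {
			continue;
		}
		if (subs[i].rxtshift == 0 || !wifi_unusable) {
			found_working = true;
		}
	}
	if (!found_working) {
		return 0;	/* nothing else works: keep cell alive */
	}
	for (i = 0; i < n; i++) {
		if (subs[i].cellular) {
			removed++;	/* kernel: soevent(..., MUSTRST) */
		}
	}
	return removed;
}

int
main(void)
{
	struct sub subs[] = {
		{ false, true, 0 },	/* healthy WiFi subflow */
		{ true,  true, 0 },	/* cell subflow */
	};

	printf("removed %d\n", remove_cell_subflows(subs, 2, false));
	return 0;
}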
*/ - if (OSTestAndClear(0x01, &mptcp_create_subflows_scheduled)) + if (OSTestAndClear(0x01, &mptcp_create_subflows_scheduled)) { mptcplog((LOG_ERR, "%s: bit was already cleared!\n", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + } /* Iterate over all MPTCP connections */ @@ -1062,8 +1096,9 @@ mptcp_create_subflows(__unused void *arg) struct mptses *mpte; struct socket *mp_so; - if (!(mpp->mpp_flags & MPP_CREATE_SUBFLOWS)) + if (!(mpp->mpp_flags & MPP_CREATE_SUBFLOWS)) { continue; + } mpp_lock(mpp); @@ -1102,8 +1137,8 @@ mptcp_sched_create_subflows(struct mptses *mpte) if (!mptcp_ok_to_create_subflows(mp_tp)) { mptcplog((LOG_DEBUG, "%s: not a good time for subflows, state %u flags %#x", - __func__, mp_tp->mpt_state, mp_tp->mpt_flags), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, mp_tp->mpt_state, mp_tp->mpt_flags), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); return; } @@ -1112,11 +1147,12 @@ mptcp_sched_create_subflows(struct mptses *mpte) mpp->mpp_flags |= MPP_CREATE_SUBFLOWS; } - if (OSTestAndSet(0x01, &mptcp_create_subflows_scheduled)) + if (OSTestAndSet(0x01, &mptcp_create_subflows_scheduled)) { return; + } /* Do the call in 100ms to allow NECP to schedule it on all sockets */ - timeout(mptcp_create_subflows, NULL, hz/10); + timeout(mptcp_create_subflows, NULL, hz / 10); } /* @@ -1133,7 +1169,7 @@ mptcp_sopt_alloc(int how) bzero(mpo, mptopt_zone_size); } - return (mpo); + return mpo; } /* @@ -1153,7 +1189,7 @@ mptcp_sopt_free(struct mptopt *mpo) void mptcp_sopt_insert(struct mptses *mpte, struct mptopt *mpo) { - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ mpo->mpo_flags |= MPOF_ATTACHED; TAILQ_INSERT_TAIL(&mpte->mpte_sopts, mpo, mpo_entry); } @@ -1164,7 +1200,7 @@ mptcp_sopt_insert(struct mptses *mpte, struct mptopt *mpo) void mptcp_sopt_remove(struct mptses *mpte, struct mptopt *mpo) { - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpo->mpo_flags & MPOF_ATTACHED); mpo->mpo_flags &= ~MPOF_ATTACHED; TAILQ_REMOVE(&mpte->mpte_sopts, mpo, mpo_entry); @@ -1178,14 +1214,15 @@ mptcp_sopt_find(struct mptses *mpte, struct sockopt *sopt) { struct mptopt *mpo; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ TAILQ_FOREACH(mpo, &mpte->mpte_sopts, mpo_entry) { if (mpo->mpo_level == sopt->sopt_level && - mpo->mpo_name == sopt->sopt_name) + mpo->mpo_name == sopt->sopt_name) { break; + } } - return (mpo); + return mpo; } /* @@ -1196,11 +1233,12 @@ mptcp_subflow_alloc(void) { struct mptsub *mpts = zalloc(mptsub_zone); - if (mpts == NULL) - return (NULL); + if (mpts == NULL) { + return NULL; + } bzero(mpts, mptsub_zone_size); - return (mpts); + return mpts; } /* @@ -1226,9 +1264,10 @@ mptcp_subflow_free(struct mptsub *mpts) static void mptcp_subflow_addref(struct mptsub *mpts) { - if (++mpts->mpts_refcnt == 0) + if (++mpts->mpts_refcnt == 0) { panic("%s: mpts %p wraparound refcnt\n", __func__, mpts); - /* NOTREACHED */ + } + /* NOTREACHED */ } static void @@ -1238,8 +1277,9 @@ mptcp_subflow_remref(struct mptsub *mpts) panic("%s: mpts %p negative refcnt\n", __func__, mpts); /* NOTREACHED */ } - if (--mpts->mpts_refcnt > 0) + if (--mpts->mpts_refcnt > 0) { return; + } /* callee will unlock and destroy lock */ mptcp_subflow_free(mpts); @@ -1271,14 +1311,14 @@ mptcp_subflow_attach(struct mptses *mpte, struct mptsub *mpts, struct socket 
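/*
 * mptcp_create_subflows() and mptcp_sched_create_subflows() above coalesce
 * work through one shared bit: OSTestAndSet() queues a single deferred run
 * no matter how many sockets ask, and the worker clears the bit before
 * scanning so a request arriving mid-scan schedules a fresh pass.  A C11
 * sketch of the same pattern, with an atomic_flag standing in for the
 * kernel's OSTestAndSet/OSTestAndClear and timeout():
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag work_scheduled = ATOMIC_FLAG_INIT;

static void
schedule_work(void)
{
	if (atomic_flag_test_and_set(&work_scheduled)) {
		return;		/* already queued; pending run will see us */
	}
	printf("queued one deferred run\n");	/* kernel: timeout(.., hz/10) */
}

static void
worker(void)
{
	atomic_flag_clear(&work_scheduled);	/* clear first, then scan */
	printf("scanning all connections\n");
}

int
main(void)
{
	schedule_work();
	schedule_work();	/* coalesced: nothing queued twice */
	worker();
	return 0;
}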
*so mpts->mpts_mpte = mpte; mpts->mpts_socket = so; tp->t_mpsub = mpts; - mptcp_subflow_addref(mpts); /* for being in MPTCP subflow list */ - mptcp_subflow_addref(mpts); /* for subflow socket */ + mptcp_subflow_addref(mpts); /* for being in MPTCP subflow list */ + mptcp_subflow_addref(mpts); /* for subflow socket */ } static void mptcp_subflow_necp_cb(void *handle, __unused int action, - __unused uint32_t interface_index, - uint32_t necp_flags, bool *viable) + __unused uint32_t interface_index, + uint32_t necp_flags, bool *viable) { boolean_t low_power = !!(necp_flags & NECP_CLIENT_RESULT_FLAG_INTERFACE_LOW_POWER); struct inpcb *inp = (struct inpcb *)handle; @@ -1286,37 +1326,42 @@ mptcp_subflow_necp_cb(void *handle, __unused int action, struct mptsub *mpts; struct mptses *mpte; - if (low_power) + if (low_power) { action = NECP_CLIENT_CBACTION_NONVIABLE; + } - if (action != NECP_CLIENT_CBACTION_NONVIABLE) + if (action != NECP_CLIENT_CBACTION_NONVIABLE) { return; + } /* * The socket is being garbage-collected. There is nothing to be done * here. */ - if (so->so_usecount == 0) + if (so->so_usecount == 0) { return; + } socket_lock(so, 1); /* Check again after we acquired the lock. */ - if (so->so_usecount == 0) + if (so->so_usecount == 0) { goto out; + } mpte = tptomptp(sototcpcb(so))->mpt_mpte; mpts = sototcpcb(so)->t_mpsub; os_log_debug(mptcp_log_handle, "%s Subflow on itf %u became non-viable, power %u", - __func__, mpts->mpts_ifscope, low_power); + __func__, mpts->mpts_ifscope, low_power); mpts->mpts_flags |= MPTSF_CLOSE_REQD; mptcp_sched_create_subflows(mpte); - if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER && viable != NULL) + if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER && viable != NULL) { *viable = 1; + } out: socket_unlock(so, 1); @@ -1336,15 +1381,15 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, int error; *so = NULL; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ mp_so = mptetoso(mpte); p = proc_find(mp_so->last_pid); if (p == PROC_NULL) { mptcplog((LOG_ERR, "%s: Couldn't find proc for pid %u\n", __func__, mp_so->last_pid), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (ESRCH); + return ESRCH; } /* @@ -1362,17 +1407,17 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, */ mpte_unlock(mpte); error = socreate_internal(dom, so, SOCK_STREAM, IPPROTO_TCP, p, - SOCF_ASYNC, PROC_NULL); + SOCF_ASYNC, PROC_NULL); mpte_lock(mpte); if (error) { mptcplog((LOG_ERR, "%s: subflow socreate mp_so 0x%llx unable to create subflow socket error %d\n", - __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), error), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), error), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); proc_rele(p); mptcp_subflow_free(mpts); - return (error); + return error; } /* @@ -1404,10 +1449,12 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, (*so)->so_snd.sb_flags |= SB_NOCOMPRESS; /* Inherit preconnect and TFO data flags */ - if (mp_so->so_flags1 & SOF1_PRECONNECT_DATA) + if (mp_so->so_flags1 & SOF1_PRECONNECT_DATA) { (*so)->so_flags1 |= SOF1_PRECONNECT_DATA; - if (mp_so->so_flags1 & SOF1_DATA_IDEMPOTENT) + } + if (mp_so->so_flags1 & SOF1_DATA_IDEMPOTENT) { (*so)->so_flags1 |= SOF1_DATA_IDEMPOTENT; + } /* Inherit uuid and create the related flow. 
*/ if (!uuid_is_null(mpsotomppcb(mp_so)->necp_client_uuid)) { @@ -1427,13 +1474,15 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, mpsotomppcb(mp_so)->necp_client_uuid, sotoinpcb(*so)); mpte_lock(mpte); - if (error) + if (error) { goto out_err; + } /* Possible state-change during the unlock above */ if (mp_tp->mpt_state >= MPTCPS_TIME_WAIT || - (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP)) + (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP)) { goto out_err; + } uuid_copy(sotoinpcb(*so)->necp_client_uuid, mpsotomppcb(mp_so)->necp_client_uuid); } else { @@ -1442,31 +1491,35 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, } /* inherit the other socket options */ - bzero(&smpo, sizeof (smpo)); + bzero(&smpo, sizeof(smpo)); smpo.mpo_flags |= MPOF_SUBFLOW_OK; smpo.mpo_level = SOL_SOCKET; smpo.mpo_intval = 1; /* disable SIGPIPE */ smpo.mpo_name = SO_NOSIGPIPE; - if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) + if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) { goto out_err; + } /* find out if the subflow's source address goes away */ smpo.mpo_name = SO_NOADDRERR; - if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) + if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) { goto out_err; + } /* enable keepalive */ smpo.mpo_name = SO_KEEPALIVE; - if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) + if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) { goto out_err; + } smpo.mpo_level = IPPROTO_TCP; smpo.mpo_intval = mptcp_subflow_keeptime; smpo.mpo_name = TCP_KEEPALIVE; - if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) + if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) { goto out_err; + } if (mpte->mpte_mptcb->mpt_state >= MPTCPS_ESTABLISHED) { /* @@ -1476,16 +1529,18 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, smpo.mpo_level = SOL_SOCKET; smpo.mpo_name = SO_MARK_CELLFALLBACK; smpo.mpo_intval = 1; - if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) + if ((error = mptcp_subflow_sosetopt(mpte, mpts, &smpo)) != 0) { goto out_err; + } } /* replay setsockopt(2) on the subflow sockets for eligible options */ TAILQ_FOREACH_SAFE(mpo, &mpte->mpte_sopts, mpo_entry, tmpo) { int interim; - if (!(mpo->mpo_flags & MPOF_SUBFLOW_OK)) + if (!(mpo->mpo_flags & MPOF_SUBFLOW_OK)) { continue; + } /* * Skip those that are handled internally; these options @@ -1495,8 +1550,9 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, if (mpo->mpo_level == SOL_SOCKET && (mpo->mpo_name == SO_NOSIGPIPE || mpo->mpo_name == SO_NOADDRERR || - mpo->mpo_name == SO_KEEPALIVE)) + mpo->mpo_name == SO_KEEPALIVE)) { continue; + } interim = (mpo->mpo_flags & MPOF_INTERIM); if (mptcp_subflow_sosetopt(mpte, mpts, mpo) != 0 && interim) { @@ -1536,7 +1592,7 @@ mptcp_subflow_socreate(struct mptses *mpte, struct mptsub *mpts, int dom, DTRACE_MPTCP3(subflow__create, struct mptses *, mpte, int, dom, int, error); - return (0); + return 0; out_err: mptcp_subflow_abort(mpts, error); @@ -1544,9 +1600,9 @@ out_err: proc_rele(p); mptcplog((LOG_ERR, "%s: subflow socreate failed with error %d\n", - __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (error); + return error; } /* @@ -1560,12 +1616,13 @@ mptcp_subflow_soclose(struct mptsub *mpts) { struct socket *so = mpts->mpts_socket; - if (mpts->mpts_flags & MPTSF_CLOSED) + if (mpts->mpts_flags & MPTSF_CLOSED) { return; + } 
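/*
 * Both the socreate_internal() call and the NECP registration above follow
 * the same discipline: drop the MPTCP lock across a blocking call into
 * another subsystem, retake it, then revalidate any state read earlier
 * ("Possible state-change during the unlock above").  A pthread sketch of
 * that unlock/call/relock/revalidate shape; the types are illustrative:
 */
#include <pthread.h>
#include <stdio.h>

enum state { OPEN, CLOSED };

struct conn {
	pthread_mutex_t lock;
	enum state state;
};

static int
slow_external_call(void)
{
	return 0;	/* stands in for socreate_internal()/NECP */
}

static int
do_setup(struct conn *c)
{
	int error;

	pthread_mutex_lock(&c->lock);
	/* ... prepare under the lock ... */
	pthread_mutex_unlock(&c->lock);	/* drop across the blocking call */
	error = slow_external_call();
	pthread_mutex_lock(&c->lock);
	if (error == 0 && c->state == CLOSED) {
		error = -1;	/* state changed while unlocked */
	}
	pthread_mutex_unlock(&c->lock);
	return error;
}

int
main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, OPEN };

	printf("setup -> %d\n", do_setup(&c));
	return 0;
}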
VERIFY(so != NULL); VERIFY(so->so_flags & SOF_MP_SUBFLOW); - VERIFY((so->so_state & (SS_NBIO|SS_NOFDREF)) == (SS_NBIO|SS_NOFDREF)); + VERIFY((so->so_state & (SS_NBIO | SS_NOFDREF)) == (SS_NBIO | SS_NOFDREF)); DTRACE_MPTCP5(subflow__close, struct mptsub *, mpts, struct socket *, so, @@ -1611,28 +1668,28 @@ mptcp_subflow_soconnectx(struct mptses *mpte, struct mptsub *mpts) af = mpts->mpts_dst.sa_family; dst = &mpts->mpts_dst; - VERIFY((mpts->mpts_flags & (MPTSF_CONNECTING|MPTSF_CONNECTED)) == MPTSF_CONNECTING); + VERIFY((mpts->mpts_flags & (MPTSF_CONNECTING | MPTSF_CONNECTED)) == MPTSF_CONNECTING); VERIFY(mpts->mpts_socket != NULL); VERIFY(af == AF_INET || af == AF_INET6); if (af == AF_INET) { - inet_ntop(af, &SIN(dst)->sin_addr.s_addr, dbuf, sizeof (dbuf)); + inet_ntop(af, &SIN(dst)->sin_addr.s_addr, dbuf, sizeof(dbuf)); dport = ntohs(SIN(dst)->sin_port); } else { - inet_ntop(af, &SIN6(dst)->sin6_addr, dbuf, sizeof (dbuf)); + inet_ntop(af, &SIN6(dst)->sin6_addr, dbuf, sizeof(dbuf)); dport = ntohs(SIN6(dst)->sin6_port); } os_log_info(mptcp_log_handle, - "%s: ifindex %u dst %s:%d pended %u\n", __func__, mpts->mpts_ifscope, - dbuf, dport, !!(mpts->mpts_flags & MPTSF_CONNECT_PENDING)); + "%s: ifindex %u dst %s:%d pended %u\n", __func__, mpts->mpts_ifscope, + dbuf, dport, !!(mpts->mpts_flags & MPTSF_CONNECT_PENDING)); p = proc_find(mp_so->last_pid); if (p == PROC_NULL) { mptcplog((LOG_ERR, "%s: Couldn't find proc for pid %u\n", __func__, mp_so->last_pid), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (ESRCH); + return ESRCH; } mpts->mpts_flags &= ~MPTSF_CONNECT_PENDING; @@ -1654,19 +1711,21 @@ mptcp_subflow_soconnectx(struct mptses *mpte, struct mptsub *mpts) /* Allocate a unique address id per subflow */ mpte->mpte_addrid_last++; - if (mpte->mpte_addrid_last == 0) + if (mpte->mpte_addrid_last == 0) { mpte->mpte_addrid_last++; + } proc_rele(p); DTRACE_MPTCP3(subflow__connect, struct mptses *, mpte, struct mptsub *, mpts, int, error); - if (error) + if (error) { mptcplog((LOG_ERR, "%s: connectx failed with error %d ifscope %u\n", - __func__, error, mpts->mpts_ifscope), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + __func__, error, mpts->mpts_ifscope), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + } - return (error); + return error; } /* @@ -1697,21 +1756,25 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, * to the MPTCP layer, so we require that the caller passes in the * expected parameters. */ - if (mp == NULL || controlp != NULL) - return (EINVAL); + if (mp == NULL || controlp != NULL) { + return EINVAL; + } *mp = NULL; - if (psa != NULL) + if (psa != NULL) { *psa = NULL; - if (flagsp != NULL) - flags = *flagsp &~ MSG_EOR; - else + } + if (flagsp != NULL) { + flags = *flagsp & ~MSG_EOR; + } else { flags = 0; + } - if (flags & (MSG_PEEK|MSG_OOB|MSG_NEEDSA|MSG_WAITALL|MSG_WAITSTREAM)) - return (EOPNOTSUPP); + if (flags & (MSG_PEEK | MSG_OOB | MSG_NEEDSA | MSG_WAITALL | MSG_WAITSTREAM)) { + return EOPNOTSUPP; + } - flags |= (MSG_DONTWAIT|MSG_NBIO); + flags |= (MSG_DONTWAIT | MSG_NBIO); /* * If a recv attempt is made on a previously-accepted socket @@ -1727,9 +1790,10 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, * prior to being returned from sodefunct(); there should * be no data on its receive list, so panic otherwise. 
*/ - if (so->so_state & SS_DEFUNCT) + if (so->so_state & SS_DEFUNCT) { sb_empty_assert(sb, __func__); - return (error); + } + return error; } /* @@ -1749,16 +1813,18 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, * socket is closed for real, SOF_MP_SUBFLOW would be cleared. */ if ((so->so_state & (SS_NOFDREF | SS_CANTRCVMORE)) == - (SS_NOFDREF | SS_CANTRCVMORE) && !(so->so_flags & SOF_MP_SUBFLOW)) - return (0); + (SS_NOFDREF | SS_CANTRCVMORE) && !(so->so_flags & SOF_MP_SUBFLOW)) { + return 0; + } /* * For consistency with soreceive() semantics, we need to obey * SB_LOCK in case some other code path has locked the buffer. */ error = sblock(&so->so_rcv, 0); - if (error != 0) - return (error); + if (error != 0) { + return error; + } m = so->so_rcv.sb_mb; if (m == NULL) { @@ -1781,7 +1847,7 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, goto release; } - if (!(so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING))) { + if (!(so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))) { error = ENOTCONN; goto release; } @@ -1835,7 +1901,6 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, mp = &m->m_next; so->so_rcv.sb_mb = m = m->m_next; *mp = NULL; - } if (m != NULL) { @@ -1847,19 +1912,21 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, continue; } - if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) + if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) { dfin = 1; + } /* * Check if the full mapping is now present */ if ((int)so->so_rcv.sb_cc < dlen - dfin) { mptcplog((LOG_INFO, "%s not enough data (%u) need %u for dsn %u\n", - __func__, so->so_rcv.sb_cc, dlen, (uint32_t)dsn), - MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_LOG); + __func__, so->so_rcv.sb_cc, dlen, (uint32_t)dsn), + MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_LOG); - if (*mp0 == NULL) + if (*mp0 == NULL) { error = EWOULDBLOCK; + } goto release; } @@ -1869,7 +1936,8 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, error_out = 1; error = EIO; dlen = 0; - soevent(so, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); + *mp0 = NULL; + mptcp_subflow_abort(sototcpcb(so)->t_mpsub, ECONNABORTED); break; } @@ -1883,8 +1951,9 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, *mp = NULL; } - if (dlen - dfin == 0) + if (dlen - dfin == 0) { dlen = 0; + } VERIFY(dlen <= 0 || m); } @@ -1897,9 +1966,9 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, SB_EMPTY_FIXUP(&so->so_rcv); } - if (error_out) + if (error_out) { goto release; - + } if (mptcp_validate_csum(sototcpcb(so), start, dsn, sseq, orig_dlen, csum, dfin)) { error = EIO; @@ -1914,17 +1983,18 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, DTRACE_MPTCP3(subflow__receive, struct socket *, so, struct sockbuf *, &so->so_rcv, struct sockbuf *, &so->so_snd); - if (flagsp != NULL) + if (flagsp != NULL) { *flagsp |= flags; + } release: sbunlock(&so->so_rcv, TRUE); - if (proc_held) + if (proc_held) { proc_rele(p); + } - return (error); - + return error; } /* @@ -1986,18 +2056,21 @@ mptcp_subflow_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd); error = sosendcheck(so, NULL, top->m_pkthdr.len, 0, 1, 0, &sblocked, NULL); - if (error) + if (error) { goto out; + } error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, top, NULL, NULL, p); top = NULL; out: - if (top != NULL) + if (top != NULL) { m_freem(top); + } - if (proc_held) + if (proc_held) { proc_rele(p); + } soclearfastopen(so); @@ -2008,8 +2081,7 @@ out: 
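/*
 * The receive path above refuses to hand up a partial DSS mapping: an mbuf
 * announces a mapping of dlen bytes (dfin set when it ends in a DATA_FIN),
 * and unless the whole mapping is already in the receive buffer the caller
 * gets EWOULDBLOCK and must wait.  A sketch of just that completeness test:
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Returns 0 when the mapping can be delivered, EWOULDBLOCK otherwise. */
static int
mapping_ready(uint32_t sb_cc, int dlen, int dfin)
{
	if ((int)sb_cc < dlen - dfin) {
		return EWOULDBLOCK;	/* partial mapping: wait for more */
	}
	return 0;
}

int
main(void)
{
	printf("%d\n", mapping_ready(1000, 1460, 0));	/* non-zero: wait */
	printf("%d\n", mapping_ready(1460, 1460, 0));	/* 0: deliver */
	return 0;
}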
(int64_t)en_tracing_val); } - return (error); - + return error; } /* @@ -2025,14 +2097,14 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, struct mptsub *mpts = NULL; int af, error = 0; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ mp_so = mptetoso(mpte); mp_tp = mpte->mpte_mptcb; if (mp_tp->mpt_state >= MPTCPS_CLOSE_WAIT) { /* If the remote end sends Data FIN, refuse subflow adds */ mptcplog((LOG_ERR, "%s state %u\n", __func__, mp_tp->mpt_state), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = ENOTCONN; goto out_err; } @@ -2040,33 +2112,71 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, mpts = mptcp_subflow_alloc(); if (mpts == NULL) { mptcplog((LOG_ERR, "%s malloc subflow failed\n", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = ENOMEM; goto out_err; } - if (src != NULL) { - int len = src->sa_len; + if (src) { + if (src->sa_family != AF_INET && src->sa_family != AF_INET6) { + error = EAFNOSUPPORT; + goto out_err; + } - MALLOC(mpts->mpts_src, struct sockaddr *, len, M_SONAME, + if (src->sa_family == AF_INET && + src->sa_len != sizeof(struct sockaddr_in)) { + error = EINVAL; + goto out_err; + } + + if (src->sa_family == AF_INET6 && + src->sa_len != sizeof(struct sockaddr_in6)) { + error = EINVAL; + goto out_err; + } + + MALLOC(mpts->mpts_src, struct sockaddr *, src->sa_len, M_SONAME, M_WAITOK | M_ZERO); if (mpts->mpts_src == NULL) { - mptcplog((LOG_ERR, "%s malloc mpts_src failed", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = ENOMEM; goto out_err; } - bcopy(src, mpts->mpts_src, len); + bcopy(src, mpts->mpts_src, src->sa_len); + } + + if (dst->sa_family != AF_INET && dst->sa_family != AF_INET6) { + error = EAFNOSUPPORT; + goto out_err; + } + + if (dst->sa_family == AF_INET && + dst->sa_len != sizeof(mpts->__mpts_dst_v4)) { + error = EINVAL; + goto out_err; + } + + if (dst->sa_family == AF_INET6 && + dst->sa_len != sizeof(mpts->__mpts_dst_v6)) { + error = EINVAL; + goto out_err; } memcpy(&mpts->mpts_dst, dst, dst->sa_len); af = mpts->mpts_dst.sa_family; + ifnet_head_lock_shared(); + if ((ifscope > (unsigned)if_index)) { + ifnet_head_done(); + error = ENXIO; + goto out_err; + } + ifnet_head_done(); + mpts->mpts_ifscope = ifscope; /* create the subflow socket */ - if ((error = mptcp_subflow_socreate(mpte, mpts, af, &so)) != 0) + if ((error = mptcp_subflow_socreate(mpte, mpts, af, &so)) != 0) { /* * Returning (error) and not cleaning up, because up to here * all we did is creating mpts. @@ -2074,7 +2184,8 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, * And the contract is that the call to mptcp_subflow_socreate, * moves ownership of mpts to mptcp_subflow_socreate. */ - return (error); + return error; + } /* * We may be called from within the kernel. 
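/*
 * The new checks in mptcp_subflow_add() above validate a caller-supplied
 * sockaddr before copying it: only AF_INET/AF_INET6 are accepted, and
 * sa_len must match the family's structure size exactly.  A standalone
 * sketch of the same checks; it assumes a BSD-style sockaddr carrying
 * sa_len, as on Darwin:
 */
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static int
validate_sockaddr(const struct sockaddr *sa)
{
	if (sa->sa_family != AF_INET && sa->sa_family != AF_INET6) {
		return EAFNOSUPPORT;
	}
	if (sa->sa_family == AF_INET &&
	    sa->sa_len != sizeof(struct sockaddr_in)) {
		return EINVAL;
	}
	if (sa->sa_family == AF_INET6 &&
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		return EINVAL;
	}
	return 0;
}

int
main(void)
{
	struct sockaddr_in sin = {
		.sin_len = sizeof(sin),
		.sin_family = AF_INET,
	};

	printf("valid v4 -> %d\n", validate_sockaddr((struct sockaddr *)&sin));
	sin.sin_len--;
	printf("bad len  -> %d\n", validate_sockaddr((struct sockaddr *)&sin));
	return 0;
}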
Still need to account this @@ -2088,8 +2199,9 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, */ mpte->mpte_connid_last++; if (mpte->mpte_connid_last == SAE_CONNID_ALL || - mpte->mpte_connid_last == SAE_CONNID_ANY) + mpte->mpte_connid_last == SAE_CONNID_ANY) { mpte->mpte_connid_last++; + } mpts->mpts_connid = mpte->mpte_connid_last; @@ -2097,8 +2209,9 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, /* Allocate a unique address id per subflow */ mpte->mpte_addrid_last++; - if (mpte->mpte_addrid_last == 0) + if (mpte->mpte_addrid_last == 0) { mpte->mpte_addrid_last++; + } /* register for subflow socket read/write events */ sock_setupcalls_locked(so, mptcp_subflow_rupcall, mpts, mptcp_subflow_wupcall, mpts, 1); @@ -2115,7 +2228,7 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, /* sanity check */ VERIFY(!(mpts->mpts_flags & - (MPTSF_CONNECTING|MPTSF_CONNECTED|MPTSF_CONNECT_PENDING))); + (MPTSF_CONNECTING | MPTSF_CONNECTED | MPTSF_CONNECT_PENDING))); /* * Indicate to the TCP subflow whether or not it should establish @@ -2132,11 +2245,13 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, soisconnecting(mp_so); /* If fastopen is requested, set state in mpts */ - if (so->so_flags1 & SOF1_PRECONNECT_DATA) + if (so->so_flags1 & SOF1_PRECONNECT_DATA) { mpts->mpts_flags |= MPTSF_TFO_REQD; + } } else { - if (!(mp_tp->mpt_flags & MPTCPF_JOIN_READY)) + if (!(mp_tp->mpt_flags & MPTCPF_JOIN_READY)) { mpts->mpts_flags |= MPTSF_CONNECT_PENDING; + } } mpts->mpts_flags |= MPTSF_CONNECTING; @@ -2151,7 +2266,7 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, inet_ntop(af, ((af == AF_INET) ? (void *)&SIN(&mpts->mpts_dst)->sin_addr.s_addr : (void *)&SIN6(&mpts->mpts_dst)->sin6_addr), - dbuf, sizeof (dbuf)), ((af == AF_INET) ? + dbuf, sizeof(dbuf)), ((af == AF_INET) ? 
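/*
 * The connection-ID bump above must never hand out the reserved values
 * SAE_CONNID_ANY/SAE_CONNID_ALL (and the address-ID bump must skip 0).
 * A sketch of a wrap-around-safe allocator; it loops rather than
 * incrementing once, which also covers the reserved values landing
 * back-to-back at the 32-bit wrap:
 */
#include <stdint.h>
#include <stdio.h>

#define CONNID_ANY ((uint32_t)0)	/* mirrors SAE_CONNID_ANY */
#define CONNID_ALL ((uint32_t)-1)	/* mirrors SAE_CONNID_ALL */

static uint32_t
next_connid(uint32_t *last)
{
	do {
		(*last)++;
	} while (*last == CONNID_ANY || *last == CONNID_ALL);
	return *last;
}

int
main(void)
{
	uint32_t last = CONNID_ALL - 1;	/* force the wrap */

	printf("%u\n", next_connid(&last));	/* skips ALL and ANY -> 1 */
	printf("%u\n", next_connid(&last));	/* 2 */
	return 0;
}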
ntohs(SIN(&mpts->mpts_dst)->sin_port) : ntohs(SIN6(&mpts->mpts_dst)->sin6_port)), mpts->mpts_connid, @@ -2161,27 +2276,31 @@ mptcp_subflow_add(struct mptses *mpte, struct sockaddr *src, } /* connect right away if first attempt, or if join can be done now */ - if (!(mpts->mpts_flags & MPTSF_CONNECT_PENDING)) + if (!(mpts->mpts_flags & MPTSF_CONNECT_PENDING)) { error = mptcp_subflow_soconnectx(mpte, mpts); + } - if (error) + if (error) { goto out_err_close; + } - if (pcid) + if (pcid) { *pcid = mpts->mpts_connid; + } - return (0); + return 0; out_err_close: mptcp_subflow_abort(mpts, error); - return (error); + return error; out_err: - if (mpts) + if (mpts) { mptcp_subflow_free(mpts); + } - return (error); + return error; } void @@ -2209,17 +2328,17 @@ mptcp_subflow_del(struct mptses *mpte, struct mptsub *mpts) struct socket *so = mpts->mpts_socket; struct tcpcb *tp = sototcpcb(so); - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpts->mpts_mpte == mpte); VERIFY(mpts->mpts_flags & MPTSF_ATTACHED); VERIFY(mpte->mpte_numflows != 0); VERIFY(mp_so->so_usecount > 0); mptcplog((LOG_DEBUG, "%s: mp_so 0x%llx [u=%d,r=%d] cid %d %x error %d\n", - __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), - mp_so->so_usecount, mp_so->so_retaincnt, mpts->mpts_connid, - mpts->mpts_flags, mp_so->so_error), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), + mp_so->so_usecount, mp_so->so_retaincnt, mpts->mpts_connid, + mpts->mpts_flags, mp_so->so_error), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); mptcpstats_update(mpte->mpte_itfstats, mpts); mpte->mpte_init_rxbytes = sotoinpcb(so)->inp_stat->rxbytes; @@ -2228,8 +2347,9 @@ mptcp_subflow_del(struct mptses *mpte, struct mptsub *mpts) atomic_bitclear_32(&mpts->mpts_flags, MPTSF_ATTACHED); TAILQ_REMOVE(&mpte->mpte_subflows, mpts, mpts_entry); mpte->mpte_numflows--; - if (mpte->mpte_active_sub == mpts) + if (mpte->mpte_active_sub == mpts) { mpte->mpte_active_sub = NULL; + } /* * Drop references held by this subflow socket; there @@ -2240,12 +2360,12 @@ mptcp_subflow_del(struct mptses *mpte, struct mptsub *mpts) mptcp_detach_mptcb_from_subf(mpte->mpte_mptcb, so); - mp_so->so_usecount--; /* for subflow socket */ + mp_so->so_usecount--; /* for subflow socket */ mpts->mpts_mpte = NULL; mpts->mpts_socket = NULL; - mptcp_subflow_remref(mpts); /* for MPTCP subflow list */ - mptcp_subflow_remref(mpts); /* for subflow socket */ + mptcp_subflow_remref(mpts); /* for MPTCP subflow list */ + mptcp_subflow_remref(mpts); /* for subflow socket */ so->so_flags &= ~SOF_MP_SUBFLOW; tp->t_mptcb = NULL; @@ -2259,8 +2379,9 @@ mptcp_subflow_shutdown(struct mptses *mpte, struct mptsub *mpts) struct mptcb *mp_tp = mpte->mpte_mptcb; int send_dfin = 0; - if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) + if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) { send_dfin = 1; + } if (!(so->so_state & (SS_ISDISCONNECTING | SS_ISDISCONNECTED)) && (so->so_state & SS_ISCONNECTED)) { @@ -2268,11 +2389,11 @@ mptcp_subflow_shutdown(struct mptses *mpte, struct mptsub *mpts) __func__, mpts->mpts_connid, send_dfin), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); - if (send_dfin) + if (send_dfin) { mptcp_send_dfin(so); + } soshutdownlock(so, SHUT_WR); } - } static void @@ -2281,14 +2402,16 @@ mptcp_subflow_abort(struct mptsub *mpts, int error) struct socket *so = mpts->mpts_socket; struct tcpcb *tp = sototcpcb(so); - if (mpts->mpts_flags & MPTSF_DISCONNECTED) + if (mpts->mpts_flags & MPTSF_DISCONNECTED) { return; + } 
mptcplog((LOG_DEBUG, "%s aborting connection state %u\n", __func__, tp->t_state), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); - if (tp->t_state != TCPS_CLOSED) + if (tp->t_state != TCPS_CLOSED) { tcp_drop(tp, error); + } mptcp_subflow_eupcall1(so, mpts, SO_FILT_HINT_DISCONNECTED); } @@ -2303,20 +2426,22 @@ mptcp_subflow_disconnect(struct mptses *mpte, struct mptsub *mpts) struct mptcb *mp_tp; int send_dfin = 0; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpts->mpts_mpte == mpte); VERIFY(mpts->mpts_socket != NULL); - if (mpts->mpts_flags & (MPTSF_DISCONNECTING|MPTSF_DISCONNECTED)) + if (mpts->mpts_flags & (MPTSF_DISCONNECTING | MPTSF_DISCONNECTED)) { return; + } mpts->mpts_flags |= MPTSF_DISCONNECTING; so = mpts->mpts_socket; mp_tp = mpte->mpte_mptcb; - if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) + if (mp_tp->mpt_state > MPTCPS_CLOSE_WAIT) { send_dfin = 1; + } if (!(so->so_state & (SS_ISDISCONNECTING | SS_ISDISCONNECTED)) && (so->so_state & SS_ISCONNECTED)) { @@ -2324,8 +2449,9 @@ mptcp_subflow_disconnect(struct mptses *mpte, struct mptsub *mpts) __func__, mpts->mpts_connid, send_dfin), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); - if (send_dfin) + if (send_dfin) { mptcp_send_dfin(so); + } (void) soshutdownlock(so, SHUT_RD); (void) soshutdownlock(so, SHUT_WR); (void) sodisconnectlocked(so); @@ -2351,8 +2477,9 @@ mptcp_subflow_rupcall(struct socket *so, void *arg, int waitf) VERIFY(mpte != NULL); if (mptcp_should_defer_upcall(mpte->mpte_mppcb)) { - if (!(mpte->mpte_mppcb->mpp_flags & MPP_RUPCALL)) + if (!(mpte->mpte_mppcb->mpp_flags & MPP_RUPCALL)) { mpte->mpte_mppcb->mpp_flags |= MPP_SHOULD_RWAKEUP; + } return; } @@ -2368,7 +2495,7 @@ mptcp_subflow_rupcall(struct socket *so, void *arg, int waitf) mptcp_subflow_input(mpte, mpts); - mptcp_subflow_remref(mpts); /* ours */ + mptcp_subflow_remref(mpts); /* ours */ VERIFY(mpts->mpts_socket->so_usecount != 0); mpts->mpts_socket->so_usecount--; @@ -2394,8 +2521,9 @@ mptcp_subflow_input(struct mptses *mpte, struct mptsub *mpts) DTRACE_MPTCP2(subflow__input, struct mptses *, mpte, struct mptsub *, mpts); - if (!(mpts->mpts_flags & MPTSF_CONNECTED)) + if (!(mpts->mpts_flags & MPTSF_CONNECTED)) { goto out; + } so = mpts->mpts_socket; @@ -2444,12 +2572,14 @@ mptcp_subflow_input(struct mptses *mpte, struct mptsub *mpts) /* notify protocol that we drained all the data */ if (error == 0 && m != NULL && - (so->so_proto->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) + (so->so_proto->pr_flags & PR_WANTRCVD) && so->so_pcb != NULL) { (*so->so_proto->pr_usrreqs->pru_rcvd)(so, 0); + } out: - if (wakeup) + if (wakeup) { mpte->mpte_mppcb->mpp_flags |= MPP_SHOULD_RWAKEUP; + } mptcp_handle_deferred_upcalls(mpte->mpte_mppcb, MPP_INSIDE_INPUT); } @@ -2469,8 +2599,9 @@ mptcp_subflow_wupcall(struct socket *so, void *arg, int waitf) VERIFY(mpte != NULL); if (mptcp_should_defer_upcall(mpte->mpte_mppcb)) { - if (!(mpte->mpte_mppcb->mpp_flags & MPP_WUPCALL)) + if (!(mpte->mpte_mppcb->mpp_flags & MPP_WUPCALL)) { mpte->mpte_mppcb->mpp_flags |= MPP_SHOULD_WWAKEUP; + } return; } @@ -2489,8 +2620,9 @@ mptcp_search_seq_in_sub(struct mbuf *m, struct socket *so) /* Part of the segment is covered, don't reinject here */ if (so_m->m_pkthdr.mp_dsn <= dsn && - so_m->m_pkthdr.mp_dsn + so_m->m_pkthdr.mp_rlen > dsn) + so_m->m_pkthdr.mp_dsn + so_m->m_pkthdr.mp_rlen > dsn) { return TRUE; + } so_m = so_m->m_next; } @@ -2527,14 +2659,14 @@ mptcp_subflow_output(struct mptses 
*mpte, struct mptsub *mpts, int flags) VERIFY(!INP_WAIT_FOR_IF_FEEDBACK(sotoinpcb(so))); VERIFY((mpts->mpts_flags & MPTSF_MP_CAPABLE) || - (mpts->mpts_flags & MPTSF_MP_DEGRADED) || - (mpts->mpts_flags & MPTSF_TFO_REQD)); + (mpts->mpts_flags & MPTSF_MP_DEGRADED) || + (mpts->mpts_flags & MPTSF_TFO_REQD)); VERIFY(mptcp_subflow_cwnd_space(mpts->mpts_socket) > 0); mptcplog((LOG_DEBUG, "%s mpts_flags %#x, mpte_flags %#x cwnd_space %u\n", - __func__, mpts->mpts_flags, mpte->mpte_flags, - mptcp_subflow_cwnd_space(so)), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, mpts->mpts_flags, mpte->mpte_flags, + mptcp_subflow_cwnd_space(so)), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); DTRACE_MPTCP2(subflow__output, struct mptses *, mpte, struct mptsub *, mpts); @@ -2559,20 +2691,22 @@ mptcp_subflow_output(struct mptses *mpte, struct mptsub *mpts, int flags) * pkt_flags marked with the PKTF_MPTCP flag. */ - if (mpte->mpte_reinjectq) + if (mpte->mpte_reinjectq) { sb_mb = mpte->mpte_reinjectq; - else + } else { sb_mb = mp_so->so_snd.sb_mb; + } if (sb_mb == NULL) { mptcplog((LOG_ERR, "%s: No data in MPTCP-sendbuffer! smax %u snxt %u suna %u state %u flags %#x\n", - __func__, (uint32_t)mp_tp->mpt_sndmax, (uint32_t)mp_tp->mpt_sndnxt, - (uint32_t)mp_tp->mpt_snduna, mp_tp->mpt_state, mp_so->so_flags1), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + __func__, (uint32_t)mp_tp->mpt_sndmax, (uint32_t)mp_tp->mpt_sndnxt, + (uint32_t)mp_tp->mpt_snduna, mp_tp->mpt_state, mp_so->so_flags1), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); /* Fix it to prevent looping */ - if (MPTCP_SEQ_LT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna)) + if (MPTCP_SEQ_LT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna)) { mp_tp->mpt_sndnxt = mp_tp->mpt_snduna; + } goto out; } @@ -2590,10 +2724,10 @@ mptcp_subflow_output(struct mptses *mpte, struct mptsub *mpts, int flags) /* First, drop acknowledged data */ if (MPTCP_SEQ_LT(mpt_dsn, mp_tp->mpt_snduna)) { mptcplog((LOG_ERR, "%s: dropping data, should have been done earlier " - "dsn %u suna %u reinject? %u\n", - __func__, (uint32_t)mpt_dsn, - (uint32_t)mp_tp->mpt_snduna, !!mpte->mpte_reinjectq), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + "dsn %u suna %u reinject? %u\n", + __func__, (uint32_t)mpt_dsn, + (uint32_t)mp_tp->mpt_snduna, !!mpte->mpte_reinjectq), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); if (mpte->mpte_reinjectq) { mptcp_clean_reinjectq(mpte); } else { @@ -2607,7 +2741,7 @@ mptcp_subflow_output(struct mptses *mpte, struct mptsub *mpts, int flags) /* Check again because of above sbdrop */ if (mp_so->so_snd.sb_mb == NULL && mpte->mpte_reinjectq == NULL) { mptcplog((LOG_ERR, "%s send-buffer is empty\n", __func__), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); goto out; } @@ -2626,8 +2760,8 @@ mptcp_subflow_output(struct mptses *mpte, struct mptsub *mpts, int flags) wakeup = 1; mptcplog((LOG_ERR, "%s: dropping data in degraded mode, should have been done earlier dsn %u sndnxt %u suna %u\n", - __func__, (uint32_t)mpt_dsn, (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_snduna), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + __func__, (uint32_t)mpt_dsn, (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_snduna), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); } } @@ -2641,18 +2775,20 @@ mptcp_subflow_output(struct mptses *mpte, struct mptsub *mpts, int flags) * Adjust the top level notion of next byte used for retransmissions * and sending FINs. 
*/ - if (MPTCP_SEQ_LT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna)) + if (MPTCP_SEQ_LT(mp_tp->mpt_sndnxt, mp_tp->mpt_snduna)) { mp_tp->mpt_sndnxt = mp_tp->mpt_snduna; + } /* Now determine the offset from which to start transmitting data */ - if (mpte->mpte_reinjectq) + if (mpte->mpte_reinjectq) { sb_mb = mpte->mpte_reinjectq; - else + } else { dont_reinject: sb_mb = mp_so->so_snd.sb_mb; + } if (sb_mb == NULL) { mptcplog((LOG_ERR, "%s send-buffer is still empty\n", __func__), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); goto out; } @@ -2686,9 +2822,9 @@ dont_reinject: sb_cc -= off; } else { mptcplog((LOG_ERR, "%s this should not happen: sndnxt %u sndmax %u\n", - __func__, (uint32_t)mp_tp->mpt_sndnxt, - (uint32_t)mp_tp->mpt_sndmax), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + __func__, (uint32_t)mp_tp->mpt_sndnxt, + (uint32_t)mp_tp->mpt_sndmax), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); goto out; } @@ -2697,10 +2833,10 @@ dont_reinject: sb_cc = min(sb_cc, mptcp_subflow_cwnd_space(so)); if (sb_cc <= 0) { mptcplog((LOG_ERR, "%s sb_cc is %d, mp_so->sb_cc %u, sndwnd %u,sndnxt %u sndmax %u cwnd %u\n", - __func__, sb_cc, mp_so->so_snd.sb_cc, mp_tp->mpt_sndwnd, - (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_sndmax, - mptcp_subflow_cwnd_space(so)), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + __func__, sb_cc, mp_so->so_snd.sb_cc, mp_tp->mpt_sndwnd, + (uint32_t)mp_tp->mpt_sndnxt, (uint32_t)mp_tp->mpt_sndmax, + mptcp_subflow_cwnd_space(so)), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); } sb_cc = min(sb_cc, UINT16_MAX); @@ -2709,23 +2845,25 @@ dont_reinject: * Create a DSN mapping for the data we are about to send. It all * has the same mapping. */ - if (reinjected) + if (reinjected) { mpt_dsn = sb_mb->m_pkthdr.mp_dsn; - else + } else { mpt_dsn = mp_tp->mpt_snduna + off; + } mpt_mbuf = sb_mb; while (mpt_mbuf && reinjected == FALSE && - (mpt_mbuf->m_pkthdr.mp_rlen == 0 || - mpt_mbuf->m_pkthdr.mp_rlen <= (uint32_t)off)) { + (mpt_mbuf->m_pkthdr.mp_rlen == 0 || + mpt_mbuf->m_pkthdr.mp_rlen <= (uint32_t)off)) { off -= mpt_mbuf->m_pkthdr.mp_rlen; mpt_mbuf = mpt_mbuf->m_next; } - if (mpts->mpts_flags & MPTSF_MP_DEGRADED) + if (mpts->mpts_flags & MPTSF_MP_DEGRADED) { mptcplog((LOG_DEBUG, "%s: %u snduna = %u sndnxt = %u probe %d\n", __func__, mpts->mpts_connid, (uint32_t)mp_tp->mpt_snduna, (uint32_t)mp_tp->mpt_sndnxt, mpts->mpts_probecnt), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + } VERIFY((mpt_mbuf == NULL) || (mpt_mbuf->m_pkthdr.pkt_flags & PKTF_MPTCP)); @@ -2740,20 +2878,21 @@ dont_reinject: if (mlen < 0) { mptcplog((LOG_ERR, "%s mlen %d mp_rlen %u off %u sb_cc %u tot_sent %u\n", - __func__, (int)mlen, mpt_mbuf->m_pkthdr.mp_rlen, - (uint32_t)off, sb_cc, tot_sent), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + __func__, (int)mlen, mpt_mbuf->m_pkthdr.mp_rlen, + (uint32_t)off, sb_cc, tot_sent), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); goto out; } - if (mlen == 0) + if (mlen == 0) { goto next; + } m = m_copym_mode(mpt_mbuf, (int)off, mlen, M_DONTWAIT, M_COPYM_MUST_COPY_HDR); if (m == NULL) { mptcplog((LOG_ERR, "%s m_copym_mode failed\n", __func__), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SENDER_DBG, MPTCP_LOGLVL_ERR); error = ENOBUFS; break; } @@ -2769,7 +2908,7 @@ dont_reinject: m->m_pkthdr.len = mlen; if (head == NULL) { - head = tail = m; + head = tail = m; } else { tail->m_next = m; tail = m; @@ -2798,28 +2937,30 @@ next: } mptcplog((LOG_DEBUG, "%s: Queued dsn %u ssn %u len %u on sub %u\n", - __func__, (uint32_t)mpt_dsn, mpts->mpts_rel_seq, - tot_sent, mpts->mpts_connid), MPTCP_SENDER_DBG, 
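/*
 * The offset logic above decides how much of the MPTCP send buffer to hand
 * to one subflow: skip the already-sent prefix (sndnxt - snduna), cap by
 * the subflow's congestion-window space, and cap by UINT16_MAX because a
 * DSS mapping carries a 16-bit length.  A sketch of that computation:
 */
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static uint32_t
chunk_len(uint64_t snduna, uint64_t sndnxt, uint32_t sb_cc,
    uint32_t cwnd_space)
{
	uint64_t off = sndnxt - snduna;	/* already handed to subflows */
	uint32_t len;

	if (off >= sb_cc) {
		return 0;		/* nothing new to send */
	}
	len = sb_cc - (uint32_t)off;
	len = MIN(len, cwnd_space);	/* respect the subflow's cwnd */
	return MIN(len, UINT16_MAX);	/* one DSS mapping max */
}

int
main(void)
{
	printf("%u\n", chunk_len(1000, 1400, 90000, 1 << 16));	/* 65535 */
	printf("%u\n", chunk_len(1000, 1400, 2000, 1 << 16));	/* 1600 */
	return 0;
}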
MPTCP_LOGLVL_VERBOSE); + __func__, (uint32_t)mpt_dsn, mpts->mpts_rel_seq, + tot_sent, mpts->mpts_connid), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); if (head && (mp_tp->mpt_flags & MPTCPF_CHECKSUM)) { dss_csum = mptcp_output_csum(head, mpt_dsn, mpts->mpts_rel_seq, - tot_sent); + tot_sent); } /* Now, let's update rel-seq and the data-level length */ mpts->mpts_rel_seq += tot_sent; m = head; while (m) { - if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) + if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) { m->m_pkthdr.mp_csum = dss_csum; + } m->m_pkthdr.mp_rlen = tot_sent; m = m->m_next; } if (head != NULL) { if ((mpts->mpts_flags & MPTSF_TFO_REQD) && - (tp->t_tfo_stats == 0)) + (tp->t_tfo_stats == 0)) { tp->t_mpflags |= TMPF_TFO_REQUEST; + } error = sock_sendmbuf(so, NULL, head, 0, NULL); @@ -2837,17 +2978,19 @@ done_sending: if (mpts->mpts_probesoon && mpts->mpts_maxseg && tot_sent) { tcpstat.tcps_mp_num_probes++; - if ((uint32_t)tot_sent < mpts->mpts_maxseg) + if ((uint32_t)tot_sent < mpts->mpts_maxseg) { mpts->mpts_probecnt += 1; - else + } else { mpts->mpts_probecnt += - tot_sent/mpts->mpts_maxseg; + tot_sent / mpts->mpts_maxseg; + } } if (!reinjected && !(flags & MPTCP_SUBOUT_PROBING)) { if (MPTCP_DATASEQ_HIGH32(new_sndnxt) > - MPTCP_DATASEQ_HIGH32(mp_tp->mpt_sndnxt)) + MPTCP_DATASEQ_HIGH32(mp_tp->mpt_sndnxt)) { mp_tp->mpt_flags |= MPTCPF_SND_64BITDSN; + } mp_tp->mpt_sndnxt = new_sndnxt; } @@ -2857,13 +3000,14 @@ done_sending: soclearfastopen(mp_so); if ((mpts->mpts_flags & MPTSF_MP_DEGRADED) || - (mpts->mpts_probesoon != 0)) + (mpts->mpts_probesoon != 0)) { mptcplog((LOG_DEBUG, "%s %u degraded %u wrote %d %d probe %d probedelta %d\n", __func__, mpts->mpts_connid, !!(mpts->mpts_flags & MPTSF_MP_DEGRADED), tot_sent, (int) sb_cc, mpts->mpts_probecnt, (tcp_now - mpts->mpts_probesoon)), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + } if (IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp)) { mpte->mpte_mppcb->mpp_flags |= MPP_SET_CELLICON; @@ -2887,16 +3031,17 @@ done_sending: } out: - if (wakeup) + if (wakeup) { mpte->mpte_mppcb->mpp_flags |= MPP_SHOULD_WWAKEUP; + } mptcp_handle_deferred_upcalls(mpte->mpte_mppcb, MPP_INSIDE_OUTPUT); - return (error); + return error; zero_len_write: /* Opting to call pru_send as no mbuf at subflow level */ error = (*so->so_proto->pr_usrreqs->pru_send)(so, 0, NULL, NULL, - NULL, current_proc()); + NULL, current_proc()); goto done_sending; } @@ -2907,9 +3052,9 @@ mptcp_add_reinjectq(struct mptses *mpte, struct mbuf *m) struct mbuf *n, *prev = NULL; mptcplog((LOG_DEBUG, "%s reinjecting dsn %u dlen %u rseq %u\n", - __func__, (uint32_t)m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen, - m->m_pkthdr.mp_rseq), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, (uint32_t)m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen, + m->m_pkthdr.mp_rseq), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); n = mpte->mpte_reinjectq; @@ -2917,8 +3062,9 @@ mptcp_add_reinjectq(struct mptses *mpte, struct mbuf *m) * equal than m's sequence number. 
*/ while (n) { - if (MPTCP_SEQ_GEQ(n->m_pkthdr.mp_dsn, m->m_pkthdr.mp_dsn)) + if (MPTCP_SEQ_GEQ(n->m_pkthdr.mp_dsn, m->m_pkthdr.mp_dsn)) { break; + } prev = n; @@ -2930,8 +3076,8 @@ mptcp_add_reinjectq(struct mptses *mpte, struct mbuf *m) if (n->m_pkthdr.mp_dsn == m->m_pkthdr.mp_dsn && n->m_pkthdr.mp_rlen >= m->m_pkthdr.mp_rlen) { mptcplog((LOG_DEBUG, "%s fully covered with len %u\n", - __func__, n->m_pkthdr.mp_rlen), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, n->m_pkthdr.mp_rlen), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); goto dont_queue; } @@ -2940,36 +3086,37 @@ mptcp_add_reinjectq(struct mptses *mpte, struct mbuf *m) struct mbuf *tmp = n->m_nextpkt; mptcplog((LOG_DEBUG, "%s m is covering that guy dsn %u len %u dsn %u len %u\n", - __func__, m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen, - n->m_pkthdr.mp_dsn, n->m_pkthdr.mp_rlen), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, m->m_pkthdr.mp_dsn, m->m_pkthdr.mp_rlen, + n->m_pkthdr.mp_dsn, n->m_pkthdr.mp_rlen), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); m->m_nextpkt = NULL; - if (prev == NULL) + if (prev == NULL) { mpte->mpte_reinjectq = tmp; - else + } else { prev->m_nextpkt = tmp; + } m_freem(n); n = tmp; } - } if (prev) { /* m is already fully covered by the previous mbuf in the queue */ if (prev->m_pkthdr.mp_dsn + prev->m_pkthdr.mp_rlen >= m->m_pkthdr.mp_dsn + m->m_pkthdr.len) { mptcplog((LOG_DEBUG, "%s prev covers us from %u with len %u\n", - __func__, prev->m_pkthdr.mp_dsn, prev->m_pkthdr.mp_rlen), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, prev->m_pkthdr.mp_dsn, prev->m_pkthdr.mp_rlen), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); goto dont_queue; } } - if (prev == NULL) + if (prev == NULL) { mpte->mpte_reinjectq = m; - else + } else { prev->m_nextpkt = m; + } m->m_nextpkt = n; @@ -2991,13 +3138,15 @@ mptcp_lookup_dsn(struct mptses *mpte, uint64_t dsn) while (m) { /* If this segment covers what we are looking for, return it. 
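/*
 * mptcp_add_reinjectq() above keeps the reinject queue sorted by DSN and
 * free of redundant segments: walk to the first node at or past m, drop m
 * if an equal node already covers it, unlink nodes that m covers, then
 * link m in.  A heap-allocated sketch of that insert (the prev-coverage
 * check from the kernel version is omitted for brevity):
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct seg {			/* reduced mbuf: mp_dsn/mp_rlen only */
	uint64_t dsn;
	uint32_t len;
	struct seg *next;
};

static void
reinject_insert(struct seg **q, struct seg *m)
{
	struct seg **pp = q;

	while (*pp && (*pp)->dsn < m->dsn) {
		pp = &(*pp)->next;	/* skip segments below m */
	}
	if (*pp && (*pp)->dsn == m->dsn && (*pp)->len >= m->len) {
		free(m);		/* fully covered: don't queue */
		return;
	}
	while (*pp && (*pp)->dsn + (*pp)->len <= m->dsn + m->len) {
		struct seg *dead = *pp;	/* m covers this one: unlink */

		*pp = dead->next;
		free(dead);
	}
	m->next = *pp;
	*pp = m;
}

int
main(void)
{
	uint64_t dsns[] = { 3000, 1000, 2000 };
	struct seg *q = NULL, *s;
	int i;

	for (i = 0; i < 3; i++) {
		s = calloc(1, sizeof(*s));
		s->dsn = dsns[i];
		s->len = 500;
		reinject_insert(&q, s);
	}
	for (s = q; s; s = s->next) {	/* prints 1000, 2000, 3000 */
		printf("dsn %llu len %u\n",
		    (unsigned long long)s->dsn, s->len);
	}
	return 0;
}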
*/ if (MPTCP_SEQ_LEQ(m->m_pkthdr.mp_dsn, dsn) && - MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen, dsn)) + MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen, dsn)) { break; + } /* Segment is no more in the queue */ - if (MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn, dsn)) + if (MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn, dsn)) { return NULL; + } m = m->m_next; } @@ -3024,7 +3173,7 @@ mptcp_copy_mbuf_list(struct mbuf *m, int len) n = m_copym_mode(m, 0, m->m_len, M_DONTWAIT, M_COPYM_MUST_COPY_HDR); if (n == NULL) { mptcplog((LOG_ERR, "%s m_copym_mode returned NULL\n", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); goto err; } @@ -3037,11 +3186,13 @@ mptcp_copy_mbuf_list(struct mbuf *m, int len) n->m_pkthdr.pkt_flags |= (PKTF_MPSO | PKTF_MPTCP); - if (top == NULL) + if (top == NULL) { top = n; + } - if (tail != NULL) + if (tail != NULL) { tail->m_next = n; + } tail = n; @@ -3052,8 +3203,9 @@ mptcp_copy_mbuf_list(struct mbuf *m, int len) return top; err: - if (top) + if (top) { m_freem(top); + } return NULL; } @@ -3073,32 +3225,36 @@ mptcp_reinject_mbufs(struct socket *so) struct mbuf *n = m->m_next, *orig = m; mptcplog((LOG_DEBUG, "%s working on suna %u relseq %u iss %u len %u pktflags %#x\n", - __func__, tp->snd_una, m->m_pkthdr.mp_rseq, mpts->mpts_iss, - m->m_pkthdr.mp_rlen, m->m_pkthdr.pkt_flags), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, tp->snd_una, m->m_pkthdr.mp_rseq, mpts->mpts_iss, + m->m_pkthdr.mp_rlen, m->m_pkthdr.pkt_flags), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); VERIFY((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP)); - if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_REINJ) + if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_REINJ) { goto next; + } /* Has it all already been acknowledged at the data-level? */ - if (MPTCP_SEQ_GEQ(mp_tp->mpt_snduna, m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen)) + if (MPTCP_SEQ_GEQ(mp_tp->mpt_snduna, m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen)) { goto next; + } /* Part of this has already been acknowledged - lookup in the * MPTCP-socket for the segment. 
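/*
 * mptcp_lookup_dsn() above exploits the ordering of the send queue: scan
 * forward until a mapping covers the wanted DSN, and give up as soon as a
 * mapping starts past it, since an ordered queue cannot contain it later.
 * An array-based sketch of the same scan:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct map {			/* stand-in for an mbuf's DSN mapping */
	uint64_t dsn;
	uint32_t len;
};

static const struct map *
lookup_dsn(const struct map *maps, size_t n, uint64_t dsn)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (maps[i].dsn <= dsn && maps[i].dsn + maps[i].len > dsn) {
			return &maps[i];	/* covers dsn */
		}
		if (maps[i].dsn > dsn) {
			return NULL;		/* already past it */
		}
	}
	return NULL;
}

int
main(void)
{
	struct map maps[] = { { 1000, 500 }, { 1500, 500 } };
	const struct map *m = lookup_dsn(maps, 2, 1600);

	printf("found dsn %llu\n", m ? (unsigned long long)m->dsn : 0ULL);
	return 0;
}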
*/ if (SEQ_GT(tp->snd_una - mpts->mpts_iss, m->m_pkthdr.mp_rseq)) { m = mptcp_lookup_dsn(mpte, m->m_pkthdr.mp_dsn); - if (m == NULL) + if (m == NULL) { goto next; + } } /* Copy the mbuf with headers (aka, DSN-numbers) */ m = mptcp_copy_mbuf_list(m, m->m_pkthdr.mp_rlen); - if (m == NULL) + if (m == NULL) { break; + } VERIFY(m->m_nextpkt == NULL); @@ -3114,8 +3270,9 @@ next: while (n) { VERIFY((n->m_flags & M_PKTHDR) && (n->m_pkthdr.pkt_flags & PKTF_MPTCP)); - if (n->m_pkthdr.mp_dsn != orig->m_pkthdr.mp_dsn) + if (n->m_pkthdr.mp_dsn != orig->m_pkthdr.mp_dsn) { break; + } n->m_pkthdr.pkt_flags |= PKTF_MPTCP_REINJ; n = n->m_next; @@ -3136,8 +3293,9 @@ mptcp_clean_reinjectq(struct mptses *mpte) struct mbuf *m = mpte->mpte_reinjectq; if (MPTCP_SEQ_GEQ(m->m_pkthdr.mp_dsn, mp_tp->mpt_snduna) || - MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen, mp_tp->mpt_snduna)) + MPTCP_SEQ_GT(m->m_pkthdr.mp_dsn + m->m_pkthdr.mp_rlen, mp_tp->mpt_snduna)) { break; + } mpte->mpte_reinjectq = m->m_nextpkt; m->m_nextpkt = NULL; @@ -3158,8 +3316,9 @@ mptcp_subflow_eupcall1(struct socket *so, void *arg, uint32_t events) VERIFY(mpte != NULL); mpte_lock_assert_held(mpte); - if ((mpts->mpts_evctl & events) == events) + if ((mpts->mpts_evctl & events) == events) { return; + } mpts->mpts_evctl |= events; @@ -3178,21 +3337,22 @@ mptcp_subflow_eupcall1(struct socket *so, void *arg, uint32_t events) */ static ev_ret_t mptcp_subflow_events(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint) + uint64_t *p_mpsofilt_hint) { ev_ret_t ret = MPTS_EVRET_OK; int i, mpsub_ev_entry_count = sizeof(mpsub_ev_entry_tbl) / - sizeof(mpsub_ev_entry_tbl[0]); + sizeof(mpsub_ev_entry_tbl[0]); - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ /* bail if there's nothing to process */ - if (!mpts->mpts_evctl) - return (ret); + if (!mpts->mpts_evctl) { + return ret; + } - if (mpts->mpts_evctl & (SO_FILT_HINT_CONNRESET|SO_FILT_HINT_MUSTRST| - SO_FILT_HINT_CANTSENDMORE|SO_FILT_HINT_TIMEOUT| - SO_FILT_HINT_NOSRCADDR|SO_FILT_HINT_IFDENIED| + if (mpts->mpts_evctl & (SO_FILT_HINT_CONNRESET | SO_FILT_HINT_MUSTRST | + SO_FILT_HINT_CANTSENDMORE | SO_FILT_HINT_TIMEOUT | + SO_FILT_HINT_NOSRCADDR | SO_FILT_HINT_IFDENIED | SO_FILT_HINT_DISCONNECTED)) { mpts->mpts_evctl |= SO_FILT_HINT_MPFAILOVER; } @@ -3201,8 +3361,8 @@ mptcp_subflow_events(struct mptses *mpte, struct mptsub *mpts, struct mptsub *, mpts, uint32_t, mpts->mpts_evctl); mptcplog((LOG_DEBUG, "%s cid %d events=%b\n", __func__, - mpts->mpts_connid, mpts->mpts_evctl, SO_FILT_HINT_BITS), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE); + mpts->mpts_connid, mpts->mpts_evctl, SO_FILT_HINT_BITS), + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE); /* * Process all the socket filter hints and reset the hint @@ -3215,10 +3375,10 @@ mptcp_subflow_events(struct mptses *mpte, struct mptsub *mpts, */ if ((mpts->mpts_evctl & mpsub_ev_entry_tbl[i].sofilt_hint_mask) && (ret >= MPTS_EVRET_OK || - mpsub_ev_entry_tbl[i].sofilt_hint_mask == SO_FILT_HINT_DISCONNECTED)) { + mpsub_ev_entry_tbl[i].sofilt_hint_mask == SO_FILT_HINT_DISCONNECTED)) { mpts->mpts_evctl &= ~mpsub_ev_entry_tbl[i].sofilt_hint_mask; ev_ret_t error = - mpsub_ev_entry_tbl[i].sofilt_hint_ev_hdlr(mpte, mpts, p_mpsofilt_hint, mpsub_ev_entry_tbl[i].sofilt_hint_mask); + mpsub_ev_entry_tbl[i].sofilt_hint_ev_hdlr(mpte, mpts, p_mpsofilt_hint, mpsub_ev_entry_tbl[i].sofilt_hint_mask); ret = ((error >= MPTS_EVRET_OK) ? 
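/*
 * mptcp_subflow_events() above is table-driven: each entry of
 * mpsub_ev_entry_tbl pairs a hint bit with a handler, the bit is cleared
 * before the handler runs, a negative return gates later handlers (except
 * DISCONNECTED), and the aggregate follows ret = (r >= OK) ? MAX(r, ret)
 * : r.  A reduced sketch with two illustrative events:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef int ev_ret_t;		/* >= 0 OK, < 0 "delete the subflow" */

#define EV_RESET        0x1
#define EV_DISCONNECTED 0x2

static ev_ret_t on_reset(void)        { printf("reset\n"); return -1; }
static ev_ret_t on_disconnected(void) { printf("disconnected\n"); return 0; }

static const struct {
	uint32_t mask;
	ev_ret_t (*handler)(void);
} ev_tbl[] = {
	{ EV_RESET,        on_reset },
	{ EV_DISCONNECTED, on_disconnected },
};

static ev_ret_t
dispatch(uint32_t *evctl)
{
	ev_ret_t ret = 0;
	size_t i;

	for (i = 0; i < sizeof(ev_tbl) / sizeof(ev_tbl[0]); i++) {
		if ((*evctl & ev_tbl[i].mask) &&
		    (ret >= 0 || ev_tbl[i].mask == EV_DISCONNECTED)) {
			ev_ret_t r;

			*evctl &= ~ev_tbl[i].mask;	/* clear, then run */
			r = ev_tbl[i].handler();
			ret = (r >= 0) ? (r > ret ? r : ret) : r;
		}
	}
	return ret;
}

int
main(void)
{
	uint32_t ev = EV_RESET | EV_DISCONNECTED;

	printf("ret %d, leftover %#x\n", dispatch(&ev), ev);
	return 0;
}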
MAX(error, ret) : error); } } @@ -3227,28 +3387,29 @@ mptcp_subflow_events(struct mptses *mpte, struct mptsub *mpts, * We should be getting only events specified via sock_catchevents(), * so loudly complain if we have any unprocessed one(s). */ - if (mpts->mpts_evctl || ret < MPTS_EVRET_OK) + if (mpts->mpts_evctl || ret < MPTS_EVRET_OK) { mptcplog((LOG_WARNING, "%s%s: cid %d evret %s (%d) unhandled events=%b\n", __func__, (mpts->mpts_evctl && ret == MPTS_EVRET_OK) ? "MPTCP_ERROR " : "", mpts->mpts_connid, mptcp_evret2str(ret), ret, mpts->mpts_evctl, SO_FILT_HINT_BITS), MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); - else + } else { mptcplog((LOG_DEBUG, "%s: Done, events %b\n", __func__, - mpts->mpts_evctl, SO_FILT_HINT_BITS), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE); + mpts->mpts_evctl, SO_FILT_HINT_BITS), + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE); + } - return (ret); + return ret; } static ev_ret_t mptcp_subflow_propagate_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { struct socket *mp_so, *so; struct mptcb *mp_tp; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpte->mpte_mppcb != NULL); mp_so = mptetoso(mpte); mp_tp = mpte->mpte_mptcb; @@ -3268,7 +3429,7 @@ mptcp_subflow_propagate_ev(struct mptses *mpte, struct mptsub *mpts, *p_mpsofilt_hint |= event; } - return (MPTS_EVRET_OK); + return MPTS_EVRET_OK; } /* @@ -3276,13 +3437,13 @@ mptcp_subflow_propagate_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_nosrcaddr_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(p_mpsofilt_hint, event) struct socket *mp_so; struct tcpcb *tp; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpte->mpte_mppcb != NULL); mp_so = mptetoso(mpte); @@ -3296,17 +3457,18 @@ mptcp_subflow_nosrcaddr_ev(struct mptses *mpte, struct mptsub *mpts, mpte->mpte_lost_aid = tp->t_local_aid; mptcplog((LOG_DEBUG, "%s cid %d\n", __func__, mpts->mpts_connid), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); /* * The subflow connection has lost its source address. */ mptcp_subflow_abort(mpts, EADDRNOTAVAIL); - if (mp_so->so_flags & SOF_NOADDRAVAIL) + if (mp_so->so_flags & SOF_NOADDRAVAIL) { mptcp_subflow_propagate_ev(mpte, mpts, p_mpsofilt_hint, event); + } - return (MPTS_EVRET_DELETE); + return MPTS_EVRET_DELETE; } /* @@ -3315,7 +3477,7 @@ mptcp_subflow_nosrcaddr_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_mpcantrcvmore_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(event) struct mptcb *mp_tp; @@ -3327,15 +3489,16 @@ mptcp_subflow_mpcantrcvmore_ev(struct mptses *mpte, struct mptsub *mpts, MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); /* - * We got a Data FIN for the MPTCP connection. - * The FIN may arrive with data. The data is handed up to the - * mptcp socket and the user is notified so that it may close - * the socket if needed. - */ - if (mp_tp->mpt_state == MPTCPS_CLOSE_WAIT) + * We got a Data FIN for the MPTCP connection. + * The FIN may arrive with data. The data is handed up to the + * mptcp socket and the user is notified so that it may close + * the socket if needed. 
+ */ + if (mp_tp->mpt_state == MPTCPS_CLOSE_WAIT) { *p_mpsofilt_hint |= SO_FILT_HINT_CANTRCVMORE; + } - return (MPTS_EVRET_OK); /* keep the subflow socket around */ + return MPTS_EVRET_OK; /* keep the subflow socket around */ } /* @@ -3343,7 +3506,7 @@ mptcp_subflow_mpcantrcvmore_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_failover_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(event, p_mpsofilt_hint) struct mptsub *mpts_alt = NULL; @@ -3354,8 +3517,8 @@ mptcp_subflow_failover_ev(struct mptses *mpte, struct mptsub *mpts, mpte_lock_assert_held(mpte); mp_so = mptetoso(mpte); mptcplog((LOG_NOTICE, "%s: mp_so 0x%llx\n", __func__, - (u_int64_t)VM_KERNEL_ADDRPERM(mp_so)), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); + (u_int64_t)VM_KERNEL_ADDRPERM(mp_so)), + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); mptcp_reinject_mbufs(mpts->mpts_socket); @@ -3366,7 +3529,7 @@ mptcp_subflow_failover_ev(struct mptses *mpte, struct mptsub *mpts, */ if (mpts_alt == NULL) { mptcplog((LOG_WARNING, "%s: no alternate path\n", __func__), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); goto done; } @@ -3391,21 +3554,21 @@ mptcp_subflow_failover_ev(struct mptses *mpte, struct mptsub *mpts, mpts->mpts_flags &= ~MPTSF_ACTIVE; mptcplog((LOG_NOTICE, "%s: switched from %d to %d\n", - __func__, mpts->mpts_connid, mpts_alt->mpts_connid), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); + __func__, mpts->mpts_connid, mpts_alt->mpts_connid), + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); mptcpstats_inc_switch(mpte, mpts); sowwakeup(alt_so); } else { mptcplog((LOG_DEBUG, "%s: no alt cid = %d\n", __func__, - mpts->mpts_connid), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); + mpts->mpts_connid), + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); done: mpts->mpts_socket->so_flags &= ~SOF_MP_TRYFAILOVER; } - return (MPTS_EVRET_OK); + return MPTS_EVRET_OK; } /* @@ -3413,9 +3576,9 @@ done: */ static ev_ret_t mptcp_subflow_ifdenied_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpte->mpte_mppcb != NULL); mptcplog((LOG_DEBUG, "%s: cid %d\n", __func__, @@ -3429,7 +3592,7 @@ mptcp_subflow_ifdenied_ev(struct mptses *mpte, struct mptsub *mpts, mptcp_subflow_propagate_ev(mpte, mpts, p_mpsofilt_hint, event); - return (MPTS_EVRET_DELETE); + return MPTS_EVRET_DELETE; } /* @@ -3438,45 +3601,46 @@ mptcp_subflow_ifdenied_ev(struct mptses *mpte, struct mptsub *mpts, */ static boolean_t mptcp_desynthesize_ipv6_addr(const struct in6_addr *addr, - const struct ipv6_prefix *prefix, - struct in_addr *addrv4) + const struct ipv6_prefix *prefix, + struct in_addr *addrv4) { char buf[MAX_IPv4_STR_LEN]; char *ptrv4 = (char *)addrv4; const char *ptr = (const char *)addr; - if (memcmp(addr, &prefix->ipv6_prefix, prefix->prefix_len) != 0) + if (memcmp(addr, &prefix->ipv6_prefix, prefix->prefix_len) != 0) { return false; + } switch (prefix->prefix_len) { - case NAT64_PREFIX_LEN_96: - memcpy(ptrv4, ptr + 12, 4); - break; - case NAT64_PREFIX_LEN_64: - memcpy(ptrv4, ptr + 9, 4); - break; - case NAT64_PREFIX_LEN_56: - memcpy(ptrv4, ptr + 7, 1); - memcpy(ptrv4 + 1, ptr + 9, 3); - break; - case NAT64_PREFIX_LEN_48: - memcpy(ptrv4, ptr + 6, 2); - memcpy(ptrv4 + 2, ptr + 9, 2); - break; - case NAT64_PREFIX_LEN_40: - memcpy(ptrv4, ptr + 5, 
3); - memcpy(ptrv4 + 3, ptr + 9, 1); - break; - case NAT64_PREFIX_LEN_32: - memcpy(ptrv4, ptr + 4, 4); - break; - default: - panic("NAT64-prefix len is wrong: %u\n", - prefix->prefix_len); + case NAT64_PREFIX_LEN_96: + memcpy(ptrv4, ptr + 12, 4); + break; + case NAT64_PREFIX_LEN_64: + memcpy(ptrv4, ptr + 9, 4); + break; + case NAT64_PREFIX_LEN_56: + memcpy(ptrv4, ptr + 7, 1); + memcpy(ptrv4 + 1, ptr + 9, 3); + break; + case NAT64_PREFIX_LEN_48: + memcpy(ptrv4, ptr + 6, 2); + memcpy(ptrv4 + 2, ptr + 9, 2); + break; + case NAT64_PREFIX_LEN_40: + memcpy(ptrv4, ptr + 5, 3); + memcpy(ptrv4 + 3, ptr + 9, 1); + break; + case NAT64_PREFIX_LEN_32: + memcpy(ptrv4, ptr + 4, 4); + break; + default: + panic("NAT64-prefix len is wrong: %u\n", + prefix->prefix_len); } os_log_info(mptcp_log_handle, "%s desynthesized to %s\n", __func__, - inet_ntop(AF_INET, (void *)addrv4, buf, sizeof(buf))); + inet_ntop(AF_INET, (void *)addrv4, buf, sizeof(buf))); return true; } @@ -3500,12 +3664,13 @@ mptcp_handle_ipv6_connection(struct mptses *mpte, const struct mptsub *mpts) for (j = 0; j < NAT64_MAX_NUM_PREFIXES; j++) { int success; - if (nat64prefixes[j].prefix_len == 0) + if (nat64prefixes[j].prefix_len == 0) { continue; + } success = mptcp_desynthesize_ipv6_addr(&mpte->__mpte_dst_v6.sin6_addr, - &nat64prefixes[j], - &mpte->mpte_dst_v4_nat64.sin_addr); + &nat64prefixes[j], + &mpte->mpte_dst_v4_nat64.sin_addr); if (success) { mpte->mpte_dst_v4_nat64.sin_len = sizeof(mpte->mpte_dst_v4_nat64); mpte->mpte_dst_v4_nat64.sin_family = AF_INET; @@ -3520,7 +3685,7 @@ mptcp_handle_ipv6_connection(struct mptses *mpte, const struct mptsub *mpts) */ static ev_ret_t mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(event, p_mpsofilt_hint) struct socket *mp_so, *so; @@ -3530,7 +3695,7 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, int af; boolean_t mpok = FALSE; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpte->mpte_mppcb != NULL); mp_so = mptetoso(mpte); @@ -3539,21 +3704,22 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, tp = sototcpcb(so); af = mpts->mpts_dst.sa_family; - if (mpts->mpts_flags & MPTSF_CONNECTED) - return (MPTS_EVRET_OK); + if (mpts->mpts_flags & MPTSF_CONNECTED) { + return MPTS_EVRET_OK; + } if ((mpts->mpts_flags & MPTSF_DISCONNECTED) || (mpts->mpts_flags & MPTSF_DISCONNECTING)) { if (!(so->so_state & (SS_ISDISCONNECTING | SS_ISDISCONNECTED)) && (so->so_state & SS_ISCONNECTED)) { - mptcplog((LOG_DEBUG, "%s: cid %d disconnect before tcp connect\n", - __func__, mpts->mpts_connid), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); + mptcplog((LOG_DEBUG, "%s: cid %d disconnect before tcp connect\n", + __func__, mpts->mpts_connid), + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); (void) soshutdownlock(so, SHUT_RD); (void) soshutdownlock(so, SHUT_WR); (void) sodisconnectlocked(so); } - return (MPTS_EVRET_OK); + return MPTS_EVRET_OK; } /* @@ -3580,17 +3746,19 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, * where the subflow could get disconnected before the * connected event is processed. 
*/ - return (MPTS_EVRET_OK); + return MPTS_EVRET_OK; } - if (mpts->mpts_flags & MPTSF_TFO_REQD) + if (mpts->mpts_flags & MPTSF_TFO_REQD) { mptcp_drop_tfo_data(mpte, mpts); + } mpts->mpts_flags &= ~(MPTSF_CONNECTING | MPTSF_TFO_REQD); mpts->mpts_flags |= MPTSF_CONNECTED; - if (tp->t_mpflags & TMPF_MPTCP_TRUE) + if (tp->t_mpflags & TMPF_MPTCP_TRUE) { mpts->mpts_flags |= MPTSF_MP_CAPABLE; + } tp->t_mpflags &= ~TMPF_TFO_REQUEST; @@ -3637,13 +3805,15 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, mpts->mpts_flags |= MPTSF_MPCAP_CTRSET; mpte->mpte_nummpcapflows++; - if (SOCK_DOM(so) == AF_INET6) + if (SOCK_DOM(so) == AF_INET6) { mptcp_handle_ipv6_connection(mpte, mpts); + } mptcp_check_subflows_and_add(mpte); - if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) + if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) { mpte->mpte_initial_cell = 1; + } mpte->mpte_handshake_success = 1; } @@ -3693,7 +3863,7 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, dst_in->sin_port = mpte->mpte_alternate_port; mptcp_subflow_add(mpte, NULL, (struct sockaddr *)&dst, - mpts->mpts_ifscope , NULL); + mpts->mpts_ifscope, NULL); } else { /* Else, we tried all we could, mark this interface as non-MPTCP */ for (i = 0; i < mpte->mpte_itfinfo_size; i++) { struct mpt_itf_info *info = &mpte->mpte_itfinfo[i]; @@ -3706,14 +3876,15 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, } tcpstat.tcps_join_fallback++; - if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) + if (IFNET_IS_CELLULAR(inp->inp_last_outifp)) { tcpstat.tcps_mptcp_cell_proxy++; - else + } else { tcpstat.tcps_mptcp_wifi_proxy++; + } soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); - return (MPTS_EVRET_OK); + return MPTS_EVRET_OK; } /* This call, just to "book" an entry in the stats-table for this ifindex */ @@ -3721,7 +3892,7 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, mptcp_output(mpte); - return (MPTS_EVRET_OK); /* keep the subflow socket around */ + return MPTS_EVRET_OK; /* keep the subflow socket around */ } /* @@ -3729,13 +3900,13 @@ mptcp_subflow_connected_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_disconnected_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(event, p_mpsofilt_hint) struct socket *mp_so, *so; struct mptcb *mp_tp; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpte->mpte_mppcb != NULL); mp_so = mptetoso(mpte); mp_tp = mpte->mpte_mptcb; @@ -3747,8 +3918,9 @@ mptcp_subflow_disconnected_ev(struct mptses *mpte, struct mptsub *mpts, !!(mpts->mpts_flags & MPTSF_ACTIVE), sototcpcb(so)->t_mpflags), MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); - if (mpts->mpts_flags & MPTSF_DISCONNECTED) - return (MPTS_EVRET_DELETE); + if (mpts->mpts_flags & MPTSF_DISCONNECTED) { + return MPTS_EVRET_DELETE; + } mpts->mpts_flags |= MPTSF_DISCONNECTED; @@ -3765,20 +3937,23 @@ mptcp_subflow_disconnected_ev(struct mptses *mpte, struct mptsub *mpts, } if (mp_tp->mpt_state < MPTCPS_ESTABLISHED || - ((mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) && (mpts->mpts_flags & MPTSF_ACTIVE)) || - (sototcpcb(so)->t_mpflags & TMPF_FASTCLOSERCV)) { + ((mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) && (mpts->mpts_flags & MPTSF_ACTIVE))) { mptcp_drop(mpte, mp_tp, so->so_error); } + if (sototcpcb(so)->t_mpflags & TMPF_FASTCLOSERCV) { + mptcp_drop(mpte, mp_tp, mp_so->so_error); + } + 
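
Aside on the NAT64 hunks above: mptcp_desynthesize_ipv6_addr() recovers the IPv4 address that a NAT64 gateway embedded in a synthesized IPv6 destination, using per-prefix-length byte offsets from RFC 6052. Below is a minimal userland sketch of the 96-bit case only; the helper name, the hard-coded well-known prefix 64:ff9b::/96, and the sample address are assumptions of this illustration, not kernel code.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/*
 * Userland sketch of NAT64 desynthesis for a 96-bit prefix: the IPv4
 * address sits in the last four octets of the synthesized IPv6 address
 * (RFC 6052). This mirrors only the NAT64_PREFIX_LEN_96 arm of the
 * kernel switch; the prefix and all names here are illustrative.
 */
static int
desynthesize_96(const struct in6_addr *v6, struct in_addr *v4)
{
	/* Well-known prefix 64:ff9b::/96; remaining bytes default to zero. */
	static const unsigned char wkp96[12] = { 0x00, 0x64, 0xff, 0x9b };

	if (memcmp(v6, wkp96, sizeof(wkp96)) != 0) {
		return -1;	/* not synthesized from this prefix */
	}
	memcpy(v4, (const unsigned char *)v6 + 12, 4);
	return 0;
}

int
main(void)
{
	struct in6_addr v6;
	struct in_addr v4;
	char buf[INET_ADDRSTRLEN];

	/* 64:ff9b::c000:201 embeds 192.0.2.1 */
	if (inet_pton(AF_INET6, "64:ff9b::c000:201", &v6) != 1) {
		return 1;
	}
	if (desynthesize_96(&v6, &v4) == 0) {
		printf("desynthesized to %s\n",
		    inet_ntop(AF_INET, &v4, buf, sizeof(buf)));
	}
	return 0;
}

The shorter-prefix arms in the kernel switch copy around byte 8 of the IPv6 address because RFC 6052 reserves bits 64 to 71; that is why the 64-, 56-, 48- and 40-bit cases read from ptr + 9 rather than copying contiguously.
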
/* * Clear flags that are used by getconninfo to return state. * Retain like MPTSF_DELETEOK for internal purposes. */ - mpts->mpts_flags &= ~(MPTSF_CONNECTING|MPTSF_CONNECT_PENDING| - MPTSF_CONNECTED|MPTSF_DISCONNECTING|MPTSF_PREFERRED| - MPTSF_MP_CAPABLE|MPTSF_MP_READY|MPTSF_MP_DEGRADED|MPTSF_ACTIVE); + mpts->mpts_flags &= ~(MPTSF_CONNECTING | MPTSF_CONNECT_PENDING | + MPTSF_CONNECTED | MPTSF_DISCONNECTING | MPTSF_PREFERRED | + MPTSF_MP_CAPABLE | MPTSF_MP_READY | MPTSF_MP_DEGRADED | MPTSF_ACTIVE); - return (MPTS_EVRET_DELETE); + return MPTS_EVRET_DELETE; } /* @@ -3786,36 +3961,39 @@ mptcp_subflow_disconnected_ev(struct mptses *mpte, struct mptsub *mpts, */ static ev_ret_t mptcp_subflow_mpstatus_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(event, p_mpsofilt_hint) struct socket *mp_so, *so; struct mptcb *mp_tp; ev_ret_t ret = MPTS_EVRET_OK; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpte->mpte_mppcb != NULL); mp_so = mptetoso(mpte); mp_tp = mpte->mpte_mptcb; so = mpts->mpts_socket; - if (sototcpcb(so)->t_mpflags & TMPF_MPTCP_TRUE) + if (sototcpcb(so)->t_mpflags & TMPF_MPTCP_TRUE) { mpts->mpts_flags |= MPTSF_MP_CAPABLE; - else + } else { mpts->mpts_flags &= ~MPTSF_MP_CAPABLE; + } if (sototcpcb(so)->t_mpflags & TMPF_TCP_FALLBACK) { - if (mpts->mpts_flags & MPTSF_MP_DEGRADED) + if (mpts->mpts_flags & MPTSF_MP_DEGRADED) { goto done; + } mpts->mpts_flags |= MPTSF_MP_DEGRADED; } else { mpts->mpts_flags &= ~MPTSF_MP_DEGRADED; } - if (sototcpcb(so)->t_mpflags & TMPF_MPTCP_READY) + if (sototcpcb(so)->t_mpflags & TMPF_MPTCP_READY) { mpts->mpts_flags |= MPTSF_MP_READY; - else + } else { mpts->mpts_flags &= ~MPTSF_MP_READY; + } if (mpts->mpts_flags & MPTSF_MP_DEGRADED) { mp_tp->mpt_flags |= MPTCPF_FALLBACK_TO_TCP; @@ -3834,13 +4012,13 @@ mptcp_subflow_mpstatus_ev(struct mptses *mpte, struct mptsub *mpts, } mptcplog((LOG_DEBUG, "%s: mp_so 0x%llx mpt_flags=%b cid %d mptsf=%b\n", - __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), - mp_tp->mpt_flags, MPTCPF_BITS, mpts->mpts_connid, - mpts->mpts_flags, MPTSF_BITS), - MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); + __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), + mp_tp->mpt_flags, MPTCPF_BITS, mpts->mpts_connid, + mpts->mpts_flags, MPTSF_BITS), + MPTCP_EVENTS_DBG, MPTCP_LOGLVL_LOG); done: - return (ret); + return ret; } /* @@ -3848,7 +4026,7 @@ done: */ static ev_ret_t mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(event) struct socket *mp_so, *so; @@ -3876,10 +4054,11 @@ mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, struct tcp_respond_args tra; bzero(&tra, sizeof(tra)); - if (inp->inp_flags & INP_BOUND_IF) + if (inp->inp_flags & INP_BOUND_IF) { tra.ifscope = inp->inp_boundifp->if_index; - else + } else { tra.ifscope = IFSCOPE_NONE; + } tra.awdl_unrestricted = 1; tcp_respond(tp, t_template->tt_ipgen, @@ -3897,10 +4076,11 @@ mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, if (!(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) && is_fastclose) { *p_mpsofilt_hint |= SO_FILT_HINT_CONNRESET; - if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) + if (mp_tp->mpt_state < MPTCPS_ESTABLISHED) { mp_so->so_error = ECONNABORTED; - else + } else { mp_so->so_error = ECONNRESET; + } /* * mptcp_drop is being called after processing the events, to 
fully @@ -3908,15 +4088,16 @@ mptcp_subflow_mustrst_ev(struct mptses *mpte, struct mptsub *mpts, */ } - if (mp_tp->mpt_gc_ticks == MPT_GC_TICKS) + if (mp_tp->mpt_gc_ticks == MPT_GC_TICKS) { mp_tp->mpt_gc_ticks = MPT_GC_TICKS_FAST; + } - return (MPTS_EVRET_DELETE); + return MPTS_EVRET_DELETE; } static ev_ret_t mptcp_subflow_adaptive_rtimo_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(event) bool found_active = false; @@ -3927,8 +4108,9 @@ mptcp_subflow_adaptive_rtimo_ev(struct mptses *mpte, struct mptsub *mpts, struct tcpcb *tp = sototcpcb(mpts->mpts_socket); if (!TCPS_HAVEESTABLISHED(tp->t_state) || - TCPS_HAVERCVDFIN2(tp->t_state)) + TCPS_HAVERCVDFIN2(tp->t_state)) { continue; + } if (!(mpts->mpts_flags & MPTSF_READ_STALL)) { found_active = true; @@ -3936,15 +4118,16 @@ mptcp_subflow_adaptive_rtimo_ev(struct mptses *mpte, struct mptsub *mpts, } } - if (!found_active) + if (!found_active) { *p_mpsofilt_hint |= SO_FILT_HINT_ADAPTIVE_RTIMO; + } - return (MPTS_EVRET_OK); + return MPTS_EVRET_OK; } static ev_ret_t mptcp_subflow_adaptive_wtimo_ev(struct mptses *mpte, struct mptsub *mpts, - uint64_t *p_mpsofilt_hint, uint64_t event) + uint64_t *p_mpsofilt_hint, uint64_t event) { #pragma unused(event) bool found_active = false; @@ -3955,8 +4138,9 @@ mptcp_subflow_adaptive_wtimo_ev(struct mptses *mpte, struct mptsub *mpts, struct tcpcb *tp = sototcpcb(mpts->mpts_socket); if (!TCPS_HAVEESTABLISHED(tp->t_state) || - tp->t_state > TCPS_CLOSE_WAIT) + tp->t_state > TCPS_CLOSE_WAIT) { continue; + } if (!(mpts->mpts_flags & MPTSF_WRITE_STALL)) { found_active = true; @@ -3964,10 +4148,11 @@ mptcp_subflow_adaptive_wtimo_ev(struct mptses *mpte, struct mptsub *mpts, } } - if (!found_active) + if (!found_active) { *p_mpsofilt_hint |= SO_FILT_HINT_ADAPTIVE_WTIMO; + } - return (MPTS_EVRET_OK); + return MPTS_EVRET_OK; } static const char * @@ -3991,7 +4176,7 @@ mptcp_evret2str(ev_ret_t ret) default: break; } - return (c); + return c; } /* @@ -4018,10 +4203,10 @@ mptcp_subflow_sosetopt(struct mptses *mpte, struct mptsub *mpts, struct mptopt * struct ifnet *ifp = ifindex2ifnet[mpts->mpts_ifscope]; mptcplog((LOG_DEBUG, "%s Setting CELL_FALLBACK, mpte_flags %#x, svctype %u wifi unusable %d lastcell? %d boundcell? %d\n", - __func__, mpte->mpte_flags, mpte->mpte_svctype, mptcp_is_wifi_unusable(mpte), - sotoinpcb(so)->inp_last_outifp ? IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp) : -1, - mpts->mpts_ifscope != IFSCOPE_NONE && ifp ? IFNET_IS_CELLULAR(ifp) : -1), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, mpte->mpte_flags, mpte->mpte_svctype, mptcp_is_wifi_unusable(mpte), + sotoinpcb(so)->inp_last_outifp ? IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp) : -1, + mpts->mpts_ifscope != IFSCOPE_NONE && ifp ? 
IFNET_IS_CELLULAR(ifp) : -1), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); /* * When we open a new subflow, mark it as cell fallback, if @@ -4030,12 +4215,14 @@ mptcp_subflow_sosetopt(struct mptses *mpte, struct mptsub *mpts, struct mptopt * * (except for first-party apps) */ - if (mpte->mpte_flags & MPTE_FIRSTPARTY) - return (0); + if (mpte->mpte_flags & MPTE_FIRSTPARTY) { + return 0; + } if (sotoinpcb(so)->inp_last_outifp && - !IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp)) - return (0); + !IFNET_IS_CELLULAR(sotoinpcb(so)->inp_last_outifp)) { + return 0; + } /* * This here is an OR, because if the app is not binding to the @@ -4043,18 +4230,19 @@ mptcp_subflow_sosetopt(struct mptses *mpte, struct mptsub *mpts, struct mptopt * * connection. */ if (mpts->mpts_ifscope == IFSCOPE_NONE || ifp == NULL || - !IFNET_IS_CELLULAR(ifp)) - return (0); + !IFNET_IS_CELLULAR(ifp)) { + return 0; + } } mpo->mpo_flags &= ~MPOF_INTERIM; - bzero(&sopt, sizeof (sopt)); + bzero(&sopt, sizeof(sopt)); sopt.sopt_dir = SOPT_SET; sopt.sopt_level = mpo->mpo_level; sopt.sopt_name = mpo->mpo_name; sopt.sopt_val = CAST_USER_ADDR_T(&mpo->mpo_intval); - sopt.sopt_valsize = sizeof (int); + sopt.sopt_valsize = sizeof(int); sopt.sopt_p = kernproc; error = sosetoptlock(so, &sopt, 0); @@ -4073,7 +4261,7 @@ mptcp_subflow_sosetopt(struct mptses *mpte, struct mptsub *mpts, struct mptopt * mpo->mpo_intval, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); } - return (error); + return error; } /* @@ -4090,18 +4278,18 @@ mptcp_subflow_sogetopt(struct mptses *mpte, struct socket *so, int error; VERIFY(mpo->mpo_flags & MPOF_SUBFLOW_OK); - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ mp_so = mptetoso(mpte); - bzero(&sopt, sizeof (sopt)); + bzero(&sopt, sizeof(sopt)); sopt.sopt_dir = SOPT_GET; sopt.sopt_level = mpo->mpo_level; sopt.sopt_name = mpo->mpo_name; sopt.sopt_val = CAST_USER_ADDR_T(&mpo->mpo_intval); - sopt.sopt_valsize = sizeof (int); + sopt.sopt_valsize = sizeof(int); sopt.sopt_p = kernproc; - error = sogetoptlock(so, &sopt, 0); /* already locked */ + error = sogetoptlock(so, &sopt, 0); /* already locked */ if (error == 0) { mptcplog((LOG_DEBUG, "MPTCP Socket: " "%s: mp_so 0x%llx sopt %s " @@ -4117,7 +4305,7 @@ mptcp_subflow_sogetopt(struct mptses *mpte, struct socket *so, mptcp_sopt2str(mpo->mpo_level, mpo->mpo_name), error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); } - return (error); + return error; } @@ -4182,8 +4370,9 @@ mptcp_gc(struct mppcbinfo *mppi) MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); if (mp_tp->mpt_state >= MPTCPS_FIN_WAIT_1) { - if (mp_tp->mpt_gc_ticks > 0) + if (mp_tp->mpt_gc_ticks > 0) { mp_tp->mpt_gc_ticks--; + } if (mp_tp->mpt_gc_ticks == 0) { wakeup = TRUE; } @@ -4202,14 +4391,15 @@ mptcp_gc(struct mppcbinfo *mppi) if (mpp->mpp_state != MPPCB_STATE_DEAD) { panic("MPTCP Socket: %s: mp_so 0x%llx skipped state " - "[u=%d,r=%d,s=%d]\n", __func__, - (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), - mp_so->so_usecount, mp_so->so_retaincnt, - mpp->mpp_state); + "[u=%d,r=%d,s=%d]\n", __func__, + (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), + mp_so->so_usecount, mp_so->so_retaincnt, + mpp->mpp_state); } - if (mp_tp->mpt_state == MPTCPS_TIME_WAIT) + if (mp_tp->mpt_state == MPTCPS_TIME_WAIT) { mptcp_close(mpte, mp_tp); + } mptcp_session_destroy(mpte); @@ -4228,7 +4418,7 @@ mptcp_gc(struct mppcbinfo *mppi) sodealloc(mp_so); } - return (active); + return active; } /* @@ -4239,18 +4429,19 @@ mptcp_drop(struct mptses *mpte, struct mptcb *mp_tp, int errno) { struct socket 
*mp_so; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpte->mpte_mptcb == mp_tp); mp_so = mptetoso(mpte); DTRACE_MPTCP2(state__change, struct mptcb *, mp_tp, uint32_t, 0 /* event */); - if (errno == ETIMEDOUT && mp_tp->mpt_softerror != 0) + if (errno == ETIMEDOUT && mp_tp->mpt_softerror != 0) { errno = mp_tp->mpt_softerror; + } mp_so->so_error = errno; - return (mptcp_close(mpte, mp_tp)); + return mptcp_close(mpte, mp_tp); } /* @@ -4262,7 +4453,7 @@ mptcp_close(struct mptses *mpte, struct mptcb *mp_tp) struct socket *mp_so = NULL; struct mptsub *mpts = NULL, *tmpts = NULL; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ VERIFY(mpte->mpte_mptcb == mp_tp); mp_so = mptetoso(mpte); @@ -4277,7 +4468,7 @@ mptcp_close(struct mptses *mpte, struct mptcb *mp_tp) mptcp_subflow_disconnect(mpte, mpts); } - return (NULL); + return NULL; } void @@ -4292,15 +4483,24 @@ mptcp_notify_close(struct socket *so) void mptcp_subflow_workloop(struct mptses *mpte) { - struct socket *mp_so; - struct mptsub *mpts, *tmpts; boolean_t connect_pending = FALSE, disconnect_fallback = FALSE; - uint64_t mpsofilt_hint_mask = SO_FILT_HINT_LOCKED; + uint64_t mpsofilt_hint_mask; + struct mptsub *mpts, *tmpts; + struct socket *mp_so; mpte_lock_assert_held(mpte); - VERIFY(mpte->mpte_mppcb != NULL); + + if (mpte->mpte_flags & MPTE_IN_WORKLOOP) { + mpte->mpte_flags |= MPTE_WORKLOOP_RELAUNCH; + return; + } + mpte->mpte_flags |= MPTE_IN_WORKLOOP; + mp_so = mptetoso(mpte); - VERIFY(mp_so != NULL); + +relaunch: + mpsofilt_hint_mask = SO_FILT_HINT_LOCKED; + mpte->mpte_flags &= ~MPTE_WORKLOOP_RELAUNCH; TAILQ_FOREACH_SAFE(mpts, &mpte->mpte_subflows, mpts_entry, tmpts) { ev_ret_t ret; @@ -4321,8 +4521,9 @@ mptcp_subflow_workloop(struct mptses *mpte) * be handled during the next iteration, causing a * non-zero error to be returned above. 
*/ - if (mp_so->so_flags & SOF_PCBCLEARING) + if (mp_so->so_flags & SOF_PCBCLEARING) { mptcp_subflow_disconnect(mpte, mpts); + } switch (ret) { case MPTS_EVRET_OK: @@ -4340,12 +4541,12 @@ mptcp_subflow_workloop(struct mptses *mpte) default: mptcplog((LOG_DEBUG, "MPTCP Socket: %s: mptcp_subflow_events " - "returned invalid value: %d\n", __func__, + "returned invalid value: %d\n", __func__, ret), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); break; } - mptcp_subflow_remref(mpts); /* ours */ + mptcp_subflow_remref(mpts); /* ours */ VERIFY(mpts->mpts_socket->so_usecount != 0); mpts->mpts_socket->so_usecount--; @@ -4357,8 +4558,9 @@ mptcp_subflow_workloop(struct mptses *mpte) soevent(mp_so, mpsofilt_hint_mask); } - if (!connect_pending && !disconnect_fallback) - return; + if (!connect_pending && !disconnect_fallback) { + goto exit; + } TAILQ_FOREACH_SAFE(mpts, &mpte->mpte_subflows, mpts_entry, tmpts) { if (disconnect_fallback) { @@ -4366,14 +4568,16 @@ mptcp_subflow_workloop(struct mptses *mpte) struct inpcb *inp = NULL; struct tcpcb *tp = NULL; - if (mpts->mpts_flags & MPTSF_MP_DEGRADED) + if (mpts->mpts_flags & MPTSF_MP_DEGRADED) { continue; + } mpts->mpts_flags |= MPTSF_MP_DEGRADED; - if (mpts->mpts_flags & (MPTSF_DISCONNECTING| - MPTSF_DISCONNECTED|MPTSF_CONNECT_PENDING)) + if (mpts->mpts_flags & (MPTSF_DISCONNECTING | + MPTSF_DISCONNECTED | MPTSF_CONNECT_PENDING)) { continue; + } so = mpts->mpts_socket; @@ -4389,7 +4593,7 @@ mptcp_subflow_workloop(struct mptses *mpte) inp = sotoinpcb(so); tp = intotcpcb(inp); tp->t_mpflags &= - ~(TMPF_MPTCP_READY|TMPF_MPTCP_TRUE); + ~(TMPF_MPTCP_READY | TMPF_MPTCP_TRUE); tp->t_mpflags |= TMPF_TCP_FALLBACK; if (mpts->mpts_flags & MPTSF_ACTIVE) { @@ -4407,11 +4611,19 @@ mptcp_subflow_workloop(struct mptses *mpte) if (mpts->mpts_flags & MPTSF_CONNECT_PENDING) { int error = mptcp_subflow_soconnectx(mpte, mpts); - if (error) + if (error) { mptcp_subflow_abort(mpts, error); + } } } } + +exit: + if (mpte->mpte_flags & MPTE_WORKLOOP_RELAUNCH) { + goto relaunch; + } + + mpte->mpte_flags &= ~MPTE_IN_WORKLOOP; } /* @@ -4423,10 +4635,11 @@ mptcp_lock(struct socket *mp_so, int refcount, void *lr) struct mppcb *mpp = mpsotomppcb(mp_so); void *lr_saved; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } if (mpp == NULL) { panic("%s: so=%p NO PCB! 
lr=%p lrh= %s\n", __func__, @@ -4441,12 +4654,13 @@ mptcp_lock(struct socket *mp_so, int refcount, void *lr) solockhistory_nr(mp_so)); /* NOTREACHED */ } - if (refcount != 0) + if (refcount != 0) { mp_so->so_usecount++; + } mp_so->lock_lr[mp_so->next_lock_lr] = lr_saved; mp_so->next_lock_lr = (mp_so->next_lock_lr + 1) % SO_LCKDBG_MAX; - return (0); + return 0; } /* @@ -4458,10 +4672,11 @@ mptcp_unlock(struct socket *mp_so, int refcount, void *lr) struct mppcb *mpp = mpsotomppcb(mp_so); void *lr_saved; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } if (mpp == NULL) { panic("%s: so=%p NO PCB usecount=%x lr=%p lrh= %s\n", __func__, @@ -4471,8 +4686,9 @@ mptcp_unlock(struct socket *mp_so, int refcount, void *lr) } mpp_lock_assert_held(mpp); - if (refcount != 0) + if (refcount != 0) { mp_so->so_usecount--; + } if (mp_so->so_usecount < 0) { panic("%s: so=%p usecount=%x lrh= %s\n", __func__, @@ -4483,7 +4699,7 @@ mptcp_unlock(struct socket *mp_so, int refcount, void *lr) mp_so->next_unlock_lr = (mp_so->next_unlock_lr + 1) % SO_LCKDBG_MAX; mpp_unlock(mpp); - return (0); + return 0; } /* @@ -4504,7 +4720,7 @@ mptcp_getlock(struct socket *mp_so, int flags) mp_so, mp_so->so_usecount, solockhistory_nr(mp_so)); /* NOTREACHED */ } - return (mpp_getlock(mpp, flags)); + return mpp_getlock(mpp, flags); } /* @@ -4535,8 +4751,9 @@ mptcp_attach_to_subf(struct socket *so, struct mptcb *mp_tp, sauth_entry->msae_raddr_rand = 0; try_again: sauth_entry->msae_laddr_rand = RandomULong(); - if (sauth_entry->msae_laddr_rand == 0) + if (sauth_entry->msae_laddr_rand == 0) { goto try_again; + } LIST_INSERT_HEAD(&mp_tp->mpt_subauth_list, sauth_entry, msae_next); } @@ -4548,8 +4765,9 @@ mptcp_detach_mptcb_from_subf(struct mptcb *mp_tp, struct socket *so) int found = 0; tp = sototcpcb(so); - if (tp == NULL) + if (tp == NULL) { return; + } LIST_FOREACH(sauth_entry, &mp_tp->mpt_subauth_list, msae_next) { if (sauth_entry->msae_laddr_id == tp->t_local_aid) { @@ -4561,8 +4779,9 @@ mptcp_detach_mptcb_from_subf(struct mptcb *mp_tp, struct socket *so) LIST_REMOVE(sauth_entry, msae_next); } - if (found) + if (found) { zfree(mpt_subauth_zone, sauth_entry); + } } void @@ -4574,10 +4793,12 @@ mptcp_get_rands(mptcp_addr_id addr_id, struct mptcb *mp_tp, u_int32_t *lrand, LIST_FOREACH(sauth_entry, &mp_tp->mpt_subauth_list, msae_next) { if (sauth_entry->msae_laddr_id == addr_id) { - if (lrand) + if (lrand) { *lrand = sauth_entry->msae_laddr_rand; - if (rrand) + } + if (rrand) { *rrand = sauth_entry->msae_raddr_rand; + } break; } } @@ -4627,7 +4848,7 @@ mptcp_do_sha1(mptcp_key_t *key, char *sha_digest) int sha1_size; sha1_base = (const unsigned char *) key; - sha1_size = sizeof (mptcp_key_t); + sha1_size = sizeof(mptcp_key_t); SHA1Init(&sha1ctxt); SHA1Update(&sha1ctxt, sha1_base, sha1_size); SHA1Final(sha_digest, &sha1ctxt); @@ -4635,7 +4856,7 @@ mptcp_do_sha1(mptcp_key_t *key, char *sha_digest) void mptcp_hmac_sha1(mptcp_key_t key1, mptcp_key_t key2, - u_int32_t rand1, u_int32_t rand2, u_char *digest) + u_int32_t rand1, u_int32_t rand2, u_char *digest) { SHA1_CTX sha1ctxt; mptcp_key_t key_ipad[8] = {0}; /* key XOR'd with inner pad */ @@ -4667,13 +4888,13 @@ mptcp_hmac_sha1(mptcp_key_t key1, mptcp_key_t key2, /* Perform inner SHA1 */ SHA1Init(&sha1ctxt); - SHA1Update(&sha1ctxt, (unsigned char *)key_ipad, sizeof (key_ipad)); - SHA1Update(&sha1ctxt, (unsigned char *)data, sizeof (data)); + SHA1Update(&sha1ctxt, (unsigned char *)key_ipad, sizeof(key_ipad)); + 
SHA1Update(&sha1ctxt, (unsigned char *)data, sizeof(data)); SHA1Final(digest, &sha1ctxt); /* Perform outer SHA1 */ SHA1Init(&sha1ctxt); - SHA1Update(&sha1ctxt, (unsigned char *)key_opad, sizeof (key_opad)); + SHA1Update(&sha1ctxt, (unsigned char *)key_opad, sizeof(key_opad)); SHA1Update(&sha1ctxt, (unsigned char *)digest, SHA1_RESULTLEN); SHA1Final(digest, &sha1ctxt); } @@ -4702,11 +4923,11 @@ static void mptcp_generate_token(char *sha_digest, int sha_digest_len, caddr_t token, int token_len) { - VERIFY(token_len == sizeof (u_int32_t)); + VERIFY(token_len == sizeof(u_int32_t)); VERIFY(sha_digest_len == SHA1_RESULTLEN); /* Most significant 32 bits of the SHA1 hash */ - bcopy(sha_digest, token, sizeof (u_int32_t)); + bcopy(sha_digest, token, sizeof(u_int32_t)); return; } @@ -4714,7 +4935,7 @@ static void mptcp_generate_idsn(char *sha_digest, int sha_digest_len, caddr_t idsn, int idsn_len) { - VERIFY(idsn_len == sizeof (u_int64_t)); + VERIFY(idsn_len == sizeof(u_int64_t)); VERIFY(sha_digest_len == SHA1_RESULTLEN); /* @@ -4739,8 +4960,9 @@ mptcp_conn_properties(struct mptcb *mp_tp) mp_tp->mpt_version = MPTCP_STD_VERSION_0; /* Set DSS checksum flag */ - if (mptcp_dss_csum) + if (mptcp_dss_csum) { mp_tp->mpt_flags |= MPTCPF_CHECKSUM; + } /* Set up receive window */ mp_tp->mpt_rcvwnd = mptcp_sbspace(mp_tp); @@ -4759,9 +4981,9 @@ mptcp_init_local_parms(struct mptses *mpte) mptcp_do_sha1(&mp_tp->mpt_localkey, key_digest); mptcp_generate_token(key_digest, SHA1_RESULTLEN, - (caddr_t)&mp_tp->mpt_localtoken, sizeof (mp_tp->mpt_localtoken)); + (caddr_t)&mp_tp->mpt_localtoken, sizeof(mp_tp->mpt_localtoken)); mptcp_generate_idsn(key_digest, SHA1_RESULTLEN, - (caddr_t)&mp_tp->mpt_local_idsn, sizeof (u_int64_t)); + (caddr_t)&mp_tp->mpt_local_idsn, sizeof(u_int64_t)); /* The subflow SYN is also first MPTCP byte */ mp_tp->mpt_snduna = mp_tp->mpt_sndmax = mp_tp->mpt_local_idsn + 1; @@ -4777,18 +4999,19 @@ mptcp_init_remote_parms(struct mptcb *mp_tp) mpte_lock_assert_held(mp_tp->mpt_mpte); /* Only Version 0 is supported for auth purposes */ - if (mp_tp->mpt_version != MPTCP_STD_VERSION_0) - return (-1); + if (mp_tp->mpt_version != MPTCP_STD_VERSION_0) { + return -1; + } /* Setup local and remote tokens and Initial DSNs */ mptcp_do_sha1(&mp_tp->mpt_remotekey, remote_digest); mptcp_generate_token(remote_digest, SHA1_RESULTLEN, - (caddr_t)&mp_tp->mpt_remotetoken, sizeof (mp_tp->mpt_remotetoken)); + (caddr_t)&mp_tp->mpt_remotetoken, sizeof(mp_tp->mpt_remotetoken)); mptcp_generate_idsn(remote_digest, SHA1_RESULTLEN, - (caddr_t)&mp_tp->mpt_remote_idsn, sizeof (u_int64_t)); + (caddr_t)&mp_tp->mpt_remote_idsn, sizeof(u_int64_t)); mp_tp->mpt_rcvnxt = mp_tp->mpt_remote_idsn + 1; - return (0); + return 0; } static void @@ -4798,15 +5021,18 @@ mptcp_send_dfin(struct socket *so) struct inpcb *inp = NULL; inp = sotoinpcb(so); - if (!inp) + if (!inp) { return; + } tp = intotcpcb(inp); - if (!tp) + if (!tp) { return; + } - if (!(tp->t_mpflags & TMPF_RESET)) + if (!(tp->t_mpflags & TMPF_RESET)) { tp->t_mpflags |= TMPF_SEND_DFIN; + } } /* @@ -4817,8 +5043,9 @@ mptcp_insert_dsn(struct mppcb *mpp, struct mbuf *m) { struct mptcb *mp_tp; - if (m == NULL) + if (m == NULL) { return; + } __IGNORE_WCASTALIGN(mp_tp = &((struct mpp_mtp *)mpp)->mtcb); mpte_lock_assert_held(mp_tp->mpt_mpte); @@ -4840,8 +5067,9 @@ mptcp_fallback_sbdrop(struct socket *so, struct mbuf *m, int len) uint64_t data_ack; uint64_t dsn; - if (!m || len == 0) + if (!m || len == 0) { return; + } while (m && len > 0) { VERIFY(m->m_flags & M_PKTHDR); @@ -4879,7 +5107,7 
@@ mptcp_fallback_sbdrop(struct socket *so, struct mbuf *m, int len) } mptcplog((LOG_DEBUG, "%s inferred ack up to %u\n", __func__, (uint32_t)data_ack), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); mptcp_data_ack_rcvd(mp_tp, sototcpcb(so), data_ack); } @@ -4910,11 +5138,13 @@ mptcp_preproc_sbdrop(struct socket *so, struct mbuf *m, unsigned int len) len -= sub_len; } else { /* sub_len >= len */ - if (rewinding == 0) + if (rewinding == 0) { m->m_pkthdr.mp_dsn += len; + } if (!(m->m_pkthdr.pkt_flags & PKTF_MPSO)) { - if (rewinding == 0) + if (rewinding == 0) { m->m_pkthdr.mp_rseq += len; + } } mptcplog((LOG_DEBUG, "%s: dsn %u ssn %u len %d %d\n", __func__, (u_int32_t)m->m_pkthdr.mp_dsn, @@ -4951,8 +5181,8 @@ mptcp_output_getm_dsnmap32(struct socket *so, int off, void mptcp_output_getm_dsnmap64(struct socket *so, int off, uint64_t *dsn, - uint32_t *relseq, uint16_t *data_len, - uint16_t *dss_csum) + uint32_t *relseq, uint16_t *data_len, + uint16_t *dss_csum) { struct mbuf *m = so->so_snd.sb_mb; int off_orig = off; @@ -4988,8 +5218,8 @@ mptcp_output_getm_dsnmap64(struct socket *so, int off, uint64_t *dsn, *dss_csum = m->m_pkthdr.mp_csum; mptcplog((LOG_DEBUG, "%s: dsn %u ssn %u data_len %d off %d off_orig %d\n", - __func__, (u_int32_t)(*dsn), *relseq, *data_len, off, off_orig), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, (u_int32_t)(*dsn), *relseq, *data_len, off, off_orig), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); } /* @@ -5014,38 +5244,41 @@ mptcp_insert_rmap(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th) m->m_pkthdr.mp_rseq = tp->t_rcv_map.mpt_sseq; m->m_pkthdr.mp_rlen = tp->t_rcv_map.mpt_len; m->m_pkthdr.mp_csum = tp->t_rcv_map.mpt_csum; - if (tp->t_rcv_map.mpt_dfin) + if (tp->t_rcv_map.mpt_dfin) { m->m_pkthdr.pkt_flags |= PKTF_MPTCP_DFIN; + } m->m_pkthdr.pkt_flags |= PKTF_MPTCP; tp->t_mpflags &= ~TMPF_EMBED_DSN; tp->t_mpflags |= TMPF_MPTCP_ACKNOW; } else if (tp->t_mpflags & TMPF_TCP_FALLBACK) { - if (th->th_flags & TH_FIN) + if (th->th_flags & TH_FIN) { m->m_pkthdr.pkt_flags |= PKTF_MPTCP_DFIN; + } } } int mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn, - uint32_t rseq, uint16_t dlen) + uint32_t rseq, uint16_t dlen) { struct mptsub *mpts = sototcpcb(so)->t_mpsub; - if (m_pktlen(m) == 0) - return (0); + if (m_pktlen(m) == 0) { + return 0; + } if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) { if (off && (dsn != m->m_pkthdr.mp_dsn || - rseq != m->m_pkthdr.mp_rseq || - dlen != m->m_pkthdr.mp_rlen)) { + rseq != m->m_pkthdr.mp_rseq || + dlen != m->m_pkthdr.mp_rlen)) { mptcplog((LOG_ERR, "%s: Received incorrect second mapping: %llu - %llu , %u - %u, %u - %u\n", - __func__, dsn, m->m_pkthdr.mp_dsn, - rseq, m->m_pkthdr.mp_rseq, - dlen, m->m_pkthdr.mp_rlen), - MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_ERR); - return (-1); + __func__, dsn, m->m_pkthdr.mp_dsn, + rseq, m->m_pkthdr.mp_rseq, + dlen, m->m_pkthdr.mp_rlen), + MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_ERR); + return -1; } m->m_pkthdr.mp_dsn += off; m->m_pkthdr.mp_rseq += off; @@ -5061,7 +5294,7 @@ mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn, mpts->mpts_flags |= MPTSF_CONFIRMED; - return (0); + return 0; } /* @@ -5074,15 +5307,18 @@ mptcp_act_on_txfail(struct socket *so) struct tcpcb *tp = NULL; struct inpcb *inp = sotoinpcb(so); - if (inp == NULL) + if (inp == NULL) { return; + } tp = intotcpcb(inp); - if (tp == NULL) + if (tp == NULL) { return; + } - if (so->so_flags & SOF_MP_TRYFAILOVER) + if (so->so_flags & 
SOF_MP_TRYFAILOVER) { return; + } so->so_flags |= SOF_MP_TRYFAILOVER; soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MPFAILOVER)); @@ -5099,8 +5335,9 @@ mptcp_get_map_for_dsn(struct socket *so, u_int64_t dsn_fail, u_int32_t *tcp_seq) int off = 0; u_int32_t datalen; - if (m == NULL) - return (-1); + if (m == NULL) { + return -1; + } while (m != NULL) { VERIFY(m->m_pkthdr.pkt_flags & PKTF_MPTCP); @@ -5112,8 +5349,8 @@ mptcp_get_map_for_dsn(struct socket *so, u_int64_t dsn_fail, u_int32_t *tcp_seq) off = dsn_fail - dsn; *tcp_seq = m->m_pkthdr.mp_rseq + off; mptcplog((LOG_DEBUG, "%s: %llu %llu \n", __func__, dsn, - dsn_fail), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); - return (0); + dsn_fail), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); + return 0; } m = m->m_next; @@ -5127,7 +5364,7 @@ mptcp_get_map_for_dsn(struct socket *so, u_int64_t dsn_fail, u_int32_t *tcp_seq) mptcplog((LOG_ERR, "MPTCP Sender: " "%s: %llu not found \n", __func__, dsn_fail), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); - return (-1); + return -1; } /* @@ -5146,7 +5383,7 @@ mptcp_adj_sendlen(struct socket *so, int32_t off) uint16_t dss_csum; mptcp_output_getm_dsnmap64(so, off, &mdss_dsn, &mdss_subflow_seq, - &mdss_data_len, &dss_csum); + &mdss_data_len, &dss_csum); /* * We need to compute how much of the mapping still remains. @@ -5158,18 +5395,20 @@ mptcp_adj_sendlen(struct socket *so, int32_t off) * When TFO is used, we are sending the mpts->mpts_iss although the relative * seq has been set to 1 (while it should be 0). */ - if (tp->t_mpflags & TMPF_TFO_REQUEST) + if (tp->t_mpflags & TMPF_TFO_REQUEST) { mdss_subflow_off--; + } - if (off < mdss_subflow_off) + if (off < mdss_subflow_off) { printf("%s off %d mdss_subflow_off %d mdss_subflow_seq %u iss %u suna %u\n", __func__, - off, mdss_subflow_off, mdss_subflow_seq, mpts->mpts_iss, tp->snd_una); + off, mdss_subflow_off, mdss_subflow_seq, mpts->mpts_iss, tp->snd_una); + } VERIFY(off >= mdss_subflow_off); mptcplog((LOG_DEBUG, "%s dlen %u off %d sub_off %d sub_seq %u iss %u suna %u\n", - __func__, mdss_data_len, off, mdss_subflow_off, mdss_subflow_seq, - mpts->mpts_iss, tp->snd_una), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); - return (mdss_data_len - (off - mdss_subflow_off)); + __func__, mdss_data_len, off, mdss_subflow_off, mdss_subflow_seq, + mpts->mpts_iss, tp->snd_una), MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + return mdss_data_len - (off - mdss_subflow_off); } static uint32_t @@ -5182,14 +5421,16 @@ mptcp_get_maxseg(struct mptses *mpte) struct tcpcb *tp = sototcpcb(mpts->mpts_socket); if (!TCPS_HAVEESTABLISHED(tp->t_state) || - TCPS_HAVERCVDFIN2(tp->t_state)) + TCPS_HAVERCVDFIN2(tp->t_state)) { continue; + } - if (tp->t_maxseg > maxseg) + if (tp->t_maxseg > maxseg) { maxseg = tp->t_maxseg; + } } - return (maxseg); + return maxseg; } static uint8_t @@ -5202,20 +5443,22 @@ mptcp_get_rcvscale(struct mptses *mpte) struct tcpcb *tp = sototcpcb(mpts->mpts_socket); if (!TCPS_HAVEESTABLISHED(tp->t_state) || - TCPS_HAVERCVDFIN2(tp->t_state)) + TCPS_HAVERCVDFIN2(tp->t_state)) { continue; + } - if (tp->rcv_scale < rcvscale) + if (tp->rcv_scale < rcvscale) { rcvscale = tp->rcv_scale; + } } - return (rcvscale); + return rcvscale; } /* Similar to tcp_sbrcv_reserve */ static void mptcp_sbrcv_reserve(struct mptcb *mp_tp, struct sockbuf *sbrcv, - u_int32_t newsize, u_int32_t idealsize) + u_int32_t newsize, u_int32_t idealsize) { uint8_t rcvscale = mptcp_get_rcvscale(mp_tp->mpt_mpte); @@ -5230,7 +5473,7 @@ mptcp_sbrcv_reserve(struct mptcb *mp_tp, struct sockbuf *sbrcv, /* Set new socket buffer size */ if 
(newsize > sbrcv->sb_hiwat && - (sbreserve(sbrcv, newsize) == 1)) { + (sbreserve(sbrcv, newsize) == 1)) { sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize, (idealsize != 0) ? idealsize : newsize), tcp_autorcvbuf_max); @@ -5238,7 +5481,7 @@ mptcp_sbrcv_reserve(struct mptcb *mp_tp, struct sockbuf *sbrcv, * window scale */ sbrcv->sb_idealsize = min(sbrcv->sb_idealsize, - TCP_MAXWIN << rcvscale); + TCP_MAXWIN << rcvscale); } } @@ -5308,8 +5551,9 @@ mptcp_sbrcv_grow_rwin(struct mptcb *mp_tp, struct sockbuf *sb) u_int32_t rcvbufinc = mptcp_get_maxseg(mp_tp->mpt_mpte) << 4; u_int32_t rcvbuf = sb->sb_hiwat; - if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(mp_so)) + if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(mp_so)) { return; + } if (tcp_do_autorcvbuf == 1 && tcp_cansbgrow(sb) && @@ -5340,20 +5584,22 @@ mptcp_sbspace(struct mptcb *mp_tp) rcvbuf = sb->sb_hiwat; space = ((int32_t) imin((rcvbuf - sb->sb_cc), - (sb->sb_mbmax - sb->sb_mbcnt))); - if (space < 0) + (sb->sb_mbmax - sb->sb_mbcnt))); + if (space < 0) { space = 0; + } #if CONTENT_FILTER /* Compensate for data being processed by content filters */ pending = cfil_sock_data_space(sb); #endif /* CONTENT_FILTER */ - if (pending > space) + if (pending > space) { space = 0; - else + } else { space -= pending; + } - return (space); + return space; } /* @@ -5364,23 +5610,27 @@ mptcp_notify_mpready(struct socket *so) { struct tcpcb *tp = NULL; - if (so == NULL) + if (so == NULL) { return; + } tp = intotcpcb(sotoinpcb(so)); - if (tp == NULL) + if (tp == NULL) { return; + } DTRACE_MPTCP4(multipath__ready, struct socket *, so, struct sockbuf *, &so->so_rcv, struct sockbuf *, &so->so_snd, struct tcpcb *, tp); - if (!(tp->t_mpflags & TMPF_MPTCP_TRUE)) + if (!(tp->t_mpflags & TMPF_MPTCP_TRUE)) { return; + } - if (tp->t_mpflags & TMPF_MPTCP_READY) + if (tp->t_mpflags & TMPF_MPTCP_READY) { return; + } tp->t_mpflags &= ~TMPF_TCP_FALLBACK; tp->t_mpflags |= TMPF_MPTCP_READY; @@ -5393,22 +5643,25 @@ mptcp_notify_mpfail(struct socket *so) { struct tcpcb *tp = NULL; - if (so == NULL) + if (so == NULL) { return; + } tp = intotcpcb(sotoinpcb(so)); - if (tp == NULL) + if (tp == NULL) { return; + } DTRACE_MPTCP4(multipath__failed, struct socket *, so, struct sockbuf *, &so->so_rcv, struct sockbuf *, &so->so_snd, struct tcpcb *, tp); - if (tp->t_mpflags & TMPF_TCP_FALLBACK) + if (tp->t_mpflags & TMPF_TCP_FALLBACK) { return; + } - tp->t_mpflags &= ~(TMPF_MPTCP_READY|TMPF_MPTCP_TRUE); + tp->t_mpflags &= ~(TMPF_MPTCP_READY | TMPF_MPTCP_TRUE); tp->t_mpflags |= TMPF_TCP_FALLBACK; soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_MPSTATUS)); @@ -5426,7 +5679,7 @@ mptcp_ok_to_keepalive(struct mptcb *mp_tp) if (mp_tp->mpt_state >= MPTCPS_CLOSE_WAIT) { ret = 0; } - return (ret); + return ret; } /* @@ -5438,16 +5691,17 @@ mptcp_adj_mss(struct tcpcb *tp, boolean_t mtudisc) int mss_lower = 0; struct mptcb *mp_tp = tptomptp(tp); -#define MPTCP_COMPUTE_LEN { \ - mss_lower = sizeof (struct mptcp_dss_ack_opt); \ - if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) \ - mss_lower += 2; \ - else \ - /* adjust to 32-bit boundary + EOL */ \ - mss_lower += 2; \ +#define MPTCP_COMPUTE_LEN { \ + mss_lower = sizeof (struct mptcp_dss_ack_opt); \ + if (mp_tp->mpt_flags & MPTCPF_CHECKSUM) \ + mss_lower += 2; \ + else \ + /* adjust to 32-bit boundary + EOL */ \ + mss_lower += 2; \ } - if (mp_tp == NULL) - return (0); + if (mp_tp == NULL) { + return 0; + } mpte_lock_assert_held(mp_tp->mpt_mpte); @@ -5472,7 +5726,7 @@ mptcp_adj_mss(struct tcpcb *tp, boolean_t mtudisc) } } - return (mss_lower); + return mss_lower; } /* @@ 
-5542,16 +5796,17 @@ mptcp_pcblist SYSCTL_HANDLER_ARGS conninfo_mptcp_t mptcpci; mptcp_flow_t *flows = NULL; - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } lck_mtx_lock(&mtcbinfo.mppi_lock); if (req->oldptr == USER_ADDR_NULL) { size_t n = mtcbinfo.mppi_count; lck_mtx_unlock(&mtcbinfo.mppi_lock); - req->oldidx = (n + n/8) * sizeof(conninfo_mptcp_t) + - 4 * (n + n/8) * sizeof(mptcp_flow_t); - return (0); + req->oldidx = (n + n / 8) * sizeof(conninfo_mptcp_t) + + 4 * (n + n / 8) * sizeof(mptcp_flow_t); + return 0; } TAILQ_FOREACH(mpp, &mtcbinfo.mppi_pcbs, mpp_entry) { flows = NULL; @@ -5615,13 +5870,14 @@ mptcp_pcblist SYSCTL_HANDLER_ARGS if (flows) { error = SYSCTL_OUT(req, flows, len); FREE(flows, M_TEMP); - if (error) + if (error) { break; + } } } lck_mtx_unlock(&mtcbinfo.mppi_lock); - return (error); + return error; } SYSCTL_PROC(_net_inet_mptcp, OID_AUTO, pcblist, CTLFLAG_RD | CTLFLAG_LOCKED, @@ -5637,15 +5893,17 @@ mptcp_set_notsent_lowat(struct mptses *mpte, int optval) struct mptcb *mp_tp = NULL; int error = 0; - if (mpte->mpte_mppcb->mpp_flags & MPP_ATTACHED) + if (mpte->mpte_mppcb->mpp_flags & MPP_ATTACHED) { mp_tp = mpte->mpte_mptcb; + } - if (mp_tp) + if (mp_tp) { mp_tp->mpt_notsent_lowat = optval; - else + } else { error = EINVAL; + } - return (error); + return error; } u_int32_t @@ -5653,13 +5911,15 @@ mptcp_get_notsent_lowat(struct mptses *mpte) { struct mptcb *mp_tp = NULL; - if (mpte->mpte_mppcb->mpp_flags & MPP_ATTACHED) + if (mpte->mpte_mppcb->mpp_flags & MPP_ATTACHED) { mp_tp = mpte->mpte_mptcb; + } - if (mp_tp) - return (mp_tp->mpt_notsent_lowat); - else - return (0); + if (mp_tp) { + return mp_tp->mpt_notsent_lowat; + } else { + return 0; + } } int @@ -5674,7 +5934,7 @@ mptcp_notsent_lowat_check(struct socket *so) mpp = mpsotomppcb(so); if (mpp == NULL || mpp->mpp_state == MPPCB_STATE_DEAD) { - return (0); + return 0; } mpte = mptompte(mpp); @@ -5690,8 +5950,8 @@ mptcp_notsent_lowat_check(struct socket *so) "lowat %d notsent %d actual %d \n", mp_tp->mpt_notsent_lowat, notsent, notsent - (mp_tp->mpt_sndnxt - mp_tp->mpt_snduna)), - MPTCP_SENDER_DBG , MPTCP_LOGLVL_VERBOSE); - return (1); + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + return 1; } /* When Nagle's algorithm is not disabled, it is better @@ -5705,7 +5965,7 @@ mptcp_notsent_lowat_check(struct socket *so) struct tcpcb *tp = intotcpcb(sotoinpcb(subf_so)); notsent = so->so_snd.sb_cc - - (tp->snd_nxt - tp->snd_una); + (tp->snd_nxt - tp->snd_una); if ((tp->t_flags & TF_NODELAY) == 0 && notsent > 0 && (notsent <= (int)tp->t_maxseg)) { @@ -5714,11 +5974,11 @@ mptcp_notsent_lowat_check(struct socket *so) mptcplog((LOG_DEBUG, "MPTCP Sender: lowat %d notsent %d" " nodelay false \n", mp_tp->mpt_notsent_lowat, notsent), - MPTCP_SENDER_DBG , MPTCP_LOGLVL_VERBOSE); - return (retval); + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + return retval; } } - return (0); + return 0; } /* Using Symptoms Advisory to detect poor WiFi or poor Cell */ @@ -5729,16 +5989,17 @@ symptoms_advisory_t mptcp_advisory; static errno_t mptcp_symptoms_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, - void **unitinfo) + void **unitinfo) { #pragma unused(kctlref, sac, unitinfo) - if (OSIncrementAtomic(&mptcp_kern_skt_inuse) > 0) + if (OSIncrementAtomic(&mptcp_kern_skt_inuse) > 0) { os_log_error(mptcp_log_handle, "%s MPTCP kernel-control socket for Symptoms already open!", __func__); + } mptcp_kern_skt_unit = sac->sc_unit; - return (0); + return 0; } static void @@ -5760,11 
+6021,15 @@ mptcp_allow_uuid(uuid_t uuid) mp_so = mpp->mpp_socket; if (mp_so->so_flags & SOF_DELEGATED && - uuid_compare(uuid, mp_so->e_uuid)) + uuid_compare(uuid, mp_so->e_uuid)) { goto next; - else if (!(mp_so->so_flags & SOF_DELEGATED) && - uuid_compare(uuid, mp_so->last_uuid)) + } else if (!(mp_so->so_flags & SOF_DELEGATED) && + uuid_compare(uuid, mp_so->last_uuid)) { goto next; + } + + os_log(mptcp_log_handle, "%s - %lx: Got allowance for useApp\n", + __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte)); mpte->mpte_flags |= MPTE_ACCESS_GRANTED; @@ -5799,8 +6064,9 @@ mptcp_wifi_status_changed(void) mp_so = mpp->mpp_socket; /* Only handover-mode is purely driven by Symptom's Wi-Fi status */ - if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER) + if (mpte->mpte_svctype != MPTCP_SVCTYPE_HANDOVER) { goto next; + } mptcp_check_subflows_and_add(mpte); mptcp_check_subflows_and_remove(mpte); @@ -5827,10 +6093,11 @@ mptcp_ask_symptoms(struct mptses *mpte) mp_so = mptetoso(mpte); - if (mp_so->so_flags & SOF_DELEGATED) + if (mp_so->so_flags & SOF_DELEGATED) { pid = mp_so->e_pid; - else + } else { pid = mp_so->last_pid; + } p = proc_find(pid); if (p == PROC_NULL) { @@ -5840,25 +6107,27 @@ mptcp_ask_symptoms(struct mptses *mpte) ask.cmd = MPTCP_SYMPTOMS_ASK_UUID; - if (mp_so->so_flags & SOF_DELEGATED) + if (mp_so->so_flags & SOF_DELEGATED) { uuid_copy(ask.uuid, mp_so->e_uuid); - else + } else { uuid_copy(ask.uuid, mp_so->last_uuid); + } prio = proc_get_effective_task_policy(proc_task(p), TASK_POLICY_ROLE); - if (prio == TASK_BACKGROUND_APPLICATION) + if (prio == TASK_BACKGROUND_APPLICATION) { ask.priority = MPTCP_SYMPTOMS_BACKGROUND; - else if (prio == TASK_FOREGROUND_APPLICATION) + } else if (prio == TASK_FOREGROUND_APPLICATION) { ask.priority = MPTCP_SYMPTOMS_FOREGROUND; - else + } else { ask.priority = MPTCP_SYMPTOMS_UNKNOWN; + } err = ctl_enqueuedata(mptcp_kern_ctrl_ref, mptcp_kern_skt_unit, - &ask, sizeof(ask), CTL_DATA_EOR); + &ask, sizeof(ask), CTL_DATA_EOR); os_log_debug(mptcp_log_handle, "%s asked symptoms about pid %u, prio %u, err %d\n", - __func__, pid, ask.priority, err); + __func__, pid, ask.priority, err); proc_rele(p); @@ -5866,34 +6135,37 @@ mptcp_ask_symptoms(struct mptses *mpte) static errno_t mptcp_symptoms_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, - void *unitinfo) + void *unitinfo) { #pragma unused(kctlref, kcunit, unitinfo) OSDecrementAtomic(&mptcp_kern_skt_inuse); - return (0); + return 0; } static errno_t mptcp_symptoms_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, - mbuf_t m, int flags) + mbuf_t m, int flags) { #pragma unused(kctlref, unitinfo, flags) - symptoms_advisory_t *sa = NULL; + symptoms_advisory_t *sa = NULL; - if (kcunit != mptcp_kern_skt_unit) + if (kcunit != mptcp_kern_skt_unit) { os_log_error(mptcp_log_handle, "%s kcunit %u is different from expected one %u\n", - __func__, kcunit, mptcp_kern_skt_unit); + __func__, kcunit, mptcp_kern_skt_unit); + } if (mbuf_pkthdr_len(m) < sizeof(*sa)) { mbuf_freem(m); - return (EINVAL); + return EINVAL; } if (mbuf_len(m) < sizeof(*sa)) { + os_log_error(mptcp_log_handle, "%s: mbuf is %lu but need %lu\n", + __func__, mbuf_len(m), sizeof(*sa)); mbuf_freem(m); - return (EINVAL); + return EINVAL; } sa = mbuf_data(m); @@ -5908,28 +6180,40 @@ mptcp_symptoms_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, if ((sa->sa_wifi_status & (SYMPTOMS_ADVISORY_WIFI_BAD | SYMPTOMS_ADVISORY_WIFI_OK)) != - (SYMPTOMS_ADVISORY_WIFI_BAD | SYMPTOMS_ADVISORY_WIFI_OK)) + (SYMPTOMS_ADVISORY_WIFI_BAD | 
SYMPTOMS_ADVISORY_WIFI_OK)) { mptcp_advisory.sa_wifi_status = sa->sa_wifi_status; + } - if (old_wifi_status != mptcp_advisory.sa_wifi_status) + if (old_wifi_status != mptcp_advisory.sa_wifi_status) { mptcp_wifi_status_changed(); + } } else if (sa->sa_nwk_status == SYMPTOMS_ADVISORY_NOCOMMENT) { mptcplog((LOG_DEBUG, "%s: NOCOMMENT wifi %d\n", __func__, mptcp_advisory.sa_wifi_status), MPTCP_EVENTS_DBG, MPTCP_LOGLVL_VERBOSE); } else if (sa->sa_nwk_status == SYMPTOMS_ADVISORY_USEAPP) { uuid_t uuid; + errno_t err; - mptcplog((LOG_DEBUG, "%s Got response about useApp\n", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + if (mbuf_len(m) < sizeof(uuid_t) + sizeof(*sa)) { + os_log_error(mptcp_log_handle, "%s: mbuf is %lu but need %lu\n", + __func__, mbuf_len(m), sizeof(uuid_t) + sizeof(*sa)); + mbuf_free(m); + return EINVAL; + } - uuid_copy(uuid, (unsigned char *)(sa + 1)); + err = mbuf_copydata(m, sizeof(*sa), sizeof(uuid_t), uuid); + if (err) { + os_log_error(mptcp_log_handle, "%s: mbuf_copydata returned %d\n", __func__, err); + mbuf_free(m); + return err; + } mptcp_allow_uuid(uuid); } mbuf_freem(m); - return (0); + return 0; } void @@ -5959,17 +6243,18 @@ int mptcp_is_wifi_unusable(struct mptses *mpte) { if (mpte->mpte_flags & MPTE_FIRSTPARTY) { - if (mptcp_advisory.sa_wifi_status) - return ((mptcp_advisory.sa_wifi_status & SYMPTOMS_ADVISORY_WIFI_BAD) ? 1 : 0); + if (mptcp_advisory.sa_wifi_status) { + return (mptcp_advisory.sa_wifi_status & SYMPTOMS_ADVISORY_WIFI_BAD) ? 1 : 0; + } /* * If it's a first-party app and we don't have any info * about the Wi-Fi state, let's be pessimistic. */ - return (-1); + return -1; } - return ((mptcp_advisory.sa_wifi_status & SYMPTOMS_ADVISORY_WIFI_BAD) ? 1 : 0); + return (mptcp_advisory.sa_wifi_status & SYMPTOMS_ADVISORY_WIFI_BAD) ? 
1 : 0; } boolean_t @@ -5978,11 +6263,12 @@ mptcp_subflow_is_bad(struct mptses *mpte, struct mptsub *mpts) struct tcpcb *tp = sototcpcb(mpts->mpts_socket); int fail_thresh = mptcp_fail_thresh; - if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) + if (mpte->mpte_svctype == MPTCP_SVCTYPE_HANDOVER) { fail_thresh *= 2; + } - return (tp->t_rxtshift >= fail_thresh && - (mptetoso(mpte)->so_snd.sb_cc || mpte->mpte_reinjectq)); + return tp->t_rxtshift >= fail_thresh && + (mptetoso(mpte)->so_snd.sb_cc || mpte->mpte_reinjectq); } /* If TFO data is successfully acked, it must be dropped from the mptcp so */ @@ -6023,9 +6309,9 @@ mptcp_drop_tfo_data(struct mptses *mpte, struct mptsub *mpts) sbdrop(&mp_so->so_snd, (int)mp_droplen); } mptcplog((LOG_DEBUG, "%s: mp_so 0x%llx cid %d TFO tcp len %d mptcp len %d\n", - __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), - mpts->mpts_connid, tcp_droplen, mp_droplen), - MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); + __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so), + mpts->mpts_connid, tcp_droplen, mp_droplen), + MPTCP_SENDER_DBG, MPTCP_LOGLVL_VERBOSE); } } @@ -6042,7 +6328,7 @@ mptcp_freeq(struct mptcb *mp_tp) rv = 1; } mp_tp->mpt_reassqlen = 0; - return (rv); + return rv; } static int @@ -6053,14 +6339,14 @@ mptcp_post_event(u_int32_t event_code, int value) memset(&ev_msg, 0, sizeof(ev_msg)); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_MPTCP_SUBCLASS; - ev_msg.event_code = event_code; + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_MPTCP_SUBCLASS; + ev_msg.event_code = event_code; event_data.value = value; - ev_msg.dv[0].data_ptr = &event_data; + ev_msg.dv[0].data_ptr = &event_data; ev_msg.dv[0].data_length = sizeof(event_data); return kev_post_msg(&ev_msg); @@ -6072,24 +6358,27 @@ mptcp_set_cellicon(struct mptses *mpte) int error; /* First-party apps (Siri) don't flip the cellicon */ - if (mpte->mpte_flags & MPTE_FIRSTPARTY) + if (mpte->mpte_flags & MPTE_FIRSTPARTY) { return; + } /* Remember the last time we set the cellicon (see mptcp_unset_cellicon) */ mptcp_last_cellicon_set = tcp_now; /* If cellicon is already set, get out of here! */ - if (OSTestAndSet(7, &mptcp_cellicon_is_set)) + if (OSTestAndSet(7, &mptcp_cellicon_is_set)) { return; + } error = mptcp_post_event(KEV_MPTCP_CELLUSE, 1); - if (error) + if (error) { mptcplog((LOG_ERR, "%s: Setting cellicon failed with %d\n", - __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - else + __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + } else { mptcplog((LOG_DEBUG, "%s successfully set the cellicon\n", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + } } void @@ -6098,8 +6387,9 @@ mptcp_unset_cellicon(void) int error; /* If cellicon is already unset, get out of here! */ - if (OSTestAndClear(7, &mptcp_cellicon_is_set)) + if (OSTestAndClear(7, &mptcp_cellicon_is_set)) { return; + } /* * If during the past MPTCP_CELLICON_TOGGLE_RATE seconds we didn't @@ -6107,19 +6397,20 @@ * it again. 
*/ if (TSTMP_GT(mptcp_last_cellicon_set + MPTCP_CELLICON_TOGGLE_RATE, - tcp_now)) { + tcp_now)) { OSTestAndSet(7, &mptcp_cellicon_is_set); return; } error = mptcp_post_event(KEV_MPTCP_CELLUSE, 0); - if (error) + if (error) { mptcplog((LOG_ERR, "%s: Unsetting cellicon failed with %d\n", - __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - else + __func__, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + } else { mptcplog((LOG_DEBUG, "%s successfully unset the cellicon\n", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + } } void @@ -6130,15 +6421,18 @@ mptcp_reset_rexmit_state(struct tcpcb *tp) struct socket *so; inp = tp->t_inpcb; - if (inp == NULL) + if (inp == NULL) { return; + } so = inp->inp_socket; - if (so == NULL) + if (so == NULL) { return; + } - if (!(so->so_flags & SOF_MP_SUBFLOW)) + if (!(so->so_flags & SOF_MP_SUBFLOW)) { return; + } mpts = tp->t_mpsub; @@ -6153,4 +6447,3 @@ mptcp_reset_keepalive(struct tcpcb *tp) mpts->mpts_flags &= ~MPTSF_READ_STALL; } - diff --git a/bsd/netinet/mptcp_timer.c b/bsd/netinet/mptcp_timer.c index 969ceddf8..b376cea47 100644 --- a/bsd/netinet/mptcp_timer.c +++ b/bsd/netinet/mptcp_timer.c @@ -51,20 +51,20 @@ */ static u_int32_t mptcp_rto = 3; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, rto, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_rto, 0, "MPTCP Retransmission Timeout"); + &mptcp_rto, 0, "MPTCP Retransmission Timeout"); static int mptcp_nrtos = 3; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, nrto, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_rto, 0, "MPTCP Retransmissions"); + &mptcp_rto, 0, "MPTCP Retransmissions"); /* * MPTCP connections timewait interval in seconds. */ static u_int32_t mptcp_tw = 60; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, tw, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_tw, 0, "MPTCP Timewait Period"); + &mptcp_tw, 0, "MPTCP Timewait Period"); -#define TIMEVAL_TO_HZ(_tv_) ((_tv_).tv_sec * hz + (_tv_).tv_usec / hz) +#define TIMEVAL_TO_HZ(_tv_) ((_tv_).tv_sec * hz + (_tv_).tv_usec / hz) static int mptcp_timer_demux(struct mptses *mpte, uint32_t now_msecs) @@ -78,10 +78,11 @@ mptcp_timer_demux(struct mptses *mpte, uint32_t now_msecs) mpte_lock_assert_held(mpte); switch (mp_tp->mpt_timer_vals) { case MPTT_REXMT: - if (mp_tp->mpt_rxtstart == 0) + if (mp_tp->mpt_rxtstart == 0) { break; + } if ((now_msecs - mp_tp->mpt_rxtstart) > - (mptcp_rto*hz)) { + (mptcp_rto * hz)) { if (MPTCP_SEQ_GT(mp_tp->mpt_snduna, mp_tp->mpt_rtseq)) { mp_tp->mpt_timer_vals = 0; mp_tp->mpt_rtseq = 0; @@ -94,9 +95,9 @@ mptcp_timer_demux(struct mptses *mpte, uint32_t now_msecs) } else { mp_tp->mpt_sndnxt = mp_tp->mpt_rtseq; os_log_info(mptcp_log_handle, - "%s: REXMT %d sndnxt %u\n", - __func__, mp_tp->mpt_rxtshift, - (uint32_t)mp_tp->mpt_sndnxt); + "%s: REXMT %d sndnxt %u\n", + __func__, mp_tp->mpt_rxtshift, + (uint32_t)mp_tp->mpt_sndnxt); mptcp_output(mpte); } } else { @@ -105,8 +106,9 @@ mptcp_timer_demux(struct mptses *mpte, uint32_t now_msecs) break; case MPTT_TW: /* Allows for break before make XXX */ - if (mp_tp->mpt_timewait == 0) + if (mp_tp->mpt_timewait == 0) { VERIFY(0); + } if ((now_msecs - mp_tp->mpt_timewait) > (mptcp_tw * hz)) { mp_tp->mpt_softerror = ETIMEDOUT; @@ -122,7 +124,7 @@ mptcp_timer_demux(struct mptses *mpte, uint32_t now_msecs) break; } - return (resched_timer); + return resched_timer; } uint32_t @@ -148,12 +150,13 @@ mptcp_timer(struct mppcbinfo *mppi) mpte_lock(mpte); VERIFY(mpp->mpp_flags & MPP_ATTACHED); - if (mptcp_timer_demux(mpte, now_msecs)) + if (mptcp_timer_demux(mpte, now_msecs)) { resched_timer = 1; + } 
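
For orientation, the MPTT_REXMT arm of mptcp_timer_demux() above reduces to a deadline test on tick counters: once more than mptcp_rto * hz ticks have passed since mpt_rxtstart was armed, the stack rewinds mpt_sndnxt to mpt_rtseq and retransmits via mptcp_output(). A self-contained sketch of just that deadline test follows; the hz value and all names are local assumptions of this example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the MPTT_REXMT deadline test above: the retransmission is
 * due once more than rto * hz ticks have elapsed since the timer was
 * armed (rxtstart != 0). EXAMPLE_HZ and the names are assumptions.
 */
#define EXAMPLE_HZ	100U	/* assumed ticks per second */

static const uint32_t example_rto = 3;	/* seconds; mirrors the mptcp_rto sysctl default */

static bool
rexmt_due(uint32_t now, uint32_t rxtstart)
{
	if (rxtstart == 0) {
		return false;	/* retransmit timer not armed */
	}
	/* unsigned subtraction stays correct across tick-counter wrap */
	return (now - rxtstart) > (example_rto * EXAMPLE_HZ);
}

int
main(void)
{
	/* armed at tick 1000; due strictly after tick 1300 */
	printf("%d %d\n", rexmt_due(1300, 1000), rexmt_due(1301, 1000));
	return 0;
}

mptcp_timer() then folds each connection's verdict into a single reschedule flag, which is what the resched_timer handling in the loop above does.
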
mpte_unlock(mpte); } - return (resched_timer); + return resched_timer; } void diff --git a/bsd/netinet/mptcp_timer.h b/bsd/netinet/mptcp_timer.h index 35ee57c48..cab9306d6 100644 --- a/bsd/netinet/mptcp_timer.h +++ b/bsd/netinet/mptcp_timer.h @@ -27,12 +27,12 @@ */ #ifndef _NETINET_MPTCP_TIMER_H_ -#define _NETINET_MPTCP_TIMER_H_ +#define _NETINET_MPTCP_TIMER_H_ #ifdef BSD_KERNEL_PRIVATE -#define MPT_REXMT 0 /* retransmit */ -#define MPT_TIMEWAIT 1 /* timewait timer */ +#define MPT_REXMT 0 /* retransmit */ +#define MPT_TIMEWAIT 1 /* timewait timer */ __BEGIN_DECLS extern uint32_t mptcp_timer(struct mppcbinfo *); diff --git a/bsd/netinet/mptcp_usrreq.c b/bsd/netinet/mptcp_usrreq.c index b3de24b3d..a73d3339f 100644 --- a/bsd/netinet/mptcp_usrreq.c +++ b/bsd/netinet/mptcp_usrreq.c @@ -82,33 +82,33 @@ static int mptcp_default_tcp_optval(struct mptses *, struct sockopt *, int *); static int mptcp_usr_preconnect(struct socket *so); struct pr_usrreqs mptcp_usrreqs = { - .pru_attach = mptcp_usr_attach, - .pru_connectx = mptcp_usr_connectx, - .pru_control = mptcp_usr_control, - .pru_detach = mptcp_usr_detach, - .pru_disconnect = mptcp_usr_disconnect, - .pru_disconnectx = mptcp_usr_disconnectx, - .pru_peeraddr = mp_getpeeraddr, - .pru_rcvd = mptcp_usr_rcvd, - .pru_send = mptcp_usr_send, - .pru_shutdown = mptcp_usr_shutdown, - .pru_sockaddr = mp_getsockaddr, - .pru_sosend = mptcp_usr_sosend, - .pru_soreceive = soreceive, - .pru_socheckopt = mptcp_usr_socheckopt, - .pru_preconnect = mptcp_usr_preconnect, + .pru_attach = mptcp_usr_attach, + .pru_connectx = mptcp_usr_connectx, + .pru_control = mptcp_usr_control, + .pru_detach = mptcp_usr_detach, + .pru_disconnect = mptcp_usr_disconnect, + .pru_disconnectx = mptcp_usr_disconnectx, + .pru_peeraddr = mp_getpeeraddr, + .pru_rcvd = mptcp_usr_rcvd, + .pru_send = mptcp_usr_send, + .pru_shutdown = mptcp_usr_shutdown, + .pru_sockaddr = mp_getsockaddr, + .pru_sosend = mptcp_usr_sosend, + .pru_soreceive = soreceive, + .pru_socheckopt = mptcp_usr_socheckopt, + .pru_preconnect = mptcp_usr_preconnect, }; #if (DEVELOPMENT || DEBUG) static int mptcp_disable_entitlements = 0; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, disable_entitlements, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_disable_entitlements, 0, "Disable Multipath TCP Entitlement Checking"); + &mptcp_disable_entitlements, 0, "Disable Multipath TCP Entitlement Checking"); #endif int mptcp_developer_mode = 0; SYSCTL_INT(_net_inet_mptcp, OID_AUTO, allow_aggregate, CTLFLAG_RW | CTLFLAG_LOCKED, - &mptcp_developer_mode, 0, "Allow the Multipath aggregation mode"); + &mptcp_developer_mode, 0, "Allow the Multipath aggregation mode"); /* @@ -123,17 +123,19 @@ mptcp_usr_attach(struct socket *mp_so, int proto, struct proc *p) VERIFY(mpsotomppcb(mp_so) == NULL); error = mptcp_attach(mp_so, p); - if (error != 0) + if (error != 0) { goto out; + } /* * XXX: adi@apple.com * * Might want to use a different SO_LINGER timeout than TCP's? */ - if ((mp_so->so_options & SO_LINGER) && mp_so->so_linger == 0) + if ((mp_so->so_options & SO_LINGER) && mp_so->so_linger == 0) { mp_so->so_linger = TCP_LINGERTIME * hz; + } out: - return (error); + return error; } /* @@ -147,9 +149,9 @@ mptcp_usr_detach(struct socket *mp_so) if (mpp == NULL || mpp->mpp_state == MPPCB_STATE_DEAD) { mptcplog((LOG_ERR, "%s state: %d\n", __func__, - mpp ? mpp->mpp_state : -1), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (EINVAL); + mpp ? 
mpp->mpp_state : -1), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + return EINVAL; } /* @@ -162,7 +164,7 @@ mptcp_usr_detach(struct socket *mp_so) mptcp_disconnect(mpte); - return (0); + return 0; } /* @@ -180,18 +182,21 @@ mptcp_attach(struct socket *mp_so, struct proc *p) if (mp_so->so_snd.sb_hiwat == 0 || mp_so->so_rcv.sb_hiwat == 0) { error = soreserve(mp_so, tcp_sendspace, tcp_recvspace); - if (error != 0) + if (error != 0) { goto out; + } } if (mp_so->so_snd.sb_preconn_hiwat == 0) { soreserve_preconnect(mp_so, 2048); } - if ((mp_so->so_rcv.sb_flags & SB_USRSIZE) == 0) + if ((mp_so->so_rcv.sb_flags & SB_USRSIZE) == 0) { mp_so->so_rcv.sb_flags |= SB_AUTOSIZE; - if ((mp_so->so_snd.sb_flags & SB_USRSIZE) == 0) + } + if ((mp_so->so_snd.sb_flags & SB_USRSIZE) == 0) { mp_so->so_snd.sb_flags |= SB_AUTOSIZE; + } /* * MPTCP socket buffers cannot be compressed, due to the @@ -212,7 +217,7 @@ mptcp_attach(struct socket *mp_so, struct proc *p) mp_tp = mpte->mpte_mptcb; VERIFY(mp_tp != NULL); out: - return (error); + return error; } static int @@ -230,28 +235,29 @@ mptcp_entitlement_check(struct socket *mp_so) } #if (DEVELOPMENT || DEBUG) - if (mptcp_disable_entitlements) + if (mptcp_disable_entitlements) { goto grant; + } #endif if (soopt_cred_check(mp_so, PRIV_NET_PRIVILEGED_MULTIPATH, TRUE)) { mptcplog((LOG_NOTICE, "%s Multipath Capability needed\n", __func__), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_LOG); - return (-1); + return -1; } if (mpte->mpte_svctype > MPTCP_SVCTYPE_INTERACTIVE && mptcp_developer_mode == 0) { mptcplog((LOG_NOTICE, "%s need to set allow_aggregate sysctl\n", - __func__), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_LOG); - return (-1); + __func__), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_LOG); + return -1; } grant: mptcplog((LOG_NOTICE, "%s entitlement granted for %u\n", __func__, mpte->mpte_svctype), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_LOG); - return (0); + return 0; } /* @@ -277,7 +283,7 @@ mptcp_connectx(struct mptses *mpte, struct sockaddr *src, error = mptcp_subflow_add(mpte, src, dst, ifscope, pcid); - return (error); + return error; } /* @@ -293,13 +299,13 @@ mptcp_usr_connectx(struct socket *mp_so, struct sockaddr *src, struct mppcb *mpp = mpsotomppcb(mp_so); struct mptses *mpte = NULL; struct mptcb *mp_tp = NULL; - user_ssize_t datalen; + user_ssize_t datalen; int error = 0; if (mpp == NULL || mpp->mpp_state == MPPCB_STATE_DEAD) { mptcplog((LOG_ERR, "%s state %d\n", __func__, - mpp ? mpp->mpp_state : -1), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + mpp ? 
mpp->mpp_state : -1), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = EINVAL; goto out; } @@ -312,7 +318,7 @@ mptcp_usr_connectx(struct socket *mp_so, struct sockaddr *src, if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) { mptcplog((LOG_ERR, "%s fell back to TCP\n", __func__), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = EINVAL; goto out; } @@ -325,8 +331,8 @@ mptcp_usr_connectx(struct socket *mp_so, struct sockaddr *src, if (dst->sa_family == AF_INET && dst->sa_len != sizeof(mpte->__mpte_dst_v4)) { mptcplog((LOG_ERR, "%s IPv4 dst len %u\n", __func__, - dst->sa_len), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + dst->sa_len), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = EINVAL; goto out; } @@ -334,8 +340,8 @@ mptcp_usr_connectx(struct socket *mp_so, struct sockaddr *src, if (dst->sa_family == AF_INET6 && dst->sa_len != sizeof(mpte->__mpte_dst_v6)) { mptcplog((LOG_ERR, "%s IPv6 dst len %u\n", __func__, - dst->sa_len), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + dst->sa_len), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = EINVAL; goto out; } @@ -349,7 +355,7 @@ mptcp_usr_connectx(struct socket *mp_so, struct sockaddr *src, mpte->mpte_flags |= MPTE_SVCTYPE_CHECKED; } - if ((mp_so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) { + if ((mp_so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0) { memcpy(&mpte->mpte_dst, dst, dst->sa_len); } @@ -362,8 +368,8 @@ mptcp_usr_connectx(struct socket *mp_so, struct sockaddr *src, if (src->sa_family == AF_INET && src->sa_len != sizeof(mpte->__mpte_src_v4)) { mptcplog((LOG_ERR, "%s IPv4 src len %u\n", __func__, - src->sa_len), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + src->sa_len), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = EINVAL; goto out; } @@ -371,13 +377,13 @@ mptcp_usr_connectx(struct socket *mp_so, struct sockaddr *src, if (src->sa_family == AF_INET6 && src->sa_len != sizeof(mpte->__mpte_src_v6)) { mptcplog((LOG_ERR, "%s IPv6 src len %u\n", __func__, - src->sa_len), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + src->sa_len), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); error = EINVAL; goto out; } - if ((mp_so->so_state & (SS_ISCONNECTED|SS_ISCONNECTING)) == 0) { + if ((mp_so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING)) == 0) { memcpy(&mpte->mpte_src, src, src->sa_len); } } @@ -391,17 +397,19 @@ mptcp_usr_connectx(struct socket *mp_so, struct sockaddr *src, error = mp_so->so_proto->pr_usrreqs->pru_sosend(mp_so, NULL, (uio_t) auio, NULL, NULL, 0); - if (error == 0 || error == EWOULDBLOCK) + if (error == 0 || error == EWOULDBLOCK) { *bytes_written = datalen - uio_resid(auio); + } - if (error == EWOULDBLOCK) + if (error == EWOULDBLOCK) { error = EINPROGRESS; + } socket_lock(mp_so, 0); } out: - return (error); + return error; } /* @@ -410,17 +418,18 @@ out: static int mptcp_getassocids(struct mptses *mpte, uint32_t *cnt, user_addr_t aidp) { - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ /* MPTCP has at most 1 association */ *cnt = (mpte->mpte_associd != SAE_ASSOCID_ANY) ? 1 : 0; /* just asking how many there are? 
*/ - if (aidp == USER_ADDR_NULL) - return (0); + if (aidp == USER_ADDR_NULL) { + return 0; + } - return (copyout(&mpte->mpte_associd, aidp, - sizeof (mpte->mpte_associd))); + return copyout(&mpte->mpte_associd, aidp, + sizeof(mpte->mpte_associd)); } /* @@ -433,27 +442,30 @@ mptcp_getconnids(struct mptses *mpte, sae_associd_t aid, uint32_t *cnt, struct mptsub *mpts; int error = 0; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL && - aid != mpte->mpte_associd) - return (EINVAL); + aid != mpte->mpte_associd) { + return EINVAL; + } *cnt = mpte->mpte_numflows; /* just asking how many there are? */ - if (cidp == USER_ADDR_NULL) - return (0); + if (cidp == USER_ADDR_NULL) { + return 0; + } TAILQ_FOREACH(mpts, &mpte->mpte_subflows, mpts_entry) { if ((error = copyout(&mpts->mpts_connid, cidp, - sizeof (mpts->mpts_connid))) != 0) + sizeof(mpts->mpts_connid))) != 0) { break; + } - cidp += sizeof (mpts->mpts_connid); + cidp += sizeof(mpts->mpts_connid); } - return (error); + return error; } /* @@ -480,21 +492,28 @@ mptcp_getconninfo(struct mptses *mpte, sae_connid_t *cid, uint32_t *flags, struct mptcb *mp_tp = mpte->mpte_mptcb; struct conninfo_multipathtcp mptcp_ci; - if (*aux_len != 0 && *aux_len != sizeof(mptcp_ci)) - return (EINVAL); + if (*aux_len != 0 && *aux_len != sizeof(mptcp_ci)) { + return EINVAL; + } - if (mp_so->so_state & SS_ISCONNECTING) + if (mp_so->so_state & SS_ISCONNECTING) { *flags |= CIF_CONNECTING; - if (mp_so->so_state & SS_ISCONNECTED) + } + if (mp_so->so_state & SS_ISCONNECTED) { *flags |= CIF_CONNECTED; - if (mp_so->so_state & SS_ISDISCONNECTING) + } + if (mp_so->so_state & SS_ISDISCONNECTING) { *flags |= CIF_DISCONNECTING; - if (mp_so->so_state & SS_ISDISCONNECTED) + } + if (mp_so->so_state & SS_ISDISCONNECTED) { *flags |= CIF_DISCONNECTED; - if (!(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP)) + } + if (!(mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP)) { *flags |= CIF_MP_CAPABLE; - if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) + } + if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) { *flags |= CIF_MP_DEGRADED; + } *src_len = 0; *dst_len = 0; @@ -506,7 +525,7 @@ mptcp_getconninfo(struct mptses *mpte, sae_connid_t *cid, uint32_t *flags, unsigned long i = 0; int initial_info_set = 0; - bzero(&mptcp_ci, sizeof (mptcp_ci)); + bzero(&mptcp_ci, sizeof(mptcp_ci)); mptcp_ci.mptcpci_subflow_count = mpte->mpte_numflows; mptcp_ci.mptcpci_switch_count = mpte->mpte_subflow_switches; @@ -514,8 +533,9 @@ mptcp_getconninfo(struct mptses *mpte, sae_connid_t *cid, uint32_t *flags, memcpy(mptcp_ci.mptcpci_itfstats, mpte->mpte_itfstats, sizeof(mptcp_ci.mptcpci_itfstats)); TAILQ_FOREACH(mpts, &mpte->mpte_subflows, mpts_entry) { - if (i >= sizeof(mptcp_ci.mptcpci_subflow_connids) / sizeof(sae_connid_t)) + if (i >= sizeof(mptcp_ci.mptcpci_subflow_connids) / sizeof(sae_connid_t)) { break; + } mptcp_ci.mptcpci_subflow_connids[i] = mpts->mpts_connid; if (mpts->mpts_flags & MPTSF_INITIAL_SUB) { @@ -536,61 +556,69 @@ mptcp_getconninfo(struct mptses *mpte, sae_connid_t *cid, uint32_t *flags, mptcp_ci.mptcpci_init_txbytes = mpte->mpte_init_txbytes; } - if (mpte->mpte_flags & MPTE_FIRSTPARTY) + if (mpte->mpte_flags & MPTE_FIRSTPARTY) { mptcp_ci.mptcpci_flags |= MPTCPCI_FIRSTPARTY; + } error = copyout(&mptcp_ci, aux_data, sizeof(mptcp_ci)); if (error != 0) { mptcplog((LOG_ERR, "%s copyout failed: %d\n", - __func__, error), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (error); + __func__, 
error), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + return error; } } - return (0); + return 0; } TAILQ_FOREACH(mpts, &mpte->mpte_subflows, mpts_entry) { - if (mpts->mpts_connid == *cid || *cid == SAE_CONNID_ANY) + if (mpts->mpts_connid == *cid || *cid == SAE_CONNID_ANY) { break; + } + } + if (mpts == NULL) { + return (*cid == SAE_CONNID_ANY) ? ENXIO : EINVAL; } - if (mpts == NULL) - return ((*cid == SAE_CONNID_ANY) ? ENXIO : EINVAL); so = mpts->mpts_socket; inp = sotoinpcb(so); - if (inp->inp_vflag & INP_IPV4) + if (inp->inp_vflag & INP_IPV4) { error = in_getconninfo(so, SAE_CONNID_ANY, flags, ifindex, - soerror, src, src_len, dst, dst_len, - aux_type, aux_data, aux_len); - else + soerror, src, src_len, dst, dst_len, + aux_type, aux_data, aux_len); + } else { error = in6_getconninfo(so, SAE_CONNID_ANY, flags, ifindex, - soerror, src, src_len, dst, dst_len, - aux_type, aux_data, aux_len); + soerror, src, src_len, dst, dst_len, + aux_type, aux_data, aux_len); + } if (error != 0) { mptcplog((LOG_ERR, "%s error from in_getconninfo %d\n", - __func__, error), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (error); + __func__, error), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + return error; } - if (mpts->mpts_flags & MPTSF_MP_CAPABLE) + if (mpts->mpts_flags & MPTSF_MP_CAPABLE) { *flags |= CIF_MP_CAPABLE; - if (mpts->mpts_flags & MPTSF_MP_DEGRADED) + } + if (mpts->mpts_flags & MPTSF_MP_DEGRADED) { *flags |= CIF_MP_DEGRADED; - if (mpts->mpts_flags & MPTSF_MP_READY) + } + if (mpts->mpts_flags & MPTSF_MP_READY) { *flags |= CIF_MP_READY; - if (mpts->mpts_flags & MPTSF_ACTIVE) + } + if (mpts->mpts_flags & MPTSF_ACTIVE) { *flags |= CIF_MP_ACTIVE; + } mptcplog((LOG_DEBUG, "%s: cid %d flags %x \n", __func__, - mpts->mpts_connid, mpts->mpts_flags), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); + mpts->mpts_connid, mpts->mpts_flags), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_VERBOSE); - return (0); + return 0; } /* @@ -612,72 +640,78 @@ mptcp_usr_control(struct socket *mp_so, u_long cmd, caddr_t data, mpte = mptompte(mpp); VERIFY(mpte != NULL); - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ switch (cmd) { - case SIOCGASSOCIDS32: { /* struct so_aidreq32 */ + case SIOCGASSOCIDS32: { /* struct so_aidreq32 */ struct so_aidreq32 aidr; - bcopy(data, &aidr, sizeof (aidr)); + bcopy(data, &aidr, sizeof(aidr)); error = mptcp_getassocids(mpte, &aidr.sar_cnt, aidr.sar_aidp); - if (error == 0) - bcopy(&aidr, data, sizeof (aidr)); + if (error == 0) { + bcopy(&aidr, data, sizeof(aidr)); + } break; } - case SIOCGASSOCIDS64: { /* struct so_aidreq64 */ + case SIOCGASSOCIDS64: { /* struct so_aidreq64 */ struct so_aidreq64 aidr; - bcopy(data, &aidr, sizeof (aidr)); + bcopy(data, &aidr, sizeof(aidr)); error = mptcp_getassocids(mpte, &aidr.sar_cnt, aidr.sar_aidp); - if (error == 0) - bcopy(&aidr, data, sizeof (aidr)); + if (error == 0) { + bcopy(&aidr, data, sizeof(aidr)); + } break; } - case SIOCGCONNIDS32: { /* struct so_cidreq32 */ + case SIOCGCONNIDS32: { /* struct so_cidreq32 */ struct so_cidreq32 cidr; - bcopy(data, &cidr, sizeof (cidr)); + bcopy(data, &cidr, sizeof(cidr)); error = mptcp_getconnids(mpte, cidr.scr_aid, &cidr.scr_cnt, cidr.scr_cidp); - if (error == 0) - bcopy(&cidr, data, sizeof (cidr)); + if (error == 0) { + bcopy(&cidr, data, sizeof(cidr)); + } break; } - case SIOCGCONNIDS64: { /* struct so_cidreq64 */ + case SIOCGCONNIDS64: { /* struct so_cidreq64 */ struct so_cidreq64 cidr; - bcopy(data, &cidr, sizeof (cidr)); + bcopy(data, &cidr, sizeof(cidr)); 
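/*
 * [Editor's aside -- illustration only, not part of the patch.]
 * Every SIOCG* case in this switch has the same shape: bcopy the request
 * in, fill it, and bcopy it back out only on success.  A hypothetical
 * caller of this 64-bit variant that asks just for the subflow count by
 * leaving the connection-ID buffer NULL (mp_fd stands in for a connected
 * multipath socket; the request code and struct are private interfaces):
 *
 *	struct so_cidreq64 cidr;
 *	bzero(&cidr, sizeof(cidr));
 *	cidr.scr_aid = SAE_ASSOCID_ANY;
 *	if (ioctl(mp_fd, SIOCGCONNIDS64, &cidr) == 0)
 *		printf("subflows: %u\n", cidr.scr_cnt);
 */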
error = mptcp_getconnids(mpte, cidr.scr_aid, &cidr.scr_cnt, cidr.scr_cidp); - if (error == 0) - bcopy(&cidr, data, sizeof (cidr)); + if (error == 0) { + bcopy(&cidr, data, sizeof(cidr)); + } break; } - case SIOCGCONNINFO32: { /* struct so_cinforeq32 */ + case SIOCGCONNINFO32: { /* struct so_cinforeq32 */ struct so_cinforeq32 cifr; - bcopy(data, &cifr, sizeof (cifr)); + bcopy(data, &cifr, sizeof(cifr)); error = mptcp_getconninfo(mpte, &cifr.scir_cid, &cifr.scir_flags, &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src, &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len, &cifr.scir_aux_type, cifr.scir_aux_data, &cifr.scir_aux_len); - if (error == 0) - bcopy(&cifr, data, sizeof (cifr)); + if (error == 0) { + bcopy(&cifr, data, sizeof(cifr)); + } break; } - case SIOCGCONNINFO64: { /* struct so_cinforeq64 */ + case SIOCGCONNINFO64: { /* struct so_cinforeq64 */ struct so_cinforeq64 cifr; - bcopy(data, &cifr, sizeof (cifr)); + bcopy(data, &cifr, sizeof(cifr)); error = mptcp_getconninfo(mpte, &cifr.scir_cid, &cifr.scir_flags, &cifr.scir_ifindex, &cifr.scir_error, cifr.scir_src, &cifr.scir_src_len, cifr.scir_dst, &cifr.scir_dst_len, &cifr.scir_aux_type, cifr.scir_aux_data, &cifr.scir_aux_len); - if (error == 0) - bcopy(&cifr, data, sizeof (cifr)); + if (error == 0) { + bcopy(&cifr, data, sizeof(cifr)); + } break; } @@ -686,7 +720,7 @@ mptcp_usr_control(struct socket *mp_so, u_long cmd, caddr_t data, break; } out: - return (error); + return error; } static int @@ -696,7 +730,7 @@ mptcp_disconnect(struct mptses *mpte) struct mptcb *mp_tp; int error = 0; - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ mp_so = mptetoso(mpte); mp_tp = mpte->mpte_mptcb; @@ -710,7 +744,7 @@ mptcp_disconnect(struct mptses *mpte) /* if we're not detached, go thru socket state checks */ if (!(mp_so->so_flags & SOF_PCBCLEARING)) { - if (!(mp_so->so_state & (SS_ISCONNECTED| + if (!(mp_so->so_state & (SS_ISCONNECTED | SS_ISCONNECTING))) { error = ENOTCONN; goto out; @@ -730,15 +764,17 @@ mptcp_disconnect(struct mptses *mpte) } else { soisdisconnecting(mp_so); sbflush(&mp_so->so_rcv); - if (mptcp_usrclosed(mpte) != NULL) + if (mptcp_usrclosed(mpte) != NULL) { mptcp_output(mpte); + } } - if (error == 0) + if (error == 0) { mptcp_subflow_workloop(mpte); + } out: - return (error); + return error; } /* @@ -747,7 +783,7 @@ out: static int mptcp_usr_disconnect(struct socket *mp_so) { - return (mptcp_disconnect(mpsotompte(mp_so))); + return mptcp_disconnect(mpsotompte(mp_so)); } /* @@ -756,13 +792,15 @@ mptcp_usr_disconnect(struct socket *mp_so) static int mptcp_usr_disconnectx(struct socket *mp_so, sae_associd_t aid, sae_connid_t cid) { - if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) - return (EINVAL); + if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) { + return EINVAL; + } - if (cid != SAE_CONNID_ANY && cid != SAE_CONNID_ALL) - return (EINVAL); + if (cid != SAE_CONNID_ANY && cid != SAE_CONNID_ALL) { + return EINVAL; + } - return (mptcp_usr_disconnect(mp_so)); + return mptcp_usr_disconnect(mp_so); } void @@ -779,11 +817,12 @@ mptcp_finish_usrclosed(struct mptses *mpte) struct mptsub *mpts; TAILQ_FOREACH(mpts, &mpte->mpte_subflows, mpts_entry) { - if ((mp_so->so_state & (SS_CANTRCVMORE|SS_CANTSENDMORE)) == - (SS_CANTRCVMORE | SS_CANTSENDMORE)) + if ((mp_so->so_state & (SS_CANTRCVMORE | SS_CANTSENDMORE)) == + (SS_CANTRCVMORE | SS_CANTSENDMORE)) { mptcp_subflow_disconnect(mpte, mpts); - else + } else { mptcp_subflow_shutdown(mpte, mpts); + } } } } 
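[Editor's note -- an aside, not part of the patch.] In the hunk below, mptcp_usrclosed() pushes MPCE_CLOSE through the connection-level state machine but keeps the subflows alive until the send queue is drained; the DATA_FIN occupies one byte of data-sequence space, which appears to be what the "+ 1" in the check accounts for. A minimal stand-alone sketch of that predicate, with hypothetical DSN values:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch: drained once only the one-byte DATA_FIN remains unsent. */
	static bool
	mptcp_send_drained(uint64_t mpt_sndnxt, uint64_t mpt_sndmax)
	{
		return mpt_sndnxt + 1 == mpt_sndmax;
	}

	int
	main(void)
	{
		assert(!mptcp_send_drained(1000, 1005)); /* payload still queued */
		assert(mptcp_send_drained(1004, 1005));  /* only DATA_FIN is left */
		return 0;
	}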
@@ -799,12 +838,13 @@ mptcp_usrclosed(struct mptses *mpte) mptcp_close_fsm(mp_tp, MPCE_CLOSE); /* Not everything has been acknowledged - don't close the subflows! */ - if (mp_tp->mpt_sndnxt + 1 != mp_tp->mpt_sndmax) - return (mpte); + if (mp_tp->mpt_sndnxt + 1 != mp_tp->mpt_sndmax) { + return mpte; + } mptcp_finish_usrclosed(mpte); - return (mpte); + return mpte; } /* @@ -827,7 +867,7 @@ mptcp_usr_rcvd(struct socket *mp_so, int flags) error = mptcp_output(mpte); out: - return (error); + return error; } /* @@ -842,7 +882,7 @@ mptcp_usr_send(struct socket *mp_so, int prus_flags, struct mbuf *m, struct mptses *mpte; int error = 0; - if (prus_flags & (PRUS_OOB|PRUS_EOF)) { + if (prus_flags & (PRUS_OOB | PRUS_EOF)) { error = EOPNOTSUPP; goto out; } @@ -876,24 +916,28 @@ mptcp_usr_send(struct socket *mp_so, int prus_flags, struct mbuf *m, m = NULL; error = mptcp_output(mpte); - if (error != 0) + if (error != 0) { goto out; + } if (mp_so->so_state & SS_ISCONNECTING) { - if (mp_so->so_state & SS_NBIO) + if (mp_so->so_state & SS_NBIO) { error = EWOULDBLOCK; - else + } else { error = sbwait(&mp_so->so_snd); + } } out: if (error) { - if (m != NULL) + if (m != NULL) { m_freem(m); - if (control != NULL) + } + if (control != NULL) { m_freem(control); + } } - return (error); + return error; } /* @@ -916,10 +960,11 @@ mptcp_usr_shutdown(struct socket *mp_so) socantsendmore(mp_so); mpte = mptcp_usrclosed(mpte); - if (mpte != NULL) + if (mpte != NULL) { error = mptcp_output(mpte); + } out: - return (error); + return error; } /* @@ -930,7 +975,7 @@ mptcp_uiotombuf(struct uio *uio, int how, int space, uint32_t align, struct mbuf **top) { struct mbuf *m, *mb, *nm = NULL, *mtail = NULL; - user_ssize_t resid, tot, len, progress; /* must be user_ssize_t */ + user_ssize_t resid, tot, len, progress; /* must be user_ssize_t */ int error; VERIFY(top != NULL && *top == NULL); @@ -940,55 +985,61 @@ mptcp_uiotombuf(struct uio *uio, int how, int space, uint32_t align, * the total data supplied by the uio. */ resid = uio_resid(uio); - if (space > 0) + if (space > 0) { tot = imin(resid, space); - else + } else { tot = resid; + } /* * The smallest unit is a single mbuf with pkthdr. * We can't align past it. */ - if (align >= MHLEN) - return (EINVAL); + if (align >= MHLEN) { + return EINVAL; + } /* * Give us the full allocation or nothing. * If space is zero return the smallest empty mbuf. */ - if ((len = tot + align) == 0) + if ((len = tot + align) == 0) { len = 1; + } /* Loop and append maximum sized mbufs to the chain tail. */ while (len > 0) { uint32_t m_needed = 1; - if (njcl > 0 && len > MBIGCLBYTES) + if (njcl > 0 && len > MBIGCLBYTES) { mb = m_getpackets_internal(&m_needed, 1, how, 1, M16KCLBYTES); - else if (len > MCLBYTES) + } else if (len > MCLBYTES) { mb = m_getpackets_internal(&m_needed, 1, how, 1, MBIGCLBYTES); - else if (len >= (signed)MINCLSIZE) + } else if (len >= (signed)MINCLSIZE) { mb = m_getpackets_internal(&m_needed, 1, how, 1, MCLBYTES); - else + } else { mb = m_gethdr(how, MT_DATA); + } /* Fail the whole operation if one mbuf can't be allocated. */ if (mb == NULL) { - if (nm != NULL) + if (nm != NULL) { m_freem(nm); - return (ENOBUFS); + } + return ENOBUFS; } /* Book keeping. */ VERIFY(mb->m_flags & M_PKTHDR); len -= ((mb->m_flags & M_EXT) ? 
mb->m_ext.ext_size : MHLEN); - if (mtail != NULL) + if (mtail != NULL) { mtail->m_next = mb; - else + } else { nm = mb; + } mtail = mb; } @@ -1003,7 +1054,7 @@ mptcp_uiotombuf(struct uio *uio, int how, int space, uint32_t align, error = uiomove(mtod(mb, char *), len, uio); if (error != 0) { m_freem(m); - return (error); + return error; } /* each mbuf is M_PKTHDR chained via m_next */ @@ -1014,7 +1065,7 @@ mptcp_uiotombuf(struct uio *uio, int how, int space, uint32_t align, } VERIFY(progress == tot); *top = m; - return (0); + return 0; } /* @@ -1045,7 +1096,7 @@ mptcp_usr_sosend(struct socket *mp_so, struct sockaddr *addr, struct uio *uio, VERIFY(mp_so->so_type == SOCK_STREAM); VERIFY(!(mp_so->so_flags & SOF_MP_SUBFLOW)); - if ((flags & (MSG_OOB|MSG_DONTROUTE|MSG_HOLD|MSG_SEND|MSG_FLUSH)) || + if ((flags & (MSG_OOB | MSG_DONTROUTE | MSG_HOLD | MSG_SEND | MSG_FLUSH)) || (mp_so->so_flags & SOF_ENABLE_MSGS)) { error = EOPNOTSUPP; socket_unlock(mp_so, 1); @@ -1070,8 +1121,9 @@ mptcp_usr_sosend(struct socket *mp_so, struct sockaddr *addr, struct uio *uio, do { error = sosendcheck(mp_so, NULL, resid, 0, 0, flags, &sblocked, NULL); - if (error != 0) + if (error != 0) { goto release; + } space = sbspace(&mp_so->so_snd); do { @@ -1120,25 +1172,29 @@ mptcp_usr_sosend(struct socket *mp_so, struct sockaddr *addr, struct uio *uio, (mp_so, sendflags, top, NULL, NULL, p); top = NULL; - if (error != 0) + if (error != 0) { goto release; + } } while (resid != 0 && space > 0); } while (resid != 0); release: - if (sblocked) + if (sblocked) { sbunlock(&mp_so->so_snd, FALSE); /* will unlock socket */ - else + } else { socket_unlock(mp_so, 1); + } out: - if (top != NULL) + if (top != NULL) { m_freem(top); - if (control != NULL) + } + if (control != NULL) { m_freem(control); + } soclearfastopen(mp_so); - return (error); + return error; } /* @@ -1171,28 +1227,28 @@ mptcp_usr_socheckopt(struct socket *mp_so, struct sockopt *sopt) * be existing subflow sockets that are already connected. 
*/ switch (sopt->sopt_name) { - case SO_LINGER: /* MP */ - case SO_LINGER_SEC: /* MP */ - case SO_TYPE: /* MP */ - case SO_NREAD: /* MP */ - case SO_NWRITE: /* MP */ - case SO_ERROR: /* MP */ - case SO_SNDBUF: /* MP */ - case SO_RCVBUF: /* MP */ - case SO_SNDLOWAT: /* MP */ - case SO_RCVLOWAT: /* MP */ - case SO_SNDTIMEO: /* MP */ - case SO_RCVTIMEO: /* MP */ - case SO_NKE: /* MP */ - case SO_NOSIGPIPE: /* MP */ - case SO_NOADDRERR: /* MP */ - case SO_LABEL: /* MP */ - case SO_PEERLABEL: /* MP */ - case SO_DEFUNCTOK: /* MP */ - case SO_ISDEFUNCT: /* MP */ - case SO_TRAFFIC_CLASS_DBG: /* MP */ - case SO_DELEGATED: /* MP */ - case SO_DELEGATED_UUID: /* MP */ + case SO_LINGER: /* MP */ + case SO_LINGER_SEC: /* MP */ + case SO_TYPE: /* MP */ + case SO_NREAD: /* MP */ + case SO_NWRITE: /* MP */ + case SO_ERROR: /* MP */ + case SO_SNDBUF: /* MP */ + case SO_RCVBUF: /* MP */ + case SO_SNDLOWAT: /* MP */ + case SO_RCVLOWAT: /* MP */ + case SO_SNDTIMEO: /* MP */ + case SO_RCVTIMEO: /* MP */ + case SO_NKE: /* MP */ + case SO_NOSIGPIPE: /* MP */ + case SO_NOADDRERR: /* MP */ + case SO_LABEL: /* MP */ + case SO_PEERLABEL: /* MP */ + case SO_DEFUNCTOK: /* MP */ + case SO_ISDEFUNCT: /* MP */ + case SO_TRAFFIC_CLASS_DBG: /* MP */ + case SO_DELEGATED: /* MP */ + case SO_DELEGATED_UUID: /* MP */ #if NECP case SO_NECP_ATTRIBUTES: case SO_NECP_CLIENTUUID: @@ -1202,16 +1258,16 @@ mptcp_usr_socheckopt(struct socket *mp_so, struct sockopt *sopt) */ break; - case SO_DEBUG: /* MP + subflow */ - case SO_KEEPALIVE: /* MP + subflow */ - case SO_USELOOPBACK: /* MP + subflow */ - case SO_RANDOMPORT: /* MP + subflow */ - case SO_TRAFFIC_CLASS: /* MP + subflow */ - case SO_RECV_TRAFFIC_CLASS: /* MP + subflow */ - case SO_PRIVILEGED_TRAFFIC_CLASS: /* MP + subflow */ - case SO_RECV_ANYIF: /* MP + subflow */ - case SO_RESTRICTIONS: /* MP + subflow */ - case SO_FLUSH: /* MP + subflow */ + case SO_DEBUG: /* MP + subflow */ + case SO_KEEPALIVE: /* MP + subflow */ + case SO_USELOOPBACK: /* MP + subflow */ + case SO_RANDOMPORT: /* MP + subflow */ + case SO_TRAFFIC_CLASS: /* MP + subflow */ + case SO_RECV_TRAFFIC_CLASS: /* MP + subflow */ + case SO_PRIVILEGED_TRAFFIC_CLASS: /* MP + subflow */ + case SO_RECV_ANYIF: /* MP + subflow */ + case SO_RESTRICTIONS: /* MP + subflow */ + case SO_FLUSH: /* MP + subflow */ case SO_NOWAKEFROMSLEEP: case SO_NOAPNFALLBK: case SO_MARK_CELLFALLBACK: @@ -1221,8 +1277,9 @@ mptcp_usr_socheckopt(struct socket *mp_so, struct sockopt *sopt) * * NOTE: Only support integer option value for now. 
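 *
 * [Editor's illustration -- not in the original comment.]  A call that
 * satisfies the integer-size check below, using one of the MP + subflow
 * options from the list above (fd is a hypothetical multipath socket):
 *
 *	int on = 1;
 *	(void)setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(int));
 *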
*/ - if (sopt->sopt_valsize != sizeof (int)) + if (sopt->sopt_valsize != sizeof(int)) { error = EINVAL; + } break; default: @@ -1233,7 +1290,7 @@ mptcp_usr_socheckopt(struct socket *mp_so, struct sockopt *sopt) break; } - return (error); + return error; } /* @@ -1264,7 +1321,7 @@ mptcp_setopt_apply(struct mptses *mpte, struct mptopt *mpo) goto out; } - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ mp_so = mptetoso(mpte); /* @@ -1280,7 +1337,7 @@ mptcp_setopt_apply(struct mptses *mpte, struct mptopt *mpo) goto out; } - bzero(&smpo, sizeof (smpo)); + bzero(&smpo, sizeof(smpo)); smpo.mpo_flags |= MPOF_SUBFLOW_OK; smpo.mpo_level = mpo->mpo_level; smpo.mpo_name = mpo->mpo_name; @@ -1289,7 +1346,7 @@ mptcp_setopt_apply(struct mptses *mpte, struct mptopt *mpo) TAILQ_FOREACH(mpts, &mpte->mpte_subflows, mpts_entry) { struct socket *so; - mpts->mpts_flags &= ~(MPTSF_SOPT_OLDVAL|MPTSF_SOPT_INPROG); + mpts->mpts_flags &= ~(MPTSF_SOPT_OLDVAL | MPTSF_SOPT_INPROG); mpts->mpts_oldintval = 0; smpo.mpo_intval = 0; VERIFY(mpts->mpts_socket != NULL); @@ -1308,8 +1365,9 @@ mptcp_setopt_apply(struct mptses *mpte, struct mptopt *mpo) VERIFY(mpts->mpts_socket != NULL); so = mpts->mpts_socket; error = mptcp_subflow_sosetopt(mpte, mpts, mpo); - if (error != 0) + if (error != 0) { break; + } } /* cleanup, and rollback if needed */ @@ -1335,11 +1393,11 @@ mptcp_setopt_apply(struct mptses *mpte, struct mptopt *mpo) mptcp_subflow_sosetopt(mpte, mpts, &smpo); } mpts->mpts_oldintval = 0; - mpts->mpts_flags &= ~(MPTSF_SOPT_OLDVAL|MPTSF_SOPT_INPROG); + mpts->mpts_flags &= ~(MPTSF_SOPT_OLDVAL | MPTSF_SOPT_INPROG); } out: - return (error); + return error; } /* @@ -1392,17 +1450,18 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) } error = sooptcopyin(sopt, &mpsotomppcb(mp_so)->necp_client_uuid, - sizeof(uuid_t), sizeof(uuid_t)); + sizeof(uuid_t), sizeof(uuid_t)); if (error != 0) { goto out; } mpsotomppcb(mp_so)->necp_cb = mptcp_session_necp_cb; error = necp_client_register_multipath_cb(mp_so->last_pid, - mpsotomppcb(mp_so)->necp_client_uuid, - mpsotomppcb(mp_so)); - if (error) + mpsotomppcb(mp_so)->necp_client_uuid, + mpsotomppcb(mp_so)); + if (error) { goto out; + } if (uuid_is_null(mpsotomppcb(mp_so)->necp_client_uuid)) { error = EINVAL; @@ -1434,15 +1493,16 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) /* record at MPTCP level */ error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error) + if (error) { goto out; + } if (optval < 0) { error = EINVAL; goto out; } else { if (optval == 0) { mp_so->so_flags &= ~SOF_NOTSENT_LOWAT; - error = mptcp_set_notsent_lowat(mpte,0); + error = mptcp_set_notsent_lowat(mpte, 0); } else { mp_so->so_flags |= SOF_NOTSENT_LOWAT; error = mptcp_set_notsent_lowat(mpte, @@ -1454,8 +1514,9 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) /* record at MPTCP level */ error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error) + if (error) { goto out; + } if (optval < 0 || optval >= MPTCP_SVCTYPE_MAX) { error = EINVAL; goto out; @@ -1475,8 +1536,9 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) /* record at MPTCP level */ error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error) + if (error) { goto out; + } if (optval < 0 || optval > UINT16_MAX) { error = EINVAL; @@ -1493,14 +1555,16 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) } } - if ((error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval))) 
!= 0) + if ((error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval))) != 0) { goto out; + } if (rec) { /* search for an existing one; if not found, allocate */ - if ((mpo = mptcp_sopt_find(mpte, sopt)) == NULL) + if ((mpo = mptcp_sopt_find(mpte, sopt)) == NULL) { mpo = mptcp_sopt_alloc(M_WAITOK); + } if (mpo == NULL) { error = ENOBUFS; @@ -1523,7 +1587,7 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) mpo->mpo_flags |= MPOF_SUBFLOW_OK; } } else { - bzero(&smpo, sizeof (smpo)); + bzero(&smpo, sizeof(smpo)); mpo = &smpo; mpo->mpo_flags |= MPOF_SUBFLOW_OK; mpo->mpo_level = level; @@ -1539,8 +1603,9 @@ mptcp_setopt(struct mptses *mpte, struct sockopt *sopt) mptcp_sopt_remove(mpte, mpo); mptcp_sopt_free(mpo); } - if (mpo == &smpo) + if (mpo == &smpo) { mpo->mpo_flags &= ~MPOF_INTERIM; + } } out: if (error == 0 && mpo != NULL) { @@ -1556,7 +1621,7 @@ out: mptcp_sopt2str(level, optname), level, optname, optval, error), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); } - return (error); + return error; } /* @@ -1602,10 +1667,11 @@ mptcp_getopt(struct mptses *mpte, struct sockopt *sopt) switch (sopt->sopt_name) { case TCP_NOTSENT_LOWAT: - if (mptetoso(mpte)->so_flags & SOF_NOTSENT_LOWAT) + if (mptetoso(mpte)->so_flags & SOF_NOTSENT_LOWAT) { optval = mptcp_get_notsent_lowat(mpte); - else + } else { optval = 0; + } goto out; case MPTCP_SERVICE_TYPE: optval = mpte->mpte_svctype; @@ -1625,13 +1691,14 @@ mptcp_getopt(struct mptses *mpte, struct sockopt *sopt) if (error == 0) { struct mptopt *mpo; - if ((mpo = mptcp_sopt_find(mpte, sopt)) != NULL) + if ((mpo = mptcp_sopt_find(mpte, sopt)) != NULL) { optval = mpo->mpo_intval; + } - error = sooptcopyout(sopt, &optval, sizeof (int)); + error = sooptcopyout(sopt, &optval, sizeof(int)); } out: - return (error); + return error; } /* @@ -1647,7 +1714,7 @@ mptcp_default_tcp_optval(struct mptses *mpte, struct sockopt *sopt, int *optval) VERIFY(sopt->sopt_level == IPPROTO_TCP); VERIFY(sopt->sopt_dir == SOPT_GET); - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ /* try to do what tcp_newtcpcb() does */ switch (sopt->sopt_name) { @@ -1677,7 +1744,7 @@ mptcp_default_tcp_optval(struct mptses *mpte, struct sockopt *sopt, int *optval) error = ENOPROTOOPT; break; } - return (error); + return error; } /* @@ -1697,7 +1764,7 @@ mptcp_ctloutput(struct socket *mp_so, struct sockopt *sopt) goto out; } mpte = mptompte(mpp); - mpte_lock_assert_held(mpte); /* same as MP socket lock */ + mpte_lock_assert_held(mpte); /* same as MP socket lock */ /* we only handle socket and TCP-level socket options for MPTCP */ if (sopt->sopt_level != SOL_SOCKET && sopt->sopt_level != IPPROTO_TCP) { @@ -1720,7 +1787,7 @@ mptcp_ctloutput(struct socket *mp_so, struct sockopt *sopt) break; } out: - return (error); + return error; } const char * @@ -1730,82 +1797,82 @@ mptcp_sopt2str(int level, int optname) case SOL_SOCKET: switch (optname) { case SO_LINGER: - return ("SO_LINGER"); + return "SO_LINGER"; case SO_LINGER_SEC: - return ("SO_LINGER_SEC"); + return "SO_LINGER_SEC"; case SO_DEBUG: - return ("SO_DEBUG"); + return "SO_DEBUG"; case SO_KEEPALIVE: - return ("SO_KEEPALIVE"); + return "SO_KEEPALIVE"; case SO_USELOOPBACK: - return ("SO_USELOOPBACK"); + return "SO_USELOOPBACK"; case SO_TYPE: - return ("SO_TYPE"); + return "SO_TYPE"; case SO_NREAD: - return ("SO_NREAD"); + return "SO_NREAD"; case SO_NWRITE: - return ("SO_NWRITE"); + return "SO_NWRITE"; case SO_ERROR: - return ("SO_ERROR"); + return "SO_ERROR"; case 
SO_SNDBUF: - return ("SO_SNDBUF"); + return "SO_SNDBUF"; case SO_RCVBUF: - return ("SO_RCVBUF"); + return "SO_RCVBUF"; case SO_SNDLOWAT: - return ("SO_SNDLOWAT"); + return "SO_SNDLOWAT"; case SO_RCVLOWAT: - return ("SO_RCVLOWAT"); + return "SO_RCVLOWAT"; case SO_SNDTIMEO: - return ("SO_SNDTIMEO"); + return "SO_SNDTIMEO"; case SO_RCVTIMEO: - return ("SO_RCVTIMEO"); + return "SO_RCVTIMEO"; case SO_NKE: - return ("SO_NKE"); + return "SO_NKE"; case SO_NOSIGPIPE: - return ("SO_NOSIGPIPE"); + return "SO_NOSIGPIPE"; case SO_NOADDRERR: - return ("SO_NOADDRERR"); + return "SO_NOADDRERR"; case SO_RESTRICTIONS: - return ("SO_RESTRICTIONS"); + return "SO_RESTRICTIONS"; case SO_LABEL: - return ("SO_LABEL"); + return "SO_LABEL"; case SO_PEERLABEL: - return ("SO_PEERLABEL"); + return "SO_PEERLABEL"; case SO_RANDOMPORT: - return ("SO_RANDOMPORT"); + return "SO_RANDOMPORT"; case SO_TRAFFIC_CLASS: - return ("SO_TRAFFIC_CLASS"); + return "SO_TRAFFIC_CLASS"; case SO_RECV_TRAFFIC_CLASS: - return ("SO_RECV_TRAFFIC_CLASS"); + return "SO_RECV_TRAFFIC_CLASS"; case SO_TRAFFIC_CLASS_DBG: - return ("SO_TRAFFIC_CLASS_DBG"); + return "SO_TRAFFIC_CLASS_DBG"; case SO_PRIVILEGED_TRAFFIC_CLASS: - return ("SO_PRIVILEGED_TRAFFIC_CLASS"); + return "SO_PRIVILEGED_TRAFFIC_CLASS"; case SO_DEFUNCTOK: - return ("SO_DEFUNCTOK"); + return "SO_DEFUNCTOK"; case SO_ISDEFUNCT: - return ("SO_ISDEFUNCT"); + return "SO_ISDEFUNCT"; case SO_OPPORTUNISTIC: - return ("SO_OPPORTUNISTIC"); + return "SO_OPPORTUNISTIC"; case SO_FLUSH: - return ("SO_FLUSH"); + return "SO_FLUSH"; case SO_RECV_ANYIF: - return ("SO_RECV_ANYIF"); + return "SO_RECV_ANYIF"; case SO_NOWAKEFROMSLEEP: - return ("SO_NOWAKEFROMSLEEP"); + return "SO_NOWAKEFROMSLEEP"; case SO_NOAPNFALLBK: - return ("SO_NOAPNFALLBK"); + return "SO_NOAPNFALLBK"; case SO_MARK_CELLFALLBACK: - return ("SO_CELLFALLBACK"); + return "SO_CELLFALLBACK"; case SO_DELEGATED: - return ("SO_DELEGATED"); + return "SO_DELEGATED"; case SO_DELEGATED_UUID: - return ("SO_DELEGATED_UUID"); + return "SO_DELEGATED_UUID"; #if NECP case SO_NECP_ATTRIBUTES: - return ("SO_NECP_ATTRIBUTES"); + return "SO_NECP_ATTRIBUTES"; case SO_NECP_CLIENTUUID: - return ("SO_NECP_CLIENTUUID"); + return "SO_NECP_CLIENTUUID"; #endif /* NECP */ } @@ -1813,35 +1880,35 @@ mptcp_sopt2str(int level, int optname) case IPPROTO_TCP: switch (optname) { case TCP_NODELAY: - return ("TCP_NODELAY"); + return "TCP_NODELAY"; case TCP_KEEPALIVE: - return ("TCP_KEEPALIVE"); + return "TCP_KEEPALIVE"; case TCP_KEEPINTVL: - return ("TCP_KEEPINTVL"); + return "TCP_KEEPINTVL"; case TCP_KEEPCNT: - return ("TCP_KEEPCNT"); + return "TCP_KEEPCNT"; case TCP_CONNECTIONTIMEOUT: - return ("TCP_CONNECTIONTIMEOUT"); + return "TCP_CONNECTIONTIMEOUT"; case TCP_RXT_CONNDROPTIME: - return ("TCP_RXT_CONNDROPTIME"); + return "TCP_RXT_CONNDROPTIME"; case PERSIST_TIMEOUT: - return ("PERSIST_TIMEOUT"); + return "PERSIST_TIMEOUT"; case TCP_NOTSENT_LOWAT: - return ("NOTSENT_LOWAT"); + return "NOTSENT_LOWAT"; case TCP_ADAPTIVE_READ_TIMEOUT: - return ("ADAPTIVE_READ_TIMEOUT"); + return "ADAPTIVE_READ_TIMEOUT"; case TCP_ADAPTIVE_WRITE_TIMEOUT: - return ("ADAPTIVE_WRITE_TIMEOUT"); + return "ADAPTIVE_WRITE_TIMEOUT"; case MPTCP_SERVICE_TYPE: - return ("MPTCP_SERVICE_TYPE"); + return "MPTCP_SERVICE_TYPE"; case MPTCP_ALTERNATE_PORT: - return ("MPTCP_ALTERNATE_PORT"); + return "MPTCP_ALTERNATE_PORT"; } break; } - return ("unknown"); + return "unknown"; } static int @@ -1861,9 +1928,9 @@ mptcp_usr_preconnect(struct socket *mp_so) mpts = mptcp_get_subflow(mpte, NULL, NULL); if (mpts == NULL) 
{ mptcplog((LOG_ERR, "%s: mp_so 0x%llx invalid preconnect ", - __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so)), - MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); - return (EINVAL); + __func__, (u_int64_t)VM_KERNEL_ADDRPERM(mp_so)), + MPTCP_SOCKET_DBG, MPTCP_LOGLVL_ERR); + return EINVAL; } mpts->mpts_flags &= ~MPTSF_TFO_REQD; so = mpts->mpts_socket; @@ -1873,5 +1940,5 @@ mptcp_usr_preconnect(struct socket *mp_so) soclearfastopen(mp_so); - return (error); + return error; } diff --git a/bsd/netinet/mptcp_var.h b/bsd/netinet/mptcp_var.h index 1a85f2e3e..5ae998afc 100644 --- a/bsd/netinet/mptcp_var.h +++ b/bsd/netinet/mptcp_var.h @@ -27,7 +27,7 @@ */ #ifndef _NETINET_MPTCP_VAR_H_ -#define _NETINET_MPTCP_VAR_H_ +#define _NETINET_MPTCP_VAR_H_ #ifdef PRIVATE #include @@ -46,9 +46,9 @@ struct mpt_itf_info { uint32_t ifindex; uint32_t has_v4_conn:1, - has_v6_conn:1, - has_nat64_conn:1, - no_mptcp_support:1; + has_v6_conn:1, + has_nat64_conn:1, + no_mptcp_support:1; }; /* @@ -58,79 +58,81 @@ struct mpt_itf_info { * the per-PCB mpp_lock (also the socket's lock); */ struct mptses { - struct mppcb *mpte_mppcb; /* back ptr to multipath PCB */ - struct mptcb *mpte_mptcb; /* ptr to MPTCP PCB */ - TAILQ_HEAD(, mptopt) mpte_sopts; /* list of socket options */ - TAILQ_HEAD(, mptsub) mpte_subflows; /* list of subflows */ - uint16_t mpte_numflows; /* # of subflows in list */ - uint16_t mpte_nummpcapflows; /* # of MP_CAP subflows */ - sae_associd_t mpte_associd; /* MPTCP association ID */ - sae_connid_t mpte_connid_last; /* last used connection ID */ + struct mppcb *mpte_mppcb; /* back ptr to multipath PCB */ + struct mptcb *mpte_mptcb; /* ptr to MPTCP PCB */ + TAILQ_HEAD(, mptopt) mpte_sopts; /* list of socket options */ + TAILQ_HEAD(, mptsub) mpte_subflows; /* list of subflows */ + uint16_t mpte_numflows; /* # of subflows in list */ + uint16_t mpte_nummpcapflows; /* # of MP_CAP subflows */ + sae_associd_t mpte_associd; /* MPTCP association ID */ + sae_connid_t mpte_connid_last; /* last used connection ID */ union { /* Source address of initial subflow */ - struct sockaddr mpte_src; + struct sockaddr mpte_src; struct sockaddr_in __mpte_src_v4; struct sockaddr_in6 __mpte_src_v6; }; union { /* Destination address of initial subflow */ - struct sockaddr mpte_dst; + struct sockaddr mpte_dst; struct sockaddr_in __mpte_dst_v4; struct sockaddr_in6 __mpte_dst_v6; }; struct sockaddr_in mpte_dst_v4_nat64; - uint16_t mpte_alternate_port; /* Alternate port for subflow establishment (network-byte-order) */ + uint16_t mpte_alternate_port; /* Alternate port for subflow establishment (network-byte-order) */ - struct mptsub *mpte_active_sub; /* ptr to last active subf */ - uint8_t mpte_flags; /* per mptcp session flags */ -#define MPTE_SND_REM_ADDR 0x01 /* Send Remove_addr option */ -#define MPTE_SVCTYPE_CHECKED 0x02 /* Did entitlement-check for service-type */ -#define MPTE_FIRSTPARTY 0x04 /* First-party app used multipath_extended entitlement */ -#define MPTE_ACCESS_GRANTED 0x08 /* Access to cellular has been granted for this connection */ - uint8_t mpte_svctype; /* MPTCP Service type */ - uint8_t mpte_lost_aid; /* storing lost address id */ - uint8_t mpte_addrid_last; /* storing address id parm */ + struct mptsub *mpte_active_sub; /* ptr to last active subf */ + uint8_t mpte_flags; /* per mptcp session flags */ +#define MPTE_SND_REM_ADDR 0x01 /* Send Remove_addr option */ +#define MPTE_SVCTYPE_CHECKED 0x02 /* Did entitlement-check for service-type */ +#define MPTE_FIRSTPARTY 0x04 /* First-party app used multipath_extended entitlement 
*/ +#define MPTE_ACCESS_GRANTED 0x08 /* Access to cellular has been granted for this connection */ +#define MPTE_IN_WORKLOOP 0x10 /* Are we currently inside the workloop ? */ +#define MPTE_WORKLOOP_RELAUNCH 0x20 /* Another event got queued, we should restart the workloop */ + uint8_t mpte_svctype; /* MPTCP Service type */ + uint8_t mpte_lost_aid; /* storing lost address id */ + uint8_t mpte_addrid_last; /* storing address id parm */ -#define MPTE_ITFINFO_SIZE 4 - uint32_t mpte_itfinfo_size; - struct mpt_itf_info _mpte_itfinfo[MPTE_ITFINFO_SIZE]; - struct mpt_itf_info *mpte_itfinfo; +#define MPTE_ITFINFO_SIZE 4 + uint32_t mpte_itfinfo_size; + struct mpt_itf_info _mpte_itfinfo[MPTE_ITFINFO_SIZE]; + struct mpt_itf_info *mpte_itfinfo; - struct mbuf *mpte_reinjectq; + struct mbuf *mpte_reinjectq; /* The below is used for stats */ - uint32_t mpte_subflow_switches; /* Number of subflow-switches in sending */ - uint32_t mpte_used_cell:1, - mpte_used_wifi:1, - mpte_initial_cell:1, - mpte_triggered_cell, - mpte_handshake_success:1; - - struct mptcp_itf_stats mpte_itfstats[MPTCP_ITFSTATS_SIZE]; - uint64_t mpte_init_txbytes __attribute__((aligned(8))); - uint64_t mpte_init_rxbytes __attribute__((aligned(8))); + uint32_t mpte_subflow_switches; /* Number of subflow-switches in sending */ + uint32_t mpte_used_cell:1, + mpte_used_wifi:1, + mpte_initial_cell:1, + mpte_triggered_cell, + mpte_handshake_success:1; + + struct mptcp_itf_stats mpte_itfstats[MPTCP_ITFSTATS_SIZE]; + uint64_t mpte_init_txbytes __attribute__((aligned(8))); + uint64_t mpte_init_rxbytes __attribute__((aligned(8))); }; static inline struct socket * mptetoso(struct mptses *mpte) { - return (mpte->mpte_mppcb->mpp_socket); + return mpte->mpte_mppcb->mpp_socket; } static inline struct mptses * mptompte(struct mppcb *mp) { - return ((struct mptses *)mp->mpp_pcbe); + return (struct mptses *)mp->mpp_pcbe; } static inline struct mptses * mpsotompte(struct socket *so) { - return (mptompte(mpsotomppcb(so))); + return mptompte(mpsotomppcb(so)); } static inline void @@ -154,8 +156,9 @@ mpp_lock_assert_notheld(struct mppcb *mp) static inline boolean_t mpp_try_lock(struct mppcb *mp) { - if (!lck_mtx_try_lock(&mp->mpp_lock)) + if (!lck_mtx_try_lock(&mp->mpp_lock)) { return false; + } VERIFY(!(mp->mpp_flags & MPP_INSIDE_OUTPUT)); VERIFY(!(mp->mpp_flags & MPP_INSIDE_INPUT)); @@ -187,7 +190,7 @@ mpp_getlock(struct mppcb *mp, int flags) VERIFY(!(mp->mpp_flags & MPP_INSIDE_INPUT)); } - return (&mp->mpp_lock); + return &mp->mpp_lock; } static inline void @@ -205,7 +208,7 @@ mpte_lock_assert_notheld(struct mptses *mpte) static inline boolean_t mpte_try_lock(struct mptses *mpte) { - return (mpp_try_lock(mpte->mpte_mppcb)); + return mpp_try_lock(mpte->mpte_mppcb); } static inline void @@ -232,7 +235,7 @@ mptcp_subflow_cwnd_space(struct socket *so) struct tcpcb *tp = sototcpcb(so); int cwnd = min(tp->snd_wnd, tp->snd_cwnd) - (so->so_snd.sb_cc); - return (min(cwnd, sbspace(&so->so_snd))); + return min(cwnd, sbspace(&so->so_snd)); } @@ -240,16 +243,16 @@ mptcp_subflow_cwnd_space(struct socket *so) * MPTCP socket options */ struct mptopt { - TAILQ_ENTRY(mptopt) mpo_entry; /* glue to other options */ - uint32_t mpo_flags; /* see flags below */ - int mpo_level; /* sopt_level */ - int mpo_name; /* sopt_name */ - int mpo_intval; /* sopt_val */ + TAILQ_ENTRY(mptopt) mpo_entry; /* glue to other options */ + uint32_t mpo_flags; /* see flags below */ + int mpo_level; /* sopt_level */ + int mpo_name; /* sopt_name */ + int mpo_intval; /* sopt_val */ }; -#define 
MPOF_ATTACHED 0x1 /* attached to MP socket */ -#define MPOF_SUBFLOW_OK 0x2 /* can be issued on subflow socket */ -#define MPOF_INTERIM 0x4 /* has not been issued on any subflow */ +#define MPOF_ATTACHED 0x1 /* attached to MP socket */ +#define MPOF_SUBFLOW_OK 0x2 /* can be issued on subflow socket */ +#define MPOF_INTERIM 0x4 /* has not been issued on any subflow */ /* * MPTCP subflow @@ -257,29 +260,29 @@ struct mptopt { * Note that mpts_flags and mpts_evctl are modified via atomic operations. */ struct mptsub { - TAILQ_ENTRY(mptsub) mpts_entry; /* glue to peer subflows */ - uint32_t mpts_refcnt; /* reference count */ - uint32_t mpts_flags; /* see flags below */ - uint32_t mpts_evctl; /* subflow control events */ - sae_connid_t mpts_connid; /* subflow connection ID */ - int mpts_oldintval; /* sopt_val before sosetopt */ - struct mptses *mpts_mpte; /* back ptr to MPTCP session */ - struct socket *mpts_socket; /* subflow socket */ - struct sockaddr *mpts_src; /* source address */ + TAILQ_ENTRY(mptsub) mpts_entry; /* glue to peer subflows */ + uint32_t mpts_refcnt; /* reference count */ + uint32_t mpts_flags; /* see flags below */ + uint32_t mpts_evctl; /* subflow control events */ + sae_connid_t mpts_connid; /* subflow connection ID */ + int mpts_oldintval; /* sopt_val before sosetopt */ + struct mptses *mpts_mpte; /* back ptr to MPTCP session */ + struct socket *mpts_socket; /* subflow socket */ + struct sockaddr *mpts_src; /* source address */ union { /* destination address */ - struct sockaddr mpts_dst; - struct sockaddr_in __mpts_dst_v4; - struct sockaddr_in6 __mpts_dst_v6; + struct sockaddr mpts_dst; + struct sockaddr_in __mpts_dst_v4; + struct sockaddr_in6 __mpts_dst_v6; }; - u_int32_t mpts_rel_seq; /* running count of subflow # */ - u_int32_t mpts_iss; /* Initial sequence number, taking TFO into account */ - u_int32_t mpts_ifscope; /* scoped to the interface */ - uint32_t mpts_probesoon; /* send probe after probeto */ - uint32_t mpts_probecnt; /* number of probes sent */ - uint32_t mpts_maxseg; /* cached value of t_maxseg */ + u_int32_t mpts_rel_seq; /* running count of subflow # */ + u_int32_t mpts_iss; /* Initial sequence number, taking TFO into account */ + u_int32_t mpts_ifscope; /* scoped to the interface */ + uint32_t mpts_probesoon; /* send probe after probeto */ + uint32_t mpts_probecnt; /* number of probes sent */ + uint32_t mpts_maxseg; /* cached value of t_maxseg */ }; /* @@ -311,30 +314,30 @@ struct mptsub { * * Keep in sync with bsd/dev/dtrace/scripts/mptcp.d. 
*/ -#define MPTSF_ATTACHED 0x00000001 /* attached to MPTCP PCB */ -#define MPTSF_CONNECTING 0x00000002 /* connection was attempted */ -#define MPTSF_CONNECT_PENDING 0x00000004 /* will connect when MPTCP is ready */ -#define MPTSF_CONNECTED 0x00000008 /* connection is established */ -#define MPTSF_DISCONNECTING 0x00000010 /* disconnection was attempted */ -#define MPTSF_DISCONNECTED 0x00000020 /* has been disconnected */ -#define MPTSF_MP_CAPABLE 0x00000040 /* connected as a MPTCP subflow */ -#define MPTSF_MP_READY 0x00000080 /* MPTCP has been confirmed */ -#define MPTSF_MP_DEGRADED 0x00000100 /* has lost its MPTCP capabilities */ -#define MPTSF_PREFERRED 0x00000200 /* primary/preferred subflow */ -#define MPTSF_SOPT_OLDVAL 0x00000400 /* old option value is valid */ -#define MPTSF_SOPT_INPROG 0x00000800 /* sosetopt in progress */ -#define MPTSF_FAILINGOVER 0x00001000 /* subflow not used for output */ -#define MPTSF_ACTIVE 0x00002000 /* subflow currently in use */ -#define MPTSF_MPCAP_CTRSET 0x00004000 /* mpcap counter */ -#define MPTSF_CLOSED 0x00008000 /* soclose_locked has been called on this subflow */ -#define MPTSF_TFO_REQD 0x00010000 /* TFO requested */ -#define MPTSF_CLOSE_REQD 0x00020000 /* A close has been requested from NECP */ -#define MPTSF_INITIAL_SUB 0x00040000 /* This is the initial subflow */ -#define MPTSF_READ_STALL 0x00080000 /* A read-stall has been detected */ -#define MPTSF_WRITE_STALL 0x00100000 /* A write-stall has been detected */ -#define MPTSF_CONFIRMED 0x00200000 /* Subflow confirmed to be MPTCP-capable */ - -#define MPTSF_BITS \ +#define MPTSF_ATTACHED 0x00000001 /* attached to MPTCP PCB */ +#define MPTSF_CONNECTING 0x00000002 /* connection was attempted */ +#define MPTSF_CONNECT_PENDING 0x00000004 /* will connect when MPTCP is ready */ +#define MPTSF_CONNECTED 0x00000008 /* connection is established */ +#define MPTSF_DISCONNECTING 0x00000010 /* disconnection was attempted */ +#define MPTSF_DISCONNECTED 0x00000020 /* has been disconnected */ +#define MPTSF_MP_CAPABLE 0x00000040 /* connected as a MPTCP subflow */ +#define MPTSF_MP_READY 0x00000080 /* MPTCP has been confirmed */ +#define MPTSF_MP_DEGRADED 0x00000100 /* has lost its MPTCP capabilities */ +#define MPTSF_PREFERRED 0x00000200 /* primary/preferred subflow */ +#define MPTSF_SOPT_OLDVAL 0x00000400 /* old option value is valid */ +#define MPTSF_SOPT_INPROG 0x00000800 /* sosetopt in progress */ +#define MPTSF_FAILINGOVER 0x00001000 /* subflow not used for output */ +#define MPTSF_ACTIVE 0x00002000 /* subflow currently in use */ +#define MPTSF_MPCAP_CTRSET 0x00004000 /* mpcap counter */ +#define MPTSF_CLOSED 0x00008000 /* soclose_locked has been called on this subflow */ +#define MPTSF_TFO_REQD 0x00010000 /* TFO requested */ +#define MPTSF_CLOSE_REQD 0x00020000 /* A close has been requested from NECP */ +#define MPTSF_INITIAL_SUB 0x00040000 /* This is the initial subflow */ +#define MPTSF_READ_STALL 0x00080000 /* A read-stall has been detected */ +#define MPTSF_WRITE_STALL 0x00100000 /* A write-stall has been detected */ +#define MPTSF_CONFIRMED 0x00200000 /* Subflow confirmed to be MPTCP-capable */ + +#define MPTSF_BITS \ "\020\1ATTACHED\2CONNECTING\3PENDING\4CONNECTED\5DISCONNECTING" \ "\6DISCONNECTED\7MP_CAPABLE\10MP_READY\11MP_DEGRADED" \ "\12PREFERRED\13SOPT_OLDVAL" \ @@ -347,30 +350,30 @@ struct mptsub { * Keep in sync with bsd/dev/dtrace/mptcp.d */ typedef enum mptcp_state { - MPTCPS_CLOSED = 0, /* closed */ - MPTCPS_LISTEN = 1, /* not yet implemented */ - MPTCPS_ESTABLISHED = 2, /* MPTCP connection 
established */ - MPTCPS_CLOSE_WAIT = 3, /* rcvd DFIN, waiting for close */ - MPTCPS_FIN_WAIT_1 = 4, /* have closed, sent DFIN */ - MPTCPS_CLOSING = 5, /* closed xchd DFIN, waiting DFIN ACK */ - MPTCPS_LAST_ACK = 6, /* had DFIN and close; await DFIN ACK */ - MPTCPS_FIN_WAIT_2 = 7, /* have closed, DFIN is acked */ - MPTCPS_TIME_WAIT = 8, /* in 2*MSL quiet wait after close */ - MPTCPS_TERMINATE = 9, /* terminal state */ + MPTCPS_CLOSED = 0, /* closed */ + MPTCPS_LISTEN = 1, /* not yet implemented */ + MPTCPS_ESTABLISHED = 2, /* MPTCP connection established */ + MPTCPS_CLOSE_WAIT = 3, /* rcvd DFIN, waiting for close */ + MPTCPS_FIN_WAIT_1 = 4, /* have closed, sent DFIN */ + MPTCPS_CLOSING = 5, /* closed xchd DFIN, waiting DFIN ACK */ + MPTCPS_LAST_ACK = 6, /* had DFIN and close; await DFIN ACK */ + MPTCPS_FIN_WAIT_2 = 7, /* have closed, DFIN is acked */ + MPTCPS_TIME_WAIT = 8, /* in 2*MSL quiet wait after close */ + MPTCPS_TERMINATE = 9, /* terminal state */ } mptcp_state_t; -typedef u_int64_t mptcp_key_t; -typedef u_int32_t mptcp_token_t; -typedef u_int8_t mptcp_addr_id; +typedef u_int64_t mptcp_key_t; +typedef u_int32_t mptcp_token_t; +typedef u_int8_t mptcp_addr_id; /* Address ID list */ struct mptcp_subf_auth_entry { LIST_ENTRY(mptcp_subf_auth_entry) msae_next; - u_int32_t msae_laddr_rand; /* Local nonce */ - u_int32_t msae_raddr_rand; /* Remote nonce */ - mptcp_addr_id msae_laddr_id; /* Local addr ID */ - mptcp_addr_id msae_raddr_id; /* Remote addr ID */ + u_int32_t msae_laddr_rand; /* Local nonce */ + u_int32_t msae_raddr_rand; /* Remote nonce */ + mptcp_addr_id msae_laddr_id; /* Local addr ID */ + mptcp_addr_id msae_raddr_id; /* Remote addr ID */ }; /* @@ -380,96 +383,97 @@ struct mptcp_subf_auth_entry { * Keep in sync with bsd/dev/dtrace/scripts/mptcp.d. */ struct mptcb { - struct mptses *mpt_mpte; /* back ptr to MPTCP session */ - mptcp_state_t mpt_state; /* MPTCP state */ - u_int32_t mpt_flags; /* see flags below */ - u_int32_t mpt_version; /* MPTCP proto version */ - int mpt_softerror; /* error not yet reported */ + struct mptses *mpt_mpte; /* back ptr to MPTCP session */ + mptcp_state_t mpt_state; /* MPTCP state */ + u_int32_t mpt_flags; /* see flags below */ + u_int32_t mpt_version; /* MPTCP proto version */ + int mpt_softerror; /* error not yet reported */ /* * Authentication and metadata invariants */ - mptcp_key_t mpt_localkey; /* in network byte order */ - mptcp_key_t mpt_remotekey; /* in network byte order */ - mptcp_token_t mpt_localtoken; /* HMAC SHA1 of local key */ - mptcp_token_t mpt_remotetoken; /* HMAC SHA1 of remote key */ + mptcp_key_t mpt_localkey; /* in network byte order */ + mptcp_key_t mpt_remotekey; /* in network byte order */ + mptcp_token_t mpt_localtoken; /* HMAC SHA1 of local key */ + mptcp_token_t mpt_remotetoken; /* HMAC SHA1 of remote key */ /* * Timer vars for scenarios where subflow level acks arrive, but * Data ACKs do not. 
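 *
 * [Editor's note -- not in the original comment.]  Per mptcp_timer.c
 * earlier in this patch: mpt_rxtstart is stamped when MPTT_REXMT is armed
 * and compared against mptcp_rto * hz; when the timer fires, either the
 * Data ACK has already advanced mpt_snduna past mpt_rtseq (the timer is
 * cleared) or mpt_sndnxt is rewound to mpt_rtseq and mptcp_output()
 * re-sends from there, with mpt_rxtshift counting consecutive expiries.
 *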
*/ - int mpt_rxtshift; /* num of consecutive retrans */ - u_int32_t mpt_rxtstart; /* time at which rxt started */ - u_int64_t mpt_rtseq; /* seq # being tracked */ - u_int32_t mpt_timer_vals; /* timer related values */ - u_int32_t mpt_timewait; /* timewait */ + int mpt_rxtshift; /* num of consecutive retrans */ + u_int32_t mpt_rxtstart; /* time at which rxt started */ + u_int64_t mpt_rtseq; /* seq # being tracked */ + u_int32_t mpt_timer_vals; /* timer related values */ + u_int32_t mpt_timewait; /* timewait */ /* * Sending side */ - u_int64_t mpt_snduna; /* DSN of last unacked byte */ - u_int64_t mpt_sndnxt; /* DSN of next byte to send */ - u_int64_t mpt_sndmax; /* DSN of max byte sent */ - u_int64_t mpt_local_idsn; /* First byte's DSN */ - u_int32_t mpt_sndwnd; - u_int64_t mpt_sndwl1; - u_int64_t mpt_sndwl2; + u_int64_t mpt_snduna; /* DSN of last unacked byte */ + u_int64_t mpt_sndnxt; /* DSN of next byte to send */ + u_int64_t mpt_sndmax; /* DSN of max byte sent */ + u_int64_t mpt_local_idsn; /* First byte's DSN */ + u_int32_t mpt_sndwnd; + u_int64_t mpt_sndwl1; + u_int64_t mpt_sndwl2; /* * Receiving side */ - u_int64_t mpt_rcvnxt; /* Next expected DSN */ - u_int64_t mpt_remote_idsn; /* Peer's IDSN */ - u_int32_t mpt_rcvwnd; + u_int64_t mpt_rcvnxt; /* Next expected DSN */ + u_int64_t mpt_remote_idsn; /* Peer's IDSN */ + u_int32_t mpt_rcvwnd; LIST_HEAD(, mptcp_subf_auth_entry) mpt_subauth_list; /* address IDs */ /* * Fastclose */ - u_int64_t mpt_dsn_at_csum_fail; /* MPFail Opt DSN */ - u_int32_t mpt_ssn_at_csum_fail; /* MPFail Subflow Seq */ + u_int64_t mpt_dsn_at_csum_fail; /* MPFail Opt DSN */ + u_int32_t mpt_ssn_at_csum_fail; /* MPFail Subflow Seq */ /* * Zombie handling */ -#define MPT_GC_TICKS (30) -#define MPT_GC_TICKS_FAST (10) - int32_t mpt_gc_ticks; /* Used for zombie deletion */ +#define MPT_GC_TICKS (30) +#define MPT_GC_TICKS_FAST (10) + int32_t mpt_gc_ticks; /* Used for zombie deletion */ - u_int32_t mpt_notsent_lowat; /* TCP_NOTSENT_LOWAT support */ - u_int32_t mpt_peer_version; /* Version from peer */ + u_int32_t mpt_notsent_lowat; /* TCP_NOTSENT_LOWAT support */ + u_int32_t mpt_peer_version; /* Version from peer */ - struct tsegqe_head mpt_segq; - u_int16_t mpt_reassqlen; /* length of reassembly queue */ + struct tsegqe_head mpt_segq; + u_int16_t mpt_reassqlen; /* length of reassembly queue */ }; /* valid values for mpt_flags (see also notes on mpts_flags above) */ -#define MPTCPF_CHECKSUM 0x001 /* checksum DSS option */ -#define MPTCPF_FALLBACK_TO_TCP 0x002 /* Fallback to TCP */ -#define MPTCPF_JOIN_READY 0x004 /* Ready to start 2 or more subflows */ -#define MPTCPF_RECVD_MPFAIL 0x008 /* Received MP_FAIL option */ -#define MPTCPF_SND_64BITDSN 0x010 /* Send full 64-bit DSN */ -#define MPTCPF_SND_64BITACK 0x020 /* Send 64-bit ACK response */ -#define MPTCPF_RCVD_64BITACK 0x040 /* Received 64-bit Data ACK */ -#define MPTCPF_POST_FALLBACK_SYNC 0x080 /* Post fallback resend data */ -#define MPTCPF_FALLBACK_HEURISTIC 0x100 /* Send SYN without MP_CAPABLE due to heuristic */ -#define MPTCPF_HEURISTIC_TRAC 0x200 /* Tracked this connection in the heuristics as a failure */ -#define MPTCPF_REASS_INPROG 0x400 /* Reassembly is in progress */ - -#define MPTCPF_BITS \ +#define MPTCPF_CHECKSUM 0x001 /* checksum DSS option */ +#define MPTCPF_FALLBACK_TO_TCP 0x002 /* Fallback to TCP */ +#define MPTCPF_JOIN_READY 0x004 /* Ready to start 2 or more subflows */ +#define MPTCPF_RECVD_MPFAIL 0x008 /* Received MP_FAIL option */ +#define MPTCPF_SND_64BITDSN 0x010 /* Send full 64-bit DSN */ +#define 
MPTCPF_SND_64BITACK 0x020 /* Send 64-bit ACK response */ +#define MPTCPF_RCVD_64BITACK 0x040 /* Received 64-bit Data ACK */ +#define MPTCPF_POST_FALLBACK_SYNC 0x080 /* Post fallback resend data */ +#define MPTCPF_FALLBACK_HEURISTIC 0x100 /* Send SYN without MP_CAPABLE due to heuristic */ +#define MPTCPF_HEURISTIC_TRAC 0x200 /* Tracked this connection in the heuristics as a failure */ +#define MPTCPF_REASS_INPROG 0x400 /* Reassembly is in progress */ + +#define MPTCPF_BITS \ "\020\1CHECKSUM\2FALLBACK_TO_TCP\3JOIN_READY\4RECVD_MPFAIL" \ "\5SND_64BITDSN\6SND_64BITACK\7RCVD_64BITACK\10POST_FALLBACK_SYNC" \ "\11FALLBACK_HEURISTIC\12HEURISTIC_TRAC\13REASS_INPROG" /* valid values for mpt_timer_vals */ -#define MPTT_REXMT 0x01 /* Starting Retransmit Timer */ -#define MPTT_TW 0x02 /* Starting Timewait Timer */ -#define MPTT_FASTCLOSE 0x04 /* Starting Fastclose wait timer */ +#define MPTT_REXMT 0x01 /* Starting Retransmit Timer */ +#define MPTT_TW 0x02 /* Starting Timewait Timer */ +#define MPTT_FASTCLOSE 0x04 /* Starting Fastclose wait timer */ /* events for close FSM */ -#define MPCE_CLOSE 0x1 -#define MPCE_RECV_DATA_ACK 0x2 -#define MPCE_RECV_DATA_FIN 0x4 +#define MPCE_CLOSE 0x1 +#define MPCE_RECV_DATA_ACK 0x2 +#define MPCE_RECV_DATA_FIN 0x4 /* mptcb manipulation */ -static inline struct mptcb *tptomptp(struct tcpcb *tp) +static inline struct mptcb * +tptomptp(struct tcpcb *tp) { - return (tp->t_mptcb); + return tp->t_mptcb; } /* @@ -477,9 +481,9 @@ static inline struct mptcb *tptomptp(struct tcpcb *tp) * the MP protocol control block; the folllowing represents the layout. */ struct mpp_mtp { - struct mppcb mpp; /* Multipath PCB */ - struct mptses mpp_ses; /* MPTCP session */ - struct mptcb mtcb; /* MPTCP PCB */ + struct mppcb mpp; /* Multipath PCB */ + struct mptses mpp_ses; /* MPTCP session */ + struct mptcb mtcb; /* MPTCP PCB */ }; #ifdef SYSCTL_DECL @@ -491,37 +495,37 @@ extern struct pr_usrreqs mptcp_usrreqs; extern os_log_t mptcp_log_handle; /* Encryption algorithm related definitions */ -#define SHA1_TRUNCATED 8 +#define SHA1_TRUNCATED 8 /* MPTCP Debugging Levels */ -#define MPTCP_LOGLVL_NONE 0x0 /* No debug logging */ -#define MPTCP_LOGLVL_ERR 0x1 /* Errors in execution are logged */ -#define MPTCP_LOGLVL_LOG 0x2 /* Important logs */ -#define MPTCP_LOGLVL_VERBOSE 0x4 /* Verbose logs */ +#define MPTCP_LOGLVL_NONE 0x0 /* No debug logging */ +#define MPTCP_LOGLVL_ERR 0x1 /* Errors in execution are logged */ +#define MPTCP_LOGLVL_LOG 0x2 /* Important logs */ +#define MPTCP_LOGLVL_VERBOSE 0x4 /* Verbose logs */ /* MPTCP sub-components for debug logging */ -#define MPTCP_NO_DBG 0x00 /* No areas are logged */ -#define MPTCP_STATE_DBG 0x01 /* State machine logging */ -#define MPTCP_SOCKET_DBG 0x02 /* Socket call logging */ -#define MPTCP_SENDER_DBG 0x04 /* Sender side logging */ -#define MPTCP_RECEIVER_DBG 0x08 /* Receiver logging */ -#define MPTCP_EVENTS_DBG 0x10 /* Subflow events logging */ +#define MPTCP_NO_DBG 0x00 /* No areas are logged */ +#define MPTCP_STATE_DBG 0x01 /* State machine logging */ +#define MPTCP_SOCKET_DBG 0x02 /* Socket call logging */ +#define MPTCP_SENDER_DBG 0x04 /* Sender side logging */ +#define MPTCP_RECEIVER_DBG 0x08 /* Receiver logging */ +#define MPTCP_EVENTS_DBG 0x10 /* Subflow events logging */ /* Mask to obtain 32-bit portion of data sequence number */ -#define MPTCP_DATASEQ_LOW32_MASK (0xffffffff) -#define MPTCP_DATASEQ_LOW32(seq) (seq & MPTCP_DATASEQ_LOW32_MASK) +#define MPTCP_DATASEQ_LOW32_MASK (0xffffffff) +#define MPTCP_DATASEQ_LOW32(seq) (seq & 
MPTCP_DATASEQ_LOW32_MASK) /* Mask to obtain upper 32-bit portion of data sequence number */ -#define MPTCP_DATASEQ_HIGH32_MASK (0xffffffff00000000) -#define MPTCP_DATASEQ_HIGH32(seq) (seq & MPTCP_DATASEQ_HIGH32_MASK) +#define MPTCP_DATASEQ_HIGH32_MASK (0xffffffff00000000) +#define MPTCP_DATASEQ_HIGH32(seq) (seq & MPTCP_DATASEQ_HIGH32_MASK) /* Mask to obtain 32-bit portion of data ack */ -#define MPTCP_DATAACK_LOW32_MASK (0xffffffff) -#define MPTCP_DATAACK_LOW32(ack) (ack & MPTCP_DATAACK_LOW32_MASK) +#define MPTCP_DATAACK_LOW32_MASK (0xffffffff) +#define MPTCP_DATAACK_LOW32(ack) (ack & MPTCP_DATAACK_LOW32_MASK) /* Mask to obtain upper 32-bit portion of data ack */ -#define MPTCP_DATAACK_HIGH32_MASK (0xffffffff00000000) -#define MPTCP_DATAACK_HIGH32(ack) (ack & MPTCP_DATAACK_HIGH32_MASK) +#define MPTCP_DATAACK_HIGH32_MASK (0xffffffff00000000) +#define MPTCP_DATAACK_HIGH32(ack) (ack & MPTCP_DATAACK_HIGH32_MASK) /* * x is the 64-bit data sequence number, y the 32-bit data seq number to be @@ -537,45 +541,45 @@ extern os_log_t mptcp_log_handle; * comparing against rwnd. Bogus DSNs within rwnd cannot be protected against * and are as weak as bogus TCP sequence numbers. */ -#define MPTCP_EXTEND_DSN(x, y, z) { \ - if ((MPTCP_DATASEQ_LOW32(x) > y) && \ - ((((u_int32_t)MPTCP_DATASEQ_LOW32(x)) - (u_int32_t)y) >= \ - (u_int32_t)(1 << 31))) { \ - /* \ - * y wrapped around and x and y are 2**31 bytes apart \ - */ \ - z = MPTCP_DATASEQ_HIGH32(x) + 0x100000000; \ - z |= y; \ - } else if ((MPTCP_DATASEQ_LOW32(x) < y) && \ - (((u_int32_t)y - \ - ((u_int32_t)MPTCP_DATASEQ_LOW32(x))) >= \ - (u_int32_t)(1 << 31))) { \ - /* \ - * x wrapped around and x and y are 2**31 apart \ - */ \ - z = MPTCP_DATASEQ_HIGH32(x) - 0x100000000; \ - z |= y; \ - } else { \ - z = MPTCP_DATASEQ_HIGH32(x) | y; \ - } \ +#define MPTCP_EXTEND_DSN(x, y, z) { \ + if ((MPTCP_DATASEQ_LOW32(x) > y) && \ + ((((u_int32_t)MPTCP_DATASEQ_LOW32(x)) - (u_int32_t)y) >= \ + (u_int32_t)(1 << 31))) { \ + /* \ + * y wrapped around and x and y are 2**31 bytes apart \ + */ \ + z = MPTCP_DATASEQ_HIGH32(x) + 0x100000000; \ + z |= y; \ + } else if ((MPTCP_DATASEQ_LOW32(x) < y) && \ + (((u_int32_t)y - \ + ((u_int32_t)MPTCP_DATASEQ_LOW32(x))) >= \ + (u_int32_t)(1 << 31))) { \ + /* \ + * x wrapped around and x and y are 2**31 apart \ + */ \ + z = MPTCP_DATASEQ_HIGH32(x) - 0x100000000; \ + z |= y; \ + } else { \ + z = MPTCP_DATASEQ_HIGH32(x) | y; \ + } \ } -#define mptcplog(x, y, z) do { \ - if ((mptcp_dbg_area & y) && (mptcp_dbg_level & z)) \ - log x; \ +#define mptcplog(x, y, z) do { \ + if ((mptcp_dbg_area & y) && (mptcp_dbg_level & z)) \ + log x; \ } while (0) -extern int mptcp_enable; /* Multipath TCP */ -extern int mptcp_mpcap_retries; /* Multipath TCP retries */ -extern int mptcp_join_retries; /* Multipath TCP Join retries */ -extern int mptcp_dss_csum; /* Multipath DSS Option checksum */ -extern int mptcp_fail_thresh; /* Multipath failover thresh of retransmits */ +extern int mptcp_enable; /* Multipath TCP */ +extern int mptcp_mpcap_retries; /* Multipath TCP retries */ +extern int mptcp_join_retries; /* Multipath TCP Join retries */ +extern int mptcp_dss_csum; /* Multipath DSS Option checksum */ +extern int mptcp_fail_thresh; /* Multipath failover thresh of retransmits */ extern int mptcp_subflow_keeptime; /* Multipath subflow TCP_KEEPALIVE opt */ -extern uint32_t mptcp_dbg_level; /* Multipath TCP debugging level */ -extern uint32_t mptcp_dbg_area; /* Multipath TCP debugging area */ -extern int mptcp_developer_mode; /* Allow aggregation mode */ +extern 
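
The wraparound rule of MPTCP_EXTEND_DSN is easiest to see with concrete values. A minimal standalone sketch (extend_dsn and the sample numbers are hypothetical, not xnu code) of how a 32-bit wire DSN y is widened against the current 64-bit sequence x:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: mirrors the wraparound rule of MPTCP_EXTEND_DSN.
 * If the low halves are more than 2**31 apart, one side has wrapped,
 * so the high half is bumped up or down by one 2**32 block.
 */
static uint64_t
extend_dsn(uint64_t x, uint32_t y)
{
	uint32_t xlo = (uint32_t)(x & 0xffffffffULL);    /* MPTCP_DATASEQ_LOW32(x) */
	uint64_t xhi = x & 0xffffffff00000000ULL;        /* MPTCP_DATASEQ_HIGH32(x) */

	if (xlo > y && (xlo - y) >= (1U << 31)) {
		/* y wrapped around; take the next 2**32 block */
		return (xhi + 0x100000000ULL) | y;
	} else if (xlo < y && (y - xlo) >= (1U << 31)) {
		/* x wrapped around; take the previous 2**32 block */
		return (xhi - 0x100000000ULL) | y;
	}
	return xhi | y;
}

int
main(void)
{
	/* x just crossed into a new 2**32 block; y is a slightly older DSN */
	printf("%#llx\n", (unsigned long long)extend_dsn(0x200000005ULL, 0xfffffff0U)); /* 0x1fffffff0 */
	/* y wrapped ahead of an x still in the old block */
	printf("%#llx\n", (unsigned long long)extend_dsn(0x1fffffff0ULL, 0x00000005U)); /* 0x200000005 */
	return 0;
}

As the header comment says, this only disambiguates DSNs more than 2**31 apart; a bogus DSN inside the receive window is as undetectable as a bogus TCP sequence number.
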
uint32_t mptcp_dbg_level; /* Multipath TCP debugging level */ +extern uint32_t mptcp_dbg_area; /* Multipath TCP debugging area */ +extern int mptcp_developer_mode; /* Allow aggregation mode */ -extern int tcp_jack_rxmt; /* Join ACK retransmission value in msecs */ +extern int tcp_jack_rxmt; /* Join ACK retransmission value in msecs */ __BEGIN_DECLS extern void mptcp_init(struct protosw *, struct domain *); @@ -583,7 +587,7 @@ extern int mptcp_ctloutput(struct socket *, struct sockopt *); extern int mptcp_sescreate(struct mppcb *); extern void mptcp_check_subflows_and_add(struct mptses *); extern int mptcp_get_statsindex(struct mptcp_itf_stats *stats, - const struct mptsub *mpts); + const struct mptsub *mpts); extern void mptcpstats_inc_switch(struct mptses *, const struct mptsub *); extern struct mptses *mptcp_drop(struct mptses *, struct mptcb *, int); extern struct mptses *mptcp_close(struct mptses *, struct mptcb *); @@ -607,7 +611,7 @@ extern int mptcp_subflow_add(struct mptses *, struct sockaddr *, extern void mptcpstats_update(struct mptcp_itf_stats *stats, struct mptsub *mpts); extern void mptcp_subflow_del(struct mptses *, struct mptsub *); -#define MPTCP_SUBOUT_PROBING 0x01 +#define MPTCP_SUBOUT_PROBING 0x01 extern int mptcp_subflow_output(struct mptses *mpte, struct mptsub *mpts, int flags); extern void mptcp_clean_reinjectq(struct mptses *mpte); extern void mptcp_subflow_shutdown(struct mptses *, struct mptsub *); @@ -633,11 +637,11 @@ extern int mptcp_init_remote_parms(struct mptcb *); extern boolean_t mptcp_ok_to_keepalive(struct mptcb *); extern void mptcp_insert_dsn(struct mppcb *, struct mbuf *); extern void mptcp_output_getm_dsnmap32(struct socket *so, int off, - uint32_t *dsn, uint32_t *relseq, - uint16_t *data_len, uint16_t *dss_csum); + uint32_t *dsn, uint32_t *relseq, + uint16_t *data_len, uint16_t *dss_csum); extern void mptcp_output_getm_dsnmap64(struct socket *so, int off, - uint64_t *dsn, uint32_t *relseq, - uint16_t *data_len, uint16_t *dss_csum); + uint64_t *dsn, uint32_t *relseq, + uint16_t *data_len, uint16_t *dss_csum); extern void mptcp_act_on_txfail(struct socket *); extern struct mptsub *mptcp_get_subflow(struct mptses *, struct mptsub *, struct mptsub **); @@ -665,87 +669,87 @@ extern void mptcp_unset_cellicon(void); extern void mptcp_reset_rexmit_state(struct tcpcb *tp); extern void mptcp_reset_keepalive(struct tcpcb *tp); extern int mptcp_validate_csum(struct tcpcb *tp, struct mbuf *m, uint64_t dsn, - uint32_t sseq, uint16_t dlen, uint16_t csum, - uint16_t dfin); + uint32_t sseq, uint16_t dlen, uint16_t csum, + uint16_t dfin); __END_DECLS #endif /* BSD_KERNEL_PRIVATE */ #ifdef PRIVATE typedef struct mptcp_flow { - size_t flow_len; - size_t flow_tcpci_offset; - uint32_t flow_flags; - sae_connid_t flow_cid; + size_t flow_len; + size_t flow_tcpci_offset; + uint32_t flow_flags; + sae_connid_t flow_cid; struct sockaddr_storage flow_src; struct sockaddr_storage flow_dst; - uint32_t flow_relseq; /* last subflow rel seq# */ - int32_t flow_soerror; /* subflow level error */ - uint32_t flow_probecnt; /* number of probes sent */ - conninfo_tcp_t flow_ci; /* must be the last field */ + uint32_t flow_relseq; /* last subflow rel seq# */ + int32_t flow_soerror; /* subflow level error */ + uint32_t flow_probecnt; /* number of probes sent */ + conninfo_tcp_t flow_ci; /* must be the last field */ } mptcp_flow_t; typedef struct conninfo_mptcp { - size_t mptcpci_len; - size_t mptcpci_flow_offset; /* offsetof first flow */ - size_t mptcpci_nflows; /* number of subflows */ - 
uint32_t mptcpci_state; /* MPTCP level state */ - uint32_t mptcpci_mpte_flags; /* Session flags */ - uint32_t mptcpci_flags; /* MPTCB flags */ - uint32_t mptcpci_ltoken; /* local token */ - uint32_t mptcpci_rtoken; /* remote token */ - uint32_t mptcpci_notsent_lowat; /* NOTSENT_LOWAT */ + size_t mptcpci_len; + size_t mptcpci_flow_offset; /* offsetof first flow */ + size_t mptcpci_nflows; /* number of subflows */ + uint32_t mptcpci_state; /* MPTCP level state */ + uint32_t mptcpci_mpte_flags; /* Session flags */ + uint32_t mptcpci_flags; /* MPTCB flags */ + uint32_t mptcpci_ltoken; /* local token */ + uint32_t mptcpci_rtoken; /* remote token */ + uint32_t mptcpci_notsent_lowat; /* NOTSENT_LOWAT */ /* Send side */ - uint64_t mptcpci_snduna; /* DSN of last unacked byte */ - uint64_t mptcpci_sndnxt; /* DSN of next byte to send */ - uint64_t mptcpci_sndmax; /* DSN of max byte sent */ - uint64_t mptcpci_lidsn; /* Local IDSN */ - uint32_t mptcpci_sndwnd; /* Send window snapshot */ + uint64_t mptcpci_snduna; /* DSN of last unacked byte */ + uint64_t mptcpci_sndnxt; /* DSN of next byte to send */ + uint64_t mptcpci_sndmax; /* DSN of max byte sent */ + uint64_t mptcpci_lidsn; /* Local IDSN */ + uint32_t mptcpci_sndwnd; /* Send window snapshot */ /* Receive side */ - uint64_t mptcpci_rcvnxt; /* Next expected DSN */ - uint64_t mptcpci_rcvatmark; /* Session level rcvnxt */ - uint64_t mptcpci_ridsn; /* Peer's IDSN */ - uint32_t mptcpci_rcvwnd; /* Receive window */ + uint64_t mptcpci_rcvnxt; /* Next expected DSN */ + uint64_t mptcpci_rcvatmark; /* Session level rcvnxt */ + uint64_t mptcpci_ridsn; /* Peer's IDSN */ + uint32_t mptcpci_rcvwnd; /* Receive window */ - uint8_t mptcpci_mpte_addrid; /* last addr id */ + uint8_t mptcpci_mpte_addrid; /* last addr id */ - mptcp_flow_t mptcpci_flows[1]; + mptcp_flow_t mptcpci_flows[1]; } conninfo_mptcp_t; /* Use SymptomsD notifications of wifi and cell status in subflow selection */ #define MPTCP_KERN_CTL_NAME "com.apple.network.advisory" typedef struct symptoms_advisory { union { - uint32_t sa_nwk_status_int; + uint32_t sa_nwk_status_int; struct { union { -#define SYMPTOMS_ADVISORY_NOCOMMENT 0x0000 -#define SYMPTOMS_ADVISORY_USEAPP 0xFFFF /* Very ugly workaround to avoid breaking backwards compatibility - ToDo: Fix it in +1 */ - uint16_t sa_nwk_status; +#define SYMPTOMS_ADVISORY_NOCOMMENT 0x0000 +#define SYMPTOMS_ADVISORY_USEAPP 0xFFFF /* Very ugly workaround to avoid breaking backwards compatibility - ToDo: Fix it in +1 */ + uint16_t sa_nwk_status; struct { #define SYMPTOMS_ADVISORY_WIFI_BAD 0x01 #define SYMPTOMS_ADVISORY_WIFI_OK 0x02 - uint8_t sa_wifi_status; + uint8_t sa_wifi_status; #define SYMPTOMS_ADVISORY_CELL_BAD 0x01 #define SYMPTOMS_ADVISORY_CELL_OK 0x02 - uint8_t sa_cell_status; + uint8_t sa_cell_status; }; }; - uint16_t sa_unused; + uint16_t sa_unused; }; }; } symptoms_advisory_t; struct mptcp_symptoms_ask_uuid { - uint32_t cmd; -#define MPTCP_SYMPTOMS_ASK_UUID 1 - uuid_t uuid; - uint32_t priority; -#define MPTCP_SYMPTOMS_UNKNOWN 0 -#define MPTCP_SYMPTOMS_BACKGROUND 1 -#define MPTCP_SYMPTOMS_FOREGROUND 2 + uint32_t cmd; +#define MPTCP_SYMPTOMS_ASK_UUID 1 + uuid_t uuid; + uint32_t priority; +#define MPTCP_SYMPTOMS_UNKNOWN 0 +#define MPTCP_SYMPTOMS_BACKGROUND 1 +#define MPTCP_SYMPTOMS_FOREGROUND 2 }; struct kev_mptcp_data { diff --git a/bsd/netinet/raw_ip.c b/bsd/netinet/raw_ip.c index 65f2d2a41..d88d42b99 100644 --- a/bsd/netinet/raw_ip.c +++ b/bsd/netinet/raw_ip.c @@ -122,8 +122,8 @@ int rip_bind(struct socket *, struct sockaddr *, struct proc *); int 
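
For context, MPTCP_KERN_CTL_NAME above is reached through the standard Darwin kernel-control socket sequence. A hedged userland sketch (open_symptoms_ctl is a hypothetical name; in practice attaching to this control is restricted to the system's symptoms agent, so an ordinary caller should expect the connect to fail):

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kern_control.h>
#include <sys/sys_domain.h>
#include <string.h>
#include <unistd.h>

static int
open_symptoms_ctl(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	if (fd < 0) {
		return -1;
	}

	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, "com.apple.network.advisory", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {	/* resolve name to ctl_id */
		close(fd);
		return -1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;			/* sc_unit 0: kernel picks */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
		close(fd);
		return -1;
	}
	return fd;	/* ready for send()/recv() of symptoms_advisory_t */
}
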
rip_connect(struct socket *, struct sockaddr *, struct proc *); int rip_shutdown(struct socket *); -struct inpcbhead ripcb; -struct inpcbinfo ripcbinfo; +struct inpcbhead ripcb; +struct inpcbinfo ripcbinfo; /* control hooks for ipfw and dummynet */ #if IPFIREWALL @@ -136,8 +136,8 @@ ip_dn_ctl_t *ip_dn_ctl_ptr; /* * Nominal space allocated to a raw ip socket. */ -#define RIPSNDQ 8192 -#define RIPRCVQ 8192 +#define RIPSNDQ 8192 +#define RIPRCVQ 8192 /* * Raw interface to IP protocol. @@ -153,10 +153,11 @@ rip_init(struct protosw *pp, struct domain *dp) static int rip_initialized = 0; struct inpcbinfo *pcbinfo; - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); - if (rip_initialized) + if (rip_initialized) { return; + } rip_initialized = 1; LIST_INIT(&ripcb); @@ -173,7 +174,7 @@ rip_init(struct protosw *pp, struct domain *dp) (4096 * sizeof(struct inpcb)), 4096, "ripzone"); pcbinfo = &ripcbinfo; - /* + /* * allocate lock group attribute and group for udp pcb mutexes */ pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init(); @@ -192,7 +193,7 @@ rip_init(struct protosw *pp, struct domain *dp) in_pcbinfo_attach(&ripcbinfo); } -static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET , 0, {0}, {0,0,0,0,0,0,0,0,} }; +static struct sockaddr_in ripsrc = { sizeof(ripsrc), AF_INET, 0, {0}, {0, 0, 0, 0, 0, 0, 0, 0, } }; /* * Setup generic address and protocol structures * for raw_input routine, then pass them along with @@ -215,19 +216,24 @@ rip_input(struct mbuf *m, int iphlen) lck_rw_lock_shared(ripcbinfo.ipi_lock); LIST_FOREACH(inp, &ripcb, inp_list) { #if INET6 - if ((inp->inp_vflag & INP_IPV4) == 0) + if ((inp->inp_vflag & INP_IPV4) == 0) { continue; + } #endif - if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p)) + if (inp->inp_ip_p && (inp->inp_ip_p != ip->ip_p)) { continue; + } if (inp->inp_laddr.s_addr && - inp->inp_laddr.s_addr != ip->ip_dst.s_addr) + inp->inp_laddr.s_addr != ip->ip_dst.s_addr) { continue; + } if (inp->inp_faddr.s_addr && - inp->inp_faddr.s_addr != ip->ip_src.s_addr) + inp->inp_faddr.s_addr != ip->ip_src.s_addr) { continue; - if (inp_restricted_recv(inp, ifp)) + } + if (inp_restricted_recv(inp, ifp)) { continue; + } if (last) { struct mbuf *n = m_copy(m, 0, (int)M_COPYALL); @@ -235,7 +241,7 @@ rip_input(struct mbuf *m, int iphlen) #if NECP if (n && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0, - &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) { + &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) { m_freem(n); /* do not inject data to pcb */ skipit = 1; @@ -255,7 +261,7 @@ rip_input(struct mbuf *m, int iphlen) if ((last->inp_flags & INP_CONTROLOPTS) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { ret = ip_savecontrol(last, &opts, ip, n); if (ret != 0) { m_freem(n); @@ -289,7 +295,7 @@ rip_input(struct mbuf *m, int iphlen) skipit = 0; #if NECP if (last && !necp_socket_is_allowed_to_send_recv_v4(last, 0, 0, - &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) { + &ip->ip_dst, &ip->ip_src, ifp, NULL, NULL, NULL)) { m_freem(m); OSAddAtomic(1, &ipstat.ips_delivered); /* do not inject data to pcb */ @@ -307,9 +313,9 @@ rip_input(struct mbuf *m, int iphlen) if (skipit == 0) { if (last) { if ((last->inp_flags & INP_CONTROLOPTS) != 0 || - (last->inp_socket->so_options & 
SO_TIMESTAMP) != 0 || - (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (last->inp_socket->so_options & SO_TIMESTAMP) != 0 || + (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || + (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { ret = ip_savecontrol(last, &opts, ip, m); if (ret != 0) { m_freem(m); @@ -324,7 +330,7 @@ rip_input(struct mbuf *m, int iphlen) } so_recv_data_stat(last->inp_socket, m, 0); if (sbappendaddr(&last->inp_socket->so_rcv, - (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) { + (struct sockaddr *)&ripsrc, m, opts, NULL) != 0) { sorwakeup(last->inp_socket); } else { ipstat.ips_raw_sappend_fail++; @@ -382,13 +388,14 @@ rip_output( if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) { - if (m != NULL) + ) { + if (m != NULL) { m_freem(m); + } VERIFY(control == NULL); - return (inp == NULL ? EINVAL : EPROTOTYPE); + return inp == NULL ? EINVAL : EPROTOTYPE; } flags |= IP_OUTARGS; @@ -397,17 +404,21 @@ rip_output( ipoa.ipoa_boundif = inp->inp_boundifp->if_index; ipoa.ipoa_flags |= IPOAF_BOUND_IF; } - if (INP_NO_CELLULAR(inp)) + if (INP_NO_CELLULAR(inp)) { ipoa.ipoa_flags |= IPOAF_NO_CELLULAR; - if (INP_NO_EXPENSIVE(inp)) + } + if (INP_NO_EXPENSIVE(inp)) { ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE; - if (INP_AWDL_UNRESTRICTED(inp)) + } + if (INP_AWDL_UNRESTRICTED(inp)) { ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED; + } ipoa.ipoa_sotc = sotc; ipoa.ipoa_netsvctype = netsvctype; - if (inp->inp_flowhash == 0) + if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); + } /* * If the user handed us a complete IP packet, use it. @@ -416,11 +427,12 @@ rip_output( if ((inp->inp_flags & INP_HDRINCL) == 0) { if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) { m_freem(m); - return(EMSGSIZE); + return EMSGSIZE; } M_PREPEND(m, sizeof(struct ip), M_WAIT, 1); - if (m == NULL) + if (m == NULL) { return ENOBUFS; + } ip = mtod(m, struct ip *); ip->ip_tos = inp->inp_ip_tos; ip->ip_off = 0; @@ -432,27 +444,29 @@ rip_output( } else { if (m->m_pkthdr.len > IP_MAXPACKET) { m_freem(m); - return(EMSGSIZE); + return EMSGSIZE; } ip = mtod(m, struct ip *); /* don't allow both user specified and setsockopt options, - and don't allow packet length sizes that will crash */ - if (((IP_VHL_HL(ip->ip_vhl) != (sizeof (*ip) >> 2)) - && inp->inp_options) + * and don't allow packet length sizes that will crash */ + if (((IP_VHL_HL(ip->ip_vhl) != (sizeof(*ip) >> 2)) + && inp->inp_options) || (ip->ip_len > m->m_pkthdr.len) || (ip->ip_len < (IP_VHL_HL(ip->ip_vhl) << 2))) { m_freem(m); return EINVAL; } - if (ip->ip_id == 0 && !(rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off)))) + if (ip->ip_id == 0 && !(rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off)))) { ip->ip_id = ip_randomid(); + } /* XXX prevent ip_output from overwriting header fields */ flags |= IP_RAWOUTPUT; OSAddAtomic(1, &ipstat.ips_rawout); } - if (inp->inp_laddr.s_addr != INADDR_ANY) + if (inp->inp_laddr.s_addr != INADDR_ANY) { ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR; + } #if NECP { @@ -484,9 +498,9 @@ rip_output( if ((error = in_pcbladdr(inp, (struct sockaddr *)&to, &laddr, ipoa.ipoa_boundif, NULL, 1)) != 0) { printf("%s in_pcbladdr(%p) error %d\n", - __func__, inp, error); + __func__, inp, error); m_freem(m); - return (error); + return error; } inp_update_necp_policy(inp, (struct sockaddr *)&from, @@ -495,9 +509,9 @@ rip_output( } if 
(!necp_socket_is_allowed_to_send_recv_v4(inp, 0, 0, - &ip->ip_src, &ip->ip_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { + &ip->ip_src, &ip->ip_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { m_freem(m); - return(EHOSTUNREACH); + return EHOSTUNREACH; } necp_mark_packet_from_socket(m, inp, policy_id, route_rule_id, skip_policy_id); @@ -505,16 +519,18 @@ rip_output( if (net_qos_policy_restricted != 0) { struct ifnet *rt_ifp = NULL; - if (inp->inp_route.ro_rt != NULL) + if (inp->inp_route.ro_rt != NULL) { rt_ifp = inp->inp_route.ro_rt->rt_ifp; + } necp_socket_update_qos_marking(inp, inp->inp_route.ro_rt, NULL, route_rule_id); } } #endif /* NECP */ - if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) + if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; + } #if IPSEC if (inp->inp_sp != NULL && ipsec_setsocket(m, so) != 0) { @@ -523,8 +539,9 @@ rip_output( } #endif /*IPSEC*/ - if (ROUTE_UNUSABLE(&inp->inp_route)) + if (ROUTE_UNUSABLE(&inp->inp_route)) { ROUTE_RELEASE(&inp->inp_route); + } set_packet_service_class(m, so, sotc, 0); m->m_pkthdr.pkt_flowsrc = FLOWSRC_INPCB; @@ -534,18 +551,20 @@ rip_output( m->m_pkthdr.pkt_proto = inp->inp_ip_p; m->m_pkthdr.tx_rawip_pid = so->last_pid; m->m_pkthdr.tx_rawip_e_pid = so->e_pid; - if (so->so_flags & SOF_DELEGATED) + if (so->so_flags & SOF_DELEGATED) { m->m_pkthdr.tx_rawip_e_pid = so->e_pid; - else + } else { m->m_pkthdr.tx_rawip_e_pid = 0; + } #if CONFIG_MACF_NET mac_mbuf_label_associate_inpcb(inp, m); #endif imo = inp->inp_moptions; - if (imo != NULL) + if (imo != NULL) { IMO_ADDREF(imo); + } /* * The domain lock is held across ip_output, so it is okay * to pass the PCB cached route pointer directly to IP and @@ -555,24 +574,26 @@ rip_output( error = ip_output(m, inp->inp_options, &inp->inp_route, flags, imo, &ipoa); - if (imo != NULL) + if (imo != NULL) { IMO_REMREF(imo); + } if (inp->inp_route.ro_rt != NULL) { struct rtentry *rt = inp->inp_route.ro_rt; struct ifnet *outif; - if ((rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) || + if ((rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) || inp->inp_socket == NULL || !(inp->inp_socket->so_state & SS_ISCONNECTED)) { - rt = NULL; /* unusable */ + rt = NULL; /* unusable */ } /* * Always discard the cached route for unconnected * socket or if it is a multicast route. */ - if (rt == NULL) + if (rt == NULL) { ROUTE_RELEASE(&inp->inp_route); + } /* * If this is a connected socket and the destination @@ -592,23 +613,25 @@ rip_output( * denied access to it, generate an event. 
*/ if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) && - (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp))) - soevent(so, (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED)); + (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp))) { + soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); + } - return (error); + return error; } #if IPFIREWALL int load_ipfw(void) { - kern_return_t err; + kern_return_t err; ipfw_init(); #if DUMMYNET - if (!DUMMYNET_LOADED) + if (!DUMMYNET_LOADED) { ip_dn_init(); + } #endif /* DUMMYNET */ err = 0; @@ -622,13 +645,14 @@ load_ipfw(void) int rip_ctloutput(struct socket *so, struct sockopt *sopt) { - struct inpcb *inp = sotoinpcb(so); - int error, optval; + struct inpcb *inp = sotoinpcb(so); + int error, optval; /* Allow at this level */ if (sopt->sopt_level != IPPROTO_IP && - !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) - return (EINVAL); + !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) { + return EINVAL; + } error = 0; @@ -650,24 +674,28 @@ rip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_FW_GET: case IP_OLD_FW_ADD: case IP_OLD_FW_GET: - if (ip_fw_ctl_ptr == 0) + if (ip_fw_ctl_ptr == 0) { error = load_ipfw(); - if (ip_fw_ctl_ptr && error == 0) + } + if (ip_fw_ctl_ptr && error == 0) { error = ip_fw_ctl_ptr(sopt); - else + } else { error = ENOPROTOOPT; + } break; #endif /* IPFIREWALL */ #if DUMMYNET case IP_DUMMYNET_GET: - if (!DUMMYNET_LOADED) + if (!DUMMYNET_LOADED) { ip_dn_init(); - if (DUMMYNET_LOADED) + } + if (DUMMYNET_LOADED) { error = ip_dn_ctl_ptr(sopt); - else + } else { error = ENOPROTOOPT; - break ; + } + break; #endif /* DUMMYNET */ default: @@ -680,24 +708,28 @@ rip_ctloutput(struct socket *so, struct sockopt *sopt) switch (sopt->sopt_name) { case IP_HDRINCL: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; - if (optval) + } + if (optval) { inp->inp_flags |= INP_HDRINCL; - else + } else { inp->inp_flags &= ~INP_HDRINCL; + } break; case IP_STRIPHDR: error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); - if (error) + if (error) { break; - if (optval) + } + if (optval) { inp->inp_flags |= INP_STRIPHDR; - else + } else { inp->inp_flags &= ~INP_STRIPHDR; + } break; #if IPFIREWALL @@ -711,12 +743,14 @@ rip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_OLD_FW_FLUSH: case IP_OLD_FW_ZERO: case IP_OLD_FW_RESETLOG: - if (ip_fw_ctl_ptr == 0) + if (ip_fw_ctl_ptr == 0) { error = load_ipfw(); - if (ip_fw_ctl_ptr && error == 0) + } + if (ip_fw_ctl_ptr && error == 0) { error = ip_fw_ctl_ptr(sopt); - else + } else { error = ENOPROTOOPT; + } break; #endif /* IPFIREWALL */ @@ -724,19 +758,22 @@ rip_ctloutput(struct socket *so, struct sockopt *sopt) case IP_DUMMYNET_CONFIGURE: case IP_DUMMYNET_DEL: case IP_DUMMYNET_FLUSH: - if (!DUMMYNET_LOADED) + if (!DUMMYNET_LOADED) { ip_dn_init(); - if (DUMMYNET_LOADED) + } + if (DUMMYNET_LOADED) { error = ip_dn_ctl_ptr(sopt); - else - error = ENOPROTOOPT ; - break ; + } else { + error = ENOPROTOOPT; + } + break; #endif case SO_FLUSH: - if ((error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval))) != 0) + if ((error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval))) != 0) { break; + } error = inp_flush(inp, optval); break; @@ -748,7 +785,7 @@ rip_ctloutput(struct socket *so, struct sockopt *sopt) break; } - return (error); + return error; } /* @@ -774,7 +811,7 @@ rip_ctlinput( case PRC_IFDOWN: lck_rw_lock_shared(in_ifaddr_rwlock); for (ia = 
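
The two header-handling branches of rip_output above pivot on INP_HDRINCL. A minimal userland sketch (open_raw_hdrincl is a hypothetical helper, not an xnu API) of the option that selects the second branch, where rip_output sanity-checks the caller-supplied ip_vhl and ip_len instead of prepending a header; note that rip_attach refuses unprivileged callers with EPERM, so this requires privilege:

#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

static int
open_raw_hdrincl(void)
{
	int on = 1;
	int s = socket(AF_INET, SOCK_RAW, IPPROTO_UDP);	/* EPERM unless privileged */

	if (s < 0) {
		return -1;
	}
	if (setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)) == -1) {
		close(s);
		return -1;
	}
	return s;	/* caller now supplies complete IP packets */
}
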
in_ifaddrhead.tqh_first; ia; - ia = ia->ia_link.tqe_next) { + ia = ia->ia_link.tqe_next) { IFA_LOCK(&ia->ia_ifa); if (ia->ia_ifa.ifa_addr == sa && (ia->ia_flags & IFA_ROUTE)) { @@ -800,14 +837,15 @@ rip_ctlinput( } IFA_UNLOCK(&ia->ia_ifa); } - if (!done) + if (!done) { lck_rw_done(in_ifaddr_rwlock); + } break; case PRC_IFUP: lck_rw_lock_shared(in_ifaddr_rwlock); for (ia = in_ifaddrhead.tqh_first; ia; - ia = ia->ia_link.tqe_next) { + ia = ia->ia_link.tqe_next) { IFA_LOCK(&ia->ia_ifa); if (ia->ia_ifa.ifa_addr == sa) { /* keep it locked */ @@ -817,8 +855,9 @@ rip_ctlinput( } if (ia == NULL || (ia->ia_flags & IFA_ROUTE) || (ia->ia_ifa.ifa_debug & IFD_NOTREADY)) { - if (ia != NULL) + if (ia != NULL) { IFA_UNLOCK(&ia->ia_ifa); + } lck_rw_done(in_ifaddr_rwlock); return; } @@ -830,8 +869,9 @@ rip_ctlinput( iaifp = ia->ia_ifa.ifa_ifp; if ((iaifp->if_flags & IFF_LOOPBACK) - || (iaifp->if_flags & IFF_POINTOPOINT)) + || (iaifp->if_flags & IFF_POINTOPOINT)) { flags |= RTF_HOST; + } err = rtinit(&ia->ia_ifa, RTM_ADD, flags); if (err == 0) { @@ -844,8 +884,8 @@ rip_ctlinput( } } -u_int32_t rip_sendspace = RIPSNDQ; -u_int32_t rip_recvspace = RIPRCVQ; +u_int32_t rip_sendspace = RIPSNDQ; +u_int32_t rip_recvspace = RIPRCVQ; SYSCTL_INT(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW | CTLFLAG_LOCKED, &rip_sendspace, 0, "Maximum outgoing raw IP datagram size"); @@ -861,17 +901,21 @@ rip_attach(struct socket *so, int proto, struct proc *p) int error; inp = sotoinpcb(so); - if (inp) + if (inp) { panic("rip_attach"); - if ((so->so_state & SS_PRIV) == 0) - return (EPERM); + } + if ((so->so_state & SS_PRIV) == 0) { + return EPERM; + } error = soreserve(so, rip_sendspace, rip_recvspace); - if (error) + if (error) { return error; + } error = in_pcballoc(so, &ripcbinfo, p); - if (error) + if (error) { return error; + } inp = (struct inpcb *)so->so_pcb; inp->inp_vflag |= INP_IPV4; inp->inp_ip_p = proto; @@ -885,8 +929,9 @@ rip_detach(struct socket *so) struct inpcb *inp; inp = sotoinpcb(so); - if (inp == 0) + if (inp == 0) { panic("rip_detach"); + } in_pcbdetach(inp); return 0; } @@ -901,8 +946,9 @@ rip_abort(struct socket *so) __private_extern__ int rip_disconnect(struct socket *so) { - if ((so->so_state & SS_ISCONNECTED) == 0) + if ((so->so_state & SS_ISCONNECTED) == 0) { return ENOTCONN; + } return rip_abort(so); } @@ -917,24 +963,26 @@ rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p) if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) - return (inp == NULL ? EINVAL : EPROTOTYPE); + ) { + return inp == NULL ? 
EINVAL : EPROTOTYPE; + } - if (nam->sa_len != sizeof (struct sockaddr_in)) - return (EINVAL); + if (nam->sa_len != sizeof(struct sockaddr_in)) { + return EINVAL; + } /* Sanitized local copy for interface address searches */ - bzero(&sin, sizeof (sin)); + bzero(&sin, sizeof(sin)); sin.sin_family = AF_INET; - sin.sin_len = sizeof (struct sockaddr_in); + sin.sin_len = sizeof(struct sockaddr_in); sin.sin_addr.s_addr = SIN(nam)->sin_addr.s_addr; if (TAILQ_EMPTY(&ifnet_head) || (sin.sin_family != AF_INET && sin.sin_family != AF_IMPLINK) || (sin.sin_addr.s_addr && (ifa = ifa_ifwithaddr(SA(&sin))) == 0)) { - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } else if (ifa) { /* * Opportunistically determine the outbound @@ -952,7 +1000,7 @@ rip_bind(struct socket *so, struct sockaddr *nam, struct proc *p) inp->inp_laddr = sin.sin_addr; inp->inp_last_outifp = outif; - return (0); + return 0; } __private_extern__ int @@ -963,17 +1011,21 @@ rip_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p) if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) - return (inp == NULL ? EINVAL : EPROTOTYPE); - if (nam->sa_len != sizeof(*addr)) + ) { + return inp == NULL ? EINVAL : EPROTOTYPE; + } + if (nam->sa_len != sizeof(*addr)) { return EINVAL; - if (TAILQ_EMPTY(&ifnet_head)) + } + if (TAILQ_EMPTY(&ifnet_head)) { return EADDRNOTAVAIL; + } if ((addr->sin_family != AF_INET) && - (addr->sin_family != AF_IMPLINK)) + (addr->sin_family != AF_IMPLINK)) { return EAFNOSUPPORT; + } if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) { so->so_flags1 |= SOF1_CONNECT_COUNTED; @@ -1004,13 +1056,14 @@ rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE)) + || (necp_socket_should_use_flow_divert(inp) && (error = EPROTOTYPE)) #endif /* NECP */ - ) { - if (inp == NULL) + ) { + if (inp == NULL) { error = EINVAL; - else + } else { error = EPROTOTYPE; + } goto bad; } @@ -1027,17 +1080,19 @@ rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, } dst = ((struct sockaddr_in *)(void *)nam)->sin_addr.s_addr; } - return (rip_output(m, so, dst, control)); + return rip_output(m, so, dst, control); bad: VERIFY(error != 0); - if (m != NULL) + if (m != NULL) { m_freem(m); - if (control != NULL) + } + if (control != NULL) { m_freem(control); + } - return (error); + return error; } /* note: rip_unlock is called from different protos instead of the generic socket_unlock, @@ -1049,10 +1104,11 @@ rip_unlock(struct socket *so, int refcount, void *debug) void *lr_saved; struct inpcb *inp = sotoinpcb(so); - if (debug == NULL) + if (debug == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = debug; + } if (refcount) { if (so->so_usecount <= 0) { @@ -1067,21 +1123,21 @@ rip_unlock(struct socket *so, int refcount, void *debug) lck_rw_lock_exclusive(ripcbinfo.ipi_lock); if (inp->inp_state != INPCB_STATE_DEAD) { #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - else + } else #endif /* INET6 */ in_pcbdetach(inp); } in_pcbdispose(inp); lck_rw_done(ripcbinfo.ipi_lock); - return(0); + return 0; } } so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; lck_mtx_unlock(so->so_proto->pr_domain->dom_mtx); - return(0); + return 
0; } static int @@ -1101,7 +1157,7 @@ rip_pcblist SYSCTL_HANDLER_ARGS if (req->oldptr == USER_ADDR_NULL) { n = ripcbinfo.ipi_count; req->oldidx = 2 * (sizeof xig) - + (n + n/8) * sizeof(struct xinpcb); + + (n + n / 8) * sizeof(struct xinpcb); lck_rw_done(ripcbinfo.ipi_lock); return 0; } @@ -1127,13 +1183,13 @@ rip_pcblist SYSCTL_HANDLER_ARGS lck_rw_done(ripcbinfo.ipi_lock); return error; } - /* - * We are done if there is no pcb - */ - if (n == 0) { - lck_rw_done(ripcbinfo.ipi_lock); - return 0; - } + /* + * We are done if there is no pcb + */ + if (n == 0) { + lck_rw_done(ripcbinfo.ipi_lock); + return 0; + } inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK); if (inp_list == 0) { @@ -1142,9 +1198,10 @@ rip_pcblist SYSCTL_HANDLER_ARGS } for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n; - inp = inp->inp_list.le_next) { - if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) + inp = inp->inp_list.le_next) { + if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) { inp_list[i++] = inp; + } } n = i; @@ -1158,8 +1215,9 @@ rip_pcblist SYSCTL_HANDLER_ARGS xi.xi_len = sizeof xi; /* XXX should avoid extra copy */ inpcb_to_compat(inp, &xi.xi_inp); - if (inp->inp_socket) + if (inp->inp_socket) { sotoxsocket(inp->inp_socket, &xi.xi_socket); + } error = SYSCTL_OUT(req, &xi, sizeof xi); } } @@ -1183,9 +1241,9 @@ rip_pcblist SYSCTL_HANDLER_ARGS return error; } -SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - rip_pcblist, "S,xinpcb", "List of active raw IP sockets"); +SYSCTL_PROC(_net_inet_raw, OID_AUTO /*XXX*/, pcblist, + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + rip_pcblist, "S,xinpcb", "List of active raw IP sockets"); #if !CONFIG_EMBEDDED @@ -1193,103 +1251,105 @@ static int rip_pcblist64 SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - int error, i, n; - struct inpcb *inp, **inp_list; - inp_gen_t gencnt; - struct xinpgen xig; - - /* - * The process of preparing the TCB list is too time-consuming and - * resource-intensive to repeat twice on every request. - */ - lck_rw_lock_exclusive(ripcbinfo.ipi_lock); - if (req->oldptr == USER_ADDR_NULL) { - n = ripcbinfo.ipi_count; - req->oldidx = 2 * (sizeof xig) - + (n + n/8) * sizeof(struct xinpcb64); - lck_rw_done(ripcbinfo.ipi_lock); - return 0; - } - - if (req->newptr != USER_ADDR_NULL) { - lck_rw_done(ripcbinfo.ipi_lock); - return EPERM; - } - - /* - * OK, now we're committed to doing something. 
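
The oldptr == USER_ADDR_NULL branch above sizes the reply with slack ((n + n/8) entries) precisely so userland can do the usual two-call sysctl dance. An illustrative caller side (not xnu code; the generation-count comparison described in the handler is left as a comment):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t len = 0;
	void *buf;

	/* first call: NULL buffer returns the padded size estimate */
	if (sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0) == -1) {
		return 1;
	}
	if ((buf = malloc(len)) == NULL) {
		return 1;
	}
	/* second call: fetch the records; a trailing xinpgen whose xig_gen
	 * differs from the leading one means the list changed mid-copy and
	 * the caller should retry */
	if (sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0) == -1) {
		free(buf);
		return 1;	/* e.g. the pcb list outgrew the estimate */
	}
	printf("pcblist: %zu bytes\n", len);
	free(buf);
	return 0;
}
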
- */ - gencnt = ripcbinfo.ipi_gencnt; - n = ripcbinfo.ipi_count; - - bzero(&xig, sizeof(xig)); - xig.xig_len = sizeof xig; - xig.xig_count = n; - xig.xig_gen = gencnt; - xig.xig_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xig, sizeof xig); - if (error) { - lck_rw_done(ripcbinfo.ipi_lock); - return error; - } - /* - * We are done if there is no pcb - */ - if (n == 0) { - lck_rw_done(ripcbinfo.ipi_lock); - return 0; - } - - inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK); - if (inp_list == 0) { - lck_rw_done(ripcbinfo.ipi_lock); - return ENOMEM; - } - - for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n; - inp = inp->inp_list.le_next) { - if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) - inp_list[i++] = inp; - } - n = i; - - error = 0; - for (i = 0; i < n; i++) { - inp = inp_list[i]; - if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) { - struct xinpcb64 xi; - - bzero(&xi, sizeof(xi)); - xi.xi_len = sizeof xi; - inpcb_to_xinpcb64(inp, &xi); - if (inp->inp_socket) - sotoxsocket64(inp->inp_socket, &xi.xi_socket); - error = SYSCTL_OUT(req, &xi, sizeof xi); - } - } - if (!error) { - /* - * Give the user an updated idea of our state. - * If the generation differs from what we told - * her before, she knows that something happened - * while we were processing this request, and it - * might be necessary to retry. - */ - bzero(&xig, sizeof(xig)); - xig.xig_len = sizeof xig; - xig.xig_gen = ripcbinfo.ipi_gencnt; - xig.xig_sogen = so_gencnt; - xig.xig_count = ripcbinfo.ipi_count; - error = SYSCTL_OUT(req, &xig, sizeof xig); - } - FREE(inp_list, M_TEMP); - lck_rw_done(ripcbinfo.ipi_lock); - return error; + int error, i, n; + struct inpcb *inp, **inp_list; + inp_gen_t gencnt; + struct xinpgen xig; + + /* + * The process of preparing the TCB list is too time-consuming and + * resource-intensive to repeat twice on every request. + */ + lck_rw_lock_exclusive(ripcbinfo.ipi_lock); + if (req->oldptr == USER_ADDR_NULL) { + n = ripcbinfo.ipi_count; + req->oldidx = 2 * (sizeof xig) + + (n + n / 8) * sizeof(struct xinpcb64); + lck_rw_done(ripcbinfo.ipi_lock); + return 0; + } + + if (req->newptr != USER_ADDR_NULL) { + lck_rw_done(ripcbinfo.ipi_lock); + return EPERM; + } + + /* + * OK, now we're committed to doing something. + */ + gencnt = ripcbinfo.ipi_gencnt; + n = ripcbinfo.ipi_count; + + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof xig; + xig.xig_count = n; + xig.xig_gen = gencnt; + xig.xig_sogen = so_gencnt; + error = SYSCTL_OUT(req, &xig, sizeof xig); + if (error) { + lck_rw_done(ripcbinfo.ipi_lock); + return error; + } + /* + * We are done if there is no pcb + */ + if (n == 0) { + lck_rw_done(ripcbinfo.ipi_lock); + return 0; + } + + inp_list = _MALLOC(n * sizeof *inp_list, M_TEMP, M_WAITOK); + if (inp_list == 0) { + lck_rw_done(ripcbinfo.ipi_lock); + return ENOMEM; + } + + for (inp = ripcbinfo.ipi_listhead->lh_first, i = 0; inp && i < n; + inp = inp->inp_list.le_next) { + if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) { + inp_list[i++] = inp; + } + } + n = i; + + error = 0; + for (i = 0; i < n; i++) { + inp = inp_list[i]; + if (inp->inp_gencnt <= gencnt && inp->inp_state != INPCB_STATE_DEAD) { + struct xinpcb64 xi; + + bzero(&xi, sizeof(xi)); + xi.xi_len = sizeof xi; + inpcb_to_xinpcb64(inp, &xi); + if (inp->inp_socket) { + sotoxsocket64(inp->inp_socket, &xi.xi_socket); + } + error = SYSCTL_OUT(req, &xi, sizeof xi); + } + } + if (!error) { + /* + * Give the user an updated idea of our state. 
+ * If the generation differs from what we told + * her before, she knows that something happened + * while we were processing this request, and it + * might be necessary to retry. + */ + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof xig; + xig.xig_gen = ripcbinfo.ipi_gencnt; + xig.xig_sogen = so_gencnt; + xig.xig_count = ripcbinfo.ipi_count; + error = SYSCTL_OUT(req, &xig, sizeof xig); + } + FREE(inp_list, M_TEMP); + lck_rw_done(ripcbinfo.ipi_lock); + return error; } SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist64, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + rip_pcblist64, "S,xinpcb64", "List of active raw IP sockets"); #endif /* !CONFIG_EMBEDDED */ @@ -1306,22 +1366,22 @@ rip_pcblist_n SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_net_inet_raw, OID_AUTO, pcblist_n, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + rip_pcblist_n, "S,xinpcb_n", "List of active raw IP sockets"); struct pr_usrreqs rip_usrreqs = { - .pru_abort = rip_abort, - .pru_attach = rip_attach, - .pru_bind = rip_bind, - .pru_connect = rip_connect, - .pru_control = in_control, - .pru_detach = rip_detach, - .pru_disconnect = rip_disconnect, - .pru_peeraddr = in_getpeeraddr, - .pru_send = rip_send, - .pru_shutdown = rip_shutdown, - .pru_sockaddr = in_getsockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = rip_abort, + .pru_attach = rip_attach, + .pru_bind = rip_bind, + .pru_connect = rip_connect, + .pru_control = in_control, + .pru_detach = rip_detach, + .pru_disconnect = rip_disconnect, + .pru_peeraddr = in_getpeeraddr, + .pru_send = rip_send, + .pru_shutdown = rip_shutdown, + .pru_sockaddr = in_getsockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; /* DSEP Review Done pl-20051213-v02 @3253 */ diff --git a/bsd/netinet/tcp.h b/bsd/netinet/tcp.h index b64798dd2..369a709d7 100644 --- a/bsd/netinet/tcp.h +++ b/bsd/netinet/tcp.h @@ -69,86 +69,86 @@ #include /* __uint32_t */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -typedef __uint32_t tcp_seq; -typedef __uint32_t tcp_cc; /* connection count per rfc1644 */ +typedef __uint32_t tcp_seq; +typedef __uint32_t tcp_cc; /* connection count per rfc1644 */ -#define tcp6_seq tcp_seq /* for KAME src sync over BSD*'s */ -#define tcp6hdr tcphdr /* for KAME src sync over BSD*'s */ +#define tcp6_seq tcp_seq /* for KAME src sync over BSD*'s */ +#define tcp6hdr tcphdr /* for KAME src sync over BSD*'s */ /* * TCP header. * Per RFC 793, September, 1981. 
*/ struct tcphdr { - unsigned short th_sport; /* source port */ - unsigned short th_dport; /* destination port */ - tcp_seq th_seq; /* sequence number */ - tcp_seq th_ack; /* acknowledgement number */ + unsigned short th_sport; /* source port */ + unsigned short th_dport; /* destination port */ + tcp_seq th_seq; /* sequence number */ + tcp_seq th_ack; /* acknowledgement number */ #if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN - unsigned int th_x2:4, /* (unused) */ - th_off:4; /* data offset */ + unsigned int th_x2:4, /* (unused) */ + th_off:4; /* data offset */ #endif #if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN - unsigned int th_off:4, /* data offset */ - th_x2:4; /* (unused) */ + unsigned int th_off:4, /* data offset */ + th_x2:4; /* (unused) */ #endif - unsigned char th_flags; -#define TH_FIN 0x01 -#define TH_SYN 0x02 -#define TH_RST 0x04 -#define TH_PUSH 0x08 -#define TH_ACK 0x10 -#define TH_URG 0x20 -#define TH_ECE 0x40 -#define TH_CWR 0x80 -#define TH_FLAGS (TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG|TH_ECE|TH_CWR) -#define TH_ACCEPT (TH_FIN|TH_SYN|TH_RST|TH_ACK) - - unsigned short th_win; /* window */ - unsigned short th_sum; /* checksum */ - unsigned short th_urp; /* urgent pointer */ + unsigned char th_flags; +#define TH_FIN 0x01 +#define TH_SYN 0x02 +#define TH_RST 0x04 +#define TH_PUSH 0x08 +#define TH_ACK 0x10 +#define TH_URG 0x20 +#define TH_ECE 0x40 +#define TH_CWR 0x80 +#define TH_FLAGS (TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG|TH_ECE|TH_CWR) +#define TH_ACCEPT (TH_FIN|TH_SYN|TH_RST|TH_ACK) + + unsigned short th_win; /* window */ + unsigned short th_sum; /* checksum */ + unsigned short th_urp; /* urgent pointer */ }; -#define TCPOPT_EOL 0 -#define TCPOPT_NOP 1 -#define TCPOPT_MAXSEG 2 -#define TCPOLEN_MAXSEG 4 -#define TCPOPT_WINDOW 3 -#define TCPOLEN_WINDOW 3 -#define TCPOPT_SACK_PERMITTED 4 /* Experimental */ -#define TCPOLEN_SACK_PERMITTED 2 -#define TCPOPT_SACK 5 /* Experimental */ -#define TCPOLEN_SACK 8 /* len of sack block */ -#define TCPOPT_TIMESTAMP 8 -#define TCPOLEN_TIMESTAMP 10 -#define TCPOLEN_TSTAMP_APPA (TCPOLEN_TIMESTAMP+2) /* appendix A */ -#define TCPOPT_TSTAMP_HDR \ +#define TCPOPT_EOL 0 +#define TCPOPT_NOP 1 +#define TCPOPT_MAXSEG 2 +#define TCPOLEN_MAXSEG 4 +#define TCPOPT_WINDOW 3 +#define TCPOLEN_WINDOW 3 +#define TCPOPT_SACK_PERMITTED 4 /* Experimental */ +#define TCPOLEN_SACK_PERMITTED 2 +#define TCPOPT_SACK 5 /* Experimental */ +#define TCPOLEN_SACK 8 /* len of sack block */ +#define TCPOPT_TIMESTAMP 8 +#define TCPOLEN_TIMESTAMP 10 +#define TCPOLEN_TSTAMP_APPA (TCPOLEN_TIMESTAMP+2) /* appendix A */ +#define TCPOPT_TSTAMP_HDR \ (TCPOPT_NOP<<24|TCPOPT_NOP<<16|TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP) -#define MAX_TCPOPTLEN 40 /* Absolute maximum TCP options len */ +#define MAX_TCPOPTLEN 40 /* Absolute maximum TCP options len */ -#define TCPOPT_CC 11 /* CC options: RFC-1644 */ -#define TCPOPT_CCNEW 12 -#define TCPOPT_CCECHO 13 -#define TCPOLEN_CC 6 -#define TCPOLEN_CC_APPA (TCPOLEN_CC+2) -#define TCPOPT_CC_HDR(ccopt) \ +#define TCPOPT_CC 11 /* CC options: RFC-1644 */ +#define TCPOPT_CCNEW 12 +#define TCPOPT_CCECHO 13 +#define TCPOLEN_CC 6 +#define TCPOLEN_CC_APPA (TCPOLEN_CC+2) +#define TCPOPT_CC_HDR(ccopt) \ (TCPOPT_NOP<<24|TCPOPT_NOP<<16|(ccopt)<<8|TCPOLEN_CC) -#define TCPOPT_SIGNATURE 19 /* Keyed MD5: RFC 2385 */ -#define TCPOLEN_SIGNATURE 18 +#define TCPOPT_SIGNATURE 19 /* Keyed MD5: RFC 2385 */ +#define TCPOLEN_SIGNATURE 18 #if MPTCP -#define TCPOPT_MULTIPATH 30 +#define TCPOPT_MULTIPATH 30 #endif -#define TCPOPT_FASTOPEN 34 -#define TCPOLEN_FASTOPEN_REQ 2 
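
TCPOPT_TSTAMP_HDR above packs the canonical NOP/NOP/TIMESTAMP option prefix into one word so the input path can match it with a single 32-bit comparison. A quick standalone check (constants copied from this header):

#include <stdio.h>

#define TCPOPT_NOP		1
#define TCPOPT_TIMESTAMP	8
#define TCPOLEN_TIMESTAMP	10
#define TCPOPT_TSTAMP_HDR \
	(TCPOPT_NOP<<24|TCPOPT_NOP<<16|TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)

int
main(void)
{
	printf("%#010x\n", TCPOPT_TSTAMP_HDR);	/* prints 0x0101080a */
	return 0;
}
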
+#define TCPOPT_FASTOPEN 34 +#define TCPOLEN_FASTOPEN_REQ 2 /* Option definitions */ -#define TCPOPT_SACK_PERMIT_HDR \ +#define TCPOPT_SACK_PERMIT_HDR \ (TCPOPT_NOP<<24|TCPOPT_NOP<<16|TCPOPT_SACK_PERMITTED<<8|TCPOLEN_SACK_PERMITTED) -#define TCPOPT_SACK_HDR (TCPOPT_NOP<<24|TCPOPT_NOP<<16|TCPOPT_SACK<<8) +#define TCPOPT_SACK_HDR (TCPOPT_NOP<<24|TCPOPT_NOP<<16|TCPOPT_SACK<<8) /* Miscellaneous constants */ -#define MAX_SACK_BLKS 6 /* Max # SACK blocks stored at sender side */ +#define MAX_SACK_BLKS 6 /* Max # SACK blocks stored at sender side */ /* * A SACK option that specifies n blocks will have a length of (8*n + 2) @@ -156,7 +156,7 @@ struct tcphdr { * maximum of 4 blocks. */ -#define TCP_MAX_SACK 4 /* MAX # SACKs sent in any segment */ +#define TCP_MAX_SACK 4 /* MAX # SACKs sent in any segment */ /* @@ -165,7 +165,7 @@ struct tcphdr { * but 512 is probably more convenient. * This should be defined as MIN(512, IP_MSS - sizeof (struct tcpiphdr)). */ -#define TCP_MSS 512 +#define TCP_MSS 512 /* * TCP_MINMSS is defined to be 216 which is fine for the smallest @@ -176,7 +176,7 @@ struct tcphdr { * See tcp_subr.c tcp_minmss SYSCTL declaration for more comments. * Setting this to "0" disables the minmss check. */ -#define TCP_MINMSS 216 +#define TCP_MINMSS 216 /* * Default maximum segment size for TCP6. @@ -184,76 +184,76 @@ struct tcphdr { * but 1024 is probably more convenient. (xxx kazu in doubt) * This should be defined as MIN(1024, IP6_MSS - sizeof (struct tcpip6hdr)) */ -#define TCP6_MSS 1024 +#define TCP6_MSS 1024 -#define TCP_MAXWIN 65535 /* largest value for (unscaled) window */ -#define TTCP_CLIENT_SND_WND 4096 /* dflt send window for T/TCP client */ +#define TCP_MAXWIN 65535 /* largest value for (unscaled) window */ +#define TTCP_CLIENT_SND_WND 4096 /* dflt send window for T/TCP client */ -#define TCP_MAX_WINSHIFT 14 /* maximum window shift */ +#define TCP_MAX_WINSHIFT 14 /* maximum window shift */ -#define TCP_MAXHLEN (0xf<<2) /* max length of header in bytes */ -#define TCP_MAXOLEN (TCP_MAXHLEN - sizeof(struct tcphdr)) - /* max space left for options */ +#define TCP_MAXHLEN (0xf<<2) /* max length of header in bytes */ +#define TCP_MAXOLEN (TCP_MAXHLEN - sizeof(struct tcphdr)) +/* max space left for options */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ /* * User-settable options (used with setsockopt). */ -#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */ +#define TCP_NODELAY 0x01 /* don't delay send to coalesce packets */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define TCP_MAXSEG 0x02 /* set maximum segment size */ +#define TCP_MAXSEG 0x02 /* set maximum segment size */ #define TCP_NOPUSH 0x04 /* don't push last block of write */ #define TCP_NOOPT 0x08 /* don't use TCP options */ #define TCP_KEEPALIVE 0x10 /* idle time used when SO_KEEPALIVE is enabled */ #define TCP_CONNECTIONTIMEOUT 0x20 /* connection timeout */ -#define PERSIST_TIMEOUT 0x40 /* time after which a connection in - * persist timeout will terminate. - * see draft-ananth-tcpm-persist-02.txt - */ -#define TCP_RXT_CONNDROPTIME 0x80 /* time after which tcp retransmissions will be - * stopped and the connection will be dropped - */ -#define TCP_RXT_FINDROP 0x100 /* when this option is set, drop a connection - * after retransmitting the FIN 3 times. It will - * prevent holding too many mbufs in socket - * buffer queues. 
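
The (8*n + 2) sizing noted above is why TCP_MAX_SACK is 4 rather than MAX_SACK_BLKS. Spelled out (assuming the common 12-byte timestamp prefix, TCPOLEN_TSTAMP_APPA; illustrative arithmetic, not xnu code):

#include <stdio.h>

int
main(void)
{
	int max_tcpoptlen = 40;	/* MAX_TCPOPTLEN */
	int tstamp_appa = 12;	/* TCPOLEN_TSTAMP_APPA */

	/* a SACK option with n blocks takes 8*n + 2 bytes of option space */
	printf("SACK blocks, no timestamps:   %d\n",
	    (max_tcpoptlen - 2) / 8);			/* 4 */
	printf("SACK blocks, with timestamps: %d\n",
	    (max_tcpoptlen - tstamp_appa - 2) / 8);	/* 3 */
	return 0;
}
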
- */ -#define TCP_KEEPINTVL 0x101 /* interval between keepalives */ -#define TCP_KEEPCNT 0x102 /* number of keepalives before close */ -#define TCP_SENDMOREACKS 0x103 /* always ack every other packet */ -#define TCP_ENABLE_ECN 0x104 /* Enable ECN on a connection */ -#define TCP_FASTOPEN 0x105 /* Enable/Disable TCP Fastopen on this socket */ -#define TCP_CONNECTION_INFO 0x106 /* State of TCP connection */ +#define PERSIST_TIMEOUT 0x40 /* time after which a connection in + * persist timeout will terminate. + * see draft-ananth-tcpm-persist-02.txt + */ +#define TCP_RXT_CONNDROPTIME 0x80 /* time after which tcp retransmissions will be + * stopped and the connection will be dropped + */ +#define TCP_RXT_FINDROP 0x100 /* when this option is set, drop a connection + * after retransmitting the FIN 3 times. It will + * prevent holding too many mbufs in socket + * buffer queues. + */ +#define TCP_KEEPINTVL 0x101 /* interval between keepalives */ +#define TCP_KEEPCNT 0x102 /* number of keepalives before close */ +#define TCP_SENDMOREACKS 0x103 /* always ack every other packet */ +#define TCP_ENABLE_ECN 0x104 /* Enable ECN on a connection */ +#define TCP_FASTOPEN 0x105 /* Enable/Disable TCP Fastopen on this socket */ +#define TCP_CONNECTION_INFO 0x106 /* State of TCP connection */ #ifdef PRIVATE -#define TCP_INFO 0x200 /* retrieve tcp_info structure */ -#define TCP_MEASURE_SND_BW 0x202 /* Measure sender's bandwidth for this connection */ +#define TCP_INFO 0x200 /* retrieve tcp_info structure */ +#define TCP_MEASURE_SND_BW 0x202 /* Measure sender's bandwidth for this connection */ #endif /* PRIVATE */ -#define TCP_NOTSENT_LOWAT 0x201 /* Low water mark for TCP unsent data */ +#define TCP_NOTSENT_LOWAT 0x201 /* Low water mark for TCP unsent data */ #ifdef PRIVATE -#define TCP_MEASURE_BW_BURST 0x203 /* Burst size to use for bandwidth measurement */ -#define TCP_PEER_PID 0x204 /* Lookup pid of the process we're connected to */ -#define TCP_ADAPTIVE_READ_TIMEOUT 0x205 /* Read timeout used as a multiple of RTT */ +#define TCP_MEASURE_BW_BURST 0x203 /* Burst size to use for bandwidth measurement */ +#define TCP_PEER_PID 0x204 /* Lookup pid of the process we're connected to */ +#define TCP_ADAPTIVE_READ_TIMEOUT 0x205 /* Read timeout used as a multiple of RTT */ /* * Enable message delivery on a socket, this feature is currently unsupported and * is subjected to change in future. 
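
The keepalive options above compose in the obvious way from userland. A hedged sketch (tune_keepalive and the interval values are illustrative, not from xnu): TCP_KEEPALIVE sets the idle time before the first probe, TCP_KEEPINTVL the gap between probes, and TCP_KEEPCNT how many unanswered probes drop the connection.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int
tune_keepalive(int fd)
{
	int idle = 60, intvl = 10, cnt = 6, on = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) == -1) {
		return -1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &idle, sizeof(idle)) == -1) {
		return -1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) == -1) {
		return -1;
	}
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}
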
*/ -#define TCP_ENABLE_MSGS 0x206 -#define TCP_ADAPTIVE_WRITE_TIMEOUT 0x207 /* Write timeout used as a multiple of RTT */ -#define TCP_NOTIMEWAIT 0x208 /* Avoid going into time-wait */ -#define TCP_DISABLE_BLACKHOLE_DETECTION 0x209 /* disable PMTU blackhole detection */ -#define TCP_ECN_MODE 0x210 /* fine grain control for A/B testing */ -#define TCP_KEEPALIVE_OFFLOAD 0x211 /* offload keep alive processing to firmware */ +#define TCP_ENABLE_MSGS 0x206 +#define TCP_ADAPTIVE_WRITE_TIMEOUT 0x207 /* Write timeout used as a multiple of RTT */ +#define TCP_NOTIMEWAIT 0x208 /* Avoid going into time-wait */ +#define TCP_DISABLE_BLACKHOLE_DETECTION 0x209 /* disable PMTU blackhole detection */ +#define TCP_ECN_MODE 0x210 /* fine grain control for A/B testing */ +#define TCP_KEEPALIVE_OFFLOAD 0x211 /* offload keep alive processing to firmware */ /* * TCP_ECN_MODE values */ -#define ECN_MODE_DEFAULT 0x0 /* per interface or system wide default */ -#define ECN_MODE_ENABLE 0x1 /* force enable ECN on connection */ -#define ECN_MODE_DISABLE 0x2 /* force disable ECN on connection */ +#define ECN_MODE_DEFAULT 0x0 /* per interface or system wide default */ +#define ECN_MODE_ENABLE 0x1 /* force enable ECN on connection */ +#define ECN_MODE_DISABLE 0x2 /* force disable ECN on connection */ /* * TCP_NOTIFY_ACKNOWLEDGEMENT @@ -271,53 +271,53 @@ struct tcphdr { * returned if they have been successfully acknowledged in each call. */ -#define TCP_MAX_NOTIFY_ACK 10 +#define TCP_MAX_NOTIFY_ACK 10 -typedef u_int32_t tcp_notify_ack_id_t; +typedef u_int32_t tcp_notify_ack_id_t; struct tcp_notify_ack_complete { - u_int32_t notify_pending; /* still pending */ - u_int32_t notify_complete_count; + u_int32_t notify_pending; /* still pending */ + u_int32_t notify_complete_count; tcp_notify_ack_id_t notify_complete_id[TCP_MAX_NOTIFY_ACK]; }; -#define TCP_NOTIFY_ACKNOWLEDGEMENT 0x212 /* Notify when data is acknowledged */ -#define MPTCP_SERVICE_TYPE 0x213 /* MPTCP Service type */ -#define TCP_FASTOPEN_FORCE_HEURISTICS 0x214 /* Make sure TFO-heuristics never get disabled */ +#define TCP_NOTIFY_ACKNOWLEDGEMENT 0x212 /* Notify when data is acknowledged */ +#define MPTCP_SERVICE_TYPE 0x213 /* MPTCP Service type */ +#define TCP_FASTOPEN_FORCE_HEURISTICS 0x214 /* Make sure TFO-heuristics never get disabled */ -#define MPTCP_SVCTYPE_HANDOVER 0 /* Default 0 */ -#define MPTCP_SVCTYPE_INTERACTIVE 1 -#define MPTCP_SVCTYPE_AGGREGATE 2 -#define MPTCP_SVCTYPE_MAX 3 +#define MPTCP_SVCTYPE_HANDOVER 0 /* Default 0 */ +#define MPTCP_SVCTYPE_INTERACTIVE 1 +#define MPTCP_SVCTYPE_AGGREGATE 2 +#define MPTCP_SVCTYPE_MAX 3 /* * Specify minimum time in seconds before which an established * TCP connection will not be dropped when there is no response from the * peer */ -#define TCP_RXT_MINIMUM_TIMEOUT 0x215 +#define TCP_RXT_MINIMUM_TIMEOUT 0x215 -#define TCP_RXT_MINIMUM_TIMEOUT_LIMIT (5 * 60) /* Limit is 5 minutes */ +#define TCP_RXT_MINIMUM_TIMEOUT_LIMIT (5 * 60) /* Limit is 5 minutes */ -#define MPTCP_ALTERNATE_PORT 0x216 +#define MPTCP_ALTERNATE_PORT 0x216 /* * The TCP_INFO socket option is a private API and is subject to change */ #pragma pack(4) -#define TCPI_OPT_TIMESTAMPS 0x01 -#define TCPI_OPT_SACK 0x02 -#define TCPI_OPT_WSCALE 0x04 -#define TCPI_OPT_ECN 0x08 +#define TCPI_OPT_TIMESTAMPS 0x01 +#define TCPI_OPT_SACK 0x02 +#define TCPI_OPT_WSCALE 0x04 +#define TCPI_OPT_ECN 0x08 -#define TCPI_FLAG_LOSSRECOVERY 0x01 /* Currently in loss recovery */ -#define TCPI_FLAG_STREAMING_ON 0x02 /* Streaming detection on */ +#define TCPI_FLAG_LOSSRECOVERY 0x01 
/* Currently in loss recovery */ +#define TCPI_FLAG_STREAMING_ON 0x02 /* Streaming detection on */ struct tcp_conn_status { - unsigned int probe_activated : 1; - unsigned int write_probe_failed : 1; - unsigned int read_probe_failed : 1; - unsigned int conn_probe_failed : 1; + unsigned int probe_activated : 1; + unsigned int write_probe_failed : 1; + unsigned int read_probe_failed : 1; + unsigned int conn_probe_failed : 1; }; /* @@ -325,114 +325,114 @@ struct tcp_conn_status { * binary compatibility. */ struct tcp_info { - u_int8_t tcpi_state; /* TCP FSM state. */ - u_int8_t tcpi_options; /* Options enabled on conn. */ - u_int8_t tcpi_snd_wscale; /* RFC1323 send shift value. */ - u_int8_t tcpi_rcv_wscale; /* RFC1323 recv shift value. */ - - u_int32_t tcpi_flags; /* extra flags (TCPI_FLAG_xxx) */ - - u_int32_t tcpi_rto; /* Retransmission timeout in milliseconds */ - u_int32_t tcpi_snd_mss; /* Max segment size for send. */ - u_int32_t tcpi_rcv_mss; /* Max segment size for receive. */ - - u_int32_t tcpi_rttcur; /* Most recent value of RTT */ - u_int32_t tcpi_srtt; /* Smoothed RTT */ - u_int32_t tcpi_rttvar; /* RTT variance */ - u_int32_t tcpi_rttbest; /* Best RTT we've seen */ - - u_int32_t tcpi_snd_ssthresh; /* Slow start threshold. */ - u_int32_t tcpi_snd_cwnd; /* Send congestion window. */ - - u_int32_t tcpi_rcv_space; /* Advertised recv window. */ - - u_int32_t tcpi_snd_wnd; /* Advertised send window. */ - u_int32_t tcpi_snd_nxt; /* Next egress seqno */ - u_int32_t tcpi_rcv_nxt; /* Next ingress seqno */ - - int32_t tcpi_last_outif; /* if_index of interface used to send last */ - u_int32_t tcpi_snd_sbbytes; /* bytes in snd buffer including data inflight */ - - u_int64_t tcpi_txpackets __attribute__((aligned(8))); /* total packets sent */ - u_int64_t tcpi_txbytes __attribute__((aligned(8))); - /* total bytes sent */ - u_int64_t tcpi_txretransmitbytes __attribute__((aligned(8))); - /* total bytes retransmitted */ - u_int64_t tcpi_txunacked __attribute__((aligned(8))); - /* current number of bytes not acknowledged */ - u_int64_t tcpi_rxpackets __attribute__((aligned(8))); /* total packets received */ - u_int64_t tcpi_rxbytes __attribute__((aligned(8))); - /* total bytes received */ - u_int64_t tcpi_rxduplicatebytes __attribute__((aligned(8))); - /* total duplicate bytes received */ - u_int64_t tcpi_rxoutoforderbytes __attribute__((aligned(8))); - /* total out of order bytes received */ - u_int64_t tcpi_snd_bw __attribute__((aligned(8))); /* measured send bandwidth in bits/sec */ - u_int8_t tcpi_synrexmits; /* Number of syn retransmits before connect */ - u_int8_t tcpi_unused1; - u_int16_t tcpi_unused2; - u_int64_t tcpi_cell_rxpackets __attribute((aligned(8))); /* packets received over cellular */ - u_int64_t tcpi_cell_rxbytes __attribute((aligned(8))); /* bytes received over cellular */ - u_int64_t tcpi_cell_txpackets __attribute((aligned(8))); /* packets transmitted over cellular */ - u_int64_t tcpi_cell_txbytes __attribute((aligned(8))); /* bytes transmitted over cellular */ - u_int64_t tcpi_wifi_rxpackets __attribute((aligned(8))); /* packets received over Wi-Fi */ - u_int64_t tcpi_wifi_rxbytes __attribute((aligned(8))); /* bytes received over Wi-Fi */ - u_int64_t tcpi_wifi_txpackets __attribute((aligned(8))); /* packets transmitted over Wi-Fi */ - u_int64_t tcpi_wifi_txbytes __attribute((aligned(8))); /* bytes transmitted over Wi-Fi */ - u_int64_t tcpi_wired_rxpackets __attribute((aligned(8))); /* packets received over Wired */ - u_int64_t tcpi_wired_rxbytes __attribute((aligned(8))); /* 
bytes received over Wired */ - u_int64_t tcpi_wired_txpackets __attribute((aligned(8))); /* packets transmitted over Wired */ - u_int64_t tcpi_wired_txbytes __attribute((aligned(8))); /* bytes transmitted over Wired */ - struct tcp_conn_status tcpi_connstatus; /* status of connection probes */ + u_int8_t tcpi_state; /* TCP FSM state. */ + u_int8_t tcpi_options; /* Options enabled on conn. */ + u_int8_t tcpi_snd_wscale; /* RFC1323 send shift value. */ + u_int8_t tcpi_rcv_wscale; /* RFC1323 recv shift value. */ + + u_int32_t tcpi_flags; /* extra flags (TCPI_FLAG_xxx) */ + + u_int32_t tcpi_rto; /* Retransmission timeout in milliseconds */ + u_int32_t tcpi_snd_mss; /* Max segment size for send. */ + u_int32_t tcpi_rcv_mss; /* Max segment size for receive. */ + + u_int32_t tcpi_rttcur; /* Most recent value of RTT */ + u_int32_t tcpi_srtt; /* Smoothed RTT */ + u_int32_t tcpi_rttvar; /* RTT variance */ + u_int32_t tcpi_rttbest; /* Best RTT we've seen */ + + u_int32_t tcpi_snd_ssthresh; /* Slow start threshold. */ + u_int32_t tcpi_snd_cwnd; /* Send congestion window. */ + + u_int32_t tcpi_rcv_space; /* Advertised recv window. */ + + u_int32_t tcpi_snd_wnd; /* Advertised send window. */ + u_int32_t tcpi_snd_nxt; /* Next egress seqno */ + u_int32_t tcpi_rcv_nxt; /* Next ingress seqno */ + + int32_t tcpi_last_outif; /* if_index of interface used to send last */ + u_int32_t tcpi_snd_sbbytes; /* bytes in snd buffer including data inflight */ + + u_int64_t tcpi_txpackets __attribute__((aligned(8))); /* total packets sent */ + u_int64_t tcpi_txbytes __attribute__((aligned(8))); + /* total bytes sent */ + u_int64_t tcpi_txretransmitbytes __attribute__((aligned(8))); + /* total bytes retransmitted */ + u_int64_t tcpi_txunacked __attribute__((aligned(8))); + /* current number of bytes not acknowledged */ + u_int64_t tcpi_rxpackets __attribute__((aligned(8))); /* total packets received */ + u_int64_t tcpi_rxbytes __attribute__((aligned(8))); + /* total bytes received */ + u_int64_t tcpi_rxduplicatebytes __attribute__((aligned(8))); + /* total duplicate bytes received */ + u_int64_t tcpi_rxoutoforderbytes __attribute__((aligned(8))); + /* total out of order bytes received */ + u_int64_t tcpi_snd_bw __attribute__((aligned(8))); /* measured send bandwidth in bits/sec */ + u_int8_t tcpi_synrexmits; /* Number of syn retransmits before connect */ + u_int8_t tcpi_unused1; + u_int16_t tcpi_unused2; + u_int64_t tcpi_cell_rxpackets __attribute((aligned(8))); /* packets received over cellular */ + u_int64_t tcpi_cell_rxbytes __attribute((aligned(8))); /* bytes received over cellular */ + u_int64_t tcpi_cell_txpackets __attribute((aligned(8))); /* packets transmitted over cellular */ + u_int64_t tcpi_cell_txbytes __attribute((aligned(8))); /* bytes transmitted over cellular */ + u_int64_t tcpi_wifi_rxpackets __attribute((aligned(8))); /* packets received over Wi-Fi */ + u_int64_t tcpi_wifi_rxbytes __attribute((aligned(8))); /* bytes received over Wi-Fi */ + u_int64_t tcpi_wifi_txpackets __attribute((aligned(8))); /* packets transmitted over Wi-Fi */ + u_int64_t tcpi_wifi_txbytes __attribute((aligned(8))); /* bytes transmitted over Wi-Fi */ + u_int64_t tcpi_wired_rxpackets __attribute((aligned(8))); /* packets received over Wired */ + u_int64_t tcpi_wired_rxbytes __attribute((aligned(8))); /* bytes received over Wired */ + u_int64_t tcpi_wired_txpackets __attribute((aligned(8))); /* packets transmitted over Wired */ + u_int64_t tcpi_wired_txbytes __attribute((aligned(8))); /* bytes transmitted over Wired */ + struct 
tcp_conn_status tcpi_connstatus; /* status of connection probes */ u_int16_t - tcpi_tfo_cookie_req:1, /* Cookie requested? */ - tcpi_tfo_cookie_rcv:1, /* Cookie received? */ - tcpi_tfo_syn_loss:1, /* Fallback to reg. TCP after SYN-loss */ - tcpi_tfo_syn_data_sent:1, /* SYN+data has been sent out */ - tcpi_tfo_syn_data_acked:1, /* SYN+data has been fully acknowledged */ - tcpi_tfo_syn_data_rcv:1, /* Server received SYN+data with a valid cookie */ - tcpi_tfo_cookie_req_rcv:1, /* Server received cookie-request */ - tcpi_tfo_cookie_sent:1, /* Server announced cookie */ - tcpi_tfo_cookie_invalid:1, /* Server received an invalid cookie */ - tcpi_tfo_cookie_wrong:1, /* Our sent cookie was wrong */ - tcpi_tfo_no_cookie_rcv:1, /* We did not receive a cookie upon our request */ - tcpi_tfo_heuristics_disable:1, /* TFO-heuristics disabled it */ - tcpi_tfo_send_blackhole:1, /* A sending-blackhole got detected */ - tcpi_tfo_recv_blackhole:1, /* A receiver-blackhole got detected */ - tcpi_tfo_onebyte_proxy:1; /* A proxy acknowledges all but one byte of the SYN */ - - u_int16_t tcpi_ecn_client_setup:1, /* Attempted ECN setup from client side */ - tcpi_ecn_server_setup:1, /* Attempted ECN setup from server side */ - tcpi_ecn_success:1, /* peer negotiated ECN */ - tcpi_ecn_lost_syn:1, /* Lost SYN with ECN setup */ - tcpi_ecn_lost_synack:1, /* Lost SYN-ACK with ECN setup */ - tcpi_local_peer:1, /* Local to the host or the subnet */ - tcpi_if_cell:1, /* Interface is cellular */ - tcpi_if_wifi:1, /* Interface is WiFi */ - tcpi_if_wired:1, /* Interface is wired - ethernet , thunderbolt etc,. */ - tcpi_if_wifi_infra:1, /* Interface is wifi infrastructure */ - tcpi_if_wifi_awdl:1, /* Interface is wifi AWDL */ - tcpi_snd_background:1, /* Using delay based algorithm on sender side */ - tcpi_rcv_background:1; /* Using delay based algorithm on receive side */ - - u_int32_t tcpi_ecn_recv_ce; /* Packets received with CE */ - u_int32_t tcpi_ecn_recv_cwr; /* Packets received with CWR */ - - u_int32_t tcpi_rcvoopack; /* out-of-order packets received */ - u_int32_t tcpi_pawsdrop; /* segments dropped due to PAWS */ - u_int32_t tcpi_sack_recovery_episode; /* SACK recovery episodes */ - u_int32_t tcpi_reordered_pkts; /* packets reorderd */ - u_int32_t tcpi_dsack_sent; /* Sent DSACK notification */ - u_int32_t tcpi_dsack_recvd; /* Received a valid DSACK option */ - u_int32_t tcpi_flowhash; /* Unique id for the connection */ - - u_int64_t tcpi_txretransmitpackets __attribute__((aligned(8))); + tcpi_tfo_cookie_req:1, /* Cookie requested? */ + tcpi_tfo_cookie_rcv:1, /* Cookie received? */ + tcpi_tfo_syn_loss:1, /* Fallback to reg. 
TCP after SYN-loss */ + tcpi_tfo_syn_data_sent:1, /* SYN+data has been sent out */ + tcpi_tfo_syn_data_acked:1, /* SYN+data has been fully acknowledged */ + tcpi_tfo_syn_data_rcv:1, /* Server received SYN+data with a valid cookie */ + tcpi_tfo_cookie_req_rcv:1, /* Server received cookie-request */ + tcpi_tfo_cookie_sent:1, /* Server announced cookie */ + tcpi_tfo_cookie_invalid:1, /* Server received an invalid cookie */ + tcpi_tfo_cookie_wrong:1, /* Our sent cookie was wrong */ + tcpi_tfo_no_cookie_rcv:1, /* We did not receive a cookie upon our request */ + tcpi_tfo_heuristics_disable:1, /* TFO-heuristics disabled it */ + tcpi_tfo_send_blackhole:1, /* A sending-blackhole got detected */ + tcpi_tfo_recv_blackhole:1, /* A receiver-blackhole got detected */ + tcpi_tfo_onebyte_proxy:1; /* A proxy acknowledges all but one byte of the SYN */ + + u_int16_t tcpi_ecn_client_setup:1, /* Attempted ECN setup from client side */ + tcpi_ecn_server_setup:1, /* Attempted ECN setup from server side */ + tcpi_ecn_success:1, /* peer negotiated ECN */ + tcpi_ecn_lost_syn:1, /* Lost SYN with ECN setup */ + tcpi_ecn_lost_synack:1, /* Lost SYN-ACK with ECN setup */ + tcpi_local_peer:1, /* Local to the host or the subnet */ + tcpi_if_cell:1, /* Interface is cellular */ + tcpi_if_wifi:1, /* Interface is WiFi */ + tcpi_if_wired:1, /* Interface is wired - ethernet, thunderbolt, etc. */ + tcpi_if_wifi_infra:1, /* Interface is wifi infrastructure */ + tcpi_if_wifi_awdl:1, /* Interface is wifi AWDL */ + tcpi_snd_background:1, /* Using delay based algorithm on sender side */ + tcpi_rcv_background:1; /* Using delay based algorithm on receive side */ + + u_int32_t tcpi_ecn_recv_ce; /* Packets received with CE */ + u_int32_t tcpi_ecn_recv_cwr; /* Packets received with CWR */ + + u_int32_t tcpi_rcvoopack; /* out-of-order packets received */ + u_int32_t tcpi_pawsdrop; /* segments dropped due to PAWS */ + u_int32_t tcpi_sack_recovery_episode; /* SACK recovery episodes */ + u_int32_t tcpi_reordered_pkts; /* packets reordered */ + u_int32_t tcpi_dsack_sent; /* Sent DSACK notification */ + u_int32_t tcpi_dsack_recvd; /* Received a valid DSACK option */ + u_int32_t tcpi_flowhash; /* Unique id for the connection */ + + u_int64_t tcpi_txretransmitpackets __attribute__((aligned(8))); }; struct tcp_measure_bw_burst { - u_int32_t min_burst_size; /* Minimum number of packets to use */ - u_int32_t max_burst_size; /* Maximum number of packets to use */ + u_int32_t min_burst_size; /* Minimum number of packets to use */ + u_int32_t max_burst_size; /* Maximum number of packets to use */ }; /* @@ -440,25 +440,25 @@ struct tcp_measure_bw_burst { */ struct info_tuple { - u_int8_t itpl_proto; + u_int8_t itpl_proto; union { - struct sockaddr _itpl_sa; - struct sockaddr_in _itpl_sin; - struct sockaddr_in6 _itpl_sin6; + struct sockaddr _itpl_sa; + struct sockaddr_in _itpl_sin; + struct sockaddr_in6 _itpl_sin6; } itpl_localaddr; union { - struct sockaddr _itpl_sa; - struct sockaddr_in _itpl_sin; - struct sockaddr_in6 _itpl_sin6; + struct sockaddr _itpl_sa; + struct sockaddr_in _itpl_sin; + struct sockaddr_in6 _itpl_sin6; } itpl_remoteaddr; }; -#define itpl_local_sa itpl_localaddr._itpl_sa -#define itpl_local_sin itpl_localaddr._itpl_sin -#define itpl_local_sin6 itpl_localaddr._itpl_sin6 -#define itpl_remote_sa itpl_remoteaddr._itpl_sa -#define itpl_remote_sin itpl_remoteaddr._itpl_sin -#define itpl_remote_sin6 itpl_remoteaddr._itpl_sin6 +#define itpl_local_sa itpl_localaddr._itpl_sa +#define itpl_local_sin itpl_localaddr._itpl_sin +#define 
itpl_local_sin6 itpl_localaddr._itpl_sin6 +#define itpl_remote_sa itpl_remoteaddr._itpl_sa +#define itpl_remote_sin itpl_remoteaddr._itpl_sin +#define itpl_remote_sin6 itpl_remoteaddr._itpl_sin6 /* * TCP connection info auxiliary data (CIAUX_TCP) @@ -467,86 +467,86 @@ struct info_tuple { * structure towards the end. This will preserve binary compatibility. */ typedef struct conninfo_tcp { - pid_t tcpci_peer_pid; /* loopback peer PID if > 0 */ - struct tcp_info tcpci_tcp_info; /* TCP info */ + pid_t tcpci_peer_pid; /* loopback peer PID if > 0 */ + struct tcp_info tcpci_tcp_info; /* TCP info */ } conninfo_tcp_t; #pragma pack() struct mptcp_itf_stats { - uint16_t ifindex; - uint16_t switches; - uint32_t is_expensive:1; - uint64_t mpis_txbytes __attribute__((aligned(8))); - uint64_t mpis_rxbytes __attribute__((aligned(8))); + uint16_t ifindex; + uint16_t switches; + uint32_t is_expensive:1; + uint64_t mpis_txbytes __attribute__((aligned(8))); + uint64_t mpis_rxbytes __attribute__((aligned(8))); }; /* Version solely used to let libnetcore survive */ -#define CONNINFO_MPTCP_VERSION 3 +#define CONNINFO_MPTCP_VERSION 3 typedef struct conninfo_multipathtcp { - uint32_t mptcpci_subflow_count; - uint32_t mptcpci_switch_count; - sae_connid_t mptcpci_subflow_connids[4]; + uint32_t mptcpci_subflow_count; + uint32_t mptcpci_switch_count; + sae_connid_t mptcpci_subflow_connids[4]; - uint64_t mptcpci_init_rxbytes; - uint64_t mptcpci_init_txbytes; + uint64_t mptcpci_init_rxbytes; + uint64_t mptcpci_init_txbytes; -#define MPTCP_ITFSTATS_SIZE 4 +#define MPTCP_ITFSTATS_SIZE 4 struct mptcp_itf_stats mptcpci_itfstats[MPTCP_ITFSTATS_SIZE]; - uint32_t mptcpci_flags; -#define MPTCPCI_FIRSTPARTY 0x01 + uint32_t mptcpci_flags; +#define MPTCPCI_FIRSTPARTY 0x01 } conninfo_multipathtcp_t; #endif /* PRIVATE */ struct tcp_connection_info { - u_int8_t tcpi_state; /* connection state */ - u_int8_t tcpi_snd_wscale; /* Window scale for send window */ - u_int8_t tcpi_rcv_wscale; /* Window scale for receive window */ - u_int8_t __pad1; - u_int32_t tcpi_options; /* TCP options supported */ -#define TCPCI_OPT_TIMESTAMPS 0x00000001 /* Timestamps enabled */ -#define TCPCI_OPT_SACK 0x00000002 /* SACK enabled */ -#define TCPCI_OPT_WSCALE 0x00000004 /* Window scaling enabled */ -#define TCPCI_OPT_ECN 0x00000008 /* ECN enabled */ - u_int32_t tcpi_flags; /* flags */ + u_int8_t tcpi_state; /* connection state */ + u_int8_t tcpi_snd_wscale; /* Window scale for send window */ + u_int8_t tcpi_rcv_wscale; /* Window scale for receive window */ + u_int8_t __pad1; + u_int32_t tcpi_options; /* TCP options supported */ +#define TCPCI_OPT_TIMESTAMPS 0x00000001 /* Timestamps enabled */ +#define TCPCI_OPT_SACK 0x00000002 /* SACK enabled */ +#define TCPCI_OPT_WSCALE 0x00000004 /* Window scaling enabled */ +#define TCPCI_OPT_ECN 0x00000008 /* ECN enabled */ + u_int32_t tcpi_flags; /* flags */ #define TCPCI_FLAG_LOSSRECOVERY 0x00000001 #define TCPCI_FLAG_REORDERING_DETECTED 0x00000002 - u_int32_t tcpi_rto; /* retransmit timeout in ms */ - u_int32_t tcpi_maxseg; /* maximum segment size supported */ - u_int32_t tcpi_snd_ssthresh; /* slow start threshold in bytes */ - u_int32_t tcpi_snd_cwnd; /* send congestion window in bytes */ - u_int32_t tcpi_snd_wnd; /* send widnow in bytes */ - u_int32_t tcpi_snd_sbbytes; /* bytes in send socket buffer, including in-flight data */ - u_int32_t tcpi_rcv_wnd; /* receive window in bytes*/ - u_int32_t tcpi_rttcur; /* most recent RTT in ms */ - u_int32_t tcpi_srtt; /* average RTT in ms */ - u_int32_t tcpi_rttvar; /* RTT 
variance */ + u_int32_t tcpi_rto; /* retransmit timeout in ms */ + u_int32_t tcpi_maxseg; /* maximum segment size supported */ + u_int32_t tcpi_snd_ssthresh; /* slow start threshold in bytes */ + u_int32_t tcpi_snd_cwnd; /* send congestion window in bytes */ + u_int32_t tcpi_snd_wnd; /* send window in bytes */ + u_int32_t tcpi_snd_sbbytes; /* bytes in send socket buffer, including in-flight data */ + u_int32_t tcpi_rcv_wnd; /* receive window in bytes */ + u_int32_t tcpi_rttcur; /* most recent RTT in ms */ + u_int32_t tcpi_srtt; /* average RTT in ms */ + u_int32_t tcpi_rttvar; /* RTT variance */ u_int32_t - tcpi_tfo_cookie_req:1, /* Cookie requested? */ - tcpi_tfo_cookie_rcv:1, /* Cookie received? */ - tcpi_tfo_syn_loss:1, /* Fallback to reg. TCP after SYN-loss */ - tcpi_tfo_syn_data_sent:1, /* SYN+data has been sent out */ - tcpi_tfo_syn_data_acked:1, /* SYN+data has been fully acknowledged */ - tcpi_tfo_syn_data_rcv:1, /* Server received SYN+data with a valid cookie */ - tcpi_tfo_cookie_req_rcv:1, /* Server received cookie-request */ - tcpi_tfo_cookie_sent:1, /* Server announced cookie */ - tcpi_tfo_cookie_invalid:1, /* Server received an invalid cookie */ - tcpi_tfo_cookie_wrong:1, /* Our sent cookie was wrong */ - tcpi_tfo_no_cookie_rcv:1, /* We did not receive a cookie upon our request */ - tcpi_tfo_heuristics_disable:1, /* TFO-heuristics disabled it */ - tcpi_tfo_send_blackhole:1, /* A sending-blackhole got detected */ - tcpi_tfo_recv_blackhole:1, /* A receiver-blackhole got detected */ - tcpi_tfo_onebyte_proxy:1, /* A proxy acknowledges all but one byte of the SYN */ - __pad2:17; - u_int64_t tcpi_txpackets __attribute__((aligned(8))); - u_int64_t tcpi_txbytes __attribute__((aligned(8))); - u_int64_t tcpi_txretransmitbytes __attribute__((aligned(8))); - u_int64_t tcpi_rxpackets __attribute__((aligned(8))); - u_int64_t tcpi_rxbytes __attribute__((aligned(8))); - u_int64_t tcpi_rxoutoforderbytes __attribute__((aligned(8))); - u_int64_t tcpi_txretransmitpackets __attribute__((aligned(8))); + tcpi_tfo_cookie_req:1, /* Cookie requested? */ + tcpi_tfo_cookie_rcv:1, /* Cookie received? */ + tcpi_tfo_syn_loss:1, /* Fallback to reg. 
TCP after SYN-loss */ + tcpi_tfo_syn_data_sent:1, /* SYN+data has been sent out */ + tcpi_tfo_syn_data_acked:1, /* SYN+data has been fully acknowledged */ + tcpi_tfo_syn_data_rcv:1, /* Server received SYN+data with a valid cookie */ + tcpi_tfo_cookie_req_rcv:1, /* Server received cookie-request */ + tcpi_tfo_cookie_sent:1, /* Server announced cookie */ + tcpi_tfo_cookie_invalid:1, /* Server received an invalid cookie */ + tcpi_tfo_cookie_wrong:1, /* Our sent cookie was wrong */ + tcpi_tfo_no_cookie_rcv:1, /* We did not receive a cookie upon our request */ + tcpi_tfo_heuristics_disable:1, /* TFO-heuristics disabled it */ + tcpi_tfo_send_blackhole:1, /* A sending-blackhole got detected */ + tcpi_tfo_recv_blackhole:1, /* A receiver-blackhole got detected */ + tcpi_tfo_onebyte_proxy:1, /* A proxy acknowledges all but one byte of the SYN */ + __pad2:17; + u_int64_t tcpi_txpackets __attribute__((aligned(8))); + u_int64_t tcpi_txbytes __attribute__((aligned(8))); + u_int64_t tcpi_txretransmitbytes __attribute__((aligned(8))); + u_int64_t tcpi_rxpackets __attribute__((aligned(8))); + u_int64_t tcpi_rxbytes __attribute__((aligned(8))); + u_int64_t tcpi_rxoutoforderbytes __attribute__((aligned(8))); + u_int64_t tcpi_txretransmitpackets __attribute__((aligned(8))); }; #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ diff --git a/bsd/netinet/tcp_cache.c b/bsd/netinet/tcp_cache.c index d746db8c5..deaa3bcb2 100644 --- a/bsd/netinet/tcp_cache.c +++ b/bsd/netinet/tcp_cache.c @@ -50,49 +50,49 @@ struct tcp_heuristic_key { uint8_t thk_net_signature[IFNET_SIGNATURELEN]; in_4_6_addr thk_ip; }; - sa_family_t thk_family; + sa_family_t thk_family; }; struct tcp_heuristic { SLIST_ENTRY(tcp_heuristic) list; - uint32_t th_last_access; + uint32_t th_last_access; - struct tcp_heuristic_key th_key; + struct tcp_heuristic_key th_key; - char th_val_start[0]; /* Marker for memsetting to 0 */ + char th_val_start[0]; /* Marker for memsetting to 0 */ - uint8_t th_tfo_data_loss; /* The number of times a SYN+data has been lost */ - uint8_t th_tfo_req_loss; /* The number of times a SYN+cookie-req has been lost */ - uint8_t th_tfo_data_rst; /* The number of times a SYN+data has received a RST */ - uint8_t th_tfo_req_rst; /* The number of times a SYN+cookie-req has received a RST */ - uint8_t th_mptcp_loss; /* The number of times a SYN+MP_CAPABLE has been lost */ - uint8_t th_ecn_loss; /* The number of times a SYN+ecn has been lost */ - uint8_t th_ecn_aggressive; /* The number of times we did an aggressive fallback */ - uint8_t th_ecn_droprst; /* The number of times ECN connections received a RST after first data pkt */ - uint8_t th_ecn_droprxmt; /* The number of times ECN connection is dropped after multiple retransmits */ - uint8_t th_ecn_synrst; /* number of times RST was received in response to an ECN enabled SYN */ - uint32_t th_tfo_enabled_time; /* The moment when we reenabled TFO after backing off */ - uint32_t th_tfo_backoff_until; /* Time until when we should not try out TFO */ - uint32_t th_tfo_backoff; /* Current backoff timer */ - uint32_t th_mptcp_backoff; /* Time until when we should not try out MPTCP */ - uint32_t th_ecn_backoff; /* Time until when we should not try out ECN */ + uint8_t th_tfo_data_loss; /* The number of times a SYN+data has been lost */ + uint8_t th_tfo_req_loss; /* The number of times a SYN+cookie-req has been lost */ + uint8_t th_tfo_data_rst; /* The number of times a SYN+data has received a RST */ + uint8_t th_tfo_req_rst; /* The number of times a SYN+cookie-req has received a RST */ + 
uint8_t th_mptcp_loss; /* The number of times a SYN+MP_CAPABLE has been lost */ + uint8_t th_ecn_loss; /* The number of times a SYN+ecn has been lost */ + uint8_t th_ecn_aggressive; /* The number of times we did an aggressive fallback */ + uint8_t th_ecn_droprst; /* The number of times ECN connections received a RST after first data pkt */ + uint8_t th_ecn_droprxmt; /* The number of times ECN connection is dropped after multiple retransmits */ + uint8_t th_ecn_synrst; /* number of times RST was received in response to an ECN enabled SYN */ + uint32_t th_tfo_enabled_time; /* The moment when we reenabled TFO after backing off */ + uint32_t th_tfo_backoff_until; /* Time until when we should not try out TFO */ + uint32_t th_tfo_backoff; /* Current backoff timer */ + uint32_t th_mptcp_backoff; /* Time until when we should not try out MPTCP */ + uint32_t th_ecn_backoff; /* Time until when we should not try out ECN */ - uint8_t th_tfo_in_backoff:1, /* Are we avoiding TFO due to the backoff timer? */ - th_mptcp_in_backoff:1; /* Are we avoiding MPTCP due to the backoff timer? */ + uint8_t th_tfo_in_backoff:1, /* Are we avoiding TFO due to the backoff timer? */ + th_mptcp_in_backoff:1; /* Are we avoiding MPTCP due to the backoff timer? */ - char th_val_end[0]; /* Marker for memsetting to 0 */ + char th_val_end[0]; /* Marker for memsetting to 0 */ }; struct tcp_heuristics_head { SLIST_HEAD(tcp_heur_bucket, tcp_heuristic) tcp_heuristics; /* Per-hashbucket lock to avoid lock-contention */ - lck_mtx_t thh_mtx; + lck_mtx_t thh_mtx; }; struct tcp_cache_key { - sa_family_t tck_family; + sa_family_t tck_family; struct tcp_heuristic_key tck_src; in_4_6_addr tck_dst; @@ -101,19 +101,19 @@ struct tcp_cache_key { struct tcp_cache { SLIST_ENTRY(tcp_cache) list; - u_int32_t tc_last_access; + u_int32_t tc_last_access; struct tcp_cache_key tc_key; - u_int8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX]; - u_int8_t tc_tfo_cookie_len; + u_int8_t tc_tfo_cookie[TFO_COOKIE_LEN_MAX]; + u_int8_t tc_tfo_cookie_len; }; struct tcp_cache_head { SLIST_HEAD(tcp_cache_bucket, tcp_cache) tcp_caches; /* Per-hashbucket lock to avoid lock-contention */ - lck_mtx_t tch_mtx; + lck_mtx_t tch_mtx; }; struct tcp_cache_key_src { @@ -131,41 +131,43 @@ size_t tcp_cache_size; * The maximum depth of the hash-bucket. 
This way we limit the tcp_cache to * TCP_CACHE_BUCKET_SIZE * tcp_cache_size and have "natural" garbage collection */ -#define TCP_CACHE_BUCKET_SIZE 5 +#define TCP_CACHE_BUCKET_SIZE 5 static struct tcp_cache_head *tcp_cache; decl_lck_mtx_data(, tcp_cache_mtx); -static lck_attr_t *tcp_cache_mtx_attr; -static lck_grp_t *tcp_cache_mtx_grp; -static lck_grp_attr_t *tcp_cache_mtx_grp_attr; +static lck_attr_t *tcp_cache_mtx_attr; +static lck_grp_t *tcp_cache_mtx_grp; +static lck_grp_attr_t *tcp_cache_mtx_grp_attr; static struct tcp_heuristics_head *tcp_heuristics; decl_lck_mtx_data(, tcp_heuristics_mtx); -static lck_attr_t *tcp_heuristic_mtx_attr; -static lck_grp_t *tcp_heuristic_mtx_grp; -static lck_grp_attr_t *tcp_heuristic_mtx_grp_attr; +static lck_attr_t *tcp_heuristic_mtx_attr; +static lck_grp_t *tcp_heuristic_mtx_grp; +static lck_grp_attr_t *tcp_heuristic_mtx_grp_attr; static uint32_t tcp_backoff_maximum = 65536; SYSCTL_UINT(_net_inet_tcp, OID_AUTO, backoff_maximum, CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO"); + &tcp_backoff_maximum, 0, "Maximum time for which we won't try TFO"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, ecn_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN"); + static int, tcp_ecn_timeout, 60, "Initial minutes to wait before re-trying ECN"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, disable_tcp_heuristics, CTLFLAG_RW | CTLFLAG_LOCKED, static int, disable_tcp_heuristics, 0, "Set to 1, to disable all TCP heuristics (TFO, ECN, MPTCP)"); -static uint32_t tcp_min_to_hz(uint32_t minutes) +static uint32_t +tcp_min_to_hz(uint32_t minutes) { - if (minutes > 65536) - return ((uint32_t)65536 * 60 * TCP_RETRANSHZ); + if (minutes > 65536) { + return (uint32_t)65536 * 60 * TCP_RETRANSHZ; + } - return (minutes * 60 * TCP_RETRANSHZ); + return minutes * 60 * TCP_RETRANSHZ; } /* @@ -173,37 +175,37 @@ static uint32_t tcp_min_to_hz(uint32_t minutes) * integer overflow. Need to find an inexpensive way to prevent integer overflow * while still allowing a dynamic sysctl. 
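The clamp in tcp_min_to_hz() above is what keeps the minutes-to-ticks conversion inside 32 bits. A standalone sketch of the same guard, assuming TCP_RETRANSHZ is 1000 (the millisecond tick granularity used elsewhere in xnu's TCP timers): with that value the capped product 65536 * 60 * 1000 = 3,932,160,000 still fits in a uint32_t, so the multiply below cannot wrap.

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_TCP_RETRANSHZ 1000 /* assumed 1 ms tick, as in xnu's tcp_timer.h */

    /* Convert an administrator-chosen minute count into timer ticks,
     * clamping first so the 32-bit multiplication cannot overflow:
     * 65536 * 60 * 1000 = 3,932,160,000 < UINT32_MAX. */
    static uint32_t
    sketch_min_to_hz(uint32_t minutes)
    {
        if (minutes > 65536) {
            minutes = 65536;
        }
        return minutes * 60 * SKETCH_TCP_RETRANSHZ;
    }

    int
    main(void)
    {
        printf("%u\n", sketch_min_to_hz(60));      /* one hour of ticks */
        printf("%u\n", sketch_min_to_hz(1 << 20)); /* clamped, does not wrap */
        return 0;
    }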
*/ -#define TCP_CACHE_OVERFLOW_PROTECT 9 +#define TCP_CACHE_OVERFLOW_PROTECT 9 /* Number of SYN-losses we accept */ -#define TFO_MAX_COOKIE_LOSS 2 -#define ECN_MAX_SYN_LOSS 2 -#define MPTCP_MAX_SYN_LOSS 2 -#define ECN_MAX_DROPRST 1 -#define ECN_MAX_DROPRXMT 4 -#define ECN_MAX_SYNRST 4 +#define TFO_MAX_COOKIE_LOSS 2 +#define ECN_MAX_SYN_LOSS 2 +#define MPTCP_MAX_SYN_LOSS 2 +#define ECN_MAX_DROPRST 1 +#define ECN_MAX_DROPRXMT 4 +#define ECN_MAX_SYNRST 4 /* Flags for setting/unsetting loss-heuristics, limited to 4 bytes */ -#define TCPCACHE_F_TFO_REQ 0x01 -#define TCPCACHE_F_TFO_DATA 0x02 -#define TCPCACHE_F_ECN 0x04 -#define TCPCACHE_F_MPTCP 0x08 -#define TCPCACHE_F_ECN_DROPRST 0x10 -#define TCPCACHE_F_ECN_DROPRXMT 0x20 -#define TCPCACHE_F_TFO_REQ_RST 0x40 -#define TCPCACHE_F_TFO_DATA_RST 0x80 -#define TCPCACHE_F_ECN_SYNRST 0x100 +#define TCPCACHE_F_TFO_REQ 0x01 +#define TCPCACHE_F_TFO_DATA 0x02 +#define TCPCACHE_F_ECN 0x04 +#define TCPCACHE_F_MPTCP 0x08 +#define TCPCACHE_F_ECN_DROPRST 0x10 +#define TCPCACHE_F_ECN_DROPRXMT 0x20 +#define TCPCACHE_F_TFO_REQ_RST 0x40 +#define TCPCACHE_F_TFO_DATA_RST 0x80 +#define TCPCACHE_F_ECN_SYNRST 0x100 /* Always retry ECN after backing off to this level for some heuristics */ -#define ECN_RETRY_LIMIT 9 +#define ECN_RETRY_LIMIT 9 #define TCP_CACHE_INC_IFNET_STAT(_ifp_, _af_, _stat_) { \ if ((_ifp_) != NULL) { \ - if ((_af_) == AF_INET6) { \ - (_ifp_)->if_ipv6_stat->_stat_++;\ - } else { \ - (_ifp_)->if_ipv4_stat->_stat_++;\ - }\ + if ((_af_) == AF_INET6) { \ + (_ifp_)->if_ipv6_stat->_stat_++;\ + } else { \ + (_ifp_)->if_ipv4_stat->_stat_++;\ + }\ }\ } @@ -213,7 +215,8 @@ static uint32_t tcp_min_to_hz(uint32_t minutes) * Might be worth moving this to a library so that others * (e.g., scale_to_powerof2()) can use this as well instead of a while-loop. */ -static u_int32_t tcp_cache_roundup2(u_int32_t a) +static u_int32_t +tcp_cache_roundup2(u_int32_t a) { a--; a |= a >> 1; @@ -226,7 +229,8 @@ static u_int32_t tcp_cache_roundup2(u_int32_t a) return a; } -static void tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key) +static void +tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key) { struct ifnet *ifp = tcks->ifp; uint8_t len = sizeof(key->thk_net_signature); @@ -244,26 +248,29 @@ static void tcp_cache_hash_src(struct tcp_cache_key_src *tcks, struct tcp_heuris * (we made sure that in the other cases it does not). So, * in this case we should take the connection's address. */ - if (ret == ENOENT || ret == EINVAL) + if (ret == ENOENT || ret == EINVAL) { memcpy(&key->thk_ip.addr6, &tcks->laddr.addr6, sizeof(struct in6_addr)); + } } else { int ret; key->thk_family = AF_INET; ret = ifnet_get_netsignature(ifp, AF_INET, &len, &flags, - key->thk_net_signature); + key->thk_net_signature); /* * ifnet_get_netsignature only returns EINVAL if ifn is NULL * (we made sure that in the other cases it does not). So, * in this case we should take the connection's address. 
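tcp_cache_roundup2() above is the classic bit-smearing round-up-to-a-power-of-two. A hedged standalone copy for reference, with the steps spelled out (note the a == 0 case: the decrement wraps to all ones and the final increment returns 0, matching the kernel routine):

    #include <assert.h>
    #include <stdint.h>

    /* Decrement, smear the highest set bit into every lower bit position,
     * then increment: the result is the smallest power of two >= a. */
    static uint32_t
    sketch_roundup2(uint32_t a)
    {
        a--;
        a |= a >> 1;
        a |= a >> 2;
        a |= a >> 4;
        a |= a >> 8;
        a |= a >> 16;
        a++;
        return a;
    }

    int
    main(void)
    {
        assert(sketch_roundup2(5) == 8);
        assert(sketch_roundup2(32) == 32); /* powers of two map to themselves */
        assert(sketch_roundup2(33) == 64);
        return 0;
    }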
*/ - if (ret == ENOENT || ret == EINVAL) + if (ret == ENOENT || ret == EINVAL) { memcpy(&key->thk_ip.addr, &tcks->laddr.addr, sizeof(struct in_addr)); + } } } -static u_int16_t tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key) +static u_int16_t +tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache_key *key) { u_int32_t hash; @@ -284,10 +291,11 @@ static u_int16_t tcp_cache_hash(struct tcp_cache_key_src *tcks, struct tcp_cache hash = net_flowhash(key, sizeof(struct tcp_cache_key), tcp_cache_hash_seed); - return (hash & (tcp_cache_size - 1)); + return hash & (tcp_cache_size - 1); } -static void tcp_cache_unlock(struct tcp_cache_head *head) +static void +tcp_cache_unlock(struct tcp_cache_head *head) { lck_mtx_unlock(&head->tch_mtx); } @@ -302,7 +310,8 @@ static void tcp_cache_unlock(struct tcp_cache_head *head) * That's why we provide the head as a "return"-pointer so that the caller * can give it back to use for tcp_cache_unlock(). */ -static struct tcp_cache *tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, +static struct tcp_cache * +tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, int create, struct tcp_cache_head **headarg) { struct tcp_cache *tpcache = NULL; @@ -318,8 +327,9 @@ static struct tcp_cache *tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, /*** First step: Look for the tcp_cache in our bucket ***/ SLIST_FOREACH(tpcache, &head->tcp_caches, list) { - if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0) + if (memcmp(&tpcache->tc_key, &key, sizeof(key)) == 0) { break; + } i++; } @@ -348,8 +358,9 @@ static struct tcp_cache *tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, /* Create a new cache and add it to the list */ tpcache = _MALLOC(sizeof(struct tcp_cache), M_TEMP, M_NOWAIT | M_ZERO); - if (tpcache == NULL) + if (tpcache == NULL) { goto out_null; + } SLIST_INSERT_HEAD(&head->tcp_caches, tpcache, list); } @@ -357,21 +368,23 @@ static struct tcp_cache *tcp_getcache_with_lock(struct tcp_cache_key_src *tcks, memcpy(&tpcache->tc_key, &key, sizeof(key)); } - if (tpcache == NULL) + if (tpcache == NULL) { goto out_null; + } /* Update timestamp for garbage collection purposes */ tpcache->tc_last_access = tcp_now; *headarg = head; - return (tpcache); + return tpcache; out_null: tcp_cache_unlock(head); - return (NULL); + return NULL; } -static void tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks) +static void +tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src *tcks) { struct inpcb *inp = tp->t_inpcb; memset(tcks, 0, sizeof(*tcks)); @@ -391,24 +404,27 @@ static void tcp_cache_key_src_create(struct tcpcb *tp, struct tcp_cache_key_src return; } -static void tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t len) +static void +tcp_cache_set_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t len) { struct tcp_cache_head *head; struct tcp_cache *tpcache; /* Call lookup/create function */ tpcache = tcp_getcache_with_lock(tcks, 1, &head); - if (tpcache == NULL) + if (tpcache == NULL) { return; + } tpcache->tc_tfo_cookie_len = len > TFO_COOKIE_LEN_MAX ? 
- TFO_COOKIE_LEN_MAX : len; + TFO_COOKIE_LEN_MAX : len; memcpy(tpcache->tc_tfo_cookie, cookie, tpcache->tc_tfo_cookie_len); tcp_cache_unlock(head); } -void tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len) +void +tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len) { struct tcp_cache_key_src tcks; @@ -416,7 +432,8 @@ void tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len) tcp_cache_set_cookie_common(&tcks, cookie, len); } -static int tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t *len) +static int +tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *cookie, u_int8_t *len) { struct tcp_cache_head *head; struct tcp_cache *tpcache; @@ -424,12 +441,12 @@ static int tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *c /* Call lookup/create function */ tpcache = tcp_getcache_with_lock(tcks, 1, &head); if (tpcache == NULL) { - return (0); + return 0; } if (tpcache->tc_tfo_cookie_len == 0) { tcp_cache_unlock(head); - return (0); + return 0; } /* @@ -443,7 +460,7 @@ static int tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *c tcp_cache_unlock(head); - return (1); + return 1; } /* @@ -454,7 +471,8 @@ static int tcp_cache_get_cookie_common(struct tcp_cache_key_src *tcks, u_char *c * Returns 0 if we should request a cookie. * Returns 1 if the cookie has been found and written. */ -int tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len) +int +tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len) { struct tcp_cache_key_src tcks; @@ -462,7 +480,8 @@ int tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len) return tcp_cache_get_cookie_common(&tcks, cookie, len); } -static unsigned int tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tcks) +static unsigned int +tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tcks) { struct tcp_cache_head *head; struct tcp_cache *tpcache; @@ -470,8 +489,9 @@ static unsigned int tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tc /* Call lookup/create function */ tpcache = tcp_getcache_with_lock(tcks, 1, &head); - if (tpcache == NULL) - return (0); + if (tpcache == NULL) { + return 0; + } cookie_len = tpcache->tc_tfo_cookie_len; @@ -480,7 +500,8 @@ static unsigned int tcp_cache_get_cookie_len_common(struct tcp_cache_key_src *tc return cookie_len; } -unsigned int tcp_cache_get_cookie_len(struct tcpcb *tp) +unsigned int +tcp_cache_get_cookie_len(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -488,7 +509,8 @@ unsigned int tcp_cache_get_cookie_len(struct tcpcb *tp) return tcp_cache_get_cookie_len_common(&tcks); } -static u_int16_t tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key) +static u_int16_t +tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_heuristic_key *key) { u_int32_t hash; @@ -499,10 +521,11 @@ static u_int16_t tcp_heuristics_hash(struct tcp_cache_key_src *tcks, struct tcp_ hash = net_flowhash(key, sizeof(struct tcp_heuristic_key), tcp_cache_hash_seed); - return (hash & (tcp_cache_size - 1)); + return hash & (tcp_cache_size - 1); } -static void tcp_heuristic_unlock(struct tcp_heuristics_head *head) +static void +tcp_heuristic_unlock(struct tcp_heuristics_head *head) { lck_mtx_unlock(&head->thh_mtx); } @@ -521,7 +544,8 @@ static void tcp_heuristic_unlock(struct tcp_heuristics_head *head) * ToDo - way too much code-duplication. 
We should create an interface to handle * bucketized hashtables with recycling of the oldest element. */ -static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks, +static struct tcp_heuristic * +tcp_getheuristic_with_lock(struct tcp_cache_key_src *tcks, int create, struct tcp_heuristics_head **headarg) { struct tcp_heuristic *tpheur = NULL; @@ -537,8 +561,9 @@ static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcp_cache_key_src /*** First step: Look for the tcp_heur in our bucket ***/ SLIST_FOREACH(tpheur, &head->tcp_heuristics, list) { - if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0) + if (memcmp(&tpheur->th_key, &key, sizeof(key)) == 0) { break; + } i++; } @@ -563,13 +588,14 @@ static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcp_cache_key_src /* We recycle - set everything to 0 */ bzero(tpheur->th_val_start, - tpheur->th_val_end - tpheur->th_val_start); + tpheur->th_val_end - tpheur->th_val_start); } else { /* Create a new heuristic and add it to the list */ tpheur = _MALLOC(sizeof(struct tcp_heuristic), M_TEMP, M_NOWAIT | M_ZERO); - if (tpheur == NULL) + if (tpheur == NULL) { goto out_null; + } SLIST_INSERT_HEAD(&head->tcp_heuristics, tpheur, list); } @@ -586,21 +612,23 @@ static struct tcp_heuristic *tcp_getheuristic_with_lock(struct tcp_cache_key_src memcpy(&tpheur->th_key, &key, sizeof(key)); } - if (tpheur == NULL) + if (tpheur == NULL) { goto out_null; + } /* Update timestamp for garbage collection purposes */ tpheur->th_last_access = tcp_now; *headarg = head; - return (tpheur); + return tpheur; out_null: tcp_heuristic_unlock(head); - return (NULL); + return NULL; } -static void tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int8_t flags) +static void +tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int8_t flags) { struct tcp_heuristics_head *head; struct tcp_heuristic *tpheur; @@ -611,8 +639,9 @@ static void tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int8_ * our side. 
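Both tcp_getcache_with_lock() and tcp_getheuristic_with_lock() above share the scan-bucket / reuse-match / recycle-oldest shape that the ToDo earlier complains about. A minimal sketch of what such a shared interface could look like, with purely illustrative names and a plain array standing in for the kernel's SLIST:

    #include <stdint.h>
    #include <string.h>

    #define BUCKET_DEPTH 5 /* mirrors TCP_CACHE_BUCKET_SIZE */

    struct slot {
        uint32_t last_access; /* timestamp driving the "natural" garbage collection */
        uint32_t key;         /* stand-in for the real hashed key material */
        int      in_use;
    };

    /* Return the slot for key: reuse an exact match, take a free slot, or,
     * once the bucket is full, recycle the least recently used entry. */
    static struct slot *
    bucket_lookup_or_recycle(struct slot bucket[BUCKET_DEPTH], uint32_t key,
        uint32_t now)
    {
        struct slot *oldest = &bucket[0];
        int i;

        for (i = 0; i < BUCKET_DEPTH; i++) {
            if (bucket[i].in_use && bucket[i].key == key) {
                bucket[i].last_access = now; /* refresh for future GC */
                return &bucket[i];
            }
            if (!bucket[i].in_use) {
                oldest = &bucket[i];         /* a free slot wins outright */
                break;
            }
            if (bucket[i].last_access < oldest->last_access) {
                oldest = &bucket[i];
            }
        }
        memset(oldest, 0, sizeof(*oldest));  /* zero the value area, like the bzero() above */
        oldest->in_use = 1;
        oldest->key = key;
        oldest->last_access = now;
        return oldest;
    }

    int
    main(void)
    {
        struct slot bucket[BUCKET_DEPTH] = {{0, 0, 0}};
        (void)bucket_lookup_or_recycle(bucket, 42, 1);
        return bucket[0].key == 42 ? 0 : 1;
    }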
*/ tpheur = tcp_getheuristic_with_lock(tcks, 0, &head); - if (tpheur == NULL) + if (tpheur == NULL) { return; + } if (flags & TCPCACHE_F_TFO_DATA) { tpheur->th_tfo_data_loss = 0; @@ -635,29 +664,34 @@ static void tcp_heuristic_reset_counters(struct tcp_cache_key_src *tcks, u_int8_ tpheur->th_ecn_synrst = 0; } - if (flags & TCPCACHE_F_MPTCP) + if (flags & TCPCACHE_F_MPTCP) { tpheur->th_mptcp_loss = 0; + } tcp_heuristic_unlock(head); } -void tcp_heuristic_tfo_success(struct tcpcb *tp) +void +tcp_heuristic_tfo_success(struct tcpcb *tp) { struct tcp_cache_key_src tcks; uint8_t flag = 0; tcp_cache_key_src_create(tp, &tcks); - if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) + if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) { flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ | - TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST ); - if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) + TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST); + } + if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) { flag = (TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST); + } tcp_heuristic_reset_counters(&tcks, flag); } -void tcp_heuristic_mptcp_success(struct tcpcb *tp) +void +tcp_heuristic_mptcp_success(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -665,7 +699,8 @@ void tcp_heuristic_mptcp_success(struct tcpcb *tp) tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_MPTCP); } -void tcp_heuristic_ecn_success(struct tcpcb *tp) +void +tcp_heuristic_ecn_success(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -673,10 +708,12 @@ void tcp_heuristic_ecn_success(struct tcpcb *tp) tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_ECN); } -static void __tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur) +static void +__tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur) { - if (tpheur->th_tfo_in_backoff) + if (tpheur->th_tfo_in_backoff) { return; + } tpheur->th_tfo_in_backoff = 1; @@ -684,8 +721,9 @@ static void __tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur) uint32_t old_backoff = tpheur->th_tfo_backoff; tpheur->th_tfo_backoff -= (tcp_now - tpheur->th_tfo_enabled_time); - if (tpheur->th_tfo_backoff > old_backoff) + if (tpheur->th_tfo_backoff > old_backoff) { tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout); + } } tpheur->th_tfo_backoff_until = tcp_now + tpheur->th_tfo_backoff; @@ -693,61 +731,70 @@ static void __tcp_heuristic_tfo_middlebox_common(struct tcp_heuristic *tpheur) /* Then, increase the backoff time */ tpheur->th_tfo_backoff *= 2; - if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum)) + if (tpheur->th_tfo_backoff > tcp_min_to_hz(tcp_backoff_maximum)) { tpheur->th_tfo_backoff = tcp_min_to_hz(tcp_ecn_timeout); + } } -static void tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks) +static void +tcp_heuristic_tfo_middlebox_common(struct tcp_cache_key_src *tcks) { struct tcp_heuristics_head *head; struct tcp_heuristic *tpheur; tpheur = tcp_getheuristic_with_lock(tcks, 1, &head); - if (tpheur == NULL) + if (tpheur == NULL) { return; + } __tcp_heuristic_tfo_middlebox_common(tpheur); tcp_heuristic_unlock(head); } -static void tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks, +static void +tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks, u_int32_t flags) { struct tcp_heuristics_head *head; struct tcp_heuristic *tpheur; tpheur = tcp_getheuristic_with_lock(tcks, 1, &head); - if (tpheur == NULL) + if (tpheur == NULL) { return; + } /* Limit to prevent integer-overflow during exponential backoff */ if ((flags & TCPCACHE_F_TFO_DATA) && tpheur->th_tfo_data_loss < 
TCP_CACHE_OVERFLOW_PROTECT) { tpheur->th_tfo_data_loss++; - if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) + if (tpheur->th_tfo_data_loss >= TFO_MAX_COOKIE_LOSS) { __tcp_heuristic_tfo_middlebox_common(tpheur); + } } if ((flags & TCPCACHE_F_TFO_REQ) && tpheur->th_tfo_req_loss < TCP_CACHE_OVERFLOW_PROTECT) { tpheur->th_tfo_req_loss++; - if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) + if (tpheur->th_tfo_req_loss >= TFO_MAX_COOKIE_LOSS) { __tcp_heuristic_tfo_middlebox_common(tpheur); + } } if ((flags & TCPCACHE_F_TFO_DATA_RST) && tpheur->th_tfo_data_rst < TCP_CACHE_OVERFLOW_PROTECT) { tpheur->th_tfo_data_rst++; - if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) + if (tpheur->th_tfo_data_rst >= TFO_MAX_COOKIE_LOSS) { __tcp_heuristic_tfo_middlebox_common(tpheur); + } } if ((flags & TCPCACHE_F_TFO_REQ_RST) && tpheur->th_tfo_req_rst < TCP_CACHE_OVERFLOW_PROTECT) { tpheur->th_tfo_req_rst++; - if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) + if (tpheur->th_tfo_req_rst >= TFO_MAX_COOKIE_LOSS) { __tcp_heuristic_tfo_middlebox_common(tpheur); + } } if ((flags & TCPCACHE_F_ECN) && tpheur->th_ecn_loss < TCP_CACHE_OVERFLOW_PROTECT) { @@ -785,7 +832,6 @@ static void tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks, tpheur->th_ecn_backoff = tcp_now + (tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_droprst - ECN_MAX_DROPRST)); - } } @@ -816,37 +862,44 @@ static void tcp_heuristic_inc_counters(struct tcp_cache_key_src *tcks, tcp_heuristic_unlock(head); } -void tcp_heuristic_tfo_loss(struct tcpcb *tp) +void +tcp_heuristic_tfo_loss(struct tcpcb *tp) { struct tcp_cache_key_src tcks; uint32_t flag = 0; tcp_cache_key_src_create(tp, &tcks); - if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) + if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) { flag = (TCPCACHE_F_TFO_DATA | TCPCACHE_F_TFO_REQ); - if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) + } + if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) { flag = TCPCACHE_F_TFO_REQ; + } tcp_heuristic_inc_counters(&tcks, flag); } -void tcp_heuristic_tfo_rst(struct tcpcb *tp) +void +tcp_heuristic_tfo_rst(struct tcpcb *tp) { struct tcp_cache_key_src tcks; uint32_t flag = 0; tcp_cache_key_src_create(tp, &tcks); - if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) + if (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) { flag = (TCPCACHE_F_TFO_DATA_RST | TCPCACHE_F_TFO_REQ_RST); - if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) + } + if (tp->t_tfo_stats & TFO_S_COOKIE_REQ) { flag = TCPCACHE_F_TFO_REQ_RST; + } tcp_heuristic_inc_counters(&tcks, flag); } -void tcp_heuristic_mptcp_loss(struct tcpcb *tp) +void +tcp_heuristic_mptcp_loss(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -855,7 +908,8 @@ void tcp_heuristic_mptcp_loss(struct tcpcb *tp) tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_MPTCP); } -void tcp_heuristic_ecn_loss(struct tcpcb *tp) +void +tcp_heuristic_ecn_loss(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -864,7 +918,8 @@ void tcp_heuristic_ecn_loss(struct tcpcb *tp) tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN); } -void tcp_heuristic_ecn_droprst(struct tcpcb *tp) +void +tcp_heuristic_ecn_droprst(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -873,7 +928,8 @@ void tcp_heuristic_ecn_droprst(struct tcpcb *tp) tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRST); } -void tcp_heuristic_ecn_droprxmt(struct tcpcb *tp) +void +tcp_heuristic_ecn_droprxmt(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -882,7 +938,8 @@ void tcp_heuristic_ecn_droprxmt(struct tcpcb *tp) tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_DROPRXMT); } -void tcp_heuristic_ecn_synrst(struct 
tcpcb *tp) +void +tcp_heuristic_ecn_synrst(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -891,7 +948,8 @@ void tcp_heuristic_ecn_synrst(struct tcpcb *tp) tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_ECN_SYNRST); } -void tcp_heuristic_tfo_middlebox(struct tcpcb *tp) +void +tcp_heuristic_tfo_middlebox(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -901,30 +959,34 @@ void tcp_heuristic_tfo_middlebox(struct tcpcb *tp) tcp_heuristic_tfo_middlebox_common(&tcks); } -static void tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks) +static void +tcp_heuristic_ecn_aggressive_common(struct tcp_cache_key_src *tcks) { struct tcp_heuristics_head *head; struct tcp_heuristic *tpheur; tpheur = tcp_getheuristic_with_lock(tcks, 1, &head); - if (tpheur == NULL) + if (tpheur == NULL) { return; + } /* Must be done before, otherwise we will start off with expo-backoff */ tpheur->th_ecn_backoff = tcp_now + - (tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_aggressive)); + (tcp_min_to_hz(tcp_ecn_timeout) << (tpheur->th_ecn_aggressive)); /* * Ugly way to prevent integer overflow... limit to prevent in * overflow during exp. backoff. */ - if (tpheur->th_ecn_aggressive < TCP_CACHE_OVERFLOW_PROTECT) + if (tpheur->th_ecn_aggressive < TCP_CACHE_OVERFLOW_PROTECT) { tpheur->th_ecn_aggressive++; + } tcp_heuristic_unlock(head); } -void tcp_heuristic_ecn_aggressive(struct tcpcb *tp) +void +tcp_heuristic_ecn_aggressive(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -932,21 +994,25 @@ void tcp_heuristic_ecn_aggressive(struct tcpcb *tp) tcp_heuristic_ecn_aggressive_common(&tcks); } -static boolean_t tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks) +static boolean_t +tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks) { struct tcp_heuristics_head *head; struct tcp_heuristic *tpheur; - if (disable_tcp_heuristics) - return (TRUE); + if (disable_tcp_heuristics) { + return TRUE; + } /* Get the tcp-heuristic. */ tpheur = tcp_getheuristic_with_lock(tcks, 0, &head); - if (tpheur == NULL) - return (TRUE); + if (tpheur == NULL) { + return TRUE; + } - if (tpheur->th_tfo_in_backoff == 0) + if (tpheur->th_tfo_in_backoff == 0) { goto tfo_ok; + } if (TSTMP_GT(tcp_now, tpheur->th_tfo_backoff_until)) { tpheur->th_tfo_in_backoff = 0; @@ -956,91 +1022,106 @@ static boolean_t tcp_heuristic_do_tfo_common(struct tcp_cache_key_src *tcks) } tcp_heuristic_unlock(head); - return (FALSE); + return FALSE; tfo_ok: tcp_heuristic_unlock(head); - return (TRUE); + return TRUE; } -boolean_t tcp_heuristic_do_tfo(struct tcpcb *tp) +boolean_t +tcp_heuristic_do_tfo(struct tcpcb *tp) { struct tcp_cache_key_src tcks; tcp_cache_key_src_create(tp, &tcks); - if (tcp_heuristic_do_tfo_common(&tcks)) - return (TRUE); + if (tcp_heuristic_do_tfo_common(&tcks)) { + return TRUE; + } - return (FALSE); + return FALSE; } -boolean_t tcp_heuristic_do_mptcp(struct tcpcb *tp) +boolean_t +tcp_heuristic_do_mptcp(struct tcpcb *tp) { struct tcp_cache_key_src tcks; struct tcp_heuristics_head *head = NULL; struct tcp_heuristic *tpheur; - if (disable_tcp_heuristics) - return (TRUE); + if (disable_tcp_heuristics) { + return TRUE; + } tcp_cache_key_src_create(tp, &tcks); /* Get the tcp-heuristic. 
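The gate used by tcp_heuristic_do_tfo_common() above, and by the MPTCP and ECN variants around it, reduces to a per-destination deadline: while now is before the deadline the feature is skipped, and each new failure pushes the deadline out roughly twice as far, up to a cap. A hedged distillation with illustrative values, assuming the wrap-safe 32-bit timestamp comparison that TSTMP_GT() performs:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wrap-safe "a is later than b" for 32-bit tick counters, in the
     * spirit of TSTMP_GT(). */
    #define SKETCH_TSTMP_GT(a, b) ((int32_t)((a) - (b)) > 0)

    struct feature_backoff {
        uint32_t backoff_until; /* ticks: do not retry before this time */
        uint32_t backoff;       /* current backoff interval in ticks */
    };

    static bool
    feature_allowed(const struct feature_backoff *fb, uint32_t now)
    {
        return !SKETCH_TSTMP_GT(fb->backoff_until, now);
    }

    /* On failure: arm the deadline, then double the interval, clamped. */
    static void
    feature_failed(struct feature_backoff *fb, uint32_t now, uint32_t cap)
    {
        fb->backoff_until = now + fb->backoff;
        if (fb->backoff <= cap / 2) {
            fb->backoff *= 2;
        } else {
            fb->backoff = cap;
        }
    }

    int
    main(void)
    {
        struct feature_backoff fb = { 0, 1000 };
        feature_failed(&fb, 5000, 64000);
        printf("%d %u\n", feature_allowed(&fb, 5500), fb.backoff); /* 0 2000 */
        printf("%d\n", feature_allowed(&fb, 6001));                /* 1 */
        return 0;
    }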
*/ tpheur = tcp_getheuristic_with_lock(&tcks, 0, &head); - if (tpheur == NULL) - return (TRUE); + if (tpheur == NULL) { + return TRUE; + } - if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now)) + if (TSTMP_GT(tpheur->th_mptcp_backoff, tcp_now)) { goto fallback; + } tcp_heuristic_unlock(head); - return (TRUE); + return TRUE; fallback: - if (head) + if (head) { tcp_heuristic_unlock(head); + } - if (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FIRSTPARTY) + if (tptomptp(tp)->mpt_mpte->mpte_flags & MPTE_FIRSTPARTY) { tcpstat.tcps_mptcp_fp_heuristic_fallback++; - else + } else { tcpstat.tcps_mptcp_heuristic_fallback++; + } - return (FALSE); + return FALSE; } -static boolean_t tcp_heuristic_do_ecn_common(struct tcp_cache_key_src *tcks) +static boolean_t +tcp_heuristic_do_ecn_common(struct tcp_cache_key_src *tcks) { struct tcp_heuristics_head *head; struct tcp_heuristic *tpheur; boolean_t ret = TRUE; - if (disable_tcp_heuristics) - return (TRUE); + if (disable_tcp_heuristics) { + return TRUE; + } /* Get the tcp-heuristic. */ tpheur = tcp_getheuristic_with_lock(tcks, 0, &head); - if (tpheur == NULL) + if (tpheur == NULL) { return ret; + } if (TSTMP_GT(tpheur->th_ecn_backoff, tcp_now)) { ret = FALSE; } else { /* Reset the following counters to start re-evaluating */ - if (tpheur->th_ecn_droprst >= ECN_RETRY_LIMIT) + if (tpheur->th_ecn_droprst >= ECN_RETRY_LIMIT) { tpheur->th_ecn_droprst = 0; - if (tpheur->th_ecn_droprxmt >= ECN_RETRY_LIMIT) + } + if (tpheur->th_ecn_droprxmt >= ECN_RETRY_LIMIT) { tpheur->th_ecn_droprxmt = 0; - if (tpheur->th_ecn_synrst >= ECN_RETRY_LIMIT) + } + if (tpheur->th_ecn_synrst >= ECN_RETRY_LIMIT) { tpheur->th_ecn_synrst = 0; + } } tcp_heuristic_unlock(head); - return (ret); + return ret; } -boolean_t tcp_heuristic_do_ecn(struct tcpcb *tp) +boolean_t +tcp_heuristic_do_ecn(struct tcpcb *tp) { struct tcp_cache_key_src tcks; @@ -1048,7 +1129,8 @@ boolean_t tcp_heuristic_do_ecn(struct tcpcb *tp) return tcp_heuristic_do_ecn_common(&tcks); } -boolean_t tcp_heuristic_do_ecn_with_address(struct ifnet *ifp, +boolean_t +tcp_heuristic_do_ecn_with_address(struct ifnet *ifp, union sockaddr_in_4_6 *local_address) { struct tcp_cache_key_src tcks; @@ -1069,7 +1151,8 @@ boolean_t tcp_heuristic_do_ecn_with_address(struct ifnet *ifp, return tcp_heuristic_do_ecn_common(&tcks); } -void tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer, +void +tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer, struct ifnet *ifp, union sockaddr_in_4_6 *local_address) { struct tcp_cache_key_src tcks; @@ -1104,7 +1187,8 @@ void tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer, return; } -boolean_t tcp_heuristic_do_tfo_with_address(struct ifnet *ifp, +boolean_t +tcp_heuristic_do_tfo_with_address(struct ifnet *ifp, union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address, u_int8_t *cookie, u_int8_t *cookie_len) { @@ -1127,7 +1211,7 @@ boolean_t tcp_heuristic_do_tfo_with_address(struct ifnet *ifp, if (tcp_heuristic_do_tfo_common(&tcks)) { if (!tcp_cache_get_cookie_common(&tcks, cookie, cookie_len)) { - *cookie_len = 0; + *cookie_len = 0; } return TRUE; } @@ -1135,7 +1219,8 @@ boolean_t tcp_heuristic_do_tfo_with_address(struct ifnet *ifp, return FALSE; } -void tcp_heuristics_tfo_update(struct necp_tcp_tfo_cache *necp_buffer, +void +tcp_heuristics_tfo_update(struct necp_tcp_tfo_cache *necp_buffer, struct ifnet *ifp, union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address) { @@ -1156,37 +1241,45 @@ void tcp_heuristics_tfo_update(struct 
necp_tcp_tfo_cache *necp_buffer, tcks.af = AF_INET; } - if (necp_buffer->necp_tcp_tfo_heuristics_success) + if (necp_buffer->necp_tcp_tfo_heuristics_success) { tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA | - TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST); + TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST); + } - if (necp_buffer->necp_tcp_tfo_heuristics_success_req) + if (necp_buffer->necp_tcp_tfo_heuristics_success_req) { tcp_heuristic_reset_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_REQ_RST); + } - if (necp_buffer->necp_tcp_tfo_heuristics_loss) + if (necp_buffer->necp_tcp_tfo_heuristics_loss) { tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ | TCPCACHE_F_TFO_DATA); + } - if (necp_buffer->necp_tcp_tfo_heuristics_loss_req) + if (necp_buffer->necp_tcp_tfo_heuristics_loss_req) { tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ); + } - if (necp_buffer->necp_tcp_tfo_heuristics_rst_data) + if (necp_buffer->necp_tcp_tfo_heuristics_rst_data) { tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST | TCPCACHE_F_TFO_DATA_RST); + } - if (necp_buffer->necp_tcp_tfo_heuristics_rst_req) + if (necp_buffer->necp_tcp_tfo_heuristics_rst_req) { tcp_heuristic_inc_counters(&tcks, TCPCACHE_F_TFO_REQ_RST); + } - if (necp_buffer->necp_tcp_tfo_heuristics_middlebox) + if (necp_buffer->necp_tcp_tfo_heuristics_middlebox) { tcp_heuristic_tfo_middlebox_common(&tcks); + } if (necp_buffer->necp_tcp_tfo_cookie_len != 0) { tcp_cache_set_cookie_common(&tcks, - necp_buffer->necp_tcp_tfo_cookie, necp_buffer->necp_tcp_tfo_cookie_len); + necp_buffer->necp_tcp_tfo_cookie, necp_buffer->necp_tcp_tfo_cookie_len); } return; } -static void sysctl_cleartfocache(void) +static void +sysctl_cleartfocache(void) { int i; @@ -1222,8 +1315,9 @@ static int sysctl_cleartfo SYSCTL_HANDLER_ARGS val = oldval; error = sysctl_handle_int(oidp, &val, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } /* * The actual value does not matter. If the value is set, it triggers @@ -1231,19 +1325,21 @@ static int sysctl_cleartfo SYSCTL_HANDLER_ARGS * use the route entry to hold the TFO cache, replace the route sysctl. */ - if (val != oldval) + if (val != oldval) { sysctl_cleartfocache(); + } tcpcleartfo = val; - return (error); + return error; } SYSCTL_PROC(_net_inet_tcp, OID_AUTO, clear_tfocache, CTLTYPE_INT | CTLFLAG_RW | - CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I", - "Toggle to clear the TFO destination based heuristic cache"); + CTLFLAG_LOCKED, &tcpcleartfo, 0, &sysctl_cleartfo, "I", + "Toggle to clear the TFO destination based heuristic cache"); -void tcp_cache_init(void) +void +tcp_cache_init(void) { uint64_t sane_size_meg = sane_size / 1024 / 1024; int i; @@ -1258,15 +1354,17 @@ void tcp_cache_init(void) * 16000 TB, we lose precision. But, who cares? 
:) */ tcp_cache_size = tcp_cache_roundup2((u_int32_t)(sane_size_meg >> 2)); - if (tcp_cache_size < 32) + if (tcp_cache_size < 32) { tcp_cache_size = 32; - else if (tcp_cache_size > 1024) + } else if (tcp_cache_size > 1024) { tcp_cache_size = 1024; + } tcp_cache = _MALLOC(sizeof(struct tcp_cache_head) * tcp_cache_size, M_TEMP, M_ZERO); - if (tcp_cache == NULL) + if (tcp_cache == NULL) { panic("Allocating tcp_cache failed at boot-time!"); + } tcp_cache_mtx_grp_attr = lck_grp_attr_alloc_init(); tcp_cache_mtx_grp = lck_grp_alloc_init("tcpcache", tcp_cache_mtx_grp_attr); @@ -1274,8 +1372,9 @@ void tcp_cache_init(void) tcp_heuristics = _MALLOC(sizeof(struct tcp_heuristics_head) * tcp_cache_size, M_TEMP, M_ZERO); - if (tcp_heuristics == NULL) + if (tcp_heuristics == NULL) { panic("Allocating tcp_heuristic failed at boot-time!"); + } tcp_heuristic_mtx_grp_attr = lck_grp_attr_alloc_init(); tcp_heuristic_mtx_grp = lck_grp_alloc_init("tcpheuristic", tcp_heuristic_mtx_grp_attr); diff --git a/bsd/netinet/tcp_cache.h b/bsd/netinet/tcp_cache.h index bd7044ea3..d9344c84e 100644 --- a/bsd/netinet/tcp_cache.h +++ b/bsd/netinet/tcp_cache.h @@ -34,8 +34,8 @@ #include #include -#define ECN_MIN_CE_PROBES 10 /* Probes are basically the number of incoming packets */ -#define ECN_MAX_CE_RATIO 7 /* Ratio is the maximum number of CE-packets we accept per incoming "probe" */ +#define ECN_MIN_CE_PROBES 10 /* Probes are basically the number of incoming packets */ +#define ECN_MAX_CE_RATIO 7 /* Ratio is the maximum number of CE-packets we accept per incoming "probe" */ extern void tcp_cache_set_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t len); extern int tcp_cache_get_cookie(struct tcpcb *tp, u_char *cookie, u_int8_t *len); @@ -58,17 +58,16 @@ extern void tcp_heuristic_ecn_droprxmt(struct tcpcb *tp); extern void tcp_heuristic_ecn_synrst(struct tcpcb *tp); extern boolean_t tcp_heuristic_do_ecn_with_address(struct ifnet *ifp, - union sockaddr_in_4_6 *local_address); + union sockaddr_in_4_6 *local_address); extern void tcp_heuristics_ecn_update(struct necp_tcp_ecn_cache *necp_buffer, - struct ifnet *ifp, union sockaddr_in_4_6 *local_address); + struct ifnet *ifp, union sockaddr_in_4_6 *local_address); extern boolean_t tcp_heuristic_do_tfo_with_address(struct ifnet *ifp, - union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address, - u_int8_t *cookie, u_int8_t *cookie_len); + union sockaddr_in_4_6 *local_address, union sockaddr_in_4_6 *remote_address, + u_int8_t *cookie, u_int8_t *cookie_len); extern void tcp_heuristics_tfo_update(struct necp_tcp_tfo_cache *necp_buffer, - struct ifnet *ifp, union sockaddr_in_4_6 *local_address, - union sockaddr_in_4_6 *remote_address); + struct ifnet *ifp, union sockaddr_in_4_6 *local_address, + union sockaddr_in_4_6 *remote_address); extern void tcp_cache_init(void); #endif /* _NETINET_TCP_CACHE_H */ - diff --git a/bsd/netinet/tcp_cc.c b/bsd/netinet/tcp_cc.c index 1f634a174..3512bc9a1 100644 --- a/bsd/netinet/tcp_cc.c +++ b/bsd/netinet/tcp_cc.c @@ -77,26 +77,26 @@ struct tcp_cc_debug_state { }; SYSCTL_SKMEM_TCP_INT(OID_AUTO, cc_debug, CTLFLAG_RW | CTLFLAG_LOCKED, - int, tcp_cc_debug, 0, "Enable debug data collection"); + int, tcp_cc_debug, 0, "Enable debug data collection"); extern struct tcp_cc_algo tcp_cc_newreno; SYSCTL_INT(_net_inet_tcp, OID_AUTO, newreno_sockets, - CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_cc_newreno.num_sockets, - 0, "Number of sockets using newreno"); + CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_cc_newreno.num_sockets, + 0, "Number of sockets using newreno"); 
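The three read-only counters registered in this block (newreno_sockets here, background_sockets and cubic_sockets just below) are visible from userspace. A small macOS sketch that assumes only that these OIDs exist on the running kernel:

    #include <stdio.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
        const char *names[] = {
            "net.inet.tcp.newreno_sockets",
            "net.inet.tcp.background_sockets",
            "net.inet.tcp.cubic_sockets",
        };
        for (int i = 0; i < 3; i++) {
            int val = 0;
            size_t len = sizeof(val);
            if (sysctlbyname(names[i], &val, &len, NULL, 0) == 0) {
                printf("%s = %d\n", names[i], val);
            } else {
                perror(names[i]); /* OID absent on this kernel */
            }
        }
        return 0;
    }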
extern struct tcp_cc_algo tcp_cc_ledbat; SYSCTL_INT(_net_inet_tcp, OID_AUTO, background_sockets, - CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_cc_ledbat.num_sockets, - 0, "Number of sockets using background transport"); + CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_cc_ledbat.num_sockets, + 0, "Number of sockets using background transport"); extern struct tcp_cc_algo tcp_cc_cubic; SYSCTL_INT(_net_inet_tcp, OID_AUTO, cubic_sockets, - CTLFLAG_RD | CTLFLAG_LOCKED,&tcp_cc_cubic.num_sockets, - 0, "Number of sockets using cubic"); + CTLFLAG_RD | CTLFLAG_LOCKED, &tcp_cc_cubic.num_sockets, + 0, "Number of sockets using cubic"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, use_newreno, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_use_newreno, 0, - "Use TCP NewReno by default"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_use_newreno, 0, + "Use TCP NewReno by default"); static int tcp_check_cwnd_nonvalidated = 1; #if (DEBUG || DEVELOPMENT) @@ -122,14 +122,14 @@ volatile UInt32 tcp_ccdbg_unit = TCP_CCDBG_NOUNIT; void tcp_cc_init(void); static void tcp_cc_control_register(void); static errno_t tcp_ccdbg_control_connect(kern_ctl_ref kctl, - struct sockaddr_ctl *sac, void **uinfo); + struct sockaddr_ctl *sac, void **uinfo); static errno_t tcp_ccdbg_control_disconnect(kern_ctl_ref kctl, - u_int32_t unit, void *uinfo); + u_int32_t unit, void *uinfo); static struct tcp_cc_algo tcp_cc_algo_none; /* * Initialize TCP congestion control algorithms. */ - + void tcp_cc_init(void) { @@ -167,7 +167,7 @@ tcp_cc_control_register(void) /* Allow only one socket to connect at any time for debugging */ static errno_t tcp_ccdbg_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, - void **uinfo) + void **uinfo) { #pragma unused(kctl) #pragma unused(uinfo) @@ -175,13 +175,15 @@ tcp_ccdbg_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, UInt32 old_value = TCP_CCDBG_NOUNIT; UInt32 new_value = sac->sc_unit; - if (tcp_ccdbg_unit != old_value) - return (EALREADY); + if (tcp_ccdbg_unit != old_value) { + return EALREADY; + } - if (OSCompareAndSwap(old_value, new_value, &tcp_ccdbg_unit)) - return (0); - else - return (EALREADY); + if (OSCompareAndSwap(old_value, new_value, &tcp_ccdbg_unit)) { + return 0; + } else { + return EALREADY; + } } static errno_t @@ -192,15 +194,17 @@ tcp_ccdbg_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo) if (unit == tcp_ccdbg_unit) { UInt32 old_value = tcp_ccdbg_unit; UInt32 new_value = TCP_CCDBG_NOUNIT; - if (tcp_ccdbg_unit == new_value) - return (0); + if (tcp_ccdbg_unit == new_value) { + return 0; + } if (!OSCompareAndSwap(old_value, new_value, - &tcp_ccdbg_unit)) - log(LOG_DEBUG, + &tcp_ccdbg_unit)) { + log(LOG_DEBUG, "failed to disconnect tcp_cc debug control"); + } } - return (0); + return 0; } inline void @@ -216,11 +220,11 @@ tcp_ccdbg_trace(struct tcpcb *tp, struct tcphdr *th, int32_t event) struct timespec tv; bzero(&dbg_state, sizeof(dbg_state)); - + nanotime(&tv); /* Take time in seconds */ dbg_state.ccd_tsns = (tv.tv_sec * 1000000000) + tv.tv_nsec; - inet_ntop(SOCK_DOM(inp->inp_socket), + inet_ntop(SOCK_DOM(inp->inp_socket), ((SOCK_DOM(inp->inp_socket) == PF_INET) ? 
	    (void *)&inp->inp_laddr.s_addr : (void *)&inp->in6p_laddr),
	    dbg_state.ccd_srcaddr,
@@ -246,7 +250,7 @@ tcp_ccdbg_trace(struct tcpcb *tp, struct tcphdr *th, int32_t event)
 		dbg_state.ccd_bytes_acked = tp->t_bytes_acked;
 		dbg_state.ccd_cc_index = tp->tcp_cc_index;
 		switch (tp->tcp_cc_index) {
-		case TCP_CC_ALGO_CUBIC_INDEX:
+		case TCP_CC_ALGO_CUBIC_INDEX:
 			dbg_state.u.cubic_state.ccd_last_max =
 			    tp->t_ccstate->cub_last_max;
 			dbg_state.u.cubic_state.ccd_tcp_win =
@@ -258,22 +262,23 @@ tcp_ccdbg_trace(struct tcpcb *tp, struct tcphdr *th, int32_t event)
 			dbg_state.u.cubic_state.ccd_mean_deviation =
 			    tp->t_ccstate->cub_mean_dev;
 			break;
-		case TCP_CC_ALGO_BACKGROUND_INDEX:
+		case TCP_CC_ALGO_BACKGROUND_INDEX:
 			dbg_state.u.ledbat_state.led_base_rtt =
 			    get_base_rtt(tp);
 			break;
-		default:
+		default:
 			break;
 		}
 		ctl_enqueuedata(tcp_ccdbg_ctlref, tcp_ccdbg_unit,
-		    &dbg_state, sizeof(dbg_state), 0);
+		    &dbg_state, sizeof(dbg_state), 0);
 	}
 	DTRACE_TCP5(cc, void, NULL, struct inpcb *, inp,
-	    struct tcpcb *, tp, struct tcphdr *, th, int32_t, event);
+	    struct tcpcb *, tp, struct tcphdr *, th, int32_t, event);
 }

-void tcp_cc_resize_sndbuf(struct tcpcb *tp)
+void
+tcp_cc_resize_sndbuf(struct tcpcb *tp)
 {
 	struct sockbuf *sb;
 	/*
@@ -283,7 +288,7 @@ void tcp_cc_resize_sndbuf(struct tcpcb *tp)
 	 */
 	sb = &tp->t_inpcb->inp_socket->so_snd;
 	if (sb->sb_hiwat > tp->snd_ssthresh &&
-	    (sb->sb_flags & SB_AUTOSIZE)) {
+	    (sb->sb_flags & SB_AUTOSIZE)) {
 		if (sb->sb_idealsize > tp->snd_ssthresh) {
 			SET_SNDSB_IDEAL_SIZE(sb, tp->snd_ssthresh);
 		}
@@ -291,11 +296,12 @@ void tcp_cc_resize_sndbuf(struct tcpcb *tp)
 	}
 }

-void tcp_bad_rexmt_fix_sndbuf(struct tcpcb *tp)
+void
+tcp_bad_rexmt_fix_sndbuf(struct tcpcb *tp)
 {
 	struct sockbuf *sb;
 	sb = &tp->t_inpcb->inp_socket->so_snd;
-	if ((sb->sb_flags & (SB_TRIM|SB_AUTOSIZE)) == (SB_TRIM|SB_AUTOSIZE)) {
+	if ((sb->sb_flags & (SB_TRIM | SB_AUTOSIZE)) == (SB_TRIM | SB_AUTOSIZE)) {
 		/*
 		 * If there was a retransmission that was not necessary
 		 * then the size of socket buffer can be restored to
@@ -322,30 +328,31 @@ tcp_cc_cwnd_init_or_reset(struct tcpcb *tp)
 			tp->snd_cwnd = tp->t_maxseg * ss_fltsz_local;
 		} else {
 			/* initial congestion window according to RFC 3390 */
-			if (tcp_do_rfc3390)
+			if (tcp_do_rfc3390) {
 				tp->snd_cwnd = min(4 * tp->t_maxseg,
-				    max(2 * tp->t_maxseg, TCP_CC_CWND_INIT_BYTES));
-			else
+				    max(2 * tp->t_maxseg, TCP_CC_CWND_INIT_BYTES));
+			} else {
 				tp->snd_cwnd = tp->t_maxseg * ss_fltsz;
+			}
 		}
 	}
 }

 /*
  * Indicate whether this ack should be delayed.
  * Here is the explanation for different settings of tcp_delack_enabled:
- *   - when set to 1, the bhavior is same as when set to 2. We kept this
+ *   - when set to 1, the behavior is the same as when set to 2. We kept this
 *      for binary compatibility.
 *    - when set to 2, will "ack every other packet"
 *        - if our last ack wasn't a 0-sized window.
- *       - if the peer hasn't sent us a TH_PUSH data packet (radar 3649245).
- *         If TH_PUSH is set, take this as a clue that we need to ACK
- *         with no delay. This helps higher level protocols who
- *         won't send us more data even if the window is open
+ *   - if the peer hasn't sent us a TH_PUSH data packet (radar 3649245).
+ *     If TH_PUSH is set, take this as a clue that we need to ACK
+ *     with no delay.
This helps higher level protocols who + * won't send us more data even if the window is open * because their last "segment" hasn't been ACKed - * - when set to 3, will do "streaming detection" - * - if we receive more than "maxseg_unacked" full packets + * - when set to 3, will do "streaming detection" + * - if we receive more than "maxseg_unacked" full packets * in the last 100ms - * - if the connection is not in slow-start or idle or + * - if the connection is not in slow-start or idle or * loss/recovery states * - if those criteria aren't met, it will ack every other packet. */ @@ -353,30 +360,32 @@ int tcp_cc_delay_ack(struct tcpcb *tp, struct tcphdr *th) { switch (tcp_delack_enabled) { - case 1: - case 2: + case 1: + case 2: if ((tp->t_flags & TF_RXWIN0SENT) == 0 && (th->th_flags & TH_PUSH) == 0 && - (tp->t_unacksegs == 1)) - return(1); - break; - case 3: + (tp->t_unacksegs == 1)) { + return 1; + } + break; + case 3: if ((tp->t_flags & TF_RXWIN0SENT) == 0 && (th->th_flags & TH_PUSH) == 0 && ((tp->t_unacksegs == 1) || ((tp->t_flags & TF_STRETCHACK) != 0 && - tp->t_unacksegs < (maxseg_unacked)))) - return(1); + tp->t_unacksegs < (maxseg_unacked)))) { + return 1; + } break; } - return(0); + return 0; } void tcp_cc_allocate_state(struct tcpcb *tp) { if (tp->tcp_cc_index == TCP_CC_ALGO_CUBIC_INDEX && - tp->t_ccstate == NULL) { + tp->t_ccstate == NULL) { tp->t_ccstate = (struct tcp_ccstate *)zalloc(tcp_cc_zone); /* @@ -384,29 +393,32 @@ tcp_cc_allocate_state(struct tcpcb *tp) * state, revert to using TCP NewReno as it does not * require any state */ - if (tp->t_ccstate == NULL) + if (tp->t_ccstate == NULL) { tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX; - else + } else { bzero(tp->t_ccstate, sizeof(*tp->t_ccstate)); + } } } /* - * If stretch ack was disabled automatically on long standing connections, + * If stretch ack was disabled automatically on long standing connections, * re-evaluate the situation after 15 minutes to enable it. */ -#define TCP_STRETCHACK_DISABLE_WIN (15 * 60 * TCP_RETRANSHZ) +#define TCP_STRETCHACK_DISABLE_WIN (15 * 60 * TCP_RETRANSHZ) void tcp_cc_after_idle_stretchack(struct tcpcb *tp) { int32_t tdiff; - if (!(tp->t_flagsext & TF_DISABLE_STRETCHACK)) + if (!(tp->t_flagsext & TF_DISABLE_STRETCHACK)) { return; + } tdiff = timer_diff(tcp_now, 0, tp->rcv_nostrack_ts, 0); - if (tdiff < 0) + if (tdiff < 0) { tdiff = -tdiff; + } if (tdiff > TCP_STRETCHACK_DISABLE_WIN) { tp->t_flagsext &= ~TF_DISABLE_STRETCHACK; @@ -427,7 +439,7 @@ tcp_cc_is_cwnd_nonvalidated(struct tcpcb *tp) struct socket *so = tp->t_inpcb->inp_socket; if (tp->t_pipeack == 0 || tcp_check_cwnd_nonvalidated == 0) { tp->t_flagsext &= ~TF_CWND_NONVALIDATED; - return (0); + return 0; } /* @@ -436,11 +448,12 @@ tcp_cc_is_cwnd_nonvalidated(struct tcpcb *tp) * data to send in the send socket buffer */ if (tp->t_pipeack >= (tp->snd_cwnd >> 1) || - (so != NULL && so->so_snd.sb_cc > tp->snd_cwnd)) + (so != NULL && so->so_snd.sb_cc > tp->snd_cwnd)) { tp->t_flagsext &= ~TF_CWND_NONVALIDATED; - else + } else { tp->t_flagsext |= TF_CWND_NONVALIDATED; - return (tp->t_flagsext & TF_CWND_NONVALIDATED); + } + return tp->t_flagsext & TF_CWND_NONVALIDATED; } /* @@ -472,7 +485,7 @@ tcp_get_max_pipeack(struct tcpcb *tp) max_pipeack = (tp->t_pipeack_sample[2] > max_pipeack) ? 
tp->t_pipeack_sample[2] : max_pipeack; - return (max_pipeack); + return max_pipeack; } inline void diff --git a/bsd/netinet/tcp_cc.h b/bsd/netinet/tcp_cc.h index 7c83900d1..8a1f584ad 100644 --- a/bsd/netinet/tcp_cc.h +++ b/bsd/netinet/tcp_cc.h @@ -66,18 +66,18 @@ #include #include -#define TCP_CC_ALGO_NONE 0 -#define TCP_CC_ALGO_NEWRENO_INDEX 1 -#define TCP_CC_ALGO_BACKGROUND_INDEX 2 /* CC for background transport */ -#define TCP_CC_ALGO_CUBIC_INDEX 3 /* default CC algorithm */ -#define TCP_CC_ALGO_COUNT 4 /* Count of CC algorithms */ +#define TCP_CC_ALGO_NONE 0 +#define TCP_CC_ALGO_NEWRENO_INDEX 1 +#define TCP_CC_ALGO_BACKGROUND_INDEX 2 /* CC for background transport */ +#define TCP_CC_ALGO_CUBIC_INDEX 3 /* default CC algorithm */ +#define TCP_CC_ALGO_COUNT 4 /* Count of CC algorithms */ -#define TCP_CA_NAME_MAX 16 /* Maximum characters in the name of a CC algorithm */ +#define TCP_CA_NAME_MAX 16 /* Maximum characters in the name of a CC algorithm */ extern int tcp_recv_bg; /* - * Structure to hold definition various actions defined by a congestion + * Structure to hold definition various actions defined by a congestion * control algorithm for TCP. This can be used to change the congestion * control on a connection based on the user settings of priority of a * connection. @@ -91,10 +91,10 @@ struct tcp_cc_algo { int (*init) (struct tcpcb *tp); /* - * cleanup any state that is stored in the connection + * cleanup any state that is stored in the connection * related to the algorithm */ - int (*cleanup) (struct tcpcb *tp); + int (*cleanup) (struct tcpcb *tp); /* initialize cwnd at the start of a connection */ void (*cwnd_init) (struct tcpcb *tp); @@ -124,29 +124,28 @@ struct tcp_cc_algo { int (*delay_ack)(struct tcpcb *tp, struct tcphdr *th); /* Switch a connection to this CC algorithm after sending some packets */ - void (*switch_to)(struct tcpcb *tp, uint16_t old_cc_index); - + void (*switch_to)(struct tcpcb *tp, uint16_t old_cc_index); } __attribute__((aligned(4))); -extern struct zone *tcp_cc_zone; +extern struct zone *tcp_cc_zone; extern struct tcp_cc_algo* tcp_cc_algo_list[TCP_CC_ALGO_COUNT]; #define CC_ALGO(tp) (tcp_cc_algo_list[tp->tcp_cc_index]) -#define TCP_CC_CWND_INIT_BYTES 4380 +#define TCP_CC_CWND_INIT_BYTES 4380 /* * The congestion window will have to be reset after a * non-validated period -- currently set to 3 minutes */ -#define TCP_CC_CWND_NONVALIDATED_PERIOD (3 * 60 * TCP_RETRANSHZ) +#define TCP_CC_CWND_NONVALIDATED_PERIOD (3 * 60 * TCP_RETRANSHZ) -extern void tcp_cc_init(void); +extern void tcp_cc_init(void); extern void tcp_cc_resize_sndbuf(struct tcpcb *tp); extern void tcp_bad_rexmt_fix_sndbuf(struct tcpcb *tp); extern void tcp_cc_cwnd_init_or_reset(struct tcpcb *tp); extern int tcp_cc_delay_ack(struct tcpcb *tp, struct tcphdr *th); extern void tcp_ccdbg_trace(struct tcpcb *tp, struct tcphdr *th, - int32_t event); + int32_t event); extern void tcp_cc_allocate_state(struct tcpcb *tp); extern void tcp_cc_after_idle_stretchack(struct tcpcb *tp); extern uint32_t tcp_cc_is_cwnd_nonvalidated(struct tcpcb *tp); diff --git a/bsd/netinet/tcp_cubic.c b/bsd/netinet/tcp_cubic.c index c9ce59c04..2d1ad246b 100644 --- a/bsd/netinet/tcp_cubic.c +++ b/bsd/netinet/tcp_cubic.c @@ -90,37 +90,40 @@ const float tcp_cubic_coeff = 0.4; const float tcp_cubic_fast_convergence_factor = 0.875; SYSCTL_SKMEM_TCP_INT(OID_AUTO, cubic_tcp_friendliness, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, tcp_cubic_tcp_friendliness, 0, "Enable TCP friendliness"); + static int, tcp_cubic_tcp_friendliness, 0, 
"Enable TCP friendliness"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, cubic_fast_convergence, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, tcp_cubic_fast_convergence, 0, "Enable fast convergence"); + static int, tcp_cubic_fast_convergence, 0, "Enable fast convergence"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, cubic_use_minrtt, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, tcp_cubic_use_minrtt, 0, "use a min of 5 sec rtt"); + static int, tcp_cubic_use_minrtt, 0, "use a min of 5 sec rtt"); -static int tcp_cubic_init(struct tcpcb *tp) +static int +tcp_cubic_init(struct tcpcb *tp) { OSIncrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets); VERIFY(tp->t_ccstate != NULL); tcp_cubic_clear_state(tp); - return (0); + return 0; } -static int tcp_cubic_cleanup(struct tcpcb *tp) +static int +tcp_cubic_cleanup(struct tcpcb *tp) { #pragma unused(tp) OSDecrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets); - return (0); + return 0; } /* - * Initialize the congestion window at the beginning of a connection or + * Initialize the congestion window at the beginning of a connection or * after idle time */ -static void tcp_cubic_cwnd_init_or_reset(struct tcpcb *tp) +static void +tcp_cubic_cwnd_init_or_reset(struct tcpcb *tp) { - VERIFY(tp->t_ccstate != NULL); + VERIFY(tp->t_ccstate != NULL); tcp_cubic_clear_state(tp); tcp_cc_cwnd_init_or_reset(tp); @@ -138,15 +141,16 @@ static void tcp_cubic_cwnd_init_or_reset(struct tcpcb *tp) * to always probe to find the initial slow-start threshold. */ if (tp->t_inpcb->inp_stat->txbytes <= TCP_CC_CWND_INIT_BYTES - && tp->snd_ssthresh < (TCP_MAXWIN << TCP_MAX_WINSHIFT)) + && tp->snd_ssthresh < (TCP_MAXWIN << TCP_MAX_WINSHIFT)) { tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; + } /* Initialize cubic last max to be same as ssthresh */ tp->t_ccstate->cub_last_max = tp->snd_ssthresh; } /* - * Compute the target congestion window for the next RTT according to + * Compute the target congestion window for the next RTT according to * cubic equation when an ack is received. * * W(t) = C(t-K)^3 + W(last_max) @@ -158,20 +162,21 @@ tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt) u_int32_t elapsed_time, win; win = min(tp->snd_cwnd, tp->snd_wnd); - if (tp->t_ccstate->cub_last_max == 0) + if (tp->t_ccstate->cub_last_max == 0) { tp->t_ccstate->cub_last_max = tp->snd_ssthresh; + } if (tp->t_ccstate->cub_epoch_start == 0) { /* * This is the beginning of a new epoch, initialize some of - * the variables that we need to use for computing the + * the variables that we need to use for computing the * congestion window later. */ tp->t_ccstate->cub_epoch_start = tcp_now; - if (tp->t_ccstate->cub_epoch_start == 0) + if (tp->t_ccstate->cub_epoch_start == 0) { tp->t_ccstate->cub_epoch_start = 1; + } if (win < tp->t_ccstate->cub_last_max) { - VERIFY(current_task() == kernel_task); /* @@ -184,32 +189,33 @@ tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt) K = cbrtf(K); tp->t_ccstate->cub_epoch_period = K * TCP_RETRANSHZ; /* Origin point */ - tp->t_ccstate->cub_origin_point = - tp->t_ccstate->cub_last_max; + tp->t_ccstate->cub_origin_point = + tp->t_ccstate->cub_last_max; } else { tp->t_ccstate->cub_epoch_period = 0; tp->t_ccstate->cub_origin_point = win; } tp->t_ccstate->cub_target_win = 0; } - - VERIFY(tp->t_ccstate->cub_origin_point > 0); + + VERIFY(tp->t_ccstate->cub_origin_point > 0); /* * Compute the target window for the next RTT using smoothed RTT * as an estimate for next RTT. 
*/ - elapsed_time = timer_diff(tcp_now, 0, - tp->t_ccstate->cub_epoch_start, 0); + elapsed_time = timer_diff(tcp_now, 0, + tp->t_ccstate->cub_epoch_start, 0); - if (tcp_cubic_use_minrtt) + if (tcp_cubic_use_minrtt) { elapsed_time += max(tcp_cubic_use_minrtt, rtt); - else + } else { elapsed_time += rtt; + } var = (elapsed_time - tp->t_ccstate->cub_epoch_period) / TCP_RETRANSHZ; var = var * var * var * (tcp_cubic_coeff * tp->t_maxseg); tp->t_ccstate->cub_target_win = (u_int32_t)(tp->t_ccstate->cub_origin_point + var); - return (tp->t_ccstate->cub_target_win); + return tp->t_ccstate->cub_target_win; } /* @@ -222,7 +228,7 @@ tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt) * link because of the steady-state behavior. Using average and mean * absolute deviation of W(lastmax), we try to detect if the congestion * window is close to the bottleneck bandwidth. In that case, disabling - * TCP mode will help to minimize packet loss at this link. + * TCP mode will help to minimize packet loss at this link. * * Disable TCP mode if the W(lastmax) (the window where previous packet * loss happened) is within a small range from the average last max @@ -233,11 +239,11 @@ tcp_cubic_update(struct tcpcb *tp, u_int32_t rtt) (_tp_)->t_ccstate->cub_mean_dev > (tp->t_maxseg << 1)) ? 1 : 0) /* - * Compute the window growth if standard TCP (AIMD) was used with + * Compute the window growth if standard TCP (AIMD) was used with * a backoff of 0.5 and additive increase of 1 packet per RTT. - * + * * TCP window at time t can be calculated using the following equation - * with beta as 0.8 + * with beta as 0.8 * * W(t) <- Wmax * beta + 3 * ((1 - beta)/(1 + beta)) * t/RTT * @@ -257,8 +263,8 @@ tcp_cubic_tcpwin(struct tcpcb *tp, struct tcphdr *th) tp->t_ccstate->cub_tcp_win; tp->t_ccstate->cub_tcp_win += tp->t_maxseg; } - } - return (tp->t_ccstate->cub_tcp_win); + } + return tp->t_ccstate->cub_tcp_win; } /* @@ -270,8 +276,9 @@ tcp_cubic_congestion_avd(struct tcpcb *tp, struct tcphdr *th) u_int32_t cubic_target_win, tcp_win, rtt; /* Do not increase congestion window in non-validated phase */ - if (tcp_cc_is_cwnd_nonvalidated(tp) != 0) + if (tcp_cc_is_cwnd_nonvalidated(tp) != 0) { return; + } tp->t_bytes_acked += BYTES_ACKED(th, tp); @@ -299,8 +306,8 @@ tcp_cubic_congestion_avd(struct tcpcb *tp, struct tcphdr *th) /* * The target win is computed for the next RTT. * To reach this value, cwnd will have to be updated - * one segment at a time. Compute how many bytes - * need to be acknowledged before we can increase + * one segment at a time. Compute how many bytes + * need to be acknowledged before we can increase * the cwnd by one segment. */ u_int64_t incr_win; @@ -309,7 +316,7 @@ tcp_cubic_congestion_avd(struct tcpcb *tp, struct tcphdr *th) if (incr_win > 0 && tp->t_bytes_acked >= incr_win) { tp->t_bytes_acked -= incr_win; - tp->snd_cwnd = + tp->snd_cwnd = min((tp->snd_cwnd + tp->t_maxseg), TCP_MAXWIN << tp->snd_scale); } @@ -321,8 +328,9 @@ static void tcp_cubic_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) { /* Do not increase the congestion window in non-validated phase */ - if (tcp_cc_is_cwnd_nonvalidated(tp) != 0) + if (tcp_cc_is_cwnd_nonvalidated(tp) != 0) { return; + } if (tp->snd_cwnd >= tp->snd_ssthresh) { /* Congestion avoidance phase */ @@ -335,14 +343,14 @@ tcp_cubic_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) uint32_t acked, abc_lim, incr; acked = BYTES_ACKED(th, tp); - abc_lim = (tcp_do_rfc3465_lim2 && - tp->snd_nxt == tp->snd_max) ? 
-		    2 * tp->t_maxseg : tp->t_maxseg;
+		abc_lim = (tcp_do_rfc3465_lim2 &&
+		    tp->snd_nxt == tp->snd_max) ?
+		    2 * tp->t_maxseg : tp->t_maxseg;
 		incr = min(acked, abc_lim);
 		tp->snd_cwnd += incr;
-		tp->snd_cwnd = min(tp->snd_cwnd,
-		    TCP_MAXWIN << tp->snd_scale);
+		tp->snd_cwnd = min(tp->snd_cwnd,
+		    TCP_MAXWIN << tp->snd_scale);
 	}
 }
@@ -368,18 +376,19 @@ tcp_cubic_pre_fr(struct tcpcb *tp)
 	 * cub_last_max.
 	 *
 	 * If the congestion window is less than the last max window when
-	 * loss occurred, it indicates that capacity available in the
+	 * loss occurred, it indicates that capacity available in the
 	 * network has gone down. This can happen if a new flow has started
 	 * and it is capturing some of the bandwidth. To reach convergence
-	 * quickly, backoff a little more. Disable fast convergence to
+	 * quickly, backoff a little more. Disable fast convergence to
 	 * disable this behavior.
 	 */
 	if (win < tp->t_ccstate->cub_last_max &&
-	    tcp_cubic_fast_convergence == 1)
+	    tcp_cubic_fast_convergence == 1) {
 		tp->t_ccstate->cub_last_max = (u_int32_t)(win *
-		    tcp_cubic_fast_convergence_factor);
-	else
+		    tcp_cubic_fast_convergence_factor);
+	} else {
 		tp->t_ccstate->cub_last_max = win;
+	}

 	if (tp->t_ccstate->cub_last_max == 0) {
 		/*
@@ -404,15 +413,16 @@ tcp_cubic_pre_fr(struct tcpcb *tp)
 		avg = tp->t_ccstate->cub_avg_lastmax;
 		avg = (avg << 6) - avg;
 		tp->t_ccstate->cub_avg_lastmax =
-		    (avg + tp->t_ccstate->cub_last_max) >> 6;
+		    (avg + tp->t_ccstate->cub_last_max) >> 6;
 	}

 	/* calculate deviation from average */
 	dev = tp->t_ccstate->cub_avg_lastmax - tp->t_ccstate->cub_last_max;

 	/* Take the absolute value */
-	if (dev < 0)
+	if (dev < 0) {
 		dev = -dev;
+	}

 	if (tp->t_ccstate->cub_mean_dev == 0) {
 		tp->t_ccstate->cub_mean_dev = dev;
@@ -425,8 +435,9 @@ tcp_cubic_pre_fr(struct tcpcb *tp)
 	/* Backoff congestion window by tcp_cubic_backoff factor */
 	win = (u_int32_t)(win - (win * tcp_cubic_backoff));
 	win = (win / tp->t_maxseg);
-	if (win < 2)
+	if (win < 2) {
 		win = 2;
+	}
 	tp->snd_ssthresh = win * tp->t_maxseg;
 	tcp_cc_resize_sndbuf(tp);
 }
@@ -436,8 +447,9 @@ tcp_cubic_post_fr(struct tcpcb *tp, struct tcphdr *th)
 {
 	uint32_t flight_size = 0;

-	if (SEQ_LEQ(th->th_ack, tp->snd_max))
+	if (SEQ_LEQ(th->th_ack, tp->snd_max)) {
 		flight_size = tp->snd_max - th->th_ack;
+	}

 	if (SACK_ENABLED(tp) && tp->t_lossflightsize > 0) {
 		u_int32_t total_rxt_size = 0, ncwnd;
@@ -463,22 +475,23 @@ tcp_cubic_post_fr(struct tcpcb *tp, struct tcphdr *th)
 	 * Complete ack. The current window was inflated for fast recovery.
 	 * It has to be deflated post recovery.
 	 *
-	 * Window inflation should have left us with approx snd_ssthresh
+	 * Window inflation should have left us with approx snd_ssthresh
 	 * outstanding data. If the flight size is zero or one segment,
 	 * make congestion window to be at least as big as 2 segments to
 	 * avoid delayed acknowledgements. This is according to RFC 6582.
 	 */
-	if (flight_size < tp->snd_ssthresh)
-		tp->snd_cwnd = max(flight_size, tp->t_maxseg)
-		    + tp->t_maxseg;
-	else
+	if (flight_size < tp->snd_ssthresh) {
+		tp->snd_cwnd = max(flight_size, tp->t_maxseg)
+		    + tp->t_maxseg;
+	} else {
 		tp->snd_cwnd = tp->snd_ssthresh;
+	}

 	tp->t_ccstate->cub_tcp_win = 0;
 	tp->t_ccstate->cub_target_win = 0;
 	tp->t_ccstate->cub_tcp_bytes_acked = 0;
 }

-static void
+static void
 tcp_cubic_after_timeout(struct tcpcb *tp)
 {
 	VERIFY(tp->t_ccstate != NULL);
@@ -489,8 +502,9 @@ tcp_cubic_after_timeout(struct tcpcb *tp)
 	 * needed to adjust the window.
*/ if (tp->t_state < TCPS_ESTABLISHED && - ((int)(tp->snd_max - tp->snd_una) <= 1)) + ((int)(tp->snd_max - tp->snd_una) <= 1)) { return; + } if (!IN_FASTRECOVERY(tp)) { tcp_cubic_clear_state(tp); @@ -507,11 +521,11 @@ tcp_cubic_after_timeout(struct tcpcb *tp) static int tcp_cubic_delay_ack(struct tcpcb *tp, struct tcphdr *th) { - return (tcp_cc_delay_ack(tp, th)); + return tcp_cc_delay_ack(tp, th); } /* - * When switching from a different CC it is better for Cubic to start + * When switching from a different CC it is better for Cubic to start * fresh. The state required for Cubic calculation might be stale and it * might not represent the current state of the network. If it starts as * a new connection it will probe and learn the existing network conditions. @@ -525,7 +539,8 @@ tcp_cubic_switch_cc(struct tcpcb *tp, uint16_t old_cc_index) OSIncrementAtomic((volatile SInt32 *)&tcp_cc_cubic.num_sockets); } -static inline void tcp_cubic_clear_state(struct tcpcb *tp) +static inline void +tcp_cubic_clear_state(struct tcpcb *tp) { tp->t_ccstate->cub_last_max = 0; tp->t_ccstate->cub_epoch_start = 0; diff --git a/bsd/netinet/tcp_debug.c b/bsd/netinet/tcp_debug.c index 8ba9eb6af..68a9a4807 100644 --- a/bsd/netinet/tcp_debug.c +++ b/bsd/netinet/tcp_debug.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -70,8 +70,8 @@ /* load symbolic names */ #define PRUREQUESTS #define TCPSTATES -#define TCPTIMERS -#define TANAMES +#define TCPTIMERS +#define TANAMES #endif #include @@ -95,24 +95,24 @@ #include #if TCPDEBUG -__private_extern__ int tcpconsdebug = 0; +__private_extern__ int tcpconsdebug = 0; SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcpconsdebug, CTLFLAG_RW | CTLFLAG_LOCKED, &tcpconsdebug, 0, "Turn tcp debugging on or off"); #endif static struct tcp_debug tcp_debug[TCP_NDEBUG]; -static int tcp_debx; +static int tcp_debx; /* * Tcp debug routines */ void tcp_trace(act, ostate, tp, ipgen, th, req) - short act, ostate; - struct tcpcb *tp; - void *ipgen; - struct tcphdr *th; - int req; +short act, ostate; +struct tcpcb *tp; +void *ipgen; +struct tcphdr *th; +int req; { #if INET6 int isipv6; @@ -126,38 +126,40 @@ tcp_trace(act, ostate, tp, ipgen, th, req) #endif /* INET6 */ td->td_family = #if INET6 - (isipv6 != 0) ? AF_INET6 : + (isipv6 != 0) ? 
AF_INET6 : #endif - AF_INET; - if (tcp_debx == TCP_NDEBUG) + AF_INET; + if (tcp_debx == TCP_NDEBUG) { tcp_debx = 0; + } td->td_time = iptime(); td->td_act = act; td->td_ostate = ostate; td->td_tcb = (caddr_t)tp; - if (tp) + if (tp) { td->td_cb = *tp; - else - bzero((caddr_t)&td->td_cb, sizeof (*tp)); + } else { + bzero((caddr_t)&td->td_cb, sizeof(*tp)); + } if (ipgen) { switch (td->td_family) { case AF_INET: bcopy((caddr_t)ipgen, (caddr_t)&td->td_ti.ti_i, - sizeof(td->td_ti.ti_i)); + sizeof(td->td_ti.ti_i)); bzero((caddr_t)td->td_ip6buf, sizeof(td->td_ip6buf)); break; #if INET6 case AF_INET6: bcopy((caddr_t)ipgen, (caddr_t)td->td_ip6buf, - sizeof(td->td_ip6buf)); + sizeof(td->td_ip6buf)); bzero((caddr_t)&td->td_ti.ti_i, - sizeof(td->td_ti.ti_i)); + sizeof(td->td_ti.ti_i)); break; #endif default: bzero((caddr_t)td->td_ip6buf, sizeof(td->td_ip6buf)); bzero((caddr_t)&td->td_ti.ti_i, - sizeof(td->td_ti.ti_i)); + sizeof(td->td_ti.ti_i)); break; } } else { @@ -174,12 +176,12 @@ tcp_trace(act, ostate, tp, ipgen, th, req) case AF_INET6: td->td_ti6.th = *th; bzero((caddr_t)&td->td_ti.ti_t, - sizeof(td->td_ti.ti_t)); + sizeof(td->td_ti.ti_t)); break; #endif default: bzero((caddr_t)&td->td_ti.ti_t, - sizeof(td->td_ti.ti_t)); + sizeof(td->td_ti.ti_t)); bzero((caddr_t)&td->td_ti6.th, sizeof(td->td_ti6.th)); break; } @@ -189,47 +191,51 @@ tcp_trace(act, ostate, tp, ipgen, th, req) } td->td_req = req; #if TCPDEBUG - if (tcpconsdebug == 0) + if (tcpconsdebug == 0) { return; - if (tp) + } + if (tp) { printf("%x %s:", tp, tcpstates[ostate]); - else + } else { printf("???????? "); + } printf("%s ", tanames[act]); switch (act) { - case TA_INPUT: case TA_OUTPUT: case TA_DROP: - if (ipgen == NULL || th == NULL) + if (ipgen == NULL || th == NULL) { break; + } seq = th->th_seq; ack = th->th_ack; len = #if INET6 - isipv6 ? ((struct ip6_hdr *)ipgen)->ip6_plen : + isipv6 ? ((struct ip6_hdr *)ipgen)->ip6_plen : #endif - ((struct ip *)ipgen)->ip_len; + ((struct ip *)ipgen)->ip_len; if (act == TA_OUTPUT) { seq = ntohl(seq); ack = ntohl(ack); len = ntohs((u_short)len); } - if (act == TA_OUTPUT) - len -= sizeof (struct tcphdr); - if (len) - printf("[%x..%x)", seq, seq+len); - else + if (act == TA_OUTPUT) { + len -= sizeof(struct tcphdr); + } + if (len) { + printf("[%x..%x)", seq, seq + len); + } else { printf("%x", seq); + } printf("@%x, urp=%x", ack, th->th_urp); flags = th->th_flags; if (flags) { char *cp = "<"; -#define pf(f) { \ - if (th->th_flags & TH_##f) { \ - printf("%s%s", cp, #f); \ - cp = ","; \ - } \ +#define pf(f) { \ + if (th->th_flags & TH_##f) { \ + printf("%s%s", cp, #f); \ + cp = ","; \ + } \ } pf(SYN); pf(ACK); pf(FIN); pf(RST); pf(PUSH); pf(URG); printf(">"); @@ -237,21 +243,24 @@ tcp_trace(act, ostate, tp, ipgen, th, req) break; case TA_USER: - printf("%s", prurequests[req&0xff]); - if ((req & 0xff) == PRU_SLOWTIMO) - printf("<%s>", tcptimers[req>>8]); + printf("%s", prurequests[req & 0xff]); + if ((req & 0xff) == PRU_SLOWTIMO) { + printf("<%s>", tcptimers[req >> 8]); + } break; } - if (tp) + if (tp) { printf(" -> %s", tcpstates[tp->t_state]); + } /* print out internal state of tp !?! 
*/ printf("\n"); - if (tp == 0) + if (tp == 0) { return; + } printf( - "\trcv_(nxt,wnd,up) (%lx,%lx,%lx) snd_(una,nxt,max) (%lx,%lx,%lx)\n", - (uint32_t)tp->rcv_nxt, tp->rcv_wnd, (uint32_t)tp->rcv_up, - (uint32_t)tp->snd_una, (uint32_t)tp->snd_nxt, (uint32_t)tp->snd_max); + "\trcv_(nxt,wnd,up) (%lx,%lx,%lx) snd_(una,nxt,max) (%lx,%lx,%lx)\n", + (uint32_t)tp->rcv_nxt, tp->rcv_wnd, (uint32_t)tp->rcv_up, + (uint32_t)tp->snd_una, (uint32_t)tp->snd_nxt, (uint32_t)tp->snd_max); printf("\tsnd_(wl1,wl2,wnd) (%lx,%lx,%lx)\n", (uint32_t)tp->snd_wl1, (uint32_t)tp->snd_wl2, tp->snd_wnd); #endif /* TCPDEBUG */ diff --git a/bsd/netinet/tcp_debug.h b/bsd/netinet/tcp_debug.h index 89dc86651..4a850d5c3 100644 --- a/bsd/netinet/tcp_debug.h +++ b/bsd/netinet/tcp_debug.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -62,46 +62,46 @@ */ #ifndef _NETINET_TCP_DEBUG_H_ -#define _NETINET_TCP_DEBUG_H_ +#define _NETINET_TCP_DEBUG_H_ #include #ifdef PRIVATE -struct tcp_debug { +struct tcp_debug { u_int32_t td_time; - short td_act; - short td_ostate; - caddr_t td_tcb; - int td_family; + short td_act; + short td_ostate; + caddr_t td_tcb; + int td_family; /* * Co-existense of td_ti and td_ti6 below is ugly, but it is necessary * to achieve backword compatibility to some extent. */ - struct tcpiphdr td_ti; + struct tcpiphdr td_ti; struct { #if !defined(KERNEL) && defined(INET6) - struct ip6_hdr ip6; + struct ip6_hdr ip6; #else - u_char ip6buf[40]; /* sizeof(struct ip6_hdr) */ + u_char ip6buf[40]; /* sizeof(struct ip6_hdr) */ #endif - struct tcphdr th; + struct tcphdr th; } td_ti6; -#define td_ip6buf td_ti6.ip6buf - short td_req; - struct tcpcb td_cb; +#define td_ip6buf td_ti6.ip6buf + short td_req; + struct tcpcb td_cb; }; -#define TA_INPUT 0 -#define TA_OUTPUT 1 -#define TA_USER 2 -#define TA_RESPOND 3 -#define TA_DROP 4 +#define TA_INPUT 0 +#define TA_OUTPUT 1 +#define TA_USER 2 +#define TA_RESPOND 3 +#define TA_DROP 4 #ifdef TANAMES -static char *tanames[] = - { "input", "output", "user", "respond", "drop" }; +static char *tanames[] = +{ "input", "output", "user", "respond", "drop" }; #endif -#define TCP_NDEBUG 100 +#define TCP_NDEBUG 100 #endif /* PRIVATE */ diff --git a/bsd/netinet/tcp_fsm.h b/bsd/netinet/tcp_fsm.h index 2a46e6ca5..dab262972 100644 --- a/bsd/netinet/tcp_fsm.h +++ b/bsd/netinet/tcp_fsm.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -70,80 +70,80 @@ * Per RFC793, September, 1981. */ -#define TCP_NSTATES 11 +#define TCP_NSTATES 11 -#define TCPS_CLOSED 0 /* closed */ -#define TCPS_LISTEN 1 /* listening for connection */ -#define TCPS_SYN_SENT 2 /* active, have sent syn */ -#define TCPS_SYN_RECEIVED 3 /* have send and received syn */ +#define TCPS_CLOSED 0 /* closed */ +#define TCPS_LISTEN 1 /* listening for connection */ +#define TCPS_SYN_SENT 2 /* active, have sent syn */ +#define TCPS_SYN_RECEIVED 3 /* have send and received syn */ /* states < TCPS_ESTABLISHED are those where connections not established */ -#define TCPS_ESTABLISHED 4 /* established */ -#define TCPS_CLOSE_WAIT 5 /* rcvd fin, waiting for close */ +#define TCPS_ESTABLISHED 4 /* established */ +#define TCPS_CLOSE_WAIT 5 /* rcvd fin, waiting for close */ /* states > TCPS_CLOSE_WAIT are those where user has closed */ -#define TCPS_FIN_WAIT_1 6 /* have closed, sent fin */ -#define TCPS_CLOSING 7 /* closed xchd FIN; await FIN ACK */ -#define TCPS_LAST_ACK 8 /* had fin and close; await FIN ACK */ +#define TCPS_FIN_WAIT_1 6 /* have closed, sent fin */ +#define TCPS_CLOSING 7 /* closed xchd FIN; await FIN ACK */ +#define TCPS_LAST_ACK 8 /* had fin and close; await FIN ACK */ /* states > TCPS_CLOSE_WAIT && < TCPS_FIN_WAIT_2 await ACK of FIN */ -#define TCPS_FIN_WAIT_2 9 /* have closed, fin is acked */ -#define TCPS_TIME_WAIT 10 /* in 2*msl quiet wait after close */ +#define TCPS_FIN_WAIT_2 9 /* have closed, fin is acked */ +#define TCPS_TIME_WAIT 10 /* in 2*msl quiet wait after close */ /* for KAME src sync over BSD*'s */ -#define TCP6_NSTATES TCP_NSTATES -#define TCP6S_CLOSED TCPS_CLOSED -#define TCP6S_LISTEN TCPS_LISTEN -#define TCP6S_SYN_SENT TCPS_SYN_SENT -#define TCP6S_SYN_RECEIVED TCPS_SYN_RECEIVED -#define TCP6S_ESTABLISHED TCPS_ESTABLISHED -#define TCP6S_CLOSE_WAIT TCPS_CLOSE_WAIT -#define TCP6S_FIN_WAIT_1 TCPS_FIN_WAIT_1 -#define TCP6S_CLOSING TCPS_CLOSING -#define TCP6S_LAST_ACK TCPS_LAST_ACK -#define TCP6S_FIN_WAIT_2 TCPS_FIN_WAIT_2 -#define TCP6S_TIME_WAIT TCPS_TIME_WAIT +#define TCP6_NSTATES TCP_NSTATES +#define TCP6S_CLOSED TCPS_CLOSED +#define TCP6S_LISTEN TCPS_LISTEN +#define TCP6S_SYN_SENT TCPS_SYN_SENT +#define TCP6S_SYN_RECEIVED TCPS_SYN_RECEIVED +#define TCP6S_ESTABLISHED TCPS_ESTABLISHED +#define TCP6S_CLOSE_WAIT TCPS_CLOSE_WAIT +#define TCP6S_FIN_WAIT_1 TCPS_FIN_WAIT_1 +#define TCP6S_CLOSING TCPS_CLOSING +#define TCP6S_LAST_ACK TCPS_LAST_ACK +#define TCP6S_FIN_WAIT_2 
TCPS_FIN_WAIT_2 +#define TCP6S_TIME_WAIT TCPS_TIME_WAIT -#define TCPS_HAVERCVDSYN(s) ((s) >= TCPS_SYN_RECEIVED) -#define TCPS_HAVEESTABLISHED(s) ((s) >= TCPS_ESTABLISHED) -#define TCPS_HAVERCVDFIN(s) ((s) >= TCPS_TIME_WAIT) -#define TCPS_HAVERCVDFIN2(s) ((s) == TCPS_CLOSE_WAIT || \ - (s) == TCPS_CLOSING || \ - (s) == TCPS_LAST_ACK || \ - (s) == TCPS_TIME_WAIT) +#define TCPS_HAVERCVDSYN(s) ((s) >= TCPS_SYN_RECEIVED) +#define TCPS_HAVEESTABLISHED(s) ((s) >= TCPS_ESTABLISHED) +#define TCPS_HAVERCVDFIN(s) ((s) >= TCPS_TIME_WAIT) +#define TCPS_HAVERCVDFIN2(s) ((s) == TCPS_CLOSE_WAIT || \ + (s) == TCPS_CLOSING || \ + (s) == TCPS_LAST_ACK || \ + (s) == TCPS_TIME_WAIT) #ifdef KERNEL_PRIVATE -#ifdef TCPOUTFLAGS +#ifdef TCPOUTFLAGS /* * Flags used when sending segments in tcp_output. * Basic flags (TH_RST,TH_ACK,TH_SYN,TH_FIN) are totally * determined by state, with the proviso that TH_FIN is sent only * if all data queued for output is included in the segment. */ -static u_char tcp_outflags[TCP_NSTATES] = { - TH_RST|TH_ACK, /* 0, CLOSED */ - 0, /* 1, LISTEN */ - TH_SYN, /* 2, SYN_SENT */ - TH_SYN|TH_ACK, /* 3, SYN_RECEIVED */ - TH_ACK, /* 4, ESTABLISHED */ - TH_ACK, /* 5, CLOSE_WAIT */ - TH_FIN|TH_ACK, /* 6, FIN_WAIT_1 */ - TH_FIN|TH_ACK, /* 7, CLOSING */ - TH_FIN|TH_ACK, /* 8, LAST_ACK */ - TH_ACK, /* 9, FIN_WAIT_2 */ - TH_ACK, /* 10, TIME_WAIT */ -}; +static u_char tcp_outflags[TCP_NSTATES] = { + TH_RST | TH_ACK, /* 0, CLOSED */ + 0, /* 1, LISTEN */ + TH_SYN, /* 2, SYN_SENT */ + TH_SYN | TH_ACK, /* 3, SYN_RECEIVED */ + TH_ACK, /* 4, ESTABLISHED */ + TH_ACK, /* 5, CLOSE_WAIT */ + TH_FIN | TH_ACK, /* 6, FIN_WAIT_1 */ + TH_FIN | TH_ACK, /* 7, CLOSING */ + TH_FIN | TH_ACK, /* 8, LAST_ACK */ + TH_ACK, /* 9, FIN_WAIT_2 */ + TH_ACK, /* 10, TIME_WAIT */ +}; #endif #endif /* KERNEL_PRIVATE */ #if KPROF #ifdef KERNEL_PRIVATE -int tcp_acounts[TCP_NSTATES][PRU_NREQ]; +int tcp_acounts[TCP_NSTATES][PRU_NREQ]; #endif /* KERNEL_PRIVATE */ #endif -#ifdef TCPSTATES +#ifdef TCPSTATES char *tcpstates[] = { - "CLOSED", "LISTEN", "SYN_SENT", "SYN_RCVD", - "ESTABLISHED", "CLOSE_WAIT", "FIN_WAIT_1", "CLOSING", - "LAST_ACK", "FIN_WAIT_2", "TIME_WAIT" + "CLOSED", "LISTEN", "SYN_SENT", "SYN_RCVD", + "ESTABLISHED", "CLOSE_WAIT", "FIN_WAIT_1", "CLOSING", + "LAST_ACK", "FIN_WAIT_2", "TIME_WAIT" }; #endif diff --git a/bsd/netinet/tcp_input.c b/bsd/netinet/tcp_input.c index c18f014e1..8990100b8 100644 --- a/bsd/netinet/tcp_input.c +++ b/bsd/netinet/tcp_input.c @@ -73,7 +73,7 @@ #include #include #include -#include /* for proc0 declaration */ +#include /* for proc0 declaration */ #include #include #include @@ -83,7 +83,7 @@ #include #endif #include -#include /* before tcp_seq.h, for tcp_random18() */ +#include /* before tcp_seq.h, for tcp_random18() */ #include @@ -98,7 +98,7 @@ #include #include /* for ICMP_BANDLIM */ #include -#include /* for ICMP_BANDLIM */ +#include /* for ICMP_BANDLIM */ #include #include #include @@ -150,16 +150,16 @@ struct tcphdr tcp_savetcp; #include -#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 0) -#define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 2) +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETTCP, 0) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETTCP, 2) #define DBG_FNC_TCP_INPUT NETDBG_CODE(DBG_NETTCP, (3 << 8)) #define DBG_FNC_TCP_NEWCONN NETDBG_CODE(DBG_NETTCP, (7 << 8)) -#define TCP_RTT_HISTORY_EXPIRE_TIME (60 * TCP_RETRANSHZ) -#define TCP_RECV_THROTTLE_WIN (5 * TCP_RETRANSHZ) -#define TCP_STRETCHACK_ENABLE_PKTCNT 2000 +#define TCP_RTT_HISTORY_EXPIRE_TIME (60 * TCP_RETRANSHZ) +#define TCP_RECV_THROTTLE_WIN 
(5 * TCP_RETRANSHZ)
+#define TCP_STRETCHACK_ENABLE_PKTCNT    2000

-struct tcpstat tcpstat;
+struct tcpstat tcpstat;

 static int log_in_vain = 0;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain,
@@ -188,7 +188,7 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, drop_synfin,
     "Drop TCP packets with SYN+FIN set");
 #endif

-SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
+SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
     "TCP Segment Reassembly Queue");

 static int tcp_reass_overflows = 0;
@@ -198,8 +198,8 @@ SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows,

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, slowlink_wsize, CTLFLAG_RW | CTLFLAG_LOCKED,
-	__private_extern__ int, slowlink_wsize, 8192,
-	"Maximum advertised window size for slowlink");
+    __private_extern__ int, slowlink_wsize, 8192,
+    "Maximum advertised window size for slowlink");

 SYSCTL_SKMEM_TCP_INT(OID_AUTO, maxseg_unacked,
     CTLFLAG_RW | CTLFLAG_LOCKED, int, maxseg_unacked, 8,
@@ -241,9 +241,9 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, autorcvbufmax,
 int sw_lro = 1;
 #else
 int sw_lro = 0;
-#endif /* !CONFIG_EMBEDDED */
+#endif /* !CONFIG_EMBEDDED */
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_LOCKED,
-	&sw_lro, 0, "Used to coalesce TCP packets");
+    &sw_lro, 0, "Used to coalesce TCP packets");

 int lrodebug = 0;
 SYSCTL_INT(_net_inet_tcp, OID_AUTO, lrodbg,
@@ -298,11 +298,11 @@ extern int tcp_acc_iaj_react_limit;

 int tcprexmtthresh = 3;

 u_int32_t tcp_now;
-struct timeval tcp_uptime;	/* uptime when tcp_now was last updated */
-lck_spin_t *tcp_uptime_lock;	/* Used to sychronize updates to tcp_now */
+struct timeval tcp_uptime;      /* uptime when tcp_now was last updated */
+lck_spin_t *tcp_uptime_lock;    /* Used to synchronize updates to tcp_now */

 struct inpcbhead tcb;
-#define tcb6	tcb  /* for KAME src sync over BSD*'s */
+#define tcb6    tcb  /* for KAME src sync over BSD*'s */
 struct inpcbinfo tcbinfo;

 static void tcp_dooptions(struct tcpcb *, u_char *, int, struct tcphdr *,
@@ -357,19 +357,19 @@ static void tcp_bad_rexmt_check(struct tcpcb *tp, struct tcphdr *th,
 #define TCP_EARLY_REXMT_WIN (60 * TCP_RETRANSHZ) /* 60 seconds */
 #define TCP_EARLY_REXMT_LIMIT 10

-extern void ipfwsyslog( int level, const char *format,...);
+extern void ipfwsyslog( int level, const char *format, ...);
 extern int fw_verbose;

 #if IPFIREWALL
 extern void ipfw_stealth_stats_incr_tcp(void);

 #define log_in_vain_log( a ) { \
-	if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \
-		ipfwsyslog a ; \
-	} else if ( (log_in_vain == 4 ) && (fw_verbose == 2)) { \
-		ipfw_stealth_stats_incr_tcp(); \
-	} \
-	else log a ; \
+	if ( (log_in_vain == 3 ) && (fw_verbose == 2)) { /* Apple logging, log to ipfw.log */ \
+	        ipfwsyslog a ; \
+	} else if ( (log_in_vain == 4 ) && (fw_verbose == 2)) { \
+	        ipfw_stealth_stats_incr_tcp(); \
+	} \
+	else log a ; \
 }
 #else
 #define log_in_vain_log( a ) { log a; }
@@ -378,7 +378,7 @@ extern void ipfw_stealth_stats_incr_tcp(void);
 int tcp_rcvunackwin = TCPTV_UNACKWIN;
 int tcp_maxrcvidle = TCPTV_MAXRCVIDLE;
 SYSCTL_SKMEM_TCP_INT(OID_AUTO, rcvsspktcnt, CTLFLAG_RW | CTLFLAG_LOCKED,
-	int, tcp_rcvsspktcnt, TCP_RCV_SS_PKTCOUNT, "packets to be seen before receiver stretches acks");
+    int, tcp_rcvsspktcnt, TCP_RCV_SS_PKTCOUNT, "packets to be seen before receiver stretches acks");

 #define DELAY_ACK(tp, th) \
 	(CC_ALGO(tp)->delay_ack != NULL && CC_ALGO(tp)->delay_ack(tp, th))
@@ -402,8 +402,9 @@ reset_acc_iaj(struct tcpcb *tp)
 static inline void
 update_iaj_state(struct tcpcb *tp, uint32_t size, int
rst_size) { - if (rst_size > 0) + if (rst_size > 0) { tp->iaj_size = 0; + } if (tp->iaj_size == 0 || size >= tp->iaj_size) { tp->iaj_size = size; tp->iaj_rcv_ts = tcp_now; @@ -419,8 +420,8 @@ static inline int isqrt(unsigned int val) { unsigned int sqrt_cache[11] = {0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100}; - unsigned int temp, g=0, b=0x8000, bshft=15; - if ( val <= 100) { + unsigned int temp, g = 0, b = 0x8000, bshft = 15; + if (val <= 100) { for (g = 0; g <= 10; ++g) { if (sqrt_cache[g] > val) { g--; @@ -437,20 +438,20 @@ isqrt(unsigned int val) val -= temp; } b >>= 1; - } while ( b > 0 && val > 0); + } while (b > 0 && val > 0); } - return(g); + return g; } /* -* With LRO, roughly estimate the inter arrival time between -* each sub coalesced packet as an average. Count the delay -* cur_iaj to be the delay between the last packet received -* and the first packet of the LRO stream. Due to round off errors -* cur_iaj may be the same as lro_delay_factor. Averaging has -* round off errors too. lro_delay_factor may be close to 0 -* in steady state leading to lower values fed to compute_iaj_meat. -*/ + * With LRO, roughly estimate the inter arrival time between + * each sub coalesced packet as an average. Count the delay + * cur_iaj to be the delay between the last packet received + * and the first packet of the LRO stream. Due to round off errors + * cur_iaj may be the same as lro_delay_factor. Averaging has + * round off errors too. lro_delay_factor may be close to 0 + * in steady state leading to lower values fed to compute_iaj_meat. + */ void compute_iaj(struct tcpcb *tp, int nlropkts, int lro_delay_factor) { @@ -463,22 +464,23 @@ compute_iaj(struct tcpcb *tp, int nlropkts, int lro_delay_factor) compute_iaj_meat(tp, cur_iaj); - if (nlropkts <= 1) + if (nlropkts <= 1) { return; + } nlropkts--; - timediff = lro_delay_factor/nlropkts; + timediff = lro_delay_factor / nlropkts; - while (nlropkts > 0) - { + while (nlropkts > 0) { compute_iaj_meat(tp, timediff); nlropkts--; } } static -void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj) +void +compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj) { /* When accumulated IAJ reaches MAX_ACC_IAJ in milliseconds, * throttle the receive window to a minimum of MIN_IAJ_WIN packets @@ -499,8 +501,9 @@ void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj) * using standard deviation. */ allowed_iaj = tp->avg_iaj + tp->std_dev_iaj; - if (allowed_iaj < tcp_allowed_iaj) + if (allowed_iaj < tcp_allowed_iaj) { allowed_iaj = tcp_allowed_iaj; + } /* Initially when the connection starts, the senders congestion * window is small. During this period we avoid throttling a @@ -509,18 +512,19 @@ void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj) * the first few packets. */ if (tp->iaj_pktcnt > IAJ_IGNORE_PKTCNT) { - if ( cur_iaj <= allowed_iaj ) { - if (tp->acc_iaj >= 2) + if (cur_iaj <= allowed_iaj) { + if (tp->acc_iaj >= 2) { acc_iaj = tp->acc_iaj - 2; - else + } else { acc_iaj = 0; - + } } else { acc_iaj = tp->acc_iaj + (cur_iaj - allowed_iaj); } - if (acc_iaj > MAX_ACC_IAJ) + if (acc_iaj > MAX_ACC_IAJ) { acc_iaj = MAX_ACC_IAJ; + } tp->acc_iaj = acc_iaj; } @@ -532,20 +536,20 @@ void compute_iaj_meat(struct tcpcb *tp, uint32_t cur_iaj) * instead of round-down */ tp->avg_iaj = (((tp->avg_iaj << IAJ_DIV_SHIFT) - tp->avg_iaj) - + cur_iaj + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT; + + cur_iaj + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT; /* Compute Root-mean-square of deviation where mean is a weighted * average as described above. 
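 * (Hypothetical illustration: if IAJ_DIV_SHIFT were 4, the update above
 * would read avg_iaj' = (15 * avg_iaj + cur_iaj + rounding) / 16, a
 * weighted moving average that keeps 15 parts history to 1 part new
 * sample; the same 15:1 weighting is applied below to the squared
 * deviation before the square root is taken.)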
*/ temp = tp->std_dev_iaj * tp->std_dev_iaj; mean = (((temp << IAJ_DIV_SHIFT) - temp) - + (cur_iaj_dev * cur_iaj_dev) - + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT; + + (cur_iaj_dev * cur_iaj_dev) + + IAJ_ROUNDUP_CONST) >> IAJ_DIV_SHIFT; tp->std_dev_iaj = isqrt(mean); DTRACE_TCP3(iaj, struct tcpcb *, tp, uint32_t, cur_iaj, - uint32_t, allowed_iaj); + uint32_t, allowed_iaj); return; } @@ -579,10 +583,11 @@ tcp_is_ack_ratelimited(struct tcpcb *tp) } /* Careful about wrap-around */ - if (ret == FALSE && (tp->t_challengeack_count + 1 > 0)) + if (ret == FALSE && (tp->t_challengeack_count + 1 > 0)) { tp->t_challengeack_count++; + } - return (ret); + return ret; } /* Check if enough amount of data has been acknowledged since @@ -594,8 +599,9 @@ tcp_bwmeas_check(struct tcpcb *tp) int32_t bw_meas_bytes; uint32_t bw, bytes, elapsed_time; - if (SEQ_LEQ(tp->snd_una, tp->t_bwmeas->bw_start)) + if (SEQ_LEQ(tp->snd_una, tp->t_bwmeas->bw_start)) { return; + } bw_meas_bytes = tp->snd_una - tp->t_bwmeas->bw_start; if ((tp->t_flagsext & TF_BWMEAS_INPROGRESS) && @@ -604,7 +610,7 @@ tcp_bwmeas_check(struct tcpcb *tp) elapsed_time = tcp_now - tp->t_bwmeas->bw_ts; if (elapsed_time > 0) { bw = bytes / elapsed_time; - if ( bw > 0) { + if (bw > 0) { if (tp->t_bwmeas->bw_sndbw > 0) { tp->t_bwmeas->bw_sndbw = (((tp->t_bwmeas->bw_sndbw << 3) @@ -653,8 +659,9 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, * Call with th==0 after become established to * force pre-ESTABLISHED data up to user socket. */ - if (th == NULL) + if (th == NULL) { goto present; + } /* * If the reassembly queue already has entries or if we are going @@ -664,8 +671,9 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, tcp_reset_stretch_ack(tp); #if TRAFFIC_MGT - if (tp->acc_iaj > 0) + if (tp->acc_iaj > 0) { reset_acc_iaj(tp); + } #endif /* TRAFFIC_MGT */ /* @@ -683,7 +691,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, tcpstat.tcps_rcvmemdrop++; m_freem(m); *tlenp = 0; - return (0); + return 0; } /* Allocate a new queue entry. If we can't, just drop the pkt. XXX */ @@ -691,7 +699,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, if (te == NULL) { tcpstat.tcps_rcvmemdrop++; m_freem(m); - return (0); + return 0; } tp->t_reassqlen++; @@ -699,8 +707,9 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, * Find a segment which begins after this one does. */ LIST_FOREACH(q, &tp->t_segq, tqe_q) { - if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) + if (SEQ_GT(q->tqe_th->th_seq, th->th_seq)) { break; + } p = q; } @@ -778,8 +787,9 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, */ while (q) { int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq; - if (i <= 0) + if (i <= 0) { break; + } /* * Report only the first part of partial/non-contiguous @@ -834,22 +844,24 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, * can be copied off to sockbuf after in-order data * is copied off. */ - if (!(so->so_state & SS_CANTRCVMORE)) + if (!(so->so_state & SS_CANTRCVMORE)) { copy_oodata = 1; + } present: /* * Present data to user, advancing rcv_nxt through * completed sequence space. 
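 * (For example: with rcv_nxt = 1 and queued segments covering sequence
 * ranges [1,100] and [101,200], both are appended to the receive socket
 * buffer in one pass and rcv_nxt advances to 201; if the head of the
 * queue starts beyond rcv_nxt, a hole remains and nothing is delivered.)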
*/ - if (!TCPS_HAVEESTABLISHED(tp->t_state)) - return (0); + if (!TCPS_HAVEESTABLISHED(tp->t_state)) { + return 0; + } q = LIST_FIRST(&tp->t_segq); if (!q || q->tqe_th->th_seq != tp->rcv_nxt) { /* Stop using LRO once out of order packets arrive */ if (tp->t_flagsext & TF_LRO_OFFLOADED) { tcp_lro_remove_state(inp->inp_laddr, inp->inp_faddr, - th->th_dport, th->th_sport); + th->th_dport, th->th_sport); tp->t_flagsext &= ~TF_LRO_OFFLOADED; } @@ -857,10 +869,11 @@ present: * continue processing if out-of-order data * can be delivered */ - if (q && (so->so_flags & SOF_ENABLE_MSGS)) + if (q && (so->so_flags & SOF_ENABLE_MSGS)) { goto msg_unordered_delivery; + } - return (0); + return 0; } /* @@ -868,8 +881,9 @@ present: * connection, it is better to let it finish the job -- * (radar 16316196) */ - if (tp->t_flagsext & TF_REASS_INPROG) - return (0); + if (tp->t_flagsext & TF_REASS_INPROG) { + return 0; + } tp->t_flagsext |= TF_REASS_INPROG; /* lost packet was recovered, so ooo data can be returned */ @@ -893,16 +907,18 @@ present: * the received data filled a gap, and * is now in order! */ - if (q == te) + if (q == te) { copy_oodata = 0; + } } if (sbappendstream_rcvdemux(so, q->tqe_m, - q->tqe_th->th_seq - (tp->irs + 1), 0)) + q->tqe_th->th_seq - (tp->irs + 1), 0)) { dowakeup = 1; + } if (tp->t_flagsext & TF_LRO_OFFLOADED) { tcp_update_lro_seq(tp->rcv_nxt, - inp->inp_laddr, inp->inp_faddr, - th->th_dport, th->th_sport); + inp->inp_laddr, inp->inp_faddr, + th->th_dport, th->th_sport); } } zfree(tcp_reass_zone, q); @@ -913,21 +929,19 @@ present: #if INET6 if ((inp->inp_vflag & INP_IPV6) != 0) { - KERNEL_DEBUG(DBG_LAYER_BEG, - ((inp->inp_fport << 16) | inp->inp_lport), - (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | - (inp->in6p_faddr.s6_addr16[0] & 0xffff)), - 0,0,0); - } - else + ((inp->inp_fport << 16) | inp->inp_lport), + (((inp->in6p_laddr.s6_addr16[0] & 0xffff) << 16) | + (inp->in6p_faddr.s6_addr16[0] & 0xffff)), + 0, 0, 0); + } else #endif { KERNEL_DEBUG(DBG_LAYER_BEG, - ((inp->inp_fport << 16) | inp->inp_lport), - (((inp->inp_laddr.s_addr & 0xffff) << 16) | - (inp->inp_faddr.s_addr & 0xffff)), - 0,0,0); + ((inp->inp_fport << 16) | inp->inp_lport), + (((inp->inp_laddr.s_addr & 0xffff) << 16) | + (inp->inp_faddr.s_addr & 0xffff)), + 0, 0, 0); } msg_unordered_delivery: @@ -940,7 +954,7 @@ msg_unordered_delivery: oodata = m_copym(te->tqe_m, 0, M_COPYALL, M_DONTWAIT); if (oodata != NULL) { if (sbappendmsgstream_rcv(&so->so_rcv, oodata, - te->tqe_th->th_seq - (tp->irs + 1), 1)) { + te->tqe_th->th_seq - (tp->irs + 1), 1)) { dowakeup = 1; tcpstat.tcps_msg_unopkts++; } else { @@ -949,9 +963,10 @@ msg_unordered_delivery: } } - if (dowakeup) + if (dowakeup) { sorwakeup(so); /* done with socket lock held */ - return (flags); + } + return flags; } /* @@ -960,20 +975,22 @@ msg_unordered_delivery: */ static void tcp_reduce_congestion_window( - struct tcpcb *tp) + struct tcpcb *tp) { /* * If the current tcp cc module has * defined a hook for tasks to run * before entering FR, call it */ - if (CC_ALGO(tp)->pre_fr != NULL) + if (CC_ALGO(tp)->pre_fr != NULL) { CC_ALGO(tp)->pre_fr(tp); + } ENTER_FASTRECOVERY(tp); - if (tp->t_flags & TF_SENTFIN) + if (tp->t_flags & TF_SENTFIN) { tp->snd_recover = tp->snd_max - 1; - else + } else { tp->snd_recover = tp->snd_max; + } tp->t_timer[TCPT_REXMT] = 0; tp->t_timer[TCPT_PTO] = 0; tp->t_rtttime = 0; @@ -1007,7 +1024,7 @@ tcp_adaptive_rwtimo_check(struct tcpcb *tp, int tlen) && tlen > 0 && tp->t_state == TCPS_ESTABLISHED) { tp->t_timer[TCPT_KEEP] = 
OFFSET_FROM_START(tp, - (TCP_REXMTVAL(tp) << 1)); + (TCP_REXMTVAL(tp) << 1)); tp->t_flagsext |= TF_DETECT_READSTALL; tp->t_rtimo_probes = 0; } @@ -1017,7 +1034,7 @@ inline void tcp_keepalive_reset(struct tcpcb *tp) { tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, - TCP_CONN_KEEPIDLE(tp)); + TCP_CONN_KEEPIDLE(tp)); tp->t_flagsext &= ~(TF_DETECT_READSTALL); tp->t_rtimo_probes = 0; } @@ -1055,12 +1072,12 @@ tcp6_input(struct mbuf **mp, int *offp, int proto) IF_TCP_STATINC(ifp, icmp6unreach); - return (IPPROTO_DONE); + return IPPROTO_DONE; } } tcp_input(m, *offp); - return (IPPROTO_DONE); + return IPPROTO_DONE; } #endif @@ -1084,17 +1101,17 @@ tcp_cansbgrow(struct sockbuf *sb) u_int32_t sbspacelim = ((nmbclusters >> 4) << MCLSHIFT); if ((total_sbmb_cnt < mblim) && - (sb->sb_hiwat < sbspacelim)) { - return(1); + (sb->sb_hiwat < sbspacelim)) { + return 1; } else { OSIncrementAtomic64(&sbmb_limreached); } - return(0); + return 0; } static void tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv, - u_int32_t newsize, u_int32_t idealsize, u_int32_t rcvbuf_max) + u_int32_t newsize, u_int32_t idealsize, u_int32_t rcvbuf_max) { /* newsize should not exceed max */ newsize = min(newsize, rcvbuf_max); @@ -1107,7 +1124,7 @@ tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv, /* Set new socket buffer size */ if (newsize > sbrcv->sb_hiwat && - (sbreserve(sbrcv, newsize) == 1)) { + (sbreserve(sbrcv, newsize) == 1)) { sbrcv->sb_idealsize = min(max(sbrcv->sb_idealsize, (idealsize != 0) ? idealsize : newsize), rcvbuf_max); @@ -1115,7 +1132,7 @@ tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv, * window scale */ sbrcv->sb_idealsize = min(sbrcv->sb_idealsize, - TCP_MAXWIN << tp->rcv_scale); + TCP_MAXWIN << tp->rcv_scale); } } @@ -1126,7 +1143,7 @@ tcp_sbrcv_reserve(struct tcpcb *tp, struct sockbuf *sbrcv, */ static void tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, - struct tcpopt *to, u_int32_t pktlen, u_int32_t rcvbuf_max) + struct tcpopt *to, u_int32_t pktlen, u_int32_t rcvbuf_max) { struct socket *so = sbrcv->sb_so; @@ -1142,18 +1159,18 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, * recovery should not change the receive window */ if (tcp_do_autorcvbuf == 0 || - (sbrcv->sb_flags & SB_AUTOSIZE) == 0 || - tcp_cansbgrow(sbrcv) == 0 || - sbrcv->sb_hiwat >= rcvbuf_max || - (tp->t_flagsext & TF_RECV_THROTTLE) || - (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) || - !LIST_EMPTY(&tp->t_segq)) { + (sbrcv->sb_flags & SB_AUTOSIZE) == 0 || + tcp_cansbgrow(sbrcv) == 0 || + sbrcv->sb_hiwat >= rcvbuf_max || + (tp->t_flagsext & TF_RECV_THROTTLE) || + (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) || + !LIST_EMPTY(&tp->t_segq)) { /* Can not resize the socket buffer, just return */ goto out; } if (TSTMP_GT(tcp_now, - tp->rfbuf_ts + TCPTV_RCVBUFIDLE)) { + tp->rfbuf_ts + TCPTV_RCVBUFIDLE)) { /* If there has been an idle period in the * connection, just restart the measurement */ @@ -1168,7 +1185,7 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, * the high water mark. 
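 * (Paraphrasing the code below: in this no-timestamp path the buffer is
 * not grown incrementally; once at least TCP_RCVNOTS_BYTELEVEL bytes
 * arrive within one TCPTV_RCVNOTS_QUANTUM interval, the high water mark
 * is raised toward tcp_autorcvbuf_max in a single reservation.)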
*/ if (TSTMP_GEQ(tcp_now, - tp->rfbuf_ts + TCPTV_RCVNOTS_QUANTUM)) { + tp->rfbuf_ts + TCPTV_RCVNOTS_QUANTUM)) { if (tp->rfbuf_cnt >= TCP_RCVNOTS_BYTELEVEL) { tcp_sbrcv_reserve(tp, sbrcv, tcp_autorcvbuf_max, 0, @@ -1191,7 +1208,7 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, */ if (TSTMP_GEQ(to->to_tsecr, tp->rfbuf_ts)) { if (tp->rfbuf_cnt > (sbrcv->sb_hiwat - - (sbrcv->sb_hiwat >> 1))) { + (sbrcv->sb_hiwat >> 1))) { int32_t rcvbuf_inc, min_incr; /* * Increment the receive window by a @@ -1213,8 +1230,9 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, * at least */ min_incr = tp->t_maxseg << tcp_autorcvbuf_inc_shift; - if (rcvbuf_inc < min_incr) + if (rcvbuf_inc < min_incr) { rcvbuf_inc = min_incr; + } rcvbuf_inc = (rcvbuf_inc / tp->t_maxseg) * tp->t_maxseg; @@ -1232,7 +1250,7 @@ tcp_sbrcv_grow(struct tcpcb *tp, struct sockbuf *sbrcv, tp->t_bwmeas->bw_rcvbw_max = rcv_bw; } else { tp->t_bwmeas->bw_rcvbw_max = max( - tp->t_bwmeas->bw_rcvbw_max, rcv_bw); + tp->t_bwmeas->bw_rcvbw_max, rcv_bw); } } goto out; @@ -1257,7 +1275,7 @@ void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv) { if (tcp_do_autorcvbuf == 1 && sbrcv->sb_idealsize > 0 && - sbrcv->sb_hiwat > sbrcv->sb_idealsize) { + sbrcv->sb_hiwat > sbrcv->sb_idealsize) { int32_t trim; /* compute the difference between ideal and current sizes */ u_int32_t diff = sbrcv->sb_hiwat - sbrcv->sb_idealsize; @@ -1275,16 +1293,18 @@ tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sbrcv) u_int32_t leave = max(advwin, (sbrcv->sb_idealsize >> 4)); /* Sometimes leave can be zero, in that case leave at least - * a few segments worth of space. + * a few segments worth of space. */ - if (leave == 0) + if (leave == 0) { leave = tp->t_maxseg << tcp_autorcvbuf_inc_shift; + } trim = sbrcv->sb_hiwat - (sbrcv->sb_cc + leave); trim = imin(trim, (int32_t)diff); - if (trim > 0) + if (trim > 0) { sbreserve(sbrcv, (sbrcv->sb_hiwat - trim)); + } } } @@ -1298,10 +1318,10 @@ void tcp_sbsnd_trim(struct sockbuf *sbsnd) { if (tcp_do_autosendbuf == 1 && - ((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) == - (SB_AUTOSIZE | SB_TRIM)) && - (sbsnd->sb_idealsize > 0) && - (sbsnd->sb_hiwat > sbsnd->sb_idealsize)) { + ((sbsnd->sb_flags & (SB_AUTOSIZE | SB_TRIM)) == + (SB_AUTOSIZE | SB_TRIM)) && + (sbsnd->sb_idealsize > 0) && + (sbsnd->sb_hiwat > sbsnd->sb_idealsize)) { u_int32_t trim = 0; if (sbsnd->sb_cc <= sbsnd->sb_idealsize) { trim = sbsnd->sb_hiwat - sbsnd->sb_idealsize; @@ -1310,8 +1330,9 @@ tcp_sbsnd_trim(struct sockbuf *sbsnd) } sbreserve(sbsnd, (sbsnd->sb_hiwat - trim)); } - if (sbsnd->sb_hiwat <= sbsnd->sb_idealsize) + if (sbsnd->sb_hiwat <= sbsnd->sb_idealsize) { sbsnd->sb_flags &= ~(SB_TRIM); + } } /* @@ -1329,8 +1350,8 @@ tcp_sbrcv_tstmp_check(struct tcpcb *tp) struct sockbuf *sbrcv = &so->so_rcv; if ((tp->t_flags & (TF_REQ_TSTMP | TF_RCVD_TSTMP)) != - (TF_REQ_TSTMP | TF_RCVD_TSTMP) && - (sbrcv->sb_flags & SB_AUTOSIZE) != 0) { + (TF_REQ_TSTMP | TF_RCVD_TSTMP) && + (sbrcv->sb_flags & SB_AUTOSIZE) != 0) { tcp_sbrcv_reserve(tp, sbrcv, newsize, 0, newsize); } } @@ -1358,21 +1379,24 @@ static inline int tcp_stretch_ack_enable(struct tcpcb *tp, int thflags) { if (tp->rcv_by_unackwin >= (maxseg_unacked * tp->t_maxseg) && - TSTMP_GEQ(tp->rcv_unackwin, tcp_now)) + TSTMP_GEQ(tp->rcv_unackwin, tcp_now)) { tp->t_flags |= TF_STREAMING_ON; - else + } else { tp->t_flags &= ~TF_STREAMING_ON; + } /* If there has been an idle time, reset streaming detection */ - if (TSTMP_GT(tcp_now, tp->rcv_unackwin + tcp_maxrcvidle)) + if (TSTMP_GT(tcp_now, tp->rcv_unackwin 
+ tcp_maxrcvidle)) { tp->t_flags &= ~TF_STREAMING_ON; + } /* * If there are flags other than TH_ACK set, reset streaming * detection */ - if (thflags & ~TH_ACK) + if (thflags & ~TH_ACK) { tp->t_flags &= ~TF_STREAMING_ON; + } if (tp->t_flagsext & TF_DISABLE_STRETCHACK) { if (tp->rcv_nostrack_pkts >= TCP_STRETCHACK_ENABLE_PKTCNT) { @@ -1384,14 +1408,14 @@ tcp_stretch_ack_enable(struct tcpcb *tp, int thflags) } } - if (!(tp->t_flagsext & (TF_NOSTRETCHACK|TF_DISABLE_STRETCHACK)) && + if (!(tp->t_flagsext & (TF_NOSTRETCHACK | TF_DISABLE_STRETCHACK)) && (tp->t_flags & TF_STREAMING_ON) && (!(tp->t_flagsext & TF_RCVUNACK_WAITSS) || (tp->rcv_waitforss >= tcp_rcvsspktcnt))) { - return(1); + return 1; } - return(0); + return 0; } /* @@ -1403,7 +1427,7 @@ tcp_stretch_ack_enable(struct tcpcb *tp, int thflags) void tcp_reset_stretch_ack(struct tcpcb *tp) { - tp->t_flags &= ~(TF_STRETCHACK|TF_STREAMING_ON); + tp->t_flags &= ~(TF_STRETCHACK | TF_STREAMING_ON); tp->rcv_by_unackwin = 0; tp->rcv_unackwin = tcp_now + tcp_rcvunackwin; @@ -1431,29 +1455,32 @@ tcp_reset_stretch_ack(struct tcpcb *tp) */ int tcp_detect_bad_rexmt(struct tcpcb *tp, struct tcphdr *th, - struct tcpopt *to, u_int32_t rxtime) + struct tcpopt *to, u_int32_t rxtime) { int32_t tdiff, bad_rexmt_win; bad_rexmt_win = (tp->t_srtt >> (TCP_RTT_SHIFT + 1)); /* If the ack has ECN CE bit, then cwnd has to be adjusted */ - if (TCP_ECN_ENABLED(tp) && (th->th_flags & TH_ECE)) - return (0); + if (TCP_ECN_ENABLED(tp) && (th->th_flags & TH_ECE)) { + return 0; + } if (TSTMP_SUPPORTED(tp)) { if (rxtime > 0 && (to->to_flags & TOF_TS) && to->to_tsecr != 0 - && TSTMP_LT(to->to_tsecr, rxtime)) - return (1); + && TSTMP_LT(to->to_tsecr, rxtime)) { + return 1; + } } else { if ((tp->t_rxtshift == 1 || (tp->t_flagsext & TF_SENT_TLPROBE)) && rxtime > 0) { tdiff = (int32_t)(tcp_now - rxtime); - if (tdiff < bad_rexmt_win) - return(1); + if (tdiff < bad_rexmt_win) { + return 1; + } } } - return(0); + return 0; } @@ -1476,16 +1503,17 @@ tcp_bad_rexmt_restore_state(struct tcpcb *tp, struct tcphdr *th) tp->snd_ssthresh = tp->snd_ssthresh_prev; /* Initialize cwnd to the initial window */ - if (CC_ALGO(tp)->cwnd_init != NULL) + if (CC_ALGO(tp)->cwnd_init != NULL) { CC_ALGO(tp)->cwnd_init(tp); + } tp->snd_cwnd = fsize + min(acked, tp->snd_cwnd); - } else { tp->snd_cwnd = tp->snd_cwnd_prev; tp->snd_ssthresh = tp->snd_ssthresh_prev; - if (tp->t_flags & TF_WASFRECOVERY) + if (tp->t_flags & TF_WASFRECOVERY) { ENTER_FASTRECOVERY(tp); + } /* Do not use the loss flight size in this case */ tp->t_lossflightsize = 0; @@ -1582,8 +1610,9 @@ out: tcp_pmtud_revert_segment_size(tp); } } - if (tp->t_pmtud_start_ts > 0) + if (tp->t_pmtud_start_ts > 0) { tp->t_pmtud_start_ts = 0; + } } /* @@ -1599,7 +1628,7 @@ out: * packet reordering. 
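/*
 * The spurious-retransmit test in tcp_detect_bad_rexmt() above reads as
 * a pure predicate: with timestamps, an echoed value older than the
 * retransmit time proves the ACK belonged to the original transmission;
 * without them, an ACK arriving within about half the smoothed RTT of
 * the retransmit is treated the same way. A simplified model (flat
 * 32-bit time; the ECN and TLP special cases are omitted, and the shift
 * constant mirrors the BSD srtt encoding):
 */
#include <stdbool.h>
#include <stdint.h>

#define RTT_SHIFT 5	/* srtt is stored left-shifted, BSD style */

static bool
rexmt_was_spurious(bool have_tsecr, uint32_t tsecr, uint32_t rxtime,
    int32_t srtt, uint32_t now)
{
	int32_t bad_rexmt_win = srtt >> (RTT_SHIFT + 1);	/* ~srtt/2 ticks */

	if (have_tsecr && rxtime > 0 && tsecr != 0)
		return (int32_t)(tsecr - rxtime) < 0;	/* echo predates rexmt */
	/* No timestamps: the ACK came back implausibly fast. */
	return rxtime > 0 && (int32_t)(now - rxtime) < bad_rexmt_win;
}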
*/ static void -tcp_early_rexmt_check (struct tcpcb *tp, struct tcphdr *th) +tcp_early_rexmt_check(struct tcpcb *tp, struct tcphdr *th) { u_int32_t obytes, snd_off; int32_t snd_len; @@ -1621,7 +1650,7 @@ tcp_early_rexmt_check (struct tcpcb *tp, struct tcphdr *th) if (SACK_ENABLED(tp) && !TAILQ_EMPTY(&tp->snd_holes)) { obytes = (tp->snd_max - tp->snd_fack) + - tp->sackhint.sack_bytes_rexmit; + tp->sackhint.sack_bytes_rexmit; } else { obytes = (tp->snd_max - tp->snd_una); } @@ -1642,8 +1671,9 @@ tcp_early_rexmt_check (struct tcpcb *tp, struct tcphdr *th) u_int32_t osegs; osegs = obytes / tp->t_maxseg; - if ((osegs * tp->t_maxseg) < obytes) + if ((osegs * tp->t_maxseg) < obytes) { osegs++; + } /* * Since the connection might have already @@ -1666,8 +1696,9 @@ tcp_early_rexmt_check (struct tcpcb *tp, struct tcphdr *th) tp->t_rexmtthresh = max(tp->t_rexmtthresh, tp->t_dupacks); - if (tp->t_early_rexmt_count == 0) + if (tp->t_early_rexmt_count == 0) { tp->t_early_rexmt_win = tcp_now; + } if (tp->t_flagsext & TF_SENT_TLPROBE) { tcpstat.tcps_tlp_recovery++; @@ -1701,15 +1732,16 @@ tcp_tfo_syn(struct tcpcb *tp, struct tcpopt *to) unsigned char len; if (!(to->to_flags & (TOF_TFO | TOF_TFOREQ)) || - !(tcp_fastopen & TCP_FASTOPEN_SERVER)) - return (FALSE); + !(tcp_fastopen & TCP_FASTOPEN_SERVER)) { + return FALSE; + } if ((to->to_flags & TOF_TFOREQ)) { tp->t_tfo_flags |= TFO_F_OFFER_COOKIE; tp->t_tfo_stats |= TFO_S_COOKIEREQ_RECV; tcpstat.tcps_tfo_cookie_req_rcv++; - return (FALSE); + return FALSE; } /* Ok, then it must be an offered cookie. We need to check that ... */ @@ -1723,13 +1755,13 @@ tcp_tfo_syn(struct tcpcb *tp, struct tcpopt *to) tp->t_tfo_stats |= TFO_S_COOKIE_INVALID; tcpstat.tcps_tfo_cookie_invalid++; - return (FALSE); + return FALSE; } if (OSIncrementAtomic(&tcp_tfo_halfcnt) >= tcp_tfo_backlog) { /* Need to decrement again as we just increased it... 
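/*
 * tcp_early_rexmt_check() above counts outstanding data in whole
 * segments, rounding up; the ceiling division is easy to misread in the
 * diff, so here it is in isolation (illustrative names):
 */
#include <stdint.h>

static uint32_t
outstanding_segments(uint32_t obytes, uint32_t maxseg)
{
	uint32_t osegs = obytes / maxseg;

	if (osegs * maxseg < obytes)	/* a partial segment counts as one */
		osegs++;
	return osegs;
}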
*/ OSDecrementAtomic(&tcp_tfo_halfcnt); - return (FALSE); + return FALSE; } tp->t_tfo_flags |= TFO_F_COOKIE_VALID; @@ -1737,7 +1769,7 @@ tcp_tfo_syn(struct tcpcb *tp, struct tcpopt *to) tp->t_tfo_stats |= TFO_S_SYNDATA_RCV; tcpstat.tcps_tfo_syn_data_rcv++; - return (TRUE); + return TRUE; } static void @@ -1789,8 +1821,9 @@ tcp_tfo_synack(struct tcpcb *tp, struct tcpopt *to) static void tcp_tfo_rcv_probe(struct tcpcb *tp, int tlen) { - if (tlen != 0) + if (tlen != 0) { return; + } tp->t_tfo_probe_state = TFO_PROBE_PROBING; @@ -1805,8 +1838,9 @@ static void tcp_tfo_rcv_data(struct tcpcb *tp) { /* Transition from PROBING to NONE as data has been received */ - if (tp->t_tfo_probe_state >= TFO_PROBE_PROBING) + if (tp->t_tfo_probe_state >= TFO_PROBE_PROBING) { tp->t_tfo_probe_state = TFO_PROBE_NONE; + } } static void @@ -1840,19 +1874,22 @@ tcp_update_window(struct tcpcb *tp, int thflags, struct tcphdr * th, (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { /* keep track of pure window updates */ if (tlen == 0 && - tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) + tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) { tcpstat.tcps_rcvwinupd++; + } tp->snd_wnd = tiwin; tp->snd_wl1 = th->th_seq; tp->snd_wl2 = th->th_ack; - if (tp->snd_wnd > tp->max_sndwnd) + if (tp->snd_wnd > tp->max_sndwnd) { tp->max_sndwnd = tp->snd_wnd; + } - if (tp->t_inpcb->inp_socket->so_flags & SOF_MP_SUBFLOW) + if (tp->t_inpcb->inp_socket->so_flags & SOF_MP_SUBFLOW) { mptcp_update_window_wakeup(tp); - return (true); + } + return true; } - return (false); + return false; } void @@ -1876,7 +1913,7 @@ tcp_input(struct mbuf *m, int off0) int dropsocket = 0; int iss = 0, nosock = 0; u_int32_t tiwin, sack_bytes_acked = 0; - struct tcpopt to; /* options in this segment */ + struct tcpopt to; /* options in this segment */ #if TCPDEBUG short ostate = 0; #endif @@ -1899,9 +1936,10 @@ tcp_input(struct mbuf *m, int off0) boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp)); boolean_t recvd_dsack = FALSE; struct tcp_respond_args tra; + bool findpcb_iterated = false; -#define TCP_INC_VAR(stat, npkts) do { \ - stat += npkts; \ +#define TCP_INC_VAR(stat, npkts) do { \ + stat += npkts; \ } while (0) TCP_INC_VAR(tcpstat.tcps_rcvtotal, nlropkts); @@ -1915,7 +1953,7 @@ tcp_input(struct mbuf *m, int off0) } if (fwd_tag != NULL) { struct ip_fwd_tag *ipfwd_tag = - (struct ip_fwd_tag *)(fwd_tag+1); + (struct ip_fwd_tag *)(fwd_tag + 1); next_hop = ipfwd_tag->next_hop; m_tag_delete(m, fwd_tag); @@ -1927,9 +1965,9 @@ tcp_input(struct mbuf *m, int off0) int isipv6; #endif /* INET6 */ int rstreason; /* For badport_bandlim accounting purposes */ - struct proc *proc0=current_proc(); + struct proc *proc0 = current_proc(); - KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_START, 0, 0, 0, 0, 0); #if INET6 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0; @@ -1949,12 +1987,13 @@ tcp_input(struct mbuf *m, int off0) tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0); - if (tcp_input_checksum(AF_INET6, m, th, off0, tlen)) + if (tcp_input_checksum(AF_INET6, m, th, off0, tlen)) { goto dropnosock; + } KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport), - (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])), - th->th_seq, th->th_ack, th->th_win); + (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])), + th->th_seq, th->th_ack, th->th_win); /* * Be proactive about unspecified IPv6 address in source. 
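/*
 * tcp_update_window(), whose body is re-braced above, implements the
 * classic RFC 793 guard against stale window advertisements: take the
 * window only from a segment that is newer by sequence, or equally new
 * but newer by ACK, or identical but advertising more space. A
 * compressed standalone model, with SEQ_LT spelled out as a wrap-safe
 * compare (names are illustrative):
 */
#include <stdbool.h>
#include <stdint.h>

#define SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)

static bool
window_update_ok(bool has_ack, uint32_t snd_wl1, uint32_t snd_wl2,
    uint32_t snd_wnd, uint32_t seg_seq, uint32_t seg_ack, uint32_t tiwin)
{
	return has_ack &&
	    (SEQ_LT(snd_wl1, seg_seq) ||
	    (snd_wl1 == seg_seq &&
	    (SEQ_LT(snd_wl2, seg_ack) ||
	    (snd_wl2 == seg_ack && tiwin > snd_wnd))));
}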
* As we use all-zero to indicate unbounded/unconnected pcb, @@ -1969,51 +2008,51 @@ tcp_input(struct mbuf *m, int off0) goto dropnosock; } DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL, - struct ip6_hdr *, ip6, struct tcpcb *, NULL, - struct tcphdr *, th); + struct ip6_hdr *, ip6, struct tcpcb *, NULL, + struct tcphdr *, th); ip_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK; } else #endif /* INET6 */ { - /* - * Get IP and TCP header together in first mbuf. - * Note: IP leaves IP header in first mbuf. - */ - if (off0 > sizeof (struct ip)) { - ip_stripoptions(m); - off0 = sizeof(struct ip); - } - if (m->m_len < sizeof (struct tcpiphdr)) { - if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == 0) { - tcpstat.tcps_rcvshort++; - return; + /* + * Get IP and TCP header together in first mbuf. + * Note: IP leaves IP header in first mbuf. + */ + if (off0 > sizeof(struct ip)) { + ip_stripoptions(m); + off0 = sizeof(struct ip); + } + if (m->m_len < sizeof(struct tcpiphdr)) { + if ((m = m_pullup(m, sizeof(struct tcpiphdr))) == 0) { + tcpstat.tcps_rcvshort++; + return; + } } - } - /* Expect 32-bit aligned data pointer on strict-align platforms */ - MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); + /* Expect 32-bit aligned data pointer on strict-align platforms */ + MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); - ip = mtod(m, struct ip *); - th = (struct tcphdr *)(void *)((caddr_t)ip + off0); - tlen = ip->ip_len; + ip = mtod(m, struct ip *); + th = (struct tcphdr *)(void *)((caddr_t)ip + off0); + tlen = ip->ip_len; - if (tcp_input_checksum(AF_INET, m, th, off0, tlen)) - goto dropnosock; + if (tcp_input_checksum(AF_INET, m, th, off0, tlen)) { + goto dropnosock; + } #if INET6 - /* Re-initialization for later version check */ - ip->ip_v = IPVERSION; + /* Re-initialization for later version check */ + ip->ip_v = IPVERSION; #endif - ip_ecn = (ip->ip_tos & IPTOS_ECN_MASK); + ip_ecn = (ip->ip_tos & IPTOS_ECN_MASK); - DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL, - struct ip *, ip, struct tcpcb *, NULL, struct tcphdr *, th); - - KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport), - (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)), - th->th_seq, th->th_ack, th->th_win); + DTRACE_TCP5(receive, struct mbuf *, m, struct inpcb *, NULL, + struct ip *, ip, struct tcpcb *, NULL, struct tcphdr *, th); + KERNEL_DEBUG(DBG_LAYER_BEG, ((th->th_dport << 16) | th->th_sport), + (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)), + th->th_seq, th->th_ack, th->th_win); } /* @@ -2021,23 +2060,23 @@ tcp_input(struct mbuf *m, int off0) * pull out TCP options and adjust length. 
*/ off = th->th_off << 2; - if (off < sizeof (struct tcphdr) || off > tlen) { + if (off < sizeof(struct tcphdr) || off > tlen) { tcpstat.tcps_rcvbadoff++; IF_TCP_STATINC(ifp, badformat); goto dropnosock; } - tlen -= off; /* tlen is used instead of ti->ti_len */ - if (off > sizeof (struct tcphdr)) { + tlen -= off; /* tlen is used instead of ti->ti_len */ + if (off > sizeof(struct tcphdr)) { #if INET6 if (isipv6) { - IP6_EXTHDR_CHECK(m, off0, off, return); + IP6_EXTHDR_CHECK(m, off0, off, return ); ip6 = mtod(m, struct ip6_hdr *); th = (struct tcphdr *)(void *)((caddr_t)ip6 + off0); } else #endif /* INET6 */ { if (m->m_len < sizeof(struct ip) + off) { - if ((m = m_pullup(m, sizeof (struct ip) + off)) == 0) { + if ((m = m_pullup(m, sizeof(struct ip) + off)) == 0) { tcpstat.tcps_rcvshort++; return; } @@ -2045,7 +2084,7 @@ tcp_input(struct mbuf *m, int off0) th = (struct tcphdr *)(void *)((caddr_t)ip + off0); } } - optlen = off - sizeof (struct tcphdr); + optlen = off - sizeof(struct tcphdr); optp = (u_char *)(th + 1); /* * Do quick retrieval of timestamp options ("options @@ -2055,14 +2094,14 @@ tcp_input(struct mbuf *m, int off0) * tcp_dooptions(), etc. */ if ((optlen == TCPOLEN_TSTAMP_APPA || - (optlen > TCPOLEN_TSTAMP_APPA && - optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && - *(u_int32_t *)(void *)optp == htonl(TCPOPT_TSTAMP_HDR) && - (th->th_flags & TH_SYN) == 0) { + (optlen > TCPOLEN_TSTAMP_APPA && + optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && + *(u_int32_t *)(void *)optp == htonl(TCPOPT_TSTAMP_HDR) && + (th->th_flags & TH_SYN) == 0) { to.to_flags |= TOF_TS; to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4)); to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8)); - optp = NULL; /* we've parsed the options */ + optp = NULL; /* we've parsed the options */ } } thflags = th->th_flags; @@ -2075,7 +2114,7 @@ tcp_input(struct mbuf *m, int off0) * * This is a violation of the TCP specification. */ - if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN)) { + if (drop_synfin && (thflags & (TH_SYN | TH_FIN)) == (TH_SYN | TH_FIN)) { IF_TCP_STATINC(ifp, synfin); goto dropnosock; } @@ -2103,20 +2142,21 @@ tcp_input(struct mbuf *m, int off0) * absence of a PCB, when scoped routing (and thus source interface * selection) are enabled. */ - if ((m->m_pkthdr.pkt_flags & PKTF_LOOP) || m->m_pkthdr.rcvif == NULL) + if ((m->m_pkthdr.pkt_flags & PKTF_LOOP) || m->m_pkthdr.rcvif == NULL) { ifscope = IFSCOPE_NONE; - else + } else { ifscope = m->m_pkthdr.rcvif->if_index; + } - /* - * Convert TCP protocol specific fields to host format. - */ + /* + * Convert TCP protocol specific fields to host format. + */ #if BYTE_ORDER != BIG_ENDIAN - NTOHL(th->th_seq); - NTOHL(th->th_ack); - NTOHS(th->th_win); - NTOHS(th->th_urp); + NTOHL(th->th_seq); + NTOHL(th->th_ack); + NTOHS(th->th_win); + NTOHS(th->th_urp); #endif /* @@ -2138,7 +2178,7 @@ findpcb: * already got one like this? */ inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport, - ip->ip_dst, th->th_dport, 0, m->m_pkthdr.rcvif); + ip->ip_dst, th->th_dport, 0, m->m_pkthdr.rcvif); if (!inp) { /* * No, then it's new. 
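/*
 * The fast-path option check above matches the RFC 1323 Appendix A
 * layout byte for byte: NOP, NOP, TIMESTAMP, length 10, then two 32-bit
 * stamps. A user-space rendition of the same test; the kernel also
 * accepts a longer options block whose 13th byte is EOL, which this
 * sketch deliberately omits:
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define TCPOPT_TSTAMP_HDR	0x0101080a	/* NOP, NOP, TS, len 10 */
#define TCPOLEN_TSTAMP_APPA	12

static bool
parse_tstamp_fastpath(const uint8_t *optp, int optlen,
    uint32_t *tsval, uint32_t *tsecr)
{
	uint32_t hdr;

	if (optlen != TCPOLEN_TSTAMP_APPA)
		return false;
	memcpy(&hdr, optp, sizeof(hdr));
	if (hdr != htonl(TCPOPT_TSTAMP_HDR))
		return false;
	memcpy(tsval, optp + 4, 4); *tsval = ntohl(*tsval);
	memcpy(tsecr, optp + 8, 4); *tsecr = ntohl(*tsecr);
	return true;
}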
Try find the ambushing socket @@ -2150,24 +2190,24 @@ findpcb: } else { inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport, - next_hop->sin_addr, + next_hop->sin_addr, ntohs(next_hop->sin_port), 1, m->m_pkthdr.rcvif); } } } else -#endif /* IPFIREWALL_FORWARD */ - { +#endif /* IPFIREWALL_FORWARD */ + { #if INET6 - if (isipv6) - inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport, - &ip6->ip6_dst, th->th_dport, 1, - m->m_pkthdr.rcvif); - else + if (isipv6) { + inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_src, th->th_sport, + &ip6->ip6_dst, th->th_dport, 1, + m->m_pkthdr.rcvif); + } else #endif /* INET6 */ - inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport, - ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif); - } + inp = in_pcblookup_hash(&tcbinfo, ip->ip_src, th->th_sport, + ip->ip_dst, th->th_dport, 1, m->m_pkthdr.rcvif); + } /* * Use the interface scope information from the PCB for outbound @@ -2175,8 +2215,9 @@ findpcb: * enabled, tcp_respond will use the scope of the interface where * the segment arrived on. */ - if (inp != NULL && (inp->inp_flags & INP_BOUND_IF)) + if (inp != NULL && (inp->inp_flags & INP_BOUND_IF)) { ifscope = inp->inp_boundifp->if_index; + } /* * If the state is CLOSED (i.e., TCB does not exist) then @@ -2204,53 +2245,56 @@ findpcb: } switch (log_in_vain) { case 1: - if(thflags & TH_SYN) + if (thflags & TH_SYN) { log(LOG_INFO, - "Connection attempt to TCP %s:%d from %s:%d\n", - dbuf, ntohs(th->th_dport), - sbuf, - ntohs(th->th_sport)); + "Connection attempt to TCP %s:%d from %s:%d\n", + dbuf, ntohs(th->th_dport), + sbuf, + ntohs(th->th_sport)); + } break; case 2: log(LOG_INFO, - "Connection attempt to TCP %s:%d from %s:%d flags:0x%x\n", - dbuf, ntohs(th->th_dport), sbuf, - ntohs(th->th_sport), thflags); + "Connection attempt to TCP %s:%d from %s:%d flags:0x%x\n", + dbuf, ntohs(th->th_dport), sbuf, + ntohs(th->th_sport), thflags); break; case 3: case 4: if ((thflags & TH_SYN) && !(thflags & TH_ACK) && - !(m->m_flags & (M_BCAST | M_MCAST)) && + !(m->m_flags & (M_BCAST | M_MCAST)) && #if INET6 - ((isipv6 && !IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) || - (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr)) + ((isipv6 && !IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) || + (!isipv6 && ip->ip_dst.s_addr != ip->ip_src.s_addr)) #else - ip->ip_dst.s_addr != ip->ip_src.s_addr + ip->ip_dst.s_addr != ip->ip_src.s_addr #endif - ) + ) { log_in_vain_log((LOG_INFO, - "Stealth Mode connection attempt to TCP %s:%d from %s:%d\n", - dbuf, ntohs(th->th_dport), - sbuf, - ntohs(th->th_sport))); + "Stealth Mode connection attempt to TCP %s:%d from %s:%d\n", + dbuf, ntohs(th->th_dport), + sbuf, + ntohs(th->th_sport))); + } break; default: break; } } if (blackhole) { - if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP) - + if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type != IFT_LOOP) { switch (blackhole) { case 1: - if (thflags & TH_SYN) + if (thflags & TH_SYN) { goto dropnosock; + } break; case 2: goto dropnosock; default: goto dropnosock; } + } } rstreason = BANDLIM_RST_CLOSEDPORT; IF_TCP_STATINC(ifp, noconnnolist); @@ -2273,10 +2317,46 @@ findpcb: socket_lock(so, 1); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { socket_unlock(so, 1); - inp = NULL; // pretend we didn't find it + inp = NULL; // pretend we didn't find it goto dropnosock; } + if (!isipv6 && inp->inp_faddr.s_addr != INADDR_ANY) { + if (inp->inp_faddr.s_addr != ip->ip_src.s_addr || + inp->inp_laddr.s_addr != ip->ip_dst.s_addr || + inp->inp_fport != 
th->th_sport || + inp->inp_lport != th->th_dport) { + os_log_error(OS_LOG_DEFAULT, "%s 5-tuple does not match: %u:%u %u:%u\n", + __func__, + ntohs(inp->inp_fport), ntohs(th->th_sport), + ntohs(inp->inp_lport), ntohs(th->th_dport)); + if (findpcb_iterated) { + goto drop; + } + findpcb_iterated = true; + socket_unlock(so, 1); + inp = NULL; + goto findpcb; + } + } else if (isipv6 && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { + if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, &ip6->ip6_src) || + !IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, &ip6->ip6_dst) || + inp->inp_fport != th->th_sport || + inp->inp_lport != th->th_dport) { + os_log_error(OS_LOG_DEFAULT, "%s 5-tuple does not match: %u:%u %u:%u\n", + __func__, + ntohs(inp->inp_fport), ntohs(th->th_sport), + ntohs(inp->inp_lport), ntohs(th->th_dport)); + if (findpcb_iterated) { + goto drop; + } + findpcb_iterated = true; + socket_unlock(so, 1); + inp = NULL; + goto findpcb; + } + } + #if NECP if (so->so_state & SS_ISCONNECTED) { // Connected TCP sockets have a fully-bound local and remote, @@ -2289,8 +2369,8 @@ findpcb: #if INET6 if (isipv6) { if (!necp_socket_is_allowed_to_send_recv_v6(inp, - th->th_dport, th->th_sport, &ip6->ip6_dst, - &ip6->ip6_src, ifp, NULL, NULL, NULL)) { + th->th_dport, th->th_sport, &ip6->ip6_dst, + &ip6->ip6_src, ifp, NULL, NULL, NULL)) { IF_TCP_STATINC(ifp, badformat); goto drop; } @@ -2298,8 +2378,8 @@ findpcb: #endif { if (!necp_socket_is_allowed_to_send_recv_v4(inp, - th->th_dport, th->th_sport, &ip->ip_dst, &ip->ip_src, - ifp, NULL, NULL, NULL)) { + th->th_dport, th->th_sport, &ip->ip_dst, &ip->ip_src, + ifp, NULL, NULL, NULL)) { IF_TCP_STATINC(ifp, badformat); goto drop; } @@ -2313,38 +2393,43 @@ findpcb: IF_TCP_STATINC(ifp, noconnlist); goto dropwithreset; } - if (tp->t_state == TCPS_CLOSED) + if (tp->t_state == TCPS_CLOSED) { goto drop; + } /* If none of the FIN|SYN|RST|ACK flag is set, drop */ - if (tcp_do_rfc5961 && (thflags & TH_ACCEPT) == 0) + if (tcp_do_rfc5961 && (thflags & TH_ACCEPT) == 0) { goto drop; + } /* Unscale the window into a 32-bit value. */ - if ((thflags & TH_SYN) == 0) + if ((thflags & TH_SYN) == 0) { tiwin = th->th_win << tp->snd_scale; - else + } else { tiwin = th->th_win; + } #if CONFIG_MACF_NET - if (mac_inpcb_check_deliver(inp, m, AF_INET, SOCK_STREAM)) + if (mac_inpcb_check_deliver(inp, m, AF_INET, SOCK_STREAM)) { goto drop; + } #endif /* Avoid processing packets while closing a listen socket */ if (tp->t_state == TCPS_LISTEN && - (so->so_options & SO_ACCEPTCONN) == 0) + (so->so_options & SO_ACCEPTCONN) == 0) { goto drop; + } - if (so->so_options & (SO_DEBUG|SO_ACCEPTCONN)) { + if (so->so_options & (SO_DEBUG | SO_ACCEPTCONN)) { #if TCPDEBUG if (so->so_options & SO_DEBUG) { ostate = tp->t_state; #if INET6 - if (isipv6) + if (isipv6) { bcopy((char *)ip6, (char *)tcp_saveipgen, - sizeof(*ip6)); - else + sizeof(*ip6)); + } else #endif /* INET6 */ bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); tcp_savetcp = *th; @@ -2360,8 +2445,8 @@ findpcb: #endif /* INET6 */ struct ifnet *head_ifscope; unsigned int head_nocell, head_recvanyif, - head_noexpensive, head_awdl_unrestricted, - head_intcoproc_allowed; + head_noexpensive, head_awdl_unrestricted, + head_intcoproc_allowed; /* Get listener's bound-to-interface, if any */ head_ifscope = (inp->inp_flags & INP_BOUND_IF) ? @@ -2381,7 +2466,7 @@ findpcb: * If it does not contain a SYN then it is not interesting; drop it. * If it is from this socket, drop it, it must be forged. 
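/*
 * This hunk is one of the few functional changes in the file: after the
 * socket lock is taken, the looked-up PCB's bound 5-tuple is re-checked
 * against the segment, and on a mismatch the hash lookup is retried
 * exactly once (findpcb_iterated) before the segment is dropped --
 * closing the window in which the PCB can be disconnected or reused
 * between lookup and lock. The control flow, reduced to a standalone
 * sketch; lookup/lock/tuple_matches are stand-in helpers, not kernel
 * APIs:
 */
#include <stdbool.h>
#include <stddef.h>

struct pcb;
struct tuple { unsigned laddr, faddr; unsigned short lport, fport; };

struct pcb *lookup(const struct tuple *key);
void lock(struct pcb *); void unlock(struct pcb *);
bool tuple_matches(const struct pcb *, const struct tuple *key);

static struct pcb *
lookup_and_revalidate(const struct tuple *key)
{
	bool iterated = false;
	struct pcb *pcb;

	for (;;) {
		if ((pcb = lookup(key)) == NULL)
			return NULL;
		lock(pcb);			/* pcb may have changed meanwhile */
		if (tuple_matches(pcb, key))
			return pcb;		/* still the connection we found */
		unlock(pcb);
		if (iterated)			/* second mismatch: drop */
			return NULL;
		iterated = true;		/* retry the lookup once */
	}
}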
*/ - if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) { + if ((thflags & (TH_RST | TH_ACK | TH_SYN)) != TH_SYN) { IF_TCP_STATINC(ifp, listbadsyn); if (thflags & TH_RST) { @@ -2398,17 +2483,19 @@ findpcb: tcpstat.tcps_badsyn++; goto drop; } - KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START, 0, 0, 0, 0, 0); if (th->th_dport == th->th_sport) { #if INET6 if (isipv6) { if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, - &ip6->ip6_src)) + &ip6->ip6_src)) { goto drop; + } } else #endif /* INET6 */ - if (ip->ip_dst.s_addr == ip->ip_src.s_addr) - goto drop; + if (ip->ip_dst.s_addr == ip->ip_src.s_addr) { + goto drop; + } } /* * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN @@ -2418,20 +2505,23 @@ findpcb: * Packets with a multicast source address should also * be discarded. */ - if (m->m_flags & (M_BCAST|M_MCAST)) + if (m->m_flags & (M_BCAST | M_MCAST)) { goto drop; + } #if INET6 if (isipv6) { if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || - IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) + IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { goto drop; + } } else #endif if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || - IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || - ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || - in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) + IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || + ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || + in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { goto drop; + } #if INET6 @@ -2467,7 +2557,7 @@ findpcb: if (so->so_filt) { #if INET6 if (isipv6) { - struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)&from; + struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)&from; sin6->sin6_len = sizeof(*sin6); sin6->sin6_family = AF_INET6; @@ -2475,8 +2565,7 @@ findpcb: sin6->sin6_flowinfo = 0; sin6->sin6_addr = ip6->ip6_src; sin6->sin6_scope_id = 0; - } - else + } else #endif { struct sockaddr_in *sin = (struct sockaddr_in*)&from; @@ -2493,13 +2582,15 @@ findpcb: if (so2 == 0) { tcpstat.tcps_listendrop++; if (tcp_dropdropablreq(so)) { - if (so->so_filt) + if (so->so_filt) { so2 = sonewconn(so, 0, (struct sockaddr*)&from); - else + } else { so2 = sonewconn(so, 0, NULL); + } } - if (!so2) + if (!so2) { goto drop; + } } /* Point "inp" and "tp" in tandem to new socket */ @@ -2538,31 +2629,36 @@ findpcb: /* * Inherit restrictions from listener. */ - if (head_nocell) + if (head_nocell) { inp_set_nocellular(inp); - if (head_noexpensive) + } + if (head_noexpensive) { inp_set_noexpensive(inp); - if (head_awdl_unrestricted) + } + if (head_awdl_unrestricted) { inp_set_awdl_unrestricted(inp); - if (head_intcoproc_allowed) + } + if (head_intcoproc_allowed) { inp_set_intcoproc_allowed(inp); + } /* * Inherit {IN,IN6}_RECV_ANYIF from listener. */ - if (head_recvanyif) + if (head_recvanyif) { inp->inp_flags |= INP_RECV_ANYIF; - else + } else { inp->inp_flags &= ~INP_RECV_ANYIF; + } #if INET6 - if (isipv6) + if (isipv6) { inp->in6p_laddr = ip6->ip6_dst; - else { + } else { inp->inp_vflag &= ~INP_IPV6; inp->inp_vflag |= INP_IPV4; #endif /* INET6 */ - inp->inp_laddr = ip->ip_dst; + inp->inp_laddr = ip->ip_dst; #if INET6 - } + } #endif /* INET6 */ inp->inp_lport = th->th_dport; if (in_pcbinshash(inp, 0) != 0) { @@ -2571,13 +2667,13 @@ findpcb: * put the PCB on the hash lists. 
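/*
 * The listener-side sanity checks re-braced above all funnel into
 * "goto drop"; collected into one predicate they are RFC 1122 4.2.3.10
 * plus a forged self-connect guard. A simplified IPv4-only model -- the
 * interface-directed-broadcast test (in_broadcast) is represented here
 * by a caller-supplied flag:
 */
#include <stdbool.h>
#include <stdint.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static bool
listener_drop_syn(uint32_t src, uint32_t dst, uint16_t sport,
    uint16_t dport, bool link_bcast_or_mcast, bool dst_is_if_broadcast)
{
	if (sport == dport && src == dst)	/* forged self-connect */
		return true;
	if (link_bcast_or_mcast)		/* link-layer bcast/mcast */
		return true;
	return IN_MULTICAST(ntohl(dst)) || IN_MULTICAST(ntohl(src)) ||
	    src == htonl(INADDR_BROADCAST) || dst_is_if_broadcast;
}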
*/ #if INET6 - if (isipv6) + if (isipv6) { inp->in6p_laddr = in6addr_any; - else + } else #endif /* INET6 */ - inp->inp_laddr.s_addr = INADDR_ANY; + inp->inp_laddr.s_addr = INADDR_ANY; inp->inp_lport = 0; - socket_lock(oso, 0); /* release ref on parent */ + socket_lock(oso, 0); /* release ref on parent */ socket_unlock(oso, 1); goto drop; } @@ -2597,11 +2693,12 @@ findpcb: * calling the accept system call. */ inp->inp_flags |= - oinp->inp_flags & INP_CONTROLOPTS; - if (oinp->in6p_outputopts) + oinp->inp_flags & INP_CONTROLOPTS; + if (oinp->in6p_outputopts) { inp->in6p_outputopts = - ip6_copypktopts(oinp->in6p_outputopts, - M_NOWAIT); + ip6_copypktopts(oinp->in6p_outputopts, + M_NOWAIT); + } } else #endif /* INET6 */ { @@ -2611,30 +2708,32 @@ findpcb: socket_lock(oso, 0); #if IPSEC /* copy old policy into new socket's */ - if (sotoinpcb(oso)->inp_sp) - { + if (sotoinpcb(oso)->inp_sp) { int error = 0; /* Is it a security hole here to silently fail to copy the policy? */ - if (inp->inp_sp != NULL) + if (inp->inp_sp != NULL) { error = ipsec_init_policy(so, &inp->inp_sp); - if (error != 0 || ipsec_copy_policy(sotoinpcb(oso)->inp_sp, inp->inp_sp)) + } + if (error != 0 || ipsec_copy_policy(sotoinpcb(oso)->inp_sp, inp->inp_sp)) { printf("tcp_input: could not copy policy\n"); + } } #endif /* inherit states from the listener */ DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_LISTEN); + struct tcpcb *, tp, int32_t, TCPS_LISTEN); tp->t_state = TCPS_LISTEN; - tp->t_flags |= tp0->t_flags & (TF_NOPUSH|TF_NOOPT|TF_NODELAY); - tp->t_flagsext |= (tp0->t_flagsext & (TF_RXTFINDROP|TF_NOTIMEWAIT|TF_FASTOPEN)); + tp->t_flags |= tp0->t_flags & (TF_NOPUSH | TF_NOOPT | TF_NODELAY); + tp->t_flagsext |= (tp0->t_flagsext & (TF_RXTFINDROP | TF_NOTIMEWAIT | TF_FASTOPEN)); tp->t_keepinit = tp0->t_keepinit; tp->t_keepcnt = tp0->t_keepcnt; tp->t_keepintvl = tp0->t_keepintvl; tp->t_adaptive_wtimo = tp0->t_adaptive_wtimo; tp->t_adaptive_rtimo = tp0->t_adaptive_rtimo; tp->t_inpcb->inp_ip_ttl = tp0->t_inpcb->inp_ip_ttl; - if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0) + if ((so->so_flags & SOF_NOTSENT_LOWAT) != 0) { tp->t_notsent_lowat = tp0->t_notsent_lowat; + } tp->t_inpcb->inp_flags2 |= tp0->t_inpcb->inp_flags2 & INP2_KEEPALIVE_OFFLOAD; @@ -2643,7 +2742,7 @@ findpcb: tcp_set_max_rwinscale(tp, so, ifp); - KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_END,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_END, 0, 0, 0, 0, 0); } } socket_lock_assert_owned(so); @@ -2676,18 +2775,18 @@ findpcb: /* * Keep track of how many bytes were received in the LRO packet */ - if ((pktf_sw_lro_pkt) && (nlropkts > 2)) { + if ((pktf_sw_lro_pkt) && (nlropkts > 2)) { tp->t_lropktlen += tlen; } /* * Explicit Congestion Notification - Flag that we need to send ECT if - * + The IP Congestion experienced flag was set. - * + Socket is in established state - * + We negotiated ECN in the TCP setup - * + This isn't a pure ack (tlen > 0) - * + The data is in the valid window + * + The IP Congestion experienced flag was set. + * + Socket is in established state + * + We negotiated ECN in the TCP setup + * + This isn't a pure ack (tlen > 0) + * + The data is in the valid window * - * TE_SENDECE will be cleared when we receive a packet with TH_CWR set. + * TE_SENDECE will be cleared when we receive a packet with TH_CWR set. 
*/ if (ip_ecn == IPTOS_ECN_CE && tp->t_state == TCPS_ESTABLISHED && TCP_ECN_ENABLED(tp) && tlen > 0 && @@ -2738,7 +2837,7 @@ findpcb: tcpstat.tcps_ecn_fallback_ce++; tcp_heuristic_ecn_aggressive(tp); tp->ecn_flags |= TE_CEHEURI_SET; - INP_INC_IFNET_STAT(inp,ecn_fallback_ce); + INP_INC_IFNET_STAT(inp, ecn_fallback_ce); } else { /* We tracked the first ECN_MIN_CE_PROBES segments, we * now know that the path is good. @@ -2754,10 +2853,10 @@ findpcb: * we want to avoid doing LRO if the remote end is not up to date * on initial window support and starts with 1 or 2 packets as its IW. */ - if (sw_lro && (tp->t_flagsext & TF_LRO_OFFLOADED) && - ((tcp_now - tp->t_rcvtime) >= (TCP_IDLETIMEOUT(tp)))) { + if (sw_lro && (tp->t_flagsext & TF_LRO_OFFLOADED) && + ((tcp_now - tp->t_rcvtime) >= (TCP_IDLETIMEOUT(tp)))) { turnoff_lro = 1; - } + } /* Update rcvtime as a new segment was received on the connection */ tp->t_rcvtime = tcp_now; @@ -2769,8 +2868,9 @@ findpcb: if (TCPS_HAVEESTABLISHED(tp->t_state)) { tcp_keepalive_reset(tp); - if (tp->t_mpsub) + if (tp->t_mpsub) { mptcp_reset_keepalive(tp); + } } /* @@ -2788,15 +2888,16 @@ findpcb: tcp_check_timer_state(tp); socket_unlock(so, 1); KERNEL_DEBUG(DBG_FNC_TCP_INPUT | - DBG_FUNC_END,0,0,0,0,0); + DBG_FUNC_END, 0, 0, 0, 0, 0); return; } #endif /* MPTCP */ if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { if (!(thflags & TH_ACK) || (SEQ_GT(th->th_ack, tp->iss) && - SEQ_LEQ(th->th_ack, tp->snd_max))) + SEQ_LEQ(th->th_ack, tp->snd_max))) { tcp_finalize_options(tp, &to, ifscope); + } } #if TRAFFIC_MGT @@ -2812,10 +2913,10 @@ findpcb: * to queuing in edge/access routers. */ if (tp->t_state == TCPS_ESTABLISHED && - (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK|TH_ECE|TH_PUSH)) == TH_ACK && - ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && + (thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK | TH_ECE | TH_PUSH)) == TH_ACK && + ((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) == 0) && ((to.to_flags & TOF_TS) == 0 || - TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && + TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && th->th_seq == tp->rcv_nxt && LIST_EMPTY(&tp->t_segq)) { int seg_size = tlen; if (tp->iaj_pktcnt <= IAJ_IGNORE_PKTCNT) { @@ -2825,8 +2926,8 @@ findpcb: if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_PKT) { seg_size = m->m_pkthdr.lro_pktlen; } - if ( tp->iaj_size == 0 || seg_size > tp->iaj_size || - (seg_size == tp->iaj_size && tp->iaj_rcv_ts == 0)) { + if (tp->iaj_size == 0 || seg_size > tp->iaj_size || + (seg_size == tp->iaj_size && tp->iaj_rcv_ts == 0)) { /* * State related to inter-arrival jitter is * uninitialized or we are trying to find a good @@ -2839,13 +2940,14 @@ findpcb: * Compute inter-arrival jitter taking * this packet as the second packet */ - if (pktf_sw_lro_pkt) + if (pktf_sw_lro_pkt) { compute_iaj(tp, nlropkts, m->m_pkthdr.lro_elapsed); - else + } else { compute_iaj(tp, 1, 0); + } } - if (seg_size < tp->iaj_size) { + if (seg_size < tp->iaj_size) { /* * There is a smaller packet in the stream. * Some times the maximum size supported @@ -2889,14 +2991,13 @@ findpcb: * be TH_NEEDSYN. 
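/*
 * The condition at the top of this hunk decides when the receiver
 * latches TE_SENDECE, i.e. starts echoing congestion on every ACK: an
 * IP-level CE mark, on actual data, in the valid window, on an
 * established connection that negotiated ECN. As a standalone predicate
 * with the flag plumbing reduced to booleans:
 */
#include <stdbool.h>
#include <stdint.h>

#define ECN_CE	0x03	/* IPTOS_ECN_CE */

static bool
should_send_ece(uint8_t ip_ecn, bool established, bool ecn_negotiated,
    int tlen, bool in_valid_window)
{
	return ip_ecn == ECN_CE && established && ecn_negotiated &&
	    tlen > 0 && in_valid_window;	/* pure ACKs never latch it */
}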
*/ if (tp->t_state == TCPS_ESTABLISHED && - (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK|TH_ECE|TH_CWR)) == TH_ACK && - ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && + (thflags & (TH_SYN | TH_FIN | TH_RST | TH_URG | TH_ACK | TH_ECE | TH_CWR)) == TH_ACK && + ((tp->t_flags & (TF_NEEDSYN | TF_NEEDFIN)) == 0) && ((to.to_flags & TOF_TS) == 0 || - TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && + TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && th->th_seq == tp->rcv_nxt && tiwin && tiwin == tp->snd_wnd && tp->snd_nxt == tp->snd_max) { - /* * If last ACK falls within this segment's sequence numbers, * record the timestamp. @@ -2904,7 +3005,7 @@ findpcb: * proposal of the tcplw@cray.com list (Braden 1993/04/26). */ if ((to.to_flags & TOF_TS) != 0 && - SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { + SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { tp->ts_recent_age = tcp_now; tp->ts_recent = to.to_tsval; } @@ -2939,8 +3040,9 @@ findpcb: * calculations in this function * assume that snd_una is not updated yet. */ - if (CC_ALGO(tp)->congestion_avd != NULL) + if (CC_ALGO(tp)->congestion_avd != NULL) { CC_ALGO(tp)->congestion_avd(tp, th); + } tcp_ccdbg_trace(tp, th, TCP_CC_INSEQ_ACK_RCVD); sbdrop(&so->so_snd, acked); if (so->so_flags & SOF_ENABLE_MSGS) { @@ -2950,8 +3052,9 @@ findpcb: tcp_sbsnd_trim(&so->so_snd); if (SEQ_GT(tp->snd_una, tp->snd_recover) && - SEQ_LEQ(th->th_ack, tp->snd_recover)) + SEQ_LEQ(th->th_ack, tp->snd_recover)) { tp->snd_recover = th->th_ack - 1; + } tp->snd_una = th->th_ack; TCP_RESET_REXMT_STATE(tp); @@ -2988,16 +3091,19 @@ findpcb: } if (!SLIST_EMPTY(&tp->t_rxt_segments) && !TCP_DSACK_SEQ_IN_WINDOW(tp, - tp->t_dsack_lastuna, tp->snd_una)) + tp->t_dsack_lastuna, tp->snd_una)) { tcp_rxtseg_clean(tp); + } if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 && - tp->t_bwmeas != NULL) + tp->t_bwmeas != NULL) { tcp_bwmeas_check(tp); + } sowwakeup(so); /* has to be done with socket lock held */ - if (!SLIST_EMPTY(&tp->t_notify_ack)) + if (!SLIST_EMPTY(&tp->t_notify_ack)) { tcp_notify_acknowledgement(tp, so); + } if ((so->so_snd.sb_cc) || (tp->t_flags & TF_ACKNOW)) { (void) tcp_output(tp); @@ -3007,7 +3113,7 @@ findpcb: tcp_check_timer_state(tp); socket_unlock(so, 1); - KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; } } else if (th->th_ack == tp->snd_una && @@ -3025,27 +3131,28 @@ findpcb: */ if (turnoff_lro) { tcp_lro_remove_state(tp->t_inpcb->inp_laddr, - tp->t_inpcb->inp_faddr, - tp->t_inpcb->inp_lport, - tp->t_inpcb->inp_fport); + tp->t_inpcb->inp_faddr, + tp->t_inpcb->inp_lport, + tp->t_inpcb->inp_fport); tp->t_flagsext &= ~TF_LRO_OFFLOADED; tp->t_idleat = tp->rcv_nxt; } else if (sw_lro && !pktf_sw_lro_pkt && !isipv6 && (so->so_flags & SOF_USELRO) && !IFNET_IS_CELLULAR(m->m_pkthdr.rcvif) && - (m->m_pkthdr.rcvif->if_type != IFT_LOOP) && + (m->m_pkthdr.rcvif->if_type != IFT_LOOP) && ((th->th_seq - tp->irs) > (tp->t_maxseg << lro_start)) && ((tp->t_idleat == 0) || ((th->th_seq - - tp->t_idleat) > (tp->t_maxseg << lro_start)))) { + tp->t_idleat) > (tp->t_maxseg << lro_start)))) { tp->t_flagsext |= TF_LRO_OFFLOADED; tcp_start_coalescing(ip, th, tlen); tp->t_idleat = 0; } /* Clean receiver SACK report if present */ - if (SACK_ENABLED(tp) && tp->rcv_numsacks) + if (SACK_ENABLED(tp) && tp->rcv_numsacks) { tcp_clean_sackreport(tp); + } ++tcpstat.tcps_preddat; tp->rcv_nxt += tlen; /* @@ -3068,7 +3175,7 @@ findpcb: INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1); } - INP_ADD_STAT(inp, cell, wifi, wired,rxbytes, + 
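/*
 * The long condition straddling this hunk is the Van Jacobson
 * header-prediction gate: an in-order, flag-free ACK segment on an
 * established connection with an unchanged window may take the fast
 * path. Spelled out as a standalone function, with the timestamp and
 * reassembly-queue preconditions omitted:
 */
#include <stdbool.h>
#include <stdint.h>

#define TH_PUSH	0x08
#define TH_ACK	0x10

static bool
header_prediction_ok(bool established, uint8_t thflags,
    uint32_t seg_seq, uint32_t rcv_nxt, uint32_t tiwin,
    uint32_t snd_wnd, uint32_t snd_nxt, uint32_t snd_max)
{
	return established &&
	    (thflags & (uint8_t)~TH_PUSH) == TH_ACK &&	/* pure ACK; PSH ok */
	    seg_seq == rcv_nxt &&		/* exactly in order */
	    tiwin != 0 && tiwin == snd_wnd &&	/* window unchanged */
	    snd_nxt == snd_max;			/* nothing being retransmitted */
}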
INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, tlen); inp_set_activity_bitmap(inp); } @@ -3078,8 +3185,9 @@ findpcb: * connection is in streaming mode and the last * packet was not an end-of-write */ - if (tp->t_flags & TF_STREAMING_ON) + if (tp->t_flags & TF_STREAMING_ON) { tcp_compute_rtt(tp, &to, th); + } tcp_sbrcv_grow(tp, &so->so_rcv, &to, tlen, TCP_AUTORCVBUF_MAX(ifp)); @@ -3088,7 +3196,7 @@ findpcb: * Add data to socket buffer. */ so_recv_data_stat(so, m, 0); - m_adj(m, drop_hdrlen); /* delayed header drop */ + m_adj(m, drop_hdrlen); /* delayed header drop */ /* * If message delivery (SOF_ENABLE_MSGS) is enabled on @@ -3102,20 +3210,19 @@ findpcb: #if INET6 if (isipv6) { KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), - (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])), - th->th_seq, th->th_ack, th->th_win); - } - else + (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])), + th->th_seq, th->th_ack, th->th_win); + } else #endif { KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), - (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)), - th->th_seq, th->th_ack, th->th_win); + (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)), + th->th_seq, th->th_ack, th->th_win); } TCP_INC_VAR(tp->t_unacksegs, nlropkts); - if (DELAY_ACK(tp, th)) { + if (DELAY_ACK(tp, th)) { if ((tp->t_flags & TF_DELACK) == 0) { - tp->t_flags |= TF_DELACK; + tp->t_flags |= TF_DELACK; tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack); } } else { @@ -3125,12 +3232,13 @@ findpcb: tcp_adaptive_rwtimo_check(tp, tlen); - if (tlen > 0) + if (tlen > 0) { tcp_tfo_rcv_data(tp); + } tcp_check_timer_state(tp); socket_unlock(so, 1); - KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; } } @@ -3143,11 +3251,12 @@ findpcb: */ socket_lock_assert_owned(so); win = tcp_sbspace(tp); - if (win < 0) + if (win < 0) { win = 0; - else { /* clip rcv window to 4K for modems */ - if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) + } else { /* clip rcv window to 4K for modems */ + if (tp->t_flags & TF_SLOWLINK && slowlink_wsize > 0) { win = min(win, slowlink_wsize); + } } tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); #if MPTCP @@ -3166,7 +3275,6 @@ findpcb: #endif /* MPTCP */ switch (tp->t_state) { - /* * Initialize tp->rcv_nxt, and tp->irs, select an initial * tp->iss, and send a segment: @@ -3186,19 +3294,21 @@ findpcb: #if INET6 if (isipv6) { MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6, - M_SONAME, M_NOWAIT); - if (sin6 == NULL) + M_SONAME, M_NOWAIT); + if (sin6 == NULL) { goto drop; + } bzero(sin6, sizeof(*sin6)); sin6->sin6_family = AF_INET6; sin6->sin6_len = sizeof(*sin6); sin6->sin6_addr = ip6->ip6_src; sin6->sin6_port = th->th_sport; laddr6 = inp->in6p_laddr; - if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { inp->in6p_laddr = ip6->ip6_dst; + } if (in6_pcbconnect(inp, (struct sockaddr *)sin6, - proc0)) { + proc0)) { inp->in6p_laddr = laddr6; FREE(sin6, M_SONAME); goto drop; @@ -3206,20 +3316,22 @@ findpcb: FREE(sin6, M_SONAME); } else #endif - { + { socket_lock_assert_owned(so); MALLOC(sin, struct sockaddr_in *, sizeof *sin, M_SONAME, M_NOWAIT); - if (sin == NULL) + if (sin == NULL) { goto drop; + } sin->sin_family = AF_INET; sin->sin_len = sizeof(*sin); sin->sin_addr = ip->ip_src; sin->sin_port = th->th_sport; bzero((caddr_t)sin->sin_zero, sizeof(sin->sin_zero)); laddr = 
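/*
 * The delayed-ACK choice this hunk re-braces recurs three times in the
 * file, always with the same shape: arm the DELACK timer only if it is
 * not already pending, otherwise demand an immediate ACK. A sketch of
 * the idiom, with the kernel's timer wheel replaced by a plain deadline
 * field and illustrative flag names:
 */
#include <stdbool.h>
#include <stdint.h>

#define F_DELACK	0x01
#define F_ACKNOW	0x02

struct tcb { uint32_t flags; uint32_t delack_deadline; };

static void
schedule_ack(struct tcb *tp, bool may_delay, uint32_t now, uint32_t ticks)
{
	if (may_delay) {
		if ((tp->flags & F_DELACK) == 0) {	/* don't re-arm */
			tp->flags |= F_DELACK;
			tp->delack_deadline = now + ticks;
		}
	} else {
		tp->flags |= F_ACKNOW;	/* ACK on the next tcp_output() */
	}
}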
inp->inp_laddr; - if (inp->inp_laddr.s_addr == INADDR_ANY) + if (inp->inp_laddr.s_addr == INADDR_ANY) { inp->inp_laddr = ip->ip_dst; + } if (in_pcbconnect(inp, (struct sockaddr *)sin, proc0, IFSCOPE_NONE, NULL)) { inp->inp_laddr = laddr; @@ -3232,14 +3344,15 @@ findpcb: tcp_dooptions(tp, optp, optlen, th, &to); tcp_finalize_options(tp, &to, ifscope); - if (tfo_enabled(tp) && tcp_tfo_syn(tp, &to)) + if (tfo_enabled(tp) && tcp_tfo_syn(tp, &to)) { isconnected = TRUE; + } - if (iss) + if (iss) { tp->iss = iss; - else { + } else { tp->iss = tcp_new_isn(tp); - } + } tp->irs = th->th_seq; tcp_sendseqinit(tp); tcp_rcvseqinit(tp); @@ -3249,19 +3362,20 @@ findpcb: * set SND.WND = SEG.WND, * initialize CCsend and CCrecv. */ - tp->snd_wnd = tiwin; /* initial send-window */ + tp->snd_wnd = tiwin; /* initial send-window */ tp->max_sndwnd = tp->snd_wnd; tp->t_flags |= TF_ACKNOW; tp->t_unacksegs = 0; DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED); + struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED); tp->t_state = TCPS_SYN_RECEIVED; tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, - TCP_CONN_KEEPINIT(tp)); - dropsocket = 0; /* committed to socket */ + TCP_CONN_KEEPINIT(tp)); + dropsocket = 0; /* committed to socket */ - if (inp->inp_flowhash == 0) + if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); + } #if INET6 /* update flowinfo - RFC 6437 */ if (inp->inp_flow == 0 && @@ -3281,7 +3395,7 @@ findpcb: } goto trimthenstep6; - } + } /* * If the state is SYN_RECEIVED and the seg contains an ACK, @@ -3290,10 +3404,10 @@ findpcb: case TCPS_SYN_RECEIVED: if ((thflags & TH_ACK) && (SEQ_LEQ(th->th_ack, tp->snd_una) || - SEQ_GT(th->th_ack, tp->snd_max))) { - rstreason = BANDLIM_RST_OPENPORT; - IF_TCP_STATINC(ifp, ooopacket); - goto dropwithreset; + SEQ_GT(th->th_ack, tp->snd_max))) { + rstreason = BANDLIM_RST_OPENPORT; + IF_TCP_STATINC(ifp, ooopacket); + goto dropwithreset; } /* @@ -3304,8 +3418,9 @@ findpcb: */ if ((thflags & TH_SYN) && (tp->irs == th->th_seq) && - !(to.to_flags & TOF_SCALE)) + !(to.to_flags & TOF_SCALE)) { tp->t_flags &= ~TF_RCVD_SCALE; + } break; /* @@ -3323,15 +3438,16 @@ findpcb: case TCPS_SYN_SENT: if ((thflags & TH_ACK) && (SEQ_LEQ(th->th_ack, tp->iss) || - SEQ_GT(th->th_ack, tp->snd_max))) { + SEQ_GT(th->th_ack, tp->snd_max))) { rstreason = BANDLIM_UNLIMITED; IF_TCP_STATINC(ifp, ooopacket); goto dropwithreset; } if (thflags & TH_RST) { if ((thflags & TH_ACK) != 0) { - if (tfo_enabled(tp)) + if (tfo_enabled(tp)) { tcp_heuristic_tfo_rst(tp); + } if ((tp->ecn_flags & (TE_SETUPSENT | TE_RCVD_SYN_RST)) == TE_SETUPSENT) { /* * On local connections, send @@ -3353,9 +3469,10 @@ findpcb: } goto drop; } - if ((thflags & TH_SYN) == 0) + if ((thflags & TH_SYN) == 0) { goto drop; - tp->snd_wnd = th->th_win; /* initial send window */ + } + tp->snd_wnd = th->th_win; /* initial send window */ tp->max_sndwnd = tp->snd_wnd; tp->irs = th->th_seq; @@ -3377,8 +3494,9 @@ findpcb: tcpstat.tcps_ecn_not_supported++; } if (tp->ecn_flags & TE_SETUPSENT && - tp->t_rxtshift > 0) + tp->t_rxtshift > 0) { tcp_heuristic_ecn_loss(tp); + } /* non-ECN-setup SYN-ACK */ tp->ecn_flags &= ~TE_SENDIPECT; @@ -3396,9 +3514,10 @@ findpcb: } tp->rcv_adv += min(tp->rcv_wnd, TCP_MAXWIN << tp->rcv_scale); - tp->snd_una++; /* SYN is acked */ - if (SEQ_LT(tp->snd_nxt, tp->snd_una)) + tp->snd_una++; /* SYN is acked */ + if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { tp->snd_nxt = tp->snd_una; + } /* * We have sent more in the SYN than what is being @@ -3430,13 
+3549,12 @@ findpcb: * ACKNOW will be turned on later. */ TCP_INC_VAR(tp->t_unacksegs, nlropkts); - if (DELAY_ACK(tp, th) && tlen != 0 ) { + if (DELAY_ACK(tp, th) && tlen != 0) { if ((tp->t_flags & TF_DELACK) == 0) { tp->t_flags |= TF_DELACK; tp->t_timer[TCPT_DELACK] = OFFSET_FROM_START(tp, tcp_delack); } - } - else { + } else { tp->t_flags |= TF_ACKNOW; } /* @@ -3463,9 +3581,10 @@ findpcb: tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_CONN_KEEPIDLE(tp)); - if (nstat_collect) + if (nstat_collect) { nstat_route_connect_success( - inp->inp_route.ro_rt); + inp->inp_route.ro_rt); + } /* * The SYN is acknowledged but una is not * updated yet. So pass the value of @@ -3483,7 +3602,7 @@ findpcb: isconnected = FALSE; } else #endif /* MPTCP */ - isconnected = TRUE; + isconnected = TRUE; if ((tp->t_tfo_flags & (TFO_F_COOKIE_REQ | TFO_F_COOKIE_SENT)) || (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT)) { @@ -3494,8 +3613,9 @@ findpcb: tp->t_tfo_stats |= TFO_S_SYN_DATA_ACKED; tcpstat.tcps_tfo_syn_data_acked++; #if MPTCP - if (so->so_flags & SOF_MP_SUBFLOW) + if (so->so_flags & SOF_MP_SUBFLOW) { so->so_flags1 |= SOF1_TFO_REWIND; + } #endif tcp_tfo_rcv_probe(tp, tlen); } @@ -3512,7 +3632,7 @@ findpcb: tp->t_flags |= TF_ACKNOW; tp->t_timer[TCPT_REXMT] = 0; DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED); + struct tcpcb *, tp, int32_t, TCPS_SYN_RECEIVED); tp->t_state = TCPS_SYN_RECEIVED; /* @@ -3547,8 +3667,9 @@ trimthenstep6: * processing in the middle of step 5, ack processing. * Otherwise, goto step 6. */ - if (thflags & TH_ACK) + if (thflags & TH_ACK) { goto process_ACK; + } goto step6; /* * If the state is LAST_ACK or CLOSING or TIME_WAIT: @@ -3559,7 +3680,7 @@ trimthenstep6: case TCPS_LAST_ACK: case TCPS_CLOSING: case TCPS_TIME_WAIT: - break; /* continue normal processing */ + break; /* continue normal processing */ /* Received a SYN while connection is already established. * This is a "half open connection and other anomalies" described @@ -3652,17 +3773,16 @@ trimthenstep6: * there is already a RST in flight from the peer. * In that case, accept the RST for non-established * state if it's one off from last_ack_sent. - + * */ if (thflags & TH_RST) { if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || (tp->rcv_wnd == 0 && ((tp->last_ack_sent == th->th_seq) || - ((tp->last_ack_sent -1) == th->th_seq)))) { + ((tp->last_ack_sent - 1) == th->th_seq)))) { if (tcp_do_rfc5961 == 0 || tp->last_ack_sent == th->th_seq) { switch (tp->t_state) { - case TCPS_SYN_RECEIVED: IF_TCP_STATINC(ifp, rstinsynrcv); so->so_error = ECONNREFUSED; @@ -3685,12 +3805,12 @@ trimthenstep6: } case TCPS_FIN_WAIT_1: case TCPS_CLOSE_WAIT: - /* - Drop through ... - */ + /* + * Drop through ... + */ case TCPS_FIN_WAIT_2: so->so_error = ECONNRESET; - close: +close: postevent(so, 0, EV_RESET); soevent(so, (SO_FILT_HINT_LOCKED | @@ -3729,7 +3849,6 @@ trimthenstep6: */ if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && TSTMP_LT(to.to_tsval, tp->ts_recent)) { - /* Check to see if ts_recent is over 24 days old. 
*/ if ((int)(tcp_now - tp->ts_recent_age) > TCP_PAWS_IDLE) { /* @@ -3767,7 +3886,7 @@ trimthenstep6: if (nstat_collect) { nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, - 1, tlen, NSTAT_RX_FLAG_DUPLICATE); + 1, tlen, NSTAT_RX_FLAG_DUPLICATE); INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1); INP_ADD_STAT(inp, cell, wifi, wired, @@ -3775,8 +3894,9 @@ trimthenstep6: tp->t_stat.rxduplicatebytes += tlen; inp_set_activity_bitmap(inp); } - if (tlen > 0) + if (tlen > 0) { goto dropafterack; + } goto drop; } } @@ -3806,10 +3926,11 @@ trimthenstep6: is_syn_set = TRUE; thflags &= ~TH_SYN; th->th_seq++; - if (th->th_urp > 1) + if (th->th_urp > 1) { th->th_urp--; - else + } else { thflags &= ~TH_URG; + } todrop--; } /* @@ -3845,7 +3966,7 @@ trimthenstep6: if (todrop == 1) { /* This could be a keepalive */ soevent(so, SO_FILT_HINT_LOCKED | - SO_FILT_HINT_KEEPALIVE); + SO_FILT_HINT_KEEPALIVE); } todrop = tlen; tcpstat.tcps_rcvduppack++; @@ -3866,18 +3987,18 @@ trimthenstep6: } if (nstat_collect) { nstat_route_rx(tp->t_inpcb->inp_route.ro_rt, 1, - todrop, NSTAT_RX_FLAG_DUPLICATE); + todrop, NSTAT_RX_FLAG_DUPLICATE); INP_ADD_STAT(inp, cell, wifi, wired, rxpackets, 1); INP_ADD_STAT(inp, cell, wifi, wired, rxbytes, todrop); tp->t_stat.rxduplicatebytes += todrop; inp_set_activity_bitmap(inp); } - drop_hdrlen += todrop; /* drop from the top afterwards */ + drop_hdrlen += todrop; /* drop from the top afterwards */ th->th_seq += todrop; tlen -= todrop; - if (th->th_urp > todrop) + if (th->th_urp > todrop) { th->th_urp -= todrop; - else { + } else { thflags &= ~TH_URG; th->th_urp = 0; } @@ -3896,15 +4017,18 @@ trimthenstep6: boolean_t close_it = FALSE; if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF) && - tp->t_state > TCPS_CLOSE_WAIT) + tp->t_state > TCPS_CLOSE_WAIT) { close_it = TRUE; + } if ((so->so_flags & SOF_MP_SUBFLOW) && (mptetoso(tptomptp(tp)->mpt_mpte)->so_state & SS_NOFDREF) && - tp->t_state > TCPS_CLOSE_WAIT) + tp->t_state > TCPS_CLOSE_WAIT) { close_it = TRUE; + } - if ((so->so_flags & SOF_DEFUNCT) && tp->t_state > TCPS_FIN_WAIT_1) + if ((so->so_flags & SOF_DEFUNCT) && tp->t_state > TCPS_FIN_WAIT_1) { close_it = TRUE; + } if (close_it) { tp = tcp_close(tp); @@ -3919,7 +4043,7 @@ trimthenstep6: * If segment ends after window, drop trailing data * (and PUSH and FIN); if nothing left, just ACK. */ - todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd); + todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); if (todrop > 0) { tcpstat.tcps_rcvpackafterwin++; if (todrop >= tlen) { @@ -3948,13 +4072,15 @@ trimthenstep6: if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { tp->t_flags |= TF_ACKNOW; tcpstat.tcps_rcvwinprobe++; - } else + } else { goto dropafterack; - } else + } + } else { tcpstat.tcps_rcvbyteafterwin += todrop; + } m_adj(m, -todrop); tlen -= todrop; - thflags &= ~(TH_PUSH|TH_FIN); + thflags &= ~(TH_PUSH | TH_FIN); } /* @@ -3978,7 +4104,7 @@ trimthenstep6: if ((to.to_flags & TOF_TS) != 0 && SEQ_LEQ(th->th_seq, tp->last_ack_sent) && SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + - ((thflags & (TH_SYN|TH_FIN)) != 0))) { + ((thflags & (TH_SYN | TH_FIN)) != 0))) { tp->ts_recent_age = tcp_now; tp->ts_recent = to.to_tsval; } @@ -4040,15 +4166,17 @@ trimthenstep6: * We need to make absolutely sure that we are * going to reply upon a duplicate SYN-segment. 
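/*
 * The PAWS branch opening this hunk has a subtlety worth isolating: a
 * segment with an "old" timestamp is normally dropped, but if ts_recent
 * itself is more than 24 days stale (TCP_PAWS_IDLE) the cached value is
 * invalidated instead, since a 32-bit timestamp clock may legitimately
 * wrap in that time. Modeled directly, assuming the 100 Hz timestamp
 * clock xnu uses:
 */
#include <stdbool.h>
#include <stdint.h>

#define PAWS_IDLE	(24 * 24 * 60 * 60 * 100)	/* 24 days of ticks */

/* Returns true if the segment must be dropped; clears *ts_recent when
 * the cached stamp is too old to trust. */
static bool
paws_reject(uint32_t tsval, uint32_t *ts_recent, uint32_t ts_recent_age,
    uint32_t now)
{
	if (*ts_recent == 0 || (int32_t)(tsval - *ts_recent) >= 0)
		return false;			/* timestamp is fresh enough */
	if ((int32_t)(now - ts_recent_age) > PAWS_IDLE) {
		*ts_recent = 0;		/* stale cache, not a bad segment */
		return false;
	}
	return true;				/* genuine PAWS failure */
}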
*/ - if (th->th_flags & TH_SYN) + if (th->th_flags & TH_SYN) { needoutput = 1; + } } goto step6; - } else if (tp->t_flags & TF_ACKNOW) + } else if (tp->t_flags & TF_ACKNOW) { goto dropafterack; - else + } else { goto drop; + } } /* @@ -4056,7 +4184,6 @@ trimthenstep6: */ switch (tp->t_state) { - /* * In SYN_RECEIVED state, the ack ACKs our SYN, so enter * ESTABLISHED state and continue processing. @@ -4093,10 +4220,11 @@ trimthenstep6: struct tcpcb *, tp, int32_t, TCPS_ESTABLISHED); tp->t_state = TCPS_ESTABLISHED; tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, - TCP_CONN_KEEPIDLE(tp)); - if (nstat_collect) + TCP_CONN_KEEPIDLE(tp)); + if (nstat_collect) { nstat_route_connect_success( - tp->t_inpcb->inp_route.ro_rt); + tp->t_inpcb->inp_route.ro_rt); + } /* * The SYN is acknowledged but una is not updated * yet. So pass the value of ack to compute @@ -4108,9 +4236,10 @@ trimthenstep6: * If segment contains data or ACK, will call tcp_reass() * later; if not, do so now to pass queued data to user. */ - if (tlen == 0 && (thflags & TH_FIN) == 0) + if (tlen == 0 && (thflags & TH_FIN) == 0) { (void) tcp_reass(tp, (struct tcphdr *)0, &tlen, NULL, ifp); + } tp->snd_wl1 = th->th_seq - 1; #if MPTCP @@ -4123,7 +4252,7 @@ trimthenstep6: isconnected = FALSE; } else #endif /* MPTCP */ - isconnected = TRUE; + isconnected = TRUE; if ((tp->t_tfo_flags & TFO_F_COOKIE_VALID)) { /* Done this when receiving the SYN */ isconnected = FALSE; @@ -4149,9 +4278,10 @@ trimthenstep6: * handled by the label "dodata" right after step6. */ if (so->so_snd.sb_cc) { - tp->snd_una++; /* SYN is acked */ - if (SEQ_LT(tp->snd_nxt, tp->snd_una)) + tp->snd_una++; /* SYN is acked */ + if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { tp->snd_nxt = tp->snd_una; + } /* * No duplicate-ACK handling is needed. So, we @@ -4165,14 +4295,15 @@ trimthenstep6: */ if (SACK_ENABLED(tp) && (to.to_nsacks > 0 || - !TAILQ_EMPTY(&tp->snd_holes))) + !TAILQ_EMPTY(&tp->snd_holes))) { tcp_sack_doack(tp, &to, th, &sack_bytes_acked); + } goto process_ACK; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ /* * In ESTABLISHED state: drop duplicate ACKs; ACK out of range @@ -4220,8 +4351,9 @@ trimthenstep6: } if (SACK_ENABLED(tp) && - (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes))) + (to.to_nsacks > 0 || !TAILQ_EMPTY(&tp->snd_holes))) { tcp_sack_doack(tp, &to, th, &sack_bytes_acked); + } #if MPTCP if (tp->t_mpuna && SEQ_GEQ(th->th_ack, tp->t_mpuna)) { @@ -4235,7 +4367,7 @@ trimthenstep6: tp->t_mpflags |= TMPF_MPTCP_TRUE; mptcplog((LOG_DEBUG, "MPTCP " - "Sockets: %s \n",__func__), + "Sockets: %s \n", __func__), MPTCP_SOCKET_DBG, MPTCP_LOGLVL_LOG); @@ -4274,8 +4406,9 @@ trimthenstep6: * instead of the dupack */ if ((thflags & TH_FIN) && - !TCPS_HAVERCVDFIN(tp->t_state)) + !TCPS_HAVERCVDFIN(tp->t_state)) { break; + } process_dupack: #if MPTCP /* @@ -4301,8 +4434,9 @@ process_dupack: * so that we can process partial acks * correctly */ - if (tp->ecn_flags & TE_INRECOVERY) + if (tp->ecn_flags & TE_INRECOVERY) { tp->ecn_flags &= ~TE_INRECOVERY; + } tcpstat.tcps_rcvdupack++; ++tp->t_dupacks; @@ -4314,16 +4448,18 @@ process_dupack: if (tp->t_early_rexmt_count > 0 && TSTMP_GEQ(tcp_now, (tp->t_early_rexmt_win + - TCP_EARLY_REXMT_WIN))) + TCP_EARLY_REXMT_WIN))) { tp->t_early_rexmt_count = 0; + } /* * Is early retransmit needed? We check for * this when the connection is waiting for * duplicate acks to enter fast recovery. 
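/*
 * The ts_recent bookkeeping near the top of this region follows RFC 1323
 * as amended by the Braden clarification: record the segment's timestamp
 * only if the last ACK we sent falls inside the segment's sequence range,
 * with SYN and FIN each occupying one sequence number. The acceptance
 * test in isolation (illustrative names):
 */
#include <stdbool.h>
#include <stdint.h>

#define SEQ_LEQ(a, b)	((int32_t)((a) - (b)) <= 0)

static bool
should_record_tstamp(uint32_t seg_seq, int tlen, bool has_syn_or_fin,
    uint32_t last_ack_sent)
{
	uint32_t seg_end = seg_seq + tlen + (has_syn_or_fin ? 1 : 0);

	return SEQ_LEQ(seg_seq, last_ack_sent) &&
	    SEQ_LEQ(last_ack_sent, seg_end);
}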
*/ - if (!IN_FASTRECOVERY(tp)) + if (!IN_FASTRECOVERY(tp)) { tcp_early_rexmt_check(tp, th); + } /* * If we've seen exactly rexmt threshold @@ -4351,8 +4487,7 @@ process_dupack: tp->t_dupacks = 0; tp->t_rexmtthresh = tcprexmtthresh; } else if (tp->t_dupacks > tp->t_rexmtthresh || - IN_FASTRECOVERY(tp)) { - + IN_FASTRECOVERY(tp)) { /* * If this connection was seeing packet * reordering, then recovery might be @@ -4361,8 +4496,8 @@ process_dupack: */ if (SACK_ENABLED(tp) && !IN_FASTRECOVERY(tp) && (tp->t_flagsext & - (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) == - (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) { + (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) == + (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) { /* * Since the SACK information is already * updated, this ACK will be dropped @@ -4381,20 +4516,22 @@ process_dupack: * worth of data in flight. */ awnd = (tp->snd_nxt - tp->snd_fack) + - tp->sackhint.sack_bytes_rexmit; + tp->sackhint.sack_bytes_rexmit; if (awnd < tp->snd_ssthresh) { tp->snd_cwnd += tp->t_maxseg; - if (tp->snd_cwnd > tp->snd_ssthresh) + if (tp->snd_cwnd > tp->snd_ssthresh) { tp->snd_cwnd = tp->snd_ssthresh; + } } } else { tp->snd_cwnd += tp->t_maxseg; } /* Process any window updates */ - if (tiwin > tp->snd_wnd) + if (tiwin > tp->snd_wnd) { tcp_update_window(tp, thflags, th, tiwin, tlen); + } tcp_ccdbg_trace(tp, th, TCP_CC_IN_FASTRECOVERY); @@ -4425,10 +4562,11 @@ process_dupack: break; } } - if (tp->t_flags & TF_SENTFIN) + if (tp->t_flags & TF_SENTFIN) { tp->snd_recover = tp->snd_max - 1; - else + } else { tp->snd_recover = tp->snd_max; + } tp->t_timer[TCPT_PTO] = 0; tp->t_rtttime = 0; @@ -4440,7 +4578,7 @@ process_dupack: */ if (SACK_ENABLED(tp) && (tp->t_flagsext & - (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) + (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) == TF_PKTS_REORDERED && !IN_FASTRECOVERY(tp) && tp->t_reorderwin > 0 && @@ -4462,12 +4600,14 @@ process_dupack: * defined a hook for tasks to run * before entering FR, call it */ - if (CC_ALGO(tp)->pre_fr != NULL) + if (CC_ALGO(tp)->pre_fr != NULL) { CC_ALGO(tp)->pre_fr(tp); + } ENTER_FASTRECOVERY(tp); tp->t_timer[TCPT_REXMT] = 0; - if (TCP_ECN_ENABLED(tp)) + if (TCP_ECN_ENABLED(tp)) { tp->ecn_flags |= TE_SENDCWR; + } if (SACK_ENABLED(tp)) { tcpstat.tcps_sack_recovery_episode++; @@ -4478,10 +4618,11 @@ process_dupack: ~TF_CWND_NONVALIDATED; /* Process any window updates */ - if (tiwin > tp->snd_wnd) + if (tiwin > tp->snd_wnd) { tcp_update_window( - tp, thflags, - th, tiwin, tlen); + tp, thflags, + th, tiwin, tlen); + } tcp_ccdbg_trace(tp, th, TCP_CC_ENTER_FASTRECOVERY); @@ -4492,28 +4633,30 @@ process_dupack: tp->snd_cwnd = tp->t_maxseg; /* Process any window updates */ - if (tiwin > tp->snd_wnd) + if (tiwin > tp->snd_wnd) { tcp_update_window(tp, thflags, th, tiwin, tlen); + } (void) tcp_output(tp); if (tp->t_flagsext & TF_CWND_NONVALIDATED) { tcp_cc_adjust_nonvalidated_cwnd(tp); } else { tp->snd_cwnd = tp->snd_ssthresh + - tp->t_maxseg * tp->t_dupacks; + tp->t_maxseg * tp->t_dupacks; } - if (SEQ_GT(onxt, tp->snd_nxt)) + if (SEQ_GT(onxt, tp->snd_nxt)) { tp->snd_nxt = onxt; + } tcp_ccdbg_trace(tp, th, TCP_CC_ENTER_FASTRECOVERY); goto drop; } else if (limited_txmt && - ALLOW_LIMITED_TRANSMIT(tp) && - (!(SACK_ENABLED(tp)) || sack_bytes_acked > 0) && - (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)) > 0) { + ALLOW_LIMITED_TRANSMIT(tp) && + (!(SACK_ENABLED(tp)) || sack_bytes_acked > 0) && + (so->so_snd.sb_cc - (tp->snd_max - tp->snd_una)) > 0) { u_int32_t incr = (tp->t_maxseg * tp->t_dupacks); /* Use Limited Transmit algorithm on the first two @@ 
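/*
 * Inside SACK-based fast recovery (hunk above) a further dupack may
 * inflate cwnd only while the data actually in flight -- awnd, counting
 * retransmits -- is still below ssthresh, which keeps roughly one window
 * in the pipe without overshooting. The maintenance step in isolation:
 */
#include <stdint.h>

static uint32_t
sack_recovery_cwnd(uint32_t snd_nxt, uint32_t snd_fack,
    uint32_t sack_bytes_rexmit, uint32_t cwnd, uint32_t ssthresh,
    uint32_t maxseg)
{
	uint32_t awnd = (snd_nxt - snd_fack) + sack_bytes_rexmit;

	if (awnd < ssthresh) {
		cwnd += maxseg;
		if (cwnd > ssthresh)
			cwnd = ssthresh;	/* never inflate past ssthresh */
	}
	return cwnd;
}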
-4542,26 +4685,29 @@ process_dupack: * recovery, the subsequent ACKs should * not be treated as partial acks. */ - if (tp->ecn_flags & TE_INRECOVERY) + if (tp->ecn_flags & TE_INRECOVERY) { goto process_ACK; + } - if (SACK_ENABLED(tp)) + if (SACK_ENABLED(tp)) { tcp_sack_partialack(tp, th); - else + } else { tcp_newreno_partial_ack(tp, th); + } tcp_ccdbg_trace(tp, th, TCP_CC_PARTIAL_ACK); } else { EXIT_FASTRECOVERY(tp); - if (CC_ALGO(tp)->post_fr != NULL) + if (CC_ALGO(tp)->post_fr != NULL) { CC_ALGO(tp)->post_fr(tp, th); + } tp->t_pipeack = 0; tcp_clear_pipeack_state(tp); tcp_ccdbg_trace(tp, th, TCP_CC_EXIT_FASTRECOVERY); } } else if ((tp->t_flagsext & - (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) - == (TF_PKTS_REORDERED|TF_DELAY_RECOVERY)) { + (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) + == (TF_PKTS_REORDERED | TF_DELAY_RECOVERY)) { /* * If the ack acknowledges upto snd_recover or if * it acknowledges all the snd holes, exit @@ -4642,16 +4788,18 @@ process_ACK: tp->t_timer[TCPT_REXMT] = 0; tp->t_timer[TCPT_PTO] = 0; needoutput = 1; - } else if (tp->t_timer[TCPT_PERSIST] == 0) + } else if (tp->t_timer[TCPT_PERSIST] == 0) { tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur); + } /* * If no data (only SYN) was ACK'd, skip rest of ACK * processing. */ - if (acked == 0) + if (acked == 0) { goto step6; + } /* * When outgoing data has been acked (except the SYN+data), we @@ -4659,8 +4807,9 @@ process_ACK: */ if ((tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) && !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) && - !(th->th_flags & TH_SYN)) + !(th->th_flags & TH_SYN)) { tp->t_tfo_flags |= TFO_F_NO_SNDPROBING; + } /* * If TH_ECE is received, make sure that ECN is enabled @@ -4674,7 +4823,7 @@ process_ACK: */ if (!IN_FASTRECOVERY(tp)) { tcp_reduce_congestion_window(tp); - tp->ecn_flags |= (TE_INRECOVERY|TE_SENDCWR); + tp->ecn_flags |= (TE_INRECOVERY | TE_SENDCWR); /* * Also note that the connection received * ECE atleast once @@ -4695,8 +4844,9 @@ process_ACK: * not updated yet. 
*/ if (!IN_FASTRECOVERY(tp)) { - if (CC_ALGO(tp)->ack_rcvd != NULL) + if (CC_ALGO(tp)->ack_rcvd != NULL) { CC_ALGO(tp)->ack_rcvd(tp, th); + } tcp_ccdbg_trace(tp, th, TCP_CC_ACK_RCVD); } if (acked > so->so_snd.sb_cc) { @@ -4704,44 +4854,50 @@ process_ACK: sbdrop(&so->so_snd, (int)so->so_snd.sb_cc); if (so->so_flags & SOF_ENABLE_MSGS) { so->so_msg_state->msg_serial_bytes -= - (int)so->so_snd.sb_cc; + (int)so->so_snd.sb_cc; } ourfinisacked = 1; } else { sbdrop(&so->so_snd, acked); if (so->so_flags & SOF_ENABLE_MSGS) { so->so_msg_state->msg_serial_bytes -= - acked; + acked; } tcp_sbsnd_trim(&so->so_snd); tp->snd_wnd -= acked; ourfinisacked = 0; } /* detect una wraparound */ - if ( !IN_FASTRECOVERY(tp) && + if (!IN_FASTRECOVERY(tp) && SEQ_GT(tp->snd_una, tp->snd_recover) && - SEQ_LEQ(th->th_ack, tp->snd_recover)) + SEQ_LEQ(th->th_ack, tp->snd_recover)) { tp->snd_recover = th->th_ack - 1; + } if (IN_FASTRECOVERY(tp) && - SEQ_GEQ(th->th_ack, tp->snd_recover)) + SEQ_GEQ(th->th_ack, tp->snd_recover)) { EXIT_FASTRECOVERY(tp); + } tp->snd_una = th->th_ack; if (SACK_ENABLED(tp)) { - if (SEQ_GT(tp->snd_una, tp->snd_recover)) + if (SEQ_GT(tp->snd_una, tp->snd_recover)) { tp->snd_recover = tp->snd_una; + } } - if (SEQ_LT(tp->snd_nxt, tp->snd_una)) + if (SEQ_LT(tp->snd_nxt, tp->snd_una)) { tp->snd_nxt = tp->snd_una; + } if (!SLIST_EMPTY(&tp->t_rxt_segments) && !TCP_DSACK_SEQ_IN_WINDOW(tp, tp->t_dsack_lastuna, - tp->snd_una)) + tp->snd_una)) { tcp_rxtseg_clean(tp); + } if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 && - tp->t_bwmeas != NULL) + tp->t_bwmeas != NULL) { tcp_bwmeas_check(tp); + } /* * sowwakeup must happen after snd_una, et al. are @@ -4750,11 +4906,11 @@ process_ACK: */ sowwakeup(so); - if (!SLIST_EMPTY(&tp->t_notify_ack)) + if (!SLIST_EMPTY(&tp->t_notify_ack)) { tcp_notify_acknowledgement(tp, so); + } switch (tp->t_state) { - /* * In FIN_WAIT_1 STATE in addition to the processing * for the ESTABLISHED state if our FIN is now acknowledged @@ -4771,14 +4927,14 @@ process_ACK: */ if (so->so_state & SS_CANTRCVMORE) { tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, - TCP_CONN_MAXIDLE(tp)); + TCP_CONN_MAXIDLE(tp)); isconnected = FALSE; isdisconnected = TRUE; } DTRACE_TCP4(state__change, void, NULL, - struct inpcb *, inp, - struct tcpcb *, tp, - int32_t, TCPS_FIN_WAIT_2); + struct inpcb *, inp, + struct tcpcb *, tp, + int32_t, TCPS_FIN_WAIT_2); tp->t_state = TCPS_FIN_WAIT_2; /* fall through and make sure we also recognize * data ACKed with the FIN @@ -4786,7 +4942,7 @@ process_ACK: } break; - /* + /* * In CLOSING STATE in addition to the processing for * the ESTABLISHED state if the ACK acknowledges our FIN * then enter the TIME-WAIT state, otherwise ignore @@ -4795,9 +4951,9 @@ process_ACK: case TCPS_CLOSING: if (ourfinisacked) { DTRACE_TCP4(state__change, void, NULL, - struct inpcb *, inp, - struct tcpcb *, tp, - int32_t, TCPS_TIME_WAIT); + struct inpcb *, inp, + struct tcpcb *, tp, + int32_t, TCPS_TIME_WAIT); tp->t_state = TCPS_TIME_WAIT; tcp_canceltimers(tp); if (tp->t_flagsext & TF_NOTIMEWAIT) { @@ -4858,8 +5014,9 @@ step6: /* * Update window information. */ - if (tcp_update_window(tp, thflags, th, tiwin, tlen)) + if (tcp_update_window(tp, thflags, th, tiwin, tlen)) { needoutput = 1; + } /* * Process segments with URG. @@ -4873,9 +5030,9 @@ step6: * actually wanting to send this much urgent data. 
*/ if (th->th_urp + so->so_rcv.sb_cc > sb_max) { - th->th_urp = 0; /* XXX */ - thflags &= ~TH_URG; /* XXX */ - goto dodata; /* XXX */ + th->th_urp = 0; /* XXX */ + thflags &= ~TH_URG; /* XXX */ + goto dodata; /* XXX */ } /* * If this segment advances the known urgent pointer, @@ -4891,7 +5048,7 @@ step6: * of data past the urgent section as the original * spec states (in one of two places). */ - if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { + if (SEQ_GT(th->th_seq + th->th_urp, tp->rcv_up)) { tp->rcv_up = th->th_seq + th->th_urp; so->so_oobmark = so->so_rcv.sb_cc + (tp->rcv_up - tp->rcv_nxt) - 1; @@ -4910,19 +5067,21 @@ step6: */ if (th->th_urp <= (u_int32_t)tlen #if SO_OOBINLINE - && (so->so_options & SO_OOBINLINE) == 0 + && (so->so_options & SO_OOBINLINE) == 0 #endif - ) + ) { tcp_pulloutofband(so, th, m, - drop_hdrlen); /* hdr drop is delayed */ + drop_hdrlen); /* hdr drop is delayed */ + } } else { /* * If no out of band data is expected, * pull receive urgent pointer along * with the receive window. */ - if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) + if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) { tp->rcv_up = tp->rcv_nxt; + } } dodata: @@ -4958,11 +5117,11 @@ dodata: if ((tlen || (thflags & TH_FIN)) && TCPS_HAVERCVDFIN(tp->t_state) == 0 && (TCPS_HAVEESTABLISHED(tp->t_state) || - (tp->t_state == TCPS_SYN_RECEIVED && - (tp->t_tfo_flags & TFO_F_COOKIE_VALID)))) { + (tp->t_state == TCPS_SYN_RECEIVED && + (tp->t_tfo_flags & TFO_F_COOKIE_VALID)))) { tcp_seq save_start = th->th_seq; tcp_seq save_end = th->th_seq + tlen; - m_adj(m, drop_hdrlen); /* delayed header drop */ + m_adj(m, drop_hdrlen); /* delayed header drop */ /* * Insert segment which includes th into TCP reassembly queue * with control block tp. Set thflags to whether reassembly now @@ -4982,18 +5141,18 @@ dodata: * connection is in streaming mode and the last * packet was not an end-of-write */ - if (tp->t_flags & TF_STREAMING_ON) + if (tp->t_flags & TF_STREAMING_ON) { tcp_compute_rtt(tp, &to, th); + } if (DELAY_ACK(tp, th) && - ((tp->t_flags & TF_ACKNOW) == 0) ) { + ((tp->t_flags & TF_ACKNOW) == 0)) { if ((tp->t_flags & TF_DELACK) == 0) { tp->t_flags |= TF_DELACK; tp->t_timer[TCPT_DELACK] = - OFFSET_FROM_START(tp, tcp_delack); + OFFSET_FROM_START(tp, tcp_delack); } - } - else { + } else { tp->t_flags |= TF_ACKNOW; } tp->rcv_nxt += tlen; @@ -5026,38 +5185,37 @@ dodata: } if ((tlen > 0 || (th->th_flags & TH_FIN)) && SACK_ENABLED(tp)) { - if (th->th_flags & TH_FIN) + if (th->th_flags & TH_FIN) { save_end++; + } tcp_update_sack_list(tp, save_start, save_end); } tcp_adaptive_rwtimo_check(tp, tlen); - if (tlen > 0) + if (tlen > 0) { tcp_tfo_rcv_data(tp); + } - if (tp->t_flags & TF_DELACK) - { + if (tp->t_flags & TF_DELACK) { #if INET6 if (isipv6) { KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), - (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])), - th->th_seq, th->th_ack, th->th_win); - } - else + (((ip6->ip6_src.s6_addr16[0]) << 16) | (ip6->ip6_dst.s6_addr16[0])), + th->th_seq, th->th_ack, th->th_win); + } else #endif { KERNEL_DEBUG(DBG_LAYER_END, ((th->th_dport << 16) | th->th_sport), - (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)), - th->th_seq, th->th_ack, th->th_win); + (((ip->ip_src.s_addr & 0xffff) << 16) | (ip->ip_dst.s_addr & 0xffff)), + th->th_seq, th->th_ack, th->th_win); } - } } else { if ((so->so_flags & SOF_MP_SUBFLOW) && tlen == 0 && (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) { - m_adj(m, drop_hdrlen); /* delayed header drop */ + 
m_adj(m, drop_hdrlen); /* delayed header drop */ mptcp_input(tptomptp(tp)->mpt_mpte, m); tp->t_flags |= TF_ACKNOW; } else { @@ -5093,8 +5251,7 @@ dodata: tp->rcv_nxt++; } switch (tp->t_state) { - - /* + /* * In SYN_RECEIVED and ESTABLISHED STATES * enter the CLOSE_WAIT state. */ @@ -5102,30 +5259,30 @@ dodata: tp->t_starttime = tcp_now; case TCPS_ESTABLISHED: DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_CLOSE_WAIT); + struct tcpcb *, tp, int32_t, TCPS_CLOSE_WAIT); tp->t_state = TCPS_CLOSE_WAIT; break; - /* + /* * If still in FIN_WAIT_1 STATE FIN has not been acked so * enter the CLOSING state. */ case TCPS_FIN_WAIT_1: DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_CLOSING); + struct tcpcb *, tp, int32_t, TCPS_CLOSING); tp->t_state = TCPS_CLOSING; break; - /* + /* * In FIN_WAIT_2 state enter the TIME_WAIT state, * starting the time-wait timer, turning off the other * standard timers. */ case TCPS_FIN_WAIT_2: DTRACE_TCP4(state__change, void, NULL, - struct inpcb *, inp, - struct tcpcb *, tp, - int32_t, TCPS_TIME_WAIT); + struct inpcb *, inp, + struct tcpcb *, tp, + int32_t, TCPS_TIME_WAIT); tp->t_state = TCPS_TIME_WAIT; tcp_canceltimers(tp); tp->t_flags |= TF_ACKNOW; @@ -5146,9 +5303,10 @@ dodata: } } #if TCPDEBUG - if (so->so_options & SO_DEBUG) + if (so->so_options & SO_DEBUG) { tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, - &tcp_savetcp, 0); + &tcp_savetcp, 0); + } #endif /* @@ -5162,7 +5320,7 @@ dodata: socket_unlock(so, 1); - KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; dropafterack: @@ -5183,15 +5341,16 @@ dropafterack: */ if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && (SEQ_GT(tp->snd_una, th->th_ack) || - SEQ_GT(th->th_ack, tp->snd_max)) ) { + SEQ_GT(th->th_ack, tp->snd_max))) { rstreason = BANDLIM_RST_OPENPORT; IF_TCP_STATINC(ifp, dospacket); goto dropwithreset; } #if TCPDEBUG - if (so->so_options & SO_DEBUG) + if (so->so_options & SO_DEBUG) { tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, - &tcp_savetcp, 0); + &tcp_savetcp, 0); + } #endif m_freem(m); tp->t_flags |= TF_ACKNOW; @@ -5199,7 +5358,7 @@ dropafterack: /* Don't need to check timer state as we should have done it during tcp_output */ socket_unlock(so, 1); - KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; dropwithresetnosock: nosock = 1; @@ -5209,49 +5368,55 @@ dropwithreset: * Make ACK acceptable to originator of segment. * Don't bother to respond if destination was broadcast/multicast. */ - if ((thflags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) + if ((thflags & TH_RST) || m->m_flags & (M_BCAST | M_MCAST)) { goto drop; + } #if INET6 if (isipv6) { if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || - IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) + IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { goto drop; + } } else #endif /* INET6 */ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || - in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) + in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { goto drop; + } /* IPv6 anycast check is done at tcp6_input() */ /* * Perform bandwidth limiting. 
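 * (Editor's note: badport_bandlim() charges this reply against a rate-limit
 * class selected by rstreason; a negative return means the quota for this
 * class is exhausted and the segment is dropped without generating a RST.)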
*/ #if ICMP_BANDLIM - if (badport_bandlim(rstreason) < 0) + if (badport_bandlim(rstreason) < 0) { goto drop; + } #endif #if TCPDEBUG - if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) + if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) { tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, - &tcp_savetcp, 0); + &tcp_savetcp, 0); + } #endif bzero(&tra, sizeof(tra)); tra.ifscope = ifscope; tra.awdl_unrestricted = 1; tra.intcoproc_allowed = 1; - if (thflags & TH_ACK) + if (thflags & TH_ACK) { /* mtod() below is safe as long as hdr dropping is delayed */ tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, th->th_ack, TH_RST, &tra); - else { - if (thflags & TH_SYN) + } else { + if (thflags & TH_SYN) { tlen++; + } /* mtod() below is safe as long as hdr dropping is delayed */ - tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, - (tcp_seq)0, TH_RST|TH_ACK, &tra); + tcp_respond(tp, mtod(m, void *), th, m, th->th_seq + tlen, + (tcp_seq)0, TH_RST | TH_ACK, &tra); } /* destroy temporarily created socket */ if (dropsocket) { @@ -5260,7 +5425,7 @@ dropwithreset: } else if ((inp != NULL) && (nosock == 0)) { socket_unlock(so, 1); } - KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; dropnosock: nosock = 1; @@ -5269,20 +5434,20 @@ drop: * Drop space held by incoming segment and return. */ #if TCPDEBUG - if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) + if (tp == 0 || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) { tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, - &tcp_savetcp, 0); + &tcp_savetcp, 0); + } #endif m_freem(m); /* destroy temporarily created socket */ if (dropsocket) { (void) soabort(so); socket_unlock(so, 1); - } - else if (nosock == 0) { + } else if (nosock == 0) { socket_unlock(so, 1); } - KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END,0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_TCP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; } @@ -5298,27 +5463,31 @@ tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th, for (; cnt > 0; cnt -= optlen, cp += optlen) { opt = cp[0]; - if (opt == TCPOPT_EOL) + if (opt == TCPOPT_EOL) { break; - if (opt == TCPOPT_NOP) + } + if (opt == TCPOPT_NOP) { optlen = 1; - else { - if (cnt < 2) + } else { + if (cnt < 2) { break; + } optlen = cp[1]; - if (optlen < 2 || optlen > cnt) + if (optlen < 2 || optlen > cnt) { break; + } } switch (opt) { - default: continue; case TCPOPT_MAXSEG: - if (optlen != TCPOLEN_MAXSEG) + if (optlen != TCPOLEN_MAXSEG) { continue; - if (!(th->th_flags & TH_SYN)) + } + if (!(th->th_flags & TH_SYN)) { continue; + } bcopy((char *) cp + 2, (char *) &mss, sizeof(mss)); NTOHS(mss); to->to_mss = mss; @@ -5326,17 +5495,20 @@ tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th, break; case TCPOPT_WINDOW: - if (optlen != TCPOLEN_WINDOW) + if (optlen != TCPOLEN_WINDOW) { continue; - if (!(th->th_flags & TH_SYN)) + } + if (!(th->th_flags & TH_SYN)) { continue; + } to->to_flags |= TOF_SCALE; to->to_requested_s_scale = min(cp[2], TCP_MAX_WINSHIFT); break; case TCPOPT_TIMESTAMP: - if (optlen != TCPOLEN_TIMESTAMP) + if (optlen != TCPOLEN_TIMESTAMP) { continue; + } to->to_flags |= TOF_TS; bcopy((char *)cp + 2, (char *)&to->to_tsval, sizeof(to->to_tsval)); @@ -5346,19 +5518,23 @@ tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th, NTOHL(to->to_tsecr); /* Re-enable sending Timestamps if we received them */ if (!(tp->t_flags & TF_REQ_TSTMP) && - tcp_do_rfc1323 == 1) + 
tcp_do_rfc1323 == 1) { tp->t_flags |= TF_REQ_TSTMP; + } break; case TCPOPT_SACK_PERMITTED: if (!tcp_do_sack || - optlen != TCPOLEN_SACK_PERMITTED) + optlen != TCPOLEN_SACK_PERMITTED) { continue; - if (th->th_flags & TH_SYN) + } + if (th->th_flags & TH_SYN) { to->to_flags |= TOF_SACK; + } break; case TCPOPT_SACK: - if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) + if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) { continue; + } to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; to->to_sacks = cp + 2; tcpstat.tcps_sack_rcv_blocks++; @@ -5366,18 +5542,21 @@ tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcphdr *th, break; case TCPOPT_FASTOPEN: if (optlen == TCPOLEN_FASTOPEN_REQ) { - if (tp->t_state != TCPS_LISTEN) + if (tp->t_state != TCPS_LISTEN) { continue; + } to->to_flags |= TOF_TFOREQ; } else { if (optlen < TCPOLEN_FASTOPEN_REQ || (optlen - TCPOLEN_FASTOPEN_REQ) > TFO_COOKIE_LEN_MAX || - (optlen - TCPOLEN_FASTOPEN_REQ) < TFO_COOKIE_LEN_MIN) + (optlen - TCPOLEN_FASTOPEN_REQ) < TFO_COOKIE_LEN_MIN) { continue; + } if (tp->t_state != TCPS_LISTEN && - tp->t_state != TCPS_SYN_SENT) + tp->t_state != TCPS_SYN_SENT) { continue; + } to->to_flags |= TOF_TFO; to->to_tfo = cp + 1; @@ -5400,23 +5579,25 @@ tcp_finalize_options(struct tcpcb *tp, struct tcpopt *to, unsigned int ifscope) tp->t_flags |= TF_RCVD_TSTMP; tp->ts_recent = to->to_tsval; tp->ts_recent_age = tcp_now; - } - if (to->to_flags & TOF_MSS) + if (to->to_flags & TOF_MSS) { tcp_mss(tp, to->to_mss, ifscope); + } if (SACK_ENABLED(tp)) { - if (!(to->to_flags & TOF_SACK)) + if (!(to->to_flags & TOF_SACK)) { tp->t_flagsext &= ~(TF_SACK_ENABLE); - else + } else { tp->t_flags |= TF_SACK_PERMIT; + } } if (to->to_flags & TOF_SCALE) { tp->t_flags |= TF_RCVD_SCALE; tp->requested_s_scale = to->to_requested_s_scale; /* Re-enable window scaling, if the option is received */ - if (tp->request_r_scale > 0) + if (tp->request_r_scale > 0) { tp->t_flags |= TF_REQ_SCALE; + } } } @@ -5440,16 +5621,18 @@ tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, int off) tp->t_iobc = *cp; tp->t_oobflags |= TCPOOB_HAVEDATA; - bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); + bcopy(cp + 1, cp, (unsigned)(m->m_len - cnt - 1)); m->m_len--; - if (m->m_flags & M_PKTHDR) + if (m->m_flags & M_PKTHDR) { m->m_pkthdr.len--; + } return; } cnt -= m->m_len; m = m->m_next; - if (m == 0) + if (m == 0) { break; + } } panic("tcp_pulloutofband"); } @@ -5458,7 +5641,7 @@ uint32_t get_base_rtt(struct tcpcb *tp) { struct rtentry *rt = tp->t_inpcb->inp_route.ro_rt; - return ((rt == NULL) ? 0 : rt->rtt_min); + return (rt == NULL) ? 0 : rt->rtt_min; } /* Each value of RTT base represents the minimum RTT seen in a minute. 
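[Editor's aside, not part of the patch: the hunks on either side of this point
restyle update_base_rtt(), which keeps a small per-route history of minimum
RTTs and the running minimum that get_base_rtt() returns above. The user-space
sketch below is a minimal model of that rolling-minimum scheme under stated
assumptions: the slot count, the one-minute slot width, and every sketch_*
name are illustrative; only the shape of the algorithm is taken from the
kernel code.]

#include <stdint.h>

#define SKETCH_NRTT_HIST 10      /* stands in for NRTT_HIST (value assumed) */
#define SKETCH_SLOT_MS   60000   /* one-minute slots, per the comment above */

struct sketch_rtt {
	uint32_t hist[SKETCH_NRTT_HIST]; /* per-slot minimum RTT, 0 = empty */
	uint32_t min;                    /* minimum across all live slots */
	uint32_t slot_start;             /* time the current slot was opened */
	int index;                       /* current slot */
};

static void
sketch_update(struct sketch_rtt *r, uint32_t now, uint32_t rtt)
{
	int i;

	if (now - r->slot_start >= SKETCH_SLOT_MS) {
		/*
		 * Slot expired: rotate, seed the new slot with this sample,
		 * and recompute the global minimum over the history.
		 */
		r->index = (r->index + 1) % SKETCH_NRTT_HIST;
		r->hist[r->index] = rtt;
		r->slot_start = now;
		r->min = 0;
		for (i = 0; i < SKETCH_NRTT_HIST; i++) {
			if (r->hist[i] != 0 &&
			    (r->min == 0 || r->hist[i] < r->min)) {
				r->min = r->hist[i];
			}
		}
	} else {
		/* Same slot: fold the sample into the slot and the total. */
		if (r->hist[r->index] == 0 || rtt < r->hist[r->index]) {
			r->hist[r->index] = rtt;
		}
		if (r->min == 0 || rtt < r->min) {
			r->min = rtt;
		}
	}
}

[A caller would feed this (tcp_now, t_rttcur) pairs as ACKs arrive; the
equivalent of get_base_rtt() is then just a read of r->min.]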
@@ -5470,8 +5653,9 @@ update_base_rtt(struct tcpcb *tp, uint32_t rtt) u_int32_t base_rtt, i; struct rtentry *rt; - if ((rt = tp->t_inpcb->inp_route.ro_rt) == NULL) + if ((rt = tp->t_inpcb->inp_route.ro_rt) == NULL) { return; + } if (rt->rtt_expire_ts == 0) { RT_LOCK_SPIN(rt); if (rt->rtt_expire_ts != 0) { @@ -5508,8 +5692,9 @@ update: if ((int)(tcp_now - rt->rtt_expire_ts) >= TCP_RTT_HISTORY_EXPIRE_TIME) { rt->rtt_index++; - if (rt->rtt_index >= NRTT_HIST) + if (rt->rtt_index >= NRTT_HIST) { rt->rtt_index = 0; + } rt->rtt_hist[rt->rtt_index] = rtt; rt->rtt_expire_ts = tcp_now; } else { @@ -5521,17 +5706,19 @@ update: for (i = 0; i < NRTT_HIST; ++i) { if (rt->rtt_hist[i] != 0 && (rt->rtt_min == 0 || - rt->rtt_hist[i] < rt->rtt_min)) + rt->rtt_hist[i] < rt->rtt_min)) { rt->rtt_min = rt->rtt_hist[i]; + } } RT_UNLOCK(rt); } else { rt->rtt_hist[rt->rtt_index] = min(rt->rtt_hist[rt->rtt_index], rtt); - if (rt->rtt_min == 0) + if (rt->rtt_min == 0) { rt->rtt_min = rtt; - else + } else { rt->rtt_min = min(rt->rtt_min, rtt); + } } } @@ -5569,17 +5756,17 @@ tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th) /* Compute the max of the pipeack samples */ pipe_ack_val = tcp_get_max_pipeack(tp); tp->t_pipeack = (pipe_ack_val > - TCP_CC_CWND_INIT_BYTES) ? - pipe_ack_val : 0; + TCP_CC_CWND_INIT_BYTES) ? + pipe_ack_val : 0; } /* start another measurement */ tp->t_rtttime = 0; } if (((to->to_flags & TOF_TS) != 0) && - (to->to_tsecr != 0) && - TSTMP_GEQ(tcp_now, to->to_tsecr)) { + (to->to_tsecr != 0) && + TSTMP_GEQ(tcp_now, to->to_tsecr)) { tcp_xmit_timer(tp, (tcp_now - to->to_tsecr), - to->to_tsecr, th->th_ack); + to->to_tsecr, th->th_ack); } else if (rtt > 0) { tcp_xmit_timer(tp, rtt, 0, th->th_ack); } @@ -5591,7 +5778,7 @@ tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th) */ static void tcp_xmit_timer(struct tcpcb *tp, int rtt, - u_int32_t tsecr, tcp_seq th_ack) + u_int32_t tsecr, tcp_seq th_ack) { int delta; @@ -5603,8 +5790,9 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt, */ if (tp->t_inpcb->inp_last_outifp != NULL && (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) && - th_ack == tp->iss + 1) + th_ack == tp->iss + 1) { return; + } if (tp->t_flagsext & TF_RECOMPUTE_RTT) { if (SEQ_GT(th_ack, tp->snd_una) && @@ -5623,8 +5811,9 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt, tp->t_rttvar = max(tp->t_rttvar_prev, (rtt >> 1)); tp->t_rttvar = tp->t_rttvar << TCP_RTTVAR_SHIFT; - if (tp->t_rttbest > (tp->t_srtt + tp->t_rttvar)) + if (tp->t_rttbest > (tp->t_srtt + tp->t_rttvar)) { tp->t_rttbest = tp->t_srtt + tp->t_rttvar; + } goto compute_rto; } else { @@ -5655,10 +5844,11 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt, * tcp_now, this extra adjustment is not needed. */ delta = (rtt << TCP_DELTA_SHIFT) - - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); + - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); - if ((tp->t_srtt += delta) <= 0) + if ((tp->t_srtt += delta) <= 0) { tp->t_srtt = 1; + } /* * We accumulate a smoothed rtt variance (actually, a @@ -5670,14 +5860,17 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt, * (rttvar = rttvar*3/4 + |delta| / 4). This replaces * rfc793's wired-in beta. 
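 * (Editor's note: with the standard gains this means srtt chases each new
 * sample with weight 1/8 while rttvar chases the new deviation with weight
 * 1/4, so a single outlier moves the RTO inputs only fractionally.)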
*/ - if (delta < 0) + if (delta < 0) { delta = -delta; + } delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); - if ((tp->t_rttvar += delta) <= 0) + if ((tp->t_rttvar += delta) <= 0) { tp->t_rttvar = 1; - if (tp->t_rttbest == 0 || - tp->t_rttbest > (tp->t_srtt + tp->t_rttvar)) + } + if (tp->t_rttbest == 0 || + tp->t_rttbest > (tp->t_srtt + tp->t_rttvar)) { tp->t_rttbest = tp->t_srtt + tp->t_rttvar; + } } else { /* * No rtt measurement yet - use the unsmoothed rtt. @@ -5690,7 +5883,7 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt, compute_rto: nstat_route_rtt(tp->t_inpcb->inp_route.ro_rt, tp->t_srtt, - tp->t_rttvar); + tp->t_rttvar); /* * the retransmit should happen at rtt + 4 * rttvar. @@ -5704,8 +5897,8 @@ compute_rto: * the minimum feasible timer (which is 2 ticks). */ TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), - max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX, - TCP_ADD_REXMTSLOP(tp)); + max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX, + TCP_ADD_REXMTSLOP(tp)); /* * We received an ack for a packet that wasn't retransmitted; @@ -5733,12 +5926,13 @@ tcp_maxmtu(struct rtentry *rt) interface_mtu -= CLAT46_HDR_EXPANSION_OVERHD; } - if (rt->rt_rmx.rmx_mtu == 0) + if (rt->rt_rmx.rmx_mtu == 0) { maxmtu = interface_mtu; - else + } else { maxmtu = MIN(rt->rt_rmx.rmx_mtu, interface_mtu); + } - return (maxmtu); + return maxmtu; } #if INET6 @@ -5749,18 +5943,22 @@ tcp_maxmtu6(struct rtentry *rt) struct nd_ifinfo *ndi = NULL; RT_LOCK_ASSERT_HELD(rt); - if ((ndi = ND_IFINFO(rt->rt_ifp)) != NULL && !ndi->initialized) + if ((ndi = ND_IFINFO(rt->rt_ifp)) != NULL && !ndi->initialized) { ndi = NULL; - if (ndi != NULL) + } + if (ndi != NULL) { lck_mtx_lock(&ndi->lock); - if (rt->rt_rmx.rmx_mtu == 0) + } + if (rt->rt_rmx.rmx_mtu == 0) { maxmtu = IN6_LINKMTU(rt->rt_ifp); - else + } else { maxmtu = MIN(rt->rt_rmx.rmx_mtu, IN6_LINKMTU(rt->rt_ifp)); - if (ndi != NULL) + } + if (ndi != NULL) { lck_mtx_unlock(&ndi->lock); + } - return (maxmtu); + return maxmtu; } #endif @@ -5781,7 +5979,7 @@ get_maxmtu(struct rtentry *rt) RT_UNLOCK(rt); - return (maxmtu); + return maxmtu; } /* @@ -5830,8 +6028,8 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) inp = tp->t_inpcb; #if INET6 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; - min_protoh = isipv6 ? sizeof (struct ip6_hdr) + sizeof (struct tcphdr) - : sizeof (struct tcpiphdr); + min_protoh = isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + : sizeof(struct tcpiphdr); #else #define min_protoh (sizeof (struct tcpiphdr)) #endif @@ -5839,8 +6037,7 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) #if INET6 if (isipv6) { rt = tcp_rtlookup6(inp, input_ifscope); - } - else + } else #endif /* INET6 */ { rt = tcp_rtlookup(inp, input_ifscope); @@ -5850,9 +6047,9 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) if (rt == NULL) { tp->t_maxopd = tp->t_maxseg = #if INET6 - isipv6 ? tcp_v6mssdflt : + isipv6 ? tcp_v6mssdflt : #endif /* INET6 */ - tcp_mssdflt; + tcp_mssdflt; return; } ifp = rt->rt_ifp; @@ -5874,19 +6071,20 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) * Offer == -1 means that we didn't receive SYN yet, * use cached value in that case; */ - if (offer == -1) + if (offer == -1) { offer = taop->tao_mssopt; + } /* * Offer == 0 means that there was no MSS on the SYN segment, * in this case we use tcp_mssdflt. */ - if (offer == 0) + if (offer == 0) { offer = #if INET6 - isipv6 ? tcp_v6mssdflt : + isipv6 ? 
tcp_v6mssdflt : #endif /* INET6 */ - tcp_mssdflt; - else { + tcp_mssdflt; + } else { /* * Prevent DoS attack with too small MSS. Round up * to at least minmss. @@ -5929,12 +6127,14 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) if (rt->rt_rmx.rmx_mtu == 0) { #if INET6 if (isipv6) { - if (!isnetlocal) + if (!isnetlocal) { mss = min(mss, tcp_v6mssdflt); + } } else #endif /* INET6 */ - if (!isnetlocal) + if (!isnetlocal) { mss = min(mss, tcp_mssdflt); + } } mss = min(mss, offer); @@ -5951,10 +6151,11 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) * origoffer==-1 indicates, that no segments were received yet. * In this case we just guess. */ - if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && + if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && (origoffer == -1 || - (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) + (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) { mss -= TCPOLEN_TSTAMP_APPA; + } #if MPTCP mss -= mptcp_adj_mss(tp, FALSE); @@ -5978,13 +6179,14 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) bufsize = rt->rt_rmx.rmx_sendpipe; if (bufsize < so->so_snd.sb_hiwat) #endif - bufsize = so->so_snd.sb_hiwat; - if (bufsize < mss) + bufsize = so->so_snd.sb_hiwat; + if (bufsize < mss) { mss = bufsize; - else { + } else { bufsize = (((bufsize + (u_int64_t)mss - 1) / (u_int64_t)mss) * (u_int64_t)mss); - if (bufsize > sb_max_corrected) + if (bufsize > sb_max_corrected) { bufsize = sb_max_corrected; + } (void)sbreserve(&so->so_snd, bufsize); } tp->t_maxseg = mss; @@ -6001,11 +6203,12 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) bufsize = rt->rt_rmx.rmx_recvpipe; if (bufsize < so->so_rcv.sb_hiwat) #endif - bufsize = so->so_rcv.sb_hiwat; + bufsize = so->so_rcv.sb_hiwat; if (bufsize > mss) { bufsize = (((bufsize + (u_int64_t)mss - 1) / (u_int64_t)mss) * (u_int64_t)mss); - if (bufsize > sb_max_corrected) + if (bufsize > sb_max_corrected) { bufsize = sb_max_corrected; + } (void)sbreserve(&so->so_rcv, bufsize); } @@ -6028,8 +6231,9 @@ tcp_mss(struct tcpcb *tp, int offer, unsigned int input_ifscope) * Set the slow-start flight size depending on whether this * is a local network or not. */ - if (CC_ALGO(tp)->cwnd_init != NULL) + if (CC_ALGO(tp)->cwnd_init != NULL) { CC_ALGO(tp)->cwnd_init(tp); + } tcp_ccdbg_trace(tp, NULL, TCP_CC_CWND_INIT); @@ -6052,24 +6256,24 @@ tcp_mssopt(struct tcpcb *tp) #if INET6 isipv6 = ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) ? 1 : 0; - min_protoh = isipv6 ? sizeof (struct ip6_hdr) + sizeof (struct tcphdr) - : sizeof (struct tcpiphdr); + min_protoh = isipv6 ? sizeof(struct ip6_hdr) + sizeof(struct tcphdr) + : sizeof(struct tcpiphdr); #else #define min_protoh (sizeof (struct tcpiphdr)) #endif #if INET6 - if (isipv6) + if (isipv6) { rt = tcp_rtlookup6(tp->t_inpcb, IFSCOPE_NONE); - else + } else #endif /* INET6 */ rt = tcp_rtlookup(tp->t_inpcb, IFSCOPE_NONE); if (rt == NULL) { - return ( + return #if INET6 - isipv6 ? tcp_v6mssdflt : + isipv6 ? 
tcp_v6mssdflt : #endif /* INET6 */ - tcp_mssdflt); + tcp_mssdflt; } /* * Slower link window correction: @@ -6095,7 +6299,7 @@ tcp_mssopt(struct tcpcb *tp) mss = necp_socket_get_effective_mtu(tp->t_inpcb, mss); #endif /* NECP */ - return (mss - min_protoh); + return mss - min_protoh; } /* @@ -6107,32 +6311,34 @@ tcp_mssopt(struct tcpcb *tp) static void tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) { - tcp_seq onxt = tp->snd_nxt; - u_int32_t ocwnd = tp->snd_cwnd; - tp->t_timer[TCPT_REXMT] = 0; - tp->t_timer[TCPT_PTO] = 0; - tp->t_rtttime = 0; - tp->snd_nxt = th->th_ack; - /* - * Set snd_cwnd to one segment beyond acknowledged offset - * (tp->snd_una has not yet been updated when this function - * is called) - */ - tp->snd_cwnd = tp->t_maxseg + BYTES_ACKED(th, tp); - tp->t_flags |= TF_ACKNOW; - (void) tcp_output(tp); - tp->snd_cwnd = ocwnd; - if (SEQ_GT(onxt, tp->snd_nxt)) - tp->snd_nxt = onxt; - /* - * Partial window deflation. Relies on fact that tp->snd_una - * not updated yet. - */ - if (tp->snd_cwnd > BYTES_ACKED(th, tp)) - tp->snd_cwnd -= BYTES_ACKED(th, tp); - else - tp->snd_cwnd = 0; - tp->snd_cwnd += tp->t_maxseg; + tcp_seq onxt = tp->snd_nxt; + u_int32_t ocwnd = tp->snd_cwnd; + tp->t_timer[TCPT_REXMT] = 0; + tp->t_timer[TCPT_PTO] = 0; + tp->t_rtttime = 0; + tp->snd_nxt = th->th_ack; + /* + * Set snd_cwnd to one segment beyond acknowledged offset + * (tp->snd_una has not yet been updated when this function + * is called) + */ + tp->snd_cwnd = tp->t_maxseg + BYTES_ACKED(th, tp); + tp->t_flags |= TF_ACKNOW; + (void) tcp_output(tp); + tp->snd_cwnd = ocwnd; + if (SEQ_GT(onxt, tp->snd_nxt)) { + tp->snd_nxt = onxt; + } + /* + * Partial window deflation. Relies on fact that tp->snd_una + * not updated yet. + */ + if (tp->snd_cwnd > BYTES_ACKED(th, tp)) { + tp->snd_cwnd -= BYTES_ACKED(th, tp); + } else { + tp->snd_cwnd = 0; + } + tp->snd_cwnd += tp->t_maxseg; } /* @@ -6156,11 +6362,13 @@ tcp_dropdropablreq(struct socket *head) struct inpcb *inp = NULL; struct tcpcb *tp; - if ((head->so_options & SO_ACCEPTCONN) == 0) - return (0); + if ((head->so_options & SO_ACCEPTCONN) == 0) { + return 0; + } - if (TAILQ_EMPTY(&head->so_incomp)) - return (0); + if (TAILQ_EMPTY(&head->so_incomp)) { + return 0; + } so_acquire_accept_list(head, NULL); socket_unlock(head, 0); @@ -6176,8 +6384,8 @@ tcp_dropdropablreq(struct socket *head) tp = intotcpcb(inp); if (tp != NULL && tp->t_state == TCPS_CLOSED && so->so_head != NULL && - (so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) == - (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) { + (so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) == + (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) { /* * The listen socket is already locked but we * can lock this socket here without lock ordering @@ -6203,15 +6411,17 @@ tcp_dropdropablreq(struct socket *head) } qlen = head->so_incqlen; - if (rnd == 0) + if (rnd == 0) { rnd = RandomULong(); + } if (++cur_cnt > qlen || old_cnt > qlen) { rnd = (314159 * rnd + 66329) & 0xffff; j = ((qlen + 1) * rnd) >> 16; - while (j-- && so) + while (j-- && so) { so = TAILQ_NEXT(so, so_list); + } } /* Find a connection that is not already closing (or being served) */ while (so) { @@ -6231,7 +6441,7 @@ tcp_dropdropablreq(struct socket *head) so->so_usecount++; if ((so->so_usecount == 2) && (so->so_state & SS_INCOMP) && - !(so->so_flags & SOF_INCOMP_INPROGRESS)) { + !(so->so_flags & SOF_INCOMP_INPROGRESS)) { break; } else { /* @@ -6256,7 +6466,7 @@ tcp_dropdropablreq(struct socket *head) if (so == NULL) { 
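		/*
		 * Editor's note: no eligible victim was found on the
		 * incomplete queue, so reacquire the listener lock, hand
		 * the accept list back, and report that no connection
		 * was dropped.
		 */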
socket_lock(head, 0); so_release_accept_list(head); - return (0); + return 0; } /* Makes sure socket is still in the right state to be discarded */ @@ -6265,7 +6475,7 @@ tcp_dropdropablreq(struct socket *head) socket_unlock(so, 1); socket_lock(head, 0); so_release_accept_list(head); - return (0); + return 0; } found_victim: @@ -6274,7 +6484,7 @@ found_victim: socket_unlock(so, 1); socket_lock(head, 0); so_release_accept_list(head); - return (0); + return 0; } socket_lock(head, 0); @@ -6339,7 +6549,7 @@ found_victim: tcpstat.tcps_drops++; socket_lock(head, 0); - return(1); + return 1; } /* Set background congestion control on a socket */ @@ -6353,10 +6563,11 @@ tcp_set_background_cc(struct socket *so) void tcp_set_foreground_cc(struct socket *so) { - if (tcp_use_newreno) + if (tcp_use_newreno) { tcp_set_new_cc(so, TCP_CC_ALGO_NEWRENO_INDEX); - else + } else { tcp_set_new_cc(so, TCP_CC_ALGO_CUBIC_INDEX); + } } static void @@ -6366,17 +6577,18 @@ tcp_set_new_cc(struct socket *so, uint16_t cc_index) struct tcpcb *tp = intotcpcb(inp); u_char old_cc_index = 0; if (tp->tcp_cc_index != cc_index) { - old_cc_index = tp->tcp_cc_index; - if (CC_ALGO(tp)->cleanup != NULL) + if (CC_ALGO(tp)->cleanup != NULL) { CC_ALGO(tp)->cleanup(tp); + } tp->tcp_cc_index = cc_index; tcp_cc_allocate_state(tp); - if (CC_ALGO(tp)->switch_to != NULL) + if (CC_ALGO(tp)->switch_to != NULL) { CC_ALGO(tp)->switch_to(tp, old_cc_index); + } tcp_ccdbg_trace(tp, NULL, TCP_CC_CHANGE_ALGO); } @@ -6385,8 +6597,9 @@ tcp_set_new_cc(struct socket *so, uint16_t cc_index) void tcp_set_recv_bg(struct socket *so) { - if (!IS_TCP_RECV_BG(so)) + if (!IS_TCP_RECV_BG(so)) { so->so_flags1 |= SOF1_TRAFFIC_MGT_TCP_RECVBG; + } /* Unset Large Receive Offload on background sockets */ so_set_lro(so, SO_TC_BK); @@ -6395,8 +6608,9 @@ tcp_set_recv_bg(struct socket *so) void tcp_clear_recv_bg(struct socket *so) { - if (IS_TCP_RECV_BG(so)) + if (IS_TCP_RECV_BG(so)) { so->so_flags1 &= ~(SOF1_TRAFFIC_MGT_TCP_RECVBG); + } /* * Set/unset use of Large Receive Offload depending on @@ -6413,8 +6627,9 @@ inp_fc_unthrottle_tcp(struct inpcb *inp) * Back off the slow-start threshold and enter * congestion avoidance phase */ - if (CC_ALGO(tp)->pre_fr != NULL) + if (CC_ALGO(tp)->pre_fr != NULL) { CC_ALGO(tp)->pre_fr(tp); + } tp->snd_cwnd = tp->snd_ssthresh; tp->t_flagsext &= ~TF_CWND_NONVALIDATED; @@ -6480,8 +6695,9 @@ tcp_getstat SYSCTL_HANDLER_ARGS NULL); } } - if (caller != PROC_NULL) + if (caller != PROC_NULL) { proc_rele(caller); + } if (tcp_disable_access_to_stats && !kauth_cred_issuser(kauth_cred_get())) { bzero(&zero_stat, sizeof(zero_stat)); @@ -6491,13 +6707,12 @@ tcp_getstat SYSCTL_HANDLER_ARGS #endif /* !CONFIG_EMBEDDED */ if (req->oldptr == 0) { - req->oldlen= (size_t)sizeof(struct tcpstat); + req->oldlen = (size_t)sizeof(struct tcpstat); } - error = SYSCTL_OUT(req, stat, MIN(sizeof (tcpstat), req->oldlen)); - - return (error); + error = SYSCTL_OUT(req, stat, MIN(sizeof(tcpstat), req->oldlen)); + return error; } /* @@ -6513,11 +6728,12 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) struct ip *ip = mtod(m, struct ip *); struct ipovly *ipov = (struct ipovly *)ip; - if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM) - return (0); + if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM) { + return 0; + } /* ip_stripoptions() must have been called before we get here */ - ASSERT((ip->ip_hl << 2) == sizeof (*ip)); + ASSERT((ip->ip_hl << 2) == sizeof(*ip)); if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags 
& PKTF_LOOP)) && @@ -6549,7 +6765,7 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) uint32_t swbytes = (uint32_t)trailer; if (start < off) { - ip->ip_len += sizeof (*ip); + ip->ip_len += sizeof(*ip); #if BYTE_ORDER != BIG_ENDIAN HTONS(ip->ip_len); HTONS(ip->ip_off); @@ -6558,23 +6774,26 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) /* callee folds in sum */ sum = m_adj_sum16(m, start, off, tlen, sum); - if (off > start) + if (off > start) { swbytes += (off - start); - else + } else { swbytes += (start - off); + } if (start < off) { #if BYTE_ORDER != BIG_ENDIAN NTOHS(ip->ip_off); NTOHS(ip->ip_len); #endif /* BYTE_ORDER != BIG_ENDIAN */ - ip->ip_len -= sizeof (*ip); + ip->ip_len -= sizeof(*ip); } - if (swbytes != 0) + if (swbytes != 0) { tcp_in_cksum_stats(swbytes); - if (trailer != 0) + } + if (trailer != 0) { m_adj(m, -trailer); + } } /* callee folds in sum */ @@ -6588,16 +6807,16 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) int len; char b[9]; - bcopy(ipov->ih_x1, b, sizeof (ipov->ih_x1)); - bzero(ipov->ih_x1, sizeof (ipov->ih_x1)); + bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1)); + bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); ip_sum = ipov->ih_len; ipov->ih_len = (u_short)tlen; #if BYTE_ORDER != BIG_ENDIAN HTONS(ipov->ih_len); #endif - len = sizeof (struct ip) + tlen; + len = sizeof(struct ip) + tlen; th->th_sum = in_cksum(m, len); - bcopy(b, ipov->ih_x1, sizeof (ipov->ih_x1)); + bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1)); ipov->ih_len = ip_sum; tcp_in_cksum_stats(len); @@ -6608,8 +6827,9 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) case AF_INET6: { struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); - if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM) - return (0); + if (m->m_pkthdr.pkt_flags & PKTF_SW_LRO_DID_CSUM) { + return 0; + } if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags & PKTF_LOOP)) && @@ -6636,7 +6856,7 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { s = ip6->ip6_src.s6_addr16[1]; - ip6->ip6_src.s6_addr16[1] = 0 ; + ip6->ip6_src.s6_addr16[1] = 0; } if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { d = ip6->ip6_dst.s6_addr16[1]; @@ -6646,25 +6866,30 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) /* callee folds in sum */ sum = m_adj_sum16(m, start, off, tlen, sum); - if (off > start) + if (off > start) { swbytes += (off - start); - else + } else { swbytes += (start - off); + } - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { ip6->ip6_src.s6_addr16[1] = s; - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) + } + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { ip6->ip6_dst.s6_addr16[1] = d; + } - if (swbytes != 0) + if (swbytes != 0) { tcp_in6_cksum_stats(swbytes); - if (trailer != 0) + } + if (trailer != 0) { m_adj(m, -trailer); + } } th->th_sum = in6_pseudo( - &ip6->ip6_src, &ip6->ip6_dst, - sum + htonl(tlen + IPPROTO_TCP)); + &ip6->ip6_src, &ip6->ip6_dst, + sum + htonl(tlen + IPPROTO_TCP)); } th->th_sum ^= 0xffff; } else { @@ -6682,10 +6907,10 @@ tcp_input_checksum(int af, struct mbuf *m, struct tcphdr *th, int off, int tlen) if (th->th_sum != 0) { tcpstat.tcps_rcvbadsum++; IF_TCP_STATINC(ifp, badformat); - return (-1); + return -1; } - return (0); + return 0; } @@ -6701,8 +6926,9 @@ sysctl_rexmtthresh SYSCTL_HANDLER_ARGS int error, val = tcprexmtthresh; error = sysctl_handle_int(oidp, 
&val, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } /* * Constrain the number of duplicate ACKs @@ -6710,14 +6936,15 @@ sysctl_rexmtthresh SYSCTL_HANDLER_ARGS * to either 2 or 3 */ - if (val < 2 || val > 3) - return (EINVAL); + if (val < 2 || val > 3) { + return EINVAL; + } - tcprexmtthresh = val; + tcprexmtthresh = val; - return (0); + return 0; } SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmt_thresh, CTLTYPE_INT | CTLFLAG_RW | - CTLFLAG_LOCKED, &tcprexmtthresh, 0, &sysctl_rexmtthresh, "I", - "Duplicate ACK Threshold for Fast Retransmit"); + CTLFLAG_LOCKED, &tcprexmtthresh, 0, &sysctl_rexmtthresh, "I", + "Duplicate ACK Threshold for Fast Retransmit"); diff --git a/bsd/netinet/tcp_ledbat.c b/bsd/netinet/tcp_ledbat.c index fd066b42a..f2a588b27 100644 --- a/bsd/netinet/tcp_ledbat.c +++ b/bsd/netinet/tcp_ledbat.c @@ -82,61 +82,63 @@ struct tcp_cc_algo tcp_cc_ledbat = { .switch_to = tcp_ledbat_switch_cc }; -/* Target queuing delay in milliseconds. This includes the processing - * and scheduling delay on both of the end-hosts. A LEDBAT sender tries +/* Target queuing delay in milliseconds. This includes the processing + * and scheduling delay on both of the end-hosts. A LEDBAT sender tries * to keep queuing delay below this limit. When the queuing delay - * goes above this limit, a LEDBAT sender will start reducing the + * goes above this limit, a LEDBAT sender will start reducing the * congestion window. * - * The LEDBAT draft says that target queue delay MUST be 100 ms for + * The LEDBAT draft says that target queue delay MUST be 100 ms for * inter-operability. */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED, - int, target_qdelay, 100, "Target queuing delay"); + int, target_qdelay, 100, "Target queuing delay"); /* Allowed increase and tether are used to place an upper bound on * congestion window based on the amount of data that is outstanding. - * This will limit the congestion window when the amount of data in + * This will limit the congestion window when the amount of data in * flight is little because the application is writing to the socket - * intermittently and is preventing the connection from becoming idle . + * intermittently and is preventing the connection from becoming idle . * * max_allowed_cwnd = allowed_increase + (tether * flight_size) * cwnd = min(cwnd, max_allowed_cwnd) * * 'Allowed_increase' parameter is set to 8. If the flight size is zero, then * we want the congestion window to be at least 8 packets to reduce the - * delay induced by delayed ack. This helps when the receiver is acking + * delay induced by delayed ack. This helps when the receiver is acking * more than 2 packets at a time (stretching acks for better performance). - * + * * 'Tether' is also set to 2. We do not want this to limit the growth of cwnd * during slow-start. - */ + */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED, - int, allowed_increase, 8, - "Additive constant used to calculate max allowed congestion window"); + int, allowed_increase, 8, + "Additive constant used to calculate max allowed congestion window"); /* Left shift for cwnd to get tether value of 2 */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED, - int, tether_shift, 1, "Tether shift for max allowed congestion window"); + int, tether_shift, 1, "Tether shift for max allowed congestion window"); -/* Start with an initial window of 2. 
This will help to get more accurate +/* Start with an initial window of 2. This will help to get more accurate * minimum RTT measurement in the beginning. It will help to probe * the path slowly and will not add to the existing delay if the path is * already congested. Using 2 packets will reduce the delay induced by delayed-ack. */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED, - uint32_t, bg_ss_fltsz, 2, "Initial congestion window for background transport"); + uint32_t, bg_ss_fltsz, 2, "Initial congestion window for background transport"); extern int rtt_samples_per_slot; -static void update_cwnd(struct tcpcb *tp, uint32_t incr) { +static void +update_cwnd(struct tcpcb *tp, uint32_t incr) +{ uint32_t max_allowed_cwnd = 0, flight_size = 0; uint32_t base_rtt; base_rtt = get_base_rtt(tp); /* If we do not have a good RTT measurement yet, increment - * congestion window by the default value. + * congestion window by the default value. */ if (base_rtt == 0 || tp->t_rttcur == 0) { tp->snd_cwnd += incr; @@ -150,30 +152,32 @@ static void update_cwnd(struct tcpcb *tp, uint32_t incr) { * * Move background slow-start threshold to current * congestion window so that the next time (after some idle - * period), we can attempt to do slow-start till here if there + * period), we can attempt to do slow-start till here if there * is no increase in rtt */ - if (tp->bg_ssthresh < tp->snd_cwnd) + if (tp->bg_ssthresh < tp->snd_cwnd) { tp->bg_ssthresh = tp->snd_cwnd; - tp->snd_cwnd += incr; - + } + tp->snd_cwnd += incr; } else { - /* In response to an increase in rtt, reduce the congestion - * window by one-eighth. This will help to yield immediately + /* In response to an increase in rtt, reduce the congestion + * window by one-eighth. This will help to yield immediately * to a competing stream. 
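 * (Editor's note: concretely, a 16-segment window gives up 16 >> 3 == 2
 * segments per delay signal, and the floor a few lines down keeps the
 * window from ever dropping below bg_ss_fltsz segments.)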
*/ uint32_t redwin; - redwin = tp->snd_cwnd >> 3; + redwin = tp->snd_cwnd >> 3; tp->snd_cwnd -= redwin; - if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg) + if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg) { tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg; + } - /* Lower background slow-start threshold so that the connection + /* Lower background slow-start threshold so that the connection * will go into congestion avoidance phase */ - if (tp->bg_ssthresh > tp->snd_cwnd) + if (tp->bg_ssthresh > tp->snd_cwnd) { tp->bg_ssthresh = tp->snd_cwnd; + } } check_max: /* Calculate the outstanding flight size and restrict the @@ -181,40 +185,46 @@ check_max: */ flight_size = tp->snd_max - tp->snd_una; - max_allowed_cwnd = (allowed_increase * tp->t_maxseg) - + (flight_size << tether_shift); + max_allowed_cwnd = (allowed_increase * tp->t_maxseg) + + (flight_size << tether_shift); tp->snd_cwnd = min(tp->snd_cwnd, max_allowed_cwnd); return; } -int tcp_ledbat_init(struct tcpcb *tp) { +int +tcp_ledbat_init(struct tcpcb *tp) +{ #pragma unused(tp) OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets); return 0; } -int tcp_ledbat_cleanup(struct tcpcb *tp) { +int +tcp_ledbat_cleanup(struct tcpcb *tp) +{ #pragma unused(tp) OSDecrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets); return 0; } -/* Initialize the congestion window for a connection - * +/* Initialize the congestion window for a connection + * */ void -tcp_ledbat_cwnd_init(struct tcpcb *tp) { +tcp_ledbat_cwnd_init(struct tcpcb *tp) +{ tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz; tp->bg_ssthresh = tp->snd_ssthresh; } -/* Function to handle an in-sequence ack which is fast-path processing - * of an in sequence ack in tcp_input function (called as header prediction). +/* Function to handle an in-sequence ack which is fast-path processing + * of an in sequence ack in tcp_input function (called as header prediction). * This gets called only during congestion avoidance phase. */ void -tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th) { +tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th) +{ int acked = 0; u_int32_t incr = 0; @@ -232,7 +242,8 @@ tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th) { /* Function to process an ack. */ void -tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) { +tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) +{ /* * RFC 3465 - Appropriate Byte Counting. * @@ -269,33 +280,39 @@ tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) { u_int abc_lim; abc_lim = (tcp_do_rfc3465_lim2 && - tp->snd_nxt == tp->snd_max) ? incr * 2 : incr; + tp->snd_nxt == tp->snd_max) ? 
incr * 2 : incr; incr = lmin(acked, abc_lim); } - if (tp->t_bytes_acked >= cw) + if (tp->t_bytes_acked >= cw) { tp->t_bytes_acked -= cw; - if (incr > 0) + } + if (incr > 0) { update_cwnd(tp, incr); + } } void -tcp_ledbat_pre_fr(struct tcpcb *tp) { +tcp_ledbat_pre_fr(struct tcpcb *tp) +{ uint32_t win; - win = min(tp->snd_wnd, tp->snd_cwnd) / - 2 / tp->t_maxseg; - if ( win < 2 ) + win = min(tp->snd_wnd, tp->snd_cwnd) / + 2 / tp->t_maxseg; + if (win < 2) { win = 2; - tp->snd_ssthresh = win * tp->t_maxseg; - if (tp->bg_ssthresh > tp->snd_ssthresh) + } + tp->snd_ssthresh = win * tp->t_maxseg; + if (tp->bg_ssthresh > tp->snd_ssthresh) { tp->bg_ssthresh = tp->snd_ssthresh; + } tcp_cc_resize_sndbuf(tp); } void -tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) { +tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) +{ int32_t ss; ss = tp->snd_max - th->th_ack; @@ -309,14 +326,15 @@ tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) { * would be inclined to send a burst, better to do * it via the slow start mechanism. * - * If the flight size is zero, then make congestion - * window to be worth at least 2 segments to avoid + * If the flight size is zero, then make congestion + * window to be worth at least 2 segments to avoid * delayed acknowledgement (draft-ietf-tcpm-rfc3782-bis-05). */ - if (ss < (int32_t)tp->snd_ssthresh) + if (ss < (int32_t)tp->snd_ssthresh) { tp->snd_cwnd = max(ss, tp->t_maxseg) + tp->t_maxseg; - else + } else { tp->snd_cwnd = tp->snd_ssthresh; + } tp->t_bytes_acked = 0; } @@ -326,13 +344,13 @@ tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) { * Clear base history after idle time. */ void -tcp_ledbat_after_idle(struct tcpcb *tp) { - +tcp_ledbat_after_idle(struct tcpcb *tp) +{ /* Reset the congestion window */ tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz; } -/* Function to change the congestion window when the retransmit +/* Function to change the congestion window when the retransmit * timer fires. The behavior is the same as that for best-effort * TCP, reduce congestion window to one segment and start probing * the link using "slow start". The slow start threshold is set @@ -340,15 +358,18 @@ tcp_ledbat_after_idle(struct tcpcb *tp) { * threshold also. */ void -tcp_ledbat_after_timeout(struct tcpcb *tp) { - if (tp->t_state >= TCPS_ESTABLISHED) { +tcp_ledbat_after_timeout(struct tcpcb *tp) +{ + if (tp->t_state >= TCPS_ESTABLISHED) { u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; - if (win < 2) + if (win < 2) { win = 2; + } tp->snd_ssthresh = win * tp->t_maxseg; - if (tp->bg_ssthresh > tp->snd_ssthresh) + if (tp->bg_ssthresh > tp->snd_ssthresh) { tp->bg_ssthresh = tp->snd_ssthresh; + } tp->snd_cwnd = tp->t_maxseg; tcp_cc_resize_sndbuf(tp); @@ -359,44 +380,50 @@ tcp_ledbat_after_timeout(struct tcpcb *tp) { * Indicate whether this ack should be delayed. * We can delay the ack if: * - our last ack wasn't a 0-sized window. - * - the peer hasn't sent us a TH_PUSH data packet: if he did, take this - * as a clue that we need to ACK without any delay. This helps higher - * level protocols who won't send us more data even if the window is - * open because their last "segment" hasn't been ACKed + * - the peer hasn't sent us a TH_PUSH data packet: if he did, take this + * as a clue that we need to ACK without any delay. 
This helps higher + * level protocols who won't send us more data even if the window is + * open because their last "segment" hasn't been ACKed * Otherwise the receiver will ack every other full-sized segment or when the - * delayed ack timer fires. This will help to generate better rtt estimates for + * delayed ack timer fires. This will help to generate better rtt estimates for * the other end if it is a ledbat sender. - * + * */ int -tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th) { +tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th) +{ if ((tp->t_flags & TF_RXWIN0SENT) == 0 && - (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1)) - return(1); - return(0); + (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1)) { + return 1; + } + return 0; } /* Change a connection to use ledbat. First, lower bg_ssthresh value - * if it needs to be. + * if it needs to be. */ void -tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index) { +tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index) +{ #pragma unused(old_cc_index) uint32_t cwnd; - if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh) + if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh) { tp->bg_ssthresh = tp->snd_ssthresh; + } cwnd = min(tp->snd_wnd, tp->snd_cwnd); - if (tp->snd_cwnd > tp->bg_ssthresh) + if (tp->snd_cwnd > tp->bg_ssthresh) { cwnd = cwnd / tp->t_maxseg; - else + } else { cwnd = cwnd / 2 / tp->t_maxseg; + } - if (cwnd < bg_ss_fltsz) + if (cwnd < bg_ss_fltsz) { cwnd = bg_ss_fltsz; + } tp->snd_cwnd = cwnd * tp->t_maxseg; tp->t_bytes_acked = 0; diff --git a/bsd/netinet/tcp_lro.c b/bsd/netinet/tcp_lro.c index bc1f21b6d..baacc13f3 100644 --- a/bsd/netinet/tcp_lro.c +++ b/bsd/netinet/tcp_lro.c @@ -2,7 +2,7 @@ * Copyright (c) 2011-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -60,20 +60,20 @@ unsigned int lro_good_flushes = 0; unsigned int coalesc_sz = LRO_MX_COALESCE_PKTS; SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro_sz, CTLFLAG_RW | CTLFLAG_LOCKED, - &coalesc_sz, 0, "Max coalescing size"); + &coalesc_sz, 0, "Max coalescing size"); unsigned int coalesc_time = LRO_MX_TIME_TO_BUFFER; SYSCTL_INT(_net_inet_tcp, OID_AUTO, lro_time, CTLFLAG_RW | CTLFLAG_LOCKED, - &coalesc_time, 0, "Max coalescing time"); + &coalesc_time, 0, "Max coalescing time"); -struct lro_flow lro_flow_list[TCP_LRO_NUM_FLOWS]; +struct lro_flow lro_flow_list[TCP_LRO_NUM_FLOWS]; -char lro_flow_map[TCP_LRO_FLOW_MAP]; +char lro_flow_map[TCP_LRO_FLOW_MAP]; -static lck_attr_t *tcp_lro_mtx_attr = NULL; /* mutex attributes */ -static lck_grp_t *tcp_lro_mtx_grp = NULL; /* mutex group */ -static lck_grp_attr_t *tcp_lro_mtx_grp_attr = NULL; /* mutex group attrs */ -decl_lck_mtx_data( ,tcp_lro_lock); /* Used to synchronize updates */ +static lck_attr_t *tcp_lro_mtx_attr = NULL; /* mutex attributes */ +static lck_grp_t *tcp_lro_mtx_grp = NULL; /* mutex group */ +static lck_grp_attr_t *tcp_lro_mtx_grp_attr = NULL; /* mutex group attrs */ +decl_lck_mtx_data(, tcp_lro_lock); /* Used to synchronize updates */ unsigned int lro_byte_count = 0; @@ -86,15 +86,15 @@ thread_call_t tcp_lro_timer; extern u_int32_t kipf_count; -static void tcp_lro_timer_proc(void*, void*); -static void lro_update_stats(struct mbuf*); -static void lro_update_flush_stats(struct mbuf *); -static void tcp_lro_flush_flows(void); -static void tcp_lro_sched_timer(uint64_t); -static void lro_proto_input(struct mbuf *); +static void tcp_lro_timer_proc(void*, void*); +static void lro_update_stats(struct mbuf*); +static void lro_update_flush_stats(struct mbuf *); +static void tcp_lro_flush_flows(void); +static void tcp_lro_sched_timer(uint64_t); +static void lro_proto_input(struct mbuf *); -static struct mbuf *lro_tcp_xsum_validate(struct mbuf*, struct ip *, - struct tcphdr*); +static struct mbuf *lro_tcp_xsum_validate(struct mbuf*, struct ip *, + struct tcphdr*); static struct mbuf *tcp_lro_process_pkt(struct mbuf*, int); void @@ -102,7 +102,7 @@ tcp_lro_init(void) { int i; - bzero(lro_flow_list, sizeof (struct lro_flow) * TCP_LRO_NUM_FLOWS); + bzero(lro_flow_list, sizeof(struct lro_flow) * TCP_LRO_NUM_FLOWS); for (i = 0; i < TCP_LRO_FLOW_MAP; i++) { lro_flow_map[i] = TCP_LRO_FLOW_UNINIT; } @@ -124,16 +124,16 @@ tcp_lro_init(void) } static int -tcp_lro_matching_tuple(struct ip* ip_hdr, struct tcphdr *tcp_hdr, int *hash, - int *flow_id ) +tcp_lro_matching_tuple(struct ip* ip_hdr, struct tcphdr *tcp_hdr, int *hash, + int *flow_id ) { struct lro_flow *flow; tcp_seq seqnum; unsigned int off = 0; int payload_len = 0; - *hash = LRO_HASH(ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, - tcp_hdr->th_sport, tcp_hdr->th_dport, (TCP_LRO_FLOW_MAP - 1)); + *hash = LRO_HASH(ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, + tcp_hdr->th_sport, tcp_hdr->th_dport, (TCP_LRO_FLOW_MAP - 1)); *flow_id = lro_flow_map[*hash]; if (*flow_id == TCP_LRO_FLOW_NOTFOUND) { @@ -147,46 +147,46 @@ tcp_lro_matching_tuple(struct ip* ip_hdr, struct tcphdr *tcp_hdr, int *hash, flow = &lro_flow_list[*flow_id]; if ((flow->lr_faddr.s_addr == ip_hdr->ip_src.s_addr) && - (flow->lr_laddr.s_addr == ip_hdr->ip_dst.s_addr) && - (flow->lr_fport == tcp_hdr->th_sport) && - (flow->lr_lport == tcp_hdr->th_dport)) { + (flow->lr_laddr.s_addr == ip_hdr->ip_dst.s_addr) && + (flow->lr_fport == tcp_hdr->th_sport) && + (flow->lr_lport == tcp_hdr->th_dport)) { 
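		/*
		 * Editor's note: lr_tcphdr == NULL means no packet is
		 * currently being coalesced for this flow, so a segment
		 * can only start a run if it lands exactly on the expected
		 * sequence number recorded in lr_seq when the flow was
		 * set up.
		 */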
if (flow->lr_tcphdr == NULL) { if (ntohl(seqnum) == flow->lr_seq) { return TCP_LRO_COALESCE; } if (lrodebug >= 4) { printf("%s: seqnum = %x, lr_seq = %x\n", - __func__, ntohl(seqnum), flow->lr_seq); + __func__, ntohl(seqnum), flow->lr_seq); } lro_seq_mismatch++; if (SEQ_GT(ntohl(seqnum), flow->lr_seq)) { lro_seq_outoforder++; - /* + /* * Whenever we receive out of order packets it - * signals loss and recovery and LRO doesn't + * signals loss and recovery and LRO doesn't * let flows recover quickly. So eject. */ - flow->lr_flags |= LRO_EJECT_REQ; - + flow->lr_flags |= LRO_EJECT_REQ; } return TCP_LRO_NAN; } if (flow->lr_flags & LRO_EJECT_REQ) { - if (lrodebug) + if (lrodebug) { printf("%s: eject. \n", __func__); + } return TCP_LRO_EJECT_FLOW; } - if (SEQ_GT(tcp_hdr->th_ack, flow->lr_tcphdr->th_ack)) { + if (SEQ_GT(tcp_hdr->th_ack, flow->lr_tcphdr->th_ack)) { if (lrodebug) { - printf("%s: th_ack = %x flow_ack = %x \n", - __func__, tcp_hdr->th_ack, - flow->lr_tcphdr->th_ack); + printf("%s: th_ack = %x flow_ack = %x \n", + __func__, tcp_hdr->th_ack, + flow->lr_tcphdr->th_ack); } return TCP_LRO_EJECT_FLOW; } - if (ntohl(seqnum) == (ntohl(lro_flow_list[*flow_id].lr_tcphdr->th_seq) + lro_flow_list[*flow_id].lr_len)) { + if (ntohl(seqnum) == (ntohl(lro_flow_list[*flow_id].lr_tcphdr->th_seq) + lro_flow_list[*flow_id].lr_len)) { return TCP_LRO_COALESCE; } else { /* LRO does not handle loss recovery well, eject */ @@ -194,13 +194,15 @@ tcp_lro_matching_tuple(struct ip* ip_hdr, struct tcphdr *tcp_hdr, int *hash, return TCP_LRO_EJECT_FLOW; } } - if (lrodebug) printf("tcp_lro_matching_tuple: collision \n"); + if (lrodebug) { + printf("tcp_lro_matching_tuple: collision \n"); + } return TCP_LRO_COLLISION; } static void -tcp_lro_init_flow(int flow_id, struct ip* ip_hdr, struct tcphdr *tcp_hdr, - int hash, u_int32_t timestamp, int payload_len) +tcp_lro_init_flow(int flow_id, struct ip* ip_hdr, struct tcphdr *tcp_hdr, + int hash, u_int32_t timestamp, int payload_len) { struct lro_flow *flow = NULL; @@ -219,9 +221,9 @@ tcp_lro_init_flow(int flow_id, struct ip* ip_hdr, struct tcphdr *tcp_hdr, } static void -tcp_lro_coalesce(int flow_id, struct mbuf *lro_mb, struct tcphdr *tcphdr, - int payload_len, int drop_hdrlen, struct tcpopt *topt, - u_int32_t* tsval, u_int32_t* tsecr, int thflags) +tcp_lro_coalesce(int flow_id, struct mbuf *lro_mb, struct tcphdr *tcphdr, + int payload_len, int drop_hdrlen, struct tcpopt *topt, + u_int32_t* tsval, u_int32_t* tsecr, int thflags) { struct lro_flow *flow = NULL; struct mbuf *last; @@ -229,9 +231,10 @@ tcp_lro_coalesce(int flow_id, struct mbuf *lro_mb, struct tcphdr *tcphdr, flow = &lro_flow_list[flow_id]; if (flow->lr_mhead) { - if (lrodebug) + if (lrodebug) { printf("%s: lr_mhead %x %d \n", __func__, flow->lr_seq, - payload_len); + payload_len); + } m_adj(lro_mb, drop_hdrlen); last = flow->lr_mtail; @@ -251,33 +254,33 @@ tcp_lro_coalesce(int flow_id, struct mbuf *lro_mb, struct tcphdr *tcphdr, } flow->lr_len += payload_len; flow->lr_seq += payload_len; - /* - * This bit is re-OR'd each time a packet is added to the + /* + * This bit is re-OR'd each time a packet is added to the * large coalesced packet. 
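 * (Editor's note: lro_npkts travels in the pkthdr so that, when the merged
 * chain is finally delivered, the stack can still credit one wire packet per
 * original segment, per the tcps_rcvpack note just below.)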
*/ flow->lr_mhead->m_pkthdr.pkt_flags |= PKTF_SW_LRO_PKT; flow->lr_mhead->m_pkthdr.lro_npkts++; /* for tcpstat.tcps_rcvpack */ - if (flow->lr_mhead->m_pkthdr.lro_pktlen < - lro_mb->m_pkthdr.lro_pktlen) { - /* - * For TCP Inter Arrival Jitter calculation, return max + if (flow->lr_mhead->m_pkthdr.lro_pktlen < + lro_mb->m_pkthdr.lro_pktlen) { + /* + * For TCP Inter Arrival Jitter calculation, return max * size encountered while coalescing a stream of pkts. */ - flow->lr_mhead->m_pkthdr.lro_pktlen = - lro_mb->m_pkthdr.lro_pktlen; + flow->lr_mhead->m_pkthdr.lro_pktlen = + lro_mb->m_pkthdr.lro_pktlen; } - /* Update the timestamp value */ + /* Update the timestamp value */ if (topt->to_flags & TOF_TS) { - if ((flow->lr_tsval) && - (TSTMP_GT(topt->to_tsval, ntohl(*(flow->lr_tsval))))) { + if ((flow->lr_tsval) && + (TSTMP_GT(topt->to_tsval, ntohl(*(flow->lr_tsval))))) { *(flow->lr_tsval) = htonl(topt->to_tsval); } if ((flow->lr_tsecr) && - (topt->to_tsecr != 0) && - (TSTMP_GT(topt->to_tsecr, ntohl(*(flow->lr_tsecr))))) { + (topt->to_tsecr != 0) && + (TSTMP_GT(topt->to_tsecr, ntohl(*(flow->lr_tsecr))))) { if (lrodebug >= 2) { - printf("%s: instantaneous RTT = %d \n", __func__, - topt->to_tsecr - ntohl(*(flow->lr_tsecr))); + printf("%s: instantaneous RTT = %d \n", __func__, + topt->to_tsecr - ntohl(*(flow->lr_tsecr))); } *(flow->lr_tsecr) = htonl(topt->to_tsecr); } @@ -296,19 +299,19 @@ tcp_lro_coalesce(int flow_id, struct mbuf *lro_mb, struct tcphdr *tcphdr, if ((topt) && (topt->to_flags & TOF_TS)) { ASSERT(tsval != NULL); ASSERT(tsecr != NULL); - flow->lr_tsval = tsval; + flow->lr_tsval = tsval; flow->lr_tsecr = tsecr; - } + } flow->lr_len = payload_len; calculate_tcp_clock(); flow->lr_timestamp = tcp_now; tcp_lro_sched_timer(0); - } + } flow->lr_seq = ntohl(tcphdr->th_seq) + payload_len; } - if (lro_mb) { + if (lro_mb) { tcpstat.tcps_coalesced_pack++; - } + } return; } @@ -321,7 +324,7 @@ tcp_lro_eject_flow(int flow_id) ASSERT(lro_flow_map[lro_flow_list[flow_id].lr_hash_map] == flow_id); lro_flow_map[lro_flow_list[flow_id].lr_hash_map] = TCP_LRO_FLOW_UNINIT; bzero(&lro_flow_list[flow_id], sizeof(struct lro_flow)); - + return mb; } @@ -330,27 +333,27 @@ tcp_lro_eject_coalesced_pkt(int flow_id) { struct mbuf *mb = NULL; mb = lro_flow_list[flow_id].lr_mhead; - lro_flow_list[flow_id].lr_mhead = - lro_flow_list[flow_id].lr_mtail = NULL; + lro_flow_list[flow_id].lr_mhead = + lro_flow_list[flow_id].lr_mtail = NULL; lro_flow_list[flow_id].lr_tcphdr = NULL; return mb; } static struct mbuf* -tcp_lro_insert_flow(struct mbuf *lro_mb, struct ip *ip_hdr, - struct tcphdr *tcp_hdr, int payload_len, - int drop_hdrlen, int hash, struct tcpopt *topt, - u_int32_t *tsval, u_int32_t *tsecr) +tcp_lro_insert_flow(struct mbuf *lro_mb, struct ip *ip_hdr, + struct tcphdr *tcp_hdr, int payload_len, + int drop_hdrlen, int hash, struct tcpopt *topt, + u_int32_t *tsval, u_int32_t *tsecr) { int i; int slot_available = 0; - int candidate_flow = 0; + int candidate_flow = 0; u_int32_t oldest_timestamp; struct mbuf *mb = NULL; int collision = 0; oldest_timestamp = tcp_now; - + /* handle collision */ if (lro_flow_map[hash] != TCP_LRO_FLOW_UNINIT) { if (lrodebug) { @@ -381,21 +384,20 @@ kick_flow: if (lrodebug) { if (!slot_available) { - printf("%s: slot unavailable.\n",__func__); + printf("%s: slot unavailable.\n", __func__); } if (collision) { - printf("%s: collision.\n",__func__); + printf("%s: collision.\n", __func__); } } } else { candidate_flow = i; /* this is now the flow to be used */ - } - tcp_lro_init_flow(candidate_flow, 
ip_hdr, tcp_hdr, hash, - tcp_now, payload_len); - tcp_lro_coalesce(candidate_flow, lro_mb, tcp_hdr, payload_len, - drop_hdrlen, topt, tsval, tsecr, 0); + tcp_lro_init_flow(candidate_flow, ip_hdr, tcp_hdr, hash, + tcp_now, payload_len); + tcp_lro_coalesce(candidate_flow, lro_mb, tcp_hdr, payload_len, + drop_hdrlen, topt, tsval, tsecr, 0); return mb; } @@ -418,21 +420,21 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) u_int8_t ecn; struct ip *ip_hdr; struct tcphdr *tcp_hdr; - + if (lro_mb->m_len < drop_hdrlen) { if ((lro_mb = m_pullup(lro_mb, drop_hdrlen)) == NULL) { tcpstat.tcps_rcvshort++; - m_freem(lro_mb); + m_freem(lro_mb); if (lrodebug) { printf("tcp_lro_process_pkt:mbuf too short.\n"); } - return (NULL); + return NULL; } } - + ip_hdr = mtod(lro_mb, struct ip*); tcp_hdr = (struct tcphdr *)((caddr_t)ip_hdr + sizeof(struct ip)); - + /* Just in case */ lro_mb->m_pkthdr.pkt_flags &= ~PKTF_SW_LRO_DID_CSUM; @@ -440,7 +442,7 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) if (lrodebug) { printf("tcp_lro_process_pkt: TCP xsum failed.\n"); } - return (NULL); + return NULL; } /* Update stats */ @@ -450,7 +452,7 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) lro_mb->m_pkthdr.pkt_flags |= PKTF_SW_LRO_DID_CSUM; off = tcp_hdr->th_off << 2; - optlen = off - sizeof (struct tcphdr); + optlen = off - sizeof(struct tcphdr); payload_len = ip_hdr->ip_len - off; optp = (u_char *)(tcp_hdr + 1); /* @@ -462,17 +464,17 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) */ bzero(&to, sizeof(to)); if ((optlen == TCPOLEN_TSTAMP_APPA || - (optlen > TCPOLEN_TSTAMP_APPA && - optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && - *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) && - (tcp_hdr->th_flags & TH_SYN) == 0) { - to.to_flags |= TOF_TS; - to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4)); - to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8)); + (optlen > TCPOLEN_TSTAMP_APPA && + optp[TCPOLEN_TSTAMP_APPA] == TCPOPT_EOL)) && + *(u_int32_t *)optp == htonl(TCPOPT_TSTAMP_HDR) && + (tcp_hdr->th_flags & TH_SYN) == 0) { + to.to_flags |= TOF_TS; + to.to_tsval = ntohl(*(u_int32_t *)(void *)(optp + 4)); + to.to_tsecr = ntohl(*(u_int32_t *)(void *)(optp + 8)); } else { /* - * If TCP timestamps are not in use, or not the first option, - * skip LRO path since timestamps are used to avoid LRO + * If TCP timestamps are not in use, or not the first option, + * skip LRO path since timestamps are used to avoid LRO * from introducing additional latencies for retransmissions * and other slow-paced transmissions. 
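The single 32-bit compare against htonl(TCPOPT_TSTAMP_HDR) above is the classic BSD fast path for the common "timestamps only" option block: two NOPs for alignment followed by the timestamp option. The packed constant, with the option values as they appear in the stock netinet headers:

#define TCPOPT_NOP              1
#define TCPOPT_TIMESTAMP        8
#define TCPOLEN_TIMESTAMP       10
#define TCPOLEN_TSTAMP_APPA     (TCPOLEN_TIMESTAMP + 2)  /* NOP,NOP prepended */
#define TCPOPT_TSTAMP_HDR       \
        (TCPOPT_NOP << 24 | TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 | TCPOLEN_TIMESTAMP)
/* == 0x0101080a: one aligned load and compare replaces a full option walk */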
*/ @@ -481,17 +483,17 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) } /* list all the conditions that can trigger a flow ejection here */ - + thflags = tcp_hdr->th_flags; - if (thflags & (TH_SYN | TH_URG | TH_ECE | TH_CWR | TH_PUSH | TH_RST | TH_FIN)) { + if (thflags & (TH_SYN | TH_URG | TH_ECE | TH_CWR | TH_PUSH | TH_RST | TH_FIN)) { eject_flow = tcpflags = 1; - } - - if (optlen && !((optlen == TCPOLEN_TSTAMP_APPA) && - (to.to_flags & TOF_TS))) { + } + + if (optlen && !((optlen == TCPOLEN_TSTAMP_APPA) && + (to.to_flags & TOF_TS))) { eject_flow = unknown_tcpopts = 1; - } - + } + if (payload_len <= LRO_MIN_COALESC_SZ) { /* zero payload ACK */ eject_flow = 1; } @@ -519,19 +521,19 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) break; case TCP_LRO_COALESCE: - if ((payload_len != 0) && (unknown_tcpopts == 0) && - (tcpflags == 0) && (ecn != IPTOS_ECN_CE) && (to.to_flags & TOF_TS)) { + if ((payload_len != 0) && (unknown_tcpopts == 0) && + (tcpflags == 0) && (ecn != IPTOS_ECN_CE) && (to.to_flags & TOF_TS)) { tcp_lro_coalesce(flow_id, lro_mb, tcp_hdr, payload_len, - drop_hdrlen, &to, - (to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 4) : NULL, - (to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 8) : NULL, - thflags); - if (lrodebug >= 2) { + drop_hdrlen, &to, + (to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 4) : NULL, + (to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 8) : NULL, + thflags); + if (lrodebug >= 2) { printf("tcp_lro_process_pkt: coalesce len = %d. flow_id = %d payload_len = %d drop_hdrlen = %d optlen = %d lport = %d seqnum = %x.\n", - lro_flow_list[flow_id].lr_len, flow_id, - payload_len, drop_hdrlen, optlen, - ntohs(lro_flow_list[flow_id].lr_lport), - ntohl(tcp_hdr->th_seq)); + lro_flow_list[flow_id].lr_len, flow_id, + payload_len, drop_hdrlen, optlen, + ntohs(lro_flow_list[flow_id].lr_lport), + ntohl(tcp_hdr->th_seq)); } if (lro_flow_list[flow_id].lr_mhead->m_pkthdr.lro_npkts >= coalesc_sz) { eject_flow = 1; @@ -541,9 +543,9 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) if (eject_flow) { mb = tcp_lro_eject_coalesced_pkt(flow_id); lro_flow_list[flow_id].lr_seq = ntohl(tcp_hdr->th_seq) + - payload_len; - calculate_tcp_clock(); - u_int8_t timestamp = tcp_now - lro_flow_list[flow_id].lr_timestamp; + payload_len; + calculate_tcp_clock(); + u_int8_t timestamp = tcp_now - lro_flow_list[flow_id].lr_timestamp; lck_mtx_unlock(&tcp_lro_lock); if (mb) { mb->m_pkthdr.lro_elapsed = timestamp; @@ -566,8 +568,9 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) u_int8_t timestamp = tcp_now - lro_flow_list[flow_id].lr_timestamp; lck_mtx_unlock(&tcp_lro_lock); if (mb) { - if (lrodebug) + if (lrodebug) { printf("tcp_lro_process_pkt eject_flow, len = %d\n", mb->m_pkthdr.len); + } mb->m_pkthdr.lro_elapsed = timestamp; lro_proto_input(mb); } @@ -583,13 +586,13 @@ tcp_lro_process_pkt(struct mbuf *lro_mb, int drop_hdrlen) default: lck_mtx_unlock(&tcp_lro_lock); panic_plain("%s: unrecognized type %d", __func__, retval); - break; + break; } if (ret_response == TCP_LRO_FLOW_NOTFOUND) { lro_proto_input(lro_mb); } - return (NULL); + return NULL; } static void @@ -616,17 +619,17 @@ tcp_lro_flush_flows(void) while (i < TCP_LRO_NUM_FLOWS) { flow = &lro_flow_list[i]; if (flow->lr_mhead != NULL) { - if (!tcpclock_updated) { calculate_tcp_clock(); tcpclock_updated = 1; } - if (lrodebug >= 2) + if (lrodebug >= 2) { printf("tcp_lro_flush_flows: len =%d n_pkts = %d %d %d \n", - flow->lr_len, - flow->lr_mhead->m_pkthdr.lro_npkts, - 
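Collected in one place, the ejection triggers listed above amount to a small predicate: any control/urgent/ECN flag, any option block other than the pure timestamp layout, or a payload too small to be worth batching. A hedged restatement with invented names (thresholds passed in rather than asserted):

#include <stdbool.h>
#include <stdint.h>
#include <netinet/tcp.h>        /* TH_SYN ... TH_FIN */

static bool
lro_should_eject(uint8_t thflags, int optlen, bool ts_only_opts,
    int payload_len, int min_coalesce_sz)
{
        if (thflags & (TH_SYN | TH_URG | TH_ECE | TH_CWR | TH_PUSH | TH_RST | TH_FIN)) {
                return true;    /* control, urgent or ECN-marked traffic: don't batch */
        }
        if (optlen != 0 && !ts_only_opts) {
                return true;    /* unknown options must reach TCP unmodified */
        }
        return payload_len <= min_coalesce_sz;  /* bare ACKs flush the flow */
}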
flow->lr_timestamp, tcp_now); + flow->lr_len, + flow->lr_mhead->m_pkthdr.lro_npkts, + flow->lr_timestamp, tcp_now); + } u_int8_t timestamp = tcp_now - flow->lr_timestamp; @@ -660,11 +663,11 @@ tcp_lro_sched_timer(uint64_t hint) lro_timer_set = 1; if (!hint) { /* the intent is to wake up every coalesc_time msecs */ - clock_interval_to_deadline(coalesc_time, - (NSEC_PER_SEC / TCP_RETRANSHZ), &lro_deadline); + clock_interval_to_deadline(coalesc_time, + (NSEC_PER_SEC / TCP_RETRANSHZ), &lro_deadline); } else { clock_interval_to_deadline(hint, NSEC_PER_SEC / TCP_RETRANSHZ, - &lro_deadline); + &lro_deadline); } thread_call_enter_delayed(tcp_lro_timer, lro_deadline); } @@ -677,60 +680,64 @@ tcp_lro(struct mbuf *m, unsigned int hlen) struct tcphdr * tcp_hdr = NULL; unsigned int off = 0; - if (kipf_count != 0) - return (m); + if (kipf_count != 0) { + return m; + } - /* - * Experiments on cellular show that the RTT is much higher + /* + * Experiments on cellular show that the RTT is much higher * than the coalescing time of 5 msecs, causing lro to flush - * 80% of the time on a single packet. Increasing - * coalescing time for cellular does not show marked + * 80% of the time on a single packet. Increasing + * coalescing time for cellular does not show marked * improvement to throughput either. Loopback perf is hurt * by the 5 msec latency and it already sends large packets. */ if (IFNET_IS_CELLULAR(m->m_pkthdr.rcvif) || - (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) { - return (m); + (m->m_pkthdr.rcvif->if_type == IFT_LOOP)) { + return m; } ip_hdr = mtod(m, struct ip*); /* don't deal with IP options */ - if (hlen != sizeof (struct ip)) - return (m); + if (hlen != sizeof(struct ip)) { + return m; + } /* only TCP is coalesced */ if (ip_hdr->ip_p != IPPROTO_TCP) { - return (m); + return m; } - if (m->m_len < (int32_t) sizeof (struct tcpiphdr)) { - if (lrodebug) printf("tcp_lro m_pullup \n"); - if ((m = m_pullup(m, sizeof (struct tcpiphdr))) == NULL) { - tcpstat.tcps_rcvshort++; + if (m->m_len < (int32_t) sizeof(struct tcpiphdr)) { + if (lrodebug) { + printf("tcp_lro m_pullup \n"); + } + if ((m = m_pullup(m, sizeof(struct tcpiphdr))) == NULL) { + tcpstat.tcps_rcvshort++; if (lrodebug) { printf("ip_lro: rcvshort.\n"); } - return (NULL); + return NULL; } ip_hdr = mtod(m, struct ip*); } tcp_hdr = (struct tcphdr *)((caddr_t)ip_hdr + hlen); - tlen = ip_hdr->ip_len ; //ignore IP header bytes len + tlen = ip_hdr->ip_len; //ignore IP header bytes len m->m_pkthdr.lro_pktlen = tlen; /* Used to return max pkt encountered to tcp */ m->m_pkthdr.lro_npkts = 1; /* Initialize a counter to hold num pkts coalesced */ m->m_pkthdr.lro_elapsed = 0; /* Initialize the field to carry elapsed time */ off = tcp_hdr->th_off << 2; - if (off < sizeof (struct tcphdr) || off > tlen) { - tcpstat.tcps_rcvbadoff++; + if (off < sizeof(struct tcphdr) || off > tlen) { + tcpstat.tcps_rcvbadoff++; if (lrodebug) { printf("ip_lro: TCP off greater than TCP header.\n"); } - return (m); + return m; } - return (tcp_lro_process_pkt(m, hlen + off)); + return tcp_lro_process_pkt(m, hlen + off); } static void @@ -739,8 +746,8 @@ lro_proto_input(struct mbuf *m) struct ip* ip_hdr = mtod(m, struct ip*); if (lrodebug >= 3) { - printf("lro_proto_input: ip_len = %d \n", - ip_hdr->ip_len); + printf("lro_proto_input: ip_len = %d \n", + ip_hdr->ip_len); } lro_update_stats(m); ip_proto_dispatch_in_wrapper(m, ip_hdr->ip_hl << 2, ip_hdr->ip_p); @@ -753,34 +760,35 @@ lro_tcp_xsum_validate(struct mbuf *m, struct ip *ip, struct tcphdr * th) 
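The scale factor passed to clock_interval_to_deadline() above is worth spelling out: TCP_RETRANSHZ is 1000 ticks per second in xnu, so NSEC_PER_SEC / TCP_RETRANSHZ is one million nanoseconds and the interval argument is interpreted in milliseconds. Scheduling a flush 10 msecs out therefore looks like this (call shapes as used in the hunk above):

uint64_t deadline;

/* interval * (NSEC_PER_SEC / TCP_RETRANSHZ) ns == interval in msecs */
clock_interval_to_deadline(10, NSEC_PER_SEC / TCP_RETRANSHZ, &deadline);
thread_call_enter_delayed(tcp_lro_timer, deadline);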
MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); /* we shouldn't get here for IP with options; hence sizeof (ip) */ - if (tcp_input_checksum(AF_INET, m, th, sizeof (*ip), ip->ip_len)) { - if (lrodebug) + if (tcp_input_checksum(AF_INET, m, th, sizeof(*ip), ip->ip_len)) { + if (lrodebug) { printf("%s: bad xsum and drop m = 0x%llx.\n", __func__, - (uint64_t)VM_KERNEL_ADDRPERM(m)); + (uint64_t)VM_KERNEL_ADDRPERM(m)); + } m_freem(m); - return (NULL); + return NULL; } - return (m); + return m; } /* - * When TCP detects a stable, steady flow without out of ordering, + * When TCP detects a stable, steady flow without out of ordering, * with a sufficiently high cwnd, it invokes LRO. */ int -tcp_start_coalescing(struct ip *ip_hdr, struct tcphdr *tcp_hdr, int tlen) +tcp_start_coalescing(struct ip *ip_hdr, struct tcphdr *tcp_hdr, int tlen) { int hash; int flow_id; struct mbuf *eject_mb; struct lro_flow *lf; - hash = LRO_HASH(ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, - tcp_hdr->th_sport, tcp_hdr->th_dport, - (TCP_LRO_FLOW_MAP - 1)); + hash = LRO_HASH(ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, + tcp_hdr->th_sport, tcp_hdr->th_dport, + (TCP_LRO_FLOW_MAP - 1)); + - lck_mtx_lock_spin(&tcp_lro_lock); flow_id = lro_flow_map[hash]; if (flow_id != TCP_LRO_FLOW_NOTFOUND) { @@ -789,21 +797,21 @@ tcp_start_coalescing(struct ip *ip_hdr, struct tcphdr *tcp_hdr, int tlen) (lf->lr_laddr.s_addr == ip_hdr->ip_dst.s_addr) && (lf->lr_fport == tcp_hdr->th_sport) && (lf->lr_lport == tcp_hdr->th_dport)) { - if ((lf->lr_tcphdr == NULL) && - (lf->lr_seq != (tcp_hdr->th_seq + tlen))) { + if ((lf->lr_tcphdr == NULL) && + (lf->lr_seq != (tcp_hdr->th_seq + tlen))) { lf->lr_seq = tcp_hdr->th_seq + tlen; - } + } lf->lr_flags &= ~LRO_EJECT_REQ; } - lck_mtx_unlock(&tcp_lro_lock); + lck_mtx_unlock(&tcp_lro_lock); return 0; } HTONL(tcp_hdr->th_seq); HTONL(tcp_hdr->th_ack); - eject_mb = - tcp_lro_insert_flow(NULL, ip_hdr, tcp_hdr, tlen, 0, hash, - NULL, NULL, NULL); + eject_mb = + tcp_lro_insert_flow(NULL, ip_hdr, tcp_hdr, tlen, 0, hash, + NULL, NULL, NULL); lck_mtx_unlock(&tcp_lro_lock); @@ -811,8 +819,8 @@ tcp_start_coalescing(struct ip *ip_hdr, struct tcphdr *tcp_hdr, int tlen) NTOHL(tcp_hdr->th_ack); if (lrodebug >= 3) { printf("%s: src = %x dst = %x sport = %d dport = %d seq %x \n", - __func__, ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, - tcp_hdr->th_sport, tcp_hdr->th_dport, tcp_hdr->th_seq); + __func__, ip_hdr->ip_src.s_addr, ip_hdr->ip_dst.s_addr, + tcp_hdr->th_sport, tcp_hdr->th_dport, tcp_hdr->th_seq); } ASSERT(eject_mb == NULL); return 0; @@ -820,17 +828,17 @@ tcp_start_coalescing(struct ip *ip_hdr, struct tcphdr *tcp_hdr, int tlen) /* * When TCP detects loss or idle condition, it stops offloading - * to LRO. + * to LRO. 
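tcp_start_coalescing() above keys the flow table by the connection 4-tuple through LRO_HASH and masks the result into the 1024-entry lro_flow_map. The macro itself lives in tcp_lro.h; an illustrative stand-in (not the kernel's actual hash) showing the shape:

#include <stdint.h>

static int
lro_hash4(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport,
    uint32_t mask)
{
        uint32_t h = saddr ^ daddr ^ ((uint32_t)sport << 16 | dport);

        h ^= h >> 16;                   /* fold high bits into the low ones */
        return (int)(h & mask);         /* mask == TCP_LRO_FLOW_MAP - 1 */
}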
*/ int -tcp_lro_remove_state(struct in_addr saddr, struct in_addr daddr, - unsigned short sport, unsigned short dport) +tcp_lro_remove_state(struct in_addr saddr, struct in_addr daddr, + unsigned short sport, unsigned short dport) { int hash, flow_id; struct lro_flow *lf; hash = LRO_HASH(daddr.s_addr, saddr.s_addr, dport, sport, - (TCP_LRO_FLOW_MAP - 1)); + (TCP_LRO_FLOW_MAP - 1)); lck_mtx_lock_spin(&tcp_lro_lock); flow_id = lro_flow_map[hash]; if (flow_id == TCP_LRO_FLOW_UNINIT) { @@ -838,13 +846,13 @@ tcp_lro_remove_state(struct in_addr saddr, struct in_addr daddr, return 0; } lf = &lro_flow_list[flow_id]; - if ((lf->lr_faddr.s_addr == daddr.s_addr) && + if ((lf->lr_faddr.s_addr == daddr.s_addr) && (lf->lr_laddr.s_addr == saddr.s_addr) && (lf->lr_fport == dport) && (lf->lr_lport == sport)) { if (lrodebug) { - printf("%s: %x %x\n", __func__, - lf->lr_flags, lf->lr_seq); + printf("%s: %x %x\n", __func__, + lf->lr_flags, lf->lr_seq); } lf->lr_flags |= LRO_EJECT_REQ; } @@ -854,13 +862,13 @@ tcp_lro_remove_state(struct in_addr saddr, struct in_addr daddr, void tcp_update_lro_seq(__uint32_t rcv_nxt, struct in_addr saddr, struct in_addr daddr, - unsigned short sport, unsigned short dport) + unsigned short sport, unsigned short dport) { int hash, flow_id; struct lro_flow *lf; - hash = LRO_HASH(daddr.s_addr, saddr.s_addr, dport, sport, - (TCP_LRO_FLOW_MAP - 1)); + hash = LRO_HASH(daddr.s_addr, saddr.s_addr, dport, sport, + (TCP_LRO_FLOW_MAP - 1)); lck_mtx_lock_spin(&tcp_lro_lock); flow_id = lro_flow_map[hash]; if (flow_id == TCP_LRO_FLOW_UNINIT) { @@ -882,21 +890,21 @@ tcp_update_lro_seq(__uint32_t rcv_nxt, struct in_addr saddr, struct in_addr dadd static void lro_update_stats(struct mbuf *m) { - switch(m->m_pkthdr.lro_npkts) { + switch (m->m_pkthdr.lro_npkts) { case 0: /* fall through */ - case 1: + case 1: break; - - case 2: + + case 2: tcpstat.tcps_lro_twopack++; break; - + case 3: /* fall through */ case 4: tcpstat.tcps_lro_multpack++; break; - - default: + + default: tcpstat.tcps_lro_largepack++; break; } @@ -907,7 +915,7 @@ static void lro_update_flush_stats(struct mbuf *m) { lro_flushes++; - switch(m->m_pkthdr.lro_npkts) { + switch (m->m_pkthdr.lro_npkts) { case 0: ASSERT(0); case 1: lro_single_flushes++; break; diff --git a/bsd/netinet/tcp_lro.h b/bsd/netinet/tcp_lro.h index 9f1fe01c9..50ef3d841 100644 --- a/bsd/netinet/tcp_lro.h +++ b/bsd/netinet/tcp_lro.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,28 +31,28 @@ #ifdef BSD_KERNEL_PRIVATE -#define TCP_LRO_NUM_FLOWS (16) /* must be <= 255 for char lro_flow_map */ +#define TCP_LRO_NUM_FLOWS (16) /* must be <= 255 for char lro_flow_map */ #define TCP_LRO_FLOW_MAP (1024) struct lro_flow { - struct mbuf *lr_mhead; /* coalesced mbuf chain head */ - struct mbuf *lr_mtail; /* coalesced mbuf chain tail */ - struct tcphdr *lr_tcphdr; /* ptr to TCP hdr in frame */ - u_int32_t *lr_tsval; /* address of tsval in frame */ - u_int32_t *lr_tsecr; /* tsecr field in TCP header */ - tcp_seq lr_seq; /* next expected seq num */ - unsigned int lr_len; /* length of LRO frame */ - struct in_addr lr_faddr; /* foreign address */ - struct in_addr lr_laddr; /* local address */ - unsigned short int lr_fport; /* foreign port */ - unsigned short int lr_lport; /* local port */ - u_int32_t lr_timestamp; /* for ejecting the flow */ - unsigned short int lr_hash_map; /* back pointer to hash map */ - unsigned short int lr_flags; /* pad */ + struct mbuf *lr_mhead; /* coalesced mbuf chain head */ + struct mbuf *lr_mtail; /* coalesced mbuf chain tail */ + struct tcphdr *lr_tcphdr; /* ptr to TCP hdr in frame */ + u_int32_t *lr_tsval; /* address of tsval in frame */ + u_int32_t *lr_tsecr; /* tsecr field in TCP header */ + tcp_seq lr_seq; /* next expected seq num */ + unsigned int lr_len; /* length of LRO frame */ + struct in_addr lr_faddr; /* foreign address */ + struct in_addr lr_laddr; /* local address */ + unsigned short int lr_fport; /* foreign port */ + unsigned short int lr_lport; /* local port */ + u_int32_t lr_timestamp; /* for ejecting the flow */ + unsigned short int lr_hash_map; /* back pointer to hash map */ + unsigned short int lr_flags; /* pad */ } __attribute__((aligned(8))); /* lr_flags - only 16 bits available */ -#define LRO_EJECT_REQ 0x1 +#define LRO_EJECT_REQ 0x1 #define TCP_LRO_FLOW_UNINIT TCP_LRO_NUM_FLOWS+1 @@ -68,7 +68,7 @@ struct lro_flow { /* * Max amount of time to wait before flushing flows in msecs. - * Units are in msecs. + * Units are in msecs. * This number has been carefully chosen and should be altered with care. */ #define LRO_MX_TIME_TO_BUFFER 10 diff --git a/bsd/netinet/tcp_newreno.c b/bsd/netinet/tcp_newreno.c index c0def7ffa..7e9e778ac 100644 --- a/bsd/netinet/tcp_newreno.c +++ b/bsd/netinet/tcp_newreno.c @@ -110,13 +110,17 @@ struct tcp_cc_algo tcp_cc_newreno = { .switch_to = tcp_newreno_switch_cc }; -int tcp_newreno_init(struct tcpcb *tp) { +int +tcp_newreno_init(struct tcpcb *tp) +{ #pragma unused(tp) OSIncrementAtomic((volatile SInt32 *)&tcp_cc_newreno.num_sockets); return 0; } -int tcp_newreno_cleanup(struct tcpcb *tp) { +int +tcp_newreno_cleanup(struct tcpcb *tp) +{ #pragma unused(tp) OSDecrementAtomic((volatile SInt32 *)&tcp_cc_newreno.num_sockets); return 0; @@ -132,7 +136,8 @@ int tcp_newreno_cleanup(struct tcpcb *tp) { * this is a local network or not. */ void -tcp_newreno_cwnd_init_or_reset(struct tcpcb *tp) { +tcp_newreno_cwnd_init_or_reset(struct tcpcb *tp) +{ tcp_cc_cwnd_init_or_reset(tp); } @@ -141,7 +146,8 @@ tcp_newreno_cwnd_init_or_reset(struct tcpcb *tp) { * This will get called from header prediction code. */ void -tcp_newreno_congestion_avd(struct tcpcb *tp, struct tcphdr *th) { +tcp_newreno_congestion_avd(struct tcpcb *tp, struct tcphdr *th) +{ uint32_t acked = 0; acked = BYTES_ACKED(th, tp); /* @@ -159,7 +165,8 @@ tcp_newreno_congestion_avd(struct tcpcb *tp, struct tcphdr *th) { /* Function to process an ack. 
*/ void -tcp_newreno_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) { +tcp_newreno_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) +{ /* * RFC 3465 - Appropriate Byte Counting. * @@ -179,7 +186,6 @@ tcp_newreno_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) { acked = BYTES_ACKED(th, tp); if (tcp_do_rfc3465) { - if (cw >= tp->snd_ssthresh) { tp->t_bytes_acked += acked; if (tp->t_bytes_acked >= cw) { @@ -199,8 +205,8 @@ tcp_newreno_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) { */ uint32_t abc_lim; abc_lim = (tcp_do_rfc3465_lim2 && - tp->snd_nxt == tp->snd_max) ? incr * 2 - : incr; + tp->snd_nxt == tp->snd_max) ? incr * 2 + : incr; incr = lmin(acked, abc_lim); } @@ -212,28 +218,30 @@ tcp_newreno_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) { * (segsz^2 / cwnd per packet). */ - if (cw >= tp->snd_ssthresh) + if (cw >= tp->snd_ssthresh) { incr = max((incr * incr / cw), 1); + } } - tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); + tp->snd_cwnd = min(cw + incr, TCP_MAXWIN << tp->snd_scale); } void -tcp_newreno_pre_fr(struct tcpcb *tp) { - +tcp_newreno_pre_fr(struct tcpcb *tp) +{ uint32_t win; - win = min(tp->snd_wnd, tp->snd_cwnd) / - 2 / tp->t_maxseg; - if ( win < 2 ) + win = min(tp->snd_wnd, tp->snd_cwnd) / + 2 / tp->t_maxseg; + if (win < 2) { win = 2; - tp->snd_ssthresh = win * tp->t_maxseg; + } + tp->snd_ssthresh = win * tp->t_maxseg; tcp_cc_resize_sndbuf(tp); - } void -tcp_newreno_post_fr(struct tcpcb *tp, struct tcphdr *th) { +tcp_newreno_post_fr(struct tcpcb *tp, struct tcphdr *th) +{ int32_t ss; ss = tp->snd_max - th->th_ack; @@ -247,22 +255,24 @@ tcp_newreno_post_fr(struct tcpcb *tp, struct tcphdr *th) { * would be inclined to send a burst, better to do * it via the slow start mechanism. * - * If the flight size is zero, then make congestion - * window to be worth at least 2 segments to avoid + * If the flight size is zero, then make congestion + * window to be worth at least 2 segments to avoid * delayed acknowledgement (draft-ietf-tcpm-rfc3782-bis-05). */ - if (ss < (int32_t)tp->snd_ssthresh) + if (ss < (int32_t)tp->snd_ssthresh) { tp->snd_cwnd = max(ss, tp->t_maxseg) + tp->t_maxseg; - else + } else { tp->snd_cwnd = tp->snd_ssthresh; + } tp->t_bytes_acked = 0; } -/* Function to change the congestion window when the retransmit +/* Function to change the congestion window when the retransmit * timer fires. */ void -tcp_newreno_after_timeout(struct tcpcb *tp) { +tcp_newreno_after_timeout(struct tcpcb *tp) +{ /* * Close the congestion window down to one segment * (we'll open it by one segment for each ack we get). @@ -287,10 +297,11 @@ tcp_newreno_after_timeout(struct tcpcb *tp) { * growth is 2 mss. We don't allow the threshhold * to go below this.) */ - if (tp->t_state >= TCPS_ESTABLISHED) { + if (tp->t_state >= TCPS_ESTABLISHED) { u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg; - if (win < 2) + if (win < 2) { win = 2; + } tp->snd_ssthresh = win * tp->t_maxseg; tp->snd_cwnd = tp->t_maxseg; @@ -301,41 +312,43 @@ tcp_newreno_after_timeout(struct tcpcb *tp) { /* * Indicate whether this ack should be delayed. * We can delay the ack if: - * - delayed acks are enabled and set to 1, same as when value is set to 2. + * - delayed acks are enabled and set to 1, same as when value is set to 2. * We kept this for binary compatibility. * - delayed acks are enabled and set to 2, will "ack every other packet" * - if our last ack wasn't a 0-sized window. * - if the peer hasn't sent us a TH_PUSH data packet (this solves 3649245).
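The congestion-avoidance increment above, incr = max(incr * incr / cw, 1) with incr starting at one MSS, is the standard per-ACK approximation of one segment of growth per round trip. With illustrative numbers:

/* worked example: t_maxseg = 1448, snd_cwnd = 14480 (10 segments) */
uint32_t cw = 14480, mss = 1448;
uint32_t incr = (mss * mss) / cw;       /* = 144 bytes added per ACK */
/* one window's worth of ACKs (~10 per RTT) adds ~1440 bytes, i.e. about
 * one segment per RTT -- linear growth, versus one MSS per ACK in slow start */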
- * If TH_PUSH is set, take this as a clue that we need to ACK - * with no delay. This helps higher level protocols who won't send - * us more data even if the window is open because their + * - if the peer hasn't sent us a TH_PUSH data packet (this solves 3649245). + * If TH_PUSH is set, take this as a clue that we need to ACK + * with no delay. This helps higher level protocols who won't send + * us more data even if the window is open because their * last "segment" hasn't been ACKed - * - delayed acks are enabled and set to 3, will do "streaming detection" + * - delayed acks are enabled and set to 3, will do "streaming detection" * (see the comment in tcp_input.c) and * - if we receive more than "maxseg_unacked" full packets in the last 100ms - * - if the connection is not in slow-start or idle or loss/recovery states + * - if the connection is not in slow-start or idle or loss/recovery states * - if those criteria aren't met, it will ack every other packet. */ int -tcp_newreno_delay_ack(struct tcpcb *tp, struct tcphdr *th) { - return (tcp_cc_delay_ack(tp, th)); +tcp_newreno_delay_ack(struct tcpcb *tp, struct tcphdr *th) +{ + return tcp_cc_delay_ack(tp, th); } /* Switch to newreno from a different CC. If the connection is in * congestion avoidance state, it can continue to use the current * congestion window because it is going to be conservative. But * if the connection is in slow-start, we will halve the congestion - * window and let newreno work from there. + * window and let newreno work from there. */ void -tcp_newreno_switch_cc(struct tcpcb *tp, uint16_t old_index) { +tcp_newreno_switch_cc(struct tcpcb *tp, uint16_t old_index) +{ #pragma unused(old_index) uint32_t cwnd = min(tp->snd_wnd, tp->snd_cwnd); if (tp->snd_cwnd >= tp->snd_ssthresh) { cwnd = cwnd / tp->t_maxseg; - } else { + } else { cwnd = cwnd / 2 / tp->t_maxseg; } tp->snd_cwnd = max(TCP_CC_CWND_INIT_BYTES, cwnd * tp->t_maxseg); diff --git a/bsd/netinet/tcp_sack.c b/bsd/netinet/tcp_sack.c index 4b4c04dbe..a7c8e38d2 100644 --- a/bsd/netinet/tcp_sack.c +++ b/bsd/netinet/tcp_sack.c @@ -59,7 +59,7 @@ * */ -#define _IP_VHL +#define _IP_VHL #include @@ -106,13 +106,13 @@ #include SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack, CTLFLAG_RW | CTLFLAG_LOCKED, - int, tcp_do_sack, 1, "Enable/Disable TCP SACK support"); + int, tcp_do_sack, 1, "Enable/Disable TCP SACK support"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_maxholes, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, tcp_sack_maxholes, 128, + static int, tcp_sack_maxholes, 128, "Maximum number of TCP SACK holes allowed per connection"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_globalmaxholes, - CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_sack_globalmaxholes, 65536, + CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_sack_globalmaxholes, 65536, "Global maximum number of TCP SACK holes"); static SInt32 tcp_sack_globalholes = 0; @@ -135,7 +135,7 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, ignore_hw_duplicates, extern struct zone *sack_hole_zone; -#define TCP_VALIDATE_SACK_SEQ_NUMBERS(_tp_, _sb_, _ack_) \ +#define TCP_VALIDATE_SACK_SEQ_NUMBERS(_tp_, _sb_, _ack_) \ (SEQ_GT((_sb_)->end, (_sb_)->start) && \ SEQ_GT((_sb_)->start, (_tp_)->snd_una) && \ SEQ_GT((_sb_)->start, (_ack_)) && \ @@ -177,15 +177,17 @@ tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end) * Discard this SACK block. */ } else if (SEQ_LEQ(head_blk.start, end) && - SEQ_GEQ(head_blk.end, start)) { + SEQ_GEQ(head_blk.end, start)) { /* * Merge this SACK block into head_blk. * This SACK block itself will be discarded. 
*/ - if (SEQ_GT(head_blk.start, start)) + if (SEQ_GT(head_blk.start, start)) { head_blk.start = start; - if (SEQ_LT(head_blk.end, end)) + } + if (SEQ_LT(head_blk.end, end)) { head_blk.end = end; + } } else { /* * Save this SACK block. @@ -211,15 +213,16 @@ tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end) * If the number of saved SACK blocks exceeds its limit, * discard the last SACK block. */ - if (num_saved >= MAX_SACK_BLKS) + if (num_saved >= MAX_SACK_BLKS) { num_saved--; + } } if (num_saved > 0) { /* * Copy the saved SACK blocks back. */ bcopy(saved_blks, &tp->sackblks[num_head], - sizeof(struct sackblk) * num_saved); + sizeof(struct sackblk) * num_saved); } /* Save the number of SACK blocks. */ @@ -229,12 +232,14 @@ tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end) * so that connection will generate more acks after recovery and * sender's cwnd will open. */ - if ((tp->t_flags & TF_STRETCHACK) != 0 && tp->rcv_numsacks > 0) + if ((tp->t_flags & TF_STRETCHACK) != 0 && tp->rcv_numsacks > 0) { tcp_reset_stretch_ack(tp); + } #if TRAFFIC_MGT - if (tp->acc_iaj > 0 && tp->rcv_numsacks > 0) + if (tp->acc_iaj > 0 && tp->rcv_numsacks > 0) { reset_acc_iaj(tp); + } #endif /* TRAFFIC_MGT */ } @@ -244,9 +249,8 @@ tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end) void tcp_clean_sackreport( struct tcpcb *tp) { - tp->rcv_numsacks = 0; - bzero(&tp->sackblks[0], sizeof (struct sackblk) * MAX_SACK_BLKS); + bzero(&tp->sackblks[0], sizeof(struct sackblk) * MAX_SACK_BLKS); } /* @@ -264,8 +268,9 @@ tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end) } hole = (struct sackhole *)zalloc(sack_hole_zone); - if (hole == NULL) + if (hole == NULL) { return NULL; + } hole->start = start; hole->end = end; @@ -294,26 +299,29 @@ tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole) */ static struct sackhole * tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end, - struct sackhole *after) + struct sackhole *after) { struct sackhole *hole; /* Allocate a new SACK hole. */ hole = tcp_sackhole_alloc(tp, start, end); - if (hole == NULL) + if (hole == NULL) { return NULL; + } hole->rxmit_start = tcp_now; /* Insert the new SACK hole into scoreboard */ - if (after != NULL) + if (after != NULL) { TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink); - else + } else { TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink); + } /* Update SACK hint. */ - if (tp->sackhint.nexthole == NULL) + if (tp->sackhint.nexthole == NULL) { tp->sackhint.nexthole = hole; + } - return(hole); + return hole; } /* @@ -323,8 +331,9 @@ static void tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole) { /* Update SACK hint. */ - if (tp->sackhint.nexthole == hole) + if (tp->sackhint.nexthole == hole) { tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink); + } /* Remove this SACK hole. */ TAILQ_REMOVE(&tp->snd_holes, hole, scblink); @@ -348,15 +357,17 @@ tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s, * If the SACK hole is past snd_fack, this is from new SACK * information, so we can ignore it. */ - if (SEQ_GT(s->end, snd_fack)) + if (SEQ_GT(s->end, snd_fack)) { return; + } /* - * If there has been a retransmit timeout, then the timestamp on + * If there has been a retransmit timeout, then the timestamp on * the SACK segment will be newer. This might lead to a * false-positive. Avoid re-ordering detection in this case. 
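The scoreboard these helpers maintain is an ordered list of [start, end) gaps with a rotating hint into it; a compact sketch of the same shape using the queue(3) macros (type and field names invented):

#include <sys/queue.h>
#include <stdint.h>

typedef uint32_t tcp_seq;

struct hole {
        tcp_seq                 start, end;     /* un-SACKed gap in the send space */
        tcp_seq                 rxmit;          /* next byte of the gap to resend */
        TAILQ_ENTRY(hole)       link;
};
TAILQ_HEAD(hole_head, hole);

/* insert after a known neighbor, or at the tail for the highest gap */
static void
hole_insert(struct hole_head *sb, struct hole *h, struct hole *after)
{
        if (after != NULL) {
                TAILQ_INSERT_AFTER(sb, after, h, link);
        } else {
                TAILQ_INSERT_TAIL(sb, h, link);
        }
}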
*/ - if (tp->t_rxtshift > 0) + if (tp->t_rxtshift > 0) { return; + } /* * Detect reordering from SACK information by checking @@ -391,8 +402,9 @@ tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s, if (s->rxmit_start > 0) { rext = timer_diff(tcp_now, 0, s->rxmit_start, 0); - if (rext < 0) + if (rext < 0) { return; + } /* * We take the maximum reorder window to schedule @@ -420,8 +432,8 @@ tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s, * the sequence space). */ void -tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, - u_int32_t *newbytes_acked) +tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, + u_int32_t *newbytes_acked) { struct sackhole *cur, *temp; struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp; @@ -446,16 +458,18 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, &sack, sizeof(sack)); sack.start = ntohl(sack.start); sack.end = ntohl(sack.end); - if (TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &sack, th_ack)) + if (TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &sack, th_ack)) { sack_blocks[num_sack_blks++] = sack; + } } /* * Return if SND.UNA is not advanced and no valid SACK block * is received. */ - if (num_sack_blks == 0) + if (num_sack_blks == 0) { return; + } VERIFY(num_sack_blks <= (TCP_MAX_SACK + 1)); /* @@ -495,7 +509,7 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, * the highest three or four SACK blocks with ack number advancing * are received. */ - sblkp = &sack_blocks[num_sack_blks - 1]; /* Last SACK block */ + sblkp = &sack_blocks[num_sack_blks - 1]; /* Last SACK block */ if (SEQ_LT(tp->snd_fack, sblkp->start)) { /* * The highest SACK block is beyond fack. @@ -504,7 +518,7 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, * beyond the current fack, they will be inserted by * way of hole splitting in the while-loop below. */ - temp = tcp_sackhole_insert(tp, tp->snd_fack,sblkp->start,NULL); + temp = tcp_sackhole_insert(tp, tp->snd_fack, sblkp->start, NULL); if (temp != NULL) { tp->snd_fack = sblkp->end; *newbytes_acked += (sblkp->end - sblkp->start); @@ -512,18 +526,19 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, /* Go to the previous sack block. */ sblkp--; } else { - /* - * We failed to add a new hole based on the current - * sack block. Skip over all the sack blocks that + /* + * We failed to add a new hole based on the current + * sack block. Skip over all the sack blocks that * fall completely to the right of snd_fack and proceed * to trim the scoreboard based on the remaining sack - * blocks. This also trims the scoreboard for th_ack + * blocks. This also trims the scoreboard for th_ack * (which is sack_blocks[0]). */ - while (sblkp >= sack_blocks && - SEQ_LT(tp->snd_fack, sblkp->start)) + while (sblkp >= sack_blocks && + SEQ_LT(tp->snd_fack, sblkp->start)) { sblkp--; - if (sblkp >= sack_blocks && + } + if (sblkp >= sack_blocks && SEQ_LT(tp->snd_fack, sblkp->end)) { *newbytes_acked += (sblkp->end - tp->snd_fack); tp->snd_fack = sblkp->end; @@ -540,7 +555,7 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, * Since the incoming sack blocks are sorted, we can process them * making one sweep of the scoreboard. */ - while (sblkp >= sack_blocks && cur != NULL) { + while (sblkp >= sack_blocks && cur != NULL) { if (SEQ_GEQ(sblkp->start, cur->end)) { /* * SACKs data beyond the current hole. 
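The first case of the sweep above is the common one: the newest SACK block lands beyond everything SACKed so far, so the bytes between the old snd_fack and the block's start become a fresh hole. A worked example of that path:

/* snd_fack = 1000; a SACK block for [3000, 4000) arrives:
 *   -> hole [1000, 3000) is inserted at the tail of the scoreboard
 *   -> snd_fack advances to 4000
 *   -> *newbytes_acked += 4000 - 3000 = 1000 newly SACKed bytes */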
@@ -605,12 +620,12 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, if (SEQ_GT(cur->rxmit, temp->rxmit)) { temp->rxmit = cur->rxmit; tp->sackhint.sack_bytes_rexmit - += (temp->rxmit - - temp->start); + += (temp->rxmit + - temp->start); } cur->end = sblkp->start; cur->rxmit = SEQ_MIN(cur->rxmit, - cur->end); + cur->end); /* * Reset the rxmit_start to that of * the current hole as that will @@ -627,10 +642,11 @@ tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th, * we're done with the sack block or the sack hole. * Accordingly, we advance one or the other. */ - if (SEQ_LEQ(sblkp->start, cur->start)) + if (SEQ_LEQ(sblkp->start, cur->start)) { cur = TAILQ_PREV(cur, sackhole_head, scblink); - else + } else { sblkp--; + } } } @@ -642,20 +658,20 @@ tcp_free_sackholes(struct tcpcb *tp) { struct sackhole *q; - while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) + while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) { tcp_sackhole_remove(tp, q); + } tp->sackhint.sack_bytes_rexmit = 0; tp->sackhint.nexthole = NULL; tp->sack_newdata = 0; - } /* - * Partial ack handling within a sack recovery episode. + * Partial ack handling within a sack recovery episode. * Keeping this very simple for now. When a partial ack * is received, force snd_cwnd to a value that will allow * the sender to transmit no more than 2 segments. - * If necessary, a better scheme can be adopted at a + * If necessary, a better scheme can be adopted at a * later point, but for now, the goal is to prevent the * sender from bursting a large amount of data in the midst * of sack recovery. @@ -668,13 +684,15 @@ tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th) tp->t_timer[TCPT_REXMT] = 0; tp->t_rtttime = 0; /* send one or 2 segments based on how much new data was acked */ - if (((BYTES_ACKED(th, tp)) / tp->t_maxseg) > 2) + if (((BYTES_ACKED(th, tp)) / tp->t_maxseg) > 2) { num_segs = 2; + } tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit + - (tp->snd_nxt - tp->sack_newdata) + - num_segs * tp->t_maxseg); - if (tp->snd_cwnd > tp->snd_ssthresh) + (tp->snd_nxt - tp->sack_newdata) + + num_segs * tp->t_maxseg); + if (tp->snd_cwnd > tp->snd_ssthresh) { tp->snd_cwnd = tp->snd_ssthresh; + } if (SEQ_LT(tp->snd_fack, tp->snd_recover) && tp->snd_fack == th->th_ack && TAILQ_EMPTY(&tp->snd_holes)) { struct sackhole *temp; @@ -686,8 +704,9 @@ tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th) */ temp = tcp_sackhole_insert(tp, tp->snd_fack, tp->snd_recover, NULL); - if (temp != NULL) + if (temp != NULL) { tp->snd_fack = tp->snd_recover; + } } (void) tcp_output(tp); } @@ -712,7 +731,7 @@ tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt) } *sack_bytes_rexmt += (p->rxmit - p->start); } - return (p); + return p; } /* @@ -741,8 +760,9 @@ tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt) dbg_hole = tcp_sack_output_debug(tp, &dbg_bytes_rexmt); *sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit; hole = tp->sackhint.nexthole; - if (hole == NULL || SEQ_LT(hole->rxmit, hole->end)) + if (hole == NULL || SEQ_LT(hole->rxmit, hole->end)) { goto out; + } while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) { if (SEQ_LT(hole->rxmit, hole->end)) { tp->sackhint.nexthole = hole; @@ -756,11 +776,11 @@ out: } if (*sack_bytes_rexmt != dbg_bytes_rexmt) { printf("%s: Computed sack_bytes_retransmitted (%d) not " - "the same as cached value (%d)\n", - __func__, dbg_bytes_rexmt, *sack_bytes_rexmt); + "the same as cached value (%d)\n", + __func__, dbg_bytes_rexmt, *sack_bytes_rexmt); *sack_bytes_rexmt = 
dbg_bytes_rexmt; } - return (hole); + return hole; } /* @@ -773,34 +793,38 @@ tcp_sack_adjust(struct tcpcb *tp) { struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes); - if (cur == NULL) + if (cur == NULL) { return; /* No holes */ - if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack)) + } + if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack)) { return; /* We're already beyond any SACKed blocks */ + } /* * Two cases for which we want to advance snd_nxt: * i) snd_nxt lies between end of one hole and beginning of another * ii) snd_nxt lies between end of last hole and snd_fack */ while ((p = TAILQ_NEXT(cur, scblink)) != NULL) { - if (SEQ_LT(tp->snd_nxt, cur->end)) + if (SEQ_LT(tp->snd_nxt, cur->end)) { return; - if (SEQ_GEQ(tp->snd_nxt, p->start)) + } + if (SEQ_GEQ(tp->snd_nxt, p->start)) { cur = p; - else { + } else { tp->snd_nxt = p->start; return; } } - if (SEQ_LT(tp->snd_nxt, cur->end)) + if (SEQ_LT(tp->snd_nxt, cur->end)) { return; + } tp->snd_nxt = tp->snd_fack; return; } /* * This function returns TRUE if more than (tcprexmtthresh - 1) * SMSS - * bytes with sequence numbers greater than snd_una have been SACKed. + * bytes with sequence numbers greater than snd_una have been SACKed. */ boolean_t tcp_sack_byte_islost(struct tcpcb *tp) @@ -809,8 +833,9 @@ tcp_sack_byte_islost(struct tcpcb *tp) struct sackhole *sndhole; if (!SACK_ENABLED(tp) || IN_FASTRECOVERY(tp) || TAILQ_EMPTY(&tp->snd_holes) || - (tp->t_flagsext & TF_PKTS_REORDERED)) - return (FALSE); + (tp->t_flagsext & TF_PKTS_REORDERED)) { + return FALSE; + } unacked_bytes = tp->snd_max - tp->snd_una; @@ -819,8 +844,8 @@ tcp_sack_byte_islost(struct tcpcb *tp) } VERIFY(unacked_bytes >= sndhole_bytes); - return ((unacked_bytes - sndhole_bytes) > - ((tcprexmtthresh - 1) * tp->t_maxseg)); + return (unacked_bytes - sndhole_bytes) > + ((tcprexmtthresh - 1) * tp->t_maxseg); } /* @@ -867,7 +892,7 @@ tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to, * returning true here so that the ack will not be * treated as duplicate ack. */ - return (TRUE); + return TRUE; } } else if (to->to_nsacks > 1 && SEQ_LEQ(second_sack.start, first_sack.start) && @@ -886,11 +911,11 @@ tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to, to->to_nsacks--; to->to_sacks += TCPOLEN_SACK; tcpstat.tcps_dsack_recvd_old++; - return (TRUE); + return TRUE; } } else { /* no dsack options, proceed with processing the sack */ - return (FALSE); + return FALSE; } /* Update the tcpopt pointer to exclude dsack block */ @@ -900,15 +925,17 @@ tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to, tp->t_dsack_recvd++; /* ignore DSACK option, if DSACK is disabled */ - if (tp->t_flagsext & TF_DISABLE_DSACK) - return (TRUE); + if (tp->t_flagsext & TF_DISABLE_DSACK) { + return TRUE; + } /* If the DSACK is for TLP mark it as such */ if ((tp->t_flagsext & TF_SENT_TLPROBE) && first_sack.end == tp->t_tlphighrxt) { if ((rxseg = tcp_rxtseg_find(tp, first_sack.start, - (first_sack.end - 1))) != NULL) + (first_sack.end - 1))) != NULL) { rxseg->rx_flags |= TCP_RXT_DSACK_FOR_TLP; + } } /* Update the sender's retransmit segment state */ if (((tp->t_rxtshift == 1 && first_sack.start == tp->snd_una) || @@ -927,7 +954,7 @@ tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to, */ tcpstat.tcps_dsack_ackloss++; - return (TRUE); + return TRUE; } else if ((rxseg = tcp_rxtseg_find(tp, first_sack.start, (first_sack.end - 1))) == NULL) { /* @@ -944,10 +971,11 @@ tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to, * If the segment was retransmitted only once, mark it as * spurious. 
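tcp_sack_byte_islost() above implements a byte-counting loss test in the spirit of RFC 6675: subtract the bytes still covered by scoreboard holes from everything outstanding, and declare loss once more than (tcprexmtthresh - 1) full segments' worth has been SACKed. Numerically, with the stock tcprexmtthresh of 3:

/* snd_max - snd_una = 20000 bytes outstanding, holes cover 14000:
 *   SACKed bytes = 20000 - 14000 = 6000
 *   threshold    = (3 - 1) * 1448 = 2896
 *   6000 > 2896  -> a byte is considered lost */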
Otherwise ignore the duplicate notification. */ - if (rxseg->rx_count == 1) + if (rxseg->rx_count == 1) { rxseg->rx_flags |= TCP_RXT_SPURIOUS; - else + } else { rxseg->rx_flags &= ~TCP_RXT_SPURIOUS; + } } - return (TRUE); + return TRUE; } diff --git a/bsd/netinet/tcp_seq.h b/bsd/netinet/tcp_seq.h index 772d33f36..963912783 100644 --- a/bsd/netinet/tcp_seq.h +++ b/bsd/netinet/tcp_seq.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -69,32 +69,32 @@ * on with modular arithmetic. These macros can be * used to compare such integers. */ -#define SEQ_LT(a,b) ((int)((a)-(b)) < 0) -#define SEQ_LEQ(a,b) ((int)((a)-(b)) <= 0) -#define SEQ_GT(a,b) ((int)((a)-(b)) > 0) -#define SEQ_GEQ(a,b) ((int)((a)-(b)) >= 0) +#define SEQ_LT(a, b) ((int)((a)-(b)) < 0) +#define SEQ_LEQ(a, b) ((int)((a)-(b)) <= 0) +#define SEQ_GT(a, b) ((int)((a)-(b)) > 0) +#define SEQ_GEQ(a, b) ((int)((a)-(b)) >= 0) -#define SEQ_MIN(a, b) ((SEQ_LT(a, b)) ? (a) : (b)) -#define SEQ_MAX(a, b) ((SEQ_GT(a, b)) ? (a) : (b)) +#define SEQ_MIN(a, b) ((SEQ_LT(a, b)) ? (a) : (b)) +#define SEQ_MAX(a, b) ((SEQ_GT(a, b)) ? (a) : (b)) /* for modulo comparisons of timestamps */ -#define TSTMP_LT(a,b) ((int)((a)-(b)) < 0) -#define TSTMP_GT(a,b) ((int)((a)-(b)) > 0) -#define TSTMP_LEQ(a,b) ((int)((a)-(b)) <= 0) -#define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0) +#define TSTMP_LT(a, b) ((int)((a)-(b)) < 0) +#define TSTMP_GT(a, b) ((int)((a)-(b)) > 0) +#define TSTMP_LEQ(a, b) ((int)((a)-(b)) <= 0) +#define TSTMP_GEQ(a, b) ((int)((a)-(b)) >= 0) /* * TCP connection counts are 32 bit integers operated * on with modular arithmetic. These macros can be * used to compare such integers. */ -#define CC_LT(a,b) ((int)((a)-(b)) < 0) -#define CC_LEQ(a,b) ((int)((a)-(b)) <= 0) -#define CC_GT(a,b) ((int)((a)-(b)) > 0) -#define CC_GEQ(a,b) ((int)((a)-(b)) >= 0) +#define CC_LT(a, b) ((int)((a)-(b)) < 0) +#define CC_LEQ(a, b) ((int)((a)-(b)) <= 0) +#define CC_GT(a, b) ((int)((a)-(b)) > 0) +#define CC_GEQ(a, b) ((int)((a)-(b)) >= 0) /* Macro to increment a CC: skip 0 which has a special meaning */ -#define CC_INC(c) (++(c) == 0 ? ++(c) : (c)) +#define CC_INC(c) (++(c) == 0 ? ++(c) : (c)) #ifdef KERNEL_PRIVATE /* @@ -102,14 +102,14 @@ * send and receive from initial send and receive * sequence numbers. 
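The comparison macros above lean on two's-complement wraparound: the unsigned difference is reinterpreted as signed, so ordering stays correct across the 2^32 wrap whenever the two values are within 2^31 of each other. A self-contained check (int32_t standing in for the kernel's plain int cast):

#include <assert.h>
#include <stdint.h>

#define SEQ_LT(a, b)    ((int32_t)((a) - (b)) < 0)

int
main(void)
{
        /* 0xfffffff0 precedes 0x10 across the wrap:
         * 0xfffffff0 - 0x10 = 0xffffffe0 == -32 as int32_t */
        assert(SEQ_LT(0xfffffff0u, 0x10u));
        assert(!SEQ_LT(0x10u, 0xfffffff0u));
        return 0;
}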
*/ -#define tcp_rcvseqinit(tp) \ +#define tcp_rcvseqinit(tp) \ (tp)->rcv_adv = (tp)->rcv_nxt = (tp)->irs + 1 -#define tcp_sendseqinit(tp) \ +#define tcp_sendseqinit(tp) \ (tp)->snd_una = (tp)->snd_nxt = (tp)->snd_max = (tp)->snd_up = \ (tp)->snd_recover = (tp)->iss -#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * TCP_RETRANSHZ) +#define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * TCP_RETRANSHZ) /* timestamp wrap-around time */ #endif /* KERNEL_PRIVATE */ #endif /* _NETINET_TCP_SEQ_H_ */ diff --git a/bsd/netinet/tcp_subr.c b/bsd/netinet/tcp_subr.c index 1c9b36da3..67381dffe 100644 --- a/bsd/netinet/tcp_subr.c +++ b/bsd/netinet/tcp_subr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -92,8 +92,8 @@ #include #include -#define tcp_minmssoverload fring -#define _IP_VHL +#define tcp_minmssoverload fring +#define _IP_VHL #include #include #include @@ -156,7 +156,7 @@ #include -#define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2)) +#define DBG_FNC_TCP_CLOSE NETDBG_CODE(DBG_NETTCP, ((5 << 8) | 2)) static tcp_cc tcp_ccgen; extern int tcp_lq_overflow; @@ -165,36 +165,36 @@ extern struct tcptimerlist tcp_timer_list; extern struct tcptailq tcp_tw_tailq; SYSCTL_SKMEM_TCP_INT(TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW | CTLFLAG_LOCKED, - int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size"); + int, tcp_mssdflt, TCP_MSS, "Default TCP Maximum Segment Size"); #if INET6 SYSCTL_SKMEM_TCP_INT(TCPCTL_V6MSSDFLT, v6mssdflt, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS, - "Default TCP Maximum Segment Size for IPv6"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_v6mssdflt, TCP6_MSS, + "Default TCP Maximum Segment Size for IPv6"); #endif int tcp_sysctl_fastopenkey(struct sysctl_oid *, void *, int, struct sysctl_req *); SYSCTL_PROC(_net_inet_tcp, OID_AUTO, fastopen_key, CTLTYPE_STRING | CTLFLAG_WR, - 0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key"); + 0, 0, tcp_sysctl_fastopenkey, "S", "TCP Fastopen key"); /* Current count of half-open TFO connections */ -int tcp_tfo_halfcnt = 0; +int tcp_tfo_halfcnt = 0; /* Maximum of half-open TFO connection backlog */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen_backlog, - CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10, - "Backlog queue for half-open TFO connections"); + CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_tfo_backlog, 10, + "Backlog queue for half-open TFO connections"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, fastopen, CTLFLAG_RW | CTLFLAG_LOCKED, - int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER, - "Enable TCP Fastopen (RFC 7413)"); + int, tcp_fastopen, TCP_FASTOPEN_CLIENT | TCP_FASTOPEN_SERVER, + "Enable TCP Fastopen (RFC 7413)"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, now_init, CTLFLAG_RD | CTLFLAG_LOCKED, - uint32_t, tcp_now_init, 0, "Initial tcp now value"); + uint32_t, tcp_now_init, 0, "Initial tcp now value"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED, - uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in micro seconds"); + uint32_t, tcp_microuptime_init, 0, "Initial tcp uptime value in micro seconds"); /* * Minimum MSS we accept and use. This prevents DoS attacks where @@ -205,73 +205,73 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, microuptime_init, CTLFLAG_RD | CTLFLAG_LOCKED, * checking. This setting prevents us from sending too small packets. 
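TCP_PAWS_IDLE above is the timestamp wrap-around idle limit of 24 days expressed in tcp_now ticks; with a 1 ms timestamp clock, 2^31 ms is roughly 24.8 days, so 24 days keeps PAWS comparisons on the safe side of the wrap:

/* 24 d * 24 h * 60 m * 60 s * 1000 ticks/s = 2,073,600,000 ticks,
 * comfortably below the 2^31 (2,147,483,648) range that the signed
 * TSTMP_* comparisons above can order correctly */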
*/ SYSCTL_SKMEM_TCP_INT(OID_AUTO, minmss, CTLFLAG_RW | CTLFLAG_LOCKED, - int, tcp_minmss, TCP_MINMSS, "Minmum TCP Maximum Segment Size"); + int, tcp_minmss, TCP_MINMSS, "Minmum TCP Maximum Segment Size"); int tcp_do_rfc1323 = 1; #if (DEVELOPMENT || DEBUG) SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1323, 0, - "Enable rfc1323 (high performance TCP) extensions"); + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1323, 0, + "Enable rfc1323 (high performance TCP) extensions"); #endif /* (DEVELOPMENT || DEBUG) */ // Not used -static int tcp_do_rfc1644 = 0; +static int tcp_do_rfc1644 = 0; SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, - CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1644, 0, - "Enable rfc1644 (TTCP) extensions"); + CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_do_rfc1644, 0, + "Enable rfc1644 (TTCP) extensions"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, do_tcpdrain, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, do_tcpdrain, 0, - "Enable tcp_drain routine for extra help when low on mbufs"); + static int, do_tcpdrain, 0, + "Enable tcp_drain routine for extra help when low on mbufs"); SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED, - &tcbinfo.ipi_count, 0, "Number of active PCBs"); + &tcbinfo.ipi_count, 0, "Number of active PCBs"); SYSCTL_INT(_net_inet_tcp, OID_AUTO, tw_pcbcount, CTLFLAG_RD | CTLFLAG_LOCKED, - &tcbinfo.ipi_twcount, 0, "Number of pcbs in time-wait state"); + &tcbinfo.ipi_twcount, 0, "Number of pcbs in time-wait state"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, icmp_may_rst, CTLFLAG_RW | CTLFLAG_LOCKED, - static int, icmp_may_rst, 1, - "Certain ICMP unreachable messages may abort connections in SYN_SENT"); + static int, icmp_may_rst, 1, + "Certain ICMP unreachable messages may abort connections in SYN_SENT"); -static int tcp_strict_rfc1948 = 0; -static int tcp_isn_reseed_interval = 0; +static int tcp_strict_rfc1948 = 0; +static int tcp_isn_reseed_interval = 0; #if (DEVELOPMENT || DEBUG) SYSCTL_INT(_net_inet_tcp, OID_AUTO, strict_rfc1948, CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly"); + &tcp_strict_rfc1948, 0, "Determines if RFC1948 is followed exactly"); SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, - CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret"); + CTLFLAG_RW | CTLFLAG_LOCKED, + &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret"); #endif /* (DEVELOPMENT || DEBUG) */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, rtt_min, CTLFLAG_RW | CTLFLAG_LOCKED, - int, tcp_TCPTV_MIN, 100, "min rtt value allowed"); + int, tcp_TCPTV_MIN, 100, "min rtt value allowed"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, rexmt_slop, CTLFLAG_RW, - int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout"); + int, tcp_rexmt_slop, TCPTV_REXMTSLOP, "Slop added to retransmit timeout"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, randomize_ports, CTLFLAG_RW | CTLFLAG_LOCKED, - __private_extern__ int , tcp_use_randomport, 0, - "Randomize TCP port numbers"); + __private_extern__ int, tcp_use_randomport, 0, + "Randomize TCP port numbers"); SYSCTL_SKMEM_TCP_INT(OID_AUTO, win_scale_factor, CTLFLAG_RW | CTLFLAG_LOCKED, - __private_extern__ int, tcp_win_scale, 3, "Window scaling factor"); + __private_extern__ int, tcp_win_scale, 3, "Window scaling factor"); -static void tcp_cleartaocache(void); -static void tcp_notify(struct inpcb *, int); +static void tcp_cleartaocache(void); +static void tcp_notify(struct inpcb *, int); -struct zone *sack_hole_zone; 
-struct zone *tcp_reass_zone; -struct zone *tcp_bwmeas_zone; -struct zone *tcp_rxt_seg_zone; +struct zone *sack_hole_zone; +struct zone *tcp_reass_zone; +struct zone *tcp_bwmeas_zone; +struct zone *tcp_rxt_seg_zone; -extern int slowlink_wsize; /* window correction for slow links */ +extern int slowlink_wsize; /* window correction for slow links */ extern int path_mtu_discovery; static void tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb); -#define TCP_BWMEAS_BURST_MINSIZE 6 -#define TCP_BWMEAS_BURST_MAXSIZE 25 +#define TCP_BWMEAS_BURST_MINSIZE 6 +#define TCP_BWMEAS_BURST_MAXSIZE 25 static uint32_t bwmeas_elm_size; @@ -282,12 +282,12 @@ static uint32_t bwmeas_elm_size; * variable net.inet.tcp.tcbhashsize */ #ifndef TCBHASHSIZE -#define TCBHASHSIZE CONFIG_TCBHASHSIZE +#define TCBHASHSIZE CONFIG_TCBHASHSIZE #endif -__private_extern__ int tcp_tcbhashsize = TCBHASHSIZE; +__private_extern__ int tcp_tcbhashsize = TCBHASHSIZE; SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED, - &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable"); + &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable"); /* * This is the actual shape of what we allocate using the zone @@ -297,10 +297,10 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD | CTLFLAG_LOCKED, * we avoid changing most of the rest of the code (although it needs * to be changed, eventually, for greater efficiency). */ -#define ALIGNMENT 32 -struct inp_tp { - struct inpcb inp; - struct tcpcb tcb __attribute__((aligned(ALIGNMENT))); +#define ALIGNMENT 32 +struct inp_tp { + struct inpcb inp; + struct tcpcb tcb __attribute__((aligned(ALIGNMENT))); }; #undef ALIGNMENT @@ -334,11 +334,11 @@ tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out, size_t blk_size) bzero(&out[0], CCAES_BLOCK_SIZE); #if INET6 - if (isipv6) + if (isipv6) { memcpy(in, &inp->in6p_faddr, sizeof(struct in6_addr)); - else + } else #endif /* INET6 */ - memcpy(in, &inp->inp_faddr, sizeof(struct in_addr)); + memcpy(in, &inp->inp_faddr, sizeof(struct in_addr)); aes_encrypt_cbc(in, NULL, 1, out, &tfo_ctx); } @@ -370,8 +370,9 @@ tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1, bzero(keystring, sizeof(keystring)); error = sysctl_io_string(req, keystring, sizeof(keystring), 0, NULL); - if (error) + if (error) { goto exit; + } for (i = 0; i < (TCP_FASTOPEN_KEYLEN / sizeof(u_int32_t)); i++) { /* @@ -387,19 +388,19 @@ tcp_sysctl_fastopenkey(__unused struct sysctl_oid *oidp, __unused void *arg1, aes_encrypt_key128((u_char *)key, &tfo_ctx); exit: - return (error); + return error; } int get_inpcb_str_size(void) { - return (sizeof(struct inpcb)); + return sizeof(struct inpcb); } int get_tcp_str_size(void) { - return (sizeof(struct tcpcb)); + return sizeof(struct tcpcb); } static int scale_to_powerof2(int size); @@ -415,7 +416,8 @@ static int scale_to_powerof2(int size); * 3. Same value as argument size if it is already a power of two. */ static int -scale_to_powerof2(int size) { +scale_to_powerof2(int size) +{ /* Handle special case of size = 0 */ int ret = size ? size : 1; @@ -427,7 +429,7 @@ scale_to_powerof2(int size) { * its highest set bit at which point * it is rounded down power of two. 
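scale_to_powerof2(), documented above, uses the classic n & (n - 1) step: each iteration clears the lowest set bit, so the loop bottoms out at the highest set bit, the floor power of two, which is then doubled unless the input was already exact. A standalone version of the same idea (the kernel routine additionally handles the overflow case noted in its comment):

#include <limits.h>

static int
round_to_powerof2(int size)
{
        int floor2 = size;

        if (size <= 1) {
                return 1;                       /* the helper maps 0 to 1 */
        }
        while (floor2 & (floor2 - 1)) {
                floor2 &= (floor2 - 1);         /* clear the lowest set bit */
        }
        if (floor2 == size) {
                return size;                    /* already a power of two */
        }
        /* round up; fall back to the floor value if doubling would overflow */
        return (floor2 <= INT_MAX / 2) ? (floor2 << 1) : floor2;
}
/* e.g. 600 -> floor 512 -> returns 1024; 1024 -> returns 1024 */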
*/ - size = size & (size -1); + size = size & (size - 1); } /* Check for overflow when rounding up */ @@ -438,7 +440,7 @@ scale_to_powerof2(int size) { } } - return (ret); + return ret; } static void @@ -461,10 +463,11 @@ tcp_init(struct protosw *pp, struct domain *dp) vm_size_t str_size; struct inpcbinfo *pcbinfo; - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); - if (tcp_initialized) + if (tcp_initialized) { return; + } tcp_initialized = 1; tcp_ccgen = 1; @@ -527,17 +530,17 @@ tcp_init(struct protosw *pp, struct domain *dp) tcp_tcbhashsize = 16; } printf("WARNING: TCB hash size not a power of 2, " - "scaled from %d to %d.\n", - old_hash_size, - tcp_tcbhashsize); + "scaled from %d to %d.\n", + old_hash_size, + tcp_tcbhashsize); } tcbinfo.ipi_hashbase = hashinit(tcp_tcbhashsize, M_PCB, &tcbinfo.ipi_hashmask); tcbinfo.ipi_porthashbase = hashinit(tcp_tcbhashsize, M_PCB, - &tcbinfo.ipi_porthashmask); + &tcbinfo.ipi_porthashmask); str_size = P2ROUNDUP(sizeof(struct inp_tp), sizeof(u_int64_t)); - tcbinfo.ipi_zone = zinit(str_size, 120000*str_size, 8192, "tcpcb"); + tcbinfo.ipi_zone = zinit(str_size, 120000 * str_size, 8192, "tcpcb"); zone_change(tcbinfo.ipi_zone, Z_CALLERACCT, FALSE); zone_change(tcbinfo.ipi_zone, Z_EXPAND, TRUE); @@ -546,14 +549,14 @@ tcp_init(struct protosw *pp, struct domain *dp) in_pcbinfo_attach(&tcbinfo); str_size = P2ROUNDUP(sizeof(struct sackhole), sizeof(u_int64_t)); - sack_hole_zone = zinit(str_size, 120000*str_size, 8192, + sack_hole_zone = zinit(str_size, 120000 * str_size, 8192, "sack_hole zone"); zone_change(sack_hole_zone, Z_CALLERACCT, FALSE); zone_change(sack_hole_zone, Z_EXPAND, TRUE); str_size = P2ROUNDUP(sizeof(struct tseg_qent), sizeof(u_int64_t)); tcp_reass_zone = zinit(str_size, (nmbclusters >> 4) * str_size, - 0, "tcp_reass_zone"); + 0, "tcp_reass_zone"); if (tcp_reass_zone == NULL) { panic("%s: failed allocating tcp_reass_zone", __func__); /* NOTREACHED */ @@ -583,16 +586,17 @@ tcp_init(struct protosw *pp, struct domain *dp) zone_change(tcp_rxt_seg_zone, Z_EXPAND, TRUE); #if INET6 -#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) +#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr)) #else /* INET6 */ -#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr)) +#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr)) #endif /* INET6 */ if (max_protohdr < TCP_MINPROTOHDR) { _max_protohdr = TCP_MINPROTOHDR; - _max_protohdr = max_protohdr; /* round it up */ + _max_protohdr = max_protohdr; /* round it up */ } - if (max_linkhdr + max_protohdr > MCLBYTES) + if (max_linkhdr + max_protohdr > MCLBYTES) { panic("tcp_init"); + } #undef TCP_MINPROTOHDR /* Initialize time wait and timer lists */ @@ -611,7 +615,8 @@ tcp_init(struct protosw *pp, struct domain *dp) if ((tcp_timer_list.mtx = lck_mtx_alloc_init(tcp_timer_list.mtx_grp, tcp_timer_list.mtx_attr)) == NULL) { panic("failed to allocate memory for tcp_timer_list.mtx\n"); - }; + } + ; tcp_timer_list.call = thread_call_allocate(tcp_run_timerlist, NULL); if (tcp_timer_list.call == NULL) { panic("failed to allocate call entry 1 in tcp_init\n"); @@ -664,16 +669,16 @@ tcp_fillheaders(struct tcpcb *tp, void *ip_ptr, void *tcp_ptr) ip6 = (struct ip6_hdr *)ip_ptr; ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | - (inp->inp_flow & IPV6_FLOWINFO_MASK); + (inp->inp_flow & IPV6_FLOWINFO_MASK); ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) | - (IPV6_VERSION & IPV6_VERSION_MASK); + 
(IPV6_VERSION & IPV6_VERSION_MASK); ip6->ip6_plen = htons(sizeof(struct tcphdr)); ip6->ip6_nxt = IPPROTO_TCP; ip6->ip6_hlim = 0; ip6->ip6_src = inp->in6p_laddr; ip6->ip6_dst = inp->in6p_faddr; tcp_hdr->th_sum = in6_pseudo(&inp->in6p_laddr, &inp->in6p_faddr, - htonl(sizeof (struct tcphdr) + IPPROTO_TCP)); + htonl(sizeof(struct tcphdr) + IPPROTO_TCP)); } else #endif { @@ -717,13 +722,14 @@ tcp_maketemplate(struct tcpcb *tp) struct tcptemp *n; m = m_get(M_DONTWAIT, MT_HEADER); - if (m == NULL) - return (0); + if (m == NULL) { + return 0; + } m->m_len = sizeof(struct tcptemp); n = mtod(m, struct tcptemp *); tcp_fillheaders(tp, (void *)&n->tt_ipgen, (void *)&n->tt_t); - return (n); + return n; } /* @@ -768,13 +774,14 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, if (tp) { if (!(flags & TH_RST)) { win = tcp_sbspace(tp); - if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) + if (win > (int32_t)TCP_MAXWIN << tp->rcv_scale) { win = (int32_t)TCP_MAXWIN << tp->rcv_scale; + } } #if INET6 - if (isipv6) + if (isipv6) { ro6 = &tp->t_inpcb->in6p_route; - else + } else #endif /* INET6 */ ro = &tp->t_inpcb->inp_route; } else { @@ -790,15 +797,16 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, } } if (m == 0) { - m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */ - if (m == NULL) + m = m_gethdr(M_DONTWAIT, MT_HEADER); /* MAC-OK */ + if (m == NULL) { return; + } tlen = 0; m->m_data += max_linkhdr; #if INET6 if (isipv6) { VERIFY((MHLEN - max_linkhdr) >= - (sizeof (*ip6) + sizeof (*nth))); + (sizeof(*ip6) + sizeof(*nth))); bcopy((caddr_t)ip6, mtod(m, caddr_t), sizeof(struct ip6_hdr)); ip6 = mtod(m, struct ip6_hdr *); @@ -807,16 +815,16 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, #endif /* INET6 */ { VERIFY((MHLEN - max_linkhdr) >= - (sizeof (*ip) + sizeof (*nth))); + (sizeof(*ip) + sizeof(*nth))); bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip)); ip = mtod(m, struct ip *); nth = (struct tcphdr *)(void *)(ip + 1); } bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr)); #if MPTCP - if ((tp) && (tp->t_mpflags & TMPF_RESET)) + if ((tp) && (tp->t_mpflags & TMPF_RESET)) { flags = (TH_RST | TH_ACK); - else + } else #endif flags = TH_ACK; } else { @@ -825,7 +833,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, m->m_data = (caddr_t)ipgen; /* m_len is set later */ tlen = 0; -#define xchg(a, b, type) { type t; t = a; a = b; b = t; } +#define xchg(a, b, type) { type t; t = a; a = b; b = t; } #if INET6 if (isipv6) { /* Expect 32-bit aligned IP on strict-align platforms */ @@ -854,13 +862,13 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, } #if INET6 if (isipv6) { - ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) + - tlen)); - tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr); + ip6->ip6_plen = htons((u_short)(sizeof(struct tcphdr) + + tlen)); + tlen += sizeof(struct ip6_hdr) + sizeof(struct tcphdr); } else #endif { - tlen += sizeof (struct tcpiphdr); + tlen += sizeof(struct tcpiphdr); ip->ip_len = tlen; ip->ip_ttl = ip_defttl; } @@ -886,18 +894,19 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, nth->th_seq = htonl(seq); nth->th_ack = htonl(ack); nth->th_x2 = 0; - nth->th_off = sizeof (struct tcphdr) >> 2; + nth->th_off = sizeof(struct tcphdr) >> 2; nth->th_flags = flags; - if (tp) + if (tp) { nth->th_win = htons((u_short) (win >> tp->rcv_scale)); - else + } else { nth->th_win = htons((u_short)win); + } 
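	/*
	 * Illustration of the shift above, with assumed example values:
	 * under rcv_scale == 3 a usable window of 262144 bytes goes on
	 * the wire as htons(262144 >> 3) == htons(32768), and the
	 * earlier clamp keeps win at or below TCP_MAXWIN << rcv_scale
	 * so the shifted value always fits the 16-bit th_win field.
	 */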
nth->th_urp = 0; #if INET6 if (isipv6) { nth->th_sum = 0; nth->th_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, - htonl((tlen - sizeof (struct ip6_hdr)) + IPPROTO_TCP)); + htonl((tlen - sizeof(struct ip6_hdr)) + IPPROTO_TCP)); m->m_pkthdr.csum_flags = CSUM_TCPIPV6; m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); ip6->ip6_hlim = in6_selecthlim(tp ? tp->t_inpcb : NULL, @@ -906,13 +915,14 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, #endif /* INET6 */ { nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, - htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p))); + htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p))); m->m_pkthdr.csum_flags = CSUM_TCP; m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); } #if TCPDEBUG - if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) + if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) { tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0); + } #endif #if NECP @@ -921,7 +931,7 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, #if IPSEC if (tp != NULL && tp->t_inpcb->inp_sp != NULL && - ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) { + ipsec_setsocket(m, tp ? tp->t_inpcb->inp_socket : NULL) != 0) { m_freem(m); return; } @@ -954,20 +964,26 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, ip6oa.ip6oa_sotc = SO_TC_UNSPEC; ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; - if (tra->ifscope != IFSCOPE_NONE) + if (tra->ifscope != IFSCOPE_NONE) { ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; - if (tra->nocell) + } + if (tra->nocell) { ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR; - if (tra->noexpensive) + } + if (tra->noexpensive) { ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE; - if (tra->awdl_unrestricted) + } + if (tra->awdl_unrestricted) { ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED; - if (tra->intcoproc_allowed) + } + if (tra->intcoproc_allowed) { ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED; + } ip6oa.ip6oa_sotc = sotc; if (tp != NULL) { - if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) + if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; + } ip6oa.ip6oa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype; } (void) ip6_output(m, NULL, ro6, IPV6_OUTARGS, NULL, @@ -979,8 +995,9 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, tp->t_inpcb->in6p_last_outifp = outif; } - if (ro6 == &sro6) + if (ro6 == &sro6) { ROUTE_RELEASE(ro6); + } } else #endif /* INET6 */ { @@ -991,18 +1008,23 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m, ipoa.ipoa_sotc = SO_TC_UNSPEC; ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; - if (tra->ifscope != IFSCOPE_NONE) + if (tra->ifscope != IFSCOPE_NONE) { ipoa.ipoa_flags |= IPOAF_BOUND_IF; - if (tra->nocell) + } + if (tra->nocell) { ipoa.ipoa_flags |= IPOAF_NO_CELLULAR; - if (tra->noexpensive) + } + if (tra->noexpensive) { ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE; - if (tra->awdl_unrestricted) + } + if (tra->awdl_unrestricted) { ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED; + } ipoa.ipoa_sotc = sotc; if (tp != NULL) { - if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) + if ((tp->t_inpcb->inp_socket->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; + } ipoa.ipoa_netsvctype = tp->t_inpcb->inp_socket->so_netsvctype; } if (ro != &sro) { @@ -1018,7 +1040,6 @@ tcp_respond(struct tcpcb *tp, 
void *ipgen, struct tcphdr *th, struct mbuf *m, (outif = sro.ro_rt->rt_ifp) != tp->t_inpcb->inp_last_outifp) { tp->t_inpcb->inp_last_outifp = outif; - } if (ro != &sro) { /* Synchronize cached PCB route */ @@ -1048,24 +1069,26 @@ tcp_newtcpcb(struct inpcb *inp) calculate_tcp_clock(); if ((so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) == 0) { - it = (struct inp_tp *)(void *)inp; - tp = &it->tcb; + it = (struct inp_tp *)(void *)inp; + tp = &it->tcb; } else { - tp = (struct tcpcb *)(void *)inp->inp_saved_ppcb; + tp = (struct tcpcb *)(void *)inp->inp_saved_ppcb; } bzero((char *) tp, sizeof(struct tcpcb)); LIST_INIT(&tp->t_segq); tp->t_maxseg = tp->t_maxopd = #if INET6 - isipv6 ? tcp_v6mssdflt : + isipv6 ? tcp_v6mssdflt : #endif /* INET6 */ - tcp_mssdflt; + tcp_mssdflt; - if (tcp_do_rfc1323) - tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP); - if (tcp_do_sack) + if (tcp_do_rfc1323) { + tp->t_flags = (TF_REQ_SCALE | TF_REQ_TSTMP); + } + if (tcp_do_sack) { tp->t_flagsext |= TF_SACK_ENABLE; + } TAILQ_INIT(&tp->snd_holes); SLIST_INIT(&tp->t_rxt_segments); @@ -1082,16 +1105,18 @@ tcp_newtcpcb(struct inpcb *inp) tp->t_rttmin = tcp_TCPTV_MIN; tp->t_rxtcur = TCPTV_RTOBASE; - if (tcp_use_newreno) + if (tcp_use_newreno) { /* use newreno by default */ tp->tcp_cc_index = TCP_CC_ALGO_NEWRENO_INDEX; - else + } else { tp->tcp_cc_index = TCP_CC_ALGO_CUBIC_INDEX; + } tcp_cc_allocate_state(tp); - if (CC_ALGO(tp)->init != NULL) + if (CC_ALGO(tp)->init != NULL) { CC_ALGO(tp)->init(tp); + } tp->snd_cwnd = TCP_CC_CWND_INIT_BYTES; tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT; @@ -1107,8 +1132,9 @@ tcp_newtcpcb(struct inpcb *inp) tp->t_flagsext |= TF_MEASURESNDBW; if (tp->t_bwmeas == NULL) { tp->t_bwmeas = tcp_bwmeas_alloc(tp); - if (tp->t_bwmeas == NULL) + if (tp->t_bwmeas == NULL) { tp->t_flagsext &= ~TF_MEASURESNDBW; + } } /* Clear time wait tailq entry */ @@ -1122,7 +1148,7 @@ tcp_newtcpcb(struct inpcb *inp) */ inp->inp_ip_ttl = ip_defttl; inp->inp_ppcb = (caddr_t)tp; - return (tp); /* XXX */ + return tp; /* XXX */ } /* @@ -1140,16 +1166,18 @@ tcp_drop(struct tcpcb *tp, int errno) if (TCPS_HAVERCVDSYN(tp->t_state)) { DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_CLOSED); + struct tcpcb *, tp, int32_t, TCPS_CLOSED); tp->t_state = TCPS_CLOSED; (void) tcp_output(tp); tcpstat.tcps_drops++; - } else + } else { tcpstat.tcps_conndrops++; - if (errno == ETIMEDOUT && tp->t_softerror) + } + if (errno == ETIMEDOUT && tp->t_softerror) { errno = tp->t_softerror; + } so->so_error = errno; - return (tcp_close(tp)); + return tcp_close(tp); } void @@ -1163,11 +1191,12 @@ tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt) * XXX the lock bit for RTT indicates that the value * is also a minimum value; this is subject to time. */ - if (rt->rt_rmx.rmx_locks & RTV_RTT) + if (rt->rt_rmx.rmx_locks & RTV_RTT) { tp->t_rttmin = rtt / (RTM_RTTUNIT / TCP_RETRANSHZ); - else + } else { tp->t_rttmin = isnetlocal ? 
tcp_TCPTV_MIN : TCPTV_REXMTMIN; + } tp->t_srtt = rtt / (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE)); tcpstat.tcps_usedrtt++; @@ -1181,9 +1210,9 @@ tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt) tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; } TCPT_RANGESET(tp->t_rxtcur, - ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, - tp->t_rttmin, TCPTV_REXMTMAX, - TCP_ADD_REXMTSLOP(tp)); + ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, + tp->t_rttmin, TCPTV_REXMTMAX, + TCP_ADD_REXMTSLOP(tp)); } } @@ -1193,8 +1222,9 @@ tcp_create_ifnet_stats_per_flow(struct tcpcb *tp, { struct inpcb *inp; struct socket *so; - if (tp == NULL || ifs == NULL) + if (tp == NULL || ifs == NULL) { return; + } bzero(ifs, sizeof(*ifs)); inp = tp->t_inpcb; @@ -1223,7 +1253,7 @@ tcp_create_ifnet_stats_per_flow(struct tcpcb *tp, } else { ifs->bw_sndbw_max = 0; } - if (tp->t_bwmeas!= NULL && tp->t_bwmeas->bw_rcvbw_max > 0) { + if (tp->t_bwmeas != NULL && tp->t_bwmeas->bw_rcvbw_max > 0) { ifs->bw_rcvbw_max = tp->t_bwmeas->bw_rcvbw_max; } else { ifs->bw_rcvbw_max = 0; @@ -1270,8 +1300,9 @@ tcp_flow_ecn_perf_stats(struct ifnet_stats_per_flow *ifs, /* SACK episodes */ stat->sack_episodes += ifs->sack_recovery_episodes; - if (ifs->connreset) + if (ifs->connreset) { stat->rst_drop++; + } } static inline void @@ -1331,12 +1362,12 @@ tcp_flow_lim_stats(struct ifnet_stats_per_flow *ifs, /* connection timeouts */ stat->lim_conn_attempts++; - if (ifs->conntimeout) + if (ifs->conntimeout) { stat->lim_conn_timeouts++; + } /* bytes sent using background delay-based algorithms */ stat->lim_bk_txpkts += ifs->bk_txpackets; - } /* @@ -1359,8 +1390,9 @@ tcp_close(struct tcpcb *tp) struct ifnet_stats_per_flow ifs; /* tcp_close was called previously, bail */ - if (inp->inp_ppcb == NULL) - return (NULL); + if (inp->inp_ppcb == NULL) { + return NULL; + } tcp_canceltimers(tp); KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_START, tp, 0, 0, 0, 0); @@ -1378,13 +1410,13 @@ tcp_close(struct tcpcb *tp) * with the cleanup. */ if ((tp->t_flags & TF_CLOSING) || - inp->inp_sndinprog_cnt > 0) { + inp->inp_sndinprog_cnt > 0) { tp->t_flags |= TF_CLOSING; - return (NULL); + return NULL; } DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_CLOSED); + struct tcpcb *, tp, int32_t, TCPS_CLOSED); #if INET6 ro = (isipv6 ? (struct route *)&inp->in6p_route : &inp->inp_route); @@ -1392,8 +1424,9 @@ tcp_close(struct tcpcb *tp) ro = &inp->inp_route; #endif rt = ro->ro_rt; - if (rt != NULL) + if (rt != NULL) { RT_LOCK_SPIN(rt); + } /* * If we got enough samples through the srtt filter, @@ -1413,13 +1446,14 @@ tcp_close(struct tcpcb *tp) if (isipv6) { struct sockaddr_in6 *sin6; - if (rt == NULL) + if (rt == NULL) { goto no_valid_rt; + } sin6 = (struct sockaddr_in6 *)(void *)rt_key(rt); - if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) + if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { goto no_valid_rt; - } - else + } + } else #endif /* INET6 */ if (ROUTE_UNUSABLE(ro) || SIN(rt_key(rt))->sin_addr.s_addr == INADDR_ANY) { @@ -1434,7 +1468,7 @@ tcp_close(struct tcpcb *tp) if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) { i = tp->t_srtt * (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTT_SCALE)); - if (rt->rt_rmx.rmx_rtt && i) + if (rt->rt_rmx.rmx_rtt && i) { /* * filter this update to half the old & half * the new values, converting scale. 
@@ -1443,18 +1477,20 @@ tcp_close(struct tcpcb *tp) */ rt->rt_rmx.rmx_rtt = (rt->rt_rmx.rmx_rtt + i) / 2; - else + } else { rt->rt_rmx.rmx_rtt = i; + } tcpstat.tcps_cachedrtt++; } if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) { i = tp->t_rttvar * (RTM_RTTUNIT / (TCP_RETRANSHZ * TCP_RTTVAR_SCALE)); - if (rt->rt_rmx.rmx_rttvar && i) + if (rt->rt_rmx.rmx_rttvar && i) { rt->rt_rmx.rmx_rttvar = (rt->rt_rmx.rmx_rttvar + i) / 2; - else + } else { rt->rt_rmx.rmx_rttvar = i; + } tcpstat.tcps_cachedrttvar++; } /* @@ -1472,10 +1508,11 @@ tcp_close(struct tcpcb *tp) * way to calculate the pipesize, it will have to do. */ i = tp->snd_ssthresh; - if (rt->rt_rmx.rmx_sendpipe != 0) + if (rt->rt_rmx.rmx_sendpipe != 0) { dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2); - else + } else { dosavessthresh = (i < so->so_snd.sb_hiwat / 2); + } if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 && i != 0 && rt->rt_rmx.rmx_ssthresh != 0) || dosavessthresh) { @@ -1484,19 +1521,21 @@ tcp_close(struct tcpcb *tp) * packets then to packet data bytes. */ i = (i + tp->t_maxseg / 2) / tp->t_maxseg; - if (i < 2) + if (i < 2) { i = 2; + } i *= (u_int32_t)(tp->t_maxseg + #if INET6 - isipv6 ? sizeof (struct ip6_hdr) + - sizeof (struct tcphdr) : + isipv6 ? sizeof(struct ip6_hdr) + + sizeof(struct tcphdr) : #endif /* INET6 */ - sizeof (struct tcpiphdr)); - if (rt->rt_rmx.rmx_ssthresh) + sizeof(struct tcpiphdr)); + if (rt->rt_rmx.rmx_ssthresh) { rt->rt_rmx.rmx_ssthresh = (rt->rt_rmx.rmx_ssthresh + i) / 2; - else + } else { rt->rt_rmx.rmx_ssthresh = i; + } tcpstat.tcps_cachedssthresh++; } } @@ -1512,8 +1551,9 @@ tcp_close(struct tcpcb *tp) } no_valid_rt: - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); + } /* free the reassembly queue, if any */ (void) tcp_freeq(tp); @@ -1532,12 +1572,14 @@ no_valid_rt: } tcp_rxtseg_clean(tp); /* Free the packet list */ - if (tp->t_pktlist_head != NULL) + if (tp->t_pktlist_head != NULL) { m_freem_list(tp->t_pktlist_head); + } TCP_PKTLIST_CLEAR(tp); - if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) - inp->inp_saved_ppcb = (caddr_t) tp; + if (so->so_flags1 & SOF1_CACHED_IN_SOCK_LAYER) { + inp->inp_saved_ppcb = (caddr_t) tp; + } tp->t_state = TCPS_CLOSED; @@ -1563,8 +1605,9 @@ no_valid_rt: */ if ((so->so_options & SO_NOWAKEFROMSLEEP) && inp->inp_state != INPCB_STATE_DEAD && - !(inp->inp_flags2 & INP2_TIMEWAIT)) + !(inp->inp_flags2 & INP2_TIMEWAIT)) { socket_post_kev_msg_closed(so); + } if (CC_ALGO(tp)->cleanup != NULL) { CC_ALGO(tp)->cleanup(tp); @@ -1587,9 +1630,9 @@ no_valid_rt: } #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - else + } else #endif /* INET6 */ in_pcbdetach(inp); @@ -1600,7 +1643,7 @@ no_valid_rt: tcpstat.tcps_closed++; KERNEL_DEBUG(DBG_FNC_TCP_CLOSE | DBG_FUNC_END, tcpstat.tcps_closed, 0, 0, 0, 0); - return (NULL); + return NULL; } int @@ -1616,7 +1659,7 @@ tcp_freeq(struct tcpcb *tp) rv = 1; } tp->t_reassqlen = 0; - return (rv); + return rv; } @@ -1632,23 +1675,25 @@ tcp_drain(void) struct inpcb *inp; struct tcpcb *tp; - if (!lck_rw_try_lock_exclusive(tcbinfo.ipi_lock)) + if (!lck_rw_try_lock_exclusive(tcbinfo.ipi_lock)) { return; + } LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) { if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != - WNT_STOPUSING) { + WNT_STOPUSING) { socket_lock(inp->inp_socket, 1); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) - == WNT_STOPUSING) { + == WNT_STOPUSING) { /* lost a race, try the next one */ socket_unlock(inp->inp_socket, 1); continue; } tp = intotcpcb(inp); - if (do_tcpdrain) + if 
(do_tcpdrain) { tcp_freeq(tp); + } so_drain_extended_bk_idle(inp->inp_socket); @@ -1656,7 +1701,6 @@ tcp_drain(void) } } lck_rw_done(tcbinfo.ipi_lock); - } /* @@ -1672,9 +1716,9 @@ tcp_notify(struct inpcb *inp, int error) { struct tcpcb *tp; - if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) + if (inp == NULL || (inp->inp_state == INPCB_STATE_DEAD)) { return; /* pcb is gone already */ - + } tp = (struct tcpcb *)inp->inp_ppcb; VERIFY(tp != NULL); @@ -1693,10 +1737,11 @@ tcp_notify(struct inpcb *inp, int error) inp->inp_route.ro_rt = (struct rtentry *)NULL; } } else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 && - tp->t_softerror) + tp->t_softerror) { tcp_drop(tp, error); - else + } else { tp->t_softerror = error; + } #if 0 wakeup((caddr_t) &so->so_timeo); sorwakeup(so); @@ -1709,13 +1754,14 @@ tcp_bwmeas_alloc(struct tcpcb *tp) { struct bwmeas *elm; elm = zalloc(tcp_bwmeas_zone); - if (elm == NULL) - return (elm); + if (elm == NULL) { + return elm; + } bzero(elm, bwmeas_elm_size); elm->bw_minsizepkts = TCP_BWMEAS_BURST_MINSIZE; elm->bw_minsize = elm->bw_minsizepkts * tp->t_maxseg; - return (elm); + return elm; } void @@ -1735,21 +1781,25 @@ get_tcp_inp_list(struct inpcb **inp_list, int n, inp_gen_t gencnt) LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) { if (inp->inp_gencnt <= gencnt && - inp->inp_state != INPCB_STATE_DEAD) + inp->inp_state != INPCB_STATE_DEAD) { inp_list[i++] = inp; - if (i >= n) + } + if (i >= n) { break; + } } TAILQ_FOREACH(tp, &tcp_tw_tailq, t_twentry) { inp = tp->t_inpcb; if (inp->inp_gencnt <= gencnt && - inp->inp_state != INPCB_STATE_DEAD) + inp->inp_state != INPCB_STATE_DEAD) { inp_list[i++] = inp; - if (i >= n) + } + if (i >= n) { break; + } } - return (i); + return i; } /* @@ -1833,14 +1883,14 @@ tcp_pcblist SYSCTL_HANDLER_ARGS if (req->oldptr == USER_ADDR_NULL) { n = tcbinfo.ipi_count; req->oldidx = 2 * (sizeof(xig)) - + (n + n/8) * sizeof(struct xtcpcb); + + (n + n / 8) * sizeof(struct xtcpcb); lck_rw_done(tcbinfo.ipi_lock); - return (0); + return 0; } if (req->newptr != USER_ADDR_NULL) { lck_rw_done(tcbinfo.ipi_lock); - return (EPERM); + return EPERM; } /* @@ -1857,20 +1907,20 @@ tcp_pcblist SYSCTL_HANDLER_ARGS error = SYSCTL_OUT(req, &xig, sizeof(xig)); if (error) { lck_rw_done(tcbinfo.ipi_lock); - return (error); + return error; } /* * We are done if there is no pcb */ if (n == 0) { lck_rw_done(tcbinfo.ipi_lock); - return (0); + return 0; } - inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK); + inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK); if (inp_list == 0) { lck_rw_done(tcbinfo.ipi_lock); - return (ENOMEM); + return ENOMEM; } n = get_tcp_inp_list(inp_list, n, gencnt); @@ -1883,8 +1933,9 @@ tcp_pcblist SYSCTL_HANDLER_ARGS inp = inp_list[i]; - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { continue; + } socket_lock(inp->inp_socket, 1); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { socket_unlock(inp->inp_socket, 1); @@ -1906,8 +1957,9 @@ tcp_pcblist SYSCTL_HANDLER_ARGS } else { bzero((char *) &xt.xt_tp, sizeof(xt.xt_tp)); } - if (inp->inp_socket) + if (inp->inp_socket) { sotoxsocket(inp->inp_socket, &xt.xt_socket); + } socket_unlock(inp->inp_socket, 1); @@ -1930,12 +1982,12 @@ tcp_pcblist SYSCTL_HANDLER_ARGS } FREE(inp_list, M_TEMP); lck_rw_done(tcbinfo.ipi_lock); - return (error); + return error; } SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - 
tcp_pcblist, "S,xtcpcb", "List of active TCP connections"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + tcp_pcblist, "S,xtcpcb", "List of active TCP connections"); #if !CONFIG_EMBEDDED @@ -2015,14 +2067,14 @@ tcp_pcblist64 SYSCTL_HANDLER_ARGS if (req->oldptr == USER_ADDR_NULL) { n = tcbinfo.ipi_count; req->oldidx = 2 * (sizeof(xig)) - + (n + n/8) * sizeof(struct xtcpcb64); + + (n + n / 8) * sizeof(struct xtcpcb64); lck_rw_done(tcbinfo.ipi_lock); - return (0); + return 0; } if (req->newptr != USER_ADDR_NULL) { lck_rw_done(tcbinfo.ipi_lock); - return (EPERM); + return EPERM; } /* @@ -2039,20 +2091,20 @@ tcp_pcblist64 SYSCTL_HANDLER_ARGS error = SYSCTL_OUT(req, &xig, sizeof(xig)); if (error) { lck_rw_done(tcbinfo.ipi_lock); - return (error); + return error; } /* * We are done if there is no pcb */ if (n == 0) { lck_rw_done(tcbinfo.ipi_lock); - return (0); + return 0; } - inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK); + inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK); if (inp_list == 0) { lck_rw_done(tcbinfo.ipi_lock); - return (ENOMEM); + return ENOMEM; } n = get_tcp_inp_list(inp_list, n, gencnt); @@ -2064,8 +2116,9 @@ tcp_pcblist64 SYSCTL_HANDLER_ARGS inp = inp_list[i]; - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { continue; + } socket_lock(inp->inp_socket, 1); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { socket_unlock(inp->inp_socket, 1); @@ -2081,40 +2134,42 @@ tcp_pcblist64 SYSCTL_HANDLER_ARGS inpcb_to_xinpcb64(inp, &xt.xt_inpcb); xt.xt_inpcb.inp_ppcb = (uint64_t)VM_KERNEL_ADDRPERM(inp->inp_ppcb); - if (inp->inp_ppcb != NULL) + if (inp->inp_ppcb != NULL) { tcpcb_to_xtcpcb64((struct tcpcb *)inp->inp_ppcb, &xt); - if (inp->inp_socket) + } + if (inp->inp_socket) { sotoxsocket64(inp->inp_socket, &xt.xt_inpcb.xi_socket); + } socket_unlock(inp->inp_socket, 1); error = SYSCTL_OUT(req, &xt, sizeof(xt)); } if (!error) { - /* - * Give the user an updated idea of our state. - * If the generation differs from what we told - * her before, she knows that something happened - * while we were processing this request, and it - * might be necessary to retry. - */ - bzero(&xig, sizeof(xig)); - xig.xig_len = sizeof(xig); - xig.xig_gen = tcbinfo.ipi_gencnt; - xig.xig_sogen = so_gencnt; - xig.xig_count = tcbinfo.ipi_count; - error = SYSCTL_OUT(req, &xig, sizeof(xig)); + /* + * Give the user an updated idea of our state. + * If the generation differs from what we told + * her before, she knows that something happened + * while we were processing this request, and it + * might be necessary to retry. 
+ */ + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof(xig); + xig.xig_gen = tcbinfo.ipi_gencnt; + xig.xig_sogen = so_gencnt; + xig.xig_count = tcbinfo.ipi_count; + error = SYSCTL_OUT(req, &xig, sizeof(xig)); } FREE(inp_list, M_TEMP); lck_rw_done(tcbinfo.ipi_lock); - return (error); + return error; } SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist64, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + tcp_pcblist64, "S,xtcpcb64", "List of active TCP connections"); #endif /* !CONFIG_EMBEDDED */ @@ -2126,25 +2181,25 @@ tcp_pcblist_n SYSCTL_HANDLER_ARGS error = get_pcblist_n(IPPROTO_TCP, req, &tcbinfo); - return (error); + return error; } SYSCTL_PROC(_net_inet_tcp, OID_AUTO, pcblist_n, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + tcp_pcblist_n, "S,xtcpcb_n", "List of active TCP connections"); static int tcp_progress_indicators SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - return (ntstat_tcp_progress_indicators(req)); + return ntstat_tcp_progress_indicators(req); } SYSCTL_PROC(_net_inet_tcp, OID_AUTO, progress, - CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0, - tcp_progress_indicators, "S", "Various items that indicate the current state of progress on the link"); + CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, 0, + tcp_progress_indicators, "S", "Various items that indicate the current state of progress on the link"); __private_extern__ void @@ -2158,13 +2213,13 @@ tcp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags, __private_extern__ uint32_t tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags) { - return (inpcb_count_opportunistic(ifindex, &tcbinfo, flags)); + return inpcb_count_opportunistic(ifindex, &tcbinfo, flags); } __private_extern__ uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa) { - return (inpcb_find_anypcb_byaddr(ifa, &tcbinfo)); + return inpcb_find_anypcb_byaddr(ifa, &tcbinfo); } static void @@ -2174,9 +2229,10 @@ tcp_handle_msgsize(struct ip *ip, struct inpcb *inp) u_short ifscope = IFSCOPE_NONE; int mtu; struct sockaddr_in icmpsrc = { - sizeof (struct sockaddr_in), - AF_INET, 0, { 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0 } }; + sizeof(struct sockaddr_in), + AF_INET, 0, { 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0 } + }; struct icmp *icp = NULL; icp = (struct icmp *)(void *) @@ -2198,8 +2254,9 @@ tcp_handle_msgsize(struct ip *ip, struct inpcb *inp) * lock bit, indicating that we are no longer doing MTU * discovery. */ - if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) + if (ROUTE_UNUSABLE(&(inp->inp_route)) == false) { rt = inp->inp_route.ro_rt; + } /* * icmp6_mtudisc_update scopes the routing lookup @@ -2212,12 +2269,13 @@ tcp_handle_msgsize(struct ip *ip, struct inpcb *inp) * Take the interface scope from cached route or * the last outgoing interface from inp */ - if (rt != NULL) + if (rt != NULL) { ifscope = (rt->rt_ifp != NULL) ? rt->rt_ifp->if_index : IFSCOPE_NONE; - else + } else { ifscope = (inp->inp_last_outifp != NULL) ? inp->inp_last_outifp->if_index : IFSCOPE_NONE; + } if ((rt == NULL) || !(rt->rt_flags & RTF_HOST) || @@ -2242,17 +2300,18 @@ tcp_handle_msgsize(struct ip *ip, struct inpcb *inp) * < route's MTU. We may want to adopt * that change. */ - if (mtu == 0) + if (mtu == 0) { mtu = ip_next_mtu(rt->rt_rmx. 
rmx_mtu, 1); + } #if DEBUG_MTUDISC printf("MTU for %s reduced to %d\n", inet_ntop(AF_INET, &icmpsrc.sin_addr, ipv4str, - sizeof (ipv4str)), mtu); + sizeof(ipv4str)), mtu); #endif if (mtu < max(296, (tcp_minmss + - sizeof (struct tcpiphdr)))) { + sizeof(struct tcpiphdr)))) { rt->rt_rmx.rmx_locks |= RTV_MTU; } else if (rt->rt_rmx.rmx_mtu > mtu) { rt->rt_rmx.rmx_mtu = mtu; @@ -2276,31 +2335,36 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp void (*notify)(struct inpcb *, int) = tcp_notify; faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr; - if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) + if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) { return; + } - if ((unsigned)cmd >= PRC_NCMDS) + if ((unsigned)cmd >= PRC_NCMDS) { return; + } /* Source quench is deprecated */ - if (cmd == PRC_QUENCH) - return; + if (cmd == PRC_QUENCH) { + return; + } - if (cmd == PRC_MSGSIZE) + if (cmd == PRC_MSGSIZE) { notify = tcp_mtudisc; - else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || - cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL || - cmd == PRC_TIMXCEED_INTRANS) && ip) + } else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || + cmd == PRC_UNREACH_PORT || cmd == PRC_UNREACH_PROTOCOL || + cmd == PRC_TIMXCEED_INTRANS) && ip) { notify = tcp_drop_syn_sent; + } /* * Hostdead is ugly because it goes linearly through all PCBs. * XXX: We never get this from ICMP, otherwise it makes an * excellent DoS attack on machines with many connections. */ - else if (cmd == PRC_HOSTDEAD) + else if (cmd == PRC_HOSTDEAD) { ip = NULL; - else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) + } else if (inetctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) { return; + } if (ip == NULL) { @@ -2335,8 +2399,9 @@ tcp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet *ifp tp = intotcpcb(inp); if (SEQ_GEQ(icmp_tcp_seq, tp->snd_una) && SEQ_LT(icmp_tcp_seq, tp->snd_max)) { - if (cmd == PRC_MSGSIZE) + if (cmd == PRC_MSGSIZE) { tcp_handle_msgsize(ip, inp); + } (*notify)(inp, inetctlerrmap[cmd]); } @@ -2350,7 +2415,6 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) { tcp_seq icmp_tcp_seq; struct in6_addr *dst; - struct tcphdr *th; void (*notify)(struct inpcb *, int) = tcp_notify; struct ip6_hdr *ip6; struct mbuf *m; @@ -2362,16 +2426,24 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) unsigned int mtu; unsigned int off; + struct tcp_ports { + uint16_t th_sport; + uint16_t th_dport; + } t_ports; + if (sa->sa_family != AF_INET6 || - sa->sa_len != sizeof(struct sockaddr_in6)) + sa->sa_len != sizeof(struct sockaddr_in6)) { return; + } /* Source quench is deprecated */ - if (cmd == PRC_QUENCH) + if (cmd == PRC_QUENCH) { return; + } - if ((unsigned)cmd >= PRC_NCMDS) + if ((unsigned)cmd >= PRC_NCMDS) { return; + } /* if the parameter is from icmp6, decode it. 
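 * (Illustrative note: the opaque parameter d is a struct
 * ip6ctlparam handed up by the ICMPv6 layer; the branch below
 * unpacks from it the offending mbuf, the inner IPv6 header, the
 * offset of the transport header and the source/destination
 * addresses.)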
*/ if (d != NULL) { @@ -2385,26 +2457,28 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) } else { m = NULL; ip6 = NULL; - off = 0; /* fool gcc */ + off = 0; /* fool gcc */ sa6_src = &sa6_any; dst = NULL; } - if (cmd == PRC_MSGSIZE) + if (cmd == PRC_MSGSIZE) { notify = tcp_mtudisc; - else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || + } else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB || cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && - ip6 != NULL) + ip6 != NULL) { notify = tcp_drop_syn_sent; + } /* * Hostdead is ugly because it goes linearly through all PCBs. * XXX: We never get this from ICMP, otherwise it makes an * excellent DoS attack on machines with many connections. */ - else if (cmd == PRC_HOSTDEAD) + else if (cmd == PRC_HOSTDEAD) { ip6 = NULL; - else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) + } else if (inet6ctlerrmap[cmd] == 0 && !PRC_IS_REDIRECT(cmd)) { return; + } if (ip6 == NULL) { @@ -2413,12 +2487,21 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) return; } + /* Check if we can safely get the ports from the tcp hdr */ if (m == NULL || - (m->m_pkthdr.len < (int32_t) (off + offsetof(struct tcphdr, th_ack)))) + (m->m_pkthdr.len < + (int32_t) (off + sizeof(struct tcp_ports)))) { return; + } + bzero(&t_ports, sizeof(struct tcp_ports)); + m_copydata(m, off, sizeof(struct tcp_ports), (caddr_t)&t_ports); - th = (struct tcphdr *)(void *)mtodo(m, off); - icmp_tcp_seq = ntohl(th->th_seq); + off += sizeof(struct tcp_ports); + if (m->m_pkthdr.len < (int32_t) (off + sizeof(tcp_seq))) { + return; + } + m_copydata(m, off, sizeof(tcp_seq), (caddr_t)&icmp_tcp_seq); + icmp_tcp_seq = ntohl(icmp_tcp_seq); if (cmd == PRC_MSGSIZE) { mtu = ntohl(icmp6->icmp6_mtu); @@ -2426,12 +2509,13 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) * If no alternative MTU was proposed, or the proposed * MTU was too small, set to the min. */ - if (mtu < IPV6_MMTU) + if (mtu < IPV6_MMTU) { mtu = IPV6_MMTU - 8; + } } - inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, th->th_dport, - &ip6->ip6_src, th->th_sport, 0, NULL); + inp = in6_pcblookup_hash(&tcbinfo, &ip6->ip6_dst, t_ports.th_dport, + &ip6->ip6_src, t_ports.th_sport, 0, NULL); if (inp == NULL || inp->inp_socket == NULL) { @@ -2458,10 +2542,12 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) * is smaller than the current one. */ if (mtu < tp->t_maxseg + - (sizeof (*th) + sizeof (*ip6))) + (sizeof(struct tcphdr) + sizeof(struct ip6_hdr))) { (*notify)(inp, inetctlerrmap[cmd]); - } else + } + } else { (*notify)(inp, inetctlerrmap[cmd]); + } } } socket_unlock(inp->inp_socket, 1); @@ -2512,7 +2598,7 @@ tcp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) * */ -#define ISN_BYTES_PER_SECOND 1048576 +#define ISN_BYTES_PER_SECOND 1048576 tcp_seq tcp_new_isn(struct tcpcb *tp) @@ -2528,17 +2614,17 @@ tcp_new_isn(struct tcpcb *tp) if (((tp->t_state == TCPS_LISTEN) || (tp->t_state == TCPS_TIME_WAIT)) && tcp_strict_rfc1948 == 0) #ifdef __APPLE__ - return (RandomULong()); + { return RandomULong(); } #else - return (arc4random()); + { return arc4random(); } #endif getmicrotime(&timenow); /* Seed if this is the first use, reseed if requested. 
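 * (Illustrative note: the secret keys an MD5 hash over the
 * connection 4-tuple, RFC 1948 style, and the digest is then
 * advanced by timenow.tv_sec * (ISN_BYTES_PER_SECOND / hz) so that
 * ISNs chosen for the same 4-tuple keep increasing over time.)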
*/ if ((isn_last_reseed == 0) || ((tcp_strict_rfc1948 == 0) && (tcp_isn_reseed_interval > 0) && - (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz) - < (u_int)timenow.tv_sec))) { + (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval * hz) + < (u_int)timenow.tv_sec))) { #ifdef __APPLE__ read_frandom(&isn_secret, sizeof(isn_secret)); #else @@ -2571,7 +2657,7 @@ tcp_new_isn(struct tcpcb *tp) MD5Final((u_char *) &md5_buffer, &isn_ctx); new_isn = (tcp_seq) md5_buffer[0]; new_isn += timenow.tv_sec * (ISN_BYTES_PER_SECOND / hz); - return (new_isn); + return new_isn; } @@ -2585,8 +2671,9 @@ tcp_drop_syn_sent(struct inpcb *inp, int errno) { struct tcpcb *tp = intotcpcb(inp); - if (tp && tp->t_state == TCPS_SYN_SENT) + if (tp && tp->t_state == TCPS_SYN_SENT) { tcp_drop(tp, errno); + } } /* @@ -2599,7 +2686,7 @@ void tcp_mtudisc( struct inpcb *inp, __unused int errno -) + ) { struct tcpcb *tp = intotcpcb(inp); struct rtentry *rt; @@ -2608,32 +2695,34 @@ tcp_mtudisc( int offered; int mss; u_int32_t mtu; - u_int32_t protoHdrOverhead = sizeof (struct tcpiphdr); + u_int32_t protoHdrOverhead = sizeof(struct tcpiphdr); #if INET6 int isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) != 0; - if (isipv6) + if (isipv6) { protoHdrOverhead = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); + } #endif /* INET6 */ if (tp) { #if INET6 - if (isipv6) + if (isipv6) { rt = tcp_rtlookup6(inp, IFSCOPE_NONE); - else + } else #endif /* INET6 */ - rt = tcp_rtlookup(inp, IFSCOPE_NONE); + rt = tcp_rtlookup(inp, IFSCOPE_NONE); if (!rt || !rt->rt_rmx.rmx_mtu) { tp->t_maxopd = tp->t_maxseg = #if INET6 - isipv6 ? tcp_v6mssdflt : + isipv6 ? tcp_v6mssdflt : #endif /* INET6 */ - tcp_mssdflt; + tcp_mssdflt; /* Route locked during lookup above */ - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); + } return; } taop = rmx_taop(rt->rt_rmx); @@ -2649,8 +2738,9 @@ tcp_mtudisc( #endif /* NECP */ mss = mtu - protoHdrOverhead; - if (offered) + if (offered) { mss = min(mss, offered); + } /* * XXX - The above conditional probably violates the TCP * spec. The problem is that, since we don't know the @@ -2668,19 +2758,22 @@ tcp_mtudisc( * will get recorded and the new parameters should get * recomputed. For Further Study. 
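 * (Worked example, assuming plain 20-byte IP and 20-byte TCP
 * headers: a discovered MTU of 1500 yields mss = 1500 - 40 = 1460,
 * further reduced by TCPOLEN_TSTAMP_APPA to 1448 once both ends
 * have negotiated timestamps.)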
*/ - if (tp->t_maxopd <= mss) + if (tp->t_maxopd <= mss) { return; + } tp->t_maxopd = mss; - if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && - (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) + if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP && + (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP) { mss -= TCPOLEN_TSTAMP_APPA; + } #if MPTCP mss -= mptcp_adj_mss(tp, TRUE); #endif - if (so->so_snd.sb_hiwat < mss) + if (so->so_snd.sb_hiwat < mss) { mss = so->so_snd.sb_hiwat; + } tp->t_maxseg = mss; @@ -2690,8 +2783,9 @@ tcp_mtudisc( * Reset the slow-start flight size as it may depends on the * new MSS */ - if (CC_ALGO(tp)->cwnd_init != NULL) + if (CC_ALGO(tp)->cwnd_init != NULL) { CC_ALGO(tp)->cwnd_init(tp); + } tcpstat.tcps_mturesent++; tp->t_rtttime = 0; tp->snd_nxt = tp->snd_una; @@ -2716,8 +2810,9 @@ tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope) LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); ro = &inp->inp_route; - if ((rt = ro->ro_rt) != NULL) + if ((rt = ro->ro_rt) != NULL) { RT_LOCK(rt); + } if (ROUTE_UNUSABLE(ro)) { if (rt != NULL) { @@ -2732,7 +2827,7 @@ tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope) ro->ro_dst.sa_family = AF_INET; ro->ro_dst.sa_len = sizeof(struct sockaddr_in); ((struct sockaddr_in *)(void *)&ro->ro_dst)->sin_addr = - inp->inp_faddr; + inp->inp_faddr; /* * If the socket was bound to an interface, then @@ -2745,12 +2840,14 @@ tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope) inp->inp_boundifp->if_index : input_ifscope; rtalloc_scoped(ro, ifscope); - if ((rt = ro->ro_rt) != NULL) + if ((rt = ro->ro_rt) != NULL) { RT_LOCK(rt); + } } } - if (rt != NULL) + if (rt != NULL) { RT_LOCK_ASSERT_HELD(rt); + } /* * Update MTU discovery determination. Don't do it if: @@ -2763,10 +2860,11 @@ tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope) tp = intotcpcb(inp); if (!path_mtu_discovery || ((rt != NULL) && - (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) + (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) { tp->t_flags &= ~TF_PMTUD; - else + } else { tp->t_flags |= TF_PMTUD; + } if (rt != NULL && rt->rt_ifp != NULL) { somultipages(inp->inp_socket, @@ -2777,22 +2875,21 @@ tcp_rtlookup(struct inpcb *inp, unsigned int input_ifscope) tcp_set_ecn(tp, rt->rt_ifp); if (inp->inp_last_outifp == NULL) { inp->inp_last_outifp = rt->rt_ifp; - } } /* Note if the peer is local */ if (rt != NULL && !(rt->rt_ifp->if_flags & IFF_POINTOPOINT) && - (rt->rt_gateway->sa_family == AF_LINK || - rt->rt_ifp->if_flags & IFF_LOOPBACK || - in_localaddr(inp->inp_faddr))) { + (rt->rt_gateway->sa_family == AF_LINK || + rt->rt_ifp->if_flags & IFF_LOOPBACK || + in_localaddr(inp->inp_faddr))) { tp->t_flags |= TF_LOCAL; } /* * Caller needs to call RT_UNLOCK(rt). 
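 * (Illustrative note: the route is returned with RT_LOCK still
 * held so the caller can read rt_rmx and rt_ifp consistently; each
 * call site is expected to drop the lock when done.)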
*/ - return (rt); + return rt; } #if INET6 @@ -2806,8 +2903,9 @@ tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope) LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); ro6 = &inp->in6p_route; - if ((rt = ro6->ro_rt) != NULL) + if ((rt = ro6->ro_rt) != NULL) { RT_LOCK(rt); + } if (ROUTE_UNUSABLE(ro6)) { if (rt != NULL) { @@ -2836,12 +2934,14 @@ tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope) inp->inp_boundifp->if_index : input_ifscope; rtalloc_scoped((struct route *)ro6, ifscope); - if ((rt = ro6->ro_rt) != NULL) + if ((rt = ro6->ro_rt) != NULL) { RT_LOCK(rt); + } } } - if (rt != NULL) + if (rt != NULL) { RT_LOCK_ASSERT_HELD(rt); + } /* * Update path MTU Discovery determination @@ -2863,10 +2963,11 @@ tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope) */ if (!path_mtu_discovery || ((rt != NULL) && - (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) + (!(rt->rt_flags & RTF_UP) || (rt->rt_rmx.rmx_locks & RTV_MTU)))) { tp->t_flags &= ~TF_PMTUD; - else + } else { tp->t_flags |= TF_PMTUD; + } if (rt != NULL && rt->rt_ifp != NULL) { somultipages(inp->inp_socket, @@ -2881,10 +2982,10 @@ tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope) /* Note if the peer is local */ if (!(rt->rt_ifp->if_flags & IFF_POINTOPOINT) && - (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) || - IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) || - rt->rt_gateway->sa_family == AF_LINK || - in6_localaddr(&inp->in6p_faddr))) { + (IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr) || + IN6_IS_ADDR_LINKLOCAL(&inp->in6p_faddr) || + rt->rt_gateway->sa_family == AF_LINK || + in6_localaddr(&inp->in6p_faddr))) { tp->t_flags |= TF_LOCAL; } } @@ -2892,7 +2993,7 @@ tcp_rtlookup6(struct inpcb *inp, unsigned int input_ifscope) /* * Caller needs to call RT_UNLOCK(rt). */ - return (rt); + return rt; } #endif /* INET6 */ @@ -2910,18 +3011,20 @@ ipsec_hdrsiz_tcp(struct tcpcb *tp) #endif /* INET6 */ struct tcphdr *th; - if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) - return (0); - MGETHDR(m, M_DONTWAIT, MT_DATA); /* MAC-OK */ - if (!m) - return (0); + if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL)) { + return 0; + } + MGETHDR(m, M_DONTWAIT, MT_DATA); /* MAC-OK */ + if (!m) { + return 0; + } #if INET6 if ((inp->inp_vflag & INP_IPV6) != 0) { ip6 = mtod(m, struct ip6_hdr *); th = (struct tcphdr *)(void *)(ip6 + 1); m->m_pkthdr.len = m->m_len = - sizeof(struct ip6_hdr) + sizeof(struct tcphdr); + sizeof(struct ip6_hdr) + sizeof(struct tcphdr); tcp_fillheaders(tp, ip6, th); hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); } else @@ -2934,7 +3037,7 @@ ipsec_hdrsiz_tcp(struct tcpcb *tp) hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp); } m_free(m); - return (hdrsiz); + return hdrsiz; } #endif /* IPSEC */ @@ -2950,25 +3053,26 @@ tcp_gettaocache(struct inpcb *inp) struct rmxp_tao *taop; #if INET6 - if ((inp->inp_vflag & INP_IPV6) != 0) + if ((inp->inp_vflag & INP_IPV6) != 0) { rt = tcp_rtlookup6(inp, IFSCOPE_NONE); - else + } else #endif /* INET6 */ rt = tcp_rtlookup(inp, IFSCOPE_NONE); /* Make sure this is a host route and is up. 
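 * (Illustrative note: both RTF_UP and RTF_HOST must be set, so
 * masking rt_flags with (RTF_UP | RTF_HOST) and comparing against
 * that same mask rejects routes that are down as well as network
 * routes.)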
*/ if (rt == NULL || - (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST)) { + (rt->rt_flags & (RTF_UP | RTF_HOST)) != (RTF_UP | RTF_HOST)) { /* Route locked during lookup above */ - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); - return (NULL); + } + return NULL; } taop = rmx_taop(rt->rt_rmx); /* Route locked during lookup above */ RT_UNLOCK(rt); - return (taop); + return taop; } /* @@ -2989,10 +3093,11 @@ tcp_lock(struct socket *so, int refcount, void *lr) { void *lr_saved; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } retry: if (so->so_pcb != NULL) { @@ -3024,7 +3129,7 @@ retry: goto retry; } } - } else { + } else { panic("tcp_lock: so=%p NO PCB! lr=%p lrh= %s\n", so, lr_saved, solockhistory_nr(so)); /* NOTREACHED */ @@ -3036,11 +3141,12 @@ retry: solockhistory_nr(so)); /* NOTREACHED */ } - if (refcount) + if (refcount) { so->so_usecount++; + } so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; - return (0); + so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; + return 0; } int @@ -3048,10 +3154,11 @@ tcp_unlock(struct socket *so, int refcount, void *lr) { void *lr_saved; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } #ifdef MORE_TCPLOCK_DEBUG printf("tcp_unlock: so=0x%llx sopcb=0x%llx lock=0x%llx ref=%x " @@ -3060,8 +3167,9 @@ tcp_unlock(struct socket *so, int refcount, void *lr) (uint64_t)VM_KERNEL_ADDRPERM(&(sotoinpcb(so)->inpcb_mtx)), so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved)); #endif - if (refcount) + if (refcount) { so->so_usecount--; + } if (so->so_usecount < 0) { panic("tcp_unlock: so=%p usecount=%x lrh= %s\n", @@ -3074,7 +3182,7 @@ tcp_unlock(struct socket *so, int refcount, void *lr) /* NOTREACHED */ } else { so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; if (so->so_flags & SOF_MP_SUBFLOW) { struct mptcb *mp_tp = tptomptp(sototcpcb(so)); @@ -3089,7 +3197,7 @@ tcp_unlock(struct socket *so, int refcount, void *lr) lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx); } } - return (0); + return 0; } lck_mtx_t * @@ -3097,22 +3205,23 @@ tcp_getlock(struct socket *so, int flags) { struct inpcb *inp = sotoinpcb(so); - if (so->so_pcb) { - if (so->so_usecount < 0) + if (so->so_pcb) { + if (so->so_usecount < 0) { panic("tcp_getlock: so=%p usecount=%x lrh= %s\n", so, so->so_usecount, solockhistory_nr(so)); + } if (so->so_flags & SOF_MP_SUBFLOW) { struct mptcb *mp_tp = tptomptp(sototcpcb(so)); - return (mpte_getlock(mp_tp->mpt_mpte, flags)); + return mpte_getlock(mp_tp->mpt_mpte, flags); } else { - return (&inp->inpcb_mtx); + return &inp->inpcb_mtx; } } else { panic("tcp_getlock: so=%p NULL so_pcb %s\n", so, solockhistory_nr(so)); - return (so->so_proto->pr_domain->dom_mtx); + return so->so_proto->pr_domain->dom_mtx; } } @@ -3130,23 +3239,25 @@ tcp_sbrcv_grow_rwin(struct tcpcb *tp, struct sockbuf *sb) u_int32_t rcvbuf = sb->sb_hiwat; struct socket *so = tp->t_inpcb->inp_socket; - if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) + if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) { return; + } /* * If message delivery is enabled, do not count * unordered bytes in receive buffer towards hiwat */ - if (so->so_flags & SOF_ENABLE_MSGS) + if (so->so_flags & SOF_ENABLE_MSGS) { rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes; + } if (tcp_do_autorcvbuf == 1 && - 
tcp_cansbgrow(sb) && - (tp->t_flags & TF_SLOWLINK) == 0 && - (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 && - (rcvbuf - sb->sb_cc) < rcvbufinc && - rcvbuf < tcp_autorcvbuf_max && - (sb->sb_idealsize > 0 && - sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) { + tcp_cansbgrow(sb) && + (tp->t_flags & TF_SLOWLINK) == 0 && + (so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) == 0 && + (rcvbuf - sb->sb_cc) < rcvbufinc && + rcvbuf < tcp_autorcvbuf_max && + (sb->sb_idealsize > 0 && + sb->sb_hiwat <= (sb->sb_idealsize + rcvbufinc))) { sbreserve(sb, min((sb->sb_hiwat + rcvbufinc), tcp_autorcvbuf_max)); } @@ -3173,22 +3284,25 @@ tcp_sbspace(struct tcpcb *tp) * not reflect the extra unordered bytes added to the * receive socket buffer. */ - if (so->so_flags & SOF_ENABLE_MSGS) + if (so->so_flags & SOF_ENABLE_MSGS) { rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes; + } space = ((int32_t) imin((rcvbuf - sb->sb_cc), - (sb->sb_mbmax - sb->sb_mbcnt))); - if (space < 0) + (sb->sb_mbmax - sb->sb_mbcnt))); + if (space < 0) { space = 0; + } #if CONTENT_FILTER /* Compensate for data being processed by content filters */ pending = cfil_sock_data_space(sb); #endif /* CONTENT_FILTER */ - if (pending > space) + if (pending > space) { space = 0; - else + } else { space -= pending; + } /* * Avoid increasing window size if the current window @@ -3196,15 +3310,17 @@ tcp_sbspace(struct tcpcb *tp) * we could break some apps (see rdar://5409343) */ - if (space < tp->t_maxseg) - return (space); + if (space < tp->t_maxseg) { + return space; + } /* Clip window size for slower link */ - if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) - return (imin(space, slowlink_wsize)); + if (((tp->t_flags & TF_SLOWLINK) != 0) && slowlink_wsize > 0) { + return imin(space, slowlink_wsize); + } - return (space); + return space; } /* * Checks TCP Segment Offloading capability for a given connection @@ -3233,29 +3349,32 @@ tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp) if (isipv6) { if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV6)) { tp->t_flags |= TF_TSO; - if (ifp->if_tso_v6_mtu != 0) + if (ifp->if_tso_v6_mtu != 0) { tp->tso_max_segment_size = ifp->if_tso_v6_mtu; - else + } else { tp->tso_max_segment_size = TCP_MAXWIN; - } else - tp->t_flags &= ~TF_TSO; - + } + } else { + tp->t_flags &= ~TF_TSO; + } } else #endif /* INET6 */ { if (ifp && (ifp->if_hwassist & IFNET_TSO_IPV4)) { tp->t_flags |= TF_TSO; - if (ifp->if_tso_v4_mtu != 0) + if (ifp->if_tso_v4_mtu != 0) { tp->tso_max_segment_size = ifp->if_tso_v4_mtu; - else + } else { tp->tso_max_segment_size = TCP_MAXWIN; - } else - tp->t_flags &= ~TF_TSO; + } + } else { + tp->t_flags &= ~TF_TSO; + } } } -#define TIMEVAL_TO_TCPHZ(_tv_) ((_tv_).tv_sec * TCP_RETRANSHZ + \ +#define TIMEVAL_TO_TCPHZ(_tv_) ((_tv_).tv_sec * TCP_RETRANSHZ + \ (_tv_).tv_usec / TCP_RETRANSHZ_TO_USEC) /* @@ -3330,20 +3449,21 @@ tcp_set_max_rwinscale(struct tcpcb *tp, struct socket *so, struct ifnet *ifp) * When we start a connection and don't know about the interface, set * the scaling factor simply to the max - we can always announce less. */ - if (!ifp || (IFNET_IS_CELLULAR(ifp) && (ifp->if_eflags & IFEF_3CA))) + if (!ifp || (IFNET_IS_CELLULAR(ifp) && (ifp->if_eflags & IFEF_3CA))) { rcvbuf_max = (tcp_autorcvbuf_max << 1); - else + } else { rcvbuf_max = tcp_autorcvbuf_max; + } tp->request_r_scale = max(tcp_win_scale, tp->request_r_scale); maxsockbufsize = ((so->so_rcv.sb_flags & SB_USRSIZE) != 0) ? 
- so->so_rcv.sb_hiwat : rcvbuf_max; + so->so_rcv.sb_hiwat : rcvbuf_max; while (tp->request_r_scale < TCP_MAX_WINSHIFT && - (TCP_MAXWIN << tp->request_r_scale) < maxsockbufsize) + (TCP_MAXWIN << tp->request_r_scale) < maxsockbufsize) { tp->request_r_scale++; + } tp->request_r_scale = min(tp->request_r_scale, TCP_MAX_WINSHIFT); - } int @@ -3358,11 +3478,11 @@ tcp_notsent_lowat_check(struct socket *so) } if (tp == NULL) { - return (0); + return 0; } notsent = so->so_snd.sb_cc - - (tp->snd_nxt - tp->snd_una); + (tp->snd_nxt - tp->snd_una); /* * When we send a FIN or SYN, not_sent can be negative. @@ -3371,7 +3491,7 @@ tcp_notsent_lowat_check(struct socket *so) * get an error from send because cantsendmore will be set. */ if (notsent <= tp->t_notsent_lowat) { - return (1); + return 1; } /* @@ -3380,10 +3500,10 @@ tcp_notsent_lowat_check(struct socket *so) * maxseg of data to write. */ if ((tp->t_flags & TF_NODELAY) == 0 && - notsent > 0 && notsent < tp->t_maxseg) { - return (1); + notsent > 0 && notsent < tp->t_maxseg) { + return 1; } - return (0); + return 0; } void @@ -3392,16 +3512,18 @@ tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end) struct tcp_rxt_seg *rxseg = NULL, *prev = NULL, *next = NULL; u_int32_t rxcount = 0; - if (SLIST_EMPTY(&tp->t_rxt_segments)) + if (SLIST_EMPTY(&tp->t_rxt_segments)) { tp->t_dsack_lastuna = tp->snd_una; + } /* * First check if there is a segment already existing for this * sequence space. */ SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) { - if (SEQ_GT(rxseg->rx_start, start)) + if (SEQ_GT(rxseg->rx_start, start)) { break; + } prev = rxseg; } next = rxseg; @@ -3439,8 +3561,9 @@ tcp_rxtseg_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end) rxcount = next->rx_count; } } - if (!SEQ_LT(start, end)) + if (!SEQ_LT(start, end)) { return; + } rxseg = (struct tcp_rxt_seg *) zalloc(tcp_rxt_seg_zone); if (rxseg == NULL) { @@ -3462,17 +3585,20 @@ struct tcp_rxt_seg * tcp_rxtseg_find(struct tcpcb *tp, tcp_seq start, tcp_seq end) { struct tcp_rxt_seg *rxseg; - if (SLIST_EMPTY(&tp->t_rxt_segments)) - return (NULL); + if (SLIST_EMPTY(&tp->t_rxt_segments)) { + return NULL; + } SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) { if (SEQ_LEQ(rxseg->rx_start, start) && - SEQ_GEQ(rxseg->rx_end, end)) - return (rxseg); - if (SEQ_GT(rxseg->rx_start, start)) + SEQ_GEQ(rxseg->rx_end, end)) { + return rxseg; + } + if (SEQ_GT(rxseg->rx_start, start)) { break; + } } - return (NULL); + return NULL; } void @@ -3494,16 +3620,18 @@ tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack) boolean_t bad_rexmt; struct tcp_rxt_seg *rxseg; - if (SLIST_EMPTY(&tp->t_rxt_segments)) - return (FALSE); + if (SLIST_EMPTY(&tp->t_rxt_segments)) { + return FALSE; + } /* * If all of the segments in this window are not cumulatively * acknowledged, then there can still be undetected packet loss. * Do not restore congestion window in that case. 
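 * (Illustrative note: snd_recover records the highest sequence
 * outstanding when recovery began, so the check below waits until
 * th_ack has covered the entire retransmitted window before a
 * retransmission may be judged spurious.)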
*/ - if (SEQ_LT(th_ack, tp->snd_recover)) - return (FALSE); + if (SEQ_LT(th_ack, tp->snd_recover)) { + return FALSE; + } bad_rexmt = TRUE; SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) { @@ -3513,7 +3641,7 @@ tcp_rxtseg_detect_bad_rexmt(struct tcpcb *tp, tcp_seq th_ack) break; } } - return (bad_rexmt); + return bad_rexmt; } boolean_t @@ -3521,8 +3649,9 @@ tcp_rxtseg_dsack_for_tlp(struct tcpcb *tp) { boolean_t dsack_for_tlp = FALSE; struct tcp_rxt_seg *rxseg; - if (SLIST_EMPTY(&tp->t_rxt_segments)) - return (FALSE); + if (SLIST_EMPTY(&tp->t_rxt_segments)) { + return FALSE; + } SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) { if (rxseg->rx_count == 1 && @@ -3532,7 +3661,7 @@ tcp_rxtseg_dsack_for_tlp(struct tcpcb *tp) break; } } - return (dsack_for_tlp); + return dsack_for_tlp; } u_int32_t @@ -3544,15 +3673,16 @@ tcp_rxtseg_total_size(struct tcpcb *tp) SLIST_FOREACH(rxseg, &tp->t_rxt_segments, rx_link) { total_size += (rxseg->rx_end - rxseg->rx_start) + 1; } - return (total_size); + return total_size; } void tcp_get_connectivity_status(struct tcpcb *tp, - struct tcp_conn_status *connstatus) + struct tcp_conn_status *connstatus) { - if (tp == NULL || connstatus == NULL) + if (tp == NULL || connstatus == NULL) { return; + } bzero(connstatus, sizeof(*connstatus)); if (tp->t_rxtshift >= TCP_CONNECTIVITY_PROBES_MAX) { if (TCPS_HAVEESTABLISHED(tp->t_state)) { @@ -3561,17 +3691,19 @@ tcp_get_connectivity_status(struct tcpcb *tp, connstatus->conn_probe_failed = 1; } } - if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) + if (tp->t_rtimo_probes >= TCP_CONNECTIVITY_PROBES_MAX) { connstatus->read_probe_failed = 1; + } if (tp->t_inpcb != NULL && tp->t_inpcb->inp_last_outifp != NULL && - (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) + (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_PROBE_CONNECTIVITY)) { connstatus->probe_activated = 1; + } } boolean_t tfo_enabled(const struct tcpcb *tp) { - return ((tp->t_flagsext & TF_FASTOPEN)? TRUE : FALSE); + return (tp->t_flagsext & TF_FASTOPEN)? 
TRUE : FALSE; } void @@ -3598,7 +3730,7 @@ tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp, MGETHDR(m, M_WAIT, MT_HEADER); if (m == NULL) { - return (NULL); + return NULL; } m->m_pkthdr.pkt_proto = IPPROTO_TCP; @@ -3642,16 +3774,19 @@ tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp, ip6->ip6_hlim = in6_selecthlim(inp, ifp); ip6->ip6_flow = ip6->ip6_flow & ~IPV6_FLOW_ECN_MASK; - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { ip6->ip6_src.s6_addr16[1] = 0; - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) + } + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { ip6->ip6_dst.s6_addr16[1] = 0; + } } th->th_flags = TH_ACK; win = tcp_sbspace(tp); - if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) - win = (int32_t)TCP_MAXWIN << tp->rcv_scale; + if (win > ((int32_t)TCP_MAXWIN << tp->rcv_scale)) { + win = (int32_t)TCP_MAXWIN << tp->rcv_scale; + } th->th_win = htons((u_short) (win >> tp->rcv_scale)); if (is_probe) { @@ -3671,7 +3806,7 @@ tcp_make_keepalive_frame(struct tcpcb *tp, struct ifnet *ifp, sizeof(struct ip6_hdr), sizeof(struct tcphdr)); } - return (m); + return m; } void @@ -3687,8 +3822,9 @@ tcp_fill_keepalive_offload_frames(ifnet_t ifp, if (ifp == NULL || frames_array == NULL || frames_array_count == 0 || frame_index >= frames_array_count || - frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) + frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) { return; + } /* * This function is called outside the regular TCP processing @@ -3704,16 +3840,19 @@ tcp_fill_keepalive_offload_frames(ifnet_t ifp, struct mbuf *m = NULL; struct tcpcb *tp = intotcpcb(inp); - if (frame_index >= frames_array_count) + if (frame_index >= frames_array_count) { break; + } if (inp->inp_gencnt > gencnt || - inp->inp_state == INPCB_STATE_DEAD) + inp->inp_state == INPCB_STATE_DEAD) { continue; + } if ((so = inp->inp_socket) == NULL || - (so->so_state & SS_DEFUNCT)) + (so->so_state & SS_DEFUNCT)) { continue; + } /* * check for keepalive offload flag without socket * lock to avoid a deadlock @@ -3726,8 +3865,9 @@ tcp_fill_keepalive_offload_frames(ifnet_t ifp, continue; } if (inp->inp_ppcb == NULL || - in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) + in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { continue; + } socket_lock(so, 1); /* Release the want count */ if (inp->inp_ppcb == NULL || @@ -3813,13 +3953,15 @@ tcp_fill_keepalive_offload_frames(ifnet_t ifp, frame->addr_length = sizeof(struct in6_addr); ip6 = (struct in6_addr *)(void *)frame->local_addr; bcopy(&inp->in6p_laddr, ip6, sizeof(struct in6_addr)); - if (IN6_IS_SCOPE_EMBED(ip6)) + if (IN6_IS_SCOPE_EMBED(ip6)) { ip6->s6_addr16[1] = 0; + } ip6 = (struct in6_addr *)(void *)frame->remote_addr; bcopy(&inp->in6p_faddr, ip6, sizeof(struct in6_addr)); - if (IN6_IS_SCOPE_EMBED(ip6)) + if (IN6_IS_SCOPE_EMBED(ip6)) { ip6->s6_addr16[1] = 0; + } } /* @@ -3859,18 +4001,21 @@ tcp_notify_ack_id_valid(struct tcpcb *tp, struct socket *so, { struct tcp_notify_ack_marker *elm; - if (so->so_snd.sb_cc == 0) - return (ENOBUFS); + if (so->so_snd.sb_cc == 0) { + return ENOBUFS; + } SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) { /* Duplicate id is not allowed */ - if (elm->notify_id == notify_id) - return (EINVAL); + if (elm->notify_id == notify_id) { + return EINVAL; + } /* Duplicate position is not allowed */ - if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) - return (EINVAL); + if (elm->notify_snd_una == tp->snd_una + so->so_snd.sb_cc) { + return EINVAL; + } } - return (0); + return 0; } 
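/*
 * Illustrative sketch, using a hypothetical helper name: the marker
 * and retransmit-segment bookkeeping in this file leans on the
 * SEQ_LT/SEQ_GT/SEQ_GEQ macros, which order 32-bit sequence numbers
 * modulo 2^32 by testing the sign of their difference.
 */
static inline int
tcp_seq_before_sketch(uint32_t a, uint32_t b)
{
	/* e.g. a = 0xfffffff0, b = 0x10: a - b wraps to 0xffffffe0,
	 * negative as an int32_t, so a orders before b even though it
	 * is numerically larger. */
	return (int32_t)(a - b) < 0;
}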
errno_t @@ -3879,16 +4024,18 @@ tcp_add_notify_ack_marker(struct tcpcb *tp, u_int32_t notify_id) struct tcp_notify_ack_marker *nm, *elm = NULL; struct socket *so = tp->t_inpcb->inp_socket; - MALLOC(nm, struct tcp_notify_ack_marker *, sizeof (*nm), + MALLOC(nm, struct tcp_notify_ack_marker *, sizeof(*nm), M_TEMP, M_WAIT | M_ZERO); - if (nm == NULL) - return (ENOMEM); + if (nm == NULL) { + return ENOMEM; + } nm->notify_id = notify_id; nm->notify_snd_una = tp->snd_una + so->so_snd.sb_cc; SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) { - if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una)) + if (SEQ_GT(nm->notify_snd_una, elm->notify_snd_una)) { break; + } } if (elm == NULL) { @@ -3898,15 +4045,16 @@ tcp_add_notify_ack_marker(struct tcpcb *tp, u_int32_t notify_id) SLIST_INSERT_AFTER(elm, nm, notify_next); } tp->t_notify_ack_count++; - return (0); + return 0; } void tcp_notify_ack_free(struct tcpcb *tp) { struct tcp_notify_ack_marker *elm, *next; - if (SLIST_EMPTY(&tp->t_notify_ack)) + if (SLIST_EMPTY(&tp->t_notify_ack)) { return; + } SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) { SLIST_REMOVE(&tp->t_notify_ack, elm, tcp_notify_ack_marker, @@ -3933,13 +4081,14 @@ tcp_get_notify_ack_count(struct tcpcb *tp, struct tcp_notify_ack_complete *retid) { struct tcp_notify_ack_marker *elm; - size_t complete = 0; + size_t complete = 0; SLIST_FOREACH(elm, &tp->t_notify_ack, notify_next) { - if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) + if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) { complete++; - else + } else { break; + } } retid->notify_pending = tp->t_notify_ack_count - complete; retid->notify_complete_count = min(TCP_MAX_NOTIFY_ACK, complete); @@ -3953,8 +4102,9 @@ tcp_get_notify_ack_ids(struct tcpcb *tp, struct tcp_notify_ack_marker *elm, *next; SLIST_FOREACH_SAFE(elm, &tp->t_notify_ack, notify_next, next) { - if (i >= retid->notify_complete_count) + if (i >= retid->notify_complete_count) { break; + } if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) { retid->notify_complete_id[i++] = elm->notify_id; SLIST_REMOVE(&tp->t_notify_ack, elm, @@ -3977,11 +4127,12 @@ tcp_notify_ack_active(struct socket *so) if (!SLIST_EMPTY(&tp->t_notify_ack)) { struct tcp_notify_ack_marker *elm; elm = SLIST_FIRST(&tp->t_notify_ack); - if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) - return (true); + if (SEQ_GEQ(tp->snd_una, elm->notify_snd_una)) { + return true; + } } } - return (false); + return false; } inline int32_t @@ -3994,30 +4145,33 @@ inp_get_sndbytes_allunsent(struct socket *so, u_int32_t th_ack) so->so_snd.sb_cc > 0) { int32_t unsent, sent; sent = tp->snd_max - th_ack; - if (tp->t_flags & TF_SENTFIN) + if (tp->t_flags & TF_SENTFIN) { sent--; + } unsent = so->so_snd.sb_cc - sent; - return (unsent); + return unsent; } - return (0); + return 0; } #define IFP_PER_FLOW_STAT(_ipv4_, _stat_) { \ if (_ipv4_) { \ - ifp->if_ipv4_stat->_stat_++; \ + ifp->if_ipv4_stat->_stat_++; \ } else { \ - ifp->if_ipv6_stat->_stat_++; \ + ifp->if_ipv6_stat->_stat_++; \ } \ } #define FLOW_ECN_ENABLED(_flags_) \ ((_flags_ & (TE_ECN_ON)) == (TE_ECN_ON)) -void tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs, +void +tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs, struct ifnet *ifp) { - if (ifp == NULL || !IF_FULLY_ATTACHED(ifp)) + if (ifp == NULL || !IF_FULLY_ATTACHED(ifp)) { return; + } ifnet_lock_shared(ifp); if (ifs->ecn_flags & TE_SETUPSENT) { @@ -4105,20 +4259,27 @@ void tcp_update_stats_per_flow(struct ifnet_stats_per_flow *ifs, IFP_PER_FLOW_STAT(ifs->ipv4, ecn_off.rxmit_drop); } } - if 
(ifs->ecn_fallback_synloss) + if (ifs->ecn_fallback_synloss) { IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_synloss); - if (ifs->ecn_fallback_droprst) + } + if (ifs->ecn_fallback_droprst) { IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprst); - if (ifs->ecn_fallback_droprxmt) + } + if (ifs->ecn_fallback_droprxmt) { IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_droprxmt); - if (ifs->ecn_fallback_ce) + } + if (ifs->ecn_fallback_ce) { IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_ce); - if (ifs->ecn_fallback_reorder) + } + if (ifs->ecn_fallback_reorder) { IFP_PER_FLOW_STAT(ifs->ipv4, ecn_fallback_reorder); - if (ifs->ecn_recv_ce > 0) + } + if (ifs->ecn_recv_ce > 0) { IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ce); - if (ifs->ecn_recv_ece > 0) + } + if (ifs->ecn_recv_ece > 0) { IFP_PER_FLOW_STAT(ifs->ipv4, ecn_recv_ece); + } tcp_flow_lim_stats(ifs, &ifp->if_lim_stat); ifnet_lock_done(ifp); diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c index 189603549..fda0f86f6 100644 --- a/bsd/netinet/tcp_timer.c +++ b/bsd/netinet/tcp_timer.c @@ -74,7 +74,7 @@ #include #include #include -#include /* before tcp_seq.h, for tcp_random18() */ +#include /* before tcp_seq.h, for tcp_random18() */ #include #include @@ -107,13 +107,13 @@ #include /* Max number of times a stretch ack can be delayed on a connection */ -#define TCP_STRETCHACK_DELAY_THRESHOLD 5 +#define TCP_STRETCHACK_DELAY_THRESHOLD 5 /* * If the host processor has been sleeping for too long, this is the threshold * used to avoid sending stale retransmissions. */ -#define TCP_SLEEP_TOO_LONG (10 * 60 * 1000) /* 10 minutes in ms */ +#define TCP_SLEEP_TOO_LONG (10 * 60 * 1000) /* 10 minutes in ms */ /* tcp timer list */ struct tcptimerlist tcp_timer_list; @@ -131,46 +131,48 @@ sysctl_msec_to_ticks SYSCTL_HANDLER_ARGS s = tt * 1000 / TCP_RETRANSHZ;; error = sysctl_handle_int(oidp, &s, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } tt = s * TCP_RETRANSHZ / 1000; - if (tt < 1) - return (EINVAL); + if (tt < 1) { + return EINVAL; + } *(int *)arg1 = tt; SYSCTL_SKMEM_UPDATE_AT_OFFSET(arg2, *(int*)arg1); - return (0); + return 0; } #if SYSCTL_SKMEM -int tcp_keepinit = TCPTV_KEEP_INIT; +int tcp_keepinit = TCPTV_KEEP_INIT; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepinit, offsetof(skmem_sysctl, tcp.keepinit), - sysctl_msec_to_ticks, "I", ""); + sysctl_msec_to_ticks, "I", ""); -int tcp_keepidle = TCPTV_KEEP_IDLE; +int tcp_keepidle = TCPTV_KEEP_IDLE; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepidle, offsetof(skmem_sysctl, tcp.keepidle), - sysctl_msec_to_ticks, "I", ""); + sysctl_msec_to_ticks, "I", ""); -int tcp_keepintvl = TCPTV_KEEPINTVL; +int tcp_keepintvl = TCPTV_KEEPINTVL; SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_keepintvl, offsetof(skmem_sysctl, tcp.keepintvl), - sysctl_msec_to_ticks, "I", ""); + sysctl_msec_to_ticks, "I", ""); SYSCTL_SKMEM_TCP_INT(OID_AUTO, keepcnt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, int, tcp_keepcnt, TCPTV_KEEPCNT, "number of times to repeat keepalive"); -int tcp_msl = TCPTV_MSL; +int tcp_msl = TCPTV_MSL; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_msl, offsetof(skmem_sysctl, tcp.msl), - sysctl_msec_to_ticks, "I", "Maximum segment lifetime"); + sysctl_msec_to_ticks, "I", "Maximum segment lifetime"); #else /* SYSCTL_SKMEM */ int tcp_keepinit; 
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, @@ -212,7 +214,7 @@ u_int32_t tcp_max_persist_timeout = 0; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_max_persist_timeout, offsetof(skmem_sysctl, tcp.max_persist_timeout), - sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP"); + sysctl_msec_to_ticks, "I", "Maximum persistence timeout for ZWP"); #else /* SYSCTL_SKMEM */ u_int32_t tcp_max_persist_timeout = 0; SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout, @@ -223,7 +225,7 @@ SYSCTL_PROC(_net_inet_tcp, OID_AUTO, max_persist_timeout, SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive, CTLFLAG_RW | CTLFLAG_LOCKED, static int, always_keepalive, 0, - "Assume SO_KEEPALIVE on all TCP connections"); + "Assume SO_KEEPALIVE on all TCP connections"); /* * This parameter determines how long the timer list will stay in fast or @@ -232,7 +234,7 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, always_keepalive, */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax, CTLFLAG_RW | CTLFLAG_LOCKED, int, timer_fastmode_idlemax, - TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode"); + TCP_FASTMODE_IDLERUN_MAX, "Maximum idle generations in fast mode"); /* * See tcp_syn_backoff[] for interval values between SYN retransmits; @@ -243,7 +245,7 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, timer_fastmode_idlemax, */ SYSCTL_SKMEM_TCP_INT(OID_AUTO, broken_peer_syn_rexmit_thres, CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_broken_peer_syn_rxmit_thres, - 10, "Number of retransmitted SYNs before disabling RFC 1323 " + 10, "Number of retransmitted SYNs before disabling RFC 1323 " "options on local connections"); static int tcp_timer_advanced = 0; @@ -267,27 +269,27 @@ SYSCTL_SKMEM_TCP_INT(OID_AUTO, pmtud_blackhole_mss, static u_int32_t tcp_mss_rec_medium = 1200; static u_int32_t tcp_mss_rec_low = 512; -#define TCP_REPORT_STATS_INTERVAL 43200 /* 12 hours, in seconds */ +#define TCP_REPORT_STATS_INTERVAL 43200 /* 12 hours, in seconds */ int tcp_report_stats_interval = TCP_REPORT_STATS_INTERVAL; /* performed garbage collection of "used" sockets */ static boolean_t tcp_gc_done = FALSE; /* max idle probes */ -int tcp_maxpersistidle = TCPTV_KEEP_IDLE; +int tcp_maxpersistidle = TCPTV_KEEP_IDLE; /* * TCP delack timer is set to 100 ms. Since the processing of timer list * in fast mode will happen no faster than 100 ms, the delayed ack timer * will fire some where between 100 and 200 ms. */ -int tcp_delack = TCP_RETRANSHZ / 10; +int tcp_delack = TCP_RETRANSHZ / 10; #if MPTCP /* * MP_JOIN retransmission of 3rd ACK will be every 500 msecs without backoff */ -int tcp_jack_rxmt = TCP_RETRANSHZ / 2; +int tcp_jack_rxmt = TCP_RETRANSHZ / 2; #endif /* MPTCP */ static boolean_t tcp_itimer_done = FALSE; @@ -302,80 +304,80 @@ __private_extern__ void tcp_remove_from_time_wait(struct inpcb *inp); static inline void tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp); __private_extern__ void tcp_report_stats(void); -static u_int64_t tcp_last_report_time; +static u_int64_t tcp_last_report_time; /* * Structure to store previously reported stats so that we can send * incremental changes in each report interval. 
*/ struct tcp_last_report_stats { - u_int32_t tcps_connattempt; - u_int32_t tcps_accepts; - u_int32_t tcps_ecn_client_setup; - u_int32_t tcps_ecn_server_setup; - u_int32_t tcps_ecn_client_success; - u_int32_t tcps_ecn_server_success; - u_int32_t tcps_ecn_not_supported; - u_int32_t tcps_ecn_lost_syn; - u_int32_t tcps_ecn_lost_synack; - u_int32_t tcps_ecn_recv_ce; - u_int32_t tcps_ecn_recv_ece; - u_int32_t tcps_ecn_sent_ece; - u_int32_t tcps_ecn_conn_recv_ce; - u_int32_t tcps_ecn_conn_recv_ece; - u_int32_t tcps_ecn_conn_plnoce; - u_int32_t tcps_ecn_conn_pl_ce; - u_int32_t tcps_ecn_conn_nopl_ce; - u_int32_t tcps_ecn_fallback_synloss; - u_int32_t tcps_ecn_fallback_reorder; - u_int32_t tcps_ecn_fallback_ce; + u_int32_t tcps_connattempt; + u_int32_t tcps_accepts; + u_int32_t tcps_ecn_client_setup; + u_int32_t tcps_ecn_server_setup; + u_int32_t tcps_ecn_client_success; + u_int32_t tcps_ecn_server_success; + u_int32_t tcps_ecn_not_supported; + u_int32_t tcps_ecn_lost_syn; + u_int32_t tcps_ecn_lost_synack; + u_int32_t tcps_ecn_recv_ce; + u_int32_t tcps_ecn_recv_ece; + u_int32_t tcps_ecn_sent_ece; + u_int32_t tcps_ecn_conn_recv_ce; + u_int32_t tcps_ecn_conn_recv_ece; + u_int32_t tcps_ecn_conn_plnoce; + u_int32_t tcps_ecn_conn_pl_ce; + u_int32_t tcps_ecn_conn_nopl_ce; + u_int32_t tcps_ecn_fallback_synloss; + u_int32_t tcps_ecn_fallback_reorder; + u_int32_t tcps_ecn_fallback_ce; /* TFO-related statistics */ - u_int32_t tcps_tfo_syn_data_rcv; - u_int32_t tcps_tfo_cookie_req_rcv; - u_int32_t tcps_tfo_cookie_sent; - u_int32_t tcps_tfo_cookie_invalid; - u_int32_t tcps_tfo_cookie_req; - u_int32_t tcps_tfo_cookie_rcv; - u_int32_t tcps_tfo_syn_data_sent; - u_int32_t tcps_tfo_syn_data_acked; - u_int32_t tcps_tfo_syn_loss; - u_int32_t tcps_tfo_blackhole; - u_int32_t tcps_tfo_cookie_wrong; - u_int32_t tcps_tfo_no_cookie_rcv; - u_int32_t tcps_tfo_heuristics_disable; - u_int32_t tcps_tfo_sndblackhole; + u_int32_t tcps_tfo_syn_data_rcv; + u_int32_t tcps_tfo_cookie_req_rcv; + u_int32_t tcps_tfo_cookie_sent; + u_int32_t tcps_tfo_cookie_invalid; + u_int32_t tcps_tfo_cookie_req; + u_int32_t tcps_tfo_cookie_rcv; + u_int32_t tcps_tfo_syn_data_sent; + u_int32_t tcps_tfo_syn_data_acked; + u_int32_t tcps_tfo_syn_loss; + u_int32_t tcps_tfo_blackhole; + u_int32_t tcps_tfo_cookie_wrong; + u_int32_t tcps_tfo_no_cookie_rcv; + u_int32_t tcps_tfo_heuristics_disable; + u_int32_t tcps_tfo_sndblackhole; /* MPTCP-related statistics */ - u_int32_t tcps_mptcp_handover_attempt; - u_int32_t tcps_mptcp_interactive_attempt; - u_int32_t tcps_mptcp_aggregate_attempt; - u_int32_t tcps_mptcp_fp_handover_attempt; - u_int32_t tcps_mptcp_fp_interactive_attempt; - u_int32_t tcps_mptcp_fp_aggregate_attempt; - u_int32_t tcps_mptcp_heuristic_fallback; - u_int32_t tcps_mptcp_fp_heuristic_fallback; - u_int32_t tcps_mptcp_handover_success_wifi; - u_int32_t tcps_mptcp_handover_success_cell; - u_int32_t tcps_mptcp_interactive_success; - u_int32_t tcps_mptcp_aggregate_success; - u_int32_t tcps_mptcp_fp_handover_success_wifi; - u_int32_t tcps_mptcp_fp_handover_success_cell; - u_int32_t tcps_mptcp_fp_interactive_success; - u_int32_t tcps_mptcp_fp_aggregate_success; - u_int32_t tcps_mptcp_handover_cell_from_wifi; - u_int32_t tcps_mptcp_handover_wifi_from_cell; - u_int32_t tcps_mptcp_interactive_cell_from_wifi; - u_int64_t tcps_mptcp_handover_cell_bytes; - u_int64_t tcps_mptcp_interactive_cell_bytes; - u_int64_t tcps_mptcp_aggregate_cell_bytes; - u_int64_t tcps_mptcp_handover_all_bytes; - u_int64_t tcps_mptcp_interactive_all_bytes; - u_int64_t 
tcps_mptcp_aggregate_all_bytes; - u_int32_t tcps_mptcp_back_to_wifi; - u_int32_t tcps_mptcp_wifi_proxy; - u_int32_t tcps_mptcp_cell_proxy; - u_int32_t tcps_mptcp_triggered_cell; + u_int32_t tcps_mptcp_handover_attempt; + u_int32_t tcps_mptcp_interactive_attempt; + u_int32_t tcps_mptcp_aggregate_attempt; + u_int32_t tcps_mptcp_fp_handover_attempt; + u_int32_t tcps_mptcp_fp_interactive_attempt; + u_int32_t tcps_mptcp_fp_aggregate_attempt; + u_int32_t tcps_mptcp_heuristic_fallback; + u_int32_t tcps_mptcp_fp_heuristic_fallback; + u_int32_t tcps_mptcp_handover_success_wifi; + u_int32_t tcps_mptcp_handover_success_cell; + u_int32_t tcps_mptcp_interactive_success; + u_int32_t tcps_mptcp_aggregate_success; + u_int32_t tcps_mptcp_fp_handover_success_wifi; + u_int32_t tcps_mptcp_fp_handover_success_cell; + u_int32_t tcps_mptcp_fp_interactive_success; + u_int32_t tcps_mptcp_fp_aggregate_success; + u_int32_t tcps_mptcp_handover_cell_from_wifi; + u_int32_t tcps_mptcp_handover_wifi_from_cell; + u_int32_t tcps_mptcp_interactive_cell_from_wifi; + u_int64_t tcps_mptcp_handover_cell_bytes; + u_int64_t tcps_mptcp_interactive_cell_bytes; + u_int64_t tcps_mptcp_aggregate_cell_bytes; + u_int64_t tcps_mptcp_handover_all_bytes; + u_int64_t tcps_mptcp_interactive_all_bytes; + u_int64_t tcps_mptcp_aggregate_all_bytes; + u_int32_t tcps_mptcp_back_to_wifi; + u_int32_t tcps_mptcp_wifi_proxy; + u_int32_t tcps_mptcp_cell_proxy; + u_int32_t tcps_mptcp_triggered_cell; }; @@ -391,25 +393,25 @@ static boolean_t tcp_garbage_collect(struct inpcb *, int); #define TIMERENTRY_TO_TP(te) ((struct tcpcb *)((uintptr_t)te - offsetof(struct tcpcb, tentry.le.le_next))) -#define VERIFY_NEXT_LINK(elm,field) do { \ - if (LIST_NEXT((elm),field) != NULL && \ - LIST_NEXT((elm),field)->field.le_prev != \ - &((elm)->field.le_next)) \ - panic("Bad link elm %p next->prev != elm", (elm)); \ +#define VERIFY_NEXT_LINK(elm, field) do { \ + if (LIST_NEXT((elm),field) != NULL && \ + LIST_NEXT((elm),field)->field.le_prev != \ + &((elm)->field.le_next)) \ + panic("Bad link elm %p next->prev != elm", (elm)); \ } while(0) -#define VERIFY_PREV_LINK(elm,field) do { \ - if (*(elm)->field.le_prev != (elm)) \ - panic("Bad link elm %p prev->next != elm", (elm)); \ +#define VERIFY_PREV_LINK(elm, field) do { \ + if (*(elm)->field.le_prev != (elm)) \ + panic("Bad link elm %p prev->next != elm", (elm)); \ } while(0) #define TCP_SET_TIMER_MODE(mode, i) do { \ if (IS_TIMER_HZ_10MS(i)) \ - (mode) |= TCP_TIMERLIST_10MS_MODE; \ + (mode) |= TCP_TIMERLIST_10MS_MODE; \ else if (IS_TIMER_HZ_100MS(i)) \ - (mode) |= TCP_TIMERLIST_100MS_MODE; \ + (mode) |= TCP_TIMERLIST_100MS_MODE; \ else \ - (mode) |= TCP_TIMERLIST_500MS_MODE; \ + (mode) |= TCP_TIMERLIST_500MS_MODE; \ } while(0) #if (DEVELOPMENT || DEBUG) @@ -430,12 +432,12 @@ sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS struct if_link_status ifsr; struct if_cellular_status_v1 *new_cell_sr; err = sysctl_io_number(req, tcp_change_mss_recommended, - sizeof (int32_t), &i, &changed); + sizeof(int32_t), &i, &changed); if (changed) { ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_link) { if (IFNET_IS_CELLULAR(ifp)) { - bzero(&ifsr, sizeof (ifsr)); + bzero(&ifsr, sizeof(ifsr)); new_cell_sr = &ifsr.ifsr_u.ifsr_cell.if_cell_u.if_status_v1; ifsr.ifsr_version = IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION; ifsr.ifsr_len = sizeof(*new_cell_sr); @@ -443,7 +445,7 @@ sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS /* Set MSS recommended */ new_cell_sr->valid_bitmask |= IF_CELL_UL_MSS_RECOMMENDED_VALID; 
new_cell_sr->mss_recommended = i; - err = ifnet_link_status_report(ifp, new_cell_sr, sizeof (new_cell_sr)); + err = ifnet_link_status_report(ifp, new_cell_sr, sizeof(new_cell_sr)); if (err == 0) { tcp_change_mss_recommended = i; } else { @@ -453,7 +455,7 @@ sysctl_change_mss_recommended SYSCTL_HANDLER_ARGS } ifnet_head_done(); } - return (err); + return err; } SYSCTL_PROC(_net_inet_tcp, OID_AUTO, change_mss_recommended, @@ -472,7 +474,8 @@ SYSCTL_INT(_net_inet_tcp, OID_AUTO, report_stats_interval, * with the sign bit reset is actually ahead of the other. */ inline int32_t -timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) { +timer_diff(uint32_t t1, uint32_t toff1, uint32_t t2, uint32_t toff2) +{ return (int32_t)((t1 + toff1) - (t2 + toff2)); }; @@ -505,8 +508,9 @@ add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay) /* We will use the TCPT_2MSL timer for tracking this delay */ - if (TIMER_IS_ON_LIST(tp)) + if (TIMER_IS_ON_LIST(tp)) { tcp_remove_timer(tp); + } tp->t_timer[TCPT_2MSL] = timer; TAILQ_INSERT_TAIL(&tcp_tw_tailq, tp, t_twentry); @@ -516,8 +520,9 @@ void add_to_time_wait(struct tcpcb *tp, uint32_t delay) { struct inpcbinfo *pcbinfo = &tcbinfo; - if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) + if (tp->t_inpcb->inp_socket->so_options & SO_NOWAKEFROMSLEEP) { socket_post_kev_msg_closed(tp->t_inpcb->inp_socket); + } /* 19182803: Notify nstat that connection is closing before waiting. */ nstat_pcb_detach(tp->t_inpcb); @@ -538,8 +543,9 @@ void tcp_remove_from_time_wait(struct inpcb *inp) { struct tcpcb *tp = intotcpcb(inp); - if (inp->inp_flags2 & INP2_TIMEWAIT) + if (inp->inp_flags2 & INP2_TIMEWAIT) { TAILQ_REMOVE(&tcp_tw_tailq, tp, t_twentry); + } } static boolean_t @@ -576,8 +582,9 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) /* Check again under the lock */ if (so->so_usecount > 1) { - if (inp->inp_wantcnt == WNT_STOPUSING) + if (inp->inp_wantcnt == WNT_STOPUSING) { active = TRUE; + } lck_mtx_unlock(&inp->inpcb_mtx); goto out; } @@ -602,23 +609,23 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) ((istimewait && (so->so_flags & SOF_OVERFLOW)) || ((tp != NULL) && (tp->t_state == TCPS_CLOSED) && (so->so_head != NULL) && - ((so->so_state & (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE)) == - (SS_INCOMP|SS_CANTSENDMORE|SS_CANTRCVMORE))))) { - + ((so->so_state & (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE)) == + (SS_INCOMP | SS_CANTSENDMORE | SS_CANTRCVMORE))))) { if (inp->inp_state != INPCB_STATE_DEAD) { /* Become a regular mutex */ lck_mtx_convert_spin(&inp->inpcb_mtx); #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - else + } else #endif /* INET6 */ - in_pcbdetach(inp); + in_pcbdetach(inp); } VERIFY(so->so_usecount > 0); so->so_usecount--; - if (inp->inp_wantcnt == WNT_STOPUSING) + if (inp->inp_wantcnt == WNT_STOPUSING) { active = TRUE; + } lck_mtx_unlock(&inp->inpcb_mtx); goto out; } else if (inp->inp_wantcnt != WNT_STOPUSING) { @@ -640,7 +647,7 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) */ if (so->so_usecount == 0) { DTRACE_TCP4(state__change, void, NULL, struct inpcb *, inp, - struct tcpcb *, tp, int32_t, TCPS_CLOSED); + struct tcpcb *, tp, int32_t, TCPS_CLOSED); /* Become a regular mutex */ lck_mtx_convert_spin(&inp->inpcb_mtx); @@ -654,11 +661,11 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) if (inp->inp_state != INPCB_STATE_DEAD) { #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - 
else + } else #endif /* INET6 */ - in_pcbdetach(inp); + in_pcbdetach(inp); } if (mp_so) { @@ -678,10 +685,11 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) active = TRUE; out: - if (mp_so) + if (mp_so) { socket_unlock(mp_so, 1); + } - return (active); + return active; } /* @@ -731,8 +739,9 @@ tcp_gc(struct inpcbinfo *ipi) tcp_gc_done = TRUE; LIST_FOREACH_SAFE(inp, &tcb, inp_list, nxt) { - if (tcp_garbage_collect(inp, 0)) + if (tcp_garbage_collect(inp, 0)) { atomic_add_32(&ipi->ipi_gc_req.intimer_fast, 1); + } } /* Now cleanup the time wait ones */ @@ -749,8 +758,9 @@ tcp_gc(struct inpcbinfo *ipi) */ if (tw_tp->t_state == TCPS_CLOSED || TSTMP_GEQ(tcp_now, tw_tp->t_timer[TCPT_2MSL])) { - if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) + if (tcp_garbage_collect(tw_tp->t_inpcb, 1)) { atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1); + } } } @@ -760,8 +770,9 @@ tcp_gc(struct inpcbinfo *ipi) lck_rw_done(ipi->ipi_lock); /* Clean up the socache while we are here */ - if (so_cache_timer()) + if (so_cache_timer()) { atomic_add_32(&ipi->ipi_gc_req.intimer_lazy, 1); + } KERNEL_DEBUG(DBG_FNC_TCP_SLOW | DBG_FUNC_END, tws_checked, cur_tw_slot, 0, 0, 0); @@ -778,19 +789,20 @@ tcp_canceltimers(struct tcpcb *tp) int i; tcp_remove_timer(tp); - for (i = 0; i < TCPT_NTIMERS; i++) + for (i = 0; i < TCPT_NTIMERS; i++) { tp->t_timer[i] = 0; + } tp->tentry.timer_start = tcp_now; tp->tentry.index = TCPT_NONE; } -int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] = - { 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 }; +int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] = +{ 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 }; -int tcp_backoff[TCP_MAXRXTSHIFT + 1] = - { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; +int tcp_backoff[TCP_MAXRXTSHIFT + 1] = +{ 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 }; -static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */ +static int tcp_totbackoff = 511; /* sum of tcp_backoff[] */ void tcp_rexmt_save_state(struct tcpcb *tp) @@ -818,10 +830,11 @@ tcp_rexmt_save_state(struct tcpcb *tp) tp->snd_cwnd_prev = tp->snd_cwnd; tp->snd_ssthresh_prev = tp->snd_ssthresh; tp->snd_recover_prev = tp->snd_recover; - if (IN_FASTRECOVERY(tp)) + if (IN_FASTRECOVERY(tp)) { tp->t_flags |= TF_WASFRECOVERY; - else + } else { tp->t_flags &= ~TF_WASFRECOVERY; + } } tp->t_srtt_prev = (tp->t_srtt >> TCP_RTT_SHIFT) + 2; tp->t_rttvar_prev = (tp->t_rttvar >> TCP_RTTVAR_SHIFT); @@ -848,8 +861,9 @@ tcp_pmtud_revert_segment_size(struct tcpcb *tp) * Reset the slow-start flight size as it * may depend on the new MSS */ - if (CC_ALGO(tp)->cwnd_init != NULL) + if (CC_ALGO(tp)->cwnd_init != NULL) { CC_ALGO(tp)->cwnd_init(tp); + } tp->t_pmtud_start_ts = 0; tcpstat.tcps_pmtudbh_reverted++; @@ -880,7 +894,6 @@ tcp_timers(struct tcpcb *tp, int timer) idle_time = tcp_now - tp->t_rcvtime; switch (timer) { - /* * 2 MSL timeout in shutdown went off. 
If we're closed but * still waiting for peer to close and connection has been idle @@ -895,10 +908,10 @@ tcp_timers(struct tcpcb *tp, int timer) tp->t_state != TCPS_FIN_WAIT_2 && ((idle_time > 0) && (idle_time < TCP_CONN_MAXIDLE(tp)))) { tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, - (u_int32_t)TCP_CONN_KEEPINTVL(tp)); + (u_int32_t)TCP_CONN_KEEPINTVL(tp)); } else { tp = tcp_close(tp); - return(tp); + return tp; } break; @@ -911,8 +924,9 @@ tcp_timers(struct tcpcb *tp, int timer) absolutetime_to_nanoseconds(mach_absolutetime_asleep, &accsleep_ms); accsleep_ms = accsleep_ms / 1000000UL; - if (accsleep_ms > tp->t_accsleep_ms) + if (accsleep_ms > tp->t_accsleep_ms) { last_sleep_ms = accsleep_ms - tp->t_accsleep_ms; + } /* * Drop a connection in the retransmit timer * 1. If we have retransmitted more than TCP_MAXRXTSHIFT @@ -963,11 +977,12 @@ tcp_timers(struct tcpcb *tp, int timer) tp->t_rxtshift = TCP_MAXRXTSHIFT; postevent(so, 0, EV_TIMEOUT); soevent(so, - (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); if (TCP_ECN_ENABLED(tp) && - tp->t_state == TCPS_ESTABLISHED) + tp->t_state == TCPS_ESTABLISHED) { tcp_heuristic_ecn_droprxmt(tp); + } tp = tcp_drop(tp, tp->t_softerror ? tp->t_softerror : ETIMEDOUT); @@ -979,7 +994,7 @@ retransmit_packet: tp->t_accsleep_ms = accsleep_ms; if (tp->t_rxtshift == 1 && - tp->t_state == TCPS_ESTABLISHED) { + tp->t_state == TCPS_ESTABLISHED) { /* Set the time at which retransmission started. */ tp->t_rxtstart = tcp_now; @@ -993,8 +1008,9 @@ retransmit_packet: #if MPTCP if ((tp->t_rxtshift >= mptcp_fail_thresh) && (tp->t_state == TCPS_ESTABLISHED) && - (tp->t_mpflags & TMPF_MPTCP_TRUE)) + (tp->t_mpflags & TMPF_MPTCP_TRUE)) { mptcp_act_on_txfail(so); + } if (so->so_flags & SOF_MP_SUBFLOW) { struct mptses *mpte = tptomptp(tp)->mpt_mpte; @@ -1004,12 +1020,12 @@ retransmit_packet: #endif /* MPTCP */ if (tp->t_adaptive_wtimo > 0 && - tp->t_rxtshift > tp->t_adaptive_wtimo && - TCPS_HAVEESTABLISHED(tp->t_state)) { + tp->t_rxtshift > tp->t_adaptive_wtimo && + TCPS_HAVEESTABLISHED(tp->t_state)) { /* Send an event to the application */ soevent(so, - (SO_FILT_HINT_LOCKED| - SO_FILT_HINT_ADAPTIVE_WTIMO)); + (SO_FILT_HINT_LOCKED | + SO_FILT_HINT_ADAPTIVE_WTIMO)); } /* @@ -1033,14 +1049,15 @@ retransmit_packet: tp->t_flagsext &= ~(TF_DELAY_RECOVERY); } - if (tp->t_state == TCPS_SYN_RECEIVED) + if (tp->t_state == TCPS_SYN_RECEIVED) { tcp_disable_tfo(tp); + } if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) && (tp->t_tfo_stats & TFO_S_SYN_DATA_SENT) && !(tp->t_tfo_flags & TFO_F_NO_SNDPROBING) && ((tp->t_state != TCPS_SYN_SENT && tp->t_rxtshift > 1) || - tp->t_rxtshift > 4)) { + tp->t_rxtshift > 4)) { /* * For regular retransmissions, a first one is being * done for tail-loss probe. 
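The retransmit-timer hunks above scale the RTO through the tcp_backoff[] shift table and clamp the result with the TCPT_RANGESET macro used just below (and reformatted in the tcp_timer.h hunks later in this patch), adding the 200 ms slop only once the connection is established. The following is a compact sketch of that computation, with TCP_RETRANSHZ assumed to be 100 ticks per second and the remaining constants taken from the tcp_timer.h values in this patch; it illustrates the shape of the backoff, not the kernel's full RTO bookkeeping.

	#include <stdint.h>
	#include <stdio.h>

	#define RETRANSHZ   100                 /* ticks per second (assumed) */
	#define RXT_MIN     (RETRANSHZ / 33)    /* TCPTV_REXMTMIN, non-local */
	#define RXT_MAX     (64 * RETRANSHZ)    /* TCPTV_REXMTMAX */
	#define RXT_SLOP    (RETRANSHZ / 5)     /* TCPTV_REXMTSLOP, ~200 ms */
	#define MAXRXTSHIFT 12                  /* TCP_MAXRXTSHIFT */

	/* Same values as the patch's tcp_backoff[] table. */
	static const int backoff[MAXRXTSHIFT + 1] =
	    { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

	/* Clamp like TCPT_RANGESET: optional slop, then [min, max]. */
	static uint32_t
	rangeset(uint32_t value, uint32_t tvmin, uint32_t tvmax, int addslop)
	{
		uint32_t tv = (addslop ? RXT_SLOP : 0) + value;

		if (tv < tvmin)
			tv = tvmin;
		else if (tv > tvmax)
			tv = tvmax;
		return tv;
	}

	int
	main(void)
	{
		uint32_t base_rto = RETRANSHZ;	/* pretend the computed RTO is 1 s */

		for (int shift = 0; shift <= MAXRXTSHIFT; shift++) {
			uint32_t rexmt = base_rto * backoff[shift];
			printf("shift %2d -> rto %u ticks\n", shift,
			    rangeset(rexmt, RXT_MIN, RXT_MAX, /*established*/ 1));
		}
		return 0;
	}

After a few doublings the interval pins at TCPTV_REXMTMAX, and the table's total of 511 is what the patch records as tcp_totbackoff ("sum of tcp_backoff[]"), which the persist-drop check later in this file multiplies against TCP_REXMTVAL as a worst-case idle bound.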
@@ -1079,7 +1096,7 @@ retransmit_packet: /* When retransmitting, disable TFO */ if (tfo_enabled(tp) && (!(so->so_flags1 & SOF1_DATA_AUTHENTICATED) || - (tp->t_flagsext & TF_FASTOPEN_HEUR))) { + (tp->t_flagsext & TF_FASTOPEN_HEUR))) { tp->t_flagsext &= ~TF_FASTOPEN; tp->t_tfo_flags |= TFO_F_SYN_LOSS; } @@ -1088,19 +1105,20 @@ retransmit_packet: } TCPT_RANGESET(tp->t_rxtcur, rexmt, tp->t_rttmin, TCPTV_REXMTMAX, - TCP_ADD_REXMTSLOP(tp)); + TCP_ADD_REXMTSLOP(tp)); tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur); - if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) + if (INP_WAIT_FOR_IF_FEEDBACK(tp->t_inpcb)) { goto fc_output; + } tcp_free_sackholes(tp); /* * Check for potential Path MTU Discovery Black Hole */ if (tcp_pmtud_black_hole_detect && - !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) && - (tp->t_state == TCPS_ESTABLISHED)) { + !(tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) && + (tp->t_state == TCPS_ESTABLISHED)) { if ((tp->t_flags & TF_PMTUD) && ((tp->t_flags & TF_MAXSEGSNT) || tp->t_pmtud_lastseg_size > tcp_pmtud_black_hole_mss) && @@ -1119,17 +1137,18 @@ retransmit_packet: /* Keep track of previous MSS */ tp->t_pmtud_saved_maxopd = tp->t_maxopd; tp->t_pmtud_start_ts = tcp_now; - if (tp->t_pmtud_start_ts == 0) + if (tp->t_pmtud_start_ts == 0) { tp->t_pmtud_start_ts++; + } /* Reduce the MSS to intermediary value */ if (tp->t_maxopd > tcp_pmtud_black_hole_mss) { tp->t_maxopd = tcp_pmtud_black_hole_mss; } else { tp->t_maxopd = /* use the default MSS */ #if INET6 - isipv6 ? tcp_v6mssdflt : + isipv6 ? tcp_v6mssdflt : #endif /* INET6 */ - tcp_mssdflt; + tcp_mssdflt; } tp->t_maxseg = tp->t_maxopd - optlen; @@ -1137,8 +1156,9 @@ retransmit_packet: * Reset the slow-start flight size * as it may depend on the new MSS */ - if (CC_ALGO(tp)->cwnd_init != NULL) + if (CC_ALGO(tp)->cwnd_init != NULL) { CC_ALGO(tp)->cwnd_init(tp); + } tp->snd_cwnd = tp->t_maxseg; } /* @@ -1148,7 +1168,6 @@ retransmit_packet: * MSS and blackhole detection flags. */ else { - if ((tp->t_flags & TF_BLACKHOLE) && (tp->t_rxtshift > 4)) { tcp_pmtud_revert_segment_size(tp); @@ -1168,8 +1187,9 @@ retransmit_packet: * Do this only on non-local connections. */ if (tp->t_state == TCPS_SYN_SENT && - tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) - tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_REQ_CC); + tp->t_rxtshift == tcp_broken_peer_syn_rxmit_thres) { + tp->t_flags &= ~(TF_REQ_SCALE | TF_REQ_TSTMP | TF_REQ_CC); + } /* * If losing, let the lower level know and try for @@ -1181,9 +1201,9 @@ retransmit_packet: */ if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) { #if INET6 - if (isipv6) + if (isipv6) { in6_losing(tp->t_inpcb); - else + } else #endif /* INET6 */ in_losing(tp->t_inpcb); tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT); @@ -1203,8 +1223,9 @@ retransmit_packet: /* If timing a segment in this window, stop the timer */ tp->t_rtttime = 0; - if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) + if (!IN_FASTRECOVERY(tp) && tp->t_rxtshift == 1) { tcpstat.tcps_tailloss_rto++; + } /* @@ -1221,8 +1242,9 @@ retransmit_packet: * right after Fast Retransmits and ECE * notification receipts. 
*/ - if (TCP_ECN_ENABLED(tp)) + if (TCP_ECN_ENABLED(tp)) { tp->ecn_flags |= TE_SENDCWR; + } } EXIT_FASTRECOVERY(tp); @@ -1258,11 +1280,11 @@ fc_output: (idle_time >= tcp_maxpersistidle || idle_time >= TCP_REXMTVAL(tp) * tcp_totbackoff)) || ((tp->t_persist_stop != 0) && - TSTMP_LEQ(tp->t_persist_stop, tcp_now))) { + TSTMP_LEQ(tp->t_persist_stop, tcp_now))) { tcpstat.tcps_persistdrop++; postevent(so, 0, EV_TIMEOUT); soevent(so, - (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); break; } @@ -1288,19 +1310,22 @@ fc_output: (tp->t_state > TCPS_ESTABLISHED)) { goto dropit; } else if (mp_tp != NULL) { - if ((mptcp_ok_to_keepalive(mp_tp) == 0)) + if ((mptcp_ok_to_keepalive(mp_tp) == 0)) { goto dropit; + } } #endif /* MPTCP */ - if (tp->t_state < TCPS_ESTABLISHED) + if (tp->t_state < TCPS_ESTABLISHED) { goto dropit; + } if ((always_keepalive || (tp->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) || (tp->t_flagsext & TF_DETECT_READSTALL) || (tp->t_tfo_probe_state == TFO_PROBE_PROBING)) && (tp->t_state <= TCPS_CLOSING || tp->t_state == TCPS_FIN_WAIT_2)) { - if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) + if (idle_time >= TCP_CONN_KEEPIDLE(tp) + TCP_CONN_MAXIDLE(tp)) { goto dropit; + } /* * Send a packet designed to force a response * if the peer is up and reachable: @@ -1324,16 +1349,18 @@ fc_output: tra.noexpensive = INP_NO_EXPENSIVE(inp); tra.awdl_unrestricted = INP_AWDL_UNRESTRICTED(inp); tra.intcoproc_allowed = INP_INTCOPROC_ALLOWED(inp); - if (tp->t_inpcb->inp_flags & INP_BOUND_IF) + if (tp->t_inpcb->inp_flags & INP_BOUND_IF) { tra.ifscope = tp->t_inpcb->inp_boundifp->if_index; - else + } else { tra.ifscope = IFSCOPE_NONE; + } tcp_respond(tp, t_template->tt_ipgen, &t_template->tt_t, (struct mbuf *)NULL, tp->rcv_nxt, tp->snd_una - 1, 0, &tra); (void) m_free(dtom(t_template)); - if (tp->t_flagsext & TF_DETECT_READSTALL) + if (tp->t_flagsext & TF_DETECT_READSTALL) { tp->t_rtimo_probes++; + } } tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_CONN_KEEPINTVL(tp)); @@ -1372,7 +1399,7 @@ fc_output: int ind = min(tp->t_rtimo_probes, TCP_MAXRXTSHIFT); tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START( - tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)); + tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)); } } if (tp->t_tfo_probe_state == TFO_PROBE_PROBING) { @@ -1389,10 +1416,10 @@ fc_output: * backing off. */ tp->t_timer[TCPT_KEEP] = min(OFFSET_FROM_START( - tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)), + tp, tcp_backoff[ind] * TCP_REXMTVAL(tp)), tp->t_timer[TCPT_KEEP]); } else if (!(tp->t_tfo_flags & TFO_F_HEURISTIC_DONE) && - tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) { + tp->t_tfo_probe_state == TFO_PROBE_WAIT_DATA) { /* Still no data! Let's assume a TFO-error and err out... */ tcp_heuristic_tfo_middlebox(tp); @@ -1419,11 +1446,12 @@ fc_output: */ if ((tp->t_flags & TF_STRETCHACK)) { if (tp->t_unacksegs > 1 && - tp->t_unacksegs < maxseg_unacked) + tp->t_unacksegs < maxseg_unacked) { tp->t_stretchack_delayed++; + } if (tp->t_stretchack_delayed > - TCP_STRETCHACK_DELAY_THRESHOLD) { + TCP_STRETCHACK_DELAY_THRESHOLD) { tp->t_flagsext |= TF_DISABLE_STRETCHACK; /* * Note the time at which stretch @@ -1459,7 +1487,7 @@ fc_output: tcpstat.tcps_timeoutdrop++; postevent(so, 0, EV_TIMEOUT); soevent(so, - (SO_FILT_HINT_LOCKED| + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, tp->t_softerror ? 
tp->t_softerror : ETIMEDOUT); @@ -1492,8 +1520,9 @@ fc_output: tp->snd_max == tp->snd_una || !SACK_ENABLED(tp) || !TAILQ_EMPTY(&tp->snd_holes) || - IN_FASTRECOVERY(tp)) + IN_FASTRECOVERY(tp)) { break; + } /* * If there is no new data to send or if the @@ -1512,8 +1541,9 @@ fc_output: } tcpstat.tcps_pto++; - if (tp->t_flagsext & TF_PROBING) + if (tp->t_flagsext & TF_PROBING) { tcpstat.tcps_probe_if++; + } /* If timing a segment in this window, stop the timer */ tp->t_rtttime = 0; @@ -1531,7 +1561,7 @@ fc_output: */ tp->t_timer[TCPT_REXMT] = 0; - (void )tcp_output(tp); + (void)tcp_output(tp); tp->snd_cwnd -= tp->t_maxseg; tp->t_tlphighrxt = tp->snd_nxt; @@ -1548,15 +1578,17 @@ fc_output: */ if (IN_FASTRECOVERY(tp) || SEQ_GEQ(tp->snd_una, tp->snd_recover) || - tp->t_rxtshift > 0) + tp->t_rxtshift > 0) { break; + } VERIFY(SACK_ENABLED(tp)); tcp_rexmt_save_state(tp); if (CC_ALGO(tp)->pre_fr != NULL) { CC_ALGO(tp)->pre_fr(tp); - if (TCP_ECN_ENABLED(tp)) + if (TCP_ECN_ENABLED(tp)) { tp->ecn_flags |= TE_SENDCWR; + } } ENTER_FASTRECOVERY(tp); @@ -1568,20 +1600,21 @@ fc_output: tcp_ccdbg_trace(tp, NULL, TCP_CC_ENTER_FASTRECOVERY); (void) tcp_output(tp); break; - dropit: +dropit: tcpstat.tcps_keepdrops++; postevent(so, 0, EV_TIMEOUT); soevent(so, - (SO_FILT_HINT_LOCKED|SO_FILT_HINT_TIMEOUT)); + (SO_FILT_HINT_LOCKED | SO_FILT_HINT_TIMEOUT)); tp = tcp_drop(tp, ETIMEDOUT); break; } #if TCPDEBUG - if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) + if (tp->t_inpcb->inp_socket->so_options & SO_DEBUG) { tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0, - PRU_SLOWTIMO); + PRU_SLOWTIMO); + } #endif - return (tp); + return tp; } /* Remove a timer entry from timer list */ @@ -1602,8 +1635,9 @@ tcp_remove_timer(struct tcpcb *tp) return; } - if (listp->next_te != NULL && listp->next_te == &tp->tentry) + if (listp->next_te != NULL && listp->next_te == &tp->tentry) { listp->next_te = LIST_NEXT(&tp->tentry, le); + } LIST_REMOVE(&tp->tentry, le); tp->t_flags &= ~(TF_TIMER_ONLIST); @@ -1632,29 +1666,34 @@ need_to_resched_timerlist(u_int32_t runtime, u_int16_t mode) * in flux. In this case always acquire the lock and set the state * correctly. 
*/ - if (listp->running) - return (TRUE); + if (listp->running) { + return TRUE; + } - if (!listp->scheduled) - return (TRUE); + if (!listp->scheduled) { + return TRUE; + } diff = timer_diff(listp->runtime, 0, runtime, 0); if (diff <= 0) { /* The list is going to run before this timer */ - return (FALSE); + return FALSE; } else { if (mode & TCP_TIMERLIST_10MS_MODE) { - if (diff <= TCP_TIMER_10MS_QUANTUM) - return (FALSE); + if (diff <= TCP_TIMER_10MS_QUANTUM) { + return FALSE; + } } else if (mode & TCP_TIMERLIST_100MS_MODE) { - if (diff <= TCP_TIMER_100MS_QUANTUM) - return (FALSE); + if (diff <= TCP_TIMER_100MS_QUANTUM) { + return FALSE; + } } else { - if (diff <= TCP_TIMER_500MS_QUANTUM) - return (FALSE); + if (diff <= TCP_TIMER_500MS_QUANTUM) { + return FALSE; + } } } - return (TRUE); + return TRUE; } void @@ -1690,7 +1729,7 @@ tcp_sched_timerlist(uint32_t offset) */ u_int32_t tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode, - u_int16_t probe_if_index) + u_int16_t probe_if_index) { struct socket *so; u_int16_t i = 0, index = TCPT_NONE, lo_index = TCPT_NONE; @@ -1708,7 +1747,7 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode, so = tp->t_inpcb->inp_socket; /* Release the want count on inp */ if (in_pcb_checkstate(tp->t_inpcb, WNT_RELEASE, 1) - == WNT_STOPUSING) { + == WNT_STOPUSING) { if (TIMER_IS_ON_LIST(tp)) { tcp_remove_timer(tp); } @@ -1736,8 +1775,9 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode, * with another thread that can cancel or reschedule the timer * that is about to run. Check if we need to run anything. */ - if ((index = tp->tentry.index) == TCPT_NONE) + if ((index = tp->tentry.index) == TCPT_NONE) { goto done; + } timer_val = tp->t_timer[index]; @@ -1753,8 +1793,9 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode, tp->t_timer[index] = 0; if (timer_val > 0) { tp = tcp_timers(tp, index); - if (tp == NULL) + if (tp == NULL) { goto done; + } } /* @@ -1765,7 +1806,7 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode, for (i = 0; i < TCPT_NTIMERS; ++i) { if (tp->t_timer[i] != 0) { diff = timer_diff(tp->tentry.timer_start, - tp->t_timer[i], tcp_now, 0); + tp->t_timer[i], tcp_now, 0); if (diff <= 0) { needtorun[i] = TRUE; count++; @@ -1787,9 +1828,10 @@ tcp_run_conn_timer(struct tcpcb *tp, u_int16_t *te_mode, if (tp->tentry.index != TCPT_NONE) { tp->tentry.runtime = tp->tentry.timer_start + - tp->t_timer[tp->tentry.index]; - if (tp->tentry.runtime == 0) + tp->t_timer[tp->tentry.index]; + if (tp->tentry.runtime == 0) { tp->tentry.runtime++; + } } if (count > 0) { @@ -1820,7 +1862,7 @@ done: } socket_unlock(so, 1); - return(offset); + return offset; } void @@ -1831,7 +1873,7 @@ tcp_run_timerlist(void * arg1, void * arg2) struct tcptimerlist *listp = &tcp_timer_list; struct tcpcb *tp; uint32_t next_timer = 0; /* offset of the next timer on the list */ - u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */ + u_int16_t te_mode = 0; /* modes of all active timers in a tcpcb */ u_int16_t list_mode = 0; /* cumulative of modes of all tcpcbs */ uint32_t active_count = 0; @@ -1925,21 +1967,23 @@ tcp_run_timerlist(void * arg1, void * arg2) if (offset > 0 && te_mode != 0) { list_mode |= te_mode; - if (next_timer == 0 || offset < next_timer) + if (next_timer == 0 || offset < next_timer) { next_timer = offset; + } } } if (!LIST_EMPTY(&listp->lhead)) { u_int16_t next_mode = 0; if ((list_mode & TCP_TIMERLIST_10MS_MODE) || - (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) + (listp->pref_mode & TCP_TIMERLIST_10MS_MODE)) { next_mode = 
TCP_TIMERLIST_10MS_MODE; - else if ((list_mode & TCP_TIMERLIST_100MS_MODE) || - (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) + } else if ((list_mode & TCP_TIMERLIST_100MS_MODE) || + (listp->pref_mode & TCP_TIMERLIST_100MS_MODE)) { next_mode = TCP_TIMERLIST_100MS_MODE; - else + } else { next_mode = TCP_TIMERLIST_500MS_MODE; + } if (next_mode != TCP_TIMERLIST_500MS_MODE) { listp->idleruns = 0; @@ -1962,12 +2006,14 @@ tcp_run_timerlist(void * arg1, void * arg2) } } listp->mode = next_mode; - if (listp->pref_offset != 0) + if (listp->pref_offset != 0) { next_timer = min(listp->pref_offset, next_timer); + } - if (listp->mode == TCP_TIMERLIST_500MS_MODE) + if (listp->mode == TCP_TIMERLIST_500MS_MODE) { next_timer = max(next_timer, - TCP_TIMER_500MS_QUANTUM); + TCP_TIMER_500MS_QUANTUM); + } tcp_sched_timerlist(next_timer); } else { @@ -2035,12 +2081,14 @@ tcp_sched_timers(struct tcpcb *tp) tp->t_flags |= TF_TIMER_ONLIST; listp->entries++; - if (listp->entries > listp->maxentries) + if (listp->entries > listp->maxentries) { listp->maxentries = listp->entries; + } /* if the list is not scheduled, just schedule it */ - if (!listp->scheduled) + if (!listp->scheduled) { goto schedule; + } } } @@ -2062,7 +2110,7 @@ tcp_sched_timers(struct tcpcb *tp) if (listp->running) { listp->pref_mode |= mode; if (listp->pref_offset == 0 || - offset < listp->pref_offset) { + offset < listp->pref_offset) { listp->pref_offset = offset; } } else { @@ -2074,10 +2122,11 @@ tcp_sched_timers(struct tcpcb *tp) int32_t diff; diff = timer_diff(listp->runtime, 0, tcp_now, offset); - if (diff <= 0) + if (diff <= 0) { goto done; - else + } else { goto schedule; + } } else { goto schedule; } @@ -2096,16 +2145,18 @@ schedule: listp->idleruns = 0; offset = min(offset, TCP_TIMER_10MS_QUANTUM); } else if (mode & TCP_TIMERLIST_100MS_MODE) { - if (listp->mode > TCP_TIMERLIST_100MS_MODE) + if (listp->mode > TCP_TIMERLIST_100MS_MODE) { listp->mode = TCP_TIMERLIST_100MS_MODE; + } listp->idleruns = 0; offset = min(offset, TCP_TIMER_100MS_QUANTUM); } tcp_sched_timerlist(offset); done: - if (list_locked) + if (list_locked) { lck_mtx_unlock(listp->mtx); + } return; } @@ -2131,8 +2182,9 @@ tcp_set_lotimer_index(struct tcpcb *tp) if (tp->tentry.index != TCPT_NONE) { tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[tp->tentry.index]; - if (tp->tentry.runtime == 0) + if (tp->tentry.runtime == 0) { tp->tentry.runtime++; + } } } @@ -2141,8 +2193,9 @@ tcp_check_timer_state(struct tcpcb *tp) { socket_lock_assert_owned(tp->t_inpcb->inp_socket); - if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) + if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT) { return; + } tcp_set_lotimer_index(tp); @@ -2155,10 +2208,11 @@ tcp_cumulative_stat(u_int32_t cur, u_int32_t *prev, u_int32_t *dest) { /* handle wrap around */ int32_t diff = (int32_t) (cur - *prev); - if (diff > 0) + if (diff > 0) { *dest = diff; - else + } else { *dest = 0; + } *prev = cur; return; } @@ -2168,10 +2222,11 @@ tcp_cumulative_stat64(u_int64_t cur, u_int64_t *prev, u_int64_t *dest) { /* handle wrap around */ int64_t diff = (int64_t) (cur - *prev); - if (diff > 0) + if (diff > 0) { *dest = diff; - else + } else { *dest = 0; + } *prev = cur; return; } @@ -2186,10 +2241,11 @@ tcp_report_stats(void) static struct tcp_last_report_stats prev; u_int64_t var, uptime; -#define stat data.u.tcp_stats +#define stat data.u.tcp_stats if (((uptime = net_uptime()) - tcp_last_report_time) < - tcp_report_stats_interval) + tcp_report_stats_interval) { return; + } tcp_last_report_time = uptime; @@ -2203,12 
+2259,12 @@ tcp_report_stats(void) /* ipv4 avg rtt */ lck_mtx_lock(rnh_lock); rt = rt_lookup(TRUE, (struct sockaddr *)&dst, NULL, - rt_tables[AF_INET], IFSCOPE_NONE); + rt_tables[AF_INET], IFSCOPE_NONE); lck_mtx_unlock(rnh_lock); if (rt != NULL) { RT_LOCK(rt); if (rt_primary_default(rt, rt_key(rt)) && - rt->rt_stats != NULL) { + rt->rt_stats != NULL) { stat.ipv4_avgrtt = rt->rt_stats->nstat_avg_rtt; } RT_UNLOCK(rt); @@ -2222,13 +2278,13 @@ tcp_report_stats(void) dst6.sin6_family = AF_INET6; lck_mtx_lock(rnh_lock); - rt = rt_lookup(TRUE,(struct sockaddr *)&dst6, NULL, - rt_tables[AF_INET6], IFSCOPE_NONE); + rt = rt_lookup(TRUE, (struct sockaddr *)&dst6, NULL, + rt_tables[AF_INET6], IFSCOPE_NONE); lck_mtx_unlock(rnh_lock); if (rt != NULL) { RT_LOCK(rt); if (rt_primary_default(rt, rt_key(rt)) && - rt->rt_stats != NULL) { + rt->rt_stats != NULL) { stat.ipv6_avgrtt = rt->rt_stats->nstat_avg_rtt; } RT_UNLOCK(rt); @@ -2253,20 +2309,22 @@ tcp_report_stats(void) && tcpstat.tcps_tailloss_rto > 0) { var = tcpstat.tcps_tailloss_rto << 10; stat.send_tlrto_rate = - (var * 100) / tcpstat.tcps_sndrexmitpack; + (var * 100) / tcpstat.tcps_sndrexmitpack; } /* packet reordering */ if (tcpstat.tcps_sndpack > 0 && tcpstat.tcps_reordered_pkts > 0) { var = tcpstat.tcps_reordered_pkts << 10; stat.send_reorder_rate = - (var * 100) / tcpstat.tcps_sndpack; + (var * 100) / tcpstat.tcps_sndpack; } - if (tcp_ecn_outbound == 1) + if (tcp_ecn_outbound == 1) { stat.ecn_client_enabled = 1; - if (tcp_ecn_inbound == 1) + } + if (tcp_ecn_inbound == 1) { stat.ecn_server_enabled = 1; + } tcp_cumulative_stat(tcpstat.tcps_connattempt, &prev.tcps_connattempt, &stat.connection_attempts); tcp_cumulative_stat(tcpstat.tcps_accepts, @@ -2342,67 +2400,67 @@ tcp_report_stats(void) tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_attempt, - &prev.tcps_mptcp_handover_attempt , &stat.mptcp_handover_attempt); + &prev.tcps_mptcp_handover_attempt, &stat.mptcp_handover_attempt); tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_attempt, - &prev.tcps_mptcp_interactive_attempt , &stat.mptcp_interactive_attempt); + &prev.tcps_mptcp_interactive_attempt, &stat.mptcp_interactive_attempt); tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_attempt, - &prev.tcps_mptcp_aggregate_attempt , &stat.mptcp_aggregate_attempt); + &prev.tcps_mptcp_aggregate_attempt, &stat.mptcp_aggregate_attempt); tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_attempt, - &prev.tcps_mptcp_fp_handover_attempt , &stat.mptcp_fp_handover_attempt); + &prev.tcps_mptcp_fp_handover_attempt, &stat.mptcp_fp_handover_attempt); tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_attempt, - &prev.tcps_mptcp_fp_interactive_attempt , &stat.mptcp_fp_interactive_attempt); + &prev.tcps_mptcp_fp_interactive_attempt, &stat.mptcp_fp_interactive_attempt); tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_attempt, - &prev.tcps_mptcp_fp_aggregate_attempt , &stat.mptcp_fp_aggregate_attempt); + &prev.tcps_mptcp_fp_aggregate_attempt, &stat.mptcp_fp_aggregate_attempt); tcp_cumulative_stat(tcpstat.tcps_mptcp_heuristic_fallback, - &prev.tcps_mptcp_heuristic_fallback , &stat.mptcp_heuristic_fallback); + &prev.tcps_mptcp_heuristic_fallback, &stat.mptcp_heuristic_fallback); tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_heuristic_fallback, - &prev.tcps_mptcp_fp_heuristic_fallback , &stat.mptcp_fp_heuristic_fallback); + &prev.tcps_mptcp_fp_heuristic_fallback, &stat.mptcp_fp_heuristic_fallback); tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_wifi, - &prev.tcps_mptcp_handover_success_wifi , 
&stat.mptcp_handover_success_wifi); + &prev.tcps_mptcp_handover_success_wifi, &stat.mptcp_handover_success_wifi); tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_success_cell, - &prev.tcps_mptcp_handover_success_cell , &stat.mptcp_handover_success_cell); + &prev.tcps_mptcp_handover_success_cell, &stat.mptcp_handover_success_cell); tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_success, - &prev.tcps_mptcp_interactive_success , &stat.mptcp_interactive_success); + &prev.tcps_mptcp_interactive_success, &stat.mptcp_interactive_success); tcp_cumulative_stat(tcpstat.tcps_mptcp_aggregate_success, - &prev.tcps_mptcp_aggregate_success , &stat.mptcp_aggregate_success); + &prev.tcps_mptcp_aggregate_success, &stat.mptcp_aggregate_success); tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_wifi, - &prev.tcps_mptcp_fp_handover_success_wifi , &stat.mptcp_fp_handover_success_wifi); + &prev.tcps_mptcp_fp_handover_success_wifi, &stat.mptcp_fp_handover_success_wifi); tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_handover_success_cell, - &prev.tcps_mptcp_fp_handover_success_cell , &stat.mptcp_fp_handover_success_cell); + &prev.tcps_mptcp_fp_handover_success_cell, &stat.mptcp_fp_handover_success_cell); tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_interactive_success, - &prev.tcps_mptcp_fp_interactive_success , &stat.mptcp_fp_interactive_success); + &prev.tcps_mptcp_fp_interactive_success, &stat.mptcp_fp_interactive_success); tcp_cumulative_stat(tcpstat.tcps_mptcp_fp_aggregate_success, - &prev.tcps_mptcp_fp_aggregate_success , &stat.mptcp_fp_aggregate_success); + &prev.tcps_mptcp_fp_aggregate_success, &stat.mptcp_fp_aggregate_success); tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_cell_from_wifi, - &prev.tcps_mptcp_handover_cell_from_wifi , &stat.mptcp_handover_cell_from_wifi); + &prev.tcps_mptcp_handover_cell_from_wifi, &stat.mptcp_handover_cell_from_wifi); tcp_cumulative_stat(tcpstat.tcps_mptcp_handover_wifi_from_cell, - &prev.tcps_mptcp_handover_wifi_from_cell , &stat.mptcp_handover_wifi_from_cell); + &prev.tcps_mptcp_handover_wifi_from_cell, &stat.mptcp_handover_wifi_from_cell); tcp_cumulative_stat(tcpstat.tcps_mptcp_interactive_cell_from_wifi, - &prev.tcps_mptcp_interactive_cell_from_wifi , &stat.mptcp_interactive_cell_from_wifi); + &prev.tcps_mptcp_interactive_cell_from_wifi, &stat.mptcp_interactive_cell_from_wifi); tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_cell_bytes, - &prev.tcps_mptcp_handover_cell_bytes , &stat.mptcp_handover_cell_bytes); + &prev.tcps_mptcp_handover_cell_bytes, &stat.mptcp_handover_cell_bytes); tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_cell_bytes, - &prev.tcps_mptcp_interactive_cell_bytes , &stat.mptcp_interactive_cell_bytes); + &prev.tcps_mptcp_interactive_cell_bytes, &stat.mptcp_interactive_cell_bytes); tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_cell_bytes, - &prev.tcps_mptcp_aggregate_cell_bytes , &stat.mptcp_aggregate_cell_bytes); + &prev.tcps_mptcp_aggregate_cell_bytes, &stat.mptcp_aggregate_cell_bytes); tcp_cumulative_stat64(tcpstat.tcps_mptcp_handover_all_bytes, - &prev.tcps_mptcp_handover_all_bytes , &stat.mptcp_handover_all_bytes); + &prev.tcps_mptcp_handover_all_bytes, &stat.mptcp_handover_all_bytes); tcp_cumulative_stat64(tcpstat.tcps_mptcp_interactive_all_bytes, - &prev.tcps_mptcp_interactive_all_bytes , &stat.mptcp_interactive_all_bytes); + &prev.tcps_mptcp_interactive_all_bytes, &stat.mptcp_interactive_all_bytes); tcp_cumulative_stat64(tcpstat.tcps_mptcp_aggregate_all_bytes, - &prev.tcps_mptcp_aggregate_all_bytes , 
&stat.mptcp_aggregate_all_bytes); + &prev.tcps_mptcp_aggregate_all_bytes, &stat.mptcp_aggregate_all_bytes); tcp_cumulative_stat(tcpstat.tcps_mptcp_back_to_wifi, - &prev.tcps_mptcp_back_to_wifi , &stat.mptcp_back_to_wifi); + &prev.tcps_mptcp_back_to_wifi, &stat.mptcp_back_to_wifi); tcp_cumulative_stat(tcpstat.tcps_mptcp_wifi_proxy, - &prev.tcps_mptcp_wifi_proxy , &stat.mptcp_wifi_proxy); + &prev.tcps_mptcp_wifi_proxy, &stat.mptcp_wifi_proxy); tcp_cumulative_stat(tcpstat.tcps_mptcp_cell_proxy, - &prev.tcps_mptcp_cell_proxy , &stat.mptcp_cell_proxy); + &prev.tcps_mptcp_cell_proxy, &stat.mptcp_cell_proxy); tcp_cumulative_stat(tcpstat.tcps_mptcp_triggered_cell, &prev.tcps_mptcp_triggered_cell, &stat.mptcp_triggered_cell); nstat_sysinfo_send_data(&data); -#undef stat +#undef stat } void @@ -2421,8 +2479,9 @@ tcp_interface_send_probe(u_int16_t probe_if_index) } listp->probe_if_index = probe_if_index; - if (listp->running) + if (listp->running) { goto done; + } /* * Reschedule the timerlist to run within the next 10ms, which is @@ -2480,8 +2539,9 @@ tcp_enable_read_probe(struct tcpcb *tp, struct ifnet *ifp) tp->tentry.index = TCPT_KEEP; tp->tentry.runtime = tcp_now + TCP_TIMER_10MS_QUANTUM; - if (tp->tentry.runtime == 0) + if (tp->tentry.runtime == 0) { tp->tentry.runtime++; + } } } } @@ -2498,8 +2558,9 @@ tcp_disable_read_probe(struct tcpcb *tp) tp->t_rtimo_probes > 0)) { tcp_keepalive_reset(tp); - if (tp->t_mpsub) + if (tp->t_mpsub) { mptcp_reset_keepalive(tp); + } } } @@ -2515,8 +2576,9 @@ tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable) struct inpcbinfo *pcbinfo = &tcbinfo; struct inpcb *inp, *nxt; - if (ifp == NULL) + if (ifp == NULL) { return; + } /* update clock */ calculate_tcp_clock(); @@ -2530,8 +2592,9 @@ tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable) LIST_FOREACH_SAFE(inp, pcbinfo->ipi_listhead, inp_list, nxt) { struct tcpcb *tp = NULL; if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == - WNT_STOPUSING) + WNT_STOPUSING) { continue; + } /* Acquire lock to look at the state of the connection */ socket_lock(inp->inp_socket, 1); @@ -2543,10 +2606,11 @@ tcp_probe_connectivity(struct ifnet *ifp, u_int32_t enable) continue; } tp = intotcpcb(inp); - if (enable) + if (enable) { tcp_enable_read_probe(tp, ifp); - else + } else { tcp_disable_read_probe(tp); + } socket_unlock(inp->inp_socket, 1); } @@ -2610,8 +2674,9 @@ tcp_update_mss_core(struct tcpcb *tp, struct ifnet *ifp) /* * clear the cached value if it is same as the current */ - if (tp->t_maxopd == tp->t_cached_maxopd) + if (tp->t_maxopd == tp->t_cached_maxopd) { tp->t_cached_maxopd = 0; + } } } @@ -2621,8 +2686,9 @@ tcp_update_mss_locked(struct socket *so, struct ifnet *ifp) struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp = intotcpcb(inp); - if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) + if (ifp == NULL && (ifp = inp->inp_last_outifp) == NULL) { return; + } if (!IFNET_IS_CELLULAR(ifp)) { /* @@ -2631,15 +2697,17 @@ tcp_update_mss_locked(struct socket *so, struct ifnet *ifp) */ return; } - if ( tp->t_state <= TCPS_CLOSE_WAIT) { + if (tp->t_state <= TCPS_CLOSE_WAIT) { /* * If the connection is currently doing or has done PMTU * blackhole detection, do not change the MSS */ - if (tp->t_flags & TF_BLACKHOLE) + if (tp->t_flags & TF_BLACKHOLE) { return; - if (ifp->if_link_status == NULL) + } + if (ifp->if_link_status == NULL) { return; + } tcp_update_mss_core(tp, ifp); } } @@ -2665,8 +2733,9 @@ tcp_itimer(struct inpcbinfo *ipi) struct ifnet *ifp; if (inp->inp_ppcb == NULL || - in_pcb_checkstate(inp, WNT_ACQUIRE, 
0) == WNT_STOPUSING) + in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { continue; + } so = inp->inp_socket; ifp = inp->inp_last_outifp; socket_lock(so, 1); diff --git a/bsd/netinet/tcp_timer.h b/bsd/netinet/tcp_timer.h index 04a9eb5e4..e09f01de9 100644 --- a/bsd/netinet/tcp_timer.h +++ b/bsd/netinet/tcp_timer.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -70,7 +70,7 @@ #endif /* BSD_KERNEL_PRIVATE */ /* Keep the external definition the same for binary compatibility */ -#define TCPT_NTIMERS_EXT 4 +#define TCPT_NTIMERS_EXT 4 /* * Definitions of the TCP timers. @@ -81,7 +81,7 @@ * timeout will send the last unacknowledged segment to generate more acks * with SACK information which can be used for fast-retransmiting the lost * packets. This will fire in the order of 10ms. - * + * * The TCPT_REXMT timer is used to force retransmissions. * The TCP has the TCPT_REXMT timer set whenever segments * have been sent for which ACKs are expected but not yet @@ -108,7 +108,7 @@ * a window update from the peer. * * The TCPT_KEEP timer is used to keep connections alive. If an - * connection is idle (no segments received) for TCPTV_KEEP_INIT amount + * connection is idle (no segments received) for TCPTV_KEEP_INIT amount * of time, but not yet established, then we drop the connection. 
* Once the connection is established, if the connection is idle for * TCPTV_KEEP_IDLE time (and keepalives have been enabled on the socket), @@ -125,44 +125,44 @@ */ #ifdef BSD_KERNEL_PRIVATE -#define TCPT_PTO 0 /* Probe timeout */ -#define TCPT_DELAYFR 1 /* Delay recovery if there is reordering */ -#define TCPT_REXMT 2 /* retransmit */ -#define TCPT_DELACK 3 /* delayed ack */ -#define TCPT_PERSIST 4 /* retransmit persistence */ -#define TCPT_KEEP 5 /* keep alive */ -#define TCPT_2MSL 6 /* 2*msl quiet time timer */ +#define TCPT_PTO 0 /* Probe timeout */ +#define TCPT_DELAYFR 1 /* Delay recovery if there is reordering */ +#define TCPT_REXMT 2 /* retransmit */ +#define TCPT_DELACK 3 /* delayed ack */ +#define TCPT_PERSIST 4 /* retransmit persistence */ +#define TCPT_KEEP 5 /* keep alive */ +#define TCPT_2MSL 6 /* 2*msl quiet time timer */ #if MPTCP -#define TCPT_JACK_RXMT 7 /* retransmit timer for join ack */ -#define TCPT_MAX 7 +#define TCPT_JACK_RXMT 7 /* retransmit timer for join ack */ +#define TCPT_MAX 7 #else /* MPTCP */ -#define TCPT_MAX 6 +#define TCPT_MAX 6 #endif /* !MPTCP */ -#define TCPT_NONE (TCPT_MAX + 1) -#define TCPT_NTIMERS (TCPT_MAX + 1) +#define TCPT_NONE (TCPT_MAX + 1) +#define TCPT_NTIMERS (TCPT_MAX + 1) /* External definitions */ -#define TCPT_REXMT_EXT 0 -#define TCPT_PERSIST_EXT 1 -#define TCPT_KEEP_EXT 2 -#define TCPT_2MSL_EXT 3 -#define TCPT_DELACK_EXT 4 +#define TCPT_REXMT_EXT 0 +#define TCPT_PERSIST_EXT 1 +#define TCPT_KEEP_EXT 2 +#define TCPT_2MSL_EXT 3 +#define TCPT_DELACK_EXT 4 #else /* !BSD_KERNEL_PRIVATE */ -#define TCPT_REXMT 0 /* retransmit */ -#define TCPT_PERSIST 1 /* retransmit persistence */ -#define TCPT_KEEP 2 /* keep alive */ -#define TCPT_2MSL 3 /* 2*msl quiet time timer */ -#define TCPT_DELACK 4 /* delayed ack timer */ +#define TCPT_REXMT 0 /* retransmit */ +#define TCPT_PERSIST 1 /* retransmit persistence */ +#define TCPT_KEEP 2 /* keep alive */ +#define TCPT_2MSL 3 /* 2*msl quiet time timer */ +#define TCPT_DELACK 4 /* delayed ack timer */ #if MPTCP -#define TCPT_JACK_RXMT 5 /* retransmit timer for join ack */ -#define TCPT_MAX 5 +#define TCPT_JACK_RXMT 5 /* retransmit timer for join ack */ +#define TCPT_MAX 5 #else /* MPTCP */ -#define TCPT_MAX 4 +#define TCPT_MAX 4 #endif /* !MPTCP */ -#define TCPT_NONE (TCPT_MAX + 1) -#define TCPT_NTIMERS (TCPT_MAX + 1) +#define TCPT_NONE (TCPT_MAX + 1) +#define TCPT_NTIMERS (TCPT_MAX + 1) #endif /* BSD_KERNEL_PRIVATE */ @@ -170,140 +170,139 @@ /* * Time constants. 
*/ -#define TCPTV_MSL ( 15*TCP_RETRANSHZ) /* max seg lifetime */ -#define TCPTV_SRTTBASE 0 /* base roundtrip time; if 0, no idea yet */ -#define TCPTV_RTOBASE ( 1*TCP_RETRANSHZ) /* assumed RTO if no info */ -#define TCPTV_SRTTDFLT ( 1*TCP_RETRANSHZ) /* assumed RTT if no info */ -#define TCPTV_PERSMIN ( 5*TCP_RETRANSHZ) /* retransmit persistence */ -#define TCPTV_PERSMAX ( 60*TCP_RETRANSHZ) /* maximum persist interval */ +#define TCPTV_MSL ( 15*TCP_RETRANSHZ) /* max seg lifetime */ +#define TCPTV_SRTTBASE 0 /* base roundtrip time; if 0, no idea yet */ +#define TCPTV_RTOBASE ( 1*TCP_RETRANSHZ) /* assumed RTO if no info */ +#define TCPTV_SRTTDFLT ( 1*TCP_RETRANSHZ) /* assumed RTT if no info */ +#define TCPTV_PERSMIN ( 5*TCP_RETRANSHZ) /* retransmit persistence */ +#define TCPTV_PERSMAX ( 60*TCP_RETRANSHZ) /* maximum persist interval */ extern int tcptv_persmin_val; -#define TCPTV_KEEP_INIT ( 75*TCP_RETRANSHZ) /* connect keep alive */ -#define TCPTV_KEEP_IDLE (120*60*TCP_RETRANSHZ) /* time before probing */ -#define TCPTV_KEEPINTVL ( 75*TCP_RETRANSHZ) /* default probe interval */ -#define TCPTV_KEEPCNT 8 /* max probes before drop */ +#define TCPTV_KEEP_INIT ( 75*TCP_RETRANSHZ) /* connect keep alive */ +#define TCPTV_KEEP_IDLE (120*60*TCP_RETRANSHZ) /* time before probing */ +#define TCPTV_KEEPINTVL ( 75*TCP_RETRANSHZ) /* default probe interval */ +#define TCPTV_KEEPCNT 8 /* max probes before drop */ -#define TCPTV_REXMTMAX ( 64*TCP_RETRANSHZ ) /* max REXMT value */ -#define TCPTV_REXMTMIN ( TCP_RETRANSHZ/33 ) /* min REXMT for non-local connections */ +#define TCPTV_REXMTMAX ( 64*TCP_RETRANSHZ ) /* max REXMT value */ +#define TCPTV_REXMTMIN ( TCP_RETRANSHZ/33 ) /* min REXMT for non-local connections */ /* * Window for counting received bytes to see if ack-stretching * can start (default 100 ms) - */ -#define TCPTV_UNACKWIN ( TCP_RETRANSHZ/10 ) + */ +#define TCPTV_UNACKWIN ( TCP_RETRANSHZ/10 ) -/* Receiver idle time, avoid ack-stretching after this idle time */ +/* Receiver idle time, avoid ack-stretching after this idle time */ #define TCPTV_MAXRCVIDLE (TCP_RETRANSHZ/5 ) /* * No ack stretching during slow-start, until we see some packets. - * By the time the receiver gets 512 packets, the senders cwnd - * should open by a few hundred packets consdering the + * By the time the receiver gets 512 packets, the senders cwnd + * should open by a few hundred packets consdering the * slow-start progression. */ #define TCP_RCV_SS_PKTCOUNT 512 /* Receiver idle time, for rcv socket buffer resizing */ #define TCPTV_RCVBUFIDLE (TCP_RETRANSHZ/2) -#define TCPTV_TWTRUNC 8 /* RTO factor to truncate TW */ +#define TCPTV_TWTRUNC 8 /* RTO factor to truncate TW */ -#define TCP_LINGERTIME 120 /* linger at most 2 minutes */ +#define TCP_LINGERTIME 120 /* linger at most 2 minutes */ -#define TCP_MAXRXTSHIFT 12 /* maximum retransmits */ +#define TCP_MAXRXTSHIFT 12 /* maximum retransmits */ -#ifdef TCPTIMERS +#ifdef TCPTIMERS static char *tcptimers[] = - { "REXMT", "PERSIST", "KEEP", "2MSL" , "DELACK"}; +{ "REXMT", "PERSIST", "KEEP", "2MSL", "DELACK"}; #endif /* TCPTIMERS */ /* * Persist, keep, 2msl and MPTCP's join-ack timer as slow timers which can * be coalesced at a higher granularity (500 ms). * - * Rexmt and delayed ack timers are considered as fast timers which run + * Rexmt and delayed ack timers are considered as fast timers which run * in the order of 100ms. * * Probe timeout is a quick timer which will run in the order of 10ms. 
*/ -#define IS_TIMER_HZ_500MS(i) ((i) >= TCPT_PERSIST) -#define IS_TIMER_HZ_100MS(i) ((i) >= TCPT_REXMT && (i) < TCPT_PERSIST) -#define IS_TIMER_HZ_10MS(i) ((i) < TCPT_REXMT) +#define IS_TIMER_HZ_500MS(i) ((i) >= TCPT_PERSIST) +#define IS_TIMER_HZ_100MS(i) ((i) >= TCPT_REXMT && (i) < TCPT_PERSIST) +#define IS_TIMER_HZ_10MS(i) ((i) < TCPT_REXMT) struct tcptimerlist; struct tcptimerentry { - LIST_ENTRY(tcptimerentry) le; /* links for timer list */ - uint32_t timer_start; /* tcp clock when the timer was started */ - uint16_t index; /* index of lowest timer that needs to run first */ - uint16_t mode; /* Bit-wise OR of timers that are active */ - uint32_t runtime; /* deadline at which the first timer has to fire */ + LIST_ENTRY(tcptimerentry) le; /* links for timer list */ + uint32_t timer_start; /* tcp clock when the timer was started */ + uint16_t index; /* index of lowest timer that needs to run first */ + uint16_t mode; /* Bit-wise OR of timers that are active */ + uint32_t runtime; /* deadline at which the first timer has to fire */ }; LIST_HEAD(timerlisthead, tcptimerentry); struct tcptimerlist { - struct timerlisthead lhead; /* head of the list */ - lck_mtx_t *mtx; /* lock to protect the list */ - lck_attr_t *mtx_attr; /* mutex attributes */ - lck_grp_t *mtx_grp; /* mutex group definition */ - lck_grp_attr_t *mtx_grp_attr; /* mutex group attributes */ - thread_call_t call; /* call entry */ - uint32_t runtime; /* time at which this list is going to run */ - uint32_t schedtime; /* time at which this list was scheduled */ - uint32_t entries; /* Number of entries on the list */ - uint32_t maxentries; /* Max number of entries at any time */ + struct timerlisthead lhead; /* head of the list */ + lck_mtx_t *mtx; /* lock to protect the list */ + lck_attr_t *mtx_attr; /* mutex attributes */ + lck_grp_t *mtx_grp; /* mutex group definition */ + lck_grp_attr_t *mtx_grp_attr; /* mutex group attributes */ + thread_call_t call; /* call entry */ + uint32_t runtime; /* time at which this list is going to run */ + uint32_t schedtime; /* time at which this list was scheduled */ + uint32_t entries; /* Number of entries on the list */ + uint32_t maxentries; /* Max number of entries at any time */ /* Set desired mode when timer list running */ - boolean_t running; /* Set when timer list is being processed */ - boolean_t scheduled; /* set when the timer is scheduled */ + boolean_t running; /* Set when timer list is being processed */ + boolean_t scheduled; /* set when the timer is scheduled */ #define TCP_TIMERLIST_10MS_MODE 0x1 #define TCP_TIMERLIST_100MS_MODE 0x2 #define TCP_TIMERLIST_500MS_MODE 0x4 - uint32_t mode; /* Current mode of the timer */ - uint32_t pref_mode; /* Preferred mode set by a connection */ - uint32_t pref_offset; /* Preferred offset set by a connection */ - uint32_t idleruns; /* Number of times the list has been idle in fast mode */ - struct tcptimerentry *next_te; /* next timer entry pointer to process */ + uint32_t mode; /* Current mode of the timer */ + uint32_t pref_mode; /* Preferred mode set by a connection */ + uint32_t pref_offset; /* Preferred offset set by a connection */ + uint32_t idleruns; /* Number of times the list has been idle in fast mode */ + struct tcptimerentry *next_te; /* next timer entry pointer to process */ u_int16_t probe_if_index; /* Interface index that needs to send probes */ - }; /* number of idle runs allowed for TCP timer list in fast or quick modes */ #define TCP_FASTMODE_IDLERUN_MAX 10 /* - * Minimum retransmit timeout is set to 30ms. 
We add a slop of - * 200 ms to the retransmit value to account for processing - * variance and delayed ack. This extra 200ms will help to avoid - * spurious retransmits by taking into consideration the receivers - * that wait for delayed ack timer instead of generating an ack + * Minimum retransmit timeout is set to 30ms. We add a slop of + * 200 ms to the retransmit value to account for processing + * variance and delayed ack. This extra 200ms will help to avoid + * spurious retransmits by taking into consideration the receivers + * that wait for delayed ack timer instead of generating an ack * for every two packets. * * On a local link, the minimum retransmit timeout is 100ms and * variance is set to 0. This will make the sender a little bit more * aggressive on local link. When the connection is not established yet, * there is no need to add an extra 200ms to retransmit timeout because - * the initial value is high (1s) and delayed ack is not a problem in + * the initial value is high (1s) and delayed ack is not a problem in * that case. */ -#define TCPTV_REXMTSLOP ( TCP_RETRANSHZ/5 ) /* extra 200 ms slop */ +#define TCPTV_REXMTSLOP ( TCP_RETRANSHZ/5 ) /* extra 200 ms slop */ /* macro to decide when retransmit slop (described above) should be added */ -#define TCP_ADD_REXMTSLOP(tp) (tp->t_state >= TCPS_ESTABLISHED) +#define TCP_ADD_REXMTSLOP(tp) (tp->t_state >= TCPS_ESTABLISHED) -#define TCPT_RANGESET(tv, value, tvmin, tvmax, addslop) do { \ +#define TCPT_RANGESET(tv, value, tvmin, tvmax, addslop) do { \ (tv) = ((addslop) ? tcp_rexmt_slop : 0) + (value); \ if ((uint32_t)(tv) < (uint32_t)(tvmin)) \ - (tv) = (tvmin); \ + (tv) = (tvmin); \ else if ((uint32_t)(tv) > (uint32_t)(tvmax)) \ - (tv) = (tvmax); \ + (tv) = (tvmax); \ } while(0) #define TCP_CONN_KEEPIDLE(tp) \ ((tp)->t_keepidle && \ ((tp)->t_inpcb->inp_socket->so_options & SO_KEEPALIVE) ? \ - (tp)->t_keepidle : tcp_keepidle) + (tp)->t_keepidle : tcp_keepidle) #define TCP_CONN_KEEPINIT(tp) \ (((tp)->t_keepinit > 0) ? (tp)->t_keepinit : tcp_keepinit) #define TCP_CONN_KEEPCNT(tp) \ @@ -318,20 +317,19 @@ struct tcptimerlist { TAILQ_HEAD(tcptailq, tcpcb); -extern int tcp_keepinit; /* time to establish connection */ -extern int tcp_keepidle; /* time before keepalive probes begin */ -extern int tcp_keepintvl; /* time between keepalive probes */ -extern int tcp_keepcnt; /* number of keepalives */ -extern int tcp_delack; /* delayed ack timer */ +extern int tcp_keepinit; /* time to establish connection */ +extern int tcp_keepidle; /* time before keepalive probes begin */ +extern int tcp_keepintvl; /* time between keepalive probes */ +extern int tcp_keepcnt; /* number of keepalives */ +extern int tcp_delack; /* delayed ack timer */ extern int tcp_maxpersistidle; extern int tcp_msl; -extern int tcp_ttl; /* time to live for TCP segs */ +extern int tcp_ttl; /* time to live for TCP segs */ extern int tcp_backoff[]; extern int tcp_rexmt_slop; -extern u_int32_t tcp_max_persist_timeout; /* Maximum persistence for Zero Window Probes */ +extern u_int32_t tcp_max_persist_timeout; /* Maximum persistence for Zero Window Probes */ #define OFFSET_FROM_START(tp, off) ((tcp_now + (off)) - (tp)->tentry.timer_start) #endif /* BSD_KERNEL_PRIVATE */ #endif /* !_NETINET_TCP_TIMER_H_ */ - diff --git a/bsd/netinet/tcp_usrreq.c b/bsd/netinet/tcp_usrreq.c index bea1e4c0d..ef53f55b9 100644 --- a/bsd/netinet/tcp_usrreq.c +++ b/bsd/netinet/tcp_usrreq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -122,40 +122,40 @@ errno_t tcp_fill_info_for_info_tuple(struct info_tuple *, struct tcp_info *); -int tcp_sysctl_info(struct sysctl_oid *, void *, int , struct sysctl_req *); +int tcp_sysctl_info(struct sysctl_oid *, void *, int, struct sysctl_req *); static void tcp_connection_fill_info(struct tcpcb *tp, struct tcp_connection_info *tci); /* * TCP protocol interface to socket abstraction. */ -extern char *tcpstates[]; /* XXX ??? */ +extern char *tcpstates[]; /* XXX ??? */ -static int tcp_attach(struct socket *, struct proc *); -static int tcp_connect(struct tcpcb *, struct sockaddr *, struct proc *); +static int tcp_attach(struct socket *, struct proc *); +static int tcp_connect(struct tcpcb *, struct sockaddr *, struct proc *); #if INET6 -static int tcp6_connect(struct tcpcb *, struct sockaddr *, struct proc *); -static int tcp6_usr_connect(struct socket *, struct sockaddr *, - struct proc *); +static int tcp6_connect(struct tcpcb *, struct sockaddr *, struct proc *); +static int tcp6_usr_connect(struct socket *, struct sockaddr *, + struct proc *); #endif /* INET6 */ static struct tcpcb *tcp_disconnect(struct tcpcb *); static struct tcpcb *tcp_usrclosed(struct tcpcb *); extern void tcp_sbrcv_trim(struct tcpcb *tp, struct sockbuf *sb); #if TCPDEBUG -#define TCPDEBUG0 int ostate = 0 -#define TCPDEBUG1() ostate = tp ? tp->t_state : 0 -#define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \ - tcp_trace(TA_USER, ostate, tp, 0, 0, req) +#define TCPDEBUG0 int ostate = 0 +#define TCPDEBUG1() ostate = tp ? tp->t_state : 0 +#define TCPDEBUG2(req) if (tp && (so->so_options & SO_DEBUG)) \ + tcp_trace(TA_USER, ostate, tp, 0, 0, req) #else -#define TCPDEBUG0 -#define TCPDEBUG1() -#define TCPDEBUG2(req) +#define TCPDEBUG0 +#define TCPDEBUG1() +#define TCPDEBUG2(req) #endif SYSCTL_PROC(_net_inet_tcp, OID_AUTO, info, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLFLAG_KERN, - 0 , 0, tcp_sysctl_info, "S", "TCP info per tuple"); + 0, 0, tcp_sysctl_info, "S", "TCP info per tuple"); /* * TCP attaches to socket via pru_attach(), reserving space, @@ -182,11 +182,13 @@ tcp_usr_attach(struct socket *so, __unused int proto, struct proc *p) } error = tcp_attach(so, p); - if (error) + if (error) { goto out; + } - if ((so->so_options & SO_LINGER) && so->so_linger == 0) + if ((so->so_options & SO_LINGER) && so->so_linger == 0) { so->so_linger = TCP_LINGERTIME * hz; + } tp = sototcpcb(so); out: TCPDEBUG2(PRU_ATTACH); @@ -209,13 +211,14 @@ tcp_usr_detach(struct socket *so) TCPDEBUG0; if (inp == 0 || (inp->inp_state == INPCB_STATE_DEAD)) { - return EINVAL; /* XXX */ + return EINVAL; /* XXX */ } socket_lock_assert_owned(so); tp = intotcpcb(inp); /* In case we got disconnected from the peer */ - if (tp == NULL) + if (tp == NULL) { goto out; + } TCPDEBUG1(); calculate_tcp_clock(); @@ -227,28 +230,28 @@ out: } #if NECP -#define COMMON_START() TCPDEBUG0; \ -do { \ - if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) \ - return (EINVAL); \ - if (necp_socket_should_use_flow_divert(inp)) \ - return (EPROTOTYPE); \ - tp = intotcpcb(inp); \ - TCPDEBUG1(); \ - calculate_tcp_clock(); \ +#define COMMON_START() TCPDEBUG0; \ +do { \ + if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) \ + return (EINVAL); \ + if (necp_socket_should_use_flow_divert(inp)) \ + return (EPROTOTYPE); \ + tp = intotcpcb(inp); \ + TCPDEBUG1(); \ + calculate_tcp_clock(); \ } while (0) #else /* NECP */ -#define COMMON_START() TCPDEBUG0; \ -do { \ - if (inp == NULL || 
inp->inp_state == INPCB_STATE_DEAD) \ - return (EINVAL); \ - tp = intotcpcb(inp); \ - TCPDEBUG1(); \ - calculate_tcp_clock(); \ +#define COMMON_START() TCPDEBUG0; \ +do { \ + if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) \ + return (EINVAL); \ + tp = intotcpcb(inp); \ + TCPDEBUG1(); \ + calculate_tcp_clock(); \ } while (0) #endif /* !NECP */ -#define COMMON_END(req) out: TCPDEBUG2(req); return error; goto out +#define COMMON_END(req) out: TCPDEBUG2(req); return error; goto out /* @@ -291,13 +294,14 @@ tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) goto out; } error = in_pcbbind(inp, nam, p); - if (error) + if (error) { goto out; + } #if NECP /* Update NECP client with bind result if not in middle of connect */ if ((inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) && - !uuid_is_null(inp->necp_client_uuid)) { + !uuid_is_null(inp->necp_client_uuid)) { socket_unlock(so, 0); necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp); socket_lock(so, 0); @@ -305,7 +309,6 @@ tcp_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) #endif /* NECP */ COMMON_END(PRU_BIND); - } #if INET6 @@ -337,9 +340,9 @@ tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) inp->inp_vflag &= ~INP_IPV4; inp->inp_vflag |= INP_IPV6; if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { - if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr)) + if (IN6_IS_ADDR_UNSPECIFIED(&sin6p->sin6_addr)) { inp->inp_vflag |= INP_IPV4; - else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { + } else if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { struct sockaddr_in sin; in6_sin6_2_sin(&sin, sin6p); @@ -350,8 +353,9 @@ tcp6_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p) } } error = in6_pcbbind(inp, nam, p); - if (error) + if (error) { goto out; + } COMMON_END(PRU_BIND); } #endif /* INET6 */ @@ -377,10 +381,12 @@ tcp_usr_listen(struct socket *so, struct proc *p) struct tcpcb *tp; COMMON_START(); - if (inp->inp_lport == 0) + if (inp->inp_lport == 0) { error = in_pcbbind(inp, NULL, p); - if (error == 0) + } + if (error == 0) { tp->t_state = TCPS_LISTEN; + } COMMON_END(PRU_LISTEN); } @@ -395,12 +401,14 @@ tcp6_usr_listen(struct socket *so, struct proc *p) COMMON_START(); if (inp->inp_lport == 0) { inp->inp_vflag &= ~INP_IPV4; - if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) + if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { inp->inp_vflag |= INP_IPV4; + } error = in6_pcbbind(inp, NULL, p); } - if (error == 0) + if (error == 0) { tp->t_state = TCPS_LISTEN; + } COMMON_END(PRU_LISTEN); } #endif /* INET6 */ @@ -414,8 +422,9 @@ tcp_connect_complete(struct socket *so) /* TFO delays the tcp_output until later, when the app calls write() */ if (so->so_flags1 & SOF1_PRECONNECT_DATA) { - if (!necp_socket_is_allowed_to_send_recv(sotoinpcb(so), NULL, NULL, NULL)) - return (EHOSTUNREACH); + if (!necp_socket_is_allowed_to_send_recv(sotoinpcb(so), NULL, NULL, NULL)) { + return EHOSTUNREACH; + } /* Initialize enough state so that we can actually send data */ tcp_mss(tp, -1, IFSCOPE_NONE); @@ -434,7 +443,7 @@ tcp_connect_complete(struct socket *so) } #endif /* NECP */ - return (error); + return error; } /* @@ -460,8 +469,9 @@ tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) error = so->so_error; so->so_error = 0; return error; - } else + } else { return EINVAL; + } } #if NECP #if FLOW_DIVERT @@ -481,8 +491,9 @@ tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) #endif /* FLOW_DIVERT */ #if CONTENT_FILTER error = cfil_sock_attach(so); - if 
(error != 0) + if (error != 0) { return error; + } #endif /* CONTENT_FILTER */ #endif /* NECP */ tp = intotcpcb(inp); @@ -504,8 +515,9 @@ tcp_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) goto out; } - if ((error = tcp_connect(tp, nam, p)) != 0) + if ((error = tcp_connect(tp, nam, p)) != 0) { goto out; + } error = tcp_connect_complete(so); @@ -524,8 +536,9 @@ tcp_usr_connectx_common(struct socket *so, int af, int error = 0; user_ssize_t datalen = 0; - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } VERIFY(dst != NULL); @@ -537,18 +550,19 @@ tcp_usr_connectx_common(struct socket *so, int af, #endif /* NECP */ if ((so->so_flags1 & SOF1_DATA_IDEMPOTENT) && - (tcp_fastopen & TCP_FASTOPEN_CLIENT)) + (tcp_fastopen & TCP_FASTOPEN_CLIENT)) { sototcpcb(so)->t_flagsext |= TF_FASTOPEN; + } /* bind socket to the specified interface, if requested */ if (ifscope != IFSCOPE_NONE && - (error = inp_bindif(inp, ifscope, NULL)) != 0) { + (error = inp_bindif(inp, ifscope, NULL)) != 0) { goto done; } /* if source address and/or port is specified, bind to it */ if (src != NULL) { - error = sobindlock(so, src, 0); /* already locked */ + error = sobindlock(so, src, 0); /* already locked */ if (error != 0) { goto done; } @@ -580,11 +594,12 @@ tcp_usr_connectx_common(struct socket *so, int af, datalen = uio_resid(auio); error = so->so_proto->pr_usrreqs->pru_sosend(so, NULL, - (uio_t)auio, NULL, NULL, 0); + (uio_t)auio, NULL, NULL, 0); socket_lock(so, 0); - if (error == 0 || error == EWOULDBLOCK) + if (error == 0 || error == EWOULDBLOCK) { *bytes_written = datalen - uio_resid(auio); + } /* * sosend returns EWOULDBLOCK if it's a non-blocking @@ -594,19 +609,21 @@ tcp_usr_connectx_common(struct socket *so, int af, * However, connectx() returns EINPROGRESS in case of a * blocking socket. So we change the return value here. 
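/*
 * [Editor's sketch, not part of the patch] The conversion described in
 * the comment above, factored out as a stand-alone helper so the intent
 * is visible: pru_sosend() reports EWOULDBLOCK for a non-blocking
 * socket, while connectx(2) is expected to report EINPROGRESS for a
 * connect that is still in flight.  demo_map_connectx_error is a
 * hypothetical name; requires <sys/errno.h>.
 */
static int
demo_map_connectx_error(int error)
{
        return (error == EWOULDBLOCK) ? EINPROGRESS : error;
}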
*/ - if (error == EWOULDBLOCK) + if (error == EWOULDBLOCK) { error = EINPROGRESS; + } } - if (error == 0 && pcid != NULL) + if (error == 0 && pcid != NULL) { *pcid = 1; /* there is only one connection in regular TCP */ - + } done: - if (error && error != EINPROGRESS) + if (error && error != EINPROGRESS) { so->so_flags1 &= ~SOF1_PRECONNECT_DATA; + } inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS; - return (error); + return error; } static int @@ -615,8 +632,8 @@ tcp_usr_connectx(struct socket *so, struct sockaddr *src, sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg, uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written) { - return (tcp_usr_connectx_common(so, AF_INET, src, dst, p, ifscope, aid, - pcid, flags, arg, arglen, uio, bytes_written)); + return tcp_usr_connectx_common(so, AF_INET, src, dst, p, ifscope, aid, + pcid, flags, arg, arglen, uio, bytes_written); } #if INET6 @@ -636,8 +653,9 @@ tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) error = so->so_error; so->so_error = 0; return error; - } else + } else { return EINVAL; + } } #if NECP #if FLOW_DIVERT @@ -657,8 +675,9 @@ tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) #endif /* FLOW_DIVERT */ #if CONTENT_FILTER error = cfil_sock_attach(so); - if (error != 0) + if (error != 0) { return error; + } #endif /* CONTENT_FILTER */ #endif /* NECP */ @@ -685,22 +704,25 @@ tcp6_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p) if (IN6_IS_ADDR_V4MAPPED(&sin6p->sin6_addr)) { struct sockaddr_in sin; - if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) - return (EINVAL); + if ((inp->inp_flags & IN6P_IPV6_V6ONLY) != 0) { + return EINVAL; + } in6_sin6_2_sin(&sin, sin6p); inp->inp_vflag |= INP_IPV4; inp->inp_vflag &= ~INP_IPV6; - if ((error = tcp_connect(tp, (struct sockaddr *)&sin, p)) != 0) + if ((error = tcp_connect(tp, (struct sockaddr *)&sin, p)) != 0) { goto out; + } error = tcp_connect_complete(so); goto out; } inp->inp_vflag &= ~INP_IPV4; inp->inp_vflag |= INP_IPV6; - if ((error = tcp6_connect(tp, nam, p)) != 0) + if ((error = tcp6_connect(tp, nam, p)) != 0) { goto out; + } error = tcp_connect_complete(so); COMMON_END(PRU_CONNECT); @@ -712,8 +734,8 @@ tcp6_usr_connectx(struct socket *so, struct sockaddr*src, sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg, uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written) { - return (tcp_usr_connectx_common(so, AF_INET6, src, dst, p, ifscope, aid, - pcid, flags, arg, arglen, uio, bytes_written)); + return tcp_usr_connectx_common(so, AF_INET6, src, dst, p, ifscope, aid, + pcid, flags, arg, arglen, uio, bytes_written); } #endif /* INET6 */ @@ -737,9 +759,10 @@ tcp_usr_disconnect(struct socket *so) socket_lock_assert_owned(so); COMMON_START(); - /* In case we got disconnected from the peer */ - if (tp == NULL) + /* In case we got disconnected from the peer */ + if (tp == NULL) { goto out; + } tp = tcp_disconnect(tp); COMMON_END(PRU_DISCONNECT); } @@ -751,10 +774,11 @@ static int tcp_usr_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid) { #pragma unused(cid) - if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) - return (EINVAL); + if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) { + return EINVAL; + } - return (tcp_usr_disconnect(so)); + return tcp_usr_disconnect(so); } /* @@ -776,15 +800,18 @@ tcp_usr_accept(struct socket *so, struct sockaddr **nam) error = ECONNABORTED; goto out; } - if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) - return (EINVAL); + if (inp 
== NULL || inp->inp_state == INPCB_STATE_DEAD) { + return EINVAL; + } #if NECP - else if (necp_socket_should_use_flow_divert(inp)) - return (EPROTOTYPE); + else if (necp_socket_should_use_flow_divert(inp)) { + return EPROTOTYPE; + } #if CONTENT_FILTER error = cfil_sock_attach(so); - if (error != 0) - return (error); + if (error != 0) { + return error; + } #endif /* CONTENT_FILTER */ #endif /* NECP */ @@ -809,15 +836,18 @@ tcp6_usr_accept(struct socket *so, struct sockaddr **nam) error = ECONNABORTED; goto out; } - if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) - return (EINVAL); + if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) { + return EINVAL; + } #if NECP - else if (necp_socket_should_use_flow_divert(inp)) - return (EPROTOTYPE); + else if (necp_socket_should_use_flow_divert(inp)) { + return EPROTOTYPE; + } #if CONTENT_FILTER error = cfil_sock_attach(so); - if (error != 0) - return (error); + if (error != 0) { + return error; + } #endif /* CONTENT_FILTER */ #endif /* NECP */ @@ -856,12 +886,13 @@ tcp_usr_shutdown(struct socket *so) struct tcpcb *tp; TCPDEBUG0; - if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) - return (EINVAL); + if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD) { + return EINVAL; + } socantsendmore(so); - /* + /* * In case we got disconnected from the peer, or if this is * a socket that is to be flow-diverted (but not yet). */ @@ -870,11 +901,12 @@ tcp_usr_shutdown(struct socket *so) if (tp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) { - if (tp != NULL) + ) { + if (tp != NULL) { error = EPROTOTYPE; + } goto out; } @@ -891,11 +923,13 @@ tcp_usr_shutdown(struct socket *so) #if CONTENT_FILTER /* Don't send a FIN yet */ if (tp && !(so->so_state & SS_ISDISCONNECTED) && - cfil_sock_data_pending(&so->so_snd)) + cfil_sock_data_pending(&so->so_snd)) { goto out; + } #endif /* CONTENT_FILTER */ - if (tp) + if (tp) { error = tcp_output(tp); + } COMMON_END(PRU_SHUTDOWN); } @@ -910,9 +944,10 @@ tcp_usr_rcvd(struct socket *so, __unused int flags) struct tcpcb *tp; COMMON_START(); - /* In case we got disconnected from the peer */ - if (tp == NULL) + /* In case we got disconnected from the peer */ + if (tp == NULL) { goto out; + } tcp_sbrcv_trim(tp, &so->so_rcv); /* @@ -920,8 +955,9 @@ tcp_usr_rcvd(struct socket *so, __unused int flags) * However, we really do not want these window-updates while we * are still in SYN_SENT or SYN_RECEIVED. */ - if (TCPS_HAVEESTABLISHED(tp->t_state)) + if (TCPS_HAVEESTABLISHED(tp->t_state)) { tcp_output(tp); + } #if CONTENT_FILTER cfil_sock_buf_update(&so->so_rcv); @@ -963,7 +999,7 @@ tcp_usr_rcvd(struct socket *so, __unused int flags) */ static int tcp_usr_send(struct socket *so, int flags, struct mbuf *m, - struct sockaddr *nam, struct mbuf *control, struct proc *p) + struct sockaddr *nam, struct mbuf *control, struct proc *p) { int error = 0; struct inpcb *inp = sotoinpcb(so); @@ -976,25 +1012,27 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, if (inp == NULL || inp->inp_state == INPCB_STATE_DEAD #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) { + ) { /* * OOPS! we lost a race, the TCP session got reset after * we checked SS_CANTSENDMORE, eg: while doing uiomove or a * network interrupt in the non-splnet() section of sosend(). 
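/*
 * [Editor's sketch, not part of the patch] The recovery path that
 * follows this comment, reduced to its essentials: when the race is
 * lost, the mbufs handed to pru_send must still be freed, and the error
 * is ECONNRESET when the PCB vanished outright, or EPROTOTYPE when the
 * socket is being flow-diverted.  demo_usr_send_race is a hypothetical
 * name; assumes the kernel's <sys/mbuf.h> and <netinet/in_pcb.h>.
 */
static int
demo_usr_send_race(struct inpcb *inp, struct mbuf *m, struct mbuf *control)
{
        if (m != NULL) {
                m_freem(m);
        }
        if (control != NULL) {
                m_freem(control);
        }
        return (inp == NULL) ? ECONNRESET : EPROTOTYPE;
}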
*/ - if (m != NULL) + if (m != NULL) { m_freem(m); + } if (control != NULL) { m_freem(control); control = NULL; } - if (inp == NULL) - error = ECONNRESET; /* XXX EPIPE? */ - else + if (inp == NULL) { + error = ECONNRESET; /* XXX EPIPE? */ + } else { error = EPROTOTYPE; + } tp = NULL; TCPDEBUG1(); goto out; @@ -1013,8 +1051,9 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, error = tcp_get_msg_priority(control, &msgpri); if (error) { m_freem(control); - if (m != NULL) + if (m != NULL) { m_freem(m); + } control = NULL; m = NULL; goto out; @@ -1027,8 +1066,9 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, * control mbufs */ m_freem(control); - if (m != NULL) + if (m != NULL) { m_freem(m); + } control = NULL; m = NULL; error = EINVAL; @@ -1045,15 +1085,15 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, VERIFY(!(so->so_flags & SOF_MP_SUBFLOW) || (so->so_snd.sb_flags & SB_NOCOMPRESS)); - if(!(flags & PRUS_OOB) || (so->so_flags1 & SOF1_PRECONNECT_DATA)) { + if (!(flags & PRUS_OOB) || (so->so_flags1 & SOF1_PRECONNECT_DATA)) { /* Call msg send if message delivery is enabled */ - if (so->so_flags & SOF_ENABLE_MSGS) + if (so->so_flags & SOF_ENABLE_MSGS) { sbappendmsg_snd(&so->so_snd, m); - else + } else { sbappendstream(&so->so_snd, m); + } if (nam && tp->t_state < TCPS_SYN_SENT) { - /* * Do implied connect if not yet connected, * initialize window to default value, and @@ -1061,13 +1101,14 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, * MSS. */ #if INET6 - if (isipv6) + if (isipv6) { error = tcp6_connect(tp, nam, p); - else + } else #endif /* INET6 */ - error = tcp_connect(tp, nam, p); - if (error) + error = tcp_connect(tp, nam, p); + if (error) { goto out; + } tp->snd_wnd = TTCP_CLIENT_SND_WND; tp->max_sndwnd = tp->snd_wnd; tcp_mss(tp, -1, IFSCOPE_NONE); @@ -1082,11 +1123,13 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, tp = tcp_usrclosed(tp); } if (tp != NULL) { - if (flags & PRUS_MORETOCOME) + if (flags & PRUS_MORETOCOME) { tp->t_flags |= TF_MORETOCOME; + } error = tcp_output(tp); - if (flags & PRUS_MORETOCOME) + if (flags & PRUS_MORETOCOME) { tp->t_flags &= ~TF_MORETOCOME; + } } } else { if (sbspace(&so->so_snd) == 0) { @@ -1113,13 +1156,14 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, * MSS. */ #if INET6 - if (isipv6) + if (isipv6) { error = tcp6_connect(tp, nam, p); - else + } else #endif /* INET6 */ error = tcp_connect(tp, nam, p); - if (error) + if (error) { goto out; + } tp->snd_wnd = TTCP_CLIENT_SND_WND; tp->max_sndwnd = tp->snd_wnd; tcp_mss(tp, -1, IFSCOPE_NONE); @@ -1136,14 +1180,15 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m, * This allows us to signal a timeout to the application. */ if (so->so_state & SS_ISCONNECTING) { - if (so->so_state & SS_NBIO) + if (so->so_state & SS_NBIO) { error = EWOULDBLOCK; - else + } else { error = sbwait(&so->so_snd); + } } COMMON_END((flags & PRUS_OOB) ? PRU_SENDOOB : - ((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND)); + ((flags & PRUS_EOF) ? 
PRU_SEND_EOF : PRU_SEND)); } /* @@ -1157,9 +1202,10 @@ tcp_usr_abort(struct socket *so) struct tcpcb *tp; COMMON_START(); - /* In case we got disconnected from the peer */ - if (tp == NULL) + /* In case we got disconnected from the peer */ + if (tp == NULL) { goto out; + } tp = tcp_drop(tp, ECONNABORTED); VERIFY(so->so_usecount > 0); so->so_usecount--; @@ -1183,7 +1229,7 @@ tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags) COMMON_START(); if ((so->so_oobmark == 0 && - (so->so_state & SS_RCVATMARK) == 0) || + (so->so_state & SS_RCVATMARK) == 0) || so->so_options & SO_OOBINLINE || tp->t_oobflags & TCPOOB_HADDATA) { error = EINVAL; @@ -1196,8 +1242,9 @@ tcp_usr_rcvoob(struct socket *so, struct mbuf *m, int flags) m->m_len = 1; *mtod(m, caddr_t) = tp->t_iobc; so->so_state &= ~SS_RCVATMARK; - if ((flags & MSG_PEEK) == 0) + if ((flags & MSG_PEEK) == 0) { tp->t_oobflags ^= (TCPOOB_HAVEDATA | TCPOOB_HADDATA); + } COMMON_END(PRU_RCVOOB); } @@ -1226,50 +1273,50 @@ tcp_usr_preconnect(struct socket *so) /* xxx - should be const */ struct pr_usrreqs tcp_usrreqs = { - .pru_abort = tcp_usr_abort, - .pru_accept = tcp_usr_accept, - .pru_attach = tcp_usr_attach, - .pru_bind = tcp_usr_bind, - .pru_connect = tcp_usr_connect, - .pru_connectx = tcp_usr_connectx, - .pru_control = in_control, - .pru_detach = tcp_usr_detach, - .pru_disconnect = tcp_usr_disconnect, - .pru_disconnectx = tcp_usr_disconnectx, - .pru_listen = tcp_usr_listen, - .pru_peeraddr = in_getpeeraddr, - .pru_rcvd = tcp_usr_rcvd, - .pru_rcvoob = tcp_usr_rcvoob, - .pru_send = tcp_usr_send, - .pru_shutdown = tcp_usr_shutdown, - .pru_sockaddr = in_getsockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, - .pru_preconnect = tcp_usr_preconnect, + .pru_abort = tcp_usr_abort, + .pru_accept = tcp_usr_accept, + .pru_attach = tcp_usr_attach, + .pru_bind = tcp_usr_bind, + .pru_connect = tcp_usr_connect, + .pru_connectx = tcp_usr_connectx, + .pru_control = in_control, + .pru_detach = tcp_usr_detach, + .pru_disconnect = tcp_usr_disconnect, + .pru_disconnectx = tcp_usr_disconnectx, + .pru_listen = tcp_usr_listen, + .pru_peeraddr = in_getpeeraddr, + .pru_rcvd = tcp_usr_rcvd, + .pru_rcvoob = tcp_usr_rcvoob, + .pru_send = tcp_usr_send, + .pru_shutdown = tcp_usr_shutdown, + .pru_sockaddr = in_getsockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, + .pru_preconnect = tcp_usr_preconnect, }; #if INET6 struct pr_usrreqs tcp6_usrreqs = { - .pru_abort = tcp_usr_abort, - .pru_accept = tcp6_usr_accept, - .pru_attach = tcp_usr_attach, - .pru_bind = tcp6_usr_bind, - .pru_connect = tcp6_usr_connect, - .pru_connectx = tcp6_usr_connectx, - .pru_control = in6_control, - .pru_detach = tcp_usr_detach, - .pru_disconnect = tcp_usr_disconnect, - .pru_disconnectx = tcp_usr_disconnectx, - .pru_listen = tcp6_usr_listen, - .pru_peeraddr = in6_mapped_peeraddr, - .pru_rcvd = tcp_usr_rcvd, - .pru_rcvoob = tcp_usr_rcvoob, - .pru_send = tcp_usr_send, - .pru_shutdown = tcp_usr_shutdown, - .pru_sockaddr = in6_mapped_sockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, - .pru_preconnect = tcp_usr_preconnect, + .pru_abort = tcp_usr_abort, + .pru_accept = tcp6_usr_accept, + .pru_attach = tcp_usr_attach, + .pru_bind = tcp6_usr_bind, + .pru_connect = tcp6_usr_connect, + .pru_connectx = tcp6_usr_connectx, + .pru_control = in6_control, + .pru_detach = tcp_usr_detach, + .pru_disconnect = tcp_usr_disconnect, + .pru_disconnectx = tcp_usr_disconnectx, + .pru_listen = tcp6_usr_listen, + .pru_peeraddr = in6_mapped_peeraddr, + .pru_rcvd = tcp_usr_rcvd, + 
.pru_rcvoob = tcp_usr_rcvoob, + .pru_send = tcp_usr_send, + .pru_shutdown = tcp_usr_shutdown, + .pru_sockaddr = in6_mapped_sockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, + .pru_preconnect = tcp_usr_preconnect, }; #endif /* INET6 */ @@ -1310,8 +1357,9 @@ tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) if (inp->inp_lport == 0) { error = in_pcbbind(inp, NULL, p); - if (error) + if (error) { goto done; + } } /* @@ -1320,22 +1368,25 @@ tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) * TIME_WAIT state, creating an ADDRINUSE error. */ error = in_pcbladdr(inp, nam, &laddr, IFSCOPE_NONE, &outif, 0); - if (error) + if (error) { goto done; + } socket_unlock(inp->inp_socket, 0); oinp = in_pcblookup_hash(inp->inp_pcbinfo, sin->sin_addr, sin->sin_port, inp->inp_laddr.s_addr != INADDR_ANY ? inp->inp_laddr : laddr, - inp->inp_lport, 0, NULL); + inp->inp_lport, 0, NULL); socket_lock(inp->inp_socket, 0); if (oinp) { - if (oinp != inp) /* 4143933: avoid deadlock if inp == oinp */ + if (oinp != inp) { /* 4143933: avoid deadlock if inp == oinp */ socket_lock(oinp->inp_socket, 1); + } if (in_pcb_checkstate(oinp, WNT_RELEASE, 1) == WNT_STOPUSING) { - if (oinp != inp) + if (oinp != inp) { socket_unlock(oinp->inp_socket, 1); + } goto skip_oinp; } @@ -1347,13 +1398,15 @@ tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) } else { printf("tcp_connect: inp=0x%llx err=EADDRINUSE\n", (uint64_t)VM_KERNEL_ADDRPERM(inp)); - if (oinp != inp) + if (oinp != inp) { socket_unlock(oinp->inp_socket, 1); + } error = EADDRINUSE; goto done; } - if (oinp != inp) + if (oinp != inp) { socket_unlock(oinp->inp_socket, 1); + } } skip_oinp: if ((inp->inp_laddr.s_addr == INADDR_ANY ? laddr.s_addr : @@ -1380,8 +1433,9 @@ skip_oinp: in_pcbrehash(inp); lck_rw_done(inp->inp_pcbinfo->ipi_lock); - if (inp->inp_flowhash == 0) + if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); + } tcp_set_max_rwinscale(tp, so, outif); @@ -1391,14 +1445,16 @@ skip_oinp: tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, TCP_CONN_KEEPINIT(tp)); tp->iss = tcp_new_isn(tp); tcp_sendseqinit(tp); - if (nstat_collect) + if (nstat_collect) { nstat_route_connect_attempt(inp->inp_route.ro_rt); + } done: - if (outif != NULL) + if (outif != NULL) { ifnet_release(outif); + } - return (error); + return error; } #if INET6 @@ -1415,8 +1471,9 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) if (inp->inp_lport == 0) { error = in6_pcbbind(inp, NULL, p); - if (error) + if (error) { goto done; + } } /* @@ -1429,15 +1486,16 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) * whenever it's non-NULL. */ error = in6_pcbladdr(inp, nam, &addr6, &outif); - if (error) + if (error) { goto done; + } socket_unlock(inp->inp_socket, 0); oinp = in6_pcblookup_hash(inp->inp_pcbinfo, - &sin6->sin6_addr, sin6->sin6_port, - IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) - ? &addr6 - : &inp->in6p_laddr, - inp->inp_lport, 0, NULL); + &sin6->sin6_addr, sin6->sin6_port, + IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr) + ? 
&addr6 + : &inp->in6p_laddr, + inp->inp_lport, 0, NULL); socket_lock(inp->inp_socket, 0); if (oinp) { if (oinp != inp && (otp = intotcpcb(oinp)) != NULL && @@ -1458,18 +1516,20 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) } if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { inp->in6p_laddr = addr6; - inp->in6p_last_outifp = outif; /* no reference needed */ + inp->in6p_last_outifp = outif; /* no reference needed */ inp->in6p_flags |= INP_IN6ADDR_ANY; } inp->in6p_faddr = sin6->sin6_addr; inp->inp_fport = sin6->sin6_port; - if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0) + if ((sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK) != 0) { inp->inp_flow = sin6->sin6_flowinfo; + } in_pcbrehash(inp); lck_rw_done(inp->inp_pcbinfo->ipi_lock); - if (inp->inp_flowhash == 0) + if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); + } /* update flowinfo - RFC 6437 */ if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) { inp->inp_flow &= ~IPV6_FLOWLABEL_MASK; @@ -1483,17 +1543,19 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p) tcpstat.tcps_connattempt++; tp->t_state = TCPS_SYN_SENT; tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, - TCP_CONN_KEEPINIT(tp)); + TCP_CONN_KEEPINIT(tp)); tp->iss = tcp_new_isn(tp); tcp_sendseqinit(tp); - if (nstat_collect) + if (nstat_collect) { nstat_route_connect_attempt(inp->inp_route.ro_rt); + } done: - if (outif != NULL) + if (outif != NULL) { ifnet_release(outif); + } - return (error); + return error; } #endif /* INET6 */ @@ -1511,24 +1573,29 @@ tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti) ti->tcpi_flowhash = inp->inp_flowhash; if (tp->t_state > TCPS_LISTEN) { - if (TSTMP_SUPPORTED(tp)) + if (TSTMP_SUPPORTED(tp)) { ti->tcpi_options |= TCPI_OPT_TIMESTAMPS; - if (SACK_ENABLED(tp)) + } + if (SACK_ENABLED(tp)) { ti->tcpi_options |= TCPI_OPT_SACK; + } if (TCP_WINDOW_SCALE_ENABLED(tp)) { ti->tcpi_options |= TCPI_OPT_WSCALE; ti->tcpi_snd_wscale = tp->snd_scale; ti->tcpi_rcv_wscale = tp->rcv_scale; } - if (TCP_ECN_ENABLED(tp)) + if (TCP_ECN_ENABLED(tp)) { ti->tcpi_options |= TCPI_OPT_ECN; + } /* Are we in retranmission episode */ - if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0) + if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0) { ti->tcpi_flags |= TCPI_FLAG_LOSSRECOVERY; + } - if (tp->t_flags & TF_STREAMING_ON) + if (tp->t_flags & TF_STREAMING_ON) { ti->tcpi_flags |= TCPI_FLAG_STREAMING_ON; + } ti->tcpi_rto = tp->t_timer[TCPT_REXMT] ? tp->t_rxtcur : 0; ti->tcpi_snd_mss = tp->t_maxseg; @@ -1551,8 +1618,8 @@ tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti) /* convert bytes/msec to bits/sec */ if ((tp->t_flagsext & TF_MEASURESNDBW) != 0 && - tp->t_bwmeas != NULL) { - ti->tcpi_snd_bw = (tp->t_bwmeas->bw_sndbw * 8000); + tp->t_bwmeas != NULL) { + ti->tcpi_snd_bw = (tp->t_bwmeas->bw_sndbw * 8000); } ti->tcpi_last_outif = (tp->t_inpcb->inp_last_outifp == NULL) ? 
0 : @@ -1616,22 +1683,29 @@ tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti) ti->tcpi_local_peer = !!(tp->t_flags & TF_LOCAL); if (tp->t_inpcb->inp_last_outifp != NULL) { - if (IFNET_IS_CELLULAR(tp->t_inpcb->inp_last_outifp)) + if (IFNET_IS_CELLULAR(tp->t_inpcb->inp_last_outifp)) { ti->tcpi_if_cell = 1; - if (IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) + } + if (IFNET_IS_WIFI(tp->t_inpcb->inp_last_outifp)) { ti->tcpi_if_wifi = 1; - if (IFNET_IS_WIRED(tp->t_inpcb->inp_last_outifp)) + } + if (IFNET_IS_WIRED(tp->t_inpcb->inp_last_outifp)) { ti->tcpi_if_wired = 1; - if (IFNET_IS_WIFI_INFRA(tp->t_inpcb->inp_last_outifp)) + } + if (IFNET_IS_WIFI_INFRA(tp->t_inpcb->inp_last_outifp)) { ti->tcpi_if_wifi_infra = 1; - if (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) + } + if (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) { ti->tcpi_if_wifi_awdl = 1; + } } - if (tp->tcp_cc_index == TCP_CC_ALGO_BACKGROUND_INDEX) + if (tp->tcp_cc_index == TCP_CC_ALGO_BACKGROUND_INDEX) { ti->tcpi_snd_background = 1; + } if (tcp_recv_bg == 1 || - IS_TCP_RECV_BG(tp->t_inpcb->inp_socket)) + IS_TCP_RECV_BG(tp->t_inpcb->inp_socket)) { ti->tcpi_rcv_background = 1; + } ti->tcpi_ecn_recv_ce = tp->t_ecn_recv_ce; ti->tcpi_ecn_recv_cwr = tp->t_ecn_recv_cwr; @@ -1653,45 +1727,49 @@ tcp_fill_info_for_info_tuple(struct info_tuple *itpl, struct tcp_info *ti) struct socket *so; struct tcpcb *tp; - if (itpl->itpl_proto == IPPROTO_TCP) + if (itpl->itpl_proto == IPPROTO_TCP) { pcbinfo = &tcbinfo; - else + } else { return EINVAL; + } if (itpl->itpl_local_sa.sa_family == AF_INET && - itpl->itpl_remote_sa.sa_family == AF_INET) { + itpl->itpl_remote_sa.sa_family == AF_INET) { inp = in_pcblookup_hash(pcbinfo, - itpl->itpl_remote_sin.sin_addr, - itpl->itpl_remote_sin.sin_port, - itpl->itpl_local_sin.sin_addr, - itpl->itpl_local_sin.sin_port, - 0, NULL); + itpl->itpl_remote_sin.sin_addr, + itpl->itpl_remote_sin.sin_port, + itpl->itpl_local_sin.sin_addr, + itpl->itpl_local_sin.sin_port, + 0, NULL); } else if (itpl->itpl_local_sa.sa_family == AF_INET6 && - itpl->itpl_remote_sa.sa_family == AF_INET6) { + itpl->itpl_remote_sa.sa_family == AF_INET6) { struct in6_addr ina6_local; struct in6_addr ina6_remote; ina6_local = itpl->itpl_local_sin6.sin6_addr; if (IN6_IS_SCOPE_LINKLOCAL(&ina6_local) && - itpl->itpl_local_sin6.sin6_scope_id) + itpl->itpl_local_sin6.sin6_scope_id) { ina6_local.s6_addr16[1] = htons(itpl->itpl_local_sin6.sin6_scope_id); + } ina6_remote = itpl->itpl_remote_sin6.sin6_addr; if (IN6_IS_SCOPE_LINKLOCAL(&ina6_remote) && - itpl->itpl_remote_sin6.sin6_scope_id) + itpl->itpl_remote_sin6.sin6_scope_id) { ina6_remote.s6_addr16[1] = htons(itpl->itpl_remote_sin6.sin6_scope_id); + } inp = in6_pcblookup_hash(pcbinfo, - &ina6_remote, - itpl->itpl_remote_sin6.sin6_port, - &ina6_local, - itpl->itpl_local_sin6.sin6_port, - 0, NULL); + &ina6_remote, + itpl->itpl_remote_sin6.sin6_port, + &ina6_local, + itpl->itpl_local_sin6.sin6_port, + 0, NULL); } else { return EINVAL; } - if (inp == NULL || (so = inp->inp_socket) == NULL) + if (inp == NULL || (so = inp->inp_socket) == NULL) { return ENOENT; + } socket_lock(so, 0); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { @@ -1714,23 +1792,28 @@ tcp_connection_fill_info(struct tcpcb *tp, struct tcp_connection_info *tci) bzero(tci, sizeof(*tci)); tci->tcpi_state = tp->t_state; if (tp->t_state > TCPS_LISTEN) { - if (TSTMP_SUPPORTED(tp)) + if (TSTMP_SUPPORTED(tp)) { tci->tcpi_options |= TCPCI_OPT_TIMESTAMPS; - if (SACK_ENABLED(tp)) + } + if (SACK_ENABLED(tp)) { 
tci->tcpi_options |= TCPCI_OPT_SACK; + } if (TCP_WINDOW_SCALE_ENABLED(tp)) { tci->tcpi_options |= TCPCI_OPT_WSCALE; tci->tcpi_snd_wscale = tp->snd_scale; tci->tcpi_rcv_wscale = tp->rcv_scale; } - if (TCP_ECN_ENABLED(tp)) + if (TCP_ECN_ENABLED(tp)) { tci->tcpi_options |= TCPCI_OPT_ECN; - if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0) + } + if (IN_FASTRECOVERY(tp) || tp->t_rxtshift > 0) { tci->tcpi_flags |= TCPCI_FLAG_LOSSRECOVERY; - if (tp->t_flagsext & TF_PKTS_REORDERED) + } + if (tp->t_flagsext & TF_PKTS_REORDERED) { tci->tcpi_flags |= TCPCI_FLAG_REORDERING_DETECTED; + } tci->tcpi_rto = (tp->t_timer[TCPT_REXMT] > 0) ? - tp->t_rxtcur : 0; + tp->t_rxtcur : 0; tci->tcpi_maxseg = tp->t_maxseg; tci->tcpi_snd_ssthresh = tp->snd_ssthresh; tci->tcpi_snd_cwnd = tp->snd_cwnd; @@ -1807,8 +1890,9 @@ tcp_sysctl_info(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused } } - if (caller != PROC_NULL) + if (caller != PROC_NULL) { proc_rele(caller); + } #endif /* !CONFIG_EMBEDDED */ if (req->newptr == USER_ADDR_NULL) { @@ -1838,12 +1922,14 @@ tcp_lookup_peer_pid_locked(struct socket *so, pid_t *out_pid) { int error = EHOSTUNREACH; *out_pid = -1; - if ((so->so_state & SS_ISCONNECTED) == 0) return ENOTCONN; + if ((so->so_state & SS_ISCONNECTED) == 0) { + return ENOTCONN; + } - struct inpcb *inp = (struct inpcb*)so->so_pcb; - uint16_t lport = inp->inp_lport; - uint16_t fport = inp->inp_fport; - struct inpcb *finp = NULL; + struct inpcb *inp = (struct inpcb*)so->so_pcb; + uint16_t lport = inp->inp_lport; + uint16_t fport = inp->inp_fport; + struct inpcb *finp = NULL; struct in6_addr laddr6, faddr6; struct in_addr laddr4, faddr4; @@ -1889,29 +1975,29 @@ tcp_getconninfo(struct socket *so, struct conninfo_tcp *tcp_ci) int tcp_ctloutput(struct socket *so, struct sockopt *sopt) { - int error = 0, opt = 0, optval = 0; - struct inpcb *inp; - struct tcpcb *tp; + int error = 0, opt = 0, optval = 0; + struct inpcb *inp; + struct tcpcb *tp; inp = sotoinpcb(so); if (inp == NULL) { - return (ECONNRESET); + return ECONNRESET; } /* Allow at this level */ if (sopt->sopt_level != IPPROTO_TCP && !(sopt->sopt_level == SOL_SOCKET && (sopt->sopt_name == SO_FLUSH || sopt->sopt_name == SO_TRAFFIC_MGT_BACKGROUND))) { #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { error = ip6_ctloutput(so, sopt); - else + } else #endif /* INET6 */ error = ip_ctloutput(so, sopt); - return (error); + return error; } tp = intotcpcb(inp); if (tp == NULL) { - return (ECONNRESET); + return ECONNRESET; } calculate_tcp_clock(); @@ -1923,9 +2009,10 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) case TCP_NOOPT: case TCP_NOPUSH: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; + } switch (sopt->sopt_name) { case TCP_NODELAY: @@ -1942,17 +2029,19 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) break; } - if (optval) + if (optval) { tp->t_flags |= opt; - else + } else { tp->t_flags &= ~opt; + } break; case TCP_RXT_FINDROP: case TCP_NOTIMEWAIT: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; + } switch (sopt->sopt_name) { case TCP_RXT_FINDROP: opt = TF_RXTFINDROP; @@ -1964,16 +2053,18 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) opt = 0; break; } - if (optval) + if (optval) { tp->t_flagsext |= opt; - else + } else { tp->t_flagsext &= ~opt; + } break; case TCP_MEASURE_SND_BW: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); 
- if (error) + sizeof optval); + if (error) { break; + } opt = TF_MEASURESNDBW; if (optval) { if (tp->t_bwmeas == NULL) { @@ -1999,18 +2090,19 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) bzero(&in, sizeof(in)); error = sooptcopyin(sopt, &in, sizeof(in), - sizeof(in)); - if (error) + sizeof(in)); + if (error) { break; + } if ((tp->t_flagsext & TF_MEASURESNDBW) == 0 || - tp->t_bwmeas == NULL) { + tp->t_bwmeas == NULL) { error = EINVAL; break; } minpkts = (in.min_burst_size != 0) ? in.min_burst_size : - tp->t_bwmeas->bw_minsizepkts; + tp->t_bwmeas->bw_minsizepkts; maxpkts = (in.max_burst_size != 0) ? in.max_burst_size : - tp->t_bwmeas->bw_maxsizepkts; + tp->t_bwmeas->bw_maxsizepkts; if (minpkts > maxpkts) { error = EINVAL; break; @@ -2023,46 +2115,50 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) } case TCP_MAXSEG: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; + } if (optval > 0 && optval <= tp->t_maxseg && - optval + 40 >= tcp_minmss) + optval + 40 >= tcp_minmss) { tp->t_maxseg = optval; - else + } else { error = EINVAL; + } break; case TCP_KEEPALIVE: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; - if (optval < 0 || optval > UINT32_MAX/TCP_RETRANSHZ) { + } + if (optval < 0 || optval > UINT32_MAX / TCP_RETRANSHZ) { error = EINVAL; } else { tp->t_keepidle = optval * TCP_RETRANSHZ; /* reset the timer to new value */ tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, - TCP_CONN_KEEPIDLE(tp)); + TCP_CONN_KEEPIDLE(tp)); tcp_check_timer_state(tp); } - break; + break; case TCP_CONNECTIONTIMEOUT: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; - if (optval < 0 || optval > UINT32_MAX/TCP_RETRANSHZ) { + } + if (optval < 0 || optval > UINT32_MAX / TCP_RETRANSHZ) { error = EINVAL; } else { tp->t_keepinit = optval * TCP_RETRANSHZ; if (tp->t_state == TCPS_SYN_RECEIVED || - tp->t_state == TCPS_SYN_SENT) { + tp->t_state == TCPS_SYN_SENT) { tp->t_timer[TCPT_KEEP] = OFFSET_FROM_START(tp, - TCP_CONN_KEEPINIT(tp)); + TCP_CONN_KEEPINIT(tp)); tcp_check_timer_state(tp); } } @@ -2070,17 +2166,18 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) case TCP_KEEPINTVL: error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; - if (optval < 0 || optval > UINT32_MAX/TCP_RETRANSHZ) { + } + if (optval < 0 || optval > UINT32_MAX / TCP_RETRANSHZ) { error = EINVAL; } else { tp->t_keepintvl = optval * TCP_RETRANSHZ; if (tp->t_state == TCPS_FIN_WAIT_2 && - TCP_CONN_MAXIDLE(tp) > 0) { + TCP_CONN_MAXIDLE(tp) > 0) { tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, - TCP_CONN_MAXIDLE(tp)); + TCP_CONN_MAXIDLE(tp)); tcp_check_timer_state(tp); } } @@ -2088,17 +2185,18 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) case TCP_KEEPCNT: error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; + } if (optval < 0 || optval > INT32_MAX) { error = EINVAL; } else { tp->t_keepcnt = optval; if (tp->t_state == TCPS_FIN_WAIT_2 && - TCP_CONN_MAXIDLE(tp) > 0) { + TCP_CONN_MAXIDLE(tp) > 0) { tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, - TCP_CONN_MAXIDLE(tp)); + TCP_CONN_MAXIDLE(tp)); tcp_check_timer_state(tp); } } @@ -2106,44 +2204,51 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) case TCP_KEEPALIVE_OFFLOAD: error = sooptcopyin(sopt, &optval, 
sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; + } if (optval < 0 || optval > INT32_MAX) { error = EINVAL; break; } - if (optval != 0) + if (optval != 0) { inp->inp_flags2 |= INP2_KEEPALIVE_OFFLOAD; - else + } else { inp->inp_flags2 &= ~INP2_KEEPALIVE_OFFLOAD; + } break; case PERSIST_TIMEOUT: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; - if (optval < 0) + } + if (optval < 0) { error = EINVAL; - else + } else { tp->t_persist_timeout = optval * TCP_RETRANSHZ; + } break; case TCP_RXT_CONNDROPTIME: error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; - if (optval < 0) + } + if (optval < 0) { error = EINVAL; - else + } else { tp->t_rxt_conndroptime = optval * TCP_RETRANSHZ; + } break; case TCP_NOTSENT_LOWAT: error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; + } if (optval < 0) { error = EINVAL; break; @@ -2158,10 +2263,11 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) } break; case TCP_ADAPTIVE_READ_TIMEOUT: - error = sooptcopyin(sopt, &optval, sizeof (optval), + error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error) + if (error) { break; + } if (optval < 0 || optval > TCP_ADAPTIVE_TIMEOUT_MAX) { error = EINVAL; @@ -2170,17 +2276,19 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) tp->t_adaptive_rtimo = 0; tcp_keepalive_reset(tp); - if (tp->t_mpsub) + if (tp->t_mpsub) { mptcp_reset_keepalive(tp); + } } else { tp->t_adaptive_rtimo = optval; } break; case TCP_ADAPTIVE_WRITE_TIMEOUT: - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error) { break; + } if (optval < 0 || optval > TCP_ADAPTIVE_TIMEOUT_MAX) { error = EINVAL; @@ -2191,9 +2299,10 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) break; case TCP_ENABLE_MSGS: error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; + } if (optval < 0 || optval > 1) { error = EINVAL; } else if (optval == 1) { @@ -2212,9 +2321,9 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) */ VERIFY(so->so_msg_state == NULL); MALLOC(so->so_msg_state, - struct msg_state *, - sizeof(struct msg_state), - M_TEMP, M_WAITOK | M_ZERO); + struct msg_state *, + sizeof(struct msg_state), + M_TEMP, M_WAITOK | M_ZERO); if (so->so_msg_state == NULL) { error = ENOMEM; break; @@ -2233,9 +2342,10 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) break; case TCP_SENDMOREACKS: error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; + } if (optval < 0 || optval > 1) { error = EINVAL; } else if (optval == 0) { @@ -2246,9 +2356,10 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) break; case TCP_DISABLE_BLACKHOLE_DETECTION: error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; + } if (optval < 0 || optval > 1) { error = EINVAL; } else if (optval == 0) { @@ -2256,8 +2367,9 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) } else { tp->t_flagsext |= TF_NOBLACKHOLE_DETECTION; if ((tp->t_flags & TF_BLACKHOLE) && - tp->t_pmtud_saved_maxopd > 0) + tp->t_pmtud_saved_maxopd > 0) { tcp_pmtud_revert_segment_size(tp); + } } break; case 
TCP_FASTOPEN: @@ -2267,9 +2379,10 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) } error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); - if (error) + sizeof(optval)); + if (error) { break; + } if (optval < 0 || optval > 1) { error = EINVAL; break; @@ -2278,17 +2391,19 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) error = EINVAL; break; } - if (optval) + if (optval) { tp->t_flagsext |= TF_FASTOPEN; - else + } else { tcp_disable_tfo(tp); + } break; case TCP_FASTOPEN_FORCE_HEURISTICS: error = sooptcopyin(sopt, &optval, sizeof(optval), - sizeof(optval)); + sizeof(optval)); - if (error) + if (error) { break; + } if (optval < 0 || optval > 1) { error = EINVAL; break; @@ -2298,17 +2413,19 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) error = EINVAL; break; } - if (optval) + if (optval) { tp->t_flagsext |= TF_FASTOPEN_HEUR; - else + } else { tp->t_flagsext &= ~TF_FASTOPEN_HEUR; + } break; case TCP_ENABLE_ECN: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; + } if (optval) { tp->ecn_flags |= TE_ECN_MODE_ENABLE; tp->ecn_flags &= ~TE_ECN_MODE_DISABLE; @@ -2319,9 +2436,10 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) break; case TCP_ECN_MODE: error = sooptcopyin(sopt, &optval, sizeof optval, - sizeof optval); - if (error) + sizeof optval); + if (error) { break; + } if (optval == ECN_MODE_DEFAULT) { tp->ecn_flags &= ~TE_ECN_MODE_ENABLE; tp->ecn_flags &= ~TE_ECN_MODE_DISABLE; @@ -2338,8 +2456,9 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) case TCP_NOTIFY_ACKNOWLEDGEMENT: error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error) + if (error) { break; + } if (optval <= 0) { error = EINVAL; break; @@ -2360,17 +2479,19 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) error = tcp_add_notify_ack_marker(tp, optval); break; case SO_FLUSH: - if ((error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval))) != 0) + if ((error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval))) != 0) { break; + } error = inp_flush(inp, optval); break; case SO_TRAFFIC_MGT_BACKGROUND: - if ((error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval))) != 0) + if ((error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval))) != 0) { break; + } if (optval) { socket_set_traffic_mgt_flags_locked(so, @@ -2383,8 +2504,9 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) case TCP_RXT_MINIMUM_TIMEOUT: error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); - if (error) + if (error) { break; + } if (optval < 0) { error = EINVAL; break; @@ -2413,22 +2535,25 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) optval = tp->t_maxseg; break; case TCP_KEEPALIVE: - if (tp->t_keepidle > 0) + if (tp->t_keepidle > 0) { optval = tp->t_keepidle / TCP_RETRANSHZ; - else + } else { optval = tcp_keepidle / TCP_RETRANSHZ; + } break; case TCP_KEEPINTVL: - if (tp->t_keepintvl > 0) + if (tp->t_keepintvl > 0) { optval = tp->t_keepintvl / TCP_RETRANSHZ; - else + } else { optval = tcp_keepintvl / TCP_RETRANSHZ; + } break; case TCP_KEEPCNT: - if (tp->t_keepcnt > 0) + if (tp->t_keepcnt > 0) { optval = tp->t_keepcnt; - else + } else { optval = tcp_keepcnt; + } break; case TCP_KEEPALIVE_OFFLOAD: optval = !!(inp->inp_flags2 & INP2_KEEPALIVE_OFFLOAD); @@ -2443,12 +2568,13 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) optval = (tp->ecn_flags & TE_ECN_MODE_ENABLE) ? 
1 : 0; break; case TCP_ECN_MODE: - if (tp->ecn_flags & TE_ECN_MODE_ENABLE) + if (tp->ecn_flags & TE_ECN_MODE_ENABLE) { optval = ECN_MODE_ENABLE; - else if (tp->ecn_flags & TE_ECN_MODE_DISABLE) + } else if (tp->ecn_flags & TE_ECN_MODE_DISABLE) { optval = ECN_MODE_DISABLE; - else + } else { optval = ECN_MODE_DEFAULT; + } break; case TCP_CONNECTIONTIMEOUT: optval = tp->t_keepinit / TCP_RETRANSHZ; @@ -2497,7 +2623,7 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) case TCP_MEASURE_BW_BURST: { struct tcp_measure_bw_burst out = {}; if ((tp->t_flagsext & TF_MEASURESNDBW) == 0 || - tp->t_bwmeas == NULL) { + tp->t_bwmeas == NULL) { error = EINVAL; break; } @@ -2522,22 +2648,25 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) } break; case TCP_SENDMOREACKS: - if (tp->t_flagsext & TF_NOSTRETCHACK) + if (tp->t_flagsext & TF_NOSTRETCHACK) { optval = 1; - else + } else { optval = 0; + } break; case TCP_DISABLE_BLACKHOLE_DETECTION: - if (tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) + if (tp->t_flagsext & TF_NOBLACKHOLE_DETECTION) { optval = 1; - else + } else { optval = 0; + } break; case TCP_PEER_PID: { - pid_t pid; + pid_t pid; error = tcp_lookup_peer_pid_locked(so, &pid); - if (error == 0) + if (error == 0) { error = sooptcopyout(sopt, &pid, sizeof(pid)); + } goto done; } case TCP_ADAPTIVE_READ_TIMEOUT: @@ -2553,16 +2682,17 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) case TCP_NOTIFY_ACKNOWLEDGEMENT: { struct tcp_notify_ack_complete retid; - if (sopt->sopt_valsize != sizeof (retid)) { + if (sopt->sopt_valsize != sizeof(retid)) { error = EINVAL; break; } - bzero(&retid, sizeof (retid)); + bzero(&retid, sizeof(retid)); tcp_get_notify_ack_count(tp, &retid); - if (retid.notify_complete_count > 0) + if (retid.notify_complete_count > 0) { tcp_get_notify_ack_ids(tp, &retid); + } - error = sooptcopyout(sopt, &retid, sizeof (retid)); + error = sooptcopyout(sopt, &retid, sizeof(retid)); goto done; } case TCP_RXT_MINIMUM_TIMEOUT: @@ -2572,12 +2702,13 @@ tcp_ctloutput(struct socket *so, struct sockopt *sopt) error = ENOPROTOOPT; break; } - if (error == 0) + if (error == 0) { error = sooptcopyout(sopt, &optval, sizeof optval); + } break; } done: - return (error); + return error; } /* @@ -2585,8 +2716,8 @@ done: * sizes, respectively. These are obsolescent (this information should * be set by the route). */ -u_int32_t tcp_sendspace = 1448*256; -u_int32_t tcp_recvspace = 1448*384; +u_int32_t tcp_sendspace = 1448 * 256; +u_int32_t tcp_recvspace = 1448 * 384; /* During attach, the size of socket buffer allocated is limited to * sb_max in sbreserve. 
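/*
 * [Editor's note, not part of the patch] Worked values for the two
 * defaults above: 1448 bytes is a common Ethernet TCP payload size
 * (1500-byte MTU minus 40 bytes of IP/TCP headers and 12 bytes of
 * timestamp option), so tcp_sendspace = 1448 * 256 = 370,688 bytes
 * (~362 KiB) and tcp_recvspace = 1448 * 384 = 556,032 bytes (~543 KiB).
 */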
Disallow setting the tcp send and recv space @@ -2595,25 +2726,25 @@ u_int32_t tcp_recvspace = 1448*384; */ static int sysctl_tcp_sospace(struct sysctl_oid *oidp, __unused void *arg1, - int arg2, struct sysctl_req *req) + int arg2, struct sysctl_req *req) { #pragma unused(arg2) u_int32_t new_value = 0, *space_p = NULL; int changed = 0, error = 0; - u_quad_t sb_effective_max = (sb_max / (MSIZE+MCLBYTES)) * MCLBYTES; + u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES; switch (oidp->oid_number) { - case TCPCTL_SENDSPACE: - space_p = &tcp_sendspace; - break; - case TCPCTL_RECVSPACE: - space_p = &tcp_recvspace; - break; - default: - return EINVAL; + case TCPCTL_SENDSPACE: + space_p = &tcp_sendspace; + break; + case TCPCTL_RECVSPACE: + space_p = &tcp_recvspace; + break; + default: + return EINVAL; } error = sysctl_io_number(req, *space_p, sizeof(u_int32_t), - &new_value, &changed); + &new_value, &changed); if (changed) { if (new_value > 0 && new_value <= sb_effective_max) { *space_p = new_value; @@ -2627,18 +2758,18 @@ sysctl_tcp_sospace(struct sysctl_oid *oidp, __unused void *arg1, #if SYSCTL_SKMEM SYSCTL_PROC(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_sendspace, - offsetof(skmem_sysctl, tcp.sendspace), sysctl_tcp_sospace, - "IU", "Maximum outgoing TCP datagram size"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_sendspace, + offsetof(skmem_sysctl, tcp.sendspace), sysctl_tcp_sospace, + "IU", "Maximum outgoing TCP datagram size"); SYSCTL_PROC(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_recvspace, - offsetof(skmem_sysctl, tcp.recvspace), sysctl_tcp_sospace, - "IU", "Maximum incoming TCP datagram size"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &tcp_recvspace, + offsetof(skmem_sysctl, tcp.recvspace), sysctl_tcp_sospace, + "IU", "Maximum incoming TCP datagram size"); #else /* SYSCTL_SKMEM */ SYSCTL_PROC(_net_inet_tcp, TCPCTL_SENDSPACE, sendspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_sendspace , 0, &sysctl_tcp_sospace, "IU", "Maximum outgoing TCP datagram size"); + &tcp_sendspace, 0, &sysctl_tcp_sospace, "IU", "Maximum outgoing TCP datagram size"); SYSCTL_PROC(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &tcp_recvspace , 0, &sysctl_tcp_sospace, "IU", "Maximum incoming TCP datagram size"); + &tcp_recvspace, 0, &sysctl_tcp_sospace, "IU", "Maximum incoming TCP datagram size"); #endif /* SYSCTL_SKMEM */ /* @@ -2663,52 +2794,56 @@ tcp_attach(struct socket *so, struct proc *p) #endif error = in_pcballoc(so, &tcbinfo, p); - if (error) - return (error); + if (error) { + return error; + } inp = sotoinpcb(so); if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { error = soreserve(so, tcp_sendspace, tcp_recvspace); - if (error) - return (error); + if (error) { + return error; + } } if (so->so_snd.sb_preconn_hiwat == 0) { soreserve_preconnect(so, 2048); } - if ((so->so_rcv.sb_flags & SB_USRSIZE) == 0) + if ((so->so_rcv.sb_flags & SB_USRSIZE) == 0) { so->so_rcv.sb_flags |= SB_AUTOSIZE; - if ((so->so_snd.sb_flags & SB_USRSIZE) == 0) + } + if ((so->so_snd.sb_flags & SB_USRSIZE) == 0) { so->so_snd.sb_flags |= SB_AUTOSIZE; + } #if INET6 if (isipv6) { inp->inp_vflag |= INP_IPV6; - inp->in6p_hops = -1; /* use kernel default */ - } - else + inp->in6p_hops = -1; /* use kernel default */ + } else #endif /* INET6 */ inp->inp_vflag |= INP_IPV4; tp = tcp_newtcpcb(inp); if (tp == NULL) { - int nofd = so->so_state & SS_NOFDREF; /* XXX 
*/ + int nofd = so->so_state & SS_NOFDREF; /* XXX */ - so->so_state &= ~SS_NOFDREF; /* don't free the socket yet */ + so->so_state &= ~SS_NOFDREF; /* don't free the socket yet */ #if INET6 - if (isipv6) + if (isipv6) { in6_pcbdetach(inp); - else + } else #endif /* INET6 */ in_pcbdetach(inp); so->so_state |= nofd; - return (ENOBUFS); + return ENOBUFS; } - if (nstat_collect) + if (nstat_collect) { nstat_tcp_new_pcb(inp); + } tp->t_state = TCPS_CLOSED; - return (0); + return 0; } /* @@ -2724,27 +2859,30 @@ tcp_disconnect(struct tcpcb *tp) { struct socket *so = tp->t_inpcb->inp_socket; - if (so->so_rcv.sb_cc != 0 || tp->t_reassqlen != 0) + if (so->so_rcv.sb_cc != 0 || tp->t_reassqlen != 0) { return tcp_drop(tp, 0); + } - if (tp->t_state < TCPS_ESTABLISHED) + if (tp->t_state < TCPS_ESTABLISHED) { tp = tcp_close(tp); - else if ((so->so_options & SO_LINGER) && so->so_linger == 0) + } else if ((so->so_options & SO_LINGER) && so->so_linger == 0) { tp = tcp_drop(tp, 0); - else { + } else { soisdisconnecting(so); sbflush(&so->so_rcv); tp = tcp_usrclosed(tp); #if MPTCP /* A reset has been sent but socket exists, do not send FIN */ if ((so->so_flags & SOF_MP_SUBFLOW) && - (tp) && (tp->t_mpflags & TMPF_RESET)) - return (tp); + (tp) && (tp->t_mpflags & TMPF_RESET)) { + return tp; + } #endif - if (tp) + if (tp) { (void) tcp_output(tp); + } } - return (tp); + return tp; } /* @@ -2761,7 +2899,6 @@ static struct tcpcb * tcp_usrclosed(struct tcpcb *tp) { switch (tp->t_state) { - case TCPS_CLOSED: case TCPS_LISTEN: case TCPS_SYN_SENT: @@ -2774,28 +2911,29 @@ tcp_usrclosed(struct tcpcb *tp) case TCPS_ESTABLISHED: DTRACE_TCP4(state__change, void, NULL, - struct inpcb *, tp->t_inpcb, - struct tcpcb *, tp, - int32_t, TCPS_FIN_WAIT_1); + struct inpcb *, tp->t_inpcb, + struct tcpcb *, tp, + int32_t, TCPS_FIN_WAIT_1); tp->t_state = TCPS_FIN_WAIT_1; break; case TCPS_CLOSE_WAIT: DTRACE_TCP4(state__change, void, NULL, - struct inpcb *, tp->t_inpcb, - struct tcpcb *, tp, - int32_t, TCPS_LAST_ACK); + struct inpcb *, tp->t_inpcb, + struct tcpcb *, tp, + int32_t, TCPS_LAST_ACK); tp->t_state = TCPS_LAST_ACK; break; } if (tp && tp->t_state >= TCPS_FIN_WAIT_2) { soisdisconnected(tp->t_inpcb->inp_socket); /* To prevent the connection hanging in FIN_WAIT_2 forever. 
*/ - if (tp->t_state == TCPS_FIN_WAIT_2) + if (tp->t_state == TCPS_FIN_WAIT_2) { tp->t_timer[TCPT_2MSL] = OFFSET_FROM_START(tp, - TCP_CONN_MAXIDLE(tp)); + TCP_CONN_MAXIDLE(tp)); + } } - return (tp); + return tp; } void @@ -2835,23 +2973,25 @@ int tcp_get_msg_priority(struct mbuf *control, uint32_t *msgpri) { struct cmsghdr *cm; - if (control == NULL) - return(EINVAL); + if (control == NULL) { + return EINVAL; + } - for (cm = M_FIRST_CMSGHDR(control); cm; - cm = M_NXT_CMSGHDR(control, cm)) { - if (cm->cmsg_len < sizeof(struct cmsghdr) || - cm->cmsg_len > control->m_len) { - return (EINVAL); - } + for (cm = M_FIRST_CMSGHDR(control); + is_cmsg_valid(control, cm); + cm = M_NXT_CMSGHDR(control, cm)) { if (cm->cmsg_level == SOL_SOCKET && - cm->cmsg_type == SCM_MSG_PRIORITY) { - *msgpri = *(unsigned int *)(void *)CMSG_DATA(cm); + cm->cmsg_type == SCM_MSG_PRIORITY) { + if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) { + return EINVAL; + } + *msgpri = *(uint32_t *)(void *)CMSG_DATA(cm); + if (*msgpri < MSG_PRI_MIN || *msgpri > MSG_PRI_MAX) { + return EINVAL; + } break; } } - - VERIFY(*msgpri >= MSG_PRI_MIN && *msgpri <= MSG_PRI_MAX); - return (0); + return 0; } #endif /* INET6 */ diff --git a/bsd/netinet/tcp_var.h b/bsd/netinet/tcp_var.h index 49e3f9731..e9fde2f3a 100644 --- a/bsd/netinet/tcp_var.h +++ b/bsd/netinet/tcp_var.h @@ -71,27 +71,27 @@ #include #if defined(__LP64__) -#define _TCPCB_PTR(x) u_int32_t -#define _TCPCB_LIST_HEAD(name, type) \ -struct name { \ - u_int32_t lh_first; \ +#define _TCPCB_PTR(x) u_int32_t +#define _TCPCB_LIST_HEAD(name, type) \ +struct name { \ + u_int32_t lh_first; \ } #else -#define _TCPCB_PTR(x) x -#define _TCPCB_LIST_HEAD(name, type) LIST_HEAD(name, type) +#define _TCPCB_PTR(x) x +#define _TCPCB_LIST_HEAD(name, type) LIST_HEAD(name, type) #endif #ifdef KERNEL_PRIVATE -#define TCP_RETRANSHZ 1000 /* granularity of TCP timestamps, 1ms */ +#define TCP_RETRANSHZ 1000 /* granularity of TCP timestamps, 1ms */ /* Minimum time quantum within which the timers are coalesced */ -#define TCP_TIMER_10MS_QUANTUM (TCP_RETRANSHZ/100) /* every 10ms */ +#define TCP_TIMER_10MS_QUANTUM (TCP_RETRANSHZ/100) /* every 10ms */ #define TCP_TIMER_100MS_QUANTUM (TCP_RETRANSHZ/10) /* every 100ms */ #define TCP_TIMER_500MS_QUANTUM (TCP_RETRANSHZ/2) /* every 500ms */ #define TCP_RETRANSHZ_TO_USEC 1000 -#define N_TIME_WAIT_SLOTS 128 /* must be power of 2 */ +#define N_TIME_WAIT_SLOTS 128 /* must be power of 2 */ /* Always allow at least 16 packets worth of recv window when adjusting * recv window using inter-packet arrival jitter. @@ -141,9 +141,9 @@ struct name { \ * The maximum value of adaptive timeout is set to 10 which will allow * transmission of enough number of probes to the peer. */ -#define TCP_ADAPTIVE_TIMEOUT_MAX 10 +#define TCP_ADAPTIVE_TIMEOUT_MAX 10 -#define TCP_CONNECTIVITY_PROBES_MAX 5 +#define TCP_CONNECTIVITY_PROBES_MAX 5 /* * Kernel variables for tcp. @@ -152,28 +152,28 @@ struct name { \ /* TCP segment queue entry */ struct tseg_qent { LIST_ENTRY(tseg_qent) tqe_q; - int tqe_len; /* TCP segment data length */ - struct tcphdr *tqe_th; /* a pointer to tcp header */ - struct mbuf *tqe_m; /* mbuf contains packet */ + int tqe_len; /* TCP segment data length */ + struct tcphdr *tqe_th; /* a pointer to tcp header */ + struct mbuf *tqe_m; /* mbuf contains packet */ }; LIST_HEAD(tsegqe_head, tseg_qent); struct sackblk { - tcp_seq start; /* start seq no. of sack block */ - tcp_seq end; /* end seq no. */ + tcp_seq start; /* start seq no. of sack block */ + tcp_seq end; /* end seq no. 
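 *
 * An aside on the tcp_get_msg_priority() hunk above: the rewritten loop
 * iterates with is_cmsg_valid() instead of ad-hoc length checks, requires
 * cmsg_len to be exactly CMSG_LEN(sizeof(uint32_t)), and range-checks the
 * value against MSG_PRI_MIN/MSG_PRI_MAX, returning EINVAL instead of
 * tripping the old VERIFY() on malformed input. A conforming sender would
 * build the control message like this (sketch only; SCM_MSG_PRIORITY is a
 * private option, so the usage shown is illustrative):
 *
 *	char buf[CMSG_SPACE(sizeof(uint32_t))];
 *	struct cmsghdr *cm = (struct cmsghdr *)(void *)buf;
 *	uint32_t pri = MSG_PRI_MIN;
 *	cm->cmsg_len = CMSG_LEN(sizeof(uint32_t));
 *	cm->cmsg_level = SOL_SOCKET;
 *	cm->cmsg_type = SCM_MSG_PRIORITY;
 *	memcpy(CMSG_DATA(cm), &pri, sizeof(pri));
 *
 * Any cmsg whose length is not exactly CMSG_LEN(sizeof(uint32_t)) is now
 * rejected before its payload is read.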
*/ }; struct sackhole { - tcp_seq start; /* start seq no. of hole */ - tcp_seq end; /* end seq no. */ - tcp_seq rxmit; /* next seq. no in hole to be retransmitted */ - u_int32_t rxmit_start; /* timestamp of first retransmission */ - TAILQ_ENTRY(sackhole) scblink; /* scoreboard linkage */ + tcp_seq start; /* start seq no. of hole */ + tcp_seq end; /* end seq no. */ + tcp_seq rxmit; /* next seq. no in hole to be retransmitted */ + u_int32_t rxmit_start; /* timestamp of first retransmission */ + TAILQ_ENTRY(sackhole) scblink; /* scoreboard linkage */ }; struct sackhint { - struct sackhole *nexthole; - int sack_bytes_rexmit; + struct sackhole *nexthole; + int sack_bytes_rexmit; }; struct tcp_rxt_seg { @@ -181,44 +181,44 @@ struct tcp_rxt_seg { tcp_seq rx_end; u_int16_t rx_count; u_int16_t rx_flags; -#define TCP_RXT_SPURIOUS 0x1 /* received DSACK notification */ -#define TCP_RXT_DSACK_FOR_TLP 0x2 +#define TCP_RXT_SPURIOUS 0x1 /* received DSACK notification */ +#define TCP_RXT_DSACK_FOR_TLP 0x2 SLIST_ENTRY(tcp_rxt_seg) rx_link; }; struct tcp_notify_ack_marker { - tcp_seq notify_snd_una; /* Notify when snd_una crosses this seq */ + tcp_seq notify_snd_una; /* Notify when snd_una crosses this seq */ tcp_notify_ack_id_t notify_id; SLIST_ENTRY(tcp_notify_ack_marker) notify_next; }; struct tcptemp { - u_char tt_ipgen[40]; /* the size must be of max ip header, now IPv6 */ - struct tcphdr tt_t; + u_char tt_ipgen[40]; /* the size must be of max ip header, now IPv6 */ + struct tcphdr tt_t; }; struct bwmeas { - tcp_seq bw_start; /* start of bw measurement */ - uint32_t bw_ts; /* timestamp when bw measurement started */ - uint32_t bw_size; /* burst size in bytes for this bw measurement */ + tcp_seq bw_start; /* start of bw measurement */ + uint32_t bw_ts; /* timestamp when bw measurement started */ + uint32_t bw_size; /* burst size in bytes for this bw measurement */ uint32_t bw_minsizepkts; /* Min burst size as segments */ uint32_t bw_maxsizepkts; /* Max burst size as segments */ - uint32_t bw_minsize; /* Min size in bytes */ - uint32_t bw_maxsize; /* Max size in bytes */ - uint32_t bw_sndbw; /* Measured send bandwidth */ - uint32_t bw_sndbw_max; /* Max measured bandwidth */ - uint32_t bw_rcvbw_max; /* Max receive bandwidth measured */ + uint32_t bw_minsize; /* Min size in bytes */ + uint32_t bw_maxsize; /* Max size in bytes */ + uint32_t bw_sndbw; /* Measured send bandwidth */ + uint32_t bw_sndbw_max; /* Max measured bandwidth */ + uint32_t bw_rcvbw_max; /* Max receive bandwidth measured */ }; /* MPTCP Data sequence map entry */ struct mpt_dsn_map { - uint64_t mpt_dsn; /* data seq num recvd */ - uint32_t mpt_sseq; /* relative subflow # */ - uint16_t mpt_len; /* length of mapping */ - uint16_t mpt_csum; /* checksum value if on */ - uint8_t mpt_dfin; /* It's a DATA_FIN */ + uint64_t mpt_dsn; /* data seq num recvd */ + uint32_t mpt_sseq; /* relative subflow # */ + uint16_t mpt_len; /* length of mapping */ + uint16_t mpt_csum; /* checksum value if on */ + uint8_t mpt_dfin; /* It's a DATA_FIN */ }; -#define tcp6cb tcpcb /* for KAME src sync over BSD*'s */ +#define tcp6cb tcpcb /* for KAME src sync over BSD*'s */ struct tcp_ccstate { union { @@ -250,338 +250,337 @@ struct tcp_ccstate { * Organized for 16 byte cacheline efficiency. 
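 *
 * (Illustrative note on the layout claim: fields that are accessed
 * together on the hot path are kept in adjacent 16-byte clusters; for
 * instance snd_una, snd_max, snd_nxt and snd_up below are four
 * consecutive 4-byte tcp_seq values. A hypothetical compile-time check,
 * not part of the source and valid only on ABIs that insert no padding
 * between same-type members:
 *
 *	_Static_assert(offsetof(struct tcpcb, snd_up) -
 *	    offsetof(struct tcpcb, snd_una) == 12,
 *	    "send-side sequence numbers share one 16-byte cluster");
 *
 * Exact offsets vary with architecture and build configuration.)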
*/ struct tcpcb { - struct tsegqe_head t_segq; - int t_dupacks; /* consecutive dup acks recd */ - int t_state; /* state of this connection */ - uint32_t t_timer[TCPT_NTIMERS]; /* tcp timers */ - struct tcptimerentry tentry; /* entry in timer list */ - - struct inpcb *t_inpcb; /* back pointer to internet pcb */ - uint32_t t_flags; -#define TF_ACKNOW 0x00001 /* ack peer immediately */ -#define TF_DELACK 0x00002 /* ack, but try to delay it */ -#define TF_NODELAY 0x00004 /* don't delay packets to coalesce */ -#define TF_NOOPT 0x00008 /* don't use tcp options */ -#define TF_SENTFIN 0x00010 /* have sent FIN */ -#define TF_REQ_SCALE 0x00020 /* have/will request window scaling */ -#define TF_RCVD_SCALE 0x00040 /* other side has requested scaling */ -#define TF_REQ_TSTMP 0x00080 /* have/will request timestamps */ -#define TF_RCVD_TSTMP 0x00100 /* a timestamp was received in SYN */ -#define TF_SACK_PERMIT 0x00200 /* other side said I could SACK */ -#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) */ -#define TF_NEEDFIN 0x00800 /* send FIN (implicit state) */ -#define TF_NOPUSH 0x01000 /* don't push */ -#define TF_REQ_CC 0x02000 /* have/will request CC */ -#define TF_RCVD_CC 0x04000 /* a CC was received in SYN */ -#define TF_SENDCCNEW 0x08000 /* Unused */ -#define TF_MORETOCOME 0x10000 /* More data to be appended to sock */ -#define TF_LOCAL 0x20000 /* connection to a host on local link */ -#define TF_RXWIN0SENT 0x40000 /* sent a receiver win 0 in response */ -#define TF_SLOWLINK 0x80000 /* route is a on a modem speed link */ -#define TF_LASTIDLE 0x100000 /* connection was previously idle */ -#define TF_FASTRECOVERY 0x200000 /* in NewReno Fast Recovery */ -#define TF_WASFRECOVERY 0x400000 /* was in NewReno Fast Recovery */ -#define TF_SIGNATURE 0x800000 /* require MD5 digests (RFC2385) */ -#define TF_MAXSEGSNT 0x1000000 /* last segment sent was a full segment */ -#define TF_STREAMING_ON 0x2000000 /* Receiver detected streaming */ -#define TF_PMTUD 0x4000000 /* Perform Path MTU Discovery for this connection */ -#define TF_CLOSING 0x8000000 /* pending tcp close */ -#define TF_TSO 0x10000000 /* TCP Segment Offloading is enable on this connection */ -#define TF_BLACKHOLE 0x20000000 /* Path MTU Discovery Black Hole detection */ -#define TF_TIMER_ONLIST 0x40000000 /* pcb is on tcp_timer_list */ -#define TF_STRETCHACK 0x80000000 /* receiver is going to delay acks */ - - tcp_seq snd_una; /* send unacknowledged */ - tcp_seq snd_max; /* highest sequence number sent; - * used to recognize retransmits - */ - tcp_seq snd_nxt; /* send next */ - tcp_seq snd_up; /* send urgent pointer */ - - tcp_seq snd_wl1; /* window update seg seq number */ - tcp_seq snd_wl2; /* window update seg ack number */ - tcp_seq iss; /* initial send sequence number */ - tcp_seq irs; /* initial receive sequence number */ - - tcp_seq rcv_nxt; /* receive next */ - tcp_seq rcv_adv; /* advertised window */ - u_int32_t rcv_wnd; /* receive window */ - tcp_seq rcv_up; /* receive urgent pointer */ - - u_int32_t snd_wnd; /* send window */ - u_int32_t snd_cwnd; /* congestion-controlled window */ - u_int32_t snd_ssthresh; /* snd_cwnd size threshold for - * for slow start exponential to - * linear switch - */ - tcp_seq snd_recover; /* for use in NewReno Fast Recovery */ - - u_int32_t t_maxopd; /* mss plus options */ - u_int32_t t_rcvtime; /* time at which a packet was received */ - u_int32_t t_sndtime; /* time at which we last sent new data */ - u_int32_t t_starttime; /* time connection was established */ - int t_rtttime; /* tcp clock when rtt 
calculation was started */ - tcp_seq t_rtseq; /* sequence number being timed */ - - u_int32_t rfbuf_ts; /* recv buffer autoscaling timestamp */ - u_int32_t rfbuf_cnt; /* recv buffer autoscaling byte count */ - - int t_rxtcur; /* current retransmit value (ticks) */ - u_int t_maxseg; /* maximum segment size */ - int t_srtt; /* smoothed round-trip time */ - int t_rttvar; /* variance in round-trip time */ - - u_int64_t t_accsleep_ms; /* accumulated sleep time since last boot */ - u_int16_t t_reassqlen; /* length of reassembly queue */ - u_int16_t t_rxtshift; /* log(2) of rexmt exp. backoff */ - u_int32_t t_rttmin; /* minimum rtt allowed */ - u_int32_t t_rttbest; /* best rtt we've seen */ - u_int32_t t_rttcur; /* most recent value of rtt */ - u_int32_t t_rttupdated; /* number of times rtt sampled */ - u_int32_t t_rxt_conndroptime; /* retxmt conn gets dropped after this time, when set */ - u_int32_t t_rxtstart; /* time at which retransmission started */ - u_int32_t max_sndwnd; /* largest window peer has offered */ - - int t_softerror; /* possible error not yet reported */ + struct tsegqe_head t_segq; + int t_dupacks; /* consecutive dup acks recd */ + int t_state; /* state of this connection */ + uint32_t t_timer[TCPT_NTIMERS]; /* tcp timers */ + struct tcptimerentry tentry; /* entry in timer list */ + + struct inpcb *t_inpcb; /* back pointer to internet pcb */ + uint32_t t_flags; +#define TF_ACKNOW 0x00001 /* ack peer immediately */ +#define TF_DELACK 0x00002 /* ack, but try to delay it */ +#define TF_NODELAY 0x00004 /* don't delay packets to coalesce */ +#define TF_NOOPT 0x00008 /* don't use tcp options */ +#define TF_SENTFIN 0x00010 /* have sent FIN */ +#define TF_REQ_SCALE 0x00020 /* have/will request window scaling */ +#define TF_RCVD_SCALE 0x00040 /* other side has requested scaling */ +#define TF_REQ_TSTMP 0x00080 /* have/will request timestamps */ +#define TF_RCVD_TSTMP 0x00100 /* a timestamp was received in SYN */ +#define TF_SACK_PERMIT 0x00200 /* other side said I could SACK */ +#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) */ +#define TF_NEEDFIN 0x00800 /* send FIN (implicit state) */ +#define TF_NOPUSH 0x01000 /* don't push */ +#define TF_REQ_CC 0x02000 /* have/will request CC */ +#define TF_RCVD_CC 0x04000 /* a CC was received in SYN */ +#define TF_SENDCCNEW 0x08000 /* Unused */ +#define TF_MORETOCOME 0x10000 /* More data to be appended to sock */ +#define TF_LOCAL 0x20000 /* connection to a host on local link */ +#define TF_RXWIN0SENT 0x40000 /* sent a receiver win 0 in response */ +#define TF_SLOWLINK 0x80000 /* route is on a modem speed link */ +#define TF_LASTIDLE 0x100000 /* connection was previously idle */ +#define TF_FASTRECOVERY 0x200000 /* in NewReno Fast Recovery */ +#define TF_WASFRECOVERY 0x400000 /* was in NewReno Fast Recovery */ +#define TF_SIGNATURE 0x800000 /* require MD5 digests (RFC2385) */ +#define TF_MAXSEGSNT 0x1000000 /* last segment sent was a full segment */ +#define TF_STREAMING_ON 0x2000000 /* Receiver detected streaming */ +#define TF_PMTUD 0x4000000 /* Perform Path MTU Discovery for this connection */ +#define TF_CLOSING 0x8000000 /* pending tcp close */ +#define TF_TSO 0x10000000 /* TCP Segment Offloading is enabled on this connection */ +#define TF_BLACKHOLE 0x20000000 /* Path MTU Discovery Black Hole detection */ +#define TF_TIMER_ONLIST 0x40000000 /* pcb is on tcp_timer_list */ +#define TF_STRETCHACK 0x80000000 /* receiver is going to delay acks */ + + tcp_seq snd_una; /* send unacknowledged */ + tcp_seq snd_max; /* highest sequence 
number sent; + * used to recognize retransmits + */ + tcp_seq snd_nxt; /* send next */ + tcp_seq snd_up; /* send urgent pointer */ + + tcp_seq snd_wl1; /* window update seg seq number */ + tcp_seq snd_wl2; /* window update seg ack number */ + tcp_seq iss; /* initial send sequence number */ + tcp_seq irs; /* initial receive sequence number */ + + tcp_seq rcv_nxt; /* receive next */ + tcp_seq rcv_adv; /* advertised window */ + u_int32_t rcv_wnd; /* receive window */ + tcp_seq rcv_up; /* receive urgent pointer */ + + u_int32_t snd_wnd; /* send window */ + u_int32_t snd_cwnd; /* congestion-controlled window */ + u_int32_t snd_ssthresh; /* snd_cwnd size threshold for + * for slow start exponential to + * linear switch + */ + tcp_seq snd_recover; /* for use in NewReno Fast Recovery */ + + u_int32_t t_maxopd; /* mss plus options */ + u_int32_t t_rcvtime; /* time at which a packet was received */ + u_int32_t t_sndtime; /* time at which we last sent new data */ + u_int32_t t_starttime; /* time connection was established */ + int t_rtttime; /* tcp clock when rtt calculation was started */ + tcp_seq t_rtseq; /* sequence number being timed */ + + u_int32_t rfbuf_ts; /* recv buffer autoscaling timestamp */ + u_int32_t rfbuf_cnt; /* recv buffer autoscaling byte count */ + + int t_rxtcur; /* current retransmit value (ticks) */ + u_int t_maxseg; /* maximum segment size */ + int t_srtt; /* smoothed round-trip time */ + int t_rttvar; /* variance in round-trip time */ + + u_int64_t t_accsleep_ms; /* accumulated sleep time since last boot */ + u_int16_t t_reassqlen; /* length of reassembly queue */ + u_int16_t t_rxtshift; /* log(2) of rexmt exp. backoff */ + u_int32_t t_rttmin; /* minimum rtt allowed */ + u_int32_t t_rttbest; /* best rtt we've seen */ + u_int32_t t_rttcur; /* most recent value of rtt */ + u_int32_t t_rttupdated; /* number of times rtt sampled */ + u_int32_t t_rxt_conndroptime; /* retxmt conn gets dropped after this time, when set */ + u_int32_t t_rxtstart; /* time at which retransmission started */ + u_int32_t max_sndwnd; /* largest window peer has offered */ + + int t_softerror; /* possible error not yet reported */ /* out-of-band data */ - char t_oobflags; /* have some */ - char t_iobc; /* input character */ -#define TCPOOB_HAVEDATA 0x01 -#define TCPOOB_HADDATA 0x02 + char t_oobflags; /* have some */ + char t_iobc; /* input character */ +#define TCPOOB_HAVEDATA 0x01 +#define TCPOOB_HADDATA 0x02 /* RFC 1323 variables */ - u_int8_t snd_scale; /* window scaling for send window */ - u_int8_t rcv_scale; /* window scaling for recv window */ - u_int8_t request_r_scale; /* pending window scaling */ - u_int8_t requested_s_scale; - u_int8_t tcp_cc_index; /* index of congestion control algorithm */ - u_int8_t t_adaptive_rtimo; /* Read timeout used as a multiple of RTT */ - u_int8_t t_adaptive_wtimo; /* Write timeout used as a multiple of RTT */ - u_int8_t t_stretchack_delayed; /* stretch ack delayed */ + u_int8_t snd_scale; /* window scaling for send window */ + u_int8_t rcv_scale; /* window scaling for recv window */ + u_int8_t request_r_scale; /* pending window scaling */ + u_int8_t requested_s_scale; + u_int8_t tcp_cc_index; /* index of congestion control algorithm */ + u_int8_t t_adaptive_rtimo; /* Read timeout used as a multiple of RTT */ + u_int8_t t_adaptive_wtimo; /* Write timeout used as a multiple of RTT */ + u_int8_t t_stretchack_delayed; /* stretch ack delayed */ /* State for limiting early retransmits when SACK is not enabled */ - u_int16_t t_early_rexmt_count; /* count of early rexmts */ 
- u_int32_t t_early_rexmt_win; /* window for limiting early rexmts */ + u_int16_t t_early_rexmt_count; /* count of early rexmts */ + u_int32_t t_early_rexmt_win; /* window for limiting early rexmts */ - u_int32_t ts_recent; /* timestamp echo data */ + u_int32_t ts_recent; /* timestamp echo data */ - u_int32_t ts_recent_age; /* when last updated */ - tcp_seq last_ack_sent; + u_int32_t ts_recent_age; /* when last updated */ + tcp_seq last_ack_sent; /* RFC 3465 variables */ - u_int32_t t_bytes_acked; /* ABC "bytes_acked" parameter */ + u_int32_t t_bytes_acked; /* ABC "bytes_acked" parameter */ - int t_lastchain; /* amount of packets chained last time around */ - u_int16_t t_unacksegs; /* received but unacked segments for delaying acks */ - u_int8_t t_rexmtthresh; /* duplicate ack threshold for entering fast recovery */ - u_int8_t t_rtimo_probes; /* number of adaptive rtimo probes sent */ - u_int32_t t_persist_timeout; /* ZWP persistence limit as set by PERSIST_TIMEOUT */ - u_int32_t t_persist_stop; /* persistence limit deadline if triggered by ZWP */ - u_int32_t t_notsent_lowat; /* Low water for not sent data */ + int t_lastchain; /* number of packets chained last time around */ + u_int16_t t_unacksegs; /* received but unacked segments for delaying acks */ + u_int8_t t_rexmtthresh; /* duplicate ack threshold for entering fast recovery */ + u_int8_t t_rtimo_probes; /* number of adaptive rtimo probes sent */ + u_int32_t t_persist_timeout; /* ZWP persistence limit as set by PERSIST_TIMEOUT */ + u_int32_t t_persist_stop; /* persistence limit deadline if triggered by ZWP */ + u_int32_t t_notsent_lowat; /* Low water for not sent data */ /* Receiver state for stretch-ack algorithm */ - u_int32_t rcv_unackwin; /* to measure win for stretching acks */ - u_int32_t rcv_by_unackwin; /* bytes seen during the last ack-stretching win */ - u_int32_t rcv_nostrack_ts; /* timestamp when stretch ack was disabled automatically */ - u_int32_t rcv_nostrack_pkts; /* pkts received since strech ack was disabled */ - u_int16_t rcv_waitforss; /* wait for packets during slow-start */ + u_int32_t rcv_unackwin; /* to measure win for stretching acks */ + u_int32_t rcv_by_unackwin; /* bytes seen during the last ack-stretching win */ + u_int32_t rcv_nostrack_ts; /* timestamp when stretch ack was disabled automatically */ + u_int32_t rcv_nostrack_pkts; /* pkts received since stretch ack was disabled */ + u_int16_t rcv_waitforss; /* wait for packets during slow-start */ /* ECN stats */ - u_int16_t ecn_flags; -#define TE_SETUPSENT 0x0001 /* Indicate we have sent ECN-SETUP SYN or SYN-ACK */ -#define TE_SETUPRECEIVED 0x0002 /* Indicate we have received ECN-SETUP SYN or SYN-ACK */ -#define TE_SENDIPECT 0x0004 /* Indicate we haven't sent or received non-ECN-setup SYN or SYN-ACK */ -#define TE_SENDCWR 0x0008 /* Indicate that the next non-retransmit should have the TCP CWR flag set */ -#define TE_SENDECE 0x0010 /* Indicate that the next packet should have the TCP ECE flag set */ -#define TE_INRECOVERY 0x0020 /* connection entered recovery after receiving ECE */ -#define TE_RECV_ECN_CE 0x0040 /* Received IPTOS_ECN_CE marking atleast once */ -#define TE_RECV_ECN_ECE 0x0080 /* Received ECE marking atleast once */ -#define TE_LOST_SYN 0x0100 /* Lost SYN with ECN setup */ -#define TE_LOST_SYNACK 0x0200 /* Lost SYN-ACK with ECN setup */ -#define TE_ECN_MODE_ENABLE 0x0400 /* Option ECN mode set to enable */ -#define TE_ECN_MODE_DISABLE 0x0800 /* Option ECN mode set to disable */ -#define TE_ENABLE_ECN 0x1000 /* Enable negotiation of ECN */ 
-#define TE_ECN_ON (TE_SETUPSENT | TE_SETUPRECEIVED) /* Indicate ECN was successfully negotiated on a connection) */ -#define TE_CEHEURI_SET 0x2000 /* We did our CE-probing at the beginning */ -#define TE_CLIENT_SETUP 0x4000 /* setup from client side */ -#define TE_RCVD_SYN_RST 0x8000 /* Received RST to the first ECN enabled SYN */ - - u_int32_t t_ecn_recv_ce; /* Received CE from the network */ - u_int32_t t_ecn_recv_cwr; /* Packets received with CWR */ + u_int16_t ecn_flags; +#define TE_SETUPSENT 0x0001 /* Indicate we have sent ECN-SETUP SYN or SYN-ACK */ +#define TE_SETUPRECEIVED 0x0002 /* Indicate we have received ECN-SETUP SYN or SYN-ACK */ +#define TE_SENDIPECT 0x0004 /* Indicate we haven't sent or received non-ECN-setup SYN or SYN-ACK */ +#define TE_SENDCWR 0x0008 /* Indicate that the next non-retransmit should have the TCP CWR flag set */ +#define TE_SENDECE 0x0010 /* Indicate that the next packet should have the TCP ECE flag set */ +#define TE_INRECOVERY 0x0020 /* connection entered recovery after receiving ECE */ +#define TE_RECV_ECN_CE 0x0040 /* Received IPTOS_ECN_CE marking at least once */ +#define TE_RECV_ECN_ECE 0x0080 /* Received ECE marking at least once */ +#define TE_LOST_SYN 0x0100 /* Lost SYN with ECN setup */ +#define TE_LOST_SYNACK 0x0200 /* Lost SYN-ACK with ECN setup */ +#define TE_ECN_MODE_ENABLE 0x0400 /* Option ECN mode set to enable */ +#define TE_ECN_MODE_DISABLE 0x0800 /* Option ECN mode set to disable */ +#define TE_ENABLE_ECN 0x1000 /* Enable negotiation of ECN */ +#define TE_ECN_ON (TE_SETUPSENT | TE_SETUPRECEIVED) /* Indicate ECN was successfully negotiated on a connection */ +#define TE_CEHEURI_SET 0x2000 /* We did our CE-probing at the beginning */ +#define TE_CLIENT_SETUP 0x4000 /* setup from client side */ +#define TE_RCVD_SYN_RST 0x8000 /* Received RST to the first ECN enabled SYN */ + + u_int32_t t_ecn_recv_ce; /* Received CE from the network */ + u_int32_t t_ecn_recv_cwr; /* Packets received with CWR */ /* state for bad retransmit recovery */ - u_int32_t snd_cwnd_prev; /* cwnd prior to retransmit */ - u_int32_t snd_ssthresh_prev; /* ssthresh prior to retransmit */ - tcp_seq snd_recover_prev; /* snd_recover prior to retransmit */ - int t_srtt_prev; /* srtt prior to retransmit */ - int t_rttvar_prev; /* rttvar prior to retransmit */ - u_int32_t t_badrexmt_time; /* bad rexmt detection time */ + u_int32_t snd_cwnd_prev; /* cwnd prior to retransmit */ + u_int32_t snd_ssthresh_prev; /* ssthresh prior to retransmit */ + tcp_seq snd_recover_prev; /* snd_recover prior to retransmit */ + int t_srtt_prev; /* srtt prior to retransmit */ + int t_rttvar_prev; /* rttvar prior to retransmit */ + u_int32_t t_badrexmt_time; /* bad rexmt detection time */ /* Packet reordering metric */ - u_int16_t t_reorderwin; /* Reordering late time offset */ + u_int16_t t_reorderwin; /* Reordering late time offset */ /* SACK related state */ - int16_t snd_numholes; /* number of holes seen by sender */ - tcp_seq sack_newdata; /* New data xmitted in this recovery - episode starts at this seq number */ + int16_t snd_numholes; /* number of holes seen by sender */ + tcp_seq sack_newdata; /* New data xmitted in this recovery + * episode starts at this seq number */ TAILQ_HEAD(sackhole_head, sackhole) snd_holes; - /* SACK scoreboard (sorted) */ - tcp_seq snd_fack; /* last seq number(+1) sack'd by rcv'r*/ - int rcv_numsacks; /* # distinct sack 
blks present */ struct sackblk sackblks[MAX_SACK_BLKS]; /* seq nos. of sack blocks */ - struct sackhint sackhint; /* SACK scoreboard hint */ - - struct mbuf *t_pktlist_head; /* First packet in transmit chain */ - struct mbuf *t_pktlist_tail; /* Last packet in transmit chain */ - u_int32_t t_pktlist_sentlen; /* total bytes in transmit chain */ - - u_int32_t t_keepidle; /* keepalive idle timer (override global if > 0) */ - u_int32_t t_keepinit; /* connection timeout, i.e. idle time - in SYN_SENT or SYN_RECV state */ - u_int32_t t_keepintvl; /* interval between keepalives */ - u_int32_t t_keepcnt; /* number of keepalives before close */ - - u_int32_t tso_max_segment_size; /* TSO maximum segment unit for NIC */ - u_int16_t t_pmtud_lastseg_size; /* size of the last sent segment */ - u_int16_t t_pmtud_saved_maxopd; /* MSS saved before performing PMTU-D BlackHole detection */ - u_int32_t t_pmtud_start_ts; /* Time of PMTUD blackhole detection */ - - struct - { - u_int32_t rxduplicatebytes; - u_int32_t rxoutoforderbytes; - u_int32_t txretransmitbytes; - u_int8_t synrxtshift; - u_int8_t unused; - u_int16_t unused_pad_to_8; - u_int32_t rxmitpkts; + struct sackhint sackhint; /* SACK scoreboard hint */ + + struct mbuf *t_pktlist_head; /* First packet in transmit chain */ + struct mbuf *t_pktlist_tail; /* Last packet in transmit chain */ + u_int32_t t_pktlist_sentlen; /* total bytes in transmit chain */ + + u_int32_t t_keepidle; /* keepalive idle timer (override global if > 0) */ + u_int32_t t_keepinit; /* connection timeout, i.e. idle time + * in SYN_SENT or SYN_RECV state */ + u_int32_t t_keepintvl; /* interval between keepalives */ + u_int32_t t_keepcnt; /* number of keepalives before close */ + + u_int32_t tso_max_segment_size; /* TSO maximum segment unit for NIC */ + u_int16_t t_pmtud_lastseg_size; /* size of the last sent segment */ + u_int16_t t_pmtud_saved_maxopd; /* MSS saved before performing PMTU-D BlackHole detection */ + u_int32_t t_pmtud_start_ts; /* Time of PMTUD blackhole detection */ + + struct{ + u_int32_t rxduplicatebytes; + u_int32_t rxoutoforderbytes; + u_int32_t txretransmitbytes; + u_int8_t synrxtshift; + u_int8_t unused; + u_int16_t unused_pad_to_8; + u_int32_t rxmitpkts; } t_stat; - u_int8_t t_notify_ack_count; - u_int8_t t_ecn_recv_ce_pkt; /* Received packet with CE-bit set (independent from last_ack_sent) */ - u_int16_t t_cached_maxopd; /* default for MSS adjustment using link status report */ - - uint32_t bg_ssthresh; /* Slow start threshold until delay increases */ - uint32_t t_flagsext; /* Another field to accommodate more flags */ -#define TF_RXTFINDROP 0x1 /* Drop conn after retransmitting FIN 3 times */ -#define TF_RCVUNACK_WAITSS 0x2 /* set when the receiver should not stretch acks */ -#define TF_BWMEAS_INPROGRESS 0x4 /* Indicate BW meas is happening */ -#define TF_MEASURESNDBW 0x8 /* Measure send bw on this connection */ -#define TF_LRO_OFFLOADED 0x10 /* Connection LRO offloaded */ -#define TF_SACK_ENABLE 0x20 /* SACK is enabled */ -#define TF_RECOMPUTE_RTT 0x40 /* recompute RTT after spurious retransmit */ -#define TF_DETECT_READSTALL 0x80 /* Used to detect a stall during read operation */ -#define TF_RECV_THROTTLE 0x100 /* Input throttling active */ -#define TF_NOSTRETCHACK 0x200 /* ack every other packet */ -#define TF_NOTIMEWAIT 0x800 /* Avoid going into time-wait */ -#define TF_SENT_TLPROBE 0x1000 /* Sent data in PTO */ -#define TF_PKTS_REORDERED 0x2000 /* Detected reordering */ -#define TF_DELAY_RECOVERY 0x4000 /* delay fast recovery */ -#define TF_FORCE 0x8000 
/* force 1 byte out */ -#define TF_DISABLE_STRETCHACK 0x10000 /* auto-disable stretch ack */ -#define TF_NOBLACKHOLE_DETECTION 0x20000 /* Disable PMTU blackhole detection */ -#define TF_DISABLE_DSACK 0x40000 /* Ignore DSACK due to n/w duplication */ -#define TF_RESCUE_RXT 0x80000 /* SACK rescue retransmit */ -#define TF_CWND_NONVALIDATED 0x100000 /* cwnd non validated */ -#define TF_PROBING 0x200000 /* Trigger probe timeout */ -#define TF_FASTOPEN 0x400000 /* TCP Fastopen is enabled */ -#define TF_REASS_INPROG 0x800000 /* Reassembly is in progress */ -#define TF_FASTOPEN_HEUR 0x1000000 /* Make sure that heuristics get never skipped */ + u_int8_t t_notify_ack_count; + u_int8_t t_ecn_recv_ce_pkt; /* Received packet with CE-bit set (independent from last_ack_sent) */ + u_int16_t t_cached_maxopd; /* default for MSS adjustment using link status report */ + + uint32_t bg_ssthresh; /* Slow start threshold until delay increases */ + uint32_t t_flagsext; /* Another field to accommodate more flags */ +#define TF_RXTFINDROP 0x1 /* Drop conn after retransmitting FIN 3 times */ +#define TF_RCVUNACK_WAITSS 0x2 /* set when the receiver should not stretch acks */ +#define TF_BWMEAS_INPROGRESS 0x4 /* Indicate BW meas is happening */ +#define TF_MEASURESNDBW 0x8 /* Measure send bw on this connection */ +#define TF_LRO_OFFLOADED 0x10 /* Connection LRO offloaded */ +#define TF_SACK_ENABLE 0x20 /* SACK is enabled */ +#define TF_RECOMPUTE_RTT 0x40 /* recompute RTT after spurious retransmit */ +#define TF_DETECT_READSTALL 0x80 /* Used to detect a stall during read operation */ +#define TF_RECV_THROTTLE 0x100 /* Input throttling active */ +#define TF_NOSTRETCHACK 0x200 /* ack every other packet */ +#define TF_NOTIMEWAIT 0x800 /* Avoid going into time-wait */ +#define TF_SENT_TLPROBE 0x1000 /* Sent data in PTO */ +#define TF_PKTS_REORDERED 0x2000 /* Detected reordering */ +#define TF_DELAY_RECOVERY 0x4000 /* delay fast recovery */ +#define TF_FORCE 0x8000 /* force 1 byte out */ +#define TF_DISABLE_STRETCHACK 0x10000 /* auto-disable stretch ack */ +#define TF_NOBLACKHOLE_DETECTION 0x20000 /* Disable PMTU blackhole detection */ +#define TF_DISABLE_DSACK 0x40000 /* Ignore DSACK due to n/w duplication */ +#define TF_RESCUE_RXT 0x80000 /* SACK rescue retransmit */ +#define TF_CWND_NONVALIDATED 0x100000 /* cwnd non validated */ +#define TF_PROBING 0x200000 /* Trigger probe timeout */ +#define TF_FASTOPEN 0x400000 /* TCP Fastopen is enabled */ +#define TF_REASS_INPROG 0x800000 /* Reassembly is in progress */ +#define TF_FASTOPEN_HEUR 0x1000000 /* Make sure that heuristics are never skipped */ #if TRAFFIC_MGT /* Inter-arrival jitter related state */ - uint32_t iaj_rcv_ts; /* tcp clock when the first packet was received */ - uint16_t iaj_size; /* Size of packet for iaj measurement */ - uint8_t iaj_small_pkt; /* Count of packets smaller than iaj_size */ - uint8_t t_pipeack_ind; /* index for next pipeack sample */ - uint16_t iaj_pktcnt; /* packet count, to avoid throttling initially */ - uint16_t acc_iaj; /* Accumulated iaj */ - uint32_t avg_iaj; /* Mean */ - uint32_t std_dev_iaj; /* Standard deviation */ + uint32_t iaj_rcv_ts; /* tcp clock when the first packet was received */ + uint16_t iaj_size; /* Size of packet for iaj measurement */ + uint8_t iaj_small_pkt; /* Count of packets smaller than iaj_size */ + uint8_t t_pipeack_ind; /* index for next pipeack sample */ + uint16_t iaj_pktcnt; /* packet count, to avoid throttling initially */ + uint16_t acc_iaj; /* Accumulated iaj */ + uint32_t avg_iaj; /* Mean */ + uint32_t 
std_dev_iaj; /* Standard deviation */ #endif /* TRAFFIC_MGT */ - struct bwmeas *t_bwmeas; /* State for bandwidth measurement */ - uint32_t t_lropktlen; /* Bytes in a LRO frame */ - tcp_seq t_idleat; /* rcv_nxt at idle time */ - TAILQ_ENTRY(tcpcb) t_twentry; /* link for time wait queue */ - struct tcp_ccstate *t_ccstate; /* congestion control related state */ + struct bwmeas *t_bwmeas; /* State for bandwidth measurement */ + uint32_t t_lropktlen; /* Bytes in a LRO frame */ + tcp_seq t_idleat; /* rcv_nxt at idle time */ + TAILQ_ENTRY(tcpcb) t_twentry; /* link for time wait queue */ + struct tcp_ccstate *t_ccstate; /* congestion control related state */ /* Tail loss probe related state */ - tcp_seq t_tlphighrxt; /* snd_nxt after PTO */ - u_int32_t t_tlpstart; /* timestamp at PTO */ + tcp_seq t_tlphighrxt; /* snd_nxt after PTO */ + u_int32_t t_tlpstart; /* timestamp at PTO */ /* DSACK data receiver state */ - tcp_seq t_dsack_lseq; /* DSACK left sequence */ - tcp_seq t_dsack_rseq; /* DSACK right sequence */ + tcp_seq t_dsack_lseq; /* DSACK left sequence */ + tcp_seq t_dsack_rseq; /* DSACK right sequence */ /* DSACK data sender state */ SLIST_HEAD(tcp_rxt_seghead, tcp_rxt_seg) t_rxt_segments; - tcp_seq t_dsack_lastuna; /* snd_una when last recovery episode started */ + tcp_seq t_dsack_lastuna; /* snd_una when last recovery episode started */ /* state for congestion window validation (draft-ietf-tcpm-newcwv-07) */ -#define TCP_PIPEACK_SAMPLE_COUNT 3 - u_int32_t t_pipeack_sample[TCP_PIPEACK_SAMPLE_COUNT]; /* pipeack, bytes acked within RTT */ - tcp_seq t_pipeack_lastuna; /* una when pipeack measurement started */ - u_int32_t t_pipeack; - u_int32_t t_lossflightsize; +#define TCP_PIPEACK_SAMPLE_COUNT 3 + u_int32_t t_pipeack_sample[TCP_PIPEACK_SAMPLE_COUNT]; /* pipeack, bytes acked within RTT */ + tcp_seq t_pipeack_lastuna; /* una when pipeack measurement started */ + u_int32_t t_pipeack; + u_int32_t t_lossflightsize; #if MPTCP - u_int32_t t_mpflags; /* flags for multipath TCP */ - -#define TMPF_PREESTABLISHED 0x00000001 /* conn in pre-established state */ -#define TMPF_SND_KEYS 0x00000002 /* indicates that keys should be send */ -#define TMPF_MPTCP_TRUE 0x00000004 /* negotiated MPTCP successfully */ -#define TMPF_MPTCP_RCVD_KEY 0x00000008 /* state for 3-way handshake */ -#define TMPF_SND_MPPRIO 0x00000010 /* send priority of subflow */ -#define TMPF_SND_REM_ADDR 0x00000020 /* initiate address removal */ -#define TMPF_RCVD_DACK 0x00000040 /* received a data-ack */ -#define TMPF_JOINED_FLOW 0x00000080 /* Indicates additional flow */ -#define TMPF_BACKUP_PATH 0x00000100 /* Indicates backup path */ -#define TMPF_MPTCP_ACKNOW 0x00000200 /* Send Data ACK */ -#define TMPF_SEND_DSN 0x00000400 /* Send DSN mapping */ -#define TMPF_SEND_DFIN 0x00000800 /* Send Data FIN */ -#define TMPF_RECV_DFIN 0x00001000 /* Recv Data FIN */ -#define TMPF_SENT_JOIN 0x00002000 /* Sent Join */ -#define TMPF_RECVD_JOIN 0x00004000 /* Received Join */ -#define TMPF_RESET 0x00008000 /* Send RST */ -#define TMPF_TCP_FALLBACK 0x00010000 /* Fallback to TCP */ -#define TMPF_FASTCLOSERCV 0x00020000 /* Received Fastclose option */ -#define TMPF_EMBED_DSN 0x00040000 /* tp has DSN mapping */ -#define TMPF_MPTCP_READY 0x00080000 /* Can send DSS options on data */ -#define TMPF_INFIN_SENT 0x00100000 /* Sent infinite mapping */ -#define TMPF_SND_MPFAIL 0x00200000 /* Received mapping csum failure */ -#define TMPF_SND_JACK 0x00400000 /* Send a Join-ACK */ -#define TMPF_TFO_REQUEST 0x00800000 /* TFO Requested */ - -#define TMPF_MPTCP_SIGNALS 
(TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL | TMPF_SND_KEYS | TMPF_SND_JACK) - - tcp_seq t_mpuna; /* unacknowledged sequence */ - struct mptcb *t_mptcb; /* pointer to MPTCP TCB */ - struct mptsub *t_mpsub; /* pointer to the MPTCP subflow */ - struct mpt_dsn_map t_rcv_map; /* Receive mapping list */ - u_int8_t t_local_aid; /* Addr Id for authentication */ - u_int8_t t_rem_aid; /* Addr ID of another subflow */ - u_int8_t t_mprxtshift; /* join retransmission */ + u_int32_t t_mpflags; /* flags for multipath TCP */ + +#define TMPF_PREESTABLISHED 0x00000001 /* conn in pre-established state */ +#define TMPF_SND_KEYS 0x00000002 /* indicates that keys should be sent */ +#define TMPF_MPTCP_TRUE 0x00000004 /* negotiated MPTCP successfully */ +#define TMPF_MPTCP_RCVD_KEY 0x00000008 /* state for 3-way handshake */ +#define TMPF_SND_MPPRIO 0x00000010 /* send priority of subflow */ +#define TMPF_SND_REM_ADDR 0x00000020 /* initiate address removal */ +#define TMPF_RCVD_DACK 0x00000040 /* received a data-ack */ +#define TMPF_JOINED_FLOW 0x00000080 /* Indicates additional flow */ +#define TMPF_BACKUP_PATH 0x00000100 /* Indicates backup path */ +#define TMPF_MPTCP_ACKNOW 0x00000200 /* Send Data ACK */ +#define TMPF_SEND_DSN 0x00000400 /* Send DSN mapping */ +#define TMPF_SEND_DFIN 0x00000800 /* Send Data FIN */ +#define TMPF_RECV_DFIN 0x00001000 /* Recv Data FIN */ +#define TMPF_SENT_JOIN 0x00002000 /* Sent Join */ +#define TMPF_RECVD_JOIN 0x00004000 /* Received Join */ +#define TMPF_RESET 0x00008000 /* Send RST */ +#define TMPF_TCP_FALLBACK 0x00010000 /* Fallback to TCP */ +#define TMPF_FASTCLOSERCV 0x00020000 /* Received Fastclose option */ +#define TMPF_EMBED_DSN 0x00040000 /* tp has DSN mapping */ +#define TMPF_MPTCP_READY 0x00080000 /* Can send DSS options on data */ +#define TMPF_INFIN_SENT 0x00100000 /* Sent infinite mapping */ +#define TMPF_SND_MPFAIL 0x00200000 /* Received mapping csum failure */ +#define TMPF_SND_JACK 0x00400000 /* Send a Join-ACK */ +#define TMPF_TFO_REQUEST 0x00800000 /* TFO Requested */ + +#define TMPF_MPTCP_SIGNALS (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL | TMPF_SND_KEYS | TMPF_SND_JACK) + + tcp_seq t_mpuna; /* unacknowledged sequence */ + struct mptcb *t_mptcb; /* pointer to MPTCP TCB */ + struct mptsub *t_mpsub; /* pointer to the MPTCP subflow */ + struct mpt_dsn_map t_rcv_map; /* Receive mapping list */ + u_int8_t t_local_aid; /* Addr Id for authentication */ + u_int8_t t_rem_aid; /* Addr ID of another subflow */ + u_int8_t t_mprxtshift; /* join retransmission */ #endif /* MPTCP */ -#define TFO_F_OFFER_COOKIE 0x01 /* We will offer a cookie */ -#define TFO_F_COOKIE_VALID 0x02 /* The received cookie is valid */ -#define TFO_F_COOKIE_REQ 0x04 /* Client requested a new cookie */ -#define TFO_F_COOKIE_SENT 0x08 /* Client did send a cookie in the SYN */ -#define TFO_F_SYN_LOSS 0x10 /* A SYN-loss triggered a fallback to regular TCP on the client-side */ -#define TFO_F_NO_SNDPROBING 0x20 /* This network is guaranteed to support TFO in the upstream direction */ -#define TFO_F_HEURISTIC_DONE 0x40 /* We have already marked this network as bad */ - u_int8_t t_tfo_flags; -#define TFO_S_SYNDATA_RCV 0x01 /* SYN+data has been received */ -#define TFO_S_COOKIEREQ_RECV 0x02 /* TFO-cookie request received */ -#define TFO_S_COOKIE_SENT 0x04 /* TFO-cookie announced in SYN/ACK */ -#define TFO_S_COOKIE_INVALID 0x08 /* Received TFO-cookie is invalid */ -#define TFO_S_COOKIE_REQ 0x10 /* TFO-cookie requested within the SYN */ -#define TFO_S_COOKIE_RCV 0x20 /* TFO-cookie 
received in SYN/ACK */ -#define TFO_S_SYN_DATA_SENT 0x40 /* SYN+data sent */ -#define TFO_S_SYN_DATA_ACKED 0x80 /* SYN+data has been acknowledged in SYN/ACK */ -#define TFO_S_SYN_LOSS 0x0100 /* SYN+TFO has been lost - fallback to regular TCP */ -#define TFO_S_COOKIE_WRONG 0x0200 /* Cookie we sent in the SYN was wrong */ -#define TFO_S_NO_COOKIE_RCV 0x0400 /* We asked for a cookie but didn't get one */ -#define TFO_S_HEURISTICS_DISABLE 0x0800 /* TFO-heuristics disabled it for this connection */ -#define TFO_S_SEND_BLACKHOLE 0x1000 /* TFO got blackholed in the send direction */ -#define TFO_S_RECV_BLACKHOLE 0x2000 /* TFO got blackholed in the recv direction */ -#define TFO_S_ONE_BYTE_PROXY 0x4000 /* TFO failed because of a proxy acknowledging just one byte */ - u_int16_t t_tfo_stats; - - u_int8_t t_tfo_probes; /* TFO-probes we did send */ +#define TFO_F_OFFER_COOKIE 0x01 /* We will offer a cookie */ +#define TFO_F_COOKIE_VALID 0x02 /* The received cookie is valid */ +#define TFO_F_COOKIE_REQ 0x04 /* Client requested a new cookie */ +#define TFO_F_COOKIE_SENT 0x08 /* Client did send a cookie in the SYN */ +#define TFO_F_SYN_LOSS 0x10 /* A SYN-loss triggered a fallback to regular TCP on the client-side */ +#define TFO_F_NO_SNDPROBING 0x20 /* This network is guaranteed to support TFO in the upstream direction */ +#define TFO_F_HEURISTIC_DONE 0x40 /* We have already marked this network as bad */ + u_int8_t t_tfo_flags; +#define TFO_S_SYNDATA_RCV 0x01 /* SYN+data has been received */ +#define TFO_S_COOKIEREQ_RECV 0x02 /* TFO-cookie request received */ +#define TFO_S_COOKIE_SENT 0x04 /* TFO-cookie announced in SYN/ACK */ +#define TFO_S_COOKIE_INVALID 0x08 /* Received TFO-cookie is invalid */ +#define TFO_S_COOKIE_REQ 0x10 /* TFO-cookie requested within the SYN */ +#define TFO_S_COOKIE_RCV 0x20 /* TFO-cookie received in SYN/ACK */ +#define TFO_S_SYN_DATA_SENT 0x40 /* SYN+data sent */ +#define TFO_S_SYN_DATA_ACKED 0x80 /* SYN+data has been acknowledged in SYN/ACK */ +#define TFO_S_SYN_LOSS 0x0100 /* SYN+TFO has been lost - fallback to regular TCP */ +#define TFO_S_COOKIE_WRONG 0x0200 /* Cookie we sent in the SYN was wrong */ +#define TFO_S_NO_COOKIE_RCV 0x0400 /* We asked for a cookie but didn't get one */ +#define TFO_S_HEURISTICS_DISABLE 0x0800 /* TFO-heuristics disabled it for this connection */ +#define TFO_S_SEND_BLACKHOLE 0x1000 /* TFO got blackholed in the send direction */ +#define TFO_S_RECV_BLACKHOLE 0x2000 /* TFO got blackholed in the recv direction */ +#define TFO_S_ONE_BYTE_PROXY 0x4000 /* TFO failed because of a proxy acknowledging just one byte */ + u_int16_t t_tfo_stats; + + u_int8_t t_tfo_probes; /* TFO-probes we did send */ /* * This here is the TFO-probing state-machine. Transitions are as follows: * @@ -606,26 +605,26 @@ struct tcpcb { * Event: Data-timeout (did not receive the expected data) * Action: Signal ENODATA up to the app and close everything. 
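 *
 * A minimal sketch of the transitions described above (the predicate and
 * helper names are hypothetical; the real logic is spread across the TFO
 * probe timer and the input path):
 *
 *	switch (tp->t_tfo_probe_state) {
 *	case TFO_PROBE_PROBING:
 *		if (keepalive_was_answered)
 *			tp->t_tfo_probe_state = TFO_PROBE_WAIT_DATA;
 *		else if (probe_timer_expired)
 *			tfo_mark_blackholed_and_drop(tp);
 *		break;
 *	case TFO_PROBE_WAIT_DATA:
 *		if (expected_data_arrived)
 *			tp->t_tfo_probe_state = TFO_PROBE_NONE;
 *		else if (data_timer_expired)
 *			tfo_signal_enodata_and_close(tp);
 *		break;
 *	default:
 *		break;
 *	}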
*/ -#define TFO_PROBE_NONE 0 /* Not probing now */ -#define TFO_PROBE_PROBING 1 /* Sending out TCP-keepalives waiting for reply */ -#define TFO_PROBE_WAIT_DATA 2 /* Received reply, waiting for data */ - u_int8_t t_tfo_probe_state; - - u_int32_t t_rcvoopack; /* out-of-order packets received */ - u_int32_t t_pawsdrop; /* segments dropped due to PAWS */ - u_int32_t t_sack_recovery_episode; /* SACK recovery episodes */ - u_int32_t t_reordered_pkts; /* packets reorderd */ - u_int32_t t_dsack_sent; /* Sent DSACK notification */ - u_int32_t t_dsack_recvd; /* Received a valid DSACK option */ - SLIST_HEAD(,tcp_notify_ack_marker) t_notify_ack; /* state for notifying data acknowledgements */ - u_int32_t t_recv_throttle_ts; /* TS for start of recv throttle */ - u_int32_t t_rxt_minimum_timeout; /* minimum retransmit timeout in ms */ - uint32_t t_challengeack_last; /* last time challenge ACK was sent per sec */ - uint32_t t_challengeack_count; /* # of challenge ACKs already sent per sec */ +#define TFO_PROBE_NONE 0 /* Not probing now */ +#define TFO_PROBE_PROBING 1 /* Sending out TCP-keepalives waiting for reply */ +#define TFO_PROBE_WAIT_DATA 2 /* Received reply, waiting for data */ + u_int8_t t_tfo_probe_state; + + u_int32_t t_rcvoopack; /* out-of-order packets received */ + u_int32_t t_pawsdrop; /* segments dropped due to PAWS */ + u_int32_t t_sack_recovery_episode; /* SACK recovery episodes */ + u_int32_t t_reordered_pkts; /* packets reordered */ + u_int32_t t_dsack_sent; /* Sent DSACK notification */ + u_int32_t t_dsack_recvd; /* Received a valid DSACK option */ + SLIST_HEAD(, tcp_notify_ack_marker) t_notify_ack; /* state for notifying data acknowledgements */ + u_int32_t t_recv_throttle_ts; /* TS for start of recv throttle */ + u_int32_t t_rxt_minimum_timeout; /* minimum retransmit timeout in ms */ + uint32_t t_challengeack_last; /* last time challenge ACK was sent per sec */ + uint32_t t_challengeack_count; /* # of challenge ACKs already sent per sec */ }; -#define IN_FASTRECOVERY(tp) (tp->t_flags & TF_FASTRECOVERY) -#define SACK_ENABLED(tp) (tp->t_flagsext & TF_SACK_ENABLE) +#define IN_FASTRECOVERY(tp) (tp->t_flags & TF_FASTRECOVERY) +#define SACK_ENABLED(tp) (tp->t_flagsext & TF_SACK_ENABLE) /* * If the connection is in a throttled state due to advisory feedback from @@ -634,23 +633,23 @@ struct tcpcb { * should be just a trickle and it will help to improve performance. * We also do not want to back off twice in the same RTT. 
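 *
 * Concretely, that is why ENTER_FASTRECOVERY() below clears any advisory
 * flow-control state via inp_reset_fc_state() on its way into recovery:
 * the cwnd reduction that comes with fast recovery already throttles the
 * sender, and leaving both mechanisms active would back off twice for a
 * single congestion event. The tcp_rxtseg_clean() call likewise drops
 * retransmit-segment records left over from an earlier episode.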
*/ -#define ENTER_FASTRECOVERY(_tp_) do { \ - (_tp_)->t_flags |= TF_FASTRECOVERY; \ - if (INP_IS_FLOW_CONTROLLED((_tp_)->t_inpcb)) \ - inp_reset_fc_state((_tp_)->t_inpcb); \ - if (!SLIST_EMPTY(&tp->t_rxt_segments)) \ - tcp_rxtseg_clean(tp); \ +#define ENTER_FASTRECOVERY(_tp_) do { \ + (_tp_)->t_flags |= TF_FASTRECOVERY; \ + if (INP_IS_FLOW_CONTROLLED((_tp_)->t_inpcb)) \ + inp_reset_fc_state((_tp_)->t_inpcb); \ + if (!SLIST_EMPTY(&tp->t_rxt_segments)) \ + tcp_rxtseg_clean(tp); \ } while(0) -#define EXIT_FASTRECOVERY(_tp_) do { \ - (_tp_)->t_flags &= ~TF_FASTRECOVERY; \ - (_tp_)->t_dupacks = 0; \ - (_tp_)->t_rexmtthresh = tcprexmtthresh; \ - (_tp_)->t_bytes_acked = 0; \ - (_tp_)->ecn_flags &= ~TE_INRECOVERY; \ - (_tp_)->t_timer[TCPT_PTO] = 0; \ - (_tp_)->t_flagsext &= ~TF_RESCUE_RXT; \ - (_tp_)->t_lossflightsize = 0; \ +#define EXIT_FASTRECOVERY(_tp_) do { \ + (_tp_)->t_flags &= ~TF_FASTRECOVERY; \ + (_tp_)->t_dupacks = 0; \ + (_tp_)->t_rexmtthresh = tcprexmtthresh; \ + (_tp_)->t_bytes_acked = 0; \ + (_tp_)->ecn_flags &= ~TE_INRECOVERY; \ + (_tp_)->t_timer[TCPT_PTO] = 0; \ + (_tp_)->t_flagsext &= ~TF_RESCUE_RXT; \ + (_tp_)->t_lossflightsize = 0; \ } while(0) /* @@ -670,18 +669,18 @@ extern int tcprexmtthresh; */ #define TSTMP_SUPPORTED(_tp_) \ (((_tp_)->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP)) == \ - (TF_REQ_TSTMP|TF_RCVD_TSTMP)) + (TF_REQ_TSTMP|TF_RCVD_TSTMP)) /* * This condition is true if window scale option is supported * on a connection */ -#define TCP_WINDOW_SCALE_ENABLED(_tp_) \ +#define TCP_WINDOW_SCALE_ENABLED(_tp_) \ (((_tp_)->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == \ - (TF_RCVD_SCALE|TF_REQ_SCALE)) + (TF_RCVD_SCALE|TF_REQ_SCALE)) /* Is ECN enabled end-to-end */ -#define TCP_ECN_ENABLED(_tp_) \ +#define TCP_ECN_ENABLED(_tp_) \ (((_tp_)->ecn_flags & (TE_ECN_ON)) == (TE_ECN_ON)) /* @@ -691,11 +690,11 @@ extern int tcprexmtthresh; ((_th_)->th_ack - (_tp_)->snd_una) /* Returns true if a DSACK option should be sent */ -#define TCP_SEND_DSACK_OPT(_tp_) \ +#define TCP_SEND_DSACK_OPT(_tp_) \ ((_tp_)->t_dsack_lseq > 0 && (_tp_)->t_dsack_rseq > 0) /* Check if DSACK option should be processed */ -#define TCP_DSACK_ENABLED(tp) (tcp_dsack_enable == 1 && \ +#define TCP_DSACK_ENABLED(tp) (tcp_dsack_enable == 1 && \ !(tp->t_flagsext & TF_DISABLE_DSACK)) /* @@ -709,7 +708,7 @@ extern int tcprexmtthresh; * DSACK option. Choosing a much larger limit means that the memory for * retransmit segments can be held for a longer time. */ -#define TCP_DSACK_MAX_SEND_WINDOW(_tp_) ((_tp_)->t_maxseg << 8) +#define TCP_DSACK_MAX_SEND_WINDOW(_tp_) ((_tp_)->t_maxseg << 8) #define TCP_DSACK_SEQ_IN_WINDOW(_tp_, _seq_, _una_) \ (SEQ_LEQ((_seq_), (_tp_)->snd_max) && \ SEQ_GEQ((_seq_), ((_una_) - TCP_DSACK_MAX_SEND_WINDOW(_tp_)))) @@ -720,32 +719,32 @@ extern int tcprexmtthresh; mptcp_reset_rexmit_state((_tp_)); \ } while(0); -#define TCP_AUTORCVBUF_MAX(_ifp_) (((_ifp_) != NULL && (IFNET_IS_CELLULAR((_ifp_))) && ((_ifp_)->if_eflags & IFEF_3CA)) ? \ - (tcp_autorcvbuf_max << 1) : tcp_autorcvbuf_max) +#define TCP_AUTORCVBUF_MAX(_ifp_) (((_ifp_) != NULL && (IFNET_IS_CELLULAR((_ifp_))) && ((_ifp_)->if_eflags & IFEF_3CA)) ? 
\ + (tcp_autorcvbuf_max << 1) : tcp_autorcvbuf_max) enum tcp_cc_event { - TCP_CC_CWND_INIT, /* 0 */ - TCP_CC_INSEQ_ACK_RCVD, /* 1 */ - TCP_CC_ACK_RCVD, /* 2 */ + TCP_CC_CWND_INIT, /* 0 */ + TCP_CC_INSEQ_ACK_RCVD, /* 1 */ + TCP_CC_ACK_RCVD, /* 2 */ TCP_CC_ENTER_FASTRECOVERY, /* 3 */ - TCP_CC_IN_FASTRECOVERY, /* 4 */ + TCP_CC_IN_FASTRECOVERY, /* 4 */ TCP_CC_EXIT_FASTRECOVERY, /* 5 */ - TCP_CC_PARTIAL_ACK, /* 6 */ - TCP_CC_IDLE_TIMEOUT, /* 7 */ - TCP_CC_REXMT_TIMEOUT, /* 8 */ - TCP_CC_ECN_RCVD, /* 9 */ + TCP_CC_PARTIAL_ACK, /* 6 */ + TCP_CC_IDLE_TIMEOUT, /* 7 */ + TCP_CC_REXMT_TIMEOUT, /* 8 */ + TCP_CC_ECN_RCVD, /* 9 */ TCP_CC_BAD_REXMT_RECOVERY, /* 10 */ - TCP_CC_OUTPUT_ERROR, /* 11 */ - TCP_CC_CHANGE_ALGO, /* 12 */ - TCP_CC_FLOW_CONTROL, /* 13 */ - TCP_CC_SUSPEND, /* 14 */ + TCP_CC_OUTPUT_ERROR, /* 11 */ + TCP_CC_CHANGE_ALGO, /* 12 */ + TCP_CC_FLOW_CONTROL, /* 13 */ + TCP_CC_SUSPEND, /* 14 */ TCP_CC_LIMITED_TRANSMIT, /* 15 */ TCP_CC_EARLY_RETRANSMIT, /* 16 */ - TCP_CC_TLP_RECOVERY, /* 17 */ + TCP_CC_TLP_RECOVERY, /* 17 */ TCP_CC_TLP_RECOVER_LASTPACKET, /* 18 */ TCP_CC_DELAY_FASTRECOVERY, /* 19 */ TCP_CC_TLP_IN_FASTRECOVERY, /* 20 */ - TCP_CC_DSACK_BAD_REXMT /* 21 */ + TCP_CC_DSACK_BAD_REXMT /* 21 */ }; /* @@ -755,23 +754,23 @@ enum tcp_cc_event { * to tcp_dooptions. */ struct tcpopt { - u_int32_t to_flags; /* which options are present */ -#define TOF_TS 0x0001 /* timestamp */ -#define TOF_MSS 0x0010 -#define TOF_SCALE 0x0020 -#define TOF_SIGNATURE 0x0040 /* signature option present */ -#define TOF_SIGLEN 0x0080 /* signature length valid (RFC2385) */ -#define TOF_SACK 0x0100 /* Peer sent SACK option */ -#define TOF_MPTCP 0x0200 /* MPTCP options to be dropped */ -#define TOF_TFO 0x0400 /* TFO cookie option present */ -#define TOF_TFOREQ 0x0800 /* TFO cookie request present */ - u_int32_t to_tsval; - u_int32_t to_tsecr; - u_int16_t to_mss; - u_int8_t to_requested_s_scale; - u_int8_t to_nsacks; /* number of SACK blocks */ - u_char *to_sacks; /* pointer to the first SACK blocks */ - u_char *to_tfo; /* pointer to the TFO cookie */ + u_int32_t to_flags; /* which options are present */ +#define TOF_TS 0x0001 /* timestamp */ +#define TOF_MSS 0x0010 +#define TOF_SCALE 0x0020 +#define TOF_SIGNATURE 0x0040 /* signature option present */ +#define TOF_SIGLEN 0x0080 /* signature length valid (RFC2385) */ +#define TOF_SACK 0x0100 /* Peer sent SACK option */ +#define TOF_MPTCP 0x0200 /* MPTCP options to be dropped */ +#define TOF_TFO 0x0400 /* TFO cookie option present */ +#define TOF_TFOREQ 0x0800 /* TFO cookie request present */ + u_int32_t to_tsval; + u_int32_t to_tsecr; + u_int16_t to_mss; + u_int8_t to_requested_s_scale; + u_int8_t to_nsacks; /* number of SACK blocks */ + u_char *to_sacks; /* pointer to the first SACK blocks */ + u_char *to_tfo; /* pointer to the TFO cookie */ }; /* @@ -779,25 +778,25 @@ struct tcpopt { * portion of the route metrics. 
*/ struct rmxp_tao { - tcp_cc tao_cc; /* latest CC in valid SYN */ - tcp_cc tao_ccsent; /* latest CC sent to peer */ - u_short tao_mssopt; /* peer's cached MSS */ + tcp_cc tao_cc; /* latest CC in valid SYN */ + tcp_cc tao_ccsent; /* latest CC sent to peer */ + u_short tao_mssopt; /* peer's cached MSS */ #ifdef notyet - u_short tao_flags; /* cache status flags */ -#define TAOF_DONT 0x0001 /* peer doesn't understand rfc1644 */ -#define TAOF_OK 0x0002 /* peer does understand rfc1644 */ -#define TAOF_UNDEF 0 /* we don't know yet */ + u_short tao_flags; /* cache status flags */ +#define TAOF_DONT 0x0001 /* peer doesn't understand rfc1644 */ +#define TAOF_OK 0x0002 /* peer does understand rfc1644 */ +#define TAOF_UNDEF 0 /* we don't know yet */ #endif /* notyet */ }; -#define rmx_taop(r) ((struct rmxp_tao *)(r).rmx_filler) +#define rmx_taop(r) ((struct rmxp_tao *)(r).rmx_filler) -#define intotcpcb(ip) ((struct tcpcb *)(ip)->inp_ppcb) -#define sototcpcb(so) (intotcpcb(sotoinpcb(so))) +#define intotcpcb(ip) ((struct tcpcb *)(ip)->inp_ppcb) +#define sototcpcb(so) (intotcpcb(sotoinpcb(so))) /* TFO-specific defines */ -#define TFO_COOKIE_LEN_MIN 4 -#define TFO_COOKIE_LEN_DEFAULT 8 -#define TFO_COOKIE_LEN_MAX 16 +#define TFO_COOKIE_LEN_MIN 4 +#define TFO_COOKIE_LEN_DEFAULT 8 +#define TFO_COOKIE_LEN_MAX 16 /* * The initial retransmission should happen at rtt + 4 * rttvar. @@ -815,7 +814,7 @@ struct rmxp_tao { * which results in inappropriately large RTO values for very * fast networks. */ -#define TCP_REXMTVAL(tp) \ +#define TCP_REXMTVAL(tp) \ max((tp)->t_rttmin, (((tp)->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)) \ + (tp)->t_rttvar) >> TCP_DELTA_SHIFT) @@ -833,102 +832,102 @@ struct tcpcb { #if defined(KERNEL_PRIVATE) u_int32_t t_segq; #else - struct tsegqe_head t_segq; + struct tsegqe_head t_segq; #endif /* KERNEL_PRIVATE */ - int t_dupacks; /* consecutive dup acks recd */ - u_int32_t unused; /* unused now: was t_template */ - - int t_timer[TCPT_NTIMERS_EXT]; /* tcp timers */ - - _TCPCB_PTR(struct inpcb *) t_inpcb; /* back pointer to internet pcb */ - int t_state; /* state of this connection */ - u_int t_flags; -#define TF_ACKNOW 0x00001 /* ack peer immediately */ -#define TF_DELACK 0x00002 /* ack, but try to delay it */ -#define TF_NODELAY 0x00004 /* don't delay packets to coalesce */ -#define TF_NOOPT 0x00008 /* don't use tcp options */ -#define TF_SENTFIN 0x00010 /* have sent FIN */ -#define TF_REQ_SCALE 0x00020 /* have/will request window scaling */ -#define TF_RCVD_SCALE 0x00040 /* other side has requested scaling */ -#define TF_REQ_TSTMP 0x00080 /* have/will request timestamps */ -#define TF_RCVD_TSTMP 0x00100 /* a timestamp was received in SYN */ -#define TF_SACK_PERMIT 0x00200 /* other side said I could SACK */ -#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) */ -#define TF_NEEDFIN 0x00800 /* send FIN (implicit state) */ -#define TF_NOPUSH 0x01000 /* don't push */ -#define TF_REQ_CC 0x02000 /* have/will request CC */ -#define TF_RCVD_CC 0x04000 /* a CC was received in SYN */ -#define TF_SENDCCNEW 0x08000 /* Not implemented */ -#define TF_MORETOCOME 0x10000 /* More data to be appended to sock */ -#define TF_LQ_OVERFLOW 0x20000 /* listen queue overflow */ -#define TF_RXWIN0SENT 0x40000 /* sent a receiver win 0 in response */ -#define TF_SLOWLINK 0x80000 /* route is a on a modem speed link */ - - int t_force; /* 1 if forcing out a byte */ - - tcp_seq snd_una; /* send unacknowledged */ - tcp_seq snd_max; /* highest sequence number sent; - * used to recognize retransmits - */ - tcp_seq 
snd_nxt; /* send next */ - tcp_seq snd_up; /* send urgent pointer */ - - tcp_seq snd_wl1; /* window update seg seq number */ - tcp_seq snd_wl2; /* window update seg ack number */ - tcp_seq iss; /* initial send sequence number */ - tcp_seq irs; /* initial receive sequence number */ - - tcp_seq rcv_nxt; /* receive next */ - tcp_seq rcv_adv; /* advertised window */ - u_int32_t rcv_wnd; /* receive window */ - tcp_seq rcv_up; /* receive urgent pointer */ - - u_int32_t snd_wnd; /* send window */ - u_int32_t snd_cwnd; /* congestion-controlled window */ - u_int32_t snd_ssthresh; /* snd_cwnd size threshold for - * for slow start exponential to - * linear switch - */ - u_int t_maxopd; /* mss plus options */ - - u_int32_t t_rcvtime; /* time at which a packet was received */ - u_int32_t t_starttime; /* time connection was established */ - int t_rtttime; /* round trip time */ - tcp_seq t_rtseq; /* sequence number being timed */ - - int t_rxtcur; /* current retransmit value (ticks) */ - u_int t_maxseg; /* maximum segment size */ - int t_srtt; /* smoothed round-trip time */ - int t_rttvar; /* variance in round-trip time */ - - int t_rxtshift; /* log(2) of rexmt exp. backoff */ - u_int t_rttmin; /* minimum rtt allowed */ - u_int32_t t_rttupdated; /* number of times rtt sampled */ - u_int32_t max_sndwnd; /* largest window peer has offered */ - - int t_softerror; /* possible error not yet reported */ + int t_dupacks; /* consecutive dup acks recd */ + u_int32_t unused; /* unused now: was t_template */ + + int t_timer[TCPT_NTIMERS_EXT]; /* tcp timers */ + + _TCPCB_PTR(struct inpcb *) t_inpcb; /* back pointer to internet pcb */ + int t_state; /* state of this connection */ + u_int t_flags; +#define TF_ACKNOW 0x00001 /* ack peer immediately */ +#define TF_DELACK 0x00002 /* ack, but try to delay it */ +#define TF_NODELAY 0x00004 /* don't delay packets to coalesce */ +#define TF_NOOPT 0x00008 /* don't use tcp options */ +#define TF_SENTFIN 0x00010 /* have sent FIN */ +#define TF_REQ_SCALE 0x00020 /* have/will request window scaling */ +#define TF_RCVD_SCALE 0x00040 /* other side has requested scaling */ +#define TF_REQ_TSTMP 0x00080 /* have/will request timestamps */ +#define TF_RCVD_TSTMP 0x00100 /* a timestamp was received in SYN */ +#define TF_SACK_PERMIT 0x00200 /* other side said I could SACK */ +#define TF_NEEDSYN 0x00400 /* send SYN (implicit state) */ +#define TF_NEEDFIN 0x00800 /* send FIN (implicit state) */ +#define TF_NOPUSH 0x01000 /* don't push */ +#define TF_REQ_CC 0x02000 /* have/will request CC */ +#define TF_RCVD_CC 0x04000 /* a CC was received in SYN */ +#define TF_SENDCCNEW 0x08000 /* Not implemented */ +#define TF_MORETOCOME 0x10000 /* More data to be appended to sock */ +#define TF_LQ_OVERFLOW 0x20000 /* listen queue overflow */ +#define TF_RXWIN0SENT 0x40000 /* sent a receiver win 0 in response */ +#define TF_SLOWLINK 0x80000 /* route is on a modem speed link */ + + int t_force; /* 1 if forcing out a byte */ + + tcp_seq snd_una; /* send unacknowledged */ + tcp_seq snd_max; /* highest sequence number sent; + * used to recognize retransmits + */ + tcp_seq snd_nxt; /* send next */ + tcp_seq snd_up; /* send urgent pointer */ + + tcp_seq snd_wl1; /* window update seg seq number */ + tcp_seq snd_wl2; /* window update seg ack number */ + tcp_seq iss; /* initial send sequence number */ + tcp_seq irs; /* initial receive sequence number */ + + tcp_seq rcv_nxt; /* receive next */ + tcp_seq rcv_adv; /* advertised window */ + u_int32_t rcv_wnd; /* receive window */ + tcp_seq rcv_up; /* receive urgent 
pointer */ + + u_int32_t snd_wnd; /* send window */ + u_int32_t snd_cwnd; /* congestion-controlled window */ + u_int32_t snd_ssthresh; /* snd_cwnd size threshold for + * for slow start exponential to + * linear switch + */ + u_int t_maxopd; /* mss plus options */ + + u_int32_t t_rcvtime; /* time at which a packet was received */ + u_int32_t t_starttime; /* time connection was established */ + int t_rtttime; /* round trip time */ + tcp_seq t_rtseq; /* sequence number being timed */ + + int t_rxtcur; /* current retransmit value (ticks) */ + u_int t_maxseg; /* maximum segment size */ + int t_srtt; /* smoothed round-trip time */ + int t_rttvar; /* variance in round-trip time */ + + int t_rxtshift; /* log(2) of rexmt exp. backoff */ + u_int t_rttmin; /* minimum rtt allowed */ + u_int32_t t_rttupdated; /* number of times rtt sampled */ + u_int32_t max_sndwnd; /* largest window peer has offered */ + + int t_softerror; /* possible error not yet reported */ /* out-of-band data */ - char t_oobflags; /* have some */ - char t_iobc; /* input character */ -#define TCPOOB_HAVEDATA 0x01 -#define TCPOOB_HADDATA 0x02 + char t_oobflags; /* have some */ + char t_iobc; /* input character */ +#define TCPOOB_HAVEDATA 0x01 +#define TCPOOB_HADDATA 0x02 /* RFC 1323 variables */ - u_char snd_scale; /* window scaling for send window */ - u_char rcv_scale; /* window scaling for recv window */ - u_char request_r_scale; /* pending window scaling */ - u_char requested_s_scale; - u_int32_t ts_recent; /* timestamp echo data */ - - u_int32_t ts_recent_age; /* when last updated */ - tcp_seq last_ack_sent; + u_char snd_scale; /* window scaling for send window */ + u_char rcv_scale; /* window scaling for recv window */ + u_char request_r_scale; /* pending window scaling */ + u_char requested_s_scale; + u_int32_t ts_recent; /* timestamp echo data */ + + u_int32_t ts_recent_age; /* when last updated */ + tcp_seq last_ack_sent; /* RFC 1644 variables */ - tcp_cc cc_send; /* send connection count */ - tcp_cc cc_recv; /* receive connection count */ - tcp_seq snd_recover; /* for use in fast recovery */ + tcp_cc cc_send; /* send connection count */ + tcp_cc cc_recv; /* receive connection count */ + tcp_seq snd_recover; /* for use in fast recovery */ /* experimental */ - u_int32_t snd_cwnd_prev; /* cwnd prior to retransmit */ - u_int32_t snd_ssthresh_prev; /* ssthresh prior to retransmit */ - u_int32_t t_badrxtwin; /* window for retransmit recovery */ + u_int32_t snd_cwnd_prev; /* cwnd prior to retransmit */ + u_int32_t snd_ssthresh_prev; /* ssthresh prior to retransmit */ + u_int32_t t_badrxtwin; /* window for retransmit recovery */ }; #define tcps_ecn_setup tcps_ecn_client_success @@ -940,273 +939,273 @@ struct tcpcb { * Many of these should be kept per connection, * but that's inconvenient at the moment. */ -struct tcpstat { - u_int32_t tcps_connattempt; /* connections initiated */ - u_int32_t tcps_accepts; /* connections accepted */ - u_int32_t tcps_connects; /* connections established */ - u_int32_t tcps_drops; /* connections dropped */ - u_int32_t tcps_conndrops; /* embryonic connections dropped */ - u_int32_t tcps_closed; /* conn. closed (includes drops) */ - u_int32_t tcps_segstimed; /* segs where we tried to get rtt */ - u_int32_t tcps_rttupdated; /* times we succeeded */ - u_int32_t tcps_delack; /* delayed acks sent */ - u_int32_t tcps_timeoutdrop; /* conn. 
dropped in rxmt timeout */ - u_int32_t tcps_rexmttimeo; /* retransmit timeouts */ - u_int32_t tcps_persisttimeo; /* persist timeouts */ - u_int32_t tcps_keeptimeo; /* keepalive timeouts */ - u_int32_t tcps_keepprobe; /* keepalive probes sent */ - u_int32_t tcps_keepdrops; /* connections dropped in keepalive */ - - u_int32_t tcps_sndtotal; /* total packets sent */ - u_int32_t tcps_sndpack; /* data packets sent */ - u_int32_t tcps_sndbyte; /* data bytes sent */ - u_int32_t tcps_sndrexmitpack; /* data packets retransmitted */ - u_int32_t tcps_sndrexmitbyte; /* data bytes retransmitted */ - u_int32_t tcps_sndacks; /* ack-only packets sent */ - u_int32_t tcps_sndprobe; /* window probes sent */ - u_int32_t tcps_sndurg; /* packets sent with URG only */ - u_int32_t tcps_sndwinup; /* window update-only packets sent */ - u_int32_t tcps_sndctrl; /* control (SYN|FIN|RST) packets sent */ - - u_int32_t tcps_rcvtotal; /* total packets received */ - u_int32_t tcps_rcvpack; /* packets received in sequence */ - u_int32_t tcps_rcvbyte; /* bytes received in sequence */ - u_int32_t tcps_rcvbadsum; /* packets received with ccksum errs */ - u_int32_t tcps_rcvbadoff; /* packets received with bad offset */ - u_int32_t tcps_rcvmemdrop; /* packets dropped for lack of memory */ - u_int32_t tcps_rcvshort; /* packets received too short */ - u_int32_t tcps_rcvduppack; /* duplicate-only packets received */ - u_int32_t tcps_rcvdupbyte; /* duplicate-only bytes received */ - u_int32_t tcps_rcvpartduppack; /* packets with some duplicate data */ - u_int32_t tcps_rcvpartdupbyte; /* dup. bytes in part-dup. packets */ - u_int32_t tcps_rcvoopack; /* out-of-order packets received */ - u_int32_t tcps_rcvoobyte; /* out-of-order bytes received */ - u_int32_t tcps_rcvpackafterwin; /* packets with data after window */ - u_int32_t tcps_rcvbyteafterwin; /* bytes rcvd after window */ - u_int32_t tcps_rcvafterclose; /* packets rcvd after "close" */ - u_int32_t tcps_rcvwinprobe; /* rcvd window probe packets */ - u_int32_t tcps_rcvdupack; /* rcvd duplicate acks */ - u_int32_t tcps_rcvacktoomuch; /* rcvd acks for unsent data */ - u_int32_t tcps_rcvackpack; /* rcvd ack packets */ - u_int32_t tcps_rcvackbyte; /* bytes acked by rcvd acks */ - u_int32_t tcps_rcvwinupd; /* rcvd window update packets */ - u_int32_t tcps_pawsdrop; /* segments dropped due to PAWS */ - u_int32_t tcps_predack; /* times hdr predict ok for acks */ - u_int32_t tcps_preddat; /* times hdr predict ok for data pkts */ - u_int32_t tcps_pcbcachemiss; - u_int32_t tcps_cachedrtt; /* times cached RTT in route updated */ - u_int32_t tcps_cachedrttvar; /* times cached rttvar updated */ - u_int32_t tcps_cachedssthresh; /* times cached ssthresh updated */ - u_int32_t tcps_usedrtt; /* times RTT initialized from route */ - u_int32_t tcps_usedrttvar; /* times RTTVAR initialized from rt */ - u_int32_t tcps_usedssthresh; /* times ssthresh initialized from rt*/ - u_int32_t tcps_persistdrop; /* timeout in persist state */ - u_int32_t tcps_badsyn; /* bogus SYN, e.g. 
premature ACK */ - u_int32_t tcps_mturesent; /* resends due to MTU discovery */ - u_int32_t tcps_listendrop; /* listen queue overflows */ - u_int32_t tcps_synchallenge; /* challenge ACK due to bad SYN */ - u_int32_t tcps_rstchallenge; /* challenge ACK due to bad RST */ +struct tcpstat { + u_int32_t tcps_connattempt; /* connections initiated */ + u_int32_t tcps_accepts; /* connections accepted */ + u_int32_t tcps_connects; /* connections established */ + u_int32_t tcps_drops; /* connections dropped */ + u_int32_t tcps_conndrops; /* embryonic connections dropped */ + u_int32_t tcps_closed; /* conn. closed (includes drops) */ + u_int32_t tcps_segstimed; /* segs where we tried to get rtt */ + u_int32_t tcps_rttupdated; /* times we succeeded */ + u_int32_t tcps_delack; /* delayed acks sent */ + u_int32_t tcps_timeoutdrop; /* conn. dropped in rxmt timeout */ + u_int32_t tcps_rexmttimeo; /* retransmit timeouts */ + u_int32_t tcps_persisttimeo; /* persist timeouts */ + u_int32_t tcps_keeptimeo; /* keepalive timeouts */ + u_int32_t tcps_keepprobe; /* keepalive probes sent */ + u_int32_t tcps_keepdrops; /* connections dropped in keepalive */ + + u_int32_t tcps_sndtotal; /* total packets sent */ + u_int32_t tcps_sndpack; /* data packets sent */ + u_int32_t tcps_sndbyte; /* data bytes sent */ + u_int32_t tcps_sndrexmitpack; /* data packets retransmitted */ + u_int32_t tcps_sndrexmitbyte; /* data bytes retransmitted */ + u_int32_t tcps_sndacks; /* ack-only packets sent */ + u_int32_t tcps_sndprobe; /* window probes sent */ + u_int32_t tcps_sndurg; /* packets sent with URG only */ + u_int32_t tcps_sndwinup; /* window update-only packets sent */ + u_int32_t tcps_sndctrl; /* control (SYN|FIN|RST) packets sent */ + + u_int32_t tcps_rcvtotal; /* total packets received */ + u_int32_t tcps_rcvpack; /* packets received in sequence */ + u_int32_t tcps_rcvbyte; /* bytes received in sequence */ + u_int32_t tcps_rcvbadsum; /* packets received with ccksum errs */ + u_int32_t tcps_rcvbadoff; /* packets received with bad offset */ + u_int32_t tcps_rcvmemdrop; /* packets dropped for lack of memory */ + u_int32_t tcps_rcvshort; /* packets received too short */ + u_int32_t tcps_rcvduppack; /* duplicate-only packets received */ + u_int32_t tcps_rcvdupbyte; /* duplicate-only bytes received */ + u_int32_t tcps_rcvpartduppack; /* packets with some duplicate data */ + u_int32_t tcps_rcvpartdupbyte; /* dup. bytes in part-dup. 
packets */ + u_int32_t tcps_rcvoopack; /* out-of-order packets received */ + u_int32_t tcps_rcvoobyte; /* out-of-order bytes received */ + u_int32_t tcps_rcvpackafterwin; /* packets with data after window */ + u_int32_t tcps_rcvbyteafterwin; /* bytes rcvd after window */ + u_int32_t tcps_rcvafterclose; /* packets rcvd after "close" */ + u_int32_t tcps_rcvwinprobe; /* rcvd window probe packets */ + u_int32_t tcps_rcvdupack; /* rcvd duplicate acks */ + u_int32_t tcps_rcvacktoomuch; /* rcvd acks for unsent data */ + u_int32_t tcps_rcvackpack; /* rcvd ack packets */ + u_int32_t tcps_rcvackbyte; /* bytes acked by rcvd acks */ + u_int32_t tcps_rcvwinupd; /* rcvd window update packets */ + u_int32_t tcps_pawsdrop; /* segments dropped due to PAWS */ + u_int32_t tcps_predack; /* times hdr predict ok for acks */ + u_int32_t tcps_preddat; /* times hdr predict ok for data pkts */ + u_int32_t tcps_pcbcachemiss; + u_int32_t tcps_cachedrtt; /* times cached RTT in route updated */ + u_int32_t tcps_cachedrttvar; /* times cached rttvar updated */ + u_int32_t tcps_cachedssthresh; /* times cached ssthresh updated */ + u_int32_t tcps_usedrtt; /* times RTT initialized from route */ + u_int32_t tcps_usedrttvar; /* times RTTVAR initialized from rt */ + u_int32_t tcps_usedssthresh; /* times ssthresh initialized from rt*/ + u_int32_t tcps_persistdrop; /* timeout in persist state */ + u_int32_t tcps_badsyn; /* bogus SYN, e.g. premature ACK */ + u_int32_t tcps_mturesent; /* resends due to MTU discovery */ + u_int32_t tcps_listendrop; /* listen queue overflows */ + u_int32_t tcps_synchallenge; /* challenge ACK due to bad SYN */ + u_int32_t tcps_rstchallenge; /* challenge ACK due to bad RST */ /* new stats from FreeBSD 5.4 sync up */ - u_int32_t tcps_minmssdrops; /* average minmss too low drops */ - - u_int32_t tcps_sndrexmitbad; /* unnecessary packet retransmissions */ - u_int32_t tcps_badrst; /* ignored RSTs in the window */ - - u_int32_t tcps_sc_added; /* entry added to syncache */ - u_int32_t tcps_sc_retransmitted; /* syncache entry was retransmitted */ - u_int32_t tcps_sc_dupsyn; /* duplicate SYN packet */ - u_int32_t tcps_sc_dropped; /* could not reply to packet */ - u_int32_t tcps_sc_completed; /* successful extraction of entry */ - u_int32_t tcps_sc_bucketoverflow; /* syncache per-bucket limit hit */ - u_int32_t tcps_sc_cacheoverflow; /* syncache cache limit hit */ - u_int32_t tcps_sc_reset; /* RST removed entry from syncache */ - u_int32_t tcps_sc_stale; /* timed out or listen socket gone */ - u_int32_t tcps_sc_aborted; /* syncache entry aborted */ - u_int32_t tcps_sc_badack; /* removed due to bad ACK */ - u_int32_t tcps_sc_unreach; /* ICMP unreachable received */ - u_int32_t tcps_sc_zonefail; /* zalloc() failed */ - u_int32_t tcps_sc_sendcookie; /* SYN cookie sent */ - u_int32_t tcps_sc_recvcookie; /* SYN cookie received */ - - u_int32_t tcps_hc_added; /* entry added to hostcache */ - u_int32_t tcps_hc_bucketoverflow; /* hostcache per bucket limit hit */ + u_int32_t tcps_minmssdrops; /* average minmss too low drops */ + + u_int32_t tcps_sndrexmitbad; /* unnecessary packet retransmissions */ + u_int32_t tcps_badrst; /* ignored RSTs in the window */ + + u_int32_t tcps_sc_added; /* entry added to syncache */ + u_int32_t tcps_sc_retransmitted; /* syncache entry was retransmitted */ + u_int32_t tcps_sc_dupsyn; /* duplicate SYN packet */ + u_int32_t tcps_sc_dropped; /* could not reply to packet */ + u_int32_t tcps_sc_completed; /* successful extraction of entry */ + u_int32_t tcps_sc_bucketoverflow; /* syncache 
per-bucket limit hit */ + u_int32_t tcps_sc_cacheoverflow; /* syncache cache limit hit */ + u_int32_t tcps_sc_reset; /* RST removed entry from syncache */ + u_int32_t tcps_sc_stale; /* timed out or listen socket gone */ + u_int32_t tcps_sc_aborted; /* syncache entry aborted */ + u_int32_t tcps_sc_badack; /* removed due to bad ACK */ + u_int32_t tcps_sc_unreach; /* ICMP unreachable received */ + u_int32_t tcps_sc_zonefail; /* zalloc() failed */ + u_int32_t tcps_sc_sendcookie; /* SYN cookie sent */ + u_int32_t tcps_sc_recvcookie; /* SYN cookie received */ + + u_int32_t tcps_hc_added; /* entry added to hostcache */ + u_int32_t tcps_hc_bucketoverflow; /* hostcache per bucket limit hit */ /* SACK related stats */ - u_int32_t tcps_sack_recovery_episode; /* SACK recovery episodes */ - u_int32_t tcps_sack_rexmits; /* SACK rexmit segments */ - u_int32_t tcps_sack_rexmit_bytes; /* SACK rexmit bytes */ - u_int32_t tcps_sack_rcv_blocks; /* SACK blocks (options) received */ - u_int32_t tcps_sack_send_blocks; /* SACK blocks (options) sent */ - u_int32_t tcps_sack_sboverflow; /* SACK sendblock overflow */ + u_int32_t tcps_sack_recovery_episode; /* SACK recovery episodes */ + u_int32_t tcps_sack_rexmits; /* SACK rexmit segments */ + u_int32_t tcps_sack_rexmit_bytes; /* SACK rexmit bytes */ + u_int32_t tcps_sack_rcv_blocks; /* SACK blocks (options) received */ + u_int32_t tcps_sack_send_blocks; /* SACK blocks (options) sent */ + u_int32_t tcps_sack_sboverflow; /* SACK sendblock overflow */ - u_int32_t tcps_bg_rcvtotal; /* total background packets received */ - u_int32_t tcps_rxtfindrop; /* drop conn after retransmitting FIN */ - u_int32_t tcps_fcholdpacket; /* packets withheld because of flow control */ + u_int32_t tcps_bg_rcvtotal; /* total background packets received */ + u_int32_t tcps_rxtfindrop; /* drop conn after retransmitting FIN */ + u_int32_t tcps_fcholdpacket; /* packets withheld because of flow control */ /* LRO related stats */ - u_int32_t tcps_coalesced_pack; /* number of coalesced packets */ - u_int32_t tcps_flowtbl_full; /* times flow table was full */ - u_int32_t tcps_flowtbl_collision; /* collisions in flow tbl */ - u_int32_t tcps_lro_twopack; /* 2 packets coalesced */ - u_int32_t tcps_lro_multpack; /* 3 or 4 pkts coalesced */ - u_int32_t tcps_lro_largepack; /* 5 or more pkts coalesced */ + u_int32_t tcps_coalesced_pack; /* number of coalesced packets */ + u_int32_t tcps_flowtbl_full; /* times flow table was full */ + u_int32_t tcps_flowtbl_collision; /* collisions in flow tbl */ + u_int32_t tcps_lro_twopack; /* 2 packets coalesced */ + u_int32_t tcps_lro_multpack; /* 3 or 4 pkts coalesced */ + u_int32_t tcps_lro_largepack; /* 5 or more pkts coalesced */ - u_int32_t tcps_limited_txt; /* Limited transmit used */ - u_int32_t tcps_early_rexmt; /* Early retransmit used */ - u_int32_t tcps_sack_ackadv; /* Cumulative ack advanced along with sack */ + u_int32_t tcps_limited_txt; /* Limited transmit used */ + u_int32_t tcps_early_rexmt; /* Early retransmit used */ + u_int32_t tcps_sack_ackadv; /* Cumulative ack advanced along with sack */ /* Checksum related stats */ - u_int32_t tcps_rcv_swcsum; /* tcp swcksum (inbound), packets */ - u_int32_t tcps_rcv_swcsum_bytes; /* tcp swcksum (inbound), bytes */ - u_int32_t tcps_rcv6_swcsum; /* tcp6 swcksum (inbound), packets */ - u_int32_t tcps_rcv6_swcsum_bytes; /* tcp6 swcksum (inbound), bytes */ - u_int32_t tcps_snd_swcsum; /* tcp swcksum (outbound), packets */ - u_int32_t tcps_snd_swcsum_bytes; /* tcp swcksum (outbound), bytes */ - u_int32_t 
tcps_snd6_swcsum; /* tcp6 swcksum (outbound), packets */ - u_int32_t tcps_snd6_swcsum_bytes; /* tcp6 swcksum (outbound), bytes */ - u_int32_t tcps_msg_unopkts; /* unordered packet on TCP msg stream */ - u_int32_t tcps_msg_unoappendfail; /* failed to append unordered pkt */ - u_int32_t tcps_msg_sndwaithipri; /* send is waiting for high priority data */ + u_int32_t tcps_rcv_swcsum; /* tcp swcksum (inbound), packets */ + u_int32_t tcps_rcv_swcsum_bytes; /* tcp swcksum (inbound), bytes */ + u_int32_t tcps_rcv6_swcsum; /* tcp6 swcksum (inbound), packets */ + u_int32_t tcps_rcv6_swcsum_bytes; /* tcp6 swcksum (inbound), bytes */ + u_int32_t tcps_snd_swcsum; /* tcp swcksum (outbound), packets */ + u_int32_t tcps_snd_swcsum_bytes; /* tcp swcksum (outbound), bytes */ + u_int32_t tcps_snd6_swcsum; /* tcp6 swcksum (outbound), packets */ + u_int32_t tcps_snd6_swcsum_bytes; /* tcp6 swcksum (outbound), bytes */ + u_int32_t tcps_msg_unopkts; /* unordered packet on TCP msg stream */ + u_int32_t tcps_msg_unoappendfail; /* failed to append unordered pkt */ + u_int32_t tcps_msg_sndwaithipri; /* send is waiting for high priority data */ /* MPTCP Related stats */ - u_int32_t tcps_invalid_mpcap; /* Invalid MPTCP capable opts */ - u_int32_t tcps_invalid_joins; /* Invalid MPTCP joins */ - u_int32_t tcps_mpcap_fallback; /* TCP fallback in primary */ - u_int32_t tcps_join_fallback; /* No MPTCP in secondary */ - u_int32_t tcps_estab_fallback; /* DSS option dropped */ - u_int32_t tcps_invalid_opt; /* Catchall error stat */ - u_int32_t tcps_mp_outofwin; /* Packet lies outside the - shared recv window */ - u_int32_t tcps_mp_reducedwin; /* Reduced subflow window */ - u_int32_t tcps_mp_badcsum; /* Bad DSS csum */ - u_int32_t tcps_mp_oodata; /* Out of order data */ - u_int32_t tcps_mp_switches; /* number of subflow switch */ - u_int32_t tcps_mp_rcvtotal; /* number of rcvd packets */ - u_int32_t tcps_mp_rcvbytes; /* number of bytes received */ - u_int32_t tcps_mp_sndpacks; /* number of data packs sent */ - u_int32_t tcps_mp_sndbytes; /* number of bytes sent */ - u_int32_t tcps_join_rxmts; /* join ack retransmits */ - u_int32_t tcps_tailloss_rto; /* RTO due to tail loss */ - u_int32_t tcps_reordered_pkts; /* packets reorderd */ - u_int32_t tcps_recovered_pkts; /* recovered after loss */ - u_int32_t tcps_pto; /* probe timeout */ - u_int32_t tcps_rto_after_pto; /* RTO after a probe */ - u_int32_t tcps_tlp_recovery; /* TLP induced fast recovery */ - u_int32_t tcps_tlp_recoverlastpkt; /* TLP recoverd last pkt */ - u_int32_t tcps_ecn_client_success; /* client-side connection negotiated ECN */ - u_int32_t tcps_ecn_recv_ece; /* ECE received, sent CWR */ - u_int32_t tcps_ecn_sent_ece; /* Sent ECE notification */ - u_int32_t tcps_detect_reordering; /* Detect pkt reordering */ - u_int32_t tcps_delay_recovery; /* Delay fast recovery */ - u_int32_t tcps_avoid_rxmt; /* Retransmission was avoided */ - u_int32_t tcps_unnecessary_rxmt; /* Retransmission was not needed */ - u_int32_t tcps_nostretchack; /* disabled stretch ack algorithm on a connection */ - u_int32_t tcps_rescue_rxmt; /* SACK rescue retransmit */ - u_int32_t tcps_pto_in_recovery; /* rescue retransmit in fast recovery */ - u_int32_t tcps_pmtudbh_reverted; /* PMTU Blackhole detection, segment size reverted */ + u_int32_t tcps_invalid_mpcap; /* Invalid MPTCP capable opts */ + u_int32_t tcps_invalid_joins; /* Invalid MPTCP joins */ + u_int32_t tcps_mpcap_fallback; /* TCP fallback in primary */ + u_int32_t tcps_join_fallback; /* No MPTCP in secondary */ + u_int32_t 
tcps_estab_fallback; /* DSS option dropped */ + u_int32_t tcps_invalid_opt; /* Catchall error stat */ + u_int32_t tcps_mp_outofwin; /* Packet lies outside the + * shared recv window */ + u_int32_t tcps_mp_reducedwin; /* Reduced subflow window */ + u_int32_t tcps_mp_badcsum; /* Bad DSS csum */ + u_int32_t tcps_mp_oodata; /* Out of order data */ + u_int32_t tcps_mp_switches; /* number of subflow switch */ + u_int32_t tcps_mp_rcvtotal; /* number of rcvd packets */ + u_int32_t tcps_mp_rcvbytes; /* number of bytes received */ + u_int32_t tcps_mp_sndpacks; /* number of data packs sent */ + u_int32_t tcps_mp_sndbytes; /* number of bytes sent */ + u_int32_t tcps_join_rxmts; /* join ack retransmits */ + u_int32_t tcps_tailloss_rto; /* RTO due to tail loss */ + u_int32_t tcps_reordered_pkts; /* packets reorderd */ + u_int32_t tcps_recovered_pkts; /* recovered after loss */ + u_int32_t tcps_pto; /* probe timeout */ + u_int32_t tcps_rto_after_pto; /* RTO after a probe */ + u_int32_t tcps_tlp_recovery; /* TLP induced fast recovery */ + u_int32_t tcps_tlp_recoverlastpkt; /* TLP recoverd last pkt */ + u_int32_t tcps_ecn_client_success; /* client-side connection negotiated ECN */ + u_int32_t tcps_ecn_recv_ece; /* ECE received, sent CWR */ + u_int32_t tcps_ecn_sent_ece; /* Sent ECE notification */ + u_int32_t tcps_detect_reordering; /* Detect pkt reordering */ + u_int32_t tcps_delay_recovery; /* Delay fast recovery */ + u_int32_t tcps_avoid_rxmt; /* Retransmission was avoided */ + u_int32_t tcps_unnecessary_rxmt; /* Retransmission was not needed */ + u_int32_t tcps_nostretchack; /* disabled stretch ack algorithm on a connection */ + u_int32_t tcps_rescue_rxmt; /* SACK rescue retransmit */ + u_int32_t tcps_pto_in_recovery; /* rescue retransmit in fast recovery */ + u_int32_t tcps_pmtudbh_reverted; /* PMTU Blackhole detection, segment size reverted */ /* DSACK related statistics */ - u_int32_t tcps_dsack_disable; /* DSACK disabled due to n/w duplication */ - u_int32_t tcps_dsack_ackloss; /* ignore DSACK due to ack loss */ - u_int32_t tcps_dsack_badrexmt; /* DSACK based bad rexmt recovery */ - u_int32_t tcps_dsack_sent; /* Sent DSACK notification */ - u_int32_t tcps_dsack_recvd; /* Received a valid DSACK option */ - u_int32_t tcps_dsack_recvd_old; /* Received an out of window DSACK option */ + u_int32_t tcps_dsack_disable; /* DSACK disabled due to n/w duplication */ + u_int32_t tcps_dsack_ackloss; /* ignore DSACK due to ack loss */ + u_int32_t tcps_dsack_badrexmt; /* DSACK based bad rexmt recovery */ + u_int32_t tcps_dsack_sent; /* Sent DSACK notification */ + u_int32_t tcps_dsack_recvd; /* Received a valid DSACK option */ + u_int32_t tcps_dsack_recvd_old; /* Received an out of window DSACK option */ /* MPTCP Subflow selection stats */ - u_int32_t tcps_mp_sel_symtomsd; /* By symptomsd */ - u_int32_t tcps_mp_sel_rtt; /* By RTT comparison */ - u_int32_t tcps_mp_sel_rto; /* By RTO comparison */ - u_int32_t tcps_mp_sel_peer; /* By peer's output pattern */ - u_int32_t tcps_mp_num_probes; /* Number of probes sent */ - u_int32_t tcps_mp_verdowngrade; /* MPTCP version downgrade */ - u_int32_t tcps_drop_after_sleep; /* drop after long AP sleep */ - u_int32_t tcps_probe_if; /* probe packets after interface availability */ - u_int32_t tcps_probe_if_conflict; /* Can't send probe packets for interface */ - - u_int32_t tcps_ecn_client_setup; /* Attempted ECN setup from client side */ - u_int32_t tcps_ecn_server_setup; /* Attempted ECN setup from server side */ - u_int32_t tcps_ecn_server_success; /* server-side connection 
negotiated ECN */ - u_int32_t tcps_ecn_lost_synack; /* Lost SYN-ACK with ECN setup */ - u_int32_t tcps_ecn_lost_syn; /* Lost SYN with ECN setup */ - u_int32_t tcps_ecn_not_supported; /* Server did not support ECN setup */ - u_int32_t tcps_ecn_recv_ce; /* Received CE from the network */ - u_int32_t tcps_ecn_conn_recv_ce; /* Number of connections received CE atleast once */ - u_int32_t tcps_ecn_conn_recv_ece; /* Number of connections received ECE atleast once */ - u_int32_t tcps_ecn_conn_plnoce; /* Number of connections that received no CE and sufferred packet loss */ - u_int32_t tcps_ecn_conn_pl_ce; /* Number of connections that received CE and sufferred packet loss */ - u_int32_t tcps_ecn_conn_nopl_ce; /* Number of connections that received CE and sufferred no packet loss */ - u_int32_t tcps_ecn_fallback_synloss; /* Number of times we did fall back due to SYN-Loss */ - u_int32_t tcps_ecn_fallback_reorder; /* Number of times we fallback because we detected the PAWS-issue */ - u_int32_t tcps_ecn_fallback_ce; /* Number of times we fallback because we received too many CEs */ + u_int32_t tcps_mp_sel_symtomsd; /* By symptomsd */ + u_int32_t tcps_mp_sel_rtt; /* By RTT comparison */ + u_int32_t tcps_mp_sel_rto; /* By RTO comparison */ + u_int32_t tcps_mp_sel_peer; /* By peer's output pattern */ + u_int32_t tcps_mp_num_probes; /* Number of probes sent */ + u_int32_t tcps_mp_verdowngrade; /* MPTCP version downgrade */ + u_int32_t tcps_drop_after_sleep; /* drop after long AP sleep */ + u_int32_t tcps_probe_if; /* probe packets after interface availability */ + u_int32_t tcps_probe_if_conflict; /* Can't send probe packets for interface */ + + u_int32_t tcps_ecn_client_setup; /* Attempted ECN setup from client side */ + u_int32_t tcps_ecn_server_setup; /* Attempted ECN setup from server side */ + u_int32_t tcps_ecn_server_success; /* server-side connection negotiated ECN */ + u_int32_t tcps_ecn_lost_synack; /* Lost SYN-ACK with ECN setup */ + u_int32_t tcps_ecn_lost_syn; /* Lost SYN with ECN setup */ + u_int32_t tcps_ecn_not_supported; /* Server did not support ECN setup */ + u_int32_t tcps_ecn_recv_ce; /* Received CE from the network */ + u_int32_t tcps_ecn_conn_recv_ce; /* Number of connections received CE atleast once */ + u_int32_t tcps_ecn_conn_recv_ece; /* Number of connections received ECE atleast once */ + u_int32_t tcps_ecn_conn_plnoce; /* Number of connections that received no CE and sufferred packet loss */ + u_int32_t tcps_ecn_conn_pl_ce; /* Number of connections that received CE and sufferred packet loss */ + u_int32_t tcps_ecn_conn_nopl_ce; /* Number of connections that received CE and sufferred no packet loss */ + u_int32_t tcps_ecn_fallback_synloss; /* Number of times we did fall back due to SYN-Loss */ + u_int32_t tcps_ecn_fallback_reorder; /* Number of times we fallback because we detected the PAWS-issue */ + u_int32_t tcps_ecn_fallback_ce; /* Number of times we fallback because we received too many CEs */ /* TFO-related statistics */ - u_int32_t tcps_tfo_syn_data_rcv; /* Received a SYN+data with valid cookie */ - u_int32_t tcps_tfo_cookie_req_rcv;/* Received a TFO cookie-request */ - u_int32_t tcps_tfo_cookie_sent; /* Offered a TFO-cookie to the client */ - u_int32_t tcps_tfo_cookie_invalid;/* Received an invalid TFO-cookie */ - u_int32_t tcps_tfo_cookie_req; /* Cookie requested with the SYN */ - u_int32_t tcps_tfo_cookie_rcv; /* Cookie received in a SYN/ACK */ - u_int32_t tcps_tfo_syn_data_sent; /* SYN+data+cookie sent */ - u_int32_t tcps_tfo_syn_data_acked;/* SYN+data has been 
acknowledged */ - u_int32_t tcps_tfo_syn_loss; /* SYN+TFO has been lost and we fallback */ - u_int32_t tcps_tfo_blackhole; /* TFO got blackholed by a middlebox. */ - u_int32_t tcps_tfo_cookie_wrong; /* TFO-cookie we sent was wrong */ - u_int32_t tcps_tfo_no_cookie_rcv; /* We asked for a cookie but didn't get one */ - u_int32_t tcps_tfo_heuristics_disable; /* TFO got disabled due to heuristics */ - u_int32_t tcps_tfo_sndblackhole; /* TFO got blackholed in the sending direction */ - u_int32_t tcps_mss_to_default; /* Change MSS to default using link status report */ - u_int32_t tcps_mss_to_medium; /* Change MSS to medium using link status report */ - u_int32_t tcps_mss_to_low; /* Change MSS to low using link status report */ - u_int32_t tcps_ecn_fallback_droprst; /* ECN fallback caused by connection drop due to RST */ - u_int32_t tcps_ecn_fallback_droprxmt; /* ECN fallback due to drop after multiple retransmits */ - u_int32_t tcps_ecn_fallback_synrst; /* ECN fallback due to rst after syn */ - - u_int32_t tcps_mptcp_rcvmemdrop; /* MPTCP packets dropped for lack of memory */ - u_int32_t tcps_mptcp_rcvduppack; /* MPTCP duplicate-only packets received */ - u_int32_t tcps_mptcp_rcvpackafterwin; /* MPTCP packets with data after window */ + u_int32_t tcps_tfo_syn_data_rcv; /* Received a SYN+data with valid cookie */ + u_int32_t tcps_tfo_cookie_req_rcv;/* Received a TFO cookie-request */ + u_int32_t tcps_tfo_cookie_sent; /* Offered a TFO-cookie to the client */ + u_int32_t tcps_tfo_cookie_invalid;/* Received an invalid TFO-cookie */ + u_int32_t tcps_tfo_cookie_req; /* Cookie requested with the SYN */ + u_int32_t tcps_tfo_cookie_rcv; /* Cookie received in a SYN/ACK */ + u_int32_t tcps_tfo_syn_data_sent; /* SYN+data+cookie sent */ + u_int32_t tcps_tfo_syn_data_acked;/* SYN+data has been acknowledged */ + u_int32_t tcps_tfo_syn_loss; /* SYN+TFO has been lost and we fallback */ + u_int32_t tcps_tfo_blackhole; /* TFO got blackholed by a middlebox. 
*/ + u_int32_t tcps_tfo_cookie_wrong; /* TFO-cookie we sent was wrong */ + u_int32_t tcps_tfo_no_cookie_rcv; /* We asked for a cookie but didn't get one */ + u_int32_t tcps_tfo_heuristics_disable; /* TFO got disabled due to heuristics */ + u_int32_t tcps_tfo_sndblackhole; /* TFO got blackholed in the sending direction */ + u_int32_t tcps_mss_to_default; /* Change MSS to default using link status report */ + u_int32_t tcps_mss_to_medium; /* Change MSS to medium using link status report */ + u_int32_t tcps_mss_to_low; /* Change MSS to low using link status report */ + u_int32_t tcps_ecn_fallback_droprst; /* ECN fallback caused by connection drop due to RST */ + u_int32_t tcps_ecn_fallback_droprxmt; /* ECN fallback due to drop after multiple retransmits */ + u_int32_t tcps_ecn_fallback_synrst; /* ECN fallback due to rst after syn */ + + u_int32_t tcps_mptcp_rcvmemdrop; /* MPTCP packets dropped for lack of memory */ + u_int32_t tcps_mptcp_rcvduppack; /* MPTCP duplicate-only packets received */ + u_int32_t tcps_mptcp_rcvpackafterwin; /* MPTCP packets with data after window */ /* TCP timer statistics */ - u_int32_t tcps_timer_drift_le_1_ms; /* Timer drift less or equal to 1 ms */ - u_int32_t tcps_timer_drift_le_10_ms; /* Timer drift less or equal to 10 ms */ - u_int32_t tcps_timer_drift_le_20_ms; /* Timer drift less or equal to 20 ms */ - u_int32_t tcps_timer_drift_le_50_ms; /* Timer drift less or equal to 50 ms */ - u_int32_t tcps_timer_drift_le_100_ms; /* Timer drift less or equal to 100 ms */ - u_int32_t tcps_timer_drift_le_200_ms; /* Timer drift less or equal to 200 ms */ - u_int32_t tcps_timer_drift_le_500_ms; /* Timer drift less or equal to 500 ms */ - u_int32_t tcps_timer_drift_le_1000_ms; /* Timer drift less or equal to 1000 ms */ - u_int32_t tcps_timer_drift_gt_1000_ms; /* Timer drift greater than 1000 ms */ - - u_int32_t tcps_mptcp_handover_attempt; /* Total number of MPTCP-attempts using handover mode */ - u_int32_t tcps_mptcp_interactive_attempt; /* Total number of MPTCP-attempts using interactive mode */ - u_int32_t tcps_mptcp_aggregate_attempt; /* Total number of MPTCP-attempts using aggregate mode */ - u_int32_t tcps_mptcp_fp_handover_attempt; /* Same as previous three but only for first-party apps */ - u_int32_t tcps_mptcp_fp_interactive_attempt; - u_int32_t tcps_mptcp_fp_aggregate_attempt; - u_int32_t tcps_mptcp_heuristic_fallback; /* Total number of MPTCP-connections that fell back due to heuristics */ - u_int32_t tcps_mptcp_fp_heuristic_fallback; /* Same as previous but for first-party apps */ - u_int32_t tcps_mptcp_handover_success_wifi; /* Total number of successfull handover-mode connections that *started* on WiFi */ - u_int32_t tcps_mptcp_handover_success_cell; /* Total number of successfull handover-mode connections that *started* on Cell */ - u_int32_t tcps_mptcp_interactive_success; /* Total number of interactive-mode connections that negotiated MPTCP */ - u_int32_t tcps_mptcp_aggregate_success; /* Same as previous but for aggregate */ - u_int32_t tcps_mptcp_fp_handover_success_wifi; /* Same as previous four, but for first-party apps */ - u_int32_t tcps_mptcp_fp_handover_success_cell; - u_int32_t tcps_mptcp_fp_interactive_success; - u_int32_t tcps_mptcp_fp_aggregate_success; - u_int32_t tcps_mptcp_handover_cell_from_wifi; /* Total number of connections that use cell in handover-mode (coming from WiFi) */ - u_int32_t tcps_mptcp_handover_wifi_from_cell; /* Total number of connections that use WiFi in handover-mode (coming from cell) */ - u_int32_t 
tcps_mptcp_interactive_cell_from_wifi; /* Total number of connections that use cell in interactive mode (coming from WiFi) */ - u_int64_t tcps_mptcp_handover_cell_bytes; /* Total number of bytes sent on cell in handover-mode (on new subflows, ignoring initial one) */ - u_int64_t tcps_mptcp_interactive_cell_bytes; /* Same as previous but for interactive */ - u_int64_t tcps_mptcp_aggregate_cell_bytes; - u_int64_t tcps_mptcp_handover_all_bytes; /* Total number of bytes sent in handover */ - u_int64_t tcps_mptcp_interactive_all_bytes; - u_int64_t tcps_mptcp_aggregate_all_bytes; - u_int32_t tcps_mptcp_back_to_wifi; /* Total number of connections that succeed to move traffic away from cell (when starting on cell) */ - u_int32_t tcps_mptcp_wifi_proxy; /* Total number of new subflows that fell back to regular TCP on cell */ - u_int32_t tcps_mptcp_cell_proxy; /* Total number of new subflows that fell back to regular TCP on WiFi */ - u_int32_t tcps_mptcp_triggered_cell; /* Total number of times an MPTCP-connection triggered cell bringup */ + u_int32_t tcps_timer_drift_le_1_ms; /* Timer drift less or equal to 1 ms */ + u_int32_t tcps_timer_drift_le_10_ms; /* Timer drift less or equal to 10 ms */ + u_int32_t tcps_timer_drift_le_20_ms; /* Timer drift less or equal to 20 ms */ + u_int32_t tcps_timer_drift_le_50_ms; /* Timer drift less or equal to 50 ms */ + u_int32_t tcps_timer_drift_le_100_ms; /* Timer drift less or equal to 100 ms */ + u_int32_t tcps_timer_drift_le_200_ms; /* Timer drift less or equal to 200 ms */ + u_int32_t tcps_timer_drift_le_500_ms; /* Timer drift less or equal to 500 ms */ + u_int32_t tcps_timer_drift_le_1000_ms; /* Timer drift less or equal to 1000 ms */ + u_int32_t tcps_timer_drift_gt_1000_ms; /* Timer drift greater than 1000 ms */ + + u_int32_t tcps_mptcp_handover_attempt; /* Total number of MPTCP-attempts using handover mode */ + u_int32_t tcps_mptcp_interactive_attempt; /* Total number of MPTCP-attempts using interactive mode */ + u_int32_t tcps_mptcp_aggregate_attempt; /* Total number of MPTCP-attempts using aggregate mode */ + u_int32_t tcps_mptcp_fp_handover_attempt; /* Same as previous three but only for first-party apps */ + u_int32_t tcps_mptcp_fp_interactive_attempt; + u_int32_t tcps_mptcp_fp_aggregate_attempt; + u_int32_t tcps_mptcp_heuristic_fallback; /* Total number of MPTCP-connections that fell back due to heuristics */ + u_int32_t tcps_mptcp_fp_heuristic_fallback; /* Same as previous but for first-party apps */ + u_int32_t tcps_mptcp_handover_success_wifi; /* Total number of successfull handover-mode connections that *started* on WiFi */ + u_int32_t tcps_mptcp_handover_success_cell; /* Total number of successfull handover-mode connections that *started* on Cell */ + u_int32_t tcps_mptcp_interactive_success; /* Total number of interactive-mode connections that negotiated MPTCP */ + u_int32_t tcps_mptcp_aggregate_success; /* Same as previous but for aggregate */ + u_int32_t tcps_mptcp_fp_handover_success_wifi; /* Same as previous four, but for first-party apps */ + u_int32_t tcps_mptcp_fp_handover_success_cell; + u_int32_t tcps_mptcp_fp_interactive_success; + u_int32_t tcps_mptcp_fp_aggregate_success; + u_int32_t tcps_mptcp_handover_cell_from_wifi; /* Total number of connections that use cell in handover-mode (coming from WiFi) */ + u_int32_t tcps_mptcp_handover_wifi_from_cell; /* Total number of connections that use WiFi in handover-mode (coming from cell) */ + u_int32_t tcps_mptcp_interactive_cell_from_wifi; /* Total number of connections that use cell in 
interactive mode (coming from WiFi) */ + u_int64_t tcps_mptcp_handover_cell_bytes; /* Total number of bytes sent on cell in handover-mode (on new subflows, ignoring initial one) */ + u_int64_t tcps_mptcp_interactive_cell_bytes; /* Same as previous but for interactive */ + u_int64_t tcps_mptcp_aggregate_cell_bytes; + u_int64_t tcps_mptcp_handover_all_bytes; /* Total number of bytes sent in handover */ + u_int64_t tcps_mptcp_interactive_all_bytes; + u_int64_t tcps_mptcp_aggregate_all_bytes; + u_int32_t tcps_mptcp_back_to_wifi; /* Total number of connections that succeed to move traffic away from cell (when starting on cell) */ + u_int32_t tcps_mptcp_wifi_proxy; /* Total number of new subflows that fell back to regular TCP on cell */ + u_int32_t tcps_mptcp_cell_proxy; /* Total number of new subflows that fell back to regular TCP on WiFi */ + u_int32_t tcps_mptcp_triggered_cell; /* Total number of times an MPTCP-connection triggered cell bringup */ }; @@ -1236,100 +1235,100 @@ struct tcpstat_local { */ struct xtcpcb { - u_int32_t xt_len; + u_int32_t xt_len; #ifdef KERNEL_PRIVATE - struct inpcb_compat xt_inp; + struct inpcb_compat xt_inp; #else - struct inpcb xt_inp; + struct inpcb xt_inp; #endif #ifdef KERNEL_PRIVATE - struct otcpcb xt_tp; + struct otcpcb xt_tp; #else - struct tcpcb xt_tp; + struct tcpcb xt_tp; #endif - struct xsocket xt_socket; - u_quad_t xt_alignment_hack; + struct xsocket xt_socket; + u_quad_t xt_alignment_hack; }; #if !CONFIG_EMBEDDED struct xtcpcb64 { - u_int32_t xt_len; - struct xinpcb64 xt_inpcb; - - u_int64_t t_segq; - int t_dupacks; /* consecutive dup acks recd */ - - int t_timer[TCPT_NTIMERS_EXT]; /* tcp timers */ - - int t_state; /* state of this connection */ - u_int t_flags; - - int t_force; /* 1 if forcing out a byte */ - - tcp_seq snd_una; /* send unacknowledged */ - tcp_seq snd_max; /* highest sequence number sent; - * used to recognize retransmits - */ - tcp_seq snd_nxt; /* send next */ - tcp_seq snd_up; /* send urgent pointer */ - - tcp_seq snd_wl1; /* window update seg seq number */ - tcp_seq snd_wl2; /* window update seg ack number */ - tcp_seq iss; /* initial send sequence number */ - tcp_seq irs; /* initial receive sequence number */ - - tcp_seq rcv_nxt; /* receive next */ - tcp_seq rcv_adv; /* advertised window */ - u_int32_t rcv_wnd; /* receive window */ - tcp_seq rcv_up; /* receive urgent pointer */ - - u_int32_t snd_wnd; /* send window */ - u_int32_t snd_cwnd; /* congestion-controlled window */ - u_int32_t snd_ssthresh; /* snd_cwnd size threshold for - * for slow start exponential to - * linear switch - */ - u_int t_maxopd; /* mss plus options */ - - u_int32_t t_rcvtime; /* time at which a packet was received */ - u_int32_t t_starttime; /* time connection was established */ - int t_rtttime; /* round trip time */ - tcp_seq t_rtseq; /* sequence number being timed */ - - int t_rxtcur; /* current retransmit value (ticks) */ - u_int t_maxseg; /* maximum segment size */ - int t_srtt; /* smoothed round-trip time */ - int t_rttvar; /* variance in round-trip time */ - - int t_rxtshift; /* log(2) of rexmt exp. 
backoff */ - u_int t_rttmin; /* minimum rtt allowed */ - u_int32_t t_rttupdated; /* number of times rtt sampled */ - u_int32_t max_sndwnd; /* largest window peer has offered */ - - int t_softerror; /* possible error not yet reported */ + u_int32_t xt_len; + struct xinpcb64 xt_inpcb; + + u_int64_t t_segq; + int t_dupacks; /* consecutive dup acks recd */ + + int t_timer[TCPT_NTIMERS_EXT]; /* tcp timers */ + + int t_state; /* state of this connection */ + u_int t_flags; + + int t_force; /* 1 if forcing out a byte */ + + tcp_seq snd_una; /* send unacknowledged */ + tcp_seq snd_max; /* highest sequence number sent; + * used to recognize retransmits + */ + tcp_seq snd_nxt; /* send next */ + tcp_seq snd_up; /* send urgent pointer */ + + tcp_seq snd_wl1; /* window update seg seq number */ + tcp_seq snd_wl2; /* window update seg ack number */ + tcp_seq iss; /* initial send sequence number */ + tcp_seq irs; /* initial receive sequence number */ + + tcp_seq rcv_nxt; /* receive next */ + tcp_seq rcv_adv; /* advertised window */ + u_int32_t rcv_wnd; /* receive window */ + tcp_seq rcv_up; /* receive urgent pointer */ + + u_int32_t snd_wnd; /* send window */ + u_int32_t snd_cwnd; /* congestion-controlled window */ + u_int32_t snd_ssthresh; /* snd_cwnd size threshold for + * for slow start exponential to + * linear switch + */ + u_int t_maxopd; /* mss plus options */ + + u_int32_t t_rcvtime; /* time at which a packet was received */ + u_int32_t t_starttime; /* time connection was established */ + int t_rtttime; /* round trip time */ + tcp_seq t_rtseq; /* sequence number being timed */ + + int t_rxtcur; /* current retransmit value (ticks) */ + u_int t_maxseg; /* maximum segment size */ + int t_srtt; /* smoothed round-trip time */ + int t_rttvar; /* variance in round-trip time */ + + int t_rxtshift; /* log(2) of rexmt exp. 
backoff */ + u_int t_rttmin; /* minimum rtt allowed */ + u_int32_t t_rttupdated; /* number of times rtt sampled */ + u_int32_t max_sndwnd; /* largest window peer has offered */ + + int t_softerror; /* possible error not yet reported */ /* out-of-band data */ - char t_oobflags; /* have some */ - char t_iobc; /* input character */ + char t_oobflags; /* have some */ + char t_iobc; /* input character */ /* RFC 1323 variables */ - u_char snd_scale; /* window scaling for send window */ - u_char rcv_scale; /* window scaling for recv window */ - u_char request_r_scale; /* pending window scaling */ - u_char requested_s_scale; - u_int32_t ts_recent; /* timestamp echo data */ - - u_int32_t ts_recent_age; /* when last updated */ - tcp_seq last_ack_sent; + u_char snd_scale; /* window scaling for send window */ + u_char rcv_scale; /* window scaling for recv window */ + u_char request_r_scale; /* pending window scaling */ + u_char requested_s_scale; + u_int32_t ts_recent; /* timestamp echo data */ + + u_int32_t ts_recent_age; /* when last updated */ + tcp_seq last_ack_sent; /* RFC 1644 variables */ - tcp_cc cc_send; /* send connection count */ - tcp_cc cc_recv; /* receive connection count */ - tcp_seq snd_recover; /* for use in fast recovery */ + tcp_cc cc_send; /* send connection count */ + tcp_cc cc_recv; /* receive connection count */ + tcp_seq snd_recover; /* for use in fast recovery */ /* experimental */ - u_int32_t snd_cwnd_prev; /* cwnd prior to retransmit */ - u_int32_t snd_ssthresh_prev; /* ssthresh prior to retransmit */ - u_int32_t t_badrxtwin; /* window for retransmit recovery */ + u_int32_t snd_cwnd_prev; /* cwnd prior to retransmit */ + u_int32_t snd_ssthresh_prev; /* ssthresh prior to retransmit */ + u_int32_t t_badrxtwin; /* window for retransmit recovery */ - u_quad_t xt_alignment_hack; + u_quad_t xt_alignment_hack; }; #endif /* !CONFIG_EMBEDDED */ @@ -1337,8 +1336,8 @@ struct xtcpcb64 { #ifdef PRIVATE struct xtcpcb_n { - u_int32_t xt_len; - u_int32_t xt_kind; /* XSO_TCPCB */ + u_int32_t xt_len; + u_int32_t xt_kind; /* XSO_TCPCB */ u_int64_t t_segq; int t_dupacks; /* consecutive dup acks recd */ @@ -1352,8 +1351,8 @@ struct xtcpcb_n { tcp_seq snd_una; /* send unacknowledged */ tcp_seq snd_max; /* highest sequence number sent; - * used to recognize retransmits - */ + * used to recognize retransmits + */ tcp_seq snd_nxt; /* send next */ tcp_seq snd_up; /* send urgent pointer */ @@ -1370,9 +1369,9 @@ struct xtcpcb_n { u_int32_t snd_wnd; /* send window */ u_int32_t snd_cwnd; /* congestion-controlled window */ u_int32_t snd_ssthresh; /* snd_cwnd size threshold for - * for slow start exponential to - * linear switch - */ + * for slow start exponential to + * linear switch + */ u_int t_maxopd; /* mss plus options */ u_int32_t t_rcvtime; /* time at which a packet was received */ @@ -1412,21 +1411,21 @@ struct xtcpcb_n { u_int32_t snd_ssthresh_prev; /* ssthresh prior to retransmit */ }; - /* - * The rtt measured is in milliseconds as the timestamp granularity is - * a millisecond. The smoothed round-trip time and estimated variance - * are stored as fixed point numbers scaled by the values below. - * For convenience, these scales are also used in smoothing the average - * (smoothed = (1/scale)sample + ((scale-1)/scale)smoothed). - * With these scales, srtt has 5 bits to the right of the binary point, - * and thus an "ALPHA" of 0.875. rttvar has 4 bits to the right of the - * binary point, and is smoothed with an ALPHA of 0.75. - */ -#define TCP_RTT_SCALE 32 /* multiplier for srtt; 3 bits frac. 
*/ -#define TCP_RTT_SHIFT 5 /* shift for srtt; 5 bits frac. */ -#define TCP_RTTVAR_SCALE 16 /* multiplier for rttvar; 4 bits */ -#define TCP_RTTVAR_SHIFT 4 /* shift for rttvar; 4 bits */ -#define TCP_DELTA_SHIFT 2 /* see tcp_input.c */ +/* + * The rtt measured is in milliseconds as the timestamp granularity is + * a millisecond. The smoothed round-trip time and estimated variance + * are stored as fixed point numbers scaled by the values below. + * For convenience, these scales are also used in smoothing the average + * (smoothed = (1/scale)sample + ((scale-1)/scale)smoothed). + * With these scales, srtt has 5 bits to the right of the binary point, + * and thus an "ALPHA" of 0.875. rttvar has 4 bits to the right of the + * binary point, and is smoothed with an ALPHA of 0.75. + */ +#define TCP_RTT_SCALE 32 /* multiplier for srtt; 3 bits frac. */ +#define TCP_RTT_SHIFT 5 /* shift for srtt; 5 bits frac. */ +#define TCP_RTTVAR_SCALE 16 /* multiplier for rttvar; 4 bits */ +#define TCP_RTTVAR_SHIFT 4 /* shift for rttvar; 4 bits */ +#define TCP_DELTA_SHIFT 2 /* see tcp_input.c */ /* @@ -1434,28 +1433,28 @@ struct xtcpcb_n { * exported to user-land via sysctl(3). */ struct xtcpprogress_indicators { - u_int32_t xp_numflows; /* Total number of flows */ - u_int32_t xp_conn_probe_fails; /* Count of connection failures */ - u_int32_t xp_read_probe_fails; /* Count of read probe failures */ - u_int32_t xp_write_probe_fails; /* Count of write failures */ - u_int32_t xp_recentflows; /* Total of "recent" flows */ - u_int32_t xp_recentflows_unacked; /* Total of "recent" flows with unacknowledged data */ - u_int64_t xp_recentflows_rxbytes; /* Total of "recent" flows received bytes */ - u_int64_t xp_recentflows_txbytes; /* Total of "recent" flows transmitted bytes */ - u_int64_t xp_recentflows_rxooo; /* Total of "recent" flows received out of order bytes */ - u_int64_t xp_recentflows_rxdup; /* Total of "recent" flows received duplicate bytes */ - u_int64_t xp_recentflows_retx; /* Total of "recent" flows retransmitted bytes */ - u_int64_t xp_reserved1; /* Expansion */ - u_int64_t xp_reserved2; /* Expansion */ - u_int64_t xp_reserved3; /* Expansion */ - u_int64_t xp_reserved4; /* Expansion */ + u_int32_t xp_numflows; /* Total number of flows */ + u_int32_t xp_conn_probe_fails; /* Count of connection failures */ + u_int32_t xp_read_probe_fails; /* Count of read probe failures */ + u_int32_t xp_write_probe_fails; /* Count of write failures */ + u_int32_t xp_recentflows; /* Total of "recent" flows */ + u_int32_t xp_recentflows_unacked; /* Total of "recent" flows with unacknowledged data */ + u_int64_t xp_recentflows_rxbytes; /* Total of "recent" flows received bytes */ + u_int64_t xp_recentflows_txbytes; /* Total of "recent" flows transmitted bytes */ + u_int64_t xp_recentflows_rxooo; /* Total of "recent" flows received out of order bytes */ + u_int64_t xp_recentflows_rxdup; /* Total of "recent" flows received duplicate bytes */ + u_int64_t xp_recentflows_retx; /* Total of "recent" flows retransmitted bytes */ + u_int64_t xp_reserved1; /* Expansion */ + u_int64_t xp_reserved2; /* Expansion */ + u_int64_t xp_reserved3; /* Expansion */ + u_int64_t xp_reserved4; /* Expansion */ }; struct tcpprogressreq { - u_int64_t ifindex; /* Interface index for progress indicators */ - u_int64_t recentflow_maxduration; /* In mach_absolute_time, max duration for flow to be counted as "recent" */ - u_int64_t xp_reserved1; /* Expansion */ - u_int64_t xp_reserved2; /* Expansion */ + u_int64_t ifindex; /* Interface index for progress 
indicators */ + u_int64_t recentflow_maxduration; /* In mach_absolute_time, max duration for flow to be counted as "recent" */ + u_int64_t xp_reserved1; /* Expansion */ + u_int64_t xp_reserved2; /* Expansion */ }; #endif /* PRIVATE */ @@ -1465,27 +1464,27 @@ struct tcpprogressreq { /* * Names for TCP sysctl objects */ -#define TCPCTL_DO_RFC1323 1 /* use RFC-1323 extensions */ -#define TCPCTL_DO_RFC1644 2 /* use RFC-1644 extensions */ -#define TCPCTL_MSSDFLT 3 /* MSS default */ -#define TCPCTL_STATS 4 /* statistics (read-only) */ -#define TCPCTL_RTTDFLT 5 /* default RTT estimate */ -#define TCPCTL_KEEPIDLE 6 /* keepalive idle timer */ -#define TCPCTL_KEEPINTVL 7 /* interval to send keepalives */ -#define TCPCTL_SENDSPACE 8 /* send buffer space */ -#define TCPCTL_RECVSPACE 9 /* receive buffer space */ -#define TCPCTL_KEEPINIT 10 /* timeout for establishing syn */ -#define TCPCTL_PCBLIST 11 /* list of all outstanding PCBs */ -#define TCPCTL_DELACKTIME 12 /* time before sending delayed ACK */ -#define TCPCTL_V6MSSDFLT 13 /* MSS default for IPv6 */ -#define TCPCTL_MAXID 14 +#define TCPCTL_DO_RFC1323 1 /* use RFC-1323 extensions */ +#define TCPCTL_DO_RFC1644 2 /* use RFC-1644 extensions */ +#define TCPCTL_MSSDFLT 3 /* MSS default */ +#define TCPCTL_STATS 4 /* statistics (read-only) */ +#define TCPCTL_RTTDFLT 5 /* default RTT estimate */ +#define TCPCTL_KEEPIDLE 6 /* keepalive idle timer */ +#define TCPCTL_KEEPINTVL 7 /* interval to send keepalives */ +#define TCPCTL_SENDSPACE 8 /* send buffer space */ +#define TCPCTL_RECVSPACE 9 /* receive buffer space */ +#define TCPCTL_KEEPINIT 10 /* timeout for establishing syn */ +#define TCPCTL_PCBLIST 11 /* list of all outstanding PCBs */ +#define TCPCTL_DELACKTIME 12 /* time before sending delayed ACK */ +#define TCPCTL_V6MSSDFLT 13 /* MSS default for IPv6 */ +#define TCPCTL_MAXID 14 #ifdef BSD_KERNEL_PRIVATE #include -#define TCP_PKTLIST_CLEAR(tp) { \ - (tp)->t_pktlist_head = (tp)->t_pktlist_tail = NULL; \ - (tp)->t_lastchain = (tp)->t_pktlist_sentlen = 0; \ +#define TCP_PKTLIST_CLEAR(tp) { \ + (tp)->t_pktlist_head = (tp)->t_pktlist_tail = NULL; \ + (tp)->t_lastchain = (tp)->t_pktlist_sentlen = 0; \ } #define TCPCTL_NAMES { \ @@ -1509,27 +1508,27 @@ struct tcpprogressreq { SYSCTL_DECL(_net_inet_tcp); #endif /* SYSCTL_DECL */ -extern struct inpcbhead tcb; /* head of queue of active tcpcb's */ -extern struct inpcbinfo tcbinfo; -extern struct tcpstat tcpstat; /* tcp statistics */ -extern int tcp_mssdflt; /* XXX */ -extern int tcp_minmss; -#define TCP_FASTOPEN_SERVER 0x01 -#define TCP_FASTOPEN_CLIENT 0x02 +extern struct inpcbhead tcb; /* head of queue of active tcpcb's */ +extern struct inpcbinfo tcbinfo; +extern struct tcpstat tcpstat; /* tcp statistics */ +extern int tcp_mssdflt; /* XXX */ +extern int tcp_minmss; +#define TCP_FASTOPEN_SERVER 0x01 +#define TCP_FASTOPEN_CLIENT 0x02 extern int tcp_tfo_halfcnt; extern int tcp_tfo_backlog; extern int tcp_fastopen; extern int ss_fltsz; extern int ss_fltsz_local; -extern int tcp_do_rfc3390; /* Calculate ss_fltsz according to RFC 3390 */ +extern int tcp_do_rfc3390; /* Calculate ss_fltsz according to RFC 3390 */ extern int tcp_do_rfc1323; extern int target_qdelay; -extern u_int32_t tcp_now; /* for RFC 1323 timestamps */ +extern u_int32_t tcp_now; /* for RFC 1323 timestamps */ extern struct timeval tcp_uptime; extern lck_spin_t *tcp_uptime_lock; extern int tcp_delack_enabled; -extern int tcp_do_sack; /* SACK enabled/disabled */ +extern int tcp_do_sack; /* SACK enabled/disabled */ extern int tcp_do_rfc3465; extern 
int tcp_do_rfc3465_lim2; extern int maxseg_unacked; @@ -1549,45 +1548,45 @@ struct domain; struct tcp_respond_args { unsigned int ifscope; unsigned int nocell:1, - noexpensive:1, - awdl_unrestricted:1, - intcoproc_allowed:1; + noexpensive:1, + awdl_unrestricted:1, + intcoproc_allowed:1; }; -void tcp_canceltimers(struct tcpcb *); +void tcp_canceltimers(struct tcpcb *); struct tcpcb * - tcp_close(struct tcpcb *); -void tcp_ctlinput(int, struct sockaddr *, void *, struct ifnet *); -int tcp_ctloutput(struct socket *, struct sockopt *); +tcp_close(struct tcpcb *); +void tcp_ctlinput(int, struct sockaddr *, void *, struct ifnet *); +int tcp_ctloutput(struct socket *, struct sockopt *); struct tcpcb * - tcp_drop(struct tcpcb *, int); -void tcp_drain(void); -void tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt); +tcp_drop(struct tcpcb *, int); +void tcp_drain(void); +void tcp_getrt_rtt(struct tcpcb *tp, struct rtentry *rt); struct rmxp_tao * - tcp_gettaocache(struct inpcb *); -void tcp_init(struct protosw *, struct domain *); -void tcp_input(struct mbuf *, int); -void tcp_mss(struct tcpcb *, int, unsigned int); -int tcp_mssopt(struct tcpcb *); -void tcp_drop_syn_sent(struct inpcb *, int); -void tcp_mtudisc(struct inpcb *, int); +tcp_gettaocache(struct inpcb *); +void tcp_init(struct protosw *, struct domain *); +void tcp_input(struct mbuf *, int); +void tcp_mss(struct tcpcb *, int, unsigned int); +int tcp_mssopt(struct tcpcb *); +void tcp_drop_syn_sent(struct inpcb *, int); +void tcp_mtudisc(struct inpcb *, int); struct tcpcb * - tcp_newtcpcb(struct inpcb *); -int tcp_output(struct tcpcb *); -void tcp_respond(struct tcpcb *, void *, struct tcphdr *, struct mbuf *, +tcp_newtcpcb(struct inpcb *); +int tcp_output(struct tcpcb *); +void tcp_respond(struct tcpcb *, void *, struct tcphdr *, struct mbuf *, tcp_seq, tcp_seq, int, struct tcp_respond_args *); struct rtentry * - tcp_rtlookup(struct inpcb *, unsigned int); -void tcp_setpersist(struct tcpcb *); -void tcp_gc(struct inpcbinfo *); +tcp_rtlookup(struct inpcb *, unsigned int); +void tcp_setpersist(struct tcpcb *); +void tcp_gc(struct inpcbinfo *); void tcp_itimer(struct inpcbinfo *ipi); -void tcp_check_timer_state(struct tcpcb *tp); -void tcp_run_timerlist(void *arg1, void *arg2); +void tcp_check_timer_state(struct tcpcb *tp); +void tcp_run_timerlist(void *arg1, void *arg2); struct tcptemp *tcp_maketemplate(struct tcpcb *); -void tcp_fillheaders(struct tcpcb *, void *, void *); +void tcp_fillheaders(struct tcpcb *, void *, void *); struct tcpcb *tcp_timers(struct tcpcb *, int); -void tcp_trace(int, int, struct tcpcb *, void *, struct tcphdr *, int); +void tcp_trace(int, int, struct tcpcb *, void *, struct tcphdr *, int); void tcp_fill_info(struct tcpcb *, struct tcp_info *); void tcp_sack_doack(struct tcpcb *, struct tcpopt *, struct tcphdr *, @@ -1596,16 +1595,16 @@ extern boolean_t tcp_sack_process_dsack(struct tcpcb *, struct tcpopt *, struct tcphdr *); int tcp_detect_bad_rexmt(struct tcpcb *, struct tcphdr *, struct tcpopt *, u_int32_t rxtime); -void tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_laststart, tcp_seq rcv_lastend); -void tcp_clean_sackreport(struct tcpcb *tp); -void tcp_sack_adjust(struct tcpcb *tp); +void tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_laststart, tcp_seq rcv_lastend); +void tcp_clean_sackreport(struct tcpcb *tp); +void tcp_sack_adjust(struct tcpcb *tp); struct sackhole *tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt); -void tcp_sack_partialack(struct tcpcb *, struct tcphdr *); -void 
tcp_free_sackholes(struct tcpcb *tp); -int32_t tcp_sbspace(struct tcpcb *tp); -void tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp); +void tcp_sack_partialack(struct tcpcb *, struct tcphdr *); +void tcp_free_sackholes(struct tcpcb *tp); +int32_t tcp_sbspace(struct tcpcb *tp); +void tcp_set_tso(struct tcpcb *tp, struct ifnet *ifp); void tcp_set_ecn(struct tcpcb *tp, struct ifnet *ifp); -void tcp_reset_stretch_ack(struct tcpcb *tp); +void tcp_reset_stretch_ack(struct tcpcb *tp); extern void tcp_get_ports_used(u_int32_t, int, u_int32_t, bitstr_t *); uint32_t tcp_count_opportunistic(unsigned int ifindex, u_int32_t flags); uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa); @@ -1619,30 +1618,30 @@ extern void tcp_set_foreground_cc(struct socket *); extern void tcp_set_recv_bg(struct socket *); extern void tcp_clear_recv_bg(struct socket *); extern boolean_t tcp_sack_byte_islost(struct tcpcb *tp); -#define IS_TCP_RECV_BG(_so) \ +#define IS_TCP_RECV_BG(_so) \ ((_so)->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG) #if TRAFFIC_MGT #define CLEAR_IAJ_STATE(_tp_) (_tp_)->iaj_rcv_ts = 0 -void reset_acc_iaj(struct tcpcb *tp); +void reset_acc_iaj(struct tcpcb *tp); #endif /* TRAFFIC_MGT */ -int tcp_lock (struct socket *, int, void *); -int tcp_unlock (struct socket *, int, void *); -void calculate_tcp_clock(void); +int tcp_lock(struct socket *, int, void *); +int tcp_unlock(struct socket *, int, void *); +void calculate_tcp_clock(void); extern void tcp_keepalive_reset(struct tcpcb *); extern uint32_t get_base_rtt(struct tcpcb *tp); #ifdef _KERN_LOCKS_H_ -lck_mtx_t * tcp_getlock (struct socket *, int); +lck_mtx_t * tcp_getlock(struct socket *, int); #else -void * tcp_getlock (struct socket *, int); +void * tcp_getlock(struct socket *, int); #endif -extern struct pr_usrreqs tcp_usrreqs; -extern u_int32_t tcp_sendspace; -extern u_int32_t tcp_recvspace; +extern struct pr_usrreqs tcp_usrreqs; +extern u_int32_t tcp_sendspace; +extern u_int32_t tcp_recvspace; tcp_seq tcp_new_isn(struct tcpcb *); extern int tcp_input_checksum(int, struct mbuf *, struct tcphdr *, int, int); @@ -1667,7 +1666,7 @@ extern void tcp_fill_keepalive_offload_frames(struct ifnet *, extern boolean_t tfo_enabled(const struct tcpcb *tp); extern void tcp_disable_tfo(struct tcpcb *tp); extern void tcp_tfo_gen_cookie(struct inpcb *inp, u_char *out, size_t blk_size); -#define TCP_FASTOPEN_KEYLEN 16 +#define TCP_FASTOPEN_KEYLEN 16 extern int tcp_freeq(struct tcpcb *tp); extern errno_t tcp_notify_ack_id_valid(struct tcpcb *, struct socket *, u_int32_t); extern errno_t tcp_add_notify_ack_marker(struct tcpcb *, u_int32_t); @@ -1684,15 +1683,15 @@ extern bool tcp_notify_ack_active(struct socket *so); #if MPTCP extern int mptcp_input_preproc(struct tcpcb *tp, struct mbuf *m, - struct tcphdr *th, int drop_hdrlen); + struct tcphdr *th, int drop_hdrlen); extern uint32_t mptcp_output_csum(struct mbuf *m, uint64_t dss_val, - uint32_t sseq, uint16_t dlen); + uint32_t sseq, uint16_t dlen); extern int mptcp_adj_mss(struct tcpcb *, boolean_t); extern void mptcp_insert_rmap(struct tcpcb *tp, struct mbuf *m, struct tcphdr *th); #endif __private_extern__ void tcp_update_stats_per_flow( - struct ifnet_stats_per_flow *, struct ifnet *); + struct ifnet_stats_per_flow *, struct ifnet *); #endif /* BSD_KERNEL_RPIVATE */ diff --git a/bsd/netinet/tcpip.h b/bsd/netinet/tcpip.h index 4868d6e0d..6ebc69cdf 100644 --- a/bsd/netinet/tcpip.h +++ b/bsd/netinet/tcpip.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
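[Editor's note on the TCP_RTT_SCALE / TCP_RTTVAR_SCALE block above: the comment gives the smoothing rule smoothed = (1/scale)*sample + ((scale-1)/scale)*smoothed over fixed-point estimators. A minimal sketch of the arithmetic it implies follows — a hypothetical helper for illustration only, not the kernel's actual tcp_xmit_timer() path; rtt_ms is a millisecond sample, and srtt/rttvar hold the scaled values.]

	#include <stdlib.h>	/* abs() */

	#define TCP_RTT_SCALE		32	/* srtt stored as srtt_ms * 32 */
	#define TCP_RTTVAR_SCALE	16	/* rttvar stored as rttvar_ms * 16 */

	/* smoothed = (1/scale)*sample + ((scale-1)/scale)*smoothed */
	static void
	rtt_sample(int *srtt, int *rttvar, int rtt_ms)
	{
		if (*srtt == 0) {	/* first sample seeds both estimators */
			*srtt = rtt_ms * TCP_RTT_SCALE;
			*rttvar = rtt_ms * TCP_RTTVAR_SCALE / 2;
			return;
		}
		/* error between the new sample and the current estimate, in ms */
		int err = rtt_ms - *srtt / TCP_RTT_SCALE;
		/* adding err to the scaled value moves the estimate by err/scale ms */
		*srtt += err;
		/* variance tracks |err| with gain 1/TCP_RTTVAR_SCALE */
		*rttvar += abs(err) - *rttvar / TCP_RTTVAR_SCALE;
	}

[Keeping the estimators scaled avoids floating point in the kernel: the integer add of err shifts the true estimate by only err/scale milliseconds, which is exactly the low-pass filter the comment describes.]
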
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -69,33 +69,33 @@ * Tcp+ip header, after ip options removed. */ struct tcpiphdr { - struct ipovly ti_i; /* overlaid ip structure */ - struct tcphdr ti_t; /* tcp header */ + struct ipovly ti_i; /* overlaid ip structure */ + struct tcphdr ti_t; /* tcp header */ }; #ifdef notyet /* * Tcp+ip header, after ip options removed but including TCP options. */ struct full_tcpiphdr { - struct ipovly ti_i; /* overlaid ip structure */ - struct tcphdr ti_t; /* tcp header */ - char ti_o[TCP_MAXOLEN]; /* space for tcp options */ + struct ipovly ti_i; /* overlaid ip structure */ + struct tcphdr ti_t; /* tcp header */ + char ti_o[TCP_MAXOLEN]; /* space for tcp options */ }; #endif /* notyet */ -#define ti_x1 ti_i.ih_x1 -#define ti_pr ti_i.ih_pr -#define ti_len ti_i.ih_len -#define ti_src ti_i.ih_src -#define ti_dst ti_i.ih_dst -#define ti_sport ti_t.th_sport -#define ti_dport ti_t.th_dport -#define ti_seq ti_t.th_seq -#define ti_ack ti_t.th_ack -#define ti_x2 ti_t.th_x2 -#define ti_off ti_t.th_off -#define ti_flags ti_t.th_flags -#define ti_win ti_t.th_win -#define ti_sum ti_t.th_sum -#define ti_urp ti_t.th_urp +#define ti_x1 ti_i.ih_x1 +#define ti_pr ti_i.ih_pr +#define ti_len ti_i.ih_len +#define ti_src ti_i.ih_src +#define ti_dst ti_i.ih_dst +#define ti_sport ti_t.th_sport +#define ti_dport ti_t.th_dport +#define ti_seq ti_t.th_seq +#define ti_ack ti_t.th_ack +#define ti_x2 ti_t.th_x2 +#define ti_off ti_t.th_off +#define ti_flags ti_t.th_flags +#define ti_win ti_t.th_win +#define ti_sum ti_t.th_sum +#define ti_urp ti_t.th_urp #endif diff --git a/bsd/netinet/udp.h b/bsd/netinet/udp.h index 3f4a47346..bad136e63 100644 --- a/bsd/netinet/udp.h +++ b/bsd/netinet/udp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -63,25 +63,25 @@ #ifndef _NETINET_UDP_H_ #define _NETINET_UDP_H_ #include -#include /* u_short */ +#include /* u_short */ /* * Udp protocol header. * Per RFC 768, September, 1981. */ struct udphdr { - u_short uh_sport; /* source port */ - u_short uh_dport; /* destination port */ - u_short uh_ulen; /* udp length */ - u_short uh_sum; /* udp checksum */ + u_short uh_sport; /* source port */ + u_short uh_dport; /* destination port */ + u_short uh_ulen; /* udp length */ + u_short uh_sum; /* udp checksum */ }; /* * User-settable options (used with setsockopt). */ -#define UDP_NOCKSUM 0x01 /* don't checksum outbound payloads */ +#define UDP_NOCKSUM 0x01 /* don't checksum outbound payloads */ #ifdef PRIVATE -#define UDP_KEEPALIVE_OFFLOAD 0x02 /* Send keep-alive at a given interval */ +#define UDP_KEEPALIVE_OFFLOAD 0x02 /* Send keep-alive at a given interval */ #endif /* PRIVATE */ #ifdef PRIVATE @@ -96,19 +96,19 @@ struct udphdr { * peer-to-peer communication without keeping the host processor awake. * * The application will pass this data to the kernel using setsockopt. It - * can set the interval to 0 to disable keepalive offload. + * can set the interval to 0 to disable keepalive offload. */ -#define UDP_KEEPALIVE_OFFLOAD_DATA_SIZE 32 +#define UDP_KEEPALIVE_OFFLOAD_DATA_SIZE 32 /* Maximum keep alive interval in seconds */ -#define UDP_KEEPALIVE_INTERVAL_MAX_SECONDS 65536 +#define UDP_KEEPALIVE_INTERVAL_MAX_SECONDS 65536 struct udp_keepalive_offload { u_char ka_data[UDP_KEEPALIVE_OFFLOAD_DATA_SIZE]; - u_int16_t ka_interval; /* interval in seconds */ - u_int8_t ka_data_len; /* valid length of ka_data */ - u_int8_t ka_type; /* type of application */ -#define UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY 0x1 + u_int16_t ka_interval; /* interval in seconds */ + u_int8_t ka_data_len; /* valid length of ka_data */ + u_int8_t ka_type; /* type of application */ +#define UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY 0x1 }; #endif /* PRIVATE */ diff --git a/bsd/netinet/udp_usrreq.c b/bsd/netinet/udp_usrreq.c index f11f5a4a5..571afd2ac 100644 --- a/bsd/netinet/udp_usrreq.c +++ b/bsd/netinet/udp_usrreq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. 
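An aside on the keepalive-offload interface defined in the udp.h hunk above: a user-space sketch of how an entitled application might arm it. This is illustrative only — UDP_KEEPALIVE_OFFLOAD and struct udp_keepalive_offload are guarded by PRIVATE, the function name here is mine, and the kernel-side handling (udp_ctloutput, later in this patch) rejects a zero ka_type with EINVAL and treats a zero ka_interval as "disable".

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>

static int
enable_udp_keepalive_offload(int fd)
{
        struct udp_keepalive_offload ka;

        memset(&ka, 0, sizeof(ka));
        ka.ka_interval = 30;            /* seconds between offloaded probes */
        ka.ka_type = UDP_KEEPALIVE_OFFLOAD_TYPE_AIRPLAY;
        ka.ka_data_len = 1;             /* valid bytes of ka_data */
        ka.ka_data[0] = 0xff;           /* frame the hardware should repeat */

        /* ka_type == 0 is rejected; ka_interval == 0 disables the offload */
        return setsockopt(fd, IPPROTO_UDP, UDP_KEEPALIVE_OFFLOAD,
            &ka, sizeof(ka));
}

Per the header comment above, the data and interval are handed to the underlying interface so the NIC can emit the keepalives while the host processor sleeps.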
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -123,39 +123,39 @@ extern int esp_udp_encap_port; #include #endif /* CONTENT_FILTER */ -#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETUDP, 0) -#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETUDP, 2) -#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETUDP, 1) -#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETUDP, 3) -#define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8)) -#define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1) +#define DBG_LAYER_IN_BEG NETDBG_CODE(DBG_NETUDP, 0) +#define DBG_LAYER_IN_END NETDBG_CODE(DBG_NETUDP, 2) +#define DBG_LAYER_OUT_BEG NETDBG_CODE(DBG_NETUDP, 1) +#define DBG_LAYER_OUT_END NETDBG_CODE(DBG_NETUDP, 3) +#define DBG_FNC_UDP_INPUT NETDBG_CODE(DBG_NETUDP, (5 << 8)) +#define DBG_FNC_UDP_OUTPUT NETDBG_CODE(DBG_NETUDP, (6 << 8) | 1) /* * UDP protocol implementation. * Per RFC 768, August, 1980. */ -#ifndef COMPAT_42 +#ifndef COMPAT_42 static int udpcksum = 1; #else -static int udpcksum = 0; /* XXX */ +static int udpcksum = 0; /* XXX */ #endif SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW | CTLFLAG_LOCKED, &udpcksum, 0, ""); int udp_log_in_vain = 0; SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW | CTLFLAG_LOCKED, - &udp_log_in_vain, 0, "Log all incoming UDP packets"); + &udp_log_in_vain, 0, "Log all incoming UDP packets"); static int blackhole = 0; SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW | CTLFLAG_LOCKED, - &blackhole, 0, "Do not send port unreachables for refused connects"); + &blackhole, 0, "Do not send port unreachables for refused connects"); -struct inpcbhead udb; /* from udp_var.h */ -#define udb6 udb /* for KAME src sync over BSD*'s */ +struct inpcbhead udb; /* from udp_var.h */ +#define udb6 udb /* for KAME src sync over BSD*'s */ struct inpcbinfo udbinfo; #ifndef UDBHASHSIZE -#define UDBHASHSIZE 16 +#define UDBHASHSIZE 16 #endif /* Garbage collection performed during most recent udp_gc() run */ @@ -167,43 +167,43 @@ extern void ipfwsyslog(int level, const char *format, ...); extern void ipfw_stealth_stats_incr_udp(void); /* Apple logging, log to ipfw.log */ -#define log_in_vain_log(a) { \ - if ((udp_log_in_vain == 3) && (fw_verbose == 2)) { \ - ipfwsyslog a; \ +#define log_in_vain_log(a) { \ + if ((udp_log_in_vain == 3) && (fw_verbose == 2)) { \ + ipfwsyslog a; \ } else if ((udp_log_in_vain == 4) && (fw_verbose == 2)) { \ - ipfw_stealth_stats_incr_udp(); \ - } else { \ - log a; \ - } \ + ipfw_stealth_stats_incr_udp(); \ + } else { \ + log a; \ + } \ } #else /* !IPFIREWALL */ -#define log_in_vain_log(a) { log a; } +#define log_in_vain_log(a) { log a; } #endif /* !IPFIREWALL */ static int udp_getstat SYSCTL_HANDLER_ARGS; -struct udpstat udpstat; /* from udp_var.h */ +struct udpstat udpstat; /* from udp_var.h */ SYSCTL_PROC(_net_inet_udp, UDPCTL_STATS, stats, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, udp_getstat, "S,udpstat", - "UDP statistics (struct udpstat, netinet/udp_var.h)"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, udp_getstat, "S,udpstat", + "UDP statistics (struct udpstat, netinet/udp_var.h)"); SYSCTL_INT(_net_inet_udp, OID_AUTO, pcbcount, - CTLFLAG_RD | CTLFLAG_LOCKED, &udbinfo.ipi_count, 0, - "Number of active PCBs"); + CTLFLAG_RD | CTLFLAG_LOCKED, &udbinfo.ipi_count, 0, + "Number of active PCBs"); __private_extern__ int udp_use_randomport = 1; SYSCTL_INT(_net_inet_udp, OID_AUTO, randomize_ports, - CTLFLAG_RW | CTLFLAG_LOCKED, &udp_use_randomport, 0, - "Randomize UDP port numbers"); + CTLFLAG_RW | CTLFLAG_LOCKED, 
&udp_use_randomport, 0, + "Randomize UDP port numbers"); #if INET6 struct udp_in6 { - struct sockaddr_in6 uin6_sin; - u_char uin6_init_done : 1; + struct sockaddr_in6 uin6_sin; + u_char uin6_init_done : 1; }; struct udp_ip6 { - struct ip6_hdr uip6_ip6; - u_char uip6_init_done : 1; + struct ip6_hdr uip6_ip6; + u_char uip6_init_done : 1; }; int udp_abort(struct socket *); @@ -231,22 +231,22 @@ static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip); static void udp_gc(struct inpcbinfo *); struct pr_usrreqs udp_usrreqs = { - .pru_abort = udp_abort, - .pru_attach = udp_attach, - .pru_bind = udp_bind, - .pru_connect = udp_connect, - .pru_connectx = udp_connectx, - .pru_control = in_control, - .pru_detach = udp_detach, - .pru_disconnect = udp_disconnect, - .pru_disconnectx = udp_disconnectx, - .pru_peeraddr = in_getpeeraddr, - .pru_send = udp_send, - .pru_shutdown = udp_shutdown, - .pru_sockaddr = in_getsockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, - .pru_soreceive_list = soreceive_list, + .pru_abort = udp_abort, + .pru_attach = udp_attach, + .pru_bind = udp_bind, + .pru_connect = udp_connect, + .pru_connectx = udp_connectx, + .pru_control = in_control, + .pru_detach = udp_detach, + .pru_disconnect = udp_disconnect, + .pru_disconnectx = udp_disconnectx, + .pru_peeraddr = in_getpeeraddr, + .pru_send = udp_send, + .pru_shutdown = udp_shutdown, + .pru_sockaddr = in_getsockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, + .pru_soreceive_list = soreceive_list, }; void @@ -254,13 +254,14 @@ udp_init(struct protosw *pp, struct domain *dp) { #pragma unused(dp) static int udp_initialized = 0; - vm_size_t str_size; - struct inpcbinfo *pcbinfo; + vm_size_t str_size; + struct inpcbinfo *pcbinfo; - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); - if (udp_initialized) + if (udp_initialized) { return; + } udp_initialized = 1; uint32_t pool_size = (nmbclusters << MCLSHIFT) >> MBSHIFT; if (pool_size >= 96) { @@ -273,8 +274,8 @@ udp_init(struct protosw *pp, struct domain *dp) &udbinfo.ipi_hashmask); udbinfo.ipi_porthashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.ipi_porthashmask); - str_size = (vm_size_t) sizeof (struct inpcb); - udbinfo.ipi_zone = zinit(str_size, 80000*str_size, 8192, "udpcb"); + str_size = (vm_size_t) sizeof(struct inpcb); + udbinfo.ipi_zone = zinit(str_size, 80000 * str_size, 8192, "udpcb"); pcbinfo = &udbinfo; /* @@ -317,12 +318,12 @@ udp_input(struct mbuf *m, int iphlen) boolean_t wifi = (!cell && IFNET_IS_WIFI(ifp)); boolean_t wired = (!wifi && IFNET_IS_WIRED(ifp)); - bzero(&udp_in, sizeof (udp_in)); - udp_in.sin_len = sizeof (struct sockaddr_in); + bzero(&udp_in, sizeof(udp_in)); + udp_in.sin_len = sizeof(struct sockaddr_in); udp_in.sin_family = AF_INET; #if INET6 - bzero(&udp_in6, sizeof (udp_in6)); - udp_in6.uin6_sin.sin6_len = sizeof (struct sockaddr_in6); + bzero(&udp_in6, sizeof(udp_in6)); + udp_in6.uin6_sin.sin6_len = sizeof(struct sockaddr_in6); udp_in6.uin6_sin.sin6_family = AF_INET6; #endif /* INET6 */ @@ -339,17 +340,17 @@ udp_input(struct mbuf *m, int iphlen) * but we don't yet have a way to check the checksum * with options still present. */ - if (iphlen > sizeof (struct ip)) { + if (iphlen > sizeof(struct ip)) { ip_stripoptions(m); - iphlen = sizeof (struct ip); + iphlen = sizeof(struct ip); } /* * Get IP and UDP header together in first mbuf. 
*/ ip = mtod(m, struct ip *); - if (m->m_len < iphlen + sizeof (struct udphdr)) { - m = m_pullup(m, iphlen + sizeof (struct udphdr)); + if (m->m_len < iphlen + sizeof(struct udphdr)) { + m = m_pullup(m, iphlen + sizeof(struct udphdr)); if (m == NULL) { udpstat.udps_hdrops++; KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, @@ -375,7 +376,7 @@ udp_input(struct mbuf *m, int iphlen) */ len = ntohs((u_short)uh->uh_ulen); if (ip->ip_len != len) { - if (len > ip->ip_len || len < sizeof (struct udphdr)) { + if (len > ip->ip_len || len < sizeof(struct udphdr)) { udpstat.udps_badlen++; IF_UDP_STATINC(ifp, badlength); goto bad; @@ -392,8 +393,9 @@ udp_input(struct mbuf *m, int iphlen) /* * Checksum extended UDP header and data. */ - if (udp_input_checksum(m, uh, iphlen, len)) + if (udp_input_checksum(m, uh, iphlen, len)) { goto bad; + } isbroadcast = in_broadcast(ip->ip_dst, ifp); @@ -434,28 +436,33 @@ udp_input(struct mbuf *m, int iphlen) int skipit; #endif /* IPSEC */ - if (inp->inp_socket == NULL) + if (inp->inp_socket == NULL) { continue; + } if (inp != sotoinpcb(inp->inp_socket)) { panic("%s: bad so back ptr inp=%p\n", __func__, inp); /* NOTREACHED */ } #if INET6 - if ((inp->inp_vflag & INP_IPV4) == 0) + if ((inp->inp_vflag & INP_IPV4) == 0) { continue; + } #endif /* INET6 */ - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if ((inp->inp_moptions == NULL) && (ntohl(ip->ip_dst.s_addr) != - INADDR_ALLHOSTS_GROUP) && (isbroadcast == 0)) + INADDR_ALLHOSTS_GROUP) && (isbroadcast == 0)) { continue; + } if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == - WNT_STOPUSING) + WNT_STOPUSING) { continue; + } udp_lock(inp->inp_socket, 1, 0); @@ -496,29 +503,31 @@ udp_input(struct mbuf *m, int iphlen) } IMO_LOCK(imo); - bzero(&group, sizeof (struct sockaddr_in)); - group.sin_len = sizeof (struct sockaddr_in); + bzero(&group, sizeof(struct sockaddr_in)); + group.sin_len = sizeof(struct sockaddr_in); group.sin_family = AF_INET; group.sin_addr = ip->ip_dst; blocked = imo_multi_filter(imo, ifp, &group, &udp_in); - if (blocked == MCAST_PASS) + if (blocked == MCAST_PASS) { foundmembership = 1; + } IMO_UNLOCK(imo); if (!foundmembership) { udp_unlock(inp->inp_socket, 1, 0); if (blocked == MCAST_NOTSMEMBER || - blocked == MCAST_MUTED) + blocked == MCAST_MUTED) { udpstat.udps_filtermcast++; + } continue; } foundmembership = 0; } reuse_sock = (inp->inp_socket->so_options & - (SO_REUSEPORT|SO_REUSEADDR)); + (SO_REUSEPORT | SO_REUSEADDR)); #if NECP skipit = 0; @@ -533,15 +542,16 @@ udp_input(struct mbuf *m, int iphlen) { struct mbuf *n = NULL; - if (reuse_sock) + if (reuse_sock) { n = m_copy(m, 0, M_COPYALL); + } #if INET6 udp_append(inp, ip, m, - iphlen + sizeof (struct udphdr), + iphlen + sizeof(struct udphdr), &udp_in, &udp_in6, &udp_ip6, ifp); #else /* !INET6 */ udp_append(inp, ip, m, - iphlen + sizeof (struct udphdr), + iphlen + sizeof(struct udphdr), &udp_in, ifp); #endif /* !INET6 */ mcast_delivered++; @@ -558,8 +568,9 @@ udp_input(struct mbuf *m, int iphlen) * port. It assumes that an application will never * clear these options after setting them. 
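For context on the fan-out loop above: every PCB matching the multicast/broadcast tuple receives its own m_copy() of the datagram, but only while each receiving socket has SO_REUSEPORT or SO_REUSEADDR set before bind. From user space, the cooperating listeners look roughly like this (a minimal sketch with placeholder port and group; error handling elided):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Open one of N sockets that all receive copies of the same multicast
 * datagrams on port 5353; each must set SO_REUSEPORT *before* bind(),
 * matching the reuse_sock check in the delivery loop above. */
static int
open_mcast_listener(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int on = 1;
        struct sockaddr_in sin;
        struct ip_mreq mr;

        setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));

        memset(&sin, 0, sizeof(sin));
        sin.sin_len = sizeof(sin);
        sin.sin_family = AF_INET;
        sin.sin_port = htons(5353);
        bind(fd, (struct sockaddr *)&sin, sizeof(sin));

        mr.imr_multiaddr.s_addr = inet_addr("224.0.0.251");
        mr.imr_interface.s_addr = htonl(INADDR_ANY);
        setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr, sizeof(mr));
        return fd;
}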
*/ - if (reuse_sock == 0 || m == NULL) + if (reuse_sock == 0 || m == NULL) { break; + } /* * Expect 32-bit aligned data pointer on strict-align @@ -586,8 +597,9 @@ udp_input(struct mbuf *m, int iphlen) } /* free the extra copy of mbuf or skipped by IPSec */ - if (m != NULL) + if (m != NULL) { m_freem(m); + } KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; } @@ -602,11 +614,11 @@ udp_input(struct mbuf *m, int iphlen) */ if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 && uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) { - int payload_len = len - sizeof (struct udphdr) > 4 ? 4 : - len - sizeof (struct udphdr); + int payload_len = len - sizeof(struct udphdr) > 4 ? 4 : + len - sizeof(struct udphdr); - if (m->m_len < iphlen + sizeof (struct udphdr) + payload_len) { - if ((m = m_pullup(m, iphlen + sizeof (struct udphdr) + + if (m->m_len < iphlen + sizeof(struct udphdr) + payload_len) { + if ((m = m_pullup(m, iphlen + sizeof(struct udphdr) + payload_len)) == NULL) { udpstat.udps_hdrops++; KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, @@ -624,18 +636,18 @@ udp_input(struct mbuf *m, int iphlen) } /* Check for NAT keepalive packet */ if (payload_len == 1 && *(u_int8_t *) - ((caddr_t)uh + sizeof (struct udphdr)) == 0xFF) { + ((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) { m_freem(m); KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); return; } else if (payload_len == 4 && *(u_int32_t *)(void *) - ((caddr_t)uh + sizeof (struct udphdr)) != 0) { + ((caddr_t)uh + sizeof(struct udphdr)) != 0) { /* UDP encapsulated IPSec packet to pass through NAT */ KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); /* preserve the udp header */ - esp4_input(m, iphlen + sizeof (struct udphdr)); + esp4_input(m, iphlen + sizeof(struct udphdr)); return; } } @@ -657,18 +669,18 @@ udp_input(struct mbuf *m, int iphlen) if (udp_log_in_vain < 3) { log(LOG_INFO, "Connection attempt to " "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET, - &ip->ip_dst, buf, sizeof (buf)), - ntohs(uh->uh_dport), inet_ntop(AF_INET, - &ip->ip_src, buf2, sizeof (buf2)), - ntohs(uh->uh_sport)); + &ip->ip_dst, buf, sizeof(buf)), + ntohs(uh->uh_dport), inet_ntop(AF_INET, + &ip->ip_src, buf2, sizeof(buf2)), + ntohs(uh->uh_sport)); } else if (!(m->m_flags & (M_BCAST | M_MCAST)) && ip->ip_dst.s_addr != ip->ip_src.s_addr) { log_in_vain_log((LOG_INFO, "Stealth Mode connection attempt to " "UDP %s:%d from %s:%d\n", inet_ntop(AF_INET, - &ip->ip_dst, buf, sizeof (buf)), + &ip->ip_dst, buf, sizeof(buf)), ntohs(uh->uh_dport), inet_ntop(AF_INET, - &ip->ip_src, buf2, sizeof (buf2)), + &ip->ip_src, buf2, sizeof(buf2)), ntohs(uh->uh_sport))) } } @@ -678,12 +690,15 @@ udp_input(struct mbuf *m, int iphlen) goto bad; } #if ICMP_BANDLIM - if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0) + if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0) { goto bad; + } #endif /* ICMP_BANDLIM */ - if (blackhole) - if (ifp && ifp->if_type != IFT_LOOP) + if (blackhole) { + if (ifp && ifp->if_type != IFT_LOOP) { goto bad; + } + } *ip = save_ip; ip->ip_len += iphlen; icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0); @@ -715,7 +730,7 @@ udp_input(struct mbuf *m, int iphlen) if ((inp->inp_flags & INP_CONTROLOPTS) != 0 || (inp->inp_socket->so_options & SO_TIMESTAMP) != 0 || (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { #if INET6 if (inp->inp_vflag & INP_IPV6) { int savedflags; @@ -735,7 +750,7 
@@ udp_input(struct mbuf *m, int iphlen) goto bad; } } - m_adj(m, iphlen + sizeof (struct udphdr)); + m_adj(m, iphlen + sizeof(struct udphdr)); KERNEL_DEBUG(DBG_LAYER_IN_END, uh->uh_dport, uh->uh_sport, save_ip.ip_src.s_addr, save_ip.ip_dst.s_addr, uh->uh_ulen); @@ -766,8 +781,9 @@ udp_input(struct mbuf *m, int iphlen) return; bad: m_freem(m); - if (opts) + if (opts) { m_freem(opts); + } KERNEL_DEBUG(DBG_FNC_UDP_INPUT | DBG_FUNC_END, 0, 0, 0, 0, 0); } @@ -775,7 +791,7 @@ bad: static void ip_2_ip6_hdr(struct ip6_hdr *ip6, struct ip *ip) { - bzero(ip6, sizeof (*ip6)); + bzero(ip6, sizeof(*ip6)); ip6->ip6_vfc = IPV6_VERSION; ip6->ip6_plen = ip->ip_len; @@ -821,7 +837,7 @@ udp_append(struct inpcb *last, struct ip *ip, struct mbuf *n, int off, if ((last->inp_flags & INP_CONTROLOPTS) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP) != 0 || (last->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (last->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { #if INET6 if (last->inp_vflag & INP_IPV6) { int savedflags; @@ -898,8 +914,9 @@ udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet * if struct inpcb *inp = NULL; faddr = ((struct sockaddr_in *)(void *)sa)->sin_addr; - if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) + if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY) { return; + } if (PRC_IS_REDIRECT(cmd)) { ip = 0; @@ -912,13 +929,13 @@ udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet * if if (ip) { struct udphdr uh; - bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof (uh)); + bcopy(((caddr_t)ip + (ip->ip_hl << 2)), &uh, sizeof(uh)); inp = in_pcblookup_hash(&udbinfo, faddr, uh.uh_dport, ip->ip_src, uh.uh_sport, 0, NULL); if (inp != NULL && inp->inp_socket != NULL) { udp_lock(inp->inp_socket, 1, 0); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == - WNT_STOPUSING) { + WNT_STOPUSING) { udp_unlock(inp->inp_socket, 1, 0); return; } @@ -933,13 +950,14 @@ udp_ctlinput(int cmd, struct sockaddr *sa, void *vip, __unused struct ifnet * if int udp_ctloutput(struct socket *so, struct sockopt *sopt) { - int error = 0, optval = 0; - struct inpcb *inp; + int error = 0, optval = 0; + struct inpcb *inp; /* Allow at this level */ if (sopt->sopt_level != IPPROTO_UDP && - !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) - return (ip_ctloutput(so, sopt)); + !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) { + return ip_ctloutput(so, sopt); + } inp = sotoinpcb(so); @@ -953,14 +971,16 @@ udp_ctloutput(struct socket *so, struct sockopt *sopt) break; } - if ((error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval))) != 0) + if ((error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval))) != 0) { break; + } - if (optval != 0) + if (optval != 0) { inp->inp_flags |= INP_UDP_NOCKSUM; - else + } else { inp->inp_flags &= ~INP_UDP_NOCKSUM; + } break; case UDP_KEEPALIVE_OFFLOAD: { @@ -980,21 +1000,24 @@ udp_ctloutput(struct socket *so, struct sockopt *sopt) break; } if ((error = sooptcopyin(sopt, &ka, sizeof(ka), - sizeof(ka))) != 0) + sizeof(ka))) != 0) { break; + } /* application should specify the type */ - if (ka.ka_type == 0) - return (EINVAL); + if (ka.ka_type == 0) { + return EINVAL; + } if (ka.ka_interval == 0) { /* * if interval is 0, disable the offload * mechanism */ - if (inp->inp_keepalive_data != NULL) + if (inp->inp_keepalive_data != NULL) { FREE(inp->inp_keepalive_data, M_TEMP); + } 
inp->inp_keepalive_data = NULL; inp->inp_keepalive_datalen = 0; inp->inp_keepalive_interval = 0; @@ -1008,8 +1031,8 @@ udp_ctloutput(struct socket *so, struct sockopt *sopt) } inp->inp_keepalive_datalen = min( - ka.ka_data_len, - UDP_KEEPALIVE_OFFLOAD_DATA_SIZE); + ka.ka_data_len, + UDP_KEEPALIVE_OFFLOAD_DATA_SIZE); if (inp->inp_keepalive_datalen > 0) { MALLOC(inp->inp_keepalive_data, u_int8_t *, @@ -1035,9 +1058,10 @@ udp_ctloutput(struct socket *so, struct sockopt *sopt) break; } case SO_FLUSH: - if ((error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval))) != 0) + if ((error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval))) != 0) { break; + } error = inp_flush(inp, optval); break; @@ -1058,11 +1082,12 @@ udp_ctloutput(struct socket *so, struct sockopt *sopt) error = ENOPROTOOPT; break; } - if (error == 0) - error = sooptcopyout(sopt, &optval, sizeof (optval)); + if (error == 0) { + error = sooptcopyout(sopt, &optval, sizeof(optval)); + } break; } - return (error); + return error; } static int @@ -1081,15 +1106,15 @@ udp_pcblist SYSCTL_HANDLER_ARGS lck_rw_lock_exclusive(udbinfo.ipi_lock); if (req->oldptr == USER_ADDR_NULL) { n = udbinfo.ipi_count; - req->oldidx = 2 * (sizeof (xig)) - + (n + n/8) * sizeof (struct xinpcb); + req->oldidx = 2 * (sizeof(xig)) + + (n + n / 8) * sizeof(struct xinpcb); lck_rw_done(udbinfo.ipi_lock); - return (0); + return 0; } if (req->newptr != USER_ADDR_NULL) { lck_rw_done(udbinfo.ipi_lock); - return (EPERM); + return EPERM; } /* @@ -1098,35 +1123,36 @@ udp_pcblist SYSCTL_HANDLER_ARGS gencnt = udbinfo.ipi_gencnt; n = udbinfo.ipi_count; - bzero(&xig, sizeof (xig)); - xig.xig_len = sizeof (xig); + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof(xig); xig.xig_count = n; xig.xig_gen = gencnt; xig.xig_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xig, sizeof (xig)); + error = SYSCTL_OUT(req, &xig, sizeof(xig)); if (error) { lck_rw_done(udbinfo.ipi_lock); - return (error); + return error; } /* * We are done if there is no pcb */ if (n == 0) { lck_rw_done(udbinfo.ipi_lock); - return (0); + return 0; } - inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK); + inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK); if (inp_list == 0) { lck_rw_done(udbinfo.ipi_lock); - return (ENOMEM); + return ENOMEM; } for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n; inp = LIST_NEXT(inp, inp_list)) { if (inp->inp_gencnt <= gencnt && - inp->inp_state != INPCB_STATE_DEAD) + inp->inp_state != INPCB_STATE_DEAD) { inp_list[i++] = inp; + } } n = i; @@ -1136,8 +1162,9 @@ udp_pcblist SYSCTL_HANDLER_ARGS inp = inp_list[i]; - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { continue; + } udp_lock(inp->inp_socket, 1, 0); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { udp_unlock(inp->inp_socket, 1, 0); @@ -1148,16 +1175,17 @@ udp_pcblist SYSCTL_HANDLER_ARGS continue; } - bzero(&xi, sizeof (xi)); - xi.xi_len = sizeof (xi); + bzero(&xi, sizeof(xi)); + xi.xi_len = sizeof(xi); /* XXX should avoid extra copy */ inpcb_to_compat(inp, &xi.xi_inp); - if (inp->inp_socket) + if (inp->inp_socket) { sotoxsocket(inp->inp_socket, &xi.xi_socket); + } udp_unlock(inp->inp_socket, 1, 0); - error = SYSCTL_OUT(req, &xi, sizeof (xi)); + error = SYSCTL_OUT(req, &xi, sizeof(xi)); } if (!error) { /* @@ -1167,21 +1195,21 @@ udp_pcblist SYSCTL_HANDLER_ARGS * while we were processing this request, and it * might be necessary to retry. 
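A consumer of this sysctl follows the contract the surrounding code describes: probe for a size with a NULL old pointer, copy the buffer out, then compare the leading and trailing struct xinpgen generation counts and retry if the PCB list changed mid-copy. A rough user-space sketch (function name is mine; the xinpgen/xinpcb layouts come from the private netinet/in_pcb.h):

#include <stdio.h>
#include <stdlib.h>
#include <sys/sysctl.h>

static int
dump_udp_pcblist(void)
{
        size_t len = 0;
        char *buf;

        /* sizing probe: the kernel's oldptr == NULL path above estimates
         * two generation records plus n + n/8 xinpcb entries */
        if (sysctlbyname("net.inet.udp.pcblist", NULL, &len, NULL, 0) == -1)
                return -1;

        if ((buf = malloc(len)) == NULL)
                return -1;
        if (sysctlbyname("net.inet.udp.pcblist", buf, &len, NULL, 0) == -1) {
                free(buf);
                return -1;      /* list may have grown; caller can retry */
        }

        /* buf begins and ends with a struct xinpgen; if the two xig_gen
         * values differ, the PCB list changed mid-copy and the caller
         * should loop and try again, as the comment above warns. */
        printf("pcblist: %zu bytes\n", len);
        free(buf);
        return 0;
}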
*/ - bzero(&xig, sizeof (xig)); - xig.xig_len = sizeof (xig); + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof(xig); xig.xig_gen = udbinfo.ipi_gencnt; xig.xig_sogen = so_gencnt; xig.xig_count = udbinfo.ipi_count; - error = SYSCTL_OUT(req, &xig, sizeof (xig)); + error = SYSCTL_OUT(req, &xig, sizeof(xig)); } FREE(inp_list, M_TEMP); lck_rw_done(udbinfo.ipi_lock); - return (error); + return error; } SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist, - "S,xinpcb", "List of active UDP sockets"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist, + "S,xinpcb", "List of active UDP sockets"); #if !CONFIG_EMBEDDED @@ -1202,14 +1230,14 @@ udp_pcblist64 SYSCTL_HANDLER_ARGS if (req->oldptr == USER_ADDR_NULL) { n = udbinfo.ipi_count; req->oldidx = - 2 * (sizeof (xig)) + (n + n/8) * sizeof (struct xinpcb64); + 2 * (sizeof(xig)) + (n + n / 8) * sizeof(struct xinpcb64); lck_rw_done(udbinfo.ipi_lock); - return (0); + return 0; } if (req->newptr != USER_ADDR_NULL) { lck_rw_done(udbinfo.ipi_lock); - return (EPERM); + return EPERM; } /* @@ -1218,35 +1246,36 @@ udp_pcblist64 SYSCTL_HANDLER_ARGS gencnt = udbinfo.ipi_gencnt; n = udbinfo.ipi_count; - bzero(&xig, sizeof (xig)); - xig.xig_len = sizeof (xig); + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof(xig); xig.xig_count = n; xig.xig_gen = gencnt; xig.xig_sogen = so_gencnt; - error = SYSCTL_OUT(req, &xig, sizeof (xig)); + error = SYSCTL_OUT(req, &xig, sizeof(xig)); if (error) { lck_rw_done(udbinfo.ipi_lock); - return (error); + return error; } /* * We are done if there is no pcb */ if (n == 0) { lck_rw_done(udbinfo.ipi_lock); - return (0); + return 0; } - inp_list = _MALLOC(n * sizeof (*inp_list), M_TEMP, M_WAITOK); + inp_list = _MALLOC(n * sizeof(*inp_list), M_TEMP, M_WAITOK); if (inp_list == 0) { lck_rw_done(udbinfo.ipi_lock); - return (ENOMEM); + return ENOMEM; } for (inp = LIST_FIRST(udbinfo.ipi_listhead), i = 0; inp && i < n; inp = LIST_NEXT(inp, inp_list)) { if (inp->inp_gencnt <= gencnt && - inp->inp_state != INPCB_STATE_DEAD) + inp->inp_state != INPCB_STATE_DEAD) { inp_list[i++] = inp; + } } n = i; @@ -1256,8 +1285,9 @@ udp_pcblist64 SYSCTL_HANDLER_ARGS inp = inp_list[i]; - if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == WNT_STOPUSING) { continue; + } udp_lock(inp->inp_socket, 1, 0); if (in_pcb_checkstate(inp, WNT_RELEASE, 1) == WNT_STOPUSING) { udp_unlock(inp->inp_socket, 1, 0); @@ -1268,15 +1298,16 @@ udp_pcblist64 SYSCTL_HANDLER_ARGS continue; } - bzero(&xi, sizeof (xi)); - xi.xi_len = sizeof (xi); + bzero(&xi, sizeof(xi)); + xi.xi_len = sizeof(xi); inpcb_to_xinpcb64(inp, &xi); - if (inp->inp_socket) + if (inp->inp_socket) { sotoxsocket64(inp->inp_socket, &xi.xi_socket); + } udp_unlock(inp->inp_socket, 1, 0); - error = SYSCTL_OUT(req, &xi, sizeof (xi)); + error = SYSCTL_OUT(req, &xi, sizeof(xi)); } if (!error) { /* @@ -1286,21 +1317,21 @@ udp_pcblist64 SYSCTL_HANDLER_ARGS * while we were processing this request, and it * might be necessary to retry. 
*/ - bzero(&xig, sizeof (xig)); - xig.xig_len = sizeof (xig); + bzero(&xig, sizeof(xig)); + xig.xig_len = sizeof(xig); xig.xig_gen = udbinfo.ipi_gencnt; xig.xig_sogen = so_gencnt; xig.xig_count = udbinfo.ipi_count; - error = SYSCTL_OUT(req, &xig, sizeof (xig)); + error = SYSCTL_OUT(req, &xig, sizeof(xig)); } FREE(inp_list, M_TEMP); lck_rw_done(udbinfo.ipi_lock); - return (error); + return error; } SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist64, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64, - "S,xinpcb64", "List of active UDP sockets"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist64, + "S,xinpcb64", "List of active UDP sockets"); #endif /* !CONFIG_EMBEDDED */ @@ -1308,12 +1339,12 @@ static int udp_pcblist_n SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - return (get_pcblist_n(IPPROTO_UDP, req, &udbinfo)); + return get_pcblist_n(IPPROTO_UDP, req, &udbinfo); } SYSCTL_PROC(_net_inet_udp, OID_AUTO, pcblist_n, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist_n, - "S,xinpcb_n", "List of active UDP sockets"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, udp_pcblist_n, + "S,xinpcb_n", "List of active UDP sockets"); __private_extern__ void udp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags, @@ -1326,13 +1357,13 @@ udp_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags, __private_extern__ uint32_t udp_count_opportunistic(unsigned int ifindex, u_int32_t flags) { - return (inpcb_count_opportunistic(ifindex, &udbinfo, flags)); + return inpcb_count_opportunistic(ifindex, &udbinfo, flags); } __private_extern__ uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa) { - return (inpcb_find_anypcb_byaddr(ifa, &udbinfo)); + return inpcb_find_anypcb_byaddr(ifa, &udbinfo); } static int @@ -1343,30 +1374,33 @@ udp_check_pktinfo(struct mbuf *control, struct ifnet **outif, struct in_pktinfo *pktinfo; struct ifnet *ifp; - if (outif != NULL) + if (outif != NULL) { *outif = NULL; + } /* * XXX: Currently, we assume all the optional information is stored * in a single mbuf. 
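udp_check_pktinfo(), whose rewritten cmsg walk appears just below, is the receiving end of an IP_PKTINFO control message supplied on sendmsg(2): it insists the cmsg length be exactly CMSG_LEN(sizeof(struct in_pktinfo)) and returns ENXIO for an out-of-range interface index. A user-space sketch of the sending side (function name and arguments are placeholders):

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t
send_with_pktinfo(int fd, const struct sockaddr_in *dst,
    const void *payload, size_t paylen, unsigned int ifindex)
{
        char cbuf[CMSG_SPACE(sizeof(struct in_pktinfo))];
        struct iovec iov = { .iov_base = (void *)payload, .iov_len = paylen };
        struct msghdr msg;
        struct cmsghdr *cm;
        struct in_pktinfo *pi;

        memset(&msg, 0, sizeof(msg));
        memset(cbuf, 0, sizeof(cbuf));
        msg.msg_name = (void *)dst;
        msg.msg_namelen = sizeof(*dst);
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_control = cbuf;
        msg.msg_controllen = sizeof(cbuf);

        cm = CMSG_FIRSTHDR(&msg);
        cm->cmsg_level = IPPROTO_IP;
        cm->cmsg_type = IP_PKTINFO;
        /* the kernel requires this exact length, per the check below */
        cm->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
        pi = (struct in_pktinfo *)CMSG_DATA(cm);
        pi->ipi_ifindex = ifindex;      /* 0 means "no preference" */

        return sendmsg(fd, &msg, 0);
}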
*/ - if (control->m_next) - return (EINVAL); + if (control->m_next) { + return EINVAL; + } - if (control->m_len < CMSG_LEN(0)) - return (EINVAL); + if (control->m_len < CMSG_LEN(0)) { + return EINVAL; + } - for (cm = M_FIRST_CMSGHDR(control); cm; + for (cm = M_FIRST_CMSGHDR(control); + is_cmsg_valid(control, cm); cm = M_NXT_CMSGHDR(control, cm)) { - if (cm->cmsg_len < sizeof (struct cmsghdr) || - cm->cmsg_len > control->m_len) - return (EINVAL); - - if (cm->cmsg_level != IPPROTO_IP || cm->cmsg_type != IP_PKTINFO) + if (cm->cmsg_level != IPPROTO_IP || + cm->cmsg_type != IP_PKTINFO) { continue; + } - if (cm->cmsg_len != CMSG_LEN(sizeof (struct in_pktinfo))) - return (EINVAL); + if (cm->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo))) { + return EINVAL; + } pktinfo = (struct in_pktinfo *)(void *)CMSG_DATA(cm); @@ -1375,7 +1409,7 @@ udp_check_pktinfo(struct mbuf *control, struct ifnet **outif, if (pktinfo->ipi_ifindex > if_index) { ifnet_head_done(); - return (ENXIO); + return ENXIO; } /* @@ -1386,7 +1420,7 @@ udp_check_pktinfo(struct mbuf *control, struct ifnet **outif, ifp = ifindex2ifnet[pktinfo->ipi_ifindex]; if (ifp == NULL) { ifnet_head_done(); - return (ENXIO); + return ENXIO; } if (outif != NULL) { ifnet_reference(ifp); @@ -1406,7 +1440,7 @@ udp_check_pktinfo(struct mbuf *control, struct ifnet **outif, *laddr = pktinfo->ipi_spec_dst; break; } - return (0); + return 0; } int @@ -1468,8 +1502,8 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, */ addr = (struct sockaddr *)cfil_faddr; } else if ((so->so_state_change_cnt != cfil_so_state_change_cnt) && - (inp->inp_fport != sin->sin_port || - inp->inp_faddr.s_addr != sin->sin_addr.s_addr)) { + (inp->inp_fport != sin->sin_port || + inp->inp_faddr.s_addr != sin->sin_addr.s_addr)) { /* * Socket is connected but socket state and dest addr/port changed. * We need to use the saved faddr info. 
@@ -1486,11 +1520,13 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, error = udp_check_pktinfo(control, &outif, &pi_laddr); m_freem(control); control = NULL; - if (error) + if (error) { goto release; + } pktinfo++; - if (outif != NULL) + if (outif != NULL) { ipoa.ipoa_boundif = outif->if_index; + } } if (sotc == SO_TC_UNSPEC) { sotc = so->so_traffic_class; @@ -1499,9 +1535,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, KERNEL_DEBUG(DBG_LAYER_OUT_BEG, inp->inp_fport, inp->inp_lport, inp->inp_laddr.s_addr, inp->inp_faddr.s_addr, - (htons((u_short)len + sizeof (struct udphdr)))); + (htons((u_short)len + sizeof(struct udphdr)))); - if (len + sizeof (struct udpiphdr) > IP_MAXPACKET) { + if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) { error = EMSGSIZE; goto release; } @@ -1522,18 +1558,22 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, if (ipoa.ipoa_boundif == IFSCOPE_NONE && (inp->inp_flags & INP_BOUND_IF)) { VERIFY(inp->inp_boundifp != NULL); - ifnet_reference(inp->inp_boundifp); /* for this routine */ - if (outif != NULL) + ifnet_reference(inp->inp_boundifp); /* for this routine */ + if (outif != NULL) { ifnet_release(outif); + } outif = inp->inp_boundifp; ipoa.ipoa_boundif = outif->if_index; } - if (INP_NO_CELLULAR(inp)) + if (INP_NO_CELLULAR(inp)) { ipoa.ipoa_flags |= IPOAF_NO_CELLULAR; - if (INP_NO_EXPENSIVE(inp)) + } + if (INP_NO_EXPENSIVE(inp)) { ipoa.ipoa_flags |= IPOAF_NO_EXPENSIVE; - if (INP_AWDL_UNRESTRICTED(inp)) + } + if (INP_AWDL_UNRESTRICTED(inp)) { ipoa.ipoa_flags |= IPOAF_AWDL_UNRESTRICTED; + } ipoa.ipoa_sotc = sotc; ipoa.ipoa_netsvctype = netsvctype; soopts |= IP_OUTARGS; @@ -1548,9 +1588,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, */ if (ROUTE_UNUSABLE(&inp->inp_route) #if CONTENT_FILTER - || cfil_faddr_use + || cfil_faddr_use #endif - ) { + ) { struct in_ifaddr *ia = NULL; ROUTE_RELEASE(&inp->inp_route); @@ -1577,8 +1617,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, inp->inp_last_outifp = NULL; } } - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } } /* @@ -1605,8 +1646,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, fport = inp->inp_fport; #if CONTENT_FILTER - if (cfil_faddr_use) - { + if (cfil_faddr_use) { faddr = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_addr; fport = ((struct sockaddr_in *)(void *)cfil_faddr)->sin_port; } @@ -1626,8 +1666,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, * if the input runs at the same time we do this. */ /* if we have a source address specified, use that */ - if (pi_laddr.s_addr != INADDR_ANY) + if (pi_laddr.s_addr != INADDR_ANY) { inp->inp_laddr = pi_laddr; + } /* * If a scope is specified, use it. 
Scope from * IP_PKTINFO takes precendence over the the scope @@ -1635,8 +1676,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, */ error = in_pcbconnect(inp, addr, p, ipoa.ipoa_boundif, &outif); - if (error) + if (error) { goto release; + } laddr = inp->inp_laddr; lport = inp->inp_lport; @@ -1645,8 +1687,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, udp_dodisconnect = 1; /* synch up in case in_pcbladdr() overrides */ - if (outif != NULL && ipoa.ipoa_boundif != IFSCOPE_NONE) + if (outif != NULL && ipoa.ipoa_boundif != IFSCOPE_NONE) { ipoa.ipoa_boundif = outif->if_index; + } } else { /* * Fast path case @@ -1660,8 +1703,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, */ if (laddr.s_addr == INADDR_ANY) { if ((error = in_pcbladdr(inp, addr, &laddr, - ipoa.ipoa_boundif, &outif, 0)) != 0) + ipoa.ipoa_boundif, &outif, 0)) != 0) { goto release; + } /* * from pcbconnect: remember we don't * care about src addr. @@ -1670,8 +1714,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, /* synch up in case in_pcbladdr() overrides */ if (outif != NULL && - ipoa.ipoa_boundif != IFSCOPE_NONE) + ipoa.ipoa_boundif != IFSCOPE_NONE) { ipoa.ipoa_boundif = outif->if_index; + } } faddr = sin->sin_addr; @@ -1688,11 +1733,12 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, mac_mbuf_label_associate_inpcb(inp, m); #endif /* CONFIG_MACF_NET */ - if (inp->inp_flowhash == 0) + if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); + } if (fport == htons(53) && !(so->so_flags1 & SOF1_DNS_COUNTED)) { - so->so_flags1 |= SOF1_DNS_COUNTED; + so->so_flags1 |= SOF1_DNS_COUNTED; INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_dns); } @@ -1700,7 +1746,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, * Calculate data length and get a mbuf * for UDP and IP headers. */ - M_PREPEND(m, sizeof (struct udpiphdr), M_DONTWAIT, 1); + M_PREPEND(m, sizeof(struct udpiphdr), M_DONTWAIT, 1); if (m == 0) { error = ENOBUFS; goto abort; @@ -1711,13 +1757,13 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, * and addresses and length put into network format. */ ui = mtod(m, struct udpiphdr *); - bzero(ui->ui_x1, sizeof (ui->ui_x1)); /* XXX still needed? */ + bzero(ui->ui_x1, sizeof(ui->ui_x1)); /* XXX still needed? */ ui->ui_pr = IPPROTO_UDP; ui->ui_src = laddr; ui->ui_dst = faddr; ui->ui_sport = lport; ui->ui_dport = fport; - ui->ui_ulen = htons((u_short)len + sizeof (struct udphdr)); + ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr)); /* * Set up checksum to pseudo header checksum and output datagram. 
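For orientation between these hunks: the next hunk seeds ui_sum with in_pseudo() and sets CSUM_UDP | CSUM_ZERO_INVERT so the driver or NIC folds the payload into the checksum later. The arithmetic behind that seed, sketched in isolation — this is not Apple's in_pseudo(), just the ones'-complement sum it performs over the pseudo-header words:

#include <stdint.h>
#include <netinet/in.h>

static uint16_t
udp_pseudo_seed(uint32_t src, uint32_t dst, uint16_t udp_len)
{
        /* src/dst are already in network byte order; udp_len is the
         * host-order UDP length (header + payload) */
        uint64_t sum = (src & 0xffff) + (src >> 16)
            + (dst & 0xffff) + (dst >> 16)
            + htons(udp_len) + htons(IPPROTO_UDP);

        while (sum >> 16)               /* end-around carry fold */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;   /* stored uninverted in uh_sum as the seed */
}

CSUM_ZERO_INVERT covers RFC 768's rule that a computed checksum of zero is transmitted as all ones.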
@@ -1741,15 +1787,15 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, if ((inp->inp_flags2 & INP2_CLAT46_FLOW) || (udpcksum && !(inp->inp_flags & INP_UDP_NOCKSUM))) { ui->ui_sum = in_pseudo(ui->ui_src.s_addr, ui->ui_dst.s_addr, - htons((u_short)len + sizeof (struct udphdr) + IPPROTO_UDP)); - m->m_pkthdr.csum_flags = (CSUM_UDP|CSUM_ZERO_INVERT); + htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP)); + m->m_pkthdr.csum_flags = (CSUM_UDP | CSUM_ZERO_INVERT); m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); } else { ui->ui_sum = 0; } - ((struct ip *)ui)->ip_len = sizeof (struct udpiphdr) + len; - ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */ - ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */ + ((struct ip *)ui)->ip_len = sizeof(struct udpiphdr) + len; + ((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl; /* XXX */ + ((struct ip *)ui)->ip_tos = inp->inp_ip_tos; /* XXX */ udpstat.udps_opackets++; KERNEL_DEBUG(DBG_LAYER_OUT_END, ui->ui_dport, ui->ui_sport, @@ -1784,7 +1830,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, inp->inp_route.ro_dst.sa_family = AF_INET; inp->inp_route.ro_dst.sa_len = sizeof(struct sockaddr_in); ((struct sockaddr_in *)(void *)&inp->inp_route.ro_dst)->sin_addr = - faddr; + faddr; rtalloc_scoped(&inp->inp_route, ipoa.ipoa_boundif); @@ -1807,8 +1853,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, } } #endif /* NECP */ - if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) + if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ipoa.ipoa_flags |= IPOAF_QOSMARKING_ALLOWED; + } #if IPSEC if (inp->inp_sp != NULL && ipsec_setsocket(m, inp->inp_socket) != 0) { @@ -1819,11 +1866,11 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, inpopts = inp->inp_options; #if CONTENT_FILTER - if (cfil_tag && (inp->inp_socket->so_options != cfil_so_options)) + if (cfil_tag && (inp->inp_socket->so_options != cfil_so_options)) { soopts |= (cfil_so_options & (SO_DONTROUTE | SO_BROADCAST)); - else + } else #endif - soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)); + soopts |= (inp->inp_socket->so_options & (SO_DONTROUTE | SO_BROADCAST)); mopts = inp->inp_moptions; if (mopts != NULL) { @@ -1833,7 +1880,6 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, mopts->imo_multicast_ifp != NULL) { /* no reference needed */ inp->inp_last_outifp = mopts->imo_multicast_ifp; - } IMO_UNLOCK(mopts); } @@ -1846,19 +1892,23 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, m->m_pkthdr.pkt_flowid = inp->inp_flowhash; m->m_pkthdr.pkt_proto = IPPROTO_UDP; m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC); - if (flowadv) + if (flowadv) { m->m_pkthdr.pkt_flags |= PKTF_FLOW_ADV; + } m->m_pkthdr.tx_udp_pid = so->last_pid; - if (so->so_flags & SOF_DELEGATED) + if (so->so_flags & SOF_DELEGATED) { m->m_pkthdr.tx_udp_e_pid = so->e_pid; - else + } else { m->m_pkthdr.tx_udp_e_pid = 0; + } - if (ipoa.ipoa_boundif != IFSCOPE_NONE) + if (ipoa.ipoa_boundif != IFSCOPE_NONE) { ipoa.ipoa_flags |= IPOAF_BOUND_IF; + } - if (laddr.s_addr != INADDR_ANY) + if (laddr.s_addr != INADDR_ANY) { ipoa.ipoa_flags |= IPOAF_BOUND_SRCADDR; + } inp->inp_sndinprog_cnt++; @@ -1866,8 +1916,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, error = ip_output(m, inpopts, &ro, soopts, mopts, &ipoa); m = NULL; socket_lock(so, 0); - if (mopts != NULL) + if (mopts != NULL) { IMO_REMREF(mopts); + } if (error == 0 && nstat_collect) { boolean_t cell, wifi, 
wired; @@ -1895,8 +1946,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr, } VERIFY(inp->inp_sndinprog_cnt > 0); - if ( --inp->inp_sndinprog_cnt == 0) + if (--inp->inp_sndinprog_cnt == 0) { inp->inp_flags &= ~(INP_FC_FEEDBACK); + } /* Synchronize PCB cached route */ inp_route_copyin(inp, &ro); @@ -1906,30 +1958,31 @@ abort: /* Always discard the cached route for unconnected socket */ ROUTE_RELEASE(&inp->inp_route); in_pcbdisconnect(inp); - inp->inp_laddr = origladdr; /* XXX rehash? */ + inp->inp_laddr = origladdr; /* XXX rehash? */ /* no reference needed */ inp->inp_last_outifp = origoutifp; - } else if (inp->inp_route.ro_rt != NULL) { struct rtentry *rt = inp->inp_route.ro_rt; struct ifnet *outifp; - if (rt->rt_flags & (RTF_MULTICAST|RTF_BROADCAST)) - rt = NULL; /* unusable */ - + if (rt->rt_flags & (RTF_MULTICAST | RTF_BROADCAST)) { + rt = NULL; /* unusable */ + } #if CONTENT_FILTER /* * Discard temporary route for cfil case */ - if (cfil_faddr_use) - rt = NULL; /* unusable */ + if (cfil_faddr_use) { + rt = NULL; /* unusable */ + } #endif /* * Always discard if it is a multicast or broadcast route. */ - if (rt == NULL) + if (rt == NULL) { ROUTE_RELEASE(&inp->inp_route); + } /* * If the destination route is unicast, update outifp with @@ -1940,11 +1993,11 @@ abort: inp->inp_last_outifp = outifp; /* no reference needed */ so->so_pktheadroom = P2ROUNDUP( - sizeof(struct udphdr) + - sizeof(struct ip) + - ifnet_hdrlen(outifp) + - ifnet_mbuf_packetpreamblelen(outifp), - sizeof(u_int32_t)); + sizeof(struct udphdr) + + sizeof(struct ip) + + ifnet_hdrlen(outifp) + + ifnet_mbuf_packetpreamblelen(outifp), + sizeof(u_int32_t)); } } else { ROUTE_RELEASE(&inp->inp_route); @@ -1955,35 +2008,39 @@ abort: * denied access to it, generate an event. 
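One detail worth making concrete from the route-update branch above: so_pktheadroom is the link + IP + UDP header space rounded up to a 4-byte boundary. P2ROUNDUP is assumed here to be the usual power-of-two rounding macro; the definition below is included only to make the arithmetic concrete:

#define P2ROUNDUP(x, align)     (-(-(x) & -(align)))

/* e.g. 8 (UDP) + 20 (IP) + 14 (Ethernet) + 0 (preamble) = 42,
 * and P2ROUNDUP(42, sizeof(u_int32_t)) == 44 */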
*/ if (error != 0 && (ipoa.ipoa_retflags & IPOARF_IFDENIED) && - (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp))) - soevent(so, (SO_FILT_HINT_LOCKED|SO_FILT_HINT_IFDENIED)); + (INP_NO_CELLULAR(inp) || INP_NO_EXPENSIVE(inp))) { + soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); + } release: KERNEL_DEBUG(DBG_FNC_UDP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0); - if (m != NULL) + if (m != NULL) { m_freem(m); + } - if (outif != NULL) + if (outif != NULL) { ifnet_release(outif); + } #if CONTENT_FILTER - if (cfil_tag) + if (cfil_tag) { m_tag_free(cfil_tag); + } #endif - return (error); + return error; } -u_int32_t udp_sendspace = 9216; /* really max datagram size */ +u_int32_t udp_sendspace = 9216; /* really max datagram size */ /* 187 1K datagrams (approx 192 KB) */ -u_int32_t udp_recvspace = 187 * (1024 + +u_int32_t udp_recvspace = 187 * (1024 + #if INET6 - sizeof (struct sockaddr_in6) + sizeof(struct sockaddr_in6) #else /* !INET6 */ - sizeof (struct sockaddr_in) + sizeof(struct sockaddr_in) #endif /* !INET6 */ - ); + ); /* Check that the values of udp send and recv space do not exceed sb_max */ static int @@ -1993,7 +2050,7 @@ sysctl_udp_sospace(struct sysctl_oid *oidp, void *arg1, int arg2, #pragma unused(arg1, arg2) u_int32_t new_value = 0, *space_p = NULL; int changed = 0, error = 0; - u_quad_t sb_effective_max = (sb_max/(MSIZE+MCLBYTES)) * MCLBYTES; + u_quad_t sb_effective_max = (sb_max / (MSIZE + MCLBYTES)) * MCLBYTES; switch (oidp->oid_number) { case UDPCTL_RECVSPACE: @@ -2003,26 +2060,27 @@ sysctl_udp_sospace(struct sysctl_oid *oidp, void *arg1, int arg2, space_p = &udp_sendspace; break; default: - return (EINVAL); + return EINVAL; } - error = sysctl_io_number(req, *space_p, sizeof (u_int32_t), + error = sysctl_io_number(req, *space_p, sizeof(u_int32_t), &new_value, &changed); if (changed) { - if (new_value > 0 && new_value <= sb_effective_max) + if (new_value > 0 && new_value <= sb_effective_max) { *space_p = new_value; - else + } else { error = ERANGE; + } } - return (error); + return error; } SYSCTL_PROC(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_recvspace, 0, - &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_recvspace, 0, + &sysctl_udp_sospace, "IU", "Maximum incoming UDP datagram size"); SYSCTL_PROC(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_sendspace, 0, - &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &udp_sendspace, 0, + &sysctl_udp_sospace, "IU", "Maximum outgoing UDP datagram size"); int udp_abort(struct socket *so) @@ -2036,7 +2094,7 @@ udp_abort(struct socket *so) } soisdisconnected(so); in_pcbdetach(inp); - return (0); + return 0; } int @@ -2052,17 +2110,20 @@ udp_attach(struct socket *so, int proto, struct proc *p) /* NOTREACHED */ } error = in_pcballoc(so, &udbinfo, p); - if (error != 0) - return (error); + if (error != 0) { + return error; + } error = soreserve(so, udp_sendspace, udp_recvspace); - if (error != 0) - return (error); + if (error != 0) { + return error; + } inp = (struct inpcb *)so->so_pcb; inp->inp_vflag |= INP_IPV4; inp->inp_ip_ttl = ip_defttl; - if (nstat_collect) + if (nstat_collect) { nstat_udp_new_pcb(inp); - return (0); + } + return 0; } int @@ -2072,26 +2133,28 @@ udp_bind(struct socket *so, struct sockaddr *nam, struct proc *p) int error; if (nam->sa_family != 0 && nam->sa_family != AF_INET && - nam->sa_family 
!= AF_INET6) - return (EAFNOSUPPORT); + nam->sa_family != AF_INET6) { + return EAFNOSUPPORT; + } inp = sotoinpcb(so); - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } error = in_pcbbind(inp, nam, p); #if NECP /* Update NECP client with bind result if not in middle of connect */ if (error == 0 && - (inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) && - !uuid_is_null(inp->necp_client_uuid)) { + (inp->inp_flags2 & INP2_CONNECT_IN_PROGRESS) && + !uuid_is_null(inp->necp_client_uuid)) { socket_unlock(so, 0); necp_client_assign_from_socket(so->last_pid, inp->necp_client_uuid, inp); socket_lock(so, 0); } #endif /* NECP */ - return (error); + return error; } int @@ -2101,10 +2164,12 @@ udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p) int error; inp = sotoinpcb(so); - if (inp == NULL) - return (EINVAL); - if (inp->inp_faddr.s_addr != INADDR_ANY) - return (EISCONN); + if (inp == NULL) { + return EINVAL; + } + if (inp->inp_faddr.s_addr != INADDR_ANY) { + return EISCONN; + } if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) { so->so_flags1 |= SOF1_CONNECT_COUNTED; @@ -2124,7 +2189,7 @@ udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p) } else { error = ENETDOWN; } - return (error); + return error; } #endif /* FLOW_DIVERT */ #endif /* NECP */ @@ -2141,10 +2206,11 @@ udp_connect(struct socket *so, struct sockaddr *nam, struct proc *p) #endif /* NECP */ soisconnected(so); - if (inp->inp_flowhash == 0) + if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); + } } - return (error); + return error; } int @@ -2158,8 +2224,9 @@ udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sock int error = 0; user_ssize_t datalen = 0; - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } VERIFY(dst != NULL); @@ -2172,13 +2239,13 @@ udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sock /* bind socket to the specified interface, if requested */ if (ifscope != IFSCOPE_NONE && - (error = inp_bindif(inp, ifscope, NULL)) != 0) { + (error = inp_bindif(inp, ifscope, NULL)) != 0) { goto done; } /* if source address and/or port is specified, bind to it */ if (src != NULL) { - error = sobindlock(so, src, 0); /* already locked */ + error = sobindlock(so, src, 0); /* already locked */ if (error != 0) { goto done; } @@ -2217,25 +2284,27 @@ udp_connectx_common(struct socket *so, int af, struct sockaddr *src, struct sock socket_lock(so, 0); /* If error returned is EMSGSIZE, for example, disconnect */ - if (error == 0 || error == EWOULDBLOCK) + if (error == 0 || error == EWOULDBLOCK) { *bytes_written = datalen - uio_resid(uio); - else + } else { (void) so->so_proto->pr_usrreqs->pru_disconnectx(so, SAE_ASSOCID_ANY, SAE_CONNID_ANY); + } /* * mask the EWOULDBLOCK error so that the caller * knows that atleast the connect was successful. 
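The sysctl_udp_sospace() handler shown a little earlier clamps writes to net.inet.udp.recvspace and net.inet.udp.maxdgram to an effective ceiling derived from kern.ipc.maxsockbuf, returning ERANGE beyond it. Exercising it from user space might look like this (a sketch; the function name is mine, and raising the value needs privilege):

#include <stdio.h>
#include <sys/sysctl.h>

static int
bump_udp_maxdgram(unsigned int new_size)
{
        unsigned int cur = 0;
        size_t len = sizeof(cur);

        /* returns the previous value in cur while installing new_size */
        if (sysctlbyname("net.inet.udp.maxdgram", &cur, &len,
            &new_size, sizeof(new_size)) == -1)
                return -1;
        printf("net.inet.udp.maxdgram: %u -> %u\n", cur, new_size);
        return 0;
}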
*/ - if (error == EWOULDBLOCK) + if (error == EWOULDBLOCK) { error = 0; + } } - if (error == 0 && pcid != NULL) - *pcid = 1; /* there is only 1 connection for UDP */ - + if (error == 0 && pcid != NULL) { + *pcid = 1; /* there is only 1 connection for UDP */ + } done: inp->inp_flags2 &= ~INP2_CONNECT_IN_PROGRESS; - return (error); + return error; } int @@ -2244,8 +2313,8 @@ udp_connectx(struct socket *so, struct sockaddr *src, sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg, uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written) { - return (udp_connectx_common(so, AF_INET, src, dst, - p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written)); + return udp_connectx_common(so, AF_INET, src, dst, + p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written); } int @@ -2265,12 +2334,13 @@ udp_detach(struct socket *so) * close to complete before going to sleep. Send a notification * for this kind of sockets */ - if (so->so_options & SO_NOWAKEFROMSLEEP) + if (so->so_options & SO_NOWAKEFROMSLEEP) { socket_post_kev_msg_closed(so); + } in_pcbdetach(inp); inp->inp_state = INPCB_STATE_DEAD; - return (0); + return 0; } int @@ -2281,12 +2351,14 @@ udp_disconnect(struct socket *so) inp = sotoinpcb(so); if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) - return (inp == NULL ? EINVAL : EPROTOTYPE); - if (inp->inp_faddr.s_addr == INADDR_ANY) - return (ENOTCONN); + ) { + return inp == NULL ? EINVAL : EPROTOTYPE; + } + if (inp->inp_faddr.s_addr == INADDR_ANY) { + return ENOTCONN; + } in_pcbdisconnect(inp); @@ -2294,20 +2366,21 @@ udp_disconnect(struct socket *so) inp_reset_fc_state(inp); inp->inp_laddr.s_addr = INADDR_ANY; - so->so_state &= ~SS_ISCONNECTED; /* XXX */ + so->so_state &= ~SS_ISCONNECTED; /* XXX */ inp->inp_last_outifp = NULL; - return (0); + return 0; } int udp_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid) { #pragma unused(cid) - if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) - return (EINVAL); + if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) { + return EINVAL; + } - return (udp_disconnect(so)); + return udp_disconnect(so); } int @@ -2321,24 +2394,26 @@ udp_send(struct socket *so, int flags, struct mbuf *m, inp = sotoinpcb(so); if (inp == NULL) { - if (m != NULL) + if (m != NULL) { m_freem(m); - if (control != NULL) + } + if (control != NULL) { m_freem(control); - return (EINVAL); + } + return EINVAL; } #if NECP #if FLOW_DIVERT if (necp_socket_should_use_flow_divert(inp)) { /* Implicit connect */ - return (flow_divert_implicit_data_out(so, flags, m, addr, - control, p)); + return flow_divert_implicit_data_out(so, flags, m, addr, + control, p); } #endif /* FLOW_DIVERT */ #endif /* NECP */ - return (udp_output(inp, m, addr, control, p)); + return udp_output(inp, m, addr, control, p); } int @@ -2347,10 +2422,11 @@ udp_shutdown(struct socket *so) struct inpcb *inp; inp = sotoinpcb(so); - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } socantsendmore(so); - return (0); + return 0; } int @@ -2358,10 +2434,11 @@ udp_lock(struct socket *so, int refcount, void *debug) { void *lr_saved; - if (debug == NULL) + if (debug == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = debug; + } if (so->so_pcb != NULL) { LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx, @@ -2372,12 +2449,13 @@ udp_lock(struct socket *so, int refcount, void *debug) so, lr_saved, solockhistory_nr(so)); /* NOTREACHED 
*/ } - if (refcount) + if (refcount) { so->so_usecount++; + } so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; - return (0); + so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; + return 0; } int @@ -2385,10 +2463,11 @@ udp_unlock(struct socket *so, int refcount, void *debug) { void *lr_saved; - if (debug == NULL) + if (debug == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = debug; + } if (refcount) { VERIFY(so->so_usecount > 0); @@ -2402,10 +2481,10 @@ udp_unlock(struct socket *so, int refcount, void *debug) LCK_MTX_ASSERT(&((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED); so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; lck_mtx_unlock(&((struct inpcb *)so->so_pcb)->inpcb_mtx); } - return (0); + return 0; } lck_mtx_t * @@ -2419,7 +2498,7 @@ udp_getlock(struct socket *so, int flags) so, solockhistory_nr(so)); /* NOTREACHED */ } - return (&inp->inpcb_mtx); + return &inp->inpcb_mtx; } /* @@ -2454,8 +2533,9 @@ udp_gc(struct inpcbinfo *ipi) * wantcnt to that value. If the PCB is already dead, * keep gc active to anticipate wantcnt changing. */ - if (inp->inp_wantcnt != WNT_STOPUSING) + if (inp->inp_wantcnt != WNT_STOPUSING) { continue; + } /* * Skip if busy, no hurry for cleanup. Keep gc active @@ -2473,11 +2553,11 @@ udp_gc(struct inpcbinfo *ipi) if (so->so_usecount == 0) { if (inp->inp_state != INPCB_STATE_DEAD) { #if INET6 - if (SOCK_CHECK_DOM(so, PF_INET6)) + if (SOCK_CHECK_DOM(so, PF_INET6)) { in6_pcbdetach(inp); - else + } else #endif /* INET6 */ - in_pcbdetach(inp); + in_pcbdetach(inp); } in_pcbdispose(inp); } else { @@ -2492,10 +2572,11 @@ static int udp_getstat SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = (size_t)sizeof (struct udpstat); + if (req->oldptr == USER_ADDR_NULL) { + req->oldlen = (size_t)sizeof(struct udpstat); + } - return (SYSCTL_OUT(req, &udpstat, MIN(sizeof (udpstat), req->oldlen))); + return SYSCTL_OUT(req, &udpstat, MIN(sizeof(udpstat), req->oldlen)); } void @@ -2540,11 +2621,11 @@ udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) if (uh->uh_sum == 0) { udpstat.udps_nosum++; - return (0); + return 0; } /* ip_stripoptions() must have been called before we get here */ - ASSERT((ip->ip_hl << 2) == sizeof (*ip)); + ASSERT((ip->ip_hl << 2) == sizeof(*ip)); if ((hwcksum_rx || (ifp->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags & PKTF_LOOP)) && @@ -2576,7 +2657,7 @@ udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) uint32_t swbytes = (uint32_t)trailer; if (start < off) { - ip->ip_len += sizeof (*ip); + ip->ip_len += sizeof(*ip); #if BYTE_ORDER != BIG_ENDIAN HTONS(ip->ip_len); HTONS(ip->ip_off); @@ -2584,23 +2665,26 @@ udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) } /* callee folds in sum */ sum = m_adj_sum16(m, start, off, ulen, sum); - if (off > start) + if (off > start) { swbytes += (off - start); - else + } else { swbytes += (start - off); + } if (start < off) { #if BYTE_ORDER != BIG_ENDIAN NTOHS(ip->ip_off); NTOHS(ip->ip_len); #endif /* BYTE_ORDER != BIG_ENDIAN */ - ip->ip_len -= sizeof (*ip); + ip->ip_len -= sizeof(*ip); } - if (swbytes != 0) + if (swbytes != 0) { udp_in_cksum_stats(swbytes); - if (trailer != 0) + } + if (trailer != 0) { m_adj(m, -trailer); + } } /* callee folds in sum */ @@ -2612,12 
+2696,12 @@ udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) uint16_t ip_sum; char b[9]; - bcopy(ipov->ih_x1, b, sizeof (ipov->ih_x1)); - bzero(ipov->ih_x1, sizeof (ipov->ih_x1)); + bcopy(ipov->ih_x1, b, sizeof(ipov->ih_x1)); + bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); ip_sum = ipov->ih_len; ipov->ih_len = uh->uh_ulen; - uh->uh_sum = in_cksum(m, ulen + sizeof (struct ip)); - bcopy(b, ipov->ih_x1, sizeof (ipov->ih_x1)); + uh->uh_sum = in_cksum(m, ulen + sizeof(struct ip)); + bcopy(b, ipov->ih_x1, sizeof(ipov->ih_x1)); ipov->ih_len = ip_sum; udp_in_cksum_stats(ulen); @@ -2626,10 +2710,10 @@ udp_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) if (uh->uh_sum != 0) { udpstat.udps_badsum++; IF_UDP_STATINC(ifp, badchksum); - return (-1); + return -1; } - return (0); + return 0; } void @@ -2645,8 +2729,9 @@ udp_fill_keepalive_offload_frames(ifnet_t ifp, if (ifp == NULL || frames_array == NULL || frames_array_count == 0 || frame_index >= frames_array_count || - frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) + frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) { return; + } lck_rw_lock_shared(udbinfo.ipi_lock); gencnt = udbinfo.ipi_gencnt; @@ -2656,16 +2741,19 @@ udp_fill_keepalive_offload_frames(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frame; struct mbuf *m = NULL; - if (frame_index >= frames_array_count) + if (frame_index >= frames_array_count) { break; + } if (inp->inp_gencnt > gencnt || - inp->inp_state == INPCB_STATE_DEAD) + inp->inp_state == INPCB_STATE_DEAD) { continue; + } if ((so = inp->inp_socket) == NULL || - (so->so_state & SS_DEFUNCT)) + (so->so_state & SS_DEFUNCT)) { continue; + } /* * check for keepalive offload flag without socket * lock to avoid a deadlock @@ -2792,7 +2880,7 @@ udp_fill_keepalive_offload_frames(ifnet_t ifp, (u_short)inp->inp_keepalive_datalen + IPPROTO_UDP)); m->m_pkthdr.csum_flags = - (CSUM_UDP|CSUM_ZERO_INVERT); + (CSUM_UDP | CSUM_ZERO_INVERT); m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); } @@ -2846,12 +2934,14 @@ udp_fill_keepalive_offload_frames(ifnet_t ifp, ip6->ip6_plen = htons(sizeof(struct udphdr) + (u_short)inp->inp_keepalive_datalen); ip6->ip6_src = inp->in6p_laddr; - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { ip6->ip6_src.s6_addr16[1] = 0; + } ip6->ip6_dst = inp->in6p_faddr; - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { ip6->ip6_dst.s6_addr16[1] = 0; + } udp6->uh_sport = inp->in6p_lport; udp6->uh_dport = inp->in6p_fport; @@ -2864,7 +2954,7 @@ udp_fill_keepalive_offload_frames(ifnet_t ifp, (u_short)inp->inp_keepalive_datalen + IPPROTO_UDP)); m->m_pkthdr.csum_flags = - (CSUM_UDPIPV6|CSUM_ZERO_INVERT); + (CSUM_UDPIPV6 | CSUM_ZERO_INVERT); m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); } diff --git a/bsd/netinet/udp_var.h b/bsd/netinet/udp_var.h index f016b31de..8c662f05f 100644 --- a/bsd/netinet/udp_var.h +++ b/bsd/netinet/udp_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -69,89 +69,89 @@ /* * UDP kernel structures and variables. */ -struct udpiphdr { - struct ipovly ui_i; /* overlaid ip structure */ - struct udphdr ui_u; /* udp header */ +struct udpiphdr { + struct ipovly ui_i; /* overlaid ip structure */ + struct udphdr ui_u; /* udp header */ }; -#define ui_x1 ui_i.ih_x1 -#define ui_pr ui_i.ih_pr -#define ui_len ui_i.ih_len -#define ui_src ui_i.ih_src -#define ui_dst ui_i.ih_dst -#define ui_sport ui_u.uh_sport -#define ui_dport ui_u.uh_dport -#define ui_ulen ui_u.uh_ulen -#define ui_sum ui_u.uh_sum -#define ui_next ui_i.ih_next -#define ui_prev ui_i.ih_prev +#define ui_x1 ui_i.ih_x1 +#define ui_pr ui_i.ih_pr +#define ui_len ui_i.ih_len +#define ui_src ui_i.ih_src +#define ui_dst ui_i.ih_dst +#define ui_sport ui_u.uh_sport +#define ui_dport ui_u.uh_dport +#define ui_ulen ui_u.uh_ulen +#define ui_sum ui_u.uh_sum +#define ui_next ui_i.ih_next +#define ui_prev ui_i.ih_prev -struct udpstat { +struct udpstat { /* input statistics: */ - u_int32_t udps_ipackets; /* total input packets */ - u_int32_t udps_hdrops; /* packet shorter than header */ - u_int32_t udps_badsum; /* checksum error */ - u_int32_t udps_badlen; /* data length larger than packet */ - u_int32_t udps_noport; /* no socket on port */ - u_int32_t udps_noportbcast; /* of above, arrived as broadcast */ - u_int32_t udps_fullsock; /* not delivered, input socket full */ - u_int32_t udpps_pcbcachemiss; /* input packets missing pcb cache */ - u_int32_t udpps_pcbhashmiss; /* input packets not for hashed pcb */ + u_int32_t udps_ipackets; /* total input packets */ + u_int32_t udps_hdrops; /* packet shorter than header */ + u_int32_t udps_badsum; /* checksum error */ + u_int32_t udps_badlen; /* data length larger than packet */ + u_int32_t udps_noport; /* no socket on port */ + u_int32_t udps_noportbcast; /* of above, arrived as broadcast */ + u_int32_t udps_fullsock; /* not delivered, input socket full */ + u_int32_t udpps_pcbcachemiss; /* input packets missing pcb cache */ + u_int32_t udpps_pcbhashmiss; /* input packets not for hashed pcb */ /* output statistics: */ - u_int32_t udps_opackets; /* total output packets */ - u_int32_t udps_fastout; /* output packets on fast path */ - u_int32_t udps_nosum; /* no checksum */ - u_int32_t udps_noportmcast; /* of no socket on port, multicast */ - u_int32_t udps_filtermcast; /* blocked by multicast filter */ + u_int32_t udps_opackets; /* total output packets */ + u_int32_t udps_fastout; /* output packets on fast path */ + u_int32_t udps_nosum; /* no checksum */ + u_int32_t udps_noportmcast; /* of no socket on port, multicast */ + u_int32_t udps_filtermcast; /* blocked by multicast filter */ /* checksum statistics: */ - 
u_int32_t udps_rcv_swcsum; /* udp swcksum (inbound), packets */ + u_int32_t udps_rcv_swcsum; /* udp swcksum (inbound), packets */ u_int32_t udps_rcv_swcsum_bytes; /* udp swcksum (inbound), bytes */ - u_int32_t udps_rcv6_swcsum; /* udp6 swcksum (inbound), packets */ + u_int32_t udps_rcv6_swcsum; /* udp6 swcksum (inbound), packets */ u_int32_t udps_rcv6_swcsum_bytes; /* udp6 swcksum (inbound), bytes */ - u_int32_t udps_snd_swcsum; /* udp swcksum (outbound), packets */ + u_int32_t udps_snd_swcsum; /* udp swcksum (outbound), packets */ u_int32_t udps_snd_swcsum_bytes; /* udp swcksum (outbound), bytes */ - u_int32_t udps_snd6_swcsum; /* udp6 swcksum (outbound), packets */ + u_int32_t udps_snd6_swcsum; /* udp6 swcksum (outbound), packets */ u_int32_t udps_snd6_swcsum_bytes; /* udp6 swcksum (outbound), bytes */ }; /* * Names for UDP sysctl objects */ -#define UDPCTL_CHECKSUM 1 /* checksum UDP packets */ -#define UDPCTL_STATS 2 /* statistics (read-only) */ -#define UDPCTL_MAXDGRAM 3 /* max datagram size */ -#define UDPCTL_RECVSPACE 4 /* default receive buffer space */ -#define UDPCTL_PCBLIST 5 /* list of PCBs for UDP sockets */ -#define UDPCTL_MAXID 6 +#define UDPCTL_CHECKSUM 1 /* checksum UDP packets */ +#define UDPCTL_STATS 2 /* statistics (read-only) */ +#define UDPCTL_MAXDGRAM 3 /* max datagram size */ +#define UDPCTL_RECVSPACE 4 /* default receive buffer space */ +#define UDPCTL_PCBLIST 5 /* list of PCBs for UDP sockets */ +#define UDPCTL_MAXID 6 #ifdef BSD_KERNEL_PRIVATE #include #include -#define UDPCTL_NAMES { \ - { 0, 0 }, \ - { "checksum", CTLTYPE_INT }, \ - { "stats", CTLTYPE_STRUCT }, \ - { "maxdgram", CTLTYPE_INT }, \ - { "recvspace", CTLTYPE_INT }, \ - { "pcblist", CTLTYPE_STRUCT }, \ +#define UDPCTL_NAMES { \ + { 0, 0 }, \ + { "checksum", CTLTYPE_INT }, \ + { "stats", CTLTYPE_STRUCT }, \ + { "maxdgram", CTLTYPE_INT }, \ + { "recvspace", CTLTYPE_INT }, \ + { "pcblist", CTLTYPE_STRUCT }, \ } #ifdef INET6 -#define udp6stat udpstat -#define udp6s_opackets udps_opackets +#define udp6stat udpstat +#define udp6s_opackets udps_opackets #endif /* INET6 */ SYSCTL_DECL(_net_inet_udp); struct udpstat_local { - u_int64_t port_unreach; - u_int64_t faithprefix; /* deprecated */ - u_int64_t port0; - u_int64_t badlength; - u_int64_t badchksum; - u_int64_t badmcast; - u_int64_t cleanup; - u_int64_t badipsec; + u_int64_t port_unreach; + u_int64_t faithprefix; /* deprecated */ + u_int64_t port0; + u_int64_t badlength; + u_int64_t badchksum; + u_int64_t badmcast; + u_int64_t cleanup; + u_int64_t badipsec; }; extern struct pr_usrreqs udp_usrreqs; diff --git a/bsd/netinet6/ah.h b/bsd/netinet6/ah.h index c649899d2..149d8b911 100644 --- a/bsd/netinet6/ah.h +++ b/bsd/netinet6/ah.h @@ -39,19 +39,19 @@ #include struct ah { - u_int8_t ah_nxt; /* Next Header */ - u_int8_t ah_len; /* Length of data, in 32bit */ - u_int16_t ah_reserve; /* Reserved for future use */ - u_int32_t ah_spi; /* Security parameter index */ + u_int8_t ah_nxt; /* Next Header */ + u_int8_t ah_len; /* Length of data, in 32bit */ + u_int16_t ah_reserve; /* Reserved for future use */ + u_int32_t ah_spi; /* Security parameter index */ /* variable size, 32bit bound*/ /* Authentication data */ }; struct newah { - u_int8_t ah_nxt; /* Next Header */ - u_int8_t ah_len; /* Length of data + 1, in 32bit */ - u_int16_t ah_reserve; /* Reserved for future use */ - u_int32_t ah_spi; /* Security parameter index */ - u_int32_t ah_seq; /* Sequence number field */ + u_int8_t ah_nxt; /* Next Header */ + u_int8_t ah_len; /* Length of data + 1, in 32bit */ + 
u_int16_t ah_reserve; /* Reserved for future use */ + u_int32_t ah_spi; /* Security parameter index */ + u_int32_t ah_seq; /* Sequence number field */ /* variable size, 32bit bound*/ /* Authentication data */ }; @@ -60,21 +60,21 @@ struct secasvar; struct ah_algorithm_state { struct secasvar *sav; - void* foo; /*per algorithm data - maybe*/ + void* foo; /*per algorithm data - maybe*/ }; struct ah_algorithm { int (*sumsiz)(struct secasvar *); int (*mature)(struct secasvar *); - int keymin; /* in bits */ - int keymax; /* in bits */ + int keymin; /* in bits */ + int keymax; /* in bits */ const char *name; int (*init)(struct ah_algorithm_state *, struct secasvar *); void (*update)(struct ah_algorithm_state *, caddr_t, size_t); void (*result)(struct ah_algorithm_state *, caddr_t, size_t); }; -#define AH_MAXSUMSIZE 64 // sha2-512's output size +#define AH_MAXSUMSIZE 64 // sha2-512's output size extern const struct ah_algorithm *ah_algorithm_lookup(int); @@ -85,7 +85,7 @@ extern size_t ah_hdrsiz(struct ipsecrequest *); extern void ah4_input(struct mbuf *, int); extern int ah4_output(struct mbuf *, struct secasvar *); extern int ah4_calccksum(struct mbuf *, caddr_t, size_t, - const struct ah_algorithm *, struct secasvar *); + const struct ah_algorithm *, struct secasvar *); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET6_AH_H_ */ diff --git a/bsd/netinet6/ah6.h b/bsd/netinet6/ah6.h index 1b8add593..b277788c4 100644 --- a/bsd/netinet6/ah6.h +++ b/bsd/netinet6/ah6.h @@ -43,9 +43,9 @@ struct secasvar; extern int ah6_input(struct mbuf **, int *, int); extern int ah6_output(struct mbuf *, u_char *, struct mbuf *, - struct secasvar *); + struct secasvar *); extern int ah6_calccksum(struct mbuf *, caddr_t, size_t, - const struct ah_algorithm *, struct secasvar *); + const struct ah_algorithm *, struct secasvar *); extern void ah6_ctlinput(int, struct sockaddr *, void *); #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet6/ah_core.c b/bsd/netinet6/ah_core.c index b072b692e..cd503bea6 100644 --- a/bsd/netinet6/ah_core.c +++ b/bsd/netinet6/ah_core.c @@ -112,7 +112,7 @@ #include -#define HMACSIZE 16 +#define HMACSIZE 16 static int ah_sumsiz_1216(struct secasvar *); static int ah_sumsiz_zero(struct secasvar *); @@ -140,65 +140,65 @@ static void ah_hmac_sha1_result(struct ah_algorithm_state *, caddr_t, size_t); static int ah_sumsiz_sha2_256(struct secasvar *); static int ah_hmac_sha2_256_mature(struct secasvar *); static int ah_hmac_sha2_256_init(struct ah_algorithm_state *, - struct secasvar *); + struct secasvar *); static void ah_hmac_sha2_256_loop(struct ah_algorithm_state *, caddr_t, size_t); static void ah_hmac_sha2_256_result(struct ah_algorithm_state *, caddr_t, size_t); static int ah_sumsiz_sha2_384(struct secasvar *); static int ah_hmac_sha2_384_mature(struct secasvar *); static int ah_hmac_sha2_384_init(struct ah_algorithm_state *, - struct secasvar *); + struct secasvar *); static void ah_hmac_sha2_384_loop(struct ah_algorithm_state *, caddr_t, size_t); static void ah_hmac_sha2_384_result(struct ah_algorithm_state *, caddr_t, size_t); static int ah_sumsiz_sha2_512(struct secasvar *); static int ah_hmac_sha2_512_mature(struct secasvar *); static int ah_hmac_sha2_512_init(struct ah_algorithm_state *, - struct secasvar *); + struct secasvar *); static void ah_hmac_sha2_512_loop(struct ah_algorithm_state *, caddr_t, size_t); static void ah_hmac_sha2_512_result(struct ah_algorithm_state *, caddr_t, size_t); #endif /* AH_ALL_CRYPTO */ static void ah_update_mbuf(struct mbuf *, int, int, - const 
struct ah_algorithm *, struct ah_algorithm_state *); + const struct ah_algorithm *, struct ah_algorithm_state *); const struct ah_algorithm * ah_algorithm_lookup(int idx) { /* checksum algorithms */ static struct ah_algorithm hmac_md5 = - { ah_sumsiz_1216, ah_hmac_md5_mature, 128, 128, "hmac-md5", - ah_hmac_md5_init, ah_hmac_md5_loop, - ah_hmac_md5_result, }; + { ah_sumsiz_1216, ah_hmac_md5_mature, 128, 128, "hmac-md5", + ah_hmac_md5_init, ah_hmac_md5_loop, + ah_hmac_md5_result, }; static struct ah_algorithm keyed_md5 = - { ah_sumsiz_1216, ah_keyed_md5_mature, 128, 128, "keyed-md5", - ah_keyed_md5_init, ah_keyed_md5_loop, - ah_keyed_md5_result, }; + { ah_sumsiz_1216, ah_keyed_md5_mature, 128, 128, "keyed-md5", + ah_keyed_md5_init, ah_keyed_md5_loop, + ah_keyed_md5_result, }; static struct ah_algorithm hmac_sha1 = - { ah_sumsiz_1216, ah_hmac_sha1_mature, 160, 160, "hmac-sha1", - ah_hmac_sha1_init, ah_hmac_sha1_loop, - ah_hmac_sha1_result, }; + { ah_sumsiz_1216, ah_hmac_sha1_mature, 160, 160, "hmac-sha1", + ah_hmac_sha1_init, ah_hmac_sha1_loop, + ah_hmac_sha1_result, }; static struct ah_algorithm keyed_sha1 = - { ah_sumsiz_1216, ah_keyed_sha1_mature, 160, 160, "keyed-sha1", - ah_keyed_sha1_init, ah_keyed_sha1_loop, - ah_keyed_sha1_result, }; + { ah_sumsiz_1216, ah_keyed_sha1_mature, 160, 160, "keyed-sha1", + ah_keyed_sha1_init, ah_keyed_sha1_loop, + ah_keyed_sha1_result, }; static struct ah_algorithm ah_none = - { ah_sumsiz_zero, ah_none_mature, 0, 2048, "none", - ah_none_init, ah_none_loop, ah_none_result, }; + { ah_sumsiz_zero, ah_none_mature, 0, 2048, "none", + ah_none_init, ah_none_loop, ah_none_result, }; #if AH_ALL_CRYPTO static struct ah_algorithm hmac_sha2_256 = - { ah_sumsiz_sha2_256, ah_hmac_sha2_256_mature, 256, 256, - "hmac-sha2-256", - ah_hmac_sha2_256_init, ah_hmac_sha2_256_loop, - ah_hmac_sha2_256_result, }; + { ah_sumsiz_sha2_256, ah_hmac_sha2_256_mature, 256, 256, + "hmac-sha2-256", + ah_hmac_sha2_256_init, ah_hmac_sha2_256_loop, + ah_hmac_sha2_256_result, }; static struct ah_algorithm hmac_sha2_384 = - { ah_sumsiz_sha2_384, ah_hmac_sha2_384_mature, 384, 384, - "hmac-sha2-384", - ah_hmac_sha2_384_init, ah_hmac_sha2_384_loop, - ah_hmac_sha2_384_result, }; + { ah_sumsiz_sha2_384, ah_hmac_sha2_384_mature, 384, 384, + "hmac-sha2-384", + ah_hmac_sha2_384_init, ah_hmac_sha2_384_loop, + ah_hmac_sha2_384_result, }; static struct ah_algorithm hmac_sha2_512 = - { ah_sumsiz_sha2_512, ah_hmac_sha2_512_mature, 512, 512, - "hmac-sha2-512", - ah_hmac_sha2_512_init, ah_hmac_sha2_512_loop, - ah_hmac_sha2_512_result, }; + { ah_sumsiz_sha2_512, ah_hmac_sha2_512_mature, 512, 512, + "hmac-sha2-512", + ah_hmac_sha2_512_init, ah_hmac_sha2_512_loop, + ah_hmac_sha2_512_result, }; #endif /* AH_ALL_CRYPTO */ switch (idx) { @@ -229,19 +229,22 @@ ah_algorithm_lookup(int idx) static int ah_sumsiz_1216(struct secasvar *sav) { - if (!sav) + if (!sav) { return -1; - if (sav->flags & SADB_X_EXT_OLD) + } + if (sav->flags & SADB_X_EXT_OLD) { return 16; - else + } else { return 12; + } } static int ah_sumsiz_zero(struct secasvar *sav) { - if (!sav) + if (!sav) { return -1; + } return 0; } @@ -296,29 +299,32 @@ ah_keyed_md5_init(struct ah_algorithm_state *state, struct secasvar *sav) size_t keybitlen; u_int8_t buf[32] __attribute__((aligned(4))); - if (!state) + if (!state) { panic("ah_keyed_md5_init: what?"); + } state->sav = sav; state->foo = (void *)_MALLOC(sizeof(MD5_CTX), M_TEMP, M_NOWAIT); - if (state->foo == NULL) + if (state->foo == NULL) { return ENOBUFS; + } MD5Init((MD5_CTX *)state->foo); if 
(state->sav) { MD5Update((MD5_CTX *)state->foo, - (u_int8_t *)_KEYBUF(state->sav->key_auth), - (u_int)_KEYLEN(state->sav->key_auth)); + (u_int8_t *)_KEYBUF(state->sav->key_auth), + (u_int)_KEYLEN(state->sav->key_auth)); /* * Pad after the key. * We cannot simply use md5_pad() since the function * won't update the total length. */ - if (_KEYLEN(state->sav->key_auth) < 56) + if (_KEYLEN(state->sav->key_auth) < 56) { padlen = 64 - 8 - _KEYLEN(state->sav->key_auth); - else + } else { padlen = 64 + 64 - 8 - _KEYLEN(state->sav->key_auth); + } keybitlen = _KEYLEN(state->sav->key_auth); keybitlen *= 8; @@ -348,8 +354,9 @@ ah_keyed_md5_init(struct ah_algorithm_state *state, struct secasvar *sav) static void ah_keyed_md5_loop(struct ah_algorithm_state *state, caddr_t addr, size_t len) { - if (!state) + if (!state) { panic("ah_keyed_md5_loop: what?"); + } MD5Update((MD5_CTX *)state->foo, addr, len); } @@ -359,13 +366,14 @@ ah_keyed_md5_result(struct ah_algorithm_state *state, caddr_t addr, size_t l) { u_char digest[16] __attribute__((aligned(4))); - if (!state) + if (!state) { panic("ah_keyed_md5_result: what?"); + } if (state->sav) { MD5Update((MD5_CTX *)state->foo, - (u_int8_t *)_KEYBUF(state->sav->key_auth), - (u_int)_KEYLEN(state->sav->key_auth)); + (u_int8_t *)_KEYBUF(state->sav->key_auth), + (u_int)_KEYLEN(state->sav->key_auth)); } MD5Final(&digest[0], (MD5_CTX *)state->foo); FREE(state->foo, M_TEMP); @@ -389,7 +397,7 @@ ah_keyed_sha1_mature(struct secasvar *sav) } if (sav->key_auth->sadb_key_bits < algo->keymin - || algo->keymax < sav->key_auth->sadb_key_bits) { + || algo->keymax < sav->key_auth->sadb_key_bits) { ipseclog((LOG_ERR, "ah_keyed_sha1_mature: invalid key length %d.\n", sav->key_auth->sadb_key_bits)); @@ -407,28 +415,31 @@ ah_keyed_sha1_init(struct ah_algorithm_state *state, struct secasvar *sav) size_t keybitlen; u_int8_t buf[32] __attribute__((aligned(4))); - if (!state) + if (!state) { panic("ah_keyed_sha1_init: what?"); + } state->sav = sav; state->foo = (void *)_MALLOC(sizeof(SHA1_CTX), M_TEMP, M_NOWAIT); - if (!state->foo) + if (!state->foo) { return ENOBUFS; + } ctxt = (SHA1_CTX *)state->foo; SHA1Init(ctxt); if (state->sav) { SHA1Update(ctxt, (u_int8_t *)_KEYBUF(state->sav->key_auth), - (u_int)_KEYLEN(state->sav->key_auth)); + (u_int)_KEYLEN(state->sav->key_auth)); /* * Pad after the key. 
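 * (annotation; not in the Apple sources: the keyed-md5 and keyed-sha1
 * modes are not HMAC. init hashes the key followed by a hand-built
 * length pad, loop feeds the payload, and result appends the key once
 * more before finalizing -- an envelope construction of the form
 * H(key | pad | data | key). The pad is built manually because the
 * digest code's own padding would account for the wrong total length,
 * as the md5 variant's comment above notes.)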
*/ - if (_KEYLEN(state->sav->key_auth) < 56) + if (_KEYLEN(state->sav->key_auth) < 56) { padlen = 64 - 8 - _KEYLEN(state->sav->key_auth); - else + } else { padlen = 64 + 64 - 8 - _KEYLEN(state->sav->key_auth); + } keybitlen = _KEYLEN(state->sav->key_auth); keybitlen *= 8; @@ -460,8 +471,9 @@ ah_keyed_sha1_loop(struct ah_algorithm_state *state, caddr_t addr, size_t len) { SHA1_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_keyed_sha1_loop: what?"); + } ctxt = (SHA1_CTX *)state->foo; SHA1Update(ctxt, (caddr_t)addr, (size_t)len); @@ -470,16 +482,17 @@ ah_keyed_sha1_loop(struct ah_algorithm_state *state, caddr_t addr, size_t len) static void ah_keyed_sha1_result(struct ah_algorithm_state *state, caddr_t addr, size_t l) { - u_char digest[SHA1_RESULTLEN] __attribute__((aligned(4))); /* SHA-1 generates 160 bits */ + u_char digest[SHA1_RESULTLEN] __attribute__((aligned(4))); /* SHA-1 generates 160 bits */ SHA1_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_keyed_sha1_result: what?"); + } ctxt = (SHA1_CTX *)state->foo; if (state->sav) { SHA1Update(ctxt, (u_int8_t *)_KEYBUF(state->sav->key_auth), - (u_int)_KEYLEN(state->sav->key_auth)); + (u_int)_KEYLEN(state->sav->key_auth)); } SHA1Final((caddr_t)&digest[0], ctxt); bcopy(&digest[0], (void *)addr, sizeof(digest) > l ? l : sizeof(digest)); @@ -504,7 +517,7 @@ ah_hmac_md5_mature(struct secasvar *sav) } if (sav->key_auth->sadb_key_bits < algo->keymin - || algo->keymax < sav->key_auth->sadb_key_bits) { + || algo->keymax < sav->key_auth->sadb_key_bits) { ipseclog((LOG_ERR, "ah_hmac_md5_mature: invalid key length %d.\n", sav->key_auth->sadb_key_bits)); @@ -525,13 +538,15 @@ ah_hmac_md5_init(struct ah_algorithm_state *state, struct secasvar *sav) size_t i; MD5_CTX *ctxt; - if (!state) + if (!state) { panic("ah_hmac_md5_init: what?"); + } state->sav = sav; state->foo = (void *)_MALLOC(64 + 64 + sizeof(MD5_CTX), M_TEMP, M_NOWAIT); - if (!state->foo) + if (!state->foo) { return ENOBUFS; + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 64); @@ -541,7 +556,7 @@ ah_hmac_md5_init(struct ah_algorithm_state *state, struct secasvar *sav) if (64 < _KEYLEN(state->sav->key_auth)) { MD5Init(ctxt); MD5Update(ctxt, _KEYBUF(state->sav->key_auth), - _KEYLEN(state->sav->key_auth)); + _KEYLEN(state->sav->key_auth)); MD5Final(&tk[0], ctxt); key = &tk[0]; keylen = 16; @@ -570,8 +585,9 @@ ah_hmac_md5_loop(struct ah_algorithm_state *state, caddr_t addr, size_t len) { MD5_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_md5_loop: what?"); + } ctxt = (MD5_CTX *)(void *)(((caddr_t)state->foo) + 128); MD5Update(ctxt, addr, len); } @@ -584,8 +600,9 @@ ah_hmac_md5_result(struct ah_algorithm_state *state, caddr_t addr, size_t l) u_char *opad; MD5_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_md5_result: what?"); + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 64); @@ -620,7 +637,7 @@ ah_hmac_sha1_mature(struct secasvar *sav) } if (sav->key_auth->sadb_key_bits < algo->keymin - || algo->keymax < sav->key_auth->sadb_key_bits) { + || algo->keymax < sav->key_auth->sadb_key_bits) { ipseclog((LOG_ERR, "ah_hmac_sha1_mature: invalid key length %d.\n", sav->key_auth->sadb_key_bits)); @@ -636,19 +653,21 @@ ah_hmac_sha1_init(struct ah_algorithm_state *state, struct secasvar *sav) u_char *ipad; u_char *opad; SHA1_CTX *ctxt; - u_char tk[SHA1_RESULTLEN] __attribute__((aligned(4))); /* SHA-1 generates 160 bits */ + u_char tk[SHA1_RESULTLEN] 
__attribute__((aligned(4))); /* SHA-1 generates 160 bits */ u_char *key; size_t keylen; size_t i; - if (!state) + if (!state) { panic("ah_hmac_sha1_init: what?"); + } state->sav = sav; state->foo = (void *)_MALLOC(64 + 64 + sizeof(SHA1_CTX), - M_TEMP, M_NOWAIT); - if (!state->foo) + M_TEMP, M_NOWAIT); + if (!state->foo) { return ENOBUFS; + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 64); @@ -658,7 +677,7 @@ ah_hmac_sha1_init(struct ah_algorithm_state *state, struct secasvar *sav) if (64 < _KEYLEN(state->sav->key_auth)) { SHA1Init(ctxt); SHA1Update(ctxt, _KEYBUF(state->sav->key_auth), - _KEYLEN(state->sav->key_auth)); + _KEYLEN(state->sav->key_auth)); SHA1Final(&tk[0], ctxt); key = &tk[0]; keylen = SHA1_RESULTLEN; @@ -687,8 +706,9 @@ ah_hmac_sha1_loop(struct ah_algorithm_state *state, caddr_t addr, size_t len) { SHA1_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_sha1_loop: what?"); + } ctxt = (SHA1_CTX *)(void *)(((u_char *)state->foo) + 128); SHA1Update(ctxt, (caddr_t)addr, (size_t)len); @@ -697,13 +717,14 @@ ah_hmac_sha1_loop(struct ah_algorithm_state *state, caddr_t addr, size_t len) static void ah_hmac_sha1_result(struct ah_algorithm_state *state, caddr_t addr, size_t l) { - u_char digest[SHA1_RESULTLEN] __attribute__((aligned(4))); /* SHA-1 generates 160 bits */ + u_char digest[SHA1_RESULTLEN] __attribute__((aligned(4))); /* SHA-1 generates 160 bits */ u_char *ipad; u_char *opad; SHA1_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_sha1_result: what?"); + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 64); @@ -725,8 +746,9 @@ ah_hmac_sha1_result(struct ah_algorithm_state *state, caddr_t addr, size_t l) static int ah_sumsiz_sha2_256(struct secasvar *sav) { - if (!sav) + if (!sav) { return -1; + } // return half the output size (in bytes), as per rfc 4868 return 16; // 256/(8*2) } @@ -771,14 +793,16 @@ ah_hmac_sha2_256_init(struct ah_algorithm_state *state, struct secasvar *sav) size_t keylen; size_t i; - if (!state) + if (!state) { panic("ah_hmac_sha2_256_init: what?"); + } state->sav = sav; state->foo = (void *)_MALLOC(64 + 64 + sizeof(SHA256_CTX), M_TEMP, M_NOWAIT); - if (!state->foo) + if (!state->foo) { return ENOBUFS; + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 64); @@ -817,13 +841,14 @@ ah_hmac_sha2_256_init(struct ah_algorithm_state *state, struct secasvar *sav) static void ah_hmac_sha2_256_loop(struct ah_algorithm_state *state, - caddr_t addr, - size_t len) + caddr_t addr, + size_t len) { SHA256_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_sha2_256_loop: what?"); + } ctxt = (SHA256_CTX *)(void *)(((u_char *)state->foo) + 128); SHA256_Update(ctxt, (const u_int8_t *)addr, (size_t)len); @@ -831,16 +856,17 @@ ah_hmac_sha2_256_loop(struct ah_algorithm_state *state, static void ah_hmac_sha2_256_result(struct ah_algorithm_state *state, - caddr_t addr, - size_t l) + caddr_t addr, + size_t l) { u_char digest[SHA256_DIGEST_LENGTH] __attribute__((aligned(4))); u_char *ipad; u_char *opad; SHA256_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_sha2_256_result: what?"); + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 64); @@ -861,8 +887,9 @@ ah_hmac_sha2_256_result(struct ah_algorithm_state *state, static int ah_sumsiz_sha2_384(struct secasvar *sav) { - if (!sav) + if (!sav) { return -1; + } // return half the output size (in bytes), as per rfc 4868 return 24; // 384/(8*2) } @@ -907,14 +934,16 
@@ ah_hmac_sha2_384_init(struct ah_algorithm_state *state, struct secasvar *sav) size_t keylen; size_t i; - if (!state) + if (!state) { panic("ah_hmac_sha2_384_init: what?"); + } state->sav = sav; state->foo = (void *)_MALLOC(128 + 128 + sizeof(SHA384_CTX), M_TEMP, M_NOWAIT | M_ZERO); - if (!state->foo) + if (!state->foo) { return ENOBUFS; + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 128); @@ -953,13 +982,14 @@ ah_hmac_sha2_384_init(struct ah_algorithm_state *state, struct secasvar *sav) static void ah_hmac_sha2_384_loop(struct ah_algorithm_state *state, - caddr_t addr, - size_t len) + caddr_t addr, + size_t len) { SHA384_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_sha2_384_loop: what?"); + } ctxt = (SHA384_CTX *)(void *)(((u_char *)state->foo) + 256); SHA384_Update(ctxt, (const u_int8_t *)addr, (size_t)len); @@ -967,16 +997,17 @@ ah_hmac_sha2_384_loop(struct ah_algorithm_state *state, static void ah_hmac_sha2_384_result(struct ah_algorithm_state *state, - caddr_t addr, - size_t l) + caddr_t addr, + size_t l) { u_char digest[SHA384_DIGEST_LENGTH]; u_char *ipad; u_char *opad; SHA384_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_sha2_384_result: what?"); + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 128); @@ -997,8 +1028,9 @@ ah_hmac_sha2_384_result(struct ah_algorithm_state *state, static int ah_sumsiz_sha2_512(struct secasvar *sav) { - if (!sav) + if (!sav) { return -1; + } // return half the output size (in bytes), as per rfc 4868 return 32; // 512/(8*2) } @@ -1043,14 +1075,16 @@ ah_hmac_sha2_512_init(struct ah_algorithm_state *state, struct secasvar *sav) size_t keylen; size_t i; - if (!state) + if (!state) { panic("ah_hmac_sha2_512_init: what?"); + } state->sav = sav; state->foo = (void *)_MALLOC(128 + 128 + sizeof(SHA512_CTX), M_TEMP, M_NOWAIT | M_ZERO); - if (!state->foo) + if (!state->foo) { return ENOBUFS; + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 128); @@ -1089,13 +1123,14 @@ ah_hmac_sha2_512_init(struct ah_algorithm_state *state, struct secasvar *sav) static void ah_hmac_sha2_512_loop(struct ah_algorithm_state *state, - caddr_t addr, - size_t len) + caddr_t addr, + size_t len) { SHA512_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_sha2_512_loop: what?"); + } ctxt = (SHA512_CTX *)(void *)(((u_char *)state->foo) + 256); SHA512_Update(ctxt, (const u_int8_t *) addr, (size_t)len); @@ -1103,16 +1138,17 @@ ah_hmac_sha2_512_loop(struct ah_algorithm_state *state, static void ah_hmac_sha2_512_result(struct ah_algorithm_state *state, - caddr_t addr, - size_t l) + caddr_t addr, + size_t l) { u_char digest[SHA512_DIGEST_LENGTH] __attribute__((aligned(4))); u_char *ipad; u_char *opad; SHA512_CTX *ctxt; - if (!state || !state->foo) + if (!state || !state->foo) { panic("ah_hmac_sha2_512_result: what?"); + } ipad = (u_char *)state->foo; opad = (u_char *)(ipad + 128); @@ -1137,9 +1173,9 @@ ah_hmac_sha2_512_result(struct ah_algorithm_state *state, * go generate the checksum. 
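 * (annotation; not in the Apple sources: ah_update_mbuf() first walks
 * the chain to skip "off" bytes, then feeds each mbuf's overlap with
 * the requested range to (algo->update)() in chunks of
 * min(n->m_len - off, len) bytes. The incremental init/update/result
 * interface of struct ah_algorithm is what lets the ICV be computed
 * without ever pulling the packet into contiguous memory.)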
*/ static void -ah_update_mbuf(struct mbuf *m,int off, int len, - const struct ah_algorithm *algo, - struct ah_algorithm_state *algos) +ah_update_mbuf(struct mbuf *m, int off, int len, + const struct ah_algorithm *algo, + struct ah_algorithm_state *algos) { struct mbuf *n; int tlen; @@ -1151,22 +1187,26 @@ ah_update_mbuf(struct mbuf *m,int off, int len, } for (n = m; n; n = n->m_next) { - if (off < n->m_len) + if (off < n->m_len) { break; + } off -= n->m_len; } - if (!n) + if (!n) { panic("ah_update_mbuf: wrong offset specified"); + } for (/*nothing*/; n && len > 0; n = n->m_next) { - if (n->m_len == 0) + if (n->m_len == 0) { continue; - if (n->m_len - off < len) + } + if (n->m_len - off < len) { tlen = n->m_len - off; - else + } else { tlen = len; + } (algo->update)(algos, mtod(n, caddr_t) + off, tlen); @@ -1185,7 +1225,7 @@ ah_update_mbuf(struct mbuf *m,int off, int len, */ int ah4_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, - const struct ah_algorithm *algo, struct secasvar *sav) + const struct ah_algorithm *algo, struct secasvar *sav) { int off; int hdrtype; @@ -1196,25 +1236,27 @@ ah4_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, int ahseen; struct mbuf *n = NULL; - if ((m->m_flags & M_PKTHDR) == 0) + if ((m->m_flags & M_PKTHDR) == 0) { return EINVAL; + } ahseen = 0; - hdrtype = -1; /*dummy, it is called IPPROTO_IP*/ + hdrtype = -1; /*dummy, it is called IPPROTO_IP*/ off = 0; error = (algo->init)(&algos, sav); - if (error) + if (error) { return error; + } - advancewidth = 0; /*safety*/ + advancewidth = 0; /*safety*/ again: /* gory. */ switch (hdrtype) { - case -1: /*first one only*/ - { + case -1: /*first one only*/ + { /* * copy ip hdr, modify to fit the AH checksum rule, * then take a checksum. @@ -1230,8 +1272,9 @@ again: #endif iphdr.ip_ttl = 0; iphdr.ip_sum = htons(0); - if (ip4_ah_cleartos) + if (ip4_ah_cleartos) { iphdr.ip_tos = 0; + } iphdr.ip_off = htons(ntohs(iphdr.ip_off) & ip4_ah_offsetmask); (algo->update)(&algos, (caddr_t)&iphdr, sizeof(struct ip)); @@ -1272,9 +1315,9 @@ again: } if (p[i + IPOPT_OPTVAL] == IPOPT_EOL || p[i + IPOPT_OPTVAL] == IPOPT_NOP || - i + IPOPT_OLEN < hlen) + i + IPOPT_OLEN < hlen) { ; - else { + } else { ipseclog((LOG_ERR, "ah4_calccksum: invalid IP option " "(type=%02x)\n", @@ -1290,25 +1333,27 @@ again: l = 1; skip = 0; break; - case IPOPT_SECURITY: /* 0x82 */ - case 0x85: /* Extended security */ - case 0x86: /* Commercial security */ - case 0x94: /* Router alert */ - case 0x95: /* RFC1770 */ + case IPOPT_SECURITY: /* 0x82 */ + case 0x85: /* Extended security */ + case 0x86: /* Commercial security */ + case 0x94: /* Router alert */ + case 0x95: /* RFC1770 */ l = p[i + IPOPT_OLEN]; - if (l < 2) + if (l < 2) { goto invalopt; + } skip = 0; break; default: l = p[i + IPOPT_OLEN]; - if (l < 2) + if (l < 2) { goto invalopt; + } skip = 1; break; } if (l < 1 || hlen - i < l) { - invalopt: +invalopt: ipseclog((LOG_ERR, "ah4_calccksum: invalid IP option " "(type=%02x len=%02x)\n", @@ -1317,10 +1362,12 @@ again: error = EINVAL; goto fail; } - if (skip) + if (skip) { bzero(p + i, l); - if (p[i + IPOPT_OPTVAL] == IPOPT_EOL) + } + if (p[i + IPOPT_OPTVAL] == IPOPT_EOL) { break; + } i += l; } @@ -1334,10 +1381,10 @@ again: hdrtype = (iphdr.ip_p) & 0xff; advancewidth = hlen; break; - } + } case IPPROTO_AH: - { + { struct ah ah; int siz; int hdrsiz; @@ -1345,8 +1392,8 @@ again: m_copydata(m, off, sizeof(ah), (caddr_t)&ah); hdrsiz = (sav->flags & SADB_X_EXT_OLD) - ? sizeof(struct ah) - : sizeof(struct newah); + ? 
sizeof(struct ah) + : sizeof(struct newah); siz = (*algo->sumsiz)(sav); totlen = (ah.ah_len + 2) << 2; @@ -1377,14 +1424,15 @@ again: (algo->update)(&algos, mtod(n, caddr_t), n->m_len); m_free(n); n = NULL; - } else + } else { ah_update_mbuf(m, off, totlen, algo, &algos); + } ahseen++; hdrtype = ah.ah_nxt; advancewidth = totlen; break; - } + } default: ah_update_mbuf(m, off, m->m_pkthdr.len - off, algo, &algos); @@ -1393,8 +1441,9 @@ again: } off += advancewidth; - if (off < m->m_pkthdr.len) + if (off < m->m_pkthdr.len) { goto again; + } if (len < (*algo->sumsiz)(sav)) { error = EINVAL; @@ -1404,13 +1453,15 @@ again: (algo->result)(&algos, (caddr_t) &sumbuf[0], sizeof(sumbuf)); bcopy(&sumbuf[0], ahdat, (*algo->sumsiz)(sav)); - if (n) + if (n) { m_free(n); + } return error; fail: - if (n) + if (n) { m_free(n); + } return error; } #endif @@ -1425,7 +1476,7 @@ fail: */ int ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, - const struct ah_algorithm *algo, struct secasvar *sav) + const struct ah_algorithm *algo, struct secasvar *sav) { int newoff, off; int proto, nxt; @@ -1435,23 +1486,28 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, struct ah_algorithm_state algos; u_char sumbuf[AH_MAXSUMSIZE] __attribute__((aligned(4))); - if ((m->m_flags & M_PKTHDR) == 0) + if ((m->m_flags & M_PKTHDR) == 0) { return EINVAL; + } error = (algo->init)(&algos, sav); - if (error) + if (error) { return error; + } off = 0; proto = IPPROTO_IPV6; nxt = -1; ahseen = 0; - again: +again: newoff = ip6_nexthdr(m, off, proto, &nxt); - if (newoff < 0) + if (newoff < 0) { newoff = m->m_pkthdr.len; - else if (newoff <= off) { + } else if (newoff <= off) { + error = EINVAL; + goto fail; + } else if (m->m_pkthdr.len < newoff) { error = EINVAL; goto fail; } @@ -1475,12 +1531,14 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, ip6copy.ip6_vfc &= ~IPV6_VERSION_MASK; ip6copy.ip6_vfc |= IPV6_VERSION; ip6copy.ip6_hlim = 0; - if (IN6_IS_ADDR_LINKLOCAL(&ip6copy.ip6_src)) + if (IN6_IS_ADDR_LINKLOCAL(&ip6copy.ip6_src)) { ip6copy.ip6_src.s6_addr16[1] = 0x0000; - if (IN6_IS_ADDR_LINKLOCAL(&ip6copy.ip6_dst)) + } + if (IN6_IS_ADDR_LINKLOCAL(&ip6copy.ip6_dst)) { ip6copy.ip6_dst.s6_addr16[1] = 0x0000; + } (algo->update)(&algos, (caddr_t)&ip6copy, - sizeof(struct ip6_hdr)); + sizeof(struct ip6_hdr)); } else { newoff = m->m_pkthdr.len; ah_update_mbuf(m, off, m->m_pkthdr.len - off, algo, @@ -1489,13 +1547,13 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, break; case IPPROTO_AH: - { + { int siz; int hdrsiz; hdrsiz = (sav->flags & SADB_X_EXT_OLD) - ? sizeof(struct ah) - : sizeof(struct newah); + ? 
sizeof(struct ah) + : sizeof(struct newah); siz = (*algo->sumsiz)(sav); /* @@ -1524,15 +1582,16 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, (algo->update)(&algos, mtod(n, caddr_t), n->m_len); m_free(n); n = NULL; - } else + } else { ah_update_mbuf(m, off, newoff - off, algo, &algos); + } ahseen++; break; - } + } - case IPPROTO_HOPOPTS: - case IPPROTO_DSTOPTS: - { + case IPPROTO_HOPOPTS: + case IPPROTO_DSTOPTS: + { struct ip6_ext *ip6e; int hdrlen, optlen; u_int8_t *p, *optend, *optp; @@ -1559,10 +1618,10 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, ip6e = mtod(n, struct ip6_ext *); hdrlen = (ip6e->ip6e_len + 1) << 3; if (newoff - off < hdrlen) { - error = EINVAL; - m_free(n); - n = NULL; - goto fail; + error = EINVAL; + m_free(n); + n = NULL; + goto fail; } p = mtod(n, u_int8_t *); optend = p + hdrlen; @@ -1575,9 +1634,9 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, */ optp = p + 2; while (optp < optend) { - if (optp[0] == IP6OPT_PAD1) + if (optp[0] == IP6OPT_PAD1) { optlen = 1; - else { + } else { if (optp + 2 > optend) { error = EINVAL; m_free(n); @@ -1585,9 +1644,16 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, goto fail; } optlen = optp[1] + 2; + if (optp + optlen > optend) { + error = EINVAL; + m_free(n); + n = NULL; + goto fail; + } - if (optp[0] & IP6OPT_MUTABLE) + if (optp[0] & IP6OPT_MUTABLE) { bzero(optp + 2, optlen - 2); + } } optp += optlen; @@ -1597,16 +1663,16 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, m_free(n); n = NULL; break; - } + } - case IPPROTO_ROUTING: - /* - * For an input packet, we can just calculate `as is'. - * For an output packet, we assume ip6_output have already - * made packet how it will be received at the final - * destination. - */ - /* FALLTHROUGH */ + case IPPROTO_ROUTING: + /* + * For an input packet, we can just calculate `as is'. + * For an output packet, we assume ip6_output have already + * made packet how it will be received at the final + * destination. 
+ */ + /* FALLTHROUGH */ default: ah_update_mbuf(m, off, newoff - off, algo, &algos); @@ -1628,13 +1694,15 @@ ah6_calccksum(struct mbuf *m, caddr_t ahdat, size_t len, bcopy(&sumbuf[0], ahdat, (*algo->sumsiz)(sav)); /* just in case */ - if (n) + if (n) { m_free(n); + } return 0; fail: /* just in case */ - if (n) + if (n) { m_free(n); + } return error; } #endif diff --git a/bsd/netinet6/ah_input.c b/bsd/netinet6/ah_input.c index fac8da28c..2a67501f6 100644 --- a/bsd/netinet6/ah_input.c +++ b/bsd/netinet6/ah_input.c @@ -113,7 +113,7 @@ #if IPSEC_DEBUG #include #else -#define KEYDEBUG(lev,arg) +#define KEYDEBUG(lev, arg) #endif #include @@ -145,7 +145,7 @@ ah4_input(struct mbuf *m, int off) m = m_pullup(m, off + sizeof(struct newah)); if (!m) { ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup;" - "dropping the packet for simplicity\n")); + "dropping the packet for simplicity\n")); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto fail; } @@ -167,8 +167,8 @@ ah4_input(struct mbuf *m, int off) spi = ah->ah_spi; if ((sav = key_allocsa(AF_INET, - (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, - IPPROTO_AH, spi)) == 0) { + (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, + IPPROTO_AH, spi)) == 0) { ipseclog((LOG_WARNING, "IPv4 AH input: no key association found for spi %u\n", (u_int32_t)ntohl(spi))); @@ -179,7 +179,7 @@ ah4_input(struct mbuf *m, int off) printf("DP ah4_input called to allocate SA:0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(sav))); if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) { + && sav->state != SADB_SASTATE_DYING) { ipseclog((LOG_DEBUG, "IPv4 AH input: non-mature/dying SA found for spi %u\n", (u_int32_t)ntohl(spi))); @@ -202,71 +202,71 @@ ah4_input(struct mbuf *m, int off) /* * sanity checks for header, 1. */ - { - int sizoff; + { + int sizoff; - sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4; - - /* - * Here, we do not do "siz1 == siz". This is because the way - * RFC240[34] section 2 is written. They do not require truncation - * to 96 bits. - * For example, Microsoft IPsec stack attaches 160 bits of - * authentication data for both hmac-md5 and hmac-sha1. For hmac-sha1, - * 32 bits of padding is attached. - * - * There are two downsides to this specification. - * They have no real harm, however, they leave us fuzzy feeling. - * - if we attach more than 96 bits of authentication data onto AH, - * we will never notice about possible modification by rogue - * intermediate nodes. - * Since extra bits in AH checksum is never used, this constitutes - * no real issue, however, it is wacky. - * - even if the peer attaches big authentication data, we will never - * notice the difference, since longer authentication data will just - * work. - * - * We may need some clarification in the spec. - */ - if (siz1 < siz) { - ipseclog((LOG_NOTICE, "sum length too short in IPv4 AH input " - "(%lu, should be at least %lu): %s\n", - (u_int32_t)siz1, (u_int32_t)siz, - ipsec4_logpacketstr(ip, spi))); - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - goto fail; - } - if ((ah->ah_len << 2) - sizoff != siz1) { - ipseclog((LOG_NOTICE, "sum length mismatch in IPv4 AH input " - "(%d should be %lu): %s\n", - (ah->ah_len << 2) - sizoff, (u_int32_t)siz1, - ipsec4_logpacketstr(ip, spi))); - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - goto fail; - } + sizoff = (sav->flags & SADB_X_EXT_OLD) ? 
0 : 4; - if (m->m_len < off + sizeof(struct ah) + sizoff + siz1) { - m = m_pullup(m, off + sizeof(struct ah) + sizoff + siz1); - if (!m) { - ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup\n")); + /* + * Here, we do not do "siz1 == siz". This is because the way + * RFC240[34] section 2 is written. They do not require truncation + * to 96 bits. + * For example, Microsoft IPsec stack attaches 160 bits of + * authentication data for both hmac-md5 and hmac-sha1. For hmac-sha1, + * 32 bits of padding is attached. + * + * There are two downsides to this specification. + * They have no real harm, however, they leave us fuzzy feeling. + * - if we attach more than 96 bits of authentication data onto AH, + * we will never notice about possible modification by rogue + * intermediate nodes. + * Since extra bits in AH checksum is never used, this constitutes + * no real issue, however, it is wacky. + * - even if the peer attaches big authentication data, we will never + * notice the difference, since longer authentication data will just + * work. + * + * We may need some clarification in the spec. + */ + if (siz1 < siz) { + ipseclog((LOG_NOTICE, "sum length too short in IPv4 AH input " + "(%lu, should be at least %lu): %s\n", + (u_int32_t)siz1, (u_int32_t)siz, + ipsec4_logpacketstr(ip, spi))); + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + goto fail; + } + if ((ah->ah_len << 2) - sizoff != siz1) { + ipseclog((LOG_NOTICE, "sum length mismatch in IPv4 AH input " + "(%d should be %lu): %s\n", + (ah->ah_len << 2) - sizoff, (u_int32_t)siz1, + ipsec4_logpacketstr(ip, spi))); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto fail; } - /* Expect 32-bit aligned data ptr on strict-align platforms */ - MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); - ip = mtod(m, struct ip *); - ah = (struct ah *)(void *)(((caddr_t)ip) + off); + if (m->m_len < off + sizeof(struct ah) + sizoff + siz1) { + m = m_pullup(m, off + sizeof(struct ah) + sizoff + siz1); + if (!m) { + ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup\n")); + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + goto fail; + } + /* Expect 32-bit aligned data ptr on strict-align platforms */ + MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); + + ip = mtod(m, struct ip *); + ah = (struct ah *)(void *)(((caddr_t)ip) + off); + } } - } /* * check for sequence number. */ if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { - if (ipsec_chkreplay(ntohl(((struct newah *)ah)->ah_seq), sav)) + if (ipsec_chkreplay(ntohl(((struct newah *)ah)->ah_seq), sav)) { ; /*okey*/ - else { + } else { IPSEC_STAT_INCREMENT(ipsecstat.in_ahreplay); ipseclog((LOG_WARNING, "replay packet in IPv4 AH input: %s %s\n", @@ -286,7 +286,7 @@ ah4_input(struct mbuf *m, int off) IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto fail; } - + /* * some of IP header fields are flipped to the host endian. * convert them back to network endian. VERY stupid. 
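Editor's note: the ah_hmac_{md5,sha1,sha2_*}_{init,loop,result} triples patched above all follow the same RFC 2104 shape: init derives ipad/opad from the key and starts the inner hash, loop absorbs the packet one mbuf chunk at a time, and result finishes the inner digest and runs the outer pass. Below is a minimal, self-contained userspace sketch of that pipeline -- not the kernel code. The hmac_sha1_* names and HMAC_B are illustrative, and CommonCrypto's CC_SHA1_* stands in for the in-kernel SHA1Init/SHA1Update/SHA1Final. MD5 and SHA-1 use a 64-byte block; the SHA2-384/512 variants use 128, which is why those routines allocate 128 + 128 bytes of ipad/opad.

#include <string.h>
#include <CommonCrypto/CommonDigest.h>

#define HMAC_B 64                       /* MD5/SHA-1 block size */

struct hmac_sha1 {
	unsigned char ipad[HMAC_B];
	unsigned char opad[HMAC_B];
	CC_SHA1_CTX ctx;                /* the context stashed past ipad/opad
	                                 * at state->foo + 128 in ah_core.c */
};

static void
hmac_sha1_init(struct hmac_sha1 *st, const void *key, size_t klen)
{
	unsigned char k[HMAC_B] = { 0 };
	int i;

	if (klen > HMAC_B) {            /* overlong keys are hashed first */
		CC_SHA1(key, (CC_LONG)klen, k);
	} else {
		memcpy(k, key, klen);
	}
	for (i = 0; i < HMAC_B; i++) {
		st->ipad[i] = k[i] ^ 0x36;
		st->opad[i] = k[i] ^ 0x5c;
	}
	CC_SHA1_Init(&st->ctx);
	CC_SHA1_Update(&st->ctx, st->ipad, HMAC_B);
}

/* one call per chunk, exactly how ah_update_mbuf() drives *_loop() */
static void
hmac_sha1_loop(struct hmac_sha1 *st, const void *p, size_t n)
{
	CC_SHA1_Update(&st->ctx, p, (CC_LONG)n);
}

static void
hmac_sha1_result(struct hmac_sha1 *st,
    unsigned char digest[CC_SHA1_DIGEST_LENGTH])
{
	CC_SHA1_Final(digest, &st->ctx);        /* H(ipad-key | data) */
	CC_SHA1_Init(&st->ctx);
	CC_SHA1_Update(&st->ctx, st->opad, HMAC_B);
	CC_SHA1_Update(&st->ctx, digest, CC_SHA1_DIGEST_LENGTH);
	CC_SHA1_Final(digest, &st->ctx);        /* H(opad-key | inner) */
}

AH then truncates the result: to 96 bits for new-format HMAC-MD5/SHA-1 (ah_sumsiz_1216 returns 12) and to half the digest for the SHA-2 family per RFC 4868 (the ah_sumsiz_sha2_* routines above).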
@@ -305,26 +305,26 @@ ah4_input(struct mbuf *m, int off) ip->ip_len = ntohs(ip->ip_len) - hlen; ip->ip_off = ntohs(ip->ip_off); - { - caddr_t sumpos = NULL; + { + caddr_t sumpos = NULL; - if (sav->flags & SADB_X_EXT_OLD) { - /* RFC 1826 */ - sumpos = (caddr_t)(ah + 1); - } else { - /* RFC 2402 */ - sumpos = (caddr_t)(((struct newah *)ah) + 1); - } + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + sumpos = (caddr_t)(ah + 1); + } else { + /* RFC 2402 */ + sumpos = (caddr_t)(((struct newah *)ah) + 1); + } - if (bcmp(sumpos, cksum, siz) != 0) { - ipseclog((LOG_WARNING, - "checksum mismatch in IPv4 AH input: %s %s\n", - ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); - FREE(cksum, M_TEMP); - IPSEC_STAT_INCREMENT(ipsecstat.in_ahauthfail); - goto fail; + if (bcmp(sumpos, cksum, siz) != 0) { + ipseclog((LOG_WARNING, + "checksum mismatch in IPv4 AH input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + FREE(cksum, M_TEMP); + IPSEC_STAT_INCREMENT(ipsecstat.in_ahauthfail); + goto fail; + } } - } FREE(cksum, M_TEMP); @@ -344,7 +344,7 @@ ah4_input(struct mbuf *m, int off) if (m->m_len < off + sizeof(struct ah) + sizoff + siz1 + hlen) { m = m_pullup(m, off + sizeof(struct ah) - + sizoff + siz1 + hlen); + + sizoff + siz1 + hlen); if (!m) { ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup\n")); @@ -355,7 +355,7 @@ ah4_input(struct mbuf *m, int off) nip = (struct ip *)((u_char *)(ah + 1) + sizoff + siz1); if (nip->ip_src.s_addr != ip->ip_src.s_addr - || nip->ip_dst.s_addr != ip->ip_dst.s_addr) { + || nip->ip_dst.s_addr != ip->ip_dst.s_addr) { m->m_flags &= ~M_AUTHIPHDR; m->m_flags &= ~M_AUTHIPDGM; } @@ -369,7 +369,7 @@ ah4_input(struct mbuf *m, int off) #endif /*0*/ if (m->m_flags & M_AUTHIPHDR - && m->m_flags & M_AUTHIPDGM) { + && m->m_flags & M_AUTHIPDGM) { #if 0 ipseclog((LOG_DEBUG, "IPv4 AH input: authentication succeess\n")); @@ -438,16 +438,16 @@ ah4_input(struct mbuf *m, int off) goto fail; } - if (otos != ip->ip_tos) { - sum = ~ntohs(ip->ip_sum) & 0xffff; - sum += (~otos & 0xffff) + ip->ip_tos; - sum = (sum >> 16) + (sum & 0xffff); - sum += (sum >> 16); /* add carry */ - ip->ip_sum = htons(~sum & 0xffff); - } + if (otos != ip->ip_tos) { + sum = ~ntohs(ip->ip_sum) & 0xffff; + sum += (~otos & 0xffff) + ip->ip_tos; + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); /* add carry */ + ip->ip_sum = htons(~sum & 0xffff); + } if (!key_checktunnelsanity(sav, AF_INET, - (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) { + (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) { ipseclog((LOG_NOTICE, "ipsec tunnel address mismatch " "in IPv4 AH input: %s %s\n", ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); @@ -487,7 +487,7 @@ ah4_input(struct mbuf *m, int off) } bzero(&addr, sizeof(addr)); - ipaddr = (__typeof__(ipaddr))&addr; + ipaddr = (__typeof__(ipaddr)) & addr; ipaddr->sin_family = AF_INET; ipaddr->sin_len = sizeof(*ipaddr); ipaddr->sin_addr = ip->ip_dst; @@ -498,7 +498,7 @@ ah4_input(struct mbuf *m, int off) m->m_pkthdr.rcvif = ifa->ifa_ifp; IFA_REMREF(ifa); } - + // Input via IPSec interface if (sav->sah->ipsec_if != NULL) { if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) { @@ -508,9 +508,10 @@ ah4_input(struct mbuf *m, int off) goto fail; } } - - if (proto_input(PF_INET, m) != 0) + + if (proto_input(PF_INET, m) != 0) { goto fail; + } nxt = IPPROTO_DONE; } else { /* @@ -550,8 +551,8 @@ ah4_input(struct mbuf *m, int off) } DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL, - struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif, - struct ip *, ip, 
struct ip6_hdr *, NULL); + struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif, + struct ip *, ip, struct ip6_hdr *, NULL); if (nxt != IPPROTO_DONE) { // Input via IPSec interface @@ -567,15 +568,16 @@ ah4_input(struct mbuf *m, int off) goto fail; } } - + if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 && ipsec4_in_reject(m, NULL)) { IPSEC_STAT_INCREMENT(ipsecstat.in_polvio); goto fail; } ip_proto_dispatch_in(m, off, nxt, 0); - } else + } else { m_freem(m); + } m = NULL; } done: @@ -595,8 +597,9 @@ fail: (uint64_t)VM_KERNEL_ADDRPERM(sav))); key_freesav(sav, KEY_SADB_UNLOCKED); } - if (m) + if (m) { m_freem(m); + } return; } #endif /* INET */ @@ -639,8 +642,8 @@ ah6_input(struct mbuf **mp, int *offp, int proto) } if ((sav = key_allocsa(AF_INET6, - (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst, - IPPROTO_AH, spi)) == 0) { + (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst, + IPPROTO_AH, spi)) == 0) { ipseclog((LOG_WARNING, "IPv6 AH input: no key association found for spi %u\n", (u_int32_t)ntohl(spi))); @@ -651,7 +654,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) printf("DP ah6_input called to allocate SA:0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(sav))); if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) { + && sav->state != SADB_SASTATE_DYING) { ipseclog((LOG_DEBUG, "IPv6 AH input: non-mature/dying SA found for spi %u; ", (u_int32_t)ntohl(spi))); @@ -674,42 +677,42 @@ ah6_input(struct mbuf **mp, int *offp, int proto) /* * sanity checks for header, 1. */ - { - int sizoff; + { + int sizoff; - sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4; + sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4; - /* - * Here, we do not do "siz1 == siz". See ah4_input() for complete - * description. - */ - if (siz1 < siz) { - ipseclog((LOG_NOTICE, "sum length too short in IPv6 AH input " - "(%lu, should be at least %lu): %s\n", - (u_int32_t)siz1, (u_int32_t)siz, - ipsec6_logpacketstr(ip6, spi))); - IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); - goto fail; - } - if ((ah->ah_len << 2) - sizoff != siz1) { - ipseclog((LOG_NOTICE, "sum length mismatch in IPv6 AH input " - "(%d should be %lu): %s\n", - (ah->ah_len << 2) - sizoff, (u_int32_t)siz1, - ipsec6_logpacketstr(ip6, spi))); - IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); - goto fail; + /* + * Here, we do not do "siz1 == siz". See ah4_input() for complete + * description. + */ + if (siz1 < siz) { + ipseclog((LOG_NOTICE, "sum length too short in IPv6 AH input " + "(%lu, should be at least %lu): %s\n", + (u_int32_t)siz1, (u_int32_t)siz, + ipsec6_logpacketstr(ip6, spi))); + IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); + goto fail; + } + if ((ah->ah_len << 2) - sizoff != siz1) { + ipseclog((LOG_NOTICE, "sum length mismatch in IPv6 AH input " + "(%d should be %lu): %s\n", + (ah->ah_len << 2) - sizoff, (u_int32_t)siz1, + ipsec6_logpacketstr(ip6, spi))); + IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); + goto fail; + } + IP6_EXTHDR_CHECK(m, off, sizeof(struct ah) + sizoff + siz1, + {return IPPROTO_DONE;}); } - IP6_EXTHDR_CHECK(m, off, sizeof(struct ah) + sizoff + siz1, - {return IPPROTO_DONE;}); - } /* * check for sequence number. 
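 * (annotation; not in the Apple sources: only the RFC 2402 "new" AH
 * format carries a sequence number, so the window test below is gated
 * on SADB_X_EXT_OLD being clear and on replay state existing for the
 * SA. ipsec_chkreplay() only checks ah_seq against the window; the
 * window itself is advanced later, once the ICV has verified.)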
*/ if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) { - if (ipsec_chkreplay(ntohl(((struct newah *)ah)->ah_seq), sav)) + if (ipsec_chkreplay(ntohl(((struct newah *)ah)->ah_seq), sav)) { ; /*okey*/ - else { + } else { IPSEC_STAT_INCREMENT(ipsec6stat.in_ahreplay); ipseclog((LOG_WARNING, "replay packet in IPv6 AH input: %s %s\n", @@ -730,7 +733,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); goto fail; } - + if (ah6_calccksum(m, (caddr_t)cksum, siz1, algo, sav)) { FREE(cksum, M_TEMP); IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); @@ -738,26 +741,26 @@ ah6_input(struct mbuf **mp, int *offp, int proto) } IPSEC_STAT_INCREMENT(ipsec6stat.in_ahhist[sav->alg_auth]); - { - caddr_t sumpos = NULL; + { + caddr_t sumpos = NULL; - if (sav->flags & SADB_X_EXT_OLD) { - /* RFC 1826 */ - sumpos = (caddr_t)(ah + 1); - } else { - /* RFC 2402 */ - sumpos = (caddr_t)(((struct newah *)ah) + 1); - } + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1826 */ + sumpos = (caddr_t)(ah + 1); + } else { + /* RFC 2402 */ + sumpos = (caddr_t)(((struct newah *)ah) + 1); + } - if (bcmp(sumpos, cksum, siz) != 0) { - ipseclog((LOG_WARNING, - "checksum mismatch in IPv6 AH input: %s %s\n", - ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); - FREE(cksum, M_TEMP); - IPSEC_STAT_INCREMENT(ipsec6stat.in_ahauthfail); - goto fail; + if (bcmp(sumpos, cksum, siz) != 0) { + ipseclog((LOG_WARNING, + "checksum mismatch in IPv6 AH input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + FREE(cksum, M_TEMP); + IPSEC_STAT_INCREMENT(ipsec6stat.in_ahauthfail); + goto fail; + } } - } FREE(cksum, M_TEMP); @@ -776,12 +779,12 @@ ah6_input(struct mbuf **mp, int *offp, int proto) sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4; IP6_EXTHDR_CHECK(m, off, sizeof(struct ah) + sizoff + siz1 - + sizeof(struct ip6_hdr), - {return IPPROTO_DONE;}); + + sizeof(struct ip6_hdr), + {return IPPROTO_DONE;}); nip6 = (struct ip6_hdr *)((u_char *)(ah + 1) + sizoff + siz1); if (!IN6_ARE_ADDR_EQUAL(&nip6->ip6_src, &ip6->ip6_src) - || !IN6_ARE_ADDR_EQUAL(&nip6->ip6_dst, &ip6->ip6_dst)) { + || !IN6_ARE_ADDR_EQUAL(&nip6->ip6_dst, &ip6->ip6_dst)) { m->m_flags &= ~M_AUTHIPHDR; m->m_flags &= ~M_AUTHIPDGM; } @@ -795,7 +798,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) #endif if (m->m_flags & M_AUTHIPHDR - && m->m_flags & M_AUTHIPDGM) { + && m->m_flags & M_AUTHIPDGM) { #if 0 ipseclog((LOG_DEBUG, "IPv6 AH input: authentication succeess\n")); @@ -838,7 +841,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) * XXX more sanity checks * XXX relationship with gif? 
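 * (annotation; not in the Apple sources: in tunnel mode the outer
 * IPv6 header is stripped only after authentication succeeds;
 * key_checktunnelsanity() then vets the inner addresses against the
 * SA, and the decapsulated packet is re-queued via proto_input() or
 * handed to the IPSec interface, mirroring the IPv4 path above.)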
*/ - u_int32_t flowinfo; /*net endian*/ + u_int32_t flowinfo; /*net endian*/ if (ifamily == AF_INET) { ipseclog((LOG_NOTICE, "ipsec tunnel protocol mismatch " @@ -866,7 +869,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) goto fail; } if (!key_checktunnelsanity(sav, AF_INET6, - (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) { + (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) { ipseclog((LOG_NOTICE, "ipsec tunnel address mismatch " "in IPv6 AH input: %s %s\n", ipsec6_logpacketstr(ip6, spi), @@ -892,7 +895,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) } bzero(&addr, sizeof(addr)); - ip6addr = (__typeof__(ip6addr))&addr; + ip6addr = (__typeof__(ip6addr)) & addr; ip6addr->sin6_family = AF_INET6; ip6addr->sin6_len = sizeof(*ip6addr); ip6addr->sin6_addr = ip6->ip6_dst; @@ -914,9 +917,10 @@ ah6_input(struct mbuf **mp, int *offp, int proto) goto fail; } } - - if (proto_input(PF_INET6, m) != 0) + + if (proto_input(PF_INET6, m) != 0) { goto fail; + } nxt = IPPROTO_DONE; } else { /* @@ -950,7 +954,7 @@ ah6_input(struct mbuf **mp, int *offp, int proto) IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem); goto fail; } - + // Input via IPSec interface if (sav->sah->ipsec_if != NULL) { if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) { @@ -982,8 +986,9 @@ fail: (uint64_t)VM_KERNEL_ADDRPERM(sav))); key_freesav(sav, KEY_SADB_UNLOCKED); } - if (m) + if (m) { m_freem(m); + } return IPPROTO_DONE; } @@ -1000,10 +1005,12 @@ ah6_ctlinput(int cmd, struct sockaddr *sa, void *d) struct sockaddr_in6 *sa6_src, *sa6_dst; if (sa->sa_family != AF_INET6 || - sa->sa_len != sizeof(struct sockaddr_in6)) + sa->sa_len != sizeof(struct sockaddr_in6)) { return; - if ((unsigned)cmd >= PRC_NCMDS) + } + if ((unsigned)cmd >= PRC_NCMDS) { return; + } /* if the parameter is from icmp6, decode it. */ if (d != NULL) { @@ -1023,8 +1030,9 @@ ah6_ctlinput(int cmd, struct sockaddr *sa, void *d) */ /* check if we can safely examine src and dst ports */ - if (m->m_pkthdr.len < off + sizeof(ah)) + if (m->m_pkthdr.len < off + sizeof(ah)) { return; + } if (m->m_len < off + sizeof(ah)) { /* @@ -1033,8 +1041,9 @@ ah6_ctlinput(int cmd, struct sockaddr *sa, void *d) */ m_copydata(m, off, sizeof(ah), (caddr_t)&ah); ahp = &ah; - } else + } else { ahp = (struct newah *)(void *)(mtod(m, caddr_t) + off); + } if (cmd == PRC_MSGSIZE) { int valid = 0; @@ -1046,13 +1055,14 @@ ah6_ctlinput(int cmd, struct sockaddr *sa, void *d) sa6_src = ip6cp->ip6c_src; sa6_dst = (struct sockaddr_in6 *)(void *)sa; sav = key_allocsa(AF_INET6, - (caddr_t)&sa6_src->sin6_addr, - (caddr_t)&sa6_dst->sin6_addr, - IPPROTO_AH, ahp->ah_spi); + (caddr_t)&sa6_src->sin6_addr, + (caddr_t)&sa6_dst->sin6_addr, + IPPROTO_AH, ahp->ah_spi); if (sav) { if (sav->state == SADB_SASTATE_MATURE || - sav->state == SADB_SASTATE_DYING) + sav->state == SADB_SASTATE_DYING) { valid++; + } key_freesav(sav, KEY_SADB_UNLOCKED); } diff --git a/bsd/netinet6/ah_output.c b/bsd/netinet6/ah_output.c index 1e723fa3f..d41e2f679 100644 --- a/bsd/netinet6/ah_output.c +++ b/bsd/netinet6/ah_output.c @@ -2,7 +2,7 @@ * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -119,33 +119,36 @@ extern lck_mtx_t *sadb_mutex; size_t ah_hdrsiz(struct ipsecrequest *isr) { - /* sanity check */ - if (isr == NULL) + if (isr == NULL) { panic("ah_hdrsiz: NULL was passed.\n"); + } - if (isr->saidx.proto != IPPROTO_AH) + if (isr->saidx.proto != IPPROTO_AH) { panic("unsupported mode passed to ah_hdrsiz"); + } #if 0 { - lck_mtx_lock(sadb_mutex); const struct ah_algorithm *algo; size_t hdrsiz; /*%%%%% this needs to change - no sav in ipsecrequest any more */ - if (isr->sav == NULL) + if (isr->sav == NULL) { goto estimate; + } if (isr->sav->state != SADB_SASTATE_MATURE - && isr->sav->state != SADB_SASTATE_DYING) + && isr->sav->state != SADB_SASTATE_DYING) { goto estimate; - + } + /* we need transport mode AH. */ algo = ah_algorithm_lookup(isr->sav->alg_auth); - if (!algo) + if (!algo) { goto estimate; - + } + /* * XXX * right now we don't calculate the padding size. simply @@ -154,11 +157,12 @@ ah_hdrsiz(struct ipsecrequest *isr) * XXX variable size padding support */ hdrsiz = (((*algo->sumsiz)(isr->sav) + 3) & ~(4 - 1)); - if (isr->sav->flags & SADB_X_EXT_OLD) + if (isr->sav->flags & SADB_X_EXT_OLD) { hdrsiz += sizeof(struct ah); - else + } else { hdrsiz += sizeof(struct newah); - + } + lck_mtx_unlock(sadb_mutex); return hdrsiz; } @@ -166,7 +170,7 @@ estimate: #endif - //lck_mtx_unlock(sadb_mutex); + //lck_mtx_unlock(sadb_mutex); /* ASSUMING: * sizeof(struct newah) > sizeof(struct ah). * 16 = (16 + 3) & ~(4 - 1).
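The estimate above relies on the (n + 3) & ~(4 - 1) idiom that ah_hdrsiz and ah_hdrlen use to round an authenticator size up to the next 32-bit boundary. A minimal standalone sketch of that rounding, for illustration only (the sample ICV sizes are assumptions, not values taken from the kernel's algorithm tables):

#include <stdio.h>
#include <stddef.h>

/* Round a byte count up to the next multiple of 4, as the AH code does
 * for ICV sizes via (sumsiz + 3) & ~(4 - 1). */
static size_t
ah_round4(size_t n)
{
	return (n + 3) & ~(size_t)(4 - 1);
}

int
main(void)
{
	printf("%zu\n", ah_round4(12)); /* 12: a 96-bit ICV is already aligned */
	printf("%zu\n", ah_round4(13)); /* 16: pads up, matching 16 = (16 + 3) & ~(4 - 1) */
	return 0;
}

Keeping the payload length a multiple of four is also what lets ahdr->ah_len = (plen >> 2) + 1 further down express the header length in 32-bit words.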
@@ -189,9 +193,9 @@ ah4_output(struct mbuf *m, struct secasvar *sav) u_int32_t spi; u_char *ahdrpos; u_char *ahsumpos = NULL; - size_t hlen = 0; /*IP header+option in bytes*/ - size_t plen = 0; /*AH payload size in bytes*/ - size_t ahlen = 0; /*plen + sizeof(ah)*/ + size_t hlen = 0; /*IP header+option in bytes*/ + size_t plen = 0; /*AH payload size in bytes*/ + size_t ahlen = 0; /*plen + sizeof(ah)*/ struct ip *ip; struct in_addr dst = { 0 }; struct in_addr *finaldst; @@ -201,10 +205,10 @@ ah4_output(struct mbuf *m, struct secasvar *sav) if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) { ip = mtod(m, struct ip *); ipseclog((LOG_DEBUG, "ah4_output: internal error: " - "sav->replay is null: %x->%x, SPI=%u\n", - (u_int32_t)ntohl(ip->ip_src.s_addr), - (u_int32_t)ntohl(ip->ip_dst.s_addr), - (u_int32_t)ntohl(sav->spi))); + "sav->replay is null: %x->%x, SPI=%u\n", + (u_int32_t)ntohl(ip->ip_src.s_addr), + (u_int32_t)ntohl(ip->ip_dst.s_addr), + (u_int32_t)ntohl(sav->spi))); IPSEC_STAT_INCREMENT(ipsecstat.out_inval); m_freem(m); return EINVAL; @@ -243,8 +247,9 @@ ah4_output(struct mbuf *m, struct secasvar *sav) hlen = ip->ip_hl << 2; #endif - if (m->m_len != hlen) + if (m->m_len != hlen) { panic("ah4_output: assumption failed (first mbuf length)"); + } if (M_LEADINGSPACE(m->m_next) < ahlen) { struct mbuf *n; MGET(n, M_DONTWAIT, MT_DATA); @@ -266,7 +271,7 @@ ah4_output(struct mbuf *m, struct secasvar *sav) ahdrpos = mtod(m->m_next, u_char *); } - ip = mtod(m, struct ip *); /*just to be sure*/ + ip = mtod(m, struct ip *); /*just to be sure*/ /* * initialize AH. @@ -286,7 +291,7 @@ ah4_output(struct mbuf *m, struct secasvar *sav) ahdr = (struct newah *)(void *)ahdrpos; ahsumpos = (u_char *)(ahdr + 1); - ahdr->ah_len = (plen >> 2) + 1; /* plus one for seq# */ + ahdr->ah_len = (plen >> 2) + 1; /* plus one for seq# */ ahdr->ah_nxt = ip->ip_p; ahdr->ah_reserve = htons(0); ahdr->ah_spi = spi; @@ -316,9 +321,9 @@ ah4_output(struct mbuf *m, struct secasvar *sav) * modify IPv4 header. 
*/ ip->ip_p = IPPROTO_AH; - if (ahlen < (IP_MAXPACKET - ntohs(ip->ip_len))) + if (ahlen < (IP_MAXPACKET - ntohs(ip->ip_len))) { ip->ip_len = htons(ntohs(ip->ip_len) + ahlen); - else { + } else { ipseclog((LOG_ERR, "IPv4 AH output: size exceeds limit\n")); IPSEC_STAT_INCREMENT(ipsecstat.out_inval); m_freem(m); @@ -353,7 +358,7 @@ ah4_output(struct mbuf *m, struct secasvar *sav) } if (finaldst) { - ip = mtod(m, struct ip *); /*just to make sure*/ + ip = mtod(m, struct ip *); /*just to make sure*/ ip->ip_dst.s_addr = dst.s_addr; } lck_mtx_lock(sadb_stat_mutex); @@ -372,21 +377,22 @@ ah_hdrlen(struct secasvar *sav) { const struct ah_algorithm *algo; int plen, ahlen; - + algo = ah_algorithm_lookup(sav->alg_auth); - if (!algo) + if (!algo) { return 0; + } if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1826 */ - plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /*XXX pad to 8byte?*/ + plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /*XXX pad to 8byte?*/ ahlen = plen + sizeof(struct ah); } else { /* RFC 2402 */ - plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /*XXX pad to 8byte?*/ + plen = ((*algo->sumsiz)(sav) + 3) & ~(4 - 1); /*XXX pad to 8byte?*/ ahlen = plen + sizeof(struct newah); } - return(ahlen); + return ahlen; } #if INET6 @@ -395,14 +401,14 @@ ah_hdrlen(struct secasvar *sav) */ int ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, - struct secasvar *sav) + struct secasvar *sav) { struct mbuf *mprev; struct mbuf *mah; const struct ah_algorithm *algo; u_int32_t spi; u_char *ahsumpos = NULL; - size_t plen; /*AH payload size in bytes*/ + size_t plen; /*AH payload size in bytes*/ int error = 0; int ahlen; struct ip6_hdr *ip6; @@ -414,11 +420,13 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, } ahlen = ah_hdrlen(sav); - if (ahlen == 0) + if (ahlen == 0) { return 0; + } - for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) + for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) { ; + } if (!mprev || mprev->m_next != md) { ipseclog((LOG_DEBUG, "ah6_output: md is not in chain\n")); m_freem(m); @@ -455,8 +463,8 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, if ((sav->flags & SADB_X_EXT_OLD) == 0 && !sav->replay) { ipseclog((LOG_DEBUG, "ah6_output: internal error: " - "sav->replay is null: SPI=%u\n", - (u_int32_t)ntohl(sav->spi))); + "sav->replay is null: SPI=%u\n", + (u_int32_t)ntohl(sav->spi))); IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); m_freem(m); return EINVAL; @@ -493,14 +501,14 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, ahsumpos = (u_char *)(ahdr + 1); ahdr->ah_nxt = *nexthdrp; *nexthdrp = IPPROTO_AH; - ahdr->ah_len = (plen >> 2) + 1; /* plus one for seq# */ + ahdr->ah_len = (plen >> 2) + 1; /* plus one for seq# */ ahdr->ah_reserve = htons(0); ahdr->ah_spi = spi; if (sav->replay->count == ~0) { if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) { /* XXX Is it noisy ? */ ipseclog((LOG_WARNING, - "replay counter overflowed. %s\n", + "replay counter overflowed. 
%s\n", ipsec_logsastr(sav))); IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); m_freem(m); @@ -532,7 +540,7 @@ ah6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, } IPSEC_STAT_INCREMENT(ipsec6stat.out_ahhist[sav->alg_auth]); - return(error); + return error; } #endif @@ -554,8 +562,9 @@ ah4_finaldst(struct mbuf *m) int i; int hlen; - if (!m) + if (!m) { panic("ah4_finaldst: m == NULL"); + } ip = mtod(m, struct ip *); #ifdef _IP_VHL hlen = IP_VHL_HL(ip->ip_vhl) << 2; @@ -569,8 +578,9 @@ ah4_finaldst(struct mbuf *m) return NULL; } - if (hlen == sizeof(struct ip)) + if (hlen == sizeof(struct ip)) { return NULL; + } optlen = hlen - sizeof(struct ip); if (optlen < 0) { @@ -582,18 +592,20 @@ ah4_finaldst(struct mbuf *m) q = (u_char *)(ip + 1); i = 0; while (i < optlen) { - if (i + IPOPT_OPTVAL >= optlen) + if (i + IPOPT_OPTVAL >= optlen) { return NULL; + } if (q[i + IPOPT_OPTVAL] == IPOPT_EOL || q[i + IPOPT_OPTVAL] == IPOPT_NOP || - i + IPOPT_OLEN < optlen) + i + IPOPT_OLEN < optlen) { ; - else + } else { return NULL; + } switch (q[i + IPOPT_OPTVAL]) { case IPOPT_EOL: - i = optlen; /* bye */ + i = optlen; /* bye */ break; case IPOPT_NOP: i++; diff --git a/bsd/netinet6/dest6.c b/bsd/netinet6/dest6.c index 4feef21cb..ef80246c9 100644 --- a/bsd/netinet6/dest6.c +++ b/bsd/netinet6/dest6.c @@ -89,20 +89,21 @@ dest6_input(struct mbuf **mp, int *offp, int proto) optlen = *(opt + 1) + 2; break; - default: /* unknown option */ + default: /* unknown option */ optlen = ip6_unknown_opt(opt, m, opt - mtod(m, u_int8_t *)); - if (optlen == -1) - return (IPPROTO_DONE); + if (optlen == -1) { + return IPPROTO_DONE; + } optlen += 2; break; } } *offp = off; - return (dstopts->ip6d_nxt); + return dstopts->ip6d_nxt; - bad: +bad: m_freem(m); - return (IPPROTO_DONE); + return IPPROTO_DONE; } diff --git a/bsd/netinet6/esp.h b/bsd/netinet6/esp.h index 94f6e26b7..c49d5eff3 100644 --- a/bsd/netinet6/esp.h +++ b/bsd/netinet6/esp.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -68,7 +68,7 @@ struct esp { - u_int32_t esp_spi; /* ESP */ + u_int32_t esp_spi; /* ESP */ /*variable size, 32bit bound*/ /* Initialization Vector */ /*variable size*/ /* Payload data */ /*variable size*/ /* padding */ @@ -79,8 +79,8 @@ struct esp { }; struct newesp { - u_int32_t esp_spi; /* ESP */ - u_int32_t esp_seq; /* Sequence number */ + u_int32_t esp_spi; /* ESP */ + u_int32_t esp_seq; /* Sequence number */ /*variable size*/ /* (IV and) Payload data */ /*variable size*/ /* padding */ /*8bit*/ /* pad size */ @@ -90,8 +90,8 @@ struct newesp { }; struct esptail { - u_int8_t esp_padlen; /* pad length */ - u_int8_t esp_nxt; /* Next header */ + u_int8_t esp_padlen; /* pad length */ + u_int8_t esp_nxt; /* Next header */ /*variable size, 32bit bound*/ /* Authentication data (new IPsec)*/ }; @@ -99,24 +99,24 @@ struct esptail { struct secasvar; struct esp_algorithm { - size_t padbound; /* pad boundary, in byte */ - int ivlenval; /* iv length, in byte */ + size_t padbound; /* pad boundary, in byte */ + int ivlenval; /* iv length, in byte */ int (*mature)(struct secasvar *); - int keymin; /* in bits */ - int keymax; /* in bits */ + int keymin; /* in bits */ + int keymax; /* in bits */ int (*schedlen)(const struct esp_algorithm *); const char *name; int (*ivlen)(const struct esp_algorithm *, struct secasvar *); int (*decrypt)(struct mbuf *, size_t, - struct secasvar *, const struct esp_algorithm *, int); + struct secasvar *, const struct esp_algorithm *, int); int (*encrypt)(struct mbuf *, size_t, size_t, - struct secasvar *, const struct esp_algorithm *, int); + struct secasvar *, const struct esp_algorithm *, int); /* not supposed to be called directly */ int (*schedule)(const struct esp_algorithm *, struct secasvar *); int (*blockdecrypt)(const struct esp_algorithm *, - struct secasvar *, u_int8_t *, u_int8_t *); + struct secasvar *, u_int8_t *, u_int8_t *); int (*blockencrypt)(const struct esp_algorithm *, - struct secasvar *, u_int8_t *, u_int8_t *); + struct secasvar *, u_int8_t *, u_int8_t *); /* For Authenticated Encryption Methods */ size_t icvlen; int (*finalizedecrypt)(struct secasvar *, u_int8_t *, uint); @@ -134,7 +134,7 @@ extern size_t esp_hdrsiz(struct ipsecrequest *); extern int esp_schedule(const struct esp_algorithm *, struct secasvar *); extern int esp_auth(struct mbuf *, size_t, size_t, - struct secasvar *, u_char *); + struct secasvar *, u_char *); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET6_ESP_H_ */ diff --git a/bsd/netinet6/esp6.h b/bsd/netinet6/esp6.h index 384ec59dc..cb4b80b9f 100644 --- a/bsd/netinet6/esp6.h +++ b/bsd/netinet6/esp6.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -68,7 +68,7 @@ #ifdef BSD_KERNEL_PRIVATE extern int esp6_output(struct mbuf *, u_char *, struct mbuf *, - struct secasvar *); + struct secasvar *); extern int esp6_input(struct mbuf **, int *, int); extern int esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface); diff --git a/bsd/netinet6/esp_chachapoly.c b/bsd/netinet6/esp_chachapoly.c index 2c68e6f07..a176a64f4 100644 --- a/bsd/netinet6/esp_chachapoly.c +++ b/bsd/netinet6/esp_chachapoly.c @@ -48,18 +48,18 @@ #include #include -#define ESP_CHACHAPOLY_SALT_LEN 4 -#define ESP_CHACHAPOLY_KEY_LEN 32 -#define ESP_CHACHAPOLY_NONCE_LEN 12 +#define ESP_CHACHAPOLY_SALT_LEN 4 +#define ESP_CHACHAPOLY_KEY_LEN 32 +#define ESP_CHACHAPOLY_NONCE_LEN 12 // The minimum alignment is documented in KALLOC_LOG2_MINALIGN // which isn't accessible from here. Current minimum is 8. _Static_assert(_Alignof(chacha20poly1305_ctx) <= 8, - "Alignment guarantee is broken"); + "Alignment guarantee is broken"); -#if ((( 8 * (ESP_CHACHAPOLY_KEY_LEN + ESP_CHACHAPOLY_SALT_LEN)) != ESP_CHACHAPOLY_KEYBITS_WITH_SALT) || \ - (ESP_CHACHAPOLY_KEY_LEN != CCCHACHA20_KEY_NBYTES) || \ - (ESP_CHACHAPOLY_NONCE_LEN != CCCHACHA20POLY1305_NONCE_NBYTES)) +#if (((8 * (ESP_CHACHAPOLY_KEY_LEN + ESP_CHACHAPOLY_SALT_LEN)) != ESP_CHACHAPOLY_KEYBITS_WITH_SALT) || \ + (ESP_CHACHAPOLY_KEY_LEN != CCCHACHA20_KEY_NBYTES) || \ + (ESP_CHACHAPOLY_NONCE_LEN != CCCHACHA20POLY1305_NONCE_NBYTES)) #error "Invalid sizes" #endif @@ -72,11 +72,11 @@ typedef struct _esp_chachapoly_ctx { } esp_chachapoly_ctx_s, *esp_chachapoly_ctx_t; -#define ESP_ASSERT(_cond, _format, ...) \ - do { \ - if (!(_cond)) { \ - panic("%s:%d " _format, __FUNCTION__, __LINE__, ##__VA_ARGS__); \ - } \ +#define ESP_ASSERT(_cond, _format, ...) 
\ + do { \ + if (!(_cond)) { \ + panic("%s:%d " _format, __FUNCTION__, __LINE__, ##__VA_ARGS__); \ + } \ } while (0) #define ESP_CHECK_ARG(_arg) ESP_ASSERT(_arg != NULL, #_arg " is NULL") @@ -99,45 +99,45 @@ esp_chachapoly_mature(struct secasvar *sav) if ((sav->flags & SADB_X_EXT_OLD) != 0) { esp_log_err("ChaChaPoly is incompatible with SADB_X_EXT_OLD, SPI 0x%08x", - ntohl(sav->spi)); + ntohl(sav->spi)); return 1; } if ((sav->flags & SADB_X_EXT_DERIV) != 0) { esp_log_err("ChaChaPoly is incompatible with SADB_X_EXT_DERIV, SPI 0x%08x", - ntohl(sav->spi)); + ntohl(sav->spi)); return 1; } if (sav->alg_enc != SADB_X_EALG_CHACHA20POLY1305) { esp_log_err("ChaChaPoly unsupported algorithm %d, SPI 0x%08x", - sav->alg_enc, ntohl(sav->spi)); + sav->alg_enc, ntohl(sav->spi)); return 1; } if (sav->key_enc == NULL) { esp_log_err("ChaChaPoly key is missing, SPI 0x%08x", - ntohl(sav->spi)); + ntohl(sav->spi)); return 1; } algo = esp_algorithm_lookup(sav->alg_enc); if (algo == NULL) { esp_log_err("ChaChaPoly lookup failed for algorithm %d, SPI 0x%08x", - sav->alg_enc, ntohl(sav->spi)); + sav->alg_enc, ntohl(sav->spi)); return 1; } if (sav->key_enc->sadb_key_bits != ESP_CHACHAPOLY_KEYBITS_WITH_SALT) { esp_log_err("ChaChaPoly invalid key length %d bits, SPI 0x%08x", - sav->key_enc->sadb_key_bits, ntohl(sav->spi)); + sav->key_enc->sadb_key_bits, ntohl(sav->spi)); return 1; } esp_log_default("ChaChaPoly Mature SPI 0x%08x%s %s dir %u state %u mode %u", - ntohl(sav->spi), - (((sav->flags & SADB_X_EXT_IIV) != 0) ? " IIV" : ""), - ((sav->sah->ipsec_if != NULL) ? if_name(sav->sah->ipsec_if) : "NONE"), - sav->sah->dir, sav->sah->state, sav->sah->saidx.mode); + ntohl(sav->spi), + (((sav->flags & SADB_X_EXT_IIV) != 0) ? " IIV" : ""), + ((sav->sah->ipsec_if != NULL) ? if_name(sav->sah->ipsec_if) : "NONE"), + sav->sah->dir, sav->sah->state, sav->sah->saidx.mode); return 0; } @@ -150,7 +150,7 @@ esp_chachapoly_schedlen(__unused const struct esp_algorithm *algo) int esp_chachapoly_schedule(__unused const struct esp_algorithm *algo, - struct secasvar *sav) + struct secasvar *sav) { esp_chachapoly_ctx_t esp_ccp_ctx; int rc = 0; @@ -158,7 +158,7 @@ esp_chachapoly_schedule(__unused const struct esp_algorithm *algo, ESP_CHECK_ARG(sav); if (_KEYLEN(sav->key_enc) != ESP_CHACHAPOLY_KEY_LEN + ESP_CHACHAPOLY_SALT_LEN) { esp_log_err("ChaChaPoly Invalid key len %u, SPI 0x%08x", - _KEYLEN(sav->key_enc), ntohl(sav->spi)); + _KEYLEN(sav->key_enc), ntohl(sav->spi)); return EINVAL; } LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); @@ -168,40 +168,40 @@ esp_chachapoly_schedule(__unused const struct esp_algorithm *algo, if (sav->ivlen != (esp_ccp_ctx->ccp_implicit_iv ? 0 : ESP_CHACHAPOLY_IV_LEN)) { esp_log_err("ChaChaPoly Invalid ivlen %u, SPI 0x%08x", - sav->ivlen, ntohl(sav->spi)); + sav->ivlen, ntohl(sav->spi)); return EINVAL; } rc = chacha20poly1305_init(&esp_ccp_ctx->ccp_ctx, - (const uint8_t *)_KEYBUF(sav->key_enc)); + (const uint8_t *)_KEYBUF(sav->key_enc)); if (rc != 0) { esp_log_err("ChaChaPoly chacha20poly1305_init failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } memcpy(esp_ccp_ctx->ccp_salt, - (const uint8_t *)_KEYBUF(sav->key_enc) + ESP_CHACHAPOLY_KEY_LEN, - sizeof(esp_ccp_ctx->ccp_salt)); + (const uint8_t *)_KEYBUF(sav->key_enc) + ESP_CHACHAPOLY_KEY_LEN, + sizeof(esp_ccp_ctx->ccp_salt)); esp_log_default("ChaChaPoly Schedule SPI 0x%08x%s %s dir %u state %u mode %u", - ntohl(sav->spi), (esp_ccp_ctx->ccp_implicit_iv ? " IIV" : ""), - ((sav->sah->ipsec_if != NULL) ? 
if_name(sav->sah->ipsec_if) : "NONE"), - sav->sah->dir, sav->sah->state, sav->sah->saidx.mode); + ntohl(sav->spi), (esp_ccp_ctx->ccp_implicit_iv ? " IIV" : ""), + ((sav->sah->ipsec_if != NULL) ? if_name(sav->sah->ipsec_if) : "NONE"), + sav->sah->dir, sav->sah->state, sav->sah->saidx.mode); return 0; } int esp_chachapoly_ivlen(const struct esp_algorithm *algo, - struct secasvar *sav) + struct secasvar *sav) { ESP_CHECK_ARG(algo); if (sav != NULL && - ((sav->sched != NULL && ((esp_chachapoly_ctx_t)sav->sched)->ccp_implicit_iv) || - ((sav->flags & SADB_X_EXT_IIV) != 0))) { + ((sav->sched != NULL && ((esp_chachapoly_ctx_t)sav->sched)->ccp_implicit_iv) || + ((sav->flags & SADB_X_EXT_IIV) != 0))) { return 0; } else { return algo->ivlenval; @@ -210,8 +210,8 @@ esp_chachapoly_ivlen(const struct esp_algorithm *algo, int esp_chachapoly_encrypt_finalize(struct secasvar *sav, - unsigned char *tag, - unsigned int tag_bytes) + unsigned char *tag, + unsigned int tag_bytes) { esp_chachapoly_ctx_t esp_ccp_ctx; int rc = 0; @@ -220,7 +220,7 @@ esp_chachapoly_encrypt_finalize(struct secasvar *sav, ESP_CHECK_ARG(tag); if (tag_bytes != ESP_CHACHAPOLY_ICV_LEN) { esp_log_err("ChaChaPoly Invalid tag_bytes %u, SPI 0x%08x", - tag_bytes, ntohl(sav->spi)); + tag_bytes, ntohl(sav->spi)); return EINVAL; } @@ -228,7 +228,7 @@ esp_chachapoly_encrypt_finalize(struct secasvar *sav, rc = chacha20poly1305_finalize(&esp_ccp_ctx->ccp_ctx, tag); if (rc != 0) { esp_log_err("ChaChaPoly chacha20poly1305_finalize failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } return 0; @@ -236,8 +236,8 @@ esp_chachapoly_encrypt_finalize(struct secasvar *sav, int esp_chachapoly_decrypt_finalize(struct secasvar *sav, - unsigned char *tag, - unsigned int tag_bytes) + unsigned char *tag, + unsigned int tag_bytes) { esp_chachapoly_ctx_t esp_ccp_ctx; int rc = 0; @@ -246,7 +246,7 @@ esp_chachapoly_decrypt_finalize(struct secasvar *sav, ESP_CHECK_ARG(tag); if (tag_bytes != ESP_CHACHAPOLY_ICV_LEN) { esp_log_err("ChaChaPoly Invalid tag_bytes %u, SPI 0x%08x", - tag_bytes, ntohl(sav->spi)); + tag_bytes, ntohl(sav->spi)); return EINVAL; } @@ -254,7 +254,7 @@ esp_chachapoly_decrypt_finalize(struct secasvar *sav, rc = chacha20poly1305_verify(&esp_ccp_ctx->ccp_ctx, tag); if (rc != 0) { esp_packet_log_err("ChaChaPoly chacha20poly1305_verify failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } return 0; @@ -262,11 +262,11 @@ esp_chachapoly_decrypt_finalize(struct secasvar *sav, int esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain - size_t off, // offset to ESP header - __unused size_t plen, - struct secasvar *sav, - __unused const struct esp_algorithm *algo, - int ivlen) + size_t off, // offset to ESP header + __unused size_t plen, + struct secasvar *sav, + __unused const struct esp_algorithm *algo, + int ivlen) { struct mbuf *s = m; // this mbuf int32_t soff = 0; // offset from the head of mbuf chain (m) to head of this mbuf (s) @@ -290,20 +290,20 @@ esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain if (ivlen != (esp_ccp_ctx->ccp_implicit_iv ? 
0 : ESP_CHACHAPOLY_IV_LEN)) { m_freem(m); esp_log_err("ChaChaPoly Invalid ivlen %u, SPI 0x%08x", - ivlen, ntohl(sav->spi)); + ivlen, ntohl(sav->spi)); return EINVAL; } if (sav->ivlen != ivlen) { m_freem(m); esp_log_err("ChaChaPoly Invalid sav->ivlen %u, SPI 0x%08x", - sav->ivlen, ntohl(sav->spi)); + sav->ivlen, ntohl(sav->spi)); return EINVAL; } // check if total packet length is enough to contain ESP + IV if (m->m_pkthdr.len < bodyoff) { esp_log_err("ChaChaPoly Packet too short %d < %zu, SPI 0x%08x", - m->m_pkthdr.len, bodyoff, ntohl(sav->spi)); + m->m_pkthdr.len, bodyoff, ntohl(sav->spi)); m_freem(m); return EINVAL; } @@ -312,7 +312,7 @@ esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain if (rc != 0) { m_freem(m); esp_log_err("ChaChaPoly chacha20poly1305_reset failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } @@ -328,18 +328,18 @@ esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain memcpy(nonce, esp_ccp_ctx->ccp_salt, ESP_CHACHAPOLY_SALT_LEN); memset(((uint8_t *)nonce) + ESP_CHACHAPOLY_SALT_LEN, 0, 4); memcpy(((uint8_t *)nonce) + ESP_CHACHAPOLY_SALT_LEN + 4, - &esp_hdr.esp_seq, sizeof(esp_hdr.esp_seq)); + &esp_hdr.esp_seq, sizeof(esp_hdr.esp_seq)); _Static_assert(4 + sizeof(esp_hdr.esp_seq) == ESP_CHACHAPOLY_IV_LEN, - "Bad IV length"); + "Bad IV length"); _Static_assert(ESP_CHACHAPOLY_SALT_LEN + ESP_CHACHAPOLY_IV_LEN == sizeof(nonce), - "Bad nonce length"); + "Bad nonce length"); rc = chacha20poly1305_setnonce(&esp_ccp_ctx->ccp_ctx, (uint8_t *)nonce); if (rc != 0) { m_freem(m); esp_log_err("ChaChaPoly chacha20poly1305_setnonce failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } @@ -351,12 +351,12 @@ esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain // Set Additional Authentication Data (AAD) rc = chacha20poly1305_aad(&esp_ccp_ctx->ccp_ctx, - sizeof(esp_hdr), - (void *)&esp_hdr); + sizeof(esp_hdr), + (void *)&esp_hdr); if (rc != 0) { m_freem(m); esp_log_err("ChaChaPoly chacha20poly1305_aad failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } @@ -380,11 +380,11 @@ esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain sp = mtod(s, uint8_t *) + sn; rc = chacha20poly1305_encrypt(&esp_ccp_ctx->ccp_ctx, - len, sp, sp); + len, sp, sp); if (rc != 0) { m_freem(m); esp_log_err("ChaChaPoly chacha20poly1305_encrypt failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } @@ -395,7 +395,7 @@ esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain if (s == NULL && soff != m->m_pkthdr.len) { m_freem(m); esp_log_err("ChaChaPoly not enough mbufs %d %d, SPI 0x%08x", - soff, m->m_pkthdr.len, ntohl(sav->spi)); + soff, m->m_pkthdr.len, ntohl(sav->spi)); return EFBIG; } return 0; @@ -403,10 +403,10 @@ esp_chachapoly_encrypt(struct mbuf *m, // head of mbuf chain int esp_chachapoly_decrypt(struct mbuf *m, // head of mbuf chain - size_t off, // offset to ESP header - struct secasvar *sav, - __unused const struct esp_algorithm *algo, - int ivlen) + size_t off, // offset to ESP header + struct secasvar *sav, + __unused const struct esp_algorithm *algo, + int ivlen) { struct mbuf *s = m; // this mbuf int32_t soff = 0; // offset from the head of mbuf chain (m) to head of this mbuf (s) @@ -430,20 +430,20 @@ esp_chachapoly_decrypt(struct mbuf *m, // head of mbuf chain if (ivlen != (esp_ccp_ctx->ccp_implicit_iv ? 
0 : ESP_CHACHAPOLY_IV_LEN)) { m_freem(m); esp_log_err("ChaChaPoly Invalid ivlen %u, SPI 0x%08x", - ivlen, ntohl(sav->spi)); + ivlen, ntohl(sav->spi)); return EINVAL; } if (sav->ivlen != ivlen) { m_freem(m); esp_log_err("ChaChaPoly Invalid sav->ivlen %u, SPI 0x%08x", - sav->ivlen, ntohl(sav->spi)); + sav->ivlen, ntohl(sav->spi)); return EINVAL; } // check if total packet length is enough to contain ESP + IV if (m->m_pkthdr.len < bodyoff) { esp_packet_log_err("ChaChaPoly Packet too short %d < %zu, SPI 0x%08x", - m->m_pkthdr.len, bodyoff, ntohl(sav->spi)); + m->m_pkthdr.len, bodyoff, ntohl(sav->spi)); m_freem(m); return EINVAL; } @@ -452,7 +452,7 @@ esp_chachapoly_decrypt(struct mbuf *m, // head of mbuf chain if (rc != 0) { m_freem(m); esp_log_err("ChaChaPoly chacha20poly1305_reset failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } @@ -465,32 +465,32 @@ esp_chachapoly_decrypt(struct mbuf *m, // head of mbuf chain // IV is implicit (4 zero bytes followed by the ESP sequence number) memset(((uint8_t *)nonce) + ESP_CHACHAPOLY_SALT_LEN, 0, 4); memcpy(((uint8_t *)nonce) + ESP_CHACHAPOLY_SALT_LEN + 4, - &esp_hdr.esp_seq, sizeof(esp_hdr.esp_seq)); + &esp_hdr.esp_seq, sizeof(esp_hdr.esp_seq)); _Static_assert(4 + sizeof(esp_hdr.esp_seq) == ESP_CHACHAPOLY_IV_LEN, "Bad IV length"); } else { // copy IV from packet m_copydata(m, ivoff, ESP_CHACHAPOLY_IV_LEN, ((uint8_t *)nonce) + ESP_CHACHAPOLY_SALT_LEN); } _Static_assert(ESP_CHACHAPOLY_SALT_LEN + ESP_CHACHAPOLY_IV_LEN == sizeof(nonce), - "Bad nonce length"); + "Bad nonce length"); rc = chacha20poly1305_setnonce(&esp_ccp_ctx->ccp_ctx, (uint8_t *)nonce); if (rc != 0) { m_freem(m); esp_log_err("ChaChaPoly chacha20poly1305_setnonce failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } cc_clear(sizeof(nonce), nonce); // Set Additional Authentication Data (AAD) rc = chacha20poly1305_aad(&esp_ccp_ctx->ccp_ctx, - sizeof(esp_hdr), - (void *)&esp_hdr); + sizeof(esp_hdr), + (void *)&esp_hdr); if (rc != 0) { m_freem(m); esp_log_err("ChaChaPoly chacha20poly1305_aad failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } @@ -514,11 +514,11 @@ esp_chachapoly_decrypt(struct mbuf *m, // head of mbuf chain sp = mtod(s, uint8_t *) + sn; rc = chacha20poly1305_decrypt(&esp_ccp_ctx->ccp_ctx, - len, sp, sp); + len, sp, sp); if (rc != 0) { m_freem(m); esp_packet_log_err("chacha20poly1305_decrypt failed %d, SPI 0x%08x", - rc, ntohl(sav->spi)); + rc, ntohl(sav->spi)); return rc; } @@ -529,7 +529,7 @@ esp_chachapoly_decrypt(struct mbuf *m, // head of mbuf chain if (s == NULL && soff != m->m_pkthdr.len) { m_freem(m); esp_packet_log_err("not enough mbufs %d %d, SPI 0x%08x", - soff, m->m_pkthdr.len, ntohl(sav->spi)); + soff, m->m_pkthdr.len, ntohl(sav->spi)); return EFBIG; } return 0; diff --git a/bsd/netinet6/esp_chachapoly.h b/bsd/netinet6/esp_chachapoly.h index 8e3c58e4d..79f2c5af3 100644 --- a/bsd/netinet6/esp_chachapoly.h +++ b/bsd/netinet6/esp_chachapoly.h @@ -33,18 +33,18 @@ #ifndef _ESP_CHACHA_POLY_H_ #define _ESP_CHACHA_POLY_H_ -#define ESP_CHACHAPOLY_PAD_BOUND 1 -#define ESP_CHACHAPOLY_IV_LEN 8 -#define ESP_CHACHAPOLY_ICV_LEN 16 -#define ESP_CHACHAPOLY_KEYBITS_WITH_SALT 288 /* 32 bytes key + 4 bytes salt */ +#define ESP_CHACHAPOLY_PAD_BOUND 1 +#define ESP_CHACHAPOLY_IV_LEN 8 +#define ESP_CHACHAPOLY_ICV_LEN 16 +#define ESP_CHACHAPOLY_KEYBITS_WITH_SALT 288 /* 32 bytes key + 4 bytes salt */ int esp_chachapoly_schedlen(const struct esp_algorithm *); int esp_chachapoly_schedule(const 
struct esp_algorithm *, - struct secasvar *); + struct secasvar *); int esp_chachapoly_encrypt(struct mbuf *, size_t, size_t, struct secasvar *, - const struct esp_algorithm *, int); + const struct esp_algorithm *, int); int esp_chachapoly_decrypt(struct mbuf *, size_t, struct secasvar *, - const struct esp_algorithm *, int); + const struct esp_algorithm *, int); int esp_chachapoly_encrypt_finalize(struct secasvar *, unsigned char *, unsigned int); int esp_chachapoly_decrypt_finalize(struct secasvar *, unsigned char *, unsigned int); int esp_chachapoly_mature(struct secasvar *); diff --git a/bsd/netinet6/esp_core.c b/bsd/netinet6/esp_core.c index 03fdcd7f1..c5448b8a5 100644 --- a/bsd/netinet6/esp_core.c +++ b/bsd/netinet6/esp_core.c @@ -107,91 +107,91 @@ #include #include -#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) -#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) -#define DBG_FNC_ESPAUTH NETDBG_CODE(DBG_NETIPSEC, (8 << 8)) +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) +#define DBG_FNC_ESPAUTH NETDBG_CODE(DBG_NETIPSEC, (8 << 8)) #define MAX_SBUF_LEN 2000 extern lck_mtx_t *sadb_mutex; static int esp_null_mature(struct secasvar *); static int esp_null_decrypt(struct mbuf *, size_t, - struct secasvar *, const struct esp_algorithm *, int); + struct secasvar *, const struct esp_algorithm *, int); static int esp_null_encrypt(struct mbuf *, size_t, size_t, - struct secasvar *, const struct esp_algorithm *, int); + struct secasvar *, const struct esp_algorithm *, int); static int esp_descbc_mature(struct secasvar *); static int esp_descbc_ivlen(const struct esp_algorithm *, - struct secasvar *); + struct secasvar *); static int esp_des_schedule(const struct esp_algorithm *, - struct secasvar *); + struct secasvar *); static int esp_des_schedlen(const struct esp_algorithm *); static int esp_des_blockdecrypt(const struct esp_algorithm *, - struct secasvar *, u_int8_t *, u_int8_t *); + struct secasvar *, u_int8_t *, u_int8_t *); static int esp_des_blockencrypt(const struct esp_algorithm *, - struct secasvar *, u_int8_t *, u_int8_t *); + struct secasvar *, u_int8_t *, u_int8_t *); static int esp_cbc_mature(struct secasvar *); static int esp_3des_schedule(const struct esp_algorithm *, - struct secasvar *); + struct secasvar *); static int esp_3des_schedlen(const struct esp_algorithm *); static int esp_3des_blockdecrypt(const struct esp_algorithm *, - struct secasvar *, u_int8_t *, u_int8_t *); + struct secasvar *, u_int8_t *, u_int8_t *); static int esp_3des_blockencrypt(const struct esp_algorithm *, - struct secasvar *, u_int8_t *, u_int8_t *); + struct secasvar *, u_int8_t *, u_int8_t *); static int esp_common_ivlen(const struct esp_algorithm *, - struct secasvar *); + struct secasvar *); static int esp_cbc_decrypt(struct mbuf *, size_t, - struct secasvar *, const struct esp_algorithm *, int); + struct secasvar *, const struct esp_algorithm *, int); static int esp_cbc_encrypt(struct mbuf *, size_t, size_t, - struct secasvar *, const struct esp_algorithm *, int); + struct secasvar *, const struct esp_algorithm *, int); static int esp_gcm_mature(struct secasvar *); -#define MAXIVLEN 16 +#define MAXIVLEN 16 #define ESP_AESGCM_KEYLEN128 160 // 16-bytes key + 4 bytes salt #define ESP_AESGCM_KEYLEN192 224 // 24-bytes key + 4 bytes salt #define ESP_AESGCM_KEYLEN256 288 // 32-bytes key + 4 bytes salt static const struct esp_algorithm des_cbc = - { 8, -1, esp_descbc_mature, 64, 64, esp_des_schedlen, - "des-cbc", - esp_descbc_ivlen, 
esp_cbc_decrypt, - esp_cbc_encrypt, esp_des_schedule, - esp_des_blockdecrypt, esp_des_blockencrypt, - 0, 0, 0 }; +{ 8, -1, esp_descbc_mature, 64, 64, esp_des_schedlen, + "des-cbc", + esp_descbc_ivlen, esp_cbc_decrypt, + esp_cbc_encrypt, esp_des_schedule, + esp_des_blockdecrypt, esp_des_blockencrypt, + 0, 0, 0 }; static const struct esp_algorithm des3_cbc = - { 8, 8, esp_cbc_mature, 192, 192, esp_3des_schedlen, - "3des-cbc", - esp_common_ivlen, esp_cbc_decrypt, - esp_cbc_encrypt, esp_3des_schedule, - esp_3des_blockdecrypt, esp_3des_blockencrypt, - 0, 0, 0 }; +{ 8, 8, esp_cbc_mature, 192, 192, esp_3des_schedlen, + "3des-cbc", + esp_common_ivlen, esp_cbc_decrypt, + esp_cbc_encrypt, esp_3des_schedule, + esp_3des_blockdecrypt, esp_3des_blockencrypt, + 0, 0, 0 }; static const struct esp_algorithm null_esp = - { 1, 0, esp_null_mature, 0, 2048, 0, "null", - esp_common_ivlen, esp_null_decrypt, - esp_null_encrypt, NULL, NULL, NULL, - 0, 0, 0 }; +{ 1, 0, esp_null_mature, 0, 2048, 0, "null", + esp_common_ivlen, esp_null_decrypt, + esp_null_encrypt, NULL, NULL, NULL, + 0, 0, 0 }; static const struct esp_algorithm aes_cbc = - { 16, 16, esp_cbc_mature, 128, 256, esp_aes_schedlen, - "aes-cbc", - esp_common_ivlen, esp_cbc_decrypt_aes, - esp_cbc_encrypt_aes, esp_aes_schedule, - 0, 0, - 0, 0, 0 }; +{ 16, 16, esp_cbc_mature, 128, 256, esp_aes_schedlen, + "aes-cbc", + esp_common_ivlen, esp_cbc_decrypt_aes, + esp_cbc_encrypt_aes, esp_aes_schedule, + 0, 0, + 0, 0, 0 }; static const struct esp_algorithm aes_gcm = - { 4, 8, esp_gcm_mature, ESP_AESGCM_KEYLEN128, ESP_AESGCM_KEYLEN256, esp_gcm_schedlen, - "aes-gcm", - esp_common_ivlen, esp_gcm_decrypt_aes, - esp_gcm_encrypt_aes, esp_gcm_schedule, - 0, 0, - 16, esp_gcm_decrypt_finalize, esp_gcm_encrypt_finalize}; +{ 4, 8, esp_gcm_mature, ESP_AESGCM_KEYLEN128, ESP_AESGCM_KEYLEN256, esp_gcm_schedlen, + "aes-gcm", + esp_common_ivlen, esp_gcm_decrypt_aes, + esp_gcm_encrypt_aes, esp_gcm_schedule, + 0, 0, + 16, esp_gcm_decrypt_finalize, esp_gcm_encrypt_finalize}; static const struct esp_algorithm chacha_poly = - { ESP_CHACHAPOLY_PAD_BOUND, ESP_CHACHAPOLY_IV_LEN, - esp_chachapoly_mature, ESP_CHACHAPOLY_KEYBITS_WITH_SALT, - ESP_CHACHAPOLY_KEYBITS_WITH_SALT, esp_chachapoly_schedlen, - "chacha-poly", esp_chachapoly_ivlen, esp_chachapoly_decrypt, - esp_chachapoly_encrypt, esp_chachapoly_schedule, - NULL, NULL, ESP_CHACHAPOLY_ICV_LEN, - esp_chachapoly_decrypt_finalize, esp_chachapoly_encrypt_finalize}; +{ ESP_CHACHAPOLY_PAD_BOUND, ESP_CHACHAPOLY_IV_LEN, + esp_chachapoly_mature, ESP_CHACHAPOLY_KEYBITS_WITH_SALT, + ESP_CHACHAPOLY_KEYBITS_WITH_SALT, esp_chachapoly_schedlen, + "chacha-poly", esp_chachapoly_ivlen, esp_chachapoly_decrypt, + esp_chachapoly_encrypt, esp_chachapoly_schedule, + NULL, NULL, ESP_CHACHAPOLY_ICV_LEN, + esp_chachapoly_decrypt_finalize, esp_chachapoly_encrypt_finalize}; static const struct esp_algorithm *esp_algorithms[] = { &des_cbc, @@ -230,10 +230,11 @@ esp_max_ivlen(void) int ivlen; ivlen = 0; - for (idx = 0; idx < sizeof(esp_algorithms)/sizeof(esp_algorithms[0]); - idx++) { - if (esp_algorithms[idx]->ivlenval > ivlen) + for (idx = 0; idx < sizeof(esp_algorithms) / sizeof(esp_algorithms[0]); + idx++) { + if (esp_algorithms[idx]->ivlenval > ivlen) { ivlen = esp_algorithms[idx]->ivlenval; + } } return ivlen; @@ -263,11 +264,11 @@ esp_schedule(const struct esp_algorithm *algo, struct secasvar *sav) /* prevent disallowed implicit IV */ if (((sav->flags & SADB_X_EXT_IIV) != 0) && - (sav->alg_enc != SADB_X_EALG_AES_GCM) && - (sav->alg_enc != 
SADB_X_EALG_CHACHA20POLY1305)) { + (sav->alg_enc != SADB_X_EALG_AES_GCM) && + (sav->alg_enc != SADB_X_EALG_CHACHA20POLY1305)) { ipseclog((LOG_ERR, "esp_schedule %s: implicit IV not allowed\n", - algo->name)); + algo->name)); lck_mtx_unlock(sadb_mutex); return EINVAL; } @@ -277,7 +278,7 @@ esp_schedule(const struct esp_algorithm *algo, struct secasvar *sav) lck_mtx_unlock(sadb_mutex); return 0; } - + sav->schedlen = (*algo->schedlen)(algo); if ((signed) sav->schedlen < 0) { lck_mtx_unlock(sadb_mutex); @@ -309,7 +310,6 @@ static int esp_null_mature( __unused struct secasvar *sav) { - /* anything is okay */ return 0; } @@ -317,25 +317,23 @@ esp_null_mature( static int esp_null_decrypt( __unused struct mbuf *m, - __unused size_t off, /* offset to ESP header */ + __unused size_t off, /* offset to ESP header */ __unused struct secasvar *sav, __unused const struct esp_algorithm *algo, __unused int ivlen) { - return 0; /* do nothing */ } static int esp_null_encrypt( __unused struct mbuf *m, - __unused size_t off, /* offset to ESP header */ - __unused size_t plen, /* payload length (to be encrypted) */ + __unused size_t off, /* offset to ESP header */ + __unused size_t plen, /* payload length (to be encrypted) */ __unused struct secasvar *sav, __unused const struct esp_algorithm *algo, __unused int ivlen) { - return 0; /* do nothing */ } @@ -385,13 +383,15 @@ esp_descbc_ivlen( __unused const struct esp_algorithm *algo, struct secasvar *sav) { - - if (!sav) + if (!sav) { return 8; - if ((sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_IV4B)) + } + if ((sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_IV4B)) { return 4; - if (!(sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_DERIV)) + } + if (!(sav->flags & SADB_X_EXT_OLD) && (sav->flags & SADB_X_EXT_DERIV)) { return 4; + } return 8; } @@ -407,13 +407,13 @@ esp_des_schedule( __unused const struct esp_algorithm *algo, struct secasvar *sav) { - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); if (des_ecb_key_sched((des_cblock *)_KEYBUF(sav->key_enc), - (des_ecb_key_schedule *)sav->sched)) + (des_ecb_key_schedule *)sav->sched)) { return EINVAL; - else + } else { return 0; + } } static int @@ -559,7 +559,7 @@ esp_gcm_mature(struct secasvar *sav) break; default: ipseclog((LOG_ERR, - "esp_gcm_mature %s: invalid algo %d.\n", sav->alg_enc)); + "esp_gcm_mature %s: invalid algo %d.\n", sav->alg_enc)); return 1; } @@ -570,7 +570,6 @@ static int esp_3des_schedlen( __unused const struct esp_algorithm *algo) { - return sizeof(des3_ecb_key_schedule); } @@ -582,10 +581,11 @@ esp_3des_schedule( LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); if (des3_ecb_key_sched((des_cblock *)_KEYBUF(sav->key_enc), - (des3_ecb_key_schedule *)sav->sched)) + (des3_ecb_key_schedule *)sav->sched)) { return EINVAL; - else + } else { return 0; + } } static int @@ -598,7 +598,7 @@ esp_3des_blockdecrypt( /* assumption: d has a good alignment */ bcopy(s, d, sizeof(DES_LONG) * 2); des3_ecb_encrypt((des_cblock *)d, (des_cblock *)d, - (des3_ecb_key_schedule *)sav->sched, DES_DECRYPT); + (des3_ecb_key_schedule *)sav->sched, DES_DECRYPT); return 0; } @@ -612,7 +612,7 @@ esp_3des_blockencrypt( /* assumption: d has a good alignment */ bcopy(s, d, sizeof(DES_LONG) * 2); des3_ecb_encrypt((des_cblock *)d, (des_cblock *)d, - (des3_ecb_key_schedule *)sav->sched, DES_ENCRYPT); + (des3_ecb_key_schedule *)sav->sched, DES_ENCRYPT); return 0; } @@ -621,20 +621,20 @@ esp_common_ivlen( const struct esp_algorithm *algo, __unused struct secasvar *sav) { - - if (!algo) + if (!algo) { 
panic("esp_common_ivlen: unknown algorithm"); + } return algo->ivlenval; } static int esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, - const struct esp_algorithm *algo, int ivlen) + const struct esp_algorithm *algo, int ivlen) { struct mbuf *s; struct mbuf *d, *d0, *dp; - int soff, doff; /* offset from the head of chain, to head of this mbuf */ - int sn, dn; /* offset from the head of the mbuf, to meat */ + int soff, doff; /* offset from the head of chain, to head of this mbuf */ + int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t iv[MAXIVLEN] __attribute__((aligned(4))), *ivp; u_int8_t *sbuf = NULL, *sp, *sp_unaligned; @@ -691,9 +691,9 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, m_copydata(m, ivoff, ivlen, (caddr_t) iv); /* extend iv */ - if (ivlen == blocklen) + if (ivlen == blocklen) { ; - else if (ivlen == 4 && blocklen == 8) { + } else if (ivlen == 4 && blocklen == 8) { bcopy(&iv[0], &iv[4], 4); iv[4] ^= 0xff; iv[5] ^= 0xff; @@ -740,13 +740,15 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, scutoff = sn; /* skip over empty mbuf */ - while (s && s->m_len == 0) + while (s && s->m_len == 0) { s = s->m_next; + } // Allocate blocksized buffer for unaligned or non-contiguous access sbuf = (u_int8_t *)_MALLOC(blocklen, M_SECA, M_DONTWAIT); - if (sbuf == NULL) + if (sbuf == NULL) { return ENOBUFS; + } while (soff < m->m_pkthdr.len) { /* source */ if (sn + blocklen <= s->m_len) { @@ -760,8 +762,9 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, /* destination */ if (!d || dn + blocklen > d->m_len) { - if (d) + if (d) { dp = d; + } MGET(d, M_DONTWAIT, MT_DATA); i = m->m_pkthdr.len - (soff + sn); if (d && i > MLEN) { @@ -773,15 +776,18 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, } if (!d) { m_freem(m); - if (d0) + if (d0) { m_freem(d0); + } result = ENOBUFS; goto end; } - if (!d0) + if (!d0) { d0 = d; - if (dp) + } + if (dp) { dp->m_next = d; + } // try to make mbuf data aligned if (!IPSEC_IS_P2ALIGNED(d->m_data)) { @@ -790,8 +796,9 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, d->m_len = 0; d->m_len = (M_TRAILINGSPACE(d) / blocklen) * blocklen; - if (d->m_len > i) + if (d->m_len > i) { d->m_len = i; + } dn = 0; } @@ -815,15 +822,17 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, /* xor */ p = ivp ? ivp : iv; q = mtod(d, u_int8_t *) + dn; - for (i = 0; i < blocklen; i++) + for (i = 0; i < blocklen; i++) { q[i] ^= p[i]; + } /* next iv */ if (sp == sbuf) { bcopy(sbuf, iv, blocklen); ivp = NULL; - } else + } else { ivp = sp; + } sn += blocklen; dn += blocklen; @@ -844,8 +853,9 @@ esp_cbc_decrypt(struct mbuf *m, size_t off, struct secasvar *sav, bzero(iv, sizeof(iv)); bzero(sbuf, blocklen); end: - if (sbuf != NULL) + if (sbuf != NULL) { FREE(sbuf, M_SECA); + } return result; } @@ -860,8 +870,8 @@ esp_cbc_encrypt( { struct mbuf *s; struct mbuf *d, *d0, *dp; - int soff, doff; /* offset from the head of chain, to head of this mbuf */ - int sn, dn; /* offset from the head of the mbuf, to meat */ + int soff, doff; /* offset from the head of chain, to head of this mbuf */ + int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t iv[MAXIVLEN] __attribute__((aligned(4))), *ivp; u_int8_t *sbuf = NULL, *sp, *sp_unaligned; @@ -915,18 +925,18 @@ esp_cbc_encrypt( } /* put iv into the packet. if we are in derived mode, use seqno. 
*/ - if (derived) + if (derived) { m_copydata(m, ivoff, ivlen, (caddr_t) iv); - else { + } else { bcopy(sav->iv, iv, ivlen); /* maybe it is better to overwrite dest, not source */ m_copyback(m, ivoff, ivlen, (caddr_t) iv); } /* extend iv */ - if (ivlen == blocklen) + if (ivlen == blocklen) { ; - else if (ivlen == 4 && blocklen == 8) { + } else if (ivlen == 4 && blocklen == 8) { bcopy(&iv[0], &iv[4], 4); iv[4] ^= 0xff; iv[5] ^= 0xff; @@ -973,13 +983,15 @@ esp_cbc_encrypt( scutoff = sn; /* skip over empty mbuf */ - while (s && s->m_len == 0) + while (s && s->m_len == 0) { s = s->m_next; + } // Allocate blocksized buffer for unaligned or non-contiguous access - sbuf = (u_int8_t *)_MALLOC(blocklen, M_SECA, M_DONTWAIT); - if (sbuf == NULL) - return ENOBUFS; + sbuf = (u_int8_t *)_MALLOC(blocklen, M_SECA, M_DONTWAIT); + if (sbuf == NULL) { + return ENOBUFS; + } while (soff < m->m_pkthdr.len) { /* source */ if (sn + blocklen <= s->m_len) { @@ -993,8 +1005,9 @@ esp_cbc_encrypt( /* destination */ if (!d || dn + blocklen > d->m_len) { - if (d) + if (d) { dp = d; + } MGET(d, M_DONTWAIT, MT_DATA); i = m->m_pkthdr.len - (soff + sn); if (d && i > MLEN) { @@ -1006,15 +1019,18 @@ esp_cbc_encrypt( } if (!d) { m_freem(m); - if (d0) + if (d0) { m_freem(d0); + } result = ENOBUFS; goto end; } - if (!d0) + if (!d0) { d0 = d; - if (dp) + } + if (dp) { dp->m_next = d; + } // try to make mbuf data aligned if (!IPSEC_IS_P2ALIGNED(d->m_data)) { @@ -1023,16 +1039,18 @@ esp_cbc_encrypt( d->m_len = 0; d->m_len = (M_TRAILINGSPACE(d) / blocklen) * blocklen; - if (d->m_len > i) + if (d->m_len > i) { d->m_len = i; + } dn = 0; } /* xor */ p = ivp ? ivp : iv; q = sp; - for (i = 0; i < blocklen; i++) + for (i = 0; i < blocklen; i++) { q[i] ^= p[i]; + } /* encrypt */ // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary). @@ -1075,8 +1093,9 @@ esp_cbc_encrypt( key_sa_stir_iv(sav); end: - if (sbuf != NULL) + if (sbuf != NULL) { FREE(sbuf, M_SECA); + } return result; } @@ -1086,8 +1105,8 @@ end: int esp_auth( struct mbuf *m0, - size_t skip, /* offset to ESP header */ - size_t length, /* payload length */ + size_t skip, /* offset to ESP header */ + size_t length, /* payload length */ struct secasvar *sav, u_char *sum) { @@ -1110,19 +1129,19 @@ esp_auth( return EINVAL; } - KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_START, skip,length,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_START, skip, length, 0, 0, 0); /* * length of esp part (excluding authentication data) must be 4n, * since nexthdr must be at offset 4n+3. 
*/ if (length % 4) { ipseclog((LOG_ERR, "esp_auth: length is not multiple of 4\n")); - KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 1,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 1, 0, 0, 0, 0); return EINVAL; } if (!sav) { ipseclog((LOG_DEBUG, "esp_auth: NULL SA passed\n")); - KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 2,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 2, 0, 0, 0, 0); return EINVAL; } algo = ah_algorithm_lookup(sav->alg_auth); @@ -1130,7 +1149,7 @@ esp_auth( ipseclog((LOG_ERR, "esp_auth: bad ESP auth algorithm passed: %d\n", sav->alg_auth)); - KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 3,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 3, 0, 0, 0, 0); return EINVAL; } @@ -1142,14 +1161,15 @@ esp_auth( ipseclog((LOG_DEBUG, "esp_auth: AH_MAXSUMSIZE is too small: siz=%lu\n", (u_int32_t)siz)); - KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 4,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 4, 0, 0, 0, 0); return EINVAL; } /* skip the header */ while (skip) { - if (!m) + if (!m) { panic("mbuf chain?"); + } if (m->m_len <= skip) { skip -= m->m_len; m = m->m_next; @@ -1162,16 +1182,17 @@ esp_auth( error = (*algo->init)(&s, sav); if (error) { - KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 5,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 5, 0, 0, 0, 0); return error; } while (0 < length) { - if (!m) + if (!m) { panic("mbuf chain?"); + } if (m->m_len - off < length) { (*algo->update)(&s, (caddr_t)(mtod(m, u_char *) + off), - m->m_len - off); + m->m_len - off); length -= m->m_len - off; m = m->m_next; off = 0; @@ -1181,7 +1202,7 @@ esp_auth( } } (*algo->result)(&s, (caddr_t) sumbuf, sizeof(sumbuf)); - bcopy(sumbuf, sum, siz); /*XXX*/ - KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 6,0,0,0,0); + bcopy(sumbuf, sum, siz); /*XXX*/ + KERNEL_DEBUG(DBG_FNC_ESPAUTH | DBG_FUNC_END, 6, 0, 0, 0, 0); return 0; } diff --git a/bsd/netinet6/esp_input.c b/bsd/netinet6/esp_input.c index 912dd3983..36311e312 100644 --- a/bsd/netinet6/esp_input.c +++ b/bsd/netinet6/esp_input.c @@ -125,10 +125,10 @@ #include #include -#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) -#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) -#define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8)) -#define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8)) +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) +#define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8)) +#define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8)) #define IPLEN_FLIPPED extern lck_mtx_t *sadb_mutex; @@ -136,10 +136,10 @@ extern lck_mtx_t *sadb_mutex; #if INET #define ESPMAXLEN \ (sizeof(struct esp) < sizeof(struct newesp) \ - ? sizeof(struct newesp) : sizeof(struct esp)) + ? 
sizeof(struct newesp) : sizeof(struct esp)) static struct ip * -esp4_input_strip_udp_encap (struct mbuf *m, int iphlen) +esp4_input_strip_udp_encap(struct mbuf *m, int iphlen) { // strip the udp header that's encapsulating ESP struct ip *ip; @@ -157,7 +157,7 @@ esp4_input_strip_udp_encap (struct mbuf *m, int iphlen) } static struct ip6_hdr * -esp6_input_strip_udp_encap (struct mbuf *m, int ip6hlen) +esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen) { // strip the udp header that's encapsulating ESP struct ip6_hdr *ip6; @@ -198,14 +198,14 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) int ivlen; size_t hlen; size_t esplen; - sa_family_t ifamily; + sa_family_t ifamily; struct mbuf *out_m = NULL; - KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0); /* sanity check for alignment. */ if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) { ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem " - "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len)); + "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len)); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } @@ -230,7 +230,7 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) if (ip->ip_p != IPPROTO_ESP && !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) { ipseclog((LOG_DEBUG, - "IPv4 ESP input: invalid protocol type\n")); + "IPv4 ESP input: invalid protocol type\n")); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } @@ -245,8 +245,8 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) spi = esp->esp_spi; if ((sav = key_allocsa_extended(AF_INET, - (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, - IPPROTO_ESP, spi, interface)) == 0) { + (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst, + IPPROTO_ESP, spi, interface)) == 0) { ipseclog((LOG_WARNING, "IPv4 ESP input: no key association found for spi %u\n", (u_int32_t)ntohl(spi))); @@ -257,7 +257,7 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) printf("DP esp4_input called to allocate SA:0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(sav))); if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) { + && sav->state != SADB_SASTATE_DYING) { ipseclog((LOG_DEBUG, "IPv4 ESP input: non-mature/dying SA found for spi %u\n", (u_int32_t)ntohl(spi))); @@ -294,19 +294,21 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) } if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay - && (sav->alg_auth && sav->key_auth))) + && (sav->alg_auth && sav->key_auth))) { goto noreplaycheck; + } if (sav->alg_auth == SADB_X_AALG_NULL || - sav->alg_auth == SADB_AALG_NONE) + sav->alg_auth == SADB_AALG_NONE) { goto noreplaycheck; + } /* * check for sequence number. 
*/ - if (ipsec_chkreplay(seq, sav)) + if (ipsec_chkreplay(seq, sav)) { ; /*okay*/ - else { + } else { IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay); ipseclog((LOG_WARNING, "replay packet in IPv4 ESP input: %s %s\n", @@ -315,56 +317,57 @@ esp4_input_extended(struct mbuf *m, int off, ifnet_t interface) } /* check ICV */ - { - u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4))); - u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4))); - const struct ah_algorithm *sumalgo; - - sumalgo = ah_algorithm_lookup(sav->alg_auth); - if (!sumalgo) - goto noreplaycheck; - siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); - if (m->m_pkthdr.len < off + ESPMAXLEN + siz) { - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - goto bad; - } - if (AH_MAXSUMSIZE < siz) { - ipseclog((LOG_DEBUG, - "internal error: AH_MAXSUMSIZE must be larger than %lu\n", - (u_int32_t)siz)); - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - goto bad; - } + { + u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4))); + u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4))); + const struct ah_algorithm *sumalgo; + + sumalgo = ah_algorithm_lookup(sav->alg_auth); + if (!sumalgo) { + goto noreplaycheck; + } + siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); + if (m->m_pkthdr.len < off + ESPMAXLEN + siz) { + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + goto bad; + } + if (AH_MAXSUMSIZE < siz) { + ipseclog((LOG_DEBUG, + "internal error: AH_MAXSUMSIZE must be larger than %lu\n", + (u_int32_t)siz)); + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + goto bad; + } - m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]); + m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]); - if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { - ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n", - ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); - IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail); - goto bad; - } + if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { + ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail); + goto bad; + } - if (cc_cmp_safe(siz, sum0, sum)) { - ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n", - ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); - IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail); - goto bad; - } + if (cc_cmp_safe(siz, sum0, sum)) { + ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail); + goto bad; + } delay_icv: - /* strip off the authentication data */ - m_adj(m, -siz); - ip = mtod(m, struct ip *); + /* strip off the authentication data */ + m_adj(m, -siz); + ip = mtod(m, struct ip *); #ifdef IPLEN_FLIPPED - ip->ip_len = ip->ip_len - siz; + ip->ip_len = ip->ip_len - siz; #else - ip->ip_len = htons(ntohs(ip->ip_len) - siz); + ip->ip_len = htons(ntohs(ip->ip_len) - siz); #endif - m->m_flags |= M_AUTHIPDGM; - IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc); - } + m->m_flags |= M_AUTHIPDGM; + IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc); + } /* * update sequence number.
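The ordering in this hunk is worth noting: ipsec_chkreplay runs before the ICV is verified, and the window itself is only advanced afterwards (the "update sequence number" step that follows). That is the classic RFC 2401 sliding-window anti-replay scheme. A minimal sketch of the window logic, assuming a 32-entry bitmap and illustrative names rather than xnu's actual struct secreplay layout:

#include <stdint.h>

/* Illustrative anti-replay state: the highest sequence number accepted so
 * far, plus a bitmap of the 32 sequence numbers at and below it.
 * Assumed layout, not the kernel's. */
struct replay_win {
	uint32_t lastseq;
	uint32_t bitmap;   /* bit i set => lastseq - i already seen */
};

/* Return 1 if seq is acceptable (new and not too old), 0 otherwise. */
static int
replay_check(const struct replay_win *w, uint32_t seq)
{
	if (seq == 0)
		return 0;                       /* first valid seq on the wire is 1 */
	if (seq > w->lastseq)
		return 1;                       /* ahead of the window: accept */
	if (w->lastseq - seq >= 32)
		return 0;                       /* behind the window: reject */
	return (w->bitmap & (1u << (w->lastseq - seq))) == 0;
}

/* Slide or mark the window, called only after the ICV has verified. */
static void
replay_update(struct replay_win *w, uint32_t seq)
{
	if (seq > w->lastseq) {
		uint32_t shift = seq - w->lastseq;
		w->bitmap = (shift < 32) ? ((w->bitmap << shift) | 1u) : 1u;
		w->lastseq = seq;
	} else {
		w->bitmap |= 1u << (w->lastseq - seq); /* in-window; check already passed */
	}
}

Deferring the update until after authentication succeeds is what keeps an attacker from sliding the window forward with forged packets.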
@@ -384,10 +387,11 @@ noreplaycheck: esplen = sizeof(struct esp); } else { /* RFC 2406 */ - if (sav->flags & SADB_X_EXT_DERIV) + if (sav->flags & SADB_X_EXT_DERIV) { esplen = sizeof(struct esp); - else + } else { esplen = sizeof(struct newesp); + } } if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) { @@ -418,43 +422,43 @@ noreplaycheck: /* * decrypt the packet. */ - if (!algo->decrypt) + if (!algo->decrypt) { panic("internal error: no decrypt function"); - KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0); + } + KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0); if ((*algo->decrypt)(m, off, sav, algo, ivlen)) { /* m is already freed */ m = NULL; ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n", ipsec_logsastr(sav))); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0); goto bad; } - KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0); IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]); m->m_flags |= M_DECRYPTED; - if (algo->finalizedecrypt) - { - if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) { - ipseclog((LOG_ERR, "packet decryption ICV failure\n")); - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0); - goto bad; - } + if (algo->finalizedecrypt) { + if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) { + ipseclog((LOG_ERR, "packet decryption ICV failure\n")); + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0); + goto bad; + } } /* * find the trailer of the ESP. */ m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail), - (caddr_t)&esptail); + (caddr_t)&esptail); nxt = esptail.esp_nxt; taillen = esptail.esp_padlen + sizeof(esptail); if (m->m_pkthdr.len < taillen - || m->m_pkthdr.len - taillen < hlen) { /*?*/ + || m->m_pkthdr.len - taillen < hlen) { /*?*/ ipseclog((LOG_WARNING, "bad pad length in IPv4 ESP input: %s %s\n", ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); @@ -476,7 +480,7 @@ noreplaycheck: m = m_pullup(m, off); if (!m) { ipseclog((LOG_DEBUG, - "IPv4 ESP input: invalid udp encapsulated ESP packet length \n")); + "IPv4 ESP input: invalid udp encapsulated ESP packet length \n")); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } @@ -488,7 +492,7 @@ noreplaycheck: if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 && (sav->flags & SADB_X_EXT_OLD) == 0 && seq && sav->replay && - seq >= sav->replay->lastseq) { + seq >= sav->replay->lastseq) { struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off); if (encap_uh->uh_sport && ntohs(encap_uh->uh_sport) != sav->remote_ike_port) { @@ -536,24 +540,24 @@ noreplaycheck: } if (otos != ip->ip_tos) { - sum = ~ntohs(ip->ip_sum) & 0xffff; - sum += (~otos & 0xffff) + ip->ip_tos; - sum = (sum >> 16) + (sum & 0xffff); - sum += (sum >> 16); /* add carry */ - ip->ip_sum = htons(~sum & 0xffff); + sum = ~ntohs(ip->ip_sum) & 0xffff; + sum += (~otos & 0xffff) + ip->ip_tos; + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); /* add carry */ + ip->ip_sum = htons(~sum & 0xffff); } if (!key_checktunnelsanity(sav, AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) { ipseclog((LOG_ERR, "ipsec tunnel address mismatch " - "in ESP input: %s %s\n", - ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + "in ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), 
ipsec_logsastr(sav))); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } bzero(&addr, sizeof(addr)); - ipaddr = (__typeof__(ipaddr))&addr; + ipaddr = (__typeof__(ipaddr)) & addr; ipaddr->sin_family = AF_INET; ipaddr->sin_len = sizeof(*ipaddr); ipaddr->sin_addr = ip->ip_dst; @@ -590,21 +594,21 @@ noreplaycheck: if (!key_checktunnelsanity(sav, AF_INET6, (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) { ipseclog((LOG_ERR, "ipsec tunnel address mismatch " - "in ESP input: %s %s\n", - ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + "in ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } bzero(&addr, sizeof(addr)); - ip6addr = (__typeof__(ip6addr))&addr; + ip6addr = (__typeof__(ip6addr)) & addr; ip6addr->sin6_family = AF_INET6; ip6addr->sin6_len = sizeof(*ip6addr); ip6addr->sin6_addr = ip6->ip6_dst; #endif /* INET6 */ } else { ipseclog((LOG_ERR, "ipsec tunnel unsupported address family " - "in ESP input\n")); + "in ESP input\n")); goto bad; } @@ -629,7 +633,7 @@ noreplaycheck: if (sav->sah->ipsec_if != NULL) { // Return mbuf if (interface != NULL && - interface == sav->sah->ipsec_if) { + interface == sav->sah->ipsec_if) { out_m = m; goto done; } @@ -641,12 +645,13 @@ noreplaycheck: goto bad; } } - - if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) + + if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) { goto bad; + } nxt = IPPROTO_DONE; - KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, 0, 0, 0, 0); } else { /* * strip off ESP header and IV. @@ -676,7 +681,7 @@ noreplaycheck: IPSEC_STAT_INCREMENT(ipsecstat.in_nomem); goto bad; } - + /* * Set the csum valid flag, if we authenticated the * packet, the payload shouldn't be corrupt unless @@ -695,33 +700,33 @@ noreplaycheck: IPSEC_STAT_INCREMENT(ipsecstat.in_polvio); goto bad; } - KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0); - + KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3, 0, 0, 0, 0); + /* translate encapsulated UDP port ? 
*/ - if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { - struct udphdr *udp; - - if (nxt != IPPROTO_UDP) { /* not UPD packet - drop it */ + if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { + struct udphdr *udp; + + if (nxt != IPPROTO_UDP) { /* not UPD packet - drop it */ IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } - + if (m->m_len < off + sizeof(struct udphdr)) { m = m_pullup(m, off + sizeof(struct udphdr)); if (!m) { ipseclog((LOG_DEBUG, - "IPv4 ESP input: can't pullup UDP header in esp4_input\n")); + "IPv4 ESP input: can't pullup UDP header in esp4_input\n")); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } ip = mtod(m, struct ip *); } udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off); - + lck_mtx_lock(sadb_mutex); - if (sav->natt_encapsulated_src_port == 0) { + if (sav->natt_encapsulated_src_port == 0) { sav->natt_encapsulated_src_port = udp->uh_sport; - } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */ + } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */ IPSEC_STAT_INCREMENT(ipsecstat.in_inval); lck_mtx_unlock(sadb_mutex); goto bad; @@ -732,16 +737,16 @@ noreplaycheck: } DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL, - struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif, - struct ip *, ip, struct ip6_hdr *, NULL); + struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif, + struct ip *, ip, struct ip6_hdr *, NULL); // Input via IPsec interface legacy path if (sav->sah->ipsec_if != NULL) { int mlen; if ((mlen = m_length2(m, NULL)) < hlen) { ipseclog((LOG_DEBUG, - "IPv4 ESP input: decrypted packet too short %d < %d\n", - mlen, hlen)); + "IPv4 ESP input: decrypted packet too short %d < %d\n", + mlen, hlen)); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } @@ -752,7 +757,7 @@ noreplaycheck: // Return mbuf if (interface != NULL && - interface == sav->sah->ipsec_if) { + interface == sav->sah->ipsec_if) { out_m = m; goto done; } @@ -764,7 +769,7 @@ noreplaycheck: goto bad; } } - + ip_proto_dispatch_in(m, off, nxt, 0); } else { m_freem(m); @@ -791,7 +796,7 @@ bad: if (m) { m_freem(m); } - KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0); return out_m; } #endif /* INET */ @@ -828,7 +833,7 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) /* sanity check for alignment. 
*/ if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) { ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem " - "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len)); + "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len)); IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); goto bad; } @@ -869,8 +874,8 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) spi = esp->esp_spi; if ((sav = key_allocsa_extended(AF_INET6, - (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst, - IPPROTO_ESP, spi, interface)) == 0) { + (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst, + IPPROTO_ESP, spi, interface)) == 0) { ipseclog((LOG_WARNING, "IPv6 ESP input: no key association found for spi %u\n", (u_int32_t)ntohl(spi))); @@ -881,7 +886,7 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) printf("DP esp6_input called to allocate SA:0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(sav))); if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) { + && sav->state != SADB_SASTATE_DYING) { ipseclog((LOG_DEBUG, "IPv6 ESP input: non-mature/dying SA found for spi %u\n", (u_int32_t)ntohl(spi))); @@ -918,19 +923,21 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) } if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay - && (sav->alg_auth && sav->key_auth))) + && (sav->alg_auth && sav->key_auth))) { goto noreplaycheck; + } if (sav->alg_auth == SADB_X_AALG_NULL || - sav->alg_auth == SADB_AALG_NONE) + sav->alg_auth == SADB_AALG_NONE) { goto noreplaycheck; + } /* * check for sequence number. */ - if (ipsec_chkreplay(seq, sav)) + if (ipsec_chkreplay(seq, sav)) { ; /*okey*/ - else { + } else { IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay); ipseclog((LOG_WARNING, "replay packet in IPv6 ESP input: %s %s\n", @@ -939,53 +946,54 @@ esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface) } /* check ICV */ - { - u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4))); - u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4))); - const struct ah_algorithm *sumalgo; - - sumalgo = ah_algorithm_lookup(sav->alg_auth); - if (!sumalgo) - goto noreplaycheck; - siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); - if (m->m_pkthdr.len < off + ESPMAXLEN + siz) { - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - goto bad; - } - if (AH_MAXSUMSIZE < siz) { - ipseclog((LOG_DEBUG, - "internal error: AH_MAXSUMSIZE must be larger than %lu\n", - (u_int32_t)siz)); - IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); - goto bad; - } + { + u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4))); + u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4))); + const struct ah_algorithm *sumalgo; + + sumalgo = ah_algorithm_lookup(sav->alg_auth); + if (!sumalgo) { + goto noreplaycheck; + } + siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1)); + if (m->m_pkthdr.len < off + ESPMAXLEN + siz) { + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + goto bad; + } + if (AH_MAXSUMSIZE < siz) { + ipseclog((LOG_DEBUG, + "internal error: AH_MAXSUMSIZE must be larger than %lu\n", + (u_int32_t)siz)); + IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); + goto bad; + } - m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]); + m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]); - if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { - ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n", - ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); - IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail); - goto bad; - } + if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) { 
+ ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail); + goto bad; + } - if (cc_cmp_safe(siz, sum0, sum)) { - ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n", - ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); - IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail); - goto bad; - } + if (cc_cmp_safe(siz, sum0, sum)) { + ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n", + ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); + IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail); + goto bad; + } delay_icv: - /* strip off the authentication data */ - m_adj(m, -siz); - ip6 = mtod(m, struct ip6_hdr *); - ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz); + /* strip off the authentication data */ + m_adj(m, -siz); + ip6 = mtod(m, struct ip6_hdr *); + ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz); - m->m_flags |= M_AUTHIPDGM; - IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc); - } + m->m_flags |= M_AUTHIPDGM; + IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc); + } /* * update sequence number. @@ -1005,10 +1013,11 @@ noreplaycheck: esplen = sizeof(struct esp); } else { /* RFC 2406 */ - if (sav->flags & SADB_X_EXT_DERIV) + if (sav->flags & SADB_X_EXT_DERIV) { esplen = sizeof(struct esp); - else + } else { esplen = sizeof(struct newesp); + } } if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) { @@ -1019,7 +1028,7 @@ noreplaycheck: } #ifndef PULLDOWN_TEST - IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/ + IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/ #else IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen); if (esp == NULL) { @@ -1028,7 +1037,7 @@ noreplaycheck: goto bad; } #endif - ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/ + ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/ /* * pre-compute and cache intermediate key @@ -1041,8 +1050,9 @@ noreplaycheck: /* * decrypt the packet. */ - if (!algo->decrypt) + if (!algo->decrypt) { panic("internal error: no decrypt function"); + } if ((*algo->decrypt)(m, off, sav, algo, ivlen)) { /* m is already freed */ m = NULL; @@ -1055,26 +1065,25 @@ noreplaycheck: m->m_flags |= M_DECRYPTED; - if (algo->finalizedecrypt) - { - if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) { - ipseclog((LOG_ERR, "packet decryption ICV failure\n")); - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0); - goto bad; - } + if (algo->finalizedecrypt) { + if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) { + ipseclog((LOG_ERR, "packet decryption ICV failure\n")); + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0); + goto bad; + } } /* * find the trailer of the ESP. */ m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail), - (caddr_t)&esptail); + (caddr_t)&esptail); nxt = esptail.esp_nxt; taillen = esptail.esp_padlen + sizeof(esptail); if (m->m_pkthdr.len < taillen - || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/ + || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/ ipseclog((LOG_WARNING, "bad pad length in IPv6 ESP input: %s %s\n", ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav))); @@ -1090,10 +1099,10 @@ noreplaycheck: if (*nproto == IPPROTO_UDP) { // offset includes the outer ip and udp header lengths. 
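The trailer logic reformatted in the hunks above recovers esp_nxt and esp_padlen from the last two bytes of the packet, computes taillen = esp_padlen + sizeof(esptail), and rejects the packet unless stripping taillen bytes still leaves at least a full inner header. The same arithmetic over a flat buffer, as a sketch in plain C (the kernel reads from the mbuf chain with m_copydata instead):

#include <stddef.h>
#include <stdint.h>

/* Mirrors the kernel's struct esptail: the ESP trailer ends with the
 * Pad Length and Next Header octets (RFC 4303). */
struct esp_tail {
	uint8_t esp_padlen;	/* pad bytes, not counting these two octets */
	uint8_t esp_nxt;	/* inner next-header value */
};

/* Returns the packet length after removing padding and trailer, or -1
 * when the claimed pad length is inconsistent, the same "bad pad
 * length" case the hunk logs. */
static int
esp_strip_trailer(const uint8_t *pkt, size_t pktlen, size_t min_hdrlen,
    uint8_t *nxt)
{
	size_t taillen;

	if (pktlen < sizeof(struct esp_tail)) {
		return -1;
	}
	taillen = (size_t)pkt[pktlen - 2] + sizeof(struct esp_tail);
	if (pktlen < taillen || pktlen - taillen < min_hdrlen) {
		return -1;
	}
	*nxt = pkt[pktlen - 1];
	return (int)(pktlen - taillen);
}

For the IPv6 path min_hdrlen is sizeof(struct ip6_hdr); the IPv4 path uses the parsed header length hlen instead.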
if (m->m_len < off) { - m = m_pullup(m, off); + m = m_pullup(m, off); if (!m) { ipseclog((LOG_DEBUG, - "IPv6 ESP input: invalid udp encapsulated ESP packet length\n")); + "IPv6 ESP input: invalid udp encapsulated ESP packet length\n")); IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); goto bad; } @@ -1105,7 +1114,7 @@ noreplaycheck: if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 && (sav->flags & SADB_X_EXT_OLD) == 0 && seq && sav->replay && - seq >= sav->replay->lastseq) { + seq >= sav->replay->lastseq) { struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off); if (encap_uh->uh_sport && ntohs(encap_uh->uh_sport) != sav->remote_ike_port) { @@ -1129,7 +1138,7 @@ noreplaycheck: * XXX more sanity checks * XXX relationship with gif? */ - u_int32_t flowinfo; /*net endian*/ + u_int32_t flowinfo; /*net endian*/ flowinfo = ip6->ip6_flow; m_adj(m, off + esplen + ivlen); if (ifamily == AF_INET6) { @@ -1157,7 +1166,7 @@ noreplaycheck: goto bad; } if (!key_checktunnelsanity(sav, AF_INET6, - (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) { + (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) { ipseclog((LOG_ERR, "ipsec tunnel address mismatch " "in IPv6 ESP input: %s %s\n", ipsec6_logpacketstr(ip6, spi), @@ -1167,7 +1176,7 @@ noreplaycheck: } bzero(&addr, sizeof(addr)); - ip6addr = (__typeof__(ip6addr))&addr; + ip6addr = (__typeof__(ip6addr)) & addr; ip6addr->sin6_family = AF_INET6; ip6addr->sin6_len = sizeof(*ip6addr); ip6addr->sin6_addr = ip6->ip6_dst; @@ -1194,31 +1203,31 @@ noreplaycheck: } if (otos != ip->ip_tos) { - sum = ~ntohs(ip->ip_sum) & 0xffff; - sum += (~otos & 0xffff) + ip->ip_tos; - sum = (sum >> 16) + (sum & 0xffff); - sum += (sum >> 16); /* add carry */ - ip->ip_sum = htons(~sum & 0xffff); + sum = ~ntohs(ip->ip_sum) & 0xffff; + sum += (~otos & 0xffff) + ip->ip_tos; + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); /* add carry */ + ip->ip_sum = htons(~sum & 0xffff); } if (!key_checktunnelsanity(sav, AF_INET, (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) { ipseclog((LOG_ERR, "ipsec tunnel address mismatch " - "in ESP input: %s %s\n", - ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); + "in ESP input: %s %s\n", + ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav))); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); goto bad; } bzero(&addr, sizeof(addr)); - ipaddr = (__typeof__(ipaddr))&addr; + ipaddr = (__typeof__(ipaddr)) & addr; ipaddr->sin_family = AF_INET; ipaddr->sin_len = sizeof(*ipaddr); ipaddr->sin_addr = ip->ip_dst; } key_sa_recordxfer(sav, m); - if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 || + if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 || ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) { IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem); goto bad; @@ -1235,7 +1244,7 @@ noreplaycheck: if (sav->sah->ipsec_if != NULL) { // Return mbuf if (interface != NULL && - interface == sav->sah->ipsec_if) { + interface == sav->sah->ipsec_if) { goto done; } @@ -1247,9 +1256,10 @@ noreplaycheck: goto bad; } } - - if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) + + if (proto_input(ifamily == AF_INET ? 
PF_INET : PF_INET6, m) != 0) { goto bad; + } nxt = IPPROTO_DONE; } else { /* @@ -1303,10 +1313,11 @@ noreplaycheck: struct mbuf *n = NULL; int maxlen; - MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */ maxlen = MHLEN; - if (n) + if (n) { M_COPY_PKTHDR(n, m); + } if (n && m->m_pkthdr.len > maxlen) { MCLGET(n, M_DONTWAIT); maxlen = MCLBYTES; @@ -1363,7 +1374,7 @@ noreplaycheck: if (sav->sah->ipsec_if != NULL) { // Return mbuf if (interface != NULL && - interface == sav->sah->ipsec_if) { + interface == sav->sah->ipsec_if) { goto done; } @@ -1375,7 +1386,6 @@ noreplaycheck: goto bad; } } - } done: @@ -1419,10 +1429,12 @@ esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) struct sockaddr_in6 *sa6_src, *sa6_dst; if (sa->sa_family != AF_INET6 || - sa->sa_len != sizeof(struct sockaddr_in6)) + sa->sa_len != sizeof(struct sockaddr_in6)) { return; - if ((unsigned)cmd >= PRC_NCMDS) + } + if ((unsigned)cmd >= PRC_NCMDS) { return; + } /* if the parameter is from icmp6, decode it. */ if (d != NULL) { @@ -1460,8 +1472,9 @@ esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) */ /* check if we can safely examine src and dst ports */ - if (m->m_pkthdr.len < off + sizeof(esp)) + if (m->m_pkthdr.len < off + sizeof(esp)) { return; + } if (m->m_len < off + sizeof(esp)) { /* @@ -1470,8 +1483,9 @@ esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) */ m_copydata(m, off, sizeof(esp), (caddr_t)&esp); espp = &esp; - } else + } else { espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off); + } if (cmd == PRC_MSGSIZE) { int valid = 0; @@ -1483,13 +1497,14 @@ esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) sa6_src = ip6cp->ip6c_src; sa6_dst = (struct sockaddr_in6 *)(void *)sa; sav = key_allocsa(AF_INET6, - (caddr_t)&sa6_src->sin6_addr, - (caddr_t)&sa6_dst->sin6_addr, - IPPROTO_ESP, espp->esp_spi); + (caddr_t)&sa6_src->sin6_addr, + (caddr_t)&sa6_dst->sin6_addr, + IPPROTO_ESP, espp->esp_spi); if (sav) { if (sav->state == SADB_SASTATE_MATURE || - sav->state == SADB_SASTATE_DYING) + sav->state == SADB_SASTATE_DYING) { valid++; + } key_freesav(sav, KEY_SADB_UNLOCKED); } diff --git a/bsd/netinet6/esp_output.c b/bsd/netinet6/esp_output.c index 720846a19..36c91b56f 100644 --- a/bsd/netinet6/esp_output.c +++ b/bsd/netinet6/esp_output.c @@ -110,15 +110,15 @@ #include #include -#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) -#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) -#define DBG_FNC_ESPOUT NETDBG_CODE(DBG_NETIPSEC, (4 << 8)) -#define DBG_FNC_ENCRYPT NETDBG_CODE(DBG_NETIPSEC, (5 << 8)) +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) +#define DBG_FNC_ESPOUT NETDBG_CODE(DBG_NETIPSEC, (4 << 8)) +#define DBG_FNC_ENCRYPT NETDBG_CODE(DBG_NETIPSEC, (5 << 8)) static int esp_output(struct mbuf *, u_char *, struct mbuf *, - int, struct secasvar *sav); + int, struct secasvar *sav); -extern int esp_udp_encap_port; +extern int esp_udp_encap_port; extern u_int64_t natt_now; extern lck_mtx_t *sadb_mutex; @@ -129,11 +129,11 @@ extern lck_mtx_t *sadb_mutex; size_t esp_hdrsiz(__unused struct ipsecrequest *isr) { - #if 0 /* sanity check */ - if (isr == NULL) + if (isr == NULL) { panic("esp_hdrsiz: NULL was passed.\n"); + } lck_mtx_lock(sadb_mutex); @@ -145,57 +145,66 @@ esp_hdrsiz(__unused struct ipsecrequest *isr) size_t authlen; size_t hdrsiz; size_t maxpad; - + /*%%%% this needs to change - no sav in 
ipsecrequest any more */ sav = isr->sav; - - if (isr->saidx.proto != IPPROTO_ESP) + + if (isr->saidx.proto != IPPROTO_ESP) { panic("unsupported mode passed to esp_hdrsiz"); - - if (sav == NULL) + } + + if (sav == NULL) { goto estimate; + } if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) + && sav->state != SADB_SASTATE_DYING) { goto estimate; - + } + /* we need transport mode ESP. */ algo = esp_algorithm_lookup(sav->alg_enc); - if (!algo) + if (!algo) { goto estimate; + } ivlen = sav->ivlen; - if (ivlen < 0) + if (ivlen < 0) { goto estimate; - - if (algo->padbound) + } + + if (algo->padbound) { maxpad = algo->padbound; - else + } else { maxpad = 4; + } maxpad += 1; /* maximum 'extendsiz' is padbound + 1, see esp_output */ - + if (sav->flags & SADB_X_EXT_OLD) { /* RFC 1827 */ hdrsiz = sizeof(struct esp) + ivlen + maxpad; } else { /* RFC 2406 */ aalgo = ah_algorithm_lookup(sav->alg_auth); - if (aalgo && sav->replay && sav->key_auth) + if (aalgo && sav->replay && sav->key_auth) { authlen = (aalgo->sumsiz)(sav); - else + } else { authlen = 0; + } hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen; } - + /* * If the security association indicates that NATT is required, * add the size of the NATT encapsulation header: */ - if ((sav->flags & SADB_X_EXT_NATT) != 0) hdrsiz += sizeof(struct udphdr) + 4; - + if ((sav->flags & SADB_X_EXT_NATT) != 0) { + hdrsiz += sizeof(struct udphdr) + 4; + } + lck_mtx_unlock(sadb_mutex); return hdrsiz; } estimate: - lck_mtx_unlock(sadb_mutex); + lck_mtx_unlock(sadb_mutex); #endif /* * ASSUMING: @@ -244,19 +253,19 @@ esp_output( const struct esp_algorithm *algo; u_int32_t spi; u_int8_t nxt = 0; - size_t plen; /*payload length to be encrypted*/ + size_t plen; /*payload length to be encrypted*/ size_t espoff; - size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */ + size_t esphlen; /* sizeof(struct esp/newesp) + ivlen */ int ivlen; int afnumber; size_t extendsiz; int error = 0; struct ipsecstat *stat; struct udphdr *udp = NULL; - int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) && - (esp_udp_encap_port & 0xFFFF) != 0); + int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) && + (esp_udp_encap_port & 0xFFFF) != 0); - KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen, 0, 0, 0, 0); switch (af) { #if INET case AF_INET: @@ -272,8 +281,8 @@ esp_output( #endif default: ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af)); - KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1,0,0,0,0); - return 0; /* no change at all */ + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1, 0, 0, 0, 0); + return 0; /* no change at all */ } /* some sanity check */ @@ -281,24 +290,24 @@ esp_output( switch (af) { #if INET case AF_INET: - { + { struct ip *ip; ip = mtod(m, struct ip *); ipseclog((LOG_DEBUG, "esp4_output: internal error: " - "sav->replay is null: %x->%x, SPI=%u\n", - (u_int32_t)ntohl(ip->ip_src.s_addr), - (u_int32_t)ntohl(ip->ip_dst.s_addr), - (u_int32_t)ntohl(sav->spi))); + "sav->replay is null: %x->%x, SPI=%u\n", + (u_int32_t)ntohl(ip->ip_src.s_addr), + (u_int32_t)ntohl(ip->ip_dst.s_addr), + (u_int32_t)ntohl(sav->spi))); IPSEC_STAT_INCREMENT(ipsecstat.out_inval); break; - } + } #endif /*INET*/ #if INET6 case AF_INET6: ipseclog((LOG_DEBUG, "esp6_output: internal error: " - "sav->replay is null: SPI=%u\n", - (u_int32_t)ntohl(sav->spi))); + "sav->replay is null: SPI=%u\n", + (u_int32_t)ntohl(sav->spi))); 
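For reference, the disabled (#if 0) computation above prices the worst case as sizeof(struct newesp) + ivlen + maxpad + authlen, where maxpad is the cipher's pad boundary plus one. Assuming AES-CBC with HMAC-SHA1-96 (esplen 8, ivlen 16, padbound 16, authlen 12), that works out to 8 + 16 + 17 + 12 = 53 bytes before any NAT-T encapsulation. A sketch of the same estimate with those transform parameters passed in explicitly, all of them illustrative assumptions:

#include <stddef.h>

/* Worst-case per-packet ESP growth, following the arithmetic in the
 * #if 0 block above.  esplen is sizeof(struct newesp) or
 * sizeof(struct esp); authlen is 0 when no authenticator is used. */
static size_t
esp_overhead(size_t esplen, size_t ivlen, size_t padbound, size_t authlen,
    int natt)
{
	/* maximum 'extendsiz' is padbound + 1, see esp_output */
	size_t maxpad = (padbound ? padbound : 4) + 1;
	size_t hdrsiz = esplen + ivlen + maxpad + authlen;

	if (natt) {
		hdrsiz += 8 + 4;	/* sizeof(struct udphdr) + 4, per the hunk */
	}
	return hdrsiz;
}

esp_overhead(8, 16, 16, 12, 0) evaluates to 53. The live path never runs this; since the SA is no longer reachable from the ipsecrequest, it falls through to the fixed estimate below.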
IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); break; #endif /*INET6*/ @@ -306,7 +315,7 @@ esp_output( panic("esp_output: should not reach here"); } m_freem(m); - KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2, 0, 0, 0, 0); return EINVAL; } @@ -315,7 +324,7 @@ esp_output( ipseclog((LOG_ERR, "esp_output: unsupported algorithm: " "SPI=%u\n", (u_int32_t)ntohl(sav->spi))); m_freem(m); - KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3, 0, 0, 0, 0); return EINVAL; } spi = sav->spi; @@ -325,176 +334,180 @@ esp_output( panic("invalid ivlen"); } - { - /* - * insert ESP header. - * XXX inserts ESP header right after IPv4 header. should - * chase the header chain. - * XXX sequential number - */ + { + /* + * insert ESP header. + * XXX inserts ESP header right after IPv4 header. should + * chase the header chain. + * XXX sequential number + */ #if INET - struct ip *ip = NULL; + struct ip *ip = NULL; #endif #if INET6 - struct ip6_hdr *ip6 = NULL; + struct ip6_hdr *ip6 = NULL; #endif - size_t esplen; /* sizeof(struct esp/newesp) */ - size_t hlen = 0; /* ip header len */ + size_t esplen; /* sizeof(struct esp/newesp) */ + size_t hlen = 0; /* ip header len */ - if (sav->flags & SADB_X_EXT_OLD) { - /* RFC 1827 */ - esplen = sizeof(struct esp); - } else { - /* RFC 2406 */ - if (sav->flags & SADB_X_EXT_DERIV) + if (sav->flags & SADB_X_EXT_OLD) { + /* RFC 1827 */ esplen = sizeof(struct esp); - else - esplen = sizeof(struct newesp); - } - esphlen = esplen + ivlen; + } else { + /* RFC 2406 */ + if (sav->flags & SADB_X_EXT_DERIV) { + esplen = sizeof(struct esp); + } else { + esplen = sizeof(struct newesp); + } + } + esphlen = esplen + ivlen; - for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) - ; - if (mprev == NULL || mprev->m_next != md) { - ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n", - afnumber)); - m_freem(m); - KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4,0,0,0,0); - return EINVAL; - } + for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) { + ; + } + if (mprev == NULL || mprev->m_next != md) { + ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n", + afnumber)); + m_freem(m); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4, 0, 0, 0, 0); + return EINVAL; + } - plen = 0; - for (n = md; n; n = n->m_next) - plen += n->m_len; + plen = 0; + for (n = md; n; n = n->m_next) { + plen += n->m_len; + } - switch (af) { + switch (af) { #if INET - case AF_INET: - ip = mtod(m, struct ip *); + case AF_INET: + ip = mtod(m, struct ip *); #ifdef _IP_VHL - hlen = IP_VHL_HL(ip->ip_vhl) << 2; + hlen = IP_VHL_HL(ip->ip_vhl) << 2; #else - hlen = ip->ip_hl << 2; + hlen = ip->ip_hl << 2; #endif - break; + break; #endif #if INET6 - case AF_INET6: - ip6 = mtod(m, struct ip6_hdr *); - hlen = sizeof(*ip6); - break; + case AF_INET6: + ip6 = mtod(m, struct ip6_hdr *); + hlen = sizeof(*ip6); + break; #endif - } + } - /* make the packet over-writable */ - mprev->m_next = NULL; - if ((md = ipsec_copypkt(md)) == NULL) { - m_freem(m); - error = ENOBUFS; - goto fail; - } - mprev->m_next = md; - - /* - * Translate UDP source port back to its original value. - * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transort mode. 
- */ - if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { - /* if not UDP - drop it */ - if (ip->ip_p != IPPROTO_UDP) { - IPSEC_STAT_INCREMENT(ipsecstat.out_inval); + /* make the packet over-writable */ + mprev->m_next = NULL; + if ((md = ipsec_copypkt(md)) == NULL) { m_freem(m); - error = EINVAL; + error = ENOBUFS; goto fail; - } - - udp = mtod(md, struct udphdr *); + } + mprev->m_next = md; - /* if src port not set in sav - find it */ - if (sav->natt_encapsulated_src_port == 0) - if (key_natt_get_translated_port(sav) == 0) { + /* + * Translate UDP source port back to its original value. + * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transort mode. + */ + if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { + /* if not UDP - drop it */ + if (ip->ip_p != IPPROTO_UDP) { + IPSEC_STAT_INCREMENT(ipsecstat.out_inval); m_freem(m); error = EINVAL; goto fail; } - if (sav->remote_ike_port == htons(udp->uh_dport)) { - /* translate UDP port */ - udp->uh_dport = sav->natt_encapsulated_src_port; - udp->uh_sum = 0; /* don't need checksum with ESP auth */ - } else { - /* drop the packet - can't translate the port */ - IPSEC_STAT_INCREMENT(ipsecstat.out_inval); - m_freem(m); - error = EINVAL; - goto fail; - } - } - - espoff = m->m_pkthdr.len - plen; - - if (udp_encapsulate) { - esphlen += sizeof(struct udphdr); - espoff += sizeof(struct udphdr); - } + udp = mtod(md, struct udphdr *); - /* - * grow the mbuf to accomodate ESP header. - * before: IP ... payload - * after: IP ... [UDP] ESP IV payload - */ - if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) { - MGET(n, M_DONTWAIT, MT_DATA); - if (!n) { - m_freem(m); - error = ENOBUFS; - goto fail; + /* if src port not set in sav - find it */ + if (sav->natt_encapsulated_src_port == 0) { + if (key_natt_get_translated_port(sav) == 0) { + m_freem(m); + error = EINVAL; + goto fail; + } + } + if (sav->remote_ike_port == htons(udp->uh_dport)) { + /* translate UDP port */ + udp->uh_dport = sav->natt_encapsulated_src_port; + udp->uh_sum = 0; /* don't need checksum with ESP auth */ + } else { + /* drop the packet - can't translate the port */ + IPSEC_STAT_INCREMENT(ipsecstat.out_inval); + m_freem(m); + error = EINVAL; + goto fail; + } } - n->m_len = esphlen; - mprev->m_next = n; - n->m_next = md; - m->m_pkthdr.len += esphlen; + + + espoff = m->m_pkthdr.len - plen; + if (udp_encapsulate) { - udp = mtod(n, struct udphdr *); - esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr)); - } else { - esp = mtod(n, struct esp *); + esphlen += sizeof(struct udphdr); + espoff += sizeof(struct udphdr); } - } else { - md->m_len += esphlen; - md->m_data -= esphlen; - m->m_pkthdr.len += esphlen; - esp = mtod(md, struct esp *); - if (udp_encapsulate) { - udp = mtod(md, struct udphdr *); - esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr)); + + /* + * grow the mbuf to accomodate ESP header. + * before: IP ... payload + * after: IP ... 
[UDP] ESP IV payload + */ + if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) { + MGET(n, M_DONTWAIT, MT_DATA); + if (!n) { + m_freem(m); + error = ENOBUFS; + goto fail; + } + n->m_len = esphlen; + mprev->m_next = n; + n->m_next = md; + m->m_pkthdr.len += esphlen; + if (udp_encapsulate) { + udp = mtod(n, struct udphdr *); + esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr)); + } else { + esp = mtod(n, struct esp *); + } } else { + md->m_len += esphlen; + md->m_data -= esphlen; + m->m_pkthdr.len += esphlen; esp = mtod(md, struct esp *); + if (udp_encapsulate) { + udp = mtod(md, struct udphdr *); + esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr)); + } else { + esp = mtod(md, struct esp *); + } } - } - - switch (af) { + + switch (af) { #if INET - case AF_INET: - if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) - ip->ip_len = htons(ntohs(ip->ip_len) + esphlen); - else { - ipseclog((LOG_ERR, - "IPv4 ESP output: size exceeds limit\n")); - IPSEC_STAT_INCREMENT(ipsecstat.out_inval); - m_freem(m); - error = EMSGSIZE; - goto fail; - } - break; + case AF_INET: + if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) { + ip->ip_len = htons(ntohs(ip->ip_len) + esphlen); + } else { + ipseclog((LOG_ERR, + "IPv4 ESP output: size exceeds limit\n")); + IPSEC_STAT_INCREMENT(ipsecstat.out_inval); + m_freem(m); + error = EMSGSIZE; + goto fail; + } + break; #endif #if INET6 - case AF_INET6: - /* total packet length will be computed in ip6_output() */ - break; + case AF_INET6: + /* total packet length will be computed in ip6_output() */ + break; #endif + } } - } /* initialize esp header. */ esp->esp_spi = spi; @@ -509,7 +522,7 @@ esp_output( ipsec_logsastr(sav))); IPSEC_STAT_INCREMENT(stat->out_inval); m_freem(m); - KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5, 0, 0, 0, 0); return EINVAL; } } @@ -523,168 +536,176 @@ esp_output( nesp->esp_seq = htonl(sav->replay->count); } - { - /* - * find the last mbuf. make some room for ESP trailer. - */ + { + /* + * find the last mbuf. make some room for ESP trailer. 
+ */ #if INET - struct ip *ip = NULL; + struct ip *ip = NULL; #endif - size_t padbound; - u_char *extend; - int i; - int randpadmax; - - if (algo->padbound) - padbound = algo->padbound; - else - padbound = 4; - /* ESP packet, including nxthdr field, must be length of 4n */ - if (padbound < 4) - padbound = 4; - - extendsiz = padbound - (plen % padbound); - if (extendsiz == 1) - extendsiz = padbound + 1; - - /* random padding */ - switch (af) { + size_t padbound; + u_char *extend; + int i; + int randpadmax; + + if (algo->padbound) { + padbound = algo->padbound; + } else { + padbound = 4; + } + /* ESP packet, including nxthdr field, must be length of 4n */ + if (padbound < 4) { + padbound = 4; + } + + extendsiz = padbound - (plen % padbound); + if (extendsiz == 1) { + extendsiz = padbound + 1; + } + + /* random padding */ + switch (af) { #if INET - case AF_INET: - randpadmax = ip4_esp_randpad; - break; + case AF_INET: + randpadmax = ip4_esp_randpad; + break; #endif #if INET6 - case AF_INET6: - randpadmax = ip6_esp_randpad; - break; + case AF_INET6: + randpadmax = ip6_esp_randpad; + break; #endif - default: - randpadmax = -1; - break; - } - if (randpadmax < 0 || plen + extendsiz >= randpadmax) - ; - else { - int pad; + default: + randpadmax = -1; + break; + } + if (randpadmax < 0 || plen + extendsiz >= randpadmax) { + ; + } else { + int pad; - /* round */ - randpadmax = (randpadmax / padbound) * padbound; - pad = (randpadmax - plen + extendsiz) / padbound; + /* round */ + randpadmax = (randpadmax / padbound) * padbound; + pad = (randpadmax - plen + extendsiz) / padbound; - if (pad > 0) - pad = (random() % pad) * padbound; - else - pad = 0; + if (pad > 0) { + pad = (random() % pad) * padbound; + } else { + pad = 0; + } - /* - * make sure we do not pad too much. - * MLEN limitation comes from the trailer attachment - * code below. - * 256 limitation comes from sequential padding. - * also, the 1-octet length field in ESP trailer imposes - * limitation (but is less strict than sequential padding - * as length field do not count the last 2 octets). - */ - if (extendsiz + pad <= MLEN && extendsiz + pad < 256) - extendsiz += pad; - } + /* + * make sure we do not pad too much. + * MLEN limitation comes from the trailer attachment + * code below. + * 256 limitation comes from sequential padding. + * also, the 1-octet length field in ESP trailer imposes + * limitation (but is less strict than sequential padding + * as length field do not count the last 2 octets). + */ + if (extendsiz + pad <= MLEN && extendsiz + pad < 256) { + extendsiz += pad; + } + } #if DIAGNOSTIC - if (extendsiz > MLEN || extendsiz >= 256) - panic("extendsiz too big in esp_output"); + if (extendsiz > MLEN || extendsiz >= 256) { + panic("extendsiz too big in esp_output"); + } #endif - n = m; - while (n->m_next) - n = n->m_next; + n = m; + while (n->m_next) { + n = n->m_next; + } - /* - * if M_EXT, the external mbuf data may be shared among - * two consequtive TCP packets, and it may be unsafe to use the - * trailing space. - */ - if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) { - extend = mtod(n, u_char *) + n->m_len; - n->m_len += extendsiz; - m->m_pkthdr.len += extendsiz; - } else { - struct mbuf *nn; + /* + * if M_EXT, the external mbuf data may be shared among + * two consequtive TCP packets, and it may be unsafe to use the + * trailing space. 
+ */ + if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) { + extend = mtod(n, u_char *) + n->m_len; + n->m_len += extendsiz; + m->m_pkthdr.len += extendsiz; + } else { + struct mbuf *nn; - MGET(nn, M_DONTWAIT, MT_DATA); - if (!nn) { - ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf", - afnumber)); - m_freem(m); - error = ENOBUFS; - goto fail; + MGET(nn, M_DONTWAIT, MT_DATA); + if (!nn) { + ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf", + afnumber)); + m_freem(m); + error = ENOBUFS; + goto fail; + } + extend = mtod(nn, u_char *); + nn->m_len = extendsiz; + nn->m_next = NULL; + n->m_next = nn; + n = nn; + m->m_pkthdr.len += extendsiz; + } + switch (sav->flags & SADB_X_EXT_PMASK) { + case SADB_X_EXT_PRAND: + key_randomfill(extend, extendsiz); + break; + case SADB_X_EXT_PZERO: + bzero(extend, extendsiz); + break; + case SADB_X_EXT_PSEQ: + for (i = 0; i < extendsiz; i++) { + extend[i] = (i + 1) & 0xff; + } + break; } - extend = mtod(nn, u_char *); - nn->m_len = extendsiz; - nn->m_next = NULL; - n->m_next = nn; - n = nn; - m->m_pkthdr.len += extendsiz; - } - switch (sav->flags & SADB_X_EXT_PMASK) { - case SADB_X_EXT_PRAND: - key_randomfill(extend, extendsiz); - break; - case SADB_X_EXT_PZERO: - bzero(extend, extendsiz); - break; - case SADB_X_EXT_PSEQ: - for (i = 0; i < extendsiz; i++) - extend[i] = (i + 1) & 0xff; - break; - } - - nxt = *nexthdrp; - if (udp_encapsulate) { - *nexthdrp = IPPROTO_UDP; - /* Fill out the UDP header */ - udp->uh_sport = ntohs((u_short)esp_udp_encap_port); - udp->uh_dport = ntohs(sav->remote_ike_port); + nxt = *nexthdrp; + if (udp_encapsulate) { + *nexthdrp = IPPROTO_UDP; + + /* Fill out the UDP header */ + udp->uh_sport = ntohs((u_short)esp_udp_encap_port); + udp->uh_dport = ntohs(sav->remote_ike_port); // udp->uh_len set later, after all length tweaks are complete - udp->uh_sum = 0; - - /* Update last sent so we know if we need to send keepalive */ - sav->natt_last_activity = natt_now; - } else { - *nexthdrp = IPPROTO_ESP; - } + udp->uh_sum = 0; - /* initialize esp trailer. */ - esptail = (struct esptail *) - (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail)); - esptail->esp_nxt = nxt; - esptail->esp_padlen = extendsiz - 2; + /* Update last sent so we know if we need to send keepalive */ + sav->natt_last_activity = natt_now; + } else { + *nexthdrp = IPPROTO_ESP; + } - /* modify IP header (for ESP header part only) */ - switch (af) { + /* initialize esp trailer. 
*/ + esptail = (struct esptail *) + (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail)); + esptail->esp_nxt = nxt; + esptail->esp_padlen = extendsiz - 2; + + /* modify IP header (for ESP header part only) */ + switch (af) { #if INET - case AF_INET: - ip = mtod(m, struct ip *); - if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) - ip->ip_len = htons(ntohs(ip->ip_len) + extendsiz); - else { - ipseclog((LOG_ERR, - "IPv4 ESP output: size exceeds limit\n")); - IPSEC_STAT_INCREMENT(ipsecstat.out_inval); - m_freem(m); - error = EMSGSIZE; - goto fail; - } - break; + case AF_INET: + ip = mtod(m, struct ip *); + if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) { + ip->ip_len = htons(ntohs(ip->ip_len) + extendsiz); + } else { + ipseclog((LOG_ERR, + "IPv4 ESP output: size exceeds limit\n")); + IPSEC_STAT_INCREMENT(ipsecstat.out_inval); + m_freem(m); + error = EMSGSIZE; + goto fail; + } + break; #endif #if INET6 - case AF_INET6: - /* total packet length will be computed in ip6_output() */ - break; + case AF_INET6: + /* total packet length will be computed in ip6_output() */ + break; #endif + } } - } /* * pre-compute and cache intermediate key @@ -700,18 +721,19 @@ esp_output( * encrypt the packet, based on security association * and the algorithm specified. */ - if (!algo->encrypt) + if (!algo->encrypt) { panic("internal error: no encrypt function"); - KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0,0,0,0,0); + } + KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0); if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) { /* m is already freed */ ipseclog((LOG_ERR, "packet encryption failure\n")); IPSEC_STAT_INCREMENT(stat->out_inval); error = EINVAL; - KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1,error,0,0,0); + KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0); goto fail; } - KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0); /* * calculate ICV if required. 
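Before the ICV step, the reformatted output path above has appended extendsiz bytes of padding and pointed esptail at the last two of them, so esp_padlen = extendsiz - 2 counts only the pad bytes, per RFC 4303. A sketch of the SADB_X_EXT_PSEQ-style sequential fill plus trailer over a flat buffer, in plain C:

#include <stddef.h>
#include <stdint.h>

/* Fill 'p' with extendsiz bytes: monotonic pad bytes (1, 2, 3, ...)
 * followed by the Pad Length and Next Header octets.  extendsiz is at
 * least 2 by construction in esp_output, so it always covers the
 * two-byte trailer. */
static void
esp_fill_seq_pad(uint8_t *p, size_t extendsiz, uint8_t nxt)
{
	size_t i;

	for (i = 0; i + 2 < extendsiz; i++) {
		p[i] = (uint8_t)(i + 1);	/* self-describing padding */
	}
	p[extendsiz - 2] = (uint8_t)(extendsiz - 2);	/* Pad Length */
	p[extendsiz - 1] = nxt;				/* Next Header */
}

SADB_X_EXT_PRAND and SADB_X_EXT_PZERO fill random or zero bytes instead, as the switch in the previous hunk shows; only the final two octets are fixed by the wire format.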
@@ -719,35 +741,40 @@ esp_output( size_t siz = 0; u_char authbuf[AH_MAXSUMSIZE] __attribute__((aligned(4))); - if (algo->finalizeencrypt) { + if (algo->finalizeencrypt) { siz = algo->icvlen; if ((*algo->finalizeencrypt)(sav, authbuf, siz)) { - ipseclog((LOG_ERR, "packet encryption ICV failure\n")); + ipseclog((LOG_ERR, "packet encryption ICV failure\n")); IPSEC_STAT_INCREMENT(stat->out_inval); error = EINVAL; - KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1,error,0,0,0); + KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0); goto fail; } goto fill_icv; } - if (!sav->replay) + if (!sav->replay) { goto noantireplay; - if (!sav->key_auth) + } + if (!sav->key_auth) { goto noantireplay; - if (sav->key_auth == SADB_AALG_NONE) + } + if (sav->key_auth == SADB_AALG_NONE) { goto noantireplay; + } - { + { const struct ah_algorithm *aalgo; - + aalgo = ah_algorithm_lookup(sav->alg_auth); - if (!aalgo) + if (!aalgo) { goto noantireplay; + } siz = ((aalgo->sumsiz)(sav) + 3) & ~(4 - 1); - if (AH_MAXSUMSIZE < siz) + if (AH_MAXSUMSIZE < siz) { panic("assertion failed for AH_MAXSUMSIZE"); - + } + if (esp_auth(m, espoff, m->m_pkthdr.len - espoff, sav, authbuf)) { ipseclog((LOG_ERR, "ESP checksum generation failure\n")); m_freem(m); @@ -755,28 +782,29 @@ esp_output( IPSEC_STAT_INCREMENT(stat->out_inval); goto fail; } - } + } - fill_icv: - { - struct ip *ip; +fill_icv: + { + struct ip *ip; u_char *p; n = m; - while (n->m_next) + while (n->m_next) { n = n->m_next; - + } + if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /* XXX */ n->m_len += siz; m->m_pkthdr.len += siz; p = mtod(n, u_char *) + n->m_len - siz; } else { struct mbuf *nn; - + MGET(nn, M_DONTWAIT, MT_DATA); if (!nn) { ipseclog((LOG_DEBUG, "can't alloc mbuf in esp%d_output", - afnumber)); + afnumber)); m_freem(m); error = ENOBUFS; goto fail; @@ -789,17 +817,17 @@ esp_output( p = mtod(nn, u_char *); } bcopy(authbuf, p, siz); - + /* modify IP header (for ESP header part only) */ switch (af) { #if INET case AF_INET: ip = mtod(m, struct ip *); - if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) + if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) { ip->ip_len = htons(ntohs(ip->ip_len) + siz); - else { + } else { ipseclog((LOG_ERR, - "IPv4 ESP output: size exceeds limit\n")); + "IPv4 ESP output: size exceeds limit\n")); IPSEC_STAT_INCREMENT(ipsecstat.out_inval); m_freem(m); error = EMSGSIZE; @@ -813,24 +841,24 @@ esp_output( break; #endif } - } - + } + if (udp_encapsulate) { struct ip *ip; struct ip6_hdr *ip6; switch (af) { case AF_INET: - ip = mtod(m, struct ip *); - udp->uh_ulen = htons(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2)); - break; + ip = mtod(m, struct ip *); + udp->uh_ulen = htons(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2)); + break; case AF_INET6: - ip6 = mtod(m, struct ip6_hdr *); - udp->uh_ulen = htons(plen + siz + extendsiz + esphlen); - udp->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(udp->uh_ulen) + IPPROTO_UDP)); - m->m_pkthdr.csum_flags = (CSUM_UDPIPV6|CSUM_ZERO_INVERT); - m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); - break; + ip6 = mtod(m, struct ip6_hdr *); + udp->uh_ulen = htons(plen + siz + extendsiz + esphlen); + udp->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(udp->uh_ulen) + IPPROTO_UDP)); + m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT); + m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); + break; } } @@ -839,17 +867,18 @@ noantireplay: if (!m) { ipseclog((LOG_ERR, "NULL mbuf after encryption in esp%d_output", afnumber)); - } else + } else 
{ stat->out_success++; + } stat->out_esphist[sav->alg_enc]++; lck_mtx_unlock(sadb_mutex); key_sa_recordxfer(sav, m); - KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6, 0, 0, 0, 0); return 0; fail: #if 1 - KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7,error,0,0,0); + KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7, error, 0, 0, 0); return error; #else panic("something bad in esp_output"); diff --git a/bsd/netinet6/esp_rijndael.c b/bsd/netinet6/esp_rijndael.c index eba6c7fd3..fbb36070b 100644 --- a/bsd/netinet6/esp_rijndael.c +++ b/bsd/netinet6/esp_rijndael.c @@ -90,16 +90,15 @@ extern lck_mtx_t *sadb_mutex; typedef struct { - ccgcm_ctx *decrypt; - ccgcm_ctx *encrypt; - ccgcm_ctx ctxt[0]; + ccgcm_ctx *decrypt; + ccgcm_ctx *encrypt; + ccgcm_ctx ctxt[0]; } aes_gcm_ctx; int esp_aes_schedlen( __unused const struct esp_algorithm *algo) { - return sizeof(aes_ctx); } @@ -108,13 +107,12 @@ esp_aes_schedule( __unused const struct esp_algorithm *algo, struct secasvar *sav) { - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); aes_ctx *ctx = (aes_ctx*)sav->sched; - + aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt); aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt); - + return 0; } @@ -131,7 +129,7 @@ esp_aes_schedule( * * m = mbuf chain * off = offset to ESP header - * + * * local vars for source: * soff = offset from beginning of the chain to the head of the * current mbuf. @@ -145,8 +143,8 @@ esp_aes_schedule( * d = current mbuf * dn = current offset in d (next location to store result) */ - - + + int esp_cbc_decrypt_aes( struct mbuf *m, @@ -157,16 +155,16 @@ esp_cbc_decrypt_aes( { struct mbuf *s; struct mbuf *d, *d0, *dp; - int soff; /* offset from the head of chain, to head of this mbuf */ - int sn, dn; /* offset from the head of the mbuf, to meat */ + int soff; /* offset from the head of chain, to head of this mbuf */ + int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr; u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL; struct mbuf *scut; int scutoff; - int i, len; + int i, len; + - if (ivlen != AES_BLOCKLEN) { ipseclog((LOG_ERR, "esp_cbc_decrypt %s: " "unsupported ivlen %d\n", algo->name, ivlen)); @@ -204,7 +202,7 @@ esp_cbc_decrypt_aes( soff = sn = dn = 0; d = d0 = dp = NULL; sp = dptr = NULL; - + /* skip header/IV offset */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { @@ -219,27 +217,29 @@ esp_cbc_decrypt_aes( scutoff = sn; /* skip over empty mbuf */ - while (s && s->m_len == 0) + while (s && s->m_len == 0) { s = s->m_next; - + } + while (soff < m->m_pkthdr.len) { /* source */ if (sn + AES_BLOCKLEN <= s->m_len) { /* body is continuous */ sp = mtod(s, u_int8_t *) + sn; len = s->m_len - sn; - len -= len % AES_BLOCKLEN; // full blocks only + len -= len % AES_BLOCKLEN; // full blocks only } else { /* body is non-continuous */ m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf); sp = sbuf; - len = AES_BLOCKLEN; // 1 block only in sbuf + len = AES_BLOCKLEN; // 1 block only in sbuf } /* destination */ if (!d || dn + AES_BLOCKLEN > d->m_len) { - if (d) + if (d) { dp = d; + } MGET(d, M_DONTWAIT, MT_DATA); i = m->m_pkthdr.len - (soff + sn); if (d && i > MLEN) { @@ -254,14 +254,17 @@ esp_cbc_decrypt_aes( } if (!d) { m_freem(m); - if (d0) + if (d0) { m_freem(d0); + } return ENOBUFS; } - if (!d0) + if 
(!d0) { d0 = d; - if (dp) + } + if (dp) { dp->m_next = d; + } // try to make mbuf data aligned if (!IPSEC_IS_P2ALIGNED(d->m_data)) { @@ -270,15 +273,17 @@ esp_cbc_decrypt_aes( d->m_len = M_TRAILINGSPACE(d); d->m_len -= d->m_len % AES_BLOCKLEN; - if (d->m_len > i) + if (d->m_len > i) { d->m_len = i; - dptr = mtod(d, u_int8_t *); + } + dptr = mtod(d, u_int8_t *); dn = 0; } /* adjust len if greater than space available in dest */ - if (len > d->m_len - dn) + if (len > d->m_len - dn) { len = d->m_len - dn; + } /* decrypt */ // check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary). @@ -289,31 +294,31 @@ esp_cbc_decrypt_aes( if (len > MAX_REALIGN_LEN) { m_freem(m); if (d0 != NULL) { - m_freem(d0); + m_freem(d0); } if (sp_aligned != NULL) { - FREE(sp_aligned, M_SECA); - sp_aligned = NULL; + FREE(sp_aligned, M_SECA); + sp_aligned = NULL; } return ENOBUFS; } if (sp_aligned == NULL) { sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT); if (sp_aligned == NULL) { - m_freem(m); - if (d0 != NULL) { - m_freem(d0); - } - return ENOMEM; + m_freem(m); + if (d0 != NULL) { + m_freem(d0); + } + return ENOMEM; } } sp = sp_aligned; memcpy(sp, sp_unaligned, len); } // no need to check output pointer alignment - aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn, - (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt))); - + aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn, + (aes_decrypt_ctx*)(&(((aes_ctx*)sav->sched)->decrypt))); + // update unaligned pointers if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) { sp = sp_unaligned; @@ -322,7 +327,7 @@ esp_cbc_decrypt_aes( /* udpate offsets */ sn += len; dn += len; - + // next iv bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN); @@ -332,7 +337,6 @@ esp_cbc_decrypt_aes( soff += s->m_len; s = s->m_next; } - } /* free un-needed source mbufs and add dest mbufs to chain */ @@ -345,7 +349,7 @@ esp_cbc_decrypt_aes( FREE(sp_aligned, M_SECA); sp_aligned = NULL; } - + /* just in case */ bzero(iv, sizeof(iv)); bzero(sbuf, sizeof(sbuf)); @@ -364,8 +368,8 @@ esp_cbc_encrypt_aes( { struct mbuf *s; struct mbuf *d, *d0, *dp; - int soff; /* offset from the head of chain, to head of this mbuf */ - int sn, dn; /* offset from the head of the mbuf, to meat */ + int soff; /* offset from the head of chain, to head of this mbuf */ + int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t *ivp, *dptr, *ivp_unaligned; u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL; @@ -412,7 +416,7 @@ esp_cbc_encrypt_aes( soff = sn = dn = 0; d = d0 = dp = NULL; sp = dptr = NULL; - + /* skip headers/IV */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { @@ -427,27 +431,29 @@ esp_cbc_encrypt_aes( scutoff = sn; /* skip over empty mbuf */ - while (s && s->m_len == 0) + while (s && s->m_len == 0) { s = s->m_next; - + } + while (soff < m->m_pkthdr.len) { /* source */ if (sn + AES_BLOCKLEN <= s->m_len) { /* body is continuous */ sp = mtod(s, u_int8_t *) + sn; len = s->m_len - sn; - len -= len % AES_BLOCKLEN; // full blocks only + len -= len % AES_BLOCKLEN; // full blocks only } else { /* body is non-continuous */ m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf); sp = sbuf; - len = AES_BLOCKLEN; // 1 block only in sbuf + len = AES_BLOCKLEN; // 1 block only in sbuf } /* destination */ if (!d || dn + AES_BLOCKLEN > d->m_len) { - if (d) + if (d) { dp = d; + } MGET(d, M_DONTWAIT, MT_DATA); i = m->m_pkthdr.len - (soff + sn); if (d && i > MLEN) { @@ -462,14 +468,17 @@ 
esp_cbc_encrypt_aes( } if (!d) { m_freem(m); - if (d0) + if (d0) { m_freem(d0); + } return ENOBUFS; } - if (!d0) + if (!d0) { d0 = d; - if (dp) + } + if (dp) { dp->m_next = d; + } // try to make mbuf data aligned if (!IPSEC_IS_P2ALIGNED(d->m_data)) { @@ -478,16 +487,18 @@ esp_cbc_encrypt_aes( d->m_len = M_TRAILINGSPACE(d); d->m_len -= d->m_len % AES_BLOCKLEN; - if (d->m_len > i) + if (d->m_len > i) { d->m_len = i; + } dptr = mtod(d, u_int8_t *); dn = 0; } - + /* adjust len if greater than space available */ - if (len > d->m_len - dn) + if (len > d->m_len - dn) { len = d->m_len - dn; - + } + /* encrypt */ // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary). if (IPSEC_IS_P2ALIGNED(sp)) { @@ -527,8 +538,8 @@ esp_cbc_encrypt_aes( memcpy(ivp, ivp_unaligned, AES_BLOCKLEN); } // no need to check output pointer alignment - aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn, - (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt))); + aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn, + (aes_encrypt_ctx*)(&(((aes_ctx*)sav->sched)->encrypt))); // update unaligned pointers if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) { @@ -543,8 +554,8 @@ esp_cbc_encrypt_aes( dn += len; /* next iv */ - ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted - + ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted + /* find the next source block and skip empty mbufs */ while (s && sn >= s->m_len) { sn -= s->m_len; @@ -557,7 +568,7 @@ esp_cbc_encrypt_aes( m_freem(scut->m_next); scut->m_len = scutoff; scut->m_next = d0; - + // free memory if (sp_aligned != NULL) { FREE(sp_aligned, M_SECA); @@ -575,58 +586,58 @@ int esp_gcm_schedlen( __unused const struct esp_algorithm *algo) { - return (sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN); + return sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN; } int esp_gcm_schedule( __unused const struct esp_algorithm *algo, - struct secasvar *sav) + struct secasvar *sav) { LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); u_int ivlen = sav->ivlen; - unsigned char nonce[ESP_GCM_SALT_LEN+ivlen]; + unsigned char nonce[ESP_GCM_SALT_LEN + ivlen]; int rc; ctx->decrypt = &ctx->ctxt[0]; ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)]; - rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ctx->decrypt); + rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->decrypt); if (rc) { - return (rc); + return rc; } bzero(nonce, ESP_GCM_SALT_LEN + ivlen); - memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN); - memcpy(nonce+ESP_GCM_SALT_LEN, sav->iv, ivlen); + memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN); + memcpy(nonce + ESP_GCM_SALT_LEN, sav->iv, ivlen); - rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, nonce, ctx->encrypt); + rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, nonce, ctx->encrypt); if (rc) { - return (rc); + return rc; } rc = aes_encrypt_reset_gcm(ctx->encrypt); if (rc) { - return (rc); + return rc; } - return (rc); + return rc; } int esp_gcm_encrypt_finalize(struct secasvar *sav, 
- unsigned char *tag, unsigned int tag_bytes) + unsigned char *tag, unsigned int tag_bytes) { aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); - return (aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt)); + return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt); } int esp_gcm_decrypt_finalize(struct secasvar *sav, - unsigned char *tag, unsigned int tag_bytes) + unsigned char *tag, unsigned int tag_bytes) { aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); - return (aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt)); + return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt); } int @@ -640,18 +651,18 @@ esp_gcm_encrypt_aes( { struct mbuf *s; struct mbuf *d, *d0, *dp; - int soff; /* offset from the head of chain, to head of this mbuf */ - int sn, dn; /* offset from the head of the mbuf, to meat */ + int soff; /* offset from the head of chain, to head of this mbuf */ + int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t *dptr, *sp, *sp_unaligned, *sp_aligned = NULL; aes_gcm_ctx *ctx; struct mbuf *scut; int scutoff; int i, len; - unsigned char nonce[ESP_GCM_SALT_LEN+ivlen]; + unsigned char nonce[ESP_GCM_SALT_LEN + ivlen]; if (ivlen != ESP_GCM_IVLEN) { - ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen)); + ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen)); m_freem(m); return EINVAL; } @@ -665,7 +676,7 @@ esp_gcm_encrypt_aes( bodyoff = off + sizeof(struct newesp) + ivlen; } - bzero(nonce, ESP_GCM_SALT_LEN+ivlen); + bzero(nonce, ESP_GCM_SALT_LEN + ivlen); /* generate new iv */ ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); @@ -691,7 +702,7 @@ esp_gcm_encrypt_aes( */ memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen); m_copyback(m, ivoff, ivlen, sav->iv); - bzero(nonce, ESP_GCM_SALT_LEN+ivlen); + bzero(nonce, ESP_GCM_SALT_LEN + ivlen); if (m->m_pkthdr.len < bodyoff) { ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__, @@ -702,10 +713,10 @@ esp_gcm_encrypt_aes( /* Set Additional Authentication Data */ if (!(sav->flags & SADB_X_EXT_OLD)) { - struct newesp esp; + struct newesp esp; m_copydata(m, off, sizeof(esp), (caddr_t) &esp); if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) { - ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__)); + ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__)); m_freem(m); return EINVAL; } @@ -715,7 +726,7 @@ esp_gcm_encrypt_aes( soff = sn = dn = 0; d = d0 = dp = NULL; sp = dptr = NULL; - + /* skip headers/IV */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { @@ -730,18 +741,20 @@ esp_gcm_encrypt_aes( scutoff = sn; /* skip over empty mbuf */ - while (s && s->m_len == 0) + while (s && s->m_len == 0) { s = s->m_next; - + } + while (soff < m->m_pkthdr.len) { - /* source */ - sp = mtod(s, u_int8_t *) + sn; + /* source */ + sp = mtod(s, u_int8_t *) + sn; len = s->m_len - sn; /* destination */ if (!d || (dn + len > d->m_len)) { - if (d) + if (d) { dp = d; + } MGET(d, M_DONTWAIT, MT_DATA); i = m->m_pkthdr.len - (soff + sn); if (d && i > MLEN) { @@ -756,14 +769,17 @@ esp_gcm_encrypt_aes( } if (!d) { m_freem(m); - if (d0) + if (d0) { m_freem(d0); + } return ENOBUFS; } - if (!d0) + if (!d0) { d0 = d; - if (dp) + } + if (dp) { dp->m_next = d; + } // try to make mbuf data aligned if (!IPSEC_IS_P2ALIGNED(d->m_data)) { @@ -772,17 +788,19 @@ esp_gcm_encrypt_aes( d->m_len = M_TRAILINGSPACE(d); - if (d->m_len > i) + if (d->m_len > i) { d->m_len = i; + } 
dptr = mtod(d, u_int8_t *); dn = 0; } - + /* adjust len if greater than space available */ - if (len > d->m_len - dn) + if (len > d->m_len - dn) { len = d->m_len - dn; - + } + /* encrypt */ // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary). if (IPSEC_IS_P2ALIGNED(sp)) { @@ -814,8 +832,8 @@ esp_gcm_encrypt_aes( memcpy(sp, sp_unaligned, len); } - if (aes_encrypt_gcm(sp, len, dptr+dn, ctx->encrypt)) { - ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__)); + if (aes_encrypt_gcm(sp, len, dptr + dn, ctx->encrypt)) { + ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__)); m_freem(m); return EINVAL; } @@ -841,7 +859,7 @@ esp_gcm_encrypt_aes( m_freem(scut->m_next); scut->m_len = scutoff; scut->m_next = d0; - + // free memory if (sp_aligned != NULL) { FREE(sp_aligned, M_SECA); @@ -861,19 +879,19 @@ esp_gcm_decrypt_aes( { struct mbuf *s; struct mbuf *d, *d0, *dp; - int soff; /* offset from the head of chain, to head of this mbuf */ - int sn, dn; /* offset from the head of the mbuf, to meat */ + int soff; /* offset from the head of chain, to head of this mbuf */ + int sn, dn; /* offset from the head of the mbuf, to meat */ size_t ivoff, bodyoff; u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4))), *dptr; u_int8_t *sp, *sp_unaligned, *sp_aligned = NULL; aes_gcm_ctx *ctx; struct mbuf *scut; int scutoff; - int i, len; - unsigned char nonce[ESP_GCM_SALT_LEN+ivlen]; + int i, len; + unsigned char nonce[ESP_GCM_SALT_LEN + ivlen]; if (ivlen != ESP_GCM_IVLEN) { - ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen)); + ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen)); m_freem(m); return EINVAL; } @@ -888,7 +906,7 @@ esp_gcm_decrypt_aes( } if (m->m_pkthdr.len < bodyoff) { - ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__, + ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__, m->m_pkthdr.len, (u_int32_t)bodyoff)); m_freem(m); return EINVAL; @@ -898,12 +916,12 @@ esp_gcm_decrypt_aes( m_copydata(m, ivoff, ivlen, (caddr_t) iv); /* Set IV */ - memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN); - memcpy(nonce+ESP_GCM_SALT_LEN, iv, ivlen); + memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN); + memcpy(nonce + ESP_GCM_SALT_LEN, iv, ivlen); ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) { - ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__)); + ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__)); m_freem(m); bzero(nonce, sizeof(nonce)); return EINVAL; @@ -912,10 +930,10 @@ esp_gcm_decrypt_aes( /* Set Additional Authentication Data */ if (!(sav->flags & SADB_X_EXT_OLD)) { - struct newesp esp; + struct newesp esp; m_copydata(m, off, sizeof(esp), (caddr_t) &esp); if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) { - ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__)); + ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__)); return EINVAL; } } @@ -924,7 +942,7 @@ esp_gcm_decrypt_aes( soff = sn = dn = 0; d = d0 = dp = NULL; sp = dptr = NULL; - + /* skip header/IV offset */ while (soff < bodyoff) { if (soff + s->m_len > bodyoff) { @@ -939,18 +957,20 @@ esp_gcm_decrypt_aes( scutoff = sn; /* skip over empty mbuf */ - while (s && s->m_len == 0) + while (s && s->m_len == 0) { s = s->m_next; - + } + while (soff < m->m_pkthdr.len) { /* source */ - sp = mtod(s, 
u_int8_t *) + sn; + sp = mtod(s, u_int8_t *) + sn; len = s->m_len - sn; /* destination */ if (!d || (dn + len > d->m_len)) { - if (d) + if (d) { dp = d; + } MGET(d, M_DONTWAIT, MT_DATA); i = m->m_pkthdr.len - (soff + sn); if (d && i > MLEN) { @@ -965,14 +985,17 @@ esp_gcm_decrypt_aes( } if (!d) { m_freem(m); - if (d0) + if (d0) { m_freem(d0); + } return ENOBUFS; } - if (!d0) + if (!d0) { d0 = d; - if (dp) + } + if (dp) { dp->m_next = d; + } // try to make mbuf data aligned if (!IPSEC_IS_P2ALIGNED(d->m_data)) { @@ -981,16 +1004,18 @@ esp_gcm_decrypt_aes( d->m_len = M_TRAILINGSPACE(d); - if (d->m_len > i) + if (d->m_len > i) { d->m_len = i; + } - dptr = mtod(d, u_int8_t *); + dptr = mtod(d, u_int8_t *); dn = 0; } /* adjust len if greater than space available in dest */ - if (len > d->m_len - dn) + if (len > d->m_len - dn) { len = d->m_len - dn; + } /* Decrypt */ // check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary). @@ -1025,11 +1050,11 @@ esp_gcm_decrypt_aes( // no need to check output pointer alignment if (aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt)) { - ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__)); + ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__)); m_freem(m); return EINVAL; } - + // update unaligned pointers if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) { sp = sp_unaligned; @@ -1038,7 +1063,7 @@ esp_gcm_decrypt_aes( /* udpate offsets */ sn += len; dn += len; - + /* find the next source block */ while (s && sn >= s->m_len) { sn -= s->m_len; @@ -1057,7 +1082,7 @@ esp_gcm_decrypt_aes( FREE(sp_aligned, M_SECA); sp_aligned = NULL; } - + /* just in case */ bzero(iv, sizeof(iv)); diff --git a/bsd/netinet6/esp_rijndael.h b/bsd/netinet6/esp_rijndael.h index 75d92c6e8..f579fe66b 100644 --- a/bsd/netinet6/esp_rijndael.h +++ b/bsd/netinet6/esp_rijndael.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -62,11 +62,11 @@ #ifdef BSD_KERNEL_PRIVATE int esp_aes_schedlen(const struct esp_algorithm *); int esp_aes_schedule(const struct esp_algorithm *, struct secasvar *); -int esp_cbc_decrypt_aes(struct mbuf *, size_t, struct secasvar *, - const struct esp_algorithm *, int); +int esp_cbc_decrypt_aes(struct mbuf *, size_t, struct secasvar *, + const struct esp_algorithm *, int); int -esp_cbc_encrypt_aes(struct mbuf *, size_t, size_t, struct secasvar *, - const struct esp_algorithm *, int); + esp_cbc_encrypt_aes(struct mbuf *, size_t, size_t, struct secasvar *, + const struct esp_algorithm *, int); int esp_gcm_schedlen(const struct esp_algorithm *); int esp_gcm_schedule(const struct esp_algorithm *, struct secasvar *); diff --git a/bsd/netinet6/frag6.c b/bsd/netinet6/frag6.c index 5bdb1adf3..1b5925265 100644 --- a/bsd/netinet6/frag6.c +++ b/bsd/netinet6/frag6.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -107,7 +107,7 @@ static void frag6_insque(struct ip6q *, struct ip6q *); static void frag6_remque(struct ip6q *); static void frag6_freef(struct ip6q *, struct fq6_head *, struct fq6_head *); -static int frag6_timeout_run; /* frag6 timer is scheduled to run */ +static int frag6_timeout_run; /* frag6 timer is scheduled to run */ static void frag6_timeout(void *); static void frag6_sched_timeout(void); @@ -118,20 +118,20 @@ static struct ip6asfrag *ip6af_alloc(int); static void ip6af_free(struct ip6asfrag *); decl_lck_mtx_data(static, ip6qlock); -static lck_attr_t *ip6qlock_attr; -static lck_grp_t *ip6qlock_grp; -static lck_grp_attr_t *ip6qlock_grp_attr; +static lck_attr_t *ip6qlock_attr; +static lck_grp_t *ip6qlock_grp; +static lck_grp_attr_t *ip6qlock_grp_attr; /* IPv6 fragment reassembly queues (protected by ip6qlock) */ -static struct ip6q ip6q; /* ip6 reassembly queues */ -static int ip6_maxfragpackets; /* max packets in reass queues */ -static u_int32_t frag6_nfragpackets; /* # of packets in reass queues */ -static int ip6_maxfrags; /* max fragments in reass queues */ -static u_int32_t frag6_nfrags; /* # of fragments in reass queues */ -static u_int32_t ip6q_limit; /* ip6q allocation limit */ -static u_int32_t ip6q_count; /* current # of allocated ip6q's */ -static u_int32_t ip6af_limit; /* ip6asfrag allocation limit */ -static u_int32_t ip6af_count; /* current # of allocated ip6asfrag's */ +static struct ip6q ip6q; /* ip6 reassembly queues */ +static int ip6_maxfragpackets; /* max packets in reass queues */ +static u_int32_t frag6_nfragpackets; /* # of packets in reass queues */ +static int ip6_maxfrags; /* max fragments in reass queues */ +static u_int32_t frag6_nfrags; /* # of fragments in reass queues */ +static u_int32_t ip6q_limit; /* ip6q allocation limit */ +static u_int32_t ip6q_count; /* current # of allocated ip6q's */ +static u_int32_t ip6af_limit; /* ip6asfrag allocation limit */ +static u_int32_t ip6af_count; /* current # of allocated ip6asfrag's */ static int sysctl_maxfragpackets SYSCTL_HANDLER_ARGS; static int sysctl_maxfrags SYSCTL_HANDLER_ARGS; @@ -158,9 +158,9 @@ void frag6_init(void) { /* ip6q_alloc() uses mbufs for IPv6 fragment queue structures */ - _CASSERT(sizeof (struct ip6q) <= _MLEN); + _CASSERT(sizeof(struct ip6q) <= _MLEN); /* ip6af_alloc() uses mbufs for IPv6 fragment queue structures */ - _CASSERT(sizeof (struct ip6asfrag) <= _MLEN); + _CASSERT(sizeof(struct ip6asfrag) <= _MLEN); /* IPv6 fragment reassembly queue lock */ ip6qlock_grp_attr = lck_grp_attr_alloc_init(); @@ -194,7 +194,7 @@ frag6_scrub_context(struct mbuf *m) static int frag6_restore_context(struct mbuf *m) { - return ((int)m->m_pkthdr.pkt_hdr); + return (int)m->m_pkthdr.pkt_hdr; } /* @@ -284,7 +284,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) struct ip6asfrag *af6, *ip6af, *af6dwn; int offset = *offp, nxt, i, next; int first_frag = 0; - int fragoff, frgpartlen; /* must be larger than u_int16_t */ + int fragoff, frgpartlen; /* must be larger than u_int16_t */ struct ifnet *dstifp = NULL; u_int8_t ecn, ecn0; uint32_t csum, csum_flags; @@ -293,7 +293,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) VERIFY(m->m_flags & M_PKTHDR); - MBUFQ_INIT(&diq6); /* for deferred ICMP param problem errors */ + MBUFQ_INIT(&diq6); /* for deferred ICMP param problem errors */ /* Expect 32-bit aligned data pointer on strict-align platforms */ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); @@ -318,8 +318,9 @@ frag6_input(struct mbuf **mp, int 
*offp, int proto) #endif /* IN6_IFSTAT_STRICT */ /* we are violating the spec, this may not be the dst interface */ - if (dstifp == NULL) + if (dstifp == NULL) { dstifp = m->m_pkthdr.rcvif; + } /* jumbo payload can't contain a fragment header */ if (ip6->ip6_plen == 0) { @@ -380,7 +381,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) ip6stat.ip6s_atmfrag_rcvd++; in6_ifstat_inc(dstifp, ifs6_atmfrag_rcvd); *offp = offset; - return (ip6f->ip6f_nxt); + return ip6f->ip6f_nxt; } /* @@ -392,12 +393,12 @@ frag6_input(struct mbuf **mp, int *offp, int proto) * care of any trailing bytes and subtract out their partial sum. */ if (ip6f->ip6f_nxt == IPPROTO_UDP && - offset == (sizeof (*ip6) + sizeof (*ip6f)) && + offset == (sizeof(*ip6) + sizeof(*ip6f)) && (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PARTIAL)) { uint32_t start = m->m_pkthdr.csum_rx_start; - uint32_t ip_len = (sizeof (*ip6) + ntohs(ip6->ip6_plen)); + uint32_t ip_len = (sizeof(*ip6) + ntohs(ip6->ip6_plen)); int32_t trailer = (m_pktlen(m) - ip_len); uint32_t swbytes = (uint32_t)trailer; @@ -409,7 +410,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { s = ip6->ip6_src.s6_addr16[1]; - ip6->ip6_src.s6_addr16[1] = 0 ; + ip6->ip6_src.s6_addr16[1] = 0; } if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { d = ip6->ip6_dst.s6_addr16[1]; @@ -419,23 +420,27 @@ frag6_input(struct mbuf **mp, int *offp, int proto) /* callee folds in sum */ csum = m_adj_sum16(m, start, offset, (ip_len - offset), csum); - if (offset > start) + if (offset > start) { swbytes += (offset - start); - else + } else { swbytes += (start - offset); + } - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { ip6->ip6_src.s6_addr16[1] = s; - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) + } + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { ip6->ip6_dst.s6_addr16[1] = d; - + } } csum_flags = m->m_pkthdr.csum_flags; - if (swbytes != 0) + if (swbytes != 0) { udp_in6_cksum_stats(swbytes); - if (trailer != 0) + } + if (trailer != 0) { m_adj(m, -trailer); + } } else { csum = 0; csum_flags = 0; @@ -450,11 +455,13 @@ frag6_input(struct mbuf **mp, int *offp, int proto) lck_mtx_lock(&ip6qlock); locked = 1; - for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next) + for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next) { if (ip6f->ip6f_ident == q6->ip6q_ident && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) && - IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)) + IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst)) { break; + } + } if (q6 == &ip6q) { /* @@ -463,24 +470,25 @@ frag6_input(struct mbuf **mp, int *offp, int proto) first_frag = 1; q6 = ip6q_alloc(M_DONTWAIT); - if (q6 == NULL) + if (q6 == NULL) { goto dropfrag; + } frag6_insque(q6, &ip6q); frag6_nfragpackets++; /* ip6q_nxt will be filled afterwards, from 1st fragment */ - q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6; + q6->ip6q_down = q6->ip6q_up = (struct ip6asfrag *)q6; #ifdef notyet - q6->ip6q_nxtp = (u_char *)nxtp; + q6->ip6q_nxtp = (u_char *)nxtp; #endif - q6->ip6q_ident = ip6f->ip6f_ident; - q6->ip6q_ttl = IPV6_FRAGTTL; - q6->ip6q_src = ip6->ip6_src; - q6->ip6q_dst = ip6->ip6_dst; - q6->ip6q_ecn = + q6->ip6q_ident = ip6f->ip6f_ident; + q6->ip6q_ttl = IPV6_FRAGTTL; + q6->ip6q_src = ip6->ip6_src; + q6->ip6q_dst = ip6->ip6_dst; + q6->ip6q_ecn = (ntohl(ip6->ip6_flow) >> 20) & IPTOS_ECN_MASK; - q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. 
*/ + q6->ip6q_unfrglen = -1; /* The 1st fragment has not arrived. */ q6->ip6q_nfrag = 0; @@ -537,7 +545,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) */ if (fragoff == 0) { for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; - af6 = af6dwn) { + af6 = af6dwn) { af6dwn = af6->ip6af_down; if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen > @@ -561,7 +569,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) ip6err->ip6_dst = q6->ip6q_dst; frag6_save_context(merr, - erroff - sizeof (struct ip6_frag) + + erroff - sizeof(struct ip6_frag) + offsetof(struct ip6_frag, ip6f_offlg)); MBUFQ_ENQUEUE(&diq6, merr); @@ -570,8 +578,9 @@ frag6_input(struct mbuf **mp, int *offp, int proto) } ip6af = ip6af_alloc(M_DONTWAIT); - if (ip6af == NULL) + if (ip6af == NULL) { goto dropfrag; + } ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG; ip6af->ip6af_off = fragoff; @@ -596,8 +605,9 @@ frag6_input(struct mbuf **mp, int *offp, int proto) ip6af_free(ip6af); goto dropfrag; } - if (ecn0 != IPTOS_ECN_CE) + if (ecn0 != IPTOS_ECN_CE) { q6->ip6q_ecn = IPTOS_ECN_CE; + } } if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) { ip6af_free(ip6af); @@ -608,9 +618,11 @@ frag6_input(struct mbuf **mp, int *offp, int proto) * Find a segment which begins after this one does. */ for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; - af6 = af6->ip6af_down) - if (af6->ip6af_off > ip6af->ip6af_off) + af6 = af6->ip6af_down) { + if (af6->ip6af_off > ip6af->ip6af_off) { break; + } + } #if 0 /* @@ -623,10 +635,11 @@ frag6_input(struct mbuf **mp, int *offp, int proto) */ if (af6->ip6af_up != (struct ip6asfrag *)q6) { i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen - - ip6af->ip6af_off; + - ip6af->ip6af_off; if (i > 0) { - if (i >= ip6af->ip6af_frglen) + if (i >= ip6af->ip6af_frglen) { goto dropfrag; + } m_adj(IP6_REASS_MBUF(ip6af), i); q6->ip6q_csum_flags = 0; ip6af->ip6af_off += i; @@ -639,7 +652,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) * if they are completely covered, dequeue them. */ while (af6 != (struct ip6asfrag *)q6 && - ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) { + ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) { i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off; if (i < af6->ip6af_frglen) { af6->ip6af_frglen -= i; @@ -665,9 +678,9 @@ frag6_input(struct mbuf **mp, int *offp, int proto) */ if (af6->ip6af_up != (struct ip6asfrag *)q6) { i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen - - ip6af->ip6af_off; + - ip6af->ip6af_off; if (i > 0) { -#if 0 /* suppress the noisy log */ +#if 0 /* suppress the noisy log */ log(LOG_ERR, "%d bytes of a fragment from %s " "overlaps the previous fragment\n", i, ip6_sprintf(&q6->ip6q_src)); @@ -679,7 +692,7 @@ frag6_input(struct mbuf **mp, int *offp, int proto) if (af6 != (struct ip6asfrag *)q6) { i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off; if (i > 0) { -#if 0 /* suppress the noisy log */ +#if 0 /* suppress the noisy log */ log(LOG_ERR, "%d bytes of a fragment from %s " "overlaps the succeeding fragment", i, ip6_sprintf(&q6->ip6q_src)); @@ -695,10 +708,11 @@ frag6_input(struct mbuf **mp, int *offp, int proto) * as that of the existing ones, accumulate checksum. Otherwise, * invalidate checksum offload info for the entire datagram. 
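 * A hypothetical worked example (values illustrative, not from this
 * change): if fragment A arrives with 16-bit partial sum 0x1a2b and
 * fragment B with 0xe0f0, both flagged the same way, the datagram
 * sum accumulates to 0x1a2b + 0xe0f0 = 0xfb1b; ADDCARRY() below
 * folds any carry out of bit 15 back in before the reassembled
 * packet is handed up with the stored csum_rx_val.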
*/ - if (csum_flags != 0 && csum_flags == q6->ip6q_csum_flags) + if (csum_flags != 0 && csum_flags == q6->ip6q_csum_flags) { q6->ip6q_csum += csum; - else if (q6->ip6q_csum_flags != 0) + } else if (q6->ip6q_csum_flags != 0) { q6->ip6q_csum_flags = 0; + } insert: @@ -719,7 +733,7 @@ insert: #endif next = 0; for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; - af6 = af6->ip6af_down) { + af6 = af6->ip6af_down) { if (af6->ip6af_off != next) { lck_mtx_unlock(&ip6qlock); locked = 0; @@ -745,8 +759,9 @@ insert: while (af6 != (struct ip6asfrag *)q6) { af6dwn = af6->ip6af_down; frag6_deq(af6); - while (t->m_next) + while (t->m_next) { t = t->m_next; + } t->m_next = IP6_REASS_MBUF(af6); m_adj(t->m_next, af6->ip6af_offset); ip6af_free(af6); @@ -764,7 +779,7 @@ insert: ADDCARRY(csum); m->m_pkthdr.csum_rx_val = csum; - m->m_pkthdr.csum_rx_start = sizeof (struct ip6_hdr); + m->m_pkthdr.csum_rx_start = sizeof(struct ip6_hdr); m->m_pkthdr.csum_flags = q6->ip6q_csum_flags; } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) || (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { @@ -781,8 +796,9 @@ insert: ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr)); ip6->ip6_src = q6->ip6q_src; ip6->ip6_dst = q6->ip6q_dst; - if (q6->ip6q_ecn == IPTOS_ECN_CE) + if (q6->ip6q_ecn == IPTOS_ECN_CE) { ip6->ip6_flow |= htonl(IPTOS_ECN_CE << 20); + } nxt = q6->ip6q_nxt; #ifdef notyet @@ -822,7 +838,7 @@ insert: frag6_nfrags -= q6->ip6q_nfrag; ip6q_free(q6); - if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */ + if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */ m_fixhdr(m); /* * Mark packet as reassembled @@ -848,7 +864,7 @@ insert: in6_ifstat_inc(dstifp, ifs6_reass_ok); frag6_icmp6_paramprob_error(&diq6); VERIFY(MBUFQ_EMPTY(&diq6)); - return (nxt); + return nxt; done: VERIFY(m == NULL); @@ -856,7 +872,7 @@ done: if (frag6_nfragpackets == 0) { frag6_icmp6_paramprob_error(&diq6); VERIFY(MBUFQ_EMPTY(&diq6)); - return (IPPROTO_DONE); + return IPPROTO_DONE; } lck_mtx_lock(&ip6qlock); } @@ -865,7 +881,7 @@ done: lck_mtx_unlock(&ip6qlock); frag6_icmp6_paramprob_error(&diq6); VERIFY(MBUFQ_EMPTY(&diq6)); - return (IPPROTO_DONE); + return IPPROTO_DONE; dropfrag: ip6stat.ip6s_fragdropped++; @@ -876,7 +892,7 @@ dropfrag: m_freem(m); frag6_icmp6_paramprob_error(&diq6); VERIFY(MBUFQ_EMPTY(&diq6)); - return (IPPROTO_DONE); + return IPPROTO_DONE; } /* @@ -891,7 +907,7 @@ frag6_freef(struct ip6q *q6, struct fq6_head *dfq6, struct fq6_head *diq6) LCK_MTX_ASSERT(&ip6qlock, LCK_MTX_ASSERT_OWNED); for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6; - af6 = down6) { + af6 = down6) { struct mbuf *m = IP6_REASS_MBUF(af6); down6 = af6->ip6af_down; @@ -916,7 +932,6 @@ frag6_freef(struct ip6q *q6, struct fq6_head *dfq6, struct fq6_head *diq6) MBUFQ_ENQUEUE(dfq6, m); } ip6af_free(af6); - } frag6_remque(q6); frag6_nfragpackets--; @@ -958,7 +973,7 @@ frag6_insque(struct ip6q *new, struct ip6q *old) new->ip6q_prev = old; new->ip6q_next = old->ip6q_next; - old->ip6q_next->ip6q_prev= new; + old->ip6q_next->ip6q_prev = new; old->ip6q_next = new; } @@ -983,8 +998,8 @@ frag6_timeout(void *arg) struct fq6_head dfq6, diq6; struct ip6q *q6; - MBUFQ_INIT(&dfq6); /* for deferred frees */ - MBUFQ_INIT(&diq6); /* for deferred ICMP time exceeded errors */ + MBUFQ_INIT(&dfq6); /* for deferred frees */ + MBUFQ_INIT(&diq6); /* for deferred ICMP time exceeded errors */ /* * Update coarse-grained networking timestamp (in sec.); the idea @@ -995,7 +1010,7 @@ frag6_timeout(void *arg) lck_mtx_lock(&ip6qlock); q6 = ip6q.ip6q_next; - if (q6) 
+ if (q6) { while (q6 != &ip6q) { --q6->ip6q_ttl; q6 = q6->ip6q_next; @@ -1005,6 +1020,7 @@ frag6_timeout(void *arg) frag6_freef(q6->ip6q_prev, &dfq6, &diq6); } } + } /* * If we are over the maximum number of fragments * (due to the limit being lowered), drain off @@ -1024,8 +1040,9 @@ frag6_timeout(void *arg) lck_mtx_unlock(&ip6qlock); /* free fragments that need to be freed */ - if (!MBUFQ_EMPTY(&dfq6)) + if (!MBUFQ_EMPTY(&dfq6)) { MBUFQ_DRAIN(&dfq6); + } frag6_icmp6_timeex_error(&diq6); @@ -1052,8 +1069,8 @@ frag6_drain(void) { struct fq6_head dfq6, diq6; - MBUFQ_INIT(&dfq6); /* for deferred frees */ - MBUFQ_INIT(&diq6); /* for deferred ICMP time exceeded errors */ + MBUFQ_INIT(&dfq6); /* for deferred frees */ + MBUFQ_INIT(&diq6); /* for deferred ICMP time exceeded errors */ lck_mtx_lock(&ip6qlock); while (ip6q.ip6q_next != &ip6q) { @@ -1064,8 +1081,9 @@ frag6_drain(void) lck_mtx_unlock(&ip6qlock); /* free fragments that need to be freed */ - if (!MBUFQ_EMPTY(&dfq6)) + if (!MBUFQ_EMPTY(&dfq6)) { MBUFQ_DRAIN(&dfq6); + } frag6_icmp6_timeex_error(&diq6); @@ -1084,18 +1102,19 @@ ip6q_alloc(int how) * from frag6_nfragpackets since the latter represents the elements * already in the reassembly queues. */ - if (ip6q_limit > 0 && ip6q_count > ip6q_limit) - return (NULL); + if (ip6q_limit > 0 && ip6q_count > ip6q_limit) { + return NULL; + } t = m_get(how, MT_FTABLE); if (t != NULL) { atomic_add_32(&ip6q_count, 1); q6 = mtod(t, struct ip6q *); - bzero(q6, sizeof (*q6)); + bzero(q6, sizeof(*q6)); } else { q6 = NULL; } - return (q6); + return q6; } static void @@ -1116,18 +1135,19 @@ ip6af_alloc(int how) * from frag6_nfrags since the latter represents the elements * already in the reassembly queues. */ - if (ip6af_limit > 0 && ip6af_count > ip6af_limit) - return (NULL); + if (ip6af_limit > 0 && ip6af_count > ip6af_limit) { + return NULL; + } t = m_get(how, MT_FTABLE); if (t != NULL) { atomic_add_32(&ip6af_count, 1); af6 = mtod(t, struct ip6asfrag *); - bzero(af6, sizeof (*af6)); + bzero(af6, sizeof(*af6)); } else { af6 = NULL; } - return (af6); + return af6; } static void @@ -1144,26 +1164,32 @@ ip6q_updateparams(void) /* * -1 for unlimited allocation. */ - if (ip6_maxfragpackets < 0) + if (ip6_maxfragpackets < 0) { ip6q_limit = 0; - if (ip6_maxfrags < 0) + } + if (ip6_maxfrags < 0) { ip6af_limit = 0; + } /* * Positive number for specific bound. */ - if (ip6_maxfragpackets > 0) + if (ip6_maxfragpackets > 0) { ip6q_limit = ip6_maxfragpackets; - if (ip6_maxfrags > 0) + } + if (ip6_maxfrags > 0) { ip6af_limit = ip6_maxfrags; + } /* * Zero specifies no further fragment queue allocation -- set the * bound very low, but rely on implementation elsewhere to actually * prevent allocation and reclaim current queues. 
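 *
 * In other words: a sysctl value of -1 yields a limit of 0, which
 * ip6q_alloc()/ip6af_alloc() treat as "no bound"; a positive value N
 * yields a limit of exactly N; and 0 yields a limit of 1, so the
 * allocators refuse essentially all new reassembly state.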
*/ - if (ip6_maxfragpackets == 0) + if (ip6_maxfragpackets == 0) { ip6q_limit = 1; - if (ip6_maxfrags == 0) + } + if (ip6_maxfrags == 0) { ip6af_limit = 1; + } /* * Arm the purge timer if not already and if there's work to do */ @@ -1179,8 +1205,9 @@ sysctl_maxfragpackets SYSCTL_HANDLER_ARGS lck_mtx_lock(&ip6qlock); i = ip6_maxfragpackets; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* impose bounds */ if (i < -1 || i > (nmbclusters / 4)) { error = EINVAL; @@ -1190,7 +1217,7 @@ sysctl_maxfragpackets SYSCTL_HANDLER_ARGS ip6q_updateparams(); done: lck_mtx_unlock(&ip6qlock); - return (error); + return error; } static int @@ -1202,16 +1229,17 @@ sysctl_maxfrags SYSCTL_HANDLER_ARGS lck_mtx_lock(&ip6qlock); i = ip6_maxfrags; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* impose bounds */ if (i < -1 || i > (nmbclusters / 4)) { error = EINVAL; goto done; } - ip6_maxfrags= i; - ip6q_updateparams(); /* see if we need to arm timer */ + ip6_maxfrags = i; + ip6q_updateparams(); /* see if we need to arm timer */ done: lck_mtx_unlock(&ip6qlock); - return (error); + return error; } diff --git a/bsd/netinet6/icmp6.c b/bsd/netinet6/icmp6.c index 58917f864..f1d66606e 100644 --- a/bsd/netinet6/icmp6.c +++ b/bsd/netinet6/icmp6.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -160,14 +160,14 @@ static void icmp6_errcount(struct icmp6errstat *, int, int); static int icmp6_rip6_input(struct mbuf **, int); static int icmp6_ratelimit(const struct in6_addr *, const int, const int); static const char *icmp6_redirect_diag(struct in6_addr *, - struct in6_addr *, struct in6_addr *); + struct in6_addr *, struct in6_addr *); static struct mbuf *ni6_input(struct mbuf *, int); static struct mbuf *ni6_nametodns(const char *, int, int); static int ni6_dnsmatch(const char *, int, const char *, int); static int ni6_addrs(struct icmp6_nodeinfo *, - struct ifnet **, char *); + struct ifnet **, char *); static int ni6_store_addrs(struct icmp6_nodeinfo *, struct icmp6_nodeinfo *, - struct ifnet *, int); + struct ifnet *, int); static int icmp6_notify_error(struct mbuf *, int, int, int); @@ -180,7 +180,7 @@ icmp6_init(struct ip6protosw *pp, struct domain *dp) /* Also called from ip6_init() without pp */ VERIFY(pp == NULL || - (pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + (pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); /* This gets called by more than one protocols, so initialize once */ if (!icmp6_initialized) { @@ -255,25 +255,29 @@ icmp6_error2(struct mbuf *m, int type, int code, int param, { struct ip6_hdr *ip6; - if (ifp == NULL) + if (ifp == NULL) { return; + } #ifndef PULLDOWN_TEST - IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr),return ); + IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), return ); #else if (m->m_len < sizeof(struct ip6_hdr)) { m = m_pullup(m, sizeof(struct ip6_hdr)); - if (m == NULL) + if (m == NULL) { return; + } } #endif ip6 = mtod(m, struct ip6_hdr *); - if (in6_setscope(&ip6->ip6_src, ifp, NULL) != 0) + if (in6_setscope(&ip6->ip6_src, ifp, NULL) != 0) { return; - if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0) + } + if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0) { return; + } icmp6_error(m, type, code, param); } 
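For readers tracing the icmp6_error2()/icmp6_error() pair reindented
above, a minimal caller sketch may help. It is hypothetical
(report_bad_option(), pkt, optoff and rcvif are placeholder names, not
identifiers from this change) and assumes only the signatures visible
in these hunks:

/*
 * Sketch only: icmp6_error2() embeds the receiving interface's scope
 * into the offending packet's addresses and then defers to
 * icmp6_error(), so a caller that still holds the receiving ifnet
 * should prefer it over calling icmp6_error() directly.
 */
static void
report_bad_option(struct mbuf *pkt, int optoff, struct ifnet *rcvif)
{
	/* hand the offending packet to the ICMPv6 error machinery */
	icmp6_error2(pkt, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION,
	    optoff, rcvif);
}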
@@ -282,11 +286,13 @@ icmp6_error2(struct mbuf *m, int type, int code, int param, * Generate an error packet of type error in response to bad IP6 packet. */ void -icmp6_error(struct mbuf *m, int type, int code, int param) { +icmp6_error(struct mbuf *m, int type, int code, int param) +{ icmp6_error_flag(m, type, code, param, ICMP6_ERROR_RST_MRCVIF); } -void icmp6_error_flag (struct mbuf *m, int type, int code, int param, int flags) +void +icmp6_error_flag(struct mbuf *m, int type, int code, int param, int flags) { struct ip6_hdr *oip6, *nip6; struct icmp6_hdr *icmp6; @@ -299,7 +305,7 @@ void icmp6_error_flag (struct mbuf *m, int type, int code, int param, int flags) /* count per-type-code statistics */ icmp6_errcount(&icmp6stat.icp6s_outerrhist, type, code); -#ifdef M_DECRYPTED /*not openbsd*/ +#ifdef M_DECRYPTED /*not openbsd*/ if (m->m_flags & M_DECRYPTED) { icmp6stat.icp6s_canterror++; goto freeit; @@ -307,12 +313,13 @@ void icmp6_error_flag (struct mbuf *m, int type, int code, int param, int flags) #endif #ifndef PULLDOWN_TEST - IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), return); + IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), return ); #else if (m->m_len < sizeof(struct ip6_hdr)) { m = m_pullup(m, sizeof(struct ip6_hdr)); - if (m == NULL) + if (m == NULL) { return; + } } #endif oip6 = mtod(m, struct ip6_hdr *); @@ -328,20 +335,22 @@ void icmp6_error_flag (struct mbuf *m, int type, int code, int param, int flags) * in the option type field. This check has been done in * ip6_unknown_opt(), so we can just check the type and code. */ - if ((m->m_flags & (M_BCAST|M_MCAST) || - IN6_IS_ADDR_MULTICAST(&oip6->ip6_dst)) && + if ((m->m_flags & (M_BCAST | M_MCAST) || + IN6_IS_ADDR_MULTICAST(&oip6->ip6_dst)) && (type != ICMP6_PACKET_TOO_BIG && - (type != ICMP6_PARAM_PROB || - code != ICMP6_PARAMPROB_OPTION))) + (type != ICMP6_PARAM_PROB || + code != ICMP6_PARAMPROB_OPTION))) { goto freeit; + } /* * RFC 2463, 2.4 (e.5): source address check. * XXX: the case of anycast source? */ if (IN6_IS_ADDR_UNSPECIFIED(&oip6->ip6_src) || - IN6_IS_ADDR_MULTICAST(&oip6->ip6_src)) + IN6_IS_ADDR_MULTICAST(&oip6->ip6_src)) { goto freeit; + } /* * If we are about to send ICMPv6 against ICMPv6 error/redirect, @@ -353,11 +362,11 @@ void icmp6_error_flag (struct mbuf *m, int type, int code, int param, int flags) struct icmp6_hdr *icp; #ifndef PULLDOWN_TEST - IP6_EXTHDR_CHECK(m, 0, off + sizeof(struct icmp6_hdr), return); + IP6_EXTHDR_CHECK(m, 0, off + sizeof(struct icmp6_hdr), return ); icp = (struct icmp6_hdr *)(mtod(m, caddr_t) + off); #else IP6_EXTHDR_GET(icp, struct icmp6_hdr *, m, off, - sizeof(*icp)); + sizeof(*icp)); if (icp == NULL) { icmp6stat.icp6s_tooshort++; return; @@ -391,13 +400,15 @@ void icmp6_error_flag (struct mbuf *m, int type, int code, int param, int flags) * OK, ICMP6 can be generated. */ - if (m->m_pkthdr.len >= ICMPV6_PLD_MAXLEN) + if (m->m_pkthdr.len >= ICMPV6_PLD_MAXLEN) { m_adj(m, ICMPV6_PLD_MAXLEN - m->m_pkthdr.len); + } preplen = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr); M_PREPEND(m, preplen, M_DONTWAIT, 1); - if (m && m->m_len < preplen) + if (m && m->m_len < preplen) { m = m_pullup(m, preplen); + } if (m == NULL) { nd6log((LOG_DEBUG, "ENOBUFS in icmp6_error %d\n", __LINE__)); return; @@ -431,7 +442,7 @@ void icmp6_error_flag (struct mbuf *m, int type, int code, int param, int flags) return; - freeit: +freeit: /* * If we can't tell whether or not we can generate ICMP6, free it. 
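 * (This path is reached when the offending packet was broadcast or
 * multicast, had an unspecified or multicast source, or was itself
 * an ICMPv6 error or redirect; answering those would invite error
 * loops, per the RFC 2463, 2.4 rules cited above.)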
*/ @@ -514,7 +525,7 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) * Note: SSM filters are not applied for ICMPv6 traffic. */ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { - struct in6_multi *inm; + struct in6_multi *inm; in6_multihead_lock_shared(); IN6_LOOKUP_MULTI(&ip6->ip6_dst, ifp, inm); @@ -566,15 +577,16 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) icmp6stat.icp6s_inhist[icmp6->icmp6_type]++; icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_msg); - if (icmp6->icmp6_type < ICMP6_INFOMSG_MASK) + if (icmp6->icmp6_type < ICMP6_INFOMSG_MASK) { icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_error); + } switch (icmp6->icmp6_type) { case ICMP6_DST_UNREACH: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_dstunreach); switch (code) { case ICMP6_DST_UNREACH_NOROUTE: - case ICMP6_DST_UNREACH_ADDR: /* PRC_HOSTDEAD is a DOS */ + case ICMP6_DST_UNREACH_ADDR: /* PRC_HOSTDEAD is a DOS */ code = PRC_UNREACH_NET; break; case ICMP6_DST_UNREACH_ADMIN: @@ -635,15 +647,16 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) case ICMP6_ECHO_REQUEST: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_echo); - if (code != 0) + if (code != 0) { goto badcode; + } if ((n = m_copy(m, 0, M_COPYALL)) == NULL) { /* Give up remote */ goto rate_limit_checked; } if ((n->m_flags & M_EXT) != 0 - || n->m_len < off + sizeof(struct icmp6_hdr)) { + || n->m_len < off + sizeof(struct icmp6_hdr)) { struct mbuf *n0 = n; const int maxlen = sizeof(*nip6) + sizeof(*nicmp6); @@ -656,7 +669,7 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) m_freem(n0); goto rate_limit_checked; } - MGETHDR(n, M_DONTWAIT, n0->m_type); /* MAC-OK */ + MGETHDR(n, M_DONTWAIT, n0->m_type); /* MAC-OK */ if (n && maxlen >= MHLEN) { MCLGET(n, M_DONTWAIT); if ((n->m_flags & M_EXT) == 0) { @@ -679,7 +692,7 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) bcopy(icmp6, nicmp6, sizeof(struct icmp6_hdr)); noff = sizeof(struct ip6_hdr); n->m_pkthdr.len = n->m_len = - noff + sizeof(struct icmp6_hdr); + noff + sizeof(struct icmp6_hdr); /* * Adjust mbuf. ip6_plen will be adjusted in * ip6_output(). @@ -691,12 +704,12 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) } else { nip6 = mtod(n, struct ip6_hdr *); IP6_EXTHDR_GET(nicmp6, struct icmp6_hdr *, n, off, - sizeof(*nicmp6)); + sizeof(*nicmp6)); noff = off; } - if(nicmp6 == NULL) + if (nicmp6 == NULL) { panic("nicmp6 is NULL in %s, which isn't good!\n", __FUNCTION__); - else { + } else { nicmp6->icmp6_type = ICMP6_ECHO_REPLY; nicmp6->icmp6_code = 0; } @@ -709,62 +722,72 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) case ICMP6_ECHO_REPLY: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_echoreply); - if (code != 0) + if (code != 0) { goto badcode; + } break; case MLD_LISTENER_QUERY: case MLD_LISTENER_REPORT: - if (icmp6len < sizeof(struct mld_hdr)) + if (icmp6len < sizeof(struct mld_hdr)) { goto badlen; - if (icmp6->icmp6_type == MLD_LISTENER_QUERY) /* XXX: ugly... */ + } + if (icmp6->icmp6_type == MLD_LISTENER_QUERY) { /* XXX: ugly... */ icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mldquery); - else + } else { icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mldreport); + } if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) { /* give up local */ - if (mld_input(m, off, icmp6len) == IPPROTO_DONE) + if (mld_input(m, off, icmp6len) == IPPROTO_DONE) { m = NULL; + } goto freeit; } - if (mld_input(n, off, icmp6len) != IPPROTO_DONE) + if (mld_input(n, off, icmp6len) != IPPROTO_DONE) { m_freem(n); + } /* m stays. 
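 * The copy n was handed to mld_input(), which either consumed it
 * (returning IPPROTO_DONE) or left it to the m_freem() above; the
 * original m is still owned here and continues on toward raw-socket
 * delivery.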
*/ goto rate_limit_checked; case MLD_LISTENER_DONE: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_mlddone); - if (icmp6len < sizeof(struct mld_hdr)) /* necessary? */ + if (icmp6len < sizeof(struct mld_hdr)) { /* necessary? */ goto badlen; - break; /* nothing to be done in kernel */ + } + break; /* nothing to be done in kernel */ case MLD_MTRACE_RESP: case MLD_MTRACE: /* XXX: these two are experimental. not officially defined. */ /* XXX: per-interface statistics? */ - break; /* just pass it to applications */ + break; /* just pass it to applications */ case ICMP6_NI_QUERY: - if (!icmp6_nodeinfo) + if (!icmp6_nodeinfo) { break; + } //### LD 10/20 Check fbsd differences here. Not sure we're more advanced or not. /* By RFC 4620 refuse to answer queries from global scope addresses */ - if ((icmp6_nodeinfo & 8) != 8 && in6_addrscope(&ip6->ip6_src) == IPV6_ADDR_SCOPE_GLOBAL) + if ((icmp6_nodeinfo & 8) != 8 && in6_addrscope(&ip6->ip6_src) == IPV6_ADDR_SCOPE_GLOBAL) { break; + } - if (icmp6len < sizeof(struct icmp6_nodeinfo)) + if (icmp6len < sizeof(struct icmp6_nodeinfo)) { goto badlen; + } #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, off, sizeof(struct icmp6_nodeinfo), - return IPPROTO_DONE); + return IPPROTO_DONE); #endif n = m_copy(m, 0, M_COPYALL); - if (n) + if (n) { n = ni6_input(n, off); + } if (n) { noff = sizeof(struct ip6_hdr); icmp6stat.icp6s_reflect++; @@ -774,16 +797,19 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) goto rate_limit_checked; case ICMP6_WRUREPLY: - if (code != 0) + if (code != 0) { goto badcode; + } break; case ND_ROUTER_SOLICIT: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_routersolicit); - if (code != 0) + if (code != 0) { goto badcode; - if (icmp6len < sizeof(struct nd_router_solicit)) + } + if (icmp6len < sizeof(struct nd_router_solicit)) { goto badlen; + } if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) { /* give up local */ @@ -797,10 +823,12 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) case ND_ROUTER_ADVERT: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_routeradvert); - if (code != 0) + if (code != 0) { goto badcode; - if (icmp6len < sizeof(struct nd_router_advert)) + } + if (icmp6len < sizeof(struct nd_router_advert)) { goto badlen; + } if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) { /* give up local */ @@ -814,10 +842,12 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) case ND_NEIGHBOR_SOLICIT: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_neighborsolicit); - if (code != 0) + if (code != 0) { goto badcode; - if (icmp6len < sizeof(struct nd_neighbor_solicit)) + } + if (icmp6len < sizeof(struct nd_neighbor_solicit)) { goto badlen; + } if (proxy || ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL)) { @@ -832,10 +862,12 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) case ND_NEIGHBOR_ADVERT: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_neighboradvert); - if (code != 0) + if (code != 0) { goto badcode; - if (icmp6len < sizeof(struct nd_neighbor_advert)) + } + if (icmp6len < sizeof(struct nd_neighbor_advert)) { goto badlen; + } if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) { /* give up local */ @@ -849,10 +881,12 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) case ND_REDIRECT: icmp6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_redirect); - if (code != 0) + if (code != 0) { goto badcode; - if (icmp6len < sizeof(struct nd_redirect)) + } + if (icmp6len < sizeof(struct nd_redirect)) { goto badlen; + } if ((n = m_copym(m, 0, M_COPYALL, M_DONTWAIT)) == NULL) { /* give up local */ @@ -866,10 +900,12 @@ icmp6_input(struct 
mbuf **mp, int *offp, int proto) case ICMP6_ROUTER_RENUMBERING: if (code != ICMP6_ROUTER_RENUMBERING_COMMAND && - code != ICMP6_ROUTER_RENUMBERING_RESULT) + code != ICMP6_ROUTER_RENUMBERING_RESULT) { goto badcode; - if (icmp6len < sizeof(struct icmp6_router_renum)) + } + if (icmp6len < sizeof(struct icmp6_router_renum)) { goto badlen; + } break; default: @@ -886,18 +922,18 @@ icmp6_input(struct mbuf **mp, int *offp, int proto) /* ICMPv6 informational: MUST not deliver */ goto rate_limit_checked; } - deliver: +deliver: if (icmp6_notify_error(m, off, icmp6len, code)) { /* In this case, m should've been freed. */ - return(IPPROTO_DONE); + return IPPROTO_DONE; } break; - badcode: +badcode: icmp6stat.icp6s_badcode++; break; - badlen: +badlen: icmp6stat.icp6s_badlen++; break; } @@ -925,15 +961,15 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) } #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, off, - sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr), - return -1); + sizeof(struct icmp6_hdr) + sizeof(struct ip6_hdr), + return -1); icmp6 = (struct icmp6_hdr *)(mtod(m, caddr_t) + off); #else IP6_EXTHDR_GET(icmp6, struct icmp6_hdr *, m, off, - sizeof(*icmp6) + sizeof(struct ip6_hdr)); + sizeof(*icmp6) + sizeof(struct ip6_hdr)); if (icmp6 == NULL) { icmp6stat.icp6s_tooshort++; - return(-1); + return -1; } #endif eip6 = (struct ip6_hdr *)(icmp6 + 1); @@ -943,7 +979,7 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) void (*ctlfunc)(int, struct sockaddr *, void *, struct ifnet *); u_int8_t nxt = eip6->ip6_nxt; int eoff = off + sizeof(struct icmp6_hdr) + - sizeof(struct ip6_hdr); + sizeof(struct ip6_hdr); struct ip6ctlparam ip6cp; struct in6_addr *finaldst = NULL; int icmp6type = icmp6->icmp6_type; @@ -960,24 +996,24 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) case IPPROTO_DSTOPTS: case IPPROTO_AH: #ifndef PULLDOWN_TEST - IP6_EXTHDR_CHECK(m, 0, eoff + - sizeof(struct ip6_ext), - return -1); + IP6_EXTHDR_CHECK(m, 0, + eoff + sizeof(struct ip6_ext), return -1); eh = (struct ip6_ext *)(mtod(m, caddr_t) - + eoff); + + eoff); #else IP6_EXTHDR_GET(eh, struct ip6_ext *, m, - eoff, sizeof(*eh)); + eoff, sizeof(*eh)); if (eh == NULL) { icmp6stat.icp6s_tooshort++; - return(-1); + return -1; } #endif - if (nxt == IPPROTO_AH) + if (nxt == IPPROTO_AH) { eoff += (eh->ip6e_len + 2) << 2; - else + } else { eoff += (eh->ip6e_len + 1) << 3; + } nxt = eh->ip6e_nxt; break; case IPPROTO_ROUTING: @@ -991,15 +1027,15 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) */ #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, 0, eoff + sizeof(*rth), - return -1); - rth = (struct ip6_rthdr *)(mtod(m, caddr_t) - + eoff); + return -1); + rth = (struct ip6_rthdr *) + (mtod(m, caddr_t) + eoff); #else IP6_EXTHDR_GET(rth, struct ip6_rthdr *, m, - eoff, sizeof(*rth)); + eoff, sizeof(*rth)); if (rth == NULL) { icmp6stat.icp6s_tooshort++; - return(-1); + return -1; } #endif rthlen = (rth->ip6r_len + 1) << 3; @@ -1017,21 +1053,22 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, 0, eoff + rthlen, - return -1); + return -1); rth0 = (struct ip6_rthdr0 *)(mtod(m, caddr_t) + eoff); #else IP6_EXTHDR_GET(rth0, - struct ip6_rthdr0 *, m, - eoff, rthlen); + struct ip6_rthdr0 *, m, + eoff, rthlen); if (rth0 == NULL) { icmp6stat.icp6s_tooshort++; - return(-1); + return -1; } #endif /* just ignore a bogus header */ if ((rth0->ip6r0_len % 2) == 0 && - (hops = rth0->ip6r0_len/2)) + (hops = rth0->ip6r0_len / 2)) { 
finaldst = (struct in6_addr *)(void *)(rth0 + 1) + (hops - 1); + } } eoff += rthlen; nxt = rth->ip6r_nxt; @@ -1039,16 +1076,16 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) case IPPROTO_FRAGMENT: #ifndef PULLDOWN_TEST IP6_EXTHDR_CHECK(m, 0, eoff + - sizeof(struct ip6_frag), - return -1); + sizeof(struct ip6_frag), + return -1); fh = (struct ip6_frag *)(mtod(m, caddr_t) - + eoff); + + eoff); #else IP6_EXTHDR_GET(fh, struct ip6_frag *, m, - eoff, sizeof(*fh)); + eoff, sizeof(*fh)); if (fh == NULL) { icmp6stat.icp6s_tooshort++; - return (-1); + return -1; } #endif /* @@ -1057,8 +1094,9 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) * we'll go to the notify label for path MTU * discovery. */ - if (fh->ip6f_offlg & IP6F_OFF_MASK) + if (fh->ip6f_offlg & IP6F_OFF_MASK) { goto notify; + } eoff += sizeof(struct ip6_frag); nxt = fh->ip6f_nxt; @@ -1075,7 +1113,7 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) goto notify; } } - notify: +notify: #ifndef PULLDOWN_TEST icmp6 = (struct icmp6_hdr *)(mtod(m, caddr_t) + off); #else @@ -1083,7 +1121,7 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) sizeof(*icmp6) + sizeof(struct ip6_hdr)); if (icmp6 == NULL) { icmp6stat.icp6s_tooshort++; - return (-1); + return -1; } #endif @@ -1100,23 +1138,27 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) bzero(&icmp6dst, sizeof(icmp6dst)); icmp6dst.sin6_len = sizeof(struct sockaddr_in6); icmp6dst.sin6_family = AF_INET6; - if (finaldst == NULL) + if (finaldst == NULL) { icmp6dst.sin6_addr = eip6->ip6_dst; - else + } else { icmp6dst.sin6_addr = *finaldst; - if (in6_setscope(&icmp6dst.sin6_addr, m->m_pkthdr.rcvif, NULL)) + } + if (in6_setscope(&icmp6dst.sin6_addr, m->m_pkthdr.rcvif, NULL)) { goto freeit; + } bzero(&icmp6src, sizeof(icmp6src)); icmp6src.sin6_len = sizeof(struct sockaddr_in6); icmp6src.sin6_family = AF_INET6; icmp6src.sin6_addr = eip6->ip6_src; - if (in6_setscope(&icmp6src.sin6_addr, m->m_pkthdr.rcvif, NULL)) + if (in6_setscope(&icmp6src.sin6_addr, m->m_pkthdr.rcvif, NULL)) { goto freeit; + } icmp6src.sin6_flowinfo = (eip6->ip6_flow & IPV6_FLOWLABEL_MASK); - if (finaldst == NULL) + if (finaldst == NULL) { finaldst = &eip6->ip6_dst; + } ip6cp.ip6c_m = m; ip6cp.ip6c_icmp6 = icmp6; ip6cp.ip6c_ip6 = (struct ip6_hdr *)(icmp6 + 1); @@ -1128,7 +1170,7 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) if (icmp6type == ICMP6_PACKET_TOO_BIG) { notifymtu = ntohl(icmp6->icmp6_mtu); ip6cp.ip6c_cmdarg = (void *)&notifymtu; - icmp6_mtudisc_update(&ip6cp, 1); /*XXX*/ + icmp6_mtudisc_update(&ip6cp, 1); /*XXX*/ } ctlfunc = ip6_protox[nxt]->pr_ctlinput; @@ -1143,11 +1185,11 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code) lck_mtx_lock(inet6_domain_mutex); } } - return(0); + return 0; freeit: m_freem(m); - return(-1); + return -1; } void @@ -1155,7 +1197,7 @@ icmp6_mtudisc_update(struct ip6ctlparam *ip6cp, int validated) { struct in6_addr *dst = ip6cp->ip6c_finaldst; struct icmp6_hdr *icmp6 = ip6cp->ip6c_icmp6; - struct mbuf *m = ip6cp->ip6c_m; /* will be necessary for scope issue */ + struct mbuf *m = ip6cp->ip6c_m; /* will be necessary for scope issue */ u_int mtu = ntohl(icmp6->icmp6_mtu); struct rtentry *rt = NULL; struct sockaddr_in6 sin6; @@ -1163,11 +1205,13 @@ icmp6_mtudisc_update(struct ip6ctlparam *ip6cp, int validated) * we reject ICMPv6 too big with abnormally small value. * XXX what is the good definition of "abnormally small"?
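 * (The bound below works out to 40 bytes of IPv6 header plus 8 bytes
 * of fragment header plus 8 bytes of minimal fragmentable payload,
 * i.e. 56 bytes; a reported MTU that could not carry even a single
 * fragment is simply ignored.)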
*/ - if (mtu < sizeof(struct ip6_hdr) + sizeof(struct ip6_frag) + 8) + if (mtu < sizeof(struct ip6_hdr) + sizeof(struct ip6_frag) + 8) { return; + } - if (!validated) + if (!validated) { return; + } /* * In case the suggested mtu is less than IPV6_MMTU, we @@ -1175,8 +1219,9 @@ icmp6_mtudisc_update(struct ip6ctlparam *ip6cp, int validated) * "alwaysfrag" case. * Try to be as close to the spec as possible. */ - if (mtu < IPV6_MMTU) + if (mtu < IPV6_MMTU) { mtu = IPV6_MMTU - 8; + } bzero(&sin6, sizeof(sin6)); sin6.sin6_family = PF_INET6; @@ -1222,7 +1267,7 @@ icmp6_mtudisc_update(struct ip6ctlparam *ip6cp, int validated) * - joins NI group address at in6_ifattach() time only, does not cope * with hostname changes by sethostname(3) */ -#define hostnamelen strlen(hostname) +#define hostnamelen strlen(hostname) static struct mbuf * ni6_input(struct mbuf *m, int off) { @@ -1232,12 +1277,12 @@ ni6_input(struct mbuf *m, int off) int subjlen; int replylen = sizeof(struct ip6_hdr) + sizeof(struct icmp6_nodeinfo); struct ni_reply_fqdn *fqdn; - int addrs; /* for NI_QTYPE_NODEADDR */ + int addrs; /* for NI_QTYPE_NODEADDR */ struct ifnet *ifp = NULL; /* for NI_QTYPE_NODEADDR */ struct sockaddr_in6 sin6; /* double meaning; ip6_dst and subjectaddr */ struct sockaddr_in6 sin6_d; /* XXX: we should retrieve this from m_aux */ struct ip6_hdr *ip6; - int oldfqdn = 0; /* if 1, return pascal string (03 draft) */ + int oldfqdn = 0; /* if 1, return pascal string (03 draft) */ char *subj = NULL; ip6 = mtod(m, struct ip6_hdr *); @@ -1247,7 +1292,7 @@ ni6_input(struct mbuf *m, int off) IP6_EXTHDR_GET(ni6, struct icmp6_nodeinfo *, m, off, sizeof(*ni6)); if (ni6 == NULL) { /* m is already reclaimed */ - return (NULL); + return NULL; } #endif @@ -1266,8 +1311,9 @@ ni6_input(struct mbuf *m, int off) */ if ((icmp6_nodeinfo & ICMP6_NODEINFO_GLOBALOK) == 0 && !IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) && - !IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_src)) + !IN6_IS_ADDR_LINKLOCAL(&ip6->ip6_src)) { goto bad; + } /* * Validate IPv6 destination address. @@ -1278,20 +1324,21 @@ ni6_input(struct mbuf *m, int off) * [RFC4602, Section 5.] */ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { - if (!IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst)) + if (!IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst)) { goto bad; + } /* else it's a link-local multicast, fine */ - } else { /* unicast or anycast */ + } else { /* unicast or anycast */ uint32_t ia6_flags; - if (ip6_getdstifaddr_info(m, NULL, &ia6_flags) != 0) + if (ip6_getdstifaddr_info(m, NULL, &ia6_flags) != 0) { goto bad; /* XXX impossible */ - + } if ((ia6_flags & IN6_IFF_TEMPORARY) && !(icmp6_nodeinfo & ICMP6_NODEINFO_TMPADDROK)) { nd6log((LOG_DEBUG, "ni6_input: ignore node info to " - "a temporary address in %s:%d", - __func__, __LINE__)); + "a temporary address in %s:%d", + __func__, __LINE__)); goto bad; } } @@ -1303,9 +1350,10 @@ ni6_input(struct mbuf *m, int off) case NI_QTYPE_NOOP: case NI_QTYPE_SUPTYPES: /* 07 draft */ - if (ni6->ni_code == ICMP6_NI_SUBJ_FQDN && subjlen == 0) + if (ni6->ni_code == ICMP6_NI_SUBJ_FQDN && subjlen == 0) { break; - /* FALLTHROUGH */ + } + /* FALLTHROUGH */ case NI_QTYPE_FQDN: case NI_QTYPE_NODEADDR: case NI_QTYPE_IPV4ADDR: @@ -1324,12 +1372,14 @@ ni6_input(struct mbuf *m, int off) break; } #if ICMP6_NI_SUBJ_IPV6 != 0 - if (ni6->ni_code != ICMP6_NI_SUBJ_IPV6) + if (ni6->ni_code != ICMP6_NI_SUBJ_IPV6) { goto bad; + } #endif - if (subjlen != sizeof(struct in6_addr)) + if (subjlen != sizeof(struct in6_addr)) { goto bad; + } /* * Validate Subject address. 
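 * The subject is copied out into a sockaddr_in6 and, like ip6_dst,
 * has its scope id embedded, so that a link-local subject compares
 * correctly against the destination on the receiving interface;
 * anything other than an exact match is refused below.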
@@ -1349,7 +1399,7 @@ ni6_input(struct mbuf *m, int off) m_copydata(m, off + sizeof(struct icmp6_nodeinfo), subjlen, (caddr_t)&sin6.sin6_addr); sin6.sin6_scope_id = in6_addr2scopeid(m->m_pkthdr.rcvif, - &sin6.sin6_addr); + &sin6.sin6_addr); in6_embedscope(&sin6.sin6_addr, &sin6, NULL, NULL, NULL); bzero(&sin6_d, sizeof(sin6_d)); @@ -1357,12 +1407,13 @@ ni6_input(struct mbuf *m, int off) sin6_d.sin6_len = sizeof(sin6_d); /* ditto */ sin6_d.sin6_addr = ip6->ip6_dst; sin6_d.sin6_scope_id = in6_addr2scopeid(m->m_pkthdr.rcvif, - &ip6->ip6_dst); + &ip6->ip6_dst); in6_embedscope(&sin6_d.sin6_addr, &sin6_d, NULL, NULL, NULL); subj = (char *)&sin6; - if (SA6_ARE_ADDR_EQUAL(&sin6, &sin6_d)) + if (SA6_ARE_ADDR_EQUAL(&sin6, &sin6_d)) { break; + } /* * XXX if we are to allow other cases, we should really @@ -1388,21 +1439,23 @@ ni6_input(struct mbuf *m, int off) * truncated hostname. */ n = ni6_nametodns(hostname, hostnamelen, 0); - if (!n || n->m_next || n->m_len == 0) + if (!n || n->m_next || n->m_len == 0) { goto bad; + } IP6_EXTHDR_GET(subj, char *, m, off + sizeof(struct icmp6_nodeinfo), subjlen); - if (subj == NULL) + if (subj == NULL) { goto bad; + } if (!ni6_dnsmatch(subj, subjlen, mtod(n, const char *), - n->m_len)) { + n->m_len)) { goto bad; } m_freem(n); n = NULL; break; - case ICMP6_NI_SUBJ_IPV4: /* XXX: to be implemented? */ + case ICMP6_NI_SUBJ_IPV4: /* XXX: to be implemented? */ default: goto bad; } @@ -1412,20 +1465,22 @@ ni6_input(struct mbuf *m, int off) /* refuse based on configuration. XXX ICMP6_NI_REFUSED? */ switch (qtype) { case NI_QTYPE_FQDN: - if ((icmp6_nodeinfo & ICMP6_NODEINFO_FQDNOK) == 0) + if ((icmp6_nodeinfo & ICMP6_NODEINFO_FQDNOK) == 0) { goto bad; + } break; case NI_QTYPE_NODEADDR: case NI_QTYPE_IPV4ADDR: - if ((icmp6_nodeinfo & ICMP6_NODEINFO_NODEADDROK) == 0) + if ((icmp6_nodeinfo & ICMP6_NODEINFO_NODEADDROK) == 0) { goto bad; + } break; } /* guess reply length */ switch (qtype) { case NI_QTYPE_NOOP: - break; /* no reply data */ + break; /* no reply data */ case NI_QTYPE_SUPTYPES: replylen += sizeof(u_int32_t); break; @@ -1436,8 +1491,9 @@ ni6_input(struct mbuf *m, int off) case NI_QTYPE_NODEADDR: addrs = ni6_addrs(ni6, &ifp, subj); if ((replylen += addrs * (sizeof(struct in6_addr) + - sizeof(u_int32_t))) > MCLBYTES) + sizeof(u_int32_t))) > MCLBYTES) { replylen = MCLBYTES; /* XXX: will truncate pkt later */ + } break; case NI_QTYPE_IPV4ADDR: /* unsupported - should respond with unknown Qtype? */ @@ -1460,12 +1516,13 @@ ni6_input(struct mbuf *m, int off) } /* allocate an mbuf to reply. */ - MGETHDR(n, M_DONTWAIT, m->m_type); /* MAC-OK */ + MGETHDR(n, M_DONTWAIT, m->m_type); /* MAC-OK */ if (n == NULL) { m_freem(m); - if (ifp != NULL) + if (ifp != NULL) { ifnet_release(ifp); - return (NULL); + } + return NULL; } M_COPY_PKTHDR(n, m); /* just for recvif */ if (replylen > MHLEN) { @@ -1498,7 +1555,7 @@ ni6_input(struct mbuf *m, int off) { u_int32_t v; nni6->ni_code = ICMP6_NI_SUCCESS; - nni6->ni_flags = htons(0x0000); /* raw bitmap */ + nni6->ni_flags = htons(0x0000); /* raw bitmap */ /* supports NOOP, SUPTYPES, FQDN, and NODEADDR */ v = (u_int32_t)htonl(0x0000000f); bcopy(&v, nni6 + 1, sizeof(u_int32_t)); @@ -1507,19 +1564,21 @@ ni6_input(struct mbuf *m, int off) case NI_QTYPE_FQDN: nni6->ni_code = ICMP6_NI_SUCCESS; fqdn = (struct ni_reply_fqdn *)(mtod(n, caddr_t) + - sizeof(struct ip6_hdr) + - sizeof(struct icmp6_nodeinfo)); + sizeof(struct ip6_hdr) + + sizeof(struct icmp6_nodeinfo)); nni6->ni_flags = 0; /* XXX: meaningless TTL */ - fqdn->ni_fqdn_ttl = 0; /* ditto. 
*/ + fqdn->ni_fqdn_ttl = 0; /* ditto. */ /* * XXX do we really have FQDN in variable "hostname"? */ n->m_next = ni6_nametodns(hostname, hostnamelen, oldfqdn); - if (n->m_next == NULL) + if (n->m_next == NULL) { goto bad; + } /* XXX we assume that n->m_next is not a chain */ - if (n->m_next->m_next != NULL) + if (n->m_next->m_next != NULL) { goto bad; + } n->m_pkthdr.len += n->m_next->m_len; break; case NI_QTYPE_NODEADDR: @@ -1533,26 +1592,29 @@ ni6_input(struct mbuf *m, int off) copied = ni6_store_addrs(ni6, nni6, ifp, lenlim); /* XXX: reset mbuf length */ n->m_pkthdr.len = n->m_len = sizeof(struct ip6_hdr) + - sizeof(struct icmp6_nodeinfo) + copied; + sizeof(struct icmp6_nodeinfo) + copied; break; } default: - break; /* XXX impossible! */ + break; /* XXX impossible! */ } nni6->ni_type = ICMP6_NI_REPLY; m_freem(m); - if (ifp != NULL) + if (ifp != NULL) { ifnet_release(ifp); - return (n); + } + return n; bad: m_freem(m); - if (n) + if (n) { m_freem(n); - if (ifp != NULL) + } + if (ifp != NULL) { ifnet_release(ifp); - return (NULL); + } + return NULL; } #undef hostnamelen @@ -1566,27 +1628,30 @@ static struct mbuf * ni6_nametodns( const char *name, int namelen, - int old) /* return pascal string if non-zero */ + int old) /* return pascal string if non-zero */ { struct mbuf *m; char *cp, *ep; const char *p, *q; int i, len, nterm; - if (old) + if (old) { len = namelen + 1; - else + } else { len = MCLBYTES; + } /* because MAXHOSTNAMELEN is usually 256, we use cluster mbuf */ MGET(m, M_DONTWAIT, MT_DATA); if (m && len > MLEN) { MCLGET(m, M_DONTWAIT); - if ((m->m_flags & M_EXT) == 0) + if ((m->m_flags & M_EXT) == 0) { goto fail; + } } - if (!m) + if (!m) { goto fail; + } m->m_next = NULL; if (old) { @@ -1600,8 +1665,9 @@ ni6_nametodns( ep = mtod(m, char *) + M_TRAILINGSPACE(m); /* if not certain about my name, return empty buffer */ - if (namelen == 0) + if (namelen == 0) { return m; + } /* * guess if it looks like shortened hostname, or FQDN. @@ -1609,41 +1675,49 @@ ni6_nametodns( */ i = 0; for (p = name; p < name + namelen; p++) { - if (*p && *p == '.') + if (*p && *p == '.') { i++; + } } - if (i < 2) + if (i < 2) { nterm = 2; - else + } else { nterm = 1; + } p = name; while (cp < ep && p < name + namelen) { i = 0; - for (q = p; q < name + namelen && *q && *q != '.'; q++) + for (q = p; q < name + namelen && *q && *q != '.'; q++) { i++; + } /* result does not fit into mbuf */ - if (cp + i + 1 >= ep) + if (cp + i + 1 >= ep) { goto fail; + } /* * DNS label length restriction, RFC1035 page 8. * "i == 0" case is included here to avoid returning * 0-length label on "foo..bar". */ - if (i <= 0 || i >= 64) + if (i <= 0 || i >= 64) { goto fail; + } *cp++ = i; bcopy(p, cp, i); cp += i; p = q; - if (p < name + namelen && *p == '.') + if (p < name + namelen && *p == '.') { p++; + } } /* termination */ - if (cp + nterm >= ep) + if (cp + nterm >= ep) { goto fail; - while (nterm-- > 0) + } + while (nterm-- > 0) { *cp++ = '\0'; + } m->m_len = cp - mtod(m, char *); return m; } @@ -1651,9 +1725,10 @@ ni6_nametodns( panic("should not reach here"); /* NOTREACHED */ - fail: - if (m) +fail: + if (m) { m_freem(m); + } return NULL; } @@ -1669,54 +1744,67 @@ ni6_dnsmatch(const char *a, int alen, const char *b, int blen) int l; /* simplest case - need validation? 
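 * Both names are expected in the DNS wire format produced by
 * ni6_nametodns() above: "foo.bar", for example, is encoded as
 * "\3foo\3bar\0" (length-prefixed labels, NUL terminated), so two
 * equal-length encodings can be matched with a single bcmp().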
*/ - if (alen == blen && bcmp(a, b, alen) == 0) + if (alen == blen && bcmp(a, b, alen) == 0) { return 1; + } a0 = a; b0 = b; /* termination is mandatory */ - if (alen < 2 || blen < 2) + if (alen < 2 || blen < 2) { return 0; - if (a0[alen - 1] != '\0' || b0[blen - 1] != '\0') + } + if (a0[alen - 1] != '\0' || b0[blen - 1] != '\0') { return 0; + } alen--; blen--; while (a - a0 < alen && b - b0 < blen) { - if (a - a0 + 1 > alen || b - b0 + 1 > blen) + if (a - a0 + 1 > alen || b - b0 + 1 > blen) { return 0; + } - if ((signed char)a[0] < 0 || (signed char)b[0] < 0) + if ((signed char)a[0] < 0 || (signed char)b[0] < 0) { return 0; + } /* we don't support compression yet */ - if (a[0] >= 64 || b[0] >= 64) + if (a[0] >= 64 || b[0] >= 64) { return 0; + } /* truncated case */ - if (a[0] == 0 && a - a0 == alen - 1) + if (a[0] == 0 && a - a0 == alen - 1) { return 1; - if (b[0] == 0 && b - b0 == blen - 1) + } + if (b[0] == 0 && b - b0 == blen - 1) { return 1; - if (a[0] == 0 || b[0] == 0) + } + if (a[0] == 0 || b[0] == 0) { return 0; + } - if (a[0] != b[0]) + if (a[0] != b[0]) { return 0; + } l = a[0]; - if (a - a0 + 1 + l > alen || b - b0 + 1 + l > blen) + if (a - a0 + 1 + l > alen || b - b0 + 1 + l > blen) { return 0; - if (bcmp(a + 1, b + 1, l) != 0) + } + if (bcmp(a + 1, b + 1, l) != 0) { return 0; + } a += 1 + l; b += 1 + l; } - if (a - a0 == alen && b - b0 == blen) + if (a - a0 == alen && b - b0 == blen) { return 1; - else + } else { return 0; + } } /* @@ -1732,14 +1820,16 @@ ni6_addrs(struct icmp6_nodeinfo *ni6, struct ifnet **ifpp, char *subj) int addrs = 0, addrsofif, iffound = 0; int niflags = ni6->ni_flags; - if (ifpp != NULL) + if (ifpp != NULL) { *ifpp = NULL; + } if ((niflags & NI_NODEADDR_FLAG_ALL) == 0) { switch (ni6->ni_code) { case ICMP6_NI_SUBJ_IPV6: - if (subj == NULL) /* must be impossible... */ - return(0); + if (subj == NULL) { /* must be impossible... */ + return 0; + } subj_ip6 = (struct sockaddr_in6 *)(void *)subj; break; default: @@ -1747,7 +1837,7 @@ ni6_addrs(struct icmp6_nodeinfo *ni6, struct ifnet **ifpp, char *subj) * XXX: we only support IPv6 subject address for * this Qtype. 
*/ - return (0); + return 0; } } @@ -1766,8 +1856,9 @@ ni6_addrs(struct icmp6_nodeinfo *ni6, struct ifnet **ifpp, char *subj) if ((niflags & NI_NODEADDR_FLAG_ALL) == 0 && IN6_ARE_ADDR_EQUAL(&subj_ip6->sin6_addr, - &ifa6->ia_addr.sin6_addr)) + &ifa6->ia_addr.sin6_addr)) { iffound = 1; + } /* * IPv4-mapped addresses can only be returned by a @@ -1828,19 +1919,19 @@ ni6_addrs(struct icmp6_nodeinfo *ni6, struct ifnet **ifpp, char *subj) ifnet_reference(ifp); } ifnet_head_done(); - return(addrsofif); + return addrsofif; } addrs += addrsofif; } ifnet_head_done(); - return (addrs); + return addrs; } static int ni6_store_addrs(struct icmp6_nodeinfo *ni6, struct icmp6_nodeinfo *nni6, - struct ifnet *ifp0, int resid) + struct ifnet *ifp0, int resid) { struct ifnet *ifp = ifp0; struct in6_ifaddr *ifa6; @@ -1852,19 +1943,20 @@ ni6_store_addrs(struct icmp6_nodeinfo *ni6, struct icmp6_nodeinfo *nni6, u_int32_t ltime; uint64_t now = net_uptime(); - if (ifp0 == NULL && !(niflags & NI_NODEADDR_FLAG_ALL)) - return (0); /* needless to copy */ - - again: + if (ifp0 == NULL && !(niflags & NI_NODEADDR_FLAG_ALL)) { + return 0; /* needless to copy */ + } +again: ifnet_head_lock_shared(); - if (ifp == NULL) + if (ifp == NULL) { ifp = TAILQ_FIRST(&ifnet_head); + } for (; ifp; ifp = TAILQ_NEXT(ifp, if_list)) { ifnet_lock_shared(ifp); for (ifa = ifp->if_addrlist.tqh_first; ifa; - ifa = ifa->ifa_list.tqe_next) { + ifa = ifa->ifa_list.tqe_next) { struct in6_addrlifetime_i *lt; IFA_LOCK(ifa); @@ -1882,8 +1974,9 @@ ni6_store_addrs(struct icmp6_nodeinfo *ni6, struct icmp6_nodeinfo *nni6, */ /* record the interface for later search */ - if (ifp_dep == NULL) + if (ifp_dep == NULL) { ifp_dep = ifp; + } IFA_UNLOCK(ifa); continue; @@ -1941,10 +2034,10 @@ ni6_store_addrs(struct icmp6_nodeinfo *ni6, struct icmp6_nodeinfo *nni6, * Set the truncate flag and return. 
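 * (resid tracks the room left in the reply mbuf, which ni6_input()
 * capped at MCLBYTES when guessing the reply length; the truncated
 * reply is still sent, and NI_NODEADDR_FLAG_TRUNCATE tells the
 * querier that the address list is incomplete.)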
*/ nni6->ni_flags |= - NI_NODEADDR_FLAG_TRUNCATE; + NI_NODEADDR_FLAG_TRUNCATE; ifnet_lock_done(ifp); ifnet_head_done(); - return(copied); + return copied; } /* @@ -1966,10 +2059,11 @@ ni6_store_addrs(struct icmp6_nodeinfo *ni6, struct icmp6_nodeinfo *nni6, if (lt->ia6ti_expire == 0) { ltime = ND6_INFINITE_LIFETIME; } else { - if (lt->ia6ti_expire > now) + if (lt->ia6ti_expire > now) { ltime = htonl(lt->ia6ti_expire - now); - else + } else { ltime = 0; + } } bcopy(&ltime, cp, sizeof(u_int32_t)); @@ -1977,20 +2071,22 @@ ni6_store_addrs(struct icmp6_nodeinfo *ni6, struct icmp6_nodeinfo *nni6, /* copy the address itself */ bcopy(&ifa6->ia_addr.sin6_addr, cp, - sizeof(struct in6_addr)); + sizeof(struct in6_addr)); /* XXX: KAME link-local hack; remove ifindex */ - if (IN6_IS_ADDR_LINKLOCAL(&ifa6->ia_addr.sin6_addr)) + if (IN6_IS_ADDR_LINKLOCAL(&ifa6->ia_addr.sin6_addr)) { ((struct in6_addr *)(void *)cp)->s6_addr16[1] = 0; + } cp += sizeof(struct in6_addr); resid -= (sizeof(struct in6_addr) + sizeof(u_int32_t)); copied += (sizeof(struct in6_addr) + - sizeof(u_int32_t)); + sizeof(u_int32_t)); IFA_UNLOCK(ifa); } ifnet_lock_done(ifp); - if (ifp0) /* we need search only on the specified IF */ + if (ifp0) { /* we need search only on the specified IF */ break; + } } ifnet_head_done(); @@ -2001,7 +2097,7 @@ ni6_store_addrs(struct icmp6_nodeinfo *ni6, struct icmp6_nodeinfo *nni6, goto again; } - return(copied); + return copied; } /* @@ -2039,37 +2135,44 @@ icmp6_rip6_input(struct mbuf **mp, int off) rip6src.sin6_family = AF_INET6; rip6src.sin6_len = sizeof(struct sockaddr_in6); rip6src.sin6_addr = ip6->ip6_src; - if (sa6_recoverscope(&rip6src, TRUE)) - return (IPPROTO_DONE); + if (sa6_recoverscope(&rip6src, TRUE)) { + return IPPROTO_DONE; + } lck_rw_lock_shared(ripcbinfo.ipi_lock); LIST_FOREACH(in6p, &ripcb, inp_list) { - if ((in6p->inp_vflag & INP_IPV6) == 0) + if ((in6p->inp_vflag & INP_IPV6) == 0) { continue; - if (in6p->in6p_ip6_nxt != IPPROTO_ICMPV6) + } + if (in6p->in6p_ip6_nxt != IPPROTO_ICMPV6) { continue; + } if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr) && - !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst)) + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst)) { continue; + } if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) && - !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src)) + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src)) { continue; + } if (in6p->in6p_icmp6filt && ICMP6_FILTER_WILLBLOCK(icmp6->icmp6_type, - in6p->in6p_icmp6filt)) + in6p->in6p_icmp6filt)) { continue; + } - if (inp_restricted_recv(in6p, ifp)) + if (inp_restricted_recv(in6p, ifp)) { continue; + } if (last) { - struct mbuf *n; + struct mbuf *n; if ((n = m_copy(m, 0, (int)M_COPYALL)) != NULL) { if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { ret = ip6_savecontrol(last, n, &opts); if (ret != 0) { m_freem(n); @@ -2082,8 +2185,8 @@ icmp6_rip6_input(struct mbuf **mp, int off) m_adj(n, off); so_recv_data_stat(last->in6p_socket, m, 0); if (sbappendaddr(&last->in6p_socket->so_rcv, - (struct sockaddr *)&rip6src, - n, opts, NULL) != 0) { + (struct sockaddr *)&rip6src, + n, opts, NULL) != 0) { sorwakeup(last->in6p_socket); } opts = NULL; @@ -2095,7 +2198,7 @@ icmp6_rip6_input(struct mbuf **mp, int off) if ((last->in6p_flags & INP_CONTROLOPTS) != 0 ||
(last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { ret = ip6_savecontrol(last, m, &opts); if (ret != 0) { goto error; @@ -2105,7 +2208,7 @@ icmp6_rip6_input(struct mbuf **mp, int off) m_adj(m, off); so_recv_data_stat(last->in6p_socket, m, 0); if (sbappendaddr(&last->in6p_socket->so_rcv, - (struct sockaddr *)&rip6src, m, opts, NULL) != 0) { + (struct sockaddr *)&rip6src, m, opts, NULL) != 0) { sorwakeup(last->in6p_socket); } } else { @@ -2176,16 +2279,18 @@ icmp6_reflect(struct mbuf *m, size_t off) m_adj(m, l); l = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr); if (m->m_len < l) { - if ((m_ip6hdr = m_pulldown(m, 0, l, NULL)) == NULL) + if ((m_ip6hdr = m_pulldown(m, 0, l, NULL)) == NULL) { return; + } } bcopy((caddr_t)&nip6, mtod(m, caddr_t), sizeof(nip6)); - } else /* off == sizeof(struct ip6_hdr) */ { + } else { /* off == sizeof(struct ip6_hdr) */ size_t l; l = sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr); if (m->m_len < l) { - if ((m_ip6hdr = m_pulldown(m, 0, l, NULL)) == NULL) + if ((m_ip6hdr = m_pulldown(m, 0, l, NULL)) == NULL) { return; + } } } plen = m->m_pkthdr.len - sizeof(struct ip6_hdr); @@ -2234,7 +2339,7 @@ icmp6_reflect(struct mbuf *m, size_t off) for (ia = in6_ifaddrs; ia; ia = ia->ia_next) { IFA_LOCK(&ia->ia_ifa); if (IN6_ARE_ADDR_EQUAL(&t, &ia->ia_addr.sin6_addr) && - (ia->ia6_flags & (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|IN6_IFF_CLAT46)) == 0) { + (ia->ia6_flags & (IN6_IFF_ANYCAST | IN6_IFF_NOTREADY | IN6_IFF_CLAT46)) == 0) { IFA_UNLOCK(&ia->ia_ifa); src = &t; break; @@ -2319,7 +2424,7 @@ icmp6_reflect(struct mbuf *m, size_t off) /* * XXX option handling */ - m->m_flags &= ~(M_BCAST|M_MCAST); + m->m_flags &= ~(M_BCAST | M_MCAST); if (outif != NULL) { ifnet_release(outif); @@ -2337,19 +2442,20 @@ icmp6_reflect(struct mbuf *m, size_t off) bad: m_freem(m); - if (outif != NULL) + if (outif != NULL) { ifnet_release(outif); + } return; } static const char * icmp6_redirect_diag(struct in6_addr *src6, - struct in6_addr *dst6, - struct in6_addr *tgt6) + struct in6_addr *dst6, + struct in6_addr *tgt6) { static char buf[1024]; snprintf(buf, sizeof(buf), "(src=%s dst=%s tgt=%s)", - ip6_sprintf(src6), ip6_sprintf(dst6), ip6_sprintf(tgt6)); + ip6_sprintf(src6), ip6_sprintf(dst6), ip6_sprintf(tgt6)); return buf; } @@ -2372,20 +2478,23 @@ icmp6_redirect_input(struct mbuf *m, int off) struct in6_addr reddst6; union nd_opts ndopts; - if (!m || !ifp) + if (!m || !ifp) { return; + } /* * If we are an advertising router on this interface, * don't update route by icmp6 redirect. 
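The icmp6_redirect_input hunks that follow re-indent a long validation ladder whose checks come from RFC 4861: the redirect must arrive from a link-local source with hop limit 255, name a unicast destination, and be sent by the current first-hop gateway for that destination. A compact userland sketch of the same ladder (redirect_ok and its inputs are illustrative; the IN6_IS_ADDR_* macros are the standard netinet/in.h ones):

#include <netinet/in.h>
#include <string.h>

/* RFC 4861-style acceptance test for a received redirect. */
static int
redirect_ok(const struct in6_addr *src, int hlim,
    const struct in6_addr *dst, const struct in6_addr *cur_gw)
{
	if (!IN6_IS_ADDR_LINKLOCAL(src)) {  /* must come from a neighbor */
		return 0;
	}
	if (hlim != 255) {                  /* must not have been forwarded */
		return 0;
	}
	if (IN6_IS_ADDR_MULTICAST(dst)) {   /* redirect dst must be unicast */
		return 0;
	}
	/* sender must be our installed first hop toward dst */
	if (cur_gw == NULL || memcmp(src, cur_gw, sizeof(*src)) != 0) {
		return 0;
	}
	return 1;
}

int
main(void)
{
	struct in6_addr ll = { { { 0xfe, 0x80 } } };              /* fe80:: */
	struct in6_addr dst = { { { 0x20, 0x01, 0x0d, 0xb8 } } }; /* 2001:db8:: */

	/* accepted only when the sender is the current gateway */
	return redirect_ok(&ll, 255, &dst, &ll) ? 0 : 1;
}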
*/ - if (ifp->if_eflags & IFEF_IPV6_ROUTER) + if (ifp->if_eflags & IFEF_IPV6_ROUTER) { goto freeit; - if (!icmp6_rediraccept) + } + if (!icmp6_rediraccept) { goto freeit; + } #ifndef PULLDOWN_TEST - IP6_EXTHDR_CHECK(m, off, icmp6len, return); + IP6_EXTHDR_CHECK(m, off, icmp6len, return ); nd_rd = (struct nd_redirect *)((caddr_t)ip6 + off); #else IP6_EXTHDR_GET(nd_rd, struct nd_redirect *, m, off, icmp6len); @@ -2405,82 +2514,84 @@ icmp6_redirect_input(struct mbuf *m, int off) /* validation */ if (!IN6_IS_ADDR_LINKLOCAL(&src6)) { nd6log((LOG_ERR, - "ICMP6 redirect sent from %s rejected; " - "must be from linklocal\n", ip6_sprintf(&src6))); + "ICMP6 redirect sent from %s rejected; " + "must be from linklocal\n", ip6_sprintf(&src6))); goto bad; } if (ip6->ip6_hlim != 255) { nd6log((LOG_ERR, - "ICMP6 redirect sent from %s rejected; " - "hlim=%d (must be 255)\n", - ip6_sprintf(&src6), ip6->ip6_hlim)); + "ICMP6 redirect sent from %s rejected; " + "hlim=%d (must be 255)\n", + ip6_sprintf(&src6), ip6->ip6_hlim)); goto bad; } - { - /* ip6->ip6_src must be equal to gw for icmp6->icmp6_reddst */ - struct sockaddr_in6 sin6; - struct in6_addr *gw6; + { + /* ip6->ip6_src must be equal to gw for icmp6->icmp6_reddst */ + struct sockaddr_in6 sin6; + struct in6_addr *gw6; - bzero(&sin6, sizeof(sin6)); - sin6.sin6_family = AF_INET6; - sin6.sin6_len = sizeof(struct sockaddr_in6); - bcopy(&reddst6, &sin6.sin6_addr, sizeof(reddst6)); - rt = rtalloc1_scoped((struct sockaddr *)&sin6, 0, 0, ifp->if_index); - if (rt) { - RT_LOCK(rt); - if (rt->rt_gateway == NULL || - rt->rt_gateway->sa_family != AF_INET6) { - nd6log((LOG_ERR, - "ICMP6 redirect rejected; no route " - "with inet6 gateway found for redirect dst: %s\n", - icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); - RT_UNLOCK(rt); - rtfree(rt); - goto bad; - } + bzero(&sin6, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_len = sizeof(struct sockaddr_in6); + bcopy(&reddst6, &sin6.sin6_addr, sizeof(reddst6)); + rt = rtalloc1_scoped((struct sockaddr *)&sin6, 0, 0, ifp->if_index); + if (rt) { + RT_LOCK(rt); + if (rt->rt_gateway == NULL || + rt->rt_gateway->sa_family != AF_INET6) { + nd6log((LOG_ERR, + "ICMP6 redirect rejected; no route " + "with inet6 gateway found for redirect dst: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); + RT_UNLOCK(rt); + rtfree(rt); + goto bad; + } - gw6 = &(((struct sockaddr_in6 *)(void *) - rt->rt_gateway)->sin6_addr); - if (bcmp(&src6, gw6, sizeof(struct in6_addr)) != 0) { + gw6 = &(((struct sockaddr_in6 *)(void *) + rt->rt_gateway)->sin6_addr); + if (bcmp(&src6, gw6, sizeof(struct in6_addr)) != 0) { + nd6log((LOG_ERR, + "ICMP6 redirect rejected; " + "not equal to gw-for-src=%s (must be same): " + "%s\n", + ip6_sprintf(gw6), + icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); + RT_UNLOCK(rt); + rtfree(rt); + goto bad; + } + } else { nd6log((LOG_ERR, - "ICMP6 redirect rejected; " - "not equal to gw-for-src=%s (must be same): " - "%s\n", - ip6_sprintf(gw6), - icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); - RT_UNLOCK(rt); - rtfree(rt); + "ICMP6 redirect rejected; " + "no route found for redirect dst: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); goto bad; } - } else { - nd6log((LOG_ERR, - "ICMP6 redirect rejected; " - "no route found for redirect dst: %s\n", - icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); - goto bad; + RT_UNLOCK(rt); + rtfree(rt); + rt = NULL; } - RT_UNLOCK(rt); - rtfree(rt); - rt = NULL; - } if (IN6_IS_ADDR_MULTICAST(&reddst6)) { nd6log((LOG_ERR, - "ICMP6 redirect rejected; " - 
"redirect dst must be unicast: %s\n", - icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); + "ICMP6 redirect rejected; " + "redirect dst must be unicast: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); goto bad; } is_router = is_onlink = 0; - if (IN6_IS_ADDR_LINKLOCAL(&redtgt6)) - is_router = 1; /* router case */ - if (bcmp(&redtgt6, &reddst6, sizeof(redtgt6)) == 0) - is_onlink = 1; /* on-link destination case */ + if (IN6_IS_ADDR_LINKLOCAL(&redtgt6)) { + is_router = 1; /* router case */ + } + if (bcmp(&redtgt6, &reddst6, sizeof(redtgt6)) == 0) { + is_onlink = 1; /* on-link destination case */ + } if (!is_router && !is_onlink) { nd6log((LOG_ERR, - "ICMP6 redirect rejected; " - "neither router case nor onlink case: %s\n", - icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); + "ICMP6 redirect rejected; " + "neither router case nor onlink case: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); goto bad; } /* validation passed */ @@ -2489,8 +2600,8 @@ icmp6_redirect_input(struct mbuf *m, int off) nd6_option_init(nd_rd + 1, icmp6len, &ndopts); if (nd6_options(&ndopts) < 0) { nd6log((LOG_INFO, "icmp6_redirect_input: " - "invalid ND option, rejected: %s\n", - icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); + "invalid ND option, rejected: %s\n", + icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); /* nd6_options have incremented stats */ goto freeit; } @@ -2507,18 +2618,18 @@ icmp6_redirect_input(struct mbuf *m, int off) if (lladdr && ((ifp->if_addrlen + 2 + 7) & ~7) != lladdrlen) { nd6log((LOG_INFO, - "icmp6_redirect_input: lladdrlen mismatch for %s " - "(if %d, icmp6 packet %d): %s\n", - ip6_sprintf(&redtgt6), ifp->if_addrlen, lladdrlen - 2, - icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); + "icmp6_redirect_input: lladdrlen mismatch for %s " + "(if %d, icmp6 packet %d): %s\n", + ip6_sprintf(&redtgt6), ifp->if_addrlen, lladdrlen - 2, + icmp6_redirect_diag(&src6, &reddst6, &redtgt6))); goto bad; } /* RFC 2461 8.3 */ nd6_cache_lladdr(ifp, &redtgt6, lladdr, lladdrlen, ND_REDIRECT, - is_onlink ? ND_REDIRECT_ONLINK : ND_REDIRECT_ROUTER); + is_onlink ? ND_REDIRECT_ONLINK : ND_REDIRECT_ROUTER); - if (!is_onlink) { /* better router case. perform rtredirect. */ + if (!is_onlink) { /* better router case. perform rtredirect. 
*/ /* perform rtredirect */ struct sockaddr_in6 sdst; struct sockaddr_in6 sgw; @@ -2529,7 +2640,7 @@ icmp6_redirect_input(struct mbuf *m, int off) bzero(&ssrc, sizeof(ssrc)); sdst.sin6_family = sgw.sin6_family = ssrc.sin6_family = AF_INET6; sdst.sin6_len = sgw.sin6_len = ssrc.sin6_len = - sizeof(struct sockaddr_in6); + sizeof(struct sockaddr_in6); bcopy(&redtgt6, &sgw.sin6_addr, sizeof(struct in6_addr)); bcopy(&reddst6, &sdst.sin6_addr, sizeof(struct in6_addr)); bcopy(&src6, &ssrc.sin6_addr, sizeof(struct in6_addr)); @@ -2538,25 +2649,25 @@ icmp6_redirect_input(struct mbuf *m, int off) (struct sockaddr *)&ssrc, NULL); } /* finally update cached route in each socket via pfctlinput */ - { - struct sockaddr_in6 sdst; + { + struct sockaddr_in6 sdst; - bzero(&sdst, sizeof(sdst)); - sdst.sin6_family = AF_INET6; - sdst.sin6_len = sizeof(struct sockaddr_in6); - bcopy(&reddst6, &sdst.sin6_addr, sizeof(struct in6_addr)); + bzero(&sdst, sizeof(sdst)); + sdst.sin6_family = AF_INET6; + sdst.sin6_len = sizeof(struct sockaddr_in6); + bcopy(&reddst6, &sdst.sin6_addr, sizeof(struct in6_addr)); - pfctlinput(PRC_REDIRECT_HOST, (struct sockaddr *)&sdst); + pfctlinput(PRC_REDIRECT_HOST, (struct sockaddr *)&sdst); #if IPSEC - key_sa_routechange((struct sockaddr *)&sdst); + key_sa_routechange((struct sockaddr *)&sdst); #endif - } + } - freeit: +freeit: m_freem(m); return; - bad: +bad: icmp6stat.icp6s_badredirect++; m_freem(m); } @@ -2564,12 +2675,12 @@ icmp6_redirect_input(struct mbuf *m, int off) void icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) { - struct ifnet *ifp; /* my outgoing interface */ + struct ifnet *ifp; /* my outgoing interface */ struct in6_addr ifp_ll6; struct in6_addr *router_ll6; - struct ip6_hdr *sip6; /* m0 as struct ip6_hdr */ - struct mbuf *m = NULL; /* newly allocated one */ - struct ip6_hdr *ip6; /* m as struct ip6_hdr */ + struct ip6_hdr *sip6; /* m0 as struct ip6_hdr */ + struct mbuf *m = NULL; /* newly allocated one */ + struct ip6_hdr *ip6; /* m as struct ip6_hdr */ struct nd_redirect *nd_rd; size_t maxlen; u_char *p; @@ -2585,19 +2696,22 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) icmp6_errcount(&icmp6stat.icp6s_outerrhist, ND_REDIRECT, 0); - if (rt != NULL) + if (rt != NULL) { RT_LOCK(rt); + } /* sanity check */ - if (!m0 || !rt || !(rt->rt_flags & RTF_UP) || !(ifp = rt->rt_ifp)) + if (!m0 || !rt || !(rt->rt_flags & RTF_UP) || !(ifp = rt->rt_ifp)) { goto fail; + } /* * If we are not a router to begin with, or not an advertising * router on this interface, don't send icmp6 redirect. */ - if (!ip6_forwarding || !(ifp->if_eflags & IFEF_IPV6_ROUTER)) + if (!ip6_forwarding || !(ifp->if_eflags & IFEF_IPV6_ROUTER)) { goto fail; + } /* * Address check: @@ -2619,12 +2733,13 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) goto fail; } RT_LOCK(rt); - if (IN6_IS_ADDR_MULTICAST(&sip6->ip6_dst)) - goto fail; /* what should we do here? */ - + if (IN6_IS_ADDR_MULTICAST(&sip6->ip6_dst)) { + goto fail; /* what should we do here? 
*/ + } /* rate limit */ - if (icmp6_ratelimit(&sip6->ip6_src, ND_REDIRECT, 0)) + if (icmp6_ratelimit(&sip6->ip6_src, ND_REDIRECT, 0)) { goto fail; + } /* * Since we are going to append up to 1280 bytes (= IPV6_MMTU), @@ -2634,11 +2749,13 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) #if IPV6_MMTU >= MCLBYTES # error assumption failed about IPV6_MMTU and MCLBYTES #endif - MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ - if (m && IPV6_MMTU >= MHLEN) + MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + if (m && IPV6_MMTU >= MHLEN) { MCLGET(m, M_DONTWAIT); - if (!m) + } + if (!m) { goto fail; + } m->m_pkthdr.rcvif = NULL; m->m_len = 0; maxlen = M_TRAILINGSPACE(m); @@ -2653,9 +2770,10 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) /* get ip6 linklocal address for ifp(my outgoing interface). */ struct in6_ifaddr *ia; if ((ia = in6ifa_ifpforlinklocal(ifp, - IN6_IFF_NOTREADY| - IN6_IFF_ANYCAST)) == NULL) + IN6_IFF_NOTREADY | + IN6_IFF_ANYCAST)) == NULL) { goto fail; + } IFA_LOCK(&ia->ia_ifa); ifp_ll6 = ia->ia_addr.sin6_addr; IFA_UNLOCK(&ia->ia_ifa); @@ -2667,10 +2785,12 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) struct sockaddr_in6 *sin6; sin6 = (struct sockaddr_in6 *)(void *)rt->rt_gateway; router_ll6 = &sin6->sin6_addr; - if (!IN6_IS_ADDR_LINKLOCAL(router_ll6)) + if (!IN6_IS_ADDR_LINKLOCAL(router_ll6)) { router_ll6 = (struct in6_addr *)NULL; - } else + } + } else { router_ll6 = (struct in6_addr *)NULL; + } /* ip6 */ ip6 = mtod(m, struct ip6_hdr *); @@ -2694,26 +2814,28 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) * nd_rd->nd_rd_target must be a link-local address in * better router cases. */ - if (!router_ll6) + if (!router_ll6) { goto fail; + } bcopy(router_ll6, &nd_rd->nd_rd_target, - sizeof(nd_rd->nd_rd_target)); + sizeof(nd_rd->nd_rd_target)); bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_dst, - sizeof(nd_rd->nd_rd_dst)); + sizeof(nd_rd->nd_rd_dst)); } else { /* make sure redtgt == reddst */ bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_target, - sizeof(nd_rd->nd_rd_target)); + sizeof(nd_rd->nd_rd_target)); bcopy(&sip6->ip6_dst, &nd_rd->nd_rd_dst, - sizeof(nd_rd->nd_rd_dst)); + sizeof(nd_rd->nd_rd_dst)); } RT_UNLOCK(rt); rt = NULL; p = (u_char *)(nd_rd + 1); - if (!router_ll6) + if (!router_ll6) { goto nolladdropt; + } { /* target lladdr option */ @@ -2725,11 +2847,12 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) /* Callee returns a locked route upon success */ rt_router = nd6_lookup(router_ll6, 0, ifp, 0); - if (!rt_router) + if (!rt_router) { goto nolladdropt; + } RT_LOCK_ASSERT_HELD(rt_router); len = sizeof(*nd_opt) + ifp->if_addrlen; - len = (len + 7) & ~7; /* round by 8 */ + len = (len + 7) & ~7; /* round by 8 */ /* safety check */ if (len + (p - (u_char *)ip6) > maxlen) { RT_REMREF_LOCKED(rt_router); @@ -2738,16 +2861,16 @@ icmp6_redirect_output(struct mbuf *m0, struct rtentry *rt) } if (!(rt_router->rt_flags & RTF_GATEWAY) && - (rt_router->rt_flags & RTF_LLINFO) && - (rt_router->rt_gateway->sa_family == AF_LINK) && - (sdl = (struct sockaddr_dl *)(void *) - rt_router->rt_gateway) && sdl->sdl_alen) { - nd_opt = (struct nd_opt_hdr *)p; - nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR; - nd_opt->nd_opt_len = len >> 3; - lladdr = (char *)(nd_opt + 1); - bcopy(LLADDR(sdl), lladdr, ifp->if_addrlen); - p += len; + (rt_router->rt_flags & RTF_LLINFO) && + (rt_router->rt_gateway->sa_family == AF_LINK) && + (sdl = (struct sockaddr_dl *)(void *) + rt_router->rt_gateway) && sdl->sdl_alen) { + nd_opt = (struct nd_opt_hdr *)p; + 
nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR; + nd_opt->nd_opt_len = len >> 3; + lladdr = (char *)(nd_opt + 1); + bcopy(LLADDR(sdl), lladdr, ifp->if_addrlen); + p += len; } RT_REMREF_LOCKED(rt_router); RT_UNLOCK(rt_router); @@ -2758,82 +2881,85 @@ nolladdropt:; m->m_pkthdr.len = m->m_len = p - (u_char *)ip6; /* just to be safe */ -#ifdef M_DECRYPTED /*not openbsd*/ - if (m0->m_flags & M_DECRYPTED) +#ifdef M_DECRYPTED /*not openbsd*/ + if (m0->m_flags & M_DECRYPTED) { goto noredhdropt; + } #endif - if (p - (u_char *)ip6 > maxlen) + if (p - (u_char *)ip6 > maxlen) { goto noredhdropt; + } - { - /* redirected header option */ - int len; - struct nd_opt_rd_hdr *nd_opt_rh; - - /* - * compute the maximum size for icmp6 redirect header option. - * XXX room for auth header? - */ - len = maxlen - (p - (u_char *)ip6); - len &= ~7; + { + /* redirected header option */ + int len; + struct nd_opt_rd_hdr *nd_opt_rh; - /* This is just for simplicity. */ - if (m0->m_pkthdr.len != m0->m_len) { - if (m0->m_next) { - m_freem(m0->m_next); - m0->m_next = NULL; + /* + * compute the maximum size for icmp6 redirect header option. + * XXX room for auth header? + */ + len = maxlen - (p - (u_char *)ip6); + len &= ~7; + + /* This is just for simplicity. */ + if (m0->m_pkthdr.len != m0->m_len) { + if (m0->m_next) { + m_freem(m0->m_next); + m0->m_next = NULL; + } + m0->m_pkthdr.len = m0->m_len; } - m0->m_pkthdr.len = m0->m_len; - } - /* - * Redirected header option spec (RFC2461 4.6.3) talks nothing - * about padding/truncate rule for the original IP packet. - * From the discussion on IPv6imp in Feb 1999, the consensus was: - * - "attach as much as possible" is the goal - * - pad if not aligned (original size can be guessed by original - * ip6 header) - * Following code adds the padding if it is simple enough, - * and truncates if not. - */ - if (m0->m_next || m0->m_pkthdr.len != m0->m_len) - panic("assumption failed in %s:%d\n", __func__, __LINE__); + /* + * Redirected header option spec (RFC2461 4.6.3) talks nothing + * about padding/truncate rule for the original IP packet. + * From the discussion on IPv6imp in Feb 1999, the consensus was: + * - "attach as much as possible" is the goal + * - pad if not aligned (original size can be guessed by original + * ip6 header) + * Following code adds the padding if it is simple enough, + * and truncates if not. 
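That pad-or-truncate rule is the whole trick: nd_opt_rh_len counts 8-byte units, so the enclosed packet is padded up when the mbuf has room for the fill bytes and truncated down otherwise. A minimal sketch, with pkt_len and trailing_space standing in for m_pkthdr.len and M_TRAILINGSPACE():

#include <stdio.h>

/*
 * Mirror of the padding/truncation decision for the redirected-header
 * option: round the attached packet to a multiple of 8 bytes, padding
 * only when it is cheap.
 */
static int
round_redirected_hdr(int pkt_len, int trailing_space)
{
	int extra = pkt_len % 8;

	if (extra == 0) {
		return pkt_len;
	}
	if (8 - extra <= trailing_space) {
		return pkt_len + (8 - extra);   /* pad */
	}
	return pkt_len - extra;                 /* truncate */
}

int
main(void)
{
	/* 1 spare byte cannot absorb 3 bytes of padding: truncate instead */
	printf("%d %d\n", round_redirected_hdr(61, 16),  /* 64: padded    */
	    round_redirected_hdr(61, 1));                /* 56: truncated */
	return 0;
}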
+ */ + if (m0->m_next || m0->m_pkthdr.len != m0->m_len) { + panic("assumption failed in %s:%d\n", __func__, __LINE__); + } - if (len - sizeof(*nd_opt_rh) < m0->m_pkthdr.len) { - /* not enough room, truncate */ - m0->m_pkthdr.len = m0->m_len = len - sizeof(*nd_opt_rh); - } else { - /* enough room, pad or truncate */ - size_t extra; - - extra = m0->m_pkthdr.len % 8; - if (extra) { - /* pad if easy enough, truncate if not */ - if (8 - extra <= M_TRAILINGSPACE(m0)) { - /* pad */ - m0->m_len += (8 - extra); - m0->m_pkthdr.len += (8 - extra); - } else { - /* truncate */ - m0->m_pkthdr.len -= extra; - m0->m_len -= extra; + if (len - sizeof(*nd_opt_rh) < m0->m_pkthdr.len) { + /* not enough room, truncate */ + m0->m_pkthdr.len = m0->m_len = len - sizeof(*nd_opt_rh); + } else { + /* enough room, pad or truncate */ + size_t extra; + + extra = m0->m_pkthdr.len % 8; + if (extra) { + /* pad if easy enough, truncate if not */ + if (8 - extra <= M_TRAILINGSPACE(m0)) { + /* pad */ + m0->m_len += (8 - extra); + m0->m_pkthdr.len += (8 - extra); + } else { + /* truncate */ + m0->m_pkthdr.len -= extra; + m0->m_len -= extra; + } } + len = m0->m_pkthdr.len + sizeof(*nd_opt_rh); + m0->m_pkthdr.len = m0->m_len = len - sizeof(*nd_opt_rh); } - len = m0->m_pkthdr.len + sizeof(*nd_opt_rh); - m0->m_pkthdr.len = m0->m_len = len - sizeof(*nd_opt_rh); - } - nd_opt_rh = (struct nd_opt_rd_hdr *)p; - bzero(nd_opt_rh, sizeof(*nd_opt_rh)); - nd_opt_rh->nd_opt_rh_type = ND_OPT_REDIRECTED_HEADER; - nd_opt_rh->nd_opt_rh_len = len >> 3; - p += sizeof(*nd_opt_rh); - m->m_pkthdr.len = m->m_len = p - (u_char *)ip6; + nd_opt_rh = (struct nd_opt_rd_hdr *)p; + bzero(nd_opt_rh, sizeof(*nd_opt_rh)); + nd_opt_rh->nd_opt_rh_type = ND_OPT_REDIRECTED_HEADER; + nd_opt_rh->nd_opt_rh_len = len >> 3; + p += sizeof(*nd_opt_rh); + m->m_pkthdr.len = m->m_len = p - (u_char *)ip6; - /* connect m0 to m */ - m->m_next = m0; - m->m_pkthdr.len = m->m_len + m0->m_len; - } + /* connect m0 to m */ + m->m_next = m0; + m->m_pkthdr.len = m->m_len + m0->m_len; + } noredhdropt:; /* XXX: clear embedded link IDs in the inner header */ @@ -2846,7 +2972,7 @@ noredhdropt:; nd_rd->nd_rd_cksum = 0; nd_rd->nd_rd_cksum - = in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), ntohs(ip6->ip6_plen)); + = in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), ntohs(ip6->ip6_plen)); /* send the packet to outside... 
*/ ip6oa.ip6oa_boundif = ifp->if_index; @@ -2863,12 +2989,15 @@ noredhdropt:; return; fail: - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); - if (m) + } + if (m) { m_freem(m); - if (m0) + } + if (m0) { m_freem(m0); + } } /* @@ -2887,8 +3016,9 @@ icmp6_ctloutput(struct socket *so, struct sockopt *sopt) op = sopt->sopt_dir; optname = sopt->sopt_name; optlen = sopt->sopt_valsize; - } else + } else { level = op = optname = optlen = 0; + } if (level != IPPROTO_ICMPV6) { return EINVAL; @@ -2898,7 +3028,7 @@ icmp6_ctloutput(struct socket *so, struct sockopt *sopt) case PRCO_SETOPT: switch (optname) { case ICMP6_FILTER: - { + { struct icmp6_filter *p; if (optlen != 0 && optlen != sizeof(*p)) { @@ -2918,10 +3048,10 @@ icmp6_ctloutput(struct socket *so, struct sockopt *sopt) ICMP6_FILTER_SETPASSALL(inp->in6p_icmp6filt); } else { error = sooptcopyin(sopt, inp->in6p_icmp6filt, optlen, - optlen); + optlen); } break; - } + } default: error = ENOPROTOOPT; @@ -2932,15 +3062,15 @@ icmp6_ctloutput(struct socket *so, struct sockopt *sopt) case PRCO_GETOPT: switch (optname) { case ICMP6_FILTER: - { + { if (inp->in6p_icmp6filt == NULL) { error = EINVAL; break; } error = sooptcopyout(sopt, inp->in6p_icmp6filt, - min(sizeof(struct icmp6_filter), optlen)); + min(sizeof(struct icmp6_filter), optlen)); break; - } + } default: error = ENOPROTOOPT; @@ -2949,7 +3079,7 @@ icmp6_ctloutput(struct socket *so, struct sockopt *sopt) break; } - return(error); + return error; } /* @@ -2958,56 +3088,58 @@ icmp6_ctloutput(struct socket *so, struct sockopt *sopt) int icmp6_dgram_ctloutput(struct socket *so, struct sockopt *sopt) { - if (kauth_cred_issuser(so->so_cred)) + if (kauth_cred_issuser(so->so_cred)) { return icmp6_ctloutput(so, sopt); + } if (sopt->sopt_level == IPPROTO_ICMPV6) { switch (sopt->sopt_name) { - case ICMP6_FILTER: - return icmp6_ctloutput(so, sopt); - default: - return EPERM; + case ICMP6_FILTER: + return icmp6_ctloutput(so, sopt); + default: + return EPERM; } } - if (sopt->sopt_level != IPPROTO_IPV6) + if (sopt->sopt_level != IPPROTO_IPV6) { return EINVAL; + } switch (sopt->sopt_name) { - case IPV6_UNICAST_HOPS: - case IPV6_CHECKSUM: - case IPV6_V6ONLY: - case IPV6_USE_MIN_MTU: - case IPV6_RECVRTHDR: - case IPV6_RECVPKTINFO: - case IPV6_RECVHOPLIMIT: - case IPV6_PATHMTU: - case IPV6_PKTINFO: - case IPV6_HOPLIMIT: - case IPV6_HOPOPTS: - case IPV6_DSTOPTS: - case IPV6_MULTICAST_IF: - case IPV6_MULTICAST_HOPS: - case IPV6_MULTICAST_LOOP: - case IPV6_JOIN_GROUP: - case IPV6_LEAVE_GROUP: - case IPV6_PORTRANGE: - case IPV6_IPSEC_POLICY: - case IPV6_RECVTCLASS: - case IPV6_TCLASS: - case IPV6_2292PKTOPTIONS: - case IPV6_2292PKTINFO: - case IPV6_2292HOPLIMIT: - case IPV6_2292HOPOPTS: - case IPV6_2292DSTOPTS: - case IPV6_2292RTHDR: - case IPV6_BOUND_IF: - case IPV6_NO_IFT_CELLULAR: - - return ip6_ctloutput(so, sopt); + case IPV6_UNICAST_HOPS: + case IPV6_CHECKSUM: + case IPV6_V6ONLY: + case IPV6_USE_MIN_MTU: + case IPV6_RECVRTHDR: + case IPV6_RECVPKTINFO: + case IPV6_RECVHOPLIMIT: + case IPV6_PATHMTU: + case IPV6_PKTINFO: + case IPV6_HOPLIMIT: + case IPV6_HOPOPTS: + case IPV6_DSTOPTS: + case IPV6_MULTICAST_IF: + case IPV6_MULTICAST_HOPS: + case IPV6_MULTICAST_LOOP: + case IPV6_JOIN_GROUP: + case IPV6_LEAVE_GROUP: + case IPV6_PORTRANGE: + case IPV6_IPSEC_POLICY: + case IPV6_RECVTCLASS: + case IPV6_TCLASS: + case IPV6_2292PKTOPTIONS: + case IPV6_2292PKTINFO: + case IPV6_2292HOPLIMIT: + case IPV6_2292HOPOPTS: + case IPV6_2292DSTOPTS: + case IPV6_2292RTHDR: + case IPV6_BOUND_IF: + case IPV6_NO_IFT_CELLULAR: + + 
return ip6_ctloutput(so, sopt); - default: - return EPERM; + default: + return EPERM; } } @@ -3024,15 +3156,16 @@ icmp6_dgram_send(struct socket *so, int flags, struct mbuf *m, if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) { + ) { error = (inp == NULL ? EINVAL : EPROTOTYPE); goto bad; } - if (kauth_cred_issuser(so->so_cred)) - return (rip6_output(m, so, SIN6(nam), control, 0)); + if (kauth_cred_issuser(so->so_cred)) { + return rip6_output(m, so, SIN6(nam), control, 0); + } /* always copy sockaddr to avoid overwrites */ if (so->so_state & SS_ISCONNECTED) { @@ -3045,7 +3178,7 @@ icmp6_dgram_send(struct socket *so, int flags, struct mbuf *m, tmp.sin6_family = AF_INET6; tmp.sin6_len = sizeof(struct sockaddr_in6); bcopy(&inp->in6p_faddr, &tmp.sin6_addr, - sizeof(struct in6_addr)); + sizeof(struct in6_addr)); dst = &tmp; } else { if (nam == NULL) { @@ -3061,9 +3194,9 @@ icmp6_dgram_send(struct socket *so, int flags, struct mbuf *m, */ if (SOCK_PROTO(so) == IPPROTO_ICMPV6) { if (m->m_len < sizeof(struct icmp6_hdr) && - (m = m_pullup(m, sizeof(struct icmp6_hdr))) == NULL) { - error = ENOBUFS; - goto bad; + (m = m_pullup(m, sizeof(struct icmp6_hdr))) == NULL) { + error = ENOBUFS; + goto bad; } icmp6 = mtod(m, struct icmp6_hdr *); @@ -3090,49 +3223,56 @@ icmp6_dgram_send(struct socket *so, int flags, struct mbuf *m, } #endif - return (rip6_output(m, so, dst, control, 0)); + return rip6_output(m, so, dst, control, 0); bad: VERIFY(error != 0); - if (m != NULL) + if (m != NULL) { m_freem(m); - if (control != NULL) + } + if (control != NULL) { m_freem(control); + } - return (error); + return error; } /* Like rip6_attach but without root privilege enforcement */ __private_extern__ int icmp6_dgram_attach(struct socket *so, int proto, struct proc *p) { - struct inpcb *inp; - int error; - - inp = sotoinpcb(so); - if (inp) - panic("icmp6_dgram_attach"); - - if (proto != IPPROTO_ICMPV6) - return EINVAL; - - error = soreserve(so, rip_sendspace, rip_recvspace); - if (error) - return error; - error = in_pcballoc(so, &ripcbinfo, p); - if (error) - return error; - inp = (struct inpcb *)so->so_pcb; - inp->inp_vflag |= INP_IPV6; - inp->in6p_ip6_nxt = IPPROTO_ICMPV6; - inp->in6p_hops = -1; /* use kernel default */ - inp->in6p_cksum = -1; - MALLOC(inp->in6p_icmp6filt, struct icmp6_filter *, - sizeof(struct icmp6_filter), M_PCB, M_WAITOK); - if (inp->in6p_icmp6filt == NULL) - return (ENOMEM); - ICMP6_FILTER_SETPASSALL(inp->in6p_icmp6filt); - return 0; + struct inpcb *inp; + int error; + + inp = sotoinpcb(so); + if (inp) { + panic("icmp6_dgram_attach"); + } + + if (proto != IPPROTO_ICMPV6) { + return EINVAL; + } + + error = soreserve(so, rip_sendspace, rip_recvspace); + if (error) { + return error; + } + error = in_pcballoc(so, &ripcbinfo, p); + if (error) { + return error; + } + inp = (struct inpcb *)so->so_pcb; + inp->inp_vflag |= INP_IPV6; + inp->in6p_ip6_nxt = IPPROTO_ICMPV6; + inp->in6p_hops = -1; /* use kernel default */ + inp->in6p_cksum = -1; + MALLOC(inp->in6p_icmp6filt, struct icmp6_filter *, + sizeof(struct icmp6_filter), M_PCB, M_WAITOK); + if (inp->in6p_icmp6filt == NULL) { + return ENOMEM; + } + ICMP6_FILTER_SETPASSALL(inp->in6p_icmp6filt); + return 0; } @@ -3146,19 +3286,20 @@ icmp6_dgram_attach(struct socket *so, int proto, struct proc *p) */ static int icmp6_ratelimit( - __unused const struct in6_addr *dst, /* not used at this moment */ + __unused const struct in6_addr *dst, /* not used at this moment */ const 
int type, __unused const int code) { int ret; - ret = 0; /* okay to send */ + ret = 0; /* okay to send */ /* PPS limit */ if (type == ND_ROUTER_ADVERT) { if (!ppsratecheck(&icmp6rappslim_last, &icmp6rapps_count, - icmp6rappslim)) + icmp6rappslim)) { ret++; + } } else if (!ppsratecheck(&icmp6errppslim_last, &icmp6errpps_count, icmp6errppslim)) { /* The packet is subject to rate limit */ diff --git a/bsd/netinet6/in6.c b/bsd/netinet6/in6.c index 4f34af6ba..950911de7 100644 --- a/bsd/netinet6/in6.c +++ b/bsd/netinet6/in6.c @@ -157,13 +157,13 @@ const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT; const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT; const struct in6_addr in6addr_nodelocal_allnodes = - IN6ADDR_NODELOCAL_ALLNODES_INIT; + IN6ADDR_NODELOCAL_ALLNODES_INIT; const struct in6_addr in6addr_linklocal_allnodes = - IN6ADDR_LINKLOCAL_ALLNODES_INIT; + IN6ADDR_LINKLOCAL_ALLNODES_INIT; const struct in6_addr in6addr_linklocal_allrouters = - IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; + IN6ADDR_LINKLOCAL_ALLROUTERS_INIT; const struct in6_addr in6addr_linklocal_allv2routers = - IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT; + IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT; const struct in6_addr in6mask0 = IN6MASK0; const struct in6_addr in6mask7 = IN6MASK7; @@ -174,7 +174,7 @@ const struct in6_addr in6mask96 = IN6MASK96; const struct in6_addr in6mask128 = IN6MASK128; const struct sockaddr_in6 sa6_any = { - sizeof (sa6_any), AF_INET6, 0, 0, IN6ADDR_ANY_INIT, 0 + sizeof(sa6_any), AF_INET6, 0, 0, IN6ADDR_ANY_INIT, 0 }; static int in6ctl_associd(struct socket *, u_long, caddr_t); @@ -220,26 +220,26 @@ static void in6_if_up_dad_start(struct ifnet *); extern lck_mtx_t *nd6_mutex; -#define IN6IFA_TRACE_HIST_SIZE 32 /* size of trace history */ +#define IN6IFA_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ __private_extern__ unsigned int in6ifa_trace_hist_size = IN6IFA_TRACE_HIST_SIZE; struct in6_ifaddr_dbg { - struct in6_ifaddr in6ifa; /* in6_ifaddr */ - struct in6_ifaddr in6ifa_old; /* saved in6_ifaddr */ - u_int16_t in6ifa_refhold_cnt; /* # of IFA_ADDREF */ - u_int16_t in6ifa_refrele_cnt; /* # of IFA_REMREF */ + struct in6_ifaddr in6ifa; /* in6_ifaddr */ + struct in6_ifaddr in6ifa_old; /* saved in6_ifaddr */ + u_int16_t in6ifa_refhold_cnt; /* # of IFA_ADDREF */ + u_int16_t in6ifa_refrele_cnt; /* # of IFA_REMREF */ /* * Alloc and free callers. */ - ctrace_t in6ifa_alloc; - ctrace_t in6ifa_free; + ctrace_t in6ifa_alloc; + ctrace_t in6ifa_free; /* * Circular lists of IFA_ADDREF and IFA_REMREF callers. 
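The trace arrays above form a fixed-size ring: each IFA_ADDREF or IFA_REMREF caller record lands at cnt modulo IN6IFA_TRACE_HIST_SIZE and overwrites the oldest slot, and because 65536 is a multiple of 32 the u_int16_t counters can wrap without skewing slot selection. A reduced sketch of that pattern (trace_rec is a stand-in for ctrace_t):

#include <stdint.h>
#include <stdio.h>

#define TRACE_HIST_SIZE 32      /* matches IN6IFA_TRACE_HIST_SIZE above */

struct trace_rec {
	uintptr_t caller;       /* stand-in for a ctrace_t record */
};

struct trace_ring {
	struct trace_rec slot[TRACE_HIST_SIZE];
	uint16_t cnt;           /* cnt % TRACE_HIST_SIZE picks the slot */
};

/* Record one refhold/refrele-style event, overwriting the oldest. */
static void
trace_record(struct trace_ring *r, uintptr_t caller)
{
	r->slot[r->cnt++ % TRACE_HIST_SIZE].caller = caller;
}

int
main(void)
{
	struct trace_ring r = { .cnt = 0 };

	for (uintptr_t i = 0; i < 40; i++) {    /* wraps after 32 events */
		trace_record(&r, i);
	}
	/* events=40; slots 0..7 now hold the newest entries 32..39 */
	printf("events=%u slot0=%lu\n", r.cnt,
	    (unsigned long)r.slot[0].caller);
	return 0;
}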
*/ - ctrace_t in6ifa_refhold[IN6IFA_TRACE_HIST_SIZE]; - ctrace_t in6ifa_refrele[IN6IFA_TRACE_HIST_SIZE]; + ctrace_t in6ifa_refhold[IN6IFA_TRACE_HIST_SIZE]; + ctrace_t in6ifa_refrele[IN6IFA_TRACE_HIST_SIZE]; /* * Trash list linkage */ @@ -251,15 +251,15 @@ static TAILQ_HEAD(, in6_ifaddr_dbg) in6ifa_trash_head; static decl_lck_mtx_data(, in6ifa_trash_lock); #if DEBUG -static unsigned int in6ifa_debug = 1; /* debugging (enabled) */ +static unsigned int in6ifa_debug = 1; /* debugging (enabled) */ #else -static unsigned int in6ifa_debug; /* debugging (disabled) */ +static unsigned int in6ifa_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int in6ifa_size; /* size of zone element */ -static struct zone *in6ifa_zone; /* zone for in6_ifaddr */ +static unsigned int in6ifa_size; /* size of zone element */ +static struct zone *in6ifa_zone; /* zone for in6_ifaddr */ -#define IN6IFA_ZONE_MAX 64 /* maximum elements in zone */ -#define IN6IFA_ZONE_NAME "in6_ifaddr" /* zone name */ +#define IN6IFA_ZONE_MAX 64 /* maximum elements in zone */ +#define IN6IFA_ZONE_NAME "in6_ifaddr" /* zone name */ struct eventhandler_lists_ctxt in6_evhdlr_ctxt; struct eventhandler_lists_ctxt in6_clat46_evhdlr_ctxt; @@ -274,9 +274,9 @@ in6_ifloop_request(int cmd, struct ifaddr *ifa) struct rtentry *nrt = NULL; int e; - bzero(&all1_sa, sizeof (all1_sa)); + bzero(&all1_sa, sizeof(all1_sa)); all1_sa.sin6_family = AF_INET6; - all1_sa.sin6_len = sizeof (struct sockaddr_in6); + all1_sa.sin6_len = sizeof(struct sockaddr_in6); all1_sa.sin6_addr = in6mask128; /* @@ -290,7 +290,7 @@ in6_ifloop_request(int cmd, struct ifaddr *ifa) */ lck_mtx_lock(rnh_lock); e = rtrequest_locked(cmd, ifa->ifa_addr, ifa->ifa_addr, - (struct sockaddr *)&all1_sa, RTF_UP|RTF_HOST|RTF_LLINFO, &nrt); + (struct sockaddr *)&all1_sa, RTF_UP | RTF_HOST | RTF_LLINFO, &nrt); if (e != 0) { log(LOG_ERR, "in6_ifloop_request: " "%s operation failed for %s (errno=%d)\n", @@ -299,8 +299,9 @@ in6_ifloop_request(int cmd, struct ifaddr *ifa) e); } - if (nrt != NULL) + if (nrt != NULL) { RT_LOCK(nrt); + } /* * Make sure rt_ifa be equal to IFA, the second argument of the * function. @@ -349,8 +350,9 @@ in6_ifaddloop(struct ifaddr *ifa) * INET6 is set once during init; no need to hold lock. */ rt = rtalloc1(ifa->ifa_addr, 0, 0); - if (rt != NULL) + if (rt != NULL) { RT_LOCK(rt); + } if (rt == NULL || (rt->rt_flags & RTF_HOST) == 0 || (rt->rt_ifp->if_flags & IFF_LOOPBACK) == 0) { if (rt != NULL) { @@ -438,17 +440,20 @@ in6_mask2len(struct in6_addr *mask, u_char *lim0) u_char *lim = lim0, *p; /* ignore the scope_id part */ - if (lim0 == NULL || lim0 - (u_char *)mask > sizeof (*mask)) - lim = (u_char *)mask + sizeof (*mask); + if (lim0 == NULL || lim0 - (u_char *)mask > sizeof(*mask)) { + lim = (u_char *)mask + sizeof(*mask); + } for (p = (u_char *)mask; p < lim; x++, p++) { - if (*p != 0xff) + if (*p != 0xff) { break; + } } y = 0; if (p < lim) { for (y = 0; y < 8; y++) { - if ((*p & (0x80 >> y)) == 0) + if ((*p & (0x80 >> y)) == 0) { break; + } } } @@ -457,14 +462,17 @@ in6_mask2len(struct in6_addr *mask, u_char *lim0) * remaining bits. 
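in6_mask2len, mid-reformat here, turns a netmask into a prefix length and reports -1 for a non-contiguous mask, that is, any set bit after the first clear one. A standalone re-derivation for illustration (not the kernel routine itself): bytes ff ff ff 80 give 25, while a mask with a hole is rejected.

#include <stdio.h>

/* Return the prefix length of a contiguous netmask, or -1 otherwise. */
static int
mask2len(const unsigned char *mask, int nbytes)
{
	int bits = 0, i = 0, y = 0;

	while (i < nbytes && mask[i] == 0xff) {         /* whole 0xff bytes */
		bits += 8;
		i++;
	}
	if (i < nbytes) {
		while (y < 8 && (mask[i] & (0x80 >> y))) {  /* leading bits */
			y++;
		}
		bits += y;
		/* any set bit past the boundary makes the mask invalid */
		if (mask[i] & (0xff >> y)) {
			return -1;
		}
		for (i++; i < nbytes; i++) {
			if (mask[i] != 0) {
				return -1;
			}
		}
	}
	return bits;
}

int
main(void)
{
	unsigned char good[16] = { 0xff, 0xff, 0xff, 0x80 };    /* /25 */
	unsigned char bad[16]  = { 0xff, 0xff, 0x00, 0x80 };    /* hole */

	printf("%d %d\n", mask2len(good, 16), mask2len(bad, 16)); /* 25 -1 */
	return 0;
}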
*/ if (p < lim) { - if (y != 0 && (*p & (0x00ff >> y)) != 0) - return (-1); - for (p = p + 1; p < lim; p++) - if (*p != 0) - return (-1); + if (y != 0 && (*p & (0x00ff >> y)) != 0) { + return -1; + } + for (p = p + 1; p < lim; p++) { + if (*p != 0) { + return -1; + } + } } - return (x * 8 + y); + return x * 8 + y; } void @@ -472,18 +480,20 @@ in6_len2mask(struct in6_addr *mask, int len) { int i; - bzero(mask, sizeof (*mask)); - for (i = 0; i < len / 8; i++) + bzero(mask, sizeof(*mask)); + for (i = 0; i < len / 8; i++) { mask->s6_addr8[i] = 0xff; - if (len % 8) + } + if (len % 8) { mask->s6_addr8[i] = (0xff00 >> (len % 8)) & 0xff; + } } void in6_aliasreq_64_to_32(struct in6_aliasreq_64 *src, struct in6_aliasreq_32 *dst) { - bzero(dst, sizeof (*dst)); - bcopy(src->ifra_name, dst->ifra_name, sizeof (dst->ifra_name)); + bzero(dst, sizeof(*dst)); + bcopy(src->ifra_name, dst->ifra_name, sizeof(dst->ifra_name)); dst->ifra_addr = src->ifra_addr; dst->ifra_dstaddr = src->ifra_dstaddr; dst->ifra_prefixmask = src->ifra_prefixmask; @@ -497,8 +507,8 @@ in6_aliasreq_64_to_32(struct in6_aliasreq_64 *src, struct in6_aliasreq_32 *dst) void in6_aliasreq_32_to_64(struct in6_aliasreq_32 *src, struct in6_aliasreq_64 *dst) { - bzero(dst, sizeof (*dst)); - bcopy(src->ifra_name, dst->ifra_name, sizeof (dst->ifra_name)); + bzero(dst, sizeof(*dst)); + bcopy(src->ifra_name, dst->ifra_name, sizeof(dst->ifra_name)); dst->ifra_addr = src->ifra_addr; dst->ifra_dstaddr = src->ifra_dstaddr; dst->ifra_prefixmask = src->ifra_prefixmask; @@ -514,12 +524,12 @@ void in6_cgareq_32_to_64(struct in6_cgareq_32 *src, struct in6_cgareq_64 *dst) { - bzero(dst, sizeof (*dst)); - bcopy(src->cgar_name, dst->cgar_name, sizeof (dst->cgar_name)); + bzero(dst, sizeof(*dst)); + bcopy(src->cgar_name, dst->cgar_name, sizeof(dst->cgar_name)); dst->cgar_flags = src->cgar_flags; bcopy(src->cgar_cgaprep.cga_modifier.octets, dst->cgar_cgaprep.cga_modifier.octets, - sizeof (dst->cgar_cgaprep.cga_modifier.octets)); + sizeof(dst->cgar_cgaprep.cga_modifier.octets)); dst->cgar_cgaprep.cga_security_level = src->cgar_cgaprep.cga_security_level; dst->cgar_lifetime.ia6t_expire = src->cgar_lifetime.ia6t_expire; @@ -534,12 +544,12 @@ void in6_cgareq_64_to_32(struct in6_cgareq_64 *src, struct in6_cgareq_32 *dst) { - bzero(dst, sizeof (*dst)); - bcopy(src->cgar_name, dst->cgar_name, sizeof (dst->cgar_name)); + bzero(dst, sizeof(*dst)); + bcopy(src->cgar_name, dst->cgar_name, sizeof(dst->cgar_name)); dst->cgar_flags = src->cgar_flags; bcopy(src->cgar_cgaprep.cga_modifier.octets, dst->cgar_cgaprep.cga_modifier.octets, - sizeof (dst->cgar_cgaprep.cga_modifier.octets)); + sizeof(dst->cgar_cgaprep.cga_modifier.octets)); dst->cgar_cgaprep.cga_security_level = src->cgar_cgaprep.cga_security_level; dst->cgar_lifetime.ia6t_expire = src->cgar_lifetime.ia6t_expire; @@ -553,38 +563,42 @@ static struct in6_aliasreq * in6_aliasreq_to_native(void *data, int data_is_64, struct in6_aliasreq *dst) { #if defined(__LP64__) - if (data_is_64) - bcopy(data, dst, sizeof (*dst)); - else + if (data_is_64) { + bcopy(data, dst, sizeof(*dst)); + } else { in6_aliasreq_32_to_64((struct in6_aliasreq_32 *)data, (struct in6_aliasreq_64 *)dst); + } #else - if (data_is_64) + if (data_is_64) { in6_aliasreq_64_to_32((struct in6_aliasreq_64 *)data, (struct in6_aliasreq_32 *)dst); - else - bcopy(data, dst, sizeof (*dst)); + } else { + bcopy(data, dst, sizeof(*dst)); + } #endif /* __LP64__ */ - return (dst); + return dst; } static struct in6_cgareq * in6_cgareq_to_native(void *data, int is64, struct 
in6_cgareq *dst) { #if defined(__LP64__) - if (is64) - bcopy(data, dst, sizeof (*dst)); - else + if (is64) { + bcopy(data, dst, sizeof(*dst)); + } else { in6_cgareq_32_to_64((struct in6_cgareq_32 *)data, (struct in6_cgareq_64 *)dst); + } #else - if (is64) + if (is64) { in6_cgareq_64_to_32((struct in6_cgareq_64 *)data, (struct in6_cgareq_32 *)dst); - else - bcopy(data, dst, sizeof (*dst)); + } else { + bcopy(data, dst, sizeof(*dst)); + } #endif /* __LP64__ */ - return (dst); + return dst; } static __attribute__((noinline)) int @@ -599,19 +613,21 @@ in6ctl_associd(struct socket *so, u_long cmd, caddr_t data) VERIFY(so != NULL); switch (cmd) { - case SIOCGASSOCIDS32: { /* struct so_aidreq32 */ - bcopy(data, &u.a32, sizeof (u.a32)); + case SIOCGASSOCIDS32: { /* struct so_aidreq32 */ + bcopy(data, &u.a32, sizeof(u.a32)); error = in6_getassocids(so, &u.a32.sar_cnt, u.a32.sar_aidp); - if (error == 0) - bcopy(&u.a32, data, sizeof (u.a32)); + if (error == 0) { + bcopy(&u.a32, data, sizeof(u.a32)); + } break; } - case SIOCGASSOCIDS64: { /* struct so_aidreq64 */ - bcopy(data, &u.a64, sizeof (u.a64)); + case SIOCGASSOCIDS64: { /* struct so_aidreq64 */ + bcopy(data, &u.a64, sizeof(u.a64)); error = in6_getassocids(so, &u.a64.sar_cnt, u.a64.sar_aidp); - if (error == 0) - bcopy(&u.a64, data, sizeof (u.a64)); + if (error == 0) { + bcopy(&u.a64, data, sizeof(u.a64)); + } break; } @@ -620,7 +636,7 @@ in6ctl_associd(struct socket *so, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -635,21 +651,23 @@ in6ctl_connid(struct socket *so, u_long cmd, caddr_t data) VERIFY(so != NULL); switch (cmd) { - case SIOCGCONNIDS32: { /* struct so_cidreq32 */ - bcopy(data, &u.c32, sizeof (u.c32)); + case SIOCGCONNIDS32: { /* struct so_cidreq32 */ + bcopy(data, &u.c32, sizeof(u.c32)); error = in6_getconnids(so, u.c32.scr_aid, &u.c32.scr_cnt, u.c32.scr_cidp); - if (error == 0) - bcopy(&u.c32, data, sizeof (u.c32)); + if (error == 0) { + bcopy(&u.c32, data, sizeof(u.c32)); + } break; } - case SIOCGCONNIDS64: { /* struct so_cidreq64 */ - bcopy(data, &u.c64, sizeof (u.c64)); + case SIOCGCONNIDS64: { /* struct so_cidreq64 */ + bcopy(data, &u.c64, sizeof(u.c64)); error = in6_getconnids(so, u.c64.scr_aid, &u.c64.scr_cnt, u.c64.scr_cidp); - if (error == 0) - bcopy(&u.c64, data, sizeof (u.c64)); + if (error == 0) { + bcopy(&u.c64, data, sizeof(u.c64)); + } break; } @@ -658,7 +676,7 @@ in6ctl_connid(struct socket *so, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -673,27 +691,29 @@ in6ctl_conninfo(struct socket *so, u_long cmd, caddr_t data) VERIFY(so != NULL); switch (cmd) { - case SIOCGCONNINFO32: { /* struct so_cinforeq32 */ - bcopy(data, &u.ci32, sizeof (u.ci32)); + case SIOCGCONNINFO32: { /* struct so_cinforeq32 */ + bcopy(data, &u.ci32, sizeof(u.ci32)); error = in6_getconninfo(so, u.ci32.scir_cid, &u.ci32.scir_flags, &u.ci32.scir_ifindex, &u.ci32.scir_error, u.ci32.scir_src, &u.ci32.scir_src_len, u.ci32.scir_dst, &u.ci32.scir_dst_len, &u.ci32.scir_aux_type, u.ci32.scir_aux_data, &u.ci32.scir_aux_len); - if (error == 0) - bcopy(&u.ci32, data, sizeof (u.ci32)); + if (error == 0) { + bcopy(&u.ci32, data, sizeof(u.ci32)); + } break; } - case SIOCGCONNINFO64: { /* struct so_cinforeq64 */ - bcopy(data, &u.ci64, sizeof (u.ci64)); + case SIOCGCONNINFO64: { /* struct so_cinforeq64 */ + bcopy(data, &u.ci64, sizeof(u.ci64)); error = in6_getconninfo(so, u.ci64.scir_cid, &u.ci64.scir_flags, 
&u.ci64.scir_ifindex, &u.ci64.scir_error, u.ci64.scir_src, &u.ci64.scir_src_len, u.ci64.scir_dst, &u.ci64.scir_dst_len, &u.ci64.scir_aux_type, u.ci64.scir_aux_data, &u.ci64.scir_aux_len); - if (error == 0) - bcopy(&u.ci64, data, sizeof (u.ci64)); + if (error == 0) { + bcopy(&u.ci64, data, sizeof(u.ci64)); + } break; } @@ -702,7 +722,7 @@ in6ctl_conninfo(struct socket *so, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -715,8 +735,8 @@ in6ctl_llstart(struct ifnet *ifp, u_long cmd, caddr_t data) VERIFY(ifp != NULL); switch (cmd) { - case SIOCLL_START_32: /* struct in6_aliasreq_32 */ - case SIOCLL_START_64: /* struct in6_aliasreq_64 */ + case SIOCLL_START_32: /* struct in6_aliasreq_32 */ + case SIOCLL_START_64: /* struct in6_aliasreq_64 */ is64 = (cmd == SIOCLL_START_64); /* * Convert user ifra to the kernel form, when appropriate. @@ -735,13 +755,14 @@ in6ctl_llstart(struct ifnet *ifp, u_long cmd, caddr_t data) /* Only check ifra_dstaddr if valid */ (ifra->ifra_dstaddr.sin6_len == 0 || ifra->ifra_dstaddr.sin6_family == AF_INET6)) { - /* some interfaces may provide LinkLocal addresses */ + /* some interfaces may provide LinkLocal addresses */ error = in6_ifattach_aliasreq(ifp, NULL, ifra); } else { error = in6_ifattach_aliasreq(ifp, NULL, NULL); } - if (error == 0) + if (error == 0) { in6_if_up_dad_start(ifp); + } break; default: @@ -749,7 +770,7 @@ in6ctl_llstart(struct ifnet *ifp, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } static __attribute__((noinline)) int @@ -770,11 +791,11 @@ in6ctl_llstop(struct ifnet *ifp) } IFA_LOCK(&ia->ia_ifa); if (IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr)) { - IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for us */ + IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for us */ IFA_UNLOCK(&ia->ia_ifa); lck_rw_done(&in6_ifaddr_rwlock); in6_purgeaddr(&ia->ia_ifa); - IFA_REMREF(&ia->ia_ifa); /* for us */ + IFA_REMREF(&ia->ia_ifa); /* for us */ lck_rw_lock_exclusive(&in6_ifaddr_rwlock); /* * Purging the address caused in6_ifaddr_rwlock @@ -806,7 +827,7 @@ in6ctl_llstop(struct ifnet *ifp) lck_mtx_unlock(nd6_mutex); } - return (0); + return 0; } /* @@ -821,8 +842,8 @@ in6ctl_cgastart(struct ifnet *ifp, u_long cmd, caddr_t data) VERIFY(ifp != NULL); switch (cmd) { - case SIOCLL_CGASTART_32: /* struct in6_cgareq_32 */ - case SIOCLL_CGASTART_64: /* struct in6_cgareq_64 */ + case SIOCLL_CGASTART_32: /* struct in6_cgareq_32 */ + case SIOCLL_CGASTART_64: /* struct in6_cgareq_64 */ is64 = (cmd == SIOCLL_CGASTART_64); /* * Convert user cgareq to the kernel form, when appropriate. @@ -839,8 +860,9 @@ in6ctl_cgastart(struct ifnet *ifp, u_long cmd, caddr_t data) * need it. 
*/ error = in6_ifattach_llcgareq(ifp, &llcgasr); - if (error == 0) + if (error == 0) { in6_if_up_dad_start(ifp); + } break; default: @@ -848,7 +870,7 @@ in6ctl_cgastart(struct ifnet *ifp, u_long cmd, caddr_t data) /* NOTREACHED */ } - return (error); + return error; } /* @@ -865,20 +887,22 @@ in6ctl_gifaddr(struct ifnet *ifp, struct in6_ifaddr *ia, u_long cmd, VERIFY(ifp != NULL); - if (ia == NULL) - return (EADDRNOTAVAIL); + if (ia == NULL) { + return EADDRNOTAVAIL; + } switch (cmd) { - case SIOCGIFADDR_IN6: /* struct in6_ifreq */ + case SIOCGIFADDR_IN6: /* struct in6_ifreq */ IFA_LOCK(&ia->ia_ifa); - bcopy(&ia->ia_addr, &addr, sizeof (addr)); + bcopy(&ia->ia_addr, &addr, sizeof(addr)); IFA_UNLOCK(&ia->ia_ifa); - if ((error = sa6_recoverscope(&addr, TRUE)) != 0) + if ((error = sa6_recoverscope(&addr, TRUE)) != 0) { break; - bcopy(&addr, &ifr->ifr_addr, sizeof (addr)); + } + bcopy(&addr, &ifr->ifr_addr, sizeof(addr)); break; - case SIOCGIFDSTADDR_IN6: /* struct in6_ifreq */ + case SIOCGIFDSTADDR_IN6: /* struct in6_ifreq */ if (!(ifp->if_flags & IFF_POINTOPOINT)) { error = EINVAL; break; @@ -888,11 +912,12 @@ in6ctl_gifaddr(struct ifnet *ifp, struct in6_ifaddr *ia, u_long cmd, * an error? */ IFA_LOCK(&ia->ia_ifa); - bcopy(&ia->ia_dstaddr, &addr, sizeof (addr)); + bcopy(&ia->ia_dstaddr, &addr, sizeof(addr)); IFA_UNLOCK(&ia->ia_ifa); - if ((error = sa6_recoverscope(&addr, TRUE)) != 0) + if ((error = sa6_recoverscope(&addr, TRUE)) != 0) { break; - bcopy(&addr, &ifr->ifr_dstaddr, sizeof (addr)); + } + bcopy(&addr, &ifr->ifr_dstaddr, sizeof(addr)); break; default: @@ -900,7 +925,7 @@ in6ctl_gifaddr(struct ifnet *ifp, struct in6_ifaddr *ia, u_long cmd, /* NOTREACHED */ } - return (error); + return error; } /* @@ -917,29 +942,29 @@ in6ctl_gifstat(struct ifnet *ifp, u_long cmd, struct in6_ifreq *ifr) index = ifp->if_index; switch (cmd) { - case SIOCGIFSTAT_IN6: /* struct in6_ifreq */ + case SIOCGIFSTAT_IN6: /* struct in6_ifreq */ /* N.B.: if_inet6data is never freed once set. */ if (IN6_IFEXTRA(ifp) == NULL) { /* return (EAFNOSUPPORT)? */ bzero(&ifr->ifr_ifru.ifru_stat, - sizeof (ifr->ifr_ifru.ifru_stat)); + sizeof(ifr->ifr_ifru.ifru_stat)); } else { bcopy(&IN6_IFEXTRA(ifp)->in6_ifstat, &ifr->ifr_ifru.ifru_stat, - sizeof (ifr->ifr_ifru.ifru_stat)); + sizeof(ifr->ifr_ifru.ifru_stat)); } break; - case SIOCGIFSTAT_ICMP6: /* struct in6_ifreq */ + case SIOCGIFSTAT_ICMP6: /* struct in6_ifreq */ /* N.B.: if_inet6data is never freed once set. */ if (IN6_IFEXTRA(ifp) == NULL) { /* return (EAFNOSUPPORT)? 
*/ bzero(&ifr->ifr_ifru.ifru_icmp6stat, - sizeof (ifr->ifr_ifru.ifru_icmp6stat)); + sizeof(ifr->ifr_ifru.ifru_icmp6stat)); } else { bcopy(&IN6_IFEXTRA(ifp)->icmp6_ifstat, &ifr->ifr_ifru.ifru_icmp6stat, - sizeof (ifr->ifr_ifru.ifru_icmp6stat)); + sizeof(ifr->ifr_ifru.ifru_icmp6stat)); } break; @@ -948,7 +973,7 @@ in6ctl_gifstat(struct ifnet *ifp, u_long cmd, struct in6_ifreq *ifr) /* NOTREACHED */ } - return (error); + return error; } /* @@ -965,44 +990,45 @@ in6ctl_alifetime(struct in6_ifaddr *ia, u_long cmd, struct in6_ifreq *ifr, struct timeval caltime; int error = 0; - if (ia == NULL) - return (EADDRNOTAVAIL); + if (ia == NULL) { + return EADDRNOTAVAIL; + } switch (cmd) { - case SIOCGIFALIFETIME_IN6: /* struct in6_ifreq */ + case SIOCGIFALIFETIME_IN6: /* struct in6_ifreq */ IFA_LOCK(&ia->ia_ifa); /* retrieve time as calendar time (last arg is 1) */ in6ifa_getlifetime(ia, &ia6_lt, 1); if (p64) { struct in6_addrlifetime_64 lt; - bzero(&lt, sizeof (lt)); + bzero(&lt, sizeof(lt)); lt.ia6t_expire = ia6_lt.ia6t_expire; lt.ia6t_preferred = ia6_lt.ia6t_preferred; lt.ia6t_vltime = ia6_lt.ia6t_vltime; lt.ia6t_pltime = ia6_lt.ia6t_pltime; - bcopy(&lt, &ifr->ifr_ifru.ifru_lifetime, sizeof (lt)); + bcopy(&lt, &ifr->ifr_ifru.ifru_lifetime, sizeof(lt)); } else { struct in6_addrlifetime_32 lt; - bzero(&lt, sizeof (lt)); + bzero(&lt, sizeof(lt)); lt.ia6t_expire = (uint32_t)ia6_lt.ia6t_expire; lt.ia6t_preferred = (uint32_t)ia6_lt.ia6t_preferred; lt.ia6t_vltime = (uint32_t)ia6_lt.ia6t_vltime; lt.ia6t_pltime = (uint32_t)ia6_lt.ia6t_pltime; - bcopy(&lt, &ifr->ifr_ifru.ifru_lifetime, sizeof (lt)); + bcopy(&lt, &ifr->ifr_ifru.ifru_lifetime, sizeof(lt)); } IFA_UNLOCK(&ia->ia_ifa); break; - case SIOCSIFALIFETIME_IN6: /* struct in6_ifreq */ + case SIOCSIFALIFETIME_IN6: /* struct in6_ifreq */ getmicrotime(&caltime); /* sanity for overflow - beware unsigned */ if (p64) { struct in6_addrlifetime_64 lt; - bcopy(&ifr->ifr_ifru.ifru_lifetime, &lt, sizeof (lt)); + bcopy(&ifr->ifr_ifru.ifru_lifetime, &lt, sizeof(lt)); if (lt.ia6t_vltime != ND6_INFINITE_LIFETIME && lt.ia6t_vltime + caltime.tv_sec < caltime.tv_sec) { error = EINVAL; @@ -1016,7 +1042,7 @@ in6ctl_alifetime(struct in6_ifaddr *ia, u_long cmd, struct in6_ifreq *ifr, } else { struct in6_addrlifetime_32 lt; - bcopy(&ifr->ifr_ifru.ifru_lifetime, &lt, sizeof (lt)); + bcopy(&ifr->ifr_ifru.ifru_lifetime, &lt, sizeof(lt)); if (lt.ia6t_vltime != ND6_INFINITE_LIFETIME && lt.ia6t_vltime + caltime.tv_sec < caltime.tv_sec) { error = EINVAL; @@ -1033,7 +1059,7 @@ in6ctl_alifetime(struct in6_ifaddr *ia, u_long cmd, struct in6_ifreq *ifr, if (p64) { struct in6_addrlifetime_64 lt; - bcopy(&ifr->ifr_ifru.ifru_lifetime, &lt, sizeof (lt)); + bcopy(&ifr->ifr_ifru.ifru_lifetime, &lt, sizeof(lt)); ia6_lt.ia6t_expire = lt.ia6t_expire; ia6_lt.ia6t_preferred = lt.ia6t_preferred; ia6_lt.ia6t_vltime = lt.ia6t_vltime; @@ -1041,22 +1067,24 @@ in6ctl_alifetime(struct in6_ifaddr *ia, u_long cmd, struct in6_ifreq *ifr, } else { struct in6_addrlifetime_32 lt; - bcopy(&ifr->ifr_ifru.ifru_lifetime, &lt, sizeof (lt)); + bcopy(&ifr->ifr_ifru.ifru_lifetime, &lt, sizeof(lt)); ia6_lt.ia6t_expire = (uint32_t)lt.ia6t_expire; ia6_lt.ia6t_preferred = (uint32_t)lt.ia6t_preferred; ia6_lt.ia6t_vltime = lt.ia6t_vltime; ia6_lt.ia6t_pltime = lt.ia6t_pltime; } /* for sanity */ - if (ia6_lt.ia6t_vltime != ND6_INFINITE_LIFETIME) + if (ia6_lt.ia6t_vltime != ND6_INFINITE_LIFETIME) { ia6_lt.ia6t_expire = timenow + ia6_lt.ia6t_vltime; - else + } else { ia6_lt.ia6t_expire = 0; + } - if (ia6_lt.ia6t_pltime != ND6_INFINITE_LIFETIME) + if
(ia6_lt.ia6t_pltime != ND6_INFINITE_LIFETIME) { ia6_lt.ia6t_preferred = timenow + ia6_lt.ia6t_pltime; - else + } else { ia6_lt.ia6t_preferred = 0; + } in6ifa_setlifetime(ia, &ia6_lt); IFA_UNLOCK(&ia->ia_ifa); @@ -1067,7 +1095,7 @@ in6ctl_alifetime(struct in6_ifaddr *ia, u_long cmd, struct in6_ifreq *ifr, /* NOTREACHED */ } - return (error); + return error; } static int @@ -1078,8 +1106,9 @@ in6ctl_clat46start(struct ifnet *ifp) struct in6_ifaddr *ia6 = NULL; int error = 0; - if (ifp == lo_ifp) - return (EINVAL); + if (ifp == lo_ifp) { + return EINVAL; + } /* * Traverse the list of prefixes and find the first non-linklocal * prefix on the interface. @@ -1149,10 +1178,10 @@ in6ctl_clat46start(struct ifnet *ifp) } NDPR_REMREF(pr); } - return (error); + return error; } -#define ifa2ia6(ifa) ((struct in6_ifaddr *)(void *)(ifa)) +#define ifa2ia6(ifa) ((struct in6_ifaddr *)(void *)(ifa)) /* * Generic INET6 control operations (ioctl's). @@ -1187,35 +1216,37 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * ioctls which don't require ifp, may require socket. */ switch (cmd) { - case SIOCAADDRCTL_POLICY: /* struct in6_addrpolicy */ - case SIOCDADDRCTL_POLICY: /* struct in6_addrpolicy */ - if (!privileged) - return (EPERM); - return (in6_src_ioctl(cmd, data)); - /* NOTREACHED */ - - case SIOCDRADD_IN6_32: /* struct in6_defrouter_32 */ - case SIOCDRADD_IN6_64: /* struct in6_defrouter_64 */ - case SIOCDRDEL_IN6_32: /* struct in6_defrouter_32 */ - case SIOCDRDEL_IN6_64: /* struct in6_defrouter_64 */ - if (!privileged) - return (EPERM); - return (defrtrlist_ioctl(cmd, data)); - /* NOTREACHED */ - - case SIOCGASSOCIDS32: /* struct so_aidreq32 */ - case SIOCGASSOCIDS64: /* struct so_aidreq64 */ - return (in6ctl_associd(so, cmd, data)); - /* NOTREACHED */ - - case SIOCGCONNIDS32: /* struct so_cidreq32 */ - case SIOCGCONNIDS64: /* struct so_cidreq64 */ - return (in6ctl_connid(so, cmd, data)); - /* NOTREACHED */ + case SIOCAADDRCTL_POLICY: /* struct in6_addrpolicy */ + case SIOCDADDRCTL_POLICY: /* struct in6_addrpolicy */ + if (!privileged) { + return EPERM; + } + return in6_src_ioctl(cmd, data); + /* NOTREACHED */ - case SIOCGCONNINFO32: /* struct so_cinforeq32 */ - case SIOCGCONNINFO64: /* struct so_cinforeq64 */ - return (in6ctl_conninfo(so, cmd, data)); + case SIOCDRADD_IN6_32: /* struct in6_defrouter_32 */ + case SIOCDRADD_IN6_64: /* struct in6_defrouter_64 */ + case SIOCDRDEL_IN6_32: /* struct in6_defrouter_32 */ + case SIOCDRDEL_IN6_64: /* struct in6_defrouter_64 */ + if (!privileged) { + return EPERM; + } + return defrtrlist_ioctl(cmd, data); + /* NOTREACHED */ + + case SIOCGASSOCIDS32: /* struct so_aidreq32 */ + case SIOCGASSOCIDS64: /* struct so_aidreq64 */ + return in6ctl_associd(so, cmd, data); + /* NOTREACHED */ + + case SIOCGCONNIDS32: /* struct so_cidreq32 */ + case SIOCGCONNIDS64: /* struct so_cidreq64 */ + return in6ctl_connid(so, cmd, data); + /* NOTREACHED */ + + case SIOCGCONNINFO32: /* struct so_cinforeq32 */ + case SIOCGCONNINFO64: /* struct so_cinforeq64 */ + return in6ctl_conninfo(so, cmd, data); /* NOTREACHED */ } @@ -1223,8 +1254,9 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * The rest of ioctls require ifp; reject if we don't have one; * return ENXIO to be consistent with ifioctl(). 
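One subtlety worth noting from the in6ctl_alifetime hunks above: lt.ia6t_vltime + caltime.tv_sec < caltime.tv_sec is the usual unsigned wraparound guard, after which the absolute expiry becomes now + vltime, or 0 when the lifetime is infinite. A standalone sketch of both steps (using 0xffffffff for ND6_INFINITE_LIFETIME, an assumption made for illustration):

#include <stdint.h>
#include <stdio.h>

#define INFINITE_LIFETIME 0xffffffffU   /* assumed ND6_INFINITE_LIFETIME */

/*
 * Validate a requested valid lifetime against the current time and
 * compute the absolute expiry. Returns 0 on success, -1 if the
 * addition would wrap (the kernel path reports EINVAL).
 */
static int
lifetime_to_expire(uint64_t now, uint64_t vltime, uint64_t *expire)
{
	if (vltime != INFINITE_LIFETIME && vltime + now < now) {
		return -1;      /* unsigned overflow: reject */
	}
	*expire = (vltime == INFINITE_LIFETIME) ? 0 : now + vltime;
	return 0;
}

int
main(void)
{
	uint64_t exp;

	if (lifetime_to_expire(1000, 3600, &exp) == 0) {
		printf("expires at %llu\n", (unsigned long long)exp); /* 4600 */
	}
	return 0;
}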
*/ - if (ifp == NULL) - return (ENXIO); + if (ifp == NULL) { + return ENXIO; + } /* * Unlock the socket since ifnet_ioctl() may be invoked by @@ -1240,7 +1272,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * ioctls which require ifp but not interface address. */ switch (cmd) { - case SIOCAUTOCONF_START: /* struct in6_ifreq */ + case SIOCAUTOCONF_START: /* struct in6_ifreq */ if (!privileged) { error = EPERM; goto done; @@ -1248,7 +1280,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = in6_autoconf(ifp, TRUE); goto done; - case SIOCAUTOCONF_STOP: /* struct in6_ifreq */ + case SIOCAUTOCONF_STOP: /* struct in6_ifreq */ if (!privileged) { error = EPERM; goto done; @@ -1256,8 +1288,8 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = in6_autoconf(ifp, FALSE); goto done; - case SIOCLL_START_32: /* struct in6_aliasreq_32 */ - case SIOCLL_START_64: /* struct in6_aliasreq_64 */ + case SIOCLL_START_32: /* struct in6_aliasreq_32 */ + case SIOCLL_START_64: /* struct in6_aliasreq_64 */ if (!privileged) { error = EPERM; goto done; @@ -1265,7 +1297,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = in6ctl_llstart(ifp, cmd, data); goto done; - case SIOCLL_STOP: /* struct in6_ifreq */ + case SIOCLL_STOP: /* struct in6_ifreq */ if (!privileged) { error = EPERM; goto done; @@ -1273,17 +1305,18 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = in6ctl_llstop(ifp); goto done; - case SIOCCLAT46_START: /* struct in6_ifreq */ + case SIOCCLAT46_START: /* struct in6_ifreq */ if (!privileged) { error = EPERM; goto done; } error = in6ctl_clat46start(ifp); - if (error == 0) + if (error == 0) { ifp->if_eflags |= IFEF_CLAT46; + } goto done; - case SIOCCLAT46_STOP: /* struct in6_ifreq */ + case SIOCCLAT46_STOP: /* struct in6_ifreq */ if (!privileged) { error = EPERM; goto done; @@ -1297,19 +1330,19 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, */ ifp->if_eflags &= ~IFEF_CLAT46; goto done; - case SIOCSETROUTERMODE_IN6: /* struct in6_ifreq */ + case SIOCSETROUTERMODE_IN6: /* struct in6_ifreq */ if (!privileged) { error = EPERM; goto done; } bcopy(&((struct in6_ifreq *)(void *)data)->ifr_intval, - &intval, sizeof (intval)); + &intval, sizeof(intval)); error = in6_setrouter(ifp, intval); goto done; - case SIOCPROTOATTACH_IN6_32: /* struct in6_aliasreq_32 */ - case SIOCPROTOATTACH_IN6_64: /* struct in6_aliasreq_64 */ + case SIOCPROTOATTACH_IN6_32: /* struct in6_aliasreq_32 */ + case SIOCPROTOATTACH_IN6_64: /* struct in6_aliasreq_64 */ if (!privileged) { error = EPERM; goto done; @@ -1317,7 +1350,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = in6_domifattach(ifp); goto done; - case SIOCPROTODETACH_IN6: /* struct in6_ifreq */ + case SIOCPROTODETACH_IN6: /* struct in6_ifreq */ if (!privileged) { error = EPERM; goto done; @@ -1325,65 +1358,67 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, /* Cleanup interface routes and addresses */ in6_purgeif(ifp); - if ((error = proto_unplumb(PF_INET6, ifp))) + if ((error = proto_unplumb(PF_INET6, ifp))) { log(LOG_ERR, "SIOCPROTODETACH_IN6: %s error=%d\n", if_name(ifp), error); + } goto done; - case SIOCSNDFLUSH_IN6: /* struct in6_ifreq */ - case SIOCSPFXFLUSH_IN6: /* struct in6_ifreq */ - case SIOCSRTRFLUSH_IN6: /* struct in6_ifreq */ - case SIOCSDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */ - case 
-	case SIOCSDEFIFACE_IN6_64:	/* struct in6_ndifreq_64 */
-	case SIOCSIFINFO_FLAGS:		/* struct in6_ndireq */
-	case SIOCGIFCGAPREP_IN6:	/* struct in6_ifreq */
-	case SIOCSIFCGAPREP_IN6:	/* struct in6_ifreq */
+	case SIOCSNDFLUSH_IN6:          /* struct in6_ifreq */
+	case SIOCSPFXFLUSH_IN6:         /* struct in6_ifreq */
+	case SIOCSRTRFLUSH_IN6:         /* struct in6_ifreq */
+	case SIOCSDEFIFACE_IN6_32:      /* struct in6_ndifreq_32 */
+	case SIOCSDEFIFACE_IN6_64:      /* struct in6_ndifreq_64 */
+	case SIOCSIFINFO_FLAGS:         /* struct in6_ndireq */
+	case SIOCGIFCGAPREP_IN6:        /* struct in6_ifreq */
+	case SIOCSIFCGAPREP_IN6:        /* struct in6_ifreq */
 		if (!privileged) {
 			error = EPERM;
 			goto done;
 		}
-		/* FALLTHRU */
-	case OSIOCGIFINFO_IN6:		/* struct in6_ondireq */
-	case SIOCGIFINFO_IN6:		/* struct in6_ondireq */
-	case SIOCGDRLST_IN6_32:		/* struct in6_drlist_32 */
-	case SIOCGDRLST_IN6_64:		/* struct in6_drlist_64 */
-	case SIOCGPRLST_IN6_32:		/* struct in6_prlist_32 */
-	case SIOCGPRLST_IN6_64:		/* struct in6_prlist_64 */
-	case SIOCGNBRINFO_IN6_32:	/* struct in6_nbrinfo_32 */
-	case SIOCGNBRINFO_IN6_64:	/* struct in6_nbrinfo_64 */
-	case SIOCGDEFIFACE_IN6_32:	/* struct in6_ndifreq_32 */
-	case SIOCGDEFIFACE_IN6_64:	/* struct in6_ndifreq_64 */
+	/* FALLTHRU */
+	case OSIOCGIFINFO_IN6:          /* struct in6_ondireq */
+	case SIOCGIFINFO_IN6:           /* struct in6_ondireq */
+	case SIOCGDRLST_IN6_32:         /* struct in6_drlist_32 */
+	case SIOCGDRLST_IN6_64:         /* struct in6_drlist_64 */
+	case SIOCGPRLST_IN6_32:         /* struct in6_prlist_32 */
+	case SIOCGPRLST_IN6_64:         /* struct in6_prlist_64 */
+	case SIOCGNBRINFO_IN6_32:       /* struct in6_nbrinfo_32 */
+	case SIOCGNBRINFO_IN6_64:       /* struct in6_nbrinfo_64 */
+	case SIOCGDEFIFACE_IN6_32:      /* struct in6_ndifreq_32 */
+	case SIOCGDEFIFACE_IN6_64:      /* struct in6_ndifreq_64 */
 		error = nd6_ioctl(cmd, data, ifp);
 		goto done;

-	case SIOCSIFPREFIX_IN6:		/* struct in6_prefixreq (deprecated) */
-	case SIOCDIFPREFIX_IN6:		/* struct in6_prefixreq (deprecated) */
-	case SIOCAIFPREFIX_IN6:		/* struct in6_rrenumreq (deprecated) */
-	case SIOCCIFPREFIX_IN6:		/* struct in6_rrenumreq (deprecated) */
-	case SIOCSGIFPREFIX_IN6:	/* struct in6_rrenumreq (deprecated) */
-	case SIOCGIFPREFIX_IN6:		/* struct in6_prefixreq (deprecated) */
+	case SIOCSIFPREFIX_IN6:         /* struct in6_prefixreq (deprecated) */
+	case SIOCDIFPREFIX_IN6:         /* struct in6_prefixreq (deprecated) */
+	case SIOCAIFPREFIX_IN6:         /* struct in6_rrenumreq (deprecated) */
+	case SIOCCIFPREFIX_IN6:         /* struct in6_rrenumreq (deprecated) */
+	case SIOCSGIFPREFIX_IN6:        /* struct in6_rrenumreq (deprecated) */
+	case SIOCGIFPREFIX_IN6:         /* struct in6_prefixreq (deprecated) */
" "please use ifconfig.\n"); error = EOPNOTSUPP; goto done; - case SIOCSSCOPE6: /* struct in6_ifreq (deprecated) */ - case SIOCGSCOPE6: /* struct in6_ifreq (deprecated) */ - case SIOCGSCOPE6DEF: /* struct in6_ifreq (deprecated) */ + case SIOCSSCOPE6: /* struct in6_ifreq (deprecated) */ + case SIOCGSCOPE6: /* struct in6_ifreq (deprecated) */ + case SIOCGSCOPE6DEF: /* struct in6_ifreq (deprecated) */ error = EOPNOTSUPP; goto done; - - case SIOCLL_CGASTART_32: /* struct in6_cgareq_32 */ - case SIOCLL_CGASTART_64: /* struct in6_cgareq_64 */ - if (!privileged) + + case SIOCLL_CGASTART_32: /* struct in6_cgareq_32 */ + case SIOCLL_CGASTART_64: /* struct in6_cgareq_64 */ + if (!privileged) { error = EPERM; - else + } else { error = in6ctl_cgastart(ifp, cmd, data); + } goto done; - case SIOCGIFSTAT_IN6: /* struct in6_ifreq */ - case SIOCGIFSTAT_ICMP6: /* struct in6_ifreq */ + case SIOCGIFSTAT_IN6: /* struct in6_ifreq */ + case SIOCGIFSTAT_ICMP6: /* struct in6_ifreq */ error = in6ctl_gifstat(ifp, cmd, ifr); goto done; } @@ -1392,9 +1427,9 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, * ioctls which require interface address; obtain sockaddr_in6. */ switch (cmd) { - case SIOCSIFADDR_IN6: /* struct in6_ifreq (deprecated) */ - case SIOCSIFDSTADDR_IN6: /* struct in6_ifreq (deprecated) */ - case SIOCSIFNETMASK_IN6: /* struct in6_ifreq (deprecated) */ + case SIOCSIFADDR_IN6: /* struct in6_ifreq (deprecated) */ + case SIOCSIFDSTADDR_IN6: /* struct in6_ifreq (deprecated) */ + case SIOCSIFNETMASK_IN6: /* struct in6_ifreq (deprecated) */ /* * Since IPv6 allows a node to assign multiple addresses * on a single interface, SIOCSIFxxx ioctls are deprecated. @@ -1403,12 +1438,12 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp, error = EOPNOTSUPP; goto done; - case SIOCAIFADDR_IN6_32: /* struct in6_aliasreq_32 */ - case SIOCAIFADDR_IN6_64: /* struct in6_aliasreq_64 */ + case SIOCAIFADDR_IN6_32: /* struct in6_aliasreq_32 */ + case SIOCAIFADDR_IN6_64: /* struct in6_aliasreq_64 */ if (!privileged) { error = EPERM; goto done; - } + } /* * Convert user ifra to the kernel form, when appropriate. 
 		/*
 		 * Convert user ifra to the kernel form, when appropriate.
 		 * This allows the conversion between different data models
@@ -1417,23 +1452,23 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
 		 */
 		ifra = in6_aliasreq_to_native(data,
 		    (cmd == SIOCAIFADDR_IN6_64), &sifra);
-		bcopy(&ifra->ifra_addr, &sin6, sizeof (sin6));
+		bcopy(&ifra->ifra_addr, &sin6, sizeof(sin6));
 		sa6 = &sin6;
 		break;

-	case SIOCDIFADDR_IN6:		/* struct in6_ifreq */
-	case SIOCSIFALIFETIME_IN6:	/* struct in6_ifreq */
+	case SIOCDIFADDR_IN6:           /* struct in6_ifreq */
+	case SIOCSIFALIFETIME_IN6:      /* struct in6_ifreq */
 		if (!privileged) {
 			error = EPERM;
 			goto done;
 		}
-		/* FALLTHRU */
-	case SIOCGIFADDR_IN6:		/* struct in6_ifreq */
-	case SIOCGIFDSTADDR_IN6:	/* struct in6_ifreq */
-	case SIOCGIFNETMASK_IN6:	/* struct in6_ifreq */
-	case SIOCGIFAFLAG_IN6:		/* struct in6_ifreq */
-	case SIOCGIFALIFETIME_IN6:	/* struct in6_ifreq */
-		bcopy(&ifr->ifr_addr, &sin6, sizeof (sin6));
+	/* FALLTHRU */
+	case SIOCGIFADDR_IN6:           /* struct in6_ifreq */
+	case SIOCGIFDSTADDR_IN6:        /* struct in6_ifreq */
+	case SIOCGIFNETMASK_IN6:        /* struct in6_ifreq */
+	case SIOCGIFAFLAG_IN6:          /* struct in6_ifreq */
+	case SIOCGIFALIFETIME_IN6:      /* struct in6_ifreq */
+		bcopy(&ifr->ifr_addr, &sin6, sizeof(sin6));
 		sa6 = &sin6;
 		break;

 	case SIOCGIFDSTADDR:
@@ -1497,14 +1532,14 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
 	 * SIOCDIFADDR_IN6/SIOCAIFADDR_IN6 specific tests.
 	 */
 	switch (cmd) {
-	case SIOCDIFADDR_IN6:		/* struct in6_ifreq */
+	case SIOCDIFADDR_IN6:           /* struct in6_ifreq */
 		if (ia == NULL) {
 			error = EADDRNOTAVAIL;
 			goto done;
 		}
-		/* FALLTHROUGH */
-	case SIOCAIFADDR_IN6_32:	/* struct in6_aliasreq_32 */
-	case SIOCAIFADDR_IN6_64:	/* struct in6_aliasreq_64 */
+	/* FALLTHROUGH */
+	case SIOCAIFADDR_IN6_32:        /* struct in6_aliasreq_32 */
+	case SIOCAIFADDR_IN6_64:        /* struct in6_aliasreq_64 */
 		VERIFY(sa6 != NULL);
 		/*
 		 * We always require users to specify a valid IPv6 address for
@@ -1512,7 +1547,7 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
 		 * since SIOCDIFADDR_IN6 falls thru above.
 		 */
 		if (sa6->sin6_family != AF_INET6 ||
-		    sa6->sin6_len != sizeof (struct sockaddr_in6)) {
+		    sa6->sin6_len != sizeof(struct sockaddr_in6)) {
 			error = EAFNOSUPPORT;
 			goto done;
 		}
@@ -1523,42 +1558,42 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
 	 * And finally process address-related ioctls.
 	 */
 	switch (cmd) {
-	case SIOCGIFADDR_IN6:		/* struct in6_ifreq */
-		/* This interface is basically deprecated. use SIOCGIFCONF. */
-		/* FALLTHRU */
-	case SIOCGIFDSTADDR_IN6:	/* struct in6_ifreq */
+	case SIOCGIFADDR_IN6:           /* struct in6_ifreq */
+	/* This interface is basically deprecated. use SIOCGIFCONF. */
+	/* FALLTHRU */
+	case SIOCGIFDSTADDR_IN6:        /* struct in6_ifreq */
 		error = in6ctl_gifaddr(ifp, ia, cmd, ifr);
 		break;

-	case SIOCGIFNETMASK_IN6:	/* struct in6_ifreq */
+	case SIOCGIFNETMASK_IN6:        /* struct in6_ifreq */
 		if (ia != NULL) {
 			IFA_LOCK(&ia->ia_ifa);
 			bcopy(&ia->ia_prefixmask, &ifr->ifr_addr,
-			    sizeof (struct sockaddr_in6));
+			    sizeof(struct sockaddr_in6));
 			IFA_UNLOCK(&ia->ia_ifa);
 		} else {
 			error = EADDRNOTAVAIL;
 		}
 		break;

-	case SIOCGIFAFLAG_IN6:		/* struct in6_ifreq */
+	case SIOCGIFAFLAG_IN6:          /* struct in6_ifreq */
 		if (ia != NULL) {
 			IFA_LOCK(&ia->ia_ifa);
 			bcopy(&ia->ia6_flags, &ifr->ifr_ifru.ifru_flags6,
-			    sizeof (ifr->ifr_ifru.ifru_flags6));
+			    sizeof(ifr->ifr_ifru.ifru_flags6));
 			IFA_UNLOCK(&ia->ia_ifa);
 		} else {
 			error = EADDRNOTAVAIL;
 		}
 		break;

-	case SIOCGIFALIFETIME_IN6:	/* struct in6_ifreq */
-	case SIOCSIFALIFETIME_IN6:	/* struct in6_ifreq */
+	case SIOCGIFALIFETIME_IN6:      /* struct in6_ifreq */
+	case SIOCSIFALIFETIME_IN6:      /* struct in6_ifreq */
 		error = in6ctl_alifetime(ia, cmd, ifr, p64);
 		break;

-	case SIOCAIFADDR_IN6_32:	/* struct in6_aliasreq_32 */
-	case SIOCAIFADDR_IN6_64:	/* struct in6_aliasreq_64 */
+	case SIOCAIFADDR_IN6_32:        /* struct in6_aliasreq_32 */
+	case SIOCAIFADDR_IN6_64:        /* struct in6_aliasreq_64 */
 		error = in6ctl_aifaddr(ifp, ifra);
 		break;

@@ -1572,12 +1607,14 @@ in6_control(struct socket *so, u_long cmd, caddr_t data, struct ifnet *ifp,
 	}

done:
-	if (ia != NULL)
+	if (ia != NULL) {
 		IFA_REMREF(&ia->ia_ifa);
-	if (so_unlocked)
+	}
+	if (so_unlocked) {
 		socket_lock(so, 0);
+	}

-	return (error);
+	return error;
 }

 static __attribute__((noinline)) int
@@ -1595,8 +1632,9 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra)
 	if (error == 0) {
 		/* PF_INET6 wasn't previously attached */
 		error = in6_ifattach_aliasreq(ifp, NULL, NULL);
-		if (error != 0)
+		if (error != 0) {
 			goto done;
+		}

 		in6_if_up_dad_start(ifp);
 	} else if (error != EEXIST) {
@@ -1608,14 +1646,16 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra)
 	 * to the list.
 	 */
 	error = in6_update_ifa(ifp, ifra, 0, &ia);
-	if (error != 0)
+	if (error != 0) {
 		goto done;
+	}
 	VERIFY(ia != NULL);

 	/* Now, make the prefix on-link on the interface. */
 	plen = in6_mask2len(&ifra->ifra_prefixmask.sin6_addr, NULL);
-	if (plen == 128)
+	if (plen == 128) {
 		goto done;
+	}

 	/*
 	 * NOTE: We'd rather create the prefix before the address, but we need
@@ -1627,7 +1667,7 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra)
 	 * Convert mask to prefix length (prefixmask has already been validated
 	 * in in6_update_ifa().
 	 */
-	bzero(&pr0, sizeof (pr0));
+	bzero(&pr0, sizeof(pr0));
 	pr0.ndpr_plen = plen;
 	pr0.ndpr_ifp = ifp;
 	pr0.ndpr_prefix = ifra->ifra_addr;
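
in6ctl_aifaddr derives the prefix length from ifra_prefixmask via in6_mask2len and skips prefix creation entirely for a /128. A plain-C sketch of counting a netmask's leading one bits; the kernel helper also validates that the mask is contiguous, which this sketch omits:

    #include <netinet/in.h>

    /* count the leading one bits of an IPv6 netmask, as in6_mask2len() does */
    static int mask_to_plen(const struct in6_addr *mask)
    {
    	int plen = 0;
    	int i;

    	for (i = 0; i < 16; i++) {
    		unsigned char b = mask->s6_addr[i];
    		if (b == 0xff) {
    			plen += 8;
    			continue;
    		}
    		while (b & 0x80) {	/* partial final byte */
    			plen++;
    			b = (unsigned char)(b << 1);
    		}
    		break;
    	}
    	return plen;	/* 128 means a host route; no on-link prefix needed */
    }
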
@@ -1659,8 +1699,9 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra)
 	 * route.
 	 */
 	error = nd6_prelist_add(&pr0, NULL, &pr, FALSE);
-	if (error != 0)
+	if (error != 0) {
 		goto done;
+	}

 	if (pr == NULL) {
 		log(LOG_ERR, "%s: nd6_prelist_add okay, but"
@@ -1679,7 +1720,7 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra)
 		++pr->ndpr_addrcnt;
 		VERIFY(pr->ndpr_addrcnt != 0);
 		ia->ia6_ndpr = pr;
-		NDPR_ADDREF_LOCKED(pr);	/* for addr reference */
+		NDPR_ADDREF_LOCKED(pr); /* for addr reference */

 		/*
 		 * If this is the first autoconf address from the prefix,
@@ -1698,10 +1739,11 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra)
 	if (addtmp) {
 		int e;
 		e = in6_tmpifadd(ia, 1);
-		if (e != 0)
+		if (e != 0) {
 			log(LOG_NOTICE, "%s: failed to create a"
 			    " temporary address, error=%d\n",
 			    __func__, e);
+		}
 	}

 	/*
@@ -1716,9 +1758,10 @@ in6ctl_aifaddr(struct ifnet *ifp, struct in6_aliasreq *ifra)
 		NDPR_REMREF(pr);

done:
-	if (ia != NULL)
+	if (ia != NULL) {
 		IFA_REMREF(&ia->ia_ifa);
-	return (error);
+	}
+	return error;
 }

 static __attribute__((noinline)) void
@@ -1740,7 +1783,7 @@ in6ctl_difaddr(struct ifnet *ifp, struct in6_ifaddr *ia)
 	 * Note that in6_purgeaddr() will decrement ndpr_addrcnt.
 	 */
 	IFA_LOCK(&ia->ia_ifa);
-	bzero(&pr0, sizeof (pr0));
+	bzero(&pr0, sizeof(pr0));
 	pr0.ndpr_ifp = ifp;
 	pr0.ndpr_plen = in6_mask2len(&ia->ia_prefixmask.sin6_addr, NULL);
 	if (pr0.ndpr_plen == 128) {
@@ -1780,8 +1823,9 @@ in6_autoconf(struct ifnet *ifp, int enable)

 	VERIFY(ifp != NULL);

-	if (ifp->if_flags & IFF_LOOPBACK)
-		return (EINVAL);
+	if (ifp->if_flags & IFF_LOOPBACK) {
+		return EINVAL;
+	}

 	if (enable) {
 		/*
@@ -1815,11 +1859,11 @@ in6_autoconf(struct ifnet *ifp, int enable)
 		}
 		IFA_LOCK(&ia->ia_ifa);
 		if (ia->ia6_flags & IN6_IFF_AUTOCONF) {
-			IFA_ADDREF_LOCKED(&ia->ia_ifa);	/* for us */
+			IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for us */
 			IFA_UNLOCK(&ia->ia_ifa);
 			lck_rw_done(&in6_ifaddr_rwlock);
 			in6_purgeaddr(&ia->ia_ifa);
-			IFA_REMREF(&ia->ia_ifa);	/* for us */
+			IFA_REMREF(&ia->ia_ifa);        /* for us */
 			lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
 			/*
 			 * Purging the address caused in6_ifaddr_rwlock
@@ -1835,7 +1879,7 @@ in6_autoconf(struct ifnet *ifp, int enable)
 		}
 		lck_rw_done(&in6_ifaddr_rwlock);
 	}
-	return (error);
+	return error;
 }

 /*
@@ -1848,8 +1892,9 @@ in6_setrouter(struct ifnet *ifp, int enable)
 {
 	VERIFY(ifp != NULL);

-	if (ifp->if_flags & IFF_LOOPBACK)
-		return (ENODEV);
+	if (ifp->if_flags & IFF_LOOPBACK) {
+		return ENODEV;
+	}

 	if (enable) {
 		struct nd_ifinfo *ndi = NULL;
@@ -1882,7 +1927,7 @@ in6_setrouter(struct ifnet *ifp, int enable)

 	if_allmulti(ifp, enable);

-	return (in6_autoconf(ifp, FALSE));
+	return in6_autoconf(ifp, FALSE);
 }

 static int
@@ -1895,29 +1940,31 @@ in6_to_kamescope(struct sockaddr_in6 *sin6, struct ifnet *ifp)

 	tmp = *sin6;
 	error = in6_recoverscope(&tmp, &sin6->sin6_addr, ifp);
-	if (error != 0)
-		return (error);
+	if (error != 0) {
+		return error;
+	}

 	id = in6_addr2scopeid(ifp, &tmp.sin6_addr);
-	if (tmp.sin6_scope_id == 0)
+	if (tmp.sin6_scope_id == 0) {
 		tmp.sin6_scope_id = id;
-	else if (tmp.sin6_scope_id != id)
-		return (EINVAL); /* scope ID mismatch. */
-
+	} else if (tmp.sin6_scope_id != id) {
+		return EINVAL; /* scope ID mismatch. */
+	}
 	error = in6_embedscope(&tmp.sin6_addr, &tmp, NULL, NULL, NULL);
-	if (error != 0)
-		return (error);
+	if (error != 0) {
+		return error;
+	}

 	tmp.sin6_scope_id = 0;
 	*sin6 = tmp;
-	return (0);
+	return 0;
 }
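
in6_to_kamescope normalizes sin6_scope_id and then calls in6_embedscope, which stores the zone index inside the address bytes in KAME fashion. A userspace sketch of that convention, assuming the s6_addr16 accessor is exposed by the headers; in6_ip6_to_sockaddr later in this file performs the matching recovery:

    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <stdint.h>

    /* KAME-internal form: the zone index of a scoped (e.g. link-local)
     * address lives in the second 16-bit word of the address. */
    static void embed_zone(struct in6_addr *a, uint16_t zone)
    {
    	a->s6_addr16[1] = htons(zone);
    }

    static uint16_t recover_zone(struct in6_addr *a)
    {
    	uint16_t zone = ntohs(a->s6_addr16[1]);

    	a->s6_addr16[1] = 0;	/* back to the wire form */
    	return zone;
    }
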
 /*
  * When the address is being configured we should clear out certain flags
  * coming in from the caller.
  */
-#define IN6_IFF_CLR_ADDR_FLAG_MASK (~(IN6_IFF_DEPRECATED | IN6_IFF_DETACHED | IN6_IFF_DUPLICATED))
+#define IN6_IFF_CLR_ADDR_FLAG_MASK      (~(IN6_IFF_DEPRECATED | IN6_IFF_DETACHED | IN6_IFF_DUPLICATED))

 static int
 in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
@@ -1958,10 +2005,11 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 		 * Do not delay sending neighbor solicitations when using optimistic
 		 * duplicate address detection, c.f. RFC 4429.
 		 */
-		if (ia->ia6_flags & IN6_IFF_OPTIMISTIC)
+		if (ia->ia6_flags & IN6_IFF_OPTIMISTIC) {
 			ifaupflags &= ~IN6_IFAUPDATE_DADDELAY;
-		else
+		} else {
 			ifaupflags |= IN6_IFAUPDATE_DADDELAY;
+		}
 	} else {
 		/*
 		 * If the interface has been marked to not perform
@@ -1973,9 +2021,8 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)

 	/* Join necessary multicast groups */
 	if ((ifp->if_flags & IFF_MULTICAST) != 0) {
-
 		/* join solicited multicast addr for new host id */
-		bzero(&llsol, sizeof (struct in6_addr));
+		bzero(&llsol, sizeof(struct in6_addr));
 		llsol.s6_addr32[0] = IPV6_ADDR_INT32_MLL;
 		llsol.s6_addr32[1] = 0;
 		llsol.s6_addr32[2] = htonl(1);
@@ -2013,22 +2060,22 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 		LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain);
 		IFA_UNLOCK(ifa);

-		bzero(&mltmask, sizeof (mltmask));
-		mltmask.sin6_len = sizeof (struct sockaddr_in6);
+		bzero(&mltmask, sizeof(mltmask));
+		mltmask.sin6_len = sizeof(struct sockaddr_in6);
 		mltmask.sin6_family = AF_INET6;
 		mltmask.sin6_addr = in6mask32;
-#define	MLTMASK_LEN 4	/* mltmask's masklen (=32bit=4octet) */
+#define MLTMASK_LEN 4   /* mltmask's masklen (=32bit=4octet) */

 		/*
 		 * join link-local all-nodes address
 		 */
-		bzero(&mltaddr, sizeof (mltaddr));
-		mltaddr.sin6_len = sizeof (struct sockaddr_in6);
+		bzero(&mltaddr, sizeof(mltaddr));
+		mltaddr.sin6_len = sizeof(struct sockaddr_in6);
 		mltaddr.sin6_family = AF_INET6;
 		mltaddr.sin6_addr = in6addr_linklocal_allnodes;
-		if ((error = in6_setscope(&mltaddr.sin6_addr, ifp, NULL)) != 0)
+		if ((error = in6_setscope(&mltaddr.sin6_addr, ifp, NULL)) != 0) {
 			goto unwind; /* XXX: should not fail */
-
+		}
 		/*
 		 * XXX: do we really need this automatic routes?
 		 * We should probably reconsider this stuff. Most applications
@@ -2050,8 +2097,9 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 			    (struct sockaddr *)&ia->ia_addr,
 			    (struct sockaddr *)&mltmask, RTF_UP | RTF_CLONING,
 			    NULL, ia->ia_ifp->if_index);
-			if (error)
+			if (error) {
 				goto unwind;
+			}
 		} else {
 			rtfree(rt);
 		}
@@ -2072,7 +2120,7 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 		/*
 		 * join node information group address
 		 */
-#define hostnamelen	strlen(hostname)
+#define hostnamelen     strlen(hostname)
 		delay = 0;
 		if ((ifaupflags & IN6_IFAUPDATE_DADDELAY)) {
 			/*
@@ -2107,8 +2155,9 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 		 * (ff01::1%ifN, and ff01::%ifN/32)
 		 */
 		mltaddr.sin6_addr = in6addr_nodelocal_allnodes;
-		if ((error = in6_setscope(&mltaddr.sin6_addr, ifp, NULL)) != 0)
+		if ((error = in6_setscope(&mltaddr.sin6_addr, ifp, NULL)) != 0) {
 			goto unwind; /* XXX: should not fail */
+		}
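
The llsol block above builds the solicited-node multicast group that neighbor discovery requires: the fixed prefix ff02::1:ff00:0/104 plus the low 24 bits of the configured address. An equivalent standalone sketch:

    #include <netinet/in.h>
    #include <string.h>

    /* solicited-node multicast group (RFC 4291) for an address */
    static struct in6_addr solicited_node(const struct in6_addr *addr)
    {
    	struct in6_addr m;

    	memset(&m, 0, sizeof(m));
    	m.s6_addr[0] = 0xff;	/* ff02:: link-local scope */
    	m.s6_addr[1] = 0x02;
    	m.s6_addr[11] = 0x01;	/* ::1:ffxx:xxxx */
    	m.s6_addr[12] = 0xff;
    	m.s6_addr[13] = addr->s6_addr[13];	/* low 24 bits of addr */
    	m.s6_addr[14] = addr->s6_addr[14];
    	m.s6_addr[15] = addr->s6_addr[15];
    	return m;
    }
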
 		/* XXX: again, do we really need the route? */
 		rt = rtalloc1_scoped((struct sockaddr *)&mltaddr, 0, 0UL,
 		    ia->ia_ifp->if_index);
@@ -2125,10 +2174,12 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 			    (struct sockaddr *)&ia->ia_addr,
 			    (struct sockaddr *)&mltmask, RTF_UP | RTF_CLONING,
 			    NULL, ia->ia_ifp->if_index);
-			if (error)
+			if (error) {
 				goto unwind;
-		} else
+			}
+		} else {
 			rtfree(rt);
+		}

 		imm = in6_joingroup(ifp, &mltaddr.sin6_addr, &error, 0);
 		if (!imm) {
@@ -2143,7 +2194,7 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 		LIST_INSERT_HEAD(&ia->ia6_memberships, imm, i6mm_chain);
 		IFA_UNLOCK(ifa);
 	}
-#undef	MLTMASK_LEN
+#undef MLTMASK_LEN

 	/* Ensure nd6_service() is scheduled as soon as it's convenient */
 	++nd6_sched_timeout_want;
@@ -2180,14 +2231,15 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 			if (in6m_sol != NULL) {
 				IN6M_LOCK(in6m_sol);
 				if (in6m_sol->in6m_state ==
-				    MLD_REPORTING_MEMBER)
+				    MLD_REPORTING_MEMBER) {
 					mindelay = in6m_sol->in6m_timer;
+				}
 				IN6M_UNLOCK(in6m_sol);
 			}
 			maxdelay = MAX_RTR_SOLICITATION_DELAY * hz;
-			if (maxdelay - mindelay == 0)
+			if (maxdelay - mindelay == 0) {
 				delayval = 0;
-			else {
+			} else {
 				delayval =
 				    (random() % (maxdelay - mindelay)) +
 				    mindelay;
@@ -2208,9 +2260,10 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)

done:
 	/* release reference held for this routine */
-	if (in6m_sol != NULL)
+	if (in6m_sol != NULL) {
 		IN6M_REMREF(in6m_sol);
-	return (error);
+	}
+	return error;
 }

 /*
@@ -2241,14 +2294,14 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags,
 	 * the corresponding operation.
 	 */
 	if (ifra->ifra_addr.sin6_family != AF_INET6 ||
-	    ifra->ifra_addr.sin6_len != sizeof (struct sockaddr_in6)) {
+	    ifra->ifra_addr.sin6_len != sizeof(struct sockaddr_in6)) {
 		error = EAFNOSUPPORT;
 		goto unwind;
 	}

 	/* Validate ifra_prefixmask.sin6_len is properly bounded. */
 	if (ifra->ifra_prefixmask.sin6_len == 0 ||
-	    ifra->ifra_prefixmask.sin6_len > sizeof (struct sockaddr_in6)) {
+	    ifra->ifra_prefixmask.sin6_len > sizeof(struct sockaddr_in6)) {
 		error = EINVAL;
 		goto unwind;
 	}
@@ -2286,13 +2339,14 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags,
 	 * new one here.
 	 */
 	ia = in6ifa_ifpwithaddr(ifp, &ifra->ifra_addr.sin6_addr);
-	if (ia != NULL)
+	if (ia != NULL) {
 		ifa = &ia->ia_ifa;
+	}

 	/*
 	 * Validate destination address on interface types that require it.
 	 */
-	if ((ifp->if_flags & (IFF_LOOPBACK|IFF_POINTOPOINT)) != 0) {
+	if ((ifp->if_flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) != 0) {
 		switch (ifra->ifra_dstaddr.sin6_family) {
 		case AF_INET6:
 			if (plen != 128) {
@@ -2354,8 +2408,9 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags,
 		IFA_ADDREF(ifa); /* for this and optionally for caller */
 		ifa->ifa_addr = (struct sockaddr *)&ia->ia_addr;
 		if (ifra->ifra_dstaddr.sin6_family == AF_INET6 ||
-		    (ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) != 0)
+		    (ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK)) != 0) {
 			ifa->ifa_dstaddr = (struct sockaddr *)&ia->ia_dstaddr;
+		}
 		ifa->ifa_netmask = (struct sockaddr *)&ia->ia_prefixmask;
 		ifa->ifa_ifp = ifp;
 		ifa->ifa_metric = ifp->if_metric;
@@ -2363,19 +2418,21 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags,
 		LIST_INIT(&ia->ia6_memberships);
 		ia->ia_addr.sin6_family = AF_INET6;
-		ia->ia_addr.sin6_len = sizeof (ia->ia_addr);
+		ia->ia_addr.sin6_len = sizeof(ia->ia_addr);
 		ia->ia_addr.sin6_addr = ifra->ifra_addr.sin6_addr;
 		ia->ia_prefixmask.sin6_family = AF_INET6;
-		ia->ia_prefixmask.sin6_len = sizeof (ia->ia_prefixmask);
+		ia->ia_prefixmask.sin6_len = sizeof(ia->ia_prefixmask);
 		ia->ia_prefixmask.sin6_addr = ifra->ifra_prefixmask.sin6_addr;
 		error = in6_to_kamescope(&ia->ia_addr, ifp);
-		if (error != 0)
+		if (error != 0) {
 			goto unwind;
+		}
 		if (ifa->ifa_dstaddr != NULL) {
 			ia->ia_dstaddr = ifra->ifra_dstaddr;
 			error = in6_to_kamescope(&ia->ia_dstaddr, ifp);
-			if (error != 0)
+			if (error != 0) {
 				goto unwind;
+			}
 		}

 		/* Append to address chains */
@@ -2400,8 +2457,9 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags,
 		if (in6_ifaddrs != NULL) {
 			struct in6_ifaddr *iac;
 			for (iac = in6_ifaddrs; iac->ia_next != NULL;
-			    iac = iac->ia_next)
+			    iac = iac->ia_next) {
 				continue;
+			}
 			iac->ia_next = ia;
 		} else {
 			in6_ifaddrs = ia;
@@ -2410,7 +2468,7 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags,
 		lck_rw_done(&in6_ifaddr_rwlock);
 	} else {
 		ifa = &ia->ia_ifa;
-		ifaupflags &= ~(IN6_IFAUPDATE_NEWADDR|IN6_IFAUPDATE_1STADDR);
+		ifaupflags &= ~(IN6_IFAUPDATE_NEWADDR | IN6_IFAUPDATE_1STADDR);
 	}

 	VERIFY(ia != NULL && ifa == &ia->ia_ifa);
@@ -2423,14 +2481,16 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags,
 	 */
 	ia->ia6_updatetime = ia->ia6_createtime = timenow;
 	ia6_lt = *lt;
-	if (ia6_lt.ia6t_vltime != ND6_INFINITE_LIFETIME)
+	if (ia6_lt.ia6t_vltime != ND6_INFINITE_LIFETIME) {
 		ia6_lt.ia6t_expire = timenow + ia6_lt.ia6t_vltime;
-	else
+	} else {
 		ia6_lt.ia6t_expire = 0;
-	if (ia6_lt.ia6t_pltime != ND6_INFINITE_LIFETIME)
+	}
+	if (ia6_lt.ia6t_pltime != ND6_INFINITE_LIFETIME) {
 		ia6_lt.ia6t_preferred = timenow + ia6_lt.ia6t_pltime;
-	else
+	} else {
 		ia6_lt.ia6t_preferred = 0;
+	}
 	in6ifa_setlifetime(ia, &ia6_lt);

 	/*
@@ -2453,13 +2513,15 @@ in6_update_ifa(struct ifnet *ifp, struct in6_aliasreq *ifra, int ifaupflags,

 	/* Further initialization of the interface address */
 	error = in6_ifinit(ifp, ia, ifaupflags);
-	if (error != 0)
+	if (error != 0) {
 		goto unwind;
+	}

 	/* Finish updating the address while other tasks are working with it */
 	error = in6_ifaupdate_aux(ia, ifp, ifaupflags);
-	if (error != 0)
+	if (error != 0) {
 		goto unwind;
+	}
 	/* Return success (optionally w/ address for caller). */
 	VERIFY(error == 0);
@@ -2476,7 +2538,7 @@ unwind:

done:
 	*iar = ia;
-	return (error);
+	return error;
 }

 void
@@ -2551,8 +2613,9 @@ in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)

 	ifnet_lock_exclusive(ifp);
 	IFA_LOCK(ifa);
-	if (ifa->ifa_debug & IFD_ATTACHED)
+	if (ifa->ifa_debug & IFD_ATTACHED) {
 		if_detach_ifa(ifp, ifa);
+	}
 	IFA_UNLOCK(ifa);
 	ifnet_lock_done(ifp);

@@ -2562,8 +2625,9 @@ in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
 	if (oia == (ia = in6_ifaddrs)) {
 		in6_ifaddrs = ia->ia_next;
 	} else {
-		while (ia->ia_next && (ia->ia_next != oia))
+		while (ia->ia_next && (ia->ia_next != oia)) {
 			ia = ia->ia_next;
+		}
 		if (ia->ia_next) {
 			ia->ia_next = oia->ia_next;
 		} else {
@@ -2595,21 +2659,22 @@ in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
 	 * For now quiece down the log message for LLAs.
 	 */
 	if (!IN6_IS_ADDR_LINKLOCAL(&oia->ia_addr.sin6_addr)) {
-		if (oia->ia6_ndpr == NULL)
+		if (oia->ia6_ndpr == NULL) {
 			log(LOG_NOTICE, "in6_unlink_ifa: IPv6 address "
 			    "0x%llx has no prefix\n",
 			    (uint64_t)VM_KERNEL_ADDRPERM(oia));
-		else {
+		} else {
 			struct nd_prefix *pr = oia->ia6_ndpr;
 			oia->ia6_flags &= ~IN6_IFF_AUTOCONF;
 			oia->ia6_ndpr = NULL;
 			NDPR_LOCK(pr);
 			VERIFY(pr->ndpr_addrcnt != 0);
 			pr->ndpr_addrcnt--;
-			if (oia->ia6_flags & IN6_IFF_CLAT46)
+			if (oia->ia6_flags & IN6_IFF_CLAT46) {
 				pr->ndpr_stateflags &= ~NDPRF_CLAT46;
+			}
 			NDPR_UNLOCK(pr);
-			NDPR_REMREF(pr);	/* release addr reference */
+			NDPR_REMREF(pr);        /* release addr reference */
 		}
 	}
 	IFA_UNLOCK(ifa);
@@ -2625,8 +2690,9 @@ in6_unlink_ifa(struct in6_ifaddr *ia, struct ifnet *ifp)
 	 * Do this only if it's not already unlinked in the event that we lost
 	 * the race, since in6_ifaddr_rwlock was momentarily dropped above.
 	 */
-	if (unlinked)
+	if (unlinked) {
 		IFA_REMREF(ifa);
+	}

 	/* release reference held for this routine */
 	IFA_REMREF(ifa);
@@ -2640,8 +2706,9 @@ in6_purgeif(struct ifnet *ifp)
 {
 	struct in6_ifaddr *ia;

-	if (ifp == NULL)
+	if (ifp == NULL) {
 		return;
+	}

 	LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED);

@@ -2652,10 +2719,10 @@ in6_purgeif(struct ifnet *ifp)
 			ia = ia->ia_next;
 			continue;
 		}
-		IFA_ADDREF(&ia->ia_ifa);	/* for us */
+		IFA_ADDREF(&ia->ia_ifa);        /* for us */
 		lck_rw_done(&in6_ifaddr_rwlock);
 		in6_purgeaddr(&ia->ia_ifa);
-		IFA_REMREF(&ia->ia_ifa);	/* for us */
+		IFA_REMREF(&ia->ia_ifa);        /* for us */
 		lck_rw_lock_exclusive(&in6_ifaddr_rwlock);
 		/*
 		 * Purging the address would have caused
@@ -2690,8 +2757,9 @@ in6_ifinit(struct ifnet *ifp, struct in6_ifaddr *ia, int ifaupflags)
 	if ((ifaupflags & IN6_IFAUPDATE_1STADDR) != 0) {
 		error = ifnet_ioctl(ifp, PF_INET6, SIOCSIFADDR, ia);
 		if (error != 0) {
-			if (error != EOPNOTSUPP)
-				return (error);
+			if (error != EOPNOTSUPP) {
+				return error;
+			}
 			error = 0;
 		}
 	}
@@ -2708,8 +2776,9 @@ in6_ifinit(struct ifnet *ifp, struct in6_ifaddr *ia, int ifaupflags)
 	    ia->ia_dstaddr.sin6_family == AF_INET6) {
 		IFA_UNLOCK(ifa);
 		error = rtinit(ifa, RTM_ADD, RTF_UP | RTF_HOST);
-		if (error != 0)
-			return (error);
+		if (error != 0) {
+			return error;
+		}
 		IFA_LOCK(ifa);
 		ia->ia_flags |= IFA_ROUTE;
 	}
@@ -2724,14 +2793,15 @@ in6_ifinit(struct ifnet *ifp, struct in6_ifaddr *ia, int ifaupflags)
 	IFA_UNLOCK(ifa);
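
The in6_update_ifa hunks above convert the relative ia6t_vltime/ia6t_pltime values into absolute deadlines, with 0 serving as the sentinel for an infinite lifetime. The arithmetic, reduced to a standalone sketch (the macro name stands in for ND6_INFINITE_LIFETIME):

    #include <stdint.h>

    #define INFINITE_LIFETIME 0xffffffffu

    /* relative lifetime to absolute deadline; 0 encodes "never expires" */
    static uint64_t lifetime_deadline(uint64_t timenow, uint32_t lifetime)
    {
    	if (lifetime == INFINITE_LIFETIME) {
    		return 0;
    	}
    	return timenow + lifetime;
    }
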
 	/* Add ownaddr as loopback rtentry, if necessary (ex. on p2p link). */
-	if ((ifaupflags & IN6_IFAUPDATE_NEWADDR) != 0)
+	if ((ifaupflags & IN6_IFAUPDATE_NEWADDR) != 0) {
 		in6_ifaddloop(ifa);
+	}

 	/* invalidate route caches */
 	routegenid_inet6_update();

 	VERIFY(error == 0);
-	return (0);
+	return 0;
 }

 void
@@ -2762,7 +2832,7 @@ in6ifa_ifpforlinklocal(struct ifnet *ifp, int ignoreflags)
 			IFA_UNLOCK(ifa);
 			continue;
 		}
-		IFA_ADDREF_LOCKED(ifa);	/* for caller */
+		IFA_ADDREF_LOCKED(ifa); /* for caller */
 		IFA_UNLOCK(ifa);
 		break;
 	}
@@ -2770,7 +2840,7 @@ in6ifa_ifpforlinklocal(struct ifnet *ifp, int ignoreflags)
 	}
 	ifnet_lock_done(ifp);

-	return ((struct in6_ifaddr *)ifa);
+	return (struct in6_ifaddr *)ifa;
 }

 struct in6_ifaddr *
@@ -2782,7 +2852,7 @@ in6ifa_ifpwithflag(struct ifnet * ifp, int flag)

 	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
 		IFA_LOCK_SPIN(ifa);
-		if (ifa->ifa_addr->sa_family != AF_INET6 ) {
+		if (ifa->ifa_addr->sa_family != AF_INET6) {
 			IFA_UNLOCK(ifa);
 			continue;
 		}
@@ -2795,7 +2865,7 @@ in6ifa_ifpwithflag(struct ifnet * ifp, int flag)
 	}
 	ifnet_lock_done(ifp);

-	return ((struct in6_ifaddr *)ifa);
+	return (struct in6_ifaddr *)ifa;
 }

 /*
@@ -2815,7 +2885,7 @@ in6ifa_ifpwithaddr(struct ifnet *ifp, struct in6_addr *addr)
 			continue;
 		}
 		if (IN6_ARE_ADDR_EQUAL(addr, IFA_IN6(ifa))) {
-			IFA_ADDREF_LOCKED(ifa);	/* for caller */
+			IFA_ADDREF_LOCKED(ifa); /* for caller */
 			IFA_UNLOCK(ifa);
 			break;
 		}
@@ -2823,7 +2893,7 @@ in6ifa_ifpwithaddr(struct ifnet *ifp, struct in6_addr *addr)
 	}
 	ifnet_lock_done(ifp);

-	return ((struct in6_ifaddr *)ifa);
+	return (struct in6_ifaddr *)ifa;
 }

 struct in6_ifaddr *
@@ -2835,7 +2905,7 @@ in6ifa_prproxyaddr(struct in6_addr *addr)
 	for (ia = in6_ifaddrs; ia; ia = ia->ia_next) {
 		IFA_LOCK(&ia->ia_ifa);
 		if (IN6_ARE_ADDR_EQUAL(addr, IFA_IN6(&ia->ia_ifa))) {
-			IFA_ADDREF_LOCKED(&ia->ia_ifa);	/* for caller */
+			IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for caller */
 			IFA_UNLOCK(&ia->ia_ifa);
 			break;
 		}
@@ -2848,7 +2918,7 @@ in6ifa_prproxyaddr(struct in6_addr *addr)
 		ia = NULL;
 	}

-	return (ia);
+	return ia;
 }

 void
@@ -2872,22 +2942,26 @@ in6ifa_getlifetime(struct in6_ifaddr *ia6, struct in6_addrlifetime *t_dst,

 	if (iscalendar) {
 		if (t_src->ia6ti_expire != 0 &&
-		    t_src->ia6ti_vltime != ND6_INFINITE_LIFETIME)
+		    t_src->ia6ti_vltime != ND6_INFINITE_LIFETIME) {
 			t_dst->ia6t_expire = t_src->ia6ti_base_calendartime +
 			    t_src->ia6ti_expire - t_src->ia6ti_base_uptime;
+		}

 		if (t_src->ia6ti_preferred != 0 &&
-		    t_src->ia6ti_pltime != ND6_INFINITE_LIFETIME)
+		    t_src->ia6ti_pltime != ND6_INFINITE_LIFETIME) {
 			t_dst->ia6t_preferred = t_src->ia6ti_base_calendartime +
 			    t_src->ia6ti_preferred - t_src->ia6ti_base_uptime;
+		}
 	} else {
 		if (t_src->ia6ti_expire != 0 &&
-		    t_src->ia6ti_vltime != ND6_INFINITE_LIFETIME)
+		    t_src->ia6ti_vltime != ND6_INFINITE_LIFETIME) {
 			t_dst->ia6t_expire = t_src->ia6ti_expire;
+		}

 		if (t_src->ia6ti_preferred != 0 &&
-		    t_src->ia6ti_pltime != ND6_INFINITE_LIFETIME)
+		    t_src->ia6ti_pltime != ND6_INFINITE_LIFETIME) {
 			t_dst->ia6t_preferred = t_src->ia6ti_preferred;
+		}
 	}
 }

@@ -2935,17 +3009,20 @@ ip6_sprintf(const struct in6_addr *addr)
 	for (i = 0; i < 8; i++) {
 		if (dcolon == 1) {
 			if (*a == 0) {
-				if (i == 7)
+				if (i == 7) {
 					*cp++ = ':';
+				}
 				a++;
 				continue;
-			} else
+			} else {
 				dcolon = 2;
+			}
 		}
 		if (*a == 0) {
 			if (dcolon == 0 && *(a + 1) == 0) {
-				if (i == 0)
+				if (i == 0) {
 					*cp++ = ':';
+				}
 				*cp++ = ':';
 				dcolon = 1;
 			} else {
@@ -2969,13 +3046,14 @@ ip6_sprintf(const struct in6_addr *addr)
 			*cp++ = digits[n];
 			zpad = 1;
 		}
-		if ((n = *d & 0xf) != 0 || zpad)
+		if ((n = *d & 0xf) != 0 || zpad) {
 			*cp++ = digits[n];
+		}
 		*cp++ = ':';
 		a++;
 	}
 	*--cp = 0;
-	return (ip6buf[ip6round]);
+	return ip6buf[ip6round];
 }

 int
@@ -2985,24 +3063,26 @@ in6addr_local(struct in6_addr *in6)
 	struct sockaddr_in6 sin6;
 	int local = 0;

-	if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_SCOPE_LINKLOCAL(in6))
-		return (1);
+	if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_SCOPE_LINKLOCAL(in6)) {
+		return 1;
+	}

 	sin6.sin6_family = AF_INET6;
-	sin6.sin6_len = sizeof (sin6);
-	bcopy(in6, &sin6.sin6_addr, sizeof (*in6));
+	sin6.sin6_len = sizeof(sin6);
+	bcopy(in6, &sin6.sin6_addr, sizeof(*in6));
 	rt = rtalloc1((struct sockaddr *)&sin6, 0, 0);

 	if (rt != NULL) {
 		RT_LOCK_SPIN(rt);
-		if (rt->rt_gateway->sa_family == AF_LINK)
+		if (rt->rt_gateway->sa_family == AF_LINK) {
 			local = 1;
+		}
 		RT_UNLOCK(rt);
 		rtfree(rt);
 	} else {
 		local = in6_localaddr(in6);
 	}
-	return (local);
+	return local;
 }

 int
@@ -3010,8 +3090,9 @@ in6_localaddr(struct in6_addr *in6)
 {
 	struct in6_ifaddr *ia;

-	if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_ADDR_LINKLOCAL(in6))
-		return (1);
+	if (IN6_IS_ADDR_LOOPBACK(in6) || IN6_IS_ADDR_LINKLOCAL(in6)) {
+		return 1;
+	}

 	lck_rw_lock_shared(&in6_ifaddr_rwlock);
 	for (ia = in6_ifaddrs; ia; ia = ia->ia_next) {
@@ -3020,12 +3101,12 @@ in6_localaddr(struct in6_addr *in6)
 		    &ia->ia_prefixmask.sin6_addr)) {
 			IFA_UNLOCK(&ia->ia_ifa);
 			lck_rw_done(&in6_ifaddr_rwlock);
-			return (1);
+			return 1;
 		}
 		IFA_UNLOCK(&ia->ia_ifa);
 	}
 	lck_rw_done(&in6_ifaddr_rwlock);
-	return (0);
+	return 0;
 }

 /*
@@ -3039,16 +3120,18 @@ in6_matchlen(struct in6_addr *src, struct in6_addr *dst)
 	u_char *s = (u_char *)src, *d = (u_char *)dst;
 	u_char *lim = s + 16, r;

-	while (s < lim)
+	while (s < lim) {
 		if ((r = (*d++ ^ *s++)) != 0) {
 			while (r < 128) {
 				match++;
 				r <<= 1;
 			}
 			break;
-		} else
+		} else {
 			match += 8;
-	return (match);
+		}
+	}
+	return match;
 }

 /* XXX: to be scope conscious */
@@ -3060,20 +3143,22 @@ in6_are_prefix_equal(struct in6_addr *p1, struct in6_addr *p2, int len)

 	/* sanity check */
 	if (0 > len || len > 128) {
 		log(LOG_ERR, "%s: invalid prefix length(%d)\n", __func__, len);
-		return (0);
+		return 0;
 	}

 	bytelen = len / 8;
 	bitlen = len % 8;

-	if (bcmp(&p1->s6_addr, &p2->s6_addr, bytelen))
-		return (0);
+	if (bcmp(&p1->s6_addr, &p2->s6_addr, bytelen)) {
+		return 0;
+	}
 	if (bitlen != 0 &&
 	    p1->s6_addr[bytelen] >> (8 - bitlen) !=
-	    p2->s6_addr[bytelen] >> (8 - bitlen))
-		return (0);
+	    p2->s6_addr[bytelen] >> (8 - bitlen)) {
+		return 0;
+	}

-	return (1);
+	return 1;
 }

 void
@@ -3088,13 +3173,15 @@ in6_prefixlen2mask(struct in6_addr *maskp, int len)
 		return;
 	}

-	bzero(maskp, sizeof (*maskp));
+	bzero(maskp, sizeof(*maskp));
 	bytelen = len / 8;
 	bitlen = len % 8;
-	for (i = 0; i < bytelen; i++)
+	for (i = 0; i < bytelen; i++) {
 		maskp->s6_addr[i] = 0xff;
-	if (bitlen)
+	}
+	if (bitlen) {
 		maskp->s6_addr[bytelen] = maskarray[bitlen - 1];
+	}
 }
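
in6_matchlen, restyled above, counts the longest common prefix of two addresses, which the source-selection code below uses for ranking. The same logic written byte-wise in standalone C:

    #include <netinet/in.h>

    /* longest common prefix, in bits, of two IPv6 addresses */
    static int match_len(const struct in6_addr *a, const struct in6_addr *b)
    {
    	int bits = 0;
    	int i;

    	for (i = 0; i < 16; i++) {
    		unsigned char x = a->s6_addr[i] ^ b->s6_addr[i];
    		if (x == 0) {
    			bits += 8;
    			continue;
    		}
    		while ((x & 0x80) == 0) {	/* count leading equal bits */
    			bits++;
    			x = (unsigned char)(x << 1);
    		}
    		break;
    	}
    	return bits;
    }
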
 /*
@@ -3103,14 +3190,14 @@ struct in6_ifaddr *
 in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 {
-	int dst_scope =	in6_addrscope(dst), src_scope, best_scope = 0;
+	int dst_scope = in6_addrscope(dst), src_scope, best_scope = 0;
 	int blen = -1;
 	struct ifaddr *ifa;
 	struct ifnet *ifp;
 	struct in6_ifaddr *ifa_best = NULL;

 	if (oifp == NULL) {
-		return (NULL);
+		return NULL;
 	}

 	/*
@@ -3124,8 +3211,9 @@ in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 		 * We can never take an address that breaks the scope zone
 		 * of the destination.
 		 */
-		if (in6_addr2scopeid(ifp, dst) != in6_addr2scopeid(oifp, dst))
+		if (in6_addr2scopeid(ifp, dst) != in6_addr2scopeid(oifp, dst)) {
 			continue;
+		}

 		ifnet_lock_shared(ifp);
 		TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
@@ -3162,8 +3250,9 @@ in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 			 * If this is the first address we find,
 			 * keep it anyway.
 			 */
-			if (ifa_best == NULL)
+			if (ifa_best == NULL) {
 				goto replace;
+			}

 			/*
 			 * ifa_best is never NULL beyond this line except
@@ -3186,8 +3275,9 @@ in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 			 * scope(src) >= scope(dst))
 			 */
 			if (IN6_ARE_SCOPE_CMP(best_scope, dst_scope) < 0 &&
-			    IN6_ARE_SCOPE_CMP(src_scope, dst_scope) >= 0)
+			    IN6_ARE_SCOPE_CMP(src_scope, dst_scope) >= 0) {
 				goto replace; /* (A) */
+			}
 			if (IN6_ARE_SCOPE_CMP(src_scope, dst_scope) < 0 &&
 			    IN6_ARE_SCOPE_CMP(best_scope, dst_scope) >= 0) {
 				IFA_UNLOCK(ifa);
@@ -3228,8 +3318,9 @@ in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 			 */
 			if ((ifa_best->ia6_flags & IN6_IFF_DEPRECATED) &&
 			    (((struct in6_ifaddr *)ifa)->ia6_flags &
-			    IN6_IFF_DEPRECATED) == 0)
+			    IN6_IFF_DEPRECATED) == 0) {
 				goto replace;
+			}

 			/*
 			 * When we use temporary addresses described in
@@ -3245,18 +3336,18 @@ in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 				ifat = (struct in6_ifaddr *)ifa;

 				if ((ifa_best->ia6_flags &
-				    (IN6_IFF_AUTOCONF|IN6_IFF_TEMPORARY))
+				    (IN6_IFF_AUTOCONF | IN6_IFF_TEMPORARY))
 				    == IN6_IFF_AUTOCONF &&
 				    (ifat->ia6_flags &
-				    (IN6_IFF_AUTOCONF|IN6_IFF_TEMPORARY))
-				    == (IN6_IFF_AUTOCONF|IN6_IFF_TEMPORARY)) {
+				    (IN6_IFF_AUTOCONF | IN6_IFF_TEMPORARY))
+				    == (IN6_IFF_AUTOCONF | IN6_IFF_TEMPORARY)) {
 					goto replace;
 				}
 				if ((ifa_best->ia6_flags &
-				    (IN6_IFF_AUTOCONF|IN6_IFF_TEMPORARY))
-				    == (IN6_IFF_AUTOCONF|IN6_IFF_TEMPORARY) &&
+				    (IN6_IFF_AUTOCONF | IN6_IFF_TEMPORARY))
+				    == (IN6_IFF_AUTOCONF | IN6_IFF_TEMPORARY) &&
 				    (ifat->ia6_flags &
-				    (IN6_IFF_AUTOCONF|IN6_IFF_TEMPORARY))
+				    (IN6_IFF_AUTOCONF | IN6_IFF_TEMPORARY))
 				    == IN6_IFF_AUTOCONF) {
 					IFA_UNLOCK(ifa);
 					continue;
@@ -3323,8 +3414,9 @@ in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 					IFA_UNLOCK(ifa);
 					continue;
 				}
-				if (bifp != oifp && ifp == oifp) /* (2) */
+				if (bifp != oifp && ifp == oifp) { /* (2) */
 					goto replace;
+				}

 				/*
 				 * Both bifp and ifp are on the outgoing
@@ -3335,8 +3427,9 @@ in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 				 */
 				tlen = in6_matchlen(IFA_IN6(ifa), dst);
 				matchcmp = tlen - blen;
-				if (matchcmp > 0) /* (3) */
+				if (matchcmp > 0) { /* (3) */
 					goto replace;
+				}
 				IFA_UNLOCK(ifa);
 				continue; /* (4) */
 			}
@@ -3348,25 +3441,27 @@ in6_ifawithscope(struct ifnet *oifp, struct in6_addr *dst)
 				goto replace; /* (6) */
 			}
 			if (dscopecmp < 0) {
-				if (bscopecmp > 0) /* (7) */
+				if (bscopecmp > 0) { /* (7) */
 					goto replace;
+				}
 				IFA_UNLOCK(ifa);
 				continue; /* (8) */
 			}

 			/* now dscopecmp must be 0 */
-			if (bscopecmp < 0)
+			if (bscopecmp < 0) {
 				goto replace; /* (9) */
-
+			}
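
The labeled rules in this function, (A), (2), (3), (7), (9), order candidate source addresses: scope fitness first, then non-deprecated over deprecated, then longest prefix match. A toy comparator capturing that ordering; the types and scoring here are illustrative, not the kernel's:

    /* illustrative ranking only; the kernel weighs more attributes */
    struct candidate {
    	int scope_ok;	/* scope(src) covers scope(dst) */
    	int deprecated;	/* IN6_IFF_DEPRECATED set */
    	int matchlen;	/* common prefix bits with dst */
    };

    /* nonzero when a should replace b as the best source so far */
    static int better_source(const struct candidate *a, const struct candidate *b)
    {
    	if (a->scope_ok != b->scope_ok) {
    		return a->scope_ok > b->scope_ok;
    	}
    	if (a->deprecated != b->deprecated) {
    		return a->deprecated < b->deprecated;
    	}
    	return a->matchlen > b->matchlen;
    }
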
 replace:
-			IFA_ADDREF_LOCKED(ifa);	/* for ifa_best */
+			IFA_ADDREF_LOCKED(ifa); /* for ifa_best */
 			blen = tlen >= 0 ?
 			    tlen : in6_matchlen(IFA_IN6(ifa), dst);
 			best_scope =
 			    in6_addrscope(&ifa2ia6(ifa)->ia_addr.sin6_addr);
 			IFA_UNLOCK(ifa);
-			if (ifa_best)
+			if (ifa_best) {
 				IFA_REMREF(&ifa_best->ia_ifa);
+			}
 			ifa_best = (struct in6_ifaddr *)ifa;
 		}
 		ifnet_lock_done(ifp);
@@ -3374,26 +3469,29 @@ replace:
 	ifnet_head_done();

 	/* count statistics for future improvements */
-	if (ifa_best == NULL)
+	if (ifa_best == NULL) {
 		ip6stat.ip6s_sources_none++;
-	else {
+	} else {
 		IFA_LOCK_SPIN(&ifa_best->ia_ifa);
-		if (oifp == ifa_best->ia_ifp)
+		if (oifp == ifa_best->ia_ifp) {
 			ip6stat.ip6s_sources_sameif[best_scope]++;
-		else
+		} else {
 			ip6stat.ip6s_sources_otherif[best_scope]++;
+		}

-		if (best_scope == dst_scope)
+		if (best_scope == dst_scope) {
 			ip6stat.ip6s_sources_samescope[best_scope]++;
-		else
+		} else {
 			ip6stat.ip6s_sources_otherscope[best_scope]++;
+		}

-		if ((ifa_best->ia6_flags & IN6_IFF_DEPRECATED) != 0)
+		if ((ifa_best->ia6_flags & IN6_IFF_DEPRECATED) != 0) {
 			ip6stat.ip6s_sources_deprecated[best_scope]++;
+		}
 		IFA_UNLOCK(&ifa_best->ia_ifa);
 	}

-	return (ifa_best);
+	return ifa_best;
 }

 /*
@@ -3403,10 +3501,10 @@ replace:
 struct in6_ifaddr *
 in6_ifawithifp(struct ifnet *ifp, struct in6_addr *dst)
 {
-	int dst_scope =	in6_addrscope(dst), blen = -1, tlen;
+	int dst_scope = in6_addrscope(dst), blen = -1, tlen;
 	struct ifaddr *ifa;
 	struct in6_ifaddr *besta = NULL;
-	struct in6_ifaddr *dep[2];	/* last-resort: deprecated */
+	struct in6_ifaddr *dep[2];      /* last-resort: deprecated */

 	dep[0] = dep[1] = NULL;

@@ -3437,10 +3535,11 @@ in6_ifawithifp(struct ifnet *ifp, struct in6_addr *dst)
 		}
 		if (ifa2ia6(ifa)->ia6_flags & IN6_IFF_DEPRECATED) {
 			if (ip6_use_deprecated) {
-				IFA_ADDREF_LOCKED(ifa);	/* for dep[0] */
+				IFA_ADDREF_LOCKED(ifa); /* for dep[0] */
 				IFA_UNLOCK(ifa);
-				if (dep[0] != NULL)
+				if (dep[0] != NULL) {
 					IFA_REMREF(&dep[0]->ia_ifa);
+				}
 				dep[0] = (struct in6_ifaddr *)ifa;
 			} else {
 				IFA_UNLOCK(ifa);
@@ -3457,14 +3556,14 @@ in6_ifawithifp(struct ifnet *ifp, struct in6_addr *dst)
 				IFA_UNLOCK(ifa);
 				IFA_LOCK(&besta->ia_ifa);
 				blen = in6_matchlen(
-				    &besta->ia_addr.sin6_addr, dst);
+					&besta->ia_addr.sin6_addr, dst);
 				IFA_UNLOCK(&besta->ia_ifa);
 				IFA_LOCK(ifa);
 			}
 			tlen = in6_matchlen(IFA_IN6(ifa), dst);
 			if (tlen > blen) {
 				blen = tlen;
-				IFA_ADDREF_LOCKED(ifa);	/* for besta */
+				IFA_ADDREF_LOCKED(ifa); /* for besta */
 				IFA_UNLOCK(ifa);
 				IFA_REMREF(&besta->ia_ifa);
 				besta = (struct in6_ifaddr *)ifa;
@@ -3473,7 +3572,7 @@ in6_ifawithifp(struct ifnet *ifp, struct in6_addr *dst)
 			}
 		} else {
 			besta = (struct in6_ifaddr *)ifa;
-			IFA_ADDREF_LOCKED(ifa);	/* for besta */
+			IFA_ADDREF_LOCKED(ifa); /* for besta */
 			IFA_UNLOCK(ifa);
 		}
 	} else {
@@ -3482,9 +3581,10 @@ in6_ifawithifp(struct ifnet *ifp, struct in6_addr *dst)
 	}
 	if (besta) {
 		ifnet_lock_done(ifp);
-		if (dep[0] != NULL)
+		if (dep[0] != NULL) {
 			IFA_REMREF(&dep[0]->ia_ifa);
-		return (besta);
+		}
+		return besta;
 	}

 	TAILQ_FOREACH(ifa, &ifp->if_addrlist, ifa_list) {
@@ -3507,37 +3607,42 @@ in6_ifawithifp(struct ifnet *ifp, struct in6_addr *dst)
 		}
 		if (ifa2ia6(ifa)->ia6_flags & IN6_IFF_DEPRECATED) {
 			if (ip6_use_deprecated) {
-				IFA_ADDREF_LOCKED(ifa);	/* for dep[1] */
+				IFA_ADDREF_LOCKED(ifa); /* for dep[1] */
 				IFA_UNLOCK(ifa);
-				if (dep[1] != NULL)
+				if (dep[1] != NULL) {
 					IFA_REMREF(&dep[1]->ia_ifa);
+				}
 				dep[1] = (struct in6_ifaddr *)ifa;
 			} else {
 				IFA_UNLOCK(ifa);
 			}
 			continue;
 		}
-		IFA_ADDREF_LOCKED(ifa);	/* for caller */
+		IFA_ADDREF_LOCKED(ifa); /* for caller */
 		IFA_UNLOCK(ifa);
 		ifnet_lock_done(ifp);
-		if (dep[0] != NULL)
+		if (dep[0] != NULL) {
 			IFA_REMREF(&dep[0]->ia_ifa);
-		if (dep[1] != NULL)
+		}
+		if (dep[1] != NULL) {
 			IFA_REMREF(&dep[1]->ia_ifa);
-		return ((struct in6_ifaddr *)ifa);
+		}
+		return (struct in6_ifaddr *)ifa;
 	}
 	ifnet_lock_done(ifp);

 	/* use the last-resort values, that are, deprecated addresses */
 	if (dep[0]) {
-		if (dep[1] != NULL)
+		if (dep[1] != NULL) {
 			IFA_REMREF(&dep[1]->ia_ifa);
-		return (dep[0]);
+		}
+		return dep[0];
+	}
+	if (dep[1]) {
+		return dep[1];
 	}
-	if (dep[1])
-		return (dep[1]);

-	return (NULL);
+	return NULL;
 }

 /*
@@ -3550,9 +3655,10 @@ in6_if_up_dad_start(struct ifnet *ifp)
 	struct nd_ifinfo *ndi = NULL;

 	ndi = ND_IFINFO(ifp);
-	VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
-	if (!(ndi->flags & ND6_IFF_DAD))
+	VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
+	if (!(ndi->flags & ND6_IFF_DAD)) {
 		return;
+	}

 	/* start DAD on all the interface addresses */
 	ifnet_lock_exclusive(ifp);
@@ -3566,7 +3672,7 @@ in6_if_up_dad_start(struct ifnet *ifp)
 		}
 		ia6 = (struct in6_ifaddr *)ifa;
 		if (ia6->ia6_flags & IN6_IFF_DADPROGRESS) {
-			int delay = 0;	/* delay ticks before DAD output */
+			int delay = 0;  /* delay ticks before DAD output */
 			IFA_UNLOCK(ifa);
 			nd6_dad_start(ifa, &delay);
 		} else {
@@ -3582,13 +3688,15 @@ in6if_do_dad(
 {
 	struct nd_ifinfo *ndi = NULL;

-	if ((ifp->if_flags & IFF_LOOPBACK) != 0)
-		return (0);
+	if ((ifp->if_flags & IFF_LOOPBACK) != 0) {
+		return 0;
+	}

 	ndi = ND_IFINFO(ifp);
-	VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
-	if (!(ndi->flags & ND6_IFF_DAD))
-		return (0);
+	VERIFY((NULL != ndi) && (TRUE == ndi->initialized));
+	if (!(ndi->flags & ND6_IFF_DAD)) {
+		return 0;
+	}

 	/*
 	 * If we are using the alternative neighbor discovery
@@ -3598,17 +3706,18 @@ in6if_do_dad(
 	 * for now, even when not marked as using the alternative
 	 * interface.  This is for historical reasons.
 	 */
-	if (ifp->if_eflags &
-	    (IFEF_IPV6_ND6ALT|IFEF_LOCALNET_PRIVATE|IFEF_DIRECTLINK))
-		return (0);
+	if (ifp->if_eflags &
+	    (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE | IFEF_DIRECTLINK)) {
+		return 0;
+	}

 	if (ifp->if_subfamily == IFNET_SUBFAMILY_IPSEC ||
-		ifp->if_subfamily == IFNET_SUBFAMILY_UTUN) {
+	    ifp->if_subfamily == IFNET_SUBFAMILY_UTUN) {
 		/*
 		 * Ignore DAD for tunneling virtual interfaces, which get
 		 * their IPv6 address explicitly assigned.
 		 */
-		return (0);
+		return 0;
 	}

 	switch (ifp->if_type) {
@@ -3622,7 +3731,7 @@ in6if_do_dad(
 		 * interfaces.  We should even omit it, because loop-backed
 		 * NS would confuse the DAD procedure.
 		 */
-		return (0);
+		return 0;
 	default:
 		/*
 		 * Our DAD routine requires the interface up and running.
@@ -3633,11 +3742,12 @@ in6if_do_dad(
 		 * XXX: we should rather mark "tentative" on such addresses,
 		 * and do DAD after the interface becomes ready.
 		 */
-		if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) !=
-		    (IFF_UP|IFF_RUNNING))
-			return (0);
+		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) !=
+		    (IFF_UP | IFF_RUNNING)) {
+			return 0;
+		}

-		return (1);
+		return 1;
 	}
 }

@@ -3655,19 +3765,24 @@ in6_setmaxmtu(void)
 	TAILQ_FOREACH(ifp, &ifnet_head, if_list) {
 		struct nd_ifinfo *ndi = NULL;

-		if ((ndi = ND_IFINFO(ifp)) != NULL && !ndi->initialized)
+		if ((ndi = ND_IFINFO(ifp)) != NULL && !ndi->initialized) {
 			ndi = NULL;
-		if (ndi != NULL)
+		}
+		if (ndi != NULL) {
 			lck_mtx_lock(&ndi->lock);
+		}
 		if ((ifp->if_flags & IFF_LOOPBACK) == 0 &&
-		    IN6_LINKMTU(ifp) > maxmtu)
+		    IN6_LINKMTU(ifp) > maxmtu) {
 			maxmtu = IN6_LINKMTU(ifp);
-		if (ndi != NULL)
+		}
+		if (ndi != NULL) {
 			lck_mtx_unlock(&ndi->lock);
+		}
 	}
 	ifnet_head_done();
-	if (maxmtu)	/* update only when maxmtu is positive */
+	if (maxmtu) {   /* update only when maxmtu is positive */
 		in6_maxmtu = maxmtu;
+	}
 }

 /*
  * Provide the length of interface identifiers to be used for the link attached
@@ -3680,43 +3795,43 @@ int
 in6_if2idlen(struct ifnet *ifp)
 {
 	switch (ifp->if_type) {
-	case IFT_ETHER:		/* RFC2464 */
-	case IFT_IEEE8023ADLAG:	/* IEEE802.3ad Link Aggregate */
+	case IFT_ETHER:                 /* RFC2464 */
+	case IFT_IEEE8023ADLAG:         /* IEEE802.3ad Link Aggregate */
#ifdef IFT_PROPVIRTUAL
-	case IFT_PROPVIRTUAL:	/* XXX: no RFC. treat it as ether */
+	case IFT_PROPVIRTUAL:           /* XXX: no RFC. treat it as ether */
#endif
#ifdef IFT_L2VLAN
-	case IFT_L2VLAN:	/* ditto */
+	case IFT_L2VLAN:                /* ditto */
#endif
#ifdef IFT_IEEE80211
-	case IFT_IEEE80211:	/* ditto */
+	case IFT_IEEE80211:             /* ditto */
#endif
#ifdef IFT_MIP
-	case IFT_MIP:	/* ditto */
+	case IFT_MIP:                   /* ditto */
#endif
-		return (64);
-	case IFT_FDDI:		/* RFC2467 */
-		return (64);
-	case IFT_ISO88025:	/* RFC2470 (IPv6 over Token Ring) */
-		return (64);
-	case IFT_PPP:		/* RFC2472 */
-		return (64);
-	case IFT_ARCNET:	/* RFC2497 */
-		return (64);
-	case IFT_FRELAY:	/* RFC2590 */
-		return (64);
-	case IFT_IEEE1394:	/* RFC3146 */
-		return (64);
+		return 64;
+	case IFT_FDDI:                  /* RFC2467 */
+		return 64;
+	case IFT_ISO88025:              /* RFC2470 (IPv6 over Token Ring) */
+		return 64;
+	case IFT_PPP:                   /* RFC2472 */
+		return 64;
+	case IFT_ARCNET:                /* RFC2497 */
+		return 64;
+	case IFT_FRELAY:                /* RFC2590 */
+		return 64;
+	case IFT_IEEE1394:              /* RFC3146 */
+		return 64;
 	case IFT_GIF:
-		return (64);	/* draft-ietf-v6ops-mech-v2-07 */
+		return 64;              /* draft-ietf-v6ops-mech-v2-07 */
 	case IFT_LOOP:
-		return (64);	/* XXX: is this really correct? */
+		return 64;              /* XXX: is this really correct? */
 	case IFT_OTHER:
-		return (64);	/* for utun interfaces */
+		return 64;              /* for utun interfaces */
 	case IFT_CELLULAR:
-		return (64);	/* Packet Data over Cellular */
+		return 64;              /* Packet Data over Cellular */
 	case IFT_BRIDGE:
-		return (64);	/* Transparent bridge interface */
+		return 64;              /* Transparent bridge interface */
 	default:
 		/*
 		 * Unknown link type:
@@ -3731,7 +3846,7 @@ in6_if2idlen(struct ifnet *ifp)
 		 */
 		log(LOG_NOTICE, "%s: unknown link type (%d)\n", __func__,
 		    ifp->if_type);
-		return (64);
+		return 64;
 	}
 }

 /*
@@ -3741,8 +3856,8 @@ in6_if2idlen(struct ifnet *ifp)
 void
 in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6)
 {
-	bzero(sin, sizeof (*sin));
-	sin->sin_len = sizeof (struct sockaddr_in);
+	bzero(sin, sizeof(*sin));
+	sin->sin_len = sizeof(struct sockaddr_in);
 	sin->sin_family = AF_INET;
 	sin->sin_port = sin6->sin6_port;
 	sin->sin_addr.s_addr = sin6->sin6_addr.s6_addr32[3];
@@ -3752,8 +3867,8 @@ in6_sin6_2_sin(struct sockaddr_in *sin, struct sockaddr_in6 *sin6)
 void
 in6_sin_2_v4mapsin6(struct sockaddr_in *sin, struct sockaddr_in6 *sin6)
 {
-	bzero(sin6, sizeof (*sin6));
-	sin6->sin6_len = sizeof (struct sockaddr_in6);
+	bzero(sin6, sizeof(*sin6));
+	sin6->sin6_len = sizeof(struct sockaddr_in6);
 	sin6->sin6_family = AF_INET6;
 	sin6->sin6_port = sin->sin_port;
 	sin6->sin6_addr.s6_addr32[0] = 0;
@@ -3790,16 +3905,17 @@ in6_sin_2_v4mapsin6_in_sock(struct sockaddr **nam)
 	struct sockaddr_in *sin_p;
 	struct sockaddr_in6 *sin6_p;

-	MALLOC(sin6_p, struct sockaddr_in6 *, sizeof (*sin6_p), M_SONAME,
+	MALLOC(sin6_p, struct sockaddr_in6 *, sizeof(*sin6_p), M_SONAME,
 	    M_WAITOK);
-	if (sin6_p == NULL)
-		return (ENOBUFS);
+	if (sin6_p == NULL) {
+		return ENOBUFS;
+	}
 	sin_p = (struct sockaddr_in *)(void *)*nam;
 	in6_sin_2_v4mapsin6(sin_p, sin6_p);
 	FREE(*nam, M_SONAME);
 	*nam = (struct sockaddr *)sin6_p;

-	return (0);
+	return 0;
 }
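
in6_sin6_2_sin above converts a v4-mapped sockaddr_in6 into a sockaddr_in by taking the last 32 bits of the address, s6_addr32[3], as the IPv4 address. A sketch of that extraction using plain byte addressing:

    #include <netinet/in.h>
    #include <string.h>

    /* pull the IPv4 address out of a v4-mapped ::ffff:a.b.c.d address */
    static struct in_addr mapped_to_v4(const struct sockaddr_in6 *sin6)
    {
    	struct in_addr v4;

    	/* bytes 12..15 are the embedded IPv4 address */
    	memcpy(&v4, &sin6->sin6_addr.s6_addr[12], sizeof(v4));
    	return v4;
    }
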
 /*
@@ -3818,21 +3934,21 @@ in6_post_msg(struct ifnet *ifp, u_int32_t event_code, struct in6_ifaddr *ifa,
 	struct kev_in6_data in6_event_data;
 	struct in6_addrlifetime ia6_lt;

-	bzero(&in6_event_data, sizeof (struct kev_in6_data));
-	bzero(&ev_msg, sizeof (struct kev_msg));
-	ev_msg.vendor_code	= KEV_VENDOR_APPLE;
-	ev_msg.kev_class	= KEV_NETWORK_CLASS;
-	ev_msg.kev_subclass	= KEV_INET6_SUBCLASS;
-	ev_msg.event_code	= event_code;
+	bzero(&in6_event_data, sizeof(struct kev_in6_data));
+	bzero(&ev_msg, sizeof(struct kev_msg));
+	ev_msg.vendor_code      = KEV_VENDOR_APPLE;
+	ev_msg.kev_class        = KEV_NETWORK_CLASS;
+	ev_msg.kev_subclass     = KEV_INET6_SUBCLASS;
+	ev_msg.event_code       = event_code;

 	if (ifa) {
 		IFA_LOCK(&ifa->ia_ifa);
-		in6_event_data.ia_addr		= ifa->ia_addr;
-		in6_event_data.ia_net		= ifa->ia_net;
-		in6_event_data.ia_dstaddr	= ifa->ia_dstaddr;
-		in6_event_data.ia_prefixmask	= ifa->ia_prefixmask;
-		in6_event_data.ia_plen		= ifa->ia_plen;
-		in6_event_data.ia6_flags	= (u_int32_t)ifa->ia6_flags;
+		in6_event_data.ia_addr          = ifa->ia_addr;
+		in6_event_data.ia_net           = ifa->ia_net;
+		in6_event_data.ia_dstaddr       = ifa->ia_dstaddr;
+		in6_event_data.ia_prefixmask    = ifa->ia_prefixmask;
+		in6_event_data.ia_plen          = ifa->ia_plen;
+		in6_event_data.ia6_flags        = (u_int32_t)ifa->ia6_flags;

 		/* retrieve time as calendar time (last arg is 1) */
 		in6ifa_getlifetime(ifa, &ia6_lt, 1);
@@ -3850,12 +3966,13 @@ in6_post_msg(struct ifnet *ifp, u_int32_t event_code, struct in6_ifaddr *ifa,
 		in6_event_data.link_data.if_unit = (u_int32_t)ifp->if_unit;
 	}

-	if (mac != NULL)
-		memcpy(&in6_event_data.ia_mac, mac,
+	if (mac != NULL) {
+		memcpy(&in6_event_data.ia_mac, mac,
 		    sizeof(in6_event_data.ia_mac));
+	}

 	ev_msg.dv[0].data_ptr = &in6_event_data;
-	ev_msg.dv[0].data_length = sizeof (in6_event_data);
+	ev_msg.dv[0].data_length = sizeof(in6_event_data);
 	ev_msg.dv[1].data_length = 0;

 	dlil_post_complete_msg(NULL, &ev_msg);
@@ -3870,10 +3987,10 @@ in6_ifaddr_init(void)
 	in6_cga_init();
 	in6_multi_init();

-	PE_parse_boot_argn("ifa_debug", &in6ifa_debug, sizeof (in6ifa_debug));
+	PE_parse_boot_argn("ifa_debug", &in6ifa_debug, sizeof(in6ifa_debug));

-	in6ifa_size = (in6ifa_debug == 0) ? sizeof (struct in6_ifaddr) :
-	    sizeof (struct in6_ifaddr_dbg);
+	in6ifa_size = (in6ifa_debug == 0) ? sizeof(struct in6_ifaddr) :
+	    sizeof(struct in6_ifaddr_dbg);

 	in6ifa_zone = zinit(in6ifa_size, IN6IFA_ZONE_MAX * in6ifa_size,
 	    0, IN6IFA_ZONE_NAME);
@@ -3911,7 +4028,7 @@ in6_ifaddr_alloc(int how)
 		}
 	}

-	return (in6ifa);
+	return in6ifa;
 }

 static void
@@ -3931,7 +4048,7 @@ in6_ifaddr_free(struct ifaddr *ifa)
 		    (struct in6_ifaddr_dbg *)ifa;
 		ctrace_record(&in6ifa_dbg->in6ifa_free);
 		bcopy(&in6ifa_dbg->in6ifa, &in6ifa_dbg->in6ifa_old,
-		    sizeof (struct in6_ifaddr));
+		    sizeof(struct in6_ifaddr));
 		if (ifa->ifa_debug & IFD_TRASHED) {
 			/* Become a regular mutex, just in case */
 			IFA_CONVERT_LOCK(ifa);
@@ -3944,7 +4061,7 @@ in6_ifaddr_free(struct ifaddr *ifa)
 	}
 	IFA_UNLOCK(ifa);
 	ifa_lock_destroy(ifa);
-	bzero(ifa, sizeof (struct in6_ifaddr));
+	bzero(ifa, sizeof(struct in6_ifaddr));
 	zfree(in6ifa_zone, ifa);
 }

@@ -4024,18 +4141,20 @@ in6_getassocids(struct socket *so, uint32_t *cnt, user_addr_t aidp)
 	struct in6pcb *in6p = sotoin6pcb(so);
 	sae_associd_t aid;

-	if (in6p == NULL || in6p->inp_state == INPCB_STATE_DEAD)
-		return (EINVAL);
+	if (in6p == NULL || in6p->inp_state == INPCB_STATE_DEAD) {
+		return EINVAL;
+	}

 	/* IN6PCB has no concept of association */
 	aid = SAE_ASSOCID_ANY;
 	*cnt = 0;

 	/* just asking how many there are? */
-	if (aidp == USER_ADDR_NULL)
-		return (0);
+	if (aidp == USER_ADDR_NULL) {
+		return 0;
+	}

-	return (copyout(&aid, aidp, sizeof (aid)));
+	return copyout(&aid, aidp, sizeof(aid));
 }

 /*
@@ -4048,23 +4167,26 @@ in6_getconnids(struct socket *so, sae_associd_t aid, uint32_t *cnt,
 	struct in6pcb *in6p = sotoin6pcb(so);
 	sae_connid_t cid;

-	if (in6p == NULL || in6p->inp_state == INPCB_STATE_DEAD)
-		return (EINVAL);
+	if (in6p == NULL || in6p->inp_state == INPCB_STATE_DEAD) {
+		return EINVAL;
+	}

-	if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL)
-		return (EINVAL);
+	if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) {
+		return EINVAL;
+	}

 	/* if connected, return 1 connection count */
 	*cnt = ((so->so_state & SS_ISCONNECTED) ? 1 : 0);

 	/* just asking how many there are? */
-	if (cidp == USER_ADDR_NULL)
-		return (0);
+	if (cidp == USER_ADDR_NULL) {
+		return 0;
+	}

 	/* if IN6PCB is connected, assign it connid 1 */
 	cid = ((*cnt != 0) ? 1 : SAE_CONNID_ANY);

-	return (copyout(&cid, cidp, sizeof (cid)));
+	return copyout(&cid, cidp, sizeof(cid));
 }
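
The in6_getconninfo hunks that follow repeat one pattern: report the required size when the caller passes zero length, otherwise clamp with min() and copy out. A userspace sketch with memcpy standing in for copyout; the helper name is illustrative:

    #include <stddef.h>
    #include <string.h>

    /* size-probe-or-copy, the shape of the src/dst/aux handling below */
    static int report_or_copy(const void *src, size_t src_len,
        void *dst, size_t *dst_len)
    {
    	if (*dst_len == 0) {		/* caller is probing for the size */
    		*dst_len = src_len;
    		return 0;
    	}
    	if (*dst_len > src_len) {	/* clamp, as min(*len, sizeof(..)) */
    		*dst_len = src_len;
    	}
    	memcpy(dst, src, *dst_len);	/* the kernel uses copyout() here */
    	return 0;
    }
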
 /*
@@ -4100,17 +4222,21 @@ in6_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags,
 	*ifindex = ((ifp != NULL) ? ifp->if_index : 0);
 	*soerror = so->so_error;
 	*flags = 0;
-	if (so->so_state & SS_ISCONNECTED)
+	if (so->so_state & SS_ISCONNECTED) {
 		*flags |= (CIF_CONNECTED | CIF_PREFERRED);
-	if (in6p->in6p_flags & INP_BOUND_IF)
+	}
+	if (in6p->in6p_flags & INP_BOUND_IF) {
 		*flags |= CIF_BOUND_IF;
-	if (!(in6p->in6p_flags & INP_IN6ADDR_ANY))
+	}
+	if (!(in6p->in6p_flags & INP_IN6ADDR_ANY)) {
 		*flags |= CIF_BOUND_IP;
-	if (!(in6p->in6p_flags & INP_ANONPORT))
+	}
+	if (!(in6p->in6p_flags & INP_ANONPORT)) {
 		*flags |= CIF_BOUND_PORT;
+	}

-	bzero(&sin6, sizeof (sin6));
-	sin6.sin6_len = sizeof (sin6);
+	bzero(&sin6, sizeof(sin6));
+	sin6.sin6_len = sizeof(sin6);
 	sin6.sin6_family = AF_INET6;

 	/* source address and port */
@@ -4120,10 +4246,11 @@ in6_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags,
 		*src_len = sin6.sin6_len;
 	} else {
 		if (src != USER_ADDR_NULL) {
-			copy_len = min(*src_len, sizeof (sin6));
+			copy_len = min(*src_len, sizeof(sin6));
 			error = copyout(&sin6, src, copy_len);
-			if (error != 0)
+			if (error != 0) {
 				goto out;
+			}
 			*src_len = copy_len;
 		}
 	}
@@ -4135,10 +4262,11 @@ in6_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags,
 		*dst_len = sin6.sin6_len;
 	} else {
 		if (dst != USER_ADDR_NULL) {
-			copy_len = min(*dst_len, sizeof (sin6));
+			copy_len = min(*dst_len, sizeof(sin6));
 			error = copyout(&sin6, dst, copy_len);
-			if (error != 0)
+			if (error != 0) {
 				goto out;
+			}
 			*dst_len = copy_len;
 		}
 	}
@@ -4148,15 +4276,16 @@ in6_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags,

 	*aux_type = CIAUX_TCP;
 	if (*aux_len == 0) {
-		*aux_len = sizeof (tcp_ci);
+		*aux_len = sizeof(tcp_ci);
 	} else {
 		if (aux_data != USER_ADDR_NULL) {
-			copy_len = min(*aux_len, sizeof (tcp_ci));
-			bzero(&tcp_ci, sizeof (tcp_ci));
+			copy_len = min(*aux_len, sizeof(tcp_ci));
+			bzero(&tcp_ci, sizeof(tcp_ci));
 			tcp_getconninfo(so, &tcp_ci);
 			error = copyout(&tcp_ci, aux_data, copy_len);
-			if (error != 0)
+			if (error != 0) {
 				goto out;
+			}
 			*aux_len = copy_len;
 		}
 	}
@@ -4166,7 +4295,7 @@ in6_getconninfo(struct socket *so, sae_connid_t cid, uint32_t *flags,
 	}

out:
-	return (error);
+	return error;
 }

 /*
@@ -4241,8 +4370,9 @@ in6_lltable_new(const struct in6_addr *addr6, u_int flags)
 	struct in6_llentry *lle;

 	MALLOC(lle, struct in6_llentry *, sizeof(struct in6_llentry),
 	    M_LLTABLE, M_NOWAIT | M_ZERO);
-	if (lle == NULL)		/* NB: caller generates msg */
+	if (lle == NULL) {              /* NB: caller generates msg */
 		return NULL;
+	}

 	lle->base.r_l3addr.addr6 = *addr6;
 	lle->base.lle_refcnt = 1;
@@ -4261,12 +4391,12 @@ in6_lltable_new(const struct in6_addr *addr6, u_int flags)
 		return NULL;
 	}
#endif
-	return (&lle->base);
+	return &lle->base;
 }

 static int
 in6_lltable_match_prefix(const struct sockaddr *saddr,
-	const struct sockaddr *smask, u_int flags, struct llentry *lle)
+    const struct sockaddr *smask, u_int flags, struct llentry *lle)
 {
 	const struct in6_addr *addr, *mask, *lle_addr;

@@ -4274,8 +4404,9 @@ in6_lltable_match_prefix(const struct sockaddr *saddr,
 	mask = &((const struct sockaddr_in6 *)(const void *)smask)->sin6_addr;
 	lle_addr = &lle->r_l3addr.addr6;

-	if (IN6_ARE_MASKED_ADDR_EQUAL(lle_addr, addr, mask) == 0)
-		return (0);
+	if (IN6_ARE_MASKED_ADDR_EQUAL(lle_addr, addr, mask) == 0) {
+		return 0;
+	}

 	if (lle->la_flags & LLE_IFADDR) {
 		/*
@@ -4284,16 +4415,18 @@ in6_lltable_match_prefix(const struct sockaddr *saddr,
 		 * being matched.
 		 */
 		if (IN6_ARE_ADDR_EQUAL(addr, lle_addr) &&
-		    (flags & LLE_STATIC) != 0)
-			return (1);
-		return (0);
+		    (flags & LLE_STATIC) != 0) {
+			return 1;
+		}
+		return 0;
 	}

 	/* flags & LLE_STATIC means deleting both dynamic and static entries */
-	if ((flags & LLE_STATIC) || !(lle->la_flags & LLE_STATIC))
-		return (1);
+	if ((flags & LLE_STATIC) || !(lle->la_flags & LLE_STATIC)) {
+		return 1;
+	}

-	return (0);
+	return 0;
 }

 static void
@@ -4313,21 +4446,22 @@ in6_lltable_free_entry(struct lltable *llt, struct llentry *lle)

#if 0
 	/* XXX TBD */
-	if (thread_call_cancel(lle->lle_timer) == TRUE)
+	if (thread_call_cancel(lle->lle_timer) == TRUE) {
 		LLE_REMREF(lle);
+	}
#endif
 	llentry_free(lle);
 }

 static int
 in6_lltable_rtcheck(struct ifnet *ifp,
-	u_int flags, const struct sockaddr *l3addr)
+    u_int flags, const struct sockaddr *l3addr)
 {
#pragma unused(flags)
 	struct rtentry *rt;

 	KASSERT(l3addr->sa_family == AF_INET6,
-	    ("sin_family %d", l3addr->sa_family));
+	    ("sin_family %d", l3addr->sa_family));
 	/* XXX rtalloc1 should take a const param */
 	rt = rtalloc1(__DECONST(struct sockaddr *, l3addr), 0, 0);
 	if (rt == NULL || (rt->rt_flags & RTF_GATEWAY) || rt->rt_ifp != ifp) {
@@ -4340,14 +4474,16 @@ in6_lltable_rtcheck(struct ifnet *ifp,
 		ifa = ifaof_ifpforaddr(__DECONST(struct sockaddr *, l3addr), ifp);
 		if (ifa != NULL) {
 			IFA_REMREF(ifa);
-			if (rt != NULL)
+			if (rt != NULL) {
 				rtfree(rt);
+			}
 			return 0;
 		}
 		log(LOG_INFO, "IPv6 address: \"%s\" is not on the network\n",
 		    ip6_sprintf(&((const struct sockaddr_in6 *)(const void *)l3addr)->sin6_addr));
-		if (rt != NULL)
+		if (rt != NULL) {
 			rtfree(rt);
+		}
 		return EINVAL;
 	}
 	rtfree(rt);
@@ -4357,13 +4493,13 @@ in6_lltable_rtcheck(struct ifnet *ifp,
 static inline uint32_t
 in6_lltable_hash_dst(const struct in6_addr *dst, uint32_t hsize)
 {
-	return (IN6_LLTBL_HASH(dst->s6_addr32[3], hsize));
+	return IN6_LLTBL_HASH(dst->s6_addr32[3], hsize);
 }

 static uint32_t
 in6_lltable_hash(const struct llentry *lle, uint32_t hsize)
 {
-	return (in6_lltable_hash_dst(&lle->r_l3addr.addr6, hsize));
+	return in6_lltable_hash_dst(&lle->r_l3addr.addr6, hsize);
 }

 static void
@@ -4388,13 +4524,15 @@ in6_lltable_find_dst(struct lltable *llt, const struct in6_addr *dst)
 	hashidx = in6_lltable_hash_dst(dst, llt->llt_hsize);
 	lleh = &llt->lle_head[hashidx];
 	LIST_FOREACH(lle, lleh, lle_next) {
-		if (lle->la_flags & LLE_DELETED)
+		if (lle->la_flags & LLE_DELETED) {
 			continue;
-		if (IN6_ARE_ADDR_EQUAL(&lle->r_l3addr.addr6, dst))
+		}
+		if (IN6_ARE_ADDR_EQUAL(&lle->r_l3addr.addr6, dst)) {
 			break;
+		}
 	}

-	return (lle);
+	return lle;
 }

 static void
@@ -4411,14 +4549,14 @@ in6_lltable_delete_entry(struct lltable *llt, struct llentry *lle)

 static struct llentry *
 in6_lltable_alloc(struct lltable *llt, u_int flags,
-	const struct sockaddr *l3addr)
+    const struct sockaddr *l3addr)
 {
 	const struct sockaddr_in6 *sin6 =
 	    (const struct sockaddr_in6 *)(const void *)l3addr;
 	struct ifnet *ifp = llt->llt_ifp;
 	struct llentry *lle;

 	KASSERT(l3addr->sa_family == AF_INET6,
-	    ("sin_family %d", l3addr->sa_family));
+	    ("sin_family %d", l3addr->sa_family));

 	/*
 	 * A route that covers the given address must have
@@ -4426,13 +4564,14 @@ in6_lltable_alloc(struct lltable *llt, u_int flags,
 	 * verify this.
*/ if (!(flags & LLE_IFADDR) && - in6_lltable_rtcheck(ifp, flags, l3addr) != 0) - return (NULL); + in6_lltable_rtcheck(ifp, flags, l3addr) != 0) { + return NULL; + } lle = in6_lltable_new(&sin6->sin6_addr, flags); if (lle == NULL) { log(LOG_INFO, "lla_lookup: new lle malloc failed\n"); - return (NULL); + return NULL; } lle->la_flags = flags; if ((flags & LLE_IFADDR) == LLE_IFADDR) { @@ -4440,45 +4579,49 @@ in6_lltable_alloc(struct lltable *llt, u_int flags, lle->la_flags |= LLE_STATIC; } - if ((lle->la_flags & LLE_STATIC) != 0) + if ((lle->la_flags & LLE_STATIC) != 0) { lle->ln_state = ND6_LLINFO_REACHABLE; + } - return (lle); + return lle; } static struct llentry * in6_lltable_lookup(struct lltable *llt, u_int flags, - const struct sockaddr *l3addr) + const struct sockaddr *l3addr) { const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)(const void *)l3addr; struct llentry *lle; IF_AFDATA_LOCK_ASSERT(llt->llt_ifp, llt->llt_af); KASSERT(l3addr->sa_family == AF_INET6, - ("sin_family %d", l3addr->sa_family)); + ("sin_family %d", l3addr->sa_family)); lle = in6_lltable_find_dst(llt, &sin6->sin6_addr); - if (lle == NULL) - return (NULL); + if (lle == NULL) { + return NULL; + } - KASSERT((flags & (LLE_UNLOCKED|LLE_EXCLUSIVE)) != - (LLE_UNLOCKED|LLE_EXCLUSIVE),("wrong lle request flags: 0x%X", - flags)); + KASSERT((flags & (LLE_UNLOCKED | LLE_EXCLUSIVE)) != + (LLE_UNLOCKED | LLE_EXCLUSIVE), ("wrong lle request flags: 0x%X", + flags)); - if (flags & LLE_UNLOCKED) - return (lle); + if (flags & LLE_UNLOCKED) { + return lle; + } - if (flags & LLE_EXCLUSIVE) + if (flags & LLE_EXCLUSIVE) { LLE_WLOCK(lle); - else + } else { LLE_RLOCK(lle); - return (lle); + } + return lle; } static int in6_lltable_dump_entry(struct lltable *llt, struct llentry *lle, - struct sysctl_req *wr) + struct sysctl_req *wr) { struct ifnet *ifp = llt->llt_ifp; /* XXX stack use */ @@ -4498,11 +4641,12 @@ in6_lltable_dump_entry(struct lltable *llt, struct llentry *lle, bzero(&ndpc, sizeof(ndpc)); /* skip deleted entries */ - if ((lle->la_flags & LLE_DELETED) == LLE_DELETED) - return (0); + if ((lle->la_flags & LLE_DELETED) == LLE_DELETED) { + return 0; + } /* Skip if jailed and not a valid IP of the prison. 
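 *
 * (xnu carries no jail support, so only the address fill survives
 * here.)  For orientation, the record handed to SYSCTL_OUT below
 * has the layout spelled out in the comment that follows; roughly,
 * with the field notes being illustrative:
 *
 *   struct {
 *       struct rt_msghdr    rtm;   // RTF_HOST | RTF_LLDATA, ...
 *       struct sockaddr_in6 sin6;  // the neighbor's IPv6 address
 *       struct sockaddr_dl  sdl;   // its link-layer address
 *   };                             // read by e.g. ndp(8)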
*/ lltable_fill_sa_entry(lle, - (struct sockaddr *)&ndpc.sin6); + (struct sockaddr *)&ndpc.sin6); /* * produce a msg made of: * struct rt_msghdr; @@ -4516,8 +4660,9 @@ in6_lltable_dump_entry(struct lltable *llt, struct llentry *lle, ndpc.rtm.rtm_addrs = RTA_DST | RTA_GATEWAY; /* publish */ - if (lle->la_flags & LLE_PUB) + if (lle->la_flags & LLE_PUB) { ndpc.rtm.rtm_flags |= RTF_ANNOUNCE; + } sdl = &ndpc.sdl; sdl->sdl_family = AF_LINK; sdl->sdl_len = sizeof(*sdl); @@ -4536,23 +4681,26 @@ in6_lltable_dump_entry(struct lltable *llt, struct llentry *lle, clock_get_calendar_microtime(&secs, &usecs); ndpc.rtm.rtm_rmx.rmx_expire = lle->la_expire + - lle->lle_remtime / hz + - secs - net_uptime(); + lle->lle_remtime / hz + + secs - net_uptime(); } ndpc.rtm.rtm_flags |= (RTF_HOST | RTF_LLDATA); - if (lle->la_flags & LLE_STATIC) + if (lle->la_flags & LLE_STATIC) { ndpc.rtm.rtm_flags |= RTF_STATIC; - if (lle->la_flags & LLE_IFADDR) + } + if (lle->la_flags & LLE_IFADDR) { ndpc.rtm.rtm_flags |= RTF_PINNED; - if (lle->ln_router != 0) + } + if (lle->ln_router != 0) { ndpc.rtm.rtm_flags |= RTF_GATEWAY; + } ndpc.rtm.rtm_rmx.rmx_pksent = lle->la_asked; /* Store state in rmx_weight value */ ndpc.rtm.rtm_rmx.rmx_state = lle->ln_state; ndpc.rtm.rtm_index = ifp->if_index; error = SYSCTL_OUT(wr, &ndpc, sizeof(ndpc)); - return (error); + return error; } struct lltable * @@ -4574,12 +4722,12 @@ in6_lltattach(struct ifnet *ifp) llt->llt_match_prefix = in6_lltable_match_prefix; lltable_link(llt); - return (llt); + return llt; } void in6_ip6_to_sockaddr(const struct in6_addr *ip6, u_int16_t port, - struct sockaddr_in6 *sin6, u_int32_t maxlen) + struct sockaddr_in6 *sin6, u_int32_t maxlen) { if (maxlen < sizeof(struct sockaddr_in6)) { return; @@ -4592,8 +4740,7 @@ in6_ip6_to_sockaddr(const struct in6_addr *ip6, u_int16_t port, .sin6_addr = *ip6, }; - if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) - { + if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) { sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); sin6->sin6_addr.s6_addr16[1] = 0; } @@ -4608,42 +4755,42 @@ struct in6_event { }; struct in6_event2kev in6_event2kev_array[IN6_EVENT_MAX] = { -{ - .in6_event_code = IN6_ADDR_MARKED_DUPLICATED, - .in6_event_kev_subclass = KEV_ND6_SUBCLASS, - .in6_event_kev_code = KEV_ND6_DAD_FAILURE, - .in6_event_str = "IN6_ADDR_MARKED_DUPLICATED", -}, -{ - .in6_event_code = IN6_ADDR_MARKED_DETACHED, - .in6_event_kev_subclass = KEV_ND6_SUBCLASS, - .in6_event_kev_code = KEV_ND6_ADDR_DETACHED, - .in6_event_str = "IN6_ADDR_MARKED_DETACHED", -}, -{ - .in6_event_code = IN6_ADDR_MARKED_DEPRECATED, - .in6_event_kev_subclass = KEV_ND6_SUBCLASS, - .in6_event_kev_code = KEV_ND6_ADDR_DEPRECATED, - .in6_event_str = "IN6_ADDR_MARKED_DEPRECATED", -}, -{ - .in6_event_code = IN6_NDP_RTR_EXPIRY, - .in6_event_kev_subclass = KEV_ND6_SUBCLASS, - .in6_event_kev_code = KEV_ND6_RTR_EXPIRED, - .in6_event_str = "IN6_NDP_RTR_EXPIRY", -}, -{ - .in6_event_code = IN6_NDP_PFX_EXPIRY, - .in6_event_kev_subclass = KEV_ND6_SUBCLASS, - .in6_event_kev_code = KEV_ND6_PFX_EXPIRED, - .in6_event_str = "IN6_NDP_PFX_EXPIRY", -}, -{ - .in6_event_code = IN6_NDP_ADDR_EXPIRY, - .in6_event_kev_subclass = KEV_ND6_SUBCLASS, - .in6_event_kev_code = KEV_ND6_ADDR_EXPIRED, - .in6_event_str = "IN6_NDP_ADDR_EXPIRY", -}, + { + .in6_event_code = IN6_ADDR_MARKED_DUPLICATED, + .in6_event_kev_subclass = KEV_ND6_SUBCLASS, + .in6_event_kev_code = KEV_ND6_DAD_FAILURE, + .in6_event_str = "IN6_ADDR_MARKED_DUPLICATED", + }, + { + .in6_event_code = IN6_ADDR_MARKED_DETACHED, + .in6_event_kev_subclass = 
KEV_ND6_SUBCLASS, + .in6_event_kev_code = KEV_ND6_ADDR_DETACHED, + .in6_event_str = "IN6_ADDR_MARKED_DETACHED", + }, + { + .in6_event_code = IN6_ADDR_MARKED_DEPRECATED, + .in6_event_kev_subclass = KEV_ND6_SUBCLASS, + .in6_event_kev_code = KEV_ND6_ADDR_DEPRECATED, + .in6_event_str = "IN6_ADDR_MARKED_DEPRECATED", + }, + { + .in6_event_code = IN6_NDP_RTR_EXPIRY, + .in6_event_kev_subclass = KEV_ND6_SUBCLASS, + .in6_event_kev_code = KEV_ND6_RTR_EXPIRED, + .in6_event_str = "IN6_NDP_RTR_EXPIRY", + }, + { + .in6_event_code = IN6_NDP_PFX_EXPIRY, + .in6_event_kev_subclass = KEV_ND6_SUBCLASS, + .in6_event_kev_code = KEV_ND6_PFX_EXPIRED, + .in6_event_str = "IN6_NDP_PFX_EXPIRY", + }, + { + .in6_event_code = IN6_NDP_ADDR_EXPIRY, + .in6_event_kev_subclass = KEV_ND6_SUBCLASS, + .in6_event_kev_code = KEV_ND6_ADDR_EXPIRED, + .in6_event_str = "IN6_NDP_ADDR_EXPIRY", + }, }; void @@ -4694,8 +4841,7 @@ in6_event_callback(void *arg) &p_in6_ev->in6_address, p_in6_ev->val); } -struct in6_event_nwk_wq_entry -{ +struct in6_event_nwk_wq_entry { struct nwk_wq_entry nwk_wqe; struct in6_event in6_ev_arg; }; diff --git a/bsd/netinet6/in6.h b/bsd/netinet6/in6.h index e057fd9eb..59967ec1b 100644 --- a/bsd/netinet6/in6.h +++ b/bsd/netinet6/in6.h @@ -92,11 +92,11 @@ #ifndef __KAME_NETINET_IN_H_INCLUDED_ #error "do not include netinet6/in6.h directly, include netinet/in.h. " \ - " see RFC2553" + " see RFC2553" #endif #ifndef _NETINET6_IN6_H_ -#define _NETINET6_IN6_H_ +#define _NETINET6_IN6_H_ #include #include #include @@ -106,8 +106,8 @@ * for *BSD-current/release: http://www.kame.net/dev/cvsweb.cgi/kame/COVERAGE * has the table of implementation/integration differences. */ -#define __KAME__ -#define __KAME_VERSION "2009/apple-darwin" +#define __KAME__ +#define __KAME_VERSION "2009/apple-darwin" #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* @@ -138,11 +138,11 @@ * The range is IPPORT_RESERVEDMIN to IPPORT_RESERVEDMAX. 
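 *
 * Summarizing the constants defined below (the values are as
 * given; the grouping is illustrative):
 *
 *   anonymous/default:  IPV6PORT_ANONMIN..IPV6PORT_ANONMAX
 *                       (49152..65535)
 *   privileged:         ports below IPV6PORT_RESERVED (1024)
 *   "low" portrange:    IPV6PORT_RESERVEDMIN..IPV6PORT_RESERVEDMAX
 *                       (600..1023)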
*/ -#define IPV6PORT_RESERVED 1024 -#define IPV6PORT_ANONMIN 49152 -#define IPV6PORT_ANONMAX 65535 -#define IPV6PORT_RESERVEDMIN 600 -#define IPV6PORT_RESERVEDMAX (IPV6PORT_RESERVED-1) +#define IPV6PORT_RESERVED 1024 +#define IPV6PORT_ANONMIN 49152 +#define IPV6PORT_ANONMAX 65535 +#define IPV6PORT_RESERVEDMIN 600 +#define IPV6PORT_RESERVEDMAX (IPV6PORT_RESERVED-1) #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ /* @@ -153,50 +153,50 @@ typedef struct in6_addr { __uint8_t __u6_addr8[16]; __uint16_t __u6_addr16[8]; __uint32_t __u6_addr32[4]; - } __u6_addr; /* 128-bit IP6 address */ + } __u6_addr; /* 128-bit IP6 address */ } in6_addr_t; -#define s6_addr __u6_addr.__u6_addr8 -#ifdef KERNEL /* XXX nonstandard */ -#define s6_addr8 __u6_addr.__u6_addr8 -#define s6_addr16 __u6_addr.__u6_addr16 -#define s6_addr32 __u6_addr.__u6_addr32 +#define s6_addr __u6_addr.__u6_addr8 +#ifdef KERNEL /* XXX nonstandard */ +#define s6_addr8 __u6_addr.__u6_addr8 +#define s6_addr16 __u6_addr.__u6_addr16 +#define s6_addr32 __u6_addr.__u6_addr32 #endif -#define INET6_ADDRSTRLEN 46 +#define INET6_ADDRSTRLEN 46 /* * Socket address for IPv6 */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SIN6_LEN +#define SIN6_LEN #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ struct sockaddr_in6 { - __uint8_t sin6_len; /* length of this struct(sa_family_t) */ - sa_family_t sin6_family; /* AF_INET6 (sa_family_t) */ - in_port_t sin6_port; /* Transport layer port # (in_port_t) */ - __uint32_t sin6_flowinfo; /* IP6 flow information */ - struct in6_addr sin6_addr; /* IP6 address */ - __uint32_t sin6_scope_id; /* scope zone index */ + __uint8_t sin6_len; /* length of this struct(sa_family_t) */ + sa_family_t sin6_family; /* AF_INET6 (sa_family_t) */ + in_port_t sin6_port; /* Transport layer port # (in_port_t) */ + __uint32_t sin6_flowinfo; /* IP6 flow information */ + struct in6_addr sin6_addr; /* IP6 address */ + __uint32_t sin6_scope_id; /* scope zone index */ }; -#ifdef KERNEL /* XXX nonstandard */ +#ifdef KERNEL /* XXX nonstandard */ /* * Local definition for masks */ -#define IN6MASK0 {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} -#define IN6MASK7 {{{ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} -#define IN6MASK16 {{{ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} -#define IN6MASK32 {{{ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} -#define IN6MASK64 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} -#define IN6MASK96 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ - 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }}} -#define IN6MASK128 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} +#define IN6MASK0 {{{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }}} +#define IN6MASK7 {{{ 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK16 {{{ 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK32 {{{ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK64 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK96 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, \ + 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }}} +#define IN6MASK128 {{{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, \ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }}} #endif #ifdef KERNEL_PRIVATE @@ -210,63 +210,63 @@ extern const struct in6_addr in6mask64; extern const struct in6_addr in6mask96; extern const struct in6_addr in6mask128; -#define SIN6(s) ((struct sockaddr_in6 *)(void *)s) -#define satosin6(sa) SIN6(sa) -#define sin6tosa(sin6) ((struct sockaddr *)(void *)(sin6)) -#define SIN6IFSCOPE(s) SIN6(s) +#define SIN6(s) ((struct sockaddr_in6 *)(void *)s) +#define satosin6(sa) SIN6(sa) +#define sin6tosa(sin6) ((struct sockaddr *)(void *)(sin6)) +#define SIN6IFSCOPE(s) SIN6(s) #endif /* KERNEL_PRIVATE */ -#ifdef KERNEL /* XXX nonstandard */ +#ifdef KERNEL /* XXX nonstandard */ /* * Macros started with IPV6_ADDR is KAME local */ #if BYTE_ORDER == BIG_ENDIAN -#define IPV6_ADDR_INT32_ONE 1 -#define IPV6_ADDR_INT32_TWO 2 -#define IPV6_ADDR_INT32_MNL 0xff010000 -#define IPV6_ADDR_INT32_MLL 0xff020000 -#define IPV6_ADDR_INT32_SMP 0x0000ffff -#define IPV6_ADDR_INT16_ULL 0xfe80 -#define IPV6_ADDR_INT16_USL 0xfec0 -#define IPV6_ADDR_INT16_MLL 0xff02 +#define IPV6_ADDR_INT32_ONE 1 +#define IPV6_ADDR_INT32_TWO 2 +#define IPV6_ADDR_INT32_MNL 0xff010000 +#define IPV6_ADDR_INT32_MLL 0xff020000 +#define IPV6_ADDR_INT32_SMP 0x0000ffff +#define IPV6_ADDR_INT16_ULL 0xfe80 +#define IPV6_ADDR_INT16_USL 0xfec0 +#define IPV6_ADDR_INT16_MLL 0xff02 #elif BYTE_ORDER == LITTLE_ENDIAN -#define IPV6_ADDR_INT32_ONE 0x01000000 -#define IPV6_ADDR_INT32_TWO 0x02000000 -#define IPV6_ADDR_INT32_MNL 0x000001ff -#define IPV6_ADDR_INT32_MLL 0x000002ff -#define IPV6_ADDR_INT32_SMP 0xffff0000 -#define IPV6_ADDR_INT16_ULL 0x80fe -#define IPV6_ADDR_INT16_USL 0xc0fe -#define IPV6_ADDR_INT16_MLL 0x02ff +#define IPV6_ADDR_INT32_ONE 0x01000000 +#define IPV6_ADDR_INT32_TWO 0x02000000 +#define IPV6_ADDR_INT32_MNL 0x000001ff +#define IPV6_ADDR_INT32_MLL 0x000002ff +#define IPV6_ADDR_INT32_SMP 0xffff0000 +#define IPV6_ADDR_INT16_ULL 0x80fe +#define IPV6_ADDR_INT16_USL 0xc0fe +#define IPV6_ADDR_INT16_MLL 0x02ff #endif #endif /* * Definition of some useful macros to handle IP6 addresses */ -#define IN6ADDR_ANY_INIT \ +#define IN6ADDR_ANY_INIT \ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }}} -#define IN6ADDR_LOOPBACK_INIT \ +#define IN6ADDR_LOOPBACK_INIT \ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IN6ADDR_NODELOCAL_ALLNODES_INIT \ +#define IN6ADDR_NODELOCAL_ALLNODES_INIT \ {{{ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} -#define IN6ADDR_INTFACELOCAL_ALLNODES_INIT \ +#define IN6ADDR_INTFACELOCAL_ALLNODES_INIT \ {{{ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} -#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ +#define IN6ADDR_LINKLOCAL_ALLNODES_INIT \ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }}} -#define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \ +#define IN6ADDR_LINKLOCAL_ALLROUTERS_INIT \ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }}} -#define IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT \ +#define IN6ADDR_LINKLOCAL_ALLV2ROUTERS_INIT \ {{{ 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x16 }}} -#define IN6ADDR_V4MAPPED_INIT \ +#define IN6ADDR_V4MAPPED_INIT \ {{{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }}} #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ @@ -287,20 +287,20 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; * in ANSI standard. */ #ifdef KERNEL -#define IN6_ARE_ADDR_EQUAL(a, b) \ +#define IN6_ARE_ADDR_EQUAL(a, b) \ (bcmp(&(a)->s6_addr[0], &(b)->s6_addr[0], \ sizeof (struct in6_addr)) == 0) #else #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IN6_ARE_ADDR_EQUAL(a, b) \ +#define IN6_ARE_ADDR_EQUAL(a, b) \ (memcmp(&(a)->s6_addr[0], &(b)->s6_addr[0], sizeof (struct in6_addr)) \ == 0) #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ #endif -#ifdef KERNEL /* non standard */ +#ifdef KERNEL /* non standard */ /* see if two addresses are equal in a scope-conscious manner. */ -#define SA6_ARE_ADDR_EQUAL(a, b) \ +#define SA6_ARE_ADDR_EQUAL(a, b) \ (((a)->sin6_scope_id == 0 || (b)->sin6_scope_id == 0 || \ ((a)->sin6_scope_id == (b)->sin6_scope_id)) && \ (bcmp(&(a)->sin6_addr, &(b)->sin6_addr, sizeof (struct in6_addr)) == 0)) @@ -309,7 +309,7 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; /* * Unspecified */ -#define IN6_IS_ADDR_UNSPECIFIED(a) \ +#define IN6_IS_ADDR_UNSPECIFIED(a) \ ((*(const __uint32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \ (*(const __uint32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \ (*(const __uint32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \ @@ -318,7 +318,7 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; /* * Loopback */ -#define IN6_IS_ADDR_LOOPBACK(a) \ +#define IN6_IS_ADDR_LOOPBACK(a) \ ((*(const __uint32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \ (*(const __uint32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \ (*(const __uint32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \ @@ -327,7 +327,7 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; /* * IPv4 compatible */ -#define IN6_IS_ADDR_V4COMPAT(a) \ +#define IN6_IS_ADDR_V4COMPAT(a) \ ((*(const __uint32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \ (*(const __uint32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \ (*(const __uint32_t *)(const void *)(&(a)->s6_addr[8]) == 0) && \ @@ -337,7 +337,7 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; /* * Mapped */ -#define IN6_IS_ADDR_V4MAPPED(a) \ +#define IN6_IS_ADDR_V4MAPPED(a) \ ((*(const __uint32_t *)(const void *)(&(a)->s6_addr[0]) == 0) && \ (*(const __uint32_t *)(const void *)(&(a)->s6_addr[4]) == 0) && \ (*(const __uint32_t *)(const void *)(&(a)->s6_addr[8]) == \ @@ -346,119 +346,119 @@ extern const struct in6_addr in6addr_linklocal_allv2routers; /* * 6to4 */ -#define IN6_IS_ADDR_6TO4(x) (ntohs((x)->s6_addr16[0]) == 0x2002) +#define IN6_IS_ADDR_6TO4(x) (ntohs((x)->s6_addr16[0]) == 0x2002) /* * KAME Scope Values */ -#ifdef KERNEL /* XXX nonstandard */ -#define IPV6_ADDR_SCOPE_NODELOCAL 0x01 -#define IPV6_ADDR_SCOPE_INTFACELOCAL 0x01 -#define IPV6_ADDR_SCOPE_LINKLOCAL 0x02 -#define IPV6_ADDR_SCOPE_SITELOCAL 0x05 -#define IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */ -#define IPV6_ADDR_SCOPE_GLOBAL 0x0e +#ifdef KERNEL /* XXX nonstandard */ +#define IPV6_ADDR_SCOPE_NODELOCAL 0x01 +#define IPV6_ADDR_SCOPE_INTFACELOCAL 0x01 +#define IPV6_ADDR_SCOPE_LINKLOCAL 0x02 +#define IPV6_ADDR_SCOPE_SITELOCAL 0x05 +#define IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */ +#define IPV6_ADDR_SCOPE_GLOBAL 0x0e #else -#define 
__IPV6_ADDR_SCOPE_NODELOCAL 0x01 -#define __IPV6_ADDR_SCOPE_INTFACELOCAL 0x01 -#define __IPV6_ADDR_SCOPE_LINKLOCAL 0x02 -#define __IPV6_ADDR_SCOPE_SITELOCAL 0x05 -#define __IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */ -#define __IPV6_ADDR_SCOPE_GLOBAL 0x0e +#define __IPV6_ADDR_SCOPE_NODELOCAL 0x01 +#define __IPV6_ADDR_SCOPE_INTFACELOCAL 0x01 +#define __IPV6_ADDR_SCOPE_LINKLOCAL 0x02 +#define __IPV6_ADDR_SCOPE_SITELOCAL 0x05 +#define __IPV6_ADDR_SCOPE_ORGLOCAL 0x08 /* just used in this file */ +#define __IPV6_ADDR_SCOPE_GLOBAL 0x0e #endif /* * Unicast Scope * Note that we must check topmost 10 bits only, not 16 bits (see RFC2373). */ -#define IN6_IS_ADDR_LINKLOCAL(a) \ +#define IN6_IS_ADDR_LINKLOCAL(a) \ (((a)->s6_addr[0] == 0xfe) && (((a)->s6_addr[1] & 0xc0) == 0x80)) -#define IN6_IS_ADDR_SITELOCAL(a) \ +#define IN6_IS_ADDR_SITELOCAL(a) \ (((a)->s6_addr[0] == 0xfe) && (((a)->s6_addr[1] & 0xc0) == 0xc0)) /* * Multicast */ -#define IN6_IS_ADDR_MULTICAST(a) ((a)->s6_addr[0] == 0xff) +#define IN6_IS_ADDR_MULTICAST(a) ((a)->s6_addr[0] == 0xff) /* * Unique Local IPv6 Unicast Addresses (per RFC 4193) */ -#define IN6_IS_ADDR_UNIQUE_LOCAL(a) \ +#define IN6_IS_ADDR_UNIQUE_LOCAL(a) \ (((a)->s6_addr[0] == 0xfc) || ((a)->s6_addr[0] == 0xfd)) -#ifdef KERNEL /* XXX nonstandard */ -#define IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) +#ifdef KERNEL /* XXX nonstandard */ +#define IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) #else -#define __IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) +#define __IPV6_ADDR_MC_SCOPE(a) ((a)->s6_addr[1] & 0x0f) #endif /* * Multicast Scope */ -#ifdef KERNEL /* refers nonstandard items */ -#define IN6_IS_ADDR_MC_NODELOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#ifdef KERNEL /* refers nonstandard items */ +#define IN6_IS_ADDR_MC_NODELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_NODELOCAL)) -#define IN6_IS_ADDR_MC_INTFACELOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_INTFACELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_INTFACELOCAL)) -#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_LINKLOCAL)) -#define IN6_IS_ADDR_MC_SITELOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_SITELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_SITELOCAL)) -#define IN6_IS_ADDR_MC_ORGLOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_ORGLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_ORGLOCAL)) -#define IN6_IS_ADDR_MC_GLOBAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_GLOBAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (IPV6_ADDR_MC_SCOPE(a) == IPV6_ADDR_SCOPE_GLOBAL)) #else -#define IN6_IS_ADDR_MC_NODELOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_NODELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_NODELOCAL)) -#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_LINKLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_LINKLOCAL)) -#define IN6_IS_ADDR_MC_SITELOCAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_SITELOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_SITELOCAL)) -#define IN6_IS_ADDR_MC_ORGLOCAL(a) \ - 
(IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_ORGLOCAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_ORGLOCAL)) -#define IN6_IS_ADDR_MC_GLOBAL(a) \ - (IN6_IS_ADDR_MULTICAST(a) && \ +#define IN6_IS_ADDR_MC_GLOBAL(a) \ + (IN6_IS_ADDR_MULTICAST(a) && \ (__IPV6_ADDR_MC_SCOPE(a) == __IPV6_ADDR_SCOPE_GLOBAL)) #endif -#ifdef KERNEL /* nonstandard */ +#ifdef KERNEL /* nonstandard */ /* * KAME Scope */ -#define IN6_IS_SCOPE_LINKLOCAL(a) \ - ((IN6_IS_ADDR_LINKLOCAL(a)) || \ +#define IN6_IS_SCOPE_LINKLOCAL(a) \ + ((IN6_IS_ADDR_LINKLOCAL(a)) || \ (IN6_IS_ADDR_MC_LINKLOCAL(a))) -#define IN6_IS_SCOPE_EMBED(a) \ - ((IN6_IS_ADDR_LINKLOCAL(a)) || \ - (IN6_IS_ADDR_MC_LINKLOCAL(a)) || \ +#define IN6_IS_SCOPE_EMBED(a) \ + ((IN6_IS_ADDR_LINKLOCAL(a)) || \ + (IN6_IS_ADDR_MC_LINKLOCAL(a)) || \ (IN6_IS_ADDR_MC_INTFACELOCAL(a))) -#define IFA6_IS_DEPRECATED(a, t) \ +#define IFA6_IS_DEPRECATED(a, t) \ ((a)->ia6_lifetime.ia6ti_preferred != 0 && \ (a)->ia6_lifetime.ia6ti_preferred < (t)) -#define IFA6_IS_INVALID(a, t) \ +#define IFA6_IS_INVALID(a, t) \ ((a)->ia6_lifetime.ia6ti_expire != 0 && \ (a)->ia6_lifetime.ia6ti_expire < (t)) #endif /* KERNEL */ #ifdef PRIVATE struct route_in6_old { - void *ro_rt; - uint32_t ro_flags; + void *ro_rt; + uint32_t ro_flags; struct sockaddr_in6 ro_dst; }; #endif /* PRIVATE */ @@ -480,11 +480,11 @@ struct route_in6 { * because the code does some casts of a 'struct route_in6 *' * to a 'struct route *'. */ - struct rtentry *ro_rt; + struct rtentry *ro_rt; struct llentry *ro_lle; - struct ifaddr *ro_srcia; - uint32_t ro_flags; /* route flags */ + struct ifaddr *ro_srcia; + uint32_t ro_flags; /* route flags */ struct sockaddr_in6 ro_dst; }; #endif /* BSD_KERNEL_PRIVATE */ @@ -517,7 +517,7 @@ struct route_in6 { * default and RFC 2292 will be obsolete. 
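 *
 * A minimal userspace sketch of the RFC 3542 ancillary-data API
 * gated by this macro (error handling omitted; illustrative only):
 *
 *   int on = 1;
 *   int s = socket(AF_INET6, SOCK_DGRAM, 0);
 *   setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
 *   // each recvmsg() then carries a cmsg of level IPPROTO_IPV6,
 *   // type IPV6_PKTINFO, holding the struct in6_pktinfo
 *   // (ipi6_addr, ipi6_ifindex) defined later in this header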
*/ #ifdef BSD_KERNEL_PRIVATE -#define __APPLE_USE_RFC_3542 1 +#define __APPLE_USE_RFC_3542 1 #endif /* BSD_KERNEL_PRIVATE */ #if defined(__APPLE_USE_RFC_3542) && defined(__APPLE_USE_RFC_2292) @@ -525,64 +525,64 @@ struct route_in6 { #endif #if 0 /* the followings are relic in IPv4 and hence are disabled */ -#define IPV6_OPTIONS 1 /* buf/ip6_opts; set/get IP6 options */ -#define IPV6_RECVOPTS 5 /* bool; receive all IP6 opts w/dgram */ -#define IPV6_RECVRETOPTS 6 /* bool; receive IP6 opts for response */ -#define IPV6_RECVDSTADDR 7 /* bool; receive IP6 dst addr w/dgram */ -#define IPV6_RETOPTS 8 /* ip6_opts; set/get IP6 options */ +#define IPV6_OPTIONS 1 /* buf/ip6_opts; set/get IP6 options */ +#define IPV6_RECVOPTS 5 /* bool; receive all IP6 opts w/dgram */ +#define IPV6_RECVRETOPTS 6 /* bool; receive IP6 opts for response */ +#define IPV6_RECVDSTADDR 7 /* bool; receive IP6 dst addr w/dgram */ +#define IPV6_RETOPTS 8 /* ip6_opts; set/get IP6 options */ #endif /* 0 */ -#define IPV6_SOCKOPT_RESERVED1 3 /* reserved for future use */ +#define IPV6_SOCKOPT_RESERVED1 3 /* reserved for future use */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define IPV6_UNICAST_HOPS 4 /* int; IP6 hops */ -#define IPV6_MULTICAST_IF 9 /* __uint8_t; set/get IP6 multicast i/f */ -#define IPV6_MULTICAST_HOPS 10 /* __uint8_t; set/get IP6 multicast hops */ -#define IPV6_MULTICAST_LOOP 11 /* __uint8_t; set/get IP6 mcast loopback */ -#define IPV6_JOIN_GROUP 12 /* ip6_mreq; join a group membership */ -#define IPV6_LEAVE_GROUP 13 /* ip6_mreq; leave a group membership */ +#define IPV6_UNICAST_HOPS 4 /* int; IP6 hops */ +#define IPV6_MULTICAST_IF 9 /* __uint8_t; set/get IP6 multicast i/f */ +#define IPV6_MULTICAST_HOPS 10 /* __uint8_t; set/get IP6 multicast hops */ +#define IPV6_MULTICAST_LOOP 11 /* __uint8_t; set/get IP6 mcast loopback */ +#define IPV6_JOIN_GROUP 12 /* ip6_mreq; join a group membership */ +#define IPV6_LEAVE_GROUP 13 /* ip6_mreq; leave a group membership */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IPV6_PORTRANGE 14 /* int; range to choose for unspec port */ -#define ICMP6_FILTER 18 /* icmp6_filter; icmp6 filter */ -#define IPV6_2292PKTINFO 19 /* bool; send/recv if, src/dst addr */ -#define IPV6_2292HOPLIMIT 20 /* bool; hop limit */ -#define IPV6_2292NEXTHOP 21 /* bool; next hop addr */ -#define IPV6_2292HOPOPTS 22 /* bool; hop-by-hop option */ -#define IPV6_2292DSTOPTS 23 /* bool; destinaion option */ -#define IPV6_2292RTHDR 24 /* ip6_rthdr: routing header */ +#define IPV6_PORTRANGE 14 /* int; range to choose for unspec port */ +#define ICMP6_FILTER 18 /* icmp6_filter; icmp6 filter */ +#define IPV6_2292PKTINFO 19 /* bool; send/recv if, src/dst addr */ +#define IPV6_2292HOPLIMIT 20 /* bool; hop limit */ +#define IPV6_2292NEXTHOP 21 /* bool; next hop addr */ +#define IPV6_2292HOPOPTS 22 /* bool; hop-by-hop option */ +#define IPV6_2292DSTOPTS 23 /* bool; destinaion option */ +#define IPV6_2292RTHDR 24 /* ip6_rthdr: routing header */ /* buf/cmsghdr; set/get IPv6 options [obsoleted by RFC3542] */ -#define IPV6_2292PKTOPTIONS 25 +#define IPV6_2292PKTOPTIONS 25 #ifdef __APPLE_USE_RFC_2292 -#define IPV6_PKTINFO IPV6_2292PKTINFO -#define IPV6_HOPLIMIT IPV6_2292HOPLIMIT -#define IPV6_NEXTHOP IPV6_2292NEXTHOP -#define IPV6_HOPOPTS IPV6_2292HOPOPTS -#define IPV6_DSTOPTS IPV6_2292DSTOPTS -#define IPV6_RTHDR IPV6_2292RTHDR -#define IPV6_PKTOPTIONS IPV6_2292PKTOPTIONS +#define IPV6_PKTINFO IPV6_2292PKTINFO +#define IPV6_HOPLIMIT IPV6_2292HOPLIMIT +#define IPV6_NEXTHOP IPV6_2292NEXTHOP 
+#define IPV6_HOPOPTS IPV6_2292HOPOPTS +#define IPV6_DSTOPTS IPV6_2292DSTOPTS +#define IPV6_RTHDR IPV6_2292RTHDR +#define IPV6_PKTOPTIONS IPV6_2292PKTOPTIONS #endif /* __APPLE_USE_RFC_2292 */ -#define IPV6_CHECKSUM 26 /* int; checksum offset for raw socket */ +#define IPV6_CHECKSUM 26 /* int; checksum offset for raw socket */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define IPV6_V6ONLY 27 /* bool; only bind INET6 at wildcard bind */ +#define IPV6_V6ONLY 27 /* bool; only bind INET6 at wildcard bind */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #ifndef KERNEL -#define IPV6_BINDV6ONLY IPV6_V6ONLY +#define IPV6_BINDV6ONLY IPV6_V6ONLY #endif /* KERNEL */ #if 1 /* IPSEC */ -#define IPV6_IPSEC_POLICY 28 /* struct; get/set security policy */ +#define IPV6_IPSEC_POLICY 28 /* struct; get/set security policy */ #endif /* 1 */ -#define IPV6_FAITH 29 /* deprecated */ +#define IPV6_FAITH 29 /* deprecated */ #if 1 /* IPV6FIREWALL */ -#define IPV6_FW_ADD 30 /* add a firewall rule to chain */ -#define IPV6_FW_DEL 31 /* delete a firewall rule from chain */ -#define IPV6_FW_FLUSH 32 /* flush firewall rule chain */ -#define IPV6_FW_ZERO 33 /* clear single/all firewall counter(s) */ -#define IPV6_FW_GET 34 /* get entire firewall rule chain */ +#define IPV6_FW_ADD 30 /* add a firewall rule to chain */ +#define IPV6_FW_DEL 31 /* delete a firewall rule from chain */ +#define IPV6_FW_FLUSH 32 /* flush firewall rule chain */ +#define IPV6_FW_ZERO 33 /* clear single/all firewall counter(s) */ +#define IPV6_FW_GET 34 /* get entire firewall rule chain */ #endif /* 1 */ /* @@ -590,8 +590,8 @@ struct route_in6 { * previous version of darwin/OS X for binary compatibility reasons * and differ from FreeBSD (values 57 and 61). See below. */ -#define IPV6_RECVTCLASS 35 /* bool; recv traffic class values */ -#define IPV6_TCLASS 36 /* int; send traffic class value */ +#define IPV6_RECVTCLASS 35 /* bool; recv traffic class values */ +#define IPV6_TCLASS 36 /* int; send traffic class value */ #ifdef __APPLE_USE_RFC_3542 /* new socket options introduced in RFC3542 */ @@ -601,7 +601,7 @@ struct route_in6 { * collision with definition of IPV6_RECVTCLASS in previous * darwin implementations */ -#define IPV6_RTHDRDSTOPTS 57 +#define IPV6_RTHDRDSTOPTS 57 /* * bool; recv if, dst addr @@ -609,113 +609,113 @@ struct route_in6 { * collision with definition of IPV6_TCLASS in previous * darwin implementations */ -#define IPV6_RECVPKTINFO 61 +#define IPV6_RECVPKTINFO 61 -#define IPV6_RECVHOPLIMIT 37 /* bool; recv hop limit */ -#define IPV6_RECVRTHDR 38 /* bool; recv routing header */ -#define IPV6_RECVHOPOPTS 39 /* bool; recv hop-by-hop option */ -#define IPV6_RECVDSTOPTS 40 /* bool; recv dst option after rthdr */ +#define IPV6_RECVHOPLIMIT 37 /* bool; recv hop limit */ +#define IPV6_RECVRTHDR 38 /* bool; recv routing header */ +#define IPV6_RECVHOPOPTS 39 /* bool; recv hop-by-hop option */ +#define IPV6_RECVDSTOPTS 40 /* bool; recv dst option after rthdr */ #ifdef KERNEL -#define IPV6_RECVRTHDRDSTOPTS 41 /* bool; recv dst option before rthdr */ +#define IPV6_RECVRTHDRDSTOPTS 41 /* bool; recv dst option before rthdr */ #endif -#define IPV6_USE_MIN_MTU 42 /* bool; send packets at the minimum MTU */ -#define IPV6_RECVPATHMTU 43 /* bool; notify an according MTU */ +#define IPV6_USE_MIN_MTU 42 /* bool; send packets at the minimum MTU */ +#define IPV6_RECVPATHMTU 43 /* bool; notify an according MTU */ /* * mtuinfo; get the current path MTU (sopt), 4 bytes int; * MTU notification (cmsg) */ -#define IPV6_PATHMTU 44 
+#define IPV6_PATHMTU 44 #if 0 /* obsoleted during 2292bis -> 3542 */ /* no data; ND reachability confirm (cmsg only/not in of RFC3542) */ -#define IPV6_REACHCONF 45 +#define IPV6_REACHCONF 45 #endif /* more new socket options introduced in RFC3542 */ -#define IPV6_3542PKTINFO 46 /* in6_pktinfo; send if, src addr */ -#define IPV6_3542HOPLIMIT 47 /* int; send hop limit */ -#define IPV6_3542NEXTHOP 48 /* sockaddr; next hop addr */ -#define IPV6_3542HOPOPTS 49 /* ip6_hbh; send hop-by-hop option */ -#define IPV6_3542DSTOPTS 50 /* ip6_dest; send dst option befor rthdr */ -#define IPV6_3542RTHDR 51 /* ip6_rthdr; send routing header */ +#define IPV6_3542PKTINFO 46 /* in6_pktinfo; send if, src addr */ +#define IPV6_3542HOPLIMIT 47 /* int; send hop limit */ +#define IPV6_3542NEXTHOP 48 /* sockaddr; next hop addr */ +#define IPV6_3542HOPOPTS 49 /* ip6_hbh; send hop-by-hop option */ +#define IPV6_3542DSTOPTS 50 /* ip6_dest; send dst option befor rthdr */ +#define IPV6_3542RTHDR 51 /* ip6_rthdr; send routing header */ -#define IPV6_PKTINFO IPV6_3542PKTINFO -#define IPV6_HOPLIMIT IPV6_3542HOPLIMIT -#define IPV6_NEXTHOP IPV6_3542NEXTHOP -#define IPV6_HOPOPTS IPV6_3542HOPOPTS -#define IPV6_DSTOPTS IPV6_3542DSTOPTS -#define IPV6_RTHDR IPV6_3542RTHDR +#define IPV6_PKTINFO IPV6_3542PKTINFO +#define IPV6_HOPLIMIT IPV6_3542HOPLIMIT +#define IPV6_NEXTHOP IPV6_3542NEXTHOP +#define IPV6_HOPOPTS IPV6_3542HOPOPTS +#define IPV6_DSTOPTS IPV6_3542DSTOPTS +#define IPV6_RTHDR IPV6_3542RTHDR -#define IPV6_AUTOFLOWLABEL 59 /* bool; attach flowlabel automagically */ +#define IPV6_AUTOFLOWLABEL 59 /* bool; attach flowlabel automagically */ -#define IPV6_DONTFRAG 62 /* bool; disable IPv6 fragmentation */ +#define IPV6_DONTFRAG 62 /* bool; disable IPv6 fragmentation */ /* int; prefer temporary addresses as the source address. */ -#define IPV6_PREFER_TEMPADDR 63 +#define IPV6_PREFER_TEMPADDR 63 /* * The following option is private; do not use it from user applications. * It is deliberately defined to the same value as IP_MSFILTER. */ -#define IPV6_MSFILTER 74 /* struct __msfilterreq; */ +#define IPV6_MSFILTER 74 /* struct __msfilterreq; */ #endif /* __APPLE_USE_RFC_3542 */ -#define IPV6_BOUND_IF 125 /* int; set/get bound interface */ +#define IPV6_BOUND_IF 125 /* int; set/get bound interface */ #ifdef PRIVATE -#define IPV6_NO_IFT_CELLULAR 6969 /* for internal use only */ -#define IPV6_OUT_IF 9696 /* for internal use only */ +#define IPV6_NO_IFT_CELLULAR 6969 /* for internal use only */ +#define IPV6_OUT_IF 9696 /* for internal use only */ #endif /* PRIVATE */ /* to define items, should talk with KAME guys first, for *BSD compatibility */ -#define IPV6_RTHDR_LOOSE 0 /* this hop need not be a neighbor. */ -#define IPV6_RTHDR_STRICT 1 /* this hop must be a neighbor. */ -#define IPV6_RTHDR_TYPE_0 0 /* IPv6 routing header type 0 */ +#define IPV6_RTHDR_LOOSE 0 /* this hop need not be a neighbor. */ +#define IPV6_RTHDR_STRICT 1 /* this hop must be a neighbor. 
*/ +#define IPV6_RTHDR_TYPE_0 0 /* IPv6 routing header type 0 */ /* * Defaults and limits for options */ -#define IPV6_DEFAULT_MULTICAST_HOPS 1 /* normally limit m'casts to 1 hop */ -#define IPV6_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */ +#define IPV6_DEFAULT_MULTICAST_HOPS 1 /* normally limit m'casts to 1 hop */ +#define IPV6_DEFAULT_MULTICAST_LOOP 1 /* normally hear sends if a member */ /* * The im6o_membership vector for each socket is now dynamically allocated at * run-time, bounded by USHRT_MAX, and is reallocated when needed, sized * according to a power-of-two increment. */ -#define IPV6_MIN_MEMBERSHIPS 31 -#define IPV6_MAX_MEMBERSHIPS 4095 +#define IPV6_MIN_MEMBERSHIPS 31 +#define IPV6_MAX_MEMBERSHIPS 4095 /* * Default resource limits for IPv6 multicast source filtering. * These may be modified by sysctl. */ -#define IPV6_MAX_GROUP_SRC_FILTER 512 /* sources per group */ -#define IPV6_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */ +#define IPV6_MAX_GROUP_SRC_FILTER 512 /* sources per group */ +#define IPV6_MAX_SOCK_SRC_FILTER 128 /* sources per socket/group */ /* * Argument structure for IPV6_JOIN_GROUP and IPV6_LEAVE_GROUP. */ struct ipv6_mreq { - struct in6_addr ipv6mr_multiaddr; - unsigned int ipv6mr_interface; + struct in6_addr ipv6mr_multiaddr; + unsigned int ipv6mr_interface; }; /* * IPV6_2292PKTINFO: Packet information(RFC2292 sec 5) */ struct in6_pktinfo { - struct in6_addr ipi6_addr; /* src/dst IPv6 address */ - unsigned int ipi6_ifindex; /* send/recv interface index */ + struct in6_addr ipi6_addr; /* src/dst IPv6 address */ + unsigned int ipi6_ifindex; /* send/recv interface index */ }; /* * Control structure for IPV6_RECVPATHMTU socket option. */ struct ip6_mtuinfo { - struct sockaddr_in6 ip6m_addr; /* or sockaddr_storage? */ + struct sockaddr_in6 ip6m_addr; /* or sockaddr_storage? */ uint32_t ip6m_mtu; }; @@ -723,9 +723,9 @@ struct ip6_mtuinfo { * Argument for IPV6_PORTRANGE: * - which range to search when port is unspecified at bind() or connect() */ -#define IPV6_PORTRANGE_DEFAULT 0 /* default range */ -#define IPV6_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */ -#define IPV6_PORTRANGE_LOW 2 /* "low" - vouchsafe security */ +#define IPV6_PORTRANGE_DEFAULT 0 /* default range */ +#define IPV6_PORTRANGE_HIGH 1 /* "high" - request firewall bypass */ +#define IPV6_PORTRANGE_LOW 2 /* "low" - vouchsafe security */ /* * Definitions for inet6 sysctl operations. @@ -733,67 +733,67 @@ struct ip6_mtuinfo { * Third level is protocol number. * Fourth level is desired variable within that protocol. 
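 *
 * A concrete example of the four-level layout described above
 * (illustrative):
 *
 *   int mib[] = { CTL_NET, PF_INET6, IPPROTO_IPV6,
 *                 IPV6CTL_FORWARDING };
 *
 * i.e. the variable reachable as "net.inet6.ip6.forwarding" via
 * sysctlbyname(3).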
*/ -#define IPV6PROTO_MAXID (IPPROTO_PIM + 1) /* don't list to IPV6PROTO_MAX */ +#define IPV6PROTO_MAXID (IPPROTO_PIM + 1) /* don't list to IPV6PROTO_MAX */ /* * Names for IP sysctl objects */ -#define IPV6CTL_FORWARDING 1 /* act as router */ -#define IPV6CTL_SENDREDIRECTS 2 /* may send redirects when forwarding */ -#define IPV6CTL_DEFHLIM 3 /* default Hop-Limit */ +#define IPV6CTL_FORWARDING 1 /* act as router */ +#define IPV6CTL_SENDREDIRECTS 2 /* may send redirects when forwarding */ +#define IPV6CTL_DEFHLIM 3 /* default Hop-Limit */ #ifdef notyet -#define IPV6CTL_DEFMTU 4 /* default MTU */ +#define IPV6CTL_DEFMTU 4 /* default MTU */ #endif -#define IPV6CTL_FORWSRCRT 5 /* forward source-routed dgrams */ -#define IPV6CTL_STATS 6 /* stats */ -#define IPV6CTL_MRTSTATS 7 /* multicast forwarding stats */ -#define IPV6CTL_MRTPROTO 8 /* multicast routing protocol */ -#define IPV6CTL_MAXFRAGPACKETS 9 /* max packets reassembly queue */ -#define IPV6CTL_SOURCECHECK 10 /* verify source route and intf */ -#define IPV6CTL_SOURCECHECK_LOGINT 11 /* minimume logging interval */ -#define IPV6CTL_ACCEPT_RTADV 12 -#define IPV6CTL_KEEPFAITH 13 /* deprecated */ -#define IPV6CTL_LOG_INTERVAL 14 -#define IPV6CTL_HDRNESTLIMIT 15 -#define IPV6CTL_DAD_COUNT 16 -#define IPV6CTL_AUTO_FLOWLABEL 17 -#define IPV6CTL_DEFMCASTHLIM 18 -#define IPV6CTL_GIF_HLIM 19 /* default HLIM for gif encap packet */ -#define IPV6CTL_KAME_VERSION 20 -#define IPV6CTL_USE_DEPRECATED 21 /* use deprec addr (RFC2462 5.5.4) */ -#define IPV6CTL_RR_PRUNE 22 /* walk timer for router renumbering */ -#if 0 /* obsolete */ -#define IPV6CTL_MAPPED_ADDR 23 +#define IPV6CTL_FORWSRCRT 5 /* forward source-routed dgrams */ +#define IPV6CTL_STATS 6 /* stats */ +#define IPV6CTL_MRTSTATS 7 /* multicast forwarding stats */ +#define IPV6CTL_MRTPROTO 8 /* multicast routing protocol */ +#define IPV6CTL_MAXFRAGPACKETS 9 /* max packets reassembly queue */ +#define IPV6CTL_SOURCECHECK 10 /* verify source route and intf */ +#define IPV6CTL_SOURCECHECK_LOGINT 11 /* minimume logging interval */ +#define IPV6CTL_ACCEPT_RTADV 12 +#define IPV6CTL_KEEPFAITH 13 /* deprecated */ +#define IPV6CTL_LOG_INTERVAL 14 +#define IPV6CTL_HDRNESTLIMIT 15 +#define IPV6CTL_DAD_COUNT 16 +#define IPV6CTL_AUTO_FLOWLABEL 17 +#define IPV6CTL_DEFMCASTHLIM 18 +#define IPV6CTL_GIF_HLIM 19 /* default HLIM for gif encap packet */ +#define IPV6CTL_KAME_VERSION 20 +#define IPV6CTL_USE_DEPRECATED 21 /* use deprec addr (RFC2462 5.5.4) */ +#define IPV6CTL_RR_PRUNE 22 /* walk timer for router renumbering */ +#if 0 /* obsolete */ +#define IPV6CTL_MAPPED_ADDR 23 #endif -#define IPV6CTL_V6ONLY 24 -#define IPV6CTL_RTEXPIRE 25 /* cloned route expiration time */ -#define IPV6CTL_RTMINEXPIRE 26 /* min value for expiration time */ -#define IPV6CTL_RTMAXCACHE 27 /* trigger level for dynamic expire */ - -#define IPV6CTL_USETEMPADDR 32 /* use temporary addresses [RFC 4941] */ -#define IPV6CTL_TEMPPLTIME 33 /* preferred lifetime for tmpaddrs */ -#define IPV6CTL_TEMPVLTIME 34 /* valid lifetime for tmpaddrs */ -#define IPV6CTL_AUTO_LINKLOCAL 35 /* automatic link-local addr assign */ -#define IPV6CTL_RIP6STATS 36 /* raw_ip6 stats */ -#define IPV6CTL_PREFER_TEMPADDR 37 /* prefer temporary addr as src */ -#define IPV6CTL_ADDRCTLPOLICY 38 /* get/set address selection policy */ -#define IPV6CTL_USE_DEFAULTZONE 39 /* use default scope zone */ - -#define IPV6CTL_MAXFRAGS 41 /* max fragments */ -#define IPV6CTL_MCAST_PMTU 44 /* enable pMTU discovery for mcast? 
*/ - -#define IPV6CTL_NEIGHBORGCTHRESH 46 -#define IPV6CTL_MAXIFPREFIXES 47 -#define IPV6CTL_MAXIFDEFROUTERS 48 -#define IPV6CTL_MAXDYNROUTES 49 -#define ICMPV6CTL_ND6_ONLINKNSRFC4861 50 +#define IPV6CTL_V6ONLY 24 +#define IPV6CTL_RTEXPIRE 25 /* cloned route expiration time */ +#define IPV6CTL_RTMINEXPIRE 26 /* min value for expiration time */ +#define IPV6CTL_RTMAXCACHE 27 /* trigger level for dynamic expire */ + +#define IPV6CTL_USETEMPADDR 32 /* use temporary addresses [RFC 4941] */ +#define IPV6CTL_TEMPPLTIME 33 /* preferred lifetime for tmpaddrs */ +#define IPV6CTL_TEMPVLTIME 34 /* valid lifetime for tmpaddrs */ +#define IPV6CTL_AUTO_LINKLOCAL 35 /* automatic link-local addr assign */ +#define IPV6CTL_RIP6STATS 36 /* raw_ip6 stats */ +#define IPV6CTL_PREFER_TEMPADDR 37 /* prefer temporary addr as src */ +#define IPV6CTL_ADDRCTLPOLICY 38 /* get/set address selection policy */ +#define IPV6CTL_USE_DEFAULTZONE 39 /* use default scope zone */ + +#define IPV6CTL_MAXFRAGS 41 /* max fragments */ +#define IPV6CTL_MCAST_PMTU 44 /* enable pMTU discovery for mcast? */ + +#define IPV6CTL_NEIGHBORGCTHRESH 46 +#define IPV6CTL_MAXIFPREFIXES 47 +#define IPV6CTL_MAXIFDEFROUTERS 48 +#define IPV6CTL_MAXDYNROUTES 49 +#define ICMPV6CTL_ND6_ONLINKNSRFC4861 50 /* New entries should be added here from current IPV6CTL_MAXID value. */ /* to define items, should talk with KAME guys first, for *BSD compatibility */ -#define IPV6CTL_MAXID 51 +#define IPV6CTL_MAXID 51 #ifdef BSD_KERNEL_PRIVATE -#define CTL_IPV6PROTO_NAMES { \ +#define CTL_IPV6PROTO_NAMES { \ { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 }, \ { 0, 0 }, \ { "tcp6", CTLTYPE_NODE }, \ @@ -842,9 +842,9 @@ struct ip6_mtuinfo { /* * Redefinition of mbuf flags */ -#define M_AUTHIPHDR M_PROTO2 -#define M_DECRYPTED M_PROTO3 -#define M_AUTHIPDGM M_PROTO5 +#define M_AUTHIPHDR M_PROTO2 +#define M_DECRYPTED M_PROTO3 +#define M_AUTHIPDGM M_PROTO5 struct cmsghdr; struct mbuf; @@ -859,9 +859,9 @@ extern u_int16_t inet6_cksum(struct mbuf *, uint32_t, uint32_t, uint32_t); extern u_int16_t inet6_cksum_buffer(const uint8_t *, uint32_t, uint32_t, uint32_t); -#define in6_cksum(_m, _n, _o, _l) \ +#define in6_cksum(_m, _n, _o, _l) \ inet6_cksum(_m, _n, _o, _l) -#define in6_cksum_buffer(_b, _n, _o, _l) \ +#define in6_cksum_buffer(_b, _n, _o, _l) \ inet6_cksum_buffer(_b, _n, _o, _l) extern int in6_addrscope(struct in6_addr *); @@ -879,9 +879,9 @@ extern int in6_sin_2_v4mapsin6_in_sock(struct sockaddr **nam); extern uint32_t in6_finalize_cksum(struct mbuf *, uint32_t, int32_t, int32_t, uint32_t); -#define in6_delayed_cksum(_m) \ +#define in6_delayed_cksum(_m) \ ((void) in6_finalize_cksum(_m, 0, 0, -1, CSUM_DELAY_IPV6_DATA)) -#define in6_delayed_cksum_offset(_m, _o, _s, _p) \ +#define in6_delayed_cksum_offset(_m, _o, _s, _p) \ ((void) in6_finalize_cksum(_m, _o, _s, _p, CSUM_DELAY_IPV6_DATA)) /* IPv6 protocol events */ @@ -891,7 +891,7 @@ extern struct eventhandler_lists_ctxt in6_evhdlr_ctxt; * If the order is changed, please make sure * in6_event2kev_array is also changed to reflect the * change in order of the enums - */ + */ typedef enum { /* Address events */ /* diff --git a/bsd/netinet6/in6_cga.c b/bsd/netinet6/in6_cga.c index 981df0a18..00352e1cd 100644 --- a/bsd/netinet6/in6_cga.c +++ b/bsd/netinet6/in6_cga.c @@ -41,9 +41,9 @@ #include #include -#define IN6_CGA_HASH1_LENGTH 8 -#define IN6_CGA_HASH2_LENGTH 14 -#define IN6_CGA_PREPARE_ZEROES 9 +#define IN6_CGA_HASH1_LENGTH 8 +#define IN6_CGA_HASH2_LENGTH 14 +#define IN6_CGA_PREPARE_ZEROES 9 struct in6_cga_hash1 { 
u_int8_t octets[IN6_CGA_HASH1_LENGTH]; @@ -100,11 +100,13 @@ in6_cga_is_prepare_valid(const struct in6_cga_prepare *prepare, VERIFY(prepare != NULL); VERIFY(pubkey != NULL && pubkey->iov_base != NULL); - if (prepare->cga_security_level == 0) - return (TRUE); + if (prepare->cga_security_level == 0) { + return TRUE; + } - if (prepare->cga_security_level > 7) - return (FALSE); + if (prepare->cga_security_level > 7) { + return FALSE; + } SHA1Init(&ctx); SHA1Update(&ctx, &prepare->cga_modifier.octets, @@ -116,34 +118,36 @@ in6_cga_is_prepare_valid(const struct in6_cga_prepare *prepare, n = 2 * (u_int) prepare->cga_security_level; VERIFY(n < SHA1_RESULTLEN); - for (i = 0; i < n; ++i) - if (sha1[i] != 0) - return (FALSE); + for (i = 0; i < n; ++i) { + if (sha1[i] != 0) { + return FALSE; + } + } - return (TRUE); + return TRUE; } /* * @brief Generate interface identifier for CGA - * XXX You may notice that following does not really - * mirror what is decribed in: - * https://tools.ietf.org/html/rfc3972#section-4 - * By design kernel here will assume that that - * modifier has been converged on by userspace - * for first part of the algorithm for the given - * security level. - * We are not doing that yet but that's how the code - * below is written. So really we are starting - * from bullet 4 of the algorithm. + * XXX You may notice that following does not really + * mirror what is decribed in: + * https://tools.ietf.org/html/rfc3972#section-4 + * By design kernel here will assume that that + * modifier has been converged on by userspace + * for first part of the algorithm for the given + * security level. + * We are not doing that yet but that's how the code + * below is written. So really we are starting + * from bullet 4 of the algorithm. * * @param prepare Pointer to object containing modifier, - * security level & externsion to be used. + * security level & externsion to be used. * @param pubkey Public key used for IID generation * @param collisions Collission count on DAD failure - * XXX We are not really re-generating IID on DAD - * failures for now. + * XXX We are not really re-generating IID on DAD + * failures for now. * @param in6 Pointer to the address containing - * the prefix. + * the prefix. 
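 *
 * In outline, a sketch of RFC 3972 section 4 from step 4 onward,
 * which is where the XXX note above says this code picks up; the
 * exact field concatenation is per that RFC, not restated by this
 * file:
 *
 *   hash1 = leftmost 8 octets of SHA-1(modifier | subnet prefix |
 *           collision count | public key | extensions)
 *   iid   = hash1, with Sec (cga_security_level) written into the
 *           three leftmost bits and the "u"/"g" bits of octet 0
 *           forced per the modified EUI-64 rules
 *
 * The modifier search of steps 1-3 (16*Sec leading zero bits of
 * hash2) is what is delegated to userspace, and is also what
 * in6_cga_is_prepare_valid() above re-verifies.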
* * @return void */ @@ -221,43 +225,50 @@ in6_cga_start(const struct in6_cga_nodecfg *cfg) privkey = cfg->cga_privkey; if (privkey.iov_base == NULL || privkey.iov_len == 0 || - privkey.iov_len >= IN6_CGA_KEY_MAXSIZE) - return (EINVAL); + privkey.iov_len >= IN6_CGA_KEY_MAXSIZE) { + return EINVAL; + } pubkey = cfg->cga_pubkey; if (pubkey.iov_base == NULL || pubkey.iov_len == 0 || - pubkey.iov_len >= IN6_CGA_KEY_MAXSIZE) - return (EINVAL); + pubkey.iov_len >= IN6_CGA_KEY_MAXSIZE) { + return EINVAL; + } prepare = &cfg->cga_prepare; - if (!in6_cga_is_prepare_valid(prepare, &pubkey)) - return (EINVAL); + if (!in6_cga_is_prepare_valid(prepare, &pubkey)) { + return EINVAL; + } in6_cga.cga_prepare = *prepare; MALLOC(privkeycopy, caddr_t, privkey.iov_len, M_IP6CGA, M_WAITOK); - if (privkeycopy == NULL) - return (ENOMEM); + if (privkeycopy == NULL) { + return ENOMEM; + } MALLOC(pubkeycopy, caddr_t, pubkey.iov_len, M_IP6CGA, M_WAITOK); if (pubkeycopy == NULL) { - if (privkeycopy != NULL) + if (privkeycopy != NULL) { FREE(privkeycopy, M_IP6CGA); - return (ENOMEM); + } + return ENOMEM; } bcopy(privkey.iov_base, privkeycopy, privkey.iov_len); privkey.iov_base = privkeycopy; - if (in6_cga.cga_privkey.iov_base != NULL) + if (in6_cga.cga_privkey.iov_base != NULL) { FREE(in6_cga.cga_privkey.iov_base, M_IP6CGA); + } in6_cga.cga_privkey = privkey; bcopy(pubkey.iov_base, pubkeycopy, pubkey.iov_len); pubkey.iov_base = pubkeycopy; - if (in6_cga.cga_pubkey.iov_base != NULL) + if (in6_cga.cga_pubkey.iov_base != NULL) { FREE(in6_cga.cga_pubkey.iov_base, M_IP6CGA); + } in6_cga.cga_pubkey = pubkey; - return (0); + return 0; } int @@ -277,7 +288,7 @@ in6_cga_stop(void) in6_cga.cga_pubkey.iov_len = 0; } - return (0); + return 0; } ssize_t @@ -291,24 +302,26 @@ in6_cga_parameters_prepare(void *output, size_t max, if (in6_cga.cga_pubkey.iov_len == 0) { /* No public key */ - return (EINVAL); + return EINVAL; } if (output == NULL || - max < in6_cga.cga_pubkey.iov_len + sizeof (modifier->octets) + 9) { + max < in6_cga.cga_pubkey.iov_len + sizeof(modifier->octets) + 9) { /* Output buffer error */ - return (EINVAL); + return EINVAL; } cursor = output; - if (modifier == NULL) modifier = &in6_cga.cga_prepare.cga_modifier; + if (modifier == NULL) { + modifier = &in6_cga.cga_prepare.cga_modifier; + } if (prefix == NULL) { static const struct in6_addr llprefix = {{{ 0xfe, 0x80 }}}; prefix = &llprefix; } - bcopy(&modifier->octets, cursor, sizeof (modifier->octets)); - cursor += sizeof (modifier->octets); + bcopy(&modifier->octets, cursor, sizeof(modifier->octets)); + cursor += sizeof(modifier->octets); *cursor++ = (char) collisions; @@ -320,7 +333,7 @@ in6_cga_parameters_prepare(void *output, size_t max, /* FUTURE: Extension fields */ - return ((ssize_t)(cursor - (caddr_t)output)); + return (ssize_t)(cursor - (caddr_t)output); } int @@ -333,22 +346,23 @@ in6_cga_generate(struct in6_cga_prepare *prepare, u_int8_t collisions, in6_cga_node_lock_assert(LCK_MTX_ASSERT_OWNED); VERIFY(in6 != NULL); - if (prepare == NULL) + if (prepare == NULL) { prepare = &in6_cga.cga_prepare; - else + } else { prepare->cga_security_level = in6_cga.cga_prepare.cga_security_level; + } pubkey = &in6_cga.cga_pubkey; if (pubkey->iov_base != NULL) { in6_cga_generate_iid(prepare, pubkey, collisions, in6); error = 0; - } - else + } else { error = EADDRNOTAVAIL; + } - return (error); + return error; } /* End of file */ diff --git a/bsd/netinet6/in6_cksum.c b/bsd/netinet6/in6_cksum.c index 394ea35b2..4df0855dc 100644 --- a/bsd/netinet6/in6_cksum.c +++ 
b/bsd/netinet6/in6_cksum.c @@ -2,7 +2,7 @@ * Copyright (c) 2009-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -149,8 +149,9 @@ in6_pseudo(const struct in6_addr *src, const struct in6_addr *dst, uint32_t x) */ w = (const uint16_t *)src; sum += w[0]; - if (!IN6_IS_SCOPE_EMBED(src)) + if (!IN6_IS_SCOPE_EMBED(src)) { sum += w[1]; + } sum += w[2]; sum += w[3]; sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7]; @@ -159,8 +160,9 @@ in6_pseudo(const struct in6_addr *src, const struct in6_addr *dst, uint32_t x) */ w = (const uint16_t *)dst; sum += w[0]; - if (!IN6_IS_SCOPE_EMBED(dst)) + if (!IN6_IS_SCOPE_EMBED(dst)) { sum += w[1]; + } sum += w[2]; sum += w[3]; sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7]; @@ -175,7 +177,7 @@ in6_pseudo(const struct in6_addr *src, const struct in6_addr *dst, uint32_t x) /* fold in carry bits */ ADDCARRY(sum); - return (sum); + return sum; } /* @@ -193,7 +195,7 @@ inet6_cksum(struct mbuf *m, uint32_t nxt, uint32_t off, uint32_t len) if (nxt != 0) { struct ip6_hdr *ip6; - unsigned char buf[sizeof (*ip6)] __attribute__((aligned(8))); + unsigned char buf[sizeof(*ip6)] __attribute__((aligned(8))); uint32_t mlen; /* @@ -203,7 +205,7 @@ inet6_cksum(struct mbuf *m, uint32_t nxt, uint32_t off, uint32_t len) * the caller setting m_pkthdr.len correctly, if the mbuf is * a M_PKTHDR one. */ - if ((mlen = m_length2(m, NULL)) < sizeof (*ip6)) { + if ((mlen = m_length2(m, NULL)) < sizeof(*ip6)) { panic("%s: mbuf %p pkt too short (%d) for IPv6 header", __func__, m, mlen); /* NOTREACHED */ @@ -214,9 +216,9 @@ inet6_cksum(struct mbuf *m, uint32_t nxt, uint32_t off, uint32_t len) * aligned, copy it to a local buffer. Note here that we * expect the data pointer to point to the IPv6 header. 
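 *
 * Two details worth noting. First, the w[1] words skipped in
 * in6_pseudo() above are where the kernel embeds the scope zone id
 * of link-local addresses (compare in6_ip6_to_sockaddr(), which
 * extracts s6_addr16[1] the same way); on the wire that word is
 * zero, so it must stay out of the pseudo-header sum. Second, the
 * alignment fallback below reduces to roughly (illustrative):
 *
 *   if (sizeof(*ip6) > m->m_len || !IP6_HDR_ALIGNED_P(mtod(m, ...)))
 *           m_copydata(m, 0, sizeof(*ip6), buf);  // aligned copy
 *   else
 *           ip6 = (struct ip6_hdr *)m->m_data;    // use in place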
*/ - if ((sizeof (*ip6) > m->m_len) || + if ((sizeof(*ip6) > m->m_len) || !IP6_HDR_ALIGNED_P(mtod(m, caddr_t))) { - m_copydata(m, 0, sizeof (*ip6), (caddr_t)buf); + m_copydata(m, 0, sizeof(*ip6), (caddr_t)buf); ip6 = (struct ip6_hdr *)(void *)buf; } else { ip6 = (struct ip6_hdr *)(void *)(m->m_data); @@ -230,7 +232,7 @@ inet6_cksum(struct mbuf *m, uint32_t nxt, uint32_t off, uint32_t len) ADDCARRY(sum); } - return (~sum & 0xffff); + return ~sum & 0xffff; } /* @@ -245,14 +247,15 @@ inet6_cksum_buffer(const uint8_t *buffer, uint32_t nxt, uint32_t off, { uint32_t sum; - if (off >= len) + if (off >= len) { panic("%s: off (%d) >= len (%d)", __func__, off, len); + } sum = b_sum16(&((const uint8_t *)buffer)[off], len); if (nxt != 0) { const struct ip6_hdr *ip6; - unsigned char buf[sizeof (*ip6)] __attribute__((aligned(8))); + unsigned char buf[sizeof(*ip6)] __attribute__((aligned(8))); /* * In case the IPv6 header is not contiguous, or not 32-bit @@ -260,7 +263,7 @@ inet6_cksum_buffer(const uint8_t *buffer, uint32_t nxt, uint32_t off, * expect the data pointer to point to the IPv6 header. */ if (!IP6_HDR_ALIGNED_P(buffer)) { - memcpy(buf, buffer, sizeof (*ip6)); + memcpy(buf, buffer, sizeof(*ip6)); ip6 = (const struct ip6_hdr *)(const void *)buf; } else { ip6 = (const struct ip6_hdr *)buffer; @@ -274,5 +277,5 @@ inet6_cksum_buffer(const uint8_t *buffer, uint32_t nxt, uint32_t off, ADDCARRY(sum); } - return (~sum & 0xffff); + return ~sum & 0xffff; } diff --git a/bsd/netinet6/in6_gif.c b/bsd/netinet6/in6_gif.c index 89037d5b1..f98fc60e3 100644 --- a/bsd/netinet6/in6_gif.c +++ b/bsd/netinet6/in6_gif.c @@ -118,72 +118,75 @@ in6_gif_output( sin6_src->sin6_family != AF_INET6 || sin6_dst->sin6_family != AF_INET6) { m_freem(m); - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } switch (family) { #if INET case AF_INET: - { + { struct ip *ip; proto = IPPROTO_IPV4; - if (mbuf_len(m) < sizeof (*ip)) { - m = m_pullup(m, sizeof (*ip)); - if (!m) - return (ENOBUFS); + if (mbuf_len(m) < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (!m) { + return ENOBUFS; + } } ip = mtod(m, struct ip *); itos = ip->ip_tos; break; - } + } #endif #if INET6 case AF_INET6: - { + { proto = IPPROTO_IPV6; - if (mbuf_len(m) < sizeof (*ip6)) { - m = m_pullup(m, sizeof (*ip6)); - if (!m) - return (ENOBUFS); + if (mbuf_len(m) < sizeof(*ip6)) { + m = m_pullup(m, sizeof(*ip6)); + if (!m) { + return ENOBUFS; + } } ip6 = mtod(m, struct ip6_hdr *); itos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; break; - } + } #endif default: #if DEBUG printf("in6_gif_output: warning: unknown family %d passed\n", - family); + family); #endif m_freem(m); - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } /* prepend new IP header */ - M_PREPEND(m, sizeof (struct ip6_hdr), M_DONTWAIT, 1); - if (m && mbuf_len(m) < sizeof (struct ip6_hdr)) - m = m_pullup(m, sizeof (struct ip6_hdr)); + M_PREPEND(m, sizeof(struct ip6_hdr), M_DONTWAIT, 1); + if (m && mbuf_len(m) < sizeof(struct ip6_hdr)) { + m = m_pullup(m, sizeof(struct ip6_hdr)); + } if (m == NULL) { printf("ENOBUFS in in6_gif_output %d\n", __LINE__); - return (ENOBUFS); + return ENOBUFS; } ip6 = mtod(m, struct ip6_hdr *); - ip6->ip6_flow = 0; - ip6->ip6_vfc &= ~IPV6_VERSION_MASK; - ip6->ip6_vfc |= IPV6_VERSION; - ip6->ip6_plen = htons((u_short)m->m_pkthdr.len); - ip6->ip6_nxt = proto; - ip6->ip6_hlim = ip6_gif_hlim; - ip6->ip6_src = sin6_src->sin6_addr; + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + ip6->ip6_plen = htons((u_short)m->m_pkthdr.len); + ip6->ip6_nxt = 
proto; + ip6->ip6_hlim = ip6_gif_hlim; + ip6->ip6_src = sin6_src->sin6_addr; /* bidirectional configured tunnel mode */ - if (!IN6_IS_ADDR_UNSPECIFIED(&sin6_dst->sin6_addr)) + if (!IN6_IS_ADDR_UNSPECIFIED(&sin6_dst->sin6_addr)) { ip6->ip6_dst = sin6_dst->sin6_addr; - else { + } else { m_freem(m); - return (ENETUNREACH); + return ENETUNREACH; } ip_ecn_ingress((ifp->if_flags & IFF_LINK1) ? ECN_NORMAL : ECN_NOCARE, &otos, &itos); @@ -195,9 +198,9 @@ in6_gif_output( !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &sin6_dst->sin6_addr) || (sc->gif_ro6.ro_rt != NULL && sc->gif_ro6.ro_rt->rt_ifp == ifp)) { /* cache route doesn't match or recursive route */ - bzero(dst, sizeof (*dst)); + bzero(dst, sizeof(*dst)); dst->sin6_family = sin6_dst->sin6_family; - dst->sin6_len = sizeof (struct sockaddr_in6); + dst->sin6_len = sizeof(struct sockaddr_in6); dst->sin6_addr = sin6_dst->sin6_addr; ROUTE_RELEASE(&sc->gif_ro6); #if 0 @@ -209,18 +212,18 @@ in6_gif_output( rtalloc((struct route *)&sc->gif_ro6); if (sc->gif_ro6.ro_rt == NULL) { m_freem(m); - return (ENETUNREACH); + return ENETUNREACH; } RT_LOCK(sc->gif_ro6.ro_rt); /* if it constitutes infinite encapsulation, punt. */ if (sc->gif_ro6.ro_rt->rt_ifp == ifp) { RT_UNLOCK(sc->gif_ro6.ro_rt); m_freem(m); - return (ENETUNREACH); /* XXX */ + return ENETUNREACH; /* XXX */ } #if 0 ifp->if_mtu = sc->gif_ro6.ro_rt->rt_ifp->if_mtu - - sizeof (struct ip6_hdr); + - sizeof(struct ip6_hdr); #endif RT_UNLOCK(sc->gif_ro6.ro_rt); } @@ -231,9 +234,9 @@ in6_gif_output( * it is too painful to ask for resend of inner packet, to achieve * path MTU discovery for encapsulated packets. */ - return (ip6_output(m, 0, &sc->gif_ro6, IPV6_MINMTU, 0, NULL, NULL)); + return ip6_output(m, 0, &sc->gif_ro6, IPV6_MINMTU, 0, NULL, NULL); #else - return (ip6_output(m, 0, &sc->gif_ro6, 0, 0, NULL, NULL)); + return ip6_output(m, 0, &sc->gif_ro6, 0, 0, NULL, NULL); #endif } @@ -254,7 +257,7 @@ in6_gif_input(struct mbuf **mp, int *offp, int proto) if (gifp == NULL || (gifp->if_flags & IFF_UP) == 0) { m_freem(m); ip6stat.ip6s_nogif++; - return (IPPROTO_DONE); + return IPPROTO_DONE; } otos = ip6->ip6_flow; @@ -263,69 +266,74 @@ in6_gif_input(struct mbuf **mp, int *offp, int proto) switch (proto) { #if INET case IPPROTO_IPV4: - { + { struct ip *ip; u_int8_t otos8, old_tos; int sum; af = AF_INET; otos8 = (ntohl(otos) >> 20) & 0xff; - if (mbuf_len(m) < sizeof (*ip)) { - m = m_pullup(m, sizeof (*ip)); - if (!m) - return (IPPROTO_DONE); + if (mbuf_len(m) < sizeof(*ip)) { + m = m_pullup(m, sizeof(*ip)); + if (!m) { + return IPPROTO_DONE; + } } ip = mtod(m, struct ip *); if (gifp->if_flags & IFF_LINK1) { old_tos = ip->ip_tos; egress_success = ip_ecn_egress(ECN_NORMAL, &otos8, &ip->ip_tos); if (old_tos != ip->ip_tos) { - sum = ~ntohs(ip->ip_sum) & 0xffff; - sum += (~old_tos & 0xffff) + ip->ip_tos; - sum = (sum >> 16) + (sum & 0xffff); - sum += (sum >> 16); /* add carry */ - ip->ip_sum = htons(~sum & 0xffff); + sum = ~ntohs(ip->ip_sum) & 0xffff; + sum += (~old_tos & 0xffff) + ip->ip_tos; + sum = (sum >> 16) + (sum & 0xffff); + sum += (sum >> 16); /* add carry */ + ip->ip_sum = htons(~sum & 0xffff); } - } else + } else { egress_success = ip_ecn_egress(ECN_NOCARE, &otos8, &ip->ip_tos); + } break; - } + } #endif /* INET */ #if INET6 case IPPROTO_IPV6: - { + { af = AF_INET6; - if (mbuf_len(m) < sizeof (*ip6)) { - m = m_pullup(m, sizeof (*ip6)); - if (!m) - return (IPPROTO_DONE); + if (mbuf_len(m) < sizeof(*ip6)) { + m = m_pullup(m, sizeof(*ip6)); + if (!m) { + return IPPROTO_DONE; + } } ip6 = mtod(m, struct ip6_hdr *); 
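/*
 * As in the IPv4 arm above, IFF_LINK1 selects the tunnel ECN mode;
 * per the KAME ip6_ecn_egress() contract (a summary, not
 * authoritative):
 *
 *   ECN_NORMAL: fold the outer ECN bits into the inner header
 *               (full-functionality mode)
 *   ECN_NOCARE: leave the inner header untouched
 *
 * A zero return signals an invalid combination (e.g. outer CE with
 * inner not-ECT), and the egress_success check below drops the
 * packet.
 */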
- if (gifp->if_flags & IFF_LINK1) + if (gifp->if_flags & IFF_LINK1) { egress_success = ip6_ecn_egress(ECN_NORMAL, &otos, &ip6->ip6_flow); - else + } else { egress_success = ip6_ecn_egress(ECN_NOCARE, &otos, &ip6->ip6_flow); + } break; - } + } #endif default: ip6stat.ip6s_nogif++; m_freem(m); - return (IPPROTO_DONE); + return IPPROTO_DONE; } if (egress_success == 0) { ip6stat.ip6s_nogif++; m_freem(m); - return (IPPROTO_DONE); + return IPPROTO_DONE; } /* Replace the rcvif by gifp for ifnet_input to route it correctly */ - if (m->m_pkthdr.rcvif) + if (m->m_pkthdr.rcvif) { m->m_pkthdr.rcvif = gifp; + } ifnet_input(gifp, m, NULL); - return (IPPROTO_DONE); + return IPPROTO_DONE; } /* @@ -348,8 +356,9 @@ gif_validate6( * and the *destination* address of the packet, and vice versa. */ if (!IN6_ARE_ADDR_EQUAL(&src->sin6_addr, &ip6->ip6_dst) || - !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_src)) - return (0); + !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_src)) { + return 0; + } /* martian filters on outer source - done in ip6_input */ @@ -358,14 +367,15 @@ gif_validate6( struct sockaddr_in6 sin6; struct rtentry *rt; - bzero(&sin6, sizeof (sin6)); + bzero(&sin6, sizeof(sin6)); sin6.sin6_family = AF_INET6; - sin6.sin6_len = sizeof (struct sockaddr_in6); + sin6.sin6_len = sizeof(struct sockaddr_in6); sin6.sin6_addr = ip6->ip6_src; rt = rtalloc1((struct sockaddr *)&sin6, 0, 0); - if (rt != NULL) + if (rt != NULL) { RT_LOCK(rt); + } if (!rt || rt->rt_ifp != ifp) { #if 0 log(LOG_WARNING, "%s: packet from %s dropped " @@ -376,13 +386,13 @@ gif_validate6( RT_UNLOCK(rt); rtfree(rt); } - return (0); + return 0; } RT_UNLOCK(rt); rtfree(rt); } - return (128 * 2); + return 128 * 2; } /* @@ -406,8 +416,8 @@ gif_encapcheck6( GIF_LOCK_ASSERT(sc); - mbuf_copydata((struct mbuf *)(size_t)m, 0, sizeof (ip6), &ip6); + mbuf_copydata((struct mbuf *)(size_t)m, 0, sizeof(ip6), &ip6); ifp = ((m->m_flags & M_PKTHDR) != 0) ? 
m->m_pkthdr.rcvif : NULL; - return (gif_validate6(&ip6, sc, ifp)); + return gif_validate6(&ip6, sc, ifp); } diff --git a/bsd/netinet6/in6_gif.h b/bsd/netinet6/in6_gif.h index a517d9663..efd48ab2d 100644 --- a/bsd/netinet6/in6_gif.h +++ b/bsd/netinet6/in6_gif.h @@ -31,11 +31,11 @@ */ #ifndef _NETINET6_IN6_GIF_H_ -#define _NETINET6_IN6_GIF_H_ +#define _NETINET6_IN6_GIF_H_ #include #ifdef BSD_KERNEL_PRIVATE -#define GIF_HLIM 30 +#define GIF_HLIM 30 int in6_gif_input(struct mbuf **, int *, int); int in6_gif_output(struct ifnet *, int, struct mbuf *, struct rtentry *); diff --git a/bsd/netinet6/in6_ifattach.c b/bsd/netinet6/in6_ifattach.c index f19872e56..643e3e363 100644 --- a/bsd/netinet6/in6_ifattach.c +++ b/bsd/netinet6/in6_ifattach.c @@ -98,14 +98,14 @@ extern lck_mtx_t *nd6_mutex; #if IP6_AUTO_LINKLOCAL int ip6_auto_linklocal = IP6_AUTO_LINKLOCAL; #else -int ip6_auto_linklocal = 1; /* enable by default */ +int ip6_auto_linklocal = 1; /* enable by default */ #endif extern struct inpcbinfo udbinfo; extern struct inpcbinfo ripcbinfo; static const unsigned int in6_extra_size = sizeof(struct in6_ifextra); -static const unsigned int in6_extra_bufsize = in6_extra_size + +static const unsigned int in6_extra_bufsize = in6_extra_size + sizeof(void *) + sizeof(uint64_t); static int get_rand_iid(struct ifnet *, struct in6_addr *); @@ -127,14 +127,14 @@ static int in6_ifattach_loopback(struct ifnet *); static int get_rand_iid( __unused struct ifnet *ifp, - struct in6_addr *in6) /* upper 64bits are preserved */ + struct in6_addr *in6) /* upper 64bits are preserved */ { SHA1_CTX ctxt; u_int8_t digest[SHA1_RESULTLEN]; - int hostnlen = strlen(hostname); + int hostnlen = strlen(hostname); /* generate 8 bytes of pseudo-random value. */ - bzero(&ctxt, sizeof (ctxt)); + bzero(&ctxt, sizeof(ctxt)); SHA1Init(&ctxt); SHA1Update(&ctxt, hostname, hostnlen); SHA1Final(digest, &ctxt); @@ -143,13 +143,13 @@ get_rand_iid( bcopy(digest, &in6->s6_addr[8], 8); /* make sure to set "u" bit to local, and "g" bit to individual. */ - in6->s6_addr[8] &= ~ND6_EUI64_GBIT; /* g bit to "individual" */ - in6->s6_addr[8] |= ND6_EUI64_UBIT; /* u bit to "local" */ + in6->s6_addr[8] &= ~ND6_EUI64_GBIT; /* g bit to "individual" */ + in6->s6_addr[8] |= ND6_EUI64_UBIT; /* u bit to "local" */ /* convert EUI64 into IPv6 interface identifier */ ND6_EUI64_TO_IFID(in6); - return (0); + return 0; } static int @@ -164,15 +164,15 @@ in6_generate_tmp_iid( struct timeval tv; /* If there's no history, start with a random seed. */ - bzero(nullbuf, sizeof (nullbuf)); - if (bcmp(nullbuf, seed0, sizeof (nullbuf)) == 0) { + bzero(nullbuf, sizeof(nullbuf)); + if (bcmp(nullbuf, seed0, sizeof(nullbuf)) == 0) { int i; for (i = 0; i < 2; i++) { getmicrotime(&tv); val32 = RandomULong() ^ tv.tv_usec; - bcopy(&val32, seed + sizeof (val32) * i, - sizeof (val32)); + bcopy(&val32, seed + sizeof(val32) * i, + sizeof(val32)); } } else { bcopy(seed0, seed, 8); @@ -182,19 +182,20 @@ in6_generate_tmp_iid( /* XXX assumption on the size of IFID */ bcopy(seed1, &seed[8], 8); - if ((0)) { /* for debugging purposes only */ + if ((0)) { /* for debugging purposes only */ int i; printf("%s: new randomized ID from: ", __func__); - for (i = 0; i < 16; i++) + for (i = 0; i < 16; i++) { printf("%02x", seed[i]); + } printf(" "); } /* generate 16 bytes of pseudo-random value. 
*/ - bzero(&ctxt, sizeof (ctxt)); + bzero(&ctxt, sizeof(ctxt)); SHA1Init(&ctxt); - SHA1Update(&ctxt, seed, sizeof (seed)); + SHA1Update(&ctxt, seed, sizeof(seed)); SHA1Final(digest, &ctxt); /* @@ -210,7 +211,7 @@ in6_generate_tmp_iid( * for simplicity. If the caclculated digest happens to be zero, * use a random non-zero value as the last resort. */ - if (bcmp(nullbuf, ret, sizeof (nullbuf)) == 0) { + if (bcmp(nullbuf, ret, sizeof(nullbuf)) == 0) { nd6log((LOG_INFO, "%s: computed SHA1 value is zero.\n", __func__)); @@ -227,16 +228,17 @@ in6_generate_tmp_iid( */ bcopy(&digest[8], seed0, 8); - if ((0)) { /* for debugging purposes only */ + if ((0)) { /* for debugging purposes only */ int i; printf("to: "); - for (i = 0; i < 16; i++) + for (i = 0; i < 16; i++) { printf("%02x", digest[i]); + } printf("\n"); } - return (0); + return 0; } /* @@ -256,7 +258,7 @@ in6_iid_from_hw(struct ifnet *ifp, struct in6_addr *in6) size_t addrlen; static u_int8_t allzero[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; static u_int8_t allone[8] = - { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; int err = -1; /* Why doesn't this code use ifnet_addrs? */ @@ -265,9 +267,9 @@ in6_iid_from_hw(struct ifnet *ifp, struct in6_addr *in6) sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr; if (sdl->sdl_alen == 0) { ifnet_lock_done(ifp); - return (-1); + return -1; } - IFA_ADDREF(ifa); /* for this routine */ + IFA_ADDREF(ifa); /* for this routine */ ifnet_lock_done(ifp); IFA_LOCK(ifa); @@ -289,27 +291,31 @@ in6_iid_from_hw(struct ifnet *ifp, struct in6_addr *in6) case IFT_BRIDGE: /* IEEE802/EUI64 cases - what others? */ /* IEEE1394 uses 16byte length address starting with EUI64 */ - if (addrlen > 8) + if (addrlen > 8) { addrlen = 8; + } /* look at IEEE802/EUI64 only */ - if (addrlen != 8 && addrlen != 6) + if (addrlen != 8 && addrlen != 6) { goto done; + } /* * check for invalid MAC address - on bsdi, we see it a lot * since wildboar configures all-zero MAC on pccard before * card insertion. */ - if (bcmp(addr, allzero, addrlen) == 0) + if (bcmp(addr, allzero, addrlen) == 0) { goto done; - if (bcmp(addr, allone, addrlen) == 0) + } + if (bcmp(addr, allone, addrlen) == 0) { goto done; + } /* make EUI64 address */ - if (addrlen == 8) + if (addrlen == 8) { bcopy(addr, &in6->s6_addr[8], 8); - else if (addrlen == 6) { + } else if (addrlen == 6) { in6->s6_addr[8] = addr[0]; in6->s6_addr[9] = addr[1]; in6->s6_addr[10] = addr[2]; @@ -322,10 +328,12 @@ in6_iid_from_hw(struct ifnet *ifp, struct in6_addr *in6) break; case IFT_ARCNET: - if (addrlen != 1) + if (addrlen != 1) { goto done; - if (!addr[0]) + } + if (!addr[0]) { goto done; + } bzero(&in6->s6_addr[8], 8); in6->s6_addr[15] = addr[0]; @@ -333,8 +341,8 @@ in6_iid_from_hw(struct ifnet *ifp, struct in6_addr *in6) /* * due to insufficient bitwidth, we mark it local. 
*/ - in6->s6_addr[8] &= ~ND6_EUI64_GBIT; /* g to "individual" */ - in6->s6_addr[8] |= ND6_EUI64_UBIT; /* u to "local" */ + in6->s6_addr[8] &= ~ND6_EUI64_GBIT; /* g to "individual" */ + in6->s6_addr[8] |= ND6_EUI64_UBIT; /* u to "local" */ break; case IFT_GIF: @@ -357,8 +365,9 @@ in6_iid_from_hw(struct ifnet *ifp, struct in6_addr *in6) } /* sanity check: g bit must not indicate "group" */ - if (ND6_EUI64_GROUP(in6)) + if (ND6_EUI64_GROUP(in6)) { goto done; + } /* convert EUI64 into IPv6 interface identifier */ ND6_EUI64_TO_IFID(in6); @@ -372,7 +381,7 @@ in6_iid_from_hw(struct ifnet *ifp, struct in6_addr *in6) goto done; } - err = 0; /* found */ + err = 0; /* found */ done: /* This must not be the last reference to the lladdr */ @@ -381,7 +390,7 @@ done: /* NOTREACHED */ } IFA_UNLOCK(ifa); - return (err); + return err; } /* @@ -396,7 +405,7 @@ done: static int in6_select_iid_from_all_hw( struct ifnet *ifp0, - struct ifnet *altifp, /* secondary EUI64 source */ + struct ifnet *altifp, /* secondary EUI64 source */ struct in6_addr *in6) { struct ifnet *ifp; @@ -418,10 +427,12 @@ in6_select_iid_from_all_hw( /* next, try to get it from some other hardware interface */ ifnet_head_lock_shared(); TAILQ_FOREACH(ifp, &ifnet_head, if_list) { - if (ifp == ifp0) + if (ifp == ifp0) { continue; - if (in6_iid_from_hw(ifp, in6) != 0) + } + if (in6_iid_from_hw(ifp, in6) != 0) { continue; + } /* * to borrow IID from other interface, IID needs to be @@ -443,17 +454,17 @@ in6_select_iid_from_all_hw( } printf("%s: failed to get interface identifier\n", if_name(ifp0)); - return (-1); + return -1; success: nd6log((LOG_INFO, "%s: IID: " - "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", - if_name(ifp0), - in6->s6_addr[8], in6->s6_addr[9], - in6->s6_addr[10], in6->s6_addr[11], - in6->s6_addr[12], in6->s6_addr[13], - in6->s6_addr[14], in6->s6_addr[15])); - return (0); + "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", + if_name(ifp0), + in6->s6_addr[8], in6->s6_addr[9], + in6->s6_addr[10], in6->s6_addr[11], + in6->s6_addr[12], in6->s6_addr[13], + in6->s6_addr[14], in6->s6_addr[15])); + return 0; } static int @@ -475,12 +486,13 @@ in6_ifattach_linklocal(struct ifnet *ifp, struct in6_aliasreq *ifra) * notification is rather confusing in this case, so just * suppress it. (jinmei@kame.net 20010130) */ - if (error != EAFNOSUPPORT) + if (error != EAFNOSUPPORT) { nd6log((LOG_NOTICE, "%s: failed to " "configure a link-local address on %s " "(errno=%d)\n", __func__, if_name(ifp), error)); - return (EADDRNOTAVAIL); + } + return EADDRNOTAVAIL; } VERIFY(ia != NULL); @@ -491,7 +503,7 @@ in6_ifattach_linklocal(struct ifnet *ifp, struct in6_aliasreq *ifra) * and add it to the prefix list as a never-expire prefix. * XXX: this change might affect some existing code base... */ - bzero(&pr0, sizeof (pr0)); + bzero(&pr0, sizeof(pr0)); lck_mtx_init(&pr0.ndpr_lock, ifa_mtx_grp, ifa_mtx_attr); pr0.ndpr_ifp = ifp; /* this should be 64 at this moment. */ @@ -501,14 +513,14 @@ in6_ifattach_linklocal(struct ifnet *ifp, struct in6_aliasreq *ifra) /* apply the mask for safety. (nd6_prelist_add will apply it again) */ for (i = 0; i < 4; i++) { pr0.ndpr_prefix.sin6_addr.s6_addr32[i] &= - in6mask64.s6_addr32[i]; + in6mask64.s6_addr32[i]; } /* * Initialize parameters. The link-local prefix must always be * on-link, and its lifetimes never expire. 
*/ pr0.ndpr_raf_onlink = 1; - pr0.ndpr_raf_auto = 1; /* probably meaningless */ + pr0.ndpr_raf_auto = 1; /* probably meaningless */ pr0.ndpr_vltime = ND6_INFINITE_LIFETIME; pr0.ndpr_pltime = ND6_INFINITE_LIFETIME; pr0.ndpr_stateflags |= NDPRF_STATIC; @@ -523,7 +535,7 @@ in6_ifattach_linklocal(struct ifnet *ifp, struct in6_aliasreq *ifra) if ((error = nd6_prelist_add(&pr0, NULL, &pr, TRUE)) != 0) { IFA_REMREF(&ia->ia_ifa); lck_mtx_destroy(&pr0.ndpr_lock, ifa_mtx_grp); - return (error); + return error; } } @@ -531,30 +543,31 @@ in6_ifattach_linklocal(struct ifnet *ifp, struct in6_aliasreq *ifra) IFA_REMREF(&ia->ia_ifa); /* Drop use count held above during lookup/add */ - if (pr != NULL) + if (pr != NULL) { NDPR_REMREF(pr); + } lck_mtx_destroy(&pr0.ndpr_lock, ifa_mtx_grp); - return (0); + return 0; } static int in6_ifattach_loopback( - struct ifnet *ifp) /* must be IFT_LOOP */ + struct ifnet *ifp) /* must be IFT_LOOP */ { struct in6_aliasreq ifra; struct in6_ifaddr *ia; int error; - bzero(&ifra, sizeof (ifra)); + bzero(&ifra, sizeof(ifra)); /* * in6_update_ifa() does not use ifra_name, but we accurately set it * for safety. */ - strlcpy(ifra.ifra_name, if_name(ifp), sizeof (ifra.ifra_name)); + strlcpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name)); - ifra.ifra_prefixmask.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_prefixmask.sin6_family = AF_INET6; ifra.ifra_prefixmask.sin6_addr = in6mask128; @@ -562,11 +575,11 @@ in6_ifattach_loopback( * Always initialize ia_dstaddr (= broadcast address) to loopback * address. Follows IPv4 practice - see in_ifinit(). */ - ifra.ifra_dstaddr.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_dstaddr.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_dstaddr.sin6_family = AF_INET6; ifra.ifra_dstaddr.sin6_addr = in6addr_loopback; - ifra.ifra_addr.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_addr.sin6_family = AF_INET6; ifra.ifra_addr.sin6_addr = in6addr_loopback; @@ -584,12 +597,12 @@ in6_ifattach_loopback( "%s: failed to configure loopback address %s (error=%d)\n", __func__, if_name(ifp), error)); VERIFY(ia == NULL); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } VERIFY(ia != NULL); IFA_REMREF(&ia->ia_ifa); - return (0); + return 0; } /* @@ -610,41 +623,45 @@ in6_nigroup( SHA1_CTX ctxt; u_int8_t digest[SHA1_RESULTLEN]; char l; - char n[64]; /* a single label must not exceed 63 chars */ + char n[64]; /* a single label must not exceed 63 chars */ - if (!namelen || !name) - return (-1); + if (!namelen || !name) { + return -1; + } p = name; - while (p && *p && *p != '.' && p - name < namelen) + while (p && *p && *p != '.' && p - name < namelen) { p++; - if (p - name > sizeof (n) - 1) - return (-1); /* label too long */ + } + if (p - name > sizeof(n) - 1) { + return -1; /* label too long */ + } l = p - name; strlcpy(n, name, l); n[(int)l] = '\0'; for (q = (u_char *) n; *q; q++) { - if ('A' <= *q && *q <= 'Z') + if ('A' <= *q && *q <= 'Z') { *q = *q - 'A' + 'a'; + } } /* generate 16 bytes of pseudo-random value. 
*/ - bzero(&ctxt, sizeof (ctxt)); + bzero(&ctxt, sizeof(ctxt)); SHA1Init(&ctxt); - SHA1Update(&ctxt, &l, sizeof (l)); + SHA1Update(&ctxt, &l, sizeof(l)); SHA1Update(&ctxt, n, l); SHA1Final(digest, &ctxt); - bzero(in6, sizeof (*in6)); + bzero(in6, sizeof(*in6)); in6->s6_addr16[0] = IPV6_ADDR_INT16_MLL; in6->s6_addr8[11] = 2; in6->s6_addr8[12] = 0xff; /* copy first 3 bytes of prefix into address */ bcopy(digest, &in6->s6_addr8[13], 3); - if (in6_setscope(in6, ifp, NULL)) - return (-1); /* XXX: should not fail */ - - return (0); + if (in6_setscope(in6, ifp, NULL)) { + return -1; /* XXX: should not fail */ + } + return 0; } int @@ -656,9 +673,10 @@ in6_domifattach(struct ifnet *ifp) error = proto_plumb(PF_INET6, ifp); if (error != 0) { - if (error != EEXIST) + if (error != EEXIST) { log(LOG_ERR, "%s: proto_plumb returned %d if=%s\n", __func__, error, if_name(ifp)); + } } else { error = in6_ifattach_prelim(ifp); if (error != 0) { @@ -669,15 +687,16 @@ in6_domifattach(struct ifnet *ifp) __func__, error, ifp->if_name, ifp->if_unit); errorx = proto_unplumb(PF_INET6, ifp); - if (errorx != 0) /* XXX should not fail */ + if (errorx != 0) { /* XXX should not fail */ log(LOG_ERR, "%s: proto_unplumb returned %d if=%s%d\n", __func__, errorx, ifp->if_name, ifp->if_unit); + } } } - return (error); + return error; } int @@ -714,7 +733,7 @@ in6_ifattach_prelim(struct ifnet *ifp) nd6log0((LOG_INFO, "in6_ifattach: ", "%s is not multicast capable, IPv6 not enabled\n", if_name(ifp))); - return (EINVAL); + return EINVAL; } #if IFT_STF @@ -723,9 +742,10 @@ skipmcast: if (ifp->if_inet6data == NULL) { ext = (struct in6_ifextra *)_MALLOC(in6_extra_bufsize, M_IFADDR, - M_WAITOK|M_ZERO); - if (!ext) - return (ENOMEM); + M_WAITOK | M_ZERO); + if (!ext) { + return ENOMEM; + } base = (void *)P2ROUNDUP((intptr_t)ext + sizeof(uint64_t), sizeof(uint64_t)); VERIFY(((intptr_t)base + in6_extra_size) <= @@ -783,16 +803,17 @@ skipmcast: log(LOG_ERR, "%s: in6_ifattach_loopback returned %d\n", __func__, error, ifp->if_name, ifp->if_unit); - return (error); + return error; } } /* update dynamically. */ - if (in6_maxmtu < ifp->if_mtu) + if (in6_maxmtu < ifp->if_mtu) { in6_maxmtu = ifp->if_mtu; + } VERIFY(error == 0); - return (0); + return 0; } /* @@ -808,11 +829,13 @@ in6_ifattach_aliasreq(struct ifnet *ifp, struct ifnet *altifp, struct in6_aliasreq ifra; error = in6_ifattach_prelim(ifp); - if (error != 0) - return (error); + if (error != 0) { + return error; + } - if (!ip6_auto_linklocal) - return (0); + if (!ip6_auto_linklocal) { + return 0; + } /* * Assign a link-local address, only if there isn't one here already. @@ -824,16 +847,16 @@ in6_ifattach_aliasreq(struct ifnet *ifp, struct ifnet *altifp, ia6 = in6ifa_ifpforlinklocal(ifp, 0); if (ia6 != NULL) { IFA_REMREF(&ia6->ia_ifa); - return (0); + return 0; } - bzero(&ifra, sizeof (ifra)); + bzero(&ifra, sizeof(ifra)); /* * in6_update_ifa() does not use ifra_name, but we accurately set it * for safety. 
*/ - strlcpy(ifra.ifra_name, if_name(ifp), sizeof (ifra.ifra_name)); + strlcpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name)); /* Initialize the IPv6 interface address in our in6_aliasreq block */ if (ifra0 != NULL) { @@ -842,29 +865,34 @@ in6_ifattach_aliasreq(struct ifnet *ifp, struct ifnet *altifp, struct in6_addr *in6 = &sin6->sin6_addr; boolean_t ok = TRUE; - bcopy(&ifra0->ifra_addr, sin6, sizeof (struct sockaddr_in6)); + bcopy(&ifra0->ifra_addr, sin6, sizeof(struct sockaddr_in6)); - if (sin6->sin6_family != AF_INET6 || sin6->sin6_port != 0) + if (sin6->sin6_family != AF_INET6 || sin6->sin6_port != 0) { ok = FALSE; - if (ok && (in6->s6_addr16[0] != htons(0xfe80))) + } + if (ok && (in6->s6_addr16[0] != htons(0xfe80))) { ok = FALSE; + } if (ok) { - if (sin6->sin6_scope_id == 0 && in6->s6_addr16[1] == 0) + if (sin6->sin6_scope_id == 0 && in6->s6_addr16[1] == 0) { in6->s6_addr16[1] = htons(ifp->if_index); - else if (sin6->sin6_scope_id != 0 && - sin6->sin6_scope_id != ifp->if_index) + } else if (sin6->sin6_scope_id != 0 && + sin6->sin6_scope_id != ifp->if_index) { ok = FALSE; - else if (in6->s6_addr16[1] != 0 && - ntohs(in6->s6_addr16[1]) != ifp->if_index) + } else if (in6->s6_addr16[1] != 0 && + ntohs(in6->s6_addr16[1]) != ifp->if_index) { ok = FALSE; + } } - if (ok && (in6->s6_addr32[1] != 0)) + if (ok && (in6->s6_addr32[1] != 0)) { ok = FALSE; - if (!ok) - return (EINVAL); + } + if (!ok) { + return EINVAL; + } } else { ifra.ifra_addr.sin6_family = AF_INET6; - ifra.ifra_addr.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_addr.sin6_addr.s6_addr16[0] = htons(0xfe80); ifra.ifra_addr.sin6_addr.s6_addr16[1] = htons(ifp->if_index); ifra.ifra_addr.sin6_addr.s6_addr32[1] = 0; @@ -876,16 +904,17 @@ in6_ifattach_aliasreq(struct ifnet *ifp, struct ifnet *altifp, &ifra.ifra_addr.sin6_addr) != 0) { nd6log((LOG_ERR, "%s: no IID available\n", if_name(ifp))); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } } } - if (in6_setscope(&ifra.ifra_addr.sin6_addr, ifp, NULL)) - return (EADDRNOTAVAIL); + if (in6_setscope(&ifra.ifra_addr.sin6_addr, ifp, NULL)) { + return EADDRNOTAVAIL; + } /* Set the prefix mask */ - ifra.ifra_prefixmask.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_prefixmask.sin6_family = AF_INET6; ifra.ifra_prefixmask.sin6_addr = in6mask64; @@ -901,7 +930,7 @@ in6_ifattach_aliasreq(struct ifnet *ifp, struct ifnet *altifp, /* NB: not an error */ } - return (0); + return 0; } int @@ -915,19 +944,22 @@ in6_ifattach_llcgareq(struct ifnet *ifp, struct in6_cgareq *llcgasr) VERIFY(llcgasr != NULL); error = in6_ifattach_prelim(ifp); - if (error != 0) - return (error); + if (error != 0) { + return error; + } - if (!ip6_auto_linklocal) - return (0); + if (!ip6_auto_linklocal) { + return 0; + } - if (nd6_send_opstate == ND6_SEND_OPMODE_DISABLED) - return (ENXIO); + if (nd6_send_opstate == ND6_SEND_OPMODE_DISABLED) { + return ENXIO; + } ndi = ND_IFINFO(ifp); VERIFY(ndi != NULL && ndi->initialized); if ((ndi->flags & ND6_IFF_INSECURE) != 0) { - return (ENXIO); + return ENXIO; } /* @@ -940,14 +972,14 @@ in6_ifattach_llcgareq(struct ifnet *ifp, struct in6_cgareq *llcgasr) ia6 = in6ifa_ifpforlinklocal(ifp, 0); if (ia6 != NULL) { IFA_REMREF(&ia6->ia_ifa); - return (0); + return 0; } - bzero(&ifra, sizeof (ifra)); - strlcpy(ifra.ifra_name, if_name(ifp), sizeof (ifra.ifra_name)); + bzero(&ifra, sizeof(ifra)); + strlcpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name)); 
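
Both the in6_ifattach_aliasreq() path above and the CGA path here build the link-local address the same way: fe80::/64 with the interface index embedded in the second 16-bit word, the KAME-internal form that in6_setscope() and in6_clearscope() operate on. A standalone sketch of that layout, assuming a caller-supplied 8-byte interface identifier (the helper name is illustrative; the kernel writes these fields directly into ifra.ifra_addr):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/*
 * Build a KAME-style kernel-internal link-local address fe80::<iid>
 * with the interface index stored in bytes 2-3 (s6_addr16[1]).
 */
static void
make_embedded_linklocal(struct in6_addr *in6, uint16_t if_index,
    const uint8_t iid[8])
{
        uint16_t idx = htons(if_index);

        memset(in6, 0, sizeof(*in6));
        in6->s6_addr[0] = 0xfe;
        in6->s6_addr[1] = 0x80;
        memcpy(&in6->s6_addr[2], &idx, sizeof(idx)); /* embedded scope id */
        memcpy(&in6->s6_addr[8], iid, 8);            /* interface identifier */
}

The embedded index is also what the aliasreq validation above is checking: a caller-supplied fe80:: address must either leave bytes 2-3 zero or already carry this interface's index, and it must agree with any non-zero sin6_scope_id.
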
ifra.ifra_addr.sin6_family = AF_INET6; - ifra.ifra_addr.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_addr.sin6_addr.s6_addr16[0] = htons(0xfe80); ifra.ifra_addr.sin6_addr.s6_addr16[1] = htons(ifp->if_index); ifra.ifra_addr.sin6_addr.s6_addr32[1] = 0; @@ -957,15 +989,16 @@ in6_ifattach_llcgareq(struct ifnet *ifp, struct in6_cgareq *llcgasr) if (in6_cga_generate(&llcgasr->cgar_cgaprep, 0, &ifra.ifra_addr.sin6_addr)) { in6_cga_node_unlock(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } in6_cga_node_unlock(); - if (in6_setscope(&ifra.ifra_addr.sin6_addr, ifp, NULL)) - return (EADDRNOTAVAIL); + if (in6_setscope(&ifra.ifra_addr.sin6_addr, ifp, NULL)) { + return EADDRNOTAVAIL; + } /* Set the prefix mask */ - ifra.ifra_prefixmask.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_prefixmask.sin6_family = AF_INET6; ifra.ifra_prefixmask.sin6_addr = in6mask64; @@ -987,7 +1020,7 @@ in6_ifattach_llcgareq(struct ifnet *ifp, struct in6_cgareq *llcgasr) } VERIFY(error == 0); - return (error); + return error; } /* @@ -1008,8 +1041,9 @@ in6_ifdetach(struct ifnet *ifp) /* remove neighbor management table */ nd6_purge(ifp); - if (LLTABLE6(ifp)) + if (LLTABLE6(ifp)) { lltable_free(LLTABLE6(ifp)); + } /* nuke any of IPv6 addresses we have */ lck_rw_lock_exclusive(&in6_ifaddr_rwlock); @@ -1019,10 +1053,10 @@ in6_ifdetach(struct ifnet *ifp) ia = ia->ia_next; continue; } - IFA_ADDREF(&ia->ia_ifa); /* for us */ + IFA_ADDREF(&ia->ia_ifa); /* for us */ lck_rw_done(&in6_ifaddr_rwlock); in6_purgeaddr(&ia->ia_ifa); - IFA_REMREF(&ia->ia_ifa); /* for us */ + IFA_REMREF(&ia->ia_ifa); /* for us */ lck_rw_lock_exclusive(&in6_ifaddr_rwlock); /* * Purging the address caused in6_ifaddr_rwlock @@ -1087,10 +1121,10 @@ in6_ifdetach(struct ifnet *ifp) rt = rtalloc1((struct sockaddr *)&ia->ia_addr, 0, 0); if (rt != NULL) { (void) rtrequest(RTM_DELETE, - (struct sockaddr *)&ia->ia_addr, - (struct sockaddr *)&ia->ia_addr, - (struct sockaddr *)&ia->ia_prefixmask, - rt->rt_flags, (struct rtentry **)0); + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&ia->ia_addr, + (struct sockaddr *)&ia->ia_prefixmask, + rt->rt_flags, (struct rtentry **)0); rtfree(rt); } } else { @@ -1104,8 +1138,9 @@ in6_ifdetach(struct ifnet *ifp) if (oia == (ia = in6_ifaddrs)) { in6_ifaddrs = ia->ia_next; } else { - while (ia->ia_next && (ia->ia_next != oia)) + while (ia->ia_next && (ia->ia_next != oia)) { ia = ia->ia_next; + } if (ia->ia_next) { ia->ia_next = oia->ia_next; } else { @@ -1124,8 +1159,9 @@ in6_ifdetach(struct ifnet *ifp) * that we lost the race, since in6_ifaddr_rwlock was * momentarily dropped above. 
*/ - if (unlinked) + if (unlinked) { IFA_REMREF(ifa); + } /* release reference held for this routine */ IFA_REMREF(ifa); @@ -1153,8 +1189,8 @@ in6_ifdetach(struct ifnet *ifp) nd6_purge(ifp); /* remove route to link-local allnodes multicast (ff02::1) */ - bzero(&sin6, sizeof (sin6)); - sin6.sin6_len = sizeof (struct sockaddr_in6); + bzero(&sin6, sizeof(sin6)); + sin6.sin6_len = sizeof(struct sockaddr_in6); sin6.sin6_family = AF_INET6; sin6.sin6_addr = in6addr_linklocal_allnodes; sin6.sin6_addr.s6_addr16[1] = htons(ifp->if_index); @@ -1187,14 +1223,14 @@ in6_iid_mktmp(struct ifnet *ifp, u_int8_t *retbuf, const u_int8_t *baseid, VERIFY(ndi != NULL && ndi->initialized); lck_mtx_lock(&ndi->lock); - bzero(nullbuf, sizeof (nullbuf)); - if (bcmp(ndi->randomid, nullbuf, sizeof (nullbuf)) == 0) { + bzero(nullbuf, sizeof(nullbuf)); + if (bcmp(ndi->randomid, nullbuf, sizeof(nullbuf)) == 0) { /* we've never created a random ID. Create a new one. */ generate = 1; } if (generate) { - bcopy(baseid, ndi->randomseed1, sizeof (ndi->randomseed1)); + bcopy(baseid, ndi->randomseed1, sizeof(ndi->randomseed1)); /* in6_generate_tmp_iid will update seedn and buf */ (void) in6_generate_tmp_iid(ndi->randomseed0, ndi->randomseed1, @@ -1216,7 +1252,7 @@ in6_tmpaddrtimer(void *arg) timeout(in6_tmpaddrtimer, (caddr_t)0, (ip6_temp_preferred_lifetime - ip6_desync_factor - ip6_temp_regen_advance) * hz); - bzero(nullbuf, sizeof (nullbuf)); + bzero(nullbuf, sizeof(nullbuf)); ifnet_head_lock_shared(); for (ifp = ifnet_head.tqh_first; ifp; ifp = ifp->if_link.tqe_next) { @@ -1225,7 +1261,7 @@ in6_tmpaddrtimer(void *arg) continue; } lck_mtx_lock(&ndi->lock); - if (bcmp(ndi->randomid, nullbuf, sizeof (nullbuf)) != 0) { + if (bcmp(ndi->randomid, nullbuf, sizeof(nullbuf)) != 0) { /* * We've been generating a random ID on this interface. * Create a new one. diff --git a/bsd/netinet6/in6_ifattach.h b/bsd/netinet6/in6_ifattach.h index 24fd6e4ce..e0622d043 100644 --- a/bsd/netinet6/in6_ifattach.h +++ b/bsd/netinet6/in6_ifattach.h @@ -56,7 +56,7 @@ */ #ifndef _NETINET6_IN6_IFATTACH_H_ -#define _NETINET6_IN6_IFATTACH_H_ +#define _NETINET6_IN6_IFATTACH_H_ #include #ifdef BSD_KERNEL_PRIVATE diff --git a/bsd/netinet6/in6_mcast.c b/bsd/netinet6/in6_mcast.c index 467b3a164..65d4c090a 100644 --- a/bsd/netinet6/in6_mcast.c +++ b/bsd/netinet6/in6_mcast.c @@ -2,7 +2,7 @@ * Copyright (c) 2010-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -97,60 +97,60 @@ #include #include -static void im6f_commit(struct in6_mfilter *); -static int im6f_get_source(struct in6_mfilter *imf, - const struct sockaddr_in6 *psin, - struct in6_msource **); +static void im6f_commit(struct in6_mfilter *); +static int im6f_get_source(struct in6_mfilter *imf, + const struct sockaddr_in6 *psin, + struct in6_msource **); static struct in6_msource * - im6f_graft(struct in6_mfilter *, const uint8_t, - const struct sockaddr_in6 *); -static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *); -static void im6f_rollback(struct in6_mfilter *); -static void im6f_reap(struct in6_mfilter *); -static int im6o_grow(struct ip6_moptions *, size_t); -static size_t im6o_match_group(const struct ip6_moptions *, - const struct ifnet *, const struct sockaddr_in6 *); +im6f_graft(struct in6_mfilter *, const uint8_t, + const struct sockaddr_in6 *); +static int im6f_prune(struct in6_mfilter *, const struct sockaddr_in6 *); +static void im6f_rollback(struct in6_mfilter *); +static void im6f_reap(struct in6_mfilter *); +static int im6o_grow(struct ip6_moptions *, size_t); +static size_t im6o_match_group(const struct ip6_moptions *, + const struct ifnet *, const struct sockaddr_in6 *); static struct in6_msource * - im6o_match_source(const struct ip6_moptions *, - const size_t, const struct sockaddr_in6 *); -static void im6s_merge(struct ip6_msource *ims, - const struct in6_msource *lims, const int rollback); -static int in6_mc_get(struct ifnet *, const struct in6_addr *, - struct in6_multi **); -static int in6m_get_source(struct in6_multi *inm, - const struct in6_addr *addr, const int noalloc, - struct ip6_msource **pims); -static int in6m_is_ifp_detached(const struct in6_multi *); -static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *); -static void in6m_reap(struct in6_multi *); +im6o_match_source(const struct ip6_moptions *, + const size_t, const struct sockaddr_in6 *); +static void im6s_merge(struct ip6_msource *ims, + const struct in6_msource *lims, const int rollback); +static int in6_mc_get(struct ifnet *, const struct in6_addr *, + struct in6_multi **); +static int in6m_get_source(struct in6_multi *inm, + const struct in6_addr *addr, const int noalloc, + struct ip6_msource **pims); +static int in6m_is_ifp_detached(const struct in6_multi *); +static int in6m_merge(struct in6_multi *, /*const*/ struct in6_mfilter *); +static void in6m_reap(struct in6_multi *); static struct ip6_moptions * - in6p_findmoptions(struct inpcb *); -static int in6p_get_source_filters(struct inpcb *, struct sockopt *); -static int in6p_lookup_v4addr(struct ipv6_mreq *, struct ip_mreq *); -static int in6p_join_group(struct inpcb *, struct sockopt *); -static int in6p_leave_group(struct inpcb *, struct sockopt *); +in6p_findmoptions(struct inpcb *); +static int in6p_get_source_filters(struct inpcb *, struct sockopt *); +static int in6p_lookup_v4addr(struct ipv6_mreq *, struct ip_mreq *); +static int in6p_join_group(struct inpcb *, struct sockopt *); +static int in6p_leave_group(struct inpcb *, struct sockopt *); static struct ifnet * - in6p_lookup_mcast_ifp(const struct inpcb *, - const struct sockaddr_in6 *); -static int in6p_block_unblock_source(struct inpcb *, struct sockopt *); -static int in6p_set_multicast_if(struct inpcb *, struct sockopt *); -static int in6p_set_source_filters(struct inpcb *, struct sockopt *); -static int sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS; +in6p_lookup_mcast_ifp(const 
struct inpcb *, + const struct sockaddr_in6 *); +static int in6p_block_unblock_source(struct inpcb *, struct sockopt *); +static int in6p_set_multicast_if(struct inpcb *, struct sockopt *); +static int in6p_set_source_filters(struct inpcb *, struct sockopt *); +static int sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS; static __inline__ int ip6_msource_cmp(const struct ip6_msource *, - const struct ip6_msource *); + const struct ip6_msource *); -SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. */ +SYSCTL_DECL(_net_inet6_ip6); /* XXX Not in any common header. */ SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, mcast, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPv6 multicast"); static unsigned long in6_mcast_maxgrpsrc = IPV6_MAX_GROUP_SRC_FILTER; SYSCTL_LONG(_net_inet6_ip6_mcast, OID_AUTO, maxgrpsrc, - CTLFLAG_RW | CTLFLAG_LOCKED, &in6_mcast_maxgrpsrc, + CTLFLAG_RW | CTLFLAG_LOCKED, &in6_mcast_maxgrpsrc, "Max source filters per group"); static unsigned long in6_mcast_maxsocksrc = IPV6_MAX_SOCK_SRC_FILTER; SYSCTL_LONG(_net_inet6_ip6_mcast, OID_AUTO, maxsocksrc, - CTLFLAG_RW | CTLFLAG_LOCKED, &in6_mcast_maxsocksrc, + CTLFLAG_RW | CTLFLAG_LOCKED, &in6_mcast_maxsocksrc, "Max source filters per socket"); int in6_mcast_loop = IPV6_DEFAULT_MULTICAST_LOOP; @@ -163,20 +163,20 @@ SYSCTL_NODE(_net_inet6_ip6_mcast, OID_AUTO, filters, RB_GENERATE_PREV(ip6_msource_tree, ip6_msource, im6s_link, ip6_msource_cmp); -#define IN6M_TRACE_HIST_SIZE 32 /* size of trace history */ +#define IN6M_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ __private_extern__ unsigned int in6m_trace_hist_size = IN6M_TRACE_HIST_SIZE; struct in6_multi_dbg { - struct in6_multi in6m; /* in6_multi */ - u_int16_t in6m_refhold_cnt; /* # of ref */ - u_int16_t in6m_refrele_cnt; /* # of rele */ + struct in6_multi in6m; /* in6_multi */ + u_int16_t in6m_refhold_cnt; /* # of ref */ + u_int16_t in6m_refrele_cnt; /* # of rele */ /* * Circular lists of in6m_addref and in6m_remref callers. 
*/ - ctrace_t in6m_refhold[IN6M_TRACE_HIST_SIZE]; - ctrace_t in6m_refrele[IN6M_TRACE_HIST_SIZE]; + ctrace_t in6m_refhold[IN6M_TRACE_HIST_SIZE]; + ctrace_t in6m_refrele[IN6M_TRACE_HIST_SIZE]; /* * Trash list linkage */ @@ -188,38 +188,38 @@ static TAILQ_HEAD(, in6_multi_dbg) in6m_trash_head; static decl_lck_mtx_data(, in6m_trash_lock); #if DEBUG -static unsigned int in6m_debug = 1; /* debugging (enabled) */ +static unsigned int in6m_debug = 1; /* debugging (enabled) */ #else -static unsigned int in6m_debug; /* debugging (disabled) */ +static unsigned int in6m_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int in6m_size; /* size of zone element */ -static struct zone *in6m_zone; /* zone for in6_multi */ +static unsigned int in6m_size; /* size of zone element */ +static struct zone *in6m_zone; /* zone for in6_multi */ -#define IN6M_ZONE_MAX 64 /* maximum elements in zone */ -#define IN6M_ZONE_NAME "in6_multi" /* zone name */ +#define IN6M_ZONE_MAX 64 /* maximum elements in zone */ +#define IN6M_ZONE_NAME "in6_multi" /* zone name */ -static unsigned int imm_size; /* size of zone element */ -static struct zone *imm_zone; /* zone for in6_multi_mship */ +static unsigned int imm_size; /* size of zone element */ +static struct zone *imm_zone; /* zone for in6_multi_mship */ -#define IMM_ZONE_MAX 64 /* maximum elements in zone */ -#define IMM_ZONE_NAME "in6_multi_mship" /* zone name */ +#define IMM_ZONE_MAX 64 /* maximum elements in zone */ +#define IMM_ZONE_NAME "in6_multi_mship" /* zone name */ -#define IP6MS_ZONE_MAX 64 /* maximum elements in zone */ -#define IP6MS_ZONE_NAME "ip6_msource" /* zone name */ +#define IP6MS_ZONE_MAX 64 /* maximum elements in zone */ +#define IP6MS_ZONE_NAME "ip6_msource" /* zone name */ -static unsigned int ip6ms_size; /* size of zone element */ -static struct zone *ip6ms_zone; /* zone for ip6_msource */ +static unsigned int ip6ms_size; /* size of zone element */ +static struct zone *ip6ms_zone; /* zone for ip6_msource */ -#define IN6MS_ZONE_MAX 64 /* maximum elements in zone */ -#define IN6MS_ZONE_NAME "in6_msource" /* zone name */ +#define IN6MS_ZONE_MAX 64 /* maximum elements in zone */ +#define IN6MS_ZONE_NAME "in6_msource" /* zone name */ -static unsigned int in6ms_size; /* size of zone element */ -static struct zone *in6ms_zone; /* zone for in6_msource */ +static unsigned int in6ms_size; /* size of zone element */ +static struct zone *in6ms_zone; /* zone for in6_msource */ /* Lock group and attribute for in6_multihead_lock lock */ -static lck_attr_t *in6_multihead_lock_attr; -static lck_grp_t *in6_multihead_lock_grp; -static lck_grp_attr_t *in6_multihead_lock_grp_attr; +static lck_attr_t *in6_multihead_lock_attr; +static lck_grp_t *in6_multihead_lock_grp; +static lck_grp_attr_t *in6_multihead_lock_grp_attr; static decl_lck_rw_data(, in6_multihead_lock); struct in6_multihead in6_multihead; @@ -245,7 +245,7 @@ static void in6ms_free(struct in6_msource *); static __inline int ip6_msource_cmp(const struct ip6_msource *a, const struct ip6_msource *b) { - return (memcmp(&a->im6s_addr, &b->im6s_addr, sizeof(struct in6_addr))); + return memcmp(&a->im6s_addr, &b->im6s_addr, sizeof(struct in6_addr)); } /* @@ -257,7 +257,7 @@ in6m_is_ifp_detached(const struct in6_multi *inm) VERIFY(inm->in6m_ifma != NULL); VERIFY(inm->in6m_ifp == inm->in6m_ifma->ifma_ifp); - return (!ifnet_is_attached(inm->in6m_ifp, 0)); + return !ifnet_is_attached(inm->in6m_ifp, 0); } /* @@ -279,12 +279,12 @@ im6f_init(struct in6_mfilter *imf, const int st0, const int st1) static 
int im6o_grow(struct ip6_moptions *imo, size_t newmax) { - struct in6_multi **nmships; - struct in6_multi **omships; - struct in6_mfilter *nmfilters; - struct in6_mfilter *omfilters; - size_t idx; - size_t oldmax; + struct in6_multi **nmships; + struct in6_multi **omships; + struct in6_mfilter *nmfilters; + struct in6_mfilter *omfilters; + size_t idx; + size_t oldmax; IM6O_LOCK_ASSERT_HELD(imo); @@ -293,33 +293,38 @@ im6o_grow(struct ip6_moptions *imo, size_t newmax) omships = imo->im6o_membership; omfilters = imo->im6o_mfilters; oldmax = imo->im6o_max_memberships; - if (newmax == 0) + if (newmax == 0) { newmax = ((oldmax + 1) * 2) - 1; + } - if (newmax > IPV6_MAX_MEMBERSHIPS) - return (ETOOMANYREFS); + if (newmax > IPV6_MAX_MEMBERSHIPS) { + return ETOOMANYREFS; + } if ((nmships = (struct in6_multi **)_REALLOC(omships, - sizeof (struct in6_multi *) * newmax, M_IP6MOPTS, - M_WAITOK | M_ZERO)) == NULL) - return (ENOMEM); + sizeof(struct in6_multi *) * newmax, M_IP6MOPTS, + M_WAITOK | M_ZERO)) == NULL) { + return ENOMEM; + } imo->im6o_membership = nmships; if ((nmfilters = (struct in6_mfilter *)_REALLOC(omfilters, - sizeof (struct in6_mfilter) * newmax, M_IN6MFILTER, - M_WAITOK | M_ZERO)) == NULL) - return (ENOMEM); + sizeof(struct in6_mfilter) * newmax, M_IN6MFILTER, + M_WAITOK | M_ZERO)) == NULL) { + return ENOMEM; + } imo->im6o_mfilters = nmfilters; /* Initialize newly allocated source filter heads. */ - for (idx = oldmax; idx < newmax; idx++) + for (idx = oldmax; idx < newmax; idx++) { im6f_init(&nmfilters[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); + } imo->im6o_max_memberships = newmax; - return (0); + return 0; } /* @@ -333,22 +338,24 @@ im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp, { const struct sockaddr_in6 *gsin6; struct in6_multi *pinm; - int idx; - int nmships; + int idx; + int nmships; IM6O_LOCK_ASSERT_HELD(__DECONST(struct ip6_moptions *, imo)); gsin6 = group; /* The im6o_membership array may be lazy allocated. */ - if (imo->im6o_membership == NULL || imo->im6o_num_memberships == 0) - return (-1); + if (imo->im6o_membership == NULL || imo->im6o_num_memberships == 0) { + return -1; + } nmships = imo->im6o_num_memberships; for (idx = 0; idx < nmships; idx++) { pinm = imo->im6o_membership[idx]; - if (pinm == NULL) + if (pinm == NULL) { continue; + } IN6M_LOCK(pinm); if ((ifp == NULL || (pinm->in6m_ifp == ifp)) && IN6_ARE_ADDR_EQUAL(&pinm->in6m_addr, @@ -358,10 +365,11 @@ im6o_match_group(const struct ip6_moptions *imo, const struct ifnet *ifp, } IN6M_UNLOCK(pinm); } - if (idx >= nmships) + if (idx >= nmships) { idx = -1; + } - return (idx); + return idx; } /* @@ -379,9 +387,9 @@ static struct in6_msource * im6o_match_source(const struct ip6_moptions *imo, const size_t gidx, const struct sockaddr_in6 *src) { - struct ip6_msource find; - struct in6_mfilter *imf; - struct ip6_msource *ims; + struct ip6_msource find; + struct in6_mfilter *imf; + struct ip6_msource *ims; const struct sockaddr_in6 *psa; IM6O_LOCK_ASSERT_HELD(__DECONST(struct ip6_moptions *, imo)); @@ -390,16 +398,17 @@ im6o_match_source(const struct ip6_moptions *imo, const size_t gidx, VERIFY(gidx != (size_t)-1 && gidx < imo->im6o_num_memberships); /* The im6o_mfilters array may be lazy allocated. 
*/ - if (imo->im6o_mfilters == NULL) - return (NULL); + if (imo->im6o_mfilters == NULL) { + return NULL; + } imf = &imo->im6o_mfilters[gidx]; psa = src; find.im6s_addr = psa->sin6_addr; - in6_clearscope(&find.im6s_addr); /* XXX */ + in6_clearscope(&find.im6s_addr); /* XXX */ ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); - return ((struct in6_msource *)ims); + return (struct in6_msource *)ims; } /* @@ -420,8 +429,9 @@ im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp, VERIFY(ifp != NULL); gidx = im6o_match_group(imo, ifp, group); - if (gidx == (size_t)-1) - return (MCAST_NOTGMEMBER); + if (gidx == (size_t)-1) { + return MCAST_NOTGMEMBER; + } /* * Check if the source was included in an (S,G) join. @@ -436,10 +446,11 @@ im6o_mc_filter(const struct ip6_moptions *imo, const struct ifnet *ifp, ims = im6o_match_source(imo, gidx, src); if ((ims == NULL && mode == MCAST_INCLUDE) || - (ims != NULL && ims->im6sl_st[0] != mode)) - return (MCAST_NOTSMEMBER); + (ims != NULL && ims->im6sl_st[0] != mode)) { + return MCAST_NOTSMEMBER; + } - return (MCAST_PASS); + return MCAST_PASS; } /* @@ -454,10 +465,10 @@ static int in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, struct in6_multi **pinm) { - struct sockaddr_in6 gsin6; - struct ifmultiaddr *ifma; - struct in6_multi *inm; - int error; + struct sockaddr_in6 gsin6; + struct ifmultiaddr *ifma; + struct in6_multi *inm; + int error; *pinm = NULL; @@ -475,7 +486,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, * We already joined this group; return the in6m * with a refcount held (via lookup) for caller. */ - return (0); + return 0; } in6_multihead_lock_done(); @@ -489,8 +500,9 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, * with this network-layer group on the given ifnet. */ error = if_addmulti(ifp, (struct sockaddr *)&gsin6, &ifma); - if (error != 0) - return (error); + if (error != 0) { + return error; + } /* * See comments in in6m_remref() for access to ifma_protospec. @@ -500,7 +512,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, if ((inm = ifma->ifma_protospec) != NULL) { VERIFY(ifma->ifma_addr != NULL); VERIFY(ifma->ifma_addr->sa_family == AF_INET6); - IN6M_ADDREF(inm); /* for caller */ + IN6M_ADDREF(inm); /* for caller */ IFMA_UNLOCK(ifma); IN6M_LOCK(inm); VERIFY(inm->in6m_ifma == ifma); @@ -520,7 +532,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, * been joined; return the inm with a refcount * held for caller. */ - return (0); + return 0; } /* * We lost the race with another thread doing in6_delmulti(); @@ -535,7 +547,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, IN6M_UNLOCK(inm); in6_multihead_lock_done(); IFMA_REMREF(ifma); - return (0); + return 0; } IFMA_UNLOCK(ifma); @@ -551,7 +563,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, if (inm == NULL) { in6_multihead_lock_done(); IFMA_REMREF(ifma); - return (ENOMEM); + return ENOMEM; } IN6M_LOCK(inm); inm->in6m_addr = *group; @@ -559,7 +571,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, inm->in6m_mli = MLD_IFINFO(ifp); VERIFY(inm->in6m_mli != NULL); MLI_ADDREF(inm->in6m_mli); - inm->in6m_ifma = ifma; /* keep refcount from if_addmulti() */ + inm->in6m_ifma = ifma; /* keep refcount from if_addmulti() */ inm->in6m_state = MLD_NOT_MEMBER; /* * Pending state-changes per group are subject to a bounds check. 
@@ -572,7 +584,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, in6_multi_attach(inm); VERIFY((inm->in6m_debug & (IFD_ATTACHED | IFD_TRASHED)) == IFD_ATTACHED); - IN6M_ADDREF_LOCKED(inm); /* for caller */ + IN6M_ADDREF_LOCKED(inm); /* for caller */ IN6M_UNLOCK(inm); IFMA_LOCK(ifma); @@ -581,7 +593,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, IFMA_UNLOCK(ifma); in6_multihead_lock_done(); - return (0); + return 0; } /* @@ -592,7 +604,7 @@ in6_mc_get(struct ifnet *ifp, const struct in6_addr *group, void in6m_clear_recorded(struct in6_multi *inm) { - struct ip6_msource *ims; + struct ip6_msource *ims; IN6M_LOCK_ASSERT_HELD(inm); @@ -630,21 +642,24 @@ in6m_clear_recorded(struct in6_multi *inm) int in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) { - struct ip6_msource find; - struct ip6_msource *ims, *nims; + struct ip6_msource find; + struct ip6_msource *ims, *nims; IN6M_LOCK_ASSERT_HELD(inm); find.im6s_addr = *addr; ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); - if (ims && ims->im6s_stp) - return (0); + if (ims && ims->im6s_stp) { + return 0; + } if (ims == NULL) { - if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) - return (-ENOSPC); + if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) { + return -ENOSPC; + } nims = ip6ms_alloc(M_WAITOK); - if (nims == NULL) - return (-ENOMEM); + if (nims == NULL) { + return -ENOMEM; + } nims->im6s_addr = find.im6s_addr; RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); ++inm->in6m_nsrc; @@ -658,7 +673,7 @@ in6m_record_source(struct in6_multi *inm, const struct in6_addr *addr) ++ims->im6s_stp; ++inm->in6m_st[1].iss_rec; - return (1); + return 1; } /* @@ -676,10 +691,10 @@ static int im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, struct in6_msource **plims) { - struct ip6_msource find; - struct ip6_msource *ims; - struct in6_msource *lims; - int error; + struct ip6_msource find; + struct ip6_msource *ims; + struct in6_msource *lims; + int error; error = 0; ims = NULL; @@ -689,11 +704,13 @@ im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); lims = (struct in6_msource *)ims; if (lims == NULL) { - if (imf->im6f_nsrc == in6_mcast_maxsocksrc) - return (ENOSPC); + if (imf->im6f_nsrc == in6_mcast_maxsocksrc) { + return ENOSPC; + } lims = in6ms_alloc(M_WAITOK); - if (lims == NULL) - return (ENOMEM); + if (lims == NULL) { + return ENOMEM; + } lims->im6s_addr = find.im6s_addr; lims->im6sl_st[0] = MCAST_UNDEFINED; RB_INSERT(ip6_msource_tree, &imf->im6f_sources, @@ -703,7 +720,7 @@ im6f_get_source(struct in6_mfilter *imf, const struct sockaddr_in6 *psin, *plims = lims; - return (error); + return error; } /* @@ -720,11 +737,12 @@ static struct in6_msource * im6f_graft(struct in6_mfilter *imf, const uint8_t st1, const struct sockaddr_in6 *psin) { - struct in6_msource *lims; + struct in6_msource *lims; lims = in6ms_alloc(M_WAITOK); - if (lims == NULL) - return (NULL); + if (lims == NULL) { + return NULL; + } lims->im6s_addr = psin->sin6_addr; lims->im6sl_st[0] = MCAST_UNDEFINED; lims->im6sl_st[1] = st1; @@ -732,7 +750,7 @@ im6f_graft(struct in6_mfilter *imf, const uint8_t st1, (struct ip6_msource *)lims); ++imf->im6f_nsrc; - return (lims); + return lims; } /* @@ -748,17 +766,18 @@ im6f_graft(struct in6_mfilter *imf, const uint8_t st1, static int im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin) { - struct ip6_msource find; - struct ip6_msource *ims; - struct in6_msource *lims; + struct 
ip6_msource find; + struct ip6_msource *ims; + struct in6_msource *lims; find.im6s_addr = psin->sin6_addr; ims = RB_FIND(ip6_msource_tree, &imf->im6f_sources, &find); - if (ims == NULL) - return (ENOENT); + if (ims == NULL) { + return ENOENT; + } lims = (struct in6_msource *)ims; lims->im6sl_st[1] = MCAST_UNDEFINED; - return (0); + return 0; } /* @@ -769,8 +788,8 @@ im6f_prune(struct in6_mfilter *imf, const struct sockaddr_in6 *psin) static void im6f_rollback(struct in6_mfilter *imf) { - struct ip6_msource *ims, *tims; - struct in6_msource *lims; + struct ip6_msource *ims, *tims; + struct in6_msource *lims; RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { lims = (struct in6_msource *)ims; @@ -800,8 +819,8 @@ im6f_rollback(struct in6_mfilter *imf) void im6f_leave(struct in6_mfilter *imf) { - struct ip6_msource *ims; - struct in6_msource *lims; + struct ip6_msource *ims; + struct in6_msource *lims; RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { lims = (struct in6_msource *)ims; @@ -818,8 +837,8 @@ im6f_leave(struct in6_mfilter *imf) static void im6f_commit(struct in6_mfilter *imf) { - struct ip6_msource *ims; - struct in6_msource *lims; + struct ip6_msource *ims; + struct in6_msource *lims; RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { lims = (struct in6_msource *)ims; @@ -836,8 +855,8 @@ im6f_commit(struct in6_mfilter *imf) static void im6f_reap(struct in6_mfilter *imf) { - struct ip6_msource *ims, *tims; - struct in6_msource *lims; + struct ip6_msource *ims, *tims; + struct in6_msource *lims; RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { lims = (struct in6_msource *)ims; @@ -860,8 +879,8 @@ im6f_reap(struct in6_mfilter *imf) void im6f_purge(struct in6_mfilter *imf) { - struct ip6_msource *ims, *tims; - struct in6_msource *lims; + struct ip6_msource *ims, *tims; + struct in6_msource *lims; RB_FOREACH_SAFE(ims, ip6_msource_tree, &imf->im6f_sources, tims) { lims = (struct in6_msource *)ims; @@ -889,19 +908,21 @@ static int in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, const int noalloc, struct ip6_msource **pims) { - struct ip6_msource find; - struct ip6_msource *ims, *nims; + struct ip6_msource find; + struct ip6_msource *ims, *nims; IN6M_LOCK_ASSERT_HELD(inm); find.im6s_addr = *addr; ims = RB_FIND(ip6_msource_tree, &inm->in6m_srcs, &find); if (ims == NULL && !noalloc) { - if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) - return (ENOSPC); + if (inm->in6m_nsrc == in6_mcast_maxgrpsrc) { + return ENOSPC; + } nims = ip6ms_alloc(M_WAITOK); - if (nims == NULL) - return (ENOMEM); + if (nims == NULL) { + return ENOMEM; + } nims->im6s_addr = *addr; RB_INSERT(ip6_msource_tree, &inm->in6m_srcs, nims); ++inm->in6m_nsrc; @@ -911,7 +932,7 @@ in6m_get_source(struct in6_multi *inm, const struct in6_addr *addr, } *pims = ims; - return (0); + return 0; } /* @@ -930,11 +951,12 @@ im6s_get_mode(const struct in6_multi *inm, const struct ip6_msource *ims, t = !!t; if (inm->in6m_st[t].iss_ex > 0 && - inm->in6m_st[t].iss_ex == ims->im6s_st[t].ex) - return (MCAST_EXCLUDE); - else if (ims->im6s_st[t].in > 0 && ims->im6s_st[t].ex == 0) - return (MCAST_INCLUDE); - return (MCAST_UNDEFINED); + inm->in6m_st[t].iss_ex == ims->im6s_st[t].ex) { + return MCAST_EXCLUDE; + } else if (ims->im6s_st[t].in > 0 && ims->im6s_st[t].ex == 0) { + return MCAST_INCLUDE; + } + return MCAST_UNDEFINED; } /* @@ -985,10 +1007,10 @@ im6s_merge(struct ip6_msource *ims, const struct in6_msource *lims, static int in6m_merge(struct in6_multi *inm, /*const*/ struct 
in6_mfilter *imf) { - struct ip6_msource *ims, *nims = NULL; - struct in6_msource *lims; - int schanged, error; - int nsrc0, nsrc1; + struct ip6_msource *ims, *nims = NULL; + struct in6_msource *lims; + int schanged, error; + int nsrc0, nsrc1; IN6M_LOCK_ASSERT_HELD(inm); @@ -1005,13 +1027,20 @@ in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) */ RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { lims = (struct in6_msource *)ims; - if (lims->im6sl_st[0] == imf->im6f_st[0]) nsrc0++; - if (lims->im6sl_st[1] == imf->im6f_st[1]) nsrc1++; - if (lims->im6sl_st[0] == lims->im6sl_st[1]) continue; + if (lims->im6sl_st[0] == imf->im6f_st[0]) { + nsrc0++; + } + if (lims->im6sl_st[1] == imf->im6f_st[1]) { + nsrc1++; + } + if (lims->im6sl_st[0] == lims->im6sl_st[1]) { + continue; + } error = in6m_get_source(inm, &lims->im6s_addr, 0, &nims); ++schanged; - if (error) + if (error) { break; + } im6s_merge(nims, lims, 0); } if (error) { @@ -1019,11 +1048,13 @@ in6m_merge(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) RB_FOREACH_REVERSE_FROM(ims, ip6_msource_tree, nims) { lims = (struct in6_msource *)ims; - if (lims->im6sl_st[0] == lims->im6sl_st[1]) + if (lims->im6sl_st[0] == lims->im6sl_st[1]) { continue; + } (void) in6m_get_source(inm, &lims->im6s_addr, 1, &bims); - if (bims == NULL) + if (bims == NULL) { continue; + } im6s_merge(bims, lims, 1); } goto out_reap; @@ -1107,7 +1138,7 @@ out_reap: MLD_PRINTF(("%s: sources changed; reaping\n", __func__)); in6m_reap(inm); } - return (error); + return error; } /* @@ -1117,7 +1148,7 @@ out_reap: void in6m_commit(struct in6_multi *inm) { - struct ip6_msource *ims; + struct ip6_msource *ims; IN6M_LOCK_ASSERT_HELD(inm); @@ -1138,15 +1169,16 @@ in6m_commit(struct in6_multi *inm) static void in6m_reap(struct in6_multi *inm) { - struct ip6_msource *ims, *tims; + struct ip6_msource *ims, *tims; IN6M_LOCK_ASSERT_HELD(inm); RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, tims) { if (ims->im6s_st[0].ex > 0 || ims->im6s_st[0].in > 0 || ims->im6s_st[1].ex > 0 || ims->im6s_st[1].in > 0 || - ims->im6s_stp != 0) + ims->im6s_stp != 0) { continue; + } MLD_PRINTF(("%s: free ims 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(ims))); RB_REMOVE(ip6_msource_tree, &inm->in6m_srcs, ims); @@ -1161,7 +1193,7 @@ in6m_reap(struct in6_multi *inm) void in6m_purge(struct in6_multi *inm) { - struct ip6_msource *ims, *tims; + struct ip6_msource *ims, *tims; IN6M_LOCK_ASSERT_HELD(inm); @@ -1191,17 +1223,17 @@ in6_joingroup(struct ifnet *ifp, struct in6_addr *mcaddr, imm = in6_multi_mship_alloc(M_WAITOK); if (imm == NULL) { *errorp = ENOBUFS; - return (NULL); + return NULL; } error = in6_mc_join(ifp, mcaddr, NULL, &imm->i6mm_maddr, delay); if (error) { *errorp = error; in6_multi_mship_free(imm); - return (NULL); + return NULL; } - return (imm); + return imm; } /* @@ -1234,10 +1266,10 @@ in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr, /*const*/ struct in6_mfilter *imf, struct in6_multi **pinm, const int delay) { - struct in6_mfilter timf; - struct in6_multi *inm = NULL; - int error = 0; - struct mld_tparams mtp; + struct in6_mfilter timf; + struct in6_multi *inm = NULL; + int error = 0; + struct mld_tparams mtp; /* * Sanity: Check scope zone ID was set for ifp, if and @@ -1253,7 +1285,7 @@ in6_mc_join(struct ifnet *ifp, const struct in6_addr *mcaddr, ip6_sprintf(mcaddr), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - bzero(&mtp, sizeof (mtp)); + bzero(&mtp, sizeof(mtp)); *pinm = NULL; /* @@ -1268,7 +1300,7 @@ in6_mc_join(struct ifnet 
*ifp, const struct in6_addr *mcaddr, error = in6_mc_get(ifp, mcaddr, &inm); if (error) { MLD_PRINTF(("%s: in6_mc_get() failure\n", __func__)); - return (error); + return error; } MLD_PRINTF(("%s: merge inm state\n", __func__)); @@ -1296,13 +1328,13 @@ out_in6m_release: IN6M_REMREF(inm); } else { IN6M_UNLOCK(inm); - *pinm = inm; /* keep refcount from in6_mc_get() */ + *pinm = inm; /* keep refcount from in6_mc_get() */ } /* schedule timer now that we've dropped the lock(s) */ mld_set_timeout(&mtp); - return (error); + return error; } /* @@ -1321,11 +1353,11 @@ out_in6m_release: int in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) { - struct in6_mfilter timf; - int error, lastref; - struct mld_tparams mtp; + struct in6_mfilter timf; + int error, lastref; + struct mld_tparams mtp; - bzero(&mtp, sizeof (mtp)); + bzero(&mtp, sizeof(mtp)); error = 0; IN6M_LOCK_ASSERT_NOTHELD(inm); @@ -1362,8 +1394,9 @@ in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) MLD_PRINTF(("%s: doing mld downcall\n", __func__)); error = mld_change_state(inm, &mtp, 0); #if MLD_DEBUG - if (error) + if (error) { MLD_PRINTF(("%s: failed mld downcall\n", __func__)); + } #endif lastref = in6_multi_detach(inm); VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) && @@ -1371,13 +1404,13 @@ in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) IN6M_UNLOCK(inm); in6_multihead_lock_done(); - if (lastref) - IN6M_REMREF(inm); /* for in6_multihead list */ - + if (lastref) { + IN6M_REMREF(inm); /* for in6_multihead list */ + } /* schedule timer now that we've dropped the lock(s) */ mld_set_timeout(&mtp); - return (error); + return error; } /* @@ -1392,19 +1425,19 @@ in6_mc_leave(struct in6_multi *inm, /*const*/ struct in6_mfilter *imf) static int in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) { - struct group_source_req gsr; - struct sockaddr_in6 *gsa, *ssa; - struct ifnet *ifp; - struct in6_mfilter *imf; - struct ip6_moptions *imo; - struct in6_msource *ims; - struct in6_multi *inm; - size_t idx; - uint16_t fmode; - int error, doblock; - struct mld_tparams mtp; - - bzero(&mtp, sizeof (mtp)); + struct group_source_req gsr; + struct sockaddr_in6 *gsa, *ssa; + struct ifnet *ifp; + struct in6_mfilter *imf; + struct ip6_moptions *imo; + struct in6_msource *ims; + struct in6_multi *inm; + size_t idx; + uint16_t fmode; + int error, doblock; + struct mld_tparams mtp; + + bzero(&mtp, sizeof(mtp)); ifp = NULL; error = 0; doblock = 0; @@ -1419,42 +1452,48 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) error = sooptcopyin(sopt, &gsr, sizeof(struct group_source_req), sizeof(struct group_source_req)); - if (error) - return (error); + if (error) { + return error; + } if (gsa->sin6_family != AF_INET6 || - gsa->sin6_len != sizeof(struct sockaddr_in6)) - return (EINVAL); + gsa->sin6_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } if (ssa->sin6_family != AF_INET6 || - ssa->sin6_len != sizeof(struct sockaddr_in6)) - return (EINVAL); + ssa->sin6_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } ifnet_head_lock_shared(); if (gsr.gsr_interface == 0 || (u_int)if_index < gsr.gsr_interface) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[gsr.gsr_interface]; ifnet_head_done(); - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } - if (sopt->sopt_name == MCAST_BLOCK_SOURCE) + if (sopt->sopt_name == MCAST_BLOCK_SOURCE) { doblock = 1; + } break; default: 
MLD_PRINTF(("%s: unknown sopt_name %d\n", __func__, sopt->sopt_name)); - return (EOPNOTSUPP); + return EOPNOTSUPP; } - if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) - return (EINVAL); + if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) { + return EINVAL; + } (void) in6_setscope(&gsa->sin6_addr, ifp, NULL); @@ -1462,8 +1501,9 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) * Check if we are actually a member of this group. */ imo = in6p_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IM6O_LOCK(imo); idx = im6o_match_group(imo, ifp, gsa); @@ -1507,8 +1547,9 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) if (doblock) { MLD_PRINTF(("%s: %s source\n", __func__, "block")); ims = im6f_graft(imf, fmode, ssa); - if (ims == NULL) + if (ims == NULL) { error = ENOMEM; + } } else { MLD_PRINTF(("%s: %s source\n", __func__, "allow")); error = im6f_prune(imf, ssa); @@ -1535,26 +1576,28 @@ in6p_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) error = mld_change_state(inm, &mtp, 0); IN6M_UNLOCK(inm); #if MLD_DEBUG - if (error) + if (error) { MLD_PRINTF(("%s: failed mld downcall\n", __func__)); + } #endif out_im6f_rollback: - if (error) + if (error) { im6f_rollback(imf); - else + } else { im6f_commit(imf); + } im6f_reap(imf); out_imo_locked: IM6O_UNLOCK(imo); - IM6O_REMREF(imo); /* from in6p_findmoptions() */ + IM6O_REMREF(imo); /* from in6p_findmoptions() */ /* schedule timer now that we've dropped the lock(s) */ mld_set_timeout(&mtp); - return (error); + return error; } /* @@ -1565,33 +1608,34 @@ out_imo_locked: static struct ip6_moptions * in6p_findmoptions(struct inpcb *inp) { - struct ip6_moptions *imo; - struct in6_multi **immp; - struct in6_mfilter *imfp; - size_t idx; + struct ip6_moptions *imo; + struct in6_multi **immp; + struct in6_mfilter *imfp; + size_t idx; if ((imo = inp->in6p_moptions) != NULL) { - IM6O_ADDREF(imo); /* for caller */ - return (imo); + IM6O_ADDREF(imo); /* for caller */ + return imo; } imo = ip6_allocmoptions(M_WAITOK); - if (imo == NULL) - return (NULL); + if (imo == NULL) { + return NULL; + } - immp = _MALLOC(sizeof (*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS, + immp = _MALLOC(sizeof(*immp) * IPV6_MIN_MEMBERSHIPS, M_IP6MOPTS, M_WAITOK | M_ZERO); if (immp == NULL) { IM6O_REMREF(imo); - return (NULL); + return NULL; } - imfp = _MALLOC(sizeof (struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS, + imfp = _MALLOC(sizeof(struct in6_mfilter) * IPV6_MIN_MEMBERSHIPS, M_IN6MFILTER, M_WAITOK | M_ZERO); if (imfp == NULL) { _FREE(immp, M_IP6MOPTS); IM6O_REMREF(imo); - return (NULL); + return NULL; } imo->im6o_multicast_ifp = NULL; @@ -1602,14 +1646,15 @@ in6p_findmoptions(struct inpcb *inp) imo->im6o_membership = immp; /* Initialize per-group source filters. 
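The allocation sequence in in6p_findmoptions() above is all-or-nothing: each later failure releases everything acquired earlier (_FREE of the membership array, IM6O_REMREF of the options block) before returning NULL. The same unwind shape in a standalone sketch, with generic names and plain calloc/free standing in for the kernel allocators:

#include <stdlib.h>

struct moptions {
	void *memberships;
	void *filters;
};

/* Allocate the options block plus its two arrays; on any failure, undo
 * the earlier allocations so the caller sees either a complete object
 * or NULL. */
static struct moptions *
alloc_moptions(size_t nslots, size_t msize, size_t fsize)
{
	struct moptions *mo = calloc(1, sizeof(*mo));

	if (mo == NULL)
		return NULL;
	mo->memberships = calloc(nslots, msize);
	if (mo->memberships == NULL) {
		free(mo);
		return NULL;
	}
	mo->filters = calloc(nslots, fsize);
	if (mo->filters == NULL) {
		free(mo->memberships);
		free(mo);
		return NULL;
	}
	return mo;
}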
*/ - for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS; idx++) + for (idx = 0; idx < IPV6_MIN_MEMBERSHIPS; idx++) { im6f_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); + } imo->im6o_mfilters = imfp; inp->in6p_moptions = imo; /* keep reference from ip6_allocmoptions() */ - IM6O_ADDREF(imo); /* for caller */ + IM6O_ADDREF(imo); /* for caller */ - return (imo); + return imo; } /* @@ -1619,20 +1664,20 @@ in6p_findmoptions(struct inpcb *inp) static int in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) { - struct __msfilterreq64 msfr = {}, msfr64; - struct __msfilterreq32 msfr32; - struct sockaddr_in6 *gsa; - struct ifnet *ifp; - struct ip6_moptions *imo; - struct in6_mfilter *imf; - struct ip6_msource *ims; - struct in6_msource *lims; - struct sockaddr_in6 *psin; - struct sockaddr_storage *ptss; - struct sockaddr_storage *tss; - int error; - size_t idx, nsrcs, ncsrcs; - user_addr_t tmp_ptr; + struct __msfilterreq64 msfr = {}, msfr64; + struct __msfilterreq32 msfr32; + struct sockaddr_in6 *gsa; + struct ifnet *ifp; + struct ip6_moptions *imo; + struct in6_mfilter *imf; + struct ip6_msource *ims; + struct in6_msource *lims; + struct sockaddr_in6 *psin; + struct sockaddr_storage *ptss; + struct sockaddr_storage *tss; + int error; + size_t idx, nsrcs, ncsrcs; + user_addr_t tmp_ptr; imo = inp->in6p_moptions; VERIFY(imo != NULL); @@ -1641,45 +1686,52 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) error = sooptcopyin(sopt, &msfr64, sizeof(struct __msfilterreq64), sizeof(struct __msfilterreq64)); - if (error) - return (error); + if (error) { + return error; + } /* we never use msfr.msfr_srcs; */ memcpy(&msfr, &msfr64, sizeof(msfr64)); } else { error = sooptcopyin(sopt, &msfr32, sizeof(struct __msfilterreq32), sizeof(struct __msfilterreq32)); - if (error) - return (error); + if (error) { + return error; + } /* we never use msfr.msfr_srcs; */ memcpy(&msfr, &msfr32, sizeof(msfr32)); } if (msfr.msfr_group.ss_family != AF_INET6 || - msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) - return (EINVAL); + msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } gsa = (struct sockaddr_in6 *)&msfr.msfr_group; - if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) - return (EINVAL); + if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) { + return EINVAL; + } ifnet_head_lock_shared(); if (msfr.msfr_ifindex == 0 || (u_int)if_index < msfr.msfr_ifindex) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[msfr.msfr_ifindex]; ifnet_head_done(); - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } if ((size_t) msfr.msfr_nsrcs > - UINT32_MAX / sizeof(struct sockaddr_storage)) + UINT32_MAX / sizeof(struct sockaddr_storage)) { msfr.msfr_nsrcs = UINT32_MAX / sizeof(struct sockaddr_storage); + } - if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) + if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) { msfr.msfr_nsrcs = in6_mcast_maxsocksrc; + } (void)in6_setscope(&gsa->sin6_addr, ifp, NULL); @@ -1690,7 +1742,7 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) idx = im6o_match_group(imo, ifp, gsa); if (idx == (size_t)-1 || imo->im6o_mfilters == NULL) { IM6O_UNLOCK(imo); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } imf = &imo->im6o_mfilters[idx]; @@ -1699,7 +1751,7 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) */ if (imf->im6f_st[1] == MCAST_UNDEFINED) { IM6O_UNLOCK(imo); - return (EAGAIN); + return EAGAIN; } msfr.msfr_fmode = imf->im6f_st[1]; @@ -1712,17 +1764,18 @@ 
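in6p_get_source_filters() below clamps the user-supplied source count twice before it sizes any copy: once against UINT32_MAX / sizeof(struct sockaddr_storage) so the byte-size multiplication cannot overflow, and once against the in6_mcast_maxsocksrc sysctl. The same idiom in isolation (maxsocksrc stands in for the sysctl value):

#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>

/* Clamp an untrusted element count before computing a byte size, then
 * allocate; mirrors the msfr_nsrcs handling below. */
static void *
alloc_source_array(size_t nsrcs, size_t maxsocksrc)
{
	if (nsrcs > UINT32_MAX / sizeof(struct sockaddr_storage))
		nsrcs = UINT32_MAX / sizeof(struct sockaddr_storage);
	if (nsrcs > maxsocksrc)
		nsrcs = maxsocksrc;
	return calloc(nsrcs, sizeof(struct sockaddr_storage));
}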
in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) */ tss = NULL; - if (IS_64BIT_PROCESS(current_proc())) + if (IS_64BIT_PROCESS(current_proc())) { tmp_ptr = msfr64.msfr_srcs; - else + } else { tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs); + } if (tmp_ptr != USER_ADDR_NULL && msfr.msfr_nsrcs > 0) { tss = _MALLOC((size_t) msfr.msfr_nsrcs * sizeof(*tss), M_TEMP, M_WAITOK | M_ZERO); if (tss == NULL) { IM6O_UNLOCK(imo); - return (ENOBUFS); + return ENOBUFS; } } @@ -1736,8 +1789,9 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) RB_FOREACH(ims, ip6_msource_tree, &imf->im6f_sources) { lims = (struct in6_msource *)ims; if (lims->im6sl_st[0] == MCAST_UNDEFINED || - lims->im6sl_st[0] != imf->im6f_st[0]) + lims->im6sl_st[0] != imf->im6f_st[0]) { continue; + } if (tss != NULL && nsrcs > 0) { psin = (struct sockaddr_in6 *)ptss; psin->sin6_family = AF_INET6; @@ -1755,8 +1809,9 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) if (tss != NULL) { error = copyout(tss, tmp_ptr, ncsrcs * sizeof(*tss)); FREE(tss, M_TEMP); - if (error) - return (error); + if (error) { + return error; + } } msfr.msfr_nsrcs = ncsrcs; @@ -1778,7 +1833,7 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) sizeof(struct __msfilterreq32)); } - return (error); + return error; } /* @@ -1787,9 +1842,9 @@ in6p_get_source_filters(struct inpcb *inp, struct sockopt *sopt) int ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) { - struct ip6_moptions *im6o; - int error; - u_int optval; + struct ip6_moptions *im6o; + int error; + u_int optval; im6o = inp->in6p_moptions; /* @@ -1799,21 +1854,23 @@ ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) if (SOCK_PROTO(inp->inp_socket) == IPPROTO_DIVERT || (SOCK_TYPE(inp->inp_socket) != SOCK_RAW && SOCK_TYPE(inp->inp_socket) != SOCK_DGRAM)) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } error = 0; switch (sopt->sopt_name) { case IPV6_MULTICAST_IF: - if (im6o != NULL) + if (im6o != NULL) { IM6O_LOCK(im6o); + } if (im6o == NULL || im6o->im6o_multicast_ifp == NULL) { optval = 0; } else { optval = im6o->im6o_multicast_ifp->if_index; } - if (im6o != NULL) + if (im6o != NULL) { IM6O_UNLOCK(im6o); + } error = sooptcopyout(sopt, &optval, sizeof(u_int)); break; @@ -1852,7 +1909,7 @@ ip6_getmoptions(struct inpcb *inp, struct sockopt *sopt) break; } - return (error); + return error; } /* @@ -1874,17 +1931,19 @@ static struct ifnet * in6p_lookup_mcast_ifp(const struct inpcb *in6p, const struct sockaddr_in6 *gsin6) { - struct route_in6 ro6; - struct ifnet *ifp; - unsigned int ifscope = IFSCOPE_NONE; + struct route_in6 ro6; + struct ifnet *ifp; + unsigned int ifscope = IFSCOPE_NONE; VERIFY(in6p == NULL || (in6p->inp_vflag & INP_IPV6)); VERIFY(gsin6->sin6_family == AF_INET6); - if (IN6_IS_ADDR_MULTICAST(&gsin6->sin6_addr) == 0) + if (IN6_IS_ADDR_MULTICAST(&gsin6->sin6_addr) == 0) { return NULL; + } - if (in6p != NULL && (in6p->inp_flags & INP_BOUND_IF)) + if (in6p != NULL && (in6p->inp_flags & INP_BOUND_IF)) { ifscope = in6p->inp_boundifp->if_index; + } ifp = NULL; memset(&ro6, 0, sizeof(struct route_in6)); @@ -1896,7 +1955,7 @@ in6p_lookup_mcast_ifp(const struct inpcb *in6p, } ROUTE_RELEASE(&ro6); - return (ifp); + return ifp; } /* @@ -1916,20 +1975,23 @@ in6p_lookup_v4addr(struct ipv6_mreq *mreq, struct ip_mreq *v4mreq) ifnet_head_lock_shared(); if (mreq->ipv6mr_interface > (unsigned int)if_index) { ifnet_head_done(); - return (EADDRNOTAVAIL); - } else + return EADDRNOTAVAIL; + } else { ifp = ifindex2ifnet[mreq->ipv6mr_interface]; + 
} ifnet_head_done(); - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } ifa = ifa_ifpgetprimary(ifp, AF_INET); - if (ifa == NULL) - return (EADDRNOTAVAIL); + if (ifa == NULL) { + return EADDRNOTAVAIL; + } sin = (struct sockaddr_in *)(uintptr_t)(size_t)ifa->ifa_addr; v4mreq->imr_interface.s_addr = sin->sin_addr.s_addr; IFA_REMREF(ifa); - return (0); + return 0; } /* @@ -1941,19 +2003,19 @@ in6p_lookup_v4addr(struct ipv6_mreq *mreq, struct ip_mreq *v4mreq) static int in6p_join_group(struct inpcb *inp, struct sockopt *sopt) { - struct group_source_req gsr; - struct sockaddr_in6 *gsa, *ssa; - struct ifnet *ifp; - struct in6_mfilter *imf; - struct ip6_moptions *imo; - struct in6_multi *inm = NULL; - struct in6_msource *lims = NULL; - size_t idx; - int error, is_new; - uint32_t scopeid = 0; - struct mld_tparams mtp; - - bzero(&mtp, sizeof (mtp)); + struct group_source_req gsr; + struct sockaddr_in6 *gsa, *ssa; + struct ifnet *ifp; + struct in6_mfilter *imf; + struct ip6_moptions *imo; + struct in6_multi *inm = NULL; + struct in6_msource *lims = NULL; + size_t idx; + int error, is_new; + uint32_t scopeid = 0; + struct mld_tparams mtp; + + bzero(&mtp, sizeof(mtp)); ifp = NULL; imf = NULL; error = 0; @@ -1975,36 +2037,39 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), sizeof(struct ipv6_mreq)); - if (error) - return (error); + if (error) { + return error; + } if (IN6_IS_ADDR_V4MAPPED(&mreq.ipv6mr_multiaddr)) { struct ip_mreq v4mreq; struct sockopt v4sopt; v4mreq.imr_multiaddr.s_addr = mreq.ipv6mr_multiaddr.s6_addr32[3]; - if (mreq.ipv6mr_interface == 0) + if (mreq.ipv6mr_interface == 0) { v4mreq.imr_interface.s_addr = INADDR_ANY; - else + } else { error = in6p_lookup_v4addr(&mreq, &v4mreq); - if (error) - return (error); + } + if (error) { + return error; + } v4sopt.sopt_dir = SOPT_SET; - v4sopt.sopt_level = sopt->sopt_level; + v4sopt.sopt_level = sopt->sopt_level; v4sopt.sopt_name = IP_ADD_MEMBERSHIP; v4sopt.sopt_val = CAST_USER_ADDR_T(&v4mreq); v4sopt.sopt_valsize = sizeof(v4mreq); v4sopt.sopt_p = kernproc; - return (inp_join_group(inp, &v4sopt)); + return inp_join_group(inp, &v4sopt); } gsa->sin6_family = AF_INET6; gsa->sin6_len = sizeof(struct sockaddr_in6); gsa->sin6_addr = mreq.ipv6mr_multiaddr; - /* Only allow IPv6 multicast addresses */ + /* Only allow IPv6 multicast addresses */ if (IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr) == 0) { - return (EINVAL); + return EINVAL; } if (mreq.ipv6mr_interface == 0) { @@ -2013,8 +2078,8 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) ifnet_head_lock_shared(); if ((u_int)if_index < mreq.ipv6mr_interface) { ifnet_head_done(); - return (EADDRNOTAVAIL); - } + return EADDRNOTAVAIL; + } ifp = ifindex2ifnet[mreq.ipv6mr_interface]; ifnet_head_done(); } @@ -2035,19 +2100,23 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) sizeof(struct group_source_req), sizeof(struct group_source_req)); } - if (error) - return (error); + if (error) { + return error; + } if (gsa->sin6_family != AF_INET6 || - gsa->sin6_len != sizeof(struct sockaddr_in6)) - return (EINVAL); + gsa->sin6_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { if (ssa->sin6_family != AF_INET6 || - ssa->sin6_len != sizeof(struct sockaddr_in6)) - return (EINVAL); - if (IN6_IS_ADDR_MULTICAST(&ssa->sin6_addr)) - return (EINVAL); + ssa->sin6_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } + if 
(IN6_IS_ADDR_MULTICAST(&ssa->sin6_addr)) { + return EINVAL; + } /* * TODO: Validate embedded scope ID in source * list entry against passed-in ifp, if and only @@ -2062,7 +2131,7 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) if (gsr.gsr_interface == 0 || (u_int)if_index < gsr.gsr_interface) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[gsr.gsr_interface]; ifnet_head_done(); @@ -2071,14 +2140,16 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) default: MLD_PRINTF(("%s: unknown sopt_name %d\n", __func__, sopt->sopt_name)); - return (EOPNOTSUPP); + return EOPNOTSUPP; } - if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) - return (EINVAL); + if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) { + return EINVAL; + } - if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) - return (EADDRNOTAVAIL); + if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { + return EADDRNOTAVAIL; + } INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_mcast_join_total); /* @@ -2103,12 +2174,14 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) */ if ((IN6_IS_ADDR_MC_LINKLOCAL(&gsa->sin6_addr) || IN6_IS_ADDR_MC_INTFACELOCAL(&gsa->sin6_addr)) && - (scopeid == 0 || gsa->sin6_addr.s6_addr16[1] == 0)) - return (EINVAL); + (scopeid == 0 || gsa->sin6_addr.s6_addr16[1] == 0)) { + return EINVAL; + } imo = in6p_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IM6O_LOCK(imo); idx = im6o_match_group(imo, ifp, gsa); @@ -2145,7 +2218,7 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) */ lims = im6o_match_source(imo, idx, ssa); if (lims != NULL /*&& - lims->im6sl_st[1] == MCAST_INCLUDE*/) { + * lims->im6sl_st[1] == MCAST_INCLUDE*/) { error = EADDRNOTAVAIL; goto out_imo_locked; } @@ -2166,8 +2239,9 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) */ error = EINVAL; /* See comments above for EADDRINUSE */ - if (imf->im6f_st[1] == MCAST_EXCLUDE) + if (imf->im6f_st[1] == MCAST_EXCLUDE) { error = EADDRINUSE; + } goto out_imo_locked; } } @@ -2179,8 +2253,9 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) if (is_new) { if (imo->im6o_num_memberships == imo->im6o_max_memberships) { error = im6o_grow(imo, 0); - if (error) + if (error) { goto out_imo_locked; + } } /* * Allocate the new slot upfront so we can deal with @@ -2210,7 +2285,7 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) /* Membership starts in IN mode */ if (is_new) { MLD_PRINTF(("%s: new join w/source\n", __func__); - im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE)); + im6f_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE)); } else { MLD_PRINTF(("%s: %s source\n", __func__, "allow")); } @@ -2249,8 +2324,9 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) IM6O_REMREF(imo); IM6O_LOCK(imo); - if (error) + if (error) { goto out_im6o_free; + } imo->im6o_membership[idx] = inm; /* from in6_mc_join() */ } else { MLD_PRINTF(("%s: merge inm state\n", __func__)); @@ -2275,10 +2351,11 @@ in6p_join_group(struct inpcb *inp, struct sockopt *sopt) out_im6f_rollback: if (error) { im6f_rollback(imf); - if (is_new) + if (is_new) { im6f_purge(imf); - else + } else { im6f_reap(imf); + } } else { im6f_commit(imf); } @@ -2292,12 +2369,12 @@ out_im6o_free: out_imo_locked: IM6O_UNLOCK(imo); - IM6O_REMREF(imo); /* from in6p_findmoptions() */ + IM6O_REMREF(imo); /* from in6p_findmoptions() */ /* schedule timer now that we've dropped the lock(s) */ mld_set_timeout(&mtp); - return (error); + return error; } /* @@ -2306,20 +2383,20 @@ 
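From userland, the whole join path above (im6o slot allocation, filter graft, in6_mc_join(), and the final MLD downcall) is driven by a single setsockopt. A minimal source-specific join, with placeholder addresses and no error handling:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Receive ff3e::8000:1 only when sent by 2001:db8::1; the kernel ends up
 * in in6p_join_group() with sopt_name == MCAST_JOIN_SOURCE_GROUP. */
static int
ssm_join(int s, unsigned int ifindex)
{
	struct group_source_req gsr;
	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;

	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = ifindex;
	grp->sin6_family = AF_INET6;
	grp->sin6_len = sizeof(*grp);
	inet_pton(AF_INET6, "ff3e::8000:1", &grp->sin6_addr);
	src->sin6_family = AF_INET6;
	src->sin6_len = sizeof(*src);
	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);
	return setsockopt(s, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
	    &gsr, sizeof(gsr));
}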
out_imo_locked: static int in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) { - struct ipv6_mreq mreq; - struct group_source_req gsr; - struct sockaddr_in6 *gsa, *ssa; - struct ifnet *ifp; - struct in6_mfilter *imf; - struct ip6_moptions *imo; - struct in6_msource *ims; - struct in6_multi *inm = NULL; - uint32_t ifindex = 0; - size_t idx; - int error, is_final; - struct mld_tparams mtp; - - bzero(&mtp, sizeof (mtp)); + struct ipv6_mreq mreq; + struct group_source_req gsr; + struct sockaddr_in6 *gsa, *ssa; + struct ifnet *ifp; + struct in6_mfilter *imf; + struct ip6_moptions *imo; + struct in6_msource *ims; + struct in6_multi *inm = NULL; + uint32_t ifindex = 0; + size_t idx; + int error, is_final; + struct mld_tparams mtp; + + bzero(&mtp, sizeof(mtp)); ifp = NULL; error = 0; is_final = 1; @@ -2337,31 +2414,33 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) */ switch (sopt->sopt_name) { case IPV6_LEAVE_GROUP: { - error = sooptcopyin(sopt, &mreq, sizeof(struct ipv6_mreq), sizeof(struct ipv6_mreq)); - if (error) - return (error); + if (error) { + return error; + } if (IN6_IS_ADDR_V4MAPPED(&mreq.ipv6mr_multiaddr)) { struct ip_mreq v4mreq; struct sockopt v4sopt; v4mreq.imr_multiaddr.s_addr = mreq.ipv6mr_multiaddr.s6_addr32[3]; - if (mreq.ipv6mr_interface == 0) + if (mreq.ipv6mr_interface == 0) { v4mreq.imr_interface.s_addr = INADDR_ANY; - else + } else { error = in6p_lookup_v4addr(&mreq, &v4mreq); - if (error) - return (error); + } + if (error) { + return error; + } v4sopt.sopt_dir = SOPT_SET; - v4sopt.sopt_level = sopt->sopt_level; + v4sopt.sopt_level = sopt->sopt_level; v4sopt.sopt_name = IP_DROP_MEMBERSHIP; v4sopt.sopt_val = CAST_USER_ADDR_T(&v4mreq); v4sopt.sopt_valsize = sizeof(v4mreq); v4sopt.sopt_p = kernproc; - return (inp_leave_group(inp, &v4sopt)); + return inp_leave_group(inp, &v4sopt); } gsa->sin6_family = AF_INET6; gsa->sin6_len = sizeof(struct sockaddr_in6); @@ -2369,9 +2448,9 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) gsa->sin6_port = 0; gsa->sin6_scope_id = 0; ifindex = mreq.ipv6mr_interface; - /* Only allow IPv6 multicast addresses */ + /* Only allow IPv6 multicast addresses */ if (IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr) == 0) { - return (EINVAL); + return EINVAL; } break; } @@ -2387,18 +2466,22 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) sizeof(struct group_source_req), sizeof(struct group_source_req)); } - if (error) - return (error); + if (error) { + return error; + } if (gsa->sin6_family != AF_INET6 || - gsa->sin6_len != sizeof(struct sockaddr_in6)) - return (EINVAL); + gsa->sin6_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { if (ssa->sin6_family != AF_INET6 || - ssa->sin6_len != sizeof(struct sockaddr_in6)) - return (EINVAL); - if (IN6_IS_ADDR_MULTICAST(&ssa->sin6_addr)) - return (EINVAL); + ssa->sin6_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } + if (IN6_IS_ADDR_MULTICAST(&ssa->sin6_addr)) { + return EINVAL; + } /* * TODO: Validate embedded scope ID in source * list entry against passed-in ifp, if and only @@ -2414,11 +2497,12 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) default: MLD_PRINTF(("%s: unknown sopt_name %d\n", __func__, sopt->sopt_name)); - return (EOPNOTSUPP); + return EOPNOTSUPP; } - if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) - return (EINVAL); + if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) { + return EINVAL; + } /* * Validate interface index if provided. 
If no interface index @@ -2431,17 +2515,19 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) ifnet_head_lock_shared(); if ((u_int)if_index < ifindex) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } (void) in6_setscope(&gsa->sin6_addr, ifp, NULL); } else { error = sa6_embedscope(gsa, ip6_use_defzone); - if (error) - return (EADDRNOTAVAIL); + if (error) { + return EADDRNOTAVAIL; + } /* * Some badly behaved applications don't pass an ifindex * or a scope ID, which is an API violation. In this case, @@ -2459,14 +2545,16 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) ip6_sprintf(&gsa->sin6_addr))); ifp = in6p_lookup_mcast_ifp(inp, gsa); } else { - if (!IF_INDEX_IN_RANGE(ifindex)) - return (EADDRNOTAVAIL); + if (!IF_INDEX_IN_RANGE(ifindex)) { + return EADDRNOTAVAIL; + } ifnet_head_lock_shared(); ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); } - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } } VERIFY(ifp != NULL); @@ -2477,8 +2565,9 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) * Find the membership in the membership array. */ imo = in6p_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IM6O_LOCK(imo); idx = im6o_match_group(imo, ifp, gsa); @@ -2489,8 +2578,9 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) inm = imo->im6o_membership[idx]; imf = &imo->im6o_mfilters[idx]; - if (ssa->sin6_family != AF_UNSPEC) + if (ssa->sin6_family != AF_UNSPEC) { is_final = 0; + } /* * Begin state merge transaction at socket layer. @@ -2555,10 +2645,11 @@ in6p_leave_group(struct inpcb *inp, struct sockopt *sopt) } out_im6f_rollback: - if (error) + if (error) { im6f_rollback(imf); - else + } else { im6f_commit(imf); + } im6f_reap(imf); @@ -2581,20 +2672,20 @@ out_im6f_rollback: IM6O_LOCK(imo); for (++idx; idx < imo->im6o_num_memberships; ++idx) { - imo->im6o_membership[idx-1] = imo->im6o_membership[idx]; - imo->im6o_mfilters[idx-1] = imo->im6o_mfilters[idx]; + imo->im6o_membership[idx - 1] = imo->im6o_membership[idx]; + imo->im6o_mfilters[idx - 1] = imo->im6o_mfilters[idx]; } imo->im6o_num_memberships--; } out_locked: IM6O_UNLOCK(imo); - IM6O_REMREF(imo); /* from in6p_findmoptions() */ + IM6O_REMREF(imo); /* from in6p_findmoptions() */ /* schedule timer now that we've dropped the lock(s) */ mld_set_timeout(&mtp); - return (error); + return error; } /* @@ -2608,39 +2699,43 @@ out_locked: static int in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) { - struct ifnet *ifp; - struct ip6_moptions *imo; - u_int ifindex; - int error; + struct ifnet *ifp; + struct ip6_moptions *imo; + u_int ifindex; + int error; - if (sopt->sopt_valsize != sizeof(u_int)) - return (EINVAL); + if (sopt->sopt_valsize != sizeof(u_int)) { + return EINVAL; + } error = sooptcopyin(sopt, &ifindex, sizeof(u_int), sizeof(u_int)); - if (error) - return (error); + if (error) { + return error; + } ifnet_head_lock_shared(); if ((u_int)if_index < ifindex) { ifnet_head_done(); - return (EINVAL); + return EINVAL; } ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); - if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) - return (EADDRNOTAVAIL); + if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) { + return EADDRNOTAVAIL; + } imo = in6p_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return 
ENOMEM; + } IM6O_LOCK(imo); imo->im6o_multicast_ifp = ifp; IM6O_UNLOCK(imo); - IM6O_REMREF(imo); /* from in6p_findmoptions() */ + IM6O_REMREF(imo); /* from in6p_findmoptions() */ - return (0); + return 0; } /* @@ -2650,68 +2745,76 @@ in6p_set_multicast_if(struct inpcb *inp, struct sockopt *sopt) static int in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) { - struct __msfilterreq64 msfr = {}, msfr64; - struct __msfilterreq32 msfr32; - struct sockaddr_in6 *gsa; - struct ifnet *ifp; - struct in6_mfilter *imf; - struct ip6_moptions *imo; - struct in6_multi *inm; - size_t idx; - int error; - user_addr_t tmp_ptr; - struct mld_tparams mtp; - - bzero(&mtp, sizeof (mtp)); + struct __msfilterreq64 msfr = {}, msfr64; + struct __msfilterreq32 msfr32; + struct sockaddr_in6 *gsa; + struct ifnet *ifp; + struct in6_mfilter *imf; + struct ip6_moptions *imo; + struct in6_multi *inm; + size_t idx; + int error; + user_addr_t tmp_ptr; + struct mld_tparams mtp; + + bzero(&mtp, sizeof(mtp)); if (IS_64BIT_PROCESS(current_proc())) { error = sooptcopyin(sopt, &msfr64, sizeof(struct __msfilterreq64), sizeof(struct __msfilterreq64)); - if (error) - return (error); + if (error) { + return error; + } /* we never use msfr.msfr_srcs; */ memcpy(&msfr, &msfr64, sizeof(msfr64)); } else { error = sooptcopyin(sopt, &msfr32, sizeof(struct __msfilterreq32), sizeof(struct __msfilterreq32)); - if (error) - return (error); + if (error) { + return error; + } /* we never use msfr.msfr_srcs; */ memcpy(&msfr, &msfr32, sizeof(msfr32)); } if ((size_t) msfr.msfr_nsrcs > - UINT32_MAX / sizeof(struct sockaddr_storage)) + UINT32_MAX / sizeof(struct sockaddr_storage)) { msfr.msfr_nsrcs = UINT32_MAX / sizeof(struct sockaddr_storage); + } - if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) - return (ENOBUFS); + if (msfr.msfr_nsrcs > in6_mcast_maxsocksrc) { + return ENOBUFS; + } if (msfr.msfr_fmode != MCAST_EXCLUDE && - msfr.msfr_fmode != MCAST_INCLUDE) - return (EINVAL); + msfr.msfr_fmode != MCAST_INCLUDE) { + return EINVAL; + } if (msfr.msfr_group.ss_family != AF_INET6 || - msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) - return (EINVAL); + msfr.msfr_group.ss_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } gsa = (struct sockaddr_in6 *)&msfr.msfr_group; - if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) - return (EINVAL); + if (!IN6_IS_ADDR_MULTICAST(&gsa->sin6_addr)) { + return EINVAL; + } - gsa->sin6_port = 0; /* ignore port */ + gsa->sin6_port = 0; /* ignore port */ ifnet_head_lock_shared(); if (msfr.msfr_ifindex == 0 || (u_int)if_index < msfr.msfr_ifindex) { ifnet_head_done(); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } ifp = ifindex2ifnet[msfr.msfr_ifindex]; ifnet_head_done(); - if (ifp == NULL) - return (EADDRNOTAVAIL); + if (ifp == NULL) { + return EADDRNOTAVAIL; + } (void)in6_setscope(&gsa->sin6_addr, ifp, NULL); @@ -2720,8 +2823,9 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) * Check if this socket is a member of this group. */ imo = in6p_findmoptions(inp); - if (imo == NULL) - return (ENOMEM); + if (imo == NULL) { + return ENOMEM; + } IM6O_LOCK(imo); idx = im6o_match_group(imo, ifp, gsa); @@ -2745,15 +2849,16 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) * allows us to deal with page faults up-front. 
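As above in this function (and in in6p_get_source_filters()), the request is copied in through one of two fixed layouts, __msfilterreq64 or __msfilterreq32, chosen by IS_64BIT_PROCESS(), and then normalized into a single native msfr whose msfr_srcs user pointer is read separately. A compact sketch of that two-ABI pattern, using hypothetical stand-in structs rather than the real private layouts:

#include <stdint.h>

/* Hypothetical 32- and 64-bit request layouts that differ only in the
 * width of the user pointer, plus the native form the kernel works on. */
struct req32 { uint32_t nsrcs; uint32_t srcs; };	/* 32-bit user ptr */
struct req64 { uint32_t nsrcs; uint64_t srcs; };	/* 64-bit user ptr */
struct req   { uint32_t nsrcs; uint64_t srcs; };

static void
normalize_req(const void *copied_in, int caller_is_64bit, struct req *out)
{
	if (caller_is_64bit) {
		const struct req64 *r = copied_in;
		out->nsrcs = r->nsrcs;
		out->srcs  = r->srcs;
	} else {
		const struct req32 *r = copied_in;
		out->nsrcs = r->nsrcs;
		out->srcs  = (uint64_t)r->srcs;	/* zero-extend the pointer */
	}
}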
*/ if (msfr.msfr_nsrcs > 0) { - struct in6_msource *lims; - struct sockaddr_in6 *psin; - struct sockaddr_storage *kss, *pkss; - unsigned int i; + struct in6_msource *lims; + struct sockaddr_in6 *psin; + struct sockaddr_storage *kss, *pkss; + unsigned int i; - if (IS_64BIT_PROCESS(current_proc())) + if (IS_64BIT_PROCESS(current_proc())) { tmp_ptr = msfr64.msfr_srcs; - else + } else { tmp_ptr = CAST_USER_ADDR_T(msfr32.msfr_srcs); + } MLD_PRINTF(("%s: loading %lu source list entries\n", __func__, (unsigned long)msfr.msfr_nsrcs)); @@ -2811,15 +2916,17 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) */ in6_clearscope(&psin->sin6_addr); error = im6f_get_source(imf, psin, &lims); - if (error) + if (error) { break; + } lims->im6sl_st[1] = imf->im6f_st[1]; } FREE(kss, M_TEMP); } - if (error) + if (error) { goto out_im6f_rollback; + } /* * Begin state merge transaction at MLD layer. @@ -2837,26 +2944,28 @@ in6p_set_source_filters(struct inpcb *inp, struct sockopt *sopt) error = mld_change_state(inm, &mtp, 0); IN6M_UNLOCK(inm); #if MLD_DEBUG - if (error) + if (error) { MLD_PRINTF(("%s: failed mld downcall\n", __func__)); + } #endif out_im6f_rollback: - if (error) + if (error) { im6f_rollback(imf); - else + } else { im6f_commit(imf); + } im6f_reap(imf); out_imo_locked: IM6O_UNLOCK(imo); - IM6O_REMREF(imo); /* from in6p_findmoptions() */ + IM6O_REMREF(imo); /* from in6p_findmoptions() */ /* schedule timer now that we've dropped the lock(s) */ mld_set_timeout(&mtp); - return (error); + return error; } /* @@ -2872,8 +2981,8 @@ out_imo_locked: int ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) { - struct ip6_moptions *im6o; - int error; + struct ip6_moptions *im6o; + int error; error = 0; @@ -2883,8 +2992,9 @@ ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) */ if (SOCK_PROTO(inp->inp_socket) == IPPROTO_DIVERT || (SOCK_TYPE(inp->inp_socket) != SOCK_RAW && - SOCK_TYPE(inp->inp_socket) != SOCK_DGRAM)) - return (EOPNOTSUPP); + SOCK_TYPE(inp->inp_socket) != SOCK_DGRAM)) { + return EOPNOTSUPP; + } switch (sopt->sopt_name) { case IPV6_MULTICAST_IF: @@ -2899,8 +3009,9 @@ ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) break; } error = sooptcopyin(sopt, &hlim, sizeof(hlim), sizeof(int)); - if (error) + if (error) { break; + } if (hlim < -1 || hlim > 255) { error = EINVAL; break; @@ -2915,7 +3026,7 @@ ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) IM6O_LOCK(im6o); im6o->im6o_multicast_hlim = hlim; IM6O_UNLOCK(im6o); - IM6O_REMREF(im6o); /* from in6p_findmoptions() */ + IM6O_REMREF(im6o); /* from in6p_findmoptions() */ break; } @@ -2931,8 +3042,9 @@ ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) break; } error = sooptcopyin(sopt, &loop, sizeof(u_int), sizeof(u_int)); - if (error) + if (error) { break; + } if (loop > 1) { error = EINVAL; break; @@ -2945,7 +3057,7 @@ ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) IM6O_LOCK(im6o); im6o->im6o_multicast_loop = loop; IM6O_UNLOCK(im6o); - IM6O_REMREF(im6o); /* from in6p_findmoptions() */ + IM6O_REMREF(im6o); /* from in6p_findmoptions() */ break; } @@ -2975,7 +3087,7 @@ ip6_setmoptions(struct inpcb *inp, struct sockopt *sopt) break; } - return (error); + return error; } /* * Expose MLD's multicast filter mode and source list(s) to userland, @@ -2989,26 +3101,28 @@ sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS { #pragma unused(oidp) - struct in6_addr mcaddr; - struct in6_addr src; - struct ifnet *ifp; - struct in6_multi *inm; - struct in6_multistep step; - struct ip6_msource *ims; - int *name; - int 
retval = 0; - u_int namelen; - uint32_t fmode, ifindex; + struct in6_addr mcaddr; + struct in6_addr src; + struct ifnet *ifp; + struct in6_multi *inm; + struct in6_multistep step; + struct ip6_msource *ims; + int *name; + int retval = 0; + u_int namelen; + uint32_t fmode, ifindex; name = (int *)arg1; namelen = arg2; - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } /* int: ifindex + 4 * 32 bits of IPv6 address */ - if (namelen != 5) - return (EINVAL); + if (namelen != 5) { + return EINVAL; + } ifindex = name[0]; ifnet_head_lock_shared(); @@ -3016,7 +3130,7 @@ sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS MLD_PRINTF(("%s: ifindex %u out of range\n", __func__, ifindex)); ifnet_head_done(); - return (ENOENT); + return ENOENT; } memcpy(&mcaddr, &name[1], sizeof(struct in6_addr)); @@ -3024,14 +3138,14 @@ sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS MLD_PRINTF(("%s: group %s is not multicast\n", __func__, ip6_sprintf(&mcaddr))); ifnet_head_done(); - return (EINVAL); + return EINVAL; } ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); if (ifp == NULL) { MLD_PRINTF(("%s: no ifp for ifindex %u\n", __func__, ifindex)); - return (ENOENT); + return ENOENT; } /* * Internal MLD lookups require that scope/zone ID is set. @@ -3042,17 +3156,19 @@ sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS IN6_FIRST_MULTI(step, inm); while (inm != NULL) { IN6M_LOCK(inm); - if (inm->in6m_ifp != ifp) + if (inm->in6m_ifp != ifp) { goto next; + } - if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) + if (!IN6_ARE_ADDR_EQUAL(&inm->in6m_addr, &mcaddr)) { goto next; + } fmode = inm->in6m_st[1].iss_fmode; retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t)); if (retval != 0) { IN6M_UNLOCK(inm); - break; /* abort */ + break; /* abort */ } RB_FOREACH(ims, ip6_msource_tree, &inm->in6m_srcs) { MLD_PRINTF(("%s: visit node 0x%llx\n", __func__, @@ -3067,8 +3183,9 @@ sysctl_ip6_mcast_filters SYSCTL_HANDLER_ARGS } src = ims->im6s_addr; retval = SYSCTL_OUT(req, &src, sizeof(struct in6_addr)); - if (retval != 0) - break; /* process next inm */ + if (retval != 0) { + break; /* process next inm */ + } } next: IN6M_UNLOCK(inm); @@ -3076,13 +3193,13 @@ next: } in6_multihead_lock_done(); - return (retval); + return retval; } void in6_multi_init(void) { - PE_parse_boot_argn("ifa_debug", &in6m_debug, sizeof (in6m_debug)); + PE_parse_boot_argn("ifa_debug", &in6m_debug, sizeof(in6m_debug)); /* Setup lock group and attribute for in6_multihead */ in6_multihead_lock_grp_attr = lck_grp_attr_alloc_init(); @@ -3096,8 +3213,8 @@ in6_multi_init(void) in6_multihead_lock_attr); TAILQ_INIT(&in6m_trash_head); - in6m_size = (in6m_debug == 0) ? sizeof (struct in6_multi) : - sizeof (struct in6_multi_dbg); + in6m_size = (in6m_debug == 0) ? 
sizeof(struct in6_multi) : + sizeof(struct in6_multi_dbg); in6m_zone = zinit(in6m_size, IN6M_ZONE_MAX * in6m_size, 0, IN6M_ZONE_NAME); if (in6m_zone == NULL) { @@ -3106,7 +3223,7 @@ in6_multi_init(void) } zone_change(in6m_zone, Z_EXPAND, TRUE); - imm_size = sizeof (struct in6_multi_mship); + imm_size = sizeof(struct in6_multi_mship); imm_zone = zinit(imm_size, IMM_ZONE_MAX * imm_size, 0, IMM_ZONE_NAME); if (imm_zone == NULL) { panic("%s: failed allocating %s", __func__, IMM_ZONE_NAME); @@ -3114,7 +3231,7 @@ in6_multi_init(void) } zone_change(imm_zone, Z_EXPAND, TRUE); - ip6ms_size = sizeof (struct ip6_msource); + ip6ms_size = sizeof(struct ip6_msource); ip6ms_zone = zinit(ip6ms_size, IP6MS_ZONE_MAX * ip6ms_size, 0, IP6MS_ZONE_NAME); if (ip6ms_zone == NULL) { @@ -3123,7 +3240,7 @@ in6_multi_init(void) } zone_change(ip6ms_zone, Z_EXPAND, TRUE); - in6ms_size = sizeof (struct in6_msource); + in6ms_size = sizeof(struct in6_msource); in6ms_zone = zinit(in6ms_size, IN6MS_ZONE_MAX * in6ms_size, 0, IN6MS_ZONE_NAME); if (in6ms_zone == NULL) { @@ -3150,7 +3267,7 @@ in6_multi_alloc(int how) in6m->in6m_trace = in6m_trace; } } - return (in6m); + return in6m; } static void @@ -3238,8 +3355,9 @@ in6_multi_detach(struct in6_multi *in6m) } --in6m->in6m_reqcnt; - if (in6m->in6m_reqcnt > 0) - return (0); + if (in6m->in6m_reqcnt > 0) { + return 0; + } if (!(in6m->in6m_debug & IFD_ATTACHED)) { panic("%s: Attempt to detach an unattached record in6m=%p", @@ -3266,16 +3384,17 @@ in6_multi_detach(struct in6_multi *in6m) in6m->in6m_debug |= IFD_TRASHED; } - return (1); + return 1; } void in6m_addref(struct in6_multi *in6m, int locked) { - if (!locked) + if (!locked) { IN6M_LOCK_SPIN(in6m); - else + } else { IN6M_LOCK_ASSERT_HELD(in6m); + } if (++in6m->in6m_refcount == 0) { panic("%s: in6m=%p wraparound refcnt", __func__, in6m); @@ -3283,8 +3402,9 @@ in6m_addref(struct in6_multi *in6m, int locked) } else if (in6m->in6m_trace != NULL) { (*in6m->in6m_trace)(in6m, TRUE); } - if (!locked) + if (!locked) { IN6M_UNLOCK(in6m); + } } void @@ -3293,10 +3413,11 @@ in6m_remref(struct in6_multi *in6m, int locked) struct ifmultiaddr *ifma; struct mld_ifinfo *mli; - if (!locked) + if (!locked) { IN6M_LOCK_SPIN(in6m); - else + } else { IN6M_LOCK_ASSERT_HELD(in6m); + } if (in6m->in6m_refcount == 0 || (in6m->in6m_refcount == 1 && locked)) { panic("%s: in6m=%p negative refcnt", __func__, in6m); @@ -3307,8 +3428,9 @@ in6m_remref(struct in6_multi *in6m, int locked) --in6m->in6m_refcount; if (in6m->in6m_refcount > 0) { - if (!locked) + if (!locked) { IN6M_UNLOCK(in6m); + } return; } @@ -3333,8 +3455,9 @@ in6m_remref(struct in6_multi *in6m, int locked) IN6M_UNLOCK(in6m); in6_multihead_lock_done(); /* If it was locked, return it as such */ - if (locked) + if (locked) { IN6M_LOCK(in6m); + } return; } in6m_purge(in6m); @@ -3354,8 +3477,9 @@ in6m_remref(struct in6_multi *in6m, int locked) /* Release reference held to the underlying ifmultiaddr */ IFMA_REMREF(ifma); - if (mli != NULL) + if (mli != NULL) { MLI_REMREF(mli); + } } static void @@ -3388,10 +3512,11 @@ in6_multi_mship_alloc(int how) struct in6_multi_mship *imm; imm = (how == M_WAITOK) ? zalloc(imm_zone) : zalloc_noblock(imm_zone); - if (imm != NULL) + if (imm != NULL) { bzero(imm, imm_size); + } - return (imm); + return imm; } static void @@ -3438,10 +3563,11 @@ ip6ms_alloc(int how) i6ms = (how == M_WAITOK) ? 
zalloc(ip6ms_zone) : zalloc_noblock(ip6ms_zone); - if (i6ms != NULL) + if (i6ms != NULL) { bzero(i6ms, ip6ms_size); + } - return (i6ms); + return i6ms; } static void @@ -3457,10 +3583,11 @@ in6ms_alloc(int how) in6ms = (how == M_WAITOK) ? zalloc(in6ms_zone) : zalloc_noblock(in6ms_zone); - if (in6ms != NULL) + if (in6ms != NULL) { bzero(in6ms, in6ms_size); + } - return (in6ms); + return in6ms; } static void @@ -3476,9 +3603,10 @@ static const char *in6m_modestrs[] = { "un\n", "in", "ex" }; static const char * in6m_mode_str(const int mode) { - if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) - return (in6m_modestrs[mode]); - return ("??"); + if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE) { + return in6m_modestrs[mode]; + } + return "??"; } static const char *in6m_statestrs[] = { @@ -3497,9 +3625,10 @@ static const char *in6m_statestrs[] = { static const char * in6m_state_str(const int state) { - if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) - return (in6m_statestrs[state]); - return ("??"); + if (state >= MLD_NOT_MEMBER && state <= MLD_LEAVING_MEMBER) { + return in6m_statestrs[state]; + } + return "??"; } /* @@ -3512,8 +3641,9 @@ in6m_print(const struct in6_multi *inm) IN6M_LOCK_ASSERT_HELD(__DECONST(struct in6_multi *, inm)); - if (mld_debug == 0) + if (mld_debug == 0) { return; + } printf("%s: --- begin in6m 0x%llx ---\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm)); @@ -3544,12 +3674,11 @@ in6m_print(const struct in6_multi *inm) (uint64_t)VM_KERNEL_ADDRPERM(inm)); } -#else +#else void in6m_print(__unused const struct in6_multi *inm) { - } #endif diff --git a/bsd/netinet6/in6_pcb.c b/bsd/netinet6/in6_pcb.c index db72c5c35..674f64d68 100644 --- a/bsd/netinet6/in6_pcb.c +++ b/bsd/netinet6/in6_pcb.c @@ -166,16 +166,17 @@ in6_pcblookup_local_and_cleanup(struct inpcbinfo *pcbinfo, socket_lock(so, 0); if (so->so_usecount == 0) { - if (inp->inp_state != INPCB_STATE_DEAD) + if (inp->inp_state != INPCB_STATE_DEAD) { in6_pcbdetach(inp); - in_pcbdispose(inp); /* will unlock & destroy */ + } + in_pcbdispose(inp); /* will unlock & destroy */ inp = NULL; } else { socket_unlock(so, 0); } } - return (inp); + return inp; } /* @@ -187,7 +188,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) { struct socket *so = inp->inp_socket; struct inpcbinfo *pcbinfo = inp->inp_pcbinfo; - u_short lport = 0; + u_short lport = 0; int wild = 0, reuseport = (so->so_options & SO_REUSEPORT); struct ifnet *outif = NULL; struct sockaddr_in6 sin6; @@ -196,10 +197,12 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) kauth_cred_t cred; #endif /* !CONFIG_EMBEDDED */ - if (!in6_ifaddrs) /* XXX broken! */ - return (EADDRNOTAVAIL); - if (!(so->so_options & (SO_REUSEADDR|SO_REUSEPORT))) + if (!in6_ifaddrs) { /* XXX broken! */ + return EADDRNOTAVAIL; + } + if (!(so->so_options & (SO_REUSEADDR | SO_REUSEPORT))) { wild = 1; + } socket_unlock(so, 0); /* keep reference */ lck_rw_lock_exclusive(pcbinfo->ipi_lock); @@ -207,15 +210,15 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) /* another thread completed the bind */ lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EINVAL); + return EINVAL; } - bzero(&sin6, sizeof (sin6)); + bzero(&sin6, sizeof(sin6)); if (nam != NULL) { - if (nam->sa_len != sizeof (struct sockaddr_in6)) { + if (nam->sa_len != sizeof(struct sockaddr_in6)) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EINVAL); + return EINVAL; } /* * family check. 
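in6_pcbbind() above rejects any nam whose sa_len or sa_family does not match a sockaddr_in6 before it looks at the port or address at all. A userland bind that satisfies those checks (note sin6_len, which this BSD-derived API requires to be filled in):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Wildcard IPv6 bind; SO_REUSEADDR influences the EADDRINUSE checks that
 * in6_pcbbind() performs against existing PCBs. */
static int
bind6_any(int s, in_port_t port)
{
	struct sockaddr_in6 sin6;
	int on = 1;

	setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_len = sizeof(sin6);
	sin6.sin6_port = htons(port);
	sin6.sin6_addr = in6addr_any;
	return bind(s, (struct sockaddr *)&sin6, sizeof(sin6));
}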
@@ -223,7 +226,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) if (nam->sa_family != AF_INET6) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } lport = SIN6(nam)->sin6_port; @@ -234,12 +237,12 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) NULL) != 0) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EINVAL); + return EINVAL; } /* Sanitize local copy for address searches */ - sin6.sin6_flowinfo = 0; - sin6.sin6_scope_id = 0; + sin6.sin6_flowinfo = 0; + sin6.sin6_scope_id = 0; sin6.sin6_port = 0; if (IN6_IS_ADDR_MULTICAST(&sin6.sin6_addr)) { @@ -250,8 +253,9 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) * and a multicast address is bound on both * new and duplicated sockets. */ - if (so->so_options & SO_REUSEADDR) - reuseport = SO_REUSEADDR|SO_REUSEPORT; + if (so->so_options & SO_REUSEADDR) { + reuseport = SO_REUSEADDR | SO_REUSEPORT; + } } else if (!IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr)) { struct ifaddr *ifa; @@ -259,7 +263,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) if (ifa == NULL) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } else { /* * XXX: bind to an anycast address might @@ -270,13 +274,13 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) */ IFA_LOCK_SPIN(ifa); if (((struct in6_ifaddr *)ifa)->ia6_flags & - (IN6_IFF_ANYCAST | IN6_IFF_NOTREADY| + (IN6_IFF_ANYCAST | IN6_IFF_NOTREADY | IN6_IFF_DETACHED | IN6_IFF_CLAT46)) { IFA_UNLOCK(ifa); IFA_REMREF(ifa); lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } /* * Opportunistically determine the outbound @@ -297,7 +301,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) #if !CONFIG_EMBEDDED if (ntohs(lport) < IPV6PORT_RESERVED && - !IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr)) { + !IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr)) { cred = kauth_cred_proc_ref(p); error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0); @@ -305,7 +309,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) if (error != 0) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EACCES); + return EACCES; } } #endif /* !CONFIG_EMBEDDED */ @@ -315,16 +319,16 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) &sin6.sin6_addr, lport, INPLOOKUP_WILDCARD); if (t != NULL && (!IN6_IS_ADDR_UNSPECIFIED( - &sin6.sin6_addr) || + &sin6.sin6_addr) || !IN6_IS_ADDR_UNSPECIFIED(&t->in6p_laddr) || !(t->inp_socket->so_options & SO_REUSEPORT)) && (u != kauth_cred_getuid( - t->inp_socket->so_cred)) && + t->inp_socket->so_cred)) && !(t->inp_socket->so_flags & SOF_REUSESHAREUID)) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRINUSE); + return EADDRINUSE; } if (!(inp->inp_flags & IN6P_IPV6_V6ONLY) && IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr)) { @@ -332,8 +336,8 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) in6_sin6_2_sin(&sin, &sin6); t = in_pcblookup_local_and_cleanup( - pcbinfo, sin.sin_addr, lport, - INPLOOKUP_WILDCARD); + pcbinfo, sin.sin_addr, lport, + INPLOOKUP_WILDCARD); if (t != NULL && !(t->inp_socket->so_options & SO_REUSEPORT) && @@ -344,7 +348,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) SOCK_DOM(t->inp_socket))) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRINUSE); + return EADDRINUSE; } } } @@ -354,7 
+358,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) (reuseport & t->inp_socket->so_options) == 0) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRINUSE); + return EADDRINUSE; } if (!(inp->inp_flags & IN6P_IPV6_V6ONLY) && IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr)) { @@ -369,7 +373,7 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) SOCK_DOM(so) == SOCK_DOM(t->inp_socket))) { lck_rw_done(pcbinfo->ipi_lock); socket_lock(so, 0); - return (EADDRINUSE); + return EADDRINUSE; } } } @@ -383,13 +387,13 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) */ if (inp->inp_state == INPCB_STATE_DEAD) { lck_rw_done(pcbinfo->ipi_lock); - return (ECONNABORTED); + return ECONNABORTED; } /* check if the socket got bound when the lock was released */ if (inp->inp_lport || !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { lck_rw_done(pcbinfo->ipi_lock); - return (EINVAL); + return EINVAL; } if (!IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr)) { @@ -402,9 +406,9 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) if ((e = in6_pcbsetport(&inp->in6p_laddr, inp, p, 1)) != 0) { /* Undo any address bind from above. */ inp->in6p_laddr = in6addr_any; - inp->in6p_last_outifp = NULL; + inp->in6p_last_outifp = NULL; lck_rw_done(pcbinfo->ipi_lock); - return (e); + return e; } } else { inp->inp_lport = lport; @@ -413,12 +417,12 @@ in6_pcbbind(struct inpcb *inp, struct sockaddr *nam, struct proc *p) inp->inp_lport = 0; inp->in6p_last_outifp = NULL; lck_rw_done(pcbinfo->ipi_lock); - return (EAGAIN); + return EAGAIN; } } lck_rw_done(pcbinfo->ipi_lock); sflt_notify(so, sock_evt_bound, NULL); - return (0); + return 0; } /* @@ -442,26 +446,32 @@ in6_pcbladdr(struct inpcb *inp, struct sockaddr *nam, int error = 0; unsigned int ifscope; - if (outif != NULL) + if (outif != NULL) { *outif = NULL; - if (nam->sa_len != sizeof (struct sockaddr_in6)) - return (EINVAL); - if (SIN6(nam)->sin6_family != AF_INET6) - return (EAFNOSUPPORT); - if (SIN6(nam)->sin6_port == 0) - return (EADDRNOTAVAIL); + } + if (nam->sa_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } + if (SIN6(nam)->sin6_family != AF_INET6) { + return EAFNOSUPPORT; + } + if (SIN6(nam)->sin6_port == 0) { + return EADDRNOTAVAIL; + } /* KAME hack: embed scopeid */ - if (in6_embedscope(&SIN6(nam)->sin6_addr, SIN6(nam), inp, NULL, NULL) != 0) - return (EINVAL); + if (in6_embedscope(&SIN6(nam)->sin6_addr, SIN6(nam), inp, NULL, NULL) != 0) { + return EINVAL; + } if (in6_ifaddrs) { /* * If the destination address is UNSPECIFIED addr, * use the loopback addr, e.g ::1. */ - if (IN6_IS_ADDR_UNSPECIFIED(&SIN6(nam)->sin6_addr)) + if (IN6_IS_ADDR_UNSPECIFIED(&SIN6(nam)->sin6_addr)) { SIN6(nam)->sin6_addr = in6addr_loopback; + } } ifscope = (inp->inp_flags & INP_BOUND_IF) ? @@ -489,9 +499,10 @@ in6_pcbladdr(struct inpcb *inp, struct sockaddr *nam, * it could still be useful to the caller. 
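The effect of in6_pcbladdr() is visible from userland: after connect() on a datagram socket, getsockname() reports the local address the kernel selected. A small sketch:

#include <netinet/in.h>
#include <sys/socket.h>

/* Connect a UDP socket, then read back the kernel-chosen local address;
 * source selection happens in in6_pcbladdr()/in6_selectsrc(). */
static int
learn_local_addr(int s, const struct sockaddr_in6 *dst,
    struct sockaddr_in6 *local)
{
	socklen_t len = sizeof(*local);

	if (connect(s, (const struct sockaddr *)dst, sizeof(*dst)) == -1)
		return -1;
	return getsockname(s, (struct sockaddr *)local, &len);
}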
*/ if (rt != NULL && rt->rt_ifp != *outif) { - ifnet_reference(rt->rt_ifp); /* for caller */ - if (*outif != NULL) + ifnet_reference(rt->rt_ifp); /* for caller */ + if (*outif != NULL) { ifnet_release(*outif); + } *outif = rt->rt_ifp; } } @@ -503,9 +514,10 @@ in6_pcbladdr(struct inpcb *inp, struct sockaddr *nam, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); error = EHOSTUNREACH; } - if (error == 0) + if (error == 0) { error = EADDRNOTAVAIL; - return (error); + } + return error; } *plocal_addr6 = *addr6; @@ -513,7 +525,7 @@ in6_pcbladdr(struct inpcb *inp, struct sockaddr *nam, * Don't do pcblookup call here; return interface in * plocal_addr6 and exit to caller, that will do the lookup. */ - return (0); + return 0; } /* @@ -534,13 +546,14 @@ in6_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p) struct socket *so = inp->inp_socket; #if CONTENT_FILTER - if (so) + if (so) { so->so_state_change_cnt++; + } #endif if (so->so_proto->pr_protocol == IPPROTO_UDP && sin6->sin6_port == htons(53) && !(so->so_flags1 & SOF1_DNS_COUNTED)) { - so->so_flags1 |= SOF1_DNS_COUNTED; + so->so_flags1 |= SOF1_DNS_COUNTED; INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_dns); } @@ -553,9 +566,10 @@ in6_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p) * whenever it's non-NULL. */ if ((error = in6_pcbladdr(inp, nam, &addr6, &outif)) != 0) { - if (outif != NULL && inp_restricted_send(inp, outif)) + if (outif != NULL && inp_restricted_send(inp, outif)) { soevent(so, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); + } goto done; } socket_unlock(so, 0); @@ -571,11 +585,12 @@ in6_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p) if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { if (inp->inp_lport == 0) { error = in6_pcbbind(inp, NULL, p); - if (error) + if (error) { goto done; + } } inp->in6p_laddr = addr6; - inp->in6p_last_outifp = outif; /* no reference needed */ + inp->in6p_last_outifp = outif; /* no reference needed */ inp->in6p_flags |= INP_IN6ADDR_ANY; } if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) { @@ -586,16 +601,18 @@ in6_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p) } inp->in6p_faddr = sin6->sin6_addr; inp->inp_fport = sin6->sin6_port; - if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) + if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) { nstat_pcb_invalidate_cache(inp); + } in_pcbrehash(inp); lck_rw_done(inp->inp_pcbinfo->ipi_lock); done: - if (outif != NULL) + if (outif != NULL) { ifnet_release(outif); + } - return (error); + return error; } void @@ -604,8 +621,9 @@ in6_pcbdisconnect(struct inpcb *inp) struct socket *so = inp->inp_socket; #if CONTENT_FILTER - if (so) + if (so) { so->so_state_change_cnt++; + } #endif if (!lck_rw_try_lock_exclusive(inp->inp_pcbinfo->ipi_lock)) { @@ -614,9 +632,10 @@ in6_pcbdisconnect(struct inpcb *inp) lck_rw_lock_exclusive(inp->inp_pcbinfo->ipi_lock); socket_lock(so, 0); } - if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) + if (nstat_collect && SOCK_PROTO(so) == IPPROTO_UDP) { nstat_pcb_cache(inp); - bzero((caddr_t)&inp->in6p_faddr, sizeof (inp->in6p_faddr)); + } + bzero((caddr_t)&inp->in6p_faddr, sizeof(inp->in6p_faddr)); inp->inp_fport = 0; /* clear flowinfo - RFC 6437 */ inp->inp_flow &= ~IPV6_FLOWLABEL_MASK; @@ -627,8 +646,9 @@ in6_pcbdisconnect(struct inpcb *inp) * so check for SOF_MP_SUBFLOW socket flag before detaching the PCB; * when the socket is closed for real, SOF_MP_SUBFLOW would be cleared. 
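in6_pcbdisconnect() above clears the foreign address, the foreign port, and the RFC 6437 flow label. From userland, POSIX specifies that re-connecting a datagram socket to an address with family AF_UNSPEC dissolves the association; a sketch assuming that BSD-derived behavior:

#include <string.h>
#include <sys/socket.h>

/* Disconnect a connected UDP socket; the kernel side is expected to run
 * in6_pcbdisconnect(), zeroing in6p_faddr and the flow label. */
static int
udp_disconnect(int s)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	sa.sa_len = sizeof(sa);
	return connect(s, &sa, sizeof(sa));
}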
*/ - if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) + if (!(so->so_flags & SOF_MP_SUBFLOW) && (so->so_state & SS_NOFDREF)) { in6_pcbdetach(inp); + } } void @@ -660,8 +680,9 @@ in6_pcbdetach(struct inpcb *inp) * before we detach it. */ if (nstat_collect && - (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) + (SOCK_PROTO(so) == IPPROTO_TCP || SOCK_PROTO(so) == IPPROTO_UDP)) { nstat_pcb_detach(inp); + } /* mark socket state as dead */ if (in_pcb_checkstate(inp, WNT_STOPUSING, 1) != WNT_STOPUSING) { panic("%s: so=%p proto=%d couldn't set to STOPUSING\n", @@ -703,10 +724,12 @@ in6_pcbdetach(struct inpcb *inp) */ if (im6o != NULL || imo != NULL) { socket_unlock(so, 0); - if (im6o != NULL) + if (im6o != NULL) { IM6O_REMREF(im6o); - if (imo != NULL) + } + if (imo != NULL) { IMO_REMREF(imo); + } socket_lock(so, 0); } } @@ -717,43 +740,48 @@ in6_sockaddr(in_port_t port, struct in6_addr *addr_p) { struct sockaddr_in6 *sin6; - MALLOC(sin6, struct sockaddr_in6 *, sizeof (*sin6), M_SONAME, M_WAITOK); - if (sin6 == NULL) - return (NULL); - bzero(sin6, sizeof (*sin6)); + MALLOC(sin6, struct sockaddr_in6 *, sizeof(*sin6), M_SONAME, M_WAITOK); + if (sin6 == NULL) { + return NULL; + } + bzero(sin6, sizeof(*sin6)); sin6->sin6_family = AF_INET6; - sin6->sin6_len = sizeof (*sin6); + sin6->sin6_len = sizeof(*sin6); sin6->sin6_port = port; sin6->sin6_addr = *addr_p; /* would be good to use sa6_recoverscope(), except for locking */ - if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) + if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); - else - sin6->sin6_scope_id = 0; /* XXX */ - if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) + } else { + sin6->sin6_scope_id = 0; /* XXX */ + } + if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { sin6->sin6_addr.s6_addr16[1] = 0; + } - return ((struct sockaddr *)sin6); + return (struct sockaddr *)sin6; } void in6_sockaddr_s(in_port_t port, struct in6_addr *addr_p, struct sockaddr_in6 *sin6) { - bzero(sin6, sizeof (*sin6)); + bzero(sin6, sizeof(*sin6)); sin6->sin6_family = AF_INET6; - sin6->sin6_len = sizeof (*sin6); + sin6->sin6_len = sizeof(*sin6); sin6->sin6_port = port; sin6->sin6_addr = *addr_p; /* would be good to use sa6_recoverscope(), except for locking */ - if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) + if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); - else - sin6->sin6_scope_id = 0; /* XXX */ - if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) + } else { + sin6->sin6_scope_id = 0; /* XXX */ + } + if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { sin6->sin6_addr.s6_addr16[1] = 0; + } } /* @@ -769,16 +797,18 @@ in6_getsockaddr(struct socket *so, struct sockaddr **nam) struct in6_addr addr; in_port_t port; - if ((inp = sotoinpcb(so)) == NULL) - return (EINVAL); + if ((inp = sotoinpcb(so)) == NULL) { + return EINVAL; + } port = inp->inp_lport; addr = inp->in6p_laddr; *nam = in6_sockaddr(port, &addr); - if (*nam == NULL) - return (ENOBUFS); - return (0); + if (*nam == NULL) { + return ENOBUFS; + } + return 0; } int @@ -789,16 +819,17 @@ in6_getsockaddr_s(struct socket *so, struct sockaddr_in6 *ss) in_port_t port; VERIFY(ss != NULL); - bzero(ss, sizeof (*ss)); + bzero(ss, sizeof(*ss)); - if ((inp = sotoinpcb(so)) == NULL) - return (EINVAL); + if ((inp = sotoinpcb(so)) == NULL) { + return EINVAL; + } port = inp->inp_lport; addr = inp->in6p_laddr; in6_sockaddr_s(port, &addr, ss); - return (0); + return 0; } int @@ -808,54 +839,60 @@ 
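in6_sockaddr() and in6_sockaddr_s() above hand-roll what sa6_recoverscope() would do: inside the kernel, link-local scope zones are kept KAME-style in the second 16-bit word of the address (s6_addr16[1]), and must be moved into sin6_scope_id, and zeroed in the address, before the sockaddr reaches userland. The same recovery as a standalone function (byte-wise arithmetic replaces the kernel's ntohs on the private s6_addr16 union member):

#include <netinet/in.h>
#include <stdint.h>

/* Recover a KAME-embedded zone ID from a link-local (unicast or
 * multicast) address and clear the embedded word. */
static void
recover_scope(struct sockaddr_in6 *sin6)
{
	if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) ||
	    IN6_IS_ADDR_MC_LINKLOCAL(&sin6->sin6_addr)) {
		sin6->sin6_scope_id =
		    ((uint32_t)sin6->sin6_addr.s6_addr[2] << 8) |
		    sin6->sin6_addr.s6_addr[3];
		sin6->sin6_addr.s6_addr[2] = 0;
		sin6->sin6_addr.s6_addr[3] = 0;
	}
}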
in6_getpeeraddr(struct socket *so, struct sockaddr **nam) struct in6_addr addr; in_port_t port; - if ((inp = sotoinpcb(so)) == NULL) - return (EINVAL); + if ((inp = sotoinpcb(so)) == NULL) { + return EINVAL; + } port = inp->inp_fport; addr = inp->in6p_faddr; *nam = in6_sockaddr(port, &addr); - if (*nam == NULL) - return (ENOBUFS); - return (0); + if (*nam == NULL) { + return ENOBUFS; + } + return 0; } int in6_mapped_sockaddr(struct socket *so, struct sockaddr **nam) { - struct inpcb *inp = sotoinpcb(so); - int error; + struct inpcb *inp = sotoinpcb(so); + int error; - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } if (inp->inp_vflag & INP_IPV4) { error = in_getsockaddr(so, nam); - if (error == 0) + if (error == 0) { error = in6_sin_2_v4mapsin6_in_sock(nam); + } } else { /* scope issues will be handled in in6_getsockaddr(). */ error = in6_getsockaddr(so, nam); } - return (error); + return error; } int in6_mapped_peeraddr(struct socket *so, struct sockaddr **nam) { - struct inpcb *inp = sotoinpcb(so); - int error; + struct inpcb *inp = sotoinpcb(so); + int error; - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } if (inp->inp_vflag & INP_IPV4) { error = in_getpeeraddr(so, nam); - if (error == 0) + if (error == 0) { error = in6_sin_2_v4mapsin6_in_sock(nam); + } } else { /* scope issues will be handled in in6_getpeeraddr(). */ error = in6_getpeeraddr(so, nam); } - return (error); + return error; } /* @@ -875,16 +912,18 @@ in6_pcbnotify(struct inpcbinfo *pcbinfo, struct sockaddr *dst, u_int fport_arg, struct inpcbhead *head = pcbinfo->ipi_listhead; struct inpcb *inp, *ninp; struct sockaddr_in6 sa6_src, *sa6_dst; - u_short fport = fport_arg, lport = lport_arg; + u_short fport = fport_arg, lport = lport_arg; u_int32_t flowinfo; int errno; - if ((unsigned)cmd >= PRC_NCMDS || dst->sa_family != AF_INET6) + if ((unsigned)cmd >= PRC_NCMDS || dst->sa_family != AF_INET6) { return; + } sa6_dst = (struct sockaddr_in6 *)(void *)dst; - if (IN6_IS_ADDR_UNSPECIFIED(&sa6_dst->sin6_addr)) + if (IN6_IS_ADDR_UNSPECIFIED(&sa6_dst->sin6_addr)) { return; + } /* * note that src can be NULL when we get notify by local fragmentation. @@ -904,18 +943,20 @@ in6_pcbnotify(struct inpcbinfo *pcbinfo, struct sockaddr *dst, u_int fport_arg, if (PRC_IS_REDIRECT(cmd) || cmd == PRC_HOSTDEAD) { fport = 0; lport = 0; - bzero((caddr_t)&sa6_src.sin6_addr, sizeof (sa6_src.sin6_addr)); + bzero((caddr_t)&sa6_src.sin6_addr, sizeof(sa6_src.sin6_addr)); - if (cmd != PRC_HOSTDEAD) + if (cmd != PRC_HOSTDEAD) { notify = in6_rtchange; + } } errno = inet6ctlerrmap[cmd]; lck_rw_lock_shared(pcbinfo->ipi_lock); for (inp = LIST_FIRST(head); inp != NULL; inp = ninp) { ninp = LIST_NEXT(inp, inp_list); - if (!(inp->inp_vflag & INP_IPV6)) + if (!(inp->inp_vflag & INP_IPV6)) { continue; + } /* * If the error designates a new path MTU for a destination @@ -926,9 +967,10 @@ in6_pcbnotify(struct inpcbinfo *pcbinfo, struct sockaddr *dst, u_int fport_arg, * sockets disconnected. * XXX: should we avoid to notify the value to TCP sockets? */ - if (cmd == PRC_MSGSIZE) + if (cmd == PRC_MSGSIZE) { ip6_notify_pmtu(inp, (struct sockaddr_in6 *)(void *)dst, (u_int32_t *)cmdarg); + } /* * Detect if we should notify the error. 
If no source and @@ -941,21 +983,23 @@ in6_pcbnotify(struct inpcbinfo *pcbinfo, struct sockaddr *dst, u_int fport_arg, if (lport == 0 && fport == 0 && flowinfo && inp->inp_socket != NULL && flowinfo == (inp->inp_flow & IPV6_FLOWLABEL_MASK) && - IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, &sa6_src.sin6_addr)) + IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, &sa6_src.sin6_addr)) { goto do_notify; - else if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, + } else if (!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, &sa6_dst->sin6_addr) || inp->inp_socket == NULL || (lport && inp->inp_lport != lport) || (!IN6_IS_ADDR_UNSPECIFIED(&sa6_src.sin6_addr) && !IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, - &sa6_src.sin6_addr)) || (fport && inp->inp_fport != fport)) + &sa6_src.sin6_addr)) || (fport && inp->inp_fport != fport)) { continue; + } do_notify: if (notify) { if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) == - WNT_STOPUSING) + WNT_STOPUSING) { continue; + } socket_lock(inp->inp_socket, 1); (*notify)(inp, errno); (void) in_pcb_checkstate(inp, WNT_RELEASE, 1); @@ -988,21 +1032,22 @@ in6_pcblookup_local(struct inpcbinfo *pcbinfo, struct in6_addr *laddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { - if (!(inp->inp_vflag & INP_IPV6)) + if (!(inp->inp_vflag & INP_IPV6)) { continue; + } if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) && IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) && inp->inp_lport == lport) { /* * Found. */ - return (inp); + return inp; } } /* * Not found. */ - return (NULL); + return NULL; } /* * Best fit PCB lookup. @@ -1013,8 +1058,9 @@ in6_pcblookup_local(struct inpcbinfo *pcbinfo, struct in6_addr *laddr, porthash = &pcbinfo->ipi_porthashbase[INP_PCBPORTHASH(lport, pcbinfo->ipi_porthashmask)]; LIST_FOREACH(phd, porthash, phd_hash) { - if (phd->phd_port == lport) + if (phd->phd_port == lport) { break; + } } if (phd != NULL) { /* @@ -1023,19 +1069,23 @@ in6_pcblookup_local(struct inpcbinfo *pcbinfo, struct in6_addr *laddr, */ LIST_FOREACH(inp, &phd->phd_pcblist, inp_portlist) { wildcard = 0; - if (!(inp->inp_vflag & INP_IPV6)) + if (!(inp->inp_vflag & INP_IPV6)) { continue; - if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) + } + if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { wildcard++; + } if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) { - if (IN6_IS_ADDR_UNSPECIFIED(laddr)) + if (IN6_IS_ADDR_UNSPECIFIED(laddr)) { wildcard++; - else if (!IN6_ARE_ADDR_EQUAL( - &inp->in6p_laddr, laddr)) + } else if (!IN6_ARE_ADDR_EQUAL( + &inp->in6p_laddr, laddr)) { continue; + } } else { - if (!IN6_IS_ADDR_UNSPECIFIED(laddr)) + if (!IN6_IS_ADDR_UNSPECIFIED(laddr)) { wildcard++; + } } if (wildcard < matchwild) { match = inp; @@ -1046,7 +1096,7 @@ in6_pcblookup_local(struct inpcbinfo *pcbinfo, struct in6_addr *laddr, } } } - return (match); + return match; } /* @@ -1122,11 +1172,13 @@ in6_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr->s6_addr32[3] /* XXX */, lport, fport, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { - if (!(inp->inp_vflag & INP_IPV6)) + if (!(inp->inp_vflag & INP_IPV6)) { continue; + } - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if (IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, faddr) && IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) && @@ -1137,12 +1189,12 @@ in6_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, * Found. 
Check if pcb is still valid */ *uid = kauth_cred_getuid( - inp->inp_socket->so_cred); + inp->inp_socket->so_cred); *gid = kauth_cred_getgid( - inp->inp_socket->so_cred); + inp->inp_socket->so_cred); } lck_rw_done(pcbinfo->ipi_lock); - return (found); + return found; } } if (wildcard) { @@ -1151,11 +1203,13 @@ in6_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { - if (!(inp->inp_vflag & INP_IPV6)) + if (!(inp->inp_vflag & INP_IPV6)) { continue; + } - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) && inp->inp_lport == lport) { @@ -1164,14 +1218,14 @@ in6_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, found = (inp->inp_socket != NULL); if (found) { *uid = kauth_cred_getuid( - inp->inp_socket->so_cred); + inp->inp_socket->so_cred); *gid = kauth_cred_getgid( - inp->inp_socket->so_cred); + inp->inp_socket->so_cred); } lck_rw_done(pcbinfo->ipi_lock); - return (found); + return found; } else if (IN6_IS_ADDR_UNSPECIFIED( - &inp->in6p_laddr)) { + &inp->in6p_laddr)) { local_wild = inp; } } @@ -1179,12 +1233,12 @@ in6_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, if (local_wild) { if ((found = (local_wild->inp_socket != NULL))) { *uid = kauth_cred_getuid( - local_wild->inp_socket->so_cred); + local_wild->inp_socket->so_cred); *gid = kauth_cred_getgid( - local_wild->inp_socket->so_cred); + local_wild->inp_socket->so_cred); } lck_rw_done(pcbinfo->ipi_lock); - return (found); + return found; } } @@ -1192,7 +1246,7 @@ in6_pcblookup_hash_exists(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, * Not found. 
*/ lck_rw_done(pcbinfo->ipi_lock); - return (0); + return 0; } /* @@ -1215,11 +1269,13 @@ in6_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(faddr->s6_addr32[3] /* XXX */, lport, fport, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { - if (!(inp->inp_vflag & INP_IPV6)) + if (!(inp->inp_vflag & INP_IPV6)) { continue; + } - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if (IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, faddr) && IN6_ARE_ADDR_EQUAL(&inp->in6p_laddr, laddr) && @@ -1231,11 +1287,11 @@ in6_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) { lck_rw_done(pcbinfo->ipi_lock); - return (inp); + return inp; } else { /* it's there but dead, say it isn't found */ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } } } @@ -1245,11 +1301,13 @@ in6_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, head = &pcbinfo->ipi_hashbase[INP_PCBHASH(INADDR_ANY, lport, 0, pcbinfo->ipi_hashmask)]; LIST_FOREACH(inp, head, inp_hash) { - if (!(inp->inp_vflag & INP_IPV6)) + if (!(inp->inp_vflag & INP_IPV6)) { continue; + } - if (inp_restricted_recv(inp, ifp)) + if (inp_restricted_recv(inp, ifp)) { continue; + } if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) && inp->inp_lport == lport) { @@ -1258,14 +1316,14 @@ in6_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) { lck_rw_done(pcbinfo->ipi_lock); - return (inp); + return inp; } else { /* dead; say it isn't found */ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } } else if (IN6_IS_ADDR_UNSPECIFIED( - &inp->in6p_laddr)) { + &inp->in6p_laddr)) { local_wild = inp; } } @@ -1273,10 +1331,10 @@ in6_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, if (local_wild && in_pcb_checkstate(local_wild, WNT_ACQUIRE, 0) != WNT_STOPUSING) { lck_rw_done(pcbinfo->ipi_lock); - return (local_wild); + return local_wild; } else { lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } } @@ -1284,7 +1342,7 @@ in6_pcblookup_hash(struct inpcbinfo *pcbinfo, struct in6_addr *faddr, * Not found. 
*/ lck_rw_done(pcbinfo->ipi_lock); - return (NULL); + return NULL; } void @@ -1293,17 +1351,18 @@ init_sin6(struct sockaddr_in6 *sin6, struct mbuf *m) struct ip6_hdr *ip; ip = mtod(m, struct ip6_hdr *); - bzero(sin6, sizeof (*sin6)); - sin6->sin6_len = sizeof (*sin6); + bzero(sin6, sizeof(*sin6)); + sin6->sin6_len = sizeof(*sin6); sin6->sin6_family = AF_INET6; sin6->sin6_addr = ip->ip6_src; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) { sin6->sin6_addr.s6_addr16[1] = 0; - if ((m->m_pkthdr.pkt_flags & (PKTF_LOOP|PKTF_IFAINFO)) == - (PKTF_LOOP|PKTF_IFAINFO)) + if ((m->m_pkthdr.pkt_flags & (PKTF_LOOP | PKTF_IFAINFO)) == + (PKTF_LOOP | PKTF_IFAINFO)) { sin6->sin6_scope_id = m->m_pkthdr.src_ifindex; - else if (m->m_pkthdr.rcvif != NULL) + } else if (m->m_pkthdr.rcvif != NULL) { sin6->sin6_scope_id = m->m_pkthdr.rcvif->if_index; + } } } @@ -1341,10 +1400,11 @@ in6p_route_copyout(struct inpcb *inp, struct route_in6 *dst) socket_lock_assert_owned(inp->inp_socket); /* Minor sanity check */ - if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET6) + if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET6) { panic("%s: wrong or corrupted route: %p", __func__, src); + } - route_copyout((struct route *)dst, (struct route *)src, sizeof (*dst)); + route_copyout((struct route *)dst, (struct route *)src, sizeof(*dst)); } void @@ -1355,8 +1415,9 @@ in6p_route_copyin(struct inpcb *inp, struct route_in6 *src) socket_lock_assert_owned(inp->inp_socket); /* Minor sanity check */ - if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET6) + if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET6) { panic("%s: wrong or corrupted route: %p", __func__, src); + } - route_copyin((struct route *)src, (struct route *)dst, sizeof (*src)); + route_copyin((struct route *)src, (struct route *)dst, sizeof(*src)); } diff --git a/bsd/netinet6/in6_pcb.h b/bsd/netinet6/in6_pcb.h index 01973578a..dcf8a8f30 100644 --- a/bsd/netinet6/in6_pcb.h +++ b/bsd/netinet6/in6_pcb.h @@ -91,7 +91,7 @@ */ #ifndef _NETINET6_IN6_PCB_H_ -#define _NETINET6_IN6_PCB_H_ +#define _NETINET6_IN6_PCB_H_ #include #ifdef BSD_KERNEL_PRIVATE diff --git a/bsd/netinet6/in6_proto.c b/bsd/netinet6/in6_proto.c index cd8777c0a..78d084ee6 100644 --- a/bsd/netinet6/in6_proto.c +++ b/bsd/netinet6/in6_proto.c @@ -168,180 +168,183 @@ static int rip6_pr_output(struct mbuf *, struct socket *, struct sockaddr_in6 *, struct mbuf *); struct ip6protosw inet6sw[] = { -{ - .pr_type = 0, - .pr_protocol = IPPROTO_IPV6, - .pr_init = ip6_init, - .pr_drain = ip6_drain, - .pr_usrreqs = &nousrreqs, -}, -{ - .pr_type = SOCK_DGRAM, - .pr_protocol = IPPROTO_UDP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK|PR_PCBLOCK| - PR_EVCONNINFO|PR_PRECONN_WRITE, - .pr_input = udp6_input, - .pr_ctlinput = udp6_ctlinput, - .pr_ctloutput = ip6_ctloutput, -#if !INET /* don't call initialization twice */ - .pr_init = udp_init, + { + .pr_type = 0, + .pr_protocol = IPPROTO_IPV6, + .pr_init = ip6_init, + .pr_drain = ip6_drain, + .pr_usrreqs = &nousrreqs, + }, + { + .pr_type = SOCK_DGRAM, + .pr_protocol = IPPROTO_UDP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK | PR_PCBLOCK | + PR_EVCONNINFO | PR_PRECONN_WRITE, + .pr_input = udp6_input, + .pr_ctlinput = udp6_ctlinput, + .pr_ctloutput = ip6_ctloutput, +#if !INET /* don't call initialization twice */ + .pr_init = udp_init, #endif /* !INET */ - .pr_usrreqs = &udp6_usrreqs, - .pr_lock = udp_lock, - .pr_unlock = udp_unlock, - .pr_getlock = udp_getlock, -}, -{ - .pr_type = SOCK_STREAM, - .pr_protocol = 
IPPROTO_TCP, - .pr_flags = PR_CONNREQUIRED|PR_WANTRCVD|PR_PCBLOCK| - PR_PROTOLOCK|PR_DISPOSE|PR_EVCONNINFO| - PR_PRECONN_WRITE|PR_DATA_IDEMPOTENT, - .pr_input = tcp6_input, - .pr_ctlinput = tcp6_ctlinput, - .pr_ctloutput = tcp_ctloutput, -#if !INET /* don't call initialization and timeout routines twice */ - .pr_init = tcp_init, + .pr_usrreqs = &udp6_usrreqs, + .pr_lock = udp_lock, + .pr_unlock = udp_unlock, + .pr_getlock = udp_getlock, + }, + { + .pr_type = SOCK_STREAM, + .pr_protocol = IPPROTO_TCP, + .pr_flags = PR_CONNREQUIRED | PR_WANTRCVD | PR_PCBLOCK | + PR_PROTOLOCK | PR_DISPOSE | PR_EVCONNINFO | + PR_PRECONN_WRITE | PR_DATA_IDEMPOTENT, + .pr_input = tcp6_input, + .pr_ctlinput = tcp6_ctlinput, + .pr_ctloutput = tcp_ctloutput, +#if !INET /* don't call initialization and timeout routines twice */ + .pr_init = tcp_init, #endif /* !INET */ - .pr_drain = tcp_drain, - .pr_usrreqs = &tcp6_usrreqs, - .pr_lock = tcp_lock, - .pr_unlock = tcp_unlock, - .pr_getlock = tcp_getlock, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_RAW, - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_input = rip6_input, - .pr_output = rip6_pr_output, - .pr_ctlinput = rip6_ctlinput, - .pr_ctloutput = rip6_ctloutput, -#if !INET /* don't call initialization and timeout routines twice */ - .pr_init = rip_init, + .pr_drain = tcp_drain, + .pr_usrreqs = &tcp6_usrreqs, + .pr_lock = tcp_lock, + .pr_unlock = tcp_unlock, + .pr_getlock = tcp_getlock, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_RAW, + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_input = rip6_input, + .pr_output = rip6_pr_output, + .pr_ctlinput = rip6_ctlinput, + .pr_ctloutput = rip6_ctloutput, +#if !INET /* don't call initialization and timeout routines twice */ + .pr_init = rip_init, #endif /* !INET */ - .pr_usrreqs = &rip6_usrreqs, - .pr_unlock = rip_unlock, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_ICMPV6, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = icmp6_input, - .pr_output = rip6_pr_output, - .pr_ctlinput = rip6_ctlinput, - .pr_ctloutput = rip6_ctloutput, - .pr_init = icmp6_init, - .pr_usrreqs = &rip6_usrreqs, - .pr_unlock = rip_unlock, -}, -{ - .pr_type = SOCK_DGRAM, - .pr_protocol = IPPROTO_ICMPV6, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = icmp6_input, - .pr_output = rip6_pr_output, - .pr_ctlinput = rip6_ctlinput, - .pr_ctloutput = icmp6_dgram_ctloutput, - .pr_init = icmp6_init, - .pr_usrreqs = &icmp6_dgram_usrreqs, - .pr_unlock = rip_unlock, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_DSTOPTS, - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_input = dest6_input, - .pr_usrreqs = &nousrreqs, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_ROUTING, - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_input = route6_input, - .pr_usrreqs = &nousrreqs, -}, -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_FRAGMENT, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK, - .pr_input = frag6_input, - .pr_usrreqs = &nousrreqs, -}, + .pr_usrreqs = &rip6_usrreqs, + .pr_unlock = rip_unlock, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_ICMPV6, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = icmp6_input, + .pr_output = rip6_pr_output, + .pr_ctlinput = rip6_ctlinput, + .pr_ctloutput = rip6_ctloutput, + .pr_init = icmp6_init, + .pr_usrreqs = &rip6_usrreqs, + .pr_unlock = rip_unlock, + }, + { + .pr_type = SOCK_DGRAM, + .pr_protocol = IPPROTO_ICMPV6, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = icmp6_input, + .pr_output = rip6_pr_output, + .pr_ctlinput = rip6_ctlinput, + .pr_ctloutput = 
icmp6_dgram_ctloutput, + .pr_init = icmp6_init, + .pr_usrreqs = &icmp6_dgram_usrreqs, + .pr_unlock = rip_unlock, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_DSTOPTS, + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_input = dest6_input, + .pr_usrreqs = &nousrreqs, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_ROUTING, + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_input = route6_input, + .pr_usrreqs = &nousrreqs, + }, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_FRAGMENT, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK, + .pr_input = frag6_input, + .pr_usrreqs = &nousrreqs, + }, #if IPSEC -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_AH, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK, - .pr_input = ah6_input, - .pr_usrreqs = &nousrreqs, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_AH, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK, + .pr_input = ah6_input, + .pr_usrreqs = &nousrreqs, + }, #if IPSEC_ESP -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_ESP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK, - .pr_input = esp6_input, - .pr_ctlinput = esp6_ctlinput, - .pr_usrreqs = &nousrreqs, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_ESP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK, + .pr_input = esp6_input, + .pr_ctlinput = esp6_ctlinput, + .pr_usrreqs = &nousrreqs, + }, #endif /* IPSEC_ESP */ -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_IPCOMP, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_PROTOLOCK, - .pr_input = ipcomp6_input, - .pr_usrreqs = &nousrreqs, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_IPCOMP, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_PROTOLOCK, + .pr_input = ipcomp6_input, +#if !INET /* don't call initialization and timeout routines twice */ + .pr_init = ipcomp_init, +#endif /* !INET */ + .pr_usrreqs = &nousrreqs, + }, #endif /* IPSEC */ #if INET -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_IPV4, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = encap6_input, - .pr_output = rip6_pr_output, - .pr_ctloutput = rip6_ctloutput, - .pr_init = encap6_init, - .pr_usrreqs = &rip6_usrreqs, - .pr_unlock = rip_unlock, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_IPV4, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = encap6_input, + .pr_output = rip6_pr_output, + .pr_ctloutput = rip6_ctloutput, + .pr_init = encap6_init, + .pr_usrreqs = &rip6_usrreqs, + .pr_unlock = rip_unlock, + }, #endif /*INET*/ -{ - .pr_type = SOCK_RAW, - .pr_protocol = IPPROTO_IPV6, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = encap6_input, - .pr_output = rip6_pr_output, - .pr_ctloutput = rip6_ctloutput, - .pr_init = encap6_init, - .pr_usrreqs = &rip6_usrreqs, - .pr_unlock = rip_unlock, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = IPPROTO_IPV6, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = encap6_input, + .pr_output = rip6_pr_output, + .pr_ctloutput = rip6_ctloutput, + .pr_init = encap6_init, + .pr_usrreqs = &rip6_usrreqs, + .pr_unlock = rip_unlock, + }, /* raw wildcard */ -{ - .pr_type = SOCK_RAW, - .pr_protocol = 0, - .pr_flags = PR_ATOMIC|PR_ADDR|PR_LASTHDR, - .pr_input = rip6_input, - .pr_output = rip6_pr_output, - .pr_ctloutput = rip6_ctloutput, - .pr_usrreqs = &rip6_usrreqs, - .pr_unlock = rip_unlock, -}, + { + .pr_type = SOCK_RAW, + .pr_protocol = 0, + .pr_flags = PR_ATOMIC | PR_ADDR | PR_LASTHDR, + .pr_input = rip6_input, + .pr_output = rip6_pr_output, + .pr_ctloutput = rip6_ctloutput, + .pr_usrreqs = &rip6_usrreqs, + .pr_unlock = rip_unlock, + }, }; -int in6_proto_count = (sizeof (inet6sw) 
/ sizeof (struct ip6protosw)); +int in6_proto_count = (sizeof(inet6sw) / sizeof(struct ip6protosw)); struct domain inet6domain_s = { - .dom_family = PF_INET6, - .dom_flags = DOM_REENTRANT, - .dom_name = "internet6", - .dom_init = in6_dinit, - .dom_rtattach = in6_inithead, - .dom_rtoffset = offsetof(struct sockaddr_in6, sin6_addr) << 3, - .dom_maxrtkey = sizeof (struct sockaddr_in6), - .dom_protohdrlen = sizeof (struct sockaddr_in6), + .dom_family = PF_INET6, + .dom_flags = DOM_REENTRANT, + .dom_name = "internet6", + .dom_init = in6_dinit, + .dom_rtattach = in6_inithead, + .dom_rtoffset = offsetof(struct sockaddr_in6, sin6_addr) << 3, + .dom_maxrtkey = sizeof(struct sockaddr_in6), + .dom_protohdrlen = sizeof(struct sockaddr_in6), }; /* Initialize the PF_INET6 domain, and add in the pre-defined protos */ @@ -356,7 +359,7 @@ in6_dinit(struct domain *dp) inet6domain = dp; - _CASSERT(sizeof (struct protosw) == sizeof (struct ip6protosw)); + _CASSERT(sizeof(struct protosw) == sizeof(struct ip6protosw)); _CASSERT(offsetof(struct ip6protosw, pr_entry) == offsetof(struct protosw, pr_entry)); _CASSERT(offsetof(struct ip6protosw, pr_domain) == @@ -399,10 +402,12 @@ in6_dinit(struct domain *dp) /* * Attach first, then initialize. ip6_init() needs raw IP6 handler. */ - for (i = 0, pr = &inet6sw[0]; i < in6_proto_count; i++, pr++) + for (i = 0, pr = &inet6sw[0]; i < in6_proto_count; i++, pr++) { net_add_proto((struct protosw *)pr, dp, 0); - for (i = 0, pr = &inet6sw[0]; i < in6_proto_count; i++, pr++) + } + for (i = 0, pr = &inet6sw[0]; i < in6_proto_count; i++, pr++) { net_init_proto((struct protosw *)pr, dp); + } inet6_domain_mutex = dp->dom_mtx; } @@ -414,49 +419,49 @@ rip6_pr_output(struct mbuf *m, struct socket *so, struct sockaddr_in6 *sin6, #pragma unused(m, so, sin6, m1) panic("%s\n", __func__); /* NOTREACHED */ - return (0); + return 0; } /* * Internet configuration info */ -#ifndef IPV6FORWARDING +#ifndef IPV6FORWARDING #if GATEWAY6 -#define IPV6FORWARDING 1 /* forward IP6 packets not for us */ +#define IPV6FORWARDING 1 /* forward IP6 packets not for us */ #else -#define IPV6FORWARDING 0 /* don't forward IP6 packets not for us */ +#define IPV6FORWARDING 0 /* don't forward IP6 packets not for us */ #endif /* GATEWAY6 */ #endif /* !IPV6FORWARDING */ -#ifndef IPV6_SENDREDIRECTS -#define IPV6_SENDREDIRECTS 1 +#ifndef IPV6_SENDREDIRECTS +#define IPV6_SENDREDIRECTS 1 #endif -int ip6_forwarding = IPV6FORWARDING; /* act as router? */ -int ip6_sendredirects = IPV6_SENDREDIRECTS; -int ip6_defhlim = IPV6_DEFHLIM; -int ip6_defmcasthlim = IPV6_DEFAULT_MULTICAST_HOPS; -int ip6_accept_rtadv = 1; /* deprecated */ -int ip6_log_interval = 5; -int ip6_hdrnestlimit = 15; /* How many header options will we process? */ -int ip6_dad_count = 1; /* DupAddrDetectionTransmits */ -int ip6_auto_flowlabel = 1; -int ip6_gif_hlim = 0; -int ip6_use_deprecated = 1; /* allow deprecated addr [RFC 4862, 5.5.4] */ -int ip6_rr_prune = 5; /* router renumbering prefix - * walk list every 5 sec. */ -int ip6_mcast_pmtu = 0; /* enable pMTU discovery for multicast? 
*/ -int ip6_v6only = 0; /* Mapped addresses off by default - Radar 3347718 -- REVISITING FOR 10.7 -- TESTING WITH MAPPED@ OFF */ - -int ip6_neighborgcthresh = 1024; /* Threshold # of NDP entries for GC */ -int ip6_maxifprefixes = 16; /* Max acceptable prefixes via RA per IF */ -int ip6_maxifdefrouters = 16; /* Max acceptable def routers via RA */ -int ip6_maxdynroutes = 1024; /* Max # of routes created via redirect */ -int ip6_only_allow_rfc4193_prefix = 0; /* Only allow RFC4193 style Unique Local IPv6 Unicast prefixes */ +int ip6_forwarding = IPV6FORWARDING; /* act as router? */ +int ip6_sendredirects = IPV6_SENDREDIRECTS; +int ip6_defhlim = IPV6_DEFHLIM; +int ip6_defmcasthlim = IPV6_DEFAULT_MULTICAST_HOPS; +int ip6_accept_rtadv = 1; /* deprecated */ +int ip6_log_interval = 5; +int ip6_hdrnestlimit = 15; /* How many header options will we process? */ +int ip6_dad_count = 1; /* DupAddrDetectionTransmits */ +int ip6_auto_flowlabel = 1; +int ip6_gif_hlim = 0; +int ip6_use_deprecated = 1; /* allow deprecated addr [RFC 4862, 5.5.4] */ +int ip6_rr_prune = 5; /* router renumbering prefix + * walk list every 5 sec. */ +int ip6_mcast_pmtu = 0; /* enable pMTU discovery for multicast? */ +int ip6_v6only = 0; /* Mapped addresses off by default - Radar 3347718 -- REVISITING FOR 10.7 -- TESTING WITH MAPPED@ OFF */ + +int ip6_neighborgcthresh = 1024; /* Threshold # of NDP entries for GC */ +int ip6_maxifprefixes = 16; /* Max acceptable prefixes via RA per IF */ +int ip6_maxifdefrouters = 16; /* Max acceptable def routers via RA */ +int ip6_maxdynroutes = 1024; /* Max # of routes created via redirect */ +int ip6_only_allow_rfc4193_prefix = 0; /* Only allow RFC4193 style Unique Local IPv6 Unicast prefixes */ static int ip6_keepfaith = 0; uint64_t ip6_log_time = 0; -int nd6_onlink_ns_rfc4861 = 0; /* allow 'on-link' nd6 NS (as in RFC 4861) */ +int nd6_onlink_ns_rfc4861 = 0; /* allow 'on-link' nd6 NS (as in RFC 4861) */ /* icmp6 */ /* @@ -464,49 +469,49 @@ int nd6_onlink_ns_rfc4861 = 0; /* allow 'on-link' nd6 NS (as in RFC 4861) */ * XXX: what if we don't define INET? Should we define pmtu6_expire * or so? (jinmei@kame.net 19990310) */ -int pmtu_expire = 60*10; -int pmtu_probe = 60*2; +int pmtu_expire = 60 * 10; +int pmtu_probe = 60 * 2; /* raw IP6 parameters */ /* * Nominal space allocated to a raw ip socket. */ -#define RIPV6SNDQ 8192 -#define RIPV6RCVQ 8192 +#define RIPV6SNDQ 8192 +#define RIPV6RCVQ 8192 -u_int32_t rip6_sendspace = RIPV6SNDQ; -u_int32_t rip6_recvspace = RIPV6RCVQ; +u_int32_t rip6_sendspace = RIPV6SNDQ; +u_int32_t rip6_recvspace = RIPV6RCVQ; /* ICMPV6 parameters */ -int icmp6_rediraccept = 1; /* accept and process redirects */ -int icmp6_redirtimeout = 10 * 60; /* 10 minutes */ -int icmp6errppslim = 500; /* 500 packets per second */ -int icmp6rappslim = 10; /* 10 packets per second */ -int icmp6_nodeinfo = 3; /* enable/disable NI response */ +int icmp6_rediraccept = 1; /* accept and process redirects */ +int icmp6_redirtimeout = 10 * 60; /* 10 minutes */ +int icmp6errppslim = 500; /* 500 packets per second */ +int icmp6rappslim = 10; /* 10 packets per second */ +int icmp6_nodeinfo = 3; /* enable/disable NI response */ /* UDP on IP6 parameters */ -int udp6_sendspace = 9216; /* really max datagram size */ -int udp6_recvspace = 40 * (1024 + sizeof(struct sockaddr_in6)); - /* 40 1K datagrams */ +int udp6_sendspace = 9216; /* really max datagram size */ +int udp6_recvspace = 40 * (1024 + sizeof(struct sockaddr_in6)); +/* 40 1K datagrams */ /* * sysctl related items. 
*/ SYSCTL_NODE(_net, PF_INET6, inet6, - CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Internet6 Family"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Internet6 Family"); /* net.inet6 */ -SYSCTL_NODE(_net_inet6, IPPROTO_IPV6, ip6, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IP6"); -SYSCTL_NODE(_net_inet6, IPPROTO_ICMPV6, icmp6, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "ICMP6"); -SYSCTL_NODE(_net_inet6, IPPROTO_UDP, udp6, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "UDP6"); -SYSCTL_NODE(_net_inet6, IPPROTO_TCP, tcp6, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "TCP6"); +SYSCTL_NODE(_net_inet6, IPPROTO_IPV6, ip6, + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IP6"); +SYSCTL_NODE(_net_inet6, IPPROTO_ICMPV6, icmp6, + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "ICMP6"); +SYSCTL_NODE(_net_inet6, IPPROTO_UDP, udp6, + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "UDP6"); +SYSCTL_NODE(_net_inet6, IPPROTO_TCP, tcp6, + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "TCP6"); #if IPSEC -SYSCTL_NODE(_net_inet6, IPPROTO_ESP, ipsec6, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IPSEC6"); +SYSCTL_NODE(_net_inet6, IPPROTO_ESP, ipsec6, + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPSEC6"); #endif /* IPSEC */ /* net.inet6.ip6 */ @@ -518,17 +523,18 @@ sysctl_ip6_temppltime SYSCTL_HANDLER_ARGS int old; error = SYSCTL_OUT(req, arg1, sizeof(int)); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } old = ip6_temp_preferred_lifetime; error = SYSCTL_IN(req, arg1, sizeof(int)); if (ip6_temp_preferred_lifetime > ND6_MAX_LIFETIME || ip6_temp_preferred_lifetime < ip6_desync_factor + ip6_temp_regen_advance) { ip6_temp_preferred_lifetime = old; - return (EINVAL); + return EINVAL; } - return (error); + return error; } static int @@ -539,132 +545,134 @@ sysctl_ip6_tempvltime SYSCTL_HANDLER_ARGS int old; error = SYSCTL_OUT(req, arg1, sizeof(int)); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } old = ip6_temp_valid_lifetime; error = SYSCTL_IN(req, arg1, sizeof(int)); if (ip6_temp_valid_lifetime > ND6_MAX_LIFETIME || ip6_temp_valid_lifetime < ip6_temp_preferred_lifetime) { ip6_temp_valid_lifetime = old; - return (EINVAL); + return EINVAL; } - return (error); + return error; } static int ip6_getstat SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = (size_t)sizeof (struct ip6stat); + if (req->oldptr == USER_ADDR_NULL) { + req->oldlen = (size_t)sizeof(struct ip6stat); + } - return (SYSCTL_OUT(req, &ip6stat, MIN(sizeof (ip6stat), req->oldlen))); + return SYSCTL_OUT(req, &ip6stat, MIN(sizeof(ip6stat), req->oldlen)); } SYSCTL_INT(_net_inet6_ip6, IPV6CTL_FORWARDING, - forwarding, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_forwarding, 0, ""); + forwarding, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_forwarding, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_SENDREDIRECTS, - redirect, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_sendredirects, 0, ""); + redirect, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_sendredirects, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFHLIM, - hlim, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_defhlim, 0, ""); + hlim, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_defhlim, 0, ""); SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_STATS, stats, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, ip6_getstat, "S,ip6stat", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, ip6_getstat, "S,ip6stat", ""); #if (DEVELOPMENT || DEBUG) SYSCTL_INT(_net_inet6_ip6, IPV6CTL_ACCEPT_RTADV, - accept_rtadv, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip6_accept_rtadv, 0, ""); + accept_rtadv, CTLFLAG_RW | CTLFLAG_LOCKED, + &ip6_accept_rtadv, 0, ""); #else SYSCTL_INT(_net_inet6_ip6, 
IPV6CTL_ACCEPT_RTADV, - accept_rtadv, CTLFLAG_RD | CTLFLAG_LOCKED, - &ip6_accept_rtadv, 0, ""); + accept_rtadv, CTLFLAG_RD | CTLFLAG_LOCKED, + &ip6_accept_rtadv, 0, ""); #endif /* (DEVELOPMENT || DEBUG) */ SYSCTL_INT(_net_inet6_ip6, IPV6CTL_KEEPFAITH, - keepfaith, CTLFLAG_RD | CTLFLAG_LOCKED, &ip6_keepfaith, 0, ""); + keepfaith, CTLFLAG_RD | CTLFLAG_LOCKED, &ip6_keepfaith, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_LOG_INTERVAL, - log_interval, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_log_interval, 0, ""); + log_interval, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_log_interval, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_HDRNESTLIMIT, - hdrnestlimit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_hdrnestlimit, 0, ""); + hdrnestlimit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_hdrnestlimit, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DAD_COUNT, - dad_count, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_dad_count, 0, ""); + dad_count, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_dad_count, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_AUTO_FLOWLABEL, - auto_flowlabel, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_auto_flowlabel, 0, ""); + auto_flowlabel, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_auto_flowlabel, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_DEFMCASTHLIM, - defmcasthlim, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_defmcasthlim, 0, ""); + defmcasthlim, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_defmcasthlim, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_GIF_HLIM, - gifhlim, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_gif_hlim, 0, ""); + gifhlim, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_gif_hlim, 0, ""); SYSCTL_STRING(_net_inet6_ip6, IPV6CTL_KAME_VERSION, - kame_version, CTLFLAG_RD | CTLFLAG_LOCKED, (void *)((uintptr_t)(__KAME_VERSION)), 0, ""); + kame_version, CTLFLAG_RD | CTLFLAG_LOCKED, (void *)((uintptr_t)(__KAME_VERSION)), 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_USE_DEPRECATED, - use_deprecated, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_use_deprecated, 0, ""); + use_deprecated, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_use_deprecated, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_RR_PRUNE, - rr_prune, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_rr_prune, 0, ""); + rr_prune, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_rr_prune, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_USETEMPADDR, - use_tempaddr, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_use_tempaddr, 0, ""); + use_tempaddr, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_use_tempaddr, 0, ""); SYSCTL_OID(_net_inet6_ip6, IPV6CTL_TEMPPLTIME, temppltime, - CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_temp_preferred_lifetime, 0, - sysctl_ip6_temppltime, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_temp_preferred_lifetime, 0, + sysctl_ip6_temppltime, "I", ""); SYSCTL_OID(_net_inet6_ip6, IPV6CTL_TEMPVLTIME, tempvltime, - CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_temp_valid_lifetime, 0, - sysctl_ip6_tempvltime, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_temp_valid_lifetime, 0, + sysctl_ip6_tempvltime, "I", ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_V6ONLY, - v6only, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_v6only, 0, ""); + v6only, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_v6only, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_AUTO_LINKLOCAL, - auto_linklocal, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_auto_linklocal, 0, ""); + auto_linklocal, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_auto_linklocal, 0, ""); SYSCTL_STRUCT(_net_inet6_ip6, IPV6CTL_RIP6STATS, rip6stats, CTLFLAG_RD | CTLFLAG_LOCKED, - &rip6stat, rip6stat, ""); + &rip6stat, rip6stat, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_PREFER_TEMPADDR, - prefer_tempaddr, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_prefer_tempaddr, 0, ""); + prefer_tempaddr, CTLFLAG_RW | 
CTLFLAG_LOCKED, &ip6_prefer_tempaddr, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_USE_DEFAULTZONE, - use_defaultzone, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_use_defzone, 0,""); + use_defaultzone, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_use_defzone, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MCAST_PMTU, - mcast_pmtu, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_mcast_pmtu, 0, ""); + mcast_pmtu, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_mcast_pmtu, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_NEIGHBORGCTHRESH, - neighborgcthresh, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_neighborgcthresh, 0, ""); + neighborgcthresh, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_neighborgcthresh, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXIFPREFIXES, - maxifprefixes, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxifprefixes, 0, ""); + maxifprefixes, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxifprefixes, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXIFDEFROUTERS, - maxifdefrouters, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxifdefrouters, 0, ""); + maxifdefrouters, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxifdefrouters, 0, ""); SYSCTL_INT(_net_inet6_ip6, IPV6CTL_MAXDYNROUTES, - maxdynroutes, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxdynroutes, 0, ""); + maxdynroutes, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxdynroutes, 0, ""); SYSCTL_INT(_net_inet6_ip6, OID_AUTO, - only_allow_rfc4193_prefixes, CTLFLAG_RW | CTLFLAG_LOCKED, - &ip6_only_allow_rfc4193_prefix, 0, ""); + only_allow_rfc4193_prefixes, CTLFLAG_RW | CTLFLAG_LOCKED, + &ip6_only_allow_rfc4193_prefix, 0, ""); SYSCTL_INT(_net_inet6_ip6, OID_AUTO, - clat_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &clat_debug, 0, ""); + clat_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &clat_debug, 0, ""); /* net.inet6.icmp6 */ SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_REDIRACCEPT, - rediraccept, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6_rediraccept, 0, ""); + rediraccept, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6_rediraccept, 0, ""); SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_REDIRTIMEOUT, - redirtimeout, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6_redirtimeout, 0, ""); + redirtimeout, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6_redirtimeout, 0, ""); SYSCTL_STRUCT(_net_inet6_icmp6, ICMPV6CTL_STATS, stats, CTLFLAG_RD | CTLFLAG_LOCKED, - &icmp6stat, icmp6stat, ""); + &icmp6stat, icmp6stat, ""); SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_PRUNE, - nd6_prune, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_prune, 0, ""); + nd6_prune, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_prune, 0, ""); SYSCTL_INT(_net_inet6_icmp6, OID_AUTO, - nd6_prune_lazy, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_prune_lazy, 0, ""); + nd6_prune_lazy, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_prune_lazy, 0, ""); SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_DELAY, - nd6_delay, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_delay, 0, ""); + nd6_delay, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_delay, 0, ""); SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_UMAXTRIES, - nd6_umaxtries, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_umaxtries, 0, ""); + nd6_umaxtries, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_umaxtries, 0, ""); SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_MMAXTRIES, - nd6_mmaxtries, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_mmaxtries, 0, ""); + nd6_mmaxtries, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_mmaxtries, 0, ""); SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_USELOOPBACK, - nd6_useloopback, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_useloopback, 0, ""); + nd6_useloopback, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_useloopback, 0, ""); SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_ACCEPT_6TO4, - nd6_accept_6to4, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_accept_6to4, 0, ""); + nd6_accept_6to4, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_accept_6to4, 0, ""); 
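[Editor's aside -- illustrative, not part of the patch.] The restyled SYSCTL_INT declarations above and below only change formatting; each one still exports a kernel global under the net.inet6 tree (for example, net.inet6.icmp6.nd6_debug backs the nd6_debug variable). A minimal userspace sketch of how such a knob is read and, for CTLFLAG_RW entries, tuned via sysctlbyname(3) -- assuming a macOS/BSD libc; the knob name is derived from the declarations shown here:

/* sysctl6_demo.c -- read and optionally toggle net.inet6.icmp6.nd6_debug. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    int val = 0;
    size_t len = sizeof(val);

    /* Read the current value of the ND6 debug knob declared in this hunk. */
    if (sysctlbyname("net.inet6.icmp6.nd6_debug", &val, &len, NULL, 0) == -1) {
        fprintf(stderr, "read: %s\n", strerror(errno));
        return 1;
    }
    printf("net.inet6.icmp6.nd6_debug = %d\n", val);

    /* Toggle it; CTLFLAG_RW knobs accept writes, but only from root. */
    int newval = !val;
    if (sysctlbyname("net.inet6.icmp6.nd6_debug", NULL, NULL,
        &newval, sizeof(newval)) == -1) {
        fprintf(stderr, "write (requires root): %s\n", strerror(errno));
    }
    return 0;
}

CTLFLAG_RD entries, such as net.inet6.ip6.keepfaith above, take only the read path and reject any write attempt (typically with EPERM).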
 SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_NODEINFO,
-    nodeinfo, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6_nodeinfo, 0, "");
+    nodeinfo, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6_nodeinfo, 0, "");
 SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ERRPPSLIMIT,
-    errppslimit, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6errppslim, 0, "");
+    errppslimit, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6errppslim, 0, "");
 SYSCTL_INT(_net_inet6_icmp6, OID_AUTO,
-    rappslimit, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6rappslim, 0, "");
+    rappslimit, CTLFLAG_RW | CTLFLAG_LOCKED, &icmp6rappslim, 0, "");
 SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_DEBUG,
-    nd6_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_debug, 0, "");
+    nd6_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_debug, 0, "");
 SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_ONLINKNSRFC4861,
-    nd6_onlink_ns_rfc4861, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_onlink_ns_rfc4861, 0,
-    "Accept 'on-link' nd6 NS in compliance with RFC 4861.");
+    nd6_onlink_ns_rfc4861, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_onlink_ns_rfc4861, 0,
+    "Accept 'on-link' nd6 NS in compliance with RFC 4861.");
 SYSCTL_INT(_net_inet6_icmp6, ICMPV6CTL_ND6_OPTIMISTIC_DAD,
-    nd6_optimistic_dad, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_optimistic_dad, 0, "");
+    nd6_optimistic_dad, CTLFLAG_RW | CTLFLAG_LOCKED, &nd6_optimistic_dad, 0, "");
diff --git a/bsd/netinet6/in6_rmx.c b/bsd/netinet6/in6_rmx.c
index 7647eaf0c..5874aa079 100644
--- a/bsd/netinet6/in6_rmx.c
+++ b/bsd/netinet6/in6_rmx.c
@@ -127,9 +127,9 @@
 #include
 #include
 
-extern int tvtohz(struct timeval *);
+extern int tvtohz(struct timeval *);
 
-static int in6_rtqtimo_run; /* in6_rtqtimo is scheduled to run */
+static int in6_rtqtimo_run; /* in6_rtqtimo is scheduled to run */
 static void in6_rtqtimo(void *);
 static void in6_sched_rtqtimo(struct timeval *);
 
@@ -166,8 +166,9 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
     LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);
     RT_LOCK_ASSERT_HELD(rt);
 
-    if (verbose)
-        rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
+    if (verbose) {
+        rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
+    }
 
     /*
      * If this is a dynamic route (which is created via Redirect) and
@@ -178,17 +179,20 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
      * suboptimal path even without the redirected route.
      */
     if ((rt->rt_flags & RTF_DYNAMIC) &&
-        ip6_maxdynroutes >= 0 && in6dynroutes >= ip6_maxdynroutes)
-        return (NULL);
+        ip6_maxdynroutes >= 0 && in6dynroutes >= ip6_maxdynroutes) {
+        return NULL;
+    }
 
     /*
      * For IPv6, all unicast non-host routes are automatically cloning.
 */
-    if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
+    if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
         rt->rt_flags |= RTF_MULTICAST;
+    }
 
-    if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST)))
+    if (!(rt->rt_flags & (RTF_HOST | RTF_CLONING | RTF_MULTICAST))) {
         rt->rt_flags |= RTF_PRCLONING;
+    }
 
     /*
      * A little bit of help for both IPv6 output and input:
@@ -214,8 +218,9 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
     }
 
     if (!rt->rt_rmx.rmx_mtu && !(rt->rt_rmx.rmx_locks & RTV_MTU) &&
-        rt->rt_ifp)
+        rt->rt_ifp) {
         rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu;
+    }
 
     ret = rn_addroute(v_arg, n_arg, head, treenodes);
     if (ret == NULL && (rt->rt_flags & RTF_HOST)) {
@@ -231,8 +236,9 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
         char dbufc[MAX_IPv6_STR_LEN];
 
         RT_LOCK(rt2);
-        if (verbose)
-            rt_str(rt2, dbufc, sizeof (dbufc), NULL, 0);
+        if (verbose) {
+            rt_str(rt2, dbufc, sizeof(dbufc), NULL, 0);
+        }
 
         if ((rt2->rt_flags & RTF_LLINFO) &&
             (rt2->rt_flags & RTF_HOST) &&
@@ -286,7 +292,7 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
         RTF_CLONING | RTF_PRCLONING, sin6_get_ifscope(rt_key(rt)));
     if (rt2 != NULL) {
         RT_LOCK(rt2);
-        if ((rt2->rt_flags & (RTF_CLONING|RTF_HOST|
+        if ((rt2->rt_flags & (RTF_CLONING | RTF_HOST |
             RTF_GATEWAY)) == RTF_CLONING &&
             rt2->rt_gateway &&
             rt2->rt_gateway->sa_family == AF_LINK &&
@@ -298,11 +304,13 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
         }
     }
 
-    if (ret != NULL && (rt->rt_flags & RTF_DYNAMIC))
+    if (ret != NULL && (rt->rt_flags & RTF_DYNAMIC)) {
         in6dynroutes++;
+    }
 
-    if (!verbose)
+    if (!verbose) {
         goto done;
+    }
 
     if (ret != NULL) {
         if (flags != rt->rt_flags) {
@@ -324,7 +332,7 @@ in6_addroute(void *v_arg, void *n_arg, struct radix_node_head *head,
             rt->rt_flags, RTF_BITS);
     }
 done:
-    return (ret);
+    return ret;
 }
 
 static struct radix_node *
@@ -339,12 +347,13 @@ in6_deleteroute(void *v_arg, void *netmask_arg, struct radix_node_head *head)
         struct rtentry *rt = (struct rtentry *)rn;
 
         RT_LOCK(rt);
-        if (rt->rt_flags & RTF_DYNAMIC)
+        if (rt->rt_flags & RTF_DYNAMIC) {
             in6dynroutes--;
+        }
         if (rt_verbose > 1) {
             char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN];
 
-            rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf));
+            rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf));
             log(LOG_DEBUG, "%s: route to %s->%s->%s deleted, "
                 "flags=%b\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ?
rt->rt_ifp->if_xname : "", @@ -387,7 +396,7 @@ in6_validate(struct radix_node *rn) rt_setexpire(rt, 0); } } - return (rn); + return rn; } /* @@ -396,7 +405,7 @@ in6_validate(struct radix_node *rn) static struct radix_node * in6_matroute(void *v_arg, struct radix_node_head *head) { - return (in6_matroute_args(v_arg, head, NULL, NULL)); + return in6_matroute_args(v_arg, head, NULL, NULL); } /* @@ -415,25 +424,25 @@ in6_matroute_args(void *v_arg, struct radix_node_head *head, in6_validate(rn); RT_UNLOCK((struct rtentry *)rn); } - return (rn); + return rn; } SYSCTL_DECL(_net_inet6_ip6); /* one hour is ``really old'' */ -static uint32_t rtq_reallyold = 60*60; +static uint32_t rtq_reallyold = 60 * 60; SYSCTL_UINT(_net_inet6_ip6, IPV6CTL_RTEXPIRE, rtexpire, - CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_reallyold, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_reallyold, 0, ""); /* never automatically crank down to less */ static uint32_t rtq_minreallyold = 10; SYSCTL_UINT(_net_inet6_ip6, IPV6CTL_RTMINEXPIRE, rtminexpire, - CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_minreallyold, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_minreallyold, 0, ""); /* 128 cached routes is ``too many'' */ static uint32_t rtq_toomany = 128; SYSCTL_UINT(_net_inet6_ip6, IPV6CTL_RTMAXCACHE, rtmaxcache, - CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_toomany, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &rtq_toomany, 0, ""); /* * On last reference drop, mark the route as belong to us so that it can be @@ -450,20 +459,24 @@ in6_clsroute(struct radix_node *rn, struct radix_node_head *head) LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); RT_LOCK_ASSERT_HELD(rt); - if (!(rt->rt_flags & RTF_UP)) + if (!(rt->rt_flags & RTF_UP)) { return; /* prophylactic measures */ - - if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST) + } + if ((rt->rt_flags & (RTF_LLINFO | RTF_HOST)) != RTF_HOST) { return; + } - if (rt->rt_flags & RTPRF_OURS) + if (rt->rt_flags & RTPRF_OURS) { return; + } - if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC))) + if (!(rt->rt_flags & (RTF_WASCLONED | RTF_DYNAMIC))) { return; + } - if (verbose) - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + if (verbose) { + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); + } /* * Delete the route immediately if RTF_DELCLONE is set or @@ -497,9 +510,10 @@ in6_clsroute(struct radix_node *rn, struct radix_node_head *head) RT_REMREF_LOCKED(rt); } else { RT_LOCK(rt); - if (!verbose) - rt_str(rt, dbuf, sizeof (dbuf), - gbuf, sizeof (gbuf)); + if (!verbose) { + rt_str(rt, dbuf, sizeof(dbuf), + gbuf, sizeof(gbuf)); + } log(LOG_ERR, "%s: error deleting route to " "%s->%s->%s, flags=%b, err=%d\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ? @@ -515,9 +529,9 @@ in6_clsroute(struct radix_node *rn, struct radix_node_head *head) if (verbose) { log(LOG_DEBUG, "%s: route to %s->%s->%s invalidated, " - "flags=%b, expire=T+%u\n", __func__, dbuf, gbuf, - (rt->rt_ifp != NULL) ? rt->rt_ifp->if_xname : "", - rt->rt_flags, RTF_BITS, rt->rt_expire - timenow); + "flags=%b, expire=T+%u\n", __func__, dbuf, gbuf, + (rt->rt_ifp != NULL) ? 
rt->rt_ifp->if_xname : "", + rt->rt_flags, RTF_BITS, rt->rt_expire - timenow); } /* We have at least one entry; arm the timer if not already */ @@ -557,8 +571,9 @@ in6_rtqkill(struct radix_node *rn, void *rock) if (rt->rt_flags & RTPRF_OURS) { char dbuf[MAX_IPv6_STR_LEN], gbuf[MAX_IPv6_STR_LEN]; - if (verbose) - rt_str(rt, dbuf, sizeof (dbuf), gbuf, sizeof (gbuf)); + if (verbose) { + rt_str(rt, dbuf, sizeof(dbuf), gbuf, sizeof(gbuf)); + } ap->found++; VERIFY(rt->rt_expire == 0 || rt->rt_rmx.rmx_expire != 0); @@ -580,7 +595,7 @@ in6_rtqkill(struct radix_node *rn, void *rock) rt->rt_ifp->if_xname : "", rt->rt_flags, RTF_BITS, ap->draining); } - RT_ADDREF_LOCKED(rt); /* for us to free below */ + RT_ADDREF_LOCKED(rt); /* for us to free below */ /* * Delete this route since we're done with it; * the route may be freed afterwards, so we @@ -595,9 +610,10 @@ in6_rtqkill(struct radix_node *rn, void *rock) rt->rt_gateway, rt_mask(rt), rt->rt_flags, NULL); if (err != 0) { RT_LOCK(rt); - if (!verbose) - rt_str(rt, dbuf, sizeof (dbuf), - gbuf, sizeof (gbuf)); + if (!verbose) { + rt_str(rt, dbuf, sizeof(dbuf), + gbuf, sizeof(gbuf)); + } log(LOG_ERR, "%s: error deleting route to " "%s->%s->%s, flags=%b, err=%d\n", __func__, dbuf, gbuf, (rt->rt_ifp != NULL) ? @@ -631,10 +647,10 @@ in6_rtqkill(struct radix_node *rn, void *rock) RT_UNLOCK(rt); } - return (0); + return 0; } -#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */ +#define RTQ_TIMEOUT 60*10 /* run no less than once every ten minutes */ static int rtq_timeout = RTQ_TIMEOUT; static void @@ -659,7 +675,7 @@ in6_rtqtimo(void *targ) log(LOG_DEBUG, "%s: initial nextstop is T+%u seconds\n", __func__, rtq_timeout); } - bzero(&arg, sizeof (arg)); + bzero(&arg, sizeof(arg)); arg.rnh = rnh; arg.nextstop = timenow + rtq_timeout; rnh->rnh_walktree(rnh, in6_rtqkill, &arg); @@ -680,8 +696,9 @@ in6_rtqtimo(void *targ) ((timenow - last_adjusted_timeout) >= (uint64_t)rtq_timeout) && rtq_reallyold > rtq_minreallyold) { rtq_reallyold = 2 * rtq_reallyold / 3; - if (rtq_reallyold < rtq_minreallyold) + if (rtq_reallyold < rtq_minreallyold) { rtq_reallyold = rtq_minreallyold; + } last_adjusted_timeout = timenow; if (verbose) { @@ -697,10 +714,11 @@ in6_rtqtimo(void *targ) atv.tv_sec = arg.nextstop - timenow; /* re-arm the timer only if there's work to do */ in6_rtqtimo_run = 0; - if (ours > 0) + if (ours > 0) { in6_sched_rtqtimo(&atv); - else if (verbose) + } else if (verbose) { log(LOG_DEBUG, "%s: not rescheduling timer\n", __func__); + } lck_mtx_unlock(rnh_lock); } @@ -733,13 +751,14 @@ in6_rtqdrain(void) struct radix_node_head *rnh; struct rtqk_arg arg; - if (rt_verbose > 1) + if (rt_verbose > 1) { log(LOG_DEBUG, "%s: draining routes\n", __func__); + } lck_mtx_lock(rnh_lock); rnh = rt_tables[AF_INET6]; VERIFY(rnh != NULL); - bzero(&arg, sizeof (arg)); + bzero(&arg, sizeof(arg)); arg.rnh = rnh; arg.draining = 1; rnh->rnh_walktree(rnh, in6_rtqkill, &arg); @@ -757,8 +776,9 @@ in6_inithead(void **head, int off) /* If called from route_init(), make sure it is exactly once */ VERIFY(head != (void **)&rt_tables[AF_INET6] || *head == NULL); - if (!rn_inithead(head, off)) - return (0); + if (!rn_inithead(head, off)) { + return 0; + } /* * We can get here from nfs_subs.c as well, in which case this @@ -766,14 +786,14 @@ in6_inithead(void **head, int off) * this also takes care of the case when we're called more than * once from anywhere but route_init(). 
*/ - if (head != (void **)&rt_tables[AF_INET6]) - return (1); /* only do this for the real routing table */ - + if (head != (void **)&rt_tables[AF_INET6]) { + return 1; /* only do this for the real routing table */ + } rnh = *head; rnh->rnh_addaddr = in6_addroute; rnh->rnh_deladdr = in6_deleteroute; rnh->rnh_matchaddr = in6_matroute; rnh->rnh_matchaddr_args = in6_matroute_args; rnh->rnh_close = in6_clsroute; - return (1); + return 1; } diff --git a/bsd/netinet6/in6_src.c b/bsd/netinet6/in6_src.c index 8af4dc7b1..85e2dc7fe 100644 --- a/bsd/netinet6/in6_src.c +++ b/bsd/netinet6/in6_src.c @@ -130,56 +130,56 @@ SYSCTL_DECL(_net_inet6_ip6); static int ip6_select_srcif_debug = 0; SYSCTL_INT(_net_inet6_ip6, OID_AUTO, select_srcif_debug, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_select_srcif_debug, 0, - "log source interface selection debug info"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_select_srcif_debug, 0, + "log source interface selection debug info"); static int ip6_select_srcaddr_debug = 0; SYSCTL_INT(_net_inet6_ip6, OID_AUTO, select_srcaddr_debug, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_select_srcaddr_debug, 0, - "log source address selection debug info"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_select_srcaddr_debug, 0, + "log source address selection debug info"); static int ip6_select_src_expensive_secondary_if = 0; SYSCTL_INT(_net_inet6_ip6, OID_AUTO, select_src_expensive_secondary_if, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_select_src_expensive_secondary_if, 0, - "allow source interface selection to use expensive secondaries"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_select_src_expensive_secondary_if, 0, + "allow source interface selection to use expensive secondaries"); static int ip6_select_src_strong_end = 1; SYSCTL_INT(_net_inet6_ip6, OID_AUTO, select_src_strong_end, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_select_src_strong_end, 0, - "limit source address selection to outgoing interface"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_select_src_strong_end, 0, + "limit source address selection to outgoing interface"); -#define ADDR_LABEL_NOTAPP (-1) +#define ADDR_LABEL_NOTAPP (-1) struct in6_addrpolicy defaultaddrpolicy; int ip6_prefer_tempaddr = 1; #ifdef ENABLE_ADDRSEL extern lck_mtx_t *addrsel_mutex; -#define ADDRSEL_LOCK() lck_mtx_lock(addrsel_mutex) -#define ADDRSEL_UNLOCK() lck_mtx_unlock(addrsel_mutex) +#define ADDRSEL_LOCK() lck_mtx_lock(addrsel_mutex) +#define ADDRSEL_UNLOCK() lck_mtx_unlock(addrsel_mutex) #else -#define ADDRSEL_LOCK() -#define ADDRSEL_UNLOCK() +#define ADDRSEL_LOCK() +#define ADDRSEL_UNLOCK() #endif static int selectroute(struct sockaddr_in6 *, struct sockaddr_in6 *, - struct ip6_pktopts *, struct ip6_moptions *, struct in6_ifaddr **, - struct route_in6 *, struct ifnet **, struct rtentry **, int, int, - struct ip6_out_args *ip6oa); + struct ip6_pktopts *, struct ip6_moptions *, struct in6_ifaddr **, + struct route_in6 *, struct ifnet **, struct rtentry **, int, int, + struct ip6_out_args *ip6oa); static int in6_selectif(struct sockaddr_in6 *, struct ip6_pktopts *, - struct ip6_moptions *, struct route_in6 *ro, - struct ip6_out_args *, struct ifnet **); + struct ip6_moptions *, struct route_in6 *ro, + struct ip6_out_args *, struct ifnet **); static void init_policy_queue(void); static int add_addrsel_policyent(const struct in6_addrpolicy *); #ifdef ENABLE_ADDRSEL static int delete_addrsel_policyent(const struct in6_addrpolicy *); #endif static int walk_addrsel_policy(int (*)(const struct in6_addrpolicy *, void *), - void *); + void *); static int dump_addrsel_policyent(const struct 
in6_addrpolicy *, void *); static struct in6_addrpolicy *match_addrsel_policy(struct sockaddr_in6 *); void addrsel_policy_init(void); -#define SASEL_DO_DBG(inp) \ +#define SASEL_DO_DBG(inp) \ (ip6_select_srcaddr_debug && (inp) != NULL && \ (inp)->inp_socket != NULL && \ ((inp)->inp_socket->so_options & SO_DEBUG)) @@ -187,8 +187,8 @@ void addrsel_policy_init(void); #define SASEL_LOG(fmt, ...) \ do { \ if (srcsel_debug) \ - printf("%s:%d " fmt "\n",\ - __FUNCTION__, __LINE__, ##__VA_ARGS__); \ + printf("%s:%d " fmt "\n",\ + __FUNCTION__, __LINE__, ##__VA_ARGS__); \ } while (0); \ /* @@ -197,40 +197,42 @@ do { \ * If necessary, this function lookups the routing table and returns * an entry to the caller for later use. */ -#define REPLACE(r) do {\ +#define REPLACE(r) do {\ SASEL_LOG("REPLACE r %d ia %s ifp1 %s\n", \ (r), s_src, ifp1->if_xname); \ srcrule = (r); \ goto replace; \ } while (0) -#define NEXTSRC(r) do {\ +#define NEXTSRC(r) do {\ SASEL_LOG("NEXTSRC r %d ia %s ifp1 %s\n", \ (r), s_src, ifp1->if_xname); \ - goto next; /* XXX: we can't use 'continue' here */ \ + goto next; /* XXX: we can't use 'continue' here */ \ } while (0) -#define BREAK(r) do { \ +#define BREAK(r) do { \ SASEL_LOG("BREAK r %d ia %s ifp1 %s\n", \ (r), s_src, ifp1->if_xname); \ srcrule = (r); \ - goto out; /* XXX: we can't use 'break' here */ \ + goto out; /* XXX: we can't use 'break' here */ \ } while (0) struct ifaddr * -in6_selectsrc_core_ifa(struct sockaddr_in6 *addr, struct ifnet *ifp, int srcsel_debug) { +in6_selectsrc_core_ifa(struct sockaddr_in6 *addr, struct ifnet *ifp, int srcsel_debug) +{ int err = 0; struct ifnet *src_ifp = NULL; struct in6_addr src_storage = {}; struct in6_addr *in6 = NULL; struct ifaddr *ifa = NULL; - if((in6 = in6_selectsrc_core(addr, + if ((in6 = in6_selectsrc_core(addr, (ip6_prefer_tempaddr ? 
IPV6_SRCSEL_HINT_PREFER_TMPADDR : 0), ifp, 0, &src_storage, &src_ifp, &err, &ifa)) == NULL) { - if (err == 0) + if (err == 0) { err = EADDRNOTAVAIL; + } VERIFY(src_ifp == NULL); if (ifa != NULL) { IFA_REMREF(ifa); @@ -240,8 +242,9 @@ in6_selectsrc_core_ifa(struct sockaddr_in6 *addr, struct ifnet *ifp, int srcsel_ } if (src_ifp != ifp) { - if (err == 0) + if (err == 0) { err = ENETUNREACH; + } if (ifa != NULL) { IFA_REMREF(ifa); ifa = NULL; @@ -264,9 +267,10 @@ in6_selectsrc_core_ifa(struct sockaddr_in6 *addr, struct ifnet *ifp, int srcsel_ done: SASEL_LOG("Returned with error: %d", err); - if (src_ifp != NULL) + if (src_ifp != NULL) { ifnet_release(src_ifp); - return (ifa); + } + return ifa; } struct in6_addr * @@ -288,19 +292,21 @@ in6_selectsrc_core(struct sockaddr_in6 *dstsock, uint32_t hint_mask, VERIFY(src_storage != NULL); VERIFY(ifp != NULL); - if (sifp != NULL) + if (sifp != NULL) { *sifp = NULL; + } - if (ifapp != NULL) + if (ifapp != NULL) { *ifapp = NULL; + } dst = dstsock->sin6_addr; /* make a copy for local operation */ if (srcsel_debug) { - (void) inet_ntop(AF_INET6, &dst, s_dst, sizeof (s_src)); + (void) inet_ntop(AF_INET6, &dst, s_dst, sizeof(s_src)); tmp = &in6addr_any; - (void) inet_ntop(AF_INET6, tmp, s_src, sizeof (s_src)); + (void) inet_ntop(AF_INET6, tmp, s_src, sizeof(s_src)); printf("%s out src %s dst %s ifp %s\n", __func__, s_src, s_dst, ifp->if_xname); } @@ -320,9 +326,10 @@ in6_selectsrc_core(struct sockaddr_in6 *dstsock, uint32_t hint_mask, struct ifnet *ifp1 = ia->ia_ifp; int srcrule; - if (srcsel_debug) + if (srcsel_debug) { (void) inet_ntop(AF_INET6, &ia->ia_addr.sin6_addr, - s_src, sizeof (s_src)); + s_src, sizeof(s_src)); + } IFA_LOCK(&ia->ia_ifa); @@ -386,23 +393,27 @@ in6_selectsrc_core(struct sockaddr_in6 *dstsock, uint32_t hint_mask, goto next; } /* Rule 1: Prefer same address */ - if (IN6_ARE_ADDR_EQUAL(&dst, &ia->ia_addr.sin6_addr)) + if (IN6_ARE_ADDR_EQUAL(&dst, &ia->ia_addr.sin6_addr)) { BREAK(IP6S_SRCRULE_1); /* there should be no better candidate */ - - if (ia_best == NULL) + } + if (ia_best == NULL) { REPLACE(IP6S_SRCRULE_0); + } /* Rule 2: Prefer appropriate scope */ - if (dst_scope < 0) + if (dst_scope < 0) { dst_scope = in6_addrscope(&dst); + } new_scope = in6_addrscope(&ia->ia_addr.sin6_addr); if (IN6_ARE_SCOPE_CMP(best_scope, new_scope) < 0) { - if (IN6_ARE_SCOPE_CMP(best_scope, dst_scope) < 0) + if (IN6_ARE_SCOPE_CMP(best_scope, dst_scope) < 0) { REPLACE(IP6S_SRCRULE_2); + } NEXTSRC(IP6S_SRCRULE_2); } else if (IN6_ARE_SCOPE_CMP(new_scope, best_scope) < 0) { - if (IN6_ARE_SCOPE_CMP(new_scope, dst_scope) < 0) + if (IN6_ARE_SCOPE_CMP(new_scope, dst_scope) < 0) { NEXTSRC(IP6S_SRCRULE_2); + } REPLACE(IP6S_SRCRULE_2); } @@ -411,22 +422,26 @@ in6_selectsrc_core(struct sockaddr_in6 *dstsock, uint32_t hint_mask, * !ip6_use_deprecated is already rejected above. */ if (!IFA6_IS_DEPRECATED(ia_best, secs) && - IFA6_IS_DEPRECATED(ia, secs)) + IFA6_IS_DEPRECATED(ia, secs)) { NEXTSRC(IP6S_SRCRULE_3); + } if (IFA6_IS_DEPRECATED(ia_best, secs) && - !IFA6_IS_DEPRECATED(ia, secs)) + !IFA6_IS_DEPRECATED(ia, secs)) { REPLACE(IP6S_SRCRULE_3); + } /* * RFC 4429 says that optimistic addresses are equivalent to * deprecated addresses, so avoid them here. 
*/ if ((ia_best->ia6_flags & IN6_IFF_OPTIMISTIC) == 0 && - (ia->ia6_flags & IN6_IFF_OPTIMISTIC) != 0) + (ia->ia6_flags & IN6_IFF_OPTIMISTIC) != 0) { NEXTSRC(IP6S_SRCRULE_3); + } if ((ia_best->ia6_flags & IN6_IFF_OPTIMISTIC) != 0 && - (ia->ia6_flags & IN6_IFF_OPTIMISTIC) == 0) + (ia->ia6_flags & IN6_IFF_OPTIMISTIC) == 0) { REPLACE(IP6S_SRCRULE_3); + } /* Rule 4: Prefer home addresses */ /* @@ -442,26 +457,31 @@ in6_selectsrc_core(struct sockaddr_in6 *dstsock, uint32_t hint_mask, * making the following check redundant. */ if (ip6_select_src_strong_end == 0) { - if (ia_best->ia_ifp == ifp && ia->ia_ifp != ifp) + if (ia_best->ia_ifp == ifp && ia->ia_ifp != ifp) { NEXTSRC(IP6S_SRCRULE_5); - if (ia_best->ia_ifp != ifp && ia->ia_ifp == ifp) + } + if (ia_best->ia_ifp != ifp && ia->ia_ifp == ifp) { REPLACE(IP6S_SRCRULE_5); + } } /* * Rule 6: Prefer matching label * Note that best_policy should be non-NULL here. */ - if (dst_policy == NULL) + if (dst_policy == NULL) { dst_policy = in6_addrsel_lookup_policy(dstsock); + } if (dst_policy->label != ADDR_LABEL_NOTAPP) { new_policy = in6_addrsel_lookup_policy(&ia->ia_addr); if (dst_policy->label == best_policy->label && - dst_policy->label != new_policy->label) + dst_policy->label != new_policy->label) { NEXTSRC(IP6S_SRCRULE_6); + } if (dst_policy->label != best_policy->label && - dst_policy->label == new_policy->label) + dst_policy->label == new_policy->label) { REPLACE(IP6S_SRCRULE_6); + } } /* @@ -472,17 +492,19 @@ in6_selectsrc_core(struct sockaddr_in6 *dstsock, uint32_t hint_mask, */ if (!(ia_best->ia6_flags & IN6_IFF_TEMPORARY) && (ia->ia6_flags & IN6_IFF_TEMPORARY)) { - if (hint_mask & IPV6_SRCSEL_HINT_PREFER_TMPADDR) + if (hint_mask & IPV6_SRCSEL_HINT_PREFER_TMPADDR) { REPLACE(IP6S_SRCRULE_7); - else + } else { NEXTSRC(IP6S_SRCRULE_7); + } } if ((ia_best->ia6_flags & IN6_IFF_TEMPORARY) && !(ia->ia6_flags & IN6_IFF_TEMPORARY)) { - if (hint_mask & IPV6_SRCSEL_HINT_PREFER_TMPADDR) + if (hint_mask & IPV6_SRCSEL_HINT_PREFER_TMPADDR) { NEXTSRC(IP6S_SRCRULE_7); - else + } else { REPLACE(IP6S_SRCRULE_7); + } } /* @@ -490,20 +512,24 @@ in6_selectsrc_core(struct sockaddr_in6 *dstsock, uint32_t hint_mask, * This is a KAME specific rule. */ if ((ia_best->ia_ifp->if_flags & IFF_UP) && - !(ia->ia_ifp->if_flags & IFF_UP)) + !(ia->ia_ifp->if_flags & IFF_UP)) { NEXTSRC(IP6S_SRCRULE_7x); + } if (!(ia_best->ia_ifp->if_flags & IFF_UP) && - (ia->ia_ifp->if_flags & IFF_UP)) + (ia->ia_ifp->if_flags & IFF_UP)) { REPLACE(IP6S_SRCRULE_7x); + } /* * Rule 8: Use longest matching prefix. */ new_matchlen = in6_matchlen(&ia->ia_addr.sin6_addr, &dst); - if (best_matchlen < new_matchlen) + if (best_matchlen < new_matchlen) { REPLACE(IP6S_SRCRULE_8); - if (new_matchlen < best_matchlen) + } + if (new_matchlen < best_matchlen) { NEXTSRC(IP6S_SRCRULE_8); + } /* * Last resort: just keep the current candidate. 
@@ -544,8 +570,9 @@ replace: s_src, ifp1->if_xname, best_scope, new_scope, dst_scope); IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for ia_best */ IFA_UNLOCK(&ia->ia_ifa); - if (ia_best != NULL) + if (ia_best != NULL) { IFA_REMREF(&ia_best->ia_ifa); + } ia_best = ia; continue; @@ -556,8 +583,9 @@ next: out: IFA_ADDREF_LOCKED(&ia->ia_ifa); /* for ia_best */ IFA_UNLOCK(&ia->ia_ifa); - if (ia_best != NULL) + if (ia_best != NULL) { IFA_REMREF(&ia_best->ia_ifa); + } ia_best = ia; break; } @@ -565,8 +593,9 @@ out: lck_rw_done(&in6_ifaddr_rwlock); if ((ia = ia_best) == NULL) { - if (*errorp == 0) + if (*errorp == 0) { *errorp = EADDRNOTAVAIL; + } src_storage = NULL; goto done; } @@ -577,28 +606,30 @@ out: } IFA_LOCK_SPIN(&ia->ia_ifa); - if (bestrule < IP6S_SRCRULE_COUNT) + if (bestrule < IP6S_SRCRULE_COUNT) { ip6stat.ip6s_sources_rule[bestrule]++; + } *src_storage = satosin6(&ia->ia_addr)->sin6_addr; IFA_UNLOCK(&ia->ia_ifa); - if (ifapp != NULL) + if (ifapp != NULL) { *ifapp = &ia->ia_ifa; - else + } else { IFA_REMREF(&ia->ia_ifa); + } done: if (srcsel_debug) { - (void) inet_ntop(AF_INET6, &dst, s_dst, sizeof (s_src)); + (void) inet_ntop(AF_INET6, &dst, s_dst, sizeof(s_src)); tmp = (src_storage != NULL) ? src_storage : &in6addr_any; - (void) inet_ntop(AF_INET6, tmp, s_src, sizeof (s_src)); + (void) inet_ntop(AF_INET6, tmp, s_src, sizeof(s_src)); printf("%s out src %s dst %s dst_scope %d best_scope %d\n", __func__, s_src, s_dst, dst_scope, best_scope); } - return (src_storage); + return src_storage; } /* @@ -628,20 +659,25 @@ in6_selectsrc(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts, ip6oa.ip6oa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; *errorp = 0; - if (ifpp != NULL) + if (ifpp != NULL) { *ifpp = NULL; + } if (inp != NULL) { inp_debug = SASEL_DO_DBG(inp); mopts = inp->in6p_moptions; - if (INP_NO_CELLULAR(inp)) + if (INP_NO_CELLULAR(inp)) { ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR; - if (INP_NO_EXPENSIVE(inp)) + } + if (INP_NO_EXPENSIVE(inp)) { ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE; - if (INP_AWDL_UNRESTRICTED(inp)) + } + if (INP_AWDL_UNRESTRICTED(inp)) { ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED; - if (INP_INTCOPROC_ALLOWED(inp)) + } + if (INP_INTCOPROC_ALLOWED(inp)) { ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED; + } } else { mopts = NULL; /* Allow the kernel to retransmit packets. */ @@ -649,8 +685,9 @@ in6_selectsrc(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts, IP6OAF_AWDL_UNRESTRICTED; } - if (ip6oa.ip6oa_boundif != IFSCOPE_NONE) + if (ip6oa.ip6oa_boundif != IFSCOPE_NONE) { ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; + } /* * If the source address is explicitly specified by the caller, @@ -677,9 +714,9 @@ in6_selectsrc(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts, * the interface must be specified; otherwise, ifa_ifwithaddr() * will fail matching the address. 
*/ - bzero(&srcsock, sizeof (srcsock)); + bzero(&srcsock, sizeof(srcsock)); srcsock.sin6_family = AF_INET6; - srcsock.sin6_len = sizeof (srcsock); + srcsock.sin6_len = sizeof(srcsock); srcsock.sin6_addr = pi->ipi6_addr; if (ifp != NULL) { *errorp = in6_setscope(&srcsock.sin6_addr, ifp, NULL); @@ -737,11 +774,13 @@ in6_selectsrc(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts, prefer_tempaddr = ip6_prefer_tempaddr; } else if (opts->ip6po_prefer_tempaddr == IP6PO_TEMPADDR_NOTPREFER) { prefer_tempaddr = 0; - } else + } else { prefer_tempaddr = 1; + } - if (prefer_tempaddr) + if (prefer_tempaddr) { hint_mask |= IPV6_SRCSEL_HINT_PREFER_TMPADDR; + } if (in6_selectsrc_core(dstsock, hint_mask, ifp, inp_debug, src_storage, &sifp, errorp, NULL) == NULL) { @@ -767,7 +806,7 @@ done: } else if (ifp != NULL) { ifnet_release(ifp); } - return (src_storage); + return src_storage; } /* @@ -805,17 +844,19 @@ selectroute(struct sockaddr_in6 *srcsock, struct sockaddr_in6 *dstsock, unsigned int ifscope = ((ip6oa != NULL) ? ip6oa->ip6oa_boundif : IFSCOPE_NONE); - if (retifp != NULL) + if (retifp != NULL) { *retifp = NULL; + } - if (retrt != NULL) + if (retrt != NULL) { *retrt = NULL; + } if (ip6_select_srcif_debug) { struct in6_addr src; src = (srcsock != NULL) ? srcsock->sin6_addr : in6addr_any; - (void) inet_ntop(AF_INET6, &src, s_src, sizeof (s_src)); - (void) inet_ntop(AF_INET6, dst, s_dst, sizeof (s_dst)); + (void) inet_ntop(AF_INET6, &src, s_src, sizeof(s_src)); + (void) inet_ntop(AF_INET6, dst, s_dst, sizeof(s_dst)); } /* @@ -857,8 +898,9 @@ selectroute(struct sockaddr_in6 *srcsock, struct sockaddr_in6 *dstsock, * the route and we have no interface to use, * it's an error. */ - if (ifp == NULL) + if (ifp == NULL) { error = EHOSTUNREACH; + } goto done; } else { goto getsrcif; @@ -883,8 +925,9 @@ getsrcif: * If the outgoing interface was not set via IPV6_BOUND_IF or * IPV6_PKTINFO, use the scope ID in the destination address. */ - if (ifscope == IFSCOPE_NONE) + if (ifscope == IFSCOPE_NONE) { ifscope = dstsock->sin6_scope_id; + } /* * Perform source interface selection; the source IPv6 address @@ -898,10 +941,11 @@ getsrcif: goto getroute; } else if (!ROUTE_UNUSABLE(ro) && ro->ro_srcia != NULL && (ro->ro_flags & ROF_SRCIF_SELECTED)) { - if (ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) + if (ro->ro_rt->rt_ifp->if_flags & IFF_LOOPBACK) { local_dst = TRUE; + } ifa = ro->ro_srcia; - IFA_ADDREF(ifa); /* for caller */ + IFA_ADDREF(ifa); /* for caller */ goto getroute; } @@ -930,8 +974,9 @@ getsrcif: if (scope == IFSCOPE_NONE) { scope = rt_ifp->if_index; if (scope != get_primary_ifscope(AF_INET6) && - ROUTE_UNUSABLE(ro)) + ROUTE_UNUSABLE(ro)) { scope = get_primary_ifscope(AF_INET6); + } } ifa = (struct ifaddr *) @@ -1003,8 +1048,9 @@ getsrcif: } getroute: - if (ifa != NULL && !proxied_ifa && !local_dst) + if (ifa != NULL && !proxied_ifa && !local_dst) { ifscope = ifa->ifa_ifp->if_index; + } /* * If the next hop address for the packet is specified by the caller, @@ -1026,30 +1072,34 @@ getroute: * by that address must be a neighbor of the sending host. 
*/ ron = &opts->ip6po_nextroute; - if (ron->ro_rt != NULL) + if (ron->ro_rt != NULL) { RT_LOCK(ron->ro_rt); + } if (ROUTE_UNUSABLE(ron) || (ron->ro_rt != NULL && (!(ron->ro_rt->rt_flags & RTF_LLINFO) || (select_srcif && (ifa == NULL || (ifa->ifa_ifp != ron->ro_rt->rt_ifp && !proxied_ifa))))) || !IN6_ARE_ADDR_EQUAL(&satosin6(&ron->ro_dst)->sin6_addr, &sin6_next->sin6_addr)) { - if (ron->ro_rt != NULL) + if (ron->ro_rt != NULL) { RT_UNLOCK(ron->ro_rt); + } ROUTE_RELEASE(ron); *satosin6(&ron->ro_dst) = *sin6_next; } if (ron->ro_rt == NULL) { rtalloc_scoped((struct route *)ron, ifscope); - if (ron->ro_rt != NULL) + if (ron->ro_rt != NULL) { RT_LOCK(ron->ro_rt); + } if (ROUTE_UNUSABLE(ron) || !(ron->ro_rt->rt_flags & RTF_LLINFO) || !IN6_ARE_ADDR_EQUAL(&satosin6(rt_key(ron->ro_rt))-> sin6_addr, &sin6_next->sin6_addr)) { - if (ron->ro_rt != NULL) + if (ron->ro_rt != NULL) { RT_UNLOCK(ron->ro_rt); + } ROUTE_RELEASE(ron); error = EHOSTUNREACH; @@ -1080,39 +1130,44 @@ getroute: * a new one. Note that we should check the address family of the * cached destination, in case of sharing the cache with IPv4. */ - if (ro == NULL) + if (ro == NULL) { goto done; - if (ro->ro_rt != NULL) + } + if (ro->ro_rt != NULL) { RT_LOCK_SPIN(ro->ro_rt); + } if (ROUTE_UNUSABLE(ro) || (ro->ro_rt != NULL && (satosin6(&ro->ro_dst)->sin6_family != AF_INET6 || !IN6_ARE_ADDR_EQUAL(&satosin6(&ro->ro_dst)->sin6_addr, dst) || (select_srcif && (ifa == NULL || (ifa->ifa_ifp != ro->ro_rt->rt_ifp && !proxied_ifa)))))) { - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_UNLOCK(ro->ro_rt); + } ROUTE_RELEASE(ro); } if (ro->ro_rt == NULL) { struct sockaddr_in6 *sa6; - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_UNLOCK(ro->ro_rt); + } /* No route yet, so try to acquire one */ - bzero(&ro->ro_dst, sizeof (struct sockaddr_in6)); + bzero(&ro->ro_dst, sizeof(struct sockaddr_in6)); sa6 = (struct sockaddr_in6 *)&ro->ro_dst; sa6->sin6_family = AF_INET6; - sa6->sin6_len = sizeof (struct sockaddr_in6); + sa6->sin6_len = sizeof(struct sockaddr_in6); sa6->sin6_addr = *dst; if (IN6_IS_ADDR_MULTICAST(dst)) { ro->ro_rt = rtalloc1_scoped( - &((struct route *)ro)->ro_dst, 0, 0, ifscope); + &((struct route *)ro)->ro_dst, 0, 0, ifscope); } else { rtalloc_scoped((struct route *)ro, ifscope); } - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_LOCK_SPIN(ro->ro_rt); + } } /* @@ -1120,8 +1175,9 @@ getroute: * explicitly specified (in case we're asked to clone.) */ if (opts != NULL && opts->ip6po_nexthop != NULL) { - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_UNLOCK(ro->ro_rt); + } goto done; } @@ -1138,8 +1194,9 @@ validateroute: boolean_t has_route = (route != NULL && route->ro_rt != NULL); boolean_t srcif_selected = FALSE; - if (has_route) + if (has_route) { RT_LOCK_ASSERT_HELD(route->ro_rt); + } /* * If there is a non-loopback route with the wrong interface, * or if there is no interface configured with such an address, @@ -1156,8 +1213,8 @@ validateroute: * address of the real interface. 
*/ if (ifa != NULL && proxied_ifa && - (route->ro_rt->rt_flags & (RTF_UP|RTF_PROXY)) == - (RTF_UP|RTF_PROXY)) { + (route->ro_rt->rt_flags & (RTF_UP | RTF_PROXY)) == + (RTF_UP | RTF_PROXY)) { srcif_selected = TRUE; } else { if (ip6_select_srcif_debug) { @@ -1180,7 +1237,7 @@ validateroute: error = EHOSTUNREACH; /* Undo the settings done above */ route = NULL; - ifp = NULL; /* ditch ifp; keep ifp0 */ + ifp = NULL; /* ditch ifp; keep ifp0 */ has_route = FALSE; } } else if (has_route) { @@ -1192,10 +1249,12 @@ validateroute: if (ifa != route->ro_srcia || !(route->ro_flags & ROF_SRCIF_SELECTED)) { RT_CONVERT_LOCK(route->ro_rt); - if (ifa != NULL) + if (ifa != NULL) { IFA_ADDREF(ifa); /* for route_in6 */ - if (route->ro_srcia != NULL) + } + if (route->ro_srcia != NULL) { IFA_REMREF(route->ro_srcia); + } route->ro_srcia = ifa; route->ro_flags |= ROF_SRCIF_SELECTED; RT_GENID_SYNC(route->ro_rt); @@ -1203,8 +1262,9 @@ validateroute: RT_UNLOCK(route->ro_rt); } } else { - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_UNLOCK(ro->ro_rt); + } if (ifp != NULL && opts != NULL && opts->ip6po_pktinfo != NULL && opts->ip6po_pktinfo->ipi6_ifindex != 0) { @@ -1227,14 +1287,14 @@ done: /* * Check for interface restrictions. */ -#define CHECK_RESTRICTIONS(_ip6oa, _ifp) \ - ((((_ip6oa)->ip6oa_flags & IP6OAF_NO_CELLULAR) && \ - IFNET_IS_CELLULAR(_ifp)) || \ - (((_ip6oa)->ip6oa_flags & IP6OAF_NO_EXPENSIVE) && \ - IFNET_IS_EXPENSIVE(_ifp)) || \ - (!((_ip6oa)->ip6oa_flags & IP6OAF_INTCOPROC_ALLOWED) && \ - IFNET_IS_INTCOPROC(_ifp)) || \ - (!((_ip6oa)->ip6oa_flags & IP6OAF_AWDL_UNRESTRICTED) && \ +#define CHECK_RESTRICTIONS(_ip6oa, _ifp) \ + ((((_ip6oa)->ip6oa_flags & IP6OAF_NO_CELLULAR) && \ + IFNET_IS_CELLULAR(_ifp)) || \ + (((_ip6oa)->ip6oa_flags & IP6OAF_NO_EXPENSIVE) && \ + IFNET_IS_EXPENSIVE(_ifp)) || \ + (!((_ip6oa)->ip6oa_flags & IP6OAF_INTCOPROC_ALLOWED) && \ + IFNET_IS_INTCOPROC(_ifp)) || \ + (!((_ip6oa)->ip6oa_flags & IP6OAF_AWDL_UNRESTRICTED) && \ IFNET_IS_AWDL_RESTRICTED(_ifp))) if (error == 0 && ip6oa != NULL && @@ -1245,7 +1305,7 @@ done: ROUTE_RELEASE(route); route = NULL; } - ifp = NULL; /* ditch ifp; keep ifp0 */ + ifp = NULL; /* ditch ifp; keep ifp0 */ error = EHOSTUNREACH; ip6oa->ip6oa_retflags |= IP6OARF_IFDENIED; } @@ -1266,8 +1326,9 @@ done: */ error = EHOSTUNREACH; } - if (error == EHOSTUNREACH || error == ENETDOWN) + if (error == EHOSTUNREACH || error == ENETDOWN) { ip6stat.ip6s_noroute++; + } /* * We'll return ifp regardless of error, so pick it up from ifp0 @@ -1276,20 +1337,23 @@ done: */ ifp = ifp0; if (retifp != NULL) { - if (ifp != NULL) - ifnet_reference(ifp); /* for caller */ + if (ifp != NULL) { + ifnet_reference(ifp); /* for caller */ + } *retifp = ifp; } if (retsrcia != NULL) { - if (ifa != NULL) - IFA_ADDREF(ifa); /* for caller */ + if (ifa != NULL) { + IFA_ADDREF(ifa); /* for caller */ + } *retsrcia = (struct in6_ifaddr *)ifa; } if (error == 0) { - if (retrt != NULL && route != NULL) - *retrt = route->ro_rt; /* ro_rt may be NULL */ + if (retrt != NULL && route != NULL) { + *retrt = route->ro_rt; /* ro_rt may be NULL */ + } } if (ip6_select_srcif_debug) { printf("%s %s->%s ifscope %d ifa_if %s ro_if %s (error=%d)\n", @@ -1299,10 +1363,11 @@ done: (ifp != NULL) ? 
if_name(ifp) : "NONE", error); } - if (ifa != NULL) + if (ifa != NULL) { IFA_REMREF(ifa); + } - return (error); + return error; } /* @@ -1320,13 +1385,14 @@ in6_selectif(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts, struct rtentry *rt = NULL; if (ro == NULL) { - bzero(&sro, sizeof (sro)); + bzero(&sro, sizeof(sro)); ro = &sro; } if ((err = selectroute(NULL, dstsock, opts, mopts, NULL, ro, retifp, - &rt, 0, 1, ip6oa)) != 0) + &rt, 0, 1, ip6oa)) != 0) { goto done; + } /* * do not use a rejected or black hole route. @@ -1360,8 +1426,9 @@ in6_selectif(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts, if (rt != NULL && rt->rt_ifa != NULL && rt->rt_ifa->ifa_ifp != NULL && retifp != NULL) { ifnet_reference(rt->rt_ifa->ifa_ifp); - if (*retifp != NULL) + if (*retifp != NULL) { ifnet_release(*retifp); + } *retifp = rt->rt_ifa->ifa_ifp; } @@ -1375,7 +1442,7 @@ done: * retifp might point to a valid ifp with a reference held; * caller is responsible for releasing it if non-NULL. */ - return (err); + return err; } /* @@ -1391,9 +1458,8 @@ in6_selectroute(struct sockaddr_in6 *srcsock, struct sockaddr_in6 *dstsock, struct in6_ifaddr **retsrcia, struct route_in6 *ro, struct ifnet **retifp, struct rtentry **retrt, int clone, struct ip6_out_args *ip6oa) { - - return (selectroute(srcsock, dstsock, opts, mopts, retsrcia, ro, retifp, - retrt, clone, 0, ip6oa)); + return selectroute(srcsock, dstsock, opts, mopts, retsrcia, ro, retifp, + retrt, clone, 0, ip6oa); } /* @@ -1407,7 +1473,7 @@ int in6_selecthlim(struct in6pcb *in6p, struct ifnet *ifp) { if (in6p && in6p->in6p_hops >= 0) { - return (in6p->in6p_hops); + return in6p->in6p_hops; } else if (NULL != ifp) { u_int8_t chlim; struct nd_ifinfo *ndi = ND_IFINFO(ifp); @@ -1417,10 +1483,10 @@ in6_selecthlim(struct in6pcb *in6p, struct ifnet *ifp) } else { chlim = ip6_defhlim; } - return (chlim); + return chlim; } - return (ip6_defhlim); + return ip6_defhlim; } /* @@ -1458,16 +1524,17 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, * It is not an error if another thread allocated * a port */ - return (0); + return 0; } } /* XXX: this is redundant when called from in6_pcbbind */ - if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0) + if ((so->so_options & (SO_REUSEADDR | SO_REUSEPORT)) == 0) { wild = INPLOOKUP_WILDCARD; + } if (inp->inp_flags & INP_HIGHPORT) { - first = ipport_hifirstauto; /* sysctl */ + first = ipport_hifirstauto; /* sysctl */ last = ipport_hilastauto; lastport = &pcbinfo->ipi_lasthi; } else if (inp->inp_flags & INP_LOWPORT) { @@ -1475,15 +1542,16 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, error = priv_check_cred(cred, PRIV_NETINET_RESERVEDPORT, 0); kauth_cred_unref(&cred); if (error != 0) { - if (!locked) + if (!locked) { lck_rw_done(pcbinfo->ipi_lock); - return (error); + } + return error; } - first = ipport_lowfirstauto; /* 1023 */ - last = ipport_lowlastauto; /* 600 */ + first = ipport_lowfirstauto; /* 1023 */ + last = ipport_lowlastauto; /* 600 */ lastport = &pcbinfo->ipi_lastlow; } else { - first = ipport_firstauto; /* sysctl */ + first = ipport_firstauto; /* sysctl */ last = ipport_lastauto; lastport = &pcbinfo->ipi_lastport; } @@ -1502,16 +1570,17 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, counting_down = FALSE; } do { - if (count-- < 0) { /* completely used? */ + if (count-- < 0) { /* completely used? */ /* * Undo any address bind that may have * occurred above. 
*/ inp->in6p_laddr = in6addr_any; inp->in6p_last_outifp = NULL; - if (!locked) + if (!locked) { lck_rw_done(pcbinfo->ipi_lock); - return (EAGAIN); + } + return EAGAIN; } if (counting_down) { --*lastport; @@ -1520,12 +1589,13 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, } } else { ++*lastport; - if (*lastport < first || *lastport > last) + if (*lastport < first || *lastport > last) { *lastport = first; + } } lport = htons(*lastport); found = (in6_pcblookup_local(pcbinfo, &inp->in6p_laddr, - lport, wild) == NULL); + lport, wild) == NULL); } while (!found); inp->inp_lport = lport; @@ -1537,14 +1607,16 @@ in6_pcbsetport(struct in6_addr *laddr, struct inpcb *inp, struct proc *p, inp->inp_lport = 0; inp->inp_flags &= ~INP_ANONPORT; - if (!locked) + if (!locked) { lck_rw_done(pcbinfo->ipi_lock); - return (EAGAIN); + } + return EAGAIN; } - if (!locked) + if (!locked) { lck_rw_done(pcbinfo->ipi_lock); - return (0); + } + return 0; } /* @@ -1580,12 +1652,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_LOOPBACK_INIT, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK128, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 50, .label = 0 @@ -1596,12 +1668,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK0, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 40, .label = 1 @@ -1612,12 +1684,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_V4MAPPED_INIT, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK96, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 35, .label = 4 @@ -1628,12 +1700,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = {{{ 0x20, 0x02 }}}, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK16, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 30, .label = 2 @@ -1644,12 +1716,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = {{{ 0x20, 0x01 }}}, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK32, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 5, .label = 5 @@ -1660,12 +1732,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = {{{ 0xfc }}}, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK7, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 3, .label = 13 @@ -1676,12 +1748,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK96, - .sin6_len = sizeof 
(struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 1, .label = 3 @@ -1692,12 +1764,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = {{{ 0xfe, 0xc0 }}}, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK16, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 1, .label = 11 @@ -1708,12 +1780,12 @@ addrsel_policy_init(void) .addr = { .sin6_family = AF_INET6, .sin6_addr = {{{ 0x3f, 0xfe }}}, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .addrmask = { .sin6_family = AF_INET6, .sin6_addr = IN6MASK16, - .sin6_len = sizeof (struct sockaddr_in6) + .sin6_len = sizeof(struct sockaddr_in6) }, .preced = 1, .label = 12 @@ -1724,12 +1796,12 @@ addrsel_policy_init(void) init_policy_queue(); /* initialize the "last resort" policy */ - bzero(&defaultaddrpolicy, sizeof (defaultaddrpolicy)); + bzero(&defaultaddrpolicy, sizeof(defaultaddrpolicy)); defaultaddrpolicy.label = ADDR_LABEL_NOTAPP; - for (i = 0; i < sizeof (defaddrsel) / sizeof (defaddrsel[0]); i++) + for (i = 0; i < sizeof(defaddrsel) / sizeof(defaddrsel[0]); i++) { add_addrsel_policyent(&defaddrsel[i]); - + } } struct in6_addrpolicy * @@ -1740,13 +1812,14 @@ in6_addrsel_lookup_policy(struct sockaddr_in6 *key) ADDRSEL_LOCK(); match = match_addrsel_policy(key); - if (match == NULL) + if (match == NULL) { match = &defaultaddrpolicy; - else + } else { match->use++; + } ADDRSEL_UNLOCK(); - return (match); + return match; } static struct in6_addrpolicy * @@ -1762,16 +1835,17 @@ match_addrsel_policy(struct sockaddr_in6 *key) pol = &pent->ape_policy; mp = (u_char *)&pol->addrmask.sin6_addr; - ep = mp + 16; /* XXX: scope field? */ + ep = mp + 16; /* XXX: scope field? */ k = (u_char *)&key->sin6_addr; p = (u_char *)&pol->addr.sin6_addr; for (; mp < ep && *mp; mp++, k++, p++) { m = *mp; - if ((*k & m) != *p) + if ((*k & m) != *p) { goto next; /* not match */ - if (m == 0xff) /* short cut for a typical case */ + } + if (m == 0xff) { /* short cut for a typical case */ matchlen += 8; - else { + } else { while (m >= 0x80) { matchlen++; m <<= 1; @@ -1786,11 +1860,11 @@ match_addrsel_policy(struct sockaddr_in6 *key) bestmatchlen = matchlen; } - next: +next: continue; } - return (bestpol); + return bestpol; } static int @@ -1798,7 +1872,7 @@ add_addrsel_policyent(const struct in6_addrpolicy *newpolicy) { struct addrsel_policyent *new, *pol; - MALLOC(new, struct addrsel_policyent *, sizeof (*new), M_IFADDR, + MALLOC(new, struct addrsel_policyent *, sizeof(*new), M_IFADDR, M_WAITOK); ADDRSEL_LOCK(); @@ -1811,11 +1885,11 @@ add_addrsel_policyent(const struct in6_addrpolicy *newpolicy) &pol->ape_policy.addrmask.sin6_addr)) { ADDRSEL_UNLOCK(); FREE(new, M_IFADDR); - return (EEXIST); /* or override it? */ + return EEXIST; /* or override it? 
*/ } } - bzero(new, sizeof (*new)); + bzero(new, sizeof(*new)); /* XXX: should validate entry */ new->ape_policy = *newpolicy; @@ -1823,7 +1897,7 @@ add_addrsel_policyent(const struct in6_addrpolicy *newpolicy) TAILQ_INSERT_TAIL(&addrsel_policytab, new, ape_entry); ADDRSEL_UNLOCK(); - return (0); + return 0; } #ifdef ENABLE_ADDRSEL static int @@ -1845,7 +1919,7 @@ delete_addrsel_policyent(const struct in6_addrpolicy *key) } if (pol == NULL) { ADDRSEL_UNLOCK(); - return (ESRCH); + return ESRCH; } TAILQ_REMOVE(&addrsel_policytab, pol, ape_entry); @@ -1853,7 +1927,7 @@ delete_addrsel_policyent(const struct in6_addrpolicy *key) pol = NULL; ADDRSEL_UNLOCK(); - return (0); + return 0; } #endif /* ENABLE_ADDRSEL */ @@ -1868,11 +1942,11 @@ walk_addrsel_policy(int (*callback)(const struct in6_addrpolicy *, void *), TAILQ_FOREACH(pol, &addrsel_policytab, ape_entry) { if ((error = (*callback)(&pol->ape_policy, w)) != 0) { ADDRSEL_UNLOCK(); - return (error); + return error; } } ADDRSEL_UNLOCK(); - return (error); + return error; } /* * Subroutines to manage the address selection policy table via sysctl. @@ -1888,67 +1962,70 @@ dump_addrsel_policyent(const struct in6_addrpolicy *pol, void *arg) int error = 0; struct walkarg *w = arg; - error = SYSCTL_OUT(w->w_req, pol, sizeof (*pol)); + error = SYSCTL_OUT(w->w_req, pol, sizeof(*pol)); - return (error); + return error; } static int in6_src_sysctl SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) -struct walkarg w; + struct walkarg w; - if (req->newptr) - return (EPERM); - bzero(&w, sizeof (w)); + if (req->newptr) { + return EPERM; + } + bzero(&w, sizeof(w)); w.w_req = req; - return (walk_addrsel_policy(dump_addrsel_policyent, &w)); + return walk_addrsel_policy(dump_addrsel_policyent, &w); } SYSCTL_NODE(_net_inet6_ip6, IPV6CTL_ADDRCTLPOLICY, addrctlpolicy, - CTLFLAG_RD | CTLFLAG_LOCKED, in6_src_sysctl, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, in6_src_sysctl, ""); int in6_src_ioctl(u_long cmd, caddr_t data) { int i; struct in6_addrpolicy ent0; - if (cmd != SIOCAADDRCTL_POLICY && cmd != SIOCDADDRCTL_POLICY) - return (EOPNOTSUPP); /* check for safety */ - - bcopy(data, &ent0, sizeof (ent0)); + if (cmd != SIOCAADDRCTL_POLICY && cmd != SIOCDADDRCTL_POLICY) { + return EOPNOTSUPP; /* check for safety */ + } + bcopy(data, &ent0, sizeof(ent0)); - if (ent0.label == ADDR_LABEL_NOTAPP) - return (EINVAL); + if (ent0.label == ADDR_LABEL_NOTAPP) { + return EINVAL; + } /* check if the prefix mask is consecutive. */ - if (in6_mask2len(&ent0.addrmask.sin6_addr, NULL) < 0) - return (EINVAL); + if (in6_mask2len(&ent0.addrmask.sin6_addr, NULL) < 0) { + return EINVAL; + } /* clear trailing garbages (if any) of the prefix address. 
*/ for (i = 0; i < 4; i++) { ent0.addr.sin6_addr.s6_addr32[i] &= - ent0.addrmask.sin6_addr.s6_addr32[i]; + ent0.addrmask.sin6_addr.s6_addr32[i]; } ent0.use = 0; switch (cmd) { case SIOCAADDRCTL_POLICY: #ifdef ENABLE_ADDRSEL - return (add_addrsel_policyent(&ent0)); + return add_addrsel_policyent(&ent0); #else - return (ENOTSUP); + return ENOTSUP; #endif case SIOCDADDRCTL_POLICY: #ifdef ENABLE_ADDRSEL - return (delete_addrsel_policyent(&ent0)); + return delete_addrsel_policyent(&ent0); #else - return (ENOTSUP); + return ENOTSUP; #endif } - return (0); /* XXX: compromise compilers */ + return 0; /* XXX: compromise compilers */ } /* @@ -1976,8 +2053,9 @@ in6_embedscope(struct in6_addr *in6, const struct sockaddr_in6 *sin6, *in6 = sin6->sin6_addr; scopeid = sin6->sin6_scope_id; - if (ifpp != NULL) + if (ifpp != NULL) { *ifpp = NULL; + } /* * don't try to read sin6->sin6_addr beyond here, since the caller may @@ -1985,8 +2063,9 @@ in6_embedscope(struct in6_addr *in6, const struct sockaddr_in6 *sin6, */ #ifdef ENABLE_DEFAULT_SCOPE - if (scopeid == 0) + if (scopeid == 0) { scopeid = scope6_addr2default(in6); + } #endif if (IN6_IS_SCOPE_LINKLOCAL(in6) || IN6_IS_ADDR_MC_INTFACELOCAL(in6)) { @@ -2001,10 +2080,11 @@ in6_embedscope(struct in6_addr *in6, const struct sockaddr_in6 *sin6, IM6O_UNLOCK(in6p->in6p_moptions); } - if (opt != NULL) + if (opt != NULL) { optp = opt; - else if (in6p != NULL) + } else if (in6p != NULL) { optp = in6p->in6p_outputopts; + } /* * KAME assumption: link id == interface id */ @@ -2028,9 +2108,9 @@ in6_embedscope(struct in6_addr *in6, const struct sockaddr_in6 *sin6, * against if_index (ifnet_head_lock not needed since * if_index is an ever-increasing integer.) */ - if (if_index < scopeid) - return (ENXIO); /* XXX EINVAL? */ - + if (if_index < scopeid) { + return ENXIO; /* XXX EINVAL? 
*/ + } /* ifp is needed here only if we're returning it */ if (ifpp != NULL) { ifnet_head_lock_shared(); @@ -2042,13 +2122,14 @@ in6_embedscope(struct in6_addr *in6, const struct sockaddr_in6 *sin6, } if (ifpp != NULL) { - if (ifp != NULL) - ifnet_reference(ifp); /* for caller */ + if (ifp != NULL) { + ifnet_reference(ifp); /* for caller */ + } *ifpp = ifp; } } - return (0); + return 0; } /* @@ -2086,14 +2167,16 @@ in6_recoverscope( * Since scopeid is unsigned, we only have to check it * against if_index */ - if (if_index < scopeid) - return (ENXIO); - if (ifp && ifp->if_index != scopeid) - return (ENXIO); + if (if_index < scopeid) { + return ENXIO; + } + if (ifp && ifp->if_index != scopeid) { + return ENXIO; + } sin6->sin6_addr.s6_addr16[1] = 0; sin6->sin6_scope_id = scopeid; } } - return (0); + return 0; } diff --git a/bsd/netinet6/in6_var.h b/bsd/netinet6/in6_var.h index 50cd3e9b9..baa9a541b 100644 --- a/bsd/netinet6/in6_var.h +++ b/bsd/netinet6/in6_var.h @@ -91,7 +91,7 @@ */ #ifndef _NETINET6_IN6_VAR_H_ -#define _NETINET6_IN6_VAR_H_ +#define _NETINET6_IN6_VAR_H_ #include #include @@ -112,10 +112,10 @@ * in kernel: modify preferred/expire only */ struct in6_addrlifetime { - time_t ia6t_expire; /* valid lifetime expiration time */ - time_t ia6t_preferred; /* preferred lifetime expiration time */ - u_int32_t ia6t_vltime; /* valid lifetime */ - u_int32_t ia6t_pltime; /* prefix lifetime */ + time_t ia6t_expire; /* valid lifetime expiration time */ + time_t ia6t_preferred; /* preferred lifetime expiration time */ + u_int32_t ia6t_vltime; /* valid lifetime */ + u_int32_t ia6t_pltime; /* prefix lifetime */ }; #ifdef BSD_KERNEL_PRIVATE @@ -137,12 +137,12 @@ struct in6_addrlifetime_64 { * Internal representation of ia6_lifetime (expiration in uptime unit) */ struct in6_addrlifetime_i { - u_int64_t ia6ti_expire; /* valid lifetime expiration time */ - u_int64_t ia6ti_preferred; /* preferred lifetime expiration time */ - u_int32_t ia6ti_vltime; /* valid lifetime */ - u_int32_t ia6ti_pltime; /* prefix lifetime */ + u_int64_t ia6ti_expire; /* valid lifetime expiration time */ + u_int64_t ia6ti_preferred; /* preferred lifetime expiration time */ + u_int32_t ia6ti_vltime; /* valid lifetime */ + u_int32_t ia6ti_pltime; /* prefix lifetime */ u_int64_t ia6ti_base_calendartime; /* calendar time at creation */ - u_int64_t ia6ti_base_uptime; /* uptime at creation */ + u_int64_t ia6ti_base_uptime; /* uptime at creation */ }; /* @@ -153,15 +153,15 @@ struct in6_addrlifetime_i { */ struct in6_ifaddr { - struct ifaddr ia_ifa; /* protocol-independent info */ -#define ia_ifp ia_ifa.ifa_ifp -#define ia_flags ia_ifa.ifa_flags - struct sockaddr_in6 ia_addr; /* interface address */ - struct sockaddr_in6 ia_net; /* network number of interface */ + struct ifaddr ia_ifa; /* protocol-independent info */ +#define ia_ifp ia_ifa.ifa_ifp +#define ia_flags ia_ifa.ifa_flags + struct sockaddr_in6 ia_addr; /* interface address */ + struct sockaddr_in6 ia_net; /* network number of interface */ struct sockaddr_in6 ia_dstaddr; /* space for destination addr */ struct sockaddr_in6 ia_prefixmask; /* prefix mask */ - u_int32_t ia_plen; /* prefix length */ - struct in6_ifaddr *ia_next; /* next in6 list of IP6 addresses */ + u_int32_t ia_plen; /* prefix length */ + struct in6_ifaddr *ia_next; /* next in6 list of IP6 addresses */ int ia6_flags; struct in6_addrlifetime_i ia6_lifetime; @@ -179,59 +179,59 @@ struct in6_ifaddr { LIST_HEAD(, in6_multi_mship) ia6_memberships; }; -#define ifatoia6(ifa) ((struct in6_ifaddr *)(void *)(ifa)) 
+#define ifatoia6(ifa) ((struct in6_ifaddr *)(void *)(ifa)) #endif /* BSD_KERNEL_PRIVATE */ /* control structure to manage address selection policy */ struct in6_addrpolicy { struct sockaddr_in6 addr; /* prefix address */ struct sockaddr_in6 addrmask; /* prefix mask */ - int preced; /* precedence */ - int label; /* matching label */ - u_quad_t use; /* statistics */ + int preced; /* precedence */ + int label; /* matching label */ + u_quad_t use; /* statistics */ }; /* * IPv6 interface statistics, as defined in RFC2465 Ipv6IfStatsEntry (p12). */ struct in6_ifstat { - u_quad_t ifs6_in_receive; /* # of total input datagram */ - u_quad_t ifs6_in_hdrerr; /* # of datagrams with invalid hdr */ - u_quad_t ifs6_in_toobig; /* # of datagrams exceeded MTU */ - u_quad_t ifs6_in_noroute; /* # of datagrams with no route */ - u_quad_t ifs6_in_addrerr; /* # of datagrams with invalid dst */ - u_quad_t ifs6_in_protounknown; /* # of datagrams with unknown proto */ - /* NOTE: increment on final dst if */ - u_quad_t ifs6_in_truncated; /* # of truncated datagrams */ - u_quad_t ifs6_in_discard; /* # of discarded datagrams */ - /* NOTE: fragment timeout is not here */ - u_quad_t ifs6_in_deliver; /* # of datagrams delivered to ULP */ - /* NOTE: increment on final dst if */ - u_quad_t ifs6_out_forward; /* # of datagrams forwarded */ - /* NOTE: increment on outgoing if */ - u_quad_t ifs6_out_request; /* # of outgoing datagrams from ULP */ - /* NOTE: does not include forwrads */ - u_quad_t ifs6_out_discard; /* # of discarded datagrams */ - u_quad_t ifs6_out_fragok; /* # of datagrams fragmented */ - u_quad_t ifs6_out_fragfail; /* # of datagrams failed on fragment */ - u_quad_t ifs6_out_fragcreat; /* # of fragment datagrams */ - /* NOTE: this is # after fragment */ - u_quad_t ifs6_reass_reqd; /* # of incoming fragmented packets */ - /* NOTE: increment on final dst if */ - u_quad_t ifs6_reass_ok; /* # of reassembled packets */ - /* NOTE: this is # after reass */ - /* NOTE: increment on final dst if */ - u_quad_t ifs6_atmfrag_rcvd; /* # of atomic fragments received */ - u_quad_t ifs6_reass_fail; /* # of reass failures */ - /* NOTE: may not be packet count */ - /* NOTE: increment on final dst if */ - u_quad_t ifs6_in_mcast; /* # of inbound multicast datagrams */ - u_quad_t ifs6_out_mcast; /* # of outbound multicast datagrams */ - - u_quad_t ifs6_cantfoward_icmp6; /* # of ICMPv6 packets received for unreachable dest */ - u_quad_t ifs6_addr_expiry_cnt; /* # of address expiry events (excluding privacy addresses) */ - u_quad_t ifs6_pfx_expiry_cnt; /* # of prefix expiry events */ - u_quad_t ifs6_defrtr_expiry_cnt; /* # of default router expiry events */ + u_quad_t ifs6_in_receive; /* # of total input datagram */ + u_quad_t ifs6_in_hdrerr; /* # of datagrams with invalid hdr */ + u_quad_t ifs6_in_toobig; /* # of datagrams exceeded MTU */ + u_quad_t ifs6_in_noroute; /* # of datagrams with no route */ + u_quad_t ifs6_in_addrerr; /* # of datagrams with invalid dst */ + u_quad_t ifs6_in_protounknown; /* # of datagrams with unknown proto */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_in_truncated; /* # of truncated datagrams */ + u_quad_t ifs6_in_discard; /* # of discarded datagrams */ + /* NOTE: fragment timeout is not here */ + u_quad_t ifs6_in_deliver; /* # of datagrams delivered to ULP */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_out_forward; /* # of datagrams forwarded */ + /* NOTE: increment on outgoing if */ + u_quad_t ifs6_out_request; /* # of outgoing datagrams from ULP */ + /* NOTE: does not include 
forwrads */ + u_quad_t ifs6_out_discard; /* # of discarded datagrams */ + u_quad_t ifs6_out_fragok; /* # of datagrams fragmented */ + u_quad_t ifs6_out_fragfail; /* # of datagrams failed on fragment */ + u_quad_t ifs6_out_fragcreat; /* # of fragment datagrams */ + /* NOTE: this is # after fragment */ + u_quad_t ifs6_reass_reqd; /* # of incoming fragmented packets */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_reass_ok; /* # of reassembled packets */ + /* NOTE: this is # after reass */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_atmfrag_rcvd; /* # of atomic fragments received */ + u_quad_t ifs6_reass_fail; /* # of reass failures */ + /* NOTE: may not be packet count */ + /* NOTE: increment on final dst if */ + u_quad_t ifs6_in_mcast; /* # of inbound multicast datagrams */ + u_quad_t ifs6_out_mcast; /* # of outbound multicast datagrams */ + + u_quad_t ifs6_cantfoward_icmp6; /* # of ICMPv6 packets received for unreachable dest */ + u_quad_t ifs6_addr_expiry_cnt; /* # of address expiry events (excluding privacy addresses) */ + u_quad_t ifs6_pfx_expiry_cnt; /* # of prefix expiry events */ + u_quad_t ifs6_defrtr_expiry_cnt; /* # of default router expiry events */ }; /* @@ -317,15 +317,15 @@ struct icmp6_ifstat { }; struct in6_ifreq { - char ifr_name[IFNAMSIZ]; + char ifr_name[IFNAMSIZ]; union { - struct sockaddr_in6 ifru_addr; - struct sockaddr_in6 ifru_dstaddr; - int ifru_flags; - int ifru_flags6; - int ifru_metric; - int ifru_intval; - caddr_t ifru_data; + struct sockaddr_in6 ifru_addr; + struct sockaddr_in6 ifru_dstaddr; + int ifru_flags; + int ifru_flags6; + int ifru_metric; + int ifru_intval; + caddr_t ifru_data; struct in6_addrlifetime ifru_lifetime; struct in6_ifstat ifru_stat; struct icmp6_ifstat ifru_icmp6stat; @@ -334,16 +334,16 @@ struct in6_ifreq { }; struct in6_aliasreq { - char ifra_name[IFNAMSIZ]; - struct sockaddr_in6 ifra_addr; - struct sockaddr_in6 ifra_dstaddr; - struct sockaddr_in6 ifra_prefixmask; - int ifra_flags; + char ifra_name[IFNAMSIZ]; + struct sockaddr_in6 ifra_addr; + struct sockaddr_in6 ifra_dstaddr; + struct sockaddr_in6 ifra_prefixmask; + int ifra_flags; struct in6_addrlifetime ifra_lifetime; }; #ifdef PRIVATE -#define IN6_CGA_MODIFIER_LENGTH 16 +#define IN6_CGA_MODIFIER_LENGTH 16 struct in6_cga_modifier { u_int8_t octets[IN6_CGA_MODIFIER_LENGTH]; @@ -355,7 +355,7 @@ struct in6_cga_prepare { u_int8_t reserved_A[15]; }; -#define IN6_CGA_KEY_MAXSIZE 2048 /* octets */ +#define IN6_CGA_KEY_MAXSIZE 2048 /* octets */ struct in6_cga_nodecfg { struct iovec cga_privkey; @@ -421,27 +421,27 @@ struct in6_cgareq_64 { #ifdef BSD_KERNEL_PRIVATE struct in6_aliasreq_32 { - char ifra_name[IFNAMSIZ]; - struct sockaddr_in6 ifra_addr; - struct sockaddr_in6 ifra_dstaddr; - struct sockaddr_in6 ifra_prefixmask; - int ifra_flags; + char ifra_name[IFNAMSIZ]; + struct sockaddr_in6 ifra_addr; + struct sockaddr_in6 ifra_dstaddr; + struct sockaddr_in6 ifra_prefixmask; + int ifra_flags; struct in6_addrlifetime_32 ifra_lifetime; }; struct in6_aliasreq_64 { - char ifra_name[IFNAMSIZ]; - struct sockaddr_in6 ifra_addr; - struct sockaddr_in6 ifra_dstaddr; - struct sockaddr_in6 ifra_prefixmask; - int ifra_flags; + char ifra_name[IFNAMSIZ]; + struct sockaddr_in6 ifra_addr; + struct sockaddr_in6 ifra_dstaddr; + struct sockaddr_in6 ifra_prefixmask; + int ifra_flags; struct in6_addrlifetime_64 ifra_lifetime; }; #endif /* BSD_KERNEL_PRIVATE */ /* prefix type macro */ -#define IN6_PREFIX_ND 1 -#define IN6_PREFIX_RR 2 +#define IN6_PREFIX_ND 1 +#define IN6_PREFIX_RR 2 /* * prefix 
related flags passed between kernel(NDP related part) and @@ -466,36 +466,36 @@ struct in6_prflags { }; struct in6_prefixreq { - char ipr_name[IFNAMSIZ]; - u_char ipr_origin; - u_char ipr_plen; + char ipr_name[IFNAMSIZ]; + u_char ipr_origin; + u_char ipr_plen; u_int32_t ipr_vltime; u_int32_t ipr_pltime; struct in6_prflags ipr_flags; - struct sockaddr_in6 ipr_prefix; + struct sockaddr_in6 ipr_prefix; }; -#define PR_ORIG_RA 0 -#define PR_ORIG_RR 1 -#define PR_ORIG_STATIC 2 -#define PR_ORIG_KERNEL 3 +#define PR_ORIG_RA 0 +#define PR_ORIG_RR 1 +#define PR_ORIG_STATIC 2 +#define PR_ORIG_KERNEL 3 -#define ipr_raf_onlink ipr_flags.prf_ra.onlink -#define ipr_raf_auto ipr_flags.prf_ra.autonomous +#define ipr_raf_onlink ipr_flags.prf_ra.onlink +#define ipr_raf_auto ipr_flags.prf_ra.autonomous -#define ipr_statef_onlink ipr_flags.prf_state.onlink +#define ipr_statef_onlink ipr_flags.prf_state.onlink -#define ipr_rrf_decrvalid ipr_flags.prf_rr.decrvalid -#define ipr_rrf_decrprefd ipr_flags.prf_rr.decrprefd +#define ipr_rrf_decrvalid ipr_flags.prf_rr.decrvalid +#define ipr_rrf_decrprefd ipr_flags.prf_rr.decrprefd struct in6_rrenumreq { - char irr_name[IFNAMSIZ]; - u_char irr_origin; - u_char irr_m_len; /* match len for matchprefix */ - u_char irr_m_minlen; /* minlen for matching prefix */ - u_char irr_m_maxlen; /* maxlen for matching prefix */ - u_char irr_u_uselen; /* uselen for adding prefix */ - u_char irr_u_keeplen; /* keeplen from matching prefix */ + char irr_name[IFNAMSIZ]; + u_char irr_origin; + u_char irr_m_len; /* match len for matchprefix */ + u_char irr_m_minlen; /* minlen for matching prefix */ + u_char irr_m_maxlen; /* maxlen for matching prefix */ + u_char irr_u_uselen; /* uselen for adding prefix */ + u_char irr_u_keeplen; /* keeplen from matching prefix */ struct irr_raflagmask { u_char onlink : 1; u_char autonomous : 1; @@ -504,22 +504,22 @@ struct in6_rrenumreq { u_int32_t irr_vltime; u_int32_t irr_pltime; struct in6_prflags irr_flags; - struct sockaddr_in6 irr_matchprefix; - struct sockaddr_in6 irr_useprefix; + struct sockaddr_in6 irr_matchprefix; + struct sockaddr_in6 irr_useprefix; }; -#define irr_raf_mask_onlink irr_raflagmask.onlink -#define irr_raf_mask_auto irr_raflagmask.autonomous -#define irr_raf_mask_reserved irr_raflagmask.reserved +#define irr_raf_mask_onlink irr_raflagmask.onlink +#define irr_raf_mask_auto irr_raflagmask.autonomous +#define irr_raf_mask_reserved irr_raflagmask.reserved -#define irr_raf_onlink irr_flags.prf_ra.onlink -#define irr_raf_auto irr_flags.prf_ra.autonomous +#define irr_raf_onlink irr_flags.prf_ra.onlink +#define irr_raf_auto irr_flags.prf_ra.autonomous -#define irr_statef_onlink irr_flags.prf_state.onlink +#define irr_statef_onlink irr_flags.prf_state.onlink -#define irr_rrf irr_flags.prf_rr -#define irr_rrf_decrvalid irr_flags.prf_rr.decrvalid -#define irr_rrf_decrprefd irr_flags.prf_rr.decrprefd +#define irr_rrf irr_flags.prf_rr +#define irr_rrf_decrvalid irr_flags.prf_rr.decrvalid +#define irr_rrf_decrprefd irr_flags.prf_rr.decrprefd /* * Event data, inet6 style. 
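The defaddrsel[] initializers rewritten in addrsel_policy_init() earlier in this patch encode the RFC 3484-style default policy table, and match_addrsel_policy() keeps the entry whose mask matches the most leading bits; rule 6 then compares labels. A self-contained sketch of that table and lookup, assuming a userland build (struct policy, lookup_policy(), and the textual prefixes are illustrative; the precedence/label pairs are copied from the hunks above):

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/*
 * Userland mirror of the default policy table installed by
 * addrsel_policy_init(): prefix, prefix length, precedence, label.
 * The kernel stores these as sockaddr_in6 address/mask pairs on a
 * TAILQ; textual prefixes are used here for readability only.
 */
struct policy {
	const char	*prefix;
	int		plen;
	int		preced;
	int		label;
};

static const struct policy defaddrsel[] = {
	{ "::1",        128, 50,  0 },  /* loopback */
	{ "::",           0, 40,  1 },  /* default */
	{ "::ffff:0:0",  96, 35,  4 },  /* IPv4-mapped */
	{ "2002::",      16, 30,  2 },  /* 6to4 */
	{ "2001::",      32,  5,  5 },  /* Teredo */
	{ "fc00::",       7,  3, 13 },  /* unique-local */
	{ "::",          96,  1,  3 },  /* IPv4-compatible */
	{ "fec0::",      16,  1, 11 },  /* site-local (deprecated) */
	{ "3ffe::",      16,  1, 12 },  /* 6bone (historical) */
};

/* Longest-prefix match over the table, as match_addrsel_policy() does. */
static const struct policy *
lookup_policy(const struct in6_addr *key)
{
	const struct policy *best = NULL;
	int bestlen = -1;

	for (size_t i = 0; i < sizeof(defaddrsel) / sizeof(defaddrsel[0]); i++) {
		struct in6_addr p;
		int b, match = 1;

		(void) inet_pton(AF_INET6, defaddrsel[i].prefix, &p);
		for (b = 0; b < defaddrsel[i].plen; b++) {
			if (((key->s6_addr[b / 8] ^ p.s6_addr[b / 8]) >>
			    (7 - (b % 8))) & 1) {
				match = 0;
				break;
			}
		}
		if (match && defaddrsel[i].plen > bestlen) {
			bestlen = defaddrsel[i].plen;
			best = &defaddrsel[i];
		}
	}
	return best;
}

int
main(void)
{
	struct in6_addr a;
	const struct policy *pol;

	(void) inet_pton(AF_INET6, "2002:c000:204::1", &a);
	pol = lookup_policy(&a);
	printf("preced %d label %d\n", pol->preced, pol->label); /* 30, 2 */
	return 0;
}

A query for a 6to4 address such as 2002:c000:204::1 lands on the 2002::/16 row (precedence 30, label 2), which is the entry the kernel's TAILQ walk would keep as bestpol.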
@@ -533,21 +533,21 @@ struct kev_in6_addrlifetime { struct kev_in6_data { struct net_event_data link_data; - struct sockaddr_in6 ia_addr; /* interface address */ - struct sockaddr_in6 ia_net; /* network number of interface */ + struct sockaddr_in6 ia_addr; /* interface address */ + struct sockaddr_in6 ia_net; /* network number of interface */ struct sockaddr_in6 ia_dstaddr; /* space for destination addr */ struct sockaddr_in6 ia_prefixmask; /* prefix mask */ - u_int32_t ia_plen; /* prefix length */ - u_int32_t ia6_flags; /* address flags from in6_ifaddr */ + u_int32_t ia_plen; /* prefix length */ + u_int32_t ia6_flags; /* address flags from in6_ifaddr */ struct kev_in6_addrlifetime ia_lifetime; /* address life info */ - uint8_t ia_mac[ETHER_ADDR_LEN]; + uint8_t ia_mac[ETHER_ADDR_LEN]; }; #ifdef BSD_KERNEL_PRIVATE /* Utility function used inside netinet6 kernel code for generating events */ void in6_post_msg(struct ifnet *, u_int32_t, struct in6_ifaddr *, uint8_t *mac); -#define IN6_ARE_MASKED_ADDR_EQUAL(d, a, m) ( \ +#define IN6_ARE_MASKED_ADDR_EQUAL(d, a, m) ( \ (((d)->s6_addr32[0] ^ (a)->s6_addr32[0]) & (m)->s6_addr32[0]) == 0 && \ (((d)->s6_addr32[1] ^ (a)->s6_addr32[1]) & (m)->s6_addr32[1]) == 0 && \ (((d)->s6_addr32[2] ^ (a)->s6_addr32[2]) & (m)->s6_addr32[2]) == 0 && \ @@ -557,178 +557,178 @@ void in6_post_msg(struct ifnet *, u_int32_t, struct in6_ifaddr *, uint8_t *mac); * Given a pointer to an in6_ifaddr (ifaddr), * return a pointer to the addr as a sockaddr_in6 */ -#define IA6_IN6(ia) (&((ia)->ia_addr.sin6_addr)) -#define IA6_DSTIN6(ia) (&((ia)->ia_dstaddr.sin6_addr)) -#define IA6_MASKIN6(ia) (&((ia)->ia_prefixmask.sin6_addr)) -#define IA6_SIN6(ia) (&((ia)->ia_addr)) -#define IA6_DSTSIN6(ia) (&((ia)->ia_dstaddr)) -#define IFA_IN6(x) \ +#define IA6_IN6(ia) (&((ia)->ia_addr.sin6_addr)) +#define IA6_DSTIN6(ia) (&((ia)->ia_dstaddr.sin6_addr)) +#define IA6_MASKIN6(ia) (&((ia)->ia_prefixmask.sin6_addr)) +#define IA6_SIN6(ia) (&((ia)->ia_addr)) +#define IA6_DSTSIN6(ia) (&((ia)->ia_dstaddr)) +#define IFA_IN6(x) \ (&((struct sockaddr_in6 *)(void *)((x)->ifa_addr))->sin6_addr) -#define IFA_DSTIN6(x) \ +#define IFA_DSTIN6(x) \ (&((struct sockaddr_in6 *)(void *)((x)->ifa_dstaddr))->sin6_addr) -#define IFPR_IN6(x) \ +#define IFPR_IN6(x) \ (&((struct sockaddr_in6 *)(void *)((x)->ifpr_prefix))->sin6_addr) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCSIFADDR_IN6 _IOW('i', 12, struct in6_ifreq) -#define SIOCGIFADDR_IN6 _IOWR('i', 33, struct in6_ifreq) +#define SIOCSIFADDR_IN6 _IOW('i', 12, struct in6_ifreq) +#define SIOCGIFADDR_IN6 _IOWR('i', 33, struct in6_ifreq) /* * SIOCSxxx ioctls should be unused (see comments in in6.c), but * we do not shift numbers for binary compatibility. 
*/ -#define SIOCSIFDSTADDR_IN6 _IOW('i', 14, struct in6_ifreq) -#define SIOCSIFNETMASK_IN6 _IOW('i', 22, struct in6_ifreq) +#define SIOCSIFDSTADDR_IN6 _IOW('i', 14, struct in6_ifreq) +#define SIOCSIFNETMASK_IN6 _IOW('i', 22, struct in6_ifreq) -#define SIOCGIFDSTADDR_IN6 _IOWR('i', 34, struct in6_ifreq) -#define SIOCGIFNETMASK_IN6 _IOWR('i', 37, struct in6_ifreq) +#define SIOCGIFDSTADDR_IN6 _IOWR('i', 34, struct in6_ifreq) +#define SIOCGIFNETMASK_IN6 _IOWR('i', 37, struct in6_ifreq) -#define SIOCDIFADDR_IN6 _IOW('i', 25, struct in6_ifreq) -#define SIOCAIFADDR_IN6 _IOW('i', 26, struct in6_aliasreq) +#define SIOCDIFADDR_IN6 _IOW('i', 25, struct in6_ifreq) +#define SIOCAIFADDR_IN6 _IOW('i', 26, struct in6_aliasreq) #ifdef BSD_KERNEL_PRIVATE -#define SIOCAIFADDR_IN6_32 _IOW('i', 26, struct in6_aliasreq_32) -#define SIOCAIFADDR_IN6_64 _IOW('i', 26, struct in6_aliasreq_64) +#define SIOCAIFADDR_IN6_32 _IOW('i', 26, struct in6_aliasreq_32) +#define SIOCAIFADDR_IN6_64 _IOW('i', 26, struct in6_aliasreq_64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCSIFPHYADDR_IN6 _IOW('i', 62, struct in6_aliasreq) +#define SIOCSIFPHYADDR_IN6 _IOW('i', 62, struct in6_aliasreq) #ifdef BSD_KERNEL_PRIVATE -#define SIOCSIFPHYADDR_IN6_32 _IOW('i', 62, struct in6_aliasreq_32) -#define SIOCSIFPHYADDR_IN6_64 _IOW('i', 62, struct in6_aliasreq_64) +#define SIOCSIFPHYADDR_IN6_32 _IOW('i', 62, struct in6_aliasreq_32) +#define SIOCSIFPHYADDR_IN6_64 _IOW('i', 62, struct in6_aliasreq_64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCGIFPSRCADDR_IN6 _IOWR('i', 63, struct in6_ifreq) -#define SIOCGIFPDSTADDR_IN6 _IOWR('i', 64, struct in6_ifreq) -#define SIOCGIFAFLAG_IN6 _IOWR('i', 73, struct in6_ifreq) +#define SIOCGIFPSRCADDR_IN6 _IOWR('i', 63, struct in6_ifreq) +#define SIOCGIFPDSTADDR_IN6 _IOWR('i', 64, struct in6_ifreq) +#define SIOCGIFAFLAG_IN6 _IOWR('i', 73, struct in6_ifreq) -#define SIOCGDRLST_IN6 _IOWR('i', 74, struct in6_drlist) +#define SIOCGDRLST_IN6 _IOWR('i', 74, struct in6_drlist) #ifdef BSD_KERNEL_PRIVATE -#define SIOCGDRLST_IN6_32 _IOWR('i', 74, struct in6_drlist_32) -#define SIOCGDRLST_IN6_64 _IOWR('i', 74, struct in6_drlist_64) +#define SIOCGDRLST_IN6_32 _IOWR('i', 74, struct in6_drlist_32) +#define SIOCGDRLST_IN6_64 _IOWR('i', 74, struct in6_drlist_64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCGPRLST_IN6 _IOWR('i', 75, struct in6_prlist) +#define SIOCGPRLST_IN6 _IOWR('i', 75, struct in6_prlist) #ifdef BSD_KERNEL_PRIVATE -#define SIOCGPRLST_IN6_32 _IOWR('i', 75, struct in6_prlist_32) -#define SIOCGPRLST_IN6_64 _IOWR('i', 75, struct in6_prlist_64) +#define SIOCGPRLST_IN6_32 _IOWR('i', 75, struct in6_prlist_32) +#define SIOCGPRLST_IN6_64 _IOWR('i', 75, struct in6_prlist_64) #endif /* BSD_KERNEL_PRIVATE */ -#define OSIOCGIFINFO_IN6 _IOWR('i', 108, struct in6_ondireq) -#define SIOCGIFINFO_IN6 _IOWR('i', 76, struct in6_ondireq) -#define SIOCSNDFLUSH_IN6 _IOWR('i', 77, struct in6_ifreq) +#define OSIOCGIFINFO_IN6 _IOWR('i', 108, struct in6_ondireq) +#define SIOCGIFINFO_IN6 _IOWR('i', 76, struct in6_ondireq) +#define SIOCSNDFLUSH_IN6 _IOWR('i', 77, struct in6_ifreq) -#define SIOCGNBRINFO_IN6 _IOWR('i', 78, struct in6_nbrinfo) +#define SIOCGNBRINFO_IN6 _IOWR('i', 78, struct in6_nbrinfo) #ifdef BSD_KERNEL_PRIVATE -#define SIOCGNBRINFO_IN6_32 _IOWR('i', 78, struct in6_nbrinfo_32) -#define SIOCGNBRINFO_IN6_64 _IOWR('i', 78, struct in6_nbrinfo_64) +#define SIOCGNBRINFO_IN6_32 _IOWR('i', 78, struct in6_nbrinfo_32) +#define SIOCGNBRINFO_IN6_64 _IOWR('i', 78, struct in6_nbrinfo_64) #endif /* BSD_KERNEL_PRIVATE */ -#define 
SIOCSPFXFLUSH_IN6 _IOWR('i', 79, struct in6_ifreq) -#define SIOCSRTRFLUSH_IN6 _IOWR('i', 80, struct in6_ifreq) +#define SIOCSPFXFLUSH_IN6 _IOWR('i', 79, struct in6_ifreq) +#define SIOCSRTRFLUSH_IN6 _IOWR('i', 80, struct in6_ifreq) -#define SIOCGIFALIFETIME_IN6 _IOWR('i', 81, struct in6_ifreq) -#define SIOCSIFALIFETIME_IN6 _IOWR('i', 82, struct in6_ifreq) -#define SIOCGIFSTAT_IN6 _IOWR('i', 83, struct in6_ifreq) -#define SIOCGIFSTAT_ICMP6 _IOWR('i', 84, struct in6_ifreq) +#define SIOCGIFALIFETIME_IN6 _IOWR('i', 81, struct in6_ifreq) +#define SIOCSIFALIFETIME_IN6 _IOWR('i', 82, struct in6_ifreq) +#define SIOCGIFSTAT_IN6 _IOWR('i', 83, struct in6_ifreq) +#define SIOCGIFSTAT_ICMP6 _IOWR('i', 84, struct in6_ifreq) -#define SIOCSDEFIFACE_IN6 _IOWR('i', 85, struct in6_ndifreq) -#define SIOCGDEFIFACE_IN6 _IOWR('i', 86, struct in6_ndifreq) +#define SIOCSDEFIFACE_IN6 _IOWR('i', 85, struct in6_ndifreq) +#define SIOCGDEFIFACE_IN6 _IOWR('i', 86, struct in6_ndifreq) #ifdef BSD_KERNEL_PRIVATE -#define SIOCSDEFIFACE_IN6_32 _IOWR('i', 85, struct in6_ndifreq_32) -#define SIOCSDEFIFACE_IN6_64 _IOWR('i', 85, struct in6_ndifreq_64) -#define SIOCGDEFIFACE_IN6_32 _IOWR('i', 86, struct in6_ndifreq_32) -#define SIOCGDEFIFACE_IN6_64 _IOWR('i', 86, struct in6_ndifreq_64) +#define SIOCSDEFIFACE_IN6_32 _IOWR('i', 85, struct in6_ndifreq_32) +#define SIOCSDEFIFACE_IN6_64 _IOWR('i', 85, struct in6_ndifreq_64) +#define SIOCGDEFIFACE_IN6_32 _IOWR('i', 86, struct in6_ndifreq_32) +#define SIOCGDEFIFACE_IN6_64 _IOWR('i', 86, struct in6_ndifreq_64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCSIFINFO_FLAGS _IOWR('i', 87, struct in6_ndireq) /* XXX */ +#define SIOCSIFINFO_FLAGS _IOWR('i', 87, struct in6_ndireq) /* XXX */ /* N.B.: These 3 ioctls are deprecated and won't work */ -#define SIOCSSCOPE6 _IOW('i', 88, struct in6_ifreq) -#define SIOCGSCOPE6 _IOWR('i', 89, struct in6_ifreq) -#define SIOCGSCOPE6DEF _IOWR('i', 90, struct in6_ifreq) +#define SIOCSSCOPE6 _IOW('i', 88, struct in6_ifreq) +#define SIOCGSCOPE6 _IOWR('i', 89, struct in6_ifreq) +#define SIOCGSCOPE6DEF _IOWR('i', 90, struct in6_ifreq) -#define SIOCSIFPREFIX_IN6 _IOW('i', 100, struct in6_prefixreq) /* set */ -#define SIOCGIFPREFIX_IN6 _IOWR('i', 101, struct in6_prefixreq) /* get */ -#define SIOCDIFPREFIX_IN6 _IOW('i', 102, struct in6_prefixreq) /* del */ -#define SIOCAIFPREFIX_IN6 _IOW('i', 103, struct in6_rrenumreq) /* add */ +#define SIOCSIFPREFIX_IN6 _IOW('i', 100, struct in6_prefixreq) /* set */ +#define SIOCGIFPREFIX_IN6 _IOWR('i', 101, struct in6_prefixreq) /* get */ +#define SIOCDIFPREFIX_IN6 _IOW('i', 102, struct in6_prefixreq) /* del */ +#define SIOCAIFPREFIX_IN6 _IOW('i', 103, struct in6_rrenumreq) /* add */ /* change */ -#define SIOCCIFPREFIX_IN6 _IOW('i', 104, struct in6_rrenumreq) +#define SIOCCIFPREFIX_IN6 _IOW('i', 104, struct in6_rrenumreq) /* set global */ -#define SIOCSGIFPREFIX_IN6 _IOW('i', 105, struct in6_rrenumreq) +#define SIOCSGIFPREFIX_IN6 _IOW('i', 105, struct in6_rrenumreq) /* * multicast routing, get s/g pkt cnt, pkt cnt per interface. 
*/ -#define SIOCGETSGCNT_IN6 _IOWR('u', 28, struct sioc_sg_req6) -#define SIOCGETMIFCNT_IN6 _IOWR('u', 107, struct sioc_mif_req6) +#define SIOCGETSGCNT_IN6 _IOWR('u', 28, struct sioc_sg_req6) +#define SIOCGETMIFCNT_IN6 _IOWR('u', 107, struct sioc_mif_req6) #ifdef BSD_KERNEL_PRIVATE -#define SIOCGETMIFCNT_IN6_32 _IOWR('u', 107, struct sioc_mif_req6_32) -#define SIOCGETMIFCNT_IN6_64 _IOWR('u', 107, struct sioc_mif_req6_64) +#define SIOCGETMIFCNT_IN6_32 _IOWR('u', 107, struct sioc_mif_req6_32) +#define SIOCGETMIFCNT_IN6_64 _IOWR('u', 107, struct sioc_mif_req6_64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCAADDRCTL_POLICY _IOW('u', 108, struct in6_addrpolicy) -#define SIOCDADDRCTL_POLICY _IOW('u', 109, struct in6_addrpolicy) +#define SIOCAADDRCTL_POLICY _IOW('u', 108, struct in6_addrpolicy) +#define SIOCDADDRCTL_POLICY _IOW('u', 109, struct in6_addrpolicy) #ifdef PRIVATE /* * temporary control calls to attach/detach IPv6 to/from an interface. */ -#define SIOCPROTOATTACH_IN6 _IOWR('i', 110, struct in6_aliasreq) +#define SIOCPROTOATTACH_IN6 _IOWR('i', 110, struct in6_aliasreq) #ifdef BSD_KERNEL_PRIVATE -#define SIOCPROTOATTACH_IN6_32 _IOWR('i', 110, struct in6_aliasreq_32) -#define SIOCPROTOATTACH_IN6_64 _IOWR('i', 110, struct in6_aliasreq_64) +#define SIOCPROTOATTACH_IN6_32 _IOWR('i', 110, struct in6_aliasreq_32) +#define SIOCPROTOATTACH_IN6_64 _IOWR('i', 110, struct in6_aliasreq_64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCPROTODETACH_IN6 _IOWR('i', 111, struct in6_ifreq) +#define SIOCPROTODETACH_IN6 _IOWR('i', 111, struct in6_ifreq) /* * start/stop aquiring linklocal on interface */ -#define SIOCLL_START _IOWR('i', 130, struct in6_aliasreq) +#define SIOCLL_START _IOWR('i', 130, struct in6_aliasreq) #ifdef BSD_KERNEL_PRIVATE -#define SIOCLL_START_32 _IOWR('i', 130, struct in6_aliasreq_32) -#define SIOCLL_START_64 _IOWR('i', 130, struct in6_aliasreq_64) +#define SIOCLL_START_32 _IOWR('i', 130, struct in6_aliasreq_32) +#define SIOCLL_START_64 _IOWR('i', 130, struct in6_aliasreq_64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCLL_STOP _IOWR('i', 131, struct in6_ifreq) +#define SIOCLL_STOP _IOWR('i', 131, struct in6_ifreq) /* * accept rtadvd (and stop accepting) on this interface. */ -#define SIOCAUTOCONF_START _IOWR('i', 132, struct in6_ifreq) -#define SIOCAUTOCONF_STOP _IOWR('i', 133, struct in6_ifreq) +#define SIOCAUTOCONF_START _IOWR('i', 132, struct in6_ifreq) +#define SIOCAUTOCONF_STOP _IOWR('i', 133, struct in6_ifreq) /* * add/remove default IPv6 router. */ -#define SIOCDRADD_IN6 _IOWR('u', 134, struct in6_defrouter) +#define SIOCDRADD_IN6 _IOWR('u', 134, struct in6_defrouter) #ifdef BSD_KERNEL_PRIVATE -#define SIOCDRADD_IN6_32 _IOWR('u', 134, struct in6_defrouter_32) -#define SIOCDRADD_IN6_64 _IOWR('u', 134, struct in6_defrouter_64) +#define SIOCDRADD_IN6_32 _IOWR('u', 134, struct in6_defrouter_32) +#define SIOCDRADD_IN6_64 _IOWR('u', 134, struct in6_defrouter_64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCDRDEL_IN6 _IOWR('u', 135, struct in6_defrouter) +#define SIOCDRDEL_IN6 _IOWR('u', 135, struct in6_defrouter) #ifdef BSD_KERNEL_PRIVATE -#define SIOCDRDEL_IN6_32 _IOWR('u', 135, struct in6_defrouter_32) -#define SIOCDRDEL_IN6_64 _IOWR('u', 135, struct in6_defrouter_64) +#define SIOCDRDEL_IN6_32 _IOWR('u', 135, struct in6_defrouter_32) +#define SIOCDRDEL_IN6_64 _IOWR('u', 135, struct in6_defrouter_64) #endif /* BSD_KERNEL_PRIVATE */ /* * enable/disable IPv6 router mode on interface. 
*/ -#define SIOCSETROUTERMODE_IN6 _IOWR('i', 136, struct in6_ifreq) +#define SIOCSETROUTERMODE_IN6 _IOWR('i', 136, struct in6_ifreq) /* * start secure link-local interface addresses */ -#define SIOCLL_CGASTART _IOW('i', 160, struct in6_cgareq) +#define SIOCLL_CGASTART _IOW('i', 160, struct in6_cgareq) #ifdef BSD_KERNEL_PRIVATE -#define SIOCLL_CGASTART_32 _IOW('i', 160, struct in6_cgareq_32) -#define SIOCLL_CGASTART_64 _IOW('i', 160, struct in6_cgareq_64) +#define SIOCLL_CGASTART_32 _IOW('i', 160, struct in6_cgareq_32) +#define SIOCLL_CGASTART_64 _IOW('i', 160, struct in6_cgareq_64) #endif -#define SIOCGIFCGAPREP_IN6 _IOWR('i', 187, struct in6_cgareq) -#define SIOCSIFCGAPREP_IN6 _IOWR('i', 188, struct in6_cgareq) +#define SIOCGIFCGAPREP_IN6 _IOWR('i', 187, struct in6_cgareq) +#define SIOCSIFCGAPREP_IN6 _IOWR('i', 188, struct in6_cgareq) -#define SIOCCLAT46_START _IOWR('i', 189, struct in6_ifreq) -#define SIOCCLAT46_STOP _IOWR('i', 190, struct in6_ifreq) +#define SIOCCLAT46_START _IOWR('i', 189, struct in6_ifreq) +#define SIOCCLAT46_STOP _IOWR('i', 190, struct in6_ifreq) #endif /* PRIVATE */ #ifdef BSD_KERNEL_PRIVATE @@ -740,38 +740,38 @@ void in6_post_msg(struct ifnet *, u_int32_t, struct in6_ifaddr *, uint8_t *mac); * translation between those and the publicly-defined ones below. */ #endif /* BSD_KERNEL_PRIVATE */ -#define IN6_IFF_ANYCAST 0x0001 /* anycast address */ -#define IN6_IFF_TENTATIVE 0x0002 /* tentative address */ -#define IN6_IFF_DUPLICATED 0x0004 /* DAD detected duplicate */ -#define IN6_IFF_DETACHED 0x0008 /* may be detached from the link */ -#define IN6_IFF_DEPRECATED 0x0010 /* deprecated address */ +#define IN6_IFF_ANYCAST 0x0001 /* anycast address */ +#define IN6_IFF_TENTATIVE 0x0002 /* tentative address */ +#define IN6_IFF_DUPLICATED 0x0004 /* DAD detected duplicate */ +#define IN6_IFF_DETACHED 0x0008 /* may be detached from the link */ +#define IN6_IFF_DEPRECATED 0x0010 /* deprecated address */ /* don't perform DAD on this address (used only at first SIOC* call) */ -#define IN6_IFF_NODAD 0x0020 +#define IN6_IFF_NODAD 0x0020 -#define IN6_IFF_AUTOCONF 0x0040 /* autoconfigurable address. */ -#define IN6_IFF_TEMPORARY 0x0080 /* temporary (anonymous) address. */ -#define IN6_IFF_DYNAMIC 0x0100 /* assigned by DHCPv6 service */ -#define IN6_IFF_OPTIMISTIC 0x0200 /* optimistic DAD, i.e. RFC 4429 */ -#define IN6_IFF_SECURED 0x0400 /* cryptographically generated */ +#define IN6_IFF_AUTOCONF 0x0040 /* autoconfigurable address. */ +#define IN6_IFF_TEMPORARY 0x0080 /* temporary (anonymous) address. */ +#define IN6_IFF_DYNAMIC 0x0100 /* assigned by DHCPv6 service */ +#define IN6_IFF_OPTIMISTIC 0x0200 /* optimistic DAD, i.e. RFC 4429 */ +#define IN6_IFF_SECURED 0x0400 /* cryptographically generated */ #ifdef PRIVATE -#define IN6_IFF_SWIFTDAD 0x0800 /* DAD with no delay */ +#define IN6_IFF_SWIFTDAD 0x0800 /* DAD with no delay */ #endif -#define IN6_IFF_CLAT46 0x1000 /* Address reserved for CLAT46 */ -#define IN6_IFF_NOPFX 0x8000 /* Depreciated. Don't use. */ +#define IN6_IFF_CLAT46 0x1000 /* Address reserved for CLAT46 */ +#define IN6_IFF_NOPFX 0x8000 /* Depreciated. Don't use. */ /* Duplicate Address Detection [DAD] in progress. 
*/ -#define IN6_IFF_DADPROGRESS (IN6_IFF_TENTATIVE|IN6_IFF_OPTIMISTIC) +#define IN6_IFF_DADPROGRESS (IN6_IFF_TENTATIVE|IN6_IFF_OPTIMISTIC) /* do not input/output */ -#define IN6_IFF_NOTREADY (IN6_IFF_TENTATIVE|IN6_IFF_DUPLICATED) +#define IN6_IFF_NOTREADY (IN6_IFF_TENTATIVE|IN6_IFF_DUPLICATED) /* SLAAC/DHCPv6 address */ -#define IN6_IFF_NOTMANUAL (IN6_IFF_AUTOCONF|IN6_IFF_DYNAMIC) +#define IN6_IFF_NOTMANUAL (IN6_IFF_AUTOCONF|IN6_IFF_DYNAMIC) #ifdef KERNEL -#define IN6_ARE_SCOPE_CMP(a, b) ((a) - (b)) -#define IN6_ARE_SCOPE_EQUAL(a, b) ((a) == (b)) +#define IN6_ARE_SCOPE_CMP(a, b) ((a) - (b)) +#define IN6_ARE_SCOPE_EQUAL(a, b) ((a) == (b)) #endif /* KERNEL */ #ifdef BSD_KERNEL_PRIVATE @@ -784,22 +784,22 @@ extern u_char inet6ctlerrmap[]; extern u_int32_t in6_maxmtu; /* N.B.: if_inet6data is never freed once set, so we don't need to lock */ -#define in6_ifstat_inc_common(_ifp, _tag, _atomic) do { \ - if (_ifp != NULL && IN6_IFEXTRA(_ifp) != NULL) { \ - if (_atomic) \ - atomic_add_64( \ - &IN6_IFEXTRA(_ifp)->in6_ifstat._tag, 1); \ - else \ - IN6_IFEXTRA(_ifp)->in6_ifstat._tag++; \ - } \ +#define in6_ifstat_inc_common(_ifp, _tag, _atomic) do { \ + if (_ifp != NULL && IN6_IFEXTRA(_ifp) != NULL) { \ + if (_atomic) \ + atomic_add_64( \ + &IN6_IFEXTRA(_ifp)->in6_ifstat._tag, 1); \ + else \ + IN6_IFEXTRA(_ifp)->in6_ifstat._tag++; \ + } \ } while (0) /* atomic version */ -#define in6_ifstat_inc(_ifp, _tag) \ +#define in6_ifstat_inc(_ifp, _tag) \ in6_ifstat_inc_common(_ifp, _tag, TRUE) /* non-atomic version (for fast paths) */ -#define in6_ifstat_inc_na(_ifp, _tag) \ +#define in6_ifstat_inc_na(_ifp, _tag) \ in6_ifstat_inc_common(_ifp, _tag, FALSE) /* @@ -807,36 +807,36 @@ extern u_int32_t in6_maxmtu; * to a given interface (ifnet structure). */ -#define IFP_TO_IA6(ifp, ia) \ - /* struct ifnet *ifp; */ \ - /* struct in6_ifaddr *ia; */ \ -do { \ - struct ifaddr *_ifa; \ - ifnet_lock_assert(ifp, LCK_RW_ASSERT_HELD); \ - for (_ifa = (ifp)->if_addrlist.tqh_first; _ifa != NULL; \ - _ifa = _ifa->ifa_list.tqe_next) { \ - IFA_LOCK(_ifa); \ - if (_ifa->ifa_addr->sa_family == AF_INET6) { \ - IFA_ADDREF_LOCKED(_ifa); \ - IFA_UNLOCK(_ifa); \ - break; \ - } \ - IFA_UNLOCK(_ifa); \ - } \ - (ia) = (struct in6_ifaddr *)_ifa; \ +#define IFP_TO_IA6(ifp, ia) \ + /* struct ifnet *ifp; */ \ + /* struct in6_ifaddr *ia; */ \ +do { \ + struct ifaddr *_ifa; \ + ifnet_lock_assert(ifp, LCK_RW_ASSERT_HELD); \ + for (_ifa = (ifp)->if_addrlist.tqh_first; _ifa != NULL; \ + _ifa = _ifa->ifa_list.tqe_next) { \ + IFA_LOCK(_ifa); \ + if (_ifa->ifa_addr->sa_family == AF_INET6) { \ + IFA_ADDREF_LOCKED(_ifa); \ + IFA_UNLOCK(_ifa); \ + break; \ + } \ + IFA_UNLOCK(_ifa); \ + } \ + (ia) = (struct in6_ifaddr *)_ifa; \ } while (0) /* * IPv6 multicast MLD-layer source entry. */ struct ip6_msource { - RB_ENTRY(ip6_msource) im6s_link; /* RB tree links */ - struct in6_addr im6s_addr; + RB_ENTRY(ip6_msource) im6s_link; /* RB tree links */ + struct in6_addr im6s_addr; struct im6s_st { - uint16_t ex; /* # of exclusive members */ - uint16_t in; /* # of inclusive members */ - } im6s_st[2]; /* state at t0, t1 */ - uint8_t im6s_stp; /* pending query */ + uint16_t ex; /* # of exclusive members */ + uint16_t in; /* # of inclusive members */ + } im6s_st[2]; /* state at t0, t1 */ + uint8_t im6s_stp; /* pending query */ }; RB_HEAD(ip6_msource_tree, ip6_msource); @@ -850,25 +850,25 @@ RB_PROTOTYPE_SC_PREV(__private_extern__, ip6_msource_tree, ip6_msource, * NOTE: overlapping use of struct ip6_msource fields at start. 
*/ struct in6_msource { - RB_ENTRY(ip6_msource) im6s_link; /* Common field */ - struct in6_addr im6s_addr; /* Common field */ - uint8_t im6sl_st[2]; /* state before/at commit */ + RB_ENTRY(ip6_msource) im6s_link; /* Common field */ + struct in6_addr im6s_addr; /* Common field */ + uint8_t im6sl_st[2]; /* state before/at commit */ }; /* * IPv6 multicast PCB-layer group filter descriptor. */ struct in6_mfilter { - struct ip6_msource_tree im6f_sources; /* source list for (S,G) */ - u_long im6f_nsrc; /* # of source entries */ - uint8_t im6f_st[2]; /* state before/at commit */ + struct ip6_msource_tree im6f_sources; /* source list for (S,G) */ + u_long im6f_nsrc; /* # of source entries */ + uint8_t im6f_st[2]; /* state before/at commit */ }; /* * Legacy KAME IPv6 multicast membership descriptor. */ struct in6_multi_mship { - struct in6_multi *i6mm_maddr; /* Multicast address pointer */ + struct in6_multi *i6mm_maddr; /* Multicast address pointer */ LIST_ENTRY(in6_multi_mship) i6mm_chain; /* multicast options chain */ }; @@ -887,10 +887,10 @@ struct in6_ifextra { uint32_t netsig_len; u_int8_t netsig[IFNET_SIGNATURELEN]; struct ipv6_prefix nat64_prefixes[NAT64_MAX_NUM_PREFIXES]; - struct lltable *ii_llt; /* NDP state */ + struct lltable *ii_llt; /* NDP state */ }; #define IN6_IFEXTRA(_ifp) ((struct in6_ifextra *)(_ifp->if_inet6data)) -#define LLTABLE6(ifp) ((IN6_IFEXTRA(ifp) == NULL) ? NULL : IN6_IFEXTRA(ifp)->ii_llt) +#define LLTABLE6(ifp) ((IN6_IFEXTRA(ifp) == NULL) ? NULL : IN6_IFEXTRA(ifp)->ii_llt) #endif /* BSD_KERNEL_PRIVATE */ struct mld_ifinfo; @@ -920,28 +920,28 @@ struct mld_ifinfo; */ struct in6_multi { decl_lck_mtx_data(, in6m_lock); - u_int32_t in6m_refcount; /* reference count */ - u_int32_t in6m_reqcnt; /* request count for this address */ - u_int32_t in6m_debug; /* see ifa_debug flags */ + u_int32_t in6m_refcount; /* reference count */ + u_int32_t in6m_reqcnt; /* request count for this address */ + u_int32_t in6m_debug; /* see ifa_debug flags */ LIST_ENTRY(in6_multi) in6m_entry; /* list glue */ - struct in6_addr in6m_addr; /* IP6 multicast address */ - struct ifnet *in6m_ifp; /* back pointer to ifnet */ - struct ifmultiaddr *in6m_ifma; /* back pointer to ifmultiaddr */ - u_int in6m_state; /* state of the membership */ - u_int in6m_timer; /* MLD6 listener report timer */ + struct in6_addr in6m_addr; /* IP6 multicast address */ + struct ifnet *in6m_ifp; /* back pointer to ifnet */ + struct ifmultiaddr *in6m_ifma; /* back pointer to ifmultiaddr */ + u_int in6m_state; /* state of the membership */ + u_int in6m_timer; /* MLD6 listener report timer */ /* New fields for MLDv2 follow. 
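 */

/*
 * Editorial aside (not part of the patch): in6_msource above repeats the
 * first two members of ip6_msource verbatim, so an entry of either type can
 * live in the same ip6_msource_tree and be re-cast on the way out.  A toy
 * illustration of that common-initial-sequence idiom (the kernel relies on
 * matching layouts; strictly portable C would funnel this through a union):
 */
#include <stdio.h>

struct base_node {              /* plays the role of ip6_msource */
        int key;                /* common field */
        int link;               /* common field (stands in for RB_ENTRY) */
};

struct derived_node {           /* plays the role of in6_msource */
        int key;                /* must match base_node exactly */
        int link;
        char extra_state[2];    /* per-socket state, like im6sl_st[] */
};

static int
node_key(const void *n)
{
        return ((const struct base_node *)n)->key;  /* base view of both */
}

int
main(void)
{
        struct derived_node d = { .key = 7, .link = 0, .extra_state = { 1, 2 } };
        printf("key via base view: %d\n", node_key(&d));
        return 0;
}

/* (aside ends; the MLDv2 comment inside struct in6_multi closes below)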
*/ - struct mld_ifinfo *in6m_mli; /* MLD info */ - SLIST_ENTRY(in6_multi) in6m_dtle; /* detached waiting for rele */ - SLIST_ENTRY(in6_multi) in6m_nrele; /* to-be-released by MLD */ - u_int32_t in6m_nrelecnt; /* deferred release count */ - struct ip6_msource_tree in6m_srcs; /* tree of sources */ - u_long in6m_nsrc; /* # of tree entries */ - - struct ifqueue in6m_scq; /* pending state-change packets */ - struct timeval in6m_lastgsrtv; /* last G-S-R query */ - uint16_t in6m_sctimer; /* state-change timer */ - uint16_t in6m_scrv; /* state-change rexmit count */ + struct mld_ifinfo *in6m_mli; /* MLD info */ + SLIST_ENTRY(in6_multi) in6m_dtle; /* detached waiting for rele */ + SLIST_ENTRY(in6_multi) in6m_nrele; /* to-be-released by MLD */ + u_int32_t in6m_nrelecnt; /* deferred release count */ + struct ip6_msource_tree in6m_srcs; /* tree of sources */ + u_long in6m_nsrc; /* # of tree entries */ + + struct ifqueue in6m_scq; /* pending state-change packets */ + struct timeval in6m_lastgsrtv; /* last G-S-R query */ + uint16_t in6m_sctimer; /* state-change timer */ + uint16_t in6m_scrv; /* state-change rexmit count */ /* * SSM state counters which track state at T0 (the time the last @@ -951,54 +951,54 @@ struct in6_multi { * are maintained here to optimize for common use-cases. */ struct in6m_st { - uint16_t iss_fmode; /* MLD filter mode */ - uint16_t iss_asm; /* # of ASM listeners */ - uint16_t iss_ex; /* # of exclusive members */ - uint16_t iss_in; /* # of inclusive members */ - uint16_t iss_rec; /* # of recorded sources */ - } in6m_st[2]; /* state at t0, t1 */ - - void (*in6m_trace) /* callback fn for tracing refs */ - (struct in6_multi *, int); + uint16_t iss_fmode; /* MLD filter mode */ + uint16_t iss_asm; /* # of ASM listeners */ + uint16_t iss_ex; /* # of exclusive members */ + uint16_t iss_in; /* # of inclusive members */ + uint16_t iss_rec; /* # of recorded sources */ + } in6m_st[2]; /* state at t0, t1 */ + + void (*in6m_trace) /* callback fn for tracing refs */ + (struct in6_multi *, int); }; -#define IN6M_LOCK_ASSERT_HELD(_in6m) \ +#define IN6M_LOCK_ASSERT_HELD(_in6m) \ LCK_MTX_ASSERT(&(_in6m)->in6m_lock, LCK_MTX_ASSERT_OWNED) -#define IN6M_LOCK_ASSERT_NOTHELD(_in6m) \ +#define IN6M_LOCK_ASSERT_NOTHELD(_in6m) \ LCK_MTX_ASSERT(&(_in6m)->in6m_lock, LCK_MTX_ASSERT_NOTOWNED) -#define IN6M_LOCK(_in6m) \ +#define IN6M_LOCK(_in6m) \ lck_mtx_lock(&(_in6m)->in6m_lock) -#define IN6M_LOCK_SPIN(_in6m) \ +#define IN6M_LOCK_SPIN(_in6m) \ lck_mtx_lock_spin(&(_in6m)->in6m_lock) -#define IN6M_CONVERT_LOCK(_in6m) do { \ - IN6M_LOCK_ASSERT_HELD(_in6m); \ - lck_mtx_convert_spin(&(_in6m)->in6m_lock); \ +#define IN6M_CONVERT_LOCK(_in6m) do { \ + IN6M_LOCK_ASSERT_HELD(_in6m); \ + lck_mtx_convert_spin(&(_in6m)->in6m_lock); \ } while (0) -#define IN6M_UNLOCK(_in6m) \ +#define IN6M_UNLOCK(_in6m) \ lck_mtx_unlock(&(_in6m)->in6m_lock) -#define IN6M_ADDREF(_in6m) \ +#define IN6M_ADDREF(_in6m) \ in6m_addref(_in6m, 0) -#define IN6M_ADDREF_LOCKED(_in6m) \ +#define IN6M_ADDREF_LOCKED(_in6m) \ in6m_addref(_in6m, 1) -#define IN6M_REMREF(_in6m) \ +#define IN6M_REMREF(_in6m) \ in6m_remref(_in6m, 0) -#define IN6M_REMREF_LOCKED(_in6m) \ +#define IN6M_REMREF_LOCKED(_in6m) \ in6m_remref(_in6m, 1) /* flags to in6_update_ifa */ -#define IN6_IFAUPDATE_NOWAIT 0x1 /* don't block allocating memory */ -#define IN6_IFAUPDATE_1STADDR 0x2 /* first address on interface */ -#define IN6_IFAUPDATE_NEWADDR 0x4 /* new address on interface */ -#define IN6_IFAUPDATE_DADDELAY 0x8 /* must delay initial DAD probe */ +#define IN6_IFAUPDATE_NOWAIT 
0x1 /* don't block allocating memory */ +#define IN6_IFAUPDATE_1STADDR 0x2 /* first address on interface */ +#define IN6_IFAUPDATE_NEWADDR 0x4 /* new address on interface */ +#define IN6_IFAUPDATE_DADDELAY 0x8 /* must delay initial DAD probe */ struct ip6_moptions; struct sockopt; @@ -1030,24 +1030,24 @@ struct in6_multistep { * * Must be called with in6_multihead_lock held. */ -#define IN6_LOOKUP_MULTI(addr, ifp, in6m) \ - /* struct in6_addr *addr; */ \ - /* struct ifnet *ifp; */ \ - /* struct in6_multi *in6m; */ \ -do { \ - struct in6_multistep _step; \ - IN6_FIRST_MULTI(_step, in6m); \ - while ((in6m) != NULL) { \ - IN6M_LOCK_SPIN(in6m); \ - if ((in6m)->in6m_ifp == (ifp) && \ - IN6_ARE_ADDR_EQUAL(&(in6m)->in6m_addr, (addr))) { \ - IN6M_ADDREF_LOCKED(in6m); \ - IN6M_UNLOCK(in6m); \ - break; \ - } \ - IN6M_UNLOCK(in6m); \ - IN6_NEXT_MULTI(_step, in6m); \ - } \ +#define IN6_LOOKUP_MULTI(addr, ifp, in6m) \ + /* struct in6_addr *addr; */ \ + /* struct ifnet *ifp; */ \ + /* struct in6_multi *in6m; */ \ +do { \ + struct in6_multistep _step; \ + IN6_FIRST_MULTI(_step, in6m); \ + while ((in6m) != NULL) { \ + IN6M_LOCK_SPIN(in6m); \ + if ((in6m)->in6m_ifp == (ifp) && \ + IN6_ARE_ADDR_EQUAL(&(in6m)->in6m_addr, (addr))) { \ + IN6M_ADDREF_LOCKED(in6m); \ + IN6M_UNLOCK(in6m); \ + break; \ + } \ + IN6M_UNLOCK(in6m); \ + IN6_NEXT_MULTI(_step, in6m); \ + } \ } while (0) /* @@ -1059,22 +1059,22 @@ do { \ * * Must be called with in6_multihead_lock held. */ -#define IN6_NEXT_MULTI(step, in6m) \ - /* struct in6_multistep step; */ \ - /* struct in6_multi *in6m; */ \ -do { \ - in6_multihead_lock_assert(LCK_RW_ASSERT_HELD); \ - if (((in6m) = (step).i_in6m) != NULL) \ - (step).i_in6m = (step).i_in6m->in6m_entry.le_next; \ +#define IN6_NEXT_MULTI(step, in6m) \ + /* struct in6_multistep step; */ \ + /* struct in6_multi *in6m; */ \ +do { \ + in6_multihead_lock_assert(LCK_RW_ASSERT_HELD); \ + if (((in6m) = (step).i_in6m) != NULL) \ + (step).i_in6m = (step).i_in6m->in6m_entry.le_next; \ } while (0) -#define IN6_FIRST_MULTI(step, in6m) \ - /* struct in6_multistep step; */ \ - /* struct in6_multi *in6m */ \ -do { \ - in6_multihead_lock_assert(LCK_RW_ASSERT_HELD); \ - (step).i_in6m = in6_multihead.lh_first; \ - IN6_NEXT_MULTI((step), (in6m)); \ +#define IN6_FIRST_MULTI(step, in6m) \ + /* struct in6_multistep step; */ \ + /* struct in6_multi *in6m */ \ +do { \ + in6_multihead_lock_assert(LCK_RW_ASSERT_HELD); \ + (step).i_in6m = in6_multihead.lh_first; \ + IN6_NEXT_MULTI((step), (in6m)); \ } while (0) extern lck_mtx_t *inet6_domain_mutex; @@ -1171,7 +1171,7 @@ extern int in6_getconninfo(struct socket *, sae_connid_t, uint32_t *, uint32_t *, int32_t *, user_addr_t, socklen_t *, user_addr_t, socklen_t *, uint32_t *, user_addr_t, uint32_t *); extern void in6_ip6_to_sockaddr(const struct in6_addr *ip6, u_int16_t port, - struct sockaddr_in6 *sin6, u_int32_t maxlen); + struct sockaddr_in6 *sin6, u_int32_t maxlen); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET6_IN6_VAR_H_ */ diff --git a/bsd/netinet6/ip6_ecn.h b/bsd/netinet6/ip6_ecn.h index cfad473a6..44b430591 100644 --- a/bsd/netinet6/ip6_ecn.h +++ b/bsd/netinet6/ip6_ecn.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2013, 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/netinet6/ip6_forward.c b/bsd/netinet6/ip6_forward.c index e6beab89a..8498211c7 100644 --- a/bsd/netinet6/ip6_forward.c +++ b/bsd/netinet6/ip6_forward.c @@ -131,7 +131,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, int error, type = 0, code = 0; boolean_t proxy = FALSE; struct mbuf *mcopy = NULL; - struct ifnet *ifp, *rcvifp, *origifp; /* maybe unnecessary */ + struct ifnet *ifp, *rcvifp, *origifp; /* maybe unnecessary */ u_int32_t inzone, outzone, len; struct in6_addr src_in6, dst_in6; uint64_t curtime = net_uptime(); @@ -157,14 +157,16 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, if (!srcrt && nd6_prproxy && (rt = ip6forward_rt->ro_rt) != NULL && (rt->rt_flags & RTF_PROXY)) { nd6_proxy_find_fwdroute(m->m_pkthdr.rcvif, ip6forward_rt); - if ((rt = ip6forward_rt->ro_rt) != NULL) + if ((rt = ip6forward_rt->ro_rt) != NULL) { ifscope = rt->rt_ifp->if_index; + } } #if PF pf_mtag = pf_find_mtag(m); - if (pf_mtag != NULL && pf_mtag->pftag_rtableid != IFSCOPE_NONE) + if (pf_mtag != NULL && pf_mtag->pftag_rtableid != IFSCOPE_NONE) { ifscope = pf_mtag->pftag_rtableid; + } /* * If the caller provides a route which is on a different interface @@ -195,7 +197,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, if (ipsec6_in_reject(m, NULL)) { IPSEC_STAT_INCREMENT(ipsec6stat.in_polvio); m_freem(m); - return (NULL); + return NULL; } } #endif /*IPSEC*/ @@ -205,7 +207,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, * Do not forward packets with unspecified source. It was discussed * in July 2000, on ipngwg mailing list. */ - if ((m->m_flags & (M_BCAST|M_MCAST)) != 0 || + if ((m->m_flags & (M_BCAST | M_MCAST)) != 0 || IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { ip6stat.ip6s_cantforward++; @@ -221,14 +223,14 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, if_name(m->m_pkthdr.rcvif)); } m_freem(m); - return (NULL); + return NULL; } if (ip6->ip6_hlim <= IPV6_HLIMDEC) { /* XXX in6_ifstat_inc(rt->rt_ifp, ifs6_in_discard) */ icmp6_error_flag(m, ICMP6_TIME_EXCEEDED, - ICMP6_TIME_EXCEED_TRANSIT, 0, 0); - return (NULL); + ICMP6_TIME_EXCEED_TRANSIT, 0, 0); + return NULL; } /* @@ -245,8 +247,9 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, * Also skip IPsec forwarding path processing as this * packet is not to be forwarded. 
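 */

/*
 * Editorial aside (not part of the patch): the core of ip6_forward() above
 * is the RFC 8200 hop-limit rule — a router must not forward a packet whose
 * hop limit cannot be decremented, and answers with an ICMPv6 Time Exceeded
 * instead; multicast and unspecified-source packets are dropped outright.
 * A hypothetical, userland-shaped distillation of that decision:
 */
#include <stdbool.h>
#include <stdint.h>

#define X_IPV6_HLIMDEC 1        /* mirrors IPV6_HLIMDEC */

enum fwd_verdict { FWD_OK, FWD_ICMP_TIME_EXCEEDED, FWD_SILENT_DROP };

static enum fwd_verdict
forward_hlim_check(uint8_t *hlim, bool src_unspecified, bool dst_multicast)
{
        if (src_unspecified || dst_multicast)
                return FWD_SILENT_DROP;         /* ip6s_cantforward++ */
        if (*hlim <= X_IPV6_HLIMDEC)
                return FWD_ICMP_TIME_EXCEEDED;  /* ICMP6_TIME_EXCEED_TRANSIT */
        *hlim -= X_IPV6_HLIMDEC;                /* safe to forward */
        return FWD_OK;
}

/* (aside ends; the proxy short-circuit in the original code closes below)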
*/ - if (proxy) + if (proxy) { goto skip_ipsec; + } } ip6->ip6_hlim -= IPV6_HLIMDEC; @@ -263,8 +266,9 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, mcopy = m_copy(m, 0, imin(m->m_pkthdr.len, ICMPV6_PLD_MAXLEN)); #if IPSEC - if (ipsec_bypass != 0) + if (ipsec_bypass != 0) { goto skip_ipsec; + } /* get a security policy for this packet */ sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND, IP_FORWARDING, &error); @@ -279,7 +283,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, #endif } m_freem(m); - return (NULL); + return NULL; } error = 0; @@ -287,7 +291,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, /* check policy */ switch (sp->policy) { case IPSEC_POLICY_DISCARD: - case IPSEC_POLICY_GENERATE: + case IPSEC_POLICY_GENERATE: /* * This packet is just discarded. */ @@ -302,7 +306,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, #endif } m_freem(m); - return (NULL); + return NULL; case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: @@ -324,7 +328,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, #endif } m_freem(m); - return (NULL); + return NULL; } /* do IPsec */ break; @@ -337,61 +341,61 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, goto skip_ipsec; } - { - struct ipsec_output_state state; + { + struct ipsec_output_state state; - /* - * All the extension headers will become inaccessible - * (since they can be encrypted). - * Don't panic, we need no more updates to extension headers - * on inner IPv6 packet (since they are now encapsulated). - * - * IPv6 [ESP|AH] IPv6 [extension headers] payload - */ - bzero(&state, sizeof(state)); - state.m = m; - state.dst = NULL; /* update at ipsec6_output_tunnel() */ + /* + * All the extension headers will become inaccessible + * (since they can be encrypted). + * Don't panic, we need no more updates to extension headers + * on inner IPv6 packet (since they are now encapsulated). + * + * IPv6 [ESP|AH] IPv6 [extension headers] payload + */ + bzero(&state, sizeof(state)); + state.m = m; + state.dst = NULL; /* update at ipsec6_output_tunnel() */ - error = ipsec6_output_tunnel(&state, sp, 0); - key_freesp(sp, KEY_SADB_UNLOCKED); - if (state.tunneled == 4) { - ROUTE_RELEASE(&state.ro); - return (NULL); /* packet is gone - sent over IPv4 */ - } + error = ipsec6_output_tunnel(&state, sp, 0); + key_freesp(sp, KEY_SADB_UNLOCKED); + if (state.tunneled == 4) { + ROUTE_RELEASE(&state.ro); + return NULL; /* packet is gone - sent over IPv4 */ + } - m = state.m; - ROUTE_RELEASE(&state.ro); + m = state.m; + ROUTE_RELEASE(&state.ro); - if (error) { - /* mbuf is already reclaimed in ipsec6_output_tunnel. */ - switch (error) { - case EHOSTUNREACH: - case ENETUNREACH: - case EMSGSIZE: - case ENOBUFS: - case ENOMEM: - break; - default: - printf("ip6_output (ipsec): error code %d\n", error); + if (error) { + /* mbuf is already reclaimed in ipsec6_output_tunnel. */ + switch (error) { + case EHOSTUNREACH: + case ENETUNREACH: + case EMSGSIZE: + case ENOBUFS: + case ENOMEM: + break; + default: + printf("ip6_output (ipsec): error code %d\n", error); /* fall through */ - case ENOENT: - /* don't show these error codes to the user */ - break; - } - ip6stat.ip6s_cantforward++; - if (mcopy) { + case ENOENT: + /* don't show these error codes to the user */ + break; + } + ip6stat.ip6s_cantforward++; + if (mcopy) { #if 0 - /* XXX: what icmp ? */ + /* XXX: what icmp ? 
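 */

/*
 * Editorial aside (not part of the patch): the skip_ipsec block below shows
 * the classic cached-route discipline — the cached ip6forward_rt is reused
 * only while it is still usable and still points at the same destination;
 * otherwise it is released and looked up afresh, and a lookup failure turns
 * into ICMP6_DST_UNREACH_NOROUTE.  A hypothetical flat sketch of the shape:
 */
#include <stdbool.h>
#include <string.h>

struct route_cache {
        bool valid;
        unsigned char dst[16];  /* cached destination (IPv6 address) */
        void *rt;               /* opaque route entry */
};

/* assumed resolver; stands in for the kernel's scoped route allocation */
extern void *route_lookup(const unsigned char dst[16]);

static void *
cached_route(struct route_cache *rc, const unsigned char dst[16])
{
        if (rc->valid && memcmp(rc->dst, dst, 16) == 0)
                return rc->rt;                  /* cache hit */
        rc->rt = route_lookup(dst);             /* miss or stale: refill */
        rc->valid = (rc->rt != NULL);
        memcpy(rc->dst, dst, 16);
        return rc->rt;  /* NULL => caller sends "no route" ICMP and drops */
}

/* (aside ends; the removed "#if 0" comment in the patch closes just below)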
*/ #else - m_freem(mcopy); + m_freem(mcopy); #endif + } + m_freem(m); + return NULL; } - m_freem(m); - return (NULL); } - } #endif /* IPSEC */ - skip_ipsec: +skip_ipsec: dst = (struct sockaddr_in6 *)&ip6forward_rt->ro_dst; if ((rt = ip6forward_rt->ro_rt) != NULL) { @@ -426,11 +430,12 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, if (rt == NULL) { ip6stat.ip6s_noroute++; in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_noroute); - if (mcopy) + if (mcopy) { icmp6_error(mcopy, ICMP6_DST_UNREACH, - ICMP6_DST_UNREACH_NOROUTE, 0); + ICMP6_DST_UNREACH_NOROUTE, 0); + } m_freem(m); - return (NULL); + return NULL; } RT_LOCK_ASSERT_HELD(rt); } else if (ROUTE_UNUSABLE(ip6forward_rt) || @@ -452,11 +457,12 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, if ((rt = ip6forward_rt->ro_rt) == NULL) { ip6stat.ip6s_noroute++; in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_noroute); - if (mcopy) + if (mcopy) { icmp6_error(mcopy, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOROUTE, 0); + } m_freem(m); - return (NULL); + return NULL; } RT_LOCK(rt); /* Take an extra ref for ourselves */ @@ -481,7 +487,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, ip6stat.ip6s_cantforward++; ip6stat.ip6s_badscope++; m_freem(m); - return (NULL); + return NULL; } if (in6_setscope(&src_in6, m->m_pkthdr.rcvif, &inzone)) { RT_REMREF_LOCKED(rt); @@ -489,7 +495,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, ip6stat.ip6s_cantforward++; ip6stat.ip6s_badscope++; m_freem(m); - return (NULL); + return NULL; } if (inzone != outzone && !proxy) { @@ -512,10 +518,10 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, RT_UNLOCK(rt); if (mcopy) { icmp6_error(mcopy, ICMP6_DST_UNREACH, - ICMP6_DST_UNREACH_BEYONDSCOPE, 0); + ICMP6_DST_UNREACH_BEYONDSCOPE, 0); } m_freem(m); - return (NULL); + return NULL; } /* @@ -534,7 +540,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, ip6stat.ip6s_cantforward++; ip6stat.ip6s_badscope++; m_freem(m); - return (NULL); + return NULL; } if (m->m_pkthdr.len > rt->rt_ifp->if_mtu) { @@ -557,20 +563,22 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, * encapsulated packet as "rt->rt_ifp". */ sp2 = ipsec6_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND, - IP_FORWARDING, &ipsecerror); + IP_FORWARDING, &ipsecerror); if (sp2) { ipsechdrsiz = ipsec6_hdrsiz(mcopy, - IPSEC_DIR_OUTBOUND, NULL); - if (ipsechdrsiz < mtu) + IPSEC_DIR_OUTBOUND, NULL); + if (ipsechdrsiz < mtu) { mtu -= ipsechdrsiz; + } key_freesp(sp2, KEY_SADB_UNLOCKED); } /* * if mtu becomes less than minimum MTU, * tell minimum MTU (and I'll need to fragment it). 
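 */

/*
 * Editorial aside (not part of the patch): the oversize-packet branch above
 * implements RFC 8201 path-MTU signalling — an IPv6 router never fragments
 * in flight; it returns an ICMPv6 Packet Too Big carrying the link MTU,
 * optionally reduced by the IPsec headers this hop would add, and never
 * advertises less than the 1280-byte IPv6 minimum.  A small distillation:
 */
#include <stdint.h>

#define X_IPV6_MMTU 1280u       /* mirrors IPV6_MMTU */

/* Returns 0 when the packet fits, else the MTU for ICMP6_PACKET_TOO_BIG. */
static uint32_t
mtu_to_advertise(uint32_t pktlen, uint32_t link_mtu, uint32_t ipsec_hdrsiz)
{
        uint32_t mtu = link_mtu;

        if (pktlen <= link_mtu)
                return 0;
        if (ipsec_hdrsiz < mtu)
                mtu -= ipsec_hdrsiz;    /* leave room for ESP/AH on this hop */
        if (mtu < X_IPV6_MMTU)
                mtu = X_IPV6_MMTU;      /* sender must fragment down to 1280 */
        return mtu;
}

/* (aside ends; the original minimum-MTU comment closes just below)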
*/ - if (mtu < IPV6_MMTU) + if (mtu < IPV6_MMTU) { mtu = IPV6_MMTU; + } #endif /* Release extra ref */ RT_REMREF_LOCKED(rt); @@ -582,11 +590,12 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, RT_UNLOCK(rt); } m_freem(m); - return (NULL); - } + return NULL; + } - if (rt->rt_flags & RTF_GATEWAY) + if (rt->rt_flags & RTF_GATEWAY) { dst = (struct sockaddr_in6 *)(void *)rt->rt_gateway; + } /* * If we are to forward the packet using the same interface @@ -599,7 +608,7 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, */ if (!proxy && ip6_sendredirects && rt->rt_ifp == m->m_pkthdr.rcvif && !srcrt && - (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0) { + (rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) == 0) { if ((rt->rt_ifp->if_flags & IFF_POINTOPOINT) != 0) { /* * If the incoming interface is equal to the outgoing @@ -611,12 +620,12 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, * type/code is based on suggestion by Rich Draves. * not sure if it is the best pick. */ - RT_REMREF_LOCKED(rt); /* Release extra ref */ + RT_REMREF_LOCKED(rt); /* Release extra ref */ RT_UNLOCK(rt); icmp6_error(mcopy, ICMP6_DST_UNREACH, - ICMP6_DST_UNREACH_ADDR, 0); + ICMP6_DST_UNREACH_ADDR, 0); m_freem(m); - return (NULL); + return NULL; } type = ND_REDIRECT; } @@ -641,15 +650,15 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, #if 1 if ((0)) #else - if ((rt->rt_flags & (RTF_BLACKHOLE|RTF_REJECT)) == 0) + if ((rt->rt_flags & (RTF_BLACKHOLE | RTF_REJECT)) == 0) #endif { printf("ip6_forward: outgoing interface is loopback. " - "src %s, dst %s, nxt %d, rcvif %s, outif %s\n", - ip6_sprintf(&ip6->ip6_src), - ip6_sprintf(&ip6->ip6_dst), - ip6->ip6_nxt, if_name(m->m_pkthdr.rcvif), - if_name(rt->rt_ifp)); + "src %s, dst %s, nxt %d, rcvif %s, outif %s\n", + ip6_sprintf(&ip6->ip6_src), + ip6_sprintf(&ip6->ip6_dst), + ip6->ip6_nxt, if_name(m->m_pkthdr.rcvif), + if_name(rt->rt_ifp)); } /* we can just use rcvif in forwarding. */ @@ -684,9 +693,10 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, VERIFY(m->m_pkthdr.pkt_flags & PKTF_PROXY_DST); /* Release extra ref */ RT_REMREF(rt); - if (mcopy != NULL) + if (mcopy != NULL) { m_freem(mcopy); - return (m); + } + return m; } /* Mark this packet as being forwarded from another interface */ @@ -745,9 +755,9 @@ ip6_forward(struct mbuf *m, struct route_in6 *ip6forward_rt, ip6stat.ip6s_forward++; in6_ifstat_inc(ifp, ifs6_out_forward); - if (type) + if (type) { ip6stat.ip6s_redirectsent++; - else { + } else { if (mcopy) { goto freecopy; } @@ -759,7 +769,7 @@ senderr: if (mcopy == NULL) { /* Release extra ref */ RT_REMREF(rt); - return (NULL); + return NULL; } switch (error) { case 0: @@ -768,7 +778,7 @@ senderr: icmp6_redirect_output(mcopy, rt); /* Release extra ref */ RT_REMREF(rt); - return (NULL); + return NULL; } #endif goto freecopy; @@ -781,7 +791,7 @@ senderr: /* Tell source to slow down like source quench in IP? 
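 */

/*
 * Editorial aside (not part of the patch): the redirect test earlier in this
 * function encodes RFC 4861 §8.2 — send an ND Redirect only when the packet
 * leaves on the interface it arrived on, was not source routed, and the
 * route was not itself installed or modified by a redirect.  Expressed as a
 * hypothetical predicate with mirror flag values:
 */
#include <stdbool.h>
#include <stdint.h>

#define X_RTF_DYNAMIC   0x10    /* mirrors RTF_DYNAMIC (came from redirect) */
#define X_RTF_MODIFIED  0x20    /* mirrors RTF_MODIFIED */

static bool
should_send_redirect(bool proxying, bool redirects_enabled,
    const void *out_ifp, const void *rcv_ifp, bool source_routed,
    uint32_t rt_flags)
{
        return !proxying && redirects_enabled &&
            out_ifp == rcv_ifp && !source_routed &&
            (rt_flags & (X_RTF_DYNAMIC | X_RTF_MODIFIED)) == 0;
}

/* (aside ends; the original source-quench question closes just below)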
*/ goto freecopy; - case ENETUNREACH: /* shouldn't happen, checked above */ + case ENETUNREACH: /* shouldn't happen, checked above */ case EHOSTUNREACH: case ENETDOWN: case EHOSTDOWN: @@ -793,11 +803,11 @@ senderr: icmp6_error(mcopy, type, code, 0); /* Release extra ref */ RT_REMREF(rt); - return (NULL); + return NULL; - freecopy: +freecopy: m_freem(mcopy); /* Release extra ref */ RT_REMREF(rt); - return (NULL); + return NULL; } diff --git a/bsd/netinet6/ip6_fw.c b/bsd/netinet6/ip6_fw.c index 99d3e8c02..e2dfd716b 100644 --- a/bsd/netinet6/ip6_fw.c +++ b/bsd/netinet6/ip6_fw.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -138,7 +138,7 @@ static int fw6_verbose_limit = IPV6FIREWALL_VERBOSE_LIMIT; static int fw6_verbose_limit = 0; #endif -LIST_HEAD (ip6_fw_head, ip6_fw_chain) ip6_fw_chain; +LIST_HEAD(ip6_fw_head, ip6_fw_chain) ip6_fw_chain; static void ip6fw_kev_post_msg(u_int32_t ); @@ -146,10 +146,10 @@ static void ip6fw_kev_post_msg(u_int32_t ); static int ip6fw_sysctl SYSCTL_HANDLER_ARGS; SYSCTL_DECL(_net_inet6_ip6); -SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Firewall"); -SYSCTL_PROC(_net_inet6_ip6_fw, OID_AUTO, enable, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &ip6_fw_enable, 0, ip6fw_sysctl, "I", "Enable ip6fw"); +SYSCTL_NODE(_net_inet6_ip6, OID_AUTO, fw, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Firewall"); +SYSCTL_PROC(_net_inet6_ip6_fw, OID_AUTO, enable, + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ip6_fw_enable, 0, ip6fw_sysctl, "I", "Enable ip6fw"); SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &fw6_debug, 0, ""); SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, verbose, CTLFLAG_RW | CTLFLAG_LOCKED, &fw6_verbose, 0, ""); SYSCTL_INT(_net_inet6_ip6_fw, OID_AUTO, verbose_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &fw6_verbose_limit, 0, ""); @@ -159,41 +159,42 @@ ip6fw_sysctl SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2) int error; - + error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); - if (error || !req->newptr) - return (error); - + if (error || !req->newptr) { + return error; + } + ip6fw_kev_post_msg(KEV_IP6FW_ENABLE); - + return error; } #endif -#define dprintf(a) do { \ - if (fw6_debug) \ - printf a; \ - } while (0) +#define dprintf(a) do { \ + if (fw6_debug) \ + printf a; \ + } while (0) #define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? 
sizeof(buf) - len : 0 -static int add_entry6 __P((struct ip6_fw_head *chainptr, struct ip6_fw *frwl)); -static int del_entry6 __P((struct ip6_fw_head *chainptr, u_short number)); -static int zero_entry6 __P((struct ip6_fw *frwl)); +static int add_entry6 __P((struct ip6_fw_head *chainptr, struct ip6_fw *frwl)); +static int del_entry6 __P((struct ip6_fw_head *chainptr, u_short number)); +static int zero_entry6 __P((struct ip6_fw *frwl)); static struct ip6_fw *check_ip6fw_struct __P((struct ip6_fw *m)); -static int ip6opts_match __P((struct ip6_hdr **ip6, struct ip6_fw *f, - struct mbuf **m, - int *off, int *nxt, u_short *offset)); -static int port_match6 __P((u_short *portptr, int nports, u_short port, - int range_flag)); -static int tcp6flg_match __P((struct tcphdr *tcp6, struct ip6_fw *f)); -static int icmp6type_match __P((struct icmp6_hdr * icmp, struct ip6_fw * f)); -static void ip6fw_report __P((struct ip6_fw *f, struct ip6_hdr *ip6, - struct ifnet *rif, struct ifnet *oif, int off, int nxt)); - -static int ip6_fw_chk __P((struct ip6_hdr **pip6, - struct ifnet *oif, u_int16_t *cookie, struct mbuf **m)); -static int ip6_fw_ctl __P((struct sockopt *)); +static int ip6opts_match __P((struct ip6_hdr **ip6, struct ip6_fw *f, + struct mbuf **m, + int *off, int *nxt, u_short *offset)); +static int port_match6 __P((u_short *portptr, int nports, u_short port, + int range_flag)); +static int tcp6flg_match __P((struct tcphdr *tcp6, struct ip6_fw *f)); +static int icmp6type_match __P((struct icmp6_hdr * icmp, struct ip6_fw * f)); +static void ip6fw_report __P((struct ip6_fw *f, struct ip6_hdr *ip6, + struct ifnet *rif, struct ifnet *oif, int off, int nxt)); + +static int ip6_fw_chk __P((struct ip6_hdr **pip6, + struct ifnet *oif, u_int16_t *cookie, struct mbuf **m)); +static int ip6_fw_ctl __P((struct sockopt *)); static void cp_to_user_64( struct ip6_fw_64 *userrule_64, struct ip6_fw *rule); static void cp_from_user_64( struct ip6_fw_64 *userrule_64, struct ip6_fw *rule); static void cp_to_user_32( struct ip6_fw_32 *userrule_32, struct ip6_fw *rule); @@ -208,8 +209,9 @@ static __inline int port_match6(u_short *portptr, int nports, u_short port, int range_flag) { - if (!nports) + if (!nports) { return 1; + } if (range_flag) { if (portptr[0] <= port && port <= portptr[1]) { return 1; @@ -228,8 +230,8 @@ port_match6(u_short *portptr, int nports, u_short port, int range_flag) static int tcp6flg_match(struct tcphdr *tcp6, struct ip6_fw *f) { - u_char flg_set, flg_clr; - + u_char flg_set, flg_clr; + /* * If an established connection is required, reject packets that * have only SYN of RST|ACK|SYN set. 
Otherwise, fall through to @@ -237,16 +239,19 @@ tcp6flg_match(struct tcphdr *tcp6, struct ip6_fw *f) */ if ((f->fw_ipflg & IPV6_FW_IF_TCPEST) && ((tcp6->th_flags & (IPV6_FW_TCPF_RST | IPV6_FW_TCPF_ACK | - IPV6_FW_TCPF_SYN)) == IPV6_FW_TCPF_SYN)) + IPV6_FW_TCPF_SYN)) == IPV6_FW_TCPF_SYN)) { return 0; + } flg_set = tcp6->th_flags & f->fw_tcpf; flg_clr = tcp6->th_flags & f->fw_tcpnf; - if (flg_set != f->fw_tcpf) + if (flg_set != f->fw_tcpf) { return 0; - if (flg_clr) + } + if (flg_clr) { return 0; + } return 1; } @@ -256,18 +261,20 @@ icmp6type_match(struct icmp6_hdr *icmp6, struct ip6_fw *f) { int type; - if (!(f->fw_flg & IPV6_FW_F_ICMPBIT)) - return(1); + if (!(f->fw_flg & IPV6_FW_F_ICMPBIT)) { + return 1; + } type = icmp6->icmp6_type; /* check for matching type in the bitmap */ if (type < IPV6_FW_ICMPTYPES_DIM * sizeof(unsigned) * 8 && - (f->fw_icmp6types[type / (sizeof(unsigned) * 8)] & - (1U << (type % (8 * sizeof(unsigned)))))) - return(1); + (f->fw_icmp6types[type / (sizeof(unsigned) * 8)] & + (1U << (type % (8 * sizeof(unsigned)))))) { + return 1; + } - return(0); /* no match */ + return 0; /* no match */ } static int @@ -283,20 +290,21 @@ is_icmp6_query(struct ip6_hdr *ip6, int off) icmp6_type == ICMP6_MEMBERSHIP_QUERY || icmp6_type == ICMP6_WRUREQUEST || icmp6_type == ICMP6_FQDN_QUERY || - icmp6_type == ICMP6_NI_QUERY) - return(1); + icmp6_type == ICMP6_NI_QUERY) { + return 1; + } - return(0); + return 0; } static int ip6opts_match(struct ip6_hdr **pip6, struct ip6_fw *f, struct mbuf **m, - int *off, int *nxt, u_short *offset) + int *off, int *nxt, u_short *offset) { int len; struct ip6_hdr *ip6 = *pip6; struct ip6_ext *ip6e; - u_char opts, nopts, nopts_sve; + u_char opts, nopts, nopts_sve; opts = f->fw_ip6opt; nopts = nopts_sve = f->fw_ip6nopt; @@ -306,10 +314,10 @@ ip6opts_match(struct ip6_hdr **pip6, struct ip6_fw *f, struct mbuf **m, len = ntohs(ip6->ip6_plen) + sizeof(struct ip6_hdr); while (*off < len) { ip6e = (struct ip6_ext *)((caddr_t) ip6 + *off); - if ((*m)->m_len < *off + sizeof(*ip6e)) - goto opts_check; /* XXX */ - - switch(*nxt) { + if ((*m)->m_len < *off + sizeof(*ip6e)) { + goto opts_check; /* XXX */ + } + switch (*nxt) { case IPPROTO_FRAGMENT: if ((*m)->m_len >= *off + sizeof(struct ip6_frag)) { struct ip6_frag *ip6f; @@ -357,16 +365,17 @@ ip6opts_match(struct ip6_hdr **pip6, struct ip6_fw *f, struct mbuf **m, break; } *nxt = ip6e->ip6e_nxt; - } - opts_check: - if (f->fw_ip6opt == f->fw_ip6nopt) /* XXX */ +opts_check: + if (f->fw_ip6opt == f->fw_ip6nopt) { /* XXX */ return 1; + } - if (opts == 0 && nopts == nopts_sve) + if (opts == 0 && nopts == nopts_sve) { return 1; - else + } else { return 0; + } } static @@ -377,19 +386,20 @@ iface_match(struct ifnet *ifp, union ip6_fw_if *ifu, int byname) if (byname) { /* Check unit number (-1 is wildcard) */ if (ifu->fu_via_if.unit != -1 - && ifp->if_unit != ifu->fu_via_if.unit) - return(0); + && ifp->if_unit != ifu->fu_via_if.unit) { + return 0; + } /* Check name */ - if (strncmp(ifp->if_name, ifu->fu_via_if.name, IP6FW_IFNLEN)) - return(0); - return(1); - } else if (!IN6_IS_ADDR_UNSPECIFIED(&ifu->fu_via_ip6)) { /* Zero == wildcard */ + if (strncmp(ifp->if_name, ifu->fu_via_if.name, IP6FW_IFNLEN)) { + return 0; + } + return 1; + } else if (!IN6_IS_ADDR_UNSPECIFIED(&ifu->fu_via_ip6)) { /* Zero == wildcard */ struct ifaddr *ia; ifnet_lock_shared(ifp); for (ia = ifp->if_addrlist.tqh_first; ia; - ia = ia->ifa_list.tqe_next) - { + ia = ia->ifa_list.tqe_next) { IFA_LOCK_SPIN(ia); if (ia->ifa_addr->sa_family != AF_INET6) { 
IFA_UNLOCK(ia); @@ -403,47 +413,49 @@ iface_match(struct ifnet *ifp, union ip6_fw_if *ifu, int byname) } IFA_UNLOCK(ia); ifnet_lock_done(ifp); - return(1); + return 1; } ifnet_lock_done(ifp); - return(0); + return 0; } - return(1); + return 1; } static void ip6fw_report(struct ip6_fw *f, struct ip6_hdr *ip6, - struct ifnet *rif, struct ifnet *oif, int off, int nxt) + struct ifnet *rif, struct ifnet *oif, int off, int nxt) { static int counter; - struct tcphdr *const tcp6 = (struct tcphdr *) ((caddr_t) ip6+ off); - struct udphdr *const udp = (struct udphdr *) ((caddr_t) ip6+ off); - struct icmp6_hdr *const icmp6 = (struct icmp6_hdr *) ((caddr_t) ip6+ off); + struct tcphdr *const tcp6 = (struct tcphdr *) ((caddr_t) ip6 + off); + struct udphdr *const udp = (struct udphdr *) ((caddr_t) ip6 + off); + struct icmp6_hdr *const icmp6 = (struct icmp6_hdr *) ((caddr_t) ip6 + off); int count; const char *action; char action2[32], proto[102], name[18]; int len; count = f ? f->fw_pcnt : ++counter; - if (fw6_verbose_limit != 0 && count > fw6_verbose_limit) + if (fw6_verbose_limit != 0 && count > fw6_verbose_limit) { return; + } /* Print command name */ snprintf(SNPARGS(name, 0), "ip6fw: %d", f ? f->fw_number : -1); action = action2; - if (!f) + if (!f) { action = "Refuse"; - else { + } else { switch (f->fw_flg & IPV6_FW_F_COMMAND) { case IPV6_FW_F_DENY: action = "Deny"; break; case IPV6_FW_F_REJECT: - if (f->fw_reject_code == IPV6_FW_REJECT_RST) + if (f->fw_reject_code == IPV6_FW_REJECT_RST) { action = "Reset"; - else + } else { action = "Unreach"; + } break; case IPV6_FW_F_ACCEPT: action = "Accept"; @@ -463,7 +475,7 @@ ip6fw_report(struct ip6_fw *f, struct ip6_hdr *ip6, snprintf(SNPARGS(action2, 0), "SkipTo %d", f->fw_skipto_rule); break; - default: + default: action = "UNKNOWN"; break; } @@ -473,37 +485,42 @@ ip6fw_report(struct ip6_fw *f, struct ip6_hdr *ip6, case IPPROTO_TCP: len = snprintf(SNPARGS(proto, 0), "TCP [%s]", ip6_sprintf(&ip6->ip6_src)); - if (off > 0) + if (off > 0) { len += snprintf(SNPARGS(proto, len), ":%d ", ntohs(tcp6->th_sport)); - else + } else { len += snprintf(SNPARGS(proto, len), " "); + } len += snprintf(SNPARGS(proto, len), "[%s]", ip6_sprintf(&ip6->ip6_dst)); - if (off > 0) + if (off > 0) { snprintf(SNPARGS(proto, len), ":%d", ntohs(tcp6->th_dport)); + } break; case IPPROTO_UDP: len = snprintf(SNPARGS(proto, 0), "UDP [%s]", ip6_sprintf(&ip6->ip6_src)); - if (off > 0) + if (off > 0) { len += snprintf(SNPARGS(proto, len), ":%d ", ntohs(udp->uh_sport)); - else - len += snprintf(SNPARGS(proto, len), " "); + } else { + len += snprintf(SNPARGS(proto, len), " "); + } len += snprintf(SNPARGS(proto, len), "[%s]", ip6_sprintf(&ip6->ip6_dst)); - if (off > 0) + if (off > 0) { snprintf(SNPARGS(proto, len), ":%d", ntohs(udp->uh_dport)); + } break; case IPPROTO_ICMPV6: - if (off > 0) + if (off > 0) { len = snprintf(SNPARGS(proto, 0), "IPV6-ICMP:%u.%u ", icmp6->icmp6_type, icmp6->icmp6_code); - else + } else { len = snprintf(SNPARGS(proto, 0), "IPV6-ICMP "); + } len += snprintf(SNPARGS(proto, len), "[%s]", ip6_sprintf(&ip6->ip6_src)); snprintf(SNPARGS(proto, len), " [%s]", @@ -517,18 +534,20 @@ ip6fw_report(struct ip6_fw *f, struct ip6_hdr *ip6, break; } - if (oif) + if (oif) { log(LOG_AUTHPRIV | LOG_INFO, "%s %s %s out via %s\n", name, action, proto, if_name(oif)); - else if (rif) + } else if (rif) { log(LOG_AUTHPRIV | LOG_INFO, "%s %s %s in via %s\n", name, action, proto, if_name(rif)); - else + } else { log(LOG_AUTHPRIV | LOG_INFO, "%s %s %s", name, action, proto); - if 
(fw6_verbose_limit != 0 && count == fw6_verbose_limit) - log(LOG_AUTHPRIV | LOG_INFO, "ip6fw: limit reached on entry %d\n", - f ? f->fw_number : -1); + } + if (fw6_verbose_limit != 0 && count == fw6_verbose_limit) { + log(LOG_AUTHPRIV | LOG_INFO, "ip6fw: limit reached on entry %d\n", + f ? f->fw_number : -1); + } } /* @@ -547,14 +566,14 @@ ip6fw_report(struct ip6_fw *f, struct ip6_hdr *ip6, * Return value: * * 0 The packet is to be accepted and routed normally OR - * the packet was denied/rejected and has been dropped; + * the packet was denied/rejected and has been dropped; * in the latter case, *m is equal to NULL upon return. * port Divert the packet to port. */ static int ip6_fw_chk(struct ip6_hdr **pip6, - struct ifnet *oif, u_int16_t *cookie, struct mbuf **m) + struct ifnet *oif, u_int16_t *cookie, struct mbuf **m) { struct ip6_fw_chain *chain; struct ip6_fw *rule = NULL; @@ -563,7 +582,7 @@ ip6_fw_chk(struct ip6_hdr **pip6, u_short offset = 0; int off = sizeof(struct ip6_hdr), nxt = ip6->ip6_nxt; u_short src_port, dst_port; -#ifdef IP6FW_DIVERT_RESTART +#ifdef IP6FW_DIVERT_RESTART u_int16_t skipto = *cookie; #else u_int16_t ignport = ntohs(*cookie); @@ -583,12 +602,15 @@ ip6_fw_chk(struct ip6_hdr **pip6, chain = LIST_FIRST(&ip6_fw_chain); #ifdef IP6FW_DIVERT_RESTART if (skipto) { - if (skipto >= 65535) + if (skipto >= 65535) { goto dropit; + } while (chain && (chain->rule->fw_number <= skipto)) { chain = LIST_NEXT(chain, chain); } - if (! chain) goto dropit; + if (!chain) { + goto dropit; + } } #endif /* IP6FW_DIVERT_RESTART */ for (; chain; chain = LIST_NEXT(chain, chain)) { @@ -596,15 +618,17 @@ ip6_fw_chk(struct ip6_hdr **pip6, if (oif) { /* Check direction outbound */ - if (!(f->fw_flg & IPV6_FW_F_OUT)) + if (!(f->fw_flg & IPV6_FW_F_OUT)) { continue; + } } else { /* Check direction inbound */ - if (!(f->fw_flg & IPV6_FW_F_IN)) + if (!(f->fw_flg & IPV6_FW_F_IN)) { continue; + } } -#define IN6_ARE_ADDR_MASKEQUAL(x,y,z) (\ +#define IN6_ARE_ADDR_MASKEQUAL(x, y, z) (\ (((x)->s6_addr32[0] & (y)->s6_addr32[0]) == (z)->s6_addr32[0]) && \ (((x)->s6_addr32[1] & (y)->s6_addr32[1]) == (z)->s6_addr32[1]) && \ (((x)->s6_addr32[2] & (y)->s6_addr32[2]) == (z)->s6_addr32[2]) && \ @@ -612,13 +636,15 @@ ip6_fw_chk(struct ip6_hdr **pip6, /* If src-addr doesn't match, not this rule. */ if (((f->fw_flg & IPV6_FW_F_INVSRC) != 0) ^ - (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_src,&f->fw_smsk,&f->fw_src))) + (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_src, &f->fw_smsk, &f->fw_src))) { continue; + } /* If dest-addr doesn't match, not this rule. 
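 */

/*
 * Editorial aside (not part of the patch): the address tests above fold
 * "match" and "match-inverted" into one expression — the rule's inversion
 * bit XORed with the negated masked comparison.  The idiom is easier to see
 * standalone:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
masked_eq(const uint32_t addr[4], const uint32_t msk[4],
    const uint32_t want[4])
{
        for (int i = 0; i < 4; i++)
                if ((addr[i] & msk[i]) != want[i])
                        return false;
        return true;
}

/* true plays the role of "continue" (skip this rule) in ip6_fw_chk() */
static bool
rule_skipped(bool invert, const uint32_t addr[4], const uint32_t msk[4],
    const uint32_t want[4])
{
        /*
         * invert == false: skip when the address does NOT match;
         * invert == true:  skip when the address DOES match.
         */
        return invert ^ !masked_eq(addr, msk, want);
}

/* (aside ends; the destination-address check in the patch closes below)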
*/ if (((f->fw_flg & IPV6_FW_F_INVDST) != 0) ^ - (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_dst,&f->fw_dmsk,&f->fw_dst))) + (!IN6_ARE_ADDR_MASKEQUAL(&ip6->ip6_dst, &f->fw_dmsk, &f->fw_dst))) { continue; + } #undef IN6_ARE_ADDR_MASKEQUAL /* Interface check */ @@ -627,52 +653,59 @@ ip6_fw_chk(struct ip6_hdr **pip6, /* Backwards compatibility hack for "via" */ if (!iface || !iface_match(iface, - &f->fw_in_if, f->fw_flg & IPV6_FW_F_OIFNAME)) + &f->fw_in_if, f->fw_flg & IPV6_FW_F_OIFNAME)) { continue; + } } else { /* Check receive interface */ if ((f->fw_flg & IPV6_FW_F_IIFACE) && (!rif || !iface_match(rif, - &f->fw_in_if, f->fw_flg & IPV6_FW_F_IIFNAME))) + &f->fw_in_if, f->fw_flg & IPV6_FW_F_IIFNAME))) { continue; + } /* Check outgoing interface */ if ((f->fw_flg & IPV6_FW_F_OIFACE) && (!oif || !iface_match(oif, - &f->fw_out_if, f->fw_flg & IPV6_FW_F_OIFNAME))) + &f->fw_out_if, f->fw_flg & IPV6_FW_F_OIFNAME))) { continue; + } } /* Check IP options */ - if (!ip6opts_match(&ip6, f, m, &off, &nxt, &offset)) + if (!ip6opts_match(&ip6, f, m, &off, &nxt, &offset)) { continue; + } /* Fragments */ - if ((f->fw_flg & IPV6_FW_F_FRAG) && !offset) + if ((f->fw_flg & IPV6_FW_F_FRAG) && !offset) { continue; + } /* Check protocol; if wildcard, match */ - if (f->fw_prot == IPPROTO_IPV6) + if (f->fw_prot == IPPROTO_IPV6) { goto got_match; + } /* If different, don't match */ - if (nxt != f->fw_prot) + if (nxt != f->fw_prot) { continue; + } -#define PULLUP_TO(len) do { \ - if ((*m)->m_len < (len) \ - && (*m = m_pullup(*m, (len))) == 0) { \ - goto dropit; \ - } \ - *pip6 = ip6 = mtod(*m, struct ip6_hdr *); \ - } while (0) +#define PULLUP_TO(len) do { \ + if ((*m)->m_len < (len) \ + && (*m = m_pullup(*m, (len))) == 0) { \ + goto dropit; \ + } \ + *pip6 = ip6 = mtod(*m, struct ip6_hdr *); \ + } while (0) /* Protocol specific checks */ switch (nxt) { case IPPROTO_TCP: - { + { struct tcphdr *tcp6; - if (offset == 1) { /* cf. RFC 1858 */ + if (offset == 1) { /* cf. RFC 1858 */ PULLUP_TO(off + 4); /* XXX ? */ goto bogusfrag; } @@ -683,24 +716,26 @@ ip6_fw_chk(struct ip6_hdr **pip6, * we consider the rule a non-match. */ if (f->fw_nports != 0 || - f->fw_tcpf != f->fw_tcpnf) + f->fw_tcpf != f->fw_tcpnf) { continue; + } break; } PULLUP_TO(off + 14); tcp6 = (struct tcphdr *) ((caddr_t)ip6 + off); if (((f->fw_tcpf != f->fw_tcpnf) || - (f->fw_ipflg & IPV6_FW_IF_TCPEST)) && - !tcp6flg_match(tcp6, f)) + (f->fw_ipflg & IPV6_FW_IF_TCPEST)) && + !tcp6flg_match(tcp6, f)) { continue; + } src_port = ntohs(tcp6->th_sport); dst_port = ntohs(tcp6->th_dport); goto check_ports; - } + } case IPPROTO_UDP: - { + { struct udphdr *udp; if (offset != 0) { @@ -709,8 +744,9 @@ ip6_fw_chk(struct ip6_hdr **pip6, * rule specifies a port, we consider the rule * a non-match. 
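 */

/*
 * Editorial aside (not part of the patch): PULLUP_TO() above guards every
 * transport-header dereference — an mbuf chain may split a TCP/UDP header
 * across buffers, so the code first makes the leading bytes contiguous and
 * re-derives its header pointer, dropping the packet when that fails.  A
 * hypothetical flat-buffer analogue of the same discipline:
 */
#include <stddef.h>
#include <stdint.h>

/*
 * Returns a pointer to `need` contiguous bytes at `off`, or NULL when the
 * packet is too short — the caller's equivalent of "goto dropit".
 */
static const uint8_t *
pullup_to(const uint8_t *pkt, size_t pktlen, size_t off, size_t need)
{
        if (off > pktlen || pktlen - off < need)
                return NULL;
        return pkt + off;
}

/* usage: 14 bytes reaches the TCP flags, as in PULLUP_TO(off + 14) above */

/* (aside ends; the UDP-fragment comment in the patch closes just below)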
*/ - if (f->fw_nports != 0) + if (f->fw_nports != 0) { continue; + } break; } @@ -721,32 +757,37 @@ ip6_fw_chk(struct ip6_hdr **pip6, check_ports: if (!port_match6(&f->fw_pts[0], IPV6_FW_GETNSRCP(f), src_port, - f->fw_flg & IPV6_FW_F_SRNG)) + f->fw_flg & IPV6_FW_F_SRNG)) { continue; + } if (!port_match6(&f->fw_pts[IPV6_FW_GETNSRCP(f)], IPV6_FW_GETNDSTP(f), dst_port, - f->fw_flg & IPV6_FW_F_DRNG)) + f->fw_flg & IPV6_FW_F_DRNG)) { continue; + } break; - } + } case IPPROTO_ICMPV6: - { + { struct icmp6_hdr *icmp; - if (offset != 0) /* Type isn't valid */ + if (offset != 0) { /* Type isn't valid */ break; + } PULLUP_TO(off + 2); icmp = (struct icmp6_hdr *) ((caddr_t)ip6 + off); - if (!icmp6type_match(icmp, f)) + if (!icmp6type_match(icmp, f)) { continue; + } break; - } + } #undef PULLUP_TO bogusfrag: - if (fw6_verbose) + if (fw6_verbose) { ip6fw_report(NULL, ip6, rif, oif, off, nxt); + } goto dropit; } @@ -756,8 +797,9 @@ got_match: switch (f->fw_flg & IPV6_FW_F_COMMAND) { case IPV6_FW_F_DIVERT: case IPV6_FW_F_TEE: - if (f->fw_divert_port == ignport) + if (f->fw_divert_port == ignport) { continue; /* ignore this rule */ + } break; } @@ -768,13 +810,14 @@ got_match: f->timestamp = timenow.tv_sec; /* Log to console if desired */ - if ((f->fw_flg & IPV6_FW_F_PRN) && fw6_verbose) + if ((f->fw_flg & IPV6_FW_F_PRN) && fw6_verbose) { ip6fw_report(f, ip6, rif, oif, off, nxt); + } /* Take appropriate action */ switch (f->fw_flg & IPV6_FW_F_COMMAND) { case IPV6_FW_F_ACCEPT: - return(0); + return 0; case IPV6_FW_F_COUNT: continue; case IPV6_FW_F_DIVERT: @@ -783,7 +826,7 @@ got_match: #else *cookie = htons(f->fw_divert_port); #endif /* IP6FW_DIVERT_RESTART */ - return(f->fw_divert_port); + return f->fw_divert_port; case IPV6_FW_F_TEE: /* * XXX someday tee packet here, but beware that you @@ -798,12 +841,12 @@ got_match: #ifdef DIAGNOSTIC while (chain->chain.le_next && chain->chain.le_next->rule->fw_number - < f->fw_skipto_rule) + < f->fw_skipto_rule) #else while (chain->chain.le_next->rule->fw_number < f->fw_skipto_rule) #endif - chain = chain->chain.le_next; + { chain = chain->chain.le_next;} continue; } @@ -814,8 +857,9 @@ got_match: #ifdef DIAGNOSTIC /* Rule 65535 should always be there and should always match */ - if (!chain) + if (!chain) { panic("ip6_fw: chain"); + } #endif /* @@ -828,13 +872,13 @@ got_match: */ if ((rule->fw_flg & IPV6_FW_F_COMMAND) == IPV6_FW_F_REJECT && (nxt != IPPROTO_ICMPV6 || is_icmp6_query(ip6, off)) - && !((*m)->m_flags & (M_BCAST|M_MCAST)) + && !((*m)->m_flags & (M_BCAST | M_MCAST)) && !IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { switch (rule->fw_reject_code) { case IPV6_FW_REJECT_RST: - { + { struct tcphdr *const tcp = - (struct tcphdr *) ((caddr_t)ip6 + off); + (struct tcphdr *) ((caddr_t)ip6 + off); struct { struct ip6_hdr ip6; struct tcphdr th; @@ -842,8 +886,9 @@ got_match: tcp_seq ack, seq; int flags; - if (offset != 0 || (tcp->th_flags & TH_RST)) + if (offset != 0 || (tcp->th_flags & TH_RST)) { break; + } ti.ip6 = *ip6; ti.th = *tcp; @@ -858,30 +903,31 @@ got_match: ack = ti.th.th_seq; if (((*m)->m_flags & M_PKTHDR) != 0) { ack += (*m)->m_pkthdr.len - off - - (ti.th.th_off << 2); + - (ti.th.th_off << 2); } else if (ip6->ip6_plen) { ack += ntohs(ip6->ip6_plen) + sizeof(*ip6) - - off - (ti.th.th_off << 2); + - off - (ti.th.th_off << 2); } else { m_freem(*m); *m = 0; break; } seq = 0; - flags = TH_RST|TH_ACK; + flags = TH_RST | TH_ACK; } bcopy(&ti, ip6, sizeof(ti)); bzero(&tra, sizeof(tra)); tra.ifscope = IFSCOPE_NONE; tra.awdl_unrestricted = 1; tcp_respond(NULL, ip6, 
(struct tcphdr *)(ip6 + 1), - *m, ack, seq, flags, &tra); + *m, ack, seq, flags, &tra); *m = NULL; break; - } - default: /* Send an ICMP unreachable using code */ - if (oif) + } + default: /* Send an ICMP unreachable using code */ + if (oif) { (*m)->m_pkthdr.rcvif = oif; + } icmp6_error(*m, ICMP6_DST_UNREACH, rule->fw_reject_code, 0); *m = NULL; @@ -897,7 +943,7 @@ dropit: m_freem(*m); *m = NULL; } - return(0); + return 0; } static int @@ -911,9 +957,13 @@ add_entry6(struct ip6_fw_head *chainptr, struct ip6_fw *frwl) ftmp = _MALLOC(sizeof *ftmp, M_IP6FW, M_WAITOK); if (!fwc || !ftmp) { dprintf(("%s malloc said no\n", err_prefix)); - if (fwc) FREE(fwc, M_IP6FW); - if (ftmp) FREE(ftmp, M_IP6FW); - return (ENOSPC); + if (fwc) { + FREE(fwc, M_IP6FW); + } + if (ftmp) { + FREE(ftmp, M_IP6FW); + } + return ENOSPC; } bcopy(frwl, ftmp, sizeof(struct ip6_fw)); @@ -921,27 +971,33 @@ add_entry6(struct ip6_fw_head *chainptr, struct ip6_fw *frwl) ftmp->fw_pcnt = 0L; ftmp->fw_bcnt = 0L; fwc->rule = ftmp; - + if (!chainptr->lh_first) { LIST_INSERT_HEAD(chainptr, fwc, chain); - return(0); - } else if (ftmp->fw_number == (u_short)-1) { - if (fwc) FREE(fwc, M_IP6FW); - if (ftmp) FREE(ftmp, M_IP6FW); + return 0; + } else if (ftmp->fw_number == (u_short) - 1) { + if (fwc) { + FREE(fwc, M_IP6FW); + } + if (ftmp) { + FREE(ftmp, M_IP6FW); + } dprintf(("%s bad rule number\n", err_prefix)); - return (EINVAL); - } + return EINVAL; + } /* If entry number is 0, find highest numbered rule and add 100 */ if (ftmp->fw_number == 0) { for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) { - if (fcp->rule->fw_number != (u_short)-1) + if (fcp->rule->fw_number != (u_short) - 1) { nbr = fcp->rule->fw_number; - else + } else { break; + } } - if (nbr < (u_short)-1 - 100) + if (nbr < (u_short) - 1 - 100) { nbr += 100; + } ftmp->fw_number = nbr; } @@ -960,7 +1016,7 @@ add_entry6(struct ip6_fw_head *chainptr, struct ip6_fw *frwl) } bcopy(ftmp, frwl, sizeof(struct ip6_fw)); - return (0); + return 0; } static int @@ -969,7 +1025,7 @@ del_entry6(struct ip6_fw_head *chainptr, u_short number) struct ip6_fw_chain *fcp; fcp = chainptr->lh_first; - if (number != (u_short)-1) { + if (number != (u_short) - 1) { for (; fcp; fcp = fcp->chain.le_next) { if (fcp->rule->fw_number == number) { LIST_REMOVE(fcp, chain); @@ -980,7 +1036,7 @@ del_entry6(struct ip6_fw_head *chainptr, u_short number) } } - return (EINVAL); + return EINVAL; } static int @@ -993,22 +1049,24 @@ zero_entry6(struct ip6_fw *frwl) * same number, so we don't stop after finding the first * match if zeroing a specific entry. 
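 */

/*
 * Editorial aside (not part of the patch): add_entry6() above auto-numbers
 * rules — a rule submitted as number 0 lands 100 past the highest explicit
 * rule, leaving gaps for later insertions, while 65535 ((u_short)-1) is
 * reserved for the always-present default rule.  Sketched over a sorted
 * array instead of the kernel's linked chain:
 */
#include <stdint.h>

#define X_RESERVED_RULE 0xffffu         /* mirrors (u_short)-1 */

static uint16_t
auto_number(const uint16_t *sorted_numbers, int n, uint16_t requested)
{
        uint16_t nbr = 0;

        if (requested != 0)
                return requested;               /* caller picked a slot */
        for (int i = 0; i < n; i++) {
                if (sorted_numbers[i] == X_RESERVED_RULE)
                        break;                  /* stop at the default rule */
                nbr = sorted_numbers[i];
        }
        if (nbr < X_RESERVED_RULE - 100)
                nbr += 100;                     /* leave room between rules */
        return nbr;
}

/* (aside ends; the zero_entry6() duplicate-number comment closes below)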
*/ - for (fcp = ip6_fw_chain.lh_first; fcp; fcp = fcp->chain.le_next) + for (fcp = ip6_fw_chain.lh_first; fcp; fcp = fcp->chain.le_next) { if (!frwl || frwl->fw_number == 0 || frwl->fw_number == fcp->rule->fw_number) { fcp->rule->fw_bcnt = fcp->rule->fw_pcnt = 0; fcp->rule->timestamp = 0; } + } if (fw6_verbose) { - if (frwl) + if (frwl) { log(LOG_AUTHPRIV | LOG_NOTICE, "ip6fw: Entry %d cleared.\n", frwl->fw_number); - else + } else { log(LOG_AUTHPRIV | LOG_NOTICE, "ip6fw: Accounting cleared.\n"); + } } - return(0); + return 0; } static struct ip6_fw * @@ -1018,45 +1076,45 @@ check_ip6fw_struct(struct ip6_fw *frwl) if ((frwl->fw_flg & ~IPV6_FW_F_MASK) != 0) { dprintf(("%s undefined flag bits set (flags=%x)\n", err_prefix, frwl->fw_flg)); - return (NULL); + return NULL; } /* Must apply to incoming or outgoing (or both) */ if (!(frwl->fw_flg & (IPV6_FW_F_IN | IPV6_FW_F_OUT))) { dprintf(("%s neither in nor out\n", err_prefix)); - return (NULL); + return NULL; } /* Empty interface name is no good */ if (((frwl->fw_flg & IPV6_FW_F_IIFNAME) - && !*frwl->fw_in_if.fu_via_if.name) + && !*frwl->fw_in_if.fu_via_if.name) || ((frwl->fw_flg & IPV6_FW_F_OIFNAME) - && !*frwl->fw_out_if.fu_via_if.name)) { + && !*frwl->fw_out_if.fu_via_if.name)) { dprintf(("%s empty interface name\n", err_prefix)); - return (NULL); + return NULL; } /* Sanity check interface matching */ if ((frwl->fw_flg & IF6_FW_F_VIAHACK) == IF6_FW_F_VIAHACK) { - ; /* allow "via" backwards compatibility */ + ; /* allow "via" backwards compatibility */ } else if ((frwl->fw_flg & IPV6_FW_F_IN) && (frwl->fw_flg & IPV6_FW_F_OIFACE)) { dprintf(("%s outgoing interface check on incoming\n", err_prefix)); - return (NULL); + return NULL; } /* Sanity check port ranges */ if ((frwl->fw_flg & IPV6_FW_F_SRNG) && IPV6_FW_GETNSRCP(frwl) < 2) { dprintf(("%s src range set but n_src_p=%d\n", err_prefix, IPV6_FW_GETNSRCP(frwl))); - return (NULL); + return NULL; } if ((frwl->fw_flg & IPV6_FW_F_DRNG) && IPV6_FW_GETNDSTP(frwl) < 2) { dprintf(("%s dst range set but n_dst_p=%d\n", err_prefix, IPV6_FW_GETNDSTP(frwl))); - return (NULL); + return NULL; } if (IPV6_FW_GETNSRCP(frwl) + IPV6_FW_GETNDSTP(frwl) > IPV6_FW_MAX_PORTS) { dprintf(("%s too many ports (%d+%d)\n", err_prefix, IPV6_FW_GETNSRCP(frwl), IPV6_FW_GETNDSTP(frwl))); - return (NULL); + return NULL; } /* * Protocols other than TCP/UDP don't use port range @@ -1066,7 +1124,7 @@ check_ip6fw_struct(struct ip6_fw *frwl) (IPV6_FW_GETNSRCP(frwl) || IPV6_FW_GETNDSTP(frwl))) { dprintf(("%s port(s) specified for non TCP/UDP rule\n", err_prefix)); - return(NULL); + return NULL; } /* @@ -1075,46 +1133,45 @@ check_ip6fw_struct(struct ip6_fw *frwl) * to enforce whatever policy they deem appropriate. 
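 */

/*
 * Editorial aside (not part of the patch): the check that follows rejects
 * rules whose address template has bits set outside its own mask.  Such a
 * rule can never match — every packet address is ANDed with the mask before
 * comparison, so those extra bits are unreachable.  Reduced to essentials:
 */
#include <stdbool.h>
#include <stdint.h>

static bool
rule_can_match(const uint32_t tmpl[4], const uint32_t msk[4])
{
        for (int i = 0; i < 4; i++)
                if (tmpl[i] & ~msk[i])
                        return false;   /* dead rule: template outside mask */
        return true;
}

/*
 * e.g. a template of 2001:db8::1 with a /64 mask fails: the low bits of the
 * template are non-zero, yet the mask strips them from every packet.
 */

/* (aside ends; the original address-checking comment closes just below)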
*/ if ((frwl->fw_src.s6_addr32[0] & (~frwl->fw_smsk.s6_addr32[0])) || - (frwl->fw_src.s6_addr32[1] & (~frwl->fw_smsk.s6_addr32[1])) || - (frwl->fw_src.s6_addr32[2] & (~frwl->fw_smsk.s6_addr32[2])) || - (frwl->fw_src.s6_addr32[3] & (~frwl->fw_smsk.s6_addr32[3])) || - (frwl->fw_dst.s6_addr32[0] & (~frwl->fw_dmsk.s6_addr32[0])) || - (frwl->fw_dst.s6_addr32[1] & (~frwl->fw_dmsk.s6_addr32[1])) || - (frwl->fw_dst.s6_addr32[2] & (~frwl->fw_dmsk.s6_addr32[2])) || - (frwl->fw_dst.s6_addr32[3] & (~frwl->fw_dmsk.s6_addr32[3]))) { + (frwl->fw_src.s6_addr32[1] & (~frwl->fw_smsk.s6_addr32[1])) || + (frwl->fw_src.s6_addr32[2] & (~frwl->fw_smsk.s6_addr32[2])) || + (frwl->fw_src.s6_addr32[3] & (~frwl->fw_smsk.s6_addr32[3])) || + (frwl->fw_dst.s6_addr32[0] & (~frwl->fw_dmsk.s6_addr32[0])) || + (frwl->fw_dst.s6_addr32[1] & (~frwl->fw_dmsk.s6_addr32[1])) || + (frwl->fw_dst.s6_addr32[2] & (~frwl->fw_dmsk.s6_addr32[2])) || + (frwl->fw_dst.s6_addr32[3] & (~frwl->fw_dmsk.s6_addr32[3]))) { dprintf(("%s rule never matches\n", err_prefix)); - return(NULL); + return NULL; } if ((frwl->fw_flg & IPV6_FW_F_FRAG) && - (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { + (frwl->fw_prot == IPPROTO_UDP || frwl->fw_prot == IPPROTO_TCP)) { if (frwl->fw_nports) { dprintf(("%s cannot mix 'frag' and ports\n", err_prefix)); - return(NULL); + return NULL; } if (frwl->fw_prot == IPPROTO_TCP && - frwl->fw_tcpf != frwl->fw_tcpnf) { + frwl->fw_tcpf != frwl->fw_tcpnf) { dprintf(("%s cannot mix 'frag' with TCP flags\n", err_prefix)); - return(NULL); + return NULL; } } /* Check command specific stuff */ - switch (frwl->fw_flg & IPV6_FW_F_COMMAND) - { + switch (frwl->fw_flg & IPV6_FW_F_COMMAND) { case IPV6_FW_F_REJECT: if (frwl->fw_reject_code >= 0x100 && !(frwl->fw_prot == IPPROTO_TCP - && frwl->fw_reject_code == IPV6_FW_REJECT_RST)) { + && frwl->fw_reject_code == IPV6_FW_REJECT_RST)) { dprintf(("%s unknown reject code\n", err_prefix)); - return(NULL); + return NULL; } break; - case IPV6_FW_F_DIVERT: /* Diverting to port zero is invalid */ + case IPV6_FW_F_DIVERT: /* Diverting to port zero is invalid */ case IPV6_FW_F_TEE: if (frwl->fw_divert_port == 0) { dprintf(("%s can't divert to port 0\n", err_prefix)); - return (NULL); + return NULL; } break; case IPV6_FW_F_DENY: @@ -1124,7 +1181,7 @@ check_ip6fw_struct(struct ip6_fw *frwl) break; default: dprintf(("%s invalid command\n", err_prefix)); - return(NULL); + return NULL; } return frwl; @@ -1133,17 +1190,16 @@ check_ip6fw_struct(struct ip6_fw *frwl) static void ip6fw_kev_post_msg(u_int32_t event_code) { - struct kev_msg ev_msg; + struct kev_msg ev_msg; bzero(&ev_msg, sizeof(struct kev_msg)); - + ev_msg.vendor_code = KEV_VENDOR_APPLE; ev_msg.kev_class = KEV_FIREWALL_CLASS; ev_msg.kev_subclass = KEV_IP6FW_SUBCLASS; ev_msg.event_code = event_code; kev_post_msg(&ev_msg); - } @@ -1162,7 +1218,7 @@ cp_to_user_64( struct ip6_fw_64 *userrule_64, struct ip6_fw *rule) userrule_64->fw_flg = rule->fw_flg; userrule_64->fw_ipflg = rule->fw_ipflg; bcopy( rule->fw_pts, userrule_64->fw_pts, IPV6_FW_MAX_PORTS); - userrule_64->fw_ip6opt= rule->fw_ip6opt; + userrule_64->fw_ip6opt = rule->fw_ip6opt; userrule_64->fw_ip6nopt = rule->fw_ip6nopt; userrule_64->fw_tcpf = rule->fw_tcpf; userrule_64->fw_tcpnf = rule->fw_tcpnf; @@ -1220,7 +1276,7 @@ cp_to_user_32( struct ip6_fw_32 *userrule_32, struct ip6_fw *rule) userrule_32->fw_flg = rule->fw_flg; userrule_32->fw_ipflg = rule->fw_ipflg; bcopy( rule->fw_pts, userrule_32->fw_pts, IPV6_FW_MAX_PORTS); - userrule_32->fw_ip6opt = rule->fw_ip6opt ; + 
userrule_32->fw_ip6opt = rule->fw_ip6opt; userrule_32->fw_ip6nopt = rule->fw_ip6nopt; userrule_32->fw_tcpf = rule->fw_tcpf; userrule_32->fw_tcpnf = rule->fw_tcpnf; @@ -1268,150 +1324,152 @@ ip6_fw_ctl(struct sockopt *sopt) int error = 0; int valsize; struct ip6_fw rule; - int is64user=0; - size_t userrulesize; + int is64user = 0; + size_t userrulesize; if (securelevel >= 3 && - (sopt->sopt_dir != SOPT_GET || sopt->sopt_name != IPV6_FW_GET)) - return (EPERM); + (sopt->sopt_dir != SOPT_GET || sopt->sopt_name != IPV6_FW_GET)) { + return EPERM; + } - if ( proc_is64bit(sopt->sopt_p) ){ + if (proc_is64bit(sopt->sopt_p)) { is64user = 1; - userrulesize = sizeof( struct ip6_fw_64 ); - } else - userrulesize = sizeof( struct ip6_fw_32 ); - + userrulesize = sizeof(struct ip6_fw_64); + } else { + userrulesize = sizeof(struct ip6_fw_32); + } + /* We ALWAYS expect the client to pass in a rule structure so that we can * check the version of the API that they are using. In the case of a * IPV6_FW_GET operation, the first rule of the output buffer passed to us * must have the version set. */ - if (!sopt->sopt_val || sopt->sopt_valsize < userrulesize) return EINVAL; + if (!sopt->sopt_val || sopt->sopt_valsize < userrulesize) { + return EINVAL; + } /* save sopt->sopt_valsize */ valsize = sopt->sopt_valsize; - - if (is64user){ + + if (is64user) { struct ip6_fw_64 userrule_64; - - if ((error = sooptcopyin(sopt, &userrule_64, userrulesize, userrulesize))) + + if ((error = sooptcopyin(sopt, &userrule_64, userrulesize, userrulesize))) { return error; - + } + cp_from_user_64( &userrule_64, &rule ); - } - else { + } else { struct ip6_fw_32 userrule_32; - - if ((error = sooptcopyin(sopt, &userrule_32, userrulesize, userrulesize))) + + if ((error = sooptcopyin(sopt, &userrule_32, userrulesize, userrulesize))) { return error; - + } + cp_from_user_32( &userrule_32, &rule ); } - - if (rule.version != IPV6_FW_CURRENT_API_VERSION) return EINVAL; - rule.version = 0xFFFFFFFF; /* version is meaningless once rules "make it in the door". */ - switch (sopt->sopt_name) + if (rule.version != IPV6_FW_CURRENT_API_VERSION) { + return EINVAL; + } + rule.version = 0xFFFFFFFF; /* version is meaningless once rules "make it in the door". 
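 */

/*
 * Editorial aside (not part of the patch): ip6_fw_ctl() above is a textbook
 * mixed-ABI sockopt handler — a rule arriving from a 32-bit process has a
 * different size and layout than one from a 64-bit process, so the kernel
 * sizes the copyin by the caller's ABI, converts to one native struct, and
 * validates the API version before acting.  The shape, with hypothetical
 * stand-in types and memcpy() in place of sooptcopyin():
 */
#include <stdint.h>
#include <string.h>

struct rule32 { uint32_t version; uint32_t context; uint16_t number; };
struct rule64 { uint32_t version; uint64_t context; uint16_t number; };
struct rule   { uint32_t version; uint64_t context; uint16_t number; };

#define X_API_VERSION 20        /* mirrors IPV6_FW_CURRENT_API_VERSION */

static int
decode_rule(const void *ubuf, size_t ulen, int is64user, struct rule *out)
{
        if (is64user) {
                struct rule64 u;
                if (ulen < sizeof(u))
                        return -1;                      /* EINVAL */
                memcpy(&u, ubuf, sizeof(u));
                out->version = u.version;
                out->context = u.context;
                out->number  = u.number;
        } else {
                struct rule32 u;
                if (ulen < sizeof(u))
                        return -1;
                memcpy(&u, ubuf, sizeof(u));
                out->version = u.version;
                out->context = (uint64_t)u.context;     /* widen 32-bit ptr */
                out->number  = u.number;
        }
        return (out->version == X_API_VERSION) ? 0 : -1;
}

/* (aside ends; the "make it in the door" comment closes just below)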
*/ + + switch (sopt->sopt_name) { + case IPV6_FW_GET: { - case IPV6_FW_GET: - { - struct ip6_fw_chain *fcp; - struct ip6_fw *buf; - size_t size = 0; - size_t rulesize = 0; - - if ( is64user ) - rulesize = sizeof(struct ip6_fw_64 ); - else - rulesize = sizeof(struct ip6_fw_32 ); - - LIST_FOREACH(fcp, &ip6_fw_chain, chain) - size += rulesize; + struct ip6_fw_chain *fcp; + struct ip6_fw *buf; + size_t size = 0; + size_t rulesize = 0; + + if (is64user) { + rulesize = sizeof(struct ip6_fw_64); + } else { + rulesize = sizeof(struct ip6_fw_32); + } + + LIST_FOREACH(fcp, &ip6_fw_chain, chain) + size += rulesize; + + buf = _MALLOC(size, M_TEMP, M_WAITOK); + if (!buf) { + error = ENOBUFS; + } else { + //struct ip6_fw *bp = buf; + caddr_t bp = (caddr_t)buf; - buf = _MALLOC(size, M_TEMP, M_WAITOK); - if (!buf) error = ENOBUFS; - else + LIST_FOREACH(fcp, &ip6_fw_chain, chain) { - //struct ip6_fw *bp = buf; - caddr_t bp = (caddr_t)buf; - - LIST_FOREACH(fcp, &ip6_fw_chain, chain) - { - //bcopy(fcp->rule, bp, sizeof *bp); - if ( is64user ){ - cp_to_user_64( (struct ip6_fw_64*)bp, fcp->rule); - } - else { - cp_to_user_32( (struct ip6_fw_32*)bp, fcp->rule); - } - - ( (struct ip6_fw*)bp)->version = IPV6_FW_CURRENT_API_VERSION; - //bp++; - bp += rulesize; + //bcopy(fcp->rule, bp, sizeof *bp); + if (is64user) { + cp_to_user_64((struct ip6_fw_64*)bp, fcp->rule); + } else { + cp_to_user_32((struct ip6_fw_32*)bp, fcp->rule); } - } - if (buf) - { - sopt->sopt_valsize = valsize; - error = sooptcopyout(sopt, buf, size); - FREE(buf, M_TEMP); + ((struct ip6_fw*)bp)->version = IPV6_FW_CURRENT_API_VERSION; + //bp++; + bp += rulesize; } - - break; } - case IPV6_FW_FLUSH: - while (ip6_fw_chain.lh_first && - ip6_fw_chain.lh_first->rule->fw_number != (u_short)-1) - { - struct ip6_fw_chain *fcp = ip6_fw_chain.lh_first; - LIST_REMOVE(ip6_fw_chain.lh_first, chain); - FREE(fcp->rule, M_IP6FW); - FREE(fcp, M_IP6FW); - } - ip6fw_kev_post_msg(KEV_IP6FW_FLUSH); - break; + if (buf) { + sopt->sopt_valsize = valsize; + error = sooptcopyout(sopt, buf, size); + FREE(buf, M_TEMP); + } - case IPV6_FW_ZERO: - error = zero_entry6(&rule); - break; + break; + } - case IPV6_FW_ADD: - if (check_ip6fw_struct(&rule)) { - error = add_entry6(&ip6_fw_chain, &rule); + case IPV6_FW_FLUSH: + while (ip6_fw_chain.lh_first && + ip6_fw_chain.lh_first->rule->fw_number != (u_short) - 1) { + struct ip6_fw_chain *fcp = ip6_fw_chain.lh_first; + LIST_REMOVE(ip6_fw_chain.lh_first, chain); + FREE(fcp->rule, M_IP6FW); + FREE(fcp, M_IP6FW); + } + ip6fw_kev_post_msg(KEV_IP6FW_FLUSH); + break; - ip6fw_kev_post_msg(KEV_IP6FW_ADD); - } else - error = EINVAL; + case IPV6_FW_ZERO: + error = zero_entry6(&rule); + break; - if (is64user){ - struct ip6_fw_64 userrule_64; - cp_to_user_64( &userrule_64, &rule); - error = sooptcopyout(sopt, &userrule_64, userrulesize); - } - else { - struct ip6_fw_32 userrule_32; - cp_to_user_32( &userrule_32, &rule); - error = sooptcopyout(sopt, &userrule_32, userrulesize); - } - break; + case IPV6_FW_ADD: + if (check_ip6fw_struct(&rule)) { + error = add_entry6(&ip6_fw_chain, &rule); - case IPV6_FW_DEL: - if (rule.fw_number == (u_short)-1) - { - dprintf(("%s can't delete rule 65535\n", err_prefix)); - error = EINVAL; - } - else { - error = del_entry6(&ip6_fw_chain, rule.fw_number); + ip6fw_kev_post_msg(KEV_IP6FW_ADD); + } else { + error = EINVAL; + } - ip6fw_kev_post_msg(KEV_IP6FW_DEL); - } - break; + if (is64user) { + struct ip6_fw_64 userrule_64; + cp_to_user_64( &userrule_64, &rule); + error = sooptcopyout(sopt, &userrule_64, userrulesize); 
+ } else { + struct ip6_fw_32 userrule_32; + cp_to_user_32( &userrule_32, &rule); + error = sooptcopyout(sopt, &userrule_32, userrulesize); + } + break; - default: - dprintf(("%s invalid option %d\n", err_prefix, sopt->sopt_name)); + case IPV6_FW_DEL: + if (rule.fw_number == (u_short) - 1) { + dprintf(("%s can't delete rule 65535\n", err_prefix)); error = EINVAL; + } else { + error = del_entry6(&ip6_fw_chain, rule.fw_number); + + ip6fw_kev_post_msg(KEV_IP6FW_DEL); + } + break; + + default: + dprintf(("%s invalid option %d\n", err_prefix, sopt->sopt_name)); + error = EINVAL; } return error; @@ -1428,7 +1486,7 @@ ip6_fw_init(void) bzero(&default_rule, sizeof default_rule); default_rule.fw_prot = IPPROTO_IPV6; - default_rule.fw_number = (u_short)-1; + default_rule.fw_number = (u_short) - 1; #ifdef IPV6FIREWALL_DEFAULT_TO_ACCEPT default_rule.fw_flg |= IPV6_FW_F_ACCEPT; #else @@ -1436,8 +1494,9 @@ ip6_fw_init(void) #endif default_rule.fw_flg |= IPV6_FW_F_IN | IPV6_FW_F_OUT; if (check_ip6fw_struct(&default_rule) == NULL || - add_entry6(&ip6_fw_chain, &default_rule)) + add_entry6(&ip6_fw_chain, &default_rule)) { panic("%s", __FUNCTION__); + } printf("IPv6 packet filtering initialized, "); #ifdef IPV6FIREWALL_DEFAULT_TO_ACCEPT @@ -1446,11 +1505,11 @@ ip6_fw_init(void) #ifndef IPV6FIREWALL_VERBOSE printf("logging disabled\n"); #else - if (fw6_verbose_limit == 0) + if (fw6_verbose_limit == 0) { printf("unlimited logging\n"); - else + } else { printf("logging limited to %d packets/entry\n", fw6_verbose_limit); + } #endif } - diff --git a/bsd/netinet6/ip6_fw.h b/bsd/netinet6/ip6_fw.h index 1debe3fb2..81757354b 100644 --- a/bsd/netinet6/ip6_fw.h +++ b/bsd/netinet6/ip6_fw.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -51,39 +51,39 @@ */ /*! - @defined KEV_IP6FW_SUBCLASS - @discussion The kernel event subclass for IPv6 Firewall. -*/ -#define KEV_IP6FW_SUBCLASS 2 + * @defined KEV_IP6FW_SUBCLASS + * @discussion The kernel event subclass for IPv6 Firewall. + */ +#define KEV_IP6FW_SUBCLASS 2 /*! - @defined KEV_IP6FW_ADD - @discussion The event code indicating a rule has been added. -*/ -#define KEV_IP6FW_ADD 1 + * @defined KEV_IP6FW_ADD + * @discussion The event code indicating a rule has been added. + */ +#define KEV_IP6FW_ADD 1 /*! - @defined KEV_IP6FW_DEL - @discussion The event code indicating a rule has been removed. 
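The IPV6_FW_GET path above sizes its reply per rule according to the caller's ABI (struct ip6_fw_64 vs. struct ip6_fw_32) and streams every chain entry into one buffer. A minimal user-space sketch of the matching getsockopt() call follows; it assumes a privileged raw AF_INET6 socket and that netinet6/ip6_fw.h (a kernel-private header, not in the public SDK) is on the include path, and the fixed 64-rule cap is purely illustrative.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet6/ip6_fw.h>    /* kernel-private header (assumption) */
#include <stdio.h>
#include <string.h>

static int
dump_ip6fw_rules(int s)         /* s: raw AF_INET6 socket, root only */
{
	struct ip6_fw rules[64];    /* illustrative cap */
	socklen_t len = sizeof(rules);

	memset(rules, 0, sizeof(rules));
	/* The control path rejects callers with a stale version field. */
	rules[0].version = IPV6_FW_CURRENT_API_VERSION;

	if (getsockopt(s, IPPROTO_IPV6, IPV6_FW_GET, rules, &len) < 0) {
		return -1;
	}
	for (size_t i = 0; i < len / sizeof(rules[0]); i++) {
		printf("rule %hu\n", rules[i].fw_number);
	}
	return 0;
}

Because the kernel emits ip6_fw_32 or ip6_fw_64 records depending on the is64user flag seen above, a 32-bit client reads a different record stride than a 64-bit one; the version check is what keeps mismatched layouts from being misread.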
-*/ -#define KEV_IP6FW_DEL 2 + * @defined KEV_IP6FW_DEL + * @discussion The event code indicating a rule has been removed. + */ +#define KEV_IP6FW_DEL 2 /*! - @defined KEV_IP6FW_FLUSH - @discussion The event code indicating the rule set has been flushed. -*/ -#define KEV_IP6FW_FLUSH 3 + * @defined KEV_IP6FW_FLUSH + * @discussion The event code indicating the rule set has been flushed. + */ +#define KEV_IP6FW_FLUSH 3 /*! - @defined KEV_IP6FW_FLUSH - @discussion The event code indicating the enable flag has been changed -*/ -#define KEV_IP6FW_ENABLE 4 + * @defined KEV_IP6FW_FLUSH + * @discussion The event code indicating the enable flag has been changed + */ +#define KEV_IP6FW_ENABLE 4 #include -#define IPV6_FW_CURRENT_API_VERSION 20 /* Version of this API */ +#define IPV6_FW_CURRENT_API_VERSION 20 /* Version of this API */ /* @@ -100,12 +100,12 @@ */ union ip6_fw_if { - struct in6_addr fu_via_ip6; /* Specified by IPv6 address */ - struct { /* Specified by interface name */ + struct in6_addr fu_via_ip6; /* Specified by IPv6 address */ + struct { /* Specified by interface name */ #define IP6FW_IFNLEN IFNAMSIZ - char name[IP6FW_IFNLEN]; - short unit; /* -1 means match any unit */ - } fu_via_if; + char name[IP6FW_IFNLEN]; + short unit; /* -1 means match any unit */ + } fu_via_if; }; /* @@ -119,65 +119,65 @@ union ip6_fw_if { struct ip6_fw { - u_int32_t version; /* Version of this structure. Should always be */ - /* set to IP6_FW_CURRENT_API_VERSION by clients. */ - void *context; /* Context that is usable by user processes to */ - /* identify this rule. */ - u_int32_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ - struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ - struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ - u_short fw_number; /* Rule number */ - u_short fw_flg; /* Flags word */ -#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ - u_int fw_ipflg; /* IP flags word */ - u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ - u_char fw_ip6opt,fw_ip6nopt; /* IPv6 options set/unset */ - u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ + u_int32_t version; /* Version of this structure. Should always be */ + /* set to IP6_FW_CURRENT_API_VERSION by clients. */ + void *context; /* Context that is usable by user processes to */ + /* identify this rule. 
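The KEV_IP6FW_* codes are published through the kernel event facility, so a user process can watch firewall changes without polling. Below is a sketch of a watcher built on the public PF_SYSTEM/SYSPROTO_EVENT socket from <sys/kern_event.h>; the subclass literal mirrors KEV_IP6FW_SUBCLASS, and error handling is pared down.

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <stdio.h>
#include <unistd.h>

static int
watch_ip6fw(void)
{
	int s = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);
	if (s < 0) {
		return -1;
	}

	/* Deliver only the IPv6-firewall subclass of network events. */
	struct kev_request req = {
		.vendor_code  = KEV_VENDOR_APPLE,
		.kev_class    = KEV_NETWORK_CLASS,
		.kev_subclass = 2,              /* KEV_IP6FW_SUBCLASS */
	};
	if (ioctl(s, SIOCSKEVFILTER, &req) < 0) {
		close(s);
		return -1;
	}

	char buf[1024];
	if (recv(s, buf, sizeof(buf), 0) >=
	    (ssize_t)sizeof(struct kern_event_msg)) {
		struct kern_event_msg *msg =
		    (struct kern_event_msg *)(void *)buf;
		/* 1 = add, 2 = del, 3 = flush, 4 = enable, per the codes above */
		printf("ip6fw event %u\n", msg->event_code);
	}
	close(s);
	return 0;
}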
*/ + u_int32_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ + struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ + struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ + u_short fw_number; /* Rule number */ + u_short fw_flg; /* Flags word */ +#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ + u_int fw_ipflg; /* IP flags word */ + u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ + u_char fw_ip6opt, fw_ip6nopt; /* IPv6 options set/unset */ + u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ #define IPV6_FW_ICMPTYPES_DIM (256 / (sizeof(unsigned) * 8)) - unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ - long timestamp; /* timestamp (tv_sec) of last match */ - union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - } fw_un; - u_char fw_prot; /* IPv6 protocol */ - u_char fw_nports; /* N'of src ports and # of dst ports */ - /* in ports array (dst ports follow */ - /* src ports; max of 10 ports in all; */ - /* count of 0 means match all ports) */ + unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ + long timestamp; /* timestamp (tv_sec) of last match */ + union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ + } fw_un; + u_char fw_prot; /* IPv6 protocol */ + u_char fw_nports; /* N'of src ports and # of dst ports */ + /* in ports array (dst ports follow */ + /* src ports; max of 10 ports in all; */ + /* count of 0 means match all ports) */ }; #if defined(BSD_KERNEL_PRIVATE) #pragma pack(4) struct ip6_fw_32 { - u_int32_t version; /* Version of this structure. Should always be */ + u_int32_t version; /* Version of this structure. Should always be */ /* set to IP6_FW_CURRENT_API_VERSION by clients. */ - user32_addr_t context; /* Context that is usable by user processes to */ + user32_addr_t context; /* Context that is usable by user processes to */ /* identify this rule. 
*/ - u_int32_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ - struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ - struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ - u_short fw_number; /* Rule number */ - u_short fw_flg; /* Flags word */ -#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ - u_int fw_ipflg; /* IP flags word */ - u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ - u_char fw_ip6opt,fw_ip6nopt; /* IPv6 options set/unset */ - u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ + u_int32_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ + struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ + struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ + u_short fw_number; /* Rule number */ + u_short fw_flg; /* Flags word */ +#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ + u_int fw_ipflg; /* IP flags word */ + u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ + u_char fw_ip6opt, fw_ip6nopt; /* IPv6 options set/unset */ + u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ #define IPV6_FW_ICMPTYPES_DIM (256 / (sizeof(unsigned) * 8)) - unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ - user32_time_t timestamp; /* timestamp (tv_sec) of last match */ - union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - } fw_un; - u_char fw_prot; /* IPv6 protocol */ - u_char fw_nports; /* N'of src ports and # of dst ports */ + unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ + user32_time_t timestamp; /* timestamp (tv_sec) of last match */ + union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ + } fw_un; + u_char fw_prot; /* IPv6 protocol */ + u_char fw_nports; /* N'of src ports and # of dst ports */ /* in ports array (dst ports follow */ /* src ports; max of 10 ports in all; */ /* count of 0 means match all ports) */ @@ -186,102 +186,102 @@ struct ip6_fw_32 { #pragma pack() struct ip6_fw_64 { - u_int32_t version; /* Version of this structure. Should always be */ + u_int32_t version; /* Version of this structure. Should always be */ /* set to IP6_FW_CURRENT_API_VERSION by clients. */ - __uint64_t context __attribute__((aligned(8))); /* Context that is usable by user processes to */ + __uint64_t context __attribute__((aligned(8))); /* Context that is usable by user processes to */ /* identify this rule. 
*/ - u_int32_t fw_pcnt,fw_bcnt; /* Packet and byte counters */ - struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ - struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ - u_short fw_number; /* Rule number */ - u_short fw_flg; /* Flags word */ -#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ - u_int fw_ipflg; /* IP flags word */ - u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ - u_char fw_ip6opt,fw_ip6nopt; /* IPv6 options set/unset */ - u_char fw_tcpf,fw_tcpnf; /* TCP flags set/unset */ + u_int32_t fw_pcnt, fw_bcnt; /* Packet and byte counters */ + struct in6_addr fw_src, fw_dst; /* Source and destination IPv6 addr */ + struct in6_addr fw_smsk, fw_dmsk; /* Mask for src and dest IPv6 addr */ + u_short fw_number; /* Rule number */ + u_short fw_flg; /* Flags word */ +#define IPV6_FW_MAX_PORTS 10 /* A reasonable maximum */ + u_int fw_ipflg; /* IP flags word */ + u_short fw_pts[IPV6_FW_MAX_PORTS]; /* Array of port numbers to match */ + u_char fw_ip6opt, fw_ip6nopt; /* IPv6 options set/unset */ + u_char fw_tcpf, fw_tcpnf; /* TCP flags set/unset */ #define IPV6_FW_ICMPTYPES_DIM (256 / (sizeof(unsigned) * 8)) - unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ - user64_time_t timestamp; /* timestamp (tv_sec) of last match */ - union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ - union { - u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ - u_short fu_skipto_rule; /* SKIPTO command rule number */ - u_short fu_reject_code; /* REJECT response code */ - } fw_un; - u_char fw_prot; /* IPv6 protocol */ - u_char fw_nports; /* N'of src ports and # of dst ports */ + unsigned fw_icmp6types[IPV6_FW_ICMPTYPES_DIM]; /* ICMP types bitmap */ + user64_time_t timestamp; /* timestamp (tv_sec) of last match */ + union ip6_fw_if fw_in_if, fw_out_if;/* Incoming and outgoing interfaces */ + union { + u_short fu_divert_port; /* Divert/tee port (options IP6DIVERT) */ + u_short fu_skipto_rule; /* SKIPTO command rule number */ + u_short fu_reject_code; /* REJECT response code */ + } fw_un; + u_char fw_prot; /* IPv6 protocol */ + u_char fw_nports; /* N'of src ports and # of dst ports */ /* in ports array (dst ports follow */ /* src ports; max of 10 ports in all; */ /* count of 0 means match all ports) */ }; -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ -#define IPV6_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) -#define IPV6_FW_SETNSRCP(rule, n) do { \ - (rule)->fw_nports &= ~0x0f; \ - (rule)->fw_nports |= (n); \ - } while (0) -#define IPV6_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) -#define IPV6_FW_SETNDSTP(rule, n) do { \ - (rule)->fw_nports &= ~0xf0; \ - (rule)->fw_nports |= (n) << 4;\ - } while (0) +#define IPV6_FW_GETNSRCP(rule) ((rule)->fw_nports & 0x0f) +#define IPV6_FW_SETNSRCP(rule, n) do { \ + (rule)->fw_nports &= ~0x0f; \ + (rule)->fw_nports |= (n); \ + } while (0) +#define IPV6_FW_GETNDSTP(rule) ((rule)->fw_nports >> 4) +#define IPV6_FW_SETNDSTP(rule, n) do { \ + (rule)->fw_nports &= ~0xf0; \ + (rule)->fw_nports |= (n) << 4;\ + } while (0) -#define fw_divert_port fw_un.fu_divert_port -#define fw_skipto_rule fw_un.fu_skipto_rule -#define fw_reject_code fw_un.fu_reject_code +#define fw_divert_port fw_un.fu_divert_port +#define fw_skipto_rule fw_un.fu_skipto_rule +#define fw_reject_code fw_un.fu_reject_code struct ip6_fw_chain { - LIST_ENTRY(ip6_fw_chain) chain; - struct ip6_fw *rule; + LIST_ENTRY(ip6_fw_chain) chain; + struct ip6_fw *rule; }; /* * Values for 
"flags" field . */ -#define IPV6_FW_F_IN 0x0001 /* Check inbound packets */ -#define IPV6_FW_F_OUT 0x0002 /* Check outbound packets */ -#define IPV6_FW_F_IIFACE 0x0004 /* Apply inbound interface test */ -#define IPV6_FW_F_OIFACE 0x0008 /* Apply outbound interface test */ +#define IPV6_FW_F_IN 0x0001 /* Check inbound packets */ +#define IPV6_FW_F_OUT 0x0002 /* Check outbound packets */ +#define IPV6_FW_F_IIFACE 0x0004 /* Apply inbound interface test */ +#define IPV6_FW_F_OIFACE 0x0008 /* Apply outbound interface test */ -#define IPV6_FW_F_COMMAND 0x0070 /* Mask for type of chain entry: */ -#define IPV6_FW_F_DENY 0x0000 /* This is a deny rule */ -#define IPV6_FW_F_REJECT 0x0010 /* Deny and send a response packet */ -#define IPV6_FW_F_ACCEPT 0x0020 /* This is an accept rule */ -#define IPV6_FW_F_COUNT 0x0030 /* This is a count rule */ -#define IPV6_FW_F_DIVERT 0x0040 /* This is a divert rule */ -#define IPV6_FW_F_TEE 0x0050 /* This is a tee rule */ -#define IPV6_FW_F_SKIPTO 0x0060 /* This is a skipto rule */ +#define IPV6_FW_F_COMMAND 0x0070 /* Mask for type of chain entry: */ +#define IPV6_FW_F_DENY 0x0000 /* This is a deny rule */ +#define IPV6_FW_F_REJECT 0x0010 /* Deny and send a response packet */ +#define IPV6_FW_F_ACCEPT 0x0020 /* This is an accept rule */ +#define IPV6_FW_F_COUNT 0x0030 /* This is a count rule */ +#define IPV6_FW_F_DIVERT 0x0040 /* This is a divert rule */ +#define IPV6_FW_F_TEE 0x0050 /* This is a tee rule */ +#define IPV6_FW_F_SKIPTO 0x0060 /* This is a skipto rule */ -#define IPV6_FW_F_PRN 0x0080 /* Print if this rule matches */ +#define IPV6_FW_F_PRN 0x0080 /* Print if this rule matches */ -#define IPV6_FW_F_SRNG 0x0100 /* The first two src ports are a min * - * and max range (stored in host byte * - * order). */ +#define IPV6_FW_F_SRNG 0x0100 /* The first two src ports are a min * + * and max range (stored in host byte * + * order). */ -#define IPV6_FW_F_DRNG 0x0200 /* The first two dst ports are a min * - * and max range (stored in host byte * - * order). */ +#define IPV6_FW_F_DRNG 0x0200 /* The first two dst ports are a min * + * and max range (stored in host byte * + * order). */ -#define IPV6_FW_F_IIFNAME 0x0400 /* In interface by name/unit (not IP) */ -#define IPV6_FW_F_OIFNAME 0x0800 /* Out interface by name/unit (not IP) */ +#define IPV6_FW_F_IIFNAME 0x0400 /* In interface by name/unit (not IP) */ +#define IPV6_FW_F_OIFNAME 0x0800 /* Out interface by name/unit (not IP) */ -#define IPV6_FW_F_INVSRC 0x1000 /* Invert sense of src check */ -#define IPV6_FW_F_INVDST 0x2000 /* Invert sense of dst check */ +#define IPV6_FW_F_INVSRC 0x1000 /* Invert sense of src check */ +#define IPV6_FW_F_INVDST 0x2000 /* Invert sense of dst check */ -#define IPV6_FW_F_FRAG 0x4000 /* Fragment */ +#define IPV6_FW_F_FRAG 0x4000 /* Fragment */ -#define IPV6_FW_F_ICMPBIT 0x8000 /* ICMP type bitmap is valid */ +#define IPV6_FW_F_ICMPBIT 0x8000 /* ICMP type bitmap is valid */ -#define IPV6_FW_F_MASK 0xFFFF /* All possible flag bits mask */ +#define IPV6_FW_F_MASK 0xFFFF /* All possible flag bits mask */ -/* +/* * Flags for the 'fw_ipflg' field, for comparing values of ip and its protocols. 
*/ -#define IPV6_FW_IF_TCPEST 0x00000020 /* established TCP connection */ -#define IPV6_FW_IF_TCPMSK 0x00000020 /* mask of all TCP values */ +#define IPV6_FW_IF_TCPEST 0x00000020 /* established TCP connection */ +#define IPV6_FW_IF_TCPMSK 0x00000020 /* mask of all TCP values */ /* * For backwards compatibility with rules specifying "via iface" but @@ -289,34 +289,34 @@ struct ip6_fw_chain { * of bits to represent this configuration. */ -#define IF6_FW_F_VIAHACK (IPV6_FW_F_IN|IPV6_FW_F_OUT|IPV6_FW_F_IIFACE|IPV6_FW_F_OIFACE) +#define IF6_FW_F_VIAHACK (IPV6_FW_F_IN|IPV6_FW_F_OUT|IPV6_FW_F_IIFACE|IPV6_FW_F_OIFACE) /* * Definitions for REJECT response codes. * Values less than 256 correspond to ICMP unreachable codes. */ -#define IPV6_FW_REJECT_RST 0x0100 /* TCP packets: send RST */ +#define IPV6_FW_REJECT_RST 0x0100 /* TCP packets: send RST */ /* * Definitions for IPv6 option names. */ -#define IPV6_FW_IP6OPT_HOPOPT 0x01 -#define IPV6_FW_IP6OPT_ROUTE 0x02 -#define IPV6_FW_IP6OPT_FRAG 0x04 -#define IPV6_FW_IP6OPT_ESP 0x08 -#define IPV6_FW_IP6OPT_AH 0x10 -#define IPV6_FW_IP6OPT_NONXT 0x20 -#define IPV6_FW_IP6OPT_OPTS 0x40 +#define IPV6_FW_IP6OPT_HOPOPT 0x01 +#define IPV6_FW_IP6OPT_ROUTE 0x02 +#define IPV6_FW_IP6OPT_FRAG 0x04 +#define IPV6_FW_IP6OPT_ESP 0x08 +#define IPV6_FW_IP6OPT_AH 0x10 +#define IPV6_FW_IP6OPT_NONXT 0x20 +#define IPV6_FW_IP6OPT_OPTS 0x40 /* * Definitions for TCP flags. */ -#define IPV6_FW_TCPF_FIN TH_FIN -#define IPV6_FW_TCPF_SYN TH_SYN -#define IPV6_FW_TCPF_RST TH_RST -#define IPV6_FW_TCPF_PSH TH_PUSH -#define IPV6_FW_TCPF_ACK TH_ACK -#define IPV6_FW_TCPF_URG TH_URG +#define IPV6_FW_TCPF_FIN TH_FIN +#define IPV6_FW_TCPF_SYN TH_SYN +#define IPV6_FW_TCPF_RST TH_RST +#define IPV6_FW_TCPF_PSH TH_PUSH +#define IPV6_FW_TCPF_ACK TH_ACK +#define IPV6_FW_TCPF_URG TH_URG /* * Main firewall chains definitions and global var's definitions. 
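Two packing tricks in this header deserve a spelled-out example: the rule command (accept/deny/divert/...) is a multi-bit value inside fw_flg that must be compared under the IPV6_FW_F_COMMAND mask, and the source/destination port counts share the single fw_nports byte via the GETN/SETN macros. A self-contained demo using the same constants and macro bodies:

#include <assert.h>
#include <stdio.h>

#define IPV6_FW_F_COMMAND 0x0070
#define IPV6_FW_F_ACCEPT  0x0020

struct rule {
	unsigned short fw_flg;
	unsigned char  fw_nports;
};

#define IPV6_FW_GETNSRCP(rule)    ((rule)->fw_nports & 0x0f)
#define IPV6_FW_SETNSRCP(rule, n) do { (rule)->fw_nports &= ~0x0f; \
	                               (rule)->fw_nports |= (n); } while (0)
#define IPV6_FW_GETNDSTP(rule)    ((rule)->fw_nports >> 4)
#define IPV6_FW_SETNDSTP(rule, n) do { (rule)->fw_nports &= ~0xf0; \
	                               (rule)->fw_nports |= (n) << 4; } while (0)

int
main(void)
{
	struct rule r = { .fw_flg = IPV6_FW_F_ACCEPT, .fw_nports = 0 };

	/* 3 source ports and 2 destination ports packed into one byte. */
	IPV6_FW_SETNSRCP(&r, 3);
	IPV6_FW_SETNDSTP(&r, 2);
	assert(IPV6_FW_GETNSRCP(&r) == 3);
	assert(IPV6_FW_GETNDSTP(&r) == 2);

	/* A masked compare, not a bit test: IPV6_FW_F_DENY is 0x0000. */
	printf("accept? %d\n",
	    (r.fw_flg & IPV6_FW_F_COMMAND) == IPV6_FW_F_ACCEPT);
	return 0;
}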
@@ -335,12 +335,12 @@ void load_ip6fw(void); /* Firewall hooks */ struct ip6_hdr; struct sockopt; -typedef int ip6_fw_chk_t(struct ip6_hdr**, struct ifnet*, - u_short *, struct mbuf**); -typedef int ip6_fw_ctl_t(struct sockopt *); -extern ip6_fw_chk_t *ip6_fw_chk_ptr; -extern ip6_fw_ctl_t *ip6_fw_ctl_ptr; -extern int ip6_fw_enable; +typedef int ip6_fw_chk_t(struct ip6_hdr**, struct ifnet*, + u_short *, struct mbuf**); +typedef int ip6_fw_ctl_t(struct sockopt *); +extern ip6_fw_chk_t *ip6_fw_chk_ptr; +extern ip6_fw_ctl_t *ip6_fw_ctl_ptr; +extern int ip6_fw_enable; #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet6/ip6_id.c b/bsd/netinet6/ip6_id.c index eab70a6c0..b767c8edb 100644 --- a/bsd/netinet6/ip6_id.c +++ b/bsd/netinet6/ip6_id.c @@ -129,14 +129,14 @@ #include struct randomtab { - const int ru_bits; /* resulting bits */ - const long ru_out; /* Time after wich will be reseeded */ - const u_int32_t ru_max; /* Uniq cycle, avoid blackjack prediction */ - const u_int32_t ru_gen; /* Starting generator */ - const u_int32_t ru_n; /* ru_n: prime, ru_n - 1: product of pfacts[] */ + const int ru_bits; /* resulting bits */ + const long ru_out; /* Time after wich will be reseeded */ + const u_int32_t ru_max; /* Uniq cycle, avoid blackjack prediction */ + const u_int32_t ru_gen; /* Starting generator */ + const u_int32_t ru_n; /* ru_n: prime, ru_n - 1: product of pfacts[] */ const u_int32_t ru_agen; /* determine ru_a as ru_agen^(2*rand) */ - const u_int32_t ru_m; /* ru_m = 2^x*3^y */ - const u_int32_t pfacts[4]; /* factors of ru_n */ + const u_int32_t ru_m; /* ru_m = 2^x*3^y */ + const u_int32_t pfacts[4]; /* factors of ru_n */ u_int32_t ru_counter; u_int32_t ru_msb; @@ -149,26 +149,26 @@ struct randomtab { }; static struct randomtab randomtab_32 = { - 32, /* resulting bits */ - 180, /* Time after wich will be reseeded */ - 1000000000, /* Uniq cycle, avoid blackjack prediction */ - 2, /* Starting generator */ - 2147483629, /* RU_N-1 = 2^2*3^2*59652323 */ - 7, /* determine ru_a as RU_AGEN^(2*rand) */ - 1836660096, /* RU_M = 2^7*3^15 - don't change */ - { 2, 3, 59652323, 0 }, /* factors of ru_n */ + 32, /* resulting bits */ + 180, /* Time after wich will be reseeded */ + 1000000000, /* Uniq cycle, avoid blackjack prediction */ + 2, /* Starting generator */ + 2147483629, /* RU_N-1 = 2^2*3^2*59652323 */ + 7, /* determine ru_a as RU_AGEN^(2*rand) */ + 1836660096, /* RU_M = 2^7*3^15 - don't change */ + { 2, 3, 59652323, 0 }, /* factors of ru_n */ 0, 0, 0, 0, 0, 0, 0, 0, 0 }; static struct randomtab randomtab_20 = { - 20, /* resulting bits */ - 180, /* Time after wich will be reseeded */ - 200000, /* Uniq cycle, avoid blackjack prediction */ - 2, /* Starting generator */ - 524269, /* RU_N-1 = 2^2*3^2*14563 */ - 7, /* determine ru_a as RU_AGEN^(2*rand) */ - 279936, /* RU_M = 2^7*3^7 - don't change */ - { 2, 3, 14563, 0 }, /* factors of ru_n */ + 20, /* resulting bits */ + 180, /* Time after wich will be reseeded */ + 200000, /* Uniq cycle, avoid blackjack prediction */ + 2, /* Starting generator */ + 524269, /* RU_N-1 = 2^2*3^2*14563 */ + 7, /* determine ru_a as RU_AGEN^(2*rand) */ + 279936, /* RU_M = 2^7*3^7 - don't change */ + { 2, 3, 14563, 0 }, /* factors of ru_n */ 0, 0, 0, 0, 0, 0, 0, 0, 0 }; @@ -190,12 +190,13 @@ pmod(u_int32_t gen, u_int32_t expo, u_int32_t mod) u = expo; while (u) { - if (u & 1) + if (u & 1) { s = (s * t) % mod; + } u >>= 1; t = (t * t) % mod; } - return (s); + return s; } /* @@ -223,8 +224,9 @@ initid(struct randomtab *p) p->ru_b = (RandomULong() & (~0U >> (32 - 
p->ru_bits))) | 1; p->ru_a = pmod(p->ru_agen, (RandomULong() & (~0U >> (32 - p->ru_bits))) & (~1U), p->ru_m); - while (p->ru_b % 3 == 0) + while (p->ru_b % 3 == 0) { p->ru_b += 2; + } j = RandomULong() % p->ru_n; @@ -234,14 +236,17 @@ initid(struct randomtab *p) * RU_GEN^j mod RU_N */ while (noprime) { - for (i = 0; p->pfacts[i] > 0; i++) - if (j % p->pfacts[i] == 0) + for (i = 0; p->pfacts[i] > 0; i++) { + if (j % p->pfacts[i] == 0) { break; + } + } - if (p->pfacts[i] == 0) + if (p->pfacts[i] == 0) { noprime = 0; - else + } else { j = (j + 1) % p->ru_n; + } } p->ru_g = pmod(p->ru_gen, j, p->ru_n); @@ -258,15 +263,17 @@ randomid(struct randomtab *p) int i, n; u_int32_t tmp; - if (p->ru_counter >= p->ru_max || curtime > p->ru_reseed) + if (p->ru_counter >= p->ru_max || curtime > p->ru_reseed) { initid(p); + } tmp = RandomULong(); /* Skip a random number of ids */ n = tmp & 0x3; tmp = tmp >> 2; - if (p->ru_counter + n >= p->ru_max) + if (p->ru_counter + n >= p->ru_max) { initid(p); + } for (i = 0; i <= n; i++) { /* Linear Congruential Generator */ @@ -275,20 +282,18 @@ randomid(struct randomtab *p) p->ru_counter += i; - return ((p->ru_seed ^ pmod(p->ru_g, p->ru_seed2 ^ p->ru_x, p->ru_n)) | - p->ru_msb); + return (p->ru_seed ^ pmod(p->ru_g, p->ru_seed2 ^ p->ru_x, p->ru_n)) | + p->ru_msb; } u_int32_t ip6_randomid(void) { - - return (randomid(&randomtab_32)); + return randomid(&randomtab_32); } u_int32_t ip6_randomflowlabel(void) { - - return (randomid(&randomtab_20) & 0xfffff); + return randomid(&randomtab_20) & 0xfffff; } diff --git a/bsd/netinet6/ip6_input.c b/bsd/netinet6/ip6_input.c index 2e8eea64b..b35e7f501 100644 --- a/bsd/netinet6/ip6_input.c +++ b/bsd/netinet6/ip6_input.c @@ -160,18 +160,18 @@ extern int ipsec_bypass; struct ip6protosw *ip6_protox[IPPROTO_MAX]; -static lck_grp_attr_t *in6_ifaddr_rwlock_grp_attr; -static lck_grp_t *in6_ifaddr_rwlock_grp; -static lck_attr_t *in6_ifaddr_rwlock_attr; +static lck_grp_attr_t *in6_ifaddr_rwlock_grp_attr; +static lck_grp_t *in6_ifaddr_rwlock_grp; +static lck_attr_t *in6_ifaddr_rwlock_attr; decl_lck_rw_data(, in6_ifaddr_rwlock); /* Protected by in6_ifaddr_rwlock */ struct in6_ifaddr *in6_ifaddrs = NULL; -#define IN6_IFSTAT_REQUIRE_ALIGNED_64(f) \ +#define IN6_IFSTAT_REQUIRE_ALIGNED_64(f) \ _CASSERT(!(offsetof(struct in6_ifstat, f) % sizeof (uint64_t))) -#define ICMP6_IFSTAT_REQUIRE_ALIGNED_64(f) \ +#define ICMP6_IFSTAT_REQUIRE_ALIGNED_64(f) \ _CASSERT(!(offsetof(struct icmp6_ifstat, f) % sizeof (uint64_t))) struct ip6stat ip6stat; @@ -180,16 +180,16 @@ decl_lck_mtx_data(, proxy6_lock); decl_lck_mtx_data(static, dad6_mutex_data); decl_lck_mtx_data(static, nd6_mutex_data); decl_lck_mtx_data(static, prefix6_mutex_data); -lck_mtx_t *dad6_mutex = &dad6_mutex_data; -lck_mtx_t *nd6_mutex = &nd6_mutex_data; -lck_mtx_t *prefix6_mutex = &prefix6_mutex_data; +lck_mtx_t *dad6_mutex = &dad6_mutex_data; +lck_mtx_t *nd6_mutex = &nd6_mutex_data; +lck_mtx_t *prefix6_mutex = &prefix6_mutex_data; #ifdef ENABLE_ADDRSEL decl_lck_mtx_data(static, addrsel_mutex_data); -lck_mtx_t *addrsel_mutex = &addrsel_mutex_data; +lck_mtx_t *addrsel_mutex = &addrsel_mutex_data; #endif -static lck_attr_t *ip6_mutex_attr; -static lck_grp_t *ip6_mutex_grp; -static lck_grp_attr_t *ip6_mutex_grp_attr; +static lck_attr_t *ip6_mutex_attr; +static lck_grp_t *ip6_mutex_grp; +static lck_grp_attr_t *ip6_mutex_grp_attr; extern int loopattach_done; extern void addrsel_policy_init(void); @@ -208,30 +208,30 @@ SYSCTL_DECL(_net_inet6_ip6); static uint32_t ip6_adj_clear_hwcksum = 0; 
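pmod() above is plain square-and-multiply modular exponentiation; the ID generator uses it both to pick a generator element in initid() and to compute RU_GEN^j mod RU_N in randomid(). The 64-bit intermediates are load-bearing: randomtab_32's modulus sits just under 2^31, so a 32-bit product would overflow. A standalone copy of the same loop:

#include <stdint.h>
#include <stdio.h>

/* (gen ^ expo) % mod by binary exponentiation, as in pmod() above. */
static uint32_t
pmod(uint32_t gen, uint32_t expo, uint32_t mod)
{
	uint64_t s = 1, t = gen % mod, u = expo;

	while (u) {
		if (u & 1) {
			s = (s * t) % mod;      /* needs 64 bits: t < 2^31 */
		}
		u >>= 1;
		t = (t * t) % mod;
	}
	return (uint32_t)s;
}

int
main(void)
{
	/* randomtab_32's parameters: generator 2, modulus 2147483629. */
	printf("%u\n", pmod(2, 123456789, 2147483629u));
	return 0;
}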
SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, adj_clear_hwcksum, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_adj_clear_hwcksum, 0, - "Invalidate hwcksum info when adjusting length"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_adj_clear_hwcksum, 0, + "Invalidate hwcksum info when adjusting length"); static uint32_t ip6_adj_partial_sum = 1; SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, adj_partial_sum, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_adj_partial_sum, 0, - "Perform partial sum adjustment of trailing bytes at IP layer"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_adj_partial_sum, 0, + "Perform partial sum adjustment of trailing bytes at IP layer"); static int ip6_input_measure = 0; SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &ip6_input_measure, 0, sysctl_reset_ip6_input_stats, "I", "Do time measurement"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ip6_input_measure, 0, sysctl_reset_ip6_input_stats, "I", "Do time measurement"); static uint64_t ip6_input_measure_bins = 0; SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf_bins, - CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_input_measure_bins, 0, - sysctl_ip6_input_measure_bins, "I", - "bins for chaining performance data histogram"); + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_input_measure_bins, 0, + sysctl_ip6_input_measure_bins, "I", + "bins for chaining performance data histogram"); static net_perf_t net_perf; SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf_data, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_ip6_input_getperf, "S,net_perf", - "IP6 input performance data (struct net_perf, net/net_perf.h)"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_ip6_input_getperf, "S,net_perf", + "IP6 input performance data (struct net_perf, net/net_perf.h)"); /* * On platforms which require strict alignment (currently for anything but @@ -245,27 +245,27 @@ SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf_data, * load/store operations on the fields in IPv6 headers. 
*/ #if defined(__i386__) || defined(__x86_64__) -#define IP6_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0) +#define IP6_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0) #else /* !__i386__ && !__x86_64__ */ -#define IP6_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { \ - if (!IP6_HDR_ALIGNED_P(mtod(_m, caddr_t))) { \ - struct mbuf *_n; \ - struct ifnet *__ifp = (_ifp); \ - atomic_add_64(&(__ifp)->if_alignerrs, 1); \ - if (((_m)->m_flags & M_PKTHDR) && \ - (_m)->m_pkthdr.pkt_hdr != NULL) \ - (_m)->m_pkthdr.pkt_hdr = NULL; \ - _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT); \ - if (_n == NULL) { \ - ip6stat.ip6s_toosmall++; \ - m_freem(_m); \ - (_m) = NULL; \ - _action; \ - } else { \ - VERIFY(_n != (_m)); \ - (_m) = _n; \ - } \ - } \ +#define IP6_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { \ + if (!IP6_HDR_ALIGNED_P(mtod(_m, caddr_t))) { \ + struct mbuf *_n; \ + struct ifnet *__ifp = (_ifp); \ + atomic_add_64(&(__ifp)->if_alignerrs, 1); \ + if (((_m)->m_flags & M_PKTHDR) && \ + (_m)->m_pkthdr.pkt_hdr != NULL) \ + (_m)->m_pkthdr.pkt_hdr = NULL; \ + _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT); \ + if (_n == NULL) { \ + ip6stat.ip6s_toosmall++; \ + m_freem(_m); \ + (_m) = NULL; \ + _action; \ + } else { \ + VERIFY(_n != (_m)); \ + (_m) = _n; \ + } \ + } \ } while (0) #endif /* !__i386__ && !__x86_64__ */ @@ -275,8 +275,9 @@ ip6_proto_input(protocol_family_t protocol, mbuf_t packet) #pragma unused(protocol) #if INET struct timeval start_tv; - if (ip6_input_measure) + if (ip6_input_measure) { net_perf_start_time(&net_perf, &start_tv); + } #endif /* INET */ ip6_input(packet); #if INET @@ -301,13 +302,14 @@ ip6_init(struct ip6protosw *pp, struct domain *dp) domain_unguard_t unguard; domain_proto_mtx_lock_assert_held(); - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); - _CASSERT((sizeof (struct ip6_hdr) + - sizeof (struct icmp6_hdr)) <= _MHLEN); + _CASSERT((sizeof(struct ip6_hdr) + + sizeof(struct icmp6_hdr)) <= _MHLEN); - if (ip6_initialized) + if (ip6_initialized) { return; + } ip6_initialized = 1; eventhandler_lists_ctxt_init(&in6_evhdlr_ctxt); @@ -320,8 +322,9 @@ ip6_init(struct ip6protosw *pp, struct domain *dp) in6_clat46_eventhdlr_callback, eventhandler_entry_dummy_arg, EVENTHANDLER_PRI_ANY); - for (i = 0; i < IN6_EVENT_MAX; i++) + for (i = 0; i < IN6_EVENT_MAX; i++) { VERIFY(in6_event2kev_array[i].in6_event_code == i); + } pr = pffindproto_locked(PF_INET6, IPPROTO_RAW, SOCK_RAW); if (pr == NULL) { @@ -331,8 +334,9 @@ ip6_init(struct ip6protosw *pp, struct domain *dp) } /* Initialize the entire ip6_protox[] array to IPPROTO_RAW. */ - for (i = 0; i < IPPROTO_MAX; i++) + for (i = 0; i < IPPROTO_MAX; i++) { ip6_protox[i] = (struct ip6protosw *)pr; + } /* * Cycle through IP protocols and put them into the appropriate place * in ip6_protox[], skipping protocols IPPROTO_{IP,RAW}. @@ -342,9 +346,10 @@ ip6_init(struct ip6protosw *pp, struct domain *dp) VERIFY(pr->pr_domain == dp); if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) { /* Be careful to only index valid IP protocols. 
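On everything except i386/x86_64 the macro above re-buffers any mbuf whose payload pointer would make the stack's word-sized header loads fault. The predicate it relies on reduces to a mask test; here is a sketch, assuming (as the kernel's IP6_HDR_ALIGNED_P does) that a 4-byte boundary satisfies the 32-bit loads performed on the IPv6 header:

#include <stdalign.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when p can back 32-bit loads on a strict-alignment CPU. */
static bool
ip6_hdr_aligned(const void *p)
{
	return ((uintptr_t)p & 3) == 0;
}

int
main(void)
{
	alignas(4) char buf[8];

	/* prints "1 0": shifting one byte breaks the alignment */
	printf("%d %d\n", ip6_hdr_aligned(buf), ip6_hdr_aligned(buf + 1));
	return 0;
}

m_defrag_offset() is the expensive fallback: it copies the chain into fresh storage at a known offset, which is why the macro also bumps if_alignerrs so misbehaving drivers show up in interface statistics.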
*/ - if (pr->pr_protocol < IPPROTO_MAX) + if (pr->pr_protocol < IPPROTO_MAX) { ip6_protox[pr->pr_protocol] = (struct ip6protosw *)pr; + } } } @@ -476,7 +481,7 @@ ip6_input_adjust(struct mbuf *m, struct ip6_hdr *ip6, uint32_t plen, struct ifnet *inifp) { boolean_t adjust = TRUE; - uint32_t tot_len = sizeof (*ip6) + plen; + uint32_t tot_len = sizeof(*ip6) + plen; ASSERT(m_pktlen(m) > tot_len); @@ -503,15 +508,15 @@ ip6_input_adjust(struct mbuf *m, struct ip6_hdr *ip6, uint32_t plen, * prepended extraneous bytes (else it will do both.) */ if (ip6_adj_partial_sum && - (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID|CSUM_PARTIAL)) == - (CSUM_DATA_VALID|CSUM_PARTIAL)) { + (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) == + (CSUM_DATA_VALID | CSUM_PARTIAL)) { m->m_pkthdr.csum_rx_val = m_adj_sum16(m, m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start, (tot_len - m->m_pkthdr.csum_rx_start), m->m_pkthdr.csum_rx_val); } else if ((m->m_pkthdr.csum_flags & - (CSUM_DATA_VALID|CSUM_PARTIAL)) == - (CSUM_DATA_VALID|CSUM_PARTIAL)) { + (CSUM_DATA_VALID | CSUM_PARTIAL)) == + (CSUM_DATA_VALID | CSUM_PARTIAL)) { /* * If packet has partial checksum info and we decided not * to subtract the partial sum of postpended extraneous @@ -547,7 +552,7 @@ void ip6_input(struct mbuf *m) { struct ip6_hdr *ip6; - int off = sizeof (struct ip6_hdr), nest; + int off = sizeof(struct ip6_hdr), nest; u_int32_t plen; u_int32_t rtalert = ~0; int nxt = 0, ours = 0; @@ -565,11 +570,11 @@ ip6_input(struct mbuf *m) struct ip_fw_args args; #endif /* DUMMYNET */ } ip6ibz; -#define rin6 ip6ibz.rin6 -#define args ip6ibz.args +#define rin6 ip6ibz.rin6 +#define args ip6ibz.args /* zero out {rin6, args} */ - bzero(&ip6ibz, sizeof (ip6ibz)); + bzero(&ip6ibz, sizeof(ip6ibz)); /* * Check if the packet we received is valid after interface filter @@ -580,7 +585,7 @@ ip6_input(struct mbuf *m) VERIFY(inifp != NULL); /* Perform IP header alignment fixup, if needed */ - IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return); + IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return ); m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED; #if IPSEC @@ -600,9 +605,9 @@ ip6_input(struct mbuf *m) #if DUMMYNET if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) { - struct dn_pkt_tag *dn_tag; + struct dn_pkt_tag *dn_tag; - dn_tag = (struct dn_pkt_tag *)(tag+1); + dn_tag = (struct dn_pkt_tag *)(tag + 1); args.fwa_pf_rule = dn_tag->dn_pf_rule; @@ -633,12 +638,13 @@ ip6_input(struct mbuf *m) * mbuf statistics */ if (m->m_flags & M_EXT) { - if (m->m_next != NULL) + if (m->m_next != NULL) { ip6stat.ip6s_mext2m++; - else + } else { ip6stat.ip6s_mext1++; + } } else { -#define M2MMAX (sizeof (ip6stat.ip6s_m2m) / sizeof (ip6stat.ip6s_m2m[0])) +#define M2MMAX (sizeof (ip6stat.ip6s_m2m) / sizeof (ip6stat.ip6s_m2m[0])) if (m->m_next != NULL) { if (m->m_pkthdr.pkt_flags & PKTF_LOOP) { /* XXX */ @@ -657,8 +663,9 @@ ip6_input(struct mbuf *m) /* * Drop the packet if IPv6 operation is disabled on the interface. 
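The ip6_protox[] setup above is a classic dispatch table: every slot starts out pointing at the raw-socket handler, then each registered protocol overwrites its own slot, so an unknown next-header value still has a safe landing place. A reduced model of the idiom (handler names hypothetical):

#include <stdio.h>

#define PROTO_MAX 256
typedef void (*input_fn)(int proto);

static void raw_input(int proto)   { printf("raw input, proto %d\n", proto); }
static void icmp6_input(int proto) { (void)proto; printf("icmp6 input\n"); }

static input_fn protox[PROTO_MAX];

int
main(void)
{
	/* Default every slot, then register the specific handlers. */
	for (int i = 0; i < PROTO_MAX; i++) {
		protox[i] = raw_input;
	}
	protox[58] = icmp6_input;       /* IPPROTO_ICMPV6 */

	protox[58](58);
	protox[17](17);                 /* nothing registered: falls to raw */
	return 0;
}

The "be careful to only index valid IP protocols" comment is the same defensive idea: pr_protocol is range-checked before it is used as the array index.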
*/ - if (inifp->if_eflags & IFEF_IPV6_DISABLED) + if (inifp->if_eflags & IFEF_IPV6_DISABLED) { goto bad; + } in6_ifstat_inc_na(inifp, ifs6_in_receive); ip6stat.ip6s_total++; @@ -671,9 +678,10 @@ ip6_input(struct mbuf *m) if (m->m_next != NULL && m->m_pkthdr.len < MCLBYTES) { struct mbuf *n; - MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */ - if (n) + MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + if (n) { M_COPY_PKTHDR(n, m); + } if (n && m->m_pkthdr.len > MHLEN) { MCLGET(n, M_DONTWAIT); if ((n->m_flags & M_EXT) == 0) { @@ -681,18 +689,19 @@ ip6_input(struct mbuf *m) n = NULL; } } - if (n == NULL) + if (n == NULL) { goto bad; + } m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t)); n->m_len = m->m_pkthdr.len; m_freem(m); m = n; } - IP6_EXTHDR_CHECK(m, 0, sizeof (struct ip6_hdr), { goto done; }); + IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), { goto done; }); - if (m->m_len < sizeof (struct ip6_hdr)) { - if ((m = m_pullup(m, sizeof (struct ip6_hdr))) == 0) { + if (m->m_len < sizeof(struct ip6_hdr)) { + if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == 0) { ip6stat.ip6s_toosmall++; in6_ifstat_inc(inifp, ifs6_in_hdrerr); goto done; @@ -838,24 +847,28 @@ check_with_pf: } if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) { - if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { ip6->ip6_src.s6_addr16[1] = htons(m->m_pkthdr.src_ifindex); - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) + } + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { ip6->ip6_dst.s6_addr16[1] = htons(m->m_pkthdr.dst_ifindex); + } } else { - if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) + if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { ip6->ip6_src.s6_addr16[1] = htons(inifp->if_index); - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) + } + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { ip6->ip6_dst.s6_addr16[1] = htons(inifp->if_index); + } } /* * Multicast check */ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { - struct in6_multi *in6m = NULL; + struct in6_multi *in6m = NULL; in6_ifstat_inc_na(inifp, ifs6_in_mcast); /* @@ -890,8 +903,9 @@ check_with_pf: * No reference is held on the address, as we just need * to test for a few things while holding the RW lock. */ - if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &ip6->ip6_dst)) + if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &ip6->ip6_dst)) { break; + } } if (ia6 != NULL) { @@ -925,16 +939,17 @@ check_with_pf: * Slow path: route lookup. */ dst6 = SIN6(&rin6.ro_dst); - dst6->sin6_len = sizeof (struct sockaddr_in6); + dst6->sin6_len = sizeof(struct sockaddr_in6); dst6->sin6_family = AF_INET6; dst6->sin6_addr = ip6->ip6_dst; rtalloc_scoped_ign((struct route *)&rin6, RTF_PRCLONING, IFSCOPE_NONE); - if (rin6.ro_rt != NULL) + if (rin6.ro_rt != NULL) { RT_LOCK_SPIN(rin6.ro_rt); + } -#define rt6_key(r) (SIN6((r)->rt_nodes->rn_key)) +#define rt6_key(r) (SIN6((r)->rt_nodes->rn_key)) /* * Accept the packet if the forwarding interface to the destination @@ -957,7 +972,7 @@ check_with_pf: * reject route for such a case? */ if (rin6.ro_rt != NULL && - (rin6.ro_rt->rt_flags & (RTF_HOST|RTF_GATEWAY)) == RTF_HOST && + (rin6.ro_rt->rt_flags & (RTF_HOST | RTF_GATEWAY)) == RTF_HOST && #if RTF_WASCLONED !(rin6.ro_rt->rt_flags & RTF_WASCLONED) && #endif @@ -974,7 +989,7 @@ check_with_pf: if (!(ia6->ia6_flags & IN6_IFF_NOTREADY)) { /* this address is ready */ ours = 1; - deliverifp = ia6->ia_ifp; /* correct? */ + deliverifp = ia6->ia_ifp; /* correct? */ /* * record dst address information into mbuf. 
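The s6_addr16[1] stores above are the KAME scope trick: while a packet is inside the stack, the second 16-bit word of a link-local address (always zero on the wire) carries the interface index, which is how later lookups tell fe80::1%en0 from fe80::1%en1. A sketch using byte-level stores so it builds against the public struct in6_addr:

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static void
embed_scope(struct in6_addr *a, unsigned ifindex)
{
	/* network byte order, like the htons() calls in the kernel code */
	a->s6_addr[2] = (unsigned char)(ifindex >> 8);
	a->s6_addr[3] = (unsigned char)(ifindex & 0xff);
}

static void
clear_scope(struct in6_addr *a)
{
	a->s6_addr[2] = 0;
	a->s6_addr[3] = 0;
}

int
main(void)
{
	struct in6_addr ll;

	memset(&ll, 0, sizeof(ll));
	ll.s6_addr[0] = 0xfe;           /* fe80::1 */
	ll.s6_addr[1] = 0x80;
	ll.s6_addr[15] = 0x01;

	embed_scope(&ll, 4);            /* say, the index of en4 */
	printf("embedded word: %02x%02x\n", ll.s6_addr[2], ll.s6_addr[3]);
	clear_scope(&ll);               /* undone before userland sees it */
	return 0;
}

in6_clearscope(), seen later in ip6_savecontrol_v4(), is the matching removal step before an address is copied out.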
*/ @@ -991,8 +1006,9 @@ check_with_pf: goto bad; } - if (rin6.ro_rt != NULL) + if (rin6.ro_rt != NULL) { RT_UNLOCK(rin6.ro_rt); + } /* * Now there is no reason to process the packet if it's not our own @@ -1007,8 +1023,9 @@ check_with_pf: * For now limit it to ICMPv6 packets. */ if (inifp->if_type == IFT_CELLULAR && - ip6->ip6_nxt == IPPROTO_ICMPV6) + ip6->ip6_nxt == IPPROTO_ICMPV6) { in6_ifstat_inc(inifp, ifs6_cantfoward_icmp6); + } goto bad; } @@ -1042,10 +1059,10 @@ hbhcheck: */ m->m_pkthdr.pkt_flags |= PKTF_HBH_CHKED; if (ip6_hopopts_input(&plen, &rtalert, &m, &off)) { -#if 0 /* touches NULL pointer */ +#if 0 /* touches NULL pointer */ in6_ifstat_inc(inifp, ifs6_in_discard); #endif - goto done; /* m have already been freed */ + goto done; /* m have already been freed */ } /* adjust pointer */ @@ -1093,8 +1110,9 @@ hbhcheck: break; } } - } else + } else { nxt = ip6->ip6_nxt; + } /* * Check that the amount of data in the buffers @@ -1102,12 +1120,12 @@ hbhcheck: * Trim mbufs if longer than we expect. * Drop packet if shorter than we expect. */ - if (m->m_pkthdr.len - sizeof (struct ip6_hdr) < plen) { + if (m->m_pkthdr.len - sizeof(struct ip6_hdr) < plen) { ip6stat.ip6s_tooshort++; in6_ifstat_inc(inifp, ifs6_in_truncated); goto bad; } - if (m->m_pkthdr.len > sizeof (struct ip6_hdr) + plen) { + if (m->m_pkthdr.len > sizeof(struct ip6_hdr) + plen) { ip6_input_adjust(m, ip6, plen, inifp); } @@ -1127,8 +1145,9 @@ hbhcheck: VERIFY(!ours || (m->m_pkthdr.pkt_flags & PKTF_PROXY_DST)); } - if (!ours) + if (!ours) { goto bad; + } } else if (!ours) { /* * The unicast forwarding function might return the packet @@ -1142,8 +1161,9 @@ hbhcheck: * proxied nodes on different links (src is link-local, dst * is target address.) */ - if ((m = ip6_forward(m, &rin6, 0)) == NULL) + if ((m = ip6_forward(m, &rin6, 0)) == NULL) { goto done; + } VERIFY(rin6.ro_rt != NULL); VERIFY(m->m_pkthdr.pkt_flags & PKTF_PROXY_DST); deliverifp = rin6.ro_rt->rt_ifp; @@ -1182,7 +1202,7 @@ injectit: * we do it once for the outermost protocol, and we assume each * protocol handler wouldn't mess with the alignment afterwards. 
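The payload-length checks above enforce one simple contract: the mbuf chain must hold at least the ip6_plen bytes the header claims, and anything beyond that (padding appended by a driver, for instance) is trimmed rather than delivered. Here is the rule in isolation, assuming the caller has already pulled up the 40-byte fixed header:

#include <stddef.h>

#define IP6_HDR_LEN 40          /* sizeof(struct ip6_hdr) */

enum verdict { DELIVER, TRIM_THEN_DELIVER, DROP };

/* pktlen: bytes actually in the chain (>= IP6_HDR_LEN);
 * plen:   payload length claimed by the IPv6 header. */
static enum verdict
ip6_len_check(size_t pktlen, size_t plen)
{
	if (pktlen - IP6_HDR_LEN < plen) {
		return DROP;                    /* truncated packet */
	}
	if (pktlen > IP6_HDR_LEN + plen) {
		return TRIM_THEN_DELIVER;       /* trailing driver padding */
	}
	return DELIVER;
}

ip6_input_adjust(), earlier in this file, is the TRIM branch; it also patches up any partial hardware checksum so the trimmed bytes stop contributing to it.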
*/ - IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return); + IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return ); while (nxt != IPPROTO_DONE) { struct ipfilter *filter; @@ -1240,14 +1260,15 @@ injectit: TAILQ_FOREACH(filter, &ipv6_filters, ipf_link) { if (seen == 0) { if ((struct ipfilter *)inject_ipfref == - filter) + filter) { seen = 1; + } } else if (filter->ipf_filter.ipf_input) { errno_t result; result = filter->ipf_filter.ipf_input( - filter->ipf_filter.cookie, - (mbuf_t *)&m, off, nxt); + filter->ipf_filter.cookie, + (mbuf_t *)&m, off, nxt); if (result == EJUSTRETURN) { ipf_unref(); goto done; @@ -1304,8 +1325,9 @@ ip6_setsrcifaddr_info(struct mbuf *m, uint32_t src_idx, struct in6_ifaddr *ia6) } else { m->m_pkthdr.src_iff = 0; m->m_pkthdr.src_ifindex = src_idx; - if (src_idx != 0) + if (src_idx != 0) { m->m_pkthdr.pkt_flags |= PKTF_IFAINFO; + } } } @@ -1328,8 +1350,9 @@ ip6_setdstifaddr_info(struct mbuf *m, uint32_t dst_idx, struct in6_ifaddr *ia6) } else { m->m_pkthdr.dst_iff = 0; m->m_pkthdr.dst_ifindex = dst_idx; - if (dst_idx != 0) + if (dst_idx != 0) { m->m_pkthdr.pkt_flags |= PKTF_IFAINFO; + } } } @@ -1338,16 +1361,19 @@ ip6_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *ia6f) { VERIFY(m->m_flags & M_PKTHDR); - if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) - return (-1); + if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) { + return -1; + } - if (src_idx != NULL) + if (src_idx != NULL) { *src_idx = m->m_pkthdr.src_ifindex; + } - if (ia6f != NULL) + if (ia6f != NULL) { *ia6f = m->m_pkthdr.src_iff; + } - return (0); + return 0; } int @@ -1355,16 +1381,19 @@ ip6_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *ia6f) { VERIFY(m->m_flags & M_PKTHDR); - if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) - return (-1); + if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) { + return -1; + } - if (dst_idx != NULL) + if (dst_idx != NULL) { *dst_idx = m->m_pkthdr.dst_ifindex; + } - if (ia6f != NULL) + if (ia6f != NULL) { *ia6f = m->m_pkthdr.dst_iff; + } - return (0); + return 0; } /* @@ -1381,23 +1410,24 @@ ip6_hopopts_input(uint32_t *plenp, uint32_t *rtalertp, struct mbuf **mp, u_int8_t *opt; /* validation of the length of the header */ - IP6_EXTHDR_CHECK(m, off, sizeof (*hbh), return (-1)); + IP6_EXTHDR_CHECK(m, off, sizeof(*hbh), return (-1)); hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off); hbhlen = (hbh->ip6h_len + 1) << 3; IP6_EXTHDR_CHECK(m, off, hbhlen, return (-1)); hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off); off += hbhlen; - hbhlen -= sizeof (struct ip6_hbh); - opt = (u_int8_t *)hbh + sizeof (struct ip6_hbh); + hbhlen -= sizeof(struct ip6_hbh); + opt = (u_int8_t *)hbh + sizeof(struct ip6_hbh); - if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof (struct ip6_hbh), - hbhlen, rtalertp, plenp) < 0) - return (-1); + if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof(struct ip6_hbh), + hbhlen, rtalertp, plenp) < 0) { + return -1; + } *offp = off; *mp = m; - return (0); + return 0; } /* @@ -1412,14 +1442,14 @@ ip6_hopopts_input(uint32_t *plenp, uint32_t *rtalertp, struct mbuf **mp, */ int ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, - u_int32_t *rtalertp, u_int32_t *plenp) + u_int32_t *rtalertp, u_int32_t *plenp) { struct ip6_hdr *ip6; int optlen = 0; u_int8_t *opt = opthead; u_int16_t rtalert_val; u_int32_t jumboplen; - const int erroff = sizeof (struct ip6_hdr) + sizeof (struct ip6_hbh); + const int erroff = sizeof(struct ip6_hdr) + sizeof(struct ip6_hbh); for (; hbhlen > 0; hbhlen -= optlen, opt += optlen) { switch (*opt) { @@ -1442,9 +1472,9 @@ 
ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, if (*(opt + 1) != IP6OPT_RTALERT_LEN - 2) { /* XXX stat */ icmp6_error(m, ICMP6_PARAM_PROB, - ICMP6_PARAMPROB_HEADER, - erroff + opt + 1 - opthead); - return (-1); + ICMP6_PARAMPROB_HEADER, + erroff + opt + 1 - opthead); + return -1; } optlen = IP6OPT_RTALERT_LEN; bcopy((caddr_t)(opt + 2), (caddr_t)&rtalert_val, 2); @@ -1459,9 +1489,9 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, if (*(opt + 1) != IP6OPT_JUMBO_LEN - 2) { /* XXX stat */ icmp6_error(m, ICMP6_PARAM_PROB, - ICMP6_PARAMPROB_HEADER, - erroff + opt + 1 - opthead); - return (-1); + ICMP6_PARAMPROB_HEADER, + erroff + opt + 1 - opthead); + return -1; } optlen = IP6OPT_JUMBO_LEN; @@ -1473,16 +1503,16 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, if (ip6->ip6_plen) { ip6stat.ip6s_badoptions++; icmp6_error(m, ICMP6_PARAM_PROB, - ICMP6_PARAMPROB_HEADER, - erroff + opt - opthead); - return (-1); + ICMP6_PARAMPROB_HEADER, + erroff + opt - opthead); + return -1; } /* * We may see jumbolen in unaligned location, so * we'd need to perform bcopy(). */ - bcopy(opt + 2, &jumboplen, sizeof (jumboplen)); + bcopy(opt + 2, &jumboplen, sizeof(jumboplen)); jumboplen = (u_int32_t)htonl(jumboplen); #if 1 @@ -1497,9 +1527,9 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, if (*plenp != 0) { ip6stat.ip6s_badoptions++; icmp6_error(m, ICMP6_PARAM_PROB, - ICMP6_PARAMPROB_HEADER, - erroff + opt + 2 - opthead); - return (-1); + ICMP6_PARAMPROB_HEADER, + erroff + opt + 2 - opthead); + return -1; } #endif @@ -1509,14 +1539,14 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, if (jumboplen <= IPV6_MAXPACKET) { ip6stat.ip6s_badoptions++; icmp6_error(m, ICMP6_PARAM_PROB, - ICMP6_PARAMPROB_HEADER, - erroff + opt + 2 - opthead); - return (-1); + ICMP6_PARAMPROB_HEADER, + erroff + opt + 2 - opthead); + return -1; } *plenp = jumboplen; break; - default: /* unknown option */ + default: /* unknown option */ if (hbhlen < IP6OPT_MINLEN) { ip6stat.ip6s_toosmall++; goto bad; @@ -1524,18 +1554,18 @@ ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen, optlen = ip6_unknown_opt(opt, m, erroff + opt - opthead); if (optlen == -1) { - return (-1); + return -1; } optlen += 2; break; } } - return (0); + return 0; bad: m_freem(m); - return (-1); + return -1; } /* @@ -1551,32 +1581,32 @@ ip6_unknown_opt(uint8_t *optp, struct mbuf *m, int off) switch (IP6OPT_TYPE(*optp)) { case IP6OPT_TYPE_SKIP: /* ignore the option */ - return ((int)*(optp + 1)); + return (int)*(optp + 1); - case IP6OPT_TYPE_DISCARD: /* silently discard */ + case IP6OPT_TYPE_DISCARD: /* silently discard */ m_freem(m); - return (-1); + return -1; case IP6OPT_TYPE_FORCEICMP: /* send ICMP even if multicasted */ ip6stat.ip6s_badoptions++; icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off); - return (-1); + return -1; case IP6OPT_TYPE_ICMP: /* send ICMP if not multicasted */ ip6stat.ip6s_badoptions++; ip6 = mtod(m, struct ip6_hdr *); if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || - (m->m_flags & (M_BCAST|M_MCAST))) { + (m->m_flags & (M_BCAST | M_MCAST))) { m_freem(m); } else { icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off); } - return (-1); + return -1; } - m_freem(m); /* XXX: NOTREACHED */ - return (-1); + m_freem(m); /* XXX: NOTREACHED */ + return -1; } /* @@ -1604,39 +1634,43 @@ ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp, struct timeval tv; getmicrotime(&tv); - mp = sbcreatecontrol_mbuf((caddr_t)&tv, 
sizeof (tv), + mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof(tv), SCM_TIMESTAMP, SOL_SOCKET, mp); - if (*mp == NULL) - return (NULL); + if (*mp == NULL) { + return NULL; + } } if ((inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) { uint64_t time; time = mach_absolute_time(); - mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof (time), + mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time), SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp); - if (*mp == NULL) - return (NULL); + if (*mp == NULL) { + return NULL; + } } if ((inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { uint64_t time; time = mach_continuous_time(); - mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof (time), - SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp); - if (*mp == NULL) - return (NULL); + mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time), + SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp); + if (*mp == NULL) { + return NULL; + } } if ((inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) != 0) { int tc = m_get_traffic_class(m); - mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof (tc), + mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof(tc), SO_TRAFFIC_CLASS, SOL_SOCKET, mp); - if (*mp == NULL) - return (NULL); + if (*mp == NULL) { + return NULL; + } } -#define IS2292(inp, x, y) (((inp)->inp_flags & IN6P_RFC2292) ? (x) : (y)) +#define IS2292(inp, x, y) (((inp)->inp_flags & IN6P_RFC2292) ? (x) : (y)) if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) { if (v4only != NULL) { *v4only = 1; @@ -1648,9 +1682,10 @@ ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp, u_int8_t tos = (ip_header->ip_tos & IPTOS_ECN_MASK); mp = sbcreatecontrol_mbuf((caddr_t)&tos, sizeof(tos), - IPV6_TCLASS, IPPROTO_IPV6, mp); - if (*mp == NULL) - return (NULL); + IPV6_TCLASS, IPPROTO_IPV6, mp); + if (*mp == NULL) { + return NULL; + } } // Send IN6P_PKTINFO for v4-mapped address @@ -1664,45 +1699,49 @@ ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp, bcopy(&ip_header->ip_dst, &pi6.ipi6_addr.s6_addr32[3], sizeof(struct in_addr)); mp = sbcreatecontrol_mbuf((caddr_t)&pi6, - sizeof (struct in6_pktinfo), - IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO), - IPPROTO_IPV6, mp); - if (*mp == NULL) - return (NULL); + sizeof(struct in6_pktinfo), + IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO), + IPPROTO_IPV6, mp); + if (*mp == NULL) { + return NULL; + } } - return (mp); + return mp; } /* RFC 2292 sec. 5 */ if ((inp->inp_flags & IN6P_PKTINFO) != 0) { struct in6_pktinfo pi6; - bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof (struct in6_addr)); - in6_clearscope(&pi6.ipi6_addr); /* XXX */ + bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof(struct in6_addr)); + in6_clearscope(&pi6.ipi6_addr); /* XXX */ pi6.ipi6_ifindex = (m && m->m_pkthdr.rcvif) ? 
m->m_pkthdr.rcvif->if_index : 0; mp = sbcreatecontrol_mbuf((caddr_t)&pi6, - sizeof (struct in6_pktinfo), + sizeof(struct in6_pktinfo), IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO), IPPROTO_IPV6, mp); - if (*mp == NULL) - return (NULL); + if (*mp == NULL) { + return NULL; + } } if ((inp->inp_flags & IN6P_HOPLIMIT) != 0) { int hlim = ip6->ip6_hlim & 0xff; - mp = sbcreatecontrol_mbuf((caddr_t)&hlim, sizeof (int), + mp = sbcreatecontrol_mbuf((caddr_t)&hlim, sizeof(int), IS2292(inp, IPV6_2292HOPLIMIT, IPV6_HOPLIMIT), IPPROTO_IPV6, mp); - if (*mp == NULL) - return (NULL); + if (*mp == NULL) { + return NULL; + } } - if (v4only != NULL) + if (v4only != NULL) { *v4only = 0; - return (mp); + } + return mp; } int @@ -1714,12 +1753,14 @@ ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp) *mp = NULL; np = ip6_savecontrol_v4(in6p, m, mp, &v4only); - if (np == NULL) + if (np == NULL) { goto no_mbufs; + } mp = np; - if (v4only) - return (0); + if (v4only) { + return 0; + } if ((in6p->inp_flags & IN6P_TCLASS) != 0) { u_int32_t flowinfo; @@ -1729,10 +1770,11 @@ ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp) flowinfo >>= 20; tclass = flowinfo & 0xff; - mp = sbcreatecontrol_mbuf((caddr_t)&tclass, sizeof (tclass), + mp = sbcreatecontrol_mbuf((caddr_t)&tclass, sizeof(tclass), IPV6_TCLASS, IPPROTO_IPV6, mp); - if (*mp == NULL) + if (*mp == NULL) { goto no_mbufs; + } } /* @@ -1775,7 +1817,7 @@ ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp) } if ((in6p->inp_flags & (IN6P_RTHDR | IN6P_DSTOPTS)) != 0) { - int nxt = ip6->ip6_nxt, off = sizeof (struct ip6_hdr); + int nxt = ip6->ip6_nxt, off = sizeof(struct ip6_hdr); /* * Search for destination options headers or routing @@ -1784,7 +1826,7 @@ ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp) * Note that the order of the headers remains in * the chain of ancillary data. */ - while (1) { /* is explicit loop prevention necessary? */ + while (1) { /* is explicit loop prevention necessary? */ struct ip6_ext *ip6e = NULL; int elen; @@ -1802,20 +1844,24 @@ ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp) goto loopend; } - if (off + sizeof (*ip6e) > m->m_len) + if (off + sizeof(*ip6e) > m->m_len) { goto loopend; + } ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + off); - if (nxt == IPPROTO_AH) + if (nxt == IPPROTO_AH) { elen = (ip6e->ip6e_len + 2) << 2; - else + } else { elen = (ip6e->ip6e_len + 1) << 3; - if (off + elen > m->m_len) + } + if (off + elen > m->m_len) { goto loopend; + } switch (nxt) { case IPPROTO_DSTOPTS: - if (!(in6p->inp_flags & IN6P_DSTOPTS)) + if (!(in6p->inp_flags & IN6P_DSTOPTS)) { break; + } mp = sbcreatecontrol_mbuf((caddr_t)ip6e, elen, IS2292(in6p, IPV6_2292DSTOPTS, @@ -1825,8 +1871,9 @@ ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp) } break; case IPPROTO_ROUTING: - if (!(in6p->inp_flags & IN6P_RTHDR)) + if (!(in6p->inp_flags & IN6P_RTHDR)) { break; + } mp = sbcreatecontrol_mbuf((caddr_t)ip6e, elen, IS2292(in6p, IPV6_2292RTHDR, IPV6_RTHDR), @@ -1847,7 +1894,6 @@ ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp) * other cases). */ goto loopend; - } /* proceed with the next header. 
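Everything ip6_savecontrol() appends ends up as ancillary data on the user's recvmsg() call. A sketch of the receiving side, using the RFC 3542 names the IS2292 macro selects for non-2292 sockets (on macOS these require defining __APPLE_USE_RFC_3542 before including <netinet/in.h>); error handling is trimmed:

#define __APPLE_USE_RFC_3542 1
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static void
recv_with_pktinfo(int s)
{
	char data[1500], cbuf[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	int on = 1;

	/* Ask the kernel to build the IN6P_PKTINFO control record. */
	setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));

	if (recvmsg(s, &msg, 0) < 0) {
		return;
	}
	for (struct cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm != NULL;
	    cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IPV6 &&
		    cm->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo pi;

			/* cmsg payloads may be unaligned: copy, don't cast */
			memcpy(&pi, CMSG_DATA(cm), sizeof(pi));
			printf("arrived on ifindex %u\n", pi.ipi6_ifindex);
		}
	}
}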
*/ @@ -1858,11 +1904,11 @@ ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp) loopend: ; } - return (0); + return 0; no_mbufs: ip6stat.ip6s_pktdropcntrl++; /* XXX increment a stat to show the failure */ - return (ENOBUFS); + return ENOBUFS; } #undef IS2292 @@ -1875,36 +1921,42 @@ ip6_notify_pmtu(struct inpcb *in6p, struct sockaddr_in6 *dst, u_int32_t *mtu) so = in6p->inp_socket; - if ((in6p->inp_flags & IN6P_MTU) == 0) + if ((in6p->inp_flags & IN6P_MTU) == 0) { return; + } - if (mtu == NULL) + if (mtu == NULL) { return; + } #ifdef DIAGNOSTIC - if (so == NULL) { /* I believe this is impossible */ + if (so == NULL) { /* I believe this is impossible */ panic("ip6_notify_pmtu: socket is NULL"); /* NOTREACHED */ } #endif if (IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) && - (so->so_proto == NULL || so->so_proto->pr_protocol == IPPROTO_TCP)) + (so->so_proto == NULL || so->so_proto->pr_protocol == IPPROTO_TCP)) { return; + } if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) && - !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &dst->sin6_addr)) + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &dst->sin6_addr)) { return; + } - bzero(&mtuctl, sizeof (mtuctl)); /* zero-clear for safety */ + bzero(&mtuctl, sizeof(mtuctl)); /* zero-clear for safety */ mtuctl.ip6m_mtu = *mtu; mtuctl.ip6m_addr = *dst; - if (sa6_recoverscope(&mtuctl.ip6m_addr, TRUE)) + if (sa6_recoverscope(&mtuctl.ip6m_addr, TRUE)) { return; + } - if ((m_mtu = sbcreatecontrol((caddr_t)&mtuctl, sizeof (mtuctl), - IPV6_PATHMTU, IPPROTO_IPV6)) == NULL) + if ((m_mtu = sbcreatecontrol((caddr_t)&mtuctl, sizeof(mtuctl), + IPV6_PATHMTU, IPPROTO_IPV6)) == NULL) { return; + } if (sbappendaddr(&so->so_rcv, SA(dst), NULL, m_mtu, NULL) == 0) { m_freem(m_mtu); @@ -1931,20 +1983,20 @@ ip6_get_prevhdr(struct mbuf *m, int off) { struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); - if (off == sizeof (struct ip6_hdr)) { - return ((char *)&ip6->ip6_nxt); + if (off == sizeof(struct ip6_hdr)) { + return (char *)&ip6->ip6_nxt; } else { int len, nxt; struct ip6_ext *ip6e = NULL; nxt = ip6->ip6_nxt; - len = sizeof (struct ip6_hdr); + len = sizeof(struct ip6_hdr); while (len < off) { ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + len); switch (nxt) { case IPPROTO_FRAGMENT: - len += sizeof (struct ip6_frag); + len += sizeof(struct ip6_frag); break; case IPPROTO_AH: len += (ip6e->ip6e_len + 2) << 2; @@ -1955,10 +2007,11 @@ ip6_get_prevhdr(struct mbuf *m, int off) } nxt = ip6e->ip6e_nxt; } - if (ip6e) - return ((char *)&ip6e->ip6e_nxt); - else - return (NULL); + if (ip6e) { + return (char *)&ip6e->ip6e_nxt; + } else { + return NULL; + } } } @@ -1974,63 +2027,73 @@ ip6_nexthdr(struct mbuf *m, int off, int proto, int *nxtp) /* just in case */ VERIFY(m != NULL); - if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < off) - return (-1); + if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < off) { + return -1; + } switch (proto) { case IPPROTO_IPV6: - if (m->m_pkthdr.len < off + sizeof (ip6)) - return (-1); - m_copydata(m, off, sizeof (ip6), (caddr_t)&ip6); - if (nxtp) + if (m->m_pkthdr.len < off + sizeof(ip6)) { + return -1; + } + m_copydata(m, off, sizeof(ip6), (caddr_t)&ip6); + if (nxtp) { *nxtp = ip6.ip6_nxt; - off += sizeof (ip6); - return (off); + } + off += sizeof(ip6); + return off; case IPPROTO_FRAGMENT: /* * terminate parsing if it is not the first fragment, * it does not make sense to parse through it. 
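ip6_nexthdr()'s length arithmetic here is compact enough to misread: AH measures ip6e_len in 4-byte units with an implicit +2, every other extension header uses 8-byte units with an implicit +1, the Fragment header has a fixed size, and a nonzero fragment offset ends parsing because the later headers live in another fragment. The same rules in isolation:

#include <stdint.h>

#define PROTO_FRAGMENT 44       /* IPPROTO_FRAGMENT */
#define PROTO_AH       51       /* IPPROTO_AH */

/* Advance past one extension header: returns the next offset, or -1
 * when parsing must stop. ip6e_len is the header's own length byte;
 * frag_off is the masked fragment offset. */
static int
skip_ext_hdr(int off, uint8_t proto, uint8_t ip6e_len, uint16_t frag_off)
{
	switch (proto) {
	case PROTO_FRAGMENT:
		if (frag_off != 0) {
			return -1;      /* not the first fragment: give up */
		}
		return off + 8;         /* sizeof(struct ip6_frag) */
	case PROTO_AH:
		return off + ((ip6e_len + 2) << 2);     /* 4-byte units */
	default:
		return off + ((ip6e_len + 1) << 3);     /* 8-byte units */
	}
}

ip6_lasthdr() then just iterates this step, treating a non-increasing offset as corruption, until ip6_nexthdr() reports it cannot continue.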
*/ - if (m->m_pkthdr.len < off + sizeof (fh)) - return (-1); - m_copydata(m, off, sizeof (fh), (caddr_t)&fh); + if (m->m_pkthdr.len < off + sizeof(fh)) { + return -1; + } + m_copydata(m, off, sizeof(fh), (caddr_t)&fh); /* IP6F_OFF_MASK = 0xfff8(BigEndian), 0xf8ff(LittleEndian) */ - if (fh.ip6f_offlg & IP6F_OFF_MASK) - return (-1); - if (nxtp) + if (fh.ip6f_offlg & IP6F_OFF_MASK) { + return -1; + } + if (nxtp) { *nxtp = fh.ip6f_nxt; - off += sizeof (struct ip6_frag); - return (off); + } + off += sizeof(struct ip6_frag); + return off; case IPPROTO_AH: - if (m->m_pkthdr.len < off + sizeof (ip6e)) - return (-1); - m_copydata(m, off, sizeof (ip6e), (caddr_t)&ip6e); - if (nxtp) + if (m->m_pkthdr.len < off + sizeof(ip6e)) { + return -1; + } + m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e); + if (nxtp) { *nxtp = ip6e.ip6e_nxt; + } off += (ip6e.ip6e_len + 2) << 2; - return (off); + return off; case IPPROTO_HOPOPTS: case IPPROTO_ROUTING: case IPPROTO_DSTOPTS: - if (m->m_pkthdr.len < off + sizeof (ip6e)) - return (-1); - m_copydata(m, off, sizeof (ip6e), (caddr_t)&ip6e); - if (nxtp) + if (m->m_pkthdr.len < off + sizeof(ip6e)) { + return -1; + } + m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e); + if (nxtp) { *nxtp = ip6e.ip6e_nxt; + } off += (ip6e.ip6e_len + 1) << 3; - return (off); + return off; case IPPROTO_NONE: case IPPROTO_ESP: case IPPROTO_IPCOMP: /* give up */ - return (-1); + return -1; default: - return (-1); + return -1; } } @@ -2049,12 +2112,13 @@ ip6_lasthdr(struct mbuf *m, int off, int proto, int *nxtp) } while (1) { newoff = ip6_nexthdr(m, off, proto, nxtp); - if (newoff < 0) - return (off); - else if (newoff < off) - return (-1); /* invalid */ - else if (newoff == off) - return (newoff); + if (newoff < 0) { + return off; + } else if (newoff < off) { + return -1; /* invalid */ + } else if (newoff == off) { + return newoff; + } off = newoff; proto = *nxtp; @@ -2064,7 +2128,7 @@ ip6_lasthdr(struct mbuf *m, int off, int proto, int *nxtp) struct ip6aux * ip6_addaux(struct mbuf *m) { - struct m_tag *tag; + struct m_tag *tag; /* Check if one is already allocated */ tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, @@ -2072,7 +2136,7 @@ ip6_addaux(struct mbuf *m) if (tag == NULL) { /* Allocate a tag */ tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_INET6, - sizeof (struct ip6aux), M_DONTWAIT, m); + sizeof(struct ip6aux), M_DONTWAIT, m); /* Attach it to the mbuf */ if (tag) { @@ -2080,24 +2144,24 @@ ip6_addaux(struct mbuf *m) } } - return (tag ? (struct ip6aux *)(tag + 1) : NULL); + return tag ? (struct ip6aux *)(tag + 1) : NULL; } struct ip6aux * ip6_findaux(struct mbuf *m) { - struct m_tag *tag; + struct m_tag *tag; tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_INET6, NULL); - return (tag ? (struct ip6aux *)(tag + 1) : NULL); + return tag ? 
(struct ip6aux *)(tag + 1) : NULL; } void ip6_delaux(struct mbuf *m) { - struct m_tag *tag; + struct m_tag *tag; tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_INET6, NULL); @@ -2112,21 +2176,21 @@ ip6_delaux(struct mbuf *m) void ip6_drain(void) { - frag6_drain(); /* fragments */ - in6_rtqdrain(); /* protocol cloned routes */ - nd6_drain(NULL); /* cloned routes: ND6 */ + frag6_drain(); /* fragments */ + in6_rtqdrain(); /* protocol cloned routes */ + nd6_drain(NULL); /* cloned routes: ND6 */ } /* * System control for IP6 */ -u_char inet6ctlerrmap[PRC_NCMDS] = { - 0, 0, 0, 0, - 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, - EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, - EMSGSIZE, EHOSTUNREACH, 0, 0, - 0, 0, 0, 0, +u_char inet6ctlerrmap[PRC_NCMDS] = { + 0, 0, 0, 0, + 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH, + EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED, + EMSGSIZE, EHOSTUNREACH, 0, 0, + 0, 0, 0, 0, ENOPROTOOPT }; @@ -2138,8 +2202,9 @@ sysctl_reset_ip6_input_stats SYSCTL_HANDLER_ARGS i = ip6_input_measure; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* impose bounds */ if (i < 0 || i > 1) { error = EINVAL; @@ -2150,7 +2215,7 @@ sysctl_reset_ip6_input_stats SYSCTL_HANDLER_ARGS } ip6_input_measure = i; done: - return (error); + return error; } static int @@ -2162,8 +2227,9 @@ sysctl_ip6_input_measure_bins SYSCTL_HANDLER_ARGS i = ip6_input_measure_bins; error = sysctl_handle_quad(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* validate data */ if (!net_perf_validate_bins(i)) { error = EINVAL; @@ -2171,15 +2237,16 @@ sysctl_ip6_input_measure_bins SYSCTL_HANDLER_ARGS } ip6_input_measure_bins = i; done: - return (error); + return error; } static int sysctl_ip6_input_getperf SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = (size_t)sizeof (struct ipstat); + if (req->oldptr == USER_ADDR_NULL) { + req->oldlen = (size_t)sizeof(struct ipstat); + } - return (SYSCTL_OUT(req, &net_perf, MIN(sizeof (net_perf), req->oldlen))); + return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen)); } diff --git a/bsd/netinet6/ip6_output.c b/bsd/netinet6/ip6_output.c index 0720d6809..a468a93f6 100644 --- a/bsd/netinet6/ip6_output.c +++ b/bsd/netinet6/ip6_output.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -192,53 +192,53 @@ SYSCTL_DECL(_net_inet6_ip6); static int ip6_output_measure = 0; SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, output_perf, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &ip6_output_measure, 0, sysctl_reset_ip6_output_stats, "I", "Do time measurement"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ip6_output_measure, 0, sysctl_reset_ip6_output_stats, "I", "Do time measurement"); static uint64_t ip6_output_measure_bins = 0; SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, output_perf_bins, - CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_output_measure_bins, 0, - sysctl_ip6_output_measure_bins, "I", - "bins for chaining performance data histogram"); + CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_output_measure_bins, 0, + sysctl_ip6_output_measure_bins, "I", + "bins for chaining performance data histogram"); static net_perf_t net_perf; SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, output_perf_data, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, sysctl_ip6_output_getperf, "S,net_perf", - "IP6 output performance data (struct net_perf, net/net_perf.h)"); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, sysctl_ip6_output_getperf, "S,net_perf", + "IP6 output performance data (struct net_perf, net/net_perf.h)"); -#define IM6O_TRACE_HIST_SIZE 32 /* size of trace history */ +#define IM6O_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ __private_extern__ unsigned int im6o_trace_hist_size = IM6O_TRACE_HIST_SIZE; struct ip6_moptions_dbg { - struct ip6_moptions im6o; /* ip6_moptions */ - u_int16_t im6o_refhold_cnt; /* # of IM6O_ADDREF */ - u_int16_t im6o_refrele_cnt; /* # of IM6O_REMREF */ + struct ip6_moptions im6o; /* ip6_moptions */ + u_int16_t im6o_refhold_cnt; /* # of IM6O_ADDREF */ + u_int16_t im6o_refrele_cnt; /* # of IM6O_REMREF */ /* * Alloc and free callers. */ - ctrace_t im6o_alloc; - ctrace_t im6o_free; + ctrace_t im6o_alloc; + ctrace_t im6o_free; /* * Circular lists of IM6O_ADDREF and IM6O_REMREF callers. 
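The ip6_moptions_dbg structure in this hunk keeps circular histories of IM6O_ADDREF and IM6O_REMREF callers, so the most recent IM6O_TRACE_HIST_SIZE events of each kind survive for post-mortem inspection. A minimal sketch of that ring idea, with a hypothetical trace_record() standing in for xnu's ctrace machinery:

    #include <stdint.h>

    #define TRACE_HIST_SIZE 32          /* mirrors IM6O_TRACE_HIST_SIZE */

    struct ref_trace {
        void     *caller[TRACE_HIST_SIZE];  /* oldest entry is overwritten */
        uint16_t  next;                     /* monotonic; wraps via modulo */
    };

    static void
    trace_record(struct ref_trace *rt, void *pc)
    {
        rt->caller[rt->next % TRACE_HIST_SIZE] = pc;
        rt->next++;     /* 65536 is a multiple of 32, so wrap is seamless */
    }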
*/ - ctrace_t im6o_refhold[IM6O_TRACE_HIST_SIZE]; - ctrace_t im6o_refrele[IM6O_TRACE_HIST_SIZE]; + ctrace_t im6o_refhold[IM6O_TRACE_HIST_SIZE]; + ctrace_t im6o_refrele[IM6O_TRACE_HIST_SIZE]; }; #if DEBUG -static unsigned int im6o_debug = 1; /* debugging (enabled) */ +static unsigned int im6o_debug = 1; /* debugging (enabled) */ #else -static unsigned int im6o_debug; /* debugging (disabled) */ +static unsigned int im6o_debug; /* debugging (disabled) */ #endif /* !DEBUG */ -static unsigned int im6o_size; /* size of zone element */ -static struct zone *im6o_zone; /* zone for ip6_moptions */ +static unsigned int im6o_size; /* size of zone element */ +static struct zone *im6o_zone; /* zone for ip6_moptions */ -#define IM6O_ZONE_MAX 64 /* maximum elements in zone */ -#define IM6O_ZONE_NAME "ip6_moptions" /* zone name */ +#define IM6O_ZONE_MAX 64 /* maximum elements in zone */ +#define IM6O_ZONE_NAME "ip6_moptions" /* zone name */ /* * ip6_output() calls ip6_output_list() to do the work @@ -273,7 +273,7 @@ ip6_output_list(struct mbuf *m0, int packetchain, struct ip6_pktopts *opt, { struct ip6_hdr *ip6; u_char *nexthdrp; - struct ifnet *ifp = NULL, *origifp = NULL; /* refcnt'd */ + struct ifnet *ifp = NULL, *origifp = NULL; /* refcnt'd */ struct ifnet **ifpp_save = ifpp; struct mbuf *m, *mprev; struct mbuf *sendchain = NULL, *sendchain_last = NULL; @@ -327,14 +327,14 @@ ip6_output_list(struct mbuf *m0, int packetchain, struct ip6_pktopts *opt, struct ip_fw_args args; #endif /* DUMMYNET */ } ip6obz; -#define ipf_pktopts ip6obz.ipf_pktopts -#define exthdrs ip6obz.exthdrs -#define ip6route ip6obz.ip6route -#define ipsec_state ip6obz.ipsec_state -#define necp_route ip6obz.necp_route -#define saved_route ip6obz.saved_route -#define saved_ro_pmtu ip6obz.saved_ro_pmtu -#define args ip6obz.args +#define ipf_pktopts ip6obz.ipf_pktopts +#define exthdrs ip6obz.exthdrs +#define ip6route ip6obz.ip6route +#define ipsec_state ip6obz.ipsec_state +#define necp_route ip6obz.necp_route +#define saved_route ip6obz.saved_route +#define saved_ro_pmtu ip6obz.saved_ro_pmtu +#define args ip6obz.args union { struct { boolean_t select_srcif : 1; @@ -349,22 +349,24 @@ ip6_output_list(struct mbuf *m0, int packetchain, struct ip6_pktopts *opt, uint32_t raw; } ip6obf = { .raw = 0 }; - if (ip6_output_measure) + if (ip6_output_measure) { net_perf_start_time(&net_perf, &start_tv); + } VERIFY(m0->m_flags & M_PKTHDR); /* zero out {saved_route, saved_ro_pmtu, ip6route, exthdrs, args} */ - bzero(&ip6obz, sizeof (ip6obz)); + bzero(&ip6obz, sizeof(ip6obz)); #if DUMMYNET - if (SLIST_EMPTY(&m0->m_pkthdr.tags)) + if (SLIST_EMPTY(&m0->m_pkthdr.tags)) { goto tags_done; + } /* Grab info from mtags prepended to the chain */ if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) { - struct dn_pkt_tag *dn_tag; + struct dn_pkt_tag *dn_tag; /* * ip6_output_list() cannot handle chains of packets reinjected @@ -373,14 +375,15 @@ ip6_output_list(struct mbuf *m0, int packetchain, struct ip6_pktopts *opt, */ VERIFY(0 == packetchain); - dn_tag = (struct dn_pkt_tag *)(tag+1); + dn_tag = (struct dn_pkt_tag *)(tag + 1); args.fwa_pf_rule = dn_tag->dn_pf_rule; - bcopy(&dn_tag->dn_dst6, &dst_buf, sizeof (dst_buf)); + bcopy(&dn_tag->dn_dst6, &dst_buf, sizeof(dst_buf)); dst = &dst_buf; ifp = dn_tag->dn_ifp; - if (ifp != NULL) + if (ifp != NULL) { ifnet_reference(ifp); + } flags = dn_tag->dn_flags; if (dn_tag->dn_flags & IPV6_OUTARGS) { saved_ip6oa = dn_tag->dn_ip6oa; @@ -392,13 +395,14 @@ ip6_output_list(struct mbuf *m0, int 
packetchain, struct ip6_pktopts *opt, saved_ro_pmtu = dn_tag->dn_ro6_pmtu; ro_pmtu = &saved_ro_pmtu; origifp = dn_tag->dn_origifp; - if (origifp != NULL) + if (origifp != NULL) { ifnet_reference(origifp); + } mtu = dn_tag->dn_mtu; alwaysfrag = (dn_tag->dn_alwaysfrag != 0); unfragpartlen = dn_tag->dn_unfragpartlen; - bcopy(&dn_tag->dn_exthdrs, &exthdrs, sizeof (exthdrs)); + bcopy(&dn_tag->dn_exthdrs, &exthdrs, sizeof(exthdrs)); m_tag_delete(m0, tag); } @@ -416,14 +420,15 @@ tags_done: } /* If packet is bound to an interface, check bound policies */ if ((flags & IPV6_OUTARGS) && - (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) && - ip6oa->ip6oa_boundif != IFSCOPE_NONE) { + (ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) && + ip6oa->ip6oa_boundif != IFSCOPE_NONE) { /* ip6obf.noipsec is a bitfield, use temp integer */ int noipsec = 0; if (ipsec6_getpolicybyinterface(m, IPSEC_DIR_OUTBOUND, - flags, ip6oa, &noipsec, &sp) != 0) + flags, ip6oa, &noipsec, &sp) != 0) { goto bad; + } ip6obf.noipsec = (noipsec != 0); } @@ -439,8 +444,9 @@ tags_done: */ if ((ip6obf.select_srcif = (!(flags & (IPV6_FORWARDING | IPV6_UNSPECSRC | IPV6_FLAG_NOSRCIFSEL)) && - (ip6oa->ip6oa_flags & IP6OAF_SELECT_SRCIF)))) + (ip6oa->ip6oa_flags & IP6OAF_SELECT_SRCIF)))) { ipf_pktopts.ippo_flags |= IPPOF_SELECT_SRCIF; + } if ((ip6oa->ip6oa_flags & IP6OAF_BOUND_IF) && ip6oa->ip6oa_boundif != IFSCOPE_NONE) { @@ -448,8 +454,9 @@ tags_done: (ip6oa->ip6oa_boundif << IPPOF_SHIFT_IFSCOPE)); } - if (ip6oa->ip6oa_flags & IP6OAF_BOUND_SRCADDR) + if (ip6oa->ip6oa_flags & IP6OAF_BOUND_SRCADDR) { ipf_pktopts.ippo_flags |= IPPOF_BOUND_SRCADDR; + } } else { ip6obf.select_srcif = FALSE; if (flags & IPV6_OUTARGS) { @@ -460,10 +467,12 @@ tags_done: } if (flags & IPV6_OUTARGS) { - if (ip6oa->ip6oa_flags & IP6OAF_NO_CELLULAR) + if (ip6oa->ip6oa_flags & IP6OAF_NO_CELLULAR) { ipf_pktopts.ippo_flags |= IPPOF_NO_IFT_CELLULAR; - if (ip6oa->ip6oa_flags & IP6OAF_NO_EXPENSIVE) + } + if (ip6oa->ip6oa_flags & IP6OAF_NO_EXPENSIVE) { ipf_pktopts.ippo_flags |= IPPOF_NO_IFF_EXPENSIVE; + } adv = &ip6oa->ip6oa_flowadv; adv->code = FADV_SUCCESS; ip6oa->ip6oa_retflags = 0; @@ -474,13 +483,14 @@ tags_done: * used to keep old value to release reference properly and dtrace * ipsec tunnel traffic properly. 
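The bound-interface block above folds per-call output arguments (the ip6oa flags) into the packet-filter option word, packing the bound interface's scope index into the upper bits. A sketch of that packing; the PKTF_* constants below are illustrative stand-ins, not the real IPPOF_* values from xnu headers:

    #include <stdint.h>

    #define PKTF_SELECT_SRCIF   0x00000001u /* stand-in for IPPOF_SELECT_SRCIF */
    #define PKTF_BOUND_IF       0x00000002u /* stand-in for IPPOF_BOUND_IF */
    #define PKTF_SHIFT_IFSCOPE  16          /* stand-in for IPPOF_SHIFT_IFSCOPE */

    static uint32_t
    fold_output_args(int select_srcif, uint32_t boundif)
    {
        uint32_t flags = 0;

        if (select_srcif)
            flags |= PKTF_SELECT_SRCIF;
        if (boundif != 0)               /* 0 plays the role of IFSCOPE_NONE */
            flags |= PKTF_BOUND_IF | (boundif << PKTF_SHIFT_IFSCOPE);
        return flags;
    }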
*/ - if (ifpp != NULL && *ifpp != NULL) + if (ifpp != NULL && *ifpp != NULL) { *ifpp = NULL; + } #if DUMMYNET if (args.fwa_pf_rule) { ip6 = mtod(m, struct ip6_hdr *); - VERIFY(ro != NULL); /* ro == saved_route */ + VERIFY(ro != NULL); /* ro == saved_route */ goto check_with_pf; } #endif /* DUMMYNET */ @@ -510,26 +520,27 @@ tags_done: loopit: packets_processed++; - m->m_pkthdr.pkt_flags &= ~(PKTF_LOOP|PKTF_IFAINFO); + m->m_pkthdr.pkt_flags &= ~(PKTF_LOOP | PKTF_IFAINFO); ip6 = mtod(m, struct ip6_hdr *); nxt0 = ip6->ip6_nxt; finaldst = ip6->ip6_dst; ip6obf.hdrsplit = FALSE; ro_pmtu = NULL; - if (!SLIST_EMPTY(&m->m_pkthdr.tags)) + if (!SLIST_EMPTY(&m->m_pkthdr.tags)) { inject_filter_ref = ipf_get_inject_filter(m); - else + } else { inject_filter_ref = NULL; + } -#define MAKE_EXTHDR(hp, mp) do { \ - if (hp != NULL) { \ - struct ip6_ext *eh = (struct ip6_ext *)(hp); \ - error = ip6_copyexthdr((mp), (caddr_t)(hp), \ - ((eh)->ip6e_len + 1) << 3); \ - if (error) \ - goto freehdrs; \ - } \ +#define MAKE_EXTHDR(hp, mp) do { \ + if (hp != NULL) { \ + struct ip6_ext *eh = (struct ip6_ext *)(hp); \ + error = ip6_copyexthdr((mp), (caddr_t)(hp), \ + ((eh)->ip6e_len + 1) << 3); \ + if (error) \ + goto freehdrs; \ + } \ } while (0) if (opt != NULL) { @@ -604,8 +615,8 @@ loopit: if (opt != NULL && opt->ip6po_pktinfo != NULL) { opt->ip6po_pktinfo-> - ipi6_ifindex = - policy_ifp->if_index; + ipi6_ifindex = + policy_ifp->if_index; } ro = &necp_route; goto skip_ipsec; @@ -623,17 +634,18 @@ loopit: #endif /* NECP */ #if IPSEC - if (ipsec_bypass != 0 || ip6obf.noipsec) + if (ipsec_bypass != 0 || ip6obf.noipsec) { goto skip_ipsec; + } if (sp == NULL) { /* get a security policy for this packet */ if (so != NULL) { sp = ipsec6_getpolicybysock(m, IPSEC_DIR_OUTBOUND, - so, &error); + so, &error); } else { sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND, - 0, &error); + 0, &error); } if (sp == NULL) { IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); @@ -685,17 +697,21 @@ skip_ipsec: * Keep the length of the unfragmentable part for fragmentation. */ optlen = 0; - if (exthdrs.ip6e_hbh != NULL) + if (exthdrs.ip6e_hbh != NULL) { optlen += exthdrs.ip6e_hbh->m_len; - if (exthdrs.ip6e_dest1 != NULL) + } + if (exthdrs.ip6e_dest1 != NULL) { optlen += exthdrs.ip6e_dest1->m_len; - if (exthdrs.ip6e_rthdr != NULL) + } + if (exthdrs.ip6e_rthdr != NULL) { optlen += exthdrs.ip6e_rthdr->m_len; - unfragpartlen = optlen + sizeof (struct ip6_hdr); + } + unfragpartlen = optlen + sizeof(struct ip6_hdr); /* NOTE: we don't add AH/ESP length here. do that later. */ - if (exthdrs.ip6e_dest2 != NULL) + if (exthdrs.ip6e_dest2 != NULL) { optlen += exthdrs.ip6e_dest2->m_len; + } /* * If we need IPsec, or there is at least one extension header, @@ -703,9 +719,9 @@ skip_ipsec: */ if (( #if IPSEC - ip6obf.needipsec || + ip6obf.needipsec || #endif /* IPSEC */ - optlen) && !ip6obf.hdrsplit) { + optlen) && !ip6obf.hdrsplit) { if ((error = ip6_splithdr(m, &exthdrs)) != 0) { m = NULL; goto freehdrs; @@ -719,7 +735,7 @@ skip_ipsec: /* adjust mbuf packet header length */ m->m_pkthdr.len += optlen; - plen = m->m_pkthdr.len - sizeof (*ip6); + plen = m->m_pkthdr.len - sizeof(*ip6); /* If this is a jumbo payload, insert a jumbo payload option. 
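The jumbo-payload hunk that continues below applies the RFC 2675 rule: a payload longer than IPV6_MAXPACKET (65535) cannot be represented in the 16-bit ip6_plen field, so the header's length is zeroed and the true length rides in a hop-by-hop jumbo option. The field computation in isolation, as a sketch:

    #include <stdint.h>
    #include <arpa/inet.h>

    #define IPV6_MAXPACKET 65535

    /* Value for ip6_plen: zero signals that the real length travels
     * in a jumbo payload option (RFC 2675). */
    static uint16_t
    payload_len_field(uint32_t plen)
    {
        return (plen > IPV6_MAXPACKET) ? 0 : htons((uint16_t)plen);
    }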
*/ if (plen > IPV6_MAXPACKET) { @@ -733,8 +749,9 @@ skip_ipsec: } /* adjust pointer */ ip6 = mtod(m, struct ip6_hdr *); - if ((error = ip6_insert_jumboopt(&exthdrs, plen)) != 0) + if ((error = ip6_insert_jumboopt(&exthdrs, plen)) != 0) { goto freehdrs; + } ip6->ip6_plen = 0; } else { ip6->ip6_plen = htons(plen); @@ -771,19 +788,19 @@ skip_ipsec: ip6->ip6_nxt = IPPROTO_DSTOPTS; } -#define MAKE_CHAIN(m, mp, p, i) do { \ - if (m != NULL) { \ - if (!ip6obf.hdrsplit) { \ - panic("assumption failed: hdr not split"); \ - /* NOTREACHED */ \ - } \ - *mtod((m), u_char *) = *(p); \ - *(p) = (i); \ - p = mtod((m), u_char *); \ - (m)->m_next = (mp)->m_next; \ - (mp)->m_next = (m); \ - (mp) = (m); \ - } \ +#define MAKE_CHAIN(m, mp, p, i) do { \ + if (m != NULL) { \ + if (!ip6obf.hdrsplit) { \ + panic("assumption failed: hdr not split"); \ + /* NOTREACHED */ \ + } \ + *mtod((m), u_char *) = *(p); \ + *(p) = (i); \ + p = mtod((m), u_char *); \ + (m)->m_next = (mp)->m_next; \ + (mp)->m_next = (m); \ + (mp) = (m); \ + } \ } while (0) /* * result: IPv6 hbh dest1 rthdr dest2 payload @@ -800,14 +817,15 @@ skip_ipsec: #undef MAKE_CHAIN #if IPSEC - if (ip6obf.needipsec && (m->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA)) + if (ip6obf.needipsec && (m->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA)) { in6_delayed_cksum_offset(m, 0, optlen, nxt0); + } #endif /* IPSEC */ if (!TAILQ_EMPTY(&ipv6_filters) && !((flags & IPV6_OUTARGS) && (ip6oa->ip6oa_flags & IP6OAF_INTCOPROC_ALLOWED))) { - struct ipfilter *filter; + struct ipfilter *filter; int seen = (inject_filter_ref == NULL); int fixscope = 0; @@ -835,14 +853,15 @@ skip_ipsec: */ if (seen == 0) { if ((struct ipfilter *)inject_filter_ref == - filter) + filter) { seen = 1; + } } else if (filter->ipf_filter.ipf_output != NULL) { errno_t result; result = filter->ipf_filter.ipf_output( - filter->ipf_filter.cookie, - (mbuf_t *)&m, ippo); + filter->ipf_filter.cookie, + (mbuf_t *)&m, ippo); if (result == EJUSTRETURN) { ipf_unref(); m = NULL; @@ -858,8 +877,9 @@ skip_ipsec: ip6 = mtod(m, struct ip6_hdr *); /* Hack: cleanup embedded scope_id if we put it there */ - if (fixscope) + if (fixscope) { ip6->ip6_dst.s6_addr16[1] = 0; + } } #if IPSEC @@ -899,7 +919,7 @@ skip_ipsec: default: printf("ip6_output (ipsec): error code %d\n", error); - /* FALLTHRU */ + /* FALLTHRU */ case ENOENT: /* don't show these error codes to the user */ error = 0; @@ -914,51 +934,10 @@ skip_ipsec: } #endif /* IPSEC */ - /* - * If there is a routing header, replace the destination address field - * with the first hop of the routing header. - */ + /* If there is a routing header, discard the packet. */ if (exthdrs.ip6e_rthdr != NULL) { - struct ip6_rthdr0 *rh0; - struct in6_addr *addr; - struct sockaddr_in6 sa; - - rh = (struct ip6_rthdr *) - (mtod(exthdrs.ip6e_rthdr, struct ip6_rthdr *)); - switch (rh->ip6r_type) { - case IPV6_RTHDR_TYPE_0: - rh0 = (struct ip6_rthdr0 *)rh; - addr = (struct in6_addr *)(void *)(rh0 + 1); - - /* - * construct a sockaddr_in6 form of - * the first hop. - * - * XXX: we may not have enough - * information about its scope zone; - * there is no standard API to pass - * the information from the - * application. 
- */ - bzero(&sa, sizeof (sa)); - sa.sin6_family = AF_INET6; - sa.sin6_len = sizeof (sa); - sa.sin6_addr = addr[0]; - if ((error = sa6_embedscope(&sa, - ip6_use_defzone)) != 0) { - goto bad; - } - ip6->ip6_dst = sa.sin6_addr; - bcopy(&addr[1], &addr[0], sizeof (struct in6_addr) * - (rh0->ip6r0_segleft - 1)); - addr[rh0->ip6r0_segleft - 1] = finaldst; - /* XXX */ - in6_clearscope(addr + rh0->ip6r0_segleft - 1); - break; - default: /* is it possible? */ - error = EINVAL; - goto bad; - } + error = EINVAL; + goto bad; } /* Source address validation */ @@ -981,15 +960,17 @@ skip_ipsec: */ if (ro == NULL) { ro = &ip6route; - bzero((caddr_t)ro, sizeof (*ro)); + bzero((caddr_t)ro, sizeof(*ro)); } ro_pmtu = ro; - if (opt != NULL && opt->ip6po_rthdr) + if (opt != NULL && opt->ip6po_rthdr) { ro = &opt->ip6po_route; + } dst = SIN6(&ro->ro_dst); - if (ro->ro_rt != NULL) + if (ro->ro_rt != NULL) { RT_LOCK_ASSERT_NOTHELD(ro->ro_rt); + } /* * if specified, try to fill in the traffic class field. * do not override if a non-zero value is already set. @@ -998,10 +979,12 @@ skip_ipsec: if (opt != NULL && opt->ip6po_tclass >= 0) { int mask = 0; - if ((ip6->ip6_flow & htonl(0xfc << 20)) == 0) + if ((ip6->ip6_flow & htonl(0xfc << 20)) == 0) { mask |= 0xfc; - if ((ip6->ip6_flow & htonl(0x03 << 20)) == 0) + } + if ((ip6->ip6_flow & htonl(0x03 << 20)) == 0) { mask |= 0x03; + } if (mask != 0) { ip6->ip6_flow |= htonl((opt->ip6po_tclass & mask) << 20); @@ -1030,13 +1013,14 @@ skip_ipsec: * next transmit. */ if (ROUTE_UNUSABLE(ro) || dst->sin6_family != AF_INET6 || - !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst)) + !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst)) { ROUTE_RELEASE(ro); + } if (ro->ro_rt == NULL) { - bzero(dst, sizeof (*dst)); + bzero(dst, sizeof(*dst)); dst->sin6_family = AF_INET6; - dst->sin6_len = sizeof (struct sockaddr_in6); + dst->sin6_len = sizeof(struct sockaddr_in6); dst->sin6_addr = ip6->ip6_dst; } #if IPSEC @@ -1052,12 +1036,12 @@ skip_ipsec: * * IPv6 [ESP|AH] IPv6 [extension headers] payload */ - bzero(&exthdrs, sizeof (exthdrs)); + bzero(&exthdrs, sizeof(exthdrs)); exthdrs.ip6e_ip6 = m; ipsec_state.m = m; route_copyout((struct route *)&ipsec_state.ro, (struct route *)ro, - sizeof (struct route_in6)); + sizeof(struct route_in6)); ipsec_state.dst = SA(dst); /* So that we can see packets inside the tunnel */ @@ -1088,7 +1072,7 @@ skip_ipsec: default: printf("ip6_output (ipsec): error code %d\n", error); - /* FALLTHRU */ + /* FALLTHRU */ case ENOENT: /* don't show these error codes to the user */ error = 0; @@ -1106,8 +1090,9 @@ skip_ipsec: ip6oa->ip6oa_flags &= ~IP6OAF_BOUND_IF; } if (opt != NULL && opt->ip6po_pktinfo != NULL) { - if (opt->ip6po_pktinfo->ipi6_ifindex != IFSCOPE_NONE) + if (opt->ip6po_pktinfo->ipi6_ifindex != IFSCOPE_NONE) { opt->ip6po_pktinfo->ipi6_ifindex = IFSCOPE_NONE; + } } exthdrs.ip6e_ip6 = m; } @@ -1125,14 +1110,14 @@ skip_ipsec: ip6 = mtod(m, struct ip6_hdr *); if (ip6obf.select_srcif) { - bzero(&src_sa, sizeof (src_sa)); + bzero(&src_sa, sizeof(src_sa)); src_sa.sin6_family = AF_INET6; - src_sa.sin6_len = sizeof (src_sa); + src_sa.sin6_len = sizeof(src_sa); src_sa.sin6_addr = ip6->ip6_src; } - bzero(&dst_sa, sizeof (dst_sa)); + bzero(&dst_sa, sizeof(dst_sa)); dst_sa.sin6_family = AF_INET6; - dst_sa.sin6_len = sizeof (dst_sa); + dst_sa.sin6_len = sizeof(dst_sa); dst_sa.sin6_addr = ip6->ip6_dst; /* @@ -1156,8 +1141,9 @@ skip_ipsec: default: break; /* XXX statistics? 
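A behavioral change above is easy to miss among the brace fixes: the old code rewrote type-0 routing headers in place, copying the first hop into ip6_dst and shifting the remaining segments, while the new code rejects any packet that still carries a routing header, consistent with the RFC 5095 deprecation of RH0. The surviving logic reduces to a one-line check, sketched here as a standalone predicate:

    #include <errno.h>

    /* No routing-header type is honored on output any more. */
    static int
    check_rthdr(const void *rthdr_mbuf)
    {
        return (rthdr_mbuf != NULL) ? EINVAL : 0;
    }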
*/ } - if (ifp != NULL) + if (ifp != NULL) { in6_ifstat_inc(ifp, ifs6_out_discard); + } /* ifp (if non-NULL) will be released at the end */ goto bad; } @@ -1168,7 +1154,7 @@ skip_ipsec: * If in6_selectroute() does not return a route entry, * dst may not have been updated. */ - *dst = dst_sa; /* XXX */ + *dst = dst_sa; /* XXX */ } #if NECP @@ -1190,8 +1176,9 @@ skip_ipsec: RT_LOCK(rt); if (ia == NULL) { ia = (struct in6_ifaddr *)(rt->rt_ifa); - if (ia != NULL) + if (ia != NULL) { IFA_ADDREF(&ia->ia_ifa); + } } rt->rt_use++; RT_UNLOCK(rt); @@ -1204,15 +1191,18 @@ skip_ipsec: * address of our own. */ if (ia != NULL && ia->ia_ifp) { - ifnet_reference(ia->ia_ifp); /* for origifp */ - if (origifp != NULL) + ifnet_reference(ia->ia_ifp); /* for origifp */ + if (origifp != NULL) { ifnet_release(origifp); + } origifp = ia->ia_ifp; } else { - if (ifp != NULL) - ifnet_reference(ifp); /* for origifp */ - if (origifp != NULL) + if (ifp != NULL) { + ifnet_reference(ifp); /* for origifp */ + } + if (origifp != NULL) { ifnet_release(origifp); + } origifp = ifp; } @@ -1222,27 +1212,31 @@ skip_ipsec: u_int32_t zone; src0 = ip6->ip6_src; - if (in6_setscope(&src0, origifp, &zone)) + if (in6_setscope(&src0, origifp, &zone)) { goto badscope; - bzero(&src_sa, sizeof (src_sa)); + } + bzero(&src_sa, sizeof(src_sa)); src_sa.sin6_family = AF_INET6; - src_sa.sin6_len = sizeof (src_sa); + src_sa.sin6_len = sizeof(src_sa); src_sa.sin6_addr = ip6->ip6_src; if ((sa6_recoverscope(&src_sa, TRUE) || - zone != src_sa.sin6_scope_id)) + zone != src_sa.sin6_scope_id)) { goto badscope; + } dst0 = ip6->ip6_dst; - if ((in6_setscope(&dst0, origifp, &zone))) + if ((in6_setscope(&dst0, origifp, &zone))) { goto badscope; + } /* re-initialize to be sure */ - bzero(&dst_sa, sizeof (dst_sa)); + bzero(&dst_sa, sizeof(dst_sa)); dst_sa.sin6_family = AF_INET6; - dst_sa.sin6_len = sizeof (dst_sa); + dst_sa.sin6_len = sizeof(dst_sa); dst_sa.sin6_addr = ip6->ip6_dst; if ((sa6_recoverscope(&dst_sa, TRUE) || - zone != dst_sa.sin6_scope_id)) + zone != dst_sa.sin6_scope_id)) { goto badscope; + } /* scope check is done. */ goto routefound; @@ -1250,8 +1244,9 @@ skip_ipsec: badscope: ip6stat.ip6s_badscope++; in6_ifstat_inc(origifp, ifs6_out_discard); - if (error == 0) + if (error == 0) { error = EHOSTUNREACH; /* XXX */ + } goto bad; } @@ -1277,12 +1272,13 @@ routefound: if (ia != NULL && (ifp->if_flags & IFF_LOOPBACK)) { uint32_t srcidx; - if (src_ia != NULL) + if (src_ia != NULL) { srcidx = src_ia->ia_ifp->if_index; - else if (ro->ro_srcia != NULL) + } else if (ro->ro_srcia != NULL) { srcidx = ro->ro_srcia->ifa_ifp->if_index; - else + } else { srcidx = 0; + } ip6_setsrcifaddr_info(m, srcidx, NULL); ip6_setdstifaddr_info(m, 0, ia); @@ -1292,7 +1288,7 @@ routefound: if (!IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { m->m_flags &= ~(M_BCAST | M_MCAST); /* just in case */ } else { - struct in6_multi *in6m; + struct in6_multi *in6m; m->m_flags = (m->m_flags & ~M_BCAST) | M_MCAST; in6_ifstat_inc_na(ifp, ifs6_out_mcast); @@ -1309,22 +1305,26 @@ routefound: in6_multihead_lock_shared(); IN6_LOOKUP_MULTI(&ip6->ip6_dst, ifp, in6m); in6_multihead_lock_done(); - if (im6o != NULL) + if (im6o != NULL) { IM6O_LOCK(im6o); + } if (in6m != NULL && (im6o == NULL || im6o->im6o_multicast_loop)) { - if (im6o != NULL) + if (im6o != NULL) { IM6O_UNLOCK(im6o); + } /* * If we belong to the destination multicast group * on the outgoing interface, and the caller did not * forbid loopback, loop back a copy. 
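The multicast path in this hunk decides whether to loop a copy of an outgoing packet back to the local stack: a copy is looped only when the host is a member of the destination group on the outgoing interface and the sender's multicast options do not forbid loopback. The in6m/im6o test as a pure predicate, for reference:

    /* Loop back a copy iff we are in the group and loopback was not
     * disabled via the socket's multicast options. */
    static int
    should_loopback(int in_group, int have_mcast_opts, int loop_enabled)
    {
        return in_group && (!have_mcast_opts || loop_enabled);
    }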
*/ ip6_mloopback(NULL, ifp, m, dst, optlen, nxt0); - } else if (im6o != NULL) + } else if (im6o != NULL) { IM6O_UNLOCK(im6o); - if (in6m != NULL) + } + if (in6m != NULL) { IN6M_REMREF(in6m); + } /* * Multicasts with a hoplimit of zero may be looped back, * above, but must not be transmitted on a network. @@ -1336,8 +1336,9 @@ routefound: if (ip6->ip6_hlim == 0 || (ifp->if_flags & IFF_LOOPBACK) || IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst)) { /* remove m from the packetchain and continue looping */ - if (m != NULL) + if (m != NULL) { m_freem(m); + } m = NULL; goto evaluateloop; } @@ -1348,14 +1349,15 @@ routefound: * to increment per-interface statistics. */ if (ifpp != NULL && *ifpp == NULL) { - ifnet_reference(ifp); /* for caller */ + ifnet_reference(ifp); /* for caller */ *ifpp = ifp; } /* Determine path MTU. */ if ((error = ip6_getpmtu(ro_pmtu, ro, ifp, &finaldst, &mtu, - &alwaysfrag)) != 0) + &alwaysfrag)) != 0) { goto bad; + } /* * The caller of this function may specify to use the minimum MTU @@ -1397,8 +1399,9 @@ routefound: u_int32_t dummy; /* XXX unused */ uint32_t oplen = 0; /* for ip6_process_hopopts() */ #if DIAGNOSTIC - if ((hbh->ip6h_len + 1) << 3 > exthdrs.ip6e_hbh->m_len) + if ((hbh->ip6h_len + 1) << 3 > exthdrs.ip6e_hbh->m_len) { panic("ip6e_hbh is not continuous"); + } #endif /* * XXX: If we have to send an ICMPv6 error to the sender, @@ -1409,14 +1412,14 @@ routefound: m->m_flags |= M_LOOP; m->m_pkthdr.rcvif = ifp; if (ip6_process_hopopts(m, (u_int8_t *)(hbh + 1), - ((hbh->ip6h_len + 1) << 3) - sizeof (struct ip6_hbh), + ((hbh->ip6h_len + 1) << 3) - sizeof(struct ip6_hbh), &dummy, &oplen) < 0) { /* * m was already freed at this point. Set to NULL so it - * is not re-freed at end of ip6_output_list. + * is not re-freed at end of ip6_output_list. */ m = NULL; - error = EINVAL; /* better error? */ + error = EINVAL; /* better error? */ goto bad; } m->m_flags &= ~M_LOOP; /* XXX */ @@ -1437,8 +1440,9 @@ check_with_pf: args.fwa_m = m; args.fwa_oif = ifp; args.fwa_oflags = flags; - if (flags & IPV6_OUTARGS) + if (flags & IPV6_OUTARGS) { args.fwa_ip6oa = ip6oa; + } args.fwa_ro6 = ro; args.fwa_dst6 = dst; args.fwa_ro6_pmtu = ro_pmtu; @@ -1473,7 +1477,7 @@ check_with_pf: if (ip6oa != NULL) { u_int8_t dscp; - + dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT; error = set_packet_qos(m, ifp, @@ -1496,8 +1500,9 @@ check_with_pf: &exthdrs, ifp, mtu, alwaysfrag, unfragpartlen, ro_pmtu, nxt0, optlen); - if (error) + if (error) { goto bad; + } /* * The evaluateloop label is where we decide whether to continue looping over @@ -1521,8 +1526,9 @@ evaluateloop: } /* Fragmentation may mean m is a chain. Find the last packet. 
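Because ip6_fragment_packet() may turn one packet into a chain linked through m_nextpkt, the send-chain bookkeeping just below has to advance its tail pointer to the last packet before the next result is appended. The walk, on a hypothetical list node standing in for an mbuf:

    struct pkt {
        struct pkt *nextpkt;            /* stands in for mbuf m_nextpkt */
    };

    static struct pkt *
    chain_tail(struct pkt *m)
    {
        while (m->nextpkt != NULL)
            m = m->nextpkt;
        return m;
    }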
*/ - while (m->m_nextpkt) + while (m->m_nextpkt) { m = m->m_nextpkt; + } sendchain_last = m; pktcnt++; } @@ -1567,8 +1573,9 @@ done: ROUTE_RELEASE(&ip6route); #if IPSEC ROUTE_RELEASE(&ipsec_state.ro); - if (sp != NULL) + if (sp != NULL) { key_freesp(sp, KEY_SADB_UNLOCKED); + } #endif /* IPSEC */ #if NECP ROUTE_RELEASE(&necp_route); @@ -1578,49 +1585,60 @@ done: ROUTE_RELEASE(&saved_ro_pmtu); #endif /* DUMMYNET */ - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); - if (src_ia != NULL) + } + if (src_ia != NULL) { IFA_REMREF(&src_ia->ia_ifa); - if (ifp != NULL) + } + if (ifp != NULL) { ifnet_release(ifp); - if (origifp != NULL) + } + if (origifp != NULL) { ifnet_release(origifp); + } if (ip6_output_measure) { net_perf_measure_time(&net_perf, &start_tv, packets_processed); net_perf_histogram(&net_perf, packets_processed); } - return (error); + return error; freehdrs: if (exthdrs.ip6e_hbh != NULL) { - if (exthdrs.merged) + if (exthdrs.merged) { panic("Double free of ip6e_hbh"); + } m_freem(exthdrs.ip6e_hbh); } if (exthdrs.ip6e_dest1 != NULL) { - if (exthdrs.merged) + if (exthdrs.merged) { panic("Double free of ip6e_dest1"); + } m_freem(exthdrs.ip6e_dest1); } if (exthdrs.ip6e_rthdr != NULL) { - if (exthdrs.merged) + if (exthdrs.merged) { panic("Double free of ip6e_rthdr"); + } m_freem(exthdrs.ip6e_rthdr); } if (exthdrs.ip6e_dest2 != NULL) { - if (exthdrs.merged) + if (exthdrs.merged) { panic("Double free of ip6e_dest2"); + } m_freem(exthdrs.ip6e_dest2); } /* FALLTHRU */ bad: - if (inputchain != NULL) + if (inputchain != NULL) { m_freem_list(inputchain); - if (sendchain != NULL) + } + if (sendchain != NULL) { m_freem_list(sendchain); - if (m != NULL) + } + if (m != NULL) { m_freem(m); + } goto done; @@ -1653,9 +1671,9 @@ bad: static int ip6_fragment_packet(struct mbuf **mptr, struct ip6_pktopts *opt, - struct ip6_exthdrs *exthdrsp, struct ifnet *ifp, uint32_t mtu, - boolean_t alwaysfrag, uint32_t unfragpartlen, struct route_in6 *ro_pmtu, - int nxt0, uint32_t optlen) + struct ip6_exthdrs *exthdrsp, struct ifnet *ifp, uint32_t mtu, + boolean_t alwaysfrag, uint32_t unfragpartlen, struct route_in6 *ro_pmtu, + int nxt0, uint32_t optlen) { VERIFY(NULL != mptr); struct mbuf *m = *mptr; @@ -1670,20 +1688,20 @@ ip6_fragment_packet(struct mbuf **mptr, struct ip6_pktopts *opt, * from another interface; the packet would already have the * final checksum and we shouldn't recompute it. 
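The dontfrag/alwaysfrag case analysis that follows (the comments label the cases 1-a through 4) collapses to a small decision function. A sketch with the TSO and hardware-assisted-fragmentation escapes omitted for brevity:

    #include <stdint.h>

    /* 0 = transmit as-is, 1 = fragment, -1 = unsatisfiable (EMSGSIZE). */
    static int
    frag_decision(int dontfrag, int alwaysfrag, uint32_t tlen,
        uint32_t linkmtu, uint32_t mtu)
    {
        if (dontfrag && alwaysfrag)
            return -1;                  /* case 4: conflicting request */
        if (dontfrag && tlen > linkmtu)
            return -1;                  /* case 2-b: DONTFRAG, packet too big */
        if (dontfrag || (!alwaysfrag && tlen <= mtu))
            return 0;                   /* cases 1-a and 2-a */
        return 1;                       /* remaining cases: fragment */
    }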
*/ - if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID|CSUM_PARTIAL)) == - (CSUM_DATA_VALID|CSUM_PARTIAL)) { + if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) == + (CSUM_DATA_VALID | CSUM_PARTIAL)) { m->m_pkthdr.csum_flags &= ~CSUM_TX_FLAGS; m->m_pkthdr.csum_data = 0; } } - if (dontfrag && alwaysfrag) { /* case 4 */ + if (dontfrag && alwaysfrag) { /* case 4 */ /* conflicting request - can't transmit */ return EMSGSIZE; } /* Access without acquiring nd_ifinfo lock for performance */ - if (dontfrag && tlen > IN6_LINKMTU(ifp)) { /* case 2-b */ + if (dontfrag && tlen > IN6_LINKMTU(ifp)) { /* case 2-b */ /* * Even if the DONTFRAG option is specified, we cannot send the * packet when the data length is larger than the MTU of the @@ -1696,7 +1714,7 @@ ip6_fragment_packet(struct mbuf **mptr, struct ip6_pktopts *opt, struct ip6ctlparam ip6cp; mtu32 = (u_int32_t)mtu; - bzero(&ip6cp, sizeof (ip6cp)); + bzero(&ip6cp, sizeof(ip6cp)); ip6cp.ip6c_cmdarg = (void *)&mtu32; pfctlinput2(PRC_MSGSIZE, SA(&ro_pmtu->ro_dst), (void *)&ip6cp); return EMSGSIZE; @@ -1705,7 +1723,7 @@ ip6_fragment_packet(struct mbuf **mptr, struct ip6_pktopts *opt, /* * transmit packet without fragmentation */ - if (dontfrag || (!alwaysfrag && /* case 1-a and 2-a */ + if (dontfrag || (!alwaysfrag && /* case 1-a and 2-a */ (tlen <= mtu || TSO_IPV6_OK(ifp, m) || (ifp->if_hwassist & CSUM_FRAGMENT_IPV6)))) { /* @@ -1774,10 +1792,11 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, * Must be able to put at least 8 bytes per fragment. */ hlen = unfragpartlen; - if (mtu > IPV6_MAXPACKET) + if (mtu > IPV6_MAXPACKET) { mtu = IPV6_MAXPACKET; + } - len = (mtu - hlen - sizeof (struct ip6_frag)) & ~7; + len = (mtu - hlen - sizeof(struct ip6_frag)) & ~7; if (len < 8) { in6_ifstat_inc(ifp, ifs6_out_fragfail); return EMSGSIZE; @@ -1801,8 +1820,9 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, ip6->ip6_nxt = IPPROTO_FRAGMENT; } - if (morig->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA) + if (morig->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA) { in6_delayed_cksum_offset(morig, 0, optlen, nxt0); + } /* * Loop through length of segment after first fragment, @@ -1814,7 +1834,7 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, struct mbuf *new_m; struct mbuf *m_frgpart; - MGETHDR(new_m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + MGETHDR(new_m, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (new_m == NULL) { error = ENOBUFS; ip6stat.ip6s_odropped++; @@ -1837,7 +1857,7 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, new_m->m_data += max_linkhdr; new_mhip6 = mtod(new_m, struct ip6_hdr *); *new_mhip6 = *ip6; - new_m->m_len = sizeof (*new_mhip6); + new_m->m_len = sizeof(*new_mhip6); error = ip6_insertfraghdr(morig, new_m, hlen, &ip6f); if (error) { @@ -1846,12 +1866,13 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, } ip6f->ip6f_offlg = htons((u_short)((off - hlen) & ~7)); - if (off + len >= tlen) + if (off + len >= tlen) { len = tlen - off; - else + } else { ip6f->ip6f_offlg |= IP6F_MORE_FRAG; + } new_mhip6->ip6_plen = htons((u_short)(len + hlen + - sizeof (*ip6f) - sizeof (struct ip6_hdr))); + sizeof(*ip6f) - sizeof(struct ip6_hdr))); if ((m_frgpart = m_copy(morig, off, len)) == NULL) { error = ENOBUFS; @@ -1859,7 +1880,7 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, break; } m_cat(new_m, m_frgpart); - new_m->m_pkthdr.len = len + hlen + sizeof (*ip6f); + 
new_m->m_pkthdr.len = len + hlen + sizeof(*ip6f); new_m->m_pkthdr.rcvif = NULL; M_COPY_CLASSIFIER(new_m, morig); @@ -1902,26 +1923,28 @@ ip6_copyexthdr(struct mbuf **mp, caddr_t hdr, int hlen) { struct mbuf *m; - if (hlen > MCLBYTES) - return (ENOBUFS); /* XXX */ - + if (hlen > MCLBYTES) { + return ENOBUFS; /* XXX */ + } MGET(m, M_DONTWAIT, MT_DATA); - if (m == NULL) - return (ENOBUFS); + if (m == NULL) { + return ENOBUFS; + } if (hlen > MLEN) { MCLGET(m, M_DONTWAIT); if (!(m->m_flags & M_EXT)) { m_free(m); - return (ENOBUFS); + return ENOBUFS; } } m->m_len = hlen; - if (hdr != NULL) + if (hdr != NULL) { bcopy(hdr, mtod(m, caddr_t), hlen); + } *mp = m; - return (0); + return 0; } static void @@ -1955,22 +1978,23 @@ uint32_t in6_finalize_cksum(struct mbuf *m, uint32_t hoff, int32_t optlen, int32_t nxt0, uint32_t csum_flags) { - unsigned char buf[sizeof (struct ip6_hdr)] __attribute__((aligned(8))); + unsigned char buf[sizeof(struct ip6_hdr)] __attribute__((aligned(8))); struct ip6_hdr *ip6; uint32_t offset, mlen, hlen, olen, sw_csum; uint16_t csum, ulpoff, plen; uint8_t nxt; - _CASSERT(sizeof (csum) == sizeof (uint16_t)); + _CASSERT(sizeof(csum) == sizeof(uint16_t)); VERIFY(m->m_flags & M_PKTHDR); sw_csum = (csum_flags & m->m_pkthdr.csum_flags); - if ((sw_csum &= CSUM_DELAY_IPV6_DATA) == 0) + if ((sw_csum &= CSUM_DELAY_IPV6_DATA) == 0) { goto done; + } - mlen = m->m_pkthdr.len; /* total mbuf len */ - hlen = sizeof (*ip6); /* IPv6 header len */ + mlen = m->m_pkthdr.len; /* total mbuf len */ + hlen = sizeof(*ip6); /* IPv6 header len */ /* sanity check (need at least IPv6 header) */ if (mlen < (hoff + hlen)) { @@ -2019,12 +2043,13 @@ in6_finalize_cksum(struct mbuf *m, uint32_t hoff, int32_t optlen, olen = 0; } else { /* caller supplied the original transport number; use it */ - if (nxt0 >= 0) + if (nxt0 >= 0) { nxt = nxt0; + } olen = optlen; } - offset = hoff + hlen + olen; /* ULP header */ + offset = hoff + hlen + olen; /* ULP header */ /* sanity check */ if (mlen < offset) { @@ -2038,8 +2063,8 @@ in6_finalize_cksum(struct mbuf *m, uint32_t hoff, int32_t optlen, * which is expected to contain the ULP offset; therefore * CSUM_PARTIAL offset adjustment must be undone. */ - if ((m->m_pkthdr.csum_flags & (CSUM_PARTIAL|CSUM_DATA_VALID)) == - (CSUM_PARTIAL|CSUM_DATA_VALID)) { + if ((m->m_pkthdr.csum_flags & (CSUM_PARTIAL | CSUM_DATA_VALID)) == + (CSUM_PARTIAL | CSUM_DATA_VALID)) { /* * Get back the original ULP offset (this will * undo the CSUM_PARTIAL logic in ip6_output.) 
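Fragment sizing in ip6_do_fragmentation() above: every fragment repeats the unfragmentable part plus a fragment header, and the data portion is rounded down to a multiple of 8 because fragment offsets are expressed in 8-octet units; if fewer than 8 bytes fit, the attempt fails with EMSGSIZE. The computation on its own:

    #define IP6_FRAG_HDR_LEN 8          /* sizeof(struct ip6_frag) */

    /* Usable data bytes per fragment, or -1 when the MTU is too small. */
    static int
    frag_payload_len(int mtu, int unfragpartlen)
    {
        int len = (mtu - unfragpartlen - IP6_FRAG_HDR_LEN) & ~7;

        return (len < 8) ? -1 : len;
    }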
@@ -2048,9 +2073,9 @@ in6_finalize_cksum(struct mbuf *m, uint32_t hoff, int32_t optlen, m->m_pkthdr.csum_tx_start); } - ulpoff = (m->m_pkthdr.csum_data & 0xffff); /* ULP csum offset */ + ulpoff = (m->m_pkthdr.csum_data & 0xffff); /* ULP csum offset */ - if (mlen < (ulpoff + sizeof (csum))) { + if (mlen < (ulpoff + sizeof(csum))) { panic("%s: mbuf %p pkt len (%u) proto %d invalid ULP " "cksum offset (%u) cksum flags 0x%x\n", __func__, m, mlen, nxt, ulpoff, m->m_pkthdr.csum_flags); @@ -2064,23 +2089,24 @@ in6_finalize_cksum(struct mbuf *m, uint32_t hoff, int32_t optlen, /* RFC1122 4.1.3.4 */ if (csum == 0 && - (m->m_pkthdr.csum_flags & (CSUM_UDPIPV6|CSUM_ZERO_INVERT))) + (m->m_pkthdr.csum_flags & (CSUM_UDPIPV6 | CSUM_ZERO_INVERT))) { csum = 0xffff; + } /* Insert the checksum in the ULP csum field */ offset += ulpoff; - if ((offset + sizeof (csum)) > m->m_len) { - m_copyback(m, offset, sizeof (csum), &csum); + if ((offset + sizeof(csum)) > m->m_len) { + m_copyback(m, offset, sizeof(csum), &csum); } else if (IP6_HDR_ALIGNED_P(mtod(m, char *) + hoff)) { *(uint16_t *)(void *)(mtod(m, char *) + offset) = csum; } else { - bcopy(&csum, (mtod(m, char *) + offset), sizeof (csum)); + bcopy(&csum, (mtod(m, char *) + offset), sizeof(csum)); } m->m_pkthdr.csum_flags &= ~(CSUM_DELAY_IPV6_DATA | CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_ZERO_INVERT); done: - return (sw_csum); + return sw_csum; } /* @@ -2093,7 +2119,7 @@ ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen) u_char *optbuf; u_int32_t v; -#define JUMBOOPTLEN 8 /* length of jumbo payload option and padding */ +#define JUMBOOPTLEN 8 /* length of jumbo payload option and padding */ /* * If there is no hop-by-hop options header, allocate new one. @@ -2103,11 +2129,12 @@ ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen) */ if (exthdrs->ip6e_hbh == NULL) { MGET(mopt, M_DONTWAIT, MT_DATA); - if (mopt == NULL) - return (ENOBUFS); + if (mopt == NULL) { + return ENOBUFS; + } mopt->m_len = JUMBOOPTLEN; optbuf = mtod(mopt, u_char *); - optbuf[1] = 0; /* = ((JUMBOOPTLEN) >> 3) - 1 */ + optbuf[1] = 0; /* = ((JUMBOOPTLEN) >> 3) - 1 */ exthdrs->ip6e_hbh = mopt; } else { struct ip6_hbh *hbh; @@ -2127,8 +2154,9 @@ ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen) * XXX: give up if the whole (new) hbh header does * not fit even in an mbuf cluster. 
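One subtlety in in6_finalize_cksum() above: per RFC 1122 section 4.1.3.4, a UDP checksum that computes to zero is transmitted as 0xffff, because an all-zero checksum field means "no checksum" over IPv4 and is forbidden for UDP over IPv6. Isolated:

    #include <stdint.h>

    /* Never emit a computed UDP checksum of zero on the wire. */
    static uint16_t
    finalize_udp_csum(uint16_t csum)
    {
        return (csum == 0) ? 0xffff : csum;
    }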
*/ - if (oldoptlen + JUMBOOPTLEN > MCLBYTES) - return (ENOBUFS); + if (oldoptlen + JUMBOOPTLEN > MCLBYTES) { + return ENOBUFS; + } /* * As a consequence, we must always prepare a cluster @@ -2142,8 +2170,9 @@ ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen) n = NULL; } } - if (n == NULL) - return (ENOBUFS); + if (n == NULL) { + return ENOBUFS; + } n->m_len = oldoptlen + JUMBOOPTLEN; bcopy(mtod(mopt, caddr_t), mtod(n, caddr_t), oldoptlen); @@ -2169,12 +2198,12 @@ ip6_insert_jumboopt(struct ip6_exthdrs *exthdrs, u_int32_t plen) optbuf[2] = IP6OPT_JUMBO; optbuf[3] = 4; v = (u_int32_t)htonl(plen + JUMBOOPTLEN); - bcopy(&v, &optbuf[4], sizeof (u_int32_t)); + bcopy(&v, &optbuf[4], sizeof(u_int32_t)); /* finally, adjust the packet header length */ exthdrs->ip6e_ip6->m_pkthdr.len += JUMBOOPTLEN; - return (0); + return 0; #undef JUMBOOPTLEN } @@ -2187,39 +2216,43 @@ ip6_insertfraghdr(struct mbuf *m0, struct mbuf *m, int hlen, { struct mbuf *n, *mlast; - if (hlen > sizeof (struct ip6_hdr)) { - n = m_copym(m0, sizeof (struct ip6_hdr), - hlen - sizeof (struct ip6_hdr), M_DONTWAIT); - if (n == NULL) - return (ENOBUFS); + if (hlen > sizeof(struct ip6_hdr)) { + n = m_copym(m0, sizeof(struct ip6_hdr), + hlen - sizeof(struct ip6_hdr), M_DONTWAIT); + if (n == NULL) { + return ENOBUFS; + } m->m_next = n; - } else + } else { n = m; + } /* Search for the last mbuf of unfragmentable part. */ - for (mlast = n; mlast->m_next; mlast = mlast->m_next) + for (mlast = n; mlast->m_next; mlast = mlast->m_next) { ; + } if (!(mlast->m_flags & M_EXT) && - M_TRAILINGSPACE(mlast) >= sizeof (struct ip6_frag)) { + M_TRAILINGSPACE(mlast) >= sizeof(struct ip6_frag)) { /* use the trailing space of the last mbuf for the frag hdr */ *frghdrp = (struct ip6_frag *)(mtod(mlast, caddr_t) + mlast->m_len); - mlast->m_len += sizeof (struct ip6_frag); - m->m_pkthdr.len += sizeof (struct ip6_frag); + mlast->m_len += sizeof(struct ip6_frag); + m->m_pkthdr.len += sizeof(struct ip6_frag); } else { /* allocate a new mbuf for the fragment header */ struct mbuf *mfrg; MGET(mfrg, M_DONTWAIT, MT_DATA); - if (mfrg == NULL) - return (ENOBUFS); - mfrg->m_len = sizeof (struct ip6_frag); + if (mfrg == NULL) { + return ENOBUFS; + } + mfrg->m_len = sizeof(struct ip6_frag); *frghdrp = mtod(mfrg, struct ip6_frag *); mlast->m_next = mfrg; } - return (0); + return 0; } static int @@ -2232,20 +2265,22 @@ ip6_getpmtu(struct route_in6 *ro_pmtu, struct route_in6 *ro, int error = 0; boolean_t is_local = FALSE; - if (IN6_IS_SCOPE_LINKLOCAL(dst)) + if (IN6_IS_SCOPE_LINKLOCAL(dst)) { is_local = TRUE; + } if (ro_pmtu != ro) { /* The first hop and the final destination may differ. 
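The option bytes assembled above follow the RFC 2675 TLV layout: option type IP6OPT_JUMBO (0xC2), a data length of 4, then the 32-bit payload length in network byte order; the two bytes ahead of the TLV carry either the fresh hop-by-hop header's next-header and length fields or padding, depending on which path ip6_insert_jumboopt() took. The 6-byte TLV by itself, as a sketch:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    static void
    encode_jumbo_tlv(uint8_t tlv[6], uint32_t payload_len)
    {
        uint32_t v = htonl(payload_len);

        tlv[0] = 0xC2;                  /* IP6OPT_JUMBO */
        tlv[1] = 4;                     /* option data length */
        memcpy(&tlv[2], &v, sizeof(v)); /* 32-bit length, network order */
    }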
*/ struct sockaddr_in6 *sa6_dst = SIN6(&ro_pmtu->ro_dst); if (ROUTE_UNUSABLE(ro_pmtu) || - !IN6_ARE_ADDR_EQUAL(&sa6_dst->sin6_addr, dst)) + !IN6_ARE_ADDR_EQUAL(&sa6_dst->sin6_addr, dst)) { ROUTE_RELEASE(ro_pmtu); + } if (ro_pmtu->ro_rt == NULL) { - bzero(sa6_dst, sizeof (*sa6_dst)); + bzero(sa6_dst, sizeof(*sa6_dst)); sa6_dst->sin6_family = AF_INET6; - sa6_dst->sin6_len = sizeof (struct sockaddr_in6); + sa6_dst->sin6_len = sizeof(struct sockaddr_in6); sa6_dst->sin6_addr = *dst; rtalloc_scoped((struct route *)ro_pmtu, @@ -2256,8 +2291,9 @@ ip6_getpmtu(struct route_in6 *ro_pmtu, struct route_in6 *ro, if (ro_pmtu->ro_rt != NULL) { u_int32_t ifmtu; - if (ifp == NULL) + if (ifp == NULL) { ifp = ro_pmtu->ro_rt->rt_ifp; + } /* Access without acquiring nd_ifinfo lock for performance */ ifmtu = IN6_LINKMTU(ifp); @@ -2280,8 +2316,9 @@ ip6_getpmtu(struct route_in6 *ro_pmtu, struct route_in6 *ro, * this case happens with path MTU discovery timeouts. */ mtu = ifmtu; - if (!(ro_pmtu->ro_rt->rt_rmx.rmx_locks & RTV_MTU)) + if (!(ro_pmtu->ro_rt->rt_rmx.rmx_locks & RTV_MTU)) { ro_pmtu->ro_rt->rt_rmx.rmx_mtu = mtu; /* XXX */ + } } else if (mtu < IPV6_MMTU) { /* * RFC2460 section 5, last paragraph: @@ -2304,9 +2341,10 @@ ip6_getpmtu(struct route_in6 *ro_pmtu, struct route_in6 *ro, } *mtup = mtu; - if ((alwaysfragp != NULL) && !is_local) + if ((alwaysfragp != NULL) && !is_local) { *alwaysfragp = alwaysfrag; - return (error); + } + return error; } /* @@ -2344,11 +2382,13 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) struct mbuf *m; error = soopt_getm(sopt, &m); - if (error != 0) + if (error != 0) { break; + } error = soopt_mcopyin(sopt, m); - if (error != 0) + if (error != 0) { break; + } error = ip6_pcbopts(&in6p->in6p_outputopts, m, so, sopt); m_freem(m); @@ -2371,9 +2411,10 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) case IPV6_RECVHOPOPTS: case IPV6_RECVDSTOPTS: case IPV6_RECVRTHDRDSTOPTS: - if (!privileged) + if (!privileged) { break; - /* FALLTHROUGH */ + } + /* FALLTHROUGH */ case IPV6_UNICAST_HOPS: case IPV6_HOPLIMIT: case IPV6_RECVPKTINFO: @@ -2383,14 +2424,15 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) case IPV6_RECVTCLASS: case IPV6_V6ONLY: case IPV6_AUTOFLOWLABEL: - if (optlen != sizeof (int)) { + if (optlen != sizeof(int)) { error = EINVAL; break; } error = sooptcopyin(sopt, &optval, - sizeof (optval), sizeof (optval)); - if (error) + sizeof(optval), sizeof(optval)); + if (error) { break; + } switch (optname) { case IPV6_UNICAST_HOPS: @@ -2406,22 +2448,22 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) } } break; -#define OPTSET(bit) do { \ - if (optval) \ - in6p->inp_flags |= (bit); \ - else \ - in6p->inp_flags &= ~(bit); \ +#define OPTSET(bit) do { \ + if (optval) \ + in6p->inp_flags |= (bit); \ + else \ + in6p->inp_flags &= ~(bit); \ } while (0) -#define OPTSET2292(bit) do { \ - in6p->inp_flags |= IN6P_RFC2292; \ - if (optval) \ - in6p->inp_flags |= (bit); \ - else \ - in6p->inp_flags &= ~(bit); \ +#define OPTSET2292(bit) do { \ + in6p->inp_flags |= IN6P_RFC2292; \ + if (optval) \ + in6p->inp_flags |= (bit); \ + else \ + in6p->inp_flags &= ~(bit); \ } while (0) -#define OPTBIT(bit) (in6p->inp_flags & (bit) ? 1 : 0) +#define OPTBIT(bit) (in6p->inp_flags & (bit) ? 
1 : 0) case IPV6_RECVPKTINFO: /* cannot mix with RFC2292 */ @@ -2442,7 +2484,7 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) } optp = &in6p->in6p_outputopts; error = ip6_pcbopt(IPV6_HOPLIMIT, - (u_char *)&optval, sizeof (optval), + (u_char *)&optval, sizeof(optval), optp, uproto); break; } @@ -2503,8 +2545,9 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) * (RFC3542 leaves this case * unspecified.) */ - if (uproto != IPPROTO_TCP) + if (uproto != IPPROTO_TCP) { OPTSET(IN6P_MTU); + } break; case IPV6_V6ONLY: @@ -2515,15 +2558,16 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) */ if (in6p->inp_lport || !IN6_IS_ADDR_UNSPECIFIED( - &in6p->in6p_laddr)) { + &in6p->in6p_laddr)) { error = EINVAL; break; } OPTSET(IN6P_IPV6_V6ONLY); - if (optval) + if (optval) { in6p->inp_vflag &= ~INP_IPV4; - else + } else { in6p->inp_vflag |= INP_IPV4; + } break; case IPV6_RECVTCLASS: @@ -2534,7 +2578,6 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) case IPV6_AUTOFLOWLABEL: OPTSET(IN6P_AUTOFLOWLABEL); break; - } break; @@ -2544,18 +2587,19 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) case IPV6_PREFER_TEMPADDR: { struct ip6_pktopts **optp; - if (optlen != sizeof (optval)) { + if (optlen != sizeof(optval)) { error = EINVAL; break; } error = sooptcopyin(sopt, &optval, - sizeof (optval), sizeof (optval)); - if (error) + sizeof(optval), sizeof(optval)); + if (error) { break; + } optp = &in6p->in6p_outputopts; error = ip6_pcbopt(optname, (u_char *)&optval, - sizeof (optval), optp, uproto); + sizeof(optval), optp, uproto); if (optname == IPV6_TCLASS) { // Add in the ECN flags @@ -2572,14 +2616,15 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) case IPV6_2292DSTOPTS: case IPV6_2292RTHDR: /* RFC 2292 */ - if (optlen != sizeof (int)) { + if (optlen != sizeof(int)) { error = EINVAL; break; } error = sooptcopyin(sopt, &optval, - sizeof (optval), sizeof (optval)); - if (error) + sizeof(optval), sizeof(optval)); + if (error) { break; + } switch (optname) { case IPV6_2292PKTINFO: OPTSET2292(IN6P_PKTINFO); @@ -2592,15 +2637,17 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) * Check super-user privilege. * See comments for IPV6_RECVHOPOPTS. 
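The OPTSET, OPTSET2292, and OPTBIT macros reformatted above are plain flag manipulation on inp_flags (OPTSET2292 additionally latches the IN6P_RFC2292 mode bit first). As functions, for readability:

    #include <stdint.h>

    static void
    optset(uint32_t *flags, uint32_t bit, int optval)
    {
        if (optval)
            *flags |= bit;              /* OPTSET: enable the option bit */
        else
            *flags &= ~bit;             /* ...or clear it */
    }

    static int
    optbit(uint32_t flags, uint32_t bit)
    {
        return (flags & bit) ? 1 : 0;   /* OPTBIT: test the option bit */
    }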
*/ - if (!privileged) - return (EPERM); + if (!privileged) { + return EPERM; + } OPTSET2292(IN6P_HOPOPTS); capture_exthdrstat_in = TRUE; break; case IPV6_2292DSTOPTS: - if (!privileged) - return (EPERM); - OPTSET2292(IN6P_DSTOPTS| + if (!privileged) { + return EPERM; + } + OPTSET2292(IN6P_DSTOPTS | IN6P_RTHDRDSTOPTS); /* XXX */ capture_exthdrstat_in = TRUE; break; @@ -2627,11 +2674,13 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) break; } error = soopt_getm(sopt, &m); - if (error != 0) + if (error != 0) { break; + } error = soopt_mcopyin(sopt, m); - if (error != 0) + if (error != 0) { break; + } optp = &in6p->in6p_outputopts; error = ip6_pcbopt(optname, mtod(m, u_char *), @@ -2657,9 +2706,10 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) case IPV6_PORTRANGE: error = sooptcopyin(sopt, &optval, - sizeof (optval), sizeof (optval)); - if (error) + sizeof(optval), sizeof(optval)); + if (error) { break; + } switch (optval) { case IPV6_PORTRANGE_DEFAULT: @@ -2688,15 +2738,17 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) size_t len = 0; struct mbuf *m; - if ((error = soopt_getm(sopt, &m)) != 0) + if ((error = soopt_getm(sopt, &m)) != 0) { break; - if ((error = soopt_mcopyin(sopt, m)) != 0) + } + if ((error = soopt_mcopyin(sopt, m)) != 0) { break; + } req = mtod(m, caddr_t); len = m->m_len; error = ipsec6_set_policy(in6p, optname, req, - len, privileged); + len, privileged); m_freem(m); break; } @@ -2713,10 +2765,11 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) } error = sooptcopyin(sopt, &optval, - sizeof (optval), sizeof (optval)); + sizeof(optval), sizeof(optval)); - if (error) + if (error) { break; + } error = inp_bindif(in6p, optval, NULL); break; @@ -2729,10 +2782,11 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) } error = sooptcopyin(sopt, &optval, - sizeof (optval), sizeof (optval)); + sizeof(optval), sizeof(optval)); - if (error) + if (error) { break; + } /* once set, it cannot be unset */ if (!optval && INP_NO_CELLULAR(in6p)) { @@ -2758,13 +2812,12 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) INC_ATOMIC_INT64_LIM(net_api_stats.nas_sock_inet6_stream_exthdr_in); } else if (uproto == IPPROTO_UDP) { INC_ATOMIC_INT64_LIM(net_api_stats.nas_sock_inet6_dgram_exthdr_in); - } - } + } + } break; case SOPT_GET: switch (optname) { - case IPV6_2292PKTOPTIONS: /* * RFC3542 (effectively) deprecated the @@ -2791,7 +2844,6 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) case IPV6_RECVTCLASS: case IPV6_AUTOFLOWLABEL: switch (optname) { - case IPV6_RECVHOPOPTS: optval = OPTBIT(IN6P_HOPOPTS); break; @@ -2831,12 +2883,13 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) case IPV6_PORTRANGE: { int flags; flags = in6p->inp_flags; - if (flags & INP_HIGHPORT) + if (flags & INP_HIGHPORT) { optval = IPV6_PORTRANGE_HIGH; - else if (flags & INP_LOWPORT) + } else if (flags & INP_LOWPORT) { optval = IPV6_PORTRANGE_LOW; - else + } else { optval = 0; + } break; } case IPV6_RECVTCLASS: @@ -2847,10 +2900,11 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) optval = OPTBIT(IN6P_AUTOFLOWLABEL); break; } - if (error) + if (error) { break; + } error = sooptcopyout(sopt, &optval, - sizeof (optval)); + sizeof(optval)); break; case IPV6_PATHMTU: { @@ -2858,10 +2912,11 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) struct ip6_mtuinfo mtuinfo; struct route_in6 sro; - bzero(&sro, sizeof (sro)); + bzero(&sro, sizeof(sro)); - if (!(so->so_state & SS_ISCONNECTED)) - return (ENOTCONN); + if (!(so->so_state & SS_ISCONNECTED)) { + 
return ENOTCONN; + } /* * XXX: we dot not consider the case of source * routing, or optional information to specify @@ -2870,15 +2925,17 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) error = ip6_getpmtu(&sro, NULL, NULL, &in6p->in6p_faddr, &pmtu, NULL); ROUTE_RELEASE(&sro); - if (error) + if (error) { break; - if (pmtu > IPV6_MAXPACKET) + } + if (pmtu > IPV6_MAXPACKET) { pmtu = IPV6_MAXPACKET; + } - bzero(&mtuinfo, sizeof (mtuinfo)); + bzero(&mtuinfo, sizeof(mtuinfo)); mtuinfo.ip6m_mtu = (u_int32_t)pmtu; optdata = (void *)&mtuinfo; - optdatalen = sizeof (mtuinfo); + optdatalen = sizeof(mtuinfo); error = sooptcopyout(sopt, optdata, optdatalen); break; @@ -2903,12 +2960,12 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) optval = OPTBIT(IN6P_RTHDR); break; case IPV6_2292DSTOPTS: - optval = OPTBIT(IN6P_DSTOPTS| + optval = OPTBIT(IN6P_DSTOPTS | IN6P_RTHDRDSTOPTS); break; } error = sooptcopyout(sopt, &optval, - sizeof (optval)); + sizeof(optval)); break; case IPV6_PKTINFO: @@ -2938,23 +2995,24 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) } #endif /* IPSEC */ case IPV6_BOUND_IF: - if (in6p->inp_flags & INP_BOUND_IF) + if (in6p->inp_flags & INP_BOUND_IF) { optval = in6p->inp_boundifp->if_index; + } error = sooptcopyout(sopt, &optval, - sizeof (optval)); + sizeof(optval)); break; case IPV6_NO_IFT_CELLULAR: optval = INP_NO_CELLULAR(in6p) ? 1 : 0; error = sooptcopyout(sopt, &optval, - sizeof (optval)); + sizeof(optval)); break; case IPV6_OUT_IF: optval = (in6p->in6p_last_outifp != NULL) ? in6p->in6p_last_outifp->if_index : 0; error = sooptcopyout(sopt, &optval, - sizeof (optval)); + sizeof(optval)); break; default: @@ -2968,7 +3026,7 @@ ip6_ctloutput(struct socket *so, struct sockopt *sopt) } else { error = EINVAL; } - return (error); + return error; } int @@ -2984,8 +3042,9 @@ ip6_raw_ctloutput(struct socket *so, struct sockopt *sopt) optname = sopt->sopt_name; optlen = sopt->sopt_valsize; - if (level != IPPROTO_IPV6) - return (EINVAL); + if (level != IPPROTO_IPV6) { + return EINVAL; + } switch (optname) { case IPV6_CHECKSUM: @@ -2999,32 +3058,35 @@ ip6_raw_ctloutput(struct socket *so, struct sockopt *sopt) */ switch (op) { case SOPT_SET: - if (optlen != sizeof (int)) { + if (optlen != sizeof(int)) { error = EINVAL; break; } - error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval)); - if (error) + error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval)); + if (error) { break; + } if ((optval % 2) != 0) { /* the API assumes even offset values */ error = EINVAL; } else if (SOCK_PROTO(so) == IPPROTO_ICMPV6) { - if (optval != icmp6off) + if (optval != icmp6off) { error = EINVAL; + } } else { in6p->in6p_cksum = optval; } break; case SOPT_GET: - if (SOCK_PROTO(so) == IPPROTO_ICMPV6) + if (SOCK_PROTO(so) == IPPROTO_ICMPV6) { optval = icmp6off; - else + } else { optval = in6p->in6p_cksum; + } - error = sooptcopyout(sopt, &optval, sizeof (optval)); + error = sooptcopyout(sopt, &optval, sizeof(optval)); break; default: @@ -3038,7 +3100,7 @@ ip6_raw_ctloutput(struct socket *so, struct sockopt *sopt) break; } - return (error); + return error; } /* @@ -3058,15 +3120,17 @@ ip6_pcbopts(struct ip6_pktopts **pktopt, struct mbuf *m, struct socket *so, #if DIAGNOSTIC if (opt->ip6po_pktinfo || opt->ip6po_nexthop || opt->ip6po_hbh || opt->ip6po_dest1 || opt->ip6po_dest2 || - opt->ip6po_rhinfo.ip6po_rhi_rthdr) + opt->ip6po_rhinfo.ip6po_rhi_rthdr) { printf("%s: all specified options are cleared.\n", __func__); + } #endif ip6_clearpktopts(opt, -1); } else { - 
opt = _MALLOC(sizeof (*opt), M_IP6OPT, M_WAITOK); - if (opt == NULL) - return (ENOBUFS); + opt = _MALLOC(sizeof(*opt), M_IP6OPT, M_WAITOK); + if (opt == NULL) { + return ENOBUFS; + } } *pktopt = NULL; @@ -3075,19 +3139,20 @@ ip6_pcbopts(struct ip6_pktopts **pktopt, struct mbuf *m, struct socket *so, * Only turning off any previous options, regardless of * whether the opt is just created or given. */ - if (opt != NULL) + if (opt != NULL) { FREE(opt, M_IP6OPT); - return (0); + } + return 0; } /* set options specified by user. */ if ((error = ip6_setpktopts(m, opt, NULL, SOCK_PROTO(so))) != 0) { ip6_clearpktopts(opt, -1); /* XXX: discard all options */ FREE(opt, M_IP6OPT); - return (error); + return error; } *pktopt = opt; - return (0); + return 0; } /* @@ -3097,10 +3162,9 @@ ip6_pcbopts(struct ip6_pktopts **pktopt, struct mbuf *m, struct socket *so, void ip6_initpktopts(struct ip6_pktopts *opt) { - - bzero(opt, sizeof (*opt)); - opt->ip6po_hlim = -1; /* -1 means default hop limit */ - opt->ip6po_tclass = -1; /* -1 means default traffic class */ + bzero(opt, sizeof(*opt)); + opt->ip6po_hlim = -1; /* -1 means default hop limit */ + opt->ip6po_tclass = -1; /* -1 means default traffic class */ opt->ip6po_minmtu = IP6PO_MINMTU_MCASTONLY; opt->ip6po_prefer_tempaddr = IP6PO_TEMPADDR_SYSTEM; } @@ -3113,14 +3177,15 @@ ip6_pcbopt(int optname, u_char *buf, int len, struct ip6_pktopts **pktopt, opt = *pktopt; if (opt == NULL) { - opt = _MALLOC(sizeof (*opt), M_IP6OPT, M_WAITOK); - if (opt == NULL) - return (ENOBUFS); + opt = _MALLOC(sizeof(*opt), M_IP6OPT, M_WAITOK); + if (opt == NULL) { + return ENOBUFS; + } ip6_initpktopts(opt); *pktopt = opt; } - return (ip6_setpktopt(optname, buf, len, opt, 1, 0, uproto)); + return ip6_setpktopt(optname, buf, len, opt, 1, 0, uproto); } static int @@ -3137,22 +3202,23 @@ ip6_getpcbopt(struct ip6_pktopts *pktopt, int optname, struct sockopt *sopt) switch (optname) { case IPV6_PKTINFO: - if (pktopt && pktopt->ip6po_pktinfo) + if (pktopt && pktopt->ip6po_pktinfo) { optdata = (void *)pktopt->ip6po_pktinfo; - else { + } else { /* XXX: we don't have to do this every time... 
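ip6_initpktopts() above establishes the convention these option routines rely on: -1 in ip6po_hlim or ip6po_tclass means "not explicitly set, fall back to the system default", so later code can tell an explicit zero from an unset value. A sketch of the pattern:

    struct pktopts_sketch {
        int hlim;                       /* -1: use default hop limit */
        int tclass;                     /* -1: use default traffic class */
    };

    static void
    pktopts_init(struct pktopts_sketch *o)
    {
        o->hlim = -1;
        o->tclass = -1;
    }

    /* Readers then honor the explicit value only when it is >= 0:
     *     tclass = (o->tclass >= 0) ? o->tclass : default_tclass;   */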
*/ - bzero(&null_pktinfo, sizeof (null_pktinfo)); + bzero(&null_pktinfo, sizeof(null_pktinfo)); optdata = (void *)&null_pktinfo; } - optdatalen = sizeof (struct in6_pktinfo); + optdatalen = sizeof(struct in6_pktinfo); break; case IPV6_TCLASS: - if (pktopt && pktopt->ip6po_tclass >= 0) + if (pktopt && pktopt->ip6po_tclass >= 0) { optdata = (void *)&pktopt->ip6po_tclass; - else + } else { optdata = (void *)&deftclass; - optdatalen = sizeof (int); + } + optdatalen = sizeof(int); break; case IPV6_HOPOPTS: @@ -3195,118 +3261,132 @@ ip6_getpcbopt(struct ip6_pktopts *pktopt, int optname, struct sockopt *sopt) break; case IPV6_USE_MIN_MTU: - if (pktopt) + if (pktopt) { optdata = (void *)&pktopt->ip6po_minmtu; - else + } else { optdata = (void *)&defminmtu; - optdatalen = sizeof (int); + } + optdatalen = sizeof(int); break; case IPV6_DONTFRAG: - if (pktopt && ((pktopt->ip6po_flags) & IP6PO_DONTFRAG)) + if (pktopt && ((pktopt->ip6po_flags) & IP6PO_DONTFRAG)) { on = 1; - else + } else { on = 0; + } optdata = (void *)&on; - optdatalen = sizeof (on); + optdatalen = sizeof(on); break; case IPV6_PREFER_TEMPADDR: - if (pktopt) + if (pktopt) { optdata = (void *)&pktopt->ip6po_prefer_tempaddr; - else + } else { optdata = (void *)&defpreftemp; - optdatalen = sizeof (int); + } + optdatalen = sizeof(int); break; - default: /* should not happen */ + default: /* should not happen */ #ifdef DIAGNOSTIC panic("ip6_getpcbopt: unexpected option\n"); #endif - return (ENOPROTOOPT); + return ENOPROTOOPT; } - return (sooptcopyout(sopt, optdata, optdatalen)); + return sooptcopyout(sopt, optdata, optdatalen); } void ip6_clearpktopts(struct ip6_pktopts *pktopt, int optname) { - if (pktopt == NULL) + if (pktopt == NULL) { return; + } if (optname == -1 || optname == IPV6_PKTINFO) { - if (pktopt->ip6po_pktinfo) + if (pktopt->ip6po_pktinfo) { FREE(pktopt->ip6po_pktinfo, M_IP6OPT); + } pktopt->ip6po_pktinfo = NULL; } - if (optname == -1 || optname == IPV6_HOPLIMIT) + if (optname == -1 || optname == IPV6_HOPLIMIT) { pktopt->ip6po_hlim = -1; - if (optname == -1 || optname == IPV6_TCLASS) + } + if (optname == -1 || optname == IPV6_TCLASS) { pktopt->ip6po_tclass = -1; + } if (optname == -1 || optname == IPV6_NEXTHOP) { ROUTE_RELEASE(&pktopt->ip6po_nextroute); - if (pktopt->ip6po_nexthop) + if (pktopt->ip6po_nexthop) { FREE(pktopt->ip6po_nexthop, M_IP6OPT); + } pktopt->ip6po_nexthop = NULL; } if (optname == -1 || optname == IPV6_HOPOPTS) { - if (pktopt->ip6po_hbh) + if (pktopt->ip6po_hbh) { FREE(pktopt->ip6po_hbh, M_IP6OPT); + } pktopt->ip6po_hbh = NULL; } if (optname == -1 || optname == IPV6_RTHDRDSTOPTS) { - if (pktopt->ip6po_dest1) + if (pktopt->ip6po_dest1) { FREE(pktopt->ip6po_dest1, M_IP6OPT); + } pktopt->ip6po_dest1 = NULL; } if (optname == -1 || optname == IPV6_RTHDR) { - if (pktopt->ip6po_rhinfo.ip6po_rhi_rthdr) + if (pktopt->ip6po_rhinfo.ip6po_rhi_rthdr) { FREE(pktopt->ip6po_rhinfo.ip6po_rhi_rthdr, M_IP6OPT); + } pktopt->ip6po_rhinfo.ip6po_rhi_rthdr = NULL; ROUTE_RELEASE(&pktopt->ip6po_route); } if (optname == -1 || optname == IPV6_DSTOPTS) { - if (pktopt->ip6po_dest2) + if (pktopt->ip6po_dest2) { FREE(pktopt->ip6po_dest2, M_IP6OPT); + } pktopt->ip6po_dest2 = NULL; } } -#define PKTOPT_EXTHDRCPY(type) do { \ - if (src->type) { \ - int hlen = \ - (((struct ip6_ext *)src->type)->ip6e_len + 1) << 3; \ - dst->type = _MALLOC(hlen, M_IP6OPT, canwait); \ - if (dst->type == NULL && canwait == M_NOWAIT) \ - goto bad; \ - bcopy(src->type, dst->type, hlen); \ - } \ +#define PKTOPT_EXTHDRCPY(type) do { \ + if (src->type) { \ + int hlen 
= \ + (((struct ip6_ext *)src->type)->ip6e_len + 1) << 3; \ + dst->type = _MALLOC(hlen, M_IP6OPT, canwait); \ + if (dst->type == NULL && canwait == M_NOWAIT) \ + goto bad; \ + bcopy(src->type, dst->type, hlen); \ + } \ } while (0) static int copypktopts(struct ip6_pktopts *dst, struct ip6_pktopts *src, int canwait) { - if (dst == NULL || src == NULL) { + if (dst == NULL || src == NULL) { printf("copypktopts: invalid argument\n"); - return (EINVAL); + return EINVAL; } dst->ip6po_hlim = src->ip6po_hlim; dst->ip6po_tclass = src->ip6po_tclass; dst->ip6po_flags = src->ip6po_flags; if (src->ip6po_pktinfo) { - dst->ip6po_pktinfo = _MALLOC(sizeof (*dst->ip6po_pktinfo), + dst->ip6po_pktinfo = _MALLOC(sizeof(*dst->ip6po_pktinfo), M_IP6OPT, canwait); - if (dst->ip6po_pktinfo == NULL && canwait == M_NOWAIT) + if (dst->ip6po_pktinfo == NULL && canwait == M_NOWAIT) { goto bad; + } *dst->ip6po_pktinfo = *src->ip6po_pktinfo; } if (src->ip6po_nexthop) { dst->ip6po_nexthop = _MALLOC(src->ip6po_nexthop->sa_len, M_IP6OPT, canwait); - if (dst->ip6po_nexthop == NULL && canwait == M_NOWAIT) + if (dst->ip6po_nexthop == NULL && canwait == M_NOWAIT) { goto bad; + } bcopy(src->ip6po_nexthop, dst->ip6po_nexthop, src->ip6po_nexthop->sa_len); } @@ -3314,11 +3394,11 @@ copypktopts(struct ip6_pktopts *dst, struct ip6_pktopts *src, int canwait) PKTOPT_EXTHDRCPY(ip6po_dest1); PKTOPT_EXTHDRCPY(ip6po_dest2); PKTOPT_EXTHDRCPY(ip6po_rthdr); /* not copy the cached route */ - return (0); + return 0; bad: ip6_clearpktopts(dst, -1); - return (ENOBUFS); + return ENOBUFS; } #undef PKTOPT_EXTHDRCPY @@ -3328,24 +3408,26 @@ ip6_copypktopts(struct ip6_pktopts *src, int canwait) int error; struct ip6_pktopts *dst; - dst = _MALLOC(sizeof (*dst), M_IP6OPT, canwait); - if (dst == NULL) - return (NULL); + dst = _MALLOC(sizeof(*dst), M_IP6OPT, canwait); + if (dst == NULL) { + return NULL; + } ip6_initpktopts(dst); if ((error = copypktopts(dst, src, canwait)) != 0) { FREE(dst, M_IP6OPT); - return (NULL); + return NULL; } - return (dst); + return dst; } void ip6_freepcbopts(struct ip6_pktopts *pktopt) { - if (pktopt == NULL) + if (pktopt == NULL) { return; + } ip6_clearpktopts(pktopt, -1); @@ -3355,10 +3437,10 @@ ip6_freepcbopts(struct ip6_pktopts *pktopt) void ip6_moptions_init(void) { - PE_parse_boot_argn("ifa_debug", &im6o_debug, sizeof (im6o_debug)); + PE_parse_boot_argn("ifa_debug", &im6o_debug, sizeof(im6o_debug)); - im6o_size = (im6o_debug == 0) ? sizeof (struct ip6_moptions) : - sizeof (struct ip6_moptions_dbg); + im6o_size = (im6o_debug == 0) ? sizeof(struct ip6_moptions) : + sizeof(struct ip6_moptions_dbg); im6o_zone = zinit(im6o_size, IM6O_ZONE_MAX * im6o_size, 0, IM6O_ZONE_NAME); @@ -3372,10 +3454,11 @@ ip6_moptions_init(void) void im6o_addref(struct ip6_moptions *im6o, int locked) { - if (!locked) + if (!locked) { IM6O_LOCK(im6o); - else + } else { IM6O_LOCK_ASSERT_HELD(im6o); + } if (++im6o->im6o_refcnt == 0) { panic("%s: im6o %p wraparound refcnt\n", __func__, im6o); @@ -3384,8 +3467,9 @@ im6o_addref(struct ip6_moptions *im6o, int locked) (*im6o->im6o_trace)(im6o, TRUE); } - if (!locked) + if (!locked) { IM6O_UNLOCK(im6o); + } } void @@ -3411,13 +3495,15 @@ im6o_remref(struct ip6_moptions *im6o) struct in6_mfilter *imf; imf = im6o->im6o_mfilters ? 
&im6o->im6o_mfilters[i] : NULL; - if (imf != NULL) + if (imf != NULL) { im6f_leave(imf); + } (void) in6_mc_leave(im6o->im6o_membership[i], imf); - if (imf != NULL) + if (imf != NULL) { im6f_purge(imf); + } IN6M_REMREF(im6o->im6o_membership[i]); im6o->im6o_membership[i] = NULL; @@ -3484,7 +3570,7 @@ ip6_allocmoptions(int how) IM6O_ADDREF(im6o); } - return (im6o); + return im6o; } /* @@ -3496,8 +3582,9 @@ ip6_setpktopts(struct mbuf *control, struct ip6_pktopts *opt, { struct cmsghdr *cm = NULL; - if (control == NULL || opt == NULL) - return (EINVAL); + if (control == NULL || opt == NULL) { + return EINVAL; + } ip6_initpktopts(opt); if (stickyopt) { @@ -3512,37 +3599,40 @@ ip6_setpktopts(struct mbuf *control, struct ip6_pktopts *opt, * but we can allow this since this option should be rarely * used. */ - if ((error = copypktopts(opt, stickyopt, M_NOWAIT)) != 0) - return (error); + if ((error = copypktopts(opt, stickyopt, M_NOWAIT)) != 0) { + return error; + } } /* * XXX: Currently, we assume all the optional information is stored * in a single mbuf. */ - if (control->m_next) - return (EINVAL); + if (control->m_next) { + return EINVAL; + } - if (control->m_len < CMSG_LEN(0)) - return (EINVAL); + if (control->m_len < CMSG_LEN(0)) { + return EINVAL; + } - for (cm = M_FIRST_CMSGHDR(control); cm != NULL; + for (cm = M_FIRST_CMSGHDR(control); + is_cmsg_valid(control, cm); cm = M_NXT_CMSGHDR(control, cm)) { int error; - if (cm->cmsg_len < sizeof (struct cmsghdr) || - cm->cmsg_len > control->m_len) - return (EINVAL); - if (cm->cmsg_level != IPPROTO_IPV6) + if (cm->cmsg_level != IPPROTO_IPV6) { continue; + } error = ip6_setpktopt(cm->cmsg_type, CMSG_DATA(cm), cm->cmsg_len - CMSG_LEN(0), opt, 0, 1, uproto); - if (error) - return (error); + if (error) { + return error; + } } - return (0); + return 0; } /* * Set a particular packet option, as a sticky option or an ancillary data @@ -3565,14 +3655,14 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, #ifdef DIAGNOSTIC printf("ip6_setpktopt: impossible case\n"); #endif - return (EINVAL); + return EINVAL; } /* * Caller must have ensured that the buffer is at least * aligned on 32-bit boundary. */ - VERIFY(IS_P2ALIGNED(buf, sizeof (u_int32_t))); + VERIFY(IS_P2ALIGNED(buf, sizeof(u_int32_t))); /* * IPV6_2292xxx is for backward compatibility to RFC2292, and should @@ -3588,7 +3678,7 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, case IPV6_2292DSTOPTS: case IPV6_2292RTHDR: case IPV6_2292PKTOPTIONS: - return (ENOPROTOOPT); + return ENOPROTOOPT; } } if (sticky && cmsg) { @@ -3604,7 +3694,7 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, case IPV6_DONTFRAG: case IPV6_TCLASS: case IPV6_PREFER_TEMPADDR: /* XXX: not an RFC3542 option */ - return (ENOPROTOOPT); + return ENOPROTOOPT; } } @@ -3614,8 +3704,9 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, struct ifnet *ifp = NULL; struct in6_pktinfo *pktinfo; - if (len != sizeof (struct in6_pktinfo)) - return (EINVAL); + if (len != sizeof(struct in6_pktinfo)) { + return EINVAL; + } pktinfo = (struct in6_pktinfo *)(void *)buf; @@ -3634,7 +3725,7 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, if (uproto == IPPROTO_TCP && optname == IPV6_PKTINFO && sticky && !IN6_IS_ADDR_UNSPECIFIED(&pktinfo->ipi6_addr)) { - return (EINVAL); + return EINVAL; } /* validate the interface index if specified. 
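Editor's note: the loop in ip6_setpktopts above now validates each control message in the loop condition via is_cmsg_valid() instead of the old in-body cmsg_len checks, so a malformed cmsg_len ends the walk rather than deriving an out-of-bounds next-header pointer. A minimal userspace sketch of the same defensive pattern follows; validate_cmsg() is a hypothetical, simplified stand-in for the kernel helper, using the portable CMSG_* macros.

#include <stdbool.h>
#include <stddef.h>
#include <sys/socket.h>
#include <netinet/in.h>

static bool
validate_cmsg(const struct msghdr *mh, const struct cmsghdr *cm)
{
	const char *end = (const char *)mh->msg_control + mh->msg_controllen;

	if (cm == NULL) {
		return false;
	}
	// Header must fit and the claimed length must stay in bounds.
	if (cm->cmsg_len < sizeof(struct cmsghdr)) {
		return false;
	}
	if ((const char *)cm + cm->cmsg_len > end) {
		return false;
	}
	return true;
}

static int
walk_cmsgs(struct msghdr *mh)
{
	struct cmsghdr *cm;

	// Validation happens in the loop condition, mirroring the diff:
	// a bogus cmsg_len terminates the walk instead of corrupting it.
	for (cm = CMSG_FIRSTHDR(mh); validate_cmsg(mh, cm);
	    cm = CMSG_NXTHDR(mh, cm)) {
		if (cm->cmsg_level != IPPROTO_IPV6) {
			continue;
		}
		// ... handle CMSG_DATA(cm) here ...
	}
	return 0;
}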
*/ @@ -3642,14 +3733,14 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, if (pktinfo->ipi6_ifindex > if_index) { ifnet_head_done(); - return (ENXIO); + return ENXIO; } if (pktinfo->ipi6_ifindex) { ifp = ifindex2ifnet[pktinfo->ipi6_ifindex]; if (ifp == NULL) { ifnet_head_done(); - return (ENXIO); + return ENXIO; } } @@ -3666,12 +3757,13 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, * application when it is used as a sticky option. */ if (opt->ip6po_pktinfo == NULL) { - opt->ip6po_pktinfo = _MALLOC(sizeof (*pktinfo), + opt->ip6po_pktinfo = _MALLOC(sizeof(*pktinfo), M_IP6OPT, M_NOWAIT); - if (opt->ip6po_pktinfo == NULL) - return (ENOBUFS); + if (opt->ip6po_pktinfo == NULL) { + return ENOBUFS; + } } - bcopy(pktinfo, opt->ip6po_pktinfo, sizeof (*pktinfo)); + bcopy(pktinfo, opt->ip6po_pktinfo, sizeof(*pktinfo)); break; } @@ -3683,14 +3775,17 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, * RFC 3542 deprecated the usage of sticky IPV6_HOPLIMIT * to simplify the ordering among hoplimit options. */ - if (optname == IPV6_HOPLIMIT && sticky) - return (ENOPROTOOPT); + if (optname == IPV6_HOPLIMIT && sticky) { + return ENOPROTOOPT; + } - if (len != sizeof (int)) - return (EINVAL); + if (len != sizeof(int)) { + return EINVAL; + } hlimp = (int *)(void *)buf; - if (*hlimp < -1 || *hlimp > 255) - return (EINVAL); + if (*hlimp < -1 || *hlimp > 255) { + return EINVAL; + } opt->ip6po_hlim = *hlimp; break; @@ -3699,11 +3794,13 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, case IPV6_TCLASS: { int tclass; - if (len != sizeof (int)) - return (EINVAL); + if (len != sizeof(int)) { + return EINVAL; + } tclass = *(int *)(void *)buf; - if (tclass < -1 || tclass > 255) - return (EINVAL); + if (tclass < -1 || tclass > 255) { + return EINVAL; + } opt->ip6po_tclass = tclass; break; @@ -3712,45 +3809,49 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, case IPV6_2292NEXTHOP: case IPV6_NEXTHOP: error = suser(kauth_cred_get(), 0); - if (error) - return (EACCES); + if (error) { + return EACCES; + } - if (len == 0) { /* just remove the option */ + if (len == 0) { /* just remove the option */ ip6_clearpktopts(opt, IPV6_NEXTHOP); break; } /* check if cmsg_len is large enough for sa_len */ - if (len < sizeof (struct sockaddr) || len < *buf) - return (EINVAL); + if (len < sizeof(struct sockaddr) || len < *buf) { + return EINVAL; + } switch (SA(buf)->sa_family) { case AF_INET6: { struct sockaddr_in6 *sa6 = SIN6(buf); - if (sa6->sin6_len != sizeof (struct sockaddr_in6)) - return (EINVAL); + if (sa6->sin6_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } if (IN6_IS_ADDR_UNSPECIFIED(&sa6->sin6_addr) || IN6_IS_ADDR_MULTICAST(&sa6->sin6_addr)) { - return (EINVAL); + return EINVAL; } if ((error = sa6_embedscope(sa6, ip6_use_defzone)) != 0) { - return (error); + return error; } break; } - case AF_LINK: /* should eventually be supported */ + case AF_LINK: /* should eventually be supported */ default: - return (EAFNOSUPPORT); + return EAFNOSUPPORT; } /* turn off the previous option, then set the new option. */ ip6_clearpktopts(opt, IPV6_NEXTHOP); opt->ip6po_nexthop = _MALLOC(*buf, M_IP6OPT, M_NOWAIT); - if (opt->ip6po_nexthop == NULL) - return (ENOBUFS); + if (opt->ip6po_nexthop == NULL) { + return ENOBUFS; + } bcopy(buf, opt->ip6po_nexthop, *buf); break; @@ -3765,27 +3866,31 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, * overhead. 
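Editor's note: the length validation just below (hbhlen = (hbh->ip6h_len + 1) << 3, and the analogous destlen/rthlen computations) relies on the IPv6 extension-header encoding: the length byte counts 8-octet units excluding the first 8 octets, so 0 still means an 8-byte header. A standalone demo of that rule, with an invented helper name:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t
ext_hdr_bytes(uint8_t len_field)
{
	// len_field counts 8-byte units beyond the mandatory first 8 bytes.
	return ((size_t)len_field + 1) << 3;
}

int
main(void)
{
	assert(ext_hdr_bytes(0) == 8);       // minimal header
	assert(ext_hdr_bytes(1) == 16);
	assert(ext_hdr_bytes(255) == 2048);  // largest encodable header
	printf("ok\n");
	return 0;
}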
*/ error = suser(kauth_cred_get(), 0); - if (error) - return (EACCES); + if (error) { + return EACCES; + } if (len == 0) { ip6_clearpktopts(opt, IPV6_HOPOPTS); - break; /* just remove the option */ + break; /* just remove the option */ } /* message length validation */ - if (len < sizeof (struct ip6_hbh)) - return (EINVAL); + if (len < sizeof(struct ip6_hbh)) { + return EINVAL; + } hbh = (struct ip6_hbh *)(void *)buf; hbhlen = (hbh->ip6h_len + 1) << 3; - if (len != hbhlen) - return (EINVAL); + if (len != hbhlen) { + return EINVAL; + } /* turn off the previous option, then set the new option. */ ip6_clearpktopts(opt, IPV6_HOPOPTS); opt->ip6po_hbh = _MALLOC(hbhlen, M_IP6OPT, M_NOWAIT); - if (opt->ip6po_hbh == NULL) - return (ENOBUFS); + if (opt->ip6po_hbh == NULL) { + return ENOBUFS; + } bcopy(hbh, opt->ip6po_hbh, hbhlen); capture_exthdrstat_out = TRUE; break; @@ -3798,21 +3903,24 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, int destlen; error = suser(kauth_cred_get(), 0); - if (error) - return (EACCES); + if (error) { + return EACCES; + } if (len == 0) { ip6_clearpktopts(opt, optname); - break; /* just remove the option */ + break; /* just remove the option */ } /* message length validation */ - if (len < sizeof (struct ip6_dest)) - return (EINVAL); + if (len < sizeof(struct ip6_dest)) { + return EINVAL; + } dest = (struct ip6_dest *)(void *)buf; destlen = (dest->ip6d_len + 1) << 3; - if (len != destlen) - return (EINVAL); + if (len != destlen) { + return EINVAL; + } /* * Determine the position that the destination options header @@ -3832,10 +3940,11 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, * RFC3542 solved the ambiguity by introducing * separate ancillary data or option types. */ - if (opt->ip6po_rthdr == NULL) + if (opt->ip6po_rthdr == NULL) { newdest = &opt->ip6po_dest1; - else + } else { newdest = &opt->ip6po_dest2; + } break; case IPV6_RTHDRDSTOPTS: newdest = &opt->ip6po_dest1; @@ -3848,8 +3957,9 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, /* turn off the previous option, then set the new option. 
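Editor's note: each sticky option here follows the same clear-then-set idiom seen below: ip6_clearpktopts() frees any previous buffer before the new copy is installed, so replacing an option can never leak the old allocation. A minimal userspace sketch of the idiom, with invented names and malloc/free standing in for _MALLOC/FREE:

#include <stdlib.h>
#include <string.h>

static int
set_opt_buf(void **slot, const void *src, size_t len)
{
	free(*slot);            // "turn off the previous option"
	*slot = NULL;
	if (len == 0) {         // an empty payload just removes the option
		return 0;
	}
	void *copy = malloc(len);
	if (copy == NULL) {
		return -1;      // the kernel code returns ENOBUFS here
	}
	memcpy(copy, src, len);
	*slot = copy;
	return 0;
}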
*/ ip6_clearpktopts(opt, optname); *newdest = _MALLOC(destlen, M_IP6OPT, M_NOWAIT); - if (*newdest == NULL) - return (ENOBUFS); + if (*newdest == NULL) { + return ENOBUFS; + } bcopy(dest, *newdest, destlen); capture_exthdrstat_out = TRUE; break; @@ -3862,55 +3972,63 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, if (len == 0) { ip6_clearpktopts(opt, IPV6_RTHDR); - break; /* just remove the option */ + break; /* just remove the option */ } /* message length validation */ - if (len < sizeof (struct ip6_rthdr)) - return (EINVAL); + if (len < sizeof(struct ip6_rthdr)) { + return EINVAL; + } rth = (struct ip6_rthdr *)(void *)buf; rthlen = (rth->ip6r_len + 1) << 3; - if (len != rthlen) - return (EINVAL); + if (len != rthlen) { + return EINVAL; + } switch (rth->ip6r_type) { case IPV6_RTHDR_TYPE_0: - if (rth->ip6r_len == 0) /* must contain one addr */ - return (EINVAL); - if (rth->ip6r_len % 2) /* length must be even */ - return (EINVAL); - if (rth->ip6r_len / 2 != rth->ip6r_segleft) - return (EINVAL); + if (rth->ip6r_len == 0) { /* must contain one addr */ + return EINVAL; + } + if (rth->ip6r_len % 2) { /* length must be even */ + return EINVAL; + } + if (rth->ip6r_len / 2 != rth->ip6r_segleft) { + return EINVAL; + } break; default: - return (EINVAL); /* not supported */ + return EINVAL; /* not supported */ } /* turn off the previous option */ ip6_clearpktopts(opt, IPV6_RTHDR); opt->ip6po_rthdr = _MALLOC(rthlen, M_IP6OPT, M_NOWAIT); - if (opt->ip6po_rthdr == NULL) - return (ENOBUFS); + if (opt->ip6po_rthdr == NULL) { + return ENOBUFS; + } bcopy(rth, opt->ip6po_rthdr, rthlen); capture_exthdrstat_out = TRUE; break; } case IPV6_USE_MIN_MTU: - if (len != sizeof (int)) - return (EINVAL); + if (len != sizeof(int)) { + return EINVAL; + } minmtupolicy = *(int *)(void *)buf; if (minmtupolicy != IP6PO_MINMTU_MCASTONLY && minmtupolicy != IP6PO_MINMTU_DISABLE && minmtupolicy != IP6PO_MINMTU_ALL) { - return (EINVAL); + return EINVAL; } opt->ip6po_minmtu = minmtupolicy; break; case IPV6_DONTFRAG: - if (len != sizeof (int)) - return (EINVAL); + if (len != sizeof(int)) { + return EINVAL; + } if (uproto == IPPROTO_TCP || *(int *)(void *)buf == 0) { /* @@ -3924,19 +4042,20 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, break; case IPV6_PREFER_TEMPADDR: - if (len != sizeof (int)) - return (EINVAL); + if (len != sizeof(int)) { + return EINVAL; + } preftemp = *(int *)(void *)buf; if (preftemp != IP6PO_TEMPADDR_SYSTEM && preftemp != IP6PO_TEMPADDR_NOTPREFER && preftemp != IP6PO_TEMPADDR_PREFER) { - return (EINVAL); + return EINVAL; } opt->ip6po_prefer_tempaddr = preftemp; break; default: - return (ENOPROTOOPT); + return ENOPROTOOPT; } /* end of switch */ if (capture_exthdrstat_out) { @@ -3947,7 +4066,7 @@ ip6_setpktopt(int optname, u_char *buf, int len, struct ip6_pktopts *opt, } } - return (0); + return 0; } /* @@ -3964,8 +4083,9 @@ ip6_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m, struct ip6_hdr *ip6; struct in6_addr src; - if (lo_ifp == NULL) + if (lo_ifp == NULL) { return; + } /* * Copy the packet header as it's needed for the checksum. 
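Editor's note: most hunks in this file are a mechanical restyle rather than behavior changes: parenthesized returns become bare returns, sizeof (x) loses its space, and every conditional branch gains braces. A minimal before/after illustration with an invented helper, not taken from the tree:

#include <stddef.h>

// Old style: optional braces, space after sizeof, parenthesized return.
static size_t
pick_old(const int *p, const int *dflt, const int **out)
{
	if (p)
		*out = p;
	else
		*out = dflt;
	return (sizeof (int));
}

// New style: braces on every branch, sizeof(x), bare return.
static size_t
pick_new(const int *p, const int *dflt, const int **out)
{
	if (p) {
		*out = p;
	} else {
		*out = dflt;
	}
	return sizeof(int);
}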
@@ -3975,11 +4095,13 @@ ip6_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m, */ copym = m_copym_mode(m, 0, M_COPYALL, M_DONTWAIT, M_COPYM_COPY_HDR); if (copym != NULL && ((copym->m_flags & M_EXT) || - copym->m_len < sizeof (struct ip6_hdr))) - copym = m_pullup(copym, sizeof (struct ip6_hdr)); + copym->m_len < sizeof(struct ip6_hdr))) { + copym = m_pullup(copym, sizeof(struct ip6_hdr)); + } - if (copym == NULL) + if (copym == NULL) { return; + } ip6 = mtod(copym, struct ip6_hdr *); src = ip6->ip6_src; @@ -3990,8 +4112,9 @@ ip6_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m, in6_clearscope(&ip6->ip6_src); in6_clearscope(&ip6->ip6_dst); - if (copym->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA) + if (copym->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA) { in6_delayed_cksum_offset(copym, 0, optlen, nxt0); + } /* * Stuff the 'real' ifp into the pkthdr, to be used in matching @@ -4021,8 +4144,9 @@ ip6_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m, } lck_rw_done(&in6_ifaddr_rwlock); } - if (srcifp != NULL) + if (srcifp != NULL) { ip6_setsrcifaddr_info(copym, srcifp->if_index, NULL); + } ip6_setdstifaddr_info(copym, origifp->if_index, NULL); dlil_output(lo_ifp, PF_INET6, copym, NULL, SA(dst), 0, NULL); @@ -4038,24 +4162,24 @@ ip6_splithdr(struct mbuf *m, struct ip6_exthdrs *exthdrs) struct ip6_hdr *ip6; ip6 = mtod(m, struct ip6_hdr *); - if (m->m_len > sizeof (*ip6)) { - MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + if (m->m_len > sizeof(*ip6)) { + MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (mh == NULL) { m_freem(m); - return (ENOBUFS); + return ENOBUFS; } M_COPY_PKTHDR(mh, m); - MH_ALIGN(mh, sizeof (*ip6)); + MH_ALIGN(mh, sizeof(*ip6)); m->m_flags &= ~M_PKTHDR; - m->m_len -= sizeof (*ip6); - m->m_data += sizeof (*ip6); + m->m_len -= sizeof(*ip6); + m->m_data += sizeof(*ip6); mh->m_next = m; m = mh; - m->m_len = sizeof (*ip6); - bcopy((caddr_t)ip6, mtod(m, caddr_t), sizeof (*ip6)); + m->m_len = sizeof(*ip6); + bcopy((caddr_t)ip6, mtod(m, caddr_t), sizeof(*ip6)); } exthdrs->ip6e_ip6 = m; - return (0); + return 0; } static void @@ -4090,7 +4214,7 @@ ip6_output_checksum(struct ifnet *ifp, uint32_t mtu, struct mbuf *m, ((hwcap & CSUM_ZERO_INVERT) && (m->m_pkthdr.csum_flags & CSUM_ZERO_INVERT))) && tlen <= mtu) { - uint16_t start = sizeof (struct ip6_hdr); + uint16_t start = sizeof(struct ip6_hdr); uint16_t ulpoff = m->m_pkthdr.csum_data & 0xffff; m->m_pkthdr.csum_flags |= @@ -4132,12 +4256,13 @@ ip6_optlen(struct in6pcb *in6p) { int len; - if (!in6p->in6p_outputopts) - return (0); + if (!in6p->in6p_outputopts) { + return 0; + } len = 0; -#define elen(x) \ - (((struct ip6_ext *)(x)) ? \ +#define elen(x) \ + (((struct ip6_ext *)(x)) ? 
\ (((struct ip6_ext *)(x))->ip6e_len + 1) << 3 : 0) len += elen(in6p->in6p_outputopts->ip6po_hbh); @@ -4147,7 +4272,7 @@ ip6_optlen(struct in6pcb *in6p) } len += elen(in6p->in6p_outputopts->ip6po_rthdr); len += elen(in6p->in6p_outputopts->ip6po_dest2); - return (len); + return len; #undef elen } @@ -4159,8 +4284,9 @@ sysctl_reset_ip6_output_stats SYSCTL_HANDLER_ARGS i = ip6_output_measure; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* impose bounds */ if (i < 0 || i > 1) { error = EINVAL; @@ -4171,7 +4297,7 @@ sysctl_reset_ip6_output_stats SYSCTL_HANDLER_ARGS } ip6_output_measure = i; done: - return (error); + return error; } static int @@ -4183,8 +4309,9 @@ sysctl_ip6_output_measure_bins SYSCTL_HANDLER_ARGS i = ip6_output_measure_bins; error = sysctl_handle_quad(oidp, &i, 0, req); - if (error || req->newptr == USER_ADDR_NULL) + if (error || req->newptr == USER_ADDR_NULL) { goto done; + } /* validate data */ if (!net_perf_validate_bins(i)) { error = EINVAL; @@ -4192,15 +4319,16 @@ sysctl_ip6_output_measure_bins SYSCTL_HANDLER_ARGS } ip6_output_measure_bins = i; done: - return (error); + return error; } static int sysctl_ip6_output_getperf SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - if (req->oldptr == USER_ADDR_NULL) - req->oldlen = (size_t)sizeof (struct ipstat); + if (req->oldptr == USER_ADDR_NULL) { + req->oldlen = (size_t)sizeof(struct ipstat); + } - return (SYSCTL_OUT(req, &net_perf, MIN(sizeof (net_perf), req->oldlen))); + return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen)); } diff --git a/bsd/netinet6/ip6_var.h b/bsd/netinet6/ip6_var.h index 23c510710..b0c1b84cb 100644 --- a/bsd/netinet6/ip6_var.h +++ b/bsd/netinet6/ip6_var.h @@ -91,7 +91,7 @@ */ #ifndef _NETINET6_IP6_VAR_H_ -#define _NETINET6_IP6_VAR_H_ +#define _NETINET6_IP6_VAR_H_ #include #ifdef BSD_KERNEL_PRIVATE @@ -101,79 +101,79 @@ * IP6 reassembly queue structure. Each fragment * being reassembled is attached to one of these structures. 
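Editor's note: the struct declared just below heads two linked structures at once: ip6q_next/ip6q_prev chain the per-datagram queues into a circular list, while ip6q_down/ip6q_up chain the fragments of one datagram. A minimal standalone model of the circular-ring lookup, with invented names (the kernel additionally keys on src, dst and next header):

#include <stddef.h>
#include <stdint.h>

struct frag_q {
	struct frag_q *next;
	struct frag_q *prev;
	uint32_t ident;
};

// Sentinel head: an empty ring points at itself.
static struct frag_q head = { &head, &head, 0 };

static struct frag_q *
frag_q_lookup(uint32_t ident)
{
	struct frag_q *q;

	for (q = head.next; q != &head; q = q->next) {
		if (q->ident == ident) {
			return q;
		}
	}
	return NULL;	// caller would start a new reassembly queue
}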
*/ -struct ip6q { +struct ip6q { struct ip6asfrag *ip6q_down; struct ip6asfrag *ip6q_up; - u_int32_t ip6q_ident; - u_int8_t ip6q_nxt; - u_int8_t ip6q_ecn; - u_int8_t ip6q_ttl; + u_int32_t ip6q_ident; + u_int8_t ip6q_nxt; + u_int8_t ip6q_ecn; + u_int8_t ip6q_ttl; struct in6_addr ip6q_src, ip6q_dst; - struct ip6q *ip6q_next; - struct ip6q *ip6q_prev; - int ip6q_unfrglen; /* len of unfragmentable part */ + struct ip6q *ip6q_next; + struct ip6q *ip6q_prev; + int ip6q_unfrglen; /* len of unfragmentable part */ #ifdef notyet - u_char *ip6q_nxtp; + u_char *ip6q_nxtp; #endif - int ip6q_nfrag; /* # of fragments */ - uint32_t ip6q_csum_flags; /* checksum flags */ - uint32_t ip6q_csum; /* partial checksum value */ + int ip6q_nfrag; /* # of fragments */ + uint32_t ip6q_csum_flags; /* checksum flags */ + uint32_t ip6q_csum; /* partial checksum value */ }; -struct ip6asfrag { +struct ip6asfrag { struct ip6asfrag *ip6af_down; struct ip6asfrag *ip6af_up; - struct mbuf *ip6af_m; - int ip6af_offset; /* offset in ip6af_m to next header */ - int ip6af_frglen; /* fragmentable part length */ - int ip6af_off; /* fragment offset */ - u_int16_t ip6af_mff; /* more fragment bit in frag off */ + struct mbuf *ip6af_m; + int ip6af_offset; /* offset in ip6af_m to next header */ + int ip6af_frglen; /* fragmentable part length */ + int ip6af_off; /* fragment offset */ + u_int16_t ip6af_mff; /* more fragment bit in frag off */ }; -#define IP6_REASS_MBUF(ip6af) (*(struct mbuf **)&((ip6af)->ip6af_m)) +#define IP6_REASS_MBUF(ip6af) (*(struct mbuf **)&((ip6af)->ip6af_m)) -struct ip6_moptions { +struct ip6_moptions { decl_lck_mtx_data(, im6o_lock); - uint32_t im6o_refcnt; /* ref count */ - uint32_t im6o_debug; /* see ifa_debug flags */ - struct ifnet *im6o_multicast_ifp; /* ifp for outgoing multicasts */ - u_char im6o_multicast_hlim; /* hoplimit for outgoing multicasts */ - u_char im6o_multicast_loop; /* 1 >= hear sends if a member */ - u_short im6o_num_memberships; /* no. memberships this socket */ - u_short im6o_max_memberships; /* max memberships this socket */ - struct in6_multi **im6o_membership; /* group memberships */ - struct in6_mfilter *im6o_mfilters; /* source filters */ - void (*im6o_trace) /* callback fn for tracing refs */ - (struct ip6_moptions *, int); + uint32_t im6o_refcnt; /* ref count */ + uint32_t im6o_debug; /* see ifa_debug flags */ + struct ifnet *im6o_multicast_ifp; /* ifp for outgoing multicasts */ + u_char im6o_multicast_hlim; /* hoplimit for outgoing multicasts */ + u_char im6o_multicast_loop; /* 1 >= hear sends if a member */ + u_short im6o_num_memberships; /* no. 
memberships this socket */ + u_short im6o_max_memberships; /* max memberships this socket */ + struct in6_multi **im6o_membership; /* group memberships */ + struct in6_mfilter *im6o_mfilters; /* source filters */ + void (*im6o_trace) /* callback fn for tracing refs */ + (struct ip6_moptions *, int); }; -#define IM6O_LOCK_ASSERT_HELD(_im6o) \ +#define IM6O_LOCK_ASSERT_HELD(_im6o) \ LCK_MTX_ASSERT(&(_im6o)->im6o_lock, LCK_MTX_ASSERT_OWNED) -#define IM6O_LOCK_ASSERT_NOTHELD(_im6o) \ +#define IM6O_LOCK_ASSERT_NOTHELD(_im6o) \ LCK_MTX_ASSERT(&(_im6o)->im6o_lock, LCK_MTX_ASSERT_NOTOWNED) -#define IM6O_LOCK(_im6o) \ +#define IM6O_LOCK(_im6o) \ lck_mtx_lock(&(_im6o)->im6o_lock) -#define IM6O_LOCK_SPIN(_im6o) \ +#define IM6O_LOCK_SPIN(_im6o) \ lck_mtx_lock_spin(&(_im6o)->im6o_lock) -#define IM6O_CONVERT_LOCK(_im6o) do { \ - IM6O_LOCK_ASSERT_HELD(_im6o); \ - lck_mtx_convert_spin(&(_im6o)->im6o_lock); \ +#define IM6O_CONVERT_LOCK(_im6o) do { \ + IM6O_LOCK_ASSERT_HELD(_im6o); \ + lck_mtx_convert_spin(&(_im6o)->im6o_lock); \ } while (0) -#define IM6O_UNLOCK(_im6o) \ +#define IM6O_UNLOCK(_im6o) \ lck_mtx_unlock(&(_im6o)->im6o_lock) -#define IM6O_ADDREF(_im6o) \ +#define IM6O_ADDREF(_im6o) \ im6o_addref(_im6o, 0) -#define IM6O_ADDREF_LOCKED(_im6o) \ +#define IM6O_ADDREF_LOCKED(_im6o) \ im6o_addref(_im6o, 1) -#define IM6O_REMREF(_im6o) \ +#define IM6O_REMREF(_im6o) \ im6o_remref(_im6o) struct ip6_exthdrs { @@ -190,63 +190,63 @@ struct ip6_exthdrs { */ /* Routing header related info */ -struct ip6po_rhinfo { - struct ip6_rthdr *ip6po_rhi_rthdr; /* Routing header */ - struct route_in6 ip6po_rhi_route; /* Route to the 1st hop */ +struct ip6po_rhinfo { + struct ip6_rthdr *ip6po_rhi_rthdr; /* Routing header */ + struct route_in6 ip6po_rhi_route; /* Route to the 1st hop */ }; -#define ip6po_rthdr ip6po_rhinfo.ip6po_rhi_rthdr -#define ip6po_route ip6po_rhinfo.ip6po_rhi_route +#define ip6po_rthdr ip6po_rhinfo.ip6po_rhi_rthdr +#define ip6po_route ip6po_rhinfo.ip6po_rhi_route /* Nexthop related info */ -struct ip6po_nhinfo { - struct sockaddr *ip6po_nhi_nexthop; - struct route_in6 ip6po_nhi_route; /* Route to the nexthop */ +struct ip6po_nhinfo { + struct sockaddr *ip6po_nhi_nexthop; + struct route_in6 ip6po_nhi_route; /* Route to the nexthop */ }; -#define ip6po_nexthop ip6po_nhinfo.ip6po_nhi_nexthop -#define ip6po_nextroute ip6po_nhinfo.ip6po_nhi_route +#define ip6po_nexthop ip6po_nhinfo.ip6po_nhi_nexthop +#define ip6po_nextroute ip6po_nhinfo.ip6po_nhi_route -struct ip6_pktopts { - struct mbuf *ip6po_m; /* Pointer to mbuf storing the data */ - int ip6po_hlim; /* Hoplimit for outgoing packets */ +struct ip6_pktopts { + struct mbuf *ip6po_m; /* Pointer to mbuf storing the data */ + int ip6po_hlim; /* Hoplimit for outgoing packets */ /* Outgoing IF/address information */ - struct in6_pktinfo *ip6po_pktinfo; + struct in6_pktinfo *ip6po_pktinfo; /* Next-hop address information */ - struct ip6po_nhinfo ip6po_nhinfo; + struct ip6po_nhinfo ip6po_nhinfo; - struct ip6_hbh *ip6po_hbh; /* Hop-by-Hop options header */ + struct ip6_hbh *ip6po_hbh; /* Hop-by-Hop options header */ /* Destination options header (before a routing header) */ - struct ip6_dest *ip6po_dest1; + struct ip6_dest *ip6po_dest1; /* Routing header related info. 
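Editor's note: ip6po_rthdr and ip6po_route, used throughout ip6_output.c, are not struct members of ip6_pktopts but macro aliases into the embedded ip6po_rhinfo, which keeps the routing header and its cached route paired in one place. A standalone illustration of the trick, with names invented for the demo:

#include <stdio.h>

struct rh_info_demo {
	int rhi_len;
	int rhi_cached;
};

struct pktopts_demo {
	struct rh_info_demo rhinfo;
};

// Alias the nested members so call sites can use the short names,
// mirroring the ip6po_rthdr/ip6po_route defines above.
#define rthdr_len	rhinfo.rhi_len
#define rthdr_cached	rhinfo.rhi_cached

int
main(void)
{
	struct pktopts_demo o = { { 0, 0 } };

	o.rthdr_len = 24;	// expands to o.rhinfo.rhi_len = 24
	printf("%d\n", o.rhinfo.rhi_len);
	return 0;
}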
*/ - struct ip6po_rhinfo ip6po_rhinfo; + struct ip6po_rhinfo ip6po_rhinfo; /* Destination options header (after a routing header) */ - struct ip6_dest *ip6po_dest2; + struct ip6_dest *ip6po_dest2; - int ip6po_tclass; /* traffic class */ + int ip6po_tclass; /* traffic class */ - int ip6po_minmtu; /* fragment vs PMTU discovery policy */ -#define IP6PO_MINMTU_MCASTONLY -1 /* default; send at min MTU for multicast */ -#define IP6PO_MINMTU_DISABLE 0 /* always perform pmtu disc */ -#define IP6PO_MINMTU_ALL 1 /* always send at min MTU */ + int ip6po_minmtu; /* fragment vs PMTU discovery policy */ +#define IP6PO_MINMTU_MCASTONLY -1 /* default; send at min MTU for multicast */ +#define IP6PO_MINMTU_DISABLE 0 /* always perform pmtu disc */ +#define IP6PO_MINMTU_ALL 1 /* always send at min MTU */ /* whether temporary addresses are preferred as source address */ - int ip6po_prefer_tempaddr; + int ip6po_prefer_tempaddr; -#define IP6PO_TEMPADDR_SYSTEM -1 /* follow the system default */ -#define IP6PO_TEMPADDR_NOTPREFER 0 /* not prefer temporary address */ -#define IP6PO_TEMPADDR_PREFER 1 /* prefer temporary address */ +#define IP6PO_TEMPADDR_SYSTEM -1 /* follow the system default */ +#define IP6PO_TEMPADDR_NOTPREFER 0 /* not prefer temporary address */ +#define IP6PO_TEMPADDR_PREFER 1 /* prefer temporary address */ int ip6po_flags; -#if 0 /* parameters in this block is obsolete. do not reuse the values. */ -#define IP6PO_REACHCONF 0x01 /* upper-layer reachability confirmation. */ -#define IP6PO_MINMTU 0x02 /* use minimum MTU (IPV6_USE_MIN_MTU) */ +#if 0 /* parameters in this block is obsolete. do not reuse the values. */ +#define IP6PO_REACHCONF 0x01 /* upper-layer reachability confirmation. */ +#define IP6PO_MINMTU 0x02 /* use minimum MTU (IPV6_USE_MIN_MTU) */ #endif -#define IP6PO_DONTFRAG 0x04 /* no fragmentation (IPV6_DONTFRAG) */ -#define IP6PO_USECOA 0x08 /* use care of address */ +#define IP6PO_DONTFRAG 0x04 /* no fragmentation (IPV6_DONTFRAG) */ +#define IP6PO_USECOA 0x08 /* use care of address */ }; /* @@ -254,42 +254,42 @@ struct ip6_pktopts { */ #endif /* BSD_KERNEL_PRIVATE */ -#define IP6S_SRCRULE_COUNT 16 +#define IP6S_SRCRULE_COUNT 16 #include -struct ip6stat { - u_quad_t ip6s_total; /* total packets received */ - u_quad_t ip6s_tooshort; /* packet too short */ - u_quad_t ip6s_toosmall; /* not enough data */ - u_quad_t ip6s_fragments; /* fragments received */ - u_quad_t ip6s_fragdropped; /* frags dropped(dups, out of space) */ - u_quad_t ip6s_fragtimeout; /* fragments timed out */ - u_quad_t ip6s_fragoverflow; /* fragments that exceeded limit */ - u_quad_t ip6s_forward; /* packets forwarded */ - u_quad_t ip6s_cantforward; /* packets rcvd for unreachable dest */ - u_quad_t ip6s_redirectsent; /* packets forwarded on same net */ - u_quad_t ip6s_delivered; /* datagrams delivered to upper level */ - u_quad_t ip6s_localout; /* total ip packets generated here */ - u_quad_t ip6s_odropped; /* lost packets due to nobufs, etc. */ - u_quad_t ip6s_reassembled; /* total packets reassembled ok */ - u_quad_t ip6s_atmfrag_rcvd; /* atomic fragments received */ - u_quad_t ip6s_fragmented; /* datagrams successfully fragmented */ - u_quad_t ip6s_ofragments; /* output fragments created */ - u_quad_t ip6s_cantfrag; /* don't fragment flag was set, etc. 
 */
-	u_quad_t ip6s_badoptions;	/* error in option processing */
-	u_quad_t ip6s_noroute;		/* packets discarded due to no route */
-	u_quad_t ip6s_badvers;		/* ip6 version != 6 */
-	u_quad_t ip6s_rawout;		/* total raw ip packets generated */
-	u_quad_t ip6s_badscope;		/* scope error */
-	u_quad_t ip6s_notmember;	/* don't join this multicast group */
-	u_quad_t ip6s_nxthist[256];	/* next header history */
-	u_quad_t ip6s_m1;		/* one mbuf */
-	u_quad_t ip6s_m2m[32];		/* two or more mbuf */
-	u_quad_t ip6s_mext1;		/* one ext mbuf */
-	u_quad_t ip6s_mext2m;		/* two or more ext mbuf */
-	u_quad_t ip6s_exthdrtoolong;	/* ext hdr are not continuous */
-	u_quad_t ip6s_nogif;		/* no match gif found */
-	u_quad_t ip6s_toomanyhdr;	/* discarded due to too many headers */
+struct ip6stat {
+	u_quad_t ip6s_total;		/* total packets received */
+	u_quad_t ip6s_tooshort;		/* packet too short */
+	u_quad_t ip6s_toosmall;		/* not enough data */
+	u_quad_t ip6s_fragments;	/* fragments received */
+	u_quad_t ip6s_fragdropped;	/* frags dropped (dups, out of space) */
+	u_quad_t ip6s_fragtimeout;	/* fragments timed out */
+	u_quad_t ip6s_fragoverflow;	/* fragments that exceeded limit */
+	u_quad_t ip6s_forward;		/* packets forwarded */
+	u_quad_t ip6s_cantforward;	/* packets rcvd for unreachable dest */
+	u_quad_t ip6s_redirectsent;	/* packets forwarded on same net */
+	u_quad_t ip6s_delivered;	/* datagrams delivered to upper level */
+	u_quad_t ip6s_localout;		/* total ip packets generated here */
+	u_quad_t ip6s_odropped;		/* lost packets due to nobufs, etc.
 */
+	u_quad_t ip6s_reassembled;	/* total packets reassembled ok */
+	u_quad_t ip6s_atmfrag_rcvd;	/* atomic fragments received */
+	u_quad_t ip6s_fragmented;	/* datagrams successfully fragmented */
+	u_quad_t ip6s_ofragments;	/* output fragments created */
+	u_quad_t ip6s_cantfrag;		/* don't fragment flag was set, etc. */
+	u_quad_t ip6s_badoptions;	/* error in option processing */
+	u_quad_t ip6s_noroute;		/* packets discarded due to no route */
+	u_quad_t ip6s_badvers;		/* ip6 version != 6 */
+	u_quad_t ip6s_rawout;		/* total raw ip packets generated */
+	u_quad_t ip6s_badscope;		/* scope error */
+	u_quad_t ip6s_notmember;	/* don't join this multicast group */
+	u_quad_t ip6s_nxthist[256];	/* next header history */
+	u_quad_t ip6s_m1;		/* one mbuf */
+	u_quad_t ip6s_m2m[32];		/* two or more mbuf */
+	u_quad_t ip6s_mext1;		/* one ext mbuf */
+	u_quad_t ip6s_mext2m;		/* two or more ext mbuf */
+	u_quad_t ip6s_exthdrtoolong;	/* ext hdrs are not contiguous */
+	u_quad_t ip6s_nogif;		/* no match gif found */
+	u_quad_t ip6s_toomanyhdr;	/* discarded due to too many headers */

 	/*
 	 * statistics for improvement of the source address selection
@@ -383,20 +383,20 @@ enum ip6s_sources_rule_index {
  */
 struct ip6aux {
 	u_int32_t ip6a_flags;
-#define IP6A_HASEEN	0x01	/* HA was present */
+#define IP6A_HASEEN	0x01	/* HA was present */
 #ifdef notyet
-#define IP6A_SWAP	0x02	/* swapped home/care-of on packet */
-#define IP6A_BRUID	0x04	/* BR Unique Identifier was present */
-#define IP6A_RTALERTSEEN 0x08	/* rtalert present */
+#define IP6A_SWAP	0x02	/* swapped home/care-of on packet */
+#define IP6A_BRUID	0x04	/* BR Unique Identifier was present */
+#define IP6A_RTALERTSEEN 0x08	/* rtalert present */

 	/* ip6.ip6_src */
-	struct in6_addr ip6a_careof;	/* care-of address of the peer */
-	struct in6_addr ip6a_home;	/* home address of the peer */
-	u_int16_t	ip6a_bruid;	/* BR unique identifier */
+	struct in6_addr ip6a_careof;	/* care-of address of the peer */
+	struct in6_addr ip6a_home;	/* home address of the peer */
+	u_int16_t	ip6a_bruid;	/* BR unique identifier */

 	/* rtalert */
-	u_int16_t	ip6a_rtalert;	/* rtalert option value */
+	u_int16_t	ip6a_rtalert;	/* rtalert option value */
 #endif /* notyet */

 	/* ether source address if all-nodes multicast destination */
@@ -404,14 +404,14 @@ struct ip6aux {
 };

 /* flags passed to ip6_output as last parameter */
-#define IPV6_UNSPECSRC		0x01	/* allow :: as the source address */
-#define IPV6_FORWARDING		0x02	/* most of IPv6 header exists */
-#define IPV6_MINMTU		0x04	/* use minimum MTU (IPV6_USE_MIN_MTU) */
-#define IPV6_FLAG_NOSRCIFSEL	0x80	/* bypas source address selection */
-#define IPV6_OUTARGS		0x100	/* has ancillary output info */
+#define IPV6_UNSPECSRC		0x01	/* allow :: as the source address */
+#define IPV6_FORWARDING		0x02	/* most of IPv6 header exists */
+#define IPV6_MINMTU		0x04	/* use minimum MTU (IPV6_USE_MIN_MTU) */
+#define IPV6_FLAG_NOSRCIFSEL	0x80	/* bypass source address selection */
+#define IPV6_OUTARGS		0x100	/* has ancillary output info */

 #ifdef BSD_KERNEL_PRIVATE
-#define IP6_HDR_ALIGNED_P(_ip6)	((((uintptr_t)(_ip6)) & ((uintptr_t)3)) == 0)
+#define IP6_HDR_ALIGNED_P(_ip6)	((((uintptr_t)(_ip6)) & ((uintptr_t)3)) == 0)

 /*
  * On platforms which require strict alignment (currently for anything but
@@ -419,13 +419,13 @@ struct ip6aux {
  * is 32-bit aligned, and assert otherwise.
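Editor's note: IP6_HDR_ALIGNED_P, defined just above, tests 32-bit alignment by masking the low two pointer bits; on i386/x86_64 the strict check compiles away, elsewhere a misaligned header panics. A tiny userland demo of the same predicate, with an invented macro name:

#include <assert.h>
#include <stdint.h>

#define ALIGNED32_P(p)	((((uintptr_t)(p)) & ((uintptr_t)3)) == 0)

int
main(void)
{
	char buf[8] __attribute__((aligned(4)));

	assert(ALIGNED32_P(&buf[0]));	// base is 4-byte aligned
	assert(!ALIGNED32_P(&buf[1]));	// one past it is not
	return 0;
}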
*/ #if defined(__i386__) || defined(__x86_64__) -#define IP6_HDR_STRICT_ALIGNMENT_CHECK(_ip6) do { } while (0) +#define IP6_HDR_STRICT_ALIGNMENT_CHECK(_ip6) do { } while (0) #else /* !__i386__ && !__x86_64__ */ -#define IP6_HDR_STRICT_ALIGNMENT_CHECK(_ip6) do { \ - if (!IP_HDR_ALIGNED_P(_ip6)) { \ - panic_plain("\n%s: Unaligned IPv6 header %p\n", \ - __func__, _ip6); \ - } \ +#define IP6_HDR_STRICT_ALIGNMENT_CHECK(_ip6) do { \ + if (!IP_HDR_ALIGNED_P(_ip6)) { \ + panic_plain("\n%s: Unaligned IPv6 header %p\n", \ + __func__, _ip6); \ + } \ } while (0) #endif /* !__i386__ && !__x86_64__ */ #endif /* BSD_KERNEL_PRIVATE */ @@ -436,46 +436,46 @@ struct ip6aux { * Extra information passed to ip6_output when IPV6_OUTARGS is set. */ struct ip6_out_args { - unsigned int ip6oa_boundif; /* bound outgoing interface */ - struct flowadv ip6oa_flowadv; /* flow advisory code */ - u_int32_t ip6oa_flags; /* IP6OAF flags (see below) */ -#define IP6OAF_SELECT_SRCIF 0x00000001 /* src interface selection */ -#define IP6OAF_BOUND_IF 0x00000002 /* boundif value is valid */ -#define IP6OAF_BOUND_SRCADDR 0x00000004 /* bound to src address */ -#define IP6OAF_NO_CELLULAR 0x00000010 /* skip IFT_CELLULAR */ -#define IP6OAF_NO_EXPENSIVE 0x00000020 /* skip IFEF_EXPENSIVE */ -#define IP6OAF_AWDL_UNRESTRICTED 0x00000040 /* privileged AWDL */ -#define IP6OAF_QOSMARKING_ALLOWED 0x00000080 /* policy allows Fastlane DSCP marking */ -#define IP6OAF_INTCOPROC_ALLOWED 0x00000100 /* access to internal coproc interfaces */ -#define IP6OAF_NO_LOW_POWER 0x00000200 /* skip low power */ - u_int32_t ip6oa_retflags; /* IP6OARF return flags (see below) */ -#define IP6OARF_IFDENIED 0x00000001 /* denied access to interface */ - int ip6oa_sotc; /* traffic class for Fastlane DSCP mapping */ - int ip6oa_netsvctype; + unsigned int ip6oa_boundif; /* bound outgoing interface */ + struct flowadv ip6oa_flowadv; /* flow advisory code */ + u_int32_t ip6oa_flags; /* IP6OAF flags (see below) */ +#define IP6OAF_SELECT_SRCIF 0x00000001 /* src interface selection */ +#define IP6OAF_BOUND_IF 0x00000002 /* boundif value is valid */ +#define IP6OAF_BOUND_SRCADDR 0x00000004 /* bound to src address */ +#define IP6OAF_NO_CELLULAR 0x00000010 /* skip IFT_CELLULAR */ +#define IP6OAF_NO_EXPENSIVE 0x00000020 /* skip IFEF_EXPENSIVE */ +#define IP6OAF_AWDL_UNRESTRICTED 0x00000040 /* privileged AWDL */ +#define IP6OAF_QOSMARKING_ALLOWED 0x00000080 /* policy allows Fastlane DSCP marking */ +#define IP6OAF_INTCOPROC_ALLOWED 0x00000100 /* access to internal coproc interfaces */ +#define IP6OAF_NO_LOW_POWER 0x00000200 /* skip low power */ + u_int32_t ip6oa_retflags; /* IP6OARF return flags (see below) */ +#define IP6OARF_IFDENIED 0x00000001 /* denied access to interface */ + int ip6oa_sotc; /* traffic class for Fastlane DSCP mapping */ + int ip6oa_netsvctype; }; -extern struct ip6stat ip6stat; /* statistics */ -extern int ip6_defhlim; /* default hop limit */ -extern int ip6_defmcasthlim; /* default multicast hop limit */ -extern int ip6_forwarding; /* act as router? */ -extern int ip6_gif_hlim; /* Hop limit for gif encap packet */ -extern int ip6_use_deprecated; /* allow deprecated addr as source */ -extern int ip6_rr_prune; /* router renumbering prefix */ - /* walk list every 5 sec. */ -extern int ip6_mcast_pmtu; /* enable pMTU discovery for multicast? 
*/ -#define ip6_mapped_addr_on (!ip6_v6only) +extern struct ip6stat ip6stat; /* statistics */ +extern int ip6_defhlim; /* default hop limit */ +extern int ip6_defmcasthlim; /* default multicast hop limit */ +extern int ip6_forwarding; /* act as router? */ +extern int ip6_gif_hlim; /* Hop limit for gif encap packet */ +extern int ip6_use_deprecated; /* allow deprecated addr as source */ +extern int ip6_rr_prune; /* router renumbering prefix */ + /* walk list every 5 sec. */ +extern int ip6_mcast_pmtu; /* enable pMTU discovery for multicast? */ +#define ip6_mapped_addr_on (!ip6_v6only) extern int ip6_v6only; extern int ip6_neighborgcthresh; /* Threshold # of NDP entries for GC */ -extern int ip6_maxifprefixes; /* Max acceptable prefixes via RA per IF */ -extern int ip6_maxifdefrouters; /* Max acceptable def routers via RA */ -extern int ip6_maxdynroutes; /* Max # of routes created via redirect */ -extern int ip6_sendredirects; /* send IP redirects when forwarding? */ -extern int ip6_accept_rtadv; /* deprecated */ +extern int ip6_maxifprefixes; /* Max acceptable prefixes via RA per IF */ +extern int ip6_maxifdefrouters; /* Max acceptable def routers via RA */ +extern int ip6_maxdynroutes; /* Max # of routes created via redirect */ +extern int ip6_sendredirects; /* send IP redirects when forwarding? */ +extern int ip6_accept_rtadv; /* deprecated */ extern int ip6_log_interval; extern uint64_t ip6_log_time; -extern int ip6_hdrnestlimit; /* upper limit of # of extension headers */ -extern int ip6_dad_count; /* DupAddrDetectionTransmits */ +extern int ip6_hdrnestlimit; /* upper limit of # of extension headers */ +extern int ip6_dad_count; /* DupAddrDetectionTransmits */ /* RFC4193 Unique Local Unicast Prefixes only */ extern int ip6_only_allow_rfc4193_prefix; @@ -483,10 +483,10 @@ extern int ip6_only_allow_rfc4193_prefix; extern int ip6_auto_flowlabel; extern int ip6_auto_linklocal; -extern int ip6_anonportmin; /* minimum ephemeral port */ -extern int ip6_anonportmax; /* maximum ephemeral port */ -extern int ip6_lowportmin; /* minimum reserved port */ -extern int ip6_lowportmax; /* maximum reserved port */ +extern int ip6_anonportmin; /* minimum ephemeral port */ +extern int ip6_anonportmax; /* maximum ephemeral port */ +extern int ip6_lowportmin; /* minimum reserved port */ +extern int ip6_lowportmax; /* maximum reserved port */ extern int ip6_use_tempaddr; /* whether to use temporary addresses. 
*/ @@ -556,7 +556,7 @@ extern struct ip6_pktopts *ip6_copypktopts(struct ip6_pktopts *, int); extern int ip6_optlen(struct inpcb *); extern void ip6_drain(void); extern int ip6_do_fragmentation(struct mbuf **, uint32_t, struct ifnet *, uint32_t, - struct ip6_hdr *, struct ip6_exthdrs *, uint32_t, int); + struct ip6_hdr *, struct ip6_exthdrs *, uint32_t, int); extern int route6_input(struct mbuf **, int *, int); diff --git a/bsd/netinet6/ip6protosw.h b/bsd/netinet6/ip6protosw.h index 135b6f344..a9f8cefea 100644 --- a/bsd/netinet6/ip6protosw.h +++ b/bsd/netinet6/ip6protosw.h @@ -96,7 +96,7 @@ */ #ifndef _NETINET6_IP6PROTOSW_H_ -#define _NETINET6_IP6PROTOSW_H_ +#define _NETINET6_IP6PROTOSW_H_ #include #ifdef BSD_KERNEL_PRIVATE @@ -135,15 +135,15 @@ struct pr_usrreqs; * ip6c_dst: ip6c_finaldst + scope info */ struct ip6ctlparam { - struct mbuf *ip6c_m; /* start of mbuf chain */ - struct icmp6_hdr *ip6c_icmp6; /* icmp6 header of target packet */ - struct ip6_hdr *ip6c_ip6; /* ip6 header of target packet */ - int ip6c_off; /* offset of the target proto header */ - struct sockaddr_in6 *ip6c_src; /* srcaddr w/ additional info */ - struct sockaddr_in6 *ip6c_dst; /* (final) dstaddr w/ additional info */ - struct in6_addr *ip6c_finaldst; /* final destination address */ - void *ip6c_cmdarg; /* control command dependent data */ - u_int8_t ip6c_nxt; /* final next header field */ + struct mbuf *ip6c_m; /* start of mbuf chain */ + struct icmp6_hdr *ip6c_icmp6; /* icmp6 header of target packet */ + struct ip6_hdr *ip6c_ip6; /* ip6 header of target packet */ + int ip6c_off; /* offset of the target proto header */ + struct sockaddr_in6 *ip6c_src; /* srcaddr w/ additional info */ + struct sockaddr_in6 *ip6c_dst; /* (final) dstaddr w/ additional info */ + struct in6_addr *ip6c_finaldst; /* final destination address */ + void *ip6c_cmdarg; /* control command dependent data */ + u_int8_t ip6c_nxt; /* final next header field */ }; /* @@ -154,41 +154,41 @@ struct ip6ctlparam { */ struct ip6protosw { TAILQ_ENTRY(ip6protosw) pr_entry; /* chain for domain */ - struct domain *pr_domain; /* domain protocol a member of */ - struct protosw *pr_protosw; /* pointer to self */ - u_int16_t pr_type; /* socket type used for */ - u_int16_t pr_protocol; /* protocol number */ - u_int32_t pr_flags; /* see below */ + struct domain *pr_domain; /* domain protocol a member of */ + struct protosw *pr_protosw; /* pointer to self */ + u_int16_t pr_type; /* socket type used for */ + u_int16_t pr_protocol; /* protocol number */ + u_int32_t pr_flags; /* see below */ /* * protocol-protocol hooks */ - int (*pr_input) /* input to protocol (from below) */ - (struct mbuf **, int *, int); - int (*pr_output) /* output to protocol (from above) */ - (struct mbuf *m, struct socket *so, - struct sockaddr_in6 *, struct mbuf *); - void (*pr_ctlinput) /* control input (from below) */ - (int, struct sockaddr *, void *, struct ifnet *); - int (*pr_ctloutput) /* control output (from above) */ - (struct socket *, struct sockopt *); + int (*pr_input) /* input to protocol (from below) */ + (struct mbuf **, int *, int); + int (*pr_output) /* output to protocol (from above) */ + (struct mbuf *m, struct socket *so, + struct sockaddr_in6 *, struct mbuf *); + void (*pr_ctlinput) /* control input (from below) */ + (int, struct sockaddr *, void *, struct ifnet *); + int (*pr_ctloutput) /* control output (from above) */ + (struct socket *, struct sockopt *); /* * user-protocol hook */ - struct pr_usrreqs *pr_usrreqs; /* user request; see list below */ + struct 
pr_usrreqs *pr_usrreqs; /* user request; see list below */ /* * utility hooks */ - void (*pr_init) /* initialization hook */ - (struct ip6protosw *, struct domain *); - void (*pr_drain)(void); /* flush any excess space possible */ + void (*pr_init) /* initialization hook */ + (struct ip6protosw *, struct domain *); + void (*pr_drain)(void); /* flush any excess space possible */ /* for compat. with IPv4 protosw */ - int (*pr_sysctl)(void); /* sysctl for protocol */ - int (*pr_lock) /* lock function for protocol */ - (struct socket *so, int refcnt, void *debug); - int (*pr_unlock) /* unlock for protocol */ - (struct socket *so, int refcnt, void *debug); - lck_mtx_t *(*pr_getlock) /* retrieve protocol lock */ - (struct socket *so, int flags); + int (*pr_sysctl)(void); /* sysctl for protocol */ + int (*pr_lock) /* lock function for protocol */ + (struct socket *so, int refcnt, void *debug); + int (*pr_unlock) /* unlock for protocol */ + (struct socket *so, int refcnt, void *debug); + lck_mtx_t *(*pr_getlock) /* retrieve protocol lock */ + (struct socket *so, int flags); /* * misc */ diff --git a/bsd/netinet6/ipcomp.h b/bsd/netinet6/ipcomp.h index b976f138a..991764ea0 100644 --- a/bsd/netinet6/ipcomp.h +++ b/bsd/netinet6/ipcomp.h @@ -40,27 +40,28 @@ #include struct ipcomp { - u_int8_t comp_nxt; /* Next Header */ - u_int8_t comp_flags; /* reserved, must be zero */ - u_int16_t comp_cpi; /* Compression parameter index */ + u_int8_t comp_nxt; /* Next Header */ + u_int8_t comp_flags; /* reserved, must be zero */ + u_int16_t comp_cpi; /* Compression parameter index */ }; /* well-known algorithm number (in CPI), from RFC2409 */ -#define IPCOMP_OUI 1 /* vendor specific */ -#define IPCOMP_DEFLATE 2 /* RFC2394 */ -#define IPCOMP_LZS 3 /* RFC2395 */ -#define IPCOMP_MAX 4 +#define IPCOMP_OUI 1 /* vendor specific */ +#define IPCOMP_DEFLATE 2 /* RFC2394 */ +#define IPCOMP_LZS 3 /* RFC2395 */ +#define IPCOMP_MAX 4 -#define IPCOMP_CPI_NEGOTIATE_MIN 256 +#define IPCOMP_CPI_NEGOTIATE_MIN 256 #ifdef BSD_KERNEL_PRIVATE struct ipcomp_algorithm { int (*compress)(struct mbuf *, struct mbuf *, size_t *); int (*decompress)(struct mbuf *, struct mbuf *, size_t *); - size_t minplen; /* minimum required length for compression */ + size_t minplen; /* minimum required length for compression */ }; struct ipsecrequest; +extern void ipcomp_init(struct protosw *, struct domain *); extern const struct ipcomp_algorithm *ipcomp_algorithm_lookup(int); extern void ipcomp4_input(struct mbuf *, int); extern int ipcomp4_output(struct mbuf *, struct secasvar *); diff --git a/bsd/netinet6/ipcomp6.h b/bsd/netinet6/ipcomp6.h index 41b766614..670307044 100644 --- a/bsd/netinet6/ipcomp6.h +++ b/bsd/netinet6/ipcomp6.h @@ -42,7 +42,7 @@ #ifdef BSD_KERNEL_PRIVATE extern int ipcomp6_input(struct mbuf **, int *, int); extern int ipcomp6_output(struct mbuf *, u_char *, struct mbuf *, - struct secasvar *); + struct secasvar *); #endif /* BSD_KERNEL_PRIVATE */ #endif /*_NETINET6_IPCOMP6_H_*/ diff --git a/bsd/netinet6/ipcomp_core.c b/bsd/netinet6/ipcomp_core.c index 01c9e928b..ef4fc2d20 100644 --- a/bsd/netinet6/ipcomp_core.c +++ b/bsd/netinet6/ipcomp_core.c @@ -109,11 +109,11 @@ static int deflate_decompress(struct mbuf *, struct mbuf *, size_t *); */ static int deflate_policy = Z_DEFAULT_COMPRESSION; static int deflate_window_out = -12; -static const int deflate_window_in = -1 * MAX_WBITS; /* don't change it */ +static const int deflate_window_in = -1 * MAX_WBITS; /* don't change it */ static int deflate_memlevel = MAX_MEM_LEVEL; -static z_stream 
deflate_stream; -static z_stream inflate_stream; +static z_stream deflate_stream; +static z_stream inflate_stream; #endif /* IPCOMP_ZLIB */ #if IPCOMP_ZLIB @@ -124,17 +124,47 @@ static const struct ipcomp_algorithm ipcomp_algorithms[] = { static const struct ipcomp_algorithm ipcomp_algorithms[] __unused = {}; #endif +decl_lck_mtx_data(static, ipcomp_mutex_data); +static lck_mtx_t *ipcomp_mutex = &ipcomp_mutex_data; + +void +ipcomp_init(struct protosw *pp, struct domain *dp) +{ +#pragma unused(dp) + static int ipcomp_initialized = 0; + lck_grp_attr_t *ipcomp_mutex_grp_attr = NULL; + lck_attr_t *ipcomp_mutex_attr = NULL; + lck_grp_t *ipcomp_mutex_grp = NULL; + + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); + + if (ipcomp_initialized) { + return; + } + + ipcomp_mutex_grp_attr = lck_grp_attr_alloc_init(); + ipcomp_mutex_grp = lck_grp_alloc_init("ipcomp", ipcomp_mutex_grp_attr); + lck_grp_attr_free(ipcomp_mutex_grp_attr); + + ipcomp_mutex_attr = lck_attr_alloc_init(); + lck_mtx_init(ipcomp_mutex, ipcomp_mutex_grp, ipcomp_mutex_attr); + lck_grp_free(ipcomp_mutex_grp); + lck_attr_free(ipcomp_mutex_attr); + + ipcomp_initialized = 1; +} + const struct ipcomp_algorithm * ipcomp_algorithm_lookup( #if IPCOMP_ZLIB - int idx + int idx #else - __unused int idx + __unused int idx #endif - ) + ) { #if IPCOMP_ZLIB - if (idx == SADB_X_CALG_DEFLATE) { + if (idx == SADB_X_CALG_DEFLATE) { /* * Avert your gaze, ugly hack follows! * We init here so our malloc can allocate using M_WAIT. @@ -147,7 +177,7 @@ ipcomp_algorithm_lookup( deflate_stream.zalloc = deflate_alloc; deflate_stream.zfree = deflate_free; if (deflateInit2(&deflate_stream, deflate_policy, Z_DEFLATED, - deflate_window_out, deflate_memlevel, Z_DEFAULT_STRATEGY)) { + deflate_window_out, deflate_memlevel, Z_DEFAULT_STRATEGY)) { /* Allocation failed */ bzero(&deflate_stream, sizeof(deflate_stream)); #if IPSEC_DEBUG @@ -155,7 +185,7 @@ ipcomp_algorithm_lookup( #endif } } - + if (inflate_stream.zalloc == NULL) { inflate_stream.zalloc = deflate_alloc; inflate_stream.zfree = deflate_free; @@ -208,46 +238,49 @@ deflate_common(struct mbuf *m, struct mbuf *md, size_t *lenp, int mode) #define MOREBLOCK() \ do { \ - /* keep the reply buffer into our chain */ \ - if (n) { \ - n->m_len = zs->total_out - offset; \ - offset = zs->total_out; \ - *np = n; \ - np = &n->m_next; \ - n = NULL; \ - } \ - \ - /* get a fresh reply buffer */ \ - MGET(n, M_DONTWAIT, MT_DATA); \ - if (n) { \ - MCLGET(n, M_DONTWAIT); \ - } \ - if (!n) { \ - error = ENOBUFS; \ - goto fail; \ - } \ - n->m_len = 0; \ - n->m_len = M_TRAILINGSPACE(n); \ - n->m_next = NULL; \ - /* \ - * if this is the first reply buffer, reserve \ - * region for ipcomp header. \ - */ \ - if (*np == NULL) { \ - n->m_len -= sizeof(struct ipcomp); \ - n->m_data += sizeof(struct ipcomp); \ - } \ - \ - zs->next_out = mtod(n, u_int8_t *); \ - zs->avail_out = n->m_len; \ + /* keep the reply buffer into our chain */ \ + if (n) { \ + n->m_len = zs->total_out - offset; \ + offset = zs->total_out; \ + *np = n; \ + np = &n->m_next; \ + n = NULL; \ + } \ + \ + /* get a fresh reply buffer */ \ + MGET(n, M_DONTWAIT, MT_DATA); \ + if (n) { \ + MCLGET(n, M_DONTWAIT); \ + } \ + if (!n) { \ + error = ENOBUFS; \ + goto fail; \ + } \ + n->m_len = 0; \ + n->m_len = M_TRAILINGSPACE(n); \ + n->m_next = NULL; \ + /* \ + * if this is the first reply buffer, reserve \ + * region for ipcomp header. 
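Editor's note: deflate_common() works on the file-static deflate_stream and inflate_stream, which carry internal zlib state across calls; the ipcomp_mutex introduced above (set up once in the new ipcomp_init()) serializes all IPComp (de)compression so two packets can never interleave on one stream, as the lck_mtx_lock/lck_mtx_unlock calls below show. A userspace analogue of the pattern, with pthread_mutex_t standing in for lck_mtx_t and invented names:

#include <pthread.h>

static pthread_mutex_t stream_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct { unsigned long total_in, total_out; } shared_stream;

static int
compress_serialized(const unsigned char *in, unsigned long inlen)
{
	int error = 0;

	pthread_mutex_lock(&stream_mutex);
	// ... feed `in` through the shared stream; every exit path,
	// including failures, must still reach the unlock below ...
	shared_stream.total_in += inlen;
	(void)in;
	pthread_mutex_unlock(&stream_mutex);
	return error;
}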
\ + */ \ + if (*np == NULL) { \ + n->m_len -= sizeof(struct ipcomp); \ + n->m_data += sizeof(struct ipcomp); \ + } \ + \ + zs->next_out = mtod(n, u_int8_t *); \ + zs->avail_out = n->m_len; \ } while (0) - for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) + for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) { ; - if (!mprev) + } + if (!mprev) { panic("md is not in m in deflate_common"); + } + lck_mtx_lock(ipcomp_mutex); zs = mode ? &inflate_stream : &deflate_stream; if (zs->zalloc == NULL) { /* @@ -258,7 +291,7 @@ do { \ error = ENOBUFS; goto fail; } - + zs->next_in = 0; zs->avail_in = 0; zs->next_out = 0; @@ -291,15 +324,16 @@ do { \ } zerror = mode ? inflate(zs, Z_NO_FLUSH) - : deflate(zs, Z_NO_FLUSH); + : deflate(zs, Z_NO_FLUSH); - if (zerror == Z_STREAM_END) + if (zerror == Z_STREAM_END) { ; /*once more.*/ - else if (zerror == Z_OK) { + } else if (zerror == Z_OK) { /* inflate: Z_OK can indicate the end of decode */ - if (mode && !p && zs->avail_out != 0) + if (mode && !p && zs->avail_out != 0) { goto terminate; - + } + /* else once more.*/ } else { if (zs->msg) { @@ -320,8 +354,9 @@ do { \ } } - if (zerror == Z_STREAM_END) + if (zerror == Z_STREAM_END) { goto terminate; + } /* termination */ while (1) { @@ -331,13 +366,13 @@ do { \ } zerror = mode ? inflate(zs, Z_FINISH) - : deflate(zs, Z_FINISH); + : deflate(zs, Z_FINISH); - if (zerror == Z_STREAM_END) + if (zerror == Z_STREAM_END) { break; - else if (zerror == Z_OK) + } else if (zerror == Z_OK) { ; /*once more.*/ - else { + } else { if (zs->msg) { ipseclog((LOG_ERR, "ipcomp_%scompress: " "%sflate(Z_FINISH): %s\n", @@ -393,15 +428,20 @@ terminate: } } + lck_mtx_unlock(ipcomp_mutex); return 0; fail: - if (m) + lck_mtx_unlock(ipcomp_mutex); + if (m) { m_freem(m); - if (n) + } + if (n) { m_freem(n); - if (n0) + } + if (n0) { m_freem(n0); + } return error; #undef MOREBLOCK } @@ -409,12 +449,15 @@ fail: static int deflate_compress(struct mbuf *m, struct mbuf *md, size_t *lenp) { - if (!m) + if (!m) { panic("m == NULL in deflate_compress"); - if (!md) + } + if (!md) { panic("md == NULL in deflate_compress"); - if (!lenp) + } + if (!lenp) { panic("lenp == NULL in deflate_compress"); + } return deflate_common(m, md, lenp, 0); } @@ -422,12 +465,15 @@ deflate_compress(struct mbuf *m, struct mbuf *md, size_t *lenp) static int deflate_decompress(struct mbuf *m, struct mbuf *md, size_t *lenp) { - if (!m) + if (!m) { panic("m == NULL in deflate_decompress"); - if (!md) + } + if (!md) { panic("md == NULL in deflate_decompress"); - if (!lenp) + } + if (!lenp) { panic("lenp == NULL in deflate_decompress"); + } return deflate_common(m, md, lenp, 1); } diff --git a/bsd/netinet6/ipcomp_input.c b/bsd/netinet6/ipcomp_input.c index c3b330305..a50e11d2a 100644 --- a/bsd/netinet6/ipcomp_input.c +++ b/bsd/netinet6/ipcomp_input.c @@ -90,7 +90,7 @@ ipcomp4_input(struct mbuf *m, int off) struct ip *ip; struct ipcomp *ipcomp; const struct ipcomp_algorithm *algo; - u_int16_t cpi; /* host order */ + u_int16_t cpi; /* host order */ u_int16_t nxt; size_t hlen; int error; @@ -106,7 +106,7 @@ ipcomp4_input(struct mbuf *m, int off) md = m_pulldown(m, off, sizeof(*ipcomp), NULL); if (!md) { - m = NULL; /*already freed*/ + m = NULL; /*already freed*/ ipseclog((LOG_DEBUG, "IPv4 IPComp input: assumption failed " "(pulldown failure)\n")); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); @@ -129,18 +129,18 @@ ipcomp4_input(struct mbuf *m, int off) if (cpi >= IPCOMP_CPI_NEGOTIATE_MIN) { sav = key_allocsa(AF_INET, (caddr_t)&ip->ip_src, - 
(caddr_t)&ip->ip_dst, IPPROTO_IPCOMP, htonl(cpi)); + (caddr_t)&ip->ip_dst, IPPROTO_IPCOMP, htonl(cpi)); if (sav != NULL - && (sav->state == SADB_SASTATE_MATURE - || sav->state == SADB_SASTATE_DYING)) { - cpi = sav->alg_enc; /*XXX*/ + && (sav->state == SADB_SASTATE_MATURE + || sav->state == SADB_SASTATE_DYING)) { + cpi = sav->alg_enc; /*XXX*/ /* other parameters to look at? */ } } algo = ipcomp_algorithm_lookup(cpi); if (!algo) { ipseclog((LOG_WARNING, "IPv4 IPComp input: unknown cpi %u\n", - cpi)); + cpi)); IPSEC_STAT_INCREMENT(ipsecstat.in_nosa); goto fail; } @@ -162,8 +162,9 @@ ipcomp4_input(struct mbuf *m, int off) if (error != 0) { if (error == EINVAL) { IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - } else if (error == ENOBUFS) + } else if (error == ENOBUFS) { IPSEC_STAT_INCREMENT(ipsecstat.in_nomem); + } m = NULL; goto fail; } @@ -177,31 +178,31 @@ ipcomp4_input(struct mbuf *m, int off) m->m_pkthdr.len = off + newlen; ip = mtod(m, struct ip *); - { - size_t len; + { + size_t len; #ifdef IPLEN_FLIPPED - len = ip->ip_len; + len = ip->ip_len; #else - len = ntohs(ip->ip_len); + len = ntohs(ip->ip_len); #endif - /* - * be careful about underflow. also, do not assign exact value - * as ip_len is manipulated differently on *BSDs. - */ - len += m->m_pkthdr.len; - len -= olen; - if (len & ~0xffff) { - /* packet too big after decompress */ - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - goto fail; - } + /* + * be careful about underflow. also, do not assign exact value + * as ip_len is manipulated differently on *BSDs. + */ + len += m->m_pkthdr.len; + len -= olen; + if (len & ~0xffff) { + /* packet too big after decompress */ + IPSEC_STAT_INCREMENT(ipsecstat.in_inval); + goto fail; + } #ifdef IPLEN_FLIPPED - ip->ip_len = len & 0xffff; + ip->ip_len = len & 0xffff; #else - ip->ip_len = htons(len & 0xffff); + ip->ip_len = htons(len & 0xffff); #endif - ip->ip_p = nxt; - } + ip->ip_p = nxt; + } if (sav) { key_sa_recordxfer(sav, m); @@ -221,23 +222,26 @@ ipcomp4_input(struct mbuf *m, int off) } DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL, - struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif, - struct ip *, ip, struct ip6_hdr *, NULL); + struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif, + struct ip *, ip, struct ip6_hdr *, NULL); ip_proto_dispatch_in(m, off, nxt, 0); - } else + } else { m_freem(m); + } m = NULL; IPSEC_STAT_INCREMENT(ipsecstat.in_success); return; fail: - if (sav) + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); + } - if (m) + if (m) { m_freem(m); + } return; } @@ -251,7 +255,7 @@ ipcomp6_input(struct mbuf **mp, int *offp, int proto) struct ip6_hdr *ip6; struct ipcomp *ipcomp; const struct ipcomp_algorithm *algo; - u_int16_t cpi; /* host order */ + u_int16_t cpi; /* host order */ u_int16_t nxt; int error; size_t newlen; @@ -263,7 +267,7 @@ ipcomp6_input(struct mbuf **mp, int *offp, int proto) md = m_pulldown(m, off, sizeof(*ipcomp), NULL); if (!md) { - m = NULL; /*already freed*/ + m = NULL; /*already freed*/ ipseclog((LOG_DEBUG, "IPv6 IPComp input: assumption failed " "(pulldown failure)\n")); IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); @@ -281,18 +285,18 @@ ipcomp6_input(struct mbuf **mp, int *offp, int proto) if (cpi >= IPCOMP_CPI_NEGOTIATE_MIN) { sav = key_allocsa(AF_INET6, (caddr_t)&ip6->ip6_src, - (caddr_t)&ip6->ip6_dst, IPPROTO_IPCOMP, htonl(cpi)); + (caddr_t)&ip6->ip6_dst, IPPROTO_IPCOMP, htonl(cpi)); if (sav != NULL - && (sav->state == SADB_SASTATE_MATURE - || sav->state == SADB_SASTATE_DYING)) { - cpi = sav->alg_enc; /*XXX*/ + && (sav->state == 
SADB_SASTATE_MATURE + || sav->state == SADB_SASTATE_DYING)) { + cpi = sav->alg_enc; /*XXX*/ /* other parameters to look at? */ } } algo = ipcomp_algorithm_lookup(cpi); if (!algo) { ipseclog((LOG_WARNING, "IPv6 IPComp input: unknown cpi %u; " - "dropping the packet for simplicity\n", cpi)); + "dropping the packet for simplicity\n", cpi)); IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa); goto fail; } @@ -308,8 +312,9 @@ ipcomp6_input(struct mbuf **mp, int *offp, int proto) if (error != 0) { if (error == EINVAL) { IPSEC_STAT_INCREMENT(ipsec6stat.in_inval); - } else if (error == ENOBUFS) + } else if (error == ENOBUFS) { IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem); + } m = NULL; goto fail; } @@ -346,10 +351,12 @@ ipcomp6_input(struct mbuf **mp, int *offp, int proto) return nxt; fail: - if (m) + if (m) { m_freem(m); - if (sav) + } + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); + } return IPPROTO_DONE; } #endif /* INET6 */ diff --git a/bsd/netinet6/ipcomp_output.c b/bsd/netinet6/ipcomp_output.c index 8ad23be73..6abaedef6 100644 --- a/bsd/netinet6/ipcomp_output.c +++ b/bsd/netinet6/ipcomp_output.c @@ -108,7 +108,7 @@ static int ipcomp_output(struct mbuf *, u_char *, struct mbuf *, - int, struct secasvar *sav); + int, struct secasvar *sav); /* * Modify the packet so that the payload is compressed. @@ -137,8 +137,8 @@ ipcomp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, int af, struct struct mbuf *mprev; struct ipcomp *ipcomp; const struct ipcomp_algorithm *algo; - u_int16_t cpi; /* host order */ - size_t plen0, plen; /*payload length to be compressed*/ + u_int16_t cpi; /* host order */ + size_t plen0, plen; /*payload length to be compressed*/ size_t compoff; int afnumber; int error = 0; @@ -159,7 +159,7 @@ ipcomp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, int af, struct #endif default: ipseclog((LOG_ERR, "ipcomp_output: unsupported af %d\n", af)); - return 0; /* no change at all */ + return 0; /* no change at all */ } /* grab parameters */ @@ -169,19 +169,22 @@ ipcomp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, int af, struct m_freem(m); return EINVAL; } - if ((sav->flags & SADB_X_EXT_RAWCPI) == 0) + if ((sav->flags & SADB_X_EXT_RAWCPI) == 0) { cpi = sav->alg_enc; - else + } else { cpi = ntohl(sav->spi) & 0xffff; + } /* compute original payload length */ plen = 0; - for (n = md; n; n = n->m_next) + for (n = md; n; n = n->m_next) { plen += n->m_len; + } /* if the payload is short enough, we don't need to compress */ - if (plen < algo->minplen) + if (plen < algo->minplen) { return 0; + } /* * retain the original packet for two purposes: @@ -205,8 +208,9 @@ ipcomp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, int af, struct plen0 = plen; /* make the packet over-writable */ - for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) + for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) { ; + } if (mprev == NULL || mprev->m_next != md) { ipseclog((LOG_DEBUG, "ipcomp%d_output: md is not in chain\n", afnumber)); @@ -259,90 +263,90 @@ ipcomp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, int af, struct m->m_pkthdr.len -= plen0; m->m_pkthdr.len += plen; - { - /* - * insert IPComp header. - */ + { + /* + * insert IPComp header. 
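On the output side, ipcomp_output() bails out early when the payload is below algo->minplen, and it retains a copy of the original packet (the later key_sa_recordxfer(sav, mcopy) accounts byte lifetime against it); RFC 3173 additionally requires sending a datagram uncompressed when compression would expand it. A small decision helper capturing both checks — the names and the explicit shrink comparison are illustrative, not the kernel's:

#include <stdbool.h>
#include <stddef.h>

/* Should an IPComp-style transform be applied?  Skip payloads below the
 * algorithm's minimum (not worth the 4-byte header), and skip when the
 * compressed result did not actually shrink (per RFC 3173, an expanded
 * datagram must be sent uncompressed). */
static bool
worth_compressing(size_t payload_len, size_t min_payload_len,
    size_t compressed_len)
{
	if (payload_len < min_payload_len)
		return false;                    /* too small to bother */
	return compressed_len < payload_len;     /* only ship it if smaller */
}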
+ */ #if INET - struct ip *ip = NULL; + struct ip *ip = NULL; #endif #if INET6 - struct ip6_hdr *ip6 = NULL; + struct ip6_hdr *ip6 = NULL; #endif - size_t hlen = 0; /*ip header len*/ - size_t complen = sizeof(struct ipcomp); + size_t hlen = 0; /*ip header len*/ + size_t complen = sizeof(struct ipcomp); - switch (af) { + switch (af) { #if INET - case AF_INET: - ip = mtod(m, struct ip *); + case AF_INET: + ip = mtod(m, struct ip *); #ifdef _IP_VHL - hlen = IP_VHL_HL(ip->ip_vhl) << 2; + hlen = IP_VHL_HL(ip->ip_vhl) << 2; #else - hlen = ip->ip_hl << 2; + hlen = ip->ip_hl << 2; #endif - break; + break; #endif #if INET6 - case AF_INET6: - ip6 = mtod(m, struct ip6_hdr *); - hlen = sizeof(*ip6); - break; + case AF_INET6: + ip6 = mtod(m, struct ip6_hdr *); + hlen = sizeof(*ip6); + break; #endif - } - - compoff = m->m_pkthdr.len - plen; + } - /* - * grow the mbuf to accomodate ipcomp header. - * before: IP ... payload - * after: IP ... ipcomp payload - */ - if (M_LEADINGSPACE(md) < complen) { - MGET(n, M_DONTWAIT, MT_DATA); - if (!n) { - m_freem(m); - error = ENOBUFS; - goto fail; + compoff = m->m_pkthdr.len - plen; + + /* + * grow the mbuf to accomodate ipcomp header. + * before: IP ... payload + * after: IP ... ipcomp payload + */ + if (M_LEADINGSPACE(md) < complen) { + MGET(n, M_DONTWAIT, MT_DATA); + if (!n) { + m_freem(m); + error = ENOBUFS; + goto fail; + } + n->m_len = complen; + mprev->m_next = n; + n->m_next = md; + m->m_pkthdr.len += complen; + ipcomp = mtod(n, struct ipcomp *); + } else { + md->m_len += complen; + md->m_data -= complen; + m->m_pkthdr.len += complen; + ipcomp = mtod(md, struct ipcomp *); } - n->m_len = complen; - mprev->m_next = n; - n->m_next = md; - m->m_pkthdr.len += complen; - ipcomp = mtod(n, struct ipcomp *); - } else { - md->m_len += complen; - md->m_data -= complen; - m->m_pkthdr.len += complen; - ipcomp = mtod(md, struct ipcomp *); - } - - bzero(ipcomp, sizeof(*ipcomp)); - ipcomp->comp_nxt = *nexthdrp; - *nexthdrp = IPPROTO_IPCOMP; - ipcomp->comp_cpi = htons(cpi); - switch (af) { + + bzero(ipcomp, sizeof(*ipcomp)); + ipcomp->comp_nxt = *nexthdrp; + *nexthdrp = IPPROTO_IPCOMP; + ipcomp->comp_cpi = htons(cpi); + switch (af) { #if INET - case AF_INET: - if (compoff + complen + plen < IP_MAXPACKET) - ip->ip_len = htons(compoff + complen + plen); - else { - ipseclog((LOG_ERR, - "IPv4 ESP output: size exceeds limit\n")); - IPSEC_STAT_INCREMENT(ipsecstat.out_inval); - m_freem(m); - error = EMSGSIZE; - goto fail; - } - break; + case AF_INET: + if (compoff + complen + plen < IP_MAXPACKET) { + ip->ip_len = htons(compoff + complen + plen); + } else { + ipseclog((LOG_ERR, + "IPv4 ESP output: size exceeds limit\n")); + IPSEC_STAT_INCREMENT(ipsecstat.out_inval); + m_freem(m); + error = EMSGSIZE; + goto fail; + } + break; #endif #if INET6 - case AF_INET6: - /* total packet length will be computed in ip6_output() */ - break; + case AF_INET6: + /* total packet length will be computed in ip6_output() */ + break; #endif + } } - } if (!m) { ipseclog((LOG_DEBUG, @@ -350,7 +354,7 @@ ipcomp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, int af, struct afnumber)); IPSEC_STAT_INCREMENT(stat->out_inval); } - IPSEC_STAT_INCREMENT(stat->out_success); + IPSEC_STAT_INCREMENT(stat->out_success); /* compute byte lifetime against original packet */ key_sa_recordxfer(sav, mcopy); diff --git a/bsd/netinet6/ipsec.c b/bsd/netinet6/ipsec.c index 5442bf7e8..32683adc3 100644 --- a/bsd/netinet6/ipsec.c +++ b/bsd/netinet6/ipsec.c @@ -138,26 +138,26 @@ int ipsec_debug = 0; #endif #include 
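The header-insertion block above either grows md backwards into its leading space or, when M_LEADINGSPACE(md) is too small, links a fresh mbuf in front with MGET(). The same two-path prepend, sketched over a flat buffer — struct buf is a stand-in for an mbuf, and the memcpy() in the slow path replaces the kernel's chain-linking:

#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *base;    /* start of the underlying allocation */
	unsigned char *data;    /* current start of valid bytes */
	size_t         len;     /* valid byte count */
};

/* Prepend hdrlen bytes in front of b->data, mirroring the M_LEADINGSPACE
 * branch in ipcomp_output(): reuse leading space when there is room,
 * otherwise fall back to a new allocation.  Returns a pointer to the new
 * header area, or NULL on allocation failure. */
static unsigned char *
prepend_header(struct buf *b, size_t hdrlen)
{
	if ((size_t)(b->data - b->base) >= hdrlen) {
		/* cheap path: grow backwards into the leading space */
		b->data -= hdrlen;
		b->len += hdrlen;
		return b->data;
	}
	/* slow path: new allocation, payload moved behind the header */
	unsigned char *n = malloc(hdrlen + b->len);
	if (n == NULL)
		return NULL;
	memcpy(n + hdrlen, b->data, b->len);
	free(b->base);
	b->base = n;
	b->data = n;
	b->len += hdrlen;
	return n;
}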
-#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) -#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) -#define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8)) -#define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8)) -#define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8)) +#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1) +#define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3) +#define DBG_FNC_GETPOL_SOCK NETDBG_CODE(DBG_NETIPSEC, (1 << 8)) +#define DBG_FNC_GETPOL_ADDR NETDBG_CODE(DBG_NETIPSEC, (2 << 8)) +#define DBG_FNC_IPSEC_OUT NETDBG_CODE(DBG_NETIPSEC, (3 << 8)) extern lck_mtx_t *sadb_mutex; struct ipsecstat ipsecstat; int ip4_ah_cleartos = 1; -int ip4_ah_offsetmask = 0; /* maybe IP_DF? */ -int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */ +int ip4_ah_offsetmask = 0; /* maybe IP_DF? */ +int ip4_ipsec_dfbit = 0; /* DF bit on encap. 0: clear 1: set 2: copy */ int ip4_esp_trans_deflev = IPSEC_LEVEL_USE; int ip4_esp_net_deflev = IPSEC_LEVEL_USE; int ip4_ah_trans_deflev = IPSEC_LEVEL_USE; int ip4_ah_net_deflev = IPSEC_LEVEL_USE; struct secpolicy ip4_def_policy; -int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */ +int ip4_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */ int ip4_esp_randpad = -1; -int esp_udp_encap_port = 0; +int esp_udp_encap_port = 0; static int sysctl_def_policy SYSCTL_HANDLER_ARGS; extern int natt_keepalive_interval; extern u_int64_t natt_now; @@ -170,33 +170,33 @@ SYSCTL_DECL(_net_inet6_ipsec6); #endif /* net.inet.ipsec */ SYSCTL_STRUCT(_net_inet_ipsec, IPSECCTL_STATS, - stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, ""); -SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT|CTLFLAG_RW | CTLFLAG_LOCKED, - &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", ""); + stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsecstat, ipsecstat, ""); +SYSCTL_PROC(_net_inet_ipsec, IPSECCTL_DEF_POLICY, def_policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + &ip4_def_policy.policy, 0, &sysctl_def_policy, "I", ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_trans_deflev, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_net_deflev, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_trans_deflev, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_net_deflev, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_CLEARTOS, - ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, ""); + ah_cleartos, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_cleartos, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_AH_OFFSETMASK, - ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, ""); + ah_offsetmask, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ah_offsetmask, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DFBIT, - dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, ""); + dfbit, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_dfbit, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ECN, - ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_ipsec_ecn, 0, ""); + ecn, CTLFLAG_RW | CTLFLAG_LOCKED, 
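The SYSCTL_INT()/SYSCTL_PROC() declarations in this hunk export these knobs under net.inet.ipsec. They can be inspected from user space with sysctlbyname(); whether a given node is present depends on the build and on privileges, so treat the OID below as an example rather than a guarantee:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

/* Read one of the knobs declared above from user space. */
int
main(void)
{
	int value = 0;
	size_t len = sizeof(value);

	if (sysctlbyname("net.inet.ipsec.def_policy", &value, &len,
	    NULL, 0) == -1) {
		perror("sysctlbyname");
		return 1;
	}
	printf("net.inet.ipsec.def_policy = %d\n", value);
	return 0;
}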
&ip4_ipsec_ecn, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_DEBUG, - debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, ""); + debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, ""); SYSCTL_INT(_net_inet_ipsec, IPSECCTL_ESP_RANDPAD, - esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, ""); + esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip4_esp_randpad, 0, ""); /* for performance, we bypass ipsec until a security policy is set */ int ipsec_bypass = 1; -SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass,0, ""); +SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec_bypass, 0, ""); /* * NAT Traversal requires a UDP port for encapsulation, @@ -205,7 +205,7 @@ SYSCTL_INT(_net_inet_ipsec, OID_AUTO, bypass, CTLFLAG_RD | CTLFLAG_LOCKED, &ipse * for nat traversal. */ SYSCTL_INT(_net_inet_ipsec, OID_AUTO, esp_port, - CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &esp_udp_encap_port, 0, ""); #if INET6 struct ipsecstat ipsec6stat; @@ -214,34 +214,34 @@ int ip6_esp_net_deflev = IPSEC_LEVEL_USE; int ip6_ah_trans_deflev = IPSEC_LEVEL_USE; int ip6_ah_net_deflev = IPSEC_LEVEL_USE; struct secpolicy ip6_def_policy; -int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */ +int ip6_ipsec_ecn = ECN_COMPATIBILITY; /* ECN ignore(-1)/compatibility(0)/normal(1) */ int ip6_esp_randpad = -1; /* net.inet6.ipsec6 */ SYSCTL_STRUCT(_net_inet6_ipsec6, IPSECCTL_STATS, - stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, ""); + stats, CTLFLAG_RD | CTLFLAG_LOCKED, &ipsec6stat, ipsecstat, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_POLICY, - def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, ""); + def_policy, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_def_policy.policy, 0, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_TRANSLEV, esp_trans_deflev, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_trans_deflev, 0, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_ESP_NETLEV, esp_net_deflev, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_net_deflev, 0, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_TRANSLEV, ah_trans_deflev, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_trans_deflev, 0, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEF_AH_NETLEV, ah_net_deflev, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ah_net_deflev, 0, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ECN, - ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, ""); + ecn, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_ipsec_ecn, 0, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_DEBUG, - debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, ""); + debug, CTLFLAG_RW | CTLFLAG_LOCKED, &ipsec_debug, 0, ""); SYSCTL_INT(_net_inet6_ipsec6, IPSECCTL_ESP_RANDPAD, - esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, ""); + esp_randpad, CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_esp_randpad, 0, ""); #endif /* INET6 */ static int ipsec_setspidx_interface(struct secpolicyindex *, u_int, struct mbuf *, - int, int, int); + int, int, int); static int ipsec_setspidx_mbuf(struct secpolicyindex *, u_int, u_int, - struct mbuf *, int); + struct mbuf *, int); static int ipsec4_setspidx_inpcb(struct mbuf *, struct inpcb *pcb); #if INET6 static int ipsec6_setspidx_in6pcb(struct mbuf *, struct in6pcb *pcb); @@ -257,7 
+257,7 @@ static struct inpcbpolicy *ipsec_newpcbpolicy(void); static void ipsec_delpcbpolicy(struct inpcbpolicy *); static struct secpolicy *ipsec_deepcopy_policy(struct secpolicy *src); static int ipsec_set_policy(struct secpolicy **pcb_sp, - int optname, caddr_t request, size_t len, int priv); + int optname, caddr_t request, size_t len, int priv); static void vshiftl(unsigned char *, int, int); static int ipsec_in_reject(struct secpolicy *, struct mbuf *); #if INET6 @@ -280,14 +280,15 @@ sysctl_def_policy SYSCTL_HANDLER_ARGS #pragma unused(arg1, arg2) if (ip4_def_policy.policy != IPSEC_POLICY_NONE && - ip4_def_policy.policy != IPSEC_POLICY_DISCARD) { + ip4_def_policy.policy != IPSEC_POLICY_DISCARD) { ip4_def_policy.policy = old_policy; return EINVAL; } /* Turn off the bypass if the default security policy changes */ - if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE) + if (ipsec_bypass != 0 && ip4_def_policy.policy != IPSEC_POLICY_NONE) { ipsec_bypass = 0; + } return error; } @@ -306,193 +307,196 @@ sysctl_def_policy SYSCTL_HANDLER_ARGS */ struct secpolicy * ipsec4_getpolicybysock(struct mbuf *m, - u_int dir, - struct socket *so, - int *error) + u_int dir, + struct socket *so, + int *error) { struct inpcbpolicy *pcbsp = NULL; - struct secpolicy *currsp = NULL; /* policy on socket */ - struct secpolicy *kernsp = NULL; /* policy on kernel */ - + struct secpolicy *currsp = NULL; /* policy on socket */ + struct secpolicy *kernsp = NULL; /* policy on kernel */ + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); /* sanity check */ - if (m == NULL || so == NULL || error == NULL) + if (m == NULL || so == NULL || error == NULL) { panic("ipsec4_getpolicybysock: NULL pointer was passed.\n"); - - if (so->so_pcb == NULL) { - printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n"); - return ipsec4_getpolicybyaddr(m, dir, 0, error); - } - + } + + if (so->so_pcb == NULL) { + printf("ipsec4_getpolicybysock: so->so_pcb == NULL\n"); + return ipsec4_getpolicybyaddr(m, dir, 0, error); + } + switch (SOCK_DOM(so)) { - case PF_INET: - pcbsp = sotoinpcb(so)->inp_sp; - break; + case PF_INET: + pcbsp = sotoinpcb(so)->inp_sp; + break; #if INET6 - case PF_INET6: - pcbsp = sotoin6pcb(so)->in6p_sp; - break; + case PF_INET6: + pcbsp = sotoin6pcb(so)->in6p_sp; + break; #endif } - - if (!pcbsp){ + + if (!pcbsp) { /* Socket has not specified an IPSEC policy */ return ipsec4_getpolicybyaddr(m, dir, 0, error); } - - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0,0,0,0,0); - + + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_START, 0, 0, 0, 0, 0); + switch (SOCK_DOM(so)) { - case PF_INET: - /* set spidx in pcb */ - *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so)); - break; + case PF_INET: + /* set spidx in pcb */ + *error = ipsec4_setspidx_inpcb(m, sotoinpcb(so)); + break; #if INET6 - case PF_INET6: - /* set spidx in pcb */ - *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so)); - break; + case PF_INET6: + /* set spidx in pcb */ + *error = ipsec6_setspidx_in6pcb(m, sotoin6pcb(so)); + break; #endif - default: - panic("ipsec4_getpolicybysock: unsupported address family\n"); + default: + panic("ipsec4_getpolicybysock: unsupported address family\n"); } if (*error) { - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1,*error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 1, *error, 0, 0, 0); return NULL; } - + /* sanity check */ - if (pcbsp == NULL) + if (pcbsp == NULL) { panic("ipsec4_getpolicybysock: pcbsp is NULL.\n"); - - switch (dir) { - case IPSEC_DIR_INBOUND: - currsp = pcbsp->sp_in; - 
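sysctl_def_policy() above follows a validate-and-roll-back shape: the sysctl machinery has already stored the new value, so an out-of-range value is undone by restoring old_policy, and installing any non-NONE default also clears the ipsec_bypass fast path. A stripped-down version of that shape; the enum values are illustrative, not the real IPSEC_POLICY_* constants:

#include <errno.h>

enum { POLICY_NONE, POLICY_DISCARD, POLICY_OTHER };  /* illustrative */

static int cur_policy = POLICY_NONE;
static int bypass = 1;                /* fast path until a policy is set */

/* Accept only NONE or DISCARD as the system default; roll back to the
 * previous value otherwise, and stop bypassing IPsec once a non-NONE
 * default is installed — the same shape as sysctl_def_policy(). */
static int
set_def_policy(int new_policy)
{
	int old_policy = cur_policy;

	cur_policy = new_policy;      /* sysctl machinery already wrote it */
	if (cur_policy != POLICY_NONE && cur_policy != POLICY_DISCARD) {
		cur_policy = old_policy;   /* reject and restore */
		return EINVAL;
	}
	if (bypass != 0 && cur_policy != POLICY_NONE)
		bypass = 0;                /* policies now in play */
	return 0;
}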
break; - case IPSEC_DIR_OUTBOUND: - currsp = pcbsp->sp_out; - break; - default: - panic("ipsec4_getpolicybysock: illegal direction.\n"); - } - + } + + switch (dir) { + case IPSEC_DIR_INBOUND: + currsp = pcbsp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + currsp = pcbsp->sp_out; + break; + default: + panic("ipsec4_getpolicybysock: illegal direction.\n"); + } + /* sanity check */ - if (currsp == NULL) + if (currsp == NULL) { panic("ipsec4_getpolicybysock: currsp is NULL.\n"); - - /* when privilieged socket */ - if (pcbsp->priv) { - switch (currsp->policy) { - case IPSEC_POLICY_BYPASS: - lck_mtx_lock(sadb_mutex); - currsp->refcnt++; - lck_mtx_unlock(sadb_mutex); - *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2,*error,0,0,0); - return currsp; - - case IPSEC_POLICY_ENTRUST: - /* look for a policy in SPD */ - kernsp = key_allocsp(&currsp->spidx, dir); - - /* SP found */ - if (kernsp != NULL) { - KEYDEBUG(KEYDEBUG_IPSEC_STAMP, - printf("DP ipsec4_getpolicybysock called " - "to allocate SP:0x%llx\n", - (uint64_t)VM_KERNEL_ADDRPERM(kernsp))); - *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3,*error,0,0,0); - return kernsp; - } - - /* no SP found */ - lck_mtx_lock(sadb_mutex); - if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD - && ip4_def_policy.policy != IPSEC_POLICY_NONE) { - ipseclog((LOG_INFO, - "fixed system default policy: %d->%d\n", - ip4_def_policy.policy, IPSEC_POLICY_NONE)); - ip4_def_policy.policy = IPSEC_POLICY_NONE; - } - ip4_def_policy.refcnt++; - lck_mtx_unlock(sadb_mutex); - *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4,*error,0,0,0); - return &ip4_def_policy; - - case IPSEC_POLICY_IPSEC: - lck_mtx_lock(sadb_mutex); - currsp->refcnt++; - lck_mtx_unlock(sadb_mutex); - *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5,*error,0,0,0); - return currsp; - - default: - ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " - "Invalid policy for PCB %d\n", currsp->policy)); - *error = EINVAL; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6,*error,0,0,0); - return NULL; - } - /* NOTREACHED */ - } - - /* when non-privilieged socket */ - /* look for a policy in SPD */ - kernsp = key_allocsp(&currsp->spidx, dir); - - /* SP found */ - if (kernsp != NULL) { - KEYDEBUG(KEYDEBUG_IPSEC_STAMP, - printf("DP ipsec4_getpolicybysock called " - "to allocate SP:0x%llx\n", - (uint64_t)VM_KERNEL_ADDRPERM(kernsp))); - *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7,*error,0,0,0); - return kernsp; } - - /* no SP found */ - switch (currsp->policy) { + + /* when privilieged socket */ + if (pcbsp->priv) { + switch (currsp->policy) { case IPSEC_POLICY_BYPASS: - ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " - "Illegal policy for non-priviliged defined %d\n", - currsp->policy)); - *error = EINVAL; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8,*error,0,0,0); - return NULL; - + lck_mtx_lock(sadb_mutex); + currsp->refcnt++; + lck_mtx_unlock(sadb_mutex); + *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 2, *error, 0, 0, 0); + return currsp; + case IPSEC_POLICY_ENTRUST: + /* look for a policy in SPD */ + kernsp = key_allocsp(&currsp->spidx, dir); + + /* SP found */ + if (kernsp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec4_getpolicybysock called " + "to allocate SP:0x%llx\n", + (uint64_t)VM_KERNEL_ADDRPERM(kernsp))); + *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 3, *error, 0, 0, 0); + return kernsp; + } + + /* no SP found */ lck_mtx_lock(sadb_mutex); if (ip4_def_policy.policy != 
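Every branch in this function that hands a secpolicy back to the caller first bumps refcnt under sadb_mutex, so the object cannot be freed while the caller still holds it. A user-space analogue of that retain step, assuming a pthread mutex in place of the lck_mtx:

#include <pthread.h>

struct policy {
	pthread_mutex_t *lock;   /* shared table lock, like sadb_mutex */
	int refcnt;
};

/* Take a reference before publishing the pointer to the caller, as the
 * IPSEC_POLICY_BYPASS and IPSEC_POLICY_IPSEC branches above do. */
static struct policy *
policy_retain(struct policy *p)
{
	pthread_mutex_lock(p->lock);
	p->refcnt++;
	pthread_mutex_unlock(p->lock);
	return p;
}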
IPSEC_POLICY_DISCARD - && ip4_def_policy.policy != IPSEC_POLICY_NONE) { + && ip4_def_policy.policy != IPSEC_POLICY_NONE) { ipseclog((LOG_INFO, - "fixed system default policy: %d->%d\n", - ip4_def_policy.policy, IPSEC_POLICY_NONE)); + "fixed system default policy: %d->%d\n", + ip4_def_policy.policy, IPSEC_POLICY_NONE)); ip4_def_policy.policy = IPSEC_POLICY_NONE; } ip4_def_policy.refcnt++; lck_mtx_unlock(sadb_mutex); *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9,*error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 4, *error, 0, 0, 0); return &ip4_def_policy; - + case IPSEC_POLICY_IPSEC: lck_mtx_lock(sadb_mutex); currsp->refcnt++; lck_mtx_unlock(sadb_mutex); *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10,*error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 5, *error, 0, 0, 0); return currsp; - + default: ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " - "Invalid policy for PCB %d\n", currsp->policy)); + "Invalid policy for PCB %d\n", currsp->policy)); *error = EINVAL; - KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11,*error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 6, *error, 0, 0, 0); return NULL; + } + /* NOTREACHED */ + } + + /* when non-privilieged socket */ + /* look for a policy in SPD */ + kernsp = key_allocsp(&currsp->spidx, dir); + + /* SP found */ + if (kernsp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec4_getpolicybysock called " + "to allocate SP:0x%llx\n", + (uint64_t)VM_KERNEL_ADDRPERM(kernsp))); + *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 7, *error, 0, 0, 0); + return kernsp; + } + + /* no SP found */ + switch (currsp->policy) { + case IPSEC_POLICY_BYPASS: + ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " + "Illegal policy for non-priviliged defined %d\n", + currsp->policy)); + *error = EINVAL; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 8, *error, 0, 0, 0); + return NULL; + + case IPSEC_POLICY_ENTRUST: + lck_mtx_lock(sadb_mutex); + if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD + && ip4_def_policy.policy != IPSEC_POLICY_NONE) { + ipseclog((LOG_INFO, + "fixed system default policy: %d->%d\n", + ip4_def_policy.policy, IPSEC_POLICY_NONE)); + ip4_def_policy.policy = IPSEC_POLICY_NONE; + } + ip4_def_policy.refcnt++; + lck_mtx_unlock(sadb_mutex); + *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 9, *error, 0, 0, 0); + return &ip4_def_policy; + + case IPSEC_POLICY_IPSEC: + lck_mtx_lock(sadb_mutex); + currsp->refcnt++; + lck_mtx_unlock(sadb_mutex); + *error = 0; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 10, *error, 0, 0, 0); + return currsp; + + default: + ipseclog((LOG_ERR, "ipsec4_getpolicybysock: " + "Invalid policy for PCB %d\n", currsp->policy)); + *error = EINVAL; + KERNEL_DEBUG(DBG_FNC_GETPOL_SOCK | DBG_FUNC_END, 11, *error, 0, 0, 0); + return NULL; } /* NOTREACHED */ } @@ -509,32 +513,34 @@ ipsec4_getpolicybysock(struct mbuf *m, */ struct secpolicy * ipsec4_getpolicybyaddr(struct mbuf *m, - u_int dir, - int flag, - int *error) + u_int dir, + int flag, + int *error) { struct secpolicy *sp = NULL; - - if (ipsec_bypass != 0) + + if (ipsec_bypass != 0) { return 0; + } LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (m == NULL || error == NULL) + if (m == NULL || error == NULL) { panic("ipsec4_getpolicybyaddr: NULL pointer was passed.\n"); + } { struct secpolicyindex spidx; - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 
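Taken together, ipsec4_getpolicybysock() resolves a policy in a fixed order: the PCB's own BYPASS or IPSEC policy is returned directly, ENTRUST defers to an SPD lookup via key_allocsp(), and a miss there falls back to the (possibly fixed-up) system default. A compilable sketch of that ladder, with stub lookups standing in for the kernel calls:

#include <stddef.h>

enum sock_policy { SP_BYPASS, SP_ENTRUST, SP_IPSEC };   /* illustrative */

struct sp { int id; };
static struct sp pcb_sp = { 1 }, default_sp = { 2 };

static struct sp *spd_lookup(void)     { return NULL; }        /* no SPD match */
static struct sp *system_default(void) { return &default_sp; } /* &ip4_def_policy analogue */
static struct sp *socket_policy(void)  { return &pcb_sp; }     /* currsp analogue */

/* The resolution ladder ipsec4_getpolicybysock() walks: BYPASS and IPSEC
 * come straight from the socket's PCB, ENTRUST defers to the SPD and then
 * to the system default; anything else is an error. */
static struct sp *
resolve_policy(enum sock_policy p)
{
	struct sp *sp;

	switch (p) {
	case SP_BYPASS:
	case SP_IPSEC:
		return socket_policy();      /* the socket's own policy wins */
	case SP_ENTRUST:
		sp = spd_lookup();           /* consult the SPD first */
		if (sp != NULL)
			return sp;
		return system_default();     /* fall back to the default */
	}
	return NULL;                         /* invalid: caller reports EINVAL */
}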
0, 0); bzero(&spidx, sizeof(spidx)); /* make a index to look for a policy */ *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET, m, - (flag & IP_FORWARDING) ? 0 : 1); + (flag & IP_FORWARDING) ? 0 : 1); if (*error != 0) { - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,*error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, *error, 0, 0, 0); return NULL; } @@ -548,23 +554,23 @@ ipsec4_getpolicybyaddr(struct mbuf *m, "to allocate SP:0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(sp))); *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,*error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0); return sp; } /* no SP found */ lck_mtx_lock(sadb_mutex); if (ip4_def_policy.policy != IPSEC_POLICY_DISCARD - && ip4_def_policy.policy != IPSEC_POLICY_NONE) { + && ip4_def_policy.policy != IPSEC_POLICY_NONE) { ipseclog((LOG_INFO, "fixed system default policy:%d->%d\n", - ip4_def_policy.policy, - IPSEC_POLICY_NONE)); + ip4_def_policy.policy, + IPSEC_POLICY_NONE)); ip4_def_policy.policy = IPSEC_POLICY_NONE; } ip4_def_policy.refcnt++; lck_mtx_unlock(sadb_mutex); *error = 0; - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3,*error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 3, *error, 0, 0, 0); return &ip4_def_policy; } @@ -575,36 +581,39 @@ ipsec4_getpolicybyaddr(struct mbuf *m, */ int ipsec4_getpolicybyinterface(struct mbuf *m, - u_int dir, - int *flags, - struct ip_out_args *ipoa, - struct secpolicy **sp) + u_int dir, + int *flags, + struct ip_out_args *ipoa, + struct secpolicy **sp) { struct secpolicyindex spidx; int error = 0; - if (ipsec_bypass != 0) + if (ipsec_bypass != 0) { return 0; - + } + /* Sanity check */ - if (m == NULL || ipoa == NULL || sp == NULL) + if (m == NULL || ipoa == NULL || sp == NULL) { panic("ipsec4_getpolicybyinterface: NULL pointer was passed.\n"); - - if (ipoa->ipoa_boundif == IFSCOPE_NONE) + } + + if (ipoa->ipoa_boundif == IFSCOPE_NONE) { return 0; - - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0); + } + + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0); bzero(&spidx, sizeof(spidx)); - + /* make a index to look for a policy */ error = ipsec_setspidx_interface(&spidx, dir, m, (*flags & IP_FORWARDING) ? 
0 : 1, - ipoa->ipoa_boundif, 4); - + ipoa->ipoa_boundif, 4); + if (error != 0) { - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0); return 0; } - + *sp = key_allocsp(&spidx, dir); /* Return SP, whether NULL or not */ @@ -624,9 +633,9 @@ ipsec4_getpolicybyinterface(struct mbuf *m, ipoa->ipoa_boundif = (*sp)->ipsec_if->if_index; } } - - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,error,0,0,0); - + + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, error, 0, 0, 0); + return 0; } @@ -644,157 +653,161 @@ ipsec4_getpolicybyinterface(struct mbuf *m, */ struct secpolicy * ipsec6_getpolicybysock(struct mbuf *m, - u_int dir, - struct socket *so, - int *error) + u_int dir, + struct socket *so, + int *error) { struct inpcbpolicy *pcbsp = NULL; - struct secpolicy *currsp = NULL; /* policy on socket */ - struct secpolicy *kernsp = NULL; /* policy on kernel */ - + struct secpolicy *currsp = NULL; /* policy on socket */ + struct secpolicy *kernsp = NULL; /* policy on kernel */ + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (m == NULL || so == NULL || error == NULL) + if (m == NULL || so == NULL || error == NULL) { panic("ipsec6_getpolicybysock: NULL pointer was passed.\n"); - + } + #if DIAGNOSTIC - if (SOCK_DOM(so) != PF_INET6) - panic("ipsec6_getpolicybysock: socket domain != inet6\n"); + if (SOCK_DOM(so) != PF_INET6) { + panic("ipsec6_getpolicybysock: socket domain != inet6\n"); + } #endif - - pcbsp = sotoin6pcb(so)->in6p_sp; - - if (!pcbsp){ - return ipsec6_getpolicybyaddr(m, dir, 0, error); - } - + + pcbsp = sotoin6pcb(so)->in6p_sp; + + if (!pcbsp) { + return ipsec6_getpolicybyaddr(m, dir, 0, error); + } + /* set spidx in pcb */ ipsec6_setspidx_in6pcb(m, sotoin6pcb(so)); - + /* sanity check */ - if (pcbsp == NULL) + if (pcbsp == NULL) { panic("ipsec6_getpolicybysock: pcbsp is NULL.\n"); - - switch (dir) { - case IPSEC_DIR_INBOUND: - currsp = pcbsp->sp_in; - break; - case IPSEC_DIR_OUTBOUND: - currsp = pcbsp->sp_out; - break; - default: - panic("ipsec6_getpolicybysock: illegal direction.\n"); - } - + } + + switch (dir) { + case IPSEC_DIR_INBOUND: + currsp = pcbsp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + currsp = pcbsp->sp_out; + break; + default: + panic("ipsec6_getpolicybysock: illegal direction.\n"); + } + /* sanity check */ - if (currsp == NULL) + if (currsp == NULL) { panic("ipsec6_getpolicybysock: currsp is NULL.\n"); - - /* when privilieged socket */ - if (pcbsp->priv) { - switch (currsp->policy) { - case IPSEC_POLICY_BYPASS: - lck_mtx_lock(sadb_mutex); - currsp->refcnt++; - lck_mtx_unlock(sadb_mutex); - *error = 0; - return currsp; - - case IPSEC_POLICY_ENTRUST: - /* look for a policy in SPD */ - kernsp = key_allocsp(&currsp->spidx, dir); - - /* SP found */ - if (kernsp != NULL) { - KEYDEBUG(KEYDEBUG_IPSEC_STAMP, - printf("DP ipsec6_getpolicybysock called " - "to allocate SP:0x%llx\n", - (uint64_t)VM_KERNEL_ADDRPERM(kernsp))); - *error = 0; - return kernsp; - } - - /* no SP found */ - lck_mtx_lock(sadb_mutex); - if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD - && ip6_def_policy.policy != IPSEC_POLICY_NONE) { - ipseclog((LOG_INFO, - "fixed system default policy: %d->%d\n", - ip6_def_policy.policy, IPSEC_POLICY_NONE)); - ip6_def_policy.policy = IPSEC_POLICY_NONE; - } - ip6_def_policy.refcnt++; - lck_mtx_unlock(sadb_mutex); - *error = 0; - return &ip6_def_policy; - - case IPSEC_POLICY_IPSEC: - lck_mtx_lock(sadb_mutex); - currsp->refcnt++; - 
lck_mtx_unlock(sadb_mutex); - *error = 0; - return currsp; - - default: - ipseclog((LOG_ERR, "ipsec6_getpolicybysock: " - "Invalid policy for PCB %d\n", currsp->policy)); - *error = EINVAL; - return NULL; - } - /* NOTREACHED */ - } - - /* when non-privilieged socket */ - /* look for a policy in SPD */ - kernsp = key_allocsp(&currsp->spidx, dir); - - /* SP found */ - if (kernsp != NULL) { - KEYDEBUG(KEYDEBUG_IPSEC_STAMP, - printf("DP ipsec6_getpolicybysock called " - "to allocate SP:0x%llx\n", - (uint64_t)VM_KERNEL_ADDRPERM(kernsp))); - *error = 0; - return kernsp; } - - /* no SP found */ - switch (currsp->policy) { + + /* when privilieged socket */ + if (pcbsp->priv) { + switch (currsp->policy) { case IPSEC_POLICY_BYPASS: - ipseclog((LOG_ERR, "ipsec6_getpolicybysock: " - "Illegal policy for non-priviliged defined %d\n", - currsp->policy)); - *error = EINVAL; - return NULL; - + lck_mtx_lock(sadb_mutex); + currsp->refcnt++; + lck_mtx_unlock(sadb_mutex); + *error = 0; + return currsp; + case IPSEC_POLICY_ENTRUST: + /* look for a policy in SPD */ + kernsp = key_allocsp(&currsp->spidx, dir); + + /* SP found */ + if (kernsp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec6_getpolicybysock called " + "to allocate SP:0x%llx\n", + (uint64_t)VM_KERNEL_ADDRPERM(kernsp))); + *error = 0; + return kernsp; + } + + /* no SP found */ lck_mtx_lock(sadb_mutex); if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD - && ip6_def_policy.policy != IPSEC_POLICY_NONE) { + && ip6_def_policy.policy != IPSEC_POLICY_NONE) { ipseclog((LOG_INFO, - "fixed system default policy: %d->%d\n", - ip6_def_policy.policy, IPSEC_POLICY_NONE)); + "fixed system default policy: %d->%d\n", + ip6_def_policy.policy, IPSEC_POLICY_NONE)); ip6_def_policy.policy = IPSEC_POLICY_NONE; } ip6_def_policy.refcnt++; lck_mtx_unlock(sadb_mutex); *error = 0; return &ip6_def_policy; - + case IPSEC_POLICY_IPSEC: lck_mtx_lock(sadb_mutex); currsp->refcnt++; lck_mtx_unlock(sadb_mutex); *error = 0; return currsp; - + default: - ipseclog((LOG_ERR, - "ipsec6_policybysock: Invalid policy for PCB %d\n", - currsp->policy)); + ipseclog((LOG_ERR, "ipsec6_getpolicybysock: " + "Invalid policy for PCB %d\n", currsp->policy)); *error = EINVAL; return NULL; + } + /* NOTREACHED */ + } + + /* when non-privilieged socket */ + /* look for a policy in SPD */ + kernsp = key_allocsp(&currsp->spidx, dir); + + /* SP found */ + if (kernsp != NULL) { + KEYDEBUG(KEYDEBUG_IPSEC_STAMP, + printf("DP ipsec6_getpolicybysock called " + "to allocate SP:0x%llx\n", + (uint64_t)VM_KERNEL_ADDRPERM(kernsp))); + *error = 0; + return kernsp; + } + + /* no SP found */ + switch (currsp->policy) { + case IPSEC_POLICY_BYPASS: + ipseclog((LOG_ERR, "ipsec6_getpolicybysock: " + "Illegal policy for non-priviliged defined %d\n", + currsp->policy)); + *error = EINVAL; + return NULL; + + case IPSEC_POLICY_ENTRUST: + lck_mtx_lock(sadb_mutex); + if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD + && ip6_def_policy.policy != IPSEC_POLICY_NONE) { + ipseclog((LOG_INFO, + "fixed system default policy: %d->%d\n", + ip6_def_policy.policy, IPSEC_POLICY_NONE)); + ip6_def_policy.policy = IPSEC_POLICY_NONE; + } + ip6_def_policy.refcnt++; + lck_mtx_unlock(sadb_mutex); + *error = 0; + return &ip6_def_policy; + + case IPSEC_POLICY_IPSEC: + lck_mtx_lock(sadb_mutex); + currsp->refcnt++; + lck_mtx_unlock(sadb_mutex); + *error = 0; + return currsp; + + default: + ipseclog((LOG_ERR, + "ipsec6_policybysock: Invalid policy for PCB %d\n", + currsp->policy)); + *error = EINVAL; + return NULL; } /* NOTREACHED */ } @@ 
-817,32 +830,34 @@ ipsec6_getpolicybysock(struct mbuf *m, struct secpolicy * ipsec6_getpolicybyaddr(struct mbuf *m, - u_int dir, - int flag, - int *error) + u_int dir, + int flag, + int *error) { struct secpolicy *sp = NULL; LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (m == NULL || error == NULL) + if (m == NULL || error == NULL) { panic("ipsec6_getpolicybyaddr: NULL pointer was passed.\n"); + } - { - struct secpolicyindex spidx; + { + struct secpolicyindex spidx; - bzero(&spidx, sizeof(spidx)); + bzero(&spidx, sizeof(spidx)); - /* make a index to look for a policy */ - *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m, - (flag & IP_FORWARDING) ? 0 : 1); + /* make a index to look for a policy */ + *error = ipsec_setspidx_mbuf(&spidx, dir, AF_INET6, m, + (flag & IP_FORWARDING) ? 0 : 1); - if (*error != 0) - return NULL; + if (*error != 0) { + return NULL; + } - sp = key_allocsp(&spidx, dir); - } + sp = key_allocsp(&spidx, dir); + } /* SP found */ if (sp != NULL) { @@ -857,7 +872,7 @@ ipsec6_getpolicybyaddr(struct mbuf *m, /* no SP found */ lck_mtx_lock(sadb_mutex); if (ip6_def_policy.policy != IPSEC_POLICY_DISCARD - && ip6_def_policy.policy != IPSEC_POLICY_NONE) { + && ip6_def_policy.policy != IPSEC_POLICY_NONE) { ipseclog((LOG_INFO, "fixed system default policy: %d->%d\n", ip6_def_policy.policy, IPSEC_POLICY_NONE)); ip6_def_policy.policy = IPSEC_POLICY_NONE; @@ -875,41 +890,44 @@ ipsec6_getpolicybyaddr(struct mbuf *m, */ int ipsec6_getpolicybyinterface(struct mbuf *m, - u_int dir, - int flag, - struct ip6_out_args *ip6oap, - int *noipsec, - struct secpolicy **sp) + u_int dir, + int flag, + struct ip6_out_args *ip6oap, + int *noipsec, + struct secpolicy **sp) { struct secpolicyindex spidx; int error = 0; - - if (ipsec_bypass != 0) + + if (ipsec_bypass != 0) { return 0; - + } + /* Sanity check */ - if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL) + if (m == NULL || sp == NULL || noipsec == NULL || ip6oap == NULL) { panic("ipsec6_getpolicybyinterface: NULL pointer was passed.\n"); - + } + *noipsec = 0; - - if (ip6oap->ip6oa_boundif == IFSCOPE_NONE) + + if (ip6oap->ip6oa_boundif == IFSCOPE_NONE) { return 0; - - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0,0,0,0,0); + } + + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_START, 0, 0, 0, 0, 0); bzero(&spidx, sizeof(spidx)); - + /* make a index to look for a policy */ error = ipsec_setspidx_interface(&spidx, dir, m, (flag & IP_FORWARDING) ? 
0 : 1, - ip6oap->ip6oa_boundif, 6); - + ip6oap->ip6oa_boundif, 6); + if (error != 0) { - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1,error,0,0,0); + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 1, error, 0, 0, 0); return 0; } - + *sp = key_allocsp(&spidx, dir); - + /* Return SP, whether NULL or not */ if (*sp != NULL && (*sp)->policy == IPSEC_POLICY_IPSEC) { if ((*sp)->ipsec_if == NULL) { @@ -927,9 +945,9 @@ ipsec6_getpolicybyinterface(struct mbuf *m, ip6oap->ip6oa_boundif = (*sp)->ipsec_if->if_index; } } - - KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2,*error,0,0,0); - + + KERNEL_DEBUG(DBG_FNC_GETPOL_ADDR | DBG_FUNC_END, 2, *error, 0, 0, 0); + return 0; } #endif /* INET6 */ @@ -955,19 +973,21 @@ ipsec_setspidx_mbuf( int error; /* sanity check */ - if (spidx == NULL || m == NULL) + if (spidx == NULL || m == NULL) { panic("ipsec_setspidx_mbuf: NULL pointer was passed.\n"); + } bzero(spidx, sizeof(*spidx)); error = ipsec_setspidx(m, spidx, needport, 0); - if (error) + if (error) { goto bad; + } spidx->dir = dir; return 0; - bad: +bad: /* XXX initialize */ bzero(spidx, sizeof(*spidx)); return EINVAL; @@ -975,26 +995,28 @@ ipsec_setspidx_mbuf( static int ipsec_setspidx_interface( - struct secpolicyindex *spidx, - u_int dir, - struct mbuf *m, - int needport, - int ifindex, - int ip_version) + struct secpolicyindex *spidx, + u_int dir, + struct mbuf *m, + int needport, + int ifindex, + int ip_version) { int error; - + /* sanity check */ - if (spidx == NULL || m == NULL) + if (spidx == NULL || m == NULL) { panic("ipsec_setspidx_interface: NULL pointer was passed.\n"); - + } + bzero(spidx, sizeof(*spidx)); - + error = ipsec_setspidx(m, spidx, needport, ip_version); - if (error) + if (error) { goto bad; + } spidx->dir = dir; - + if (ifindex != 0) { ifnet_head_lock_shared(); spidx->internal_if = ifindex2ifnet[ifindex]; @@ -1002,9 +1024,9 @@ ipsec_setspidx_interface( } else { spidx->internal_if = NULL; } - + return 0; - + bad: return EINVAL; } @@ -1014,35 +1036,41 @@ ipsec4_setspidx_inpcb(struct mbuf *m, struct inpcb *pcb) { struct secpolicyindex *spidx; int error; - - if (ipsec_bypass != 0) + + if (ipsec_bypass != 0) { return 0; - + } + /* sanity check */ - if (pcb == NULL) + if (pcb == NULL) { panic("ipsec4_setspidx_inpcb: no PCB found.\n"); - if (pcb->inp_sp == NULL) - panic("ipsec4_setspidx_inpcb: no inp_sp found.\n"); - if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL) - panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n"); - - bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx)); - bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx)); - - spidx = &pcb->inp_sp->sp_in->spidx; - error = ipsec_setspidx(m, spidx, 1, 0); - if (error) - goto bad; + } + if (pcb->inp_sp == NULL) { + panic("ipsec4_setspidx_inpcb: no inp_sp found.\n"); + } + if (pcb->inp_sp->sp_out == NULL || pcb->inp_sp->sp_in == NULL) { + panic("ipsec4_setspidx_inpcb: no sp_in/out found.\n"); + } + + bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx)); + bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx)); + + spidx = &pcb->inp_sp->sp_in->spidx; + error = ipsec_setspidx(m, spidx, 1, 0); + if (error) { + goto bad; + } spidx->dir = IPSEC_DIR_INBOUND; - + spidx = &pcb->inp_sp->sp_out->spidx; error = ipsec_setspidx(m, spidx, 1, 0); - if (error) + if (error) { goto bad; + } spidx->dir = IPSEC_DIR_OUTBOUND; - + return 0; - + bad: bzero(&pcb->inp_sp->sp_in->spidx, sizeof(*spidx)); bzero(&pcb->inp_sp->sp_out->spidx, sizeof(*spidx)); @@ -1055,32 +1083,37 @@ ipsec6_setspidx_in6pcb(struct mbuf *m, struct in6pcb *pcb) { 
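ipsec_setspidx_mbuf() and its relatives share one error-handling shape: build the security policy index in place, and on any failure jump to bad: and zero it back out so the caller never sees a half-built spidx (the "XXX initialize" note in the original). A sketch of that shape — struct spidx and parse_into() are trimmed stand-ins, not the kernel types:

#include <errno.h>
#include <string.h>

struct spidx { int dir; /* address/port fields trimmed */ };

static int parse_into(struct spidx *s);   /* assumed parser; may fail */

/* Build the index in place; on failure, zero it before returning so a
 * half-built index is never visible to the caller. */
static int
build_spidx(struct spidx *s, int dir)
{
	memset(s, 0, sizeof(*s));
	if (parse_into(s) != 0)
		goto bad;
	s->dir = dir;
	return 0;
bad:
	memset(s, 0, sizeof(*s));    /* XXX initialize, as the original notes */
	return EINVAL;
}

static int parse_into(struct spidx *s) { (void)s; return 0; }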
struct secpolicyindex *spidx; int error; - + /* sanity check */ - if (pcb == NULL) + if (pcb == NULL) { panic("ipsec6_setspidx_in6pcb: no PCB found.\n"); - if (pcb->in6p_sp == NULL) - panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n"); - if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL) - panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n"); - - bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx)); - bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx)); - - spidx = &pcb->in6p_sp->sp_in->spidx; - error = ipsec_setspidx(m, spidx, 1, 0); - if (error) - goto bad; + } + if (pcb->in6p_sp == NULL) { + panic("ipsec6_setspidx_in6pcb: no in6p_sp found.\n"); + } + if (pcb->in6p_sp->sp_out == NULL || pcb->in6p_sp->sp_in == NULL) { + panic("ipsec6_setspidx_in6pcb: no sp_in/out found.\n"); + } + + bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx)); + bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx)); + + spidx = &pcb->in6p_sp->sp_in->spidx; + error = ipsec_setspidx(m, spidx, 1, 0); + if (error) { + goto bad; + } spidx->dir = IPSEC_DIR_INBOUND; - + spidx = &pcb->in6p_sp->sp_out->spidx; error = ipsec_setspidx(m, spidx, 1, 0); - if (error) + if (error) { goto bad; + } spidx->dir = IPSEC_DIR_OUTBOUND; - + return 0; - + bad: bzero(&pcb->in6p_sp->sp_in->spidx, sizeof(*spidx)); bzero(&pcb->in6p_sp->sp_out->spidx, sizeof(*spidx)); @@ -1095,9 +1128,9 @@ bad: */ static int ipsec_setspidx(struct mbuf *m, - struct secpolicyindex *spidx, - int needport, - int force_ip_version) + struct secpolicyindex *spidx, + int needport, + int force_ip_version) { struct ip *ip = NULL; struct ip ipbuf; @@ -1105,42 +1138,44 @@ ipsec_setspidx(struct mbuf *m, struct mbuf *n; int len; int error; - - if (m == NULL) + + if (m == NULL) { panic("ipsec_setspidx: m == 0 passed.\n"); - + } + /* * validate m->m_pkthdr.len. we see incorrect length if we * mistakenly call this function with inconsistent mbuf chain * (like 4.4BSD tcp/udp processing). XXX should we panic here? 
*/ len = 0; - for (n = m; n; n = n->m_next) + for (n = m; n; n = n->m_next) { len += n->m_len; + } if (m->m_pkthdr.len != len) { KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec_setspidx: " - "total of m_len(%d) != pkthdr.len(%d), " - "ignored.\n", - len, m->m_pkthdr.len)); + printf("ipsec_setspidx: " + "total of m_len(%d) != pkthdr.len(%d), " + "ignored.\n", + len, m->m_pkthdr.len)); return EINVAL; } if (m->m_pkthdr.len < sizeof(struct ip)) { KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec_setspidx: " - "pkthdr.len(%d) < sizeof(struct ip), ignored.\n", - m->m_pkthdr.len)); + printf("ipsec_setspidx: " + "pkthdr.len(%d) < sizeof(struct ip), ignored.\n", + m->m_pkthdr.len)); return EINVAL; } - if (m->m_len >= sizeof(*ip)) + if (m->m_len >= sizeof(*ip)) { ip = mtod(m, struct ip *); - else { + } else { m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf); ip = &ipbuf; } - + if (force_ip_version) { v = force_ip_version; } else { @@ -1153,29 +1188,31 @@ ipsec_setspidx(struct mbuf *m, switch (v) { case 4: error = ipsec4_setspidx_ipaddr(m, spidx); - if (error) + if (error) { return error; + } ipsec4_get_ulp(m, spidx, needport); return 0; #if INET6 case 6: if (m->m_pkthdr.len < sizeof(struct ip6_hdr)) { KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec_setspidx: " - "pkthdr.len(%d) < sizeof(struct ip6_hdr), " - "ignored.\n", m->m_pkthdr.len)); + printf("ipsec_setspidx: " + "pkthdr.len(%d) < sizeof(struct ip6_hdr), " + "ignored.\n", m->m_pkthdr.len)); return EINVAL; } error = ipsec6_setspidx_ipaddr(m, spidx); - if (error) + if (error) { return error; + } ipsec6_get_ulp(m, spidx, needport); return 0; #endif default: KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec_setspidx: " - "unknown IP version %u, ignored.\n", v)); + printf("ipsec_setspidx: " + "unknown IP version %u, ignored.\n", v)); return EINVAL; } } @@ -1191,10 +1228,12 @@ ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport) struct udphdr uh; /* sanity check */ - if (m == NULL) + if (m == NULL) { panic("ipsec4_get_ulp: NULL pointer was passed.\n"); - if (m->m_pkthdr.len < sizeof(ip)) + } + if (m->m_pkthdr.len < sizeof(ip)) { panic("ipsec4_get_ulp: too short\n"); + } /* set default */ spidx->ul_proto = IPSEC_ULPROTO_ANY; @@ -1203,8 +1242,9 @@ ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport) m_copydata(m, 0, sizeof(ip), (caddr_t)&ip); /* ip_input() flips it into host endian XXX need more checking */ - if (ip.ip_off & (IP_MF | IP_OFFMASK)) + if (ip.ip_off & (IP_MF | IP_OFFMASK)) { return; + } nxt = ip.ip_p; #ifdef _IP_VHL @@ -1216,10 +1256,12 @@ ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport) switch (nxt) { case IPPROTO_TCP: spidx->ul_proto = nxt; - if (!needport) + if (!needport) { return; - if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) + } + if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) { return; + } m_copydata(m, off, sizeof(th), (caddr_t)&th); ((struct sockaddr_in *)&spidx->src)->sin_port = th.th_sport; @@ -1228,10 +1270,12 @@ ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport) return; case IPPROTO_UDP: spidx->ul_proto = nxt; - if (!needport) + if (!needport) { return; - if (off + sizeof(struct udphdr) > m->m_pkthdr.len) + } + if (off + sizeof(struct udphdr) > m->m_pkthdr.len) { return; + } m_copydata(m, off, sizeof(uh), (caddr_t)&uh); ((struct sockaddr_in *)&spidx->src)->sin_port = uh.uh_sport; @@ -1239,8 +1283,9 @@ ipsec4_get_ulp(struct mbuf *m, struct secpolicyindex *spidx, int needport) uh.uh_dport; return; case IPPROTO_AH: - if (off + 
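ipsec4_get_ulp() above walks from the IP header to the upper-layer protocol, checking every read against m_pkthdr.len first and skipping AH headers by (ip6e_len + 2) << 2 bytes (the AH payload-length field counts 32-bit words minus two). The same walk over a flat buffer — an mbuf chain would use m_copydata() as the kernel does, and the helper name is mine:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define PROTO_TCP 6
#define PROTO_UDP 17
#define PROTO_AH  51

/* Walk past AH headers to the upper-layer protocol and extract the ports,
 * with the same bounds discipline as ipsec4_get_ulp(): every read is
 * checked against the packet length first.  Fills *sport/*dport in
 * network order for TCP/UDP and returns 0, otherwise returns -1. */
static int
find_ulp_ports(const uint8_t *pkt, size_t pktlen, size_t off, int nxt,
    uint16_t *sport, uint16_t *dport)
{
	for (;;) {
		switch (nxt) {
		case PROTO_TCP:
		case PROTO_UDP:
			/* both TCP and UDP start with src/dst port */
			if (off + 4 > pktlen)
				return -1;
			memcpy(sport, pkt + off, 2);
			memcpy(dport, pkt + off + 2, 2);
			return 0;
		case PROTO_AH: {
			uint8_t ah_nxt, ah_len;
			if (off + 2 > pktlen)
				return -1;
			ah_nxt = pkt[off];
			ah_len = pkt[off + 1];
			/* AH length is in 32-bit words, minus 2 */
			off += ((size_t)ah_len + 2) << 2;
			nxt = ah_nxt;
			break;
		}
		default:
			return -1;  /* no ports to extract for this protocol */
		}
	}
}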
sizeof(ip6e) > m->m_pkthdr.len) + if (off + sizeof(ip6e) > m->m_pkthdr.len) { return; + } m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e); off += (ip6e.ip6e_len + 2) << 2; nxt = ip6e.ip6e_nxt; @@ -1262,9 +1307,9 @@ ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx) struct ip ipbuf; struct sockaddr_in *sin; - if (m->m_len >= sizeof(*ip)) + if (m->m_len >= sizeof(*ip)) { ip = mtod(m, struct ip *); - else { + } else { m_copydata(m, 0, sizeof(ipbuf), (caddr_t)&ipbuf); ip = &ipbuf; } @@ -1282,26 +1327,27 @@ ipsec4_setspidx_ipaddr(struct mbuf *m, struct secpolicyindex *spidx) sin->sin_len = sizeof(struct sockaddr_in); bcopy(&ip->ip_dst, &sin->sin_addr, sizeof(ip->ip_dst)); spidx->prefd = sizeof(struct in_addr) << 3; - + return 0; } #if INET6 static void ipsec6_get_ulp(struct mbuf *m, - struct secpolicyindex *spidx, - int needport) + struct secpolicyindex *spidx, + int needport) { int off, nxt; struct tcphdr th; struct udphdr uh; /* sanity check */ - if (m == NULL) + if (m == NULL) { panic("ipsec6_get_ulp: NULL pointer was passed.\n"); + } KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m)); + printf("ipsec6_get_ulp:\n"); kdebug_mbuf(m)); /* set default */ spidx->ul_proto = IPSEC_ULPROTO_ANY; @@ -1310,26 +1356,31 @@ ipsec6_get_ulp(struct mbuf *m, nxt = -1; off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt); - if (off < 0 || m->m_pkthdr.len < off) + if (off < 0 || m->m_pkthdr.len < off) { return; + } switch (nxt) { case IPPROTO_TCP: spidx->ul_proto = nxt; - if (!needport) + if (!needport) { break; - if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) + } + if (off + sizeof(struct tcphdr) > m->m_pkthdr.len) { break; + } m_copydata(m, off, sizeof(th), (caddr_t)&th); ((struct sockaddr_in6 *)&spidx->src)->sin6_port = th.th_sport; ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = th.th_dport; break; case IPPROTO_UDP: spidx->ul_proto = nxt; - if (!needport) + if (!needport) { break; - if (off + sizeof(struct udphdr) > m->m_pkthdr.len) + } + if (off + sizeof(struct udphdr) > m->m_pkthdr.len) { break; + } m_copydata(m, off, sizeof(uh), (caddr_t)&uh); ((struct sockaddr_in6 *)&spidx->src)->sin6_port = uh.uh_sport; ((struct sockaddr_in6 *)&spidx->dst)->sin6_port = uh.uh_dport; @@ -1345,15 +1396,15 @@ ipsec6_get_ulp(struct mbuf *m, /* assumes that m is sane */ static int ipsec6_setspidx_ipaddr(struct mbuf *m, - struct secpolicyindex *spidx) + struct secpolicyindex *spidx) { struct ip6_hdr *ip6 = NULL; struct ip6_hdr ip6buf; struct sockaddr_in6 *sin6; - if (m->m_len >= sizeof(*ip6)) + if (m->m_len >= sizeof(*ip6)) { ip6 = mtod(m, struct ip6_hdr *); - else { + } else { m_copydata(m, 0, sizeof(ip6buf), (caddr_t)&ip6buf); ip6 = &ip6buf; } @@ -1388,7 +1439,7 @@ static struct inpcbpolicy * ipsec_newpcbpolicy(void) { struct inpcbpolicy *p; - + p = (struct inpcbpolicy *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK); return p; } @@ -1402,37 +1453,38 @@ ipsec_delpcbpolicy(struct inpcbpolicy *p) /* initialize policy in PCB */ int ipsec_init_policy(struct socket *so, - struct inpcbpolicy **pcb_sp) + struct inpcbpolicy **pcb_sp) { struct inpcbpolicy *new; - + /* sanity check. 
*/ - if (so == NULL || pcb_sp == NULL) + if (so == NULL || pcb_sp == NULL) { panic("ipsec_init_policy: NULL pointer was passed.\n"); - - new = ipsec_newpcbpolicy(); - if (new == NULL) { - ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n")); - return ENOBUFS; - } + } + + new = ipsec_newpcbpolicy(); + if (new == NULL) { + ipseclog((LOG_DEBUG, "ipsec_init_policy: No more memory.\n")); + return ENOBUFS; + } bzero(new, sizeof(*new)); - + #ifdef __APPLE__ if (kauth_cred_issuser(so->so_cred)) #else - if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL)) + if (so->so_cred != 0 && !suser(so->so_cred->pc_ucred, NULL)) #endif - new->priv = 1; - else - new->priv = 0; - - if ((new->sp_in = key_newsp()) == NULL) { - ipsec_delpcbpolicy(new); - return ENOBUFS; - } + { new->priv = 1;} else { + new->priv = 0; + } + + if ((new->sp_in = key_newsp()) == NULL) { + ipsec_delpcbpolicy(new); + return ENOBUFS; + } new->sp_in->state = IPSEC_SPSTATE_ALIVE; new->sp_in->policy = IPSEC_POLICY_ENTRUST; - + if ((new->sp_out = key_newsp()) == NULL) { key_freesp(new->sp_in, KEY_SADB_UNLOCKED); ipsec_delpcbpolicy(new); @@ -1440,38 +1492,41 @@ ipsec_init_policy(struct socket *so, } new->sp_out->state = IPSEC_SPSTATE_ALIVE; new->sp_out->policy = IPSEC_POLICY_ENTRUST; - + *pcb_sp = new; - + return 0; } /* copy old ipsec policy into new */ int ipsec_copy_policy(struct inpcbpolicy *old, - struct inpcbpolicy *new) + struct inpcbpolicy *new) { struct secpolicy *sp; - - if (ipsec_bypass != 0) + + if (ipsec_bypass != 0) { return 0; - + } + sp = ipsec_deepcopy_policy(old->sp_in); if (sp) { key_freesp(new->sp_in, KEY_SADB_UNLOCKED); new->sp_in = sp; - } else + } else { return ENOBUFS; - + } + sp = ipsec_deepcopy_policy(old->sp_out); if (sp) { key_freesp(new->sp_out, KEY_SADB_UNLOCKED); new->sp_out = sp; - } else + } else { return ENOBUFS; - + } + new->priv = old->priv; - + return 0; } @@ -1484,13 +1539,15 @@ ipsec_deepcopy_policy(struct secpolicy *src) struct ipsecrequest **q; struct ipsecrequest *r; struct secpolicy *dst; - - if (src == NULL) + + if (src == NULL) { return NULL; + } dst = key_newsp(); - if (dst == NULL) + if (dst == NULL) { return NULL; - + } + /* * deep-copy IPsec request chain. This is required since struct * ipsecrequest is not reference counted. @@ -1499,30 +1556,31 @@ ipsec_deepcopy_policy(struct secpolicy *src) for (p = src->req; p; p = p->next) { *q = (struct ipsecrequest *)_MALLOC(sizeof(struct ipsecrequest), M_SECA, M_WAITOK | M_ZERO); - if (*q == NULL) + if (*q == NULL) { goto fail; + } (*q)->next = NULL; - + (*q)->saidx.proto = p->saidx.proto; (*q)->saidx.mode = p->saidx.mode; (*q)->level = p->level; (*q)->saidx.reqid = p->saidx.reqid; - + bcopy(&p->saidx.src, &(*q)->saidx.src, sizeof((*q)->saidx.src)); bcopy(&p->saidx.dst, &(*q)->saidx.dst, sizeof((*q)->saidx.dst)); - + (*q)->sp = dst; - + q = &((*q)->next); } - + dst->req = newchain; dst->state = src->state; dst->policy = src->policy; /* do not touch the refcnt fields */ - + return dst; - + fail: for (p = newchain; p; p = r) { r = p->next; @@ -1536,105 +1594,114 @@ fail: /* set policy and ipsec request if present. */ static int ipsec_set_policy(struct secpolicy **pcb_sp, - __unused int optname, - caddr_t request, - size_t len, - int priv) + __unused int optname, + caddr_t request, + size_t len, + int priv) { struct sadb_x_policy *xpl; struct secpolicy *newsp = NULL; int error; - + /* sanity check. 
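ipsec_deepcopy_policy() above clones the request chain through a pointer-to-tail-pointer (q = &((*q)->next)), which keeps the copy in order without a second pass or a tail variable, and unwinds the partial chain on allocation failure. The same idiom on a reduced struct, with the payload fields trimmed down:

#include <stdlib.h>

struct req {
	int         level;   /* stand-in for the copied saidx/level fields */
	struct req *next;
};

/* Deep-copy a singly linked chain the way ipsec_deepcopy_policy() does:
 * append through a pointer to the tail pointer, and free the partial copy
 * if any allocation fails. */
static struct req *
deepcopy_chain(const struct req *src)
{
	struct req *newchain = NULL;
	struct req **q = &newchain;
	const struct req *p;

	for (p = src; p != NULL; p = p->next) {
		*q = calloc(1, sizeof(**q));
		if (*q == NULL)
			goto fail;
		(*q)->level = p->level;
		(*q)->next = NULL;
		q = &(*q)->next;          /* advance the tail pointer */
	}
	return newchain;
fail:
	while (newchain != NULL) {
		struct req *r = newchain->next;
		free(newchain);
		newchain = r;
	}
	return NULL;
}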
*/ - if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) + if (pcb_sp == NULL || *pcb_sp == NULL || request == NULL) { return EINVAL; - if (len < sizeof(*xpl)) + } + if (len < sizeof(*xpl)) { return EINVAL; + } xpl = (struct sadb_x_policy *)(void *)request; - + KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec_set_policy: passed policy\n"); - kdebug_sadb_x_policy((struct sadb_ext *)xpl)); - + printf("ipsec_set_policy: passed policy\n"); + kdebug_sadb_x_policy((struct sadb_ext *)xpl)); + /* check policy type */ /* ipsec_set_policy() accepts IPSEC, ENTRUST and BYPASS. */ if (xpl->sadb_x_policy_type == IPSEC_POLICY_DISCARD - || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) + || xpl->sadb_x_policy_type == IPSEC_POLICY_NONE) { return EINVAL; - + } + /* check privileged socket */ - if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) + if (priv == 0 && xpl->sadb_x_policy_type == IPSEC_POLICY_BYPASS) { return EACCES; - + } + /* allocation new SP entry */ - if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) + if ((newsp = key_msg2sp(xpl, len, &error)) == NULL) { return error; - + } + newsp->state = IPSEC_SPSTATE_ALIVE; - + /* clear old SP and set new SP */ key_freesp(*pcb_sp, KEY_SADB_UNLOCKED); *pcb_sp = newsp; KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec_set_policy: new policy\n"); - kdebug_secpolicy(newsp)); - + printf("ipsec_set_policy: new policy\n"); + kdebug_secpolicy(newsp)); + return 0; } int ipsec4_set_policy(struct inpcb *inp, - int optname, - caddr_t request, - size_t len, - int priv) + int optname, + caddr_t request, + size_t len, + int priv) { struct sadb_x_policy *xpl; struct secpolicy **pcb_sp; - int error = 0; + int error = 0; struct sadb_x_policy xpl_aligned_buf; u_int8_t *xpl_unaligned; - + /* sanity check. */ - if (inp == NULL || request == NULL) + if (inp == NULL || request == NULL) { return EINVAL; - if (len < sizeof(*xpl)) + } + if (len < sizeof(*xpl)) { return EINVAL; + } xpl = (struct sadb_x_policy *)(void *)request; - + /* This is a new mbuf allocated by soopt_getm() */ if (IPSEC_IS_P2ALIGNED(xpl)) { xpl_unaligned = NULL; } else { xpl_unaligned = (__typeof__(xpl_unaligned))xpl; memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf)); - xpl = (__typeof__(xpl))&xpl_aligned_buf; + xpl = (__typeof__(xpl)) & xpl_aligned_buf; } - + if (inp->inp_sp == NULL) { error = ipsec_init_policy(inp->inp_socket, &inp->inp_sp); - if (error) + if (error) { return error; + } } - + /* select direction */ switch (xpl->sadb_x_policy_dir) { - case IPSEC_DIR_INBOUND: - pcb_sp = &inp->inp_sp->sp_in; - break; - case IPSEC_DIR_OUTBOUND: - pcb_sp = &inp->inp_sp->sp_out; - break; - default: - ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n", - xpl->sadb_x_policy_dir)); - return EINVAL; + case IPSEC_DIR_INBOUND: + pcb_sp = &inp->inp_sp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + pcb_sp = &inp->inp_sp->sp_out; + break; + default: + ipseclog((LOG_ERR, "ipsec4_set_policy: invalid direction=%u\n", + xpl->sadb_x_policy_dir)); + return EINVAL; } - + /* turn bypass off */ - if (ipsec_bypass != 0) + if (ipsec_bypass != 0) { ipsec_bypass = 0; - + } + return ipsec_set_policy(pcb_sp, optname, request, len, priv); } @@ -1642,78 +1709,82 @@ ipsec4_set_policy(struct inpcb *inp, int ipsec4_delete_pcbpolicy(struct inpcb *inp) { - /* sanity check. 
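ipsec4_set_policy() above copies an unaligned sadb_x_policy arriving from user space into xpl_aligned_buf before dereferencing multi-byte fields; note that the reformatted `(__typeof__(xpl)) & xpl_aligned_buf` still parses as a cast of &xpl_aligned_buf, not a bitwise AND — only the spacing changed. The same aligned-copy pattern on a stand-in struct (field names and the C11 _Alignof check are illustrative; the kernel uses IPSEC_IS_P2ALIGNED):

#include <stdint.h>
#include <string.h>

struct policy_req {        /* stand-in for struct sadb_x_policy */
	uint16_t len;
	uint16_t type;
	uint32_t dir;
};

/* Option data from user space may not be naturally aligned, so copy it
 * into an aligned local before reading multi-byte fields. */
static uint32_t
read_dir(const void *request)
{
	struct policy_req aligned;

	if (((uintptr_t)request % _Alignof(struct policy_req)) != 0) {
		memcpy(&aligned, request, sizeof(aligned));
		request = &aligned;
	}
	return ((const struct policy_req *)request)->dir;
}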
*/ - if (inp == NULL) + if (inp == NULL) { panic("ipsec4_delete_pcbpolicy: NULL pointer was passed.\n"); - - if (inp->inp_sp == NULL) - return 0; - + } + + if (inp->inp_sp == NULL) { + return 0; + } + if (inp->inp_sp->sp_in != NULL) { key_freesp(inp->inp_sp->sp_in, KEY_SADB_UNLOCKED); inp->inp_sp->sp_in = NULL; } - + if (inp->inp_sp->sp_out != NULL) { key_freesp(inp->inp_sp->sp_out, KEY_SADB_UNLOCKED); inp->inp_sp->sp_out = NULL; } - + ipsec_delpcbpolicy(inp->inp_sp); inp->inp_sp = NULL; - + return 0; } #if INET6 int ipsec6_set_policy(struct in6pcb *in6p, - int optname, - caddr_t request, - size_t len, - int priv) + int optname, + caddr_t request, + size_t len, + int priv) { struct sadb_x_policy *xpl; struct secpolicy **pcb_sp; int error = 0; struct sadb_x_policy xpl_aligned_buf; u_int8_t *xpl_unaligned; - + /* sanity check. */ - if (in6p == NULL || request == NULL) + if (in6p == NULL || request == NULL) { return EINVAL; - if (len < sizeof(*xpl)) + } + if (len < sizeof(*xpl)) { return EINVAL; + } xpl = (struct sadb_x_policy *)(void *)request; - + /* This is a new mbuf allocated by soopt_getm() */ if (IPSEC_IS_P2ALIGNED(xpl)) { xpl_unaligned = NULL; } else { xpl_unaligned = (__typeof__(xpl_unaligned))xpl; memcpy(&xpl_aligned_buf, xpl, sizeof(xpl_aligned_buf)); - xpl = (__typeof__(xpl))&xpl_aligned_buf; + xpl = (__typeof__(xpl)) & xpl_aligned_buf; } - + if (in6p->in6p_sp == NULL) { error = ipsec_init_policy(in6p->inp_socket, &in6p->in6p_sp); - if (error) + if (error) { return error; + } } - + /* select direction */ switch (xpl->sadb_x_policy_dir) { - case IPSEC_DIR_INBOUND: - pcb_sp = &in6p->in6p_sp->sp_in; - break; - case IPSEC_DIR_OUTBOUND: - pcb_sp = &in6p->in6p_sp->sp_out; - break; - default: - ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n", - xpl->sadb_x_policy_dir)); - return EINVAL; + case IPSEC_DIR_INBOUND: + pcb_sp = &in6p->in6p_sp->sp_in; + break; + case IPSEC_DIR_OUTBOUND: + pcb_sp = &in6p->in6p_sp->sp_out; + break; + default: + ipseclog((LOG_ERR, "ipsec6_set_policy: invalid direction=%u\n", + xpl->sadb_x_policy_dir)); + return EINVAL; } return ipsec_set_policy(pcb_sp, optname, request, len, priv); @@ -1722,27 +1793,28 @@ ipsec6_set_policy(struct in6pcb *in6p, int ipsec6_delete_pcbpolicy(struct in6pcb *in6p) { - /* sanity check. 
*/ - if (in6p == NULL) + if (in6p == NULL) { panic("ipsec6_delete_pcbpolicy: NULL pointer was passed.\n"); - - if (in6p->in6p_sp == NULL) - return 0; - + } + + if (in6p->in6p_sp == NULL) { + return 0; + } + if (in6p->in6p_sp->sp_in != NULL) { key_freesp(in6p->in6p_sp->sp_in, KEY_SADB_UNLOCKED); in6p->in6p_sp->sp_in = NULL; } - + if (in6p->in6p_sp->sp_out != NULL) { key_freesp(in6p->in6p_sp->sp_out, KEY_SADB_UNLOCKED); in6p->in6p_sp->sp_out = NULL; } - + ipsec_delpcbpolicy(in6p->in6p_sp); in6p->in6p_sp = NULL; - + return 0; } #endif @@ -1758,23 +1830,25 @@ ipsec_get_reqlevel(struct ipsecrequest *isr) u_int esp_trans_deflev = 0, esp_net_deflev = 0, ah_trans_deflev = 0, ah_net_deflev = 0; /* sanity check */ - if (isr == NULL || isr->sp == NULL) + if (isr == NULL || isr->sp == NULL) { panic("ipsec_get_reqlevel: NULL pointer is passed.\n"); + } if (((struct sockaddr *)&isr->sp->spidx.src)->sa_family - != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) + != ((struct sockaddr *)&isr->sp->spidx.dst)->sa_family) { panic("ipsec_get_reqlevel: family mismatched.\n"); + } /* XXX note that we have ipseclog() expanded here - code sync issue */ #define IPSEC_CHECK_DEFAULT(lev) \ - (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \ - && (lev) != IPSEC_LEVEL_UNIQUE) \ - ? (ipsec_debug \ - ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\ - (lev), IPSEC_LEVEL_REQUIRE) \ - : (void)0), \ - (lev) = IPSEC_LEVEL_REQUIRE, \ - (lev) \ - : (lev)) + (((lev) != IPSEC_LEVEL_USE && (lev) != IPSEC_LEVEL_REQUIRE \ + && (lev) != IPSEC_LEVEL_UNIQUE) \ + ? (ipsec_debug \ + ? log(LOG_INFO, "fixed system default level " #lev ":%d->%d\n",\ + (lev), IPSEC_LEVEL_REQUIRE) \ + : (void)0), \ + (lev) = IPSEC_LEVEL_REQUIRE, \ + (lev) \ + : (lev)) /* set default level */ switch (((struct sockaddr *)&isr->sp->spidx.src)->sa_family) { @@ -1796,7 +1870,7 @@ ipsec_get_reqlevel(struct ipsecrequest *isr) #endif /* INET6 */ default: panic("key_get_reqlevel: Unknown family. 
%d\n", - ((struct sockaddr *)&isr->sp->spidx.src)->sa_family); + ((struct sockaddr *)&isr->sp->spidx.src)->sa_family); } #undef IPSEC_CHECK_DEFAULT @@ -1806,16 +1880,18 @@ ipsec_get_reqlevel(struct ipsecrequest *isr) case IPSEC_LEVEL_DEFAULT: switch (isr->saidx.proto) { case IPPROTO_ESP: - if (isr->saidx.mode == IPSEC_MODE_TUNNEL) + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { level = esp_net_deflev; - else + } else { level = esp_trans_deflev; + } break; case IPPROTO_AH: - if (isr->saidx.mode == IPSEC_MODE_TUNNEL) + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { level = ah_net_deflev; - else + } else { level = ah_trans_deflev; + } break; case IPPROTO_IPCOMP: /* @@ -1826,8 +1902,8 @@ ipsec_get_reqlevel(struct ipsecrequest *isr) break; default: panic("ipsec_get_reqlevel: " - "Illegal protocol defined %u\n", - isr->saidx.proto); + "Illegal protocol defined %u\n", + isr->saidx.proto); } break; @@ -1841,7 +1917,7 @@ ipsec_get_reqlevel(struct ipsecrequest *isr) default: panic("ipsec_get_reqlevel: Illegal IPsec level %u\n", - isr->level); + isr->level); } return level; @@ -1861,8 +1937,8 @@ ipsec_in_reject(struct secpolicy *sp, struct mbuf *m) int need_auth, need_conf, need_icv; KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("ipsec_in_reject: using SP\n"); - kdebug_secpolicy(sp)); + printf("ipsec_in_reject: using SP\n"); + kdebug_secpolicy(sp)); /* check policy */ switch (sp->policy) { @@ -1872,7 +1948,7 @@ ipsec_in_reject(struct secpolicy *sp, struct mbuf *m) case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: return 0; - + case IPSEC_POLICY_IPSEC: break; @@ -1888,7 +1964,6 @@ ipsec_in_reject(struct secpolicy *sp, struct mbuf *m) /* XXX should compare policy against ipsec header history */ for (isr = sp->req; isr != NULL; isr = isr->next) { - /* get current level */ level = ipsec_get_reqlevel(isr); @@ -1898,20 +1973,21 @@ ipsec_in_reject(struct secpolicy *sp, struct mbuf *m) need_conf++; #if 0 - /* this won't work with multiple input threads - isr->sav would change - * with every packet and is not necessarily related to the current packet - * being processed. If ESP processing is required - the esp code should - * make sure that the integrity check is present and correct. I don't see - * why it would be necessary to check for the presence of the integrity - * check value here. I think this is just wrong. - * isr->sav has been removed. - * %%%%%% this needs to be re-worked at some point but I think the code below can - * be ignored for now. - */ + /* this won't work with multiple input threads - isr->sav would change + * with every packet and is not necessarily related to the current packet + * being processed. If ESP processing is required - the esp code should + * make sure that the integrity check is present and correct. I don't see + * why it would be necessary to check for the presence of the integrity + * check value here. I think this is just wrong. + * isr->sav has been removed. + * %%%%%% this needs to be re-worked at some point but I think the code below can + * be ignored for now. 
+ */ if (isr->sav != NULL - && isr->sav->flags == SADB_X_EXT_NONE - && isr->sav->alg_auth != SADB_AALG_NONE) + && isr->sav->flags == SADB_X_EXT_NONE + && isr->sav->alg_auth != SADB_AALG_NONE) { need_icv++; + } #endif } break; @@ -1932,13 +2008,14 @@ ipsec_in_reject(struct secpolicy *sp, struct mbuf *m) } KEYDEBUG(KEYDEBUG_IPSEC_DUMP, - printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n", - need_auth, need_conf, need_icv, m->m_flags)); + printf("ipsec_in_reject: auth:%d conf:%d icv:%d m_flags:%x\n", + need_auth, need_conf, need_icv, m->m_flags)); if ((need_conf && !(m->m_flags & M_DECRYPTED)) - || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM)) - || (need_auth && !(m->m_flags & M_AUTHIPHDR))) + || (!need_auth && need_icv && !(m->m_flags & M_AUTHIPDGM)) + || (need_auth && !(m->m_flags & M_AUTHIPHDR))) { return 1; + } return 0; } @@ -1957,22 +2034,23 @@ ipsec4_in_reject_so(struct mbuf *m, struct socket *so) LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); /* sanity check */ - if (m == NULL) - return 0; /* XXX should be panic ? */ - + if (m == NULL) { + return 0; /* XXX should be panic ? */ + } /* get SP for this packet. * When we are called from ip_forward(), we call * ipsec4_getpolicybyaddr() with IP_FORWARDING flag. */ - if (so == NULL) + if (so == NULL) { sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error); - else + } else { sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error); + } - if (sp == NULL) - return 0; /* XXX should be panic ? - * -> No, there may be error. */ - + if (sp == NULL) { + return 0; /* XXX should be panic ? + * -> No, there may be error. */ + } result = ipsec_in_reject(sp, m); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP ipsec4_in_reject_so call free SP:0x%llx\n", @@ -1986,12 +2064,14 @@ int ipsec4_in_reject(struct mbuf *m, struct inpcb *inp) { LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - if (inp == NULL) + if (inp == NULL) { return ipsec4_in_reject_so(m, NULL); - if (inp->inp_socket) + } + if (inp->inp_socket) { return ipsec4_in_reject_so(m, inp->inp_socket); - else + } else { panic("ipsec4_in_reject: invalid inpcb/socket"); + } /* NOTREACHED */ return 0; @@ -2012,21 +2092,22 @@ ipsec6_in_reject_so(struct mbuf *m, struct socket *so) LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); /* sanity check */ - if (m == NULL) - return 0; /* XXX should be panic ? */ - + if (m == NULL) { + return 0; /* XXX should be panic ? */ + } /* get SP for this packet. * When we are called from ip_forward(), we call * ipsec6_getpolicybyaddr() with IP_FORWARDING flag. */ - if (so == NULL) + if (so == NULL) { sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, IP_FORWARDING, &error); - else + } else { sp = ipsec6_getpolicybyaddr(m, IPSEC_DIR_INBOUND, 0, &error); + } - if (sp == NULL) - return 0; /* XXX should be panic ? */ - + if (sp == NULL) { + return 0; /* XXX should be panic ? 
*/ + } result = ipsec_in_reject(sp, m); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP ipsec6_in_reject_so call free SP:0x%llx\n", @@ -2039,14 +2120,15 @@ ipsec6_in_reject_so(struct mbuf *m, struct socket *so) int ipsec6_in_reject(struct mbuf *m, struct in6pcb *in6p) { - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - if (in6p == NULL) + if (in6p == NULL) { return ipsec6_in_reject_so(m, NULL); - if (in6p->in6p_socket) + } + if (in6p->in6p_socket) { return ipsec6_in_reject_so(m, in6p->in6p_socket); - else + } else { panic("ipsec6_in_reject: invalid in6p/socket"); + } /* NOTREACHED */ return 0; @@ -2066,8 +2148,8 @@ ipsec_hdrsiz(struct secpolicy *sp) LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("ipsec_hdrsiz: using SP\n"); - kdebug_secpolicy(sp)); + printf("ipsec_hdrsiz: using SP\n"); + kdebug_secpolicy(sp)); /* check policy */ switch (sp->policy) { @@ -2076,7 +2158,7 @@ ipsec_hdrsiz(struct secpolicy *sp) case IPSEC_POLICY_BYPASS: case IPSEC_POLICY_NONE: return 0; - + case IPSEC_POLICY_IPSEC: break; @@ -2088,7 +2170,6 @@ ipsec_hdrsiz(struct secpolicy *sp) siz = 0; for (isr = sp->req; isr != NULL; isr = isr->next) { - clen = 0; switch (isr->saidx.proto) { @@ -2096,7 +2177,7 @@ ipsec_hdrsiz(struct secpolicy *sp) #if IPSEC_ESP clen = esp_hdrsiz(isr); #else - clen = 0; /*XXX*/ + clen = 0; /*XXX*/ #endif break; case IPPROTO_AH: @@ -2140,29 +2221,32 @@ ipsec4_hdrsiz(struct mbuf *m, u_int dir, struct inpcb *inp) LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); /* sanity check */ - if (m == NULL) - return 0; /* XXX should be panic ? */ - if (inp != NULL && inp->inp_socket == NULL) + if (m == NULL) { + return 0; /* XXX should be panic ? */ + } + if (inp != NULL && inp->inp_socket == NULL) { panic("ipsec4_hdrsize: why is socket NULL but there is PCB."); + } /* get SP for this packet. * When we are called from ip_forward(), we call * ipsec4_getpolicybyaddr() with IP_FORWARDING flag. */ - if (inp == NULL) + if (inp == NULL) { sp = ipsec4_getpolicybyaddr(m, dir, IP_FORWARDING, &error); - else + } else { sp = ipsec4_getpolicybyaddr(m, dir, 0, &error); + } - if (sp == NULL) - return 0; /* XXX should be panic ? */ - + if (sp == NULL) { + return 0; /* XXX should be panic ? */ + } size = ipsec_hdrsiz(sp); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP ipsec4_hdrsiz call free SP:0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(sp))); KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size)); + printf("ipsec4_hdrsiz: size:%lu.\n", (u_int32_t)size)); key_freesp(sp, KEY_SADB_UNLOCKED); return size; @@ -2181,26 +2265,30 @@ ipsec6_hdrsiz(struct mbuf *m, u_int dir, struct in6pcb *in6p) LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); /* sanity check */ - if (m == NULL) - return 0; /* XXX shoud be panic ? */ - if (in6p != NULL && in6p->in6p_socket == NULL) + if (m == NULL) { + return 0; /* XXX shoud be panic ? */ + } + if (in6p != NULL && in6p->in6p_socket == NULL) { panic("ipsec6_hdrsize: why is socket NULL but there is PCB."); + } /* get SP for this packet */ /* XXX Is it right to call with IP_FORWARDING. 
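[Editor's note] `ipsec_hdrsiz()` above is a straightforward accumulator: walk the policy's request chain, add a worst-case overhead per protocol, and add one outer IP header per tunnel-mode request. A standalone sketch of that shape — the byte counts are placeholders only; the real code defers to esp_hdrsiz()/ah_hdrsiz():

#include <stddef.h>
#include <stdio.h>

enum proto { P_ESP, P_AH, P_IPCOMP };

struct req {                 /* stand-in for struct ipsecrequest */
    enum proto proto;
    int tunnel;              /* nonzero for tunnel mode */
    struct req *next;
};

/* Worst-case per-request overhead; illustrative values. */
static size_t proto_hdrsiz(enum proto p)
{
    switch (p) {
    case P_ESP:    return 73;
    case P_AH:     return 24;
    case P_IPCOMP: return 4;
    }
    return 0;
}

/* Sum the chain, adding an outer IP header for each tunnel-mode request. */
static size_t chain_hdrsiz(const struct req *r, size_t outer_ip_hdr)
{
    size_t siz = 0;
    for (; r != NULL; r = r->next) {
        siz += proto_hdrsiz(r->proto);
        if (r->tunnel) {
            siz += outer_ip_hdr;
        }
    }
    return siz;
}

int main(void)
{
    struct req ah  = { P_AH,  0, NULL };
    struct req esp = { P_ESP, 1, &ah };
    printf("worst case: %zu bytes\n", chain_hdrsiz(&esp, 20)); /* 117 */
    return 0;
}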
*/ - if (in6p == NULL) + if (in6p == NULL) { sp = ipsec6_getpolicybyaddr(m, dir, IP_FORWARDING, &error); - else + } else { sp = ipsec6_getpolicybyaddr(m, dir, 0, &error); + } - if (sp == NULL) + if (sp == NULL) { return 0; + } size = ipsec_hdrsiz(sp); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP ipsec6_hdrsiz call free SP:0x%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(sp))); KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size)); + printf("ipsec6_hdrsiz: size:%lu.\n", (u_int32_t)size)); key_freesp(sp, KEY_SADB_UNLOCKED); return size; @@ -2222,8 +2310,8 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) /* can't tunnel between different AFs */ if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family - != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family - || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) { + != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family + || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) { m_freem(m); return EINVAL; } @@ -2235,8 +2323,9 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) } #endif - if (m->m_len < sizeof(*ip)) + if (m->m_len < sizeof(*ip)) { panic("ipsec4_encapsulate: assumption failed (first mbuf length)"); + } ip = mtod(m, struct ip *); #ifdef _IP_VHL @@ -2245,8 +2334,9 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) hlen = ip->ip_hl << 2; #endif - if (m->m_len != hlen) + if (m->m_len != hlen) { panic("ipsec4_encapsulate: assumption failed (first mbuf length)"); + } /* generate header checksum */ ip->ip_sum = 0; @@ -2296,21 +2386,21 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) ip->ip_off &= htons(~IP_OFFMASK); ip->ip_off &= htons(~IP_MF); switch (ip4_ipsec_dfbit) { - case 0: /* clear DF bit */ + case 0: /* clear DF bit */ ip->ip_off &= htons(~IP_DF); break; - case 1: /* set DF bit */ + case 1: /* set DF bit */ ip->ip_off |= htons(IP_DF); break; - default: /* copy DF bit */ + default: /* copy DF bit */ break; } ip->ip_p = IPPROTO_IPIP; - if (plen + sizeof(struct ip) < IP_MAXPACKET) + if (plen + sizeof(struct ip) < IP_MAXPACKET) { ip->ip_len = htons(plen + sizeof(struct ip)); - else { + } else { ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: " - "leave ip_len as is (invalid packet)\n")); + "leave ip_len as is (invalid packet)\n")); } if (rfc6864 && IP_OFF_IS_ATOMIC(ntohs(ip->ip_off))) { ip->ip_id = 0; @@ -2318,9 +2408,9 @@ ipsec4_encapsulate(struct mbuf *m, struct secasvar *sav) ip->ip_id = ip_randomid(); } bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr, - &ip->ip_src, sizeof(ip->ip_src)); + &ip->ip_src, sizeof(ip->ip_src)); bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr, - &ip->ip_dst, sizeof(ip->ip_dst)); + &ip->ip_dst, sizeof(ip->ip_dst)); ip->ip_ttl = IPDEFTTL; /* XXX Should ip_src be updated later ? */ @@ -2340,8 +2430,8 @@ ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav) /* can't tunnel between different AFs */ if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family - != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family - || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) { + != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family + || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET6) { m_freem(m); return EINVAL; } @@ -2358,8 +2448,9 @@ ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav) /* * grow the mbuf to accomodate the new IPv6 header. 
*/ - if (m->m_len != sizeof(struct ip6_hdr)) + if (m->m_len != sizeof(struct ip6_hdr)) { panic("ipsec6_encapsulate: assumption failed (first mbuf length)"); + } if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) { struct mbuf *n; MGET(n, M_DONTWAIT, MT_DATA); @@ -2382,24 +2473,26 @@ ipsec6_encapsulate(struct mbuf *m, struct secasvar *sav) ovbcopy((caddr_t)ip6, (caddr_t)oip6, sizeof(struct ip6_hdr)); /* Fake link-local scope-class addresses */ - if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) + if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_src)) { oip6->ip6_src.s6_addr16[1] = 0; - if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) + } + if (IN6_IS_SCOPE_LINKLOCAL(&oip6->ip6_dst)) { oip6->ip6_dst.s6_addr16[1] = 0; + } /* construct new IPv6 header. see RFC 2401 5.1.2.2 */ /* ECN consideration. */ ip6_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip6->ip6_flow); - if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) + if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) { ip6->ip6_plen = htons(plen); - else { + } else { /* ip6->ip6_plen will be updated in ip6_output() */ } ip6->ip6_nxt = IPPROTO_IPV6; bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr, - &ip6->ip6_src, sizeof(ip6->ip6_src)); + &ip6->ip6_src, sizeof(ip6->ip6_src)); bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr, - &ip6->ip6_dst, sizeof(ip6->ip6_dst)); + &ip6->ip6_dst, sizeof(ip6->ip6_dst)); ip6->ip6_hlim = IPV6_DEFHLIM; /* XXX Should ip6_src be updated later ? */ @@ -2417,8 +2510,8 @@ ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav) /* tunneling over IPv4 */ if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family - != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family - || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) { + != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family + || ((struct sockaddr *)&sav->sah->saidx.src)->sa_family != AF_INET) { m_freem(m); return EINVAL; } @@ -2432,12 +2525,13 @@ ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav) plen = m->m_pkthdr.len; ip6 = mtod(m, struct ip6_hdr *); - hlim = ip6->ip6_hlim; + hlim = ip6->ip6_hlim; /* * grow the mbuf to accomodate the new IPv4 header. */ - if (m->m_len != sizeof(struct ip6_hdr)) + if (m->m_len != sizeof(struct ip6_hdr)) { panic("ipsec6_encapsulate: assumption failed (first mbuf length)"); + } if (M_LEADINGSPACE(m->m_next) < sizeof(struct ip6_hdr)) { struct mbuf *n; MGET(n, M_DONTWAIT, MT_DATA); @@ -2460,7 +2554,7 @@ ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav) bcopy(ip6, ip6i, sizeof(struct ip6_hdr)); ip = mtod(m, struct ip *); m->m_len = sizeof(struct ip); - /* + /* * Fill in some of the IPv4 fields - we don't need all of them * because the rest will be filled in by ip_output */ @@ -2468,7 +2562,7 @@ ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav) ip->ip_hl = sizeof(struct ip) >> 2; ip->ip_id = 0; ip->ip_sum = 0; - ip->ip_tos = 0; + ip->ip_tos = 0; ip->ip_off = 0; ip->ip_ttl = hlim; ip->ip_p = IPPROTO_IPV6; @@ -2477,17 +2571,17 @@ ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav) /* ECN consideration. 
*/ ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow); - if (plen + sizeof(struct ip) < IP_MAXPACKET) + if (plen + sizeof(struct ip) < IP_MAXPACKET) { ip->ip_len = htons(plen + sizeof(struct ip)); - else { + } else { ip->ip_len = htons(plen); ipseclog((LOG_ERR, "IPv4 ipsec: size exceeds limit: " - "leave ip_len as is (invalid packet)\n")); + "leave ip_len as is (invalid packet)\n")); } bcopy(&((struct sockaddr_in *)&sav->sah->saidx.src)->sin_addr, - &ip->ip_src, sizeof(ip->ip_src)); + &ip->ip_src, sizeof(ip->ip_src)); bcopy(&((struct sockaddr_in *)&sav->sah->saidx.dst)->sin_addr, - &ip->ip_dst, sizeof(ip->ip_dst)); + &ip->ip_dst, sizeof(ip->ip_dst)); return 0; } @@ -2517,12 +2611,13 @@ ipsec6_update_routecache_and_output( ro6 = &sav->sah->sa_route; dst6 = (struct sockaddr_in6 *)(void *)&ro6->ro_dst; if (ro6->ro_rt) { - RT_LOCK(ro6->ro_rt); + RT_LOCK(ro6->ro_rt); } if (ROUTE_UNUSABLE(ro6) || !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) { - if (ro6->ro_rt != NULL) - RT_UNLOCK(ro6->ro_rt); + if (ro6->ro_rt != NULL) { + RT_UNLOCK(ro6->ro_rt); + } ROUTE_RELEASE(ro6); } if (ro6->ro_rt == 0) { @@ -2532,7 +2627,7 @@ ipsec6_update_routecache_and_output( dst6->sin6_addr = ip6->ip6_dst; rtalloc_scoped((struct route *)ro6, sav->sah->outgoing_if); if (ro6->ro_rt) { - RT_LOCK(ro6->ro_rt); + RT_LOCK(ro6->ro_rt); } } if (ro6->ro_rt == 0) { @@ -2553,14 +2648,15 @@ ipsec6_update_routecache_and_output( * sockaddr via rt_setgate(). This is currently * addressed by SA_SIZE roundup in that routine. */ - if (ro6->ro_rt->rt_flags & RTF_GATEWAY) - dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway; + if (ro6->ro_rt->rt_flags & RTF_GATEWAY) { + dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway; + } RT_UNLOCK(ro6->ro_rt); ROUTE_RELEASE(&state->ro); route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6)); state->dst = (struct sockaddr *)dst6; state->tunneled = 6; - // release sadb_mutex, after updating sah's route cache + // release sadb_mutex, after updating sah's route cache lck_mtx_unlock(sadb_mutex); state->m = ipsec6_splithdr(state->m); @@ -2584,8 +2680,8 @@ ipsec6_update_routecache_and_output( error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav); break; case IPPROTO_IPCOMP: - /* XXX code should be here */ - /*FALLTHROUGH*/ + /* XXX code should be here */ + /*FALLTHROUGH*/ default: ipseclog((LOG_ERR, "%s: unknown ipsec protocol %d\n", __FUNCTION__, sav->sah->saidx.proto)); m_freem(state->m); @@ -2628,7 +2724,7 @@ ipsec6_update_routecache_and_output( adv = &ip6oa.ip6oa_flowadv; (void) ip6_output(state->m, NULL, &ro6_new, IPV6_OUTARGS, NULL, NULL, &ip6oa); state->m = NULL; - + if (adv->code == FADV_FLOW_CONTROLLED || adv->code == FADV_SUSPENDED) { error = ENOBUFS; ifnet_disable_output(sav->sah->ipsec_if); @@ -2663,7 +2759,7 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav) #if 0 /* XXX if the dst is myself, perform nothing. */ if (key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) { - m_freem(m); + m_freem(m); return EINVAL; } #endif @@ -2737,7 +2833,7 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav) m_freem(m); state->m = n; m = state->m; - } else { + } else { m->m_len += (sizeof(struct ip6_hdr) - hlen); m->m_data -= (sizeof(struct ip6_hdr) - hlen); } @@ -2749,9 +2845,9 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav) /* construct new IPv6 header. see RFC 2401 5.1.2.2 */ /* ECN consideration. 
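[Editor's note] Each encapsulation path above calls an `*_ecn_ingress()` helper at this point. The normal-mode rule on entering a tunnel is to copy the inner header's two ECN bits into the freshly built outer header so congestion marking survives encapsulation. A simplified sketch under that assumption; the kernel's helpers also implement a compatibility mode selected by sysctl:

#include <stdint.h>
#include <stdio.h>

#define ECN_MASK 0x03u   /* the low two bits of TOS / traffic class */

/* Build the outer TOS on encapsulation: start clean, then propagate
 * the inner packet's ECN codepoint (normal mode). */
static uint8_t ecn_ingress(uint8_t inner_tos)
{
    return (uint8_t)(inner_tos & ECN_MASK);
}

int main(void)
{
    printf("outer TOS = 0x%02x\n", ecn_ingress(0xb9)); /* keeps only ECT/CE bits */
    return 0;
}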
*/ ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos); - if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) + if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) { ip6->ip6_plen = htons(plen); - else { + } else { /* ip6->ip6_plen will be updated in ip6_output() */ } @@ -2759,9 +2855,9 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav) ip6->ip6_hlim = IPV6_DEFHLIM; bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr, - &ip6->ip6_src, sizeof(ip6->ip6_src)); + &ip6->ip6_src, sizeof(ip6->ip6_src)); bcopy(&((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr, - &ip6->ip6_dst, sizeof(ip6->ip6_dst)); + &ip6->ip6_dst, sizeof(ip6->ip6_dst)); return 0; } @@ -2784,20 +2880,21 @@ ipsec_chkreplay(u_int32_t seq, struct secasvar *sav) const struct secreplay *replay; u_int32_t diff; int fr; - u_int32_t wsizeb; /* constant: bits of window size */ - int frlast; /* constant: last frame */ + u_int32_t wsizeb; /* constant: bits of window size */ + int frlast; /* constant: last frame */ + - /* sanity check */ - if (sav == NULL) + if (sav == NULL) { panic("ipsec_chkreplay: NULL pointer was passed.\n"); + } lck_mtx_lock(sadb_mutex); replay = sav->replay; if (replay->wsize == 0) { lck_mtx_unlock(sadb_mutex); - return 1; /* no need to check replay. */ + return 1; /* no need to check replay. */ } /* constant */ @@ -2855,28 +2952,29 @@ ipsec_updatereplay(u_int32_t seq, struct secasvar *sav) struct secreplay *replay; u_int32_t diff; int fr; - u_int32_t wsizeb; /* constant: bits of window size */ - int frlast; /* constant: last frame */ - + u_int32_t wsizeb; /* constant: bits of window size */ + int frlast; /* constant: last frame */ + /* sanity check */ - if (sav == NULL) + if (sav == NULL) { panic("ipsec_chkreplay: NULL pointer was passed.\n"); + } lck_mtx_lock(sadb_mutex); replay = sav->replay; - if (replay->wsize == 0) - goto ok; /* no need to check replay. */ - + if (replay->wsize == 0) { + goto ok; /* no need to check replay. */ + } /* constant */ frlast = replay->wsize - 1; wsizeb = replay->wsize << 3; /* sequence number of 0 is invalid */ - if (seq == 0) { - lck_mtx_unlock(sadb_mutex); - return 1; - } + if (seq == 0) { + lck_mtx_unlock(sadb_mutex); + return 1; + } /* first time */ if (replay->count == 0) { @@ -2930,7 +3028,6 @@ ipsec_updatereplay(u_int32_t seq, struct secasvar *sav) ok: if (replay->count == ~0) { - /* set overflow flag */ replay->overflow++; @@ -2945,7 +3042,7 @@ ok: } replay->count++; - + lck_mtx_unlock(sadb_mutex); return 0; } @@ -2953,7 +3050,7 @@ ok: /* * shift variable length buffer to left. * IN: bitmap: pointer to the buffer - * nbit: the number of to shift. + * nbit: the number of to shift. * wsize: buffer size (bytes). 
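[Editor's note] `ipsec_chkreplay()`/`ipsec_updatereplay()` above, together with the `vshiftl()` helper that follows, implement the usual RFC 4303 sliding anti-replay window over a variable-length byte bitmap. The same logic is easier to see over a fixed 64-bit window; this is an illustrative reimplementation, not the kernel's (which also tracks counter overflow and takes sadb_mutex):

#include <stdint.h>
#include <stdio.h>

enum { WINDOW = 64 };

struct replay {
    uint32_t last;   /* highest accepted sequence number */
    uint64_t map;    /* bit i set => sequence (last - i) was seen */
};

/* Return 0 and record seq if it is fresh; -1 if replayed or too old. */
static int replay_check_update(struct replay *r, uint32_t seq)
{
    if (seq == 0) {
        return -1;                         /* sequence 0 is invalid */
    }
    if (r->last == 0) {                    /* first packet on this SA */
        r->last = seq;
        r->map = 1;
        return 0;
    }
    if (seq > r->last) {                   /* window slides forward */
        uint32_t shift = seq - r->last;
        r->map = (shift >= WINDOW) ? 0 : (r->map << shift);
        r->map |= 1;                       /* mark seq itself */
        r->last = seq;
        return 0;
    }
    uint32_t off = r->last - seq;
    if (off >= WINDOW) {
        return -1;                         /* older than the window */
    }
    if (r->map & (1ULL << off)) {
        return -1;                         /* duplicate */
    }
    r->map |= 1ULL << off;
    return 0;
}

int main(void)
{
    struct replay r = { 0, 0 };
    uint32_t seqs[] = { 5, 9, 5, 7 };      /* expect: 0 0 -1 0 */
    for (unsigned i = 0; i < 4; i++) {
        printf("seq %u -> %d\n", seqs[i], replay_check_update(&r, seqs[i]));
    }
    return 0;
}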
*/ static void @@ -2968,7 +3065,7 @@ vshiftl(unsigned char *bitmap, int nbit, int wsize) for (i = 1; i < wsize; i++) { over = (bitmap[i] >> (8 - s)); bitmap[i] <<= s; - bitmap[i-1] |= over; + bitmap[i - 1] |= over; } } @@ -2987,16 +3084,19 @@ ipsec4_logpacketstr(struct ip *ip, u_int32_t spi) p = buf; snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi)); - while (p && *p) + while (p && *p) { p++; + } snprintf(p, sizeof(buf) - (p - buf), "src=%u.%u.%u.%u", - s[0], s[1], s[2], s[3]); - while (p && *p) + s[0], s[1], s[2], s[3]); + while (p && *p) { p++; + } snprintf(p, sizeof(buf) - (p - buf), " dst=%u.%u.%u.%u", - d[0], d[1], d[2], d[3]); - while (p && *p) + d[0], d[1], d[2], d[3]); + while (p && *p) { p++; + } snprintf(p, sizeof(buf) - (p - buf), ")"); return buf; @@ -3011,16 +3111,19 @@ ipsec6_logpacketstr(struct ip6_hdr *ip6, u_int32_t spi) p = buf; snprintf(buf, sizeof(buf), "packet(SPI=%u ", (u_int32_t)ntohl(spi)); - while (p && *p) + while (p && *p) { p++; + } snprintf(p, sizeof(buf) - (p - buf), "src=%s", - ip6_sprintf(&ip6->ip6_src)); - while (p && *p) + ip6_sprintf(&ip6->ip6_src)); + while (p && *p) { p++; + } snprintf(p, sizeof(buf) - (p - buf), " dst=%s", - ip6_sprintf(&ip6->ip6_dst)); - while (p && *p) + ip6_sprintf(&ip6->ip6_dst)); + while (p && *p) { p++; + } snprintf(p, sizeof(buf) - (p - buf), ")"); return buf; @@ -3036,35 +3139,39 @@ ipsec_logsastr(struct secasvar *sav) /* validity check */ if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family - != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) + != ((struct sockaddr *)&sav->sah->saidx.dst)->sa_family) { panic("ipsec_logsastr: family mismatched.\n"); + } p = buf; snprintf(buf, sizeof(buf), "SA(SPI=%u ", (u_int32_t)ntohl(sav->spi)); - while (p && *p) + while (p && *p) { p++; + } if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET) { u_int8_t *s, *d; s = (u_int8_t *)&((struct sockaddr_in *)&saidx->src)->sin_addr; d = (u_int8_t *)&((struct sockaddr_in *)&saidx->dst)->sin_addr; snprintf(p, sizeof(buf) - (p - buf), - "src=%d.%d.%d.%d dst=%d.%d.%d.%d", - s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]); + "src=%d.%d.%d.%d dst=%d.%d.%d.%d", + s[0], s[1], s[2], s[3], d[0], d[1], d[2], d[3]); } #if INET6 else if (((struct sockaddr *)&saidx->src)->sa_family == AF_INET6) { snprintf(p, sizeof(buf) - (p - buf), - "src=%s", - ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr)); - while (p && *p) + "src=%s", + ip6_sprintf(&((struct sockaddr_in6 *)&saidx->src)->sin6_addr)); + while (p && *p) { p++; + } snprintf(p, sizeof(buf) - (p - buf), - " dst=%s", - ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr)); + " dst=%s", + ip6_sprintf(&((struct sockaddr_in6 *)&saidx->dst)->sin6_addr)); } #endif - while (p && *p) + while (p && *p) { p++; + } snprintf(p, sizeof(buf) - (p - buf), ")"); return buf; @@ -3084,13 +3191,15 @@ ipsec_dumpmbuf(struct mbuf *m) for (i = 0; i < m->m_len; i++) { printf("%02x ", p[i]); totlen++; - if (totlen % 16 == 0) + if (totlen % 16 == 0) { printf("\n"); + } } m = m->m_next; } - if (totlen % 16 != 0) + if (totlen % 16 != 0) { printf("\n"); + } printf("---\n"); } @@ -3120,12 +3229,12 @@ ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav) * not decode a packet because SA has been dead. 
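[Editor's note] The log-string builders above locate the append point by scanning for the terminator (`while (p && *p) p++`). An equivalent and more common idiom advances by `snprintf`'s return value and clamps at the buffer end; sketched here for comparison, not as the kernel's code:

#include <stdarg.h>
#include <stdio.h>

/* Append formatted text at offset `used`; returns the new offset, or
 * `size` once the buffer is full (further writes become no-ops). */
static int append(char *buf, int size, int used, const char *fmt, ...)
{
    va_list ap;
    int n;

    if (used < 0 || used >= size) {
        return size;
    }
    va_start(ap, fmt);
    n = vsnprintf(buf + used, (size_t)(size - used), fmt, ap);
    va_end(ap);
    return (n < 0) ? size : used + n;
}

int main(void)
{
    char buf[64];
    int used = 0;
    used = append(buf, (int)sizeof(buf), used, "packet(SPI=%u ", 258u);
    used = append(buf, (int)sizeof(buf), used, "src=%s", "10.0.0.1");
    used = append(buf, (int)sizeof(buf), used, " dst=%s)", "10.0.0.2");
    puts(buf);
    return 0;
}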
*/ if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) { + && sav->state != SADB_SASTATE_DYING) { IPSEC_STAT_INCREMENT(ipsecstat.out_nosa); error = EINVAL; goto bad; } - + state->outgoing_if = sav->sah->outgoing_if; /* @@ -3153,7 +3262,6 @@ ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav) error = ipsec6_update_routecache_and_output(state, sav); return error; - } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) { error = ipsec4_encapsulate(state->m, sav); if (error) { @@ -3164,15 +3272,16 @@ ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav) // grab sadb_mutex, before updating sah's route cache lck_mtx_lock(sadb_mutex); - ro4= (struct route *)&sav->sah->sa_route; + ro4 = (struct route *)&sav->sah->sa_route; dst4 = (struct sockaddr_in *)(void *)&ro4->ro_dst; if (ro4->ro_rt != NULL) { - RT_LOCK(ro4->ro_rt); + RT_LOCK(ro4->ro_rt); } if (ROUTE_UNUSABLE(ro4) || dst4->sin_addr.s_addr != ip->ip_dst.s_addr) { - if (ro4->ro_rt != NULL) - RT_UNLOCK(ro4->ro_rt); + if (ro4->ro_rt != NULL) { + RT_UNLOCK(ro4->ro_rt); + } ROUTE_RELEASE(ro4); } if (ro4->ro_rt == 0) { @@ -3199,18 +3308,19 @@ ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav) * sockaddr via rt_setgate(). This is currently * addressed by SA_SIZE roundup in that routine. */ - if (ro4->ro_rt->rt_flags & RTF_GATEWAY) - dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway; + if (ro4->ro_rt->rt_flags & RTF_GATEWAY) { + dst4 = (struct sockaddr_in *)(void *)ro4->ro_rt->rt_gateway; + } RT_UNLOCK(ro4->ro_rt); ROUTE_RELEASE(&state->ro); route_copyout((struct route *)&state->ro, ro4, sizeof(struct route)); state->dst = (struct sockaddr *)dst4; state->tunneled = 4; - // release sadb_mutex, after updating sah's route cache + // release sadb_mutex, after updating sah's route cache lck_mtx_unlock(sadb_mutex); } else { ipseclog((LOG_ERR, "%s: family mismatched between inner and outer spi=%u\n", - __FUNCTION__, (u_int32_t)ntohl(sav->spi))); + __FUNCTION__, (u_int32_t)ntohl(sav->spi))); error = EAFNOSUPPORT; goto bad; } @@ -3249,8 +3359,8 @@ ipsec4_output_internal(struct ipsec_output_state *state, struct secasvar *sav) break; default: ipseclog((LOG_ERR, - "ipsec4_output: unknown ipsec protocol %d\n", - sav->sah->saidx.proto)); + "ipsec4_output: unknown ipsec protocol %d\n", + sav->sah->saidx.proto)); m_freem(state->m); state->m = NULL; error = EINVAL; @@ -3273,9 +3383,9 @@ ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface) { int error = 0; struct secasvar *sav = NULL; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + if (state == NULL) { panic("state == NULL in ipsec4_output"); } @@ -3297,31 +3407,31 @@ ipsec4_interface_output(struct ipsec_output_state *state, ifnet_t interface) dst.sin_family = AF_INET; dst.sin_len = sizeof(dst); memcpy(&dst.sin_addr, &ip->ip_dst, sizeof(dst.sin_addr)); - + sav = key_alloc_outbound_sav_for_interface(interface, AF_INET, - (struct sockaddr *)&src, - (struct sockaddr *)&dst); + (struct sockaddr *)&src, + (struct sockaddr *)&dst); if (sav == NULL) { goto bad; } - + if ((error = ipsec4_output_internal(state, sav)) != 0) { goto bad; } - - KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0); + + KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0); if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); } return 0; - + bad: if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); } m_freem(state->m); state->m = NULL; - KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | 
DBG_FUNC_END, error,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0); return error; } @@ -3334,22 +3444,25 @@ ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused i struct secasvar *sav = NULL; int error = 0; struct sockaddr_in *sin; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - - if (!state) + + if (!state) { panic("state == NULL in ipsec4_output"); - if (!state->m) + } + if (!state->m) { panic("state->m == NULL in ipsec4_output"); - if (!state->dst) + } + if (!state->dst) { panic("state->dst == NULL in ipsec4_output"); - - KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0,0,0,0,0); - + } + + KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_START, 0, 0, 0, 0, 0); + KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("ipsec4_output: applied SP\n"); - kdebug_secpolicy(sp)); - + printf("ipsec4_output: applied SP\n"); + kdebug_secpolicy(sp)); + for (isr = sp->req; isr != NULL; isr = isr->next) { /* make SA index for search proper SA */ ip = mtod(state->m, struct ip *); @@ -3362,7 +3475,7 @@ ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused i sin->sin_family = AF_INET; sin->sin_port = IPSEC_PORT_ANY; bcopy(&ip->ip_src, &sin->sin_addr, - sizeof(sin->sin_addr)); + sizeof(sin->sin_addr)); } sin = (struct sockaddr_in *)&saidx.dst; if (sin->sin_len == 0) { @@ -3373,10 +3486,9 @@ ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused i * Get port from packet if upper layer is UDP and nat traversal * is enabled and transport mode. */ - + if ((esp_udp_encap_port & 0xFFFF) != 0 && - isr->saidx.mode == IPSEC_MODE_TRANSPORT) { - + isr->saidx.mode == IPSEC_MODE_TRANSPORT) { if (ip->ip_p == IPPROTO_UDP) { struct udphdr *udp; size_t hlen; @@ -3398,11 +3510,11 @@ ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused i sin->sin_port = udp->uh_dport; } } - + bcopy(&ip->ip_dst, &sin->sin_addr, - sizeof(sin->sin_addr)); + sizeof(sin->sin_addr)); } - + if ((error = key_checkrequest(isr, &saidx, &sav)) != 0) { /* * IPsec processing is required, but no SA found. @@ -3414,34 +3526,36 @@ ipsec4_output(struct ipsec_output_state *state, struct secpolicy *sp, __unused i IPSEC_STAT_INCREMENT(ipsecstat.out_nosa); goto bad; } - + /* validity check */ if (sav == NULL) { switch (ipsec_get_reqlevel(isr)) { - case IPSEC_LEVEL_USE: - continue; - case IPSEC_LEVEL_REQUIRE: - /* must be not reached here. */ - panic("ipsec4_output: no SA found, but required."); + case IPSEC_LEVEL_USE: + continue; + case IPSEC_LEVEL_REQUIRE: + /* must be not reached here. */ + panic("ipsec4_output: no SA found, but required."); } } - + if ((error = ipsec4_output_internal(state, sav)) != 0) { goto bad; } } - - KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0,0,0,0,0); - if (sav) + + KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, 0, 0, 0, 0, 0); + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); + } return 0; - + bad: - if (sav) + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); + } m_freem(state->m); state->m = NULL; - KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error,0,0,0,0); + KERNEL_DEBUG(DBG_FNC_IPSEC_OUT | DBG_FUNC_END, error, 0, 0, 0, 0); return error; } @@ -3461,24 +3575,24 @@ ipsec6_output_trans_internal( struct ip6_hdr *ip6; int error = 0; int plen; - + /* validity check */ if (sav == NULL || sav->sah == NULL) { error = EINVAL; goto bad; } - + /* * If there is no valid SA, we give up to process. * see same place at ipsec4_output(). 
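[Editor's note] The comment this hunk reflows ("If there is no valid SA, we give up to process") describes the same state gate used in every output path here: only MATURE or DYING SAs may emit traffic; anything else is rejected before the packet is touched. Reduced to a predicate, with illustrative enum values:

#include <stdio.h>

enum sa_state { SA_LARVAL, SA_MATURE, SA_DYING, SA_DEAD };

/* Outbound IPsec only uses fully keyed SAs: MATURE, or DYING (still
 * valid, about to be replaced). LARVAL/DEAD are rejected. */
static int sa_usable_for_output(enum sa_state s)
{
    return s == SA_MATURE || s == SA_DYING;
}

int main(void)
{
    printf("%d %d\n", sa_usable_for_output(SA_MATURE),
        sa_usable_for_output(SA_DEAD));   /* 1 0 */
    return 0;
}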
*/ if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) { + && sav->state != SADB_SASTATE_DYING) { IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa); error = EINVAL; goto bad; } - + state->outgoing_if = sav->sah->outgoing_if; switch (sav->sah->saidx.proto) { @@ -3498,7 +3612,7 @@ ipsec6_output_trans_internal( break; default: ipseclog((LOG_ERR, "ipsec6_output_trans: " - "unknown ipsec protocol %d\n", sav->sah->saidx.proto)); + "unknown ipsec protocol %d\n", sav->sah->saidx.proto)); m_freem(state->m); IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); error = EINVAL; @@ -3511,9 +3625,9 @@ ipsec6_output_trans_internal( plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr); if (plen > IPV6_MAXPACKET) { ipseclog((LOG_ERR, "ipsec6_output_trans: " - "IPsec with IPv6 jumbogram is not supported\n")); + "IPsec with IPv6 jumbogram is not supported\n")); IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); - error = EINVAL; /*XXX*/ + error = EINVAL; /*XXX*/ goto bad; } ip6 = mtod(state->m, struct ip6_hdr *); @@ -3526,12 +3640,12 @@ bad: int ipsec6_output_trans( - struct ipsec_output_state *state, - u_char *nexthdrp, - struct mbuf *mprev, - struct secpolicy *sp, - __unused int flags, - int *tun) + struct ipsec_output_state *state, + u_char *nexthdrp, + struct mbuf *mprev, + struct secpolicy *sp, + __unused int flags, + int *tun) { struct ip6_hdr *ip6; struct ipsecrequest *isr = NULL; @@ -3539,33 +3653,39 @@ ipsec6_output_trans( int error = 0; struct sockaddr_in6 *sin6; struct secasvar *sav = NULL; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - - if (!state) + + if (!state) { panic("state == NULL in ipsec6_output_trans"); - if (!state->m) + } + if (!state->m) { panic("state->m == NULL in ipsec6_output_trans"); - if (!nexthdrp) + } + if (!nexthdrp) { panic("nexthdrp == NULL in ipsec6_output_trans"); - if (!mprev) + } + if (!mprev) { panic("mprev == NULL in ipsec6_output_trans"); - if (!sp) + } + if (!sp) { panic("sp == NULL in ipsec6_output_trans"); - if (!tun) + } + if (!tun) { panic("tun == NULL in ipsec6_output_trans"); - + } + KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("ipsec6_output_trans: applyed SP\n"); - kdebug_secpolicy(sp)); - + printf("ipsec6_output_trans: applyed SP\n"); + kdebug_secpolicy(sp)); + *tun = 0; for (isr = sp->req; isr; isr = isr->next) { if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { /* the rest will be handled by ipsec6_output_tunnel() */ break; } - + /* make SA index for search proper SA */ ip6 = mtod(state->m, struct ip6_hdr *); bcopy(&isr->saidx, &saidx, sizeof(saidx)); @@ -3577,7 +3697,7 @@ ipsec6_output_trans( sin6->sin6_family = AF_INET6; sin6->sin6_port = IPSEC_PORT_ANY; bcopy(&ip6->ip6_src, &sin6->sin6_addr, - sizeof(ip6->ip6_src)); + sizeof(ip6->ip6_src)); if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) { /* fix scope id for comparing SPD */ sin6->sin6_addr.s6_addr16[1] = 0; @@ -3590,14 +3710,14 @@ ipsec6_output_trans( sin6->sin6_family = AF_INET6; sin6->sin6_port = IPSEC_PORT_ANY; bcopy(&ip6->ip6_dst, &sin6->sin6_addr, - sizeof(ip6->ip6_dst)); + sizeof(ip6->ip6_dst)); if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_dst)) { /* fix scope id for comparing SPD */ sin6->sin6_addr.s6_addr16[1] = 0; sin6->sin6_scope_id = ntohs(ip6->ip6_dst.s6_addr16[1]); } } - + if (key_checkrequest(isr, &saidx, &sav) == ENOENT) { /* * IPsec processing is required, but no SA found. @@ -3608,7 +3728,7 @@ ipsec6_output_trans( */ IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa); error = ENOENT; - + /* * Notify the fact that the packet is discarded * to ourselves. 
I believe this is better than @@ -3618,38 +3738,41 @@ ipsec6_output_trans( * pfctlinputs? */ icmp6_error(state->m, ICMP6_DST_UNREACH, - ICMP6_DST_UNREACH_ADMIN, 0); + ICMP6_DST_UNREACH_ADMIN, 0); state->m = NULL; /* icmp6_error freed the mbuf */ goto bad; } - + /* validity check */ if (sav == NULL) { switch (ipsec_get_reqlevel(isr)) { - case IPSEC_LEVEL_USE: - continue; - case IPSEC_LEVEL_REQUIRE: - /* must be not reached here. */ - panic("ipsec6_output_trans: no SA found, but required."); + case IPSEC_LEVEL_USE: + continue; + case IPSEC_LEVEL_REQUIRE: + /* must be not reached here. */ + panic("ipsec6_output_trans: no SA found, but required."); } } - + if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) { goto bad; } } - + /* if we have more to go, we need a tunnel mode processing */ - if (isr != NULL) + if (isr != NULL) { *tun = 1; - - if (sav) + } + + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); + } return 0; - + bad: - if (sav) + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); + } m_freem(state->m); state->m = NULL; return error; @@ -3666,26 +3789,26 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar int plen; struct sockaddr_in6* dst6; struct route_in6 *ro6; - + /* validity check */ if (sav == NULL || sav->sah == NULL || sav->sah->saidx.mode != IPSEC_MODE_TUNNEL) { error = EINVAL; goto bad; } - + /* * If there is no valid SA, we give up to process. * see same place at ipsec4_output(). */ if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) { + && sav->state != SADB_SASTATE_DYING) { IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa); error = EINVAL; goto bad; } - + state->outgoing_if = sav->sah->outgoing_if; - + if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) { /* * build IPsec tunnel. 
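[Editor's note] Shortly below, `ipsec64_encapsulate()` sets `*must_be_last` because once IPv6 traffic has been wrapped in an IPv4 outer header, `ip6_output()` cannot stack anything further: the 6-to-4 step has to be the outermost request, and the caller rejects chains that violate this ("IPv4 must be outer layer"). The chain-shape check, sketched on a toy request list with illustrative field names:

#include <stdio.h>

struct treq {                 /* toy stand-in for struct ipsecrequest */
    int outer_af;             /* address family of the outer header: 4 or 6 */
    struct treq *next;
};

/* For IPv6 traffic, an IPv4-outer tunnel request is only legal as the
 * last element of the chain. */
static int chain_ok(const struct treq *r)
{
    for (; r != NULL; r = r->next) {
        if (r->outer_af == 4 && r->next != NULL) {
            return 0;         /* IPv4 must be the outer layer */
        }
    }
    return 1;
}

int main(void)
{
    struct treq last = { 4, NULL };
    struct treq first = { 6, &last };
    struct treq bad_tail = { 6, NULL };
    struct treq bad = { 4, &bad_tail };
    printf("%d %d\n", chain_ok(&first), chain_ok(&bad)); /* 1 0 */
    return 0;
}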
@@ -3696,7 +3819,7 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar error = ENOMEM; goto bad; } - + if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET6) { error = ipsec6_encapsulate(state->m, sav); if (error) { @@ -3705,7 +3828,6 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar } ip6 = mtod(state->m, struct ip6_hdr *); } else if (((struct sockaddr *)&sav->sah->saidx.src)->sa_family == AF_INET) { - struct ip *ip; struct sockaddr_in* dst4; struct route *ro4 = NULL; @@ -3717,9 +3839,10 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar ipoa.ipoa_flags = IPOAF_SELECT_SRCIF; ipoa.ipoa_sotc = SO_TC_UNSPEC; ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; - - if (must_be_last) + + if (must_be_last) { *must_be_last = 1; + } state->tunneled = 4; /* must not process any further in ip6_output */ error = ipsec64_encapsulate(state->m, sav); @@ -3729,7 +3852,7 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar } /* Now we have an IPv4 packet */ ip = mtod(state->m, struct ip *); - + // grab sadb_mutex, to update sah's route cache and get a local copy of it lck_mtx_lock(sadb_mutex); ro4 = (struct route *)&sav->sah->sa_route; @@ -3738,9 +3861,10 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar RT_LOCK(ro4->ro_rt); } if (ROUTE_UNUSABLE(ro4) || - dst4->sin_addr.s_addr != ip->ip_dst.s_addr) { - if (ro4->ro_rt != NULL) + dst4->sin_addr.s_addr != ip->ip_dst.s_addr) { + if (ro4->ro_rt != NULL) { RT_UNLOCK(ro4->ro_rt); + } ROUTE_RELEASE(ro4); } if (ro4->ro_rt == NULL) { @@ -3760,47 +3884,47 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar goto bad; } switch (sav->sah->saidx.proto) { - case IPPROTO_ESP: + case IPPROTO_ESP: #if IPSEC_ESP - if ((error = esp4_output(state->m, sav)) != 0) { - state->m = NULL; - ROUTE_RELEASE(&ro4_copy); - goto bad; - } - break; - -#else - m_freem(state->m); + if ((error = esp4_output(state->m, sav)) != 0) { state->m = NULL; - error = EINVAL; ROUTE_RELEASE(&ro4_copy); goto bad; + } + break; + +#else + m_freem(state->m); + state->m = NULL; + error = EINVAL; + ROUTE_RELEASE(&ro4_copy); + goto bad; #endif - case IPPROTO_AH: - if ((error = ah4_output(state->m, sav)) != 0) { - state->m = NULL; - ROUTE_RELEASE(&ro4_copy); - goto bad; - } - break; - case IPPROTO_IPCOMP: - if ((error = ipcomp4_output(state->m, sav)) != 0) { - state->m = NULL; - ROUTE_RELEASE(&ro4_copy); - goto bad; - } - break; - default: - ipseclog((LOG_ERR, - "ipsec4_output: unknown ipsec protocol %d\n", - sav->sah->saidx.proto)); - m_freem(state->m); + case IPPROTO_AH: + if ((error = ah4_output(state->m, sav)) != 0) { + state->m = NULL; + ROUTE_RELEASE(&ro4_copy); + goto bad; + } + break; + case IPPROTO_IPCOMP: + if ((error = ipcomp4_output(state->m, sav)) != 0) { state->m = NULL; - error = EINVAL; ROUTE_RELEASE(&ro4_copy); goto bad; + } + break; + default: + ipseclog((LOG_ERR, + "ipsec4_output: unknown ipsec protocol %d\n", + sav->sah->saidx.proto)); + m_freem(state->m); + state->m = NULL; + error = EINVAL; + ROUTE_RELEASE(&ro4_copy); + goto bad; } - + if (state->m == 0) { error = ENOMEM; ROUTE_RELEASE(&ro4_copy); @@ -3817,18 +3941,19 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar lck_mtx_lock(sadb_mutex); route_copyin(&ro4_copy, ro4, sizeof(struct route)); lck_mtx_unlock(sadb_mutex); - if (error != 0) + if (error != 0) { goto bad; + } goto done; } else { ipseclog((LOG_ERR, 
"ipsec6_output_tunnel: " - "unsupported inner family, spi=%u\n", - (u_int32_t)ntohl(sav->spi))); + "unsupported inner family, spi=%u\n", + (u_int32_t)ntohl(sav->spi))); IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); error = EAFNOSUPPORT; goto bad; } - + // grab sadb_mutex, before updating sah's route cache lck_mtx_lock(sadb_mutex); ro6 = &sav->sah->sa_route; @@ -3837,9 +3962,10 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar RT_LOCK(ro6->ro_rt); } if (ROUTE_UNUSABLE(ro6) || - !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) { - if (ro6->ro_rt != NULL) + !IN6_ARE_ADDR_EQUAL(&dst6->sin6_addr, &ip6->ip6_dst)) { + if (ro6->ro_rt != NULL) { RT_UNLOCK(ro6->ro_rt); + } ROUTE_RELEASE(ro6); } if (ro6->ro_rt == 0) { @@ -3860,7 +3986,7 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar lck_mtx_unlock(sadb_mutex); goto bad; } - + /* * adjust state->dst if tunnel endpoint is offlink * @@ -3870,8 +3996,9 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar * sockaddr via rt_setgate(). This is currently * addressed by SA_SIZE roundup in that routine. */ - if (ro6->ro_rt->rt_flags & RTF_GATEWAY) + if (ro6->ro_rt->rt_flags & RTF_GATEWAY) { dst6 = (struct sockaddr_in6 *)(void *)ro6->ro_rt->rt_gateway; + } RT_UNLOCK(ro6->ro_rt); ROUTE_RELEASE(&state->ro); route_copyout((struct route *)&state->ro, (struct route *)ro6, sizeof(struct route_in6)); @@ -3880,7 +4007,7 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar // release sadb_mutex, after updating sah's route cache lck_mtx_unlock(sadb_mutex); } - + state->m = ipsec6_splithdr(state->m); if (!state->m) { IPSEC_STAT_INCREMENT(ipsec6stat.out_nomem); @@ -3889,27 +4016,27 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar } ip6 = mtod(state->m, struct ip6_hdr *); switch (sav->sah->saidx.proto) { - case IPPROTO_ESP: + case IPPROTO_ESP: #if IPSEC_ESP - error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav); + error = esp6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav); #else - m_freem(state->m); - error = EINVAL; + m_freem(state->m); + error = EINVAL; #endif - break; - case IPPROTO_AH: - error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav); - break; - case IPPROTO_IPCOMP: - /* XXX code should be here */ - /*FALLTHROUGH*/ - default: - ipseclog((LOG_ERR, "ipsec6_output_tunnel: " - "unknown ipsec protocol %d\n", sav->sah->saidx.proto)); - m_freem(state->m); - IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); - error = EINVAL; - break; + break; + case IPPROTO_AH: + error = ah6_output(state->m, &ip6->ip6_nxt, state->m->m_next, sav); + break; + case IPPROTO_IPCOMP: + /* XXX code should be here */ + /*FALLTHROUGH*/ + default: + ipseclog((LOG_ERR, "ipsec6_output_tunnel: " + "unknown ipsec protocol %d\n", sav->sah->saidx.proto)); + m_freem(state->m); + IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); + error = EINVAL; + break; } if (error) { state->m = NULL; @@ -3918,16 +4045,16 @@ ipsec6_output_tunnel_internal(struct ipsec_output_state *state, struct secasvar plen = state->m->m_pkthdr.len - sizeof(struct ip6_hdr); if (plen > IPV6_MAXPACKET) { ipseclog((LOG_ERR, "ipsec6_output_tunnel: " - "IPsec with IPv6 jumbogram is not supported\n")); + "IPsec with IPv6 jumbogram is not supported\n")); IPSEC_STAT_INCREMENT(ipsec6stat.out_inval); - error = EINVAL; /*XXX*/ + error = EINVAL; /*XXX*/ goto bad; } ip6 = mtod(state->m, struct ip6_hdr *); ip6->ip6_plen = htons(plen); done: return 0; - + bad: 
return error; } @@ -3945,25 +4072,29 @@ ipsec6_output_tunnel( int error = 0; LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - - if (!state) + + if (!state) { panic("state == NULL in ipsec6_output_tunnel"); - if (!state->m) + } + if (!state->m) { panic("state->m == NULL in ipsec6_output_tunnel"); - if (!sp) + } + if (!sp) { panic("sp == NULL in ipsec6_output_tunnel"); + } KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("ipsec6_output_tunnel: applyed SP\n"); - kdebug_secpolicy(sp)); + printf("ipsec6_output_tunnel: applyed SP\n"); + kdebug_secpolicy(sp)); /* * transport mode ipsec (before the 1st tunnel mode) is already * processed by ipsec6_output_trans(). */ for (isr = sp->req; isr; isr = isr->next) { - if (isr->saidx.mode == IPSEC_MODE_TUNNEL) + if (isr->saidx.mode == IPSEC_MODE_TUNNEL) { break; + } } for (/* already initialized */; isr; isr = isr->next) { @@ -4037,36 +4168,39 @@ ipsec6_output_tunnel( * see same place at ipsec4_output(). */ if (sav->state != SADB_SASTATE_MATURE - && sav->state != SADB_SASTATE_DYING) { + && sav->state != SADB_SASTATE_DYING) { IPSEC_STAT_INCREMENT(ipsec6stat.out_nosa); error = EINVAL; goto bad; } - + int must_be_last = 0; - + if ((error = ipsec6_output_tunnel_internal(state, sav, &must_be_last)) != 0) { goto bad; } - + if (must_be_last && isr->next) { ipseclog((LOG_ERR, "ipsec6_output_tunnel: " - "IPv4 must be outer layer, spi=%u\n", - (u_int32_t)ntohl(sav->spi))); + "IPv4 must be outer layer, spi=%u\n", + (u_int32_t)ntohl(sav->spi))); error = EINVAL; goto bad; } } - if (sav) + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); + } return 0; bad: - if (sav) + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); - if (state->m) + } + if (state->m) { m_freem(state->m); + } state->m = NULL; return error; } @@ -4076,9 +4210,9 @@ ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_c { int error = 0; struct secasvar *sav = NULL; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + if (state == NULL) { panic("state == NULL in ipsec6_output"); } @@ -4105,28 +4239,27 @@ ipsec6_interface_output(struct ipsec_output_state *state, ifnet_t interface, u_c memcpy(&dst.sin6_addr, &ip6->ip6_dst, sizeof(dst.sin6_addr)); sav = key_alloc_outbound_sav_for_interface(interface, AF_INET6, - (struct sockaddr *)&src, - (struct sockaddr *)&dst); + (struct sockaddr *)&src, + (struct sockaddr *)&dst); if (sav == NULL) { goto bad; } - + if (sav->sah && sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) { if ((error = ipsec6_output_tunnel_internal(state, sav, NULL)) != 0) { goto bad; } - } - else { + } else { if ((error = ipsec6_output_trans_internal(state, sav, nexthdrp, mprev)) != 0) { goto bad; } } - + if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); } return 0; - + bad: if (sav) { key_freesav(sav, KEY_SADB_UNLOCKED); @@ -4148,8 +4281,9 @@ ipsec4_splithdr(struct mbuf *m) struct ip *ip; int hlen; - if (m->m_len < sizeof(struct ip)) + if (m->m_len < sizeof(struct ip)) { panic("ipsec4_splithdr: first mbuf too short, m_len %d, pkt_len %d, m_flag %x", m->m_len, m->m_pkthdr.len, m->m_flags); + } ip = mtod(m, struct ip *); #ifdef _IP_VHL hlen = _IP_VHL_HL(ip->ip_vhl) << 2; @@ -4157,7 +4291,7 @@ ipsec4_splithdr(struct mbuf *m) hlen = ip->ip_hl << 2; #endif if (m->m_len > hlen) { - MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (!mh) { m_freem(m); return NULL; @@ -4174,8 +4308,9 @@ ipsec4_splithdr(struct mbuf *m) bcopy((caddr_t)ip, mtod(m, caddr_t), hlen); } else if (m->m_len < hlen) { m = m_pullup(m, hlen); - if (!m) + if 
(!m) { return NULL; + } } return m; } @@ -4189,12 +4324,13 @@ ipsec6_splithdr(struct mbuf *m) struct ip6_hdr *ip6; int hlen; - if (m->m_len < sizeof(struct ip6_hdr)) + if (m->m_len < sizeof(struct ip6_hdr)) { panic("ipsec6_splithdr: first mbuf too short"); + } ip6 = mtod(m, struct ip6_hdr *); hlen = sizeof(struct ip6_hdr); if (m->m_len > hlen) { - MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */ + MGETHDR(mh, M_DONTWAIT, MT_HEADER); /* MAC-OK */ if (!mh) { m_freem(m); return NULL; @@ -4211,8 +4347,9 @@ ipsec6_splithdr(struct mbuf *m) bcopy((caddr_t)ip6, mtod(m, caddr_t), hlen); } else if (m->m_len < hlen) { m = m_pullup(m, hlen); - if (!m) + if (!m) { return NULL; + } } return m; } @@ -4221,7 +4358,7 @@ ipsec6_splithdr(struct mbuf *m) /* validate inbound IPsec tunnel packet. */ int ipsec4_tunnel_validate( - struct mbuf *m, /* no pullup permitted, m->m_len >= ip */ + struct mbuf *m, /* no pullup permitted, m->m_len >= ip */ int off, u_int nxt0, struct secasvar *sav, @@ -4238,16 +4375,20 @@ ipsec4_tunnel_validate( LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); #if DIAGNOSTIC - if (m->m_len < sizeof(struct ip)) + if (m->m_len < sizeof(struct ip)) { panic("too short mbuf on ipsec4_tunnel_validate"); + } #endif - if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) + if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) { return 0; - if (m->m_pkthdr.len < off + sizeof(struct ip)) + } + if (m->m_pkthdr.len < off + sizeof(struct ip)) { return 0; + } /* do not decapsulate if the SA is for transport mode only */ - if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) + if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) { return 0; + } oip = mtod(m, struct ip *); #ifdef _IP_VHL @@ -4255,14 +4396,17 @@ ipsec4_tunnel_validate( #else hlen = oip->ip_hl << 2; #endif - if (hlen != sizeof(struct ip)) + if (hlen != sizeof(struct ip)) { return 0; + } sin = (struct sockaddr_in *)&sav->sah->saidx.dst; - if (sin->sin_family != AF_INET) + if (sin->sin_family != AF_INET) { return 0; - if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0) + } + if (bcmp(&oip->ip_dst, &sin->sin_addr, sizeof(oip->ip_dst)) != 0) { return 0; + } if (sav->sah->ipsec_if != NULL) { // the ipsec interface SAs don't have a policies. 
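[Editor's note] `ipsec4_tunnel_validate()` above is a chain of cheap rejections that must all pass before the comparatively expensive `key_gettunnel()` policy lookup. Its shape, as a standalone predicate over illustrative fields:

#include <stdio.h>

struct tun_check {            /* illustrative; not the kernel's types */
    int inner_proto;          /* 4 = IPPROTO_IPV4, 41 = IPPROTO_IPV6 */
    int pkt_len, off, inner_hdr_len;
    int sa_is_transport_only; /* SA negotiated for transport mode only */
    int outer_dst_matches_sa; /* outer dst equals the SA's endpoint */
};

static int tunnel_plausible(const struct tun_check *t)
{
    if (t->inner_proto != 4 && t->inner_proto != 41) {
        return 0;             /* payload is not IP-in-IP */
    }
    if (t->pkt_len < t->off + t->inner_hdr_len) {
        return 0;             /* inner header would be truncated */
    }
    if (t->sa_is_transport_only) {
        return 0;             /* never decapsulate on a transport-mode SA */
    }
    if (!t->outer_dst_matches_sa) {
        return 0;
    }
    return 1;                 /* worth the policy lookup */
}

int main(void)
{
    struct tun_check ok = { 4, 1500, 20, 20, 0, 1 };
    printf("%d\n", tunnel_plausible(&ok));   /* 1 */
    return 0;
}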
@@ -4309,27 +4453,28 @@ ipsec4_tunnel_validate( i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET; i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in); m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr), - (caddr_t)&i4src.sin_addr); + (caddr_t)&i4src.sin_addr); m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr), - (caddr_t)&i4dst.sin_addr); + (caddr_t)&i4dst.sin_addr); sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst, - (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst); + (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst); } else if (nxt == IPPROTO_IPV6) { bzero(&i6src, sizeof(struct sockaddr_in6)); bzero(&i6dst, sizeof(struct sockaddr_in6)); i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6; i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6); m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr), - (caddr_t)&i6src.sin6_addr); + (caddr_t)&i6src.sin6_addr); m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr), - (caddr_t)&i6dst.sin6_addr); + (caddr_t)&i6dst.sin6_addr); sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst, - (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst); - } else - return 0; /* unsupported family */ - - if (!sp) + (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst); + } else { + return 0; /* unsupported family */ + } + if (!sp) { return 0; + } key_freesp(sp, KEY_SADB_UNLOCKED); @@ -4340,7 +4485,7 @@ ipsec4_tunnel_validate( /* validate inbound IPsec tunnel packet. */ int ipsec6_tunnel_validate( - struct mbuf *m, /* no pullup permitted, m->m_len >= ip */ + struct mbuf *m, /* no pullup permitted, m->m_len >= ip */ int off, u_int nxt0, struct secasvar *sav, @@ -4354,27 +4499,33 @@ ipsec6_tunnel_validate( struct ip6_hdr *oip6; LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + #if DIAGNOSTIC - if (m->m_len < sizeof(struct ip6_hdr)) + if (m->m_len < sizeof(struct ip6_hdr)) { panic("too short mbuf on ipsec6_tunnel_validate"); + } #endif - if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) + if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) { return 0; + } - if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) + if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) { return 0; + } /* do not decapsulate if the SA is for transport mode only */ - if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) + if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) { return 0; + } oip6 = mtod(m, struct ip6_hdr *); /* AF_INET should be supported, but at this moment we don't. */ sin6 = (struct sockaddr_in6 *)&sav->sah->saidx.dst; - if (sin6->sin6_family != AF_INET6) + if (sin6->sin6_family != AF_INET6) { return 0; - if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr)) + } + if (!IN6_ARE_ADDR_EQUAL(&oip6->ip6_dst, &sin6->sin6_addr)) { return 0; + } if (sav->sah->ipsec_if != NULL) { // the ipsec interface SAs don't have a policies. 
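[Editor's note] Both validators pull the inner addresses out with `m_copydata()` at `off + offsetof(...)`. Against a flat buffer the equivalent extraction is `offsetof` plus `memcpy`, which also sidesteps the alignment trap the policy setters earlier had to bounce-buffer around. A hedged sketch with a minimal IPv4 header layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ip4_min {              /* minimal IPv4 header; offsets match the wire */
    uint8_t  ver_ihl, tos;
    uint16_t len, id, frag;
    uint8_t  ttl, proto;
    uint16_t sum;
    uint32_t src, dst;        /* at byte offsets 12 and 16 */
};

/* Copy the inner src/dst out of a packet buffer at inner_off; memcpy
 * tolerates any alignment, as m_copydata does for mbuf chains. */
static void inner_addrs(const uint8_t *pkt, size_t inner_off,
    uint32_t *src, uint32_t *dst)
{
    memcpy(src, pkt + inner_off + offsetof(struct ip4_min, src), sizeof(*src));
    memcpy(dst, pkt + inner_off + offsetof(struct ip4_min, dst), sizeof(*dst));
}

int main(void)
{
    uint8_t pkt[40] = { 0 };
    pkt[20 + 12] = 10; pkt[20 + 15] = 1;   /* inner src 10.0.0.1 at offset 20 */
    pkt[20 + 16] = 10; pkt[20 + 19] = 2;   /* inner dst 10.0.0.2 */
    uint32_t s, d;
    inner_addrs(pkt, 20, &s, &d);
    printf("src=0x%08x dst=0x%08x (byte order as stored)\n",
        (unsigned)s, (unsigned)d);
    return 0;
}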
@@ -4407,24 +4558,25 @@ ipsec6_tunnel_validate( i4src.sin_family = i4dst.sin_family = *ifamily = AF_INET; i4src.sin_len = i4dst.sin_len = sizeof(struct sockaddr_in); m_copydata(m, off + offsetof(struct ip, ip_src), sizeof(i4src.sin_addr), - (caddr_t)&i4src.sin_addr); + (caddr_t)&i4src.sin_addr); m_copydata(m, off + offsetof(struct ip, ip_dst), sizeof(i4dst.sin_addr), - (caddr_t)&i4dst.sin_addr); + (caddr_t)&i4dst.sin_addr); sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst, - (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst); + (struct sockaddr *)&i4src, (struct sockaddr *)&i4dst); } else if (nxt == IPPROTO_IPV6) { bzero(&i6src, sizeof(struct sockaddr_in6)); bzero(&i6dst, sizeof(struct sockaddr_in6)); i6src.sin6_family = i6dst.sin6_family = *ifamily = AF_INET6; i6src.sin6_len = i6dst.sin6_len = sizeof(struct sockaddr_in6); m_copydata(m, off + offsetof(struct ip6_hdr, ip6_src), sizeof(i6src.sin6_addr), - (caddr_t)&i6src.sin6_addr); + (caddr_t)&i6src.sin6_addr); m_copydata(m, off + offsetof(struct ip6_hdr, ip6_dst), sizeof(i6dst.sin6_addr), - (caddr_t)&i6dst.sin6_addr); + (caddr_t)&i6dst.sin6_addr); sp = key_gettunnel((struct sockaddr *)&osrc, (struct sockaddr *)&odst, - (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst); - } else - return 0; /* unsupported family */ + (struct sockaddr *)&i6src, (struct sockaddr *)&i6dst); + } else { + return 0; /* unsupported family */ + } /* * when there is no suitable inbound policy for the packet of the ipsec * tunnel mode, the kernel never decapsulate the tunneled packet @@ -4433,8 +4585,9 @@ ipsec6_tunnel_validate( * packet. if there is no rule of the generic tunnel, the packet * is rejected and the statistics will be counted up. */ - if (!sp) + if (!sp) { return 0; + } key_freesp(sp, KEY_SADB_UNLOCKED); return 1; @@ -4463,21 +4616,21 @@ ipsec_copypkt(struct mbuf *m) if ( m_get_ext_free(n) != NULL || m_mclhasreference(n) - ) - { + ) { int remain, copied; struct mbuf *mm; if (n->m_flags & M_PKTHDR) { MGETHDR(mnew, M_DONTWAIT, MT_HEADER); /* MAC-OK */ - if (mnew == NULL) + if (mnew == NULL) { goto fail; + } M_COPY_PKTHDR(mnew, n); - } - else { + } else { MGET(mnew, M_DONTWAIT, MT_DATA); - if (mnew == NULL) + if (mnew == NULL) { goto fail; + } } mnew->m_len = 0; mm = mnew; @@ -4496,32 +4649,34 @@ ipsec_copypkt(struct mbuf *m) int len; struct mbuf *mn; - if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) + if (remain <= (mm->m_flags & M_PKTHDR ? MHLEN : MLEN)) { len = remain; - else { /* allocate a cluster */ + } else { /* allocate a cluster */ MCLGET(mm, M_DONTWAIT); if (!(mm->m_flags & M_EXT)) { m_free(mm); goto fail; } len = remain < MCLBYTES ? - remain : MCLBYTES; + remain : MCLBYTES; } bcopy(n->m_data + copied, mm->m_data, - len); + len); copied += len; remain -= len; mm->m_len = len; - if (remain <= 0) /* completed? */ + if (remain <= 0) { /* completed? */ break; + } /* need another mbuf */ - MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */ - if (mn == NULL) + MGETHDR(mn, M_DONTWAIT, MT_HEADER); /* XXXMAC: tags copied next time in loop? */ + if (mn == NULL) { goto fail; + } mn->m_pkthdr.rcvif = NULL; mm->m_next = mn; mm = mn; @@ -4540,22 +4695,22 @@ ipsec_copypkt(struct mbuf *m) mpp = &n->m_next; } - return(m); - fail: + return m; +fail: m_freem(m); - return(NULL); + return NULL; } /* * Tags are allocated as mbufs for now, since our minimum size is MLEN, we * should make use of up to that much space. 
*/ -#define IPSEC_TAG_HEADER \ +#define IPSEC_TAG_HEADER \ struct ipsec_tag { - struct socket *socket; - u_int32_t history_count; - struct ipsec_history history[]; + struct socket *socket; + u_int32_t history_count; + struct ipsec_history history[]; #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) /* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers * are 32-bit: @@ -4566,36 +4721,36 @@ struct ipsec_tag { }; #endif -#define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag)) -#define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0])) -#define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \ - sizeof(struct ipsec_history)) +#define IPSEC_TAG_SIZE (MLEN - sizeof(struct m_tag)) +#define IPSEC_TAG_HDR_SIZE (offsetof(struct ipsec_tag, history[0])) +#define IPSEC_HISTORY_MAX ((IPSEC_TAG_SIZE - IPSEC_TAG_HDR_SIZE) / \ + sizeof(struct ipsec_history)) static struct ipsec_tag * ipsec_addaux( struct mbuf *m) { - struct m_tag *tag; - + struct m_tag *tag; + /* Check if the tag already exists */ tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL); - + if (tag == NULL) { - struct ipsec_tag *itag; - + struct ipsec_tag *itag; + /* Allocate a tag */ tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, - IPSEC_TAG_SIZE, M_DONTWAIT, m); - + IPSEC_TAG_SIZE, M_DONTWAIT, m); + if (tag) { itag = (struct ipsec_tag*)(tag + 1); itag->socket = 0; itag->history_count = 0; - + m_tag_prepend(m, tag); } } - + return tag ? (struct ipsec_tag*)(tag + 1) : NULL; } @@ -4603,10 +4758,10 @@ static struct ipsec_tag * ipsec_findaux( struct mbuf *m) { - struct m_tag *tag; - + struct m_tag *tag; + tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL); - + return tag ? (struct ipsec_tag*)(tag + 1) : NULL; } @@ -4614,10 +4769,10 @@ void ipsec_delaux( struct mbuf *m) { - struct m_tag *tag; - + struct m_tag *tag; + tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPSEC, NULL); - + if (tag) { m_tag_delete(m, tag); } @@ -4626,8 +4781,8 @@ ipsec_delaux( /* if the aux buffer is unnecessary, nuke it. 
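ipsec_addaux()/ipsec_findaux()/ipsec_delaux() in these hunks are a locate-or-create wrapper around the m_tag KPI: the payload (struct ipsec_tag) lives immediately after the m_tag header, so (tag + 1) yields the payload pointer. Here is a self-contained sketch of the same idiom, with malloc and a plain linked list standing in for m_tag_create()/m_tag_locate()/m_tag_prepend(); all names in it (struct tag, tag_locate, addaux) are illustrative, not kernel APIs:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for the kernel's struct m_tag */
struct tag {
	struct tag *next;
	uint32_t    id;    /* KERNEL_MODULE_TAG_ID in the kernel */
	uint16_t    type;  /* KERNEL_TAG_TYPE_IPSEC in the kernel */
	/* payload follows immediately, reached as (tag + 1) */
};

struct ipsec_tag_payload {
	void     *socket;
	uint32_t  history_count;
};

static struct tag *
tag_locate(struct tag *head, uint32_t id, uint16_t type)
{
	for (struct tag *t = head; t != NULL; t = t->next) {
		if (t->id == id && t->type == type) {
			return t;
		}
	}
	return NULL;
}

static struct ipsec_tag_payload *
addaux(struct tag **headp, uint32_t id, uint16_t type)
{
	struct tag *t = tag_locate(*headp, id, type);

	if (t == NULL) {
		/* header + payload in one block, like m_tag_create() */
		t = calloc(1, sizeof(*t) + sizeof(struct ipsec_tag_payload));
		if (t == NULL) {
			return NULL;
		}
		t->id = id;
		t->type = type;
		t->next = *headp;  /* prepend, like m_tag_prepend() */
		*headp = t;
	}
	return (struct ipsec_tag_payload *)(t + 1);
}

int
main(void)
{
	struct tag *head = NULL;
	struct ipsec_tag_payload *p = addaux(&head, 42, 7);

	p->history_count++;
	/* a second lookup finds the existing tag rather than allocating */
	printf("count=%u same=%d\n", p->history_count,
	    p == addaux(&head, 42, 7));
	free(head);
	return 0;
}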
*/ static void ipsec_optaux( - struct mbuf *m, - struct ipsec_tag *itag) + struct mbuf *m, + struct ipsec_tag *itag) { if (itag && itag->socket == NULL && itag->history_count == 0) { m_tag_delete(m, ((struct m_tag*)itag) - 1); @@ -4637,15 +4792,17 @@ ipsec_optaux( int ipsec_setsocket(struct mbuf *m, struct socket *so) { - struct ipsec_tag *tag; - + struct ipsec_tag *tag; + /* if so == NULL, don't insist on getting the aux mbuf */ if (so) { tag = ipsec_addaux(m); - if (!tag) + if (!tag) { return ENOBUFS; - } else + } + } else { tag = ipsec_findaux(m); + } if (tag) { tag->socket = so; ipsec_optaux(m, tag); @@ -4656,13 +4813,14 @@ ipsec_setsocket(struct mbuf *m, struct socket *so) struct socket * ipsec_getsocket(struct mbuf *m) { - struct ipsec_tag *itag; - + struct ipsec_tag *itag; + itag = ipsec_findaux(m); - if (itag) + if (itag) { return itag->socket; - else + } else { return NULL; + } } int @@ -4671,21 +4829,22 @@ ipsec_addhist( int proto, u_int32_t spi) { - struct ipsec_tag *itag; - struct ipsec_history *p; + struct ipsec_tag *itag; + struct ipsec_history *p; itag = ipsec_addaux(m); - if (!itag) + if (!itag) { return ENOBUFS; - if (itag->history_count == IPSEC_HISTORY_MAX) - return ENOSPC; /* XXX */ - + } + if (itag->history_count == IPSEC_HISTORY_MAX) { + return ENOSPC; /* XXX */ + } p = &itag->history[itag->history_count]; itag->history_count++; - + bzero(p, sizeof(*p)); p->ih_proto = proto; p->ih_spi = spi; - + return 0; } @@ -4694,15 +4853,18 @@ ipsec_gethist( struct mbuf *m, int *lenp) { - struct ipsec_tag *itag; - + struct ipsec_tag *itag; + itag = ipsec_findaux(m); - if (!itag) + if (!itag) { return NULL; - if (itag->history_count == 0) + } + if (itag->history_count == 0) { return NULL; - if (lenp) + } + if (lenp) { *lenp = (int)(itag->history_count * sizeof(struct ipsec_history)); + } return itag->history; } @@ -4710,8 +4872,8 @@ void ipsec_clearhist( struct mbuf *m) { - struct ipsec_tag *itag; - + struct ipsec_tag *itag; + itag = ipsec_findaux(m); if (itag) { itag->history_count = 0; @@ -4719,45 +4881,56 @@ ipsec_clearhist( ipsec_optaux(m, itag); } -__private_extern__ int +__private_extern__ boolean_t ipsec_send_natt_keepalive( struct secasvar *sav) { - struct mbuf *m; - struct ip *ip; - int error; - struct ip_out_args ipoa; - struct route ro; + struct mbuf *m = NULL; + int error = 0; int keepalive_interval = natt_keepalive_interval; - bzero(&ipoa, sizeof(ipoa)); - ipoa.ipoa_boundif = IFSCOPE_NONE; - ipoa.ipoa_flags = IPOAF_SELECT_SRCIF; - ipoa.ipoa_sotc = SO_TC_UNSPEC; - ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); + lck_mtx_lock(sadb_mutex); - if ((esp_udp_encap_port & 0xFFFF) == 0 || sav->remote_ike_port == 0) return FALSE; + if ((esp_udp_encap_port & 0xFFFF) == 0 || sav->remote_ike_port == 0) { + lck_mtx_unlock(sadb_mutex); + return FALSE; + } if (sav->natt_interval != 0) { keepalive_interval = (int)sav->natt_interval; } - + // natt timestamp may have changed... 
reverify - if ((natt_now - sav->natt_last_activity) < keepalive_interval) return FALSE; + if ((natt_now - sav->natt_last_activity) < keepalive_interval) { + lck_mtx_unlock(sadb_mutex); + return FALSE; + } + + if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) { + lck_mtx_unlock(sadb_mutex); + return FALSE; // don't send these from the kernel + } - if (sav->flags & SADB_X_EXT_ESP_KEEPALIVE) return FALSE; // don't send these from the kernel + lck_mtx_unlock(sadb_mutex); m = m_gethdr(M_NOWAIT, MT_DATA); - if (m == NULL) return FALSE; + if (m == NULL) { + return FALSE; + } - ip = (__typeof__(ip))m_mtod(m); + lck_mtx_lock(sadb_mutex); + if (sav->sah->saidx.dst.ss_family == AF_INET) { + struct ip_out_args ipoa = {}; + struct route ro = {}; + + ipoa.ipoa_boundif = IFSCOPE_NONE; + ipoa.ipoa_flags = IPOAF_SELECT_SRCIF; + ipoa.ipoa_sotc = SO_TC_UNSPEC; + ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC; + + struct ip *ip = (__typeof__(ip))m_mtod(m); - // this sends one type of NATT keepalives (Type 1, ESP keepalives, aren't sent by kernel) - if ((sav->flags & SADB_X_EXT_ESP_KEEPALIVE) == 0) { - struct udphdr *uh; - /* * Type 2: a UDP packet complete with IP header. * We must do this because UDP output requires @@ -4765,7 +4938,7 @@ ipsec_send_natt_keepalive( * contains one byte payload. The byte is set * to 0xFF. */ - uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip)); + struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip)); m->m_len = sizeof(struct udpiphdr) + 1; bzero(m_mtod(m), m->m_len); m->m_pkthdr.len = m->m_len; @@ -4785,62 +4958,134 @@ ipsec_send_natt_keepalive( uh->uh_ulen = htons(1 + sizeof(*uh)); uh->uh_sum = 0; *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip) + sizeof(*uh)) = 0xFF; - } - // grab sadb_mutex, to get a local copy of sah's route cache - lck_mtx_lock(sadb_mutex); - if (ROUTE_UNUSABLE(&sav->sah->sa_route) || - rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET) - ROUTE_RELEASE(&sav->sah->sa_route); + if (ROUTE_UNUSABLE(&sav->sah->sa_route) || + rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET) { + ROUTE_RELEASE(&sav->sah->sa_route); + } - route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route)); - lck_mtx_unlock(sadb_mutex); - - necp_mark_packet_as_keepalive(m, TRUE); + route_copyout(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route)); + lck_mtx_unlock(sadb_mutex); - error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa); + necp_mark_packet_as_keepalive(m, TRUE); + error = ip_output(m, NULL, &ro, IP_OUTARGS | IP_NOIPSEC, NULL, &ipoa); + + lck_mtx_lock(sadb_mutex); + route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route)); + } else if (sav->sah->saidx.dst.ss_family == AF_INET6) { + struct ip6_out_args ip6oa = {}; + struct route_in6 ro6 = {}; + + ip6oa.ip6oa_flowadv.code = 0; + ip6oa.ip6oa_flags = IP6OAF_SELECT_SRCIF | IP6OAF_BOUND_SRCADDR; + if (sav->sah->outgoing_if) { + ip6oa.ip6oa_boundif = sav->sah->outgoing_if; + ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; + } + + struct ip6_hdr *ip6 = (__typeof__(ip6))m_mtod(m); + + /* + * Type 2: a UDP packet complete with IPv6 header. + * We must do this because UDP output requires + * an inpcb which we don't have. UDP packet + * contains one byte payload. The byte is set + * to 0xFF. 
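For reference, the "Type 2" keepalive assembled in these branches is the RFC 3948 NAT-keepalive: a single 0xFF octet carried over the UDP encapsulation port. The kernel must hand-build the IP/UDP headers because UDP output wants an inpcb it does not have here; from userland, where a socket supplies the headers, the equivalent probe is one sendto(). In the sketch below the peer address 203.0.113.5 and port 4500 are placeholders (4500 is the conventional value of esp_udp_encap_port):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct sockaddr_in peer;
	const unsigned char keepalive = 0xFF;  /* RFC 3948 NAT-keepalive byte */
	int s = socket(AF_INET, SOCK_DGRAM, 0);

	if (s == -1) {
		perror("socket");
		return 1;
	}
	memset(&peer, 0, sizeof(peer));
	peer.sin_family = AF_INET;
	peer.sin_port = htons(4500);  /* UDP-encap port, sav->remote_ike_port in the hunk */
	inet_pton(AF_INET, "203.0.113.5", &peer.sin_addr);

	if (sendto(s, &keepalive, 1, 0,
	    (struct sockaddr *)&peer, sizeof(peer)) == -1) {
		perror("sendto");
	}
	close(s);
	return 0;
}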
+ */ + struct udphdr *uh = (__typeof__(uh))(void *)((char *)m_mtod(m) + sizeof(*ip6)); + m->m_len = sizeof(struct udphdr) + sizeof(struct ip6_hdr) + 1; + bzero(m_mtod(m), m->m_len); + m->m_pkthdr.len = m->m_len; + + ip6->ip6_flow = 0; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; + ip6->ip6_nxt = IPPROTO_UDP; + ip6->ip6_hlim = ip6_defhlim; + ip6->ip6_plen = htons(sizeof(struct udphdr) + 1); + if (sav->sah->dir != IPSEC_DIR_INBOUND) { + ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr; + ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr; + } else { + ip6->ip6_src = ((struct sockaddr_in6 *)&sav->sah->saidx.dst)->sin6_addr; + ip6->ip6_dst = ((struct sockaddr_in6 *)&sav->sah->saidx.src)->sin6_addr; + } + + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { + ip6->ip6_src.s6_addr16[1] = 0; + } + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { + ip6->ip6_dst.s6_addr16[1] = 0; + } + + uh->uh_sport = htons((u_short)esp_udp_encap_port); + uh->uh_dport = htons(sav->remote_ike_port); + uh->uh_ulen = htons(1 + sizeof(*uh)); + *(u_int8_t*)((char*)m_mtod(m) + sizeof(*ip6) + sizeof(*uh)) = 0xFF; + uh->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(uh->uh_ulen) + IPPROTO_UDP)); + m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT); + m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); + + if (ROUTE_UNUSABLE(&sav->sah->sa_route) || + rt_key(sav->sah->sa_route.ro_rt)->sa_family != AF_INET6) { + ROUTE_RELEASE(&sav->sah->sa_route); + } + + route_copyout((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6)); + lck_mtx_unlock(sadb_mutex); + + necp_mark_packet_as_keepalive(m, TRUE); + error = ip6_output(m, NULL, &ro6, IPV6_OUTARGS, NULL, NULL, &ip6oa); + + lck_mtx_lock(sadb_mutex); + route_copyin((struct route *)&ro6, (struct route *)&sav->sah->sa_route, sizeof(struct route_in6)); + } else { + ipseclog((LOG_ERR, "nat keepalive: invalid address family %u\n", sav->sah->saidx.dst.ss_family)); + lck_mtx_unlock(sadb_mutex); + m_freem(m); + return FALSE; + } - // grab sadb_mutex, to synchronize the sah's route cache with the local copy - lck_mtx_lock(sadb_mutex); - route_copyin(&ro, (struct route *)&sav->sah->sa_route, sizeof(struct route)); - lck_mtx_unlock(sadb_mutex); if (error == 0) { sav->natt_last_activity = natt_now; + lck_mtx_unlock(sadb_mutex); return TRUE; } + + lck_mtx_unlock(sadb_mutex); return FALSE; } __private_extern__ bool ipsec_fill_offload_frame(ifnet_t ifp, - struct secasvar *sav, - struct ifnet_keepalive_offload_frame *frame, - size_t frame_data_offset) + struct secasvar *sav, + struct ifnet_keepalive_offload_frame *frame, + size_t frame_data_offset) { u_int8_t *data = NULL; struct ip *ip = NULL; struct udphdr *uh = NULL; if (sav == NULL || sav->sah == NULL || frame == NULL || - (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) || - sav->sah->saidx.dst.ss_family != AF_INET || - !(sav->flags & SADB_X_EXT_NATT) || - !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) || - !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) || - sav->flags & SADB_X_EXT_ESP_KEEPALIVE || - (esp_udp_encap_port & 0xFFFF) == 0 || - sav->remote_ike_port == 0 || - (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) { + (ifp != NULL && ifp->if_index != sav->sah->outgoing_if) || + sav->sah->saidx.dst.ss_family != AF_INET || + !(sav->flags & SADB_X_EXT_NATT) || + !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE) || + !(sav->flags & SADB_X_EXT_NATT_KEEPALIVE_OFFLOAD) || + sav->flags & 
SADB_X_EXT_ESP_KEEPALIVE || + (esp_udp_encap_port & 0xFFFF) == 0 || + sav->remote_ike_port == 0 || + (natt_keepalive_interval == 0 && sav->natt_interval == 0 && sav->natt_offload_interval == 0)) { /* SA is not eligible for keepalive offload on this interface */ - return (FALSE); + return FALSE; } if (frame_data_offset + sizeof(struct udpiphdr) + 1 > IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) { /* Not enough room in this data frame */ - return (FALSE); + return FALSE; } - + data = frame->data; ip = (__typeof__(ip))(void *)(data + frame_data_offset); uh = (__typeof__(uh))(void *)(data + frame_data_offset + sizeof(*ip)); @@ -4856,14 +5101,14 @@ ipsec_fill_offload_frame(ifnet_t ifp, ip->ip_off &= htons(~IP_OFFMASK); ip->ip_off &= htons(~IP_MF); switch (ip4_ipsec_dfbit) { - case 0: /* clear DF bit */ - ip->ip_off &= htons(~IP_DF); - break; - case 1: /* set DF bit */ - ip->ip_off |= htons(IP_DF); - break; - default: /* copy DF bit */ - break; + case 0: /* clear DF bit */ + ip->ip_off &= htons(~IP_DF); + break; + case 1: /* set DF bit */ + ip->ip_off |= htons(IP_DF); + break; + default: /* copy DF bit */ + break; } ip->ip_len = htons(sizeof(struct udpiphdr) + 1); if (rfc6864 && IP_OFF_IS_ATOMIC(htons(ip->ip_off))) { @@ -4895,5 +5140,5 @@ ipsec_fill_offload_frame(ifnet_t ifp, } else { frame->interval = natt_keepalive_interval; } - return (TRUE); + return TRUE; } diff --git a/bsd/netinet6/ipsec.h b/bsd/netinet6/ipsec.h index e01608247..0cfe8a0b3 100644 --- a/bsd/netinet6/ipsec.h +++ b/bsd/netinet6/ipsec.h @@ -51,12 +51,12 @@ extern lck_attr_t *sadb_stat_mutex_attr; extern lck_mtx_t *sadb_stat_mutex; -#define IPSEC_STAT_INCREMENT(x) \ +#define IPSEC_STAT_INCREMENT(x) \ OSIncrementAtomic64((SInt64 *)&x) struct secpolicyaddrrange { - struct sockaddr_storage start; /* Start (low values) of address range */ - struct sockaddr_storage end; /* End (high values) of address range */ + struct sockaddr_storage start; /* Start (low values) of address range */ + struct sockaddr_storage end; /* End (high values) of address range */ }; /* @@ -66,15 +66,15 @@ struct secpolicyaddrrange { * specifies ICMPv6 type, and the port field in "dst" specifies ICMPv6 code. */ struct secpolicyindex { - u_int8_t dir; /* direction of packet flow, see blow */ - struct sockaddr_storage src; /* IP src address for SP */ - struct sockaddr_storage dst; /* IP dst address for SP */ - u_int8_t prefs; /* prefix length in bits for src */ - u_int8_t prefd; /* prefix length in bits for dst */ - u_int16_t ul_proto; /* upper layer Protocol */ + u_int8_t dir; /* direction of packet flow, see blow */ + struct sockaddr_storage src; /* IP src address for SP */ + struct sockaddr_storage dst; /* IP dst address for SP */ + u_int8_t prefs; /* prefix length in bits for src */ + u_int8_t prefd; /* prefix length in bits for dst */ + u_int16_t ul_proto; /* upper layer Protocol */ ifnet_t internal_if; /* Interface a matching packet is bound to */ - struct secpolicyaddrrange src_range; /* IP src address range for SP */ - struct secpolicyaddrrange dst_range; /* IP dst address range for SP */ + struct secpolicyaddrrange src_range; /* IP src address range for SP */ + struct secpolicyaddrrange dst_range; /* IP dst address range for SP */ #ifdef notyet uid_t uids; uid_t uidd; @@ -87,23 +87,23 @@ struct secpolicyindex { struct secpolicy { LIST_ENTRY(secpolicy) chain; - int refcnt; /* reference count */ - struct secpolicyindex spidx; /* selector */ - u_int32_t id; /* It's unique number on the system. 
*/ - u_int state; /* 0: dead, others: alive */ -#define IPSEC_SPSTATE_DEAD 0 -#define IPSEC_SPSTATE_ALIVE 1 + int refcnt; /* reference count */ + struct secpolicyindex spidx; /* selector */ + u_int32_t id; /* It's unique number on the system. */ + u_int state; /* 0: dead, others: alive */ +#define IPSEC_SPSTATE_DEAD 0 +#define IPSEC_SPSTATE_ALIVE 1 - u_int policy; /* DISCARD, NONE or IPSEC, see keyv2.h */ + u_int policy; /* DISCARD, NONE or IPSEC, see keyv2.h */ struct ipsecrequest *req; - /* pointer to the ipsec request tree, */ - /* if policy == IPSEC else this value == NULL.*/ + /* pointer to the ipsec request tree, */ + /* if policy == IPSEC else this value == NULL.*/ ifnet_t ipsec_if; /* IPSec interface to use */ ifnet_t outgoing_if; /* Outgoing interface for encrypted traffic */ - + char disabled; /* Set to ignore policy */ - + /* * lifetime handler. * the policy can be used without limitiation if both lifetime and @@ -111,29 +111,29 @@ struct secpolicy { * "lifetime" is passed by sadb_lifetime.sadb_lifetime_addtime. * "validtime" is passed by sadb_lifetime.sadb_lifetime_usetime. */ - long created; /* time created the policy */ - long lastused; /* updated every when kernel sends a packet */ - long lifetime; /* duration of the lifetime of this policy */ - long validtime; /* duration this policy is valid without use */ + long created; /* time created the policy */ + long lastused; /* updated every when kernel sends a packet */ + long lifetime; /* duration of the lifetime of this policy */ + long validtime; /* duration this policy is valid without use */ }; /* Request for IPsec */ struct ipsecrequest { struct ipsecrequest *next; - /* pointer to next structure */ - /* If NULL, it means the end of chain. */ + /* pointer to next structure */ + /* If NULL, it means the end of chain. */ struct secasindex saidx;/* hint for search proper SA */ - /* if __ss_len == 0 then no address specified.*/ - u_int level; /* IPsec level defined below. */ + /* if __ss_len == 0 then no address specified.*/ + u_int level; /* IPsec level defined below. */ - struct secpolicy *sp; /* back pointer to SP */ + struct secpolicy *sp; /* back pointer to SP */ }; /* security policy in PCB */ struct inpcbpolicy { struct secpolicy *sp_in; struct secpolicy *sp_out; - int priv; /* privileged socket ? */ + int priv; /* privileged socket ? */ }; /* SP acquiring list table. */ @@ -142,33 +142,33 @@ struct secspacq { struct secpolicyindex spidx; - long created; /* for lifetime */ - int count; /* for lifetime */ + long created; /* for lifetime */ + int count; /* for lifetime */ /* XXX: here is mbuf place holder to be sent ? */ }; #endif /* BSD_KERNEL_PRIVATE */ /* according to IANA assignment, port 0x0000 and proto 0xff are reserved. */ -#define IPSEC_PORT_ANY 0 -#define IPSEC_ULPROTO_ANY 255 -#define IPSEC_PROTO_ANY 255 +#define IPSEC_PORT_ANY 0 +#define IPSEC_ULPROTO_ANY 255 +#define IPSEC_PROTO_ANY 255 /* mode of security protocol */ /* NOTE: DON'T use IPSEC_MODE_ANY at SPD. It's only use in SAD */ -#define IPSEC_MODE_ANY 0 /* i.e. wildcard. */ -#define IPSEC_MODE_TRANSPORT 1 -#define IPSEC_MODE_TUNNEL 2 +#define IPSEC_MODE_ANY 0 /* i.e. wildcard. */ +#define IPSEC_MODE_TRANSPORT 1 +#define IPSEC_MODE_TUNNEL 2 /* * Direction of security policy. * NOTE: Since INVALID is used just as flag. * The other are used for loop counter too. 
*/ -#define IPSEC_DIR_ANY 0 -#define IPSEC_DIR_INBOUND 1 -#define IPSEC_DIR_OUTBOUND 2 -#define IPSEC_DIR_MAX 3 -#define IPSEC_DIR_INVALID 4 +#define IPSEC_DIR_ANY 0 +#define IPSEC_DIR_INBOUND 1 +#define IPSEC_DIR_OUTBOUND 2 +#define IPSEC_DIR_MAX 3 +#define IPSEC_DIR_INVALID 4 /* Policy level */ /* @@ -176,60 +176,60 @@ struct secspacq { * DISCARD, IPSEC and NONE are allowed for setkey() in SPD. * DISCARD and NONE are allowed for system default. */ -#define IPSEC_POLICY_DISCARD 0 /* discarding packet */ -#define IPSEC_POLICY_NONE 1 /* through IPsec engine */ -#define IPSEC_POLICY_IPSEC 2 /* do IPsec */ -#define IPSEC_POLICY_ENTRUST 3 /* consulting SPD if present. */ -#define IPSEC_POLICY_BYPASS 4 /* only for privileged socket. */ +#define IPSEC_POLICY_DISCARD 0 /* discarding packet */ +#define IPSEC_POLICY_NONE 1 /* through IPsec engine */ +#define IPSEC_POLICY_IPSEC 2 /* do IPsec */ +#define IPSEC_POLICY_ENTRUST 3 /* consulting SPD if present. */ +#define IPSEC_POLICY_BYPASS 4 /* only for privileged socket. */ #define IPSEC_POLICY_GENERATE 5 /* same as discard - IKE daemon can override with generated policy */ /* Security protocol level */ -#define IPSEC_LEVEL_DEFAULT 0 /* reference to system default */ -#define IPSEC_LEVEL_USE 1 /* use SA if present. */ -#define IPSEC_LEVEL_REQUIRE 2 /* require SA. */ -#define IPSEC_LEVEL_UNIQUE 3 /* unique SA. */ - -#define IPSEC_MANUAL_REQID_MAX 0x3fff - /* - * if security policy level == unique, this id - * indicate to a relative SA for use, else is - * zero. - * 1 - 0x3fff are reserved for manual keying. - * 0 are reserved for above reason. Others is - * for kernel use. - * Note that this id doesn't identify SA - * by only itself. - */ +#define IPSEC_LEVEL_DEFAULT 0 /* reference to system default */ +#define IPSEC_LEVEL_USE 1 /* use SA if present. */ +#define IPSEC_LEVEL_REQUIRE 2 /* require SA. */ +#define IPSEC_LEVEL_UNIQUE 3 /* unique SA. */ + +#define IPSEC_MANUAL_REQID_MAX 0x3fff +/* + * if security policy level == unique, this id + * indicate to a relative SA for use, else is + * zero. + * 1 - 0x3fff are reserved for manual keying. + * 0 are reserved for above reason. Others is + * for kernel use. + * Note that this id doesn't identify SA + * by only itself. 
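The terse NOTE above means: IPSEC_DIR_INVALID is only ever a flag value, while IPSEC_DIR_ANY through IPSEC_DIR_OUTBOUND double as array indices, with IPSEC_DIR_MAX as the loop bound. A small sketch of that loop-counter use, with the constants re-declared locally so it stands alone:

#include <stdio.h>

/* mirrors the values in bsd/netinet6/ipsec.h */
#define IPSEC_DIR_ANY      0
#define IPSEC_DIR_INBOUND  1
#define IPSEC_DIR_OUTBOUND 2
#define IPSEC_DIR_MAX      3

int
main(void)
{
	static const char *names[IPSEC_DIR_MAX] = {
		"any", "inbound", "outbound"
	};
	/* IPSEC_DIR_MAX bounds the loop; IPSEC_DIR_INVALID never indexes */
	for (int dir = IPSEC_DIR_ANY; dir < IPSEC_DIR_MAX; dir++) {
		printf("dir %d = %s\n", dir, names[dir]);
	}
	return 0;
}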
+ */ #define IPSEC_REPLAYWSIZE 32 /* statistics for ipsec processing */ struct ipsecstat { - u_quad_t in_success __attribute__ ((aligned (8))); /* succeeded inbound process */ - u_quad_t in_polvio __attribute__ ((aligned (8))); - /* security policy violation for inbound process */ - u_quad_t in_nosa __attribute__ ((aligned (8))); /* inbound SA is unavailable */ - u_quad_t in_inval __attribute__ ((aligned (8))); /* inbound processing failed due to EINVAL */ - u_quad_t in_nomem __attribute__ ((aligned (8))); /* inbound processing failed due to ENOBUFS */ - u_quad_t in_badspi __attribute__ ((aligned (8))); /* failed getting a SPI */ - u_quad_t in_ahreplay __attribute__ ((aligned (8))); /* AH replay check failed */ - u_quad_t in_espreplay __attribute__ ((aligned (8))); /* ESP replay check failed */ - u_quad_t in_ahauthsucc __attribute__ ((aligned (8))); /* AH authentication success */ - u_quad_t in_ahauthfail __attribute__ ((aligned (8))); /* AH authentication failure */ - u_quad_t in_espauthsucc __attribute__ ((aligned (8))); /* ESP authentication success */ - u_quad_t in_espauthfail __attribute__ ((aligned (8))); /* ESP authentication failure */ - u_quad_t in_esphist[256] __attribute__ ((aligned (8))); - u_quad_t in_ahhist[256] __attribute__ ((aligned (8))); - u_quad_t in_comphist[256] __attribute__ ((aligned (8))); - u_quad_t out_success __attribute__ ((aligned (8))); /* succeeded outbound process */ - u_quad_t out_polvio __attribute__ ((aligned (8))); - /* security policy violation for outbound process */ - u_quad_t out_nosa __attribute__ ((aligned (8))); /* outbound SA is unavailable */ - u_quad_t out_inval __attribute__ ((aligned (8))); /* outbound process failed due to EINVAL */ - u_quad_t out_nomem __attribute__ ((aligned (8))); /* inbound processing failed due to ENOBUFS */ - u_quad_t out_noroute __attribute__ ((aligned (8))); /* there is no route */ - u_quad_t out_esphist[256] __attribute__ ((aligned (8))); - u_quad_t out_ahhist[256] __attribute__ ((aligned (8))); - u_quad_t out_comphist[256] __attribute__ ((aligned (8))); + u_quad_t in_success __attribute__ ((aligned(8))); /* succeeded inbound process */ + u_quad_t in_polvio __attribute__ ((aligned(8))); + /* security policy violation for inbound process */ + u_quad_t in_nosa __attribute__ ((aligned(8))); /* inbound SA is unavailable */ + u_quad_t in_inval __attribute__ ((aligned(8))); /* inbound processing failed due to EINVAL */ + u_quad_t in_nomem __attribute__ ((aligned(8))); /* inbound processing failed due to ENOBUFS */ + u_quad_t in_badspi __attribute__ ((aligned(8))); /* failed getting a SPI */ + u_quad_t in_ahreplay __attribute__ ((aligned(8))); /* AH replay check failed */ + u_quad_t in_espreplay __attribute__ ((aligned(8))); /* ESP replay check failed */ + u_quad_t in_ahauthsucc __attribute__ ((aligned(8))); /* AH authentication success */ + u_quad_t in_ahauthfail __attribute__ ((aligned(8))); /* AH authentication failure */ + u_quad_t in_espauthsucc __attribute__ ((aligned(8))); /* ESP authentication success */ + u_quad_t in_espauthfail __attribute__ ((aligned(8))); /* ESP authentication failure */ + u_quad_t in_esphist[256] __attribute__ ((aligned(8))); + u_quad_t in_ahhist[256] __attribute__ ((aligned(8))); + u_quad_t in_comphist[256] __attribute__ ((aligned(8))); + u_quad_t out_success __attribute__ ((aligned(8))); /* succeeded outbound process */ + u_quad_t out_polvio __attribute__ ((aligned(8))); + /* security policy violation for outbound process */ + u_quad_t out_nosa __attribute__ ((aligned(8))); /* outbound SA 
is unavailable */ + u_quad_t out_inval __attribute__ ((aligned(8))); /* outbound process failed due to EINVAL */ + u_quad_t out_nomem __attribute__ ((aligned(8))); /* inbound processing failed due to ENOBUFS */ + u_quad_t out_noroute __attribute__ ((aligned(8))); /* there is no route */ + u_quad_t out_esphist[256] __attribute__ ((aligned(8))); + u_quad_t out_ahhist[256] __attribute__ ((aligned(8))); + u_quad_t out_comphist[256] __attribute__ ((aligned(8))); }; #ifdef BSD_KERNEL_PRIVATE @@ -239,22 +239,22 @@ struct ipsecstat { /* * Names for IPsec & Key sysctl objects */ -#define IPSECCTL_STATS 1 /* stats */ -#define IPSECCTL_DEF_POLICY 2 -#define IPSECCTL_DEF_ESP_TRANSLEV 3 /* int; ESP transport mode */ -#define IPSECCTL_DEF_ESP_NETLEV 4 /* int; ESP tunnel mode */ -#define IPSECCTL_DEF_AH_TRANSLEV 5 /* int; AH transport mode */ -#define IPSECCTL_DEF_AH_NETLEV 6 /* int; AH tunnel mode */ -#if 0 /* obsolete, do not reuse */ -#define IPSECCTL_INBOUND_CALL_IKE 7 +#define IPSECCTL_STATS 1 /* stats */ +#define IPSECCTL_DEF_POLICY 2 +#define IPSECCTL_DEF_ESP_TRANSLEV 3 /* int; ESP transport mode */ +#define IPSECCTL_DEF_ESP_NETLEV 4 /* int; ESP tunnel mode */ +#define IPSECCTL_DEF_AH_TRANSLEV 5 /* int; AH transport mode */ +#define IPSECCTL_DEF_AH_NETLEV 6 /* int; AH tunnel mode */ +#if 0 /* obsolete, do not reuse */ +#define IPSECCTL_INBOUND_CALL_IKE 7 #endif -#define IPSECCTL_AH_CLEARTOS 8 -#define IPSECCTL_AH_OFFSETMASK 9 -#define IPSECCTL_DFBIT 10 -#define IPSECCTL_ECN 11 -#define IPSECCTL_DEBUG 12 -#define IPSECCTL_ESP_RANDPAD 13 -#define IPSECCTL_MAXID 14 +#define IPSECCTL_AH_CLEARTOS 8 +#define IPSECCTL_AH_OFFSETMASK 9 +#define IPSECCTL_DFBIT 10 +#define IPSECCTL_ECN 11 +#define IPSECCTL_DEBUG 12 +#define IPSECCTL_ESP_RANDPAD 13 +#define IPSECCTL_MAXID 14 #define IPSECCTL_NAMES { \ { 0, 0 }, \ @@ -325,14 +325,14 @@ extern int ip4_ipsec_dfbit; extern int ip4_ipsec_ecn; extern int ip4_esp_randpad; -#define ipseclog(x) do { if (ipsec_debug) log x; } while (0) +#define ipseclog(x) do { if (ipsec_debug) log x; } while (0) extern struct secpolicy *ipsec4_getpolicybysock(struct mbuf *, u_int, - struct socket *, int *); + struct socket *, int *); extern struct secpolicy *ipsec4_getpolicybyaddr(struct mbuf *, u_int, int, - int *); + int *); extern int ipsec4_getpolicybyinterface(struct mbuf *, u_int, int *, - struct ip_out_args *, struct secpolicy **); + struct ip_out_args *, struct secpolicy **); extern u_int ipsec_get_reqlevel(struct ipsecrequest *); @@ -342,7 +342,7 @@ extern int ipsec_copy_policy(struct inpcbpolicy *, struct inpcbpolicy *); extern u_int ipsec_get_reqlevel(struct ipsecrequest *); extern int ipsec4_set_policy(struct inpcb *inp, int optname, - caddr_t request, size_t len, int priv); + caddr_t request, size_t len, int priv); extern int ipsec4_delete_pcbpolicy(struct inpcb *); extern int ipsec4_in_reject_so(struct mbuf *, struct socket *); extern int ipsec4_in_reject(struct mbuf *, struct inpcb *); @@ -377,7 +377,7 @@ extern struct mbuf *ipsec_copypkt(struct mbuf *); extern void ipsec_delaux(struct mbuf *); extern int ipsec_setsocket(struct mbuf *, struct socket *); extern struct socket *ipsec_getsocket(struct mbuf *); -extern int ipsec_addhist(struct mbuf *, int, u_int32_t); +extern int ipsec_addhist(struct mbuf *, int, u_int32_t); extern struct ipsec_history *ipsec_gethist(struct mbuf *, int *); extern void ipsec_clearhist(struct mbuf *); #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet6/ipsec6.h b/bsd/netinet6/ipsec6.h index 018afa4d7..7e8ff5bb1 100644 --- 
a/bsd/netinet6/ipsec6.h +++ b/bsd/netinet6/ipsec6.h @@ -54,9 +54,9 @@ extern int ip6_esp_randpad; struct ip6_out_args; extern struct secpolicy *ipsec6_getpolicybysock(struct mbuf *, u_int, - struct socket *, int *); + struct socket *, int *); extern struct secpolicy *ipsec6_getpolicybyaddr(struct mbuf *, u_int, int, - int *); + int *); extern int ipsec6_getpolicybyinterface(struct mbuf *, u_int, int, struct ip6_out_args *, int *, struct secpolicy **); @@ -65,7 +65,7 @@ struct inpcb; extern int ipsec6_in_reject_so(struct mbuf *, struct socket *); extern int ipsec6_delete_pcbpolicy(struct inpcb *); extern int ipsec6_set_policy(struct inpcb *inp, int optname, - caddr_t request, size_t len, int priv); + caddr_t request, size_t len, int priv); extern int ipsec6_in_reject(struct mbuf *, struct inpcb *); struct tcp6cb; @@ -77,10 +77,10 @@ extern const char *ipsec6_logpacketstr(struct ip6_hdr *, u_int32_t); extern int ipsec6_interface_output(struct ipsec_output_state *, ifnet_t, u_char *, struct mbuf *); extern int ipsec6_output_trans(struct ipsec_output_state *, u_char *, - struct mbuf *, struct secpolicy *, int, int *); + struct mbuf *, struct secpolicy *, int, int *); extern int ipsec6_output_tunnel(struct ipsec_output_state *, - struct secpolicy *, int); + struct secpolicy *, int); extern int ipsec6_tunnel_validate(struct mbuf *, int, u_int, - struct secasvar *, sa_family_t *); + struct secasvar *, sa_family_t *); #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET6_IPSEC6_H_ */ diff --git a/bsd/netinet6/mld6.c b/bsd/netinet6/mld6.c index ca0f1c7fd..60b7777cd 100644 --- a/bsd/netinet6/mld6.c +++ b/bsd/netinet6/mld6.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -166,54 +166,54 @@ static decl_lck_mtx_data(, mld_mtx); SLIST_HEAD(mld_in6m_relhead, in6_multi); -static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int); +static void mli_initvar(struct mld_ifinfo *, struct ifnet *, int); static struct mld_ifinfo *mli_alloc(int); -static void mli_free(struct mld_ifinfo *); -static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *); -static void mld_dispatch_packet(struct mbuf *); -static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *, - struct mld_tparams *); -static int mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *, - struct mld_tparams *); -static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *, - struct mld_tparams *, const int); +static void mli_free(struct mld_ifinfo *); +static void mli_delete(const struct ifnet *, struct mld_in6m_relhead *); +static void mld_dispatch_packet(struct mbuf *); +static void mld_final_leave(struct in6_multi *, struct mld_ifinfo *, + struct mld_tparams *); +static int mld_handle_state_change(struct in6_multi *, struct mld_ifinfo *, + struct mld_tparams *); +static int mld_initial_join(struct in6_multi *, struct mld_ifinfo *, + struct mld_tparams *, const int); #ifdef MLD_DEBUG -static const char * mld_rec_type_to_str(const int); +static const char * mld_rec_type_to_str(const int); #endif -static uint32_t mld_set_version(struct mld_ifinfo *, const int); -static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *); -static void mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int); -static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *, - /*const*/ struct mld_hdr *); -static int mld_v1_input_report(struct ifnet *, struct mbuf *, - const struct ip6_hdr *, /*const*/ struct mld_hdr *); -static void mld_v1_process_group_timer(struct in6_multi *, const int); -static void mld_v1_process_querier_timers(struct mld_ifinfo *); -static int mld_v1_transmit_report(struct in6_multi *, const int); -static uint32_t mld_v1_update_group(struct in6_multi *, const int); -static void mld_v2_cancel_link_timers(struct mld_ifinfo *); -static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *); +static uint32_t mld_set_version(struct mld_ifinfo *, const int); +static void mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *); +static void mld_dispatch_queue_locked(struct mld_ifinfo *, struct ifqueue *, int); +static int mld_v1_input_query(struct ifnet *, const struct ip6_hdr *, + /*const*/ struct mld_hdr *); +static int mld_v1_input_report(struct ifnet *, struct mbuf *, + const struct ip6_hdr *, /*const*/ struct mld_hdr *); +static void mld_v1_process_group_timer(struct in6_multi *, const int); +static void mld_v1_process_querier_timers(struct mld_ifinfo *); +static int mld_v1_transmit_report(struct in6_multi *, const int); +static uint32_t mld_v1_update_group(struct in6_multi *, const int); +static void mld_v2_cancel_link_timers(struct mld_ifinfo *); +static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *); static struct mbuf * - mld_v2_encap_report(struct ifnet *, struct mbuf *); -static int mld_v2_enqueue_filter_change(struct ifqueue *, - struct in6_multi *); -static int mld_v2_enqueue_group_record(struct ifqueue *, - struct in6_multi *, const int, const int, const int, - const int); -static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *, - struct mbuf *, const int, const int); -static int mld_v2_merge_state_changes(struct in6_multi 
*, - struct ifqueue *); -static void mld_v2_process_group_timers(struct mld_ifinfo *, - struct ifqueue *, struct ifqueue *, - struct in6_multi *, const int); -static int mld_v2_process_group_query(struct in6_multi *, - int, struct mbuf *, const int); -static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS; -static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS; -static int sysctl_mld_v2enable SYSCTL_HANDLER_ARGS; - -static int mld_timeout_run; /* MLD timer is scheduled to run */ +mld_v2_encap_report(struct ifnet *, struct mbuf *); +static int mld_v2_enqueue_filter_change(struct ifqueue *, + struct in6_multi *); +static int mld_v2_enqueue_group_record(struct ifqueue *, + struct in6_multi *, const int, const int, const int, + const int); +static int mld_v2_input_query(struct ifnet *, const struct ip6_hdr *, + struct mbuf *, const int, const int); +static int mld_v2_merge_state_changes(struct in6_multi *, + struct ifqueue *); +static void mld_v2_process_group_timers(struct mld_ifinfo *, + struct ifqueue *, struct ifqueue *, + struct in6_multi *, const int); +static int mld_v2_process_group_query(struct in6_multi *, + int, struct mbuf *, const int); +static int sysctl_mld_gsr SYSCTL_HANDLER_ARGS; +static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS; +static int sysctl_mld_v2enable SYSCTL_HANDLER_ARGS; + +static int mld_timeout_run; /* MLD timer is scheduled to run */ static void mld_timeout(void *); static void mld_sched_timeout(void); @@ -232,35 +232,35 @@ static unsigned int mld_mli_list_genid; /* * Subsystem lock macros. */ -#define MLD_LOCK() \ +#define MLD_LOCK() \ lck_mtx_lock(&mld_mtx) -#define MLD_LOCK_ASSERT_HELD() \ +#define MLD_LOCK_ASSERT_HELD() \ LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_OWNED) -#define MLD_LOCK_ASSERT_NOTHELD() \ +#define MLD_LOCK_ASSERT_NOTHELD() \ LCK_MTX_ASSERT(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED) -#define MLD_UNLOCK() \ +#define MLD_UNLOCK() \ lck_mtx_unlock(&mld_mtx) -#define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \ - SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \ +#define MLD_ADD_DETACHED_IN6M(_head, _in6m) { \ + SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle); \ } -#define MLD_REMOVE_DETACHED_IN6M(_head) { \ - struct in6_multi *_in6m, *_inm_tmp; \ - SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \ - SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \ - IN6M_REMREF(_in6m); \ - } \ - VERIFY(SLIST_EMPTY(_head)); \ +#define MLD_REMOVE_DETACHED_IN6M(_head) { \ + struct in6_multi *_in6m, *_inm_tmp; \ + SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) { \ + SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle); \ + IN6M_REMREF(_in6m); \ + } \ + VERIFY(SLIST_EMPTY(_head)); \ } -#define MLI_ZONE_MAX 64 /* maximum elements in zone */ -#define MLI_ZONE_NAME "mld_ifinfo" /* zone name */ +#define MLI_ZONE_MAX 64 /* maximum elements in zone */ +#define MLI_ZONE_NAME "mld_ifinfo" /* zone name */ -static unsigned int mli_size; /* size of zone element */ -static struct zone *mli_zone; /* zone for mld_ifinfo */ +static unsigned int mli_size; /* size of zone element */ +static struct zone *mli_zone; /* zone for mld_ifinfo */ -SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ +SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. 
*/ SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "IPv6 Multicast Listener Discovery"); @@ -270,34 +270,34 @@ SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay, "Rate limit for MLDv2 Group-and-Source queries in seconds"); SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED, - sysctl_mld_ifinfo, "Per-interface MLDv2 state"); + sysctl_mld_ifinfo, "Per-interface MLDv2 state"); -static int mld_v1enable = 1; +static int mld_v1enable = 1; SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_v1enable, 0, "Enable fallback to MLDv1"); -static int mld_v2enable = 1; +static int mld_v2enable = 1; SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &mld_v2enable, 0, sysctl_mld_v2enable, "I", "Enable MLDv2 (debug purposes only)"); -static int mld_use_allow = 1; +static int mld_use_allow = 1; SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves"); #ifdef MLD_DEBUG int mld_debug = 0; SYSCTL_INT(_net_inet6_mld, OID_AUTO, - debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, ""); + debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, ""); #endif /* * Packed Router Alert option structure declaration. */ struct mld_raopt { - struct ip6_hbh hbh; - struct ip6_opt pad; - struct ip6_opt_router ra; + struct ip6_hbh hbh; + struct ip6_opt pad; + struct ip6_opt_router ra; } __packed; /* @@ -307,16 +307,16 @@ static struct mld_raopt mld_ra = { .hbh = { 0, 0 }, .pad = { .ip6o_type = IP6OPT_PADN, 0 }, .ra = { - .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT, - .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2), - .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF), - (IP6OPT_RTALERT_MLD & 0xFF) } + .ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT, + .ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2), + .ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF), + (IP6OPT_RTALERT_MLD & 0xFF) } } }; static struct ip6_pktopts mld_po; /* Store MLDv2 record count in the module private scratch space */ -#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0] +#define vt_nrecs pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0] static __inline void mld_save_context(struct mbuf *m, struct ifnet *ifp) @@ -337,7 +337,7 @@ mld_scrub_context(struct mbuf *m) static __inline struct ifnet * mld_restore_context(struct mbuf *m) { - return (m->m_pkthdr.rcvif); + return m->m_pkthdr.rcvif; } /* @@ -355,8 +355,9 @@ sysctl_mld_gsr SYSCTL_HANDLER_ARGS i = mld_gsrdelay.tv_sec; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || !req->newptr) + if (error || !req->newptr) { goto out_locked; + } if (i < -1 || i >= 60) { error = EINVAL; @@ -367,7 +368,7 @@ sysctl_mld_gsr SYSCTL_HANDLER_ARGS out_locked: MLD_UNLOCK(); - return (error); + return error; } /* * Expose struct mld_ifinfo to userland, keyed by ifindex. 
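The handler in the next hunk takes the ifindex as the final MIB name component. From userland, the usual way to drive such a node is sysctlnametomib() on the parent name plus one appended index; the sketch below reads the raw bytes back, since struct mld_ifinfo_u is declared in a private kernel header. The buffer size and the assumption that the node name resolves on a given build are illustrative only:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	int mib[CTL_MAXNAME];
	size_t miblen = CTL_MAXNAME;
	unsigned char buf[512];  /* generously sized for struct mld_ifinfo_u */
	size_t len = sizeof(buf);
	unsigned int ifindex;

	if (argc != 2 || (ifindex = if_nametoindex(argv[1])) == 0) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}
	if (sysctlnametomib("net.inet6.mld.ifinfo", mib, &miblen) == -1) {
		perror("sysctlnametomib");
		return 1;
	}
	mib[miblen] = (int)ifindex;  /* keyed by ifindex, as the comment says */
	if (sysctl(mib, (u_int)miblen + 1, buf, &len, NULL, 0) == -1) {
		perror("sysctl");
		return 1;
	}
	printf("read %zu bytes of MLD state for %s\n", len, argv[1]);
	return 0;
}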
@@ -378,21 +379,23 @@ static int sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS { #pragma unused(oidp) - int *name; - int error; - u_int namelen; - struct ifnet *ifp; - struct mld_ifinfo *mli; - struct mld_ifinfo_u mli_u; + int *name; + int error; + u_int namelen; + struct ifnet *ifp; + struct mld_ifinfo *mli; + struct mld_ifinfo_u mli_u; name = (int *)arg1; namelen = arg2; - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } - if (namelen != 1) - return (EINVAL); + if (namelen != 1) { + return EINVAL; + } MLD_LOCK(); @@ -406,10 +409,11 @@ sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS ifnet_head_lock_shared(); ifp = ifindex2ifnet[name[0]]; ifnet_head_done(); - if (ifp == NULL) + if (ifp == NULL) { goto out_locked; + } - bzero(&mli_u, sizeof (mli_u)); + bzero(&mli_u, sizeof(mli_u)); LIST_FOREACH(mli, &mli_head, mli_link) { MLI_LOCK(mli); @@ -429,13 +433,13 @@ sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS mli_u.mli_uri = mli->mli_uri; MLI_UNLOCK(mli); - error = SYSCTL_OUT(req, &mli_u, sizeof (mli_u)); + error = SYSCTL_OUT(req, &mli_u, sizeof(mli_u)); break; } out_locked: MLD_UNLOCK(); - return (error); + return error; } static int @@ -452,8 +456,9 @@ sysctl_mld_v2enable SYSCTL_HANDLER_ARGS i = mld_v2enable; error = sysctl_handle_int(oidp, &i, 0, req); - if (error || !req->newptr) + if (error || !req->newptr) { goto out_locked; + } if (i < 0 || i > 1) { error = EINVAL; @@ -466,13 +471,15 @@ sysctl_mld_v2enable SYSCTL_HANDLER_ARGS * the MLD version back to v2. Otherwise, we have to explicitly * downgrade. Note that this functionality is to be used for debugging. */ - if (mld_v2enable == 1) + if (mld_v2enable == 1) { goto out_locked; + } LIST_FOREACH(mli, &mli_head, mli_link) { MLI_LOCK(mli); - if (mld_set_version(mli, MLD_VERSION_1) > 0) + if (mld_set_version(mli, MLD_VERSION_1) > 0) { mtp.qpt = 1; + } MLI_UNLOCK(mli); } @@ -481,7 +488,7 @@ out_locked: mld_set_timeout(&mtp); - return (error); + return error; } /* @@ -502,33 +509,39 @@ mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit MLD_LOCK_ASSERT_HELD(); - if (mli != NULL) + if (mli != NULL) { MLI_LOCK_ASSERT_HELD(mli); + } for (;;) { IF_DEQUEUE(ifq, m); - if (m == NULL) + if (m == NULL) { break; + } MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifq), (uint64_t)VM_KERNEL_ADDRPERM(m))); - if (mli != NULL) + if (mli != NULL) { MLI_UNLOCK(mli); + } MLD_UNLOCK(); mld_dispatch_packet(m); MLD_LOCK(); - if (mli != NULL) + if (mli != NULL) { MLI_LOCK(mli); + } - if (--limit == 0) + if (--limit == 0) { break; + } } - if (mli != NULL) + if (mli != NULL) { MLI_LOCK_ASSERT_HELD(mli); + } } /* @@ -546,20 +559,21 @@ mld_dispatch_queue_locked(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit static __inline__ int mld_is_addr_reported(const struct in6_addr *addr) { - VERIFY(IN6_IS_ADDR_MULTICAST(addr)); - if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) - return (0); + if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL) { + return 0; + } if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) { struct in6_addr tmp = *addr; in6_clearscope(&tmp); - if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) - return (0); + if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes)) { + return 0; + } } - return (1); + return 1; } /* @@ -574,8 +588,9 @@ mld_domifattach(struct ifnet *ifp, int how) (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); mli = mli_alloc(how); - if (mli == NULL) - return (NULL); + if (mli == NULL) { + 
return NULL; + } MLD_LOCK(); @@ -597,7 +612,7 @@ mld_domifattach(struct ifnet *ifp, int how) MLD_PRINTF(("%s: allocate mld_ifinfo for ifp 0x%llx(%s)\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - return (mli); + return mli; } /* @@ -638,7 +653,7 @@ mld_domifreattach(struct mld_ifinfo *mli) void mld_domifdetach(struct ifnet *ifp) { - SLIST_HEAD(, in6_multi) in6m_dthead; + SLIST_HEAD(, in6_multi) in6m_dthead; SLIST_INIT(&in6m_dthead); @@ -698,10 +713,11 @@ mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli) MLI_LOCK_ASSERT_NOTHELD(mli); MLI_LOCK(mli); if (!(ifp->if_flags & IFF_MULTICAST) && - (ifp->if_eflags & (IFEF_IPV6_ND6ALT|IFEF_LOCALNET_PRIVATE))) + (ifp->if_eflags & (IFEF_IPV6_ND6ALT | IFEF_LOCALNET_PRIVATE))) { mli->mli_flags |= MLIF_SILENT; - else + } else { mli->mli_flags &= ~MLIF_SILENT; + } MLI_UNLOCK(mli); } @@ -711,20 +727,23 @@ mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach) MLI_LOCK_ASSERT_HELD(mli); mli->mli_ifp = ifp; - if (mld_v2enable) + if (mld_v2enable) { mli->mli_version = MLD_VERSION_2; - else + } else { mli->mli_version = MLD_VERSION_1; + } mli->mli_flags = 0; mli->mli_rv = MLD_RV_INIT; mli->mli_qi = MLD_QI_INIT; mli->mli_qri = MLD_QRI_INIT; mli->mli_uri = MLD_URI_INIT; - if (mld_use_allow) + if (mld_use_allow) { mli->mli_flags |= MLIF_USEALLOW; - if (!reattach) + } + if (!reattach) { SLIST_INIT(&mli->mli_relinmhead); + } /* * Responses to general queries are subject to bounds. @@ -744,7 +763,7 @@ mli_alloc(int how) lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr); mli->mli_debug |= IFD_ALLOC; } - return (mli); + return mli; } static void @@ -774,23 +793,25 @@ mli_free(struct mld_ifinfo *mli) void mli_addref(struct mld_ifinfo *mli, int locked) { - if (!locked) + if (!locked) { MLI_LOCK_SPIN(mli); - else + } else { MLI_LOCK_ASSERT_HELD(mli); + } if (++mli->mli_refcnt == 0) { panic("%s: mli=%p wraparound refcnt", __func__, mli); /* NOTREACHED */ } - if (!locked) + if (!locked) { MLI_UNLOCK(mli); + } } void mli_remref(struct mld_ifinfo *mli) { - SLIST_HEAD(, in6_multi) in6m_dthead; + SLIST_HEAD(, in6_multi) in6m_dthead; struct ifnet *ifp; MLI_LOCK_SPIN(mli); @@ -835,11 +856,11 @@ static int mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld) { - struct mld_ifinfo *mli; - struct in6_multi *inm; - int err = 0, is_general_query; - uint16_t timer; - struct mld_tparams mtp = { 0, 0, 0, 0 }; + struct mld_ifinfo *mli; + struct in6_multi *inm; + int err = 0, is_general_query; + uint16_t timer; + struct mld_tparams mtp = { 0, 0, 0, 0 }; MLD_LOCK_ASSERT_NOTHELD(); @@ -872,7 +893,7 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, * MLDv1 General Query. * If this was not sent to the all-nodes group, ignore it. 
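The check that follows compares the destination against in6addr_linklocal_allnodes (ff02::1) only after in6_clearscope() strips the KAME-style scope ID embedded in s6_addr16[1]. A standalone sketch of that normalization; clearscope() here is a simplified stand-in for the kernel helper:

#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

/* Clear a KAME-style embedded scope ID (kept in the second 16-bit word
 * of link-local unicast/multicast addresses), as in6_clearscope() does. */
static void
clearscope(struct in6_addr *a)
{
	if (IN6_IS_ADDR_LINKLOCAL(a) || IN6_IS_ADDR_MC_LINKLOCAL(a)) {
		a->s6_addr[2] = 0;
		a->s6_addr[3] = 0;
	}
}

int
main(void)
{
	struct in6_addr dst, allnodes;

	inet_pton(AF_INET6, "ff02::1", &allnodes);  /* link-scope all-nodes */
	inet_pton(AF_INET6, "ff02::1", &dst);
	dst.s6_addr[2] = 0x00;
	dst.s6_addr[3] = 0x04;  /* pretend scope ID 4 was embedded on input */

	clearscope(&dst);
	printf("general query accepted: %d\n",
	    IN6_ARE_ADDR_EQUAL(&dst, &allnodes));
	return 0;
}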
*/ - struct in6_addr dst; + struct in6_addr dst; dst = ip6->ip6_dst; in6_clearscope(&dst); @@ -900,8 +921,9 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, MLI_UNLOCK(mli); timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE; - if (timer == 0) + if (timer == 0) { timer = 1; + } if (is_general_query) { struct in6_multistep step; @@ -916,8 +938,9 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, IN6_FIRST_MULTI(step, inm); while (inm != NULL) { IN6M_LOCK(inm); - if (inm->in6m_ifp == ifp) + if (inm->in6m_ifp == ifp) { mtp.cst += mld_v1_update_group(inm, timer); + } IN6M_UNLOCK(inm); IN6_NEXT_MULTI(step, inm); } @@ -948,7 +971,7 @@ mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, done: mld_set_timeout(&mtp); - return (err); + return err; } /* @@ -961,7 +984,7 @@ done: * We may be updating the group for the first time since we switched * to MLDv2. If we are, then we must clear any recorded source lists, * and transition to REPORTING state; the group timer is overloaded - * for group and group-source query responses. + * for group and group-source query responses. * * Unlike MLDv2, the delay per group should be jittered * to avoid bursts of MLDv1 reports. @@ -986,7 +1009,7 @@ mld_v1_update_group(struct in6_multi *inm, const int timer) "skipping.\n", __func__)); break; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case MLD_SG_QUERY_PENDING_MEMBER: case MLD_G_QUERY_PENDING_MEMBER: case MLD_IDLE_MEMBER: @@ -1004,7 +1027,7 @@ mld_v1_update_group(struct in6_multi *inm, const int timer) break; } - return (inm->in6m_timer); + return inm->in6m_timer; } /* @@ -1019,14 +1042,14 @@ static int mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, struct mbuf *m, const int off, const int icmp6len) { - struct mld_ifinfo *mli; - struct mldv2_query *mld; - struct in6_multi *inm; - uint32_t maxdelay, nsrc, qqi; - int err = 0, is_general_query; - uint16_t timer; - uint8_t qrv; - struct mld_tparams mtp = { 0, 0, 0, 0 }; + struct mld_ifinfo *mli; + struct mldv2_query *mld; + struct in6_multi *inm; + uint32_t maxdelay, nsrc, qqi; + int err = 0, is_general_query; + uint16_t timer; + uint8_t qrv; + struct mld_tparams mtp = { 0, 0, 0, 0 }; MLD_LOCK_ASSERT_NOTHELD(); @@ -1055,14 +1078,15 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off); - maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ + maxdelay = ntohs(mld->mld_maxdelay); /* in 1/10ths of a second */ if (maxdelay >= 32768) { maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) << - (MLD_MRC_EXP(maxdelay) + 3); + (MLD_MRC_EXP(maxdelay) + 3); } timer = maxdelay / MLD_TIMER_SCALE; - if (timer == 0) + if (timer == 0) { timer = 1; + } qrv = MLD_QRV(mld->mld_misc); if (qrv < 2) { @@ -1074,7 +1098,7 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, qqi = mld->mld_qqi; if (qqi >= 128) { qqi = MLD_QQIC_MANT(mld->mld_qqi) << - (MLD_QQIC_EXP(mld->mld_qqi) + 3); + (MLD_QQIC_EXP(mld->mld_qqi) + 3); } nsrc = ntohs(mld->mld_numsrc); @@ -1164,8 +1188,9 @@ mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6, in6_multihead_lock_shared(); IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm); in6_multihead_lock_done(); - if (inm == NULL) + if (inm == NULL) { goto done; + } IN6M_LOCK(inm); if (nsrc > 0) { @@ -1207,7 +1232,7 @@ done: } mld_set_timeout(&mtp); - return (err); + return err; } /* @@ -1219,9 +1244,9 @@ static int mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, const int off) { - struct 
mldv2_query *mld; - int retval; - uint16_t nsrc; + struct mldv2_query *mld; + int retval; + uint16_t nsrc; IN6M_LOCK_ASSERT_HELD(inm); @@ -1236,7 +1261,7 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, case MLD_AWAKENING_MEMBER: case MLD_IDLE_MEMBER: case MLD_LEAVING_MEMBER: - return (retval); + return retval; case MLD_REPORTING_MEMBER: case MLD_G_QUERY_PENDING_MEMBER: case MLD_SG_QUERY_PENDING_MEMBER: @@ -1259,7 +1284,7 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, } inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER; inm->in6m_timer = MLD_RANDOM_DELAY(timer); - return (retval); + return retval; } /* @@ -1269,7 +1294,7 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) { timer = min(inm->in6m_timer, timer); inm->in6m_timer = MLD_RANDOM_DELAY(timer); - return (retval); + return retval; } /* @@ -1284,10 +1309,10 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, * report for those sources. */ if (inm->in6m_nsrc > 0) { - struct mbuf *m; - uint8_t *sp; - int i, nrecorded; - int soff; + struct mbuf *m; + uint8_t *sp; + int i, nrecorded; + int soff; m = m0; soff = off + sizeof(struct mldv2_query); @@ -1296,26 +1321,28 @@ mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0, sp = mtod(m, uint8_t *) + soff; retval = in6m_record_source(inm, (const struct in6_addr *)(void *)sp); - if (retval < 0) + if (retval < 0) { break; + } nrecorded += retval; soff += sizeof(struct in6_addr); if (soff >= m->m_len) { soff = soff - m->m_len; m = m->m_next; - if (m == NULL) + if (m == NULL) { break; + } } } if (nrecorded > 0) { - MLD_PRINTF(( "%s: schedule response to SG query\n", + MLD_PRINTF(("%s: schedule response to SG query\n", __func__)); inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER; inm->in6m_timer = MLD_RANDOM_DELAY(timer); } } - return (retval); + return retval; } /* @@ -1329,20 +1356,21 @@ static int mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld) { - struct in6_addr src, dst; - struct in6_ifaddr *ia; - struct in6_multi *inm; + struct in6_addr src, dst; + struct in6_ifaddr *ia; + struct in6_multi *inm; if (!mld_v1enable) { MLD_PRINTF(("%s: ignore v1 report %s on ifp 0x%llx(%s)\n", __func__, ip6_sprintf(&mld->mld_addr), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - return (0); + return 0; } if ((ifp->if_flags & IFF_LOOPBACK) || - (m->m_pkthdr.pkt_flags & PKTF_LOOP)) - return (0); + (m->m_pkthdr.pkt_flags & PKTF_LOOP)) { + return 0; + } /* * MLDv1 reports must originate from a host's link-local address, @@ -1354,7 +1382,7 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n", __func__, ip6_sprintf(&ip6->ip6_src), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - return (EINVAL); + return EINVAL; } /* @@ -1368,7 +1396,7 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, MLD_PRINTF(("%s: ignore v1 query dst %s on ifp 0x%llx(%s)\n", __func__, ip6_sprintf(&ip6->ip6_dst), (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp))); - return (EINVAL); + return EINVAL; } /* @@ -1381,18 +1409,18 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be * performed for the on-wire address. 
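The surrounding code enforces RFC 3590's source rule: an MLDv1 report must come from a link-local address or from :: (a host that has no link-local address yet), and anything else earns EINVAL. The predicate reduces to the sketch below; mldv1_src_ok() is an invented name for illustration:

#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

/* accept link-local or unspecified MLDv1 report sources, reject the rest */
static int
mldv1_src_ok(const struct in6_addr *src)
{
	return IN6_IS_ADDR_LINKLOCAL(src) || IN6_IS_ADDR_UNSPECIFIED(src);
}

int
main(void)
{
	struct in6_addr ll, global, unspec = IN6ADDR_ANY_INIT;

	inet_pton(AF_INET6, "fe80::1", &ll);
	inet_pton(AF_INET6, "2001:db8::1", &global);

	printf("fe80::1     -> %d\n", mldv1_src_ok(&ll));      /* 1: accepted */
	printf("2001:db8::1 -> %d\n", mldv1_src_ok(&global));  /* 0: rejected */
	printf("::          -> %d\n", mldv1_src_ok(&unspec));  /* 1: accepted */
	return 0;
}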
*/ - ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); + ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); if (ia != NULL) { IFA_LOCK(&ia->ia_ifa); - if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))){ + if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) { IFA_UNLOCK(&ia->ia_ifa); IFA_REMREF(&ia->ia_ifa); - return (0); + return 0; } IFA_UNLOCK(&ia->ia_ifa); IFA_REMREF(&ia->ia_ifa); } else if (IN6_IS_ADDR_UNSPECIFIED(&src)) { - return (0); + return 0; } MLD_PRINTF(("%s: process v1 report %s on ifp 0x%llx(%s)\n", @@ -1403,8 +1431,9 @@ mld_v1_input_report(struct ifnet *ifp, struct mbuf *m, * Embed scope ID of receiving interface in MLD query for lookup * whilst we don't hold other locks (due to KAME locking lameness). */ - if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) + if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) { in6_setscope(&mld->mld_addr, ifp, NULL); + } /* * MLDv1 report suppression. @@ -1466,7 +1495,7 @@ out: /* XXX Clear embedded scope ID as userland won't expect it. */ in6_clearscope(&mld->mld_addr); - return (0); + return 0; } /* @@ -1483,10 +1512,10 @@ out: int mld_input(struct mbuf *m, int off, int icmp6len) { - struct ifnet *ifp; - struct ip6_hdr *ip6; - struct mld_hdr *mld; - int mldlen; + struct ifnet *ifp; + struct ip6_hdr *ip6; + struct mld_hdr *mld; + int mldlen; MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(m), off)); @@ -1503,10 +1532,12 @@ mld_input(struct mbuf *m, int off, int icmp6len) } else { mldlen = sizeof(struct mld_hdr); } + // check if mldv2_query/mld_hdr fits in the first mbuf + IP6_EXTHDR_CHECK(m, off, mldlen, return IPPROTO_DONE); IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen); if (mld == NULL) { icmp6stat.icp6s_badlen++; - return (IPPROTO_DONE); + return IPPROTO_DONE; } /* @@ -1517,18 +1548,21 @@ mld_input(struct mbuf *m, int off, int icmp6len) case MLD_LISTENER_QUERY: icmp6_ifstat_inc(ifp, ifs6_in_mldquery); if (icmp6len == sizeof(struct mld_hdr)) { - if (mld_v1_input_query(ifp, ip6, mld) != 0) - return (0); + if (mld_v1_input_query(ifp, ip6, mld) != 0) { + return 0; + } } else if (icmp6len >= sizeof(struct mldv2_query)) { if (mld_v2_input_query(ifp, ip6, m, off, - icmp6len) != 0) - return (0); + icmp6len) != 0) { + return 0; + } } break; case MLD_LISTENER_REPORT: icmp6_ifstat_inc(ifp, ifs6_in_mldreport); - if (mld_v1_input_report(ifp, m, ip6, mld) != 0) - return (0); + if (mld_v1_input_report(ifp, m, ip6, mld) != 0) { + return 0; + } break; case MLDV2_LISTENER_REPORT: icmp6_ifstat_inc(ifp, ifs6_in_mldreport); @@ -1540,7 +1574,7 @@ mld_input(struct mbuf *m, int off, int icmp6len) break; } - return (0); + return 0; } /* @@ -1555,14 +1589,18 @@ mld_set_timeout(struct mld_tparams *mtp) if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) { MLD_LOCK(); - if (mtp->qpt != 0) + if (mtp->qpt != 0) { querier_present_timers_running6 = 1; - if (mtp->it != 0) + } + if (mtp->it != 0) { interface_timers_running6 = 1; - if (mtp->cst != 0) + } + if (mtp->cst != 0) { current_state_timers_running6 = 1; - if (mtp->sct != 0) + } + if (mtp->sct != 0) { state_change_timers_running6 = 1; + } mld_sched_timeout(); MLD_UNLOCK(); } @@ -1575,15 +1613,15 @@ static void mld_timeout(void *arg) { #pragma unused(arg) - struct ifqueue scq; /* State-change packets */ - struct ifqueue qrq; /* Query response packets */ - struct ifnet *ifp; - struct mld_ifinfo *mli; - struct in6_multi *inm; - int uri_sec = 0; + struct ifqueue scq; /* State-change packets */ + struct ifqueue qrq; /* Query 
response packets */ + struct ifnet *ifp; + struct mld_ifinfo *mli; + struct in6_multi *inm; + int uri_sec = 0; unsigned int genid = mld_mli_list_genid; - SLIST_HEAD(, in6_multi) in6m_dthead; + SLIST_HEAD(, in6_multi) in6m_dthead; SLIST_INIT(&in6m_dthead); @@ -1608,8 +1646,9 @@ mld_timeout(void *arg) LIST_FOREACH(mli, &mli_head, mli_link) { MLI_LOCK(mli); mld_v1_process_querier_timers(mli); - if (mli->mli_v1_timer > 0) + if (mli->mli_v1_timer > 0) { querier_present_timers_running6 = 1; + } MLI_UNLOCK(mli); } } @@ -1647,8 +1686,9 @@ mld_timeout(void *arg) if (mli->mli_v2_timer == 0) { /* Do nothing. */ } else if (--mli->mli_v2_timer == 0) { - if (mld_v2_dispatch_general_query(mli) > 0) + if (mld_v2_dispatch_general_query(mli) > 0) { interface_timers_running6 = 1; + } } else { interface_timers_running6 = 1; } @@ -1668,14 +1708,15 @@ mld_timeout(void *arg) } LIST_FOREACH(mli, &mli_head, mli_link) - mli->mli_flags &= ~MLIF_PROCESSED; + mli->mli_flags &= ~MLIF_PROCESSED; } if (!current_state_timers_running6 && - !state_change_timers_running6) + !state_change_timers_running6) { goto out_locked; + } current_state_timers_running6 = 0; state_change_timers_running6 = 0; @@ -1711,8 +1752,9 @@ mld_timeout(void *arg) IN6_FIRST_MULTI(step, inm); while (inm != NULL) { IN6M_LOCK(inm); - if (inm->in6m_ifp != ifp) + if (inm->in6m_ifp != ifp) { goto next; + } MLI_LOCK(mli); switch (mli->mli_version) { @@ -1784,7 +1826,7 @@ next: } LIST_FOREACH(mli, &mli_head, mli_link) - mli->mli_flags &= ~MLIF_PROCESSED; + mli->mli_flags &= ~MLIF_PROCESSED; out_locked: /* re-arm the timer if there's work to do */ @@ -1892,7 +1934,7 @@ mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version) if (report_timer_expired) { inm->in6m_state = MLD_IDLE_MEMBER; (void) mld_v1_transmit_report(inm, - MLD_LISTENER_REPORT); + MLD_LISTENER_REPORT); IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_HELD(inm->in6m_mli); } @@ -1951,8 +1993,9 @@ mld_v2_process_group_timers(struct mld_ifinfo *mli, /* We are in timer callback, so be quick about it. 
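The mld_set_timeout()/mld_timeout() pair above follows a latch-and-rearm discipline: callers latch per-subsystem "work pending" flags under MLD_LOCK, and the single timeout callback consumes them, re-arming itself only while some flag is still set. A minimal user-space sketch of that pattern, assuming a pthread mutex and an elided platform timer; every name here is illustrative, none is from xnu:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t t_mtx = PTHREAD_MUTEX_INITIALIZER;
static bool qpt_pending, it_pending;    /* stand-ins for the *_running6 flags */
static bool timer_armed;

static void
arm_timer(void)
{
        timer_armed = true;             /* platform re-arm elided in sketch */
}

/* Producer side: latch work under the lock, ensure a timer is scheduled. */
void
set_timeout(bool qpt, bool it)
{
        if (!qpt && !it)
                return;                 /* cheap pre-check, as in the kernel */
        pthread_mutex_lock(&t_mtx);
        qpt_pending |= qpt;
        it_pending  |= it;
        if (!timer_armed)
                arm_timer();
        pthread_mutex_unlock(&t_mtx);
}

/* Timer side: consume flags, re-arm only if work was latched again. */
void
on_timeout(void)
{
        pthread_mutex_lock(&t_mtx);
        timer_armed = false;
        if (qpt_pending) {
                qpt_pending = false;
                /* querier-present work would run here, may re-latch */
        }
        if (it_pending) {
                it_pending = false;
                /* interface-timer work would run here, may re-latch */
        }
        if (qpt_pending || it_pending)
                arm_timer();            /* "re-arm the timer if there's work to do" */
        pthread_mutex_unlock(&t_mtx);
}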
*/ if (!state_change_retransmit_timer_expired && - !query_response_timer_expired) + !query_response_timer_expired) { return; + } switch (inm->in6m_state) { case MLD_NOT_MEMBER: @@ -1981,7 +2024,7 @@ mld_v2_process_group_timers(struct mld_ifinfo *mli, inm->in6m_state = MLD_REPORTING_MEMBER; in6m_clear_recorded(inm); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case MLD_REPORTING_MEMBER: case MLD_LEAVING_MEMBER: if (state_change_retransmit_timer_expired) { @@ -2069,7 +2112,7 @@ mld_set_version(struct mld_ifinfo *mli, const int mld_version) MLI_LOCK_ASSERT_HELD(mli); - return (mli->mli_v1_timer); + return mli->mli_v1_timer; } /* @@ -2084,9 +2127,9 @@ mld_set_version(struct mld_ifinfo *mli, const int mld_version) static void mld_v2_cancel_link_timers(struct mld_ifinfo *mli) { - struct ifnet *ifp; - struct in6_multi *inm; - struct in6_multistep step; + struct ifnet *ifp; + struct in6_multi *inm; + struct in6_multistep step; MLI_LOCK_ASSERT_HELD(mli); @@ -2111,8 +2154,9 @@ mld_v2_cancel_link_timers(struct mld_ifinfo *mli) IN6_FIRST_MULTI(step, inm); while (inm != NULL) { IN6M_LOCK(inm); - if (inm->in6m_ifp != ifp) + if (inm->in6m_ifp != ifp) { goto next; + } switch (inm->in6m_state) { case MLD_NOT_MEMBER: @@ -2141,11 +2185,11 @@ mld_v2_cancel_link_timers(struct mld_ifinfo *mli) SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, in6m_nrele); MLI_UNLOCK(mli); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case MLD_G_QUERY_PENDING_MEMBER: case MLD_SG_QUERY_PENDING_MEMBER: in6m_clear_recorded(inm); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case MLD_REPORTING_MEMBER: inm->in6m_state = MLD_REPORTING_MEMBER; break; @@ -2194,32 +2238,34 @@ mld_v1_process_querier_timers(struct mld_ifinfo *mli) static int mld_v1_transmit_report(struct in6_multi *in6m, const int type) { - struct ifnet *ifp; - struct in6_ifaddr *ia; - struct ip6_hdr *ip6; - struct mbuf *mh, *md; - struct mld_hdr *mld; - int error = 0; + struct ifnet *ifp; + struct in6_ifaddr *ia; + struct ip6_hdr *ip6; + struct mbuf *mh, *md; + struct mld_hdr *mld; + int error = 0; IN6M_LOCK_ASSERT_HELD(in6m); MLI_LOCK_ASSERT_HELD(in6m->in6m_mli); ifp = in6m->in6m_ifp; /* ia may be NULL if link-local address is tentative. */ - ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); + ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); MGETHDR(mh, M_DONTWAIT, MT_HEADER); if (mh == NULL) { - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); - return (ENOMEM); + } + return ENOMEM; } MGET(md, M_DONTWAIT, MT_DATA); if (md == NULL) { m_free(mh); - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); - return (ENOMEM); + } + return ENOMEM; } mh->m_next = md; @@ -2237,8 +2283,9 @@ mld_v1_transmit_report(struct in6_multi *in6m, const int type) ip6->ip6_vfc &= ~IPV6_VERSION_MASK; ip6->ip6_vfc |= IPV6_VERSION; ip6->ip6_nxt = IPPROTO_ICMPV6; - if (ia != NULL) + if (ia != NULL) { IFA_LOCK(&ia->ia_ifa); + } ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any; if (ia != NULL) { IFA_UNLOCK(&ia->ia_ifa); @@ -2271,16 +2318,16 @@ mld_v1_transmit_report(struct in6_multi *in6m, const int type) * Instead we defer the work to the mld_timeout() thread, thus * avoiding unlocking in_multihead_lock here. 
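The hand-off just below relies on a fixed-capacity, fail-fast queue: IF_QFULL() makes the producer drop the report with ENOMEM rather than grow the queue, and mld_timeout() drains it later without the caller having to drop in6_multihead_lock. A user-space sketch of that discipline, with a hypothetical 64-entry cap (not a kernel constant):

#include <errno.h>
#include <stddef.h>

#define SKQ_CAP 64                      /* illustrative capacity only */

struct skq {
        void  *slot[SKQ_CAP];
        size_t len;
};

static int
skq_enqueue(struct skq *q, void *pkt)
{
        if (q->len == SKQ_CAP)
                return ENOMEM;          /* caller drops pkt, as m_freem() does */
        q->slot[q->len++] = pkt;
        return 0;
}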
*/ - if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) { - MLD_PRINTF(("%s: v1 outbound queue full\n", __func__)); - error = ENOMEM; - m_freem(mh); - } else { - IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh); + if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) { + MLD_PRINTF(("%s: v1 outbound queue full\n", __func__)); + error = ENOMEM; + m_freem(mh); + } else { + IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh); VERIFY(error == 0); } - return (error); + return error; } /* @@ -2312,7 +2359,7 @@ mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp, int error = 0; VERIFY(mtp != NULL); - bzero(mtp, sizeof (*mtp)); + bzero(mtp, sizeof(*mtp)); IN6M_LOCK_ASSERT_HELD(inm); VERIFY(inm->in6m_mli != NULL); @@ -2355,7 +2402,7 @@ mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp, error = mld_handle_state_change(inm, mli, mtp); out: - return (error); + return error; } /* @@ -2374,10 +2421,10 @@ static int mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, struct mld_tparams *mtp, const int delay) { - struct ifnet *ifp; - struct ifqueue *ifq; - int error, retval, syncstates; - int odelay; + struct ifnet *ifp; + struct ifqueue *ifq; + int error, retval, syncstates; + int odelay; IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_NOTHELD(mli); @@ -2410,7 +2457,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, if ((ifp->if_flags & IFF_LOOPBACK) || (mli->mli_flags & MLIF_SILENT) || (IFNET_IS_CELLULAR(ifp) && - IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) || + IN6_IS_ADDR_MC_LINKLOCAL(&inm->in6m_addr)) || !mld_is_addr_reported(&inm->in6m_addr)) { MLD_PRINTF(("%s: not kicking state machine for silent group\n", __func__)); @@ -2450,7 +2497,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, } else { inm->in6m_state = MLD_IDLE_MEMBER; error = mld_v1_transmit_report(inm, - MLD_LISTENER_REPORT); + MLD_LISTENER_REPORT); IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_HELD(mli); @@ -2502,8 +2549,9 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, if (inm->in6m_sctimer > 1) { inm->in6m_sctimer = min(inm->in6m_sctimer, delay); - } else + } else { inm->in6m_sctimer = delay; + } } else { inm->in6m_sctimer = 1; } @@ -2526,7 +2574,7 @@ mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, if_name(inm->in6m_ifp))); } - return (error); + return error; } /* @@ -2536,8 +2584,8 @@ static int mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli, struct mld_tparams *mtp) { - struct ifnet *ifp; - int retval = 0; + struct ifnet *ifp; + int retval = 0; IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_NOTHELD(mli); @@ -2594,7 +2642,7 @@ mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli, MLI_UNLOCK(mli); done: - return (retval); + return retval; } /* @@ -2682,12 +2730,12 @@ mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli, VERIFY(inm->in6m_nrelecnt != 0); retval = mld_v2_enqueue_group_record( - &inm->in6m_scq, inm, 1, 0, 0, - (mli->mli_flags & MLIF_USEALLOW)); + &inm->in6m_scq, inm, 1, 0, 0, + (mli->mli_flags & MLIF_USEALLOW)); mtp->cst = (inm->in6m_scq.ifq_len > 0); KASSERT(retval != 0, ("%s: enqueue record = %d\n", __func__, - retval)); + retval)); inm->in6m_state = MLD_LEAVING_MEMBER; inm->in6m_sctimer = 1; @@ -2748,17 +2796,17 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, const int is_state_change, const int is_group_query, const int is_source_query, const int use_block_allow) { - struct mldv2_record mr; - struct mldv2_record *pmr; - struct ifnet *ifp; - struct ip6_msource *ims, *nims; - struct mbuf 
*m0, *m, *md; - int error, is_filter_list_change; - int minrec0len, m0srcs, msrcs, nbytes, off; - int record_has_sources; - int now; - int type; - uint8_t mode; + struct mldv2_record mr; + struct mldv2_record *pmr; + struct ifnet *ifp; + struct ip6_msource *ims, *nims; + struct mbuf *m0, *m, *md; + int error, is_filter_list_change; + int minrec0len, m0srcs, msrcs, nbytes, off; + int record_has_sources; + int now; + int type; + uint8_t mode; IN6M_LOCK_ASSERT_HELD(inm); MLI_LOCK_ASSERT_HELD(inm->in6m_mli); @@ -2783,8 +2831,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, * the generation of source records. */ if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 && - inm->in6m_nsrc == 0) + inm->in6m_nsrc == 0) { record_has_sources = 0; + } if (is_state_change) { /* @@ -2828,8 +2877,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, } } else { type = MLD_CHANGE_TO_INCLUDE_MODE; - if (mode == MCAST_UNDEFINED) + if (mode == MCAST_UNDEFINED) { record_has_sources = 0; + } } } } else { @@ -2854,14 +2904,15 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, /* * Generate the filter list changes using a separate function. */ - if (is_filter_list_change) - return (mld_v2_enqueue_filter_change(ifq, inm)); + if (is_filter_list_change) { + return mld_v2_enqueue_filter_change(ifq, inm); + } if (type == MLD_DO_NOTHING) { MLD_PRINTF(("%s: nothing to do for %s/%s\n", __func__, ip6_sprintf(&inm->in6m_addr), if_name(inm->in6m_ifp))); - return (0); + return 0; } /* @@ -2870,8 +2921,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, * ideally more. */ minrec0len = sizeof(struct mldv2_record); - if (record_has_sources) + if (record_has_sources) { minrec0len += sizeof(struct in6_addr); + } MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__, mld_rec_type_to_str(type), ip6_sprintf(&inm->in6m_addr), @@ -2890,26 +2942,29 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, m0 != NULL && (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && (m0->m_pkthdr.len + minrec0len) < - (ifp->if_mtu - MLD_MTUSPACE)) { + (ifp->if_mtu - MLD_MTUSPACE)) { m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - - sizeof(struct mldv2_record)) / - sizeof(struct in6_addr); + sizeof(struct mldv2_record)) / + sizeof(struct in6_addr); m = m0; MLD_PRINTF(("%s: use existing packet\n", __func__)); } else { if (IF_QFULL(ifq)) { MLD_PRINTF(("%s: outbound queue full\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m = NULL; m0srcs = (ifp->if_mtu - MLD_MTUSPACE - sizeof(struct mldv2_record)) / sizeof(struct in6_addr); - if (!is_state_change && !is_group_query) + if (!is_state_change && !is_group_query) { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); - if (m == NULL) + } + if (m == NULL) { m = m_gethdr(M_DONTWAIT, MT_DATA); - if (m == NULL) - return (-ENOMEM); + } + if (m == NULL) { + return -ENOMEM; + } mld_save_context(m, ifp); @@ -2926,10 +2981,11 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, mr.mr_addr = inm->in6m_addr; in6_clearscope(&mr.mr_addr); if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } nbytes += sizeof(struct mldv2_record); @@ -2968,7 +3024,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, MLD_PRINTF(("%s: node is %d\n", __func__, now)); if ((now != mode) || (now == mode && - (!use_block_allow && 
mode == MCAST_UNDEFINED))) { + (!use_block_allow && mode == MCAST_UNDEFINED))) { MLD_PRINTF(("%s: skip node\n", __func__)); continue; } @@ -2980,16 +3036,18 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, MLD_PRINTF(("%s: append node\n", __func__)); if (!m_append(m, sizeof(struct in6_addr), (void *)&ims->im6s_addr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } nbytes += sizeof(struct in6_addr); ++msrcs; - if (msrcs == m0srcs) + if (msrcs == m0srcs) { break; + } } MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__, msrcs)); @@ -2999,9 +3057,10 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, if (is_source_query && msrcs == 0) { MLD_PRINTF(("%s: no recorded sources to report\n", __func__)); - if (m != m0) + if (m != m0) { m_freem(m); - return (0); + } + return 0; } /* @@ -3017,8 +3076,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, /* * No further work needed if no source list in packet(s). */ - if (!record_has_sources) - return (nbytes); + if (!record_has_sources) { + return nbytes; + } /* * Whilst sources remain to be announced, we need to allocate @@ -3028,23 +3088,26 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, while (nims != NULL) { if (IF_QFULL(ifq)) { MLD_PRINTF(("%s: outbound queue full\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); - if (m == NULL) + if (m == NULL) { m = m_gethdr(M_DONTWAIT, MT_DATA); - if (m == NULL) - return (-ENOMEM); + } + if (m == NULL) { + return -ENOMEM; + } mld_save_context(m, ifp); md = m_getptr(m, 0, &off); pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off); MLD_PRINTF(("%s: allocated next packet\n", __func__)); if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m->m_pkthdr.vt_nrecs = 1; nbytes += sizeof(struct mldv2_record); @@ -3059,7 +3122,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, now = im6s_get_mode(inm, ims, 1); if ((now != mode) || (now == mode && - (!use_block_allow && mode == MCAST_UNDEFINED))) { + (!use_block_allow && mode == MCAST_UNDEFINED))) { MLD_PRINTF(("%s: skip node\n", __func__)); continue; } @@ -3071,15 +3134,17 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, MLD_PRINTF(("%s: append node\n", __func__)); if (!m_append(m, sizeof(struct in6_addr), (void *)&ims->im6s_addr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed.\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } ++msrcs; - if (msrcs == m0srcs) + if (msrcs == m0srcs) { break; + } } pmr->mr_numsrc = htons(msrcs); nbytes += (msrcs * sizeof(struct in6_addr)); @@ -3088,7 +3153,7 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, IF_ENQUEUE(ifq, m); } - return (nbytes); + return nbytes; } /* @@ -3097,9 +3162,9 @@ mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, * current filter modes on each ip_msource node. 
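The m0srcs bound used by both append loops above is plain arithmetic over the link MTU: reserve space for the IPv6 header, the Router Alert hop-by-hop option, and the ICMPv6 report header, subtract the 20-byte group record, and divide by the 16-byte source address. A standalone check of that arithmetic, assuming a typical 1500-byte Ethernet MTU and the usual 8-byte Router Alert option (conventional wire sizes, not values read from the headers):

#include <stdio.h>

int
main(void)
{
        const int if_mtu    = 1500;     /* assumed Ethernet MTU */
        const int ip6_hdr   = 40;       /* fixed IPv6 header */
        const int ra_opt    = 8;        /* hop-by-hop header w/ Router Alert */
        const int icmp6_hdr = 8;        /* MLDv2 report header */
        const int mtuspace  = ip6_hdr + ra_opt + icmp6_hdr;     /* 56 */
        const int rec_hdr   = 4 + 16;   /* mldv2_record fields + group addr */
        const int src_size  = 16;       /* one struct in6_addr */

        /* mirrors (ifp->if_mtu - MLD_MTUSPACE - sizeof(struct mldv2_record))
         * / sizeof(struct in6_addr) from the enqueue path above */
        printf("sources in a fresh record: %d\n",
            (if_mtu - mtuspace - rec_hdr) / src_size);          /* prints 89 */
        return 0;
}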
*/ typedef enum { - REC_NONE = 0x00, /* MCAST_UNDEFINED */ - REC_ALLOW = 0x01, /* MCAST_INCLUDE */ - REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ + REC_NONE = 0x00, /* MCAST_UNDEFINED */ + REC_ALLOW = 0x01, /* MCAST_INCLUDE */ + REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ REC_FULL = REC_ALLOW | REC_BLOCK } rectype_t; @@ -3129,35 +3194,36 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) { static const int MINRECLEN = sizeof(struct mldv2_record) + sizeof(struct in6_addr); - struct ifnet *ifp; - struct mldv2_record mr; - struct mldv2_record *pmr; - struct ip6_msource *ims, *nims; - struct mbuf *m, *m0, *md; - int m0srcs, nbytes, npbytes, off, rsrcs, schanged; - int nallow, nblock; - uint8_t mode, now, then; - rectype_t crt, drt, nrt; + struct ifnet *ifp; + struct mldv2_record mr; + struct mldv2_record *pmr; + struct ip6_msource *ims, *nims; + struct mbuf *m, *m0, *md; + int m0srcs, nbytes, npbytes, off, rsrcs, schanged; + int nallow, nblock; + uint8_t mode, now, then; + rectype_t crt, drt, nrt; IN6M_LOCK_ASSERT_HELD(inm); if (inm->in6m_nsrc == 0 || - (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) - return (0); - - ifp = inm->in6m_ifp; /* interface */ - mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ - crt = REC_NONE; /* current group record type */ - drt = REC_NONE; /* mask of completed group record types */ - nrt = REC_NONE; /* record type for current node */ - m0srcs = 0; /* # source which will fit in current mbuf chain */ - npbytes = 0; /* # of bytes appended this packet */ - nbytes = 0; /* # of bytes appended to group's state-change queue */ - rsrcs = 0; /* # sources encoded in current record */ - schanged = 0; /* # nodes encoded in overall filter change */ - nallow = 0; /* # of source entries in ALLOW_NEW */ - nblock = 0; /* # of source entries in BLOCK_OLD */ - nims = NULL; /* next tree node pointer */ + (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) { + return 0; + } + + ifp = inm->in6m_ifp; /* interface */ + mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ + crt = REC_NONE; /* current group record type */ + drt = REC_NONE; /* mask of completed group record types */ + nrt = REC_NONE; /* record type for current node */ + m0srcs = 0; /* # source which will fit in current mbuf chain */ + npbytes = 0; /* # of bytes appended this packet */ + nbytes = 0; /* # of bytes appended to group's state-change queue */ + rsrcs = 0; /* # sources encoded in current record */ + schanged = 0; /* # nodes encoded in overall filter change */ + nallow = 0; /* # of source entries in ALLOW_NEW */ + nblock = 0; /* # of source entries in BLOCK_OLD */ + nims = NULL; /* next tree node pointer */ /* * For each possible filter record mode. 
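The rectype_t values above are deliberately bit-complementary: taking the complement within REC_FULL flips ALLOW to BLOCK and back, which is how the hunk that follows derives the record type for a source with no explicit state (nrt = (rectype_t)(~mode & REC_FULL)). A tiny standalone demonstration of that identity, for illustration only:

#include <stdio.h>

typedef enum {
        REC_NONE  = 0x00,
        REC_ALLOW = 0x01,       /* becomes an ALLOW_NEW_SOURCES record */
        REC_BLOCK = 0x02,       /* becomes a BLOCK_OLD_SOURCES record */
        REC_FULL  = REC_ALLOW | REC_BLOCK
} rectype_t;

int
main(void)
{
        rectype_t mode;

        for (mode = REC_ALLOW; mode <= REC_BLOCK; mode++) {
                /* complement within REC_FULL flips ALLOW <-> BLOCK */
                printf("mode %#x -> other record type %#x\n",
                    (unsigned)mode, (unsigned)(~mode & REC_FULL));
        }
        return 0;
}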
@@ -3171,23 +3237,24 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) m0 = ifq->ifq_tail; if (m0 != NULL && (m0->m_pkthdr.vt_nrecs + 1 <= - MLD_V2_REPORT_MAXRECS) && + MLD_V2_REPORT_MAXRECS) && (m0->m_pkthdr.len + MINRECLEN) < - (ifp->if_mtu - MLD_MTUSPACE)) { + (ifp->if_mtu - MLD_MTUSPACE)) { m = m0; m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - - sizeof(struct mldv2_record)) / - sizeof(struct in6_addr); + sizeof(struct mldv2_record)) / + sizeof(struct in6_addr); MLD_PRINTF(("%s: use previous packet\n", __func__)); } else { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); - if (m == NULL) + if (m == NULL) { m = m_gethdr(M_DONTWAIT, MT_DATA); + } if (m == NULL) { MLD_PRINTF(("%s: m_get*() failed\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } m->m_pkthdr.vt_nrecs = 0; mld_save_context(m, ifp); @@ -3209,11 +3276,12 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) mr.mr_addr = inm->in6m_addr; in6_clearscope(&mr.mr_addr); if (!m_append(m, sizeof(mr), (void *)&mr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } npbytes += sizeof(struct mldv2_record); if (m != m0) { @@ -3262,24 +3330,28 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) continue; } nrt = (rectype_t)now; - if (nrt == REC_NONE) + if (nrt == REC_NONE) { nrt = (rectype_t)(~mode & REC_FULL); + } if (schanged++ == 0) { crt = nrt; - } else if (crt != nrt) + } else if (crt != nrt) { continue; + } if (!m_append(m, sizeof(struct in6_addr), (void *)&ims->im6s_addr)) { - if (m != m0) + if (m != m0) { m_freem(m); + } MLD_PRINTF(("%s: m_append() failed\n", __func__)); - return (-ENOMEM); + return -ENOMEM; } nallow += !!(crt == REC_ALLOW); nblock += !!(crt == REC_BLOCK); - if (++rsrcs == m0srcs) + if (++rsrcs == m0srcs) { break; + } } /* * If we did not append any tree nodes on this @@ -3295,23 +3367,25 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) MLD_PRINTF(("%s: m_adj(m, -mr)\n", __func__)); m_adj(m, -((int)sizeof( - struct mldv2_record))); + struct mldv2_record))); } continue; } npbytes += (rsrcs * sizeof(struct in6_addr)); - if (crt == REC_ALLOW) + if (crt == REC_ALLOW) { pmr->mr_type = MLD_ALLOW_NEW_SOURCES; - else if (crt == REC_BLOCK) + } else if (crt == REC_BLOCK) { pmr->mr_type = MLD_BLOCK_OLD_SOURCES; + } pmr->mr_numsrc = htons(rsrcs); /* * Count the new group record, and enqueue this * packet if it wasn't already queued. 
*/ m->m_pkthdr.vt_nrecs++; - if (m != m0) + if (m != m0) { IF_ENQUEUE(ifq, m); + } nbytes += npbytes; } while (nims != NULL); drt |= crt; @@ -3321,19 +3395,19 @@ mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__, nallow, nblock)); - return (nbytes); + return nbytes; } static int mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) { - struct ifqueue *gq; - struct mbuf *m; /* pending state-change */ - struct mbuf *m0; /* copy of pending state-change */ - struct mbuf *mt; /* last state-change in packet */ - struct mbuf *n; - int docopy, domerge; - u_int recslen; + struct ifqueue *gq; + struct mbuf *m; /* pending state-change */ + struct mbuf *m0; /* copy of pending state-change */ + struct mbuf *mt; /* last state-change in packet */ + struct mbuf *n; + int docopy, domerge; + u_int recslen; IN6M_LOCK_ASSERT_HELD(inm); @@ -3345,8 +3419,9 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) * If there are further pending retransmissions, make a writable * copy of each queued state-change message before merging. */ - if (inm->in6m_scrv > 0) + if (inm->in6m_scrv > 0) { docopy = 1; + } gq = &inm->in6m_scq; #ifdef MLD_DEBUG @@ -3379,8 +3454,9 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) m->m_pkthdr.vt_nrecs <= MLD_V2_REPORT_MAXRECS) && (mt->m_pkthdr.len + recslen <= - (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) + (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) { domerge = 1; + } } if (!domerge && IF_QFULL(gq)) { @@ -3407,8 +3483,9 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) MLD_PRINTF(("%s: copying 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(m))); m0 = m_dup(m, M_NOWAIT); - if (m0 == NULL) - return (ENOMEM); + if (m0 == NULL) { + return ENOMEM; + } m0->m_nextpkt = NULL; m = m->m_nextpkt; } @@ -3419,7 +3496,7 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) (uint64_t)VM_KERNEL_ADDRPERM(ifscq))); IF_ENQUEUE(ifscq, m0); } else { - struct mbuf *mtl; /* last mbuf of packet mt */ + struct mbuf *mtl; /* last mbuf of packet mt */ MLD_PRINTF(("%s: merging 0x%llx with ifscq tail " "0x%llx)\n", __func__, @@ -3436,7 +3513,7 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) } } - return (0); + return 0; } /* @@ -3445,10 +3522,10 @@ mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) static uint32_t mld_v2_dispatch_general_query(struct mld_ifinfo *mli) { - struct ifnet *ifp; - struct in6_multi *inm; - struct in6_multistep step; - int retval; + struct ifnet *ifp; + struct in6_multi *inm; + struct in6_multistep step; + int retval; MLI_LOCK_ASSERT_HELD(mli); @@ -3461,8 +3538,9 @@ mld_v2_dispatch_general_query(struct mld_ifinfo *mli) IN6_FIRST_MULTI(step, inm); while (inm != NULL) { IN6M_LOCK(inm); - if (inm->in6m_ifp != ifp) + if (inm->in6m_ifp != ifp) { goto next; + } switch (inm->in6m_state) { case MLD_NOT_MEMBER: @@ -3501,10 +3579,10 @@ next: */ if (mli->mli_gq.ifq_head != NULL) { mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY( - MLD_RESPONSE_BURST_INTERVAL); + MLD_RESPONSE_BURST_INTERVAL); } - return (mli->mli_v2_timer); + return mli->mli_v2_timer; } /* @@ -3515,16 +3593,16 @@ next: static void mld_dispatch_packet(struct mbuf *m) { - struct ip6_moptions *im6o; - struct ifnet *ifp; - struct ifnet *oifp = NULL; - struct mbuf *m0; - struct mbuf *md; - struct ip6_hdr *ip6; - struct mld_hdr *mld; - int error; - int off; - int type; + struct ip6_moptions *im6o; + 
struct ifnet *ifp; + struct ifnet *oifp = NULL; + struct mbuf *m0; + struct mbuf *md; + struct ip6_hdr *ip6; + struct mld_hdr *mld; + int error; + int off; + int type; MLD_PRINTF(("%s: transmit 0x%llx\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(m))); @@ -3584,8 +3662,8 @@ mld_dispatch_packet(struct mbuf *m) type = mld->mld_type; if (ifp->if_eflags & IFEF_TXSTART) { - /* - * Use control service class if the outgoing + /* + * Use control service class if the outgoing * interface supports transmit-start model. */ (void) m_set_service_class(m0, MBUF_SC_CTL); @@ -3599,8 +3677,9 @@ mld_dispatch_packet(struct mbuf *m) if (error) { MLD_PRINTF(("%s: ip6_output(0x%llx) = %d\n", __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0), error)); - if (oifp != NULL) + if (oifp != NULL) { ifnet_release(oifp); + } return; } @@ -3632,27 +3711,29 @@ mld_dispatch_packet(struct mbuf *m) static struct mbuf * mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m) { - struct mbuf *mh; - struct mldv2_report *mld; - struct ip6_hdr *ip6; - struct in6_ifaddr *ia; - int mldreclen; + struct mbuf *mh; + struct mldv2_report *mld; + struct ip6_hdr *ip6; + struct in6_ifaddr *ia; + int mldreclen; VERIFY(m->m_flags & M_PKTHDR); /* * RFC3590: OK to send as :: or tentative during DAD. */ - ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); - if (ia == NULL) + ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); + if (ia == NULL) { MLD_PRINTF(("%s: warning: ia is NULL\n", __func__)); + } MGETHDR(mh, M_DONTWAIT, MT_HEADER); if (mh == NULL) { - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } m_freem(m); - return (NULL); + return NULL; } MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report)); @@ -3668,8 +3749,9 @@ mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m) ip6->ip6_vfc &= ~IPV6_VERSION_MASK; ip6->ip6_vfc |= IPV6_VERSION; ip6->ip6_nxt = IPPROTO_ICMPV6; - if (ia != NULL) + if (ia != NULL) { IFA_LOCK(&ia->ia_ifa); + } ip6->ip6_src = ia ? 
ia->ia_addr.sin6_addr : in6addr_any; if (ia != NULL) { IFA_UNLOCK(&ia->ia_ifa); @@ -3691,7 +3773,7 @@ mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m) mh->m_next = m; mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen); - return (mh); + return mh; } #ifdef MLD_DEBUG @@ -3699,20 +3781,20 @@ static const char * mld_rec_type_to_str(const int type) { switch (type) { - case MLD_CHANGE_TO_EXCLUDE_MODE: - return "TO_EX"; - case MLD_CHANGE_TO_INCLUDE_MODE: - return "TO_IN"; - case MLD_MODE_IS_EXCLUDE: - return "MODE_EX"; - case MLD_MODE_IS_INCLUDE: - return "MODE_IN"; - case MLD_ALLOW_NEW_SOURCES: - return "ALLOW_NEW"; - case MLD_BLOCK_OLD_SOURCES: - return "BLOCK_OLD"; - default: - break; + case MLD_CHANGE_TO_EXCLUDE_MODE: + return "TO_EX"; + case MLD_CHANGE_TO_INCLUDE_MODE: + return "TO_IN"; + case MLD_MODE_IS_EXCLUDE: + return "MODE_EX"; + case MLD_MODE_IS_INCLUDE: + return "MODE_IN"; + case MLD_ALLOW_NEW_SOURCES: + return "ALLOW_NEW"; + case MLD_BLOCK_OLD_SOURCES: + return "BLOCK_OLD"; + default: + break; } return "unknown"; } @@ -3721,14 +3803,13 @@ mld_rec_type_to_str(const int type) void mld_init(void) { - MLD_PRINTF(("%s: initializing\n", __func__)); - /* Setup lock group and attribute for mld_mtx */ - mld_mtx_grp_attr = lck_grp_attr_alloc_init(); - mld_mtx_grp = lck_grp_alloc_init("mld_mtx\n", mld_mtx_grp_attr); - mld_mtx_attr = lck_attr_alloc_init(); - lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr); + /* Setup lock group and attribute for mld_mtx */ + mld_mtx_grp_attr = lck_grp_attr_alloc_init(); + mld_mtx_grp = lck_grp_alloc_init("mld_mtx\n", mld_mtx_grp_attr); + mld_mtx_attr = lck_attr_alloc_init(); + lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr); ip6_initpktopts(&mld_po); mld_po.ip6po_hlim = 1; @@ -3737,7 +3818,7 @@ mld_init(void) mld_po.ip6po_flags = IP6PO_DONTFRAG; LIST_INIT(&mli_head); - mli_size = sizeof (struct mld_ifinfo); + mli_size = sizeof(struct mld_ifinfo); mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size, 0, MLI_ZONE_NAME); if (mli_zone == NULL) { diff --git a/bsd/netinet6/mld6.h b/bsd/netinet6/mld6.h index ceb41365c..61c43dc60 100644 --- a/bsd/netinet6/mld6.h +++ b/bsd/netinet6/mld6.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -63,7 +63,7 @@ */ /* Minimum length of any MLD protocol message. 
*/ -#define MLD_MINLEN sizeof(struct icmp6_hdr) +#define MLD_MINLEN sizeof(struct icmp6_hdr) /* * MLD v2 query format. @@ -71,69 +71,69 @@ * (MLDv1 query and host report format). */ struct mldv2_query { - struct icmp6_hdr mld_icmp6_hdr; /* ICMPv6 header */ - struct in6_addr mld_addr; /* address being queried */ - uint8_t mld_misc; /* reserved/suppress/robustness */ - uint8_t mld_qqi; /* querier's query interval */ - uint16_t mld_numsrc; /* number of sources */ + struct icmp6_hdr mld_icmp6_hdr; /* ICMPv6 header */ + struct in6_addr mld_addr; /* address being queried */ + uint8_t mld_misc; /* reserved/suppress/robustness */ + uint8_t mld_qqi; /* querier's query interval */ + uint16_t mld_numsrc; /* number of sources */ /* followed by 1..numsrc source addresses */ } __attribute__((__packed__)); -#define MLD_V2_QUERY_MINLEN sizeof(struct mldv2_query) -#define MLD_MRC_EXP(x) ((ntohs((x)) >> 12) & 0x0007) -#define MLD_MRC_MANT(x) (ntohs((x)) & 0x0fff) -#define MLD_QQIC_EXP(x) (((x) >> 4) & 0x07) -#define MLD_QQIC_MANT(x) ((x) & 0x0f) -#define MLD_QRESV(x) (((x) >> 4) & 0x0f) -#define MLD_SFLAG(x) (((x) >> 3) & 0x01) -#define MLD_QRV(x) ((x) & 0x07) +#define MLD_V2_QUERY_MINLEN sizeof(struct mldv2_query) +#define MLD_MRC_EXP(x) ((ntohs((x)) >> 12) & 0x0007) +#define MLD_MRC_MANT(x) (ntohs((x)) & 0x0fff) +#define MLD_QQIC_EXP(x) (((x) >> 4) & 0x07) +#define MLD_QQIC_MANT(x) ((x) & 0x0f) +#define MLD_QRESV(x) (((x) >> 4) & 0x0f) +#define MLD_SFLAG(x) (((x) >> 3) & 0x01) +#define MLD_QRV(x) ((x) & 0x07) /* * MLDv2 host membership report header. * mld_type: MLDV2_LISTENER_REPORT */ struct mldv2_report { - struct icmp6_hdr mld_icmp6_hdr; + struct icmp6_hdr mld_icmp6_hdr; /* followed by 1..numgrps records */ } __attribute__((__packed__)); /* overlaid on struct icmp6_hdr. */ -#define mld_numrecs mld_icmp6_hdr.icmp6_data16[1] +#define mld_numrecs mld_icmp6_hdr.icmp6_data16[1] struct mldv2_record { - uint8_t mr_type; /* record type */ - uint8_t mr_datalen; /* length of auxiliary data */ - uint16_t mr_numsrc; /* number of sources */ - struct in6_addr mr_addr; /* address being reported */ + uint8_t mr_type; /* record type */ + uint8_t mr_datalen; /* length of auxiliary data */ + uint16_t mr_numsrc; /* number of sources */ + struct in6_addr mr_addr; /* address being reported */ /* followed by 1..numsrc source addresses */ } __attribute__((__packed__)); -#define MLD_V2_REPORT_MAXRECS 65535 +#define MLD_V2_REPORT_MAXRECS 65535 /* * MLDv2 report modes. */ -#define MLD_DO_NOTHING 0 /* don't send a record */ -#define MLD_MODE_IS_INCLUDE 1 /* MODE_IN */ -#define MLD_MODE_IS_EXCLUDE 2 /* MODE_EX */ -#define MLD_CHANGE_TO_INCLUDE_MODE 3 /* TO_IN */ -#define MLD_CHANGE_TO_EXCLUDE_MODE 4 /* TO_EX */ -#define MLD_ALLOW_NEW_SOURCES 5 /* ALLOW_NEW */ -#define MLD_BLOCK_OLD_SOURCES 6 /* BLOCK_OLD */ +#define MLD_DO_NOTHING 0 /* don't send a record */ +#define MLD_MODE_IS_INCLUDE 1 /* MODE_IN */ +#define MLD_MODE_IS_EXCLUDE 2 /* MODE_EX */ +#define MLD_CHANGE_TO_INCLUDE_MODE 3 /* TO_IN */ +#define MLD_CHANGE_TO_EXCLUDE_MODE 4 /* TO_EX */ +#define MLD_ALLOW_NEW_SOURCES 5 /* ALLOW_NEW */ +#define MLD_BLOCK_OLD_SOURCES 6 /* BLOCK_OLD */ /* * MLDv2 query types. */ -#define MLD_V2_GENERAL_QUERY 1 -#define MLD_V2_GROUP_QUERY 2 -#define MLD_V2_GROUP_SOURCE_QUERY 3 +#define MLD_V2_GENERAL_QUERY 1 +#define MLD_V2_GROUP_QUERY 2 +#define MLD_V2_GROUP_SOURCE_QUERY 3 /* * Maximum report interval for MLDv1 host membership reports. 
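The MLD_MRC_* and MLD_QQIC_* macros above unpack the encodings from RFC 3810 (§5.1.3 and §5.1.9): a Maximum Response Code below 32768 is a direct millisecond count, while larger values carry a 3-bit exponent and a 12-bit mantissa. A self-contained decoder for the response code, using local copies of the accessors so the sketch compiles on its own; note the macros take the field in network byte order:

#include <stdint.h>
#include <arpa/inet.h>

#define SK_MRC_EXP(x)   ((ntohs((x)) >> 12) & 0x0007)
#define SK_MRC_MANT(x)  (ntohs((x)) & 0x0fff)

/* Maximum Response Delay in milliseconds (RFC 3810 §5.1.3). */
static uint32_t
mrc_to_msec(uint16_t mrc_net)
{
        uint16_t mrc = ntohs(mrc_net);

        if (mrc < 0x8000)
                return mrc;             /* direct millisecond encoding */
        return ((uint32_t)SK_MRC_MANT(mrc_net) | 0x1000)
            << (SK_MRC_EXP(mrc_net) + 3);
}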
*/ -#define MLD_V1_MAX_RI 10 +#define MLD_V1_MAX_RI 10 /* * MLD_TIMER_SCALE denotes that the MLD code field specifies * time in milliseconds. */ -#define MLD_TIMER_SCALE 1000 +#define MLD_TIMER_SCALE 1000 #endif /* _NETINET6_MLD6_H_ */ diff --git a/bsd/netinet6/mld6_var.h b/bsd/netinet6/mld6_var.h index c7623ea8d..cc7886a02 100644 --- a/bsd/netinet6/mld6_var.h +++ b/bsd/netinet6/mld6_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -76,28 +76,28 @@ struct mld_ifinfo { #else struct mld_ifinfo_u { #endif /* XNU_KERNEL_PRIVATE */ - uint32_t mli_ifindex; /* interface this instance belongs to */ - uint32_t mli_version; /* MLDv1 Host Compatibility Mode */ - uint32_t mli_v1_timer; /* MLDv1 Querier Present timer (s) */ - uint32_t mli_v2_timer; /* MLDv2 General Query (interface) timer (s)*/ - uint32_t mli_flags; /* MLD per-interface flags */ - uint32_t mli_rv; /* MLDv2 Robustness Variable */ - uint32_t mli_qi; /* MLDv2 Query Interval (s) */ - uint32_t mli_qri; /* MLDv2 Query Response Interval (s) */ - uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval (s) */ + uint32_t mli_ifindex; /* interface this instance belongs to */ + uint32_t mli_version; /* MLDv1 Host Compatibility Mode */ + uint32_t mli_v1_timer; /* MLDv1 Querier Present timer (s) */ + uint32_t mli_v2_timer; /* MLDv2 General Query (interface) timer (s)*/ + uint32_t mli_flags; /* MLD per-interface flags */ + uint32_t mli_rv; /* MLDv2 Robustness Variable */ + uint32_t mli_qi; /* MLDv2 Query Interval (s) */ + uint32_t mli_qri; /* MLDv2 Query Response Interval (s) */ + uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval (s) */ uint32_t _pad; }; -#define MLIF_SILENT 0x00000001 /* Do not use MLD on this ifp */ -#define MLIF_USEALLOW 0x00000002 /* Use ALLOW/BLOCK for joins/leaves */ -#define MLIF_PROCESSED 0x00000004 /* Entry has been processed and can be skipped */ +#define MLIF_SILENT 0x00000001 /* Do not use MLD on this ifp */ +#define MLIF_USEALLOW 0x00000002 /* Use ALLOW/BLOCK for joins/leaves */ +#define MLIF_PROCESSED 0x00000004 /* Entry has been processed and can be skipped */ /* * MLD version tag. 
*/ -#define MLD_VERSION_NONE 0 /* Invalid */ -#define MLD_VERSION_1 1 -#define MLD_VERSION_2 2 /* Default */ +#define MLD_VERSION_NONE 0 /* Invalid */ +#define MLD_VERSION_1 1 +#define MLD_VERSION_2 2 /* Default */ #endif /* PRIVATE */ #ifdef BSD_KERNEL_PRIVATE @@ -106,59 +106,59 @@ struct mld_ifinfo_u { #define MLD_DEBUG 1 #ifdef MLD_DEBUG extern int mld_debug; -#define MLD_PRINTF(x) do { if (mld_debug) printf x; } while (0) +#define MLD_PRINTF(x) do { if (mld_debug) printf x; } while (0) #else -#define MLD_PRINTF(x) +#define MLD_PRINTF(x) #endif -#define MLD_RANDOM_DELAY(X) (RandomULong() % (X) + 1) -#define MLD_MAX_STATE_CHANGES 24 /* Max pending changes per group */ +#define MLD_RANDOM_DELAY(X) (RandomULong() % (X) + 1) +#define MLD_MAX_STATE_CHANGES 24 /* Max pending changes per group */ /* * MLD per-group states. */ -#define MLD_NOT_MEMBER 0 /* Can garbage collect group */ -#define MLD_SILENT_MEMBER 1 /* Do not perform MLD for group */ -#define MLD_REPORTING_MEMBER 2 /* MLDv1 we are reporter */ -#define MLD_IDLE_MEMBER 3 /* MLDv1 we reported last */ -#define MLD_LAZY_MEMBER 4 /* MLDv1 other member reporting */ -#define MLD_SLEEPING_MEMBER 5 /* MLDv1 start query response */ -#define MLD_AWAKENING_MEMBER 6 /* MLDv1 group timer will start */ -#define MLD_G_QUERY_PENDING_MEMBER 7 /* MLDv2 group query pending */ -#define MLD_SG_QUERY_PENDING_MEMBER 8 /* MLDv2 source query pending */ -#define MLD_LEAVING_MEMBER 9 /* MLDv2 dying gasp (pending last */ - /* retransmission of INCLUDE {}) */ +#define MLD_NOT_MEMBER 0 /* Can garbage collect group */ +#define MLD_SILENT_MEMBER 1 /* Do not perform MLD for group */ +#define MLD_REPORTING_MEMBER 2 /* MLDv1 we are reporter */ +#define MLD_IDLE_MEMBER 3 /* MLDv1 we reported last */ +#define MLD_LAZY_MEMBER 4 /* MLDv1 other member reporting */ +#define MLD_SLEEPING_MEMBER 5 /* MLDv1 start query response */ +#define MLD_AWAKENING_MEMBER 6 /* MLDv1 group timer will start */ +#define MLD_G_QUERY_PENDING_MEMBER 7 /* MLDv2 group query pending */ +#define MLD_SG_QUERY_PENDING_MEMBER 8 /* MLDv2 source query pending */ +#define MLD_LEAVING_MEMBER 9 /* MLDv2 dying gasp (pending last */ + /* retransmission of INCLUDE {}) */ /* * MLDv2 protocol control variables. 
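MLD_RANDOM_DELAY(X) above yields a value uniformly in [1, X] (RandomULong() % X + 1), which is what staggers responses across hosts, for example when mli_v2_timer is re-armed with MLD_RESPONSE_BURST_INTERVAL earlier in the patch; X must be nonzero. A user-space stand-in using arc4random(), available on BSD/macOS (the slight modulo bias is immaterial at these ranges):

#include <stdint.h>
#include <stdlib.h>

/* Delay in [1, x], mirroring MLD_RANDOM_DELAY(); x must be > 0. */
static uint32_t
random_delay(uint32_t x)
{
        return (arc4random() % x) + 1;
}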
*/ -#define MLD_RV_INIT 2 /* Robustness Variable */ -#define MLD_RV_MIN 1 -#define MLD_RV_MAX 7 +#define MLD_RV_INIT 2 /* Robustness Variable */ +#define MLD_RV_MIN 1 +#define MLD_RV_MAX 7 -#define MLD_QI_INIT 125 /* Query Interval (s) */ -#define MLD_QI_MIN 1 -#define MLD_QI_MAX 255 +#define MLD_QI_INIT 125 /* Query Interval (s) */ +#define MLD_QI_MIN 1 +#define MLD_QI_MAX 255 -#define MLD_QRI_INIT 10 /* Query Response Interval (s) */ -#define MLD_QRI_MIN 1 -#define MLD_QRI_MAX 255 +#define MLD_QRI_INIT 10 /* Query Response Interval (s) */ +#define MLD_QRI_MIN 1 +#define MLD_QRI_MAX 255 -#define MLD_URI_INIT 3 /* Unsolicited Report Interval (s) */ -#define MLD_URI_MIN 0 -#define MLD_URI_MAX 10 +#define MLD_URI_INIT 3 /* Unsolicited Report Interval (s) */ +#define MLD_URI_MIN 0 +#define MLD_URI_MAX 10 -#define MLD_MAX_GS_SOURCES 256 /* # of sources in rx GS query */ -#define MLD_MAX_G_GS_PACKETS 8 /* # of packets to answer G/GS */ -#define MLD_MAX_STATE_CHANGE_PACKETS 8 /* # of packets per state change */ -#define MLD_MAX_RESPONSE_PACKETS 16 /* # of packets for general query */ -#define MLD_MAX_RESPONSE_BURST 4 /* # of responses to send at once */ -#define MLD_RESPONSE_BURST_INTERVAL 1 /* 1 second */ +#define MLD_MAX_GS_SOURCES 256 /* # of sources in rx GS query */ +#define MLD_MAX_G_GS_PACKETS 8 /* # of packets to answer G/GS */ +#define MLD_MAX_STATE_CHANGE_PACKETS 8 /* # of packets per state change */ +#define MLD_MAX_RESPONSE_PACKETS 16 /* # of packets for general query */ +#define MLD_MAX_RESPONSE_BURST 4 /* # of responses to send at once */ +#define MLD_RESPONSE_BURST_INTERVAL 1 /* 1 second */ /* * MLD-specific mbuf flags. */ -#define M_MLDV1 M_PROTO1 /* Packet is MLDv1 */ -#define M_GROUPREC M_PROTO3 /* mbuf chain is a group record */ +#define M_MLDV1 M_PROTO1 /* Packet is MLDv1 */ +#define M_GROUPREC M_PROTO3 /* mbuf chain is a group record */ /* * Leading space for MLDv2 reports inside MTU. @@ -171,70 +171,70 @@ extern int mld_debug; * We now put the MLDv2 report header in the initial mbuf containing * the IPv6 header. 
*/ -#define MLD_MTUSPACE (sizeof(struct ip6_hdr) + sizeof(struct mld_raopt) + \ - sizeof(struct icmp6_hdr)) +#define MLD_MTUSPACE (sizeof(struct ip6_hdr) + sizeof(struct mld_raopt) + \ + sizeof(struct icmp6_hdr)) struct mld_ifinfo { decl_lck_mtx_data(, mli_lock); - uint32_t mli_refcnt; /* reference count */ - uint32_t mli_debug; /* see ifa_debug flags */ + uint32_t mli_refcnt; /* reference count */ + uint32_t mli_debug; /* see ifa_debug flags */ LIST_ENTRY(mld_ifinfo) mli_link; - struct ifnet *mli_ifp; /* interface this instance belongs to */ - uint32_t mli_version; /* MLDv1 Host Compatibility Mode */ - uint32_t mli_v1_timer; /* MLDv1 Querier Present timer (s) */ - uint32_t mli_v2_timer; /* MLDv2 General Query (interface) timer (s)*/ - uint32_t mli_flags; /* MLD per-interface flags */ - uint32_t mli_rv; /* MLDv2 Robustness Variable */ - uint32_t mli_qi; /* MLDv2 Query Interval (s) */ - uint32_t mli_qri; /* MLDv2 Query Response Interval (s) */ - uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval (s) */ - SLIST_HEAD(,in6_multi) mli_relinmhead; /* released groups */ - struct ifqueue mli_gq; /* queue of general query responses */ + struct ifnet *mli_ifp; /* interface this instance belongs to */ + uint32_t mli_version; /* MLDv1 Host Compatibility Mode */ + uint32_t mli_v1_timer; /* MLDv1 Querier Present timer (s) */ + uint32_t mli_v2_timer; /* MLDv2 General Query (interface) timer (s)*/ + uint32_t mli_flags; /* MLD per-interface flags */ + uint32_t mli_rv; /* MLDv2 Robustness Variable */ + uint32_t mli_qi; /* MLDv2 Query Interval (s) */ + uint32_t mli_qri; /* MLDv2 Query Response Interval (s) */ + uint32_t mli_uri; /* MLDv2 Unsolicited Report Interval (s) */ + SLIST_HEAD(, in6_multi) mli_relinmhead; /* released groups */ + struct ifqueue mli_gq; /* queue of general query responses */ struct ifqueue mli_v1q; /* MLDv1 message queue */ }; -#define MLI_LOCK_ASSERT_HELD(_mli) \ +#define MLI_LOCK_ASSERT_HELD(_mli) \ LCK_MTX_ASSERT(&(_mli)->mli_lock, LCK_MTX_ASSERT_OWNED) -#define MLI_LOCK_ASSERT_NOTHELD(_mli) \ +#define MLI_LOCK_ASSERT_NOTHELD(_mli) \ LCK_MTX_ASSERT(&(_mli)->mli_lock, LCK_MTX_ASSERT_NOTOWNED) -#define MLI_LOCK(_mli) \ +#define MLI_LOCK(_mli) \ lck_mtx_lock(&(_mli)->mli_lock) -#define MLI_LOCK_SPIN(_mli) \ +#define MLI_LOCK_SPIN(_mli) \ lck_mtx_lock_spin(&(_mli)->mli_lock) -#define MLI_CONVERT_LOCK(_mli) do { \ - MLI_LOCK_ASSERT_HELD(_mli); \ - lck_mtx_convert_spin(&(_mli)->mli_lock); \ +#define MLI_CONVERT_LOCK(_mli) do { \ + MLI_LOCK_ASSERT_HELD(_mli); \ + lck_mtx_convert_spin(&(_mli)->mli_lock); \ } while (0) -#define MLI_UNLOCK(_mli) \ +#define MLI_UNLOCK(_mli) \ lck_mtx_unlock(&(_mli)->mli_lock) -#define MLI_ADDREF(_mli) \ +#define MLI_ADDREF(_mli) \ mli_addref(_mli, 0) -#define MLI_ADDREF_LOCKED(_mli) \ +#define MLI_ADDREF_LOCKED(_mli) \ mli_addref(_mli, 1) -#define MLI_REMREF(_mli) \ +#define MLI_REMREF(_mli) \ mli_remref(_mli) /* * Per-link MLD context. 
*/ -#define MLD_IFINFO(ifp) ((ifp)->if_mli) +#define MLD_IFINFO(ifp) ((ifp)->if_mli) /* * MLD timer schedule parameters */ struct mld_tparams { - int qpt; /* querier_present_timers_running6 */ - int it; /* interface_timers_running6 */ - int cst; /* current_state_timers_running6 */ - int sct; /* state_change_timers_running6 */ + int qpt; /* querier_present_timers_running6 */ + int it; /* interface_timers_running6 */ + int cst; /* current_state_timers_running6 */ + int sct; /* state_change_timers_running6 */ }; extern int mld_change_state(struct in6_multi *, struct mld_tparams *, diff --git a/bsd/netinet6/nd6.c b/bsd/netinet6/nd6.c index c0c9d9a56..5b8a5d477 100644 --- a/bsd/netinet6/nd6.c +++ b/bsd/netinet6/nd6.c @@ -105,22 +105,22 @@ #include "loop.h" -#define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */ -#define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */ +#define ND6_SLOWTIMER_INTERVAL (60 * 60) /* 1 hour */ +#define ND6_RECALC_REACHTM_INTERVAL (60 * 120) /* 2 hours */ -#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) +#define equal(a1, a2) (bcmp((caddr_t)(a1), (caddr_t)(a2), (a1)->sa_len) == 0) /* timer values */ -int nd6_prune = 1; /* walk list every 1 seconds */ -int nd6_prune_lazy = 5; /* lazily walk list every 5 seconds */ -int nd6_delay = 5; /* delay first probe time 5 second */ -int nd6_umaxtries = 3; /* maximum unicast query */ -int nd6_mmaxtries = 3; /* maximum multicast query */ -int nd6_useloopback = 1; /* use loopback interface for local traffic */ -int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */ +int nd6_prune = 1; /* walk list every 1 seconds */ +int nd6_prune_lazy = 5; /* lazily walk list every 5 seconds */ +int nd6_delay = 5; /* delay first probe time 5 second */ +int nd6_umaxtries = 3; /* maximum unicast query */ +int nd6_mmaxtries = 3; /* maximum multicast query */ +int nd6_useloopback = 1; /* use loopback interface for local traffic */ +int nd6_gctimer = (60 * 60 * 24); /* 1 day: garbage collection timer */ /* preventing too many loops in ND option parsing */ -int nd6_maxndopt = 10; /* max # of ND options allowed */ +int nd6_maxndopt = 10; /* max # of ND options allowed */ int nd6_maxqueuelen = 1; /* max # of packets cached in unresolved ND entries */ @@ -131,9 +131,9 @@ int nd6_debug = 0; #endif int nd6_optimistic_dad = - (ND6_OPTIMISTIC_DAD_LINKLOCAL|ND6_OPTIMISTIC_DAD_AUTOCONF| - ND6_OPTIMISTIC_DAD_TEMPORARY|ND6_OPTIMISTIC_DAD_DYNAMIC| - ND6_OPTIMISTIC_DAD_SECURED|ND6_OPTIMISTIC_DAD_MANUAL); + (ND6_OPTIMISTIC_DAD_LINKLOCAL | ND6_OPTIMISTIC_DAD_AUTOCONF | + ND6_OPTIMISTIC_DAD_TEMPORARY | ND6_OPTIMISTIC_DAD_DYNAMIC | + ND6_OPTIMISTIC_DAD_SECURED | ND6_OPTIMISTIC_DAD_MANUAL); /* for debugging? */ static int nd6_inuse, nd6_allocated; @@ -169,9 +169,9 @@ struct llinfo_nd6 llinfo_nd6 = { .ln_prev = &llinfo_nd6, }; -static lck_grp_attr_t *nd_if_lock_grp_attr = NULL; -static lck_grp_t *nd_if_lock_grp = NULL; -static lck_attr_t *nd_if_lock_attr = NULL; +static lck_grp_attr_t *nd_if_lock_grp_attr = NULL; +static lck_grp_t *nd_if_lock_grp = NULL; +static lck_attr_t *nd_if_lock_attr = NULL; /* Protected by nd6_mutex */ struct nd_drhead nd_defrouter; @@ -190,9 +190,9 @@ struct nd_prhead nd_prefix = { 0 }; * non-zero. The increment happens on various places when we allocate * new ND entries, default routers, prefixes and addresses. 
*/ -static int nd6_timeout_run; /* nd6_timeout is scheduled to run */ +static int nd6_timeout_run; /* nd6_timeout is scheduled to run */ static void nd6_timeout(void *); -int nd6_sched_timeout_want; /* demand count for timer to be sched */ +int nd6_sched_timeout_want; /* demand count for timer to be sched */ static boolean_t nd6_fast_timer_on = FALSE; /* Serialization variables for nd6_service(), protected by rnh_lock */ @@ -226,28 +226,28 @@ static int nd6_sysctl_prlist SYSCTL_HANDLER_ARGS; /* * Insertion and removal from llinfo_nd6 must be done with rnh_lock held. */ -#define LN_DEQUEUE(_ln) do { \ - LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); \ - RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \ - (_ln)->ln_next->ln_prev = (_ln)->ln_prev; \ - (_ln)->ln_prev->ln_next = (_ln)->ln_next; \ - (_ln)->ln_prev = (_ln)->ln_next = NULL; \ - (_ln)->ln_flags &= ~ND6_LNF_IN_USE; \ +#define LN_DEQUEUE(_ln) do { \ + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); \ + RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \ + (_ln)->ln_next->ln_prev = (_ln)->ln_prev; \ + (_ln)->ln_prev->ln_next = (_ln)->ln_next; \ + (_ln)->ln_prev = (_ln)->ln_next = NULL; \ + (_ln)->ln_flags &= ~ND6_LNF_IN_USE; \ } while (0) -#define LN_INSERTHEAD(_ln) do { \ - LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); \ - RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \ - (_ln)->ln_next = llinfo_nd6.ln_next; \ - llinfo_nd6.ln_next = (_ln); \ - (_ln)->ln_prev = &llinfo_nd6; \ - (_ln)->ln_next->ln_prev = (_ln); \ - (_ln)->ln_flags |= ND6_LNF_IN_USE; \ +#define LN_INSERTHEAD(_ln) do { \ + LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); \ + RT_LOCK_ASSERT_HELD((_ln)->ln_rt); \ + (_ln)->ln_next = llinfo_nd6.ln_next; \ + llinfo_nd6.ln_next = (_ln); \ + (_ln)->ln_prev = &llinfo_nd6; \ + (_ln)->ln_next->ln_prev = (_ln); \ + (_ln)->ln_flags |= ND6_LNF_IN_USE; \ } while (0) static struct zone *llinfo_nd6_zone; -#define LLINFO_ND6_ZONE_MAX 256 /* maximum elements in zone */ -#define LLINFO_ND6_ZONE_NAME "llinfo_nd6" /* name for zone */ +#define LLINFO_ND6_ZONE_MAX 256 /* maximum elements in zone */ +#define LLINFO_ND6_ZONE_NAME "llinfo_nd6" /* name for zone */ extern int tvtohz(struct timeval *); @@ -256,19 +256,19 @@ static int nd6_init_done; SYSCTL_DECL(_net_inet6_icmp6); SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_DRLIST, nd6_drlist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - nd6_sysctl_drlist, "S,in6_defrouter", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + nd6_sysctl_drlist, "S,in6_defrouter", ""); SYSCTL_PROC(_net_inet6_icmp6, ICMPV6CTL_ND6_PRLIST, nd6_prlist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - nd6_sysctl_prlist, "S,in6_defrouter", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + nd6_sysctl_prlist, "S,in6_defrouter", ""); SYSCTL_DECL(_net_inet6_ip6); static int ip6_maxchainsent = 0; SYSCTL_INT(_net_inet6_ip6, OID_AUTO, maxchainsent, - CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxchainsent, 0, - "use dlil_output_list"); + CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_maxchainsent, 0, + "use dlil_output_list"); void nd6_init(void) @@ -278,9 +278,10 @@ nd6_init(void) VERIFY(!nd6_init_done); all1_sa.sin6_family = AF_INET6; - all1_sa.sin6_len = sizeof (struct sockaddr_in6); - for (i = 0; i < sizeof (all1_sa.sin6_addr); i++) + all1_sa.sin6_len = sizeof(struct sockaddr_in6); + for (i = 0; i < sizeof(all1_sa.sin6_addr); i++) { all1_sa.sin6_addr.s6_addr[i] = 0xff; + } /* initialization of the default router list */ TAILQ_INIT(&nd_defrouter); @@ -289,11 +290,12 @@ nd6_init(void) nd_if_lock_grp = lck_grp_alloc_init("nd_if_lock", 
nd_if_lock_grp_attr); nd_if_lock_attr = lck_attr_alloc_init(); - llinfo_nd6_zone = zinit(sizeof (struct llinfo_nd6), - LLINFO_ND6_ZONE_MAX * sizeof (struct llinfo_nd6), 0, + llinfo_nd6_zone = zinit(sizeof(struct llinfo_nd6), + LLINFO_ND6_ZONE_MAX * sizeof(struct llinfo_nd6), 0, LLINFO_ND6_ZONE_NAME); - if (llinfo_nd6_zone == NULL) + if (llinfo_nd6_zone == NULL) { panic("%s: failed allocating llinfo_nd6_zone", __func__); + } zone_change(llinfo_nd6_zone, Z_EXPAND, TRUE); zone_change(llinfo_nd6_zone, Z_CALLERACCT, FALSE); @@ -315,10 +317,11 @@ nd6_llinfo_alloc(int how) ln = (how == M_WAITOK) ? zalloc(llinfo_nd6_zone) : zalloc_noblock(llinfo_nd6_zone); - if (ln != NULL) - bzero(ln, sizeof (*ln)); + if (ln != NULL) { + bzero(ln, sizeof(*ln)); + } - return (ln); + return ln; } static void @@ -339,8 +342,9 @@ nd6_llinfo_free(void *arg) /* Purge any link-layer info caching */ VERIFY(ln->ln_rt->rt_llinfo == ln); - if (ln->ln_rt->rt_llinfo_purge != NULL) + if (ln->ln_rt->rt_llinfo_purge != NULL) { ln->ln_rt->rt_llinfo_purge(ln->ln_rt); + } zfree(llinfo_nd6_zone, ln); } @@ -368,7 +372,7 @@ nd6_llinfo_get_ri(struct rtentry *rt, struct rt_reach_info *ri) struct if_llreach *lr = ln->ln_llreach; if (lr == NULL) { - bzero(ri, sizeof (*ri)); + bzero(ri, sizeof(*ri)); ri->ri_rssi = IFNET_RSSI_UNKNOWN; ri->ri_lqm = IFNET_LQM_THRESH_OFF; ri->ri_npm = IFNET_NPM_THRESH_UNKNOWN; @@ -390,7 +394,7 @@ nd6_llinfo_get_iflri(struct rtentry *rt, struct ifnet_llreach_info *iflri) struct if_llreach *lr = ln->ln_llreach; if (lr == NULL) { - bzero(iflri, sizeof (*iflri)); + bzero(iflri, sizeof(*iflri)); iflri->iflri_rssi = IFNET_RSSI_UNKNOWN; iflri->iflri_lqm = IFNET_LQM_THRESH_OFF; iflri->iflri_npm = IFNET_NPM_THRESH_UNKNOWN; @@ -460,7 +464,7 @@ ndcache_state2str(short ndp_state) /* Init'd to UNKNOWN */ break; } - return ndp_state_str; + return ndp_state_str; } void @@ -491,7 +495,7 @@ ln_getexpire(struct llinfo_nd6 *ln) } else { expiry = 0; } - return (expiry); + return expiry; } void @@ -600,7 +604,7 @@ nd6_setmtu(struct ifnet *ifp) void nd6_option_init(void *opt, int icmp6len, union nd_opts *ndopts) { - bzero(ndopts, sizeof (*ndopts)); + bzero(ndopts, sizeof(*ndopts)); ndopts->nd_opts_search = (struct nd_opt_hdr *)opt; ndopts->nd_opts_last = (struct nd_opt_hdr *)(((u_char *)opt) + icmp6len); @@ -620,21 +624,25 @@ nd6_option(union nd_opts *ndopts) struct nd_opt_hdr *nd_opt; int olen; - if (!ndopts) + if (!ndopts) { panic("ndopts == NULL in nd6_option\n"); - if (!ndopts->nd_opts_last) + } + if (!ndopts->nd_opts_last) { panic("uninitialized ndopts in nd6_option\n"); - if (!ndopts->nd_opts_search) - return (NULL); - if (ndopts->nd_opts_done) - return (NULL); + } + if (!ndopts->nd_opts_search) { + return NULL; + } + if (ndopts->nd_opts_done) { + return NULL; + } nd_opt = ndopts->nd_opts_search; /* make sure nd_opt_len is inside the buffer */ if ((caddr_t)&nd_opt->nd_opt_len >= (caddr_t)ndopts->nd_opts_last) { - bzero(ndopts, sizeof (*ndopts)); - return (NULL); + bzero(ndopts, sizeof(*ndopts)); + return NULL; } olen = nd_opt->nd_opt_len << 3; @@ -643,21 +651,21 @@ nd6_option(union nd_opts *ndopts) * Message validation requires that all included * options have a length that is greater than zero. 
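nd6_option() above enforces the two RFC 4861 invariants its comments call out: the option length field counts 8-octet units (hence the << 3), and a zero length must abort parsing before the walk can loop forever. A minimal bounds-checked walk over the same layout, with simplified stand-in types rather than the kernel's nd_opt_hdr/union nd_opts:

#include <stddef.h>
#include <stdint.h>

struct opt_hdr {
        uint8_t type;
        uint8_t len;                    /* length in units of 8 octets */
};

static const struct opt_hdr *
next_option(const uint8_t **cur, const uint8_t *end)
{
        const struct opt_hdr *opt;
        size_t olen;

        if (*cur + sizeof(*opt) > end)
                return NULL;            /* header would overrun the buffer */
        opt = (const struct opt_hdr *)*cur;
        olen = (size_t)opt->len << 3;   /* convert to octets */
        if (olen == 0 || *cur + olen > end)
                return NULL;            /* malformed option: reject message */
        *cur += olen;
        return opt;
}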
*/ - bzero(ndopts, sizeof (*ndopts)); - return (NULL); + bzero(ndopts, sizeof(*ndopts)); + return NULL; } ndopts->nd_opts_search = (struct nd_opt_hdr *)((caddr_t)nd_opt + olen); if (ndopts->nd_opts_search > ndopts->nd_opts_last) { /* option overruns the end of buffer, invalid */ - bzero(ndopts, sizeof (*ndopts)); - return (NULL); + bzero(ndopts, sizeof(*ndopts)); + return NULL; } else if (ndopts->nd_opts_search == ndopts->nd_opts_last) { /* reached the end of options chain */ ndopts->nd_opts_done = 1; ndopts->nd_opts_search = NULL; } - return (nd_opt); + return nd_opt; } /* @@ -671,12 +679,15 @@ nd6_options(union nd_opts *ndopts) struct nd_opt_hdr *nd_opt; int i = 0; - if (ndopts == NULL) + if (ndopts == NULL) { panic("ndopts == NULL in nd6_options"); - if (ndopts->nd_opts_last == NULL) + } + if (ndopts->nd_opts_last == NULL) { panic("uninitialized ndopts in nd6_options"); - if (ndopts->nd_opts_search == NULL) - return (0); + } + if (ndopts->nd_opts_search == NULL) { + return 0; + } while (1) { nd_opt = nd6_option(ndopts); @@ -686,12 +697,13 @@ nd6_options(union nd_opts *ndopts) * options have a length that is greater than zero. */ icmp6stat.icp6s_nd_badopt++; - bzero(ndopts, sizeof (*ndopts)); - return (-1); + bzero(ndopts, sizeof(*ndopts)); + return -1; } - if (nd_opt == NULL) + if (nd_opt == NULL) { goto skip1; + } switch (nd_opt->nd_opt_type) { case ND_OPT_SOURCE_LINKADDR: @@ -739,11 +751,12 @@ skip1: break; } - if (ndopts->nd_opts_done) + if (ndopts->nd_opts_done) { break; + } } - return (0); + return 0; } struct nd6svc_arg { @@ -783,7 +796,7 @@ nd6_service(void *arg) __func__, ap->draining ? "drainer" : "timer", nd6_service_waiters)); nd6_service_waiters++; - (void) msleep(nd6_service_wc, rnh_lock, (PZERO-1), + (void) msleep(nd6_service_wc, rnh_lock, (PZERO - 1), __func__, NULL); LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); } @@ -833,7 +846,7 @@ again: sizeof(nd6_ndfailure.link_data.if_name)); ev_msg.dv[0].data_ptr = &nd6_ndfailure; ev_msg.dv[0].data_length = - sizeof(nd6_ndfailure); + sizeof(nd6_ndfailure); dlil_post_complete_msg(NULL, &ev_msg); } @@ -907,10 +920,11 @@ again: * If we are draining, immediately purge non-static * entries without oustanding route refcnt. */ - if (ln->ln_state > ND6_LLINFO_INCOMPLETE) + if (ln->ln_state > ND6_LLINFO_INCOMPLETE) { ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_STALE); - else + } else { ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_PURGE); + } ln_setexpire(ln, timenow); } @@ -1143,8 +1157,9 @@ again: struct llinfo_nd6 *next = ln->ln_next; RT_LOCK_SPIN(rt); - if (ln->ln_flags & ND6_LNF_TIMER_SKIP) + if (ln->ln_flags & ND6_LNF_TIMER_SKIP) { ln->ln_flags &= ~ND6_LNF_TIMER_SKIP; + } RT_UNLOCK(rt); ln = next; } @@ -1205,10 +1220,11 @@ again: TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry); } } else { - if (dr->expire == 0 || (dr->stateflags & NDDRF_STATIC)) + if (dr->expire == 0 || (dr->stateflags & NDDRF_STATIC)) { ap->sticky++; - else + } else { ap->aging_lazy++; + } } } @@ -1305,15 +1321,16 @@ addrloop: * case here and make sure we schedule the regular timer if an * interface address is about to expire. 
*/ - if (IFA6_IS_INVALID(ia6, timenow + 3 * nd6_prune_lazy)) + if (IFA6_IS_INVALID(ia6, timenow + 3 * nd6_prune_lazy)) { ap->aging++; - else + } else { ap->aging_lazy++; + } IFA_LOCK_ASSERT_HELD(&ia6->ia_ifa); if (IFA6_IS_DEPRECATED(ia6, timenow)) { ia6->ia6_flags |= IN6_IFF_DEPRECATED; - if((oldflags & IN6_IFF_DEPRECATED) == 0) { + if ((oldflags & IN6_IFF_DEPRECATED) == 0) { /* * Only enqueue the Deprecated event when the address just * becomes deprecated. @@ -1334,7 +1351,6 @@ addrloop: if (ip6_use_tempaddr && (ia6->ia6_flags & IN6_IFF_TEMPORARY) != 0 && (oldflags & IN6_IFF_DEPRECATED) == 0) { - /* see NOTE above */ IFA_UNLOCK(&ia6->ia_ifa); lck_rw_done(&in6_ifaddr_rwlock); @@ -1404,17 +1420,18 @@ addrloop: in6_ifstat_inc(pr->ndpr_ifp, ifs6_pfx_expiry_cnt); in6_event_enqueue_nwk_wq_entry(IN6_NDP_PFX_EXPIRY, pr->ndpr_ifp, &pr->ndpr_prefix.sin6_addr, - 0); + 0); NDPR_REMREF(pr); pfxlist_onlink_check(); pr = nd_prefix.lh_first; ap->killed++; } else { if (pr->ndpr_expire == 0 || - (pr->ndpr_stateflags & NDPRF_STATIC)) + (pr->ndpr_stateflags & NDPRF_STATIC)) { ap->sticky++; - else + } else { ap->aging_lazy++; + } pr->ndpr_stateflags |= NDPRF_PROCESSED_SERVICE; NDPR_UNLOCK(pr); pr = pr->ndpr_next; @@ -1447,14 +1464,14 @@ nd6_drain(void *arg) lck_mtx_lock(rnh_lock); nd6_need_draining = 1; - nd6_sched_timeout(NULL, NULL); + nd6_sched_timeout(NULL, NULL); lck_mtx_unlock(rnh_lock); } /* * We use the ``arg'' variable to decide whether or not the timer we're * running is the fast timer. We do this to reset the nd6_fast_timer_on - * variable so that later we don't end up ignoring a ``fast timer'' + * variable so that later we don't end up ignoring a ``fast timer'' * request if the 5 second timer is running (see nd6_sched_timeout). */ static void @@ -1464,7 +1481,7 @@ nd6_timeout(void *arg) uint32_t buf; lck_mtx_lock(rnh_lock); - bzero(&sarg, sizeof (sarg)); + bzero(&sarg, sizeof(sarg)); if (nd6_need_draining != 0) { nd6_need_draining = 0; sarg.draining = 1; @@ -1476,8 +1493,9 @@ nd6_timeout(void *arg) /* re-arm the timer if there's work to do */ nd6_timeout_run--; VERIFY(nd6_timeout_run >= 0 && nd6_timeout_run < 2); - if (arg == &nd6_fast_timer_on) + if (arg == &nd6_fast_timer_on) { nd6_fast_timer_on = FALSE; + } if (sarg.aging_lazy > 0 || sarg.aging > 0 || nd6_sched_timeout_want) { struct timeval atv, ltv, *leeway; int lazy = nd6_prune_lazy; @@ -1512,7 +1530,7 @@ nd6_sched_timeout(struct timeval *atv, struct timeval *ltv) tv.tv_usec = 0; tv.tv_sec = MAX(nd6_prune, 1); atv = &tv; - ltv = NULL; /* ignore leeway */ + ltv = NULL; /* ignore leeway */ } /* see comments on top of this file */ if (nd6_timeout_run == 0) { @@ -1535,7 +1553,7 @@ nd6_sched_timeout(struct timeval *atv, struct timeval *ltv) } nd6_timeout_run++; nd6_sched_timeout_want = 0; - } else if (nd6_timeout_run == 1 && ltv == NULL && + } else if (nd6_timeout_run == 1 && ltv == NULL && nd6_fast_timer_on == FALSE) { nd6log2((LOG_DEBUG, "%s: fast timer scheduled in " "T+%llus.%lluu (demand %d)\n", __func__, @@ -1573,13 +1591,13 @@ nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list, struct kev_nd6_ra_data nd6_ra_msg_data; struct nd_prefix_list *itr = prefix_list; - bzero(&ev_msg, sizeof (struct kev_msg)); - ev_msg.vendor_code = KEV_VENDOR_APPLE; - ev_msg.kev_class = KEV_NETWORK_CLASS; - ev_msg.kev_subclass = KEV_ND6_SUBCLASS; - ev_msg.event_code = code; + bzero(&ev_msg, sizeof(struct kev_msg)); + ev_msg.vendor_code = KEV_VENDOR_APPLE; + ev_msg.kev_class = KEV_NETWORK_CLASS; + ev_msg.kev_subclass = KEV_ND6_SUBCLASS; + 
ev_msg.event_code = code; - bzero(&nd6_ra_msg_data, sizeof (nd6_ra_msg_data)); + bzero(&nd6_ra_msg_data, sizeof(nd6_ra_msg_data)); if (mtu > 0 && mtu >= IPV6_MMTU) { nd6_ra_msg_data.mtu = mtu; @@ -1593,7 +1611,7 @@ nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list, while (itr != NULL && nd6_ra_msg_data.list_index < list_length) { bcopy(&itr->pr.ndpr_prefix, &nd6_ra_msg_data.prefix.prefix, - sizeof (nd6_ra_msg_data.prefix.prefix)); + sizeof(nd6_ra_msg_data.prefix.prefix)); nd6_ra_msg_data.prefix.raflags = itr->pr.ndpr_raf; nd6_ra_msg_data.prefix.prefixlen = itr->pr.ndpr_plen; nd6_ra_msg_data.prefix.origin = PR_ORIG_RA; @@ -1605,13 +1623,13 @@ nd6_post_msg(u_int32_t code, struct nd_prefix_list *prefix_list, nd6_ra_msg_data.prefix.if_index = itr->pr.ndpr_ifp->if_index; /* send the message up */ - ev_msg.dv[0].data_ptr = &nd6_ra_msg_data; - ev_msg.dv[0].data_length = sizeof (nd6_ra_msg_data); - ev_msg.dv[1].data_length = 0; + ev_msg.dv[0].data_ptr = &nd6_ra_msg_data; + ev_msg.dv[0].data_length = sizeof(nd6_ra_msg_data); + ev_msg.dv[1].data_length = 0; dlil_post_complete_msg(NULL, &ev_msg); /* clean up for the next prefix */ - bzero(&nd6_ra_msg_data.prefix, sizeof (nd6_ra_msg_data.prefix)); + bzero(&nd6_ra_msg_data.prefix, sizeof(nd6_ra_msg_data.prefix)); itr = itr->next; nd6_ra_msg_data.list_index++; } @@ -1660,8 +1678,9 @@ regen_tmpaddr(struct in6_ifaddr *ia6) if ((it6->ia6_flags & IN6_IFF_TEMPORARY) != 0 && !IFA6_IS_DEPRECATED(it6, timenow)) { IFA_UNLOCK(ifa); - if (public_ifa6 != NULL) + if (public_ifa6 != NULL) { IFA_REMREF(&public_ifa6->ia_ifa); + } public_ifa6 = NULL; break; } @@ -1673,10 +1692,11 @@ regen_tmpaddr(struct in6_ifaddr *ia6) * address with the prefix. */ if (!IFA6_IS_DEPRECATED(it6, timenow)) { - IFA_ADDREF_LOCKED(ifa); /* for public_ifa6 */ + IFA_ADDREF_LOCKED(ifa); /* for public_ifa6 */ IFA_UNLOCK(ifa); - if (public_ifa6 != NULL) + if (public_ifa6 != NULL) { IFA_REMREF(&public_ifa6->ia_ifa); + } public_ifa6 = it6; } else { IFA_UNLOCK(ifa); @@ -1691,13 +1711,13 @@ regen_tmpaddr(struct in6_ifaddr *ia6) log(LOG_NOTICE, "regen_tmpaddr: failed to create a new" " tmp addr,errno=%d\n", e); IFA_REMREF(&public_ifa6->ia_ifa); - return (-1); + return -1; } IFA_REMREF(&public_ifa6->ia_ifa); - return (0); + return 0; } - return (-1); + return -1; } /* @@ -1718,8 +1738,9 @@ nd6_purge(struct ifnet *ifp) /* Nuke default router list entries toward ifp */ lck_mtx_lock(nd6_mutex); TAILQ_FOREACH_SAFE(dr, &nd_defrouter, dr_entry, ndr) { - if (dr->ifp != ifp) + if (dr->ifp != ifp) { continue; + } /* * Remove the entry from default router list * and add it to the temp list. 
@@ -1743,10 +1764,11 @@ nd6_purge(struct ifnet *ifp) */ TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); - if (dr->stateflags & NDDRF_INSTALLED) + if (dr->stateflags & NDDRF_INSTALLED) { TAILQ_INSERT_TAIL(&nd_defrouter_tmp, dr, dr_entry); - else + } else { TAILQ_INSERT_HEAD(&nd_defrouter_tmp, dr, dr_entry); + } } /* @@ -1769,7 +1791,7 @@ nd6_purge(struct ifnet *ifp) for (pr = nd_prefix.lh_first; pr; pr = npr) { NDPR_LOCK(pr); npr = pr->ndpr_next; - if (pr->ndpr_ifp == ifp && + if (pr->ndpr_ifp == ifp && !(pr->ndpr_stateflags & NDPRF_DEFUNCT)) { /* * Because if_detach() does *not* release prefixes @@ -1797,8 +1819,9 @@ nd6_purge(struct ifnet *ifp) NDPR_UNLOCK(pr); } } - if (removed) + if (removed) { pfxlist_onlink_check(); + } lck_mtx_unlock(nd6_mutex); /* cancel default outgoing interface setting */ @@ -1872,8 +1895,8 @@ nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked) struct sockaddr_in6 sin6; unsigned int ifscope; - bzero(&sin6, sizeof (sin6)); - sin6.sin6_len = sizeof (struct sockaddr_in6); + bzero(&sin6, sizeof(sin6)); + sin6.sin6_len = sizeof(struct sockaddr_in6); sin6.sin6_family = AF_INET6; sin6.sin6_addr = *addr6; @@ -1896,10 +1919,11 @@ nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked) */ if (create) { RT_UNLOCK(rt); - if (rt_locked) + if (rt_locked) { rtfree_locked(rt); - else + } else { rtfree(rt); + } rt = NULL; } } @@ -1918,8 +1942,9 @@ nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked) * be covered by our own prefix. */ ifa = ifaof_ifpforaddr(SA(&sin6), ifp); - if (ifa == NULL) - return (NULL); + if (ifa == NULL) { + return NULL; + } /* * Create a new route. RTF_LLINFO is necessary @@ -1927,8 +1952,9 @@ nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked) * destination in nd6_rtrequest which will be * called in rtrequest via ifa->ifa_rtrequest. */ - if (!rt_locked) + if (!rt_locked) { lck_mtx_lock(rnh_lock); + } IFA_LOCK_SPIN(ifa); ifa_flags = ifa->ifa_flags; IFA_UNLOCK(ifa); @@ -1936,16 +1962,19 @@ nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked) SA(&sin6), ifa->ifa_addr, SA(&all1_sa), (ifa_flags | RTF_HOST | RTF_LLINFO) & ~RTF_CLONING, &rt, ifscope)) != 0) { - if (e != EEXIST) + if (e != EEXIST) { log(LOG_ERR, "%s: failed to add route " "for a neighbor(%s), errno=%d\n", __func__, ip6_sprintf(addr6), e); + } } - if (!rt_locked) + if (!rt_locked) { lck_mtx_unlock(rnh_lock); + } IFA_REMREF(ifa); - if (rt == NULL) - return (NULL); + if (rt == NULL) { + return NULL; + } RT_LOCK(rt); if (rt->rt_llinfo) { @@ -1966,7 +1995,7 @@ nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked) } } } else { - return (NULL); + return NULL; } } RT_LOCK_ASSERT_HELD(rt); @@ -2003,12 +2032,12 @@ nd6_lookup(struct in6_addr *addr6, int create, struct ifnet *ifp, int rt_locked) ifp ? if_name(ifp) : "unspec"); /* xxx more logs... kazu */ } - return (NULL); + return NULL; } /* * Caller needs to release reference and call RT_UNLOCK(rt). */ - return (rt); + return rt; } /* @@ -2037,14 +2066,17 @@ nd6_is_new_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp) * content (XXX). 
*/ sin6_copy = *addr; - if (sa6_recoverscope(&sin6_copy, FALSE)) - return (0); /* XXX: should be impossible */ - if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone)) - return (0); - if (sin6_copy.sin6_scope_id == zone) - return (1); - else - return (0); + if (sa6_recoverscope(&sin6_copy, FALSE)) { + return 0; /* XXX: should be impossible */ + } + if (in6_setscope(&sin6_copy.sin6_addr, ifp, &zone)) { + return 0; + } + if (sin6_copy.sin6_scope_id == zone) { + return 1; + } else { + return 0; + } } /* @@ -2066,7 +2098,7 @@ nd6_is_new_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp) if (IN6_ARE_MASKED_ADDR_EQUAL(&pr->ndpr_prefix.sin6_addr, &addr->sin6_addr, &pr->ndpr_mask)) { NDPR_UNLOCK(pr); - return (1); + return 1; } NDPR_UNLOCK(pr); } @@ -2079,13 +2111,13 @@ nd6_is_new_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp) if (dstaddr != NULL) { if (dstaddr->ifa_ifp == ifp) { IFA_REMREF(dstaddr); - return (1); + return 1; } IFA_REMREF(dstaddr); dstaddr = NULL; } - return (0); + return 0; } @@ -2103,7 +2135,7 @@ nd6_is_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp, lck_mtx_lock(nd6_mutex); if (nd6_is_new_addr_neighbor(addr, ifp)) { lck_mtx_unlock(nd6_mutex); - return (1); + return 1; } lck_mtx_unlock(nd6_mutex); @@ -2115,10 +2147,10 @@ nd6_is_addr_neighbor(struct sockaddr_in6 *addr, struct ifnet *ifp, RT_LOCK_ASSERT_HELD(rt); RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); - return (1); + return 1; } - return (0); + return 0; } /* @@ -2139,7 +2171,7 @@ nd6_free(struct rtentry *rt) lck_mtx_lock(nd6_mutex); RT_LOCK(rt); - RT_ADDREF_LOCKED(rt); /* Extra ref */ + RT_ADDREF_LOCKED(rt); /* Extra ref */ ln = rt->rt_llinfo; in6 = SIN6(rt_key(rt))->sin6_addr; @@ -2223,7 +2255,7 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) struct sockaddr *gate = rt->rt_gateway; struct llinfo_nd6 *ln = rt->rt_llinfo; static struct sockaddr_dl null_sdl = - { .sdl_len = sizeof (null_sdl), .sdl_family = AF_LINK }; + { .sdl_len = sizeof(null_sdl), .sdl_family = AF_LINK }; struct ifnet *ifp = rt->rt_ifp; struct ifaddr *ifa; uint64_t timenow; @@ -2240,11 +2272,13 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * we might do this again below during RTM_RESOLVE, but doing it * now handles all other cases. */ - if (nd6_sched_timeout_want) + if (nd6_sched_timeout_want) { nd6_sched_timeout(NULL, NULL); + } - if (rt->rt_flags & RTF_GATEWAY) + if (rt->rt_flags & RTF_GATEWAY) { return; + } if (!nd6_need_cache(ifp) && !(rt->rt_flags & RTF_HOST)) { /* @@ -2260,7 +2294,7 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) if (req == RTM_RESOLVE) { int no_nd_cache; - if (!nd6_need_cache(ifp)) { /* stf case */ + if (!nd6_need_cache(ifp)) { /* stf case */ no_nd_cache = 1; } else { struct sockaddr_in6 sin6; @@ -2306,8 +2340,8 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * There is no backward compatibility :) * * if ((rt->rt_flags & RTF_HOST) == 0 && - * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff) - * rt->rt_flags |= RTF_CLONING; + * SIN(rt_mask(rt))->sin_addr.s_addr != 0xffffffff) + * rt->rt_flags |= RTF_CLONING; */ if ((rt->rt_flags & RTF_CLONING) || ((rt->rt_flags & RTF_LLINFO) && ln == NULL)) { @@ -2331,28 +2365,29 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) ? 0 : MAX(timenow, 1)); } } - if (rt->rt_flags & RTF_CLONING) + if (rt->rt_flags & RTF_CLONING) { break; + } } - /* - * In IPv4 code, we try to annonuce new RTF_ANNOUNCE entry here. - * We don't do that here since llinfo is not ready yet. 
- * - * There are also couple of other things to be discussed: - * - unsolicited NA code needs improvement beforehand - * - RFC4861 says we MAY send multicast unsolicited NA - * (7.2.6 paragraph 4), however, it also says that we - * SHOULD provide a mechanism to prevent multicast NA storm. - * we don't have anything like it right now. - * note that the mechanism needs a mutual agreement - * between proxies, which means that we need to implement - * a new protocol, or a new kludge. - * - from RFC4861 6.2.4, host MUST NOT send an unsolicited RA. - * we need to check ip6forwarding before sending it. - * (or should we allow proxy ND configuration only for - * routers? there's no mention about proxy ND from hosts) - */ - /* FALLTHROUGH */ + /* + * In IPv4 code, we try to announce new RTF_ANNOUNCE entry here. + * We don't do that here since llinfo is not ready yet. + * + * There are also a couple of other things to be discussed: + * - unsolicited NA code needs improvement beforehand + * - RFC4861 says we MAY send multicast unsolicited NA + * (7.2.6 paragraph 4), however, it also says that we + * SHOULD provide a mechanism to prevent multicast NA storm. + * we don't have anything like it right now. + * note that the mechanism needs a mutual agreement + * between proxies, which means that we need to implement + * a new protocol, or a new kludge. + * - from RFC4861 6.2.4, host MUST NOT send an unsolicited RA. + * we need to check ip6forwarding before sending it. + * (or should we allow proxy ND configuration only for + * routers? there's no mention about proxy ND from hosts) + */ + /* FALLTHROUGH */ case RTM_RESOLVE: if (!(ifp->if_flags & (IFF_POINTOPOINT | IFF_LOOPBACK))) { /* @@ -2360,7 +2395,7 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * point link, so we can skip this test for a p2p link. */ if (gate->sa_family != AF_LINK || - gate->sa_len < sizeof (null_sdl)) { + gate->sa_len < sizeof(null_sdl)) { /* Don't complain in case of RTM_ADD */ if (req == RTM_RESOLVE) { log(LOG_ERR, "%s: route to %s has bad " @@ -2368,7 +2403,7 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) "sa_len %u) on %s\n", __func__, inet_ntop(AF_INET6, &SIN6(rt_key(rt))->sin6_addr, buf, - sizeof (buf)), gate->sa_family, + sizeof(buf)), gate->sa_family, gate->sa_len, if_name(ifp)); } break; @@ -2376,21 +2411,23 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) SDL(gate)->sdl_type = ifp->if_type; SDL(gate)->sdl_index = ifp->if_index; } - if (ln != NULL) - break; /* This happens on a route change */ + if (ln != NULL) { + break; /* This happens on a route change */ + } /* * Case 2: This route may come from cloning, or a manual route * add with a LL address.
*/ rt->rt_llinfo = ln = nd6_llinfo_alloc(M_WAITOK); - if (ln == NULL) + if (ln == NULL) { break; + } nd6_allocated++; - rt->rt_llinfo_get_ri = nd6_llinfo_get_ri; - rt->rt_llinfo_get_iflri = nd6_llinfo_get_iflri; - rt->rt_llinfo_purge = nd6_llinfo_purge; - rt->rt_llinfo_free = nd6_llinfo_free; + rt->rt_llinfo_get_ri = nd6_llinfo_get_ri; + rt->rt_llinfo_get_iflri = nd6_llinfo_get_iflri; + rt->rt_llinfo_purge = nd6_llinfo_purge; + rt->rt_llinfo_free = nd6_llinfo_free; rt->rt_llinfo_refresh = nd6_llinfo_refresh; rt->rt_flags |= RTF_LLINFO; ln->ln_rt = rt; @@ -2449,10 +2486,11 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) RT_UNLOCK(rt_end); continue; } - if (ln_end->ln_state > ND6_LLINFO_INCOMPLETE) + if (ln_end->ln_state > ND6_LLINFO_INCOMPLETE) { ND6_CACHE_STATE_TRANSITION(ln_end, ND6_LLINFO_STALE); - else + } else { ND6_CACHE_STATE_TRANSITION(ln_end, ND6_LLINFO_PURGE); + } ln_setexpire(ln_end, timenow); RT_UNLOCK(rt_end); } @@ -2477,8 +2515,9 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) /* * Purge any link-layer info caching. */ - if (rt->rt_llinfo_purge != NULL) + if (rt->rt_llinfo_purge != NULL) { rt->rt_llinfo_purge(rt); + } /* * Adjust route ref count for the @@ -2495,8 +2534,9 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * If rmx_mtu is not locked, update it * to the MTU used by the new interface. */ - if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) + if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) { rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; + } /* * Make sure rt_ifa be equal to the ifaddr * corresponding to the address. @@ -2525,8 +2565,9 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) llsol.s6_addr32[1] = 0; llsol.s6_addr32[2] = htonl(1); llsol.s6_addr8[12] = 0xff; - if (in6_setscope(&llsol, ifp, NULL)) + if (in6_setscope(&llsol, ifp, NULL)) { break; + } error = in6_mc_join(ifp, &llsol, NULL, &in6m, 0); if (error) { @@ -2541,8 +2582,9 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) break; case RTM_DELETE: - if (ln == NULL) + if (ln == NULL) { break; + } /* leave from solicited node multicast for proxy ND */ if ((rt->rt_flags & RTF_ANNOUNCE) && (ifp->if_flags & IFF_MULTICAST)) { @@ -2571,14 +2613,16 @@ nd6_rtrequest(int req, struct rtentry *rt, struct sockaddr *sa) * llinfo_nd6, and likewise, ln->ln_rt stil points to this * route entry, except that RTF_LLINFO is now cleared. */ - if (ln->ln_flags & ND6_LNF_IN_USE) + if (ln->ln_flags & ND6_LNF_IN_USE) { LN_DEQUEUE(ln); + } /* * Purge any link-layer info caching. 
*/ - if (rt->rt_llinfo_purge != NULL) + if (rt->rt_llinfo_purge != NULL) { rt->rt_llinfo_purge(rt); + } rt->rt_flags &= ~RTF_LLINFO; if (ln->ln_hold != NULL) { @@ -2604,17 +2648,18 @@ nd6_siocgdrlst(void *data, int data_is_64) if (data_is_64) { struct in6_drlist_64 *drl_64; - drl_64 = _MALLOC(sizeof (*drl_64), M_TEMP, M_WAITOK|M_ZERO); - if (drl_64 == NULL) - return (ENOMEM); + drl_64 = _MALLOC(sizeof(*drl_64), M_TEMP, M_WAITOK | M_ZERO); + if (drl_64 == NULL) { + return ENOMEM; + } /* preserve the interface name */ - bcopy(data, drl_64, sizeof (drl_64->ifname)); + bcopy(data, drl_64, sizeof(drl_64->ifname)); while (dr && i < DRLSTSIZ) { drl_64->defrouter[i].rtaddr = dr->rtaddr; if (IN6_IS_ADDR_LINKLOCAL( - &drl_64->defrouter[i].rtaddr)) { + &drl_64->defrouter[i].rtaddr)) { /* XXX: need to this hack for KAME stack */ drl_64->defrouter[i].rtaddr.s6_addr16[1] = 0; } else { @@ -2630,18 +2675,19 @@ nd6_siocgdrlst(void *data, int data_is_64) i++; dr = TAILQ_NEXT(dr, dr_entry); } - bcopy(drl_64, data, sizeof (*drl_64)); + bcopy(drl_64, data, sizeof(*drl_64)); _FREE(drl_64, M_TEMP); - return (0); + return 0; } /* For 32-bit process */ - drl_32 = _MALLOC(sizeof (*drl_32), M_TEMP, M_WAITOK|M_ZERO); - if (drl_32 == NULL) - return (ENOMEM); + drl_32 = _MALLOC(sizeof(*drl_32), M_TEMP, M_WAITOK | M_ZERO); + if (drl_32 == NULL) { + return ENOMEM; + } /* preserve the interface name */ - bcopy(data, drl_32, sizeof (drl_32->ifname)); + bcopy(data, drl_32, sizeof(drl_32->ifname)); while (dr != NULL && i < DRLSTSIZ) { drl_32->defrouter[i].rtaddr = dr->rtaddr; @@ -2661,9 +2707,9 @@ nd6_siocgdrlst(void *data, int data_is_64) i++; dr = TAILQ_NEXT(dr, dr_entry); } - bcopy(drl_32, data, sizeof (*drl_32)); + bcopy(drl_32, data, sizeof(*drl_32)); _FREE(drl_32, M_TEMP); - return (0); + return 0; } /* @@ -2687,12 +2733,13 @@ nd6_siocgprlst(void *data, int data_is_64) if (data_is_64) { struct in6_prlist_64 *prl_64; - prl_64 = _MALLOC(sizeof (*prl_64), M_TEMP, M_WAITOK|M_ZERO); - if (prl_64 == NULL) - return (ENOMEM); + prl_64 = _MALLOC(sizeof(*prl_64), M_TEMP, M_WAITOK | M_ZERO); + if (prl_64 == NULL) { + return ENOMEM; + } /* preserve the interface name */ - bcopy(data, prl_64, sizeof (prl_64->ifname)); + bcopy(data, prl_64, sizeof(prl_64->ifname)); while (pr && i < PRLSTSIZ) { struct nd_pfxrouter *pfr; @@ -2712,7 +2759,7 @@ nd6_siocgprlst(void *data, int data_is_64) j = 0; while (pfr) { if (j < DRLSTSIZ) { -#define RTRADDR prl_64->prefix[i].advrtr[j] +#define RTRADDR prl_64->prefix[i].advrtr[j] RTRADDR = pfr->router->rtaddr; if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) { /* XXX: hack for KAME */ @@ -2736,18 +2783,19 @@ nd6_siocgprlst(void *data, int data_is_64) i++; pr = pr->ndpr_next; } - bcopy(prl_64, data, sizeof (*prl_64)); + bcopy(prl_64, data, sizeof(*prl_64)); _FREE(prl_64, M_TEMP); - return (0); + return 0; } /* For 32-bit process */ - prl_32 = _MALLOC(sizeof (*prl_32), M_TEMP, M_WAITOK|M_ZERO); - if (prl_32 == NULL) - return (ENOMEM); + prl_32 = _MALLOC(sizeof(*prl_32), M_TEMP, M_WAITOK | M_ZERO); + if (prl_32 == NULL) { + return ENOMEM; + } /* preserve the interface name */ - bcopy(data, prl_32, sizeof (prl_32->ifname)); + bcopy(data, prl_32, sizeof(prl_32->ifname)); while (pr && i < PRLSTSIZ) { struct nd_pfxrouter *pfr; @@ -2767,7 +2815,7 @@ nd6_siocgprlst(void *data, int data_is_64) j = 0; while (pfr) { if (j < DRLSTSIZ) { -#define RTRADDR prl_32->prefix[i].advrtr[j] +#define RTRADDR prl_32->prefix[i].advrtr[j] RTRADDR = pfr->router->rtaddr; if (IN6_IS_ADDR_LINKLOCAL(&RTRADDR)) { /* XXX: hack for KAME */ @@ 
-2791,9 +2839,9 @@ nd6_siocgprlst(void *data, int data_is_64) i++; pr = pr->ndpr_next; } - bcopy(prl_32, data, sizeof (*prl_32)); + bcopy(prl_32, data, sizeof(*prl_32)); _FREE(prl_32, M_TEMP); - return (0); + return 0; } int @@ -2807,8 +2855,8 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) VERIFY(ifp != NULL); switch (cmd) { - case SIOCGDRLST_IN6_32: /* struct in6_drlist_32 */ - case SIOCGDRLST_IN6_64: /* struct in6_drlist_64 */ + case SIOCGDRLST_IN6_32: /* struct in6_drlist_32 */ + case SIOCGDRLST_IN6_64: /* struct in6_drlist_64 */ /* * obsolete API, use sysctl under net.inet6.icmp6 */ @@ -2817,8 +2865,8 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) lck_mtx_unlock(nd6_mutex); break; - case SIOCGPRLST_IN6_32: /* struct in6_prlist_32 */ - case SIOCGPRLST_IN6_64: /* struct in6_prlist_64 */ + case SIOCGPRLST_IN6_32: /* struct in6_prlist_32 */ + case SIOCGPRLST_IN6_64: /* struct in6_prlist_64 */ /* * obsolete API, use sysctl under net.inet6.icmp6 */ @@ -2827,8 +2875,8 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) lck_mtx_unlock(nd6_mutex); break; - case OSIOCGIFINFO_IN6: /* struct in6_ondireq */ - case SIOCGIFINFO_IN6: { /* struct in6_ondireq */ + case OSIOCGIFINFO_IN6: /* struct in6_ondireq */ + case SIOCGIFINFO_IN6: { /* struct in6_ondireq */ u_int32_t linkmtu; struct in6_ondireq *ondi = (struct in6_ondireq *)(void *)data; struct nd_ifinfo *ndi; @@ -2837,32 +2885,32 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) * instead of in6_ndireq, so we treat it as such. */ ndi = ND_IFINFO(ifp); - if ((NULL == ndi) || (FALSE == ndi->initialized)){ + if ((NULL == ndi) || (FALSE == ndi->initialized)) { error = EINVAL; break; } lck_mtx_lock(&ndi->lock); linkmtu = IN6_LINKMTU(ifp); - bcopy(&linkmtu, &ondi->ndi.linkmtu, sizeof (linkmtu)); + bcopy(&linkmtu, &ondi->ndi.linkmtu, sizeof(linkmtu)); bcopy(&ndi->maxmtu, &ondi->ndi.maxmtu, - sizeof (u_int32_t)); + sizeof(u_int32_t)); bcopy(&ndi->basereachable, &ondi->ndi.basereachable, - sizeof (u_int32_t)); + sizeof(u_int32_t)); bcopy(&ndi->reachable, &ondi->ndi.reachable, - sizeof (u_int32_t)); + sizeof(u_int32_t)); bcopy(&ndi->retrans, &ondi->ndi.retrans, - sizeof (u_int32_t)); + sizeof(u_int32_t)); bcopy(&ndi->flags, &ondi->ndi.flags, - sizeof (u_int32_t)); + sizeof(u_int32_t)); bcopy(&ndi->recalctm, &ondi->ndi.recalctm, - sizeof (int)); + sizeof(int)); ondi->ndi.chlim = ndi->chlim; ondi->ndi.receivedra = 0; lck_mtx_unlock(&ndi->lock); break; } - case SIOCSIFINFO_FLAGS: { /* struct in6_ndireq */ + case SIOCSIFINFO_FLAGS: { /* struct in6_ndireq */ /* * XXX BSD has a bunch of checks here to ensure * that interface disabled flag is not reset if @@ -2881,7 +2929,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) lck_mtx_lock(&ndi->lock); oflags = ndi->flags; - bcopy(&cndi->ndi.flags, &(ndi->flags), sizeof (flags)); + bcopy(&cndi->ndi.flags, &(ndi->flags), sizeof(flags)); flags = ndi->flags; lck_mtx_unlock(&ndi->lock); @@ -2893,7 +2941,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) break; } - case SIOCSNDFLUSH_IN6: /* struct in6_ifreq */ + case SIOCSNDFLUSH_IN6: /* struct in6_ifreq */ /* flush default router list */ /* * xxx sumikawa: should not delete route if default @@ -2906,7 +2954,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) /* xxx sumikawa: flush prefix list */ break; - case SIOCSPFXFLUSH_IN6: { /* struct in6_ifreq */ + case SIOCSPFXFLUSH_IN6: { /* struct in6_ifreq */ /* flush all the prefix advertised by routers */ struct nd_prefix *next = NULL; @@ -2948,7 +2996,7 @@ 
nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) IFA_REMREF(&ia->ia_ifa); lck_mtx_lock(nd6_mutex); lck_rw_lock_exclusive( - &in6_ifaddr_rwlock); + &in6_ifaddr_rwlock); /* * Purging the address caused * in6_ifaddr_rwlock to be @@ -2978,7 +3026,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) break; } - case SIOCSRTRFLUSH_IN6: { /* struct in6_ifreq */ + case SIOCSRTRFLUSH_IN6: { /* struct in6_ifreq */ /* flush all the default routers */ struct nd_defrouter *next; struct nd_drhead nd_defrouter_tmp; @@ -3035,24 +3083,25 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) break; } - case SIOCGNBRINFO_IN6_32: { /* struct in6_nbrinfo_32 */ + case SIOCGNBRINFO_IN6_32: { /* struct in6_nbrinfo_32 */ struct llinfo_nd6 *ln; struct in6_nbrinfo_32 nbi_32; struct in6_addr nb_addr; /* make local for safety */ - bcopy(data, &nbi_32, sizeof (nbi_32)); + bcopy(data, &nbi_32, sizeof(nbi_32)); nb_addr = nbi_32.addr; /* * XXX: KAME specific hack for scoped addresses - * XXXX: for other scopes than link-local? + * XXXX: for other scopes than link-local? */ if (IN6_IS_ADDR_LINKLOCAL(&nbi_32.addr) || IN6_IS_ADDR_MC_LINKLOCAL(&nbi_32.addr)) { u_int16_t *idp = (u_int16_t *)(void *)&nb_addr.s6_addr[2]; - if (*idp == 0) + if (*idp == 0) { *idp = htons(ifp->if_index); + } } /* Callee returns a locked route upon success */ @@ -3068,28 +3117,29 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) nbi_32.expire = ln_getexpire(ln); RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); - bcopy(&nbi_32, data, sizeof (nbi_32)); + bcopy(&nbi_32, data, sizeof(nbi_32)); break; } - case SIOCGNBRINFO_IN6_64: { /* struct in6_nbrinfo_64 */ + case SIOCGNBRINFO_IN6_64: { /* struct in6_nbrinfo_64 */ struct llinfo_nd6 *ln; struct in6_nbrinfo_64 nbi_64; struct in6_addr nb_addr; /* make local for safety */ - bcopy(data, &nbi_64, sizeof (nbi_64)); + bcopy(data, &nbi_64, sizeof(nbi_64)); nb_addr = nbi_64.addr; /* * XXX: KAME specific hack for scoped addresses - * XXXX: for other scopes than link-local? + * XXXX: for other scopes than link-local? 
*/ if (IN6_IS_ADDR_LINKLOCAL(&nbi_64.addr) || IN6_IS_ADDR_MC_LINKLOCAL(&nbi_64.addr)) { u_int16_t *idp = (u_int16_t *)(void *)&nb_addr.s6_addr[2]; - if (*idp == 0) + if (*idp == 0) { *idp = htons(ifp->if_index); + } } /* Callee returns a locked route upon success */ @@ -3105,12 +3155,12 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) nbi_64.expire = ln_getexpire(ln); RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); - bcopy(&nbi_64, data, sizeof (nbi_64)); + bcopy(&nbi_64, data, sizeof(nbi_64)); break; } - case SIOCGDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */ - case SIOCGDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */ + case SIOCGDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */ + case SIOCGDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */ struct in6_ndifreq_64 *ndif_64 = (struct in6_ndifreq_64 *)(void *)data; struct in6_ndifreq_32 *ndif_32 = @@ -3118,16 +3168,16 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) if (cmd == SIOCGDEFIFACE_IN6_64) { u_int64_t j = nd6_defifindex; - bcopy(&j, &ndif_64->ifindex, sizeof (j)); + bcopy(&j, &ndif_64->ifindex, sizeof(j)); } else { bcopy(&nd6_defifindex, &ndif_32->ifindex, - sizeof (u_int32_t)); + sizeof(u_int32_t)); } break; } - case SIOCSDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */ - case SIOCSDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */ + case SIOCSDEFIFACE_IN6_32: /* struct in6_ndifreq_32 */ + case SIOCSDEFIFACE_IN6_64: { /* struct in6_ndifreq_64 */ struct in6_ndifreq_64 *ndif_64 = (struct in6_ndifreq_64 *)(void *)data; struct in6_ndifreq_32 *ndif_32 = @@ -3136,14 +3186,14 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) if (cmd == SIOCSDEFIFACE_IN6_64) { u_int64_t j; - bcopy(&ndif_64->ifindex, &j, sizeof (j)); + bcopy(&ndif_64->ifindex, &j, sizeof(j)); idx = (u_int32_t)j; } else { - bcopy(&ndif_32->ifindex, &idx, sizeof (idx)); + bcopy(&ndif_32->ifindex, &idx, sizeof(idx)); } error = nd6_setdefaultiface(idx); - return (error); + return error; /* NOTREACHED */ } case SIOCGIFCGAPREP_IN6: @@ -3155,7 +3205,7 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) struct in6_cga_modifier *req_cga_mod = &(p_cgareq->cgar_cgaprep.cga_modifier); - struct in6_cga_modifier *ndi_cga_mod = NULL; + struct in6_cga_modifier *ndi_cga_mod = NULL; if ((NULL == ndi) || !ndi->initialized) { error = EINVAL; @@ -3168,15 +3218,16 @@ nd6_ioctl(u_long cmd, caddr_t data, struct ifnet *ifp) if (cmd == SIOCSIFCGAPREP_IN6) { bcopy(req_cga_mod, ndi_cga_mod, sizeof(*ndi_cga_mod)); ndi->cga_initialized = TRUE; - } else + } else { bcopy(ndi_cga_mod, req_cga_mod, sizeof(*req_cga_mod)); + } lck_mtx_unlock(&ndi->lock); - return (error); + return error; /* NOTREACHED */ } } - return (error); + return error; } /* @@ -3200,14 +3251,17 @@ nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr, boolean_t sched_timeout = FALSE; struct nd_ifinfo *ndi = NULL; - if (ifp == NULL) + if (ifp == NULL) { panic("ifp == NULL in nd6_cache_lladdr"); - if (from == NULL) + } + if (from == NULL) { panic("from == NULL in nd6_cache_lladdr"); + } /* nothing must be updated for unspecified address */ - if (IN6_IS_ADDR_UNSPECIFIED(from)) + if (IN6_IS_ADDR_UNSPECIFIED(from)) { return; + } /* * Validation about ifp->if_addrlen and lladdrlen must be done in @@ -3217,8 +3271,9 @@ nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr, rt = nd6_lookup(from, 0, ifp, 0); if (rt == NULL) { - if ((rt = nd6_lookup(from, 1, ifp, 0)) == NULL) + if ((rt = nd6_lookup(from, 1, ifp, 0)) == NULL) { return; + } RT_LOCK_ASSERT_HELD(rt); is_newentry = 1; } else { @@ -3232,8 +3287,9 @@ 
nd6_cache_lladdr(struct ifnet *ifp, struct in6_addr *from, char *lladdr, is_newentry = 0; } - if (rt == NULL) + if (rt == NULL) { return; + } if ((rt->rt_flags & (RTF_GATEWAY | RTF_LLINFO)) != RTF_LLINFO) { fail: RT_UNLOCK(rt); @@ -3242,22 +3298,27 @@ fail: return; } ln = (struct llinfo_nd6 *)rt->rt_llinfo; - if (ln == NULL) + if (ln == NULL) { goto fail; - if (rt->rt_gateway == NULL) + } + if (rt->rt_gateway == NULL) { goto fail; - if (rt->rt_gateway->sa_family != AF_LINK) + } + if (rt->rt_gateway->sa_family != AF_LINK) { goto fail; + } sdl = SDL(rt->rt_gateway); olladdr = (sdl->sdl_alen) ? 1 : 0; if (olladdr && lladdr) { - if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen)) + if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen)) { llchange = 1; - else + } else { llchange = 0; - } else + } + } else { llchange = 0; + } /* * newentry olladdr lladdr llchange (*=record) @@ -3270,7 +3331,7 @@ fail: * 1 -- y -- (7) * STALE */ - if (lladdr != NULL) { /* (3-5) and (7) */ + if (lladdr != NULL) { /* (3-5) and (7) */ /* * Record source link-layer address * XXX is it dependent to ifp->if_type? @@ -3283,18 +3344,20 @@ fail: } if (is_newentry == 0) { - if ((!olladdr && lladdr != NULL) || /* (3) */ - (olladdr && lladdr != NULL && llchange)) { /* (5) */ + if ((!olladdr && lladdr != NULL) || /* (3) */ + (olladdr && lladdr != NULL && llchange)) { /* (5) */ do_update = 1; newstate = ND6_LLINFO_STALE; - } else /* (1-2,4) */ + } else { /* (1-2,4) */ do_update = 0; + } } else { do_update = 1; - if (lladdr == NULL) /* (6) */ + if (lladdr == NULL) { /* (6) */ newstate = ND6_LLINFO_NOSTATE; - else /* (7) */ + } else { /* (7) */ newstate = ND6_LLINFO_STALE; + } } /* @@ -3317,7 +3380,7 @@ fail: ND6_CACHE_STATE_TRANSITION(ln, newstate); if ((ln->ln_state == ND6_LLINFO_STALE) || - (ln->ln_state == ND6_LLINFO_REACHABLE)) { + (ln->ln_state == ND6_LLINFO_REACHABLE)) { struct mbuf *m = ln->ln_hold; /* * XXX: since nd6_output() below will cause @@ -3325,8 +3388,9 @@ fail: * we must set the timer now, although it is actually * meaningless. */ - if (ln->ln_state == ND6_LLINFO_STALE) + if (ln->ln_state == ND6_LLINFO_STALE) { ln_setexpire(ln, timenow + nd6_gctimer); + } ln->ln_hold = NULL; if (m != NULL) { @@ -3382,8 +3446,9 @@ fail: /* * New entry must have is_router flag cleared. */ - if (is_newentry) /* (6-7) */ + if (is_newentry) { /* (6-7) */ ln->ln_router = 0; + } break; case ND_REDIRECT: /* @@ -3391,10 +3456,11 @@ fail: * set the is_router flag. Otherwise, if the entry is newly * created, then clear the flag. [RFC 4861, sec 8.3] */ - if (code == ND_REDIRECT_ROUTER) + if (code == ND_REDIRECT_ROUTER) { ln->ln_router = 1; - else if (is_newentry) /* (6-7) */ + } else if (is_newentry) { /* (6-7) */ ln->ln_router = 0; + } break; case ND_ROUTER_SOLICIT: /* @@ -3406,8 +3472,8 @@ fail: /* * Mark an entry with lladdr as a router. 
*/ - if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */ - (is_newentry && lladdr)) { /* (7) */ + if ((!is_newentry && (olladdr || lladdr)) || /* (2-5) */ + (is_newentry && lladdr)) { /* (7) */ ln->ln_router = 1; } break; @@ -3416,10 +3482,11 @@ fail: if (do_update) { int route_ev_code = 0; - if (llchange) + if (llchange) { route_ev_code = ROUTE_LLENTRY_CHANGED; - else + } else { route_ev_code = ROUTE_LLENTRY_RESOLVED; + } /* Enqueue work item to invoke callback for this route entry */ route_event_enqueue_nwk_wq_entry(rt, NULL, route_ev_code, NULL, TRUE); @@ -3438,9 +3505,10 @@ fail: lck_mtx_lock(rnh_lock); rnh = rt_tables[AF_INET6]; - if (rnh != NULL) + if (rnh != NULL) { (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev); + } lck_mtx_unlock(rnh_lock); RT_LOCK(rt); } @@ -3520,7 +3588,7 @@ nd6_output(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0, * Assumption: route determination for first packet can be correctly applied to * all packets in the chain. */ -#define senderr(e) { error = (e); goto bad; } +#define senderr(e) { error = (e); goto bad; } int nd6_output_list(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0, struct sockaddr_in6 *dst, struct rtentry *hint0, struct flowadv *adv) @@ -3538,8 +3606,9 @@ nd6_output_list(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0, } if (IN6_IS_ADDR_MULTICAST(&dst->sin6_addr) || !nd6_need_cache(ifp)) { - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); + } goto sendpkt; } @@ -3577,7 +3646,7 @@ nd6_output_list(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0, error = nd6_output_list(ifp, origifp, m0, dst, rt, adv); rtfree(rt); - return (error); + return error; } } else { senderr(EHOSTUNREACH); @@ -3611,10 +3680,12 @@ nd6_output_list(struct ifnet *ifp, struct ifnet *origifp, struct mbuf *m0, * when the outgoing interface is p2p. * XXX: we may need a more generic rule here. */ - if (ia6 != NULL) + if (ia6 != NULL) { IFA_REMREF(&ia6->ia_ifa); - if ((ifp->if_flags & IFF_POINTOPOINT) == 0) + } + if ((ifp->if_flags & IFF_POINTOPOINT) == 0) { senderr(EHOSTUNREACH); + } goto sendpkt; } @@ -3665,8 +3736,9 @@ lookup: gwrt = NULL; } RT_UNLOCK(rt); - if (gwrt != NULL) + if (gwrt != NULL) { rtfree_locked(gwrt); + } lck_mtx_unlock(rnh_lock); senderr(EHOSTUNREACH); } @@ -3752,16 +3824,18 @@ lookup: * Must drop rt_lock since nd6_is_addr_neighbor() calls * nd6_lookup() and acquires rnh_lock. */ - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); + } if (nd6_is_addr_neighbor(&sin6, ifp, 0)) { /* "rtrele" may have been used, so clean up "rt" now */ if (rt != NULL) { /* Don't free "hint0" */ - if (rt == hint0) + if (rt == hint0) { RT_REMREF(rt); - else + } else { rtfree(rt); + } } /* Callee returns a locked route upon success */ rt = nd6_lookup(&dst->sin6_addr, 1, ifp, 0); @@ -3790,11 +3864,11 @@ lookup: ip6_sprintf(&dst->sin6_addr), (uint64_t)VM_KERNEL_ADDRPERM(ln), (uint64_t)VM_KERNEL_ADDRPERM(rt)); - senderr(EIO); /* XXX: good error? */ + senderr(EIO); /* XXX: good error? 
*/ } lck_mtx_unlock(&ndi->lock); - goto sendpkt; /* send anyway */ + goto sendpkt; /* send anyway */ } net_update_uptime(); @@ -3841,8 +3915,9 @@ lookup: (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 && nd6_inuse >= (ip6_neighborgcthresh >> 1))) { lck_mtx_lock(rnh_lock); - if (ln->ln_state == ND6_LLINFO_DELAY) + if (ln->ln_state == ND6_LLINFO_DELAY) { nd6_sched_timeout(NULL, NULL); + } if (ln->ln_expire != 0 && ip6_neighborgcthresh > 0 && nd6_inuse >= (ip6_neighborgcthresh >> 1)) { RT_LOCK_SPIN(rt); @@ -3862,8 +3937,9 @@ lookup: * so that it can be excluded from the list of interfaces eligible * for forwarding the proxied NS in nd6_prproxy_ns_output(). */ - if (rt->rt_flags & RTF_PROXY) + if (rt->rt_flags & RTF_PROXY) { ln->ln_exclifp = ((origifp == ifp) ? NULL : origifp); + } /* * There is a neighbor cache entry, but no ethernet address @@ -3874,10 +3950,12 @@ lookup: * 7.2.2 of RFC 4861, because the timer is set correctly after sending * an NS below. */ - if (ln->ln_state == ND6_LLINFO_NOSTATE) + if (ln->ln_state == ND6_LLINFO_NOSTATE) { ND6_CACHE_STATE_TRANSITION(ln, ND6_LLINFO_INCOMPLETE); - if (ln->ln_hold) + } + if (ln->ln_hold) { m_freem_list(ln->ln_hold); + } ln->ln_hold = m0; if (!ND6_LLINFO_PERMANENT(ln) && ln->ln_asked == 0) { ln->ln_asked++; @@ -3888,11 +3966,12 @@ lookup: lck_mtx_unlock(&ndi->lock); RT_UNLOCK(rt); /* We still have a reference on rt (for ln) */ - if (ip6_forwarding) + if (ip6_forwarding) { nd6_prproxy_ns_output(ifp, origifp, NULL, &dst->sin6_addr, ln); - else + } else { nd6_ns_output(ifp, NULL, &dst->sin6_addr, ln, NULL); + } lck_mtx_lock(rnh_lock); nd6_sched_timeout(NULL, NULL); lck_mtx_unlock(rnh_lock); @@ -3923,15 +4002,16 @@ lookup: RT_UNLOCK(rt); rtfree_locked(rt); } - rt = NULL; /* "rt" has been taken care of */ + rt = NULL; /* "rt" has been taken care of */ lck_mtx_unlock(rnh_lock); } error = 0; goto release; sendpkt: - if (rt != NULL) + if (rt != NULL) { RT_LOCK_ASSERT_NOTHELD(rt); + } /* discard the packet if IPv6 operation is disabled on the interface */ if (ifp->if_eflags & IFEF_IPV6_DISABLED) { @@ -3960,8 +4040,9 @@ sendpkt: if (rt != NULL) { RT_LOCK_SPIN(rt); /* Mark use timestamp */ - if (rt->rt_llinfo != NULL) + if (rt->rt_llinfo != NULL) { nd6_llreach_use(rt->rt_llinfo); + } RT_UNLOCK(rt); } @@ -3973,10 +4054,11 @@ sendpkt: int scnt; if ((mcur->m_pkthdr.csum_flags & CSUM_TSO_IPV6) && - (mcur->m_pkthdr.tso_segsz > 0)) + (mcur->m_pkthdr.tso_segsz > 0)) { scnt = mcur->m_pkthdr.len / mcur->m_pkthdr.tso_segsz; - else + } else { scnt = 1; + } nstat_route_tx(hint, scnt, mcur->m_pkthdr.len, 0); } @@ -3985,14 +4067,16 @@ sendpkt: mcur->m_pkthdr.rcvif = NULL; mcur = mcur->m_nextpkt; } - if (pktcnt > ip6_maxchainsent) + if (pktcnt > ip6_maxchainsent) { ip6_maxchainsent = pktcnt; + } error = dlil_output(ifp, PF_INET6, m0, (caddr_t)rt, SA(dst), 0, adv); goto release; bad: - if (m0 != NULL) + if (m0 != NULL) { m_freem_list(m0); + } release: /* Clean up "rt" unless it's already been done */ @@ -4017,7 +4101,7 @@ release: rtfree(rtrele); } } - return (error); + return error; } #undef senderr @@ -4041,16 +4125,16 @@ nd6_need_cache(struct ifnet *ifp) #if IFT_IEEE80211 case IFT_IEEE80211: #endif - case IFT_GIF: /* XXX need more cases? */ + case IFT_GIF: /* XXX need more cases? 
*/ case IFT_PPP: #if IFT_TUNNEL case IFT_TUNNEL: #endif case IFT_BRIDGE: case IFT_CELLULAR: - return (1); + return 1; default: - return (0); + return 0; } } @@ -4072,40 +4156,41 @@ nd6_storelladdr(struct ifnet *ifp, struct rtentry *rt, struct mbuf *m, #endif case IFT_BRIDGE: ETHER_MAP_IPV6_MULTICAST(&SIN6(dst)->sin6_addr, desten); - return (1); + return 1; case IFT_IEEE1394: - for (i = 0; i < ifp->if_addrlen; i++) + for (i = 0; i < ifp->if_addrlen; i++) { desten[i] = ~0; - return (1); + } + return 1; case IFT_ARCNET: *desten = 0; - return (1); + return 1; default: - return (0); /* caller will free mbuf */ + return 0; /* caller will free mbuf */ } } if (rt == NULL) { /* this could happen, if we could not allocate memory */ - return (0); /* caller will free mbuf */ + return 0; /* caller will free mbuf */ } RT_LOCK(rt); if (rt->rt_gateway->sa_family != AF_LINK) { printf("nd6_storelladdr: something odd happens\n"); RT_UNLOCK(rt); - return (0); /* caller will free mbuf */ + return 0; /* caller will free mbuf */ } sdl = SDL(rt->rt_gateway); if (sdl->sdl_alen == 0) { /* this should be impossible, but we bark here for debugging */ printf("nd6_storelladdr: sdl_alen == 0\n"); RT_UNLOCK(rt); - return (0); /* caller will free mbuf */ + return 0; /* caller will free mbuf */ } bcopy(LLADDR(sdl), desten, sdl->sdl_alen); RT_UNLOCK(rt); - return (1); + return 1; } /* @@ -4115,23 +4200,26 @@ nd6_storelladdr(struct ifnet *ifp, struct rtentry *rt, struct mbuf *m, * placeholder. */ errno_t -nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest, - struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint, +nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest, + struct sockaddr_dl *ll_dest, size_t ll_dest_len, route_t hint, mbuf_t packet) { - route_t route = hint; - errno_t result = 0; + route_t route = hint; + errno_t result = 0; struct sockaddr_dl *sdl = NULL; - size_t copy_len; + size_t copy_len; - if (ifp == NULL || ip6_dest == NULL) - return (EINVAL); + if (ifp == NULL || ip6_dest == NULL) { + return EINVAL; + } - if (ip6_dest->sin6_family != AF_INET6) - return (EAFNOSUPPORT); + if (ip6_dest->sin6_family != AF_INET6) { + return EAFNOSUPPORT; + } - if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) - return (ENETDOWN); + if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING)) { + return ENETDOWN; + } if (hint != NULL) { /* @@ -4140,22 +4228,26 @@ nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest, */ result = route_to_gwroute((const struct sockaddr *)ip6_dest, hint, &route); - if (result != 0) - return (result); - if (route != NULL) + if (result != 0) { + return result; + } + if (route != NULL) { RT_LOCK_ASSERT_HELD(route); + } } if ((packet != NULL && (packet->m_flags & M_MCAST) != 0) || ((ifp->if_flags & IFF_MULTICAST) && IN6_IS_ADDR_MULTICAST(&ip6_dest->sin6_addr))) { - if (route != NULL) + if (route != NULL) { RT_UNLOCK(route); + } result = dlil_resolve_multi(ifp, (const struct sockaddr *)ip6_dest, SA(ll_dest), ll_dest_len); - if (route != NULL) + if (route != NULL) { RT_LOCK(route); + } goto release; } else if (route == NULL) { /* @@ -4170,7 +4262,7 @@ nd6_lookup_ipv6(ifnet_t ifp, const struct sockaddr_in6 *ip6_dest, lck_mtx_unlock(rnh_lock); if (route != NULL) { - RT_LOCK(route); + RT_LOCK(route); } } @@ -4214,15 +4306,15 @@ release: rtfree(route); } } - return (result); + return result; } #if (DEVELOPMENT || DEBUG) static int sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS; SYSCTL_PROC(_net_inet6_icmp6, OID_AUTO, nd6_lookup_ipv6, - 
CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, - sysctl_nd6_lookup_ipv6, "S", ""); + CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, + sysctl_nd6_lookup_ipv6, "S", ""); int sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS @@ -4234,7 +4326,7 @@ sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS /* * Only root can lookup MAC addresses - */ + */ error = proc_suser(current_proc()); if (error != 0) { printf("%s: proc_suser() error %d\n", @@ -4263,14 +4355,14 @@ sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS } /* Make sure to terminate the string */ nd6_lookup_ipv6_args.ifname[IFNAMSIZ - 1] = 0; - + error = ifnet_find_by_name(nd6_lookup_ipv6_args.ifname, &ifp); if (error != 0) { printf("%s: ifnet_find_by_name() error %d\n", __func__, error); goto done; } - + error = nd6_lookup_ipv6(ifp, &nd6_lookup_ipv6_args.ip6_dest, &nd6_lookup_ipv6_args.ll_dest_._sdl, nd6_lookup_ipv6_args.ll_dest_len, NULL, NULL); @@ -4279,7 +4371,7 @@ sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS __func__, error); goto done; } - + error = SYSCTL_OUT(req, &nd6_lookup_ipv6_args, sizeof(struct nd6_lookup_ipv6_args)); if (error != 0) { @@ -4288,7 +4380,7 @@ sysctl_nd6_lookup_ipv6 SYSCTL_HANDLER_ARGS goto done; } done: - return (error); + return error; } #endif /* (DEVELOPEMENT || DEBUG) */ @@ -4308,8 +4400,9 @@ nd6_setifinfo(struct ifnet *ifp, u_int32_t before, u_int32_t after) a = (after & ND6_IFF_IFDISABLED); if (b != a && (err = nd6_if_disable(ifp, - ((int32_t)(a - b) > 0))) != 0) + ((int32_t)(a - b) > 0))) != 0) { goto done; + } } /* @@ -4321,11 +4414,12 @@ nd6_setifinfo(struct ifnet *ifp, u_int32_t before, u_int32_t after) a = (after & ND6_IFF_PROXY_PREFIXES); if (b != a && (err = nd6_if_prproxy(ifp, - ((int32_t)(a - b) > 0))) != 0) + ((int32_t)(a - b) > 0))) != 0) { goto done; + } } done: - return (err); + return err; } /* @@ -4336,13 +4430,14 @@ int nd6_if_disable(struct ifnet *ifp, boolean_t enable) { ifnet_lock_shared(ifp); - if (enable) + if (enable) { ifp->if_eflags |= IFEF_IPV6_DISABLED; - else + } else { ifp->if_eflags &= ~IFEF_IPV6_DISABLED; + } ifnet_lock_done(ifp); - return (0); + return 0; } static int @@ -4353,60 +4448,65 @@ nd6_sysctl_drlist SYSCTL_HANDLER_ARGS struct nd_defrouter *dr; int error = 0; - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } /* XXX Handle mapped defrouter entries */ lck_mtx_lock(nd6_mutex); if (proc_is64bit(req->p)) { struct in6_defrouter_64 d; - bzero(&d, sizeof (d)); + bzero(&d, sizeof(d)); d.rtaddr.sin6_family = AF_INET6; - d.rtaddr.sin6_len = sizeof (d.rtaddr); + d.rtaddr.sin6_len = sizeof(d.rtaddr); TAILQ_FOREACH(dr, &nd_defrouter, dr_entry) { d.rtaddr.sin6_addr = dr->rtaddr; if (in6_recoverscope(&d.rtaddr, - &dr->rtaddr, dr->ifp) != 0) + &dr->rtaddr, dr->ifp) != 0) { log(LOG_ERR, "scope error in default router " "list (%s)\n", inet_ntop(AF_INET6, - &dr->rtaddr, pbuf, sizeof (pbuf))); + &dr->rtaddr, pbuf, sizeof(pbuf))); + } d.flags = dr->flags; d.stateflags = dr->stateflags; d.rtlifetime = dr->rtlifetime; d.expire = nddr_getexpire(dr); d.if_index = dr->ifp->if_index; - error = SYSCTL_OUT(req, &d, sizeof (d)); - if (error != 0) + error = SYSCTL_OUT(req, &d, sizeof(d)); + if (error != 0) { break; + } } } else { struct in6_defrouter_32 d; - bzero(&d, sizeof (d)); + bzero(&d, sizeof(d)); d.rtaddr.sin6_family = AF_INET6; - d.rtaddr.sin6_len = sizeof (d.rtaddr); + d.rtaddr.sin6_len = sizeof(d.rtaddr); TAILQ_FOREACH(dr, &nd_defrouter, dr_entry) { d.rtaddr.sin6_addr = dr->rtaddr; if (in6_recoverscope(&d.rtaddr, - &dr->rtaddr, dr->ifp) 
!= 0) + &dr->rtaddr, dr->ifp) != 0) { log(LOG_ERR, "scope error in default router " "list (%s)\n", inet_ntop(AF_INET6, - &dr->rtaddr, pbuf, sizeof (pbuf))); + &dr->rtaddr, pbuf, sizeof(pbuf))); + } d.flags = dr->flags; d.stateflags = dr->stateflags; d.rtlifetime = dr->rtlifetime; d.expire = nddr_getexpire(dr); d.if_index = dr->ifp->if_index; - error = SYSCTL_OUT(req, &d, sizeof (d)); - if (error != 0) + error = SYSCTL_OUT(req, &d, sizeof(d)); + if (error != 0) { break; + } } } lck_mtx_unlock(nd6_mutex); - return (error); + return error; } static int @@ -4419,29 +4519,31 @@ nd6_sysctl_prlist SYSCTL_HANDLER_ARGS struct nd_prefix *pr; int error = 0; - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } - bzero(&s6, sizeof (s6)); + bzero(&s6, sizeof(s6)); s6.sin6_family = AF_INET6; - s6.sin6_len = sizeof (s6); + s6.sin6_len = sizeof(s6); /* XXX Handle mapped defrouter entries */ lck_mtx_lock(nd6_mutex); if (proc_is64bit(req->p)) { struct in6_prefix_64 p; - bzero(&p, sizeof (p)); + bzero(&p, sizeof(p)); p.origin = PR_ORIG_RA; LIST_FOREACH(pr, &nd_prefix, ndpr_entry) { NDPR_LOCK(pr); p.prefix = pr->ndpr_prefix; if (in6_recoverscope(&p.prefix, - &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0) + &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0) { log(LOG_ERR, "scope error in " "prefix list (%s)\n", inet_ntop(AF_INET6, - &p.prefix.sin6_addr, pbuf, sizeof (pbuf))); + &p.prefix.sin6_addr, pbuf, sizeof(pbuf))); + } p.raflags = pr->ndpr_raf; p.prefixlen = pr->ndpr_plen; p.vltime = pr->ndpr_vltime; @@ -4452,8 +4554,8 @@ nd6_sysctl_prlist SYSCTL_HANDLER_ARGS p.flags = pr->ndpr_stateflags; p.advrtrs = 0; LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) - p.advrtrs++; - error = SYSCTL_OUT(req, &p, sizeof (p)); + p.advrtrs++; + error = SYSCTL_OUT(req, &p, sizeof(p)); if (error != 0) { NDPR_UNLOCK(pr); break; @@ -4461,34 +4563,38 @@ nd6_sysctl_prlist SYSCTL_HANDLER_ARGS LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) { s6.sin6_addr = pfr->router->rtaddr; if (in6_recoverscope(&s6, &pfr->router->rtaddr, - pfr->router->ifp) != 0) + pfr->router->ifp) != 0) { log(LOG_ERR, "scope error in prefix list (%s)\n", inet_ntop(AF_INET6, &s6.sin6_addr, - pbuf, sizeof (pbuf))); - error = SYSCTL_OUT(req, &s6, sizeof (s6)); - if (error != 0) + pbuf, sizeof(pbuf))); + } + error = SYSCTL_OUT(req, &s6, sizeof(s6)); + if (error != 0) { break; + } } NDPR_UNLOCK(pr); - if (error != 0) + if (error != 0) { break; + } } } else { struct in6_prefix_32 p; - bzero(&p, sizeof (p)); + bzero(&p, sizeof(p)); p.origin = PR_ORIG_RA; LIST_FOREACH(pr, &nd_prefix, ndpr_entry) { NDPR_LOCK(pr); p.prefix = pr->ndpr_prefix; if (in6_recoverscope(&p.prefix, - &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0) + &pr->ndpr_prefix.sin6_addr, pr->ndpr_ifp) != 0) { log(LOG_ERR, "scope error in prefix list (%s)\n", inet_ntop(AF_INET6, &p.prefix.sin6_addr, - pbuf, sizeof (pbuf))); + pbuf, sizeof(pbuf))); + } p.raflags = pr->ndpr_raf; p.prefixlen = pr->ndpr_plen; p.vltime = pr->ndpr_vltime; @@ -4499,8 +4605,8 @@ nd6_sysctl_prlist SYSCTL_HANDLER_ARGS p.flags = pr->ndpr_stateflags; p.advrtrs = 0; LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) - p.advrtrs++; - error = SYSCTL_OUT(req, &p, sizeof (p)); + p.advrtrs++; + error = SYSCTL_OUT(req, &p, sizeof(p)); if (error != 0) { NDPR_UNLOCK(pr); break; @@ -4508,23 +4614,26 @@ nd6_sysctl_prlist SYSCTL_HANDLER_ARGS LIST_FOREACH(pfr, &pr->ndpr_advrtrs, pfr_entry) { s6.sin6_addr = pfr->router->rtaddr; if (in6_recoverscope(&s6, &pfr->router->rtaddr, - pfr->router->ifp) 
!= 0) + pfr->router->ifp) != 0) { log(LOG_ERR, "scope error in prefix list (%s)\n", inet_ntop(AF_INET6, &s6.sin6_addr, - pbuf, sizeof (pbuf))); - error = SYSCTL_OUT(req, &s6, sizeof (s6)); - if (error != 0) + pbuf, sizeof(pbuf))); + } + error = SYSCTL_OUT(req, &s6, sizeof(s6)); + if (error != 0) { break; + } } NDPR_UNLOCK(pr); - if (error != 0) + if (error != 0) { break; + } } } lck_mtx_unlock(nd6_mutex); - return (error); + return error; } void @@ -4536,9 +4645,10 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia) struct nd_ifinfo *ndi = NULL; ndi = ND_IFINFO(ifp); - VERIFY((NULL != ndi) && (TRUE == ndi->initialized)); - if (!(ndi->flags & ND6_IFF_DAD)) + VERIFY((NULL != ndi) && (TRUE == ndi->initialized)); + if (!(ndi->flags & ND6_IFF_DAD)) { return; + } if (optdad) { if ((ifp->if_eflags & IFEF_IPV6_ROUTER) != 0) { @@ -4554,16 +4664,18 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia) if (optdad) { if ((optdad & ND6_OPTIMISTIC_DAD_LINKLOCAL) && - IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr)) + IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr)) { flags = IN6_IFF_OPTIMISTIC; - else if ((optdad & ND6_OPTIMISTIC_DAD_AUTOCONF) && + } else if ((optdad & ND6_OPTIMISTIC_DAD_AUTOCONF) && (ia->ia6_flags & IN6_IFF_AUTOCONF)) { if (ia->ia6_flags & IN6_IFF_TEMPORARY) { - if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY) + if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY) { flags = IN6_IFF_OPTIMISTIC; + } } else if (ia->ia6_flags & IN6_IFF_SECURED) { - if (optdad & ND6_OPTIMISTIC_DAD_SECURED) + if (optdad & ND6_OPTIMISTIC_DAD_SECURED) { flags = IN6_IFF_OPTIMISTIC; + } } else { /* * Keeping the behavior for temp and CGA @@ -4581,8 +4693,9 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia) } else if ((optdad & ND6_OPTIMISTIC_DAD_DYNAMIC) && (ia->ia6_flags & IN6_IFF_DYNAMIC)) { if (ia->ia6_flags & IN6_IFF_TEMPORARY) { - if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY) + if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY) { flags = IN6_IFF_OPTIMISTIC; + } } else { flags = IN6_IFF_OPTIMISTIC; } @@ -4595,8 +4708,9 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia) */ if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr) && !(ia->ia6_flags & IN6_IFF_AUTOCONF) && - !(ia->ia6_flags & IN6_IFF_DYNAMIC)) + !(ia->ia6_flags & IN6_IFF_DYNAMIC)) { flags = IN6_IFF_OPTIMISTIC; + } } } @@ -4604,9 +4718,8 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia) ia->ia6_flags |= flags; nd6log2((LOG_DEBUG, "%s - %s ifp %s ia6_flags 0x%x\n", - __func__, - ip6_sprintf(&ia->ia_addr.sin6_addr), - if_name(ia->ia_ifp), - ia->ia6_flags)); + __func__, + ip6_sprintf(&ia->ia_addr.sin6_addr), + if_name(ia->ia_ifp), + ia->ia6_flags)); } - diff --git a/bsd/netinet6/nd6.h b/bsd/netinet6/nd6.h index 04c90e17a..d3bc920ee 100644 --- a/bsd/netinet6/nd6.h +++ b/bsd/netinet6/nd6.h @@ -55,13 +55,13 @@ */ #ifndef _NETINET6_ND6_H_ -#define _NETINET6_ND6_H_ +#define _NETINET6_ND6_H_ #include #include /* see net/route.h, or net/if_inarp.h */ #ifndef RTF_ANNOUNCE -#define RTF_ANNOUNCE RTF_PROTO2 +#define RTF_ANNOUNCE RTF_PROTO2 #endif #include @@ -73,34 +73,34 @@ #include #include -struct llinfo_nd6 { +struct llinfo_nd6 { /* * The following are protected by rnh_lock */ - struct llinfo_nd6 *ln_next; - struct llinfo_nd6 *ln_prev; - struct rtentry *ln_rt; + struct llinfo_nd6 *ln_next; + struct llinfo_nd6 *ln_prev; + struct rtentry *ln_rt; /* * The following are protected by rt_lock */ struct ifnet *ln_exclifp; /* excluded interface (prefix proxy) */ - struct mbuf *ln_hold; /* last packet until resolved/timeout */ - uint32_t ln_asked; /* # of queries already sent for this addr */ - 
short ln_state; /* reachability state */ - short ln_router; /* 2^0: ND6 router bit */ - u_int32_t ln_flags; /* flags; see below */ - u_int64_t ln_expire; /* lifetime for NDP state transition */ - u_int64_t ln_lastused; /* last used timestamp */ - struct if_llreach *ln_llreach; /* link-layer reachability record */ + struct mbuf *ln_hold; /* last packet until resolved/timeout */ + uint32_t ln_asked; /* # of queries already sent for this addr */ + short ln_state; /* reachability state */ + short ln_router; /* 2^0: ND6 router bit */ + u_int32_t ln_flags; /* flags; see below */ + u_int64_t ln_expire; /* lifetime for NDP state transition */ + u_int64_t ln_lastused; /* last used timestamp */ + struct if_llreach *ln_llreach; /* link-layer reachability record */ }; /* Values for ln_flags */ -#define ND6_LNF_TIMER_SKIP 0x1 /* modified by nd6_timer() */ -#define ND6_LNF_IN_USE 0x2 /* currently in llinfo_nd6 list */ +#define ND6_LNF_TIMER_SKIP 0x1 /* modified by nd6_timer() */ +#define ND6_LNF_IN_USE 0x2 /* currently in llinfo_nd6 list */ #endif /* BSD_KERNEL_PRIVATE */ -#define ND6_LLINFO_PURGE -3 -#define ND6_LLINFO_NOSTATE -2 +#define ND6_LLINFO_PURGE -3 +#define ND6_LLINFO_NOSTATE -2 /* * We don't need the WAITDELETE state any more, but we keep the definition * in a comment line instead of removing it. This is necessary to avoid @@ -109,49 +109,49 @@ struct llinfo_nd6 { * (20000711 jinmei@kame.net) */ /* #define ND6_LLINFO_WAITDELETE -1 */ -#define ND6_LLINFO_INCOMPLETE 0 -#define ND6_LLINFO_REACHABLE 1 -#define ND6_LLINFO_STALE 2 -#define ND6_LLINFO_DELAY 3 -#define ND6_LLINFO_PROBE 4 +#define ND6_LLINFO_INCOMPLETE 0 +#define ND6_LLINFO_REACHABLE 1 +#define ND6_LLINFO_STALE 2 +#define ND6_LLINFO_DELAY 3 +#define ND6_LLINFO_PROBE 4 #ifdef BSD_KERNEL_PRIVATE #define ND6_CACHE_STATE_TRANSITION(ln, nstate) do {\ struct rtentry *ln_rt = (ln)->ln_rt; \ if (nd6_debug >= 1) {\ - nd6log((LOG_INFO,\ - "[%s:%d]: NDP cache entry changed from %s -> %s",\ - __func__,\ - __LINE__,\ - ndcache_state2str((ln)->ln_state),\ - ndcache_state2str(nstate)));\ - if (ln_rt != NULL)\ - nd6log((LOG_INFO,\ - " for address: %s.\n",\ - ip6_sprintf(&SIN6(rt_key(ln_rt))->sin6_addr)));\ - else\ - nd6log((LOG_INFO, "\n"));\ + nd6log((LOG_INFO,\ + "[%s:%d]: NDP cache entry changed from %s -> %s",\ + __func__,\ + __LINE__,\ + ndcache_state2str((ln)->ln_state),\ + ndcache_state2str(nstate)));\ + if (ln_rt != NULL)\ + nd6log((LOG_INFO,\ + " for address: %s.\n",\ + ip6_sprintf(&SIN6(rt_key(ln_rt))->sin6_addr)));\ + else\ + nd6log((LOG_INFO, "\n"));\ }\ (ln)->ln_state = nstate;\ } while(0) -#define ND6_IS_LLINFO_PROBREACH(n) ((n)->ln_state > ND6_LLINFO_INCOMPLETE) -#define ND6_LLINFO_PERMANENT(n) \ +#define ND6_IS_LLINFO_PROBREACH(n) ((n)->ln_state > ND6_LLINFO_INCOMPLETE) +#define ND6_LLINFO_PERMANENT(n) \ (((n)->ln_expire == 0) && ((n)->ln_state > ND6_LLINFO_INCOMPLETE)) -#define ND6_EUI64_GBIT 0x01 -#define ND6_EUI64_UBIT 0x02 +#define ND6_EUI64_GBIT 0x01 +#define ND6_EUI64_UBIT 0x02 -#define ND6_EUI64_TO_IFID(in6) \ +#define ND6_EUI64_TO_IFID(in6) \ do {(in6)->s6_addr[8] ^= ND6_EUI64_UBIT; } while (0) -#define ND6_EUI64_GROUP(in6) ((in6)->s6_addr[8] & ND6_EUI64_GBIT) -#define ND6_EUI64_INDIVIDUAL(in6) (!ND6_EUI64_GROUP(in6)) -#define ND6_EUI64_LOCAL(in6) ((in6)->s6_addr[8] & ND6_EUI64_UBIT) -#define ND6_EUI64_UNIVERSAL(in6) (!ND6_EUI64_LOCAL(in6)) -#define ND6_IFID_LOCAL(in6) (!ND6_EUI64_LOCAL(in6)) -#define ND6_IFID_UNIVERSAL(in6) (!ND6_EUI64_UNIVERSAL(in6)) +#define ND6_EUI64_GROUP(in6) ((in6)->s6_addr[8] & ND6_EUI64_GBIT) 
+#define ND6_EUI64_INDIVIDUAL(in6) (!ND6_EUI64_GROUP(in6))
+#define ND6_EUI64_LOCAL(in6) ((in6)->s6_addr[8] & ND6_EUI64_UBIT)
+#define ND6_EUI64_UNIVERSAL(in6) (!ND6_EUI64_LOCAL(in6))
+#define ND6_IFID_LOCAL(in6) (!ND6_EUI64_LOCAL(in6))
+#define ND6_IFID_UNIVERSAL(in6) (!ND6_EUI64_UNIVERSAL(in6))
 
 #endif /* BSD_KERNEL_PRIVATE */
 
 #if !defined(BSD_KERNEL_PRIVATE)
@@ -161,53 +161,53 @@ struct nd_ifinfo {
 /* NOTE: nd_ifinfo is defined in nd6_var.h */
 struct nd_ifinfo_compat {
 #endif /* !BSD_KERNEL_PRIVATE */
-	u_int32_t linkmtu; /* LinkMTU */
-	u_int32_t maxmtu; /* Upper bound of LinkMTU */
-	u_int32_t basereachable; /* BaseReachableTime */
-	u_int32_t reachable; /* Reachable Time */
-	u_int32_t retrans; /* Retrans Timer */
-	u_int32_t flags; /* Flags */
-	int recalctm; /* BaseReacable re-calculation timer */
-	u_int8_t chlim; /* CurHopLimit */
+	u_int32_t linkmtu; /* LinkMTU */
+	u_int32_t maxmtu; /* Upper bound of LinkMTU */
+	u_int32_t basereachable; /* BaseReachableTime */
+	u_int32_t reachable; /* Reachable Time */
+	u_int32_t retrans; /* Retrans Timer */
+	u_int32_t flags; /* Flags */
+	int recalctm; /* BaseReachable re-calculation timer */
+	u_int8_t chlim; /* CurHopLimit */
 	u_int8_t receivedra;
 	/* the following 3 members are for privacy extension for addrconf */
 	u_int8_t randomseed0[8]; /* upper 64 bits of SHA1 digest */
 	u_int8_t randomseed1[8]; /* lower 64 bits (usually the EUI64 IFID) */
-	u_int8_t randomid[8]; /* current random ID */
+	u_int8_t randomid[8]; /* current random ID */
 };
 
-#define ND6_IFF_PERFORMNUD 0x1
 #if defined(PRIVATE)
 /*
  * APPLE: not used. Interface specific router advertisements are handled with a
  * specific ifnet flag: IFEF_ACCEPT_RTADVD
 */
-#define ND6_IFF_ACCEPT_RTADV 0x2
+#define ND6_IFF_ACCEPT_RTADV 0x2
 
 /* APPLE: NOT USED not related to ND. */
-#define ND6_IFF_PREFER_SOURCE 0x4
+#define ND6_IFF_PREFER_SOURCE 0x4
 
 /* IPv6 operation is disabled due to
  * DAD failure. (XXX: not ND-specific) */
-#define ND6_IFF_IFDISABLED 0x8
+#define ND6_IFF_IFDISABLED 0x8
 
-#define ND6_IFF_DONT_SET_IFROUTE 0x10 /* NOT USED */
 #endif /* PRIVATE */
-#define ND6_IFF_PROXY_PREFIXES 0x20
-#define ND6_IFF_IGNORE_NA 0x40
+#define ND6_IFF_PROXY_PREFIXES 0x20
+#define ND6_IFF_IGNORE_NA 0x40
 #if defined(PRIVATE)
-#define ND6_IFF_INSECURE 0x80
 #endif
-#define ND6_IFF_REPLICATED 0x100 /* sleep proxy registered */
-#define ND6_IFF_DAD 0x200 /* Perform DAD on the interface */
+#define ND6_IFF_REPLICATED 0x100 /* sleep proxy registered */
+#define ND6_IFF_DAD 0x200 /* Perform DAD on the interface */
 
 struct in6_nbrinfo {
-	char ifname[IFNAMSIZ]; /* if name, e.g.
"en0" */ + struct in6_addr addr; /* IPv6 address of the neighbor */ + long asked; /* # of queries already sent for this addr */ + int isrouter; /* if it acts as a router */ + int state; /* reachability state */ + int expire; /* lifetime for NDP state transition */ }; #if defined(BSD_KERNEL_PRIVATE) @@ -215,153 +215,153 @@ struct in6_nbrinfo_32 { char ifname[IFNAMSIZ]; struct in6_addr addr; u_int32_t asked; - int isrouter; - int state; - int expire; + int isrouter; + int state; + int expire; }; struct in6_nbrinfo_64 { char ifname[IFNAMSIZ]; struct in6_addr addr; - long asked; - int isrouter __attribute__((aligned(8))); - int state; - int expire; + long asked; + int isrouter __attribute__((aligned(8))); + int state; + int expire; } __attribute__((aligned(8))); #endif /* BSD_KERNEL_PRIVATE */ -#define DRLSTSIZ 10 -#define PRLSTSIZ 10 +#define DRLSTSIZ 10 +#define PRLSTSIZ 10 -struct in6_drlist { +struct in6_drlist { char ifname[IFNAMSIZ]; struct { - struct in6_addr rtaddr; - u_char flags; - u_short rtlifetime; - u_long expire; + struct in6_addr rtaddr; + u_char flags; + u_short rtlifetime; + u_long expire; u_short if_index; } defrouter[DRLSTSIZ]; }; #if defined(BSD_KERNEL_PRIVATE) -struct in6_drlist_32 { +struct in6_drlist_32 { char ifname[IFNAMSIZ]; struct { - struct in6_addr rtaddr; - u_char flags; - u_short rtlifetime; + struct in6_addr rtaddr; + u_char flags; + u_short rtlifetime; u_int32_t expire; u_short if_index; } defrouter[DRLSTSIZ]; }; -struct in6_drlist_64 { +struct in6_drlist_64 { char ifname[IFNAMSIZ]; struct { - struct in6_addr rtaddr; - u_char flags; - u_short rtlifetime; - u_long expire __attribute__((aligned(8))); - u_short if_index __attribute__((aligned(8))); + struct in6_addr rtaddr; + u_char flags; + u_short rtlifetime; + u_long expire __attribute__((aligned(8))); + u_short if_index __attribute__((aligned(8))); } defrouter[DRLSTSIZ] __attribute__((aligned(8))); }; #endif /* BSD_KERNEL_PRIVATE */ /* valid values for stateflags */ -#define NDDRF_INSTALLED 0x1 /* installed in the routing table */ -#define NDDRF_IFSCOPE 0x2 /* installed as a scoped route */ -#define NDDRF_STATIC 0x4 /* for internal use only */ -#define NDDRF_MAPPED 0x8 /* Default router addr is mapped to a different one for routing */ - -struct in6_defrouter { - struct sockaddr_in6 rtaddr; - u_char flags; - u_char stateflags; - u_short rtlifetime; - u_long expire; +#define NDDRF_INSTALLED 0x1 /* installed in the routing table */ +#define NDDRF_IFSCOPE 0x2 /* installed as a scoped route */ +#define NDDRF_STATIC 0x4 /* for internal use only */ +#define NDDRF_MAPPED 0x8 /* Default router addr is mapped to a different one for routing */ + +struct in6_defrouter { + struct sockaddr_in6 rtaddr; + u_char flags; + u_char stateflags; + u_short rtlifetime; + u_long expire; u_short if_index; }; #if defined(BSD_KERNEL_PRIVATE) -struct in6_defrouter_32 { - struct sockaddr_in6 rtaddr; - u_char flags; - u_char stateflags; - u_short rtlifetime; +struct in6_defrouter_32 { + struct sockaddr_in6 rtaddr; + u_char flags; + u_char stateflags; + u_short rtlifetime; u_int32_t expire; u_short if_index; }; -struct in6_defrouter_64 { - struct sockaddr_in6 rtaddr; - u_char flags; - u_char stateflags; - u_short rtlifetime; - u_long expire __attribute__((aligned(8))); - u_short if_index __attribute__((aligned(8))); +struct in6_defrouter_64 { + struct sockaddr_in6 rtaddr; + u_char flags; + u_char stateflags; + u_short rtlifetime; + u_long expire __attribute__((aligned(8))); + u_short if_index __attribute__((aligned(8))); } 
__attribute__((aligned(8))); #endif /* BSD_KERNEL_PRIVATE */ -struct in6_prlist { +struct in6_prlist { char ifname[IFNAMSIZ]; struct { - struct in6_addr prefix; + struct in6_addr prefix; struct prf_ra raflags; - u_char prefixlen; - u_char origin; - u_long vltime; - u_long pltime; - u_long expire; + u_char prefixlen; + u_char origin; + u_long vltime; + u_long pltime; + u_long expire; u_short if_index; u_short advrtrs; /* number of advertisement routers */ - struct in6_addr advrtr[DRLSTSIZ]; /* XXX: explicit limit */ + struct in6_addr advrtr[DRLSTSIZ]; /* XXX: explicit limit */ } prefix[PRLSTSIZ]; }; #if defined(BSD_KERNEL_PRIVATE) -struct in6_prlist_32 { +struct in6_prlist_32 { char ifname[IFNAMSIZ]; struct { - struct in6_addr prefix; + struct in6_addr prefix; struct prf_ra raflags; - u_char prefixlen; - u_char origin; + u_char prefixlen; + u_char origin; u_int32_t vltime; u_int32_t pltime; u_int32_t expire; u_short if_index; u_short advrtrs; - struct in6_addr advrtr[DRLSTSIZ]; + struct in6_addr advrtr[DRLSTSIZ]; } prefix[PRLSTSIZ]; }; -struct in6_prlist_64 { +struct in6_prlist_64 { char ifname[IFNAMSIZ]; struct { - struct in6_addr prefix; + struct in6_addr prefix; struct prf_ra raflags; - u_char prefixlen; - u_char origin; - u_long vltime __attribute__((aligned(8))); - u_long pltime __attribute__((aligned(8))); - u_long expire __attribute__((aligned(8))); + u_char prefixlen; + u_char origin; + u_long vltime __attribute__((aligned(8))); + u_long pltime __attribute__((aligned(8))); + u_long expire __attribute__((aligned(8))); u_short if_index; u_short advrtrs; u_int32_t pad; - struct in6_addr advrtr[DRLSTSIZ]; + struct in6_addr advrtr[DRLSTSIZ]; } prefix[PRLSTSIZ]; }; #endif /* BSD_KERNEL_PRIVATE */ struct in6_prefix { - struct sockaddr_in6 prefix; + struct sockaddr_in6 prefix; struct prf_ra raflags; - u_char prefixlen; - u_char origin; - u_long vltime; - u_long pltime; - u_long expire; + u_char prefixlen; + u_char origin; + u_long vltime; + u_long pltime; + u_long expire; u_int32_t flags; int refcnt; u_short if_index; @@ -371,10 +371,10 @@ struct in6_prefix { #if defined(BSD_KERNEL_PRIVATE) struct in6_prefix_32 { - struct sockaddr_in6 prefix; + struct sockaddr_in6 prefix; struct prf_ra raflags; - u_char prefixlen; - u_char origin; + u_char prefixlen; + u_char origin; u_int32_t vltime; u_int32_t pltime; u_int32_t expire; @@ -386,14 +386,14 @@ struct in6_prefix_32 { }; struct in6_prefix_64 { - struct sockaddr_in6 prefix; + struct sockaddr_in6 prefix; struct prf_ra raflags; - u_char prefixlen; - u_char origin; - u_long vltime __attribute__((aligned(8))); - u_long pltime __attribute__((aligned(8))); - u_long expire __attribute__((aligned(8))); - u_int32_t flags __attribute__((aligned(8))); + u_char prefixlen; + u_char origin; + u_long vltime __attribute__((aligned(8))); + u_long pltime __attribute__((aligned(8))); + u_long expire __attribute__((aligned(8))); + u_int32_t flags __attribute__((aligned(8))); int refcnt; u_short if_index; u_short advrtrs; @@ -401,78 +401,78 @@ struct in6_prefix_64 { }; #endif /* BSD_KERNEL_PRIVATE */ -struct in6_ondireq { +struct in6_ondireq { char ifname[IFNAMSIZ]; struct { - u_int32_t linkmtu; /* LinkMTU */ - u_int32_t maxmtu; /* Upper bound of LinkMTU */ + u_int32_t linkmtu; /* LinkMTU */ + u_int32_t maxmtu; /* Upper bound of LinkMTU */ u_int32_t basereachable; /* BaseReachableTime */ - u_int32_t reachable; /* Reachable Time */ - u_int32_t retrans; /* Retrans Timer */ - u_int32_t flags; /* Flags */ - int recalctm; /* BaseReacable re-calculation timer */ - 
u_int8_t chlim; /* CurHopLimit */
+		u_int32_t reachable; /* Reachable Time */
+		u_int32_t retrans; /* Retrans Timer */
+		u_int32_t flags; /* Flags */
+		int recalctm; /* BaseReachable re-calculation timer */
+		u_int8_t chlim; /* CurHopLimit */
 		u_int8_t receivedra;
 	} ndi;
 };
 
 #if !defined(BSD_KERNEL_PRIVATE)
-struct in6_ndireq {
+struct in6_ndireq {
 	char ifname[IFNAMSIZ];
 	struct nd_ifinfo ndi;
 };
 #else
-struct in6_ndireq {
+struct in6_ndireq {
 	char ifname[IFNAMSIZ];
 	struct nd_ifinfo_compat ndi;
 };
 #endif /* !BSD_KERNEL_PRIVATE */
 
-struct in6_ndifreq {
+struct in6_ndifreq {
 	char ifname[IFNAMSIZ];
 	u_long ifindex;
 };
 
-#define MAX_RTR_SOLICITATION_DELAY 1 /* 1sec */
-#define RTR_SOLICITATION_INTERVAL 4 /* 4sec */
+#define MAX_RTR_SOLICITATION_DELAY 1 /* 1sec */
+#define RTR_SOLICITATION_INTERVAL 4 /* 4sec */
 
 #if defined(BSD_KERNEL_PRIVATE)
-struct in6_ndifreq_32 {
+struct in6_ndifreq_32 {
 	char ifname[IFNAMSIZ];
 	u_int32_t ifindex;
 };
 
-struct in6_ndifreq_64 {
+struct in6_ndifreq_64 {
 	char ifname[IFNAMSIZ];
-	u_long ifindex __attribute__((aligned(8)));
+	u_long ifindex __attribute__((aligned(8)));
 };
 #endif /* BSD_KERNEL_PRIVATE */
 
 /* Prefix status */
-#define NDPRF_ONLINK 0x1
-#define NDPRF_DETACHED 0x2
-#define NDPRF_STATIC 0x100
-#define NDPRF_IFSCOPE 0x1000
-#define NDPRF_PRPROXY 0x2000
+#define NDPRF_ONLINK 0x1
+#define NDPRF_DETACHED 0x2
+#define NDPRF_STATIC 0x100
+#define NDPRF_IFSCOPE 0x1000
+#define NDPRF_PRPROXY 0x2000
 #ifdef BSD_KERNEL_PRIVATE
-#define NDPRF_PROCESSED_ONLINK 0x08000
-#define NDPRF_PROCESSED_SERVICE 0x10000
-#define NDPRF_DEFUNCT 0x20000
-#define NDPRF_CLAT46 0x40000
+#define NDPRF_PROCESSED_ONLINK 0x08000
+#define NDPRF_PROCESSED_SERVICE 0x10000
+#define NDPRF_DEFUNCT 0x20000
+#define NDPRF_CLAT46 0x40000
 #endif
 
 /* protocol constants */
-#define MAX_RTR_SOLICITATION_DELAY 1 /* 1sec */
-#define RTR_SOLICITATION_INTERVAL 4 /* 4sec */
-#define MAX_RTR_SOLICITATIONS 3
+#define MAX_RTR_SOLICITATION_DELAY 1 /* 1sec */
+#define RTR_SOLICITATION_INTERVAL 4 /* 4sec */
+#define MAX_RTR_SOLICITATIONS 3
 
-#define ND6_INFINITE_LIFETIME 0xffffffff
-#define ND6_MAX_LIFETIME 0x7fffffff
+#define ND6_INFINITE_LIFETIME 0xffffffff
+#define ND6_MAX_LIFETIME 0x7fffffff
 
 #ifdef BSD_KERNEL_PRIVATE
-#define ND_IFINFO(ifp) \
-	((ifp == NULL) ? NULL : \
-	((IN6_IFEXTRA(ifp) == NULL) ? NULL : \
+#define ND_IFINFO(ifp) \
+	((ifp == NULL) ? NULL : \
+	((IN6_IFEXTRA(ifp) == NULL) ? NULL : \
 	(&IN6_IFEXTRA(ifp)->nd_ifinfo)))
 
 /*
@@ -489,82 +489,82 @@ struct in6_ndifreq_64 {
  * else
  * linkmtu = ifp->if_mtu;
 */
-#define IN6_LINKMTU(ifp) \
-	(ifp == NULL ? IPV6_MMTU : \
-	(ND_IFINFO(ifp) == NULL || !ND_IFINFO(ifp)->initialized) ? \
-	(ifp)->if_mtu : ((ND_IFINFO(ifp)->linkmtu && \
+#define IN6_LINKMTU(ifp) \
+	(ifp == NULL ? IPV6_MMTU : \
+	(ND_IFINFO(ifp) == NULL || !ND_IFINFO(ifp)->initialized) ? \
+	(ifp)->if_mtu : ((ND_IFINFO(ifp)->linkmtu && \
 	ND_IFINFO(ifp)->linkmtu < (ifp)->if_mtu) ? ND_IFINFO(ifp)->linkmtu : \
 	((ND_IFINFO(ifp)->maxmtu && ND_IFINFO(ifp)->maxmtu < (ifp)->if_mtu) ?
\ ND_IFINFO(ifp)->maxmtu : (ifp)->if_mtu))) /* node constants */ -#define MAX_REACHABLE_TIME 3600000 /* msec */ -#define REACHABLE_TIME 30000 /* msec */ -#define RETRANS_TIMER 1000 /* msec */ -#define MIN_RANDOM_FACTOR 512 /* 1024 * 0.5 */ -#define MAX_RANDOM_FACTOR 1536 /* 1024 * 1.5 */ -#define DEF_TEMP_VALID_LIFETIME 604800 /* 1 week */ -#define DEF_TEMP_PREFERRED_LIFETIME 86400 /* 1 day */ -#define TEMPADDR_REGEN_ADVANCE 5 /* sec */ -#define MAX_TEMP_DESYNC_FACTOR 600 /* 10 min */ -#define ND_COMPUTE_RTIME(x) \ - (((MIN_RANDOM_FACTOR * (x >> 10)) + (RandomULong() & \ - ((MAX_RANDOM_FACTOR - MIN_RANDOM_FACTOR) * (x >> 10)))) /1000) +#define MAX_REACHABLE_TIME 3600000 /* msec */ +#define REACHABLE_TIME 30000 /* msec */ +#define RETRANS_TIMER 1000 /* msec */ +#define MIN_RANDOM_FACTOR 512 /* 1024 * 0.5 */ +#define MAX_RANDOM_FACTOR 1536 /* 1024 * 1.5 */ +#define DEF_TEMP_VALID_LIFETIME 604800 /* 1 week */ +#define DEF_TEMP_PREFERRED_LIFETIME 86400 /* 1 day */ +#define TEMPADDR_REGEN_ADVANCE 5 /* sec */ +#define MAX_TEMP_DESYNC_FACTOR 600 /* 10 min */ +#define ND_COMPUTE_RTIME(x) \ + (((MIN_RANDOM_FACTOR * (x >> 10)) + (RandomULong() & \ + ((MAX_RANDOM_FACTOR - MIN_RANDOM_FACTOR) * (x >> 10)))) /1000) /* prefix expiry times */ -#define ND6_PREFIX_EXPIRY_UNSPEC -1 -#define ND6_PREFIX_EXPIRY_NEVER 0 +#define ND6_PREFIX_EXPIRY_UNSPEC -1 +#define ND6_PREFIX_EXPIRY_NEVER 0 TAILQ_HEAD(nd_drhead, nd_defrouter); -struct nd_defrouter { +struct nd_defrouter { decl_lck_mtx_data(, nddr_lock); TAILQ_ENTRY(nd_defrouter) dr_entry; - struct in6_addr rtaddr; - u_int32_t nddr_refcount; - u_int32_t nddr_debug; - u_int64_t expire; - u_int64_t base_calendartime; /* calendar time at creation */ - u_int64_t base_uptime; /* uptime at creation */ - u_char flags; /* flags on RA message */ - u_char stateflags; - u_short rtlifetime; - int err; - struct ifnet *ifp; - struct in6_addr rtaddr_mapped; /* Mapped gateway address for routing */ + struct in6_addr rtaddr; + u_int32_t nddr_refcount; + u_int32_t nddr_debug; + u_int64_t expire; + u_int64_t base_calendartime; /* calendar time at creation */ + u_int64_t base_uptime; /* uptime at creation */ + u_char flags; /* flags on RA message */ + u_char stateflags; + u_short rtlifetime; + int err; + struct ifnet *ifp; + struct in6_addr rtaddr_mapped; /* Mapped gateway address for routing */ void (*nddr_trace)(struct nd_defrouter *, int); /* trace callback fn */ }; -#define NDDR_LOCK_ASSERT_HELD(_nddr) \ +#define NDDR_LOCK_ASSERT_HELD(_nddr) \ LCK_MTX_ASSERT(&(_nddr)->nddr_lock, LCK_MTX_ASSERT_OWNED) -#define NDDR_LOCK_ASSERT_NOTHELD(_nddr) \ +#define NDDR_LOCK_ASSERT_NOTHELD(_nddr) \ LCK_MTX_ASSERT(&(_nddr)->nddr_lock, LCK_MTX_ASSERT_NOTOWNED) -#define NDDR_LOCK(_nddr) \ +#define NDDR_LOCK(_nddr) \ lck_mtx_lock(&(_nddr)->nddr_lock) -#define NDDR_LOCK_SPIN(_nddr) \ +#define NDDR_LOCK_SPIN(_nddr) \ lck_mtx_lock_spin(&(_nddr)->nddr_lock) -#define NDDR_CONVERT_LOCK(_nddr) do { \ - NDPR_LOCK_ASSERT_HELD(_nddr); \ - lck_mtx_convert_spin(&(_nddr)->nddr_lock); \ +#define NDDR_CONVERT_LOCK(_nddr) do { \ + NDPR_LOCK_ASSERT_HELD(_nddr); \ + lck_mtx_convert_spin(&(_nddr)->nddr_lock); \ } while (0) -#define NDDR_UNLOCK(_nddr) \ +#define NDDR_UNLOCK(_nddr) \ lck_mtx_unlock(&(_nddr)->nddr_lock) -#define NDDR_ADDREF(_nddr) \ +#define NDDR_ADDREF(_nddr) \ nddr_addref(_nddr, 0) -#define NDDR_ADDREF_LOCKED(_nddr) \ +#define NDDR_ADDREF_LOCKED(_nddr) \ nddr_addref(_nddr, 1) -#define NDDR_REMREF(_nddr) do { \ - (void) nddr_remref(_nddr, 0); \ +#define NDDR_REMREF(_nddr) do { \ + (void) 
nddr_remref(_nddr, 0); \ } while (0) -#define NDDR_REMREF_LOCKED(_nddr) \ +#define NDDR_REMREF_LOCKED(_nddr) \ nddr_remref(_nddr, 1) /* define struct prproxy_sols_tree */ @@ -572,77 +572,77 @@ RB_HEAD(prproxy_sols_tree, nd6_prproxy_soltgt); struct nd_prefix { decl_lck_mtx_data(, ndpr_lock); - u_int32_t ndpr_refcount; /* reference count */ - u_int32_t ndpr_debug; /* see ifa_debug flags */ - struct ifnet *ndpr_ifp; - struct rtentry *ndpr_rt; + u_int32_t ndpr_refcount; /* reference count */ + u_int32_t ndpr_debug; /* see ifa_debug flags */ + struct ifnet *ndpr_ifp; + struct rtentry *ndpr_rt; LIST_ENTRY(nd_prefix) ndpr_entry; struct sockaddr_in6 ndpr_prefix; /* prefix */ struct in6_addr ndpr_mask; /* netmask derived from the prefix */ struct in6_addr ndpr_addr; /* address that is derived from the prefix */ - u_int32_t ndpr_vltime; /* advertised valid lifetime */ - u_int32_t ndpr_pltime; /* advertised preferred lifetime */ - u_int64_t ndpr_preferred; /* preferred time of the prefix */ - u_int64_t ndpr_expire; /* expiration time of the prefix */ - u_int64_t ndpr_lastupdate; /* rx time of last advertisement */ - u_int64_t ndpr_base_calendartime; /* calendar time at creation */ - u_int64_t ndpr_base_uptime; /* uptime at creation */ - struct prf_ra ndpr_flags; - unsigned int ndpr_genid; /* protects ndpr_advrtrs */ - u_int32_t ndpr_stateflags; /* actual state flags */ + u_int32_t ndpr_vltime; /* advertised valid lifetime */ + u_int32_t ndpr_pltime; /* advertised preferred lifetime */ + u_int64_t ndpr_preferred; /* preferred time of the prefix */ + u_int64_t ndpr_expire; /* expiration time of the prefix */ + u_int64_t ndpr_lastupdate; /* rx time of last advertisement */ + u_int64_t ndpr_base_calendartime; /* calendar time at creation */ + u_int64_t ndpr_base_uptime; /* uptime at creation */ + struct prf_ra ndpr_flags; + unsigned int ndpr_genid; /* protects ndpr_advrtrs */ + u_int32_t ndpr_stateflags; /* actual state flags */ /* list of routers that advertise the prefix: */ LIST_HEAD(pr_rtrhead, nd_pfxrouter) ndpr_advrtrs; - u_char ndpr_plen; - int ndpr_addrcnt; /* reference counter from addresses */ - u_int32_t ndpr_allmulti_cnt; /* total all-multi reqs */ - u_int32_t ndpr_prproxy_sols_cnt; /* total # of proxied NS */ + u_char ndpr_plen; + int ndpr_addrcnt; /* reference counter from addresses */ + u_int32_t ndpr_allmulti_cnt; /* total all-multi reqs */ + u_int32_t ndpr_prproxy_sols_cnt; /* total # of proxied NS */ struct prproxy_sols_tree ndpr_prproxy_sols; /* tree of proxied NS */ void (*ndpr_trace)(struct nd_prefix *, int); /* trace callback fn */ }; -#define ndpr_next ndpr_entry.le_next +#define ndpr_next ndpr_entry.le_next -#define ndpr_raf ndpr_flags -#define ndpr_raf_onlink ndpr_flags.onlink -#define ndpr_raf_auto ndpr_flags.autonomous -#define ndpr_raf_router ndpr_flags.router +#define ndpr_raf ndpr_flags +#define ndpr_raf_onlink ndpr_flags.onlink +#define ndpr_raf_auto ndpr_flags.autonomous +#define ndpr_raf_router ndpr_flags.router /* * We keep expired prefix for certain amount of time, for validation purposes. 
* 1800s = MaxRtrAdvInterval */ -#define NDPR_KEEP_EXPIRED (1800 * 2) +#define NDPR_KEEP_EXPIRED (1800 * 2) -#define NDPR_LOCK_ASSERT_HELD(_ndpr) \ +#define NDPR_LOCK_ASSERT_HELD(_ndpr) \ LCK_MTX_ASSERT(&(_ndpr)->ndpr_lock, LCK_MTX_ASSERT_OWNED) -#define NDPR_LOCK_ASSERT_NOTHELD(_ndpr) \ +#define NDPR_LOCK_ASSERT_NOTHELD(_ndpr) \ LCK_MTX_ASSERT(&(_ndpr)->ndpr_lock, LCK_MTX_ASSERT_NOTOWNED) -#define NDPR_LOCK(_ndpr) \ +#define NDPR_LOCK(_ndpr) \ lck_mtx_lock(&(_ndpr)->ndpr_lock) -#define NDPR_LOCK_SPIN(_ndpr) \ +#define NDPR_LOCK_SPIN(_ndpr) \ lck_mtx_lock_spin(&(_ndpr)->ndpr_lock) -#define NDPR_CONVERT_LOCK(_ndpr) do { \ - NDPR_LOCK_ASSERT_HELD(_ndpr); \ - lck_mtx_convert_spin(&(_ndpr)->ndpr_lock); \ +#define NDPR_CONVERT_LOCK(_ndpr) do { \ + NDPR_LOCK_ASSERT_HELD(_ndpr); \ + lck_mtx_convert_spin(&(_ndpr)->ndpr_lock); \ } while (0) -#define NDPR_UNLOCK(_ndpr) \ +#define NDPR_UNLOCK(_ndpr) \ lck_mtx_unlock(&(_ndpr)->ndpr_lock) -#define NDPR_ADDREF(_ndpr) \ +#define NDPR_ADDREF(_ndpr) \ ndpr_addref(_ndpr, 0) -#define NDPR_ADDREF_LOCKED(_ndpr) \ +#define NDPR_ADDREF_LOCKED(_ndpr) \ ndpr_addref(_ndpr, 1) -#define NDPR_REMREF(_ndpr) do { \ - (void) ndpr_remref(_ndpr, 0); \ +#define NDPR_REMREF(_ndpr) do { \ + (void) ndpr_remref(_ndpr, 0); \ } while (0) -#define NDPR_REMREF_LOCKED(_ndpr) \ +#define NDPR_REMREF_LOCKED(_ndpr) \ ndpr_remref(_ndpr, 1) /* @@ -650,30 +650,30 @@ struct nd_prefix { * from inet6 sysctl function */ struct inet6_ndpr_msghdr { - u_short inpm_msglen; /* to skip over non-understood messages */ - u_char inpm_version; /* future binary compatibility */ - u_char inpm_type; /* message type */ + u_short inpm_msglen; /* to skip over non-understood messages */ + u_char inpm_version; /* future binary compatibility */ + u_char inpm_type; /* message type */ struct in6_addr inpm_prefix; - u_int32_t prm_vltim; - u_int32_t prm_pltime; - u_int32_t prm_expire; - u_int32_t prm_preferred; + u_int32_t prm_vltim; + u_int32_t prm_pltime; + u_int32_t prm_expire; + u_int32_t prm_preferred; struct in6_prflags prm_flags; - u_short prm_index; /* index for associated ifp */ - u_char prm_plen; /* length of prefix in bits */ + u_short prm_index; /* index for associated ifp */ + u_char prm_plen; /* length of prefix in bits */ }; -#define prm_raf_onlink prm_flags.prf_ra.onlink -#define prm_raf_auto prm_flags.prf_ra.autonomous +#define prm_raf_onlink prm_flags.prf_ra.onlink +#define prm_raf_auto prm_flags.prf_ra.autonomous -#define prm_statef_onlink prm_flags.prf_state.onlink +#define prm_statef_onlink prm_flags.prf_state.onlink -#define prm_rrf_decrvalid prm_flags.prf_rr.decrvalid -#define prm_rrf_decrprefd prm_flags.prf_rr.decrprefd +#define prm_rrf_decrvalid prm_flags.prf_rr.decrvalid +#define prm_rrf_decrprefd prm_flags.prf_rr.decrprefd struct nd_pfxrouter { LIST_ENTRY(nd_pfxrouter) pfr_entry; -#define pfr_next pfr_entry.le_next +#define pfr_next pfr_entry.le_next struct nd_defrouter *router; }; @@ -709,8 +709,8 @@ struct nd6_ra_prefix { }; /* ND6 router advertisement valid bits */ -#define KEV_ND6_DATA_VALID_MTU (0x1 << 0) -#define KEV_ND6_DATA_VALID_PREFIX (0x1 << 1) +#define KEV_ND6_DATA_VALID_MTU (0x1 << 0) +#define KEV_ND6_DATA_VALID_PREFIX (0x1 << 1) struct kev_nd6_ra_data { u_int32_t mtu; @@ -758,27 +758,27 @@ extern int nd6_debug; extern int nd6_onlink_ns_rfc4861; extern int nd6_optimistic_dad; -#define nd6log0(x) do { log x; } while (0) -#define nd6log(x) do { if (nd6_debug >= 1) log x; } while (0) -#define nd6log2(x) do { if (nd6_debug >= 2) log x; } while (0) +#define nd6log0(x) do { log x; } 
while (0) +#define nd6log(x) do { if (nd6_debug >= 1) log x; } while (0) +#define nd6log2(x) do { if (nd6_debug >= 2) log x; } while (0) -#define ND6_OPTIMISTIC_DAD_LINKLOCAL (1 << 0) -#define ND6_OPTIMISTIC_DAD_AUTOCONF (1 << 1) -#define ND6_OPTIMISTIC_DAD_TEMPORARY (1 << 2) -#define ND6_OPTIMISTIC_DAD_DYNAMIC (1 << 3) -#define ND6_OPTIMISTIC_DAD_SECURED (1 << 4) -#define ND6_OPTIMISTIC_DAD_MANUAL (1 << 5) +#define ND6_OPTIMISTIC_DAD_LINKLOCAL (1 << 0) +#define ND6_OPTIMISTIC_DAD_AUTOCONF (1 << 1) +#define ND6_OPTIMISTIC_DAD_TEMPORARY (1 << 2) +#define ND6_OPTIMISTIC_DAD_DYNAMIC (1 << 3) +#define ND6_OPTIMISTIC_DAD_SECURED (1 << 4) +#define ND6_OPTIMISTIC_DAD_MANUAL (1 << 5) /* nd6_rtr.c */ extern int nd6_defifindex; -extern int ip6_desync_factor; /* seconds */ +extern int ip6_desync_factor; /* seconds */ /* ND6_INFINITE_LIFETIME does not apply to temporary addresses */ extern u_int32_t ip6_temp_preferred_lifetime; /* seconds */ extern u_int32_t ip6_temp_valid_lifetime; /* seconds */ extern int ip6_temp_regen_advance; /* seconds */ union nd_opts { - struct nd_opt_hdr *nd_opt_array[16]; /* max = target address list */ + struct nd_opt_hdr *nd_opt_array[16]; /* max = target address list */ struct { struct nd_opt_hdr *zero; struct nd_opt_hdr *src_lladdr; @@ -796,22 +796,22 @@ union nd_opts { struct nd_opt_hdr *__res13; struct nd_opt_nonce *nonce; struct nd_opt_hdr *__res15; - struct nd_opt_hdr *search; /* multiple opts */ - struct nd_opt_hdr *last; /* multiple opts */ + struct nd_opt_hdr *search; /* multiple opts */ + struct nd_opt_hdr *last; /* multiple opts */ int done; struct nd_opt_prefix_info *pi_end; /* multiple opts, end */ } nd_opt_each; }; -#define nd_opts_src_lladdr nd_opt_each.src_lladdr -#define nd_opts_tgt_lladdr nd_opt_each.tgt_lladdr -#define nd_opts_pi nd_opt_each.pi_beg -#define nd_opts_pi_end nd_opt_each.pi_end -#define nd_opts_rh nd_opt_each.rh -#define nd_opts_mtu nd_opt_each.mtu -#define nd_opts_nonce nd_opt_each.nonce -#define nd_opts_search nd_opt_each.search -#define nd_opts_last nd_opt_each.last -#define nd_opts_done nd_opt_each.done +#define nd_opts_src_lladdr nd_opt_each.src_lladdr +#define nd_opts_tgt_lladdr nd_opt_each.tgt_lladdr +#define nd_opts_pi nd_opt_each.pi_beg +#define nd_opts_pi_end nd_opt_each.pi_end +#define nd_opts_rh nd_opt_each.rh +#define nd_opts_mtu nd_opt_each.mtu +#define nd_opts_nonce nd_opt_each.nonce +#define nd_opts_search nd_opt_each.search +#define nd_opts_last nd_opt_each.last +#define nd_opts_done nd_opt_each.done /* XXX: need nd6_var.h?? */ /* nd6.c */ @@ -971,8 +971,8 @@ extern errno_t nd6_lookup_ipv6(ifnet_t interface, */ extern int nd6_send_opstate; -#define ND6_SEND_OPMODE_DISABLED 0 -#define ND6_SEND_OPMODE_CGA_QUIET 1 +#define ND6_SEND_OPMODE_DISABLED 0 +#define ND6_SEND_OPMODE_CGA_QUIET 1 #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET6_ND6_H_ */ diff --git a/bsd/netinet6/nd6_nbr.c b/bsd/netinet6/nd6_nbr.c index 4ef91c84c..9adb4feae 100644 --- a/bsd/netinet6/nd6_nbr.c +++ b/bsd/netinet6/nd6_nbr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2018 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -113,47 +113,47 @@ static void dad_remref(struct dadq *); static struct dadq *nd6_dad_attach(struct dadq *, struct ifaddr *); static void nd6_dad_detach(struct dadq *, struct ifaddr *); -static int dad_maxtry = 15; /* max # of *tries* to transmit DAD packet */ +static int dad_maxtry = 15; /* max # of *tries* to transmit DAD packet */ -static unsigned int dad_size; /* size of zone element */ -static struct zone *dad_zone; /* zone for dadq */ +static unsigned int dad_size; /* size of zone element */ +static struct zone *dad_zone; /* zone for dadq */ -#define DAD_ZONE_MAX 64 /* maximum elements in zone */ -#define DAD_ZONE_NAME "nd6_dad" /* zone name */ +#define DAD_ZONE_MAX 64 /* maximum elements in zone */ +#define DAD_ZONE_NAME "nd6_dad" /* zone name */ -#define DAD_LOCK_ASSERT_HELD(_dp) \ +#define DAD_LOCK_ASSERT_HELD(_dp) \ LCK_MTX_ASSERT(&(_dp)->dad_lock, LCK_MTX_ASSERT_OWNED) -#define DAD_LOCK_ASSERT_NOTHELD(_dp) \ +#define DAD_LOCK_ASSERT_NOTHELD(_dp) \ LCK_MTX_ASSERT(&(_dp)->dad_lock, LCK_MTX_ASSERT_NOTOWNED) -#define DAD_LOCK(_dp) \ +#define DAD_LOCK(_dp) \ lck_mtx_lock(&(_dp)->dad_lock) -#define DAD_LOCK_SPIN(_dp) \ +#define DAD_LOCK_SPIN(_dp) \ lck_mtx_lock_spin(&(_dp)->dad_lock) -#define DAD_CONVERT_LOCK(_dp) do { \ - DAD_LOCK_ASSERT_HELD(_dp); \ - lck_mtx_convert_spin(&(_dp)->dad_lock); \ +#define DAD_CONVERT_LOCK(_dp) do { \ + DAD_LOCK_ASSERT_HELD(_dp); \ + lck_mtx_convert_spin(&(_dp)->dad_lock); \ } while (0) -#define DAD_UNLOCK(_dp) \ +#define DAD_UNLOCK(_dp) \ lck_mtx_unlock(&(_dp)->dad_lock) -#define DAD_ADDREF(_dp) \ +#define DAD_ADDREF(_dp) \ dad_addref(_dp, 0) -#define DAD_ADDREF_LOCKED(_dp) \ +#define DAD_ADDREF_LOCKED(_dp) \ dad_addref(_dp, 1) -#define DAD_REMREF(_dp) \ +#define DAD_REMREF(_dp) \ dad_remref(_dp) extern lck_mtx_t *dad6_mutex; extern lck_mtx_t *nd6_mutex; -static int nd6_llreach_base = 30; /* seconds */ +static int nd6_llreach_base = 30; /* seconds */ static struct sockaddr_in6 hostrtmask; @@ -182,7 +182,7 @@ nd6_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, if (nd6_llreach_base != 0 && (ln->ln_expire != 0 || (ifp->if_eflags & IFEF_IPV6_ND6ALT) != 0) && !(rt->rt_ifp->if_flags & IFF_LOOPBACK) && - ifp->if_addrlen == IF_LLREACH_MAXLEN && /* Ethernet */ + ifp->if_addrlen == IF_LLREACH_MAXLEN && /* Ethernet */ alen == ifp->if_addrlen) { struct if_llreach *lr; const char *why = NULL, *type = ""; @@ -207,7 +207,7 @@ nd6_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, why = " for different target HW address; " "using new llreach record"; } else { - lr->lr_probes = 0; /* reset probe count */ + lr->lr_probes = 0; /* reset probe count */ IFLR_UNLOCK(lr); if (solicited) { why = " for same target HW address; " @@ -220,9 +220,10 @@ nd6_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, lr = ln->ln_llreach = ifnet_llreach_alloc(ifp, ETHERTYPE_IPV6, addr, alen, nd6_llreach_base); if (lr != NULL) { - lr->lr_probes = 0; /* reset probe count */ - if (why == NULL) + lr->lr_probes = 0; /* reset probe count */ + if (why == NULL) { why = "creating new llreach record"; + } } } @@ -231,7 +232,7 @@ nd6_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, nd6log((LOG_DEBUG, "%s: %s%s for %s\n", if_name(ifp), type, why, inet_ntop(AF_INET6, - &SIN6(rt_key(rt))->sin6_addr, tmp, sizeof (tmp)))); + &SIN6(rt_key(rt))->sin6_addr, tmp, sizeof(tmp)))); } } } @@ -239,8 +240,9 @@ nd6_llreach_alloc(struct rtentry *rt, struct ifnet *ifp, void *addr, void nd6_llreach_use(struct 
llinfo_nd6 *ln) { - if (ln->ln_llreach != NULL) + if (ln->ln_llreach != NULL) { ln->ln_lastused = net_uptime(); + } } /* @@ -276,14 +278,15 @@ nd6_ns_input( /* Expect 32-bit aligned data pointer on strict-align platforms */ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); - IP6_EXTHDR_CHECK(m, off, icmp6len, return); + IP6_EXTHDR_CHECK(m, off, icmp6len, return ); nd_ns = (struct nd_neighbor_solicit *)((caddr_t)ip6 + off); m->m_pkthdr.pkt_flags |= PKTF_INET6_RESOLVE; ip6 = mtod(m, struct ip6_hdr *); /* adjust pointer for safety */ taddr6 = nd_ns->nd_ns_target; - if (in6_setscope(&taddr6, ifp, NULL) != 0) + if (in6_setscope(&taddr6, ifp, NULL) != 0) { goto bad; + } if (ip6->ip6_hlim != IPV6_MAXHLIM) { nd6log((LOG_ERR, @@ -304,7 +307,7 @@ nd6_ns_input( ; /* good */ } else { nd6log((LOG_INFO, "nd6_ns_input: bad DAD packet " - "(wrong ip6 dst)\n")); + "(wrong ip6 dst)\n")); goto bad; } } else if (!nd6_onlink_ns_rfc4861) { @@ -322,7 +325,7 @@ nd6_ns_input( src_sa6.sin6_addr = saddr6; if (!nd6_is_addr_neighbor(&src_sa6, ifp, 0)) { nd6log((LOG_INFO, "nd6_ns_input: " - "NS packet from non-neighbor\n")); + "NS packet from non-neighbor\n")); goto bad; } } @@ -362,10 +365,11 @@ nd6_ns_input( * In implementation, we add target link-layer address by default. * We do not add one in MUST NOT cases. */ - if (!IN6_IS_ADDR_MULTICAST(&daddr6)) + if (!IN6_IS_ADDR_MULTICAST(&daddr6)) { tlladdr = 0; - else + } else { tlladdr = 1; + } /* * Target address (taddr6) must be either: @@ -397,7 +401,7 @@ nd6_ns_input( * proxy NDP for single entry */ ifa = (struct ifaddr *)in6ifa_ifpforlinklocal( - ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); + ifp, IN6_IFF_NOTREADY | IN6_IFF_ANYCAST); if (ifa) { proxy = 1; proxydl = *SDL(rt->rt_gateway); @@ -422,11 +426,12 @@ nd6_ns_input( * * Forwarding associated with NDPRF_PRPROXY may apply. */ - if (ip6_forwarding && nd6_prproxy) + if (ip6_forwarding && nd6_prproxy) { nd6_prproxy_ns_input(ifp, &saddr6, lladdr, lladdrlen, &daddr6, &taddr6, (ndopts.nd_opts_nonce == NULL) ? NULL : ndopts.nd_opts_nonce->nd_opt_nonce); + } goto freeit; } IFA_LOCK(ifa); @@ -444,14 +449,14 @@ nd6_ns_input( nd6log((LOG_INFO, "nd6_ns_input: lladdrlen mismatch for %s " "(if %d, NS packet %d)\n", - ip6_sprintf(&taddr6), ifp->if_addrlen, lladdrlen - 2)); + ip6_sprintf(&taddr6), ifp->if_addrlen, lladdrlen - 2)); goto bad; } if (IN6_ARE_ADDR_EQUAL(&myaddr6, &saddr6)) { nd6log((LOG_INFO, - "nd6_ns_input: duplicate IP6 address %s\n", - ip6_sprintf(&saddr6))); + "nd6_ns_input: duplicate IP6 address %s\n", + ip6_sprintf(&saddr6))); goto freeit; } @@ -484,8 +489,9 @@ nd6_ns_input( if (!is_dad_probe && (dadprogress & IN6_IFF_OPTIMISTIC) != 0) { oflgclr = 1; } else { - if (is_dad_probe) + if (is_dad_probe) { nd6_dad_ns_input(ifa, lladdr, lladdrlen, ndopts.nd_opts_nonce); + } goto freeit; } @@ -505,14 +511,16 @@ nd6_ns_input( */ if (is_dad_probe) { saddr6 = in6addr_linklocal_allnodes; - if (in6_setscope(&saddr6, ifp, NULL) != 0) + if (in6_setscope(&saddr6, ifp, NULL) != 0) { goto bad; - if ((dadprogress & IN6_IFF_OPTIMISTIC) == 0) + } + if ((dadprogress & IN6_IFF_OPTIMISTIC) == 0) { nd6_na_output(ifp, &saddr6, &taddr6, ((anycast || proxy || !tlladdr) ? 0 : ND_NA_FLAG_OVERRIDE) | (advrouter ? ND_NA_FLAG_ROUTER : 0), tlladdr, proxy ? (struct sockaddr *)&proxydl : NULL); + } goto freeit; } @@ -523,20 +531,22 @@ nd6_ns_input( ((anycast || proxy || !tlladdr || oflgclr) ? 0 : ND_NA_FLAG_OVERRIDE) | (advrouter ? ND_NA_FLAG_ROUTER : 0) | ND_NA_FLAG_SOLICITED, tlladdr, proxy ? 
(struct sockaddr *)&proxydl : NULL); - freeit: +freeit: m_freem(m); - if (ifa != NULL) + if (ifa != NULL) { IFA_REMREF(ifa); + } return; - bad: +bad: nd6log((LOG_ERR, "nd6_ns_input: src=%s\n", ip6_sprintf(&saddr6))); nd6log((LOG_ERR, "nd6_ns_input: dst=%s\n", ip6_sprintf(&daddr6))); nd6log((LOG_ERR, "nd6_ns_input: tgt=%s\n", ip6_sprintf(&taddr6))); icmp6stat.icp6s_badns++; m_freem(m); - if (ifa != NULL) + if (ifa != NULL) { IFA_REMREF(ifa); + } } /* @@ -557,8 +567,8 @@ nd6_ns_output( struct ifnet *ifp, const struct in6_addr *daddr6, const struct in6_addr *taddr6, - struct llinfo_nd6 *ln, /* for source address determination */ - uint8_t *nonce) /* duplicated address detection */ + struct llinfo_nd6 *ln, /* for source address determination */ + uint8_t *nonce) /* duplicated address detection */ { struct mbuf *m; struct ip6_hdr *ip6; @@ -566,7 +576,7 @@ nd6_ns_output( struct in6_ifaddr *ia = NULL; struct in6_addr *src, src_in, src_storage; struct ip6_moptions *im6o = NULL; - struct ifnet *outif = NULL; + struct ifnet *outif = NULL; int icmp6len; int maxlen; int flags; @@ -575,8 +585,9 @@ nd6_ns_output( struct ip6_out_args ip6oa; u_int32_t rtflags = 0; - if ((ifp->if_eflags & IFEF_IPV6_ND6ALT) || IN6_IS_ADDR_MULTICAST(taddr6)) + if ((ifp->if_eflags & IFEF_IPV6_ND6ALT) || IN6_IS_ADDR_MULTICAST(taddr6)) { return; + } bzero(&ro, sizeof(ro)); bzero(&ip6oa, sizeof(ip6oa)); @@ -599,7 +610,7 @@ nd6_ns_output( return; } - MGETHDR(m, M_DONTWAIT, MT_DATA); /* XXXMAC: mac_create_mbuf_linklayer() probably */ + MGETHDR(m, M_DONTWAIT, MT_DATA); /* XXXMAC: mac_create_mbuf_linklayer() probably */ if (m && max_linkhdr + maxlen >= MHLEN) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { @@ -607,8 +618,9 @@ nd6_ns_output( m = NULL; } } - if (m == NULL) + if (m == NULL) { return; + } m->m_pkthdr.rcvif = NULL; if (daddr6 == NULL || IN6_IS_ADDR_MULTICAST(daddr6)) { @@ -627,7 +639,7 @@ nd6_ns_output( icmp6len = sizeof(*nd_ns); m->m_pkthdr.len = m->m_len = sizeof(*ip6) + icmp6len; - m->m_data += max_linkhdr; /* or MH_ALIGN() equivalent? */ + m->m_data += max_linkhdr; /* or MH_ALIGN() equivalent? */ /* fill neighbor solicitation packet */ ip6 = mtod(m, struct ip6_hdr *); @@ -637,17 +649,18 @@ nd6_ns_output( /* ip6->ip6_plen will be set later */ ip6->ip6_nxt = IPPROTO_ICMPV6; ip6->ip6_hlim = IPV6_MAXHLIM; - if (daddr6) + if (daddr6) { ip6->ip6_dst = *daddr6; - else { + } else { ip6->ip6_dst.s6_addr16[0] = IPV6_ADDR_INT16_MLL; ip6->ip6_dst.s6_addr16[1] = 0; ip6->ip6_dst.s6_addr32[1] = 0; ip6->ip6_dst.s6_addr32[2] = IPV6_ADDR_INT32_ONE; ip6->ip6_dst.s6_addr32[3] = taddr6->s6_addr32[3]; ip6->ip6_dst.s6_addr8[12] = 0xff; - if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0) + if (in6_setscope(&ip6->ip6_dst, ifp, NULL) != 0) { goto bad; + } } if (nonce == NULL) { /* @@ -665,7 +678,7 @@ nd6_ns_output( * - saddr6 belongs to the outgoing interface. * Otherwise, we perform the source address selection as usual. */ - struct ip6_hdr *hip6; /* hold ip6 */ + struct ip6_hdr *hip6; /* hold ip6 */ struct in6_addr *hsrc = NULL; /* Caller holds ref on this route */ @@ -678,10 +691,11 @@ nd6_ns_output( if (ln->ln_hold != NULL) { hip6 = mtod(ln->ln_hold, struct ip6_hdr *); /* XXX pullup? 
*/ - if (sizeof (*hip6) < ln->ln_hold->m_len) + if (sizeof(*hip6) < ln->ln_hold->m_len) { hsrc = &hip6->ip6_src; - else + } else { hsrc = NULL; + } } /* Update probe count, if applicable */ if (ln->ln_llreach != NULL) { @@ -806,7 +820,7 @@ nd6_ns_output( ip6->ip6_plen = htons((u_short)icmp6len); nd_ns->nd_ns_cksum = 0; nd_ns->nd_ns_cksum - = in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), icmp6len); + = in6_cksum(m, IPPROTO_ICMPV6, sizeof(*ip6), icmp6len); flags = nonce ? IPV6_UNSPECSRC : 0; flags |= IPV6_OUTARGS; @@ -822,8 +836,9 @@ nd6_ns_output( * the packet accordingly so that the driver can find out, * in case it needs to perform driver-specific action(s). */ - if (rtflags & RTF_ROUTER) + if (rtflags & RTF_ROUTER) { m->m_pkthdr.pkt_flags |= PKTF_RESOLVE_RTR; + } if (ifp->if_eflags & IFEF_TXSTART) { /* @@ -842,13 +857,15 @@ nd6_ns_output( icmp6stat.icp6s_outhist[ND_NEIGHBOR_SOLICIT]++; exit: - if (im6o != NULL) + if (im6o != NULL) { IM6O_REMREF(im6o); + } - ROUTE_RELEASE(&ro); /* we don't cache this route. */ + ROUTE_RELEASE(&ro); /* we don't cache this route. */ - if (ia != NULL) + if (ia != NULL) { IFA_REMREF(&ia->ia_ifa); + } return; bad: @@ -890,7 +907,7 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) if ((ifp->if_eflags & IFEF_IPV6_ND6ALT) != 0) { nd6log((LOG_INFO, "nd6_na_input: on ND6ALT interface!\n")); - return; + goto freeit; } /* Expect 32-bit aligned data pointer on strict-align platforms */ @@ -904,7 +921,7 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) goto bad; } - IP6_EXTHDR_CHECK(m, off, icmp6len, return); + IP6_EXTHDR_CHECK(m, off, icmp6len, return ); nd_na = (struct nd_neighbor_advert *)((caddr_t)ip6 + off); m->m_pkthdr.pkt_flags |= PKTF_INET6_RESOLVE; @@ -914,21 +931,22 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) is_override = ((flags & ND_NA_FLAG_OVERRIDE) != 0); taddr6 = nd_na->nd_na_target; - if (in6_setscope(&taddr6, ifp, NULL)) - goto bad; /* XXX: impossible */ - + if (in6_setscope(&taddr6, ifp, NULL)) { + goto bad; /* XXX: impossible */ + } if (IN6_IS_ADDR_MULTICAST(&taddr6)) { nd6log((LOG_ERR, "nd6_na_input: invalid target address %s\n", ip6_sprintf(&taddr6))); goto bad; } - if (IN6_IS_ADDR_MULTICAST(&daddr6)) + if (IN6_IS_ADDR_MULTICAST(&daddr6)) { if (is_solicited) { nd6log((LOG_ERR, "nd6_na_input: a solicited adv is multicasted\n")); goto bad; } + } icmp6len -= sizeof(*nd_na); nd6_option_init(nd_na + 1, icmp6len, &ndopts); @@ -947,19 +965,21 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) nd6log((LOG_INFO, "nd6_na_input: lladdrlen mismatch for %s " "(if %d, NA packet %d)\n", - ip6_sprintf(&taddr6), ifp->if_addrlen, - lladdrlen - 2)); + ip6_sprintf(&taddr6), ifp->if_addrlen, + lladdrlen - 2)); goto bad; } } m = nd6_dad_na_input(m, ifp, &taddr6, lladdr, lladdrlen); - if (m == NULL) + if (m == NULL) { return; + } /* Forwarding associated with NDPRF_PRPROXY may apply. */ - if (ip6_forwarding && nd6_prproxy) + if (ip6_forwarding && nd6_prproxy) { nd6_prproxy_na_input(ifp, &saddr6, &daddr6, &taddr6, flags); + } /* * If no neighbor cache entry is found, NA SHOULD silently be @@ -968,19 +988,22 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) * another interface (in case we are doing prefix proxying.) 
*/ if ((rt = nd6_lookup(&taddr6, 0, ifp, 0)) == NULL) { - if (!ip6_forwarding || !nd6_prproxy) + if (!ip6_forwarding || !nd6_prproxy) { goto freeit; + } - if ((rt = nd6_lookup(&taddr6, 0, NULL, 0)) == NULL) + if ((rt = nd6_lookup(&taddr6, 0, NULL, 0)) == NULL) { goto freeit; + } RT_LOCK_ASSERT_HELD(rt); if (rt->rt_ifp != ifp) { /* * Purge any link-layer info caching. */ - if (rt->rt_llinfo_purge != NULL) + if (rt->rt_llinfo_purge != NULL) { rt->rt_llinfo_purge(rt); + } /* Adjust route ref count for the interfaces */ if (rt->rt_if_ref_fn != NULL) { @@ -995,8 +1018,9 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) * If rmx_mtu is not locked, update it * to the MTU used by the new interface. */ - if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) + if (!(rt->rt_rmx.rmx_locks & RTV_MTU)) { rt->rt_rmx.rmx_mtu = rt->rt_ifp->if_mtu; + } } } @@ -1071,9 +1095,10 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) lck_mtx_lock(rnh_lock); rnh = rt_tables[AF_INET6]; - if (rnh != NULL) + if (rnh != NULL) { (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev); + } lck_mtx_unlock(rnh_lock); lck_mtx_lock(nd6_mutex); pfxlist_onlink_check(); @@ -1086,16 +1111,18 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) /* * Check if the link-layer address has changed or not. */ - if (lladdr == NULL) + if (lladdr == NULL) { llchange = 0; - else { + } else { if (sdl->sdl_alen) { - if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen)) + if (bcmp(lladdr, LLADDR(sdl), ifp->if_addrlen)) { llchange = 1; - else + } else { llchange = 0; - } else + } + } else { llchange = 1; + } } /* @@ -1129,9 +1156,9 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); goto freeit; - } else if (is_override /* (2a) */ - || (!is_override && (lladdr && !llchange)) /* (2b) */ - || !lladdr) { /* (2c) */ + } else if (is_override /* (2a) */ + || (!is_override && (lladdr && !llchange)) /* (2b) */ + || !lladdr) { /* (2c) */ /* * Update link-local address, if any. 
*/ @@ -1201,9 +1228,10 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) lck_mtx_lock(rnh_lock); rnh = rt_tables[AF_INET6]; - if (rnh != NULL) + if (rnh != NULL) { (void) rnh->rnh_walktree(rnh, route_event_walktree, (void *)&rt_ev); + } lck_mtx_unlock(rnh_lock); RT_LOCK(rt); } @@ -1264,7 +1292,7 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) sizeof(nd6_ndalive.link_data.if_name)); ev_msg.dv[0].data_ptr = &nd6_ndalive; ev_msg.dv[0].data_length = - sizeof(nd6_ndalive); + sizeof(nd6_ndalive); dlil_post_complete_msg(NULL, &ev_msg); } @@ -1288,7 +1316,7 @@ nd6_na_input(struct mbuf *m, int off, int icmp6len) */ m_hold = ln->ln_hold; ln->ln_hold = NULL; - for ( ; m_hold; m_hold = m_hold_next) { + for (; m_hold; m_hold = m_hold_next) { m_hold_next = m_hold->m_nextpkt; m_hold->m_nextpkt = NULL; /* @@ -1331,8 +1359,8 @@ nd6_na_output( const struct in6_addr *daddr6_0, const struct in6_addr *taddr6, uint32_t flags, - int tlladdr, /* 1 if include target link-layer address */ - struct sockaddr *sdl0) /* sockaddr_dl (= proxy NA) or NULL */ + int tlladdr, /* 1 if include target link-layer address */ + struct sockaddr *sdl0) /* sockaddr_dl (= proxy NA) or NULL */ { struct mbuf *m; struct ip6_hdr *ip6; @@ -1344,12 +1372,12 @@ nd6_na_output( struct in6_ifaddr *ia; struct sockaddr_in6 dst_sa; int icmp6len, maxlen, error; - struct ifnet *outif = NULL; + struct ifnet *outif = NULL; struct ip6_out_args ip6oa; bzero(&ro, sizeof(ro)); - daddr6 = *daddr6_0; /* make a local copy for modification */ + daddr6 = *daddr6_0; /* make a local copy for modification */ bzero(&ip6oa, sizeof(ip6oa)); ip6oa.ip6oa_boundif = ifp->if_index; @@ -1371,7 +1399,7 @@ nd6_na_output( return; } - MGETHDR(m, M_DONTWAIT, MT_DATA); /* XXXMAC: mac_create_mbuf_linklayer() probably */ + MGETHDR(m, M_DONTWAIT, MT_DATA); /* XXXMAC: mac_create_mbuf_linklayer() probably */ if (m && max_linkhdr + maxlen >= MHLEN) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { @@ -1379,8 +1407,9 @@ nd6_na_output( m = NULL; } } - if (m == NULL) + if (m == NULL) { return; + } m->m_pkthdr.rcvif = NULL; if (IN6_IS_ADDR_MULTICAST(&daddr6)) { @@ -1399,7 +1428,7 @@ nd6_na_output( icmp6len = sizeof(*nd_na); m->m_pkthdr.len = m->m_len = sizeof(struct ip6_hdr) + icmp6len; - m->m_data += max_linkhdr; /* or MH_ALIGN() equivalent? */ + m->m_data += max_linkhdr; /* or MH_ALIGN() equivalent? */ /* fill neighbor advertisement packet */ ip6 = mtod(m, struct ip6_hdr *); @@ -1415,12 +1444,14 @@ nd6_na_output( daddr6.s6_addr32[1] = 0; daddr6.s6_addr32[2] = 0; daddr6.s6_addr32[3] = IPV6_ADDR_INT32_ONE; - if (in6_setscope(&daddr6, ifp, NULL)) + if (in6_setscope(&daddr6, ifp, NULL)) { goto bad; + } flags &= ~ND_NA_FLAG_SOLICITED; - } else + } else { ip6->ip6_dst = daddr6; + } bzero(&dst_sa, sizeof(struct sockaddr_in6)); dst_sa.sin6_family = AF_INET6; @@ -1447,8 +1478,9 @@ nd6_na_output( */ ia = in6ifa_ifpwithaddr(ifp, src); if (ia != NULL) { - if (ia->ia6_flags & IN6_IFF_OPTIMISTIC) + if (ia->ia6_flags & IN6_IFF_OPTIMISTIC) { flags &= ~ND_NA_FLAG_OVERRIDE; + } IFA_REMREF(&ia->ia_ifa); } @@ -1470,13 +1502,14 @@ nd6_na_output( * lladdr in sdl0. If we are not proxying (sending NA for * my address) use lladdr configured for the interface. 
*/ - if (sdl0 == NULL) + if (sdl0 == NULL) { mac = nd6_ifptomac(ifp); - else if (sdl0->sa_family == AF_LINK) { + } else if (sdl0->sa_family == AF_LINK) { struct sockaddr_dl *sdl; sdl = (struct sockaddr_dl *)(void *)sdl0; - if (sdl->sdl_alen == ifp->if_addrlen) + if (sdl->sdl_alen == ifp->if_addrlen) { mac = LLADDR(sdl); + } } } if (tlladdr && mac) { @@ -1493,14 +1526,15 @@ nd6_na_output( nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR; nd_opt->nd_opt_len = optlen >> 3; bcopy(mac, (caddr_t)(nd_opt + 1), ifp->if_addrlen); - } else + } else { flags &= ~ND_NA_FLAG_OVERRIDE; + } ip6->ip6_plen = htons((u_short)icmp6len); nd_na->nd_na_flags_reserved = flags; nd_na->nd_na_cksum = 0; nd_na->nd_na_cksum = - in6_cksum(m, IPPROTO_ICMPV6, sizeof(struct ip6_hdr), icmp6len); + in6_cksum(m, IPPROTO_ICMPV6, sizeof(struct ip6_hdr), icmp6len); m->m_pkthdr.pkt_flags |= PKTF_INET6_RESOLVE; @@ -1520,8 +1554,9 @@ nd6_na_output( icmp6stat.icp6s_outhist[ND_NEIGHBOR_ADVERT]++; exit: - if (im6o != NULL) + if (im6o != NULL) { IM6O_REMREF(im6o); + } ROUTE_RELEASE(&ro); return; @@ -1552,7 +1587,7 @@ nd6_ifptomac( #endif case IFT_BRIDGE: case IFT_ISO88025: - return ((caddr_t)IF_LLADDR(ifp)); + return (caddr_t)IF_LLADDR(ifp); default: return NULL; } @@ -1561,20 +1596,20 @@ nd6_ifptomac( TAILQ_HEAD(dadq_head, dadq); struct dadq { decl_lck_mtx_data(, dad_lock); - u_int32_t dad_refcount; /* reference count */ + u_int32_t dad_refcount; /* reference count */ int dad_attached; TAILQ_ENTRY(dadq) dad_list; struct ifaddr *dad_ifa; - int dad_count; /* max NS to send */ - int dad_ns_tcount; /* # of trials to send NS */ - int dad_ns_ocount; /* NS sent so far */ + int dad_count; /* max NS to send */ + int dad_ns_tcount; /* # of trials to send NS */ + int dad_ns_ocount; /* NS sent so far */ int dad_ns_icount; int dad_na_icount; - int dad_ns_lcount; /* looped back NS */ + int dad_ns_lcount; /* looped back NS */ int dad_loopbackprobe; /* probing state for loopback detection */ uint8_t dad_lladdr[ETHER_ADDR_LEN]; uint8_t dad_lladdrlen; -#define ND_OPT_NONCE_LEN32 \ +#define ND_OPT_NONCE_LEN32 \ ((ND_OPT_NONCE_LEN + sizeof(uint32_t) - 1)/sizeof(uint32_t)) uint32_t dad_nonce[ND_OPT_NONCE_LEN32]; }; @@ -1588,7 +1623,7 @@ nd6_nbr_init(void) TAILQ_INIT(&dadq); - dad_size = sizeof (struct dadq); + dad_size = sizeof(struct dadq); dad_zone = zinit(dad_size, DAD_ZONE_MAX * dad_size, 0, DAD_ZONE_NAME); if (dad_zone == NULL) { panic("%s: failed allocating %s", __func__, DAD_ZONE_NAME); @@ -1600,8 +1635,9 @@ nd6_nbr_init(void) bzero(&hostrtmask, sizeof hostrtmask); hostrtmask.sin6_family = AF_INET6; hostrtmask.sin6_len = sizeof hostrtmask; - for (i = 0; i < sizeof hostrtmask.sin6_addr; ++i) + for (i = 0; i < sizeof hostrtmask.sin6_addr; ++i) { hostrtmask.sin6_addr.s6_addr[i] = 0xff; + } } static struct dadq * @@ -1641,14 +1677,13 @@ nd6_dad_find(struct ifaddr *ifa, struct nd_opt_nonce *nonce) break; } lck_mtx_unlock(dad6_mutex); - return (dp); + return dp; } void nd6_dad_stoptimer( struct ifaddr *ifa) { - untimeout((void (*)(void *))nd6_dad_timer, (void *)ifa); } @@ -1658,7 +1693,7 @@ nd6_dad_stoptimer( void nd6_dad_start( struct ifaddr *ifa, - int *tick_delay) /* minimum delay ticks for IFF_UP event */ + int *tick_delay) /* minimum delay ticks for IFF_UP event */ { struct in6_ifaddr *ia = (struct in6_ifaddr *)ifa; struct dadq *dp; @@ -1678,10 +1713,10 @@ nd6_dad_start( IFA_LOCK(&ia->ia_ifa); if (!(ia->ia6_flags & IN6_IFF_DADPROGRESS)) { nd6log0((LOG_DEBUG, - "nd6_dad_start: not a tentative or optimistic address " - "%s(%s)\n", - 
ip6_sprintf(&ia->ia_addr.sin6_addr), - ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???")); + "nd6_dad_start: not a tentative or optimistic address " + "%s(%s)\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???")); IFA_UNLOCK(&ia->ia_ifa); return; } @@ -1691,8 +1726,9 @@ nd6_dad_start( return; } IFA_UNLOCK(&ia->ia_ifa); - if (ifa->ifa_ifp == NULL) + if (ifa->ifa_ifp == NULL) { panic("nd6_dad_start: ifa->ifa_ifp == NULL"); + } if (!(ifa->ifa_ifp->if_flags & IFF_UP) || (ifa->ifa_ifp->if_eflags & IFEF_IPV6_ND6ALT)) { return; @@ -1706,9 +1742,9 @@ nd6_dad_start( dp = zalloc(dad_zone); if (dp == NULL) { nd6log0((LOG_ERR, "nd6_dad_start: memory allocation failed for " - "%s(%s)\n", - ip6_sprintf(&ia->ia_addr.sin6_addr), - ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???")); + "%s(%s)\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???")); return; } bzero(dp, dad_size); @@ -1743,16 +1779,17 @@ nd6_dad_start( } else { int ntick; - if (*tick_delay == 0) + if (*tick_delay == 0) { ntick = random() % (MAX_RTR_SOLICITATION_DELAY * hz); - else + } else { ntick = *tick_delay + random() % (hz / 2); + } *tick_delay = ntick; timeout((void (*)(void *))nd6_dad_timer, (void *)ifa, - ntick); + ntick); } - DAD_REMREF(dp); /* drop our reference */ + DAD_REMREF(dp); /* drop our reference */ } static struct dadq * @@ -1761,7 +1798,7 @@ nd6_dad_attach(struct dadq *dp, struct ifaddr *ifa) lck_mtx_lock(dad6_mutex); DAD_LOCK(dp); dp->dad_ifa = ifa; - IFA_ADDREF(ifa); /* for dad_ifa */ + IFA_ADDREF(ifa); /* for dad_ifa */ dp->dad_count = ip6_dad_count; dp->dad_ns_icount = dp->dad_na_icount = 0; dp->dad_ns_ocount = dp->dad_ns_tcount = 0; @@ -1769,13 +1806,13 @@ nd6_dad_attach(struct dadq *dp, struct ifaddr *ifa) VERIFY(!dp->dad_attached); dp->dad_attached = 1; dp->dad_lladdrlen = 0; - DAD_ADDREF_LOCKED(dp); /* for caller */ - DAD_ADDREF_LOCKED(dp); /* for dadq_head list */ + DAD_ADDREF_LOCKED(dp); /* for caller */ + DAD_ADDREF_LOCKED(dp); /* for dadq_head list */ TAILQ_INSERT_TAIL(&dadq, (struct dadq *)dp, dad_list); DAD_UNLOCK(dp); lck_mtx_unlock(dad6_mutex); - return (dp); + return dp; } static void @@ -1795,7 +1832,7 @@ nd6_dad_detach(struct dadq *dp, struct ifaddr *ifa) DAD_UNLOCK(dp); lck_mtx_unlock(dad6_mutex); if (detached) { - DAD_REMREF(dp); /* drop dadq_head reference */ + DAD_REMREF(dp); /* drop dadq_head reference */ } } @@ -1816,7 +1853,7 @@ nd6_dad_stop(struct ifaddr *ifa) untimeout((void (*)(void *))nd6_dad_timer, (void *)ifa); nd6_dad_detach(dp, ifa); - DAD_REMREF(dp); /* drop our reference */ + DAD_REMREF(dp); /* drop our reference */ } static void @@ -1828,17 +1865,20 @@ nd6_unsol_na_output(struct ifaddr *ifa) if ((ifp->if_flags & IFF_UP) == 0 || (ifp->if_flags & IFF_RUNNING) == 0 || - (ifp->if_eflags & IFEF_IPV6_ND6ALT) != 0) + (ifp->if_eflags & IFEF_IPV6_ND6ALT) != 0) { return; + } IFA_LOCK_SPIN(&ia->ia_ifa); taddr6 = ia->ia_addr.sin6_addr; IFA_UNLOCK(&ia->ia_ifa); - if (in6_setscope(&taddr6, ifp, NULL) != 0) + if (in6_setscope(&taddr6, ifp, NULL) != 0) { return; + } saddr6 = in6addr_linklocal_allnodes; - if (in6_setscope(&saddr6, ifp, NULL) != 0) + if (in6_setscope(&saddr6, ifp, NULL) != 0) { return; + } nd6log((LOG_INFO, "%s: sending unsolicited NA\n", if_name(ifa->ifa_ifp))); @@ -1874,17 +1914,17 @@ nd6_dad_timer(struct ifaddr *ifa) IFA_LOCK(&ia->ia_ifa); if (ia->ia6_flags & IN6_IFF_DUPLICATED) { nd6log0((LOG_ERR, "nd6_dad_timer: called with duplicated address " - "%s(%s)\n", - ip6_sprintf(&ia->ia_addr.sin6_addr), - ifa->ifa_ifp ? 
if_name(ifa->ifa_ifp) : "???")); + "%s(%s)\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???")); IFA_UNLOCK(&ia->ia_ifa); goto done; } if ((ia->ia6_flags & IN6_IFF_DADPROGRESS) == 0) { nd6log0((LOG_ERR, "nd6_dad_timer: not a tentative or optimistic " - "address %s(%s)\n", - ip6_sprintf(&ia->ia_addr.sin6_addr), - ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???")); + "address %s(%s)\n", + ip6_sprintf(&ia->ia_addr.sin6_addr), + ifa->ifa_ifp ? if_name(ifa->ifa_ifp) : "???")); IFA_UNLOCK(&ia->ia_ifa); goto done; } @@ -1895,7 +1935,7 @@ nd6_dad_timer(struct ifaddr *ifa) if (dp->dad_ns_tcount > dad_maxtry) { DAD_UNLOCK(dp); nd6log0((LOG_INFO, "%s: could not run DAD, driver problem?\n", - if_name(ifa->ifa_ifp))); + if_name(ifa->ifa_ifp))); nd6_dad_detach(dp, ifa); goto done; @@ -1920,7 +1960,7 @@ nd6_dad_timer(struct ifaddr *ifa) * See what we've got. */ if (dp->dad_na_icount > 0 || dp->dad_ns_icount) { - /* We've seen NS or NA, means DAD has failed. */ + /* We've seen NS or NA, means DAD has failed. */ DAD_UNLOCK(dp); nd6log0((LOG_INFO, "%s: duplicate IPv6 address %s [timer]\n", @@ -1933,7 +1973,7 @@ nd6_dad_timer(struct ifaddr *ifa) dp->dad_ns_lcount > dp->dad_loopbackprobe) { dp->dad_loopbackprobe = dp->dad_ns_lcount; dp->dad_count = - dp->dad_ns_ocount + dad_maxtry - 1; + dp->dad_ns_ocount + dad_maxtry - 1; DAD_UNLOCK(dp); ndi = ND_IFINFO(ifa->ifa_ifp); VERIFY(ndi != NULL && ndi->initialized); @@ -1942,10 +1982,10 @@ nd6_dad_timer(struct ifaddr *ifa) lck_mtx_unlock(&ndi->lock); /* - * Sec. 4.1 in RFC 7527 requires transmission of - * additional probes until the loopback condition - * becomes clear when a looped back probe is detected. - */ + * Sec. 4.1 in RFC 7527 requires transmission of + * additional probes until the loopback condition + * becomes clear when a looped back probe is detected. + */ nd6log0((LOG_INFO, "%s: a looped back NS message is " "detected during DAD for %s. " @@ -1986,7 +2026,7 @@ nd6_dad_timer(struct ifaddr *ifa) ip6_sprintf(&ia->ia_addr.sin6_addr), txunsolna ? ", tx unsolicited NA with O=1" : ".")); - if (dp->dad_ns_lcount > 0) + if (dp->dad_ns_lcount > 0) { nd6log0((LOG_DEBUG, "%s: DAD completed while " "a looped back NS message is detected " @@ -1994,6 +2034,7 @@ nd6_dad_timer(struct ifaddr *ifa) __func__, ip6_sprintf(&ia->ia_addr.sin6_addr), if_name(ia->ia_ifp))); + } in6_post_msg(ia->ia_ifp, KEV_INET6_NEW_USER_ADDR, ia, dp->dad_lladdr); @@ -2002,8 +2043,9 @@ nd6_dad_timer(struct ifaddr *ifa) } done: - if (dp != NULL) - DAD_REMREF(dp); /* drop our reference */ + if (dp != NULL) { + DAD_REMREF(dp); /* drop our reference */ + } } void @@ -2039,53 +2081,55 @@ nd6_dad_duplicated(struct ifaddr *ifa) * very sure that hardware addresses are supposed to be unique. 
*/ switch (ifp->if_type) { - case IFT_BRIDGE: - case IFT_ETHER: - case IFT_FDDI: - case IFT_ATM: - case IFT_IEEE1394: + case IFT_BRIDGE: + case IFT_ETHER: + case IFT_FDDI: + case IFT_ATM: + case IFT_IEEE1394: #ifdef IFT_IEEE80211 - case IFT_IEEE80211: + case IFT_IEEE80211: #endif + /* + * Check if our hardware address matches the + * link layer information received in the + * NS/NA + */ + llifa = ifp->if_lladdr; + IFA_LOCK(llifa); + sdl = (struct sockaddr_dl *)(void *) + llifa->ifa_addr; + if (lladdrlen == sdl->sdl_alen && + bcmp(lladdr, LLADDR(sdl), lladdrlen) == 0) { + candisable = TRUE; + } + IFA_UNLOCK(llifa); + + in6 = ia->ia_addr.sin6_addr; + if (in6_iid_from_hw(ifp, &in6) != 0) { + break; + } + + /* Refine decision about whether IPv6 can be disabled */ + if (candisable && + !IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr, &in6)) { /* - * Check if our hardware address matches the - * link layer information received in the - * NS/NA + * Apply this logic only to the embedded MAC + * address form of link-local IPv6 address. */ - llifa = ifp->if_lladdr; - IFA_LOCK(llifa); - sdl = (struct sockaddr_dl *)(void *) - llifa->ifa_addr; - if (lladdrlen == sdl->sdl_alen && - bcmp(lladdr, LLADDR(sdl), lladdrlen) == 0) - candisable = TRUE; - IFA_UNLOCK(llifa); - - in6 = ia->ia_addr.sin6_addr; - if (in6_iid_from_hw(ifp, &in6) != 0) - break; - - /* Refine decision about whether IPv6 can be disabled */ - if (candisable && - !IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr, &in6)) { - /* - * Apply this logic only to the embedded MAC - * address form of link-local IPv6 address. - */ - candisable = FALSE; - } else if (lladdr == NULL && - IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr, &in6)) { - /* - * We received a NA with no target link-layer - * address option. This means that someone else - * has our address. Mark it as a hardware - * duplicate so we disable IPv6 later on. - */ - candisable = TRUE; - } - break; - default: - break; + candisable = FALSE; + } else if (lladdr == NULL && + IN6_ARE_ADDR_EQUAL(&ia->ia_addr.sin6_addr, &in6)) { + /* + * We received a NA with no target link-layer + * address option. This means that someone else + * has our address. Mark it as a hardware + * duplicate so we disable IPv6 later on. + */ + candisable = TRUE; + } + break; + default: + break; } } DAD_UNLOCK(dp); @@ -2127,7 +2171,7 @@ nd6_dad_duplicated(struct ifaddr *ifa) */ in6_post_msg(ifp, KEV_INET6_NEW_USER_ADDR, ia, dp->dad_lladdr); nd6_dad_detach(dp, ifa); - DAD_REMREF(dp); /* drop our reference */ + DAD_REMREF(dp); /* drop our reference */ } static void @@ -2155,8 +2199,9 @@ nd6_dad_ns_output(struct dadq *dp, struct ifaddr *ifa) taddr6 = ia->ia_addr.sin6_addr; IFA_UNLOCK(&ia->ia_ifa); if (dad_enhanced != 0 && !(ifp->if_flags & IFF_POINTOPOINT)) { - for (i = 0; i < ND_OPT_NONCE_LEN32; i++) + for (i = 0; i < ND_OPT_NONCE_LEN32; i++) { dp->dad_nonce[i] = RandomULong(); + } /* * XXXHRS: Note that in the case that * DupAddrDetectTransmits > 1, multiple NS messages with @@ -2187,12 +2232,14 @@ nd6_dad_ns_input(struct ifaddr *ifa, char *lladdr, VERIFY(ifa != NULL); /* Ignore Nonce option when Enhanced DAD is disabled. */ - if (dad_enhanced == 0) + if (dad_enhanced == 0) { ndopt_nonce = NULL; + } dp = nd6_dad_find(ifa, ndopt_nonce); - if (dp == NULL) + if (dp == NULL) { return; + } DAD_LOCK(dp); ++dp->dad_ns_icount; @@ -2209,13 +2256,13 @@ nd6_dad_ns_input(struct ifaddr *ifa, char *lladdr, * * @param m is the pointer to the packet's mbuf * @param ifp is the pointer to the interface on which packet - * was receicved. 
+ * was received. * @param taddr is pointer to target's IPv6 address * @param lladdr is target's link layer information * @param lladdrlen is target's linklayer length * * @return NULL if the packet is consumed by DAD processing, else - * pointer to the mbuf. + * pointer to the mbuf. */ static struct mbuf * nd6_dad_na_input(struct mbuf *m, struct ifnet *ifp, struct in6_addr *taddr, @@ -2228,8 +2275,9 @@ nd6_dad_na_input(struct mbuf *m, struct ifnet *ifp, struct in6_addr *taddr, boolean_t replicated; ifa = (struct ifaddr *) in6ifa_ifpwithaddr(ifp, taddr); - if (ifa == NULL) + if (ifa == NULL) { return m; + } replicated = FALSE; @@ -2267,7 +2315,7 @@ nd6_dad_na_input(struct mbuf *m, struct ifnet *ifp, struct in6_addr *taddr, if (lladdr != NULL && lladdrlen >= ETHER_ADDR_LEN) { struct ip6aux *ip6a = ip6_findaux(m); if (ip6a && (ip6a->ip6a_flags & IP6A_HASEEN) != 0 && - bcmp(ip6a->ip6a_ehsrc, lladdr, ETHER_ADDR_LEN) != 0) { + bcmp(ip6a->ip6a_ehsrc, lladdr, ETHER_ADDR_LEN) != 0) { IFA_UNLOCK(ifa); nd6log((LOG_ERR, "%s: ignoring duplicate NA on %s " "[eh_src != tgtlladdr]\n", __func__, if_name(ifp))); @@ -2307,17 +2355,19 @@ done: static void dad_addref(struct dadq *dp, int locked) { - if (!locked) + if (!locked) { DAD_LOCK_SPIN(dp); - else + } else { DAD_LOCK_ASSERT_HELD(dp); + } if (++dp->dad_refcount == 0) { panic("%s: dad %p wraparound refcnt\n", __func__, dp); /* NOTREACHED */ } - if (!locked) + if (!locked) { DAD_UNLOCK(dp); + } } static void @@ -2326,8 +2376,9 @@ dad_remref(struct dadq *dp) struct ifaddr *ifa; DAD_LOCK_SPIN(dp); - if (dp->dad_refcount == 0) + if (dp->dad_refcount == 0) { panic("%s: dad %p negative refcnt\n", __func__, dp); + } --dp->dad_refcount; if (dp->dad_refcount > 0) { DAD_UNLOCK(dp); @@ -2342,7 +2393,7 @@ dad_remref(struct dadq *dp) } if ((ifa = dp->dad_ifa) != NULL) { - IFA_REMREF(ifa); /* drop dad_ifa reference */ + IFA_REMREF(ifa); /* drop dad_ifa reference */ dp->dad_ifa = NULL; } @@ -2354,8 +2405,9 @@ void nd6_llreach_set_reachable(struct ifnet *ifp, void *addr, unsigned int alen) { /* Nothing more to do if it's disabled */ - if (nd6_llreach_base == 0) + if (nd6_llreach_base == 0) { return; + } ifnet_llreach_set_reachable(ifp, ETHERTYPE_IPV6, addr, alen); } @@ -2415,13 +2467,14 @@ nd6_alt_node_addr_decompose(struct ifnet *ifp, struct sockaddr *sa, bcopy(sa, sdl, sa->sa_len); sin6->sin6_scope_id = sdla->sdl_index; - if (sin6->sin6_scope_id == 0) + if (sin6->sin6_scope_id == 0) { sin6->sin6_scope_id = ifp->if_index; + } in6->s6_addr[0] = 0xfe; in6->s6_addr[1] = 0x80; - if (sdla->sdl_alen == EUI64_LENGTH) + if (sdla->sdl_alen == EUI64_LENGTH) { bcopy(lla, &in6->s6_addr[8], EUI64_LENGTH); - else { + } else { VERIFY(sdla->sdl_alen == ETHER_ADDR_LEN); in6->s6_addr[8] = ((uint8_t) lla[0] ^ ND6_EUI64_UBIT); @@ -2448,12 +2501,13 @@ nd6_alt_node_present(struct ifnet *ifp, struct sockaddr_in6 *sin6, { struct rtentry *rt; struct llinfo_nd6 *ln; - struct if_llreach *lr = NULL; + struct if_llreach *lr = NULL; const uint16_t temp_embedded_id = sin6->sin6_addr.s6_addr16[1]; if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr) && - (temp_embedded_id == 0)) + (temp_embedded_id == 0)) { sin6->sin6_addr.s6_addr16[1] = htons(ifp->if_index); + } nd6_cache_lladdr(ifp, &sin6->sin6_addr, LLADDR(sdl), sdl->sdl_alen, ND_NEIGHBOR_ADVERT, 0); @@ -2465,8 +2519,9 @@ nd6_alt_node_present(struct ifnet *ifp, struct sockaddr_in6 *sin6, ifp->if_index); /* Restore the address that was passed to us */ - if (temp_embedded_id == 0) + if (temp_embedded_id == 0) { sin6->sin6_addr.s6_addr16[1] = 0; + } if (rt
!= NULL) { RT_LOCK(rt); @@ -2512,8 +2567,9 @@ nd6_alt_node_absent(struct ifnet *ifp, struct sockaddr_in6 *sin6) ip6_sprintf(&sin6->sin6_addr))); if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr) && - (temp_embedded_id == 0)) + (temp_embedded_id == 0)) { sin6->sin6_addr.s6_addr16[1] = htons(ifp->if_index); + } LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_NOTOWNED); lck_mtx_lock(rnh_lock); @@ -2521,16 +2577,17 @@ nd6_alt_node_absent(struct ifnet *ifp, struct sockaddr_in6 *sin6) rt = rtalloc1_scoped_locked((struct sockaddr *)sin6, 0, 0, ifp->if_index); - /* Restore the address that was passed to us */ - if (temp_embedded_id == 0) + /* Restore the address that was passed to us */ + if (temp_embedded_id == 0) { sin6->sin6_addr.s6_addr16[1] = 0; + } if (rt != NULL) { RT_LOCK(rt); - if (!(rt->rt_flags & (RTF_CLONING|RTF_PRCLONING)) && - (rt->rt_flags & (RTF_HOST|RTF_LLINFO|RTF_WASCLONED)) == - (RTF_HOST|RTF_LLINFO|RTF_WASCLONED)) { + if (!(rt->rt_flags & (RTF_CLONING | RTF_PRCLONING)) && + (rt->rt_flags & (RTF_HOST | RTF_LLINFO | RTF_WASCLONED)) == + (RTF_HOST | RTF_LLINFO | RTF_WASCLONED)) { rt->rt_flags |= RTF_CONDEMNED; RT_UNLOCK(rt); diff --git a/bsd/netinet6/nd6_prproxy.c b/bsd/netinet6/nd6_prproxy.c index bb7f1448b..a3006a929 100644 --- a/bsd/netinet6/nd6_prproxy.c +++ b/bsd/netinet6/nd6_prproxy.c @@ -108,11 +108,11 @@ struct nd6_prproxy_prelist { SLIST_ENTRY(nd6_prproxy_prelist) ndprl_le; - struct nd_prefix *ndprl_pr; /* prefix */ - struct nd_prefix *ndprl_up; /* non-NULL for upstream */ - struct ifnet *ndprl_fwd_ifp; /* outgoing interface */ - boolean_t ndprl_sol; /* unicast solicitor? */ - struct in6_addr ndprl_sol_saddr; /* solicitor's address */ + struct nd_prefix *ndprl_pr; /* prefix */ + struct nd_prefix *ndprl_up; /* non-NULL for upstream */ + struct ifnet *ndprl_fwd_ifp; /* outgoing interface */ + boolean_t ndprl_sol; /* unicast solicitor? */ + struct in6_addr ndprl_sol_saddr; /* solicitor's address */ }; /* @@ -120,8 +120,8 @@ struct nd6_prproxy_prelist { */ struct nd6_prproxy_solsrc { TAILQ_ENTRY(nd6_prproxy_solsrc) solsrc_tqe; - struct in6_addr solsrc_saddr; /* soliciting (src) address */ - struct ifnet *solsrc_ifp; /* iface where NS arrived on */ + struct in6_addr solsrc_saddr; /* soliciting (src) address */ + struct ifnet *solsrc_ifp; /* iface where NS arrived on */ }; /* @@ -130,10 +130,10 @@ struct nd6_prproxy_solsrc { struct nd6_prproxy_soltgt { RB_ENTRY(nd6_prproxy_soltgt) soltgt_link; /* RB tree links */ struct soltgt_key_s { - struct in6_addr taddr; /* solicited (tgt) address */ + struct in6_addr taddr; /* solicited (tgt) address */ } soltgt_key; - u_int64_t soltgt_expire; /* expiration time */ - u_int32_t soltgt_cnt; /* total # of solicitors */ + u_int64_t soltgt_expire; /* expiration time */ + u_int32_t soltgt_cnt; /* total # of solicitors */ TAILQ_HEAD(, nd6_prproxy_solsrc) soltgt_q; }; @@ -162,38 +162,38 @@ RB_PROTOTYPE_SC_PREV(__private_extern__, prproxy_sols_tree, nd6_prproxy_soltgt, /* * Time (in seconds) before a target record expires (is idle). */ -#define ND6_TGT_SOLS_EXPIRE 5 +#define ND6_TGT_SOLS_EXPIRE 5 /* * Maximum number of queued soliciting (source) records per target. */ -#define ND6_MAX_SRC_SOLS_DEFAULT 4 +#define ND6_MAX_SRC_SOLS_DEFAULT 4 /* * Maximum number of queued solicited (target) records per prefix. 
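The three constants above bound the prefix proxy's solicitation bookkeeping: a target record idles out after 5 seconds, holds at most 4 queued solicitors, and a prefix tracks at most 8 targets. A sketch of the per-target cap and expiry, flattening the kernel's TAILQ/RB-tree bookkeeping into a fixed array; the struct and function names here are hypothetical stand-ins:

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <netinet/in.h>

#define TGT_SOLS_EXPIRE 5 /* seconds, as ND6_TGT_SOLS_EXPIRE above */
#define MAX_SRC_SOLS    4 /* as ND6_MAX_SRC_SOLS_DEFAULT above     */

/* Hypothetical flattening of nd6_prproxy_soltgt plus its solsrc queue. */
struct soltgt {
	struct in6_addr taddr;              /* solicited (tgt) address    */
	time_t          expire;             /* expiration time            */
	unsigned        cnt;                /* queued solicitors          */
	struct in6_addr srcq[MAX_SRC_SOLS]; /* soliciting (src) addresses */
};

/* Returns 0 if queued, -1 if the per-target cap was hit. */
static int
soltgt_enqueue(struct soltgt *t, const struct in6_addr *src, time_t now)
{
	if (now > t->expire) /* record idled out; recycle it */
		t->cnt = 0;
	if (t->cnt >= MAX_SRC_SOLS)
		return -1;
	t->srcq[t->cnt++] = *src;
	if (t->cnt == 1)     /* first solicitor arms the expiry */
		t->expire = now + TGT_SOLS_EXPIRE;
	return 0;
}

int
main(void)
{
	struct soltgt t;
	struct in6_addr src = IN6ADDR_LOOPBACK_INIT;

	memset(&t, 0, sizeof(t));
	for (int i = 0; i < 6; i++)
		printf("enqueue %d -> %d\n", i,
		    soltgt_enqueue(&t, &src, time(NULL)));
	return 0; /* the 5th and 6th attempts are refused */
}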
*/ -#define ND6_MAX_TGT_SOLS_DEFAULT 8 +#define ND6_MAX_TGT_SOLS_DEFAULT 8 static u_int32_t nd6_max_tgt_sols = ND6_MAX_TGT_SOLS_DEFAULT; static u_int32_t nd6_max_src_sols = ND6_MAX_SRC_SOLS_DEFAULT; -static unsigned int ndprl_size; /* size of zone element */ -static struct zone *ndprl_zone; /* nd6_prproxy_prelist zone */ +static unsigned int ndprl_size; /* size of zone element */ +static struct zone *ndprl_zone; /* nd6_prproxy_prelist zone */ -#define NDPRL_ZONE_MAX 256 /* maximum elements in zone */ -#define NDPRL_ZONE_NAME "nd6_prproxy_prelist" /* name for zone */ +#define NDPRL_ZONE_MAX 256 /* maximum elements in zone */ +#define NDPRL_ZONE_NAME "nd6_prproxy_prelist" /* name for zone */ -static unsigned int solsrc_size; /* size of zone element */ -static struct zone *solsrc_zone; /* nd6_prproxy_solsrc zone */ +static unsigned int solsrc_size; /* size of zone element */ +static struct zone *solsrc_zone; /* nd6_prproxy_solsrc zone */ -#define SOLSRC_ZONE_MAX 256 /* maximum elements in zone */ -#define SOLSRC_ZONE_NAME "nd6_prproxy_solsrc" /* name for zone */ +#define SOLSRC_ZONE_MAX 256 /* maximum elements in zone */ +#define SOLSRC_ZONE_NAME "nd6_prproxy_solsrc" /* name for zone */ -static unsigned int soltgt_size; /* size of zone element */ -static struct zone *soltgt_zone; /* nd6_prproxy_soltgt zone */ +static unsigned int soltgt_size; /* size of zone element */ +static struct zone *soltgt_zone; /* nd6_prproxy_soltgt zone */ -#define SOLTGT_ZONE_MAX 256 /* maximum elements in zone */ -#define SOLTGT_ZONE_NAME "nd6_prproxy_soltgt" /* name for zone */ +#define SOLTGT_ZONE_MAX 256 /* maximum elements in zone */ +#define SOLTGT_ZONE_NAME "nd6_prproxy_soltgt" /* name for zone */ /* The following is protected by ndpr_lock */ RB_GENERATE_PREV(prproxy_sols_tree, nd6_prproxy_soltgt, @@ -224,29 +224,32 @@ SYSCTL_UINT(_net_inet6_icmp6, OID_AUTO, prproxy_cnt, void nd6_prproxy_init(void) { - ndprl_size = sizeof (struct nd6_prproxy_prelist); + ndprl_size = sizeof(struct nd6_prproxy_prelist); ndprl_zone = zinit(ndprl_size, NDPRL_ZONE_MAX * ndprl_size, 0, NDPRL_ZONE_NAME); - if (ndprl_zone == NULL) + if (ndprl_zone == NULL) { panic("%s: failed allocating ndprl_zone", __func__); + } zone_change(ndprl_zone, Z_EXPAND, TRUE); zone_change(ndprl_zone, Z_CALLERACCT, FALSE); - solsrc_size = sizeof (struct nd6_prproxy_solsrc); + solsrc_size = sizeof(struct nd6_prproxy_solsrc); solsrc_zone = zinit(solsrc_size, SOLSRC_ZONE_MAX * solsrc_size, 0, SOLSRC_ZONE_NAME); - if (solsrc_zone == NULL) + if (solsrc_zone == NULL) { panic("%s: failed allocating solsrc_zone", __func__); + } zone_change(solsrc_zone, Z_EXPAND, TRUE); zone_change(solsrc_zone, Z_CALLERACCT, FALSE); - soltgt_size = sizeof (struct nd6_prproxy_soltgt); + soltgt_size = sizeof(struct nd6_prproxy_soltgt); soltgt_zone = zinit(soltgt_size, SOLTGT_ZONE_MAX * soltgt_size, 0, SOLTGT_ZONE_NAME); - if (soltgt_zone == NULL) + if (soltgt_zone == NULL) { panic("%s: failed allocating soltgt_zone", __func__); + } zone_change(soltgt_zone, Z_EXPAND, TRUE); zone_change(soltgt_zone, Z_CALLERACCT, FALSE); @@ -259,10 +262,11 @@ nd6_ndprl_alloc(int how) ndprl = (how == M_WAITOK) ? 
zalloc(ndprl_zone) : zalloc_noblock(ndprl_zone); - if (ndprl != NULL) + if (ndprl != NULL) { bzero(ndprl, ndprl_size); + } - return (ndprl); + return ndprl; } static void @@ -321,20 +325,22 @@ nd6_prproxy_prelist_setroute(boolean_t enable, } if ((rt = pr->ndpr_rt) != NULL) { - if ((enable && prproxy) || (!enable && !prproxy)) + if ((enable && prproxy) || (!enable && !prproxy)) { RT_ADDREF(rt); - else + } else { rt = NULL; + } NDPR_UNLOCK(pr); } else { NDPR_UNLOCK(pr); } - /* Call the following ioctl after releasing NDPR lock */ - if (set_allmulti && ifp != NULL) + /* Call the following ioctl after releasing NDPR lock */ + if (set_allmulti && ifp != NULL) { if_allmulti(ifp, allmulti_sw); + } + - NDPR_REMREF(pr); if (rt != NULL) { rt_set_proxy(rt, enable); @@ -374,16 +380,18 @@ nd6_prproxy_prelist_setroute(boolean_t enable, } if ((rt = pr->ndpr_rt) != NULL) { - if ((enable && prproxy) || (!enable && !prproxy)) + if ((enable && prproxy) || (!enable && !prproxy)) { RT_ADDREF(rt); - else + } else { rt = NULL; + } NDPR_UNLOCK(pr); } else { NDPR_UNLOCK(pr); } - if (set_allmulti && ifp != NULL) + if (set_allmulti && ifp != NULL) { if_allmulti(ifp, allmulti_sw); + } NDPR_REMREF(pr); NDPR_REMREF(pr_up); @@ -411,7 +419,7 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) ifnet_lock_shared(ifp); if (enable && (ifp->if_eflags & IFEF_IPV6_ROUTER)) { ifnet_lock_done(ifp); - return (EBUSY); + return EBUSY; } ifnet_lock_done(ifp); @@ -457,11 +465,12 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) NDPR_UNLOCK(pr); } else { NDPR_UNLOCK(pr); - pr = NULL; /* don't go further */ + pr = NULL; /* don't go further */ } - if (pr == NULL) + if (pr == NULL) { break; + } up = nd6_ndprl_alloc(M_WAITOK); if (up == NULL) { @@ -469,7 +478,7 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) continue; } - up->ndprl_pr = pr; /* keep reference from above */ + up->ndprl_pr = pr; /* keep reference from above */ SLIST_INSERT_HEAD(&up_head, up, ndprl_le); } @@ -486,7 +495,7 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) pr = up->ndprl_pr; NDPR_LOCK(pr); - bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, sizeof (pr_addr)); + bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, sizeof(pr_addr)); pr_len = pr->ndpr_plen; NDPR_UNLOCK(pr); @@ -503,8 +512,9 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) NDPR_UNLOCK(fwd); down = nd6_ndprl_alloc(M_WAITOK); - if (down == NULL) + if (down == NULL) { continue; + } NDPR_ADDREF(fwd); down->ndprl_pr = fwd; @@ -527,7 +537,7 @@ nd6_if_prproxy(struct ifnet *ifp, boolean_t enable) lck_mtx_unlock(&proxy6_lock); - return (0); + return 0; } /* @@ -542,8 +552,9 @@ nd6_prproxy_isours(struct mbuf *m, struct ip6_hdr *ip6, struct route_in6 *ro6, struct rtentry *rt; boolean_t ours = FALSE; - if (ip6->ip6_hlim != IPV6_MAXHLIM || ip6->ip6_nxt != IPPROTO_ICMPV6) + if (ip6->ip6_hlim != IPV6_MAXHLIM || ip6->ip6_nxt != IPPROTO_ICMPV6) { goto done; + } if (IN6_IS_ADDR_MC_NODELOCAL(&ip6->ip6_dst) || IN6_IS_ADDR_MC_LINKLOCAL(&ip6->ip6_dst)) { @@ -554,15 +565,18 @@ nd6_prproxy_isours(struct mbuf *m, struct ip6_hdr *ip6, struct route_in6 *ro6, goto done; } - if (ro6 == NULL) + if (ro6 == NULL) { goto done; + } - if ((rt = ro6->ro_rt) != NULL) + if ((rt = ro6->ro_rt) != NULL) { RT_LOCK(rt); + } if (ROUTE_UNUSABLE(ro6)) { - if (rt != NULL) + if (rt != NULL) { RT_UNLOCK(rt); + } ROUTE_RELEASE(ro6); @@ -571,8 +585,9 @@ nd6_prproxy_isours(struct mbuf *m, struct ip6_hdr *ip6, struct route_in6 *ro6, &ro6->ro_dst.sin6_addr)); rtalloc_scoped_ign((struct route *)ro6, RTF_PRCLONING, ifscope); - if ((rt = 
ro6->ro_rt) == NULL) + if ((rt = ro6->ro_rt) == NULL) { goto done; + } RT_LOCK(rt); } @@ -581,10 +596,11 @@ nd6_prproxy_isours(struct mbuf *m, struct ip6_hdr *ip6, struct route_in6 *ro6, RT_UNLOCK(rt); done: - if (ours) + if (ours) { m->m_pkthdr.pkt_flags |= PKTF_PROXY_DST; + } - return (ours); + return ours; } /* @@ -643,7 +659,7 @@ nd6_proxy_find_fwdroute(struct ifnet *ifp, struct route_in6 *ro6) } VERIFY(!(pr->ndpr_stateflags & NDPRF_IFSCOPE)); - bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, sizeof (pr_addr)); + bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, sizeof(pr_addr)); pr_len = pr->ndpr_plen; NDPR_UNLOCK(pr); @@ -689,7 +705,7 @@ nd6_proxy_find_fwdroute(struct ifnet *ifp, struct route_in6 *ro6) "for dst %s\n", if_name(rt->rt_ifp), ip6_sprintf(dst6))); RT_UNLOCK(rt); - ro6->ro_rt = rt; /* refcnt held by rtalloc1 */ + ro6->ro_rt = rt; /* refcnt held by rtalloc1 */ lck_mtx_unlock(rnh_lock); return; } @@ -711,7 +727,7 @@ nd6_proxy_find_fwdroute(struct ifnet *ifp, struct route_in6 *ro6) "route for dst %s\n", if_name(rt->rt_ifp), ip6_sprintf(dst6))); RT_UNLOCK(rt); - ro6->ro_rt = rt; /* refcnt held by rtalloc1 */ + ro6->ro_rt = rt; /* refcnt held by rtalloc1 */ } } VERIFY(rt != NULL || ro6->ro_rt == NULL); @@ -755,7 +771,7 @@ nd6_prproxy_prelist_update(struct nd_prefix *pr_cur, struct nd_prefix *pr_up) if (pr_up == NULL) { NDPR_LOCK(pr_cur); bcopy(&pr_cur->ndpr_prefix.sin6_addr, &pr_addr, - sizeof (pr_addr)); + sizeof(pr_addr)); pr_len = pr_cur->ndpr_plen; NDPR_UNLOCK(pr_cur); @@ -781,7 +797,7 @@ nd6_prproxy_prelist_update(struct nd_prefix *pr_cur, struct nd_prefix *pr_up) } else { NDPR_LOCK(pr_up); bcopy(&pr_up->ndpr_prefix.sin6_addr, &pr_addr, - sizeof (pr_addr)); + sizeof(pr_addr)); pr_len = pr_up->ndpr_plen; } NDPR_LOCK_ASSERT_HELD(pr_up); @@ -822,8 +838,9 @@ nd6_prproxy_prelist_update(struct nd_prefix *pr_cur, struct nd_prefix *pr_up) NDPR_UNLOCK(pr); down = nd6_ndprl_alloc(M_WAITOK); - if (down == NULL) + if (down == NULL) { continue; + } NDPR_ADDREF(pr); down->ndprl_pr = pr; @@ -860,8 +877,8 @@ nd6_prproxy_ifaddr(struct in6_ifaddr *ia) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED); IFA_LOCK(&ia->ia_ifa); - bcopy(&ia->ia_addr.sin6_addr, &addr, sizeof (addr)); - bcopy(&ia->ia_prefixmask.sin6_addr, &pr_mask, sizeof (pr_mask)); + bcopy(&ia->ia_addr.sin6_addr, &addr, sizeof(addr)); + bcopy(&ia->ia_prefixmask.sin6_addr, &pr_mask, sizeof(pr_mask)); pr_len = ia->ia_plen; IFA_UNLOCK(&ia->ia_ifa); @@ -880,7 +897,7 @@ nd6_prproxy_ifaddr(struct in6_ifaddr *ia) } lck_mtx_unlock(nd6_mutex); - return (proxied); + return proxied; } /* @@ -907,16 +924,18 @@ nd6_prproxy_ns_output(struct ifnet *ifp, struct ifnet *exclifp, * Ignore excluded interface if it's the same as the original; * we always send a NS on the original interface down below. 
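The proxy forwarding decisions in these hunks repeatedly ask whether an address falls under an advertised prefix; that is the job in6_are_prefix_equal() performs in the lookups here. A self-contained sketch of the comparison, matching whole bytes first and then the residual bits under a mask (function name is illustrative):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static int
prefix_equal(const struct in6_addr *a, const struct in6_addr *b, unsigned plen)
{
	unsigned bytes = plen / 8, bits = plen % 8;

	if (plen > 128)
		return 0;
	if (memcmp(a->s6_addr, b->s6_addr, bytes) != 0)
		return 0;
	if (bits != 0) {
		unsigned char mask = (unsigned char)(0xff << (8 - bits));
		if ((a->s6_addr[bytes] & mask) != (b->s6_addr[bytes] & mask))
			return 0;
	}
	return 1;
}

int
main(void)
{
	struct in6_addr a, b;

	inet_pton(AF_INET6, "2001:db8:aaaa::1", &a);
	inet_pton(AF_INET6, "2001:db8:aaab::2", &b);
	printf("/47 equal: %d\n", prefix_equal(&a, &b, 47)); /* 1 */
	printf("/48 equal: %d\n", prefix_equal(&a, &b, 48)); /* 0 */
	return 0;
}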
*/ - if (exclifp != NULL && exclifp == ifp) + if (exclifp != NULL && exclifp == ifp) { exclifp = NULL; + } - if (exclifp == NULL) + if (exclifp == NULL) { nd6log2((LOG_DEBUG, "%s: sending NS who has %s on ALL\n", if_name(ifp), ip6_sprintf(taddr))); - else + } else { nd6log2((LOG_DEBUG, "%s: sending NS who has %s on ALL " "(except %s)\n", if_name(ifp), ip6_sprintf(taddr), if_name(exclifp))); + } SLIST_INIT(&ndprl_head); @@ -933,7 +952,7 @@ nd6_prproxy_ns_output(struct ifnet *ifp, struct ifnet *exclifp, } VERIFY(!(pr->ndpr_stateflags & NDPRF_IFSCOPE)); - bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, sizeof (pr_addr)); + bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, sizeof(pr_addr)); pr_len = pr->ndpr_plen; NDPR_UNLOCK(pr); @@ -952,8 +971,9 @@ nd6_prproxy_ns_output(struct ifnet *ifp, struct ifnet *exclifp, NDPR_UNLOCK(fwd); ndprl = nd6_ndprl_alloc(M_WAITOK); - if (ndprl == NULL) + if (ndprl == NULL) { continue; + } NDPR_ADDREF(fwd); ndprl->ndprl_pr = fwd; @@ -1035,7 +1055,7 @@ nd6_prproxy_ns_input(struct ifnet *ifp, struct in6_addr *saddr, } VERIFY(!(pr->ndpr_stateflags & NDPRF_IFSCOPE)); - bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, sizeof (pr_addr)); + bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, sizeof(pr_addr)); pr_len = pr->ndpr_plen; /* @@ -1047,7 +1067,7 @@ nd6_prproxy_ns_input(struct ifnet *ifp, struct in6_addr *saddr, !nd6_solsrc_enq(pr, ifp, saddr, taddr)) { NDPR_UNLOCK(pr); solrec = FALSE; - break; /* bail out */ + break; /* bail out */ } else { NDPR_UNLOCK(pr); } @@ -1067,8 +1087,9 @@ nd6_prproxy_ns_input(struct ifnet *ifp, struct in6_addr *saddr, NDPR_UNLOCK(fwd); ndprl = nd6_ndprl_alloc(M_WAITOK); - if (ndprl == NULL) + if (ndprl == NULL) { continue; + } NDPR_ADDREF(fwd); ndprl->ndprl_pr = fwd; @@ -1173,18 +1194,18 @@ nd6_prproxy_na_input(struct ifnet *ifp, struct in6_addr *saddr, */ if (!IN6_IS_ADDR_MULTICAST(daddr0)) { fwd_ifp = NULL; - bzero(&daddr, sizeof (daddr)); + bzero(&daddr, sizeof(daddr)); if (!nd6_solsrc_deq(pr, taddr, &daddr, &fwd_ifp)) { NDPR_UNLOCK(pr); - break; /* bail out */ + break; /* bail out */ } VERIFY(!IN6_IS_ADDR_UNSPECIFIED(&daddr) && fwd_ifp); NDPR_UNLOCK(pr); ndprl = nd6_ndprl_alloc(M_WAITOK); - if (ndprl == NULL) - break; /* bail out */ - + if (ndprl == NULL) { + break; /* bail out */ + } ndprl->ndprl_fwd_ifp = fwd_ifp; ndprl->ndprl_sol = TRUE; ndprl->ndprl_sol_saddr = *(&daddr); @@ -1196,7 +1217,7 @@ nd6_prproxy_na_input(struct ifnet *ifp, struct in6_addr *saddr, u_char pr_len; bcopy(&pr->ndpr_prefix.sin6_addr, &pr_addr, - sizeof (pr_addr)); + sizeof(pr_addr)); pr_len = pr->ndpr_plen; NDPR_UNLOCK(pr); @@ -1207,8 +1228,8 @@ nd6_prproxy_na_input(struct ifnet *ifp, struct in6_addr *saddr, fwd->ndpr_ifp == ifp || fwd->ndpr_plen != pr_len || !in6_are_prefix_equal( - &fwd->ndpr_prefix.sin6_addr, - &pr_addr, pr_len)) { + &fwd->ndpr_prefix.sin6_addr, + &pr_addr, pr_len)) { NDPR_UNLOCK(fwd); continue; } @@ -1217,8 +1238,9 @@ nd6_prproxy_na_input(struct ifnet *ifp, struct in6_addr *saddr, NDPR_UNLOCK(fwd); ndprl = nd6_ndprl_alloc(M_WAITOK); - if (ndprl == NULL) + if (ndprl == NULL) { continue; + } NDPR_ADDREF(fwd); ndprl->ndprl_pr = fwd; @@ -1275,8 +1297,9 @@ nd6_prproxy_na_input(struct ifnet *ifp, struct in6_addr *saddr, nd6_na_output(fwd_ifp, &daddr, taddr, flags, 1, NULL); } - if (pr != NULL) + if (pr != NULL) { NDPR_REMREF(pr); + } nd6_ndprl_free(ndprl); } @@ -1290,10 +1313,11 @@ nd6_solsrc_alloc(int how) ssrc = (how == M_WAITOK) ? 
zalloc(solsrc_zone) : zalloc_noblock(solsrc_zone); - if (ssrc != NULL) + if (ssrc != NULL) { bzero(ssrc, solsrc_size); + } - return (ssrc); + return ssrc; } static void @@ -1336,8 +1360,9 @@ nd6_prproxy_sols_purge(struct nd_prefix *pr, u_int64_t max_stgt) pr->ndpr_prproxy_sols_cnt--; RB_REMOVE(prproxy_sols_tree, &pr->ndpr_prproxy_sols, soltgt); nd6_soltgt_free(soltgt); - if (pr->ndpr_prproxy_sols_cnt < max_stgt) + if (pr->ndpr_prproxy_sols_cnt < max_stgt) { break; + } } } @@ -1375,18 +1400,19 @@ nd6_solsrc_enq(struct nd_prefix *pr, struct ifnet *ifp, NDPR_LOCK_ASSERT_HELD(pr); VERIFY(!(pr->ndpr_stateflags & NDPRF_IFSCOPE)); - VERIFY((pr->ndpr_stateflags & (NDPRF_ONLINK|NDPRF_PRPROXY)) == - (NDPRF_ONLINK|NDPRF_PRPROXY)); + VERIFY((pr->ndpr_stateflags & (NDPRF_ONLINK | NDPRF_PRPROXY)) == + (NDPRF_ONLINK | NDPRF_PRPROXY)); VERIFY(!IN6_IS_ADDR_UNSPECIFIED(saddr)); ssrc = nd6_solsrc_alloc(M_WAITOK); - if (ssrc == NULL) - return (FALSE); + if (ssrc == NULL) { + return FALSE; + } ssrc->solsrc_saddr = *saddr; ssrc->solsrc_ifp = ifp; - find.soltgt_key.taddr = *taddr; /* search key */ + find.soltgt_key.taddr = *taddr; /* search key */ soltgt = RB_FIND(prproxy_sols_tree, &pr->ndpr_prproxy_sols, &find); if (soltgt == NULL) { @@ -1399,7 +1425,7 @@ nd6_solsrc_enq(struct nd_prefix *pr, struct ifnet *ifp, soltgt = nd6_soltgt_alloc(M_WAITOK); if (soltgt == NULL) { nd6_solsrc_free(ssrc); - return (FALSE); + return FALSE; } soltgt->soltgt_key.taddr = *taddr; @@ -1420,10 +1446,11 @@ nd6_solsrc_enq(struct nd_prefix *pr, struct ifnet *ifp, soltgt->soltgt_cnt++; VERIFY(soltgt->soltgt_cnt != 0); TAILQ_INSERT_TAIL(&soltgt->soltgt_q, ssrc, solsrc_tqe); - if (soltgt->soltgt_cnt == 1) + if (soltgt->soltgt_cnt == 1) { soltgt->soltgt_expire = net_uptime() + ND6_TGT_SOLS_EXPIRE; + } - return (TRUE); + return TRUE; } /* @@ -1438,18 +1465,18 @@ nd6_solsrc_deq(struct nd_prefix *pr, struct in6_addr *taddr, NDPR_LOCK_ASSERT_HELD(pr); VERIFY(!(pr->ndpr_stateflags & NDPRF_IFSCOPE)); - VERIFY((pr->ndpr_stateflags & (NDPRF_ONLINK|NDPRF_PRPROXY)) == - (NDPRF_ONLINK|NDPRF_PRPROXY)); + VERIFY((pr->ndpr_stateflags & (NDPRF_ONLINK | NDPRF_PRPROXY)) == + (NDPRF_ONLINK | NDPRF_PRPROXY)); - bzero(daddr, sizeof (*daddr)); + bzero(daddr, sizeof(*daddr)); *ifp = NULL; - find.soltgt_key.taddr = *taddr; /* search key */ + find.soltgt_key.taddr = *taddr; /* search key */ soltgt = RB_FIND(prproxy_sols_tree, &pr->ndpr_prproxy_sols, &find); if (soltgt == NULL || soltgt->soltgt_cnt == 0) { VERIFY(soltgt == NULL || TAILQ_EMPTY(&soltgt->soltgt_q)); - return (FALSE); + return FALSE; } VERIFY(soltgt->soltgt_cnt != 0); @@ -1461,7 +1488,7 @@ nd6_solsrc_deq(struct nd_prefix *pr, struct in6_addr *taddr, *ifp = ssrc->solsrc_ifp; nd6_solsrc_free(ssrc); - return (TRUE); + return TRUE; } static struct nd6_prproxy_soltgt * @@ -1475,7 +1502,7 @@ nd6_soltgt_alloc(int how) bzero(soltgt, soltgt_size); TAILQ_INIT(&soltgt->soltgt_q); } - return (soltgt); + return soltgt; } static void @@ -1521,5 +1548,5 @@ static __inline int soltgt_cmp(const struct nd6_prproxy_soltgt *a, const struct nd6_prproxy_soltgt *b) { - return (memcmp(&a->soltgt_key, &b->soltgt_key, sizeof (a->soltgt_key))); + return memcmp(&a->soltgt_key, &b->soltgt_key, sizeof(a->soltgt_key)); } diff --git a/bsd/netinet6/nd6_rtr.c b/bsd/netinet6/nd6_rtr.c index 453eec269..53259c5ea 100644 --- a/bsd/netinet6/nd6_rtr.c +++ b/bsd/netinet6/nd6_rtr.c @@ -101,7 +101,7 @@ static struct nd_defrouter *defrtrlist_update_common(struct nd_defrouter *, static struct nd_defrouter *defrtrlist_update(struct nd_defrouter 
*); static struct nd_pfxrouter *pfxrtr_lookup(struct nd_prefix *, - struct nd_defrouter *); + struct nd_defrouter *); static void pfxrtr_add(struct nd_prefix *, struct nd_defrouter *); static void pfxrtr_del(struct nd_pfxrouter *, struct nd_prefix *); static struct nd_pfxrouter *find_pfxlist_reachable_router(struct nd_prefix *); @@ -158,72 +158,72 @@ static void *nd_defrouter_waitchan = &nd_defrouter_busy; static int nd_defrouter_waiters = 0; /* RTPREF_MEDIUM has to be 0! */ -#define RTPREF_HIGH 1 -#define RTPREF_MEDIUM 0 -#define RTPREF_LOW (-1) -#define RTPREF_RESERVED (-2) -#define RTPREF_INVALID (-3) /* internal */ +#define RTPREF_HIGH 1 +#define RTPREF_MEDIUM 0 +#define RTPREF_LOW (-1) +#define RTPREF_RESERVED (-2) +#define RTPREF_INVALID (-3) /* internal */ -#define NDPR_TRACE_HIST_SIZE 32 /* size of trace history */ +#define NDPR_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ __private_extern__ unsigned int ndpr_trace_hist_size = NDPR_TRACE_HIST_SIZE; struct nd_prefix_dbg { - struct nd_prefix ndpr_pr; /* nd_prefix */ - u_int16_t ndpr_refhold_cnt; /* # of ref */ - u_int16_t ndpr_refrele_cnt; /* # of rele */ + struct nd_prefix ndpr_pr; /* nd_prefix */ + u_int16_t ndpr_refhold_cnt; /* # of ref */ + u_int16_t ndpr_refrele_cnt; /* # of rele */ /* * Circular lists of ndpr_addref and ndpr_remref callers. */ - ctrace_t ndpr_refhold[NDPR_TRACE_HIST_SIZE]; - ctrace_t ndpr_refrele[NDPR_TRACE_HIST_SIZE]; + ctrace_t ndpr_refhold[NDPR_TRACE_HIST_SIZE]; + ctrace_t ndpr_refrele[NDPR_TRACE_HIST_SIZE]; }; -static unsigned int ndpr_debug; /* debug flags */ -static unsigned int ndpr_size; /* size of zone element */ -static struct zone *ndpr_zone; /* zone for nd_prefix */ +static unsigned int ndpr_debug; /* debug flags */ +static unsigned int ndpr_size; /* size of zone element */ +static struct zone *ndpr_zone; /* zone for nd_prefix */ -#define NDPR_ZONE_MAX 64 /* maximum elements in zone */ -#define NDPR_ZONE_NAME "nd6_prefix" /* zone name */ +#define NDPR_ZONE_MAX 64 /* maximum elements in zone */ +#define NDPR_ZONE_NAME "nd6_prefix" /* zone name */ -#define NDDR_TRACE_HIST_SIZE 32 /* size of trace history */ +#define NDDR_TRACE_HIST_SIZE 32 /* size of trace history */ /* For gdb */ __private_extern__ unsigned int nddr_trace_hist_size = NDDR_TRACE_HIST_SIZE; struct nd_defrouter_dbg { - struct nd_defrouter nddr_dr; /* nd_defrouter */ - uint16_t nddr_refhold_cnt; /* # of ref */ - uint16_t nddr_refrele_cnt; /* # of rele */ + struct nd_defrouter nddr_dr; /* nd_defrouter */ + uint16_t nddr_refhold_cnt; /* # of ref */ + uint16_t nddr_refrele_cnt; /* # of rele */ /* * Circular lists of ndpr_addref and ndpr_remref callers. 
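The _dbg variants above wrap each object with hold/release counters plus fixed-size circular logs of callers, so a leaked or over-released entry can be traced from a core file. A compact user-space version of the same pattern; the kernel records a ctrace_t backtrace where this sketch keeps a single caller pointer via the gcc/clang builtin:

#include <stdio.h>

#define TRACE_HIST_SIZE 32 /* matches NDDR_TRACE_HIST_SIZE above */

/* Hypothetical stand-in for the _dbg wrappers above. */
struct ref_dbg {
	int      refcount;
	unsigned refhold_cnt;              /* total # of ref  */
	unsigned refrele_cnt;              /* total # of rele */
	void    *refhold[TRACE_HIST_SIZE]; /* circular caller logs */
	void    *refrele[TRACE_HIST_SIZE];
};

static void
ref_hold(struct ref_dbg *o, void *caller)
{
	o->refcount++;
	/* the running counter modulo the history size picks the slot */
	o->refhold[o->refhold_cnt++ % TRACE_HIST_SIZE] = caller;
}

static void
ref_rele(struct ref_dbg *o, void *caller)
{
	o->refcount--;
	o->refrele[o->refrele_cnt++ % TRACE_HIST_SIZE] = caller;
}

int
main(void)
{
	struct ref_dbg o = { 0 };

	ref_hold(&o, __builtin_return_address(0));
	ref_rele(&o, __builtin_return_address(0));
	printf("refcount=%d holds=%u releases=%u\n",
	    o.refcount, o.refhold_cnt, o.refrele_cnt);
	return 0;
}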
*/ - ctrace_t nddr_refhold[NDDR_TRACE_HIST_SIZE]; - ctrace_t nddr_refrele[NDDR_TRACE_HIST_SIZE]; + ctrace_t nddr_refhold[NDDR_TRACE_HIST_SIZE]; + ctrace_t nddr_refrele[NDDR_TRACE_HIST_SIZE]; }; -static unsigned int nddr_debug; /* debug flags */ -static unsigned int nddr_size; /* size of zone element */ -static struct zone *nddr_zone; /* zone for nd_defrouter */ +static unsigned int nddr_debug; /* debug flags */ +static unsigned int nddr_size; /* size of zone element */ +static struct zone *nddr_zone; /* zone for nd_defrouter */ -#define NDDR_ZONE_MAX 64 /* maximum elements in zone */ -#define NDDR_ZONE_NAME "nd6_defrouter" /* zone name */ +#define NDDR_ZONE_MAX 64 /* maximum elements in zone */ +#define NDDR_ZONE_NAME "nd6_defrouter" /* zone name */ -static unsigned int ndprtr_size; /* size of zone element */ -static struct zone *ndprtr_zone; /* zone for nd_pfxrouter */ +static unsigned int ndprtr_size; /* size of zone element */ +static struct zone *ndprtr_zone; /* zone for nd_pfxrouter */ -#define NDPRTR_ZONE_MAX 64 /* maximum elements in zone */ -#define NDPRTR_ZONE_NAME "nd6_pfxrouter" /* zone name */ +#define NDPRTR_ZONE_MAX 64 /* maximum elements in zone */ +#define NDPRTR_ZONE_NAME "nd6_pfxrouter" /* zone name */ void nd6_rtr_init(void) { - PE_parse_boot_argn("ifa_debug", &ndpr_debug, sizeof (ndpr_debug)); - PE_parse_boot_argn("ifa_debug", &nddr_debug, sizeof (nddr_debug)); + PE_parse_boot_argn("ifa_debug", &ndpr_debug, sizeof(ndpr_debug)); + PE_parse_boot_argn("ifa_debug", &nddr_debug, sizeof(nddr_debug)); - ndpr_size = (ndpr_debug == 0) ? sizeof (struct nd_prefix) : - sizeof (struct nd_prefix_dbg); + ndpr_size = (ndpr_debug == 0) ? sizeof(struct nd_prefix) : + sizeof(struct nd_prefix_dbg); ndpr_zone = zinit(ndpr_size, NDPR_ZONE_MAX * ndpr_size, 0, NDPR_ZONE_NAME); if (ndpr_zone == NULL) { @@ -233,8 +233,8 @@ nd6_rtr_init(void) zone_change(ndpr_zone, Z_EXPAND, TRUE); zone_change(ndpr_zone, Z_CALLERACCT, FALSE); - nddr_size = (nddr_debug == 0) ? sizeof (struct nd_defrouter) : - sizeof (struct nd_defrouter_dbg); + nddr_size = (nddr_debug == 0) ? sizeof(struct nd_defrouter) : + sizeof(struct nd_defrouter_dbg); nddr_zone = zinit(nddr_size, NDDR_ZONE_MAX * nddr_size, 0, NDDR_ZONE_NAME); if (nddr_zone == NULL) { @@ -244,7 +244,7 @@ nd6_rtr_init(void) zone_change(nddr_zone, Z_EXPAND, TRUE); zone_change(nddr_zone, Z_CALLERACCT, FALSE); - ndprtr_size = sizeof (struct nd_pfxrouter); + ndprtr_size = sizeof(struct nd_pfxrouter); ndprtr_zone = zinit(ndprtr_size, NDPRTR_ZONE_MAX * ndprtr_size, 0, NDPRTR_ZONE_NAME); if (ndprtr_zone == NULL) { @@ -264,7 +264,7 @@ nd6_rtr_init(void) */ void nd6_rs_input( - struct mbuf *m, + struct mbuf *m, int off, int icmp6len) { @@ -280,8 +280,9 @@ nd6_rs_input( MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); /* If I'm not a router, ignore it. 
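nd6_rs_input() below only acts when the node is a router, then applies RFC 4861 section 6.1.1 validation: the hop limit must still be 255 (proving no router forwarded the packet), and an unspecified source, i.e. a host still running DAD, must not carry a source link-layer address option. A sketch of just those two stateless checks; the full function additionally requires the source to be an on-link neighbor:

#include <stdio.h>
#include <netinet/in.h>

/* hlim and src are assumed to have been parsed out of the packet. */
static int
rs_validate(unsigned hlim, const struct in6_addr *src, int has_src_lladdr)
{
	if (hlim != 255)
		return 0; /* crossed a router, or was forged off-link */
	if (IN6_IS_ADDR_UNSPECIFIED(src) && has_src_lladdr)
		return 0; /* RFC 4861 sec. 6.1.1: DAD source, no lladdr */
	return 1;
}

int
main(void)
{
	struct in6_addr any = IN6ADDR_ANY_INIT;

	printf("%d\n", rs_validate(255, &any, 0)); /* 1: acceptable      */
	printf("%d\n", rs_validate(64, &any, 0));  /* 0: bad hop limit   */
	printf("%d\n", rs_validate(255, &any, 1)); /* 0: DAD src, lladdr */
	return 0;
}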
*/ - if (!ip6_forwarding || !(ifp->if_eflags & IFEF_IPV6_ROUTER)) + if (!ip6_forwarding || !(ifp->if_eflags & IFEF_IPV6_ROUTER)) { goto freeit; + } /* Sanity checks */ if (ip6->ip6_hlim != 255) { @@ -302,20 +303,20 @@ nd6_rs_input( } else { struct sockaddr_in6 src_sa6; - bzero(&src_sa6, sizeof (src_sa6)); + bzero(&src_sa6, sizeof(src_sa6)); src_sa6.sin6_family = AF_INET6; - src_sa6.sin6_len = sizeof (src_sa6); + src_sa6.sin6_len = sizeof(src_sa6); src_sa6.sin6_addr = ip6->ip6_src; if (!nd6_is_addr_neighbor(&src_sa6, ifp, 0)) { nd6log((LOG_INFO, "nd6_rs_input: " - "RS packet from non-neighbor\n")); + "RS packet from non-neighbor\n")); goto freeit; } } - IP6_EXTHDR_CHECK(m, off, icmp6len, return); + IP6_EXTHDR_CHECK(m, off, icmp6len, return ); nd_rs = (struct nd_router_solicit *)((caddr_t)ip6 + off); - icmp6len -= sizeof (*nd_rs); + icmp6len -= sizeof(*nd_rs); nd6_option_init(nd_rs + 1, icmp6len, &ndopts); if (nd6_options(&ndopts) < 0) { nd6log((LOG_INFO, @@ -333,7 +334,7 @@ nd6_rs_input( nd6log((LOG_INFO, "nd6_rs_input: lladdrlen mismatch for %s " "(if %d, RS packet %d)\n", - ip6_sprintf(&saddr6), ifp->if_addrlen, lladdrlen - 2)); + ip6_sprintf(&saddr6), ifp->if_addrlen, lladdrlen - 2)); goto bad; } @@ -357,7 +358,7 @@ bad: */ void nd6_ra_input( - struct mbuf *m, + struct mbuf *m, int off, int icmp6len) { @@ -380,8 +381,9 @@ nd6_ra_input( u_int32_t advreachable; #if (DEVELOPMENT || DEBUG) - if (ip6_accept_rtadv == 0) + if (ip6_accept_rtadv == 0) { goto freeit; + } #endif /* (DEVELOPMENT || DEBUG) */ /* Expect 32-bit aligned data pointer on strict-align platforms */ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); @@ -398,8 +400,9 @@ nd6_ra_input( */ if (!(ifp->if_eflags & (IFEF_ACCEPT_RTADV | IFEF_IPV6_ROUTER)) || ((ifp->if_eflags & IFEF_IPV6_ROUTER) && - (ia6 = ifa_foraddr6(&saddr6)) == NULL)) + (ia6 = ifa_foraddr6(&saddr6)) == NULL)) { goto freeit; + } if (ia6 != NULL) { IFA_REMREF(&ia6->ia_ifa); @@ -421,10 +424,10 @@ nd6_ra_input( goto bad; } - IP6_EXTHDR_CHECK(m, off, icmp6len, return); + IP6_EXTHDR_CHECK(m, off, icmp6len, return ); nd_ra = (struct nd_router_advert *)((caddr_t)ip6 + off); - icmp6len -= sizeof (*nd_ra); + icmp6len -= sizeof(*nd_ra); nd6_option_init(nd_ra + 1, icmp6len, &ndopts); if (nd6_options(&ndopts) < 0) { nd6log((LOG_INFO, @@ -436,13 +439,14 @@ nd6_ra_input( advreachable = nd_ra->nd_ra_reachable; /* remember if this is a multicasted advertisement */ - if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) + if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) { mcast = 1; + } ndi = ND_IFINFO(ifp); VERIFY((NULL != ndi) && (TRUE == ndi->initialized)); lck_mtx_lock(&ndi->lock); - bzero(&dr0, sizeof (dr0)); + bzero(&dr0, sizeof(dr0)); dr0.rtaddr = saddr6; dr0.flags = nd_ra->nd_ra_flags_reserved; dr0.rtlifetime = ntohs(nd_ra->nd_ra_router_lifetime); @@ -458,8 +462,9 @@ nd6_ra_input( ndi->recalctm = nd6_recalc_reachtm_interval; /* reset */ } } - if (nd_ra->nd_ra_retransmit) + if (nd_ra->nd_ra_retransmit) { ndi->retrans = ntohl(nd_ra->nd_ra_retransmit); + } if (nd_ra->nd_ra_curhoplimit) { if (ndi->chlim < nd_ra->nd_ra_curhoplimit) { ndi->chlim = nd_ra->nd_ra_curhoplimit; @@ -492,8 +497,9 @@ nd6_ra_input( struct in6_addr pi_mask; bzero(&pi_mask, sizeof(pi_mask)); - if (pt->nd_opt_type != ND_OPT_PREFIX_INFORMATION) + if (pt->nd_opt_type != ND_OPT_PREFIX_INFORMATION) { continue; + } pi = (struct nd_opt_prefix_info *)pt; if (pi->nd_opt_pi_len != 4) { @@ -532,11 +538,11 @@ nd6_ra_input( continue; } - bzero(&pr, sizeof (pr)); + bzero(&pr, sizeof(pr)); lck_mtx_init(&pr.ndpr_lock, ifa_mtx_grp, ifa_mtx_attr); 
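Each Prefix Information option is vetted before it can touch the prefix list: the option must be exactly 4 units (32 bytes) long, the prefix length must be sane, link-local and multicast prefixes are skipped, and the lifetimes must be consistent (RFC 4862 section 5.5.3). A standalone sketch of those checks, with parameter names mirroring struct nd_opt_prefix_info and lifetimes in seconds:

#include <stdio.h>
#include <arpa/inet.h>

static int
pio_acceptable(unsigned opt_len, unsigned plen,
    const struct in6_addr *prefix, unsigned vltime, unsigned pltime)
{
	if (opt_len != 4)
		return 0; /* option must be exactly 32 bytes */
	if (plen > 128)
		return 0; /* invalid prefix length */
	if (IN6_IS_ADDR_LINKLOCAL(prefix) || IN6_IS_ADDR_MULTICAST(prefix))
		return 0; /* never autoconfigure these */
	if (pltime > vltime)
		return 0; /* inconsistent lifetimes */
	return 1;
}

int
main(void)
{
	struct in6_addr p;

	inet_pton(AF_INET6, "fe80::", &p);
	printf("%d\n", pio_acceptable(4, 64, &p, 2592000, 604800)); /* 0 */
	inet_pton(AF_INET6, "2001:db8::", &p);
	printf("%d\n", pio_acceptable(4, 64, &p, 2592000, 604800)); /* 1 */
	return 0;
}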
NDPR_LOCK(&pr); pr.ndpr_prefix.sin6_family = AF_INET6; - pr.ndpr_prefix.sin6_len = sizeof (pr.ndpr_prefix); + pr.ndpr_prefix.sin6_len = sizeof(pr.ndpr_prefix); pr.ndpr_prefix.sin6_addr = pi->nd_opt_pi_prefix; pr.ndpr_ifp = m->m_pkthdr.rcvif; @@ -547,7 +553,7 @@ nd6_ra_input( pr.ndpr_plen = pi->nd_opt_pi_prefix_len; pr.ndpr_vltime = ntohl(pi->nd_opt_pi_valid_time); pr.ndpr_pltime = - ntohl(pi->nd_opt_pi_preferred_time); + ntohl(pi->nd_opt_pi_preferred_time); /* * Exceptions to stateless autoconfiguration processing: @@ -588,7 +594,7 @@ nd6_ra_input( * updating can vet the values. */ prfl = NULL; - MALLOC(prfl, struct nd_prefix_list *, sizeof (*prfl), + MALLOC(prfl, struct nd_prefix_list *, sizeof(*prfl), M_TEMP, M_WAITOK | M_ZERO); if (prfl == NULL) { @@ -599,7 +605,7 @@ nd6_ra_input( /* this is only for nd6_post_msg(), otherwise unused */ bcopy(&pr.ndpr_prefix, &prfl->pr.ndpr_prefix, - sizeof (prfl->pr.ndpr_prefix)); + sizeof(prfl->pr.ndpr_prefix)); prfl->pr.ndpr_raf = pr.ndpr_raf; prfl->pr.ndpr_plen = pr.ndpr_plen; prfl->pr.ndpr_vltime = pr.ndpr_vltime; @@ -640,8 +646,9 @@ nd6_ra_input( ndi->linkmtu = mtu; lck_mtx_unlock(&ndi->lock); - if (change) /* in6_maxmtu may change */ + if (change) { /* in6_maxmtu may change */ in6_setmaxmtu(); + } } else { nd6log((LOG_INFO, "nd6_ra_input: bogus mtu " "mtu=%d sent from %s; " @@ -673,12 +680,13 @@ skip: nd6log((LOG_INFO, "nd6_ra_input: lladdrlen mismatch for %s " "(if %d, RA packet %d)\n", - ip6_sprintf(&saddr6), ifp->if_addrlen, lladdrlen - 2)); + ip6_sprintf(&saddr6), ifp->if_addrlen, lladdrlen - 2)); goto bad; } - if (dr && dr->stateflags & NDDRF_MAPPED) + if (dr && dr->stateflags & NDDRF_MAPPED) { saddr6 = dr->rtaddr_mapped; + } nd6_cache_lladdr(ifp, &saddr6, lladdr, (int)lladdrlen, ND_ROUTER_ADVERT, 0); @@ -698,8 +706,9 @@ skip: freeit: m_freem(m); - if (dr) + if (dr) { NDDR_REMREF(dr); + } prfl = NULL; while ((prfl = nd_prefix_list_head) != NULL) { @@ -727,7 +736,7 @@ nd6_rtmsg(int cmd, struct rtentry *rt) RT_LOCK_ASSERT_HELD(rt); - bzero((caddr_t)&info, sizeof (info)); + bzero((caddr_t)&info, sizeof(info)); /* It's not necessary to lock ifp for if_lladdr */ info.rti_info[RTAX_DST] = rt_key(rt); info.rti_info[RTAX_GATEWAY] = rt->rt_gateway; @@ -754,13 +763,14 @@ defrouter_addreq(struct nd_defrouter *new, boolean_t scoped) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED); NDDR_LOCK_ASSERT_NOTHELD(new); /* - * We're free to lock and unlock NDDR because our callers + * We're free to lock and unlock NDDR because our callers * are holding an extra reference for us. */ NDDR_LOCK(new); - if (new->stateflags & NDDRF_INSTALLED) + if (new->stateflags & NDDRF_INSTALLED) { goto out; + } if (new->ifp->if_eflags & IFEF_IPV6_ROUTER) { nd6log2((LOG_INFO, "%s: ignoring router %s, scoped=%d, " @@ -774,18 +784,19 @@ defrouter_addreq(struct nd_defrouter *new, boolean_t scoped) "static=%d\n", if_name(new->ifp), ip6_sprintf(&new->rtaddr), scoped, (new->stateflags & NDDRF_STATIC) ? 1 : 0)); - Bzero(&def, sizeof (def)); - Bzero(&mask, sizeof (mask)); - Bzero(&gate, sizeof (gate)); + Bzero(&def, sizeof(def)); + Bzero(&mask, sizeof(mask)); + Bzero(&gate, sizeof(gate)); def.sin6_len = mask.sin6_len = gate.sin6_len - = sizeof (struct sockaddr_in6); + = sizeof(struct sockaddr_in6); def.sin6_family = mask.sin6_family = gate.sin6_family = AF_INET6; - if (new->stateflags & NDDRF_MAPPED) + if (new->stateflags & NDDRF_MAPPED) { gate.sin6_addr = new->rtaddr_mapped; - else + } else { gate.sin6_addr = new->rtaddr; + } ifscope = scoped ? 
new->ifp->if_index : IFSCOPE_NONE; NDDR_UNLOCK(new); @@ -810,7 +821,7 @@ defrouter_addreq(struct nd_defrouter *new, boolean_t scoped) if (tmp_ia6 != NULL && !(tmp_ia6->ia6_flags & IN6_IFF_NOTMANUAL) && IN6_ARE_ADDR_EQUAL(&tmp_ia6->ia_addr.sin6_addr, - &gate.sin6_addr)) { + &gate.sin6_addr)) { gate.sin6_addr.s6_addr8[15] += 1; new->rtaddr_mapped = gate.sin6_addr; new->stateflags |= NDDRF_MAPPED; @@ -827,13 +838,14 @@ defrouter_addreq(struct nd_defrouter *new, boolean_t scoped) if (newrt) { RT_LOCK(newrt); - nd6_rtmsg(RTM_ADD, newrt); /* tell user process */ + nd6_rtmsg(RTM_ADD, newrt); /* tell user process */ RT_REMREF_LOCKED(newrt); RT_UNLOCK(newrt); NDDR_LOCK(new); new->stateflags |= NDDRF_INSTALLED; - if (ifscope != IFSCOPE_NONE) + if (ifscope != IFSCOPE_NONE) { new->stateflags |= NDDRF_IFSCOPE; + } } else { nd6log((LOG_ERR, "%s: failed to add default router " "%s on %s scoped %d (errno = %d)\n", __func__, @@ -862,12 +874,12 @@ defrouter_lookup( if (dr->ifp == ifp && IN6_ARE_ADDR_EQUAL(addr, &dr->rtaddr)) { NDDR_ADDREF_LOCKED(dr); NDDR_UNLOCK(dr); - return (dr); + return dr; } NDDR_UNLOCK(dr); } - return (NULL); /* search failed */ + return NULL; /* search failed */ } /* @@ -886,25 +898,26 @@ defrouter_delreq(struct nd_defrouter *dr) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_NOTOWNED); NDDR_LOCK_ASSERT_NOTHELD(dr); /* - * We're free to lock and unlock NDDR because our callers + * We're free to lock and unlock NDDR because our callers * are holding an extra reference for us. */ NDDR_LOCK(dr); /* ifp would be NULL for the "drany" case */ - if (dr->ifp != NULL && !(dr->stateflags & NDDRF_INSTALLED)) + if (dr->ifp != NULL && !(dr->stateflags & NDDRF_INSTALLED)) { goto out; + } nd6log2((LOG_INFO, "%s: removing default router %s, scoped=%d, " "static=%d\n", dr->ifp != NULL ? if_name(dr->ifp) : "ANY", ip6_sprintf(&dr->rtaddr), (dr->stateflags & NDDRF_IFSCOPE) ? 1 : 0, (dr->stateflags & NDDRF_STATIC) ? 1 : 0)); - Bzero(&def, sizeof (def)); - Bzero(&mask, sizeof (mask)); - Bzero(&gate, sizeof (gate)); + Bzero(&def, sizeof(def)); + Bzero(&mask, sizeof(mask)); + Bzero(&gate, sizeof(gate)); def.sin6_len = mask.sin6_len = gate.sin6_len - = sizeof (struct sockaddr_in6); + = sizeof(struct sockaddr_in6); def.sin6_family = mask.sin6_family = gate.sin6_family = AF_INET6; /* @@ -914,10 +927,11 @@ defrouter_delreq(struct nd_defrouter *dr) * To get more context, read the related comment in * defrouter_addreq */ - if (dr->stateflags & NDDRF_MAPPED) + if (dr->stateflags & NDDRF_MAPPED) { gate.sin6_addr = dr->rtaddr_mapped; - else + } else { gate.sin6_addr = dr->rtaddr; + } if (dr->ifp != NULL) { ifscope = (dr->stateflags & NDDRF_IFSCOPE) ? 
@@ -946,8 +960,9 @@ defrouter_delreq(struct nd_defrouter *dr) /* ESRCH means it's no longer in the routing table; ignore it */ if (oldrt != NULL || err == ESRCH) { dr->stateflags &= ~NDDRF_INSTALLED; - if (ifscope != IFSCOPE_NONE) + if (ifscope != IFSCOPE_NONE) { dr->stateflags &= ~NDDRF_IFSCOPE; + } } dr->err = 0; out: @@ -983,7 +998,7 @@ defrouter_reset(void) } /* Nuke primary (non-scoped) default router */ - bzero(&drany, sizeof (drany)); + bzero(&drany, sizeof(drany)); lck_mtx_init(&drany.nddr_lock, ifa_mtx_grp, ifa_mtx_attr); lck_mtx_unlock(nd6_mutex); defrouter_delreq(&drany); @@ -1001,22 +1016,22 @@ defrtrlist_ioctl(u_long cmd, caddr_t data) /* XXX Handle mapped default router entries */ switch (cmd) { - case SIOCDRADD_IN6_32: /* struct in6_defrouter_32 */ - case SIOCDRADD_IN6_64: /* struct in6_defrouter_64 */ + case SIOCDRADD_IN6_32: /* struct in6_defrouter_32 */ + case SIOCDRADD_IN6_64: /* struct in6_defrouter_64 */ ++add; - /* FALLTHRU */ - case SIOCDRDEL_IN6_32: /* struct in6_defrouter_32 */ - case SIOCDRDEL_IN6_64: /* struct in6_defrouter_64 */ - bzero(&dr0, sizeof (dr0)); + /* FALLTHRU */ + case SIOCDRDEL_IN6_32: /* struct in6_defrouter_32 */ + case SIOCDRDEL_IN6_64: /* struct in6_defrouter_64 */ + bzero(&dr0, sizeof(dr0)); if (cmd == SIOCDRADD_IN6_64 || cmd == SIOCDRDEL_IN6_64) { struct in6_defrouter_64 *r_64 = (struct in6_defrouter_64 *)(void *)data; u_int16_t i; bcopy(&r_64->rtaddr.sin6_addr, &dr0.rtaddr, - sizeof (dr0.rtaddr)); + sizeof(dr0.rtaddr)); dr0.flags = r_64->flags; - bcopy(&r_64->if_index, &i, sizeof (i)); + bcopy(&r_64->if_index, &i, sizeof(i)); ifindex = i; } else { struct in6_defrouter_32 *r_32 = @@ -1024,9 +1039,9 @@ defrtrlist_ioctl(u_long cmd, caddr_t data) u_int16_t i; bcopy(&r_32->rtaddr.sin6_addr, &dr0.rtaddr, - sizeof (dr0.rtaddr)); + sizeof(dr0.rtaddr)); dr0.flags = r_32->flags; - bcopy(&r_32->if_index, &i, sizeof (i)); + bcopy(&r_32->if_index, &i, sizeof(i)); ifindex = i; } ifnet_head_lock_shared(); @@ -1057,12 +1072,14 @@ defrtrlist_ioctl(u_long cmd, caddr_t data) } } - if (add) + if (add) { error = defrtrlist_add_static(&dr0); + } if (!add || error != 0) { int err = defrtrlist_del_static(&dr0); - if (!add) + if (!add) { error = err; + } } break; @@ -1071,7 +1088,7 @@ defrtrlist_ioctl(u_long cmd, caddr_t data) break; } - return (error); + return error; } /* @@ -1105,7 +1122,7 @@ defrtrlist_del(struct nd_defrouter *dr) * for development builds. */ TAILQ_FOREACH(dr_itr, &nd_defrouter, dr_entry) - VERIFY(dr != dr_itr); + VERIFY(dr != dr_itr); #endif ++nd6_defrouter_genid; /* @@ -1115,10 +1132,11 @@ defrtrlist_del(struct nd_defrouter *dr) /* above is a good condition? 
*/ NDDR_ADDREF(dr); lck_mtx_unlock(nd6_mutex); - if (dr->stateflags & NDDRF_MAPPED) + if (dr->stateflags & NDDRF_MAPPED) { rt6_flush(&dr->rtaddr_mapped, ifp); - else + } else { rt6_flush(&dr->rtaddr, ifp); + } lck_mtx_lock(nd6_mutex); NDDR_REMREF(dr); @@ -1140,8 +1158,9 @@ defrtrlist_del(struct nd_defrouter *dr) struct nd_pfxrouter *pfxrtr; NDPR_LOCK(pr); - if ((pfxrtr = pfxrtr_lookup(pr, dr)) != NULL) + if ((pfxrtr = pfxrtr_lookup(pr, dr)) != NULL) { pfxrtr_del(pfxrtr, pr); + } NDPR_UNLOCK(pr); } @@ -1165,8 +1184,9 @@ defrtrlist_del(struct nd_defrouter *dr) */ defrouter_select(ifp); - if (resetmtu) + if (resetmtu) { nd6_setmtu(ifp); + } } int @@ -1186,19 +1206,22 @@ defrtrlist_add_static(struct nd_defrouter *new) if (dr != NULL && !(dr->stateflags & NDDRF_STATIC)) { err = EINVAL; } else { - if (dr != NULL) + if (dr != NULL) { NDDR_REMREF(dr); + } dr = defrtrlist_update(new); - if (dr != NULL) + if (dr != NULL) { err = dr->err; - else + } else { err = ENOMEM; + } } - if (dr != NULL) + if (dr != NULL) { NDDR_REMREF(dr); + } lck_mtx_unlock(nd6_mutex); - return (err); + return err; } int @@ -1209,8 +1232,9 @@ defrtrlist_del_static(struct nd_defrouter *new) lck_mtx_lock(nd6_mutex); dr = defrouter_lookup(&new->rtaddr, new->ifp); if (dr == NULL || !(dr->stateflags & NDDRF_STATIC)) { - if (dr != NULL) + if (dr != NULL) { NDDR_REMREF(dr); + } dr = NULL; } else { TAILQ_REMOVE(&nd_defrouter, dr, dr_entry); @@ -1220,7 +1244,7 @@ defrtrlist_del_static(struct nd_defrouter *new) } lck_mtx_unlock(nd6_mutex); - return (dr != NULL ? 0 : EINVAL); + return dr != NULL ? 0 : EINVAL; } /* @@ -1232,12 +1256,12 @@ rtpref(struct nd_defrouter *dr) { switch (dr->flags & ND_RA_FLAG_RTPREF_MASK) { case ND_RA_FLAG_RTPREF_HIGH: - return (RTPREF_HIGH); + return RTPREF_HIGH; case ND_RA_FLAG_RTPREF_MEDIUM: case ND_RA_FLAG_RTPREF_RSV: - return (RTPREF_MEDIUM); + return RTPREF_MEDIUM; case ND_RA_FLAG_RTPREF_LOW: - return (RTPREF_LOW); + return RTPREF_LOW; default: /* * This case should never happen. If it did, it would mean a @@ -1245,7 +1269,7 @@ rtpref(struct nd_defrouter *dr) * Or, can we even panic? 
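rtpref() below maps the RA's 2-bit Default Router Preference field (RFC 4191) onto a signed rank so that insertion into nd_defrouter can compare routers numerically; the reserved bit pattern is ranked as medium. A sketch, with the flag values restated from BSD's <netinet/icmp6.h>:

#include <stdio.h>

#define RA_RTPREF_MASK   0x18 /* 00011000 */
#define RA_RTPREF_HIGH   0x08
#define RA_RTPREF_MEDIUM 0x00
#define RA_RTPREF_LOW    0x18
#define RA_RTPREF_RSV    0x10

static int
rtpref_rank(unsigned char ra_flags)
{
	switch (ra_flags & RA_RTPREF_MASK) {
	case RA_RTPREF_HIGH:
		return 1;
	case RA_RTPREF_MEDIUM:
	case RA_RTPREF_RSV: /* reserved is ranked as medium */
		return 0;
	case RA_RTPREF_LOW:
		return -1;
	}
	return -3; /* unreachable: all four 2-bit patterns handled */
}

int
main(void)
{
	printf("high=%d medium=%d low=%d\n", rtpref_rank(RA_RTPREF_HIGH),
	    rtpref_rank(RA_RTPREF_MEDIUM), rtpref_rank(RA_RTPREF_LOW));
	return 0;
}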
*/ log(LOG_ERR, "rtpref: impossible RA flag %x\n", dr->flags); - return (RTPREF_INVALID); + return RTPREF_INVALID; } /* NOTREACHED */ } @@ -1356,7 +1380,7 @@ defrouter_select(struct ifnet *ifp) */ while (nd_defrouter_busy) { nd_defrouter_waiters++; - msleep(nd_defrouter_waitchan, nd6_mutex, (PZERO-1), + msleep(nd_defrouter_waitchan, nd6_mutex, (PZERO - 1), __func__, NULL); LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); } @@ -1401,19 +1425,21 @@ defrouter_select(struct ifnet *ifp) __func__, __LINE__, if_name(ifp))); VERIFY(selected_dr == NULL && installed_dr == NULL); selected_dr = dr; - if (dr->stateflags & NDDRF_INSTALLED) + if (dr->stateflags & NDDRF_INSTALLED) { installed_dr = dr; + } NDDR_ADDREF_LOCKED(selected_dr); NDDR_UNLOCK(dr); goto install_route; } - if (dr->stateflags & NDDRF_MAPPED) + if (dr->stateflags & NDDRF_MAPPED) { rtaddr = dr->rtaddr_mapped; - else + } else { rtaddr = dr->rtaddr; + } - NDDR_ADDREF_LOCKED(dr); /* for this for loop */ + NDDR_ADDREF_LOCKED(dr); /* for this for loop */ NDDR_UNLOCK(dr); /* Callee returns a locked route upon success */ @@ -1446,8 +1472,8 @@ defrouter_select(struct ifnet *ifp) */ NDDR_LOCK(dr); if (((selected_dr && (rtpref(dr) >= rtpref(selected_dr)) && - !(selected_dr->stateflags & NDDRF_STATIC)) || - (selected_dr == NULL)) && + !(selected_dr->stateflags & NDDRF_STATIC)) || + (selected_dr == NULL)) && (dr->stateflags & NDDRF_STATIC)) { if (selected_dr) { /* Release it later on */ @@ -1463,18 +1489,20 @@ defrouter_select(struct ifnet *ifp) if (installed_dr == NULL) { installed_dr = dr; NDDR_ADDREF_LOCKED(installed_dr); - if (dr->stateflags & NDDRF_MAPPED) + if (dr->stateflags & NDDRF_MAPPED) { rtaddr = installed_dr->rtaddr_mapped; - else + } else { rtaddr = installed_dr->rtaddr; + } NDDR_UNLOCK(dr); lck_mtx_unlock(nd6_mutex); /* Callee returns a locked route upon success */ if ((rt = nd6_lookup(&rtaddr, 0, ifp, 0)) != NULL) { RT_LOCK_ASSERT_HELD(rt); if ((ln = rt->rt_llinfo) != NULL && - ND6_IS_LLINFO_PROBREACH(ln)) + ND6_IS_LLINFO_PROBREACH(ln)) { is_installed_reachable = TRUE; + } RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); @@ -1488,18 +1516,20 @@ defrouter_select(struct ifnet *ifp) if_name(ifp))); NDDR_UNLOCK(dr); } - } else + } else { NDDR_UNLOCK(dr); + } - NDDR_REMREF(dr); /* for this for loop */ - if (drrele != NULL) + NDDR_REMREF(dr); /* for this for loop */ + if (drrele != NULL) { NDDR_REMREF(drrele); + } /* * Check if the list changed when we gave up * the nd6_mutex lock */ - if(genid != nd6_defrouter_genid) { + if (genid != nd6_defrouter_genid) { if (selected_dr) { NDDR_REMREF(selected_dr); selected_dr = NULL; @@ -1542,8 +1572,9 @@ defrouter_select(struct ifnet *ifp) if (installed_dr) { for (dr = TAILQ_NEXT(installed_dr, dr_entry); dr; dr = TAILQ_NEXT(dr, dr_entry)) { - if (installed_dr->ifp != dr->ifp) + if (installed_dr->ifp != dr->ifp) { continue; + } selected_dr = dr; break; } @@ -1573,8 +1604,9 @@ defrouter_select(struct ifnet *ifp) goto out; } - if (selected_dr != installed_dr) + if (selected_dr != installed_dr) { NDDR_ADDREF(selected_dr); + } } else if (installed_dr != NULL) { if (installed_dr != selected_dr) { /* @@ -1598,7 +1630,7 @@ defrouter_select(struct ifnet *ifp) } } -install_route: +install_route: /* * If the selected router is different than the installed one, * remove the installed router and install the selected one. 
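defrouter_select() above walks the router list preferring, roughly in order, reachable neighbors, static entries, and higher router preference, and only reinstalls the default route when the winner differs from what is installed. One way to phrase that ordering as a pairwise comparison; this is a sketch of the policy, not a verbatim restatement of the loop:

#include <stdio.h>

/* A candidate default router, reduced to what the loop compares. */
struct rtr {
	int reachable; /* ND6_IS_LLINFO_PROBREACH() on its llinfo */
	int is_static; /* NDDRF_STATIC */
	int rtpref;    /* rtpref() rank, -1..1 */
};

/* Nonzero when candidate b should replace the current pick a. */
static int
better_router(const struct rtr *a, const struct rtr *b)
{
	if (b->reachable != a->reachable)
		return b->reachable;  /* reachability dominates */
	if (b->is_static != a->is_static)
		return b->is_static && b->rtpref >= a->rtpref;
	return b->rtpref > a->rtpref; /* then router preference */
}

int
main(void)
{
	struct rtr cur = { 1, 0, 0 };  /* reachable, dynamic, medium */
	struct rtr cand = { 1, 1, 0 }; /* reachable, static, medium  */

	printf("replace=%d\n", better_router(&cur, &cand)); /* 1 */
	return 0;
}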
@@ -1621,9 +1653,9 @@ install_route: defrouter_addreq(selected_dr, (selected_dr->ifp != nd6_defifp)); } else if (((installed_dr->stateflags & NDDRF_IFSCOPE) && - (installed_dr->ifp == nd6_defifp)) || - (!(installed_dr->stateflags & NDDRF_IFSCOPE) && - (installed_dr->ifp != nd6_defifp))) { + (installed_dr->ifp == nd6_defifp)) || + (!(installed_dr->stateflags & NDDRF_IFSCOPE) && + (installed_dr->ifp != nd6_defifp))) { nd6log((LOG_INFO, "%s:%d: Need to reinstall default route for interface " "%s as its scope has changed.\n", @@ -1639,10 +1671,12 @@ install_route: } lck_mtx_lock(nd6_mutex); out: - if (selected_dr && (selected_dr != installed_dr)) + if (selected_dr && (selected_dr != installed_dr)) { NDDR_REMREF(selected_dr); - if (installed_dr) + } + if (installed_dr) { NDDR_REMREF(installed_dr); + } LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); VERIFY(nd_defrouter_busy); nd_defrouter_busy = FALSE; @@ -1686,19 +1720,22 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) * it's already at that position. */ /* same preference and scoped; just return */ - if (rtpref(new) == oldpref && scoped) - return (dr); + if (rtpref(new) == oldpref && scoped) { + return dr; + } n = TAILQ_FIRST(&nd_defrouter); while (n != NULL) { /* preference changed; sort it */ - if (rtpref(new) != oldpref) + if (rtpref(new) != oldpref) { break; + } /* not at the top of band; sort it */ if (n != dr && rtpref(n) == oldpref && - (!p || rtpref(p) > rtpref(n))) + (!p || rtpref(p) > rtpref(n))) { break; + } p = n; n = TAILQ_NEXT(n, dr_entry); @@ -1706,8 +1743,9 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) /* nothing has changed, just return */ if (n == NULL && (scoped || - !(dr->stateflags & NDDRF_IFSCOPE))) - return (dr); + !(dr->stateflags & NDDRF_IFSCOPE))) { + return dr; + } /* * preferred router may be changed, so relocate @@ -1724,19 +1762,19 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) n = dr; goto insert; } - return (dr); + return dr; } VERIFY(dr == NULL); /* entry does not exist */ if (new->rtlifetime == 0) { - return (NULL); + return NULL; } n = nddr_alloc(M_WAITOK); if (n == NULL) { - return (NULL); + return NULL; } ndi = ND_IFINFO(ifp); @@ -1746,11 +1784,11 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) ndi->ndefrouters >= ip6_maxifdefrouters) { lck_mtx_unlock(&ndi->lock); nddr_free(n); - return (NULL); + return NULL; } - NDDR_ADDREF(n); /* for the nd_defrouter list */ - NDDR_ADDREF(n); /* for the caller */ + NDDR_ADDREF(n); /* for the nd_defrouter list */ + NDDR_ADDREF(n); /* for the caller */ ++nd6_defrouter_genid; ndi->ndefrouters++; @@ -1762,7 +1800,7 @@ defrtrlist_update_common(struct nd_defrouter *new, boolean_t scoped) getmicrotime(&caltime); NDDR_LOCK(n); - memcpy(&n->rtaddr, &new->rtaddr, sizeof (n->rtaddr)); + memcpy(&n->rtaddr, &new->rtaddr, sizeof(n->rtaddr)); n->flags = new->flags; n->stateflags = new->stateflags; n->rtlifetime = new->rtlifetime; @@ -1790,17 +1828,19 @@ insert: for (dr = TAILQ_FIRST(&nd_defrouter); dr; dr = TAILQ_NEXT(dr, dr_entry)) { if (rtpref(n) > rtpref(dr) || - (!scoped && rtpref(n) == rtpref(dr))) + (!scoped && rtpref(n) == rtpref(dr))) { break; + } } - if (dr) + if (dr) { TAILQ_INSERT_BEFORE(dr, n, dr_entry); - else + } else { TAILQ_INSERT_TAIL(&nd_defrouter, n, dr_entry); + } defrouter_select(ifp); - return (n); + return n; } static struct nd_defrouter * @@ -1812,7 +1852,7 @@ defrtrlist_update(struct nd_defrouter *new) dr = defrtrlist_update_common(new, (nd6_defifp != NULL && 
new->ifp != nd6_defifp)); - return (dr); + return dr; } static struct nd_pfxrouter * @@ -1825,11 +1865,12 @@ pfxrtr_lookup(struct nd_prefix *pr, struct nd_defrouter *dr) for (search = pr->ndpr_advrtrs.lh_first; search; search = search->pfr_next) { - if (search->router == dr) + if (search->router == dr) { break; + } } - return (search); + return search; } static void @@ -1841,9 +1882,10 @@ pfxrtr_add(struct nd_prefix *pr, struct nd_defrouter *dr) NDPR_LOCK_ASSERT_NOTHELD(pr); new = zalloc(ndprtr_zone); - if (new == NULL) + if (new == NULL) { return; - bzero(new, sizeof (*new)); + } + bzero(new, sizeof(*new)); new->router = dr; NDPR_LOCK(pr); @@ -1899,7 +1941,7 @@ nd6_prefix_lookup(struct nd_prefix *pr, int nd6_prefix_expiry) } lck_mtx_unlock(nd6_mutex); - return (search); + return search; } int @@ -1917,14 +1959,15 @@ nd6_prelist_add(struct nd_prefix *pr, struct nd_defrouter *dr, lck_mtx_lock(&ndi->lock); if (ndi->nprefixes >= ip6_maxifprefixes) { lck_mtx_unlock(&ndi->lock); - return (ENOMEM); + return ENOMEM; } lck_mtx_unlock(&ndi->lock); } new = ndpr_alloc(M_WAITOK); - if (new == NULL) - return (ENOMEM); + if (new == NULL) { + return ENOMEM; + } NDPR_LOCK(new); NDPR_LOCK(pr); @@ -1934,26 +1977,28 @@ nd6_prelist_add(struct nd_prefix *pr, struct nd_defrouter *dr, new->ndpr_vltime = pr->ndpr_vltime; new->ndpr_pltime = pr->ndpr_pltime; new->ndpr_flags = pr->ndpr_flags; - if (pr->ndpr_stateflags & NDPRF_STATIC) + if (pr->ndpr_stateflags & NDPRF_STATIC) { new->ndpr_stateflags |= NDPRF_STATIC; + } NDPR_UNLOCK(pr); if ((error = in6_init_prefix_ltimes(new)) != 0) { NDPR_UNLOCK(new); ndpr_free(new); - return (error); + return error; } new->ndpr_lastupdate = net_uptime(); if (newp != NULL) { *newp = new; - NDPR_ADDREF_LOCKED(new); /* for caller */ + NDPR_ADDREF_LOCKED(new); /* for caller */ } /* initialization */ LIST_INIT(&new->ndpr_advrtrs); in6_prefixlen2mask(&new->ndpr_mask, new->ndpr_plen); /* make prefix in the canonical form */ - for (i = 0; i < 4; i++) + for (i = 0; i < 4; i++) { new->ndpr_prefix.sin6_addr.s6_addr32[i] &= - new->ndpr_mask.s6_addr32[i]; + new->ndpr_mask.s6_addr32[i]; + } NDPR_UNLOCK(new); @@ -1964,7 +2009,7 @@ nd6_prelist_add(struct nd_prefix *pr, struct nd_defrouter *dr, /* link ndpr_entry to nd_prefix list */ LIST_INSERT_HEAD(&nd_prefix, new, ndpr_entry); new->ndpr_debug |= IFD_ATTACHED; - NDPR_ADDREF(new); /* for nd_prefix list */ + NDPR_ADDREF(new); /* for nd_prefix list */ lck_mtx_lock(&ndi->lock); ndi->nprefixes++; @@ -1992,7 +2037,7 @@ nd6_prelist_add(struct nd_prefix *pr, struct nd_defrouter *dr, lck_mtx_unlock(nd6_mutex); - return (0); + return 0; } /* @@ -2009,8 +2054,9 @@ prelist_remove(struct nd_prefix *pr) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); NDPR_LOCK_ASSERT_HELD(pr); - if (pr->ndpr_stateflags & NDPRF_DEFUNCT) + if (pr->ndpr_stateflags & NDPRF_DEFUNCT) { return; + } /* * If there are no more addresses, defunct the prefix. This is needed @@ -2018,8 +2064,9 @@ prelist_remove(struct nd_prefix *pr) * the same prefix and this might happen because we unlock nd6_mutex * down below. */ - if (pr->ndpr_addrcnt == 0) + if (pr->ndpr_addrcnt == 0) { pr->ndpr_stateflags |= NDPRF_DEFUNCT; + } /* make sure to invalidate the prefix until it is really freed. 
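nd6_prelist_add() above stores prefixes in canonical form: in6_prefixlen2mask() expands the advertised prefix length into a netmask, and the loop clears the host bits. A user-space equivalent of both steps:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* What in6_prefixlen2mask() expands a prefix length into. */
static void
prefixlen2mask(struct in6_addr *mask, unsigned plen)
{
	memset(mask, 0, sizeof(*mask));
	memset(mask->s6_addr, 0xff, plen / 8);
	if (plen % 8 != 0)
		mask->s6_addr[plen / 8] =
		    (unsigned char)(0xff << (8 - plen % 8));
}

int
main(void)
{
	struct in6_addr pfx, mask;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "2001:db8::dead:beef", &pfx);
	prefixlen2mask(&mask, 64);
	for (int i = 0; i < 16; i++)
		pfx.s6_addr[i] &= mask.s6_addr[i]; /* canonical form */
	printf("%s\n", inet_ntop(AF_INET6, &pfx, buf, sizeof(buf)));
	return 0; /* prints 2001:db8:: */
}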
*/ pr->ndpr_vltime = 0; @@ -2043,8 +2090,9 @@ prelist_remove(struct nd_prefix *pr) } lck_mtx_lock(nd6_mutex); NDPR_LOCK(pr); - if (NDPR_REMREF_LOCKED(pr) == NULL) + if (NDPR_REMREF_LOCKED(pr) == NULL) { return; + } } if (pr->ndpr_addrcnt > 0) { @@ -2130,10 +2178,12 @@ prelist_update( */ lck_mtx_lock(nd6_mutex); NDPR_LOCK(pr); - if (new->ndpr_raf_onlink == 1) + if (new->ndpr_raf_onlink == 1) { pr->ndpr_raf_onlink = 1; - if (new->ndpr_raf_auto == 1) + } + if (new->ndpr_raf_auto == 1) { pr->ndpr_raf_auto = 1; + } if (new->ndpr_raf_onlink) { pr->ndpr_vltime = new->ndpr_vltime; pr->ndpr_pltime = new->ndpr_pltime; @@ -2170,12 +2220,14 @@ prelist_update( } else { newprefix = 1; - if (new->ndpr_vltime == 0) + if (new->ndpr_vltime == 0) { goto end; - if (new->ndpr_raf_onlink == 0 && new->ndpr_raf_auto == 0) + } + if (new->ndpr_raf_onlink == 0 && new->ndpr_raf_auto == 0) { goto end; + } - bzero(&new->ndpr_addr, sizeof (struct in6_addr)); + bzero(&new->ndpr_addr, sizeof(struct in6_addr)); error = nd6_prelist_add(new, dr, &pr, FALSE); if (error != 0 || pr == NULL) { @@ -2195,8 +2247,9 @@ prelist_update( */ /* 5.5.3 (a). Ignore the prefix without the A bit set. */ - if (!new->ndpr_raf_auto) + if (!new->ndpr_raf_auto) { goto end; + } /* * 5.5.3 (b). the link-local prefix should have been ignored in @@ -2205,7 +2258,7 @@ prelist_update( /* 5.5.3 (c). Consistency check on lifetimes: pltime <= vltime. */ if (new->ndpr_pltime > new->ndpr_vltime) { - error = EINVAL; /* XXX: won't be used */ + error = EINVAL; /* XXX: won't be used */ goto end; } @@ -2256,7 +2309,7 @@ prelist_update( if (ia6_match == NULL) { /* remember the first one */ ia6_match = ifa6; - IFA_ADDREF_LOCKED(ifa); /* for ia6_match */ + IFA_ADDREF_LOCKED(ifa); /* for ia6_match */ } /* @@ -2265,14 +2318,14 @@ prelist_update( * proceed to 5.5.3. (e): update the lifetimes according to the * "two hours" rule and the privacy extension. */ -#define TWOHOUR (120*60) +#define TWOHOUR (120*60) /* retrieve time as uptime (last arg is 0) */ in6ifa_getlifetime(ifa6, <6_tmp, 0); - if (lt6_tmp.ia6t_vltime == ND6_INFINITE_LIFETIME) + if (lt6_tmp.ia6t_vltime == ND6_INFINITE_LIFETIME) { remaininglifetime = ND6_INFINITE_LIFETIME; - else if (timenow - ifa6->ia6_updatetime > lt6_tmp.ia6t_vltime) { + } else if (timenow - ifa6->ia6_updatetime > lt6_tmp.ia6t_vltime) { /* * The case of "invalid" address. We should usually * not see this case. @@ -2308,12 +2361,14 @@ prelist_update( u_int32_t maxvltime, maxpltime; /* Constrain lifetimes to system limits. 
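The TWOHOUR constant below implements the "two hours" rule of RFC 4862 section 5.5.3 (e): an unauthenticated RA may freely raise an address's valid lifetime, but may not lower it below two hours, which stops a single spoofed RA from invalidating existing addresses. A sketch of the rule in isolation, lifetimes in seconds:

#include <stdio.h>

#define TWOHOUR (120 * 60) /* as defined below */

/*
 * Returns the valid lifetime to store, given the lifetime received in
 * an unauthenticated RA and the lifetime currently remaining.
 */
static unsigned
apply_two_hour_rule(unsigned rx_vltime, unsigned remaining)
{
	if (rx_vltime > TWOHOUR || rx_vltime > remaining)
		return rx_vltime; /* raising it is always allowed */
	if (remaining <= TWOHOUR)
		return remaining; /* ignore the attempt to shorten */
	return TWOHOUR;           /* clamp the reduction */
}

int
main(void)
{
	printf("%u\n", apply_two_hour_rule(0, 86400));    /* 7200  */
	printf("%u\n", apply_two_hour_rule(0, 3600));     /* 3600  */
	printf("%u\n", apply_two_hour_rule(86400, 7200)); /* 86400 */
	return 0;
}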
*/ - if (lt6_tmp.ia6t_vltime > ip6_temp_valid_lifetime) + if (lt6_tmp.ia6t_vltime > ip6_temp_valid_lifetime) { lt6_tmp.ia6t_vltime = ip6_temp_valid_lifetime; - if (lt6_tmp.ia6t_pltime > ip6_temp_preferred_lifetime) + } + if (lt6_tmp.ia6t_pltime > ip6_temp_preferred_lifetime) { lt6_tmp.ia6t_pltime = ip6_temp_preferred_lifetime - ip6_desync_factor; + } /* * According to RFC 4941, section 3.3 (1), we only @@ -2326,24 +2381,28 @@ prelist_update( maxvltime = ip6_temp_valid_lifetime - (timenow - ifa6->ia6_createtime) - ip6_desync_factor; - } else + } else { maxvltime = 0; + } if (ip6_temp_preferred_lifetime > (u_int32_t)((timenow - ifa6->ia6_createtime) + ip6_desync_factor)) { maxpltime = ip6_temp_preferred_lifetime - (timenow - ifa6->ia6_createtime) - ip6_desync_factor; - } else + } else { maxpltime = 0; + } if (lt6_tmp.ia6t_vltime == ND6_INFINITE_LIFETIME || - lt6_tmp.ia6t_vltime > maxvltime) + lt6_tmp.ia6t_vltime > maxvltime) { lt6_tmp.ia6t_vltime = maxvltime; + } if (lt6_tmp.ia6t_pltime == ND6_INFINITE_LIFETIME || - lt6_tmp.ia6t_pltime > maxpltime) + lt6_tmp.ia6t_pltime > maxpltime) { lt6_tmp.ia6t_pltime = maxpltime; + } } in6_init_address_ltimes(pr, <6_tmp); @@ -2367,7 +2426,7 @@ prelist_update( IFA_LOCK(&ia6->ia_ifa); NDPR_LOCK(pr); ia6->ia6_ndpr = pr; - NDPR_ADDREF_LOCKED(pr); /* for addr reference */ + NDPR_ADDREF_LOCKED(pr); /* for addr reference */ pr->ndpr_addrcnt++; VERIFY(pr->ndpr_addrcnt != 0); NDPR_UNLOCK(pr); @@ -2403,7 +2462,7 @@ prelist_update( * stateless translation. */ if (IS_INTF_CLAT46(ifp)) { - if ((ia6 = in6_pfx_newpersistaddr(new, mcast,&error, TRUE)) != NULL) { + if ((ia6 = in6_pfx_newpersistaddr(new, mcast, &error, TRUE)) != NULL) { IFA_LOCK(&ia6->ia_ifa); NDPR_LOCK(pr); ia6->ia6_ndpr = pr; @@ -2423,9 +2482,9 @@ prelist_update( */ ip6stat.ip6s_clat464_v6addr_conffail++; in6_clat46_event_enqueue_nwk_wq_entry( - IN6_CLAT46_EVENT_V6_ADDR_CONFFAIL, - 0, - tmp_uuid); + IN6_CLAT46_EVENT_V6_ADDR_CONFFAIL, + 0, + tmp_uuid); nd6log0((LOG_ERR, "Could not configure CLAT46 address on interface " "%s.\n", ifp->if_xname)); } @@ -2448,11 +2507,13 @@ prelist_update( } } end: - if (pr != NULL) + if (pr != NULL) { NDPR_REMREF(pr); - if (ia6_match != NULL) + } + if (ia6_match != NULL) { IFA_REMREF(&ia6_match->ia_ifa); - return (error); + } + return error; } /* @@ -2473,7 +2534,7 @@ nddr_alloc(int how) dr->nddr_trace = nddr_trace; } } - return (dr); + return dr; } static void @@ -2521,11 +2582,11 @@ nddr_trace(struct nd_defrouter *dr, int refhold) void nddr_addref(struct nd_defrouter *nddr, int locked) { - - if (!locked) + if (!locked) { NDDR_LOCK_SPIN(nddr); - else + } else { NDDR_LOCK_ASSERT_HELD(nddr); + } if (++nddr->nddr_refcount == 0) { panic("%s: nddr %p wraparound refcnt\n", __func__, nddr); @@ -2534,18 +2595,19 @@ nddr_addref(struct nd_defrouter *nddr, int locked) (*nddr->nddr_trace)(nddr, TRUE); } - if (!locked) + if (!locked) { NDDR_UNLOCK(nddr); + } } struct nd_defrouter * nddr_remref(struct nd_defrouter *nddr, int locked) { - - if (!locked) + if (!locked) { NDDR_LOCK_SPIN(nddr); - else + } else { NDDR_LOCK_ASSERT_HELD(nddr); + } if (nddr->nddr_refcount == 0) { panic("%s: nddr %p negative refcnt\n", __func__, nddr); @@ -2560,10 +2622,11 @@ nddr_remref(struct nd_defrouter *nddr, int locked) nddr = NULL; } - if (!locked && nddr != NULL) + if (!locked && nddr != NULL) { NDDR_UNLOCK(nddr); + } - return (nddr); + return nddr; } uint64_t @@ -2585,7 +2648,7 @@ nddr_getexpire(struct nd_defrouter *dr) } else { expiry = 0; } - return (expiry); + return expiry; } /* @@ -2607,7 +2670,7 
@@ ndpr_alloc(int how) pr->ndpr_trace = ndpr_trace; } } - return (pr); + return pr; } static void @@ -2666,10 +2729,11 @@ ndpr_trace(struct nd_prefix *pr, int refhold) void ndpr_addref(struct nd_prefix *ndpr, int locked) { - if (!locked) + if (!locked) { NDPR_LOCK_SPIN(ndpr); - else + } else { NDPR_LOCK_ASSERT_HELD(ndpr); + } if (++ndpr->ndpr_refcount == 0) { panic("%s: ndpr %p wraparound refcnt\n", __func__, ndpr); @@ -2678,17 +2742,19 @@ ndpr_addref(struct nd_prefix *ndpr, int locked) (*ndpr->ndpr_trace)(ndpr, TRUE); } - if (!locked) + if (!locked) { NDPR_UNLOCK(ndpr); + } } struct nd_prefix * ndpr_remref(struct nd_prefix *ndpr, int locked) { - if (!locked) + if (!locked) { NDPR_LOCK_SPIN(ndpr); - else + } else { NDPR_LOCK_ASSERT_HELD(ndpr); + } if (ndpr->ndpr_refcount == 0) { panic("%s: ndpr %p negative refcnt\n", __func__, ndpr); @@ -2709,10 +2775,11 @@ ndpr_remref(struct nd_prefix *ndpr, int locked) ndpr = NULL; } - if (!locked && ndpr != NULL) + if (!locked && ndpr != NULL) { NDPR_UNLOCK(ndpr); + } - return (ndpr); + return ndpr; } uint64_t @@ -2735,7 +2802,7 @@ ndpr_getexpire(struct nd_prefix *pr) } else { expiry = 0; } - return (expiry); + return expiry; } /* @@ -2762,10 +2829,11 @@ find_pfxlist_reachable_router(struct nd_prefix *pr) pfxrtr = LIST_FIRST(&pr->ndpr_advrtrs); while (pfxrtr) { ifp = pfxrtr->router->ifp; - if (pfxrtr->router->stateflags & NDDRF_MAPPED) + if (pfxrtr->router->stateflags & NDDRF_MAPPED) { rtaddr = pfxrtr->router->rtaddr_mapped; - else + } else { rtaddr = pfxrtr->router->rtaddr; + } NDPR_UNLOCK(pr); lck_mtx_unlock(nd6_mutex); @@ -2778,7 +2846,7 @@ find_pfxlist_reachable_router(struct nd_prefix *pr) RT_UNLOCK(rt); lck_mtx_lock(nd6_mutex); NDPR_LOCK(pr); - break; /* found */ + break; /* found */ } RT_REMREF_LOCKED(rt); RT_UNLOCK(rt); @@ -2788,13 +2856,13 @@ find_pfxlist_reachable_router(struct nd_prefix *pr) if (pr->ndpr_genid != genid) { pfxrtr = LIST_FIRST(&pr->ndpr_advrtrs); genid = pr->ndpr_genid; - } else + } else { pfxrtr = LIST_NEXT(pfxrtr, pfr_entry); + } } NDPR_LOCK_ASSERT_HELD(pr); - return (pfxrtr); - + return pfxrtr; } /* @@ -2825,7 +2893,7 @@ pfxlist_onlink_check(void) while (nd_prefix_busy) { nd_prefix_waiters++; - msleep(nd_prefix_waitchan, nd6_mutex, (PZERO-1), + msleep(nd_prefix_waitchan, nd6_mutex, (PZERO - 1), __func__, NULL); LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); } @@ -2846,10 +2914,11 @@ pfxlist_onlink_check(void) NDPR_ADDREF_LOCKED(pr); if (pr->ndpr_raf_onlink && find_pfxlist_reachable_router(pr) && (pr->ndpr_debug & IFD_ATTACHED)) { - if (NDPR_REMREF_LOCKED(pr) == NULL) + if (NDPR_REMREF_LOCKED(pr) == NULL) { pr = NULL; - else + } else { NDPR_UNLOCK(pr); + } break; } pr->ndpr_stateflags |= NDPRF_PROCESSED_ONLINK; @@ -2885,8 +2954,9 @@ pfxlist_onlink_check(void) } NDPR_UNLOCK(pr0); } - if (pfxrtr != NULL) + if (pfxrtr != NULL) { break; + } } } if (pr != NULL || (TAILQ_FIRST(&nd_defrouter) && pfxrtr == NULL)) { @@ -2917,12 +2987,14 @@ pfxlist_onlink_check(void) NDPR_ADDREF_LOCKED(pr); if ((pr->ndpr_stateflags & NDPRF_DETACHED) == 0 && find_pfxlist_reachable_router(pr) == NULL && - (pr->ndpr_debug & IFD_ATTACHED)) + (pr->ndpr_debug & IFD_ATTACHED)) { pr->ndpr_stateflags |= NDPRF_DETACHED; + } if ((pr->ndpr_stateflags & NDPRF_DETACHED) != 0 && find_pfxlist_reachable_router(pr) != NULL && - (pr->ndpr_debug & IFD_ATTACHED)) + (pr->ndpr_debug & IFD_ATTACHED)) { pr->ndpr_stateflags &= ~NDPRF_DETACHED; + } pr->ndpr_stateflags |= NDPRF_PROCESSED_ONLINK; NDPR_UNLOCK(pr); NDPR_REMREF(pr); @@ -2943,8 +3015,9 @@ pfxlist_onlink_check(void) 
NDPR_UNLOCK(pr); continue; } - if ((pr->ndpr_stateflags & NDPRF_DETACHED) != 0) + if ((pr->ndpr_stateflags & NDPRF_DETACHED) != 0) { pr->ndpr_stateflags &= ~NDPRF_DETACHED; + } NDPR_UNLOCK(pr); } } @@ -3147,8 +3220,9 @@ nd6_prefix_equal_lookup(struct nd_prefix *pr, boolean_t primary_only) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); for (opr = nd_prefix.lh_first; opr; opr = opr->ndpr_next) { - if (opr == pr) + if (opr == pr) { continue; + } NDPR_LOCK(opr); if ((opr->ndpr_stateflags & NDPRF_ONLINK) == 0) { @@ -3162,11 +3236,11 @@ nd6_prefix_equal_lookup(struct nd_prefix *pr, boolean_t primary_only) !(opr->ndpr_stateflags & NDPRF_IFSCOPE))) { NDPR_ADDREF_LOCKED(opr); NDPR_UNLOCK(opr); - return (opr); + return opr; } NDPR_UNLOCK(opr); } - return (NULL); + return NULL; } /* @@ -3182,8 +3256,9 @@ nd6_prefix_sync(struct ifnet *ifp) LCK_MTX_ASSERT(nd6_mutex, LCK_MTX_ASSERT_OWNED); - if (ifp == NULL) + if (ifp == NULL) { return; + } for (pr = nd_prefix.lh_first; pr; pr = pr->ndpr_next) { NDPR_LOCK(pr); @@ -3200,8 +3275,9 @@ nd6_prefix_sync(struct ifnet *ifp) NDPR_UNLOCK(pr); } - if (pr == NULL) + if (pr == NULL) { return; + } /* Remove conflicting entries */ opr = nd6_prefix_equal_lookup(pr, TRUE); @@ -3267,8 +3343,9 @@ nd6_prefix_sync(struct ifnet *ifp) (opr != NULL) ? if_name(opr->ndpr_ifp) : "NONE")); } - if (opr != NULL) + if (opr != NULL) { NDPR_REMREF(opr); + } } static int @@ -3293,8 +3370,8 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, __func__, ip6_sprintf(&pr->ndpr_prefix.sin6_addr), pr->ndpr_plen, if_name(pr->ndpr_ifp), (pr->ndpr_stateflags & NDPRF_IFSCOPE) ? 1 : 0); - NDPR_UNLOCK(pr); - return (EEXIST)); + NDPR_UNLOCK(pr); + return (EEXIST)); } NDPR_UNLOCK(pr); @@ -3304,8 +3381,9 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, * interface, and the prefix has already installed the interface route. */ opr = nd6_prefix_equal_lookup(pr, FALSE); - if (opr != NULL) + if (opr != NULL) { NDPR_REMREF(opr); + } if (!force_scoped) { /* @@ -3315,10 +3393,11 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, */ ifscope = (opr != NULL) ? ifp->if_index : IFSCOPE_NONE; opr = nd6_prefix_equal_lookup(pr, TRUE); - if (opr != NULL) + if (opr != NULL) { NDPR_REMREF(opr); - else if (ifscope != IFSCOPE_NONE) + } else if (ifscope != IFSCOPE_NONE) { ifscope = IFSCOPE_NONE; + } } /* @@ -3332,8 +3411,9 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, ifnet_lock_shared(ifp); IFP_TO_IA6(ifp, ia6); ifnet_lock_done(ifp); - if (ia6 != NULL) + if (ia6 != NULL) { ifa = &ia6->ia_ifa; + } /* should we care about ia6_flags? */ } NDPR_LOCK(pr); @@ -3350,20 +3430,21 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, ip6_sprintf(&pr->ndpr_prefix.sin6_addr), pr->ndpr_plen, if_name(ifp))); NDPR_UNLOCK(pr); - return (0); + return 0; } /* * in6_ifinit() sets nd6_rtrequest to ifa_rtrequest for all ifaddrs. 
* ifa->ifa_rtrequest = nd6_rtrequest; */ - bzero(&mask6, sizeof (mask6)); - mask6.sin6_len = sizeof (mask6); + bzero(&mask6, sizeof(mask6)); + mask6.sin6_len = sizeof(mask6); mask6.sin6_addr = pr->ndpr_mask; prefix = pr->ndpr_prefix; - if ((rt = pr->ndpr_rt) != NULL) + if ((rt = pr->ndpr_rt) != NULL) { pr->ndpr_rt = NULL; - NDPR_ADDREF_LOCKED(pr); /* keep reference for this routine */ + } + NDPR_ADDREF_LOCKED(pr); /* keep reference for this routine */ NDPR_UNLOCK(pr); IFA_LOCK_SPIN(ifa); @@ -3428,10 +3509,10 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, VERIFY(RB_EMPTY(&pr->ndpr_prproxy_sols)); ndi = ND_IFINFO(ifp); - VERIFY((NULL != ndi) && (TRUE == ndi->initialized)); + VERIFY((NULL != ndi) && (TRUE == ndi->initialized)); lck_mtx_lock(&ndi->lock); - pr->ndpr_rt = rt; /* keep reference from rtrequest */ + pr->ndpr_rt = rt; /* keep reference from rtrequest */ pr->ndpr_stateflags |= NDPRF_ONLINK; if (ifscope != IFSCOPE_NONE) { pr->ndpr_stateflags |= NDPRF_IFSCOPE; @@ -3450,8 +3531,9 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, } lck_mtx_unlock(&ndi->lock); - } else if (rt != NULL && pr->ndpr_stateflags & NDPRF_DEFUNCT) + } else if (rt != NULL && pr->ndpr_stateflags & NDPRF_DEFUNCT) { rtfree(rt); + } prproxy = (pr->ndpr_stateflags & NDPRF_PRPROXY); VERIFY(!prproxy || !(pr->ndpr_stateflags & NDPRF_IFSCOPE)); @@ -3467,24 +3549,24 @@ nd6_prefix_onlink_common(struct nd_prefix *pr, boolean_t force_scoped, */ nd6_prproxy_prelist_update(pr, prproxy ? pr : NULL); - NDPR_REMREF(pr); /* release reference for this routine */ + NDPR_REMREF(pr); /* release reference for this routine */ lck_mtx_unlock(&proxy6_lock); lck_mtx_lock(nd6_mutex); - return (error); + return error; } int nd6_prefix_onlink(struct nd_prefix *pr) { - return (nd6_prefix_onlink_common(pr, FALSE, IFSCOPE_NONE)); + return nd6_prefix_onlink_common(pr, FALSE, IFSCOPE_NONE); } int nd6_prefix_onlink_scoped(struct nd_prefix *pr, unsigned int ifscope) { - return (nd6_prefix_onlink_common(pr, TRUE, ifscope)); + return nd6_prefix_onlink_common(pr, TRUE, ifscope); } int @@ -3507,23 +3589,24 @@ nd6_prefix_offlink(struct nd_prefix *pr) pr->ndpr_plen, if_name(pr->ndpr_ifp), (pr->ndpr_stateflags & NDPRF_IFSCOPE) ? 1 : 0)); NDPR_UNLOCK(pr); - return (EEXIST); + return EEXIST; } - bzero(&sa6, sizeof (sa6)); + bzero(&sa6, sizeof(sa6)); sa6.sin6_family = AF_INET6; - sa6.sin6_len = sizeof (sa6); + sa6.sin6_len = sizeof(sa6); bcopy(&pr->ndpr_prefix.sin6_addr, &sa6.sin6_addr, - sizeof (struct in6_addr)); - bzero(&mask6, sizeof (mask6)); + sizeof(struct in6_addr)); + bzero(&mask6, sizeof(mask6)); mask6.sin6_family = AF_INET6; - mask6.sin6_len = sizeof (sa6); - bcopy(&pr->ndpr_mask, &mask6.sin6_addr, sizeof (struct in6_addr)); + mask6.sin6_len = sizeof(sa6); + bcopy(&pr->ndpr_mask, &mask6.sin6_addr, sizeof(struct in6_addr)); prefix = pr->ndpr_prefix; plen = pr->ndpr_plen; - if ((ndpr_rt = pr->ndpr_rt) != NULL) + if ((ndpr_rt = pr->ndpr_rt) != NULL) { pr->ndpr_rt = NULL; - NDPR_ADDREF_LOCKED(pr); /* keep reference for this routine */ + } + NDPR_ADDREF_LOCKED(pr); /* keep reference for this routine */ NDPR_UNLOCK(pr); ifscope = (pr->ndpr_stateflags & NDPRF_IFSCOPE) ? 
@@ -3538,7 +3621,6 @@ nd6_prefix_offlink(struct nd_prefix *pr) nd6_rtmsg(RTM_DELETE, rt); RT_UNLOCK(rt); rtfree(rt); - } else { nd6log((LOG_ERR, "nd6_prefix_offlink: failed to delete route: " @@ -3547,8 +3629,9 @@ nd6_prefix_offlink(struct nd_prefix *pr) (ifscope != IFSCOPE_NONE), error)); } - if (ndpr_rt != NULL) + if (ndpr_rt != NULL) { rtfree(ndpr_rt); + } lck_mtx_lock(&proxy6_lock); @@ -3572,10 +3655,10 @@ nd6_prefix_offlink(struct nd_prefix *pr) */ nd6_prproxy_prelist_update(pr, prproxy ? pr : NULL); - NDPR_REMREF(pr); /* release reference for this routine */ + NDPR_REMREF(pr); /* release reference for this routine */ lck_mtx_unlock(&proxy6_lock); - return (error); + return error; } struct in6_ifaddr * @@ -3630,14 +3713,14 @@ in6_pfx_newpersistaddr(struct nd_prefix *pr, int mcast, int *errorp, boolean_t i goto unlock1; } - bzero(&ifra, sizeof (ifra)); - strlcpy(ifra.ifra_name, if_name(ifp), sizeof (ifra.ifra_name)); + bzero(&ifra, sizeof(ifra)); + strlcpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name)); ifra.ifra_addr.sin6_family = AF_INET6; - ifra.ifra_addr.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_addr.sin6_len = sizeof(struct sockaddr_in6); /* prefix */ bcopy(&pr->ndpr_prefix.sin6_addr, &ifra.ifra_addr.sin6_addr, - sizeof (ifra.ifra_addr.sin6_addr)); + sizeof(ifra.ifra_addr.sin6_addr)); in6_len2mask(&mask, pr->ndpr_plen); ifra.ifra_addr.sin6_addr.s6_addr32[0] &= mask.s6_addr32[0]; ifra.ifra_addr.sin6_addr.s6_addr32[1] &= mask.s6_addr32[1]; @@ -3697,25 +3780,28 @@ in6_pfx_newpersistaddr(struct nd_prefix *pr, int mcast, int *errorp, boolean_t i &ifra.ifra_addr.sin6_addr); } } else { - if (!is_clat46) + if (!is_clat46) { error = in6_cga_generate(NULL, 0, &ifra.ifra_addr.sin6_addr); - else + } else { error = in6_cga_generate(NULL, 1, &ifra.ifra_addr.sin6_addr); + } } in6_cga_node_unlock(); if (error == 0) { ifra.ifra_flags |= IN6_IFF_SECURED; - if (is_clat46) + if (is_clat46) { ifra.ifra_flags |= IN6_IFF_CLAT46; + } } else { - if (!is_clat46) + if (!is_clat46) { nd6log((LOG_ERR, "%s: no CGA available (%s)\n", __func__, if_name(ifp))); - else + } else { nd6log((LOG_ERR, "%s: no CLAT46 available (%s)\n", - __func__, if_name(ifp))); + __func__, if_name(ifp))); + } goto done; } } @@ -3723,10 +3809,10 @@ in6_pfx_newpersistaddr(struct nd_prefix *pr, int mcast, int *errorp, boolean_t i VERIFY(ia6 == NULL); /* new prefix mask. */ - ifra.ifra_prefixmask.sin6_len = sizeof (struct sockaddr_in6); + ifra.ifra_prefixmask.sin6_len = sizeof(struct sockaddr_in6); ifra.ifra_prefixmask.sin6_family = AF_INET6; bcopy(&mask, &ifra.ifra_prefixmask.sin6_addr, - sizeof (ifra.ifra_prefixmask.sin6_addr)); + sizeof(ifra.ifra_prefixmask.sin6_addr)); /* lifetimes. 
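 * Both lifetimes are inherited from the advertising prefix, i.e.
 * (sketch; the vltime copy follows, and the pltime copy sits in the
 * adjacent unchanged context):
 *
 *	ifra.ifra_lifetime.ia6t_vltime = pr->ndpr_vltime;
 *	ifra.ifra_lifetime.ia6t_pltime = pr->ndpr_pltime;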
*/ ifra.ifra_lifetime.ia6t_vltime = pr->ndpr_vltime; @@ -3759,8 +3845,9 @@ in6_pfx_newpersistaddr(struct nd_prefix *pr, int mcast, int *errorp, boolean_t i * [RFC 4862, Section 5.4.2] */ ifaupdate = IN6_IFAUPDATE_NOWAIT; - if (mcast) + if (mcast) { ifaupdate |= IN6_IFAUPDATE_DADDELAY; + } error = in6_update_ifa(ifp, &ifra, ifaupdate, &ia6); if (error != 0) { nd6log((LOG_ERR, @@ -3780,10 +3867,10 @@ unlock1: done: *errorp = error; - return (ia6); + return ia6; } -#define IA6_NONCONST(i) ((struct in6_ifaddr *)(uintptr_t)(i)) +#define IA6_NONCONST(i) ((struct in6_ifaddr *)(uintptr_t)(i)) int in6_tmpifadd(const struct in6_ifaddr *ia0, int forcegen) @@ -3792,15 +3879,15 @@ in6_tmpifadd(const struct in6_ifaddr *ia0, int forcegen) struct in6_ifaddr *ia, *newia; struct in6_aliasreq ifra; int i, error, ifaupdate; - int trylimit = 3; /* XXX: adhoc value */ + int trylimit = 3; /* XXX: adhoc value */ u_int32_t randid[2]; time_t vltime0, pltime0; uint64_t timenow = net_uptime(); struct in6_addr addr; struct nd_prefix *ndpr; - bzero(&ifra, sizeof (ifra)); - strlcpy(ifra.ifra_name, if_name(ifp), sizeof (ifra.ifra_name)); + bzero(&ifra, sizeof(ifra)); + strlcpy(ifra.ifra_name, if_name(ifp), sizeof(ifra.ifra_name)); IFA_LOCK(&IA6_NONCONST(ia0)->ia_ifa); ifra.ifra_addr = ia0->ia_addr; /* copy prefix mask */ @@ -3808,7 +3895,7 @@ in6_tmpifadd(const struct in6_ifaddr *ia0, int forcegen) /* clear the old IFID */ for (i = 0; i < 4; i++) { ifra.ifra_addr.sin6_addr.s6_addr32[i] - &= ifra.ifra_prefixmask.sin6_addr.s6_addr32[i]; + &= ifra.ifra_prefixmask.sin6_addr.s6_addr32[i]; } addr = ia0->ia_addr.sin6_addr; IFA_UNLOCK(&IA6_NONCONST(ia0)->ia_ifa); @@ -3833,7 +3920,7 @@ again: if (trylimit-- == 0) { nd6log((LOG_NOTICE, "in6_tmpifadd: failed to find " "a unique random IFID\n")); - return (EEXIST); + return EEXIST; } forcegen = 1; goto again; @@ -3851,8 +3938,9 @@ again: vltime0 = IFA6_IS_INVALID(ia0, timenow) ? 0 : (ia0->ia6_lifetime.ia6ti_vltime - (timenow - ia0->ia6_updatetime)); - if (vltime0 > ip6_temp_valid_lifetime) + if (vltime0 > ip6_temp_valid_lifetime) { vltime0 = ip6_temp_valid_lifetime; + } } else { vltime0 = ip6_temp_valid_lifetime; } @@ -3860,9 +3948,10 @@ again: pltime0 = IFA6_IS_DEPRECATED(ia0, timenow) ? 0 : (ia0->ia6_lifetime.ia6ti_pltime - (timenow - ia0->ia6_updatetime)); - if (pltime0 > ip6_temp_preferred_lifetime - ip6_desync_factor) + if (pltime0 > ip6_temp_preferred_lifetime - ip6_desync_factor) { pltime0 = ip6_temp_preferred_lifetime - ip6_desync_factor; + } } else { pltime0 = ip6_temp_preferred_lifetime - ip6_desync_factor; } @@ -3873,19 +3962,20 @@ again: * A temporary address is created only if this calculated Preferred * Lifetime is greater than REGEN_ADVANCE time units. */ - if (ifra.ifra_lifetime.ia6t_pltime <= ip6_temp_regen_advance) - return (0); + if (ifra.ifra_lifetime.ia6t_pltime <= ip6_temp_regen_advance) { + return 0; + } /* XXX: scope zone ID? */ - ifra.ifra_flags |= (IN6_IFF_AUTOCONF|IN6_IFF_TEMPORARY); + ifra.ifra_flags |= (IN6_IFF_AUTOCONF | IN6_IFF_TEMPORARY); /* allocate ifaddr structure, link into chain, etc. 
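 * All of that is delegated to in6_update_ifa(); this caller only
 * chooses the update flags, mirroring the call just below:
 *
 *	ifaupdate = IN6_IFAUPDATE_NOWAIT | IN6_IFAUPDATE_DADDELAY;
 *	error = in6_update_ifa(ifp, &ifra, ifaupdate, &newia);
 *
 * IN6_IFAUPDATE_DADDELAY requests a random delay before Duplicate
 * Address Detection starts (cf. RFC 4862, Section 5.4.2).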
*/ ifaupdate = IN6_IFAUPDATE_NOWAIT | IN6_IFAUPDATE_DADDELAY; error = in6_update_ifa(ifp, &ifra, ifaupdate, &newia); if (error != 0) { nd6log((LOG_ERR, "in6_tmpifadd: failed to add address.\n")); - return (error); + return error; } VERIFY(newia != NULL); @@ -3901,9 +3991,9 @@ again: IFA_UNLOCK(&IA6_NONCONST(ia0)->ia_ifa); in6_purgeaddr(&newia->ia_ifa); IFA_REMREF(&newia->ia_ifa); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } - NDPR_ADDREF(ndpr); /* for us */ + NDPR_ADDREF(ndpr); /* for us */ IFA_UNLOCK(&IA6_NONCONST(ia0)->ia_ifa); IFA_LOCK(&newia->ia_ifa); if (newia->ia6_ndpr != NULL) { @@ -3911,13 +4001,13 @@ again: VERIFY(newia->ia6_ndpr->ndpr_addrcnt != 0); newia->ia6_ndpr->ndpr_addrcnt--; NDPR_UNLOCK(newia->ia6_ndpr); - NDPR_REMREF(newia->ia6_ndpr); /* release addr reference */ + NDPR_REMREF(newia->ia6_ndpr); /* release addr reference */ } newia->ia6_ndpr = ndpr; NDPR_LOCK(newia->ia6_ndpr); newia->ia6_ndpr->ndpr_addrcnt++; VERIFY(newia->ia6_ndpr->ndpr_addrcnt != 0); - NDPR_ADDREF_LOCKED(newia->ia6_ndpr); /* for addr reference */ + NDPR_ADDREF_LOCKED(newia->ia6_ndpr); /* for addr reference */ NDPR_UNLOCK(newia->ia6_ndpr); IFA_UNLOCK(&newia->ia_ifa); /* @@ -3936,7 +4026,7 @@ again: /* remove our reference */ NDPR_REMREF(ndpr); - return (0); + return 0; } #undef IA6_NONCONST @@ -3957,18 +4047,20 @@ in6_init_prefix_ltimes(struct nd_prefix *ndpr) nd6log((LOG_INFO, "in6_init_prefix_ltimes: preferred lifetime" "(%d) is greater than valid lifetime(%d)\n", (u_int)ndpr->ndpr_pltime, (u_int)ndpr->ndpr_vltime)); - return (EINVAL); + return EINVAL; } - if (ndpr->ndpr_pltime == ND6_INFINITE_LIFETIME) + if (ndpr->ndpr_pltime == ND6_INFINITE_LIFETIME) { ndpr->ndpr_preferred = 0; - else + } else { ndpr->ndpr_preferred = timenow + ndpr->ndpr_pltime; - if (ndpr->ndpr_vltime == ND6_INFINITE_LIFETIME) + } + if (ndpr->ndpr_vltime == ND6_INFINITE_LIFETIME) { ndpr->ndpr_expire = 0; - else + } else { ndpr->ndpr_expire = timenow + ndpr->ndpr_vltime; + } - return (0); + return 0; } static void @@ -4032,12 +4124,12 @@ rt6_deleteroute( RT_LOCK(rt); if (rt->rt_gateway == NULL || rt->rt_gateway->sa_family != AF_INET6) { RT_UNLOCK(rt); - return (0); + return 0; } if (!IN6_ARE_ADDR_EQUAL(gate, &SIN6(rt->rt_gateway)->sin6_addr)) { RT_UNLOCK(rt); - return (0); + return 0; } /* * Do not delete a static route. @@ -4046,7 +4138,7 @@ rt6_deleteroute( */ if ((rt->rt_flags & RTF_STATIC) != 0) { RT_UNLOCK(rt); - return (0); + return 0; } /* * We delete only host route. This means, in particular, we don't @@ -4054,7 +4146,7 @@ rt6_deleteroute( */ if ((rt->rt_flags & RTF_HOST) == 0) { RT_UNLOCK(rt); - return (0); + return 0; } /* @@ -4063,8 +4155,8 @@ rt6_deleteroute( * on this route. */ RT_UNLOCK(rt); - return (rtrequest_locked(RTM_DELETE, rt_key(rt), rt->rt_gateway, - rt_mask(rt), rt->rt_flags, 0)); + return rtrequest_locked(RTM_DELETE, rt_key(rt), rt->rt_gateway, + rt_mask(rt), rt->rt_flags, 0); } int @@ -4079,7 +4171,7 @@ nd6_setdefaultiface( ifnet_head_lock_shared(); if (ifindex < 0 || if_index < ifindex) { ifnet_head_done(); - return (EINVAL); + return EINVAL; } def_ifp = ifindex2ifnet[ifindex]; ifnet_head_done(); @@ -4089,17 +4181,19 @@ nd6_setdefaultiface( struct ifnet *odef_ifp = nd6_defifp; nd6_defifindex = ifindex; - if (nd6_defifindex > 0) + if (nd6_defifindex > 0) { nd6_defifp = def_ifp; - else + } else { nd6_defifp = NULL; + } - if (nd6_defifp != NULL) + if (nd6_defifp != NULL) { nd6log((LOG_INFO, "%s: is now the default " "interface (was %s)\n", if_name(nd6_defifp), odef_ifp != NULL ? 
if_name(odef_ifp) : "NONE")); - else + } else { nd6log((LOG_INFO, "No default interface set\n")); + } /* * If the Default Router List is empty, install a route @@ -4126,5 +4220,5 @@ nd6_setdefaultiface( scope6_setdefault(nd6_defifp); } lck_mtx_unlock(nd6_mutex); - return (error); + return error; } diff --git a/bsd/netinet6/nd6_send.c b/bsd/netinet6/nd6_send.c index 18a0b96e4..cd59d78dd 100644 --- a/bsd/netinet6/nd6_send.c +++ b/bsd/netinet6/nd6_send.c @@ -47,10 +47,10 @@ #include #endif -SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ +SYSCTL_DECL(_net_inet6); /* Note: Not in any common header. */ SYSCTL_NODE(_net_inet6, OID_AUTO, send, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "IPv6 Secure Neighbor Discovery"); + "IPv6 Secure Neighbor Discovery"); static int nd6_send_opmode = ND6_SEND_OPMODE_CGA_QUIET; SYSCTL_INT(_net_inet6_send, OID_AUTO, opmode, CTLFLAG_RW | CTLFLAG_LOCKED, @@ -63,8 +63,8 @@ SYSCTL_INT(_net_inet6_send, OID_AUTO, opstate, CTLFLAG_RD | CTLFLAG_LOCKED, static int sysctl_cga_parameters SYSCTL_HANDLER_ARGS; SYSCTL_PROC(_net_inet6_send, OID_AUTO, cga_parameters, - CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, - sysctl_cga_parameters, "S,nd6_send_nodecfg", ""); + CTLTYPE_OPAQUE | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, + sysctl_cga_parameters, "S,nd6_send_nodecfg", ""); /* * The size of the buffer is sufficient to contain a public key, its size in @@ -72,7 +72,7 @@ SYSCTL_PROC(_net_inet6_send, OID_AUTO, cga_parameters, * scope. This interface is not a public API, so we don't anticipate that the * userland and the kernel will be mismatched between ILP32 and LP64. */ -#define SYSCTL_CGA_PARAMETERS_BUFFER_SIZE \ +#define SYSCTL_CGA_PARAMETERS_BUFFER_SIZE \ (2 * (sizeof (u_int16_t) + IN6_CGA_KEY_MAXSIZE) + \ sizeof (struct in6_cga_prepare)) @@ -96,13 +96,13 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS if (namelen != 0) { log(LOG_ERR, "%s: name length err [len=%u]\n", __func__, namelen); - return (EINVAL); + return EINVAL; } if (req->newlen > SYSCTL_CGA_PARAMETERS_BUFFER_SIZE) { log(LOG_ERR, "%s: input buffer size error [len=%u]\n", __func__, req->newlen); - return (EINVAL); + return EINVAL; } #if CONFIG_MACF @@ -111,7 +111,7 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS kauth_cred_unref(&cred); if (error != 0) { log(LOG_ERR, "%s: mac_system_check_info denied.\n", __func__); - return (EPERM); + return EPERM; } #endif @@ -120,7 +120,7 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS if (buffer == NULL) { log(LOG_ERR, "%s: could not allocate marshaling buffer.\n", __func__); - return (ENOMEM); + return ENOMEM; } in6_cga_node_lock(); @@ -128,27 +128,30 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS if (req->oldptr != USER_ADDR_NULL && req->oldlen > 0) { oldp = buffer; fin = &buffer[SYSCTL_CGA_PARAMETERS_BUFFER_SIZE]; - if (req->oldlen < SYSCTL_CGA_PARAMETERS_BUFFER_SIZE) + if (req->oldlen < SYSCTL_CGA_PARAMETERS_BUFFER_SIZE) { fin = &buffer[req->oldlen]; + } in6_cga_query(&cfg); iov = &cfg.cga_pubkey; if (iov->iov_len > 0) { VERIFY(iov->iov_len < UINT16_MAX); - if (&oldp[sizeof (cfg.cga_prepare)] <= fin) + if (&oldp[sizeof(cfg.cga_prepare)] <= fin) { bcopy(&cfg.cga_prepare, oldp, - sizeof (cfg.cga_prepare)); - oldp += sizeof (cfg.cga_prepare); + sizeof(cfg.cga_prepare)); + } + oldp += sizeof(cfg.cga_prepare); - if (&oldp[sizeof (u16)] < fin) { + if (&oldp[sizeof(u16)] < fin) { u16 = (u_int16_t) iov->iov_len; - bcopy(&u16, oldp, sizeof (u16)); + bcopy(&u16, oldp, sizeof(u16)); } - oldp += sizeof (u16); + oldp += sizeof(u16); - if (&oldp[iov->iov_len] < fin) + if (&oldp[iov->iov_len] < 
fin) { bcopy(iov->iov_base, oldp, iov->iov_len); + } oldp += iov->iov_len; if (oldp > fin) { @@ -161,16 +164,19 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS } error = SYSCTL_OUT(req, buffer, oldp - buffer); - if (error) + if (error) { goto done; + } } - if (req->newptr == USER_ADDR_NULL) + if (req->newptr == USER_ADDR_NULL) { goto done; + } error = proc_suser(current_proc()); - if (error) + if (error) { goto done; + } if (req->newlen == 0) { in6_cga_stop(); @@ -179,21 +185,23 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS } error = SYSCTL_IN(req, buffer, req->newlen); - if (error) + if (error) { goto done; + } newp = buffer; fin = &buffer[req->newlen]; bzero(&cfg, sizeof cfg); - if (&newp[sizeof (cfg.cga_prepare)] <= fin) - bcopy(newp, &cfg.cga_prepare, sizeof (cfg.cga_prepare)); - newp += sizeof (cfg.cga_prepare); + if (&newp[sizeof(cfg.cga_prepare)] <= fin) { + bcopy(newp, &cfg.cga_prepare, sizeof(cfg.cga_prepare)); + } + newp += sizeof(cfg.cga_prepare); iov = &cfg.cga_privkey; - if (&newp[sizeof (u16)] < fin) { - bcopy(newp, &u16, sizeof (u16)); + if (&newp[sizeof(u16)] < fin) { + bcopy(newp, &u16, sizeof(u16)); iov->iov_len = u16; if (iov->iov_len > IN6_CGA_KEY_MAXSIZE) { @@ -201,14 +209,14 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS goto done; } } - newp += sizeof (u16); + newp += sizeof(u16); iov->iov_base = newp; newp += iov->iov_len; iov = &cfg.cga_pubkey; - if (&newp[sizeof (u16)] < fin) { - bcopy(newp, &u16, sizeof (u16)); + if (&newp[sizeof(u16)] < fin) { + bcopy(newp, &u16, sizeof(u16)); iov->iov_len = u16; if (iov->iov_len > IN6_CGA_KEY_MAXSIZE) { @@ -216,7 +224,7 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS goto done; } } - newp += sizeof (u16); + newp += sizeof(u16); iov->iov_base = newp; newp += iov->iov_len; @@ -229,16 +237,17 @@ sysctl_cga_parameters SYSCTL_HANDLER_ARGS } error = in6_cga_start(&cfg); - if (!error) + if (!error) { nd6_send_opstate = nd6_send_opmode; - else + } else { log(LOG_ERR, "%s: in6_cga_start error=%d.\n", __func__, error); + } done: in6_cga_node_unlock(); FREE(buffer, M_IP6CGA); - return (error); + return error; } /* End of file */ diff --git a/bsd/netinet6/nd6_var.h b/bsd/netinet6/nd6_var.h index 0a92f8bc8..c16edced5 100644 --- a/bsd/netinet6/nd6_var.h +++ b/bsd/netinet6/nd6_var.h @@ -55,7 +55,7 @@ */ #ifndef _NETINET6_ND6_VAR_H_ -#define _NETINET6_ND6_VAR_H_ +#define _NETINET6_ND6_VAR_H_ #ifdef BSD_KERNEL_PRIVATE struct nd_ifinfo { diff --git a/bsd/netinet6/raw_ip6.c b/bsd/netinet6/raw_ip6.c index 92ec475f4..b1ce62b0c 100644 --- a/bsd/netinet6/raw_ip6.c +++ b/bsd/netinet6/raw_ip6.c @@ -133,10 +133,10 @@ * Raw interface to IP6 protocol. 
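 * An inbound datagram is duplicated to every matching PCB; the filter
 * rip6_input() applies below is, in pseudo-code:
 *
 *	if (pcb is IPv6 &&
 *	    (pcb proto unset || pcb proto == proto) &&
 *	    (pcb laddr unspecified || pcb laddr == ip6_dst) &&
 *	    (pcb faddr unspecified || pcb faddr == ip6_src))
 *		deliver a copy to pcb;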
*/ -extern struct inpcbhead ripcb; -extern struct inpcbinfo ripcbinfo; -extern u_int32_t rip_sendspace; -extern u_int32_t rip_recvspace; +extern struct inpcbhead ripcb; +extern struct inpcbinfo ripcbinfo; +extern u_int32_t rip_sendspace; +extern u_int32_t rip_recvspace; struct rip6stat rip6stat; @@ -147,9 +147,9 @@ struct rip6stat rip6stat; */ int rip6_input( - struct mbuf **mp, - int *offp, - int proto) + struct mbuf **mp, + int *offp, + int proto) { struct mbuf *m = *mp; struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); @@ -169,20 +169,25 @@ rip6_input( lck_rw_lock_shared(ripcbinfo.ipi_lock); LIST_FOREACH(in6p, &ripcb, inp_list) { - if ((in6p->in6p_vflag & INP_IPV6) == 0) + if ((in6p->in6p_vflag & INP_IPV6) == 0) { continue; + } if (in6p->in6p_ip6_nxt && - in6p->in6p_ip6_nxt != proto) + in6p->in6p_ip6_nxt != proto) { continue; + } if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr) && - !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst)) + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_laddr, &ip6->ip6_dst)) { continue; + } if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) && - !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src)) + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &ip6->ip6_src)) { continue; + } - if (inp_restricted_recv(in6p, ifp)) + if (inp_restricted_recv(in6p, ifp)) { continue; + } if (proto == IPPROTO_ICMPV6 || in6p->in6p_cksum != -1) { rip6stat.rip6s_isum++; @@ -197,7 +202,7 @@ rip6_input( #if NECP if (n && !necp_socket_is_allowed_to_send_recv_v6(in6p, 0, 0, - &ip6->ip6_dst, &ip6->ip6_src, ifp, NULL, NULL, NULL)) { + &ip6->ip6_dst, &ip6->ip6_src, ifp, NULL, NULL, NULL)) { m_freem(n); /* do not inject data into pcb */ } else @@ -206,7 +211,7 @@ rip6_input( if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { ret = ip6_savecontrol(last, n, &opts); if (ret != 0) { m_freem(n); @@ -219,11 +224,12 @@ rip6_input( m_adj(n, *offp); so_recv_data_stat(last->in6p_socket, m, 0); if (sbappendaddr(&last->in6p_socket->so_rcv, - (struct sockaddr *)&rip6src, - n, opts, NULL) == 0) { + (struct sockaddr *)&rip6src, + n, opts, NULL) == 0) { rip6stat.rip6s_fullsock++; - } else + } else { sorwakeup(last->in6p_socket); + } opts = NULL; } } @@ -232,7 +238,7 @@ rip6_input( #if NECP if (last && !necp_socket_is_allowed_to_send_recv_v6(in6p, 0, 0, - &ip6->ip6_dst, &ip6->ip6_src, ifp, NULL, NULL, NULL)) { + &ip6->ip6_dst, &ip6->ip6_src, ifp, NULL, NULL, NULL)) { m_freem(m); ip6stat.ip6s_delivered--; /* do not inject data into pcb */ @@ -242,7 +248,7 @@ rip6_input( if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { ret = ip6_savecontrol(last, m, &opts); if (ret != 0) { m_freem(m); @@ -250,27 +256,28 @@ rip6_input( ip6stat.ip6s_delivered--; goto unlock; } - } /* strip intermediate headers */ m_adj(m, *offp); so_recv_data_stat(last->in6p_socket, m, 0); if (sbappendaddr(&last->in6p_socket->so_rcv, - (struct sockaddr *)&rip6src, m, opts, NULL) == 0) { + (struct sockaddr *)&rip6src, m, opts, NULL) == 0) { rip6stat.rip6s_fullsock++; - } else + } else { sorwakeup(last->in6p_socket); + } } else { rip6stat.rip6s_nosock++; - if 
(m->m_flags & M_MCAST) + if (m->m_flags & M_MCAST) { rip6stat.rip6s_nosockmcast++; - if (proto == IPPROTO_NONE) + } + if (proto == IPPROTO_NONE) { m_freem(m); - else { + } else { char *prvnxtp = ip6_get_prevhdr(m, *offp); /* XXX */ icmp6_error(m, ICMP6_PARAM_PROB, - ICMP6_PARAMPROB_NEXTHEADER, - prvnxtp - mtod(m, char *)); + ICMP6_PARAMPROB_NEXTHEADER, + prvnxtp - mtod(m, char *)); } ip6stat.ip6s_delivered--; } @@ -297,18 +304,21 @@ rip6_ctlinput( void (*notify)(struct inpcb *, int) = in6_rtchange; if (sa->sa_family != AF_INET6 || - sa->sa_len != sizeof(struct sockaddr_in6)) + sa->sa_len != sizeof(struct sockaddr_in6)) { return; + } - if ((unsigned)cmd >= PRC_NCMDS) + if ((unsigned)cmd >= PRC_NCMDS) { return; + } if (PRC_IS_REDIRECT(cmd)) { notify = in6_rtchange; d = NULL; - } else if (cmd == PRC_HOSTDEAD) + } else if (cmd == PRC_HOSTDEAD) { d = NULL; - else if (inet6ctlerrmap[cmd] == 0) + } else if (inet6ctlerrmap[cmd] == 0) { return; + } /* if the parameter is from icmp6, decode it. */ if (d != NULL) { @@ -325,7 +335,7 @@ rip6_ctlinput( } (void) in6_pcbnotify(&ripcbinfo, sa, 0, (const struct sockaddr *)sa6_src, - 0, cmd, cmdarg, notify); + 0, cmd, cmdarg, notify); } /* @@ -343,12 +353,12 @@ rip6_output( struct in6_addr *dst; struct ip6_hdr *ip6; struct inpcb *in6p; - u_int plen = m->m_pkthdr.len; + u_int plen = m->m_pkthdr.len; int error = 0; struct ip6_pktopts opt, *optp = NULL; struct ip6_moptions *im6o = NULL; struct ifnet *oifp = NULL; - int type = 0, code = 0; /* for ICMPv6 output statistics only */ + int type = 0, code = 0; /* for ICMPv6 output statistics only */ int sotc = SO_TC_UNSPEC; int netsvctype = _NET_SERVICE_TYPE_UNSPEC; struct ip6_out_args ip6oa; @@ -362,13 +372,14 @@ rip6_output( if (in6p == NULL #if NECP - || (necp_socket_should_use_flow_divert(in6p)) + || (necp_socket_should_use_flow_divert(in6p)) #endif /* NECP */ - ) { - if (in6p == NULL) + ) { + if (in6p == NULL) { error = EINVAL; - else + } else { error = EPROTOTYPE; + } goto bad; } if (dstsock != NULL && IN6_IS_ADDR_V4MAPPED(&dstsock->sin6_addr)) { @@ -380,25 +391,31 @@ rip6_output( ip6oa.ip6oa_boundif = in6p->inp_boundifp->if_index; ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; } - if (INP_NO_CELLULAR(in6p)) + if (INP_NO_CELLULAR(in6p)) { ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR; - if (INP_NO_EXPENSIVE(in6p)) + } + if (INP_NO_EXPENSIVE(in6p)) { ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE; - if (INP_AWDL_UNRESTRICTED(in6p)) + } + if (INP_AWDL_UNRESTRICTED(in6p)) { ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED; - if (INP_INTCOPROC_ALLOWED(in6p)) + } + if (INP_INTCOPROC_ALLOWED(in6p)) { ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED; + } dst = &dstsock->sin6_addr; if (control) { sotc = so_tc_from_control(control, &netsvctype); if ((error = ip6_setpktopts(control, &opt, NULL, - SOCK_PROTO(so))) != 0) + SOCK_PROTO(so))) != 0) { goto bad; + } optp = &opt; - } else + } else { optp = in6p->in6p_outputopts; + } if (sotc == SO_TC_UNSPEC) { sotc = so->so_traffic_class; netsvctype = so->so_netsvctype; @@ -422,8 +439,9 @@ rip6_output( code = icmp6->icmp6_code; } - if (in6p->inp_flowhash == 0) + if (in6p->inp_flowhash == 0) { in6p->inp_flowhash = inp_calc_flowhash(in6p); + } /* update flowinfo - RFC 6437 */ if (in6p->inp_flow == 0 && in6p->in6p_flags & IN6P_AUTOFLOWLABEL) { in6p->inp_flow &= ~IPV6_FLOWLABEL_MASK; @@ -468,8 +486,9 @@ rip6_output( if (optp && (pi = optp->ip6po_pktinfo) && pi->ipi6_ifindex) { ip6->ip6_dst.s6_addr16[1] = htons(pi->ipi6_ifindex); oifp = ifindex2ifnet[pi->ipi6_ifindex]; - if (oifp != NULL) + if (oifp != NULL) { 
ifnet_reference(oifp); + } } else if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) && im6o != NULL && im6o_multicast_ifp != NULL) { oifp = im6o_multicast_ifp; @@ -488,7 +507,7 @@ rip6_output( goto bad; } ip6->ip6_dst.s6_addr16[1] - = htons(dstsock->sin6_scope_id & 0xffff);/*XXX*/ + = htons(dstsock->sin6_scope_id & 0xffff);/*XXX*/ } ifnet_head_done(); } @@ -498,7 +517,7 @@ rip6_output( */ { struct in6_addr *in6a; - struct in6_addr storage; + struct in6_addr storage; u_short index = 0; if (israw != 0 && optp && optp->ip6po_pktinfo && !IN6_IS_ADDR_UNSPECIFIED(&optp->ip6po_pktinfo->ipi6_addr)) { @@ -507,8 +526,9 @@ rip6_output( } else if ((in6a = in6_selectsrc(dstsock, optp, in6p, &in6p->in6p_route, NULL, &storage, ip6oa.ip6oa_boundif, &error)) == 0) { - if (error == 0) + if (error == 0) { error = EADDRNOTAVAIL; + } goto bad; } else { ip6oa.ip6oa_flags |= IP6OAF_BOUND_SRCADDR; @@ -516,25 +536,28 @@ rip6_output( ip6->ip6_src = *in6a; if (in6p->in6p_route.ro_rt != NULL) { RT_LOCK(in6p->in6p_route.ro_rt); - if (in6p->in6p_route.ro_rt->rt_ifp != NULL) + if (in6p->in6p_route.ro_rt->rt_ifp != NULL) { index = in6p->in6p_route.ro_rt->rt_ifp->if_index; + } RT_UNLOCK(in6p->in6p_route.ro_rt); - if (oifp != NULL) + if (oifp != NULL) { ifnet_release(oifp); + } ifnet_head_lock_shared(); if (index == 0 || if_index < index) { panic("bad if_index on interface from route"); } oifp = ifindex2ifnet[index]; - if (oifp != NULL) + if (oifp != NULL) { ifnet_reference(oifp); + } ifnet_head_done(); } } ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) | - (in6p->inp_flow & IPV6_FLOWINFO_MASK); + (in6p->inp_flow & IPV6_FLOWINFO_MASK); ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) | - (IPV6_VERSION & IPV6_VERSION_MASK); + (IPV6_VERSION & IPV6_VERSION_MASK); /* ip6_plen will be filled in ip6_output, so not fill it here. 
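 * The checksum, however, is computed here: for ICMPv6, and for any
 * raw socket with a checksum offset configured via IPV6_CHECKSUM,
 * the field inside the payload is located and filled roughly as in
 * the code below:
 *
 *	off = (SOCK_PROTO(so) == IPPROTO_ICMPV6)
 *	    ? offsetof(struct icmp6_hdr, icmp6_cksum)
 *	    : in6p->in6p_cksum;
 *	*p = in6_cksum(m, ip6->ip6_nxt, sizeof(*ip6), plen);
 *
 * i.e. a pseudo-header checksum over the entire payload.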
*/ ip6->ip6_nxt = in6p->in6p_ip6_nxt; ip6->ip6_hlim = in6_selecthlim(in6p, oifp); @@ -545,10 +568,11 @@ rip6_output( u_int16_t *p; /* compute checksum */ - if (SOCK_PROTO(so) == IPPROTO_ICMPV6) + if (SOCK_PROTO(so) == IPPROTO_ICMPV6) { off = offsetof(struct icmp6_hdr, icmp6_cksum); - else + } else { off = in6p->in6p_cksum; + } if (plen < (unsigned int)(off + 1)) { error = EINVAL; goto bad; @@ -560,8 +584,9 @@ rip6_output( off -= n->m_len; n = n->m_next; } - if (!n) + if (!n) { goto bad; + } p = (u_int16_t *)(void *)(mtod(n, caddr_t) + off); *p = 0; *p = in6_cksum(m, ip6->ip6_nxt, sizeof(*ip6), plen); @@ -596,7 +621,7 @@ rip6_output( in6p->in6p_route.ro_dst.sin6_family = AF_INET6; in6p->in6p_route.ro_dst.sin6_len = sizeof(struct sockaddr_in6); ((struct sockaddr_in6 *)(void *)&in6p->in6p_route.ro_dst)->sin6_addr = - ip6->ip6_dst; + ip6->ip6_dst; rtalloc_scoped((struct route *)&in6p->in6p_route, ip6oa.ip6oa_boundif); @@ -606,7 +631,7 @@ rip6_output( } if (!necp_socket_is_allowed_to_send_recv_v6(in6p, 0, 0, - &ip6->ip6_src, &ip6->ip6_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { + &ip6->ip6_src, &ip6->ip6_dst, NULL, &policy_id, &route_rule_id, &skip_policy_id)) { error = EHOSTUNREACH; goto bad; } @@ -619,8 +644,9 @@ rip6_output( } } #endif /* NECP */ - if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) + if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; + } #if IPSEC if (in6p->in6p_sp != NULL && ipsec_setsocket(m, so) != 0) { @@ -629,8 +655,9 @@ rip6_output( } #endif /*IPSEC*/ - if (ROUTE_UNUSABLE(&in6p->in6p_route)) + if (ROUTE_UNUSABLE(&in6p->in6p_route)) { ROUTE_RELEASE(&in6p->in6p_route); + } if (oifp != NULL) { ifnet_release(oifp); @@ -644,19 +671,22 @@ rip6_output( PKTF_FLOW_RAWSOCK); m->m_pkthdr.pkt_proto = in6p->in6p_ip6_nxt; m->m_pkthdr.tx_rawip_pid = so->last_pid; - if (so->so_flags & SOF_DELEGATED) + if (so->so_flags & SOF_DELEGATED) { m->m_pkthdr.tx_rawip_e_pid = so->e_pid; - else + } else { m->m_pkthdr.tx_rawip_e_pid = 0; + } - if (im6o != NULL) + if (im6o != NULL) { IM6O_ADDREF(im6o); + } error = ip6_output(m, optp, &in6p->in6p_route, flags, im6o, &oifp, &ip6oa); - if (im6o != NULL) + if (im6o != NULL) { IM6O_REMREF(im6o); + } if (in6p->in6p_route.ro_rt != NULL) { struct rtentry *rt = in6p->in6p_route.ro_rt; @@ -665,14 +695,15 @@ rip6_output( if ((rt->rt_flags & RTF_MULTICAST) || in6p->in6p_socket == NULL || !(in6p->in6p_socket->so_state & SS_ISCONNECTED)) { - rt = NULL; /* unusable */ + rt = NULL; /* unusable */ } /* * Always discard the cached route for unconnected * socket or if it is a multicast route. */ - if (rt == NULL) + if (rt == NULL) { ROUTE_RELEASE(&in6p->in6p_route); + } /* * If this is a connected socket and the destination @@ -692,35 +723,42 @@ rip6_output( * denied access to it, generate an event. 
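 * Concretely: when ip6_output() reports IP6OARF_IFDENIED and the PCB
 * is marked INP_NO_CELLULAR or INP_NO_EXPENSIVE, the check below
 * posts SO_FILT_HINT_IFDENIED on the socket so that interested
 * kevent filters can observe the denial.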
*/ if (error != 0 && (ip6oa.ip6oa_retflags & IP6OARF_IFDENIED) && - (INP_NO_CELLULAR(in6p) || INP_NO_EXPENSIVE(in6p))) - soevent(in6p->inp_socket, (SO_FILT_HINT_LOCKED| + (INP_NO_CELLULAR(in6p) || INP_NO_EXPENSIVE(in6p))) { + soevent(in6p->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); + } if (SOCK_PROTO(so) == IPPROTO_ICMPV6) { - if (oifp) + if (oifp) { icmp6_ifoutstat_inc(oifp, type, code); + } icmp6stat.icp6s_outhist[type]++; - } else + } else { rip6stat.rip6s_opackets++; + } goto freectl; bad: - if (m != NULL) + if (m != NULL) { m_freem(m); + } freectl: - if (optp == &opt && optp->ip6po_rthdr) + if (optp == &opt && optp->ip6po_rthdr) { ROUTE_RELEASE(&optp->ip6po_route); + } if (control != NULL) { - if (optp == &opt) + if (optp == &opt) { ip6_clearpktopts(optp, -1); + } m_freem(control); } - if (oifp != NULL) + if (oifp != NULL) { ifnet_release(oifp); - return(error); + } + return error; } /* @@ -734,15 +772,16 @@ rip6_ctloutput( int error, optval; /* Allow at this level */ - if (sopt->sopt_level == IPPROTO_ICMPV6) + if (sopt->sopt_level == IPPROTO_ICMPV6) { /* * XXX: is it better to call icmp6_ctloutput() directly * from protosw? */ - return(icmp6_ctloutput(so, sopt)); - else if (sopt->sopt_level != IPPROTO_IPV6 && - !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) - return (EINVAL); + return icmp6_ctloutput(so, sopt); + } else if (sopt->sopt_level != IPPROTO_IPV6 && + !(sopt->sopt_level == SOL_SOCKET && sopt->sopt_name == SO_FLUSH)) { + return EINVAL; + } error = 0; @@ -765,9 +804,10 @@ rip6_ctloutput( break; case SO_FLUSH: - if ((error = sooptcopyin(sopt, &optval, sizeof (optval), - sizeof (optval))) != 0) + if ((error = sooptcopyin(sopt, &optval, sizeof(optval), + sizeof(optval))) != 0) { break; + } error = inp_flush(sotoinpcb(so), optval); break; @@ -779,7 +819,7 @@ rip6_ctloutput( break; } - return (error); + return error; } static int @@ -789,26 +829,31 @@ rip6_attach(struct socket *so, int proto, struct proc *p) int error; inp = sotoinpcb(so); - if (inp) + if (inp) { panic("rip6_attach"); - if ((error = proc_suser(p)) != 0) + } + if ((error = proc_suser(p)) != 0) { return error; + } error = soreserve(so, rip_sendspace, rip_recvspace); - if (error) + if (error) { return error; + } error = in_pcballoc(so, &ripcbinfo, p); - if (error) + if (error) { return error; + } inp = (struct inpcb *)so->so_pcb; inp->inp_vflag |= INP_IPV6; inp->in6p_ip6_nxt = (char)proto; - inp->in6p_hops = -1; /* use kernel default */ + inp->in6p_hops = -1; /* use kernel default */ inp->in6p_cksum = -1; MALLOC(inp->in6p_icmp6filt, struct icmp6_filter *, - sizeof(struct icmp6_filter), M_PCB, M_WAITOK); - if (inp->in6p_icmp6filt == NULL) - return (ENOMEM); + sizeof(struct icmp6_filter), M_PCB, M_WAITOK); + if (inp->in6p_icmp6filt == NULL) { + return ENOMEM; + } ICMP6_FILTER_SETPASSALL(inp->in6p_icmp6filt); return 0; } @@ -819,8 +864,9 @@ rip6_detach(struct socket *so) struct inpcb *inp; inp = sotoinpcb(so); - if (inp == 0) + if (inp == 0) { panic("rip6_detach"); + } /* xxx: RSVP */ if (inp->in6p_icmp6filt) { FREE(inp->in6p_icmp6filt, M_PCB); @@ -842,8 +888,9 @@ rip6_disconnect(struct socket *so) { struct inpcb *inp = sotoinpcb(so); - if ((so->so_state & SS_ISCONNECTED) == 0) + if ((so->so_state & SS_ISCONNECTED) == 0) { return ENOTCONN; + } inp->in6p_faddr = in6addr_any; return rip6_abort(so); } @@ -860,22 +907,26 @@ rip6_bind(struct socket *so, struct sockaddr *nam, struct proc *p) if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || 
(necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) - return (inp == NULL ? EINVAL : EPROTOTYPE); + ) { + return inp == NULL ? EINVAL : EPROTOTYPE; + } - if (nam->sa_len != sizeof (struct sockaddr_in6)) - return (EINVAL); + if (nam->sa_len != sizeof(struct sockaddr_in6)) { + return EINVAL; + } - if (TAILQ_EMPTY(&ifnet_head) || SIN6(nam)->sin6_family != AF_INET6) - return (EADDRNOTAVAIL); + if (TAILQ_EMPTY(&ifnet_head) || SIN6(nam)->sin6_family != AF_INET6) { + return EADDRNOTAVAIL; + } - bzero(&sin6, sizeof (sin6)); + bzero(&sin6, sizeof(sin6)); *(&sin6) = *SIN6(nam); - if ((error = sa6_embedscope(&sin6, ip6_use_defzone)) != 0) - return (error); + if ((error = sa6_embedscope(&sin6, ip6_use_defzone)) != 0) { + return error; + } /* Sanitize local copy for address searches */ sin6.sin6_flowinfo = 0; @@ -883,16 +934,17 @@ rip6_bind(struct socket *so, struct sockaddr *nam, struct proc *p) sin6.sin6_port = 0; if (!IN6_IS_ADDR_UNSPECIFIED(&sin6.sin6_addr) && - (ifa = ifa_ifwithaddr(SA(&sin6))) == 0) - return (EADDRNOTAVAIL); + (ifa = ifa_ifwithaddr(SA(&sin6))) == 0) { + return EADDRNOTAVAIL; + } if (ifa != NULL) { IFA_LOCK(ifa); if (((struct in6_ifaddr *)ifa)->ia6_flags & (IN6_IFF_ANYCAST | IN6_IFF_NOTREADY | IN6_IFF_CLAT46 | - IN6_IFF_DETACHED | IN6_IFF_DEPRECATED)) { + IN6_IFF_DETACHED | IN6_IFF_DEPRECATED)) { IFA_UNLOCK(ifa); IFA_REMREF(ifa); - return (EADDRNOTAVAIL); + return EADDRNOTAVAIL; } outif = ifa->ifa_ifp; IFA_UNLOCK(ifa); @@ -901,7 +953,7 @@ rip6_bind(struct socket *so, struct sockaddr *nam, struct proc *p) inp->in6p_laddr = sin6.sin6_addr; inp->in6p_last_outifp = outif; - return (0); + return 0; } static int @@ -920,16 +972,20 @@ rip6_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p) if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) - return (inp == NULL ? EINVAL : EPROTOTYPE); - if (nam->sa_len != sizeof(*addr)) + ) { + return inp == NULL ? EINVAL : EPROTOTYPE; + } + if (nam->sa_len != sizeof(*addr)) { return EINVAL; - if (TAILQ_EMPTY(&ifnet_head)) + } + if (TAILQ_EMPTY(&ifnet_head)) { return EADDRNOTAVAIL; - if (addr->sin6_family != AF_INET6) + } + if (addr->sin6_family != AF_INET6) { return EAFNOSUPPORT; + } if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) { so->so_flags1 |= SOF1_CONNECT_COUNTED; @@ -937,7 +993,7 @@ rip6_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p) } #if ENABLE_DEFAULT_SCOPE - if (addr->sin6_scope_id == 0) { /* not change if specified */ + if (addr->sin6_scope_id == 0) { /* not change if specified */ /* avoid overwrites */ tmp = *addr; addr = &tmp; @@ -946,8 +1002,9 @@ rip6_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p) #endif /* KAME hack: embed scopeid */ - if (in6_embedscope(&SIN6(nam)->sin6_addr, SIN6(nam), inp, NULL, NULL) != 0) - return (EINVAL); + if (in6_embedscope(&SIN6(nam)->sin6_addr, SIN6(nam), inp, NULL, NULL) != 0) { + return EINVAL; + } ifscope = (inp->inp_flags & INP_BOUND_IF) ? inp->inp_boundifp->if_index : IFSCOPE_NONE; @@ -955,12 +1012,14 @@ rip6_connect(struct socket *so, struct sockaddr *nam, __unused struct proc *p) /* Source address selection. XXX: need pcblookup? */ in6a = in6_selectsrc(addr, inp->in6p_outputopts, inp, &inp->in6p_route, NULL, &storage, ifscope, &error); - if (in6a == NULL) - return (error ? error : EADDRNOTAVAIL); + if (in6a == NULL) { + return error ? 
error : EADDRNOTAVAIL; + } inp->in6p_laddr = *in6a; inp->in6p_faddr = addr->sin6_addr; - if (inp->in6p_route.ro_rt != NULL) + if (inp->in6p_route.ro_rt != NULL) { outif = inp->in6p_route.ro_rt->rt_ifp; + } inp->in6p_last_outifp = outif; soisconnected(so); @@ -986,13 +1045,14 @@ rip6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) { - if (inp == NULL) + ) { + if (inp == NULL) { error = EINVAL; - else + } else { error = EPROTOTYPE; + } goto bad; } @@ -1007,7 +1067,7 @@ rip6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, tmp.sin6_family = AF_INET6; tmp.sin6_len = sizeof(struct sockaddr_in6); bcopy(&inp->in6p_faddr, &tmp.sin6_addr, - sizeof(struct in6_addr)); + sizeof(struct in6_addr)); dst = &tmp; } else { if (nam == NULL) { @@ -1018,51 +1078,53 @@ rip6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, dst = &tmp; } #if ENABLE_DEFAULT_SCOPE - if (dst->sin6_scope_id == 0) { /* not change if specified */ + if (dst->sin6_scope_id == 0) { /* not change if specified */ dst->sin6_scope_id = scope6_addr2default(&dst->sin6_addr); } #endif - return (rip6_output(m, so, dst, control, 1)); + return rip6_output(m, so, dst, control, 1); bad: VERIFY(error != 0); - if (m != NULL) + if (m != NULL) { m_freem(m); - if (control != NULL) + } + if (control != NULL) { m_freem(control); + } - return (error); + return error; } struct pr_usrreqs rip6_usrreqs = { - .pru_abort = rip6_abort, - .pru_attach = rip6_attach, - .pru_bind = rip6_bind, - .pru_connect = rip6_connect, - .pru_control = in6_control, - .pru_detach = rip6_detach, - .pru_disconnect = rip6_disconnect, - .pru_peeraddr = in6_getpeeraddr, - .pru_send = rip6_send, - .pru_shutdown = rip6_shutdown, - .pru_sockaddr = in6_getsockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = rip6_abort, + .pru_attach = rip6_attach, + .pru_bind = rip6_bind, + .pru_connect = rip6_connect, + .pru_control = in6_control, + .pru_detach = rip6_detach, + .pru_disconnect = rip6_disconnect, + .pru_peeraddr = in6_getpeeraddr, + .pru_send = rip6_send, + .pru_shutdown = rip6_shutdown, + .pru_sockaddr = in6_getsockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; __private_extern__ struct pr_usrreqs icmp6_dgram_usrreqs = { - .pru_abort = rip6_abort, - .pru_attach = icmp6_dgram_attach, - .pru_bind = rip6_bind, - .pru_connect = rip6_connect, - .pru_control = in6_control, - .pru_detach = rip6_detach, - .pru_disconnect = rip6_disconnect, - .pru_peeraddr = in6_getpeeraddr, - .pru_send = icmp6_dgram_send, - .pru_shutdown = rip6_shutdown, - .pru_sockaddr = in6_getsockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = rip6_abort, + .pru_attach = icmp6_dgram_attach, + .pru_bind = rip6_bind, + .pru_connect = rip6_connect, + .pru_control = in6_control, + .pru_detach = rip6_detach, + .pru_disconnect = rip6_disconnect, + .pru_peeraddr = in6_getpeeraddr, + .pru_send = icmp6_dgram_send, + .pru_shutdown = rip6_shutdown, + .pru_sockaddr = in6_getsockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; diff --git a/bsd/netinet6/raw_ip6.h b/bsd/netinet6/raw_ip6.h index 390917d01..7619ba29c 100644 --- a/bsd/netinet6/raw_ip6.h +++ b/bsd/netinet6/raw_ip6.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2013 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -66,14 +66,14 @@ * ICMPv6 stat is counted separately. see netinet/icmp6.h */ struct rip6stat { - u_quad_t rip6s_ipackets; /* total input packets */ - u_quad_t rip6s_isum; /* input checksum computations */ - u_quad_t rip6s_badsum; /* of above, checksum error */ - u_quad_t rip6s_nosock; /* no matching socket */ - u_quad_t rip6s_nosockmcast; /* of above, arrived as multicast */ - u_quad_t rip6s_fullsock; /* not delivered, input socket full */ + u_quad_t rip6s_ipackets; /* total input packets */ + u_quad_t rip6s_isum; /* input checksum computations */ + u_quad_t rip6s_badsum; /* of above, checksum error */ + u_quad_t rip6s_nosock; /* no matching socket */ + u_quad_t rip6s_nosockmcast; /* of above, arrived as multicast */ + u_quad_t rip6s_fullsock; /* not delivered, input socket full */ - u_quad_t rip6s_opackets; /* total output packets */ + u_quad_t rip6s_opackets; /* total output packets */ }; #ifdef BSD_KERNEL_PRIVATE diff --git a/bsd/netinet6/route6.c b/bsd/netinet6/route6.c index f2cb83006..cc0886d44 100644 --- a/bsd/netinet6/route6.c +++ b/bsd/netinet6/route6.c @@ -108,14 +108,14 @@ route6_input(struct mbuf **mp, int *offp, int proto) /* unknown routing type */ if (rh->ip6r_segleft == 0) { rhlen = (rh->ip6r_len + 1) << 3; - break; /* Final dst. Just ignore the header. */ + break; /* Final dst. Just ignore the header. 
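 * With ip6r_segleft == 0 this node is the final destination, so an
 * unrecognized routing type is harmless; the header is skipped using
 * its self-describing length, computed above as
 *
 *	rhlen = (rh->ip6r_len + 1) << 3;
 *
 * (ip6r_len counts 8-octet units beyond the first 8 octets).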
*/ } ip6stat.ip6s_badoptions++; icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, (caddr_t)&rh->ip6r_type - (caddr_t)ip6); - return (IPPROTO_DONE); + return IPPROTO_DONE; } *offp += rhlen; - return (rh->ip6r_nxt); + return rh->ip6r_nxt; } diff --git a/bsd/netinet6/scope6.c b/bsd/netinet6/scope6.c index 8b85f414b..3d9bba38b 100644 --- a/bsd/netinet6/scope6.c +++ b/bsd/netinet6/scope6.c @@ -126,11 +126,11 @@ in6_addrscope(struct in6_addr *addr) switch (scope) { case 0x80: - return (IPV6_ADDR_SCOPE_LINKLOCAL); + return IPV6_ADDR_SCOPE_LINKLOCAL; case 0xc0: - return (IPV6_ADDR_SCOPE_SITELOCAL); + return IPV6_ADDR_SCOPE_SITELOCAL; default: - return (IPV6_ADDR_SCOPE_GLOBAL); /* just in case */ + return IPV6_ADDR_SCOPE_GLOBAL; /* just in case */ } } @@ -143,13 +143,13 @@ in6_addrscope(struct in6_addr *addr) */ switch (scope) { case IPV6_ADDR_SCOPE_INTFACELOCAL: - return (IPV6_ADDR_SCOPE_INTFACELOCAL); + return IPV6_ADDR_SCOPE_INTFACELOCAL; case IPV6_ADDR_SCOPE_LINKLOCAL: - return (IPV6_ADDR_SCOPE_LINKLOCAL); + return IPV6_ADDR_SCOPE_LINKLOCAL; case IPV6_ADDR_SCOPE_SITELOCAL: - return (IPV6_ADDR_SCOPE_SITELOCAL); + return IPV6_ADDR_SCOPE_SITELOCAL; default: - return (IPV6_ADDR_SCOPE_GLOBAL); + return IPV6_ADDR_SCOPE_GLOBAL; } } @@ -157,14 +157,16 @@ in6_addrscope(struct in6_addr *addr) * Regard loopback and unspecified addresses as global, since * they have no ambiguity. */ - if (bcmp(&in6addr_loopback, addr, sizeof (*addr) - 1) == 0) { - if (addr->s6_addr8[15] == 1) /* loopback */ - return (IPV6_ADDR_SCOPE_LINKLOCAL); - if (addr->s6_addr8[15] == 0) /* unspecified */ - return (IPV6_ADDR_SCOPE_GLOBAL); /* XXX: correct? */ + if (bcmp(&in6addr_loopback, addr, sizeof(*addr) - 1) == 0) { + if (addr->s6_addr8[15] == 1) { /* loopback */ + return IPV6_ADDR_SCOPE_LINKLOCAL; + } + if (addr->s6_addr8[15] == 0) { /* unspecified */ + return IPV6_ADDR_SCOPE_GLOBAL; /* XXX: correct? */ + } } - return (IPV6_ADDR_SCOPE_GLOBAL); + return IPV6_ADDR_SCOPE_GLOBAL; } int @@ -175,12 +177,13 @@ in6_addr2scopeid(struct ifnet *ifp, struct in6_addr *addr) struct scope6_id *sid; if_inet6data_lock_shared(ifp); - if (IN6_IFEXTRA(ifp) == NULL) + if (IN6_IFEXTRA(ifp) == NULL) { goto err; + } sid = SID(ifp); switch (scope) { case IPV6_ADDR_SCOPE_NODELOCAL: - retid = -1; /* XXX: is this an appropriate value? */ + retid = -1; /* XXX: is this an appropriate value? */ break; case IPV6_ADDR_SCOPE_LINKLOCAL: retid = sid->s6id_list[IPV6_ADDR_SCOPE_LINKLOCAL]; @@ -192,12 +195,12 @@ in6_addr2scopeid(struct ifnet *ifp, struct in6_addr *addr) retid = sid->s6id_list[IPV6_ADDR_SCOPE_ORGLOCAL]; break; default: - break; /* XXX: value 0, treat as global. */ + break; /* XXX: value 0, treat as global. */ } err: if_inet6data_lock_done(ifp); - return (retid); + return retid; } /* @@ -214,8 +217,9 @@ sa6_embedscope(struct sockaddr_in6 *sin6, int defaultok) struct ifnet *ifp; u_int32_t zoneid; - if ((zoneid = sin6->sin6_scope_id) == 0 && defaultok) + if ((zoneid = sin6->sin6_scope_id) == 0 && defaultok) { zoneid = scope6_addr2default(&sin6->sin6_addr); + } if (zoneid != 0 && (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr) || @@ -226,13 +230,14 @@ sa6_embedscope(struct sockaddr_in6 *sin6, int defaultok) * zone IDs assuming a one-to-one mapping between interfaces * and links. 
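 * The KAME convention keeps the zone in the (otherwise zero) second
 * 16-bit word of the link-local address while the sockaddr travels
 * through the stack:
 *
 *	sin6->sin6_addr.s6_addr16[1] = htons(zoneid & 0xffff);
 *	sin6->sin6_scope_id = 0;
 *
 * so that, e.g., fe80::1%4 is carried internally as fe80:4::1.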
*/ - if (if_index < zoneid) - return (ENXIO); + if (if_index < zoneid) { + return ENXIO; + } ifnet_head_lock_shared(); ifp = ifindex2ifnet[zoneid]; - if (ifp == NULL) { /* XXX: this can happen for some OS */ + if (ifp == NULL) { /* XXX: this can happen for some OS */ ifnet_head_done(); - return (ENXIO); + return ENXIO; } ifnet_head_done(); /* XXX assignment to 16bit from 32bit variable */ @@ -241,7 +246,7 @@ sa6_embedscope(struct sockaddr_in6 *sin6, int defaultok) sin6->sin6_scope_id = 0; } - return (0); + return 0; } void @@ -284,8 +289,9 @@ sa6_recoverscope(struct sockaddr_in6 *sin6, boolean_t attachcheck) zoneid = ntohs(sin6->sin6_addr.s6_addr16[1]); if (zoneid) { /* sanity check */ - if (if_index < zoneid) - return (ENXIO); + if (if_index < zoneid) { + return ENXIO; + } /* * We use the attachcheck parameter to skip the * interface attachment check. @@ -301,7 +307,7 @@ sa6_recoverscope(struct sockaddr_in6 *sin6, boolean_t attachcheck) ifnet_head_lock_shared(); if (ifindex2ifnet[zoneid] == NULL) { ifnet_head_done(); - return (ENXIO); + return ENXIO; } ifnet_head_done(); } @@ -310,7 +316,7 @@ sa6_recoverscope(struct sockaddr_in6 *sin6, boolean_t attachcheck) } } - return (0); + return 0; } void @@ -346,14 +352,15 @@ scope6_addr2default(struct in6_addr *addr) * special case: The loopback address should be considered as * link-local, but there's no ambiguity in the syntax. */ - if (IN6_IS_ADDR_LOOPBACK(addr)) - return (0); + if (IN6_IS_ADDR_LOOPBACK(addr)) { + return 0; + } lck_mtx_lock(&scope6_lock); id = sid_default.s6id_list[index]; lck_mtx_unlock(&scope6_lock); - return (id); + return id; } /* @@ -376,11 +383,12 @@ in6_setscope(struct in6_addr *in6, struct ifnet *ifp, u_int32_t *ret_id) */ if (IN6_IS_ADDR_LOOPBACK(in6)) { if (!(ifp->if_flags & IFF_LOOPBACK)) { - return (EINVAL); + return EINVAL; } else { - if (ret_id != NULL) + if (ret_id != NULL) { *ret_id = 0; /* there's no ambiguity */ - return (0); + } + return 0; } } @@ -389,9 +397,10 @@ in6_setscope(struct in6_addr *in6, struct ifnet *ifp, u_int32_t *ret_id) if_inet6data_lock_shared(ifp); if (IN6_IFEXTRA(ifp) == NULL) { if_inet6data_lock_done(ifp); - if (ret_id) + if (ret_id) { *ret_id = 0; - return (EINVAL); + } + return EINVAL; } sid = SID(ifp); switch (scope) { @@ -411,18 +420,19 @@ in6_setscope(struct in6_addr *in6, struct ifnet *ifp, u_int32_t *ret_id) zoneid = sid->s6id_list[IPV6_ADDR_SCOPE_ORGLOCAL]; break; default: - zoneid = 0; /* XXX: treat as global. */ + zoneid = 0; /* XXX: treat as global. */ break; } if_inet6data_lock_done(ifp); - if (ret_id != NULL) + if (ret_id != NULL) { *ret_id = zoneid; + } - if (IN6_IS_SCOPE_LINKLOCAL(in6) || IN6_IS_ADDR_MC_INTFACELOCAL(in6)) + if (IN6_IS_SCOPE_LINKLOCAL(in6) || IN6_IS_ADDR_MC_INTFACELOCAL(in6)) { in6->s6_addr16[1] = htons(zoneid & 0xffff); /* XXX */ - - return (0); + } + return 0; } /* @@ -435,10 +445,11 @@ in6_clearscope(struct in6_addr *in6) int modified = 0; if (IN6_IS_SCOPE_LINKLOCAL(in6) || IN6_IS_ADDR_MC_INTFACELOCAL(in6)) { - if (in6->s6_addr16[1] != 0) + if (in6->s6_addr16[1] != 0) { modified = 1; + } in6->s6_addr16[1] = 0; } - return (modified); + return modified; } diff --git a/bsd/netinet6/scope6_var.h b/bsd/netinet6/scope6_var.h index 58e9b34f4..f2218998f 100644 --- a/bsd/netinet6/scope6_var.h +++ b/bsd/netinet6/scope6_var.h @@ -63,7 +63,7 @@ * 16 is correspondent to 4bit multicast scope field. * i.e. from node-local to global with some reserved/unassigned types. 
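 * The multicast scope is the low nibble of the second address byte
 * (roughly addr->s6_addr8[1] & 0x0f in in6_addrscope()), so every
 * possible value 0x0-0xf (interface-local 0x1, link-local 0x2,
 * site-local 0x5, organization-local 0x8, global 0xe) indexes into a
 * table of SCOPE6_ID_MAX entries.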
*/ -#define SCOPE6_ID_MAX 16 +#define SCOPE6_ID_MAX 16 #ifdef BSD_KERNEL_PRIVATE @@ -76,10 +76,10 @@ extern void scope6_ifattach(struct ifnet *); extern void scope6_setdefault(struct ifnet *); extern u_int32_t scope6_in6_addrscope(struct in6_addr *); extern u_int32_t scope6_addr2default(struct in6_addr *); -extern int sa6_embedscope (struct sockaddr_in6 *, int); -extern int sa6_recoverscope (struct sockaddr_in6 *, boolean_t); -extern int in6_setscope (struct in6_addr *, struct ifnet *, u_int32_t *); -extern int in6_clearscope (struct in6_addr *); +extern int sa6_embedscope(struct sockaddr_in6 *, int); +extern int sa6_recoverscope(struct sockaddr_in6 *, boolean_t); +extern int in6_setscope(struct in6_addr *, struct ifnet *, u_int32_t *); +extern int in6_clearscope(struct in6_addr *); extern void rtkey_to_sa6(struct rtentry *, struct sockaddr_in6 *); extern void rtgw_to_sa6(struct rtentry *, struct sockaddr_in6 *); #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/netinet6/tcp6_var.h b/bsd/netinet6/tcp6_var.h index d5b761e3a..1c4c0a0e7 100644 --- a/bsd/netinet6/tcp6_var.h +++ b/bsd/netinet6/tcp6_var.h @@ -2,7 +2,7 @@ * Copyright (c) 2010-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -102,15 +102,15 @@ SYSCTL_DECL(_net_inet6_tcp6); #endif -extern int tcp_v6mssdflt; /* XXX */ +extern int tcp_v6mssdflt; /* XXX */ -struct ip6_hdr; -void tcp6_ctlinput(int, struct sockaddr *, void *, struct ifnet *); -void tcp6_init(void); -int tcp6_input(struct mbuf **, int *, int); -struct rtentry *tcp_rtlookup6(struct inpcb *, unsigned int); +struct ip6_hdr; +void tcp6_ctlinput(int, struct sockaddr *, void *, struct ifnet *); +void tcp6_init(void); +int tcp6_input(struct mbuf **, int *, int); +struct rtentry *tcp_rtlookup6(struct inpcb *, unsigned int); -extern struct pr_usrreqs tcp6_usrreqs; +extern struct pr_usrreqs tcp6_usrreqs; #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETINET_TCP6_VAR_H_ */ diff --git a/bsd/netinet6/udp6_output.c b/bsd/netinet6/udp6_output.c index 2e674c328..7898c179f 100644 --- a/bsd/netinet6/udp6_output.c +++ b/bsd/netinet6/udp6_output.c @@ -151,7 +151,7 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, struct mbuf *control, struct proc *p) { u_int32_t ulen = m->m_pkthdr.len; - u_int32_t plen = sizeof (struct udphdr) + ulen; + u_int32_t plen = sizeof(struct udphdr) + ulen; struct ip6_hdr *ip6; struct udphdr *udp6; struct in6_addr *laddr, *faddr; @@ -159,10 +159,10 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, int error = 0; struct ip6_pktopts opt, *optp = NULL; struct ip6_moptions *im6o; - int af = AF_INET6, hlen = sizeof (struct ip6_hdr); + int af = AF_INET6, hlen = sizeof(struct ip6_hdr); int flags; struct sockaddr_in6 tmp; - struct in6_addr storage; + struct in6_addr storage; int sotc = SO_TC_UNSPEC; int netsvctype = _NET_SERVICE_TYPE_UNSPEC; struct ip6_out_args ip6oa; @@ -194,14 +194,18 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, ip6oa.ip6oa_boundif = in6p->inp_boundifp->if_index; ip6oa.ip6oa_flags |= IP6OAF_BOUND_IF; } - if (INP_NO_CELLULAR(in6p)) + if (INP_NO_CELLULAR(in6p)) { ip6oa.ip6oa_flags |= IP6OAF_NO_CELLULAR; - if (INP_NO_EXPENSIVE(in6p)) + } + if (INP_NO_EXPENSIVE(in6p)) { ip6oa.ip6oa_flags |= IP6OAF_NO_EXPENSIVE; - if (INP_AWDL_UNRESTRICTED(in6p)) + } + if (INP_AWDL_UNRESTRICTED(in6p)) { ip6oa.ip6oa_flags |= IP6OAF_AWDL_UNRESTRICTED; - if (INP_INTCOPROC_ALLOWED(in6p)) + } + if (INP_INTCOPROC_ALLOWED(in6p)) { ip6oa.ip6oa_flags |= IP6OAF_INTCOPROC_ALLOWED; + } #if CONTENT_FILTER /* @@ -213,8 +217,8 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, if (cfil_tag) { cfil_sin6 = (struct sockaddr_in6 *)(void *)cfil_faddr; if ((so->so_state_change_cnt != cfil_so_state_change_cnt) && - (in6p->in6p_fport != cfil_sin6->sin6_port || - !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &cfil_sin6->sin6_addr))) { + (in6p->in6p_fport != cfil_sin6->sin6_port || + !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &cfil_sin6->sin6_addr))) { /* * Socket is connected but socket state and dest addr/port changed. * We need to use the saved faddr info. 
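
The INP_*/IP6OAF_* checks in the hunk above follow a recurring xnu output-path pattern: restrictions recorded on the PCB are re-expressed as ip6_out_args flags so that ip6_output() can enforce them without reaching back into the socket. A minimal sketch of that pattern, reusing only the macros visible here (the helper name is illustrative, not an xnu symbol):

/* Sketch only: collapse PCB restrictions into ip6_out_args flags. */
static uint32_t
in6p_restriction_flags(struct in6pcb *in6p)
{
	uint32_t flags = 0;

	if (INP_NO_CELLULAR(in6p)) {
		flags |= IP6OAF_NO_CELLULAR;       /* deny cellular interfaces */
	}
	if (INP_NO_EXPENSIVE(in6p)) {
		flags |= IP6OAF_NO_EXPENSIVE;      /* deny expensive interfaces */
	}
	if (INP_AWDL_UNRESTRICTED(in6p)) {
		flags |= IP6OAF_AWDL_UNRESTRICTED; /* allow restricted AWDL use */
	}
	if (INP_INTCOPROC_ALLOWED(in6p)) {
		flags |= IP6OAF_INTCOPROC_ALLOWED; /* allow co-processor access */
	}
	return flags;
}
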
@@ -228,11 +232,13 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, if (control) { sotc = so_tc_from_control(control, &netsvctype); if ((error = ip6_setpktopts(control, &opt, - NULL, IPPROTO_UDP)) != 0) + NULL, IPPROTO_UDP)) != 0) { goto release; + } optp = &opt; - } else + } else { optp = in6p->in6p_outputopts; + } if (sotc == SO_TC_UNSPEC) { sotc = so->so_traffic_class; @@ -302,16 +308,19 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, laddr = in6_selectsrc(sin6, optp, in6p, &in6p->in6p_route, NULL, &storage, ip6oa.ip6oa_boundif, &error); - } else - laddr = &in6p->in6p_laddr; /* XXX */ + } else { + laddr = &in6p->in6p_laddr; /* XXX */ + } if (laddr == NULL) { - if (error == 0) + if (error == 0) { error = EADDRNOTAVAIL; + } goto release; } if (in6p->in6p_lport == 0 && - (error = in6_pcbsetport(laddr, in6p, p, 0)) != 0) + (error = in6_pcbsetport(laddr, in6p, p, 0)) != 0) { goto release; + } } else { if (IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr)) { error = ENOTCONN; @@ -321,8 +330,7 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, faddr = &in6p->in6p_faddr; fport = in6p->in6p_fport; #if CONTENT_FILTER - if (cfil_faddr_use) - { + if (cfil_faddr_use) { faddr = &((struct sockaddr_in6 *)(void *)cfil_faddr)->sin6_addr; fport = ((struct sockaddr_in6 *)(void *)cfil_faddr)->sin6_port; @@ -343,14 +351,15 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, "option was set for a connected socket\n"); error = EINVAL; goto release; - } else + } else { af = AF_INET; + } } - } - if (in6p->inp_flowhash == 0) + if (in6p->inp_flowhash == 0) { in6p->inp_flowhash = inp_calc_flowhash(in6p); + } /* update flowinfo - RFC 6437 */ if (in6p->inp_flow == 0 && in6p->in6p_flags & IN6P_AUTOFLOWLABEL) { in6p->inp_flow &= ~IPV6_FLOWLABEL_MASK; @@ -358,11 +367,12 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, (htonl(in6p->inp_flowhash) & IPV6_FLOWLABEL_MASK); } - if (af == AF_INET) - hlen = sizeof (struct ip); + if (af == AF_INET) { + hlen = sizeof(struct ip); + } if (fport == htons(53) && !(so->so_flags1 & SOF1_DNS_COUNTED)) { - so->so_flags1 |= SOF1_DNS_COUNTED; + so->so_flags1 |= SOF1_DNS_COUNTED; INC_ATOMIC_INT64_LIM(net_api_stats.nas_socket_inet_dgram_dns); } @@ -370,7 +380,7 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, * Calculate data length and get a mbuf * for UDP and IP6 headers. */ - M_PREPEND(m, hlen + sizeof (struct udphdr), M_DONTWAIT, 1); + M_PREPEND(m, hlen + sizeof(struct udphdr), M_DONTWAIT, 1); if (m == 0) { error = ENOBUFS; goto release; @@ -382,34 +392,36 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, udp6 = (struct udphdr *)(void *)(mtod(m, caddr_t) + hlen); udp6->uh_sport = in6p->in6p_lport; /* lport is always set in the PCB */ udp6->uh_dport = fport; - if (plen <= 0xffff) + if (plen <= 0xffff) { udp6->uh_ulen = htons((u_short)plen); - else + } else { udp6->uh_ulen = 0; + } udp6->uh_sum = 0; switch (af) { case AF_INET6: ip6 = mtod(m, struct ip6_hdr *); - ip6->ip6_flow = in6p->inp_flow & IPV6_FLOWINFO_MASK; - ip6->ip6_vfc &= ~IPV6_VERSION_MASK; - ip6->ip6_vfc |= IPV6_VERSION; -#if 0 /* ip6_plen will be filled in ip6_output. */ - ip6->ip6_plen = htons((u_short)plen); + ip6->ip6_flow = in6p->inp_flow & IPV6_FLOWINFO_MASK; + ip6->ip6_vfc &= ~IPV6_VERSION_MASK; + ip6->ip6_vfc |= IPV6_VERSION; +#if 0 /* ip6_plen will be filled in ip6_output. 
*/ + ip6->ip6_plen = htons((u_short)plen); #endif - ip6->ip6_nxt = IPPROTO_UDP; - ip6->ip6_hlim = in6_selecthlim(in6p, in6p->in6p_route.ro_rt ? + ip6->ip6_nxt = IPPROTO_UDP; + ip6->ip6_hlim = in6_selecthlim(in6p, in6p->in6p_route.ro_rt ? in6p->in6p_route.ro_rt->rt_ifp : NULL); - ip6->ip6_src = *laddr; - ip6->ip6_dst = *faddr; + ip6->ip6_src = *laddr; + ip6->ip6_dst = *faddr; udp6->uh_sum = in6_pseudo(laddr, faddr, htonl(plen + IPPROTO_UDP)); - m->m_pkthdr.csum_flags = (CSUM_UDPIPV6|CSUM_ZERO_INVERT); + m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT); m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum); - if (!IN6_IS_ADDR_UNSPECIFIED(laddr)) + if (!IN6_IS_ADDR_UNSPECIFIED(laddr)) { ip6oa.ip6oa_flags |= IP6OAF_BOUND_SRCADDR; + } flags = IPV6_OUTARGS; @@ -444,8 +456,8 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, in6p->inp_route.ro_dst.sa_family = AF_INET6; in6p->inp_route.ro_dst.sa_len = sizeof(struct sockaddr_in6); ((struct sockaddr_in6 *)(void *)&in6p->inp_route.ro_dst)->sin6_addr = - *faddr; - + *faddr; + rtalloc_scoped(&in6p->inp_route, ip6oa.ip6oa_boundif); inp_update_necp_policy(in6p, (struct sockaddr *)&from, @@ -466,8 +478,9 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, } } #endif /* NECP */ - if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) + if ((so->so_flags1 & SOF1_QOSMARKING_ALLOWED)) { ip6oa.ip6oa_flags |= IP6OAF_QOSMARKING_ALLOWED; + } #if IPSEC if (in6p->in6p_sp != NULL && ipsec_setsocket(m, so) != 0) { @@ -478,8 +491,9 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, /* In case of IPv4-mapped address used in previous send */ if (ROUTE_UNUSABLE(&in6p->in6p_route) || - rt_key(in6p->in6p_route.ro_rt)->sa_family != AF_INET6) + rt_key(in6p->in6p_route.ro_rt)->sa_family != AF_INET6) { ROUTE_RELEASE(&in6p->in6p_route); + } /* Copy the cached route and take an extra reference */ in6p_route_copyout(in6p, &ro); @@ -490,13 +504,15 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, m->m_pkthdr.pkt_flowid = in6p->inp_flowhash; m->m_pkthdr.pkt_proto = IPPROTO_UDP; m->m_pkthdr.pkt_flags |= (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC); - if (flowadv) + if (flowadv) { m->m_pkthdr.pkt_flags |= PKTF_FLOW_ADV; + } m->m_pkthdr.tx_udp_pid = so->last_pid; - if (so->so_flags & SOF_DELEGATED) + if (so->so_flags & SOF_DELEGATED) { m->m_pkthdr.tx_udp_e_pid = so->e_pid; - else + } else { m->m_pkthdr.tx_udp_e_pid = 0; + } im6o = in6p->in6p_moptions; if (im6o != NULL) { @@ -517,8 +533,9 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, m = NULL; socket_lock(so, 0); - if (im6o != NULL) + if (im6o != NULL) { IM6O_REMREF(im6o); + } if (error == 0 && nstat_collect) { boolean_t cell, wifi, wired; @@ -549,18 +566,19 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, } VERIFY(in6p->inp_sndinprog_cnt > 0); - if ( --in6p->inp_sndinprog_cnt == 0) + if (--in6p->inp_sndinprog_cnt == 0) { in6p->inp_flags &= ~(INP_FC_FEEDBACK); + } if (ro.ro_rt != NULL) { struct ifnet *outif = ro.ro_rt->rt_ifp; so->so_pktheadroom = P2ROUNDUP( - sizeof(struct udphdr) + - hlen + - ifnet_hdrlen(outif) + - ifnet_mbuf_packetpreamblelen(outif), - sizeof(u_int32_t)); + sizeof(struct udphdr) + + hlen + + ifnet_hdrlen(outif) + + ifnet_mbuf_packetpreamblelen(outif), + sizeof(u_int32_t)); } /* Synchronize PCB cached route */ @@ -570,23 +588,25 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, struct rtentry *rt = in6p->in6p_route.ro_rt; struct ifnet 
*outif; - if (rt->rt_flags & RTF_MULTICAST) - rt = NULL; /* unusable */ - + if (rt->rt_flags & RTF_MULTICAST) { + rt = NULL; /* unusable */ + } #if CONTENT_FILTER /* * Discard temporary route for cfil case */ - if (cfil_faddr_use) - rt = NULL; /* unusable */ + if (cfil_faddr_use) { + rt = NULL; /* unusable */ + } #endif - + /* * Always discard the cached route for unconnected * socket or if it is a multicast route. */ - if (rt == NULL) + if (rt == NULL) { ROUTE_RELEASE(&in6p->in6p_route); + } /* * If the destination route is unicast, update outif @@ -597,11 +617,11 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, in6p->in6p_last_outifp = outif; so->so_pktheadroom = P2ROUNDUP( - sizeof(struct udphdr) + - hlen + - ifnet_hdrlen(outif) + - ifnet_mbuf_packetpreamblelen(outif), - sizeof(u_int32_t)); + sizeof(struct udphdr) + + hlen + + ifnet_hdrlen(outif) + + ifnet_mbuf_packetpreamblelen(outif), + sizeof(u_int32_t)); } } else { ROUTE_RELEASE(&in6p->in6p_route); @@ -612,9 +632,10 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, * socket is denied access to it, generate an event. */ if (error != 0 && (ip6oa.ip6oa_retflags & IP6OARF_IFDENIED) && - (INP_NO_CELLULAR(in6p) || INP_NO_EXPENSIVE(in6p))) - soevent(in6p->inp_socket, (SO_FILT_HINT_LOCKED| + (INP_NO_CELLULAR(in6p) || INP_NO_EXPENSIVE(in6p))) { + soevent(in6p->inp_socket, (SO_FILT_HINT_LOCKED | SO_FILT_HINT_IFDENIED)); + } break; case AF_INET: error = EAFNOSUPPORT; @@ -623,18 +644,21 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6, goto releaseopt; release: - if (m != NULL) + if (m != NULL) { m_freem(m); + } releaseopt: if (control != NULL) { - if (optp == &opt) + if (optp == &opt) { ip6_clearpktopts(optp, -1); + } m_freem(control); } #if CONTENT_FILTER - if (cfil_tag) + if (cfil_tag) { m_tag_free(cfil_tag); + } #endif - return (error); + return error; } diff --git a/bsd/netinet6/udp6_usrreq.c b/bsd/netinet6/udp6_usrreq.c index 325e3773d..64b57f861 100644 --- a/bsd/netinet6/udp6_usrreq.c +++ b/bsd/netinet6/udp6_usrreq.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -160,7 +160,7 @@ static int udp6_bind(struct socket *, struct sockaddr *, struct proc *); static int udp6_connectx(struct socket *, struct sockaddr *, struct sockaddr *, struct proc *, uint32_t, sae_associd_t, sae_connid_t *, uint32_t, void *, uint32_t, struct uio *, user_ssize_t *); -static int udp6_detach(struct socket *); +static int udp6_detach(struct socket *); static int udp6_disconnect(struct socket *); static int udp6_disconnectx(struct socket *, sae_associd_t, sae_connid_t); static int udp6_send(struct socket *, int, struct mbuf *, struct sockaddr *, @@ -170,22 +170,22 @@ static void udp6_append(struct inpcb *, struct ip6_hdr *, static int udp6_input_checksum(struct mbuf *, struct udphdr *, int, int); struct pr_usrreqs udp6_usrreqs = { - .pru_abort = udp6_abort, - .pru_attach = udp6_attach, - .pru_bind = udp6_bind, - .pru_connect = udp6_connect, - .pru_connectx = udp6_connectx, - .pru_control = in6_control, - .pru_detach = udp6_detach, - .pru_disconnect = udp6_disconnect, - .pru_disconnectx = udp6_disconnectx, - .pru_peeraddr = in6_mapped_peeraddr, - .pru_send = udp6_send, - .pru_shutdown = udp_shutdown, - .pru_sockaddr = in6_mapped_sockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, - .pru_soreceive_list = soreceive_list, + .pru_abort = udp6_abort, + .pru_attach = udp6_attach, + .pru_bind = udp6_bind, + .pru_connect = udp6_connect, + .pru_connectx = udp6_connectx, + .pru_control = in6_control, + .pru_detach = udp6_detach, + .pru_disconnect = udp6_disconnect, + .pru_disconnectx = udp6_disconnectx, + .pru_peeraddr = in6_mapped_peeraddr, + .pru_send = udp6_send, + .pru_shutdown = udp_shutdown, + .pru_sockaddr = in6_mapped_sockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, + .pru_soreceive_list = soreceive_list, }; /* @@ -211,7 +211,7 @@ udp6_append(struct inpcb *last, struct ip6_hdr *ip6, if ((last->in6p_flags & INP_CONTROLOPTS) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (last->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (last->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { ret = ip6_savecontrol(last, n, &opts); if (ret != 0) { m_freem(n); @@ -227,10 +227,11 @@ udp6_append(struct inpcb *last, struct ip6_hdr *ip6, } so_recv_data_stat(last->in6p_socket, n, 0); if (sbappendaddr(&last->in6p_socket->so_rcv, - (struct sockaddr *)udp_in6, n, opts, NULL) == 0) + (struct sockaddr *)udp_in6, n, opts, NULL) == 0) { udpstat.udps_fullsock++; - else + } else { sorwakeup(last->in6p_socket); + } } int @@ -250,7 +251,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto) struct inpcbinfo *pcbinfo = &udbinfo; struct sockaddr_in6 fromsa; - IP6_EXTHDR_CHECK(m, off, sizeof (struct udphdr), return IPPROTO_DONE); + IP6_EXTHDR_CHECK(m, off, sizeof(struct udphdr), return IPPROTO_DONE); /* Expect 32-bit aligned data pointer on strict-align platforms */ MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m); @@ -263,7 +264,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto) udpstat.udps_ipackets++; - plen = ntohs(ip6->ip6_plen) - off + sizeof (*ip6); + plen = ntohs(ip6->ip6_plen) - off + sizeof(*ip6); uh = (struct udphdr *)(void *)((caddr_t)ip6 + off); ulen = ntohs((u_short)uh->uh_ulen); @@ -282,8 +283,9 @@ udp6_input(struct mbuf **mp, int *offp, int proto) /* * Checksum extended UDP header and data. 
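 * Unlike UDP/IPv4, a zero checksum is not allowed here: RFC 2460 makes
 * the UDP checksum mandatory over IPv6, and udp6_input_checksum(),
 * invoked just below, fails the packet when uh_sum is 0.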
*/ - if (udp6_input_checksum(m, uh, off, ulen)) + if (udp6_input_checksum(m, uh, off, ulen)) { goto bad; + } /* * Construct sockaddr format source address. @@ -342,15 +344,18 @@ udp6_input(struct mbuf **mp, int *offp, int proto) int skipit; #endif /* IPSEC */ - if ((in6p->inp_vflag & INP_IPV6) == 0) + if ((in6p->inp_vflag & INP_IPV6) == 0) { continue; + } - if (inp_restricted_recv(in6p, ifp)) + if (inp_restricted_recv(in6p, ifp)) { continue; + } if (in_pcb_checkstate(in6p, WNT_ACQUIRE, 0) == - WNT_STOPUSING) + WNT_STOPUSING) { continue; + } udp_lock(in6p->in6p_socket, 1, 0); @@ -374,8 +379,8 @@ udp6_input(struct mbuf **mp, int *offp, int proto) int blocked; IM6O_LOCK(imo); - bzero(&mcaddr, sizeof (struct sockaddr_in6)); - mcaddr.sin6_len = sizeof (struct sockaddr_in6); + bzero(&mcaddr, sizeof(struct sockaddr_in6)); + mcaddr.sin6_len = sizeof(struct sockaddr_in6); mcaddr.sin6_family = AF_INET6; mcaddr.sin6_addr = ip6->ip6_dst; @@ -385,8 +390,9 @@ udp6_input(struct mbuf **mp, int *offp, int proto) if (blocked != MCAST_PASS) { udp_unlock(in6p->in6p_socket, 1, 0); if (blocked == MCAST_NOTSMEMBER || - blocked == MCAST_MUTED) + blocked == MCAST_MUTED) { udpstat.udps_filtermcast++; + } continue; } } @@ -420,10 +426,11 @@ udp6_input(struct mbuf **mp, int *offp, int proto) * and m_copy() will copy M_PKTHDR * only if offset is 0. */ - if (reuse_sock) + if (reuse_sock) { n = m_copy(m, 0, M_COPYALL); + } udp6_append(in6p, ip6, &udp_in6, m, - off + sizeof (struct udphdr), ifp); + off + sizeof(struct udphdr), ifp); mcast_delivered++; m = n; } @@ -437,8 +444,9 @@ udp6_input(struct mbuf **mp, int *offp, int proto) * port. It assumes that an application will never * clear these options after setting them. */ - if (reuse_sock == 0 || m == NULL) + if (reuse_sock == 0 || m == NULL) { break; + } /* * Expect 32-bit aligned data pointer on strict-align @@ -467,9 +475,10 @@ udp6_input(struct mbuf **mp, int *offp, int proto) } /* free the extra copy of mbuf or skipped by NECP */ - if (m != NULL) + if (m != NULL) { m_freem(m); - return (IPPROTO_DONE); + } + return IPPROTO_DONE; } #if IPSEC @@ -482,11 +491,11 @@ udp6_input(struct mbuf **mp, int *offp, int proto) */ if (ipsec_bypass == 0 && (esp_udp_encap_port & 0xFFFF) != 0 && uh->uh_dport == ntohs((u_short)esp_udp_encap_port)) { - int payload_len = ulen - sizeof (struct udphdr) > 4 ? 4 : - ulen - sizeof (struct udphdr); + int payload_len = ulen - sizeof(struct udphdr) > 4 ? 
4 : + ulen - sizeof(struct udphdr); - if (m->m_len < off + sizeof (struct udphdr) + payload_len) { - if ((m = m_pullup(m, off + sizeof (struct udphdr) + + if (m->m_len < off + sizeof(struct udphdr) + payload_len) { + if ((m = m_pullup(m, off + sizeof(struct udphdr) + payload_len)) == NULL) { udpstat.udps_hdrops++; goto bad; @@ -502,14 +511,14 @@ udp6_input(struct mbuf **mp, int *offp, int proto) } /* Check for NAT keepalive packet */ if (payload_len == 1 && *(u_int8_t*) - ((caddr_t)uh + sizeof (struct udphdr)) == 0xFF) { + ((caddr_t)uh + sizeof(struct udphdr)) == 0xFF) { goto bad; } else if (payload_len == 4 && *(u_int32_t*)(void *) - ((caddr_t)uh + sizeof (struct udphdr)) != 0) { + ((caddr_t)uh + sizeof(struct udphdr)) != 0) { /* UDP encapsulated IPSec packet to pass through NAT */ /* preserve the udp header */ - *offp = off + sizeof (struct udphdr); - return (esp6_input(mp, offp, IPPROTO_UDP)); + *offp = off + sizeof(struct udphdr); + return esp6_input(mp, offp, IPPROTO_UDP); } } #endif /* IPSEC */ @@ -525,7 +534,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto) if (udp_log_in_vain) { char buf[INET6_ADDRSTRLEN]; - strlcpy(buf, ip6_sprintf(&ip6->ip6_dst), sizeof (buf)); + strlcpy(buf, ip6_sprintf(&ip6->ip6_dst), sizeof(buf)); if (udp_log_in_vain < 3) { log(LOG_INFO, "Connection attempt to UDP " "%s:%d from %s:%d\n", buf, @@ -549,7 +558,7 @@ udp6_input(struct mbuf **mp, int *offp, int proto) goto bad; } icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_NOPORT, 0); - return (IPPROTO_DONE); + return IPPROTO_DONE; } #if NECP if (!necp_socket_is_allowed_to_send_recv_v6(in6p, uh->uh_dport, @@ -577,14 +586,14 @@ udp6_input(struct mbuf **mp, int *offp, int proto) if ((in6p->in6p_flags & INP_CONTROLOPTS) != 0 || (in6p->in6p_socket->so_options & SO_TIMESTAMP) != 0 || (in6p->in6p_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0 || - (in6p->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { + (in6p->in6p_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) { ret = ip6_savecontrol(in6p, m, &opts); if (ret != 0) { udp_unlock(in6p->in6p_socket, 1, 0); goto bad; } } - m_adj(m, off + sizeof (struct udphdr)); + m_adj(m, off + sizeof(struct udphdr)); if (nstat_collect) { INP_ADD_STAT(in6p, cell, wifi, wired, rxpackets, 1); INP_ADD_STAT(in6p, cell, wifi, wired, rxbytes, m->m_pkthdr.len); @@ -601,13 +610,15 @@ udp6_input(struct mbuf **mp, int *offp, int proto) } sorwakeup(in6p->in6p_socket); udp_unlock(in6p->in6p_socket, 1, 0); - return (IPPROTO_DONE); + return IPPROTO_DONE; bad: - if (m != NULL) + if (m != NULL) { m_freem(m); - if (opts != NULL) + } + if (opts != NULL) { m_freem(opts); - return (IPPROTO_DONE); + } + return IPPROTO_DONE; } void @@ -627,18 +638,21 @@ udp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) } *uhp; if (sa->sa_family != AF_INET6 || - sa->sa_len != sizeof (struct sockaddr_in6)) + sa->sa_len != sizeof(struct sockaddr_in6)) { return; + } - if ((unsigned)cmd >= PRC_NCMDS) + if ((unsigned)cmd >= PRC_NCMDS) { return; + } if (PRC_IS_REDIRECT(cmd)) { notify = in6_rtchange; d = NULL; - } else if (cmd == PRC_HOSTDEAD) + } else if (cmd == PRC_HOSTDEAD) { d = NULL; - else if (inet6ctlerrmap[cmd] == 0) + } else if (inet6ctlerrmap[cmd] == 0) { return; + } /* if the parameter is from icmp6, decode it. */ if (d != NULL) { @@ -660,19 +674,23 @@ udp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp) * M and OFF are valid. 
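 * The ICMPv6 payload carries only the leading bytes of the offending
 * packet, so the code below copies out just the UDP header at OFF and
 * uses the embedded ports, together with the addresses from the
 * payload, to find the PCBs to notify via in6_pcbnotify().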
*/ /* check if we can safely examine src and dst ports */ - if (m->m_pkthdr.len < off + sizeof (*uhp)) + if (m->m_pkthdr.len < off + sizeof(*uhp)) { return; + } - bzero(&uh, sizeof (uh)); - m_copydata(m, off, sizeof (*uhp), (caddr_t)&uh); + bzero(&uh, sizeof(uh)); + m_copydata(m, off, sizeof(*uhp), (caddr_t)&uh); (void) in6_pcbnotify(&udbinfo, sa, uh.uh_dport, (struct sockaddr*)ip6cp->ip6c_src, uh.uh_sport, cmd, NULL, notify); - } else { - (void) in6_pcbnotify(&udbinfo, sa, 0, - (struct sockaddr *)&sa6_src, 0, cmd, NULL, notify); } + /* + * XXX The else condition here was broken for a long time. + * Fixing it made us deliver notification correctly but broke + * some frameworks that didn't handle it well. + * For now we have removed it and will revisit it later. + */ } static int @@ -687,7 +705,7 @@ udp6_abort(struct socket *so) } soisdisconnected(so); in6_pcbdetach(inp); - return (0); + return 0; } static int @@ -698,24 +716,28 @@ udp6_attach(struct socket *so, int proto, struct proc *p) int error; inp = sotoinpcb(so); - if (inp != NULL) - return (EINVAL); + if (inp != NULL) { + return EINVAL; + } error = in_pcballoc(so, &udbinfo, p); - if (error) - return (error); + if (error) { + return error; + } if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) { error = soreserve(so, udp_sendspace, udp_recvspace); - if (error) - return (error); + if (error) { + return error; + } } inp = (struct inpcb *)so->so_pcb; inp->inp_vflag |= INP_IPV6; - if (ip6_mapped_addr_on) + if (ip6_mapped_addr_on) { inp->inp_vflag |= INP_IPV4; - inp->in6p_hops = -1; /* use kernel default */ - inp->in6p_cksum = -1; /* just to be sure */ + } + inp->in6p_hops = -1; /* use kernel default */ + inp->in6p_cksum = -1; /* just to be sure */ /* * XXX: ugly!! * IPv4 TTL initialization is necessary for an IPv6 socket as well, @@ -723,9 +745,10 @@ udp6_attach(struct socket *so, int proto, struct proc *p) * which may match an IPv4-mapped IPv6 address. 
*/ inp->inp_ip_ttl = ip_defttl; - if (nstat_collect) + if (nstat_collect) { nstat_udp_new_pcb(inp); - return (0); + } + return 0; } static int @@ -735,8 +758,9 @@ udp6_bind(struct socket *so, struct sockaddr *nam, struct proc *p) int error; inp = sotoinpcb(so); - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } inp->inp_vflag &= ~INP_IPV4; inp->inp_vflag |= INP_IPV6; @@ -754,12 +778,12 @@ udp6_bind(struct socket *so, struct sockaddr *nam, struct proc *p) inp->inp_vflag |= INP_IPV4; inp->inp_vflag &= ~INP_IPV6; error = in_pcbbind(inp, (struct sockaddr *)&sin, p); - return (error); + return error; } } error = in6_pcbbind(inp, nam, p); - return (error); + return error; } int @@ -772,8 +796,9 @@ udp6_connect(struct socket *so, struct sockaddr *nam, struct proc *p) #endif /* defined(NECP) && defined(FLOW_DIVERT) */ inp = sotoinpcb(so); - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } #if defined(NECP) && defined(FLOW_DIVERT) should_use_flow_divert = necp_socket_should_use_flow_divert(inp); @@ -786,8 +811,9 @@ udp6_connect(struct socket *so, struct sockaddr *nam, struct proc *p) if (IN6_IS_ADDR_V4MAPPED(&sin6_p->sin6_addr)) { struct sockaddr_in sin; - if (inp->inp_faddr.s_addr != INADDR_ANY) - return (EISCONN); + if (inp->inp_faddr.s_addr != INADDR_ANY) { + return EISCONN; + } if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) { so->so_flags1 |= SOF1_CONNECT_COUNTED; @@ -815,12 +841,13 @@ udp6_connect(struct socket *so, struct sockaddr *nam, struct proc *p) inp->inp_vflag &= ~INP_IPV6; soisconnected(so); } - return (error); + return error; } } - if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) - return (EISCONN); + if (!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { + return EISCONN; + } if (!(so->so_flags1 & SOF1_CONNECT_COUNTED)) { so->so_flags1 |= SOF1_CONNECT_COUNTED; @@ -839,7 +866,7 @@ do_flow_divert: } else { error = ENETDOWN; } - return (error); + return error; } #endif /* defined(NECP) && defined(FLOW_DIVERT) */ @@ -860,8 +887,9 @@ do_flow_divert: } #endif /* NECP */ soisconnected(so); - if (inp->inp_flowhash == 0) + if (inp->inp_flowhash == 0) { inp->inp_flowhash = inp_calc_flowhash(inp); + } /* update flowinfo - RFC 6437 */ if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) { @@ -870,7 +898,7 @@ do_flow_divert: (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK); } } - return (error); + return error; } static int @@ -879,8 +907,8 @@ udp6_connectx(struct socket *so, struct sockaddr *src, sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg, uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written) { - return (udp_connectx_common(so, AF_INET6, src, dst, - p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written)); + return udp_connectx_common(so, AF_INET6, src, dst, + p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written); } static int @@ -889,10 +917,11 @@ udp6_detach(struct socket *so) struct inpcb *inp; inp = sotoinpcb(so); - if (inp == NULL) - return (EINVAL); + if (inp == NULL) { + return EINVAL; + } in6_pcbdetach(inp); - return (0); + return 0; } static int @@ -903,20 +932,22 @@ udp6_disconnect(struct socket *so) inp = sotoinpcb(so); if (inp == NULL #if NECP - || (necp_socket_should_use_flow_divert(inp)) + || (necp_socket_should_use_flow_divert(inp)) #endif /* NECP */ - ) - return (inp == NULL ? EINVAL : EPROTOTYPE); + ) { + return inp == NULL ? 
EINVAL : EPROTOTYPE; + } if (inp->inp_vflag & INP_IPV4) { struct pr_usrreqs *pru; pru = ip_protox[IPPROTO_UDP]->pr_usrreqs; - return ((*pru->pru_disconnect)(so)); + return (*pru->pru_disconnect)(so); } - if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) - return (ENOTCONN); + if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)) { + return ENOTCONN; + } in6_pcbdisconnect(inp); @@ -926,18 +957,19 @@ udp6_disconnect(struct socket *so) inp->in6p_laddr = in6addr_any; inp->in6p_last_outifp = NULL; - so->so_state &= ~SS_ISCONNECTED; /* XXX */ - return (0); + so->so_state &= ~SS_ISCONNECTED; /* XXX */ + return 0; } static int udp6_disconnectx(struct socket *so, sae_associd_t aid, sae_connid_t cid) { #pragma unused(cid) - if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) - return (EINVAL); + if (aid != SAE_ASSOCID_ANY && aid != SAE_ASSOCID_ALL) { + return EINVAL; + } - return (udp6_disconnect(so)); + return udp6_disconnect(so); } static int @@ -975,7 +1007,7 @@ udp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, #endif /* defined(NECP) && defined(FLOW_DIVERT) */ if (addr != NULL) { - if (addr->sa_len != sizeof (struct sockaddr_in6)) { + if (addr->sa_len != sizeof(struct sockaddr_in6)) { error = EINVAL; goto bad; } @@ -999,8 +1031,9 @@ udp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, if (hasv4addr) { struct pr_usrreqs *pru; - if (sin6 != NULL) + if (sin6 != NULL) { in6_sin6_2_sin_in_sock(addr); + } #if defined(NECP) && defined(FLOW_DIVERT) if (should_use_flow_divert) { goto do_flow_divert; @@ -1010,11 +1043,12 @@ udp6_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr, error = ((*pru->pru_send)(so, flags, m, addr, control, p)); #if CONTENT_FILTER - if (cfil_tag) + if (cfil_tag) { m_tag_free(cfil_tag); + } #endif /* addr will just be freed in sendit(). 
*/ - return (error); + return error; } } @@ -1024,8 +1058,9 @@ do_flow_divert: /* Implicit connect */ error = flow_divert_implicit_data_out(so, flags, m, addr, control, p); #if CONTENT_FILTER - if (cfil_tag) + if (cfil_tag) { m_tag_free(cfil_tag); + } #endif return error; } @@ -1033,23 +1068,27 @@ do_flow_divert: error = udp6_output(inp, m, addr, control, p); #if CONTENT_FILTER - if (cfil_tag) + if (cfil_tag) { m_tag_free(cfil_tag); + } #endif return error; bad: VERIFY(error != 0); - if (m != NULL) + if (m != NULL) { m_freem(m); - if (control != NULL) + } + if (control != NULL) { m_freem(control); + } #if CONTENT_FILTER - if (cfil_tag) + if (cfil_tag) { m_tag_free(cfil_tag); + } #endif - return (error); + return error; } /* @@ -1062,7 +1101,7 @@ udp6_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); if (!(m->m_pkthdr.csum_flags & CSUM_DATA_VALID) && - uh->uh_sum == 0) { + uh->uh_sum == 0) { /* UDP/IPv6 checksum is mandatory (RFC2460) */ /* @@ -1100,7 +1139,7 @@ udp6_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { s = ip6->ip6_src.s6_addr16[1]; - ip6->ip6_src.s6_addr16[1] = 0 ; + ip6->ip6_src.s6_addr16[1] = 0; } if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { d = ip6->ip6_dst.s6_addr16[1]; @@ -1109,20 +1148,25 @@ udp6_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) /* callee folds in sum */ sum = m_adj_sum16(m, start, off, ulen, sum); - if (off > start) + if (off > start) { swbytes += (off - start); - else + } else { swbytes += (start - off); + } - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_src)) { ip6->ip6_src.s6_addr16[1] = s; - if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) + } + if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) { ip6->ip6_dst.s6_addr16[1] = d; + } - if (swbytes != 0) + if (swbytes != 0) { udp_in_cksum_stats(swbytes); - if (trailer != 0) + } + if (trailer != 0) { m_adj(m, -trailer); + } } uh->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, @@ -1138,8 +1182,8 @@ udp6_input_checksum(struct mbuf *m, struct udphdr *uh, int off, int ulen) badsum: udpstat.udps_badsum++; IF_UDP_STATINC(ifp, badchksum); - return (-1); + return -1; } - return (0); + return 0; } diff --git a/bsd/netkey/key.c b/bsd/netkey/key.c index 1e4b99811..a340f34b7 100644 --- a/bsd/netkey/key.c +++ b/bsd/netkey/key.c @@ -130,7 +130,7 @@ #include -#define FULLMASK 0xff +#define FULLMASK 0xff lck_grp_t *sadb_mutex_grp; lck_grp_attr_t *sadb_mutex_grp_attr; @@ -159,14 +159,14 @@ u_int32_t key_debug_level = 0; //### our sysctl is not dynamic static int key_timehandler_running = 0; static u_int key_spi_trycnt = 1000; static u_int32_t key_spi_minval = 0x100; -static u_int32_t key_spi_maxval = 0x0fffffff; /* XXX */ +static u_int32_t key_spi_maxval = 0x0fffffff; /* XXX */ static u_int32_t policy_id = 0; -static u_int key_int_random = 60; /*interval to initialize randseed,1(m)*/ -static u_int key_larval_lifetime = 30; /* interval to expire acquiring, 30(s)*/ -static int key_blockacq_count = 10; /* counter for blocking SADB_ACQUIRE.*/ -static int key_blockacq_lifetime = 20; /* lifetime for blocking SADB_ACQUIRE.*/ -static int key_preferred_oldsa = 0; /* preferred old sa rather than new sa.*/ -__private_extern__ int natt_keepalive_interval = 20; /* interval between natt keepalives.*/ +static u_int key_int_random = 60; /*interval to initialize randseed,1(m)*/ +static u_int key_larval_lifetime = 30; /* interval to expire acquiring, 30(s)*/ +static int 
key_blockacq_count = 10; /* counter for blocking SADB_ACQUIRE.*/ +static int key_blockacq_lifetime = 20; /* lifetime for blocking SADB_ACQUIRE.*/ +static int key_preferred_oldsa = 0; /* preferred old sa rather than new sa.*/ +__private_extern__ int natt_keepalive_interval = 20; /* interval between natt keepalives.*/ __private_extern__ int ipsec_policy_count = 0; static int ipsec_sav_count = 0; @@ -175,19 +175,19 @@ static int key_tick_init_random = 0; static u_int64_t up_time = 0; __private_extern__ u_int64_t natt_now = 0; -static LIST_HEAD(_sptree, secpolicy) sptree[IPSEC_DIR_MAX]; /* SPD */ -static LIST_HEAD(_sahtree, secashead) sahtree; /* SAD */ +static LIST_HEAD(_sptree, secpolicy) sptree[IPSEC_DIR_MAX]; /* SPD */ +static LIST_HEAD(_sahtree, secashead) sahtree; /* SAD */ static LIST_HEAD(_regtree, secreg) regtree[SADB_SATYPE_MAX + 1]; /* registed list */ -#define SPIHASHSIZE 128 -#define SPIHASH(x) (((x) ^ ((x) >> 16)) % SPIHASHSIZE) +#define SPIHASHSIZE 128 +#define SPIHASH(x) (((x) ^ ((x) >> 16)) % SPIHASHSIZE) static LIST_HEAD(_spihash, secasvar) spihash[SPIHASHSIZE]; #ifndef IPSEC_NONBLOCK_ACQUIRE -static LIST_HEAD(_acqtree, secacq) acqtree; /* acquiring list */ +static LIST_HEAD(_acqtree, secacq) acqtree; /* acquiring list */ #endif -static LIST_HEAD(_spacqtree, secspacq) spacqtree; /* SP acquiring list */ +static LIST_HEAD(_spacqtree, secspacq) spacqtree; /* SP acquiring list */ struct key_cb key_cb; @@ -208,64 +208,64 @@ static const u_int saorder_state_any[] = { }; static const int minsize[] = { - sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ - sizeof(struct sadb_sa), /* SADB_EXT_SA */ - sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ - sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ - sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ - sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_SRC */ - sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_DST */ - sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_PROXY */ - sizeof(struct sadb_key), /* SADB_EXT_KEY_AUTH */ - sizeof(struct sadb_key), /* SADB_EXT_KEY_ENCRYPT */ - sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_SRC */ - sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_DST */ - sizeof(struct sadb_sens), /* SADB_EXT_SENSITIVITY */ - sizeof(struct sadb_prop), /* SADB_EXT_PROPOSAL */ - sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_AUTH */ - sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_ENCRYPT */ - sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */ - 0, /* SADB_X_EXT_KMPRIVATE */ - sizeof(struct sadb_x_policy), /* SADB_X_EXT_POLICY */ - sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */ + sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ + sizeof(struct sadb_sa), /* SADB_EXT_SA */ + sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ + sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ + sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ + sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_SRC */ + sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_DST */ + sizeof(struct sadb_address), /* SADB_EXT_ADDRESS_PROXY */ + sizeof(struct sadb_key), /* SADB_EXT_KEY_AUTH */ + sizeof(struct sadb_key), /* SADB_EXT_KEY_ENCRYPT */ + sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_SRC */ + sizeof(struct sadb_ident), /* SADB_EXT_IDENTITY_DST */ + sizeof(struct sadb_sens), /* SADB_EXT_SENSITIVITY */ + sizeof(struct sadb_prop), /* SADB_EXT_PROPOSAL */ + sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_AUTH */ + sizeof(struct sadb_supported), /* SADB_EXT_SUPPORTED_ENCRYPT */ + 
sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */ + 0, /* SADB_X_EXT_KMPRIVATE */ + sizeof(struct sadb_x_policy), /* SADB_X_EXT_POLICY */ + sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */ sizeof(struct sadb_session_id), /* SADB_EXT_SESSION_ID */ sizeof(struct sadb_sastat), /* SADB_EXT_SASTAT */ - sizeof(struct sadb_x_ipsecif), /* SADB_X_EXT_IPSECIF */ - sizeof(struct sadb_address), /* SADB_X_EXT_ADDR_RANGE_SRC_START */ - sizeof(struct sadb_address), /* SADB_X_EXT_ADDR_RANGE_SRC_END */ - sizeof(struct sadb_address), /* SADB_X_EXT_ADDR_RANGE_DST_START */ - sizeof(struct sadb_address), /* SADB_X_EXT_ADDR_RANGE_DST_END */ - sizeof(struct sadb_address), /* SADB_EXT_MIGRATE_ADDRESS_SRC */ - sizeof(struct sadb_address), /* SADB_EXT_MIGRATE_ADDRESS_DST */ + sizeof(struct sadb_x_ipsecif), /* SADB_X_EXT_IPSECIF */ + sizeof(struct sadb_address), /* SADB_X_EXT_ADDR_RANGE_SRC_START */ + sizeof(struct sadb_address), /* SADB_X_EXT_ADDR_RANGE_SRC_END */ + sizeof(struct sadb_address), /* SADB_X_EXT_ADDR_RANGE_DST_START */ + sizeof(struct sadb_address), /* SADB_X_EXT_ADDR_RANGE_DST_END */ + sizeof(struct sadb_address), /* SADB_EXT_MIGRATE_ADDRESS_SRC */ + sizeof(struct sadb_address), /* SADB_EXT_MIGRATE_ADDRESS_DST */ sizeof(struct sadb_x_ipsecif), /* SADB_X_EXT_MIGRATE_IPSECIF */ }; static const int maxsize[] = { - sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ - sizeof(struct sadb_sa_2), /* SADB_EXT_SA */ - sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ - sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ - sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ - 0, /* SADB_EXT_ADDRESS_SRC */ - 0, /* SADB_EXT_ADDRESS_DST */ - 0, /* SADB_EXT_ADDRESS_PROXY */ - 0, /* SADB_EXT_KEY_AUTH */ - 0, /* SADB_EXT_KEY_ENCRYPT */ - 0, /* SADB_EXT_IDENTITY_SRC */ - 0, /* SADB_EXT_IDENTITY_DST */ - 0, /* SADB_EXT_SENSITIVITY */ - 0, /* SADB_EXT_PROPOSAL */ - 0, /* SADB_EXT_SUPPORTED_AUTH */ - 0, /* SADB_EXT_SUPPORTED_ENCRYPT */ - sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */ - 0, /* SADB_X_EXT_KMPRIVATE */ - 0, /* SADB_X_EXT_POLICY */ - sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */ + sizeof(struct sadb_msg), /* SADB_EXT_RESERVED */ + sizeof(struct sadb_sa_2), /* SADB_EXT_SA */ + sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_CURRENT */ + sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_HARD */ + sizeof(struct sadb_lifetime), /* SADB_EXT_LIFETIME_SOFT */ + 0, /* SADB_EXT_ADDRESS_SRC */ + 0, /* SADB_EXT_ADDRESS_DST */ + 0, /* SADB_EXT_ADDRESS_PROXY */ + 0, /* SADB_EXT_KEY_AUTH */ + 0, /* SADB_EXT_KEY_ENCRYPT */ + 0, /* SADB_EXT_IDENTITY_SRC */ + 0, /* SADB_EXT_IDENTITY_DST */ + 0, /* SADB_EXT_SENSITIVITY */ + 0, /* SADB_EXT_PROPOSAL */ + 0, /* SADB_EXT_SUPPORTED_AUTH */ + 0, /* SADB_EXT_SUPPORTED_ENCRYPT */ + sizeof(struct sadb_spirange), /* SADB_EXT_SPIRANGE */ + 0, /* SADB_X_EXT_KMPRIVATE */ + 0, /* SADB_X_EXT_POLICY */ + sizeof(struct sadb_x_sa2), /* SADB_X_SA2 */ 0, /* SADB_EXT_SESSION_ID */ 0, /* SADB_EXT_SASTAT */ - sizeof(struct sadb_x_ipsecif), /* SADB_X_EXT_IPSECIF */ - 0, /* SADB_X_EXT_ADDR_RANGE_SRC_START */ + sizeof(struct sadb_x_ipsecif), /* SADB_X_EXT_IPSECIF */ + 0, /* SADB_X_EXT_ADDR_RANGE_SRC_START */ 0, /* SADB_X_EXT_ADDR_RANGE_SRC_END */ - 0, /* SADB_X_EXT_ADDR_RANGE_DST_START */ + 0, /* SADB_X_EXT_ADDR_RANGE_DST_START */ 0, /* SADB_X_EXT_ADDR_RANGE_DST_END */ 0, /* SADB_EXT_MIGRATE_ADDRESS_SRC */ 0, /* SADB_EXT_MIGRATE_ADDRESS_DST */ @@ -278,61 +278,61 @@ static int ipsec_ah_keymin = 128; SYSCTL_DECL(_net_key); /* Thread safe: no accumulated state */ 
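
The minsize[]/maxsize[] tables above bound the length of each SADB extension type; a zero entry in maxsize[] denotes a variable-length extension. A short sketch of the check these tables support (simplified; key_validate_ext(), declared later in this file, is the authoritative version):

/* Sketch only: table-driven length check for one PF_KEY extension. */
static int
sadb_ext_len_ok(const struct sadb_ext *ext)
{
	/* caller has verified that the type indexes both tables */
	u_int type = ext->sadb_ext_type;
	int len = PFKEY_UNUNIT64(ext->sadb_ext_len); /* 8-byte units -> bytes */

	if (len < minsize[type]) {
		return 0;       /* shorter than the extension's fixed header */
	}
	if (maxsize[type] != 0 && len > maxsize[type]) {
		return 0;       /* fixed-size extension that is too long */
	}
	return 1;
}
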
-SYSCTL_INT(_net_key, KEYCTL_DEBUG_LEVEL, debug, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &key_debug_level, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_DEBUG_LEVEL, debug, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_debug_level, 0, ""); /* max count of trial for the decision of spi value */ -SYSCTL_INT(_net_key, KEYCTL_SPI_TRY, spi_trycnt, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &key_spi_trycnt, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_SPI_TRY, spi_trycnt, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_spi_trycnt, 0, ""); /* minimum spi value to allocate automatically. */ -SYSCTL_INT(_net_key, KEYCTL_SPI_MIN_VALUE, spi_minval, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &key_spi_minval, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_SPI_MIN_VALUE, spi_minval, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_spi_minval, 0, ""); /* maximun spi value to allocate automatically. */ -SYSCTL_INT(_net_key, KEYCTL_SPI_MAX_VALUE, spi_maxval, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &key_spi_maxval, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_SPI_MAX_VALUE, spi_maxval, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_spi_maxval, 0, ""); /* interval to initialize randseed */ -SYSCTL_INT(_net_key, KEYCTL_RANDOM_INT, int_random, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &key_int_random, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_RANDOM_INT, int_random, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_int_random, 0, ""); /* lifetime for larval SA; thread safe due to > compare */ -SYSCTL_INT(_net_key, KEYCTL_LARVAL_LIFETIME, larval_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &key_larval_lifetime, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_LARVAL_LIFETIME, larval_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_larval_lifetime, 0, ""); /* counter for blocking to send SADB_ACQUIRE to IKEd */ -SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_COUNT, blockacq_count, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &key_blockacq_count, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_COUNT, blockacq_count, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_blockacq_count, 0, ""); /* lifetime for blocking to send SADB_ACQUIRE to IKEd: Thread safe, > compare */ -SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_LIFETIME, blockacq_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &key_blockacq_lifetime, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_BLOCKACQ_LIFETIME, blockacq_lifetime, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_blockacq_lifetime, 0, ""); /* ESP auth */ -SYSCTL_INT(_net_key, KEYCTL_ESP_AUTH, esp_auth, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &ipsec_esp_auth, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_ESP_AUTH, esp_auth, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &ipsec_esp_auth, 0, ""); /* minimum ESP key length */ -SYSCTL_INT(_net_key, KEYCTL_ESP_KEYMIN, esp_keymin, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &ipsec_esp_keymin, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_ESP_KEYMIN, esp_keymin, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &ipsec_esp_keymin, 0, ""); /* minimum AH key length */ -SYSCTL_INT(_net_key, KEYCTL_AH_KEYMIN, ah_keymin, CTLFLAG_RW | CTLFLAG_LOCKED, \ - &ipsec_ah_keymin, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_AH_KEYMIN, ah_keymin, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &ipsec_ah_keymin, 0, ""); /* perfered old SA rather than new SA */ -SYSCTL_INT(_net_key, KEYCTL_PREFERED_OLDSA, prefered_oldsa, CTLFLAG_RW | CTLFLAG_LOCKED,\ - &key_preferred_oldsa, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_PREFERED_OLDSA, prefered_oldsa, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &key_preferred_oldsa, 0, ""); /* time between NATT keepalives in seconds, 0 disabled */ -SYSCTL_INT(_net_key, KEYCTL_NATT_KEEPALIVE_INTERVAL, natt_keepalive_interval, CTLFLAG_RW | CTLFLAG_LOCKED,\ - &natt_keepalive_interval, 0, ""); +SYSCTL_INT(_net_key, KEYCTL_NATT_KEEPALIVE_INTERVAL, 
natt_keepalive_interval, CTLFLAG_RW | CTLFLAG_LOCKED, \ + &natt_keepalive_interval, 0, ""); /* PF_KEY statistics */ -SYSCTL_STRUCT(_net_key, KEYCTL_PFKEYSTAT, pfkeystat, CTLFLAG_RD | CTLFLAG_LOCKED,\ - &pfkeystat, pfkeystat, ""); +SYSCTL_STRUCT(_net_key, KEYCTL_PFKEYSTAT, pfkeystat, CTLFLAG_RD | CTLFLAG_LOCKED, \ + &pfkeystat, pfkeystat, ""); #ifndef LIST_FOREACH #define LIST_FOREACH(elm, head, field) \ @@ -354,20 +354,20 @@ LIST_INSERT_AFTER(curelm, elm, field);\ #define KEY_CHKSASTATE(head, sav, name) \ do { \ -if ((head) != (sav)) { \ +if ((head) != (sav)) { \ ipseclog((LOG_DEBUG, "%s: state mismatched (TREE=%d SA=%d)\n", \ -(name), (head), (sav))); \ -continue; \ -} \ +(name), (head), (sav))); \ +continue; \ +} \ } while (0) #define KEY_CHKSPDIR(head, sp, name) \ do { \ -if ((head) != (sp)) { \ +if ((head) != (sp)) { \ ipseclog((LOG_DEBUG, "%s: direction mismatched (TREE=%d SP=%d), " \ -"anyway continue.\n", \ -(name), (head), (sp))); \ -} \ +"anyway continue.\n", \ +(name), (head), (sp))); \ +} \ } while (0) #if 1 @@ -430,7 +430,7 @@ bzero((idx), sizeof(struct secasindex)); \ (idx)->reqid = (r); \ bcopy((s), &(idx)->src, ((const struct sockaddr *)(s))->sa_len); \ bcopy((d), &(idx)->dst, ((const struct sockaddr *)(d))->sa_len); \ -(idx)->ipsec_ifindex = (ifi); \ +(idx)->ipsec_ifindex = (ifi); \ } while (0) /* key statistics */ @@ -452,54 +452,54 @@ static void key_delsp(struct secpolicy *); static struct secpolicy *key_getsp(struct secpolicyindex *); static u_int32_t key_newreqid(void); static struct mbuf *key_gather_mbuf(struct mbuf *, - const struct sadb_msghdr *, int, int, int *); + const struct sadb_msghdr *, int, int, int *); static int key_spdadd(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static u_int32_t key_getnewspid(void); static int key_spddelete(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_spddelete2(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_spdenable(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_spddisable(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_spdget(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_spdflush(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_spddump(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static struct mbuf *key_setdumpsp(struct secpolicy *, - u_int8_t, u_int32_t, u_int32_t); + u_int8_t, u_int32_t, u_int32_t); static u_int key_getspreqmsglen(struct secpolicy *); static int key_spdexpire(struct secpolicy *); static struct secashead *key_newsah(struct secasindex *, ifnet_t, u_int, u_int8_t); static struct secasvar *key_newsav(struct mbuf *, - const struct sadb_msghdr *, struct secashead *, int *, - struct socket *); + const struct sadb_msghdr *, struct secashead *, int *, + struct socket *); static struct secashead *key_getsah(struct secasindex *); static struct secasvar *key_checkspidup(struct secasindex *, u_int32_t); static void key_setspi __P((struct secasvar *, u_int32_t)); static struct secasvar *key_getsavbyspi(struct secashead *, u_int32_t); static int key_setsaval(struct secasvar *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int 
key_mature(struct secasvar *); static struct mbuf *key_setdumpsa(struct secasvar *, u_int8_t, - u_int8_t, u_int32_t, u_int32_t); + u_int8_t, u_int32_t, u_int32_t); static struct mbuf *key_setsadbmsg(u_int8_t, u_int16_t, u_int8_t, - u_int32_t, pid_t, u_int16_t); + u_int32_t, pid_t, u_int16_t); static struct mbuf *key_setsadbsa(struct secasvar *); static struct mbuf *key_setsadbaddr(u_int16_t, - struct sockaddr *, u_int8_t, u_int16_t); + struct sockaddr *, u_int8_t, u_int16_t); static struct mbuf *key_setsadbipsecif(ifnet_t, ifnet_t, ifnet_t, int); #if 0 static struct mbuf *key_setsadbident(u_int16_t, u_int16_t, caddr_t, - int, u_int64_t); + int, u_int64_t); #endif static struct mbuf *key_setsadbxsa2(u_int8_t, u_int32_t, u_int32_t, u_int16_t); static struct mbuf *key_setsadbxpolicy(u_int16_t, u_int8_t, - u_int32_t); + u_int32_t); static void *key_newbuf(const void *, u_int); #if INET6 static int key_ismyaddr6(struct sockaddr_in6 *); @@ -507,17 +507,17 @@ static int key_ismyaddr6(struct sockaddr_in6 *); static void key_update_natt_keepalive_timestamp(struct secasvar *, struct secasvar *); /* flags for key_cmpsaidx() */ -#define CMP_HEAD 0x1 /* protocol, addresses. */ -#define CMP_PORT 0x2 /* additionally HEAD, reqid, mode. */ -#define CMP_REQID 0x4 /* additionally HEAD, reqid. */ +#define CMP_HEAD 0x1 /* protocol, addresses. */ +#define CMP_PORT 0x2 /* additionally HEAD, reqid, mode. */ +#define CMP_REQID 0x4 /* additionally HEAD, reqid. */ #define CMP_MODE 0x8 /* additionally mode. */ -#define CMP_EXACTLY 0xF /* all elements. */ +#define CMP_EXACTLY 0xF /* all elements. */ static int key_cmpsaidx(struct secasindex *, struct secasindex *, int); static int key_cmpspidx_exactly(struct secpolicyindex *, - struct secpolicyindex *); + struct secpolicyindex *); static int key_cmpspidx_withmask(struct secpolicyindex *, - struct secpolicyindex *); + struct secpolicyindex *); static int key_sockaddrcmp(struct sockaddr *, struct sockaddr *, int); static int key_is_addr_in_range(struct sockaddr_storage *, struct secpolicyaddrrange *); static int key_bbcmp(caddr_t, caddr_t, u_int); @@ -526,19 +526,19 @@ static u_int16_t key_satype2proto(u_int8_t); static u_int8_t key_proto2satype(u_int16_t); static int key_getspi(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static u_int32_t key_do_getnewspi(struct sadb_spirange *, struct secasindex *); static int key_update(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); #if IPSEC_DOSEQCHECK static struct secasvar *key_getsavbyseq(struct secashead *, u_int32_t); #endif static int key_add(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_setident(struct secashead *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static struct mbuf *key_getmsgbuf_x1(struct mbuf *, const struct sadb_msghdr *); static int key_delete(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_get(struct socket *, struct mbuf *, const struct sadb_msghdr *); static void key_getcomb_setlifetime(struct sadb_comb *); @@ -558,37 +558,37 @@ static struct secacq *key_getacqbyseq(u_int32_t); static struct secspacq *key_newspacq(struct secpolicyindex *); static struct secspacq *key_getspacq(struct secpolicyindex *); static int key_acquire2(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_register(struct socket *, struct mbuf *, - const struct 
sadb_msghdr *); + const struct sadb_msghdr *); static int key_expire(struct secasvar *); static int key_flush(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_dump(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_promisc(struct socket *, struct mbuf *, - const struct sadb_msghdr *); + const struct sadb_msghdr *); static int key_senderror(struct socket *, struct mbuf *, int); static int key_validate_ext(const struct sadb_ext *, int); static int key_align(struct mbuf *, struct sadb_msghdr *); static struct mbuf *key_alloc_mbuf(int); -static int key_getsastat (struct socket *, struct mbuf *, const struct sadb_msghdr *); -static int key_migrate (struct socket *, struct mbuf *, const struct sadb_msghdr *); +static int key_getsastat(struct socket *, struct mbuf *, const struct sadb_msghdr *); +static int key_migrate(struct socket *, struct mbuf *, const struct sadb_msghdr *); static int key_setsaval2(struct secasvar *sav, - u_int8_t satype, - u_int8_t alg_auth, - u_int8_t alg_enc, - u_int32_t flags, - u_int8_t replay, - struct sadb_key *key_auth, - u_int16_t key_auth_len, - struct sadb_key *key_enc, - u_int16_t key_enc_len, - u_int16_t natt_port, - u_int32_t seq, - u_int32_t spi, - u_int32_t pid, - struct sadb_lifetime *lifetime_hard, - struct sadb_lifetime *lifetime_soft); + u_int8_t satype, + u_int8_t alg_auth, + u_int8_t alg_enc, + u_int32_t flags, + u_int8_t replay, + struct sadb_key *key_auth, + u_int16_t key_auth_len, + struct sadb_key *key_enc, + u_int16_t key_enc_len, + u_int16_t natt_port, + u_int32_t seq, + u_int32_t spi, + u_int32_t pid, + struct sadb_lifetime *lifetime_hard, + struct sadb_lifetime *lifetime_soft); static void bzero_keys(const struct sadb_msghdr *); extern int ipsec_bypass; @@ -608,66 +608,68 @@ key_init(struct protosw *pp, struct domain *dp) { static int key_initialized = 0; int i; - - VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED); + + VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED); _CASSERT(PFKEY_ALIGN8(sizeof(struct sadb_msg)) <= _MHLEN); - - if (key_initialized) + + if (key_initialized) { return; + } key_initialized = 1; sadb_mutex_grp_attr = lck_grp_attr_alloc_init(); sadb_mutex_grp = lck_grp_alloc_init("sadb", sadb_mutex_grp_attr); sadb_mutex_attr = lck_attr_alloc_init(); - + lck_mtx_init(sadb_mutex, sadb_mutex_grp, sadb_mutex_attr); pfkey_stat_mutex_grp_attr = lck_grp_attr_alloc_init(); pfkey_stat_mutex_grp = lck_grp_alloc_init("pfkey_stat", pfkey_stat_mutex_grp_attr); pfkey_stat_mutex_attr = lck_attr_alloc_init(); - + lck_mtx_init(pfkey_stat_mutex, pfkey_stat_mutex_grp, pfkey_stat_mutex_attr); - - for (i = 0; i < SPIHASHSIZE; i++) + + for (i = 0; i < SPIHASHSIZE; i++) { LIST_INIT(&spihash[i]); - + } + raw_init(pp, dp); - + bzero((caddr_t)&key_cb, sizeof(key_cb)); for (i = 0; i < IPSEC_DIR_MAX; i++) { LIST_INIT(&sptree[i]); } ipsec_policy_count = 0; - + LIST_INIT(&sahtree); - + for (i = 0; i <= SADB_SATYPE_MAX; i++) { LIST_INIT(®tree[i]); } ipsec_sav_count = 0; - + #ifndef IPSEC_NONBLOCK_ACQUIRE LIST_INIT(&acqtree); #endif LIST_INIT(&spacqtree); - + /* system default */ #if INET ip4_def_policy.policy = IPSEC_POLICY_NONE; - ip4_def_policy.refcnt++; /*never reclaim this*/ + ip4_def_policy.refcnt++; /*never reclaim this*/ #endif #if INET6 ip6_def_policy.policy = IPSEC_POLICY_NONE; - ip6_def_policy.refcnt++; /*never reclaim this*/ + ip6_def_policy.refcnt++; /*never reclaim this*/ #endif - + key_timehandler_running = 0; - + /* 
initialize key statistics */ keystat.getspi_count = 1; - + #ifndef __APPLE__ printf("IPsec: Initialized Security Association Processing.\n"); #endif @@ -682,10 +684,11 @@ key_start_timehandler(void) key_timehandler_running = 1; (void)timeout((void *)key_timehandler, (void *)0, hz); } - + /* Turn off the ipsec bypass */ - if (ipsec_bypass != 0) + if (ipsec_bypass != 0) { ipsec_bypass = 0; + } } /* %%% IPsec policy management */ @@ -697,64 +700,69 @@ key_start_timehandler(void) */ struct secpolicy * key_allocsp( - struct secpolicyindex *spidx, - u_int dir) + struct secpolicyindex *spidx, + u_int dir) { struct secpolicy *sp; struct timeval tv; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); /* sanity check */ - if (spidx == NULL) + if (spidx == NULL) { panic("key_allocsp: NULL pointer is passed.\n"); - + } + /* check direction */ switch (dir) { - case IPSEC_DIR_INBOUND: - case IPSEC_DIR_OUTBOUND: - break; - default: - panic("key_allocsp: Invalid direction is passed.\n"); + case IPSEC_DIR_INBOUND: + case IPSEC_DIR_OUTBOUND: + break; + default: + panic("key_allocsp: Invalid direction is passed.\n"); } - + /* get a SP entry */ KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("*** objects\n"); - kdebug_secpolicyindex(spidx)); - + printf("*** objects\n"); + kdebug_secpolicyindex(spidx)); + lck_mtx_lock(sadb_mutex); LIST_FOREACH(sp, &sptree[dir], chain) { KEYDEBUG(KEYDEBUG_IPSEC_DATA, - printf("*** in SPD\n"); - kdebug_secpolicyindex(&sp->spidx)); - - if (sp->state == IPSEC_SPSTATE_DEAD) + printf("*** in SPD\n"); + kdebug_secpolicyindex(&sp->spidx)); + + if (sp->state == IPSEC_SPSTATE_DEAD) { continue; - + } + /* If the policy is disabled, skip */ - if (sp->disabled > 0) + if (sp->disabled > 0) { continue; - - /* If the incoming spidx specifies bound if, - ignore unbound policies*/ - if (spidx->internal_if != NULL - && (sp->spidx.internal_if == NULL || sp->ipsec_if == NULL)) - continue; - - if (key_cmpspidx_withmask(&sp->spidx, spidx)) + } + + /* If the incoming spidx specifies bound if, + * ignore unbound policies*/ + if (spidx->internal_if != NULL + && (sp->spidx.internal_if == NULL || sp->ipsec_if == NULL)) { + continue; + } + + if (key_cmpspidx_withmask(&sp->spidx, spidx)) { goto found; + } } lck_mtx_unlock(sadb_mutex); return NULL; - + found: - + /* found a SPD entry */ microtime(&tv); sp->lastused = tv.tv_sec; sp->refcnt++; lck_mtx_unlock(sadb_mutex); - + /* sanity check */ KEY_CHKSPDIR(sp->spidx.dir, dir, "key_allocsp"); KEYDEBUG(KEYDEBUG_IPSEC_STAMP, @@ -769,10 +777,10 @@ found: */ struct secpolicy * key_gettunnel( - struct sockaddr *osrc, - struct sockaddr *odst, - struct sockaddr *isrc, - struct sockaddr *idst) + struct sockaddr *osrc, + struct sockaddr *odst, + struct sockaddr *isrc, + struct sockaddr *idst) { struct secpolicy *sp; const int dir = IPSEC_DIR_INBOUND; @@ -780,56 +788,62 @@ key_gettunnel( struct ipsecrequest *r1, *r2, *p; struct sockaddr *os, *od, *is, *id; struct secpolicyindex spidx; - + if (isrc->sa_family != idst->sa_family) { ipseclog((LOG_ERR, "protocol family mismatched %d != %d\n.", - isrc->sa_family, idst->sa_family)); + isrc->sa_family, idst->sa_family)); return NULL; } - + lck_mtx_lock(sadb_mutex); LIST_FOREACH(sp, &sptree[dir], chain) { - if (sp->state == IPSEC_SPSTATE_DEAD) + if (sp->state == IPSEC_SPSTATE_DEAD) { continue; - + } + r1 = r2 = NULL; for (p = sp->req; p; p = p->next) { - if (p->saidx.mode != IPSEC_MODE_TUNNEL) + if (p->saidx.mode != IPSEC_MODE_TUNNEL) { continue; - + } + r1 = r2; r2 = p; - + if (!r1) { /* here we look at address matches only */ 
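				/*
				 * First tunnel-mode request in the chain (r1 == NULL):
				 * match the inner addresses against the policy index.
				 * Outer levels are instead compared against the previous
				 * request's tunnel endpoints in the else branch below.
				 */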
spidx = sp->spidx; if (isrc->sa_len > sizeof(spidx.src) || - idst->sa_len > sizeof(spidx.dst)) + idst->sa_len > sizeof(spidx.dst)) { continue; + } bcopy(isrc, &spidx.src, isrc->sa_len); bcopy(idst, &spidx.dst, idst->sa_len); - if (!key_cmpspidx_withmask(&sp->spidx, &spidx)) + if (!key_cmpspidx_withmask(&sp->spidx, &spidx)) { continue; + } } else { is = (struct sockaddr *)&r1->saidx.src; id = (struct sockaddr *)&r1->saidx.dst; if (key_sockaddrcmp(is, isrc, 0) || - key_sockaddrcmp(id, idst, 0)) + key_sockaddrcmp(id, idst, 0)) { continue; + } } - + os = (struct sockaddr *)&r2->saidx.src; od = (struct sockaddr *)&r2->saidx.dst; if (key_sockaddrcmp(os, osrc, 0) || - key_sockaddrcmp(od, odst, 0)) + key_sockaddrcmp(od, odst, 0)) { continue; - + } + goto found; } } lck_mtx_unlock(sadb_mutex); return NULL; - + found: microtime(&tv); sp->lastused = tv.tv_sec; @@ -838,9 +852,10 @@ found: return sp; } -struct secasvar *key_alloc_outbound_sav_for_interface(ifnet_t interface, int family, - struct sockaddr *src, - struct sockaddr *dst) +struct secasvar * +key_alloc_outbound_sav_for_interface(ifnet_t interface, int family, + struct sockaddr *src, + struct sockaddr *dst) { struct secashead *sah; struct secasvar *sav; @@ -851,13 +866,13 @@ struct secasvar *key_alloc_outbound_sav_for_interface(ifnet_t interface, int fam struct sockaddr_in *sin; u_int16_t dstport; bool strict = true; - + if (interface == NULL) { - return NULL; + return NULL; } - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + lck_mtx_lock(sadb_mutex); do { @@ -866,12 +881,11 @@ struct secasvar *key_alloc_outbound_sav_for_interface(ifnet_t interface, int fam continue; } if (sah->ipsec_if == interface && - (family == AF_INET6 || family == AF_INET) && - sah->dir == IPSEC_DIR_OUTBOUND) { - + (family == AF_INET6 || family == AF_INET) && + sah->dir == IPSEC_DIR_OUTBOUND) { if (strict && - sah->saidx.mode == IPSEC_MODE_TRANSPORT && - src != NULL && dst != NULL) { + sah->saidx.mode == IPSEC_MODE_TRANSPORT && + src != NULL && dst != NULL) { // Validate addresses for transport mode if (key_sockaddrcmp((struct sockaddr *)&sah->saidx.src, src, 0) != 0) { // Source doesn't match @@ -919,7 +933,7 @@ struct secasvar *key_alloc_outbound_sav_for_interface(ifnet_t interface, int fam break; } } while (true); - + lck_mtx_unlock(sadb_mutex); return NULL; } @@ -932,47 +946,50 @@ struct secasvar *key_alloc_outbound_sav_for_interface(ifnet_t interface, int fam */ int key_checkrequest( - struct ipsecrequest *isr, - struct secasindex *saidx, - struct secasvar **sav) + struct ipsecrequest *isr, + struct secasindex *saidx, + struct secasvar **sav) { u_int level; int error; struct sockaddr_in *sin; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + *sav = NULL; - + /* sanity check */ - if (isr == NULL || saidx == NULL) + if (isr == NULL || saidx == NULL) { panic("key_checkrequest: NULL pointer is passed.\n"); - + } + /* check mode */ switch (saidx->mode) { - case IPSEC_MODE_TRANSPORT: - case IPSEC_MODE_TUNNEL: - break; - case IPSEC_MODE_ANY: - default: - panic("key_checkrequest: Invalid policy defined.\n"); + case IPSEC_MODE_TRANSPORT: + case IPSEC_MODE_TUNNEL: + break; + case IPSEC_MODE_ANY: + default: + panic("key_checkrequest: Invalid policy defined.\n"); } - + /* get current level */ level = ipsec_get_reqlevel(isr); - - + + /* * key_allocsa_policy should allocate the oldest SA available. * See key_do_allocsa_policy(), and draft-jenkins-ipsec-rekeying-03.txt. 
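 * Rationale: while a rekey is in flight both the old and the new SA
 * are valid; keeping the old SA on the outbound side until it expires
 * avoids dropping traffic when the peer has not yet installed the
 * replacement.  The preference is tunable at run time through
 * key_preferred_oldsa, which also selects between the prefer_old and
 * prefer_new state orderings.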
*/ - if (*sav == NULL) + if (*sav == NULL) { *sav = key_allocsa_policy(saidx); - + } + /* When there is SA. */ - if (*sav != NULL) + if (*sav != NULL) { return 0; - + } + /* There is no SA. * * Remove dst port - used for special natt support - don't call @@ -985,10 +1002,10 @@ key_checkrequest( if ((error = key_acquire(saidx, isr->sp)) != 0) { /* XXX What should I do ? */ ipseclog((LOG_DEBUG, "key_checkrequest: error %d returned " - "from key_acquire.\n", error)); + "from key_acquire.\n", error)); return error; } - + return level == IPSEC_LEVEL_REQUIRE ? ENOENT : 0; } @@ -1002,7 +1019,7 @@ u_int32_t sah_search_calls = 0; u_int32_t sah_search_count = 0; struct secasvar * key_allocsa_policy( - struct secasindex *saidx) + struct secasindex *saidx) { struct secashead *sah; struct secasvar *sav; @@ -1010,22 +1027,24 @@ key_allocsa_policy( const u_int *saorder_state_valid; int arraysize; struct sockaddr_in *sin; - u_int16_t dstport; - + u_int16_t dstport; + lck_mtx_lock(sadb_mutex); sah_search_calls++; LIST_FOREACH(sah, &sahtree, chain) { sah_search_count++; - if (sah->state == SADB_SASTATE_DEAD) + if (sah->state == SADB_SASTATE_DEAD) { continue; - if (key_cmpsaidx(&sah->saidx, saidx, CMP_MODE | CMP_REQID)) + } + if (key_cmpsaidx(&sah->saidx, saidx, CMP_MODE | CMP_REQID)) { goto found; + } } lck_mtx_unlock(sadb_mutex); return NULL; - + found: - + /* * search a valid state list for outbound packet. * This search order is important. @@ -1037,17 +1056,17 @@ found: saorder_state_valid = saorder_state_valid_prefer_new; arraysize = _ARRAYLEN(saorder_state_valid_prefer_new); } - - + + sin = (struct sockaddr_in *)&saidx->dst; dstport = sin->sin_port; - if (saidx->mode == IPSEC_MODE_TRANSPORT) + if (saidx->mode == IPSEC_MODE_TRANSPORT) { sin->sin_port = IPSEC_PORT_ANY; - + } + for (stateidx = 0; stateidx < arraysize; stateidx++) { - state = saorder_state_valid[stateidx]; - + sav = key_do_allocsa_policy(sah, state, dstport); if (sav != NULL) { lck_mtx_unlock(sadb_mutex); @@ -1059,62 +1078,70 @@ found: } static void -key_send_delete (struct secasvar *sav) +key_send_delete(struct secasvar *sav) { struct mbuf *m, *result; u_int8_t satype; - + key_sa_chgstate(sav, SADB_SASTATE_DEAD); - - if ((satype = key_proto2satype(sav->sah->saidx.proto)) == 0) + + if ((satype = key_proto2satype(sav->sah->saidx.proto)) == 0) { panic("key_do_allocsa_policy: invalid proto is passed.\n"); - + } + m = key_setsadbmsg(SADB_DELETE, 0, - satype, 0, 0, sav->refcnt - 1); - if (!m) + satype, 0, 0, sav->refcnt - 1); + if (!m) { goto msgfail; + } result = m; - + /* set sadb_address for saidx's. */ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, - (struct sockaddr *)&sav->sah->saidx.src, - sav->sah->saidx.src.ss_len << 3, - IPSEC_ULPROTO_ANY); - if (!m) + (struct sockaddr *)&sav->sah->saidx.src, + sav->sah->saidx.src.ss_len << 3, + IPSEC_ULPROTO_ANY); + if (!m) { goto msgfail; + } m_cat(result, m); - + /* set sadb_address for saidx's. 
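 * Destination this time.  (Note that the prefix-length argument in
 * the call below is derived from saidx.src.ss_len even though the
 * extension describes the destination; this is seemingly inherited
 * from the KAME code and is harmless while both endpoints are the
 * same address family.)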
*/ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, - (struct sockaddr *)&sav->sah->saidx.dst, - sav->sah->saidx.src.ss_len << 3, - IPSEC_ULPROTO_ANY); - if (!m) + (struct sockaddr *)&sav->sah->saidx.dst, + sav->sah->saidx.src.ss_len << 3, + IPSEC_ULPROTO_ANY); + if (!m) { goto msgfail; + } m_cat(result, m); - + /* create SA extension */ m = key_setsadbsa(sav); - if (!m) + if (!m) { goto msgfail; + } m_cat(result, m); - + if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, - sizeof(struct sadb_msg)); - if (result == NULL) + sizeof(struct sadb_msg)); + if (result == NULL) { goto msgfail; + } } - + result->m_pkthdr.len = 0; - for (m = result; m; m = m->m_next) + for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; + } mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); - + PFKEY_UNIT64(result->m_pkthdr.len); + if (key_sendup_mbuf(NULL, result, - KEY_SENDUP_REGISTERED)) + KEY_SENDUP_REGISTERED)) { goto msgfail; + } msgfail: key_freesav(sav, KEY_SADB_LOCKED); } @@ -1128,83 +1155,87 @@ msgfail: */ static struct secasvar * key_do_allocsa_policy( - struct secashead *sah, - u_int state, - u_int16_t dstport) + struct secashead *sah, + u_int state, + u_int16_t dstport) { struct secasvar *sav, *nextsav, *candidate, *natt_candidate, *no_natt_candidate, *d; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* initialize */ candidate = NULL; natt_candidate = NULL; no_natt_candidate = NULL; - + for (sav = LIST_FIRST(&sah->savtree[state]); - sav != NULL; - sav = nextsav) { - + sav != NULL; + sav = nextsav) { nextsav = LIST_NEXT(sav, chain); - + /* sanity check */ KEY_CHKSASTATE(sav->state, state, "key_do_allocsa_policy"); - + if (sah->saidx.mode == IPSEC_MODE_TUNNEL && dstport && ((sav->flags & SADB_X_EXT_NATT) != 0) && - ntohs(dstport) != sav->remote_ike_port) + ntohs(dstport) != sav->remote_ike_port) { continue; - + } + if (sah->saidx.mode == IPSEC_MODE_TRANSPORT && ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) && - ntohs(dstport) != sav->remote_ike_port) - continue; /* skip this one - not a match - or not UDP */ - + ntohs(dstport) != sav->remote_ike_port) { + continue; /* skip this one - not a match - or not UDP */ + } if ((sah->saidx.mode == IPSEC_MODE_TUNNEL && - ((sav->flags & SADB_X_EXT_NATT) != 0)) || + ((sav->flags & SADB_X_EXT_NATT) != 0)) || (sah->saidx.mode == IPSEC_MODE_TRANSPORT && - ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0))) { - if (natt_candidate == NULL) { - natt_candidate = sav; - continue; - } else - candidate = natt_candidate; + ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0))) { + if (natt_candidate == NULL) { + natt_candidate = sav; + continue; } else { - if (no_natt_candidate == NULL) { - no_natt_candidate = sav; - continue; - } else - candidate = no_natt_candidate; + candidate = natt_candidate; + } + } else { + if (no_natt_candidate == NULL) { + no_natt_candidate = sav; + continue; + } else { + candidate = no_natt_candidate; } - + } + /* Which SA is the better ? */ - + /* sanity check 2 */ - if (candidate->lft_c == NULL || sav->lft_c == NULL) + if (candidate->lft_c == NULL || sav->lft_c == NULL) { panic("key_do_allocsa_policy: " - "lifetime_current is NULL.\n"); - + "lifetime_current is NULL.\n"); + } + /* What the best method is to compare ? 
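 * The comparison key is lft_c->sadb_lifetime_addtime, the time the
 * SA was created.  With key_preferred_oldsa set, the older SA (the
 * smaller addtime) is kept as candidate; otherwise the newer SA wins
 * and the superseded one is scheduled for deletion through
 * key_send_delete(), provided its lifetime is not permanent.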
*/ if (key_preferred_oldsa) { if (candidate->lft_c->sadb_lifetime_addtime > - sav->lft_c->sadb_lifetime_addtime) { - if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) + sav->lft_c->sadb_lifetime_addtime) { + if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { natt_candidate = sav; - else + } else { no_natt_candidate = sav; + } } continue; /*NOTREACHED*/ } - + /* prefered new sa rather than old sa */ if (candidate->lft_c->sadb_lifetime_addtime < - sav->lft_c->sadb_lifetime_addtime) { + sav->lft_c->sadb_lifetime_addtime) { d = candidate; if ((sah->saidx.mode == IPSEC_MODE_TUNNEL && - ((sav->flags & SADB_X_EXT_NATT) != 0)) || - (sah->saidx.mode == IPSEC_MODE_TRANSPORT && - ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0))) { + ((sav->flags & SADB_X_EXT_NATT) != 0)) || + (sah->saidx.mode == IPSEC_MODE_TRANSPORT && + ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0))) { natt_candidate = sav; } else { no_natt_candidate = sav; @@ -1212,7 +1243,7 @@ key_do_allocsa_policy( } else { d = sav; } - + /* * prepared to delete the SA when there is more * suitable candidate and the lifetime of the SA is not @@ -1222,20 +1253,21 @@ key_do_allocsa_policy( key_send_delete(d); } } - + /* choose latest if both types present */ - if (natt_candidate == NULL) + if (natt_candidate == NULL) { candidate = no_natt_candidate; - else if (no_natt_candidate == NULL) + } else if (no_natt_candidate == NULL) { candidate = natt_candidate; - else if (sah->saidx.mode == IPSEC_MODE_TUNNEL && dstport) + } else if (sah->saidx.mode == IPSEC_MODE_TUNNEL && dstport) { candidate = natt_candidate; - else if (natt_candidate->lft_c->sadb_lifetime_addtime > - no_natt_candidate->lft_c->sadb_lifetime_addtime) + } else if (natt_candidate->lft_c->sadb_lifetime_addtime > + no_natt_candidate->lft_c->sadb_lifetime_addtime) { candidate = natt_candidate; - else + } else { candidate = no_natt_candidate; - + } + if (candidate) { candidate->refcnt++; KEYDEBUG(KEYDEBUG_IPSEC_STAMP, @@ -1263,22 +1295,22 @@ key_do_allocsa_policy( */ struct secasvar * key_allocsa( - u_int family, - caddr_t src, - caddr_t dst, - u_int proto, - u_int32_t spi) + u_int family, + caddr_t src, + caddr_t dst, + u_int proto, + u_int32_t spi) { return key_allocsa_extended(family, src, dst, proto, spi, NULL); } struct secasvar * key_allocsa_extended(u_int family, - caddr_t src, - caddr_t dst, - u_int proto, - u_int32_t spi, - ifnet_t interface) + caddr_t src, + caddr_t dst, + u_int proto, + u_int32_t spi, + ifnet_t interface) { struct secasvar *sav, *match; u_int stateidx, state, tmpidx, matchidx; @@ -1286,13 +1318,14 @@ key_allocsa_extended(u_int family, struct sockaddr_in6 sin6; const u_int *saorder_state_valid; int arraysize; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (src == NULL || dst == NULL) + if (src == NULL || dst == NULL) { panic("key_allocsa: NULL pointer is passed.\n"); - + } + /* * when both systems employ similar strategy to use a SA. * the search order is important even in the inbound case. @@ -1304,7 +1337,7 @@ key_allocsa_extended(u_int family, saorder_state_valid = saorder_state_valid_prefer_new; arraysize = _ARRAYLEN(saorder_state_valid_prefer_new); } - + /* * searching SAD. * XXX: to be checked internal IP header somewhere. 
Also when @@ -1319,17 +1352,20 @@ key_allocsa_extended(u_int family, matchidx = arraysize; lck_mtx_lock(sadb_mutex); LIST_FOREACH(sav, &spihash[SPIHASH(spi)], spihash) { - if (sav->spi != spi) + if (sav->spi != spi) { continue; + } if (interface != NULL && - sav->sah->ipsec_if != interface) { + sav->sah->ipsec_if != interface) { continue; } - if (proto != sav->sah->saidx.proto) + if (proto != sav->sah->saidx.proto) { continue; + } if (family != sav->sah->saidx.src.ss_family || - family != sav->sah->saidx.dst.ss_family) + family != sav->sah->saidx.dst.ss_family) { continue; + } tmpidx = arraysize; for (stateidx = 0; stateidx < matchidx; stateidx++) { state = saorder_state_valid[stateidx]; @@ -1338,91 +1374,97 @@ key_allocsa_extended(u_int family, break; } } - if (tmpidx >= matchidx) + if (tmpidx >= matchidx) { continue; - -#if 0 /* don't check src */ + } + +#if 0 /* don't check src */ /* check src address */ switch (family) { - case AF_INET: - bzero(&sin, sizeof(sin)); - sin.sin_family = AF_INET; - sin.sin_len = sizeof(sin); - bcopy(src, &sin.sin_addr, - sizeof(sin.sin_addr)); - if (key_sockaddrcmp((struct sockaddr*)&sin, - (struct sockaddr *)&sav->sah->saidx.src, 0) != 0) - continue; - break; - case AF_INET6: - bzero(&sin6, sizeof(sin6)); - sin6.sin6_family = AF_INET6; - sin6.sin6_len = sizeof(sin6); - bcopy(src, &sin6.sin6_addr, - sizeof(sin6.sin6_addr)); - if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr)) { - /* kame fake scopeid */ - sin6.sin6_scope_id = - ntohs(sin6.sin6_addr.s6_addr16[1]); - sin6.sin6_addr.s6_addr16[1] = 0; - } - if (key_sockaddrcmp((struct sockaddr*)&sin6, - (struct sockaddr *)&sav->sah->saidx.src, 0) != 0) - continue; - break; - default: - ipseclog((LOG_DEBUG, "key_allocsa: " - "unknown address family=%d.\n", - family)); + case AF_INET: + bzero(&sin, sizeof(sin)); + sin.sin_family = AF_INET; + sin.sin_len = sizeof(sin); + bcopy(src, &sin.sin_addr, + sizeof(sin.sin_addr)); + if (key_sockaddrcmp((struct sockaddr*)&sin, + (struct sockaddr *)&sav->sah->saidx.src, 0) != 0) { + continue; + } + break; + case AF_INET6: + bzero(&sin6, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_len = sizeof(sin6); + bcopy(src, &sin6.sin6_addr, + sizeof(sin6.sin6_addr)); + if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr)) { + /* kame fake scopeid */ + sin6.sin6_scope_id = + ntohs(sin6.sin6_addr.s6_addr16[1]); + sin6.sin6_addr.s6_addr16[1] = 0; + } + if (key_sockaddrcmp((struct sockaddr*)&sin6, + (struct sockaddr *)&sav->sah->saidx.src, 0) != 0) { continue; + } + break; + default: + ipseclog((LOG_DEBUG, "key_allocsa: " + "unknown address family=%d.\n", + family)); + continue; } - + #endif /* check dst address */ switch (family) { - case AF_INET: - bzero(&sin, sizeof(sin)); - sin.sin_family = AF_INET; - sin.sin_len = sizeof(sin); - bcopy(dst, &sin.sin_addr, - sizeof(sin.sin_addr)); - if (key_sockaddrcmp((struct sockaddr*)&sin, - (struct sockaddr *)&sav->sah->saidx.dst, 0) != 0) - continue; - - break; - case AF_INET6: - bzero(&sin6, sizeof(sin6)); - sin6.sin6_family = AF_INET6; - sin6.sin6_len = sizeof(sin6); - bcopy(dst, &sin6.sin6_addr, - sizeof(sin6.sin6_addr)); - if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr)) { - /* kame fake scopeid */ - sin6.sin6_scope_id = - ntohs(sin6.sin6_addr.s6_addr16[1]); - sin6.sin6_addr.s6_addr16[1] = 0; - } - if (key_sockaddrcmp((struct sockaddr*)&sin6, - (struct sockaddr *)&sav->sah->saidx.dst, 0) != 0) - continue; - break; - default: - ipseclog((LOG_DEBUG, "key_allocsa: " - "unknown address family=%d.\n", family)); + case AF_INET: + bzero(&sin, sizeof(sin)); + 
sin.sin_family = AF_INET; + sin.sin_len = sizeof(sin); + bcopy(dst, &sin.sin_addr, + sizeof(sin.sin_addr)); + if (key_sockaddrcmp((struct sockaddr*)&sin, + (struct sockaddr *)&sav->sah->saidx.dst, 0) != 0) { + continue; + } + + break; + case AF_INET6: + bzero(&sin6, sizeof(sin6)); + sin6.sin6_family = AF_INET6; + sin6.sin6_len = sizeof(sin6); + bcopy(dst, &sin6.sin6_addr, + sizeof(sin6.sin6_addr)); + if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr)) { + /* kame fake scopeid */ + sin6.sin6_scope_id = + ntohs(sin6.sin6_addr.s6_addr16[1]); + sin6.sin6_addr.s6_addr16[1] = 0; + } + if (key_sockaddrcmp((struct sockaddr*)&sin6, + (struct sockaddr *)&sav->sah->saidx.dst, 0) != 0) { continue; + } + break; + default: + ipseclog((LOG_DEBUG, "key_allocsa: " + "unknown address family=%d.\n", family)); + continue; } - + match = sav; matchidx = tmpidx; } - if (match) + if (match) { goto found; - + } + /* not found */ lck_mtx_unlock(sadb_mutex); return NULL; - + found: match->refcnt++; lck_mtx_unlock(sadb_mutex); @@ -1434,32 +1476,33 @@ found: u_int16_t key_natt_get_translated_port( - struct secasvar *outsav) + struct secasvar *outsav) { - struct secasindex saidx; struct secashead *sah; u_int stateidx, state; const u_int *saorder_state_valid; int arraysize; - + /* get sa for incoming */ saidx.mode = outsav->sah->saidx.mode; saidx.reqid = 0; saidx.proto = outsav->sah->saidx.proto; bcopy(&outsav->sah->saidx.src, &saidx.dst, sizeof(struct sockaddr_in)); bcopy(&outsav->sah->saidx.dst, &saidx.src, sizeof(struct sockaddr_in)); - + lck_mtx_lock(sadb_mutex); LIST_FOREACH(sah, &sahtree, chain) { - if (sah->state == SADB_SASTATE_DEAD) + if (sah->state == SADB_SASTATE_DEAD) { continue; - if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE)) + } + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE)) { goto found; + } } lck_mtx_unlock(sadb_mutex); return 0; - + found: /* * Found sah - now go thru list of SAs and find @@ -1477,7 +1520,7 @@ found: saorder_state_valid = saorder_state_valid_prefer_new; arraysize = _ARRAYLEN(saorder_state_valid_prefer_new); } - + for (stateidx = 0; stateidx < arraysize; stateidx++) { state = saorder_state_valid[stateidx]; if (key_do_get_translated_port(sah, outsav, state)) { @@ -1491,64 +1534,66 @@ found: static int key_do_get_translated_port( - struct secashead *sah, - struct secasvar *outsav, - u_int state) + struct secashead *sah, + struct secasvar *outsav, + u_int state) { struct secasvar *currsav, *nextsav, *candidate; - - + + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* initilize */ candidate = NULL; - + for (currsav = LIST_FIRST(&sah->savtree[state]); - currsav != NULL; - currsav = nextsav) { - + currsav != NULL; + currsav = nextsav) { nextsav = LIST_NEXT(currsav, chain); - + /* sanity check */ KEY_CHKSASTATE(currsav->state, state, "key_do_get_translated_port"); - + if ((currsav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) == 0 || - currsav->remote_ike_port != outsav->remote_ike_port) + currsav->remote_ike_port != outsav->remote_ike_port) { continue; - + } + if (candidate == NULL) { candidate = currsav; continue; } - + /* Which SA is the better ? */ - + /* sanity check 2 */ - if (candidate->lft_c == NULL || currsav->lft_c == NULL) + if (candidate->lft_c == NULL || currsav->lft_c == NULL) { panic("key_do_get_translated_port: " - "lifetime_current is NULL.\n"); - + "lifetime_current is NULL.\n"); + } + /* What the best method is to compare ? 
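 * Same old-versus-new preference as in key_do_allocsa_policy(), but
 * here the winning SA only donates natt_encapsulated_src_port: the
 * UDP encapsulation port learned from the peer is copied onto the
 * outbound SA so NAT-T traffic reuses the translator's existing
 * mapping.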
*/ if (key_preferred_oldsa) { if (candidate->lft_c->sadb_lifetime_addtime > - currsav->lft_c->sadb_lifetime_addtime) { + currsav->lft_c->sadb_lifetime_addtime) { candidate = currsav; } continue; /*NOTREACHED*/ } - + /* prefered new sa rather than old sa */ if (candidate->lft_c->sadb_lifetime_addtime < - currsav->lft_c->sadb_lifetime_addtime) + currsav->lft_c->sadb_lifetime_addtime) { candidate = currsav; + } } - + if (candidate) { outsav->natt_encapsulated_src_port = candidate->natt_encapsulated_src_port; return 1; } - + return 0; } @@ -1557,27 +1602,30 @@ key_do_get_translated_port( */ void key_freesp( - struct secpolicy *sp, - int locked) + struct secpolicy *sp, + int locked) { - /* sanity check */ - if (sp == NULL) + if (sp == NULL) { panic("key_freesp: NULL pointer is passed.\n"); - - if (!locked) + } + + if (!locked) { lck_mtx_lock(sadb_mutex); - else + } else { LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); + } sp->refcnt--; KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP freesp cause refcnt--:%d SP:0x%llx\n", sp->refcnt, (uint64_t)VM_KERNEL_ADDRPERM(sp))); - - if (sp->refcnt == 0) + + if (sp->refcnt == 0) { key_delsp(sp); - if (!locked) + } + if (!locked) { lck_mtx_unlock(sadb_mutex); + } return; } @@ -1588,28 +1636,31 @@ key_freesp( */ void key_freesav( - struct secasvar *sav, - int locked) + struct secasvar *sav, + int locked) { - /* sanity check */ - if (sav == NULL) + if (sav == NULL) { panic("key_freesav: NULL pointer is passed.\n"); - - if (!locked) + } + + if (!locked) { lck_mtx_lock(sadb_mutex); - else + } else { LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); + } sav->refcnt--; KEYDEBUG(KEYDEBUG_IPSEC_STAMP, printf("DP freesav cause refcnt--:%d SA:0x%llx SPI %u\n", sav->refcnt, (uint64_t)VM_KERNEL_ADDRPERM(sav), (u_int32_t)ntohl(sav->spi))); - - if (sav->refcnt == 0) + + if (sav->refcnt == 0) { key_delsav(sav); - if (!locked) + } + if (!locked) { lck_mtx_unlock(sadb_mutex); + } return; } @@ -1619,51 +1670,51 @@ key_freesav( */ static void key_delsp( - struct secpolicy *sp) + struct secpolicy *sp) { - /* sanity check */ - if (sp == NULL) + if (sp == NULL) { panic("key_delsp: NULL pointer is passed.\n"); - + } + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); sp->state = IPSEC_SPSTATE_DEAD; - - if (sp->refcnt > 0) + + if (sp->refcnt > 0) { return; /* can't free */ - + } /* remove from SP index */ if (__LIST_CHAINED(sp)) { LIST_REMOVE(sp, chain); ipsec_policy_count--; } - - if (sp->spidx.internal_if) { - ifnet_release(sp->spidx.internal_if); - sp->spidx.internal_if = NULL; - } - - if (sp->ipsec_if) { - ifnet_release(sp->ipsec_if); - sp->ipsec_if = NULL; - } - - if (sp->outgoing_if) { - ifnet_release(sp->outgoing_if); - sp->outgoing_if = NULL; - } - - { + + if (sp->spidx.internal_if) { + ifnet_release(sp->spidx.internal_if); + sp->spidx.internal_if = NULL; + } + + if (sp->ipsec_if) { + ifnet_release(sp->ipsec_if); + sp->ipsec_if = NULL; + } + + if (sp->outgoing_if) { + ifnet_release(sp->outgoing_if); + sp->outgoing_if = NULL; + } + + { struct ipsecrequest *isr = sp->req, *nextisr; - + while (isr != NULL) { nextisr = isr->next; KFREE(isr); isr = nextisr; - } + } } keydb_delsecpolicy(sp); - + return; } @@ -1674,25 +1725,27 @@ key_delsp( */ static struct secpolicy * key_getsp( - struct secpolicyindex *spidx) + struct secpolicyindex *spidx) { struct secpolicy *sp; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* sanity check */ - if (spidx == NULL) + if (spidx == NULL) { panic("key_getsp: NULL pointer is passed.\n"); - + } + LIST_FOREACH(sp, &sptree[spidx->dir], chain) 
{ - if (sp->state == IPSEC_SPSTATE_DEAD) + if (sp->state == IPSEC_SPSTATE_DEAD) { continue; + } if (key_cmpspidx_exactly(spidx, &sp->spidx)) { sp->refcnt++; return sp; } } - + return NULL; } @@ -1703,16 +1756,16 @@ key_getsp( */ struct secpolicy * key_getspbyid( - u_int32_t id) + u_int32_t id) { struct secpolicy *sp; - - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - - lck_mtx_lock(sadb_mutex); - sp = __key_getspbyid(id); - lck_mtx_unlock(sadb_mutex); - + + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); + + lck_mtx_lock(sadb_mutex); + sp = __key_getspbyid(id); + lck_mtx_unlock(sadb_mutex); + return sp; } @@ -1720,27 +1773,29 @@ static struct secpolicy * __key_getspbyid(u_int32_t id) { struct secpolicy *sp; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + LIST_FOREACH(sp, &sptree[IPSEC_DIR_INBOUND], chain) { - if (sp->state == IPSEC_SPSTATE_DEAD) + if (sp->state == IPSEC_SPSTATE_DEAD) { continue; + } if (sp->id == id) { sp->refcnt++; return sp; } } - + LIST_FOREACH(sp, &sptree[IPSEC_DIR_OUTBOUND], chain) { - if (sp->state == IPSEC_SPSTATE_DEAD) + if (sp->state == IPSEC_SPSTATE_DEAD) { continue; + } if (sp->id == id) { sp->refcnt++; return sp; } } - + return NULL; } @@ -1748,15 +1803,16 @@ struct secpolicy * key_newsp(void) { struct secpolicy *newsp = NULL; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); newsp = keydb_newsecpolicy(); - if (!newsp) + if (!newsp) { return newsp; - + } + newsp->refcnt = 1; newsp->req = NULL; - + return newsp; } @@ -1767,256 +1823,258 @@ key_newsp(void) */ struct secpolicy * key_msg2sp( - struct sadb_x_policy *xpl0, - size_t len, - int *error) + struct sadb_x_policy *xpl0, + size_t len, + int *error) { struct secpolicy *newsp; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (xpl0 == NULL) + if (xpl0 == NULL) { panic("key_msg2sp: NULL pointer was passed.\n"); - if (len < sizeof(*xpl0)) + } + if (len < sizeof(*xpl0)) { panic("key_msg2sp: invalid length.\n"); + } if (len != PFKEY_EXTLEN(xpl0)) { ipseclog((LOG_DEBUG, "key_msg2sp: Invalid msg length.\n")); *error = EINVAL; return NULL; } - + if ((newsp = key_newsp()) == NULL) { *error = ENOBUFS; return NULL; } - + newsp->spidx.dir = xpl0->sadb_x_policy_dir; newsp->policy = xpl0->sadb_x_policy_type; - + /* check policy */ switch (xpl0->sadb_x_policy_type) { - case IPSEC_POLICY_DISCARD: - case IPSEC_POLICY_GENERATE: - case IPSEC_POLICY_NONE: - case IPSEC_POLICY_ENTRUST: - case IPSEC_POLICY_BYPASS: - newsp->req = NULL; - break; - - case IPSEC_POLICY_IPSEC: - { - int tlen; - struct sadb_x_ipsecrequest *xisr; - struct ipsecrequest **p_isr = &newsp->req; - - /* validity check */ - if (PFKEY_EXTLEN(xpl0) < sizeof(*xpl0)) { + case IPSEC_POLICY_DISCARD: + case IPSEC_POLICY_GENERATE: + case IPSEC_POLICY_NONE: + case IPSEC_POLICY_ENTRUST: + case IPSEC_POLICY_BYPASS: + newsp->req = NULL; + break; + + case IPSEC_POLICY_IPSEC: + { + int tlen; + struct sadb_x_ipsecrequest *xisr; + struct ipsecrequest **p_isr = &newsp->req; + + /* validity check */ + if (PFKEY_EXTLEN(xpl0) < sizeof(*xpl0)) { + ipseclog((LOG_DEBUG, + "key_msg2sp: Invalid msg length.\n")); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = EINVAL; + return NULL; + } + + tlen = PFKEY_EXTLEN(xpl0) - sizeof(*xpl0); + xisr = (struct sadb_x_ipsecrequest *)(xpl0 + 1); + + while (tlen > 0) { + if (tlen < sizeof(*xisr)) { + ipseclog((LOG_DEBUG, "key_msg2sp: " + "invalid ipsecrequest.\n")); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = EINVAL; + return NULL; + } + + /* length check */ + if 
(xisr->sadb_x_ipsecrequest_len < sizeof(*xisr)) { + ipseclog((LOG_DEBUG, "key_msg2sp: " + "invalid ipsecrequest length.\n")); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = EINVAL; + return NULL; + } + + /* allocate request buffer */ + KMALLOC_WAIT(*p_isr, struct ipsecrequest *, sizeof(**p_isr)); + if ((*p_isr) == NULL) { + ipseclog((LOG_DEBUG, + "key_msg2sp: No more memory.\n")); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = ENOBUFS; + return NULL; + } + bzero(*p_isr, sizeof(**p_isr)); + + /* set values */ + (*p_isr)->next = NULL; + + switch (xisr->sadb_x_ipsecrequest_proto) { + case IPPROTO_ESP: + case IPPROTO_AH: + case IPPROTO_IPCOMP: + break; + default: + ipseclog((LOG_DEBUG, + "key_msg2sp: invalid proto type=%u\n", + xisr->sadb_x_ipsecrequest_proto)); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = EPROTONOSUPPORT; + return NULL; + } + (*p_isr)->saidx.proto = xisr->sadb_x_ipsecrequest_proto; + + switch (xisr->sadb_x_ipsecrequest_mode) { + case IPSEC_MODE_TRANSPORT: + case IPSEC_MODE_TUNNEL: + break; + case IPSEC_MODE_ANY: + default: ipseclog((LOG_DEBUG, - "key_msg2sp: Invalid msg length.\n")); + "key_msg2sp: invalid mode=%u\n", + xisr->sadb_x_ipsecrequest_mode)); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = EINVAL; + return NULL; + } + (*p_isr)->saidx.mode = xisr->sadb_x_ipsecrequest_mode; + + switch (xisr->sadb_x_ipsecrequest_level) { + case IPSEC_LEVEL_DEFAULT: + case IPSEC_LEVEL_USE: + case IPSEC_LEVEL_REQUIRE: + break; + case IPSEC_LEVEL_UNIQUE: + /* validity check */ + /* + * If range violation of reqid, kernel will + * update it, don't refuse it. + */ + if (xisr->sadb_x_ipsecrequest_reqid + > IPSEC_MANUAL_REQID_MAX) { + ipseclog((LOG_DEBUG, + "key_msg2sp: reqid=%d range " + "violation, updated by kernel.\n", + xisr->sadb_x_ipsecrequest_reqid)); + xisr->sadb_x_ipsecrequest_reqid = 0; + } + + /* allocate new reqid id if reqid is zero. */ + if (xisr->sadb_x_ipsecrequest_reqid == 0) { + u_int32_t reqid; + if ((reqid = key_newreqid()) == 0) { + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = ENOBUFS; + return NULL; + } + (*p_isr)->saidx.reqid = reqid; + xisr->sadb_x_ipsecrequest_reqid = reqid; + } else { + /* set it for manual keying. 
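 * A caller-supplied reqid at or below IPSEC_MANUAL_REQID_MAX is kept
 * as-is for manual keying; reqid == 0 above asks the kernel to pick
 * one via key_newreqid(), whose values stay within 16 bits because
 * the PF_KEY wire format cannot carry a wider field.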
*/ + (*p_isr)->saidx.reqid = + xisr->sadb_x_ipsecrequest_reqid; + } + break; + + default: + ipseclog((LOG_DEBUG, "key_msg2sp: invalid level=%u\n", + xisr->sadb_x_ipsecrequest_level)); key_freesp(newsp, KEY_SADB_UNLOCKED); *error = EINVAL; return NULL; } - - tlen = PFKEY_EXTLEN(xpl0) - sizeof(*xpl0); - xisr = (struct sadb_x_ipsecrequest *)(xpl0 + 1); - - while (tlen > 0) { - if (tlen < sizeof(*xisr)) { - ipseclog((LOG_DEBUG, "key_msg2sp: " - "invalid ipsecrequest.\n")); + (*p_isr)->level = xisr->sadb_x_ipsecrequest_level; + + /* set IP addresses if there */ + if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) { + struct sockaddr *paddr; + + if (tlen < xisr->sadb_x_ipsecrequest_len) { + ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " + "address length.\n")); key_freesp(newsp, KEY_SADB_UNLOCKED); *error = EINVAL; return NULL; } - - /* length check */ - if (xisr->sadb_x_ipsecrequest_len < sizeof(*xisr)) { - ipseclog((LOG_DEBUG, "key_msg2sp: " - "invalid ipsecrequest length.\n")); + + paddr = (struct sockaddr *)(xisr + 1); + uint8_t src_len = paddr->sa_len; + + if (xisr->sadb_x_ipsecrequest_len < src_len) { + ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " + "invalid source address length.\n")); key_freesp(newsp, KEY_SADB_UNLOCKED); *error = EINVAL; return NULL; } - - /* allocate request buffer */ - KMALLOC_WAIT(*p_isr, struct ipsecrequest *, sizeof(**p_isr)); - if ((*p_isr) == NULL) { - ipseclog((LOG_DEBUG, - "key_msg2sp: No more memory.\n")); + + /* validity check */ + if (paddr->sa_len + > sizeof((*p_isr)->saidx.src)) { + ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " + "address length.\n")); key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = ENOBUFS; + *error = EINVAL; return NULL; } - bzero(*p_isr, sizeof(**p_isr)); - - /* set values */ - (*p_isr)->next = NULL; - - switch (xisr->sadb_x_ipsecrequest_proto) { - case IPPROTO_ESP: - case IPPROTO_AH: - case IPPROTO_IPCOMP: - break; - default: - ipseclog((LOG_DEBUG, - "key_msg2sp: invalid proto type=%u\n", - xisr->sadb_x_ipsecrequest_proto)); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EPROTONOSUPPORT; - return NULL; - } - (*p_isr)->saidx.proto = xisr->sadb_x_ipsecrequest_proto; - - switch (xisr->sadb_x_ipsecrequest_mode) { - case IPSEC_MODE_TRANSPORT: - case IPSEC_MODE_TUNNEL: - break; - case IPSEC_MODE_ANY: - default: - ipseclog((LOG_DEBUG, - "key_msg2sp: invalid mode=%u\n", - xisr->sadb_x_ipsecrequest_mode)); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EINVAL; - return NULL; - } - (*p_isr)->saidx.mode = xisr->sadb_x_ipsecrequest_mode; - - switch (xisr->sadb_x_ipsecrequest_level) { - case IPSEC_LEVEL_DEFAULT: - case IPSEC_LEVEL_USE: - case IPSEC_LEVEL_REQUIRE: - break; - case IPSEC_LEVEL_UNIQUE: - /* validity check */ - /* - * If range violation of reqid, kernel will - * update it, don't refuse it. - */ - if (xisr->sadb_x_ipsecrequest_reqid - > IPSEC_MANUAL_REQID_MAX) { - ipseclog((LOG_DEBUG, - "key_msg2sp: reqid=%d range " - "violation, updated by kernel.\n", - xisr->sadb_x_ipsecrequest_reqid)); - xisr->sadb_x_ipsecrequest_reqid = 0; - } - - /* allocate new reqid id if reqid is zero. */ - if (xisr->sadb_x_ipsecrequest_reqid == 0) { - u_int32_t reqid; - if ((reqid = key_newreqid()) == 0) { - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = ENOBUFS; - return NULL; - } - (*p_isr)->saidx.reqid = reqid; - xisr->sadb_x_ipsecrequest_reqid = reqid; - } else { - /* set it for manual keying. 
*/ - (*p_isr)->saidx.reqid = - xisr->sadb_x_ipsecrequest_reqid; - } - break; - - default: - ipseclog((LOG_DEBUG, "key_msg2sp: invalid level=%u\n", - xisr->sadb_x_ipsecrequest_level)); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EINVAL; - return NULL; - } - (*p_isr)->level = xisr->sadb_x_ipsecrequest_level; - - /* set IP addresses if there */ - if (xisr->sadb_x_ipsecrequest_len > sizeof(*xisr)) { - struct sockaddr *paddr; - - if (tlen < xisr->sadb_x_ipsecrequest_len) { - ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " - "address length.\n")); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EINVAL; - return NULL; - } - - paddr = (struct sockaddr *)(xisr + 1); - uint8_t src_len = paddr->sa_len; - if (xisr->sadb_x_ipsecrequest_len < src_len) { - ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " - "invalid source address length.\n")); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EINVAL; - return NULL; - } - - /* validity check */ - if (paddr->sa_len - > sizeof((*p_isr)->saidx.src)) { - ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " - "address length.\n")); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EINVAL; - return NULL; - } + bcopy(paddr, &(*p_isr)->saidx.src, + MIN(paddr->sa_len, sizeof((*p_isr)->saidx.src))); - bcopy(paddr, &(*p_isr)->saidx.src, - MIN(paddr->sa_len, sizeof((*p_isr)->saidx.src))); - - paddr = (struct sockaddr *)((caddr_t)paddr + paddr->sa_len); - uint8_t dst_len = paddr->sa_len; - - if (xisr->sadb_x_ipsecrequest_len < (src_len + dst_len)) { - ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " - "invalid dest address length.\n")); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EINVAL; - return NULL; - } - - /* validity check */ - if (paddr->sa_len - > sizeof((*p_isr)->saidx.dst)) { - ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " - "address length.\n")); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EINVAL; - return NULL; - } + paddr = (struct sockaddr *)((caddr_t)paddr + paddr->sa_len); + uint8_t dst_len = paddr->sa_len; - bcopy(paddr, &(*p_isr)->saidx.dst, - MIN(paddr->sa_len, sizeof((*p_isr)->saidx.dst))); + if (xisr->sadb_x_ipsecrequest_len < (src_len + dst_len)) { + ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " + "invalid dest address length.\n")); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = EINVAL; + return NULL; } - - (*p_isr)->sp = newsp; - - /* initialization for the next. */ - p_isr = &(*p_isr)->next; - tlen -= xisr->sadb_x_ipsecrequest_len; - + /* validity check */ - if (tlen < 0) { - ipseclog((LOG_DEBUG, "key_msg2sp: becoming tlen < 0.\n")); + if (paddr->sa_len + > sizeof((*p_isr)->saidx.dst)) { + ipseclog((LOG_DEBUG, "key_msg2sp: invalid request " + "address length.\n")); key_freesp(newsp, KEY_SADB_UNLOCKED); *error = EINVAL; return NULL; } - - xisr = (struct sadb_x_ipsecrequest *)(void *) - ((caddr_t)xisr + xisr->sadb_x_ipsecrequest_len); + + bcopy(paddr, &(*p_isr)->saidx.dst, + MIN(paddr->sa_len, sizeof((*p_isr)->saidx.dst))); } - } - break; - default: - ipseclog((LOG_DEBUG, "key_msg2sp: invalid policy type.\n")); - key_freesp(newsp, KEY_SADB_UNLOCKED); - *error = EINVAL; - return NULL; + + (*p_isr)->sp = newsp; + + /* initialization for the next. 
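 * Advance the link pointer so the next request is appended to the
 * chain, and charge this request's self-declared length against
 * tlen.  If the declared lengths overrun the extension, tlen goes
 * negative and the message is rejected with EINVAL just below.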
*/ + p_isr = &(*p_isr)->next; + tlen -= xisr->sadb_x_ipsecrequest_len; + + /* validity check */ + if (tlen < 0) { + ipseclog((LOG_DEBUG, "key_msg2sp: becoming tlen < 0.\n")); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = EINVAL; + return NULL; + } + + xisr = (struct sadb_x_ipsecrequest *)(void *) + ((caddr_t)xisr + xisr->sadb_x_ipsecrequest_len); + } + } + break; + default: + ipseclog((LOG_DEBUG, "key_msg2sp: invalid policy type.\n")); + key_freesp(newsp, KEY_SADB_UNLOCKED); + *error = EINVAL; + return NULL; } - + *error = 0; return newsp; } @@ -2027,20 +2085,20 @@ key_newreqid(void) lck_mtx_lock(sadb_mutex); static u_int32_t auto_reqid = IPSEC_MANUAL_REQID_MAX + 1; int done = 0; - + /* The reqid must be limited to 16 bits because the PF_KEY message format only uses - 16 bits for this field. Once it becomes larger than 16 bits - ipsec fails to - work anymore. Changing the PF_KEY message format would introduce compatibility - issues. This code now tests to see if the tentative reqid is in use */ - + * 16 bits for this field. Once it becomes larger than 16 bits - ipsec fails to + * work anymore. Changing the PF_KEY message format would introduce compatibility + * issues. This code now tests to see if the tentative reqid is in use */ + while (!done) { struct secpolicy *sp; struct ipsecrequest *isr; int dir; - + auto_reqid = (auto_reqid == 0xFFFF - ? IPSEC_MANUAL_REQID_MAX + 1 : auto_reqid + 1); - + ? IPSEC_MANUAL_REQID_MAX + 1 : auto_reqid + 1); + /* check for uniqueness */ done = 1; for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { @@ -2051,14 +2109,16 @@ key_newreqid(void) break; } } - if (done == 0) + if (done == 0) { break; + } } - if (done == 0) + if (done == 0) { break; + } } } - + lck_mtx_unlock(sadb_mutex); return auto_reqid; } @@ -2068,132 +2128,142 @@ key_newreqid(void) */ struct mbuf * key_sp2msg( - struct secpolicy *sp) + struct secpolicy *sp) { struct sadb_x_policy *xpl; int tlen; caddr_t p; struct mbuf *m; - + /* sanity check. */ - if (sp == NULL) + if (sp == NULL) { panic("key_sp2msg: NULL pointer was passed.\n"); - + } + tlen = key_getspreqmsglen(sp); - + m = key_alloc_mbuf(tlen); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - + m->m_len = tlen; m->m_next = NULL; xpl = mtod(m, struct sadb_x_policy *); bzero(xpl, tlen); - + xpl->sadb_x_policy_len = PFKEY_UNIT64(tlen); xpl->sadb_x_policy_exttype = SADB_X_EXT_POLICY; xpl->sadb_x_policy_type = sp->policy; xpl->sadb_x_policy_dir = sp->spidx.dir; xpl->sadb_x_policy_id = sp->id; p = (caddr_t)xpl + sizeof(*xpl); - + /* if is the policy for ipsec ? 
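 * For IPSEC_POLICY_IPSEC the fixed sadb_x_policy header is followed
 * by one sadb_x_ipsecrequest per chained request, each carrying its
 * src/dst sockaddrs and padded with PFKEY_ALIGN8().  (The second
 * "p += isr->saidx.src.ss_len" below advances by the source length
 * even after copying dst; benign while both addresses are the same
 * family, but worth knowing when reading the layout.)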
*/ if (sp->policy == IPSEC_POLICY_IPSEC) { struct sadb_x_ipsecrequest *xisr; struct ipsecrequest *isr; - + for (isr = sp->req; isr != NULL; isr = isr->next) { - xisr = (struct sadb_x_ipsecrequest *)(void *)p; - + xisr->sadb_x_ipsecrequest_proto = isr->saidx.proto; xisr->sadb_x_ipsecrequest_mode = isr->saidx.mode; xisr->sadb_x_ipsecrequest_level = isr->level; xisr->sadb_x_ipsecrequest_reqid = isr->saidx.reqid; - + p += sizeof(*xisr); bcopy(&isr->saidx.src, p, isr->saidx.src.ss_len); p += isr->saidx.src.ss_len; bcopy(&isr->saidx.dst, p, isr->saidx.dst.ss_len); p += isr->saidx.src.ss_len; - + xisr->sadb_x_ipsecrequest_len = - PFKEY_ALIGN8(sizeof(*xisr) - + isr->saidx.src.ss_len - + isr->saidx.dst.ss_len); + PFKEY_ALIGN8(sizeof(*xisr) + + isr->saidx.src.ss_len + + isr->saidx.dst.ss_len); } } - + return m; } /* m will not be freed nor modified */ static struct mbuf * key_gather_mbuf(struct mbuf *m, const struct sadb_msghdr *mhp, - int ndeep, int nitem, int *items) + int ndeep, int nitem, int *items) { int idx; int i; struct mbuf *result = NULL, *n; int len; - - if (m == NULL || mhp == NULL) + + if (m == NULL || mhp == NULL) { panic("null pointer passed to key_gather"); - + } + for (i = 0; i < nitem; i++) { idx = items[i]; - if (idx < 0 || idx > SADB_EXT_MAX) + if (idx < 0 || idx > SADB_EXT_MAX) { goto fail; + } /* don't attempt to pull empty extension */ - if (idx == SADB_EXT_RESERVED && mhp->msg == NULL) + if (idx == SADB_EXT_RESERVED && mhp->msg == NULL) { continue; - if (idx != SADB_EXT_RESERVED && - (mhp->ext[idx] == NULL || mhp->extlen[idx] == 0)) + } + if (idx != SADB_EXT_RESERVED && + (mhp->ext[idx] == NULL || mhp->extlen[idx] == 0)) { continue; - + } + if (idx == SADB_EXT_RESERVED) { len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); MGETHDR(n, M_WAITOK, MT_DATA); // sadb_msg len < MHLEN - enforced by _CASSERT - if (!n) + if (!n) { goto fail; + } n->m_len = len; n->m_next = NULL; m_copydata(m, 0, sizeof(struct sadb_msg), - mtod(n, caddr_t)); + mtod(n, caddr_t)); } else if (i < ndeep) { len = mhp->extlen[idx]; n = key_alloc_mbuf(len); - if (!n || n->m_next) { /*XXX*/ - if (n) + if (!n || n->m_next) { /*XXX*/ + if (n) { m_freem(n); + } goto fail; } m_copydata(m, mhp->extoff[idx], mhp->extlen[idx], - mtod(n, caddr_t)); + mtod(n, caddr_t)); } else { n = m_copym(m, mhp->extoff[idx], mhp->extlen[idx], - M_WAITOK); + M_WAITOK); } - if (n == NULL) + if (n == NULL) { goto fail; - - if (result) + } + + if (result) { m_cat(result, n); - else + } else { result = n; + } } - + if ((result->m_flags & M_PKTHDR) != 0) { result->m_pkthdr.len = 0; - for (n = result; n; n = n->m_next) + for (n = result; n; n = n->m_next) { result->m_pkthdr.len += n->m_len; + } } - + return result; - + fail: m_freem(result); return NULL; @@ -2217,9 +2287,9 @@ fail: */ static int key_spdadd( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_address *src0, *dst0, *src1 = NULL, *dst1 = NULL; struct sadb_x_policy *xpl0, *xpl; @@ -2227,152 +2297,153 @@ key_spdadd( struct secpolicyindex spidx; struct secpolicy *newsp; struct timeval tv; - ifnet_t internal_if = NULL; - char *outgoing_if = NULL; - char *ipsec_if = NULL; - struct sadb_x_ipsecif *ipsecifopts = NULL; + ifnet_t internal_if = NULL; + char *outgoing_if = NULL; + char *ipsec_if = NULL; + struct sadb_x_ipsecif *ipsecifopts = NULL; int error; int use_src_range = 0; int use_dst_range = 0; int init_disabled = 0; int address_family, address_len; - + LCK_MTX_ASSERT(sadb_mutex, 
LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_spdadd: NULL pointer is passed.\n"); - - if (mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_START] != NULL && mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_END] != NULL) { - use_src_range = 1; - } - if (mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_START] != NULL && mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_END] != NULL) { - use_dst_range = 1; - } - + } + + if (mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_START] != NULL && mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_END] != NULL) { + use_src_range = 1; + } + if (mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_START] != NULL && mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_END] != NULL) { + use_dst_range = 1; + } + if ((!use_src_range && mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL) || - (!use_dst_range && mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) || + (!use_dst_range && mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) || mhp->ext[SADB_X_EXT_POLICY] == NULL) { ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if ((use_src_range && (mhp->extlen[SADB_X_EXT_ADDR_RANGE_SRC_START] < sizeof(struct sadb_address) - || mhp->extlen[SADB_X_EXT_ADDR_RANGE_SRC_END] < sizeof(struct sadb_address))) || - (!use_src_range && mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address)) || - (use_dst_range && (mhp->extlen[SADB_X_EXT_ADDR_RANGE_DST_START] < sizeof(struct sadb_address) - || mhp->extlen[SADB_X_EXT_ADDR_RANGE_DST_END] < sizeof(struct sadb_address))) || - (!use_dst_range && mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) || + || mhp->extlen[SADB_X_EXT_ADDR_RANGE_SRC_END] < sizeof(struct sadb_address))) || + (!use_src_range && mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address)) || + (use_dst_range && (mhp->extlen[SADB_X_EXT_ADDR_RANGE_DST_START] < sizeof(struct sadb_address) + || mhp->extlen[SADB_X_EXT_ADDR_RANGE_DST_END] < sizeof(struct sadb_address))) || + (!use_dst_range && mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL) { if (mhp->extlen[SADB_EXT_LIFETIME_HARD] - < sizeof(struct sadb_lifetime)) { + < sizeof(struct sadb_lifetime)) { ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } lft = (struct sadb_lifetime *) - (void *)mhp->ext[SADB_EXT_LIFETIME_HARD]; + (void *)mhp->ext[SADB_EXT_LIFETIME_HARD]; } - if (mhp->ext[SADB_X_EXT_IPSECIF] != NULL) { - if (mhp->extlen[SADB_X_EXT_IPSECIF] < sizeof(struct sadb_x_ipsecif)) { - ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); + if (mhp->ext[SADB_X_EXT_IPSECIF] != NULL) { + if (mhp->extlen[SADB_X_EXT_IPSECIF] < sizeof(struct sadb_x_ipsecif)) { + ipseclog((LOG_DEBUG, "key_spdadd: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); - } - } - - if (use_src_range) { - src0 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_START]; - src1 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_END]; - } else { - src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; - } - if (use_dst_range) { - dst0 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_START]; - dst1 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_END]; - } else { - dst0 = (struct sadb_address 
*)mhp->ext[SADB_EXT_ADDRESS_DST]; - } + } + } + + if (use_src_range) { + src0 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_START]; + src1 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_END]; + } else { + src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; + } + if (use_dst_range) { + dst0 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_START]; + dst1 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_END]; + } else { + dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; + } xpl0 = (struct sadb_x_policy *)(void *)mhp->ext[SADB_X_EXT_POLICY]; - ipsecifopts = (struct sadb_x_ipsecif *)(void *)mhp->ext[SADB_X_EXT_IPSECIF]; - - /* check addresses */ - address_family = ((struct sockaddr *)(src0 + 1))->sa_family; - address_len = ((struct sockaddr *)(src0 + 1))->sa_len; - if (use_src_range) { - if (((struct sockaddr *)(src1+ 1))->sa_family != address_family || - ((struct sockaddr *)(src1+ 1))->sa_len != address_len) { - return key_senderror(so, m, EINVAL); - } - } - if (((struct sockaddr *)(dst0+ 1))->sa_family != address_family || - ((struct sockaddr *)(dst0+ 1))->sa_len != address_len) { - return key_senderror(so, m, EINVAL); - } - if (use_dst_range) { - if (((struct sockaddr *)(dst1+ 1))->sa_family != address_family || - ((struct sockaddr *)(dst1+ 1))->sa_len != address_len) { - return key_senderror(so, m, EINVAL); - } - } - - /* checking the direction. */ + ipsecifopts = (struct sadb_x_ipsecif *)(void *)mhp->ext[SADB_X_EXT_IPSECIF]; + + /* check addresses */ + address_family = ((struct sockaddr *)(src0 + 1))->sa_family; + address_len = ((struct sockaddr *)(src0 + 1))->sa_len; + if (use_src_range) { + if (((struct sockaddr *)(src1 + 1))->sa_family != address_family || + ((struct sockaddr *)(src1 + 1))->sa_len != address_len) { + return key_senderror(so, m, EINVAL); + } + } + if (((struct sockaddr *)(dst0 + 1))->sa_family != address_family || + ((struct sockaddr *)(dst0 + 1))->sa_len != address_len) { + return key_senderror(so, m, EINVAL); + } + if (use_dst_range) { + if (((struct sockaddr *)(dst1 + 1))->sa_family != address_family || + ((struct sockaddr *)(dst1 + 1))->sa_len != address_len) { + return key_senderror(so, m, EINVAL); + } + } + + /* checking the direction. */ switch (xpl0->sadb_x_policy_dir) { - case IPSEC_DIR_INBOUND: - case IPSEC_DIR_OUTBOUND: - break; - default: - ipseclog((LOG_DEBUG, "key_spdadd: Invalid SP direction.\n")); - mhp->msg->sadb_msg_errno = EINVAL; - return 0; - } - - /* check policy */ + case IPSEC_DIR_INBOUND: + case IPSEC_DIR_OUTBOUND: + break; + default: + ipseclog((LOG_DEBUG, "key_spdadd: Invalid SP direction.\n")); + mhp->msg->sadb_msg_errno = EINVAL; + return 0; + } + + /* check policy */ /* key_spdadd() accepts DISCARD, NONE and IPSEC. */ if (xpl0->sadb_x_policy_type == IPSEC_POLICY_ENTRUST - || xpl0->sadb_x_policy_type == IPSEC_POLICY_BYPASS) { + || xpl0->sadb_x_policy_type == IPSEC_POLICY_BYPASS) { ipseclog((LOG_DEBUG, "key_spdadd: Invalid policy type.\n")); return key_senderror(so, m, EINVAL); } - + /* policy requests are mandatory when action is ipsec. 
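 * That is, an IPSEC_POLICY_IPSEC policy must carry at least one
 * sadb_x_ipsecrequest beyond the fixed header, so the extension
 * length must exceed sizeof(*xpl0).  SADB_X_SPDSETIDX is exempt
 * because it installs only the policy index; the requests typically
 * follow in a later SPDUPDATE.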
*/ - if (mhp->msg->sadb_msg_type != SADB_X_SPDSETIDX - && xpl0->sadb_x_policy_type == IPSEC_POLICY_IPSEC - && mhp->extlen[SADB_X_EXT_POLICY] <= sizeof(*xpl0)) { + if (mhp->msg->sadb_msg_type != SADB_X_SPDSETIDX + && xpl0->sadb_x_policy_type == IPSEC_POLICY_IPSEC + && mhp->extlen[SADB_X_EXT_POLICY] <= sizeof(*xpl0)) { ipseclog((LOG_DEBUG, "key_spdadd: some policy requests part required.\n")); return key_senderror(so, m, EINVAL); } - - /* Process interfaces */ - if (ipsecifopts != NULL) { - if (ipsecifopts->sadb_x_ipsecif_internal_if[0]) { - ifnet_find_by_name(ipsecifopts->sadb_x_ipsecif_internal_if, &internal_if); - } - if (ipsecifopts->sadb_x_ipsecif_outgoing_if[0]) { - outgoing_if = ipsecifopts->sadb_x_ipsecif_outgoing_if; - } - if (ipsecifopts->sadb_x_ipsecif_ipsec_if[0]) { - ipsec_if = ipsecifopts->sadb_x_ipsecif_ipsec_if; - } + + /* Process interfaces */ + if (ipsecifopts != NULL) { + if (ipsecifopts->sadb_x_ipsecif_internal_if[0]) { + ifnet_find_by_name(ipsecifopts->sadb_x_ipsecif_internal_if, &internal_if); + } + if (ipsecifopts->sadb_x_ipsecif_outgoing_if[0]) { + outgoing_if = ipsecifopts->sadb_x_ipsecif_outgoing_if; + } + if (ipsecifopts->sadb_x_ipsecif_ipsec_if[0]) { + ipsec_if = ipsecifopts->sadb_x_ipsecif_ipsec_if; + } init_disabled = ipsecifopts->sadb_x_ipsecif_init_disabled; - } - + } + /* make secindex */ /* XXX boundary check against sa_len */ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, - src0 + 1, - dst0 + 1, - src0->sadb_address_prefixlen, - dst0->sadb_address_prefixlen, - src0->sadb_address_proto, - internal_if, - use_src_range ? src0 + 1 : NULL, - use_src_range ? src1 + 1 : NULL, - use_dst_range ? dst0 + 1 : NULL, - use_dst_range ? dst1 + 1 : NULL, - &spidx); - + src0 + 1, + dst0 + 1, + src0->sadb_address_prefixlen, + dst0->sadb_address_prefixlen, + src0->sadb_address_proto, + internal_if, + use_src_range ? src0 + 1 : NULL, + use_src_range ? src1 + 1 : NULL, + use_dst_range ? dst0 + 1 : NULL, + use_dst_range ? dst1 + 1 : NULL, + &spidx); + /* * checking there is SP already or not. * SPDUPDATE doesn't depend on whether there is a SP or not. @@ -2391,47 +2462,47 @@ key_spdadd( key_freesp(newsp, KEY_SADB_LOCKED); ipseclog((LOG_DEBUG, "key_spdadd: a SP entry exists already.\n")); lck_mtx_unlock(sadb_mutex); - if (internal_if) { - ifnet_release(internal_if); - internal_if = NULL; - } + if (internal_if) { + ifnet_release(internal_if); + internal_if = NULL; + } return key_senderror(so, m, EEXIST); } } lck_mtx_unlock(sadb_mutex); - + /* allocation new SP entry */ if ((newsp = key_msg2sp(xpl0, PFKEY_EXTLEN(xpl0), &error)) == NULL) { - if (internal_if) { - ifnet_release(internal_if); - internal_if = NULL; - } + if (internal_if) { + ifnet_release(internal_if); + internal_if = NULL; + } return key_senderror(so, m, error); } - + if ((newsp->id = key_getnewspid()) == 0) { keydb_delsecpolicy(newsp); - if (internal_if) { - ifnet_release(internal_if); - internal_if = NULL; - } + if (internal_if) { + ifnet_release(internal_if); + internal_if = NULL; + } return key_senderror(so, m, ENOBUFS); } - + /* XXX boundary check against sa_len */ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, - src0 + 1, - dst0 + 1, - src0->sadb_address_prefixlen, - dst0->sadb_address_prefixlen, - src0->sadb_address_proto, - internal_if, - use_src_range ? src0 + 1 : NULL, - use_src_range ? src1 + 1 : NULL, - use_dst_range ? dst0 + 1 : NULL, - use_dst_range ? 
dst1 + 1 : NULL, - &newsp->spidx); - + src0 + 1, + dst0 + 1, + src0->sadb_address_prefixlen, + dst0->sadb_address_prefixlen, + src0->sadb_address_proto, + internal_if, + use_src_range ? src0 + 1 : NULL, + use_src_range ? src1 + 1 : NULL, + use_dst_range ? dst0 + 1 : NULL, + use_dst_range ? dst1 + 1 : NULL, + &newsp->spidx); + #if 1 /* * allow IPv6 over IPv4 or IPv4 over IPv6 tunnels using ESP - @@ -2443,10 +2514,10 @@ key_spdadd( if (sa->sa_family != newsp->req->saidx.src.ss_family) { if (newsp->req->saidx.mode != IPSEC_MODE_TUNNEL || newsp->req->saidx.proto != IPPROTO_ESP) { keydb_delsecpolicy(newsp); - if (internal_if) { - ifnet_release(internal_if); - internal_if = NULL; - } + if (internal_if) { + ifnet_release(internal_if); + internal_if = NULL; + } return key_senderror(so, m, EINVAL); } } @@ -2457,33 +2528,33 @@ key_spdadd( if (sa->sa_family != newsp->req->saidx.dst.ss_family) { if (newsp->req->saidx.mode != IPSEC_MODE_TUNNEL || newsp->req->saidx.proto != IPPROTO_ESP) { keydb_delsecpolicy(newsp); - if (internal_if) { - ifnet_release(internal_if); - internal_if = NULL; - } + if (internal_if) { + ifnet_release(internal_if); + internal_if = NULL; + } return key_senderror(so, m, EINVAL); } } } #endif - + microtime(&tv); newsp->created = tv.tv_sec; newsp->lastused = tv.tv_sec; newsp->lifetime = lft ? lft->sadb_lifetime_addtime : 0; newsp->validtime = lft ? lft->sadb_lifetime_usetime : 0; - - if (outgoing_if != NULL) { - ifnet_find_by_name(outgoing_if, &newsp->outgoing_if); - } - if (ipsec_if != NULL) { - ifnet_find_by_name(ipsec_if, &newsp->ipsec_if); - } + + if (outgoing_if != NULL) { + ifnet_find_by_name(outgoing_if, &newsp->outgoing_if); + } + if (ipsec_if != NULL) { + ifnet_find_by_name(ipsec_if, &newsp->ipsec_if); + } if (init_disabled > 0) { newsp->disabled = 1; } - - newsp->refcnt = 1; /* do not reclaim until I say I do */ + + newsp->refcnt = 1; /* do not reclaim until I say I do */ newsp->state = IPSEC_SPSTATE_ALIVE; lck_mtx_lock(sadb_mutex); /* @@ -2491,26 +2562,29 @@ key_spdadd( * because they function as default discard policies * Don't start timehandler for generate policies */ - if (newsp->policy == IPSEC_POLICY_GENERATE) + if (newsp->policy == IPSEC_POLICY_GENERATE) { LIST_INSERT_TAIL(&sptree[newsp->spidx.dir], newsp, secpolicy, chain); - else { /* XXX until we have policy ordering in the kernel */ + } else { /* XXX until we have policy ordering in the kernel */ struct secpolicy *tmpsp; - + LIST_FOREACH(tmpsp, &sptree[newsp->spidx.dir], chain) - if (tmpsp->policy == IPSEC_POLICY_GENERATE) + if (tmpsp->policy == IPSEC_POLICY_GENERATE) { break; - if (tmpsp) + } + if (tmpsp) { LIST_INSERT_BEFORE(tmpsp, newsp, chain); - else + } else { LIST_INSERT_TAIL(&sptree[newsp->spidx.dir], newsp, secpolicy, chain); + } key_start_timehandler(); } - + ipsec_policy_count++; /* Turn off the ipsec bypass */ - if (ipsec_bypass != 0) + if (ipsec_bypass != 0) { ipsec_bypass = 0; - + } + /* delete the entry in spacqtree */ if (mhp->msg->sadb_msg_type == SADB_X_SPDUPDATE) { struct secspacq *spacq; @@ -2520,43 +2594,45 @@ key_spdadd( spacq->created = tv.tv_sec; spacq->count = 0; } - } + } lck_mtx_unlock(sadb_mutex); - - { + + { struct mbuf *n, *mpolicy; struct sadb_msg *newmsg; int off; - + /* create new sadb_msg to reply. 
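 * The reply echoes the relevant request extensions back through
 * key_gather_mbuf() (the first two items are deep-copied, the rest
 * shared via m_copym), clears sadb_msg_errno, recomputes the length
 * in 64-bit units, patches the newly assigned policy id into the
 * echoed sadb_x_policy, and is broadcast to every PF_KEY listener
 * with KEY_SENDUP_ALL.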
*/ if (lft) { - int mbufItems[] = {SADB_EXT_RESERVED, SADB_X_EXT_POLICY, - SADB_EXT_LIFETIME_HARD, SADB_EXT_ADDRESS_SRC, - SADB_EXT_ADDRESS_DST, SADB_X_EXT_ADDR_RANGE_SRC_START, SADB_X_EXT_ADDR_RANGE_SRC_END, - SADB_X_EXT_ADDR_RANGE_DST_START, SADB_X_EXT_ADDR_RANGE_DST_END}; - n = key_gather_mbuf(m, mhp, 2, sizeof(mbufItems)/sizeof(int), mbufItems); + int mbufItems[] = {SADB_EXT_RESERVED, SADB_X_EXT_POLICY, + SADB_EXT_LIFETIME_HARD, SADB_EXT_ADDRESS_SRC, + SADB_EXT_ADDRESS_DST, SADB_X_EXT_ADDR_RANGE_SRC_START, SADB_X_EXT_ADDR_RANGE_SRC_END, + SADB_X_EXT_ADDR_RANGE_DST_START, SADB_X_EXT_ADDR_RANGE_DST_END}; + n = key_gather_mbuf(m, mhp, 2, sizeof(mbufItems) / sizeof(int), mbufItems); } else { - int mbufItems[] = {SADB_EXT_RESERVED, SADB_X_EXT_POLICY, - SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST, - SADB_X_EXT_ADDR_RANGE_SRC_START, SADB_X_EXT_ADDR_RANGE_SRC_END, - SADB_X_EXT_ADDR_RANGE_DST_START, SADB_X_EXT_ADDR_RANGE_DST_END}; - n = key_gather_mbuf(m, mhp, 2, sizeof(mbufItems)/sizeof(int), mbufItems); + int mbufItems[] = {SADB_EXT_RESERVED, SADB_X_EXT_POLICY, + SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST, + SADB_X_EXT_ADDR_RANGE_SRC_START, SADB_X_EXT_ADDR_RANGE_SRC_END, + SADB_X_EXT_ADDR_RANGE_DST_START, SADB_X_EXT_ADDR_RANGE_DST_END}; + n = key_gather_mbuf(m, mhp, 2, sizeof(mbufItems) / sizeof(int), mbufItems); } - if (!n) + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + if (n->m_len < sizeof(*newmsg)) { n = m_pullup(n, sizeof(*newmsg)); - if (!n) + if (!n) { return key_senderror(so, m, ENOBUFS); + } } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + off = 0; mpolicy = m_pulldown(n, PFKEY_ALIGN8(sizeof(struct sadb_msg)), - sizeof(*xpl), &off); + sizeof(*xpl), &off); if (mpolicy == NULL) { /* n is already freed */ return key_senderror(so, m, ENOBUFS); @@ -2567,10 +2643,10 @@ key_spdadd( return key_senderror(so, m, EINVAL); } xpl->sadb_x_policy_id = newsp->id; - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } /* @@ -2583,17 +2659,18 @@ static u_int32_t key_getnewspid(void) { u_int32_t newid = 0; - int count = key_spi_trycnt; /* XXX */ + int count = key_spi_trycnt; /* XXX */ struct secpolicy *sp; - + /* when requesting to allocate spi ranged */ lck_mtx_lock(sadb_mutex); while (count--) { newid = (policy_id = (policy_id == ~0 ? 
1 : policy_id + 1)); - - if ((sp = __key_getspbyid(newid)) == NULL) + + if ((sp = __key_getspbyid(newid)) == NULL) { break; - + } + key_freesp(sp, KEY_SADB_LOCKED); } lck_mtx_unlock(sadb_mutex); @@ -2601,7 +2678,7 @@ key_getnewspid(void) ipseclog((LOG_DEBUG, "key_getnewspid: to allocate policy id is failed.\n")); return 0; } - + return newid; } @@ -2619,141 +2696,143 @@ key_getnewspid(void) */ static int key_spddelete( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_address *src0, *dst0, *src1 = NULL, *dst1 = NULL; struct sadb_x_policy *xpl0; struct secpolicyindex spidx; struct secpolicy *sp; - ifnet_t internal_if = NULL; - struct sadb_x_ipsecif *ipsecifopts = NULL; - int use_src_range = 0; - int use_dst_range = 0; - + ifnet_t internal_if = NULL; + struct sadb_x_ipsecif *ipsecifopts = NULL; + int use_src_range = 0; + int use_dst_range = 0; + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_spddelete: NULL pointer is passed.\n"); - - if (mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_START] != NULL && mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_END] != NULL) { - use_src_range = 1; - } - if (mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_START] != NULL && mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_END] != NULL) { - use_dst_range = 1; - } - + } + + if (mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_START] != NULL && mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_END] != NULL) { + use_src_range = 1; + } + if (mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_START] != NULL && mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_END] != NULL) { + use_dst_range = 1; + } + if ((!use_src_range && mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL) || - (!use_dst_range && mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) || + (!use_dst_range && mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) || mhp->ext[SADB_X_EXT_POLICY] == NULL) { ipseclog((LOG_DEBUG, "key_spddelete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } if ((use_src_range && (mhp->extlen[SADB_X_EXT_ADDR_RANGE_SRC_START] < sizeof(struct sadb_address) - || mhp->extlen[SADB_X_EXT_ADDR_RANGE_SRC_END] < sizeof(struct sadb_address))) || - (!use_src_range && mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address)) || - (use_dst_range && (mhp->extlen[SADB_X_EXT_ADDR_RANGE_DST_START] < sizeof(struct sadb_address) - || mhp->extlen[SADB_X_EXT_ADDR_RANGE_DST_END] < sizeof(struct sadb_address))) || - (!use_dst_range && mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) || + || mhp->extlen[SADB_X_EXT_ADDR_RANGE_SRC_END] < sizeof(struct sadb_address))) || + (!use_src_range && mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address)) || + (use_dst_range && (mhp->extlen[SADB_X_EXT_ADDR_RANGE_DST_START] < sizeof(struct sadb_address) + || mhp->extlen[SADB_X_EXT_ADDR_RANGE_DST_END] < sizeof(struct sadb_address))) || + (!use_dst_range && mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spddelete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - - if (use_src_range) { - src0 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_START]; - src1 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_END]; - } else { - src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; - } - if (use_dst_range) { - dst0 = 
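key_getnewspid() above bounds its search with key_spi_trycnt and wraps the counter past ~0 back to 1, so the failure sentinel 0 is never handed out as a policy id. A self-contained sketch of the same allocator follows; SPID_TRYCNT and the toy spid_in_use() predicate are illustrative stand-ins for key_spi_trycnt and __key_getspbyid().

#include <stdint.h>

#define SPID_TRYCNT 1000   /* stand-in for key_spi_trycnt */

static uint32_t policy_id;  /* last id handed out */

/* Toy occupancy predicate standing in for __key_getspbyid(). */
static int
spid_in_use(uint32_t id)
{
    return id < 4;
}

/* Returns a fresh nonzero id, or 0 after SPID_TRYCNT misses. */
static uint32_t
getnewspid(void)
{
    int count = SPID_TRYCNT;

    while (count--) {
        policy_id = (policy_id == UINT32_MAX) ? 1 : policy_id + 1;
        if (!spid_in_use(policy_id)) {
            return policy_id;
        }
    }
    return 0;   /* caller logs "to allocate policy id is failed" */
}
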
(struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_START]; - dst1 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_END]; - } else { - dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; - } + + if (use_src_range) { + src0 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_START]; + src1 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_SRC_END]; + } else { + src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; + } + if (use_dst_range) { + dst0 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_START]; + dst1 = (struct sadb_address *)mhp->ext[SADB_X_EXT_ADDR_RANGE_DST_END]; + } else { + dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; + } xpl0 = (struct sadb_x_policy *)(void *)mhp->ext[SADB_X_EXT_POLICY]; - ipsecifopts = (struct sadb_x_ipsecif *)(void *)mhp->ext[SADB_X_EXT_IPSECIF]; - - /* checking the direction. */ + ipsecifopts = (struct sadb_x_ipsecif *)(void *)mhp->ext[SADB_X_EXT_IPSECIF]; + + /* checking the direction. */ switch (xpl0->sadb_x_policy_dir) { - case IPSEC_DIR_INBOUND: - case IPSEC_DIR_OUTBOUND: - break; - default: - ipseclog((LOG_DEBUG, "key_spddelete: Invalid SP direction.\n")); - return key_senderror(so, m, EINVAL); - } - - /* Process interfaces */ - if (ipsecifopts != NULL) { - if (ipsecifopts->sadb_x_ipsecif_internal_if[0]) { - ifnet_find_by_name(ipsecifopts->sadb_x_ipsecif_internal_if, &internal_if); - } - } - + case IPSEC_DIR_INBOUND: + case IPSEC_DIR_OUTBOUND: + break; + default: + ipseclog((LOG_DEBUG, "key_spddelete: Invalid SP direction.\n")); + return key_senderror(so, m, EINVAL); + } + + /* Process interfaces */ + if (ipsecifopts != NULL) { + if (ipsecifopts->sadb_x_ipsecif_internal_if[0]) { + ifnet_find_by_name(ipsecifopts->sadb_x_ipsecif_internal_if, &internal_if); + } + } + /* make secindex */ /* XXX boundary check against sa_len */ KEY_SETSECSPIDX(xpl0->sadb_x_policy_dir, - src0 + 1, - dst0 + 1, - src0->sadb_address_prefixlen, - dst0->sadb_address_prefixlen, - src0->sadb_address_proto, - internal_if, - use_src_range ? src0 + 1 : NULL, - use_src_range ? src1 + 1 : NULL, - use_dst_range ? dst0 + 1 : NULL, - use_dst_range ? dst1 + 1 : NULL, - &spidx); - + src0 + 1, + dst0 + 1, + src0->sadb_address_prefixlen, + dst0->sadb_address_prefixlen, + src0->sadb_address_proto, + internal_if, + use_src_range ? src0 + 1 : NULL, + use_src_range ? src1 + 1 : NULL, + use_dst_range ? dst0 + 1 : NULL, + use_dst_range ? dst1 + 1 : NULL, + &spidx); + /* Is there SP in SPD ? */ lck_mtx_lock(sadb_mutex); if ((sp = key_getsp(&spidx)) == NULL) { ipseclog((LOG_DEBUG, "key_spddelete: no SP found.\n")); lck_mtx_unlock(sadb_mutex); - if (internal_if) { - ifnet_release(internal_if); - internal_if = NULL; - } + if (internal_if) { + ifnet_release(internal_if); + internal_if = NULL; + } return key_senderror(so, m, EINVAL); } - - if (internal_if) { - ifnet_release(internal_if); - internal_if = NULL; - } - + + if (internal_if) { + ifnet_release(internal_if); + internal_if = NULL; + } + /* save policy id to buffer to be returned. 
*/ xpl0->sadb_x_policy_id = sp->id; - + sp->state = IPSEC_SPSTATE_DEAD; key_freesp(sp, KEY_SADB_LOCKED); lck_mtx_unlock(sadb_mutex); - - - { + + + { struct mbuf *n; struct sadb_msg *newmsg; - int mbufItems[] = {SADB_EXT_RESERVED, SADB_X_EXT_POLICY, - SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST, - SADB_X_EXT_ADDR_RANGE_SRC_START, SADB_X_EXT_ADDR_RANGE_SRC_END, - SADB_X_EXT_ADDR_RANGE_DST_START, SADB_X_EXT_ADDR_RANGE_DST_END}; - + int mbufItems[] = {SADB_EXT_RESERVED, SADB_X_EXT_POLICY, + SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST, + SADB_X_EXT_ADDR_RANGE_SRC_START, SADB_X_EXT_ADDR_RANGE_SRC_END, + SADB_X_EXT_ADDR_RANGE_DST_START, SADB_X_EXT_ADDR_RANGE_DST_END}; + /* create new sadb_msg to reply. */ - n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems)/sizeof(int), mbufItems); - if (!n) + n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems) / sizeof(int), mbufItems); + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } /* @@ -2770,29 +2849,30 @@ key_spddelete( */ static int key_spddelete2( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { u_int32_t id; struct secpolicy *sp; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_spddelete2: NULL pointer is passed.\n"); - + } + if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spddelete2: invalid message is passed.\n")); key_senderror(so, m, EINVAL); return 0; } - + id = ((struct sadb_x_policy *) - (void *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; - + (void *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; + /* Is there SP in SPD ? */ lck_mtx_lock(sadb_mutex); if ((sp = __key_getspbyid(id)) == NULL) { @@ -2800,21 +2880,22 @@ key_spddelete2( ipseclog((LOG_DEBUG, "key_spddelete2: no SP found id:%u.\n", id)); return key_senderror(so, m, EINVAL); } - + sp->state = IPSEC_SPSTATE_DEAD; key_freesp(sp, KEY_SADB_LOCKED); lck_mtx_unlock(sadb_mutex); - - { + + { struct mbuf *n, *nn; struct sadb_msg *newmsg; int off, len; - + /* create new sadb_msg to reply. 
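A pattern that both key_spddelete() and key_spddelete2() above rely on: no extension pointer is cast until mhp->extlen[...] has been checked against the target structure size, since a short message would otherwise let the kernel read past the mbuf. Here is a generic sketch of that guard; struct msghdr_view and ext_get are illustrative miniatures, not the real sadb_msghdr layout.

#include <stddef.h>

#define EXT_MAX 32

struct msghdr_view {
    const void *ext[EXT_MAX];     /* parsed extension pointers */
    size_t      extlen[EXT_MAX];  /* their byte lengths */
};

/* Return the extension only if it is present and at least minlen bytes. */
static const void *
ext_get(const struct msghdr_view *mhp, int type, size_t minlen)
{
    if (type < 0 || type >= EXT_MAX || mhp->ext[type] == NULL) {
        return NULL;
    }
    if (mhp->extlen[type] < minlen) {
        return NULL;
    }
    return mhp->ext[type];
}
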
*/ len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); - - if (len > MCLBYTES) + + if (len > MCLBYTES) { return key_senderror(so, m, ENOBUFS); + } MGETHDR(n, M_WAITOK, MT_DATA); if (n && len > MHLEN) { MCLGET(n, M_WAITOK); @@ -2823,66 +2904,70 @@ key_spddelete2( n = NULL; } } - if (!n) + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + n->m_len = len; n->m_next = NULL; off = 0; - + m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off); off += PFKEY_ALIGN8(sizeof(struct sadb_msg)); - + #if DIAGNOSTIC - if (off != len) + if (off != len) { panic("length inconsistency in key_spddelete2"); + } #endif - + n->m_next = m_copym(m, mhp->extoff[SADB_X_EXT_POLICY], - mhp->extlen[SADB_X_EXT_POLICY], M_WAITOK); + mhp->extlen[SADB_X_EXT_POLICY], M_WAITOK); if (!n->m_next) { m_freem(n); return key_senderror(so, m, ENOBUFS); } - + n->m_pkthdr.len = 0; - for (nn = n; nn; nn = nn->m_next) + for (nn = n; nn; nn = nn->m_next) { n->m_pkthdr.len += nn->m_len; - + } + newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } static int key_spdenable( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { u_int32_t id; struct secpolicy *sp; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_spdenable: NULL pointer is passed.\n"); - + } + if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spdenable: invalid message is passed.\n")); key_senderror(so, m, EINVAL); return 0; } - + id = ((struct sadb_x_policy *) - (void *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; - + (void *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; + /* Is there SP in SPD ? */ lck_mtx_lock(sadb_mutex); if ((sp = __key_getspbyid(id)) == NULL) { @@ -2890,59 +2975,62 @@ key_spdenable( ipseclog((LOG_DEBUG, "key_spdenable: no SP found id:%u.\n", id)); return key_senderror(so, m, EINVAL); } - + sp->disabled = 0; lck_mtx_unlock(sadb_mutex); - + { struct mbuf *n; struct sadb_msg *newmsg; int mbufItems[] = {SADB_EXT_RESERVED, SADB_X_EXT_POLICY}; - + /* create new sadb_msg to reply. 
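key_spddelete2() above assembles its reply from two pieces (a copied header plus an m_copym() of the policy extension) and then walks the chain to restore m_pkthdr.len before stamping sadb_msg_len. A toy version of that final pass, with a flattened stand-in for the real mbuf:

#include <stddef.h>

/* Toy mbuf: just enough structure to show the recomputation. */
struct mbuf {
    struct mbuf *m_next;
    int          m_len;
    int          m_pkthdr_len;   /* stands in for m_pkthdr.len on the head */
};

/*
 * After splicing chains with m_copym()/m_cat(), the head's packet
 * length must be recomputed from the per-mbuf lengths.
 */
static void
fix_pkthdr_len(struct mbuf *head)
{
    struct mbuf *m;

    head->m_pkthdr_len = 0;
    for (m = head; m != NULL; m = m->m_next) {
        head->m_pkthdr_len += m->m_len;
    }
}
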
*/ - n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems)/sizeof(int), mbufItems); - if (!n) + n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems) / sizeof(int), mbufItems); + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); - if (n == NULL) + if (n == NULL) { return key_senderror(so, m, ENOBUFS); + } } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } static int key_spddisable( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { u_int32_t id; struct secpolicy *sp; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_spddisable: NULL pointer is passed.\n"); - + } + if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spddisable: invalid message is passed.\n")); key_senderror(so, m, EINVAL); return 0; } - + id = ((struct sadb_x_policy *) - (void *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; - + (void *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; + /* Is there SP in SPD ? */ lck_mtx_lock(sadb_mutex); if ((sp = __key_getspbyid(id)) == NULL) { @@ -2950,32 +3038,34 @@ key_spddisable( ipseclog((LOG_DEBUG, "key_spddisable: no SP found id:%u.\n", id)); return key_senderror(so, m, EINVAL); } - + sp->disabled = 1; lck_mtx_unlock(sadb_mutex); - + { struct mbuf *n; struct sadb_msg *newmsg; int mbufItems[] = {SADB_EXT_RESERVED, SADB_X_EXT_POLICY}; - + /* create new sadb_msg to reply. */ - n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems)/sizeof(int), mbufItems); - if (!n) + n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems) / sizeof(int), mbufItems); + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); - if (n == NULL) + if (n == NULL) { return key_senderror(so, m, ENOBUFS); + } } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } /* @@ -2992,29 +3082,30 @@ key_spddisable( */ static int key_spdget( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { u_int32_t id; struct secpolicy *sp; struct mbuf *n; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_spdget: NULL pointer is passed.\n"); - + } + if (mhp->ext[SADB_X_EXT_POLICY] == NULL || mhp->extlen[SADB_X_EXT_POLICY] < sizeof(struct sadb_x_policy)) { ipseclog((LOG_DEBUG, "key_spdget: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - + id = ((struct sadb_x_policy *) - (void *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; - + (void *)mhp->ext[SADB_X_EXT_POLICY])->sadb_x_policy_id; + /* Is there SP in SPD ? 
*/ lck_mtx_lock(sadb_mutex); if ((sp = __key_getspbyid(id)) == NULL) { @@ -3027,8 +3118,9 @@ key_spdget( if (n != NULL) { m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ONE); - } else + } else { return key_senderror(so, m, ENOBUFS); + } } /* @@ -3048,22 +3140,25 @@ key_spdget( */ int key_spdacquire( - struct secpolicy *sp) + struct secpolicy *sp) { struct mbuf *result = NULL, *m; struct secspacq *newspacq; int error; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (sp == NULL) + if (sp == NULL) { panic("key_spdacquire: NULL pointer is passed.\n"); - if (sp->req != NULL) + } + if (sp->req != NULL) { panic("key_spdacquire: called but there is request.\n"); - if (sp->policy != IPSEC_POLICY_IPSEC) + } + if (sp->policy != IPSEC_POLICY_IPSEC) { panic("key_spdacquire: policy mismathed. IPsec is expected.\n"); - + } + /* get a entry to check whether sent message or not. */ lck_mtx_lock(sadb_mutex); if ((newspacq = key_getspacq(&sp->spidx)) != NULL) { @@ -3094,19 +3189,21 @@ key_spdacquire( goto fail; } result = m; - + result->m_pkthdr.len = 0; - for (m = result; m; m = m->m_next) + for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; - + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); - + PFKEY_UNIT64(result->m_pkthdr.len); + return key_sendup_mbuf(NULL, m, KEY_SENDUP_REGISTERED); - + fail: - if (result) + if (result) { m_freem(result); + } return error; } @@ -3124,21 +3221,23 @@ fail: */ static int key_spdflush( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_msg *newmsg; struct secpolicy *sp; u_int dir; - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_spdflush: NULL pointer is passed.\n"); - - if (m->m_len != PFKEY_ALIGN8(sizeof(struct sadb_msg))) + } + + if (m->m_len != PFKEY_ALIGN8(sizeof(struct sadb_msg))) { return key_senderror(so, m, EINVAL); - + } + lck_mtx_lock(sadb_mutex); for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { LIST_FOREACH(sp, &sptree[dir], chain) { @@ -3146,20 +3245,21 @@ key_spdflush( } } lck_mtx_unlock(sadb_mutex); - + if (sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) { ipseclog((LOG_DEBUG, "key_spdflush: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } - - if (m->m_next) + + if (m->m_next) { m_freem(m->m_next); + } m->m_next = NULL; m->m_pkthdr.len = m->m_len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); newmsg = mtod(m, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); - + return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } @@ -3177,25 +3277,26 @@ key_spdflush( static int key_spddump( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct secpolicy *sp, **spbuf = NULL, **sp_ptr; int cnt = 0, bufcount; u_int dir; struct mbuf *n; int error = 0; - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_spddump: NULL pointer is passed.\n"); - + } + if ((bufcount = ipsec_policy_count) == 0) { error = ENOENT; goto end; } - bufcount += 256; /* extra */ + bufcount += 256; /* extra */ KMALLOC_WAIT(spbuf, struct secpolicy**, bufcount * sizeof(struct secpolicy*)); if (spbuf == NULL) { 
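Every reply built in these functions ends the same way: the total byte length is rounded up to PF_KEY's 8-byte alignment and stored in sadb_msg_len in 8-byte units. A runnable sketch of the arithmetic; ALIGN8/UNIT64 mirror what PFKEY_ALIGN8()/PFKEY_UNIT64() compute for nonzero lengths, but are not the real macros.

#include <assert.h>

#define ALIGN8(a) (((a) + 7u) & ~7u)   /* round up to 8 bytes */
#define UNIT64(a) ((a) >> 3)           /* bytes -> 8-byte units */

int
main(void)
{
    assert(ALIGN8(13) == 16);
    assert(UNIT64(ALIGN8(13)) == 2);   /* 16 bytes == 2 units */
    return 0;
}
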
ipseclog((LOG_DEBUG, "key_spddump: No more memory.\n")); @@ -3207,136 +3308,152 @@ key_spddump( sp_ptr = spbuf; for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { LIST_FOREACH(sp, &sptree[dir], chain) { - if (cnt == bufcount) - break; /* buffer full */ + if (cnt == bufcount) { + break; /* buffer full */ + } *sp_ptr++ = sp; sp->refcnt++; cnt++; } } lck_mtx_unlock(sadb_mutex); - + if (cnt == 0) { error = ENOENT; goto end; } - + sp_ptr = spbuf; while (cnt) { --cnt; n = key_setdumpsp(*sp_ptr++, SADB_X_SPDDUMP, cnt, - mhp->msg->sadb_msg_pid); - - if (n) + mhp->msg->sadb_msg_pid); + + if (n) { key_sendup_mbuf(so, n, KEY_SENDUP_ONE); + } } - + lck_mtx_lock(sadb_mutex); - while (sp_ptr > spbuf) + while (sp_ptr > spbuf) { key_freesp(*(--sp_ptr), KEY_SADB_LOCKED); + } lck_mtx_unlock(sadb_mutex); - + end: - if (spbuf) + if (spbuf) { KFREE(spbuf); - if (error) + } + if (error) { return key_senderror(so, m, error); - + } + m_freem(m); return 0; - } static struct mbuf * key_setdumpsp( - struct secpolicy *sp, - u_int8_t type, - u_int32_t seq, - u_int32_t pid) + struct secpolicy *sp, + u_int8_t type, + u_int32_t seq, + u_int32_t pid) { struct mbuf *result = NULL, *m; - + m = key_setsadbmsg(type, 0, SADB_SATYPE_UNSPEC, seq, pid, sp->refcnt); - if (!m) + if (!m) { goto fail; + } result = m; - - if (sp->spidx.src_range.start.ss_len > 0) { - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_START, - (struct sockaddr *)&sp->spidx.src_range.start, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_END, - (struct sockaddr *)&sp->spidx.src_range.end, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - } else { - m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, - (struct sockaddr *)&sp->spidx.src, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - } - - if (sp->spidx.dst_range.start.ss_len > 0) { - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_START, - (struct sockaddr *)&sp->spidx.dst_range.start, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_END, - (struct sockaddr *)&sp->spidx.dst_range.end, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - } else { - m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, - (struct sockaddr *)&sp->spidx.dst, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - } - - if (sp->spidx.internal_if || sp->outgoing_if || sp->ipsec_if || sp->disabled) { - m = key_setsadbipsecif(sp->spidx.internal_if, sp->outgoing_if, sp->ipsec_if, sp->disabled); - if (!m) - goto fail; - m_cat(result, m); - } - + + if (sp->spidx.src_range.start.ss_len > 0) { + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_START, + (struct sockaddr *)&sp->spidx.src_range.start, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_END, + (struct sockaddr *)&sp->spidx.src_range.end, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + } else { + m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, + (struct sockaddr *)&sp->spidx.src, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + } + + if (sp->spidx.dst_range.start.ss_len > 0) { + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_START, + (struct sockaddr *)&sp->spidx.dst_range.start, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + 
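key_spddump() above cannot hold sadb_mutex across a key_sendup_mbuf() per policy, so it snapshots the tree under the lock, taking a reference on each entry (with 256 spare array slots for policies added concurrently), does the sends unlocked, and drops the references afterwards. A pthread sketch of the same pattern, with illustrative names throughout:

#include <pthread.h>
#include <stdlib.h>

struct sp { int refcnt; };

static pthread_mutex_t spd_lock = PTHREAD_MUTEX_INITIALIZER;

static void
send_one(struct sp *sp)
{
    (void)sp;   /* key_setdumpsp() + key_sendup_mbuf() in the real code */
}

static int
dump_all(struct sp **table, int n)
{
    int bufcount = n + 256;   /* spare slots, as in key_spddump() */
    struct sp **snap = malloc((size_t)bufcount * sizeof(*snap));
    int cnt = 0, i;

    if (snap == NULL) {
        return -1;
    }
    pthread_mutex_lock(&spd_lock);
    for (i = 0; i < n && cnt < bufcount; i++) {
        snap[cnt] = table[i];
        snap[cnt]->refcnt++;   /* pin while we dump unlocked */
        cnt++;
    }
    pthread_mutex_unlock(&spd_lock);

    for (i = 0; i < cnt; i++) {
        send_one(snap[i]);
    }

    pthread_mutex_lock(&spd_lock);
    while (cnt > 0) {
        snap[--cnt]->refcnt--;   /* key_freesp() equivalent */
    }
    pthread_mutex_unlock(&spd_lock);

    free(snap);
    return 0;
}
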
m_cat(result, m); + + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_END, + (struct sockaddr *)&sp->spidx.dst_range.end, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + } else { + m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, + (struct sockaddr *)&sp->spidx.dst, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + } + + if (sp->spidx.internal_if || sp->outgoing_if || sp->ipsec_if || sp->disabled) { + m = key_setsadbipsecif(sp->spidx.internal_if, sp->outgoing_if, sp->ipsec_if, sp->disabled); + if (!m) { + goto fail; + } + m_cat(result, m); + } + m = key_sp2msg(sp); - if (!m) + if (!m) { goto fail; + } m_cat(result, m); - - if ((result->m_flags & M_PKTHDR) == 0) + + if ((result->m_flags & M_PKTHDR) == 0) { goto fail; - + } + if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); - if (result == NULL) + if (result == NULL) { goto fail; + } } - + result->m_pkthdr.len = 0; - for (m = result; m; m = m->m_next) + for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; - + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); - + PFKEY_UNIT64(result->m_pkthdr.len); + return result; - + fail: m_freem(result); return NULL; @@ -3347,30 +3464,31 @@ fail: */ static u_int key_getspreqmsglen( - struct secpolicy *sp) + struct secpolicy *sp) { u_int tlen; - + tlen = sizeof(struct sadb_x_policy); - + /* if is the policy for ipsec ? */ - if (sp->policy != IPSEC_POLICY_IPSEC) + if (sp->policy != IPSEC_POLICY_IPSEC) { return tlen; - + } + /* get length of ipsec requests */ - { + { struct ipsecrequest *isr; int len; - + for (isr = sp->req; isr != NULL; isr = isr->next) { len = sizeof(struct sadb_x_ipsecrequest) - + isr->saidx.src.ss_len - + isr->saidx.dst.ss_len; - + + isr->saidx.src.ss_len + + isr->saidx.dst.ss_len; + tlen += PFKEY_ALIGN8(len); } - } - + } + return tlen; } @@ -3385,19 +3503,20 @@ key_getspreqmsglen( */ static int key_spdexpire( - struct secpolicy *sp) + struct secpolicy *sp) { struct mbuf *result = NULL, *m; int len; int error = EINVAL; struct sadb_lifetime *lt; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (sp == NULL) + if (sp == NULL) { panic("key_spdexpire: NULL pointer is passed.\n"); - + } + /* set msg header */ m = key_setsadbmsg(SADB_X_SPDEXPIRE, 0, 0, 0, 0, 0); if (!m) { @@ -3405,13 +3524,14 @@ key_spdexpire( goto fail; } result = m; - + /* create lifetime extension (current and hard) */ len = PFKEY_ALIGN8(sizeof(*lt)) * 2; m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } error = ENOBUFS; goto fail; } @@ -3431,67 +3551,67 @@ key_spdexpire( lt->sadb_lifetime_addtime = sp->lifetime; lt->sadb_lifetime_usetime = sp->validtime; m_cat(result, m); - - /* set sadb_address(es) for source */ - if (sp->spidx.src_range.start.ss_len > 0) { - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_START, - (struct sockaddr *)&sp->spidx.src_range.start, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) { - error = ENOBUFS; - goto fail; - } - m_cat(result, m); - - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_END, - (struct sockaddr *)&sp->spidx.src_range.end, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) { - error = ENOBUFS; - goto fail; - } - m_cat(result, m); - } else { - m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, - (struct sockaddr *)&sp->spidx.src, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) { - error = ENOBUFS; - 
goto fail; - } - m_cat(result, m); - } - - /* set sadb_address(es) for dest */ - if (sp->spidx.dst_range.start.ss_len > 0) { - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_START, - (struct sockaddr *)&sp->spidx.dst_range.start, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) { - error = ENOBUFS; - goto fail; - } - m_cat(result, m); - - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_END, - (struct sockaddr *)&sp->spidx.dst_range.end, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) { - error = ENOBUFS; - goto fail; - } - m_cat(result, m); - } else { - m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, - (struct sockaddr *)&sp->spidx.dst, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) { - error = ENOBUFS; - goto fail; - } - m_cat(result, m); - } - + + /* set sadb_address(es) for source */ + if (sp->spidx.src_range.start.ss_len > 0) { + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_START, + (struct sockaddr *)&sp->spidx.src_range.start, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + error = ENOBUFS; + goto fail; + } + m_cat(result, m); + + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_END, + (struct sockaddr *)&sp->spidx.src_range.end, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + error = ENOBUFS; + goto fail; + } + m_cat(result, m); + } else { + m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, + (struct sockaddr *)&sp->spidx.src, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + error = ENOBUFS; + goto fail; + } + m_cat(result, m); + } + + /* set sadb_address(es) for dest */ + if (sp->spidx.dst_range.start.ss_len > 0) { + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_START, + (struct sockaddr *)&sp->spidx.dst_range.start, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + error = ENOBUFS; + goto fail; + } + m_cat(result, m); + + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_END, + (struct sockaddr *)&sp->spidx.dst_range.end, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + error = ENOBUFS; + goto fail; + } + m_cat(result, m); + } else { + m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, + (struct sockaddr *)&sp->spidx.dst, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + error = ENOBUFS; + goto fail; + } + m_cat(result, m); + } + /* set secpolicy */ m = key_sp2msg(sp); if (!m) { @@ -3499,12 +3619,12 @@ key_spdexpire( goto fail; } m_cat(result, m); - + if ((result->m_flags & M_PKTHDR) == 0) { error = EINVAL; goto fail; } - + if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) { @@ -3512,19 +3632,21 @@ key_spdexpire( goto fail; } } - + result->m_pkthdr.len = 0; - for (m = result; m; m = m->m_next) + for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; - + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); - + PFKEY_UNIT64(result->m_pkthdr.len); + return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); - + fail: - if (result) + if (result) { m_freem(result); + } return error; } @@ -3536,44 +3658,46 @@ fail: */ static struct secashead * key_newsah(struct secasindex *saidx, - ifnet_t ipsec_if, - u_int outgoing_if, - u_int8_t dir) + ifnet_t ipsec_if, + u_int outgoing_if, + u_int8_t dir) { struct secashead *newsah; - + /* sanity check */ - if (saidx == NULL) + if (saidx == NULL) { panic("key_newsaidx: NULL pointer is passed.\n"); - + } + newsah = keydb_newsecashead(); - if (newsah == NULL) + if (newsah == NULL) { return NULL; - + } + bcopy(saidx, &newsah->saidx, sizeof(newsah->saidx)); - + /* remove the ports */ switch (saidx->src.ss_family) { - case 
AF_INET: - ((struct sockaddr_in *)(&newsah->saidx.src))->sin_port = IPSEC_PORT_ANY; - break; - case AF_INET6: - ((struct sockaddr_in6 *)(&newsah->saidx.src))->sin6_port = IPSEC_PORT_ANY; - break; - default: - break; + case AF_INET: + ((struct sockaddr_in *)(&newsah->saidx.src))->sin_port = IPSEC_PORT_ANY; + break; + case AF_INET6: + ((struct sockaddr_in6 *)(&newsah->saidx.src))->sin6_port = IPSEC_PORT_ANY; + break; + default: + break; } switch (saidx->dst.ss_family) { - case AF_INET: - ((struct sockaddr_in *)(&newsah->saidx.dst))->sin_port = IPSEC_PORT_ANY; - break; - case AF_INET6: - ((struct sockaddr_in6 *)(&newsah->saidx.dst))->sin6_port = IPSEC_PORT_ANY; - break; - default: - break; + case AF_INET: + ((struct sockaddr_in *)(&newsah->saidx.dst))->sin_port = IPSEC_PORT_ANY; + break; + case AF_INET6: + ((struct sockaddr_in6 *)(&newsah->saidx.dst))->sin6_port = IPSEC_PORT_ANY; + break; + default: + break; } - + newsah->outgoing_if = outgoing_if; if (ipsec_if) { ifnet_reference(ipsec_if); @@ -3585,7 +3709,7 @@ key_newsah(struct secasindex *saidx, LIST_INSERT_HEAD(&sahtree, newsah, chain); key_start_timehandler(); - return(newsah); + return newsah; } /* @@ -3593,72 +3717,73 @@ key_newsah(struct secasindex *saidx, */ void key_delsah( - struct secashead *sah) + struct secashead *sah) { struct secasvar *sav, *nextsav; u_int stateidx, state; int zombie = 0; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* sanity check */ - if (sah == NULL) + if (sah == NULL) { panic("key_delsah: NULL pointer is passed.\n"); - + } + /* searching all SA registerd in the secindex. */ for (stateidx = 0; - stateidx < _ARRAYLEN(saorder_state_any); - stateidx++) { - + stateidx < _ARRAYLEN(saorder_state_any); + stateidx++) { state = saorder_state_any[stateidx]; for (sav = (struct secasvar *)LIST_FIRST(&sah->savtree[state]); - sav != NULL; - sav = nextsav) { - + sav != NULL; + sav = nextsav) { nextsav = LIST_NEXT(sav, chain); - + if (sav->refcnt > 0) { /* give up to delete this sa */ zombie++; continue; } - + /* sanity check */ KEY_CHKSASTATE(state, sav->state, "key_delsah"); - + key_freesav(sav, KEY_SADB_LOCKED); - + /* remove back pointer */ sav->sah = NULL; sav = NULL; } } - + /* don't delete sah only if there are savs. 
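key_newsah() above zeroes the transport ports in the cached SA index so that SA lookups match on addresses only. A sketch of the same per-family dispatch; PORT_ANY stands in for IPSEC_PORT_ANY, and the casts assume the storage actually holds a sockaddr of the indicated family, as it does in the code above.

#include <netinet/in.h>
#include <sys/socket.h>

#define PORT_ANY 0   /* stand-in for IPSEC_PORT_ANY */

static void
strip_port(struct sockaddr_storage *ss)
{
    switch (ss->ss_family) {
    case AF_INET:
        ((struct sockaddr_in *)ss)->sin_port = PORT_ANY;
        break;
    case AF_INET6:
        ((struct sockaddr_in6 *)ss)->sin6_port = PORT_ANY;
        break;
    default:
        break;   /* unknown family: leave untouched */
    }
}
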
*/ - if (zombie) + if (zombie) { return; - + } + ROUTE_RELEASE(&sah->sa_route); - + if (sah->ipsec_if) { ifnet_release(sah->ipsec_if); sah->ipsec_if = NULL; } - - if (sah->idents) { - KFREE(sah->idents); - } - - if (sah->identd) { - KFREE(sah->identd); - } - + + if (sah->idents) { + KFREE(sah->idents); + } + + if (sah->identd) { + KFREE(sah->identd); + } + /* remove from tree of SA index */ - if (__LIST_CHAINED(sah)) + if (__LIST_CHAINED(sah)) { LIST_REMOVE(sah, chain); - + } + KFREE(sah); - + return; } @@ -3676,21 +3801,22 @@ key_delsah( */ static struct secasvar * key_newsav( - struct mbuf *m, - const struct sadb_msghdr *mhp, - struct secashead *sah, - int *errp, - struct socket *so) + struct mbuf *m, + const struct sadb_msghdr *mhp, + struct secashead *sah, + int *errp, + struct socket *so) { struct secasvar *newsav; const struct sadb_sa *xsa; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* sanity check */ - if (m == NULL || mhp == NULL || mhp->msg == NULL || sah == NULL) + if (m == NULL || mhp == NULL || mhp->msg == NULL || sah == NULL) { panic("key_newsa: NULL pointer is passed.\n"); - + } + KMALLOC_NOWAIT(newsav, struct secasvar *, sizeof(struct secasvar)); if (newsav == NULL) { lck_mtx_unlock(sadb_mutex); @@ -3703,48 +3829,49 @@ key_newsav( } } bzero((caddr_t)newsav, sizeof(struct secasvar)); - + switch (mhp->msg->sadb_msg_type) { - case SADB_GETSPI: - key_setspi(newsav, 0); - + case SADB_GETSPI: + key_setspi(newsav, 0); + #if IPSEC_DOSEQCHECK - /* sync sequence number */ - if (mhp->msg->sadb_msg_seq == 0) - newsav->seq = - (acq_seq = (acq_seq == ~0 ? 1 : ++acq_seq)); - else + /* sync sequence number */ + if (mhp->msg->sadb_msg_seq == 0) { + newsav->seq = + (acq_seq = (acq_seq == ~0 ? 1 : ++acq_seq)); + } else #endif - newsav->seq = mhp->msg->sadb_msg_seq; - break; - - case SADB_ADD: - /* sanity check */ - if (mhp->ext[SADB_EXT_SA] == NULL) { - key_delsav(newsav); - ipseclog((LOG_DEBUG, "key_newsa: invalid message is passed.\n")); - *errp = EINVAL; - return NULL; - } - xsa = (struct sadb_sa *)(void *)mhp->ext[SADB_EXT_SA]; - key_setspi(newsav, xsa->sadb_sa_spi); - newsav->seq = mhp->msg->sadb_msg_seq; - break; - default: + newsav->seq = mhp->msg->sadb_msg_seq; + break; + + case SADB_ADD: + /* sanity check */ + if (mhp->ext[SADB_EXT_SA] == NULL) { key_delsav(newsav); + ipseclog((LOG_DEBUG, "key_newsa: invalid message is passed.\n")); *errp = EINVAL; return NULL; + } + xsa = (struct sadb_sa *)(void *)mhp->ext[SADB_EXT_SA]; + key_setspi(newsav, xsa->sadb_sa_spi); + newsav->seq = mhp->msg->sadb_msg_seq; + break; + default: + key_delsav(newsav); + *errp = EINVAL; + return NULL; } - + if (mhp->ext[SADB_X_EXT_SA2] != NULL) { - if (((struct sadb_x_sa2 *)(void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_alwaysexpire) + if (((struct sadb_x_sa2 *)(void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_alwaysexpire) { newsav->always_expire = 1; + } newsav->flags2 = ((struct sadb_x_sa2 *)(void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_flags; if (newsav->flags2 & SADB_X_EXT_SA2_DELETE_ON_DETACH) { newsav->so = so; } } - + /* copy sav values */ if (mhp->msg->sadb_msg_type != SADB_GETSPI) { *errp = key_setsaval(newsav, m, mhp); @@ -3756,16 +3883,16 @@ key_newsav( /* For get SPI, if has a hard lifetime, apply */ const struct sadb_lifetime *lft0; struct timeval tv; - + lft0 = (struct sadb_lifetime *)(void *)mhp->ext[SADB_EXT_LIFETIME_HARD]; if (lft0 != NULL) { /* make lifetime for CURRENT */ KMALLOC_NOWAIT(newsav->lft_c, struct sadb_lifetime *, - sizeof(struct sadb_lifetime)); + sizeof(struct 
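key_delsah() above refuses to free the header while any SA under it still holds a reference: such SAs are counted as zombies and the function simply returns, leaving the retry to a later pass. A compact sketch of that decision, with toy list types in place of the real LIST machinery:

#include <stddef.h>

struct sa {
    int refcnt;
    struct sa *next;
};

struct sa_head {
    struct sa *savs;
};

/* Returns 1 if the head may be freed now, 0 if a referenced SA keeps it alive. */
static int
head_may_be_freed(struct sa_head *sah)
{
    struct sa *sav, *next;
    int zombie = 0;

    for (sav = sah->savs; sav != NULL; sav = next) {
        next = sav->next;
        if (sav->refcnt > 0) {
            zombie++;   /* give up on this SA for now */
            continue;
        }
        /* an unreferenced SA would be freed here (key_freesav()) */
    }
    return zombie == 0;
}
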
sadb_lifetime)); if (newsav->lft_c == NULL) { lck_mtx_unlock(sadb_mutex); KMALLOC_WAIT(newsav->lft_c, struct sadb_lifetime *, - sizeof(struct sadb_lifetime)); + sizeof(struct sadb_lifetime)); lck_mtx_lock(sadb_mutex); if (newsav->lft_c == NULL) { ipseclog((LOG_DEBUG, "key_newsa: No more memory.\n")); @@ -3774,16 +3901,16 @@ key_newsav( return NULL; } } - + microtime(&tv); - + newsav->lft_c->sadb_lifetime_len = PFKEY_UNIT64(sizeof(struct sadb_lifetime)); newsav->lft_c->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; newsav->lft_c->sadb_lifetime_allocations = 0; newsav->lft_c->sadb_lifetime_bytes = 0; newsav->lft_c->sadb_lifetime_addtime = tv.tv_sec; newsav->lft_c->sadb_lifetime_usetime = 0; - + if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(*lft0)) { ipseclog((LOG_DEBUG, "key_newsa: invalid hard lifetime ext len.\n")); key_delsav(newsav); @@ -3799,24 +3926,24 @@ key_newsav( } } } - + /* reset created */ - { + { struct timeval tv; microtime(&tv); newsav->created = tv.tv_sec; - } - + } + newsav->pid = mhp->msg->sadb_msg_pid; - + /* add to satree */ newsav->sah = sah; newsav->refcnt = 1; newsav->state = SADB_SASTATE_LARVAL; LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_LARVAL], newsav, - secasvar, chain); + secasvar, chain); ipsec_sav_count++; - + return newsav; } @@ -3832,30 +3959,31 @@ key_newsav( */ struct secasvar * key_newsav2(struct secashead *sah, - u_int8_t satype, - u_int8_t alg_auth, - u_int8_t alg_enc, - u_int32_t flags, - u_int8_t replay, - struct sadb_key *key_auth, - u_int16_t key_auth_len, - struct sadb_key *key_enc, - u_int16_t key_enc_len, - u_int16_t natt_port, - u_int32_t seq, - u_int32_t spi, - u_int32_t pid, - struct sadb_lifetime *lifetime_hard, - struct sadb_lifetime *lifetime_soft) + u_int8_t satype, + u_int8_t alg_auth, + u_int8_t alg_enc, + u_int32_t flags, + u_int8_t replay, + struct sadb_key *key_auth, + u_int16_t key_auth_len, + struct sadb_key *key_enc, + u_int16_t key_enc_len, + u_int16_t natt_port, + u_int32_t seq, + u_int32_t spi, + u_int32_t pid, + struct sadb_lifetime *lifetime_hard, + struct sadb_lifetime *lifetime_soft) { struct secasvar *newsav; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* sanity check */ - if (sah == NULL) + if (sah == NULL) { panic("key_newsa: NULL pointer is passed.\n"); - + } + KMALLOC_NOWAIT(newsav, struct secasvar *, sizeof(struct secasvar)); if (newsav == NULL) { lck_mtx_unlock(sadb_mutex); @@ -3867,74 +3995,75 @@ key_newsav2(struct secashead *sah, } } bzero((caddr_t)newsav, sizeof(struct secasvar)); - + #if IPSEC_DOSEQCHECK /* sync sequence number */ - if (seq == 0) + if (seq == 0) { newsav->seq = (acq_seq = (acq_seq == ~0 ? 
1 : ++acq_seq)); - else + } else #endif - newsav->seq = seq; + newsav->seq = seq; key_setspi(newsav, spi); - + if (key_setsaval2(newsav, - satype, - alg_auth, - alg_enc, - flags, - replay, - key_auth, - key_auth_len, - key_enc, - key_enc_len, - natt_port, - seq, - spi, - pid, - lifetime_hard, - lifetime_soft)) { + satype, + alg_auth, + alg_enc, + flags, + replay, + key_auth, + key_auth_len, + key_enc, + key_enc_len, + natt_port, + seq, + spi, + pid, + lifetime_hard, + lifetime_soft)) { key_delsav(newsav); return NULL; } - + /* reset created */ - { + { struct timeval tv; microtime(&tv); newsav->created = tv.tv_sec; - } - + } + newsav->pid = pid; - + /* add to satree */ newsav->sah = sah; newsav->refcnt = 1; if (spi && key_auth && key_auth_len && key_enc && key_enc_len) { newsav->state = SADB_SASTATE_MATURE; LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_MATURE], newsav, - secasvar, chain); + secasvar, chain); } else { newsav->state = SADB_SASTATE_LARVAL; LIST_INSERT_TAIL(&sah->savtree[SADB_SASTATE_LARVAL], newsav, - secasvar, chain); + secasvar, chain); } ipsec_sav_count++; - + return newsav; } static int key_migratesav(struct secasvar *sav, - struct secashead *newsah) + struct secashead *newsah) { if (sav == NULL || newsah == NULL || sav->state != SADB_SASTATE_MATURE) { return EINVAL; } - + /* remove from SA header */ - if (__LIST_CHAINED(sav)) + if (__LIST_CHAINED(sav)) { LIST_REMOVE(sav, chain); - + } + sav->sah = newsah; LIST_INSERT_TAIL(&newsah->savtree[SADB_SASTATE_MATURE], sav, secasvar, chain); return 0; @@ -3945,26 +4074,28 @@ key_migratesav(struct secasvar *sav, */ void key_delsav( - struct secasvar *sav) + struct secasvar *sav) { - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* sanity check */ - if (sav == NULL) + if (sav == NULL) { panic("key_delsav: NULL pointer is passed.\n"); - - if (sav->refcnt > 0) - return; /* can't free */ - + } + + if (sav->refcnt > 0) { + return; /* can't free */ + } /* remove from SA header */ - if (__LIST_CHAINED(sav)) + if (__LIST_CHAINED(sav)) { LIST_REMOVE(sav, chain); + } ipsec_sav_count--; - - if (sav->spihash.le_prev || sav->spihash.le_next) + + if (sav->spihash.le_prev || sav->spihash.le_next) { LIST_REMOVE(sav, spihash); - + } + if (sav->key_auth != NULL) { bzero(_KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth)); KFREE(sav->key_auth); @@ -4000,9 +4131,9 @@ key_delsav( KFREE(sav->iv); sav->iv = NULL; } - + KFREE(sav); - + return; } @@ -4016,30 +4147,32 @@ static struct secashead * key_getsah(struct secasindex *saidx) { struct secashead *sah; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + LIST_FOREACH(sah, &sahtree, chain) { - if (sah->state == SADB_SASTATE_DEAD) + if (sah->state == SADB_SASTATE_DEAD) { continue; - if (key_cmpsaidx(&sah->saidx, saidx, CMP_REQID)) + } + if (key_cmpsaidx(&sah->saidx, saidx, CMP_REQID)) { return sah; + } } - + return NULL; } struct secashead * -key_newsah2 (struct secasindex *saidx, - u_int8_t dir) +key_newsah2(struct secasindex *saidx, + u_int8_t dir) { struct secashead *sah; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + sah = key_getsah(saidx); if (!sah) { - return(key_newsah(saidx, NULL, 0, dir)); + return key_newsah(saidx, NULL, 0, dir); } return sah; } @@ -4053,46 +4186,49 @@ key_newsah2 (struct secasindex *saidx, */ static struct secasvar * key_checkspidup( - struct secasindex *saidx, - u_int32_t spi) + struct secasindex *saidx, + u_int32_t spi) { struct secasvar *sav; u_int stateidx, state; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* check address family */ if 
(saidx->src.ss_family != saidx->dst.ss_family) { ipseclog((LOG_DEBUG, "key_checkspidup: address family mismatched.\n")); return NULL; } - + /* check all SAD */ LIST_FOREACH(sav, &spihash[SPIHASH(spi)], spihash) { - if (sav->spi != spi) + if (sav->spi != spi) { continue; + } for (stateidx = 0; - stateidx < _ARRAYLEN(saorder_state_alive); - stateidx++) { + stateidx < _ARRAYLEN(saorder_state_alive); + stateidx++) { state = saorder_state_alive[stateidx]; if (sav->state == state && - key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) + key_ismyaddr((struct sockaddr *)&sav->sah->saidx.dst)) { return sav; + } } } - + return NULL; } static void key_setspi( - struct secasvar *sav, - u_int32_t spi) + struct secasvar *sav, + u_int32_t spi) { LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); sav->spi = spi; - if (sav->spihash.le_prev || sav->spihash.le_next) + if (sav->spihash.le_prev || sav->spihash.le_next) { LIST_REMOVE(sav, spihash); + } LIST_INSERT_HEAD(&spihash[SPIHASH(spi)], sav, spihash); } @@ -4105,20 +4241,22 @@ key_setspi( */ static struct secasvar * key_getsavbyspi( - struct secashead *sah, - u_int32_t spi) + struct secashead *sah, + u_int32_t spi) { struct secasvar *sav, *match; u_int stateidx, state, matchidx; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); match = NULL; matchidx = _ARRAYLEN(saorder_state_alive); LIST_FOREACH(sav, &spihash[SPIHASH(spi)], spihash) { - if (sav->spi != spi) + if (sav->spi != spi) { continue; - if (sav->sah != sah) + } + if (sav->sah != sah) { continue; + } for (stateidx = 0; stateidx < matchidx; stateidx++) { state = saorder_state_alive[stateidx]; if (sav->state == state) { @@ -4128,7 +4266,7 @@ key_getsavbyspi( } } } - + return match; } @@ -4142,22 +4280,23 @@ key_getsavbyspi( */ static int key_setsaval( - struct secasvar *sav, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct secasvar *sav, + struct mbuf *m, + const struct sadb_msghdr *mhp) { #if IPSEC_ESP const struct esp_algorithm *algo; #endif int error = 0; struct timeval tv; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* sanity check */ - if (m == NULL || mhp == NULL || mhp->msg == NULL) + if (m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_setsaval: NULL pointer is passed.\n"); - + } + /* initialization */ sav->replay = NULL; sav->key_auth = NULL; @@ -4171,29 +4310,29 @@ key_setsaval( sav->remote_ike_port = 0; sav->natt_last_activity = natt_now; sav->natt_encapsulated_src_port = 0; - + /* SA */ if (mhp->ext[SADB_EXT_SA] != NULL) { const struct sadb_sa *sa0; - + sa0 = (struct sadb_sa *)(void *)mhp->ext[SADB_EXT_SA]; if (mhp->extlen[SADB_EXT_SA] < sizeof(*sa0)) { ipseclog((LOG_DEBUG, "key_setsaval: invalid message size.\n")); error = EINVAL; goto fail; } - + sav->alg_auth = sa0->sadb_sa_auth; sav->alg_enc = sa0->sadb_sa_encrypt; sav->flags = sa0->sadb_sa_flags; - + /* * Verify that a nat-traversal port was specified if * the nat-traversal flag is set. 
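key_setspi() above moves an SA between hash buckets keyed on the SPI, which is what lets key_checkspidup() and key_getsavbyspi() walk a single bucket instead of the whole SAD. A sketch with a standard <sys/queue.h> LIST; the hash function and bucket count here are illustrative, not the real SPIHASH definition.

#include <stdint.h>
#include <sys/queue.h>

#define NBUCKETS 128
#define SPIHASH(spi) ((spi) % NBUCKETS)   /* illustrative hash */

struct sa {
    uint32_t spi;
    LIST_ENTRY(sa) spihash;
};
LIST_HEAD(spibucket, sa);
static struct spibucket spitable[NBUCKETS];

/* Assumes sa was zeroed at allocation, as key_newsav() bzero()s it. */
static void
set_spi(struct sa *sav, uint32_t spi)
{
    sav->spi = spi;
    if (sav->spihash.le_prev || sav->spihash.le_next) {
        LIST_REMOVE(sav, spihash);   /* unhook from the old bucket */
    }
    LIST_INSERT_HEAD(&spitable[SPIHASH(spi)], sav, spihash);
}
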
*/ if ((sav->flags & SADB_X_EXT_NATT) != 0) { if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa_2) || - ((const struct sadb_sa_2*)(sa0))->sadb_sa_natt_port == 0) { + ((const struct sadb_sa_2*)(sa0))->sadb_sa_natt_port == 0) { ipseclog((LOG_DEBUG, "key_setsaval: natt port not set.\n")); error = EINVAL; goto fail; @@ -4202,17 +4341,19 @@ key_setsaval( sav->natt_interval = ((const struct sadb_sa_2*)(sa0))->sadb_sa_natt_interval; sav->natt_offload_interval = ((const struct sadb_sa_2*)(sa0))->sadb_sa_natt_offload_interval; } - + /* * Verify if SADB_X_EXT_NATT_MULTIPLEUSERS flag is set that * SADB_X_EXT_NATT is set and SADB_X_EXT_NATT_KEEPALIVE is not * set (we're not behind nat) - otherwise clear it. */ - if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) + if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { if ((sav->flags & SADB_X_EXT_NATT) == 0 || - (sav->flags & SADB_X_EXT_NATT_KEEPALIVE) != 0) + (sav->flags & SADB_X_EXT_NATT_KEEPALIVE) != 0) { sav->flags &= ~SADB_X_EXT_NATT_MULTIPLEUSERS; - + } + } + /* replay window */ if ((sa0->sadb_sa_flags & SADB_X_EXT_OLD) == 0) { sav->replay = keydb_newsecreplay(sa0->sadb_sa_replay); @@ -4223,15 +4364,15 @@ key_setsaval( } } } - + /* Authentication keys */ if (mhp->ext[SADB_EXT_KEY_AUTH] != NULL) { const struct sadb_key *key0; int len; - + key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_AUTH]; len = mhp->extlen[SADB_EXT_KEY_AUTH]; - + error = 0; if (len < sizeof(*key0)) { ipseclog((LOG_DEBUG, "key_setsaval: invalid auth key ext len. len = %d\n", len)); @@ -4239,22 +4380,23 @@ key_setsaval( goto fail; } switch (mhp->msg->sadb_msg_satype) { - case SADB_SATYPE_AH: - case SADB_SATYPE_ESP: - if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) && - sav->alg_auth != SADB_X_AALG_NULL) - error = EINVAL; - break; - case SADB_X_SATYPE_IPCOMP: - default: + case SADB_SATYPE_AH: + case SADB_SATYPE_ESP: + if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) && + sav->alg_auth != SADB_X_AALG_NULL) { error = EINVAL; - break; + } + break; + case SADB_X_SATYPE_IPCOMP: + default: + error = EINVAL; + break; } if (error) { ipseclog((LOG_DEBUG, "key_setsaval: invalid key_auth values.\n")); goto fail; } - + sav->key_auth = (struct sadb_key *)key_newbuf(key0, len); if (sav->key_auth == NULL) { ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); @@ -4262,15 +4404,15 @@ key_setsaval( goto fail; } } - + /* Encryption key */ if (mhp->ext[SADB_EXT_KEY_ENCRYPT] != NULL) { const struct sadb_key *key0; int len; - + key0 = (const struct sadb_key *)mhp->ext[SADB_EXT_KEY_ENCRYPT]; len = mhp->extlen[SADB_EXT_KEY_ENCRYPT]; - + error = 0; if (len < sizeof(*key0)) { ipseclog((LOG_DEBUG, "key_setsaval: invalid encryption key ext len. 
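The SADB_X_EXT_NATT_MULTIPLEUSERS pruning in key_setsaval() above encodes a three-flag invariant: the flag survives only when NAT-T is on and keepalives are off (i.e. the peer, not this host, sits behind the NAT); otherwise it is silently cleared. A sketch with made-up bit values, since the real SADB_X_EXT_* constants live in net/pfkeyv2.h:

#include <stdint.h>

/* Illustrative bit values, not the real SADB_X_EXT_* constants. */
#define F_NATT            0x0001u
#define F_NATT_KEEPALIVE  0x0002u
#define F_NATT_MULTIUSERS 0x0004u

static uint32_t
sanitize_natt_flags(uint32_t flags)
{
    if ((flags & F_NATT_MULTIUSERS) != 0) {
        if ((flags & F_NATT) == 0 ||
            (flags & F_NATT_KEEPALIVE) != 0) {
            flags &= ~F_NATT_MULTIUSERS;   /* silently dropped, as above */
        }
    }
    return flags;
}
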
len = %d\n", len)); @@ -4278,111 +4420,114 @@ key_setsaval( goto fail; } switch (mhp->msg->sadb_msg_satype) { - case SADB_SATYPE_ESP: - if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) && - sav->alg_enc != SADB_EALG_NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: invalid ESP algorithm.\n")); - error = EINVAL; - break; - } - sav->key_enc = (struct sadb_key *)key_newbuf(key0, len); - if (sav->key_enc == NULL) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - break; - case SADB_X_SATYPE_IPCOMP: - if (len != PFKEY_ALIGN8(sizeof(struct sadb_key))) - error = EINVAL; - sav->key_enc = NULL; /*just in case*/ - break; - case SADB_SATYPE_AH: - default: + case SADB_SATYPE_ESP: + if (len == PFKEY_ALIGN8(sizeof(struct sadb_key)) && + sav->alg_enc != SADB_EALG_NULL) { + ipseclog((LOG_DEBUG, "key_setsaval: invalid ESP algorithm.\n")); error = EINVAL; break; + } + sav->key_enc = (struct sadb_key *)key_newbuf(key0, len); + if (sav->key_enc == NULL) { + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); + error = ENOBUFS; + goto fail; + } + break; + case SADB_X_SATYPE_IPCOMP: + if (len != PFKEY_ALIGN8(sizeof(struct sadb_key))) { + error = EINVAL; + } + sav->key_enc = NULL; /*just in case*/ + break; + case SADB_SATYPE_AH: + default: + error = EINVAL; + break; } if (error) { ipseclog((LOG_DEBUG, "key_setsaval: invalid key_enc value.\n")); goto fail; } } - + /* set iv */ sav->ivlen = 0; - + switch (mhp->msg->sadb_msg_satype) { - case SADB_SATYPE_ESP: + case SADB_SATYPE_ESP: #if IPSEC_ESP - algo = esp_algorithm_lookup(sav->alg_enc); - if (algo && algo->ivlen) - sav->ivlen = (*algo->ivlen)(algo, sav); - if (sav->ivlen == 0) - break; - KMALLOC_NOWAIT(sav->iv, caddr_t, sav->ivlen); + algo = esp_algorithm_lookup(sav->alg_enc); + if (algo && algo->ivlen) { + sav->ivlen = (*algo->ivlen)(algo, sav); + } + if (sav->ivlen == 0) { + break; + } + KMALLOC_NOWAIT(sav->iv, caddr_t, sav->ivlen); + if (sav->iv == 0) { + lck_mtx_unlock(sadb_mutex); + KMALLOC_WAIT(sav->iv, caddr_t, sav->ivlen); + lck_mtx_lock(sadb_mutex); if (sav->iv == 0) { - lck_mtx_unlock(sadb_mutex); - KMALLOC_WAIT(sav->iv, caddr_t, sav->ivlen); - lck_mtx_lock(sadb_mutex); - if (sav->iv == 0) { - ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); - error = ENOBUFS; - goto fail; - } - } - - /* initialize */ - if (sav->alg_enc == SADB_X_EALG_AES_GCM) { - bzero(sav->iv, sav->ivlen); - } else { - key_randomfill(sav->iv, sav->ivlen); + ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); + error = ENOBUFS; + goto fail; } + } + + /* initialize */ + if (sav->alg_enc == SADB_X_EALG_AES_GCM) { + bzero(sav->iv, sav->ivlen); + } else { + key_randomfill(sav->iv, sav->ivlen); + } #endif - break; - case SADB_SATYPE_AH: - case SADB_X_SATYPE_IPCOMP: - break; - default: - ipseclog((LOG_DEBUG, "key_setsaval: invalid SA type.\n")); - error = EINVAL; - goto fail; + break; + case SADB_SATYPE_AH: + case SADB_X_SATYPE_IPCOMP: + break; + default: + ipseclog((LOG_DEBUG, "key_setsaval: invalid SA type.\n")); + error = EINVAL; + goto fail; } - + /* reset created */ microtime(&tv); sav->created = tv.tv_sec; - + /* make lifetime for CURRENT */ KMALLOC_NOWAIT(sav->lft_c, struct sadb_lifetime *, - sizeof(struct sadb_lifetime)); + sizeof(struct sadb_lifetime)); if (sav->lft_c == NULL) { lck_mtx_unlock(sadb_mutex); KMALLOC_WAIT(sav->lft_c, struct sadb_lifetime *, - sizeof(struct sadb_lifetime)); - lck_mtx_lock(sadb_mutex); + sizeof(struct sadb_lifetime)); + lck_mtx_lock(sadb_mutex); if (sav->lft_c == NULL) { 
ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } } - + microtime(&tv); - + sav->lft_c->sadb_lifetime_len = - PFKEY_UNIT64(sizeof(struct sadb_lifetime)); + PFKEY_UNIT64(sizeof(struct sadb_lifetime)); sav->lft_c->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; sav->lft_c->sadb_lifetime_allocations = 0; sav->lft_c->sadb_lifetime_bytes = 0; sav->lft_c->sadb_lifetime_addtime = tv.tv_sec; sav->lft_c->sadb_lifetime_usetime = 0; - + /* lifetimes for HARD and SOFT */ - { + { const struct sadb_lifetime *lft0; - + lft0 = (struct sadb_lifetime *) - (void *)mhp->ext[SADB_EXT_LIFETIME_HARD]; + (void *)mhp->ext[SADB_EXT_LIFETIME_HARD]; if (lft0 != NULL) { if (mhp->extlen[SADB_EXT_LIFETIME_HARD] < sizeof(*lft0)) { ipseclog((LOG_DEBUG, "key_setsaval: invalid hard lifetime ext len.\n")); @@ -4390,7 +4535,7 @@ key_setsaval( goto fail; } sav->lft_h = (struct sadb_lifetime *)key_newbuf(lft0, - sizeof(*lft0)); + sizeof(*lft0)); if (sav->lft_h == NULL) { ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; @@ -4398,9 +4543,9 @@ key_setsaval( } /* to be initialize ? */ } - + lft0 = (struct sadb_lifetime *) - (void *)mhp->ext[SADB_EXT_LIFETIME_SOFT]; + (void *)mhp->ext[SADB_EXT_LIFETIME_SOFT]; if (lft0 != NULL) { if (mhp->extlen[SADB_EXT_LIFETIME_SOFT] < sizeof(*lft0)) { ipseclog((LOG_DEBUG, "key_setsaval: invalid soft lifetime ext len.\n")); @@ -4408,7 +4553,7 @@ key_setsaval( goto fail; } sav->lft_s = (struct sadb_lifetime *)key_newbuf(lft0, - sizeof(*lft0)); + sizeof(*lft0)); if (sav->lft_s == NULL) { ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; @@ -4416,10 +4561,10 @@ key_setsaval( } /* to be initialize ? */ } - } - + } + return 0; - + fail: /* initialization */ if (sav->replay != NULL) { @@ -4457,7 +4602,7 @@ fail: KFREE(sav->lft_s); sav->lft_s = NULL; } - + return error; } @@ -4471,30 +4616,30 @@ fail: */ int key_setsaval2(struct secasvar *sav, - u_int8_t satype, - u_int8_t alg_auth, - u_int8_t alg_enc, - u_int32_t flags, - u_int8_t replay, - struct sadb_key *key_auth, - u_int16_t key_auth_len, - struct sadb_key *key_enc, - u_int16_t key_enc_len, - u_int16_t natt_port, - u_int32_t seq, - u_int32_t spi, - u_int32_t pid, - struct sadb_lifetime *lifetime_hard, - struct sadb_lifetime *lifetime_soft) + u_int8_t satype, + u_int8_t alg_auth, + u_int8_t alg_enc, + u_int32_t flags, + u_int8_t replay, + struct sadb_key *key_auth, + u_int16_t key_auth_len, + struct sadb_key *key_enc, + u_int16_t key_enc_len, + u_int16_t natt_port, + u_int32_t seq, + u_int32_t spi, + u_int32_t pid, + struct sadb_lifetime *lifetime_hard, + struct sadb_lifetime *lifetime_soft) { #if IPSEC_ESP const struct esp_algorithm *algo; #endif int error = 0; struct timeval tv; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* initialization */ sav->replay = NULL; sav->key_auth = NULL; @@ -4508,14 +4653,14 @@ key_setsaval2(struct secasvar *sav, sav->remote_ike_port = 0; sav->natt_last_activity = natt_now; sav->natt_encapsulated_src_port = 0; - + sav->alg_auth = alg_auth; sav->alg_enc = alg_enc; sav->flags = flags; sav->pid = pid; sav->seq = seq; key_setspi(sav, htonl(spi)); - + /* * Verify that a nat-traversal port was specified if * the nat-traversal flag is set. @@ -4528,17 +4673,19 @@ key_setsaval2(struct secasvar *sav, } sav->remote_ike_port = natt_port; } - + /* * Verify if SADB_X_EXT_NATT_MULTIPLEUSERS flag is set that * SADB_X_EXT_NATT is set and SADB_X_EXT_NATT_KEEPALIVE is not * set (we're not behind nat) - otherwise clear it. 
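The KMALLOC_NOWAIT / KMALLOC_WAIT dance repeated above (for sav->iv and sav->lft_c, among others) is the standard way these functions allocate while holding sadb_mutex: try a non-blocking allocation first, and only if it fails drop the lock, allocate with blocking allowed, and re-take the lock. A pthread sketch of the shape of that pattern; note the revalidation caveat, which the kernel code handles by re-checking the pointer after re-locking.

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t sadb_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for KMALLOC_NOWAIT / KMALLOC_WAIT. */
static void *alloc_nowait(size_t n) { return malloc(n); }
static void *alloc_wait(size_t n)   { return malloc(n); }

/* Caller holds sadb_lock; this may drop and re-take it. */
static void *
alloc_locked(size_t n)
{
    void *p = alloc_nowait(n);

    if (p == NULL) {
        pthread_mutex_unlock(&sadb_lock);
        p = alloc_wait(n);            /* may sleep */
        pthread_mutex_lock(&sadb_lock);
        /* NB: any state read before the drop must be revalidated here. */
    }
    return p;
}
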
*/ - if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) + if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { if ((sav->flags & SADB_X_EXT_NATT) == 0 || - (sav->flags & SADB_X_EXT_NATT_KEEPALIVE) != 0) + (sav->flags & SADB_X_EXT_NATT_KEEPALIVE) != 0) { sav->flags &= ~SADB_X_EXT_NATT_MULTIPLEUSERS; - + } + } + /* replay window */ if ((flags & SADB_X_EXT_OLD) == 0) { sav->replay = keydb_newsecreplay(replay); @@ -4548,7 +4695,7 @@ key_setsaval2(struct secasvar *sav, goto fail; } } - + /* Authentication keys */ sav->key_auth = (__typeof__(sav->key_auth))key_newbuf(key_auth, key_auth_len); if (sav->key_auth == NULL) { @@ -4556,7 +4703,7 @@ key_setsaval2(struct secasvar *sav, error = ENOBUFS; goto fail; } - + /* Encryption key */ sav->key_enc = (__typeof__(sav->key_enc))key_newbuf(key_enc, key_enc_len); if (sav->key_enc == NULL) { @@ -4564,15 +4711,16 @@ key_setsaval2(struct secasvar *sav, error = ENOBUFS; goto fail; } - + /* set iv */ sav->ivlen = 0; - + if (satype == SADB_SATYPE_ESP) { #if IPSEC_ESP algo = esp_algorithm_lookup(sav->alg_enc); - if (algo && algo->ivlen) + if (algo && algo->ivlen) { sav->ivlen = (*algo->ivlen)(algo, sav); + } if (sav->ivlen != 0) { KMALLOC_NOWAIT(sav->iv, caddr_t, sav->ivlen); if (sav->iv == 0) { @@ -4594,54 +4742,54 @@ key_setsaval2(struct secasvar *sav, } #endif } - + /* reset created */ microtime(&tv); sav->created = tv.tv_sec; - + /* make lifetime for CURRENT */ KMALLOC_NOWAIT(sav->lft_c, struct sadb_lifetime *, - sizeof(struct sadb_lifetime)); + sizeof(struct sadb_lifetime)); if (sav->lft_c == NULL) { lck_mtx_unlock(sadb_mutex); KMALLOC_WAIT(sav->lft_c, struct sadb_lifetime *, - sizeof(struct sadb_lifetime)); - lck_mtx_lock(sadb_mutex); + sizeof(struct sadb_lifetime)); + lck_mtx_lock(sadb_mutex); if (sav->lft_c == NULL) { ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } } - + microtime(&tv); - + sav->lft_c->sadb_lifetime_len = - PFKEY_UNIT64(sizeof(struct sadb_lifetime)); + PFKEY_UNIT64(sizeof(struct sadb_lifetime)); sav->lft_c->sadb_lifetime_exttype = SADB_EXT_LIFETIME_CURRENT; sav->lft_c->sadb_lifetime_allocations = 0; sav->lft_c->sadb_lifetime_bytes = 0; sav->lft_c->sadb_lifetime_addtime = tv.tv_sec; sav->lft_c->sadb_lifetime_usetime = 0; - + /* lifetimes for HARD and SOFT */ sav->lft_h = (__typeof__(sav->lft_h))key_newbuf(lifetime_hard, - sizeof(*lifetime_hard)); + sizeof(*lifetime_hard)); if (sav->lft_h == NULL) { ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } sav->lft_s = (__typeof__(sav->lft_s))key_newbuf(lifetime_soft, - sizeof(*lifetime_soft)); + sizeof(*lifetime_soft)); if (sav->lft_s == NULL) { ipseclog((LOG_DEBUG, "key_setsaval: No more memory.\n")); error = ENOBUFS; goto fail; } - + return 0; - + fail: /* initialization */ if (sav->replay != NULL) { @@ -4679,7 +4827,7 @@ fail: KFREE(sav->lft_s); sav->lft_s = NULL; } - + return error; } @@ -4690,154 +4838,159 @@ fail: */ static int key_mature( - struct secasvar *sav) + struct secasvar *sav) { int mature; - int checkmask = 0; /* 2^0: ealg 2^1: aalg 2^2: calg */ - int mustmask = 0; /* 2^0: ealg 2^1: aalg 2^2: calg */ - + int checkmask = 0; /* 2^0: ealg 2^1: aalg 2^2: calg */ + int mustmask = 0; /* 2^0: ealg 2^1: aalg 2^2: calg */ + mature = 0; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* check SPI value */ switch (sav->sah->saidx.proto) { - case IPPROTO_ESP: - case IPPROTO_AH: - - /* No reason to test if this is >= 0, because ntohl(sav->spi) is unsigned. 
*/ - if (ntohl(sav->spi) <= 255) { - ipseclog((LOG_DEBUG, - "key_mature: illegal range of SPI %u.\n", - (u_int32_t)ntohl(sav->spi))); - return EINVAL; - } - break; + case IPPROTO_ESP: + case IPPROTO_AH: + + /* No reason to test if this is >= 0, because ntohl(sav->spi) is unsigned. */ + if (ntohl(sav->spi) <= 255) { + ipseclog((LOG_DEBUG, + "key_mature: illegal range of SPI %u.\n", + (u_int32_t)ntohl(sav->spi))); + return EINVAL; + } + break; } - + /* check satype */ switch (sav->sah->saidx.proto) { - case IPPROTO_ESP: - /* check flags */ - if ((sav->flags & SADB_X_EXT_OLD) - && (sav->flags & SADB_X_EXT_DERIV)) { - ipseclog((LOG_DEBUG, "key_mature: " - "invalid flag (derived) given to old-esp.\n")); - return EINVAL; - } - if (sav->alg_auth == SADB_AALG_NONE) - checkmask = 1; - else - checkmask = 3; - mustmask = 1; - break; - case IPPROTO_AH: - /* check flags */ - if (sav->flags & SADB_X_EXT_DERIV) { - ipseclog((LOG_DEBUG, "key_mature: " - "invalid flag (derived) given to AH SA.\n")); - return EINVAL; - } - if (sav->alg_enc != SADB_EALG_NONE) { - ipseclog((LOG_DEBUG, "key_mature: " - "protocol and algorithm mismated.\n")); - return(EINVAL); - } - checkmask = 2; - mustmask = 2; - break; - case IPPROTO_IPCOMP: - if (sav->alg_auth != SADB_AALG_NONE) { - ipseclog((LOG_DEBUG, "key_mature: " - "protocol and algorithm mismated.\n")); - return(EINVAL); - } - if ((sav->flags & SADB_X_EXT_RAWCPI) == 0 - && ntohl(sav->spi) >= 0x10000) { - ipseclog((LOG_DEBUG, "key_mature: invalid cpi for IPComp.\n")); - return(EINVAL); - } - checkmask = 4; - mustmask = 4; - break; - default: - ipseclog((LOG_DEBUG, "key_mature: Invalid satype.\n")); - return EPROTONOSUPPORT; + case IPPROTO_ESP: + /* check flags */ + if ((sav->flags & SADB_X_EXT_OLD) + && (sav->flags & SADB_X_EXT_DERIV)) { + ipseclog((LOG_DEBUG, "key_mature: " + "invalid flag (derived) given to old-esp.\n")); + return EINVAL; + } + if (sav->alg_auth == SADB_AALG_NONE) { + checkmask = 1; + } else { + checkmask = 3; + } + mustmask = 1; + break; + case IPPROTO_AH: + /* check flags */ + if (sav->flags & SADB_X_EXT_DERIV) { + ipseclog((LOG_DEBUG, "key_mature: " + "invalid flag (derived) given to AH SA.\n")); + return EINVAL; + } + if (sav->alg_enc != SADB_EALG_NONE) { + ipseclog((LOG_DEBUG, "key_mature: " + "protocol and algorithm mismated.\n")); + return EINVAL; + } + checkmask = 2; + mustmask = 2; + break; + case IPPROTO_IPCOMP: + if (sav->alg_auth != SADB_AALG_NONE) { + ipseclog((LOG_DEBUG, "key_mature: " + "protocol and algorithm mismated.\n")); + return EINVAL; + } + if ((sav->flags & SADB_X_EXT_RAWCPI) == 0 + && ntohl(sav->spi) >= 0x10000) { + ipseclog((LOG_DEBUG, "key_mature: invalid cpi for IPComp.\n")); + return EINVAL; + } + checkmask = 4; + mustmask = 4; + break; + default: + ipseclog((LOG_DEBUG, "key_mature: Invalid satype.\n")); + return EPROTONOSUPPORT; } - + /* check authentication algorithm */ if ((checkmask & 2) != 0) { const struct ah_algorithm *algo; int keylen; - + algo = ah_algorithm_lookup(sav->alg_auth); if (!algo) { - ipseclog((LOG_DEBUG,"key_mature: " - "unknown authentication algorithm.\n")); + ipseclog((LOG_DEBUG, "key_mature: " + "unknown authentication algorithm.\n")); return EINVAL; } - + /* algorithm-dependent check */ - if (sav->key_auth) + if (sav->key_auth) { keylen = sav->key_auth->sadb_key_bits; - else + } else { keylen = 0; + } if (keylen < algo->keymin || algo->keymax < keylen) { ipseclog((LOG_DEBUG, - "key_mature: invalid AH key length %d " - "(%d-%d allowed)\n", - keylen, algo->keymin, algo->keymax)); + "key_mature: 
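/*
 * key_mature() drives its per-algorithm validation off two bitmasks, with
 * bit0 = encryption alg, bit1 = auth alg, bit2 = compression alg (the
 * "2^0: ealg 2^1: aalg 2^2: calg" comment above): ESP always checks the
 * encryption algorithm and additionally the auth algorithm unless it is
 * AALG_NONE; AH checks and requires only the auth algorithm; IPComp only
 * the compression algorithm. A compact restatement of that table (sketch):
 */
struct mature_masks { int checkmask, mustmask; };

static struct mature_masks
masks_for(int proto, int has_auth_alg)
{
	switch (proto) {
	case 50:  /* IPPROTO_ESP */
		return (struct mature_masks){ has_auth_alg ? 3 : 1, 1 };
	case 51:  /* IPPROTO_AH */
		return (struct mature_masks){ 2, 2 };
	case 108: /* IPPROTO_IPCOMP */
		return (struct mature_masks){ 4, 4 };
	default:
		return (struct mature_masks){ 0, 0 }; /* EPROTONOSUPPORT upstream */
	}
}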
invalid AH key length %d " + "(%d-%d allowed)\n", + keylen, algo->keymin, algo->keymax)); return EINVAL; } - + if (algo->mature) { if ((*algo->mature)(sav)) { /* message generated in per-algorithm function*/ return EINVAL; - } else + } else { mature = SADB_SATYPE_AH; + } } - - if ((mustmask & 2) != 0 && mature != SADB_SATYPE_AH) { + + if ((mustmask & 2) != 0 && mature != SADB_SATYPE_AH) { ipseclog((LOG_DEBUG, "key_mature: no satisfy algorithm for AH\n")); return EINVAL; } } - + /* check encryption algorithm */ if ((checkmask & 1) != 0) { #if IPSEC_ESP const struct esp_algorithm *algo; int keylen; - + algo = esp_algorithm_lookup(sav->alg_enc); if (!algo) { ipseclog((LOG_DEBUG, "key_mature: unknown encryption algorithm.\n")); return EINVAL; } - + /* algorithm-dependent check */ - if (sav->key_enc) + if (sav->key_enc) { keylen = sav->key_enc->sadb_key_bits; - else + } else { keylen = 0; + } if (keylen < algo->keymin || algo->keymax < keylen) { ipseclog((LOG_DEBUG, - "key_mature: invalid ESP key length %d " - "(%d-%d allowed)\n", - keylen, algo->keymin, algo->keymax)); + "key_mature: invalid ESP key length %d " + "(%d-%d allowed)\n", + keylen, algo->keymin, algo->keymax)); return EINVAL; } - + if (algo->mature) { if ((*algo->mature)(sav)) { /* message generated in per-algorithm function*/ return EINVAL; - } else + } else { mature = SADB_SATYPE_ESP; + } } - - if ((mustmask & 1) != 0 && mature != SADB_SATYPE_ESP) { + + if ((mustmask & 1) != 0 && mature != SADB_SATYPE_ESP) { ipseclog((LOG_DEBUG, "key_mature: no satisfy algorithm for ESP\n")); return EINVAL; } @@ -4846,11 +4999,11 @@ key_mature( return EINVAL; #endif } - + /* check compression algorithm */ if ((checkmask & 4) != 0) { const struct ipcomp_algorithm *algo; - + /* algorithm-dependent check */ algo = ipcomp_algorithm_lookup(sav->alg_enc); if (!algo) { @@ -4858,9 +5011,9 @@ key_mature( return EINVAL; } } - + key_sa_chgstate(sav, SADB_SASTATE_MATURE); - + return 0; } @@ -4869,11 +5022,11 @@ key_mature( */ static struct mbuf * key_setdumpsa( - struct secasvar *sav, - u_int8_t type, - u_int8_t satype, - u_int32_t seq, - u_int32_t pid) + struct secasvar *sav, + u_int8_t type, + u_int8_t satype, + u_int32_t seq, + u_int32_t pid) { struct mbuf *result = NULL, *tres = NULL, *m; int l = 0; @@ -4887,136 +5040,153 @@ key_setdumpsa( SADB_EXT_KEY_ENCRYPT, SADB_EXT_IDENTITY_SRC, SADB_EXT_IDENTITY_DST, SADB_EXT_SENSITIVITY, }; - + m = key_setsadbmsg(type, 0, satype, seq, pid, sav->refcnt); - if (m == NULL) + if (m == NULL) { goto fail; + } result = m; - - for (i = sizeof(dumporder)/sizeof(dumporder[0]) - 1; i >= 0; i--) { + + for (i = sizeof(dumporder) / sizeof(dumporder[0]) - 1; i >= 0; i--) { m = NULL; p = NULL; switch (dumporder[i]) { - case SADB_EXT_SA: - m = key_setsadbsa(sav); - if (!m) - goto fail; - break; - - case SADB_X_EXT_SA2: - m = key_setsadbxsa2(sav->sah->saidx.mode, - sav->replay ? 
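/*
 * Both the AH branch above and the ESP branch below validate the supplied
 * key length in *bits* (sadb_key_bits) against the algorithm descriptor's
 * inclusive [keymin, keymax] window, treating a missing key as length 0.
 * The predicate in isolation (e.g. the KAME HMAC-SHA1 entry advertises
 * keymin == keymax == 160, so only exactly 160-bit keys pass):
 */
static int
keylen_ok(int keybits, int keymin, int keymax)
{
	return keybits >= keymin && keybits <= keymax;
}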
sav->replay->count : 0, - sav->sah->saidx.reqid, - sav->flags2); - if (!m) - goto fail; - break; - - case SADB_EXT_ADDRESS_SRC: - m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, - (struct sockaddr *)&sav->sah->saidx.src, - FULLMASK, IPSEC_ULPROTO_ANY); - if (!m) - goto fail; - break; - - case SADB_EXT_ADDRESS_DST: - m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, - (struct sockaddr *)&sav->sah->saidx.dst, - FULLMASK, IPSEC_ULPROTO_ANY); - if (!m) - goto fail; - break; - - case SADB_EXT_KEY_AUTH: - if (!sav->key_auth) - continue; - l = PFKEY_UNUNIT64(sav->key_auth->sadb_key_len); - p = sav->key_auth; - break; - - case SADB_EXT_KEY_ENCRYPT: - if (!sav->key_enc) - continue; - l = PFKEY_UNUNIT64(sav->key_enc->sadb_key_len); - p = sav->key_enc; - break; - - case SADB_EXT_LIFETIME_CURRENT: - if (!sav->lft_c) - continue; - l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_c)->sadb_ext_len); - p = sav->lft_c; - break; - - case SADB_EXT_LIFETIME_HARD: - if (!sav->lft_h) - continue; - l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_h)->sadb_ext_len); - p = sav->lft_h; - break; - - case SADB_EXT_LIFETIME_SOFT: - if (!sav->lft_s) - continue; - l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_s)->sadb_ext_len); - p = sav->lft_s; - break; - - case SADB_EXT_ADDRESS_PROXY: - case SADB_EXT_IDENTITY_SRC: - case SADB_EXT_IDENTITY_DST: - /* XXX: should we brought from SPD ? */ - case SADB_EXT_SENSITIVITY: - default: + case SADB_EXT_SA: + m = key_setsadbsa(sav); + if (!m) { + goto fail; + } + break; + + case SADB_X_EXT_SA2: + m = key_setsadbxsa2(sav->sah->saidx.mode, + sav->replay ? sav->replay->count : 0, + sav->sah->saidx.reqid, + sav->flags2); + if (!m) { + goto fail; + } + break; + + case SADB_EXT_ADDRESS_SRC: + m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, + (struct sockaddr *)&sav->sah->saidx.src, + FULLMASK, IPSEC_ULPROTO_ANY); + if (!m) { + goto fail; + } + break; + + case SADB_EXT_ADDRESS_DST: + m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, + (struct sockaddr *)&sav->sah->saidx.dst, + FULLMASK, IPSEC_ULPROTO_ANY); + if (!m) { + goto fail; + } + break; + + case SADB_EXT_KEY_AUTH: + if (!sav->key_auth) { continue; + } + l = PFKEY_UNUNIT64(sav->key_auth->sadb_key_len); + p = sav->key_auth; + break; + + case SADB_EXT_KEY_ENCRYPT: + if (!sav->key_enc) { + continue; + } + l = PFKEY_UNUNIT64(sav->key_enc->sadb_key_len); + p = sav->key_enc; + break; + + case SADB_EXT_LIFETIME_CURRENT: + if (!sav->lft_c) { + continue; + } + l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_c)->sadb_ext_len); + p = sav->lft_c; + break; + + case SADB_EXT_LIFETIME_HARD: + if (!sav->lft_h) { + continue; + } + l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_h)->sadb_ext_len); + p = sav->lft_h; + break; + + case SADB_EXT_LIFETIME_SOFT: + if (!sav->lft_s) { + continue; + } + l = PFKEY_UNUNIT64(((struct sadb_ext *)sav->lft_s)->sadb_ext_len); + p = sav->lft_s; + break; + + case SADB_EXT_ADDRESS_PROXY: + case SADB_EXT_IDENTITY_SRC: + case SADB_EXT_IDENTITY_DST: + /* XXX: should we brought from SPD ? 
*/ + case SADB_EXT_SENSITIVITY: + default: + continue; } - - if ((!m && !p) || (m && p)) + + if ((!m && !p) || (m && p)) { goto fail; + } if (p && tres) { M_PREPEND(tres, l, M_WAITOK, 1); - if (!tres) + if (!tres) { goto fail; + } bcopy(p, mtod(tres, caddr_t), l); continue; } if (p) { m = key_alloc_mbuf(l); - if (!m) + if (!m) { goto fail; + } m_copyback(m, 0, l, p); } - - if (tres) + + if (tres) { m_cat(m, tres); + } tres = m; } - + m_cat(result, tres); - + if (sav->sah && (sav->sah->outgoing_if || sav->sah->ipsec_if)) { - m = key_setsadbipsecif(NULL, ifindex2ifnet[sav->sah->outgoing_if], sav->sah->ipsec_if, 0); - if (!m) - goto fail; - m_cat(result, m); - } - + m = key_setsadbipsecif(NULL, ifindex2ifnet[sav->sah->outgoing_if], sav->sah->ipsec_if, 0); + if (!m) { + goto fail; + } + m_cat(result, m); + } + if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); - if (result == NULL) + if (result == NULL) { goto fail; + } } - + result->m_pkthdr.len = 0; - for (m = result; m; m = m->m_next) + for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; - + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); - + PFKEY_UNIT64(result->m_pkthdr.len); + return result; - + fail: m_freem(result); m_freem(tres); @@ -5028,20 +5198,21 @@ fail: */ static struct mbuf * key_setsadbmsg( - u_int8_t type, - u_int16_t tlen, - u_int8_t satype, - u_int32_t seq, - pid_t pid, - u_int16_t reserved) + u_int8_t type, + u_int16_t tlen, + u_int8_t satype, + u_int32_t seq, + pid_t pid, + u_int16_t reserved) { struct mbuf *m; struct sadb_msg *p; int len; - + len = PFKEY_ALIGN8(sizeof(struct sadb_msg)); - if (len > MCLBYTES) + if (len > MCLBYTES) { return NULL; + } MGETHDR(m, M_DONTWAIT, MT_DATA); if (m && len > MHLEN) { MCLGET(m, M_DONTWAIT); @@ -5050,13 +5221,14 @@ key_setsadbmsg( m = NULL; } } - if (!m) + if (!m) { return NULL; + } m->m_pkthdr.len = m->m_len = len; m->m_next = NULL; - + p = mtod(m, struct sadb_msg *); - + bzero(p, len); p->sadb_msg_version = PF_KEY_V2; p->sadb_msg_type = type; @@ -5066,7 +5238,7 @@ key_setsadbmsg( p->sadb_msg_reserved = reserved; p->sadb_msg_seq = seq; p->sadb_msg_pid = (u_int32_t)pid; - + return m; } @@ -5075,22 +5247,23 @@ key_setsadbmsg( */ static struct mbuf * key_setsadbsa( - struct secasvar *sav) + struct secasvar *sav) { struct mbuf *m; struct sadb_sa *p; int len; - + len = PFKEY_ALIGN8(sizeof(struct sadb_sa)); m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - + p = mtod(m, struct sadb_sa *); - + bzero(p, len); p->sadb_sa_len = PFKEY_UNIT64(len); p->sadb_sa_exttype = SADB_EXT_SA; @@ -5100,7 +5273,7 @@ key_setsadbsa( p->sadb_sa_auth = sav->alg_auth; p->sadb_sa_encrypt = sav->alg_enc; p->sadb_sa_flags = sav->flags; - + return m; } @@ -5109,85 +5282,90 @@ key_setsadbsa( */ static struct mbuf * key_setsadbaddr( - u_int16_t exttype, - struct sockaddr *saddr, - u_int8_t prefixlen, - u_int16_t ul_proto) + u_int16_t exttype, + struct sockaddr *saddr, + u_int8_t prefixlen, + u_int16_t ul_proto) { struct mbuf *m; struct sadb_address *p; size_t len; - + len = PFKEY_ALIGN8(sizeof(struct sadb_address)) + - PFKEY_ALIGN8(saddr->sa_len); + PFKEY_ALIGN8(saddr->sa_len); m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - + p = mtod(m, struct sadb_address *); - + bzero(p, len); p->sadb_address_len = PFKEY_UNIT64(len); 
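/*
 * Every extension built in this area is sized with the same two macros:
 * payloads are padded to 8-byte boundaries (PFKEY_ALIGN8) and lengths are
 * carried on the wire in 8-byte units (PFKEY_UNIT64 / PFKEY_UNUNIT64).
 * Restated from <net/pfkeyv2.h> so the arithmetic can be checked alone:
 */
#include <assert.h>

#define PFKEY_ALIGN8(a)   (1 + (((a) - 1) | (8 - 1)))  /* round up to 8 */
#define PFKEY_UNIT64(a)   ((a) >> 3)                    /* bytes -> units */
#define PFKEY_UNUNIT64(a) ((a) << 3)                    /* units -> bytes */

int
main(void)
{
	/* a sockaddr_in (16 bytes) behind a sadb_address header (8 bytes) */
	unsigned len = PFKEY_ALIGN8(8) + PFKEY_ALIGN8(16);
	assert(len == 24);
	assert(PFKEY_UNIT64(len) == 3);
	/* exact round trip because len is already 8-aligned */
	assert(PFKEY_UNUNIT64(PFKEY_UNIT64(len)) == len);
	return 0;
}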
p->sadb_address_exttype = exttype; p->sadb_address_proto = ul_proto; if (prefixlen == FULLMASK) { switch (saddr->sa_family) { - case AF_INET: - prefixlen = sizeof(struct in_addr) << 3; - break; - case AF_INET6: - prefixlen = sizeof(struct in6_addr) << 3; - break; - default: - ; /*XXX*/ + case AF_INET: + prefixlen = sizeof(struct in_addr) << 3; + break; + case AF_INET6: + prefixlen = sizeof(struct in6_addr) << 3; + break; + default: + ; /*XXX*/ } } p->sadb_address_prefixlen = prefixlen; p->sadb_address_reserved = 0; - + bcopy(saddr, - mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_address)), - saddr->sa_len); - + mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_address)), + saddr->sa_len); + return m; } static struct mbuf * key_setsadbipsecif(ifnet_t internal_if, - ifnet_t outgoing_if, - ifnet_t ipsec_if, - int init_disabled) + ifnet_t outgoing_if, + ifnet_t ipsec_if, + int init_disabled) { - struct mbuf *m; - struct sadb_x_ipsecif *p; + struct mbuf *m; + struct sadb_x_ipsecif *p; size_t len; - + len = PFKEY_ALIGN8(sizeof(struct sadb_x_ipsecif)); - m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + m = key_alloc_mbuf(len); + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - - p = mtod(m, struct sadb_x_ipsecif *); - + + p = mtod(m, struct sadb_x_ipsecif *); + bzero(p, len); - p->sadb_x_ipsecif_len = PFKEY_UNIT64(len); + p->sadb_x_ipsecif_len = PFKEY_UNIT64(len); p->sadb_x_ipsecif_exttype = SADB_X_EXT_IPSECIF; - - if (internal_if && internal_if->if_xname) - strlcpy(p->sadb_x_ipsecif_internal_if, internal_if->if_xname, IFXNAMSIZ); - if (outgoing_if && outgoing_if->if_xname) - strlcpy(p->sadb_x_ipsecif_outgoing_if, outgoing_if->if_xname, IFXNAMSIZ); - if (ipsec_if && ipsec_if->if_xname) - strlcpy(p->sadb_x_ipsecif_ipsec_if, ipsec_if->if_xname, IFXNAMSIZ); - + + if (internal_if && internal_if->if_xname) { + strlcpy(p->sadb_x_ipsecif_internal_if, internal_if->if_xname, IFXNAMSIZ); + } + if (outgoing_if && outgoing_if->if_xname) { + strlcpy(p->sadb_x_ipsecif_outgoing_if, outgoing_if->if_xname, IFXNAMSIZ); + } + if (ipsec_if && ipsec_if->if_xname) { + strlcpy(p->sadb_x_ipsecif_ipsec_if, ipsec_if->if_xname, IFXNAMSIZ); + } + p->sadb_x_ipsecif_init_disabled = init_disabled; - + return m; } @@ -5195,28 +5373,29 @@ key_setsadbipsecif(ifnet_t internal_if, * set data into sadb_session_id */ static struct mbuf * -key_setsadbsession_id (u_int64_t session_ids[]) +key_setsadbsession_id(u_int64_t session_ids[]) { struct mbuf *m; struct sadb_session_id *p; size_t len; - + len = PFKEY_ALIGN8(sizeof(*p)); m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - + p = mtod(m, __typeof__(p)); - + bzero(p, len); p->sadb_session_id_len = PFKEY_UNIT64(len); p->sadb_session_id_exttype = SADB_EXT_SESSION_ID; p->sadb_session_id_v[0] = session_ids[0]; p->sadb_session_id_v[1] = session_ids[1]; - + return m; } @@ -5224,29 +5403,30 @@ key_setsadbsession_id (u_int64_t session_ids[]) * copy stats data into sadb_sastat type. 
*/ static struct mbuf * -key_setsadbsastat (u_int32_t dir, - struct sastat *stats, - u_int32_t max_stats) +key_setsadbsastat(u_int32_t dir, + struct sastat *stats, + u_int32_t max_stats) { struct mbuf *m; struct sadb_sastat *p; int list_len, len; - + if (!stats) { return NULL; } - + list_len = sizeof(*stats) * max_stats; len = PFKEY_ALIGN8(sizeof(*p)) + PFKEY_ALIGN8(list_len); m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - + p = mtod(m, __typeof__(p)); - + bzero(p, len); p->sadb_sastat_len = PFKEY_UNIT64(len); p->sadb_sastat_exttype = SADB_EXT_SASTAT; @@ -5254,10 +5434,10 @@ key_setsadbsastat (u_int32_t dir, p->sadb_sastat_list_len = max_stats; if (list_len) { bcopy(stats, - mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(*p)), - list_len); + mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(*p)), + list_len); } - + return m; } @@ -5267,37 +5447,38 @@ key_setsadbsastat (u_int32_t dir, */ static struct mbuf * key_setsadbident( - u_int16_t exttype, - u_int16_t idtype, - caddr_t string, - int stringlen, - u_int64_t id) + u_int16_t exttype, + u_int16_t idtype, + caddr_t string, + int stringlen, + u_int64_t id) { struct mbuf *m; struct sadb_ident *p; size_t len; - + len = PFKEY_ALIGN8(sizeof(struct sadb_ident)) + PFKEY_ALIGN8(stringlen); m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - + p = mtod(m, struct sadb_ident *); - + bzero(p, len); p->sadb_ident_len = PFKEY_UNIT64(len); p->sadb_ident_exttype = exttype; p->sadb_ident_type = idtype; p->sadb_ident_reserved = 0; p->sadb_ident_id = id; - + bcopy(string, - mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_ident)), - stringlen); - + mtod(m, caddr_t) + PFKEY_ALIGN8(sizeof(struct sadb_ident)), + stringlen); + return m; } #endif @@ -5307,25 +5488,26 @@ key_setsadbident( */ static struct mbuf * key_setsadbxsa2( - u_int8_t mode, - u_int32_t seq, - u_int32_t reqid, - u_int16_t flags) + u_int8_t mode, + u_int32_t seq, + u_int32_t reqid, + u_int16_t flags) { struct mbuf *m; struct sadb_x_sa2 *p; size_t len; - + len = PFKEY_ALIGN8(sizeof(struct sadb_x_sa2)); m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - + p = mtod(m, struct sadb_x_sa2 *); - + bzero(p, len); p->sadb_x_sa2_len = PFKEY_UNIT64(len); p->sadb_x_sa2_exttype = SADB_X_EXT_SA2; @@ -5335,7 +5517,7 @@ key_setsadbxsa2( p->sadb_x_sa2_sequence = seq; p->sadb_x_sa2_reqid = reqid; p->sadb_x_sa2_flags = flags; - + return m; } @@ -5344,31 +5526,32 @@ key_setsadbxsa2( */ static struct mbuf * key_setsadbxpolicy( - u_int16_t type, - u_int8_t dir, - u_int32_t id) + u_int16_t type, + u_int8_t dir, + u_int32_t id) { struct mbuf *m; struct sadb_x_policy *p; size_t len; - + len = PFKEY_ALIGN8(sizeof(struct sadb_x_policy)); m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } return NULL; } - + p = mtod(m, struct sadb_x_policy *); - + bzero(p, len); p->sadb_x_policy_len = PFKEY_UNIT64(len); p->sadb_x_policy_exttype = SADB_X_EXT_POLICY; p->sadb_x_policy_type = type; p->sadb_x_policy_dir = dir; p->sadb_x_policy_id = id; - + return m; } @@ -5378,11 +5561,11 @@ key_setsadbxpolicy( */ static void * key_newbuf( - const void *src, - u_int len) + const void *src, + u_int len) { caddr_t new; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); KMALLOC_NOWAIT(new, caddr_t, len); if 
(new == NULL) { @@ -5395,7 +5578,7 @@ key_newbuf( } } bcopy(src, new, len); - + return new; } @@ -5405,44 +5588,44 @@ key_newbuf( */ int key_ismyaddr( - struct sockaddr *sa) + struct sockaddr *sa) { #if INET struct sockaddr_in *sin; struct in_ifaddr *ia; #endif - + /* sanity check */ - if (sa == NULL) + if (sa == NULL) { panic("key_ismyaddr: NULL pointer is passed.\n"); - + } + switch (sa->sa_family) { #if INET - case AF_INET: - lck_rw_lock_shared(in_ifaddr_rwlock); - sin = (struct sockaddr_in *)(void *)sa; - for (ia = in_ifaddrhead.tqh_first; ia; - ia = ia->ia_link.tqe_next) { - IFA_LOCK_SPIN(&ia->ia_ifa); - if (sin->sin_family == ia->ia_addr.sin_family && - sin->sin_len == ia->ia_addr.sin_len && - sin->sin_addr.s_addr == ia->ia_addr.sin_addr.s_addr) - { - IFA_UNLOCK(&ia->ia_ifa); - lck_rw_done(in_ifaddr_rwlock); - return 1; - } + case AF_INET: + lck_rw_lock_shared(in_ifaddr_rwlock); + sin = (struct sockaddr_in *)(void *)sa; + for (ia = in_ifaddrhead.tqh_first; ia; + ia = ia->ia_link.tqe_next) { + IFA_LOCK_SPIN(&ia->ia_ifa); + if (sin->sin_family == ia->ia_addr.sin_family && + sin->sin_len == ia->ia_addr.sin_len && + sin->sin_addr.s_addr == ia->ia_addr.sin_addr.s_addr) { IFA_UNLOCK(&ia->ia_ifa); + lck_rw_done(in_ifaddr_rwlock); + return 1; } - lck_rw_done(in_ifaddr_rwlock); - break; + IFA_UNLOCK(&ia->ia_ifa); + } + lck_rw_done(in_ifaddr_rwlock); + break; #endif #if INET6 - case AF_INET6: - return key_ismyaddr6((struct sockaddr_in6 *)(void *)sa); + case AF_INET6: + return key_ismyaddr6((struct sockaddr_in6 *)(void *)sa); #endif } - + return 0; } @@ -5457,22 +5640,22 @@ key_ismyaddr( static int key_ismyaddr6( - struct sockaddr_in6 *sin6) + struct sockaddr_in6 *sin6) { struct in6_ifaddr *ia; struct in6_multi *in6m; - + lck_rw_lock_shared(&in6_ifaddr_rwlock); for (ia = in6_ifaddrs; ia; ia = ia->ia_next) { IFA_LOCK(&ia->ia_ifa); if (key_sockaddrcmp((struct sockaddr *)&sin6, - (struct sockaddr *)&ia->ia_addr, 0) == 0) { + (struct sockaddr *)&ia->ia_addr, 0) == 0) { IFA_UNLOCK(&ia->ia_ifa); lck_rw_done(&in6_ifaddr_rwlock); return 1; } IFA_UNLOCK(&ia->ia_ifa); - + /* * XXX Multicast * XXX why do we care about multlicast here while we don't care @@ -5490,11 +5673,12 @@ key_ismyaddr6( } } lck_rw_done(&in6_ifaddr_rwlock); - + /* loopback, just for safety */ - if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr)) + if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr)) { return 1; - + } + return 0; } #endif /*INET6*/ @@ -5513,372 +5697,417 @@ key_ismyaddr6( */ static int key_cmpsaidx( - struct secasindex *saidx0, - struct secasindex *saidx1, - int flag) + struct secasindex *saidx0, + struct secasindex *saidx1, + int flag) { /* sanity */ - if (saidx0 == NULL && saidx1 == NULL) + if (saidx0 == NULL && saidx1 == NULL) { return 1; - - if (saidx0 == NULL || saidx1 == NULL) + } + + if (saidx0 == NULL || saidx1 == NULL) { return 0; - - if (saidx0->ipsec_ifindex != 0 && saidx0->ipsec_ifindex != saidx1->ipsec_ifindex) + } + + if (saidx0->ipsec_ifindex != 0 && saidx0->ipsec_ifindex != saidx1->ipsec_ifindex) { return 0; - - if (saidx0->proto != saidx1->proto) + } + + if (saidx0->proto != saidx1->proto) { return 0; - + } + if (flag == CMP_EXACTLY) { - if (saidx0->mode != saidx1->mode) + if (saidx0->mode != saidx1->mode) { return 0; - if (saidx0->reqid != saidx1->reqid) + } + if (saidx0->reqid != saidx1->reqid) { return 0; + } if (bcmp(&saidx0->src, &saidx1->src, saidx0->src.ss_len) != 0 || - bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.ss_len) != 0) + bcmp(&saidx0->dst, &saidx1->dst, saidx0->dst.ss_len) != 0) { return 0; + } } else 
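/*
 * key_newbuf() just above shows the allocation idiom used throughout this
 * file: try a non-blocking allocation while sadb_mutex is held; only if
 * that fails, drop the mutex, retry with a blocking allocation, and take
 * the mutex back. A userspace sketch of the same shape, with malloc
 * standing in for KMALLOC_NOWAIT/KMALLOC_WAIT and a pthread mutex for
 * sadb_mutex:
 */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t sadb_mutex_sketch = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds sadb_mutex_sketch; it is held again on return. */
static void *
newbuf_locked(const void *src, size_t len)
{
	void *buf = malloc(len);            /* kernel: KMALLOC_NOWAIT */
	if (buf == NULL) {
		pthread_mutex_unlock(&sadb_mutex_sketch);
		buf = malloc(len);          /* kernel: KMALLOC_WAIT, may sleep */
		pthread_mutex_lock(&sadb_mutex_sketch);
		if (buf == NULL) {
			return NULL;        /* caller maps this to ENOBUFS */
		}
	}
	memcpy(buf, src, len);
	return buf;
}
/*
 * Note the kernel accepts that SADB state may change while the lock is
 * dropped; callers revalidate where that matters.
 */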
{ - /* CMP_MODE_REQID, CMP_REQID, CMP_HEAD */ if (flag & CMP_REQID) { /* * If reqid of SPD is non-zero, unique SA is required. * The result must be of same reqid in this case. */ - if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid) + if (saidx1->reqid != 0 && saidx0->reqid != saidx1->reqid) { return 0; + } } - + if (flag & CMP_MODE) { if (saidx0->mode != IPSEC_MODE_ANY - && saidx0->mode != saidx1->mode) + && saidx0->mode != saidx1->mode) { return 0; + } } - + if (key_sockaddrcmp((struct sockaddr *)&saidx0->src, - (struct sockaddr *)&saidx1->src, flag & CMP_PORT ? 1 : 0) != 0) { + (struct sockaddr *)&saidx1->src, flag & CMP_PORT ? 1 : 0) != 0) { return 0; } if (key_sockaddrcmp((struct sockaddr *)&saidx0->dst, - (struct sockaddr *)&saidx1->dst, flag & CMP_PORT ? 1 : 0) != 0) { + (struct sockaddr *)&saidx1->dst, flag & CMP_PORT ? 1 : 0) != 0) { return 0; } } - + + return 1; +} + +/* + * compare two secindex structure exactly. + * IN: + * spidx0: source, it is often in SPD. + * spidx1: object, it is often from PFKEY message. + * OUT: + * 1 : equal + * 0 : not equal + */ +static int +key_cmpspidx_exactly( + struct secpolicyindex *spidx0, + struct secpolicyindex *spidx1) +{ + /* sanity */ + if (spidx0 == NULL && spidx1 == NULL) { + return 1; + } + + if (spidx0 == NULL || spidx1 == NULL) { + return 0; + } + + if (spidx0->prefs != spidx1->prefs + || spidx0->prefd != spidx1->prefd + || spidx0->ul_proto != spidx1->ul_proto + || spidx0->internal_if != spidx1->internal_if) { + return 0; + } + + if (key_sockaddrcmp((struct sockaddr *)&spidx0->src, + (struct sockaddr *)&spidx1->src, 1) != 0) { + return 0; + } + if (key_sockaddrcmp((struct sockaddr *)&spidx0->dst, + (struct sockaddr *)&spidx1->dst, 1) != 0) { + return 0; + } + + if (key_sockaddrcmp((struct sockaddr *)&spidx0->src_range.start, + (struct sockaddr *)&spidx1->src_range.start, 1) != 0) { + return 0; + } + if (key_sockaddrcmp((struct sockaddr *)&spidx0->src_range.end, + (struct sockaddr *)&spidx1->src_range.end, 1) != 0) { + return 0; + } + if (key_sockaddrcmp((struct sockaddr *)&spidx0->dst_range.start, + (struct sockaddr *)&spidx1->dst_range.start, 1) != 0) { + return 0; + } + if (key_sockaddrcmp((struct sockaddr *)&spidx0->dst_range.end, + (struct sockaddr *)&spidx1->dst_range.end, 1) != 0) { + return 0; + } + + return 1; +} + +/* + * compare two secindex structure with mask. + * IN: + * spidx0: source, it is often in SPD. + * spidx1: object, it is often from IP header. + * OUT: + * 1 : equal + * 0 : not equal + */ +static int +key_cmpspidx_withmask( + struct secpolicyindex *spidx0, + struct secpolicyindex *spidx1) +{ + int spidx0_src_is_range = 0; + int spidx0_dst_is_range = 0; + + /* sanity */ + if (spidx0 == NULL && spidx1 == NULL) { + return 1; + } + + if (spidx0 == NULL || spidx1 == NULL) { + return 0; + } + + if (spidx0->src_range.start.ss_len > 0) { + spidx0_src_is_range = 1; + } + + if (spidx0->dst_range.start.ss_len > 0) { + spidx0_dst_is_range = 1; + } + + if ((spidx0_src_is_range ? spidx0->src_range.start.ss_family : spidx0->src.ss_family) != spidx1->src.ss_family || + (spidx0_dst_is_range ? spidx0->dst_range.start.ss_family : spidx0->dst.ss_family) != spidx1->dst.ss_family || + (spidx0_src_is_range ? spidx0->src_range.start.ss_len : spidx0->src.ss_len) != spidx1->src.ss_len || + (spidx0_dst_is_range ? spidx0->dst_range.start.ss_len : spidx0->dst.ss_len) != spidx1->dst.ss_len) { + return 0; + } + + /* if spidx.ul_proto == IPSEC_ULPROTO_ANY, ignore. 
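/*
 * key_cmpsaidx() above treats `flag` as a mode selector rather than a plain
 * bitmask: CMP_EXACTLY demands identical mode/reqid/addresses, while the
 * CMP_HEAD / CMP_MODE / CMP_REQID / CMP_PORT combinations relax individual
 * fields (IPSEC_MODE_ANY matches any mode, and ports are compared only when
 * CMP_PORT is present). The least obvious rule is the reqid one, sketched
 * here: a non-zero reqid on the policy side (saidx1) demands an SA bound to
 * exactly that reqid, while reqid 0 places no constraint.
 */
static int
reqid_matches(unsigned spd_reqid /* saidx1 */, unsigned sa_reqid /* saidx0 */)
{
	return spd_reqid == 0 || sa_reqid == spd_reqid;
}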
*/ + if (spidx0->ul_proto != (u_int16_t)IPSEC_ULPROTO_ANY + && spidx0->ul_proto != spidx1->ul_proto) { + return 0; + } + + /* If spidx1 specifies interface, ignore src addr */ + if (spidx1->internal_if != NULL) { + if (spidx0->internal_if == NULL + || spidx0->internal_if != spidx1->internal_if) { + return 0; + } + + /* Still check ports */ + switch (spidx0->src.ss_family) { + case AF_INET: + if (spidx0_src_is_range && + (satosin(&spidx1->src)->sin_port < satosin(&spidx0->src_range.start)->sin_port + || satosin(&spidx1->src)->sin_port > satosin(&spidx0->src_range.end)->sin_port)) { + return 0; + } else if (satosin(&spidx0->src)->sin_port != IPSEC_PORT_ANY + && satosin(&spidx0->src)->sin_port != + satosin(&spidx1->src)->sin_port) { + return 0; + } + break; + case AF_INET6: + if (spidx0_src_is_range && + (satosin6(&spidx1->src)->sin6_port < satosin6(&spidx0->src_range.start)->sin6_port + || satosin6(&spidx1->src)->sin6_port > satosin6(&spidx0->src_range.end)->sin6_port)) { + return 0; + } else if (satosin6(&spidx0->src)->sin6_port != IPSEC_PORT_ANY + && satosin6(&spidx0->src)->sin6_port != + satosin6(&spidx1->src)->sin6_port) { + return 0; + } + break; + default: + break; + } + } else if (spidx0_src_is_range) { + if (!key_is_addr_in_range(&spidx1->src, &spidx0->src_range)) { + return 0; + } + } else { + switch (spidx0->src.ss_family) { + case AF_INET: + if (satosin(&spidx0->src)->sin_port != IPSEC_PORT_ANY + && satosin(&spidx0->src)->sin_port != + satosin(&spidx1->src)->sin_port) { + return 0; + } + if (!key_bbcmp((caddr_t)&satosin(&spidx0->src)->sin_addr, + (caddr_t)&satosin(&spidx1->src)->sin_addr, spidx0->prefs)) { + return 0; + } + break; + case AF_INET6: + if (satosin6(&spidx0->src)->sin6_port != IPSEC_PORT_ANY + && satosin6(&spidx0->src)->sin6_port != + satosin6(&spidx1->src)->sin6_port) { + return 0; + } + /* + * scope_id check. if sin6_scope_id is 0, we regard it + * as a wildcard scope, which matches any scope zone ID. + */ + if (satosin6(&spidx0->src)->sin6_scope_id && + satosin6(&spidx1->src)->sin6_scope_id && + satosin6(&spidx0->src)->sin6_scope_id != + satosin6(&spidx1->src)->sin6_scope_id) { + return 0; + } + if (!key_bbcmp((caddr_t)&satosin6(&spidx0->src)->sin6_addr, + (caddr_t)&satosin6(&spidx1->src)->sin6_addr, spidx0->prefs)) { + return 0; + } + break; + default: + /* XXX */ + if (bcmp(&spidx0->src, &spidx1->src, spidx0->src.ss_len) != 0) { + return 0; + } + break; + } + } + + if (spidx0_dst_is_range) { + if (!key_is_addr_in_range(&spidx1->dst, &spidx0->dst_range)) { + return 0; + } + } else { + switch (spidx0->dst.ss_family) { + case AF_INET: + if (satosin(&spidx0->dst)->sin_port != IPSEC_PORT_ANY + && satosin(&spidx0->dst)->sin_port != + satosin(&spidx1->dst)->sin_port) { + return 0; + } + if (!key_bbcmp((caddr_t)&satosin(&spidx0->dst)->sin_addr, + (caddr_t)&satosin(&spidx1->dst)->sin_addr, spidx0->prefd)) { + return 0; + } + break; + case AF_INET6: + if (satosin6(&spidx0->dst)->sin6_port != IPSEC_PORT_ANY + && satosin6(&spidx0->dst)->sin6_port != + satosin6(&spidx1->dst)->sin6_port) { + return 0; + } + /* + * scope_id check. if sin6_scope_id is 0, we regard it + * as a wildcard scope, which matches any scope zone ID. 
+ */ + if (satosin6(&spidx0->src)->sin6_scope_id && + satosin6(&spidx1->src)->sin6_scope_id && + satosin6(&spidx0->dst)->sin6_scope_id != + satosin6(&spidx1->dst)->sin6_scope_id) { + return 0; + } + if (!key_bbcmp((caddr_t)&satosin6(&spidx0->dst)->sin6_addr, + (caddr_t)&satosin6(&spidx1->dst)->sin6_addr, spidx0->prefd)) { + return 0; + } + break; + default: + /* XXX */ + if (bcmp(&spidx0->dst, &spidx1->dst, spidx0->dst.ss_len) != 0) { + return 0; + } + break; + } + } + + /* XXX Do we check other field ? e.g. flowinfo */ + return 1; } -/* - * compare two secindex structure exactly. - * IN: - * spidx0: source, it is often in SPD. - * spidx1: object, it is often from PFKEY message. - * OUT: - * 1 : equal - * 0 : not equal - */ static int -key_cmpspidx_exactly( - struct secpolicyindex *spidx0, - struct secpolicyindex *spidx1) +key_is_addr_in_range(struct sockaddr_storage *addr, struct secpolicyaddrrange *addr_range) { - /* sanity */ - if (spidx0 == NULL && spidx1 == NULL) - return 1; - - if (spidx0 == NULL || spidx1 == NULL) - return 0; - - if (spidx0->prefs != spidx1->prefs - || spidx0->prefd != spidx1->prefd - || spidx0->ul_proto != spidx1->ul_proto - || spidx0->internal_if != spidx1->internal_if) - return 0; - - if (key_sockaddrcmp((struct sockaddr *)&spidx0->src, - (struct sockaddr *)&spidx1->src, 1) != 0) { - return 0; - } - if (key_sockaddrcmp((struct sockaddr *)&spidx0->dst, - (struct sockaddr *)&spidx1->dst, 1) != 0) { - return 0; - } - - if (key_sockaddrcmp((struct sockaddr *)&spidx0->src_range.start, - (struct sockaddr *)&spidx1->src_range.start, 1) != 0) { - return 0; - } - if (key_sockaddrcmp((struct sockaddr *)&spidx0->src_range.end, - (struct sockaddr *)&spidx1->src_range.end, 1) != 0) { + int cmp = 0; + + if (addr == NULL || addr_range == NULL) { return 0; } - if (key_sockaddrcmp((struct sockaddr *)&spidx0->dst_range.start, - (struct sockaddr *)&spidx1->dst_range.start, 1) != 0) { + + /* Must be greater than or equal to start */ + cmp = key_sockaddrcmp((struct sockaddr *)addr, (struct sockaddr *)&addr_range->start, 1); + if (cmp != 0 && cmp != 1) { return 0; } - if (key_sockaddrcmp((struct sockaddr *)&spidx0->dst_range.end, - (struct sockaddr *)&spidx1->dst_range.end, 1) != 0) { + + /* Must be less than or equal to end */ + cmp = key_sockaddrcmp((struct sockaddr *)addr, (struct sockaddr *)&addr_range->end, 1); + if (cmp != 0 && cmp != -1) { return 0; } - - return 1; -} -/* - * compare two secindex structure with mask. - * IN: - * spidx0: source, it is often in SPD. - * spidx1: object, it is often from IP header. - * OUT: - * 1 : equal - * 0 : not equal - */ -static int -key_cmpspidx_withmask( - struct secpolicyindex *spidx0, - struct secpolicyindex *spidx1) -{ - int spidx0_src_is_range = 0; - int spidx0_dst_is_range = 0; - - /* sanity */ - if (spidx0 == NULL && spidx1 == NULL) - return 1; - - if (spidx0 == NULL || spidx1 == NULL) - return 0; - - if (spidx0->src_range.start.ss_len > 0) - spidx0_src_is_range = 1; - - if (spidx0->dst_range.start.ss_len > 0) - spidx0_dst_is_range = 1; - - if ((spidx0_src_is_range ? spidx0->src_range.start.ss_family : spidx0->src.ss_family) != spidx1->src.ss_family || - (spidx0_dst_is_range ? spidx0->dst_range.start.ss_family : spidx0->dst.ss_family) != spidx1->dst.ss_family || - (spidx0_src_is_range ? spidx0->src_range.start.ss_len : spidx0->src.ss_len) != spidx1->src.ss_len || - (spidx0_dst_is_range ? spidx0->dst_range.start.ss_len : spidx0->dst.ss_len) != spidx1->dst.ss_len) - return 0; - - /* if spidx.ul_proto == IPSEC_ULPROTO_ANY, ignore. 
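/*
 * key_is_addr_in_range() above leans on key_sockaddrcmp()'s four-way result
 * (-1 less, 0 equal, 1 greater, 2 not comparable): the address is in range
 * iff cmp(addr, start) is 0 or 1 and cmp(addr, end) is 0 or -1, so the
 * value 2 falls through both tests and rejects. An integer model of the
 * same predicate:
 */
static int
in_range_model(int addr, int start, int end)
{
	int cmp_lo = (addr > start) - (addr < start);  /* -1 / 0 / 1 */
	int cmp_hi = (addr > end) - (addr < end);
	return (cmp_lo == 0 || cmp_lo == 1) &&
	    (cmp_hi == 0 || cmp_hi == -1);
}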
*/ - if (spidx0->ul_proto != (u_int16_t)IPSEC_ULPROTO_ANY - && spidx0->ul_proto != spidx1->ul_proto) - return 0; - - /* If spidx1 specifies interface, ignore src addr */ - if (spidx1->internal_if != NULL) { - if (spidx0->internal_if == NULL - || spidx0->internal_if != spidx1->internal_if) - return 0; - - /* Still check ports */ - switch (spidx0->src.ss_family) { - case AF_INET: - if (spidx0_src_is_range && - (satosin(&spidx1->src)->sin_port < satosin(&spidx0->src_range.start)->sin_port - || satosin(&spidx1->src)->sin_port > satosin(&spidx0->src_range.end)->sin_port)) - return 0; - else if (satosin(&spidx0->src)->sin_port != IPSEC_PORT_ANY - && satosin(&spidx0->src)->sin_port != - satosin(&spidx1->src)->sin_port) - return 0; - break; - case AF_INET6: - if (spidx0_src_is_range && - (satosin6(&spidx1->src)->sin6_port < satosin6(&spidx0->src_range.start)->sin6_port - || satosin6(&spidx1->src)->sin6_port > satosin6(&spidx0->src_range.end)->sin6_port)) - return 0; - else if (satosin6(&spidx0->src)->sin6_port != IPSEC_PORT_ANY - && satosin6(&spidx0->src)->sin6_port != - satosin6(&spidx1->src)->sin6_port) - return 0; - break; - default: - break; - } - } else if (spidx0_src_is_range) { - if (!key_is_addr_in_range(&spidx1->src, &spidx0->src_range)) - return 0; - } else { - switch (spidx0->src.ss_family) { - case AF_INET: - if (satosin(&spidx0->src)->sin_port != IPSEC_PORT_ANY - && satosin(&spidx0->src)->sin_port != - satosin(&spidx1->src)->sin_port) - return 0; - if (!key_bbcmp((caddr_t)&satosin(&spidx0->src)->sin_addr, - (caddr_t)&satosin(&spidx1->src)->sin_addr, spidx0->prefs)) - return 0; - break; - case AF_INET6: - if (satosin6(&spidx0->src)->sin6_port != IPSEC_PORT_ANY - && satosin6(&spidx0->src)->sin6_port != - satosin6(&spidx1->src)->sin6_port) - return 0; - /* - * scope_id check. if sin6_scope_id is 0, we regard it - * as a wildcard scope, which matches any scope zone ID. - */ - if (satosin6(&spidx0->src)->sin6_scope_id && - satosin6(&spidx1->src)->sin6_scope_id && - satosin6(&spidx0->src)->sin6_scope_id != - satosin6(&spidx1->src)->sin6_scope_id) - return 0; - if (!key_bbcmp((caddr_t)&satosin6(&spidx0->src)->sin6_addr, - (caddr_t)&satosin6(&spidx1->src)->sin6_addr, spidx0->prefs)) - return 0; - break; - default: - /* XXX */ - if (bcmp(&spidx0->src, &spidx1->src, spidx0->src.ss_len) != 0) - return 0; - break; - } - } - - if (spidx0_dst_is_range) { - if (!key_is_addr_in_range(&spidx1->dst, &spidx0->dst_range)) - return 0; - } else { - switch (spidx0->dst.ss_family) { - case AF_INET: - if (satosin(&spidx0->dst)->sin_port != IPSEC_PORT_ANY - && satosin(&spidx0->dst)->sin_port != - satosin(&spidx1->dst)->sin_port) - return 0; - if (!key_bbcmp((caddr_t)&satosin(&spidx0->dst)->sin_addr, - (caddr_t)&satosin(&spidx1->dst)->sin_addr, spidx0->prefd)) - return 0; - break; - case AF_INET6: - if (satosin6(&spidx0->dst)->sin6_port != IPSEC_PORT_ANY - && satosin6(&spidx0->dst)->sin6_port != - satosin6(&spidx1->dst)->sin6_port) - return 0; - /* - * scope_id check. if sin6_scope_id is 0, we regard it - * as a wildcard scope, which matches any scope zone ID. 
- */ - if (satosin6(&spidx0->src)->sin6_scope_id && - satosin6(&spidx1->src)->sin6_scope_id && - satosin6(&spidx0->dst)->sin6_scope_id != - satosin6(&spidx1->dst)->sin6_scope_id) - return 0; - if (!key_bbcmp((caddr_t)&satosin6(&spidx0->dst)->sin6_addr, - (caddr_t)&satosin6(&spidx1->dst)->sin6_addr, spidx0->prefd)) - return 0; - break; - default: - /* XXX */ - if (bcmp(&spidx0->dst, &spidx1->dst, spidx0->dst.ss_len) != 0) - return 0; - break; - } - } - - /* XXX Do we check other field ? e.g. flowinfo */ - return 1; } -static int -key_is_addr_in_range(struct sockaddr_storage *addr, struct secpolicyaddrrange *addr_range) -{ - int cmp = 0; - - if (addr == NULL || addr_range == NULL) - return 0; - - /* Must be greater than or equal to start */ - cmp = key_sockaddrcmp((struct sockaddr *)addr, (struct sockaddr *)&addr_range->start, 1); - if (cmp != 0 && cmp != 1) - return 0; - - /* Must be less than or equal to end */ - cmp = key_sockaddrcmp((struct sockaddr *)addr, (struct sockaddr *)&addr_range->end, 1); - if (cmp != 0 && cmp != -1) - return 0; - - return 1; -} - /* - Return values: - -1: sa1 < sa2 - 0: sa1 == sa2 - 1: sa1 > sa2 - 2: Not comparable or error + * Return values: + * -1: sa1 < sa2 + * 0: sa1 == sa2 + * 1: sa1 > sa2 + * 2: Not comparable or error */ static int key_sockaddrcmp( - struct sockaddr *sa1, - struct sockaddr *sa2, - int port) -{ - int result = 0; - int port_result = 0; - - if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) + struct sockaddr *sa1, + struct sockaddr *sa2, + int port) +{ + int result = 0; + int port_result = 0; + + if (sa1->sa_family != sa2->sa_family || sa1->sa_len != sa2->sa_len) { return 2; - - if (sa1->sa_len == 0) - return 0; - + } + + if (sa1->sa_len == 0) { + return 0; + } + switch (sa1->sa_family) { - case AF_INET: - if (sa1->sa_len != sizeof(struct sockaddr_in)) - return 2; - - result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr)); - - if (port) { - if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) { - port_result = -1; - } else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) { - port_result = 1; - } - - if (result == 0) - result = port_result; - else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) - return 2; - } - - break; - case AF_INET6: - if (sa1->sa_len != sizeof(struct sockaddr_in6)) - return 2; /*EINVAL*/ - - if (satosin6(sa1)->sin6_scope_id != - satosin6(sa2)->sin6_scope_id) { - return 2; - } - - result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr)); - - if (port) { - if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) { - port_result = -1; - } else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) { - port_result = 1; - } - - if (result == 0) - result = port_result; - else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) - return 2; - } - - break; - default: - result = memcmp(sa1, sa2, sa1->sa_len); - break; - } - - if (result < 0) result = -1; - else if (result > 0) result = 1; - + case AF_INET: + if (sa1->sa_len != sizeof(struct sockaddr_in)) { + return 2; + } + + result = memcmp(&satosin(sa1)->sin_addr.s_addr, &satosin(sa2)->sin_addr.s_addr, sizeof(satosin(sa1)->sin_addr.s_addr)); + + if (port) { + if (satosin(sa1)->sin_port < satosin(sa2)->sin_port) { + port_result = -1; + } else if (satosin(sa1)->sin_port > satosin(sa2)->sin_port) { + port_result = 1; + } + + if (result == 0) { + result = port_result; + } else if ((result > 0 
&& port_result < 0) || (result < 0 && port_result > 0)) { + return 2; + } + } + + break; + case AF_INET6: + if (sa1->sa_len != sizeof(struct sockaddr_in6)) { + return 2; /*EINVAL*/ + } + if (satosin6(sa1)->sin6_scope_id != + satosin6(sa2)->sin6_scope_id) { + return 2; + } + + result = memcmp(&satosin6(sa1)->sin6_addr.s6_addr[0], &satosin6(sa2)->sin6_addr.s6_addr[0], sizeof(struct in6_addr)); + + if (port) { + if (satosin6(sa1)->sin6_port < satosin6(sa2)->sin6_port) { + port_result = -1; + } else if (satosin6(sa1)->sin6_port > satosin6(sa2)->sin6_port) { + port_result = 1; + } + + if (result == 0) { + result = port_result; + } else if ((result > 0 && port_result < 0) || (result < 0 && port_result > 0)) { + return 2; + } + } + + break; + default: + result = memcmp(sa1, sa2, sa1->sa_len); + break; + } + + if (result < 0) { + result = -1; + } else if (result > 0) { + result = 1; + } + return result; } @@ -5894,31 +6123,34 @@ key_sockaddrcmp( */ static int key_bbcmp( - caddr_t p1, - caddr_t p2, - u_int bits) + caddr_t p1, + caddr_t p2, + u_int bits) { u_int8_t mask; - + /* XXX: This could be considerably faster if we compare a word * at a time, but it is complicated on LSB Endian machines */ - + /* Handle null pointers */ - if (p1 == NULL || p2 == NULL) - return (p1 == p2); - + if (p1 == NULL || p2 == NULL) { + return p1 == p2; + } + while (bits >= 8) { - if (*p1++ != *p2++) + if (*p1++ != *p2++) { return 0; + } bits -= 8; } - + if (bits > 0) { - mask = ~((1<<(8-bits))-1); - if ((*p1 & mask) != (*p2 & mask)) + mask = ~((1 << (8 - bits)) - 1); + if ((*p1 & mask) != (*p2 & mask)) { return 0; + } } - return 1; /* Match! */ + return 1; /* Match! */ } /* @@ -5942,84 +6174,86 @@ key_timehandler(void) int stop_handler = 1; /* stop the timehandler */ microtime(&tv); - + /* pre-allocate buffers before taking the lock */ /* if allocation failures occur - portions of the processing will be skipped */ if ((spbufcount = ipsec_policy_count) != 0) { spbufcount += 256; KMALLOC_WAIT(spbuf, struct secpolicy **, spbufcount * sizeof(struct secpolicy *)); - if (spbuf) + if (spbuf) { spptr = spbuf; + } } if ((savbufcount = ipsec_sav_count) != 0) { savbufcount += 512; KMALLOC_WAIT(savexbuf, struct secasvar **, savbufcount * sizeof(struct secasvar *)); - if (savexbuf) + if (savexbuf) { savexptr = savexbuf; + } KMALLOC_WAIT(savkabuf, struct secasvar **, savbufcount * sizeof(struct secasvar *)); - if (savkabuf) + if (savkabuf) { savkaptr = savkabuf; + } } lck_mtx_lock(sadb_mutex); /* SPD */ if (spbuf) { - struct secpolicy *sp, *nextsp; - + for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { for (sp = LIST_FIRST(&sptree[dir]); - sp != NULL; - sp = nextsp) { - + sp != NULL; + sp = nextsp) { /* don't prevent timehandler from stopping for generate policy */ - if (sp->policy != IPSEC_POLICY_GENERATE) + if (sp->policy != IPSEC_POLICY_GENERATE) { stop_handler = 0; + } spd_count++; nextsp = LIST_NEXT(sp, chain); - + if (sp->state == IPSEC_SPSTATE_DEAD) { key_freesp(sp, KEY_SADB_LOCKED); continue; } - - if (sp->lifetime == 0 && sp->validtime == 0) + + if (sp->lifetime == 0 && sp->validtime == 0) { continue; + } if (spbuf && spcount < spbufcount) { /* the deletion will occur next time */ if ((sp->lifetime - && tv.tv_sec - sp->created > sp->lifetime) + && tv.tv_sec - sp->created > sp->lifetime) || (sp->validtime - && tv.tv_sec - sp->lastused > sp->validtime)) { - //key_spdexpire(sp); - sp->state = IPSEC_SPSTATE_DEAD; - sp->refcnt++; - *spptr++ = sp; - spcount++; - } + && tv.tv_sec - sp->lastused > sp->validtime)) { + //key_spdexpire(sp); 
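/*
 * key_bbcmp() above compares the leading `bits` bits of two byte strings:
 * whole bytes first, then the remainder through the high-order mask
 * ~((1 << (8 - bits)) - 1). A self-contained copy with a quick check:
 */
#include <assert.h>
#include <stdint.h>

static int
bbcmp_sketch(const uint8_t *p1, const uint8_t *p2, unsigned bits)
{
	if (p1 == NULL || p2 == NULL) {
		return p1 == p2;
	}
	while (bits >= 8) {
		if (*p1++ != *p2++) {
			return 0;
		}
		bits -= 8;
	}
	if (bits > 0) {
		uint8_t mask = (uint8_t)~((1u << (8 - bits)) - 1);
		if ((*p1 & mask) != (*p2 & mask)) {
			return 0;
		}
	}
	return 1; /* match */
}

int
main(void)
{
	uint8_t a[] = { 10, 0, 0, 1 };   /* 10.0.0.1 */
	uint8_t b[] = { 10, 0, 0, 129 }; /* 10.0.0.129 */
	assert(bbcmp_sketch(a, b, 24));  /* same /24 prefix */
	assert(!bbcmp_sketch(a, b, 25)); /* bit 25 differs: 0 vs 1 */
	return 0;
}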
+ sp->state = IPSEC_SPSTATE_DEAD; + sp->refcnt++; + *spptr++ = sp; + spcount++; + } } } } } - + /* SAD */ { struct secashead *sah, *nextsah; struct secasvar *sav, *nextsav; - + for (sah = LIST_FIRST(&sahtree); - sah != NULL; - sah = nextsah) { - + sah != NULL; + sah = nextsah) { sah_count++; nextsah = LIST_NEXT(sah, chain); - + /* if sah has been dead, then delete it and process next sah. */ if (sah->state == SADB_SASTATE_DEAD) { key_delsah(sah); dead_sah_count++; continue; } - + if (LIST_FIRST(&sah->savtree[SADB_SASTATE_LARVAL]) == NULL && LIST_FIRST(&sah->savtree[SADB_SASTATE_MATURE]) == NULL && LIST_FIRST(&sah->savtree[SADB_SASTATE_DYING]) == NULL && @@ -6028,26 +6262,25 @@ key_timehandler(void) empty_sah_count++; continue; } - + if (savbufcount == 0) { continue; } - + stop_handler = 0; - + /* if LARVAL entry doesn't become MATURE, delete it. */ for (sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_LARVAL]); - sav != NULL; - sav = nextsav) { - + sav != NULL; + sav = nextsav) { larval_sav_count++; total_sav_count++; nextsav = LIST_NEXT(sav, chain); - + if (sav->lft_h != NULL) { /* If a hard lifetime is defined for the LARVAL SA, use it */ if (sav->lft_h->sadb_lifetime_addtime != 0 - && tv.tv_sec - sav->created > sav->lft_h->sadb_lifetime_addtime) { + && tv.tv_sec - sav->created > sav->lft_h->sadb_lifetime_addtime) { if (sav->always_expire) { key_send_delete(sav); sav = NULL; @@ -6063,7 +6296,7 @@ key_timehandler(void) } } } - + /* * If this is a NAT traversal SA with no activity, * we need to send a keep alive. @@ -6075,47 +6308,47 @@ key_timehandler(void) * when to send the keepalive. */ if (savkabuf && savkacount < savbufcount) { - sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_MATURE]); //%%% should we check dying list if this is empty??? + sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_MATURE]); //%%% should we check dying list if this is empty??? if (sav && (natt_keepalive_interval || sav->natt_interval) && - (sav->flags & (SADB_X_EXT_NATT_KEEPALIVE | SADB_X_EXT_ESP_KEEPALIVE)) != 0) { + (sav->flags & (SADB_X_EXT_NATT_KEEPALIVE | SADB_X_EXT_ESP_KEEPALIVE)) != 0) { sav->refcnt++; *savkaptr++ = sav; savkacount++; } } - + /* * check MATURE entry to start to send expire message * whether or not. */ for (sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_MATURE]); - sav != NULL; - sav = nextsav) { - + sav != NULL; + sav = nextsav) { mature_sav_count++; total_sav_count++; nextsav = LIST_NEXT(sav, chain); - + /* we don't need to check. */ - if (sav->lft_s == NULL) + if (sav->lft_s == NULL) { continue; - + } + /* sanity check */ if (sav->lft_c == NULL) { - ipseclog((LOG_DEBUG,"key_timehandler: " - "There is no CURRENT time, why?\n")); + ipseclog((LOG_DEBUG, "key_timehandler: " + "There is no CURRENT time, why?\n")); continue; } - + /* check SOFT lifetime */ if (sav->lft_s->sadb_lifetime_addtime != 0 - && tv.tv_sec - sav->created > sav->lft_s->sadb_lifetime_addtime) { + && tv.tv_sec - sav->created > sav->lft_s->sadb_lifetime_addtime) { /* * If always_expire is set, expire. Otherwise, * if the SA has not been used, delete immediately. */ if (sav->lft_c->sadb_lifetime_usetime == 0 - && sav->always_expire == 0) { + && sav->always_expire == 0) { key_sa_chgstate(sav, SADB_SASTATE_DEAD); key_freesav(sav, KEY_SADB_LOCKED); sav = NULL; @@ -6126,7 +6359,6 @@ key_timehandler(void) savexcount++; } } - /* check SOFT lifetime by bytes */ /* * XXX I don't know the way to delete this SA @@ -6134,9 +6366,8 @@ key_timehandler(void) * installed too big lifetime by time. 
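/*
 * The MATURE-queue walk below encodes a three-way decision once an SA's
 * soft add-time has elapsed: an SA that was never used (usetime == 0) and
 * is not marked always_expire is killed on the spot; otherwise it is queued
 * so a SADB_EXPIRE message can be sent after sadb_mutex is dropped. Sketch:
 */
enum soft_action { SA_KEEP, SA_KILL_SILENTLY, SA_QUEUE_EXPIRE };

static enum soft_action
soft_lifetime_action(long now, long created, long soft_addtime,
    unsigned long long usetime, int always_expire)
{
	if (soft_addtime == 0 || now - created <= soft_addtime) {
		return SA_KEEP;             /* soft deadline not reached */
	}
	if (usetime == 0 && !always_expire) {
		return SA_KILL_SILENTLY;    /* never used: just drop it */
	}
	return SA_QUEUE_EXPIRE;             /* tell userland via EXPIRE */
}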
*/ else if (savexbuf && savexcount < savbufcount - && sav->lft_s->sadb_lifetime_bytes != 0 - && sav->lft_s->sadb_lifetime_bytes < sav->lft_c->sadb_lifetime_bytes) { - + && sav->lft_s->sadb_lifetime_bytes != 0 + && sav->lft_s->sadb_lifetime_bytes < sav->lft_c->sadb_lifetime_bytes) { /* * XXX If we keep to send expire * message in the status of @@ -6149,29 +6380,29 @@ key_timehandler(void) savexcount++; } } - + /* check DYING entry to change status to DEAD. */ for (sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_DYING]); - sav != NULL; - sav = nextsav) { - + sav != NULL; + sav = nextsav) { dying_sav_count++; total_sav_count++; nextsav = LIST_NEXT(sav, chain); - + /* we don't need to check. */ - if (sav->lft_h == NULL) + if (sav->lft_h == NULL) { continue; - + } + /* sanity check */ if (sav->lft_c == NULL) { ipseclog((LOG_DEBUG, "key_timehandler: " - "There is no CURRENT time, why?\n")); + "There is no CURRENT time, why?\n")); continue; } - + if (sav->lft_h->sadb_lifetime_addtime != 0 - && tv.tv_sec - sav->created > sav->lft_h->sadb_lifetime_addtime) { + && tv.tv_sec - sav->created > sav->lft_h->sadb_lifetime_addtime) { if (sav->always_expire) { key_send_delete(sav); sav = NULL; @@ -6181,16 +6412,16 @@ key_timehandler(void) sav = NULL; } } -#if 0 /* XXX Should we keep to send expire message until HARD lifetime ? */ +#if 0 /* XXX Should we keep to send expire message until HARD lifetime ? */ else if (savbuf && savexcount < savbufcount - && sav->lft_s != NULL - && sav->lft_s->sadb_lifetime_addtime != 0 - && tv.tv_sec - sav->created > sav->lft_s->sadb_lifetime_addtime) { + && sav->lft_s != NULL + && sav->lft_s->sadb_lifetime_addtime != 0 + && tv.tv_sec - sav->created > sav->lft_s->sadb_lifetime_addtime) { /* * XXX: should be checked to be * installed the valid SA. */ - + /* * If there is no SA then sending * expire message. @@ -6203,31 +6434,30 @@ key_timehandler(void) #endif /* check HARD lifetime by bytes */ else if (sav->lft_h->sadb_lifetime_bytes != 0 - && sav->lft_h->sadb_lifetime_bytes < sav->lft_c->sadb_lifetime_bytes) { + && sav->lft_h->sadb_lifetime_bytes < sav->lft_c->sadb_lifetime_bytes) { key_sa_chgstate(sav, SADB_SASTATE_DEAD); key_freesav(sav, KEY_SADB_LOCKED); sav = NULL; } } - + /* delete entry in DEAD */ for (sav = LIST_FIRST(&sah->savtree[SADB_SASTATE_DEAD]); - sav != NULL; - sav = nextsav) { - + sav != NULL; + sav = nextsav) { dead_sav_count++; total_sav_count++; nextsav = LIST_NEXT(sav, chain); - + /* sanity check */ if (sav->state != SADB_SASTATE_DEAD) { ipseclog((LOG_DEBUG, "key_timehandler: " - "invalid sav->state " - "(queue: %d SA: %d): " - "kill it anyway\n", - SADB_SASTATE_DEAD, sav->state)); + "invalid sav->state " + "(queue: %d SA: %d): " + "kill it anyway\n", + SADB_SASTATE_DEAD, sav->state)); } - + /* * do not call key_freesav() here. 
* sav should already be freed, and sav->refcnt @@ -6237,18 +6467,18 @@ key_timehandler(void) } } } - + if (++key_timehandler_debug >= 300) { if (key_debug_level) { printf("%s: total stats for %u calls\n", __FUNCTION__, key_timehandler_debug); printf("%s: walked %u SPDs\n", __FUNCTION__, spd_count); printf("%s: walked %llu SAs: LARVAL SAs %u, MATURE SAs %u, DYING SAs %u, DEAD SAs %u\n", __FUNCTION__, - total_sav_count, larval_sav_count, mature_sav_count, dying_sav_count, dead_sav_count); + total_sav_count, larval_sav_count, mature_sav_count, dying_sav_count, dead_sav_count); printf("%s: walked %u SAHs: DEAD SAHs %u, EMPTY SAHs %u\n", __FUNCTION__, - sah_count, dead_sah_count, empty_sah_count); + sah_count, dead_sah_count, empty_sah_count); if (sah_search_calls) { printf("%s: SAH search cost %d iters per call\n", __FUNCTION__, - (sah_search_count/sah_search_calls)); + (sah_search_count / sah_search_calls)); } } spd_count = 0; @@ -6266,44 +6496,42 @@ key_timehandler(void) } #ifndef IPSEC_NONBLOCK_ACQUIRE /* ACQ tree */ - { + { struct secacq *acq, *nextacq; - + for (acq = LIST_FIRST(&acqtree); - acq != NULL; - acq = nextacq) { - + acq != NULL; + acq = nextacq) { stop_handler = 0; nextacq = LIST_NEXT(acq, chain); - + if (tv.tv_sec - acq->created > key_blockacq_lifetime - && __LIST_CHAINED(acq)) { + && __LIST_CHAINED(acq)) { LIST_REMOVE(acq, chain); KFREE(acq); } } - } + } #endif - + /* SP ACQ tree */ - { + { struct secspacq *acq, *nextacq; - + for (acq = LIST_FIRST(&spacqtree); - acq != NULL; - acq = nextacq) { - + acq != NULL; + acq = nextacq) { stop_handler = 0; nextacq = LIST_NEXT(acq, chain); - + if (tv.tv_sec - acq->created > key_blockacq_lifetime - && __LIST_CHAINED(acq)) { + && __LIST_CHAINED(acq)) { LIST_REMOVE(acq, chain); KFREE(acq); } } - } - + } + /* initialize random seed */ if (key_tick_init_random++ > key_int_random) { key_tick_init_random = 0; @@ -6313,19 +6541,20 @@ key_timehandler(void) uint64_t acc_sleep_time = 0; absolutetime_to_nanoseconds(mach_absolutetime_asleep, &acc_sleep_time); natt_now = ++up_time + (acc_sleep_time / NSEC_PER_SEC); - + lck_mtx_unlock(sadb_mutex); - + /* send messages outside of sadb_mutex */ if (spbuf && spcount > 0) { cnt = spcount; - while (cnt--) + while (cnt--) { key_spdexpire(*(--spptr)); + } } if (savkabuf && savkacount > 0) { struct secasvar **savkaptr_sav = savkaptr; int cnt_send = savkacount; - + while (cnt_send--) { if (ipsec_send_natt_keepalive(*(--savkaptr))) { // iterate (all over again) and update timestamps @@ -6333,35 +6562,39 @@ key_timehandler(void) int cnt_update = savkacount; while (cnt_update--) { key_update_natt_keepalive_timestamp(*savkaptr, - *(--savkaptr_update)); + *(--savkaptr_update)); } } } } if (savexbuf && savexcount > 0) { cnt = savexcount; - while (cnt--) + while (cnt--) { key_expire(*(--savexptr)); + } } - + /* decrement ref counts and free buffers */ lck_mtx_lock(sadb_mutex); if (spbuf) { - while (spcount--) + while (spcount--) { key_freesp(*spptr++, KEY_SADB_LOCKED); + } KFREE(spbuf); } if (savkabuf) { - while (savkacount--) + while (savkacount--) { key_freesav(*savkaptr++, KEY_SADB_LOCKED); + } KFREE(savkabuf); } if (savexbuf) { - while (savexcount--) + while (savexcount--) { key_freesav(*savexptr++, KEY_SADB_LOCKED); + } KFREE(savexbuf); } - + if (stop_handler) { key_timehandler_running = 0; /* Turn on the ipsec bypass */ @@ -6386,12 +6619,12 @@ key_srandom(void) random(); #else struct timeval tv; - + microtime(&tv); - + srandom(tv.tv_usec); #endif - + return; } @@ -6399,35 +6632,35 @@ u_int32_t key_random(void) { 
u_int32_t value; - + key_randomfill(&value, sizeof(value)); return value; } void key_randomfill( - void *p, - size_t l) + void *p, + size_t l) { #ifdef __APPLE__ - cc_rand_generate(p, l); + cc_rand_generate(p, l); #else size_t n; u_int32_t v; static int warn = 1; - + n = 0; n = (size_t)read_random(p, (u_int)l); /* last resort */ while (n < l) { v = random(); bcopy(&v, (u_int8_t *)p + n, - l - n < sizeof(v) ? l - n : sizeof(v)); + l - n < sizeof(v) ? l - n : sizeof(v)); n += sizeof(v); - + if (warn) { printf("WARNING: pseudo-random number generator " - "used for IPsec processing\n"); + "used for IPsec processing\n"); warn = 0; } } @@ -6442,19 +6675,19 @@ key_randomfill( */ static u_int16_t key_satype2proto( - u_int8_t satype) + u_int8_t satype) { switch (satype) { - case SADB_SATYPE_UNSPEC: - return IPSEC_PROTO_ANY; - case SADB_SATYPE_AH: - return IPPROTO_AH; - case SADB_SATYPE_ESP: - return IPPROTO_ESP; - case SADB_X_SATYPE_IPCOMP: - return IPPROTO_IPCOMP; - default: - return 0; + case SADB_SATYPE_UNSPEC: + return IPSEC_PROTO_ANY; + case SADB_SATYPE_AH: + return IPPROTO_AH; + case SADB_SATYPE_ESP: + return IPPROTO_ESP; + case SADB_X_SATYPE_IPCOMP: + return IPPROTO_IPCOMP; + default: + return 0; } /* NOTREACHED */ } @@ -6466,50 +6699,50 @@ key_satype2proto( */ static u_int8_t key_proto2satype( - u_int16_t proto) + u_int16_t proto) { switch (proto) { - case IPPROTO_AH: - return SADB_SATYPE_AH; - case IPPROTO_ESP: - return SADB_SATYPE_ESP; - case IPPROTO_IPCOMP: - return SADB_X_SATYPE_IPCOMP; - default: - return 0; + case IPPROTO_AH: + return SADB_SATYPE_AH; + case IPPROTO_ESP: + return SADB_SATYPE_ESP; + case IPPROTO_IPCOMP: + return SADB_X_SATYPE_IPCOMP; + default: + return 0; } /* NOTREACHED */ } static ifnet_t -key_get_ipsec_if_from_message (const struct sadb_msghdr *mhp, int message_type) +key_get_ipsec_if_from_message(const struct sadb_msghdr *mhp, int message_type) { struct sadb_x_ipsecif *ipsecifopts = NULL; ifnet_t ipsec_if = NULL; - + ipsecifopts = (struct sadb_x_ipsecif *)(void *)mhp->ext[message_type]; if (ipsecifopts != NULL) { if (ipsecifopts->sadb_x_ipsecif_ipsec_if[0]) { ifnet_find_by_name(ipsecifopts->sadb_x_ipsecif_ipsec_if, &ipsec_if); } } - + return ipsec_if; } static u_int -key_get_outgoing_ifindex_from_message (const struct sadb_msghdr *mhp, int message_type) +key_get_outgoing_ifindex_from_message(const struct sadb_msghdr *mhp, int message_type) { struct sadb_x_ipsecif *ipsecifopts = NULL; ifnet_t outgoing_if = NULL; - + ipsecifopts = (struct sadb_x_ipsecif *)(void *)mhp->ext[message_type]; if (ipsecifopts != NULL) { if (ipsecifopts->sadb_x_ipsecif_outgoing_if[0]) { ifnet_find_by_name(ipsecifopts->sadb_x_ipsecif_outgoing_if, &outgoing_if); - } - } - + } + } + return outgoing_if ? 
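/*
 * key_satype2proto()/key_proto2satype() below are a pure lookup: PF_KEY SA
 * types map to the IANA IP protocol numbers ESP=50, AH=51, IPCOMP=108, with
 * SADB_SATYPE_UNSPEC widening to "any protocol" in one direction only. A
 * round-trip check of the three concrete pairs (satype values per RFC 2367
 * and the KAME SADB_X_SATYPE_IPCOMP extension):
 */
#include <assert.h>

#define SATYPE_AH     2   /* SADB_SATYPE_AH */
#define SATYPE_ESP    3   /* SADB_SATYPE_ESP */
#define SATYPE_IPCOMP 9   /* SADB_X_SATYPE_IPCOMP */

static unsigned
satype2proto(unsigned satype)
{
	switch (satype) {
	case SATYPE_AH:     return 51;   /* IPPROTO_AH */
	case SATYPE_ESP:    return 50;   /* IPPROTO_ESP */
	case SATYPE_IPCOMP: return 108;  /* IPPROTO_IPCOMP */
	default:            return 0;    /* unknown */
	}
}

static unsigned
proto2satype(unsigned proto)
{
	switch (proto) {
	case 51:  return SATYPE_AH;
	case 50:  return SATYPE_ESP;
	case 108: return SATYPE_IPCOMP;
	default:  return 0;
	}
}

int
main(void)
{
	assert(proto2satype(satype2proto(SATYPE_AH)) == SATYPE_AH);
	assert(proto2satype(satype2proto(SATYPE_ESP)) == SATYPE_ESP);
	assert(proto2satype(satype2proto(SATYPE_IPCOMP)) == SATYPE_IPCOMP);
	return 0;
}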
outgoing_if->if_index : 0; } @@ -6528,9 +6761,9 @@ key_get_outgoing_ifindex_from_message (const struct sadb_msghdr *mhp, int messag */ static int key_getspi( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_address *src0, *dst0; struct secasindex saidx; @@ -6542,13 +6775,14 @@ key_getspi( u_int8_t mode; u_int32_t reqid; int error; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_getspi: NULL pointer is passed.\n"); - + } + if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { ipseclog((LOG_DEBUG, "key_getspi: invalid message is passed.\n")); @@ -6561,72 +6795,76 @@ key_getspi( } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { mode = ((struct sadb_x_sa2 *) - (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; + (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; reqid = ((struct sadb_x_sa2 *) - (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; + (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; } else { mode = IPSEC_MODE_ANY; reqid = 0; } - + src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); - + ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); - + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_getspi: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } - + /* make sure if port number is zero. */ switch (((struct sockaddr *)(src0 + 1))->sa_family) { - case AF_INET: - if (((struct sockaddr *)(src0 + 1))->sa_len != - sizeof(struct sockaddr_in)) - return key_senderror(so, m, EINVAL); - ((struct sockaddr_in *)(void *)(src0 + 1))->sin_port = 0; - break; - case AF_INET6: - if (((struct sockaddr *)(src0 + 1))->sa_len != - sizeof(struct sockaddr_in6)) - return key_senderror(so, m, EINVAL); - ((struct sockaddr_in6 *)(void *)(src0 + 1))->sin6_port = 0; - break; - default: - ; /*???*/ + case AF_INET: + if (((struct sockaddr *)(src0 + 1))->sa_len != + sizeof(struct sockaddr_in)) { + return key_senderror(so, m, EINVAL); + } + ((struct sockaddr_in *)(void *)(src0 + 1))->sin_port = 0; + break; + case AF_INET6: + if (((struct sockaddr *)(src0 + 1))->sa_len != + sizeof(struct sockaddr_in6)) { + return key_senderror(so, m, EINVAL); + } + ((struct sockaddr_in6 *)(void *)(src0 + 1))->sin6_port = 0; + break; + default: + ; /*???*/ } switch (((struct sockaddr *)(dst0 + 1))->sa_family) { - case AF_INET: - if (((struct sockaddr *)(dst0 + 1))->sa_len != - sizeof(struct sockaddr_in)) - return key_senderror(so, m, EINVAL); - ((struct sockaddr_in *)(void *)(dst0 + 1))->sin_port = 0; - break; - case AF_INET6: - if (((struct sockaddr *)(dst0 + 1))->sa_len != - sizeof(struct sockaddr_in6)) - return key_senderror(so, m, EINVAL); - ((struct sockaddr_in6 *)(void *)(dst0 + 1))->sin6_port = 0; - break; - default: - ; /*???*/ + case AF_INET: + if (((struct sockaddr *)(dst0 + 1))->sa_len != + sizeof(struct sockaddr_in)) { + return key_senderror(so, m, EINVAL); + } + ((struct sockaddr_in *)(void *)(dst0 + 1))->sin_port = 0; + break; + case AF_INET6: + if (((struct sockaddr *)(dst0 + 1))->sa_len != + sizeof(struct sockaddr_in6)) { + return key_senderror(so, m, EINVAL); + } + ((struct sockaddr_in6 *)(void *)(dst0 + 1))->sin6_port = 0; + 
break; + default: + ; /*???*/ } - + /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); - + lck_mtx_lock(sadb_mutex); - + /* SPI allocation */ spi = key_do_getnewspi((struct sadb_spirange *) - (void *)mhp->ext[SADB_EXT_SPIRANGE], &saidx); + (void *)mhp->ext[SADB_EXT_SPIRANGE], &saidx); if (spi == 0) { lck_mtx_unlock(sadb_mutex); return key_senderror(so, m, EINVAL); } - + /* get a SA index */ if ((newsah = key_getsah(&saidx)) == NULL) { /* create a new SA index: key_addspi is always used for inbound spi */ @@ -6636,7 +6874,7 @@ key_getspi( return key_senderror(so, m, ENOBUFS); } } - + /* get a new SA */ /* XXX rewrite */ newsav = key_newsav(m, mhp, newsah, &error, so); @@ -6645,10 +6883,10 @@ key_getspi( lck_mtx_unlock(sadb_mutex); return key_senderror(so, m, error); } - + /* set spi */ key_setspi(newsav, htonl(spi)); - + #ifndef IPSEC_NONBLOCK_ACQUIRE /* delete the entry in acqtree */ if (mhp->msg->sadb_msg_seq != 0) { @@ -6662,21 +6900,22 @@ key_getspi( } } #endif - + lck_mtx_unlock(sadb_mutex); - - { + + { struct mbuf *n, *nn; struct sadb_sa *m_sa; struct sadb_msg *newmsg; int off, len; - + /* create new sadb_msg to reply. */ len = PFKEY_ALIGN8(sizeof(struct sadb_msg)) + - PFKEY_ALIGN8(sizeof(struct sadb_sa)); - if (len > MCLBYTES) + PFKEY_ALIGN8(sizeof(struct sadb_sa)); + if (len > MCLBYTES) { return key_senderror(so, m, ENOBUFS); - + } + MGETHDR(n, M_WAITOK, MT_DATA); if (n && len > MHLEN) { MCLGET(n, M_WAITOK); @@ -6685,108 +6924,116 @@ key_getspi( n = NULL; } } - if (!n) + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + n->m_len = len; n->m_next = NULL; off = 0; - + m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off); off += PFKEY_ALIGN8(sizeof(struct sadb_msg)); - + m_sa = (struct sadb_sa *)(void *)(mtod(n, caddr_t) + off); m_sa->sadb_sa_len = PFKEY_UNIT64(sizeof(struct sadb_sa)); m_sa->sadb_sa_exttype = SADB_EXT_SA; m_sa->sadb_sa_spi = htonl(spi); off += PFKEY_ALIGN8(sizeof(struct sadb_sa)); - + #if DIAGNOSTIC - if (off != len) + if (off != len) { panic("length inconsistency in key_getspi"); + } #endif { int mbufItems[] = {SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST}; - n->m_next = key_gather_mbuf(m, mhp, 0, sizeof(mbufItems)/sizeof(int), mbufItems); + n->m_next = key_gather_mbuf(m, mhp, 0, sizeof(mbufItems) / sizeof(int), mbufItems); if (!n->m_next) { m_freem(n); return key_senderror(so, m, ENOBUFS); } } - + if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); - if (n == NULL) + if (n == NULL) { return key_sendup_mbuf(so, m, KEY_SENDUP_ONE); + } } - + n->m_pkthdr.len = 0; - for (nn = n; nn; nn = nn->m_next) + for (nn = n; nn; nn = nn->m_next) { n->m_pkthdr.len += nn->m_len; - + } + newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_seq = newsav->seq; newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ONE); - } + } } u_int32_t key_getspi2(struct sockaddr *src, - struct sockaddr *dst, - u_int8_t proto, - u_int8_t mode, - u_int32_t reqid, - struct sadb_spirange *spirange) + struct sockaddr *dst, + u_int8_t proto, + u_int8_t mode, + u_int32_t reqid, + struct sadb_spirange *spirange) { u_int32_t spi; struct secasindex saidx; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, mode, reqid, src, dst, 0, &saidx); - + /* make sure if port number is zero. 
*/ switch (((struct sockaddr *)&saidx.src)->sa_family) { - case AF_INET: - if (((struct sockaddr *)&saidx.src)->sa_len != sizeof(struct sockaddr_in)) - return 0; - ((struct sockaddr_in *)&saidx.src)->sin_port = 0; - break; - case AF_INET6: - if (((struct sockaddr *)&saidx.src)->sa_len != sizeof(struct sockaddr_in6)) - return 0; - ((struct sockaddr_in6 *)&saidx.src)->sin6_port = 0; - break; - default: - ; /*???*/ - } - switch (((struct sockaddr *)&saidx.dst)->sa_family) { - case AF_INET: - if (((struct sockaddr *)&saidx.dst)->sa_len != sizeof(struct sockaddr_in)) - return 0; - ((struct sockaddr_in *)&saidx.dst)->sin_port = 0; - break; - case AF_INET6: - if (((struct sockaddr *)&saidx.dst)->sa_len != sizeof(struct sockaddr_in6)) - return 0; - ((struct sockaddr_in6 *)&saidx.dst)->sin6_port = 0; - break; - default: - ; /*???*/ + case AF_INET: + if (((struct sockaddr *)&saidx.src)->sa_len != sizeof(struct sockaddr_in)) { + return 0; + } + ((struct sockaddr_in *)&saidx.src)->sin_port = 0; + break; + case AF_INET6: + if (((struct sockaddr *)&saidx.src)->sa_len != sizeof(struct sockaddr_in6)) { + return 0; + } + ((struct sockaddr_in6 *)&saidx.src)->sin6_port = 0; + break; + default: + ; /*???*/ + } + switch (((struct sockaddr *)&saidx.dst)->sa_family) { + case AF_INET: + if (((struct sockaddr *)&saidx.dst)->sa_len != sizeof(struct sockaddr_in)) { + return 0; + } + ((struct sockaddr_in *)&saidx.dst)->sin_port = 0; + break; + case AF_INET6: + if (((struct sockaddr *)&saidx.dst)->sa_len != sizeof(struct sockaddr_in6)) { + return 0; + } + ((struct sockaddr_in6 *)&saidx.dst)->sin6_port = 0; + break; + default: + ; /*???*/ } - + lck_mtx_lock(sadb_mutex); - + /* SPI allocation */ spi = key_do_getnewspi(spirange, &saidx); - + lck_mtx_unlock(sadb_mutex); - + return spi; } @@ -6799,15 +7046,15 @@ key_getspi2(struct sockaddr *src, */ static u_int32_t key_do_getnewspi( - struct sadb_spirange *spirange, - struct secasindex *saidx) + struct sadb_spirange *spirange, + struct secasindex *saidx) { u_int32_t newspi; u_int32_t keymin, keymax; int count = key_spi_trycnt; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* set spi range to allocate */ if (spirange != NULL) { keymin = spirange->sadb_spirange_min; @@ -6819,52 +7066,53 @@ key_do_getnewspi( /* IPCOMP needs 2-byte SPI */ if (saidx->proto == IPPROTO_IPCOMP) { u_int32_t t; - if (keymin >= 0x10000) + if (keymin >= 0x10000) { keymin = 0xffff; - if (keymax >= 0x10000) + } + if (keymax >= 0x10000) { keymax = 0xffff; + } if (keymin > keymax) { t = keymin; keymin = keymax; keymax = t; } } - + if (keymin == keymax) { if (key_checkspidup(saidx, keymin) != NULL) { ipseclog((LOG_DEBUG, "key_do_getnewspi: SPI %u exists already.\n", keymin)); return 0; } - + count--; /* taking one cost. */ newspi = keymin; - } else { - u_int32_t range = keymax - keymin + 1; /* overflow value of zero means full range */ - + /* init SPI */ newspi = 0; - + /* when requesting to allocate spi ranged */ while (count--) { u_int32_t rand_val = key_random(); - + /* generate pseudo-random SPI value ranged. */ newspi = (range == 0 ? 
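/*
 * SPI selection: the randomly drawn value is folded into the caller
 * supplied [keymin, keymax] window.  range is keymax - keymin + 1,
 * and a wrapped result of zero means the full 32-bit space, in which
 * case the raw random value is used as is.  Illustrative values:
 * keymin = 0x100 and keymax = 0x1ff give range = 0x100, so
 * rand_val = 0x12345678 yields newspi = 0x100 + (0x12345678 % 0x100)
 * = 0x178.  Each candidate is checked against the SAD with
 * key_checkspidup() and at most key_spi_trycnt attempts are made
 * before the allocation is reported as failed.
 */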
rand_val : keymin + (rand_val % range)); - - if (key_checkspidup(saidx, newspi) == NULL) + + if (key_checkspidup(saidx, newspi) == NULL) { break; + } } - + if (count == 0 || newspi == 0) { ipseclog((LOG_DEBUG, "key_do_getnewspi: to allocate spi is failed.\n")); return 0; } } - + /* statistics */ keystat.getspi_count = - (keystat.getspi_count + key_spi_trycnt - count) / 2; - + (keystat.getspi_count + key_spi_trycnt - count) / 2; + return newspi; } @@ -6883,9 +7131,9 @@ key_do_getnewspi( */ static int key_update( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_sa *sa0; struct sadb_address *src0, *dst0; @@ -6898,33 +7146,34 @@ key_update( u_int32_t reqid; u_int16_t flags2; int error; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_update: NULL pointer is passed.\n"); - + } + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_update: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } - + if (mhp->ext[SADB_EXT_SA] == NULL || mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP && - mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) || + mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) || (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH && - mhp->ext[SADB_EXT_KEY_AUTH] == NULL) || + mhp->ext[SADB_EXT_KEY_AUTH] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL && - mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) || + mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL && - mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { - ipseclog((LOG_DEBUG, "key_update: invalid message is passed.\n")); - return key_senderror(so, m, EINVAL); - } + mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { + ipseclog((LOG_DEBUG, "key_update: invalid message is passed.\n")); + return key_senderror(so, m, EINVAL); + } if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { @@ -6933,9 +7182,9 @@ key_update( } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { mode = ((struct sadb_x_sa2 *) - (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; + (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; reqid = ((struct sadb_x_sa2 *) - (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; + (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; flags2 = ((struct sadb_x_sa2 *)(void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_flags; } else { mode = IPSEC_MODE_ANY; @@ -6943,24 +7192,24 @@ key_update( flags2 = 0; } /* XXX boundary checking for other extensions */ - + sa0 = (struct sadb_sa *)(void *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); - + /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, ipsec_if ? 
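/*
 * The secasindex assembled here is the lookup key for a SA header:
 * protocol, mode, reqid, both endpoint addresses and, on this
 * platform, the index of the ipsec interface named in the message.
 * For SADB_UPDATE the header must already exist; a failed
 * key_getsah() below answers ENOENT rather than creating a fresh
 * one, which is what distinguishes UPDATE from ADD and GETSPI.
 */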
ipsec_if->if_index : 0, &saidx); - + lck_mtx_lock(sadb_mutex); - + /* get a SA header */ if ((sah = key_getsah(&saidx)) == NULL) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_update: no SA index found.\n")); return key_senderror(so, m, ENOENT); } - + /* set spidx if there */ /* XXX rewrite */ error = key_setident(sah, m, mhp); @@ -6968,53 +7217,53 @@ key_update( lck_mtx_unlock(sadb_mutex); return key_senderror(so, m, error); } - + /* find a SA with sequence number. */ #if IPSEC_DOSEQCHECK if (mhp->msg->sadb_msg_seq != 0 - && (sav = key_getsavbyseq(sah, mhp->msg->sadb_msg_seq)) == NULL) { - lck_mtx_unlock(sadb_mutex); + && (sav = key_getsavbyseq(sah, mhp->msg->sadb_msg_seq)) == NULL) { + lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, - "key_update: no larval SA with sequence %u exists.\n", - mhp->msg->sadb_msg_seq)); + "key_update: no larval SA with sequence %u exists.\n", + mhp->msg->sadb_msg_seq)); return key_senderror(so, m, ENOENT); } #else if ((sav = key_getsavbyspi(sah, sa0->sadb_sa_spi)) == NULL) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, - "key_update: no such a SA found (spi:%u)\n", - (u_int32_t)ntohl(sa0->sadb_sa_spi))); + "key_update: no such a SA found (spi:%u)\n", + (u_int32_t)ntohl(sa0->sadb_sa_spi))); return key_senderror(so, m, EINVAL); } #endif - + /* validity check */ if (sav->sah->saidx.proto != proto) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, - "key_update: protocol mismatched (DB=%u param=%u)\n", - sav->sah->saidx.proto, proto)); + "key_update: protocol mismatched (DB=%u param=%u)\n", + sav->sah->saidx.proto, proto)); return key_senderror(so, m, EINVAL); } #if IPSEC_DOSEQCHECK if (sav->spi != sa0->sadb_sa_spi) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, - "key_update: SPI mismatched (DB:%u param:%u)\n", - (u_int32_t)ntohl(sav->spi), - (u_int32_t)ntohl(sa0->sadb_sa_spi))); + "key_update: SPI mismatched (DB:%u param:%u)\n", + (u_int32_t)ntohl(sav->spi), + (u_int32_t)ntohl(sa0->sadb_sa_spi))); return key_senderror(so, m, EINVAL); } #endif if (sav->pid != mhp->msg->sadb_msg_pid) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, - "key_update: pid mismatched (DB:%u param:%u)\n", - sav->pid, mhp->msg->sadb_msg_pid)); + "key_update: pid mismatched (DB:%u param:%u)\n", + sav->pid, mhp->msg->sadb_msg_pid)); return key_senderror(so, m, EINVAL); } - + /* copy sav values */ error = key_setsaval(sav, m, mhp); if (error) { @@ -7033,38 +7282,39 @@ key_update( * this SA is for transport mode - otherwise clear it. */ if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0 && - (sav->sah->saidx.mode != IPSEC_MODE_TRANSPORT || - sav->sah->saidx.src.ss_family != AF_INET)) + (sav->sah->saidx.mode != IPSEC_MODE_TRANSPORT || + sav->sah->saidx.src.ss_family != AF_INET)) { sav->flags &= ~SADB_X_EXT_NATT_MULTIPLEUSERS; - + } + /* check SA values to be mature. 
*/ if ((error = key_mature(sav)) != 0) { key_freesav(sav, KEY_SADB_LOCKED); lck_mtx_unlock(sadb_mutex); return key_senderror(so, m, error); } - + lck_mtx_unlock(sadb_mutex); - - { + + { struct mbuf *n; - + /* set msg buf from mhp */ n = key_getmsgbuf_x1(m, mhp); if (n == NULL) { ipseclog((LOG_DEBUG, "key_update: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } static int key_migrate(struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_sa *sa0 = NULL; struct sadb_address *src0 = NULL; @@ -7079,39 +7329,40 @@ key_migrate(struct socket *so, struct secashead *newsah = NULL; struct secasvar *sav = NULL; u_int16_t proto; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_migrate: NULL pointer is passed.\n"); - + } + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_migrate: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } - + if (mhp->ext[SADB_EXT_SA] == NULL || - mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || - mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || - mhp->ext[SADB_EXT_MIGRATE_ADDRESS_SRC] == NULL || - mhp->ext[SADB_EXT_MIGRATE_ADDRESS_DST] == NULL) { + mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || + mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || + mhp->ext[SADB_EXT_MIGRATE_ADDRESS_SRC] == NULL || + mhp->ext[SADB_EXT_MIGRATE_ADDRESS_DST] == NULL) { ipseclog((LOG_DEBUG, "key_migrate: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - + if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || - mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || - mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || - mhp->extlen[SADB_EXT_MIGRATE_ADDRESS_SRC] < sizeof(struct sadb_address) || - mhp->extlen[SADB_EXT_MIGRATE_ADDRESS_DST] < sizeof(struct sadb_address)) { + mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || + mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address) || + mhp->extlen[SADB_EXT_MIGRATE_ADDRESS_SRC] < sizeof(struct sadb_address) || + mhp->extlen[SADB_EXT_MIGRATE_ADDRESS_DST] < sizeof(struct sadb_address)) { ipseclog((LOG_DEBUG, "key_migrate: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - + lck_mtx_lock(sadb_mutex); - + sa0 = (struct sadb_sa *)(void *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); @@ -7119,35 +7370,38 @@ key_migrate(struct socket *so, dst1 = (struct sadb_address *)(mhp->ext[SADB_EXT_MIGRATE_ADDRESS_DST]); ipsec_if0 = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); ipsec_if1 = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_MIGRATE_IPSECIF); - + /* Find existing SAH and SAV */ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if0 ? 
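/*
 * SADB_MIGRATE re-homes one mature SA.  The loop below locates the
 * header matching the current (src0, dst0) pair and the SA with the
 * given SPI; a second index is then built from the MIGRATE_ADDRESS
 * extensions (src1, dst1) with mode and reqid inherited from the old
 * header, and key_migratesav() moves the SA onto that header,
 * creating it on demand.  Only the NAT-traversal fields (flags,
 * remote IKE port) are refreshed from the message afterwards.
 */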
ipsec_if0->if_index : 0, &saidx0); - + LIST_FOREACH(sah, &sahtree, chain) { - if (sah->state != SADB_SASTATE_MATURE) + if (sah->state != SADB_SASTATE_MATURE) { continue; - if (key_cmpsaidx(&sah->saidx, &saidx0, CMP_HEAD) == 0) + } + if (key_cmpsaidx(&sah->saidx, &saidx0, CMP_HEAD) == 0) { continue; - + } + sav = key_getsavbyspi(sah, sa0->sadb_sa_spi); - if (sav && sav->state == SADB_SASTATE_MATURE) + if (sav && sav->state == SADB_SASTATE_MATURE) { break; + } } if (sah == NULL) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_migrate: no mature SAH found.\n")); return key_senderror(so, m, ENOENT); } - + if (sav == NULL) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_migrate: no SA found.\n")); return key_senderror(so, m, ENOENT); } - + /* Find or create new SAH */ KEY_SETSECASIDX(proto, sah->saidx.mode, sah->saidx.reqid, src1 + 1, dst1 + 1, ipsec_if1 ? ipsec_if1->if_index : 0, &saidx1); - + if ((newsah = key_getsah(&saidx1)) == NULL) { if ((newsah = key_newsah(&saidx1, ipsec_if1, key_get_outgoing_ifindex_from_message(mhp, SADB_X_EXT_MIGRATE_IPSECIF), sah->dir)) == NULL) { lck_mtx_unlock(sadb_mutex); @@ -7155,14 +7409,14 @@ key_migrate(struct socket *so, return key_senderror(so, m, ENOBUFS); } } - + /* Migrate SAV in to new SAH */ if (key_migratesav(sav, newsah) != 0) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_migrate: Failed to migrate SA to new SAH.\n")); return key_senderror(so, m, EINVAL); } - + /* Reset NAT values */ sav->flags = sa0->sadb_sa_flags; sav->remote_ike_port = ((const struct sadb_sa_2*)(sa0))->sadb_sa_natt_port; @@ -7175,33 +7429,37 @@ key_migrate(struct socket *so, * SADB_X_EXT_NATT is set and SADB_X_EXT_NATT_KEEPALIVE is not * set (we're not behind nat) - otherwise clear it. */ - if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) + if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) { if ((sav->flags & SADB_X_EXT_NATT) == 0 || - (sav->flags & SADB_X_EXT_NATT_KEEPALIVE) != 0) + (sav->flags & SADB_X_EXT_NATT_KEEPALIVE) != 0) { sav->flags &= ~SADB_X_EXT_NATT_MULTIPLEUSERS; - + } + } + lck_mtx_unlock(sadb_mutex); { struct mbuf *n; struct sadb_msg *newmsg; int mbufItems[] = {SADB_EXT_RESERVED, SADB_EXT_SA, - SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST, SADB_X_EXT_IPSECIF, - SADB_EXT_MIGRATE_ADDRESS_SRC, SADB_EXT_MIGRATE_ADDRESS_DST, SADB_X_EXT_MIGRATE_IPSECIF}; - + SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST, SADB_X_EXT_IPSECIF, + SADB_EXT_MIGRATE_ADDRESS_SRC, SADB_EXT_MIGRATE_ADDRESS_DST, SADB_X_EXT_MIGRATE_IPSECIF}; + /* create new sadb_msg to reply. */ - n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems)/sizeof(int), mbufItems); - if (!n) + n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems) / sizeof(int), mbufItems); + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); - if (n == NULL) + if (n == NULL) { return key_senderror(so, m, ENOBUFS); + } } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); } @@ -7217,21 +7475,20 @@ key_migrate(struct socket *so, #if IPSEC_DOSEQCHECK static struct secasvar * key_getsavbyseq( - struct secashead *sah, - u_int32_t seq) + struct secashead *sah, + u_int32_t seq) { struct secasvar *sav; u_int state; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + state = SADB_SASTATE_LARVAL; - + /* search SAD with sequence number ? 
*/ LIST_FOREACH(sav, &sah->savtree[state], chain) { - KEY_CHKSASTATE(state, sav->state, "key_getsabyseq"); - + if (sav->seq == seq) { sav->refcnt++; KEYDEBUG(KEYDEBUG_IPSEC_STAMP, @@ -7241,7 +7498,7 @@ key_getsavbyseq( return sav; } } - + return NULL; } #endif @@ -7263,9 +7520,9 @@ key_getsavbyseq( */ static int key_add( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_sa *sa0; struct sadb_address *src0, *dst0; @@ -7277,35 +7534,36 @@ key_add( u_int8_t mode; u_int32_t reqid; int error; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_add: NULL pointer is passed.\n"); - + } + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_add: invalid satype is passed.\n")); bzero_keys(mhp); return key_senderror(so, m, EINVAL); } - + if (mhp->ext[SADB_EXT_SA] == NULL || mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || (mhp->msg->sadb_msg_satype == SADB_SATYPE_ESP && - mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) || + mhp->ext[SADB_EXT_KEY_ENCRYPT] == NULL) || (mhp->msg->sadb_msg_satype == SADB_SATYPE_AH && - mhp->ext[SADB_EXT_KEY_AUTH] == NULL) || + mhp->ext[SADB_EXT_KEY_AUTH] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] != NULL && - mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) || + mhp->ext[SADB_EXT_LIFETIME_SOFT] == NULL) || (mhp->ext[SADB_EXT_LIFETIME_HARD] == NULL && - mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { - ipseclog((LOG_DEBUG, "key_add: invalid message is passed.\n")); - bzero_keys(mhp); - return key_senderror(so, m, EINVAL); - } + mhp->ext[SADB_EXT_LIFETIME_SOFT] != NULL)) { + ipseclog((LOG_DEBUG, "key_add: invalid message is passed.\n")); + bzero_keys(mhp); + return key_senderror(so, m, EINVAL); + } if (mhp->extlen[SADB_EXT_SA] < sizeof(struct sadb_sa) || mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { @@ -7316,24 +7574,24 @@ key_add( } if (mhp->ext[SADB_X_EXT_SA2] != NULL) { mode = ((struct sadb_x_sa2 *) - (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; + (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_mode; reqid = ((struct sadb_x_sa2 *) - (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; + (void *)mhp->ext[SADB_X_EXT_SA2])->sadb_x_sa2_reqid; } else { mode = IPSEC_MODE_ANY; reqid = 0; } - + sa0 = (struct sadb_sa *)(void *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); - + /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, mode, reqid, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); - + lck_mtx_lock(sadb_mutex); - + /* get a SA header */ if ((newsah = key_getsah(&saidx)) == NULL) { /* create a new SA header: key_addspi is always used for outbound spi */ @@ -7344,7 +7602,7 @@ key_add( return key_senderror(so, m, ENOBUFS); } } - + /* set spidx if there */ /* XXX rewrite */ error = key_setident(newsah, m, mhp); @@ -7353,7 +7611,7 @@ key_add( bzero_keys(mhp); return key_senderror(so, m, error); } - + /* create new SA entry. */ /* We can create new SA only if SPI is different. 
*/ if (key_getsavbyspi(newsah, sa0->sadb_sa_spi)) { @@ -7368,16 +7626,17 @@ key_add( bzero_keys(mhp); return key_senderror(so, m, error); } - + /* * Verify if SADB_X_EXT_NATT_MULTIPLEUSERS flag is set that * this SA is for transport mode - otherwise clear it. */ if ((newsav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0 && - (newsah->saidx.mode != IPSEC_MODE_TRANSPORT || - newsah->saidx.dst.ss_family != AF_INET)) + (newsah->saidx.mode != IPSEC_MODE_TRANSPORT || + newsah->saidx.dst.ss_family != AF_INET)) { newsav->flags &= ~SADB_X_EXT_NATT_MULTIPLEUSERS; - + } + /* check SA values to be mature. */ if ((error = key_mature(newsav)) != 0) { key_freesav(newsav, KEY_SADB_LOCKED); @@ -7385,17 +7644,17 @@ key_add( bzero_keys(mhp); return key_senderror(so, m, error); } - + lck_mtx_unlock(sadb_mutex); - + /* * don't call key_freesav() here, as we would like to keep the SA * in the database on success. */ - - { + + { struct mbuf *n; - + /* set msg buf from mhp */ n = key_getmsgbuf_x1(m, mhp); if (n == NULL) { @@ -7403,31 +7662,32 @@ key_add( bzero_keys(mhp); return key_senderror(so, m, ENOBUFS); } - + // mh.ext points to the mbuf content. // Zero out Encryption and Integrity keys if present. bzero_keys(mhp); m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } /* m is retained */ static int key_setident( - struct secashead *sah, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct secashead *sah, + struct mbuf *m, + const struct sadb_msghdr *mhp) { const struct sadb_ident *idsrc, *iddst; int idsrclen, iddstlen; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + /* sanity check */ - if (sah == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (sah == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_setident: NULL pointer is passed.\n"); - + } + /* don't make buffer if not there */ if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL && mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) { @@ -7435,37 +7695,37 @@ key_setident( sah->identd = NULL; return 0; } - + if (mhp->ext[SADB_EXT_IDENTITY_SRC] == NULL || mhp->ext[SADB_EXT_IDENTITY_DST] == NULL) { ipseclog((LOG_DEBUG, "key_setident: invalid identity.\n")); return EINVAL; } - + idsrc = (const struct sadb_ident *) - (void *)mhp->ext[SADB_EXT_IDENTITY_SRC]; + (void *)mhp->ext[SADB_EXT_IDENTITY_SRC]; iddst = (const struct sadb_ident *) - (void *)mhp->ext[SADB_EXT_IDENTITY_DST]; + (void *)mhp->ext[SADB_EXT_IDENTITY_DST]; idsrclen = mhp->extlen[SADB_EXT_IDENTITY_SRC]; iddstlen = mhp->extlen[SADB_EXT_IDENTITY_DST]; - + /* validity check */ if (idsrc->sadb_ident_type != iddst->sadb_ident_type) { ipseclog((LOG_DEBUG, "key_setident: ident type mismatch.\n")); return EINVAL; } - + switch (idsrc->sadb_ident_type) { - case SADB_IDENTTYPE_PREFIX: - case SADB_IDENTTYPE_FQDN: - case SADB_IDENTTYPE_USERFQDN: - default: - /* XXX do nothing */ - sah->idents = NULL; - sah->identd = NULL; - return 0; + case SADB_IDENTTYPE_PREFIX: + case SADB_IDENTTYPE_FQDN: + case SADB_IDENTTYPE_USERFQDN: + default: + /* XXX do nothing */ + sah->idents = NULL; + sah->identd = NULL; + return 0; } - + /* make structure */ KMALLOC_NOWAIT(sah->idents, struct sadb_ident *, idsrclen); if (sah->idents == NULL) { @@ -7491,7 +7751,7 @@ key_setident( } bcopy(idsrc, sah->idents, idsrclen); bcopy(iddst, sah->identd, iddstlen); - + return 0; } @@ -7501,39 +7761,42 @@ key_setident( */ static struct mbuf * key_getmsgbuf_x1( - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct mbuf *n; int mbufItems[] = 
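/*
 * key_getmsgbuf_x1() builds the UPDATE/ADD reply by copying the
 * extensions in the list that follows straight out of the request
 * with key_gather_mbuf(), then stamps errno = 0 and the total
 * length.  PF_KEY expresses lengths in 8-byte units, so the final
 * assignment amounts to:
 *
 *	msg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len);
 *
 * where PFKEY_UNIT64() divides the byte count by 8.
 */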
{SADB_EXT_RESERVED, SADB_EXT_SA, - SADB_X_EXT_SA2, SADB_EXT_ADDRESS_SRC, - SADB_EXT_ADDRESS_DST, SADB_EXT_LIFETIME_HARD, - SADB_EXT_LIFETIME_SOFT, SADB_EXT_IDENTITY_SRC, - SADB_EXT_IDENTITY_DST}; - + SADB_X_EXT_SA2, SADB_EXT_ADDRESS_SRC, + SADB_EXT_ADDRESS_DST, SADB_EXT_LIFETIME_HARD, + SADB_EXT_LIFETIME_SOFT, SADB_EXT_IDENTITY_SRC, + SADB_EXT_IDENTITY_DST}; + /* sanity check */ - if (m == NULL || mhp == NULL || mhp->msg == NULL) + if (m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_getmsgbuf_x1: NULL pointer is passed.\n"); - + } + /* create new sadb_msg to reply. */ - n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems)/sizeof(int), mbufItems); - if (!n) + n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems) / sizeof(int), mbufItems); + if (!n) { return NULL; - + } + if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); - if (n == NULL) + if (n == NULL) { return NULL; + } } mtod(n, struct sadb_msg *)->sadb_msg_errno = 0; mtod(n, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(n->m_pkthdr.len); - + PFKEY_UNIT64(n->m_pkthdr.len); + return n; } static int key_delete_all(struct socket *, struct mbuf *, - const struct sadb_msghdr *, u_int16_t); + const struct sadb_msghdr *, u_int16_t); /* * SADB_DELETE processing @@ -7548,9 +7811,9 @@ static int key_delete_all(struct socket *, struct mbuf *, */ static int key_delete( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_sa *sa0; struct sadb_address *src0, *dst0; @@ -7559,33 +7822,34 @@ key_delete( struct secashead *sah; struct secasvar *sav = NULL; u_int16_t proto; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_delete: NULL pointer is passed.\n"); - + } + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_delete: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } - + if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - + if (mhp->extlen[SADB_EXT_ADDRESS_SRC] < sizeof(struct sadb_address) || mhp->extlen[SADB_EXT_ADDRESS_DST] < sizeof(struct sadb_address)) { ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - + lck_mtx_lock(sadb_mutex); - + if (mhp->ext[SADB_EXT_SA] == NULL) { /* * Caller wants us to delete all non-LARVAL SAs @@ -7600,62 +7864,67 @@ key_delete( ipseclog((LOG_DEBUG, "key_delete: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - + sa0 = (struct sadb_sa *)(void *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); - + /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if ? 
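/*
 * SADB_DELETE comes in two flavours, both handled above: without a
 * SA extension the request means "delete every non-larval SA for
 * this address pair" and is diverted to key_delete_all(); with one,
 * the index being built here narrows the search to a single SPI,
 * whose SA is marked SADB_SASTATE_DEAD and released.  A missing
 * match answers ENOENT.
 */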
ipsec_if->if_index : 0, &saidx); - + /* get a SA header */ LIST_FOREACH(sah, &sahtree, chain) { - if (sah->state == SADB_SASTATE_DEAD) + if (sah->state == SADB_SASTATE_DEAD) { continue; - if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) + } + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) { continue; - + } + /* get a SA with SPI. */ sav = key_getsavbyspi(sah, sa0->sadb_sa_spi); - if (sav) + if (sav) { break; + } } if (sah == NULL) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_delete: no SA found.\n")); return key_senderror(so, m, ENOENT); } - + key_sa_chgstate(sav, SADB_SASTATE_DEAD); key_freesav(sav, KEY_SADB_LOCKED); - + lck_mtx_unlock(sadb_mutex); sav = NULL; - - { + + { struct mbuf *n; struct sadb_msg *newmsg; int mbufItems[] = {SADB_EXT_RESERVED, SADB_EXT_SA, - SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST}; - + SADB_EXT_ADDRESS_SRC, SADB_EXT_ADDRESS_DST}; + /* create new sadb_msg to reply. */ - n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems)/sizeof(int), mbufItems); - if (!n) + n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems) / sizeof(int), mbufItems); + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); - if (n == NULL) + if (n == NULL) { return key_senderror(so, m, ENOBUFS); + } } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } /* @@ -7663,10 +7932,10 @@ key_delete( */ static int key_delete_all( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp, - u_int16_t proto) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp, + u_int16_t proto) { struct sadb_address *src0, *dst0; ifnet_t ipsec_if = NULL; @@ -7674,71 +7943,76 @@ key_delete_all( struct secashead *sah; struct secasvar *sav, *nextsav; u_int stateidx, state; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + src0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_SRC]); dst0 = (struct sadb_address *)(mhp->ext[SADB_EXT_ADDRESS_DST]); ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); - + /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); - + LIST_FOREACH(sah, &sahtree, chain) { - if (sah->state == SADB_SASTATE_DEAD) + if (sah->state == SADB_SASTATE_DEAD) { continue; - if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) + } + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) { continue; - + } + /* Delete all non-LARVAL SAs. 
*/ for (stateidx = 0; - stateidx < _ARRAYLEN(saorder_state_alive); - stateidx++) { + stateidx < _ARRAYLEN(saorder_state_alive); + stateidx++) { state = saorder_state_alive[stateidx]; - if (state == SADB_SASTATE_LARVAL) + if (state == SADB_SASTATE_LARVAL) { continue; + } for (sav = LIST_FIRST(&sah->savtree[state]); - sav != NULL; sav = nextsav) { + sav != NULL; sav = nextsav) { nextsav = LIST_NEXT(sav, chain); /* sanity check */ if (sav->state != state) { ipseclog((LOG_DEBUG, "key_delete_all: " - "invalid sav->state " - "(queue: %d SA: %d)\n", - state, sav->state)); + "invalid sav->state " + "(queue: %d SA: %d)\n", + state, sav->state)); continue; } - + key_sa_chgstate(sav, SADB_SASTATE_DEAD); key_freesav(sav, KEY_SADB_LOCKED); } } } lck_mtx_unlock(sadb_mutex); - - { + + { struct mbuf *n; struct sadb_msg *newmsg; int mbufItems[] = {SADB_EXT_RESERVED, SADB_EXT_ADDRESS_SRC, - SADB_EXT_ADDRESS_DST}; - + SADB_EXT_ADDRESS_DST}; + /* create new sadb_msg to reply. */ - n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems)/sizeof(int), mbufItems); - if (!n) + n = key_gather_mbuf(m, mhp, 1, sizeof(mbufItems) / sizeof(int), mbufItems); + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + if (n->m_len < sizeof(struct sadb_msg)) { n = m_pullup(n, sizeof(struct sadb_msg)); - if (n == NULL) + if (n == NULL) { return key_senderror(so, m, ENOBUFS); + } } newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(n->m_pkthdr.len); - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ALL); - } + } } /* @@ -7755,9 +8029,9 @@ key_delete_all( */ static int key_get( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_sa *sa0; struct sadb_address *src0, *dst0; @@ -7766,19 +8040,20 @@ key_get( struct secashead *sah; struct secasvar *sav = NULL; u_int16_t proto; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_get: NULL pointer is passed.\n"); - + } + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_get: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } - + if (mhp->ext[SADB_EXT_SA] == NULL || mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL) { @@ -7791,39 +8066,42 @@ key_get( ipseclog((LOG_DEBUG, "key_get: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - + sa0 = (struct sadb_sa *)(void *)mhp->ext[SADB_EXT_SA]; src0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); - + /* XXX boundary check against sa_len */ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); - + lck_mtx_lock(sadb_mutex); - + /* get a SA header */ LIST_FOREACH(sah, &sahtree, chain) { - if (sah->state == SADB_SASTATE_DEAD) + if (sah->state == SADB_SASTATE_DEAD) { continue; - if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) + } + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_HEAD) == 0) { continue; - + } + /* get a SA with SPI. 
*/ sav = key_getsavbyspi(sah, sa0->sadb_sa_spi); - if (sav) + if (sav) { break; + } } if (sah == NULL) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_get: no SA found.\n")); return key_senderror(so, m, ENOENT); } - - { + + { struct mbuf *n; u_int8_t satype; - + /* map proto to satype */ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) { lck_mtx_unlock(sadb_mutex); @@ -7831,19 +8109,20 @@ key_get( return key_senderror(so, m, EINVAL); } lck_mtx_unlock(sadb_mutex); - + /* create new sadb_msg to reply. */ n = key_setdumpsa(sav, SADB_GET, satype, mhp->msg->sadb_msg_seq, - mhp->msg->sadb_msg_pid); - - - - if (!n) + mhp->msg->sadb_msg_pid); + + + + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_ONE); - } + } } /* @@ -7852,30 +8131,31 @@ key_get( * 0 : found, arg pointer to a SA stats is updated. */ static int -key_getsastatbyspi_one (u_int32_t spi, - struct sastat *stat) +key_getsastatbyspi_one(u_int32_t spi, + struct sastat *stat) { struct secashead *sah; struct secasvar *sav = NULL; - + if ((void *)stat == NULL) { return -1; } - + lck_mtx_lock(sadb_mutex); - + /* get a SA header */ LIST_FOREACH(sah, &sahtree, chain) { - if (sah->state == SADB_SASTATE_DEAD) + if (sah->state == SADB_SASTATE_DEAD) { continue; - + } + /* get a SA with SPI. */ sav = key_getsavbyspi(sah, spi); if (sav) { stat->spi = sav->spi; stat->created = sav->created; if (sav->lft_c) { - bcopy(sav->lft_c,&stat->lft_c, sizeof(stat->lft_c)); + bcopy(sav->lft_c, &stat->lft_c, sizeof(stat->lft_c)); } else { bzero(&stat->lft_c, sizeof(stat->lft_c)); } @@ -7883,9 +8163,9 @@ key_getsastatbyspi_one (u_int32_t spi, return 0; } } - + lck_mtx_unlock(sadb_mutex); - + return -1; } @@ -7895,14 +8175,14 @@ key_getsastatbyspi_one (u_int32_t spi, * 0 : found, arg pointers to a SA stats and 'maximum stats' are updated. */ static int -key_getsastatbyspi (struct sastat *stat_arg, - u_int32_t max_stat_arg, - struct sastat *stat_res, - u_int32_t stat_res_size, - u_int32_t *max_stat_res) +key_getsastatbyspi(struct sastat *stat_arg, + u_int32_t max_stat_arg, + struct sastat *stat_res, + u_int32_t stat_res_size, + u_int32_t *max_stat_res) { int cur, found = 0; - + if (stat_arg == NULL || stat_res == NULL || max_stat_res == NULL) { @@ -7911,15 +8191,15 @@ key_getsastatbyspi (struct sastat *stat_arg, u_int32_t max_stats = stat_res_size / (sizeof(struct sastat)); max_stats = ((max_stat_arg <= max_stats) ? max_stat_arg : max_stats); - + for (cur = 0; cur < max_stats; cur++) { if (key_getsastatbyspi_one(stat_arg[cur].spi, - &stat_res[found]) == 0) { + &stat_res[found]) == 0) { found++; } } *max_stat_res = found; - + if (found) { return 0; } @@ -7929,16 +8209,15 @@ key_getsastatbyspi (struct sastat *stat_arg, /* XXX make it sysctl-configurable? 
*/ static void key_getcomb_setlifetime( - struct sadb_comb *comb) + struct sadb_comb *comb) { - comb->sadb_comb_soft_allocations = 1; comb->sadb_comb_hard_allocations = 1; comb->sadb_comb_soft_bytes = 0; comb->sadb_comb_hard_bytes = 0; - comb->sadb_comb_hard_addtime = 86400; /* 1 day */ + comb->sadb_comb_hard_addtime = 86400; /* 1 day */ comb->sadb_comb_soft_addtime = comb->sadb_comb_soft_addtime * 80 / 100; - comb->sadb_comb_soft_usetime = 28800; /* 8 hours */ + comb->sadb_comb_soft_usetime = 28800; /* 8 hours */ comb->sadb_comb_hard_usetime = comb->sadb_comb_hard_usetime * 80 / 100; } @@ -7957,26 +8236,30 @@ key_getcomb_esp(void) int i, off, o; int totlen; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); - + m = NULL; for (i = 1; i <= SADB_EALG_MAX; i++) { algo = esp_algorithm_lookup(i); - if (!algo) + if (!algo) { continue; - - if (algo->keymax < ipsec_esp_keymin) + } + + if (algo->keymax < ipsec_esp_keymin) { continue; - if (algo->keymin < ipsec_esp_keymin) + } + if (algo->keymin < ipsec_esp_keymin) { encmin = ipsec_esp_keymin; - else + } else { encmin = algo->keymin; - - if (ipsec_esp_auth) + } + + if (ipsec_esp_auth) { m = key_getcomb_ah(); - else { + } else { #if DIAGNOSTIC - if (l > MLEN) + if (l > MLEN) { panic("assumption failed in key_getcomb_esp"); + } #endif MGET(m, M_WAITOK, MT_DATA); if (m) { @@ -7986,17 +8269,20 @@ key_getcomb_esp(void) bzero(mtod(m, caddr_t), m->m_len); } } - if (!m) + if (!m) { goto fail; - + } + totlen = 0; - for (n = m; n; n = n->m_next) + for (n = m; n; n = n->m_next) { totlen += n->m_len; + } #if DIAGNOSTIC - if (totlen % l) + if (totlen % l) { panic("assumption failed in key_getcomb_esp"); + } #endif - + for (off = 0; off < totlen; off += l) { n = m_pulldown(m, off, l, &o); if (!n) { @@ -8004,25 +8290,27 @@ key_getcomb_esp(void) goto fail; } comb = (struct sadb_comb *) - (void *)(mtod(n, caddr_t) + o); + (void *)(mtod(n, caddr_t) + o); bzero(comb, sizeof(*comb)); key_getcomb_setlifetime(comb); comb->sadb_comb_encrypt = i; comb->sadb_comb_encrypt_minbits = encmin; comb->sadb_comb_encrypt_maxbits = algo->keymax; } - - if (!result) + + if (!result) { result = m; - else + } else { m_cat(result, m); + } } - + return result; - + fail: - if (result) + if (result) { m_freem(result); + } return NULL; } #endif @@ -8039,29 +8327,34 @@ key_getcomb_ah(void) int keymin; int i; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); - + m = NULL; for (i = 1; i <= SADB_AALG_MAX; i++) { #if 1 /* we prefer HMAC algorithms, not old algorithms */ - if (i != SADB_AALG_SHA1HMAC && i != SADB_AALG_MD5HMAC) + if (i != SADB_AALG_SHA1HMAC && i != SADB_AALG_MD5HMAC) { continue; + } #endif algo = ah_algorithm_lookup(i); - if (!algo) + if (!algo) { continue; - - if (algo->keymax < ipsec_ah_keymin) + } + + if (algo->keymax < ipsec_ah_keymin) { continue; - if (algo->keymin < ipsec_ah_keymin) + } + if (algo->keymin < ipsec_ah_keymin) { keymin = ipsec_ah_keymin; - else + } else { keymin = algo->keymin; - + } + if (!m) { #if DIAGNOSTIC - if (l > MLEN) + if (l > MLEN) { panic("assumption failed in key_getcomb_ah"); + } #endif MGET(m, M_WAITOK, MT_DATA); if (m) { @@ -8069,11 +8362,13 @@ key_getcomb_ah(void) m->m_len = l; m->m_next = NULL; } - } else + } else { M_PREPEND(m, l, M_WAITOK, 1); - if (!m) + } + if (!m) { return NULL; - + } + comb = mtod(m, struct sadb_comb *); bzero(comb, sizeof(*comb)); key_getcomb_setlifetime(comb); @@ -8081,7 +8376,7 @@ key_getcomb_ah(void) comb->sadb_comb_auth_minbits = keymin; comb->sadb_comb_auth_maxbits = algo->keymax; } - + return m; } @@ -8097,17 
+8392,19 @@ key_getcomb_ipcomp(void) struct mbuf *m; int i; const int l = PFKEY_ALIGN8(sizeof(struct sadb_comb)); - + m = NULL; for (i = 1; i <= SADB_X_CALG_MAX; i++) { algo = ipcomp_algorithm_lookup(i); - if (!algo) + if (!algo) { continue; - + } + if (!m) { #if DIAGNOSTIC - if (l > MLEN) + if (l > MLEN) { panic("assumption failed in key_getcomb_ipcomp"); + } #endif MGET(m, M_WAITOK, MT_DATA); if (m) { @@ -8115,18 +8412,20 @@ key_getcomb_ipcomp(void) m->m_len = l; m->m_next = NULL; } - } else + } else { M_PREPEND(m, l, M_WAITOK, 1); - if (!m) + } + if (!m) { return NULL; - + } + comb = mtod(m, struct sadb_comb *); bzero(comb, sizeof(*comb)); key_getcomb_setlifetime(comb); comb->sadb_comb_encrypt = i; /* what should we set into sadb_comb_*_{min,max}bits? */ } - + return m; } @@ -8137,45 +8436,48 @@ key_getcomb_ipcomp(void) */ static struct mbuf * key_getprop( - const struct secasindex *saidx) + const struct secasindex *saidx) { struct sadb_prop *prop; struct mbuf *m, *n; const int l = PFKEY_ALIGN8(sizeof(struct sadb_prop)); int totlen; - - switch (saidx->proto) { + + switch (saidx->proto) { #if IPSEC_ESP - case IPPROTO_ESP: - m = key_getcomb_esp(); - break; + case IPPROTO_ESP: + m = key_getcomb_esp(); + break; #endif - case IPPROTO_AH: - m = key_getcomb_ah(); - break; - case IPPROTO_IPCOMP: - m = key_getcomb_ipcomp(); - break; - default: - return NULL; + case IPPROTO_AH: + m = key_getcomb_ah(); + break; + case IPPROTO_IPCOMP: + m = key_getcomb_ipcomp(); + break; + default: + return NULL; } - - if (!m) + + if (!m) { return NULL; + } M_PREPEND(m, l, M_WAITOK, 1); - if (!m) + if (!m) { return NULL; - + } + totlen = 0; - for (n = m; n; n = n->m_next) + for (n = m; n; n = n->m_next) { totlen += n->m_len; - + } + prop = mtod(m, struct sadb_prop *); bzero(prop, sizeof(*prop)); prop->sadb_prop_len = PFKEY_UNIT64(totlen); prop->sadb_prop_exttype = SADB_EXT_PROPOSAL; - prop->sadb_prop_replay = 32; /* XXX */ - + prop->sadb_prop_replay = 32; /* XXX */ + return m; } @@ -8201,8 +8503,8 @@ key_getprop( */ static int key_acquire( - struct secasindex *saidx, - struct secpolicy *sp) + struct secasindex *saidx, + struct secpolicy *sp) { struct mbuf *result = NULL, *m; #ifndef IPSEC_NONBLOCK_ACQUIRE @@ -8211,15 +8513,17 @@ key_acquire( u_int8_t satype; int error = -1; u_int32_t seq; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (saidx == NULL) + if (saidx == NULL) { panic("key_acquire: NULL pointer is passed.\n"); - if ((satype = key_proto2satype(saidx->proto)) == 0) + } + if ((satype = key_proto2satype(saidx->proto)) == 0) { panic("key_acquire: invalid proto is passed.\n"); - + } + #ifndef IPSEC_NONBLOCK_ACQUIRE /* * We never do anything about acquirng SA. There is anather @@ -8245,14 +8549,14 @@ key_acquire( lck_mtx_unlock(sadb_mutex); return ENOBUFS; } - + /* add to acqtree */ LIST_INSERT_HEAD(&acqtree, newacq, chain); key_start_timehandler(); } seq = newacq->seq; lck_mtx_unlock(sadb_mutex); - + #else seq = (acq_seq = (acq_seq == ~0 ? 1 : ++acq_seq)); #endif @@ -8262,26 +8566,26 @@ key_acquire( goto fail; } result = m; - + /* set sadb_address for saidx's. 
*/ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, - (struct sockaddr *)&saidx->src, FULLMASK, IPSEC_ULPROTO_ANY); + (struct sockaddr *)&saidx->src, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); - + m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, - (struct sockaddr *)&saidx->dst, FULLMASK, IPSEC_ULPROTO_ANY); + (struct sockaddr *)&saidx->dst, FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); - + /* XXX proxy address (optional) */ - + /* set sadb_x_policy */ if (sp) { m = key_setsadbxpolicy(sp->policy, sp->spidx.dir, sp->id); @@ -8291,15 +8595,15 @@ key_acquire( } m_cat(result, m); } - + /* XXX identity (optional) */ #if 0 if (idexttype && fqdn) { /* create identity extension (FQDN) */ struct sadb_ident *id; int fqdnlen; - - fqdnlen = strlen(fqdn) + 1; /* +1 for terminating-NUL */ + + fqdnlen = strlen(fqdn) + 1; /* +1 for terminating-NUL */ id = (struct sadb_ident *)p; bzero(id, sizeof(*id) + PFKEY_ALIGN8(fqdnlen)); id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(fqdnlen)); @@ -8308,33 +8612,36 @@ key_acquire( bcopy(fqdn, id + 1, fqdnlen); p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(fqdnlen); } - + if (idexttype) { /* create identity extension (USERFQDN) */ struct sadb_ident *id; int userfqdnlen; - + if (userfqdn) { /* +1 for terminating-NUL */ userfqdnlen = strlen(userfqdn) + 1; - } else + } else { userfqdnlen = 0; + } id = (struct sadb_ident *)p; bzero(id, sizeof(*id) + PFKEY_ALIGN8(userfqdnlen)); id->sadb_ident_len = PFKEY_UNIT64(sizeof(*id) + PFKEY_ALIGN8(userfqdnlen)); id->sadb_ident_exttype = idexttype; id->sadb_ident_type = SADB_IDENTTYPE_USERFQDN; /* XXX is it correct? */ - if (curproc && curproc->p_cred) + if (curproc && curproc->p_cred) { id->sadb_ident_id = curproc->p_cred->p_ruid; - if (userfqdn && userfqdnlen) + } + if (userfqdn && userfqdnlen) { bcopy(userfqdn, id + 1, userfqdnlen); + } p += sizeof(struct sadb_ident) + PFKEY_ALIGN8(userfqdnlen); } #endif - + /* XXX sensitivity (optional) */ - + /* create proposal/combination extension */ m = key_getprop(saidx); #if 0 @@ -8352,15 +8659,16 @@ key_acquire( /* * outside of spec; make proposal/combination extension optional. */ - if (m) + if (m) { m_cat(result, m); + } #endif - + if ((result->m_flags & M_PKTHDR) == 0) { error = EINVAL; goto fail; } - + if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) { @@ -8368,30 +8676,32 @@ key_acquire( goto fail; } } - + result->m_pkthdr.len = 0; - for (m = result; m; m = m->m_next) + for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; - + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); - + PFKEY_UNIT64(result->m_pkthdr.len); + return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); - + fail: - if (result) + if (result) { m_freem(result); + } return error; } #ifndef IPSEC_NONBLOCK_ACQUIRE static struct secacq * key_newacq( - struct secasindex *saidx) + struct secasindex *saidx) { struct secacq *newacq; struct timeval tv; - + /* get new entry */ KMALLOC_NOWAIT(newacq, struct secacq *, sizeof(struct secacq)); if (newacq == NULL) { @@ -8404,57 +8714,59 @@ key_newacq( } } bzero(newacq, sizeof(*newacq)); - + /* copy secindex */ bcopy(saidx, &newacq->saidx, sizeof(newacq->saidx)); newacq->seq = (acq_seq == ~0 ? 
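/*
 * ACQUIRE bookkeeping: every pending kernel-initiated SADB_ACQUIRE
 * is remembered in a secacq entry keyed by a sequence number.  The
 * wrap expression being assigned here skips zero on overflow,
 *
 *	seq = (acq_seq == ~0) ? 1 : ++acq_seq;
 *
 * because a reply from the key daemon carrying sequence number zero
 * is treated as unanswerable and quietly dropped by key_acquire2().
 */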
1 : ++acq_seq); microtime(&tv); newacq->created = tv.tv_sec; newacq->count = 0; - + return newacq; } static struct secacq * key_getacq( - struct secasindex *saidx) + struct secasindex *saidx) { struct secacq *acq; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + LIST_FOREACH(acq, &acqtree, chain) { - if (key_cmpsaidx(saidx, &acq->saidx, CMP_EXACTLY)) + if (key_cmpsaidx(saidx, &acq->saidx, CMP_EXACTLY)) { return acq; + } } - + return NULL; } static struct secacq * key_getacqbyseq( - u_int32_t seq) + u_int32_t seq) { struct secacq *acq; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + LIST_FOREACH(acq, &acqtree, chain) { - if (acq->seq == seq) + if (acq->seq == seq) { return acq; + } } - + return NULL; } #endif static struct secspacq * key_newspacq( - struct secpolicyindex *spidx) + struct secpolicyindex *spidx) { struct secspacq *acq; struct timeval tv; - + /* get new entry */ KMALLOC_NOWAIT(acq, struct secspacq *, sizeof(struct secspacq)); if (acq == NULL) { @@ -8467,29 +8779,30 @@ key_newspacq( } } bzero(acq, sizeof(*acq)); - + /* copy secindex */ bcopy(spidx, &acq->spidx, sizeof(acq->spidx)); microtime(&tv); acq->created = tv.tv_sec; acq->count = 0; - + return acq; } static struct secspacq * key_getspacq( - struct secpolicyindex *spidx) + struct secpolicyindex *spidx) { struct secspacq *acq; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - + LIST_FOREACH(acq, &spacqtree, chain) { - if (key_cmpspidx_exactly(spidx, &acq->spidx)) + if (key_cmpspidx_exactly(spidx, &acq->spidx)) { return acq; + } } - + return NULL; } @@ -8509,9 +8822,9 @@ key_getspacq( */ static int key_acquire2( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { const struct sadb_address *src0, *dst0; ifnet_t ipsec_if = NULL; @@ -8519,12 +8832,13 @@ key_acquire2( struct secashead *sah; u_int16_t proto; int error; - - + + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_acquire2: NULL pointer is passed.\n"); - + } + /* * Error message from KMd. * We assume that if error was occurred in IKEd, the length of PFKEY @@ -8532,12 +8846,12 @@ key_acquire2( * We do not raise error even if error occurred in this function. */ lck_mtx_lock(sadb_mutex); - + if (mhp->msg->sadb_msg_len == PFKEY_UNIT64(sizeof(struct sadb_msg))) { #ifndef IPSEC_NONBLOCK_ACQUIRE struct secacq *acq; struct timeval tv; - + /* check sequence number */ if (mhp->msg->sadb_msg_seq == 0) { lck_mtx_unlock(sadb_mutex); @@ -8545,7 +8859,7 @@ key_acquire2( m_freem(m); return 0; } - + if ((acq = key_getacqbyseq(mhp->msg->sadb_msg_seq)) == NULL) { /* * the specified larval SA is already gone, or we got @@ -8555,7 +8869,7 @@ key_acquire2( m_freem(m); return 0; } - + /* reset acq counter in order to deletion by timehander. */ microtime(&tv); acq->created = tv.tv_sec; @@ -8565,18 +8879,18 @@ key_acquire2( m_freem(m); return 0; } - + /* * This message is from user land. 
*/ - + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { lck_mtx_unlock(sadb_mutex); ipseclog((LOG_DEBUG, "key_acquire2: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } - + if (mhp->ext[SADB_EXT_ADDRESS_SRC] == NULL || mhp->ext[SADB_EXT_ADDRESS_DST] == NULL || mhp->ext[SADB_EXT_PROPOSAL] == NULL) { @@ -8593,21 +8907,23 @@ key_acquire2( ipseclog((LOG_DEBUG, "key_acquire2: invalid message is passed.\n")); return key_senderror(so, m, EINVAL); } - + src0 = (const struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_SRC]; dst0 = (const struct sadb_address *)mhp->ext[SADB_EXT_ADDRESS_DST]; ipsec_if = key_get_ipsec_if_from_message(mhp, SADB_X_EXT_IPSECIF); - + /* XXX boundary check against sa_len */ /* cast warnings */ KEY_SETSECASIDX(proto, IPSEC_MODE_ANY, 0, src0 + 1, dst0 + 1, ipsec_if ? ipsec_if->if_index : 0, &saidx); - + /* get a SA index */ LIST_FOREACH(sah, &sahtree, chain) { - if (sah->state == SADB_SASTATE_DEAD) + if (sah->state == SADB_SASTATE_DEAD) { continue; - if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE | CMP_REQID)) + } + if (key_cmpsaidx(&sah->saidx, &saidx, CMP_MODE | CMP_REQID)) { break; + } } if (sah != NULL) { lck_mtx_unlock(sadb_mutex); @@ -8618,10 +8934,10 @@ key_acquire2( error = key_acquire(&saidx, NULL); if (error != 0) { ipseclog((LOG_DEBUG, "key_acquire2: error %d returned " - "from key_acquire.\n", mhp->msg->sadb_msg_errno)); + "from key_acquire.\n", mhp->msg->sadb_msg_errno)); return key_senderror(so, m, error); } - + return key_sendup_mbuf(so, m, KEY_SENDUP_REGISTERED); } @@ -8640,24 +8956,27 @@ key_acquire2( */ static int key_register( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct secreg *reg, *newreg = 0; - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_register: NULL pointer is passed.\n"); - + } + /* check for invalid register message */ - if (mhp->msg->sadb_msg_satype >= sizeof(regtree)/sizeof(regtree[0])) + if (mhp->msg->sadb_msg_satype >= sizeof(regtree) / sizeof(regtree[0])) { return key_senderror(so, m, EINVAL); - + } + /* When SATYPE_UNSPEC is specified, only return sadb_supported. */ - if (mhp->msg->sadb_msg_satype == SADB_SATYPE_UNSPEC) + if (mhp->msg->sadb_msg_satype == SADB_SATYPE_UNSPEC) { goto setmsg; - + } + /* create regnode */ KMALLOC_WAIT(newreg, struct secreg *, sizeof(*newreg)); if (newreg == NULL) { @@ -8665,7 +8984,7 @@ key_register( return key_senderror(so, m, ENOBUFS); } bzero((caddr_t)newreg, sizeof(*newreg)); - + lck_mtx_lock(sadb_mutex); /* check whether existing or not */ LIST_FOREACH(reg, ®tree[mhp->msg->sadb_msg_satype], chain) { @@ -8676,17 +8995,17 @@ key_register( return key_senderror(so, m, EEXIST); } } - + socket_lock(so, 1); newreg->so = so; ((struct keycb *)sotorawcb(so))->kp_registered++; socket_unlock(so, 1); - + /* add regnode to regtree. */ LIST_INSERT_HEAD(®tree[mhp->msg->sadb_msg_satype], newreg, chain); lck_mtx_unlock(sadb_mutex); setmsg: - { + { struct mbuf *n; struct sadb_msg *newmsg; struct sadb_supported *sup; @@ -8694,30 +9013,35 @@ setmsg: int off; int i; struct sadb_alg *alg; - + /* create new sadb_msg to reply. 
*/ alen = 0; for (i = 1; i <= SADB_AALG_MAX; i++) { - if (ah_algorithm_lookup(i)) + if (ah_algorithm_lookup(i)) { alen += sizeof(struct sadb_alg); + } } - if (alen) + if (alen) { alen += sizeof(struct sadb_supported); + } elen = 0; #if IPSEC_ESP for (i = 1; i <= SADB_EALG_MAX; i++) { - if (esp_algorithm_lookup(i)) + if (esp_algorithm_lookup(i)) { elen += sizeof(struct sadb_alg); + } } - if (elen) + if (elen) { elen += sizeof(struct sadb_supported); + } #endif - + len = sizeof(struct sadb_msg) + alen + elen; - - if (len > MCLBYTES) + + if (len > MCLBYTES) { return key_senderror(so, m, ENOBUFS); - + } + MGETHDR(n, M_WAITOK, MT_DATA); if (n && len > MHLEN) { MCLGET(n, M_WAITOK); @@ -8726,34 +9050,36 @@ setmsg: n = NULL; } } - if (!n) + if (!n) { return key_senderror(so, m, ENOBUFS); - + } + n->m_pkthdr.len = n->m_len = len; n->m_next = NULL; off = 0; - + m_copydata(m, 0, sizeof(struct sadb_msg), mtod(n, caddr_t) + off); newmsg = mtod(n, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(len); off += PFKEY_ALIGN8(sizeof(struct sadb_msg)); - + /* for authentication algorithm */ if (alen) { sup = (struct sadb_supported *)(void *)(mtod(n, caddr_t) + off); sup->sadb_supported_len = PFKEY_UNIT64(alen); sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_AUTH; off += PFKEY_ALIGN8(sizeof(*sup)); - + for (i = 1; i <= SADB_AALG_MAX; i++) { const struct ah_algorithm *aalgo; - + aalgo = ah_algorithm_lookup(i); - if (!aalgo) + if (!aalgo) { continue; + } alg = (struct sadb_alg *) - (void *)(mtod(n, caddr_t) + off); + (void *)(mtod(n, caddr_t) + off); alg->sadb_alg_id = i; alg->sadb_alg_ivlen = 0; alg->sadb_alg_minbits = aalgo->keymin; @@ -8761,7 +9087,7 @@ setmsg: off += PFKEY_ALIGN8(sizeof(*alg)); } } - + #if IPSEC_ESP /* for encryption algorithm */ if (elen) { @@ -8769,15 +9095,16 @@ setmsg: sup->sadb_supported_len = PFKEY_UNIT64(elen); sup->sadb_supported_exttype = SADB_EXT_SUPPORTED_ENCRYPT; off += PFKEY_ALIGN8(sizeof(*sup)); - + for (i = 1; i <= SADB_EALG_MAX; i++) { const struct esp_algorithm *ealgo; - + ealgo = esp_algorithm_lookup(i); - if (!ealgo) + if (!ealgo) { continue; + } alg = (struct sadb_alg *) - (void *)(mtod(n, caddr_t) + off); + (void *)(mtod(n, caddr_t) + off); alg->sadb_alg_id = i; if (ealgo && ealgo->ivlen) { /* @@ -8785,28 +9112,30 @@ setmsg: * algorithm XXX SADB_X_EXT_DERIV ? 
*/ alg->sadb_alg_ivlen = - (*ealgo->ivlen)(ealgo, NULL); + (*ealgo->ivlen)(ealgo, NULL); - } else + } else { alg->sadb_alg_ivlen = 0; + } alg->sadb_alg_minbits = ealgo->keymin; alg->sadb_alg_maxbits = ealgo->keymax; off += PFKEY_ALIGN8(sizeof(struct sadb_alg)); } } #endif - + #if DIAGNOSTIC - if (off != len) + if (off != len) { panic("length assumption failed in key_register"); + } #endif - + m_freem(m); return key_sendup_mbuf(so, n, KEY_SENDUP_REGISTERED); - } + } } static void -key_delete_all_for_socket (struct socket *so) +key_delete_all_for_socket(struct socket *so) { struct secashead *sah, *nextsah; struct secasvar *sav, *nextsav; @@ -8814,15 +9143,15 @@ key_delete_all_for_socket (struct socket *so) u_int state; for (sah = LIST_FIRST(&sahtree); - sah != NULL; - sah = nextsah) { + sah != NULL; + sah = nextsah) { nextsah = LIST_NEXT(sah, chain); for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_alive); stateidx++) { state = saorder_state_any[stateidx]; for (sav = LIST_FIRST(&sah->savtree[state]); sav != NULL; sav = nextsav) { nextsav = LIST_NEXT(sav, chain); if (sav->flags2 & SADB_X_EXT_SA2_DELETE_ON_DETACH && - sav->so == so) { + sav->so == so) { key_sa_chgstate(sav, SADB_SASTATE_DEAD); key_freesav(sav, KEY_SADB_LOCKED); } @@ -8837,15 +9166,16 @@ key_delete_all_for_socket (struct socket *so) */ void key_freereg( - struct socket *so) + struct socket *so) { struct secreg *reg; int i; - + /* sanity check */ - if (so == NULL) + if (so == NULL) { panic("key_freereg: NULL pointer is passed.\n"); - + } + /* * check whether existing or not. * check all type of SA, because there is a potential that * one socket is registered to multiple type of SA. */ lck_mtx_lock(sadb_mutex); @@ -8856,7 +9186,7 @@ key_freereg( for (i = 0; i <= SADB_SATYPE_MAX; i++) { LIST_FOREACH(reg, &regtree[i], chain) { if (reg->so == so - && __LIST_CHAINED(reg)) { + && __LIST_CHAINED(reg)) { LIST_REMOVE(reg, chain); KFREE(reg); break; @@ -8879,24 +9209,27 @@ key_freereg( */ static int key_expire( - struct secasvar *sav) + struct secasvar *sav) { int satype; struct mbuf *result = NULL, *m; int len; int error = -1; struct sadb_lifetime *lt; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (sav == NULL) + if (sav == NULL) { panic("key_expire: NULL pointer is passed.\n"); - if (sav->sah == NULL) + } + if (sav->sah == NULL) { panic("key_expire: Why was SA index in SA NULL.\n"); - if ((satype = key_proto2satype(sav->sah->saidx.proto)) == 0) + } + if ((satype = key_proto2satype(sav->sah->saidx.proto)) == 0) { panic("key_expire: invalid proto is passed.\n"); - + } + /* set msg header */ m = key_setsadbmsg(SADB_EXPIRE, 0, satype, sav->seq, 0, sav->refcnt); if (!m) { @@ -8904,7 +9237,7 @@ key_expire( goto fail; } result = m; - + /* create SA extension */ m = key_setsadbsa(sav); if (!m) { @@ -8912,24 +9245,25 @@ key_expire( goto fail; } m_cat(result, m); - + /* create SA extension */ m = key_setsadbxsa2(sav->sah->saidx.mode, - sav->replay ? sav->replay->count : 0, - sav->sah->saidx.reqid, - sav->flags2); + sav->replay ?
sav->replay->count : 0, + sav->sah->saidx.reqid, + sav->flags2); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); - + /* create lifetime extension (current and soft) */ len = PFKEY_ALIGN8(sizeof(*lt)) * 2; m = key_alloc_mbuf(len); - if (!m || m->m_next) { /*XXX*/ - if (m) + if (!m || m->m_next) { /*XXX*/ + if (m) { m_freem(m); + } error = ENOBUFS; goto fail; } @@ -8944,32 +9278,32 @@ key_expire( lt = (struct sadb_lifetime *)(void *)(mtod(m, caddr_t) + len / 2); bcopy(sav->lft_s, lt, sizeof(*lt)); m_cat(result, m); - + /* set sadb_address for source */ m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, - (struct sockaddr *)&sav->sah->saidx.src, - FULLMASK, IPSEC_ULPROTO_ANY); + (struct sockaddr *)&sav->sah->saidx.src, + FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); - + /* set sadb_address for destination */ m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, - (struct sockaddr *)&sav->sah->saidx.dst, - FULLMASK, IPSEC_ULPROTO_ANY); + (struct sockaddr *)&sav->sah->saidx.dst, + FULLMASK, IPSEC_ULPROTO_ANY); if (!m) { error = ENOBUFS; goto fail; } m_cat(result, m); - + if ((result->m_flags & M_PKTHDR) == 0) { error = EINVAL; goto fail; } - + if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) { @@ -8977,19 +9311,21 @@ key_expire( goto fail; } } - + result->m_pkthdr.len = 0; - for (m = result; m; m = m->m_next) + for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; - + } + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); - + PFKEY_UNIT64(result->m_pkthdr.len); + return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); - + fail: - if (result) + if (result) { m_freem(result); + } return error; } @@ -9007,9 +9343,9 @@ fail: */ static int key_flush( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_msg *newmsg; struct secashead *sah, *nextsah; @@ -9017,62 +9353,64 @@ key_flush( u_int16_t proto; u_int8_t state; u_int stateidx; - + /* sanity check */ - if (so == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_flush: NULL pointer is passed.\n"); - + } + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_flush: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } - + lck_mtx_lock(sadb_mutex); - + /* no SATYPE specified, i.e. flushing all SA. 
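 * (SADB_SATYPE_UNSPEC matches every protocol in the walk below.)
 * On the wire a flush request is nothing more than a bare
 * sadb_msg, so a minimal privileged sender reduces to the sketch
 * below (illustrative only; error handling omitted):
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>
#include <unistd.h>
#include <net/pfkeyv2.h>

static int
pfkey_flush_all(void)
{
	struct sadb_msg msg;
	int s = socket(PF_KEY, SOCK_RAW, PF_KEY_V2);

	if (s < 0) {
		return -1;
	}
	memset(&msg, 0, sizeof(msg));
	msg.sadb_msg_version = PF_KEY_V2;
	msg.sadb_msg_type = SADB_FLUSH;
	msg.sadb_msg_satype = SADB_SATYPE_UNSPEC;
	msg.sadb_msg_len = sizeof(msg) >> 3;	/* length in 8-byte units */
	msg.sadb_msg_pid = getpid();
	(void)write(s, &msg, sizeof(msg));
	close(s);
	return 0;
}
/* Walk every SA header, filtering on protocol unless flushing all: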
*/ for (sah = LIST_FIRST(&sahtree); - sah != NULL; - sah = nextsah) { + sah != NULL; + sah = nextsah) { nextsah = LIST_NEXT(sah, chain); - + if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC - && proto != sah->saidx.proto) + && proto != sah->saidx.proto) { continue; - + } + for (stateidx = 0; - stateidx < _ARRAYLEN(saorder_state_alive); - stateidx++) { + stateidx < _ARRAYLEN(saorder_state_alive); + stateidx++) { state = saorder_state_any[stateidx]; for (sav = LIST_FIRST(&sah->savtree[state]); - sav != NULL; - sav = nextsav) { - + sav != NULL; + sav = nextsav) { nextsav = LIST_NEXT(sav, chain); - + key_sa_chgstate(sav, SADB_SASTATE_DEAD); key_freesav(sav, KEY_SADB_LOCKED); } } - + sah->state = SADB_SASTATE_DEAD; } lck_mtx_unlock(sadb_mutex); - + if (m->m_len < sizeof(struct sadb_msg) || sizeof(struct sadb_msg) > m->m_len + M_TRAILINGSPACE(m)) { ipseclog((LOG_DEBUG, "key_flush: No more memory.\n")); return key_senderror(so, m, ENOBUFS); } - - if (m->m_next) + + if (m->m_next) { m_freem(m->m_next); + } m->m_next = NULL; m->m_pkthdr.len = m->m_len = sizeof(struct sadb_msg); newmsg = mtod(m, struct sadb_msg *); newmsg->sadb_msg_errno = 0; newmsg->sadb_msg_len = PFKEY_UNIT64(m->m_pkthdr.len); - + return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } @@ -9096,9 +9434,9 @@ struct sav_dump_elem { static int key_dump( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct secashead *sah; struct secasvar *sav; @@ -9110,39 +9448,41 @@ key_dump( int cnt = 0, cnt2, bufcount; struct mbuf *n; int error = 0; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_dump: NULL pointer is passed.\n"); - + } + /* map satype to proto */ if ((proto = key_satype2proto(mhp->msg->sadb_msg_satype)) == 0) { ipseclog((LOG_DEBUG, "key_dump: invalid satype is passed.\n")); return key_senderror(so, m, EINVAL); } - + if ((bufcount = ipsec_sav_count) <= 0) { error = ENOENT; goto end; } - bufcount += 512; /* extra */ + bufcount += 512; /* extra */ KMALLOC_WAIT(savbuf, struct sav_dump_elem*, bufcount * sizeof(struct sav_dump_elem)); if (savbuf == NULL) { ipseclog((LOG_DEBUG, "key_dump: No more memory.\n")); error = ENOMEM; goto end; } - + /* count sav entries to be sent to the userland. */ lck_mtx_lock(sadb_mutex); elem_ptr = savbuf; LIST_FOREACH(sah, &sahtree, chain) { if (mhp->msg->sadb_msg_satype != SADB_SATYPE_UNSPEC - && proto != sah->saidx.proto) + && proto != sah->saidx.proto) { continue; - + } + /* map proto to satype */ if ((satype = key_proto2satype(sah->saidx.proto)) == 0) { lck_mtx_unlock(sadb_mutex); @@ -9150,14 +9490,15 @@ key_dump( error = EINVAL; goto end; } - + for (stateidx = 0; - stateidx < _ARRAYLEN(saorder_state_any); - stateidx++) { + stateidx < _ARRAYLEN(saorder_state_any); + stateidx++) { state = saorder_state_any[stateidx]; LIST_FOREACH(sav, &sah->savtree[state], chain) { - if (cnt == bufcount) - break; /* out of buffer space */ + if (cnt == bufcount) { + break; /* out of buffer space */ + } elem_ptr->sav = sav; elem_ptr->satype = satype; sav->refcnt++; @@ -9167,43 +9508,45 @@ key_dump( } } lck_mtx_unlock(sadb_mutex); - + if (cnt == 0) { error = ENOENT; goto end; } - + /* send this to the userland, one at a time. 
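 * Each SA becomes its own SADB_DUMP reply; sadb_msg_seq is set
 * from --cnt2 below, so it counts down and reaches zero on the
 * final message, which is the termination condition a userland
 * dumper keys off. Roughly (sketch; assumes s is a PF_KEY socket
 * that just wrote an SADB_DUMP request):
 */
#include <stdint.h>
#include <unistd.h>
#include <net/pfkeyv2.h>

static void
read_dump_replies(int s)
{
	uint64_t buf[512];	/* 8-byte aligned, like all PF_KEY data */

	for (;;) {
		ssize_t n = read(s, buf, sizeof(buf));
		const struct sadb_msg *msg = (const struct sadb_msg *)buf;

		if (n < (ssize_t)sizeof(*msg) || msg->sadb_msg_errno != 0) {
			break;	/* error, or truncated reply */
		}
		/* ... decode this SA's extensions here ... */
		if (msg->sadb_msg_seq == 0) {
			break;	/* last message of the dump */
		}
	}
}
/* Stream the snapshot taken above, one SA per message: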
*/ elem_ptr = savbuf; cnt2 = cnt; while (cnt2) { n = key_setdumpsa(elem_ptr->sav, SADB_DUMP, elem_ptr->satype, - --cnt2, mhp->msg->sadb_msg_pid); - + --cnt2, mhp->msg->sadb_msg_pid); + if (!n) { error = ENOBUFS; goto end; } - + key_sendup_mbuf(so, n, KEY_SENDUP_ONE); elem_ptr++; } - + end: if (savbuf) { if (cnt) { elem_ptr = savbuf; lck_mtx_lock(sadb_mutex); - while (cnt--) + while (cnt--) { key_freesav((elem_ptr++)->sav, KEY_SADB_LOCKED); + } lck_mtx_unlock(sadb_mutex); } KFREE(savbuf); } - - if (error) + + if (error) { return key_senderror(so, m, error); - + } + m_freem(m); return 0; } @@ -9215,18 +9558,19 @@ end: */ static int key_promisc( - struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) + struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { int olen; - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { panic("key_promisc: NULL pointer is passed.\n"); - + } + olen = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len); - + if (olen < sizeof(struct sadb_msg)) { #if 1 return key_senderror(so, m, EINVAL); @@ -9237,60 +9581,61 @@ key_promisc( } else if (olen == sizeof(struct sadb_msg)) { /* enable/disable promisc mode */ struct keycb *kp; - + socket_lock(so, 1); - if ((kp = (struct keycb *)sotorawcb(so)) == NULL) + if ((kp = (struct keycb *)sotorawcb(so)) == NULL) { return key_senderror(so, m, EINVAL); + } mhp->msg->sadb_msg_errno = 0; switch (mhp->msg->sadb_msg_satype) { - case 0: - case 1: - kp->kp_promisc = mhp->msg->sadb_msg_satype; - break; - default: - socket_unlock(so, 1); - return key_senderror(so, m, EINVAL); + case 0: + case 1: + kp->kp_promisc = mhp->msg->sadb_msg_satype; + break; + default: + socket_unlock(so, 1); + return key_senderror(so, m, EINVAL); } socket_unlock(so, 1); - + /* send the original message back to everyone */ mhp->msg->sadb_msg_errno = 0; return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } else { /* send packet as is */ - + m_adj(m, PFKEY_ALIGN8(sizeof(struct sadb_msg))); - + /* TODO: if sadb_msg_seq is specified, send to specific pid */ return key_sendup_mbuf(so, m, KEY_SENDUP_ALL); } } -static int (*const key_typesw[])(struct socket *, struct mbuf *, - const struct sadb_msghdr *) = { - NULL, /* SADB_RESERVED */ - key_getspi, /* SADB_GETSPI */ - key_update, /* SADB_UPDATE */ - key_add, /* SADB_ADD */ - key_delete, /* SADB_DELETE */ - key_get, /* SADB_GET */ - key_acquire2, /* SADB_ACQUIRE */ - key_register, /* SADB_REGISTER */ - NULL, /* SADB_EXPIRE */ - key_flush, /* SADB_FLUSH */ - key_dump, /* SADB_DUMP */ - key_promisc, /* SADB_X_PROMISC */ - NULL, /* SADB_X_PCHANGE */ - key_spdadd, /* SADB_X_SPDUPDATE */ - key_spdadd, /* SADB_X_SPDADD */ - key_spddelete, /* SADB_X_SPDDELETE */ - key_spdget, /* SADB_X_SPDGET */ - NULL, /* SADB_X_SPDACQUIRE */ - key_spddump, /* SADB_X_SPDDUMP */ - key_spdflush, /* SADB_X_SPDFLUSH */ - key_spdadd, /* SADB_X_SPDSETIDX */ - NULL, /* SADB_X_SPDEXPIRE */ - key_spddelete2, /* SADB_X_SPDDELETE2 */ +static int(*const key_typesw[])(struct socket *, struct mbuf *, + const struct sadb_msghdr *) = { + NULL, /* SADB_RESERVED */ + key_getspi, /* SADB_GETSPI */ + key_update, /* SADB_UPDATE */ + key_add, /* SADB_ADD */ + key_delete, /* SADB_DELETE */ + key_get, /* SADB_GET */ + key_acquire2, /* SADB_ACQUIRE */ + key_register, /* SADB_REGISTER */ + NULL, /* SADB_EXPIRE */ + key_flush, /* SADB_FLUSH */ + key_dump, /* SADB_DUMP */ + key_promisc, /* SADB_X_PROMISC */ + NULL, /* SADB_X_PCHANGE */ + key_spdadd, /* 
SADB_X_SPDUPDATE */ + key_spdadd, /* SADB_X_SPDADD */ + key_spddelete, /* SADB_X_SPDDELETE */ + key_spdget, /* SADB_X_SPDGET */ + NULL, /* SADB_X_SPDACQUIRE */ + key_spddump, /* SADB_X_SPDDUMP */ + key_spdflush, /* SADB_X_SPDFLUSH */ + key_spdadd, /* SADB_X_SPDSETIDX */ + NULL, /* SADB_X_SPDEXPIRE */ + key_spddelete2, /* SADB_X_SPDDELETE2 */ key_getsastat, /* SADB_GETSASTAT */ key_spdenable, /* SADB_X_SPDENABLE */ key_spddisable, /* SADB_X_SPDDISABLE */ @@ -9316,7 +9661,7 @@ bzero_mbuf(struct mbuf *m) } offset = sizeof(struct sadb_msg); } - bzero(mptr->m_data+offset, mptr->m_len-offset); + bzero(mptr->m_data + offset, mptr->m_len - offset); mptr = mptr->m_next; while (mptr != NULL) { bzero(mptr->m_data, mptr->m_len); @@ -9340,7 +9685,7 @@ bzero_keys(const struct sadb_msghdr *mh) extlen = key->sadb_key_bits >> 3; if (mh->extlen[SADB_EXT_KEY_ENCRYPT] >= offset + extlen) { - bzero((uint8_t *)mh->ext[SADB_EXT_KEY_ENCRYPT]+offset, extlen); + bzero((uint8_t *)mh->ext[SADB_EXT_KEY_ENCRYPT] + offset, extlen); } else { bzero(mh->ext[SADB_EXT_KEY_ENCRYPT], mh->extlen[SADB_EXT_KEY_ENCRYPT]); } @@ -9350,7 +9695,7 @@ bzero_keys(const struct sadb_msghdr *mh) extlen = key->sadb_key_bits >> 3; if (mh->extlen[SADB_EXT_KEY_AUTH] >= offset + extlen) { - bzero((uint8_t *)mh->ext[SADB_EXT_KEY_AUTH]+offset, extlen); + bzero((uint8_t *)mh->ext[SADB_EXT_KEY_AUTH] + offset, extlen); } else { bzero(mh->ext[SADB_EXT_KEY_AUTH], mh->extlen[SADB_EXT_KEY_AUTH]); } @@ -9359,7 +9704,7 @@ bzero_keys(const struct sadb_msghdr *mh) static int key_validate_address_pair(struct sadb_address *src0, - struct sadb_address *dst0) + struct sadb_address *dst0) { u_int plen = 0; @@ -9367,70 +9712,70 @@ key_validate_address_pair(struct sadb_address *src0, if (src0->sadb_address_proto != dst0->sadb_address_proto) { ipseclog((LOG_DEBUG, "key_parse: upper layer protocol mismatched.\n")); PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); - return (EINVAL); + return EINVAL; } /* check family */ if (PFKEY_ADDR_SADDR(src0)->sa_family != - PFKEY_ADDR_SADDR(dst0)->sa_family) { + PFKEY_ADDR_SADDR(dst0)->sa_family) { ipseclog((LOG_DEBUG, "key_parse: address family mismatched.\n")); PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); - return (EINVAL); + return EINVAL; } if (PFKEY_ADDR_SADDR(src0)->sa_len != - PFKEY_ADDR_SADDR(dst0)->sa_len) { + PFKEY_ADDR_SADDR(dst0)->sa_len) { ipseclog((LOG_DEBUG, - "key_parse: address struct size mismatched.\n")); + "key_parse: address struct size mismatched.\n")); PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); - return (EINVAL); + return EINVAL; } switch (PFKEY_ADDR_SADDR(src0)->sa_family) { - case AF_INET: - if (PFKEY_ADDR_SADDR(src0)->sa_len != sizeof(struct sockaddr_in)) { - PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); - return (EINVAL); - } - break; - case AF_INET6: - if (PFKEY_ADDR_SADDR(src0)->sa_len != sizeof(struct sockaddr_in6)) { - PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); - return (EINVAL); - } - break; - default: - ipseclog((LOG_DEBUG, - "key_parse: unsupported address family.\n")); + case AF_INET: + if (PFKEY_ADDR_SADDR(src0)->sa_len != sizeof(struct sockaddr_in)) { PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); - return (EAFNOSUPPORT); + return EINVAL; + } + break; + case AF_INET6: + if (PFKEY_ADDR_SADDR(src0)->sa_len != sizeof(struct sockaddr_in6)) { + PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); + return EINVAL; + } + break; + default: + ipseclog((LOG_DEBUG, + "key_parse: unsupported address family.\n")); + PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); + return EAFNOSUPPORT; } switch 
(PFKEY_ADDR_SADDR(src0)->sa_family) { - case AF_INET: - plen = sizeof(struct in_addr) << 3; - break; - case AF_INET6: - plen = sizeof(struct in6_addr) << 3; - break; - default: - plen = 0; /*fool gcc*/ - break; + case AF_INET: + plen = sizeof(struct in_addr) << 3; + break; + case AF_INET6: + plen = sizeof(struct in6_addr) << 3; + break; + default: + plen = 0; /*fool gcc*/ + break; } /* check max prefix length */ if (src0->sadb_address_prefixlen > plen || - dst0->sadb_address_prefixlen > plen) { + dst0->sadb_address_prefixlen > plen) { ipseclog((LOG_DEBUG, - "key_parse: illegal prefixlen.\n")); + "key_parse: illegal prefixlen.\n")); PFKEY_STAT_INCREMENT(pfkeystat.out_invaddr); - return (EINVAL); + return EINVAL; } /* * prefixlen == 0 is valid because there can be a case when * all addresses are matched. */ - return (0); + return 0; } /* @@ -9446,8 +9791,8 @@ key_validate_address_pair(struct sadb_address *src0, */ int key_parse( - struct mbuf *m, - struct socket *so) + struct mbuf *m, + struct socket *so) { struct sadb_msg *msg; struct sadb_msghdr mh; @@ -9457,26 +9802,28 @@ key_parse( Boolean keyAligned = FALSE; LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + /* sanity check */ - if (m == NULL || so == NULL) + if (m == NULL || so == NULL) { panic("key_parse: NULL pointer is passed.\n"); - -#if 0 /*kdebug_sadb assumes msg in linear buffer*/ + } + +#if 0 /*kdebug_sadb assumes msg in linear buffer*/ KEYDEBUG(KEYDEBUG_KEY_DUMP, - ipseclog((LOG_DEBUG, "key_parse: passed sadb_msg\n")); - kdebug_sadb(msg)); + ipseclog((LOG_DEBUG, "key_parse: passed sadb_msg\n")); + kdebug_sadb(msg)); #endif - + if (m->m_len < sizeof(struct sadb_msg)) { m = m_pullup(m, sizeof(struct sadb_msg)); - if (!m) + if (!m) { return ENOBUFS; + } } msg = mtod(m, struct sadb_msg *); orglen = PFKEY_UNUNIT64(msg->sadb_msg_len); target = KEY_SENDUP_ONE; - + if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len != m->m_pkthdr.len) { ipseclog((LOG_DEBUG, "key_parse: invalid message length.\n")); @@ -9484,24 +9831,24 @@ key_parse( error = EINVAL; goto senderror; } - + if (msg->sadb_msg_version != PF_KEY_V2) { ipseclog((LOG_DEBUG, - "key_parse: PF_KEY version %u is mismatched.\n", - msg->sadb_msg_version)); + "key_parse: PF_KEY version %u is mismatched.\n", + msg->sadb_msg_version)); PFKEY_STAT_INCREMENT(pfkeystat.out_invver); error = EINVAL; goto senderror; } - + if (msg->sadb_msg_type > SADB_MAX) { ipseclog((LOG_DEBUG, "key_parse: invalid type %u is passed.\n", - msg->sadb_msg_type)); + msg->sadb_msg_type)); PFKEY_STAT_INCREMENT(pfkeystat.out_invmsgtype); error = EINVAL; goto senderror; } - + /* for old-fashioned code - should be nuked */ if (m->m_pkthdr.len > MCLBYTES) { m_freem(m); @@ -9509,7 +9856,7 @@ key_parse( } if (m->m_next) { struct mbuf *n; - + MGETHDR(n, M_WAITOK, MT_DATA); if (n && m->m_pkthdr.len > MHLEN) { MCLGET(n, M_WAITOK); @@ -9530,89 +9877,90 @@ key_parse( m_freem(m); m = n; } - + /* align the mbuf chain so that extensions are in contiguous region. 
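 * key_align() indexes each extension by type into the sadb_msghdr
 * (mh.ext[type] / mh.extlen[type]) so that later handlers can
 * locate a payload with a single array lookup. The layout being
 * walked is the standard PF_KEY one: a fixed sadb_msg header
 * followed by 8-byte-aligned extensions that each begin with a
 * sadb_ext. Over a contiguous buffer the same traversal would look
 * like this sketch (find_ext is illustrative; the kernel routine
 * below additionally copes with mbuf chains and rejects duplicate
 * extensions):
 */
#include <stddef.h>
#include <net/pfkeyv2.h>

static const struct sadb_ext *
find_ext(const struct sadb_msg *msg, unsigned int type)
{
	size_t end = (size_t)msg->sadb_msg_len << 3;	/* bytes */
	size_t off = sizeof(*msg);

	while (off + sizeof(struct sadb_ext) <= end) {
		const struct sadb_ext *ext =
		    (const struct sadb_ext *)((const char *)msg + off);
		size_t extlen = (size_t)ext->sadb_ext_len << 3;

		if (extlen < sizeof(*ext) || off + extlen > end) {
			return NULL;	/* malformed extension length */
		}
		if (ext->sadb_ext_type == type) {
			return ext;
		}
		off += extlen;
	}
	return NULL;
}
/* Index the extensions, then require the result to fit one mbuf: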
*/ error = key_align(m, &mh); - if (error) + if (error) { return error; - - if (m->m_next) { /*XXX*/ + } + + if (m->m_next) { /*XXX*/ bzero_mbuf(m); m_freem(m); return ENOBUFS; } - + keyAligned = TRUE; msg = mh.msg; - + /* check SA type */ switch (msg->sadb_msg_satype) { - case SADB_SATYPE_UNSPEC: - switch (msg->sadb_msg_type) { - case SADB_GETSPI: - case SADB_UPDATE: - case SADB_ADD: - case SADB_DELETE: - case SADB_GET: - case SADB_ACQUIRE: - case SADB_EXPIRE: - ipseclog((LOG_DEBUG, "key_parse: must specify satype " - "when msg type=%u.\n", msg->sadb_msg_type)); - PFKEY_STAT_INCREMENT(pfkeystat.out_invsatype); - error = EINVAL; - goto senderror; - } - break; - case SADB_SATYPE_AH: - case SADB_SATYPE_ESP: - case SADB_X_SATYPE_IPCOMP: - switch (msg->sadb_msg_type) { - case SADB_X_SPDADD: - case SADB_X_SPDDELETE: - case SADB_X_SPDGET: - case SADB_X_SPDDUMP: - case SADB_X_SPDFLUSH: - case SADB_X_SPDSETIDX: - case SADB_X_SPDUPDATE: - case SADB_X_SPDDELETE2: - case SADB_X_SPDENABLE: - case SADB_X_SPDDISABLE: - ipseclog((LOG_DEBUG, "key_parse: illegal satype=%u\n", - msg->sadb_msg_type)); - PFKEY_STAT_INCREMENT(pfkeystat.out_invsatype); - error = EINVAL; - goto senderror; - } - break; - case SADB_SATYPE_RSVP: - case SADB_SATYPE_OSPFV2: - case SADB_SATYPE_RIPV2: - case SADB_SATYPE_MIP: - ipseclog((LOG_DEBUG, "key_parse: type %u isn't supported.\n", - msg->sadb_msg_satype)); + case SADB_SATYPE_UNSPEC: + switch (msg->sadb_msg_type) { + case SADB_GETSPI: + case SADB_UPDATE: + case SADB_ADD: + case SADB_DELETE: + case SADB_GET: + case SADB_ACQUIRE: + case SADB_EXPIRE: + ipseclog((LOG_DEBUG, "key_parse: must specify satype " + "when msg type=%u.\n", msg->sadb_msg_type)); PFKEY_STAT_INCREMENT(pfkeystat.out_invsatype); - error = EOPNOTSUPP; + error = EINVAL; goto senderror; - case 1: /* XXX: What does it do? */ - if (msg->sadb_msg_type == SADB_X_PROMISC) - break; - /*FALLTHROUGH*/ - default: - ipseclog((LOG_DEBUG, "key_parse: invalid type %u is passed.\n", - msg->sadb_msg_satype)); + } + break; + case SADB_SATYPE_AH: + case SADB_SATYPE_ESP: + case SADB_X_SATYPE_IPCOMP: + switch (msg->sadb_msg_type) { + case SADB_X_SPDADD: + case SADB_X_SPDDELETE: + case SADB_X_SPDGET: + case SADB_X_SPDDUMP: + case SADB_X_SPDFLUSH: + case SADB_X_SPDSETIDX: + case SADB_X_SPDUPDATE: + case SADB_X_SPDDELETE2: + case SADB_X_SPDENABLE: + case SADB_X_SPDDISABLE: + ipseclog((LOG_DEBUG, "key_parse: illegal satype=%u\n", + msg->sadb_msg_type)); PFKEY_STAT_INCREMENT(pfkeystat.out_invsatype); error = EINVAL; goto senderror; + } + break; + case SADB_SATYPE_RSVP: + case SADB_SATYPE_OSPFV2: + case SADB_SATYPE_RIPV2: + case SADB_SATYPE_MIP: + ipseclog((LOG_DEBUG, "key_parse: type %u isn't supported.\n", + msg->sadb_msg_satype)); + PFKEY_STAT_INCREMENT(pfkeystat.out_invsatype); + error = EOPNOTSUPP; + goto senderror; + case 1: /* XXX: What does it do? */ + if (msg->sadb_msg_type == SADB_X_PROMISC) { + break; + } + /*FALLTHROUGH*/ + default: + ipseclog((LOG_DEBUG, "key_parse: invalid type %u is passed.\n", + msg->sadb_msg_satype)); + PFKEY_STAT_INCREMENT(pfkeystat.out_invsatype); + error = EINVAL; + goto senderror; } - + /* Validate address fields for matching families, lengths, etc. 
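 * Every address extension is a sadb_address header followed by an
 * 8-byte-aligned sockaddr, and a src/dst (or range start/end) pair
 * must agree on upper-layer protocol, family and length. For
 * reference, a well-formed IPv4 address extension in a userland
 * request can be built as in this sketch (struct addr_ext and
 * fill_addr_ext are illustrative names):
 */
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <net/pfkeyv2.h>

struct addr_ext {
	struct sadb_address hdr;	/* 8 bytes */
	struct sockaddr_in sin;		/* 16 bytes, already 8-byte aligned */
};

static void
fill_addr_ext(struct addr_ext *ext, uint16_t exttype,
    struct in_addr addr, uint8_t prefixlen)
{
	memset(ext, 0, sizeof(*ext));
	ext->hdr.sadb_address_exttype = exttype;	/* e.g. SADB_EXT_ADDRESS_SRC */
	ext->hdr.sadb_address_prefixlen = prefixlen;	/* at most 32 for AF_INET */
	ext->hdr.sadb_address_len = sizeof(*ext) >> 3;	/* 8-byte units */
	ext->sin.sin_family = AF_INET;
	ext->sin.sin_len = sizeof(ext->sin);
	ext->sin.sin_addr = addr;
}
/* Pull out whichever address extensions this message carried: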
*/ void *src0 = mh.ext[SADB_EXT_ADDRESS_SRC]; void *dst0 = mh.ext[SADB_EXT_ADDRESS_DST]; if (mh.ext[SADB_X_EXT_ADDR_RANGE_SRC_START] != NULL && - mh.ext[SADB_X_EXT_ADDR_RANGE_SRC_END] != NULL) { - + mh.ext[SADB_X_EXT_ADDR_RANGE_SRC_END] != NULL) { error = key_validate_address_pair((struct sadb_address *)(mh.ext[SADB_X_EXT_ADDR_RANGE_SRC_START]), - (struct sadb_address *)(mh.ext[SADB_X_EXT_ADDR_RANGE_SRC_END])); + (struct sadb_address *)(mh.ext[SADB_X_EXT_ADDR_RANGE_SRC_END])); if (error != 0) { goto senderror; } @@ -9622,10 +9970,9 @@ key_parse( } } if (mh.ext[SADB_X_EXT_ADDR_RANGE_DST_START] != NULL && - mh.ext[SADB_X_EXT_ADDR_RANGE_DST_END] != NULL) { - + mh.ext[SADB_X_EXT_ADDR_RANGE_DST_END] != NULL) { error = key_validate_address_pair((struct sadb_address *)(mh.ext[SADB_X_EXT_ADDR_RANGE_DST_START]), - (struct sadb_address *)(mh.ext[SADB_X_EXT_ADDR_RANGE_DST_END])); + (struct sadb_address *)(mh.ext[SADB_X_EXT_ADDR_RANGE_DST_END])); if (error != 0) { goto senderror; } @@ -9636,19 +9983,19 @@ key_parse( } if (src0 != NULL && dst0 != NULL) { error = key_validate_address_pair((struct sadb_address *)(src0), - (struct sadb_address *)(dst0)); + (struct sadb_address *)(dst0)); if (error != 0) { goto senderror; } } - - if (msg->sadb_msg_type >= sizeof(key_typesw)/sizeof(key_typesw[0]) || + + if (msg->sadb_msg_type >= sizeof(key_typesw) / sizeof(key_typesw[0]) || key_typesw[msg->sadb_msg_type] == NULL) { PFKEY_STAT_INCREMENT(pfkeystat.out_invmsgtype); error = EINVAL; goto senderror; } - + error = (*key_typesw[msg->sadb_msg_type])(so, m, &mh); return error; @@ -9665,17 +10012,18 @@ senderror: static int key_senderror( - struct socket *so, - struct mbuf *m, - int code) + struct socket *so, + struct mbuf *m, + int code) { struct sadb_msg *msg; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - - if (m->m_len < sizeof(struct sadb_msg)) + + if (m->m_len < sizeof(struct sadb_msg)) { panic("invalid mbuf passed to key_senderror"); - + } + msg = mtod(m, struct sadb_msg *); msg->sadb_msg_errno = code; return key_sendup_mbuf(so, m, KEY_SENDUP_ONE); @@ -9688,29 +10036,31 @@ key_senderror( */ static int key_align( - struct mbuf *m, - struct sadb_msghdr *mhp) + struct mbuf *m, + struct sadb_msghdr *mhp) { struct mbuf *n; struct sadb_ext *ext; size_t off, end; int extlen; int toff; - + /* sanity check */ - if (m == NULL || mhp == NULL) + if (m == NULL || mhp == NULL) { panic("key_align: NULL pointer is passed.\n"); - if (m->m_len < sizeof(struct sadb_msg)) + } + if (m->m_len < sizeof(struct sadb_msg)) { panic("invalid mbuf passed to key_align"); - + } + /* initialize */ bzero(mhp, sizeof(*mhp)); - + mhp->msg = mtod(m, struct sadb_msg *); - mhp->ext[0] = (struct sadb_ext *)mhp->msg; /*XXX backward compat */ - + mhp->ext[0] = (struct sadb_ext *)mhp->msg; /*XXX backward compat */ + end = PFKEY_UNUNIT64(mhp->msg->sadb_msg_len); - extlen = end; /*just in case extlen is not updated*/ + extlen = end; /*just in case extlen is not updated*/ for (off = sizeof(struct sadb_msg); off < end; off += extlen) { n = m_pulldown(m, off, sizeof(struct sadb_ext), &toff); if (!n) { @@ -9718,154 +10068,161 @@ key_align( return ENOBUFS; } ext = (struct sadb_ext *)(void *)(mtod(n, caddr_t) + toff); - + /* set pointer */ switch (ext->sadb_ext_type) { - case SADB_EXT_SA: - case SADB_EXT_ADDRESS_SRC: - case SADB_EXT_ADDRESS_DST: - case SADB_EXT_ADDRESS_PROXY: - case SADB_EXT_LIFETIME_CURRENT: - case SADB_EXT_LIFETIME_HARD: - case SADB_EXT_LIFETIME_SOFT: - case SADB_EXT_KEY_AUTH: - case SADB_EXT_KEY_ENCRYPT: - case 
SADB_EXT_IDENTITY_SRC: - case SADB_EXT_IDENTITY_DST: - case SADB_EXT_SENSITIVITY: - case SADB_EXT_PROPOSAL: - case SADB_EXT_SUPPORTED_AUTH: - case SADB_EXT_SUPPORTED_ENCRYPT: - case SADB_EXT_SPIRANGE: - case SADB_X_EXT_POLICY: - case SADB_X_EXT_SA2: - case SADB_EXT_SESSION_ID: - case SADB_EXT_SASTAT: - case SADB_X_EXT_IPSECIF: - case SADB_X_EXT_ADDR_RANGE_SRC_START: - case SADB_X_EXT_ADDR_RANGE_SRC_END: - case SADB_X_EXT_ADDR_RANGE_DST_START: - case SADB_X_EXT_ADDR_RANGE_DST_END: - case SADB_EXT_MIGRATE_ADDRESS_SRC: - case SADB_EXT_MIGRATE_ADDRESS_DST: - case SADB_X_EXT_MIGRATE_IPSECIF: - /* duplicate check */ - /* - * XXX Are there duplication payloads of either - * KEY_AUTH or KEY_ENCRYPT ? - */ - if (mhp->ext[ext->sadb_ext_type] != NULL) { - ipseclog((LOG_DEBUG, - "key_align: duplicate ext_type %u " - "is passed.\n", ext->sadb_ext_type)); - bzero_mbuf(m); - m_freem(m); - PFKEY_STAT_INCREMENT(pfkeystat.out_dupext); - return EINVAL; - } - break; - default: + case SADB_EXT_SA: + case SADB_EXT_ADDRESS_SRC: + case SADB_EXT_ADDRESS_DST: + case SADB_EXT_ADDRESS_PROXY: + case SADB_EXT_LIFETIME_CURRENT: + case SADB_EXT_LIFETIME_HARD: + case SADB_EXT_LIFETIME_SOFT: + case SADB_EXT_KEY_AUTH: + case SADB_EXT_KEY_ENCRYPT: + case SADB_EXT_IDENTITY_SRC: + case SADB_EXT_IDENTITY_DST: + case SADB_EXT_SENSITIVITY: + case SADB_EXT_PROPOSAL: + case SADB_EXT_SUPPORTED_AUTH: + case SADB_EXT_SUPPORTED_ENCRYPT: + case SADB_EXT_SPIRANGE: + case SADB_X_EXT_POLICY: + case SADB_X_EXT_SA2: + case SADB_EXT_SESSION_ID: + case SADB_EXT_SASTAT: + case SADB_X_EXT_IPSECIF: + case SADB_X_EXT_ADDR_RANGE_SRC_START: + case SADB_X_EXT_ADDR_RANGE_SRC_END: + case SADB_X_EXT_ADDR_RANGE_DST_START: + case SADB_X_EXT_ADDR_RANGE_DST_END: + case SADB_EXT_MIGRATE_ADDRESS_SRC: + case SADB_EXT_MIGRATE_ADDRESS_DST: + case SADB_X_EXT_MIGRATE_IPSECIF: + /* duplicate check */ + /* + * XXX Are there duplication payloads of either + * KEY_AUTH or KEY_ENCRYPT ? 
+ */ + if (mhp->ext[ext->sadb_ext_type] != NULL) { ipseclog((LOG_DEBUG, - "key_align: invalid ext_type %u is passed.\n", - ext->sadb_ext_type)); + "key_align: duplicate ext_type %u " + "is passed.\n", ext->sadb_ext_type)); bzero_mbuf(m); m_freem(m); - PFKEY_STAT_INCREMENT(pfkeystat.out_invexttype); + PFKEY_STAT_INCREMENT(pfkeystat.out_dupext); return EINVAL; + } + break; + default: + ipseclog((LOG_DEBUG, + "key_align: invalid ext_type %u is passed.\n", + ext->sadb_ext_type)); + bzero_mbuf(m); + m_freem(m); + PFKEY_STAT_INCREMENT(pfkeystat.out_invexttype); + return EINVAL; } - + extlen = PFKEY_UNUNIT64(ext->sadb_ext_len); - + if (key_validate_ext(ext, extlen)) { bzero_mbuf(m); m_freem(m); PFKEY_STAT_INCREMENT(pfkeystat.out_invlen); return EINVAL; } - + n = m_pulldown(m, off, extlen, &toff); if (!n) { /* m is already freed */ return ENOBUFS; } ext = (struct sadb_ext *)(void *)(mtod(n, caddr_t) + toff); - + mhp->ext[ext->sadb_ext_type] = ext; mhp->extoff[ext->sadb_ext_type] = off; mhp->extlen[ext->sadb_ext_type] = extlen; } - + if (off != end) { bzero_mbuf(m); m_freem(m); PFKEY_STAT_INCREMENT(pfkeystat.out_invlen); return EINVAL; } - + return 0; } static int key_validate_ext( - const struct sadb_ext *ext, - int len) + const struct sadb_ext *ext, + int len) { struct sockaddr *sa; enum { NONE, ADDR } checktype = NONE; int baselen = 0; const int sal = offsetof(struct sockaddr, sa_len) + sizeof(sa->sa_len); - - if (len != PFKEY_UNUNIT64(ext->sadb_ext_len)) + + if (len != PFKEY_UNUNIT64(ext->sadb_ext_len)) { return EINVAL; - + } + /* if it does not match minimum/maximum length, bail */ if (ext->sadb_ext_type >= sizeof(minsize) / sizeof(minsize[0]) || - ext->sadb_ext_type >= sizeof(maxsize) / sizeof(maxsize[0])) + ext->sadb_ext_type >= sizeof(maxsize) / sizeof(maxsize[0])) { return EINVAL; - if (!minsize[ext->sadb_ext_type] || len < minsize[ext->sadb_ext_type]) + } + if (!minsize[ext->sadb_ext_type] || len < minsize[ext->sadb_ext_type]) { return EINVAL; - if (maxsize[ext->sadb_ext_type] && len > maxsize[ext->sadb_ext_type]) + } + if (maxsize[ext->sadb_ext_type] && len > maxsize[ext->sadb_ext_type]) { return EINVAL; - + } + /* more checks based on sadb_ext_type XXX need more */ switch (ext->sadb_ext_type) { - case SADB_EXT_ADDRESS_SRC: - case SADB_EXT_ADDRESS_DST: - case SADB_EXT_ADDRESS_PROXY: - case SADB_X_EXT_ADDR_RANGE_SRC_START: - case SADB_X_EXT_ADDR_RANGE_SRC_END: - case SADB_X_EXT_ADDR_RANGE_DST_START: - case SADB_X_EXT_ADDR_RANGE_DST_END: - case SADB_EXT_MIGRATE_ADDRESS_SRC: - case SADB_EXT_MIGRATE_ADDRESS_DST: - baselen = PFKEY_ALIGN8(sizeof(struct sadb_address)); + case SADB_EXT_ADDRESS_SRC: + case SADB_EXT_ADDRESS_DST: + case SADB_EXT_ADDRESS_PROXY: + case SADB_X_EXT_ADDR_RANGE_SRC_START: + case SADB_X_EXT_ADDR_RANGE_SRC_END: + case SADB_X_EXT_ADDR_RANGE_DST_START: + case SADB_X_EXT_ADDR_RANGE_DST_END: + case SADB_EXT_MIGRATE_ADDRESS_SRC: + case SADB_EXT_MIGRATE_ADDRESS_DST: + baselen = PFKEY_ALIGN8(sizeof(struct sadb_address)); + checktype = ADDR; + break; + case SADB_EXT_IDENTITY_SRC: + case SADB_EXT_IDENTITY_DST: + if (((struct sadb_ident *)(uintptr_t)(size_t)ext)-> + sadb_ident_type == SADB_X_IDENTTYPE_ADDR) { + baselen = PFKEY_ALIGN8(sizeof(struct sadb_ident)); checktype = ADDR; - break; - case SADB_EXT_IDENTITY_SRC: - case SADB_EXT_IDENTITY_DST: - if (((struct sadb_ident *)(uintptr_t)(size_t)ext)-> - sadb_ident_type == SADB_X_IDENTTYPE_ADDR) { - baselen = PFKEY_ALIGN8(sizeof(struct sadb_ident)); - checktype = ADDR; - } else - checktype = NONE; - break; - default: + } else { 
checktype = NONE; - break; + } + break; + default: + checktype = NONE; + break; } - + switch (checktype) { - case NONE: - break; - case ADDR: - sa = (struct sockaddr *)((caddr_t)(uintptr_t)ext + baselen); - - if (len < baselen + sal) - return EINVAL; - if (baselen + PFKEY_ALIGN8(sa->sa_len) != len) - return EINVAL; - break; + case NONE: + break; + case ADDR: + sa = (struct sockaddr *)((caddr_t)(uintptr_t)ext + baselen); + + if (len < baselen + sal) { + return EINVAL; + } + if (baselen + PFKEY_ALIGN8(sa->sa_len) != len) { + return EINVAL; + } + break; } /* check key bits length */ @@ -9876,7 +10233,7 @@ key_validate_ext( return EINVAL; } } - + return 0; } @@ -9890,36 +10247,37 @@ key_validate_ext( */ int key_checktunnelsanity( - struct secasvar *sav, - __unused u_int family, - __unused caddr_t src, - __unused caddr_t dst) + struct secasvar *sav, + __unused u_int family, + __unused caddr_t src, + __unused caddr_t dst) { - /* sanity check */ - if (sav->sah == NULL) + if (sav->sah == NULL) { panic("sav->sah == NULL at key_checktunnelsanity"); - + } + /* XXX: check inner IP header */ - + return 1; } /* record data transfer on SA, and update timestamps */ void key_sa_recordxfer( - struct secasvar *sav, - struct mbuf *m) + struct secasvar *sav, + struct mbuf *m) { - - - if (!sav) + if (!sav) { panic("key_sa_recordxfer called with sav == NULL"); - if (!m) + } + if (!m) { panic("key_sa_recordxfer called with m == NULL"); - if (!sav->lft_c) + } + if (!sav->lft_c) { return; - + } + lck_mtx_lock(sadb_mutex); /* * XXX Currently, there is a difference of bytes size @@ -9927,7 +10285,7 @@ key_sa_recordxfer( */ sav->lft_c->sadb_lifetime_bytes += m->m_pkthdr.len; /* to check bytes lifetime is done in key_timehandler(). */ - + /* * We use the number of packets as the unit of * sadb_lifetime_allocations. We increment the variable @@ -9935,7 +10293,7 @@ key_sa_recordxfer( */ sav->lft_c->sadb_lifetime_allocations++; /* XXX check for expires? */ - + /* * NOTE: We record CURRENT sadb_lifetime_usetime by using wall clock, * in seconds. HARD and SOFT lifetime are measured by the time @@ -9947,67 +10305,69 @@ key_sa_recordxfer( * <--------------> HARD * <-----> SOFT */ - { + { struct timeval tv; microtime(&tv); sav->lft_c->sadb_lifetime_usetime = tv.tv_sec; /* XXX check for expires? 
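 * Today the answer is no: this path only records, and the periodic
 * scan in key_timehandler() does the comparing (the same is noted
 * for the bytes lifetime above). Under the scheme described here a
 * soft use-time check reduces to an age test; a simplified
 * illustration (soft_usetime_expired is not a kernel symbol, and
 * the real scan also covers addtime, bytes and allocations):
 */
#include <sys/types.h>
#include <net/pfkeyv2.h>

/* Nonzero once the SOFT use-time lifetime has been exceeded. */
static int
soft_usetime_expired(const struct sadb_lifetime *curr,
    const struct sadb_lifetime *soft, u_int64_t now)
{
	if (soft->sadb_lifetime_usetime == 0 ||	/* no limit configured */
	    curr->sadb_lifetime_usetime == 0) {	/* SA never used yet */
		return 0;
	}
	return now - curr->sadb_lifetime_usetime >=
	    soft->sadb_lifetime_usetime;
}
/* Stamp taken; the expiry decision itself is key_timehandler()'s job.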
*/ - } + } lck_mtx_unlock(sadb_mutex); - + return; } /* dumb version */ void key_sa_routechange( - struct sockaddr *dst) + struct sockaddr *dst) { struct secashead *sah; struct route *ro; - + lck_mtx_lock(sadb_mutex); LIST_FOREACH(sah, &sahtree, chain) { ro = (struct route *)&sah->sa_route; if (ro->ro_rt && dst->sa_len == ro->ro_dst.sa_len - && bcmp(dst, &ro->ro_dst, dst->sa_len) == 0) { + && bcmp(dst, &ro->ro_dst, dst->sa_len) == 0) { ROUTE_RELEASE(ro); } } lck_mtx_unlock(sadb_mutex); - + return; } void key_sa_chgstate( - struct secasvar *sav, - u_int8_t state) + struct secasvar *sav, + u_int8_t state) { - - if (sav == NULL) + if (sav == NULL) { panic("key_sa_chgstate called with sav == NULL"); - - if (sav->state == state) + } + + if (sav->state == state) { return; - + } + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); - - if (__LIST_CHAINED(sav)) + + if (__LIST_CHAINED(sav)) { LIST_REMOVE(sav, chain); - + } + sav->state = state; LIST_INSERT_HEAD(&sav->sah->savtree[state], sav, chain); - } void key_sa_stir_iv( - struct secasvar *sav) + struct secasvar *sav) { lck_mtx_lock(sadb_mutex); - if (!sav->iv) + if (!sav->iv) { panic("key_sa_stir_iv called with sav == NULL"); + } key_randomfill(sav->iv, sav->ivlen); lck_mtx_unlock(sadb_mutex); } @@ -10015,21 +10375,22 @@ key_sa_stir_iv( /* XXX too much? */ static struct mbuf * key_alloc_mbuf( - int l) + int l) { struct mbuf *m = NULL, *n; int len, t; - + len = l; while (len > 0) { MGET(n, M_DONTWAIT, MT_DATA); - if (n && len > MLEN) + if (n && len > MLEN) { MCLGET(n, M_DONTWAIT); + } if (!n) { m_freem(m); return NULL; } - + n->m_next = NULL; n->m_len = 0; n->m_len = M_TRAILINGSPACE(n); @@ -10039,69 +10400,70 @@ key_alloc_mbuf( n->m_data += t; n->m_len = len; } - + len -= n->m_len; - - if (m) + + if (m) { m_cat(m, n); - else + } else { m = n; + } } - + return m; } static struct mbuf * -key_setdumpsastats (u_int32_t dir, - struct sastat *stats, - u_int32_t max_stats, - u_int64_t session_ids[], - u_int32_t seq, - u_int32_t pid) +key_setdumpsastats(u_int32_t dir, + struct sastat *stats, + u_int32_t max_stats, + u_int64_t session_ids[], + u_int32_t seq, + u_int32_t pid) { struct mbuf *result = NULL, *m = NULL; - + m = key_setsadbmsg(SADB_GETSASTAT, 0, 0, seq, pid, 0); if (!m) { goto fail; } result = m; - + m = key_setsadbsession_id(session_ids); if (!m) { goto fail; } m_cat(result, m); - + m = key_setsadbsastat(dir, - stats, - max_stats); + stats, + max_stats); if (!m) { goto fail; } m_cat(result, m); - + if ((result->m_flags & M_PKTHDR) == 0) { goto fail; } - + if (result->m_len < sizeof(struct sadb_msg)) { result = m_pullup(result, sizeof(struct sadb_msg)); if (result == NULL) { goto fail; } } - + result->m_pkthdr.len = 0; for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; } - + mtod(result, struct sadb_msg *)->sadb_msg_len = - PFKEY_UNIT64(result->m_pkthdr.len); - + PFKEY_UNIT64(result->m_pkthdr.len); + return result; - + fail: if (result) { m_freem(result); @@ -10117,9 +10479,9 @@ fail: */ static int -key_getsastat (struct socket *so, - struct mbuf *m, - const struct sadb_msghdr *mhp) +key_getsastat(struct socket *so, + struct mbuf *m, + const struct sadb_msghdr *mhp) { struct sadb_session_id *session_id; u_int32_t bufsize, arg_count, res_count; @@ -10127,11 +10489,12 @@ key_getsastat (struct socket *so, struct sastat *sa_stats_sav = NULL; struct mbuf *n; int error = 0; - + /* sanity check */ - if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) + if (so == NULL || m == NULL || mhp == NULL || mhp->msg == NULL) { 
panic("%s: NULL pointer is passed.\n", __FUNCTION__); - + } + if (mhp->ext[SADB_EXT_SESSION_ID] == NULL) { printf("%s: invalid message is passed. missing session-id.\n", __FUNCTION__); return key_senderror(so, m, EINVAL); @@ -10148,9 +10511,9 @@ key_getsastat (struct socket *so, printf("%s: invalid message is passed. short stat args.\n", __FUNCTION__); return key_senderror(so, m, EINVAL); } - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + // exit early if there are no active SAs if (ipsec_sav_count <= 0) { printf("%s: No active SAs.\n", __FUNCTION__); @@ -10158,7 +10521,7 @@ key_getsastat (struct socket *so, goto end; } bufsize = (ipsec_sav_count + 1) * sizeof(*sa_stats_sav); - + KMALLOC_WAIT(sa_stats_sav, __typeof__(sa_stats_sav), bufsize); if (sa_stats_sav == NULL) { printf("%s: No more memory.\n", __FUNCTION__); @@ -10166,9 +10529,9 @@ key_getsastat (struct socket *so, goto end; } bzero(sa_stats_sav, bufsize); - + sa_stats_arg = (__typeof__(sa_stats_arg)) - (void *)mhp->ext[SADB_EXT_SASTAT]; + (void *)mhp->ext[SADB_EXT_SASTAT]; arg_count = sa_stats_arg->sadb_sastat_list_len; // exit early if there are no requested SAs if (arg_count == 0) { @@ -10177,12 +10540,12 @@ key_getsastat (struct socket *so, goto end; } res_count = 0; - + if (key_getsastatbyspi((struct sastat *)(sa_stats_arg + 1), - arg_count, - sa_stats_sav, - bufsize, - &res_count)) { + arg_count, + sa_stats_sav, + bufsize, + &res_count)) { printf("%s: Error finding SAs.\n", __FUNCTION__); error = ENOENT; goto end; @@ -10192,57 +10555,58 @@ key_getsastat (struct socket *so, error = ENOENT; goto end; } - + session_id = (__typeof__(session_id)) - (void *)mhp->ext[SADB_EXT_SESSION_ID]; - + (void *)mhp->ext[SADB_EXT_SESSION_ID]; + /* send this to the userland. */ n = key_setdumpsastats(sa_stats_arg->sadb_sastat_dir, - sa_stats_sav, - res_count, - session_id->sadb_session_id_v, - mhp->msg->sadb_msg_seq, - mhp->msg->sadb_msg_pid); + sa_stats_sav, + res_count, + session_id->sadb_session_id_v, + mhp->msg->sadb_msg_seq, + mhp->msg->sadb_msg_pid); if (!n) { printf("%s: No bufs to dump stats.\n", __FUNCTION__); error = ENOBUFS; goto end; } - + key_sendup_mbuf(so, n, KEY_SENDUP_ALL); end: if (sa_stats_sav) { KFREE(sa_stats_sav); } - - if (error) + + if (error) { return key_senderror(so, m, error); - + } + m_freem(m); return 0; } static void -key_update_natt_keepalive_timestamp (struct secasvar *sav_sent, - struct secasvar *sav_update) +key_update_natt_keepalive_timestamp(struct secasvar *sav_sent, + struct secasvar *sav_update) { struct secasindex saidx_swap_sent_addr; - + // exit early if two SAs are identical, or if sav_update is current if (sav_sent == sav_update || sav_update->natt_last_activity == natt_now) { return; } - + // assuming that (sav_update->remote_ike_port != 0 && (esp_udp_encap_port & 0xFFFF) != 0) - + bzero(&saidx_swap_sent_addr, sizeof(saidx_swap_sent_addr)); memcpy(&saidx_swap_sent_addr.src, &sav_sent->sah->saidx.dst, sizeof(saidx_swap_sent_addr.src)); memcpy(&saidx_swap_sent_addr.dst, &sav_sent->sah->saidx.src, sizeof(saidx_swap_sent_addr.dst)); saidx_swap_sent_addr.proto = sav_sent->sah->saidx.proto; saidx_swap_sent_addr.mode = sav_sent->sah->saidx.mode; // we ignore reqid for split-tunnel setups - + if (key_cmpsaidx(&sav_sent->sah->saidx, &sav_update->sah->saidx, CMP_MODE | CMP_PORT) || key_cmpsaidx(&saidx_swap_sent_addr, &sav_update->sah->saidx, CMP_MODE | CMP_PORT)) { sav_update->natt_last_activity = natt_now; @@ -10250,102 +10614,111 @@ key_update_natt_keepalive_timestamp (struct secasvar *sav_sent, } 
static int -key_send_delsp (struct secpolicy *sp) -{ - struct mbuf *result = NULL, *m; - - if (sp == NULL) - goto fail; - - /* set msg header */ - m = key_setsadbmsg(SADB_X_SPDDELETE, 0, 0, 0, 0, 0); - if (!m) { - goto fail; - } - result = m; - - /* set sadb_address(es) for source */ - if (sp->spidx.src_range.start.ss_len > 0) { - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_START, - (struct sockaddr *)&sp->spidx.src_range.start, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_END, - (struct sockaddr *)&sp->spidx.src_range.end, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - } else { - m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, - (struct sockaddr *)&sp->spidx.src, sp->spidx.prefs, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - } - - /* set sadb_address(es) for destination */ - if (sp->spidx.dst_range.start.ss_len > 0) { - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_START, - (struct sockaddr *)&sp->spidx.dst_range.start, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - - m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_END, - (struct sockaddr *)&sp->spidx.dst_range.end, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - } else { - m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, - (struct sockaddr *)&sp->spidx.dst, sp->spidx.prefd, - sp->spidx.ul_proto); - if (!m) - goto fail; - m_cat(result, m); - } - - /* set secpolicy */ - m = key_sp2msg(sp); - if (!m) { - goto fail; - } - m_cat(result, m); - - if ((result->m_flags & M_PKTHDR) == 0) { - goto fail; - } - - if (result->m_len < sizeof(struct sadb_msg)) { - result = m_pullup(result, sizeof(struct sadb_msg)); - if (result == NULL) { - goto fail; - } - } - +key_send_delsp(struct secpolicy *sp) +{ + struct mbuf *result = NULL, *m; + + if (sp == NULL) { + goto fail; + } + + /* set msg header */ + m = key_setsadbmsg(SADB_X_SPDDELETE, 0, 0, 0, 0, 0); + if (!m) { + goto fail; + } + result = m; + + /* set sadb_address(es) for source */ + if (sp->spidx.src_range.start.ss_len > 0) { + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_START, + (struct sockaddr *)&sp->spidx.src_range.start, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_SRC_END, + (struct sockaddr *)&sp->spidx.src_range.end, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + } else { + m = key_setsadbaddr(SADB_EXT_ADDRESS_SRC, + (struct sockaddr *)&sp->spidx.src, sp->spidx.prefs, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + } + + /* set sadb_address(es) for destination */ + if (sp->spidx.dst_range.start.ss_len > 0) { + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_START, + (struct sockaddr *)&sp->spidx.dst_range.start, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + + m = key_setsadbaddr(SADB_X_EXT_ADDR_RANGE_DST_END, + (struct sockaddr *)&sp->spidx.dst_range.end, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + } else { + m = key_setsadbaddr(SADB_EXT_ADDRESS_DST, + (struct sockaddr *)&sp->spidx.dst, sp->spidx.prefd, + sp->spidx.ul_proto); + if (!m) { + goto fail; + } + m_cat(result, m); + } + + /* set secpolicy */ + m = key_sp2msg(sp); + if (!m) { + goto fail; + } + m_cat(result, m); + + if ((result->m_flags & M_PKTHDR) == 0) { + goto fail; + } + + if 
(result->m_len < sizeof(struct sadb_msg)) { + result = m_pullup(result, sizeof(struct sadb_msg)); + if (result == NULL) { + goto fail; + } + } + result->m_pkthdr.len = 0; - for (m = result; m; m = m->m_next) + for (m = result; m; m = m->m_next) { result->m_pkthdr.len += m->m_len; - + } + mtod(result, struct sadb_msg *)->sadb_msg_len = PFKEY_UNIT64(result->m_pkthdr.len); - + return key_sendup_mbuf(NULL, result, KEY_SENDUP_REGISTERED); - + fail: - if (result) + if (result) { m_free(result); + } return -1; } void -key_delsp_for_ipsec_if (ifnet_t ipsec_if) +key_delsp_for_ipsec_if(ifnet_t ipsec_if) { struct secashead *sah; struct secasvar *sav, *nextsav; @@ -10353,58 +10726,58 @@ key_delsp_for_ipsec_if (ifnet_t ipsec_if) u_int state; struct secpolicy *sp, *nextsp; int dir; - - if (ipsec_if == NULL) - return; - + + if (ipsec_if == NULL) { + return; + } + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); - + lck_mtx_lock(sadb_mutex); - + for (dir = 0; dir < IPSEC_DIR_MAX; dir++) { for (sp = LIST_FIRST(&sptree[dir]); - sp != NULL; - sp = nextsp) { - + sp != NULL; + sp = nextsp) { nextsp = LIST_NEXT(sp, chain); - + if (sp->ipsec_if == ipsec_if) { ifnet_release(sp->ipsec_if); sp->ipsec_if = NULL; - + key_send_delsp(sp); - + sp->state = IPSEC_SPSTATE_DEAD; - key_freesp(sp, KEY_SADB_LOCKED); + key_freesp(sp, KEY_SADB_LOCKED); } } } - + LIST_FOREACH(sah, &sahtree, chain) { if (sah->ipsec_if == ipsec_if) { /* This SAH is linked to the IPSec interface. It now needs to close. */ ifnet_release(sah->ipsec_if); sah->ipsec_if = NULL; - + for (stateidx = 0; stateidx < _ARRAYLEN(saorder_state_alive); stateidx++) { state = saorder_state_any[stateidx]; for (sav = LIST_FIRST(&sah->savtree[state]); sav != NULL; sav = nextsav) { nextsav = LIST_NEXT(sav, chain); - + key_sa_chgstate(sav, SADB_SASTATE_DEAD); key_freesav(sav, KEY_SADB_LOCKED); } } - + sah->state = SADB_SASTATE_DEAD; } } - + lck_mtx_unlock(sadb_mutex); } __private_extern__ u_int32_t -key_fill_offload_frames_for_savs (ifnet_t ifp, +key_fill_offload_frames_for_savs(ifnet_t ifp, struct ifnet_keepalive_offload_frame *frames_array, u_int32_t frames_array_count, size_t frame_data_offset) @@ -10415,7 +10788,7 @@ key_fill_offload_frames_for_savs (ifnet_t ifp, u_int32_t frame_index = 0; if (frame == NULL || frames_array_count == 0) { - return (frame_index); + return frame_index; } lck_mtx_lock(sadb_mutex); @@ -10425,7 +10798,7 @@ key_fill_offload_frames_for_savs (ifnet_t ifp, frame_index++; if (frame_index >= frames_array_count) { lck_mtx_unlock(sadb_mutex); - return (frame_index); + return frame_index; } frame = &(frames_array[frame_index]); } @@ -10433,5 +10806,5 @@ key_fill_offload_frames_for_savs (ifnet_t ifp, } lck_mtx_unlock(sadb_mutex); - return (frame_index); + return frame_index; } diff --git a/bsd/netkey/key.h b/bsd/netkey/key.h index c61f04f22..4d3ee9421 100644 --- a/bsd/netkey/key.h +++ b/bsd/netkey/key.h @@ -35,8 +35,8 @@ #ifdef BSD_KERNEL_PRIVATE -#define KEY_SADB_UNLOCKED 0 -#define KEY_SADB_LOCKED 1 +#define KEY_SADB_UNLOCKED 0 +#define KEY_SADB_LOCKED 1 extern struct key_cb key_cb; @@ -56,17 +56,17 @@ struct sadb_lifetime; extern struct secpolicy *key_allocsp(struct secpolicyindex *, u_int); extern struct secasvar *key_allocsa_policy(struct secasindex *); extern struct secpolicy *key_gettunnel(struct sockaddr *, - struct sockaddr *, struct sockaddr *, struct sockaddr *); + struct sockaddr *, struct sockaddr *, struct sockaddr *); extern struct secasvar *key_alloc_outbound_sav_for_interface(ifnet_t interface, int family, - struct sockaddr *src, 
- struct sockaddr *dst); + struct sockaddr *src, + struct sockaddr *dst); extern int key_checkrequest(struct ipsecrequest *isr, struct secasindex *, - struct secasvar **sav); + struct secasvar **sav); extern struct secasvar *key_allocsa(u_int, caddr_t, caddr_t, - u_int, u_int32_t); + u_int, u_int32_t); struct secasvar * key_allocsa_extended(u_int family, caddr_t src, caddr_t dst, - u_int proto, u_int32_t spi, ifnet_t interface); + u_int proto, u_int32_t spi, ifnet_t interface); extern u_int16_t key_natt_get_translated_port(struct secasvar *); extern void key_freesp(struct secpolicy *, int); extern void key_freesav(struct secasvar *, int); @@ -86,29 +86,29 @@ extern void key_sa_routechange(struct sockaddr *); extern void key_sa_chgstate(struct secasvar *, u_int8_t); extern void key_sa_stir_iv(struct secasvar *); extern void key_delsah(struct secashead *sah); -extern struct secashead *key_newsah2 (struct secasindex *saidx, u_int8_t dir); +extern struct secashead *key_newsah2(struct secasindex *saidx, u_int8_t dir); extern u_int32_t key_getspi2(struct sockaddr *src, - struct sockaddr *dst, - u_int8_t proto, - u_int8_t mode, - u_int32_t reqid, - struct sadb_spirange *spirange); + struct sockaddr *dst, + u_int8_t proto, + u_int8_t mode, + u_int32_t reqid, + struct sadb_spirange *spirange); extern struct secasvar * key_newsav2(struct secashead *sah, - u_int8_t satype, - u_int8_t alg_auth, - u_int8_t alg_enc, - u_int32_t flags, - u_int8_t replay, - struct sadb_key *key_auth, - u_int16_t key_auth_len, - struct sadb_key *key_enc, - u_int16_t key_enc_len, - u_int16_t natt_port, - u_int32_t seq, - u_int32_t spi, - u_int32_t pid, - struct sadb_lifetime *lifetime_hard, - struct sadb_lifetime *lifetime_soft); + u_int8_t satype, + u_int8_t alg_auth, + u_int8_t alg_enc, + u_int32_t flags, + u_int8_t replay, + struct sadb_key *key_auth, + u_int16_t key_auth_len, + struct sadb_key *key_enc, + u_int16_t key_enc_len, + u_int16_t natt_port, + u_int32_t seq, + u_int32_t spi, + u_int32_t pid, + struct sadb_lifetime *lifetime_hard, + struct sadb_lifetime *lifetime_soft); extern void key_delsav(struct secasvar *sav); extern struct secpolicy *key_getspbyid(u_int32_t); extern void key_delsp_for_ipsec_if(ifnet_t ipsec_if); diff --git a/bsd/netkey/key_debug.h b/bsd/netkey/key_debug.h index 0683f37f8..702d92014 100644 --- a/bsd/netkey/key_debug.h +++ b/bsd/netkey/key_debug.h @@ -34,29 +34,29 @@ #include /* debug flags */ -#define KEYDEBUG_STAMP 0x00000001 /* path */ -#define KEYDEBUG_DATA 0x00000002 /* data */ -#define KEYDEBUG_DUMP 0x00000004 /* dump */ +#define KEYDEBUG_STAMP 0x00000001 /* path */ +#define KEYDEBUG_DATA 0x00000002 /* data */ +#define KEYDEBUG_DUMP 0x00000004 /* dump */ -#define KEYDEBUG_KEY 0x00000010 /* key processing */ -#define KEYDEBUG_ALG 0x00000020 /* ciph & auth algorithm */ -#define KEYDEBUG_IPSEC 0x00000040 /* ipsec processing */ +#define KEYDEBUG_KEY 0x00000010 /* key processing */ +#define KEYDEBUG_ALG 0x00000020 /* ciph & auth algorithm */ +#define KEYDEBUG_IPSEC 0x00000040 /* ipsec processing */ -#define KEYDEBUG_KEY_STAMP (KEYDEBUG_KEY | KEYDEBUG_STAMP) -#define KEYDEBUG_KEY_DATA (KEYDEBUG_KEY | KEYDEBUG_DATA) -#define KEYDEBUG_KEY_DUMP (KEYDEBUG_KEY | KEYDEBUG_DUMP) -#define KEYDEBUG_ALG_STAMP (KEYDEBUG_ALG | KEYDEBUG_STAMP) -#define KEYDEBUG_ALG_DATA (KEYDEBUG_ALG | KEYDEBUG_DATA) -#define KEYDEBUG_ALG_DUMP (KEYDEBUG_ALG | KEYDEBUG_DUMP) -#define KEYDEBUG_IPSEC_STAMP (KEYDEBUG_IPSEC | KEYDEBUG_STAMP) -#define KEYDEBUG_IPSEC_DATA (KEYDEBUG_IPSEC | KEYDEBUG_DATA) -#define 
KEYDEBUG_IPSEC_DUMP (KEYDEBUG_IPSEC | KEYDEBUG_DUMP) +#define KEYDEBUG_KEY_STAMP (KEYDEBUG_KEY | KEYDEBUG_STAMP) +#define KEYDEBUG_KEY_DATA (KEYDEBUG_KEY | KEYDEBUG_DATA) +#define KEYDEBUG_KEY_DUMP (KEYDEBUG_KEY | KEYDEBUG_DUMP) +#define KEYDEBUG_ALG_STAMP (KEYDEBUG_ALG | KEYDEBUG_STAMP) +#define KEYDEBUG_ALG_DATA (KEYDEBUG_ALG | KEYDEBUG_DATA) +#define KEYDEBUG_ALG_DUMP (KEYDEBUG_ALG | KEYDEBUG_DUMP) +#define KEYDEBUG_IPSEC_STAMP (KEYDEBUG_IPSEC | KEYDEBUG_STAMP) +#define KEYDEBUG_IPSEC_DATA (KEYDEBUG_IPSEC | KEYDEBUG_DATA) +#define KEYDEBUG_IPSEC_DUMP (KEYDEBUG_IPSEC | KEYDEBUG_DUMP) #if 0 -#define KEYDEBUG(lev,arg) \ +#define KEYDEBUG(lev, arg) \ do { if ((key_debug_level & (lev)) == (lev)) { arg; } } while (0) #else -#define KEYDEBUG(lev,arg) +#define KEYDEBUG(lev, arg) #endif struct sadb_msg; @@ -89,4 +89,3 @@ extern void ipsec_bindump(caddr_t, int); #endif /* _NETKEY_KEY_DEBUG_H_ */ - diff --git a/bsd/netkey/key_var.h b/bsd/netkey/key_var.h index 7552efebc..64a3f2616 100644 --- a/bsd/netkey/key_var.h +++ b/bsd/netkey/key_var.h @@ -34,21 +34,21 @@ #include /* sysctl */ -#define KEYCTL_DEBUG_LEVEL 1 -#define KEYCTL_SPI_TRY 2 -#define KEYCTL_SPI_MIN_VALUE 3 -#define KEYCTL_SPI_MAX_VALUE 4 -#define KEYCTL_RANDOM_INT 5 -#define KEYCTL_LARVAL_LIFETIME 6 -#define KEYCTL_BLOCKACQ_COUNT 7 -#define KEYCTL_BLOCKACQ_LIFETIME 8 -#define KEYCTL_ESP_KEYMIN 9 -#define KEYCTL_ESP_AUTH 10 -#define KEYCTL_AH_KEYMIN 11 -#define KEYCTL_PREFERED_OLDSA 12 -#define KEYCTL_NATT_KEEPALIVE_INTERVAL 13 -#define KEYCTL_PFKEYSTAT 14 -#define KEYCTL_MAXID 15 +#define KEYCTL_DEBUG_LEVEL 1 +#define KEYCTL_SPI_TRY 2 +#define KEYCTL_SPI_MIN_VALUE 3 +#define KEYCTL_SPI_MAX_VALUE 4 +#define KEYCTL_RANDOM_INT 5 +#define KEYCTL_LARVAL_LIFETIME 6 +#define KEYCTL_BLOCKACQ_COUNT 7 +#define KEYCTL_BLOCKACQ_LIFETIME 8 +#define KEYCTL_ESP_KEYMIN 9 +#define KEYCTL_ESP_AUTH 10 +#define KEYCTL_AH_KEYMIN 11 +#define KEYCTL_PREFERED_OLDSA 12 +#define KEYCTL_NATT_KEEPALIVE_INTERVAL 13 +#define KEYCTL_PFKEYSTAT 14 +#define KEYCTL_MAXID 15 #ifdef BSD_KERNEL_PRIVATE #define KEYCTL_NAMES { \ @@ -92,4 +92,3 @@ #endif /* BSD_KERNEL_PRIVATE */ #endif /* _NETKEY_KEY_VAR_H_ */ - diff --git a/bsd/netkey/keydb.c b/bsd/netkey/keydb.c index a5500fe9c..5292414f1 100644 --- a/bsd/netkey/keydb.c +++ b/bsd/netkey/keydb.c @@ -94,7 +94,7 @@ keydb_newsecpolicy(void) LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); return (struct secpolicy *)_MALLOC(sizeof(*p), M_SECA, - M_WAITOK | M_ZERO); + M_WAITOK | M_ZERO); } void @@ -121,25 +121,26 @@ keydb_newsecashead(void) M_WAITOK | M_ZERO); lck_mtx_lock(sadb_mutex); } - if (!p) + if (!p) { return p; - for (i = 0; i < sizeof(p->savtree)/sizeof(p->savtree[0]); i++) + } + for (i = 0; i < sizeof(p->savtree) / sizeof(p->savtree[0]); i++) { LIST_INIT(&p->savtree[i]); + } return p; } #if 0 void keydb_delsecashead(p) - struct secashead *p; +struct secashead *p; { - _FREE(p, M_SECA); } -/* +/* * secasvar management (reference counted) */ struct secasvar * @@ -150,8 +151,9 @@ keydb_newsecasvar() LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_NOTOWNED); p = (struct secasvar *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK); - if (!p) + if (!p) { return p; + } bzero(p, sizeof(*p)); p->refcnt = 1; return p; @@ -159,9 +161,8 @@ keydb_newsecasvar() void keydb_refsecasvar(p) - struct secasvar *p; +struct secasvar *p; { - LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); p->refcnt++; @@ -169,24 +170,24 @@ keydb_refsecasvar(p) void keydb_freesecasvar(p) - struct secasvar *p; +struct secasvar *p; { - LCK_MTX_ASSERT(sadb_mutex, 
LCK_MTX_ASSERT_OWNED); p->refcnt--; /* negative refcnt will cause panic intentionally */ - if (p->refcnt <= 0) + if (p->refcnt <= 0) { keydb_delsecasvar(p); + } } static void keydb_delsecasvar(p) - struct secasvar *p; +struct secasvar *p; { - - if (p->refcnt) + if (p->refcnt) { panic("keydb_delsecasvar called with refcnt != 0"); + } _FREE(p, M_SECA); } @@ -199,7 +200,7 @@ struct secreplay * keydb_newsecreplay(size_t wsize) { struct secreplay *p; - + LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED); p = (struct secreplay *)_MALLOC(sizeof(*p), M_SECA, M_NOWAIT | M_ZERO); @@ -209,8 +210,9 @@ keydb_newsecreplay(size_t wsize) M_WAITOK | M_ZERO); lck_mtx_lock(sadb_mutex); } - if (!p) + if (!p) { return p; + } if (wsize != 0) { p->bitmap = (caddr_t)_MALLOC(wsize, M_SECA, M_NOWAIT | M_ZERO); @@ -232,8 +234,9 @@ keydb_newsecreplay(size_t wsize) void keydb_delsecreplay(struct secreplay *p) { - if (p->bitmap) + if (p->bitmap) { _FREE(p->bitmap, M_SECA); + } _FREE(p, M_SECA); } @@ -247,16 +250,16 @@ keydb_newsecreg() struct secreg *p; p = (struct secreg *)_MALLOC(sizeof(*p), M_SECA, M_WAITOK); - if (p) + if (p) { bzero(p, sizeof(*p)); + } return p; } void keydb_delsecreg(p) - struct secreg *p; +struct secreg *p; { - _FREE(p, M_SECA); } #endif diff --git a/bsd/netkey/keydb.h b/bsd/netkey/keydb.h index 8e053fbff..19450e6bd 100644 --- a/bsd/netkey/keydb.h +++ b/bsd/netkey/keydb.h @@ -40,12 +40,12 @@ /* Security Association Index */ /* NOTE: Ensure to be same address family */ struct secasindex { - struct sockaddr_storage src; /* srouce address for SA */ - struct sockaddr_storage dst; /* destination address for SA */ - u_int16_t proto; /* IPPROTO_ESP or IPPROTO_AH */ - u_int8_t mode; /* mode of protocol, see ipsec.h */ - u_int32_t reqid; /* reqid id who owned this SA */ - /* see IPSEC_MANUAL_REQID_MAX. */ + struct sockaddr_storage src; /* srouce address for SA */ + struct sockaddr_storage dst; /* destination address for SA */ + u_int16_t proto; /* IPPROTO_ESP or IPPROTO_AH */ + u_int8_t mode; /* mode of protocol, see ipsec.h */ + u_int32_t reqid; /* reqid id who owned this SA */ + /* see IPSEC_MANUAL_REQID_MAX. */ u_int ipsec_ifindex; }; @@ -55,73 +55,73 @@ struct secashead { struct secasindex saidx; - struct sadb_ident *idents; /* source identity */ - struct sadb_ident *identd; /* destination identity */ - /* XXX I don't know how to use them. */ + struct sadb_ident *idents; /* source identity */ + struct sadb_ident *identd; /* destination identity */ + /* XXX I don't know how to use them. */ ifnet_t ipsec_if; u_int outgoing_if; u_int8_t dir; /* IPSEC_DIR_INBOUND or IPSEC_DIR_OUTBOUND */ - u_int8_t state; /* MATURE or DEAD. */ - LIST_HEAD(_satree, secasvar) savtree[SADB_SASTATE_MAX+1]; - /* SA chain */ - /* The first of this list is newer SA */ + u_int8_t state; /* MATURE or DEAD. 
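 * Entries marked DEAD (e.g. by key_flush() above) are unlinked
 * later by the periodic key_timehandler() scan.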
*/ + LIST_HEAD(_satree, secasvar) savtree[SADB_SASTATE_MAX + 1]; + /* SA chain */ + /* The first of this list is newer SA */ - struct route_in6 sa_route; /* route cache */ + struct route_in6 sa_route; /* route cache */ }; /* Security Association */ struct secasvar { LIST_ENTRY(secasvar) chain; LIST_ENTRY(secasvar) spihash; - int refcnt; /* reference count */ - u_int8_t state; /* Status of this Association */ - - u_int8_t alg_auth; /* Authentication Algorithm Identifier*/ - u_int8_t alg_enc; /* Cipher Algorithm Identifier */ - u_int32_t spi; /* SPI Value, network byte order */ - u_int32_t flags; /* holder for SADB_KEY_FLAGS */ - u_int16_t flags2; /* holder for SADB_SA2_KEY_FLAGS */ - - struct sadb_key *key_auth; /* Key for Authentication */ - struct sadb_key *key_enc; /* Key for Encryption */ - caddr_t iv; /* Initilization Vector */ - u_int ivlen; /* length of IV */ - void *sched; /* intermediate encryption key */ + int refcnt; /* reference count */ + u_int8_t state; /* Status of this Association */ + + u_int8_t alg_auth; /* Authentication Algorithm Identifier*/ + u_int8_t alg_enc; /* Cipher Algorithm Identifier */ + u_int32_t spi; /* SPI Value, network byte order */ + u_int32_t flags; /* holder for SADB_KEY_FLAGS */ + u_int16_t flags2; /* holder for SADB_SA2_KEY_FLAGS */ + + struct sadb_key *key_auth; /* Key for Authentication */ + struct sadb_key *key_enc; /* Key for Encryption */ + caddr_t iv; /* Initilization Vector */ + u_int ivlen; /* length of IV */ + void *sched; /* intermediate encryption key */ size_t schedlen; - struct secreplay *replay; /* replay prevention */ - long created; /* for lifetime */ + struct secreplay *replay; /* replay prevention */ + long created; /* for lifetime */ - struct sadb_lifetime *lft_c; /* CURRENT lifetime, it's constant. */ - struct sadb_lifetime *lft_h; /* HARD lifetime */ - struct sadb_lifetime *lft_s; /* SOFT lifetime */ + struct sadb_lifetime *lft_c; /* CURRENT lifetime, it's constant. */ + struct sadb_lifetime *lft_h; /* HARD lifetime */ + struct sadb_lifetime *lft_s; /* SOFT lifetime */ struct socket *so; /* Associated socket */ - u_int32_t seq; /* sequence number */ - pid_t pid; /* message's pid */ + u_int32_t seq; /* sequence number */ + pid_t pid; /* message's pid */ + + struct secashead *sah; /* back pointer to the secashead */ - struct secashead *sah; /* back pointer to the secashead */ - /* Nat Traversal related bits */ - u_int64_t natt_last_activity; - u_int16_t remote_ike_port; - u_int16_t natt_encapsulated_src_port; /* network byte order */ - u_int16_t natt_interval; /* Interval in seconds */ - u_int16_t natt_offload_interval; /* Hardware Offload Interval in seconds */ - - u_int8_t always_expire; /* Send expire/delete messages even if unused */ + u_int64_t natt_last_activity; + u_int16_t remote_ike_port; + u_int16_t natt_encapsulated_src_port; /* network byte order */ + u_int16_t natt_interval; /* Interval in seconds */ + u_int16_t natt_offload_interval; /* Hardware Offload Interval in seconds */ + + u_int8_t always_expire; /* Send expire/delete messages even if unused */ }; /* replay prevention */ struct secreplay { u_int32_t count; - u_int wsize; /* window size, i.g. 4 bytes */ - u_int32_t seq; /* used by sender */ - u_int32_t lastseq; /* used by receiver */ - caddr_t bitmap; /* used by receiver */ - int overflow; /* overflow flag */ + u_int wsize; /* window size, i.g. 
4 bytes */ + u_int32_t seq; /* used by sender */ + u_int32_t lastseq; /* used by receiver */ + caddr_t bitmap; /* used by receiver */ + int overflow; /* overflow flag */ }; /* socket table due to send PF_KEY messages. */ @@ -138,16 +138,16 @@ struct secacq { struct secasindex saidx; - u_int32_t seq; /* sequence number */ - long created; /* for lifetime */ - int count; /* for lifetime */ + u_int32_t seq; /* sequence number */ + long created; /* for lifetime */ + int count; /* for lifetime */ }; #endif /* Sensitivity Level Specification */ /* nothing */ -#define SADB_KILL_INTERVAL 600 /* six seconds */ +#define SADB_KILL_INTERVAL 600 /* six seconds */ struct key_cb { int key_count; diff --git a/bsd/netkey/keysock.c b/bsd/netkey/keysock.c index 5502485d3..9b7f46424 100644 --- a/bsd/netkey/keysock.c +++ b/bsd/netkey/keysock.c @@ -87,8 +87,8 @@ extern lck_mtx_t *raw_mtx; extern void key_init(struct protosw *, struct domain *); -struct sockaddr key_dst = { 2, PF_KEY, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,} }; -struct sockaddr key_src = { 2, PF_KEY, {0,0,0,0,0,0,0,0,0,0,0,0,0,0,} }; +struct sockaddr key_dst = { 2, PF_KEY, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }; +struct sockaddr key_src = { 2, PF_KEY, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }; static void key_dinit(struct domain *); static int key_sendup0(struct rawcb *, struct mbuf *, int); @@ -111,8 +111,8 @@ key_output(struct mbuf *m, struct socket *so) key_output(struct mbuf *m, ...) #else key_output(m, va_alist) - struct mbuf *m; - va_dcl +struct mbuf *m; +va_dcl #endif #endif { @@ -127,8 +127,9 @@ key_output(m, va_alist) va_end(ap); #endif - if (m == 0) + if (m == 0) { panic("key_output: NULL pointer was passed.\n"); + } socket_unlock(so, 0); lck_mtx_lock(pfkey_stat_mutex); @@ -157,8 +158,9 @@ key_output(m, va_alist) } } - if ((m->m_flags & M_PKTHDR) == 0) + if ((m->m_flags & M_PKTHDR) == 0) { panic("key_output: not M_PKTHDR ??"); + } #if IPSEC_DEBUG KEYDEBUG(KEYDEBUG_KEY_DUMP, kdebug_mbuf(m)); @@ -179,8 +181,9 @@ key_output(m, va_alist) m = NULL; end: - if (m) + if (m) { m_freem(m); + } socket_lock(so, 0); return error; } @@ -197,8 +200,9 @@ key_sendup0(struct rawcb *rp, struct mbuf *m, int promisc) struct sadb_msg *pmsg; M_PREPEND(m, sizeof(struct sadb_msg), M_NOWAIT, 1); - if (m && m->m_len < sizeof(struct sadb_msg)) + if (m && m->m_len < sizeof(struct sadb_msg)) { m = m_pullup(m, sizeof(struct sadb_msg)); + } if (!m) { #if IPSEC_DEBUG printf("key_sendup0: cannot pullup\n"); @@ -225,8 +229,7 @@ key_sendup0(struct rawcb *rp, struct mbuf *m, int promisc) printf("key_sendup0: sbappendaddr failed\n"); #endif PFKEY_STAT_INCREMENT(pfkeystat.in_nomem); - } - else { + } else { sorwakeup(rp->rcb_socket); } return error; @@ -243,10 +246,12 @@ key_sendup_mbuf(struct socket *so, struct mbuf *m, int target) struct rawcb *rp; int error = 0; - if (m == NULL) + if (m == NULL) { panic("key_sendup_mbuf: NULL pointer was passed.\n"); - if (so == NULL && target == KEY_SENDUP_ONE) + } + if (so == NULL && target == KEY_SENDUP_ONE) { panic("key_sendup_mbuf: NULL pointer was passed.\n"); + } lck_mtx_lock(pfkey_stat_mutex); pfkeystat.in_total++; @@ -268,19 +273,20 @@ key_sendup_mbuf(struct socket *so, struct mbuf *m, int target) msg = mtod(m, struct sadb_msg *); PFKEY_STAT_INCREMENT(pfkeystat.in_msgtype[msg->sadb_msg_type]); } - + lck_mtx_lock(raw_mtx); LIST_FOREACH(rp, &rawcb_list, list) { - if (rp->rcb_proto.sp_family != PF_KEY) + if (rp->rcb_proto.sp_family != PF_KEY) { continue; + } if (rp->rcb_proto.sp_protocol - && rp->rcb_proto.sp_protocol != 
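Stepping back to the secreplay structure declared above in keydb.h (count, wsize in bytes, lastseq, bitmap): it backs the standard IPsec anti-replay window. The consult logic lives in key.c rather than in these headers; the following is a simplified flat sketch of how such a window is typically checked, not the kernel's code, with the bitmap fixed at 32 bytes for brevity (the kernel allocates wsize dynamically):

#include <stdint.h>

struct replay_sketch {
        uint32_t lastseq;    /* highest sequence number seen */
        uint32_t wsize;      /* window size in bytes, <= 32 here */
        uint8_t  bitmap[32]; /* one bit per sequence number in window */
};

/* Return 1 if seq is fresh (accept), 0 if replayed or too old. */
static int
replay_check(const struct replay_sketch *rp, uint32_t seq)
{
        uint32_t wbits = 8 * rp->wsize;

        if (seq > rp->lastseq) {
                return 1;                 /* new right edge: accept */
        }
        if (rp->lastseq - seq >= wbits) {
                return 0;                 /* left of the window: too old */
        }
        uint32_t off = rp->lastseq - seq; /* 0 .. wbits-1 inside window */
        return !(rp->bitmap[off / 8] & (1 << (off % 8)));
}

Sequence numbers newer than lastseq always pass (the caller then slides the window and sets the bit); anything more than 8*wsize behind the right edge is rejected outright.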
PF_KEY_V2) { + && rp->rcb_proto.sp_protocol != PF_KEY_V2) { continue; } kp = (struct keycb *)rp; - + socket_lock(rp->rcb_socket, 1); /* * If you are in promiscuous mode, and when you get broadcasted @@ -309,8 +315,9 @@ key_sendup_mbuf(struct socket *so, struct mbuf *m, int target) sendup++; break; case KEY_SENDUP_REGISTERED: - if (kp->kp_registered) + if (kp->kp_registered) { sendup++; + } break; } PFKEY_STAT_INCREMENT(pfkeystat.in_msgtarget[target]); @@ -318,10 +325,9 @@ key_sendup_mbuf(struct socket *so, struct mbuf *m, int target) if (!sendup) { socket_unlock(rp->rcb_socket, 1); continue; - } - else + } else { sendup = 0; // clear for next iteration - + } if ((n = m_copy(m, 0, (int)M_COPYALL)) == NULL) { #if IPSEC_DEBUG printf("key_sendup: m_copy fail\n"); @@ -378,12 +384,14 @@ key_attach(struct socket *so, int proto, struct proc *p) struct keycb *kp; int error; - if (sotorawcb(so) != 0) - return EISCONN; /* XXX panic? */ - kp = (struct keycb *)_MALLOC(sizeof (*kp), M_PCB, + if (sotorawcb(so) != 0) { + return EISCONN; /* XXX panic? */ + } + kp = (struct keycb *)_MALLOC(sizeof(*kp), M_PCB, M_WAITOK | M_ZERO); /* XXX */ - if (kp == 0) + if (kp == 0) { return ENOBUFS; + } so->so_pcb = (caddr_t)kp; kp->kp_promisc = kp->kp_registered = 0; @@ -401,8 +409,9 @@ key_attach(struct socket *so, int proto, struct proc *p) } /* so is already locked when calling key_attach */ - if (kp->kp_raw.rcb_proto.sp_protocol == PF_KEY) /* XXX: AF_KEY */ + if (kp->kp_raw.rcb_proto.sp_protocol == PF_KEY) { /* XXX: AF_KEY */ key_cb.key_count++; + } key_cb.any_count++; soisconnected(so); so->so_options |= SO_USELOOPBACK; @@ -445,8 +454,9 @@ key_detach(struct socket *so) int error; if (kp != 0) { - if (kp->kp_raw.rcb_proto.sp_protocol == PF_KEY) /* XXX: AF_KEY */ + if (kp->kp_raw.rcb_proto.sp_protocol == PF_KEY) { /* XXX: AF_KEY */ key_cb.key_count--; + } key_cb.any_count--; socket_unlock(so, 0); key_freereg(so); @@ -486,7 +496,7 @@ key_peeraddr(struct socket *so, struct sockaddr **nam) */ static int key_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam, - struct mbuf *control, struct proc *p) + struct mbuf *control, struct proc *p) { int error; error = raw_usrreqs.pru_send(so, flags, m, nam, control, p); @@ -518,22 +528,22 @@ key_sockaddr(struct socket *so, struct sockaddr **nam) } static struct pr_usrreqs key_usrreqs = { - .pru_abort = key_abort, - .pru_attach = key_attach, - .pru_bind = key_bind, - .pru_connect = key_connect, - .pru_detach = key_detach, - .pru_disconnect = key_disconnect, - .pru_peeraddr = key_peeraddr, - .pru_send = key_send, - .pru_shutdown = key_shutdown, - .pru_sockaddr = key_sockaddr, - .pru_sosend = sosend, - .pru_soreceive = soreceive, + .pru_abort = key_abort, + .pru_attach = key_attach, + .pru_bind = key_bind, + .pru_connect = key_connect, + .pru_detach = key_detach, + .pru_disconnect = key_disconnect, + .pru_peeraddr = key_peeraddr, + .pru_send = key_send, + .pru_shutdown = key_shutdown, + .pru_sockaddr = key_sockaddr, + .pru_sosend = sosend, + .pru_soreceive = soreceive, }; /* sysctl */ -SYSCTL_NODE(_net, PF_KEY, key, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Key Family"); +SYSCTL_NODE(_net, PF_KEY, key, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Key Family"); /* * Definitions of protocols supported in the KEY domain. 
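Before the protosw table below, note the three delivery targets key_sendup_mbuf just iterated over; keysock.h later in this patch defines them. The per-socket decision reduces to a pure function, sketched here with is_target and registered abstracting the rcb_socket == so and kp_registered tests (promiscuous sockets additionally receive copies via key_sendup0's SADB_X_PROMISC path):

#include <stdbool.h>

#define KEY_SENDUP_ONE        0
#define KEY_SENDUP_ALL        1
#define KEY_SENDUP_REGISTERED 2

static bool
should_send_up(int target, bool is_target, bool registered)
{
        switch (target) {
        case KEY_SENDUP_ONE:
                return is_target;    /* only the requesting socket */
        case KEY_SENDUP_ALL:
                return true;         /* every PF_KEY_V2 socket */
        case KEY_SENDUP_REGISTERED:
                return registered;   /* sockets that sent SADB_REGISTER */
        default:
                return false;
        }
}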
@@ -542,24 +552,24 @@ SYSCTL_NODE(_net, PF_KEY, key, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Key Family"); extern struct domain keydomain_s; static struct protosw keysw[] = { -{ - .pr_type = SOCK_RAW, - .pr_protocol = PF_KEY_V2, - .pr_flags = PR_ATOMIC|PR_ADDR, - .pr_output = key_output, - .pr_ctlinput = raw_ctlinput, - .pr_init = key_init, - .pr_usrreqs = &key_usrreqs, -} + { + .pr_type = SOCK_RAW, + .pr_protocol = PF_KEY_V2, + .pr_flags = PR_ATOMIC | PR_ADDR, + .pr_output = key_output, + .pr_ctlinput = raw_ctlinput, + .pr_init = key_init, + .pr_usrreqs = &key_usrreqs, + } }; -static int key_proto_count = (sizeof (keysw) / sizeof (struct protosw)); +static int key_proto_count = (sizeof(keysw) / sizeof(struct protosw)); struct domain keydomain_s = { - .dom_family = PF_KEY, - .dom_name = "key", - .dom_init = key_dinit, - .dom_maxrtkey = sizeof (struct key_cb), + .dom_family = PF_KEY, + .dom_name = "key", + .dom_init = key_dinit, + .dom_maxrtkey = sizeof(struct key_cb), }; static void @@ -573,6 +583,7 @@ key_dinit(struct domain *dp) keydomain = dp; - for (i = 0, pr = &keysw[0]; i < key_proto_count; i++, pr++) + for (i = 0, pr = &keysw[0]; i < key_proto_count; i++, pr++) { net_add_proto(pr, dp, 1); + } } diff --git a/bsd/netkey/keysock.h b/bsd/netkey/keysock.h index 32d8ef559..347630151 100644 --- a/bsd/netkey/keysock.h +++ b/bsd/netkey/keysock.h @@ -36,40 +36,40 @@ /* statistics for pfkey socket */ struct pfkeystat { /* kernel -> userland */ - u_quad_t out_total; /* # of total calls */ - u_quad_t out_bytes; /* total bytecount */ - u_quad_t out_msgtype[256]; /* message type histogram */ - u_quad_t out_invlen; /* invalid length field */ - u_quad_t out_invver; /* invalid version field */ - u_quad_t out_invmsgtype; /* invalid message type field */ - u_quad_t out_tooshort; /* msg too short */ - u_quad_t out_nomem; /* memory allocation failure */ - u_quad_t out_dupext; /* duplicate extension */ - u_quad_t out_invexttype; /* invalid extension type */ - u_quad_t out_invsatype; /* invalid sa type */ - u_quad_t out_invaddr; /* invalid address extension */ + u_quad_t out_total; /* # of total calls */ + u_quad_t out_bytes; /* total bytecount */ + u_quad_t out_msgtype[256]; /* message type histogram */ + u_quad_t out_invlen; /* invalid length field */ + u_quad_t out_invver; /* invalid version field */ + u_quad_t out_invmsgtype; /* invalid message type field */ + u_quad_t out_tooshort; /* msg too short */ + u_quad_t out_nomem; /* memory allocation failure */ + u_quad_t out_dupext; /* duplicate extension */ + u_quad_t out_invexttype; /* invalid extension type */ + u_quad_t out_invsatype; /* invalid sa type */ + u_quad_t out_invaddr; /* invalid address extension */ /* userland -> kernel */ - u_quad_t in_total; /* # of total calls */ - u_quad_t in_bytes; /* total bytecount */ - u_quad_t in_msgtype[256]; /* message type histogram */ - u_quad_t in_msgtarget[3]; /* one/all/registered */ - u_quad_t in_nomem; /* memory allocation failure */ + u_quad_t in_total; /* # of total calls */ + u_quad_t in_bytes; /* total bytecount */ + u_quad_t in_msgtype[256]; /* message type histogram */ + u_quad_t in_msgtarget[3]; /* one/all/registered */ + u_quad_t in_nomem; /* memory allocation failure */ /* others */ - u_quad_t sockerr; /* # of socket related errors */ + u_quad_t sockerr; /* # of socket related errors */ }; -#define KEY_SENDUP_ONE 0 -#define KEY_SENDUP_ALL 1 -#define KEY_SENDUP_REGISTERED 2 +#define KEY_SENDUP_ONE 0 +#define KEY_SENDUP_ALL 1 +#define KEY_SENDUP_REGISTERED 2 #ifdef BSD_KERNEL_PRIVATE -#define 
PFKEY_STAT_INCREMENT(x) \ +#define PFKEY_STAT_INCREMENT(x) \ {lck_mtx_lock(pfkey_stat_mutex); (x)++; lck_mtx_unlock(pfkey_stat_mutex);} struct keycb { - struct rawcb kp_raw; /* rawcb */ - int kp_promisc; /* promiscuous mode */ - int kp_registered; /* registered socket */ + struct rawcb kp_raw; /* rawcb */ + int kp_promisc; /* promiscuous mode */ + int kp_registered; /* registered socket */ }; extern struct pfkeystat pfkeystat; @@ -80,7 +80,7 @@ extern int key_output(struct mbuf *, struct socket* so); extern int key_output(struct mbuf *, ...); #endif extern int key_usrreq(struct socket *, - int, struct mbuf *, struct mbuf *, struct mbuf *); + int, struct mbuf *, struct mbuf *, struct mbuf *); extern int key_sendup(struct socket *, struct sadb_msg *, u_int, int); extern int key_sendup_mbuf(struct socket *, struct mbuf *, int); diff --git a/bsd/nfs/gss/ccrypto.c b/bsd/nfs/gss/ccrypto.c index b7aab7c55..64239e28b 100644 --- a/bsd/nfs/gss/ccrypto.c +++ b/bsd/nfs/gss/ccrypto.c @@ -38,57 +38,63 @@ int corecrypto_available(void); int corecrypto_available(void) { - return (g_crypto_funcs ? 1 : 0); + return g_crypto_funcs ? 1 : 0; } const struct ccmode_cbc * ccaes_cbc_decrypt_mode(void) { - if (g_crypto_funcs) - return (g_crypto_funcs->ccaes_cbc_decrypt); - return (NULL); + if (g_crypto_funcs) { + return g_crypto_funcs->ccaes_cbc_decrypt; + } + return NULL; } const struct ccmode_cbc * ccaes_cbc_encrypt_mode(void) { - if (g_crypto_funcs) - return (g_crypto_funcs->ccaes_cbc_encrypt); - return (NULL); + if (g_crypto_funcs) { + return g_crypto_funcs->ccaes_cbc_encrypt; + } + return NULL; } const struct ccmode_cbc * ccdes3_cbc_decrypt_mode(void) { - if (g_crypto_funcs) - return (g_crypto_funcs->cctdes_cbc_decrypt); - return (NULL); + if (g_crypto_funcs) { + return g_crypto_funcs->cctdes_cbc_decrypt; + } + return NULL; } const struct ccmode_cbc * ccdes3_cbc_encrypt_mode(void) { - if (g_crypto_funcs) - return (g_crypto_funcs->cctdes_cbc_encrypt); - return (NULL); + if (g_crypto_funcs) { + return g_crypto_funcs->cctdes_cbc_encrypt; + } + return NULL; } size_t ccpad_cts3_decrypt(const struct ccmode_cbc *cbc, cccbc_ctx *cbc_key, - cccbc_iv *iv, size_t nbytes, const void *in, void *out) + cccbc_iv *iv, size_t nbytes, const void *in, void *out) { - if (g_crypto_funcs) + if (g_crypto_funcs) { return (*g_crypto_funcs->ccpad_cts3_decrypt_fn)(cbc, cbc_key, iv, nbytes, in, out); - return (0); + } + return 0; } size_t ccpad_cts3_encrypt(const struct ccmode_cbc *cbc, cccbc_ctx *cbc_key, - cccbc_iv *iv, size_t nbytes, const void *in, void *out) + cccbc_iv *iv, size_t nbytes, const void *in, void *out) { - if (g_crypto_funcs) + if (g_crypto_funcs) { return (*g_crypto_funcs->ccpad_cts3_encrypt_fn)(cbc, cbc_key, iv, nbytes, in, out); - return (0); + } + return 0; } const struct ccdigest_info *ccsha1_ltc_di_ptr; @@ -96,13 +102,16 @@ const struct ccdigest_info *ccsha1_ltc_di_ptr; const struct ccdigest_info * ccsha1_di(void) { - if (g_crypto_funcs) - return (g_crypto_funcs->ccsha1_di); - return (NULL); + if (g_crypto_funcs) { + return g_crypto_funcs->ccsha1_di; + } + return NULL; } -void ccdes_key_set_odd_parity(void *key, unsigned long length) +void +ccdes_key_set_odd_parity(void *key, unsigned long length) { - if (g_crypto_funcs) + if (g_crypto_funcs) { (*g_crypto_funcs->ccdes_key_set_odd_parity_fn)(key, length); + } } diff --git a/bsd/nfs/gss/gss_krb5_mech.c b/bsd/nfs/gss/gss_krb5_mech.c index 0d592043a..70f497101 100644 --- a/bsd/nfs/gss/gss_krb5_mech.c +++ b/bsd/nfs/gss/gss_krb5_mech.c @@ -86,7 +86,7 @@ typedef struct 
hmac_walker_ctx { } *hmac_walker_ctx_t; typedef size_t (*ccpad_func)(const struct ccmode_cbc *, cccbc_ctx *, cccbc_iv *, - size_t nbytes, const void *, void *); + size_t nbytes, const void *, void *); static int krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size); @@ -101,7 +101,7 @@ int do_crypt(void *, uint8_t *, uint32_t); void do_hmac_init(hmac_walker_ctx_t, crypto_ctx_t, void *); int do_hmac(void *, uint8_t *, uint32_t); -void krb5_make_usage(uint32_t, uint8_t, uint8_t [KRB5_USAGE_LEN]); +void krb5_make_usage(uint32_t, uint8_t, uint8_t[KRB5_USAGE_LEN]); void krb5_key_derivation(crypto_ctx_t, const void *, size_t, void **, size_t); void cc_key_schedule_create(crypto_ctx_t); void gss_crypto_ctx_free(crypto_ctx_t); @@ -136,7 +136,7 @@ printmbuf(const char *str, mbuf_t mb, uint32_t offset, uint32_t len) offset -= mbuf_len(mb); continue; } - for(i = offset; len && i < mbuf_len(mb); i++) { + for (i = offset; len && i < mbuf_len(mb); i++) { const char *s = (cout % 8) ? " " : (cout % 16) ? " " : "\n"; printf("%02x%s", ((uint8_t *)mbuf_data(mb))[i], s); len--; @@ -144,8 +144,9 @@ printmbuf(const char *str, mbuf_t mb, uint32_t offset, uint32_t len) } offset = 0; } - if ((cout-1) % 16) + if ((cout - 1) % 16) { printf("\n"); + } printf("Count chars %d\n", cout - 1); } @@ -156,26 +157,27 @@ printgbuf(const char *str, gss_buffer_t buf) size_t len = buf->length > 128 ? 128 : buf->length; printf("%s: len = %d value = %p\n", str ? str : "buffer", (int)buf->length, buf->value); - for (i = 0; i < len; i++) { + for (i = 0; i < len; i++) { const char *s = ((i + 1) % 8) ? " " : ((i + 1) % 16) ? " " : "\n"; printf("%02x%s", ((uint8_t *)buf->value)[i], s); } - if (i % 16) + if (i % 16) { printf("\n"); + } } /* * Initialize the data structures for the gss kerberos mech. */ -#define GSS_KRB5_NOT_INITIALIZED 0 -#define GSS_KRB5_INITIALIZING 1 -#define GSS_KRB5_INITIALIZED 2 +#define GSS_KRB5_NOT_INITIALIZED 0 +#define GSS_KRB5_INITIALIZING 1 +#define GSS_KRB5_INITIALIZED 2 static volatile uint32_t gss_krb5_mech_initted = GSS_KRB5_NOT_INITIALIZED; int gss_krb5_mech_is_initialized(void) { - return (gss_krb5_mech_initted == GSS_KRB5_NOT_INITIALIZED); + return gss_krb5_mech_initted == GSS_KRB5_NOT_INITIALIZED; } void @@ -184,14 +186,16 @@ gss_krb5_mech_init(void) extern void IOSleep(int); /* Once initted always initted */ - if (gss_krb5_mech_initted == GSS_KRB5_INITIALIZED) + if (gss_krb5_mech_initted == GSS_KRB5_INITIALIZED) { return; + } /* make sure we init only once */ if (!OSCompareAndSwap(GSS_KRB5_NOT_INITIALIZED, GSS_KRB5_INITIALIZING, &gss_krb5_mech_initted)) { /* wait until initialization is complete */ - while (!gss_krb5_mech_is_initialized()) + while (!gss_krb5_mech_is_initialized()) { IOSleep(10); + } return; } gss_krb5_mech_grp = lck_grp_alloc_init("gss_krb5_mech", LCK_GRP_ATTR_NULL); @@ -201,13 +205,15 @@ gss_krb5_mech_init(void) uint32_t gss_release_buffer(uint32_t *minor, gss_buffer_t buf) { - if (minor) + if (minor) { *minor = 0; - if (buf->value) + } + if (buf->value) { FREE(buf->value, M_TEMP); + } buf->value = NULL; buf->length = 0; - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } /* @@ -219,9 +225,10 @@ gss_mbuf_len(mbuf_t mb, size_t offset) { size_t len; - for (len = 0; mb; mb = mbuf_next(mb)) + for (len = 0; mb; mb = mbuf_next(mb)) { len += mbuf_len(mb); - return ((offset > len) ? 0 : len - offset); + } + return (offset > len) ? 
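gss_krb5_mech_init above guards one-time setup with OSCompareAndSwap over a three-state flag, so late arrivals can tell an in-progress initialization (spin with IOSleep) from a finished one (return immediately). The same idiom in portable C11, as a sketch rather than the kernel's code:

#include <stdatomic.h>

enum { NOT_INITIALIZED, INITIALIZING, INITIALIZED };
static _Atomic int mech_state = NOT_INITIALIZED;

static void
mech_init_once(void (*do_init)(void))
{
        int expected = NOT_INITIALIZED;

        if (atomic_load(&mech_state) == INITIALIZED) {
                return;                          /* fast path: already done */
        }
        if (!atomic_compare_exchange_strong(&mech_state, &expected,
            INITIALIZING)) {
                /* lost the race: wait for the winner to finish */
                while (atomic_load(&mech_state) != INITIALIZED) {
                        /* the kernel version sleeps here (IOSleep(10)) */
                }
                return;
        }
        do_init();
        atomic_store(&mech_state, INITIALIZED);
}

The three states are what make the spin-wait possible: a plain boolean could not distinguish "not started" from "started but unfinished".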
0 : len - offset; } /* @@ -239,34 +246,39 @@ split_one_mbuf(mbuf_t mb, size_t offset, mbuf_t *nmb, int join) *nmb = mb; /* We don't have an mbuf or we're alread on an mbuf boundary */ - if (mb == NULL || offset == 0) - return (0); + if (mb == NULL || offset == 0) { + return 0; + } /* If the mbuf length is offset then the next mbuf is the one we want */ if (mbuf_len(mb) == offset) { *nmb = mbuf_next(mb); - if (!join) + if (!join) { mbuf_setnext(mb, NULL); - return (0); + } + return 0; } - if (offset > mbuf_len(mb)) - return (EINVAL); + if (offset > mbuf_len(mb)) { + return EINVAL; + } error = mbuf_split(mb, offset, MBUF_WAITOK, nmb); - if (error) - return (error); + if (error) { + return error; + } if (mbuf_flags(*nmb) & MBUF_PKTHDR) { /* We don't want to copy the pkthdr. mbuf_split does that. */ error = mbuf_setflags_mask(*nmb, ~MBUF_PKTHDR, MBUF_PKTHDR); } - if (join) + if (join) { /* Join the chain again */ mbuf_setnext(mb, *nmb); + } - return (0); + return 0; } /* @@ -291,21 +303,25 @@ gss_normalize_mbuf(mbuf_t chain, size_t offset, size_t *subchain_length, mbuf_t mbuf_t mb, nmb; errno_t error; - if (tail == NULL) + if (tail == NULL) { tail = &nmb; + } *tail = NULL; *subchain = NULL; - for (len = offset, mb = chain; mb && len > mbuf_len(mb); mb = mbuf_next(mb)) - len -= mbuf_len(mb); + for (len = offset, mb = chain; mb && len > mbuf_len(mb); mb = mbuf_next(mb)) { + len -= mbuf_len(mb); + } /* if we don't have offset bytes just return */ - if (mb == NULL) - return (0); + if (mb == NULL) { + return 0; + } error = split_one_mbuf(mb, len, subchain, join); - if (error) - return (error); + if (error) { + return error; + } assert(subchain != NULL && *subchain != NULL); assert(offset == 0 ? mb == *subchain : 1); @@ -314,12 +330,13 @@ gss_normalize_mbuf(mbuf_t chain, size_t offset, size_t *subchain_length, mbuf_t length = (length > len) ? len : length; *subchain_length = length; - for (len = length, mb = *subchain; mb && len > mbuf_len(mb); mb = mbuf_next(mb)) + for (len = length, mb = *subchain; mb && len > mbuf_len(mb); mb = mbuf_next(mb)) { len -= mbuf_len(mb); + } error = split_one_mbuf(mb, len, tail, join); - return (error); + return error; } mbuf_t @@ -327,16 +344,20 @@ gss_join_mbuf(mbuf_t head, mbuf_t body, mbuf_t tail) { mbuf_t mb; - for (mb = head; mb && mbuf_next(mb); mb = mbuf_next(mb)) + for (mb = head; mb && mbuf_next(mb); mb = mbuf_next(mb)) { ; - if (mb) + } + if (mb) { mbuf_setnext(mb, body); - for (mb = body; mb && mbuf_next(mb); mb = mbuf_next(mb)) + } + for (mb = body; mb && mbuf_next(mb); mb = mbuf_next(mb)) { ; - if (mb) + } + if (mb) { mbuf_setnext(mb, tail); + } mb = head ? head : (body ? 
body : tail); - return (mb); + return mb; } /* @@ -357,12 +378,13 @@ gss_prepend_mbuf(mbuf_t *chain, uint8_t *bytes, size_t size) } error = mbuf_prepend(chain, size, MBUF_WAITOK); - if (error) - return (error); + if (error) { + return error; + } data = mbuf_data(*chain); memcpy(data, bytes, size); - return (0); + return 0; } errno_t @@ -371,24 +393,27 @@ gss_append_mbuf(mbuf_t chain, uint8_t *bytes, size_t size) size_t len = 0; mbuf_t mb; - if (chain == NULL) - return (EINVAL); + if (chain == NULL) { + return EINVAL; + } - for (mb = chain; mb; mb = mbuf_next(mb)) + for (mb = chain; mb; mb = mbuf_next(mb)) { len += mbuf_len(mb); + } - return (mbuf_copyback(chain, len, size, bytes, MBUF_WAITOK)); + return mbuf_copyback(chain, len, size, bytes, MBUF_WAITOK); } errno_t gss_strip_mbuf(mbuf_t chain, ssize_t size) { - if (chain == NULL) - return (EINVAL); + if (chain == NULL) { + return EINVAL; + } mbuf_adj(chain, size); - return (0); + return 0; } @@ -437,8 +462,9 @@ mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_ /* run our hash/encrypt/decrpyt function */ if (mlen > 0) { error = crypto_fn(ctx, ptr, mlen); - if (error) + if (error) { break; + } ptr += mlen; len -= mlen; } @@ -475,7 +501,7 @@ mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_ error = mbuf_pullup(&nmb, offset - mlen); if (error) { mbuf_setnext(mb, NULL); - return (error); + return error; } } nptr = mbuf_data(nmb); @@ -483,15 +509,17 @@ mbuf_walk(mbuf_t mbp, size_t offset, size_t len, size_t blocksize, int (*crypto_ } len -= offset; error = crypto_fn(ctx, block, sizeof(block)); - if (error) + if (error) { break; + } memcpy(ptr, block, residue); - if (nptr) + if (nptr) { memcpy(nptr, block + residue, offset); + } } } - return (error); + return error; } void @@ -500,7 +528,7 @@ do_crypt_init(crypt_walker_ctx_t wctx, int encrypt, crypto_ctx_t cctx, cccbc_ctx wctx->ccmode = encrypt ? 
cctx->enc_mode : cctx->dec_mode; wctx->crypt_ctx = ks; - MALLOC(wctx->iv, cccbc_iv *, wctx->ccmode->block_size, M_TEMP, M_WAITOK|M_ZERO); + MALLOC(wctx->iv, cccbc_iv *, wctx->ccmode->block_size, M_TEMP, M_WAITOK | M_ZERO); cccbc_set_iv(wctx->ccmode, wctx->iv, NULL); } @@ -515,7 +543,7 @@ do_crypt(void *walker, uint8_t *data, uint32_t len) cccbc_update(wctx->ccmode, wctx->crypt_ctx, wctx->iv, nblocks, data, data); wctx->length += len; - return (0); + return 0; } void @@ -524,7 +552,7 @@ do_hmac_init(hmac_walker_ctx_t wctx, crypto_ctx_t cctx, void *key) size_t alloc_size = cchmac_di_size(cctx->di); wctx->di = cctx->di; - MALLOC(wctx->hmac_ctx, struct cchmac_ctx *, alloc_size, M_TEMP, M_WAITOK|M_ZERO); + MALLOC(wctx->hmac_ctx, struct cchmac_ctx *, alloc_size, M_TEMP, M_WAITOK | M_ZERO); cchmac_init(cctx->di, wctx->hmac_ctx, cctx->keylen, key); } @@ -535,12 +563,12 @@ do_hmac(void *walker, uint8_t *data, uint32_t len) cchmac_update(wctx->di, wctx->hmac_ctx, len, data); - return (0); + return 0; } int -krb5_mic(crypto_ctx_t ctx, gss_buffer_t header, gss_buffer_t bp, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse) +krb5_mic(crypto_ctx_t ctx, gss_buffer_t header, gss_buffer_t bp, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse) { uint8_t digest[ctx->di->output_size]; cchmac_di_decl(ctx->di, hmac_ctx); @@ -576,17 +604,17 @@ krb5_mic(crypto_ctx_t ctx, gss_buffer_t header, gss_buffer_t bp, gss_buffer_t t cchmac_final(ctx->di, hmac_ctx, digest); if (verify) { - *verify = (memcmp(mic, digest, ctx->digest_size) == 0); - } - else + *verify = (memcmp(mic, digest, ctx->digest_size) == 0); + } else { memcpy(mic, digest, ctx->digest_size); + } - return (0); + return 0; } int krb5_mic_mbuf(crypto_ctx_t ctx, gss_buffer_t header, - mbuf_t mbp, uint32_t offset, uint32_t len, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse) + mbuf_t mbp, uint32_t offset, uint32_t len, gss_buffer_t trailer, uint8_t *mic, int *verify, int ikey, int reverse) { struct hmac_walker_ctx wctx; uint8_t digest[ctx->di->output_size]; @@ -616,25 +644,30 @@ krb5_mic_mbuf(crypto_ctx_t ctx, gss_buffer_t header, error = mbuf_walk(mbp, offset, len, 1, do_hmac, &wctx); - if (error) - return (error); - if (trailer) + if (error) { + return error; + } + if (trailer) { cchmac_update(ctx->di, wctx.hmac_ctx, trailer->length, trailer->value); + } cchmac_final(ctx->di, wctx.hmac_ctx, digest); FREE(wctx.hmac_ctx, M_TEMP); if (verify) { *verify = (memcmp(mic, digest, ctx->digest_size) == 0); - if (!*verify) - return (EBADRPC); - } else + if (!*verify) { + return EBADRPC; + } + } else { memcpy(mic, digest, ctx->digest_size); + } - return (0); + return 0; } -errno_t /* __attribute__((optnone)) */ +errno_t +/* __attribute__((optnone)) */ krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, uint32_t len, int encrypt, cccbc_ctx *ks) { struct crypt_walker_ctx wctx; @@ -652,8 +685,9 @@ krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, uint32_t len, int encrypt, cccbc_ ctx->flags |= CRYPTO_KS_ALLOCED; lck_mtx_unlock(ctx->lock); } - if (!ks) + if (!ks) { ks = encrypt ? ctx->ks.enc : ctx->ks.dec; + } if ((ctx->flags & CRYPTO_CTS_ENABLE) && ctx->mpad == 1) { uint8_t block[ccmode->block_size]; @@ -671,9 +705,9 @@ krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, uint32_t len, int encrypt, cccbc_ cts_len = r ? 
r + ccmode->block_size : 2 * ccmode->block_size; plen = len - cts_len; /* If plen is 0 we only have two blocks to crypt with ccpad below */ - if (plen == 0) + if (plen == 0) { lmb = *mbp; - else { + } else { gss_normalize_mbuf(*mbp, 0, &plen, &mb, &lmb, 0); assert(*mbp == mb); assert(plen == len - cts_len); @@ -687,22 +721,24 @@ krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, uint32_t len, int encrypt, cccbc_ memset(pad_block, 0, padlen); error = gss_append_mbuf(*mbp, pad_block, padlen); - if (error) - return (error); + if (error) { + return error; + } plen = len + padlen; } do_crypt_init(&wctx, encrypt, ctx, ks); if (plen) { error = mbuf_walk(*mbp, 0, plen, ccmode->block_size, do_crypt, &wctx); - if (error) - return (error); + if (error) { + return error; + } } if ((ctx->flags & CRYPTO_CTS_ENABLE) && cts_len) { - uint8_t cts_pad[2*ccmode->block_size]; + uint8_t cts_pad[2 * ccmode->block_size]; ccpad_func do_ccpad = encrypt ? ccpad_cts3_encrypt : ccpad_cts3_decrypt; - assert(cts_len <= 2*ccmode->block_size && cts_len > ccmode->block_size); + assert(cts_len <= 2 * ccmode->block_size && cts_len > ccmode->block_size); memset(cts_pad, 0, sizeof(cts_pad)); mbuf_copydata(lmb, 0, cts_len, cts_pad); mbuf_freem(lmb); @@ -711,7 +747,7 @@ krb5_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, uint32_t len, int encrypt, cccbc_ } FREE(wctx.iv, M_TEMP); - return (0); + return 0; } /* @@ -725,36 +761,40 @@ rr13(unsigned char *buf, size_t len) unsigned char tmp[bytes]; size_t i; - if(len == 0) + if (len == 0) { return 0; + } { const int bits = 13 % len; const int lbit = len % 8; memcpy(tmp, buf, bytes); - if(lbit) { + if (lbit) { /* pad final byte with inital bits */ tmp[bytes - 1] &= 0xff << (8 - lbit); - for(i = lbit; i < 8; i += len) + for (i = lbit; i < 8; i += len) { tmp[bytes - 1] |= buf[0] >> i; + } } - for(i = 0; i < bytes; i++) { + for (i = 0; i < bytes; i++) { ssize_t bb; ssize_t b1, s1, b2, s2; /* calculate first bit position of this byte */ bb = 8 * i - bits; - while(bb < 0) + while (bb < 0) { bb += len; + } /* byte offset and shift count */ b1 = bb / 8; s1 = bb % 8; - if((size_t)bb + 8 > bytes * 8) + if ((size_t)bb + 8 > bytes * 8) { /* watch for wraparound */ s2 = (len + 8 - s1) % 8; - else + } else { s2 = 8 - s1; + } b2 = (b1 + 1) % bytes; buf[i] = (tmp[b1] << s1) | (tmp[b2] >> s2); } @@ -770,12 +810,12 @@ add1(unsigned char *a, unsigned char *b, size_t len) ssize_t i; int carry = 0; - for(i = len - 1; i >= 0; i--){ + for (i = len - 1; i >= 0; i--) { int x = a[i] + b[i] + carry; carry = x > 0xff; a[i] = x & 0xff; } - for(i = len - 1; carry && i >= 0; i--){ + for (i = len - 1; carry && i >= 0; i--) { int x = a[i] + carry; carry = x > 0xff; a[i] = x & 0xff; @@ -787,7 +827,7 @@ static int krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size) { /* if len < size we need at most N * len bytes, ie < 2 * size; - if len > size we need at most 2 * len */ + * if len > size we need at most 2 * len */ int ret = 0; size_t maxlen = 2 * max(size, len); size_t l = 0; @@ -800,16 +840,18 @@ krb5_n_fold(const void *instr, size_t len, void *foldstr, size_t size) memcpy(tmp + l, buf, len); l += len; ret = rr13(buf, len * 8); - if (ret) + if (ret) { goto out; - while(l >= size) { + } + while (l >= size) { add1(foldstr, tmp, size); l -= size; - if(l == 0) + if (l == 0) { break; + } memmove(tmp, tmp + size, l); } - } while(l != 0); + } while (l != 0); out: return ret; @@ -820,8 +862,9 @@ krb5_make_usage(uint32_t usage_no, uint8_t suffix, uint8_t usage_string[KRB5_USA { uint32_t i; - for (i = 0; i < 4; i++) 
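krb5_make_usage, whose body this hunk reindents, serializes the 32-bit key-usage number big-endian and appends a one-byte suffix; elsewhere in this file 0xAA marks encryption keys, 0x55 integrity keys, and 0x99 checksum keys, matching RFC 3961-style key derivation. A standalone worked example (usage 24 is the RFC 4121 initiator-seal value; the XNU KRB5_USAGE_* constants are not visible in this hunk and are assumed to agree):

#include <stdint.h>
#include <stdio.h>

#define USAGE_LEN 5   /* matches KRB5_USAGE_LEN */

static void
make_usage(uint32_t usage_no, uint8_t suffix, uint8_t out[USAGE_LEN])
{
        for (int i = 0; i < 4; i++) {
                out[i] = (usage_no >> 8 * (3 - i)) & 0xff; /* big-endian */
        }
        out[4] = suffix;
}

int
main(void)
{
        uint8_t u[USAGE_LEN];

        make_usage(24, 0xAA, u);
        for (int i = 0; i < USAGE_LEN; i++) {
                printf("%02x ", u[i]);   /* prints: 00 00 00 18 aa */
        }
        printf("\n");
        return 0;
}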
- usage_string[i] = ((usage_no >> 8*(3-i)) & 0xff); + for (i = 0; i < 4; i++) { + usage_string[i] = ((usage_no >> 8 * (3 - i)) & 0xff); + } usage_string[i] = suffix; } @@ -831,8 +874,8 @@ krb5_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, void **d size_t blocksize = ctx->enc_mode->block_size; cccbc_iv_decl(blocksize, iv); cccbc_ctx_decl(ctx->enc_mode->size, enc_ctx); - size_t ksize = 8*dklen; - size_t nblocks = (ksize + 8*blocksize - 1) / (8*blocksize); + size_t ksize = 8 * dklen; + size_t nblocks = (ksize + 8 * blocksize - 1) / (8 * blocksize); uint8_t *dkptr; uint8_t block[blocksize]; @@ -844,7 +887,7 @@ krb5_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, void **d for (size_t i = 0; i < nblocks; i++) { cccbc_set_iv(ctx->enc_mode, iv, NULL); cccbc_update(ctx->enc_mode, enc_ctx, iv, 1, block, block); - memcpy(dkptr, block, blocksize); + memcpy(dkptr, block, blocksize); dkptr += blocksize; } } @@ -855,8 +898,9 @@ des_make_key(const uint8_t rawkey[7], uint8_t deskey[8]) uint8_t val = 0; memcpy(deskey, rawkey, 7); - for (int i = 0; i < 7; i++) - val |= ((deskey[i] & 1) << (i+1)); + for (int i = 0; i < 7; i++) { + val |= ((deskey[i] & 1) << (i + 1)); + } deskey[7] = val; ccdes_key_set_odd_parity(deskey, 8); } @@ -868,8 +912,8 @@ krb5_3des_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, voi void *rawkey; uint8_t *kptr, *rptr; - MALLOC(*des3key, void *, 3*cbcmode->block_size, M_TEMP, M_WAITOK | M_ZERO); - krb5_key_derivation(ctx, cons, conslen, &rawkey, 3*(cbcmode->block_size - 1)); + MALLOC(*des3key, void *, 3 * cbcmode->block_size, M_TEMP, M_WAITOK | M_ZERO); + krb5_key_derivation(ctx, cons, conslen, &rawkey, 3 * (cbcmode->block_size - 1)); kptr = (uint8_t *)*des3key; rptr = (uint8_t *)rawkey; @@ -879,7 +923,7 @@ krb5_3des_key_derivation(crypto_ctx_t ctx, const void *cons, size_t conslen, voi kptr += cbcmode->block_size; } - cc_clear(3*(cbcmode->block_size - 1), rawkey); + cc_clear(3 * (cbcmode->block_size - 1), rawkey); FREE(rawkey, M_TEMP); } @@ -908,8 +952,8 @@ cc_key_schedule_create(crypto_ctx_t ctx) case 1: { if (ctx->ks.enc == NULL) { krb5_make_usage(lctx->initiate ? - KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL, - 0xAA, usage_string); + KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL, + 0xAA, usage_string); krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen); MALLOC(ctx->ks.enc, cccbc_ctx *, ctx->enc_mode->size, M_TEMP, M_WAITOK | M_ZERO); cccbc_init(ctx->enc_mode, ctx->ks.enc, ctx->keylen, ekey); @@ -917,8 +961,8 @@ cc_key_schedule_create(crypto_ctx_t ctx) } if (ctx->ks.dec == NULL) { krb5_make_usage(lctx->initiate ? - KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL, - 0xAA, usage_string); + KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL, + 0xAA, usage_string); krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen); MALLOC(ctx->ks.dec, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO); cccbc_init(ctx->dec_mode, ctx->ks.dec, ctx->keylen, ekey); @@ -926,14 +970,14 @@ cc_key_schedule_create(crypto_ctx_t ctx) } if (ctx->ks.ikey[GSS_SND] == NULL) { krb5_make_usage(lctx->initiate ? - KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL, - 0x55, usage_string); + KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL, + 0x55, usage_string); krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ks.ikey[GSS_SND], ctx->keylen); } if (ctx->ks.ikey[GSS_RCV] == NULL) { krb5_make_usage(lctx->initiate ? 
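cc_key_schedule_create, being reindented here, derives its directional keys by swapping the initiator/acceptor usage depending on whether the context was initiated locally. The selection collapses to a single comparison; the usage numbers below follow RFC 4121 and are assumed to match the KRB5_USAGE_* constants used above:

#include <stdint.h>
#include <stdbool.h>

#define USAGE_ACCEPTOR_SEAL  22  /* RFC 4121 KG-USAGE-ACCEPTOR-SEAL */
#define USAGE_INITIATOR_SEAL 24  /* RFC 4121 KG-USAGE-INITIATOR-SEAL */

static uint32_t
seal_usage_for(bool initiate, bool sending)
{
        /*
         * An initiator sending and an acceptor receiving both land on
         * the initiator-seal usage, which is what makes the two ends'
         * encrypt/decrypt schedules line up.
         */
        if (initiate == sending) {
                return USAGE_INITIATOR_SEAL;
        }
        return USAGE_ACCEPTOR_SEAL;
}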
- KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL, - 0x55, usage_string); + KRB5_USAGE_ACCEPTOR_SEAL : KRB5_USAGE_INITIATOR_SEAL, + 0x55, usage_string); krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ks.ikey[GSS_RCV], ctx->keylen); } } @@ -985,7 +1029,7 @@ gss_crypto_ctx_init(struct crypto_ctx *ctx, lucid_context_t lucid) ctx->etype = ctx->gss_ctx->ctx_key.etype; ctx->key = key; - switch(ctx->etype) { + switch (ctx->etype) { case AES128_CTS_HMAC_SHA1_96: case AES256_CTS_HMAC_SHA1_96: ctx->enc_mode = ccaes_cbc_encrypt_mode(); @@ -1000,12 +1044,12 @@ gss_crypto_ctx_init(struct crypto_ctx *ctx, lucid_context_t lucid) ctx->mpad = 1; ctx->digest_size = 12; /* 96 bits */ krb5_make_usage(ctx->gss_ctx->initiate ? - KRB5_USAGE_INITIATOR_SIGN : KRB5_USAGE_ACCEPTOR_SIGN, - 0x99, usage_string); + KRB5_USAGE_INITIATOR_SIGN : KRB5_USAGE_ACCEPTOR_SIGN, + 0x99, usage_string); krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_SND], ctx->keylen); krb5_make_usage(ctx->gss_ctx->initiate ? - KRB5_USAGE_ACCEPTOR_SIGN : KRB5_USAGE_INITIATOR_SIGN, - 0x99, usage_string); + KRB5_USAGE_ACCEPTOR_SIGN : KRB5_USAGE_INITIATOR_SIGN, + 0x99, usage_string); krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_RCV], ctx->keylen); break; case DES3_CBC_SHA1_KD: @@ -1025,29 +1069,27 @@ gss_crypto_ctx_init(struct crypto_ctx *ctx, lucid_context_t lucid) krb5_3des_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ctx->ckey[GSS_RCV]); break; default: - return (ENOTSUP); + return ENOTSUP; } ctx->lock = lck_mtx_alloc_init(gss_krb5_mech_grp, LCK_ATTR_NULL); - return (0); + return 0; } /* * CFX gss support routines */ /* From Heimdal cfx.h file RFC 4121 Cryptoo framework extensions */ -typedef struct gss_cfx_mic_token_desc_struct -{ - uint8_t TOK_ID[2]; /* 04 04 */ +typedef struct gss_cfx_mic_token_desc_struct { + uint8_t TOK_ID[2]; /* 04 04 */ uint8_t Flags; uint8_t Filler[5]; uint8_t SND_SEQ[8]; } gss_cfx_mic_token_desc, *gss_cfx_mic_token; -typedef struct gss_cfx_wrap_token_desc_struct -{ - uint8_t TOK_ID[2]; /* 05 04 */ +typedef struct gss_cfx_wrap_token_desc_struct { + uint8_t TOK_ID[2]; /* 05 04 */ uint8_t Flags; uint8_t Filler; uint8_t EC[2]; @@ -1086,35 +1128,38 @@ gss_krb5_cfx_verify_mic_token(gss_ctx_id_t ctx, gss_cfx_mic_token token) if (token->TOK_ID[0] != mic_cfx_token.TOK_ID[0] || token->TOK_ID[1] != mic_cfx_token.TOK_ID[1]) { printf("Bad mic TOK_ID %x %x\n", token->TOK_ID[0], token->TOK_ID[1]); - return (EBADRPC); + return EBADRPC; } - if (lctx->initiate) + if (lctx->initiate) { flags |= CFXSentByAcceptor; - if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) + } + if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) { flags |= CFXAcceptorSubkey; + } if (token->Flags != flags) { printf("Bad flags received %x exptect %x\n", token->Flags, flags); - return (EBADRPC); + return EBADRPC; } for (i = 0; i < 5; i++) { - if (token->Filler[i] != mic_cfx_token.Filler[i]) + if (token->Filler[i] != mic_cfx_token.Filler[i]) { break; + } } if (i != 5) { printf("Bad mic filler %x @ %d\n", token->Filler[i], i); - return (EBADRPC); + return EBADRPC; } - return (0); + return 0; } uint32_t -gss_krb5_cfx_get_mic(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - gss_qop_t qop __unused, /* qop_req (ignored) */ - gss_buffer_t mbp, /* message mbuf */ - gss_buffer_t mic /* message_token */) +gss_krb5_cfx_get_mic(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + gss_qop_t qop __unused, /* qop_req (ignored) */ + 
gss_buffer_t mbp, /* message mbuf */ + gss_buffer_t mic /* message_token */) { gss_cfx_mic_token_desc token; lucid_context_t lctx = &ctx->gss_lucid_ctx; @@ -1123,20 +1168,23 @@ gss_krb5_cfx_get_mic(uint32_t *minor, /* minor_status */ uint32_t rv; uint64_t seq = htonll(lctx->send_seq); - if (minor == NULL) + if (minor == NULL) { minor = &rv; + } *minor = 0; token = mic_cfx_token; - mic->length = sizeof (token) + cctx->digest_size; + mic->length = sizeof(token) + cctx->digest_size; MALLOC(mic->value, void *, mic->length, M_TEMP, M_WAITOK | M_ZERO); - if (!lctx->initiate) + if (!lctx->initiate) { token.Flags |= CFXSentByAcceptor; - if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) + } + if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) { token.Flags |= CFXAcceptorSubkey; + } memcpy(&token.SND_SEQ, &seq, sizeof(lctx->send_seq)); lctx->send_seq++; //XXX should only update this below on success? Heimdal seems to do it this way header.value = &token; - header.length = sizeof (gss_cfx_mic_token_desc); + header.length = sizeof(gss_cfx_mic_token_desc); *minor = krb5_mic(cctx, NULL, mbp, &header, (uint8_t *)mic->value + sizeof(token), NULL, 0, 0); @@ -1148,29 +1196,31 @@ gss_krb5_cfx_get_mic(uint32_t *minor, /* minor_status */ memcpy(mic->value, &token, sizeof(token)); } - return (*minor ? GSS_S_FAILURE : GSS_S_COMPLETE); + return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE; } uint32_t -gss_krb5_cfx_verify_mic(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - gss_buffer_t mbp, /* message_buffer */ - gss_buffer_t mic, /* message_token */ - gss_qop_t *qop /* qop_state */) +gss_krb5_cfx_verify_mic(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + gss_buffer_t mbp, /* message_buffer */ + gss_buffer_t mic, /* message_token */ + gss_qop_t *qop /* qop_state */) { gss_cfx_mic_token token = mic->value; lucid_context_t lctx = &ctx->gss_lucid_ctx; crypto_ctx_t cctx = &ctx->gss_cryptor; - uint8_t *digest = (uint8_t *)mic->value + sizeof (gss_cfx_mic_token_desc); + uint8_t *digest = (uint8_t *)mic->value + sizeof(gss_cfx_mic_token_desc); int verified = 0; uint64_t seq; uint32_t rv; gss_buffer_desc header; - if (qop) + if (qop) { *qop = GSS_C_QOP_DEFAULT; - if (minor == NULL) + } + if (minor == NULL) { minor = &rv; + } if (mic->length != sizeof(gss_cfx_mic_token_desc) + cctx->digest_size) { printf("mic token wrong length\n"); @@ -1178,31 +1228,32 @@ gss_krb5_cfx_verify_mic(uint32_t *minor, /* minor_status */ goto out; } *minor = gss_krb5_cfx_verify_mic_token(ctx, token); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } header.value = token; - header.length = sizeof (gss_cfx_mic_token_desc); + header.length = sizeof(gss_cfx_mic_token_desc); *minor = krb5_mic(cctx, NULL, mbp, &header, digest, &verified, 0, 0); if (verified) { //XXX errors and such? Sequencing and replay? Not supported in RPCSEC_GSS - memcpy(&seq, token->SND_SEQ, sizeof (uint64_t)); + memcpy(&seq, token->SND_SEQ, sizeof(uint64_t)); seq = ntohll(seq); lctx->recv_seq = seq; } out: - return (verified ? GSS_S_COMPLETE : GSS_S_BAD_SIG); + return verified ? 
GSS_S_COMPLETE : GSS_S_BAD_SIG; } uint32_t -gss_krb5_cfx_get_mic_mbuf(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - gss_qop_t qop __unused ,/* qop_req (ignored) */ - mbuf_t mbp, /* message mbuf */ - size_t offset, /* offest */ - size_t len, /* length */ - gss_buffer_t mic /* message_token */) +gss_krb5_cfx_get_mic_mbuf(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + gss_qop_t qop __unused, /* qop_req (ignored) */ + mbuf_t mbp, /* message mbuf */ + size_t offset, /* offest */ + size_t len, /* length */ + gss_buffer_t mic /* message_token */) { gss_cfx_mic_token_desc token; lucid_context_t lctx = &ctx->gss_lucid_ctx; @@ -1211,17 +1262,20 @@ gss_krb5_cfx_get_mic_mbuf(uint32_t *minor, /* minor_status */ uint64_t seq = htonll(lctx->send_seq); gss_buffer_desc header; - if (minor == NULL) + if (minor == NULL) { minor = &rv; + } *minor = 0; token = mic_cfx_token; - mic->length = sizeof (token) + cctx->digest_size; + mic->length = sizeof(token) + cctx->digest_size; MALLOC(mic->value, void *, mic->length, M_TEMP, M_WAITOK | M_ZERO); - if (!lctx->initiate) + if (!lctx->initiate) { token.Flags |= CFXSentByAcceptor; - if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) + } + if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) { token.Flags |= CFXAcceptorSubkey; + } memcpy(&token.SND_SEQ, &seq, sizeof(lctx->send_seq)); lctx->send_seq++; //XXX should only update this below on success? Heimdal seems to do it this way @@ -1240,37 +1294,40 @@ gss_krb5_cfx_get_mic_mbuf(uint32_t *minor, /* minor_status */ memcpy(mic->value, &token, sizeof(token)); } - return (*minor ? GSS_S_FAILURE : GSS_S_COMPLETE); + return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE; } uint32_t -gss_krb5_cfx_verify_mic_mbuf(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - mbuf_t mbp, /* message_buffer */ - size_t offset, /* offset */ - size_t len, /* length */ - gss_buffer_t mic, /* message_token */ - gss_qop_t *qop /* qop_state */) +gss_krb5_cfx_verify_mic_mbuf(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + mbuf_t mbp, /* message_buffer */ + size_t offset, /* offset */ + size_t len, /* length */ + gss_buffer_t mic, /* message_token */ + gss_qop_t *qop /* qop_state */) { gss_cfx_mic_token token = mic->value; lucid_context_t lctx = &ctx->gss_lucid_ctx; crypto_ctx_t cctx = &ctx->gss_cryptor; - uint8_t *digest = (uint8_t *)mic->value + sizeof (gss_cfx_mic_token_desc); + uint8_t *digest = (uint8_t *)mic->value + sizeof(gss_cfx_mic_token_desc); int verified; uint64_t seq; uint32_t rv; gss_buffer_desc header; - if (qop) + if (qop) { *qop = GSS_C_QOP_DEFAULT; + } - if (minor == NULL) + if (minor == NULL) { minor = &rv; + } *minor = gss_krb5_cfx_verify_mic_token(ctx, token); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } header.length = sizeof(gss_cfx_mic_token_desc); header.value = mic->value; @@ -1278,11 +1335,11 @@ gss_krb5_cfx_verify_mic_mbuf(uint32_t *minor, /* minor_status */ *minor = krb5_mic_mbuf(cctx, NULL, mbp, offset, len, &header, digest, &verified, 0, 0); //XXX errors and such? Sequencing and replay? Not Supported RPCSEC_GSS - memcpy(&seq, token->SND_SEQ, sizeof (uint64_t)); + memcpy(&seq, token->SND_SEQ, sizeof(uint64_t)); seq = ntohll(seq); lctx->recv_seq = seq; - return (verified ? GSS_S_COMPLETE : GSS_S_BAD_SIG); + return verified ? 
GSS_S_COMPLETE : GSS_S_BAD_SIG; } errno_t @@ -1297,44 +1354,52 @@ krb5_cfx_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t *len, int encrypt, int if (encrypt) { read_random(confounder, ccmode->block_size); error = gss_prepend_mbuf(mbp, confounder, ccmode->block_size); - if (error) - return (error); + if (error) { + return error; + } tlen = *len + ccmode->block_size; - if (ctx->mpad > 1) + if (ctx->mpad > 1) { r = ctx->mpad - (tlen % ctx->mpad); + } /* We expect that r == 0 from krb5_cfx_wrap */ if (r != 0) { uint8_t mpad[r]; memset(mpad, 0, r); error = gss_append_mbuf(*mbp, mpad, r); - if (error) - return (error); + if (error) { + return error; + } } tlen += r; error = krb5_mic_mbuf(ctx, NULL, *mbp, 0, tlen, NULL, digest, NULL, 1, 0); - if (error) - return (error); + if (error) { + return error; + } error = krb5_crypt_mbuf(ctx, mbp, tlen, 1, NULL); - if (error) - return (error); + if (error) { + return error; + } error = gss_append_mbuf(*mbp, digest, ctx->digest_size); - if (error) - return (error); + if (error) { + return error; + } *len = tlen + ctx->digest_size; - return (0); + return 0; } else { int verf; cccbc_ctx *ks = NULL; - if (*len < ctx->digest_size + sizeof(confounder)) - return (EBADRPC); + if (*len < ctx->digest_size + sizeof(confounder)) { + return EBADRPC; + } tlen = *len - ctx->digest_size; /* get the digest */ error = mbuf_copydata(*mbp, tlen, ctx->digest_size, digest); /* Remove the digest from the mbuffer */ error = gss_strip_mbuf(*mbp, -ctx->digest_size); - if (error) - return (error); + if (error) { + return error; + } if (reverse) { /* @@ -1348,8 +1413,8 @@ krb5_cfx_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t *len, int encrypt, int lucid_context_t lctx = ctx->gss_ctx; krb5_make_usage(lctx->initiate ? - KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL, - 0xAA, usage_string); + KRB5_USAGE_INITIATOR_SEAL : KRB5_USAGE_ACCEPTOR_SEAL, + 0xAA, usage_string); krb5_key_derivation(ctx, usage_string, KRB5_USAGE_LEN, &ekey, ctx->keylen); MALLOC(ks, cccbc_ctx *, ctx->dec_mode->size, M_TEMP, M_WAITOK | M_ZERO); cccbc_init(ctx->dec_mode, ks, ctx->keylen, ekey); @@ -1357,30 +1422,34 @@ krb5_cfx_crypt_mbuf(crypto_ctx_t ctx, mbuf_t *mbp, size_t *len, int encrypt, int } error = krb5_crypt_mbuf(ctx, mbp, tlen, 0, ks); FREE(ks, M_TEMP); - if (error) - return (error); + if (error) { + return error; + } error = krb5_mic_mbuf(ctx, NULL, *mbp, 0, tlen, NULL, digest, &verf, 1, reverse); - if (error) - return (error); - if (!verf) - return (EBADRPC); + if (error) { + return error; + } + if (!verf) { + return EBADRPC; + } /* strip off the confounder */ error = gss_strip_mbuf(*mbp, ccmode->block_size); - if (error) - return (error); + if (error) { + return error; + } *len = tlen - ccmode->block_size; } - return (0); + return 0; } uint32_t -gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - int conf_flag, /* conf_req_flag */ - gss_qop_t qop __unused, /* qop_req */ - mbuf_t *mbp, /* input/output message_buffer */ - size_t len, /* mbuf chain length */ - int *conf /* conf_state */) +gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + int conf_flag, /* conf_req_flag */ + gss_qop_t qop __unused, /* qop_req */ + mbuf_t *mbp, /* input/output message_buffer */ + size_t len, /* mbuf chain length */ + int *conf /* conf_state */) { gss_cfx_wrap_token_desc token; lucid_context_t lctx = &ctx->gss_lucid_ctx; @@ -1389,17 +1458,21 @@ gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */ uint32_t 
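krb5_cfx_crypt_mbuf above produces, on the encrypt side, E(confounder || plaintext || pad) followed by a truncated HMAC in the clear; the decrypt side peels the digest off the tail, decrypts, and re-verifies. The length bookkeeping as a pure function, faithful to the computation above (as written, the code adds a full mpad of padding when tlen is already aligned), with illustrative parameter names:

#include <stddef.h>

static size_t
cfx_sealed_len(size_t plain_len, size_t block_size, size_t mpad,
    size_t digest_size)
{
        size_t tlen = plain_len + block_size;  /* confounder prepended */
        size_t r = 0;

        if (mpad > 1) {
                r = mpad - (tlen % mpad);      /* equals mpad when aligned */
        }
        return tlen + r + digest_size;         /* encrypted region + MIC */
}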
mv; uint64_t seq = htonll(lctx->send_seq); - if (minor == NULL) + if (minor == NULL) { minor = &mv; - if (conf) + } + if (conf) { *conf = conf_flag; + } *minor = 0; token = wrap_cfx_token; - if (!lctx->initiate) + if (!lctx->initiate) { token.Flags |= CFXSentByAcceptor; - if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) + } + if (lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey) { token.Flags |= CFXAcceptorSubkey; + } memcpy(&token.SND_SEQ, &seq, sizeof(uint64_t)); lctx->send_seq++; if (conf_flag) { @@ -1409,7 +1482,7 @@ gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */ token.Flags |= CFXSealed; memset(pad, 0, cctx->mpad); if (cctx->mpad > 1) { - plen = htons(cctx->mpad - ((len + sizeof (gss_cfx_wrap_token_desc)) % cctx->mpad)); + plen = htons(cctx->mpad - ((len + sizeof(gss_cfx_wrap_token_desc)) % cctx->mpad)); token.EC[0] = ((plen >> 8) & 0xff); token.EC[1] = (plen & 0xff); } @@ -1419,12 +1492,14 @@ gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */ } if (error == 0) { error = gss_append_mbuf(*mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc)); - len += sizeof (gss_cfx_wrap_token_desc); + len += sizeof(gss_cfx_wrap_token_desc); } - if (error == 0) + if (error == 0) { error = krb5_cfx_crypt_mbuf(cctx, mbp, &len, 1, 0); - if (error == 0) + } + if (error == 0) { error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc)); + } } else { uint8_t digest[cctx->digest_size]; gss_buffer_desc header; @@ -1438,16 +1513,16 @@ gss_krb5_cfx_wrap_mbuf(uint32_t *minor, /* minor_status */ if (error == 0) { uint16_t plen = htons(cctx->digest_size); memcpy(token.EC, &plen, 2); - error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof (gss_cfx_wrap_token_desc)); + error = gss_prepend_mbuf(mbp, (uint8_t *)&token, sizeof(gss_cfx_wrap_token_desc)); } } } if (error) { *minor = error; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } /* @@ -1463,25 +1538,25 @@ gss_krb5_cfx_unwrap_rrc_mbuf(mbuf_t header, size_t rrc) } uint32_t -gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - mbuf_t *mbp, /* input/output message_buffer */ - size_t len, /* mbuf chain length */ - int *conf_flag, /* conf_state */ - gss_qop_t *qop /* qop state */) +gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + mbuf_t *mbp, /* input/output message_buffer */ + size_t len, /* mbuf chain length */ + int *conf_flag, /* conf_state */ + gss_qop_t *qop /* qop state */) { gss_cfx_wrap_token_desc token; lucid_context_t lctx = &ctx->gss_lucid_ctx; crypto_ctx_t cctx = &ctx->gss_cryptor; int error, conf; - uint16_t ec = 0 , rrc = 0; + uint16_t ec = 0, rrc = 0; uint64_t seq; int reverse = (*qop == GSS_C_QOP_REVERSE); int initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 
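The unwrap path about to run handles RRC, RFC 4121's "right rotation count": a sender may rotate the wrap-token body right by RRC bytes, and the receiver must rotate it back before decrypting, which is what gss_krb5_cfx_unwrap_rrc_mbuf does over the mbuf chain. A flat-buffer analogue for reference, not the mbuf implementation:

#include <string.h>
#include <stddef.h>

/* Undo a right-rotation of rrc bytes; tmp must hold at least rrc bytes. */
static void
unrotate_rrc(unsigned char *buf, size_t len, size_t rrc, unsigned char *tmp)
{
        if (len == 0) {
                return;
        }
        rrc %= len;
        memcpy(tmp, buf, rrc);                 /* save the rotated head  */
        memmove(buf, buf + rrc, len - rrc);    /* slide the body left    */
        memcpy(buf + (len - rrc), tmp, rrc);   /* reattach head at tail  */
}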
1 : 0); - error = mbuf_copydata(*mbp, 0, sizeof (gss_cfx_wrap_token_desc), &token); - gss_strip_mbuf(*mbp, sizeof (gss_cfx_wrap_token_desc)); - len -= sizeof (gss_cfx_wrap_token_desc); + error = mbuf_copydata(*mbp, 0, sizeof(gss_cfx_wrap_token_desc), &token); + gss_strip_mbuf(*mbp, sizeof(gss_cfx_wrap_token_desc)); + len -= sizeof(gss_cfx_wrap_token_desc); /* Check for valid token */ if (token.TOK_ID[0] != wrap_cfx_token.TOK_ID[0] || @@ -1497,7 +1572,7 @@ gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */ } /* XXX Sequence replay detection */ - memcpy(&seq, token.SND_SEQ, sizeof (seq)); + memcpy(&seq, token.SND_SEQ, sizeof(seq)); seq = ntohll(seq); lctx->recv_seq = seq; @@ -1505,27 +1580,30 @@ gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */ rrc = (token.RRC[0] << 8) | token.RRC[1]; *qop = GSS_C_QOP_DEFAULT; conf = ((token.Flags & CFXSealed) == CFXSealed); - if (conf_flag) + if (conf_flag) { *conf_flag = conf; + } if (conf) { gss_cfx_wrap_token_desc etoken; - if (rrc) /* Handle Right rotation count */ + if (rrc) { /* Handle Right rotation count */ gss_krb5_cfx_unwrap_rrc_mbuf(*mbp, rrc); + } error = krb5_cfx_crypt_mbuf(cctx, mbp, &len, 0, reverse); if (error) { printf("krb5_cfx_crypt_mbuf %d\n", error); *minor = error; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } - if (len >= sizeof(gss_cfx_wrap_token_desc)) + if (len >= sizeof(gss_cfx_wrap_token_desc)) { len -= sizeof(gss_cfx_wrap_token_desc); - else + } else { goto badrpc; + } mbuf_copydata(*mbp, len, sizeof(gss_cfx_wrap_token_desc), &etoken); /* Verify etoken with the token wich should be the same, except the rc field is always zero */ token.RRC[0] = token.RRC[1] = 0; - if (memcmp(&token, &etoken, sizeof (gss_cfx_wrap_token_desc)) != 0) { + if (memcmp(&token, &etoken, sizeof(gss_cfx_wrap_token_desc)) != 0) { printf("Encrypted token mismach\n"); goto badrpc; } @@ -1537,8 +1615,9 @@ gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */ int verf; gss_buffer_desc header; - if (ec != cctx->digest_size || len >= cctx->digest_size) + if (ec != cctx->digest_size || len >= cctx->digest_size) { goto badrpc; + } len -= cctx->digest_size; mbuf_copydata(*mbp, len, cctx->digest_size, digest); gss_strip_mbuf(*mbp, -cctx->digest_size); @@ -1547,14 +1626,15 @@ gss_krb5_cfx_unwrap_mbuf(uint32_t * minor, /* minor_status */ header.value = &token; header.length = sizeof(gss_cfx_wrap_token_desc); error = krb5_mic_mbuf(cctx, NULL, *mbp, 0, len, &header, digest, &verf, 1, reverse); - if (error) + if (error) { goto badrpc; + } } - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; badrpc: *minor = EBADRPC; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } /* @@ -1562,44 +1642,43 @@ badrpc: */ typedef struct gss_1964_mic_token_desc_struct { - uint8_t TOK_ID[2]; /* 01 01 */ + uint8_t TOK_ID[2]; /* 01 01 */ uint8_t Sign_Alg[2]; - uint8_t Filler[4]; /* ff ff ff ff */ + uint8_t Filler[4]; /* ff ff ff ff */ } gss_1964_mic_token_desc, *gss_1964_mic_token; typedef struct gss_1964_wrap_token_desc_struct { - uint8_t TOK_ID[2]; /* 02 01 */ + uint8_t TOK_ID[2]; /* 02 01 */ uint8_t Sign_Alg[2]; uint8_t Seal_Alg[2]; - uint8_t Filler[2]; /* ff ff */ + uint8_t Filler[2]; /* ff ff */ } gss_1964_wrap_token_desc, *gss_1964_wrap_token; typedef struct gss_1964_delete_token_desc_struct { - uint8_t TOK_ID[2]; /* 01 02 */ + uint8_t TOK_ID[2]; /* 01 02 */ uint8_t Sign_Alg[2]; - uint8_t Filler[4]; /* ff ff ff ff */ + uint8_t Filler[4]; /* ff ff ff ff */ } gss_1964_delete_token_desc, *gss_1964_delete_token; typedef struct 
gss_1964_header_desc_struct { - uint8_t App0; /* 0x60 Application 0 constructed */ - uint8_t AppLen[]; /* Variable Der length */ + uint8_t App0; /* 0x60 Application 0 constructed */ + uint8_t AppLen[]; /* Variable Der length */ } gss_1964_header_desc, *gss_1964_header; typedef union { - gss_1964_mic_token_desc mic_tok; - gss_1964_wrap_token_desc wrap_tok; - gss_1964_delete_token_desc del_tok; + gss_1964_mic_token_desc mic_tok; + gss_1964_wrap_token_desc wrap_tok; + gss_1964_delete_token_desc del_tok; } gss_1964_tok_type __attribute__((transparent_union)); -typedef struct gss_1964_token_body_struct -{ - uint8_t OIDType; /* 0x06 */ - uint8_t OIDLen; /* 0x09 */ - uint8_t kerb_mech[9]; /* Der Encode kerberos mech 1.2.840.113554.1.2.2 - 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 */ +typedef struct gss_1964_token_body_struct { + uint8_t OIDType; /* 0x06 */ + uint8_t OIDLen; /* 0x09 */ + uint8_t kerb_mech[9]; /* Der Encode kerberos mech 1.2.840.113554.1.2.2 + * 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x01, 0x02, 0x02 */ gss_1964_tok_type body; uint8_t SND_SEQ[8]; - uint8_t Hash[]; /* Mic */ + uint8_t Hash[]; /* Mic */ } gss_1964_token_body_desc, *gss_1964_token_body; @@ -1649,15 +1728,17 @@ gss_krb5_der_length_get(uint8_t **pp) flen = *p & 0x7f; if (*p++ & 0x80) { - if (flen > sizeof(uint32_t)) - return (-1); - while (flen--) + if (flen > sizeof(uint32_t)) { + return -1; + } + while (flen--) { len = (len << 8) + *p++; + } } else { len = flen; } *pp = p; - return (len); + return len; } /* @@ -1667,10 +1748,10 @@ static int gss_krb5_der_length_size(int len) { return - len < (1 << 7) ? 1 : - len < (1 << 8) ? 2 : - len < (1 << 16) ? 3 : - len < (1 << 24) ? 4 : 5; + len < (1 << 7) ? 1 : + len < (1 << 8) ? 2 : + len < (1 << 16) ? 3 : + len < (1 << 24) ? 4 : 5; } /* @@ -1685,10 +1766,11 @@ gss_krb5_der_length_put(uint8_t **pp, int len) if (sz == 1) { *p++ = (uint8_t) len; } else { - *p++ = (uint8_t) ((sz-1) | 0x80); + *p++ = (uint8_t) ((sz - 1) | 0x80); sz -= 1; - while (sz--) + while (sz--) { *p++ = (uint8_t) ((len >> (sz * 8)) & 0xff); + } } *pp = p; @@ -1703,10 +1785,10 @@ gss_krb5_3des_token_put(gss_ctx_id_t ctx, gss_1964_tok_type body, gss_buffer_t h crypto_ctx_t cctx = &ctx->gss_cryptor; uint32_t seq = (uint32_t) (lctx->send_seq++ & 0xffff); size_t toklen = sizeof(gss_1964_token_body_desc) + cctx->digest_size; - size_t alloclen = toklen + sizeof (gss_1964_header_desc) + gss_krb5_der_length_size(toklen + datalen); + size_t alloclen = toklen + sizeof(gss_1964_header_desc) + gss_krb5_der_length_size(toklen + datalen); uint8_t *tokptr; - MALLOC(token, gss_1964_header, alloclen, M_TEMP, M_WAITOK|M_ZERO); + MALLOC(token, gss_1964_header, alloclen, M_TEMP, M_WAITOK | M_ZERO); *token = tok_1964_header; tokptr = token->AppLen; gss_krb5_der_length_put(&tokptr, toklen + datalen); @@ -1714,10 +1796,12 @@ gss_krb5_3des_token_put(gss_ctx_id_t ctx, gss_1964_tok_type body, gss_buffer_t h *tokbody = body_1964_token; /* Initalize the token body */ tokbody->body = body; /* and now set the body to the token type passed in */ seq = htonl(seq); - for (int i = 0; i < 4; i++) + for (int i = 0; i < 4; i++) { tokbody->SND_SEQ[i] = (uint8_t)((seq >> (i * 8)) & 0xff); - for (int i = 4; i < 8; i++) + } + for (int i = 4; i < 8; i++) { tokbody->SND_SEQ[i] = lctx->initiate ? 
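The gss_krb5_der_length_put/get pair reformatted above implements DER definite lengths: values under 0x80 are encoded in a single byte, anything larger gets a prefix byte of 0x80|n followed by n big-endian length bytes, so 0x7f encodes as 7f, 0x80 as 81 80, and 0x1234 as 82 12 34. A self-checking sketch of the encoder:

#include <stdint.h>
#include <assert.h>

static int
der_len_put(uint8_t *p, uint32_t len)
{
        if (len < 0x80) {
                p[0] = (uint8_t)len;           /* short form */
                return 1;
        }
        int n = (len < 0x100) ? 1 : (len < 0x10000) ? 2 :
            (len < 0x1000000) ? 3 : 4;
        p[0] = 0x80 | n;                       /* long form: count byte */
        for (int i = 0; i < n; i++) {
                p[1 + i] = (len >> (8 * (n - 1 - i))) & 0xff;
        }
        return 1 + n;
}

int
main(void)
{
        uint8_t buf[5];

        assert(der_len_put(buf, 0x7f) == 1 && buf[0] == 0x7f);
        assert(der_len_put(buf, 0x1234) == 3 &&
            buf[0] == 0x82 && buf[1] == 0x12 && buf[2] == 0x34);
        return 0;
}

The encoded size (1 to 5 bytes) is exactly what gss_krb5_der_length_size's ternary ladder computes.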
0x00 : 0xff; + } size_t blocksize = cctx->enc_mode->block_size; cccbc_iv_decl(blocksize, iv); @@ -1734,7 +1818,7 @@ gss_krb5_3des_token_put(gss_ctx_id_t ctx, gss_1964_tok_type body, gss_buffer_t h static int gss_krb5_3des_token_get(gss_ctx_id_t ctx, gss_buffer_t intok, - gss_1964_tok_type body, gss_buffer_t hash, size_t *offset, size_t *len, int reverse) + gss_1964_tok_type body, gss_buffer_t hash, size_t *offset, size_t *len, int reverse) { gss_1964_header token = intok->value; gss_1964_token_body tokbody; @@ -1749,30 +1833,32 @@ gss_krb5_3des_token_get(gss_ctx_id_t ctx, gss_buffer_t intok, if (token->App0 != tok_1964_header.App0) { printf("%s: bad framing\n", __func__); printgbuf(__func__, intok); - return (EBADRPC); + return EBADRPC; } tokptr = token->AppLen; length = gss_krb5_der_length_get(&tokptr); if (length < 0) { printf("%s: invalid length\n", __func__); printgbuf(__func__, intok); - return (EBADRPC); + return EBADRPC; } - toklen = sizeof (gss_1964_header_desc) + gss_krb5_der_length_size(length) - + sizeof (gss_1964_token_body_desc); + toklen = sizeof(gss_1964_header_desc) + gss_krb5_der_length_size(length) + + sizeof(gss_1964_token_body_desc); if (intok->length < toklen + cctx->digest_size) { printf("%s: token to short", __func__); printf("toklen = %d, length = %d\n", (int)toklen, (int)length); printgbuf(__func__, intok); - return (EBADRPC); + return EBADRPC; } - if (offset) + if (offset) { *offset = toklen + cctx->digest_size; + } - if (len) - *len = length - sizeof (gss_1964_token_body_desc) - cctx->digest_size; + if (len) { + *len = length - sizeof(gss_1964_token_body_desc) - cctx->digest_size; + } tokbody = (gss_1964_token_body)tokptr; if (tokbody->OIDType != body_1964_token.OIDType || @@ -1780,12 +1866,12 @@ gss_krb5_3des_token_get(gss_ctx_id_t ctx, gss_buffer_t intok, memcmp(tokbody->kerb_mech, body_1964_token.kerb_mech, tokbody->OIDLen) != 0) { printf("%s: Invalid mechanism\n", __func__); printgbuf(__func__, intok); - return (EBADRPC); + return EBADRPC; } if (memcmp(&tokbody->body, &body, sizeof(gss_1964_tok_type)) != 0) { printf("%s: Invalid body\n", __func__); printgbuf(__func__, intok); - return (EBADRPC); + return EBADRPC; } size_t blocksize = cctx->enc_mode->block_size; uint8_t *block = tokbody->SND_SEQ; @@ -1798,30 +1884,30 @@ gss_krb5_3des_token_get(gss_ctx_id_t ctx, gss_buffer_t intok, cccbc_update(cctx->dec_mode, dec_ctx, iv, 1, block, block); initiate = lctx->initiate ? (reverse ? 0 : 1) : (reverse ? 1 : 0); - for(int i = 4; i < 8; i++) { + for (int i = 4; i < 8; i++) { if (tokbody->SND_SEQ[i] != (initiate ? 
0xff : 0x00)) { printf("%s: Invalid des mac\n", __func__); printgbuf(__func__, intok); - return (EAUTH); + return EAUTH; } } - memcpy(&seq, tokbody->SND_SEQ, sizeof (uint32_t)); + memcpy(&seq, tokbody->SND_SEQ, sizeof(uint32_t)); lctx->recv_seq = ntohl(seq); assert(hash->length >= cctx->digest_size); memcpy(hash->value, tokbody->Hash, cctx->digest_size); - return (0); + return 0; } uint32_t -gss_krb5_3des_get_mic(uint32_t *minor, /* minor status */ - gss_ctx_id_t ctx, /* krb5 context id */ - gss_qop_t qop __unused, /* qop_req (ignored) */ - gss_buffer_t mbp, /* message buffer in */ - gss_buffer_t mic) /* mic token out */ +gss_krb5_3des_get_mic(uint32_t *minor, /* minor status */ + gss_ctx_id_t ctx, /* krb5 context id */ + gss_qop_t qop __unused, /* qop_req (ignored) */ + gss_buffer_t mbp, /* message buffer in */ + gss_buffer_t mic) /* mic token out */ { gss_1964_mic_token_desc tokbody = mic_1964_token; crypto_ctx_t cctx = &ctx->gss_cryptor; @@ -1833,26 +1919,27 @@ gss_krb5_3des_get_mic(uint32_t *minor, /* minor status */ hash.value = hashval; tokbody.Sign_Alg[0] = 0x04; /* lctx->keydata.lucid_protocol_u.data_1964.sign_alg */ tokbody.Sign_Alg[1] = 0x00; - header.length = sizeof (gss_1964_mic_token_desc); - header.value = & tokbody; + header.length = sizeof(gss_1964_mic_token_desc); + header.value = &tokbody; /* Hash the data */ *minor = krb5_mic(cctx, &header, mbp, NULL, hashval, NULL, 0, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } /* Make the token */ gss_krb5_3des_token_put(ctx, tokbody, &hash, 0, mic); - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } uint32_t gss_krb5_3des_verify_mic(uint32_t *minor, - gss_ctx_id_t ctx, - gss_buffer_t mbp, - gss_buffer_t mic, - gss_qop_t *qop) + gss_ctx_id_t ctx, + gss_buffer_t mbp, + gss_buffer_t mic, + gss_qop_t *qop) { crypto_ctx_t cctx = &ctx->gss_cryptor; uint8_t hashval[cctx->digest_size]; @@ -1868,28 +1955,31 @@ gss_krb5_3des_verify_mic(uint32_t *minor, header.length = sizeof(gss_1964_mic_token_desc); header.value = &mtok; - if (qop) + if (qop) { *qop = GSS_C_QOP_DEFAULT; + } *minor = gss_krb5_3des_token_get(ctx, mic, mtok, &hash, NULL, NULL, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } *minor = krb5_mic(cctx, &header, mbp, NULL, hashval, &verf, 0, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } - return (verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG); + return verf ? 
GSS_S_COMPLETE : GSS_S_BAD_SIG; } uint32_t gss_krb5_3des_get_mic_mbuf(uint32_t *minor, - gss_ctx_id_t ctx, - gss_qop_t qop __unused, - mbuf_t mbp, - size_t offset, - size_t len, - gss_buffer_t mic) + gss_ctx_id_t ctx, + gss_qop_t qop __unused, + mbuf_t mbp, + size_t offset, + size_t len, + gss_buffer_t mic) { gss_1964_mic_token_desc tokbody = mic_1964_token; crypto_ctx_t cctx = &ctx->gss_cryptor; @@ -1901,28 +1991,29 @@ gss_krb5_3des_get_mic_mbuf(uint32_t *minor, hash.value = hashval; tokbody.Sign_Alg[0] = 0x04; /* lctx->key_data.lucid_protocol_u.data_4121.sign_alg */ tokbody.Sign_Alg[1] = 0x00; - header.length = sizeof (gss_1964_mic_token_desc); + header.length = sizeof(gss_1964_mic_token_desc); header.value = &tokbody; /* Hash the data */ *minor = krb5_mic_mbuf(cctx, &header, mbp, offset, len, NULL, hashval, NULL, 0, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } /* Make the token */ gss_krb5_3des_token_put(ctx, tokbody, &hash, 0, mic); - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } uint32_t gss_krb5_3des_verify_mic_mbuf(uint32_t *minor, - gss_ctx_id_t ctx, - mbuf_t mbp, - size_t offset, - size_t len, - gss_buffer_t mic, - gss_qop_t *qop) + gss_ctx_id_t ctx, + mbuf_t mbp, + size_t offset, + size_t len, + gss_buffer_t mic, + gss_qop_t *qop) { crypto_ctx_t cctx = &ctx->gss_cryptor; uint8_t hashval[cctx->digest_size]; @@ -1938,31 +2029,34 @@ gss_krb5_3des_verify_mic_mbuf(uint32_t *minor, header.length = sizeof(gss_1964_mic_token_desc); header.value = &mtok; - if (qop) + if (qop) { *qop = GSS_C_QOP_DEFAULT; + } *minor = gss_krb5_3des_token_get(ctx, mic, mtok, &hash, NULL, NULL, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } *minor = krb5_mic_mbuf(cctx, &header, mbp, offset, len, NULL, hashval, &verf, 0, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } - return (verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG); + return verf ? GSS_S_COMPLETE : GSS_S_BAD_SIG; } uint32_t gss_krb5_3des_wrap_mbuf(uint32_t *minor, - gss_ctx_id_t ctx, - int conf_flag, - gss_qop_t qop __unused, - mbuf_t *mbp, - size_t len, - int *conf_state) + gss_ctx_id_t ctx, + int conf_flag, + gss_qop_t qop __unused, + mbuf_t *mbp, + size_t len, + int *conf_state) { crypto_ctx_t cctx = &ctx->gss_cryptor; - const struct ccmode_cbc *ccmode = cctx->enc_mode; + const struct ccmode_cbc *ccmode = cctx->enc_mode; uint8_t padlen; uint8_t pad[8]; uint8_t confounder[ccmode->block_size]; @@ -1972,8 +2066,9 @@ gss_krb5_3des_wrap_mbuf(uint32_t *minor, gss_buffer_desc hash; uint8_t hashval[cctx->digest_size]; - if (conf_state) + if (conf_state) { *conf_state = conf_flag; + } hash.length = cctx->digest_size; hash.value = hashval; @@ -1982,54 +2077,59 @@ gss_krb5_3des_wrap_mbuf(uint32_t *minor, /* conf_flag ? lctx->key_data.lucid_protocol_u.data_1964.seal_alg : 0xffff */ tokbody.Seal_Alg[0] = conf_flag ? 0x02 : 0xff; tokbody.Seal_Alg[1] = conf_flag ? 
0x00 : 0xff; - header.length = sizeof (gss_1964_wrap_token_desc); + header.length = sizeof(gss_1964_wrap_token_desc); header.value = &tokbody; /* Prepend confounder */ read_random(confounder, ccmode->block_size); *minor = gss_prepend_mbuf(mbp, confounder, ccmode->block_size); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } /* Append trailer of up to 8 bytes and set pad length in each trailer byte */ padlen = 8 - len % 8; - for (int i = 0; i < padlen; i++) + for (int i = 0; i < padlen; i++) { pad[i] = padlen; + } *minor = gss_append_mbuf(*mbp, pad, padlen); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } len += ccmode->block_size + padlen; /* Hash the data */ *minor = krb5_mic_mbuf(cctx, &header, *mbp, 0, len, NULL, hashval, NULL, 0, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } /* Make the token */ gss_krb5_3des_token_put(ctx, tokbody, &hash, len, &mic); if (conf_flag) { *minor = krb5_crypt_mbuf(cctx, mbp, len, 1, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } } *minor = gss_prepend_mbuf(mbp, mic.value, mic.length); - return (*minor ? GSS_S_FAILURE : GSS_S_COMPLETE); + return *minor ? GSS_S_FAILURE : GSS_S_COMPLETE; } uint32_t gss_krb5_3des_unwrap_mbuf(uint32_t *minor, - gss_ctx_id_t ctx, - mbuf_t *mbp, - size_t len, - int *conf_state, - gss_qop_t *qop) + gss_ctx_id_t ctx, + mbuf_t *mbp, + size_t len, + int *conf_state, + gss_qop_t *qop) { crypto_ctx_t cctx = &ctx->gss_cryptor; - const struct ccmode_cbc *ccmode = cctx->dec_mode; + const struct ccmode_cbc *ccmode = cctx->dec_mode; size_t length = 0, offset; gss_buffer_desc hash; uint8_t hashval[cctx->digest_size]; @@ -2045,16 +2145,18 @@ gss_krb5_3des_unwrap_mbuf(uint32_t *minor, if (len < GSS_KRB5_3DES_MAXTOKSZ) { *minor = EBADRPC; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } - if (*qop == GSS_C_QOP_REVERSE) + if (*qop == GSS_C_QOP_REVERSE) { reverse = 1; + } *qop = GSS_C_QOP_DEFAULT; *minor = mbuf_copydata(*mbp, 0, itoken.length, itoken.value); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } hash.length = cctx->digest_size; hash.value = hashval; @@ -2065,31 +2167,36 @@ gss_krb5_3des_unwrap_mbuf(uint32_t *minor, for (cflag = 1; cflag >= 0; cflag--) { *minor = gss_krb5_3des_token_get(ctx, &itoken, wrap, &hash, &offset, &length, reverse); - if (*minor == 0) + if (*minor == 0) { break; + } wrap.Seal_Alg[0] = 0xff; wrap.Seal_Alg[0] = 0xff; } - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } - if (conf_state) + if (conf_state) { *conf_state = cflag; + } /* * Seperate off the header */ *minor = gss_normalize_mbuf(*mbp, offset, &length, &smb, &tmb, 0); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } assert(tmb == NULL); /* Decrypt the chain if needed */ if (cflag) { *minor = krb5_crypt_mbuf(cctx, &smb, length, 0, NULL); - if (*minor) - return (GSS_S_FAILURE); + if (*minor) { + return GSS_S_FAILURE; + } } /* Verify the mic */ @@ -2097,15 +2204,18 @@ gss_krb5_3des_unwrap_mbuf(uint32_t *minor, header.value = &wrap; *minor = krb5_mic_mbuf(cctx, &header, smb, 0, length, NULL, hashval, &verified, 0, 0); - if (!verified) - return (GSS_S_BAD_SIG); - if (*minor) - return (GSS_S_FAILURE); + if (!verified) { + return GSS_S_BAD_SIG; + } + if (*minor) { + return GSS_S_FAILURE; + } /* Get the pad bytes */ *minor = mbuf_copydata(smb, length - 1, 1, &padlen); - if (*minor) - return (GSS_S_FAILURE); 
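A note on the padding being read back at this point: the 3DES wrap path above appends between 1 and 8 trailing bytes, each byte holding the pad length (the RFC 1964 analogue of PKCS#5 padding), and unwrap recovers that count from the last plaintext byte. A minimal standalone sketch of both directions; the helper names are invented for illustration, and unlike the kernel path this version also range-checks the recovered count:

#include <stddef.h>
#include <stdint.h>

/* Append 1..8 pad bytes, each equal to the pad length (buf must have
 * room for len + 8); returns the padded length. */
static size_t
pad1964_add(uint8_t *buf, size_t len)
{
	uint8_t padlen = (uint8_t)(8 - len % 8);

	for (uint8_t i = 0; i < padlen; i++) {
		buf[len + i] = padlen;
	}
	return len + padlen;
}

/* Recover and validate the pad count from the final byte. */
static int
pad1964_strip(const uint8_t *buf, size_t len, size_t *newlen)
{
	uint8_t padlen;

	if (len == 0) {
		return -1;
	}
	padlen = buf[len - 1];
	if (padlen < 1 || padlen > 8 || padlen > len) {
		return -1;
	}
	*newlen = len - padlen;
	return 0;
}

Because every pad byte carries the same value, no explicit length field is needed: the last byte alone tells the receiver how much to strip.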
+ if (*minor) { + return GSS_S_FAILURE; + } /* Strip the confounder and trailing pad bytes */ gss_strip_mbuf(smb, -padlen); @@ -2116,7 +2226,7 @@ gss_krb5_3des_unwrap_mbuf(uint32_t *minor, *mbp = smb; } - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } static const char * @@ -2124,13 +2234,13 @@ etype_name(etypes etype) { switch (etype) { case DES3_CBC_SHA1_KD: - return ("des3-cbc-sha1"); + return "des3-cbc-sha1"; case AES128_CTS_HMAC_SHA1_96: - return ("aes128-cts-hmac-sha1-96"); + return "aes128-cts-hmac-sha1-96"; case AES256_CTS_HMAC_SHA1_96: - return ("aes-cts-hmac-sha1-96"); + return "aes-cts-hmac-sha1-96"; default: - return ("unknown enctype"); + return "unknown enctype"; } } @@ -2139,13 +2249,13 @@ supported_etype(uint32_t proto, etypes etype) { const char *proto_name; - switch(proto) { + switch (proto) { case 0: /* RFC 1964 */ proto_name = "RFC 1964 krb5 gss mech"; switch (etype) { case DES3_CBC_SHA1_KD: - return (1); + return 1; default: break; } @@ -2156,7 +2266,7 @@ supported_etype(uint32_t proto, etypes etype) switch (etype) { case AES256_CTS_HMAC_SHA1_96: case AES128_CTS_HMAC_SHA1_96: - return (1); + return 1; default: break; } @@ -2166,199 +2276,214 @@ supported_etype(uint32_t proto, etypes etype) break; } printf("%s: Non supported encryption %s (%d) type for protocol %s (%d)\n", - __func__, etype_name(etype), etype, proto_name, proto); - return (0); + __func__, etype_name(etype), etype, proto_name, proto); + return 0; } /* * Kerberos gss mech entry points */ uint32_t -gss_krb5_get_mic(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - gss_qop_t qop, /* qop_req */ - gss_buffer_t mbp, /* message buffer */ - gss_buffer_t mic /* message_token */) +gss_krb5_get_mic(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + gss_qop_t qop, /* qop_req */ + gss_buffer_t mbp, /* message buffer */ + gss_buffer_t mic /* message_token */) { uint32_t minor_stat = 0; - if (minor == NULL) + if (minor == NULL) { minor = &minor_stat; + } *minor = 0; /* Validate context */ - if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) - return (GSS_S_NO_CONTEXT); + if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) { + return GSS_S_NO_CONTEXT; + } if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) { *minor = ENOTSUP; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } - switch(ctx->gss_lucid_ctx.key_data.proto) { + switch (ctx->gss_lucid_ctx.key_data.proto) { case 0: /* RFC 1964 DES3 case */ - return (gss_krb5_3des_get_mic(minor, ctx, qop, mbp, mic)); + return gss_krb5_3des_get_mic(minor, ctx, qop, mbp, mic); case 1: /* RFC 4121 CFX case */ - return (gss_krb5_cfx_get_mic(minor, ctx, qop, mbp, mic)); + return gss_krb5_cfx_get_mic(minor, ctx, qop, mbp, mic); } - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } uint32_t -gss_krb5_verify_mic(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - gss_buffer_t mbp, /* message_buffer */ - gss_buffer_t mic, /* message_token */ - gss_qop_t *qop /* qop_state */) +gss_krb5_verify_mic(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + gss_buffer_t mbp, /* message_buffer */ + gss_buffer_t mic, /* message_token */ + gss_qop_t *qop /* qop_state */) { uint32_t minor_stat = 0; gss_qop_t qop_val = GSS_C_QOP_DEFAULT; - if (minor == NULL) + if (minor == NULL) { minor = &minor_stat; - if (qop == NULL) + } + if (qop == NULL) { qop = &qop_val; + } *minor = 0; /* Validate context */ - if (ctx == NULL || 
((lucid_context_version_t)ctx)->version != 1) - return (GSS_S_NO_CONTEXT); + if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) { + return GSS_S_NO_CONTEXT; + } if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) { *minor = ENOTSUP; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } - switch(ctx->gss_lucid_ctx.key_data.proto) { + switch (ctx->gss_lucid_ctx.key_data.proto) { case 0: /* RFC 1964 DES3 case */ - return (gss_krb5_3des_verify_mic(minor, ctx, mbp, mic, qop)); + return gss_krb5_3des_verify_mic(minor, ctx, mbp, mic, qop); case 1: /* RFC 4121 CFX case */ - return (gss_krb5_cfx_verify_mic(minor, ctx, mbp, mic, qop)); + return gss_krb5_cfx_verify_mic(minor, ctx, mbp, mic, qop); } - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } uint32_t -gss_krb5_get_mic_mbuf(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - gss_qop_t qop, /* qop_req */ - mbuf_t mbp, /* message mbuf */ - size_t offset, /* offest */ - size_t len, /* length */ - gss_buffer_t mic /* message_token */) +gss_krb5_get_mic_mbuf(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + gss_qop_t qop, /* qop_req */ + mbuf_t mbp, /* message mbuf */ + size_t offset, /* offest */ + size_t len, /* length */ + gss_buffer_t mic /* message_token */) { uint32_t minor_stat = 0; - if (minor == NULL) + if (minor == NULL) { minor = &minor_stat; + } *minor = 0; - if (len == 0) + if (len == 0) { len = ~(size_t)0; + } /* Validate context */ - if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) - return (GSS_S_NO_CONTEXT); + if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) { + return GSS_S_NO_CONTEXT; + } if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) { *minor = ENOTSUP; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } - switch(ctx->gss_lucid_ctx.key_data.proto) { + switch (ctx->gss_lucid_ctx.key_data.proto) { case 0: /* RFC 1964 DES3 case */ - return (gss_krb5_3des_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic)); + return gss_krb5_3des_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic); case 1: /* RFC 4121 CFX case */ - return (gss_krb5_cfx_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic)); + return gss_krb5_cfx_get_mic_mbuf(minor, ctx, qop, mbp, offset, len, mic); } - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } uint32_t -gss_krb5_verify_mic_mbuf(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - mbuf_t mbp, /* message_buffer */ - size_t offset, /* offset */ - size_t len, /* length */ - gss_buffer_t mic, /* message_token */ - gss_qop_t *qop /* qop_state */) +gss_krb5_verify_mic_mbuf(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + mbuf_t mbp, /* message_buffer */ + size_t offset, /* offset */ + size_t len, /* length */ + gss_buffer_t mic, /* message_token */ + gss_qop_t *qop /* qop_state */) { uint32_t minor_stat = 0; gss_qop_t qop_val = GSS_C_QOP_DEFAULT; - if (minor == NULL) + if (minor == NULL) { minor = &minor_stat; - if (qop == NULL) + } + if (qop == NULL) { qop = &qop_val; + } *minor = 0; - if (len == 0) + if (len == 0) { len = ~(size_t)0; + } /* Validate context */ - if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) - return (GSS_S_NO_CONTEXT); + if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) { + return GSS_S_NO_CONTEXT; + } if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) { *minor = ENOTSUP; - return (GSS_S_FAILURE); + return 
GSS_S_FAILURE; } - switch(ctx->gss_lucid_ctx.key_data.proto) { + switch (ctx->gss_lucid_ctx.key_data.proto) { case 0: /* RFC 1964 DES3 case */ - return (gss_krb5_3des_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop)); + return gss_krb5_3des_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop); case 1: /* RFC 4121 CFX case */ - return (gss_krb5_cfx_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop)); + return gss_krb5_cfx_verify_mic_mbuf(minor, ctx, mbp, offset, len, mic, qop); } - return (GSS_S_COMPLETE); + return GSS_S_COMPLETE; } uint32_t -gss_krb5_wrap_mbuf(uint32_t *minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - int conf_flag, /* conf_req_flag */ - gss_qop_t qop, /* qop_req */ - mbuf_t *mbp, /* input/output message_buffer */ - size_t offset, /* offset */ - size_t len, /* length */ - int *conf_state /* conf state */) +gss_krb5_wrap_mbuf(uint32_t *minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + int conf_flag, /* conf_req_flag */ + gss_qop_t qop, /* qop_req */ + mbuf_t *mbp, /* input/output message_buffer */ + size_t offset, /* offset */ + size_t len, /* length */ + int *conf_state /* conf state */) { uint32_t major, minor_stat = 0; mbuf_t smb, tmb; int conf_val = 0; - if (minor == NULL) + if (minor == NULL) { minor = &minor_stat; - if (conf_state == NULL) + } + if (conf_state == NULL) { conf_state = &conf_val; + } *minor = 0; /* Validate context */ - if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) - return (GSS_S_NO_CONTEXT); + if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) { + return GSS_S_NO_CONTEXT; + } if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) { *minor = ENOTSUP; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } gss_normalize_mbuf(*mbp, offset, &len, &smb, &tmb, 0); - switch(ctx->gss_lucid_ctx.key_data.proto) { + switch (ctx->gss_lucid_ctx.key_data.proto) { case 0: /* RFC 1964 DES3 case */ major = gss_krb5_3des_wrap_mbuf(minor, ctx, conf_flag, qop, &smb, len, conf_state); @@ -2369,49 +2494,53 @@ gss_krb5_wrap_mbuf(uint32_t *minor, /* minor_status */ break; } - if (offset) + if (offset) { gss_join_mbuf(*mbp, smb, tmb); - else { + } else { *mbp = smb; gss_join_mbuf(smb, tmb, NULL); } - return (major); + return major; } uint32_t -gss_krb5_unwrap_mbuf(uint32_t * minor, /* minor_status */ - gss_ctx_id_t ctx, /* context_handle */ - mbuf_t *mbp, /* input/output message_buffer */ - size_t offset, /* offset */ - size_t len, /* length */ - int *conf_flag, /* conf_state */ - gss_qop_t *qop /* qop state */) +gss_krb5_unwrap_mbuf(uint32_t * minor, /* minor_status */ + gss_ctx_id_t ctx, /* context_handle */ + mbuf_t *mbp, /* input/output message_buffer */ + size_t offset, /* offset */ + size_t len, /* length */ + int *conf_flag, /* conf_state */ + gss_qop_t *qop /* qop state */) { uint32_t major, minor_stat = 0; gss_qop_t qop_val = GSS_C_QOP_DEFAULT; int conf_val = 0; mbuf_t smb, tmb; - if (minor == NULL) + if (minor == NULL) { minor = &minor_stat; - if (qop == NULL) + } + if (qop == NULL) { qop = &qop_val; - if (conf_flag == NULL) + } + if (conf_flag == NULL) { conf_flag = &conf_val; + } /* Validate context */ - if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) - return (GSS_S_NO_CONTEXT); + if (ctx == NULL || ((lucid_context_version_t)ctx)->version != 1) { + return GSS_S_NO_CONTEXT; + } if (!supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_cryptor.etype)) { *minor = ENOTSUP; - return (GSS_S_FAILURE); + return GSS_S_FAILURE; } 
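All of the per-message entry points in this hunk share one shape: substitute local storage for optional out-parameters, validate the lucid context version, reject unsupported enctypes, and dispatch on the mech protocol. A distilled sketch of that pattern, with invented demo_* names standing in for the real types and handlers:

#include <stddef.h>
#include <stdint.h>

#define DEMO_S_COMPLETE   0u
#define DEMO_S_FAILURE    1u
#define DEMO_S_NO_CONTEXT 2u

struct demo_ctx {
	uint32_t version;  /* lucid context version, must be 1 */
	uint32_t proto;    /* 0 = RFC 1964 (3DES), 1 = RFC 4121 (CFX) */
};

static uint32_t
demo_op_1964(uint32_t *minor, struct demo_ctx *ctx)
{
	(void)ctx;
	*minor = 0;
	return DEMO_S_COMPLETE;
}

static uint32_t
demo_op_cfx(uint32_t *minor, struct demo_ctx *ctx)
{
	(void)ctx;
	*minor = 0;
	return DEMO_S_COMPLETE;
}

static uint32_t
demo_dispatch(uint32_t *minor, struct demo_ctx *ctx)
{
	uint32_t scratch = 0;

	if (minor == NULL) {       /* callers may pass NULL for minor */
		minor = &scratch;
	}
	*minor = 0;

	if (ctx == NULL || ctx->version != 1) {  /* context sanity check */
		return DEMO_S_NO_CONTEXT;
	}

	switch (ctx->proto) {
	case 0:                    /* RFC 1964 DES3 tokens */
		return demo_op_1964(minor, ctx);
	case 1:                    /* RFC 4121 CFX tokens */
		return demo_op_cfx(minor, ctx);
	default:                   /* unknown mech protocol */
		return DEMO_S_FAILURE;
	}
}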
gss_normalize_mbuf(*mbp, offset, &len, &smb, &tmb, 0); - switch(ctx->gss_lucid_ctx.key_data.proto) { + switch (ctx->gss_lucid_ctx.key_data.proto) { case 0: /* RFC 1964 DES3 case */ major = gss_krb5_3des_unwrap_mbuf(minor, ctx, &smb, len, conf_flag, qop); @@ -2422,14 +2551,14 @@ gss_krb5_unwrap_mbuf(uint32_t * minor, /* minor_status */ break; } - if (offset) + if (offset) { gss_join_mbuf(*mbp, smb, tmb); - else { + } else { *mbp = smb; gss_join_mbuf(smb, tmb, NULL); } - return (major); + return major; } #include @@ -2473,30 +2602,33 @@ xdr_lucid_context(void *data, size_t length, lucid_context_t lctx) printf("%s: Could not decode mech protocol\n", __func__); goto out; } - switch(lctx->key_data.proto) { + switch (lctx->key_data.proto) { case 0: xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_1964.sign_alg); xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_1964.seal_alg); - if (error) + if (error) { printf("%s: Could not decode rfc1964 sign and seal\n", __func__); + } break; case 1: xb_get_32(error, &xb, lctx->key_data.lucid_protocol_u.data_4121.acceptor_subkey); - if (error) + if (error) { printf("%s: Could not decode rfc4121 acceptor_subkey", __func__); + } break; default: printf("%s: Invalid mech protocol %d\n", __func__, (int)lctx->key_data.proto); error = EINVAL; } - if (error) + if (error) { goto out; + } xb_get_32(error, &xb, lctx->ctx_key.etype); if (error) { printf("%s: Could not decode key enctype\n", __func__); goto out; } - switch(lctx->ctx_key.etype) { + switch (lctx->ctx_key.etype) { case DES3_CBC_SHA1_KD: keylen = 24; break; @@ -2518,7 +2650,7 @@ xdr_lucid_context(void *data, size_t length, lucid_context_t lctx) if (lctx->ctx_key.key.key_len != keylen) { error = EINVAL; printf("%s: etype = %d keylen = %d expected keylen = %d\n", __func__, - lctx->ctx_key.etype, lctx->ctx_key.key.key_len, keylen); + lctx->ctx_key.etype, lctx->ctx_key.key.key_len, keylen); goto out; } @@ -2534,7 +2666,7 @@ xdr_lucid_context(void *data, size_t length, lucid_context_t lctx) xb_free(lctx->ctx_key.key.key_val); } out: - return (error); + return error; } gss_ctx_id_t @@ -2542,32 +2674,34 @@ gss_krb5_make_context(void *data, uint32_t datalen) { gss_ctx_id_t ctx; - if (!corecrypto_available()) - return (NULL); + if (!corecrypto_available()) { + return NULL; + } gss_krb5_mech_init(); - MALLOC(ctx, gss_ctx_id_t, sizeof (struct gss_ctx_id_desc), M_TEMP, M_WAITOK | M_ZERO); + MALLOC(ctx, gss_ctx_id_t, sizeof(struct gss_ctx_id_desc), M_TEMP, M_WAITOK | M_ZERO); if (xdr_lucid_context(data, datalen, &ctx->gss_lucid_ctx) || !supported_etype(ctx->gss_lucid_ctx.key_data.proto, ctx->gss_lucid_ctx.ctx_key.etype)) { FREE(ctx, M_TEMP); FREE(data, M_TEMP); - return (NULL); + return NULL; } /* Set up crypto context */ gss_crypto_ctx_init(&ctx->gss_cryptor, &ctx->gss_lucid_ctx); FREE(data, M_TEMP); - return (ctx); + return ctx; } void gss_krb5_destroy_context(gss_ctx_id_t ctx) { - if (ctx == NULL) + if (ctx == NULL) { return; + } gss_crypto_ctx_free(&ctx->gss_cryptor); FREE(ctx->gss_lucid_ctx.ctx_key.key.key_val, M_TEMP); - cc_clear(sizeof (lucid_context_t), &ctx->gss_lucid_ctx); + cc_clear(sizeof(lucid_context_t), &ctx->gss_lucid_ctx); FREE(ctx, M_TEMP); } diff --git a/bsd/nfs/gss/gss_krb5_mech.h b/bsd/nfs/gss/gss_krb5_mech.h index 01386b6da..b900347ce 100644 --- a/bsd/nfs/gss/gss_krb5_mech.h +++ b/bsd/nfs/gss/gss_krb5_mech.h @@ -60,7 +60,7 @@ typedef uint32_t OM_uint32; -#define GSS_S_COMPLETE 0 +#define GSS_S_COMPLETE 0 /* * Some "helper" definitions to make the status code macros obvious. 
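For orientation, the status-code helpers being realigned in this header follow the standard GSS-API major-status packing (RFC 2744): calling errors live in the top byte, routine errors in the next byte, and supplementary flags in the low sixteen bits. A small self-contained illustration, using local macro names rather than the header's:

#include <stdint.h>
#include <stdio.h>

#define DEMO_CALLING_OFFSET 24
#define DEMO_ROUTINE_OFFSET 16
#define DEMO_CALLING_MASK   0xffu
#define DEMO_ROUTINE_MASK   0xffu

/* Mirrors GSS_ERROR(): supplementary bits alone do not signal failure. */
#define DEMO_IS_ERROR(x) \
	((x) & ((DEMO_CALLING_MASK << DEMO_CALLING_OFFSET) | \
	    (DEMO_ROUTINE_MASK << DEMO_ROUTINE_OFFSET)))

int main(void)
{
	uint32_t routine_err = 4u << DEMO_ROUTINE_OFFSET; /* some routine error */
	uint32_t supp_only = 0x2u;                        /* supplementary flag only */

	printf("routine_err -> %s\n", DEMO_IS_ERROR(routine_err) ? "error" : "ok");
	printf("supp_only   -> %s\n", DEMO_IS_ERROR(supp_only) ? "error" : "ok");
	return 0;
}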
@@ -86,17 +86,17 @@ typedef uint32_t OM_uint32; ((x) & (GSS_C_SUPPLEMENTARY_MASK << GSS_C_SUPPLEMENTARY_OFFSET)) #define GSS_ERROR(x) \ ((x) & ((GSS_C_CALLING_ERROR_MASK << GSS_C_CALLING_ERROR_OFFSET) | \ - (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))) + (GSS_C_ROUTINE_ERROR_MASK << GSS_C_ROUTINE_ERROR_OFFSET))) /* * Calling errors: */ #define GSS_S_CALL_INACCESSIBLE_READ \ - (((OM_uint32) 1ul) << GSS_C_CALLING_ERROR_OFFSET) + (((OM_uint32) 1ul) << GSS_C_CALLING_ERROR_OFFSET) #define GSS_S_CALL_INACCESSIBLE_WRITE \ - (((OM_uint32) 2ul) << GSS_C_CALLING_ERROR_OFFSET) + (((OM_uint32) 2ul) << GSS_C_CALLING_ERROR_OFFSET) #define GSS_S_CALL_BAD_STRUCTURE \ - (((OM_uint32) 3ul) << GSS_C_CALLING_ERROR_OFFSET) + (((OM_uint32) 3ul) << GSS_C_CALLING_ERROR_OFFSET) /* * Routine errors: @@ -191,7 +191,7 @@ typedef struct lucid_context *lucid_context_t; * See example below for usage. */ typedef struct lucid_context_version { - uint32_t version; + uint32_t version; /* Structure version number */ } *lucid_context_version_t; @@ -201,15 +201,15 @@ typedef enum etypes { AES256_CTS_HMAC_SHA1_96 = 18, } etypes; -#define KRB5_USAGE_ACCEPTOR_SEAL 22 -#define KRB5_USAGE_ACCEPTOR_SIGN 23 -#define KRB5_USAGE_INITIATOR_SEAL 24 -#define KRB5_USAGE_INITIATOR_SIGN 25 +#define KRB5_USAGE_ACCEPTOR_SEAL 22 +#define KRB5_USAGE_ACCEPTOR_SIGN 23 +#define KRB5_USAGE_INITIATOR_SEAL 24 +#define KRB5_USAGE_INITIATOR_SIGN 25 #define KRB5_USAGE_LEN 5 #define GSS_SND 0 #define GSS_RCV 1 -#define GSS_C_QOP_REVERSE 0x80000000 /* Pseudo QOP value to use as input to gss_krb5_unwrap to allow Sender to unwrap */ +#define GSS_C_QOP_REVERSE 0x80000000 /* Pseudo QOP value to use as input to gss_krb5_unwrap to allow Sender to unwrap */ /* * Key schedule is the cbc state for encryption and decryption. @@ -219,7 +219,7 @@ typedef enum etypes { struct key_schedule { cccbc_ctx *enc; cccbc_ctx *dec; - void *ikey[2]; /* Drived integrity key (same length context key); */ + void *ikey[2]; /* Drived integrity key (same length context key); */ }; /* @@ -245,11 +245,11 @@ typedef struct crypto_ctx { const struct ccmode_cbc *dec_mode; struct key_schedule ks; uint32_t digest_size; - void *ckey[2]; /* Derived checksum key. Same as key for DES3 */ + void *ckey[2]; /* Derived checksum key. 
Same as key for DES3 */ } *crypto_ctx_t; -#define CRYPTO_KS_ALLOCED 0x00001 -#define CRYPTO_CTS_ENABLE 0x00002 +#define CRYPTO_KS_ALLOCED 0x00001 +#define CRYPTO_CTS_ENABLE 0x00002 typedef struct gss_ctx_id_desc { lucid_context gss_lucid_ctx; @@ -262,8 +262,8 @@ typedef struct gss_buffer_desc_struct { } gss_buffer_desc, *gss_buffer_t; uint32_t -gss_release_buffer(uint32_t *, /* minor_status */ - gss_buffer_t); + gss_release_buffer(uint32_t *, /* minor_status */ + gss_buffer_t); /* Per message interfaces for kerberos gss mech in the kernel */ @@ -271,61 +271,61 @@ gss_release_buffer(uint32_t *, /* minor_status */ typedef uint32_t gss_qop_t; uint32_t -gss_krb5_get_mic_mbuf(uint32_t *, /* minor_status */ - gss_ctx_id_t, /* context_handle */ - gss_qop_t, /* qop_req */ - mbuf_t, /* message mbuf */ - size_t, /* offest */ - size_t, /* length */ - gss_buffer_t /* message_token */ - ); + gss_krb5_get_mic_mbuf(uint32_t *, /* minor_status */ + gss_ctx_id_t, /* context_handle */ + gss_qop_t, /* qop_req */ + mbuf_t, /* message mbuf */ + size_t, /* offest */ + size_t, /* length */ + gss_buffer_t /* message_token */ + ); uint32_t -gss_krb5_get_mic(uint32_t *, /* minor_status */ - gss_ctx_id_t, /* context_handle */ - gss_qop_t, /* qop_req */ - gss_buffer_t, /* message buffer */ - gss_buffer_t /* message_token */ - ); + gss_krb5_get_mic(uint32_t *, /* minor_status */ + gss_ctx_id_t, /* context_handle */ + gss_qop_t, /* qop_req */ + gss_buffer_t, /* message buffer */ + gss_buffer_t /* message_token */ + ); uint32_t -gss_krb5_verify_mic(uint32_t *, /* minor_status */ - gss_ctx_id_t, /* context_handle */ - gss_buffer_t, /* message_buffer */ - gss_buffer_t, /* message_token */ - gss_qop_t * /* qop_state */ - ); + gss_krb5_verify_mic(uint32_t *, /* minor_status */ + gss_ctx_id_t, /* context_handle */ + gss_buffer_t, /* message_buffer */ + gss_buffer_t, /* message_token */ + gss_qop_t * /* qop_state */ + ); uint32_t -gss_krb5_verify_mic_mbuf(uint32_t *, /* minor_status */ - gss_ctx_id_t, /* context_handle */ - mbuf_t, /* message_buffer */ - size_t, /* offset */ - size_t, /* length */ - gss_buffer_t, /* message_token */ - gss_qop_t * /* qop_state */ - ); + gss_krb5_verify_mic_mbuf(uint32_t *, /* minor_status */ + gss_ctx_id_t, /* context_handle */ + mbuf_t, /* message_buffer */ + size_t, /* offset */ + size_t, /* length */ + gss_buffer_t, /* message_token */ + gss_qop_t * /* qop_state */ + ); uint32_t -gss_krb5_wrap_mbuf(uint32_t *, /* minor_status */ - gss_ctx_id_t, /* context_handle */ - int, /* conf_req_flag */ - gss_qop_t, /* qop_req */ - mbuf_t *, /* input/output message_buffer */ - size_t, /* offset */ - size_t, /* length */ - int * /* conf_state */ - ); + gss_krb5_wrap_mbuf(uint32_t *, /* minor_status */ + gss_ctx_id_t, /* context_handle */ + int, /* conf_req_flag */ + gss_qop_t, /* qop_req */ + mbuf_t *, /* input/output message_buffer */ + size_t, /* offset */ + size_t, /* length */ + int * /* conf_state */ + ); uint32_t -gss_krb5_unwrap_mbuf(uint32_t *, /* minor_status */ - gss_ctx_id_t, /* context_handle */ - mbuf_t *, /* input/output message_buffer */ - size_t, /* offset */ - size_t, /* length */ - int *, /* conf_state */ - gss_qop_t * /* qop state */ - ); + gss_krb5_unwrap_mbuf(uint32_t *, /* minor_status */ + gss_ctx_id_t, /* context_handle */ + mbuf_t *, /* input/output message_buffer */ + size_t, /* offset */ + size_t, /* length */ + int *, /* conf_state */ + gss_qop_t * /* qop state */ + ); void gss_krb5_destroy_context(gss_ctx_id_t); diff --git a/bsd/nfs/krpc.h b/bsd/nfs/krpc.h index 
5f3b87677..58c9d1dc7 100644 --- a/bsd/nfs/krpc.h +++ b/bsd/nfs/krpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -36,52 +36,52 @@ #ifdef __APPLE_API_PRIVATE int krpc_call(struct sockaddr_in *sin, u_int sotype, - u_int prog, u_int vers, u_int func, - mbuf_t *data, struct sockaddr_in *from); + u_int prog, u_int vers, u_int func, + mbuf_t *data, struct sockaddr_in *from); int krpc_portmap(struct sockaddr_in *sin, - u_int prog, u_int vers, u_int proto, u_int16_t *portp); + u_int prog, u_int vers, u_int proto, u_int16_t *portp); /* * RPC definitions for the portmapper (portmap and rpcbind) */ -#define PMAPPORT 111 -#define PMAPPROG 100000 -#define PMAPVERS 2 -#define PMAPPROC_NULL 0 -#define PMAPPROC_SET 1 -#define PMAPPROC_UNSET 2 -#define PMAPPROC_GETPORT 3 -#define PMAPPROC_DUMP 4 -#define PMAPPROC_CALLIT 5 +#define PMAPPORT 111 +#define PMAPPROG 100000 +#define PMAPVERS 2 +#define PMAPPROC_NULL 0 +#define PMAPPROC_SET 1 +#define PMAPPROC_UNSET 2 +#define PMAPPROC_GETPORT 3 +#define PMAPPROC_DUMP 4 +#define PMAPPROC_CALLIT 5 -#define RPCBPROG PMAPPROG -#define RPCBVERS3 3 -#define RPCBVERS4 4 -#define RPCBPROC_NULL 0 -#define RPCBPROC_SET 1 -#define RPCBPROC_UNSET 2 -#define RPCBPROC_GETADDR 3 -#define RPCBPROC_DUMP 4 -#define RPCBPROC_CALLIT 5 -#define RPCBPROC_BCAST RPCBPROC_CALLIT -#define RPCBPROC_GETTIME 6 -#define RPCBPROC_UADDR2TADDR 7 -#define RPCBPROC_TADDR2UADDR 8 -#define RPCBPROC_GETVERSADDR 9 -#define RPCBPROC_INDIRECT 10 -#define RPCBPROC_GETADDRLIST 11 -#define RPCBPROC_GETSTAT 12 +#define RPCBPROG PMAPPROG +#define RPCBVERS3 3 +#define RPCBVERS4 4 +#define RPCBPROC_NULL 0 +#define RPCBPROC_SET 1 +#define RPCBPROC_UNSET 2 +#define RPCBPROC_GETADDR 3 +#define RPCBPROC_DUMP 4 +#define RPCBPROC_CALLIT 5 +#define RPCBPROC_BCAST RPCBPROC_CALLIT +#define RPCBPROC_GETTIME 6 +#define RPCBPROC_UADDR2TADDR 7 +#define RPCBPROC_TADDR2UADDR 8 +#define RPCBPROC_GETVERSADDR 9 +#define RPCBPROC_INDIRECT 10 +#define RPCBPROC_GETADDRLIST 11 +#define RPCBPROC_GETSTAT 12 /* * RPC definitions for bootparamd */ -#define BOOTPARAM_PROG 100026 -#define BOOTPARAM_VERS 1 -#define BOOTPARAM_WHOAMI 1 -#define BOOTPARAM_GETFILE 2 +#define BOOTPARAM_PROG 100026 +#define BOOTPARAM_VERS 1 +#define BOOTPARAM_WHOAMI 1 +#define BOOTPARAM_GETFILE 2 #endif /* __APPLE_API_PRIVATE */ #endif /* __NFS_KRPC_H__ */ diff --git a/bsd/nfs/krpc_subr.c 
b/bsd/nfs/krpc_subr.c index 7ae7758e0..50e547fb1 100644 --- a/bsd/nfs/krpc_subr.c +++ b/bsd/nfs/krpc_subr.c @@ -87,7 +87,7 @@ * Kernel support for Sun RPC * * Used currently for bootstrapping in nfs diskless configurations. - * + * * Note: will not work on variable-sized rpc args/results. * implicit size-limit of an mbuf. */ @@ -97,36 +97,36 @@ */ struct auth_info { - u_int32_t rp_atype; /* auth type */ - u_int32_t rp_alen; /* auth length */ + u_int32_t rp_atype; /* auth type */ + u_int32_t rp_alen; /* auth length */ }; struct rpc_call { - u_int32_t rp_xid; /* request transaction id */ - int32_t rp_direction; /* call direction (0) */ - u_int32_t rp_rpcvers; /* rpc version (2) */ - u_int32_t rp_prog; /* program */ - u_int32_t rp_vers; /* version */ - u_int32_t rp_proc; /* procedure */ - struct auth_info rp_auth; - struct auth_info rp_verf; + u_int32_t rp_xid; /* request transaction id */ + int32_t rp_direction; /* call direction (0) */ + u_int32_t rp_rpcvers; /* rpc version (2) */ + u_int32_t rp_prog; /* program */ + u_int32_t rp_vers; /* version */ + u_int32_t rp_proc; /* procedure */ + struct auth_info rp_auth; + struct auth_info rp_verf; }; struct rpc_reply { - u_int32_t rp_xid; /* request transaction id */ - int32_t rp_direction; /* call direction (1) */ - int32_t rp_astatus; /* accept status (0: accepted) */ + u_int32_t rp_xid; /* request transaction id */ + int32_t rp_direction; /* call direction (1) */ + int32_t rp_astatus; /* accept status (0: accepted) */ union { u_int32_t rpu_errno; struct { struct auth_info rp_auth; - u_int32_t rp_rstatus; + u_int32_t rp_rstatus; } rpu_ok; } rp_u; }; -#define MIN_REPLY_HDR 16 /* xid, dir, astat, errno */ -#define REPLY_SIZE 24 /* xid, dir, astat, rpu_ok */ +#define MIN_REPLY_HDR 16 /* xid, dir, astat, errno */ +#define REPLY_SIZE 24 /* xid, dir, astat, rpu_ok */ /* * What is the longest we will wait before re-sending a request? @@ -134,7 +134,7 @@ struct rpc_reply { * The re-send loop counts up linearly to this maximum, so the * first complaint will happen after (1+2+3+4+5)=15 seconds. */ -#define MAX_RESEND_DELAY 5 /* seconds */ +#define MAX_RESEND_DELAY 5 /* seconds */ /* copied over from nfs_boot.c for printf format. could put in .h file...
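The arithmetic behind that comment: the retransmit timer grows by one second per attempt until it saturates at MAX_RESEND_DELAY, so the cumulative wait crosses the first-complaint threshold after 1+2+3+4+5 = 15 seconds. A toy demonstration of the schedule:

#include <stdio.h>

#define MAX_RESEND_DELAY 5 /* seconds, mirroring the define above */

int main(void)
{
	int timo = 0, waited = 0;

	for (int attempt = 1; attempt <= 8; attempt++) {
		if (timo < MAX_RESEND_DELAY) {
			timo++;         /* back off linearly... */
		}
		waited += timo;         /* ...then sit at the cap */
		printf("attempt %d: wait %ds (total %ds)\n", attempt, timo, waited);
	}
	return 0;
}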
*/ #define IP_FORMAT "%d.%d.%d.%d" @@ -148,15 +148,15 @@ struct rpc_reply { */ int krpc_portmap( - struct sockaddr_in *sin, /* server address */ - u_int prog, u_int vers, u_int proto, /* host order */ - u_int16_t *portp) /* network order */ + struct sockaddr_in *sin, /* server address */ + u_int prog, u_int vers, u_int proto, /* host order */ + u_int16_t *portp) /* network order */ { struct sdata { - u_int32_t prog; /* call program */ - u_int32_t vers; /* call version */ - u_int32_t proto; /* call protocol */ - u_int32_t port; /* call port (unused) */ + u_int32_t prog; /* call program */ + u_int32_t vers; /* call version */ + u_int32_t proto; /* call protocol */ + u_int32_t port; /* call port (unused) */ } *sdata; struct rdata { u_int16_t pad; @@ -172,8 +172,9 @@ krpc_portmap( } error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &m); - if (error) + if (error) { return error; + } mbuf_setlen(m, sizeof(*sdata)); mbuf_pkthdr_setlen(m, sizeof(*sdata)); sdata = mbuf_data(m); @@ -186,8 +187,9 @@ krpc_portmap( sin->sin_port = htons(PMAPPORT); error = krpc_call(sin, SOCK_DGRAM, PMAPPROG, PMAPVERS, PMAPPROC_GETPORT, &m, NULL); - if (error) + if (error) { return error; + } rdata = mbuf_data(m); @@ -195,11 +197,12 @@ krpc_portmap( *portp = rdata->port; } - if (mbuf_len(m) < sizeof(*rdata) || !rdata->port) + if (mbuf_len(m) < sizeof(*rdata) || !rdata->port) { error = EPROGUNAVAIL; + } mbuf_freem(m); - return (error); + return error; } /* @@ -211,8 +214,8 @@ int krpc_call( struct sockaddr_in *sa, u_int sotype, u_int prog, u_int vers, u_int func, - mbuf_t *data, /* input/output */ - struct sockaddr_in *from_p) /* output */ + mbuf_t *data, /* input/output */ + struct sockaddr_in *from_p) /* output */ { socket_t so; struct sockaddr_in *sin; @@ -223,14 +226,15 @@ krpc_call( size_t len; static u_int32_t xid = ~0xFF; u_int16_t tport; - size_t maxpacket = 1<<16; + size_t maxpacket = 1 << 16; /* * Validate address family. * Sorry, this is INET specific... */ - if (sa->sin_family != AF_INET) - return (EAFNOSUPPORT); + if (sa->sin_family != AF_INET) { + return EAFNOSUPPORT; + } /* Free at end if not null. */ nam = mhead = NULL; @@ -238,8 +242,9 @@ krpc_call( /* * Create socket and set its recieve timeout. */ - if ((error = sock_socket(AF_INET, sotype, 0, 0, 0, &so))) + if ((error = sock_socket(AF_INET, sotype, 0, 0, 0, &so))) { goto out1; + } { struct timeval tv; @@ -247,9 +252,9 @@ krpc_call( tv.tv_sec = 1; tv.tv_usec = 0; - if ((error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) - goto out; - + if ((error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) { + goto out; + } } /* @@ -258,8 +263,9 @@ krpc_call( if (from_p && (sotype == SOCK_DGRAM)) { int on = 1; - if ((error = sock_setsockopt(so, SOL_SOCKET, SO_BROADCAST, &on, sizeof(on)))) + if ((error = sock_setsockopt(so, SOL_SOCKET, SO_BROADCAST, &on, sizeof(on)))) { goto out; + } } /* @@ -267,8 +273,9 @@ krpc_call( * because some NFS servers refuse requests from * non-reserved (non-privileged) ports. */ - if ((error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &m))) + if ((error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &m))) { goto out; + } sin = mbuf_data(m); bzero(sin, sizeof(*sin)); mbuf_setlen(m, sizeof(*sin)); @@ -281,7 +288,7 @@ krpc_call( sin->sin_port = htons(tport); error = sock_bind(so, (struct sockaddr*)sin); } while (error == EADDRINUSE && - tport > IPPORT_RESERVED / 2); + tport > IPPORT_RESERVED / 2); mbuf_freem(m); m = NULL; if (error) { @@ -292,8 +299,9 @@ krpc_call( /* * Setup socket address for the server. 
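Stepping back, krpc_portmap() above is a fixed-format PMAPPROC_GETPORT call: four 32-bit words go out, one comes back. A standalone sketch of the wire format it fills in and parses; the struct and function names here are invented, and where the kernel hands back the port still in network byte order, this version converts it:

#include <stdint.h>
#include <arpa/inet.h> /* htonl/ntohl */

struct pmap_getport_args {
	uint32_t prog;  /* RPC program number, e.g. 100003 for NFS */
	uint32_t vers;  /* program version */
	uint32_t proto; /* IPPROTO_UDP (17) or IPPROTO_TCP (6) */
	uint32_t port;  /* unused in the request, send 0 */
};

static void
pmap_getport_fill(struct pmap_getport_args *a,
    uint32_t prog, uint32_t vers, uint32_t proto)
{
	a->prog = htonl(prog);
	a->vers = htonl(vers);
	a->proto = htonl(proto);
	a->port = 0;
}

/* The reply is a single 32-bit word holding the port; zero means the
 * service is not registered (krpc_portmap maps that to EPROGUNAVAIL). */
static int
pmap_getport_parse(uint32_t reply_word, uint16_t *port)
{
	uint32_t port32 = ntohl(reply_word);

	if (port32 == 0 || port32 > 0xffff) {
		return -1;
	}
	*port = (uint16_t)port32;
	return 0;
}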
*/ - if ((error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &nam))) + if ((error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_SONAME, &nam))) { goto out; + } sin = mbuf_data(nam); mbuf_setlen(nam, sa->sin_len); bcopy((caddr_t)sa, (caddr_t)sin, sa->sin_len); @@ -303,12 +311,14 @@ krpc_call( tv.tv_sec = 60; tv.tv_usec = 0; error = sock_connect(so, mbuf_data(nam), MSG_DONTWAIT); - if (error && (error != EINPROGRESS)) + if (error && (error != EINPROGRESS)) { goto out; + } error = sock_connectwait(so, &tv); if (error) { - if (error == EINPROGRESS) + if (error == EINPROGRESS) { error = ETIMEDOUT; + } printf("krpc_call: error waiting for TCP socket connect: %d\n", error); goto out; } @@ -319,20 +329,25 @@ krpc_call( */ m = *data; *data = NULL; -#if DIAGNOSTIC - if ((mbuf_flags(m) & MBUF_PKTHDR) == 0) +#if DIAGNOSTIC + if ((mbuf_flags(m) & MBUF_PKTHDR) == 0) { panic("krpc_call: send data w/o pkthdr"); - if (mbuf_pkthdr_len(m) < mbuf_len(m)) + } + if (mbuf_pkthdr_len(m) < mbuf_len(m)) { panic("krpc_call: pkthdr.len not set"); + } #endif len = sizeof(*call); - if (sotype == SOCK_STREAM) + if (sotype == SOCK_STREAM) { len += 4; /* account for RPC record marker */ + } mhead = m; - if ((error = mbuf_prepend(&mhead, len, MBUF_WAITOK))) + if ((error = mbuf_prepend(&mhead, len, MBUF_WAITOK))) { goto out; - if ((error = mbuf_pkthdr_setrcvif(mhead, NULL))) + } + if ((error = mbuf_pkthdr_setrcvif(mhead, NULL))) { goto out; + } /* * Fill in the RPC header @@ -364,10 +379,11 @@ krpc_call( timo = 0; for (;;) { struct msghdr msg; - + /* Send RPC request (or re-send). */ - if ((error = mbuf_copym(mhead, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) + if ((error = mbuf_copym(mhead, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) { goto out; + } bzero(&msg, sizeof(msg)); if (sotype == SOCK_STREAM) { msg.msg_name = NULL; @@ -384,11 +400,12 @@ krpc_call( m = NULL; /* Determine new timeout. */ - if (timo < MAX_RESEND_DELAY) + if (timo < MAX_RESEND_DELAY) { timo++; - else - printf("RPC timeout for server " IP_FORMAT "\n", - IP_LIST(&(sin->sin_addr.s_addr))); + } else { + printf("RPC timeout for server " IP_FORMAT "\n", + IP_LIST(&(sin->sin_addr.s_addr))); + } /* * Wait for up to timo seconds for a reply. @@ -397,7 +414,7 @@ krpc_call( secs = timo; while (secs > 0) { size_t readlen; - + if (m) { mbuf_freem(m); m = NULL; @@ -411,40 +428,43 @@ krpc_call( msg.msg_iov = &aio; msg.msg_iovlen = 1; do { - error = sock_receive(so, &msg, MSG_WAITALL, &readlen); - if ((error == EWOULDBLOCK) && (--maxretries <= 0)) - error = ETIMEDOUT; + error = sock_receive(so, &msg, MSG_WAITALL, &readlen); + if ((error == EWOULDBLOCK) && (--maxretries <= 0)) { + error = ETIMEDOUT; + } } while (error == EWOULDBLOCK); if (!error && readlen < aio.iov_len) { - /* only log a message if we got a partial word */ - if (readlen != 0) - printf("short receive (%ld/%ld) from server " IP_FORMAT "\n", - readlen, sizeof(u_int32_t), IP_LIST(&(sin->sin_addr.s_addr))); - error = EPIPE; + /* only log a message if we got a partial word */ + if (readlen != 0) { + printf("short receive (%ld/%ld) from server " IP_FORMAT "\n", + readlen, sizeof(u_int32_t), IP_LIST(&(sin->sin_addr.s_addr))); + } + error = EPIPE; } - if (error) + if (error) { goto out; + } len = ntohl(len) & ~0x80000000; /* * This is SERIOUS! We are out of sync with the sender * and forcing a disconnect/reconnect is all I can do. 
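The four-byte read and the len = ntohl(len) & ~0x80000000 just above implement RPC record marking over TCP (RFC 5531, section 11): the top bit of the record mark flags the final fragment and the low 31 bits carry the fragment length. Framing and unframing in isolation:

#include <stdint.h>
#include <arpa/inet.h>

/* Build the 4-byte record mark that precedes each fragment on TCP. */
static uint32_t
rpc_record_mark(uint32_t fraglen, int last)
{
	return htonl((last ? 0x80000000u : 0u) | (fraglen & 0x7fffffffu));
}

/* Split a received record mark into length and last-fragment flag. */
static void
rpc_record_unmark(uint32_t wire, uint32_t *fraglen, int *last)
{
	uint32_t host = ntohl(wire);

	*last = (host & 0x80000000u) != 0;
	*fraglen = host & 0x7fffffffu;
}

Note that the loop above simply masks the flag bit off and reads that many bytes, effectively assuming single-fragment replies.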
*/ if (len > maxpacket) { - printf("impossible packet length (%ld) from server " IP_FORMAT "\n", - len, IP_LIST(&(sin->sin_addr.s_addr))); - error = EFBIG; - goto out; + printf("impossible packet length (%ld) from server " IP_FORMAT "\n", + len, IP_LIST(&(sin->sin_addr.s_addr))); + error = EFBIG; + goto out; } - + do { - readlen = len; - error = sock_receivembuf(so, NULL, &m, MSG_WAITALL, &readlen); + readlen = len; + error = sock_receivembuf(so, NULL, &m, MSG_WAITALL, &readlen); } while (error == EWOULDBLOCK); if (!error && (len > readlen)) { - printf("short receive (%ld/%ld) from server " IP_FORMAT "\n", - readlen, len, IP_LIST(&(sin->sin_addr.s_addr))); - error = EPIPE; + printf("short receive (%ld/%ld) from server " IP_FORMAT "\n", + readlen, len, IP_LIST(&(sin->sin_addr.s_addr))); + error = EPIPE; } } else { len = maxpacket; @@ -459,24 +479,29 @@ krpc_call( secs--; continue; } - if (error) + if (error) { goto out; + } len = readlen; /* Does the reply contain at least a header? */ - if (len < MIN_REPLY_HDR) + if (len < MIN_REPLY_HDR) { continue; - if (mbuf_len(m) < MIN_REPLY_HDR) + } + if (mbuf_len(m) < MIN_REPLY_HDR) { continue; + } reply = mbuf_data(m); /* Is it the right reply? */ - if (reply->rp_direction != htonl(RPC_REPLY)) + if (reply->rp_direction != htonl(RPC_REPLY)) { continue; + } - if (reply->rp_xid != htonl(xid)) + if (reply->rp_xid != htonl(xid)) { continue; - + } + /* Was RPC accepted? (authorization OK) */ if (reply->rp_astatus != 0) { error = ntohl(reply->rp_u.rpu_errno); @@ -496,8 +521,7 @@ krpc_call( if (mbuf_len(m) < REPLY_SIZE) { error = RPC_SYSTEM_ERR; - } - else { + } else { error = ntohl(reply->rp_u.rpu_ok.rp_rstatus); } @@ -525,15 +549,14 @@ krpc_call( goto out; } - goto gotreply; /* break two levels */ - + goto gotreply; /* break two levels */ } /* while secs */ } /* forever send/receive */ error = ETIMEDOUT; goto out; - gotreply: +gotreply: /* * Pull as much as we can into first mbuf, to make @@ -542,16 +565,19 @@ krpc_call( * XXX - Should not rely on making the entire reply * contiguous (fix callers instead). -gwr */ -#if DIAGNOSTIC - if ((mbuf_flags(m) & MBUF_PKTHDR) == 0) +#if DIAGNOSTIC + if ((mbuf_flags(m) & MBUF_PKTHDR) == 0) { panic("krpc_call: received pkt w/o header?"); + } #endif len = mbuf_pkthdr_len(m); - if (sotype == SOCK_STREAM) + if (sotype == SOCK_STREAM) { len -= 4; /* the RPC record marker was read separately */ + } if (mbuf_len(m) < len) { - if ((error = mbuf_pullup(&m, len))) + if ((error = mbuf_pullup(&m, len))) { goto out; + } reply = mbuf_data(m); } @@ -567,10 +593,14 @@ krpc_call( /* result */ *data = m; - out: +out: sock_close(so); out1: - if (nam) mbuf_freem(nam); - if (mhead) mbuf_freem(mhead); + if (nam) { + mbuf_freem(nam); + } + if (mhead) { + mbuf_freem(mhead); + } return error; } diff --git a/bsd/nfs/nfs.h b/bsd/nfs/nfs.h index 2277221c4..cc09ff674 100644 --- a/bsd/nfs/nfs.h +++ b/bsd/nfs/nfs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
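Back in the receive path of krpc_call() above, each candidate reply is screened before its payload is trusted: the direction word must say reply, the xid must echo the request, and the accept status must be zero before rp_rstatus is even examined. A distilled version of that screen, with demo names for the local types:

#include <stdint.h>
#include <arpa/inet.h>

#define DEMO_RPC_REPLY 1 /* msg_type per RFC 5531: CALL = 0, REPLY = 1 */

struct demo_reply_hdr {
	uint32_t xid;       /* must echo the request xid */
	uint32_t direction; /* DEMO_RPC_REPLY */
	uint32_t astatus;   /* 0: call accepted */
};

/* Returns 1 when the header belongs to our request, 0 to keep waiting. */
static int
rpc_reply_matches(const struct demo_reply_hdr *r, uint32_t want_xid)
{
	if (r->direction != htonl(DEMO_RPC_REPLY)) {
		return 0; /* not a reply at all */
	}
	if (r->xid != htonl(want_xid)) {
		return 0; /* stale or foreign transaction */
	}
	return 1; /* ours; astatus and rstatus are checked next */
}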
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -76,49 +76,49 @@ * Tunable constants for nfs */ -#define NFS_TICKINTVL 5 /* Desired time for a tick (msec) */ -#define NFS_HZ (hz / nfs_ticks) /* Ticks/sec */ +#define NFS_TICKINTVL 5 /* Desired time for a tick (msec) */ +#define NFS_HZ (hz / nfs_ticks) /* Ticks/sec */ extern int nfs_ticks; -#define NFS_TIMEO (1 * NFS_HZ) /* Default timeout = 1 second */ -#define NFS_MINTIMEO (1 * NFS_HZ) /* Min timeout to use */ -#define NFS_MAXTIMEO (60 * NFS_HZ) /* Max timeout to backoff to */ -#define NFS_MINIDEMTIMEO (5 * NFS_HZ) /* Min timeout for non-idempotent ops*/ -#define NFS_MAXREXMIT 100 /* Stop counting after this many */ -#define NFS_RETRANS 10 /* Num of retrans for soft mounts */ -#define NFS_TRYLATERDEL 4 /* Initial try later delay (sec) */ -#define NFS_MAXGRPS 16U /* Max. size of groups list */ -#define NFS_MINATTRTIMO 5 /* Attribute cache timeout in sec */ -#define NFS_MAXATTRTIMO 60 -#define NFS_MINDIRATTRTIMO 5 /* directory attribute cache timeout in sec */ -#define NFS_MAXDIRATTRTIMO 60 -#define NFS_IOSIZE (1024 * 1024) /* suggested I/O size */ -#define NFS_RWSIZE 32768 /* Def. read/write data size <= 32K */ -#define NFS_WSIZE NFS_RWSIZE /* Def. write data size <= 32K */ -#define NFS_RSIZE NFS_RWSIZE /* Def. read data size <= 32K */ -#define NFS_DGRAM_WSIZE 8192 /* UDP Def. write data size <= 8K */ -#define NFS_DGRAM_RSIZE 8192 /* UDP Def. read data size <= 8K */ -#define NFS_READDIRSIZE 32768 /* Def. readdir size */ -#define NFS_DEFRAHEAD 16 /* Def. read ahead # blocks */ -#define NFS_MAXRAHEAD 128 /* Max. read ahead # blocks */ -#define NFS_DEFMAXASYNCWRITES 128 /* Def. max # concurrent async write RPCs */ -#define NFS_DEFASYNCTHREAD 16 /* Def. # nfsiod threads */ -#define NFS_MAXASYNCTHREAD 64 /* max # nfsiod threads */ -#define NFS_ASYNCTHREADMAXIDLE 60 /* Seconds before idle nfsiods are reaped */ -#define NFS_DEFSTATFSRATELIMIT 10 /* Def. max # statfs RPCs per second */ -#define NFS_REQUESTDELAY 10 /* ms interval to check request queue */ -#define NFSRV_MAXWGATHERDELAY 100 /* Max. write gather delay (msec) */ +#define NFS_TIMEO (1 * NFS_HZ) /* Default timeout = 1 second */ +#define NFS_MINTIMEO (1 * NFS_HZ) /* Min timeout to use */ +#define NFS_MAXTIMEO (60 * NFS_HZ) /* Max timeout to backoff to */ +#define NFS_MINIDEMTIMEO (5 * NFS_HZ) /* Min timeout for non-idempotent ops*/ +#define NFS_MAXREXMIT 100 /* Stop counting after this many */ +#define NFS_RETRANS 10 /* Num of retrans for soft mounts */ +#define NFS_TRYLATERDEL 4 /* Initial try later delay (sec) */ +#define NFS_MAXGRPS 16U /* Max. size of groups list */ +#define NFS_MINATTRTIMO 5 /* Attribute cache timeout in sec */ +#define NFS_MAXATTRTIMO 60 +#define NFS_MINDIRATTRTIMO 5 /* directory attribute cache timeout in sec */ +#define NFS_MAXDIRATTRTIMO 60 +#define NFS_IOSIZE (1024 * 1024) /* suggested I/O size */ +#define NFS_RWSIZE 32768 /* Def. 
read/write data size <= 32K */ +#define NFS_WSIZE NFS_RWSIZE /* Def. write data size <= 32K */ +#define NFS_RSIZE NFS_RWSIZE /* Def. read data size <= 32K */ +#define NFS_DGRAM_WSIZE 8192 /* UDP Def. write data size <= 8K */ +#define NFS_DGRAM_RSIZE 8192 /* UDP Def. read data size <= 8K */ +#define NFS_READDIRSIZE 32768 /* Def. readdir size */ +#define NFS_DEFRAHEAD 16 /* Def. read ahead # blocks */ +#define NFS_MAXRAHEAD 128 /* Max. read ahead # blocks */ +#define NFS_DEFMAXASYNCWRITES 128 /* Def. max # concurrent async write RPCs */ +#define NFS_DEFASYNCTHREAD 16 /* Def. # nfsiod threads */ +#define NFS_MAXASYNCTHREAD 64 /* max # nfsiod threads */ +#define NFS_ASYNCTHREADMAXIDLE 60 /* Seconds before idle nfsiods are reaped */ +#define NFS_DEFSTATFSRATELIMIT 10 /* Def. max # statfs RPCs per second */ +#define NFS_REQUESTDELAY 10 /* ms interval to check request queue */ +#define NFSRV_MAXWGATHERDELAY 100 /* Max. write gather delay (msec) */ #ifndef NFSRV_WGATHERDELAY -#define NFSRV_WGATHERDELAY 1 /* Default write gather delay (msec) */ +#define NFSRV_WGATHERDELAY 1 /* Default write gather delay (msec) */ #endif -#define NFS_DIRBLKSIZ 4096 /* size of NFS directory buffers */ +#define NFS_DIRBLKSIZ 4096 /* size of NFS directory buffers */ #if defined(KERNEL) && !defined(DIRBLKSIZ) -#define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ - /* can't be larger than NFS_FABLKSIZE */ +#define DIRBLKSIZ 512 /* XXX we used to use ufs's DIRBLKSIZ */ + /* can't be larger than NFS_FABLKSIZE */ #endif /* default values for unresponsive mount timeouts */ -#define NFS_TPRINTF_INITIAL_DELAY 12 -#define NFS_TPRINTF_DELAY 30 +#define NFS_TPRINTF_INITIAL_DELAY 12 +#define NFS_TPRINTF_DELAY 30 /* * Oddballs @@ -126,15 +126,15 @@ extern int nfs_ticks; #define NFS_CMPFH(n, f, s) \ ((n)->n_fhsize == (s) && !bcmp((caddr_t)(n)->n_fhp, (caddr_t)(f), (s))) #define NFSRV_NDMAXDATA(n) \ - (((n)->nd_vers == NFS_VER3) ? (((n)->nd_nam2) ? \ - NFS_MAXDGRAMDATA : NFSRV_MAXDATA) : NFS_V2MAXDATA) + (((n)->nd_vers == NFS_VER3) ? (((n)->nd_nam2) ? \ + NFS_MAXDGRAMDATA : NFSRV_MAXDATA) : NFS_V2MAXDATA) /* * The IO_METASYNC flag should be implemented for local file systems. * (Until then, it is nothin at all.) */ #ifndef IO_METASYNC -#define IO_METASYNC 0 +#define IO_METASYNC 0 #endif /* @@ -147,66 +147,66 @@ extern int nfs_ticks; * becomes bunk!). * Note that some of these structures come out of their own nfs zones. 
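Of the oddballs above, NFS_CMPFH is simply a length-plus-bytes file-handle comparison; in plain C, with a demo handle type sized for NFSv3's 64-byte maximum, it amounts to:

#include <stddef.h>
#include <string.h>

struct demo_fh {
	size_t len;
	unsigned char data[64]; /* NFSv3 handles are at most 64 bytes */
};

/* Two nodes name the same file iff handle lengths and bytes both match. */
static int
fh_equal(const struct demo_fh *a, const struct demo_fh *b)
{
	return a->len == b->len && memcmp(a->data, b->data, a->len) == 0;
}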
*/ -#define NFS_NODEALLOC 1024 -#define NFS_MNTALLOC 1024 -#define NFS_SVCALLOC 512 +#define NFS_NODEALLOC 1024 +#define NFS_MNTALLOC 1024 +#define NFS_SVCALLOC 512 -#define NFS_ARGSVERSION_XDR 88 /* NFS mount args are in XDR format */ +#define NFS_ARGSVERSION_XDR 88 /* NFS mount args are in XDR format */ -#define NFS_XDRARGS_VERSION_0 0 -#define NFS_MATTR_BITMAP_LEN 1 /* length of mount attributes bitmap */ -#define NFS_MFLAG_BITMAP_LEN 1 /* length of mount flags bitmap */ +#define NFS_XDRARGS_VERSION_0 0 +#define NFS_MATTR_BITMAP_LEN 1 /* length of mount attributes bitmap */ +#define NFS_MFLAG_BITMAP_LEN 1 /* length of mount flags bitmap */ /* NFS mount attributes */ -#define NFS_MATTR_FLAGS 0 /* mount flags (NFS_MATTR_*) */ -#define NFS_MATTR_NFS_VERSION 1 /* NFS protocol version */ -#define NFS_MATTR_NFS_MINOR_VERSION 2 /* NFS protocol minor version */ -#define NFS_MATTR_READ_SIZE 3 /* READ RPC size */ -#define NFS_MATTR_WRITE_SIZE 4 /* WRITE RPC size */ -#define NFS_MATTR_READDIR_SIZE 5 /* READDIR RPC size */ -#define NFS_MATTR_READAHEAD 6 /* block readahead count */ -#define NFS_MATTR_ATTRCACHE_REG_MIN 7 /* minimum attribute cache time */ -#define NFS_MATTR_ATTRCACHE_REG_MAX 8 /* maximum attribute cache time */ -#define NFS_MATTR_ATTRCACHE_DIR_MIN 9 /* minimum attribute cache time for dirs */ -#define NFS_MATTR_ATTRCACHE_DIR_MAX 10 /* maximum attribute cache time for dirs */ -#define NFS_MATTR_LOCK_MODE 11 /* advisory file locking mode (NFS_LOCK_MODE_*) */ -#define NFS_MATTR_SECURITY 12 /* RPC security flavors to use */ -#define NFS_MATTR_MAX_GROUP_LIST 13 /* max # of RPC AUTH_SYS groups */ -#define NFS_MATTR_SOCKET_TYPE 14 /* socket transport type as a netid-like string */ -#define NFS_MATTR_NFS_PORT 15 /* port # to use for NFS protocol */ -#define NFS_MATTR_MOUNT_PORT 16 /* port # to use for MOUNT protocol */ -#define NFS_MATTR_REQUEST_TIMEOUT 17 /* initial RPC request timeout value */ -#define NFS_MATTR_SOFT_RETRY_COUNT 18 /* max RPC retransmissions for soft mounts */ -#define NFS_MATTR_DEAD_TIMEOUT 19 /* how long until unresponsive mount is considered dead */ -#define NFS_MATTR_FH 20 /* file handle for mount directory */ -#define NFS_MATTR_FS_LOCATIONS 21 /* list of locations for the file system */ -#define NFS_MATTR_MNTFLAGS 22 /* VFS mount flags (MNT_*) */ -#define NFS_MATTR_MNTFROM 23 /* fixed string to use for "f_mntfromname" */ -#define NFS_MATTR_REALM 24 /* Realm to authenticate with */ -#define NFS_MATTR_PRINCIPAL 25 /* GSS principal to authenticate with */ -#define NFS_MATTR_SVCPRINCIPAL 26 /* GSS principal to authenticate to, the server principal */ -#define NFS_MATTR_NFS_VERSION_RANGE 27 /* Packed version range to try */ -#define NFS_MATTR_KERB_ETYPE 28 /* Enctype to use for kerberos mounts */ +#define NFS_MATTR_FLAGS 0 /* mount flags (NFS_MATTR_*) */ +#define NFS_MATTR_NFS_VERSION 1 /* NFS protocol version */ +#define NFS_MATTR_NFS_MINOR_VERSION 2 /* NFS protocol minor version */ +#define NFS_MATTR_READ_SIZE 3 /* READ RPC size */ +#define NFS_MATTR_WRITE_SIZE 4 /* WRITE RPC size */ +#define NFS_MATTR_READDIR_SIZE 5 /* READDIR RPC size */ +#define NFS_MATTR_READAHEAD 6 /* block readahead count */ +#define NFS_MATTR_ATTRCACHE_REG_MIN 7 /* minimum attribute cache time */ +#define NFS_MATTR_ATTRCACHE_REG_MAX 8 /* maximum attribute cache time */ +#define NFS_MATTR_ATTRCACHE_DIR_MIN 9 /* minimum attribute cache time for dirs */ +#define NFS_MATTR_ATTRCACHE_DIR_MAX 10 /* maximum attribute cache time for dirs */ +#define NFS_MATTR_LOCK_MODE 11 /* advisory file locking mode 
(NFS_LOCK_MODE_*) */ +#define NFS_MATTR_SECURITY 12 /* RPC security flavors to use */ +#define NFS_MATTR_MAX_GROUP_LIST 13 /* max # of RPC AUTH_SYS groups */ +#define NFS_MATTR_SOCKET_TYPE 14 /* socket transport type as a netid-like string */ +#define NFS_MATTR_NFS_PORT 15 /* port # to use for NFS protocol */ +#define NFS_MATTR_MOUNT_PORT 16 /* port # to use for MOUNT protocol */ +#define NFS_MATTR_REQUEST_TIMEOUT 17 /* initial RPC request timeout value */ +#define NFS_MATTR_SOFT_RETRY_COUNT 18 /* max RPC retransmissions for soft mounts */ +#define NFS_MATTR_DEAD_TIMEOUT 19 /* how long until unresponsive mount is considered dead */ +#define NFS_MATTR_FH 20 /* file handle for mount directory */ +#define NFS_MATTR_FS_LOCATIONS 21 /* list of locations for the file system */ +#define NFS_MATTR_MNTFLAGS 22 /* VFS mount flags (MNT_*) */ +#define NFS_MATTR_MNTFROM 23 /* fixed string to use for "f_mntfromname" */ +#define NFS_MATTR_REALM 24 /* Realm to authenticate with */ +#define NFS_MATTR_PRINCIPAL 25 /* GSS principal to authenticate with */ +#define NFS_MATTR_SVCPRINCIPAL 26 /* GSS principal to authenticate to, the server principal */ +#define NFS_MATTR_NFS_VERSION_RANGE 27 /* Packed version range to try */ +#define NFS_MATTR_KERB_ETYPE 28 /* Enctype to use for kerberos mounts */ /* NFS mount flags */ -#define NFS_MFLAG_SOFT 0 /* soft mount (requests fail if unresponsive) */ -#define NFS_MFLAG_INTR 1 /* allow operations to be interrupted */ -#define NFS_MFLAG_RESVPORT 2 /* use a reserved port */ -#define NFS_MFLAG_NOCONNECT 3 /* don't connect the socket (UDP) */ -#define NFS_MFLAG_DUMBTIMER 4 /* don't estimate RTT dynamically */ -#define NFS_MFLAG_CALLUMNT 5 /* call MOUNTPROC_UMNT on unmount */ -#define NFS_MFLAG_RDIRPLUS 6 /* request additional info when reading directories */ -#define NFS_MFLAG_NONEGNAMECACHE 7 /* don't do negative name caching */ -#define NFS_MFLAG_MUTEJUKEBOX 8 /* don't treat jukebox errors as unresponsive */ -#define NFS_MFLAG_EPHEMERAL 9 /* ephemeral (mirror) mount */ -#define NFS_MFLAG_NOCALLBACK 10 /* don't provide callback RPC service */ -#define NFS_MFLAG_NAMEDATTR 11 /* don't use named attributes */ -#define NFS_MFLAG_NOACL 12 /* don't support ACLs */ -#define NFS_MFLAG_ACLONLY 13 /* only support ACLs - not mode */ -#define NFS_MFLAG_NFC 14 /* send NFC strings */ -#define NFS_MFLAG_NOQUOTA 15 /* don't support QUOTA requests */ -#define NFS_MFLAG_MNTUDP 16 /* MOUNT protocol should use UDP */ -#define NFS_MFLAG_MNTQUICK 17 /* use short timeouts while mounting */ +#define NFS_MFLAG_SOFT 0 /* soft mount (requests fail if unresponsive) */ +#define NFS_MFLAG_INTR 1 /* allow operations to be interrupted */ +#define NFS_MFLAG_RESVPORT 2 /* use a reserved port */ +#define NFS_MFLAG_NOCONNECT 3 /* don't connect the socket (UDP) */ +#define NFS_MFLAG_DUMBTIMER 4 /* don't estimate RTT dynamically */ +#define NFS_MFLAG_CALLUMNT 5 /* call MOUNTPROC_UMNT on unmount */ +#define NFS_MFLAG_RDIRPLUS 6 /* request additional info when reading directories */ +#define NFS_MFLAG_NONEGNAMECACHE 7 /* don't do negative name caching */ +#define NFS_MFLAG_MUTEJUKEBOX 8 /* don't treat jukebox errors as unresponsive */ +#define NFS_MFLAG_EPHEMERAL 9 /* ephemeral (mirror) mount */ +#define NFS_MFLAG_NOCALLBACK 10 /* don't provide callback RPC service */ +#define NFS_MFLAG_NAMEDATTR 11 /* don't use named attributes */ +#define NFS_MFLAG_NOACL 12 /* don't support ACLs */ +#define NFS_MFLAG_ACLONLY 13 /* only support ACLs - not mode */ +#define NFS_MFLAG_NFC 14 /* send NFC strings */ +#define 
NFS_MFLAG_NOQUOTA 15 /* don't support QUOTA requests */ +#define NFS_MFLAG_MNTUDP 16 /* MOUNT protocol should use UDP */ +#define NFS_MFLAG_MNTQUICK 17 /* use short timeouts while mounting */ /* Macros for packing and unpacking packed versions */ #define PVER2MAJOR(M) ((uint32_t)(((M) >> 16) & 0xffff)) @@ -214,9 +214,9 @@ extern int nfs_ticks; #define VER2PVER(M, m) ((uint32_t)((M) << 16) | ((m) & 0xffff)) /* NFS advisory file locking modes */ -#define NFS_LOCK_MODE_ENABLED 0 /* advisory file locking enabled */ -#define NFS_LOCK_MODE_DISABLED 1 /* do not support advisory file locking */ -#define NFS_LOCK_MODE_LOCAL 2 /* perform advisory file locking locally */ +#define NFS_LOCK_MODE_ENABLED 0 /* advisory file locking enabled */ +#define NFS_LOCK_MODE_DISABLED 1 /* do not support advisory file locking */ +#define NFS_LOCK_MODE_LOCAL 2 /* perform advisory file locking locally */ /* Supported encryption types for kerberos session keys */ @@ -237,53 +237,53 @@ struct nfs_etype { /* * Old-style arguments to mount NFS */ -#define NFS_ARGSVERSION 6 /* change when nfs_args changes */ +#define NFS_ARGSVERSION 6 /* change when nfs_args changes */ struct nfs_args { - int version; /* args structure version number */ + int version; /* args structure version number */ #ifdef KERNEL - user32_addr_t addr; /* file server address */ + user32_addr_t addr; /* file server address */ #else - struct sockaddr *addr; /* file server address */ + struct sockaddr *addr; /* file server address */ #endif - int addrlen; /* length of address */ - int sotype; /* Socket type */ - int proto; /* and Protocol */ + int addrlen; /* length of address */ + int sotype; /* Socket type */ + int proto; /* and Protocol */ #ifdef KERNEL - user32_addr_t fh; /* File handle to be mounted */ + user32_addr_t fh; /* File handle to be mounted */ #else - u_char *fh; /* File handle to be mounted */ + u_char *fh; /* File handle to be mounted */ #endif - int fhsize; /* Size, in bytes, of fh */ - int flags; /* flags */ - int wsize; /* write size in bytes */ - int rsize; /* read size in bytes */ - int readdirsize; /* readdir size in bytes */ - int timeo; /* initial timeout in .1 secs */ - int retrans; /* times to retry send */ - int maxgrouplist; /* Max. size of group list */ - int readahead; /* # of blocks to readahead */ - int leaseterm; /* obsolete: Term (sec) of lease */ - int deadthresh; /* obsolete: Retrans threshold */ + int fhsize; /* Size, in bytes, of fh */ + int flags; /* flags */ + int wsize; /* write size in bytes */ + int rsize; /* read size in bytes */ + int readdirsize; /* readdir size in bytes */ + int timeo; /* initial timeout in .1 secs */ + int retrans; /* times to retry send */ + int maxgrouplist; /* Max. 
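/*
 * A minimal sketch, assuming (as the *_BITMAP_LEN names above suggest)
 * that the NFS_MATTR_* and NFS_MFLAG_* values are bit *indices* into
 * uint32_t bitmaps of NFS_MATTR_BITMAP_LEN / NFS_MFLAG_BITMAP_LEN
 * words, not OR-able masks (contrast the old-style NFSMNT_* masks
 * below).  The helper names here are hypothetical:
 */
static inline void
nfs_bitmap_set_sketch(uint32_t *bm, int bit)
{
	bm[bit / 32] |= (1U << (bit % 32));
}

static void
mark_xdr_mount_args_sketch(uint32_t *mattrs, uint32_t *mflags)
{
	/* the XDR mount args carry a READ size, and ask for a soft mount */
	nfs_bitmap_set_sketch(mattrs, NFS_MATTR_READ_SIZE);
	nfs_bitmap_set_sketch(mflags, NFS_MFLAG_SOFT);
}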
size of group list */ + int readahead; /* # of blocks to readahead */ + int leaseterm; /* obsolete: Term (sec) of lease */ + int deadthresh; /* obsolete: Retrans threshold */ #ifdef KERNEL - user32_addr_t hostname; /* server's name */ + user32_addr_t hostname; /* server's name */ #else - char *hostname; /* server's name */ + char *hostname; /* server's name */ #endif /* NFS_ARGSVERSION 3 ends here */ - int acregmin; /* reg file min attr cache timeout */ - int acregmax; /* reg file max attr cache timeout */ - int acdirmin; /* dir min attr cache timeout */ - int acdirmax; /* dir max attr cache timeout */ + int acregmin; /* reg file min attr cache timeout */ + int acregmax; /* reg file max attr cache timeout */ + int acdirmin; /* dir min attr cache timeout */ + int acdirmax; /* dir max attr cache timeout */ /* NFS_ARGSVERSION 4 ends here */ - uint32_t auth; /* security mechanism flavor */ + uint32_t auth; /* security mechanism flavor */ /* NFS_ARGSVERSION 5 ends here */ - uint32_t deadtimeout; /* secs until unresponsive mount considered dead */ + uint32_t deadtimeout; /* secs until unresponsive mount considered dead */ }; /* incremental size additions in each version of nfs_args */ -#define NFS_ARGSVERSION4_INCSIZE (4 * sizeof(int)) -#define NFS_ARGSVERSION5_INCSIZE (sizeof(uint32_t)) -#define NFS_ARGSVERSION6_INCSIZE (sizeof(uint32_t)) +#define NFS_ARGSVERSION4_INCSIZE (4 * sizeof(int)) +#define NFS_ARGSVERSION5_INCSIZE (sizeof(uint32_t)) +#define NFS_ARGSVERSION6_INCSIZE (sizeof(uint32_t)) #ifdef KERNEL /* LP64 version of nfs_args. all pointers and longs @@ -291,86 +291,86 @@ struct nfs_args { * WARNING - keep in sync with nfs_args */ struct user_nfs_args { - int version; /* args structure version number */ - user_addr_t addr __attribute((aligned(8))); /* file server address */ - int addrlen; /* length of address */ - int sotype; /* Socket type */ - int proto; /* and Protocol */ - user_addr_t fh __attribute((aligned(8))); /* File handle to be mounted */ - int fhsize; /* Size, in bytes, of fh */ - int flags; /* flags */ - int wsize; /* write size in bytes */ - int rsize; /* read size in bytes */ - int readdirsize; /* readdir size in bytes */ - int timeo; /* initial timeout in .1 secs */ - int retrans; /* times to retry send */ - int maxgrouplist; /* Max. size of group list */ - int readahead; /* # of blocks to readahead */ - int leaseterm; /* obsolete: Term (sec) of lease */ - int deadthresh; /* obsolete: Retrans threshold */ - user_addr_t hostname __attribute((aligned(8))); /* server's name */ + int version; /* args structure version number */ + user_addr_t addr __attribute((aligned(8))); /* file server address */ + int addrlen; /* length of address */ + int sotype; /* Socket type */ + int proto; /* and Protocol */ + user_addr_t fh __attribute((aligned(8))); /* File handle to be mounted */ + int fhsize; /* Size, in bytes, of fh */ + int flags; /* flags */ + int wsize; /* write size in bytes */ + int rsize; /* read size in bytes */ + int readdirsize; /* readdir size in bytes */ + int timeo; /* initial timeout in .1 secs */ + int retrans; /* times to retry send */ + int maxgrouplist; /* Max. 
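/*
 * A minimal sketch of how the NFS_ARGSVERSION*_INCSIZE constants above
 * compose: each records how much that nfs_args revision grew, so the
 * size expected for an older args version is the current layout minus
 * the later increments.  The helper name is hypothetical:
 */
static size_t
nfs_args_size_for_version_sketch(int version)
{
	size_t sz = sizeof(struct nfs_args);    /* full version-6 layout */

	if (version < 6) {
		sz -= NFS_ARGSVERSION6_INCSIZE; /* drop deadtimeout */
	}
	if (version < 5) {
		sz -= NFS_ARGSVERSION5_INCSIZE; /* drop auth */
	}
	if (version < 4) {
		sz -= NFS_ARGSVERSION4_INCSIZE; /* drop ac{reg,dir}{min,max} */
	}
	return sz;                              /* version-3 base and up */
}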
size of group list */ + int readahead; /* # of blocks to readahead */ + int leaseterm; /* obsolete: Term (sec) of lease */ + int deadthresh; /* obsolete: Retrans threshold */ + user_addr_t hostname __attribute((aligned(8))); /* server's name */ /* NFS_ARGSVERSION 3 ends here */ - int acregmin; /* reg file min attr cache timeout */ - int acregmax; /* reg file max attr cache timeout */ - int acdirmin; /* dir min attr cache timeout */ - int acdirmax; /* dir max attr cache timeout */ + int acregmin; /* reg file min attr cache timeout */ + int acregmax; /* reg file max attr cache timeout */ + int acdirmin; /* dir min attr cache timeout */ + int acdirmax; /* dir max attr cache timeout */ /* NFS_ARGSVERSION 4 ends here */ - uint32_t auth; /* security mechanism flavor */ + uint32_t auth; /* security mechanism flavor */ /* NFS_ARGSVERSION 5 ends here */ - uint32_t deadtimeout; /* secs until unresponsive mount considered dead */ + uint32_t deadtimeout; /* secs until unresponsive mount considered dead */ }; #endif // KERNEL /* * Old-style NFS mount option flags */ -#define NFSMNT_SOFT 0x00000001 /* soft mount (hard is default) */ -#define NFSMNT_WSIZE 0x00000002 /* set write size */ -#define NFSMNT_RSIZE 0x00000004 /* set read size */ -#define NFSMNT_TIMEO 0x00000008 /* set initial timeout */ -#define NFSMNT_RETRANS 0x00000010 /* set number of request retries */ -#define NFSMNT_MAXGRPS 0x00000020 /* set maximum grouplist size */ -#define NFSMNT_INT 0x00000040 /* allow interrupts on hard mount */ -#define NFSMNT_NOCONN 0x00000080 /* Don't Connect the socket */ -#define NFSMNT_NONEGNAMECACHE 0x00000100 /* Don't do negative name caching */ -#define NFSMNT_NFSV3 0x00000200 /* Use NFS Version 3 protocol */ -#define NFSMNT_NFSV4 0x00000400 /* Use NFS Version 4 protocol */ -#define NFSMNT_DUMBTIMR 0x00000800 /* Don't estimate rtt dynamically */ -#define NFSMNT_DEADTIMEOUT 0x00001000 /* unmount after a period of unresponsiveness */ -#define NFSMNT_READAHEAD 0x00002000 /* set read ahead */ -#define NFSMNT_CALLUMNT 0x00004000 /* call MOUNTPROC_UMNT on unmount */ -#define NFSMNT_RESVPORT 0x00008000 /* Allocate a reserved port */ -#define NFSMNT_RDIRPLUS 0x00010000 /* Use Readdirplus for V3 */ -#define NFSMNT_READDIRSIZE 0x00020000 /* Set readdir size */ -#define NFSMNT_NOLOCKS 0x00040000 /* don't support file locking */ -#define NFSMNT_LOCALLOCKS 0x00080000 /* do file locking locally on client */ -#define NFSMNT_ACREGMIN 0x00100000 /* reg min attr cache timeout */ -#define NFSMNT_ACREGMAX 0x00200000 /* reg max attr cache timeout */ -#define NFSMNT_ACDIRMIN 0x00400000 /* dir min attr cache timeout */ -#define NFSMNT_ACDIRMAX 0x00800000 /* dir max attr cache timeout */ -#define NFSMNT_SECFLAVOR 0x01000000 /* Use security flavor */ -#define NFSMNT_SECSYSOK 0x02000000 /* Server can support auth sys */ -#define NFSMNT_MUTEJUKEBOX 0x04000000 /* don't treat jukebox errors as unresponsive */ -#define NFSMNT_NOQUOTA 0x08000000 /* don't support QUOTA requests */ +#define NFSMNT_SOFT 0x00000001 /* soft mount (hard is default) */ +#define NFSMNT_WSIZE 0x00000002 /* set write size */ +#define NFSMNT_RSIZE 0x00000004 /* set read size */ +#define NFSMNT_TIMEO 0x00000008 /* set initial timeout */ +#define NFSMNT_RETRANS 0x00000010 /* set number of request retries */ +#define NFSMNT_MAXGRPS 0x00000020 /* set maximum grouplist size */ +#define NFSMNT_INT 0x00000040 /* allow interrupts on hard mount */ +#define NFSMNT_NOCONN 0x00000080 /* Don't Connect the socket */ +#define NFSMNT_NONEGNAMECACHE 0x00000100 /* Don't do negative 
name caching */ +#define NFSMNT_NFSV3 0x00000200 /* Use NFS Version 3 protocol */ +#define NFSMNT_NFSV4 0x00000400 /* Use NFS Version 4 protocol */ +#define NFSMNT_DUMBTIMR 0x00000800 /* Don't estimate rtt dynamically */ +#define NFSMNT_DEADTIMEOUT 0x00001000 /* unmount after a period of unresponsiveness */ +#define NFSMNT_READAHEAD 0x00002000 /* set read ahead */ +#define NFSMNT_CALLUMNT 0x00004000 /* call MOUNTPROC_UMNT on unmount */ +#define NFSMNT_RESVPORT 0x00008000 /* Allocate a reserved port */ +#define NFSMNT_RDIRPLUS 0x00010000 /* Use Readdirplus for V3 */ +#define NFSMNT_READDIRSIZE 0x00020000 /* Set readdir size */ +#define NFSMNT_NOLOCKS 0x00040000 /* don't support file locking */ +#define NFSMNT_LOCALLOCKS 0x00080000 /* do file locking locally on client */ +#define NFSMNT_ACREGMIN 0x00100000 /* reg min attr cache timeout */ +#define NFSMNT_ACREGMAX 0x00200000 /* reg max attr cache timeout */ +#define NFSMNT_ACDIRMIN 0x00400000 /* dir min attr cache timeout */ +#define NFSMNT_ACDIRMAX 0x00800000 /* dir max attr cache timeout */ +#define NFSMNT_SECFLAVOR 0x01000000 /* Use security flavor */ +#define NFSMNT_SECSYSOK 0x02000000 /* Server can support auth sys */ +#define NFSMNT_MUTEJUKEBOX 0x04000000 /* don't treat jukebox errors as unresponsive */ +#define NFSMNT_NOQUOTA 0x08000000 /* don't support QUOTA requests */ /* * fs.nfs sysctl(3) NFS_MOUNTINFO defines */ -#define NFS_MOUNT_INFO_VERSION 0 /* nfsstat mount information version */ -#define NFS_MIATTR_BITMAP_LEN 1 /* length of mount info attributes bitmap */ -#define NFS_MIFLAG_BITMAP_LEN 1 /* length of mount info flags bitmap */ +#define NFS_MOUNT_INFO_VERSION 0 /* nfsstat mount information version */ +#define NFS_MIATTR_BITMAP_LEN 1 /* length of mount info attributes bitmap */ +#define NFS_MIFLAG_BITMAP_LEN 1 /* length of mount info flags bitmap */ /* NFS mount info attributes */ -#define NFS_MIATTR_FLAGS 0 /* mount info flags bitmap (MIFLAG_*) */ -#define NFS_MIATTR_ORIG_ARGS 1 /* original mount args passed into mount call */ -#define NFS_MIATTR_CUR_ARGS 2 /* current mount args values */ -#define NFS_MIATTR_CUR_LOC_INDEX 3 /* current fs location index */ +#define NFS_MIATTR_FLAGS 0 /* mount info flags bitmap (MIFLAG_*) */ +#define NFS_MIATTR_ORIG_ARGS 1 /* original mount args passed into mount call */ +#define NFS_MIATTR_CUR_ARGS 2 /* current mount args values */ +#define NFS_MIATTR_CUR_LOC_INDEX 3 /* current fs location index */ /* NFS mount info flags */ -#define NFS_MIFLAG_DEAD 0 /* mount is dead */ -#define NFS_MIFLAG_NOTRESP 1 /* server is unresponsive */ -#define NFS_MIFLAG_RECOVERY 2 /* mount in recovery */ +#define NFS_MIFLAG_DEAD 0 /* mount is dead */ +#define NFS_MIFLAG_NOTRESP 1 /* server is unresponsive */ +#define NFS_MIFLAG_RECOVERY 2 /* mount in recovery */ /* @@ -378,13 +378,13 @@ struct user_nfs_args { * should ever try and use it. 
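/*
 * A minimal sketch of the old-style interface: unlike the bitmap
 * indices earlier, the NFSMNT_* values are OR-able masks for
 * nfs_args.flags, where each bit both enables a behavior and marks the
 * matching nfs_args field as set (e.g. "set write size").  The helper
 * name is hypothetical:
 */
static void
fill_old_style_args_sketch(struct nfs_args *na)
{
	na->version = NFS_ARGSVERSION;
	na->flags = NFSMNT_NFSV3 | NFSMNT_RSIZE | NFSMNT_WSIZE | NFSMNT_INT;
	na->rsize = 32768;      /* honored only because NFSMNT_RSIZE is set */
	na->wsize = 32768;      /* honored only because NFSMNT_WSIZE is set */
}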
*/ struct nfsd_args { - int sock; /* Socket to serve */ + int sock; /* Socket to serve */ #ifdef KERNEL - user32_addr_t name; /* Client addr for connection based sockets */ + user32_addr_t name; /* Client addr for connection based sockets */ #else - caddr_t name; /* Client addr for connection based sockets */ + caddr_t name; /* Client addr for connection based sockets */ #endif - int namelen; /* Length of name */ + int namelen; /* Length of name */ }; #ifdef KERNEL @@ -393,9 +393,9 @@ struct nfsd_args { * WARNING - keep in sync with nfsd_args */ struct user_nfsd_args { - int sock; /* Socket to serve */ - user_addr_t name __attribute((aligned(8))); /* Client addr for connection based sockets */ - int namelen; /* Length of name */ + int sock; /* Socket to serve */ + user_addr_t name __attribute((aligned(8))); /* Client addr for connection based sockets */ + int namelen; /* Length of name */ }; #endif // KERNEL @@ -405,32 +405,32 @@ struct user_nfsd_args { */ /* NFS export handle identifies which NFS export */ -#define NFS_FH_VERSION 0x4e580000 /* 'NX00' */ +#define NFS_FH_VERSION 0x4e580000 /* 'NX00' */ struct nfs_exphandle { - uint32_t nxh_version; /* data structure version */ - uint32_t nxh_fsid; /* File System Export ID */ - uint32_t nxh_expid; /* Export ID */ - uint16_t nxh_flags; /* export handle flags */ - uint8_t nxh_reserved; /* future use */ - uint8_t nxh_fidlen; /* length of File ID */ + uint32_t nxh_version; /* data structure version */ + uint32_t nxh_fsid; /* File System Export ID */ + uint32_t nxh_expid; /* Export ID */ + uint16_t nxh_flags; /* export handle flags */ + uint8_t nxh_reserved; /* future use */ + uint8_t nxh_fidlen; /* length of File ID */ }; /* nxh_flags */ -#define NXHF_INVALIDFH 0x0001 /* file handle is invalid */ +#define NXHF_INVALIDFH 0x0001 /* file handle is invalid */ -#define NFS_MAX_FID_SIZE (NFS_MAX_FH_SIZE - sizeof(struct nfs_exphandle)) -#define NFSV4_MAX_FID_SIZE (NFSV4_MAX_FH_SIZE - sizeof(struct nfs_exphandle)) -#define NFSV3_MAX_FID_SIZE (NFSV3_MAX_FH_SIZE - sizeof(struct nfs_exphandle)) -#define NFSV2_MAX_FID_SIZE (NFSV2_MAX_FH_SIZE - sizeof(struct nfs_exphandle)) +#define NFS_MAX_FID_SIZE (NFS_MAX_FH_SIZE - sizeof(struct nfs_exphandle)) +#define NFSV4_MAX_FID_SIZE (NFSV4_MAX_FH_SIZE - sizeof(struct nfs_exphandle)) +#define NFSV3_MAX_FID_SIZE (NFSV3_MAX_FH_SIZE - sizeof(struct nfs_exphandle)) +#define NFSV2_MAX_FID_SIZE (NFSV2_MAX_FH_SIZE - sizeof(struct nfs_exphandle)) /* NFS server internal view of fhandle_t */ /* The first sizeof(fhandle_t) bytes must match what goes into fhandle_t. 
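/*
 * A minimal sketch of the file handle layout the FID-size defines
 * above encode: an nfs_exphandle header immediately followed by at
 * most NFS_MAX_FID_SIZE bytes of File ID, so a handle's total length
 * works out to header plus nxh_fidlen.  The helper name is
 * hypothetical:
 */
static uint32_t
fh_total_len_sketch(const struct nfs_exphandle *nxh)
{
	/* cannot exceed NFS_MAX_FH_SIZE, by construction of the defines */
	return (uint32_t)sizeof(*nxh) + nxh->nxh_fidlen;
}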
*/ /* (fhp is used to allow use of an external buffer) */ struct nfs_filehandle { - uint32_t nfh_len; /* total length of file handle */ - struct nfs_exphandle nfh_xh; /* export handle */ - unsigned char nfh_fid[NFS_MAX_FID_SIZE]; /* File ID */ - unsigned char *nfh_fhp; /* pointer to file handle */ + uint32_t nfh_len; /* total length of file handle */ + struct nfs_exphandle nfh_xh; /* export handle */ + unsigned char nfh_fid[NFS_MAX_FID_SIZE]; /* File ID */ + unsigned char *nfh_fhp; /* pointer to file handle */ }; /* @@ -442,32 +442,32 @@ struct nfs_filehandle { struct nfs_sec { int count; uint32_t flavors[NX_MAX_SEC_FLAVORS]; -}; +}; struct nfs_export_net_args { - uint32_t nxna_flags; /* export flags */ - struct xucred nxna_cred; /* mapped credential for root/all user */ - struct sockaddr_storage nxna_addr; /* net address to which exported */ - struct sockaddr_storage nxna_mask; /* mask for net address */ - struct nfs_sec nxna_sec; /* security mechanism flavors */ + uint32_t nxna_flags; /* export flags */ + struct xucred nxna_cred; /* mapped credential for root/all user */ + struct sockaddr_storage nxna_addr; /* net address to which exported */ + struct sockaddr_storage nxna_mask; /* mask for net address */ + struct nfs_sec nxna_sec; /* security mechanism flavors */ }; struct nfs_export_args { - uint32_t nxa_fsid; /* export FS ID */ - uint32_t nxa_expid; /* export ID */ + uint32_t nxa_fsid; /* export FS ID */ + uint32_t nxa_expid; /* export ID */ #ifdef KERNEL - user32_addr_t nxa_fspath; /* export FS path */ - user32_addr_t nxa_exppath; /* export sub-path */ + user32_addr_t nxa_fspath; /* export FS path */ + user32_addr_t nxa_exppath; /* export sub-path */ #else - char *nxa_fspath; /* export FS path */ - char *nxa_exppath; /* export sub-path */ + char *nxa_fspath; /* export FS path */ + char *nxa_exppath; /* export sub-path */ #endif - uint32_t nxa_flags; /* export arg flags */ - uint32_t nxa_netcount; /* #entries in ex_nets array */ + uint32_t nxa_flags; /* export arg flags */ + uint32_t nxa_netcount; /* #entries in ex_nets array */ #ifdef KERNEL - user32_addr_t nxa_nets; /* array of net args */ + user32_addr_t nxa_nets; /* array of net args */ #else - struct nfs_export_net_args *nxa_nets; /* array of net args */ + struct nfs_export_net_args *nxa_nets; /* array of net args */ #endif }; @@ -475,78 +475,76 @@ struct nfs_export_args { /* LP64 version of export_args */ struct user_nfs_export_args { - uint32_t nxa_fsid; /* export FS ID */ - uint32_t nxa_expid; /* export ID */ - user_addr_t nxa_fspath; /* export FS path */ - user_addr_t nxa_exppath; /* export sub-path */ - uint32_t nxa_flags; /* export arg flags */ - uint32_t nxa_netcount; /* #entries in ex_nets array */ - user_addr_t nxa_nets; /* array of net args */ + uint32_t nxa_fsid; /* export FS ID */ + uint32_t nxa_expid; /* export ID */ + user_addr_t nxa_fspath; /* export FS path */ + user_addr_t nxa_exppath; /* export sub-path */ + uint32_t nxa_flags; /* export arg flags */ + uint32_t nxa_netcount; /* #entries in ex_nets array */ + user_addr_t nxa_nets; /* array of net args */ }; #endif /* KERNEL */ /* nfs export arg flags */ -#define NXA_DELETE 0x0001 /* delete the specified export(s) */ -#define NXA_ADD 0x0002 /* add the specified export(s) */ -#define NXA_REPLACE 0x0003 /* delete and add the specified export(s) */ -#define NXA_DELETE_ALL 0x0004 /* delete all exports */ -#define NXA_OFFLINE 0x0008 /* export is offline */ -#define NXA_CHECK 0x0010 /* check if exportable */ +#define NXA_DELETE 0x0001 /* delete the specified export(s) 
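/*
 * A minimal sketch (userspace view; assumes <string.h>, and the paths
 * and IDs are hypothetical) of how mountd-style code would assemble
 * the structures above to publish one read-only export for one client
 * network, before handing them to the kernel via nfssvc(2) with the
 * NFSSVC_EXPORT flag defined later in this header.  NXA_ADD and
 * NX_READONLY are the flags defined just below:
 */
static void
build_export_args_sketch(struct nfs_export_args *nxa,
    struct nfs_export_net_args *net)
{
	memset(net, 0, sizeof(*net));
	net->nxna_flags = NX_READONLY;
	/* net->nxna_addr / nxna_mask would describe the client network */

	memset(nxa, 0, sizeof(*nxa));
	nxa->nxa_fsid = 0;                      /* hypothetical IDs */
	nxa->nxa_expid = 0;
	nxa->nxa_flags = NXA_ADD;
	nxa->nxa_fspath = (char *)"/exports/vol";
	nxa->nxa_exppath = (char *)"";
	nxa->nxa_netcount = 1;
	nxa->nxa_nets = net;
}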
*/ +#define NXA_ADD 0x0002 /* add the specified export(s) */ +#define NXA_REPLACE 0x0003 /* delete and add the specified export(s) */ +#define NXA_DELETE_ALL 0x0004 /* delete all exports */ +#define NXA_OFFLINE 0x0008 /* export is offline */ +#define NXA_CHECK 0x0010 /* check if exportable */ /* export option flags */ -#define NX_READONLY 0x0001 /* exported read-only */ -#define NX_DEFAULTEXPORT 0x0002 /* exported to the world */ -#define NX_MAPROOT 0x0004 /* map root access to anon credential */ -#define NX_MAPALL 0x0008 /* map all access to anon credential */ -#define NX_32BITCLIENTS 0x0020 /* restrict directory cookies to 32 bits */ -#define NX_OFFLINE 0x0040 /* export is offline */ -#define NX_MANGLEDNAMES 0x0080 /* export will return mangled names for names > 255 bytes */ +#define NX_READONLY 0x0001 /* exported read-only */ +#define NX_DEFAULTEXPORT 0x0002 /* exported to the world */ +#define NX_MAPROOT 0x0004 /* map root access to anon credential */ +#define NX_MAPALL 0x0008 /* map all access to anon credential */ +#define NX_32BITCLIENTS 0x0020 /* restrict directory cookies to 32 bits */ +#define NX_OFFLINE 0x0040 /* export is offline */ +#define NX_MANGLEDNAMES 0x0080 /* export will return mangled names for names > 255 bytes */ /* * fs.nfs sysctl(3) export stats record structures */ -#define NFS_EXPORT_STAT_REC_VERSION 1 /* export stat record version */ +#define NFS_EXPORT_STAT_REC_VERSION 1 /* export stat record version */ #define NFS_USER_STAT_REC_VERSION 1 /* active user list record version */ /* descriptor describing following records */ -struct nfs_export_stat_desc -{ - uint32_t rec_vers; /* version of export stat records */ - uint32_t rec_count; /* total record count */ +struct nfs_export_stat_desc { + uint32_t rec_vers; /* version of export stat records */ + uint32_t rec_count; /* total record count */ }__attribute__((__packed__)); /* export stat record containing path and stat counters */ struct nfs_export_stat_rec { char path[RPCMNT_PATHLEN + 1]; - uint64_t ops; /* Count of NFS Requests received for this export */ - uint64_t bytes_read; /* Count of bytes read from this export */ - uint64_t bytes_written; /* Count of bytes written to this export */ + uint64_t ops; /* Count of NFS Requests received for this export */ + uint64_t bytes_read; /* Count of bytes read from this export */ + uint64_t bytes_written; /* Count of bytes written to this export */ }__attribute__((__packed__)); /* Active user list stat buffer descriptor */ -struct nfs_user_stat_desc -{ - uint32_t rec_vers; /* version of active user stat records */ - uint32_t rec_count; /* total record count */ +struct nfs_user_stat_desc { + uint32_t rec_vers; /* version of active user stat records */ + uint32_t rec_count; /* total record count */ }__attribute__((__packed__)); /* Active user list user stat record format */ struct nfs_user_stat_user_rec { - u_char rec_type; - uid_t uid; - struct sockaddr_storage sock; - uint64_t ops; - uint64_t bytes_read; - uint64_t bytes_written; - uint32_t tm_start; - uint32_t tm_last; + u_char rec_type; + uid_t uid; + struct sockaddr_storage sock; + uint64_t ops; + uint64_t bytes_read; + uint64_t bytes_written; + uint32_t tm_start; + uint32_t tm_last; }__attribute__((__packed__)); /* Active user list path record format */ struct nfs_user_stat_path_rec { - u_char rec_type; - char path[RPCMNT_PATHLEN + 1]; + u_char rec_type; + char path[RPCMNT_PATHLEN + 1]; }__attribute__((__packed__)); /* Defines for rec_type field of @@ -561,15 +559,15 @@ struct nfs_user_stat_path_rec { struct 
nfs_exportfs; struct nfs_export_options { - uint32_t nxo_flags; /* export options */ - kauth_cred_t nxo_cred; /* mapped credential */ - struct nfs_sec nxo_sec; /* security mechanism flavors */ + uint32_t nxo_flags; /* export options */ + kauth_cred_t nxo_cred; /* mapped credential */ + struct nfs_sec nxo_sec; /* security mechanism flavors */ }; /* Network address lookup element and individual export options */ struct nfs_netopt { - struct radix_node no_rnodes[2]; /* radix tree glue */ - struct nfs_export_options no_opt; /* export options */ + struct radix_node no_rnodes[2]; /* radix tree glue */ + struct nfs_export_options no_opt; /* export options */ }; /* statistic counters for each exported directory @@ -579,53 +577,53 @@ struct nfs_netopt { * atomic operations */ typedef struct nfsstatcount64 { - uint32_t hi; - uint32_t lo; + uint32_t hi; + uint32_t lo; } nfsstatcount64; struct nfs_export_stat_counters { - struct nfsstatcount64 ops; /* Count of NFS Requests received for this export */ - struct nfsstatcount64 bytes_read; /* Count of bytes read from this export */ - struct nfsstatcount64 bytes_written; /* Count of bytes written to his export */ + struct nfsstatcount64 ops; /* Count of NFS Requests received for this export */ + struct nfsstatcount64 bytes_read; /* Count of bytes read from this export */ + struct nfsstatcount64 bytes_written; /* Count of bytes written to this export */ }; /* Macro for updating nfs export stat counters */ #define NFSStatAdd64(PTR, VAL) \ do { \ - uint32_t NFSSA_OldValue = \ - OSAddAtomic((VAL), &(PTR)->lo); \ - if ((NFSSA_OldValue + (VAL)) < NFSSA_OldValue) \ - OSAddAtomic(1, &(PTR)->hi); \ + uint32_t NFSSA_OldValue = \ + OSAddAtomic((VAL), &(PTR)->lo); \ + if ((NFSSA_OldValue + (VAL)) < NFSSA_OldValue) \ + OSAddAtomic(1, &(PTR)->hi); \ } while (0) /* Some defines for dealing with active user list stats */ -#define NFSRV_USER_STAT_DEF_MAX_NODES 1024 /* default active user list size limit */ -#define NFSRV_USER_STAT_DEF_IDLE_SEC 7200 /* default idle seconds (node no longer considered active) */ +#define NFSRV_USER_STAT_DEF_MAX_NODES 1024 /* default active user list size limit */ +#define NFSRV_USER_STAT_DEF_IDLE_SEC 7200 /* default idle seconds (node no longer considered active) */ /* active user list globals */ -extern uint32_t nfsrv_user_stat_enabled; /* enable/disable active user list */ -extern uint32_t nfsrv_user_stat_node_count; /* current count of user stat nodes */ -extern uint32_t nfsrv_user_stat_max_idle_sec; /* idle seconds (node no longer considered active) */ -extern uint32_t nfsrv_user_stat_max_nodes; /* active user list size limit */ +extern uint32_t nfsrv_user_stat_enabled; /* enable/disable active user list */ +extern uint32_t nfsrv_user_stat_node_count; /* current count of user stat nodes */ +extern uint32_t nfsrv_user_stat_max_idle_sec; /* idle seconds (node no longer considered active) */ +extern uint32_t nfsrv_user_stat_max_nodes; /* active user list size limit */ extern lck_grp_t *nfsrv_active_user_mutex_group; /* An active user node represented in the kernel */ struct nfs_user_stat_node { - TAILQ_ENTRY(nfs_user_stat_node) lru_link; - LIST_ENTRY(nfs_user_stat_node) hash_link; - uid_t uid; - struct sockaddr_storage sock; - uint64_t ops; - uint64_t bytes_read; - uint64_t bytes_written; - uint32_t tm_start; - uint32_t tm_last; + TAILQ_ENTRY(nfs_user_stat_node) lru_link; + LIST_ENTRY(nfs_user_stat_node) hash_link; + uid_t uid; + struct sockaddr_storage sock; + uint64_t ops; + uint64_t bytes_read; + uint64_t bytes_written; + uint32_t
tm_start; + uint32_t tm_last; }; /* Hash table for active user nodes */ -#define NFS_USER_STAT_HASH_SIZE 16 /* MUST be a power of 2 */ +#define NFS_USER_STAT_HASH_SIZE 16 /* MUST be a power of 2 */ #define NFS_USER_STAT_HASH(userhashtbl, uid) \ - &((userhashtbl)[(uid) & (NFS_USER_STAT_HASH_SIZE - 1)]) + &((userhashtbl)[(uid) & (NFS_USER_STAT_HASH_SIZE - 1)]) TAILQ_HEAD(nfs_user_stat_lru_head, nfs_user_stat_node); LIST_HEAD(nfs_user_stat_hashtbl_head, nfs_user_stat_node); @@ -633,9 +631,9 @@ LIST_HEAD(nfs_user_stat_hashtbl_head, nfs_user_stat_node); /* Active user list data structure */ /* One per exported directory */ struct nfs_active_user_list { - struct nfs_user_stat_lru_head user_lru; - struct nfs_user_stat_hashtbl_head user_hashtbl[NFS_USER_STAT_HASH_SIZE]; - uint32_t node_count; + struct nfs_user_stat_lru_head user_lru; + struct nfs_user_stat_hashtbl_head user_hashtbl[NFS_USER_STAT_HASH_SIZE]; + uint32_t node_count; lck_mtx_t user_mutex; }; @@ -643,39 +641,39 @@ struct nfs_active_user_list { /* Network export information */ /* one of these for each exported directory */ struct nfs_export { - LIST_ENTRY(nfs_export) nx_next; /* FS export list */ - LIST_ENTRY(nfs_export) nx_hash; /* export hash chain */ - struct nfs_export *nx_parent; /* parent export */ - uint32_t nx_id; /* export ID */ - uint32_t nx_flags; /* export flags */ - struct nfs_exportfs *nx_fs; /* exported file system */ - char *nx_path; /* exported file system sub-path */ - struct nfs_filehandle nx_fh; /* export root file handle */ - struct nfs_export_options nx_defopt; /* default options */ - uint32_t nx_expcnt; /* # exports in table */ - struct radix_node_head *nx_rtable[AF_MAX+1]; /* table of exports (netopts) */ - struct nfs_export_stat_counters nx_stats; /* statistic counters for this exported directory */ - struct nfs_active_user_list nx_user_list; /* Active User List for this exported directory */ - struct timeval nx_exptime; /* time of export for write verifier */ + LIST_ENTRY(nfs_export) nx_next; /* FS export list */ + LIST_ENTRY(nfs_export) nx_hash; /* export hash chain */ + struct nfs_export *nx_parent; /* parent export */ + uint32_t nx_id; /* export ID */ + uint32_t nx_flags; /* export flags */ + struct nfs_exportfs *nx_fs; /* exported file system */ + char *nx_path; /* exported file system sub-path */ + struct nfs_filehandle nx_fh; /* export root file handle */ + struct nfs_export_options nx_defopt; /* default options */ + uint32_t nx_expcnt; /* # exports in table */ + struct radix_node_head *nx_rtable[AF_MAX + 1]; /* table of exports (netopts) */ + struct nfs_export_stat_counters nx_stats; /* statistic counters for this exported directory */ + struct nfs_active_user_list nx_user_list; /* Active User List for this exported directory */ + struct timeval nx_exptime; /* time of export for write verifier */ }; /* NFS exported file system info */ /* one of these for each exported file system */ struct nfs_exportfs { - LIST_ENTRY(nfs_exportfs) nxfs_next; /* exported file system list */ - uint32_t nxfs_id; /* exported file system ID */ - char *nxfs_path; /* exported file system path */ - LIST_HEAD(,nfs_export) nxfs_exports; /* list of exports for this file system */ + LIST_ENTRY(nfs_exportfs) nxfs_next; /* exported file system list */ + uint32_t nxfs_id; /* exported file system ID */ + char *nxfs_path; /* exported file system path */ + LIST_HEAD(, nfs_export) nxfs_exports; /* list of exports for this file system */ }; extern LIST_HEAD(nfsrv_expfs_list, nfs_exportfs) nfsrv_exports; extern lck_rw_t nfsrv_export_rwlock; 
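/*
 * A minimal sketch of why NFSStatAdd64() above works: each 64-bit
 * counter is kept as two 32-bit halves so only 32-bit atomics are
 * needed, with the hi word bumped whenever the lo word wraps.  A
 * reader reassembles the value like this (a torn read is possible
 * without a retry loop, which is omitted here):
 */
static uint64_t
nfsstat64_read_sketch(const struct nfsstatcount64 *c)
{
	return ((uint64_t)c->hi << 32) | (uint64_t)c->lo;
}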
// lock for export data structures -#define NFSRVEXPHASHSZ 64 -#define NFSRVEXPHASHVAL(FSID, EXPID) \ +#define NFSRVEXPHASHSZ 64 +#define NFSRVEXPHASHVAL(FSID, EXPID) \ (((FSID) >> 24) ^ ((FSID) >> 16) ^ ((FSID) >> 8) ^ (EXPID)) -#define NFSRVEXPHASH(FSID, EXPID) \ +#define NFSRVEXPHASH(FSID, EXPID) \ (&nfsrv_export_hashtbl[NFSRVEXPHASHVAL((FSID),(EXPID)) & nfsrv_export_hash]) -extern LIST_HEAD(nfsrv_export_hashhead, nfs_export) *nfsrv_export_hashtbl; +extern LIST_HEAD(nfsrv_export_hashhead, nfs_export) * nfsrv_export_hashtbl; extern u_long nfsrv_export_hash; #if CONFIG_FSE @@ -683,22 +681,22 @@ extern u_long nfsrv_export_hash; * NFS server file mod fsevents */ struct nfsrv_fmod { - LIST_ENTRY(nfsrv_fmod) fm_link; - vnode_t fm_vp; - struct vfs_context fm_context; - uint64_t fm_deadline; + LIST_ENTRY(nfsrv_fmod) fm_link; + vnode_t fm_vp; + struct vfs_context fm_context; + uint64_t fm_deadline; }; -#define NFSRVFMODHASHSZ 128 +#define NFSRVFMODHASHSZ 128 #define NFSRVFMODHASH(vp) (((uintptr_t) vp) & nfsrv_fmod_hash) -extern LIST_HEAD(nfsrv_fmod_hashhead, nfsrv_fmod) *nfsrv_fmod_hashtbl; +extern LIST_HEAD(nfsrv_fmod_hashhead, nfsrv_fmod) * nfsrv_fmod_hashtbl; extern u_long nfsrv_fmod_hash; extern lck_mtx_t *nfsrv_fmod_mutex; extern int nfsrv_fmod_pending, nfsrv_fsevents_enabled; #endif -extern int nfsrv_async, nfsrv_export_hash_size, - nfsrv_reqcache_size, nfsrv_sock_max_rec_queue_length; +extern int nfsrv_async, nfsrv_export_hash_size, + nfsrv_reqcache_size, nfsrv_sock_max_rec_queue_length; extern uint32_t nfsrv_gss_context_ttl; extern struct nfsstats nfsstats; #define NFS_UC_Q_DEBUG @@ -706,7 +704,7 @@ extern struct nfsstats nfsstats; extern int nfsrv_uc_use_proxy; extern uint32_t nfsrv_uc_queue_limit; extern uint32_t nfsrv_uc_queue_max_seen; -extern volatile uint32_t nfsrv_uc_queue_count; +extern volatile uint32_t nfsrv_uc_queue_count; #endif #endif // KERNEL @@ -719,82 +717,82 @@ extern volatile uint32_t nfsrv_uc_queue_count; * Stats structure */ struct nfsstats { - uint64_t attrcache_hits; - uint64_t attrcache_misses; - uint64_t lookupcache_hits; - uint64_t lookupcache_misses; - uint64_t direofcache_hits; - uint64_t direofcache_misses; - uint64_t biocache_reads; - uint64_t read_bios; - uint64_t read_physios; - uint64_t biocache_writes; - uint64_t write_bios; - uint64_t write_physios; - uint64_t biocache_readlinks; - uint64_t readlink_bios; - uint64_t biocache_readdirs; - uint64_t readdir_bios; - uint64_t rpccnt[NFS_NPROCS]; - uint64_t rpcretries; - uint64_t srvrpccnt[NFS_NPROCS]; - uint64_t srvrpc_errs; - uint64_t srv_errs; - uint64_t rpcrequests; - uint64_t rpctimeouts; - uint64_t rpcunexpected; - uint64_t rpcinvalid; - uint64_t srvcache_inproghits; - uint64_t srvcache_idemdonehits; - uint64_t srvcache_nonidemdonehits; - uint64_t srvcache_misses; - uint64_t srvvop_writes; - uint64_t pageins; - uint64_t pageouts; + uint64_t attrcache_hits; + uint64_t attrcache_misses; + uint64_t lookupcache_hits; + uint64_t lookupcache_misses; + uint64_t direofcache_hits; + uint64_t direofcache_misses; + uint64_t biocache_reads; + uint64_t read_bios; + uint64_t read_physios; + uint64_t biocache_writes; + uint64_t write_bios; + uint64_t write_physios; + uint64_t biocache_readlinks; + uint64_t readlink_bios; + uint64_t biocache_readdirs; + uint64_t readdir_bios; + uint64_t rpccnt[NFS_NPROCS]; + uint64_t rpcretries; + uint64_t srvrpccnt[NFS_NPROCS]; + uint64_t srvrpc_errs; + uint64_t srv_errs; + uint64_t rpcrequests; + uint64_t rpctimeouts; + uint64_t rpcunexpected; + uint64_t rpcinvalid; + uint64_t 
srvcache_inproghits; + uint64_t srvcache_idemdonehits; + uint64_t srvcache_nonidemdonehits; + uint64_t srvcache_misses; + uint64_t srvvop_writes; + uint64_t pageins; + uint64_t pageouts; }; #endif /* * Flags for nfssvc() system call. */ -#define NFSSVC_NFSD 0x004 -#define NFSSVC_ADDSOCK 0x008 -#define NFSSVC_EXPORT 0x200 +#define NFSSVC_NFSD 0x004 +#define NFSSVC_ADDSOCK 0x008 +#define NFSSVC_EXPORT 0x200 /* * Flags for nfsclnt() system call. */ -#define NFSCLNT_LOCKDANS 0x200 -#define NFSCLNT_LOCKDNOTIFY 0x400 -#define NFSCLNT_TESTIDMAP 0x001 +#define NFSCLNT_LOCKDANS 0x200 +#define NFSCLNT_LOCKDNOTIFY 0x400 +#define NFSCLNT_TESTIDMAP 0x001 #include /* for guid_t below */ -#define MAXIDNAMELEN 1024 +#define MAXIDNAMELEN 1024 struct nfs_testmapid { - uint32_t ntm_lookup; /* lookup name 2 id or id 2 name */ - uint32_t ntm_grpflag; /* Is this a group or user maping */ - uint32_t ntm_id; /* id to map or return */ - uint32_t pad; - guid_t ntm_guid; /* intermidiate guid used in conversion */ - char ntm_name[MAXIDNAMELEN]; /* name to map or return */ + uint32_t ntm_lookup; /* lookup name 2 id or id 2 name */ + uint32_t ntm_grpflag; /* Is this a group or user maping */ + uint32_t ntm_id; /* id to map or return */ + uint32_t pad; + guid_t ntm_guid; /* intermidiate guid used in conversion */ + char ntm_name[MAXIDNAMELEN]; /* name to map or return */ }; -#define NTM_ID2NAME 0 -#define NTM_NAME2ID 1 -#define NTM_NAME2GUID 2 -#define NTM_GUID2NAME 3 +#define NTM_ID2NAME 0 +#define NTM_NAME2ID 1 +#define NTM_NAME2GUID 2 +#define NTM_GUID2NAME 3 /* * fs.nfs sysctl(3) identifiers */ -#define NFS_NFSSTATS 1 /* struct: struct nfsstats */ -#define NFS_EXPORTSTATS 3 /* gets exported directory stats */ -#define NFS_USERSTATS 4 /* gets exported directory active user stats */ -#define NFS_USERCOUNT 5 /* gets current count of active nfs users */ -#define NFS_MOUNTINFO 6 /* gets information about an NFS mount */ +#define NFS_NFSSTATS 1 /* struct: struct nfsstats */ +#define NFS_EXPORTSTATS 3 /* gets exported directory stats */ +#define NFS_USERSTATS 4 /* gets exported directory active user stats */ +#define NFS_USERCOUNT 5 /* gets current count of active nfs users */ +#define NFS_MOUNTINFO 6 /* gets information about an NFS mount */ #ifndef NFS_WDELAYHASHSIZ -#define NFS_WDELAYHASHSIZ 16 /* and with this */ +#define NFS_WDELAYHASHSIZ 16 /* and with this */ #endif #ifdef KERNEL @@ -807,13 +805,13 @@ struct nfs_testmapid { /* kernel debug trace macros */ #define FSDBG(A, B, C, D, E) \ NFS_KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_NONE, \ - (int)(B), (int)(C), (int)(D), (int)(E), 0) + (int)(B), (int)(C), (int)(D), (int)(E), 0) #define FSDBG_TOP(A, B, C, D, E) \ NFS_KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_START, \ - (int)(B), (int)(C), (int)(D), (int)(E), 0) + (int)(B), (int)(C), (int)(D), (int)(E), 0) #define FSDBG_BOT(A, B, C, D, E) \ NFS_KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, (A))) | DBG_FUNC_END, \ - (int)(B), (int)(C), (int)(D), (int)(E), 0) + (int)(B), (int)(C), (int)(D), (int)(E), 0) #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_NFSREQ); @@ -850,8 +848,8 @@ struct nfsrv_uc_arg; * such as SIGALRM will not expect file I/O system calls to be interrupted * by them and break. 
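/*
 * A minimal sketch (userspace, hypothetical wrapper) of exercising the
 * ID-mapping test interface above, assuming the usual nfsclnt(2) shape
 * of int nfsclnt(int flag, void *arg): the caller fills in the lookup
 * direction and name, and the kernel returns ntm_id plus the
 * intermediate ntm_guid.  Assumes <string.h>:
 */
static int
testmap_name2id_sketch(const char *name, uint32_t *idp)
{
	struct nfs_testmapid map;

	memset(&map, 0, sizeof(map));
	map.ntm_lookup = NTM_NAME2ID;
	map.ntm_grpflag = 0;            /* a user, not a group */
	strlcpy(map.ntm_name, name, sizeof(map.ntm_name));
	if (nfsclnt(NFSCLNT_TESTIDMAP, &map) != 0) {
		return -1;
	}
	*idp = map.ntm_id;
	return 0;
}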
*/ -#define NFSINT_SIGMASK (sigmask(SIGINT)|sigmask(SIGTERM)|sigmask(SIGKILL)| \ - sigmask(SIGHUP)|sigmask(SIGQUIT)) +#define NFSINT_SIGMASK (sigmask(SIGINT)|sigmask(SIGTERM)|sigmask(SIGKILL)| \ + sigmask(SIGHUP)|sigmask(SIGQUIT)) extern size_t nfs_mbuf_mhlen, nfs_mbuf_minclsize; @@ -859,30 +857,30 @@ extern size_t nfs_mbuf_mhlen, nfs_mbuf_minclsize; * NFS mbuf chain structure used for managing the building/dissection of RPCs */ struct nfsm_chain { - mbuf_t nmc_mhead; /* mbuf chain head */ - mbuf_t nmc_mcur; /* current mbuf */ - caddr_t nmc_ptr; /* pointer into current mbuf */ - uint32_t nmc_left; /* bytes remaining in current mbuf */ - uint32_t nmc_flags; /* flags for this nfsm_chain */ + mbuf_t nmc_mhead; /* mbuf chain head */ + mbuf_t nmc_mcur; /* current mbuf */ + caddr_t nmc_ptr; /* pointer into current mbuf */ + uint32_t nmc_left; /* bytes remaining in current mbuf */ + uint32_t nmc_flags; /* flags for this nfsm_chain */ }; -#define NFSM_CHAIN_FLAG_ADD_CLUSTERS 0x1 /* always add mbuf clusters */ +#define NFSM_CHAIN_FLAG_ADD_CLUSTERS 0x1 /* always add mbuf clusters */ /* * Each retransmission of an RPCSEC_GSS request * has an additional sequence number. */ struct gss_seq { - SLIST_ENTRY(gss_seq) gss_seqnext; - uint32_t gss_seqnum; + SLIST_ENTRY(gss_seq) gss_seqnext; + uint32_t gss_seqnum; }; /* * async NFS request callback info */ struct nfsreq_cbinfo { - void (*rcb_func)(struct nfsreq *); /* async request callback function */ - struct nfsbuf *rcb_bp; /* buffer I/O RPC is for */ - uint32_t rcb_args[3]; /* additional callback args */ + void (*rcb_func)(struct nfsreq *); /* async request callback function */ + struct nfsbuf *rcb_bp; /* buffer I/O RPC is for */ + uint32_t rcb_args[3]; /* additional callback args */ }; /* @@ -892,59 +890,59 @@ struct nfsreq_cbinfo { * use any file handle and name provided. 
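/*
 * A minimal sketch of how the nfsm_chain cursor above moves while
 * dissecting an RPC: consume from the current mbuf, hopping to the
 * next one when nmc_left runs out.  Simplified from what the real
 * nfsm_chain macros do (no alignment or allocation handling); the
 * mbuf_next/mbuf_data/mbuf_len KPIs are used for traversal:
 */
static int
nfsm_chain_skip_sketch(struct nfsm_chain *nmc, uint32_t len)
{
	while (len > 0) {
		if (nmc->nmc_left >= len) {
			nmc->nmc_ptr += len;
			nmc->nmc_left -= len;
			return 0;
		}
		len -= nmc->nmc_left;
		nmc->nmc_mcur = mbuf_next(nmc->nmc_mcur);
		if (nmc->nmc_mcur == NULL) {
			return EBADRPC; /* ran off the end of the chain */
		}
		nmc->nmc_ptr = (caddr_t)mbuf_data(nmc->nmc_mcur);
		nmc->nmc_left = (uint32_t)mbuf_len(nmc->nmc_mcur);
	}
	return 0;
}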
*/ struct nfsreq_secinfo_args { - nfsnode_t rsia_np; /* the node */ - const char *rsia_name; /* alternate name string */ - u_char *rsia_fh; /* alternate file handle */ - uint32_t rsia_namelen; /* length of string */ - uint32_t rsia_fhsize; /* length of fh */ + nfsnode_t rsia_np; /* the node */ + const char *rsia_name; /* alternate name string */ + u_char *rsia_fh; /* alternate file handle */ + uint32_t rsia_namelen; /* length of string */ + uint32_t rsia_fhsize; /* length of fh */ }; #define NFSREQ_SECINFO_SET(SI, NP, FH, FHSIZE, NAME, NAMELEN) \ do { \ - (SI)->rsia_np = (NP); \ - (SI)->rsia_fh = (FH); \ - (SI)->rsia_fhsize = (FHSIZE); \ - (SI)->rsia_name = (NAME); \ - (SI)->rsia_namelen = (NAMELEN); \ + (SI)->rsia_np = (NP); \ + (SI)->rsia_fh = (FH); \ + (SI)->rsia_fhsize = (FHSIZE); \ + (SI)->rsia_name = (NAME); \ + (SI)->rsia_namelen = (NAMELEN); \ } while (0) /* * NFS outstanding request list element */ struct nfsreq { - lck_mtx_t r_mtx; /* NFS request mutex */ - TAILQ_ENTRY(nfsreq) r_chain; /* request queue chain */ - TAILQ_ENTRY(nfsreq) r_achain; /* mount's async I/O request queue chain */ - TAILQ_ENTRY(nfsreq) r_rchain; /* mount's async I/O resend queue chain */ - TAILQ_ENTRY(nfsreq) r_cchain; /* mount's cwnd queue chain */ - mbuf_t r_mrest; /* request body mbufs */ - mbuf_t r_mhead; /* request header mbufs */ - struct nfsm_chain r_nmrep; /* reply mbufs */ - nfsnode_t r_np; /* NFS node */ - struct nfsmount *r_nmp; /* NFS mount point */ - uint64_t r_xid; /* RPC transaction ID */ - uint32_t r_procnum; /* NFS procedure number */ - uint32_t r_mreqlen; /* request length */ - int r_flags; /* flags on request, see below */ - int r_lflags; /* flags protected by list mutex, see below */ - int r_refs; /* # outstanding references */ - uint8_t r_delay; /* delay to use for jukebox error */ - uint8_t r_retry; /* max retransmission count */ - uint8_t r_rexmit; /* current retrans count */ - int r_rtt; /* RTT for rpc */ - thread_t r_thread; /* thread that did I/O system call */ - kauth_cred_t r_cred; /* credential used for request */ - time_t r_start; /* request start time */ - time_t r_lastmsg; /* time of last tprintf */ - time_t r_resendtime; /* time of next jukebox error resend */ - struct nfs_gss_clnt_ctx *r_gss_ctx; /* RPCSEC_GSS context */ - SLIST_HEAD(, gss_seq) r_gss_seqlist; /* RPCSEC_GSS sequence numbers */ - uint32_t r_gss_argoff; /* RPCSEC_GSS offset to args */ - uint32_t r_gss_arglen; /* RPCSEC_GSS arg length */ - uint32_t r_auth; /* security flavor request sent with */ - uint32_t *r_wrongsec; /* wrongsec: other flavors to try */ - int r_error; /* request error */ - struct nfsreq_cbinfo r_callback; /* callback info */ - struct nfsreq_secinfo_args r_secinfo; /* secinfo args */ + lck_mtx_t r_mtx; /* NFS request mutex */ + TAILQ_ENTRY(nfsreq) r_chain; /* request queue chain */ + TAILQ_ENTRY(nfsreq) r_achain; /* mount's async I/O request queue chain */ + TAILQ_ENTRY(nfsreq) r_rchain; /* mount's async I/O resend queue chain */ + TAILQ_ENTRY(nfsreq) r_cchain; /* mount's cwnd queue chain */ + mbuf_t r_mrest; /* request body mbufs */ + mbuf_t r_mhead; /* request header mbufs */ + struct nfsm_chain r_nmrep; /* reply mbufs */ + nfsnode_t r_np; /* NFS node */ + struct nfsmount *r_nmp; /* NFS mount point */ + uint64_t r_xid; /* RPC transaction ID */ + uint32_t r_procnum; /* NFS procedure number */ + uint32_t r_mreqlen; /* request length */ + int r_flags; /* flags on request, see below */ + int r_lflags; /* flags protected by list mutex, see below */ + int r_refs; /* # outstanding references */ + 
uint8_t r_delay; /* delay to use for jukebox error */ + uint8_t r_retry; /* max retransmission count */ + uint8_t r_rexmit; /* current retrans count */ + int r_rtt; /* RTT for rpc */ + thread_t r_thread; /* thread that did I/O system call */ + kauth_cred_t r_cred; /* credential used for request */ + time_t r_start; /* request start time */ + time_t r_lastmsg; /* time of last tprintf */ + time_t r_resendtime; /* time of next jukebox error resend */ + struct nfs_gss_clnt_ctx *r_gss_ctx; /* RPCSEC_GSS context */ + SLIST_HEAD(, gss_seq) r_gss_seqlist; /* RPCSEC_GSS sequence numbers */ + uint32_t r_gss_argoff; /* RPCSEC_GSS offset to args */ + uint32_t r_gss_arglen; /* RPCSEC_GSS arg length */ + uint32_t r_auth; /* security flavor request sent with */ + uint32_t *r_wrongsec; /* wrongsec: other flavors to try */ + int r_error; /* request error */ + struct nfsreq_cbinfo r_callback; /* callback info */ + struct nfsreq_secinfo_args r_secinfo; /* secinfo args */ }; /* @@ -954,40 +952,40 @@ TAILQ_HEAD(nfs_reqqhead, nfsreq); extern struct nfs_reqqhead nfs_reqq; extern lck_grp_t *nfs_request_grp; -#define R_XID32(x) ((x) & 0xffffffff) +#define R_XID32(x) ((x) & 0xffffffff) -#define NFSNOLIST ((void *)0x0badcafe) /* sentinel value for nfs lists */ -#define NFSREQNOLIST NFSNOLIST /* sentinel value for nfsreq lists */ +#define NFSNOLIST ((void *)0x0badcafe) /* sentinel value for nfs lists */ +#define NFSREQNOLIST NFSNOLIST /* sentinel value for nfsreq lists */ /* Flag values for r_flags */ -#define R_TIMING 0x00000001 /* timing request (in mntp) */ -#define R_CWND 0x00000002 /* request accounted for in congestion window */ -#define R_SOFTTERM 0x00000004 /* request terminated (e.g. soft mnt) */ -#define R_RESTART 0x00000008 /* RPC should be restarted. */ -#define R_INITTED 0x00000010 /* request has been initialized */ -#define R_TPRINTFMSG 0x00000020 /* Did a tprintf msg. */ -#define R_MUSTRESEND 0x00000040 /* Must resend request */ -#define R_ALLOCATED 0x00000080 /* request was allocated */ -#define R_SENT 0x00000100 /* request has been sent */ -#define R_WAITSENT 0x00000200 /* someone is waiting for request to be sent */ -#define R_RESENDERR 0x00000400 /* resend failed */ -#define R_JBTPRINTFMSG 0x00000800 /* Did a tprintf msg for jukebox error */ -#define R_ASYNC 0x00001000 /* async request */ -#define R_ASYNCWAIT 0x00002000 /* async request now being waited on */ -#define R_RESENDQ 0x00004000 /* async request currently on resendq */ -#define R_SENDING 0x00008000 /* request currently being sent */ -#define R_SOFT 0x00010000 /* request is soft - don't retry or reconnect */ -#define R_IOD 0x00020000 /* request is being managed by an IOD */ - -#define R_NOINTR 0x20000000 /* request should not be interupted by a signal */ -#define R_RECOVER 0x40000000 /* a state recovery RPC - during NFSSTA_RECOVER */ -#define R_SETUP 0x80000000 /* a setup RPC - during (re)connection */ -#define R_OPTMASK 0xe0000000 /* mask of all RPC option flags */ +#define R_TIMING 0x00000001 /* timing request (in mntp) */ +#define R_CWND 0x00000002 /* request accounted for in congestion window */ +#define R_SOFTTERM 0x00000004 /* request terminated (e.g. soft mnt) */ +#define R_RESTART 0x00000008 /* RPC should be restarted. */ +#define R_INITTED 0x00000010 /* request has been initialized */ +#define R_TPRINTFMSG 0x00000020 /* Did a tprintf msg. 
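/*
 * A minimal sketch, assuming r_flags and r_refs above are guarded by
 * the request's own r_mtx (r_lflags, per its comment, is instead
 * protected by the list mutex): marking a request for retransmission
 * then looks like this, with R_MUSTRESEND among the r_flags bits
 * defined below:
 */
static void
mark_must_resend_sketch(struct nfsreq *req)
{
	lck_mtx_lock(&req->r_mtx);
	req->r_flags |= R_MUSTRESEND;
	lck_mtx_unlock(&req->r_mtx);
}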
*/ +#define R_MUSTRESEND 0x00000040 /* Must resend request */ +#define R_ALLOCATED 0x00000080 /* request was allocated */ +#define R_SENT 0x00000100 /* request has been sent */ +#define R_WAITSENT 0x00000200 /* someone is waiting for request to be sent */ +#define R_RESENDERR 0x00000400 /* resend failed */ +#define R_JBTPRINTFMSG 0x00000800 /* Did a tprintf msg for jukebox error */ +#define R_ASYNC 0x00001000 /* async request */ +#define R_ASYNCWAIT 0x00002000 /* async request now being waited on */ +#define R_RESENDQ 0x00004000 /* async request currently on resendq */ +#define R_SENDING 0x00008000 /* request currently being sent */ +#define R_SOFT 0x00010000 /* request is soft - don't retry or reconnect */ +#define R_IOD 0x00020000 /* request is being managed by an IOD */ + +#define R_NOINTR 0x20000000 /* request should not be interrupted by a signal */ +#define R_RECOVER 0x40000000 /* a state recovery RPC - during NFSSTA_RECOVER */ +#define R_SETUP 0x80000000 /* a setup RPC - during (re)connection */ +#define R_OPTMASK 0xe0000000 /* mask of all RPC option flags */ /* Flag values for r_lflags */ -#define RL_BUSY 0x0001 /* Locked. */ -#define RL_WAITING 0x0002 /* Someone waiting for lock. */ -#define RL_QUEUED 0x0004 /* request is on the queue */ +#define RL_BUSY 0x0001 /* Locked. */ +#define RL_WAITING 0x0002 /* Someone waiting for lock. */ +#define RL_QUEUED 0x0004 /* request is on the queue */ extern u_int32_t nfs_xid, nfs_xidwrap; extern int nfs_iosize, nfs_allow_async, nfs_statfs_rate_limit; @@ -1001,20 +999,20 @@ extern uint32_t nfs_squishy_flags; extern uint32_t nfs_debug_ctl; /* bits for nfs_idmap_ctrl: */ -#define NFS_IDMAP_CTRL_USE_IDMAP_SERVICE 0x00000001 /* use the ID mapping service */ -#define NFS_IDMAP_CTRL_FALLBACK_NO_COMMON_IDS 0x00000002 /* fallback should NOT handle common IDs like "root" and "nobody" */ -#define NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS 0x00000020 /* log failed ID mapping attempts */ -#define NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS 0x00000040 /* log successful ID mapping attempts */ +#define NFS_IDMAP_CTRL_USE_IDMAP_SERVICE 0x00000001 /* use the ID mapping service */ +#define NFS_IDMAP_CTRL_FALLBACK_NO_COMMON_IDS 0x00000002 /* fallback should NOT handle common IDs like "root" and "nobody" */ +#define NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS 0x00000020 /* log failed ID mapping attempts */ +#define NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS 0x00000040 /* log successful ID mapping attempts */ -#define NFSIOD_MAX (MIN(nfsiod_thread_max, NFS_MAXASYNCTHREAD)) +#define NFSIOD_MAX (MIN(nfsiod_thread_max, NFS_MAXASYNCTHREAD)) struct nfs_dulookup { - int du_flags; /* state of ._ lookup */ -#define NFS_DULOOKUP_DOIT 0x1 -#define NFS_DULOOKUP_INPROG 0x2 - struct componentname du_cn; /* ._ name being looked up */ - struct nfsreq du_req; /* NFS request for lookup */ - char du_smallname[48]; /* buffer for small names */ + int du_flags; /* state of ._ lookup */ +#define NFS_DULOOKUP_DOIT 0x1 +#define NFS_DULOOKUP_INPROG 0x2 + struct componentname du_cn; /* ._ name being looked up */ + struct nfsreq du_req; /* NFS request for lookup */ + char du_smallname[48]; /* buffer for small names */ }; /* @@ -1022,46 +1020,46 @@ struct nfs_dulookup { * server is servicing requests on.
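/*
 * A minimal sketch, assuming (from the nfs_xid/nfs_xidwrap pair above)
 * that r_xid is kept 64 bits wide so the upper word can count trips
 * around the 32-bit XID space while only the low word, R_XID32(),
 * goes on the wire:
 */
static uint32_t
onwire_xid_sketch(const struct nfsreq *req)
{
	return (uint32_t)R_XID32(req->r_xid);
}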
*/ struct nfsrv_sock { - TAILQ_ENTRY(nfsrv_sock) ns_chain; /* List of all nfsrv_sock's */ - TAILQ_ENTRY(nfsrv_sock) ns_svcq; /* List of sockets needing servicing */ - TAILQ_ENTRY(nfsrv_sock) ns_wgq; /* List of sockets with a pending write gather */ - struct nfsrv_uc_arg *ns_ua; /* Opaque pointer to upcall */ - lck_rw_t ns_rwlock; /* lock for most fields */ - socket_t ns_so; - mbuf_t ns_nam; - mbuf_t ns_raw; - mbuf_t ns_rawend; - mbuf_t ns_rec; - mbuf_t ns_recend; - mbuf_t ns_frag; - int ns_flag; - int ns_sotype; - int ns_cc; - int ns_reclen; - int ns_reccnt; - u_int32_t ns_sref; - time_t ns_timestamp; /* socket timestamp */ - lck_mtx_t ns_wgmutex; /* mutex for write gather fields */ - u_quad_t ns_wgtime; /* next Write deadline (usec) */ - LIST_HEAD(, nfsrv_descript) ns_tq; /* Write gather lists */ + TAILQ_ENTRY(nfsrv_sock) ns_chain; /* List of all nfsrv_sock's */ + TAILQ_ENTRY(nfsrv_sock) ns_svcq; /* List of sockets needing servicing */ + TAILQ_ENTRY(nfsrv_sock) ns_wgq; /* List of sockets with a pending write gather */ + struct nfsrv_uc_arg *ns_ua; /* Opaque pointer to upcall */ + lck_rw_t ns_rwlock; /* lock for most fields */ + socket_t ns_so; + mbuf_t ns_nam; + mbuf_t ns_raw; + mbuf_t ns_rawend; + mbuf_t ns_rec; + mbuf_t ns_recend; + mbuf_t ns_frag; + int ns_flag; + int ns_sotype; + int ns_cc; + int ns_reclen; + int ns_reccnt; + u_int32_t ns_sref; + time_t ns_timestamp; /* socket timestamp */ + lck_mtx_t ns_wgmutex; /* mutex for write gather fields */ + u_quad_t ns_wgtime; /* next Write deadline (usec) */ + LIST_HEAD(, nfsrv_descript) ns_tq; /* Write gather lists */ LIST_HEAD(nfsrv_wg_delayhash, nfsrv_descript) ns_wdelayhashtbl[NFS_WDELAYHASHSIZ]; }; /* Bits for "ns_flag" */ -#define SLP_VALID 0x0001 /* nfs sock valid */ -#define SLP_DOREC 0x0002 /* nfs sock has received data to process */ -#define SLP_NEEDQ 0x0004 /* network socket has data to receive */ -#define SLP_DISCONN 0x0008 /* socket needs to be zapped */ -#define SLP_GETSTREAM 0x0010 /* currently in nfsrv_getstream() */ -#define SLP_LASTFRAG 0x0020 /* on last fragment of RPC record */ -#define SLP_DOWRITES 0x0040 /* nfs sock has gathered writes to service */ -#define SLP_WORKTODO 0x004e /* mask of all "work to do" flags */ -#define SLP_ALLFLAGS 0x007f -#define SLP_WAITQ 0x4000 /* nfs sock is on the wait queue */ -#define SLP_WORKQ 0x8000 /* nfs sock is on the work queue */ -#define SLP_QUEUED 0xc000 /* nfs sock is on a queue */ - -#define SLPNOLIST ((struct nfsrv_sock *)0xdeadbeef) /* sentinel value for sockets not in the nfsrv_sockwg list */ +#define SLP_VALID 0x0001 /* nfs sock valid */ +#define SLP_DOREC 0x0002 /* nfs sock has received data to process */ +#define SLP_NEEDQ 0x0004 /* network socket has data to receive */ +#define SLP_DISCONN 0x0008 /* socket needs to be zapped */ +#define SLP_GETSTREAM 0x0010 /* currently in nfsrv_getstream() */ +#define SLP_LASTFRAG 0x0020 /* on last fragment of RPC record */ +#define SLP_DOWRITES 0x0040 /* nfs sock has gathered writes to service */ +#define SLP_WORKTODO 0x004e /* mask of all "work to do" flags */ +#define SLP_ALLFLAGS 0x007f +#define SLP_WAITQ 0x4000 /* nfs sock is on the wait queue */ +#define SLP_WORKQ 0x8000 /* nfs sock is on the work queue */ +#define SLP_QUEUED 0xc000 /* nfs sock is on a queue */ + +#define SLPNOLIST ((struct nfsrv_sock *)0xdeadbeef) /* sentinel value for sockets not in the nfsrv_sockwg list */ extern struct nfsrv_sock *nfsrv_udpsock, *nfsrv_udp6sock; @@ -1074,7 +1072,7 @@ extern struct nfsrv_sock *nfsrv_udpsock, *nfsrv_udp6sock; * nfsrv_sockwg - 
sockets with pending write gather input (ns_wgq) */ extern TAILQ_HEAD(nfsrv_sockhead, nfsrv_sock) nfsrv_socklist, nfsrv_sockwg, - nfsrv_sockwait, nfsrv_sockwork; +nfsrv_sockwait, nfsrv_sockwork; /* lock groups for nfsrv_sock's */ extern lck_grp_t *nfsrv_slp_rwlock_group; @@ -1084,51 +1082,51 @@ extern lck_grp_t *nfsrv_slp_mutex_group; * One of these structures is allocated for each nfsd. */ struct nfsd { - TAILQ_ENTRY(nfsd) nfsd_chain; /* List of all nfsd's */ - TAILQ_ENTRY(nfsd) nfsd_queue; /* List of waiting nfsd's */ - int nfsd_flag; /* NFSD_ flags */ - struct nfsrv_sock *nfsd_slp; /* Current socket */ - struct nfsrv_descript *nfsd_nd; /* Associated nfsrv_descript */ + TAILQ_ENTRY(nfsd) nfsd_chain; /* List of all nfsd's */ + TAILQ_ENTRY(nfsd) nfsd_queue; /* List of waiting nfsd's */ + int nfsd_flag; /* NFSD_ flags */ + struct nfsrv_sock *nfsd_slp; /* Current socket */ + struct nfsrv_descript *nfsd_nd; /* Associated nfsrv_descript */ }; /* Bits for "nfsd_flag" */ -#define NFSD_WAITING 0x01 -#define NFSD_REQINPROG 0x02 +#define NFSD_WAITING 0x01 +#define NFSD_REQINPROG 0x02 /* * This structure is used by the server for describing each request. * Some fields are used only when write request gathering is performed. */ struct nfsrv_descript { - u_quad_t nd_time; /* Write deadline (usec) */ - off_t nd_off; /* Start byte offset */ - off_t nd_eoff; /* and end byte offset */ - LIST_ENTRY(nfsrv_descript) nd_hash; /* Hash list */ - LIST_ENTRY(nfsrv_descript) nd_tq; /* and timer list */ - LIST_HEAD(,nfsrv_descript) nd_coalesce; /* coalesced writes */ - struct nfsm_chain nd_nmreq; /* Request mbuf chain */ - mbuf_t nd_mrep; /* Reply mbuf list (WG) */ - mbuf_t nd_nam; /* and socket addr */ - mbuf_t nd_nam2; /* return socket addr */ - u_int32_t nd_procnum; /* RPC # */ - int nd_stable; /* storage type */ - int nd_vers; /* NFS version */ - int nd_len; /* Length of this write */ - int nd_repstat; /* Reply status */ - u_int32_t nd_retxid; /* Reply xid */ - struct timeval nd_starttime; /* Time RPC initiated */ - struct nfs_filehandle nd_fh; /* File handle */ - uint32_t nd_sec; /* Security flavor */ - struct nfs_gss_svc_ctx *nd_gss_context;/* RPCSEC_GSS context */ - uint32_t nd_gss_seqnum; /* RPCSEC_GSS seq num */ - mbuf_t nd_gss_mb; /* RPCSEC_GSS results mbuf */ - kauth_cred_t nd_cr; /* Credentials */ + u_quad_t nd_time; /* Write deadline (usec) */ + off_t nd_off; /* Start byte offset */ + off_t nd_eoff; /* and end byte offset */ + LIST_ENTRY(nfsrv_descript) nd_hash; /* Hash list */ + LIST_ENTRY(nfsrv_descript) nd_tq; /* and timer list */ + LIST_HEAD(, nfsrv_descript) nd_coalesce; /* coalesced writes */ + struct nfsm_chain nd_nmreq; /* Request mbuf chain */ + mbuf_t nd_mrep; /* Reply mbuf list (WG) */ + mbuf_t nd_nam; /* and socket addr */ + mbuf_t nd_nam2; /* return socket addr */ + u_int32_t nd_procnum; /* RPC # */ + int nd_stable; /* storage type */ + int nd_vers; /* NFS version */ + int nd_len; /* Length of this write */ + int nd_repstat; /* Reply status */ + u_int32_t nd_retxid; /* Reply xid */ + struct timeval nd_starttime; /* Time RPC initiated */ + struct nfs_filehandle nd_fh; /* File handle */ + uint32_t nd_sec; /* Security flavor */ + struct nfs_gss_svc_ctx *nd_gss_context;/* RPCSEC_GSS context */ + uint32_t nd_gss_seqnum; /* RPCSEC_GSS seq num */ + mbuf_t nd_gss_mb; /* RPCSEC_GSS results mbuf */ + kauth_cred_t nd_cr; /* Credentials */ }; extern TAILQ_HEAD(nfsd_head, nfsd) nfsd_head, nfsd_queue; typedef int (*nfsrv_proc_t)(struct nfsrv_descript *, struct nfsrv_sock *, - vfs_context_t, mbuf_t *); + 
vfs_context_t, mbuf_t *); /* mutex for nfs server */ extern lck_mtx_t *nfsd_mutex; @@ -1146,12 +1144,12 @@ extern int nfs4_callback_timer_on; extern in_port_t nfs4_cb_port, nfs4_cb_port6; /* nfs timer call structures */ -extern thread_call_t nfs_request_timer_call; -extern thread_call_t nfs_buf_timer_call; -extern thread_call_t nfs4_callback_timer_call; -extern thread_call_t nfsrv_idlesock_timer_call; +extern thread_call_t nfs_request_timer_call; +extern thread_call_t nfs_buf_timer_call; +extern thread_call_t nfs4_callback_timer_call; +extern thread_call_t nfsrv_idlesock_timer_call; #if CONFIG_FSE -extern thread_call_t nfsrv_fmod_timer_call; +extern thread_call_t nfsrv_fmod_timer_call; #endif /* nfs 4 default domain for user mapping */ @@ -1159,105 +1157,105 @@ extern char nfs4_default_domain[MAXPATHLEN]; __BEGIN_DECLS -nfstype vtonfs_type(enum vtype, int); +nfstype vtonfs_type(enum vtype, int); enum vtype nfstov_type(nfstype, int); -int vtonfsv2_mode(enum vtype, mode_t); - -void nfs_mbuf_init(void); - -void nfs_nhinit(void); -void nfs_nhinit_finish(void); -u_long nfs_hash(u_char *, int); - -int nfs4_init_clientid(struct nfsmount *); -int nfs4_setclientid(struct nfsmount *); -int nfs4_renew(struct nfsmount *, int); -void nfs4_renew_timer(void *, void *); -void nfs4_mount_callback_setup(struct nfsmount *); -void nfs4_mount_callback_shutdown(struct nfsmount *); -void nfs4_cb_accept(socket_t, void *, int); -void nfs4_cb_rcv(socket_t, void *, int); -void nfs4_callback_timer(void *, void *); -int nfs4_secinfo_rpc(struct nfsmount *, struct nfsreq_secinfo_args *, kauth_cred_t, uint32_t *, int *); -int nfs4_get_fs_locations(struct nfsmount *, nfsnode_t, u_char *, int, const char *, vfs_context_t, struct nfs_fs_locations *); -void nfs_fs_locations_cleanup(struct nfs_fs_locations *); -void nfs4_default_attrs_for_referral_trigger(nfsnode_t, char *, int, struct nfs_vattr *, fhandle_t *); - -int nfs_sockaddr_cmp(struct sockaddr *, struct sockaddr *); -int nfs_connect(struct nfsmount *, int, int); -void nfs_disconnect(struct nfsmount *); -void nfs_need_reconnect(struct nfsmount *); -void nfs_mount_sock_thread_wake(struct nfsmount *); -int nfs_mount_check_dead_timeout(struct nfsmount *); -int nfs_mount_gone(struct nfsmount *); -void nfs_mount_rele(struct nfsmount *); -void nfs_mount_zombie(struct nfsmount *, int); -void nfs_mount_make_zombie(struct nfsmount *); - -void nfs_rpc_record_state_init(struct nfs_rpc_record_state *); -void nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *); -int nfs_rpc_record_read(socket_t, struct nfs_rpc_record_state *, int, int *, mbuf_t *); - -int nfs_getattr(nfsnode_t, struct nfs_vattr *, vfs_context_t, int); -int nfs_getattrcache(nfsnode_t, struct nfs_vattr *, int); -int nfs_loadattrcache(nfsnode_t, struct nfs_vattr *, u_int64_t *, int); -int nfs_attrcachetimeout(nfsnode_t); - -int nfs_buf_page_inval(vnode_t vp, off_t offset); -int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int); -int nfs_vinvalbuf2(vnode_t, int, thread_t, kauth_cred_t, int); -int nfs_vinvalbuf_internal(nfsnode_t, int, thread_t, kauth_cred_t, int, int); -void nfs_wait_bufs(nfsnode_t); - -int nfs_request_create(nfsnode_t, mount_t, struct nfsm_chain *, int, thread_t, kauth_cred_t, struct nfsreq **); -void nfs_request_destroy(struct nfsreq *); -void nfs_request_ref(struct nfsreq *, int); -void nfs_request_rele(struct nfsreq *); -int nfs_request_add_header(struct nfsreq *); -int nfs_request_send(struct nfsreq *, int); -void nfs_request_wait(struct nfsreq *); -int nfs_request_finish(struct nfsreq *, 
struct nfsm_chain *, int *); -int nfs_request(nfsnode_t, mount_t, struct nfsm_chain *, int, vfs_context_t, struct nfsreq_secinfo_args *, struct nfsm_chain *, u_int64_t *, int *); -int nfs_request2(nfsnode_t, mount_t, struct nfsm_chain *, int, thread_t, kauth_cred_t, struct nfsreq_secinfo_args *, int, struct nfsm_chain *, u_int64_t *, int *); -int nfs_request_gss(mount_t, struct nfsm_chain *, thread_t, kauth_cred_t, int, struct nfs_gss_clnt_ctx *, struct nfsm_chain *, int *); -int nfs_request_async(nfsnode_t, mount_t, struct nfsm_chain *, int, thread_t, kauth_cred_t, struct nfsreq_secinfo_args *, int, struct nfsreq_cbinfo *, struct nfsreq **); -int nfs_request_async_finish(struct nfsreq *, struct nfsm_chain *, u_int64_t *, int *); -void nfs_request_async_cancel(struct nfsreq *); -void nfs_request_timer(void *, void *); -int nfs_request_using_gss(struct nfsreq *); -void nfs_get_xid(uint64_t *); -int nfs_sigintr(struct nfsmount *, struct nfsreq *, thread_t, int); -int nfs_noremotehang(thread_t); - -int nfs_send(struct nfsreq *, int); -int nfs_sndlock(struct nfsreq *); -void nfs_sndunlock(struct nfsreq *); - -int nfs_uaddr2sockaddr(const char *, struct sockaddr *); - -int nfs_aux_request(struct nfsmount *, thread_t, struct sockaddr *, socket_t, int, mbuf_t, uint32_t, int, int, struct nfsm_chain *); -int nfs_portmap_lookup(struct nfsmount *, vfs_context_t, struct sockaddr *, socket_t, uint32_t, uint32_t, uint32_t, int); - -void nfs_location_next(struct nfs_fs_locations *, struct nfs_location_index *); -int nfs_location_index_cmp(struct nfs_location_index *, struct nfs_location_index *); -void nfs_location_mntfromname(struct nfs_fs_locations *, struct nfs_location_index, char *, int, int); -int nfs_socket_create(struct nfsmount *, struct sockaddr *, int, in_port_t, uint32_t, uint32_t, int, struct nfs_socket **); -void nfs_socket_destroy(struct nfs_socket *); -void nfs_socket_options(struct nfsmount *, struct nfs_socket *); -void nfs_connect_upcall(socket_t, void *, int); -int nfs_connect_error_class(int); -int nfs_connect_search_loop(struct nfsmount *, struct nfs_socket_search *); -void nfs_socket_search_update_error(struct nfs_socket_search *, int); -void nfs_socket_search_cleanup(struct nfs_socket_search *); -void nfs_mount_connect_thread(void *, __unused wait_result_t); - -int nfs_lookitup(nfsnode_t, char *, int, vfs_context_t, nfsnode_t *); -void nfs_dulookup_init(struct nfs_dulookup *, nfsnode_t, const char *, int, vfs_context_t); -void nfs_dulookup_start(struct nfs_dulookup *, nfsnode_t, vfs_context_t); -void nfs_dulookup_finish(struct nfs_dulookup *, nfsnode_t, vfs_context_t); -int nfs_dir_buf_cache_lookup(nfsnode_t, nfsnode_t *, struct componentname *, vfs_context_t, int); -int nfs_dir_buf_search(struct nfsbuf *, struct componentname *, fhandle_t *, struct nfs_vattr *, uint64_t *, time_t *, daddr64_t *, int); -void nfs_name_cache_purge(nfsnode_t, nfsnode_t, struct componentname *, vfs_context_t); +int vtonfsv2_mode(enum vtype, mode_t); + +void nfs_mbuf_init(void); + +void nfs_nhinit(void); +void nfs_nhinit_finish(void); +u_long nfs_hash(u_char *, int); + +int nfs4_init_clientid(struct nfsmount *); +int nfs4_setclientid(struct nfsmount *); +int nfs4_renew(struct nfsmount *, int); +void nfs4_renew_timer(void *, void *); +void nfs4_mount_callback_setup(struct nfsmount *); +void nfs4_mount_callback_shutdown(struct nfsmount *); +void nfs4_cb_accept(socket_t, void *, int); +void nfs4_cb_rcv(socket_t, void *, int); +void nfs4_callback_timer(void *, void *); +int nfs4_secinfo_rpc(struct nfsmount 
*, struct nfsreq_secinfo_args *, kauth_cred_t, uint32_t *, int *); +int nfs4_get_fs_locations(struct nfsmount *, nfsnode_t, u_char *, int, const char *, vfs_context_t, struct nfs_fs_locations *); +void nfs_fs_locations_cleanup(struct nfs_fs_locations *); +void nfs4_default_attrs_for_referral_trigger(nfsnode_t, char *, int, struct nfs_vattr *, fhandle_t *); + +int nfs_sockaddr_cmp(struct sockaddr *, struct sockaddr *); +int nfs_connect(struct nfsmount *, int, int); +void nfs_disconnect(struct nfsmount *); +void nfs_need_reconnect(struct nfsmount *); +void nfs_mount_sock_thread_wake(struct nfsmount *); +int nfs_mount_check_dead_timeout(struct nfsmount *); +int nfs_mount_gone(struct nfsmount *); +void nfs_mount_rele(struct nfsmount *); +void nfs_mount_zombie(struct nfsmount *, int); +void nfs_mount_make_zombie(struct nfsmount *); + +void nfs_rpc_record_state_init(struct nfs_rpc_record_state *); +void nfs_rpc_record_state_cleanup(struct nfs_rpc_record_state *); +int nfs_rpc_record_read(socket_t, struct nfs_rpc_record_state *, int, int *, mbuf_t *); + +int nfs_getattr(nfsnode_t, struct nfs_vattr *, vfs_context_t, int); +int nfs_getattrcache(nfsnode_t, struct nfs_vattr *, int); +int nfs_loadattrcache(nfsnode_t, struct nfs_vattr *, u_int64_t *, int); +int nfs_attrcachetimeout(nfsnode_t); + +int nfs_buf_page_inval(vnode_t vp, off_t offset); +int nfs_vinvalbuf(vnode_t, int, vfs_context_t, int); +int nfs_vinvalbuf2(vnode_t, int, thread_t, kauth_cred_t, int); +int nfs_vinvalbuf_internal(nfsnode_t, int, thread_t, kauth_cred_t, int, int); +void nfs_wait_bufs(nfsnode_t); + +int nfs_request_create(nfsnode_t, mount_t, struct nfsm_chain *, int, thread_t, kauth_cred_t, struct nfsreq **); +void nfs_request_destroy(struct nfsreq *); +void nfs_request_ref(struct nfsreq *, int); +void nfs_request_rele(struct nfsreq *); +int nfs_request_add_header(struct nfsreq *); +int nfs_request_send(struct nfsreq *, int); +void nfs_request_wait(struct nfsreq *); +int nfs_request_finish(struct nfsreq *, struct nfsm_chain *, int *); +int nfs_request(nfsnode_t, mount_t, struct nfsm_chain *, int, vfs_context_t, struct nfsreq_secinfo_args *, struct nfsm_chain *, u_int64_t *, int *); +int nfs_request2(nfsnode_t, mount_t, struct nfsm_chain *, int, thread_t, kauth_cred_t, struct nfsreq_secinfo_args *, int, struct nfsm_chain *, u_int64_t *, int *); +int nfs_request_gss(mount_t, struct nfsm_chain *, thread_t, kauth_cred_t, int, struct nfs_gss_clnt_ctx *, struct nfsm_chain *, int *); +int nfs_request_async(nfsnode_t, mount_t, struct nfsm_chain *, int, thread_t, kauth_cred_t, struct nfsreq_secinfo_args *, int, struct nfsreq_cbinfo *, struct nfsreq **); +int nfs_request_async_finish(struct nfsreq *, struct nfsm_chain *, u_int64_t *, int *); +void nfs_request_async_cancel(struct nfsreq *); +void nfs_request_timer(void *, void *); +int nfs_request_using_gss(struct nfsreq *); +void nfs_get_xid(uint64_t *); +int nfs_sigintr(struct nfsmount *, struct nfsreq *, thread_t, int); +int nfs_noremotehang(thread_t); + +int nfs_send(struct nfsreq *, int); +int nfs_sndlock(struct nfsreq *); +void nfs_sndunlock(struct nfsreq *); + +int nfs_uaddr2sockaddr(const char *, struct sockaddr *); + +int nfs_aux_request(struct nfsmount *, thread_t, struct sockaddr *, socket_t, int, mbuf_t, uint32_t, int, int, struct nfsm_chain *); +int nfs_portmap_lookup(struct nfsmount *, vfs_context_t, struct sockaddr *, socket_t, uint32_t, uint32_t, uint32_t, int); + +void nfs_location_next(struct nfs_fs_locations *, struct nfs_location_index *); +int 
nfs_location_index_cmp(struct nfs_location_index *, struct nfs_location_index *); +void nfs_location_mntfromname(struct nfs_fs_locations *, struct nfs_location_index, char *, int, int); +int nfs_socket_create(struct nfsmount *, struct sockaddr *, int, in_port_t, uint32_t, uint32_t, int, struct nfs_socket **); +void nfs_socket_destroy(struct nfs_socket *); +void nfs_socket_options(struct nfsmount *, struct nfs_socket *); +void nfs_connect_upcall(socket_t, void *, int); +int nfs_connect_error_class(int); +int nfs_connect_search_loop(struct nfsmount *, struct nfs_socket_search *); +void nfs_socket_search_update_error(struct nfs_socket_search *, int); +void nfs_socket_search_cleanup(struct nfs_socket_search *); +void nfs_mount_connect_thread(void *, __unused wait_result_t); + +int nfs_lookitup(nfsnode_t, char *, int, vfs_context_t, nfsnode_t *); +void nfs_dulookup_init(struct nfs_dulookup *, nfsnode_t, const char *, int, vfs_context_t); +void nfs_dulookup_start(struct nfs_dulookup *, nfsnode_t, vfs_context_t); +void nfs_dulookup_finish(struct nfs_dulookup *, nfsnode_t, vfs_context_t); +int nfs_dir_buf_cache_lookup(nfsnode_t, nfsnode_t *, struct componentname *, vfs_context_t, int); +int nfs_dir_buf_search(struct nfsbuf *, struct componentname *, fhandle_t *, struct nfs_vattr *, uint64_t *, time_t *, daddr64_t *, int); +void nfs_name_cache_purge(nfsnode_t, nfsnode_t, struct componentname *, vfs_context_t); uint32_t nfs4_ace_nfstype_to_vfstype(uint32_t, int *); uint32_t nfs4_ace_vfstype_to_nfstype(uint32_t, int *); @@ -1268,233 +1266,233 @@ uint32_t nfs4_ace_vfsrights_to_nfsmask(uint32_t); int nfs4_id2guid(char *, guid_t *, int); int nfs4_guid2id(guid_t *, char *, size_t *, int); -int nfs_parsefattr(struct nfsm_chain *, int, struct nfs_vattr *); -int nfs4_parsefattr(struct nfsm_chain *, struct nfs_fsattr *, struct nfs_vattr *, fhandle_t *, struct dqblk *, struct nfs_fs_locations *); -void nfs_vattr_set_supported(uint32_t *, struct vnode_attr *); -void nfs_vattr_set_bitmap(struct nfsmount *, uint32_t *, struct vnode_attr *); -void nfs3_pathconf_cache(struct nfsmount *, struct nfs_fsattr *); -int nfs3_mount_rpc(struct nfsmount *, struct sockaddr *, int, int, char *, vfs_context_t, int, fhandle_t *, struct nfs_sec *); -void nfs3_umount_rpc(struct nfsmount *, vfs_context_t, int); -int nfs_node_access_slot(nfsnode_t, uid_t, int); -void nfs_vnode_notify(nfsnode_t, uint32_t); - -void nfs_avoid_needless_id_setting_on_create(nfsnode_t, struct vnode_attr *, vfs_context_t); -int nfs4_create_rpc(vfs_context_t, nfsnode_t, struct componentname *, struct vnode_attr *, int, char *, nfsnode_t *); -int nfs_open_state_set_busy(nfsnode_t, thread_t); -void nfs_open_state_clear_busy(nfsnode_t); +int nfs_parsefattr(struct nfsm_chain *, int, struct nfs_vattr *); +int nfs4_parsefattr(struct nfsm_chain *, struct nfs_fsattr *, struct nfs_vattr *, fhandle_t *, struct dqblk *, struct nfs_fs_locations *); +void nfs_vattr_set_supported(uint32_t *, struct vnode_attr *); +void nfs_vattr_set_bitmap(struct nfsmount *, uint32_t *, struct vnode_attr *); +void nfs3_pathconf_cache(struct nfsmount *, struct nfs_fsattr *); +int nfs3_mount_rpc(struct nfsmount *, struct sockaddr *, int, int, char *, vfs_context_t, int, fhandle_t *, struct nfs_sec *); +void nfs3_umount_rpc(struct nfsmount *, vfs_context_t, int); +int nfs_node_access_slot(nfsnode_t, uid_t, int); +void nfs_vnode_notify(nfsnode_t, uint32_t); + +void nfs_avoid_needless_id_setting_on_create(nfsnode_t, struct vnode_attr *, vfs_context_t); +int nfs4_create_rpc(vfs_context_t, 
nfsnode_t, struct componentname *, struct vnode_attr *, int, char *, nfsnode_t *); +int nfs_open_state_set_busy(nfsnode_t, thread_t); +void nfs_open_state_clear_busy(nfsnode_t); struct nfs_open_owner *nfs_open_owner_find(struct nfsmount *, kauth_cred_t, int); -void nfs_open_owner_destroy(struct nfs_open_owner *); -void nfs_open_owner_ref(struct nfs_open_owner *); -void nfs_open_owner_rele(struct nfs_open_owner *); -int nfs_open_owner_set_busy(struct nfs_open_owner *, thread_t); -void nfs_open_owner_clear_busy(struct nfs_open_owner *); -void nfs_owner_seqid_increment(struct nfs_open_owner *, struct nfs_lock_owner *, int); -int nfs_open_file_find(nfsnode_t, struct nfs_open_owner *, struct nfs_open_file **, uint32_t, uint32_t, int); -int nfs_open_file_find_internal(nfsnode_t, struct nfs_open_owner *, struct nfs_open_file **, uint32_t, uint32_t, int); -void nfs_open_file_destroy(struct nfs_open_file *); -int nfs_open_file_set_busy(struct nfs_open_file *, thread_t); -void nfs_open_file_clear_busy(struct nfs_open_file *); -void nfs_open_file_add_open(struct nfs_open_file *, uint32_t, uint32_t, int); -void nfs_open_file_remove_open_find(struct nfs_open_file *, uint32_t, uint32_t, uint32_t *, uint32_t *, int*); -void nfs_open_file_remove_open(struct nfs_open_file *, uint32_t, uint32_t); -void nfs_get_stateid(nfsnode_t, thread_t, kauth_cred_t, nfs_stateid *); -int nfs4_open(nfsnode_t, struct nfs_open_file *, uint32_t, uint32_t, vfs_context_t); -int nfs4_open_delegated(nfsnode_t, struct nfs_open_file *, uint32_t, uint32_t, vfs_context_t); -int nfs_close(nfsnode_t, struct nfs_open_file *, uint32_t, uint32_t, vfs_context_t); -int nfs_check_for_locks(struct nfs_open_owner *, struct nfs_open_file *); -int nfs4_reopen(struct nfs_open_file *, thread_t); -int nfs4_open_rpc(struct nfs_open_file *, vfs_context_t, struct componentname *, struct vnode_attr *, vnode_t, vnode_t *, int, int, int); -int nfs4_open_rpc_internal(struct nfs_open_file *, vfs_context_t, thread_t, kauth_cred_t, struct componentname *, struct vnode_attr *, vnode_t, vnode_t *, int, int, int); -int nfs4_open_confirm_rpc(struct nfsmount *, nfsnode_t, u_char *, int, struct nfs_open_owner *, nfs_stateid *, thread_t, kauth_cred_t, struct nfs_vattr *, uint64_t *); -int nfs4_open_reopen_rpc(struct nfs_open_file *, thread_t, kauth_cred_t, struct componentname *, vnode_t, vnode_t *, int, int); -int nfs4_open_reclaim_rpc(struct nfs_open_file *, int, int); -int nfs4_claim_delegated_open_rpc(struct nfs_open_file *, int, int, int); -int nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *, int); -int nfs4_claim_delegated_state_for_node(nfsnode_t, int); -int nfs4_open_downgrade_rpc(nfsnode_t, struct nfs_open_file *, vfs_context_t); -int nfs4_close_rpc(nfsnode_t, struct nfs_open_file *, thread_t, kauth_cred_t, int); -void nfs4_delegation_return_enqueue(nfsnode_t); -int nfs4_delegation_return(nfsnode_t, int, thread_t, kauth_cred_t); -int nfs4_delegreturn_rpc(struct nfsmount *, u_char *, int, struct nfs_stateid *, int, thread_t, kauth_cred_t); -void nfs_release_open_state_for_node(nfsnode_t, int); -void nfs_revoke_open_state_for_node(nfsnode_t); +void nfs_open_owner_destroy(struct nfs_open_owner *); +void nfs_open_owner_ref(struct nfs_open_owner *); +void nfs_open_owner_rele(struct nfs_open_owner *); +int nfs_open_owner_set_busy(struct nfs_open_owner *, thread_t); +void nfs_open_owner_clear_busy(struct nfs_open_owner *); +void nfs_owner_seqid_increment(struct nfs_open_owner *, struct nfs_lock_owner *, int); +int nfs_open_file_find(nfsnode_t, 
struct nfs_open_owner *, struct nfs_open_file **, uint32_t, uint32_t, int); +int nfs_open_file_find_internal(nfsnode_t, struct nfs_open_owner *, struct nfs_open_file **, uint32_t, uint32_t, int); +void nfs_open_file_destroy(struct nfs_open_file *); +int nfs_open_file_set_busy(struct nfs_open_file *, thread_t); +void nfs_open_file_clear_busy(struct nfs_open_file *); +void nfs_open_file_add_open(struct nfs_open_file *, uint32_t, uint32_t, int); +void nfs_open_file_remove_open_find(struct nfs_open_file *, uint32_t, uint32_t, uint32_t *, uint32_t *, int*); +void nfs_open_file_remove_open(struct nfs_open_file *, uint32_t, uint32_t); +void nfs_get_stateid(nfsnode_t, thread_t, kauth_cred_t, nfs_stateid *); +int nfs4_open(nfsnode_t, struct nfs_open_file *, uint32_t, uint32_t, vfs_context_t); +int nfs4_open_delegated(nfsnode_t, struct nfs_open_file *, uint32_t, uint32_t, vfs_context_t); +int nfs_close(nfsnode_t, struct nfs_open_file *, uint32_t, uint32_t, vfs_context_t); +int nfs_check_for_locks(struct nfs_open_owner *, struct nfs_open_file *); +int nfs4_reopen(struct nfs_open_file *, thread_t); +int nfs4_open_rpc(struct nfs_open_file *, vfs_context_t, struct componentname *, struct vnode_attr *, vnode_t, vnode_t *, int, int, int); +int nfs4_open_rpc_internal(struct nfs_open_file *, vfs_context_t, thread_t, kauth_cred_t, struct componentname *, struct vnode_attr *, vnode_t, vnode_t *, int, int, int); +int nfs4_open_confirm_rpc(struct nfsmount *, nfsnode_t, u_char *, int, struct nfs_open_owner *, nfs_stateid *, thread_t, kauth_cred_t, struct nfs_vattr *, uint64_t *); +int nfs4_open_reopen_rpc(struct nfs_open_file *, thread_t, kauth_cred_t, struct componentname *, vnode_t, vnode_t *, int, int); +int nfs4_open_reclaim_rpc(struct nfs_open_file *, int, int); +int nfs4_claim_delegated_open_rpc(struct nfs_open_file *, int, int, int); +int nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *, int); +int nfs4_claim_delegated_state_for_node(nfsnode_t, int); +int nfs4_open_downgrade_rpc(nfsnode_t, struct nfs_open_file *, vfs_context_t); +int nfs4_close_rpc(nfsnode_t, struct nfs_open_file *, thread_t, kauth_cred_t, int); +void nfs4_delegation_return_enqueue(nfsnode_t); +int nfs4_delegation_return(nfsnode_t, int, thread_t, kauth_cred_t); +int nfs4_delegreturn_rpc(struct nfsmount *, u_char *, int, struct nfs_stateid *, int, thread_t, kauth_cred_t); +void nfs_release_open_state_for_node(nfsnode_t, int); +void nfs_revoke_open_state_for_node(nfsnode_t); struct nfs_lock_owner *nfs_lock_owner_find(nfsnode_t, proc_t, int); -void nfs_lock_owner_destroy(struct nfs_lock_owner *); -void nfs_lock_owner_ref(struct nfs_lock_owner *); -void nfs_lock_owner_rele(struct nfs_lock_owner *); -int nfs_lock_owner_set_busy(struct nfs_lock_owner *, thread_t); -void nfs_lock_owner_clear_busy(struct nfs_lock_owner *); -void nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *, struct nfs_file_lock *); +void nfs_lock_owner_destroy(struct nfs_lock_owner *); +void nfs_lock_owner_ref(struct nfs_lock_owner *); +void nfs_lock_owner_rele(struct nfs_lock_owner *); +int nfs_lock_owner_set_busy(struct nfs_lock_owner *, thread_t); +void nfs_lock_owner_clear_busy(struct nfs_lock_owner *); +void nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *, struct nfs_file_lock *); struct nfs_file_lock *nfs_file_lock_alloc(struct nfs_lock_owner *); -void nfs_file_lock_destroy(struct nfs_file_lock *); -int nfs_file_lock_conflict(struct nfs_file_lock *, struct nfs_file_lock *, int *); -int nfs4_lock_rpc(nfsnode_t, struct nfs_open_file *, struct 
nfs_file_lock *, int, int, thread_t, kauth_cred_t); -int nfs_unlock_rpc(nfsnode_t, struct nfs_lock_owner *, int, uint64_t, uint64_t, thread_t, kauth_cred_t, int); -int nfs_advlock_getlock(nfsnode_t, struct nfs_lock_owner *, struct flock *, uint64_t, uint64_t, vfs_context_t); -int nfs_advlock_setlock(nfsnode_t, struct nfs_open_file *, struct nfs_lock_owner *, int, uint64_t, uint64_t, int, short, vfs_context_t); -int nfs_advlock_unlock(nfsnode_t, struct nfs_open_file *, struct nfs_lock_owner *, uint64_t, uint64_t, int, vfs_context_t); +void nfs_file_lock_destroy(struct nfs_file_lock *); +int nfs_file_lock_conflict(struct nfs_file_lock *, struct nfs_file_lock *, int *); +int nfs4_lock_rpc(nfsnode_t, struct nfs_open_file *, struct nfs_file_lock *, int, int, thread_t, kauth_cred_t); +int nfs_unlock_rpc(nfsnode_t, struct nfs_lock_owner *, int, uint64_t, uint64_t, thread_t, kauth_cred_t, int); +int nfs_advlock_getlock(nfsnode_t, struct nfs_lock_owner *, struct flock *, uint64_t, uint64_t, vfs_context_t); +int nfs_advlock_setlock(nfsnode_t, struct nfs_open_file *, struct nfs_lock_owner *, int, uint64_t, uint64_t, int, short, vfs_context_t); +int nfs_advlock_unlock(nfsnode_t, struct nfs_open_file *, struct nfs_lock_owner *, uint64_t, uint64_t, int, vfs_context_t); nfsnode_t nfs4_named_attr_dir_get(nfsnode_t, int, vfs_context_t); -int nfs4_named_attr_get(nfsnode_t, struct componentname *, uint32_t, int, vfs_context_t, nfsnode_t *, struct nfs_open_file **); -int nfs4_named_attr_remove(nfsnode_t, nfsnode_t, const char *, vfs_context_t); - -int nfs_mount_state_in_use_start(struct nfsmount *, thread_t); -int nfs_mount_state_in_use_end(struct nfsmount *, int); -int nfs_mount_state_error_should_restart(int); -int nfs_mount_state_error_delegation_lost(int); -uint nfs_mount_state_max_restarts(struct nfsmount *); -int nfs_mount_state_wait_for_recovery(struct nfsmount *); -void nfs_need_recover(struct nfsmount *nmp, int error); -void nfs_recover(struct nfsmount *); - -int nfs_vnop_access(struct vnop_access_args *); -int nfs_vnop_remove(struct vnop_remove_args *); -int nfs_vnop_read(struct vnop_read_args *); -int nfs_vnop_write(struct vnop_write_args *); -int nfs_vnop_open(struct vnop_open_args *); -int nfs_vnop_close(struct vnop_close_args *); -int nfs_vnop_advlock(struct vnop_advlock_args *); -int nfs_vnop_mmap(struct vnop_mmap_args *); -int nfs_vnop_mnomap(struct vnop_mnomap_args *); - -int nfs4_vnop_create(struct vnop_create_args *); -int nfs4_vnop_mknod(struct vnop_mknod_args *); -int nfs4_vnop_close(struct vnop_close_args *); -int nfs4_vnop_getattr(struct vnop_getattr_args *); -int nfs4_vnop_link(struct vnop_link_args *); -int nfs4_vnop_mkdir(struct vnop_mkdir_args *); -int nfs4_vnop_rmdir(struct vnop_rmdir_args *); -int nfs4_vnop_symlink(struct vnop_symlink_args *); -int nfs4_vnop_getxattr(struct vnop_getxattr_args *); -int nfs4_vnop_setxattr(struct vnop_setxattr_args *); -int nfs4_vnop_removexattr(struct vnop_removexattr_args *); -int nfs4_vnop_listxattr(struct vnop_listxattr_args *); +int nfs4_named_attr_get(nfsnode_t, struct componentname *, uint32_t, int, vfs_context_t, nfsnode_t *, struct nfs_open_file **); +int nfs4_named_attr_remove(nfsnode_t, nfsnode_t, const char *, vfs_context_t); + +int nfs_mount_state_in_use_start(struct nfsmount *, thread_t); +int nfs_mount_state_in_use_end(struct nfsmount *, int); +int nfs_mount_state_error_should_restart(int); +int nfs_mount_state_error_delegation_lost(int); +uint nfs_mount_state_max_restarts(struct nfsmount *); +int 
nfs_mount_state_wait_for_recovery(struct nfsmount *); +void nfs_need_recover(struct nfsmount *nmp, int error); +void nfs_recover(struct nfsmount *); + +int nfs_vnop_access(struct vnop_access_args *); +int nfs_vnop_remove(struct vnop_remove_args *); +int nfs_vnop_read(struct vnop_read_args *); +int nfs_vnop_write(struct vnop_write_args *); +int nfs_vnop_open(struct vnop_open_args *); +int nfs_vnop_close(struct vnop_close_args *); +int nfs_vnop_advlock(struct vnop_advlock_args *); +int nfs_vnop_mmap(struct vnop_mmap_args *); +int nfs_vnop_mnomap(struct vnop_mnomap_args *); + +int nfs4_vnop_create(struct vnop_create_args *); +int nfs4_vnop_mknod(struct vnop_mknod_args *); +int nfs4_vnop_close(struct vnop_close_args *); +int nfs4_vnop_getattr(struct vnop_getattr_args *); +int nfs4_vnop_link(struct vnop_link_args *); +int nfs4_vnop_mkdir(struct vnop_mkdir_args *); +int nfs4_vnop_rmdir(struct vnop_rmdir_args *); +int nfs4_vnop_symlink(struct vnop_symlink_args *); +int nfs4_vnop_getxattr(struct vnop_getxattr_args *); +int nfs4_vnop_setxattr(struct vnop_setxattr_args *); +int nfs4_vnop_removexattr(struct vnop_removexattr_args *); +int nfs4_vnop_listxattr(struct vnop_listxattr_args *); #if NAMEDSTREAMS -int nfs4_vnop_getnamedstream(struct vnop_getnamedstream_args *); -int nfs4_vnop_makenamedstream(struct vnop_makenamedstream_args *); -int nfs4_vnop_removenamedstream(struct vnop_removenamedstream_args *); +int nfs4_vnop_getnamedstream(struct vnop_getnamedstream_args *); +int nfs4_vnop_makenamedstream(struct vnop_makenamedstream_args *); +int nfs4_vnop_removenamedstream(struct vnop_removenamedstream_args *); #endif -int nfs_read_rpc(nfsnode_t, uio_t, vfs_context_t); -int nfs_write_rpc(nfsnode_t, uio_t, vfs_context_t, int *, uint64_t *); -int nfs_write_rpc2(nfsnode_t, uio_t, thread_t, kauth_cred_t, int *, uint64_t *); - -int nfs3_access_rpc(nfsnode_t, u_int32_t *, int, vfs_context_t); -int nfs4_access_rpc(nfsnode_t, u_int32_t *, int, vfs_context_t); -int nfs3_getattr_rpc(nfsnode_t, mount_t, u_char *, size_t, int, vfs_context_t, struct nfs_vattr *, u_int64_t *); -int nfs4_getattr_rpc(nfsnode_t, mount_t, u_char *, size_t, int, vfs_context_t, struct nfs_vattr *, u_int64_t *); -int nfs3_setattr_rpc(nfsnode_t, struct vnode_attr *, vfs_context_t); -int nfs4_setattr_rpc(nfsnode_t, struct vnode_attr *, vfs_context_t); -int nfs3_read_rpc_async(nfsnode_t, off_t, size_t, thread_t, kauth_cred_t, struct nfsreq_cbinfo *, struct nfsreq **); -int nfs4_read_rpc_async(nfsnode_t, off_t, size_t, thread_t, kauth_cred_t, struct nfsreq_cbinfo *, struct nfsreq **); -int nfs3_read_rpc_async_finish(nfsnode_t, struct nfsreq *, uio_t, size_t *, int *); -int nfs4_read_rpc_async_finish(nfsnode_t, struct nfsreq *, uio_t, size_t *, int *); -int nfs3_write_rpc_async(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct nfsreq_cbinfo *, struct nfsreq **); -int nfs4_write_rpc_async(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct nfsreq_cbinfo *, struct nfsreq **); -int nfs3_write_rpc_async_finish(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); -int nfs4_write_rpc_async_finish(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); -int nfs3_readdir_rpc(nfsnode_t, struct nfsbuf *, vfs_context_t); -int nfs4_readdir_rpc(nfsnode_t, struct nfsbuf *, vfs_context_t); -int nfs3_readlink_rpc(nfsnode_t, char *, uint32_t *, vfs_context_t); -int nfs4_readlink_rpc(nfsnode_t, char *, uint32_t *, vfs_context_t); -int nfs3_commit_rpc(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); -int 
nfs4_commit_rpc(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); -int nfs3_lookup_rpc_async(nfsnode_t, char *, int, vfs_context_t, struct nfsreq **); -int nfs4_lookup_rpc_async(nfsnode_t, char *, int, vfs_context_t, struct nfsreq **); -int nfs3_lookup_rpc_async_finish(nfsnode_t, char *, int, vfs_context_t, struct nfsreq *, u_int64_t *, fhandle_t *, struct nfs_vattr *); -int nfs4_lookup_rpc_async_finish(nfsnode_t, char *, int, vfs_context_t, struct nfsreq *, u_int64_t *, fhandle_t *, struct nfs_vattr *); -int nfs3_remove_rpc(nfsnode_t, char *, int, thread_t, kauth_cred_t); -int nfs4_remove_rpc(nfsnode_t, char *, int, thread_t, kauth_cred_t); -int nfs3_rename_rpc(nfsnode_t, char *, int, nfsnode_t, char *, int, vfs_context_t); -int nfs4_rename_rpc(nfsnode_t, char *, int, nfsnode_t, char *, int, vfs_context_t); -int nfs3_pathconf_rpc(nfsnode_t, struct nfs_fsattr *, vfs_context_t); -int nfs4_pathconf_rpc(nfsnode_t, struct nfs_fsattr *, vfs_context_t); -int nfs3_setlock_rpc(nfsnode_t, struct nfs_open_file *, struct nfs_file_lock *, int, int, thread_t, kauth_cred_t); -int nfs4_setlock_rpc(nfsnode_t, struct nfs_open_file *, struct nfs_file_lock *, int, int, thread_t, kauth_cred_t); -int nfs3_unlock_rpc(nfsnode_t, struct nfs_lock_owner *, int, uint64_t, uint64_t, int, thread_t, kauth_cred_t); -int nfs4_unlock_rpc(nfsnode_t, struct nfs_lock_owner *, int, uint64_t, uint64_t, int, thread_t, kauth_cred_t); -int nfs3_getlock_rpc(nfsnode_t, struct nfs_lock_owner *, struct flock *, uint64_t, uint64_t, vfs_context_t); -int nfs4_getlock_rpc(nfsnode_t, struct nfs_lock_owner *, struct flock *, uint64_t, uint64_t, vfs_context_t); - -void nfsrv_active_user_list_reclaim(void); -void nfsrv_cleancache(void); -void nfsrv_cleanup(void); -int nfsrv_credcheck(struct nfsrv_descript *, vfs_context_t, struct nfs_export *, - struct nfs_export_options *); -void nfsrv_idlesock_timer(void *, void *); -int nfsrv_dorec(struct nfsrv_sock *, struct nfsd *, struct nfsrv_descript **); -int nfsrv_errmap(struct nfsrv_descript *, int); -int nfsrv_export(struct user_nfs_export_args *, vfs_context_t); -int nfsrv_fhmatch(struct nfs_filehandle *, struct nfs_filehandle *); -int nfsrv_fhtovp(struct nfs_filehandle *, struct nfsrv_descript *, vnode_t *, - struct nfs_export **, struct nfs_export_options **); -int nfsrv_check_exports_allow_address(mbuf_t); +int nfs_read_rpc(nfsnode_t, uio_t, vfs_context_t); +int nfs_write_rpc(nfsnode_t, uio_t, vfs_context_t, int *, uint64_t *); +int nfs_write_rpc2(nfsnode_t, uio_t, thread_t, kauth_cred_t, int *, uint64_t *); + +int nfs3_access_rpc(nfsnode_t, u_int32_t *, int, vfs_context_t); +int nfs4_access_rpc(nfsnode_t, u_int32_t *, int, vfs_context_t); +int nfs3_getattr_rpc(nfsnode_t, mount_t, u_char *, size_t, int, vfs_context_t, struct nfs_vattr *, u_int64_t *); +int nfs4_getattr_rpc(nfsnode_t, mount_t, u_char *, size_t, int, vfs_context_t, struct nfs_vattr *, u_int64_t *); +int nfs3_setattr_rpc(nfsnode_t, struct vnode_attr *, vfs_context_t); +int nfs4_setattr_rpc(nfsnode_t, struct vnode_attr *, vfs_context_t); +int nfs3_read_rpc_async(nfsnode_t, off_t, size_t, thread_t, kauth_cred_t, struct nfsreq_cbinfo *, struct nfsreq **); +int nfs4_read_rpc_async(nfsnode_t, off_t, size_t, thread_t, kauth_cred_t, struct nfsreq_cbinfo *, struct nfsreq **); +int nfs3_read_rpc_async_finish(nfsnode_t, struct nfsreq *, uio_t, size_t *, int *); +int nfs4_read_rpc_async_finish(nfsnode_t, struct nfsreq *, uio_t, size_t *, int *); +int nfs3_write_rpc_async(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct 
nfsreq_cbinfo *, struct nfsreq **); +int nfs4_write_rpc_async(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct nfsreq_cbinfo *, struct nfsreq **); +int nfs3_write_rpc_async_finish(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); +int nfs4_write_rpc_async_finish(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); +int nfs3_readdir_rpc(nfsnode_t, struct nfsbuf *, vfs_context_t); +int nfs4_readdir_rpc(nfsnode_t, struct nfsbuf *, vfs_context_t); +int nfs3_readlink_rpc(nfsnode_t, char *, uint32_t *, vfs_context_t); +int nfs4_readlink_rpc(nfsnode_t, char *, uint32_t *, vfs_context_t); +int nfs3_commit_rpc(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); +int nfs4_commit_rpc(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); +int nfs3_lookup_rpc_async(nfsnode_t, char *, int, vfs_context_t, struct nfsreq **); +int nfs4_lookup_rpc_async(nfsnode_t, char *, int, vfs_context_t, struct nfsreq **); +int nfs3_lookup_rpc_async_finish(nfsnode_t, char *, int, vfs_context_t, struct nfsreq *, u_int64_t *, fhandle_t *, struct nfs_vattr *); +int nfs4_lookup_rpc_async_finish(nfsnode_t, char *, int, vfs_context_t, struct nfsreq *, u_int64_t *, fhandle_t *, struct nfs_vattr *); +int nfs3_remove_rpc(nfsnode_t, char *, int, thread_t, kauth_cred_t); +int nfs4_remove_rpc(nfsnode_t, char *, int, thread_t, kauth_cred_t); +int nfs3_rename_rpc(nfsnode_t, char *, int, nfsnode_t, char *, int, vfs_context_t); +int nfs4_rename_rpc(nfsnode_t, char *, int, nfsnode_t, char *, int, vfs_context_t); +int nfs3_pathconf_rpc(nfsnode_t, struct nfs_fsattr *, vfs_context_t); +int nfs4_pathconf_rpc(nfsnode_t, struct nfs_fsattr *, vfs_context_t); +int nfs3_setlock_rpc(nfsnode_t, struct nfs_open_file *, struct nfs_file_lock *, int, int, thread_t, kauth_cred_t); +int nfs4_setlock_rpc(nfsnode_t, struct nfs_open_file *, struct nfs_file_lock *, int, int, thread_t, kauth_cred_t); +int nfs3_unlock_rpc(nfsnode_t, struct nfs_lock_owner *, int, uint64_t, uint64_t, int, thread_t, kauth_cred_t); +int nfs4_unlock_rpc(nfsnode_t, struct nfs_lock_owner *, int, uint64_t, uint64_t, int, thread_t, kauth_cred_t); +int nfs3_getlock_rpc(nfsnode_t, struct nfs_lock_owner *, struct flock *, uint64_t, uint64_t, vfs_context_t); +int nfs4_getlock_rpc(nfsnode_t, struct nfs_lock_owner *, struct flock *, uint64_t, uint64_t, vfs_context_t); + +void nfsrv_active_user_list_reclaim(void); +void nfsrv_cleancache(void); +void nfsrv_cleanup(void); +int nfsrv_credcheck(struct nfsrv_descript *, vfs_context_t, struct nfs_export *, + struct nfs_export_options *); +void nfsrv_idlesock_timer(void *, void *); +int nfsrv_dorec(struct nfsrv_sock *, struct nfsd *, struct nfsrv_descript **); +int nfsrv_errmap(struct nfsrv_descript *, int); +int nfsrv_export(struct user_nfs_export_args *, vfs_context_t); +int nfsrv_fhmatch(struct nfs_filehandle *, struct nfs_filehandle *); +int nfsrv_fhtovp(struct nfs_filehandle *, struct nfsrv_descript *, vnode_t *, + struct nfs_export **, struct nfs_export_options **); +int nfsrv_check_exports_allow_address(mbuf_t); #if CONFIG_FSE -void nfsrv_fmod_timer(void *, void *); +void nfsrv_fmod_timer(void *, void *); #endif -int nfsrv_getcache(struct nfsrv_descript *, struct nfsrv_sock *, mbuf_t *); -void nfsrv_group_sort(gid_t *, int); -void nfsrv_init(void); -void nfsrv_initcache(void); -int nfsrv_is_initialized(void); -int nfsrv_namei(struct nfsrv_descript *, vfs_context_t, struct nameidata *, - struct nfs_filehandle *, vnode_t *, - struct nfs_export **, struct nfs_export_options **); -void nfsrv_rcv(socket_t, void *, 
int); -void nfsrv_rcv_locked(socket_t, struct nfsrv_sock *, int); -int nfsrv_rephead(struct nfsrv_descript *, struct nfsrv_sock *, struct nfsm_chain *, size_t); -int nfsrv_send(struct nfsrv_sock *, mbuf_t, mbuf_t); -void nfsrv_updatecache(struct nfsrv_descript *, int, mbuf_t); -void nfsrv_update_user_stat(struct nfs_export *, struct nfsrv_descript *, uid_t, u_int, u_int, u_int); -int nfsrv_vptofh(struct nfs_export *, int, struct nfs_filehandle *, - vnode_t, vfs_context_t, struct nfs_filehandle *); -void nfsrv_wakenfsd(struct nfsrv_sock *); -void nfsrv_wg_timer(void *, void *); -int nfsrv_writegather(struct nfsrv_descript **, struct nfsrv_sock *, - vfs_context_t, mbuf_t *); - -int nfsrv_access(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_commit(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_create(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_fsinfo(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_getattr(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_link(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_lookup(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_mkdir(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_mknod(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_noop(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_null(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_pathconf(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_read(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_readdir(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_readdirplus(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_readlink(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_remove(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_rename(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_rmdir(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_setattr(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_statfs(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_symlink(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); -int nfsrv_write(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); - -void nfs_interval_timer_start(thread_call_t, int); -int nfs_use_cache(struct nfsmount *); -void nfs_up(struct nfsmount *, thread_t, int, const char *); -void nfs_down(struct nfsmount *, thread_t, int, int, const char *, int); -int nfs_msg(thread_t, const char *, const char *, int); - -int nfs_mountroot(void); +int nfsrv_getcache(struct nfsrv_descript *, struct nfsrv_sock *, mbuf_t *); +void nfsrv_group_sort(gid_t *, int); +void nfsrv_init(void); +void nfsrv_initcache(void); +int nfsrv_is_initialized(void); +int nfsrv_namei(struct nfsrv_descript *, vfs_context_t, struct nameidata *, + struct nfs_filehandle *, vnode_t *, + struct nfs_export **, struct nfs_export_options **); +void nfsrv_rcv(socket_t, void *, int); +void nfsrv_rcv_locked(socket_t, struct nfsrv_sock *, int); +int 
nfsrv_rephead(struct nfsrv_descript *, struct nfsrv_sock *, struct nfsm_chain *, size_t); +int nfsrv_send(struct nfsrv_sock *, mbuf_t, mbuf_t); +void nfsrv_updatecache(struct nfsrv_descript *, int, mbuf_t); +void nfsrv_update_user_stat(struct nfs_export *, struct nfsrv_descript *, uid_t, u_int, u_int, u_int); +int nfsrv_vptofh(struct nfs_export *, int, struct nfs_filehandle *, + vnode_t, vfs_context_t, struct nfs_filehandle *); +void nfsrv_wakenfsd(struct nfsrv_sock *); +void nfsrv_wg_timer(void *, void *); +int nfsrv_writegather(struct nfsrv_descript **, struct nfsrv_sock *, + vfs_context_t, mbuf_t *); + +int nfsrv_access(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_commit(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_create(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_fsinfo(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_getattr(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_link(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_lookup(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_mkdir(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_mknod(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_noop(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_null(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_pathconf(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_read(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_readdir(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_readdirplus(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_readlink(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_remove(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_rename(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_rmdir(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_setattr(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_statfs(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_symlink(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); +int nfsrv_write(struct nfsrv_descript *, struct nfsrv_sock *, vfs_context_t, mbuf_t *); + +void nfs_interval_timer_start(thread_call_t, int); +int nfs_use_cache(struct nfsmount *); +void nfs_up(struct nfsmount *, thread_t, int, const char *); +void nfs_down(struct nfsmount *, thread_t, int, int, const char *, int); +int nfs_msg(thread_t, const char *, const char *, int); + +int nfs_mountroot(void); struct nfs_diskless; -int nfs_boot_init(struct nfs_diskless *); -int nfs_boot_getfh(struct nfs_diskless *, int, int); +int nfs_boot_init(struct nfs_diskless *); +int nfs_boot_getfh(struct nfs_diskless *, int, int); #if CONFIG_TRIGGERS resolver_result_t nfs_mirror_mount_trigger_resolve(vnode_t, const struct componentname *, enum path_operation, int, void *, vfs_context_t); resolver_result_t nfs_mirror_mount_trigger_unresolve(vnode_t, int, void *, vfs_context_t); resolver_result_t nfs_mirror_mount_trigger_rearm(vnode_t, int, void *, vfs_context_t); 
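All of the nfsrv_* request handlers declared above share the nfsrv_proc_t signature from earlier in this header, which is what allows the server to dispatch an incoming RPC through a table indexed by nd_procnum. A minimal sketch of that pattern follows; the table name, its ordering, and the fallback to nfsrv_noop are illustrative assumptions, not xnu's actual dispatch code.

static nfsrv_proc_t example_procs[] = {   /* hypothetical table; order illustrative */
	nfsrv_null,
	nfsrv_getattr,
	nfsrv_setattr,
	nfsrv_lookup,
	/* ... one entry per NFS procedure number ... */
};

static int
example_dispatch(struct nfsrv_descript *nd, struct nfsrv_sock *slp,
    vfs_context_t ctx, mbuf_t *mrepp)
{
	/* nd_procnum was parsed from the RPC header (see struct nfsrv_descript) */
	if (nd->nd_procnum >= sizeof(example_procs) / sizeof(example_procs[0])) {
		return nfsrv_noop(nd, slp, ctx, mrepp);   /* unknown procedure */
	}
	return (*example_procs[nd->nd_procnum])(nd, slp, ctx, mrepp);
}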
-int nfs_mirror_mount_domount(vnode_t, vnode_t, vfs_context_t); -void nfs_ephemeral_mount_harvester_start(void); -void nfs_ephemeral_mount_harvester(__unused void *arg, __unused wait_result_t wr); +int nfs_mirror_mount_domount(vnode_t, vnode_t, vfs_context_t); +void nfs_ephemeral_mount_harvester_start(void); +void nfs_ephemeral_mount_harvester(__unused void *arg, __unused wait_result_t wr); #endif /* socket upcall interfaces */ @@ -1502,29 +1500,29 @@ void nfsrv_uc_init(void); void nfsrv_uc_cleanup(void); void nfsrv_uc_addsock(struct nfsrv_sock *, int); void nfsrv_uc_dequeue(struct nfsrv_sock *); - + /* Debug support */ #define NFS_DEBUG_LEVEL (nfs_debug_ctl & 0xf) #define NFS_DEBUG_FACILITY ((nfs_debug_ctl >> 4) & 0xff) #define NFS_DEBUG_FLAGS ((nfs_debug_ctl >> 12) & 0xff) #define NFS_DEBUG_VALUE ((nfs_debug_ctl >> 20) & 0xfff) -#define NFS_FAC_SOCK 0x01 -#define NFS_FAC_STATE 0x02 -#define NFS_FAC_NODE 0x04 -#define NFS_FAC_VNOP 0x08 -#define NFS_FAC_BIO 0x10 -#define NFS_FAC_GSS 0x20 -#define NFS_FAC_VFS 0x40 +#define NFS_FAC_SOCK 0x01 +#define NFS_FAC_STATE 0x02 +#define NFS_FAC_NODE 0x04 +#define NFS_FAC_VNOP 0x08 +#define NFS_FAC_BIO 0x10 +#define NFS_FAC_GSS 0x20 +#define NFS_FAC_VFS 0x40 #define NFS_DBG(fac, lev, fmt, ...) \ if (__builtin_expect(NFS_DEBUG_LEVEL, 0)) nfs_printf(fac, lev, "%s: %d: " fmt, __func__, __LINE__, ## __VA_ARGS__) -void nfs_printf(int, int, const char *, ...) __printflike(3,4); +void nfs_printf(int, int, const char *, ...) __printflike(3, 4); int nfs_mountopts(struct nfsmount *, char *, int); __END_DECLS -#endif /* KERNEL */ +#endif /* KERNEL */ #endif /* __APPLE_API_PRIVATE */ #endif diff --git a/bsd/nfs/nfs4_subs.c b/bsd/nfs/nfs4_subs.c index d16cf84e3..b906597c5 100644 --- a/bsd/nfs/nfs4_subs.c +++ b/bsd/nfs/nfs4_subs.c @@ -2,7 +2,7 @@ * Copyright (c) 2006-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -77,11 +77,11 @@ * limit. This should be changed if kauth routines change. * * We also want some reasonable maximum, as 32 bits worth of string length - * is liable to cause problems. At the very least this limit must guarantee + * is liable to cause problems. At the very least this limit must guarantee * that any expression that contains the 32 bit length from off the wire used in * allocations does not overflow. */ -#define NFS_MAX_WHO MAXPATHLEN +#define NFS_MAX_WHO MAXPATHLEN /* * Create the unique client ID to use for this mount. 
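The NFS_DEBUG_* accessors above decode a single nfs_debug_ctl word: bits 0-3 carry the debug level, bits 4-11 a facility mask assembled from the NFS_FAC_* bits, bits 12-19 flags, and bits 20-31 an auxiliary value; per the NFS_DBG macro, logging only fires when the level field is nonzero. A small helper that packs a control word the same way (the helper itself is illustrative, not part of the patch):

static inline uint32_t
nfs_debug_ctl_pack(uint32_t level, uint32_t facilities, uint32_t flags, uint32_t value)
{
	return (level & 0xf) |                  /* NFS_DEBUG_LEVEL */
	       ((facilities & 0xff) << 4) |     /* NFS_DEBUG_FACILITY */
	       ((flags & 0xff) << 12) |         /* NFS_DEBUG_FLAGS */
	       ((value & 0xfff) << 20);         /* NFS_DEBUG_VALUE */
}

/* e.g. level-3 logging for the socket and GSS facilities: */
/* nfs_debug_ctl = nfs_debug_ctl_pack(3, NFS_FAC_SOCK | NFS_FAC_GSS, 0, 0); */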
@@ -121,87 +121,103 @@ nfs4_init_clientid(struct nfsmount *nmp) if (!en0addr_set) { ifnet_t interface = NULL; error = ifnet_find_by_name("en0", &interface); - if (!error) + if (!error) { error = ifnet_lladdr_copy_bytes(interface, en0addr, sizeof(en0addr)); - if (error) + } + if (error) { printf("nfs4_init_clientid: error getting en0 address, %d\n", error); - if (!error) + } + if (!error) { en0addr_set = 1; - if (interface) + } + if (interface) { ifnet_release(interface); + } } lck_mtx_unlock(nfs_global_mutex); MALLOC(ncip, struct nfs_client_id *, sizeof(struct nfs_client_id), M_TEMP, M_WAITOK); - if (!ncip) - return (ENOMEM); + if (!ncip) { + return ENOMEM; + } vsfs = vfs_statfs(nmp->nm_mountp); saddr = nmp->nm_saddr; ncip->nci_idlen = sizeof(uint32_t) + sizeof(en0addr) + saddr->sa_len + - strlen(vsfs->f_mntfromname) + 1 + strlen(vsfs->f_mntonname) + 1; - if (ncip->nci_idlen > NFS4_OPAQUE_LIMIT) + strlen(vsfs->f_mntfromname) + 1 + strlen(vsfs->f_mntonname) + 1; + if (ncip->nci_idlen > NFS4_OPAQUE_LIMIT) { ncip->nci_idlen = NFS4_OPAQUE_LIMIT; + } MALLOC(ncip->nci_id, char *, ncip->nci_idlen, M_TEMP, M_WAITOK); if (!ncip->nci_id) { FREE(ncip, M_TEMP); - return (ENOMEM); + return ENOMEM; } *(uint32_t*)ncip->nci_id = 0; len = sizeof(uint32_t); - len2 = min(sizeof(en0addr), ncip->nci_idlen-len); + len2 = min(sizeof(en0addr), ncip->nci_idlen - len); bcopy(en0addr, &ncip->nci_id[len], len2); len += sizeof(en0addr); - len2 = min(saddr->sa_len, ncip->nci_idlen-len); + len2 = min(saddr->sa_len, ncip->nci_idlen - len); bcopy(saddr, &ncip->nci_id[len], len2); len += len2; if (len < ncip->nci_idlen) { - len2 = strlcpy(&ncip->nci_id[len], vsfs->f_mntfromname, ncip->nci_idlen-len); - if (len2 < (ncip->nci_idlen - len)) + len2 = strlcpy(&ncip->nci_id[len], vsfs->f_mntfromname, ncip->nci_idlen - len); + if (len2 < (ncip->nci_idlen - len)) { len += len2 + 1; - else + } else { len = ncip->nci_idlen; + } } if (len < ncip->nci_idlen) { - len2 = strlcpy(&ncip->nci_id[len], vsfs->f_mntonname, ncip->nci_idlen-len); - if (len2 < (ncip->nci_idlen - len)) + len2 = strlcpy(&ncip->nci_id[len], vsfs->f_mntonname, ncip->nci_idlen - len); + if (len2 < (ncip->nci_idlen - len)) { len += len2 + 1; - else + } else { len = ncip->nci_idlen; + } } /* make sure the ID is unique, and add it to the sorted list */ lck_mtx_lock(nfs_global_mutex); TAILQ_FOREACH(ncip2, &nfsclientids, nci_link) { - if (ncip->nci_idlen > ncip2->nci_idlen) + if (ncip->nci_idlen > ncip2->nci_idlen) { continue; - if (ncip->nci_idlen < ncip2->nci_idlen) + } + if (ncip->nci_idlen < ncip2->nci_idlen) { break; + } cmp = bcmp(ncip->nci_id + sizeof(uint32_t), - ncip2->nci_id + sizeof(uint32_t), - ncip->nci_idlen - sizeof(uint32_t)); - if (cmp > 0) + ncip2->nci_id + sizeof(uint32_t), + ncip->nci_idlen - sizeof(uint32_t)); + if (cmp > 0) { continue; - if (cmp < 0) + } + if (cmp < 0) { break; - if (*(uint32_t*)ncip->nci_id > *(uint32_t*)ncip2->nci_id) + } + if (*(uint32_t*)ncip->nci_id > *(uint32_t*)ncip2->nci_id) { continue; - if (*(uint32_t*)ncip->nci_id < *(uint32_t*)ncip2->nci_id) + } + if (*(uint32_t*)ncip->nci_id < *(uint32_t*)ncip2->nci_id) { break; + } *(uint32_t*)ncip->nci_id += 1; } - if (*(uint32_t*)ncip->nci_id) + if (*(uint32_t*)ncip->nci_id) { printf("nfs client ID collision (%d) for %s on %s\n", *(uint32_t*)ncip->nci_id, - vsfs->f_mntfromname, vsfs->f_mntonname); - if (ncip2) + vsfs->f_mntfromname, vsfs->f_mntonname); + } + if (ncip2) { TAILQ_INSERT_BEFORE(ncip2, ncip, nci_link); - else + } else { TAILQ_INSERT_TAIL(&nfsclientids, ncip, nci_link); + } 
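The hunk above assembles the long-form client ID as a leading 32-bit collision counter followed by the en0 hardware address, the server's sockaddr, and the NUL-terminated f_mntfromname and f_mntonname strings, capping the whole ID at NFS4_OPAQUE_LIMIT; the counter starts at zero and is bumped while walking the sorted nfsclientids list until the ID is unique. A sketch of the length computation, mirroring the code above (the helper name is illustrative):

static size_t
example_clientid_len(size_t en0len, size_t salen, const char *mntfrom, const char *mnton)
{
	size_t len = sizeof(uint32_t)   /* collision counter, initially 0 */
	    + en0len                    /* en0 hardware address */
	    + salen                     /* server sockaddr (sa_len bytes) */
	    + strlen(mntfrom) + 1       /* f_mntfromname + NUL */
	    + strlen(mnton) + 1;        /* f_mntonname + NUL */
	return (len > NFS4_OPAQUE_LIMIT) ? NFS4_OPAQUE_LIMIT : len;
}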
nmp->nm_longid = ncip; lck_mtx_unlock(nfs_global_mutex); - return (0); + return 0; } /* @@ -219,7 +235,7 @@ nfs4_setclientid(struct nfsmount *nmp) struct sockaddr_storage ss; void *sinaddr = NULL; char raddr[MAX_IPv6_STR_LEN]; - char uaddr[MAX_IPv6_STR_LEN+16]; + char uaddr[MAX_IPv6_STR_LEN + 16]; int ualen = 0; in_port_t port; @@ -230,8 +246,9 @@ nfs4_setclientid(struct nfsmount *nmp) nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); - if (!nmp->nm_longid) + if (!nmp->nm_longid) { error = nfs4_init_clientid(nmp); + } // SETCLIENTID numops = 1; @@ -257,20 +274,22 @@ nfs4_setclientid(struct nfsmount *nmp) if (sinaddr && port && (inet_ntop(ss.ss_family, sinaddr, raddr, sizeof(raddr)) == raddr)) { /* assemble r_addr = universal address (nmp->nm_nso->nso_so source IP addr + port) */ ualen = snprintf(uaddr, sizeof(uaddr), "%s.%d.%d", raddr, - ((port >> 8) & 0xff), - (port & 0xff)); + ((port >> 8) & 0xff), + (port & 0xff)); /* make sure it fit, give up if it didn't */ - if (ualen >= (int)sizeof(uaddr)) + if (ualen >= (int)sizeof(uaddr)) { ualen = 0; + } } } if (ualen > 0) { /* add callback info */ nfsm_chain_add_32(error, &nmreq, NFS4_CALLBACK_PROG); /* callback program */ - if (ss.ss_family == AF_INET) + if (ss.ss_family == AF_INET) { nfsm_chain_add_string(error, &nmreq, "tcp", 3); /* callback r_netid */ - else if (ss.ss_family == AF_INET6) + } else if (ss.ss_family == AF_INET6) { nfsm_chain_add_string(error, &nmreq, "tcp6", 4); /* callback r_netid */ + } nfsm_chain_add_string(error, &nmreq, uaddr, ualen); /* callback r_addr */ nfsm_chain_add_32(error, &nmreq, nmp->nm_cbid); /* callback_ident */ } else { @@ -286,11 +305,13 @@ nfs4_setclientid(struct nfsmount *nmp) error = nfs_request2(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, thd, cred, NULL, R_SETUP, &nmrep, &xid, &status); nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); - if (!error && (numops != 1) && status) + if (!error && (numops != 1) && status) { error = status; + } nfsm_chain_op_check(error, &nmrep, NFS_OP_SETCLIENTID); - if (error == NFSERR_CLID_INUSE) + if (error == NFSERR_CLID_INUSE) { printf("nfs4_setclientid: client ID in use?\n"); + } nfsmout_if(error); nfsm_chain_get_64(error, &nmrep, nmp->nm_clientid); nfsm_chain_get_64(error, &nmrep, verifier); @@ -312,11 +333,13 @@ nfs4_setclientid(struct nfsmount *nmp) nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_SETCLIENTID_CONFIRM); - if (error) + if (error) { printf("nfs4_setclientid: confirm error %d\n", error); + } lck_mtx_lock(&nmp->nm_lock); - if (!error) + if (!error) { nmp->nm_state |= NFSSTA_CLIENTID; + } lck_mtx_unlock(&nmp->nm_lock); nfsmout_if(error || !nmp->nm_dnp); @@ -343,18 +366,21 @@ nfs4_setclientid(struct nfsmount *nmp) lck_mtx_lock(&nmp->nm_lock); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); - if (!error) + if (!error) { error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, NULL, NULL, NULL, NULL); + } lck_mtx_unlock(&nmp->nm_lock); - if (error) /* ignore any error from the getattr */ + if (error) { /* ignore any error from the getattr */ error = 0; + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); kauth_cred_unref(&cred); - if (error) + if (error) { printf("nfs4_setclientid failed, %d\n", error); - return (error); + } + return error; } /* @@ -385,7 +411,7 @@ nfs4_renew(struct nfsmount *nmp, int rpcflag) nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request2(NULL, 
nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, - current_thread(), cred, NULL, rpcflag, &nmrep, &xid, &status); + current_thread(), cred, NULL, rpcflag, &nmrep, &xid, &status); nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_RENEW); @@ -393,7 +419,7 @@ nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); kauth_cred_unref(&cred); - return (error); + return error; } @@ -417,10 +443,11 @@ nfs4_renew_timer(void *param0, __unused void *param1) error = nfs4_renew(nmp, R_RECOVER); out: - if (error == ETIMEDOUT) + if (error == ETIMEDOUT) { nfs_need_reconnect(nmp); - else if (error) + } else if (error) { printf("nfs4_renew_timer: error %d\n", error); + } lck_mtx_lock(&nmp->nm_lock); if (error && (error != ETIMEDOUT) && (nmp->nm_clientid == clientid) && !(nmp->nm_state & NFSSTA_RECOVER)) { @@ -429,8 +456,9 @@ out: } interval = nmp->nm_fsattr.nfsa_lease / (error ? 4 : 2); - if ((interval < 1) || (nmp->nm_state & NFSSTA_RECOVER)) + if ((interval < 1) || (nmp->nm_state & NFSSTA_RECOVER)) { interval = 1; + } lck_mtx_unlock(&nmp->nm_lock); nfs_interval_timer_start(nmp->nm_renew_timer, interval * 1000); } @@ -459,8 +487,9 @@ nfs4_secinfo_rpc(struct nfsmount *nmp, struct nfsreq_secinfo_args *siap, kauth_c struct nfsm_chain nmreq, nmrep; *seccountp = 0; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; np = siap->rsia_np; @@ -471,19 +500,23 @@ nfs4_secinfo_rpc(struct nfsmount *nmp, struct nfsreq_secinfo_args *siap, kauth_c fhsize = fhp ? siap->rsia_fhsize : 0; name = siap->rsia_name; namelen = name ? siap->rsia_namelen : 0; - if (name && !namelen) + if (name && !namelen) { namelen = strlen(name); + } if (!fhp && name) { - if (!np) /* use PUTROOTFH */ + if (!np) { /* use PUTROOTFH */ goto gotargs; + } fhp = np->n_fhp; fhsize = np->n_fhsize; } - if (fhp && name) + if (fhp && name) { goto gotargs; + } - if (!np) - return (EIO); + if (!np) { + return EIO; + } nfs_node_lock_force(np); if ((vnode_vtype(NFSTOV(np)) != VDIR) && np->n_sillyrename) { /* @@ -509,14 +542,17 @@ nfs4_secinfo_rpc(struct nfsmount *nmp, struct nfsreq_secinfo_args *siap, kauth_c * from the n_parent we have stashed away. */ if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) && - (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) + (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) { dvp = NULL; - if (!dvp) + } + if (!dvp) { dvp = vnode_getparent(NFSTOV(np)); + } vname = vnode_getname(NFSTOV(np)); if (!dvp || !vname) { - if (!error) + if (!error) { error = EIO; + } nfs_node_unlock(np); goto nfsmout; } @@ -532,7 +568,7 @@ gotargs: // PUT(ROOT)FH + SECINFO numops = 2; nfsm_chain_build_alloc_init(error, &nmreq, - 4 * NFSX_UNSIGNED + NFSX_FH(nfsvers) + nfsm_rndup(namelen)); + 4 * NFSX_UNSIGNED + NFSX_FH(nfsvers) + nfsm_rndup(namelen)); nfsm_chain_add_compound_header(error, &nmreq, "secinfo", nmp->nm_minor_vers, numops); numops--; if (fhp) { @@ -548,7 +584,7 @@ gotargs: nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, - current_thread(), cred, NULL, 0, &nmrep, &xid, &status); + current_thread(), cred, NULL, 0, &nmrep, &xid, &status); nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, fhp ? 
NFS_OP_PUTFH : NFS_OP_PUTROOTFH); @@ -558,11 +594,13 @@ gotargs: nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - if (vname) + if (vname) { vnode_putname(vname); - if (dvp != NULLVP) + } + if (dvp != NULLVP) { vnode_put(dvp); - return (error); + } + return error; } /* @@ -598,13 +636,13 @@ nfsm_chain_get_secinfo(struct nfsm_chain *nmc, uint32_t *sec, int *seccountp) nfsmout_if(error); if (val != sizeof(krb5_mech_oid)) { nfsm_chain_adv(error, nmc, val); - nfsm_chain_adv(error, nmc, 2*NFSX_UNSIGNED); + nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED); break; } nfsm_chain_get_opaque(error, nmc, val, oid); /* OID bytes */ nfsmout_if(error); if (bcmp(oid, krb5_mech_oid, sizeof(krb5_mech_oid))) { - nfsm_chain_adv(error, nmc, 2*NFSX_UNSIGNED); + nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED); break; } nfsm_chain_get_32(error, nmc, val); /* QOP */ @@ -626,9 +664,10 @@ nfsm_chain_get_secinfo(struct nfsm_chain *nmc, uint32_t *sec, int *seccountp) srvcount--; } nfsmout: - if (!error) + if (!error) { *seccountp = seccount; - return (error); + } + return error; } @@ -656,8 +695,9 @@ nfs4_get_fs_locations( fhp = dnp->n_fhp; fhsize = dnp->n_fhsize; } - if (!fhp) - return (EINVAL); + if (!fhp) { + return EINVAL; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -681,9 +721,10 @@ nfs4_get_fs_locations( nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request_async(dnp, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); + if (!error) { error = nfs_request_async_finish(req, &nmrep, &xid, &status); + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -694,7 +735,7 @@ nfs4_get_fs_locations( nfsmout: nfsm_chain_cleanup(&nmrep); nfsm_chain_cleanup(&nmreq); - return (error); + return error; } /* @@ -775,8 +816,9 @@ nfs4_default_attrs_for_referral_trigger( len = sizeof(fhp->fh_data) - sizeof(dnp); bcopy(name, &fhp->fh_data[0] + sizeof(dnp), MIN(len, namelen)); fhp->fh_len = sizeof(dnp) + namelen; - if (fhp->fh_len > (int)sizeof(fhp->fh_data)) + if (fhp->fh_len > (int)sizeof(fhp->fh_data)) { fhp->fh_len = sizeof(fhp->fh_data); + } } } @@ -789,38 +831,48 @@ nfs_vattr_set_bitmap(struct nfsmount *nmp, uint32_t *bitmap, struct vnode_attr * int i; NFS_CLEAR_ATTRIBUTES(bitmap); - if (VATTR_IS_ACTIVE(vap, va_data_size)) + if (VATTR_IS_ACTIVE(vap, va_data_size)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_SIZE); - if (VATTR_IS_ACTIVE(vap, va_acl) && (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)) + } + if (VATTR_IS_ACTIVE(vap, va_acl) && (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL); + } if (VATTR_IS_ACTIVE(vap, va_flags)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_ARCHIVE); NFS_BITMAP_SET(bitmap, NFS_FATTR_HIDDEN); } // NFS_BITMAP_SET(bitmap, NFS_FATTR_MIMETYPE) - if (VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) + if (VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_MODE); - if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_uuuid)) + } + if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_uuuid)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_OWNER); - if (VATTR_IS_ACTIVE(vap, va_gid) || VATTR_IS_ACTIVE(vap, va_guuid)) + } + if (VATTR_IS_ACTIVE(vap, va_gid) || VATTR_IS_ACTIVE(vap, va_guuid)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_OWNER_GROUP); + } // NFS_BITMAP_SET(bitmap, 
NFS_FATTR_SYSTEM) if (vap->va_vaflags & VA_UTIMES_NULL) { NFS_BITMAP_SET(bitmap, NFS_FATTR_TIME_ACCESS_SET); NFS_BITMAP_SET(bitmap, NFS_FATTR_TIME_MODIFY_SET); } else { - if (VATTR_IS_ACTIVE(vap, va_access_time)) + if (VATTR_IS_ACTIVE(vap, va_access_time)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_TIME_ACCESS_SET); - if (VATTR_IS_ACTIVE(vap, va_modify_time)) + } + if (VATTR_IS_ACTIVE(vap, va_modify_time)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_TIME_MODIFY_SET); + } } - if (VATTR_IS_ACTIVE(vap, va_backup_time)) + if (VATTR_IS_ACTIVE(vap, va_backup_time)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_TIME_BACKUP); - if (VATTR_IS_ACTIVE(vap, va_create_time)) + } + if (VATTR_IS_ACTIVE(vap, va_create_time)) { NFS_BITMAP_SET(bitmap, NFS_FATTR_TIME_CREATE); + } /* and limit to what is supported by server */ - for (i=0; i < NFS_ATTR_BITMAP_LEN; i++) + for (i = 0; i < NFS_ATTR_BITMAP_LEN; i++) { bitmap[i] &= nmp->nm_fsattr.nfsa_supp_attr[i]; + } } /* @@ -868,22 +920,29 @@ nfs4_ace_nfsflags_to_vfsflags(uint32_t nfsflags) { uint32_t vfsflags = 0; - if (nfsflags & NFS_ACE_FILE_INHERIT_ACE) + if (nfsflags & NFS_ACE_FILE_INHERIT_ACE) { vfsflags |= KAUTH_ACE_FILE_INHERIT; - if (nfsflags & NFS_ACE_DIRECTORY_INHERIT_ACE) + } + if (nfsflags & NFS_ACE_DIRECTORY_INHERIT_ACE) { vfsflags |= KAUTH_ACE_DIRECTORY_INHERIT; - if (nfsflags & NFS_ACE_NO_PROPAGATE_INHERIT_ACE) + } + if (nfsflags & NFS_ACE_NO_PROPAGATE_INHERIT_ACE) { vfsflags |= KAUTH_ACE_LIMIT_INHERIT; - if (nfsflags & NFS_ACE_INHERIT_ONLY_ACE) + } + if (nfsflags & NFS_ACE_INHERIT_ONLY_ACE) { vfsflags |= KAUTH_ACE_ONLY_INHERIT; - if (nfsflags & NFS_ACE_SUCCESSFUL_ACCESS_ACE_FLAG) + } + if (nfsflags & NFS_ACE_SUCCESSFUL_ACCESS_ACE_FLAG) { vfsflags |= KAUTH_ACE_SUCCESS; - if (nfsflags & NFS_ACE_FAILED_ACCESS_ACE_FLAG) + } + if (nfsflags & NFS_ACE_FAILED_ACCESS_ACE_FLAG) { vfsflags |= KAUTH_ACE_FAILURE; - if (nfsflags & NFS_ACE_INHERITED_ACE) + } + if (nfsflags & NFS_ACE_INHERITED_ACE) { vfsflags |= KAUTH_ACE_INHERITED; + } - return (vfsflags); + return vfsflags; } uint32_t @@ -891,22 +950,29 @@ nfs4_ace_vfsflags_to_nfsflags(uint32_t vfsflags) { uint32_t nfsflags = 0; - if (vfsflags & KAUTH_ACE_FILE_INHERIT) + if (vfsflags & KAUTH_ACE_FILE_INHERIT) { nfsflags |= NFS_ACE_FILE_INHERIT_ACE; - if (vfsflags & KAUTH_ACE_DIRECTORY_INHERIT) + } + if (vfsflags & KAUTH_ACE_DIRECTORY_INHERIT) { nfsflags |= NFS_ACE_DIRECTORY_INHERIT_ACE; - if (vfsflags & KAUTH_ACE_LIMIT_INHERIT) + } + if (vfsflags & KAUTH_ACE_LIMIT_INHERIT) { nfsflags |= NFS_ACE_NO_PROPAGATE_INHERIT_ACE; - if (vfsflags & KAUTH_ACE_ONLY_INHERIT) + } + if (vfsflags & KAUTH_ACE_ONLY_INHERIT) { nfsflags |= NFS_ACE_INHERIT_ONLY_ACE; - if (vfsflags & KAUTH_ACE_SUCCESS) + } + if (vfsflags & KAUTH_ACE_SUCCESS) { nfsflags |= NFS_ACE_SUCCESSFUL_ACCESS_ACE_FLAG; - if (vfsflags & KAUTH_ACE_FAILURE) + } + if (vfsflags & KAUTH_ACE_FAILURE) { nfsflags |= NFS_ACE_FAILED_ACCESS_ACE_FLAG; - if (vfsflags & KAUTH_ACE_INHERITED) + } + if (vfsflags & KAUTH_ACE_INHERITED) { nfsflags |= NFS_ACE_INHERITED_ACE; + } - return (nfsflags); + return nfsflags; } /* @@ -917,48 +983,68 @@ nfs4_ace_nfsmask_to_vfsrights(uint32_t nfsmask) { uint32_t vfsrights = 0; - if (nfsmask & NFS_ACE_READ_DATA) + if (nfsmask & NFS_ACE_READ_DATA) { vfsrights |= KAUTH_VNODE_READ_DATA; - if (nfsmask & NFS_ACE_LIST_DIRECTORY) + } + if (nfsmask & NFS_ACE_LIST_DIRECTORY) { vfsrights |= KAUTH_VNODE_LIST_DIRECTORY; - if (nfsmask & NFS_ACE_WRITE_DATA) + } + if (nfsmask & NFS_ACE_WRITE_DATA) { vfsrights |= KAUTH_VNODE_WRITE_DATA; - if (nfsmask & NFS_ACE_ADD_FILE) + } + if (nfsmask 
& NFS_ACE_ADD_FILE) { vfsrights |= KAUTH_VNODE_ADD_FILE; - if (nfsmask & NFS_ACE_APPEND_DATA) + } + if (nfsmask & NFS_ACE_APPEND_DATA) { vfsrights |= KAUTH_VNODE_APPEND_DATA; - if (nfsmask & NFS_ACE_ADD_SUBDIRECTORY) + } + if (nfsmask & NFS_ACE_ADD_SUBDIRECTORY) { vfsrights |= KAUTH_VNODE_ADD_SUBDIRECTORY; - if (nfsmask & NFS_ACE_READ_NAMED_ATTRS) + } + if (nfsmask & NFS_ACE_READ_NAMED_ATTRS) { vfsrights |= KAUTH_VNODE_READ_EXTATTRIBUTES; - if (nfsmask & NFS_ACE_WRITE_NAMED_ATTRS) + } + if (nfsmask & NFS_ACE_WRITE_NAMED_ATTRS) { vfsrights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES; - if (nfsmask & NFS_ACE_EXECUTE) + } + if (nfsmask & NFS_ACE_EXECUTE) { vfsrights |= KAUTH_VNODE_EXECUTE; - if (nfsmask & NFS_ACE_DELETE_CHILD) + } + if (nfsmask & NFS_ACE_DELETE_CHILD) { vfsrights |= KAUTH_VNODE_DELETE_CHILD; - if (nfsmask & NFS_ACE_READ_ATTRIBUTES) + } + if (nfsmask & NFS_ACE_READ_ATTRIBUTES) { vfsrights |= KAUTH_VNODE_READ_ATTRIBUTES; - if (nfsmask & NFS_ACE_WRITE_ATTRIBUTES) + } + if (nfsmask & NFS_ACE_WRITE_ATTRIBUTES) { vfsrights |= KAUTH_VNODE_WRITE_ATTRIBUTES; - if (nfsmask & NFS_ACE_DELETE) + } + if (nfsmask & NFS_ACE_DELETE) { vfsrights |= KAUTH_VNODE_DELETE; - if (nfsmask & NFS_ACE_READ_ACL) + } + if (nfsmask & NFS_ACE_READ_ACL) { vfsrights |= KAUTH_VNODE_READ_SECURITY; - if (nfsmask & NFS_ACE_WRITE_ACL) + } + if (nfsmask & NFS_ACE_WRITE_ACL) { vfsrights |= KAUTH_VNODE_WRITE_SECURITY; - if (nfsmask & NFS_ACE_WRITE_OWNER) + } + if (nfsmask & NFS_ACE_WRITE_OWNER) { vfsrights |= KAUTH_VNODE_CHANGE_OWNER; - if (nfsmask & NFS_ACE_SYNCHRONIZE) + } + if (nfsmask & NFS_ACE_SYNCHRONIZE) { vfsrights |= KAUTH_VNODE_SYNCHRONIZE; - if ((nfsmask & NFS_ACE_GENERIC_READ) == NFS_ACE_GENERIC_READ) + } + if ((nfsmask & NFS_ACE_GENERIC_READ) == NFS_ACE_GENERIC_READ) { vfsrights |= KAUTH_ACE_GENERIC_READ; - if ((nfsmask & NFS_ACE_GENERIC_WRITE) == NFS_ACE_GENERIC_WRITE) + } + if ((nfsmask & NFS_ACE_GENERIC_WRITE) == NFS_ACE_GENERIC_WRITE) { vfsrights |= KAUTH_ACE_GENERIC_WRITE; - if ((nfsmask & NFS_ACE_GENERIC_EXECUTE) == NFS_ACE_GENERIC_EXECUTE) + } + if ((nfsmask & NFS_ACE_GENERIC_EXECUTE) == NFS_ACE_GENERIC_EXECUTE) { vfsrights |= KAUTH_ACE_GENERIC_EXECUTE; + } - return (vfsrights); + return vfsrights; } uint32_t @@ -966,50 +1052,71 @@ nfs4_ace_vfsrights_to_nfsmask(uint32_t vfsrights) { uint32_t nfsmask = 0; - if (vfsrights & KAUTH_VNODE_READ_DATA) + if (vfsrights & KAUTH_VNODE_READ_DATA) { nfsmask |= NFS_ACE_READ_DATA; - if (vfsrights & KAUTH_VNODE_LIST_DIRECTORY) + } + if (vfsrights & KAUTH_VNODE_LIST_DIRECTORY) { nfsmask |= NFS_ACE_LIST_DIRECTORY; - if (vfsrights & KAUTH_VNODE_WRITE_DATA) + } + if (vfsrights & KAUTH_VNODE_WRITE_DATA) { nfsmask |= NFS_ACE_WRITE_DATA; - if (vfsrights & KAUTH_VNODE_ADD_FILE) + } + if (vfsrights & KAUTH_VNODE_ADD_FILE) { nfsmask |= NFS_ACE_ADD_FILE; - if (vfsrights & KAUTH_VNODE_APPEND_DATA) + } + if (vfsrights & KAUTH_VNODE_APPEND_DATA) { nfsmask |= NFS_ACE_APPEND_DATA; - if (vfsrights & KAUTH_VNODE_ADD_SUBDIRECTORY) + } + if (vfsrights & KAUTH_VNODE_ADD_SUBDIRECTORY) { nfsmask |= NFS_ACE_ADD_SUBDIRECTORY; - if (vfsrights & KAUTH_VNODE_READ_EXTATTRIBUTES) + } + if (vfsrights & KAUTH_VNODE_READ_EXTATTRIBUTES) { nfsmask |= NFS_ACE_READ_NAMED_ATTRS; - if (vfsrights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) + } + if (vfsrights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) { nfsmask |= NFS_ACE_WRITE_NAMED_ATTRS; - if (vfsrights & KAUTH_VNODE_EXECUTE) + } + if (vfsrights & KAUTH_VNODE_EXECUTE) { nfsmask |= NFS_ACE_EXECUTE; - if (vfsrights & KAUTH_VNODE_DELETE_CHILD) + } + if (vfsrights & 
KAUTH_VNODE_DELETE_CHILD) { nfsmask |= NFS_ACE_DELETE_CHILD; - if (vfsrights & KAUTH_VNODE_READ_ATTRIBUTES) + } + if (vfsrights & KAUTH_VNODE_READ_ATTRIBUTES) { nfsmask |= NFS_ACE_READ_ATTRIBUTES; - if (vfsrights & KAUTH_VNODE_WRITE_ATTRIBUTES) + } + if (vfsrights & KAUTH_VNODE_WRITE_ATTRIBUTES) { nfsmask |= NFS_ACE_WRITE_ATTRIBUTES; - if (vfsrights & KAUTH_VNODE_DELETE) + } + if (vfsrights & KAUTH_VNODE_DELETE) { nfsmask |= NFS_ACE_DELETE; - if (vfsrights & KAUTH_VNODE_READ_SECURITY) + } + if (vfsrights & KAUTH_VNODE_READ_SECURITY) { nfsmask |= NFS_ACE_READ_ACL; - if (vfsrights & KAUTH_VNODE_WRITE_SECURITY) + } + if (vfsrights & KAUTH_VNODE_WRITE_SECURITY) { nfsmask |= NFS_ACE_WRITE_ACL; - if (vfsrights & KAUTH_VNODE_CHANGE_OWNER) + } + if (vfsrights & KAUTH_VNODE_CHANGE_OWNER) { nfsmask |= NFS_ACE_WRITE_OWNER; - if (vfsrights & KAUTH_VNODE_SYNCHRONIZE) + } + if (vfsrights & KAUTH_VNODE_SYNCHRONIZE) { nfsmask |= NFS_ACE_SYNCHRONIZE; - if (vfsrights & KAUTH_ACE_GENERIC_READ) + } + if (vfsrights & KAUTH_ACE_GENERIC_READ) { nfsmask |= NFS_ACE_GENERIC_READ; - if (vfsrights & KAUTH_ACE_GENERIC_WRITE) + } + if (vfsrights & KAUTH_ACE_GENERIC_WRITE) { nfsmask |= NFS_ACE_GENERIC_WRITE; - if (vfsrights & KAUTH_ACE_GENERIC_EXECUTE) + } + if (vfsrights & KAUTH_ACE_GENERIC_EXECUTE) { nfsmask |= NFS_ACE_GENERIC_EXECUTE; - if (vfsrights & KAUTH_ACE_GENERIC_ALL) - nfsmask |= (KAUTH_ACE_GENERIC_READ|KAUTH_ACE_GENERIC_WRITE|NFS_ACE_GENERIC_EXECUTE); + } + if (vfsrights & KAUTH_ACE_GENERIC_ALL) { + nfsmask |= (KAUTH_ACE_GENERIC_READ | KAUTH_ACE_GENERIC_WRITE | NFS_ACE_GENERIC_EXECUTE); + } - return (nfsmask); + return nfsmask; } /* @@ -1022,8 +1129,9 @@ nfs4_wkid2sid(const char *id, ntsid_t *sp) { size_t len = strnlen(id, MAXIDNAMELEN); - if (len == MAXIDNAMELEN || id[len-1] != '@') - return (EINVAL); + if (len == MAXIDNAMELEN || id[len - 1] != '@') { + return EINVAL; + } bzero(sp, sizeof(ntsid_t)); sp->sid_kind = 1; @@ -1073,7 +1181,7 @@ nfs4_wkid2sid(const char *id, ntsid_t *sp) sp->sid_authority[5] = 0; sp->sid_authorities[0] = 0; } - return (0); + return 0; } static int @@ -1082,31 +1190,34 @@ nfs4_fallback_name(const char *id, int have_at) if (have_at) { /* must be user@domain */ /* try to identify some well-known IDs */ - if (!strncmp(id, "root@", 5)) - return (0); - else if (!strncmp(id, "wheel@", 6)) - return (0); - else if (!strncmp(id, "nobody@", 7)) - return (-2); - else if (!strncmp(id, "nfsnobody@", 10)) - return (-2); - } - return (-2); + if (!strncmp(id, "root@", 5)) { + return 0; + } else if (!strncmp(id, "wheel@", 6)) { + return 0; + } else if (!strncmp(id, "nobody@", 7)) { + return -2; + } else if (!strncmp(id, "nfsnobody@", 10)) { + return -2; + } + } + return -2; } static void nfs4_mapid_log(int error, const char *idstr, int isgroup, guid_t *gp) { - if (error && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS)) - printf("nfs4_id2guid: idmap failed for %s %s error %d\n", idstr, isgroup ? "G" : " ", error); - if (!error && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS)) + if (error && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS)) { + printf("nfs4_id2guid: idmap failed for %s %s error %d\n", idstr, isgroup ? "G" : " ", error); + } + if (!error && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS)) { printf("nfs4_id2guid: idmap for %s %s got guid " - "%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x\n", - idstr, isgroup ? 
"G" : " ", - gp->g_guid[0], gp->g_guid[1], gp->g_guid[2], gp->g_guid[3], - gp->g_guid[4], gp->g_guid[5], gp->g_guid[6], gp->g_guid[7], - gp->g_guid[8], gp->g_guid[9], gp->g_guid[10], gp->g_guid[11], - gp->g_guid[12], gp->g_guid[13], gp->g_guid[14], gp->g_guid[15]); + "%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x\n", + idstr, isgroup ? "G" : " ", + gp->g_guid[0], gp->g_guid[1], gp->g_guid[2], gp->g_guid[3], + gp->g_guid[4], gp->g_guid[5], gp->g_guid[6], gp->g_guid[7], + gp->g_guid[8], gp->g_guid[9], gp->g_guid[10], gp->g_guid[11], + gp->g_guid[12], gp->g_guid[13], gp->g_guid[14], gp->g_guid[15]); + } } static char * @@ -1119,10 +1230,12 @@ nfs4_map_domain(char *id, char **atp) size_t otw_id_2_at_len; int error; - if (at == NULL) + if (at == NULL) { at = strchr(id, '@'); - if (at == NULL || *at != '@') - return (NULL); + } + if (at == NULL || *at != '@') { + return NULL; + } otw_nfs4domain = at + 1; otw_domain_len = strnlen(otw_nfs4domain, MAXPATHLEN); @@ -1154,14 +1267,14 @@ nfs4_map_domain(char *id, char **atp) *at = '\0'; } } - FREE_ZONE(dsnode, MAXPATHLEN, M_NAMEI); + FREE_ZONE(dsnode, MAXPATHLEN, M_NAMEI); if (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS) { printf("nfs4_id2guid: after domain mapping id is %s\n", id); } *atp = at; - return (new_id); + return new_id; } /* @@ -1188,22 +1301,25 @@ nfs4_id2guid(/*const*/ char *id, guid_t *guidp, int isgroup) at = NULL; p = id; while (*p) { - if ((*p < '0') || (*p > '9')) + if ((*p < '0') || (*p > '9')) { num = 0; - if (*p == '@') + } + if (*p == '@') { at = p; + } p++; } if (num) { /* must be numeric ID (or empty) */ num = *id ? strtol(id, NULL, 10) : -2; - if (isgroup) + if (isgroup) { error = kauth_cred_gid2guid((gid_t)num, guidp); - else + } else { error = kauth_cred_uid2guid((uid_t)num, guidp); + } nfs4_mapid_log(error, id, isgroup, guidp); - return (error); + return error; } /* See if this is a well known NFSv4 name */ @@ -1211,14 +1327,15 @@ nfs4_id2guid(/*const*/ char *id, guid_t *guidp, int isgroup) if (!error) { error = kauth_cred_ntsid2guid(&sid, guidp); nfs4_mapid_log(error, id, 1, guidp); - return (error); + return error; } /* Handle nfs4 domain first */ if (at && at[1]) { new_id = nfs4_map_domain(id, &at); - if (new_id) + if (new_id) { id = new_id; + } } /* Now try to do actual id mapping */ @@ -1228,10 +1345,11 @@ nfs4_id2guid(/*const*/ char *id, guid_t *guidp, int isgroup) * * [sigh] this isn't a "pwnam/grnam" it's an NFS ID string! */ - if (isgroup) + if (isgroup) { error = kauth_cred_grnam2guid(id, guidp); - else + } else { error = kauth_cred_pwnam2guid(id, guidp); + } nfs4_mapid_log(error, id, isgroup, guidp); } else { error = ENOTSUP; @@ -1242,23 +1360,26 @@ nfs4_id2guid(/*const*/ char *id, guid_t *guidp, int isgroup) * fallback path... see if we can come up with an answer ourselves. 
*/ num = nfs4_fallback_name(id, at != NULL); - if (isgroup) + if (isgroup) { error = kauth_cred_gid2guid((gid_t)num, guidp); - else + } else { error = kauth_cred_uid2guid((uid_t)num, guidp); - nfs4_mapid_log(error, id, isgroup, guidp); + } + nfs4_mapid_log(error, id, isgroup, guidp); } /* restore @ symbol in case we clobered for unscoped lookup */ - if (at && *at == '\0') + if (at && *at == '\0') { *at = '@'; + } /* free mapped domain id string */ - if (new_id) + if (new_id) { FREE(new_id, M_TEMP); + } - return (error); + return error; } /* @@ -1274,57 +1395,63 @@ nfs4_sid2wkid(ntsid_t *sp) if ((sp->sid_kind == 1) && (sp->sid_authcount == 1)) { /* check if it's one of our well-known ACE WHO names */ if (sp->sid_authority[5] == 0) { - if (sp->sid_authorities[0] == 0) // S-1-0-0 - return ("nobody@localdomain"); + if (sp->sid_authorities[0] == 0) { // S-1-0-0 + return "nobody@localdomain"; + } } else if (sp->sid_authority[5] == 1) { - if (sp->sid_authorities[0] == 0) // S-1-1-0 - return ("EVERYONE@"); + if (sp->sid_authorities[0] == 0) { // S-1-1-0 + return "EVERYONE@"; + } } else if (sp->sid_authority[5] == 3) { - if (sp->sid_authorities[0] == 0) // S-1-3-0 - return ("OWNER@"); - else if (sp->sid_authorities[0] == 1) // S-1-3-1 - return ("GROUP@"); + if (sp->sid_authorities[0] == 0) { // S-1-3-0 + return "OWNER@"; + } else if (sp->sid_authorities[0] == 1) { // S-1-3-1 + return "GROUP@"; + } } else if (sp->sid_authority[5] == 5) { - if (sp->sid_authorities[0] == 1) // S-1-5-1 - return ("DIALUP@"); - else if (sp->sid_authorities[0] == 2) // S-1-5-2 - return ("NETWORK@"); - else if (sp->sid_authorities[0] == 3) // S-1-5-3 - return ("BATCH@"); - else if (sp->sid_authorities[0] == 4) // S-1-5-4 - return ("INTERACTIVE@"); - else if (sp->sid_authorities[0] == 6) // S-1-5-6 - return ("SERVICE@"); - else if (sp->sid_authorities[0] == 7) // S-1-5-7 - return ("ANONYMOUS@"); - else if (sp->sid_authorities[0] == 11) // S-1-5-11 - return ("AUTHENTICATED@"); - } - } - return (NULL); + if (sp->sid_authorities[0] == 1) { // S-1-5-1 + return "DIALUP@"; + } else if (sp->sid_authorities[0] == 2) { // S-1-5-2 + return "NETWORK@"; + } else if (sp->sid_authorities[0] == 3) { // S-1-5-3 + return "BATCH@"; + } else if (sp->sid_authorities[0] == 4) { // S-1-5-4 + return "INTERACTIVE@"; + } else if (sp->sid_authorities[0] == 6) { // S-1-5-6 + return "SERVICE@"; + } else if (sp->sid_authorities[0] == 7) { // S-1-5-7 + return "ANONYMOUS@"; + } else if (sp->sid_authorities[0] == 11) { // S-1-5-11 + return "AUTHENTICATED@"; + } + } + } + return NULL; } static void nfs4_mapguid_log(int error, const char *where, guid_t *gp, int isgroup, const char *idstr) { - if (error && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS)) + if (error && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS)) { printf("nfs4_guid2id: %s idmap failed for " - "%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x %s " - "error %d\n", where, - gp->g_guid[0], gp->g_guid[1], gp->g_guid[2], gp->g_guid[3], - gp->g_guid[4], gp->g_guid[5], gp->g_guid[6], gp->g_guid[7], - gp->g_guid[8], gp->g_guid[9], gp->g_guid[10], gp->g_guid[11], - gp->g_guid[12], gp->g_guid[13], gp->g_guid[14], gp->g_guid[15], - isgroup ? 
"G" : " ", error); - if (!error && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS)) + "%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x %s " + "error %d\n", where, + gp->g_guid[0], gp->g_guid[1], gp->g_guid[2], gp->g_guid[3], + gp->g_guid[4], gp->g_guid[5], gp->g_guid[6], gp->g_guid[7], + gp->g_guid[8], gp->g_guid[9], gp->g_guid[10], gp->g_guid[11], + gp->g_guid[12], gp->g_guid[13], gp->g_guid[14], gp->g_guid[15], + isgroup ? "G" : " ", error); + } + if (!error && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS)) { printf("nfs4_guid2id: %s idmap for " - "%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x %s " - "got ID %s\n", where, - gp->g_guid[0], gp->g_guid[1], gp->g_guid[2], gp->g_guid[3], - gp->g_guid[4], gp->g_guid[5], gp->g_guid[6], gp->g_guid[7], - gp->g_guid[8], gp->g_guid[9], gp->g_guid[10], gp->g_guid[11], - gp->g_guid[12], gp->g_guid[13], gp->g_guid[14], gp->g_guid[15], - isgroup ? "G" : " ", idstr); + "%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x_%02x%02x%02x%02x %s " + "got ID %s\n", where, + gp->g_guid[0], gp->g_guid[1], gp->g_guid[2], gp->g_guid[3], + gp->g_guid[4], gp->g_guid[5], gp->g_guid[6], gp->g_guid[7], + gp->g_guid[8], gp->g_guid[9], gp->g_guid[10], gp->g_guid[11], + gp->g_guid[12], gp->g_guid[13], gp->g_guid[14], gp->g_guid[15], + isgroup ? "G" : " ", idstr); + } } static int @@ -1336,8 +1463,9 @@ nfs4_addv4domain(char *id, size_t *idlen) size_t idsize; - if (id == NULL || *id == '\0') - return (EINVAL); + if (id == NULL || *id == '\0') { + return EINVAL; + } for (cp = id; *cp != '\0'; cp++) { if (*cp == '@') { @@ -1346,7 +1474,7 @@ nfs4_addv4domain(char *id, size_t *idlen) } } - have_domain = (at && at[1] != '\0'); + have_domain = (at && at[1] != '\0'); if (have_domain) { char *dsnode = at + 1; @@ -1369,10 +1497,11 @@ nfs4_addv4domain(char *id, size_t *idlen) at[1] = '\0'; /* Add our mapped_domain */ idsize = strlcat(id, mapped_domain, *idlen); - if (*idlen > idsize) + if (*idlen > idsize) { *idlen = idsize; - else + } else { error = ENOSPC; + } } FREE_ZONE(nfs4domain, MAXPATHLEN, M_NAMEI); } else if (at == NULL) { @@ -1386,19 +1515,21 @@ nfs4_addv4domain(char *id, size_t *idlen) if (default_domain_len) { strlcat(id, "@", *idlen); idsize = strlcat(id, nfs4_default_domain, *idlen); - if (*idlen > idsize) + if (*idlen > idsize) { *idlen = idsize; - else + } else { error = ENOSPC; + } } else { ; /* Unscoped name otw */ } } - if (!error && nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS) + if (!error && nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_SUCCESSFUL_MAPPINGS) { printf("nfs4_guid2id: id after nfs4 domain map: %s[%zd].\n", id, *idlen); + } - return (error); + return error; } static char * @@ -1408,22 +1539,24 @@ nfs4_fallback_id(int numid, int isgrp, char *buf, size_t size) if (!(nfs_idmap_ctrl & NFS_IDMAP_CTRL_FALLBACK_NO_COMMON_IDS)) { /* map well known uid's to strings */ - if (numid == 0) + if (numid == 0) { idp = isgrp ? "wheel" : "root"; - else if (numid == -2) + } else if (numid == -2) { idp = "nobody"; + } } if (!idp) { /* or just use a decimal number string. 
*/ - snprintf(buf, size-1, "%d", numid); - buf[size-1] = '\0'; + snprintf(buf, size - 1, "%d", numid); + buf[size - 1] = '\0'; } else { size_t idplen = strlcpy(buf, idp, size); - if (idplen >= size) - return (NULL); + if (idplen >= size) { + return NULL; + } } - return (buf); + return buf; } /* @@ -1435,7 +1568,7 @@ int nfs4_guid2id(guid_t *guidp, char *id, size_t *idlen, int isgroup) { int error = 0; - size_t id1len, len; + size_t id1len, len; char *id1buf, *id1; char numbuf[32]; ntsid_t sid; @@ -1455,7 +1588,7 @@ nfs4_guid2id(guid_t *guidp, char *id, size_t *idlen, int isgroup) error = (len < *idlen) ? 0 : ENOSPC; *idlen = len; nfs4_mapguid_log(error, "kauth_cred_guid2ntsid", guidp, 1, id); - return (error); + return error; } } else { nfs4_mapguid_log(error, "kauth_cred_guid2ntsid", guidp, isgroup, NULL); @@ -1483,12 +1616,14 @@ nfs4_guid2id(guid_t *guidp, char *id, size_t *idlen, int isgroup) id1len = *idlen; } - if (isgroup) + if (isgroup) { error = kauth_cred_guid2grnam(guidp, id1); - else + } else { error = kauth_cred_guid2pwnam(guidp, id1); - if (error) + } + if (error) { nfs4_mapguid_log(error, "kauth_cred2[pw|gr]nam", guidp, isgroup, id1); + } } else { error = ENOTSUP; } @@ -1500,38 +1635,41 @@ nfs4_guid2id(guid_t *guidp, char *id, size_t *idlen, int isgroup) uid_t uid; /* OK, let's just try mapping it to a UID/GID */ - if (isgroup) + if (isgroup) { error = kauth_cred_guid2gid(guidp, (gid_t*)&uid); - else + } else { error = kauth_cred_guid2uid(guidp, &uid); + } if (!error) { char *fbidp = nfs4_fallback_id(uid, isgroup, numbuf, sizeof(numbuf)); - if (fbidp == NULL) + if (fbidp == NULL) { error = ENOSPC; - else + } else { id1 = fbidp; + } } } else { - error = nfs4_addv4domain(id1, &id1len); + error = nfs4_addv4domain(id1, &id1len); } if (!error) { - if (id1 != id) { /* copy idmap result to output buffer */ len = strlcpy(id, id1, *idlen); - if (len >= *idlen) + if (len >= *idlen) { error = ENOSPC; - else + } else { *idlen = len; + } } } - nfs4_mapguid_log(error, "End of routine", guidp, isgroup, id1); + nfs4_mapguid_log(error, "End of routine", guidp, isgroup, id1); - if (id1buf) + if (id1buf) { FREE_ZONE(id1buf, MAXPATHLEN, M_NAMEI); + } - return (error); + return error; } /* @@ -1540,26 +1678,35 @@ nfs4_guid2id(guid_t *guidp, char *id, size_t *idlen, int isgroup) void nfs_vattr_set_supported(uint32_t *bitmap, struct vnode_attr *vap) { - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TYPE)) + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TYPE)) { VATTR_SET_SUPPORTED(vap, va_type); + } // if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_CHANGE)) - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_SIZE)) + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_SIZE)) { VATTR_SET_SUPPORTED(vap, va_data_size); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_FSID)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_FSID)) { VATTR_SET_SUPPORTED(vap, va_fsid); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL)) { VATTR_SET_SUPPORTED(vap, va_acl); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ARCHIVE)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ARCHIVE)) { VATTR_SET_SUPPORTED(vap, va_flags); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_FILEID)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_FILEID)) { VATTR_SET_SUPPORTED(vap, va_fileid); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_HIDDEN)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_HIDDEN)) { VATTR_SET_SUPPORTED(vap, va_flags); + } // if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MIMETYPE)) - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) + if (NFS_BITMAP_ISSET(bitmap, 
NFS_FATTR_MODE)) { VATTR_SET_SUPPORTED(vap, va_mode); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_NUMLINKS)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_NUMLINKS)) { VATTR_SET_SUPPORTED(vap, va_nlink); + } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_OWNER)) { VATTR_SET_SUPPORTED(vap, va_uid); VATTR_SET_SUPPORTED(vap, va_uuuid); @@ -1568,21 +1715,28 @@ nfs_vattr_set_supported(uint32_t *bitmap, struct vnode_attr *vap) VATTR_SET_SUPPORTED(vap, va_gid); VATTR_SET_SUPPORTED(vap, va_guuid); } - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_RAWDEV)) + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_RAWDEV)) { VATTR_SET_SUPPORTED(vap, va_rdev); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_SPACE_USED)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_SPACE_USED)) { VATTR_SET_SUPPORTED(vap, va_total_alloc); + } // if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_SYSTEM)) - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_ACCESS)) + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_ACCESS)) { VATTR_SET_SUPPORTED(vap, va_access_time); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_BACKUP)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_BACKUP)) { VATTR_SET_SUPPORTED(vap, va_backup_time); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_CREATE)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_CREATE)) { VATTR_SET_SUPPORTED(vap, va_create_time); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_METADATA)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_METADATA)) { VATTR_SET_SUPPORTED(vap, va_change_time); - if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_MODIFY)) + } + if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_MODIFY)) { VATTR_SET_SUPPORTED(vap, va_modify_time); + } } /* @@ -1612,14 +1766,18 @@ nfs4_parsefattr( struct sockaddr_storage ss; /* if not interested in some values... throw 'em into a local dummy variable */ - if (!nfsap) + if (!nfsap) { nfsap = &nfsa_dummy; - if (!nvap) + } + if (!nvap) { nvap = &nva_dummy; - if (!dqbp) + } + if (!dqbp) { dqbp = &dqb_dummy; - if (!nfslsp) + } + if (!nfslsp) { nfslsp = &nfsls_dummy; + } bzero(nfslsp, sizeof(*nfslsp)); attrbytes = val = val2 = val3 = 0; @@ -1630,7 +1788,7 @@ nfs4_parsefattr( len = NFS_ATTR_BITMAP_LEN; nfsm_chain_get_bitmap(error, nmc, bitmap, len); /* add bits to object/fs attr bitmaps */ - for (i=0; i < NFS_ATTR_BITMAP_LEN; i++) { + for (i = 0; i < NFS_ATTR_BITMAP_LEN; i++) { nvap->nva_bitmap[i] |= bitmap[i] & nfs_object_attr_bitmap[i]; nfsap->nfsa_bitmap[i] |= bitmap[i] & nfs_fs_attr_bitmap[i]; } @@ -1646,10 +1804,11 @@ nfs4_parsefattr( if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TYPE)) { nfsm_chain_get_32(error, nmc, val); nvap->nva_type = nfstov_type(val, NFS_VER4); - if ((val == NFATTRDIR) || (val == NFNAMEDATTR)) + if ((val == NFATTRDIR) || (val == NFNAMEDATTR)) { nvap->nva_flags |= NFS_FFLAG_IS_ATTR; - else + } else { nvap->nva_flags &= ~NFS_FFLAG_IS_ATTR; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_FH_EXPIRE_TYPE)) { @@ -1657,8 +1816,9 @@ nfs4_parsefattr( nfsmout_if(error); nfsap->nfsa_flags &= ~NFS_FSFLAG_FHTYPE_MASK; nfsap->nfsa_flags |= val << NFS_FSFLAG_FHTYPE_SHIFT; - if (val & ~0xff) + if (val & ~0xff) { printf("nfs: warning unknown fh type: 0x%x\n", val); + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_CHANGE)) { @@ -1671,26 +1831,29 @@ nfs4_parsefattr( } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_LINK_SUPPORT)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_LINK; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_LINK; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, 
NFS_FATTR_SYMLINK_SUPPORT)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_SYMLINK; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_SYMLINK; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_NAMED_ATTR)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nvap->nva_flags |= NFS_FFLAG_HAS_NAMED_ATTRS; - else + } else { nvap->nva_flags &= ~NFS_FFLAG_HAS_NAMED_ATTRS; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_FSID)) { @@ -1700,10 +1863,11 @@ nfs4_parsefattr( } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_UNIQUE_HANDLES)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_UNIQUE_FH; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_UNIQUE_FH; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_LEASE_TIME)) { @@ -1722,23 +1886,26 @@ nfs4_parsefattr( error2 = 0; ace_type = ace_flags = ace_mask = 0; nfsm_chain_get_32(error, nmc, val); /* ACE count */ - if (!error && (val > KAUTH_ACL_MAX_ENTRIES)) + if (!error && (val > KAUTH_ACL_MAX_ENTRIES)) { error = EOVERFLOW; - if (!error && !((acl = kauth_acl_alloc(val)))) + } + if (!error && !((acl = kauth_acl_alloc(val)))) { error = ENOMEM; + } if (!error && acl) { acl->acl_entrycount = val; acl->acl_flags = 0; } attrbytes -= NFSX_UNSIGNED; nfsm_assert(error, (attrbytes >= 0), EBADRPC); - for (i=0; !error && (i < val); i++) { + for (i = 0; !error && (i < val); i++) { nfsm_chain_get_32(error, nmc, ace_type); nfsm_chain_get_32(error, nmc, ace_flags); nfsm_chain_get_32(error, nmc, ace_mask); nfsm_chain_get_32(error, nmc, len); - if (!error && len >= NFS_MAX_WHO) + if (!error && len >= NFS_MAX_WHO) { error = EBADRPC; + } acl->acl_ace[i].ace_flags = nfs4_ace_nfstype_to_vfstype(ace_type, &error); acl->acl_ace[i].ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags); acl->acl_ace[i].ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask); @@ -1749,24 +1916,27 @@ nfs4_parsefattr( slen = sizeof(sbuf); } /* Let's add a bit more if we can to the allocation as to try and avoid future allocations */ - MALLOC(s, char*, (len + 16 < NFS_MAX_WHO) ? len+16 : NFS_MAX_WHO, M_TEMP, M_WAITOK); - if (s) - slen = (len + 16 < NFS_MAX_WHO) ? len+16 : NFS_MAX_WHO; - else + MALLOC(s, char*, (len + 16 < NFS_MAX_WHO) ? len + 16 : NFS_MAX_WHO, M_TEMP, M_WAITOK); + if (s) { + slen = (len + 16 < NFS_MAX_WHO) ? len + 16 : NFS_MAX_WHO; + } else { error = ENOMEM; + } } - if (error2) + if (error2) { nfsm_chain_adv(error, nmc, nfsm_rndup(len)); - else + } else { nfsm_chain_get_opaque(error, nmc, len, s); + } if (!error && !error2) { s[len] = '\0'; error2 = nfs4_id2guid(s, &acl->acl_ace[i].ace_applicable, - (ace_flags & NFS_ACE_IDENTIFIER_GROUP)); - if (error2 && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS)) + (ace_flags & NFS_ACE_IDENTIFIER_GROUP)); + if (error2 && (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS)) { printf("nfs4_parsefattr: ACE WHO %s is no one, no guid?, error %d\n", s, error2); + } } - attrbytes -= 4*NFSX_UNSIGNED + nfsm_rndup(len); + attrbytes -= 4 * NFSX_UNSIGNED + nfsm_rndup(len); nfsm_assert(error, (attrbytes >= 0), EBADRPC); } nfsmout_if(error); @@ -1781,7 +1951,7 @@ nfs4_parsefattr( * (just to be safe) FATTR_ACL is in the supported list too. 
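 * (ACLSUPPORT also defines AUDIT and ALARM ACE-type bits, but only the
 * ALLOW and DENY types are checked here when deciding whether the mount
 * advertises ACL support.)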
*/ nfsm_chain_get_32(error, nmc, val); - if ((val & (NFS_ACL_SUPPORT_ALLOW_ACL|NFS_ACL_SUPPORT_DENY_ACL)) && + if ((val & (NFS_ACL_SUPPORT_ALLOW_ACL | NFS_ACL_SUPPORT_DENY_ACL)) && NFS_BITMAP_ISSET(nfsap->nfsa_supp_attr, NFS_FATTR_ACL)) { nfsap->nfsa_flags |= NFS_FSFLAG_ACL; } else { @@ -1791,48 +1961,54 @@ nfs4_parsefattr( } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ARCHIVE)) { /* SF_ARCHIVED */ nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nvap->nva_flags |= NFS_FFLAG_ARCHIVED; - else + } else { nvap->nva_flags &= ~NFS_FFLAG_ARCHIVED; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_CANSETTIME)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_SET_TIME; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_SET_TIME; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_CASE_INSENSITIVE)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_CASE_INSENSITIVE; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_CASE_INSENSITIVE; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_CASE_PRESERVING)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_CASE_PRESERVING; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_CASE_PRESERVING; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_CHOWN_RESTRICTED)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_CHOWN_RESTRICTED; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_CHOWN_RESTRICTED; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_FILEHANDLE)) { nfsm_chain_get_32(error, nmc, val); - if (error == 0 && val > NFS_MAX_FH_SIZE) + if (error == 0 && val > NFS_MAX_FH_SIZE) { error = EBADRPC; + } nfsmout_if(error); if (fhp) { fhp->fh_len = val; @@ -1869,13 +2045,15 @@ nfs4_parsefattr( nfsm_chain_get_32(error, nmc, fsp->np_compcount); /* component count */ attrbytes -= NFSX_UNSIGNED; /* sanity check component count */ - if (!error && (fsp->np_compcount > MAXPATHLEN)) + if (!error && (fsp->np_compcount > MAXPATHLEN)) { error = EBADRPC; + } nfsmout_if(error); if (fsp->np_compcount) { - MALLOC(fsp->np_components, char **, fsp->np_compcount * sizeof(char*), M_TEMP, M_WAITOK|M_ZERO); - if (!fsp->np_components) + MALLOC(fsp->np_components, char **, fsp->np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO); + if (!fsp->np_components) { error = ENOMEM; + } } for (comp = 0; comp < fsp->np_compcount; comp++) { nfsm_chain_get_32(error, nmc, val); /* component length */ @@ -1895,12 +2073,14 @@ nfs4_parsefattr( attrbytes -= NFSX_UNSIGNED; continue; } - if (!error && ((val < 1) || (val > MAXPATHLEN))) + if (!error && ((val < 1) || (val > MAXPATHLEN))) { error = EBADRPC; + } nfsmout_if(error); - MALLOC(fsp->np_components[comp], char *, val+1, M_TEMP, M_WAITOK|M_ZERO); - if (!fsp->np_components[comp]) + MALLOC(fsp->np_components[comp], char *, val + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!fsp->np_components[comp]) { error = ENOMEM; + } nfsmout_if(error); nfsm_chain_get_opaque(error, nmc, val, fsp->np_components[comp]); /* component */ attrbytes -= NFSX_UNSIGNED + nfsm_rndup(val); @@ -1908,59 +2088,69 @@ nfs4_parsefattr( nfsm_chain_get_32(error, nmc, nfslsp->nl_numlocs); /* fs location count */ attrbytes -= NFSX_UNSIGNED; /* sanity check location count */ - if (!error && (nfslsp->nl_numlocs > 256)) + if (!error && (nfslsp->nl_numlocs > 256)) { error = EBADRPC; + } 
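/*
 * Bookkeeping note: every attribute parsed here debits attrbytes by its
 * XDR size -- NFSX_UNSIGNED (one 4-byte word) per word, plus
 * nfsm_rndup(len) for opaque data padded out to a word boundary -- and
 * the nfsm_assert(..., (attrbytes >= 0), EBADRPC) checks catch a reply
 * whose attribute data runs short of what its bitmap promised.
 */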
nfsmout_if(error); if (nfslsp->nl_numlocs > 0) { - MALLOC(nfslsp->nl_locations, struct nfs_fs_location **, nfslsp->nl_numlocs * sizeof(struct nfs_fs_location*), M_TEMP, M_WAITOK|M_ZERO); - if (!nfslsp->nl_locations) + MALLOC(nfslsp->nl_locations, struct nfs_fs_location **, nfslsp->nl_numlocs * sizeof(struct nfs_fs_location*), M_TEMP, M_WAITOK | M_ZERO); + if (!nfslsp->nl_locations) { error = ENOMEM; + } } nfsmout_if(error); for (loc = 0; loc < nfslsp->nl_numlocs; loc++) { nfsmout_if(error); - MALLOC(fsl, struct nfs_fs_location *, sizeof(struct nfs_fs_location), M_TEMP, M_WAITOK|M_ZERO); - if (!fsl) + MALLOC(fsl, struct nfs_fs_location *, sizeof(struct nfs_fs_location), M_TEMP, M_WAITOK | M_ZERO); + if (!fsl) { error = ENOMEM; + } nfslsp->nl_locations[loc] = fsl; nfsm_chain_get_32(error, nmc, fsl->nl_servcount); /* server count */ attrbytes -= NFSX_UNSIGNED; /* sanity check server count */ - if (!error && ((fsl->nl_servcount < 1) || (fsl->nl_servcount > 256))) + if (!error && ((fsl->nl_servcount < 1) || (fsl->nl_servcount > 256))) { error = EBADRPC; + } nfsmout_if(error); - MALLOC(fsl->nl_servers, struct nfs_fs_server **, fsl->nl_servcount * sizeof(struct nfs_fs_server*), M_TEMP, M_WAITOK|M_ZERO); - if (!fsl->nl_servers) + MALLOC(fsl->nl_servers, struct nfs_fs_server **, fsl->nl_servcount * sizeof(struct nfs_fs_server*), M_TEMP, M_WAITOK | M_ZERO); + if (!fsl->nl_servers) { error = ENOMEM; + } for (serv = 0; serv < fsl->nl_servcount; serv++) { nfsmout_if(error); - MALLOC(fss, struct nfs_fs_server *, sizeof(struct nfs_fs_server), M_TEMP, M_WAITOK|M_ZERO); - if (!fss) + MALLOC(fss, struct nfs_fs_server *, sizeof(struct nfs_fs_server), M_TEMP, M_WAITOK | M_ZERO); + if (!fss) { error = ENOMEM; + } fsl->nl_servers[serv] = fss; nfsm_chain_get_32(error, nmc, val); /* server name length */ /* sanity check server name length */ - if (!error && ((val < 1) || (val > MAXPATHLEN))) + if (!error && ((val < 1) || (val > MAXPATHLEN))) { error = EINVAL; + } nfsmout_if(error); - MALLOC(fss->ns_name, char *, val+1, M_TEMP, M_WAITOK|M_ZERO); - if (!fss->ns_name) + MALLOC(fss->ns_name, char *, val + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!fss->ns_name) { error = ENOMEM; + } nfsm_chain_get_opaque(error, nmc, val, fss->ns_name); /* server name */ attrbytes -= NFSX_UNSIGNED + nfsm_rndup(val); nfsmout_if(error); /* copy name to address if it converts to a sockaddr */ if (nfs_uaddr2sockaddr(fss->ns_name, (struct sockaddr*)&ss)) { fss->ns_addrcount = 1; - MALLOC(fss->ns_addresses, char **, sizeof(char *), M_TEMP, M_WAITOK|M_ZERO); - if (!fss->ns_addresses) + MALLOC(fss->ns_addresses, char **, sizeof(char *), M_TEMP, M_WAITOK | M_ZERO); + if (!fss->ns_addresses) { error = ENOMEM; + } nfsmout_if(error); - MALLOC(fss->ns_addresses[0], char *, val+1, M_TEMP, M_WAITOK|M_ZERO); - if (!fss->ns_addresses[0]) + MALLOC(fss->ns_addresses[0], char *, val + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!fss->ns_addresses[0]) { error = ENOMEM; + } nfsmout_if(error); - strlcpy(fss->ns_addresses[0], fss->ns_name, val+1); + strlcpy(fss->ns_addresses[0], fss->ns_name, val + 1); } } /* get pathname */ @@ -1968,13 +2158,15 @@ nfs4_parsefattr( nfsm_chain_get_32(error, nmc, fsp->np_compcount); /* component count */ attrbytes -= NFSX_UNSIGNED; /* sanity check component count */ - if (!error && (fsp->np_compcount > MAXPATHLEN)) + if (!error && (fsp->np_compcount > MAXPATHLEN)) { error = EINVAL; + } nfsmout_if(error); if (fsp->np_compcount) { - MALLOC(fsp->np_components, char **, fsp->np_compcount * sizeof(char*), M_TEMP, M_WAITOK|M_ZERO); - if 
(!fsp->np_components) + MALLOC(fsp->np_components, char **, fsp->np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO); + if (!fsp->np_components) { error = ENOMEM; + } } for (comp = 0; comp < fsp->np_compcount; comp++) { nfsm_chain_get_32(error, nmc, val); /* component length */ @@ -1994,12 +2186,14 @@ nfs4_parsefattr( attrbytes -= NFSX_UNSIGNED; continue; } - if (!error && ((val < 1) || (val > MAXPATHLEN))) + if (!error && ((val < 1) || (val > MAXPATHLEN))) { error = EINVAL; + } nfsmout_if(error); - MALLOC(fsp->np_components[comp], char *, val+1, M_TEMP, M_WAITOK|M_ZERO); - if (!fsp->np_components[comp]) + MALLOC(fsp->np_components[comp], char *, val + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!fsp->np_components[comp]) { error = ENOMEM; + } nfsm_chain_get_opaque(error, nmc, val, fsp->np_components[comp]); /* component */ attrbytes -= NFSX_UNSIGNED + nfsm_rndup(val); } @@ -2008,19 +2202,21 @@ nfs4_parsefattr( } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_HIDDEN)) { /* UF_HIDDEN */ nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nvap->nva_flags |= NFS_FFLAG_HIDDEN; - else + } else { nvap->nva_flags &= ~NFS_FFLAG_HIDDEN; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_HOMOGENEOUS)) { /* XXX If NOT homogeneous, we may need to clear flags on the mount */ nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_HOMOGENEOUS; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_HOMOGENEOUS; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MAXFILESIZE)) { @@ -2029,14 +2225,16 @@ nfs4_parsefattr( } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MAXLINK)) { nfsm_chain_get_32(error, nmc, nvap->nva_maxlink); - if (!error && (nfsap->nfsa_maxlink > INT32_MAX)) + if (!error && (nfsap->nfsa_maxlink > INT32_MAX)) { nfsap->nfsa_maxlink = INT32_MAX; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MAXNAME)) { nfsm_chain_get_32(error, nmc, nfsap->nfsa_maxname); - if (!error && (nfsap->nfsa_maxname > INT32_MAX)) + if (!error && (nfsap->nfsa_maxname > INT32_MAX)) { nfsap->nfsa_maxname = INT32_MAX; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MAXREAD)) { @@ -2058,10 +2256,11 @@ nfs4_parsefattr( } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_NO_TRUNC)) { nfsm_chain_get_32(error, nmc, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_NO_TRUNC; - else + } else { nfsap->nfsa_flags &= ~NFS_FSFLAG_NO_TRUNC; + } attrbytes -= NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_NUMLINKS)) { @@ -2071,8 +2270,9 @@ nfs4_parsefattr( } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_OWNER)) { nfsm_chain_get_32(error, nmc, len); - if (!error && len >= NFS_MAX_WHO) + if (!error && len >= NFS_MAX_WHO) { error = EBADRPC; + } if (!error && (len >= slen)) { if (s != sbuf) { FREE(s, M_TEMP); @@ -2080,25 +2280,28 @@ nfs4_parsefattr( slen = sizeof(sbuf); } /* Let's add a bit more if we can to the allocation as to try and avoid future allocations */ - MALLOC(s, char*, (len + 16 < NFS_MAX_WHO) ? len+16 : NFS_MAX_WHO, M_TEMP, M_WAITOK); - if (s) - slen = (len + 16 < NFS_MAX_WHO) ? len+16 : NFS_MAX_WHO; - else + MALLOC(s, char*, (len + 16 < NFS_MAX_WHO) ? len + 16 : NFS_MAX_WHO, M_TEMP, M_WAITOK); + if (s) { + slen = (len + 16 < NFS_MAX_WHO) ? 
len + 16 : NFS_MAX_WHO; + } else { error = ENOMEM; + } } nfsm_chain_get_opaque(error, nmc, len, s); if (!error) { s[len] = '\0'; error = nfs4_id2guid(s, &nvap->nva_uuuid, 0); - if (!error) + if (!error) { error = kauth_cred_guid2uid(&nvap->nva_uuuid, &nvap->nva_uid); + } if (error) { /* unable to get either GUID or UID, set to default */ nvap->nva_uid = (uid_t)(-2); - if (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS) + if (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS) { printf("nfs4_parsefattr: owner %s is no one, no %s?, error %d\n", s, - kauth_guid_equal(&nvap->nva_uuuid, &kauth_null_guid) ? "guid" : "uid", - error); + kauth_guid_equal(&nvap->nva_uuuid, &kauth_null_guid) ? "guid" : "uid", + error); + } error = 0; } } @@ -2106,8 +2309,9 @@ nfs4_parsefattr( } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_OWNER_GROUP)) { nfsm_chain_get_32(error, nmc, len); - if (!error && len >= NFS_MAX_WHO) + if (!error && len >= NFS_MAX_WHO) { error = EBADRPC; + } if (!error && (len >= slen)) { if (s != sbuf) { FREE(s, M_TEMP); @@ -2115,25 +2319,28 @@ nfs4_parsefattr( slen = sizeof(sbuf); } /* Let's add a bit more if we can to the allocation as to try and avoid future allocations */ - MALLOC(s, char*, (len + 16 < NFS_MAX_WHO) ? len+16 : NFS_MAX_WHO, M_TEMP, M_WAITOK); - if (s) - slen = (len + 16 < NFS_MAX_WHO) ? len+16 : NFS_MAX_WHO; - else + MALLOC(s, char*, (len + 16 < NFS_MAX_WHO) ? len + 16 : NFS_MAX_WHO, M_TEMP, M_WAITOK); + if (s) { + slen = (len + 16 < NFS_MAX_WHO) ? len + 16 : NFS_MAX_WHO; + } else { error = ENOMEM; + } } nfsm_chain_get_opaque(error, nmc, len, s); if (!error) { s[len] = '\0'; error = nfs4_id2guid(s, &nvap->nva_guuid, 1); - if (!error) + if (!error) { error = kauth_cred_guid2gid(&nvap->nva_guuid, &nvap->nva_gid); + } if (error) { /* unable to get either GUID or GID, set to default */ nvap->nva_gid = (gid_t)(-2); - if (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS) + if (nfs_idmap_ctrl & NFS_IDMAP_CTRL_LOG_FAILED_MAPPINGS) { printf("nfs4_parsefattr: group %s is no one, no %s?, error %d\n", s, - kauth_guid_equal(&nvap->nva_guuid, &kauth_null_guid) ? "guid" : "gid", - error); + kauth_guid_equal(&nvap->nva_guuid, &kauth_null_guid) ? 
"guid" : "gid", + error); + } error = 0; } } @@ -2183,7 +2390,7 @@ nfs4_parsefattr( attrbytes -= 3 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_ACCESS_SET)) { - nfsm_chain_adv(error, nmc, 4*NFSX_UNSIGNED); /* just skip it */ + nfsm_chain_adv(error, nmc, 4 * NFSX_UNSIGNED); /* just skip it */ attrbytes -= 4 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_BACKUP)) { @@ -2197,7 +2404,7 @@ nfs4_parsefattr( attrbytes -= 3 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_DELTA)) { /* skip for now */ - nfsm_chain_adv(error, nmc, 3*NFSX_UNSIGNED); + nfsm_chain_adv(error, nmc, 3 * NFSX_UNSIGNED); attrbytes -= 3 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_METADATA)) { @@ -2211,7 +2418,7 @@ nfs4_parsefattr( attrbytes -= 3 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_MODIFY_SET)) { - nfsm_chain_adv(error, nmc, 4*NFSX_UNSIGNED); /* just skip it */ + nfsm_chain_adv(error, nmc, 4 * NFSX_UNSIGNED); /* just skip it */ attrbytes -= 4 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MOUNTED_ON_FILEID)) { @@ -2219,7 +2426,7 @@ nfs4_parsefattr( /* we prefer the mounted on file ID, so just replace the fileid */ nfsm_chain_get_64(error, nmc, nvap->nva_fileid); #else - nfsm_chain_adv(error, nmc, 2*NFSX_UNSIGNED); + nfsm_chain_adv(error, nmc, 2 * NFSX_UNSIGNED); #endif attrbytes -= 2 * NFSX_UNSIGNED; } @@ -2227,20 +2434,24 @@ nfs4_parsefattr( nfsm_assert(error, (attrbytes >= 0), EBADRPC); nfsm_chain_adv(error, nmc, nfsm_rndup(attrbytes)); nfsmout: - if (error) + if (error) { nfs_fs_locations_cleanup(nfslsp); - if (!error && rderror) + } + if (!error && rderror) { error = rderror; + } /* free up temporary resources */ - if (s && (s != sbuf)) + if (s && (s != sbuf)) { FREE(s, M_TEMP); - if (acl) + } + if (acl) { kauth_acl_free(acl); + } if (error && nvap->nva_acl) { kauth_acl_free(nvap->nva_acl); nvap->nva_acl = NULL; } - return (error); + return error; } /* @@ -2275,17 +2486,18 @@ nfsm_chain_add_fattr4_f(struct nfsm_chain *nmc, struct vnode_attr *vap, struct n if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_SIZE)) { nfsm_chain_add_64(error, nmc, vap->va_data_size); - attrbytes += 2*NFSX_UNSIGNED; + attrbytes += 2 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL)) { acl = vap->va_acl; - if (!acl || (acl->acl_entrycount == KAUTH_FILESEC_NOACL)) + if (!acl || (acl->acl_entrycount == KAUTH_FILESEC_NOACL)) { acecount = 0; - else + } else { acecount = acl->acl_entrycount; + } nfsm_chain_add_32(error, nmc, acecount); attrbytes += NFSX_UNSIGNED; - for (i=0; !error && (i < (int)acecount); i++) { + for (i = 0; !error && (i < (int)acecount); i++) { val = (acl->acl_ace[i].ace_flags & KAUTH_ACE_KINDMASK); val = nfs4_ace_vfstype_to_nfstype(val, &error); nfsm_chain_add_32(error, nmc, val); @@ -2312,7 +2524,7 @@ nfsm_chain_add_fattr4_f(struct nfsm_chain *nmc, struct vnode_attr *vap, struct n } } nfsm_chain_add_name(error, nmc, s, len, nmp); - attrbytes += 4*NFSX_UNSIGNED + nfsm_rndup(len); + attrbytes += 4 * NFSX_UNSIGNED + nfsm_rndup(len); } } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ARCHIVE)) { @@ -2389,18 +2601,18 @@ nfsm_chain_add_fattr4_f(struct nfsm_chain *nmc, struct vnode_attr *vap, struct n nfsm_chain_add_32(error, nmc, NFS4_TIME_SET_TO_CLIENT); nfsm_chain_add_64(error, nmc, vap->va_access_time.tv_sec); nfsm_chain_add_32(error, nmc, vap->va_access_time.tv_nsec); - attrbytes += 4*NFSX_UNSIGNED; + attrbytes += 4 * NFSX_UNSIGNED; } } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_BACKUP)) { nfsm_chain_add_64(error, nmc, 
vap->va_backup_time.tv_sec); nfsm_chain_add_32(error, nmc, vap->va_backup_time.tv_nsec); - attrbytes += 3*NFSX_UNSIGNED; + attrbytes += 3 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_CREATE)) { nfsm_chain_add_64(error, nmc, vap->va_create_time.tv_sec); nfsm_chain_add_32(error, nmc, vap->va_create_time.tv_nsec); - attrbytes += 3*NFSX_UNSIGNED; + attrbytes += 3 * NFSX_UNSIGNED; } if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_MODIFY_SET)) { if (vap->va_vaflags & VA_UTIMES_NULL) { @@ -2410,16 +2622,17 @@ nfsm_chain_add_fattr4_f(struct nfsm_chain *nmc, struct vnode_attr *vap, struct n nfsm_chain_add_32(error, nmc, NFS4_TIME_SET_TO_CLIENT); nfsm_chain_add_64(error, nmc, vap->va_modify_time.tv_sec); nfsm_chain_add_32(error, nmc, vap->va_modify_time.tv_nsec); - attrbytes += 4*NFSX_UNSIGNED; + attrbytes += 4 * NFSX_UNSIGNED; } } nfsmout_if(error); /* Now, set the attribute data length */ *pattrbytes = txdr_unsigned(attrbytes); nfsmout: - if (s && (s != sbuf)) + if (s && (s != sbuf)) { FREE(s, M_TEMP); - return (error); + } + return error; } /* @@ -2434,10 +2647,12 @@ nfs_need_recover(struct nfsmount *nmp, int error) nmp->nm_state |= NFSSTA_RECOVER; if ((error == NFSERR_ADMIN_REVOKED) || (error == NFSERR_EXPIRED) || - (error == NFSERR_STALE_CLIENTID)) + (error == NFSERR_STALE_CLIENTID)) { nmp->nm_state |= NFSSTA_RECOVER_EXPIRED; - if (wake) + } + if (wake) { nfs_mount_sock_thread_wake(nmp); + } } /* @@ -2455,24 +2670,28 @@ nfs4_expired_check_delegation(nfsnode_t np, vfs_context_t ctx) struct nfs_open_file *nofp; int drop = 1; - if ((np->n_flag & NREVOKE) || !(np->n_openflags & N_DELEG_MASK)) + if ((np->n_flag & NREVOKE) || !(np->n_openflags & N_DELEG_MASK)) { return; + } lck_mtx_lock(&np->n_openlock); TAILQ_FOREACH(nofp, &np->n_opens, nof_link) { - if (!nofp->nof_opencnt) + if (!nofp->nof_opencnt) { continue; - if (nofp->nof_flags & NFS_OPEN_FILE_LOST) + } + if (nofp->nof_flags & NFS_OPEN_FILE_LOST) { continue; - if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) + } + if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) { continue; + } /* we have an open that is not lost and not marked for reopen */ // XXX print out what's keeping this node from dropping the delegation. NP(nofp->nof_np, "nfs4_expired_check_delegation: !drop: opencnt %d flags 0x%x access %d %d mmap %d %d", - nofp->nof_opencnt, nofp->nof_flags, - nofp->nof_access, nofp->nof_deny, - nofp->nof_mmap_access, nofp->nof_mmap_deny); + nofp->nof_opencnt, nofp->nof_flags, + nofp->nof_access, nofp->nof_deny, + nofp->nof_mmap_access, nofp->nof_mmap_deny); drop = 0; break; } @@ -2497,7 +2716,7 @@ nfs4_expired_check_delegation(nfsnode_t np, vfs_context_t ctx) } lck_mtx_unlock(&nmp->nm_lock); nfs4_delegreturn_rpc(nmp, np->n_fhp, np->n_fhsize, &np->n_dstateid, - 0, vfs_context_thread(ctx), vfs_context_ucred(ctx)); + 0, vfs_context_thread(ctx), vfs_context_ucred(ctx)); } } @@ -2530,24 +2749,31 @@ restart: * we know there are no state operations in progress. 
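 * (nm_stateinuse counts state operations already in flight; the loop
 * below also rechecks NMSOCK_READY, NFSSTA_FORCE/NFSSTA_DEAD and
 * NMSOCK_UNMOUNT on every wakeup, so a dropped socket or an unmount in
 * progress aborts the wait instead of sleeping forever.)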
*/ do { - if ((error = nfs_sigintr(nmp, NULL, NULL, 1))) + if ((error = nfs_sigintr(nmp, NULL, NULL, 1))) { break; - if (!(nmp->nm_sockflags & NMSOCK_READY)) + } + if (!(nmp->nm_sockflags & NMSOCK_READY)) { error = EPIPE; - if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) + } + if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) { error = ENXIO; - if (nmp->nm_sockflags & NMSOCK_UNMOUNT) + } + if (nmp->nm_sockflags & NMSOCK_UNMOUNT) { error = ENXIO; - if (error) + } + if (error) { break; - if (nmp->nm_stateinuse) - msleep(&nmp->nm_stateinuse, &nmp->nm_lock, (PZERO-1), "nfsrecoverstartwait", &ts); + } + if (nmp->nm_stateinuse) { + msleep(&nmp->nm_stateinuse, &nmp->nm_lock, (PZERO - 1), "nfsrecoverstartwait", &ts); + } } while (nmp->nm_stateinuse); if (error) { - if (error == EPIPE) + if (error == EPIPE) { printf("nfs recovery reconnecting for %s, 0x%x\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid); - else + } else { printf("nfs recovery aborted for %s, 0x%x\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid); + } lck_mtx_unlock(&nmp->nm_lock); return; } @@ -2556,12 +2782,13 @@ restart: if (now.tv_sec == nmp->nm_recover_start) { printf("nfs recovery throttled for %s, 0x%x\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid); lck_mtx_unlock(&nmp->nm_lock); - tsleep(nfs_recover, (PZERO-1), "nfsrecoverrestart", hz); + tsleep(nfs_recover, (PZERO - 1), "nfsrecoverrestart", hz); goto restart; } nmp->nm_recover_start = now.tv_sec; - if (++nmp->nm_stategenid == 0) + if (++nmp->nm_stategenid == 0) { ++nmp->nm_stategenid; + } printf("nfs recovery started for %s, 0x%x\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid); lck_mtx_unlock(&nmp->nm_lock); @@ -2569,24 +2796,32 @@ restart: TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) { /* for each of its opens... */ TAILQ_FOREACH(nofp, &noop->noo_opens, nof_oolink) { - if (!nofp->nof_access || (nofp->nof_flags & NFS_OPEN_FILE_LOST) || (nofp->nof_np->n_flag & NREVOKE)) + if (!nofp->nof_access || (nofp->nof_flags & NFS_OPEN_FILE_LOST) || (nofp->nof_np->n_flag & NREVOKE)) { continue; + } lost = reopen = 0; /* for NFSv2/v3, just skip straight to lock reclaim */ - if (nmp->nm_vers < NFS_VER4) + if (nmp->nm_vers < NFS_VER4) { goto reclaim_locks; - if (nofp->nof_rw_drw) + } + if (nofp->nof_rw_drw) { error = nfs4_open_reclaim_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_BOTH); - if (!error && nofp->nof_w_drw) + } + if (!error && nofp->nof_w_drw) { error = nfs4_open_reclaim_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_BOTH); - if (!error && nofp->nof_r_drw) + } + if (!error && nofp->nof_r_drw) { error = nfs4_open_reclaim_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_BOTH); - if (!error && nofp->nof_rw_dw) + } + if (!error && nofp->nof_rw_dw) { error = nfs4_open_reclaim_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_WRITE); - if (!error && nofp->nof_w_dw) + } + if (!error && nofp->nof_w_dw) { error = nfs4_open_reclaim_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_WRITE); - if (!error && nofp->nof_r_dw) + } + if (!error && nofp->nof_r_dw) { error = nfs4_open_reclaim_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_WRITE); + } /* * deny-none opens with no locks can just be reopened (later) if reclaim fails. */ @@ -2618,21 +2853,23 @@ restart: * delegation unless asked to). 
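 * (The nof_d_* counters track opens still held purely under a
 * delegation, while the plain nof_* counters are opens the server has
 * confirmed; the claim below therefore runs only when every open on the
 * file is still delegated -- confirmed opens were already reclaimed via
 * nfs4_open_reclaim_rpc() above.)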
*/ if ((nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw || - nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw || - nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r) && - (!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw && - !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw && - !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r)) { + nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw || + nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r) && + (!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw && + !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw && + !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r)) { if (!error && !nfs_open_state_set_busy(nofp->nof_np, NULL)) { error = nfs4_claim_delegated_state_for_node(nofp->nof_np, R_RECOVER); - if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) + if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { reopen = EAGAIN; + } nfs_open_state_clear_busy(nofp->nof_np); /* if claim didn't go well, we may need to return delegation now */ if (nofp->nof_np->n_openflags & N_DELEG_RETURN) { nfs4_delegation_return(nofp->nof_np, R_RECOVER, thd, noop->noo_cred); - if (!(nmp->nm_sockflags & NMSOCK_READY)) + if (!(nmp->nm_sockflags & NMSOCK_READY)) { error = ETIMEDOUT; /* looks like we need a reconnect */ + } } } } @@ -2644,25 +2881,27 @@ restart: if (error || reopen) { /* restart recovery? */ if ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error)) { - if (error == ETIMEDOUT) + if (error == ETIMEDOUT) { nfs_need_reconnect(nmp); - tsleep(nfs_recover, (PZERO-1), "nfsrecoverrestart", hz); + } + tsleep(nfs_recover, (PZERO - 1), "nfsrecoverrestart", hz); printf("nfs recovery restarting for %s, 0x%x, error %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid, error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid, error); goto restart; } if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) { /* just reopen the file on next access */ NP(nofp->nof_np, "nfs_recover: %d, need reopen for %d %p 0x%x", reopen, - kauth_cred_getuid(noop->noo_cred), nofp->nof_np, nofp->nof_np->n_flag); + kauth_cred_getuid(noop->noo_cred), nofp->nof_np, nofp->nof_np->n_flag); lck_mtx_lock(&nofp->nof_lock); nofp->nof_flags |= NFS_OPEN_FILE_REOPEN; lck_mtx_unlock(&nofp->nof_lock); } else { /* open file state lost */ - if (reopen) + if (reopen) { NP(nofp->nof_np, "nfs_recover: %d, can't reopen because of locks %d %p", reopen, - kauth_cred_getuid(noop->noo_cred), nofp->nof_np); + kauth_cred_getuid(noop->noo_cred), nofp->nof_np); + } lost = 1; error = 0; reopen = 0; @@ -2680,34 +2919,41 @@ restart: */ reclaim_locks: TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) { - if (lost || reopen) + if (lost || reopen) { break; - if (nlop->nlo_open_owner != noop) + } + if (nlop->nlo_open_owner != noop) { continue; + } TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) { /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */ - if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED)) + if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) { continue; + } /* skip delegated locks */ - if (nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED) + if (nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED) { continue; + } error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 1, R_RECOVER, thd, noop->noo_cred); - if (error) + if (error) { NP(nofp->nof_np, "nfs: lock reclaim (0x%llx, 0x%llx) %s %d", - nflp->nfl_start, nflp->nfl_end, - error ? 
"failed" : "succeeded", error); - if (!error) + nflp->nfl_start, nflp->nfl_end, + error ? "failed" : "succeeded", error); + } + if (!error) { continue; + } /* restart recovery? */ if ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error)) { - if (error == ETIMEDOUT) + if (error == ETIMEDOUT) { nfs_need_reconnect(nmp); - tsleep(nfs_recover, (PZERO-1), "nfsrecoverrestart", hz); + } + tsleep(nfs_recover, (PZERO - 1), "nfsrecoverrestart", hz); printf("nfs recovery restarting for %s, 0x%x, error %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid, error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid, error); goto restart; } - /* lock state lost - attempt to close file */ + /* lock state lost - attempt to close file */ lost = 1; error = 0; break; @@ -2727,9 +2973,9 @@ reclaim_locks: nfs4_delegation_return(nofp->nof_np, R_RECOVER, thd, noop->noo_cred); if (!(nmp->nm_sockflags & NMSOCK_READY)) { /* looks like we need a reconnect */ - tsleep(nfs_recover, (PZERO-1), "nfsrecoverrestart", hz); + tsleep(nfs_recover, (PZERO - 1), "nfsrecoverrestart", hz); printf("nfs recovery restarting for %s, 0x%x, error %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid, error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid, error); goto restart; } } @@ -2737,7 +2983,7 @@ reclaim_locks: if (lost) { /* revoke open file state */ NP(nofp->nof_np, "nfs_recover: state lost for %d %p 0x%x", - kauth_cred_getuid(noop->noo_cred), nofp->nof_np, nofp->nof_np->n_flag); + kauth_cred_getuid(noop->noo_cred), nofp->nof_np, nofp->nof_np->n_flag); nfs_revoke_open_state_for_node(nofp->nof_np); } } @@ -2752,17 +2998,18 @@ recheckdeleg: lck_mtx_unlock(&nmp->nm_lock); nfs4_expired_check_delegation(np, vfs_context_kernel()); lck_mtx_lock(&nmp->nm_lock); - if (nextnp == NFSNOLIST) + if (nextnp == NFSNOLIST) { goto recheckdeleg; + } } } - nmp->nm_state &= ~(NFSSTA_RECOVER|NFSSTA_RECOVER_EXPIRED); + nmp->nm_state &= ~(NFSSTA_RECOVER | NFSSTA_RECOVER_EXPIRED); wakeup(&nmp->nm_state); printf("nfs recovery completed for %s, 0x%x\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid); lck_mtx_unlock(&nmp->nm_lock); } else { printf("nfs recovery failed for %s, 0x%x, error %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid, error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid, error); } } diff --git a/bsd/nfs/nfs4_vnops.c b/bsd/nfs/nfs4_vnops.c index ceac2dd0a..223ae28f7 100644 --- a/bsd/nfs/nfs4_vnops.c +++ b/bsd/nfs/nfs4_vnops.c @@ -2,7 +2,7 @@ * Copyright (c) 2006-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -92,8 +92,9 @@ nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx uid_t uid; struct nfsreq_secinfo_args si; - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (0); + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return 0; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -116,11 +117,12 @@ nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), - &si, rpcflags, &nmrep, &xid, &status); + vfs_context_thread(ctx), vfs_context_ucred(ctx), + &si, rpcflags, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -143,16 +145,19 @@ nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx /* ".zfs" subdirectories may erroneously give a denied answer for modify/delete */ if (nfs_access_dotzfs) { vnode_t dvp = NULLVP; - if (np->n_flag & NISDOTZFSCHILD) /* may be able to create/delete snapshot dirs */ - access_result |= (NFS_ACCESS_MODIFY|NFS_ACCESS_EXTEND|NFS_ACCESS_DELETE); - else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) + if (np->n_flag & NISDOTZFSCHILD) { /* may be able to create/delete snapshot dirs */ + access_result |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE); + } else if (((dvp = vnode_getparent(NFSTOV(np))) != NULLVP) && (VTONFS(dvp)->n_flag & NISDOTZFSCHILD)) { access_result |= NFS_ACCESS_DELETE; /* may be able to delete snapshot dirs */ - if (dvp != NULLVP) + } + if (dvp != NULLVP) { vnode_put(dvp); + } } /* Some servers report DELETE support but erroneously give a denied answer. 
*/ - if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) + if (nfs_access_delete && (*access & NFS_ACCESS_DELETE) && !(access_result & NFS_ACCESS_DELETE)) { access_result |= NFS_ACCESS_DELETE; + } nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); nfsmout_if(error); @@ -176,11 +181,12 @@ nfs4_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx /* pass back the access returned with this request */ *access = np->n_access[slot]; nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int @@ -200,21 +206,24 @@ nfs4_getattr_rpc( struct nfsm_chain nmreq, nmrep; struct nfsreq_secinfo_args si; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL); if (np && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL)) { nfs4_default_attrs_for_referral_trigger(VTONFS(np->n_parent), NULL, 0, nvap, NULL); - return (0); + return 0; } - if (flags & NGA_MONITOR) /* vnode monitor requests should be soft */ + if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */ rpcflags = R_RECOVER; + } - if (flags & NGA_SOFT) /* Return ETIMEDOUT if server not responding */ + if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */ rpcflags |= R_SOFT; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -230,15 +239,16 @@ nfs4_getattr_rpc( numops--; nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR); NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap); - if ((flags & NGA_ACL) && acls) + if ((flags & NGA_ACL) && acls) { NFS_BITMAP_SET(bitmap, NFS_FATTR_ACL); + } nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, np); nfsm_chain_build_done(error, &nmreq); nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); - error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), - NULL, rpcflags, &nmrep, xidp, &status); + error = nfs_request2(np, mp, &nmreq, NFSPROC4_COMPOUND, + vfs_context_thread(ctx), vfs_context_ucred(ctx), + NULL, rpcflags, &nmrep, xidp, &status); nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); @@ -255,7 +265,7 @@ nfs4_getattr_rpc( nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int @@ -269,10 +279,12 @@ nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx) struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -294,8 +306,9 @@ nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx) nfsmout_if(error); error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -305,20 +318,23 @@ nfs4_readlink_rpc(nfsnode_t np, char *buf, uint32_t 
*buflenp, vfs_context_t ctx) nfsm_chain_get_32(error, &nmrep, len); nfsmout_if(error); if (len >= *buflenp) { - if (np->n_size && (np->n_size < *buflenp)) + if (np->n_size && (np->n_size < *buflenp)) { len = np->n_size; - else + } else { len = *buflenp - 1; + } } nfsm_chain_get_opaque(error, &nmrep, len, buf); - if (!error) + if (!error) { *buflenp = len; + } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int @@ -338,11 +354,13 @@ nfs4_read_rpc_async( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -369,7 +387,7 @@ nfs4_read_rpc_async( error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp); nfsmout: nfsm_chain_cleanup(&nmreq); - return (error); + return error; } int @@ -389,18 +407,20 @@ nfs4_read_rpc_async_finish( nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { nfs_request_async_cancel(req); - return (ENXIO); + return ENXIO; } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmrep); error = nfs_request_async_finish(req, &nmrep, &xid, &status); - if (error == EINPROGRESS) /* async request restarted */ - return (error); + if (error == EINPROGRESS) { /* async request restarted */ + return error; + } - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -413,17 +433,20 @@ nfs4_read_rpc_async_finish( } nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } if (eofp) { - if (!eof && !retlen) + if (!eof && !retlen) { eof = 1; + } *eofp = eof; } nfsm_chain_cleanup(&nmrep); - if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) + if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) { microuptime(&np->n_lastio); - return (error); + } + return error; } int @@ -445,16 +468,19 @@ nfs4_write_rpc_async( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } /* for async mounts, don't bother sending sync write requests */ if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async && - ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) + ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) { iomode = NFS_WRITE_UNSTABLE; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -473,8 +499,9 @@ nfs4_write_rpc_async( nfsm_chain_add_64(error, &nmreq, uio_offset(uio)); nfsm_chain_add_32(error, &nmreq, iomode); nfsm_chain_add_32(error, &nmreq, len); - if (!error) + if (!error) { error = nfsm_chain_add_uio(&nmreq, uio, len); + } numops--; nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR); nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, np); @@ -485,7 +512,7 @@ nfs4_write_rpc_async( error = nfs_request_async(np, NULL, &nmreq, 
NFSPROC4_COMPOUND, thd, cred, &si, 0, cb, reqp); nfsmout: nfsm_chain_cleanup(&nmreq); - return (error); + return error; } int @@ -507,20 +534,23 @@ nfs4_write_rpc_async_finish( nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { nfs_request_async_cancel(req); - return (ENXIO); + return ENXIO; } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmrep); error = nfs_request_async_finish(req, &nmrep, &xid, &status); - if (error == EINPROGRESS) /* async request restarted */ - return (error); + if (error == EINPROGRESS) { /* async request restarted */ + return error; + } nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { error = ENXIO; - if (!error && (lockerror = nfs_node_lock(np))) + } + if (!error && (lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -528,13 +558,15 @@ nfs4_write_rpc_async_finish( nfsm_chain_get_32(error, &nmrep, rlen); nfsmout_if(error); *rlenp = rlen; - if (rlen <= 0) + if (rlen <= 0) { error = NFSERR_IO; + } nfsm_chain_get_32(error, &nmrep, committed); nfsm_chain_get_64(error, &nmrep, wverf); nfsmout_if(error); - if (wverfp) + if (wverfp) { *wverfp = wverf; + } lck_mtx_lock(&nmp->nm_lock); if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) { nmp->nm_verf = wverf; @@ -546,16 +578,19 @@ nfs4_write_rpc_async_finish( nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmrep); if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async && - ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) + ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) { committed = NFS_WRITE_FILESYNC; + } *iomodep = committed; - if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) + if (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) { microuptime(&np->n_lastio); - return (error); + } + return error; } int @@ -574,11 +609,13 @@ nfs4_remove_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0); restart: nfsm_chain_null(&nmreq); @@ -603,8 +640,9 @@ restart: error = nfs_request2(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, 0, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -613,8 +651,9 @@ restart: nfsm_chain_check_change_info(error, &nmrep, dnp); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid); - if (error && !lockerror) + if (error && !lockerror) { NATTRINVALIDATE(dnp); + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -624,11 +663,11 @@ nfsmout: nfs_node_unlock(dnp); } if (error == NFSERR_GRACE) { - tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); + tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz); goto restart; } - return (remove_error); + return remove_error; } int @@ -648,13 +687,16 @@ nfs4_rename_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(fdnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); 
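
The hunks on both sides of this point are typical of the patch as a whole: single-statement bodies gain mandatory braces, parenthesized returns become bare (`return (error);` to `return error;`), and binary operators such as `|` and `-` gain surrounding spaces. This reads like an automated restyle pass rather than a behavioral change. A minimal before/after sketch of the rule being applied — the type, flag, and function names here are illustrative, not from the patch:

    #include <errno.h>

    #define FLAG_A 0x1   /* illustrative flags, not from the patch */
    #define FLAG_B 0x2

    struct example {
            unsigned int flags;
            int count;
    };

    /* Old style, as removed by the '-' lines. */
    static int
    example_old(struct example *ep)
    {
            if (!ep)
                    return (ENXIO);
            if (ep->flags & (FLAG_A|FLAG_B))
                    ep->count--;
            return (0);
    }

    /* New style, as added by the '+' lines: braces on every
     * conditional body, bare returns, spaced operators. */
    static int
    example_new(struct example *ep)
    {
            if (!ep) {
                    return ENXIO;
            }
            if (ep->flags & (FLAG_A | FLAG_B)) {
                    ep->count--;
            }
            return 0;
    }

Because these rewrites are whitespace- and brace-only, the two functions above are semantically identical; when reviewing hunks like these, the lines worth scrutiny are the few that are not pure restyle.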
+ if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); - if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (fdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } + if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } NFSREQ_SECINFO_SET(&si, fdnp, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -690,8 +732,9 @@ nfs4_rename_rpc( error = nfs_request(fdnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock2(fdnp, tdnp))) + if ((lockerror = nfs_node_lock2(fdnp, tdnp))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -704,14 +747,16 @@ nfs4_rename_rpc( nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); savedxid = xid; nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid); - if (error && !lockerror) + if (error && !lockerror) { NATTRINVALIDATE(tdnp); + } nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); xid = savedxid; nfsm_chain_loadattr(error, &nmrep, fdnp, nfsvers, &xid); - if (error && !lockerror) + if (error && !lockerror) { NATTRINVALIDATE(fdnp); + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -720,7 +765,7 @@ nfsmout: tdnp->n_flag |= NMODIFIED; nfs_node_unlock2(fdnp, tdnp); } - return (error); + return error; } /* @@ -747,16 +792,18 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) struct nfsreq_secinfo_args si; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nmreaddirsize = nmp->nm_readdirsize; nmrsize = nmp->nm_rsize; bigcookies = nmp->nm_state & NFSSTA_BIGCOOKIES; namedattr = (dnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) ? 1 : 0; rdirplus = (NMFLAG(nmp, RDIRPLUS) || namedattr) ? 1 : 0; - if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0); /* @@ -778,15 +825,17 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) NFS_BITMAP_SET(entry_attrs, NFS_FATTR_RDATTR_ERROR); /* lock to protect access to cookie verifier */ - if ((lockerror = nfs_node_lock(dnp))) - return (lockerror); + if ((lockerror = nfs_node_lock(dnp))) { + return lockerror; + } /* determine cookie to use, and move dp to the right offset */ ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp); if (ndbhp->ndbh_count) { - for (i=0; i < ndbhp->ndbh_count-1; i++) + for (i = 0; i < ndbhp->ndbh_count - 1; i++) { dp = NFS_DIRENTRY_NEXT(dp); + } cookie = dp->d_seekoff; dp = NFS_DIRENTRY_NEXT(dp); } else { @@ -809,10 +858,11 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) /* "." 
*/ namlen = 1; reclen = NFS_DIRENTRY_LEN(namlen + xlen); - if (xlen) - bzero(&dp->d_name[namlen+1], xlen); + if (xlen) { + bzero(&dp->d_name[namlen + 1], xlen); + } dp->d_namlen = namlen; - strlcpy(dp->d_name, ".", namlen+1); + strlcpy(dp->d_name, ".", namlen + 1); dp->d_fileno = dnp->n_vattr.nva_fileid; dp->d_type = DT_DIR; dp->d_reclen = reclen; @@ -820,32 +870,38 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) padstart = dp->d_name + dp->d_namlen + 1 + xlen; dp = NFS_DIRENTRY_NEXT(dp); padlen = (char*)dp - padstart; - if (padlen > 0) + if (padlen > 0) { bzero(padstart, padlen); - if (rdirplus) /* zero out attributes */ + } + if (rdirplus) { /* zero out attributes */ bzero(NFS_DIR_BUF_NVATTR(bp, 0), sizeof(struct nfs_vattr)); + } /* ".." */ namlen = 2; reclen = NFS_DIRENTRY_LEN(namlen + xlen); - if (xlen) - bzero(&dp->d_name[namlen+1], xlen); + if (xlen) { + bzero(&dp->d_name[namlen + 1], xlen); + } dp->d_namlen = namlen; - strlcpy(dp->d_name, "..", namlen+1); - if (dnp->n_parent) + strlcpy(dp->d_name, "..", namlen + 1); + if (dnp->n_parent) { dp->d_fileno = VTONFS(dnp->n_parent)->n_vattr.nva_fileid; - else + } else { dp->d_fileno = dnp->n_vattr.nva_fileid; + } dp->d_type = DT_DIR; dp->d_reclen = reclen; dp->d_seekoff = 2; padstart = dp->d_name + dp->d_namlen + 1 + xlen; dp = NFS_DIRENTRY_NEXT(dp); padlen = (char*)dp - padstart; - if (padlen > 0) + if (padlen > 0) { bzero(padstart, padlen); - if (rdirplus) /* zero out attributes */ + } + if (rdirplus) { /* zero out attributes */ bzero(NFS_DIR_BUF_NVATTR(bp, 1), sizeof(struct nfs_vattr)); + } ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data; ndbhp->ndbh_count = 2; @@ -859,7 +915,6 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) { - // PUTFH, GETATTR, READDIR numops = 3; nfsm_chain_build_alloc_init(error, &nmreq, 26 * NFSX_UNSIGNED); @@ -883,8 +938,9 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) nfsmout_if(error); error = nfs_request(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } savedxid = xid; nfsm_chain_skip_tag(error, &nmrep); @@ -902,8 +958,9 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) } nfsmout_if(error); - if (rdirplus) + if (rdirplus) { microuptime(&now); + } /* loop through the entries packing them into the buffer */ while (more_entries) { @@ -923,7 +980,7 @@ nfs4_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) error = EBADRPC; goto nfsmout; } - if (namlen > (sizeof(dp->d_name)-1)) { + if (namlen > (sizeof(dp->d_name) - 1)) { skiplen = namlen - sizeof(dp->d_name) + 1; namlen = sizeof(dp->d_name) - 1; } else { @@ -971,9 +1028,10 @@ nextbuffer: nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name); nfsmout_if(error); dp->d_name[namlen] = '\0'; - if (skiplen) + if (skiplen) { nfsm_chain_adv(error, &nmrep, - nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen)); + nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen)); + } nfsmout_if(error); nvattrp = rdirplus ? 
NFS_DIR_BUF_NVATTR(bp, ndbhp->ndbh_count) : &nvattr; error = nfs4_parsefattr(&nmrep, NULL, nvattrp, &fh, NULL, NULL); @@ -1005,10 +1063,12 @@ nextbuffer: continue; } - if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) + if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_TYPE)) { dp->d_type = IFTODT(VTTOIF(nvattrp->nva_type)); - if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) + } + if (NFS_BITMAP_ISSET(nvattrp->nva_bitmap, NFS_FATTR_FILEID)) { dp->d_fileno = nvattrp->nva_fileid; + } if (rdirplus) { /* fileid is already in d_fileno, so stash xid in attrs */ nvattrp->nva_fileid = savedxid; @@ -1023,17 +1083,17 @@ nextbuffer: goto nextbuffer; } /* pack the file handle into the record */ - dp->d_name[dp->d_namlen+1] = fh.fh_len; - bcopy(fh.fh_data, &dp->d_name[dp->d_namlen+2], fh.fh_len); + dp->d_name[dp->d_namlen + 1] = fh.fh_len; + bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len); } else { /* mark the file handle invalid */ fh.fh_len = 0; fhlen = fh.fh_len + 1; xlen = fhlen + sizeof(time_t); reclen = NFS_DIRENTRY_LEN(namlen + xlen); - bzero(&dp->d_name[dp->d_namlen+1], fhlen); + bzero(&dp->d_name[dp->d_namlen + 1], fhlen); } - *(time_t*)(&dp->d_name[dp->d_namlen+1+fhlen]) = now.tv_sec; + *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec; dp->d_reclen = reclen; } padstart = dp->d_name + dp->d_namlen + 1 + xlen; @@ -1045,14 +1105,15 @@ nextbuffer: ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data; /* zero out the pad bytes */ padlen = (char*)dp - padstart; - if (padlen > 0) + if (padlen > 0) { bzero(padstart, padlen); + } } /* Finally, get the eof boolean */ nfsm_chain_get_32(error, &nmrep, eof); nfsmout_if(error); if (eof) { - ndbhp->ndbh_flags |= (NDB_FULL|NDB_EOF); + ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF); nfs_node_lock_force(dnp); dnp->n_eofcookie = lastcookie; nfs_node_unlock(dnp); @@ -1064,20 +1125,23 @@ nextbuffer: bp = NULL; break; } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } nfsmout_if(error); nfsm_chain_cleanup(&nmrep); nfsm_chain_null(&nmreq); } nfsmout: - if (bp_dropped && bp) + if (bp_dropped && bp) { nfs_buf_release(bp, 0); - if (!lockerror) + } + if (!lockerror) { nfs_node_unlock(dnp); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (bp_dropped ? NFSERR_DIRBUFDROPPED : error); + return bp_dropped ? 
NFSERR_DIRBUFDROPPED : error; } int @@ -1095,11 +1159,13 @@ nfs4_lookup_rpc_async( struct nfsreq_secinfo_args si; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) { isdotdot = 1; @@ -1133,21 +1199,24 @@ nfs4_lookup_rpc_async( nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR); NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap); /* some ".zfs" directories can't handle being asked for some attributes */ - if ((dnp->n_flag & NISDOTZFS) && !isdotdot) + if ((dnp->n_flag & NISDOTZFS) && !isdotdot) { NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR); - if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) + } + if ((dnp->n_flag & NISDOTZFSCHILD) && isdotdot) { NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR); - if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) + } + if (((namelen == 4) && (name[0] == '.') && (name[1] == 'z') && (name[2] == 'f') && (name[3] == 's'))) { NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR); + } nfsm_chain_add_bitmap_supported(error, &nmreq, bitmap, nmp, NULL); nfsm_chain_build_done(error, &nmreq); nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp); + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, reqp); nfsmout: nfsm_chain_cleanup(&nmreq); - return (error); + return error; } @@ -1169,32 +1238,37 @@ nfs4_lookup_rpc_async_finish( struct nfsm_chain nmrep; nmp = NFSTONMP(dnp); - if (nmp == NULL) - return (ENXIO); + if (nmp == NULL) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) + if ((name[0] == '.') && (name[1] == '.') && (namelen == 2)) { isdotdot = 1; + } nfsm_chain_null(&nmrep); error = nfs_request_async_finish(req, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); - if (xidp) + if (xidp) { *xidp = xid; + } nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid); nfsm_chain_op_check(error, &nmrep, (isdotdot ? NFS_OP_LOOKUPP : NFS_OP_LOOKUP)); nfsmout_if(error || !fhp || !nvap); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH); nfsm_chain_get_32(error, &nmrep, fhp->fh_len); - if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) + if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data)) { error = EBADRPC; + } nfsmout_if(error); nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); @@ -1207,8 +1281,9 @@ nfs4_lookup_rpc_async_finish( error = nfs4_parsefattr(&nmrep, NULL, nvap, NULL, NULL, NULL); } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(dnp); + } nfsm_chain_cleanup(&nmrep); if (!error && (op == NFS_OP_LOOKUP) && (nmp->nm_state & NFSSTA_NEEDSECINFO)) { /* We still need to get SECINFO to set default for mount. 
*/ @@ -1218,18 +1293,20 @@ nfsmout: sec.count = NX_MAX_SEC_FLAVORS; error = nfs4_secinfo_rpc(nmp, &req->r_secinfo, vfs_context_ucred(ctx), sec.flavors, &sec.count); /* [sigh] some implementations return "illegal" error for unsupported ops */ - if (error == NFSERR_OP_ILLEGAL) + if (error == NFSERR_OP_ILLEGAL) { error = 0; + } if (!error) { /* set our default security flavor to the first in the list */ lck_mtx_lock(&nmp->nm_lock); - if (sec.count) + if (sec.count) { nmp->nm_auth = sec.flavors[0]; + } nmp->nm_state &= ~NFSSTA_NEEDSECINFO; lck_mtx_unlock(&nmp->nm_lock); } } - return (error); + return error; } int @@ -1249,18 +1326,22 @@ nfs4_commit_rpc( nmp = NFSTONMP(np); FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); - if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) - return (0); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } + if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) { + return 0; + } nfsvers = nmp->nm_vers; - if (count > UINT32_MAX) + if (count > UINT32_MAX) { count32 = 0; - else + } else { count32 = count; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -1284,10 +1365,11 @@ nfs4_commit_rpc( nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, - current_thread(), cred, &si, 0, &nmrep, &xid, &status); + current_thread(), cred, &si, 0, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -1295,19 +1377,22 @@ nfs4_commit_rpc( nfsm_chain_get_64(error, &nmrep, newwverf); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsmout_if(error); lck_mtx_lock(&nmp->nm_lock); - if (nmp->nm_verf != newwverf) + if (nmp->nm_verf != newwverf) { nmp->nm_verf = newwverf; - if (wverf != newwverf) + } + if (wverf != newwverf) { error = NFSERR_STALEWRITEVERF; + } lck_mtx_unlock(&nmp->nm_lock); nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int @@ -1324,11 +1409,13 @@ nfs4_pathconf_rpc( struct nfs_vattr nvattr; struct nfsreq_secinfo_args si; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); NVATTR_INIT(&nvattr); @@ -1365,27 +1452,30 @@ nfs4_pathconf_rpc( nfsmout_if(error); error = nfs4_parsefattr(&nmrep, nfsap, &nvattr, NULL, NULL, NULL); nfsmout_if(error); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; - if (!error) + } + if (!error) { nfs_loadattrcache(np, &nvattr, &xid, 0); - if (!lockerror) + } + if (!lockerror) { nfs_node_unlock(np); + } nfsmout: NVATTR_CLEANUP(&nvattr); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int nfs4_vnop_getattr( struct vnop_getattr_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct vnode_attr *a_vap; - vfs_context_t a_context; 
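
In nfs4_vnop_getattr below, besides converting the argument-struct comment to the starred continuation style, each attribute copy gains braces but keeps the same guard pattern: an attribute is passed back to VFS only if the caller requested it (VATTR_IS_ACTIVE) and the server actually supplied it (NFS_BITMAP_ISSET on the returned attribute bitmap), in which case VATTR_RETURN stores the value and marks it supported. A compressed sketch of one such guard, assuming the xnu-internal nfs_vattr type and bitmap macros from the NFS headers; only NFS_FATTR_NUMLINKS and va_nlink are taken from the function itself:

    #include <sys/vnode.h>      /* VATTR_IS_ACTIVE, VATTR_RETURN */
    #include <nfs/nfsnode.h>    /* struct nfs_vattr, NFS_BITMAP_ISSET (kernel-internal) */

    /* Sketch only: nva stands in for the struct nfs_vattr that
     * nfs_getattr() filled in from the server's GETATTR reply. */
    static void
    example_return_nlink(struct vnode_attr *vap, struct nfs_vattr *nva)
    {
            if (VATTR_IS_ACTIVE(vap, va_nlink) &&
                NFS_BITMAP_ISSET(nva->nva_bitmap, NFS_FATTR_NUMLINKS)) {
                    /* Sets vap->va_nlink and records it in va_supported,
                     * so VFS knows the value is real, not a default. */
                    VATTR_RETURN(vap, va_nlink, nva->nva_nlink);
            }
    }

Attributes the server did not return are simply left unmarked, which is why the bitmap check matters: returning a stale or zero value without the NFS_BITMAP_ISSET test would mislead callers that consult va_supported.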
- } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { struct vnode_attr *vap = ap->a_vap; struct nfsmount *nmp; @@ -1393,57 +1483,68 @@ nfs4_vnop_getattr( int error, acls, ngaflags; nmp = VTONMP(ap->a_vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } acls = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL); ngaflags = NGA_CACHED; - if (VATTR_IS_ACTIVE(vap, va_acl) && acls) + if (VATTR_IS_ACTIVE(vap, va_acl) && acls) { ngaflags |= NGA_ACL; + } error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, ngaflags); - if (error) - return (error); - - vap->va_flags |= VA_64BITOBJIDS; + if (error) { + return error; + } /* copy what we have in nva to *a_vap */ if (VATTR_IS_ACTIVE(vap, va_rdev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_RAWDEV)) { dev_t rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2); VATTR_RETURN(vap, va_rdev, rdev); } - if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS)) + if (VATTR_IS_ACTIVE(vap, va_nlink) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_NUMLINKS)) { VATTR_RETURN(vap, va_nlink, nva.nva_nlink); - if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE)) + } + if (VATTR_IS_ACTIVE(vap, va_data_size) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SIZE)) { VATTR_RETURN(vap, va_data_size, nva.nva_size); + } // VATTR_RETURN(vap, va_data_alloc, ???); // VATTR_RETURN(vap, va_total_size, ???); - if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED)) + if (VATTR_IS_ACTIVE(vap, va_total_alloc) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_SPACE_USED)) { VATTR_RETURN(vap, va_total_alloc, nva.nva_bytes); - if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) + } + if (VATTR_IS_ACTIVE(vap, va_uid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) { VATTR_RETURN(vap, va_uid, nva.nva_uid); - if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) + } + if (VATTR_IS_ACTIVE(vap, va_uuuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER)) { VATTR_RETURN(vap, va_uuuid, nva.nva_uuuid); - if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) + } + if (VATTR_IS_ACTIVE(vap, va_gid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) { VATTR_RETURN(vap, va_gid, nva.nva_gid); - if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) + } + if (VATTR_IS_ACTIVE(vap, va_guuid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_OWNER_GROUP)) { VATTR_RETURN(vap, va_guuid, nva.nva_guuid); + } if (VATTR_IS_ACTIVE(vap, va_mode)) { - if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE)) + if (NMFLAG(nmp, ACLONLY) || !NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_MODE)) { VATTR_RETURN(vap, va_mode, 0777); - else + } else { VATTR_RETURN(vap, va_mode, nva.nva_mode); + } } if (VATTR_IS_ACTIVE(vap, va_flags) && (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) || - NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) || - (nva.nva_flags & NFS_FFLAG_TRIGGER))) { + NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) || + (nva.nva_flags & NFS_FFLAG_TRIGGER))) { uint32_t flags = 0; if (NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_ARCHIVE) && - (nva.nva_flags & NFS_FFLAG_ARCHIVED)) + (nva.nva_flags & NFS_FFLAG_ARCHIVED)) { flags |= SF_ARCHIVED; + } if 
(NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_HIDDEN) && - (nva.nva_flags & NFS_FFLAG_HIDDEN)) + (nva.nva_flags & NFS_FFLAG_HIDDEN)) { flags |= UF_HIDDEN; + } VATTR_RETURN(vap, va_flags, flags); } if (VATTR_IS_ACTIVE(vap, va_create_time) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TIME_CREATE)) { @@ -1471,12 +1572,15 @@ nfs4_vnop_getattr( vap->va_backup_time.tv_nsec = nva.nva_timensec[NFSTIME_BACKUP]; VATTR_SET_SUPPORTED(vap, va_backup_time); } - if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID)) + if (VATTR_IS_ACTIVE(vap, va_fileid) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_FILEID)) { VATTR_RETURN(vap, va_fileid, nva.nva_fileid); - if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE)) + } + if (VATTR_IS_ACTIVE(vap, va_type) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_TYPE)) { VATTR_RETURN(vap, va_type, nva.nva_type); - if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE)) + } + if (VATTR_IS_ACTIVE(vap, va_filerev) && NFS_BITMAP_ISSET(nva.nva_bitmap, NFS_FATTR_CHANGE)) { VATTR_RETURN(vap, va_filerev, nva.nva_change); + } if (VATTR_IS_ACTIVE(vap, va_acl) && acls) { VATTR_RETURN(vap, va_acl, nva.nva_acl); @@ -1487,7 +1591,7 @@ nfs4_vnop_getattr( // VATTR_RETURN(vap, va_encoding, ??? /* potentially unnormalized UTF-8? */); NVATTR_CLEANUP(&nva); - return (error); + return error; } int @@ -1506,35 +1610,43 @@ nfs4_setattr_rpc( nfs_stateid stateid; struct nfsreq_secinfo_args si; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } - if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED|UF_HIDDEN))) { + if (VATTR_IS_ACTIVE(vap, va_flags) && (vap->va_flags & ~(SF_ARCHIVED | UF_HIDDEN))) { /* we don't support setting unsupported flags (duh!) 
*/ - if (vap->va_active & ~VNODE_ATTR_va_flags) - return (EINVAL); /* return EINVAL if other attributes also set */ - else - return (ENOTSUP); /* return ENOTSUP for chflags(2) */ + if (vap->va_active & ~VNODE_ATTR_va_flags) { + return EINVAL; /* return EINVAL if other attributes also set */ + } else { + return ENOTSUP; /* return ENOTSUP for chflags(2) */ + } } /* don't bother requesting some changes if they don't look like they are changing */ - if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) + if (VATTR_IS_ACTIVE(vap, va_uid) && (vap->va_uid == np->n_vattr.nva_uid)) { VATTR_CLEAR_ACTIVE(vap, va_uid); - if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) + } + if (VATTR_IS_ACTIVE(vap, va_gid) && (vap->va_gid == np->n_vattr.nva_gid)) { VATTR_CLEAR_ACTIVE(vap, va_gid); - if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) + } + if (VATTR_IS_ACTIVE(vap, va_uuuid) && kauth_guid_equal(&vap->va_uuuid, &np->n_vattr.nva_uuuid)) { VATTR_CLEAR_ACTIVE(vap, va_uuuid); - if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) + } + if (VATTR_IS_ACTIVE(vap, va_guuid) && kauth_guid_equal(&vap->va_guuid, &np->n_vattr.nva_guuid)) { VATTR_CLEAR_ACTIVE(vap, va_guuid); + } tryagain: /* do nothing if no attributes will be sent */ nfs_vattr_set_bitmap(nmp, bitmap, vap); - if (!bitmap[0] && !bitmap[1]) - return (0); + if (!bitmap[0] && !bitmap[1]) { + return 0; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -1548,8 +1660,9 @@ tryagain: NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, getbitmap); if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_ACL) || NFS_BITMAP_ISSET(bitmap, NFS_FATTR_MODE)) { - if (NACLVALID(np)) + if (NACLVALID(np)) { NFS_BITMAP_SET(getbitmap, NFS_FATTR_ACL); + } NACLINVALIDATE(np); } @@ -1562,10 +1675,11 @@ tryagain: nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize); numops--; nfsm_chain_add_32(error, &nmreq, NFS_OP_SETATTR); - if (VATTR_IS_ACTIVE(vap, va_data_size)) + if (VATTR_IS_ACTIVE(vap, va_data_size)) { nfs_get_stateid(np, vfs_context_thread(ctx), vfs_context_ucred(ctx), &stateid); - else + } else { stateid.seqid = stateid.other[0] = stateid.other[1] = stateid.other[2] = 0; + } nfsm_chain_add_stateid(error, &nmreq, &stateid); nfsm_chain_add_fattr4(error, &nmreq, vap, nmp); numops--; @@ -1576,8 +1690,9 @@ tryagain: nfsmout_if(error); error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -1589,15 +1704,17 @@ tryagain: bmlen = NFS_ATTR_BITMAP_LEN; nfsm_chain_get_bitmap(error, &nmrep, setbitmap, bmlen); if (!error) { - if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) + if (VATTR_IS_ACTIVE(vap, va_data_size) && (np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { microuptime(&np->n_lastio); + } nfs_vattr_set_supported(setbitmap, vap); error = setattr_error; } nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); - if (error) + if (error) { NATTRINVALIDATE(np); + } /* * We just changed the attributes and we want to make sure that we * see the latest attributes. Get the next XID. 
If it's not the @@ -1614,8 +1731,9 @@ tryagain: NATTRINVALIDATE(np); } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); if ((setattr_error == EINVAL) && VATTR_IS_ACTIVE(vap, va_acl) && VATTR_IS_ACTIVE(vap, va_mode) && !NMFLAG(nmp, ACLONLY)) { @@ -1626,13 +1744,13 @@ nfsmout: * but mode was already successfully set). */ if (((bitmap[0] & setbitmap[0]) != bitmap[0]) || - ((bitmap[1] & (setbitmap[1]|NFS_FATTR_MODE)) != bitmap[1])) { + ((bitmap[1] & (setbitmap[1] | NFS_FATTR_MODE)) != bitmap[1])) { VATTR_CLEAR_ACTIVE(vap, va_mode); error = 0; goto tryagain; } } - return (error); + return error; } /* @@ -1646,15 +1764,16 @@ nfs_mount_state_wait_for_recovery(struct nfsmount *nmp) lck_mtx_lock(&nmp->nm_lock); while (nmp->nm_state & NFSSTA_RECOVER) { - if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) + if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) { break; + } nfs_mount_sock_thread_wake(nmp); - msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts); + msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts); slpflag = 0; } lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } /* @@ -1669,25 +1788,28 @@ nfs_mount_state_in_use_start(struct nfsmount *nmp, thread_t thd) struct timespec ts = { 1, 0 }; int error = 0, slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } lck_mtx_lock(&nmp->nm_lock); - if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) { + if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) { lck_mtx_unlock(&nmp->nm_lock); - return (ENXIO); + return ENXIO; } while (nmp->nm_state & NFSSTA_RECOVER) { - if ((error = nfs_sigintr(nmp, NULL, thd, 1))) + if ((error = nfs_sigintr(nmp, NULL, thd, 1))) { break; + } nfs_mount_sock_thread_wake(nmp); - msleep(&nmp->nm_state, &nmp->nm_lock, slpflag|(PZERO-1), "nfsrecoverwait", &ts); + msleep(&nmp->nm_state, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsrecoverwait", &ts); slpflag = 0; } - if (!error) + if (!error) { nmp->nm_stateinuse++; + } lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } /* @@ -1700,25 +1822,29 @@ nfs_mount_state_in_use_end(struct nfsmount *nmp, int error) { int restart = nfs_mount_state_error_should_restart(error); - if (nfs_mount_gone(nmp)) - return (restart); + if (nfs_mount_gone(nmp)) { + return restart; + } lck_mtx_lock(&nmp->nm_lock); if (restart && (error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE)) { printf("nfs_mount_state_in_use_end: error %d, initiating recovery for %s, 0x%x\n", - error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid); + error, vfs_statfs(nmp->nm_mountp)->f_mntfromname, nmp->nm_stategenid); nfs_need_recover(nmp, error); } - if (nmp->nm_stateinuse > 0) + if (nmp->nm_stateinuse > 0) { nmp->nm_stateinuse--; - else + } else { panic("NFS mount state in use count underrun"); - if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) + } + if (!nmp->nm_stateinuse && (nmp->nm_state & NFSSTA_RECOVER)) { wakeup(&nmp->nm_stateinuse); + } lck_mtx_unlock(&nmp->nm_lock); - if (error == NFSERR_GRACE) - tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); + if (error == NFSERR_GRACE) { + tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz); + } - return (restart); + return restart; } /* @@ -1735,9 +1861,9 @@ nfs_mount_state_error_should_restart(int error) case NFSERR_OLD_STATEID: case NFSERR_BAD_STATEID: case NFSERR_GRACE: - return 
(1); + return 1; } - return (0); + return 0; } /* @@ -1748,7 +1874,7 @@ nfs_mount_state_error_should_restart(int error) uint nfs_mount_state_max_restarts(struct nfsmount *nmp) { - return (MAX(nmp->nm_fsattr.nfsa_lease, 60)); + return MAX(nmp->nm_fsattr.nfsa_lease, 60); } /* @@ -1764,9 +1890,9 @@ nfs_mount_state_error_delegation_lost(int error) case NFSERR_OLD_STATEID: case NFSERR_BAD_STATEID: case NFSERR_GRACE: /* ugh! (stupid) RFC 3530 specifically disallows CLAIM_DELEGATE_CUR during grace period? */ - return (1); + return 1; } - return (0); + return 0; } @@ -1781,23 +1907,26 @@ nfs_open_state_set_busy(nfsnode_t np, thread_t thd) int error = 0, slpflag; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0; lck_mtx_lock(&np->n_openlock); while (np->n_openflags & N_OPENBUSY) { - if ((error = nfs_sigintr(nmp, NULL, thd, 0))) + if ((error = nfs_sigintr(nmp, NULL, thd, 0))) { break; + } np->n_openflags |= N_OPENWANT; msleep(&np->n_openflags, &np->n_openlock, slpflag, "nfs_open_state_set_busy", &ts); slpflag = 0; } - if (!error) + if (!error) { np->n_openflags |= N_OPENBUSY; + } lck_mtx_unlock(&np->n_openlock); - return (error); + return error; } /* @@ -1810,13 +1939,15 @@ nfs_open_state_clear_busy(nfsnode_t np) int wanted; lck_mtx_lock(&np->n_openlock); - if (!(np->n_openflags & N_OPENBUSY)) + if (!(np->n_openflags & N_OPENBUSY)) { panic("nfs_open_state_clear_busy"); + } wanted = (np->n_openflags & N_OPENWANT); - np->n_openflags &= ~(N_OPENBUSY|N_OPENWANT); + np->n_openflags &= ~(N_OPENBUSY | N_OPENWANT); lck_mtx_unlock(&np->n_openlock); - if (wanted) + if (wanted) { wakeup(&np->n_openflags); + } } /* @@ -1832,15 +1963,17 @@ nfs_open_owner_find(struct nfsmount *nmp, kauth_cred_t cred, int alloc) tryagain: lck_mtx_lock(&nmp->nm_lock); TAILQ_FOREACH(noop, &nmp->nm_open_owners, noo_link) { - if (kauth_cred_getuid(noop->noo_cred) == uid) + if (kauth_cred_getuid(noop->noo_cred) == uid) { break; + } } if (!noop && !newnoop && alloc) { lck_mtx_unlock(&nmp->nm_lock); MALLOC(newnoop, struct nfs_open_owner *, sizeof(struct nfs_open_owner), M_TEMP, M_WAITOK); - if (!newnoop) - return (NULL); + if (!newnoop) { + return NULL; + } bzero(newnoop, sizeof(*newnoop)); lck_mtx_init(&newnoop->noo_lock, nfs_open_grp, LCK_ATTR_NULL); newnoop->noo_mount = nmp; @@ -1852,18 +1985,21 @@ tryagain: } if (!noop && newnoop) { newnoop->noo_flags |= NFS_OPEN_OWNER_LINK; + os_ref_init(&newnoop->noo_refcnt, NULL); TAILQ_INSERT_HEAD(&nmp->nm_open_owners, newnoop, noo_link); noop = newnoop; } lck_mtx_unlock(&nmp->nm_lock); - if (newnoop && (noop != newnoop)) + if (newnoop && (noop != newnoop)) { nfs_open_owner_destroy(newnoop); + } - if (noop) + if (noop) { nfs_open_owner_ref(noop); + } - return (noop); + return noop; } /* @@ -1872,8 +2008,9 @@ tryagain: void nfs_open_owner_destroy(struct nfs_open_owner *noop) { - if (noop->noo_cred) + if (noop->noo_cred) { kauth_cred_unref(&noop->noo_cred); + } lck_mtx_destroy(&noop->noo_lock, nfs_open_grp); FREE(noop, M_TEMP); } @@ -1885,7 +2022,7 @@ void nfs_open_owner_ref(struct nfs_open_owner *noop) { lck_mtx_lock(&noop->noo_lock); - noop->noo_refcnt++; + os_ref_retain_locked(&noop->noo_refcnt); lck_mtx_unlock(&noop->noo_lock); } @@ -1896,14 +2033,18 @@ nfs_open_owner_ref(struct nfs_open_owner *noop) void nfs_open_owner_rele(struct nfs_open_owner *noop) { + os_ref_count_t newcount; + lck_mtx_lock(&noop->noo_lock); - if (noop->noo_refcnt < 1) + if 
(os_ref_get_count(&noop->noo_refcnt) < 1) { panic("nfs_open_owner_rele: no refcnt"); - noop->noo_refcnt--; - if (!noop->noo_refcnt && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) + } + newcount = os_ref_release_locked(&noop->noo_refcnt); + if (!newcount && (noop->noo_flags & NFS_OPEN_OWNER_BUSY)) { panic("nfs_open_owner_rele: busy"); + } /* XXX we may potentially want to clean up idle/unused open owner structures */ - if (noop->noo_refcnt || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) { + if (newcount || (noop->noo_flags & NFS_OPEN_OWNER_LINK)) { lck_mtx_unlock(&noop->noo_lock); return; } @@ -1924,23 +2065,26 @@ nfs_open_owner_set_busy(struct nfs_open_owner *noop, thread_t thd) int error = 0, slpflag; nmp = noop->noo_mount; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0; lck_mtx_lock(&noop->noo_lock); while (noop->noo_flags & NFS_OPEN_OWNER_BUSY) { - if ((error = nfs_sigintr(nmp, NULL, thd, 0))) + if ((error = nfs_sigintr(nmp, NULL, thd, 0))) { break; + } noop->noo_flags |= NFS_OPEN_OWNER_WANT; msleep(noop, &noop->noo_lock, slpflag, "nfs_open_owner_set_busy", &ts); slpflag = 0; } - if (!error) + if (!error) { noop->noo_flags |= NFS_OPEN_OWNER_BUSY; + } lck_mtx_unlock(&noop->noo_lock); - return (error); + return error; } /* @@ -1953,13 +2097,15 @@ nfs_open_owner_clear_busy(struct nfs_open_owner *noop) int wanted; lck_mtx_lock(&noop->noo_lock); - if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) + if (!(noop->noo_flags & NFS_OPEN_OWNER_BUSY)) { panic("nfs_open_owner_clear_busy"); + } wanted = (noop->noo_flags & NFS_OPEN_OWNER_WANT); - noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY|NFS_OPEN_OWNER_WANT); + noop->noo_flags &= ~(NFS_OPEN_OWNER_BUSY | NFS_OPEN_OWNER_WANT); lck_mtx_unlock(&noop->noo_lock); - if (wanted) + if (wanted) { wakeup(noop); + } } /* @@ -1981,10 +2127,12 @@ nfs_owner_seqid_increment(struct nfs_open_owner *noop, struct nfs_lock_owner *nl /* do not increment the open seqid on these errors */ return; } - if (noop) + if (noop) { noop->noo_seqid++; - if (nlop) + } + if (nlop) { nlop->nlo_seqid++; + } } /* @@ -2021,20 +2169,22 @@ nfs_open_file_find_internal( { struct nfs_open_file *nofp = NULL, *nofp2, *newnofp = NULL; - if (!np) + if (!np) { goto alloc; + } tryagain: lck_mtx_lock(&np->n_openlock); TAILQ_FOREACH(nofp2, &np->n_opens, nof_link) { if (nofp2->nof_owner == noop) { nofp = nofp2; - if (!accessMode) + if (!accessMode) { break; + } } if ((accessMode & nofp2->nof_deny) || (denyMode & nofp2->nof_access)) { /* This request conflicts with an existing open on this client. 
*/ lck_mtx_unlock(&np->n_openlock); - return (EACCES); + return EACCES; } } @@ -2046,8 +2196,9 @@ tryagain: lck_mtx_unlock(&np->n_openlock); alloc: MALLOC(newnofp, struct nfs_open_file *, sizeof(struct nfs_open_file), M_TEMP, M_WAITOK); - if (!newnofp) - return (ENOMEM); + if (!newnofp) { + return ENOMEM; + } bzero(newnofp, sizeof(*newnofp)); lck_mtx_init(&newnofp->nof_lock, nfs_open_grp, LCK_ATTR_NULL); newnofp->nof_owner = noop; @@ -2056,8 +2207,9 @@ alloc: lck_mtx_lock(&noop->noo_lock); TAILQ_INSERT_HEAD(&noop->noo_opens, newnofp, nof_oolink); lck_mtx_unlock(&noop->noo_lock); - if (np) + if (np) { goto tryagain; + } } if (!nofp) { if (*nofpp) { @@ -2066,17 +2218,20 @@ alloc: } else { nofp = newnofp; } - if (nofp && np) + if (nofp && np) { TAILQ_INSERT_HEAD(&np->n_opens, nofp, nof_link); + } } - if (np) + if (np) { lck_mtx_unlock(&np->n_openlock); + } - if (alloc && newnofp && (nofp != newnofp)) + if (alloc && newnofp && (nofp != newnofp)) { nfs_open_file_destroy(newnofp); + } *nofpp = nofp; - return (nofp ? 0 : ESRCH); + return nofp ? 0 : ESRCH; } /* @@ -2105,23 +2260,26 @@ nfs_open_file_set_busy(struct nfs_open_file *nofp, thread_t thd) int error = 0, slpflag; nmp = nofp->nof_owner->noo_mount; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } slpflag = (NMFLAG(nmp, INTR) && thd) ? PCATCH : 0; lck_mtx_lock(&nofp->nof_lock); while (nofp->nof_flags & NFS_OPEN_FILE_BUSY) { - if ((error = nfs_sigintr(nmp, NULL, thd, 0))) + if ((error = nfs_sigintr(nmp, NULL, thd, 0))) { break; + } nofp->nof_flags |= NFS_OPEN_FILE_WANT; msleep(nofp, &nofp->nof_lock, slpflag, "nfs_open_file_set_busy", &ts); slpflag = 0; } - if (!error) + if (!error) { nofp->nof_flags |= NFS_OPEN_FILE_BUSY; + } lck_mtx_unlock(&nofp->nof_lock); - return (error); + return error; } /* @@ -2134,13 +2292,15 @@ nfs_open_file_clear_busy(struct nfs_open_file *nofp) int wanted; lck_mtx_lock(&nofp->nof_lock); - if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) + if (!(nofp->nof_flags & NFS_OPEN_FILE_BUSY)) { panic("nfs_open_file_clear_busy"); + } wanted = (nofp->nof_flags & NFS_OPEN_FILE_WANT); - nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY|NFS_OPEN_FILE_WANT); + nofp->nof_flags &= ~(NFS_OPEN_FILE_BUSY | NFS_OPEN_FILE_WANT); lck_mtx_unlock(&nofp->nof_lock); - if (wanted) + if (wanted) { wakeup(nofp); + } } /* @@ -2155,49 +2315,55 @@ nfs_open_file_add_open(struct nfs_open_file *nofp, uint32_t accessMode, uint32_t if (delegated) { if (denyMode == NFS_OPEN_SHARE_DENY_NONE) { - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { nofp->nof_d_r++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { nofp->nof_d_w++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { nofp->nof_d_rw++; + } } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) { - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { nofp->nof_d_r_dw++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { nofp->nof_d_w_dw++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { nofp->nof_d_rw_dw++; + } } else { /* NFS_OPEN_SHARE_DENY_BOTH */ - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { nofp->nof_d_r_drw++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode 
== NFS_OPEN_SHARE_ACCESS_WRITE) { nofp->nof_d_w_drw++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { nofp->nof_d_rw_drw++; + } } } else { if (denyMode == NFS_OPEN_SHARE_DENY_NONE) { - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { nofp->nof_r++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { nofp->nof_w++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { nofp->nof_rw++; + } } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) { - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { nofp->nof_r_dw++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { nofp->nof_w_dw++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { nofp->nof_rw_dw++; + } } else { /* NFS_OPEN_SHARE_DENY_BOTH */ - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { nofp->nof_r_drw++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { nofp->nof_w_drw++; - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { nofp->nof_rw_drw++; + } } } @@ -2228,65 +2394,72 @@ nfs_open_file_remove_open_find( if ((accessMode & NFS_OPEN_SHARE_ACCESS_READ) && (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ) && ((nofp->nof_r + nofp->nof_d_r + - nofp->nof_rw + nofp->nof_d_rw + - nofp->nof_r_dw + nofp->nof_d_r_dw + - nofp->nof_rw_dw + nofp->nof_d_rw_dw + - nofp->nof_r_drw + nofp->nof_d_r_drw + - nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) + nofp->nof_rw + nofp->nof_d_rw + + nofp->nof_r_dw + nofp->nof_d_r_dw + + nofp->nof_rw_dw + nofp->nof_d_rw_dw + + nofp->nof_r_drw + nofp->nof_d_r_drw + + nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) { *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_READ; + } if ((accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && (nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE) && ((nofp->nof_w + nofp->nof_d_w + - nofp->nof_rw + nofp->nof_d_rw + - nofp->nof_w_dw + nofp->nof_d_w_dw + - nofp->nof_rw_dw + nofp->nof_d_rw_dw + - nofp->nof_w_drw + nofp->nof_d_w_drw + - nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) + nofp->nof_rw + nofp->nof_d_rw + + nofp->nof_w_dw + nofp->nof_d_w_dw + + nofp->nof_rw_dw + nofp->nof_d_rw_dw + + nofp->nof_w_drw + nofp->nof_d_w_drw + + nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) { *newAccessMode &= ~NFS_OPEN_SHARE_ACCESS_WRITE; + } if ((denyMode & NFS_OPEN_SHARE_DENY_READ) && (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) && ((nofp->nof_r_drw + nofp->nof_d_r_drw + - nofp->nof_w_drw + nofp->nof_d_w_drw + - nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) + nofp->nof_w_drw + nofp->nof_d_w_drw + + nofp->nof_rw_drw + nofp->nof_d_rw_drw) == 1)) { *newDenyMode &= ~NFS_OPEN_SHARE_DENY_READ; + } if ((denyMode & NFS_OPEN_SHARE_DENY_WRITE) && (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) && ((nofp->nof_r_drw + nofp->nof_d_r_drw + - nofp->nof_w_drw + nofp->nof_d_w_drw + - nofp->nof_rw_drw + nofp->nof_d_rw_drw + - nofp->nof_r_dw + nofp->nof_d_r_dw + - nofp->nof_w_dw + nofp->nof_d_w_dw + - nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) + nofp->nof_w_drw + nofp->nof_d_w_drw + + nofp->nof_rw_drw + nofp->nof_d_rw_drw + + nofp->nof_r_dw + nofp->nof_d_r_dw + + nofp->nof_w_dw + nofp->nof_d_w_dw + + 
nofp->nof_rw_dw + nofp->nof_d_rw_dw) == 1)) { *newDenyMode &= ~NFS_OPEN_SHARE_DENY_WRITE; + } /* Find the corresponding open access/deny mode counter. */ if (denyMode == NFS_OPEN_SHARE_DENY_NONE) { - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { *delegated = (nofp->nof_d_r != 0); - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { *delegated = (nofp->nof_d_w != 0); - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { *delegated = (nofp->nof_d_rw != 0); - else + } else { *delegated = 0; + } } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) { - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { *delegated = (nofp->nof_d_r_dw != 0); - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { *delegated = (nofp->nof_d_w_dw != 0); - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { *delegated = (nofp->nof_d_rw_dw != 0); - else + } else { *delegated = 0; + } } else { /* NFS_OPEN_SHARE_DENY_BOTH */ - if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { *delegated = (nofp->nof_d_r_drw != 0); - else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { *delegated = (nofp->nof_d_w_drw != 0); - else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) + } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { *delegated = (nofp->nof_d_rw_drw != 0); - else + } else { *delegated = 0; + } } } @@ -2306,115 +2479,133 @@ nfs_open_file_remove_open(struct nfs_open_file *nofp, uint32_t accessMode, uint3 if (denyMode == NFS_OPEN_SHARE_DENY_NONE) { if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { if (delegated) { - if (nofp->nof_d_r == 0) + if (nofp->nof_d_r == 0) { NP(nofp->nof_np, "nfs: open(R) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_r--; + } } else { - if (nofp->nof_r == 0) + if (nofp->nof_r == 0) { NP(nofp->nof_np, "nfs: open(R) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_r--; + } } } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { if (delegated) { - if (nofp->nof_d_w == 0) + if (nofp->nof_d_w == 0) { NP(nofp->nof_np, "nfs: open(W) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_w--; + } } else { - if (nofp->nof_w == 0) + if (nofp->nof_w == 0) { NP(nofp->nof_np, "nfs: open(W) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_w--; + } } } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { if (delegated) { - if (nofp->nof_d_rw == 0) + if (nofp->nof_d_rw == 0) { NP(nofp->nof_np, "nfs: open(RW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_rw--; + } } else { - if (nofp->nof_rw == 0) + if (nofp->nof_rw == 0) { NP(nofp->nof_np, "nfs: open(RW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_rw--; + } } } } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) { if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { if (delegated) { - if (nofp->nof_d_r_dw == 0) + if (nofp->nof_d_r_dw == 0) { NP(nofp->nof_np, "nfs: open(R,DW) delegated count underrun, %d", 
kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_r_dw--; + } } else { - if (nofp->nof_r_dw == 0) + if (nofp->nof_r_dw == 0) { NP(nofp->nof_np, "nfs: open(R,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_r_dw--; + } } } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { if (delegated) { - if (nofp->nof_d_w_dw == 0) + if (nofp->nof_d_w_dw == 0) { NP(nofp->nof_np, "nfs: open(W,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_w_dw--; + } } else { - if (nofp->nof_w_dw == 0) + if (nofp->nof_w_dw == 0) { NP(nofp->nof_np, "nfs: open(W,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_w_dw--; + } } } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { if (delegated) { - if (nofp->nof_d_rw_dw == 0) + if (nofp->nof_d_rw_dw == 0) { NP(nofp->nof_np, "nfs: open(RW,DW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_rw_dw--; + } } else { - if (nofp->nof_rw_dw == 0) + if (nofp->nof_rw_dw == 0) { NP(nofp->nof_np, "nfs: open(RW,DW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_rw_dw--; + } } } } else { /* NFS_OPEN_SHARE_DENY_BOTH */ if (accessMode == NFS_OPEN_SHARE_ACCESS_READ) { if (delegated) { - if (nofp->nof_d_r_drw == 0) + if (nofp->nof_d_r_drw == 0) { NP(nofp->nof_np, "nfs: open(R,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_r_drw--; + } } else { - if (nofp->nof_r_drw == 0) + if (nofp->nof_r_drw == 0) { NP(nofp->nof_np, "nfs: open(R,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_r_drw--; + } } } else if (accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) { if (delegated) { - if (nofp->nof_d_w_drw == 0) + if (nofp->nof_d_w_drw == 0) { NP(nofp->nof_np, "nfs: open(W,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_w_drw--; + } } else { - if (nofp->nof_w_drw == 0) + if (nofp->nof_w_drw == 0) { NP(nofp->nof_np, "nfs: open(W,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_w_drw--; + } } } else if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { if (delegated) { - if (nofp->nof_d_rw_drw == 0) + if (nofp->nof_d_rw_drw == 0) { NP(nofp->nof_np, "nfs: open(RW,DRW) delegated count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_d_rw_drw--; + } } else { - if (nofp->nof_rw_drw == 0) + if (nofp->nof_rw_drw == 0) { NP(nofp->nof_np, "nfs: open(RW,DRW) count underrun, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); - else + } else { nofp->nof_rw_drw--; + } } } } @@ -2447,20 +2638,23 @@ nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid) if (np->n_openflags & N_DELEG_MASK) { s = &np->n_dstateid; } else { - if (p) + if (p) { nlop = nfs_lock_owner_find(np, p, 0); + } if (nlop && !TAILQ_EMPTY(&nlop->nlo_locks)) { /* we hold locks, use lock stateid */ s = &nlop->nlo_stateid; } else if (((noop = nfs_open_owner_find(nmp, cred, 0))) && - (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) && - !(nofp->nof_flags & NFS_OPEN_FILE_LOST) && - nofp->nof_access) { + (nfs_open_file_find(np, noop, &nofp, 0, 0, 0) == 0) && + !(nofp->nof_flags & NFS_OPEN_FILE_LOST) && + nofp->nof_access) { /* we (should) have the file open, use open 
stateid */ - if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) + if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) { nfs4_reopen(nofp, thd); - if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) + } + if (!(nofp->nof_flags & NFS_OPEN_FILE_LOST)) { s = &nofp->nof_stateid; + } } } @@ -2471,14 +2665,17 @@ nfs_get_stateid(nfsnode_t np, thread_t thd, kauth_cred_t cred, nfs_stateid *sid) sid->other[2] = s->other[2]; } else { /* named attributes may not have a stateid for reads, so don't complain for them */ - if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) + if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { NP(np, "nfs_get_stateid: no stateid"); + } sid->seqid = sid->other[0] = sid->other[1] = sid->other[2] = 0xffffffff; } - if (nlop) + if (nlop) { nfs_lock_owner_rele(nlop); - if (noop) + } + if (noop) { nfs_open_owner_rele(noop); + } } @@ -2510,10 +2707,12 @@ nfs4_open_delegated( tryagain: action = 0; - if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) + if (accessMode & NFS_OPEN_SHARE_ACCESS_READ) { action |= KAUTH_VNODE_READ_DATA; - if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) + } + if (accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) { action |= KAUTH_VNODE_WRITE_DATA; + } /* evaluate ACE (if we have one) */ if (np->n_dace.ace_flags) { @@ -2521,11 +2720,13 @@ tryagain: eval.ae_acl = &np->n_dace; eval.ae_count = 1; eval.ae_options = 0; - if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) + if (np->n_vattr.nva_uid == kauth_cred_getuid(cred)) { eval.ae_options |= KAUTH_AEVAL_IS_OWNER; + } error = kauth_cred_ismember_gid(cred, np->n_vattr.nva_gid, &ismember); - if (!error && ismember) + if (!error && ismember) { eval.ae_options |= KAUTH_AEVAL_IN_GROUP; + } eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS; eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS; @@ -2534,8 +2735,9 @@ tryagain: error = kauth_acl_evaluate(cred, &eval); - if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) + if (!error && (eval.ae_result == KAUTH_RESULT_ALLOW)) { authorized = 1; + } } if (!authorized) { @@ -2545,8 +2747,9 @@ tryagain: naa.a_vp = NFSTOV(np); naa.a_action = action; naa.a_context = ctx; - if (!(error = nfs_vnop_access(&naa))) + if (!(error = nfs_vnop_access(&naa))) { authorized = 1; + } } if (!authorized) { @@ -2556,12 +2759,12 @@ tryagain: readtoo = 0; goto tryagain; } - return (error ? error : EACCES); + return error ? error : EACCES; } nfs_open_file_add_open(nofp, accessMode, denyMode, 1); - return (0); + return 0; } @@ -2599,14 +2802,15 @@ nfs4_open( * use the delegation if it's being returned. */ if (np->n_openflags & N_DELEG_MASK) { - if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) - return (error); + if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) { + return error; + } if ((np->n_openflags & N_DELEG_MASK) && !(np->n_openflags & N_DELEG_RETURN) && (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) || - (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) { + (!(accessMode & NFS_OPEN_SHARE_ACCESS_WRITE) && !(denyMode & NFS_OPEN_SHARE_DENY_READ)))) { error = nfs4_open_delegated(np, nofp, accessMode, denyMode, ctx); nfs_open_state_clear_busy(np); - return (error); + return error; } nfs_open_state_clear_busy(np); } @@ -2618,25 +2822,28 @@ nfs4_open( * from the n_parent we have stashed away. 
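[Editor's sketch] The nfs_open_file_add_open / nfs_open_file_remove_open hunks earlier in this file restyle bookkeeping that keeps one counter per (access, deny, delegated) combination: nof_r / nof_w / nof_rw for NFS_OPEN_SHARE_DENY_NONE, the _dw and _drw suffixes for DENY_WRITE and DENY_BOTH, and a d_ prefix for opens granted under a delegation. (Note the totals in nfs_open_file_remove_open_find end in nof_rw_dw + nof_d_rw_dw where the surrounding pattern suggests the _drw pair; the patch carries that term through unchanged.) A minimal sketch of the same selection, using hypothetical names (open_counts, counter_for) rather than the kernel's:

    /*
     * Illustrative only: one counter per (access, deny, delegated)
     * combination, selected the way nfs_open_file_add_open does.
     * open_counts and counter_for are hypothetical names.
     */
    #include <stdint.h>

    #define ACCESS_READ   1   /* mirrors NFS_OPEN_SHARE_ACCESS_READ  */
    #define ACCESS_WRITE  2   /* mirrors NFS_OPEN_SHARE_ACCESS_WRITE */
    #define ACCESS_BOTH   3   /* mirrors NFS_OPEN_SHARE_ACCESS_BOTH  */
    #define DENY_NONE     0   /* mirrors NFS_OPEN_SHARE_DENY_NONE    */
    #define DENY_WRITE    2   /* mirrors NFS_OPEN_SHARE_DENY_WRITE   */
    #define DENY_BOTH     3   /* mirrors NFS_OPEN_SHARE_DENY_BOTH    */

    struct open_counts {
        uint32_t c[2][3][3];  /* [delegated][deny index][access - 1] */
    };

    static uint32_t *
    counter_for(struct open_counts *oc, int access, int deny, int delegated)
    {
        int d = (deny == DENY_NONE) ? 0 : (deny == DENY_WRITE) ? 1 : 2;
        return &oc->c[delegated ? 1 : 0][d][access - 1];
    }

Under that scheme, add_open is essentially (*counter_for(oc, a, d, del))++ and remove_open the matching decrement guarded by the underrun warnings seen above.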
*/ if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) && - (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) + (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) { dvp = NULL; - if (!dvp) + } + if (!dvp) { dvp = vnode_getparent(vp); + } vname = vnode_getname(vp); if (!dvp || !vname) { - if (!error) + if (!error) { error = EIO; + } goto out; } filename = &smallname[0]; namelen = snprintf(filename, sizeof(smallname), "%s", vname); if (namelen >= sizeof(smallname)) { - MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK); + MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK); if (!filename) { error = ENOMEM; goto out; } - snprintf(filename, namelen+1, "%s", vname); + snprintf(filename, namelen + 1, "%s", vname); } bzero(&cn, sizeof(cn)); cn.cn_nameptr = filename; @@ -2664,23 +2871,26 @@ tryagain: } nfs_open_file_add_open(nofp, accessMode, denyMode, 0); out: - if (filename && (filename != &smallname[0])) + if (filename && (filename != &smallname[0])) { FREE(filename, M_TEMP); - if (vname) + } + if (vname) { vnode_putname(vname); - if (dvp != NULLVP) + } + if (dvp != NULLVP) { vnode_put(dvp); - return (error); + } + return error; } int nfs_vnop_mmap( struct vnop_mmap_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_fflags; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_fflags; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -2691,13 +2901,16 @@ nfs_vnop_mmap( struct nfs_open_file *nofp = NULL; nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ|PROT_WRITE))) - return (EINVAL); - if (np->n_flag & NREVOKE) - return (EIO); + if (!vnode_isreg(vp) || !(ap->a_fflags & (PROT_READ | PROT_WRITE))) { + return EINVAL; + } + if (np->n_flag & NREVOKE) { + return EIO; + } /* * fflags contains some combination of: PROT_READ, PROT_WRITE @@ -2705,25 +2918,27 @@ nfs_vnop_mmap( * read access is always there (regardless if PROT_READ is not set). */ accessMode = NFS_OPEN_SHARE_ACCESS_READ; - if (ap->a_fflags & PROT_WRITE) + if (ap->a_fflags & PROT_WRITE) { accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE; + } denyMode = NFS_OPEN_SHARE_DENY_NONE; noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1); - if (!noop) - return (ENOMEM); + if (!noop) { + return ENOMEM; + } restart: error = nfs_mount_state_in_use_start(nmp, NULL); if (error) { nfs_open_owner_rele(noop); - return (error); + return error; } if (np->n_flag & NREVOKE) { error = EIO; nfs_mount_state_in_use_end(nmp, 0); nfs_open_owner_rele(noop); - return (error); + return error; } error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1); @@ -2735,11 +2950,13 @@ restart: nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, NULL); nofp = NULL; - if (!error) + if (!error) { goto restart; + } } - if (!error) + if (!error) { error = nfs_open_file_set_busy(nofp, NULL); + } if (error) { nofp = NULL; goto out; @@ -2763,7 +2980,7 @@ restart: if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) { /* We shouldn't get here. We've already open the file for execve */ NP(np, "nfs_vnop_mmap: File already needs close access: 0x%x, cred: %d thread: %lld", - nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx))); + nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx))); } /* * mmapings for execve are just for read. 
Get out with EPERM if the accessMode is not ACCESS_READ @@ -2782,50 +2999,56 @@ restart: } else { error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx); } - if (!error) + if (!error) { nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE; - if (error) + } + if (error) { goto out; + } } /* determine deny mode for open */ if (accessMode == NFS_OPEN_SHARE_ACCESS_BOTH) { if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) { delegated = 1; - if (nofp->nof_d_rw) + if (nofp->nof_d_rw) { denyMode = NFS_OPEN_SHARE_DENY_NONE; - else if (nofp->nof_d_rw_dw) + } else if (nofp->nof_d_rw_dw) { denyMode = NFS_OPEN_SHARE_DENY_WRITE; - else if (nofp->nof_d_rw_drw) + } else if (nofp->nof_d_rw_drw) { denyMode = NFS_OPEN_SHARE_DENY_BOTH; + } } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) { delegated = 0; - if (nofp->nof_rw) + if (nofp->nof_rw) { denyMode = NFS_OPEN_SHARE_DENY_NONE; - else if (nofp->nof_rw_dw) + } else if (nofp->nof_rw_dw) { denyMode = NFS_OPEN_SHARE_DENY_WRITE; - else if (nofp->nof_rw_drw) + } else if (nofp->nof_rw_drw) { denyMode = NFS_OPEN_SHARE_DENY_BOTH; + } } else { error = EPERM; } } else { /* NFS_OPEN_SHARE_ACCESS_READ */ if (nofp->nof_d_r || nofp->nof_d_r_dw || nofp->nof_d_r_drw) { delegated = 1; - if (nofp->nof_d_r) + if (nofp->nof_d_r) { denyMode = NFS_OPEN_SHARE_DENY_NONE; - else if (nofp->nof_d_r_dw) + } else if (nofp->nof_d_r_dw) { denyMode = NFS_OPEN_SHARE_DENY_WRITE; - else if (nofp->nof_d_r_drw) + } else if (nofp->nof_d_r_drw) { denyMode = NFS_OPEN_SHARE_DENY_BOTH; + } } else if (nofp->nof_r || nofp->nof_r_dw || nofp->nof_r_drw) { delegated = 0; - if (nofp->nof_r) + if (nofp->nof_r) { denyMode = NFS_OPEN_SHARE_DENY_NONE; - else if (nofp->nof_r_dw) + } else if (nofp->nof_r_dw) { denyMode = NFS_OPEN_SHARE_DENY_WRITE; - else if (nofp->nof_r_drw) + } else if (nofp->nof_r_drw) { denyMode = NFS_OPEN_SHARE_DENY_BOTH; + } } else if (nofp->nof_d_rw || nofp->nof_d_rw_dw || nofp->nof_d_rw_drw) { /* * This clause and the one below is to co-opt a read write access @@ -2834,27 +3057,30 @@ restart: */ delegated = 1; accessMode = NFS_OPEN_SHARE_ACCESS_BOTH; - if (nofp->nof_d_rw) + if (nofp->nof_d_rw) { denyMode = NFS_OPEN_SHARE_DENY_NONE; - else if (nofp->nof_d_rw_dw) + } else if (nofp->nof_d_rw_dw) { denyMode = NFS_OPEN_SHARE_DENY_WRITE; - else if (nofp->nof_d_rw_drw) + } else if (nofp->nof_d_rw_drw) { denyMode = NFS_OPEN_SHARE_DENY_BOTH; + } } else if (nofp->nof_rw || nofp->nof_rw_dw || nofp->nof_rw_drw) { delegated = 0; accessMode = NFS_OPEN_SHARE_ACCESS_BOTH; - if (nofp->nof_rw) + if (nofp->nof_rw) { denyMode = NFS_OPEN_SHARE_DENY_NONE; - else if (nofp->nof_rw_dw) + } else if (nofp->nof_rw_dw) { denyMode = NFS_OPEN_SHARE_DENY_WRITE; - else if (nofp->nof_rw_drw) + } else if (nofp->nof_rw_drw) { denyMode = NFS_OPEN_SHARE_DENY_BOTH; + } } else { error = EPERM; } } - if (error) /* mmap mode without proper open mode */ + if (error) { /* mmap mode without proper open mode */ goto out; + } /* * If the existing mmap access is more than the new access OR the @@ -2862,15 +3088,17 @@ restart: * then we'll stick with the existing mmap open mode. 
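[Editor's sketch] In the nfs_vnop_mmap hunks above, the deny mode for the implicit open is derived from whichever counters are already nonzero, preferring the weakest deny level, and a read-only mmap may co-opt an existing read/write open. A hedged restatement with a hypothetical pick_deny helper (the numeric values mirror NFS_OPEN_SHARE_DENY_*):

    /*
     * Hypothetical restatement of the deny-mode selection in
     * nfs_vnop_mmap: given the open counters held for one access mode,
     * reuse the weakest deny level that already has an open.
     * Returns 0 if no matching open exists.
     */
    static int
    pick_deny(unsigned n_none, unsigned n_dw, unsigned n_drw, int *deny)
    {
        if (n_none) {
            *deny = 0;            /* NFS_OPEN_SHARE_DENY_NONE  */
        } else if (n_dw) {
            *deny = 2;            /* NFS_OPEN_SHARE_DENY_WRITE */
        } else if (n_drw) {
            *deny = 3;            /* NFS_OPEN_SHARE_DENY_BOTH  */
        } else {
            return 0;             /* caller falls through to EPERM */
        }
        return 1;
    }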
*/ if ((nofp->nof_mmap_access > accessMode) || - ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) + ((nofp->nof_mmap_access == accessMode) && (nofp->nof_mmap_deny <= denyMode))) { goto out; + } /* update mmap open mode */ if (nofp->nof_mmap_access) { error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx); if (error) { - if (!nfs_mount_state_error_should_restart(error)) + if (!nfs_mount_state_error_should_restart(error)) { NP(np, "nfs_vnop_mmap: close of previous mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + } NP(np, "nfs_vnop_mmap: update, close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); goto out; } @@ -2882,14 +3110,16 @@ restart: nofp->nof_mmap_deny = denyMode; out: - if (nofp) + if (nofp) { nfs_open_file_clear_busy(nofp); + } if (nfs_mount_state_in_use_end(nmp, error)) { nofp = NULL; goto restart; } - if (noop) + if (noop) { nfs_open_owner_rele(noop); + } if (!error) { int ismapped = 0; @@ -2903,24 +3133,25 @@ out: lck_mtx_lock(&nmp->nm_lock); nmp->nm_state &= ~NFSSTA_SQUISHY; nmp->nm_curdeadtimeout = nmp->nm_deadtimeout; - if (nmp->nm_curdeadtimeout <= 0) + if (nmp->nm_curdeadtimeout <= 0) { nmp->nm_deadto_start = 0; + } nmp->nm_mappers++; lck_mtx_unlock(&nmp->nm_lock); } } - return (error); + return error; } int nfs_vnop_mnomap( struct vnop_mnomap_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -2930,10 +3161,11 @@ nfs_vnop_mnomap( off_t size; int error; int is_mapped_flag = 0; - + nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfs_node_lock_force(np); if (np->n_flag & NISMAPPED) { @@ -2943,36 +3175,42 @@ nfs_vnop_mnomap( nfs_node_unlock(np); if (is_mapped_flag) { lck_mtx_lock(&nmp->nm_lock); - if (nmp->nm_mappers) + if (nmp->nm_mappers) { nmp->nm_mappers--; - else + } else { NP(np, "nfs_vnop_mnomap: removing mmap reference from mount, but mount has no files mmapped"); + } lck_mtx_unlock(&nmp->nm_lock); } /* flush buffers/ubc before we drop the open (in case it's our last open) */ nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR); - if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) + if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) { ubc_msync(vp, 0, size, NULL, UBC_PUSHALL | UBC_SYNC); + } /* walk all open files and close all mmap opens */ loop: error = nfs_mount_state_in_use_start(nmp, NULL); - if (error) - return (error); + if (error) { + return error; + } lck_mtx_lock(&np->n_openlock); TAILQ_FOREACH(nofp, &np->n_opens, nof_link) { - if (!nofp->nof_mmap_access) + if (!nofp->nof_mmap_access) { continue; + } lck_mtx_unlock(&np->n_openlock); if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) { nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, NULL); - if (!error) + if (!error) { goto loop; + } } - if (!error) + if (!error) { error = nfs_open_file_set_busy(nofp, NULL); + } if (error) { lck_mtx_lock(&np->n_openlock); break; @@ -2980,12 +3218,14 @@ loop: if (nofp->nof_mmap_access) { error = nfs_close(np, nofp, nofp->nof_mmap_access, nofp->nof_mmap_deny, ctx); if (!nfs_mount_state_error_should_restart(error)) { - if (error) /* not a state-operation-restarting error, so just clear the access */ + if (error) { /* not a state-operation-restarting error, so just clear the access */ NP(np, 
"nfs_vnop_mnomap: close of mmap mode failed: %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + } nofp->nof_mmap_access = nofp->nof_mmap_deny = 0; } - if (error) + if (error) { NP(np, "nfs_vnop_mnomap: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + } } nfs_open_file_clear_busy(nofp); nfs_mount_state_in_use_end(nmp, error); @@ -2993,7 +3233,7 @@ loop: } lck_mtx_unlock(&np->n_openlock); nfs_mount_state_in_use_end(nmp, error); - return (error); + return error; } /* @@ -3009,14 +3249,19 @@ nfs_lock_owner_find(nfsnode_t np, proc_t p, int alloc) tryagain: lck_mtx_lock(&np->n_openlock); TAILQ_FOREACH(nlop, &np->n_lock_owners, nlo_link) { - if (nlop->nlo_pid != pid) + os_ref_count_t newcount; + + if (nlop->nlo_pid != pid) { continue; - if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) + } + if (timevalcmp(&nlop->nlo_pid_start, &p->p_start, ==)) { break; + } /* stale lock owner... reuse it if we can */ - if (nlop->nlo_refcnt) { + if (os_ref_get_count(&nlop->nlo_refcnt)) { TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link); nlop->nlo_flags &= ~NFS_LOCK_OWNER_LINK; + newcount = os_ref_release_locked(&nlop->nlo_refcnt); lck_mtx_unlock(&np->n_openlock); goto tryagain; } @@ -3029,8 +3274,9 @@ tryagain: if (!nlop && !newnlop && alloc) { lck_mtx_unlock(&np->n_openlock); MALLOC(newnlop, struct nfs_lock_owner *, sizeof(struct nfs_lock_owner), M_TEMP, M_WAITOK); - if (!newnlop) - return (NULL); + if (!newnlop) { + return NULL; + } bzero(newnlop, sizeof(*newnlop)); lck_mtx_init(&newnlop->nlo_lock, nfs_open_grp, LCK_ATTR_NULL); newnlop->nlo_pid = pid; @@ -3041,18 +3287,21 @@ tryagain: } if (!nlop && newnlop) { newnlop->nlo_flags |= NFS_LOCK_OWNER_LINK; + os_ref_init(&newnlop->nlo_refcnt, NULL); TAILQ_INSERT_HEAD(&np->n_lock_owners, newnlop, nlo_link); nlop = newnlop; } lck_mtx_unlock(&np->n_openlock); - if (newnlop && (nlop != newnlop)) + if (newnlop && (nlop != newnlop)) { nfs_lock_owner_destroy(newnlop); + } - if (nlop) + if (nlop) { nfs_lock_owner_ref(nlop); + } - return (nlop); + return nlop; } /* @@ -3076,7 +3325,7 @@ void nfs_lock_owner_ref(struct nfs_lock_owner *nlop) { lck_mtx_lock(&nlop->nlo_lock); - nlop->nlo_refcnt++; + os_ref_retain_locked(&nlop->nlo_refcnt); lck_mtx_unlock(&nlop->nlo_lock); } @@ -3087,14 +3336,18 @@ nfs_lock_owner_ref(struct nfs_lock_owner *nlop) void nfs_lock_owner_rele(struct nfs_lock_owner *nlop) { + os_ref_count_t newcount; + lck_mtx_lock(&nlop->nlo_lock); - if (nlop->nlo_refcnt < 1) + if (os_ref_get_count(&nlop->nlo_refcnt) < 1) { panic("nfs_lock_owner_rele: no refcnt"); - nlop->nlo_refcnt--; - if (!nlop->nlo_refcnt && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) + } + newcount = os_ref_release_locked(&nlop->nlo_refcnt); + if (!newcount && (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) { panic("nfs_lock_owner_rele: busy"); + } /* XXX we may potentially want to clean up idle/unused lock owner structures */ - if (nlop->nlo_refcnt || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) { + if (newcount || (nlop->nlo_flags & NFS_LOCK_OWNER_LINK)) { lck_mtx_unlock(&nlop->nlo_lock); return; } @@ -3115,23 +3368,26 @@ nfs_lock_owner_set_busy(struct nfs_lock_owner *nlop, thread_t thd) int error = 0, slpflag; nmp = nlop->nlo_open_owner->noo_mount; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } slpflag = (NMFLAG(nmp, INTR) && thd) ? 
PCATCH : 0; lck_mtx_lock(&nlop->nlo_lock); while (nlop->nlo_flags & NFS_LOCK_OWNER_BUSY) { - if ((error = nfs_sigintr(nmp, NULL, thd, 0))) + if ((error = nfs_sigintr(nmp, NULL, thd, 0))) { break; + } nlop->nlo_flags |= NFS_LOCK_OWNER_WANT; msleep(nlop, &nlop->nlo_lock, slpflag, "nfs_lock_owner_set_busy", &ts); slpflag = 0; } - if (!error) + if (!error) { nlop->nlo_flags |= NFS_LOCK_OWNER_BUSY; + } lck_mtx_unlock(&nlop->nlo_lock); - return (error); + return error; } /* @@ -3144,13 +3400,15 @@ nfs_lock_owner_clear_busy(struct nfs_lock_owner *nlop) int wanted; lck_mtx_lock(&nlop->nlo_lock); - if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) + if (!(nlop->nlo_flags & NFS_LOCK_OWNER_BUSY)) { panic("nfs_lock_owner_clear_busy"); + } wanted = (nlop->nlo_flags & NFS_LOCK_OWNER_WANT); - nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY|NFS_LOCK_OWNER_WANT); + nlop->nlo_flags &= ~(NFS_LOCK_OWNER_BUSY | NFS_LOCK_OWNER_WANT); lck_mtx_unlock(&nlop->nlo_lock); - if (wanted) + if (wanted) { wakeup(nlop); + } } /* @@ -3168,13 +3426,15 @@ nfs_lock_owner_insert_held_lock(struct nfs_lock_owner *nlop, struct nfs_file_loc TAILQ_INSERT_HEAD(&nlop->nlo_locks, newnflp, nfl_lolink); } else { TAILQ_FOREACH(nflp, &nlop->nlo_locks, nfl_lolink) { - if (newnflp->nfl_start < nflp->nfl_start) + if (newnflp->nfl_start < nflp->nfl_start) { break; + } } - if (nflp) + if (nflp) { TAILQ_INSERT_BEFORE(nflp, newnflp, nfl_lolink); - else + } else { TAILQ_INSERT_TAIL(&nlop->nlo_locks, newnflp, nfl_lolink); + } } lck_mtx_unlock(&nlop->nlo_lock); } @@ -3195,14 +3455,15 @@ nfs_file_lock_alloc(struct nfs_lock_owner *nlop) lck_mtx_unlock(&nlop->nlo_lock); if (!nflp) { MALLOC(nflp, struct nfs_file_lock *, sizeof(struct nfs_file_lock), M_TEMP, M_WAITOK); - if (!nflp) - return (NULL); + if (!nflp) { + return NULL; + } bzero(nflp, sizeof(*nflp)); nflp->nfl_flags |= NFS_FILE_LOCK_ALLOC; nflp->nfl_owner = nlop; } nfs_lock_owner_ref(nlop); - return (nflp); + return nflp; } /* @@ -3232,25 +3493,29 @@ int nfs_file_lock_conflict(struct nfs_file_lock *nflp1, struct nfs_file_lock *nflp2, int *willsplit) { /* no conflict if lock is dead */ - if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) - return (0); + if ((nflp1->nfl_flags & NFS_FILE_LOCK_DEAD) || (nflp2->nfl_flags & NFS_FILE_LOCK_DEAD)) { + return 0; + } /* no conflict if it's ours - unless the lock style doesn't match */ if ((nflp1->nfl_owner == nflp2->nfl_owner) && ((nflp1->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == (nflp2->nfl_flags & NFS_FILE_LOCK_STYLE_MASK))) { if (willsplit && (nflp1->nfl_type != nflp2->nfl_type) && (nflp1->nfl_start > nflp2->nfl_start) && - (nflp1->nfl_end < nflp2->nfl_end)) + (nflp1->nfl_end < nflp2->nfl_end)) { *willsplit = 1; - return (0); + } + return 0; } /* no conflict if ranges don't overlap */ - if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) - return (0); + if ((nflp1->nfl_start > nflp2->nfl_end) || (nflp1->nfl_end < nflp2->nfl_start)) { + return 0; + } /* no conflict if neither lock is exclusive */ - if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) - return (0); + if ((nflp1->nfl_type != F_WRLCK) && (nflp2->nfl_type != F_WRLCK)) { + return 0; + } /* conflict */ - return (1); + return 1; } /* @@ -3275,27 +3540,30 @@ nfs4_setlock_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (np->n_vattr.nva_flags & 
NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } newlocker = (nlop->nlo_stategenid != nmp->nm_stategenid); locktype = (nflp->nfl_flags & NFS_FILE_LOCK_WAIT) ? - ((nflp->nfl_type == F_WRLCK) ? - NFS_LOCK_TYPE_WRITEW : - NFS_LOCK_TYPE_READW) : - ((nflp->nfl_type == F_WRLCK) ? - NFS_LOCK_TYPE_WRITE : - NFS_LOCK_TYPE_READ); + ((nflp->nfl_type == F_WRLCK) ? + NFS_LOCK_TYPE_WRITEW : + NFS_LOCK_TYPE_READW) : + ((nflp->nfl_type == F_WRLCK) ? + NFS_LOCK_TYPE_WRITE : + NFS_LOCK_TYPE_READ); if (newlocker) { error = nfs_open_file_set_busy(nofp, thd); - if (error) - return (error); + if (error) { + return error; + } error = nfs_open_owner_set_busy(nofp->nof_owner, thd); if (error) { nfs_open_file_clear_busy(nofp); - return (error); + return error; } if (!nlop->nlo_open_owner) { nfs_open_owner_ref(nofp->nof_owner); @@ -3308,7 +3576,7 @@ nfs4_setlock_rpc( nfs_open_owner_clear_busy(nofp->nof_owner); nfs_open_file_clear_busy(nofp); } - return (error); + return error; } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); @@ -3345,10 +3613,11 @@ nfs4_setlock_rpc( nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); - error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status); + error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -3362,11 +3631,13 @@ nfs4_setlock_rpc( /* Update the lock owner's stategenid once it appears the server has state for it. */ /* We determine this by noting the request was successful (we got a stateid). 
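[Editor's sketch] The nfs_lock_owner hunks above replace the hand-rolled nlo_refcnt integer with xnu's os_refcnt_t: os_ref_init() when the owner is linked, os_ref_retain_locked() in nfs_lock_owner_ref(), and os_ref_release_locked() in nfs_lock_owner_rele(), whose return value is the post-release count so callers stop re-reading the field. A userspace analog of that pattern (ref_init / ref_retain_locked / ref_release_locked are stand-ins, not the <os/refcnt.h> API, which additionally panics on underflow):

    /*
     * Userspace analog of the os_refcnt conversion: the release
     * primitive returns the new count, so callers branch on the
     * return value instead of re-reading the counter.
     */
    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t ref_count_t;

    static void
    ref_init(ref_count_t *rc)
    {
        *rc = 1;                  /* like os_ref_init: starts held */
    }

    static void
    ref_retain_locked(ref_count_t *rc)
    {
        (*rc)++;
    }

    static ref_count_t
    ref_release_locked(ref_count_t *rc)
    {
        assert(*rc > 0);          /* os_refcnt asserts/panics here */
        return --(*rc);
    }

nfs_lock_owner_rele then reads as: newcount = ref_release_locked(...); only when newcount is zero and the owner is unlinked is it destroyed.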
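[Editor's sketch] nfs_file_lock_conflict, restyled above, reduces to three early-outs: dead locks and same-owner/same-style locks never conflict (though the latter may flag a pending split), disjoint ranges never conflict, and two shared locks coexist. The same predicate in isolation, with a hypothetical range_lock type:

    /*
     * Hypothetical, self-contained restatement of the conflict test in
     * nfs_file_lock_conflict: ranges conflict only when they overlap
     * and at least one side is exclusive (F_WRLCK).
     */
    #include <stdint.h>

    struct range_lock {
        uint64_t start, end;      /* inclusive byte range */
        int      exclusive;       /* nonzero for a write lock */
    };

    static int
    locks_conflict(const struct range_lock *a, const struct range_lock *b)
    {
        if ((a->start > b->end) || (a->end < b->start)) {
            return 0;             /* disjoint ranges */
        }
        if (!a->exclusive && !b->exclusive) {
            return 0;             /* two shared locks coexist */
        }
        return 1;
    }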
*/ - if (newlocker && !error) + if (newlocker && !error) { nlop->nlo_stategenid = nmp->nm_stategenid; + } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfs_lock_owner_clear_busy(nlop); if (newlocker) { nfs_open_owner_clear_busy(nofp->nof_owner); @@ -3374,7 +3645,7 @@ nfsmout: } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -3398,14 +3669,17 @@ nfs4_unlock_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } error = nfs_lock_owner_set_busy(nlop, NULL); - if (error) - return (error); + if (error) { + return error; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -3432,10 +3706,11 @@ nfs4_unlock_rpc( nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); - error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status); + error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -3447,12 +3722,13 @@ nfs4_unlock_rpc( nfs_owner_seqid_increment(NULL, nlop, error); nfsm_chain_get_stateid(error, &nmrep, &nlop->nlo_stateid); nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfs_lock_owner_clear_busy(nlop); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -3475,10 +3751,12 @@ nfs4_getlock_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } lockerror = ENOENT; NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); @@ -3507,8 +3785,9 @@ nfs4_getlock_rpc( error = nfs_request(np, NULL, &nmreq, NFSPROC4_COMPOUND, ctx, &si, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -3530,11 +3809,12 @@ nfs4_getlock_rpc( fl->l_type = F_UNLCK; } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } @@ -3559,21 +3839,25 @@ nfs_advlock_getlock( int error = 0, answered = 0; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } restart: - if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) - return (error); + if ((error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)))) { + return error; + } lck_mtx_lock(&np->n_openlock); /* scan currently held locks for conflict */ TAILQ_FOREACH(nflp, &np->n_locks, nfl_link) { - if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD)) + if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) { continue; + } if ((start <= nflp->nfl_end) && (end >= 
nflp->nfl_start) && - ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) + ((fl->l_type == F_WRLCK) || (nflp->nfl_type == F_WRLCK))) { break; + } } if (nflp) { /* found a conflicting lock */ @@ -3594,15 +3878,16 @@ restart: lck_mtx_unlock(&np->n_openlock); if (answered) { nfs_mount_state_in_use_end(nmp, 0); - return (0); + return 0; } /* no conflict found locally, so ask the server */ error = nmp->nm_funcs->nf_getlock_rpc(np, nlop, fl, start, end, ctx); - if (nfs_mount_state_in_use_end(nmp, error)) + if (nfs_mount_state_in_use_end(nmp, error)) { goto restart; - return (error); + } + return error; } /* @@ -3635,22 +3920,26 @@ nfs_advlock_setlock( struct timespec ts = {1, 0}; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0; - if ((type != F_RDLCK) && (type != F_WRLCK)) - return (EINVAL); + if ((type != F_RDLCK) && (type != F_WRLCK)) { + return EINVAL; + } /* allocate a new lock */ newnflp = nfs_file_lock_alloc(nlop); - if (!newnflp) - return (ENOLCK); + if (!newnflp) { + return ENOLCK; + } newnflp->nfl_start = start; newnflp->nfl_end = end; newnflp->nfl_type = type; - if (op == F_SETLKW) + if (op == F_SETLKW) { newnflp->nfl_flags |= NFS_FILE_LOCK_WAIT; + } newnflp->nfl_flags |= style; newnflp->nfl_flags |= NFS_FILE_LOCK_BLOCKED; @@ -3662,18 +3951,21 @@ nfs_advlock_setlock( * have a shared flock-style lock. */ nflp = TAILQ_FIRST(&nlop->nlo_locks); - if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) + if (nflp && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_FLOCK)) { nflp = NULL; - if (nflp && (nflp->nfl_type != F_RDLCK)) + } + if (nflp && (nflp->nfl_type != F_RDLCK)) { nflp = NULL; + } flocknflp = nflp; } restart: restart = 0; error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)); - if (error) + if (error) { goto error_out; + } inuse = 1; if (np->n_flag & NREVOKE) { error = EIO; @@ -3685,8 +3977,9 @@ restart: nfs_mount_state_in_use_end(nmp, 0); inuse = 0; error = nfs4_reopen(nofp, vfs_context_thread(ctx)); - if (error) + if (error) { goto error_out; + } goto restart; } @@ -3700,8 +3993,9 @@ restart: /* scan current list of locks (held and pending) for conflicts */ for (nflp = TAILQ_NEXT(newnflp, nfl_link); nflp; nflp = nextnflp) { nextnflp = TAILQ_NEXT(nflp, nfl_link); - if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) + if (!nfs_file_lock_conflict(newnflp, nflp, &willsplit)) { continue; + } /* Conflict */ if (!(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) { error = EAGAIN; @@ -3721,8 +4015,9 @@ restart: inuse = 0; error = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx); flocknflp = NULL; - if (!error) + if (!error) { error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)); + } if (error) { lck_mtx_lock(&np->n_openlock); break; @@ -3730,8 +4025,9 @@ restart: inuse = 1; lck_mtx_lock(&np->n_openlock); /* no need to block/sleep if the conflict is gone */ - if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) + if (!nfs_file_lock_conflict(newnflp, nflp, NULL)) { break; + } } msleep(nflp, &np->n_openlock, slpflag, "nfs_advlock_setlock_blocked", &ts); slpflag = 0; @@ -3745,25 +4041,29 @@ restart: lck_mtx_lock(&np->n_openlock); break; } - if (!error && (np->n_flag & NREVOKE)) + if (!error && (np->n_flag & NREVOKE)) { error = EIO; + } } while (!error && nfs_file_lock_conflict(newnflp, nflp, NULL)); nflp->nfl_blockcnt--; if ((nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && 
!nflp->nfl_blockcnt) { TAILQ_REMOVE(&np->n_locks, nflp, nfl_link); nfs_file_lock_destroy(nflp); } - if (error || restart) + if (error || restart) { break; + } /* We have released n_openlock and we can't trust that nextnflp is still valid. */ /* So, start this lock-scanning loop over from where it started. */ nextnflp = TAILQ_NEXT(newnflp, nfl_link); } lck_mtx_unlock(&np->n_openlock); - if (restart) + if (restart) { goto restart; - if (error) + } + if (error) { goto error_out; + } if (willsplit) { /* @@ -3779,8 +4079,9 @@ restart: } /* once scan for local conflicts is clear, send request to server */ - if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) + if ((error = nfs_open_state_set_busy(np, vfs_context_thread(ctx)))) { goto error_out; + } busy = 1; delay = 0; do { @@ -3804,23 +4105,27 @@ restart: * with an open it knows about. */ if ((!nofp->nof_rw_drw && !nofp->nof_w_drw && !nofp->nof_r_drw && - !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw && - !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) && + !nofp->nof_rw_dw && !nofp->nof_w_dw && !nofp->nof_r_dw && + !nofp->nof_rw && !nofp->nof_w && !nofp->nof_r) && (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw || - nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw || - nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) { + nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw || + nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r)) { error = nfs4_claim_delegated_state_for_open_file(nofp, 0); - if (error) + if (error) { break; + } } } } - if (np->n_flag & NREVOKE) + if (np->n_flag & NREVOKE) { error = EIO; - if (!error) + } + if (!error) { error = nmp->nm_funcs->nf_setlock_rpc(np, nofp, newnflp, 0, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx)); - if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) + } + if (!error || ((error != NFSERR_DENIED) && (error != NFSERR_GRACE))) { break; + } /* request was denied due to either conflict or grace period */ if ((error == NFSERR_DENIED) && !(newnflp->nfl_flags & NFS_FILE_LOCK_WAIT)) { error = EAGAIN; @@ -3834,8 +4139,9 @@ restart: inuse = 0; error2 = nfs_advlock_unlock(np, nofp, nlop, 0, UINT64_MAX, NFS_FILE_LOCK_STYLE_FLOCK, ctx); flocknflp = NULL; - if (!error2) + if (!error2) { error2 = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)); + } if (!error2) { inuse = 1; error2 = nfs_open_state_set_busy(np, vfs_context_thread(ctx)); @@ -3851,11 +4157,13 @@ restart: * Except for retries of blocked v2/v3 request where we've already waited a bit. 
*/ if ((nmp->nm_vers >= NFS_VER4) || (error == NFSERR_GRACE)) { - if (error == NFSERR_GRACE) + if (error == NFSERR_GRACE) { delay = 4; - if (delay < 4) + } + if (delay < 4) { delay++; - tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz/2)); + } + tsleep(newnflp, slpflag, "nfs_advlock_setlock_delay", delay * (hz / 2)); slpflag = 0; } error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0); @@ -3867,8 +4175,9 @@ restart: inuse = 0; goto restart; } - if (!error && (np->n_flag & NREVOKE)) + if (!error && (np->n_flag & NREVOKE)) { error = EIO; + } } while (!error); error_out: @@ -3893,18 +4202,22 @@ error_out: wakeup(newnflp); } else { /* remove newnflp from lock list and destroy */ - if (inqueue) + if (inqueue) { TAILQ_REMOVE(&np->n_locks, newnflp, nfl_link); + } nfs_file_lock_destroy(newnflp); } lck_mtx_unlock(&np->n_openlock); - if (busy) + if (busy) { nfs_open_state_clear_busy(np); - if (inuse) + } + if (inuse) { nfs_mount_state_in_use_end(nmp, error); - if (nflp2) + } + if (nflp2) { nfs_file_lock_destroy(nflp2); - return (error); + } + return error; } /* server granted the lock */ @@ -3917,16 +4230,21 @@ error_out: * It's possible that a single lock may need to be split. */ TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) { - if (nflp == newnflp) + if (nflp == newnflp) { continue; - if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD)) + } + if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) { continue; - if (nflp->nfl_owner != nlop) + } + if (nflp->nfl_owner != nlop) { continue; - if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) + } + if ((newnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != (nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK)) { continue; - if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) + } + if ((newnflp->nfl_start > nflp->nfl_end) || (newnflp->nfl_end < nflp->nfl_start)) { continue; + } /* here's one to update */ if ((newnflp->nfl_start <= nflp->nfl_start) && (newnflp->nfl_end >= nflp->nfl_end)) { /* The entire lock is being replaced. */ @@ -3939,7 +4257,7 @@ error_out: /* We're replacing a range in the middle of a lock. */ /* The current lock will be split into two locks. */ /* Update locks and insert new lock after current lock. */ - nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED)); + nflp2->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED)); nflp2->nfl_type = nflp->nfl_type; nflp2->nfl_start = newnflp->nfl_end + 1; nflp2->nfl_end = nflp->nfl_end; @@ -3982,12 +4300,15 @@ error_out: * checking locks that are further down the list. 
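[Editor's sketch] The setlock retry loop above ramps its sleep between NFSERR_DENIED / NFSERR_GRACE retries: NFSERR_GRACE jumps straight to the largest tier, otherwise the delay grows by one up to 4, and the thread sleeps delay * (hz / 2) ticks. A one-function sketch of that schedule (next_delay is a hypothetical name):

    /*
     * Sketch of the retry backoff: the caller sleeps
     * next_delay(...) * (hz / 2) ticks, so NFSERR_GRACE waits
     * roughly two seconds per retry.
     */
    static int
    next_delay(int cur, int was_grace)
    {
        if (was_grace) {
            return 4;             /* jump straight to the ~2s tier */
        }
        return (cur < 4) ? cur + 1 : cur;
    }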
*/ TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) { - if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD)) + if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) { continue; - if (nflp->nfl_owner != nlop) + } + if (nflp->nfl_owner != nlop) { continue; - if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) + } + if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != NFS_FILE_LOCK_STYLE_POSIX) { continue; + } if (((coalnflp = TAILQ_PREV(nflp, nfs_file_lock_queue, nfl_lolink))) && ((coalnflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) && (coalnflp->nfl_type == nflp->nfl_type) && @@ -4007,8 +4328,9 @@ error_out: TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink); lck_mtx_unlock(&nlop->nlo_lock); } - if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) + if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD)) { continue; + } if (nflp->nfl_blockcnt) { /* wake up anyone blocked on this lock */ wakeup(nflp); @@ -4024,9 +4346,10 @@ error_out: nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, error); - if (nflp2) + if (nflp2) { nfs_file_lock_destroy(nflp2); - return (error); + } + return error; } /* @@ -4047,22 +4370,25 @@ nfs_advlock_unlock( int error = 0, willsplit = 0, send_unlock_rpcs = 1; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } restart: - if ((error = nfs_mount_state_in_use_start(nmp, NULL))) - return (error); + if ((error = nfs_mount_state_in_use_start(nmp, NULL))) { + return error; + } if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) { nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, NULL); - if (error) - return (error); + if (error) { + return error; + } goto restart; } if ((error = nfs_open_state_set_busy(np, NULL))) { nfs_mount_state_in_use_end(nmp, error); - return (error); + return error; } lck_mtx_lock(&np->n_openlock); @@ -4073,14 +4399,18 @@ restart: * going to be one, we'll allocate one now. */ TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) { - if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD)) + if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) { continue; - if (nflp->nfl_owner != nlop) + } + if (nflp->nfl_owner != nlop) { continue; - if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) + } + if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) { continue; - if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) + } + if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) { continue; + } if ((start > nflp->nfl_start) && (end < nflp->nfl_end)) { willsplit = 1; break; @@ -4091,8 +4421,9 @@ restart: nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, 0); newnflp = nfs_file_lock_alloc(nlop); - if (!newnflp) - return (ENOMEM); + if (!newnflp) { + return ENOMEM; + } goto restart; } } @@ -4101,7 +4432,7 @@ restart: * Free all of our locks in the given range. * * Note that this process requires sending requests to the server. - * Because of this, we will release the n_openlock while performing + * Because of this, we will release the n_openlock while performing * the unlock RPCs. The N_OPENBUSY state keeps the state of *held* * locks from changing underneath us. However, other entries in the * list may be removed. So we need to be careful walking the list. 
@@ -4115,8 +4446,9 @@ restart: */ if ((style == NFS_FILE_LOCK_STYLE_POSIX) && ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) && - ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) + ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK)) { send_unlock_rpcs = 0; + } if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((nflp = TAILQ_FIRST(&nlop->nlo_locks))) && ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_FLOCK) && @@ -4129,52 +4461,58 @@ restart: if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) == NFS_FILE_LOCK_STYLE_POSIX) { /* unlock the range preceding this lock */ lck_mtx_unlock(&np->n_openlock); - error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start-1, 0, - vfs_context_thread(ctx), vfs_context_ucred(ctx)); + error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, nflp->nfl_start - 1, 0, + vfs_context_thread(ctx), vfs_context_ucred(ctx)); if (nfs_mount_state_error_should_restart(error)) { nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, error); goto restart; } lck_mtx_lock(&np->n_openlock); - if (error) + if (error) { goto out; - s = nflp->nfl_end+1; + } + s = nflp->nfl_end + 1; } nflp = TAILQ_NEXT(nflp, nfl_lolink); } if (!delegated) { lck_mtx_unlock(&np->n_openlock); error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, type, s, end, 0, - vfs_context_thread(ctx), vfs_context_ucred(ctx)); + vfs_context_thread(ctx), vfs_context_ucred(ctx)); if (nfs_mount_state_error_should_restart(error)) { nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, error); goto restart; } lck_mtx_lock(&np->n_openlock); - if (error) + if (error) { goto out; + } } send_unlock_rpcs = 0; } TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) { - if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD)) + if (nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD)) { continue; - if (nflp->nfl_owner != nlop) + } + if (nflp->nfl_owner != nlop) { continue; - if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) + } + if ((nflp->nfl_flags & NFS_FILE_LOCK_STYLE_MASK) != style) { continue; - if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) + } + if ((start > nflp->nfl_end) || (end < nflp->nfl_start)) { continue; + } /* here's one to unlock */ if ((start <= nflp->nfl_start) && (end >= nflp->nfl_end)) { /* The entire lock is being unlocked. 
*/ if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) { lck_mtx_unlock(&np->n_openlock); error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, nflp->nfl_end, 0, - vfs_context_thread(ctx), vfs_context_ucred(ctx)); + vfs_context_thread(ctx), vfs_context_ucred(ctx)); if (nfs_mount_state_error_should_restart(error)) { nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, error); @@ -4183,8 +4521,9 @@ restart: lck_mtx_lock(&np->n_openlock); } nextnflp = TAILQ_NEXT(nflp, nfl_link); - if (error) + if (error) { break; + } nflp->nfl_flags |= NFS_FILE_LOCK_DEAD; lck_mtx_lock(&nlop->nlo_lock); TAILQ_REMOVE(&nlop->nlo_locks, nflp, nfl_lolink); @@ -4196,7 +4535,7 @@ restart: if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) { lck_mtx_unlock(&np->n_openlock); error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, end, 0, - vfs_context_thread(ctx), vfs_context_ucred(ctx)); + vfs_context_thread(ctx), vfs_context_ucred(ctx)); if (nfs_mount_state_error_should_restart(error)) { nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, error); @@ -4204,10 +4543,11 @@ restart: } lck_mtx_lock(&np->n_openlock); } - if (error) + if (error) { break; + } /* update locks and insert new lock after current lock */ - newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK|NFS_FILE_LOCK_DELEGATED)); + newnflp->nfl_flags |= (nflp->nfl_flags & (NFS_FILE_LOCK_STYLE_MASK | NFS_FILE_LOCK_DELEGATED)); newnflp->nfl_type = nflp->nfl_type; newnflp->nfl_start = end + 1; newnflp->nfl_end = nflp->nfl_end; @@ -4221,7 +4561,7 @@ restart: if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) { lck_mtx_unlock(&np->n_openlock); error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, start, nflp->nfl_end, 0, - vfs_context_thread(ctx), vfs_context_ucred(ctx)); + vfs_context_thread(ctx), vfs_context_ucred(ctx)); if (nfs_mount_state_error_should_restart(error)) { nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, error); @@ -4230,15 +4570,16 @@ restart: lck_mtx_lock(&np->n_openlock); } nextnflp = TAILQ_NEXT(nflp, nfl_link); - if (error) + if (error) { break; + } nflp->nfl_end = start - 1; } else if (end < nflp->nfl_end) { /* We're unlocking the start of a lock. 
*/ if (send_unlock_rpcs && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) { lck_mtx_unlock(&np->n_openlock); error = nmp->nm_funcs->nf_unlock_rpc(np, nlop, nflp->nfl_type, nflp->nfl_start, end, 0, - vfs_context_thread(ctx), vfs_context_ucred(ctx)); + vfs_context_thread(ctx), vfs_context_ucred(ctx)); if (nfs_mount_state_error_should_restart(error)) { nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, error); @@ -4247,8 +4588,9 @@ restart: lck_mtx_lock(&np->n_openlock); } nextnflp = TAILQ_NEXT(nflp, nfl_link); - if (error) + if (error) { break; + } nflp->nfl_start = end + 1; } if (nflp->nfl_blockcnt) { @@ -4265,9 +4607,10 @@ out: nfs_open_state_clear_busy(np); nfs_mount_state_in_use_end(nmp, 0); - if (newnflp) + if (newnflp) { nfs_file_lock_destroy(newnflp); - return (error); + } + return error; } /* @@ -4276,14 +4619,14 @@ out: int nfs_vnop_advlock( struct vnop_advlock_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - caddr_t a_id; - int a_op; - struct flock *a_fl; - int a_flags; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * caddr_t a_id; + * int a_op; + * struct flock *a_fl; + * int a_flags; + * vfs_context_t a_context; + * } */*ap) { vnode_t vp = ap->a_vp; nfsnode_t np = VTONFS(ap->a_vp); @@ -4302,22 +4645,26 @@ nfs_vnop_advlock( #define OFF_MAX QUAD_MAX nmp = VTONMP(ap->a_vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } lck_mtx_lock(&nmp->nm_lock); if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) { lck_mtx_unlock(&nmp->nm_lock); - return (ENOTSUP); + return ENOTSUP; } lck_mtx_unlock(&nmp->nm_lock); - if (np->n_flag & NREVOKE) - return (EIO); + if (np->n_flag & NREVOKE) { + return EIO; + } vtype = vnode_vtype(ap->a_vp); - if (vtype == VDIR) /* ignore lock requests on directories */ - return (0); - if (vtype != VREG) /* anything other than regular files is invalid */ - return (EINVAL); + if (vtype == VDIR) { /* ignore lock requests on directories */ + return 0; + } + if (vtype != VREG) { /* anything other than regular files is invalid */ + return EINVAL; + } /* Convert the flock structure into a start and end. 
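[Editor's sketch] The unlock walk that ends above distinguishes four geometries of a held lock [s, e] against the unlock range: fully covered (marked NFS_FILE_LOCK_DEAD), strictly interior (split in two using the preallocated newnflp), trimmed at the end (e becomes start - 1), or trimmed at the start (s becomes end + 1). A self-contained restatement of that range subtraction, assuming a hypothetical range_subtract helper; all ranges are inclusive:

    /*
     * Illustrative range subtraction mirroring the four unlock cases
     * in nfs_advlock_unlock: removing [ustart, uend] from a held lock
     * [s, e] leaves zero, one, or two pieces.
     */
    #include <stdint.h>

    static int
    range_subtract(uint64_t s, uint64_t e, uint64_t ustart, uint64_t uend,
        uint64_t out[2][2])
    {
        int n = 0;

        if ((ustart > e) || (uend < s)) { /* no overlap: lock untouched */
            out[n][0] = s;
            out[n][1] = e;
            return 1;
        }
        if (ustart > s) {                 /* left piece survives */
            out[n][0] = s;
            out[n][1] = ustart - 1;
            n++;
        }
        if (uend < e) {                   /* right piece survives */
            out[n][0] = uend + 1;
            out[n][1] = e;
            n++;
        }
        return n;                         /* 0: lock dead, 2: split */
    }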
*/ switch (fl->l_whence) { @@ -4332,54 +4679,65 @@ nfs_vnop_advlock( case SEEK_END: /* need to flush, and refetch attributes to make */ /* sure we have the correct end of file offset */ - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } modified = (np->n_flag & NMODIFIED); nfs_node_unlock(np); - if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) - return (error); - if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) - return (error); + if (modified && ((error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1)))) { + return error; + } + if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) { + return error; + } nfs_data_lock(np, NFS_DATA_LOCK_SHARED); if ((np->n_size > OFF_MAX) || - ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) + ((fl->l_start > 0) && (np->n_size > (u_quad_t)(OFF_MAX - fl->l_start)))) { error = EOVERFLOW; + } lstart = np->n_size + fl->l_start; nfs_data_unlock(np); - if (error) - return (error); + if (error) { + return error; + } break; default: - return (EINVAL); + return EINVAL; + } + if (lstart < 0) { + return EINVAL; } - if (lstart < 0) - return (EINVAL); start = lstart; if (fl->l_len == 0) { end = UINT64_MAX; } else if (fl->l_len > 0) { - if ((fl->l_len - 1) > (OFF_MAX - lstart)) - return (EOVERFLOW); + if ((fl->l_len - 1) > (OFF_MAX - lstart)) { + return EOVERFLOW; + } end = start - 1 + fl->l_len; } else { /* l_len is negative */ - if ((lstart + fl->l_len) < 0) - return (EINVAL); + if ((lstart + fl->l_len) < 0) { + return EINVAL; + } end = start - 1; start += fl->l_len; } - if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) - return (EINVAL); + if ((nmp->nm_vers == NFS_VER2) && ((start > INT32_MAX) || (fl->l_len && (end > INT32_MAX)))) { + return EINVAL; + } style = (flags & F_FLOCK) ? NFS_FILE_LOCK_STYLE_FLOCK : NFS_FILE_LOCK_STYLE_POSIX; - if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) - return (EINVAL); + if ((style == NFS_FILE_LOCK_STYLE_FLOCK) && ((start != 0) || (end != UINT64_MAX))) { + return EINVAL; + } /* find the lock owner, alloc if not unlock */ nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), (op != F_UNLCK)); if (!nlop) { error = (op == F_UNLCK) ? 0 : ENOMEM; - if (error) + if (error) { NP(np, "nfs_vnop_advlock: no lock owner, error %d", error); + } goto out; } @@ -4396,8 +4754,9 @@ nfs_vnop_advlock( /* find the open file */ restart: error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0); - if (error) + if (error) { error = EBADF; + } if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) { NP(np, "nfs_vnop_advlock: LOST %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); error = EIO; @@ -4405,8 +4764,9 @@ restart: if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { error = nfs4_reopen(nofp, ((op == F_UNLCK) ? NULL : vfs_context_thread(ctx))); nofp = NULL; - if (!error) + if (!error) { goto restart; + } } if (error) { NP(np, "nfs_vnop_advlock: no open file %d, %d", error, kauth_cred_getuid(noop->noo_cred)); @@ -4415,8 +4775,9 @@ restart: if (op == F_UNLCK) { error = nfs_advlock_unlock(np, nofp, nlop, start, end, style, ctx); } else if ((op == F_SETLK) || (op == F_SETLKW)) { - if ((op == F_SETLK) && (flags & F_WAIT)) + if ((op == F_SETLK) && (flags & F_WAIT)) { op = F_SETLKW; + } error = nfs_advlock_setlock(np, nofp, nlop, op, start, end, style, fl->l_type, ctx); } else { /* not getlk, unlock or lock? 
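[Editor's sketch] The nfs_vnop_advlock code above first normalizes struct flock into an inclusive [start, end] pair: l_len == 0 means lock to end of file (end = UINT64_MAX), a positive l_len yields end = start + l_len - 1 with an overflow check, and a negative l_len moves start backward while end becomes the byte before the original start. A sketch under those rules, assuming l_whence has already been folded into an absolute lstart (flock_to_range is a hypothetical name):

    /*
     * Hypothetical helper restating the struct flock normalization in
     * nfs_vnop_advlock, after l_whence has been resolved into an
     * absolute, non-negative lstart. Returns 0 or an errno.
     */
    #include <errno.h>
    #include <stdint.h>

    static int
    flock_to_range(int64_t lstart, int64_t l_len, uint64_t *start, uint64_t *end)
    {
        if (lstart < 0) {
            return EINVAL;
        }
        if (l_len == 0) {                 /* lock reaches end of file */
            *start = (uint64_t)lstart;
            *end = UINT64_MAX;
        } else if (l_len > 0) {
            if (l_len - 1 > INT64_MAX - lstart) {
                return EOVERFLOW;         /* kernel checks against OFF_MAX */
            }
            *start = (uint64_t)lstart;
            *end = (uint64_t)lstart + (uint64_t)l_len - 1;
        } else {                          /* negative: range precedes lstart */
            if (lstart + l_len < 0) {
                return EINVAL;
            }
            *start = (uint64_t)(lstart + l_len);
            *end = (uint64_t)lstart - 1;
        }
        return 0;
    }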
*/ @@ -4425,11 +4786,13 @@ restart: } out: - if (nlop) + if (nlop) { nfs_lock_owner_rele(nlop); - if (noop) + } + if (noop) { nfs_open_owner_rele(noop); - return (error); + } + return error; } /* @@ -4441,12 +4804,14 @@ nfs_check_for_locks(struct nfs_open_owner *noop, struct nfs_open_file *nofp) struct nfs_lock_owner *nlop; TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) { - if (nlop->nlo_open_owner != noop) + if (nlop->nlo_open_owner != noop) { continue; - if (!TAILQ_EMPTY(&nlop->nlo_locks)) + } + if (!TAILQ_EMPTY(&nlop->nlo_locks)) { break; + } } - return (nlop ? 1 : 0); + return nlop ? 1 : 0; } /* @@ -4471,14 +4836,15 @@ nfs4_reopen(struct nfs_open_file *nofp, thread_t thd) lck_mtx_lock(&nofp->nof_lock); while (nofp->nof_flags & NFS_OPEN_FILE_REOPENING) { - if ((error = nfs_sigintr(nmp, NULL, thd, 0))) + if ((error = nfs_sigintr(nmp, NULL, thd, 0))) { break; - msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag|(PZERO-1), "nfsreopenwait", &ts); + } + msleep(&nofp->nof_flags, &nofp->nof_lock, slpflag | (PZERO - 1), "nfsreopenwait", &ts); slpflag = 0; } if (error || !(nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { lck_mtx_unlock(&nofp->nof_lock); - return (error); + return error; } nofp->nof_flags |= NFS_OPEN_FILE_REOPENING; lck_mtx_unlock(&nofp->nof_lock); @@ -4504,14 +4870,17 @@ nfs4_reopen(struct nfs_open_file *nofp, thread_t thd) * from the n_parent we have stashed away. */ if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) && - (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) + (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) { dvp = NULL; - if (!dvp) + } + if (!dvp) { dvp = vnode_getparent(vp); + } vname = vnode_getname(vp); if (!dvp || !vname) { - if (!error) + if (!error) { error = EIO; + } nfs_node_unlock(np); goto out; } @@ -4520,12 +4889,12 @@ nfs4_reopen(struct nfs_open_file *nofp, thread_t thd) filename = &smallname[0]; namelen = snprintf(filename, sizeof(smallname), "%s", name); if (namelen >= sizeof(smallname)) { - MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK); + MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK); if (!filename) { error = ENOMEM; goto out; } - snprintf(filename, namelen+1, "%s", name); + snprintf(filename, namelen + 1, "%s", name); } nfs_node_unlock(np); bzero(&cn, sizeof(cn)); @@ -4534,43 +4903,53 @@ nfs4_reopen(struct nfs_open_file *nofp, thread_t thd) restart: done = 0; - if ((error = nfs_mount_state_in_use_start(nmp, thd))) + if ((error = nfs_mount_state_in_use_start(nmp, thd))) { goto out; + } - if (nofp->nof_rw) + if (nofp->nof_rw) { error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE); - if (!error && nofp->nof_w) + } + if (!error && nofp->nof_w) { error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE); - if (!error && nofp->nof_r) + } + if (!error && nofp->nof_r) { error = nfs4_open_reopen_rpc(nofp, thd, noop->noo_cred, &cn, dvp, &vp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE); + } if (nfs_mount_state_in_use_end(nmp, error)) { - if (error == NFSERR_GRACE) + if (error == NFSERR_GRACE) { goto restart; + } printf("nfs4_reopen: RPC failed, error %d, lost %d, %s\n", error, - (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???"); + (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? 
name : "???"); error = 0; goto out; } done = 1; out: - if (error && (error != EINTR) && (error != ERESTART)) + if (error && (error != EINTR) && (error != ERESTART)) { nfs_revoke_open_state_for_node(np); + } lck_mtx_lock(&nofp->nof_lock); nofp->nof_flags &= ~NFS_OPEN_FILE_REOPENING; - if (done) + if (done) { nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN; - else if (error) + } else if (error) { printf("nfs4_reopen: failed, error %d, lost %d, %s\n", error, - (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???"); + (nofp->nof_flags & NFS_OPEN_FILE_LOST) ? 1 : 0, name ? name : "???"); + } lck_mtx_unlock(&nofp->nof_lock); - if (filename && (filename != &smallname[0])) + if (filename && (filename != &smallname[0])) { FREE(filename, M_TEMP); - if (vname) + } + if (vname) { vnode_putname(vname); - if (dvp != NULLVP) + } + if (dvp != NULLVP) { vnode_put(dvp); - return (error); + } + return error; } /* @@ -4588,8 +4967,8 @@ nfs4_open_rpc( int share_access, int share_deny) { - return (nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx), - cnp, vap, dvp, vpp, create, share_access, share_deny)); + return nfs4_open_rpc_internal(nofp, ctx, vfs_context_thread(ctx), vfs_context_ucred(ctx), + cnp, vap, dvp, vpp, create, share_access, share_deny); } /* @@ -4606,7 +4985,7 @@ nfs4_open_reopen_rpc( int share_access, int share_deny) { - return (nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny)); + return nfs4_open_rpc_internal(nofp, NULL, thd, cred, cnp, NULL, dvp, vpp, NFS_OPEN_NOCREATE, share_access, share_deny); } /* @@ -4665,7 +5044,7 @@ nfs4_open_confirm_rpc( nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -4709,16 +5088,19 @@ nfs4_open_rpc_internal( struct kauth_ace ace; struct nfsreq_secinfo_args si; - if (create && !ctx) - return (EINVAL); + if (create && !ctx) { + return EINVAL; + } nmp = VTONMP(dvp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR); - if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } np = *vpp ? 
VTONFS(*vpp) : NULL; if (create && vap) { @@ -4726,8 +5108,9 @@ nfs4_open_rpc_internal( nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx); gotuid = VATTR_IS_ACTIVE(vap, va_uid); gotgid = VATTR_IS_ACTIVE(vap, va_gid); - if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) + if (exclusive && (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time))) { vap->va_vaflags |= VA_UTIMES_NULL; + } } else { exclusive = gotuid = gotgid = 0; } @@ -4738,8 +5121,9 @@ nfs4_open_rpc_internal( sid = &stateid; } - if ((error = nfs_open_owner_set_busy(noop, thd))) - return (error); + if ((error = nfs_open_owner_set_busy(noop, thd))) { + return error; + } again: rflags = delegation = recall = 0; ace.ace_flags = 0; @@ -4796,26 +5180,31 @@ again: nfsm_chain_add_bitmap_supported(error, &nmreq, nfs_getattr_bitmap, nmp, dnp); nfsm_chain_build_done(error, &nmreq); nfsm_assert(error, (numops == 0), EPROTO); - if (!error) + if (!error) { error = busyerror = nfs_node_set_busy(dnp, thd); + } nfsmout_if(error); - if (create && !namedattrs) + if (create && !namedattrs) { nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + } error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, R_NOINTR, NULL, &req); if (!error) { - if (create && !namedattrs) + if (create && !namedattrs) { nfs_dulookup_start(&dul, dnp, ctx); + } error = nfs_request_async_finish(req, &nmrep, &xid, &status); savedxid = xid; } - if (create && !namedattrs) + if (create && !namedattrs) { nfs_dulookup_finish(&dul, dnp, ctx); + } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -4829,7 +5218,7 @@ again: bmlen = NFS_ATTR_BITMAP_LEN; nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen); nfsm_chain_get_32(error, &nmrep, delegation); - if (!error) + if (!error) { switch (delegation) { case NFS_OPEN_DELEGATE_NONE: break; @@ -4837,8 +5226,9 @@ again: case NFS_OPEN_DELEGATE_WRITE: nfsm_chain_get_stateid(error, &nmrep, &dstateid); nfsm_chain_get_32(error, &nmrep, recall); - if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX + if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED); + } /* if we have any trouble accepting the ACE, just invalidate it */ ace_type = ace_flags = ace_mask = len = 0; nfsm_chain_get_32(error, &nmrep, ace_type); @@ -4849,35 +5239,42 @@ again: ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags); ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask); if (!error && (len >= slen)) { - MALLOC(s, char*, len+1, M_TEMP, M_WAITOK); - if (s) - slen = len+1; - else + MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK); + if (s) { + slen = len + 1; + } else { ace.ace_flags = 0; + } } - if (s) + if (s) { nfsm_chain_get_opaque(error, &nmrep, len, s); - else + } else { nfsm_chain_adv(error, &nmrep, nfsm_rndup(len)); + } if (!error && s) { s[len] = '\0'; - if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) + if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) { ace.ace_flags = 0; + } } - if (error || !s) + if (error || !s) { ace.ace_flags = 0; - if (s && (s != sbuf)) + } + if (s && (s != sbuf)) { FREE(s, M_TEMP); + } break; default: error = EBADRPC; break; } + } /* At this point if we have no error, the object was created/opened. 
*/ open_error = error; nfsmout_if(error); - if (create && vap && !exclusive) + if (create && vap && !exclusive) { nfs_vattr_set_supported(bitmap, vap); + } nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsmout_if(error); error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); @@ -4890,19 +5287,22 @@ again: if (!create && np && !NFS_CMPFH(np, fh.fh_data, fh.fh_len)) { // XXX for the open case, what if fh doesn't match the vnode we think we're opening? // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes. - if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) + if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { NP(np, "nfs4_open_rpc: warning: file handle mismatch"); + } } /* directory attributes: if we don't get them, make sure to invalidate */ nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid); - if (error) + if (error) { NATTRINVALIDATE(dnp); + } nfsmout_if(error); - if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) + if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) { nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK; + } if (rflags & NFS_OPEN_RESULT_CONFIRM) { nfs_node_unlock(dnp); @@ -4911,8 +5311,9 @@ again: error = nfs4_open_confirm_rpc(nmp, dnp, fh.fh_data, fh.fh_len, noop, sid, thd, cred, &nvattr, &xid); nfsmout_if(error); savedxid = xid; - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } } nfsmout: @@ -4929,21 +5330,25 @@ nfsmout: lockerror = ENOENT; nfs_getattr(dnp, NULL, ctx, NGA_CACHED); } - if (!lockerror) + if (!lockerror) { nfs_node_unlock(dnp); + } if (!error && !np && fh.fh_len) { /* create the vnode with the filehandle and attributes */ xid = savedxid; error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &newnp); - if (!error) + if (!error) { newvp = NFSTOV(newnp); + } } NVATTR_CLEANUP(&nvattr); - if (!busyerror) + if (!busyerror) { nfs_node_clear_busy(dnp); + } if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) { - if (!np) + if (!np) { np = newnp; + } if (!error && np && !recall) { /* stuff the delegation state in the node */ lck_mtx_lock(&np->n_openlock); @@ -4953,8 +5358,9 @@ nfsmout: np->n_dace = ace; if (np->n_dlink.tqe_next == NFSNOLIST) { lck_mtx_lock(&nmp->nm_lock); - if (np->n_dlink.tqe_next == NFSNOLIST) + if (np->n_dlink.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink); + } lck_mtx_unlock(&nmp->nm_lock); } lck_mtx_unlock(&np->n_openlock); @@ -4970,8 +5376,9 @@ nfsmout: np->n_dace = ace; if (np->n_dlink.tqe_next == NFSNOLIST) { lck_mtx_lock(&nmp->nm_lock); - if (np->n_dlink.tqe_next == NFSNOLIST) + if (np->n_dlink.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink); + } lck_mtx_unlock(&nmp->nm_lock); } lck_mtx_unlock(&np->n_openlock); @@ -4981,8 +5388,9 @@ nfsmout: /* return np's current delegation */ nfs4_delegation_return(np, 0, thd, cred); } - if (fh.fh_len) /* return fh's delegation if it wasn't for np */ + if (fh.fh_len) { /* return fh's delegation if it wasn't for np */ nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred); + } } } if (error) { @@ -5006,13 +5414,14 @@ nfsmout: error = nfs4_setattr_rpc(newnp, vap, ctx); } } - if (error) + if (error) { vnode_put(newvp); - else + } else { *vpp = newvp; + } } nfs_open_owner_clear_busy(noop); - return (error); + return error; } @@ -5050,8 +5459,9 @@ 
nfs4_claim_delegated_open_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nfs_node_lock_force(np); @@ -5075,14 +5485,17 @@ nfs4_claim_delegated_open_rpc( * from the n_parent we have stashed away. */ if ((np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR) && - (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) + (((dvp = np->n_parent)) && (error = vnode_get(dvp)))) { dvp = NULL; - if (!dvp) + } + if (!dvp) { dvp = vnode_getparent(NFSTOV(np)); + } vname = vnode_getname(NFSTOV(np)); if (!dvp || !vname) { - if (!error) + if (!error) { error = EIO; + } nfs_node_unlock(np); goto out; } @@ -5091,18 +5504,19 @@ nfs4_claim_delegated_open_rpc( filename = &smallname[0]; namelen = snprintf(filename, sizeof(smallname), "%s", name); if (namelen >= sizeof(smallname)) { - MALLOC(filename, char *, namelen+1, M_TEMP, M_WAITOK); + MALLOC(filename, char *, namelen + 1, M_TEMP, M_WAITOK); if (!filename) { error = ENOMEM; nfs_node_unlock(np); goto out; } - snprintf(filename, namelen+1, "%s", name); + snprintf(filename, namelen + 1, "%s", name); } nfs_node_unlock(np); - if ((error = nfs_open_owner_set_busy(noop, NULL))) + if ((error = nfs_open_owner_set_busy(noop, NULL))) { goto out; + } NVATTR_INIT(&nvattr); delegation = NFS_OPEN_DELEGATE_NONE; dstateid = np->n_dstateid; @@ -5143,10 +5557,11 @@ nfs4_claim_delegated_open_rpc( nfsmout_if(error); error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(), - noop->noo_cred, &si, flags|R_NOINTR, &nmrep, &xid, &status); + noop->noo_cred, &si, flags | R_NOINTR, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -5159,25 +5574,27 @@ nfs4_claim_delegated_open_rpc( bmlen = NFS_ATTR_BITMAP_LEN; nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen); nfsm_chain_get_32(error, &nmrep, delegation); - if (!error) + if (!error) { switch (delegation) { case NFS_OPEN_DELEGATE_NONE: // if (!(np->n_openflags & N_DELEG_RETURN)) /* don't warn if delegation is being returned */ - // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???"); + // printf("nfs: open delegated claim didn't return a delegation %s\n", filename ? filename : "???"); break; case NFS_OPEN_DELEGATE_READ: case NFS_OPEN_DELEGATE_WRITE: if ((((np->n_openflags & N_DELEG_MASK) == N_DELEG_READ) && - (delegation == NFS_OPEN_DELEGATE_WRITE)) || + (delegation == NFS_OPEN_DELEGATE_WRITE)) || (((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) && - (delegation == NFS_OPEN_DELEGATE_READ))) + (delegation == NFS_OPEN_DELEGATE_READ))) { printf("nfs: open delegated claim returned a different delegation type! have %s got %s %s\n", - ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R", - (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? filename : "???"); + ((np->n_openflags & N_DELEG_MASK) == N_DELEG_WRITE) ? "W" : "R", + (delegation == NFS_OPEN_DELEGATE_WRITE) ? "W" : "R", filename ? 
filename : "???"); + } nfsm_chain_get_stateid(error, &nmrep, &dstateid); nfsm_chain_get_32(error, &nmrep, recall); - if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX + if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED); + } /* if we have any trouble accepting the ACE, just invalidate it */ ace_type = ace_flags = ace_mask = len = 0; nfsm_chain_get_32(error, &nmrep, ace_type); @@ -5188,25 +5605,30 @@ nfs4_claim_delegated_open_rpc( ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags); ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask); if (!error && (len >= slen)) { - MALLOC(s, char*, len+1, M_TEMP, M_WAITOK); - if (s) - slen = len+1; - else + MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK); + if (s) { + slen = len + 1; + } else { ace.ace_flags = 0; + } } - if (s) + if (s) { nfsm_chain_get_opaque(error, &nmrep, len, s); - else + } else { nfsm_chain_adv(error, &nmrep, nfsm_rndup(len)); + } if (!error && s) { s[len] = '\0'; - if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) + if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) { ace.ace_flags = 0; + } } - if (error || !s) + if (error || !s) { ace.ace_flags = 0; - if (s && (s != sbuf)) + } + if (s && (s != sbuf)) { FREE(s, M_TEMP); + } if (!error) { /* stuff the latest delegation state in the node */ lck_mtx_lock(&np->n_openlock); @@ -5216,8 +5638,9 @@ nfs4_claim_delegated_open_rpc( np->n_dace = ace; if (np->n_dlink.tqe_next == NFSNOLIST) { lck_mtx_lock(&nmp->nm_lock); - if (np->n_dlink.tqe_next == NFSNOLIST) + if (np->n_dlink.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink); + } lck_mtx_unlock(&nmp->nm_lock); } lck_mtx_unlock(&np->n_openlock); @@ -5227,6 +5650,7 @@ nfs4_claim_delegated_open_rpc( error = EBADRPC; break; } + } nfsmout_if(error); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); @@ -5239,19 +5663,22 @@ nfs4_claim_delegated_open_rpc( if (!NFS_CMPFH(np, fh.fh_data, fh.fh_len)) { // XXX what if fh doesn't match the vnode we think we're re-opening? // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes. - if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) + if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { printf("nfs4_claim_delegated_open_rpc: warning: file handle mismatch %s\n", filename ? filename : "???"); + } } error = nfs_loadattrcache(np, &nvattr, &xid, 1); nfsmout_if(error); - if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) + if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) { nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK; + } nfsmout: NVATTR_CLEANUP(&nvattr); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfs_open_owner_clear_busy(noop); if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) { if (recall) { @@ -5265,14 +5692,17 @@ nfsmout: } out: // if (!error) - // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? filename : "???"); - if (filename && (filename != &smallname[0])) + // printf("nfs: open claim delegated (%d, %d) succeeded for %s\n", share_access, share_deny, filename ? 
filename : "???"); + if (filename && (filename != &smallname[0])) { FREE(filename, M_TEMP); - if (vname) + } + if (vname) { vnode_putname(vname); - if (dvp != NULLVP) + } + if (dvp != NULLVP) { vnode_put(dvp); - return (error); + } + return error; } /* @@ -5302,12 +5732,14 @@ nfs4_open_reclaim_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((error = nfs_open_owner_set_busy(noop, NULL))) - return (error); + if ((error = nfs_open_owner_set_busy(noop, NULL))) { + return error; + } NVATTR_INIT(&nvattr); delegation = NFS_OPEN_DELEGATE_NONE; @@ -5338,8 +5770,8 @@ nfs4_open_reclaim_rpc( // open_claim4 nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_PREVIOUS); delegation = (np->n_openflags & N_DELEG_READ) ? NFS_OPEN_DELEGATE_READ : - (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : - NFS_OPEN_DELEGATE_NONE; + (np->n_openflags & N_DELEG_WRITE) ? NFS_OPEN_DELEGATE_WRITE : + NFS_OPEN_DELEGATE_NONE; nfsm_chain_add_32(error, &nmreq, delegation); delegation = NFS_OPEN_DELEGATE_NONE; numops--; @@ -5352,10 +5784,11 @@ nfs4_open_reclaim_rpc( nfsmout_if(error); error = nfs_request2(np, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, current_thread(), - noop->noo_cred, &si, R_RECOVER|R_NOINTR, &nmrep, &xid, &status); + noop->noo_cred, &si, R_RECOVER | R_NOINTR, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -5368,7 +5801,7 @@ nfs4_open_reclaim_rpc( bmlen = NFS_ATTR_BITMAP_LEN; nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen); nfsm_chain_get_32(error, &nmrep, delegation); - if (!error) + if (!error) { switch (delegation) { case NFS_OPEN_DELEGATE_NONE: if (np->n_openflags & N_DELEG_MASK) { @@ -5387,8 +5820,9 @@ nfs4_open_reclaim_rpc( case NFS_OPEN_DELEGATE_WRITE: nfsm_chain_get_stateid(error, &nmrep, &dstateid); nfsm_chain_get_32(error, &nmrep, recall); - if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX + if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED); + } /* if we have any trouble accepting the ACE, just invalidate it */ ace_type = ace_flags = ace_mask = len = 0; nfsm_chain_get_32(error, &nmrep, ace_type); @@ -5399,25 +5833,30 @@ nfs4_open_reclaim_rpc( ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags); ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask); if (!error && (len >= slen)) { - MALLOC(s, char*, len+1, M_TEMP, M_WAITOK); - if (s) - slen = len+1; - else + MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK); + if (s) { + slen = len + 1; + } else { ace.ace_flags = 0; + } } - if (s) + if (s) { nfsm_chain_get_opaque(error, &nmrep, len, s); - else + } else { nfsm_chain_adv(error, &nmrep, nfsm_rndup(len)); + } if (!error && s) { s[len] = '\0'; - if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) + if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) { ace.ace_flags = 0; + } } - if (error || !s) + if (error || !s) { ace.ace_flags = 0; - if (s && (s != sbuf)) + } + if (s && (s != sbuf)) { FREE(s, M_TEMP); + } if (!error) { /* stuff the delegation state in the node */ lck_mtx_lock(&np->n_openlock); @@ -5427,8 +5866,9 @@ nfs4_open_reclaim_rpc( np->n_dace = ace; if (np->n_dlink.tqe_next == NFSNOLIST) { 
lck_mtx_lock(&nmp->nm_lock); - if (np->n_dlink.tqe_next == NFSNOLIST) + if (np->n_dlink.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nmp->nm_delegations, np, n_dlink); + } lck_mtx_unlock(&nmp->nm_lock); } lck_mtx_unlock(&np->n_openlock); @@ -5438,6 +5878,7 @@ nfs4_open_reclaim_rpc( error = EBADRPC; break; } + } nfsmout_if(error); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); error = nfs4_parsefattr(&nmrep, NULL, &nvattr, &fh, NULL, NULL); @@ -5452,27 +5893,31 @@ nfs4_open_reclaim_rpc( // That should be pretty hard in this case, given that we are doing // the open reclaim using the file handle (and not a dir/name pair). // Solaris Named Attributes may do this due to a bug.... so don't warn for named attributes. - if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) + if (!(np->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { NP(np, "nfs4_open_reclaim_rpc: warning: file handle mismatch"); + } } error = nfs_loadattrcache(np, &nvattr, &xid, 1); nfsmout_if(error); - if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) + if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) { nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK; + } nfsmout: // if (!error) - // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny); + // NP(np, "nfs: open reclaim (%d, %d) succeeded", share_access, share_deny); NVATTR_CLEANUP(&nvattr); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfs_open_owner_clear_busy(noop); if ((delegation == NFS_OPEN_DELEGATE_READ) || (delegation == NFS_OPEN_DELEGATE_WRITE)) { - if (recall) + if (recall) { nfs4_delegation_return_enqueue(np); + } } - return (error); + return error; } int @@ -5489,12 +5934,14 @@ nfs4_open_downgrade_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((error = nfs_open_owner_set_busy(noop, NULL))) - return (error); + if ((error = nfs_open_owner_set_busy(noop, NULL))) { + return error; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -5520,11 +5967,12 @@ nfs4_open_downgrade_rpc( nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), - &si, R_NOINTR, &nmrep, &xid, &status); + vfs_context_thread(ctx), vfs_context_ucred(ctx), + &si, R_NOINTR, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -5535,12 +5983,13 @@ nfs4_open_downgrade_rpc( nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfs_open_owner_clear_busy(noop); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int @@ -5559,12 +6008,14 @@ nfs4_close_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((error = nfs_open_owner_set_busy(noop, NULL))) - return (error); + if ((error = nfs_open_owner_set_busy(noop, NULL))) { + return error; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -5587,10 +6038,11 @@ nfs4_close_rpc( nfsm_chain_build_done(error, &nmreq); 
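	/*
	 * Annotation (editor's note, not part of the change): two conventions
	 * carry the request-building code above.  The nfsm_chain_* macros
	 * thread the `error` accumulator through every step and become no-ops
	 * once it is set, so marshalling runs straight-line with one check at
	 * the end; and `numops` counts down as each COMPOUND op is emitted, so
	 * the nfsm_assert below proves the op count written into the header
	 * matched the ops actually built (EPROTO otherwise).  A hedged sketch
	 * of the error-threading half; STEP and the add_* names are
	 * hypothetical, not from this source:
	 *
	 *	#include <stdio.h>
	 *
	 *	#define STEP(error, expr) \
	 *		do { if (!(error)) { (error) = (expr); } } while (0)
	 *
	 *	static int add_putfh(void)   { return 0; }
	 *	static int add_close(void)   { return 0; }
	 *	static int add_getattr(void) { return 0; }
	 *
	 *	int
	 *	main(void)
	 *	{
	 *		int error = 0;
	 *		STEP(error, add_putfh());   // each step skipped once error != 0
	 *		STEP(error, add_close());
	 *		STEP(error, add_getattr());
	 *		printf("build %s\n", error ? "failed" : "ok");
	 *		return error;
	 *	}
	 */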
nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); - error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags|R_NOINTR, &nmrep, &xid, &status); + error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, thd, cred, &si, flags | R_NOINTR, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -5601,12 +6053,13 @@ nfs4_close_rpc( nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfs_open_owner_clear_busy(noop); nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } @@ -5680,8 +6133,9 @@ nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags) if (!error && nofp->nof_d_rw) { error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, flags); /* for some errors, we should just try reopening the file */ - if (nfs_mount_state_error_delegation_lost(error)) + if (nfs_mount_state_error_delegation_lost(error)) { reopen = error; + } if (!error || reopen) { lck_mtx_lock(&nofp->nof_lock); nofp->nof_rw += nofp->nof_d_rw; @@ -5694,8 +6148,9 @@ nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags) if (!error) { error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, flags); /* for some errors, we should just try reopening the file */ - if (nfs_mount_state_error_delegation_lost(error)) + if (nfs_mount_state_error_delegation_lost(error)) { reopen = error; + } } if (!error || reopen) { lck_mtx_lock(&nofp->nof_lock); @@ -5708,8 +6163,9 @@ nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags) if (!error) { error = nfs4_claim_delegated_open_rpc(nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, flags); /* for some errors, we should just try reopening the file */ - if (nfs_mount_state_error_delegation_lost(error)) + if (nfs_mount_state_error_delegation_lost(error)) { reopen = error; + } } if (!error || reopen) { lck_mtx_lock(&nofp->nof_lock); @@ -5733,58 +6189,65 @@ nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags) if (reopen && (nfs_check_for_locks(noop, nofp) == 0)) { /* just reopen the file on next access */ NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, need reopen, %d", - reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred)); lck_mtx_lock(&nofp->nof_lock); nofp->nof_flags |= NFS_OPEN_FILE_REOPEN; lck_mtx_unlock(&nofp->nof_lock); - return (0); + return 0; } - if (reopen) + if (reopen) { NP(nofp->nof_np, "nfs4_claim_delegated_state_for_open_file: %d, locks prevent reopen, %d", - reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + reopen, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + } } if (!error && ((nmp = NFSTONMP(nofp->nof_np)))) { /* claim delegated locks */ TAILQ_FOREACH(nlop, &nofp->nof_np->n_lock_owners, nlo_link) { - if (nlop->nlo_open_owner != noop) + if (nlop->nlo_open_owner != noop) { continue; + } TAILQ_FOREACH_SAFE(nflp, &nlop->nlo_locks, nfl_lolink, nextnflp) { /* skip dead & blocked lock requests (shouldn't be any in the held lock list) */ - if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED)) + if 
(nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) { continue; + } /* skip non-delegated locks */ - if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) + if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) { continue; + } error = nmp->nm_funcs->nf_setlock_rpc(nofp->nof_np, nofp, nflp, 0, flags, current_thread(), noop->noo_cred); if (error) { NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) failed %d, %d", - nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + nflp->nfl_start, nflp->nfl_end, error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); break; } // else { - // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d", - // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + // NP(nofp->nof_np, "nfs: delegated lock claim (0x%llx, 0x%llx) succeeded, %d", + // nflp->nfl_start, nflp->nfl_end, kauth_cred_getuid(nofp->nof_owner->noo_cred)); // } } - if (error) + if (error) { break; + } } } - if (!error) /* all state claimed successfully! */ - return (0); + if (!error) { /* all state claimed successfully! */ + return 0; + } /* restart if it looks like a problem more than just losing the delegation */ if (!nfs_mount_state_error_delegation_lost(error) && ((error == ETIMEDOUT) || nfs_mount_state_error_should_restart(error))) { NP(nofp->nof_np, "nfs delegated lock claim error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); - if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) + if ((error == ETIMEDOUT) && ((nmp = NFSTONMP(nofp->nof_np)))) { nfs_need_reconnect(nmp); - return (error); + } + return error; } - /* delegated state lost (once held but now not claimable) */ + /* delegated state lost (once held but now not claimable) */ NP(nofp->nof_np, "nfs delegated state claim error %d, state lost, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); /* @@ -5801,7 +6264,7 @@ nfs4_claim_delegated_state_for_open_file(struct nfs_open_file *nofp, int flags) /* revoke all open file state */ nfs_revoke_open_state_for_node(nofp->nof_np); - return (error); + return error; } /* @@ -5817,12 +6280,14 @@ nfs_release_open_state_for_node(nfsnode_t np, int force) /* drop held locks */ TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) { /* skip dead & blocked lock requests */ - if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD|NFS_FILE_LOCK_BLOCKED)) + if (nflp->nfl_flags & (NFS_FILE_LOCK_DEAD | NFS_FILE_LOCK_BLOCKED)) { continue; + } /* send an unlock if not a delegated lock */ - if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) + if (!force && nmp && !(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED)) { nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER, - NULL, nflp->nfl_owner->nlo_open_owner->noo_cred); + NULL, nflp->nfl_owner->nlo_open_owner->noo_cred); + } /* kill/remove the lock */ lck_mtx_lock(&np->n_openlock); nflp->nfl_flags |= NFS_FILE_LOCK_DEAD; @@ -5844,16 +6309,18 @@ nfs_release_open_state_for_node(nfsnode_t np, int force) /* drop all opens */ TAILQ_FOREACH(nofp, &np->n_opens, nof_link) { - if (nofp->nof_flags & NFS_OPEN_FILE_LOST) + if (nofp->nof_flags & NFS_OPEN_FILE_LOST) { continue; + } /* mark open state as lost */ lck_mtx_lock(&nofp->nof_lock); nofp->nof_flags &= ~NFS_OPEN_FILE_REOPEN; nofp->nof_flags |= NFS_OPEN_FILE_LOST; - + lck_mtx_unlock(&nofp->nof_lock); - if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) + if (!force && nmp && (nmp->nm_vers >= NFS_VER4)) { nfs4_close_rpc(np, nofp, NULL, 
nofp->nof_owner->noo_cred, R_RECOVER); + } } lck_mtx_unlock(&np->n_openlock); @@ -5871,8 +6338,7 @@ nfs_revoke_open_state_for_node(nfsnode_t np) /* mark node as needing to be revoked */ nfs_node_lock_force(np); - if (np->n_flag & NREVOKE) /* already revoked? */ - { + if (np->n_flag & NREVOKE) { /* already revoked? */ NP(np, "nfs_revoke_open_state_for_node(): already revoked"); nfs_node_unlock(np); return; @@ -5908,19 +6374,21 @@ restart: TAILQ_FOREACH(nofp, &np->n_opens, nof_link) { if (!nofp->nof_d_rw_drw && !nofp->nof_d_w_drw && !nofp->nof_d_r_drw && !nofp->nof_d_rw_dw && !nofp->nof_d_w_dw && !nofp->nof_d_r_dw && - !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) + !nofp->nof_d_rw && !nofp->nof_d_w && !nofp->nof_d_r) { continue; + } lck_mtx_unlock(&np->n_openlock); error = nfs4_claim_delegated_state_for_open_file(nofp, flags); lck_mtx_lock(&np->n_openlock); - if (error) + if (error) { break; + } goto restart; } lck_mtx_unlock(&np->n_openlock); - return (error); + return error; } /* @@ -5934,16 +6402,18 @@ nfs4_delegation_return_enqueue(nfsnode_t np) struct nfsmount *nmp; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { return; + } lck_mtx_lock(&np->n_openlock); np->n_openflags |= N_DELEG_RETURN; lck_mtx_unlock(&np->n_openlock); lck_mtx_lock(&nmp->nm_lock); - if (np->n_dreturn.tqe_next == NFSNOLIST) + if (np->n_dreturn.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nmp->nm_dreturnq, np, n_dreturn); + } nfs_mount_sock_thread_wake(nmp); lck_mtx_unlock(&nmp->nm_lock); } @@ -5960,21 +6430,24 @@ nfs4_delegation_return(nfsnode_t np, int flags, thread_t thd, kauth_cred_t cred) int error; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } /* first, make sure the node's marked for delegation return */ lck_mtx_lock(&np->n_openlock); - np->n_openflags |= (N_DELEG_RETURN|N_DELEG_RETURNING); + np->n_openflags |= (N_DELEG_RETURN | N_DELEG_RETURNING); lck_mtx_unlock(&np->n_openlock); /* make sure nobody else is using the delegation state */ - if ((error = nfs_open_state_set_busy(np, NULL))) + if ((error = nfs_open_state_set_busy(np, NULL))) { goto out; + } /* claim any delegated state */ - if ((error = nfs4_claim_delegated_state_for_node(np, flags))) + if ((error = nfs4_claim_delegated_state_for_node(np, flags))) { goto out; + } /* return the delegation */ lck_mtx_lock(&np->n_openlock); @@ -6005,13 +6478,14 @@ out: } lck_mtx_unlock(&nmp->nm_lock); lck_mtx_lock(&np->n_openlock); - np->n_openflags &= ~(N_DELEG_RETURN|N_DELEG_RETURNING); + np->n_openflags &= ~(N_DELEG_RETURN | N_DELEG_RETURNING); lck_mtx_unlock(&np->n_openlock); if (error) { NP(np, "nfs4_delegation_return, error %d", error); - if (error == ETIMEDOUT) + if (error == ETIMEDOUT) { nfs_need_reconnect(nmp); + } if (nfs_mount_state_error_should_restart(error)) { /* make sure recovery happens */ lck_mtx_lock(&nmp->nm_lock); @@ -6022,7 +6496,7 @@ out: nfs_open_state_clear_busy(np); - return (error); + return error; } /* @@ -6061,7 +6535,7 @@ nfs4_delegreturn_rpc(struct nfsmount *nmp, u_char *fhp, int fhlen, struct nfs_st nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } @@ -6075,12 +6549,12 @@ nfsmout: int nfs_vnop_read( struct vnop_read_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */*ap) { 
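	/*
	 * Annotation (editor's note, not part of the change): the body below
	 * is built around a restart loop.  If the open file was lost, the read
	 * fails with EIO; if it is marked NFS_OPEN_FILE_REOPEN, nfs4_reopen()
	 * runs and the lookup restarts from scratch, since recovery may have
	 * changed the open state.  Once the state is stable, the hot path
	 * jumps straight to nfs_bioread() without taking the open-file locks
	 * when read access is already held.  A minimal sketch of the retry
	 * shape, with hypothetical names standing in for the real state:
	 *
	 *	#include <errno.h>
	 *
	 *	struct open_file { int flags; };
	 *	#define OF_LOST   0x1
	 *	#define OF_REOPEN 0x2
	 *
	 *	static struct open_file g_of = { OF_REOPEN };
	 *	static int find_open(struct open_file **ofp) { *ofp = &g_of; return 0; }
	 *	static int reopen(struct open_file *of) { of->flags &= ~OF_REOPEN; return 0; }
	 *
	 *	static int
	 *	read_with_stable_open(struct open_file **ofp)
	 *	{
	 *		int error;
	 *	restart:
	 *		error = find_open(ofp);
	 *		if (!error && ((*ofp)->flags & OF_LOST))
	 *			error = EIO;            // state unrecoverable
	 *		if (!error && ((*ofp)->flags & OF_REOPEN)) {
	 *			error = reopen(*ofp);   // may run recovery
	 *			if (!error)
	 *				goto restart;   // re-validate everything
	 *		}
	 *		return error;
	 *	}
	 */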
vnode_t vp = ap->a_vp; vfs_context_t ctx = ap->a_context; @@ -6090,19 +6564,23 @@ nfs_vnop_read( struct nfs_open_file *nofp; int error; - if (vnode_vtype(ap->a_vp) != VREG) + if (vnode_vtype(ap->a_vp) != VREG) { return (vnode_vtype(vp) == VDIR) ? EISDIR : EPERM; + } np = VTONFS(vp); nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (np->n_flag & NREVOKE) - return (EIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (np->n_flag & NREVOKE) { + return EIO; + } noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1); - if (!noop) - return (ENOMEM); + if (!noop) { + return ENOMEM; + } restart: error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1); if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) { @@ -6112,12 +6590,13 @@ restart: if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { error = nfs4_reopen(nofp, vfs_context_thread(ctx)); nofp = NULL; - if (!error) + if (!error) { goto restart; + } } if (error) { nfs_open_owner_rele(noop); - return (error); + return error; } /* * Since the read path is a hot path, if we already have @@ -6137,7 +6616,7 @@ restart: error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)); if (error) { nfs_open_owner_rele(noop); - return (error); + return error; } /* * If we don't have a file already open with the access we need (read) then @@ -6149,26 +6628,26 @@ restart: if (error) { nfs_mount_state_in_use_end(nmp, 0); nfs_open_owner_rele(noop); - return (error); + return error; } if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_READ)) { /* we don't have the file open, so open it for read access if we're not denied */ if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) { NP(np, "nfs_vnop_read: File already needs close access: 0x%x, cred: %d thread: %lld", - nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx))); + nofp->nof_access, kauth_cred_getuid(nofp->nof_owner->noo_cred), thread_tid(vfs_context_thread(ctx))); } if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) { nfs_open_file_clear_busy(nofp); nfs_mount_state_in_use_end(nmp, 0); nfs_open_owner_rele(noop); - return (EPERM); + return EPERM; } if (np->n_flag & NREVOKE) { error = EIO; nfs_open_file_clear_busy(nofp); nfs_mount_state_in_use_end(nmp, 0); nfs_open_owner_rele(noop); - return (error); + return error; } if (nmp->nm_vers < NFS_VER4) { /* NFS v2/v3 opens are always allowed - so just add it. 
*/ @@ -6176,20 +6655,23 @@ restart: } else { error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx); } - if (!error) + if (!error) { nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE; + } } - if (nofp) + if (nofp) { nfs_open_file_clear_busy(nofp); + } if (nfs_mount_state_in_use_end(nmp, error)) { nofp = NULL; goto restart; } nfs_open_owner_rele(noop); - if (error) - return (error); + if (error) { + return error; + } do_read: - return (nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context)); + return nfs_bioread(VTONFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_context); } /* @@ -6200,13 +6682,13 @@ do_read: int nfs4_vnop_create( struct vnop_create_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; struct componentname *cnp = ap->a_cnp; @@ -6220,21 +6702,24 @@ nfs4_vnop_create( struct nfs_open_file *newnofp = NULL, *nofp = NULL; nmp = VTONMP(dvp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (vap) + if (vap) { nfs_avoid_needless_id_setting_on_create(VTONFS(dvp), vap, ctx); + } noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1); - if (!noop) - return (ENOMEM); + if (!noop) { + return ENOMEM; + } restart: error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)); if (error) { nfs_open_owner_rele(noop); - return (error); + return error; } /* grab a provisional, nodeless open file */ @@ -6249,14 +6734,17 @@ restart: error = nfs4_reopen(newnofp, vfs_context_thread(ctx)); nfs_open_file_destroy(newnofp); newnofp = NULL; - if (!error) + if (!error) { goto restart; + } } - if (!error) + if (!error) { error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx)); + } if (error) { - if (newnofp) + if (newnofp) { nfs_open_file_destroy(newnofp); + } newnofp = NULL; goto out; } @@ -6296,8 +6784,9 @@ restart: VATTR_INIT(&vattr); VATTR_SET(&vattr, va_mode, vap->va_mode); nfs4_setattr_rpc(np, &vattr, ctx); - if (!error2) + if (!error2) { error = 0; + } } } if (error) { @@ -6338,8 +6827,9 @@ restart: busyerror = nfs_open_file_set_busy(nofp, NULL); nfs_open_file_add_open(nofp, accessMode, denyMode, 0); nofp->nof_stateid = newnofp->nof_stateid; - if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) + if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) { nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK; + } nfs_open_file_clear_busy(newnofp); nfs_open_file_destroy(newnofp); } @@ -6348,16 +6838,18 @@ restart: nofp->nof_flags |= NFS_OPEN_FILE_CREATE; nofp->nof_creator = current_thread(); out: - if (nofp && !busyerror) + if (nofp && !busyerror) { nfs_open_file_clear_busy(nofp); + } if (nfs_mount_state_in_use_end(nmp, error)) { nofp = newnofp = NULL; busyerror = 0; goto restart; } - if (noop) + if (noop) { nfs_open_owner_rele(noop); - return (error); + } + return error; } /* @@ -6390,12 +6882,14 @@ nfs4_create_rpc( struct nfsreq_secinfo_args si; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR); - if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + 
return EINVAL; + } sd.specdata1 = sd.specdata2 = 0; @@ -6406,8 +6900,9 @@ nfs4_create_rpc( case NFBLK: case NFCHR: tag = "mknod"; - if (!VATTR_IS_ACTIVE(vap, va_rdev)) - return (EINVAL); + if (!VATTR_IS_ACTIVE(vap, va_rdev)) { + return EINVAL; + } sd.specdata1 = major(vap->va_rdev); sd.specdata2 = minor(vap->va_rdev); break; @@ -6419,14 +6914,15 @@ nfs4_create_rpc( tag = "mkdir"; break; default: - return (EINVAL); + return EINVAL; } nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx); error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)); - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + } NFSREQ_SECINFO_SET(&si, dnp, NULL, 0, NULL, 0); NVATTR_INIT(&nvattr); @@ -6468,15 +6964,17 @@ nfs4_create_rpc( nfsmout_if(error); error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); if (!error) { - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_start(&dul, dnp, ctx); + } error = nfs_request_async_finish(req, &nmrep, &xid, &status); } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -6505,8 +7003,9 @@ nfs4_create_rpc( nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); savedxid = xid; nfsm_chain_loadattr(error, &nmrep, dnp, nfsvers, &xid); - if (error) + if (error) { NATTRINVALIDATE(dnp); + } nfsmout: nfsm_chain_cleanup(&nmreq); @@ -6527,13 +7026,15 @@ nfsmout: /* create the vnode with the filehandle and attributes */ xid = savedxid; error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); - if (!error) + if (!error) { newvp = NFSTOV(np); + } } NVATTR_CLEANUP(&nvattr); - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_finish(&dul, dnp, ctx); + } /* * Kludge: Map EEXIST => 0 assuming that you have a reply to a retry @@ -6543,12 +7044,14 @@ nfsmout: error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np); if (!error) { newvp = NFSTOV(np); - if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) + if (vnode_vtype(newvp) != nfstov_type(type, nfsvers)) { error = EEXIST; + } } } - if (!busyerror) + if (!busyerror) { nfs_node_clear_busy(dnp); + } if (error) { if (newvp) { nfs_node_unlock(np); @@ -6558,30 +7061,32 @@ nfsmout: nfs_node_unlock(np); *npp = np; } - return (error); + return error; } int nfs4_vnop_mknod( struct vnop_mknod_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { nfsnode_t np = NULL; struct nfsmount *nmp; int error; nmp = VTONMP(ap->a_dvp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) - return (EINVAL); + if (!VATTR_IS_ACTIVE(ap->a_vap, va_type)) { + return EINVAL; + } switch (ap->a_vap->va_type) { case VBLK: case VCHR: @@ -6589,68 +7094,71 @@ nfs4_vnop_mknod( case VSOCK: break; default: - return (ENOTSUP); + return ENOTSUP; } error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap, - 
vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np); - if (!error) + vtonfs_type(ap->a_vap->va_type, nmp->nm_vers), NULL, &np); + if (!error) { *ap->a_vpp = NFSTOV(np); - return (error); + } + return error; } int nfs4_vnop_mkdir( struct vnop_mkdir_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { nfsnode_t np = NULL; int error; error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap, - NFDIR, NULL, &np); - if (!error) + NFDIR, NULL, &np); + if (!error) { *ap->a_vpp = NFSTOV(np); - return (error); + } + return error; } int nfs4_vnop_symlink( struct vnop_symlink_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - char *a_target; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * char *a_target; + * vfs_context_t a_context; + * } */*ap) { nfsnode_t np = NULL; int error; error = nfs4_create_rpc(ap->a_context, VTONFS(ap->a_dvp), ap->a_cnp, ap->a_vap, - NFLNK, ap->a_target, &np); - if (!error) + NFLNK, ap->a_target, &np); + if (!error) { *ap->a_vpp = NFSTOV(np); - return (error); + } + return error; } int nfs4_vnop_link( struct vnop_link_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - vnode_t a_tdvp; - struct componentname *a_cnp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * vnode_t a_tdvp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -6665,17 +7173,21 @@ nfs4_vnop_link( struct nfsm_chain nmreq, nmrep; struct nfsreq_secinfo_args si; - if (vnode_mount(vp) != vnode_mount(tdvp)) - return (EXDEV); + if (vnode_mount(vp) != vnode_mount(tdvp)) { + return EXDEV; + } nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); - if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (EINVAL); + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } + if (tdnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return EINVAL; + } /* * Push all writes to the server, so that the attribute cache @@ -6684,8 +7196,9 @@ nfs4_vnop_link( */ nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR); - if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) - return (error); + if ((error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)))) { + return error; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); nfsm_chain_null(&nmreq); @@ -6734,42 +7247,47 @@ nfs4_vnop_link( nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); savedxid = xid; nfsm_chain_loadattr(error, &nmrep, tdnp, nfsvers, &xid); - if (error) + if (error) { NATTRINVALIDATE(tdnp); + } /* link attributes: if we don't get them, make sure to invalidate */ nfsm_chain_op_check(error, &nmrep, NFS_OP_RESTOREFH); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); xid = savedxid; nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); - if (error) + if (error) { 
NATTRINVALIDATE(np); + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - if (!lockerror) + if (!lockerror) { tdnp->n_flag |= NMODIFIED; + } /* Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. */ - if (error == EEXIST) + if (error == EEXIST) { error = 0; + } if (!error && (tdnp->n_flag & NNEGNCENTRIES)) { tdnp->n_flag &= ~NNEGNCENTRIES; cache_purge_negatives(tdvp); } - if (!lockerror) + if (!lockerror) { nfs_node_unlock2(tdnp, np); + } nfs_node_clear_busy2(tdnp, np); - return (error); + return error; } int nfs4_vnop_rmdir( struct vnop_rmdir_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t a_vp; - struct componentname *a_cnp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t a_vp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -6781,16 +7299,19 @@ nfs4_vnop_rmdir( nfsnode_t dnp = VTONFS(dvp); struct nfs_dulookup dul; - if (vnode_vtype(vp) != VDIR) - return (EINVAL); + if (vnode_vtype(vp) != VDIR) { + return EINVAL; + } nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR); - if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) - return (error); + if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) { + return error; + } if (!namedattrs) { nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); @@ -6798,20 +7319,22 @@ nfs4_vnop_rmdir( } error = nfs4_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen, - vfs_context_thread(ctx), vfs_context_ucred(ctx)); + vfs_context_thread(ctx), vfs_context_ucred(ctx)); nfs_name_cache_purge(dnp, np, cnp, ctx); /* nfs_getattr() will check changed and purge caches */ nfs_getattr(dnp, NULL, ctx, NGA_CACHED); - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_finish(&dul, dnp, ctx); + } nfs_node_clear_busy2(dnp, np); /* * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. 
*/ - if (error == ENOENT) + if (error == ENOENT) { error = 0; + } if (!error) { /* * remove nfsnode from hash now so we can't accidentally find it @@ -6826,7 +7349,7 @@ nfs4_vnop_rmdir( } lck_mtx_unlock(nfs_node_hash_mutex); } - return (error); + return error; } /* @@ -6869,10 +7392,12 @@ nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx) struct nfsreq_secinfo_args si; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (NULL); - if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) - return (NULL); + if (nfs_mount_gone(nmp)) { + return NULL; + } + if (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) { + return NULL; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); NVATTR_INIT(&nvattr); @@ -6886,10 +7411,11 @@ nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx) if (np->n_attrdirfh) { // XXX can't set parent correctly (to np) yet - error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh+1, *np->n_attrdirfh, - NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp); - if (adnp) + error = nfs_nget(nmp->nm_mountp, NULL, &cn, np->n_attrdirfh + 1, *np->n_attrdirfh, + NULL, NULL, RPCAUTH_UNKNOWN, NG_NOCREATE, &adnp); + if (adnp) { goto nfsmout; + } } if (!fetch) { error = ENOENT; @@ -6911,14 +7437,15 @@ nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx) NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap); NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE); nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap, - NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr); + NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr); nfsm_chain_build_done(error, &nmreq); nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request_async(np, NULL, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); + if (!error) { error = nfs_request_async_finish(req, &nmrep, &xid, &status); + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); @@ -6934,9 +7461,10 @@ nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx) } if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) { /* (re)allocate attrdir fh buffer */ - if (np->n_attrdirfh) + if (np->n_attrdirfh) { FREE(np->n_attrdirfh, M_TEMP); - MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK); + } + MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK); } if (!np->n_attrdirfh) { error = ENOMEM; @@ -6944,7 +7472,7 @@ nfs4_named_attr_dir_get(nfsnode_t np, int fetch, vfs_context_t ctx) } /* cache the attrdir fh in the node */ *np->n_attrdirfh = fh.fh_len; - bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len); + bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len); /* create node for attrdir */ // XXX can't set parent correctly (to np) yet error = nfs_nget(NFSTOMP(np), NULL, &cn, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, 0, &adnp); @@ -6955,15 +7483,18 @@ nfsmout: if (adnp) { /* sanity check that this node is an attribute directory */ - if (adnp->n_vattr.nva_type != VDIR) + if (adnp->n_vattr.nva_type != VDIR) { error = EINVAL; - if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) + } + if (!(adnp->n_vattr.nva_flags & NFS_FFLAG_IS_ATTR)) { error = EINVAL; + } nfs_node_unlock(adnp); - if (error) + if (error) { vnode_put(NFSTOV(adnp)); + } } - return (error ? NULL : adnp); + return error ? NULL : adnp; } /* @@ -6987,10 +7518,10 @@ nfsmout: * the lookup/open, we lock both the node and the attribute directory node. 
*/ -#define NFS_GET_NAMED_ATTR_CREATE 0x1 -#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2 -#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4 -#define NFS_GET_NAMED_ATTR_PREFETCH 0x8 +#define NFS_GET_NAMED_ATTR_CREATE 0x1 +#define NFS_GET_NAMED_ATTR_CREATE_GUARDED 0x2 +#define NFS_GET_NAMED_ATTR_TRUNCATE 0x4 +#define NFS_GET_NAMED_ATTR_PREFETCH 0x8 int nfs4_named_attr_get( @@ -7037,8 +7568,9 @@ nfs4_named_attr_get( slen = sizeof(sbuf); nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } NVATTR_INIT(&nvattr); negnamecache = !NMFLAG(nmp, NONEGNAMECACHE); thd = vfs_context_thread(ctx); @@ -7050,11 +7582,13 @@ nfs4_named_attr_get( if (!create) { error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED); - if (error) - return (error); + if (error) { + return error; + } if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) && - !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) - return (ENOATTR); + !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { + return ENOATTR; + } } else if (accessMode == NFS_OPEN_SHARE_ACCESS_NONE) { /* shouldn't happen... but just be safe */ printf("nfs4_named_attr_get: create with no access %s\n", cnp->cn_nameptr); @@ -7068,16 +7602,18 @@ nfs4_named_attr_get( * and set NFS_OPEN_FILE_CREATE. */ denyMode = NFS_OPEN_SHARE_DENY_NONE; - if (prefetch && guarded) + if (prefetch && guarded) { prefetch = 0; /* no sense prefetching data that can't be there */ - + } noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1); - if (!noop) - return (ENOMEM); + if (!noop) { + return ENOMEM; + } } - if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) - return (error); + if ((error = busyerror = nfs_node_set_busy(np, vfs_context_thread(ctx)))) { + return error; + } adnp = nfs4_named_attr_dir_get(np, 0, ctx); hadattrdir = (adnp != NULL); @@ -7092,8 +7628,9 @@ nfs4_named_attr_get( nfsm_chain_null(&nmrep); if (hadattrdir) { - if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) + if ((error = adbusyerror = nfs_node_set_busy(adnp, vfs_context_thread(ctx)))) { goto nfsmout; + } /* nfs_getattr() will check changed and purge caches */ error = nfs_getattr(adnp, NULL, ctx, NGA_CACHED); nfsmout_if(error); @@ -7111,14 +7648,16 @@ nfs4_named_attr_get( *anpp = anp; error = -1; } - if (error != -1) /* cache miss */ + if (error != -1) { /* cache miss */ break; - /* FALLTHROUGH */ + } + /* FALLTHROUGH */ case -1: /* cache hit, not really an error */ OSAddAtomic64(1, &nfsstats.lookupcache_hits); - if (!anp && avp) + if (!anp && avp) { *anpp = anp = VTONFS(avp); + } nfs_node_clear_busy(adnp); adbusyerror = ENOENT; @@ -7131,7 +7670,7 @@ nfs4_named_attr_get( /* compute actual success/failure based on accessibility */ error = nfs_vnop_access(&naa); - /* FALLTHROUGH */ + /* FALLTHROUGH */ default: /* we either found it, or hit an error */ if (!error && guarded) { @@ -7141,8 +7680,9 @@ nfs4_named_attr_get( *anpp = anp = NULL; } /* we're done if error or we don't need to open */ - if (error || !open) + if (error || !open) { goto nfsmout; + } /* no error and we need to open... 
*/ } } @@ -7168,14 +7708,17 @@ restart: error = nfs4_reopen(newnofp, vfs_context_thread(ctx)); nfs_open_file_destroy(newnofp); newnofp = NULL; - if (!error) + if (!error) { goto restart; + } } - if (!error) + if (!error) { error = nfs_open_file_set_busy(newnofp, vfs_context_thread(ctx)); + } if (error) { - if (newnofp) + if (newnofp) { nfs_open_file_destroy(newnofp); + } newnofp = NULL; goto nfsmout; } @@ -7190,8 +7733,9 @@ restart: nofp = newnofp; nofpbusyerror = 0; newnofp = NULL; - if (nofpp) + if (nofpp) { *nofpp = nofp; + } } goto nfsmout; } @@ -7210,8 +7754,9 @@ restart: if (open) { /* need to mark the open owner busy during the RPC */ - if ((error = nfs_open_owner_set_busy(noop, thd))) + if ((error = nfs_open_owner_set_busy(noop, thd))) { goto nfsmout; + } noopbusy = 1; } @@ -7224,10 +7769,12 @@ restart: * At a minimum we're sending: PUTFH, LOOKUP/OPEN, GETATTR, PUTFH, GETATTR */ numops = 5; - if (!hadattrdir) - numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR - if (prefetch) - numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ + if (!hadattrdir) { + numops += 3; // also sending: OPENATTR, GETATTR, OPENATTR + } + if (prefetch) { + numops += 4; // also sending: SAVEFH, RESTOREFH, NVERIFY, READ + } nfsm_chain_build_alloc_init(error, &nmreq, 64 * NFSX_UNSIGNED + cnp->cn_namelen); nfsm_chain_add_compound_header(error, &nmreq, "getnamedattr", nmp->nm_minor_vers, numops); if (hadattrdir) { @@ -7246,7 +7793,7 @@ restart: NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap); NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE); nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap, - NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr); + NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr); } if (open) { numops--; @@ -7261,8 +7808,9 @@ restart: if (create) { nfsm_chain_add_32(error, &nmreq, guarded); VATTR_INIT(&vattr); - if (truncate) + if (truncate) { VATTR_SET(&vattr, va_data_size, 0); + } nfsm_chain_add_fattr4(error, &nmreq, &vattr, nmp); } nfsm_chain_add_32(error, &nmreq, NFS_CLAIM_NULL); @@ -7277,7 +7825,7 @@ restart: NFS_COPY_ATTRIBUTES(nfs_getattr_bitmap, bitmap); NFS_BITMAP_SET(bitmap, NFS_FATTR_FILEHANDLE); nfsm_chain_add_bitmap_masked(error, &nmreq, bitmap, - NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr); + NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr); if (prefetch) { numops--; nfsm_chain_add_32(error, &nmreq, NFS_OP_SAVEFH); @@ -7297,7 +7845,7 @@ restart: numops--; nfsm_chain_add_32(error, &nmreq, NFS_OP_GETATTR); nfsm_chain_add_bitmap_masked(error, &nmreq, nfs_getattr_bitmap, - NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr); + NFS_ATTR_BITMAP_LEN, nmp->nm_fsattr.nfsa_supp_attr); if (prefetch) { numops--; nfsm_chain_add_32(error, &nmreq, NFS_OP_RESTOREFH); @@ -7316,12 +7864,14 @@ restart: nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request_async(hadattrdir ? adnp : np, NULL, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? R_NOINTR: 0, NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, open ? 
R_NOINTR: 0, NULL, &req); + if (!error) { error = nfs_request_async_finish(req, &nmrep, &xid, &status); + } - if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) + if (hadattrdir && ((adlockerror = nfs_node_lock(adnp)))) { error = adlockerror; + } savedxid = xid; nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); @@ -7335,14 +7885,15 @@ restart: if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_FILEHANDLE) && fh.fh_len) { if (!np->n_attrdirfh || (*np->n_attrdirfh != fh.fh_len)) { /* (re)allocate attrdir fh buffer */ - if (np->n_attrdirfh) + if (np->n_attrdirfh) { FREE(np->n_attrdirfh, M_TEMP); - MALLOC(np->n_attrdirfh, u_char*, fh.fh_len+1, M_TEMP, M_WAITOK); + } + MALLOC(np->n_attrdirfh, u_char*, fh.fh_len + 1, M_TEMP, M_WAITOK); } if (np->n_attrdirfh) { /* remember the attrdir fh in the node */ *np->n_attrdirfh = fh.fh_len; - bcopy(fh.fh_data, np->n_attrdirfh+1, fh.fh_len); + bcopy(fh.fh_data, np->n_attrdirfh + 1, fh.fh_len); /* create busied node for attrdir */ struct componentname cn; bzero(&cn, sizeof(cn)); @@ -7373,7 +7924,7 @@ restart: bmlen = NFS_ATTR_BITMAP_LEN; nfsm_chain_get_bitmap(error, &nmrep, bitmap, bmlen); nfsm_chain_get_32(error, &nmrep, delegation); - if (!error) + if (!error) { switch (delegation) { case NFS_OPEN_DELEGATE_NONE: break; @@ -7381,8 +7932,9 @@ restart: case NFS_OPEN_DELEGATE_WRITE: nfsm_chain_get_stateid(error, &nmrep, &dstateid); nfsm_chain_get_32(error, &nmrep, recall); - if (delegation == NFS_OPEN_DELEGATE_WRITE) // space (skip) XXX + if (delegation == NFS_OPEN_DELEGATE_WRITE) { // space (skip) XXX nfsm_chain_adv(error, &nmrep, 3 * NFSX_UNSIGNED); + } /* if we have any trouble accepting the ACE, just invalidate it */ ace_type = ace_flags = ace_mask = len = 0; nfsm_chain_get_32(error, &nmrep, ace_type); @@ -7393,30 +7945,36 @@ restart: ace.ace_flags |= nfs4_ace_nfsflags_to_vfsflags(ace_flags); ace.ace_rights = nfs4_ace_nfsmask_to_vfsrights(ace_mask); if (!error && (len >= slen)) { - MALLOC(s, char*, len+1, M_TEMP, M_WAITOK); - if (s) - slen = len+1; - else + MALLOC(s, char*, len + 1, M_TEMP, M_WAITOK); + if (s) { + slen = len + 1; + } else { ace.ace_flags = 0; + } } - if (s) + if (s) { nfsm_chain_get_opaque(error, &nmrep, len, s); - else + } else { nfsm_chain_adv(error, &nmrep, nfsm_rndup(len)); + } if (!error && s) { s[len] = '\0'; - if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) + if (nfs4_id2guid(s, &ace.ace_applicable, (ace_flags & NFS_ACE_IDENTIFIER_GROUP))) { ace.ace_flags = 0; + } } - if (error || !s) + if (error || !s) { ace.ace_flags = 0; - if (s && (s != sbuf)) + } + if (s && (s != sbuf)) { FREE(s, M_TEMP); + } break; default: error = EBADRPC; break; } + } /* At this point if we have no error, the object was created/opened. 
*/ open_error = error; } else { @@ -7430,11 +7988,13 @@ restart: error = EIO; goto nfsmout; } - if (prefetch) + if (prefetch) { nfsm_chain_op_check(error, &nmrep, NFS_OP_SAVEFH); + } nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); - if (!hadattrdir) + if (!hadattrdir) { nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR); + } nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); nfsmout_if(error); xid = savedxid; @@ -7442,8 +8002,9 @@ restart: nfsmout_if(error); if (open) { - if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) + if (rflags & NFS_OPEN_RESULT_LOCKTYPE_POSIX) { newnofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK; + } if (rflags & NFS_OPEN_RESULT_CONFIRM) { if (adnp) { nfs_node_unlock(adnp); @@ -7453,8 +8014,9 @@ restart: error = nfs4_open_confirm_rpc(nmp, adnp ? adnp : np, fh.fh_data, fh.fh_len, noop, &newnofp->nof_stateid, thd, cred, &nvattr, &xid); nfsmout_if(error); savedxid = xid; - if ((adlockerror = nfs_node_lock(adnp))) + if ((adlockerror = nfs_node_lock(adnp))) { error = adlockerror; + } } } @@ -7507,8 +8069,9 @@ nfsmout: nofpbusyerror = nfs_open_file_set_busy(nofp, NULL); nfs_open_file_add_open(nofp, accessMode, denyMode, 0); nofp->nof_stateid = newnofp->nof_stateid; - if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) + if (newnofp->nof_flags & NFS_OPEN_FILE_POSIXLOCK) { nofp->nof_flags |= NFS_OPEN_FILE_POSIXLOCK; + } nfs_open_file_clear_busy(newnofp); nfs_open_file_destroy(newnofp); newnofp = NULL; @@ -7519,8 +8082,9 @@ nfsmout: /* mark the node as holding a create-initiated open */ nofp->nof_flags |= NFS_OPEN_FILE_CREATE; nofp->nof_creator = current_thread(); - if (nofpp) + if (nofpp) { *nofpp = nofp; + } } } } @@ -7535,8 +8099,9 @@ nfsmout: anp->n_dace = ace; if (anp->n_dlink.tqe_next == NFSNOLIST) { lck_mtx_lock(&nmp->nm_lock); - if (anp->n_dlink.tqe_next == NFSNOLIST) + if (anp->n_dlink.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink); + } lck_mtx_unlock(&nmp->nm_lock); } lck_mtx_unlock(&anp->n_openlock); @@ -7552,8 +8117,9 @@ nfsmout: anp->n_dace = ace; if (anp->n_dlink.tqe_next == NFSNOLIST) { lck_mtx_lock(&nmp->nm_lock); - if (anp->n_dlink.tqe_next == NFSNOLIST) + if (anp->n_dlink.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nmp->nm_delegations, anp, n_dlink); + } lck_mtx_unlock(&nmp->nm_lock); } lck_mtx_unlock(&anp->n_openlock); @@ -7563,8 +8129,9 @@ nfsmout: /* return anp's current delegation */ nfs4_delegation_return(anp, 0, thd, cred); } - if (fh.fh_len) /* return fh's delegation if it wasn't for anp */ + if (fh.fh_len) { /* return fh's delegation if it wasn't for anp */ nfs4_delegreturn_rpc(nmp, fh.fh_data, fh.fh_len, &dstateid, 0, thd, cred); + } } } if (open) { @@ -7633,15 +8200,17 @@ nfsmout: retlen = MIN(retlen, rlen); /* check if node needs size update or invalidation */ - if (ISSET(anp->n_flag, NUPDATESIZE)) + if (ISSET(anp->n_flag, NUPDATESIZE)) { nfs_data_update_size(anp, 0); + } if (!(error = nfs_node_lock(anp))) { if (anp->n_flag & NNEEDINVALIDATE) { anp->n_flag &= ~NNEEDINVALIDATE; nfs_node_unlock(anp); - error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE|V_IGNORE_WRITEERR, ctx, 1); - if (!error) /* lets play it safe and just drop the data */ + error = nfs_vinvalbuf(NFSTOV(anp), V_SAVE | V_IGNORE_WRITEERR, ctx, 1); + if (!error) { /* lets play it safe and just drop the data */ error = EIO; + } } else { nfs_node_unlock(anp); } @@ -7651,13 +8220,14 @@ nfsmout: lastpg = (trunc_page_32(retlen) - 1) / PAGE_SIZE; pagemask = ((1 << (lastpg + 1)) - 1); - if (!error) - error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, 
&bp); + if (!error) { + error = nfs_buf_get(anp, 0, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp); + } /* don't save the data if dirty or potential I/O conflict */ if (!error && bp && !bp->nb_dirtyoff && !(bp->nb_dirty & pagemask) && timevalcmp(&anp->n_lastio, &now, <)) { OSAddAtomic64(1, &nfsstats.read_bios); - CLR(bp->nb_flags, (NB_DONE|NB_ASYNC)); + CLR(bp->nb_flags, (NB_DONE | NB_ASYNC)); SET(bp->nb_flags, NB_READ); NFS_BUF_MAP(bp); nfsm_chain_get_opaque(error, &nmrep, retlen, bp->nb_data); @@ -7667,16 +8237,18 @@ nfsmout: } else { bp->nb_offio = 0; bp->nb_endio = rlen; - if ((retlen > 0) && (bp->nb_endio < (int)retlen)) + if ((retlen > 0) && (bp->nb_endio < (int)retlen)) { bp->nb_endio = retlen; + } if (eof || (retlen == 0)) { /* zero out the remaining data (up to EOF) */ off_t rpcrem, eofrem, rem; rpcrem = (rlen - retlen); eofrem = anp->n_size - (NBOFF(bp) + retlen); rem = (rpcrem < eofrem) ? rpcrem : eofrem; - if (rem > 0) + if (rem > 0) { bzero(bp->nb_data + retlen, rem); + } } else if ((retlen < rlen) && !ISSET(bp->nb_flags, NB_ERROR)) { /* ugh... short read ... just invalidate for now... */ SET(bp->nb_flags, NB_INVAL); @@ -7685,8 +8257,9 @@ nfsmout: nfs_buf_read_finish(bp); microuptime(&anp->n_lastio); } - if (bp) + if (bp) { nfs_buf_release(bp, 1); + } } error = 0; /* ignore any transient error in processing the prefetch */ } @@ -7698,15 +8271,16 @@ nfsmout: nfs_node_clear_busy(np); busyerror = ENOENT; } - if (adnp) + if (adnp) { vnode_put(NFSTOV(adnp)); + } if (error && *anpp) { vnode_put(NFSTOV(*anpp)); *anpp = NULL; } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -7722,8 +8296,9 @@ nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_contex int error, putanp = 0; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(name, const, char *); @@ -7733,9 +8308,10 @@ nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_contex if (!anp) { error = nfs4_named_attr_get(np, &cn, NFS_OPEN_SHARE_ACCESS_NONE, - 0, ctx, &anp, NULL); - if ((!error && !anp) || (error == ENOATTR)) + 0, ctx, &anp, NULL); + if ((!error && !anp) || (error == ENOATTR)) { error = ENOENT; + } if (error) { if (anp) { vnode_put(NFSTOV(anp)); @@ -7746,8 +8322,9 @@ nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_contex putanp = 1; } - if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) + if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) { goto out; + } adnp = nfs4_named_attr_dir_get(np, 1, ctx); nfs_node_clear_busy(np); if (!adnp) { @@ -7763,24 +8340,26 @@ nfs4_named_attr_remove(nfsnode_t np, nfsnode_t anp, const char *name, vfs_contex vra.a_context = ctx; error = nfs_vnop_remove(&vra); out: - if (adnp) + if (adnp) { vnode_put(NFSTOV(adnp)); - if (putanp) + } + if (putanp) { vnode_put(NFSTOV(anp)); - return (error); + } + return error; } int nfs4_vnop_getxattr( struct vnop_getxattr_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - const char * a_name; - uio_t a_uio; - size_t *a_size; - int a_options; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * const char * a_name; + * uio_t a_uio; + * size_t *a_size; + * int a_options; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; struct nfsmount *nmp; @@ -7790,17 +8369,21 @@ nfs4_vnop_getxattr( int error = 0, isrsrcfork; nmp = 
VTONMP(ap->a_vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) - return (ENOTSUP); + if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { + return ENOTSUP; + } error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED); - if (error) - return (error); + if (error) { + return error; + } if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) && - !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) - return (ENOATTR); + !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { + return ENOATTR; + } bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *); @@ -7812,30 +8395,33 @@ nfs4_vnop_getxattr( isrsrcfork = (bcmp(ap->a_name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0); error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE, - !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL); - if ((!error && !anp) || (error == ENOENT)) + !isrsrcfork ? NFS_GET_NAMED_ATTR_PREFETCH : 0, ctx, &anp, NULL); + if ((!error && !anp) || (error == ENOENT)) { error = ENOATTR; + } if (!error) { - if (ap->a_uio) + if (ap->a_uio) { error = nfs_bioread(anp, ap->a_uio, 0, ctx); - else + } else { *ap->a_size = anp->n_size; + } } - if (anp) + if (anp) { vnode_put(NFSTOV(anp)); - return (error); + } + return error; } int nfs4_vnop_setxattr( struct vnop_setxattr_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - const char * a_name; - uio_t a_uio; - int a_options; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * const char * a_name; + * uio_t a_uio; + * int a_options; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; int options = ap->a_options; @@ -7849,24 +8435,27 @@ nfs4_vnop_setxattr( uint8_t finfo[FINDERINFOSIZE]; uint32_t *finfop; struct nfs_open_file *nofp = NULL; - char uio_buf [ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; uio_t auio; struct vnop_write_args vwa; nmp = VTONMP(ap->a_vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) - return (ENOTSUP); + if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { + return ENOTSUP; + } - if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) - return (EINVAL); + if ((options & XATTR_CREATE) && (options & XATTR_REPLACE)) { + return EINVAL; + } /* XXX limitation based on need to back up uio on short write */ if (uio_iovcnt(uio) > 1) { printf("nfs4_vnop_setxattr: iovcnt > 1\n"); - return (EINVAL); + return EINVAL; } bzero(&cn, sizeof(cn)); @@ -7877,26 +8466,31 @@ nfs4_vnop_setxattr( isfinderinfo = (bcmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0); isrsrcfork = isfinderinfo ? 
0 : (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) == 0); - if (!isrsrcfork) + if (!isrsrcfork) { uio_setoffset(uio, 0); + } if (isfinderinfo) { - if (uio_resid(uio) != sizeof(finfo)) - return (ERANGE); + if (uio_resid(uio) != sizeof(finfo)) { + return ERANGE; + } error = uiomove((char*)&finfo, sizeof(finfo), uio); - if (error) - return (error); + if (error) { + return error; + } /* setting a FinderInfo of all zeroes means remove the FinderInfo */ empty = 1; - for (i=0, finfop=(uint32_t*)&finfo; i < (int)(sizeof(finfo)/sizeof(uint32_t)); i++) + for (i = 0, finfop = (uint32_t*)&finfo; i < (int)(sizeof(finfo) / sizeof(uint32_t)); i++) { if (finfop[i]) { empty = 0; break; } - if (empty && !(options & (XATTR_CREATE|XATTR_REPLACE))) { + } + if (empty && !(options & (XATTR_CREATE | XATTR_REPLACE))) { error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx); - if (error == ENOENT) + if (error == ENOENT) { error = 0; - return (error); + } + return error; } /* first, let's see if we get a create/replace error */ } @@ -7910,19 +8504,24 @@ nfs4_vnop_setxattr( * that by setting the size to 0 on create/open. */ flags = 0; - if (!(options & XATTR_REPLACE)) + if (!(options & XATTR_REPLACE)) { flags |= NFS_GET_NAMED_ATTR_CREATE; - if (options & XATTR_CREATE) + } + if (options & XATTR_CREATE) { flags |= NFS_GET_NAMED_ATTR_CREATE_GUARDED; - if (!isrsrcfork) + } + if (!isrsrcfork) { flags |= NFS_GET_NAMED_ATTR_TRUNCATE; + } error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH, - flags, ctx, &anp, &nofp); - if (!error && !anp) + flags, ctx, &anp, &nofp); + if (!error && !anp) { error = ENOATTR; - if (error) + } + if (error) { goto out; + } /* grab the open state from the get/create/open */ if (nofp && !(error = nfs_open_file_set_busy(nofp, NULL))) { nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE; @@ -7931,8 +8530,9 @@ nfs4_vnop_setxattr( } /* Setting an empty FinderInfo really means remove it, skip to the close/remove */ - if (isfinderinfo && empty) + if (isfinderinfo && empty) { goto doclose; + } /* * Write the data out and flush. @@ -7953,66 +8553,75 @@ nfs4_vnop_setxattr( } if (vwa.a_uio) { error = nfs_vnop_write(&vwa); - if (!error) + if (!error) { error = nfs_flush(anp, MNT_WAIT, vfs_context_thread(ctx), 0); + } } doclose: /* Close the xattr. 
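nfs4_vnop_setxattr() above special-cases com.apple.FinderInfo: a payload of all zeroes means "remove the attribute", which it detects by scanning the 32-byte blob one 32-bit word at a time. A stand-alone sketch of that emptiness test; FINDERINFOSIZE is 32 in the real headers, and a memcpy load is used here instead of the kernel's direct uint32_t* cast so the sketch stays strictly portable:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FINDERINFOSIZE 32 /* size of the FinderInfo xattr payload */

/* return 1 if the blob is all zeroes, i.e. "remove the FinderInfo" */
static int
finderinfo_is_empty(const uint8_t finfo[FINDERINFOSIZE])
{
	uint32_t word;
	size_t i;

	for (i = 0; i + sizeof(word) <= FINDERINFOSIZE; i += sizeof(word)) {
		memcpy(&word, finfo + i, sizeof(word)); /* alignment-safe load */
		if (word) {
			return 0; /* any nonzero word means real data */
		}
	}
	return 1;
}

int
main(void)
{
	uint8_t finfo[FINDERINFOSIZE];

	memset(finfo, 0, sizeof(finfo));
	printf("empty? %d\n", finderinfo_is_empty(finfo)); /* 1: remove */
	finfo[4] = 0x42;
	printf("empty? %d\n", finderinfo_is_empty(finfo)); /* 0: set */
	return 0;
}

Note that when XATTR_CREATE or XATTR_REPLACE is set, the empty case above is still routed through the create/open first ("let's see if we get a create/replace error") so the caller sees the proper guarded-create error before the removal happens.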
*/ if (nofp) { int busyerror = nfs_open_file_set_busy(nofp, NULL); closeerror = nfs_close(anp, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx); - if (!busyerror) + if (!busyerror) { nfs_open_file_clear_busy(nofp); + } } - if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */ + if (!error && isfinderinfo && empty) { /* Setting an empty FinderInfo really means remove it */ error = nfs4_named_attr_remove(VTONFS(ap->a_vp), anp, name, ctx); - if (error == ENOENT) + if (error == ENOENT) { error = 0; + } } - if (!error) + if (!error) { error = closeerror; + } out: - if (anp) + if (anp) { vnode_put(NFSTOV(anp)); - if (error == ENOENT) + } + if (error == ENOENT) { error = ENOATTR; - return (error); + } + return error; } int nfs4_vnop_removexattr( struct vnop_removexattr_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - const char * a_name; - int a_options; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * const char * a_name; + * int a_options; + * vfs_context_t a_context; + * } */*ap) { struct nfsmount *nmp = VTONMP(ap->a_vp); int error; - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) - return (ENOTSUP); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { + return ENOTSUP; + } error = nfs4_named_attr_remove(VTONFS(ap->a_vp), NULL, ap->a_name, ap->a_context); - if (error == ENOENT) + if (error == ENOENT) { error = ENOATTR; - return (error); + } + return error; } int nfs4_vnop_listxattr( struct vnop_listxattr_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - uio_t a_uio; - size_t *a_size; - int a_options; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * uio_t a_uio; + * size_t *a_size; + * int a_options; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; nfsnode_t np = VTONFS(ap->a_vp); @@ -8027,38 +8636,47 @@ nfs4_vnop_listxattr( struct direntry *dp; nmp = VTONMP(ap->a_vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) - return (ENOTSUP); + if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { + return ENOTSUP; + } error = nfs_getattr(np, &nvattr, ctx, NGA_CACHED); - if (error) - return (error); + if (error) { + return error; + } if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) && - !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) - return (0); + !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { + return 0; + } - if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) - return (error); + if ((error = nfs_node_set_busy(np, vfs_context_thread(ctx)))) { + return error; + } adnp = nfs4_named_attr_dir_get(np, 1, ctx); nfs_node_clear_busy(np); - if (!adnp) + if (!adnp) { goto out; + } - if ((error = nfs_node_lock(adnp))) + if ((error = nfs_node_lock(adnp))) { goto out; + } if (adnp->n_flag & NNEEDINVALIDATE) { adnp->n_flag &= ~NNEEDINVALIDATE; nfs_invaldir(adnp); nfs_node_unlock(adnp); error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1); - if (!error) + if (!error) { error = nfs_node_lock(adnp); - if (error) + } + if (error) { goto out; + } } /* @@ -8067,17 +8685,20 @@ nfs4_vnop_listxattr( if (adnp->n_flag & NMODIFIED) { nfs_invaldir(adnp); nfs_node_unlock(adnp); - if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) + if ((error = nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1))) 
{ goto out; + } } else { nfs_node_unlock(adnp); } /* nfs_getattr() will check changed and purge caches */ - if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) + if ((error = nfs_getattr(adnp, &nvattr, ctx, NGA_UNCACHED))) { goto out; + } - if (uio && (uio_resid(uio) == 0)) + if (uio && (uio_resid(uio) == 0)) { goto out; + } done = 0; nextcookie = lbn = 0; @@ -8087,8 +8708,9 @@ nfs4_vnop_listxattr( cookie = nextcookie; getbuffer: error = nfs_buf_get(adnp, lbn, NFS_DIRBLKSIZ, vfs_context_thread(ctx), NBLK_READ, &bp); - if (error) + if (error) { goto out; + } ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) { if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */ @@ -8098,35 +8720,40 @@ getbuffer: ndbhp->ndbh_ncgen = adnp->n_ncgen; } error = nfs_buf_readdir(bp, ctx); - if (error == NFSERR_DIRBUFDROPPED) + if (error == NFSERR_DIRBUFDROPPED) { goto getbuffer; - if (error) + } + if (error) { nfs_buf_release(bp, 1); + } if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) { if (!nfs_node_lock(adnp)) { nfs_invaldir(adnp); nfs_node_unlock(adnp); } nfs_vinvalbuf(NFSTOV(adnp), 0, ctx, 1); - if (error == NFSERR_BAD_COOKIE) + if (error == NFSERR_BAD_COOKIE) { error = ENOENT; + } } - if (error) + if (error) { goto out; + } } /* go through all the entries copying/counting */ dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp); - for (i=0; i < ndbhp->ndbh_count; i++) { + for (i = 0; i < ndbhp->ndbh_count; i++) { if (!xattr_protected(dp->d_name)) { if (uio == NULL) { *ap->a_size += dp->d_namlen + 1; } else if (uio_resid(uio) < (dp->d_namlen + 1)) { error = ERANGE; } else { - error = uiomove(dp->d_name, dp->d_namlen+1, uio); - if (error && (error != EFAULT)) + error = uiomove(dp->d_name, dp->d_namlen + 1, uio); + if (error && (error != EFAULT)) { error = ERANGE; + } } } nextcookie = dp->d_seekoff; @@ -8137,8 +8764,9 @@ getbuffer: /* hit end of buffer, move to next buffer */ lbn = nextcookie; /* if we also hit EOF, we're done */ - if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) + if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) { done = 1; + } } if (!error && !done && (nextcookie == cookie)) { printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count); @@ -8147,23 +8775,24 @@ getbuffer: nfs_buf_release(bp, 1); } out: - if (adnp) + if (adnp) { vnode_put(NFSTOV(adnp)); - return (error); + } + return error; } #if NAMEDSTREAMS int nfs4_vnop_getnamedstream( struct vnop_getnamedstream_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - vnode_t *a_svpp; - const char *a_name; - enum nsoperation a_operation; - int a_flags; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * vnode_t *a_svpp; + * const char *a_name; + * enum nsoperation a_operation; + * int a_flags; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; struct nfsmount *nmp; @@ -8173,17 +8802,21 @@ nfs4_vnop_getnamedstream( int error = 0; nmp = VTONMP(ap->a_vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) - return (ENOTSUP); + if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { + return ENOTSUP; + } error = nfs_getattr(VTONFS(ap->a_vp), &nvattr, ctx, NGA_CACHED); - if (error) - return (error); + if (error) { + return error; + } if (NFS_BITMAP_ISSET(nvattr.nva_bitmap, NFS_FATTR_NAMED_ATTR) && - !(nvattr.nva_flags & 
NFS_FFLAG_HAS_NAMED_ATTRS)) - return (ENOATTR); + !(nvattr.nva_flags & NFS_FFLAG_HAS_NAMED_ATTRS)) { + return ENOATTR; + } bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *); @@ -8192,26 +8825,28 @@ nfs4_vnop_getnamedstream( cn.cn_flags = MAKEENTRY; error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_NONE, - 0, ctx, &anp, NULL); - if ((!error && !anp) || (error == ENOENT)) + 0, ctx, &anp, NULL); + if ((!error && !anp) || (error == ENOENT)) { error = ENOATTR; - if (!error && anp) + } + if (!error && anp) { *ap->a_svpp = NFSTOV(anp); - else if (anp) + } else if (anp) { vnode_put(NFSTOV(anp)); - return (error); + } + return error; } int nfs4_vnop_makenamedstream( struct vnop_makenamedstream_args /* { - struct vnodeop_desc *a_desc; - vnode_t *a_svpp; - vnode_t a_vp; - const char *a_name; - int a_flags; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t *a_svpp; + * vnode_t a_vp; + * const char *a_name; + * int a_flags; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; struct nfsmount *nmp; @@ -8220,11 +8855,13 @@ nfs4_vnop_makenamedstream( int error = 0; nmp = VTONMP(ap->a_vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) - return (ENOTSUP); + if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { + return ENOTSUP; + } bzero(&cn, sizeof(cn)); cn.cn_nameptr = __CAST_AWAY_QUALIFIER(ap->a_name, const, char *); @@ -8233,42 +8870,46 @@ nfs4_vnop_makenamedstream( cn.cn_flags = MAKEENTRY; error = nfs4_named_attr_get(VTONFS(ap->a_vp), &cn, NFS_OPEN_SHARE_ACCESS_BOTH, - NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL); - if ((!error && !anp) || (error == ENOENT)) + NFS_GET_NAMED_ATTR_CREATE, ctx, &anp, NULL); + if ((!error && !anp) || (error == ENOENT)) { error = ENOATTR; - if (!error && anp) + } + if (!error && anp) { *ap->a_svpp = NFSTOV(anp); - else if (anp) + } else if (anp) { vnode_put(NFSTOV(anp)); - return (error); + } + return error; } int nfs4_vnop_removenamedstream( struct vnop_removenamedstream_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - vnode_t a_svp; - const char *a_name; - int a_flags; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * vnode_t a_svp; + * const char *a_name; + * int a_flags; + * vfs_context_t a_context; + * } */*ap) { struct nfsmount *nmp = VTONMP(ap->a_vp); nfsnode_t np = ap->a_vp ? VTONFS(ap->a_vp) : NULL; nfsnode_t anp = ap->a_svp ? VTONFS(ap->a_svp) : NULL; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } /* * Given that a_svp is a named stream, checking for * named attribute support is kinda pointless. */ - if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) - return (ENOTSUP); + if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR)) { + return ENOTSUP; + } - return (nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context)); + return nfs4_named_attr_remove(np, anp, ap->a_name, ap->a_context); } #endif diff --git a/bsd/nfs/nfs_bio.c b/bsd/nfs/nfs_bio.c index d65d98a1a..cb1f92939 100644 --- a/bsd/nfs/nfs_bio.c +++ b/bsd/nfs/nfs_bio.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -99,11 +99,11 @@ #define NFS_BIO_DBG(...) NFS_DBG(NFS_FAC_BIO, 7, ## __VA_ARGS__) -kern_return_t thread_terminate(thread_t); /* XXX */ +kern_return_t thread_terminate(thread_t); /* XXX */ -#define NFSBUFHASH(np, lbn) \ +#define NFSBUFHASH(np, lbn) \ (&nfsbufhashtbl[((long)(np) / sizeof(*(np)) + (int)(lbn)) & nfsbufhash]) -LIST_HEAD(nfsbufhashhead, nfsbuf) *nfsbufhashtbl; +LIST_HEAD(nfsbufhashhead, nfsbuf) * nfsbufhashtbl; struct nfsbuffreehead nfsbuffree, nfsbuffreemeta, nfsbufdelwri; u_long nfsbufhash; int nfsbufcnt, nfsbufmin, nfsbufmax, nfsbufmetacnt, nfsbufmetamax; @@ -115,32 +115,32 @@ thread_t nfsbufdelwrithd = NULL; lck_grp_t *nfs_buf_lck_grp; lck_mtx_t *nfs_buf_mutex; -#define NFSBUF_FREE_PERIOD 30 /* seconds */ -#define NFSBUF_LRU_STALE 120 -#define NFSBUF_META_STALE 240 +#define NFSBUF_FREE_PERIOD 30 /* seconds */ +#define NFSBUF_LRU_STALE 120 +#define NFSBUF_META_STALE 240 /* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list */ -#define LRU_TO_FREEUP 6 +#define LRU_TO_FREEUP 6 /* number of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list */ -#define META_TO_FREEUP 3 +#define META_TO_FREEUP 3 /* total number of nfsbufs nfs_buf_freeup() should attempt to free */ -#define TOTAL_TO_FREEUP (LRU_TO_FREEUP+META_TO_FREEUP) +#define TOTAL_TO_FREEUP (LRU_TO_FREEUP+META_TO_FREEUP) /* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffree list when called from timer */ -#define LRU_FREEUP_FRAC_ON_TIMER 8 +#define LRU_FREEUP_FRAC_ON_TIMER 8 /* fraction of nfsbufs nfs_buf_freeup() should attempt to free from nfsbuffreemeta list when called from timer */ -#define META_FREEUP_FRAC_ON_TIMER 16 +#define META_FREEUP_FRAC_ON_TIMER 16 /* fraction of total nfsbufs that nfsbuffreecnt should exceed before bothering to call nfs_buf_freeup() */ -#define LRU_FREEUP_MIN_FRAC 4 +#define LRU_FREEUP_MIN_FRAC 4 /* fraction of total nfsbufs that nfsbuffreemetacnt should exceed before bothering to call nfs_buf_freeup() */ -#define META_FREEUP_MIN_FRAC 2 +#define META_FREEUP_MIN_FRAC 2 #define NFS_BUF_FREEUP() \ do { \ - /* only call nfs_buf_freeup() if it has work to do: */ \ - if (((nfsbuffreecnt > nfsbufcnt/LRU_FREEUP_MIN_FRAC) || \ - (nfsbuffreemetacnt > nfsbufcnt/META_FREEUP_MIN_FRAC)) && \ - ((nfsbufcnt - TOTAL_TO_FREEUP) > nfsbufmin)) \ - nfs_buf_freeup(0); \ + /* only call nfs_buf_freeup() if it has work to do: */ \ + if (((nfsbuffreecnt > nfsbufcnt/LRU_FREEUP_MIN_FRAC) || \ + (nfsbuffreemetacnt > nfsbufcnt/META_FREEUP_MIN_FRAC)) && \ + ((nfsbufcnt - TOTAL_TO_FREEUP) > nfsbufmin)) \ + nfs_buf_freeup(0); \ } while (0) /* @@ 
-153,7 +153,7 @@ nfs_nbinit(void) nfs_buf_mutex = lck_mtx_alloc_init(nfs_buf_lck_grp, LCK_ATTR_NULL); nfsbufcnt = nfsbufmetacnt = - nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0; + nfsbuffreecnt = nfsbuffreemetacnt = nfsbufdelwricnt = 0; nfsbufmin = 128; /* size nfsbufmax to cover at most half sane_size (w/default buf size) */ nfsbufmax = (sane_size >> PAGE_SHIFT) / (2 * (NFS_RWSIZE >> PAGE_SHIFT)); @@ -161,11 +161,10 @@ nfs_nbinit(void) nfsneedbuffer = 0; nfs_nbdwrite = 0; - nfsbufhashtbl = hashinit(nfsbufmax/4, M_TEMP, &nfsbufhash); + nfsbufhashtbl = hashinit(nfsbufmax / 4, M_TEMP, &nfsbufhash); TAILQ_INIT(&nfsbuffree); TAILQ_INIT(&nfsbuffreemeta); TAILQ_INIT(&nfsbufdelwri); - } /* @@ -185,7 +184,7 @@ nfs_buf_timer(__unused void *param0, __unused void *param1) lck_mtx_unlock(nfs_buf_mutex); nfs_interval_timer_start(nfs_buf_timer_call, - NFSBUF_FREE_PERIOD * 1000); + NFSBUF_FREE_PERIOD * 1000); } /* @@ -207,16 +206,19 @@ nfs_buf_freeup(int timer) FSDBG(320, nfsbufcnt, nfsbuffreecnt, nfsbuffreemetacnt, 0); - count = timer ? nfsbuffreecnt/LRU_FREEUP_FRAC_ON_TIMER : LRU_TO_FREEUP; + count = timer ? nfsbuffreecnt / LRU_FREEUP_FRAC_ON_TIMER : LRU_TO_FREEUP; while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) { fbp = TAILQ_FIRST(&nfsbuffree); - if (!fbp) + if (!fbp) { break; - if (fbp->nb_refs) + } + if (fbp->nb_refs) { break; + } if (NBUFSTAMPVALID(fbp) && - (fbp->nb_timestamp + (2*NFSBUF_LRU_STALE)) > now.tv_sec) + (fbp->nb_timestamp + (2 * NFSBUF_LRU_STALE)) > now.tv_sec) { break; + } nfs_buf_remfree(fbp); /* disassociate buffer from any nfsnode */ if (fbp->nb_np) { @@ -231,16 +233,19 @@ nfs_buf_freeup(int timer) nfsbufcnt--; } - count = timer ? nfsbuffreemetacnt/META_FREEUP_FRAC_ON_TIMER : META_TO_FREEUP; + count = timer ? nfsbuffreemetacnt / META_FREEUP_FRAC_ON_TIMER : META_TO_FREEUP; while ((nfsbufcnt > nfsbufmin) && (count-- > 0)) { fbp = TAILQ_FIRST(&nfsbuffreemeta); - if (!fbp) + if (!fbp) { break; - if (fbp->nb_refs) + } + if (fbp->nb_refs) { break; + } if (NBUFSTAMPVALID(fbp) && - (fbp->nb_timestamp + (2*NFSBUF_META_STALE)) > now.tv_sec) + (fbp->nb_timestamp + (2 * NFSBUF_META_STALE)) > now.tv_sec) { break; + } nfs_buf_remfree(fbp); /* disassociate buffer from any nfsnode */ if (fbp->nb_np) { @@ -264,16 +269,18 @@ nfs_buf_freeup(int timer) while ((fbp = TAILQ_FIRST(&nfsbuffreeup))) { TAILQ_REMOVE(&nfsbuffreeup, fbp, nb_free); /* nuke any creds */ - if (IS_VALID_CRED(fbp->nb_rcred)) + if (IS_VALID_CRED(fbp->nb_rcred)) { kauth_cred_unref(&fbp->nb_rcred); - if (IS_VALID_CRED(fbp->nb_wcred)) + } + if (IS_VALID_CRED(fbp->nb_wcred)) { kauth_cred_unref(&fbp->nb_wcred); + } /* if buf was NB_META, dump buffer */ - if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data) + if (ISSET(fbp->nb_flags, NB_META) && fbp->nb_data) { kfree(fbp->nb_data, fbp->nb_bufsize); + } FREE(fbp, M_TEMP); } - } /* @@ -283,8 +290,9 @@ nfs_buf_freeup(int timer) void nfs_buf_remfree(struct nfsbuf *bp) { - if (bp->nb_free.tqe_next == NFSNOLIST) + if (bp->nb_free.tqe_next == NFSNOLIST) { panic("nfsbuf not on free list"); + } if (ISSET(bp->nb_flags, NB_DELWRI)) { nfsbufdelwricnt--; TAILQ_REMOVE(&nfsbufdelwri, bp, nb_free); @@ -307,12 +315,13 @@ nfs_buf_is_incore(nfsnode_t np, daddr64_t blkno) { boolean_t rv; lck_mtx_lock(nfs_buf_mutex); - if (nfs_buf_incore(np, blkno)) + if (nfs_buf_incore(np, blkno)) { rv = TRUE; - else + } else { rv = FALSE; + } lck_mtx_unlock(nfs_buf_mutex); - return (rv); + return rv; } /* @@ -323,14 +332,15 @@ nfs_buf_incore(nfsnode_t np, daddr64_t blkno) { /* Search hash chain */ struct nfsbuf * 
bp = NFSBUFHASH(np, blkno)->lh_first; - for (; bp != NULL; bp = bp->nb_hash.le_next) + for (; bp != NULL; bp = bp->nb_hash.le_next) { if ((bp->nb_lblkno == blkno) && (bp->nb_np == np)) { if (!ISSET(bp->nb_flags, NB_INVAL)) { FSDBG(547, bp, blkno, bp->nb_flags, bp->nb_np); - return (bp); + return bp; } } - return (NULL); + } + return NULL; } /* @@ -348,13 +358,15 @@ nfs_buf_page_inval(vnode_t vp, off_t offset) struct nfsbuf *bp; int error = 0; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } lck_mtx_lock(nfs_buf_mutex); bp = nfs_buf_incore(VTONFS(vp), (daddr64_t)(offset / nmp->nm_biosize)); - if (!bp) + if (!bp) { goto out; + } FSDBG(325, bp, bp->nb_flags, bp->nb_dirtyoff, bp->nb_dirtyend); if (ISSET(bp->nb_lflags, NBL_BUSY)) { error = EBUSY; @@ -384,7 +396,7 @@ nfs_buf_page_inval(vnode_t vp, off_t offset) } out: lck_mtx_unlock(nfs_buf_mutex); - return (error); + return error; } /* @@ -398,8 +410,9 @@ nfs_buf_upl_setup(struct nfsbuf *bp) upl_t upl; int upl_flags; - if (ISSET(bp->nb_flags, NB_PAGELIST)) - return (0); + if (ISSET(bp->nb_flags, NB_PAGELIST)) { + return 0; + } upl_flags = UPL_PRECIOUS; if (!ISSET(bp->nb_flags, NB_READ)) { @@ -410,23 +423,23 @@ nfs_buf_upl_setup(struct nfsbuf *bp) upl_flags |= UPL_WILL_MODIFY; } kret = ubc_create_upl_kernel(NFSTOV(bp->nb_np), NBOFF(bp), bp->nb_bufsize, - &upl, NULL, upl_flags, VM_KERN_MEMORY_FILE); + &upl, NULL, upl_flags, VM_KERN_MEMORY_FILE); if (kret == KERN_INVALID_ARGUMENT) { /* vm object probably doesn't exist any more */ bp->nb_pagelist = NULL; - return (EINVAL); + return EINVAL; } if (kret != KERN_SUCCESS) { printf("nfs_buf_upl_setup(): failed to get pagelist %d\n", kret); bp->nb_pagelist = NULL; - return (EIO); + return EIO; } FSDBG(538, bp, NBOFF(bp), bp->nb_bufsize, bp->nb_np); bp->nb_pagelist = upl; SET(bp->nb_flags, NB_PAGELIST); - return (0); + return 0; } /* @@ -440,38 +453,43 @@ nfs_buf_upl_check(struct nfsbuf *bp) off_t filesize, fileoffset; int i, npages; - if (!ISSET(bp->nb_flags, NB_PAGELIST)) + if (!ISSET(bp->nb_flags, NB_PAGELIST)) { return; + } npages = round_page_32(bp->nb_bufsize) / PAGE_SIZE; filesize = ubc_getsize(NFSTOV(bp->nb_np)); fileoffset = NBOFF(bp); - if (fileoffset < filesize) + if (fileoffset < filesize) { SET(bp->nb_flags, NB_CACHE); - else + } else { CLR(bp->nb_flags, NB_CACHE); + } pl = ubc_upl_pageinfo(bp->nb_pagelist); bp->nb_valid = bp->nb_dirty = 0; - for (i=0; i < npages; i++, fileoffset += PAGE_SIZE_64) { + for (i = 0; i < npages; i++, fileoffset += PAGE_SIZE_64) { /* anything beyond the end of the file is not valid or dirty */ - if (fileoffset >= filesize) + if (fileoffset >= filesize) { break; + } if (!upl_valid_page(pl, i)) { CLR(bp->nb_flags, NB_CACHE); continue; } - NBPGVALID_SET(bp,i); - if (upl_dirty_page(pl, i)) + NBPGVALID_SET(bp, i); + if (upl_dirty_page(pl, i)) { NBPGDIRTY_SET(bp, i); + } } fileoffset = NBOFF(bp); if (ISSET(bp->nb_flags, NB_CACHE)) { bp->nb_validoff = 0; bp->nb_validend = bp->nb_bufsize; - if (fileoffset + bp->nb_validend > filesize) + if (fileoffset + bp->nb_validend > filesize) { bp->nb_validend = filesize - fileoffset; + } } else { bp->nb_validoff = bp->nb_validend = -1; } @@ -488,18 +506,22 @@ nfs_buf_map(struct nfsbuf *bp) { kern_return_t kret; - if (bp->nb_data) - return (0); - if (!ISSET(bp->nb_flags, NB_PAGELIST)) - return (EINVAL); + if (bp->nb_data) { + return 0; + } + if (!ISSET(bp->nb_flags, NB_PAGELIST)) { + return EINVAL; + } kret = ubc_upl_map(bp->nb_pagelist, (vm_offset_t *)&(bp->nb_data)); - if (kret != 
KERN_SUCCESS) + if (kret != KERN_SUCCESS) { panic("nfs_buf_map: ubc_upl_map() failed with (%d)", kret); - if (bp->nb_data == 0) + } + if (bp->nb_data == 0) { panic("ubc_upl_map mapped 0"); + } FSDBG(540, bp, bp->nb_flags, NBOFF(bp), bp->nb_data); - return (0); + return 0; } /* @@ -516,19 +538,22 @@ nfs_buf_normalize_valid_range(nfsnode_t np, struct nfsbuf *bp) { int pg, npg; /* pull validoff back to start of contiguous valid page range */ - pg = bp->nb_validoff/PAGE_SIZE; - while (pg >= 0 && NBPGVALID(bp,pg)) + pg = bp->nb_validoff / PAGE_SIZE; + while (pg >= 0 && NBPGVALID(bp, pg)) { pg--; - bp->nb_validoff = (pg+1) * PAGE_SIZE; + } + bp->nb_validoff = (pg + 1) * PAGE_SIZE; /* push validend forward to end of contiguous valid page range */ - npg = bp->nb_bufsize/PAGE_SIZE; - pg = bp->nb_validend/PAGE_SIZE; - while (pg < npg && NBPGVALID(bp,pg)) + npg = bp->nb_bufsize / PAGE_SIZE; + pg = bp->nb_validend / PAGE_SIZE; + while (pg < npg && NBPGVALID(bp, pg)) { pg++; + } bp->nb_validend = pg * PAGE_SIZE; /* clip to EOF */ - if (NBOFF(bp) + bp->nb_validend > (off_t)np->n_size) + if (NBOFF(bp) + bp->nb_validend > (off_t)np->n_size) { bp->nb_validend = np->n_size % bp->nb_bufsize; + } } /* @@ -546,17 +571,21 @@ nfs_buf_delwri_service(void) np = bp->nb_np; nfs_buf_remfree(bp); nfs_buf_refget(bp); - while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN); + while ((error = nfs_buf_acquire(bp, 0, 0, 0)) == EAGAIN) { + ; + } nfs_buf_refrele(bp); - if (error) + if (error) { break; + } if (!bp->nb_np) { /* buffer is no longer valid */ nfs_buf_drop(bp); continue; } - if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { nfs_buf_check_write_verifier(np, bp); + } if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { /* put buffer at end of delwri list */ TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free); @@ -600,20 +629,25 @@ nfs_buf_delwri_thread(__unused void *arg, __unused wait_result_t wr) void nfs_buf_delwri_push(int locked) { - if (TAILQ_EMPTY(&nfsbufdelwri)) + if (TAILQ_EMPTY(&nfsbufdelwri)) { return; - if (!locked) + } + if (!locked) { lck_mtx_lock(nfs_buf_mutex); + } /* wake up the delayed write service thread */ - if (nfsbufdelwrithd) + if (nfsbufdelwrithd) { wakeup(&nfsbufdelwrithd); - else if (kernel_thread_start(nfs_buf_delwri_thread, NULL, &nfsbufdelwrithd) == KERN_SUCCESS) + } else if (kernel_thread_start(nfs_buf_delwri_thread, NULL, &nfsbufdelwrithd) == KERN_SUCCESS) { thread_deallocate(nfsbufdelwrithd); + } /* otherwise, try to do some of the work ourselves */ - if (!nfsbufdelwrithd) + if (!nfsbufdelwrithd) { nfs_buf_delwri_service(); - if (!locked) + } + if (!locked) { lck_mtx_unlock(nfs_buf_mutex); + } } /* @@ -657,12 +691,13 @@ nfs_buf_get( *bpp = NULL; bufsize = size; - if (bufsize > NFS_MAXBSIZE) + if (bufsize > NFS_MAXBSIZE) { panic("nfs_buf_get: buffer larger than NFS_MAXBSIZE requested"); + } if (nfs_mount_gone(nmp)) { FSDBG_BOT(541, np, blkno, 0, ENXIO); - return (ENXIO); + return ENXIO; } if (!UBCINFOEXISTS(vp)) { @@ -696,10 +731,11 @@ loop: if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) { lck_mtx_unlock(nfs_buf_mutex); FSDBG_BOT(541, np, blkno, 0, error); - return (error); + return error; } - if (np->n_bflag & NBINVALINPROG) + if (np->n_bflag & NBINVALINPROG) { slpflag = 0; + } } /* check for existence of nfsbuf in cache */ @@ -709,38 +745,40 @@ loop: if (flags & NBLK_NOWAIT) { lck_mtx_unlock(nfs_buf_mutex); FSDBG_BOT(541, np, blkno, bp, 0xbcbcbcbc); - return (0); + return 0; } FSDBG_TOP(543, np, blkno, bp, bp->nb_flags); SET(bp->nb_lflags, NBL_WANTED); 
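nfs_buf_normalize_valid_range(), reformatted in the hunk above, snaps the buffer's valid byte range to the page-aligned extent of the contiguous run of valid pages around it, then clips the end to EOF. A user-space sketch of the same arithmetic, with a toy bitmap standing in for the NBPGVALID() macro and simplified, hypothetical field names:

#include <stdio.h>

#define PAGE_SIZE 4096

struct toybuf {                 /* toy stand-in for struct nfsbuf */
	unsigned valid_pages;   /* bit i set => page i holds valid data */
	int bufsize;
	int validoff, validend; /* byte range believed valid */
};

static int
pg_valid(const struct toybuf *bp, int pg)
{
	return (bp->valid_pages >> pg) & 1;
}

static void
normalize_valid_range(struct toybuf *bp, long long boff, long long n_size)
{
	int pg, npg;

	/* pull validoff back to the start of the contiguous valid run */
	pg = bp->validoff / PAGE_SIZE;
	while (pg >= 0 && pg_valid(bp, pg)) {
		pg--;
	}
	bp->validoff = (pg + 1) * PAGE_SIZE;

	/* push validend forward to the end of the contiguous valid run */
	npg = bp->bufsize / PAGE_SIZE;
	pg = bp->validend / PAGE_SIZE;
	while (pg < npg && pg_valid(bp, pg)) {
		pg++;
	}
	bp->validend = pg * PAGE_SIZE;

	/* clip to EOF, as the kernel does against np->n_size */
	if (boff + bp->validend > n_size) {
		bp->validend = (int)(n_size % bp->bufsize);
	}
}

int
main(void)
{
	/* 32 KB buffer at offset 0, pages 0-2 valid, EOF at 10000 bytes */
	struct toybuf bp = { 0x7, 32768, 100, 8100 };

	normalize_valid_range(&bp, 0, 10000);
	printf("validoff=%d validend=%d\n", bp.validoff, bp.validend); /* 0 10000 */
	return 0;
}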
ts.tv_sec = 2; ts.tv_nsec = 0; - msleep(bp, nfs_buf_mutex, slpflag|(PRIBIO+1)|PDROP, - "nfsbufget", (slpflag == PCATCH) ? NULL : &ts); + msleep(bp, nfs_buf_mutex, slpflag | (PRIBIO + 1) | PDROP, + "nfsbufget", (slpflag == PCATCH) ? NULL : &ts); slpflag = 0; FSDBG_BOT(543, np, blkno, bp, bp->nb_flags); if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) { FSDBG_BOT(541, np, blkno, 0, error); - return (error); + return error; } goto loop; } - if (bp->nb_bufsize != bufsize) + if (bp->nb_bufsize != bufsize) { panic("nfsbuf size mismatch"); + } SET(bp->nb_lflags, NBL_BUSY); SET(bp->nb_flags, NB_CACHE); nfs_buf_remfree(bp); /* additional paranoia: */ - if (ISSET(bp->nb_flags, NB_PAGELIST)) + if (ISSET(bp->nb_flags, NB_PAGELIST)) { panic("pagelist buffer was not busy"); + } goto buffer_setup; } if (flags & NBLK_ONLYVALID) { lck_mtx_unlock(nfs_buf_mutex); FSDBG_BOT(541, np, blkno, 0, 0x0000cace); - return (0); + return 0; } /* @@ -768,28 +806,31 @@ loop: /* if the next LRU or META buffer is invalid or stale, use it */ lrubp = TAILQ_FIRST(&nfsbuffree); if (lrubp && (!NBUFSTAMPVALID(lrubp) || - ((lrubp->nb_timestamp + NFSBUF_LRU_STALE) < now.tv_sec))) + ((lrubp->nb_timestamp + NFSBUF_LRU_STALE) < now.tv_sec))) { bp = lrubp; + } metabp = TAILQ_FIRST(&nfsbuffreemeta); if (!bp && metabp && (!NBUFSTAMPVALID(metabp) || - ((metabp->nb_timestamp + NFSBUF_META_STALE) < now.tv_sec))) + ((metabp->nb_timestamp + NFSBUF_META_STALE) < now.tv_sec))) { bp = metabp; + } if (!bp && (nfsbufcnt >= nfsbufmax)) { /* we've already allocated all bufs, so */ /* choose the buffer that'll go stale first */ - if (!metabp) + if (!metabp) { bp = lrubp; - else if (!lrubp) + } else if (!lrubp) { bp = metabp; - else { + } else { int32_t lru_stale_time, meta_stale_time; lru_stale_time = lrubp->nb_timestamp + NFSBUF_LRU_STALE; meta_stale_time = metabp->nb_timestamp + NFSBUF_META_STALE; - if (lru_stale_time <= meta_stale_time) + if (lru_stale_time <= meta_stale_time) { bp = lrubp; - else + } else { bp = metabp; + } } } } @@ -798,8 +839,9 @@ loop: /* we have a buffer to reuse */ FSDBG(544, np, blkno, bp, bp->nb_flags); nfs_buf_remfree(bp); - if (ISSET(bp->nb_flags, NB_DELWRI)) + if (ISSET(bp->nb_flags, NB_DELWRI)) { panic("nfs_buf_get: delwri"); + } SET(bp->nb_lflags, NBL_BUSY); /* disassociate buffer from previous nfsnode */ if (bp->nb_np) { @@ -811,14 +853,17 @@ loop: } LIST_REMOVE(bp, nb_hash); /* nuke any creds we're holding */ - if (IS_VALID_CRED(bp->nb_rcred)) + if (IS_VALID_CRED(bp->nb_rcred)) { kauth_cred_unref(&bp->nb_rcred); - if (IS_VALID_CRED(bp->nb_wcred)) + } + if (IS_VALID_CRED(bp->nb_wcred)) { kauth_cred_unref(&bp->nb_wcred); + } /* if buf will no longer be NB_META, dump old buffer */ if (operation == NBLK_META) { - if (!ISSET(bp->nb_flags, NB_META)) + if (!ISSET(bp->nb_flags, NB_META)) { nfsbufmetacnt++; + } } else if (ISSET(bp->nb_flags, NB_META)) { if (bp->nb_data) { kfree(bp->nb_data, bp->nb_bufsize); @@ -842,7 +887,7 @@ loop: if (!bp) { lck_mtx_unlock(nfs_buf_mutex); FSDBG_BOT(541, np, blkno, 0, error); - return (ENOMEM); + return ENOMEM; } nfsbufcnt++; @@ -853,11 +898,12 @@ loop: if (nfsbufcnt > nfsbufmin && !nfs_buf_timer_on) { nfs_buf_timer_on = 1; nfs_interval_timer_start(nfs_buf_timer_call, - NFSBUF_FREE_PERIOD * 1000); + NFSBUF_FREE_PERIOD * 1000); } - if (operation == NBLK_META) + if (operation == NBLK_META) { nfsbufmetacnt++; + } NFSBUFCNTCHK(); /* init nfsbuf */ bzero(bp, sizeof(*bp)); @@ -872,11 +918,11 @@ loop: nfs_buf_delwri_push(1); nfsneedbuffer = 1; - msleep(&nfsneedbuffer, nfs_buf_mutex, 
PCATCH|PDROP, "nfsbufget", NULL); + msleep(&nfsneedbuffer, nfs_buf_mutex, PCATCH | PDROP, "nfsbufget", NULL); FSDBG_BOT(546, np, blkno, nfsbufcnt, nfsbufmax); if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) { FSDBG_BOT(541, np, blkno, 0, error); - return (error); + return error; } goto loop; } @@ -909,8 +955,9 @@ buffer_setup: bp->nb_dirty = 0; CLR(bp->nb_flags, NB_CACHE); } - if (!bp->nb_data) + if (!bp->nb_data) { bp->nb_data = kalloc(bufsize); + } if (!bp->nb_data) { /* Ack! couldn't allocate the data buffer! */ /* clean up buffer and return error */ @@ -920,13 +967,14 @@ buffer_setup: bp->nb_np = NULL; /* invalidate usage timestamp to allow immediate freeing */ NBUFSTAMPINVALIDATE(bp); - if (bp->nb_free.tqe_next != NFSNOLIST) + if (bp->nb_free.tqe_next != NFSNOLIST) { panic("nfsbuf on freelist"); + } TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free); nfsbuffreecnt++; lck_mtx_unlock(nfs_buf_mutex); FSDBG_BOT(541, np, blkno, 0xb00, ENOMEM); - return (ENOMEM); + return ENOMEM; } bp->nb_bufsize = bufsize; break; @@ -942,8 +990,9 @@ buffer_setup: } else { CLR(bp->nb_flags, NB_READ); } - if (bufsize < PAGE_SIZE) + if (bufsize < PAGE_SIZE) { bufsize = PAGE_SIZE; + } bp->nb_bufsize = bufsize; bp->nb_validoff = bp->nb_validend = -1; @@ -959,13 +1008,14 @@ buffer_setup: bp->nb_np = NULL; /* invalidate usage timestamp to allow immediate freeing */ NBUFSTAMPINVALIDATE(bp); - if (bp->nb_free.tqe_next != NFSNOLIST) + if (bp->nb_free.tqe_next != NFSNOLIST) { panic("nfsbuf on freelist"); + } TAILQ_INSERT_HEAD(&nfsbuffree, bp, nb_free); nfsbuffreecnt++; lck_mtx_unlock(nfs_buf_mutex); FSDBG_BOT(541, np, blkno, 0x2bc, EIO); - return (EIO); + return EIO; } nfs_buf_upl_check(bp); } @@ -979,7 +1029,7 @@ buffer_setup: FSDBG_BOT(541, np, blkno, bp, bp->nb_flags); - return (0); + return 0; } void @@ -1002,17 +1052,20 @@ nfs_buf_release(struct nfsbuf *bp, int freeup) if (!ISSET(bp->nb_flags, NB_PAGELIST) && !ISSET(bp->nb_flags, NB_INVAL)) { rv = nfs_buf_upl_setup(bp); - if (rv) + if (rv) { printf("nfs_buf_release: upl create failed %d\n", rv); - else + } else { nfs_buf_upl_check(bp); + } } upl = bp->nb_pagelist; - if (!upl) + if (!upl) { goto pagelist_cleanup_done; + } if (bp->nb_data) { - if (ubc_upl_unmap(upl) != KERN_SUCCESS) + if (ubc_upl_unmap(upl) != KERN_SUCCESS) { panic("ubc_upl_unmap failed"); + } bp->nb_data = NULL; } /* @@ -1021,33 +1074,36 @@ nfs_buf_release(struct nfsbuf *bp, int freeup) */ if (ISSET(bp->nb_flags, NB_ERROR) || (!bp->nb_dirty && (ISSET(bp->nb_flags, NB_INVAL) || (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))))) { - if (ISSET(bp->nb_flags, (NB_READ | NB_INVAL | NB_NOCACHE))) + if (ISSET(bp->nb_flags, (NB_READ | NB_INVAL | NB_NOCACHE))) { upl_flags = UPL_ABORT_DUMP_PAGES; - else + } else { upl_flags = 0; + } ubc_upl_abort(upl, upl_flags); goto pagelist_cleanup_done; } - for (i=0; i <= (bp->nb_bufsize - 1)/PAGE_SIZE; i++) { - if (!NBPGVALID(bp,i)) + for (i = 0; i <= (bp->nb_bufsize - 1) / PAGE_SIZE; i++) { + if (!NBPGVALID(bp, i)) { ubc_upl_abort_range(upl, - i*PAGE_SIZE, PAGE_SIZE, - UPL_ABORT_DUMP_PAGES | - UPL_ABORT_FREE_ON_EMPTY); - else { - if (NBPGDIRTY(bp,i)) + i * PAGE_SIZE, PAGE_SIZE, + UPL_ABORT_DUMP_PAGES | + UPL_ABORT_FREE_ON_EMPTY); + } else { + if (NBPGDIRTY(bp, i)) { upl_flags = UPL_COMMIT_SET_DIRTY; - else + } else { upl_flags = UPL_COMMIT_CLEAR_DIRTY; - - if (!ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI))) + } + + if (!ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI))) { upl_flags |= UPL_COMMIT_CLEAR_PRECIOUS; + } 
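/*
 * Illustrative summary of the per-page decision in the loop above: for
 * each page of the buffer's UPL,
 *
 *   page never valid (NBPGVALID clear) -> abort with UPL_ABORT_DUMP_PAGES
 *   page valid and NBPGDIRTY set       -> commit with UPL_COMMIT_SET_DIRTY
 *   page valid and clean               -> commit with UPL_COMMIT_CLEAR_DIRTY
 *
 * UPL_COMMIT_CLEAR_PRECIOUS is added only when the buffer has no pending
 * commit or delayed write (NB_NEEDCOMMIT | NB_DELWRI), letting the VM
 * system reclaim pages whose contents the client no longer needs to keep.
 */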
ubc_upl_commit_range(upl, - i*PAGE_SIZE, PAGE_SIZE, - upl_flags | - UPL_COMMIT_INACTIVATE | - UPL_COMMIT_FREE_ON_EMPTY); + i * PAGE_SIZE, PAGE_SIZE, + upl_flags | + UPL_COMMIT_INACTIVATE | + UPL_COMMIT_FREE_ON_EMPTY); } } pagelist_cleanup_done: @@ -1056,11 +1112,13 @@ pagelist_cleanup_done: off_t start, end; start = trunc_page_64(np->n_size) + PAGE_SIZE_64; end = trunc_page_64(NBOFF(bp) + bp->nb_bufsize); - if (start < NBOFF(bp)) + if (start < NBOFF(bp)) { start = NBOFF(bp); + } if (end > start) { - if ((rv = ubc_msync(vp, start, end, NULL, UBC_INVALIDATE))) + if ((rv = ubc_msync(vp, start, end, NULL, UBC_INVALIDATE))) { printf("nfs_buf_release(): ubc_msync failed!, error %d\n", rv); + } } } CLR(bp->nb_flags, NB_PAGELIST); @@ -1084,8 +1142,9 @@ pagelist_cleanup_done: /* If it's non-needcommit nocache, or an error, mark it invalid. */ if (ISSET(bp->nb_flags, NB_ERROR) || - (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))) + (ISSET(bp->nb_flags, NB_NOCACHE) && !ISSET(bp->nb_flags, (NB_NEEDCOMMIT | NB_DELWRI)))) { SET(bp->nb_flags, NB_INVAL); + } if ((bp->nb_bufsize <= 0) || ISSET(bp->nb_flags, NB_INVAL)) { /* If it's invalid or empty, dissociate it from its nfsnode */ @@ -1105,8 +1164,9 @@ pagelist_cleanup_done: /* invalidate usage timestamp to allow immediate freeing */ NBUFSTAMPINVALIDATE(bp); /* put buffer at head of free list */ - if (bp->nb_free.tqe_next != NFSNOLIST) + if (bp->nb_free.tqe_next != NFSNOLIST) { panic("nfsbuf on freelist"); + } SET(bp->nb_flags, NB_INVAL); if (ISSET(bp->nb_flags, NB_META)) { TAILQ_INSERT_HEAD(&nfsbuffreemeta, bp, nb_free); @@ -1117,8 +1177,9 @@ pagelist_cleanup_done: } } else if (ISSET(bp->nb_flags, NB_DELWRI)) { /* put buffer at end of delwri list */ - if (bp->nb_free.tqe_next != NFSNOLIST) + if (bp->nb_free.tqe_next != NFSNOLIST) { panic("nfsbuf on freelist"); + } TAILQ_INSERT_TAIL(&nfsbufdelwri, bp, nb_free); nfsbufdelwricnt++; freeup = 0; @@ -1127,8 +1188,9 @@ pagelist_cleanup_done: microuptime(&now); bp->nb_timestamp = now.tv_sec; /* put buffer at end of free list */ - if (bp->nb_free.tqe_next != NFSNOLIST) + if (bp->nb_free.tqe_next != NFSNOLIST) { panic("nfsbuf on freelist"); + } if (ISSET(bp->nb_flags, NB_META)) { TAILQ_INSERT_TAIL(&nfsbuffreemeta, bp, nb_free); nfsbuffreemetacnt++; @@ -1148,14 +1210,18 @@ pagelist_cleanup_done: lck_mtx_unlock(nfs_buf_mutex); - if (wakeup_needbuffer) + if (wakeup_needbuffer) { wakeup(&nfsneedbuffer); - if (wakeup_buffer) + } + if (wakeup_buffer) { wakeup(bp); - if (wakeup_nbdwrite) + } + if (wakeup_nbdwrite) { wakeup(&nfs_nbdwrite); - if (freeup) + } + if (freeup) { NFS_BUF_FREEUP(); + } } /* @@ -1169,8 +1235,9 @@ nfs_buf_iowait(struct nfsbuf *bp) lck_mtx_lock(nfs_buf_mutex); - while (!ISSET(bp->nb_flags, NB_DONE)) + while (!ISSET(bp->nb_flags, NB_DONE)) { msleep(bp, nfs_buf_mutex, PRIBIO + 1, "nfs_buf_iowait", NULL); + } lck_mtx_unlock(nfs_buf_mutex); @@ -1179,10 +1246,11 @@ nfs_buf_iowait(struct nfsbuf *bp) /* check for interruption of I/O, then errors. */ if (ISSET(bp->nb_flags, NB_EINTR)) { CLR(bp->nb_flags, NB_EINTR); - return (EINTR); - } else if (ISSET(bp->nb_flags, NB_ERROR)) - return (bp->nb_error ? bp->nb_error : EIO); - return (0); + return EINTR; + } else if (ISSET(bp->nb_flags, NB_ERROR)) { + return bp->nb_error ? 
bp->nb_error : EIO; + } + return 0; } /* @@ -1191,11 +1259,11 @@ nfs_buf_iowait(struct nfsbuf *bp) void nfs_buf_iodone(struct nfsbuf *bp) { - FSDBG_TOP(550, bp, NBOFF(bp), bp->nb_flags, bp->nb_error); - if (ISSET(bp->nb_flags, NB_DONE)) + if (ISSET(bp->nb_flags, NB_DONE)) { panic("nfs_buf_iodone already"); + } if (!ISSET(bp->nb_flags, NB_READ)) { CLR(bp->nb_flags, NB_WRITEINPROG); @@ -1208,14 +1276,14 @@ nfs_buf_iodone(struct nfsbuf *bp) bp->nb_np->n_numoutput--; nfs_node_unlock(bp->nb_np); } - if (ISSET(bp->nb_flags, NB_ASYNC)) { /* if async, release it */ - SET(bp->nb_flags, NB_DONE); /* note that it's done */ + if (ISSET(bp->nb_flags, NB_ASYNC)) { /* if async, release it */ + SET(bp->nb_flags, NB_DONE); /* note that it's done */ nfs_buf_release(bp, 1); - } else { /* or just wakeup the buffer */ - lck_mtx_lock(nfs_buf_mutex); - SET(bp->nb_flags, NB_DONE); /* note that it's done */ + } else { /* or just wakeup the buffer */ + lck_mtx_lock(nfs_buf_mutex); + SET(bp->nb_flags, NB_DONE); /* note that it's done */ CLR(bp->nb_lflags, NBL_WANTED); - lck_mtx_unlock(nfs_buf_mutex); + lck_mtx_unlock(nfs_buf_mutex); wakeup(bp); } @@ -1241,8 +1309,9 @@ nfs_buf_write_delayed(struct nfsbuf *bp) lck_mtx_lock(nfs_buf_mutex); nfs_nbdwrite++; NFSBUFCNTCHK(); - if (bp->nb_vnbufs.le_next != NFSNOLIST) + if (bp->nb_vnbufs.le_next != NFSNOLIST) { LIST_REMOVE(bp, nb_vnbufs); + } LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); lck_mtx_unlock(nfs_buf_mutex); } @@ -1262,8 +1331,9 @@ nfs_buf_write_delayed(struct nfsbuf *bp) * If we have too many delayed write buffers, * just fall back to doing the async write. */ - if (nfs_nbdwrite < 0) + if (nfs_nbdwrite < 0) { panic("nfs_buf_write_delayed: Negative nfs_nbdwrite"); + } if (nfs_nbdwrite > NFS_A_LOT_OF_DELAYED_WRITES) { /* issue async write */ SET(bp->nb_flags, NB_ASYNC); @@ -1289,14 +1359,17 @@ nfs_buf_check_write_verifier(nfsnode_t np, struct nfsbuf *bp) { struct nfsmount *nmp; - if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) + if (!ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { return; + } nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { return; - if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf)) + } + if (!ISSET(bp->nb_flags, NB_STALEWVERF) && (bp->nb_verf == nmp->nm_verf)) { return; + } /* write verifier changed, clear commit/wverf flags */ CLR(bp->nb_flags, (NB_NEEDCOMMIT | NB_STALEWVERF)); @@ -1337,30 +1410,33 @@ nfs_buf_acquire(struct nfsbuf *bp, int flags, int slpflag, int slptimeo) struct timespec ts; if (ISSET(bp->nb_lflags, NBL_BUSY)) { - /* + /* * since the lck_mtx_lock may block, the buffer * may become BUSY, so we need to recheck for * a NOWAIT request */ - if (flags & NBAC_NOWAIT) - return (EBUSY); - SET(bp->nb_lflags, NBL_WANTED); + if (flags & NBAC_NOWAIT) { + return EBUSY; + } + SET(bp->nb_lflags, NBL_WANTED); - ts.tv_sec = (slptimeo/100); + ts.tv_sec = (slptimeo / 100); /* the hz value is 100; which leads to 10ms */ ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000; error = msleep(bp, nfs_buf_mutex, slpflag | (PRIBIO + 1), - "nfs_buf_acquire", &ts); - if (error) - return (error); - return (EAGAIN); + "nfs_buf_acquire", &ts); + if (error) { + return error; + } + return EAGAIN; + } + if (flags & NBAC_REMOVE) { + nfs_buf_remfree(bp); } - if (flags & NBAC_REMOVE) - nfs_buf_remfree(bp); SET(bp->nb_lflags, NBL_BUSY); - return (0); + return 0; } /* @@ -1372,17 +1448,19 @@ nfs_buf_drop(struct nfsbuf *bp) { int need_wakeup = 0; - if (!ISSET(bp->nb_lflags, NBL_BUSY)) + if (!ISSET(bp->nb_lflags, NBL_BUSY)) { 
panic("nfs_buf_drop: buffer not busy!"); + } if (ISSET(bp->nb_lflags, NBL_WANTED)) { - /* delay the actual wakeup until after we clear NBL_BUSY */ + /* delay the actual wakeup until after we clear NBL_BUSY */ need_wakeup = 1; } /* Unlock the buffer. */ CLR(bp->nb_lflags, (NBL_BUSY | NBL_WANTED)); - if (need_wakeup) - wakeup(bp); + if (need_wakeup) { + wakeup(bp); + } } /* @@ -1395,31 +1473,32 @@ nfs_buf_iterprepare(nfsnode_t np, struct nfsbuflists *iterheadp, int flags) { struct nfsbuflists *listheadp; - if (flags & NBI_DIRTY) + if (flags & NBI_DIRTY) { listheadp = &np->n_dirtyblkhd; - else + } else { listheadp = &np->n_cleanblkhd; + } if ((flags & NBI_NOWAIT) && (np->n_bufiterflags & NBI_ITER)) { - LIST_INIT(iterheadp); - return(EWOULDBLOCK); + LIST_INIT(iterheadp); + return EWOULDBLOCK; } - while (np->n_bufiterflags & NBI_ITER) { - np->n_bufiterflags |= NBI_ITERWANT; + while (np->n_bufiterflags & NBI_ITER) { + np->n_bufiterflags |= NBI_ITERWANT; msleep(&np->n_bufiterflags, nfs_buf_mutex, 0, "nfs_buf_iterprepare", NULL); } if (LIST_EMPTY(listheadp)) { - LIST_INIT(iterheadp); - return(EINVAL); + LIST_INIT(iterheadp); + return EINVAL; } np->n_bufiterflags |= NBI_ITER; iterheadp->lh_first = listheadp->lh_first; - listheadp->lh_first->nb_vnbufs.le_prev = &iterheadp->lh_first; + listheadp->lh_first->nb_vnbufs.le_prev = &iterheadp->lh_first; LIST_INIT(listheadp); - return(0); + return 0; } /* @@ -1433,10 +1512,11 @@ nfs_buf_itercomplete(nfsnode_t np, struct nfsbuflists *iterheadp, int flags) struct nfsbuflists * listheadp; struct nfsbuf *bp; - if (flags & NBI_DIRTY) + if (flags & NBI_DIRTY) { listheadp = &np->n_dirtyblkhd; - else + } else { listheadp = &np->n_cleanblkhd; + } while (!LIST_EMPTY(iterheadp)) { bp = LIST_FIRST(iterheadp); @@ -1465,15 +1545,18 @@ nfs_buf_read(struct nfsbuf *bp) np = bp->nb_np; cred = bp->nb_rcred; - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_ref(cred); + } thd = ISSET(bp->nb_flags, NB_ASYNC) ? NULL : current_thread(); /* sanity checks */ - if (!ISSET(bp->nb_flags, NB_READ)) + if (!ISSET(bp->nb_flags, NB_READ)) { panic("nfs_buf_read: !NB_READ"); - if (ISSET(bp->nb_flags, NB_DONE)) + } + if (ISSET(bp->nb_flags, NB_DONE)) { CLR(bp->nb_flags, NB_DONE); + } NFS_BUF_MAP(bp); @@ -1485,9 +1568,10 @@ nfs_buf_read(struct nfsbuf *bp) * read. Otherwise, the read has already been finished. */ - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); - return (error); + } + return error; } /* @@ -1503,7 +1587,7 @@ nfs_buf_read_finish(struct nfsbuf *bp) /* update valid range */ bp->nb_validoff = 0; bp->nb_validend = bp->nb_endio; - if (bp->nb_endio < (int)bp->nb_bufsize) { + if (bp->nb_endio < (int)bp->nb_bufsize) { /* * The read may be short because we have unflushed writes * that are extending the file size and the reads hit the @@ -1513,16 +1597,18 @@ nfs_buf_read_finish(struct nfsbuf *bp) * in nfs_buf_read_rpc_finish(). 
*/ off_t boff = NBOFF(bp); - if ((off_t)np->n_size >= (boff + bp->nb_bufsize)) + if ((off_t)np->n_size >= (boff + bp->nb_bufsize)) { bp->nb_validend = bp->nb_bufsize; - else if ((off_t)np->n_size >= boff) + } else if ((off_t)np->n_size >= boff) { bp->nb_validend = np->n_size - boff; - else + } else { bp->nb_validend = 0; + } } if ((nmp = NFSTONMP(np)) && (nmp->nm_vers == NFS_VER2) && - ((NBOFF(bp) + bp->nb_validend) > 0x100000000LL)) + ((NBOFF(bp) + bp->nb_validend) > 0x100000000LL)) { bp->nb_validend = 0x100000000LL - NBOFF(bp); + } bp->nb_valid = (1 << (round_page_32(bp->nb_validend) / PAGE_SIZE)) - 1; if (bp->nb_validend & PAGE_MASK) { /* zero-fill remainder of last page */ @@ -1552,7 +1638,7 @@ nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) bp->nb_error = error = ENXIO; SET(bp->nb_flags, NB_ERROR); nfs_buf_iodone(bp); - return (error); + return error; } nfsvers = nmp->nm_vers; nmrsize = nmp->nm_rsize; @@ -1566,10 +1652,11 @@ nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) bp->nb_error = error = EFBIG; SET(bp->nb_flags, NB_ERROR); nfs_buf_iodone(bp); - return (error); + return error; } - if ((boff + length - 1) > 0xffffffffLL) + if ((boff + length - 1) > 0xffffffffLL) { length = 0x100000000LL - boff; + } } /* Note: Can only do async I/O if nfsiods are configured. */ @@ -1593,16 +1680,19 @@ nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) len = (length > nmrsize) ? nmrsize : length; cb.rcb_args[0] = offset; cb.rcb_args[1] = len; - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { cb.rcb_args[2] = nmp->nm_stategenid; + } req = NULL; error = nmp->nm_funcs->nf_read_rpc_async(np, boff + offset, len, thd, cred, &cb, &req); - if (error) + if (error) { break; + } offset += len; length -= len; - if (async) + if (async) { continue; + } nfs_buf_read_rpc_finish(req); if (ISSET(bp->nb_flags, NB_ERROR)) { error = bp->nb_error; @@ -1627,9 +1717,10 @@ nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) nfs_buf_iodone(bp); } else { /* wait for the last RPC to mark it done */ - while (bp->nb_rpcs > 0) + while (bp->nb_rpcs > 0) { msleep(&bp->nb_rpcs, nfs_buf_mutex, 0, - "nfs_buf_read_rpc_cancel", NULL); + "nfs_buf_read_rpc_cancel", NULL); + } lck_mtx_unlock(nfs_buf_mutex); } } else { @@ -1637,7 +1728,7 @@ nfs_buf_read_rpc(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) } } - return (error); + return error; } /* @@ -1657,18 +1748,20 @@ nfs_buf_read_rpc_finish(struct nfsreq *req) thread_t thd; kauth_cred_t cred; uio_t auio; - char uio_buf [ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; finish: np = req->r_np; thd = req->r_thread; cred = req->r_cred; - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_ref(cred); + } cb = req->r_callback; bp = cb.rcb_bp; - if (cb.rcb_func) /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */ + if (cb.rcb_func) { /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */ nfs_request_ref(req, 0); + } nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { @@ -1686,24 +1779,26 @@ finish: rlen = length = cb.rcb_args[1]; auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, - UIO_READ, &uio_buf, sizeof(uio_buf)); + UIO_READ, &uio_buf, sizeof(uio_buf)); uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); /* finish the RPC */ error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, auio, &rlen, &eof); if ((error == EINPROGRESS) && cb.rcb_func) { /* async request restarted 
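nfs_buf_read_finish() in the hunk above then converts the valid byte count into the nb_valid page bitmap with (1 << npages) - 1, zero-filling the tail of a partially valid last page before setting its bit. A minimal sketch of that round-up and bitmap arithmetic, assuming the usual 4096-byte page:

#include <stdio.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (PAGE_SIZE - 1)

/* round a byte count up to whole pages (like round_page_32()) */
static unsigned
round_page(unsigned x)
{
	return (x + PAGE_MASK) & ~(unsigned)PAGE_MASK;
}

int
main(void)
{
	unsigned validend = 10000; /* bytes actually read into the buffer */

	/* one bit per valid page: pages 0 .. ceil(validend/PAGE_SIZE)-1 */
	unsigned valid = (1u << (round_page(validend) / PAGE_SIZE)) - 1;

	printf("valid bitmap = 0x%x\n", valid); /* 0x7: pages 0, 1, 2 */
	if (validend & PAGE_MASK) {
		/* last page only partially valid: the kernel bzero()s
		 * bytes validend .. round_page(validend)-1 first */
		printf("zero-fill %u..%u\n", validend, round_page(validend) - 1);
	}
	return 0;
}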
*/ - if (cb.rcb_func) + if (cb.rcb_func) { nfs_request_rele(req); - if (IS_VALID_CRED(cred)) + } + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); + } return; } if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) { lck_mtx_lock(&nmp->nm_lock); if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) { NP(np, "nfs_buf_read_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery", - error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid); + error, NBOFF(bp) + offset, cb.rcb_args[2], nmp->nm_stategenid); nfs_need_recover(nmp, error); } lck_mtx_unlock(&nmp->nm_lock); @@ -1730,13 +1825,14 @@ finish: req->r_start = 0; nfs_asyncio_resend(req); lck_mtx_unlock(&req->r_mtx); - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); + } /* Note: nfsreq reference taken will be dropped later when finished */ return; } /* otherwise, just pause a couple seconds and retry */ - tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); + tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz); } if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { rlen = 0; @@ -1750,8 +1846,9 @@ finish: goto out; } - if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen))) + if ((rlen > 0) && (bp->nb_endio < (offset + (int)rlen))) { bp->nb_endio = offset + rlen; + } if ((nfsvers == NFS_VER2) || eof || (rlen == 0)) { /* zero out the remaining data (up to EOF) */ @@ -1759,8 +1856,9 @@ finish: rpcrem = (length - rlen); eofrem = np->n_size - (NBOFF(bp) + offset + rlen); rem = (rpcrem < eofrem) ? rpcrem : eofrem; - if (rem > 0) + if (rem > 0) { bzero(bp->nb_data + offset + rlen, rem); + } } else if (((int)rlen < length) && !ISSET(bp->nb_flags, NB_ERROR)) { /* * short read @@ -1774,12 +1872,14 @@ readagain: length -= rlen; cb.rcb_args[0] = offset; cb.rcb_args[1] = length; - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { cb.rcb_args[2] = nmp->nm_stategenid; + } error = nmp->nm_funcs->nf_read_rpc_async(np, NBOFF(bp) + offset, length, thd, cred, &cb, &rreq); if (!error) { - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); + } if (!cb.rcb_func) { /* if !async we'll need to wait for this RPC to finish */ req = rreq; @@ -1799,10 +1899,12 @@ readagain: } out: - if (cb.rcb_func) + if (cb.rcb_func) { nfs_request_rele(req); - if (IS_VALID_CRED(cred)) + } + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); + } /* * Decrement outstanding RPC count on buffer @@ -1814,21 +1916,25 @@ out: */ multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC); - if (multasyncrpc) + if (multasyncrpc) { lck_mtx_lock(nfs_buf_mutex); + } bp->nb_rpcs--; finished = (bp->nb_rpcs == 0); - if (multasyncrpc) + if (multasyncrpc) { lck_mtx_unlock(nfs_buf_mutex); + } if (finished) { - if (multasyncrpc) + if (multasyncrpc) { wakeme = &bp->nb_rpcs; + } nfs_buf_read_finish(bp); - if (wakeme) + if (wakeme) { wakeup(wakeme); + } } } @@ -1844,12 +1950,15 @@ nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn int error = 0; uint32_t nra; - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (nmp->nm_readahead <= 0) - return (0); - if (*rabnp > lastrabn) - return (0); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (nmp->nm_readahead <= 0) { + return 0; + } + if (*rabnp > lastrabn) { + return 0; + } for (nra = 0; (nra < nmp->nm_readahead) && (*rabnp <= lastrabn); nra++, *rabnp = *rabnp + 1) { /* check if block exists and is valid. 
*/ @@ -1858,36 +1967,40 @@ nfs_buf_readahead(nfsnode_t np, int ioflag, daddr64_t *rabnp, daddr64_t lastrabn *rabnp = lastrabn; break; } - error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ|NBLK_NOWAIT, &bp); - if (error) + error = nfs_buf_get(np, *rabnp, nmp->nm_biosize, thd, NBLK_READ | NBLK_NOWAIT, &bp); + if (error) { break; + } nfs_node_lock_force(np); np->n_lastrahead = *rabnp; nfs_node_unlock(np); - if (!bp) + if (!bp) { continue; + } if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE) && - !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI|NB_NCRDAHEAD))) { + !bp->nb_dirty && !ISSET(bp->nb_flags, (NB_DELWRI | NB_NCRDAHEAD))) { CLR(bp->nb_flags, NB_CACHE); bp->nb_valid = 0; bp->nb_validoff = bp->nb_validend = -1; } if ((bp->nb_dirtyend <= 0) && !bp->nb_dirty && - !ISSET(bp->nb_flags, (NB_CACHE|NB_DELWRI))) { - SET(bp->nb_flags, (NB_READ|NB_ASYNC)); - if (ioflag & IO_NOCACHE) + !ISSET(bp->nb_flags, (NB_CACHE | NB_DELWRI))) { + SET(bp->nb_flags, (NB_READ | NB_ASYNC)); + if (ioflag & IO_NOCACHE) { SET(bp->nb_flags, NB_NCRDAHEAD); + } if (!IS_VALID_CRED(bp->nb_rcred) && IS_VALID_CRED(cred)) { kauth_cred_ref(cred); bp->nb_rcred = cred; } - if ((error = nfs_buf_read(bp))) + if ((error = nfs_buf_read(bp))) { break; + } continue; } nfs_buf_release(bp, 1); } - return (error); + return error; } /* @@ -1917,14 +2030,14 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) if (vnode_vtype(vp) != VREG) { printf("nfs_bioread: type %x unexpected\n", vnode_vtype(vp)); FSDBG_BOT(514, np, 0xd1e0016, 0, EINVAL); - return (EINVAL); + return EINVAL; } /* * For NFS, cache consistency can only be maintained approximately. * Although RFC1094 does not specify the criteria, the following is * believed to be compatible with the reference port. - * + * * If the file has changed since the last read RPC or you have * written to the file, you may have lost data cache consistency * with the server. So, check for a change, and flush all of the @@ -1935,23 +2048,25 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) * new attributes (via NATTRINVALIDATE() or NGA_UNCACHED). */ - if (ISSET(np->n_flag, NUPDATESIZE)) + if (ISSET(np->n_flag, NUPDATESIZE)) { nfs_data_update_size(np, 0); + } if ((error = nfs_node_lock(np))) { FSDBG_BOT(514, np, 0xd1e0222, 0, error); - return (error); + return error; } if (np->n_flag & NNEEDINVALIDATE) { np->n_flag &= ~NNEEDINVALIDATE; nfs_node_unlock(np); - error = nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1); - if (!error) + error = nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1); + if (!error) { error = nfs_node_lock(np); + } if (error) { FSDBG_BOT(514, np, 0xd1e0322, 0, error); - return (error); + return error; } } @@ -1961,16 +2076,16 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) error = nfs_getattr(np, NULL, ctx, modified ? 
NGA_UNCACHED : NGA_CACHED); if (error) { FSDBG_BOT(514, np, 0xd1e0004, 0, error); - return (error); + return error; } if (uio_resid(uio) == 0) { FSDBG_BOT(514, np, 0xd1e0001, 0, 0); - return (0); + return 0; } if (uio_offset(uio) < 0) { FSDBG_BOT(514, np, 0xd1e0002, 0, EINVAL); - return (EINVAL); + return EINVAL; } /* @@ -1981,28 +2096,31 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) */ if (nmp->nm_readahead > 0) { off_t end = uio_offset(uio) + uio_resid(uio); - if (end > (off_t)np->n_size) + if (end > (off_t)np->n_size) { end = np->n_size; + } rabn = uio_offset(uio) / biosize; maxrabn = (end - 1) / biosize; nfs_node_lock_force(np); if (!(ioflag & IO_NOCACHE) && - (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread+1)))) { + (!rabn || (rabn == np->n_lastread) || (rabn == (np->n_lastread + 1)))) { maxrabn += nmp->nm_readahead; - if ((maxrabn * biosize) >= (off_t)np->n_size) - maxrabn = ((off_t)np->n_size - 1)/biosize; + if ((maxrabn * biosize) >= (off_t)np->n_size) { + maxrabn = ((off_t)np->n_size - 1) / biosize; + } } - if (maxrabn < np->n_lastrahead) + if (maxrabn < np->n_lastrahead) { np->n_lastrahead = -1; - if (rabn < np->n_lastrahead) + } + if (rabn < np->n_lastrahead) { rabn = np->n_lastrahead + 1; + } nfs_node_unlock(np); } else { rabn = maxrabn = 0; } do { - nfs_data_lock(np, NFS_DATA_LOCK_SHARED); lbn = uio_offset(uio) / biosize; @@ -2013,24 +2131,25 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) */ if ((!(ioflag & IO_NOCACHE) || !readaheads) && ((uio->uio_segflg == UIO_USERSPACE32 || - uio->uio_segflg == UIO_USERSPACE64 || - uio->uio_segflg == UIO_USERSPACE))) { + uio->uio_segflg == UIO_USERSPACE64 || + uio->uio_segflg == UIO_USERSPACE))) { io_resid = uio_resid(uio); diff = np->n_size - uio_offset(uio); - if (diff < io_resid) + if (diff < io_resid) { io_resid = diff; + } if (io_resid > 0) { int count = (io_resid > INT_MAX) ? INT_MAX : io_resid; error = cluster_copy_ubc_data(vp, uio, &count, 0); if (error) { nfs_data_unlock(np); FSDBG_BOT(514, np, uio_offset(uio), 0xcacefeed, error); - return (error); + return error; } } /* count any biocache reads that we just copied directly */ - if (lbn != (uio_offset(uio)/biosize)) { - OSAddAtomic64((uio_offset(uio)/biosize) - lbn, &nfsstats.biocache_reads); + if (lbn != (uio_offset(uio) / biosize)) { + OSAddAtomic64((uio_offset(uio) / biosize) - lbn, &nfsstats.biocache_reads); FSDBG(514, np, 0xcacefeed, uio_offset(uio), error); } } @@ -2044,19 +2163,20 @@ nfs_bioread(nfsnode_t np, uio_t uio, int ioflag, vfs_context_t ctx) if ((uio_resid(uio) <= 0) || (uio_offset(uio) >= (off_t)np->n_size)) { nfs_data_unlock(np); FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), 0xaaaaaaaa); - return (0); + return 0; } /* adjust readahead block number, if necessary */ - if (rabn < lbn) + if (rabn < lbn) { rabn = lbn; + } lastrabn = MIN(maxrabn, lbn + nmp->nm_readahead); if (rabn <= lastrabn) { /* start readaheads */ error = nfs_buf_readahead(np, ioflag, &rabn, lastrabn, thd, cred); if (error) { nfs_data_unlock(np); FSDBG_BOT(514, np, 0xd1e000b, 1, error); - return (error); + return error; } readaheads = 1; } @@ -2073,14 +2193,15 @@ again: io_resid = uio_resid(uio); n = (io_resid > (biosize - on)) ? 
(biosize - on) : io_resid; diff = np->n_size - uio_offset(uio); - if (diff < n) + if (diff < n) { n = diff; + } error = nfs_buf_get(np, lbn, biosize, thd, NBLK_READ, &bp); if (error) { nfs_data_unlock(np); FSDBG_BOT(514, np, 0xd1e000c, 0, error); - return (error); + return error; } if ((ioflag & IO_NOCACHE) && ISSET(bp->nb_flags, NB_CACHE)) { @@ -2106,14 +2227,16 @@ again: /* ...check for any invalid pages in the read range */ int pg, firstpg, lastpg, dirtypg; dirtypg = firstpg = lastpg = -1; - pg = on/PAGE_SIZE; - while (pg <= (on + n - 1)/PAGE_SIZE) { - if (!NBPGVALID(bp,pg)) { - if (firstpg < 0) + pg = on / PAGE_SIZE; + while (pg <= (on + n - 1) / PAGE_SIZE) { + if (!NBPGVALID(bp, pg)) { + if (firstpg < 0) { firstpg = pg; + } lastpg = pg; - } else if (firstpg >= 0 && dirtypg < 0 && NBPGDIRTY(bp,pg)) + } else if (firstpg >= 0 && dirtypg < 0 && NBPGDIRTY(bp, pg)) { dirtypg = pg; + } pg++; } @@ -2123,7 +2246,7 @@ again: /* valid range isn't set up, so */ /* set it to what we know is valid */ bp->nb_validoff = trunc_page(on); - bp->nb_validend = round_page(on+n); + bp->nb_validend = round_page(on + n); nfs_buf_normalize_valid_range(np, bp); } goto buffer_ready; @@ -2131,7 +2254,7 @@ again: /* there are invalid pages in the read range */ if (((dirtypg > firstpg) && (dirtypg < lastpg)) || - (((firstpg*PAGE_SIZE) < bp->nb_dirtyend) && (((lastpg+1)*PAGE_SIZE) > bp->nb_dirtyoff))) { + (((firstpg * PAGE_SIZE) < bp->nb_dirtyend) && (((lastpg + 1) * PAGE_SIZE) > bp->nb_dirtyoff))) { /* there are also dirty page(s) (or range) in the read range, */ /* so write the buffer out and try again */ flushbuffer: @@ -2145,41 +2268,42 @@ flushbuffer: if (error) { nfs_data_unlock(np); FSDBG_BOT(514, np, 0xd1e000d, 0, error); - return (error); + return error; } goto again; } if (!bp->nb_dirty && bp->nb_dirtyend <= 0 && - (lastpg - firstpg + 1) > (biosize/PAGE_SIZE)/2) { + (lastpg - firstpg + 1) > (biosize / PAGE_SIZE) / 2) { /* we need to read in more than half the buffer and the */ /* buffer's not dirty, so just fetch the whole buffer */ bp->nb_valid = 0; } else { /* read the page range in */ uio_t auio; - char uio_buf[ UIO_SIZEOF(1) ]; - + char uio_buf[UIO_SIZEOF(1)]; + NFS_BUF_MAP(bp); auio = uio_createwithbuffer(1, (NBOFF(bp) + firstpg * PAGE_SIZE_64), - UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); + UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); if (!auio) { error = ENOMEM; } else { uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + (firstpg * PAGE_SIZE)), - ((lastpg - firstpg + 1) * PAGE_SIZE)); + ((lastpg - firstpg + 1) * PAGE_SIZE)); error = nfs_read_rpc(np, auio, ctx); } if (error) { - if (ioflag & IO_NOCACHE) + if (ioflag & IO_NOCACHE) { SET(bp->nb_flags, NB_NOCACHE); + } nfs_buf_release(bp, 1); nfs_data_unlock(np); FSDBG_BOT(514, np, 0xd1e000e, 0, error); - return (error); + return error; } /* Make sure that the valid range is set to cover this read. 
*/ bp->nb_validoff = trunc_page_32(on); - bp->nb_validend = round_page_32(on+n); + bp->nb_validend = round_page_32(on + n); nfs_buf_normalize_valid_range(np, bp); if (uio_resid(auio) > 0) { /* if short read, must have hit EOF, */ @@ -2187,8 +2311,9 @@ flushbuffer: bzero(CAST_DOWN(caddr_t, uio_curriovbase(auio)), uio_resid(auio)); } /* mark the pages (successfully read) as valid */ - for (pg=firstpg; pg <= lastpg; pg++) - NBPGVALID_SET(bp,pg); + for (pg = firstpg; pg <= lastpg; pg++) { + NBPGVALID_SET(bp, pg); + } } } /* if no pages are valid, read the whole block */ @@ -2200,21 +2325,23 @@ flushbuffer: SET(bp->nb_flags, NB_READ); CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); error = nfs_buf_read(bp); - if (ioflag & IO_NOCACHE) + if (ioflag & IO_NOCACHE) { SET(bp->nb_flags, NB_NOCACHE); + } if (error) { nfs_data_unlock(np); nfs_buf_release(bp, 1); FSDBG_BOT(514, np, 0xd1e000f, 0, error); - return (error); + return error; } } buffer_ready: /* validate read range against valid range and clip */ if (bp->nb_validend > 0) { diff = (on >= bp->nb_validend) ? 0 : (bp->nb_validend - on); - if (diff < n) + if (diff < n) { n = diff; + } } if (n > 0) { NFS_BUF_MAP(bp); @@ -2228,7 +2355,7 @@ buffer_ready: nfs_node_unlock(np); } while (error == 0 && uio_resid(uio) > 0 && n > 0); FSDBG_BOT(514, np, uio_offset(uio), uio_resid(uio), error); - return (error); + return error; } /* @@ -2240,28 +2367,33 @@ nfs_async_write_start(struct nfsmount *nmp) int error = 0, slpflag = NMFLAG(nmp, INTR) ? PCATCH : 0; struct timespec ts = {1, 0}; - if (nfs_max_async_writes <= 0) - return (0); + if (nfs_max_async_writes <= 0) { + return 0; + } lck_mtx_lock(&nmp->nm_lock); while ((nfs_max_async_writes > 0) && (nmp->nm_asyncwrites >= nfs_max_async_writes)) { - if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) + if ((error = nfs_sigintr(nmp, NULL, current_thread(), 1))) { break; - msleep(&nmp->nm_asyncwrites, &nmp->nm_lock, slpflag|(PZERO-1), "nfsasyncwrites", &ts); + } + msleep(&nmp->nm_asyncwrites, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsasyncwrites", &ts); slpflag = 0; } - if (!error) + if (!error) { nmp->nm_asyncwrites++; + } lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } void nfs_async_write_done(struct nfsmount *nmp) { - if (nmp->nm_asyncwrites <= 0) + if (nmp->nm_asyncwrites <= 0) { return; + } lck_mtx_lock(&nmp->nm_lock); - if (nmp->nm_asyncwrites-- >= nfs_max_async_writes) + if (nmp->nm_asyncwrites-- >= nfs_max_async_writes) { wakeup(&nmp->nm_asyncwrites); + } lck_mtx_unlock(&nmp->nm_lock); } @@ -2289,14 +2421,15 @@ nfs_buf_write(struct nfsbuf *bp) FSDBG_TOP(553, bp, NBOFF(bp), bp->nb_flags, 0); - if (!ISSET(bp->nb_lflags, NBL_BUSY)) + if (!ISSET(bp->nb_lflags, NBL_BUSY)) { panic("nfs_buf_write: buffer is not busy???"); + } np = bp->nb_np; async = ISSET(bp->nb_flags, NB_ASYNC); oldflags = bp->nb_flags; - CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI)); + CLR(bp->nb_flags, (NB_READ | NB_DONE | NB_ERROR | NB_DELWRI)); if (ISSET(oldflags, NB_DELWRI)) { lck_mtx_lock(nfs_buf_mutex); nfs_nbdwrite--; @@ -2306,10 +2439,11 @@ nfs_buf_write(struct nfsbuf *bp) } /* move to clean list */ - if (ISSET(oldflags, (NB_ASYNC|NB_DELWRI))) { + if (ISSET(oldflags, (NB_ASYNC | NB_DELWRI))) { lck_mtx_lock(nfs_buf_mutex); - if (bp->nb_vnbufs.le_next != NFSNOLIST) + if (bp->nb_vnbufs.le_next != NFSNOLIST) { LIST_REMOVE(bp, nb_vnbufs); + } LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); lck_mtx_unlock(nfs_buf_mutex); } @@ -2318,14 +2452,17 @@ nfs_buf_write(struct nfsbuf *bp) nfs_node_unlock(np); 
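/*
 * Illustrative sketch (not part of the original patch): the
 * nfs_async_write_start()/nfs_async_write_done() pair above is a counting
 * gate that caps the number of in-flight asynchronous writes per mount
 * (nm_asyncwrites vs. nfs_max_async_writes), sleeping on the counter and
 * waking waiters when a slot frees up at the cap. A minimal userspace
 * analogue using pthreads is shown below; the write_gate_* names and the
 * condition-variable form are assumptions that only mirror the kernel's
 * msleep()/wakeup() structure, not the actual XNU primitives.
 */
#include <pthread.h>

struct write_gate {
	pthread_mutex_t lock;
	pthread_cond_t  cv;
	int             inflight;   /* like nm_asyncwrites */
	int             max;        /* like nfs_max_async_writes; <= 0 disables the cap */
};

static int
write_gate_start(struct write_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->max > 0 && g->inflight >= g->max) {
		/* the kernel msleep()s with a 1s timeout and rechecks for signals */
		pthread_cond_wait(&g->cv, &g->lock);
	}
	g->inflight++;
	pthread_mutex_unlock(&g->lock);
	return 0;
}

static void
write_gate_done(struct write_gate *g)
{
	pthread_mutex_lock(&g->lock);
	if (g->inflight > 0 && g->inflight-- >= g->max) {
		/* a slot just opened at the cap: wake a waiter */
		pthread_cond_signal(&g->cv);
	}
	pthread_mutex_unlock(&g->lock);
}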
vnode_startwrite(NFSTOV(np)); - if (p && p->p_stats) + if (p && p->p_stats) { OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); + } cred = bp->nb_wcred; - if (!IS_VALID_CRED(cred) && ISSET(bp->nb_flags, NB_READ)) + if (!IS_VALID_CRED(cred) && ISSET(bp->nb_flags, NB_READ)) { cred = bp->nb_rcred; /* shouldn't really happen, but... */ - if (IS_VALID_CRED(cred)) + } + if (IS_VALID_CRED(cred)) { kauth_cred_ref(cred); + } thd = async ? NULL : current_thread(); /* We need to make sure the pages are locked before doing I/O. */ @@ -2353,8 +2490,9 @@ nfs_buf_write(struct nfsbuf *bp) } /* If NB_NEEDCOMMIT is set, a commit RPC may do the trick. */ - if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { nfs_buf_check_write_verifier(np, bp); + } if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { struct nfsmount *nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { @@ -2365,7 +2503,7 @@ nfs_buf_write(struct nfsbuf *bp) } SET(bp->nb_flags, NB_WRITEINPROG); error = nmp->nm_funcs->nf_commit_rpc(np, NBOFF(bp) + bp->nb_dirtyoff, - bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred, bp->nb_verf); + bp->nb_dirtyend - bp->nb_dirtyoff, bp->nb_wcred, bp->nb_verf); CLR(bp->nb_flags, NB_WRITEINPROG); if (error) { if (error != NFSERR_STALEWRITEVERF) { @@ -2386,8 +2524,9 @@ nfs_buf_write(struct nfsbuf *bp) /* sanity check the dirty range */ if (NBOFF(bp) + bp->nb_dirtyend > (off_t) np->n_size) { bp->nb_dirtyend = np->n_size - NBOFF(bp); - if (bp->nb_dirtyoff >= bp->nb_dirtyend) + if (bp->nb_dirtyoff >= bp->nb_dirtyend) { bp->nb_dirtyoff = bp->nb_dirtyend = 0; + } } } if (!error && (bp->nb_dirtyend > 0)) { @@ -2398,22 +2537,29 @@ nfs_buf_write(struct nfsbuf *bp) dend = bp->nb_dirtyend; /* if doff page is dirty, move doff to start of page */ - if (NBPGDIRTY(bp, doff / PAGE_SIZE)) + if (NBPGDIRTY(bp, doff / PAGE_SIZE)) { doff -= doff & PAGE_MASK; + } /* try to expand write range to include preceding dirty pages */ - if (!(doff & PAGE_MASK)) - while ((doff > 0) && NBPGDIRTY(bp, (doff - 1) / PAGE_SIZE)) + if (!(doff & PAGE_MASK)) { + while ((doff > 0) && NBPGDIRTY(bp, (doff - 1) / PAGE_SIZE)) { doff -= PAGE_SIZE; + } + } /* if dend page is dirty, move dend to start of next page */ - if ((dend & PAGE_MASK) && NBPGDIRTY(bp, dend / PAGE_SIZE)) + if ((dend & PAGE_MASK) && NBPGDIRTY(bp, dend / PAGE_SIZE)) { dend = round_page_32(dend); + } /* try to expand write range to include trailing dirty pages */ - if (!(dend & PAGE_MASK)) - while ((dend < (int)bp->nb_bufsize) && NBPGDIRTY(bp, dend / PAGE_SIZE)) + if (!(dend & PAGE_MASK)) { + while ((dend < (int)bp->nb_bufsize) && NBPGDIRTY(bp, dend / PAGE_SIZE)) { dend += PAGE_SIZE; + } + } /* make sure to keep dend clipped to EOF */ - if ((NBOFF(bp) + dend) > (off_t) np->n_size) + if ((NBOFF(bp) + dend) > (off_t) np->n_size) { dend = np->n_size - NBOFF(bp); + } /* calculate range of complete pages being written */ firstpg = round_page_32(doff) / PAGE_SIZE; lastpg = (trunc_page_32(dend) - 1) / PAGE_SIZE; @@ -2425,12 +2571,13 @@ nfs_buf_write(struct nfsbuf *bp) * then write FILESYNC; otherwise, write UNSTABLE if async and * not needcommit/stable; otherwise write FILESYNC */ - if (bp->nb_dirty & ~pagemask) + if (bp->nb_dirty & ~pagemask) { iomode = NFS_WRITE_FILESYNC; - else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_STABLE)) == NB_ASYNC) + } else if ((bp->nb_flags & (NB_ASYNC | NB_NEEDCOMMIT | NB_STABLE)) == NB_ASYNC) { iomode = NFS_WRITE_UNSTABLE; - else + } else { iomode = NFS_WRITE_FILESYNC; + } /* write the whole contiguous dirty range */ bp->nb_offio = doff; @@ 
-2447,8 +2594,9 @@ nfs_buf_write(struct nfsbuf *bp) * pages pushed out. */ } else { - if (!error && bp->nb_dirty) /* write out any dirty pages */ + if (!error && bp->nb_dirty) { /* write out any dirty pages */ error = nfs_buf_write_dirty_pages(bp, thd, cred); + } nfs_buf_iodone(bp); } /* note: bp is still valid only for !async case */ @@ -2458,8 +2606,9 @@ out: /* move to clean list */ if (oldflags & NB_DELWRI) { lck_mtx_lock(nfs_buf_mutex); - if (bp->nb_vnbufs.le_next != NFSNOLIST) + if (bp->nb_vnbufs.le_next != NFSNOLIST) { LIST_REMOVE(bp, nb_vnbufs); + } LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); lck_mtx_unlock(nfs_buf_mutex); } @@ -2467,7 +2616,7 @@ out: nfs_buf_release(bp, 1); /* check if we need to invalidate (and we can) */ if ((np->n_flag & NNEEDINVALIDATE) && - !(np->n_bflag & (NBINVALINPROG|NBFLUSHINPROG))) { + !(np->n_bflag & (NBINVALINPROG | NBFLUSHINPROG))) { int invalidate = 0; nfs_node_lock_force(np); if (np->n_flag & NNEEDINVALIDATE) { @@ -2487,14 +2636,15 @@ out: * the buffer busy. So we call vinvalbuf() after * releasing the buffer. */ - nfs_vinvalbuf2(NFSTOV(np), V_SAVE|V_IGNORE_WRITEERR, thd, cred, 1); + nfs_vinvalbuf2(NFSTOV(np), V_SAVE | V_IGNORE_WRITEERR, thd, cred, 1); } } } - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); - return (error); + } + return error; } /* @@ -2569,8 +2719,9 @@ nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) if (ISSET(bp->nb_flags, NB_ASYNC)) { /* move to dirty list */ lck_mtx_lock(nfs_buf_mutex); - if (bp->nb_vnbufs.le_next != NFSNOLIST) + if (bp->nb_vnbufs.le_next != NFSNOLIST) { LIST_REMOVE(bp, nb_vnbufs); + } LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); lck_mtx_unlock(nfs_buf_mutex); } @@ -2597,8 +2748,9 @@ nfs_buf_write_finish(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) bp->nb_dirtyoff = bp->nb_dirtyend = 0; } - if (!error && bp->nb_dirty) + if (!error && bp->nb_dirty) { nfs_buf_write_dirty_pages(bp, thd, cred); + } nfs_buf_iodone(bp); } @@ -2618,10 +2770,11 @@ nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) uint32_t dirty = bp->nb_dirty; uint64_t wverf; uio_t auio; - char uio_buf [ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; - if (!bp->nb_dirty) - return (0); + if (!bp->nb_dirty) { + return 0; + } /* there are pages marked dirty that need to be written out */ OSAddAtomic64(1, &nfsstats.write_bios); @@ -2631,33 +2784,38 @@ nfs_buf_write_dirty_pages(struct nfsbuf *bp, thread_t thd, kauth_cred_t cred) iomode = NFS_WRITE_UNSTABLE; auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, - &uio_buf, sizeof(uio_buf)); + &uio_buf, sizeof(uio_buf)); again: dirty = bp->nb_dirty; wverf = bp->nb_verf; commit = NFS_WRITE_FILESYNC; for (pg = 0; pg < npages; pg++) { - if (!NBPGDIRTY(bp, pg)) + if (!NBPGDIRTY(bp, pg)) { continue; + } count = 1; - while (((pg + count) < npages) && NBPGDIRTY(bp, pg + count)) + while (((pg + count) < npages) && NBPGDIRTY(bp, pg + count)) { count++; + } /* write count pages starting with page pg */ off = pg * PAGE_SIZE; len = count * PAGE_SIZE; /* clip writes to EOF */ - if (NBOFF(bp) + off + len > (off_t) np->n_size) + if (NBOFF(bp) + off + len > (off_t) np->n_size) { len -= (NBOFF(bp) + off + len) - np->n_size; + } if (len > 0) { iomode2 = iomode; uio_reset(auio, NBOFF(bp) + off, UIO_SYSSPACE, UIO_WRITE); uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + off), len); error = nfs_write_rpc2(np, auio, thd, cred, &iomode2, &bp->nb_verf); - if (error) + if (error) { break; - if (iomode2 < commit) /* Retain the 
lowest commitment level returned. */ + } + if (iomode2 < commit) { /* Retain the lowest commitment level returned. */ commit = iomode2; + } if ((commit != NFS_WRITE_FILESYNC) && (wverf != bp->nb_verf)) { /* verifier changed, redo all the writes filesync */ iomode = NFS_WRITE_FILESYNC; @@ -2667,8 +2825,9 @@ again: /* clear dirty bits */ while (count--) { dirty &= ~(1 << pg); - if (count) /* leave pg on last page */ + if (count) { /* leave pg on last page */ pg++; + } } } CLR(bp->nb_flags, NB_WRITEINPROG); @@ -2687,7 +2846,7 @@ again: SET(bp->nb_flags, NB_ERROR); bp->nb_error = error; } - return (error); + return error; } /* @@ -2704,14 +2863,14 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred struct nfsreq *req; struct nfsreq_cbinfo cb; uio_t auio; - char uio_buf [ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { bp->nb_error = error = ENXIO; SET(bp->nb_flags, NB_ERROR); nfs_buf_iodone(bp); - return (error); + return error; } nfsvers = nmp->nm_vers; nmwsize = nmp->nm_wsize; @@ -2729,11 +2888,11 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred bp->nb_error = error = EFBIG; SET(bp->nb_flags, NB_ERROR); nfs_buf_iodone(bp); - return (error); + return error; } auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, - UIO_WRITE, &uio_buf, sizeof(uio_buf)); + UIO_WRITE, &uio_buf, sizeof(uio_buf)); uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); bp->nb_rpcs = nrpcs = (length + nmwsize - 1) / nmwsize; @@ -2751,22 +2910,26 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred len = (length > nmwsize) ? nmwsize : length; cb.rcb_args[0] = offset; cb.rcb_args[1] = len; - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { cb.rcb_args[2] = nmp->nm_stategenid; - if (async && ((error = nfs_async_write_start(nmp)))) + } + if (async && ((error = nfs_async_write_start(nmp)))) { break; + } req = NULL; error = nmp->nm_funcs->nf_write_rpc_async(np, auio, len, thd, cred, - iomode, &cb, &req); + iomode, &cb, &req); if (error) { - if (async) + if (async) { nfs_async_write_done(nmp); + } break; } offset += len; length -= len; - if (async) + if (async) { continue; + } nfs_buf_write_rpc_finish(req); } @@ -2787,20 +2950,22 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred nfs_buf_write_finish(bp, thd, cred); } else { /* wait for the last RPC to mark it done */ - while (bp->nb_rpcs > 0) + while (bp->nb_rpcs > 0) { msleep(&bp->nb_rpcs, nfs_buf_mutex, 0, - "nfs_buf_write_rpc_cancel", NULL); + "nfs_buf_write_rpc_cancel", NULL); + } lck_mtx_unlock(nfs_buf_mutex); } } else { nfs_buf_write_finish(bp, thd, cred); } /* It may have just been an interrupt... 
that's OK */ - if (!ISSET(bp->nb_flags, NB_ERROR)) + if (!ISSET(bp->nb_flags, NB_ERROR)) { error = 0; + } } - return (error); + return error; } /* @@ -2822,18 +2987,20 @@ nfs_buf_write_rpc_finish(struct nfsreq *req) thread_t thd; kauth_cred_t cred; uio_t auio; - char uio_buf [ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; finish: np = req->r_np; thd = req->r_thread; cred = req->r_cred; - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_ref(cred); + } cb = req->r_callback; bp = cb.rcb_bp; - if (cb.rcb_func) /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */ + if (cb.rcb_func) { /* take an extra reference on the nfsreq in case we want to resend it later due to grace error */ nfs_request_ref(req, 0); + } nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { @@ -2854,17 +3021,19 @@ finish: error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &committed, &rlen, &wverf); if ((error == EINPROGRESS) && cb.rcb_func) { /* async request restarted */ - if (cb.rcb_func) + if (cb.rcb_func) { nfs_request_rele(req); - if (IS_VALID_CRED(cred)) + } + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); + } return; } if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && !ISSET(bp->nb_flags, NB_ERROR)) { lck_mtx_lock(&nmp->nm_lock); if ((error != NFSERR_OLD_STATEID) && (error != NFSERR_GRACE) && (cb.rcb_args[2] == nmp->nm_stategenid)) { NP(np, "nfs_buf_write_rpc_finish: error %d @ 0x%llx, 0x%x 0x%x, initiating recovery", - error, NBOFF(bp)+offset, cb.rcb_args[2], nmp->nm_stategenid); + error, NBOFF(bp) + offset, cb.rcb_args[2], nmp->nm_stategenid); nfs_need_recover(nmp, error); } lck_mtx_unlock(&nmp->nm_lock); @@ -2891,13 +3060,14 @@ finish: req->r_start = 0; nfs_asyncio_resend(req); lck_mtx_unlock(&req->r_mtx); - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); + } /* Note: nfsreq reference taken will be dropped later when finished */ return; } /* otherwise, just pause a couple seconds and retry */ - tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); + tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz); } if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { rlen = 0; @@ -2909,8 +3079,9 @@ finish: SET(bp->nb_flags, NB_ERROR); bp->nb_error = error; } - if (error || (nfsvers == NFS_VER2)) + if (error || (nfsvers == NFS_VER2)) { goto out; + } if (rlen <= 0) { SET(bp->nb_flags, NB_ERROR); bp->nb_error = error = EIO; @@ -2918,8 +3089,9 @@ finish: } /* save lowest commit level returned */ - if (committed < bp->nb_commitlevel) + if (committed < bp->nb_commitlevel) { bp->nb_commitlevel = committed; + } /* check the write verifier */ if (!bp->nb_verf) { @@ -2938,26 +3110,28 @@ finish: * need to issue another write for the rest of it. * (Don't bother if the buffer hit an error or stale wverf.) 
*/ - if (((int)rlen < length) && !(bp->nb_flags & (NB_STALEWVERF|NB_ERROR))) { + if (((int)rlen < length) && !(bp->nb_flags & (NB_STALEWVERF | NB_ERROR))) { writeagain: offset += rlen; length -= rlen; auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, - UIO_WRITE, &uio_buf, sizeof(uio_buf)); + UIO_WRITE, &uio_buf, sizeof(uio_buf)); uio_addiov(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); cb.rcb_args[0] = offset; cb.rcb_args[1] = length; - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { cb.rcb_args[2] = nmp->nm_stategenid; + } // XXX iomode should really match the original request error = nmp->nm_funcs->nf_write_rpc_async(np, auio, length, thd, cred, - NFS_WRITE_FILESYNC, &cb, &wreq); + NFS_WRITE_FILESYNC, &cb, &wreq); if (!error) { - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); + } if (!cb.rcb_func) { /* if !async we'll need to wait for this RPC to finish */ req = wreq; @@ -2990,29 +3164,34 @@ out: * aborting a partially-initiated set of RPCs) */ multasyncrpc = ISSET(bp->nb_flags, NB_MULTASYNCRPC); - if (multasyncrpc) + if (multasyncrpc) { lck_mtx_lock(nfs_buf_mutex); + } bp->nb_rpcs--; finished = (bp->nb_rpcs == 0); - if (multasyncrpc) + if (multasyncrpc) { lck_mtx_unlock(nfs_buf_mutex); + } if (finished) { - if (multasyncrpc) + if (multasyncrpc) { wakeme = &bp->nb_rpcs; + } nfs_buf_write_finish(bp, thd, cred); - if (wakeme) + if (wakeme) { wakeup(wakeme); + } } - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); + } } /* - * Send commit(s) for the given node's "needcommit" buffers + * Send commit(s) for the given node's "needcommit" buffers */ int nfs_flushcommits(nfsnode_t np, int nowait) @@ -3036,8 +3215,9 @@ nfs_flushcommits(nfsnode_t np, int nowait) */ if (!LIST_EMPTY(&np->n_dirtyblkhd)) { error = nfs_node_lock(np); - if (error) + if (error) { goto done; + } np->n_flag |= NMODIFIED; nfs_node_unlock(np); } @@ -3058,8 +3238,9 @@ nfs_flushcommits(nfsnode_t np, int nowait) } flags = NBI_DIRTY; - if (nowait) + if (nowait) { flags |= NBI_NOWAIT; + } lck_mtx_lock(nfs_buf_mutex); wverf = nmp->nm_verf; if (!nfs_buf_iterprepare(np, &blist, flags)) { @@ -3067,10 +3248,12 @@ nfs_flushcommits(nfsnode_t np, int nowait) LIST_REMOVE(bp, nb_vnbufs); LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); error = nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0); - if (error) + if (error) { continue; - if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) + } + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { nfs_buf_check_write_verifier(np, bp); + } if (((bp->nb_flags & (NB_DELWRI | NB_NEEDCOMMIT)) != (NB_DELWRI | NB_NEEDCOMMIT)) || (bp->nb_verf != wverf)) { nfs_buf_drop(bp); @@ -3082,7 +3265,7 @@ nfs_flushcommits(nfsnode_t np, int nowait) FSDBG(557, bp, bp->nb_flags, bp->nb_valid, bp->nb_dirty); FSDBG(557, bp->nb_validoff, bp->nb_validend, - bp->nb_dirtyoff, bp->nb_dirtyend); + bp->nb_dirtyoff, bp->nb_dirtyend); /* * Work out if all buffers are using the same cred @@ -3093,8 +3276,9 @@ nfs_flushcommits(nfsnode_t np, int nowait) */ if (wcred_set == 0) { wcred = bp->nb_wcred; - if (!IS_VALID_CRED(wcred)) + if (!IS_VALID_CRED(wcred)) { panic("nfs: needcommit w/out wcred"); + } wcred_set = 1; } else if ((wcred_set == 1) && wcred != bp->nb_wcred) { wcred_set = -1; @@ -3108,23 +3292,27 @@ nfs_flushcommits(nfsnode_t np, int nowait) */ prevlbp = NULL; LIST_FOREACH(lbp, &commitlist, nb_vnbufs) { - if (bp->nb_lblkno < lbp->nb_lblkno) + if (bp->nb_lblkno < lbp->nb_lblkno) { break; + } prevlbp = lbp; } LIST_REMOVE(bp, nb_vnbufs); - if (prevlbp) + 
if (prevlbp) { LIST_INSERT_AFTER(prevlbp, bp, nb_vnbufs); - else + } else { LIST_INSERT_HEAD(&commitlist, bp, nb_vnbufs); + } /* update commit range start, end */ toff = NBOFF(bp) + bp->nb_dirtyoff; - if (toff < off) + if (toff < off) { off = toff; + } toff += (u_quad_t)(bp->nb_dirtyend - bp->nb_dirtyoff); - if (toff > endoff) + if (toff > endoff) { endoff = toff; + } } nfs_buf_itercomplete(np, &blist, NBI_DIRTY); } @@ -3168,10 +3356,11 @@ nfs_flushcommits(nfsnode_t np, int nowait) * Note, it's possible the commit range could be >2^32-1. * If it is, we'll send one commit that covers the whole file. */ - if ((endoff - off) > 0xffffffff) + if ((endoff - off) > 0xffffffff) { count = 0; - else + } else { count = (endoff - off); + } retv = nmp->nm_funcs->nf_commit_rpc(np, off, count, wcred, wverf); } else { retv = 0; @@ -3179,8 +3368,9 @@ nfs_flushcommits(nfsnode_t np, int nowait) toff = NBOFF(bp) + bp->nb_dirtyoff; count = bp->nb_dirtyend - bp->nb_dirtyoff; retv = nmp->nm_funcs->nf_commit_rpc(np, toff, count, bp->nb_wcred, wverf); - if (retv) + if (retv) { break; + } } } @@ -3218,13 +3408,14 @@ nfs_flushcommits(nfsnode_t np, int nowait) lck_mtx_unlock(nfs_buf_mutex); wakeup(&nfs_nbdwrite); } - CLR(bp->nb_flags, (NB_READ|NB_DONE|NB_ERROR|NB_DELWRI)); + CLR(bp->nb_flags, (NB_READ | NB_DONE | NB_ERROR | NB_DELWRI)); /* if block still has dirty pages, we don't want it to */ /* be released in nfs_buf_iodone(). So, don't set NB_ASYNC. */ - if (!(dirty = bp->nb_dirty)) + if (!(dirty = bp->nb_dirty)) { SET(bp->nb_flags, NB_ASYNC); - else + } else { CLR(bp->nb_flags, NB_ASYNC); + } /* move to clean list */ lck_mtx_lock(nfs_buf_mutex); @@ -3243,12 +3434,12 @@ nfs_flushcommits(nfsnode_t np, int nowait) done: FSDBG_BOT(557, np, 0, 0, error); - return (error); + return error; } /* * Flush all the blocks associated with a vnode. - * Walk through the buffer pool and push any dirty pages + * Walk through the buffer pool and push any dirty pages * associated with the vnode. */ int @@ -3267,8 +3458,9 @@ nfs_flush(nfsnode_t np, int waitfor, thread_t thd, int ignore_writeerr) goto out; } nfsvers = nmp->nm_vers; - if (NMFLAG(nmp, INTR)) + if (NMFLAG(nmp, INTR)) { slpflag = PCATCH; + } if (!LIST_EMPTY(&np->n_dirtyblkhd)) { nfs_node_lock_force(np); @@ -3309,17 +3501,20 @@ again: LIST_REMOVE(bp, nb_vnbufs); LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); flags = (passone || !(waitfor == MNT_WAIT || waitfor == MNT_DWAIT)) ? 
NBAC_NOWAIT : 0; - if (flags != NBAC_NOWAIT) + if (flags != NBAC_NOWAIT) { nfs_buf_refget(bp); + } while ((error = nfs_buf_acquire(bp, flags, slpflag, slptimeo))) { FSDBG(524, bp, flags, bp->nb_lflags, bp->nb_flags); - if (error == EBUSY) + if (error == EBUSY) { break; + } if (error) { error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0); if (error2) { - if (flags != NBAC_NOWAIT) + if (flags != NBAC_NOWAIT) { nfs_buf_refrele(bp); + } nfs_buf_itercomplete(np, &blist, NBI_DIRTY); lck_mtx_unlock(nfs_buf_mutex); error = error2; @@ -3331,17 +3526,20 @@ again: } } } - if (flags != NBAC_NOWAIT) + if (flags != NBAC_NOWAIT) { nfs_buf_refrele(bp); - if (error == EBUSY) + } + if (error == EBUSY) { continue; + } if (!bp->nb_np) { /* buffer is no longer valid */ nfs_buf_drop(bp); continue; } - if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) + if (ISSET(bp->nb_flags, NB_NEEDCOMMIT)) { nfs_buf_check_write_verifier(np, bp); + } if (!ISSET(bp->nb_flags, NB_DELWRI)) { /* buffer is no longer dirty */ nfs_buf_drop(bp); @@ -3377,10 +3575,10 @@ again: lck_mtx_unlock(nfs_buf_mutex); if (waitfor == MNT_WAIT || waitfor == MNT_DWAIT) { - while ((error = vnode_waitforwrites(NFSTOV(np), 0, slpflag, slptimeo, "nfsflush"))) { - error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0); + while ((error = vnode_waitforwrites(NFSTOV(np), 0, slpflag, slptimeo, "nfsflush"))) { + error2 = nfs_sigintr(NFSTONMP(np), NULL, thd, 0); if (error2) { - error = error2; + error = error2; goto done; } if (slpflag == PCATCH) { @@ -3393,9 +3591,11 @@ again: if (nfsvers != NFS_VER2) { /* loop while it looks like there are still buffers to be */ /* commited and nfs_flushcommits() seems to be handling them. */ - while (np->n_needcommitcnt) - if (nfs_flushcommits(np, 0)) + while (np->n_needcommitcnt) { + if (nfs_flushcommits(np, 0)) { break; + } + } } if (passone) { @@ -3416,8 +3616,9 @@ again: nfs_node_unlock(np); } lck_mtx_lock(nfs_buf_mutex); - if (!LIST_EMPTY(&np->n_dirtyblkhd)) + if (!LIST_EMPTY(&np->n_dirtyblkhd)) { goto again; + } lck_mtx_unlock(nfs_buf_mutex); nfs_node_lock_force(np); /* @@ -3451,13 +3652,14 @@ again: done: lck_mtx_lock(nfs_buf_mutex); flags = np->n_bflag; - np->n_bflag &= ~(NBFLUSHINPROG|NBFLUSHWANT); + np->n_bflag &= ~(NBFLUSHINPROG | NBFLUSHWANT); lck_mtx_unlock(nfs_buf_mutex); - if (flags & NBFLUSHWANT) + if (flags & NBFLUSHWANT) { wakeup(&np->n_bflag); + } out: FSDBG_BOT(517, np, error, ignore_writeerr, 0); - return (error); + return error; } /* @@ -3478,8 +3680,9 @@ nfs_vinvalbuf_internal( int list, error = 0; if (flags & V_SAVE) { - if ((error = nfs_flush(np, MNT_WAIT, thd, (flags & V_IGNORE_WRITEERR)))) - return (error); + if ((error = nfs_flush(np, MNT_WAIT, thd, (flags & V_IGNORE_WRITEERR)))) { + return error; + } } lck_mtx_lock(nfs_buf_mutex); @@ -3487,15 +3690,17 @@ nfs_vinvalbuf_internal( list = NBI_CLEAN; if (nfs_buf_iterprepare(np, &blist, list)) { list = NBI_DIRTY; - if (nfs_buf_iterprepare(np, &blist, list)) + if (nfs_buf_iterprepare(np, &blist, list)) { break; + } } while ((bp = LIST_FIRST(&blist))) { LIST_REMOVE(bp, nb_vnbufs); - if (list == NBI_CLEAN) + if (list == NBI_CLEAN) { LIST_INSERT_HEAD(&np->n_cleanblkhd, bp, nb_vnbufs); - else + } else { LIST_INSERT_HEAD(&np->n_dirtyblkhd, bp, nb_vnbufs); + } nfs_buf_refget(bp); while ((error = nfs_buf_acquire(bp, NBAC_REMOVE, slpflag, slptimeo))) { FSDBG(556, np, bp, NBOFF(bp), bp->nb_flags); @@ -3504,7 +3709,7 @@ nfs_vinvalbuf_internal( nfs_buf_refrele(bp); nfs_buf_itercomplete(np, &blist, list); lck_mtx_unlock(nfs_buf_mutex); - return (error); + return error; } } 
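/*
 * Illustrative sketch (not part of the original patch): the invalidation
 * path below tracks per-page dirty state in a 32-bit mask plus a
 * byte-granular dirty range, and clips both to the new EOF, e.g.
 *     bp->nb_dirty &= (1 << (round_page_32(end) / PAGE_SIZE)) - 1;
 * The standalone demo below reproduces that arithmetic; clip_dirty(), the
 * 4K page size, and the npages >= 32 guard are assumptions added for
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE   4096u
#define PG_MASK   (PG_SIZE - 1)
#define ROUND_PG(x)  (((x) + PG_MASK) & ~PG_MASK)

static uint32_t
clip_dirty(uint32_t dirty, uint32_t end /* bytes of the buffer still below EOF */)
{
	/* keep only dirty bits for pages that overlap [0, end) */
	uint32_t npages = ROUND_PG(end) / PG_SIZE;
	return npages >= 32 ? dirty : (dirty & ((1u << npages) - 1));
}

int
main(void)
{
	/* pages 0, 1, and 5 dirty; EOF moves back into page 1 (byte 6000) */
	uint32_t dirty = (1u << 0) | (1u << 1) | (1u << 5);
	printf("0x%x\n", clip_dirty(dirty, 6000)); /* prints 0x3: pages 0,1 kept */
	return 0;
}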
nfs_buf_refrele(bp); @@ -3523,8 +3728,9 @@ nfs_vinvalbuf_internal( /* vm object must no longer exist */ /* hopefully we don't need to do */ /* anything for this buffer */ - } else if (error) + } else if (error) { printf("nfs_vinvalbuf: upl setup failed %d\n", error); + } bp->nb_valid = bp->nb_dirty = 0; } nfs_buf_upl_check(bp); @@ -3533,15 +3739,18 @@ nfs_vinvalbuf_internal( /* clip dirty range to EOF */ if (bp->nb_dirtyend > end) { bp->nb_dirtyend = end; - if (bp->nb_dirtyoff >= bp->nb_dirtyend) + if (bp->nb_dirtyoff >= bp->nb_dirtyend) { bp->nb_dirtyoff = bp->nb_dirtyend = 0; + } } - if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) + if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < end)) { mustwrite++; + } } - bp->nb_dirty &= (1 << (round_page_32(end)/PAGE_SIZE)) - 1; - if (bp->nb_dirty) + bp->nb_dirty &= (1 << (round_page_32(end) / PAGE_SIZE)) - 1; + if (bp->nb_dirty) { mustwrite++; + } /* also make sure we'll have a credential to do the write */ if (mustwrite && !IS_VALID_CRED(bp->nb_wcred) && !IS_VALID_CRED(cred)) { printf("nfs_vinvalbuf: found dirty buffer with no write creds\n"); @@ -3549,8 +3758,9 @@ nfs_vinvalbuf_internal( } if (mustwrite) { FSDBG(554, np, bp, 0xd00dee, bp->nb_flags); - if (!ISSET(bp->nb_flags, NB_PAGELIST)) + if (!ISSET(bp->nb_flags, NB_PAGELIST)) { panic("nfs_vinvalbuf: dirty buffer without upl"); + } /* gotta write out dirty data before invalidating */ /* (NB_STABLE indicates that data writes should be FILESYNC) */ /* (NB_NOCACHE indicates buffer should be discarded) */ @@ -3586,7 +3796,7 @@ nfs_vinvalbuf_internal( lck_mtx_lock(nfs_buf_mutex); nfs_buf_itercomplete(np, &blist, list); lck_mtx_unlock(nfs_buf_mutex); - return (error); + return error; } error = 0; } @@ -3601,17 +3811,20 @@ nfs_vinvalbuf_internal( } nfs_buf_itercomplete(np, &blist, list); } - if (!LIST_EMPTY(&(np)->n_dirtyblkhd) || !LIST_EMPTY(&(np)->n_cleanblkhd)) + if (!LIST_EMPTY(&(np)->n_dirtyblkhd) || !LIST_EMPTY(&(np)->n_cleanblkhd)) { panic("nfs_vinvalbuf: flush/inval failed"); + } lck_mtx_unlock(nfs_buf_mutex); nfs_node_lock_force(np); - if (!(flags & V_SAVE)) + if (!(flags & V_SAVE)) { np->n_flag &= ~NMODIFIED; - if (vnode_vtype(NFSTOV(np)) == VREG) + } + if (vnode_vtype(NFSTOV(np)) == VREG) { np->n_lastrahead = -1; + } nfs_node_unlock(np); NFS_BUF_FREEUP(); - return (0); + return 0; } @@ -3645,9 +3858,10 @@ nfs_vinvalbuf2(vnode_t vp, int flags, thread_t thd, kauth_cred_t cred, int intrf flags &= ~V_SAVE; ubcflags &= ~UBC_PUSHALL; } - - if (nmp && !NMFLAG(nmp, INTR)) + + if (nmp && !NMFLAG(nmp, INTR)) { intrflg = 0; + } if (intrflg) { slpflag = PCATCH; slptimeo = 2 * hz; @@ -3663,10 +3877,11 @@ nfs_vinvalbuf2(vnode_t vp, int flags, thread_t thd, kauth_cred_t cred, int intrf msleep(&np->n_bflag, nfs_buf_mutex, slpflag, "nfs_vinvalbuf", &ts); if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) { lck_mtx_unlock(nfs_buf_mutex); - return (error); + return error; } - if (np->n_bflag & NBINVALINPROG) + if (np->n_bflag & NBINVALINPROG) { slpflag = 0; + } } np->n_bflag |= NBINVALINPROG; lck_mtx_unlock(nfs_buf_mutex); @@ -3676,34 +3891,39 @@ again: error = nfs_vinvalbuf_internal(np, flags, thd, cred, slpflag, 0); while (error) { FSDBG(554, np, 0, 0, error); - if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) + if ((error = nfs_sigintr(VTONMP(vp), NULL, thd, 0))) { goto done; + } error = nfs_vinvalbuf_internal(np, flags, thd, cred, 0, slptimeo); } /* get the pages out of vm also */ - if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) + if (UBCINFOEXISTS(vp) && (size = ubc_getsize(vp))) { if 
((error = ubc_msync(vp, 0, size, NULL, ubcflags))) { - if (error == EINVAL) + if (error == EINVAL) { panic("nfs_vinvalbuf(): ubc_msync failed!, error %d", error); + } if (retry++ < 10) { /* retry invalidating a few times */ - if (retry > 1 || error == ENXIO) + if (retry > 1 || error == ENXIO) { ubcflags &= ~UBC_PUSHALL; + } goto again; } /* give up */ printf("nfs_vinvalbuf(): ubc_msync failed!, error %d\n", error); } + } done: lck_mtx_lock(nfs_buf_mutex); nflags = np->n_bflag; - np->n_bflag &= ~(NBINVALINPROG|NBINVALWANT); + np->n_bflag &= ~(NBINVALINPROG | NBINVALWANT); lck_mtx_unlock(nfs_buf_mutex); - if (nflags & NBINVALWANT) + if (nflags & NBINVALWANT) { wakeup(&np->n_bflag); + } FSDBG_BOT(554, np, flags, intrflg, error); - return (error); + return error; } /* @@ -3772,8 +3992,9 @@ nfs_asyncio_finish(struct nfsreq *req) again: nmp = req->r_nmp; - if (nmp == NULL) + if (nmp == NULL) { return; + } lck_mtx_lock(nfsiod_mutex); niod = nmp->nm_niod; @@ -3793,8 +4014,9 @@ again: */ lck_mtx_unlock(nfsiod_mutex); started++; - if (!nfsiod_start()) + if (!nfsiod_start()) { goto again; + } lck_mtx_lock(nfsiod_mutex); } } @@ -3821,8 +4043,9 @@ again: } lck_mtx_unlock(&req->r_mtx); - if (req->r_achain.tqe_next == NFSREQNOLIST) + if (req->r_achain.tqe_next == NFSREQNOLIST) { TAILQ_INSERT_TAIL(&nmp->nm_iodq, req, r_achain); + } /* If this mount doesn't already have an nfsiod working on it... */ if (!nmp->nm_niod) { @@ -3832,8 +4055,9 @@ again: wakeup(niod); } else if (nfsiod_thread_count > 0) { /* just queue it up on nfsiod mounts queue if needed */ - if (nmp->nm_iodlink.tqe_next == NFSNOLIST) + if (nmp->nm_iodlink.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink); + } lck_mtx_unlock(nfsiod_mutex); } else { printf("nfs_asyncio(): no nfsiods? 
%d %d (%d)\n", nfsiod_thread_count, NFSIOD_MAX, started); @@ -3857,8 +4081,9 @@ nfs_asyncio_resend(struct nfsreq *req) { struct nfsmount *nmp = req->r_nmp; - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { return; + } nfs_gss_clnt_rpcdone(req); lck_mtx_lock(&nmp->nm_lock); @@ -3888,17 +4113,19 @@ nfs_buf_readdir(struct nfsbuf *bp, vfs_context_t ctx) struct nfsmount *nmp = NFSTONMP(np); int error = 0; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } - if (nmp->nm_vers < NFS_VER4) + if (nmp->nm_vers < NFS_VER4) { error = nfs3_readdir_rpc(np, bp, ctx); - else + } else { error = nfs4_readdir_rpc(np, bp, ctx); + } if (error && (error != NFSERR_DIRBUFDROPPED)) { SET(bp->nb_flags, NB_ERROR); bp->nb_error = error; } - return (error); + return error; } diff --git a/bsd/nfs/nfs_boot.c b/bsd/nfs/nfs_boot.c index 56d3be664..67d3d5ef4 100644 --- a/bsd/nfs/nfs_boot.c +++ b/bsd/nfs/nfs_boot.c @@ -127,12 +127,14 @@ #if NETHER == 0 -int nfs_boot_init(__unused struct nfs_diskless *nd) +int +nfs_boot_init(__unused struct nfs_diskless *nd) { panic("nfs_boot_init: no ether"); } -int nfs_boot_getfh(__unused struct nfs_diskless *nd, __unused int v3, __unused int sotype) +int +nfs_boot_getfh(__unused struct nfs_diskless *nd, __unused int v3, __unused int sotype) { panic("nfs_boot_getfh: no ether"); } @@ -161,21 +163,21 @@ int nfs_boot_getfh(__unused struct nfs_diskless *nd, __unused int v3, __unused i /* bootparam RPC */ static int bp_whoami(struct sockaddr_in *bpsin, - struct in_addr *my_ip, struct in_addr *gw_ip); + struct in_addr *my_ip, struct in_addr *gw_ip); static int bp_getfile(struct sockaddr_in *bpsin, const char *key, - struct sockaddr_in *mdsin, char *servname, char *path); + struct sockaddr_in *mdsin, char *servname, char *path); /* mountd RPC */ static int md_mount(struct sockaddr_in *mdsin, char *path, int v3, int sotype, - u_char *fhp, u_int32_t *fhlenp); + u_char *fhp, u_int32_t *fhlenp); /* other helpers */ static int get_file_handle(struct nfs_dlmount *ndmntp); -#define IP_FORMAT "%d.%d.%d.%d" -#define IP_CH(ip) ((u_char *)ip) -#define IP_LIST(ip) IP_CH(ip)[0],IP_CH(ip)[1],IP_CH(ip)[2],IP_CH(ip)[3] +#define IP_FORMAT "%d.%d.%d.%d" +#define IP_CH(ip) ((u_char *)ip) +#define IP_LIST(ip) IP_CH(ip)[0],IP_CH(ip)[1],IP_CH(ip)[2],IP_CH(ip)[3] #include @@ -185,46 +187,47 @@ static int get_file_handle(struct nfs_dlmount *ndmntp); int nfs_boot_init(struct nfs_diskless *nd) { - struct sockaddr_in bp_sin; - boolean_t do_bpwhoami = TRUE; - boolean_t do_bpgetfile = TRUE; - int error = 0; - struct in_addr my_ip; - struct sockaddr_in * sin_p; + struct sockaddr_in bp_sin; + boolean_t do_bpwhoami = TRUE; + boolean_t do_bpgetfile = TRUE; + int error = 0; + struct in_addr my_ip; + struct sockaddr_in * sin_p; /* make sure mbuf constants are set up */ - if (!nfs_mbuf_mhlen) + if (!nfs_mbuf_mhlen) { nfs_mbuf_init(); + } /* by this point, networking must already have been configured */ if (netboot_iaddr(&my_ip) == FALSE) { - printf("nfs_boot: networking is not initialized\n"); - error = ENXIO; - goto failed; + printf("nfs_boot: networking is not initialized\n"); + error = ENXIO; + goto failed; } /* get the root path information */ MALLOC_ZONE(nd->nd_root.ndm_path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); if (!nd->nd_root.ndm_path) { - printf("nfs_boot: can't allocate root path buffer\n"); - error = ENOMEM; - goto failed; + printf("nfs_boot: can't allocate root path buffer\n"); + error = ENOMEM; + goto failed; } MALLOC_ZONE(nd->nd_root.ndm_mntfrom, char *, MAXPATHLEN, 
M_NAMEI, M_WAITOK); if (!nd->nd_root.ndm_mntfrom) { - printf("nfs_boot: can't allocate root mntfrom buffer\n"); - error = ENOMEM; - goto failed; + printf("nfs_boot: can't allocate root mntfrom buffer\n"); + error = ENOMEM; + goto failed; } sin_p = &nd->nd_root.ndm_saddr; bzero((caddr_t)sin_p, sizeof(*sin_p)); sin_p->sin_len = sizeof(*sin_p); sin_p->sin_family = AF_INET; if (netboot_rootpath(&sin_p->sin_addr, nd->nd_root.ndm_host, - sizeof(nd->nd_root.ndm_host), - nd->nd_root.ndm_path, MAXPATHLEN) == TRUE) { - do_bpgetfile = FALSE; - do_bpwhoami = FALSE; + sizeof(nd->nd_root.ndm_host), + nd->nd_root.ndm_path, MAXPATHLEN) == TRUE) { + do_bpgetfile = FALSE; + do_bpwhoami = FALSE; } nd->nd_private.ndm_saddr.sin_addr.s_addr = 0; @@ -249,20 +252,20 @@ nfs_boot_init(struct nfs_diskless *nd) printf("nfs_boot: bootparam whoami, error=%d", error); goto failed; } - printf("nfs_boot: BOOTPARAMS server " IP_FORMAT "\n", - IP_LIST(&bp_sin.sin_addr)); + printf("nfs_boot: BOOTPARAMS server " IP_FORMAT "\n", + IP_LIST(&bp_sin.sin_addr)); printf("nfs_boot: hostname %s\n", hostname); } if (do_bpgetfile) { error = bp_getfile(&bp_sin, "root", &nd->nd_root.ndm_saddr, - nd->nd_root.ndm_host, nd->nd_root.ndm_path); + nd->nd_root.ndm_host, nd->nd_root.ndm_path); if (error) { printf("nfs_boot: bootparam get root: %d\n", error); goto failed; } } -#if !defined(NO_MOUNT_PRIVATE) +#if !defined(NO_MOUNT_PRIVATE) if (do_bpgetfile) { /* get private path */ MALLOC_ZONE(nd->nd_private.ndm_path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); if (!nd->nd_private.ndm_path) { @@ -276,13 +279,13 @@ nfs_boot_init(struct nfs_diskless *nd) error = ENOMEM; goto failed; } - error = bp_getfile(&bp_sin, "private", - &nd->nd_private.ndm_saddr, - nd->nd_private.ndm_host, - nd->nd_private.ndm_path); + error = bp_getfile(&bp_sin, "private", + &nd->nd_private.ndm_saddr, + nd->nd_private.ndm_host, + nd->nd_private.ndm_path); if (!error) { char * check_path = NULL; - + MALLOC_ZONE(check_path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); if (!check_path) { printf("nfs_boot: can't allocate check_path buffer\n"); @@ -290,25 +293,23 @@ nfs_boot_init(struct nfs_diskless *nd) goto failed; } snprintf(check_path, MAXPATHLEN, "%s/private", nd->nd_root.ndm_path); - if ((nd->nd_root.ndm_saddr.sin_addr.s_addr - == nd->nd_private.ndm_saddr.sin_addr.s_addr) + if ((nd->nd_root.ndm_saddr.sin_addr.s_addr + == nd->nd_private.ndm_saddr.sin_addr.s_addr) && (strncmp(check_path, nd->nd_private.ndm_path, MAXPATHLEN) == 0)) { /* private path is prefix of root path, don't mount */ nd->nd_private.ndm_saddr.sin_addr.s_addr = 0; } FREE_ZONE(check_path, MAXPATHLEN, M_NAMEI); - } - else { + } else { /* private key not defined, don't mount */ nd->nd_private.ndm_saddr.sin_addr.s_addr = 0; } - } - else { + } else { error = 0; } #endif /* NO_MOUNT_PRIVATE */ failed: - return (error); + return error; } /* @@ -325,11 +326,11 @@ nfs_boot_getfh(struct nfs_diskless *nd, int v3, int sotype) error = get_file_handle(&nd->nd_root); if (error) { printf("nfs_boot: get_file_handle(v%d) root failed, %d\n", - v3 ? 3 : 2, error); + v3 ? 3 : 2, error); goto failed; } -#if !defined(NO_MOUNT_PRIVATE) +#if !defined(NO_MOUNT_PRIVATE) if (nd->nd_private.ndm_saddr.sin_addr.s_addr) { /* get private file handle */ nd->nd_private.ndm_nfsv3 = v3; @@ -337,13 +338,13 @@ nfs_boot_getfh(struct nfs_diskless *nd, int v3, int sotype) error = get_file_handle(&nd->nd_private); if (error) { printf("nfs_boot: get_file_handle(v%d) private failed, %d\n", - v3 ? 3 : 2, error); + v3 ? 
3 : 2, error); goto failed; } } #endif /* NO_MOUNT_PRIVATE */ failed: - return (error); + return error; } static int @@ -357,22 +358,25 @@ get_file_handle(struct nfs_dlmount *ndmntp) * using RPC to mountd/mount */ error = md_mount(&ndmntp->ndm_saddr, ndmntp->ndm_path, ndmntp->ndm_nfsv3, - ndmntp->ndm_sotype, ndmntp->ndm_fh, &ndmntp->ndm_fhlen); - if (error) - return (error); + ndmntp->ndm_sotype, ndmntp->ndm_fh, &ndmntp->ndm_fhlen); + if (error) { + return error; + } /* Construct remote path (for getmntinfo(3)) */ dp = ndmntp->ndm_mntfrom; endp = dp + MAXPATHLEN - 1; - for (sp = ndmntp->ndm_host; *sp && dp < endp;) + for (sp = ndmntp->ndm_host; *sp && dp < endp;) { *dp++ = *sp++; - if (dp < endp) + } + if (dp < endp) { *dp++ = ':'; - for (sp = ndmntp->ndm_path; *sp && dp < endp;) + } + for (sp = ndmntp->ndm_path; *sp && dp < endp;) { *dp++ = *sp++; + } *dp = '\0'; - return (0); - + return 0; } @@ -385,20 +389,22 @@ mbuf_get_with_len(size_t msg_len, mbuf_t *m) { int error; error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, m); - if (error) - return (error); + if (error) { + return error; + } if (msg_len > mbuf_maxlen(*m)) { error = mbuf_mclget(MBUF_WAITOK, MBUF_TYPE_DATA, m); if (error) { mbuf_freem(*m); - return (error); + return error; } - if (msg_len > mbuf_maxlen(*m)) + if (msg_len > mbuf_maxlen(*m)) { panic("nfs_boot: msg_len > MCLBYTES"); + } } mbuf_setlen(*m, msg_len); mbuf_pkthdr_setlen(*m, msg_len); - return (0); + return 0; } @@ -406,12 +412,12 @@ mbuf_get_with_len(size_t msg_len, mbuf_t *m) * String representation for RPC. */ struct rpc_string { - u_int32_t len; /* length without null or padding */ - u_char data[4]; /* data (longer, of course) */ - /* data is padded to a long-word boundary */ + u_int32_t len; /* length without null or padding */ + u_char data[4]; /* data (longer, of course) */ + /* data is padded to a long-word boundary */ }; /* Compute space used given string length. */ -#define RPC_STR_SIZE(slen) (4 + ((slen + 3) & ~3)) +#define RPC_STR_SIZE(slen) (4 + ((slen + 3) & ~3)) /* * Inet address in RPC messages @@ -419,7 +425,7 @@ struct rpc_string { */ struct bp_inaddr { u_int32_t atype; - int32_t addr[4]; + int32_t addr[4]; }; @@ -440,8 +446,8 @@ struct bp_inaddr { */ static int bp_whoami(struct sockaddr_in *bpsin, - struct in_addr *my_ip, - struct in_addr *gw_ip) + struct in_addr *my_ip, + struct in_addr *gw_ip) { /* RPC structures for PMAPPROC_CALLIT */ struct whoami_call { @@ -460,15 +466,16 @@ bp_whoami(struct sockaddr_in *bpsin, size_t msg_len, cn_len, dn_len; u_char *p; int32_t *lp; - size_t encapsulated_size; + size_t encapsulated_size; /* * Get message buffer of sufficient size. */ msg_len = sizeof(*call); error = mbuf_get_with_len(msg_len, &m); - if (error) + if (error) { return error; + } /* * Build request message for PMAPPROC_CALLIT. @@ -483,17 +490,18 @@ bp_whoami(struct sockaddr_in *bpsin, call->call_ia.atype = htonl(1); p = (u_char*)my_ip; lp = call->call_ia.addr; - *lp++ = htonl(*p); p++; - *lp++ = htonl(*p); p++; - *lp++ = htonl(*p); p++; - *lp++ = htonl(*p); p++; + *lp++ = htonl(*p); p++; + *lp++ = htonl(*p); p++; + *lp++ = htonl(*p); p++; + *lp++ = htonl(*p); p++; /* RPC: portmap/callit */ bpsin->sin_port = htons(PMAPPORT); error = krpc_call(bpsin, SOCK_DGRAM, PMAPPROG, PMAPVERS, PMAPPROC_CALLIT, &m, &sin); - if (error) + if (error) { return error; + } /* * Parse result message. @@ -502,29 +510,33 @@ bp_whoami(struct sockaddr_in *bpsin, lp = mbuf_data(m); /* bootparam server port (also grab from address). 
*/ - if (msg_len < sizeof(*lp)) + if (msg_len < sizeof(*lp)) { goto bad; + } msg_len -= sizeof(*lp); bpsin->sin_port = htons((short)ntohl(*lp++)); bpsin->sin_addr.s_addr = sin.sin_addr.s_addr; /* length of encapsulated results */ if (os_add_overflow((size_t) ntohl(*lp), sizeof(*lp), &encapsulated_size) - || msg_len < encapsulated_size) { + || msg_len < encapsulated_size) { goto bad; } msg_len = ntohl(*lp++); p = (u_char*)lp; /* client name */ - if (msg_len < sizeof(*str)) + if (msg_len < sizeof(*str)) { goto bad; + } str = (struct rpc_string *)p; cn_len = ntohl(str->len); - if ((msg_len - 4) < cn_len) + if ((msg_len - 4) < cn_len) { goto bad; - if (cn_len >= MAXHOSTNAMELEN) + } + if (cn_len >= MAXHOSTNAMELEN) { goto bad; + } bcopy(str->data, hostname, cn_len); hostname[cn_len] = '\0'; hostnamelen = cn_len; @@ -532,14 +544,17 @@ bp_whoami(struct sockaddr_in *bpsin, msg_len -= RPC_STR_SIZE(cn_len); /* domain name */ - if (msg_len < sizeof(*str)) + if (msg_len < sizeof(*str)) { goto bad; + } str = (struct rpc_string *)p; dn_len = ntohl(str->len); - if ((msg_len - 4) < dn_len) + if ((msg_len - 4) < dn_len) { goto bad; - if (dn_len >= MAXHOSTNAMELEN) + } + if (dn_len >= MAXHOSTNAMELEN) { goto bad; + } bcopy(str->data, domainname, dn_len); domainname[dn_len] = '\0'; domainnamelen = dn_len; @@ -547,11 +562,13 @@ bp_whoami(struct sockaddr_in *bpsin, msg_len -= RPC_STR_SIZE(dn_len); /* gateway address */ - if (msg_len < sizeof(*bia)) + if (msg_len < sizeof(*bia)) { goto bad; + } bia = (struct bp_inaddr *)p; - if (bia->atype != htonl(1)) + if (bia->atype != htonl(1)) { goto bad; + } p = (u_char*)gw_ip; *p++ = ntohl(bia->addr[0]); *p++ = ntohl(bia->addr[1]); @@ -565,7 +582,7 @@ bad: out: mbuf_freem(m); - return(error); + return error; } @@ -578,10 +595,10 @@ out: */ static int bp_getfile(struct sockaddr_in *bpsin, - const char *key, - struct sockaddr_in *md_sin, - char *serv_name, - char *pathname) + const char *key, + struct sockaddr_in *md_sin, + char *serv_name, + char *pathname) { struct rpc_string *str; mbuf_t m; @@ -600,8 +617,9 @@ bp_getfile(struct sockaddr_in *bpsin, msg_len += RPC_STR_SIZE(cn_len); msg_len += RPC_STR_SIZE(key_len); error = mbuf_get_with_len(msg_len, &m); - if (error) + if (error) { return error; + } /* * Build request message. @@ -620,9 +638,10 @@ bp_getfile(struct sockaddr_in *bpsin, /* RPC: bootparam/getfile */ error = krpc_call(bpsin, SOCK_DGRAM, BOOTPARAM_PROG, BOOTPARAM_VERS, - BOOTPARAM_GETFILE, &m, NULL); - if (error) + BOOTPARAM_GETFILE, &m, NULL); + if (error) { return error; + } /* * Parse result message. 
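/*
 * Illustrative sketch (not part of the original patch): both bp_whoami()
 * above and bp_getfile() below walk reply buffers built from XDR-style
 * strings -- a big-endian 32-bit length followed by data padded to a
 * 4-byte boundary, i.e. RPC_STR_SIZE(slen) = 4 + ((slen + 3) & ~3).
 * A hypothetical bounds-checked reader for that layout, mirroring the
 * "goto bad" checks in the parsing code:
 */
#include <arpa/inet.h>   /* ntohl */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define RPC_STR_SPACE(slen) (4 + (((slen) + 3) & ~(size_t)3))

/*
 * Copy one rpc string out of msg[0..msg_len) into out[0..out_max),
 * NUL-terminating it. Returns the padded number of bytes consumed from
 * msg, or 0 on any bounds error.
 */
static size_t
rpc_str_read(const unsigned char *msg, size_t msg_len,
    char *out, size_t out_max)
{
	uint32_t slen;

	if (msg_len < 4) {
		return 0;                       /* no room for the length word */
	}
	memcpy(&slen, msg, 4);
	slen = ntohl(slen);
	if (msg_len - 4 < slen || slen >= out_max) {
		return 0;                       /* truncated buffer or oversized string */
	}
	memcpy(out, msg + 4, slen);
	out[slen] = '\0';
	return RPC_STR_SPACE((size_t)slen);     /* length word + padded data */
}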
@@ -631,25 +650,30 @@ bp_getfile(struct sockaddr_in *bpsin, msg_len = mbuf_len(m); /* server name */ - if (msg_len < sizeof(*str)) + if (msg_len < sizeof(*str)) { goto bad; + } str = (struct rpc_string *)p; sn_len = ntohl(str->len); - if ((msg_len - 4) < sn_len) + if ((msg_len - 4) < sn_len) { goto bad; - if (sn_len >= MAXHOSTNAMELEN) + } + if (sn_len >= MAXHOSTNAMELEN) { goto bad; + } bcopy(str->data, serv_name, sn_len); serv_name[sn_len] = '\0'; p += RPC_STR_SIZE(sn_len); msg_len -= RPC_STR_SIZE(sn_len); /* server IP address (mountd) */ - if (msg_len < sizeof(*bia)) + if (msg_len < sizeof(*bia)) { goto bad; + } bia = (struct bp_inaddr *)p; - if (bia->atype != htonl(1)) + if (bia->atype != htonl(1)) { goto bad; + } sin = md_sin; bzero((caddr_t)sin, sizeof(*sin)); sin->sin_len = sizeof(*sin); @@ -663,14 +687,17 @@ bp_getfile(struct sockaddr_in *bpsin, msg_len -= sizeof(*bia); /* server pathname */ - if (msg_len < sizeof(*str)) + if (msg_len < sizeof(*str)) { goto bad; + } str = (struct rpc_string *)p; path_len = ntohl(str->len); - if ((msg_len - 4) < path_len) + if ((msg_len - 4) < path_len) { goto bad; - if (path_len >= MAXPATHLEN) + } + if (path_len >= MAXPATHLEN) { goto bad; + } bcopy(str->data, pathname, path_len); pathname[path_len] = '\0'; goto out; @@ -681,7 +708,7 @@ bad: out: mbuf_freem(m); - return(0); + return 0; } @@ -691,18 +718,18 @@ out: * Also, sets sin->sin_port to the NFS service port. */ static int -md_mount(struct sockaddr_in *mdsin, /* mountd server address */ - char *path, - int v3, - int sotype, - u_char *fhp, - u_int32_t *fhlenp) +md_mount(struct sockaddr_in *mdsin, /* mountd server address */ + char *path, + int v3, + int sotype, + u_char *fhp, + u_int32_t *fhlenp) { /* The RPC structures */ struct rpc_string *str; struct rdata { - u_int32_t errno; - u_char data[NFSX_V3FHMAX + sizeof(u_int32_t)]; + u_int32_t errno; + u_char data[NFSX_V3FHMAX + sizeof(u_int32_t)]; } *rdata; mbuf_t m; size_t mlen; @@ -713,14 +740,16 @@ md_mount(struct sockaddr_in *mdsin, /* mountd server address */ /* Get port number for MOUNTD. */ error = krpc_portmap(mdsin, RPCPROG_MNT, mntversion, proto, &mntport); - if (error) + if (error) { return error; + } /* Get port number for NFS use. */ /* (If NFS/proto unavailable, don't bother with the mount call) */ error = krpc_portmap(mdsin, NFS_PROG, v3 ? NFS_VER3 : NFS_VER2, proto, &nfsport); - if (error) + if (error) { return error; + } /* Set port number for MOUNTD */ mdsin->sin_port = mntport; @@ -729,44 +758,50 @@ md_mount(struct sockaddr_in *mdsin, /* mountd server address */ mlen = RPC_STR_SIZE(slen); error = mbuf_get_with_len(mlen, &m); - if (error) + if (error) { return error; + } str = mbuf_data(m); str->len = htonl(slen); bcopy(path, str->data, slen); /* Do RPC to mountd. 
*/ error = krpc_call(mdsin, sotype, RPCPROG_MNT, mntversion, RPCMNT_MOUNT, &m, NULL); - if (error) - return error; /* message already freed */ - + if (error) { + return error; /* message already freed */ + } /* * the reply must be long enough to hold the errno plus either of: * + a v2 filehandle * + a v3 filehandle length + a v3 filehandle */ mlen = mbuf_len(m); - if (mlen < sizeof(u_int32_t)) + if (mlen < sizeof(u_int32_t)) { goto bad; + } rdata = mbuf_data(m); error = ntohl(rdata->errno); - if (error) + if (error) { goto out; + } if (v3) { u_int32_t fhlen; u_char *fh; - if (mlen < sizeof(u_int32_t)*2) + if (mlen < sizeof(u_int32_t) * 2) { goto bad; + } fhlen = ntohl(*(u_int32_t*)rdata->data); fh = rdata->data + sizeof(u_int32_t); - if (mlen < (sizeof(u_int32_t)*2 + fhlen) - || fhlen >= (NFSX_V3FHMAX + sizeof(u_int32_t))) + if (mlen < (sizeof(u_int32_t) * 2 + fhlen) + || fhlen >= (NFSX_V3FHMAX + sizeof(u_int32_t))) { goto bad; + } bcopy(fh, fhp, fhlen); *fhlenp = fhlen; } else { - if (mlen < (sizeof(u_int32_t) + NFSX_V2FH)) + if (mlen < (sizeof(u_int32_t) + NFSX_V2FH)) { goto bad; + } bcopy(rdata->data, fhp, NFSX_V2FH); *fhlenp = NFSX_V2FH; } diff --git a/bsd/nfs/nfs_gss.c b/bsd/nfs/nfs_gss.c index 02c121289..c1d300d0f 100644 --- a/bsd/nfs/nfs_gss.c +++ b/bsd/nfs/nfs_gss.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
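/*
 * Sketch of the mount-reply validation in md_mount() above: the reply must
 * hold a status word plus either a fixed-size v2 filehandle or a v3 length
 * word followed by that many filehandle bytes. The constants stand in for
 * NFSX_V2FH/NFSX_V3FHMAX and the function itself is hypothetical (the
 * kernel's v3 upper-bound check is phrased slightly differently).
 */
#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define FH2_SIZE 32                     /* fixed v2 filehandle size */
#define FH3_MAX  64                     /* maximum v3 filehandle size */

/* Returns the filehandle length on success, -1 on a malformed reply. */
static int
mount_reply_fh(const uint8_t *reply, size_t mlen, int v3, uint8_t *fhp)
{
    uint32_t fhlen;

    if (mlen < sizeof(uint32_t)) {      /* no room for the status word */
        return -1;
    }
    if (!v3) {
        if (mlen < sizeof(uint32_t) + FH2_SIZE) {
            return -1;
        }
        memcpy(fhp, reply + sizeof(uint32_t), FH2_SIZE);
        return FH2_SIZE;
    }
    if (mlen < 2 * sizeof(uint32_t)) {  /* status + filehandle length */
        return -1;
    }
    memcpy(&fhlen, reply + sizeof(uint32_t), sizeof(fhlen));
    fhlen = ntohl(fhlen);
    if (fhlen > FH3_MAX || mlen < 2 * sizeof(uint32_t) + fhlen) {
        return -1;
    }
    memcpy(fhp, reply + 2 * sizeof(uint32_t), fhlen);
    return (int)fhlen;
}

int
main(void)
{
    uint8_t reply[2 * sizeof(uint32_t) + 8] = { 0 };
    uint8_t fh[FH3_MAX];

    reply[7] = 8;                       /* v3 reply: status 0, fhlen 8 */
    return mount_reply_fh(reply, sizeof(reply), 1, fh) == 8 ? 0 : 1;
}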
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -138,35 +138,35 @@ uint8_t krb5_mech_oid[11] = { 0x06, 0x09, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x12, 0x static uint8_t xdrpad[] = { 0x00, 0x00, 0x00, 0x00}; #if NFSCLIENT -static int nfs_gss_clnt_ctx_find(struct nfsreq *); -static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *); -static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *); -static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *); -static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *); -static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t); -void nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *); -static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *); -static int nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **); -static void nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *); -static void nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t); +static int nfs_gss_clnt_ctx_find(struct nfsreq *); +static int nfs_gss_clnt_ctx_init(struct nfsreq *, struct nfs_gss_clnt_ctx *); +static int nfs_gss_clnt_ctx_init_retry(struct nfsreq *, struct nfs_gss_clnt_ctx *); +static int nfs_gss_clnt_ctx_callserver(struct nfsreq *, struct nfs_gss_clnt_ctx *); +static uint8_t *nfs_gss_clnt_svcname(struct nfsmount *, gssd_nametype *, uint32_t *); +static int nfs_gss_clnt_gssd_upcall(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t); +void nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *); +static void nfs_gss_clnt_ctx_clean(struct nfs_gss_clnt_ctx *); +static int nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *, struct nfs_gss_clnt_ctx **); +static void nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *); +static void nfs_gss_clnt_log_error(struct nfsreq *, struct nfs_gss_clnt_ctx *, uint32_t, uint32_t); #endif /* NFSCLIENT */ #if NFSSERVER static struct nfs_gss_svc_ctx *nfs_gss_svc_ctx_find(uint32_t); -static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *); -static void nfs_gss_svc_ctx_timer(void *, void *); -static int nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *); -static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t); +static void nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *); +static void nfs_gss_svc_ctx_timer(void *, void *); +static int nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *); +static int nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *, uint32_t); #endif /* NFSSERVER */ -static void host_release_special_port(mach_port_t); +static void host_release_special_port(mach_port_t); static mach_port_t host_copy_special_port(mach_port_t); -static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *); -static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *); +static void nfs_gss_mach_alloc_buffer(u_char *, uint32_t, vm_map_copy_t *); +static int nfs_gss_mach_vmcopyout(vm_map_copy_t, uint32_t, u_char *); -static int nfs_gss_mchain_length(mbuf_t); -static int nfs_gss_append_chain(struct nfsm_chain *, mbuf_t); -static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t); +static int nfs_gss_mchain_length(mbuf_t); +static int nfs_gss_append_chain(struct nfsm_chain *, mbuf_t); +static void nfs_gss_nfsm_chain(struct nfsm_chain *, mbuf_t); #if NFSSERVER thread_call_t nfs_gss_svc_ctx_timer_call; @@ -186,7 +186,7 @@ nfs_gss_init(void) #endif /* NFSCLIENT */ #if NFSSERVER - nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", 
LCK_GRP_ATTR_NULL); + nfs_gss_svc_grp = lck_grp_alloc_init("rpcsec_gss_svc", LCK_GRP_ATTR_NULL); nfs_gss_svc_ctx_hashtbl = hashinit(SVC_CTX_HASHSZ, M_TEMP, &nfs_gss_svc_ctx_hash); nfs_gss_svc_ctx_mutex = lck_mtx_alloc_init(nfs_gss_svc_grp, LCK_ATTR_NULL); @@ -216,20 +216,22 @@ rpc_gss_prepend_32(mbuf_t *mb, uint32_t value) mbuf_t nmb; error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &nmb); - if (error) - return (error); + if (error) { + return error; + } mbuf_setnext(nmb, *mb); *mb = nmb; } #endif error = mbuf_prepend(mb, sizeof(uint32_t), MBUF_WAITOK); - if (error) - return (error); + if (error) { + return error; + } data = mbuf_data(*mb); *data = txdr_unsigned(value); - return (0); + return 0; } /* @@ -250,8 +252,9 @@ rpc_gss_data_create(mbuf_t *mbp_head, uint32_t seqnum) uint8_t *data; error = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, &mb); - if (error) - return (error); + if (error) { + return error; + } data = mbuf_data(mb); #if 0 /* Reserve space for prepending */ @@ -259,26 +262,28 @@ rpc_gss_data_create(mbuf_t *mbp_head, uint32_t seqnum) len = (len & ~0x3) - NFSX_UNSIGNED; printf("%s: data = %p, len = %d\n", __func__, data, (int)len); error = mbuf_setdata(mb, data + len, 0); - if (error || mbuf_trailingspace(mb)) + if (error || mbuf_trailingspace(mb)) { printf("%s: data = %p trailingspace = %d error = %d\n", __func__, mbuf_data(mb), (int)mbuf_trailingspace(mb), error); + } #endif /* Reserve 16 words for prepending */ - error = mbuf_setdata(mb, data + 16*sizeof(uint32_t), 0); + error = mbuf_setdata(mb, data + 16 * sizeof(uint32_t), 0); nfsm_chain_init(nmcp, mb); nfsm_chain_add_32(error, nmcp, seqnum); nfsm_chain_build_done(error, nmcp); - if (error) - return (EINVAL); + if (error) { + return EINVAL; + } mbuf_setnext(nmcp->nmc_mcur, *mbp_head); *mbp_head = nmcp->nmc_mhead; - return (0); + return 0; } /* * Create an rpc_gss_integ_data_t given an argument or result in mb_head. * On successful return mb_head will point to the rpc_gss_integ_data_t of length len. - * Note mb_head will now point to a 4 byte sequence number. len does not include + * Note mb_head will now point to a 4 byte sequence number. len does not include * any extra xdr padding. 
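/*
 * Flat-buffer sketch of the prepend strategy in rpc_gss_data_create() and
 * rpc_gss_prepend_32() above: the payload starts 16 words into the buffer
 * so that length and sequence words can be prepended in place rather than
 * by allocating fresh mbufs each time. The struct and helpers are
 * illustrative stand-ins for the mbuf-chain code, not kernel interfaces.
 */
#include <arpa/inet.h>
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define GSS_HEADROOM (16 * sizeof(uint32_t))

struct gss_buf {
    uint8_t data[2048];
    size_t  off;                        /* start of valid data */
    size_t  len;                        /* bytes of valid data */
};

static void
gss_buf_init(struct gss_buf *b, const void *payload, size_t plen)
{
    assert(plen <= sizeof(b->data) - GSS_HEADROOM);
    b->off = GSS_HEADROOM;              /* reserve room for prepends */
    b->len = plen;
    memcpy(b->data + b->off, payload, plen);
}

/* Prepend one big-endian 32-bit word (e.g. a seqnum or opaque length). */
static void
gss_buf_prepend_32(struct gss_buf *b, uint32_t value)
{
    uint32_t be = htonl(value);

    assert(b->off >= sizeof(be));
    b->off -= sizeof(be);
    memcpy(b->data + b->off, &be, sizeof(be));
    b->len += sizeof(be);
}

int
main(void)
{
    struct gss_buf b;
    uint8_t args[] = { 1, 2, 3, 4 };

    gss_buf_init(&b, args, sizeof(args));
    gss_buf_prepend_32(&b, 42);                 /* sequence number */
    gss_buf_prepend_32(&b, (uint32_t)b.len);    /* opaque data length */
    return b.len == sizeof(args) + 2 * sizeof(uint32_t) ? 0 : 1;
}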
* Returns 0 on success, else an errno_t */ @@ -294,11 +299,13 @@ rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, ui /* Length of the argument or result */ length = nfs_gss_mchain_length(*mb_head); - if (len) + if (len) { *len = length; + } error = rpc_gss_data_create(mb_head, seqnum); - if (error) - return (error); + if (error) { + return error; + } /* * length is the length of the rpc_gss_data @@ -307,12 +314,13 @@ rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, ui major = gss_krb5_get_mic_mbuf(&error, ctx, 0, *mb_head, 0, length, &mic); if (major != GSS_S_COMPLETE) { printf("gss_krb5_get_mic_mbuf failed %d\n", error); - return (error); + return error; } error = rpc_gss_prepend_32(mb_head, length); - if (error) - return (error); + if (error) { + return error; + } nfsm_chain_dissect_init(error, &nmc, *mb_head); /* Append GSS mic token by advancing rpc_gss_data_t length + NFSX_UNSIGNED (size of the length field) */ @@ -326,7 +334,7 @@ rpc_gss_integ_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, ui // printmbuf("rpc_gss_integ_data_create done", *mb_head, 0, 0); assert(nmc.nmc_mhead == *mb_head); - return (error); + return error; } /* @@ -345,24 +353,28 @@ rpc_gss_priv_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uin uint32_t length; error = rpc_gss_data_create(mb_head, seqnum); - if (error) - return (error); + if (error) { + return error; + } length = nfs_gss_mchain_length(*mb_head); major = gss_krb5_wrap_mbuf(&error, ctx, 1, 0, mb_head, 0, length, NULL); - if (major != GSS_S_COMPLETE) - return (error); + if (major != GSS_S_COMPLETE) { + return error; + } length = nfs_gss_mchain_length(*mb_head); - if (len) + if (len) { *len = length; + } pad = nfsm_pad(length); /* Prepend the opaque length of rep rpc_gss_priv_data */ error = rpc_gss_prepend_32(mb_head, length); - if (error) - return (error); + if (error) { + return error; + } if (pad) { nfsm_chain_dissect_init(error, &nmc, *mb_head); /* Advance the opauque size of length and length data */ @@ -372,7 +384,7 @@ rpc_gss_priv_data_create(gss_ctx_id_t ctx, mbuf_t *mb_head, uint32_t seqnum, uin nfsm_chain_build_done(error, &nmc); } - return (error); + return error; } #if NFSCLIENT @@ -399,27 +411,29 @@ rpc_gss_integ_data_restore(gss_ctx_id_t ctx __unused, mbuf_t *mb_head, size_t le /* should only be one, ... 
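/*
 * XDR padding math used by rpc_gss_priv_data_create() above: opaque data
 * is padded to a 4-byte boundary, and per the code the pad bytes (from the
 * static xdrpad[] array) are appended after the wrap token. nfsm_pad() is
 * re-derived here from how it is used; the kernel macro may differ in form.
 */
#include <assert.h>
#include <stdint.h>

/* Bytes of zero padding needed to bring len up to a 4-byte boundary. */
static uint32_t
nfsm_pad(uint32_t len)
{
    return (4u - (len & 3u)) & 3u;
}

int
main(void)
{
    assert(nfsm_pad(0) == 0);
    assert(nfsm_pad(1) == 3);
    assert(nfsm_pad(4) == 0);
    assert(nfsm_pad(7) == 1);
    return 0;
}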
but */ for (; mb; mb = next) { next = mbuf_next(mb); - if (mbuf_len(mb) == 0) + if (mbuf_len(mb) == 0) { mbuf_free(mb); - else + } else { break; + } } *mb_head = mb; for (; mb && len; mb = mbuf_next(mb)) { tail = mb; - if (mbuf_len(mb) <= len) + if (mbuf_len(mb) <= len) { len -= mbuf_len(mb); - else - return (EBADRPC); + } else { + return EBADRPC; + } } /* drop the mic */ if (tail) { - mbuf_setnext(tail, NULL); - mbuf_freem(mb); + mbuf_setnext(tail, NULL); + mbuf_freem(mb); } - return (0); + return 0; } /* @@ -445,12 +459,13 @@ rpc_gss_priv_data_restore(gss_ctx_id_t ctx, mbuf_t *mb_head, size_t len) if (plen) { mbuf_t tail = NULL; - for(length = 0; length < len && mb; mb = mbuf_next(mb)) { + for (length = 0; length < len && mb; mb = mbuf_next(mb)) { tail = mb; length += mbuf_len(mb); } - if ((length != len) || (mb == NULL) || (tail == NULL)) - return (EBADRPC); + if ((length != len) || (mb == NULL) || (tail == NULL)) { + return EBADRPC; + } mbuf_freem(mb); mbuf_setnext(tail, NULL); @@ -459,7 +474,7 @@ rpc_gss_priv_data_restore(gss_ctx_id_t ctx, mbuf_t *mb_head, size_t len) major = gss_krb5_unwrap_mbuf(&error, ctx, mb_head, 0, len, NULL, &qop); if (major != GSS_S_COMPLETE) { printf("gss_krb5_unwrap_mbuf failed. major = %d minor = %d\n", (int)major, error); - return (error); + return error; } mb = *mb_head; @@ -470,14 +485,15 @@ rpc_gss_priv_data_restore(gss_ctx_id_t ctx, mbuf_t *mb_head, size_t len) /* Chop of any empty mbufs */ for (mb = *mb_head; mb; mb = next) { next = mbuf_next(mb); - if (mbuf_len(mb) == 0) + if (mbuf_len(mb) == 0) { mbuf_free(mb); - else + } else { break; + } } *mb_head = mb; - return (0); + return 0; } /* @@ -503,7 +519,7 @@ uid_t nfs_cred_getasid2uid(kauth_cred_t cred) { uid_t result = SAFE_CAST_INTTYPE(uid_t, kauth_cred_getasid(cred)); - return (result); + return result; } /* @@ -519,9 +535,9 @@ nfs_gss_clnt_ctx_dump(struct nfsmount *nmp) TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { lck_mtx_lock(cp->gss_clnt_mtx); printf("context %d/%d: refcnt = %d, flags = %x\n", - kauth_cred_getasid(cp->gss_clnt_cred), - kauth_cred_getauid(cp->gss_clnt_cred), - cp->gss_clnt_refcnt, cp->gss_clnt_flags); + kauth_cred_getasid(cp->gss_clnt_cred), + kauth_cred_getauid(cp->gss_clnt_cred), + cp->gss_clnt_refcnt, cp->gss_clnt_flags); lck_mtx_unlock(cp->gss_clnt_mtx); } NFS_GSS_DBG("Exit\n"); @@ -535,12 +551,13 @@ nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *b int nlen; const char *server = ""; - if (nmp && nmp->nm_mountp) + if (nmp && nmp->nm_mountp) { server = vfs_statfs(nmp->nm_mountp)->f_mntfromname; + } if (cp == NULL) { snprintf(buf, len, "[%s] NULL context", server); - return (buf); + return buf; } if (cp->gss_clnt_principal && !cp->gss_clnt_display) { @@ -550,33 +567,35 @@ nfs_gss_clnt_ctx_name(struct nfsmount *nmp, struct nfs_gss_clnt_ctx *cp, char *b np = cp->gss_clnt_display; nlen = np ? strlen(cp->gss_clnt_display) : 0; } - if (nlen) + if (nlen) { snprintf(buf, len, "[%s] %.*s %d/%d %s", server, nlen, np, - kauth_cred_getasid(cp->gss_clnt_cred), - kauth_cred_getuid(cp->gss_clnt_cred), - cp->gss_clnt_principal ? "" : "[from default cred] "); - else + kauth_cred_getasid(cp->gss_clnt_cred), + kauth_cred_getuid(cp->gss_clnt_cred), + cp->gss_clnt_principal ? 
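/*
 * Sketch of the chain-trimming logic in rpc_gss_integ_data_restore()
 * above, rewritten over a toy singly linked buffer list: walk nodes until
 * `len` bytes are covered, then detach and free whatever follows (the
 * trailing MIC). The node type and helpers are hypothetical; as in the
 * kernel code, a data boundary that splits a buffer is treated as EBADRPC.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

struct buf {
    struct buf *next;
    size_t      len;
};

static void
buf_freem(struct buf *b)                /* free a whole chain */
{
    while (b != NULL) {
        struct buf *next = b->next;
        free(b);
        b = next;
    }
}

/* Keep exactly `len` bytes of the chain; drop the trailing checksum. */
static bool
chain_trim(struct buf *head, size_t len)
{
    struct buf *tail = NULL;
    struct buf *b;

    for (b = head; b != NULL && len > 0; b = b->next) {
        tail = b;
        if (b->len > len) {
            return false;       /* boundary splits a buffer: EBADRPC */
        }
        len -= b->len;
    }
    if (len != 0) {
        return false;           /* chain shorter than advertised */
    }
    if (tail != NULL) {
        buf_freem(tail->next);  /* free the MIC buffers */
        tail->next = NULL;
    }
    return true;
}

int
main(void)
{
    struct buf *a = calloc(1, sizeof(*a));
    struct buf *b = calloc(1, sizeof(*b));

    a->len = 8; a->next = b;
    b->len = 4; b->next = NULL;         /* pretend b holds the MIC */
    if (!chain_trim(a, 8)) {            /* keep 8 data bytes, free b */
        return 1;
    }
    buf_freem(a);
    return 0;
}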
"" : "[from default cred] "); + } else { snprintf(buf, len, "[%s] using default %d/%d ", server, - kauth_cred_getasid(cp->gss_clnt_cred), - kauth_cred_getuid(cp->gss_clnt_cred)); - return (buf); + kauth_cred_getasid(cp->gss_clnt_cred), + kauth_cred_getuid(cp->gss_clnt_cred)); + } + return buf; } #define NFS_CTXBUFSZ 80 #define NFS_GSS_CTX(req, cp) nfs_gss_clnt_ctx_name((req)->r_nmp, cp ? cp : (req)->r_gss_ctx, CTXBUF, sizeof(CTXBUF)) -#define NFS_GSS_CLNT_CTX_DUMP(nmp) \ - do { \ - if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2)) \ - nfs_gss_clnt_ctx_dump((nmp)); \ +#define NFS_GSS_CLNT_CTX_DUMP(nmp) \ + do { \ + if (NFS_GSS_ISDBG && (NFS_DEBUG_FLAGS & 0x2)) \ + nfs_gss_clnt_ctx_dump((nmp)); \ } while (0) static int nfs_gss_clnt_ctx_cred_match(kauth_cred_t cred1, kauth_cred_t cred2) { - if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2)) - return (1); - return (0); + if (kauth_cred_getasid(cred1) == kauth_cred_getasid(cred2)) { + return 1; + } + return 0; } /* @@ -598,8 +617,9 @@ nfs_gss_clnt_mnt_ref(struct nfsmount *nmp) vnode_t rvp; if (nmp == NULL || - !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) + !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) { return; + } error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL); if (!error) { @@ -619,8 +639,9 @@ nfs_gss_clnt_mnt_rele(struct nfsmount *nmp) vnode_t rvp; if (nmp == NULL || - !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) + !(vfs_flags(nmp->nm_mountp) & MNT_AUTOMOUNTED)) { return; + } error = VFS_ROOT(nmp->nm_mountp, &rvp, NULL); if (!error) { @@ -641,7 +662,7 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t struct timeval now; char CTXBUF[NFS_CTXBUFSZ]; - bzero(&treq, sizeof (struct nfsreq)); + bzero(&treq, sizeof(struct nfsreq)); treq.r_nmp = nmp; microuptime(&now); @@ -650,8 +671,8 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t lck_mtx_lock(cp->gss_clnt_mtx); if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n", - NFS_GSS_CTX(req, cp), - cp->gss_clnt_refcnt); + NFS_GSS_CTX(req, cp), + cp->gss_clnt_refcnt); lck_mtx_unlock(cp->gss_clnt_mtx); continue; } @@ -671,9 +692,9 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t cp->gss_clnt_refcnt++; lck_mtx_unlock(cp->gss_clnt_mtx); NFS_GSS_DBG("Marking %s for deletion because %s does not match\n", - NFS_GSS_CTX(req, cp), principal); + NFS_GSS_CTX(req, cp), principal); NFS_GSS_DBG("len = (%d,%d), nt = (%d,%d)\n", cp->gss_clnt_prinlen, plen, - cp->gss_clnt_prinnt, nt); + cp->gss_clnt_prinnt, nt); treq.r_gss_ctx = cp; cp = NULL; break; @@ -687,10 +708,10 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t */ if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec || cp->gss_clnt_nctime == 0) { NFS_GSS_DBG("Context %s (refcnt = %d) not expired returning EAUTH nctime = %ld now = %ld\n", - NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec); + NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt, cp->gss_clnt_nctime, now.tv_sec); lck_mtx_unlock(cp->gss_clnt_mtx); lck_mtx_unlock(&nmp->nm_lock); - return (NFSERR_EAUTH); + return NFSERR_EAUTH; } if (cp->gss_clnt_refcnt) { struct nfs_gss_clnt_ctx *ncp; @@ -701,18 +722,19 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t */ cp->gss_clnt_flags |= GSS_CTX_DESTROY; NFS_GSS_DBG("Context %s has expired but we still have %d references\n", - NFS_GSS_CTX(req, cp), cp->gss_clnt_refcnt); + NFS_GSS_CTX(req, cp), 
cp->gss_clnt_refcnt); error = nfs_gss_clnt_ctx_copy(cp, &ncp); lck_mtx_unlock(cp->gss_clnt_mtx); if (error) { lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } cp = ncp; break; } else { - if (cp->gss_clnt_nctime) + if (cp->gss_clnt_nctime) { nmp->nm_ncentries--; + } lck_mtx_unlock(cp->gss_clnt_mtx); TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); break; @@ -723,7 +745,7 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t req->r_gss_ctx = cp; lck_mtx_unlock(cp->gss_clnt_mtx); lck_mtx_unlock(&nmp->nm_lock); - return (0); + return 0; } lck_mtx_unlock(cp->gss_clnt_mtx); } @@ -737,36 +759,36 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t * in case one is set up for it. */ TAILQ_FOREACH(cp, &nmp->nm_gsscl, gss_clnt_entries) { - if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL|GSS_CTX_DESTROY))) { + if (!(cp->gss_clnt_flags & (GSS_CTX_INVAL | GSS_CTX_DESTROY))) { nfs_gss_clnt_ctx_ref(req, cp); lck_mtx_unlock(&nmp->nm_lock); NFS_GSS_DBG("Root stole context %s\n", NFS_GSS_CTX(req, NULL)); - return (0); + return 0; } } } NFS_GSS_DBG("Context %s%sfound in Neg Cache @ %ld\n", - NFS_GSS_CTX(req, cp), - cp == NULL ? " not " : "", - cp == NULL ? 0L : cp->gss_clnt_nctime); + NFS_GSS_CTX(req, cp), + cp == NULL ? " not " : "", + cp == NULL ? 0L : cp->gss_clnt_nctime); /* * Not found - create a new context */ if (cp == NULL) { - MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO); + MALLOC(cp, struct nfs_gss_clnt_ctx *, sizeof(*cp), M_TEMP, M_WAITOK | M_ZERO); if (cp == NULL) { lck_mtx_unlock(&nmp->nm_lock); - return (ENOMEM); + return ENOMEM; } cp->gss_clnt_cred = req->r_cred; kauth_cred_ref(cp->gss_clnt_cred); cp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL); cp->gss_clnt_ptime = now.tv_sec - GSS_PRINT_DELAY; if (principal) { - MALLOC(cp->gss_clnt_principal, uint8_t *, plen+1, M_TEMP, M_WAITOK|M_ZERO); + MALLOC(cp->gss_clnt_principal, uint8_t *, plen + 1, M_TEMP, M_WAITOK | M_ZERO); memcpy(cp->gss_clnt_principal, principal, plen); cp->gss_clnt_prinlen = plen; cp->gss_clnt_prinnt = nt; @@ -806,13 +828,13 @@ nfs_gss_clnt_ctx_find_principal(struct nfsreq *req, uint8_t *principal, uint32_t /* Remove any old matching contex that had a different principal */ nfs_gss_clnt_ctx_unref(&treq); - return (error); + return error; } static int nfs_gss_clnt_ctx_find(struct nfsreq *req) { - return (nfs_gss_clnt_ctx_find_principal(req, NULL, 0, 0)); + return nfs_gss_clnt_ctx_find_principal(req, NULL, 0, 0); } /* @@ -833,7 +855,7 @@ nfs_gss_clnt_cred_put(struct nfsreq *req, struct nfsm_chain *nmc, mbuf_t args) struct gss_seq *gsp; gss_buffer_desc mic; - slpflag = (PZERO-1); + slpflag = (PZERO - 1); if (req->r_nmp) { slpflag |= (NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) ? PCATCH : 0; recordmark = (req->r_nmp->nm_sotype == SOCK_STREAM); @@ -847,8 +869,9 @@ retry: * be created. */ error = nfs_gss_clnt_ctx_find(req); - if (error) - return (error); + if (error) { + return error; + } } cp = req->r_gss_ctx; @@ -863,8 +886,9 @@ retry: cp->gss_clnt_flags |= GSS_NEEDCTX; msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "ctxwait", NULL); slpflag &= ~PCATCH; - if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) - return (error); + if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) { + return error; + } nfs_gss_clnt_ctx_unref(req); goto retry; } @@ -879,13 +903,13 @@ retry: * to proceed. 
*/ lck_mtx_lock(cp->gss_clnt_mtx); - while (win_getbit(cp->gss_clnt_seqbits, - ((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) { + while (win_getbit(cp->gss_clnt_seqbits, + ((cp->gss_clnt_seqnum - cp->gss_clnt_seqwin) + 1) % cp->gss_clnt_seqwin)) { cp->gss_clnt_flags |= GSS_NEEDSEQ; msleep(cp, cp->gss_clnt_mtx, slpflag | PDROP, "seqwin", NULL); slpflag &= ~PCATCH; if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) { - return (error); + return error; } lck_mtx_lock(cp->gss_clnt_mtx); if (cp->gss_clnt_flags & GSS_CTX_INVAL) { @@ -899,9 +923,10 @@ retry: win_setbit(cp->gss_clnt_seqbits, seqnum % cp->gss_clnt_seqwin); lck_mtx_unlock(cp->gss_clnt_mtx); - MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK|M_ZERO); - if (gsp == NULL) - return (ENOMEM); + MALLOC(gsp, struct gss_seq *, sizeof(*gsp), M_TEMP, M_WAITOK | M_ZERO); + if (gsp == NULL) { + return ENOMEM; + } gsp->gss_seqnum = seqnum; SLIST_INSERT_HEAD(&req->r_gss_seqlist, gsp, gss_seqnext); } @@ -915,27 +940,30 @@ retry: nfsm_chain_add_32(error, nmc, cp->gss_clnt_service); nfsm_chain_add_32(error, nmc, cp->gss_clnt_handle_len); if (cp->gss_clnt_handle_len > 0) { - if (cp->gss_clnt_handle == NULL) - return (EBADRPC); + if (cp->gss_clnt_handle == NULL) { + return EBADRPC; + } nfsm_chain_add_opaque(error, nmc, cp->gss_clnt_handle, cp->gss_clnt_handle_len); } - if (error) - return(error); + if (error) { + return error; + } /* * Now add the verifier */ if (cp->gss_clnt_proc == RPCSEC_GSS_INIT || - cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) { + cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT) { /* * If the context is still being created * then use a null verifier. */ - nfsm_chain_add_32(error, nmc, RPCAUTH_NULL); // flavor - nfsm_chain_add_32(error, nmc, 0); // length + nfsm_chain_add_32(error, nmc, RPCAUTH_NULL); // flavor + nfsm_chain_add_32(error, nmc, 0); // length nfsm_chain_build_done(error, nmc); - if (!error) + if (!error) { nfs_gss_append_chain(nmc, args); - return (error); + } + return error; } offset = recordmark ? 
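/*
 * The RPCSEC_GSS sequence window used above is a bitmap of in-flight
 * sequence numbers indexed modulo the window size: a sender must wait
 * while the slot its next seqnum would reuse is still busy. The
 * win_getbit/win_setbit/win_resetbit helpers are not shown in this hunk,
 * so these are plausible 1-bit-per-slot reconstructions, not the kernel's
 * definitions. The demo assumes seqnum has already advanced past SEQWIN.
 */
#include <assert.h>
#include <stdint.h>

#define SEQWIN 128                      /* example window size */

static uint32_t seqbits[SEQWIN / 32];   /* zero: no requests in flight */

static int
win_getbit(const uint32_t *bits, uint32_t bit)
{
    return (bits[bit / 32] >> (bit % 32)) & 1u;
}

static void
win_setbit(uint32_t *bits, uint32_t bit)
{
    bits[bit / 32] |= 1u << (bit % 32);
}

static void
win_resetbit(uint32_t *bits, uint32_t bit)
{
    bits[bit / 32] &= ~(1u << (bit % 32));
}

int
main(void)
{
    uint32_t seqnum = 500;              /* last sequence number issued */

    /* A new request may proceed only if the slot it will reuse is free. */
    assert(!win_getbit(seqbits, ((seqnum - SEQWIN) + 1) % SEQWIN));
    win_setbit(seqbits, ++seqnum % SEQWIN);     /* mark request in flight */
    win_resetbit(seqbits, seqnum % SEQWIN);     /* mark it done (rpcdone) */
    return 0;
}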
NFSX_UNSIGNED : 0; // record mark @@ -943,17 +971,18 @@ retry: major = gss_krb5_get_mic_mbuf((uint32_t *)&error, cp->gss_clnt_ctx_id, 0, nmc->nmc_mhead, offset, 0, &mic); if (major != GSS_S_COMPLETE) { - printf ("gss_krb5_get_mic_buf failed %d\n", error); - return (error); + printf("gss_krb5_get_mic_buf failed %d\n", error); + return error; } - nfsm_chain_add_32(error, nmc, RPCSEC_GSS); // flavor - nfsm_chain_add_32(error, nmc, mic.length); // length + nfsm_chain_add_32(error, nmc, RPCSEC_GSS); // flavor + nfsm_chain_add_32(error, nmc, mic.length); // length nfsm_chain_add_opaque(error, nmc, mic.value, mic.length); (void)gss_release_buffer(NULL, &mic); nfsm_chain_build_done(error, nmc); - if (error) - return (error); + if (error) { + return error; + } /* * Now we may have to compute integrity or encrypt the call args @@ -961,8 +990,9 @@ retry: */ switch (cp->gss_clnt_service) { case RPCSEC_GSS_SVC_NONE: - if (args) + if (args) { nfs_gss_append_chain(nmc, args); + } break; case RPCSEC_GSS_SVC_INTEGRITY: /* @@ -972,11 +1002,13 @@ retry: /* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_integ_data_t */ assert(req->r_mrest == args); nfsm_chain_finish_mbuf(error, nmc); - if (error) - return (error); + if (error) { + return error; + } error = rpc_gss_integ_data_create(cp->gss_clnt_ctx_id, &args, seqnum, &req->r_gss_arglen); - if (error) + if (error) { break; + } req->r_mrest = args; req->r_gss_argoff = nfsm_chain_offset(nmc); nfs_gss_append_chain(nmc, args); @@ -989,20 +1021,22 @@ retry: /* Note the mbufs that were used in r_mrest are being encapsulated in the rpc_gss_priv_data_t */ assert(req->r_mrest == args); nfsm_chain_finish_mbuf(error, nmc); - if (error) - return (error); + if (error) { + return error; + } error = rpc_gss_priv_data_create(cp->gss_clnt_ctx_id, &args, seqnum, &req->r_gss_arglen); - if (error) + if (error) { break; + } req->r_mrest = args; req->r_gss_argoff = nfsm_chain_offset(nmc); nfs_gss_append_chain(nmc, args); break; default: - return (EINVAL); + return EINVAL; } - return (error); + return error; } /* @@ -1035,8 +1069,9 @@ nfs_gss_clnt_verf_get( reslen = 0; *accepted_statusp = 0; - if (cp == NULL) - return (NFSERR_EAUTH); + if (cp == NULL) { + return NFSERR_EAUTH; + } /* * If it's not an RPCSEC_GSS verifier, then it has to * be a null verifier that resulted from either @@ -1045,14 +1080,17 @@ nfs_gss_clnt_verf_get( * context that resulted from a fallback to sec=sys. */ if (verftype != RPCSEC_GSS) { - if (verftype != RPCAUTH_NULL) - return (NFSERR_EAUTH); - if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) - return (NFSERR_EAUTH); - if (verflen > 0) + if (verftype != RPCAUTH_NULL) { + return NFSERR_EAUTH; + } + if (cp->gss_clnt_flags & GSS_CTX_COMPLETE) { + return NFSERR_EAUTH; + } + if (verflen > 0) { nfsm_chain_adv(error, nmc, nfsm_rndup(verflen)); + } nfsm_chain_get_32(error, nmc, *accepted_statusp); - return (error); + return error; } /* @@ -1066,19 +1104,22 @@ nfs_gss_clnt_verf_get( * the context is complete. 
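/*
 * Sketch of the call-verifier construction above, using the standard
 * GSS-API: the verifier with flavor RPCSEC_GSS carries a MIC computed
 * over the RPC header built so far. The kernel computes it over an mbuf
 * chain with the private gss_krb5_get_mic_mbuf(); a flat buffer stands in
 * here, and make_call_verifier() is a hypothetical helper.
 */
#include <gssapi/gssapi.h>
#include <stddef.h>

/* On success fills *mic; the caller releases it with gss_release_buffer. */
int
make_call_verifier(gss_ctx_id_t ctx, void *header, size_t header_len,
    gss_buffer_t mic)
{
    gss_buffer_desc msg = { .length = header_len, .value = header };
    OM_uint32 major, minor;

    major = gss_get_mic(&minor, ctx, GSS_C_QOP_DEFAULT, &msg, mic);
    return major == GSS_S_COMPLETE ? 0 : -1;
}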
*/ if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) { - if (verflen > KRB5_MAX_MIC_SIZE) - return (EBADRPC); - MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO); - if (cp->gss_clnt_verf == NULL) - return (ENOMEM); + if (verflen > KRB5_MAX_MIC_SIZE) { + return EBADRPC; + } + MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK | M_ZERO); + if (cp->gss_clnt_verf == NULL) { + return ENOMEM; + } cp->gss_clnt_verflen = verflen; nfsm_chain_get_opaque(error, nmc, verflen, cp->gss_clnt_verf); nfsm_chain_get_32(error, nmc, *accepted_statusp); - return (error); + return error; } - if (verflen > KRB5_MAX_MIC_SIZE) - return (EBADRPC); + if (verflen > KRB5_MAX_MIC_SIZE) { + return EBADRPC; + } cksum.length = verflen; MALLOC(cksum.value, void *, verflen, M_TEMP, M_WAITOK); @@ -1103,19 +1144,22 @@ nfs_gss_clnt_verf_get( seqnum_buf.length = sizeof(network_seqnum); seqnum_buf.value = &network_seqnum; major = gss_krb5_verify_mic(NULL, cp->gss_clnt_ctx_id, &seqnum_buf, &cksum, NULL); - if (major == GSS_S_COMPLETE) + if (major == GSS_S_COMPLETE) { break; + } } FREE(cksum.value, M_TEMP); - if (gsp == NULL) - return (NFSERR_EAUTH); + if (gsp == NULL) { + return NFSERR_EAUTH; + } /* * Get the RPC accepted status */ nfsm_chain_get_32(error, nmc, *accepted_statusp); - if (*accepted_statusp != RPC_SUCCESS) - return (0); + if (*accepted_statusp != RPC_SUCCESS) { + return 0; + } /* * Now we may have to check integrity or decrypt the results @@ -1136,7 +1180,7 @@ nfs_gss_clnt_verf_get( * - checksum of seqnum + results */ - nfsm_chain_get_32(error, nmc, reslen); // length of results + nfsm_chain_get_32(error, nmc, reslen); // length of results if (reslen > NFS_MAXPACKET) { error = EBADRPC; goto nfsmout; @@ -1144,8 +1188,8 @@ nfs_gss_clnt_verf_get( /* Advance and fetch the mic */ nmc_tmp = *nmc; - nfsm_chain_adv(error, &nmc_tmp, reslen); // skip over the results - nfsm_chain_get_32(error, &nmc_tmp, cksum.length); + nfsm_chain_adv(error, &nmc_tmp, reslen); // skip over the results + nfsm_chain_get_32(error, &nmc_tmp, cksum.length); if (cksum.length > KRB5_MAX_MIC_SIZE) { error = EBADRPC; goto nfsmout; @@ -1175,8 +1219,9 @@ nfs_gss_clnt_verf_get( } #if 0 SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) { - if (seqnum == gsp->gss_seqnum) + if (seqnum == gsp->gss_seqnum) { break; + } } if (gsp == NULL) { error = EBADRPC; @@ -1193,7 +1238,7 @@ nfs_gss_clnt_verf_get( * - wrap token */ prev_mbuf = nmc->nmc_mcur; - nfsm_chain_get_32(error, nmc, reslen); // length of results + nfsm_chain_get_32(error, nmc, reslen); // length of results if (reslen == 0 || reslen > NFS_MAXPACKET) { error = EBADRPC; goto nfsmout; @@ -1205,8 +1250,9 @@ nfs_gss_clnt_verf_get( /* split out the wrap token */ ressize = reslen; error = gss_normalize_mbuf(nmc->nmc_mcur, offset, &ressize, &results_mbuf, &pad_mbuf, 0); - if (error) + if (error) { goto nfsmout; + } if (pad_mbuf) { assert(nfsm_pad(reslen) == mbuf_len(pad_mbuf)); @@ -1237,8 +1283,9 @@ nfs_gss_clnt_verf_get( } #if 0 SLIST_FOREACH(gsp, &req->r_gss_seqlist, gss_seqnext) { - if (seqnum == gsp->gss_seqnum) + if (seqnum == gsp->gss_seqnum) { break; + } } if (gsp == NULL) { error = EBADRPC; @@ -1248,7 +1295,7 @@ nfs_gss_clnt_verf_get( break; } nfsmout: - return (error); + return error; } /* @@ -1271,30 +1318,35 @@ nfs_gss_clnt_args_restore(struct nfsreq *req) struct nfsm_chain mchain, *nmc = &mchain; int error = 0, merr; - if (cp == NULL) - return (NFSERR_EAUTH); + if (cp == NULL) { + return NFSERR_EAUTH; + } - if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0) - return 
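/*
 * Sketch of the reply-verifier check in nfs_gss_clnt_verf_get() above,
 * using the standard GSS-API in place of the kernel-private
 * gss_krb5_verify_mic() shim: the server's RPCSEC_GSS verifier is a MIC
 * over one of the request's recent sequence numbers, so each remembered
 * seqnum is tried until one verifies (none matching means NFSERR_EAUTH).
 * verf_matches() itself is a hypothetical helper.
 */
#include <arpa/inet.h>
#include <gssapi/gssapi.h>
#include <stdint.h>

/* Returns 1 if the verifier MIC matches any of the n candidate seqnums. */
int
verf_matches(gss_ctx_id_t ctx, gss_buffer_t mic,
    const uint32_t *seqnums, int n)
{
    for (int i = 0; i < n; i++) {
        uint32_t network_seqnum = htonl(seqnums[i]);
        gss_buffer_desc seqnum_buf = {
            .length = sizeof(network_seqnum),
            .value  = &network_seqnum,
        };
        OM_uint32 minor;

        if (gss_verify_mic(&minor, ctx, &seqnum_buf, mic, NULL)
            == GSS_S_COMPLETE) {
            return 1;
        }
    }
    return 0;
}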
(ENEEDAUTH); + if ((cp->gss_clnt_flags & GSS_CTX_COMPLETE) == 0) { + return ENEEDAUTH; + } /* Nothing to restore for SVC_NONE */ - if (cp->gss_clnt_service == RPCSEC_GSS_SVC_NONE) - return (0); + if (cp->gss_clnt_service == RPCSEC_GSS_SVC_NONE) { + return 0; + } - nfsm_chain_dissect_init(error, nmc, req->r_mhead); // start at RPC header - nfsm_chain_adv(error, nmc, req->r_gss_argoff); // advance to args - if (error) - return (error); + nfsm_chain_dissect_init(error, nmc, req->r_mhead); // start at RPC header + nfsm_chain_adv(error, nmc, req->r_gss_argoff); // advance to args + if (error) { + return error; + } - if (cp->gss_clnt_service == RPCSEC_GSS_SVC_INTEGRITY) + if (cp->gss_clnt_service == RPCSEC_GSS_SVC_INTEGRITY) { error = rpc_gss_integ_data_restore(cp->gss_clnt_ctx_id, &req->r_mrest, req->r_gss_arglen); - else + } else { error = rpc_gss_priv_data_restore(cp->gss_clnt_ctx_id, &req->r_mrest, req->r_gss_arglen); + } merr = mbuf_setnext(nmc->nmc_mcur, req->r_mrest); /* Should always succeed */ - assert (merr == 0); + assert(merr == 0); - return (error ? error : merr); + return error ? error : merr; } /* @@ -1332,9 +1384,9 @@ nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) cp->gss_clnt_proc = RPCSEC_GSS_INIT; cp->gss_clnt_service = - req->r_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE : - req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY : - req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0; + req->r_auth == RPCAUTH_KRB5 ? RPCSEC_GSS_SVC_NONE : + req->r_auth == RPCAUTH_KRB5I ? RPCSEC_GSS_SVC_INTEGRITY : + req->r_auth == RPCAUTH_KRB5P ? RPCSEC_GSS_SVC_PRIVACY : 0; /* * Now loop around alternating gss_init_sec_context and @@ -1345,14 +1397,16 @@ nfs_gss_clnt_ctx_init(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) retry: /* Upcall to the gss_init_sec_context in the gssd */ error = nfs_gss_clnt_gssd_upcall(req, cp, retrycnt); - if (error) + if (error) { goto nfsmout; + } if (cp->gss_clnt_major == GSS_S_COMPLETE) { client_complete = 1; NFS_GSS_DBG("Client complete\n"); - if (server_complete) + if (server_complete) { break; + } } else if (cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) { /* * We may have gotten here because the accept sec context @@ -1375,7 +1429,7 @@ retry: if (error) { if (error == ENEEDAUTH && (cp->gss_clnt_proc == RPCSEC_GSS_INIT || - cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT)) { + cp->gss_clnt_proc == RPCSEC_GSS_CONTINUE_INIT)) { /* * We got here because the server had a problem * trying to establish a context and sent that there @@ -1395,8 +1449,9 @@ retry: if (cp->gss_clnt_major == GSS_S_COMPLETE) { NFS_GSS_DBG("Server complete\n"); server_complete = 1; - if (client_complete) + if (client_complete) { break; + } } else if (cp->gss_clnt_major == GSS_S_CONTINUE_NEEDED) { cp->gss_clnt_proc = RPCSEC_GSS_CONTINUE_INIT; } else { @@ -1416,7 +1471,7 @@ retry: cp->gss_clnt_proc = RPCSEC_GSS_DATA; network_seqnum = htonl(cp->gss_clnt_seqwin); - window.length = sizeof (cp->gss_clnt_seqwin); + window.length = sizeof(cp->gss_clnt_seqwin); window.value = &network_seqnum; cksum.value = cp->gss_clnt_verf; cksum.length = cp->gss_clnt_verflen; @@ -1443,9 +1498,10 @@ retry: * are pending within the sequence number window. 
*/ MALLOC(cp->gss_clnt_seqbits, uint32_t *, - nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO); - if (cp->gss_clnt_seqbits == NULL) + nfsm_rndup((cp->gss_clnt_seqwin + 7) / 8), M_TEMP, M_WAITOK | M_ZERO); + if (cp->gss_clnt_seqbits == NULL) { error = NFSERR_EAUTH; + } nfsmout: /* @@ -1455,7 +1511,7 @@ nfsmout: */ if (error == ENEEDAUTH) { NFS_GSS_DBG("Returning ENEEDAUTH\n"); - return (error); + return error; } /* @@ -1464,8 +1520,9 @@ nfsmout: * drops to zero. */ lck_mtx_lock(cp->gss_clnt_mtx); - if (error) + if (error) { cp->gss_clnt_flags |= GSS_CTX_INVAL; + } /* * Wake any threads waiting to use the context @@ -1478,7 +1535,7 @@ nfsmout: lck_mtx_unlock(cp->gss_clnt_mtx); NFS_GSS_DBG("Returning error = %d\n", error); - return (error); + return error; } /* @@ -1514,8 +1571,9 @@ nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) tsleep(NULL, PSOCK | slpflag, "nfs_gss_clnt_ctx_init_retry", hz); slpflag = 0; error = nfs_sigintr(req->r_nmp, req, current_thread(), 0); - if (error) + if (error) { goto bad; + } microuptime(&now); } @@ -1526,12 +1584,14 @@ nfs_gss_clnt_ctx_init_retry(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) goto bad; } timeo *= 2; - if (timeo > 60) + if (timeo > 60) { timeo = 60; + } } - if (error == 0) - return 0; // success + if (error == 0) { + return 0; // success + } bad: /* * Give up on this context @@ -1547,7 +1607,7 @@ bad: cp->gss_clnt_flags &= ~GSS_NEEDCTX; wakeup(cp); } - lck_mtx_unlock(cp->gss_clnt_mtx); + lck_mtx_unlock(cp->gss_clnt_mtx); return error; } @@ -1566,30 +1626,35 @@ nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) uint32_t major = cp->gss_clnt_major, minor = cp->gss_clnt_minor; int sz; - if (nfs_mount_gone(req->r_nmp)) - return (ENXIO); + if (nfs_mount_gone(req->r_nmp)) { + return ENXIO; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); sz = NFSX_UNSIGNED + nfsm_rndup(cp->gss_clnt_tokenlen); nfsm_chain_build_alloc_init(error, &nmreq, sz); nfsm_chain_add_32(error, &nmreq, cp->gss_clnt_tokenlen); - if (cp->gss_clnt_tokenlen > 0) + if (cp->gss_clnt_tokenlen > 0) { nfsm_chain_add_opaque(error, &nmreq, cp->gss_clnt_token, cp->gss_clnt_tokenlen); + } nfsm_chain_build_done(error, &nmreq); - if (error) + if (error) { goto nfsmout; + } /* Call the server */ - error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred, - (req->r_flags & R_OPTMASK), cp, &nmrep, &status); + error = nfs_request_gss(req->r_nmp->nm_mountp, &nmreq, req->r_thread, req->r_cred, + (req->r_flags & R_OPTMASK), cp, &nmrep, &status); if (cp->gss_clnt_token != NULL) { FREE(cp->gss_clnt_token, M_TEMP); cp->gss_clnt_token = NULL; } - if (!error) + if (!error) { error = status; - if (error) + } + if (error) { goto nfsmout; + } /* Get the server's reply */ @@ -1612,8 +1677,9 @@ nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_minor); nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_seqwin); nfsm_chain_get_32(error, &nmrep, cp->gss_clnt_tokenlen); - if (error) + if (error) { goto nfsmout; + } if (cp->gss_clnt_tokenlen > 0 && cp->gss_clnt_tokenlen < GSS_MAX_TOKEN_LEN) { MALLOC(cp->gss_clnt_token, u_char *, cp->gss_clnt_tokenlen, M_TEMP, M_WAITOK); if (cp->gss_clnt_token == NULL) { @@ -1630,17 +1696,15 @@ nfs_gss_clnt_ctx_callserver(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp) */ if (cp->gss_clnt_major != GSS_S_COMPLETE && cp->gss_clnt_major != GSS_S_CONTINUE_NEEDED) { - printf("nfs_gss_clnt_ctx_callserver: gss_clnt_major 
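/*
 * Control-flow sketch of the establishment loop in nfs_gss_clnt_ctx_init()
 * above: a local init-sec-context step alternates with an
 * RPCSEC_GSS_INIT/CONTINUE_INIT call to the server until *both* sides
 * report completion. The step functions are stubs standing in for
 * nfs_gss_clnt_gssd_upcall() and nfs_gss_clnt_ctx_callserver(); here they
 * simply finish after two rounds.
 */
#include <stdbool.h>
#include <stdio.h>

enum gss_status { GSS_COMPLETE, GSS_CONTINUE, GSS_FAILED };

static enum gss_status
client_step(void)
{
    static int round;
    return ++round >= 2 ? GSS_COMPLETE : GSS_CONTINUE;
}

static enum gss_status
server_step(void)
{
    static int round;
    return ++round >= 2 ? GSS_COMPLETE : GSS_CONTINUE;
}

static bool
establish_context(void)
{
    bool client_done = false, server_done = false;

    while (!(client_done && server_done)) {
        if (!client_done) {
            enum gss_status s = client_step();
            if (s == GSS_FAILED) {
                return false;
            }
            client_done = (s == GSS_COMPLETE);
        }
        if (!server_done) {
            enum gss_status s = server_step();
            if (s == GSS_FAILED) {
                return false;
            }
            server_done = (s == GSS_COMPLETE);
        }
    }
    return true;        /* both sides complete: switch to RPCSEC_GSS_DATA */
}

int
main(void)
{
    printf("context %s\n", establish_context() ? "established" : "failed");
    return 0;
}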
= %d\n", cp->gss_clnt_major); nfs_gss_clnt_log_error(req, cp, major, minor); - } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -1661,18 +1725,20 @@ nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len) char *svcname, *d, *server; int lindx, sindx; - if (nfs_mount_gone(nmp)) - return (NULL); + if (nfs_mount_gone(nmp)) { + return NULL; + } if (nmp->nm_sprinc) { *len = strlen(nmp->nm_sprinc) + 1; MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK); *nt = GSSD_HOSTBASED; - if (svcname == NULL) - return (NULL); + if (svcname == NULL) { + return NULL; + } strlcpy(svcname, nmp->nm_sprinc, *len); - return ((uint8_t *)svcname); + return (uint8_t *)svcname; } *nt = GSSD_HOSTBASED; @@ -1688,7 +1754,7 @@ nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len) d = strchr(server, ':'); *len = (uint32_t)(d ? (d - server) : strlen(server)); } - + *len += 5; /* "nfs@" plus null */ MALLOC(svcname, char *, *len, M_TEMP, M_WAITOK); strlcpy(svcname, "nfs", *len); @@ -1696,7 +1762,7 @@ nfs_gss_clnt_svcname(struct nfsmount *nmp, gssd_nametype *nt, uint32_t *len) strlcat(svcname, server, *len); NFS_GSS_DBG("nfs svcname = %s\n", svcname); - return ((uint8_t *)svcname); + return (uint8_t *)svcname; } /* @@ -1717,30 +1783,32 @@ nfs_gss_clnt_get_upcall_port(kauth_cred_t credp) kr = host_get_gssd_port(host_priv_self(), &gssd_host_port); if (kr != KERN_SUCCESS) { printf("nfs_gss_get_upcall_port: can't get gssd port, status %x (%d)\n", kr, kr); - return (IPC_PORT_NULL); + return IPC_PORT_NULL; } if (!IPC_PORT_VALID(gssd_host_port)) { printf("nfs_gss_get_upcall_port: gssd port not valid\n"); - return (IPC_PORT_NULL); + return IPC_PORT_NULL; } asid = kauth_cred_getasid(credp); uid = kauth_cred_getauid(credp); - if (uid == AU_DEFAUDITID) + if (uid == AU_DEFAUDITID) { uid = kauth_cred_getuid(credp); + } kr = mach_gss_lookup(gssd_host_port, uid, asid, &uc_port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { printf("nfs_gss_clnt_get_upcall_port: mach_gssd_lookup failed: status %x (%d)\n", kr, kr); + } host_release_special_port(gssd_host_port); - return (uc_port); + return uc_port; } static void nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t major, uint32_t minor) { -#define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK) +#define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK) struct nfsmount *nmp = req->r_nmp; char who[] = "client"; uint32_t gss_error = GETMAJERROR(cp->gss_clnt_major); @@ -1751,23 +1819,25 @@ nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t if (req->r_thread) { proc = (proc_t)get_bsdthreadtask_info(req->r_thread); - if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) + if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) { proc = NULL; + } if (proc) { - if (*proc->p_comm) + if (*proc->p_comm) { procn = proc->p_comm; + } pid = proc->p_pid; } } else { procn = "kernproc"; pid = 0; } - + microuptime(&now); if ((cp->gss_clnt_major != major || cp->gss_clnt_minor != minor || - cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) && + cp->gss_clnt_ptime + GSS_PRINT_DELAY < now.tv_sec) && (nmp->nm_state & NFSSTA_MOUNTED)) { - /* + /* * Will let gssd do some logging in hopes that it can translate * the minor code. 
*/ @@ -1786,21 +1856,21 @@ nfs_gss_clnt_log_error(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32_t *%%% It would be really nice to get the terminal from the proc or auditinfo_addr struct and print that here. */ printf("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n", - cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred), - procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor); + cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred), + procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor); cp->gss_clnt_ptime = now.tv_sec; switch (gss_error) { case 7: printf("NFS: gssd does not have credentials for session %d/%d, (kinit)?\n", - kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred)); + kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred)); break; case 11: printf("NFS: gssd has expired credentals for session %d/%d, (kinit)?\n", - kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred)); + kauth_cred_getasid(req->r_cred), kauth_cred_getauid(req->r_cred)); break; } } else { NFS_GSS_DBG("NFS: gssd auth failure by %s on audit session %d uid %d proc %s/%d for mount %s. Error: major = %d minor = %d\n", - cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred), - procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor); + cp->gss_clnt_display ? cp->gss_clnt_display : who, kauth_cred_getasid(req->r_cred), kauth_cred_getuid(req->r_cred), + procn, pid, vfs_statfs(nmp->nm_mountp)->f_mntfromname, gss_error, (int32_t)cp->gss_clnt_minor); } } @@ -1835,12 +1905,14 @@ nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32 uint32_t selected = (uint32_t)-1; struct nfs_etype etype; - if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) - return (ENXIO); + if (nmp == NULL || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) { + return ENXIO; + } if (cp->gss_clnt_gssd_flags & GSSD_RESTART) { - if (cp->gss_clnt_token) + if (cp->gss_clnt_token) { FREE(cp->gss_clnt_token, M_TEMP); + } cp->gss_clnt_token = NULL; cp->gss_clnt_tokenlen = 0; cp->gss_clnt_proc = RPCSEC_GSS_INIT; @@ -1853,8 +1925,9 @@ nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32 } NFS_GSS_DBG("Retrycnt = %d nm_etype.count = %d\n", retrycnt, nmp->nm_etype.count); - if (retrycnt >= nmp->nm_etype.count) - return (EACCES); + if (retrycnt >= nmp->nm_etype.count) { + return EACCES; + } /* Copy the mount etypes to an order set of etypes to try */ etype = nmp->nm_etype; @@ -1866,15 +1939,18 @@ nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32 */ if (etype.selected < etype.count) { etype.etypes[0] = nmp->nm_etype.etypes[etype.selected]; - for (uint32_t i = 0; i < etype.selected; i++) - etype.etypes[i+1] = nmp->nm_etype.etypes[i]; - for (uint32_t i = etype.selected + 1; i < etype.count; i++) + for (uint32_t i = 0; i < etype.selected; i++) { + etype.etypes[i + 1] = nmp->nm_etype.etypes[i]; + } + for (uint32_t i = etype.selected + 1; i < etype.count; i++) { etype.etypes[i] = nmp->nm_etype.etypes[i]; + } } /* Remove the ones we've already have tried */ - for (uint32_t i = retrycnt; i < etype.count; i++) 
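/*
 * The GETMAJERROR macro above isolates the routine-error field of a GSS
 * major status word; the switch cases 7 and 11 in the logging code
 * correspond to GSS_S_NO_CRED ("run kinit") and GSS_S_CREDENTIALS_EXPIRED.
 * A tiny demonstration with the standard GSS-API masks:
 */
#include <assert.h>
#include <gssapi/gssapi.h>

#define GETMAJERROR(x) (((x) >> GSS_C_ROUTINE_ERROR_OFFSET) & GSS_C_ROUTINE_ERROR_MASK)

int
main(void)
{
    assert(GETMAJERROR(GSS_S_NO_CRED) == 7);
    assert(GETMAJERROR(GSS_S_CREDENTIALS_EXPIRED) == 11);
    return 0;
}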
+ for (uint32_t i = retrycnt; i < etype.count; i++) { etype.etypes[i - retrycnt] = etype.etypes[i]; + } etype.count = etype.count - retrycnt; NFS_GSS_DBG("etype count = %d preferred etype = %d\n", etype.count, etype.etypes[0]); @@ -1897,8 +1973,7 @@ nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32 plen = (uint32_t)strlen(nmp->nm_principal); principal = (uint8_t *)nmp->nm_principal; cp->gss_clnt_prinnt = nt = GSSD_USER; - } - else if (nmp->nm_realm) { + } else if (nmp->nm_realm) { plen = (uint32_t)strlen(nmp->nm_realm); principal = (uint8_t *)nmp->nm_realm; nt = GSSD_USER; @@ -1906,16 +1981,20 @@ nfs_gss_clnt_gssd_upcall(struct nfsreq *req, struct nfs_gss_clnt_ctx *cp, uint32 if (!IPC_PORT_VALID(cp->gss_clnt_mport)) { cp->gss_clnt_mport = nfs_gss_clnt_get_upcall_port(req->r_cred); - if (cp->gss_clnt_mport == IPC_PORT_NULL) + if (cp->gss_clnt_mport == IPC_PORT_NULL) { goto out; + } } - if (plen) + if (plen) { nfs_gss_mach_alloc_buffer(principal, plen, &pname); - if (cp->gss_clnt_svcnamlen) + } + if (cp->gss_clnt_svcnamlen) { nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname); - if (cp->gss_clnt_tokenlen) + } + if (cp->gss_clnt_tokenlen) { nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken); + } /* Always want to export the lucid context */ cp->gss_clnt_gssd_flags |= GSSD_LUCID_CONTEXT; @@ -1936,7 +2015,7 @@ retry: &cp->gss_clnt_context, &cp->gss_clnt_cred_handle, &ret_flags, - &octx, (mach_msg_type_number_t *) &lucidlen, + &octx, (mach_msg_type_number_t *) &lucidlen, &otoken, &otokenlen, cp->gss_clnt_display ? NULL : display_name, &cp->gss_clnt_major, @@ -1953,14 +2032,17 @@ retry: if (kr != KERN_SUCCESS) { printf("nfs_gss_clnt_gssd_upcall: mach_gss_init_sec_context failed: %x (%d)\n", kr, kr); if (kr == MIG_SERVER_DIED && cp->gss_clnt_cred_handle == 0 && - retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES && - !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) { - if (plen) + retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES && + !vfs_isforce(nmp->nm_mountp) && (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) == 0) { + if (plen) { nfs_gss_mach_alloc_buffer(principal, plen, &pname); - if (cp->gss_clnt_svcnamlen) + } + if (cp->gss_clnt_svcnamlen) { nfs_gss_mach_alloc_buffer(cp->gss_clnt_svcname, cp->gss_clnt_svcnamlen, &svcname); - if (cp->gss_clnt_tokenlen > 0) + } + if (cp->gss_clnt_tokenlen > 0) { nfs_gss_mach_alloc_buffer(cp->gss_clnt_token, cp->gss_clnt_tokenlen, &itoken); + } goto retry; } @@ -1974,9 +2056,10 @@ retry: if (dlen < MAX_DISPLAY_STR) { MALLOC(cp->gss_clnt_display, char *, dlen, M_TEMP, M_WAITOK); - if (cp->gss_clnt_display == NULL) + if (cp->gss_clnt_display == NULL) { goto skip; - bcopy(display_name, cp->gss_clnt_display, dlen); + } + bcopy(display_name, cp->gss_clnt_display, dlen); } else { goto skip; } @@ -2013,8 +2096,9 @@ skip: goto out; } - if (cp->gss_clnt_ctx_id) + if (cp->gss_clnt_ctx_id) { gss_krb5_destroy_context(cp->gss_clnt_ctx_id); + } cp->gss_clnt_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen); if (cp->gss_clnt_ctx_id == NULL) { printf("Failed to make context from lucid_ctx_buffer\n"); @@ -2029,8 +2113,9 @@ skip: } /* Free context token used as input */ - if (cp->gss_clnt_token) + if (cp->gss_clnt_token) { FREE(cp->gss_clnt_token, M_TEMP); + } cp->gss_clnt_token = NULL; cp->gss_clnt_tokenlen = 0; @@ -2040,14 +2125,14 @@ skip: if (cp->gss_clnt_token == NULL) { printf("nfs_gss_clnt_gssd_upcall: could not allocate %d bytes\n", otokenlen); 
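/*
 * Sketch of the encryption-type ordering in nfs_gss_clnt_gssd_upcall()
 * above: the previously selected etype is promoted to the front, the rest
 * keep their order, and the first `retrycnt` candidates (already tried on
 * earlier passes) are dropped. The struct mirrors how nm_etype is used;
 * the field names and order_etypes() itself are assumptions. As in the
 * kernel, the caller must guarantee retrycnt < count.
 */
#include <assert.h>
#include <stdint.h>

#define MAX_ETYPES 8

struct nfs_etype {
    uint32_t count;
    uint32_t selected;                  /* index into etypes, or >= count */
    uint32_t etypes[MAX_ETYPES];
};

static void
order_etypes(struct nfs_etype *et, const struct nfs_etype *mnt,
    uint32_t retrycnt)
{
    *et = *mnt;

    /* Promote the previously selected etype to the front. */
    if (et->selected < et->count) {
        et->etypes[0] = mnt->etypes[et->selected];
        for (uint32_t i = 0; i < et->selected; i++) {
            et->etypes[i + 1] = mnt->etypes[i];
        }
        for (uint32_t i = et->selected + 1; i < et->count; i++) {
            et->etypes[i] = mnt->etypes[i];
        }
    }

    /* Drop the etypes already tried on earlier retries. */
    for (uint32_t i = retrycnt; i < et->count; i++) {
        et->etypes[i - retrycnt] = et->etypes[i];
    }
    et->count -= retrycnt;
}

int
main(void)
{
    struct nfs_etype mnt = { .count = 3, .selected = 1,
                             .etypes = { 18, 17, 16 } };
    struct nfs_etype et;

    order_etypes(&et, &mnt, 0);
    assert(et.etypes[0] == 17 && et.etypes[1] == 18 && et.etypes[2] == 16);
    return 0;
}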
vm_map_copy_discard((vm_map_copy_t) otoken); - return (ENOMEM); + return ENOMEM; } error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_clnt_token); if (error) { printf("Could not copyout gss token\n"); FREE(cp->gss_clnt_token, M_TEMP); cp->gss_clnt_token = NULL; - return (NFSERR_EAUTH); + return NFSERR_EAUTH; } cp->gss_clnt_tokenlen = otokenlen; } @@ -2057,11 +2142,12 @@ skip: NFS_GSS_DBG("etype selected = %d\n", nmp->nm_etype.etypes[selected]); } NFS_GSS_DBG("Up call succeeded major = %d\n", cp->gss_clnt_major); - return (0); + return 0; out: - if (cp->gss_clnt_token) + if (cp->gss_clnt_token) { FREE(cp->gss_clnt_token, M_TEMP); + } cp->gss_clnt_token = NULL; cp->gss_clnt_tokenlen = 0; /* Server's handle isn't valid. Don't reuse */ @@ -2070,9 +2156,9 @@ out: FREE(cp->gss_clnt_handle, M_TEMP); cp->gss_clnt_handle = NULL; } - + NFS_GSS_DBG("Up call returned NFSERR_EAUTH"); - return (NFSERR_EAUTH); + return NFSERR_EAUTH; } /* @@ -2096,8 +2182,9 @@ nfs_gss_clnt_rpcdone(struct nfsreq *req) struct gss_seq *gsp, *ngsp; int i = 0; - if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) - return; // no context - don't bother + if (cp == NULL || !(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) { + return; // no context - don't bother + } /* * Reset the bit for this request in the * sequence number window to indicate it's done. @@ -2105,9 +2192,10 @@ nfs_gss_clnt_rpcdone(struct nfsreq *req) */ lck_mtx_lock(cp->gss_clnt_mtx); gsp = SLIST_FIRST(&req->r_gss_seqlist); - if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin)) + if (gsp && gsp->gss_seqnum > (cp->gss_clnt_seqnum - cp->gss_clnt_seqwin)) { win_resetbit(cp->gss_clnt_seqbits, - gsp->gss_seqnum % cp->gss_clnt_seqwin); + gsp->gss_seqnum % cp->gss_clnt_seqwin); + } /* * Limit the seqnum list to GSS_CLNT_SEQLISTMAX entries @@ -2160,14 +2248,16 @@ nfs_gss_clnt_ctx_unref(struct nfsreq *req) struct timeval now; char CTXBUF[NFS_CTXBUFSZ]; - if (cp == NULL) + if (cp == NULL) { return; + } req->r_gss_ctx = NULL; lck_mtx_lock(cp->gss_clnt_mtx); - if (--cp->gss_clnt_refcnt < 0) + if (--cp->gss_clnt_refcnt < 0) { panic("Over release of gss context!\n"); + } if (cp->gss_clnt_refcnt == 0) { if ((cp->gss_clnt_flags & GSS_CTX_INVAL) && @@ -2177,10 +2267,12 @@ nfs_gss_clnt_ctx_unref(struct nfsreq *req) } if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { destroy = 1; - if (cp->gss_clnt_flags & GSS_CTX_STICKY) + if (cp->gss_clnt_flags & GSS_CTX_STICKY) { nfs_gss_clnt_mnt_rele(nmp); - if (cp->gss_clnt_nctime) + } + if (cp->gss_clnt_nctime) { on_neg_cache = 1; + } } } if (!destroy && cp->gss_clnt_nctime == 0 && @@ -2236,14 +2328,17 @@ nfs_gss_clnt_ctx_neg_cache_reap(struct nfsmount *nmp) /* Don't reap STICKY contexts */ if ((cp->gss_clnt_flags & GSS_CTX_STICKY) || - !(cp->gss_clnt_flags & GSS_CTX_INVAL)) + !(cp->gss_clnt_flags & GSS_CTX_INVAL)) { continue; + } /* Keep up to GSS_MAX_NEG_CACHE_ENTRIES */ - if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES) + if (nmp->nm_ncentries <= GSS_MAX_NEG_CACHE_ENTRIES) { break; + } /* Contexts too young */ - if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec) + if (cp->gss_clnt_nctime + GSS_NEG_CACHE_TO >= now.tv_sec) { continue; + } /* Not referenced, remove it. 
*/ lck_mtx_lock(cp->gss_clnt_mtx); if (cp->gss_clnt_refcnt == 0) { @@ -2336,10 +2431,11 @@ nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dc struct nfs_gss_clnt_ctx *dcp; *dcpp = (struct nfs_gss_clnt_ctx *)NULL; - MALLOC(dcp, struct nfs_gss_clnt_ctx *, sizeof (struct nfs_gss_clnt_ctx), M_TEMP, M_WAITOK); - if (dcp == NULL) - return (ENOMEM); - bzero(dcp, sizeof (struct nfs_gss_clnt_ctx)); + MALLOC(dcp, struct nfs_gss_clnt_ctx *, sizeof(struct nfs_gss_clnt_ctx), M_TEMP, M_WAITOK); + if (dcp == NULL) { + return ENOMEM; + } + bzero(dcp, sizeof(struct nfs_gss_clnt_ctx)); dcp->gss_clnt_mtx = lck_mtx_alloc_init(nfs_gss_clnt_grp, LCK_ATTR_NULL); dcp->gss_clnt_cred = scp->gss_clnt_cred; kauth_cred_ref(dcp->gss_clnt_cred); @@ -2349,7 +2445,7 @@ nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dc MALLOC(dcp->gss_clnt_principal, uint8_t *, dcp->gss_clnt_prinlen, M_TEMP, M_WAITOK | M_ZERO); if (dcp->gss_clnt_principal == NULL) { FREE(dcp, M_TEMP); - return (ENOMEM); + return ENOMEM; } bcopy(scp->gss_clnt_principal, dcp->gss_clnt_principal, dcp->gss_clnt_prinlen); } @@ -2364,7 +2460,7 @@ nfs_gss_clnt_ctx_copy(struct nfs_gss_clnt_ctx *scp, struct nfs_gss_clnt_ctx **dc *dcpp = dcp; - return (0); + return 0; } /* @@ -2374,8 +2470,8 @@ static void nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp) { NFS_GSS_DBG("Destroying context %d/%d\n", - kauth_cred_getasid(cp->gss_clnt_cred), - kauth_cred_getauid(cp->gss_clnt_cred)); + kauth_cred_getasid(cp->gss_clnt_cred), + kauth_cred_getauid(cp->gss_clnt_cred)); host_release_special_port(cp->gss_clnt_mport); cp->gss_clnt_mport = IPC_PORT_NULL; @@ -2384,8 +2480,9 @@ nfs_gss_clnt_ctx_destroy(struct nfs_gss_clnt_ctx *cp) lck_mtx_destroy(cp->gss_clnt_mtx, nfs_gss_clnt_grp); cp->gss_clnt_mtx = (lck_mtx_t *)NULL; } - if (IS_VALID_CRED(cp->gss_clnt_cred)) + if (IS_VALID_CRED(cp->gss_clnt_cred)) { kauth_cred_unref(&cp->gss_clnt_cred); + } cp->gss_clnt_entries.tqe_next = NFSNOLIST; cp->gss_clnt_entries.tqe_prev = NFSNOLIST; if (cp->gss_clnt_principal) { @@ -2420,18 +2517,20 @@ nfs_gss_clnt_ctx_renew(struct nfsreq *req) int error = 0; char CTXBUF[NFS_CTXBUFSZ]; - if (cp == NULL) - return (0); + if (cp == NULL) { + return 0; + } - if (req->r_nmp == NULL) - return (ENXIO); + if (req->r_nmp == NULL) { + return ENXIO; + } nmp = req->r_nmp; lck_mtx_lock(cp->gss_clnt_mtx); if (cp->gss_clnt_flags & GSS_CTX_INVAL) { lck_mtx_unlock(cp->gss_clnt_mtx); nfs_gss_clnt_ctx_unref(req); - return (0); // already being renewed + return 0; // already being renewed } cp->gss_clnt_flags |= (GSS_CTX_INVAL | GSS_CTX_DESTROY); @@ -2442,19 +2541,22 @@ nfs_gss_clnt_ctx_renew(struct nfsreq *req) } lck_mtx_unlock(cp->gss_clnt_mtx); - if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) - return (EACCES); /* Destroying a context is best effort. Don't renew. */ + if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) { + return EACCES; /* Destroying a context is best effort. Don't renew. */ + } /* * If we're setting up a context let nfs_gss_clnt_ctx_init know this is not working * and to try some other etype. 
*/ - if (cp->gss_clnt_proc != RPCSEC_GSS_DATA) - return (ENEEDAUTH); + if (cp->gss_clnt_proc != RPCSEC_GSS_DATA) { + return ENEEDAUTH; + } error = nfs_gss_clnt_ctx_copy(cp, &ncp); NFS_GSS_DBG("Renewing context %s\n", NFS_GSS_CTX(req, ncp)); nfs_gss_clnt_ctx_unref(req); - if (error) - return (error); + if (error) { + return error; + } lck_mtx_lock(&nmp->nm_lock); /* @@ -2467,10 +2569,11 @@ nfs_gss_clnt_ctx_renew(struct nfsreq *req) lck_mtx_unlock(&nmp->nm_lock); error = nfs_gss_clnt_ctx_init_retry(req, ncp); // Initialize new context - if (error) + if (error) { nfs_gss_clnt_ctx_unref(req); + } - return (error); + return error; } @@ -2487,12 +2590,13 @@ nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp) struct nfsreq req; req.r_nmp = nmp; - if (!nmp) + if (!nmp) { return; + } lck_mtx_lock(&nmp->nm_lock); - while((cp = TAILQ_FIRST(&nmp->nm_gsscl))) { + while ((cp = TAILQ_FIRST(&nmp->nm_gsscl))) { TAILQ_REMOVE(&nmp->nm_gsscl, cp, gss_clnt_entries); cp->gss_clnt_entries.tqe_next = NFSNOLIST; lck_mtx_lock(cp->gss_clnt_mtx); @@ -2518,9 +2622,10 @@ nfs_gss_clnt_ctx_unmount(struct nfsmount *nmp) nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, 0); nfsm_chain_build_done(error, &nmreq); - if (!error) + if (!error) { nfs_request_gss(nmp->nm_mountp, &nmreq, - current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status); + current_thread(), cp->gss_clnt_cred, 0, cp, &nmrep, &status); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); } @@ -2560,9 +2665,9 @@ nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) if (nfs_gss_clnt_ctx_cred_match(cp->gss_clnt_cred, cred)) { if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { NFS_GSS_DBG("Found destroyed context %d/%d. refcnt = %d continuing\n", - kauth_cred_getasid(cp->gss_clnt_cred), - kauth_cred_getauid(cp->gss_clnt_cred), - cp->gss_clnt_refcnt); + kauth_cred_getasid(cp->gss_clnt_cred), + kauth_cred_getauid(cp->gss_clnt_cred), + cp->gss_clnt_refcnt); lck_mtx_unlock(cp->gss_clnt_mtx); continue; } @@ -2576,19 +2681,19 @@ nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) * refcount is zero. */ NFS_GSS_DBG("Removed context %d/%d refcnt = %d\n", - kauth_cred_getasid(cp->gss_clnt_cred), - kauth_cred_getuid(cp->gss_clnt_cred), - cp->gss_clnt_refcnt); + kauth_cred_getasid(cp->gss_clnt_cred), + kauth_cred_getuid(cp->gss_clnt_cred), + cp->gss_clnt_refcnt); nfs_gss_clnt_ctx_unref(&req); - return (0); + return 0; } lck_mtx_unlock(cp->gss_clnt_mtx); } lck_mtx_unlock(&nmp->nm_lock); - + NFS_GSS_DBG("Returning ENOENT\n"); - return (ENOENT); + return ENOENT; } /* @@ -2596,8 +2701,7 @@ nfs_gss_clnt_ctx_remove(struct nfsmount *nmp, kauth_cred_t cred) */ int nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx, - uint8_t *principal, uint32_t princlen, uint32_t nametype) - + uint8_t *principal, uint32_t princlen, uint32_t nametype) { struct nfsreq req; int error; @@ -2618,13 +2722,14 @@ nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx, * neagative cache and if and when the user has credentials for the principal * we should be good to go in that we will select those credentials for this principal. 
*/ - if (error == EACCES || error == EAUTH || error == ENEEDAUTH) + if (error == EACCES || error == EAUTH || error == ENEEDAUTH) { error = 0; + } /* We're done with this request */ nfs_gss_clnt_ctx_unref(&req); - return (error); + return error; } /* @@ -2632,7 +2737,7 @@ nfs_gss_clnt_ctx_set_principal(struct nfsmount *nmp, vfs_context_t ctx, */ int nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx, - struct user_nfs_gss_principal *p) + struct user_nfs_gss_principal *p) { struct nfsreq req; int error = 0; @@ -2653,8 +2758,8 @@ nfs_gss_clnt_ctx_get_principal(struct nfsmount *nmp, vfs_context_t ctx, lck_mtx_lock(cp->gss_clnt_mtx); if (cp->gss_clnt_flags & GSS_CTX_DESTROY) { NFS_GSS_DBG("Found destroyed context %s refcnt = %d continuing\n", - NFS_GSS_CTX(&req, cp), - cp->gss_clnt_refcnt); + NFS_GSS_CTX(&req, cp), + cp->gss_clnt_refcnt); lck_mtx_unlock(cp->gss_clnt_mtx); continue; } @@ -2671,13 +2776,14 @@ out: lck_mtx_unlock(&nmp->nm_lock); p->flags |= NFS_IOC_NO_CRED_FLAG; /* No credentials, valid or invalid on this mount */ NFS_GSS_DBG("No context found for session %d by uid %d\n", - kauth_cred_getasid(cred), kauth_cred_getuid(cred)); - return (0); + kauth_cred_getasid(cred), kauth_cred_getuid(cred)); + return 0; } /* Indicate if the cred is INVALID */ - if (cp->gss_clnt_flags & GSS_CTX_INVAL) + if (cp->gss_clnt_flags & GSS_CTX_INVAL) { p->flags |= NFS_IOC_INVALID_CRED_FLAG; + } /* We have set a principal on the mount */ if (cp->gss_clnt_principal) { @@ -2708,7 +2814,7 @@ out: req.r_gss_ctx = cp; NFS_GSS_DBG("Found context %s\n", NFS_GSS_CTX(&req, NULL)); nfs_gss_clnt_ctx_unref(&req); - return (error); + return error; } #endif /* NFSCLIENT */ @@ -2730,9 +2836,10 @@ nfs_gss_svc_ctx_find(uint32_t handle) struct nfs_gss_svc_ctx *cp; uint64_t timenow; - if (handle == 0) - return (NULL); - + if (handle == 0) { + return NULL; + } + head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(handle)]; /* * Don't return a context that is going to expire in GSS_CTX_PEND seconds @@ -2744,14 +2851,14 @@ nfs_gss_svc_ctx_find(uint32_t handle) LIST_FOREACH(cp, head, gss_svc_entries) { if (cp->gss_svc_handle == handle) { if (timenow > cp->gss_svc_incarnation + GSS_SVC_CTX_TTL) { - /* + /* * Context has or is about to expire. Don't use. * We'll return null and the client will have to create * a new context. */ cp->gss_svc_handle = 0; /* - * Make sure though that we stay around for GSS_CTX_PEND seconds + * Make sure though that we stay around for GSS_CTX_PEND seconds * for other threads that might be using the context. 
*/ cp->gss_svc_incarnation = timenow; @@ -2759,16 +2866,16 @@ nfs_gss_svc_ctx_find(uint32_t handle) cp = NULL; break; } - lck_mtx_lock(cp->gss_svc_mtx); + lck_mtx_lock(cp->gss_svc_mtx); cp->gss_svc_refcnt++; - lck_mtx_unlock(cp->gss_svc_mtx); + lck_mtx_unlock(cp->gss_svc_mtx); break; } } lck_mtx_unlock(nfs_gss_svc_ctx_mutex); - return (cp); + return cp; } /* @@ -2780,7 +2887,7 @@ nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp) { struct nfs_gss_svc_ctx_hashhead *head; struct nfs_gss_svc_ctx *p; - + lck_mtx_lock(nfs_gss_svc_ctx_mutex); /* @@ -2790,15 +2897,17 @@ nfs_gss_svc_ctx_insert(struct nfs_gss_svc_ctx *cp) */ retry: cp->gss_svc_handle = random(); - if (cp->gss_svc_handle == 0) + if (cp->gss_svc_handle == 0) { goto retry; + } head = &nfs_gss_svc_ctx_hashtbl[SVC_CTX_HASH(cp->gss_svc_handle)]; LIST_FOREACH(p, head, gss_svc_entries) - if (p->gss_svc_handle == cp->gss_svc_handle) - goto retry; + if (p->gss_svc_handle == cp->gss_svc_handle) { + goto retry; + } clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, - &cp->gss_svc_incarnation); + &cp->gss_svc_incarnation); LIST_INSERT_HEAD(head, cp, gss_svc_entries); nfs_gss_ctx_count++; @@ -2806,7 +2915,7 @@ retry: nfs_gss_timer_on = 1; nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call, - min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC); + min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC); } lck_mtx_unlock(nfs_gss_svc_ctx_mutex); @@ -2840,16 +2949,17 @@ nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2) */ LIST_FOREACH_SAFE(cp, &nfs_gss_svc_ctx_hashtbl[i], gss_svc_entries, next) { contexts++; - if (timenow > cp->gss_svc_incarnation + - (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0) - && cp->gss_svc_refcnt == 0) { + if (timenow > cp->gss_svc_incarnation + + (cp->gss_svc_handle ? GSS_SVC_CTX_TTL : 0) + && cp->gss_svc_refcnt == 0) { /* * A stale context - remove it */ LIST_REMOVE(cp, gss_svc_entries); NFS_GSS_DBG("Removing contex for %d\n", cp->gss_svc_uid); - if (cp->gss_svc_seqbits) + if (cp->gss_svc_seqbits) { FREE(cp->gss_svc_seqbits, M_TEMP); + } lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp); FREE(cp, M_TEMP); contexts--; @@ -2864,9 +2974,10 @@ nfs_gss_svc_ctx_timer(__unused void *param1, __unused void *param2) * set up another callout to check on them later. */ nfs_gss_timer_on = nfs_gss_ctx_count > 0; - if (nfs_gss_timer_on) + if (nfs_gss_timer_on) { nfs_interval_timer_start(nfs_gss_svc_ctx_timer_call, - min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC); + min(GSS_TIMER_PERIOD, max(GSS_CTX_TTL_MIN, nfsrv_gss_context_ttl)) * MSECS_PER_SEC); + } lck_mtx_unlock(nfs_gss_svc_ctx_mutex); } @@ -2909,8 +3020,9 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) nfsm_chain_get_32(error, nmc, seqnum); nfsm_chain_get_32(error, nmc, service); nfsm_chain_get_32(error, nmc, handle_len); - if (error) + if (error) { goto nfsmout; + } /* * Make sure context setup/destroy is being done with a nullproc @@ -2930,10 +3042,10 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) goto nfsmout; } - nd->nd_sec = - service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 : - service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I : - service == RPCSEC_GSS_SVC_PRIVACY ? RPCAUTH_KRB5P : 0; + nd->nd_sec = + service == RPCSEC_GSS_SVC_NONE ? RPCAUTH_KRB5 : + service == RPCSEC_GSS_SVC_INTEGRITY ? RPCAUTH_KRB5I : + service == RPCSEC_GSS_SVC_PRIVACY ? 
RPCAUTH_KRB5P : 0; if (proc == RPCSEC_GSS_INIT) { /* @@ -2947,7 +3059,7 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) /* * Set up a new context */ - MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK|M_ZERO); + MALLOC(cp, struct nfs_gss_svc_ctx *, sizeof(*cp), M_TEMP, M_WAITOK | M_ZERO); if (cp == NULL) { error = ENOMEM; goto nfsmout; @@ -2963,8 +3075,9 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) goto nfsmout; } nfsm_chain_get_32(error, nmc, handle); - if (error) + if (error) { goto nfsmout; + } cp = nfs_gss_svc_ctx_find(handle); if (cp == NULL) { error = NFSERR_AUTHERR | RPCSEC_GSS_CTXPROBLEM; @@ -2989,7 +3102,7 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) /* * Sequence number is bad */ - error = EINVAL; // drop the request + error = EINVAL; // drop the request goto nfsmout; } @@ -3004,16 +3117,18 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) header_len = nfsm_chain_offset(nmc); nfsm_chain_get_32(error, nmc, flavor); nfsm_chain_get_32(error, nmc, cksum.length); - if (error) + if (error) { goto nfsmout; - if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE) + } + if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE) { error = NFSERR_AUTHERR | AUTH_BADVERF; - else { + } else { MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK); nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value); } - if (error) + if (error) { goto nfsmout; + } /* Now verify the client's call header checksum */ major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, nmc->nmc_mhead, 0, header_len, &cksum, NULL); @@ -3032,7 +3147,7 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) bzero(&temp_pcred, sizeof(temp_pcred)); temp_pcred.cr_uid = cp->gss_svc_uid; bcopy(cp->gss_svc_gids, temp_pcred.cr_groups, - sizeof(gid_t) * cp->gss_svc_ngroups); + sizeof(gid_t) * cp->gss_svc_ngroups); temp_pcred.cr_ngroups = cp->gss_svc_ngroups; nd->nd_cr = posix_cred_create(&temp_pcred); @@ -3060,7 +3175,7 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) * - length of checksum token * - checksum of seqnum + call args */ - nfsm_chain_get_32(error, nmc, arglen); // length of args + nfsm_chain_get_32(error, nmc, arglen); // length of args if (arglen > NFS_MAXPACKET) { error = EBADRPC; goto nfsmout; @@ -3070,8 +3185,9 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) nfsm_chain_adv(error, &nmc_tmp, arglen); nfsm_chain_get_32(error, &nmc_tmp, cksum.length); cksum.value = NULL; - if (cksum.length > 0 && cksum.length < GSS_MAX_MIC_LEN) + if (cksum.length > 0 && cksum.length < GSS_MAX_MIC_LEN) { MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK); + } if (cksum.value == NULL) { error = EBADRPC; @@ -3083,7 +3199,7 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) start = nfsm_chain_offset(nmc); major = gss_krb5_verify_mic_mbuf((uint32_t *)&error, cp->gss_svc_ctx_id, - nmc->nmc_mhead, start, arglen, &cksum, NULL); + nmc->nmc_mhead, start, arglen, &cksum, NULL); FREE(cksum.value, M_TEMP); if (major != GSS_S_COMPLETE) { printf("Server args: gss_krb5_verify_mic_mbuf failed %d\n", error); @@ -3098,7 +3214,7 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) */ nfsm_chain_get_32(error, nmc, seqnum); if (seqnum != nd->nd_gss_seqnum) { - error = EBADRPC; // returns as GARBAGEARGS + error = EBADRPC; // returns as GARBAGEARGS goto nfsmout; } break; @@ 
-3110,7 +3226,7 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) * - wrap token (37-40 bytes) */ prev_mbuf = nmc->nmc_mcur; - nfsm_chain_get_32(error, nmc, arglen); // length of args + nfsm_chain_get_32(error, nmc, arglen); // length of args if (arglen > NFS_MAXPACKET) { error = EBADRPC; goto nfsmout; @@ -3122,8 +3238,9 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) /* split out the wrap token */ argsize = arglen; error = gss_normalize_mbuf(nmc->nmc_mcur, start, &argsize, &reply_mbuf, &pad_mbuf, 0); - if (error) + if (error) { goto nfsmout; + } assert(argsize == arglen); if (pad_mbuf) { @@ -3160,10 +3277,10 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) nfsm_chain_get_32(error, nmc, seqnum); if (seqnum != nd->nd_gss_seqnum) { printf("%s: Sequence number mismatch seqnum = %d nd->nd_gss_seqnum = %d\n", - __func__, seqnum, nd->nd_gss_seqnum); + __func__, seqnum, nd->nd_gss_seqnum); printmbuf("reply_mbuf", nmc->nmc_mhead, 0, 0); printf("reply_mbuf %p nmc_head %p\n", reply_mbuf, nmc->nmc_mhead); - error = EBADRPC; // returns as GARBAGEARGS + error = EBADRPC; // returns as GARBAGEARGS goto nfsmout; } break; @@ -3176,8 +3293,9 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) */ nfsm_chain_get_32(error, nmc, flavor); nfsm_chain_get_32(error, nmc, verflen); - if (error || flavor != RPCAUTH_NULL || verflen > 0) + if (error || flavor != RPCAUTH_NULL || verflen > 0) { error = NFSERR_AUTHERR | RPCSEC_GSS_CREDPROBLEM; + } if (error) { if (proc == RPCSEC_GSS_INIT) { lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp); @@ -3191,9 +3309,10 @@ nfs_gss_svc_cred_get(struct nfsrv_descript *nd, struct nfsm_chain *nmc) nd->nd_gss_context = cp; return 0; nfsmout: - if (cp) + if (cp) { nfs_gss_svc_ctx_deref(cp); - return (error); + } + return error; } /* @@ -3219,7 +3338,7 @@ nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc) */ nfsm_chain_add_32(error, nmc, RPCAUTH_NULL); nfsm_chain_add_32(error, nmc, 0); - return (error); + return error; } /* @@ -3230,15 +3349,17 @@ nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc) */ seqbuf.length = NFSX_UNSIGNED; if (cp->gss_svc_proc == RPCSEC_GSS_INIT || - cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) + cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) { network_seqnum = htonl(cp->gss_svc_seqwin); - else + } else { network_seqnum = htonl(nd->nd_gss_seqnum); + } seqbuf.value = &network_seqnum; major = gss_krb5_get_mic((uint32_t *)&error, cp->gss_svc_ctx_id, 0, &seqbuf, &cksum); - if (major != GSS_S_COMPLETE) - return (error); + if (major != GSS_S_COMPLETE) { + return error; + } /* * Now wrap it in a token and add @@ -3249,7 +3370,7 @@ nfs_gss_svc_verf_put(struct nfsrv_descript *nd, struct nfsm_chain *nmc) nfsm_chain_add_opaque(error, nmc, cksum.value, cksum.length); gss_release_buffer(NULL, &cksum); - return (error); + return error; } /* @@ -3267,8 +3388,9 @@ nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc) int error = 0; if (cp->gss_svc_proc == RPCSEC_GSS_INIT || - cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) - return (0); + cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) { + return 0; + } switch (nd->nd_sec) { case RPCAUTH_KRB5: @@ -3276,12 +3398,12 @@ nfs_gss_svc_prepare_reply(struct nfsrv_descript *nd, struct nfsm_chain *nmc) break; case RPCAUTH_KRB5I: case RPCAUTH_KRB5P: - nd->nd_gss_mb = nmc->nmc_mcur; // record current mbuf - nfsm_chain_finish_mbuf(error, nmc); // split the chain here + nd->nd_gss_mb 
= nmc->nmc_mcur; // record current mbuf + nfsm_chain_finish_mbuf(error, nmc); // split the chain here break; } - return (error); + return error; } /* @@ -3305,15 +3427,17 @@ nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep __unused) * together. */ - mb = nd->nd_gss_mb; // the mbuf where we split - results = mbuf_next(mb); // first mbuf in the results - error = mbuf_setnext(mb, NULL); // disconnect the chains - if (error) - return (error); - nfs_gss_nfsm_chain(nmc_res, mb); // set up the prepend chain + mb = nd->nd_gss_mb; // the mbuf where we split + results = mbuf_next(mb); // first mbuf in the results + error = mbuf_setnext(mb, NULL); // disconnect the chains + if (error) { + return error; + } + nfs_gss_nfsm_chain(nmc_res, mb); // set up the prepend chain nfsm_chain_build_done(error, nmc_res); - if (error) - return (error); + if (error) { + return error; + } if (nd->nd_sec == RPCAUTH_KRB5I) { error = rpc_gss_integ_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen); @@ -3321,10 +3445,10 @@ nfs_gss_svc_protect_reply(struct nfsrv_descript *nd, mbuf_t mrep __unused) /* RPCAUTH_KRB5P */ error = rpc_gss_priv_data_create(cp->gss_svc_ctx_id, &results, nd->nd_gss_seqnum, &reslen); } - nfs_gss_append_chain(nmc_res, results); // Append the results mbufs + nfs_gss_append_chain(nmc_res, results); // Append the results mbufs nfsm_chain_build_done(error, nmc_res); - return (error); + return error; } /* @@ -3355,14 +3479,15 @@ nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t * switch (cp->gss_svc_proc) { case RPCSEC_GSS_INIT: nfs_gss_svc_ctx_insert(cp); - /* FALLTHRU */ + /* FALLTHRU */ case RPCSEC_GSS_CONTINUE_INIT: /* Get the token from the request */ nfsm_chain_get_32(error, nmreq, cp->gss_svc_tokenlen); cp->gss_svc_token = NULL; - if (cp->gss_svc_tokenlen > 0 && cp->gss_svc_tokenlen < GSS_MAX_TOKEN_LEN) + if (cp->gss_svc_tokenlen > 0 && cp->gss_svc_tokenlen < GSS_MAX_TOKEN_LEN) { MALLOC(cp->gss_svc_token, u_char *, cp->gss_svc_tokenlen, M_TEMP, M_WAITOK); + } if (cp->gss_svc_token == NULL) { autherr = RPCSEC_GSS_CREDPROBLEM; break; @@ -3373,8 +3498,9 @@ nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t * error = nfs_gss_svc_gssd_upcall(cp); if (error) { autherr = RPCSEC_GSS_CREDPROBLEM; - if (error == NFSERR_EAUTH) + if (error == NFSERR_EAUTH) { error = 0; + } break; } @@ -3382,8 +3508,9 @@ nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t * * If the context isn't complete, pass the new token * back to the client for another round. */ - if (cp->gss_svc_major != GSS_S_COMPLETE) + if (cp->gss_svc_major != GSS_S_COMPLETE) { break; + } /* * Now the server context is complete. 
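
The hunks above reformat nfs_gss_svc_ctx_init()'s handling of RPCSEC_GSS_INIT and RPCSEC_GSS_CONTINUE_INIT, where the server feeds each client token to the GSS acceptor and returns the resulting token until the mechanism reports completion. For orientation only, here is a minimal userspace sketch of that same accept loop using the portable GSS-API (RFC 2744); recv_token() and send_token() are hypothetical stand-ins for the token transport that RPCSEC_GSS provides over the NFS connection:

    #include <gssapi/gssapi.h>

    /* Hypothetical transport helpers -- placeholders for the RPCSEC_GSS
     * token exchange that nfs_gss_svc_ctx_init() drives over NFS. */
    extern int recv_token(gss_buffer_t tok);    /* client -> server */
    extern int send_token(gss_buffer_t tok);    /* server -> client */

    static int
    accept_gss_context(gss_ctx_id_t *ctx)
    {
        OM_uint32 major, minor;
        gss_buffer_desc itok, otok = GSS_C_EMPTY_BUFFER;

        *ctx = GSS_C_NO_CONTEXT;
        do {
            if (recv_token(&itok) != 0)     /* RPCSEC_GSS_INIT / CONTINUE_INIT */
                return -1;
            major = gss_accept_sec_context(&minor, ctx,
                GSS_C_NO_CREDENTIAL, &itok, GSS_C_NO_CHANNEL_BINDINGS,
                NULL, NULL, &otok, NULL, NULL, NULL);
            if (otok.length > 0) {          /* hand the new token back */
                send_token(&otok);
                (void) gss_release_buffer(&minor, &otok);
            }
        } while (major == GSS_S_CONTINUE_NEEDED);

        return (major == GSS_S_COMPLETE) ? 0 : -1;
    }

The kernel delegates this negotiation to the userspace gssd daemon instead of calling the GSS-API directly, which is why the hunks that follow round-trip through mach_gss_accept_sec_context(); the loop structure is the same.
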
@@ -3393,7 +3520,7 @@ nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t * cp->gss_svc_seqwin = GSS_SVC_SEQWINDOW; MALLOC(cp->gss_svc_seqbits, uint32_t *, - nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK|M_ZERO); + nfsm_rndup((cp->gss_svc_seqwin + 7) / 8), M_TEMP, M_WAITOK | M_ZERO); if (cp->gss_svc_seqbits == NULL) { autherr = RPCSEC_GSS_CREDPROBLEM; break; @@ -3413,10 +3540,10 @@ nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t * */ cp = nfs_gss_svc_ctx_find(cp->gss_svc_handle); if (cp != NULL) { - cp->gss_svc_handle = 0; // so it can't be found + cp->gss_svc_handle = 0; // so it can't be found lck_mtx_lock(cp->gss_svc_mtx); clock_interval_to_deadline(GSS_CTX_PEND, NSEC_PER_SEC, - &cp->gss_svc_incarnation); + &cp->gss_svc_incarnation); lck_mtx_unlock(cp->gss_svc_mtx); } break; @@ -3427,23 +3554,25 @@ nfs_gss_svc_ctx_init(struct nfsrv_descript *nd, struct nfsrv_sock *slp, mbuf_t * /* Now build the reply */ - if (nd->nd_repstat == 0) + if (nd->nd_repstat == 0) { nd->nd_repstat = autherr ? (NFSERR_AUTHERR | autherr) : NFSERR_RETVOID; + } sz = 7 * NFSX_UNSIGNED + nfsm_rndup(cp->gss_svc_tokenlen); // size of results error = nfsrv_rephead(nd, slp, &nmrep, sz); *mrepp = nmrep.nmc_mhead; - if (error || autherr) + if (error || autherr) { goto nfsmout; + } if (cp->gss_svc_proc == RPCSEC_GSS_INIT || cp->gss_svc_proc == RPCSEC_GSS_CONTINUE_INIT) { nfsm_chain_add_32(error, &nmrep, sizeof(cp->gss_svc_handle)); nfsm_chain_add_32(error, &nmrep, cp->gss_svc_handle); - + nfsm_chain_add_32(error, &nmrep, cp->gss_svc_major); nfsm_chain_add_32(error, &nmrep, cp->gss_svc_minor); nfsm_chain_add_32(error, &nmrep, cp->gss_svc_seqwin); - + nfsm_chain_add_32(error, &nmrep, cp->gss_svc_tokenlen); if (cp->gss_svc_token != NULL) { nfsm_chain_add_opaque(error, &nmrep, cp->gss_svc_token, cp->gss_svc_tokenlen); @@ -3456,10 +3585,12 @@ nfsmout: if (autherr != 0) { nd->nd_gss_context = NULL; LIST_REMOVE(cp, gss_svc_entries); - if (cp->gss_svc_seqbits != NULL) + if (cp->gss_svc_seqbits != NULL) { FREE(cp->gss_svc_seqbits, M_TEMP); - if (cp->gss_svc_token != NULL) + } + if (cp->gss_svc_token != NULL) { FREE(cp->gss_svc_token, M_TEMP); + } lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp); FREE(cp, M_TEMP); } @@ -3469,7 +3600,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -3504,8 +3635,9 @@ nfs_gss_svc_gssd_upcall(struct nfs_gss_svc_ctx *cp) goto out; } - if (cp->gss_svc_tokenlen > 0) + if (cp->gss_svc_tokenlen > 0) { nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken); + } retry: printf("Calling mach_gss_accept_sec_context\n"); @@ -3526,12 +3658,13 @@ retry: &cp->gss_svc_minor); printf("mach_gss_accept_sec_context returned %d\n", kr); - if (kr != KERN_SUCCESS) { + if (kr != KERN_SUCCESS) { printf("nfs_gss_svc_gssd_upcall failed: %x (%d)\n", kr, kr); if (kr == MIG_SERVER_DIED && cp->gss_svc_context == 0 && - retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) { - if (cp->gss_svc_tokenlen > 0) + retry_cnt++ < NFS_GSS_MACH_MAX_RETRIES) { + if (cp->gss_svc_tokenlen > 0) { nfs_gss_mach_alloc_buffer(cp->gss_svc_token, cp->gss_svc_tokenlen, &itoken); + } goto retry; } host_release_special_port(mp); @@ -3554,8 +3687,9 @@ retry: FREE(lucid_ctx_buffer, M_TEMP); goto out; } - if (cp->gss_svc_ctx_id) + if (cp->gss_svc_ctx_id) { gss_krb5_destroy_context(cp->gss_svc_ctx_id); + } cp->gss_svc_ctx_id = gss_krb5_make_context(lucid_ctx_buffer, lucidlen); if (cp->gss_svc_ctx_id == NULL) { printf("Failed to 
make context from lucid_ctx_buffer\n"); @@ -3564,36 +3698,37 @@ retry: } /* Free context token used as input */ - if (cp->gss_svc_token) + if (cp->gss_svc_token) { FREE(cp->gss_svc_token, M_TEMP); + } cp->gss_svc_token = NULL; cp->gss_svc_tokenlen = 0; - + if (otokenlen > 0) { /* Set context token to gss output token */ MALLOC(cp->gss_svc_token, u_char *, otokenlen, M_TEMP, M_WAITOK); if (cp->gss_svc_token == NULL) { printf("nfs_gss_svc_gssd_upcall: could not allocate %d bytes\n", otokenlen); vm_map_copy_discard((vm_map_copy_t) otoken); - return (ENOMEM); + return ENOMEM; } error = nfs_gss_mach_vmcopyout((vm_map_copy_t) otoken, otokenlen, cp->gss_svc_token); if (error) { FREE(cp->gss_svc_token, M_TEMP); cp->gss_svc_token = NULL; - return (NFSERR_EAUTH); + return NFSERR_EAUTH; } cp->gss_svc_tokenlen = otokenlen; } - return (0); + return 0; out: FREE(cp->gss_svc_token, M_TEMP); cp->gss_svc_tokenlen = 0; cp->gss_svc_token = NULL; - return (NFSERR_EAUTH); + return NFSERR_EAUTH; } /* @@ -3620,15 +3755,17 @@ nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq) * move the window up, and set the bit. */ if (seq > cp->gss_svc_seqmax) { - if (seq - cp->gss_svc_seqmax > win) + if (seq - cp->gss_svc_seqmax > win) { bzero(bits, nfsm_rndup((win + 7) / 8)); - else - for (i = cp->gss_svc_seqmax + 1; i < seq; i++) + } else { + for (i = cp->gss_svc_seqmax + 1; i < seq; i++) { win_resetbit(bits, i % win); + } + } win_setbit(bits, seq % win); cp->gss_svc_seqmax = seq; lck_mtx_unlock(cp->gss_svc_mtx); - return (1); + return 1; } /* @@ -3636,7 +3773,7 @@ nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq) */ if (seq <= cp->gss_svc_seqmax - win) { lck_mtx_unlock(cp->gss_svc_mtx); - return (0); + return 0; } /* @@ -3644,11 +3781,11 @@ nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq) */ if (win_getbit(bits, seq % win)) { lck_mtx_unlock(cp->gss_svc_mtx); - return (0); + return 0; } win_setbit(bits, seq % win); lck_mtx_unlock(cp->gss_svc_mtx); - return (1); + return 1; } /* @@ -3661,12 +3798,13 @@ nfs_gss_svc_seqnum_valid(struct nfs_gss_svc_ctx *cp, uint32_t seq) void nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *cp) { - lck_mtx_lock(cp->gss_svc_mtx); - if (cp->gss_svc_refcnt > 0) + lck_mtx_lock(cp->gss_svc_mtx); + if (cp->gss_svc_refcnt > 0) { cp->gss_svc_refcnt--; - else + } else { printf("nfs_gss_ctx_deref: zero refcount\n"); - lck_mtx_unlock(cp->gss_svc_mtx); + } + lck_mtx_unlock(cp->gss_svc_mtx); } /* @@ -3678,7 +3816,7 @@ nfs_gss_svc_cleanup(void) struct nfs_gss_svc_ctx_hashhead *head; struct nfs_gss_svc_ctx *cp, *ncp; int i; - + lck_mtx_lock(nfs_gss_svc_ctx_mutex); /* @@ -3691,8 +3829,9 @@ nfs_gss_svc_cleanup(void) head = &nfs_gss_svc_ctx_hashtbl[i]; LIST_FOREACH_SAFE(cp, head, gss_svc_entries, ncp) { LIST_REMOVE(cp, gss_svc_entries); - if (cp->gss_svc_seqbits) + if (cp->gss_svc_seqbits) { FREE(cp->gss_svc_seqbits, M_TEMP); + } lck_mtx_destroy(cp->gss_svc_mtx, nfs_gss_svc_grp); FREE(cp, M_TEMP); } @@ -3711,7 +3850,7 @@ nfs_gss_svc_cleanup(void) /* * Release a host special port that was obtained by host_get_special_port * or one of its macros (host_get_gssd_port in this case). - * This really should be in a public kpi. + * This really should be in a public kpi. 
*/ /* This should be in a public header if this routine is not */ @@ -3721,14 +3860,15 @@ extern ipc_port_t ipc_port_copy_send(ipc_port_t); static void host_release_special_port(mach_port_t mp) { - if (IPC_PORT_VALID(mp)) + if (IPC_PORT_VALID(mp)) { ipc_port_release_send(mp); + } } static mach_port_t host_copy_special_port(mach_port_t mp) { - return (ipc_port_copy_send(mp)); + return ipc_port_copy_send(mp); } /* @@ -3750,11 +3890,12 @@ nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr) vm_size_t tbuflen; *addr = NULL; - if (buf == NULL || buflen == 0) + if (buf == NULL || buflen == 0) { return; + } tbuflen = vm_map_round_page(buflen, - vm_map_page_mask(ipc_kernel_map)); + vm_map_page_mask(ipc_kernel_map)); kr = vm_allocate_kernel(ipc_kernel_map, &kmem_buf, tbuflen, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_FILE); if (kr != 0) { printf("nfs_gss_mach_alloc_buffer: vm_allocate failed\n"); @@ -3762,33 +3903,33 @@ nfs_gss_mach_alloc_buffer(u_char *buf, uint32_t buflen, vm_map_copy_t *addr) } kr = vm_map_wire_kernel(ipc_kernel_map, - vm_map_trunc_page(kmem_buf, - vm_map_page_mask(ipc_kernel_map)), - vm_map_round_page(kmem_buf + tbuflen, - vm_map_page_mask(ipc_kernel_map)), - VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_FILE, FALSE); + vm_map_trunc_page(kmem_buf, + vm_map_page_mask(ipc_kernel_map)), + vm_map_round_page(kmem_buf + tbuflen, + vm_map_page_mask(ipc_kernel_map)), + VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_FILE, FALSE); if (kr != 0) { printf("nfs_gss_mach_alloc_buffer: vm_map_wire failed\n"); return; } - + bcopy(buf, (void *) kmem_buf, buflen); // Shouldn't need to bzero below since vm_allocate returns zeroed pages // bzero(kmem_buf + buflen, tbuflen - buflen); - + kr = vm_map_unwire(ipc_kernel_map, - vm_map_trunc_page(kmem_buf, - vm_map_page_mask(ipc_kernel_map)), - vm_map_round_page(kmem_buf + tbuflen, - vm_map_page_mask(ipc_kernel_map)), - FALSE); + vm_map_trunc_page(kmem_buf, + vm_map_page_mask(ipc_kernel_map)), + vm_map_round_page(kmem_buf + tbuflen, + vm_map_page_mask(ipc_kernel_map)), + FALSE); if (kr != 0) { printf("nfs_gss_mach_alloc_buffer: vm_map_unwire failed\n"); return; } kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t) kmem_buf, - (vm_map_size_t) buflen, TRUE, addr); + (vm_map_size_t) buflen, TRUE, addr); if (kr != 0) { printf("nfs_gss_mach_alloc_buffer: vm_map_copyin failed\n"); return; @@ -3809,14 +3950,15 @@ nfs_gss_mach_vmcopyout(vm_map_copy_t in, uint32_t len, u_char *out) int error; error = vm_map_copyout(ipc_kernel_map, &map_data, in); - if (error) - return (error); + if (error) { + return error; + } data = CAST_DOWN(vm_offset_t, map_data); bcopy((void *) data, out, len); vm_deallocate(ipc_kernel_map, data, len); - return (0); + return 0; } /* @@ -3828,10 +3970,11 @@ nfs_gss_mchain_length(mbuf_t mhead) mbuf_t mb; int len = 0; - for (mb = mhead; mb; mb = mbuf_next(mb)) + for (mb = mhead; mb; mb = mbuf_next(mb)) { len += mbuf_len(mb); + } - return (len); + return len; } /* @@ -3845,19 +3988,21 @@ nfs_gss_append_chain(struct nfsm_chain *nmc, mbuf_t mc) /* Connect the mbuf chains */ error = mbuf_setnext(nmc->nmc_mcur, mc); - if (error) - return (error); + if (error) { + return error; + } /* Find the last mbuf in the chain */ tail = NULL; - for (mb = mc; mb; mb = mbuf_next(mb)) + for (mb = mc; mb; mb = mbuf_next(mb)) { tail = mb; + } nmc->nmc_mcur = tail; nmc->nmc_ptr = (caddr_t) mbuf_data(tail) + mbuf_len(tail); nmc->nmc_left = mbuf_trailingspace(tail); - return (0); + return 0; } /* @@ -3870,8 +4015,9 @@ nfs_gss_nfsm_chain(struct nfsm_chain 
*nmc, mbuf_t mc) /* Find the last mbuf in the chain */ tail = NULL; - for (mb = mc; mb; mb = mbuf_next(mb)) + for (mb = mc; mb; mb = mbuf_next(mb)) { tail = mb; + } nmc->nmc_mhead = mc; nmc->nmc_mcur = tail; @@ -3891,15 +4037,17 @@ hexdump(const char *msg, void *data, size_t len) { size_t i, j; u_char *d = data; - char *p, disbuf[3*DISPLAYLEN+1]; - + char *p, disbuf[3 * DISPLAYLEN + 1]; + printf("NFS DEBUG %s len=%d:\n", msg, (uint32_t)len); - if (len > MAXDISPLAYLEN) + if (len > MAXDISPLAYLEN) { len = MAXDISPLAYLEN; + } for (i = 0; i < len; i += DISPLAYLEN) { - for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) + for (p = disbuf, j = 0; (j + i) < len && j < DISPLAYLEN; j++, p += 3) { snprintf(p, 4, "%02x ", d[i + j]); + } printf("\t%s\n", disbuf); } } diff --git a/bsd/nfs/nfs_gss.h b/bsd/nfs/nfs_gss.h index 5b6887f9b..302489486 100644 --- a/bsd/nfs/nfs_gss.h +++ b/bsd/nfs/nfs_gss.h @@ -2,7 +2,7 @@ * Copyright (c) 2007-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,20 +34,20 @@ #include #include -#define RPCSEC_GSS 6 -#define RPCSEC_GSS_VERS_1 1 +#define RPCSEC_GSS 6 +#define RPCSEC_GSS_VERS_1 1 enum rpcsec_gss_proc { - RPCSEC_GSS_DATA = 0, - RPCSEC_GSS_INIT = 1, - RPCSEC_GSS_CONTINUE_INIT = 2, - RPCSEC_GSS_DESTROY = 3 + RPCSEC_GSS_DATA = 0, + RPCSEC_GSS_INIT = 1, + RPCSEC_GSS_CONTINUE_INIT = 2, + RPCSEC_GSS_DESTROY = 3 }; enum rpcsec_gss_service { - RPCSEC_GSS_SVC_NONE = 1, // sec=krb5 - RPCSEC_GSS_SVC_INTEGRITY = 2, // sec=krb5i - RPCSEC_GSS_SVC_PRIVACY = 3, // sec=krb5p + RPCSEC_GSS_SVC_NONE = 1, // sec=krb5 + RPCSEC_GSS_SVC_INTEGRITY = 2, // sec=krb5i + RPCSEC_GSS_SVC_PRIVACY = 3, // sec=krb5p }; /* encoded krb5 OID */ @@ -62,101 +62,101 @@ extern u_char krb5_mech_oid[11]; * of a gss_init_sec_context if it includes a large PAC. 
*/ -#define GSS_MAX_CTX_HANDLE_LEN 256 -#define GSS_MAX_TOKEN_LEN 64*1024 +#define GSS_MAX_CTX_HANDLE_LEN 256 +#define GSS_MAX_TOKEN_LEN 64*1024 /* * Put a "reasonble" bound on MIC lengths */ -#define GSS_MAX_MIC_LEN 2048 +#define GSS_MAX_MIC_LEN 2048 -#define GSS_MAXSEQ 0x80000000 // The biggest sequence number -#define GSS_SVC_MAXCONTEXTS 500000 // Max contexts supported -#define GSS_SVC_SEQWINDOW 256 // Server's sequence window -#define GSS_CLNT_SEQLISTMAX 32 // Max length of req seq num list +#define GSS_MAXSEQ 0x80000000 // The biggest sequence number +#define GSS_SVC_MAXCONTEXTS 500000 // Max contexts supported +#define GSS_SVC_SEQWINDOW 256 // Server's sequence window +#define GSS_CLNT_SEQLISTMAX 32 // Max length of req seq num list -#define MAX_SKEYLEN 32 -#define MAX_LUCIDLEN (sizeof (lucid_context) + MAX_SKEYLEN) +#define MAX_SKEYLEN 32 +#define MAX_LUCIDLEN (sizeof (lucid_context) + MAX_SKEYLEN) #define GSS_MAX_NEG_CACHE_ENTRIES 16 #define GSS_NEG_CACHE_TO 3 -#define GSS_PRINT_DELAY (8 * 3600) // Wait day before printing the same error message +#define GSS_PRINT_DELAY (8 * 3600) // Wait day before printing the same error message /* * The client's RPCSEC_GSS context information */ struct nfs_gss_clnt_ctx { - lck_mtx_t *gss_clnt_mtx; - thread_t gss_clnt_thread; // Thread creating context - TAILQ_ENTRY(nfs_gss_clnt_ctx) gss_clnt_entries; - uint32_t gss_clnt_flags; // Flag bits - see below - int32_t gss_clnt_refcnt; // Reference count - kauth_cred_t gss_clnt_cred; // Owner of this context - uint8_t *gss_clnt_principal; // Principal to use for this credential - uint32_t gss_clnt_prinlen; // Length of principal - gssd_nametype gss_clnt_prinnt; // Name type of principal - char *gss_clnt_display; // display name of principal - uint32_t gss_clnt_proc; // Current GSS proc for cred - uint32_t gss_clnt_seqnum; // GSS sequence number - uint32_t gss_clnt_service; // Indicates krb5, krb5i or krb5p - uint8_t *gss_clnt_handle; // Identifies server context - uint32_t gss_clnt_handle_len; // Size of server's ctx handle - time_t gss_clnt_nctime; // When context was put in the negative cache - uint32_t gss_clnt_seqwin; // Server's seq num window - uint32_t *gss_clnt_seqbits; // Bitmap to track seq numbers in use - mach_port_t gss_clnt_mport; // Mach port for gssd upcall - uint32_t gss_clnt_verflen; // RPC verifier length from server - uint8_t *gss_clnt_verf; // RPC verifier from server - uint8_t *gss_clnt_svcname; // Service name e.g. 
"nfs/big.apple.com" - uint32_t gss_clnt_svcnamlen; // Service name length - gssd_nametype gss_clnt_svcnt; // Service name type - gssd_cred gss_clnt_cred_handle; // Opaque cred handle from gssd - gssd_ctx gss_clnt_context; // Opaque context handle from gssd - gss_ctx_id_t gss_clnt_ctx_id; // Underlying gss context - uint8_t *gss_clnt_token; // GSS token exchanged via gssd & server - uint32_t gss_clnt_tokenlen; // Length of token - uint32_t gss_clnt_gssd_flags; // Special flag bits to gssd - uint32_t gss_clnt_major; // GSS major result from gssd or server - uint32_t gss_clnt_minor; // GSS minor result from gssd or server - time_t gss_clnt_ptime; // When last error message was printed + lck_mtx_t *gss_clnt_mtx; + thread_t gss_clnt_thread; // Thread creating context + TAILQ_ENTRY(nfs_gss_clnt_ctx) gss_clnt_entries; + uint32_t gss_clnt_flags; // Flag bits - see below + int32_t gss_clnt_refcnt; // Reference count + kauth_cred_t gss_clnt_cred; // Owner of this context + uint8_t *gss_clnt_principal; // Principal to use for this credential + uint32_t gss_clnt_prinlen; // Length of principal + gssd_nametype gss_clnt_prinnt; // Name type of principal + char *gss_clnt_display; // display name of principal + uint32_t gss_clnt_proc; // Current GSS proc for cred + uint32_t gss_clnt_seqnum; // GSS sequence number + uint32_t gss_clnt_service; // Indicates krb5, krb5i or krb5p + uint8_t *gss_clnt_handle; // Identifies server context + uint32_t gss_clnt_handle_len; // Size of server's ctx handle + time_t gss_clnt_nctime; // When context was put in the negative cache + uint32_t gss_clnt_seqwin; // Server's seq num window + uint32_t *gss_clnt_seqbits; // Bitmap to track seq numbers in use + mach_port_t gss_clnt_mport; // Mach port for gssd upcall + uint32_t gss_clnt_verflen; // RPC verifier length from server + uint8_t *gss_clnt_verf; // RPC verifier from server + uint8_t *gss_clnt_svcname; // Service name e.g. 
"nfs/big.apple.com" + uint32_t gss_clnt_svcnamlen; // Service name length + gssd_nametype gss_clnt_svcnt; // Service name type + gssd_cred gss_clnt_cred_handle; // Opaque cred handle from gssd + gssd_ctx gss_clnt_context; // Opaque context handle from gssd + gss_ctx_id_t gss_clnt_ctx_id; // Underlying gss context + uint8_t *gss_clnt_token; // GSS token exchanged via gssd & server + uint32_t gss_clnt_tokenlen; // Length of token + uint32_t gss_clnt_gssd_flags; // Special flag bits to gssd + uint32_t gss_clnt_major; // GSS major result from gssd or server + uint32_t gss_clnt_minor; // GSS minor result from gssd or server + time_t gss_clnt_ptime; // When last error message was printed }; /* * gss_clnt_flags */ -#define GSS_CTX_COMPLETE 0x00000001 // Context is complete -#define GSS_CTX_INVAL 0x00000002 // Context is invalid -#define GSS_CTX_STICKY 0x00000004 // Context has been set by user -#define GSS_NEEDSEQ 0x00000008 // Need a sequence number -#define GSS_NEEDCTX 0x00000010 // Need the context -#define GSS_CTX_DESTROY 0x00000020 // Context is being destroyed, don't cache +#define GSS_CTX_COMPLETE 0x00000001 // Context is complete +#define GSS_CTX_INVAL 0x00000002 // Context is invalid +#define GSS_CTX_STICKY 0x00000004 // Context has been set by user +#define GSS_NEEDSEQ 0x00000008 // Need a sequence number +#define GSS_NEEDCTX 0x00000010 // Need the context +#define GSS_CTX_DESTROY 0x00000020 // Context is being destroyed, don't cache /* * The server's RPCSEC_GSS context information */ struct nfs_gss_svc_ctx { - lck_mtx_t *gss_svc_mtx; - LIST_ENTRY(nfs_gss_svc_ctx) gss_svc_entries; - uint32_t gss_svc_handle; // Identifies server context to client - uint32_t gss_svc_refcnt; // Reference count - uint32_t gss_svc_proc; // Current GSS proc from cred - uid_t gss_svc_uid; // UID of this user - gid_t gss_svc_gids[NGROUPS]; // GIDs of this user - uint32_t gss_svc_ngroups; // Count of gids - uint64_t gss_svc_incarnation; // Delete ctx if we exceed this + ttl value - uint32_t gss_svc_seqmax; // Current max GSS sequence number - uint32_t gss_svc_seqwin; // GSS sequence number window - uint32_t *gss_svc_seqbits; // Bitmap to track seq numbers - gssd_cred gss_svc_cred_handle; // Opaque cred handle from gssd - gssd_ctx gss_svc_context; // Opaque context handle from gssd - gss_ctx_id_t gss_svc_ctx_id; // Underlying gss context - u_char *gss_svc_token; // GSS token exchanged via gssd & client - uint32_t gss_svc_tokenlen; // Length of token - uint32_t gss_svc_major; // GSS major result from gssd - uint32_t gss_svc_minor; // GSS minor result from gssd + lck_mtx_t *gss_svc_mtx; + LIST_ENTRY(nfs_gss_svc_ctx) gss_svc_entries; + uint32_t gss_svc_handle; // Identifies server context to client + uint32_t gss_svc_refcnt; // Reference count + uint32_t gss_svc_proc; // Current GSS proc from cred + uid_t gss_svc_uid; // UID of this user + gid_t gss_svc_gids[NGROUPS]; // GIDs of this user + uint32_t gss_svc_ngroups; // Count of gids + uint64_t gss_svc_incarnation; // Delete ctx if we exceed this + ttl value + uint32_t gss_svc_seqmax; // Current max GSS sequence number + uint32_t gss_svc_seqwin; // GSS sequence number window + uint32_t *gss_svc_seqbits; // Bitmap to track seq numbers + gssd_cred gss_svc_cred_handle; // Opaque cred handle from gssd + gssd_ctx gss_svc_context; // Opaque context handle from gssd + gss_ctx_id_t gss_svc_ctx_id; // Underlying gss context + u_char *gss_svc_token; // GSS token exchanged via gssd & client + uint32_t gss_svc_tokenlen; // Length of token + uint32_t gss_svc_major; // GSS major 
result from gssd + uint32_t gss_svc_minor; // GSS minor result from gssd }; -#define SVC_CTX_HASHSZ 64 -#define SVC_CTX_HASH(handle) ((handle) % SVC_CTX_HASHSZ) +#define SVC_CTX_HASHSZ 64 +#define SVC_CTX_HASH(handle) ((handle) % SVC_CTX_HASHSZ) LIST_HEAD(nfs_gss_svc_ctx_hashhead, nfs_gss_svc_ctx); /* @@ -169,11 +169,11 @@ LIST_HEAD(nfs_gss_svc_ctx_hashhead, nfs_gss_svc_ctx); /* * Server context stale times */ -#define GSS_CTX_PEND 5 // seconds -#define GSS_CTX_EXPIRE (8 * 3600) // seconds -#define GSS_CTX_TTL_MIN 1 // seconds -#define GSS_TIMER_PERIOD 300 // seconds -#define MSECS_PER_SEC 1000 +#define GSS_CTX_PEND 5 // seconds +#define GSS_CTX_EXPIRE (8 * 3600) // seconds +#define GSS_CTX_TTL_MIN 1 // seconds +#define GSS_TIMER_PERIOD 300 // seconds +#define MSECS_PER_SEC 1000 #define auth_is_kerberized(auth) \ (auth == RPCAUTH_KRB5 || \ @@ -182,27 +182,27 @@ LIST_HEAD(nfs_gss_svc_ctx_hashhead, nfs_gss_svc_ctx); __BEGIN_DECLS -void nfs_gss_init(void); -uid_t nfs_cred_getasid2uid(kauth_cred_t); -int nfs_gss_clnt_cred_put(struct nfsreq *, struct nfsm_chain *, mbuf_t); -int nfs_gss_clnt_verf_get(struct nfsreq *, struct nfsm_chain *, - uint32_t, uint32_t, uint32_t *); -void nfs_gss_clnt_rpcdone(struct nfsreq *); -int nfs_gss_clnt_args_restore(struct nfsreq *); -int nfs_gss_clnt_ctx_renew(struct nfsreq *); -void nfs_gss_clnt_ctx_ref(struct nfsreq *, struct nfs_gss_clnt_ctx *); -void nfs_gss_clnt_ctx_unref(struct nfsreq *); -void nfs_gss_clnt_ctx_unmount(struct nfsmount *); -int nfs_gss_clnt_ctx_remove(struct nfsmount *, kauth_cred_t); -int nfs_gss_clnt_ctx_set_principal(struct nfsmount *, vfs_context_t, uint8_t *, uint32_t, uint32_t); -int nfs_gss_clnt_ctx_get_principal(struct nfsmount *, vfs_context_t, struct user_nfs_gss_principal *); -int nfs_gss_svc_cred_get(struct nfsrv_descript *, struct nfsm_chain *); -int nfs_gss_svc_verf_put(struct nfsrv_descript *, struct nfsm_chain *); -int nfs_gss_svc_ctx_init(struct nfsrv_descript *, struct nfsrv_sock *, mbuf_t *); -int nfs_gss_svc_prepare_reply(struct nfsrv_descript *, struct nfsm_chain *); -int nfs_gss_svc_protect_reply(struct nfsrv_descript *, mbuf_t); -void nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *); -void nfs_gss_svc_cleanup(void); +void nfs_gss_init(void); +uid_t nfs_cred_getasid2uid(kauth_cred_t); +int nfs_gss_clnt_cred_put(struct nfsreq *, struct nfsm_chain *, mbuf_t); +int nfs_gss_clnt_verf_get(struct nfsreq *, struct nfsm_chain *, + uint32_t, uint32_t, uint32_t *); +void nfs_gss_clnt_rpcdone(struct nfsreq *); +int nfs_gss_clnt_args_restore(struct nfsreq *); +int nfs_gss_clnt_ctx_renew(struct nfsreq *); +void nfs_gss_clnt_ctx_ref(struct nfsreq *, struct nfs_gss_clnt_ctx *); +void nfs_gss_clnt_ctx_unref(struct nfsreq *); +void nfs_gss_clnt_ctx_unmount(struct nfsmount *); +int nfs_gss_clnt_ctx_remove(struct nfsmount *, kauth_cred_t); +int nfs_gss_clnt_ctx_set_principal(struct nfsmount *, vfs_context_t, uint8_t *, uint32_t, uint32_t); +int nfs_gss_clnt_ctx_get_principal(struct nfsmount *, vfs_context_t, struct user_nfs_gss_principal *); +int nfs_gss_svc_cred_get(struct nfsrv_descript *, struct nfsm_chain *); +int nfs_gss_svc_verf_put(struct nfsrv_descript *, struct nfsm_chain *); +int nfs_gss_svc_ctx_init(struct nfsrv_descript *, struct nfsrv_sock *, mbuf_t *); +int nfs_gss_svc_prepare_reply(struct nfsrv_descript *, struct nfsm_chain *); +int nfs_gss_svc_protect_reply(struct nfsrv_descript *, mbuf_t); +void nfs_gss_svc_ctx_deref(struct nfs_gss_svc_ctx *); +void nfs_gss_svc_cleanup(void); __END_DECLS #endif /* _NFS_NFS_GSS_H_ */ diff 
--git a/bsd/nfs/nfs_ioctl.h b/bsd/nfs/nfs_ioctl.h index f140e3ea8..6ab20e01a 100644 --- a/bsd/nfs/nfs_ioctl.h +++ b/bsd/nfs/nfs_ioctl.h @@ -2,7 +2,7 @@ * Copyright (c) 2012,2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,47 +37,45 @@ /* * fsctl (vnop_ioctl) to detroy the callers credentials associated with the vnode's mount */ -#define NFS_IOC_DESTROY_CRED _IO('n', 1) +#define NFS_IOC_DESTROY_CRED _IO('n', 1) /* * fsctl (vnop_ioctl) to set the callers credentials associated with the vnode's mount */ -struct nfs_gss_principal -{ - uint32_t princlen; /* length of data */ - uint32_t nametype; /* nametype of data */ +struct nfs_gss_principal { + uint32_t princlen; /* length of data */ + uint32_t nametype; /* nametype of data */ #ifdef KERNEL - user32_addr_t principal; /* principal data in userspace */ + user32_addr_t principal; /* principal data in userspace */ #else - uint8_t *principal; + uint8_t *principal; #endif - uint32_t flags; /* Return flags */ + uint32_t flags; /* Return flags */ }; #ifdef KERNEL /* LP64 version of nfs_gss_principal */ -struct user_nfs_gss_principal -{ - uint32_t princlen; /* length of data */ - uint32_t nametype; /* nametype of data */ - user64_addr_t principal; /* principal data in userspace */ - uint32_t flags; /* Returned flags */ +struct user_nfs_gss_principal { + uint32_t princlen; /* length of data */ + uint32_t nametype; /* nametype of data */ + user64_addr_t principal; /* principal data in userspace */ + uint32_t flags; /* Returned flags */ }; #endif /* If no credential was found returned NFS_IOC_NO_CRED_FLAG in the flags field. 
*/ -#define NFS_IOC_NO_CRED_FLAG 1 /* No credential was found */ -#define NFS_IOC_INVALID_CRED_FLAG 2 /* Found a credential, but its not valid */ +#define NFS_IOC_NO_CRED_FLAG 1 /* No credential was found */ +#define NFS_IOC_INVALID_CRED_FLAG 2 /* Found a credential, but its not valid */ -#define NFS_IOC_SET_CRED _IOW('n', 2, struct nfs_gss_principal) +#define NFS_IOC_SET_CRED _IOW('n', 2, struct nfs_gss_principal) -#define NFS_IOC_GET_CRED _IOWR('n', 3, struct nfs_gss_principal) +#define NFS_IOC_GET_CRED _IOWR('n', 3, struct nfs_gss_principal) #ifdef KERNEL -#define NFS_IOC_SET_CRED64 _IOW('n', 2, struct user_nfs_gss_principal) +#define NFS_IOC_SET_CRED64 _IOW('n', 2, struct user_nfs_gss_principal) -#define NFS_IOC_GET_CRED64 _IOWR('n', 3, struct user_nfs_gss_principal) +#define NFS_IOC_GET_CRED64 _IOWR('n', 3, struct user_nfs_gss_principal) #endif #endif diff --git a/bsd/nfs/nfs_lock.c b/bsd/nfs/nfs_lock.c index 2514489c1..ab5f4f4c9 100644 --- a/bsd/nfs/nfs_lock.c +++ b/bsd/nfs/nfs_lock.c @@ -2,7 +2,7 @@ * Copyright (c) 2002-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -59,13 +59,13 @@ #include #include #include -#include /* for hz */ +#include /* for hz */ #include #include -#include /* for hz */ /* Must come after sys/malloc.h */ +#include /* for hz */ /* Must come after sys/malloc.h */ #include #include -#include /* for p_start */ +#include /* for p_start */ #include #include #include @@ -103,7 +103,7 @@ static uint64_t nfs_lockxid = 0; static LOCKD_MSG_QUEUE nfs_pendlockq; /* list of mounts that are (potentially) making lockd requests */ -TAILQ_HEAD(nfs_lockd_mount_list,nfsmount) nfs_lockd_mount_list; +TAILQ_HEAD(nfs_lockd_mount_list, nfsmount) nfs_lockd_mount_list; static lck_grp_t *nfs_lock_lck_grp; static lck_mtx_t *nfs_lock_mutex; @@ -159,7 +159,7 @@ nfs_lockd_mount_unregister(struct nfsmount *nmp) lck_mtx_unlock(nfs_lock_mutex); return; } - + TAILQ_REMOVE(&nfs_lockd_mount_list, nmp, nm_ldlink); nmp->nm_ldlink.tqe_next = NFSNOLIST; @@ -167,13 +167,15 @@ nfs_lockd_mount_unregister(struct nfsmount *nmp) /* send a shutdown request if there are no more lockd mounts */ send_shutdown = ((nfs_lockd_mounts == 0) && nfs_lockd_request_sent); - if (send_shutdown) + if (send_shutdown) { nfs_lockd_request_sent = 0; + } lck_mtx_unlock(nfs_lock_mutex); - if (!send_shutdown) + if (!send_shutdown) { return; + } /* * Let lockd know that it is no longer needed for any NFS mounts @@ -181,14 +183,15 @@ nfs_lockd_mount_unregister(struct nfsmount *nmp) kr = host_get_lockd_port(host_priv_self(), &lockd_port); if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(lockd_port)) { printf("nfs_lockd_mount_change: shutdown couldn't get port, kr %d, port %s\n", - kr, (lockd_port == IPC_PORT_NULL) ? "NULL" : - (lockd_port == IPC_PORT_DEAD) ? "DEAD" : "VALID"); + kr, (lockd_port == IPC_PORT_NULL) ? "NULL" : + (lockd_port == IPC_PORT_DEAD) ? 
"DEAD" : "VALID"); return; } kr = lockd_shutdown(lockd_port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { printf("nfs_lockd_mount_change: shutdown %d\n", kr); + } ipc_port_release_send(lockd_port); } @@ -247,10 +250,12 @@ nfs_lockdmsg_find_by_xid(uint64_t lockxid) LOCKD_MSG_REQUEST *mr; TAILQ_FOREACH(mr, &nfs_pendlockq, lmr_next) { - if (mr->lmr_msg.lm_xid == lockxid) + if (mr->lmr_msg.lm_xid == lockxid) { return mr; - if (mr->lmr_msg.lm_xid > lockxid) + } + if (mr->lmr_msg.lm_xid > lockxid) { return NULL; + } } return mr; } @@ -270,18 +275,24 @@ nfs_lockdmsg_find_by_xid(uint64_t lockxid) int nfs_lockdmsg_compare_to_answer(LOCKD_MSG_REQUEST *msgreq, struct lockd_ans *ansp) { - if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO)) + if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO)) { return 1; - if (msgreq->lmr_msg.lm_fl.l_pid != ansp->la_pid) + } + if (msgreq->lmr_msg.lm_fl.l_pid != ansp->la_pid) { return 1; - if (msgreq->lmr_msg.lm_fl.l_start != ansp->la_start) + } + if (msgreq->lmr_msg.lm_fl.l_start != ansp->la_start) { return 1; - if (msgreq->lmr_msg.lm_fl.l_len != ansp->la_len) + } + if (msgreq->lmr_msg.lm_fl.l_len != ansp->la_len) { return 1; - if (msgreq->lmr_msg.lm_fh_len != ansp->la_fh_len) + } + if (msgreq->lmr_msg.lm_fh_len != ansp->la_fh_len) { return 1; - if (bcmp(msgreq->lmr_msg.lm_fh, ansp->la_fh, ansp->la_fh_len)) + } + if (bcmp(msgreq->lmr_msg.lm_fh, ansp->la_fh, ansp->la_fh_len)) { return 1; + } return 0; } @@ -305,11 +316,13 @@ nfs_lockdmsg_find_by_answer(struct lockd_ans *ansp) { LOCKD_MSG_REQUEST *mr; - if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO)) + if (!(ansp->la_flags & LOCKD_ANS_LOCK_INFO)) { return NULL; + } TAILQ_FOREACH(mr, &nfs_pendlockq, lmr_next) { - if (!nfs_lockdmsg_compare_to_answer(mr, ansp)) + if (!nfs_lockdmsg_compare_to_answer(mr, ansp)) { break; + } } return mr; } @@ -339,10 +352,11 @@ nfs_lockxid_get(void) /* make sure we get a unique xid */ do { /* Skip zero xid if it should ever happen. */ - if (++nfs_lockxid == 0) + if (++nfs_lockxid == 0) { nfs_lockxid++; + } if (!(mr = TAILQ_LAST(&nfs_pendlockq, nfs_lock_msg_queue)) || - (mr->lmr_msg.lm_xid < nfs_lockxid)) { + (mr->lmr_msg.lm_xid < nfs_lockxid)) { /* fast path: empty queue or new largest xid */ break; } @@ -362,8 +376,9 @@ nfs_lockd_send_request(LOCKD_MSG *msg, int interruptable) mach_port_t lockd_port = IPC_PORT_NULL; kr = host_get_lockd_port(host_priv_self(), &lockd_port); - if (kr != KERN_SUCCESS || !IPC_PORT_VALID(lockd_port)) - return (ENOTSUP); + if (kr != KERN_SUCCESS || !IPC_PORT_VALID(lockd_port)) { + return ENOTSUP; + } do { /* In the kernel all mach messaging is interruptable */ @@ -382,22 +397,23 @@ nfs_lockd_send_request(LOCKD_MSG *msg, int interruptable) (uint32_t *)&msg->lm_cred, msg->lm_fh_len, msg->lm_fh); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { printf("lockd_request received %d!\n", kr); + } } while (!interruptable && kr == MACH_SEND_INTERRUPTED); } while (kr == MIG_SERVER_DIED && retries++ < MACH_MAX_TRIES); ipc_port_release_send(lockd_port); switch (kr) { - case MACH_SEND_INTERRUPTED: - return (EINTR); + case MACH_SEND_INTERRUPTED: + return EINTR; default: /* * Other MACH or MIG errors we will retry. Eventually - * we will call nfs_down and allow the user to disable + * we will call nfs_down and allow the user to disable * locking. 
*/ - return (EAGAIN); + return EAGAIN; } } @@ -422,17 +438,20 @@ nfs3_lockd_request( struct sockaddr *saddr; nmp = NFSTONMP(np); - if (!nmp || !nmp->nm_saddr) - return (ENXIO); + if (!nmp || !nmp->nm_saddr) { + return ENXIO; + } lck_mtx_lock(&nmp->nm_lock); saddr = nmp->nm_saddr; bcopy(saddr, &msg->lm_addr, min(sizeof msg->lm_addr, saddr->sa_len)); - if (nmp->nm_vers == NFS_VER3) + if (nmp->nm_vers == NFS_VER3) { msg->lm_flags |= LOCKD_MSG_NFSV3; + } - if (nmp->nm_sotype != SOCK_DGRAM) + if (nmp->nm_sotype != SOCK_DGRAM) { msg->lm_flags |= LOCKD_MSG_TCP; + } microuptime(&now); starttime = now.tv_sec; @@ -455,8 +474,9 @@ nfs3_lockd_request( lck_mtx_unlock(nfs_lock_mutex); error = nfs_lockd_send_request(msg, interruptable); lck_mtx_lock(nfs_lock_mutex); - if (error && error != EAGAIN) + if (error && error != EAGAIN) { break; + } /* * Always wait for an answer. Not waiting for unlocks could @@ -512,16 +532,18 @@ wait_for_granted: } break; } - if (error != EWOULDBLOCK) + if (error != EWOULDBLOCK) { break; + } /* check that we still have our mount... */ /* ...and that we still support locks */ /* ...and that there isn't a recovery pending */ nmp = NFSTONMP(np); if ((error2 = nfs_sigintr(nmp, NULL, NULL, 0))) { error = error2; - if (type == F_UNLCK) + if (type == F_UNLCK) { printf("nfs3_lockd_request: aborting unlock request, error %d\n", error); + } break; } lck_mtx_lock(&nmp->nm_lock); @@ -545,16 +567,18 @@ wait_for_granted: if ((error2 = nfs_sigintr(nmp, NULL, NULL, 0))) { error = error2; if (error2 != EINTR) { - if (type == F_UNLCK) + if (type == F_UNLCK) { printf("nfs3_lockd_request: aborting unlock request, error %d\n", error); + } break; } } /* ...and that we still support locks */ lck_mtx_lock(&nmp->nm_lock); if (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) { - if (error == EWOULDBLOCK) + if (error == EWOULDBLOCK) { error = ENOTSUP; + } lck_mtx_unlock(&nmp->nm_lock); break; } @@ -609,8 +633,9 @@ wait_for_granted: lastmsg = now.tv_sec; nfs_down(nmp, thd, 0, NFSSTA_LOCKTIMEO, "lockd not responding", 1); wentdown = 1; - } else + } else { lck_mtx_unlock(&nmp->nm_lock); + } if (msgreq->lmr_errno == EINPROGRESS) { /* @@ -639,17 +664,19 @@ wait_for_granted: /* * We timed out, so we will resend the request. */ - if (!(flags & R_RECOVER)) + if (!(flags & R_RECOVER)) { timeo *= 2; - if (timeo > 30) + } + if (timeo > 30) { timeo = 30; + } /* resend request */ continue; } /* we got a reponse, so the server's lockd is OK */ nfs_up(NFSTONMP(np), thd, NFSSTA_LOCKTIMEO, - wentdown ? "lockd alive again" : NULL); + wentdown ? 
"lockd alive again" : NULL); wentdown = 0; if (msgreq->lmr_answered && (msg->lm_flags & LOCKD_MSG_DENIED_GRACE)) { @@ -714,15 +741,16 @@ wait_for_granted: nmp->nm_state &= ~NFSSTA_LOCKTIMEO; lck_mtx_unlock(&nmp->nm_lock); printf("lockd returned ENOTSUP, disabling locks for nfs server: %s\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname); - return (error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname); + return error; } if (!error) { /* record that NFS file locking has worked on this mount */ if (nmp) { lck_mtx_lock(&nmp->nm_lock); - if (!(nmp->nm_state & NFSSTA_LOCKSWORK)) + if (!(nmp->nm_state & NFSSTA_LOCKSWORK)) { nmp->nm_state |= NFSSTA_LOCKSWORK; + } lck_mtx_unlock(&nmp->nm_lock); } } @@ -733,7 +761,7 @@ wait_for_granted: lck_mtx_unlock(nfs_lock_mutex); - return (error); + return error; } /* @@ -756,24 +784,28 @@ nfs3_setlock_rpc( LOCKD_MSG *msg; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } if (!nlop->nlo_open_owner) { nfs_open_owner_ref(nofp->nof_owner); nlop->nlo_open_owner = nofp->nof_owner; } - if ((error = nfs_lock_owner_set_busy(nlop, thd))) - return (error); + if ((error = nfs_lock_owner_set_busy(nlop, thd))) { + return error; + } /* set up lock message request structure */ bzero(&msgreq, sizeof(msgreq)); msg = &msgreq.lmr_msg; msg->lm_version = LOCKD_MSG_VERSION; - if ((nflp->nfl_flags & NFS_FILE_LOCK_WAIT) && !reclaim) + if ((nflp->nfl_flags & NFS_FILE_LOCK_WAIT) && !reclaim) { msg->lm_flags |= LOCKD_MSG_BLOCK; - if (reclaim) + } + if (reclaim) { msg->lm_flags |= LOCKD_MSG_RECLAIM; + } msg->lm_fh_len = (nmp->nm_vers == NFS_VER2) ? NFSX_V2FH : np->n_fhsize; bcopy(np->n_fhp, msg->lm_fh, msg->lm_fh_len); cru2x(cred, &msg->lm_cred); @@ -787,7 +819,7 @@ nfs3_setlock_rpc( error = nfs3_lockd_request(np, 0, &msgreq, flags, thd); nfs_lock_owner_clear_busy(nlop); - return (error); + return error; } /* @@ -809,8 +841,9 @@ nfs3_unlock_rpc( LOCKD_MSG *msg; nmp = NFSTONMP(np); - if (!nmp) - return (ENXIO); + if (!nmp) { + return ENXIO; + } /* set up lock message request structure */ bzero(&msgreq, sizeof(msgreq)); @@ -826,7 +859,7 @@ nfs3_unlock_rpc( msg->lm_fl.l_type = F_UNLCK; msg->lm_fl.l_pid = nlop->nlo_pid; - return (nfs3_lockd_request(np, F_UNLCK, &msgreq, flags, thd)); + return nfs3_lockd_request(np, F_UNLCK, &msgreq, flags, thd); } /* @@ -847,8 +880,9 @@ nfs3_getlock_rpc( LOCKD_MSG *msg; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } /* set up lock message request structure */ bzero(&msgreq, sizeof(msgreq)); @@ -874,11 +908,12 @@ nfs3_getlock_rpc( fl->l_start = msg->lm_fl.l_start; fl->l_len = msg->lm_fl.l_len; fl->l_whence = SEEK_SET; - } else + } else { fl->l_type = F_UNLCK; + } } - return (error); + return error; } /* @@ -893,12 +928,14 @@ nfslockdans(proc_t p, struct lockd_ans *ansp) /* Let root make this call. */ error = proc_suser(p); - if (error) - return (error); + if (error) { + return error; + } /* the version should match, or we're out of sync */ - if (ansp->la_version != LOCKD_ANS_VERSION) - return (EINVAL); + if (ansp->la_version != LOCKD_ANS_VERSION) { + return EINVAL; + } lck_mtx_lock(nfs_lock_mutex); @@ -911,27 +948,30 @@ nfslockdans(proc_t p, struct lockd_ans *ansp) * If no message was found or it doesn't match the answer, * we look for the lockd message by the answer's lock info. 
*/ - if (!msgreq || nfs_lockdmsg_compare_to_answer(msgreq, ansp)) + if (!msgreq || nfs_lockdmsg_compare_to_answer(msgreq, ansp)) { msgreq = nfs_lockdmsg_find_by_answer(ansp); + } /* * We need to make sure this request isn't being cancelled * If it is, we don't want to accept the granted message. */ - if (msgreq && (msgreq->lmr_msg.lm_flags & LOCKD_MSG_CANCEL)) + if (msgreq && (msgreq->lmr_msg.lm_flags & LOCKD_MSG_CANCEL)) { msgreq = NULL; + } } if (!msgreq) { lck_mtx_unlock(nfs_lock_mutex); - return (EPIPE); + return EPIPE; } msgreq->lmr_errno = ansp->la_errno; if ((msgreq->lmr_msg.lm_flags & LOCKD_MSG_TEST) && msgreq->lmr_errno == 0) { if (ansp->la_flags & LOCKD_ANS_LOCK_INFO) { - if (ansp->la_flags & LOCKD_ANS_LOCK_EXCL) + if (ansp->la_flags & LOCKD_ANS_LOCK_EXCL) { msgreq->lmr_msg.lm_fl.l_type = F_WRLCK; - else + } else { msgreq->lmr_msg.lm_fl.l_type = F_RDLCK; + } msgreq->lmr_msg.lm_fl.l_pid = ansp->la_pid; msgreq->lmr_msg.lm_fl.l_start = ansp->la_start; msgreq->lmr_msg.lm_fl.l_len = ansp->la_len; @@ -939,14 +979,15 @@ nfslockdans(proc_t p, struct lockd_ans *ansp) msgreq->lmr_msg.lm_fl.l_type = F_UNLCK; } } - if (ansp->la_flags & LOCKD_ANS_DENIED_GRACE) + if (ansp->la_flags & LOCKD_ANS_DENIED_GRACE) { msgreq->lmr_msg.lm_flags |= LOCKD_MSG_DENIED_GRACE; + } msgreq->lmr_answered = 1; lck_mtx_unlock(nfs_lock_mutex); wakeup(msgreq); - return (0); + return 0; } /* @@ -966,32 +1007,38 @@ nfslockdnotify(proc_t p, user_addr_t argp) /* Let root make this call. */ error = proc_suser(p); - if (error) - return (error); + if (error) { + return error; + } headsize = (char*)&ln.ln_addr[0] - (char*)&ln.ln_version; error = copyin(argp, &ln, headsize); - if (error) - return (error); - if (ln.ln_version != LOCKD_NOTIFY_VERSION) - return (EINVAL); - if ((ln.ln_addrcount < 1) || (ln.ln_addrcount > 128)) - return (EINVAL); + if (error) { + return error; + } + if (ln.ln_version != LOCKD_NOTIFY_VERSION) { + return EINVAL; + } + if ((ln.ln_addrcount < 1) || (ln.ln_addrcount > 128)) { + return EINVAL; + } argp += headsize; saddr = (struct sockaddr *)&ln.ln_addr[0]; lck_mtx_lock(nfs_lock_mutex); - for (i=0; i < ln.ln_addrcount; i++) { + for (i = 0; i < ln.ln_addrcount; i++) { error = copyin(argp, &ln.ln_addr[0], sizeof(ln.ln_addr[0])); - if (error) + if (error) { break; + } argp += sizeof(ln.ln_addr[0]); /* scan lockd mount list for match to this address */ TAILQ_FOREACH(nmp, &nfs_lockd_mount_list, nm_ldlink) { /* check if address matches this mount's server address */ - if (!nmp->nm_saddr || nfs_sockaddr_cmp(saddr, nmp->nm_saddr)) + if (!nmp->nm_saddr || nfs_sockaddr_cmp(saddr, nmp->nm_saddr)) { continue; + } /* We have a match! Mark it as needing recovery. */ lck_mtx_lock(&nmp->nm_lock); nfs_need_recover(nmp, 0); @@ -1001,6 +1048,5 @@ nfslockdnotify(proc_t p, user_addr_t argp) lck_mtx_unlock(nfs_lock_mutex); - return (error); + return error; } - diff --git a/bsd/nfs/nfs_lock.h b/bsd/nfs/nfs_lock.h index 5a5efe3e4..b360849e6 100644 --- a/bsd/nfs/nfs_lock.h +++ b/bsd/nfs/nfs_lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2002-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -73,34 +73,34 @@ /* * The structure that the kernel hands lockd for each lock request. */ -#define LOCKD_MSG_VERSION 3 +#define LOCKD_MSG_VERSION 3 typedef struct nfs_lock_msg { - int lm_version; /* LOCKD_MSG version */ - int lm_flags; /* request flags */ - u_int64_t lm_xid; /* unique message transaction ID */ - struct flock lm_fl; /* The lock request. */ - struct sockaddr_storage lm_addr; /* The address. */ - int lm_fh_len; /* The file handle length. */ - struct xucred lm_cred; /* user cred for lock req */ - u_int8_t lm_fh[NFSV3_MAX_FH_SIZE]; /* The file handle. */ + int lm_version; /* LOCKD_MSG version */ + int lm_flags; /* request flags */ + u_int64_t lm_xid; /* unique message transaction ID */ + struct flock lm_fl; /* The lock request. */ + struct sockaddr_storage lm_addr; /* The address. */ + int lm_fh_len; /* The file handle length. */ + struct xucred lm_cred; /* user cred for lock req */ + u_int8_t lm_fh[NFSV3_MAX_FH_SIZE]; /* The file handle. */ } LOCKD_MSG; /* lm_flags */ -#define LOCKD_MSG_BLOCK 0x0001 /* a blocking request */ -#define LOCKD_MSG_TEST 0x0002 /* just a lock test */ -#define LOCKD_MSG_NFSV3 0x0004 /* NFSv3 request */ -#define LOCKD_MSG_CANCEL 0x0008 /* cancelling blocked request */ -#define LOCKD_MSG_DENIED_GRACE 0x0010 /* lock denied due to grace period */ -#define LOCKD_MSG_RECLAIM 0x0020 /* lock reclaim request */ -#define LOCKD_MSG_TCP 0x0040 /* (try to) use TCP for request */ +#define LOCKD_MSG_BLOCK 0x0001 /* a blocking request */ +#define LOCKD_MSG_TEST 0x0002 /* just a lock test */ +#define LOCKD_MSG_NFSV3 0x0004 /* NFSv3 request */ +#define LOCKD_MSG_CANCEL 0x0008 /* cancelling blocked request */ +#define LOCKD_MSG_DENIED_GRACE 0x0010 /* lock denied due to grace period */ +#define LOCKD_MSG_RECLAIM 0x0020 /* lock reclaim request */ +#define LOCKD_MSG_TCP 0x0040 /* (try to) use TCP for request */ /* The structure used to maintain the pending request queue */ typedef struct nfs_lock_msg_request { - TAILQ_ENTRY(nfs_lock_msg_request) lmr_next; /* in-kernel pending request list */ - int lmr_answered; /* received an answer? */ - int lmr_errno; /* return status */ - int lmr_saved_errno; /* original return status */ - LOCKD_MSG lmr_msg; /* the message */ + TAILQ_ENTRY(nfs_lock_msg_request) lmr_next; /* in-kernel pending request list */ + int lmr_answered; /* received an answer? */ + int lmr_errno; /* return status */ + int lmr_saved_errno; /* original return status */ + LOCKD_MSG lmr_msg; /* the message */ } LOCKD_MSG_REQUEST; TAILQ_HEAD(nfs_lock_msg_queue, nfs_lock_msg_request); @@ -110,46 +110,46 @@ typedef struct nfs_lock_msg_queue LOCKD_MSG_QUEUE; /* * The structure that lockd hands the kernel for each lock answer. 
*/ -#define LOCKD_ANS_VERSION 2 +#define LOCKD_ANS_VERSION 2 struct lockd_ans { - int la_version; /* lockd_ans version */ - int la_errno; /* return status */ - u_int64_t la_xid; /* unique message transaction ID */ - int la_flags; /* answer flags */ - pid_t la_pid; /* pid of lock requester/owner */ - off_t la_start; /* lock starting offset */ - off_t la_len; /* lock length */ - int la_fh_len; /* The file handle length. */ - u_int8_t la_fh[NFSV3_MAX_FH_SIZE];/* The file handle. */ + int la_version; /* lockd_ans version */ + int la_errno; /* return status */ + u_int64_t la_xid; /* unique message transaction ID */ + int la_flags; /* answer flags */ + pid_t la_pid; /* pid of lock requester/owner */ + off_t la_start; /* lock starting offset */ + off_t la_len; /* lock length */ + int la_fh_len; /* The file handle length. */ + u_int8_t la_fh[NFSV3_MAX_FH_SIZE];/* The file handle. */ }; /* la_flags */ -#define LOCKD_ANS_GRANTED 0x0001 /* NLM_GRANTED request */ -#define LOCKD_ANS_LOCK_INFO 0x0002 /* lock info valid */ -#define LOCKD_ANS_LOCK_EXCL 0x0004 /* lock is exclusive */ -#define LOCKD_ANS_DENIED_GRACE 0x0008 /* lock denied due to grace period */ +#define LOCKD_ANS_GRANTED 0x0001 /* NLM_GRANTED request */ +#define LOCKD_ANS_LOCK_INFO 0x0002 /* lock info valid */ +#define LOCKD_ANS_LOCK_EXCL 0x0004 /* lock is exclusive */ +#define LOCKD_ANS_DENIED_GRACE 0x0008 /* lock denied due to grace period */ /* * The structure that lockd hands the kernel for each notify. */ -#define LOCKD_NOTIFY_VERSION 1 +#define LOCKD_NOTIFY_VERSION 1 struct lockd_notify { - int ln_version; /* lockd_notify version */ - int ln_flags; /* notify flags */ - int ln_pad; /* (for alignment) */ - int ln_addrcount; /* # of addresss */ - struct sockaddr_storage ln_addr[1]; /* List of addresses. */ + int ln_version; /* lockd_notify version */ + int ln_flags; /* notify flags */ + int ln_pad; /* (for alignment) */ + int ln_addrcount; /* # of addresss */ + struct sockaddr_storage ln_addr[1]; /* List of addresses. 
*/ }; #ifdef KERNEL -void nfs_lockinit(void); -void nfs_lockd_mount_register(struct nfsmount *); -void nfs_lockd_mount_unregister(struct nfsmount *); -int nfs3_lockd_request(nfsnode_t, int, LOCKD_MSG_REQUEST *, int, thread_t); -int nfslockdans(proc_t p, struct lockd_ans *ansp); -int nfslockdnotify(proc_t p, user_addr_t argp); +void nfs_lockinit(void); +void nfs_lockd_mount_register(struct nfsmount *); +void nfs_lockd_mount_unregister(struct nfsmount *); +int nfs3_lockd_request(nfsnode_t, int, LOCKD_MSG_REQUEST *, int, thread_t); +int nfslockdans(proc_t p, struct lockd_ans *ansp); +int nfslockdnotify(proc_t p, user_addr_t argp); #endif #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/nfs/nfs_node.c b/bsd/nfs/nfs_node.c index 437242919..c48c14954 100644 --- a/bsd/nfs/nfs_node.c +++ b/bsd/nfs/nfs_node.c @@ -86,9 +86,9 @@ #include #include -#define NFSNOHASH(fhsum) \ +#define NFSNOHASH(fhsum) \ (&nfsnodehashtbl[(fhsum) & nfsnodehash]) -static LIST_HEAD(nfsnodehashhead, nfsnode) *nfsnodehashtbl; +static LIST_HEAD(nfsnodehashhead, nfsnode) * nfsnodehashtbl; static u_long nfsnodehash; static lck_grp_t *nfs_node_hash_lck_grp; @@ -115,8 +115,9 @@ void nfs_nhinit_finish(void) { lck_mtx_lock(nfs_node_hash_mutex); - if (!nfsnodehashtbl) + if (!nfsnodehashtbl) { nfsnodehashtbl = hashinit(desiredvnodes, M_NFSNODE, &nfsnodehash); + } lck_mtx_unlock(nfs_node_hash_mutex); } @@ -130,12 +131,13 @@ nfs_hash(u_char *fhp, int fhsize) int i; fhsum = 0; - for (i = 0; i < fhsize; i++) + for (i = 0; i < fhsize; i++) { fhsum += *fhp++; - return (fhsum); + } + return fhsum; } - + int nfs_case_insensitive(mount_t); int @@ -144,39 +146,40 @@ nfs_case_insensitive(mount_t mp) struct nfsmount *nmp = VFSTONFS(mp); int answer = 0; int skip = 0; - + if (nfs_mount_gone(nmp)) { - return (0); + return 0; } - + if (nmp->nm_vers == NFS_VER2) { /* V2 has no way to know */ - return (0); + return 0; } lck_mtx_lock(&nmp->nm_lock); if (nmp->nm_vers == NFS_VER3) { if (!(nmp->nm_state & NFSSTA_GOTPATHCONF)) { - /* We're holding the node lock so we just return + /* We're holding the node lock so we just return * with answer as case sensitive. Is very rare * for file systems not to be homogenous w.r.t. pathconf */ skip = 1; - } + } } else if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS)) { /* no pathconf info cached */ skip = 1; } - if (!skip && (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE)) + if (!skip && (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE)) { answer = 1; + } lck_mtx_unlock(&nmp->nm_lock); - return (answer); + return answer; } - + /* * Look up a vnode/nfsnode by file handle. * Callers must check for mount points!! @@ -211,7 +214,7 @@ nfs_nget( *npp = NULL; error = ENXIO; FSDBG_BOT(263, mp, dnp, 0xd1e, error); - return (error); + return error; } nfsvers = VFSTONFS(mp)->nm_vers; @@ -221,22 +224,25 @@ loop: for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) { mp2 = (np->n_hflag & NHINIT) ? np->n_mount : NFSTOMP(np); if (mp != mp2 || np->n_fhsize != fhsize || - bcmp(fhp, np->n_fhp, fhsize)) + bcmp(fhp, np->n_fhp, fhsize)) { continue; + } if (nvap && (nvap->nva_flags & NFS_FFLAG_TRIGGER_REFERRAL) && cnp && (cnp->cn_namelen > (fhsize - (int)sizeof(dnp)))) { /* The name was too long to fit in the file handle. Check it against the node's name. 
*/ int namecmp = 0; const char *vname = vnode_getname(NFSTOV(np)); if (vname) { - if (cnp->cn_namelen != (int)strlen(vname)) + if (cnp->cn_namelen != (int)strlen(vname)) { namecmp = 1; - else + } else { namecmp = strncmp(vname, cnp->cn_nameptr, cnp->cn_namelen); + } vnode_putname(vname); } - if (namecmp) /* full name didn't match */ + if (namecmp) { /* full name didn't match */ continue; + } } FSDBG(263, dnp, np, np->n_flag, 0xcace0000); /* if the node is locked, sleep on it */ @@ -256,7 +262,7 @@ loop: * changed identity, no need to wait. */ FSDBG_BOT(263, dnp, *npp, 0xcace0d1e, error); - return (error); + return error; } if ((error = nfs_node_lock(np))) { /* this only fails if the node is now unhashed */ @@ -266,19 +272,21 @@ loop: if (flags & NG_NOCREATE) { *npp = 0; FSDBG_BOT(263, dnp, *npp, 0xcaced1e0, ENOENT); - return (ENOENT); + return ENOENT; } goto loop; } /* update attributes */ - if (nvap) + if (nvap) { error = nfs_loadattrcache(np, nvap, xidp, 0); + } if (error) { nfs_node_unlock(np); vnode_put(vp); } else { - if (dnp && cnp && (flags & NG_MAKEENTRY)) + if (dnp && cnp && (flags & NG_MAKEENTRY)) { cache_enter(NFSTOV(dnp), vp, cnp); + } /* * Update the vnode if the name/and or the parent has * changed. We need to do this so that if getattrlist is @@ -324,7 +332,7 @@ loop: * insensitive. * * Note that V2 does not know the case, so we just - * assume case sensitivity. + * assume case sensitivity. * * This is clearly not perfect due to races, but this is * as good as its going to get. You can defeat the @@ -346,15 +354,18 @@ loop: cmp = nfs_case_insensitive(mp) ? strncasecmp : strncmp; - if (vp->v_name && cnp->cn_namelen && (*cmp)(cnp->cn_nameptr, vp->v_name, cnp->cn_namelen)) + if (vp->v_name && cnp->cn_namelen && (*cmp)(cnp->cn_nameptr, vp->v_name, cnp->cn_namelen)) { update_flags |= VNODE_UPDATE_NAME; - if ((vp->v_name == NULL && cnp->cn_namelen != 0) || (vp->v_name != NULL && cnp->cn_namelen == 0)) + } + if ((vp->v_name == NULL && cnp->cn_namelen != 0) || (vp->v_name != NULL && cnp->cn_namelen == 0)) { update_flags |= VNODE_UPDATE_NAME; - if (vnode_parent(vp) != NFSTOV(dnp)) + } + if (vnode_parent(vp) != NFSTOV(dnp)) { update_flags |= VNODE_UPDATE_PARENT; + } if (update_flags) { NFS_NODE_DBG("vnode_update_identity old name %s new name %.*s update flags = %x\n", - vp->v_name, cnp->cn_namelen, cnp->cn_nameptr ? cnp->cn_nameptr : "", update_flags); + vp->v_name, cnp->cn_namelen, cnp->cn_nameptr ? cnp->cn_nameptr : "", update_flags); vnode_update_identity(vp, NFSTOV(dnp), cnp->cn_nameptr, cnp->cn_namelen, 0, update_flags); } } @@ -362,7 +373,7 @@ loop: *npp = np; } FSDBG_BOT(263, dnp, *npp, 0xcace0000, error); - return(error); + return error; } FSDBG(263, mp, dnp, npp, 0xaaaaaaaa); @@ -371,7 +382,7 @@ loop: lck_mtx_unlock(nfs_node_hash_mutex); *npp = 0; FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOENT); - return (ENOENT); + return ENOENT; } /* @@ -384,7 +395,7 @@ loop: lck_mtx_unlock(nfs_node_hash_mutex); *npp = 0; FSDBG_BOT(263, dnp, *npp, 0x80000001, ENOMEM); - return (ENOMEM); + return ENOMEM; } bzero(np, sizeof *np); np->n_hflag |= (NHINIT | NHLOCKED); @@ -400,17 +411,20 @@ loop: /* ugh... 
need to keep track of ".zfs" directories to workaround server bugs */ if ((nvap->nva_type == VDIR) && cnp && (cnp->cn_namelen == 4) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == 'z') && - (cnp->cn_nameptr[2] == 'f') && (cnp->cn_nameptr[3] == 's')) + (cnp->cn_nameptr[2] == 'f') && (cnp->cn_nameptr[3] == 's')) { np->n_flag |= NISDOTZFS; - if (dnp && (dnp->n_flag & NISDOTZFS)) + } + if (dnp && (dnp->n_flag & NISDOTZFS)) { np->n_flag |= NISDOTZFSCHILD; + } if (dnp && cnp && ((cnp->cn_namelen != 2) || (cnp->cn_nameptr[0] != '.') || (cnp->cn_nameptr[1] != '.'))) { vnode_t dvp = NFSTOV(dnp); if (!vnode_get(dvp)) { - if (!vnode_ref(dvp)) + if (!vnode_ref(dvp)) { np->n_parent = dvp; + } vnode_put(dvp); } } @@ -418,13 +432,13 @@ loop: /* setup node's file handle */ if (fhsize > NFS_SMALLFH) { MALLOC_ZONE(np->n_fhp, u_char *, - fhsize, M_NFSBIGFH, M_WAITOK); + fhsize, M_NFSBIGFH, M_WAITOK); if (!np->n_fhp) { lck_mtx_unlock(nfs_node_hash_mutex); FREE_ZONE(np, sizeof *np, M_NFSNODE); *npp = 0; FSDBG_BOT(263, dnp, *npp, 0x80000002, ENOMEM); - return (ENOMEM); + return ENOMEM; } } else { np->n_fhp = &np->n_fh[0]; @@ -455,7 +469,7 @@ loop: nfs_node_unlock(np); lck_mtx_lock(nfs_node_hash_mutex); LIST_REMOVE(np, n_hash); - np->n_hflag &= ~(NHHASHED|NHINIT|NHLOCKED); + np->n_hflag &= ~(NHHASHED | NHINIT | NHLOCKED); if (np->n_hflag & NHLOCKWANT) { np->n_hflag &= ~NHLOCKWANT; wakeup(np); @@ -471,16 +485,18 @@ loop: lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp); lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp); lck_mtx_destroy(&np->n_openlock, nfs_open_grp); - if (np->n_fhsize > NFS_SMALLFH) + if (np->n_fhsize > NFS_SMALLFH) { FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH); + } FREE_ZONE(np, sizeof *np, M_NFSNODE); *npp = 0; FSDBG_BOT(263, dnp, *npp, 0x80000003, error); - return (error); + return error; } NFS_CHANGED_UPDATE(nfsvers, np, nvap); - if (nvap->nva_type == VDIR) + if (nvap->nva_type == VDIR) { NFS_CHANGED_UPDATE_NC(nfsvers, np, nvap); + } /* now, attempt to get a new vnode */ vfsp.vnfs_mp = mp; @@ -490,24 +506,26 @@ loop: vfsp.vnfs_fsnode = np; if (nfsvers == NFS_VER4) { #if FIFO - if (nvap->nva_type == VFIFO) + if (nvap->nva_type == VFIFO) { vfsp.vnfs_vops = fifo_nfsv4nodeop_p; - else + } else #endif /* FIFO */ - if (nvap->nva_type == VBLK || nvap->nva_type == VCHR) + if (nvap->nva_type == VBLK || nvap->nva_type == VCHR) { vfsp.vnfs_vops = spec_nfsv4nodeop_p; - else + } else { vfsp.vnfs_vops = nfsv4_vnodeop_p; + } } else { #if FIFO - if (nvap->nva_type == VFIFO) + if (nvap->nva_type == VFIFO) { vfsp.vnfs_vops = fifo_nfsv2nodeop_p; - else + } else #endif /* FIFO */ - if (nvap->nva_type == VBLK || nvap->nva_type == VCHR) + if (nvap->nva_type == VBLK || nvap->nva_type == VCHR) { vfsp.vnfs_vops = spec_nfsv2nodeop_p; - else + } else { vfsp.vnfs_vops = nfsv2_vnodeop_p; + } } vfsp.vnfs_markroot = (flags & NG_MARKROOT) ? 
1 : 0; vfsp.vnfs_marksystem = 0; @@ -515,8 +533,9 @@ loop: vfsp.vnfs_filesize = nvap->nva_size; vfsp.vnfs_cnp = cnp; vfsp.vnfs_flags = VNFS_ADDFSREF; - if (!dnp || !cnp || !(flags & NG_MAKEENTRY)) + if (!dnp || !cnp || !(flags & NG_MAKEENTRY)) { vfsp.vnfs_flags |= VNFS_NOCACHE; + } #if CONFIG_TRIGGERS if ((nfsvers >= NFS_VER4) && (nvap->nva_type == VDIR) && (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER)) { @@ -538,7 +557,7 @@ loop: nfs_node_unlock(np); lck_mtx_lock(nfs_node_hash_mutex); LIST_REMOVE(np, n_hash); - np->n_hflag &= ~(NHHASHED|NHINIT|NHLOCKED); + np->n_hflag &= ~(NHHASHED | NHINIT | NHLOCKED); if (np->n_hflag & NHLOCKWANT) { np->n_hflag &= ~NHLOCKWANT; wakeup(np); @@ -554,12 +573,13 @@ loop: lck_mtx_destroy(&np->n_lock, nfs_node_lck_grp); lck_rw_destroy(&np->n_datalock, nfs_data_lck_grp); lck_mtx_destroy(&np->n_openlock, nfs_open_grp); - if (np->n_fhsize > NFS_SMALLFH) + if (np->n_fhsize > NFS_SMALLFH) { FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH); + } FREE_ZONE(np, sizeof *np, M_NFSNODE); *npp = 0; FSDBG_BOT(263, dnp, *npp, 0x80000004, error); - return (error); + return error; } vp = np->n_vnode; vnode_settag(vp, VT_NFS); @@ -567,7 +587,7 @@ loop: /* check if anyone's waiting on this node */ lck_mtx_lock(nfs_node_hash_mutex); - np->n_hflag &= ~(NHINIT|NHLOCKED); + np->n_hflag &= ~(NHINIT | NHLOCKED); if (np->n_hflag & NHLOCKWANT) { np->n_hflag &= ~NHLOCKWANT; wakeup(np); @@ -577,17 +597,17 @@ loop: *npp = np; FSDBG_BOT(263, dnp, vp, *npp, error); - return (error); + return error; } int nfs_vnop_inactive( struct vnop_inactive_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * vfs_context_t a_context; + * } */*ap) { vnode_t vp = ap->a_vp; vfs_context_t ctx = ap->a_context; @@ -600,12 +620,14 @@ nfs_vnop_inactive( struct nfsmount *nmp; mount_t mp; - if (vp == NULL) + if (vp == NULL) { panic("nfs_vnop_inactive: vp == NULL"); + } np = VTONFS(vp); - if (np == NULL) + if (np == NULL) { panic("nfs_vnop_inactive: np == NULL"); - + } + nmp = NFSTONMP(np); mp = vnode_mount(vp); @@ -626,7 +648,7 @@ restart: */ #ifdef NFS_NODE_DEBUG NP(np, "nfs_vnop_inactive: still open: %d", np->n_openrefcnt); -#endif +#endif lck_mtx_unlock(&np->n_openlock); return 0; } @@ -634,8 +656,9 @@ restart: TAILQ_FOREACH(nofp, &np->n_opens, nof_link) { lck_mtx_lock(&nofp->nof_lock); if (nofp->nof_flags & NFS_OPEN_FILE_BUSY) { - if (!force) + if (!force) { NP(np, "nfs_vnop_inactive: open file busy"); + } busied = 0; } else { nofp->nof_flags |= NFS_OPEN_FILE_BUSY; @@ -643,8 +666,9 @@ restart: } lck_mtx_unlock(&nofp->nof_lock); if ((np->n_flag & NREVOKE) || (nofp->nof_flags & NFS_OPEN_FILE_LOST)) { - if (busied) + if (busied) { nfs_open_file_clear_busy(nofp); + } continue; } /* @@ -656,12 +680,15 @@ restart: if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && nofp->nof_creator && !force) { if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) { lck_mtx_unlock(&np->n_openlock); - if (busied) + if (busied) { nfs_open_file_clear_busy(nofp); - if (inuse) + } + if (inuse) { nfs_mount_state_in_use_end(nmp, 0); - if (!nfs4_reopen(nofp, NULL)) + } + if (!nfs4_reopen(nofp, NULL)) { goto restart; + } } nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE; lck_mtx_unlock(&np->n_openlock); @@ -670,10 +697,12 @@ restart: NP(np, "nfs_vnop_inactive: create close error: %d", error); nofp->nof_flags |= NFS_OPEN_FILE_CREATE; } - if (busied) + if (busied) { nfs_open_file_clear_busy(nofp); - if (inuse) + } + if (inuse) { nfs_mount_state_in_use_end(nmp, error); + } 
goto restart; } if (nofp->nof_flags & NFS_OPEN_FILE_NEEDCLOSE) { @@ -690,27 +719,33 @@ restart: } else if (!force) { lck_mtx_unlock(&np->n_openlock); if (nofp->nof_flags & NFS_OPEN_FILE_REOPEN) { - if (busied) + if (busied) { nfs_open_file_clear_busy(nofp); - if (inuse) + } + if (inuse) { nfs_mount_state_in_use_end(nmp, 0); - if (!nfs4_reopen(nofp, NULL)) + } + if (!nfs4_reopen(nofp, NULL)) { goto restart; + } } error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_READ, NFS_OPEN_SHARE_DENY_NONE, ctx); if (error) { NP(np, "nfs_vnop_inactive: need close error: %d", error); nofp->nof_flags |= NFS_OPEN_FILE_NEEDCLOSE; } - if (busied) + if (busied) { nfs_open_file_clear_busy(nofp); - if (inuse) + } + if (inuse) { nfs_mount_state_in_use_end(nmp, error); + } goto restart; } } - if (nofp->nof_opencnt && !force) + if (nofp->nof_opencnt && !force) { NP(np, "nfs_vnop_inactive: file still open: %d", nofp->nof_opencnt); + } if (!force && (nofp->nof_access || nofp->nof_deny || nofp->nof_mmap_access || nofp->nof_mmap_deny || nofp->nof_r || nofp->nof_w || nofp->nof_rw || @@ -720,30 +755,32 @@ restart: nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw || nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) { NP(np, "nfs_vnop_inactive: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u", - nofp->nof_access, nofp->nof_deny, - nofp->nof_mmap_access, nofp->nof_mmap_deny, - nofp->nof_r, nofp->nof_d_r, - nofp->nof_w, nofp->nof_d_w, - nofp->nof_rw, nofp->nof_d_rw, - nofp->nof_r_dw, nofp->nof_d_r_dw, - nofp->nof_w_dw, nofp->nof_d_w_dw, - nofp->nof_rw_dw, nofp->nof_d_rw_dw, - nofp->nof_r_drw, nofp->nof_d_r_drw, - nofp->nof_w_drw, nofp->nof_d_w_drw, - nofp->nof_rw_drw, nofp->nof_d_rw_drw); + nofp->nof_access, nofp->nof_deny, + nofp->nof_mmap_access, nofp->nof_mmap_deny, + nofp->nof_r, nofp->nof_d_r, + nofp->nof_w, nofp->nof_d_w, + nofp->nof_rw, nofp->nof_d_rw, + nofp->nof_r_dw, nofp->nof_d_r_dw, + nofp->nof_w_dw, nofp->nof_d_w_dw, + nofp->nof_rw_dw, nofp->nof_d_rw_dw, + nofp->nof_r_drw, nofp->nof_d_r_drw, + nofp->nof_w_drw, nofp->nof_d_w_drw, + nofp->nof_rw_drw, nofp->nof_d_rw_drw); } - if (busied) + if (busied) { nfs_open_file_clear_busy(nofp); + } } lck_mtx_unlock(&np->n_openlock); - if (inuse && nfs_mount_state_in_use_end(nmp, error)) + if (inuse && nfs_mount_state_in_use_end(nmp, error)) { goto restart; + } nfs_node_lock_force(np); if (vnode_vtype(vp) != VDIR) { - nsp = np->n_sillyrename; + nsp = np->n_sillyrename; np->n_sillyrename = NULL; } else { nsp = NULL; @@ -757,7 +794,7 @@ restart: np->n_flag &= (NMODIFIED); nfs_node_unlock(np); FSDBG_BOT(264, vp, np, np->n_flag, 0); - return (0); + return 0; } nfs_node_unlock(np); @@ -806,8 +843,9 @@ restart: np->n_flag &= (NMODIFIED); nfs_node_unlock(np); - if (!busyerror) + if (!busyerror) { nfs_node_clear_busy2(nsp->nsr_dnp, np); + } if (unhash && vnode_isinuse(vp, 0)) { /* vnode now inuse after silly remove? 
*/ @@ -838,13 +876,14 @@ restart: lck_mtx_unlock(nfs_node_hash_mutex); /* cleanup sillyrename info */ - if (nsp->nsr_cred != NOCRED) + if (nsp->nsr_cred != NOCRED) { kauth_cred_unref(&nsp->nsr_cred); + } vnode_rele(NFSTOV(nsp->nsr_dnp)); FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ); FSDBG_BOT(264, vp, np, np->n_flag, 0); - return (0); + return 0; } /* @@ -853,10 +892,10 @@ restart: int nfs_vnop_reclaim( struct vnop_reclaim_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * vfs_context_t a_context; + * } */*ap) { vnode_t vp = ap->a_vp; nfsnode_t np = VTONFS(vp); @@ -898,7 +937,7 @@ nfs_vnop_reclaim( /* try to return the delegation */ np->n_openflags &= ~N_DELEG_MASK; nfs4_delegreturn_rpc(nmp, np->n_fhp, np->n_fhsize, &np->n_dstateid, - R_RECOVER, vfs_context_thread(ctx), vfs_context_ucred(ctx)); + R_RECOVER, vfs_context_thread(ctx), vfs_context_ucred(ctx)); } if (np->n_attrdirfh) { FREE(np->n_attrdirfh, M_TEMP); @@ -910,13 +949,14 @@ nfs_vnop_reclaim( TAILQ_FOREACH_SAFE(nflp, &np->n_locks, nfl_link, nextnflp) { if (!(nflp->nfl_flags & NFS_FILE_LOCK_DEAD) && !force) { NP(np, "nfs_vnop_reclaim: lock 0x%llx 0x%llx 0x%x (bc %d)", - nflp->nfl_start, nflp->nfl_end, nflp->nfl_flags, nflp->nfl_blockcnt); + nflp->nfl_start, nflp->nfl_end, nflp->nfl_flags, nflp->nfl_blockcnt); } - if (!(nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED|NFS_FILE_LOCK_DEAD))) { + if (!(nflp->nfl_flags & (NFS_FILE_LOCK_BLOCKED | NFS_FILE_LOCK_DEAD))) { /* try sending an unlock RPC if it wasn't delegated */ - if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED) && !force) + if (!(nflp->nfl_flags & NFS_FILE_LOCK_DELEGATED) && !force) { nmp->nm_funcs->nf_unlock_rpc(np, nflp->nfl_owner, F_WRLCK, nflp->nfl_start, nflp->nfl_end, R_RECOVER, - NULL, nflp->nfl_owner->nlo_open_owner->noo_cred); + NULL, nflp->nfl_owner->nlo_open_owner->noo_cred); + } lck_mtx_lock(&nflp->nfl_owner->nlo_lock); TAILQ_REMOVE(&nflp->nfl_owner->nlo_locks, nflp, nfl_lolink); lck_mtx_unlock(&nflp->nfl_owner->nlo_lock); @@ -926,20 +966,24 @@ nfs_vnop_reclaim( } /* clean up lock owners */ TAILQ_FOREACH_SAFE(nlop, &np->n_lock_owners, nlo_link, nextnlop) { - if (!TAILQ_EMPTY(&nlop->nlo_locks) && !force) + if (!TAILQ_EMPTY(&nlop->nlo_locks) && !force) { NP(np, "nfs_vnop_reclaim: lock owner with locks"); + } TAILQ_REMOVE(&np->n_lock_owners, nlop, nlo_link); nfs_lock_owner_destroy(nlop); } /* clean up open state */ - if (np->n_openrefcnt && !force) + if (np->n_openrefcnt && !force) { NP(np, "nfs_vnop_reclaim: still open: %d", np->n_openrefcnt); + } TAILQ_FOREACH_SAFE(nofp, &np->n_opens, nof_link, nextnofp) { - if (nofp->nof_flags & NFS_OPEN_FILE_BUSY) + if (nofp->nof_flags & NFS_OPEN_FILE_BUSY) { NP(np, "nfs_vnop_reclaim: open file busy"); + } if (!(np->n_flag & NREVOKE) && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) { - if (nofp->nof_opencnt && !force) + if (nofp->nof_opencnt && !force) { NP(np, "nfs_vnop_reclaim: file still open: %d", nofp->nof_opencnt); + } if (!force && (nofp->nof_access || nofp->nof_deny || nofp->nof_mmap_access || nofp->nof_mmap_deny || nofp->nof_r || nofp->nof_w || nofp->nof_rw || @@ -949,22 +993,23 @@ nfs_vnop_reclaim( nofp->nof_d_r_dw || nofp->nof_d_w_dw || nofp->nof_d_rw_dw || nofp->nof_d_r_drw || nofp->nof_d_w_drw || nofp->nof_d_rw_drw)) { NP(np, "nfs_vnop_reclaim: non-zero access: %d %d %d %d # %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u", - nofp->nof_access, nofp->nof_deny, - nofp->nof_mmap_access, nofp->nof_mmap_deny, - nofp->nof_r, 
nofp->nof_d_r, - nofp->nof_w, nofp->nof_d_w, - nofp->nof_rw, nofp->nof_d_rw, - nofp->nof_r_dw, nofp->nof_d_r_dw, - nofp->nof_w_dw, nofp->nof_d_w_dw, - nofp->nof_rw_dw, nofp->nof_d_rw_dw, - nofp->nof_r_drw, nofp->nof_d_r_drw, - nofp->nof_w_drw, nofp->nof_d_w_drw, - nofp->nof_rw_drw, nofp->nof_d_rw_drw); + nofp->nof_access, nofp->nof_deny, + nofp->nof_mmap_access, nofp->nof_mmap_deny, + nofp->nof_r, nofp->nof_d_r, + nofp->nof_w, nofp->nof_d_w, + nofp->nof_rw, nofp->nof_d_rw, + nofp->nof_r_dw, nofp->nof_d_r_dw, + nofp->nof_w_dw, nofp->nof_d_w_dw, + nofp->nof_rw_dw, nofp->nof_d_rw_dw, + nofp->nof_r_drw, nofp->nof_d_r_drw, + nofp->nof_w_drw, nofp->nof_d_w_drw, + nofp->nof_rw_drw, nofp->nof_d_rw_drw); /* try sending a close RPC if it wasn't delegated */ if (nofp->nof_r || nofp->nof_w || nofp->nof_rw || nofp->nof_r_dw || nofp->nof_w_dw || nofp->nof_rw_dw || - nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw) + nofp->nof_r_drw || nofp->nof_w_drw || nofp->nof_rw_drw) { nfs4_close_rpc(np, nofp, NULL, nofp->nof_owner->noo_cred, R_RECOVER); + } } } TAILQ_REMOVE(&np->n_opens, nofp, nof_link); @@ -979,7 +1024,7 @@ nfs_vnop_reclaim( while (np->n_mflag & NMMONSCANINPROG) { struct timespec ts = { 1, 0 }; np->n_mflag |= NMMONSCANWANT; - msleep(&np->n_mflag, &nmp->nm_lock, PZERO-1, "nfswaitmonscan", &ts); + msleep(&np->n_mflag, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts); } if (np->n_monlink.le_next != NFSNOLIST) { LIST_REMOVE(np, n_monlink); @@ -989,18 +1034,21 @@ nfs_vnop_reclaim( } lck_mtx_lock(nfs_buf_mutex); - if (!force && (!LIST_EMPTY(&np->n_dirtyblkhd) || !LIST_EMPTY(&np->n_cleanblkhd))) + if (!force && (!LIST_EMPTY(&np->n_dirtyblkhd) || !LIST_EMPTY(&np->n_cleanblkhd))) { NP(np, "nfs_reclaim: dropping %s buffers", (!LIST_EMPTY(&np->n_dirtyblkhd) ? "dirty" : "clean")); + } lck_mtx_unlock(nfs_buf_mutex); nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ap->a_context, 0); lck_mtx_lock(nfs_node_hash_mutex); if ((vnode_vtype(vp) != VDIR) && np->n_sillyrename) { - if (!force) + if (!force) { NP(np, "nfs_reclaim: leaving unlinked file %s", np->n_sillyrename->nsr_name); - if (np->n_sillyrename->nsr_cred != NOCRED) + } + if (np->n_sillyrename->nsr_cred != NOCRED) { kauth_cred_unref(&np->n_sillyrename->nsr_cred); + } vnode_rele(NFSTOV(np->n_sillyrename->nsr_dnp)); FREE_ZONE(np->n_sillyrename, sizeof(*np->n_sillyrename), M_NFSREQ); } @@ -1019,12 +1067,15 @@ nfs_vnop_reclaim( * structures that might be associated with this nfs node. */ nfs_node_lock_force(np); - if ((vnode_vtype(vp) == VDIR) && np->n_cookiecache) + if ((vnode_vtype(vp) == VDIR) && np->n_cookiecache) { FREE_ZONE(np->n_cookiecache, sizeof(struct nfsdmap), M_NFSDIROFF); - if (np->n_fhsize > NFS_SMALLFH) + } + if (np->n_fhsize > NFS_SMALLFH) { FREE_ZONE(np->n_fhp, np->n_fhsize, M_NFSBIGFH); - if (np->n_vattr.nva_acl) + } + if (np->n_vattr.nva_acl) { kauth_acl_free(np->n_vattr.nva_acl); + } nfs_node_unlock(np); vnode_clearfsnode(vp); @@ -1042,7 +1093,7 @@ nfs_vnop_reclaim( FSDBG_BOT(265, vp, np, np->n_flag, 0xd1ed1e); FREE_ZONE(np, sizeof(struct nfsnode), M_NFSNODE); - return (0); + return 0; } /* @@ -1057,10 +1108,10 @@ nfs_node_lock_internal(nfsnode_t np, int force) if (!force && !(np->n_hflag && NHHASHED)) { FSDBG_BOT(268, np, 0xdead, 0, 0); lck_mtx_unlock(&np->n_lock); - return (ENOENT); + return ENOENT; } FSDBG_BOT(268, np, force, 0, 0); - return (0); + return 0; } int @@ -1099,21 +1150,25 @@ nfs_node_lock2(nfsnode_t np1, nfsnode_t np2) first = (np1 > np2) ? np1 : np2; second = (np1 > np2) ? 
np2 : np1; - if ((error = nfs_node_lock(first))) - return (error); - if (np1 == np2) - return (error); - if ((error = nfs_node_lock(second))) + if ((error = nfs_node_lock(first))) { + return error; + } + if (np1 == np2) { + return error; + } + if ((error = nfs_node_lock(second))) { nfs_node_unlock(first); - return (error); + } + return error; } void nfs_node_unlock2(nfsnode_t np1, nfsnode_t np2) { nfs_node_unlock(np1); - if (np1 != np2) + if (np1 != np2) { nfs_node_unlock(np2); + } } /* @@ -1126,18 +1181,21 @@ nfs_node_set_busy(nfsnode_t np, thread_t thd) struct timespec ts = { 2, 0 }; int error; - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } while (ISSET(np->n_flag, NBUSY)) { SET(np->n_flag, NBUSYWANT); - msleep(np, &np->n_lock, PZERO-1, "nfsbusywant", &ts); - if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0))) + msleep(np, &np->n_lock, PZERO - 1, "nfsbusywant", &ts); + if ((error = nfs_sigintr(NFSTONMP(np), NULL, thd, 0))) { break; + } } - if (!error) + if (!error) { SET(np->n_flag, NBUSY); + } nfs_node_unlock(np); - return (error); + return error; } void @@ -1147,10 +1205,11 @@ nfs_node_clear_busy(nfsnode_t np) nfs_node_lock_force(np); wanted = ISSET(np->n_flag, NBUSYWANT); - CLR(np->n_flag, NBUSY|NBUSYWANT); + CLR(np->n_flag, NBUSY | NBUSYWANT); nfs_node_unlock(np); - if (wanted) + if (wanted) { wakeup(np); + } } int @@ -1161,21 +1220,25 @@ nfs_node_set_busy2(nfsnode_t np1, nfsnode_t np2, thread_t thd) first = (np1 > np2) ? np1 : np2; second = (np1 > np2) ? np2 : np1; - if ((error = nfs_node_set_busy(first, thd))) - return (error); - if (np1 == np2) - return (error); - if ((error = nfs_node_set_busy(second, thd))) + if ((error = nfs_node_set_busy(first, thd))) { + return error; + } + if (np1 == np2) { + return error; + } + if ((error = nfs_node_set_busy(second, thd))) { nfs_node_clear_busy(first); - return (error); + } + return error; } void nfs_node_clear_busy2(nfsnode_t np1, nfsnode_t np2) { nfs_node_clear_busy(np1); - if (np1 != np2) + if (np1 != np2) { nfs_node_clear_busy(np2); + } } /* helper function to sort four nodes in reverse address order (no dupes) */ @@ -1191,17 +1254,20 @@ nfs_node_sort4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4, nfsno nb[0] = (np3 > np4) ? np3 : np4; nb[1] = (np3 > np4) ? np4 : np3; for (a = b = i = lcnt = 0; i < 4; i++) { - if (a >= 2) + if (a >= 2) { list[lcnt] = nb[b++]; - else if ((b >= 2) || (na[a] >= nb[b])) + } else if ((b >= 2) || (na[a] >= nb[b])) { list[lcnt] = na[a++]; - else + } else { list[lcnt] = nb[b++]; - if ((lcnt <= 0) || (list[lcnt] != list[lcnt-1])) + } + if ((lcnt <= 0) || (list[lcnt] != list[lcnt - 1])) { lcnt++; /* omit dups */ + } } - if (list[lcnt-1] == NULL) + if (list[lcnt - 1] == NULL) { lcnt--; + } *lcntp = lcnt; } @@ -1214,14 +1280,16 @@ nfs_node_set_busy4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4, t nfs_node_sort4(np1, np2, np3, np4, list, &lcnt); /* Now we can lock using list[0 - lcnt-1] */ - for (i = 0; i < lcnt; ++i) + for (i = 0; i < lcnt; ++i) { if ((error = nfs_node_set_busy(list[i], thd))) { /* Drop any locks we acquired. 
*/ - while (--i >= 0) + while (--i >= 0) { nfs_node_clear_busy(list[i]); - return (error); + } + return error; } - return (0); + } + return 0; } void @@ -1231,8 +1299,9 @@ nfs_node_clear_busy4(nfsnode_t np1, nfsnode_t np2, nfsnode_t np3, nfsnode_t np4) int lcnt; nfs_node_sort4(np1, np2, np3, np4, list, &lcnt); - while (--lcnt >= 0) + while (--lcnt >= 0) { nfs_node_clear_busy(list[lcnt]); + } } /* @@ -1253,14 +1322,16 @@ nfs_data_lock_internal(nfsnode_t np, int locktype, int updatesize) { FSDBG_TOP(270, np, locktype, np->n_datalockowner, 0); if (locktype == NFS_DATA_LOCK_SHARED) { - if (updatesize && ISSET(np->n_flag, NUPDATESIZE)) + if (updatesize && ISSET(np->n_flag, NUPDATESIZE)) { nfs_data_update_size(np, 0); + } lck_rw_lock_shared(&np->n_datalock); } else { lck_rw_lock_exclusive(&np->n_datalock); np->n_datalockowner = current_thread(); - if (updatesize && ISSET(np->n_flag, NUPDATESIZE)) + if (updatesize && ISSET(np->n_flag, NUPDATESIZE)) { nfs_data_update_size(np, 1); + } } FSDBG_BOT(270, np, locktype, np->n_datalockowner, 0); } @@ -1283,12 +1354,14 @@ nfs_data_unlock_internal(nfsnode_t np, int updatesize) { int mine = (np->n_datalockowner == current_thread()); FSDBG_TOP(271, np, np->n_datalockowner, current_thread(), 0); - if (updatesize && mine && ISSET(np->n_flag, NUPDATESIZE)) + if (updatesize && mine && ISSET(np->n_flag, NUPDATESIZE)) { nfs_data_update_size(np, 1); + } np->n_datalockowner = NULL; lck_rw_done(&np->n_datalock); - if (updatesize && !mine && ISSET(np->n_flag, NUPDATESIZE)) + if (updatesize && !mine && ISSET(np->n_flag, NUPDATESIZE)) { nfs_data_update_size(np, 0); + } FSDBG_BOT(271, np, np->n_datalockowner, current_thread(), 0); } @@ -1311,8 +1384,9 @@ nfs_data_update_size(nfsnode_t np, int datalocked) } error = nfs_node_lock(np); if (error || !ISSET(np->n_flag, NUPDATESIZE)) { - if (!error) + if (!error) { nfs_node_unlock(np); + } FSDBG_BOT(272, np, np->n_flag, np->n_size, np->n_newsize); return; } @@ -1332,7 +1406,7 @@ nfs_mount_is_dirty(mount_t mp) { u_long i; nfsnode_t np; -#ifdef DODEBUG +#ifdef DODEBUG struct timeval now, then, diff; u_long ncnt = 0; microuptime(&now); @@ -1342,9 +1416,10 @@ nfs_mount_is_dirty(mount_t mp) LIST_FOREACH(np, &nfsnodehashtbl[i], n_hash) { #ifdef DODEBUG ncnt++; -#endif - if (np->n_mount == mp && !LIST_EMPTY(&np->n_dirtyblkhd)) +#endif + if (np->n_mount == mp && !LIST_EMPTY(&np->n_dirtyblkhd)) { goto out; + } } } out: @@ -1352,10 +1427,10 @@ out: #ifdef DODEBUG microuptime(&then); timersub(&then, &now, &diff); - + NFS_DBG(NFS_FAC_SOCK, 7, "mount_is_dirty for %s took %lld mics for %ld slots and %ld nodes return %d\n", - vfs_statfs(mp)->f_mntfromname, (uint64_t)diff.tv_sec * 1000000LL + diff.tv_usec, i, ncnt, (i <= nfsnodehash)); + vfs_statfs(mp)->f_mntfromname, (uint64_t)diff.tv_sec * 1000000LL + diff.tv_usec, i, ncnt, (i <= nfsnodehash)); #endif - return (i <= nfsnodehash); + return i <= nfsnodehash; } diff --git a/bsd/nfs/nfs_serv.c b/bsd/nfs/nfs_serv.c index 06c11bb72..b5cf7e407 100644 --- a/bsd/nfs/nfs_serv.c +++ b/bsd/nfs/nfs_serv.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -122,7 +122,7 @@ struct nfsd_head nfsd_head, nfsd_queue; lck_grp_t *nfsrv_slp_rwlock_group; lck_grp_t *nfsrv_slp_mutex_group; struct nfsrv_sockhead nfsrv_socklist, nfsrv_sockwg, - nfsrv_sockwait, nfsrv_sockwork; + nfsrv_sockwait, nfsrv_sockwork; struct nfsrv_sock *nfsrv_udpsock = NULL; struct nfsrv_sock *nfsrv_udp6sock = NULL; @@ -146,10 +146,10 @@ int nfsrv_fsevents_enabled = 1; /* NFS server timers */ #if CONFIG_FSE -thread_call_t nfsrv_fmod_timer_call; +thread_call_t nfsrv_fmod_timer_call; #endif -thread_call_t nfsrv_idlesock_timer_call; -thread_call_t nfsrv_wg_timer_call; +thread_call_t nfsrv_idlesock_timer_call; +thread_call_t nfsrv_wg_timer_call; int nfsrv_wg_timer_on; /* globals for the active user list */ @@ -164,7 +164,7 @@ int nfsrv_wg_delay_v3 = 0; int nfsrv_async = 0; -int nfsrv_authorize(vnode_t,vnode_t,kauth_action_t,vfs_context_t,struct nfs_export_options*,int); +int nfsrv_authorize(vnode_t, vnode_t, kauth_action_t, vfs_context_t, struct nfs_export_options*, int); int nfsrv_wg_coalesce(struct nfsrv_descript *, struct nfsrv_descript *); void nfsrv_modified(vnode_t, vfs_context_t); @@ -175,15 +175,15 @@ extern int safe_getpath(struct vnode *dvp, char *leafname, char *path, int _len, * Initialize the data structures for the server. */ -#define NFSRV_NOT_INITIALIZED 0 -#define NFSRV_INITIALIZING 1 -#define NFSRV_INITIALIZED 2 +#define NFSRV_NOT_INITIALIZED 0 +#define NFSRV_INITIALIZING 1 +#define NFSRV_INITIALIZED 2 static volatile UInt32 nfsrv_initted = NFSRV_NOT_INITIALIZED; int nfsrv_is_initialized(void) { - return (nfsrv_initted == NFSRV_INITIALIZED); + return nfsrv_initted == NFSRV_INITIALIZED; } void @@ -192,13 +192,15 @@ nfsrv_init(void) /* make sure we init only once */ if (!OSCompareAndSwap(NFSRV_NOT_INITIALIZED, NFSRV_INITIALIZING, &nfsrv_initted)) { /* wait until initialization is complete */ - while (!nfsrv_is_initialized()) + while (!nfsrv_is_initialized()) { IOSleep(500); + } return; } - if (sizeof (struct nfsrv_sock) > NFS_SVCALLOC) - printf("struct nfsrv_sock bloated (> %dbytes)\n",NFS_SVCALLOC); + if (sizeof(struct nfsrv_sock) > NFS_SVCALLOC) { + printf("struct nfsrv_sock bloated (> %dbytes)\n", NFS_SVCALLOC); + } /* init nfsd mutex */ nfsd_lck_grp = lck_grp_alloc_init("nfsd", LCK_GRP_ATTR_NULL); @@ -246,7 +248,7 @@ nfsrv_init(void) /* Setup the up-call handling */ nfsrv_uc_init(); - + /* initialization complete */ nfsrv_initted = NFSRV_INITIALIZED; } @@ -327,13 +329,15 @@ nfsrv_access( */ if (nfsmode & NFS_ACCESS_READ) { testaction = vnode_isdir(vp) ? 
KAUTH_VNODE_LIST_DIRECTORY : KAUTH_VNODE_READ_DATA; - if (nfsrv_authorize(vp, NULL, testaction, ctx, nxo, 0)) + if (nfsrv_authorize(vp, NULL, testaction, ctx, nxo, 0)) { nfsmode &= ~NFS_ACCESS_READ; + } } if ((nfsmode & NFS_ACCESS_LOOKUP) && (!vnode_isdir(vp) || - nfsrv_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx, nxo, 0))) + nfsrv_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx, nxo, 0))) { nfsmode &= ~NFS_ACCESS_LOOKUP; + } if (nfsmode & NFS_ACCESS_MODIFY) { if (vnode_isdir(vp)) { testaction = @@ -344,8 +348,9 @@ nfsrv_access( testaction = KAUTH_VNODE_WRITE_DATA; } - if (nfsrv_authorize(vp, NULL, testaction, ctx, nxo, 0)) + if (nfsrv_authorize(vp, NULL, testaction, ctx, nxo, 0)) { nfsmode &= ~NFS_ACCESS_MODIFY; + } } if (nfsmode & NFS_ACCESS_EXTEND) { if (vnode_isdir(vp)) { @@ -357,8 +362,9 @@ nfsrv_access( KAUTH_VNODE_WRITE_DATA | KAUTH_VNODE_APPEND_DATA; } - if (nfsrv_authorize(vp, NULL, testaction, ctx, nxo, 0)) + if (nfsrv_authorize(vp, NULL, testaction, ctx, nxo, 0)) { nfsmode &= ~NFS_ACCESS_EXTEND; + } } /* @@ -374,8 +380,9 @@ nfsrv_access( if ((nfsmode & NFS_ACCESS_EXECUTE) && (vnode_isdir(vp) || - nfsrv_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx, nxo, 0))) + nfsrv_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx, nxo, 0))) { nfsmode &= ~NFS_ACCESS_EXECUTE; + } /* get postop attributes */ nfsm_srv_vattr_init(&vattr, NFS_VER3); @@ -389,17 +396,19 @@ nfsmerr: *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); nfsm_chain_add_postop_attr(error, nd, &nmrep, attrerr, &vattr); - if (!nd->nd_repstat) + if (!nd->nd_repstat) { nfsm_chain_add_32(error, &nmrep, nfsmode); + } nfsmout: nfsm_chain_build_done(error, &nmrep); - if (vp) + if (vp) { vnode_put(vp); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -441,8 +450,9 @@ nfsrv_getattr( nfsmerr_if(error); #if CONFIG_MAC - if (mac_vnode_check_open(ctx, vp, FREAD)) + if (mac_vnode_check_open(ctx, vp, FREAD)) { error = ESTALE; + } nfsmerr_if(error); #endif @@ -451,11 +461,11 @@ nfsrv_getattr( #if CONFIG_MAC /* XXXab: Comment in the VFS code makes it sound like - * some arguments can be filtered out, but not - * what it actually means. Hopefully not like - * they gonna set mtime to 0 or something. For - * now trust there are no shenanigans here. - */ + * some arguments can be filtered out, but not + * what it actually means. Hopefully not like + * they gonna set mtime to 0 or something. For + * now trust there are no shenanigans here. 
+ */ error = mac_vnode_check_getattr(ctx, NOCRED, vp, &vattr); nfsmerr_if(error); #endif @@ -473,13 +483,14 @@ nfsmerr: error = nfsm_chain_add_fattr(nd, &nmrep, &vattr); nfsmout: nfsm_chain_build_done(error, &nmrep); - if (vp) + if (vp) { vnode_put(vp); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -519,8 +530,9 @@ nfsrv_setattr( error = nfsm_chain_get_sattr(nd, nmreq, vap); if (nd->nd_vers == NFS_VER3) { nfsm_chain_get_32(error, nmreq, gcheck); - if (gcheck) + if (gcheck) { nfsm_chain_get_time(error, nmreq, nd->nd_vers, guard.tv_sec, guard.tv_nsec); + } } nfsmerr_if(error); @@ -549,11 +561,13 @@ nfsrv_setattr( nfsm_srv_pre_vattr_init(&preattr); error = preattrerr = vnode_getattr(vp, &preattr, ctx); if (!error && gcheck && VATTR_IS_SUPPORTED(&preattr, va_change_time) && - (preattr.va_change_time.tv_sec != guard.tv_sec || - preattr.va_change_time.tv_nsec != guard.tv_nsec)) + (preattr.va_change_time.tv_sec != guard.tv_sec || + preattr.va_change_time.tv_nsec != guard.tv_nsec)) { error = NFSERR_NOT_SYNC; - if (!preattrerr && !VATTR_ALL_SUPPORTED(&preattr)) + } + if (!preattrerr && !VATTR_ALL_SUPPORTED(&preattr)) { preattrerr = ENOENT; + } nfsmerr_if(error); } @@ -564,25 +578,28 @@ nfsrv_setattr( if ((vap->va_uid == saved_uid) && (kauth_cred_getuid(nd->nd_cr) != saved_uid)) { int ismember; VATTR_SET(vap, va_uid, kauth_cred_getuid(nd->nd_cr)); - if (kauth_cred_ismember_gid(nd->nd_cr, vap->va_gid, &ismember) || !ismember) + if (kauth_cred_ismember_gid(nd->nd_cr, vap->va_gid, &ismember) || !ismember) { VATTR_SET(vap, va_gid, kauth_cred_getgid(nd->nd_cr)); + } } /* Authorize the attribute changes. */ error = vnode_authattr(vp, vap, &action, ctx); - if (!error) + if (!error) { error = nfsrv_authorize(vp, NULL, action, ctx, nxo, 0); + } #if CONFIG_MACF - if (!error && mac_vnode_check_open(ctx, vp, FREAD|FWRITE)) + if (!error && mac_vnode_check_open(ctx, vp, FREAD | FWRITE)) { error = ESTALE; + } if (!error) { /* chown case */ if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) { error = mac_vnode_check_setowner(ctx, vp, - VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : -1, - VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : -1); + VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : -1, + VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : -1); } /* chmod case */ if (!error && VATTR_IS_ACTIVE(vap, va_mode)) { @@ -599,25 +616,28 @@ nfsrv_setattr( nanotime(¤t_time); error = mac_vnode_check_setutimes(ctx, vp, - VATTR_IS_ACTIVE(vap, va_access_time) ? vap->va_access_time : current_time, - VATTR_IS_ACTIVE(vap, va_modify_time) ? vap->va_modify_time : current_time); + VATTR_IS_ACTIVE(vap, va_access_time) ? vap->va_access_time : current_time, + VATTR_IS_ACTIVE(vap, va_modify_time) ? 
vap->va_modify_time : current_time); } } #endif /* set the new attributes */ - if (!error) + if (!error) { error = vnode_setattr(vp, vap, ctx); + } if (!error || (nd->nd_vers == NFS_VER3)) { nfsm_srv_vattr_init(&postattr, nd->nd_vers); postattrerr = vnode_getattr(vp, &postattr, ctx); - if (!error) + if (!error) { error = postattrerr; + } } nfsmerr: - if (vp) + if (vp) { vnode_put(vp); + } /* assemble reply */ nd->nd_repstat = error; @@ -625,18 +645,19 @@ nfsmerr: nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_wcc_data(error, nd, &nmrep, - preattrerr, &preattr, postattrerr, &postattr); - else + preattrerr, &preattr, postattrerr, &postattr); + } else { error = nfsm_chain_add_fattr(nd, &nmrep, &postattr); + } nfsmout: nfsm_chain_build_done(error, &nmrep); if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -724,12 +745,13 @@ nfsmerr: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_SRVFH(nd->nd_vers, &nfh) + - NFSX_POSTOPORFATTR(nd->nd_vers) + NFSX_POSTOPATTR(nd->nd_vers)); + NFSX_POSTOPORFATTR(nd->nd_vers) + NFSX_POSTOPATTR(nd->nd_vers)); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; if (nd->nd_repstat) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_postop_attr(error, nd, &nmrep, dirattrerr, &dirattr); + } goto nfsmout; } nfsm_chain_add_fh(error, &nmrep, nd->nd_vers, nfh.nfh_fhp, nfh.nfh_len); @@ -745,7 +767,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -767,7 +789,7 @@ nfsrv_readlink( struct nfsm_chain *nmreq, nmrep; mbuf_t mpath, mp; uio_t auio = NULL; - char uio_buf[ UIO_SIZEOF(4) ]; + char uio_buf[UIO_SIZEOF(4)]; char *uio_bufp = &uio_buf[0]; int uio_buflen = UIO_SIZEOF(4); @@ -788,17 +810,20 @@ nfsrv_readlink( if (mpcnt > 4) { uio_buflen = UIO_SIZEOF(mpcnt); MALLOC(uio_bufp, char*, uio_buflen, M_TEMP, M_WAITOK); - if (!uio_bufp) + if (!uio_bufp) { error = ENOMEM; + } nfsmerr_if(error); } auio = uio_createwithbuffer(mpcnt, 0, UIO_SYSSPACE, UIO_READ, uio_bufp, uio_buflen); - if (!auio) + if (!auio) { error = ENOMEM; + } nfsmerr_if(error); - for (mp = mpath; mp; mp = mbuf_next(mp)) + for (mp = mpath; mp; mp = mbuf_next(mp)) { uio_addiov(auio, CAST_USER_ADDR_T((caddr_t)mbuf_data(mp)), mbuf_len(mp)); + } error = nfsrv_fhtovp(&nfh, nd, &vp, &nx, &nxo); nfsmerr_if(error); @@ -813,23 +838,28 @@ nfsrv_readlink( nfsmerr_if(error); if (vnode_vtype(vp) != VLNK) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EINVAL; - else + } else { error = ENXIO; + } } - if (!error) + if (!error) { error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx, nxo, 0); + } #if CONFIG_MACF - if (mac_vnode_check_open(ctx, vp, FREAD)) + if (mac_vnode_check_open(ctx, vp, FREAD)) { error = ESTALE; + } nfsmerr_if(error); - if (!error) + if (!error) { error = mac_vnode_check_readlink(ctx, vp); + } #endif - if (!error) + if (!error) { error = VNOP_READLINK(vp, auio, ctx); + } if (vp) { if (nd->nd_vers == NFS_VER3) { nfsm_srv_vattr_init(&vattr, NFS_VER3); @@ -850,8 +880,9 @@ nfsmerr: nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_postop_attr(error, nd, &nmrep, attrerr, &vattr); + } if (error || nd->nd_repstat) { nfsm_chain_build_done(error, &nmrep); goto nfsmout; @@ -859,26 +890,30 @@ nfsmerr: if (auio && 
(uio_resid(auio) > 0)) { len -= uio_resid(auio); tlen = nfsm_rndup(len); - nfsm_adj(mpath, NFS_MAXPATHLEN-tlen, tlen-len); + nfsm_adj(mpath, NFS_MAXPATHLEN - tlen, tlen - len); } nfsm_chain_add_32(error, &nmrep, len); nfsm_chain_build_done(error, &nmrep); nfsmout_if(error); error = mbuf_setnext(nmrep.nmc_mcur, mpath); - if (!error) + if (!error) { mpath = NULL; + } nfsmout: - if (vp) + if (vp) { vnode_put(vp); - if (mpath) + } + if (mpath) { mbuf_freem(mpath); - if (uio_bufp != &uio_buf[0]) + } + if (uio_bufp != &uio_buf[0]) { FREE(uio_bufp, M_TEMP); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -903,7 +938,7 @@ nfsrv_read( struct vnode_attr vattr, *vap = &vattr; off_t off; uid_t saved_uid; - char uio_buf[ UIO_SIZEOF(0) ]; + char uio_buf[UIO_SIZEOF(0)]; struct nfsm_chain *nmreq, nmrep; error = 0; @@ -917,14 +952,16 @@ nfsrv_read( nfsm_chain_get_fh_ptr(error, nmreq, nd->nd_vers, nfh.nfh_fhp, nfh.nfh_len); nfsmerr_if(error); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_get_64(error, nmreq, off); - else + } else { nfsm_chain_get_32(error, nmreq, off); + } nfsm_chain_get_32(error, nmreq, reqlen); maxlen = NFSRV_NDMAXDATA(nd); - if (reqlen > maxlen) + if (reqlen > maxlen) { reqlen = maxlen; + } nfsmerr_if(error); error = nfsrv_fhtovp(&nfh, nd, &vp, &nx, &nxo); nfsmerr_if(error); @@ -936,15 +973,17 @@ nfsrv_read( nfsmerr_if(error); if (vnode_vtype(vp) != VREG) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EINVAL; - else + } else { error = (vnode_vtype(vp) == VDIR) ? EISDIR : EACCES; + } } if (!error) { - if ((error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx, nxo, 1))) - error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx, nxo, 1); + if ((error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx, nxo, 1))) { + error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx, nxo, 1); + } } #if CONFIG_MACF if (!error) { @@ -954,8 +993,9 @@ nfsrv_read( } else { /* XXXab: Do we need to do this?! */ error = mac_vnode_check_read(ctx, vfs_context_ucred(ctx), vp); - if (error) + if (error) { error = EACCES; + } /* mac_vnode_check_exec() can't be done here. 
*/ } } @@ -963,16 +1003,18 @@ nfsrv_read( #endif nfsm_srv_vattr_init(vap, nd->nd_vers); attrerr = vnode_getattr(vp, vap, ctx); - if (!error) + if (!error) { error = attrerr; + } nfsmerr_if(error); - if ((u_quad_t)off >= vap->va_data_size) + if ((u_quad_t)off >= vap->va_data_size) { count = 0; - else if (((u_quad_t)off + reqlen) > vap->va_data_size) + } else if (((u_quad_t)off + reqlen) > vap->va_data_size) { count = nfsm_rndup(vap->va_data_size - off); - else + } else { count = reqlen; + } len = left = count; if (count > 0) { @@ -980,15 +1022,17 @@ nfsrv_read( error = nfsm_mbuf_get_list(count, &mread, &mreadcnt); nfsmerr_if(error); MALLOC(uio_bufp, char *, UIO_SIZEOF(mreadcnt), M_TEMP, M_WAITOK); - if (uio_bufp) + if (uio_bufp) { auio = uio_createwithbuffer(mreadcnt, off, UIO_SYSSPACE, - UIO_READ, uio_bufp, UIO_SIZEOF(mreadcnt)); + UIO_READ, uio_bufp, UIO_SIZEOF(mreadcnt)); + } if (!uio_bufp || !auio) { error = ENOMEM; goto errorexit; } - for (m = mread; m; m = mbuf_next(m)) + for (m = mread; m; m = mbuf_next(m)) { uio_addiov(auio, CAST_USER_ADDR_T((caddr_t)mbuf_data(m)), mbuf_len(m)); + } error = VNOP_READ(vp, auio, IO_NODELOCKED, ctx); } else { auio = uio_createwithbuffer(0, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf)); @@ -1002,8 +1046,9 @@ errorexit: if (!error || (nd->nd_vers == NFS_VER3)) { nfsm_srv_vattr_init(vap, nd->nd_vers); attrerr = vnode_getattr(vp, vap, ctx); - if (!error && (nd->nd_vers == NFS_VER2)) + if (!error && (nd->nd_vers == NFS_VER2)) { error = attrerr; /* NFSv2 must have attributes to return */ + } } nfsmerr_if(error); @@ -1013,8 +1058,9 @@ errorexit: /* trim off any data not actually read */ len -= uio_resid(auio); tlen = nfsm_rndup(len); - if (count != tlen || tlen != len) + if (count != tlen || tlen != len) { nfsm_adj(mread, count - tlen, tlen - len); + } nfsmerr: /* assemble reply */ @@ -1023,8 +1069,9 @@ nfsmerr: nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_postop_attr(error, nd, &nmrep, attrerr, vap); + } if (error || nd->nd_repstat) { nfsm_chain_build_done(error, &nmrep); goto nfsmout; @@ -1039,8 +1086,9 @@ nfsmerr: nfsm_chain_build_done(error, &nmrep); nfsmout_if(error); error = mbuf_setnext(nmrep.nmc_mcur, mread); - if (!error) + if (!error) { mread = NULL; + } /* update export stats */ NFSStatAdd64(&nx->nx_stats.bytes_read, len); @@ -1048,17 +1096,20 @@ nfsmerr: /* update active user stats */ nfsrv_update_user_stat(nx, nd, saved_uid, 1, len, 0); nfsmout: - if (vp) + if (vp) { vnode_put(vp); - if (mread) + } + if (mread) { mbuf_freem(mread); - if (uio_bufp != NULL) + } + if (uio_bufp != NULL) { FREE(uio_bufp, M_TEMP); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } #if CONFIG_FSE @@ -1074,9 +1125,9 @@ nfsmout: * "content modified" fsevent only if there are no writes to * a vnode for nfsrv_fmod_pendtime milliseconds. 
*/ -int nfsrv_fmod_pending; /* count of vnodes being written to */ -int nfsrv_fmod_pendtime = 1000; /* msec to wait */ -int nfsrv_fmod_min_interval = 100; /* msec min interval between callbacks */ +int nfsrv_fmod_pending; /* count of vnodes being written to */ +int nfsrv_fmod_pendtime = 1000; /* msec to wait */ +int nfsrv_fmod_min_interval = 100; /* msec min interval between callbacks */ /* * This function is called via the kernel's callout @@ -1097,7 +1148,7 @@ nfsrv_fmod_timer(__unused void *param0, __unused void *param1) again: clock_get_uptime(&timenow); clock_interval_to_deadline(nfsrv_fmod_pendtime, 1000 * 1000, - &next_deadline); + &next_deadline); /* * Scan all the hash chains @@ -1110,10 +1161,12 @@ again: */ headp = &nfsrv_fmod_hashtbl[i]; LIST_FOREACH(fp, headp, fm_link) { - if (timenow >= fp->fm_deadline) + if (timenow >= fp->fm_deadline) { break; - if (fp->fm_deadline < next_deadline) + } + if (fp->fm_deadline < next_deadline) { next_deadline = fp->fm_deadline; + } } /* @@ -1128,10 +1181,11 @@ again: nfp = LIST_NEXT(fp, fm_link); LIST_REMOVE(fp, fm_link); fmod_fire++; - if (pfp) + if (pfp) { LIST_INSERT_AFTER(pfp, fp, fm_link); - else + } else { LIST_INSERT_HEAD(&firehead, fp, fm_link); + } pfp = fp; fp = nfp; } @@ -1147,8 +1201,8 @@ again: if (nfsrv_fsevents_enabled) { fp->fm_context.vc_thread = current_thread(); add_fsevent(FSE_CONTENT_MODIFIED, &fp->fm_context, - FSE_ARG_VNODE, fp->fm_vp, - FSE_ARG_DONE); + FSE_ARG_VNODE, fp->fm_vp, + FSE_ARG_DONE); } vnode_put(fp->fm_vp); kauth_cred_unref(&fp->fm_context.vc_ucred); @@ -1168,13 +1222,15 @@ again: */ if (nfsrv_fmod_pending > 0) { interval = (next_deadline - timenow) / (1000 * 1000); - if (interval < nfsrv_fmod_min_interval) + if (interval < nfsrv_fmod_min_interval) { interval = nfsrv_fmod_min_interval; + } } nfsrv_fmod_timer_on = interval > 0; - if (nfsrv_fmod_timer_on) + if (nfsrv_fmod_timer_on) { nfs_interval_timer_start(nfsrv_fmod_timer_call, interval); + } lck_mtx_unlock(nfsrv_fmod_mutex); } @@ -1223,8 +1279,9 @@ nfsrv_modified(vnode_t vp, vfs_context_t ctx) * Allocate a new file mod entry and add it * on the front of the hash chain. */ - if (vnode_get(vp) != 0) + if (vnode_get(vp) != 0) { goto done; + } MALLOC(fp, struct nfsrv_fmod *, sizeof(*fp), M_TEMP, M_WAITOK); if (fp == NULL) { vnode_put(vp); @@ -1244,7 +1301,7 @@ nfsrv_modified(vnode_t vp, vfs_context_t ctx) if (!nfsrv_fmod_timer_on) { nfsrv_fmod_timer_on = 1; nfs_interval_timer_start(nfsrv_fmod_timer_call, - nfsrv_fmod_pendtime); + nfsrv_fmod_pendtime); } done: lck_mtx_unlock(nfsrv_fmod_mutex); @@ -1280,7 +1337,7 @@ nfsrv_write( if (nd->nd_nmreq.nmc_mhead == NULL) { *mrepp = NULL; - return (0); + return 0; } error = 0; @@ -1301,8 +1358,9 @@ nfsrv_write( nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); nfsm_chain_get_32(error, nmreq, off); nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); - if (nfsrv_async) - stable = NFS_WRITE_UNSTABLE; + if (nfsrv_async) { + stable = NFS_WRITE_UNSTABLE; + } } nfsm_chain_get_32(error, nmreq, len); nfsmerr_if(error); @@ -1338,13 +1396,15 @@ nfsrv_write( preattrerr = vnode_getattr(vp, &preattr, ctx); } if (vnode_vtype(vp) != VREG) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EINVAL; - else + } else { error = (vnode_vtype(vp) == VDIR) ? EISDIR : EACCES; + } } - if (!error) + if (!error) { error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_WRITE_DATA, ctx, nxo, 1); + } nfsmerr_if(error); #if CONFIG_MACF @@ -1355,37 +1415,45 @@ nfsrv_write( } else { /* XXXab: Do we need to do this?! 
*/ error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp); - if (error) + if (error) { error = EACCES; + } } } nfsmerr_if(error); #endif if (len > 0) { - for (mcount=0, m=nmreq->nmc_mcur; m; m = mbuf_next(m)) - if (mbuf_len(m) > 0) + for (mcount = 0, m = nmreq->nmc_mcur; m; m = mbuf_next(m)) { + if (mbuf_len(m) > 0) { mcount++; + } + } MALLOC(uio_bufp, char *, UIO_SIZEOF(mcount), M_TEMP, M_WAITOK); - if (uio_bufp) + if (uio_bufp) { auio = uio_createwithbuffer(mcount, off, UIO_SYSSPACE, UIO_WRITE, uio_bufp, UIO_SIZEOF(mcount)); - if (!uio_bufp || !auio) + } + if (!uio_bufp || !auio) { error = ENOMEM; + } nfsmerr_if(error); - for (m = nmreq->nmc_mcur; m; m = mbuf_next(m)) - if ((mlen = mbuf_len(m)) > 0) + for (m = nmreq->nmc_mcur; m; m = mbuf_next(m)) { + if ((mlen = mbuf_len(m)) > 0) { uio_addiov(auio, CAST_USER_ADDR_T((caddr_t)mbuf_data(m)), mlen); + } + } /* * XXX The IO_METASYNC flag indicates that all metadata (and not just * enough to ensure data integrity) mus be written to stable storage * synchronously. (IO_METASYNC is not yet implemented in 4.4BSD-Lite.) */ - if (stable == NFS_WRITE_UNSTABLE) + if (stable == NFS_WRITE_UNSTABLE) { ioflags = IO_NODELOCKED; - else if (stable == NFS_WRITE_DATASYNC) + } else if (stable == NFS_WRITE_DATASYNC) { ioflags = (IO_SYNC | IO_NODELOCKED); - else + } else { ioflags = (IO_METASYNC | IO_SYNC | IO_NODELOCKED); + } error = VNOP_WRITE(vp, auio, ioflags, ctx); OSAddAtomic64(1, &nfsstats.srvvop_writes); @@ -1397,14 +1465,16 @@ nfsrv_write( nfsrv_update_user_stat(nx, nd, saved_uid, 1, 0, len); #if CONFIG_FSE - if (nfsrv_fsevents_enabled && !error && need_fsevent(FSE_CONTENT_MODIFIED, vp)) + if (nfsrv_fsevents_enabled && !error && need_fsevent(FSE_CONTENT_MODIFIED, vp)) { nfsrv_modified(vp, ctx); + } #endif } nfsm_srv_vattr_init(&postattr, nd->nd_vers); postattrerr = vnode_getattr(vp, &postattr, ctx); - if (!error && (nd->nd_vers == NFS_VER2)) + if (!error && (nd->nd_vers == NFS_VER2)) { error = postattrerr; /* NFSv2 must have attributes to return */ + } vnode_put(vp); vp = NULL; @@ -1412,21 +1482,22 @@ nfsmerr: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_PREOPATTR(nd->nd_vers) + - NFSX_POSTOPORFATTR(nd->nd_vers) + 2 * NFSX_UNSIGNED + - NFSX_WRITEVERF(nd->nd_vers)); + NFSX_POSTOPORFATTR(nd->nd_vers) + 2 * NFSX_UNSIGNED + + NFSX_WRITEVERF(nd->nd_vers)); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_wcc_data(error, nd, &nmrep, - preattrerr, &preattr, postattrerr, &postattr); + preattrerr, &preattr, postattrerr, &postattr); nfsmout_if(error || nd->nd_repstat); nfsm_chain_add_32(error, &nmrep, retlen); /* If nfsrv_async is set, then pretend the write was FILESYNC. */ - if ((stable == NFS_WRITE_UNSTABLE) && !nfsrv_async) + if ((stable == NFS_WRITE_UNSTABLE) && !nfsrv_async) { nfsm_chain_add_32(error, &nmrep, stable); - else + } else { nfsm_chain_add_32(error, &nmrep, NFS_WRITE_FILESYNC); + } /* write verifier */ nfsm_chain_add_32(error, &nmrep, nx->nx_exptime.tv_sec); nfsm_chain_add_32(error, &nmrep, nx->nx_exptime.tv_usec); @@ -1435,15 +1506,17 @@ nfsmerr: } nfsmout: nfsm_chain_build_done(error, &nmrep); - if (vp) + if (vp) { vnode_put(vp); - if (uio_bufp != NULL) + } + if (uio_bufp != NULL) { FREE(uio_bufp, M_TEMP); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -1454,11 +1527,11 @@ nfsmout: * Jan. 1994. 
*/ -#define NWDELAYHASH(sock, f) \ +#define NWDELAYHASH(sock, f) \ (&(sock)->ns_wdelayhashtbl[(*((u_int32_t *)(f))) % NFS_WDELAYHASHSIZ]) /* These macros compare nfsrv_descript structures. */ #define NFSW_CONTIG(o, n) \ - (((o)->nd_eoff >= (n)->nd_off) && nfsrv_fhmatch(&(o)->nd_fh, &(n)->nd_fh)) + (((o)->nd_eoff >= (n)->nd_off) && nfsrv_fhmatch(&(o)->nd_fh, &(n)->nd_fh)) /* * XXX The following is an incorrect comparison; it fails to take into account * XXX scoping of MAC labels, but we currently lack KPI for credential @@ -1466,7 +1539,7 @@ nfsmout: */ #define NFSW_SAMECRED(o, n) \ (!bcmp((caddr_t)(o)->nd_cr, (caddr_t)(n)->nd_cr, \ - sizeof (struct ucred))) + sizeof (struct ucred))) int nfsrv_writegather( @@ -1498,103 +1571,106 @@ nfsrv_writegather( *mrepp = NULL; if (*ndp) { - nd = *ndp; - *ndp = NULL; - nmreq = &nd->nd_nmreq; - LIST_INIT(&nd->nd_coalesce); - nd->nd_mrep = NULL; - nd->nd_stable = NFS_WRITE_FILESYNC; - microuptime(&now); - cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec; - nd->nd_time = cur_usec + - ((nd->nd_vers == NFS_VER3) ? nfsrv_wg_delay_v3 : nfsrv_wg_delay); - - /* Now, get the write header... */ - nfsm_chain_get_fh_ptr(error, nmreq, nd->nd_vers, nd->nd_fh.nfh_fhp, nd->nd_fh.nfh_len); - /* XXX shouldn't we be checking for invalid FHs before doing any more work? */ - nfsmerr_if(error); - if (nd->nd_vers == NFS_VER3) { - nfsm_chain_get_64(error, nmreq, nd->nd_off); - nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); - nfsm_chain_get_32(error, nmreq, nd->nd_stable); - } else { - nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); - nfsm_chain_get_32(error, nmreq, nd->nd_off); - nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); - if (nfsrv_async) - nd->nd_stable = NFS_WRITE_UNSTABLE; - } - nfsm_chain_get_32(error, nmreq, nd->nd_len); - nfsmerr_if(error); - nd->nd_eoff = nd->nd_off + nd->nd_len; - - if (nd->nd_len > 0) { - error = nfsm_chain_trim_data(nmreq, nd->nd_len, &mlen); - nfsmerr_if(error); - } else { - mlen = 0; - } + nd = *ndp; + *ndp = NULL; + nmreq = &nd->nd_nmreq; + LIST_INIT(&nd->nd_coalesce); + nd->nd_mrep = NULL; + nd->nd_stable = NFS_WRITE_FILESYNC; + microuptime(&now); + cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec; + nd->nd_time = cur_usec + + ((nd->nd_vers == NFS_VER3) ? nfsrv_wg_delay_v3 : nfsrv_wg_delay); + + /* Now, get the write header... */ + nfsm_chain_get_fh_ptr(error, nmreq, nd->nd_vers, nd->nd_fh.nfh_fhp, nd->nd_fh.nfh_len); + /* XXX shouldn't we be checking for invalid FHs before doing any more work? 
*/ + nfsmerr_if(error); + if (nd->nd_vers == NFS_VER3) { + nfsm_chain_get_64(error, nmreq, nd->nd_off); + nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); + nfsm_chain_get_32(error, nmreq, nd->nd_stable); + } else { + nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); + nfsm_chain_get_32(error, nmreq, nd->nd_off); + nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); + if (nfsrv_async) { + nd->nd_stable = NFS_WRITE_UNSTABLE; + } + } + nfsm_chain_get_32(error, nmreq, nd->nd_len); + nfsmerr_if(error); + nd->nd_eoff = nd->nd_off + nd->nd_len; - if ((nd->nd_len > NFSRV_MAXDATA) || (nd->nd_len < 0) || (mlen < nd->nd_len)) { - error = EIO; + if (nd->nd_len > 0) { + error = nfsm_chain_trim_data(nmreq, nd->nd_len, &mlen); + nfsmerr_if(error); + } else { + mlen = 0; + } + + if ((nd->nd_len > NFSRV_MAXDATA) || (nd->nd_len < 0) || (mlen < nd->nd_len)) { + error = EIO; nfsmerr: - nd->nd_repstat = error; - error = nfsrv_rephead(nd, slp, &nmrep, NFSX_WCCDATA(nd->nd_vers)); - if (!error) { - nd->nd_mrep = nmrep.nmc_mhead; - if (nd->nd_vers == NFS_VER3) - nfsm_chain_add_wcc_data(error, nd, &nmrep, - preattrerr, &preattr, postattrerr, &postattr); + nd->nd_repstat = error; + error = nfsrv_rephead(nd, slp, &nmrep, NFSX_WCCDATA(nd->nd_vers)); + if (!error) { + nd->nd_mrep = nmrep.nmc_mhead; + if (nd->nd_vers == NFS_VER3) { + nfsm_chain_add_wcc_data(error, nd, &nmrep, + preattrerr, &preattr, postattrerr, &postattr); + } + } + nfsm_chain_build_done(error, &nmrep); + nd->nd_time = 1; } - nfsm_chain_build_done(error, &nmrep); - nd->nd_time = 1; - } - - /* - * Add this entry to the hash and time queues. - */ - lck_mtx_lock(&slp->ns_wgmutex); - owp = NULL; - wp = slp->ns_tq.lh_first; - while (wp && wp->nd_time < nd->nd_time) { - owp = wp; - wp = wp->nd_tq.le_next; - } - if (owp) { - LIST_INSERT_AFTER(owp, nd, nd_tq); - } else { - LIST_INSERT_HEAD(&slp->ns_tq, nd, nd_tq); - } - if (!error) { - wpp = NWDELAYHASH(slp, nd->nd_fh.nfh_fid); + + /* + * Add this entry to the hash and time queues. + */ + lck_mtx_lock(&slp->ns_wgmutex); owp = NULL; - wp = wpp->lh_first; - while (wp && !nfsrv_fhmatch(&nd->nd_fh, &wp->nd_fh)) { - owp = wp; - wp = wp->nd_hash.le_next; - } - while (wp && (wp->nd_off < nd->nd_off) && - nfsrv_fhmatch(&nd->nd_fh, &wp->nd_fh)) { - owp = wp; - wp = wp->nd_hash.le_next; + wp = slp->ns_tq.lh_first; + while (wp && wp->nd_time < nd->nd_time) { + owp = wp; + wp = wp->nd_tq.le_next; } if (owp) { - LIST_INSERT_AFTER(owp, nd, nd_hash); - /* - * Search the hash list for overlapping entries and - * coalesce. - */ - for(; nd && NFSW_CONTIG(owp, nd); nd = wp) { - wp = nd->nd_hash.le_next; - if (NFSW_SAMECRED(owp, nd)) - nfsrv_wg_coalesce(owp, nd); - } + LIST_INSERT_AFTER(owp, nd, nd_tq); } else { - LIST_INSERT_HEAD(wpp, nd, nd_hash); + LIST_INSERT_HEAD(&slp->ns_tq, nd, nd_tq); + } + if (!error) { + wpp = NWDELAYHASH(slp, nd->nd_fh.nfh_fid); + owp = NULL; + wp = wpp->lh_first; + while (wp && !nfsrv_fhmatch(&nd->nd_fh, &wp->nd_fh)) { + owp = wp; + wp = wp->nd_hash.le_next; + } + while (wp && (wp->nd_off < nd->nd_off) && + nfsrv_fhmatch(&nd->nd_fh, &wp->nd_fh)) { + owp = wp; + wp = wp->nd_hash.le_next; + } + if (owp) { + LIST_INSERT_AFTER(owp, nd, nd_hash); + /* + * Search the hash list for overlapping entries and + * coalesce. 
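nfsrv_writegather() above queues each request on the socket's time queue in nd_time order so the flush loop can stop at the first not-yet-due entry. A self-contained version of that sorted insert, using the same <sys/queue.h> LIST macros; struct wreq is a hypothetical stand-in for struct nfsrv_descript:

#include <sys/queue.h>
#include <stdio.h>

struct wreq {
    unsigned long    time;   /* deadline, usec */
    LIST_ENTRY(wreq) tq;     /* time-ordered queue linkage */
};
LIST_HEAD(wqhead, wreq);

static void
wq_insert_sorted(struct wqhead *head, struct wreq *nd)
{
    struct wreq *owp = NULL, *wp;

    /* walk until we find the first entry due later than ours */
    LIST_FOREACH(wp, head, tq) {
        if (wp->time >= nd->time) {
            break;
        }
        owp = wp;
    }
    if (owp) {
        LIST_INSERT_AFTER(owp, nd, tq);
    } else {
        LIST_INSERT_HEAD(head, nd, tq);
    }
}

int
main(void)
{
    struct wqhead head = LIST_HEAD_INITIALIZER(head);
    struct wreq a = { 30 }, b = { 10 }, c = { 20 }, *wp;

    wq_insert_sorted(&head, &a);
    wq_insert_sorted(&head, &b);
    wq_insert_sorted(&head, &c);
    LIST_FOREACH(wp, &head, tq) {
        printf("%lu ", wp->time);   /* prints 10 20 30 */
    }
    printf("\n");
    return 0;
}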
+ */ + for (; nd && NFSW_CONTIG(owp, nd); nd = wp) { + wp = nd->nd_hash.le_next; + if (NFSW_SAMECRED(owp, nd)) { + nfsrv_wg_coalesce(owp, nd); + } + } + } else { + LIST_INSERT_HEAD(wpp, nd, nd_hash); + } } - } } else { - lck_mtx_lock(&slp->ns_wgmutex); + lck_mtx_lock(&slp->ns_wgmutex); } /* @@ -1606,88 +1682,102 @@ loop1: cur_usec = (u_quad_t)now.tv_sec * 1000000 + (u_quad_t)now.tv_usec; for (nd = slp->ns_tq.lh_first; nd; nd = owp) { owp = nd->nd_tq.le_next; - if (nd->nd_time > cur_usec) - break; - if (nd->nd_mrep) - continue; + if (nd->nd_time > cur_usec) { + break; + } + if (nd->nd_mrep) { + continue; + } LIST_REMOVE(nd, nd_tq); LIST_REMOVE(nd, nd_hash); nmreq = &nd->nd_nmreq; preattrerr = postattrerr = ENOENT; - /* save the incoming uid before mapping, */ + /* save the incoming uid before mapping, */ /* for updating active user stats later */ saved_uid = kauth_cred_getuid(nd->nd_cr); error = nfsrv_fhtovp(&nd->nd_fh, nd, &vp, &nx, &nxo); if (!error) { - /* update per-export stats */ - NFSStatAdd64(&nx->nx_stats.ops, 1); + /* update per-export stats */ + NFSStatAdd64(&nx->nx_stats.ops, 1); - error = nfsrv_credcheck(nd, ctx, nx, nxo); - if (error) - vnode_put(vp); + error = nfsrv_credcheck(nd, ctx, nx, nxo); + if (error) { + vnode_put(vp); + } } if (!error) { - if (nd->nd_vers == NFS_VER3) { - nfsm_srv_pre_vattr_init(&preattr); - preattrerr = vnode_getattr(vp, &preattr, ctx); - } - if (vnode_vtype(vp) != VREG) { - if (nd->nd_vers == NFS_VER3) - error = EINVAL; - else - error = (vnode_vtype(vp) == VDIR) ? EISDIR : EACCES; - } - } else - vp = NULL; - if (!error) - error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_WRITE_DATA, ctx, nxo, 1); - - if (nd->nd_stable == NFS_WRITE_UNSTABLE) - ioflags = IO_NODELOCKED; - else if (nd->nd_stable == NFS_WRITE_DATASYNC) - ioflags = (IO_SYNC | IO_NODELOCKED); - else - ioflags = (IO_METASYNC | IO_SYNC | IO_NODELOCKED); + if (nd->nd_vers == NFS_VER3) { + nfsm_srv_pre_vattr_init(&preattr); + preattrerr = vnode_getattr(vp, &preattr, ctx); + } + if (vnode_vtype(vp) != VREG) { + if (nd->nd_vers == NFS_VER3) { + error = EINVAL; + } else { + error = (vnode_vtype(vp) == VDIR) ? 
EISDIR : EACCES; + } + } + } else { + vp = NULL; + } + if (!error) { + error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_WRITE_DATA, ctx, nxo, 1); + } + + if (nd->nd_stable == NFS_WRITE_UNSTABLE) { + ioflags = IO_NODELOCKED; + } else if (nd->nd_stable == NFS_WRITE_DATASYNC) { + ioflags = (IO_SYNC | IO_NODELOCKED); + } else { + ioflags = (IO_METASYNC | IO_SYNC | IO_NODELOCKED); + } if (!error && ((nd->nd_eoff - nd->nd_off) > 0)) { - for (i=0, m=nmreq->nmc_mhead; m; m = mbuf_next(m)) - if (mbuf_len(m) > 0) - i++; - - MALLOC(uio_bufp, char *, UIO_SIZEOF(i), M_TEMP, M_WAITOK); - if (uio_bufp) - auio = uio_createwithbuffer(i, nd->nd_off, UIO_SYSSPACE, - UIO_WRITE, uio_bufp, UIO_SIZEOF(i)); - if (!uio_bufp || !auio) - error = ENOMEM; - if (!error) { - for (m = nmreq->nmc_mhead; m; m = mbuf_next(m)) - if ((tlen = mbuf_len(m)) > 0) - uio_addiov(auio, CAST_USER_ADDR_T((caddr_t)mbuf_data(m)), tlen); - error = VNOP_WRITE(vp, auio, ioflags, ctx); - OSAddAtomic64(1, &nfsstats.srvvop_writes); + for (i = 0, m = nmreq->nmc_mhead; m; m = mbuf_next(m)) { + if (mbuf_len(m) > 0) { + i++; + } + } - /* update export stats */ - NFSStatAdd64(&nx->nx_stats.bytes_written, nd->nd_len); - /* update active user stats */ - nfsrv_update_user_stat(nx, nd, saved_uid, 1, 0, nd->nd_len); + MALLOC(uio_bufp, char *, UIO_SIZEOF(i), M_TEMP, M_WAITOK); + if (uio_bufp) { + auio = uio_createwithbuffer(i, nd->nd_off, UIO_SYSSPACE, + UIO_WRITE, uio_bufp, UIO_SIZEOF(i)); + } + if (!uio_bufp || !auio) { + error = ENOMEM; + } + if (!error) { + for (m = nmreq->nmc_mhead; m; m = mbuf_next(m)) { + if ((tlen = mbuf_len(m)) > 0) { + uio_addiov(auio, CAST_USER_ADDR_T((caddr_t)mbuf_data(m)), tlen); + } + } + error = VNOP_WRITE(vp, auio, ioflags, ctx); + OSAddAtomic64(1, &nfsstats.srvvop_writes); + + /* update export stats */ + NFSStatAdd64(&nx->nx_stats.bytes_written, nd->nd_len); + /* update active user stats */ + nfsrv_update_user_stat(nx, nd, saved_uid, 1, 0, nd->nd_len); #if CONFIG_FSE - if (nfsrv_fsevents_enabled && !error && need_fsevent(FSE_CONTENT_MODIFIED, vp)) - nfsrv_modified(vp, ctx); + if (nfsrv_fsevents_enabled && !error && need_fsevent(FSE_CONTENT_MODIFIED, vp)) { + nfsrv_modified(vp, ctx); + } #endif - } - if (uio_bufp) { - FREE(uio_bufp, M_TEMP); - uio_bufp = NULL; - } + } + if (uio_bufp) { + FREE(uio_bufp, M_TEMP); + uio_bufp = NULL; + } } if (vp) { - nfsm_srv_vattr_init(&postattr, nd->nd_vers); - postattrerr = vnode_getattr(vp, &postattr, ctx); - vnode_put(vp); + nfsm_srv_vattr_init(&postattr, nd->nd_vers); + postattrerr = vnode_getattr(vp, &postattr, ctx); + vnode_put(vp); } /* @@ -1696,46 +1786,46 @@ loop1: */ swp = nd; do { - if (error) { - nd->nd_repstat = error; - error = nfsrv_rephead(nd, slp, &nmrep, NFSX_WCCDATA(nd->nd_vers)); - if (!error && (nd->nd_vers == NFS_VER3)) { - nfsm_chain_add_wcc_data(error, nd, &nmrep, - preattrerr, &preattr, postattrerr, &postattr); + if (error) { + nd->nd_repstat = error; + error = nfsrv_rephead(nd, slp, &nmrep, NFSX_WCCDATA(nd->nd_vers)); + if (!error && (nd->nd_vers == NFS_VER3)) { + nfsm_chain_add_wcc_data(error, nd, &nmrep, + preattrerr, &preattr, postattrerr, &postattr); + } + } else { + nd->nd_repstat = error; + error = nfsrv_rephead(nd, slp, &nmrep, NFSX_PREOPATTR(nd->nd_vers) + + NFSX_POSTOPORFATTR(nd->nd_vers) + 2 * NFSX_UNSIGNED + + NFSX_WRITEVERF(nd->nd_vers)); + if (!error && (nd->nd_vers == NFS_VER3)) { + nfsm_chain_add_wcc_data(error, nd, &nmrep, + preattrerr, &preattr, postattrerr, &postattr); + nfsm_chain_add_32(error, &nmrep, nd->nd_len); + nfsm_chain_add_32(error, 
&nmrep, nd->nd_stable); + /* write verifier */ + nfsm_chain_add_32(error, &nmrep, nx->nx_exptime.tv_sec); + nfsm_chain_add_32(error, &nmrep, nx->nx_exptime.tv_usec); + } else if (!error) { + error = nfsm_chain_add_fattr(nd, &nmrep, &postattr); + } } - } else { - nd->nd_repstat = error; - error = nfsrv_rephead(nd, slp, &nmrep, NFSX_PREOPATTR(nd->nd_vers) + - NFSX_POSTOPORFATTR(nd->nd_vers) + 2 * NFSX_UNSIGNED + - NFSX_WRITEVERF(nd->nd_vers)); - if (!error && (nd->nd_vers == NFS_VER3)) { - nfsm_chain_add_wcc_data(error, nd, &nmrep, - preattrerr, &preattr, postattrerr, &postattr); - nfsm_chain_add_32(error, &nmrep, nd->nd_len); - nfsm_chain_add_32(error, &nmrep, nd->nd_stable); - /* write verifier */ - nfsm_chain_add_32(error, &nmrep, nx->nx_exptime.tv_sec); - nfsm_chain_add_32(error, &nmrep, nx->nx_exptime.tv_usec); - } else if (!error) { - error = nfsm_chain_add_fattr(nd, &nmrep, &postattr); + nfsm_chain_build_done(error, &nmrep); + nfsmerr_if(error); + nd->nd_mrep = nmrep.nmc_mhead; + + /* + * Done. Put it at the head of the timer queue so that + * the final phase can return the reply. + */ + if (nd != swp) { + nd->nd_time = 1; + LIST_INSERT_HEAD(&slp->ns_tq, nd, nd_tq); + } + nd = swp->nd_coalesce.lh_first; + if (nd) { + LIST_REMOVE(nd, nd_tq); } - } - nfsm_chain_build_done(error, &nmrep); - nfsmerr_if(error); - nd->nd_mrep = nmrep.nmc_mhead; - - /* - * Done. Put it at the head of the timer queue so that - * the final phase can return the reply. - */ - if (nd != swp) { - nd->nd_time = 1; - LIST_INSERT_HEAD(&slp->ns_tq, nd, nd_tq); - } - nd = swp->nd_coalesce.lh_first; - if (nd) { - LIST_REMOVE(nd, nd_tq); - } } while (nd); swp->nd_time = 1; LIST_INSERT_HEAD(&slp->ns_tq, swp, nd_tq); @@ -1745,13 +1835,14 @@ loop1: /* * Search for a reply to return. */ - for (nd = slp->ns_tq.lh_first; nd; nd = nd->nd_tq.le_next) + for (nd = slp->ns_tq.lh_first; nd; nd = nd->nd_tq.le_next) { if (nd->nd_mrep) { - LIST_REMOVE(nd, nd_tq); - *mrepp = nd->nd_mrep; - *ndp = nd; - break; + LIST_REMOVE(nd, nd_tq); + *mrepp = nd->nd_mrep; + *ndp = nd; + break; } + } slp->ns_wgtime = slp->ns_tq.lh_first ? 
slp->ns_tq.lh_first->nd_time : 0; lck_mtx_unlock(&slp->ns_wgmutex); @@ -1770,7 +1861,7 @@ loop1: if (!nfsrv_wg_timer_on) { nfsrv_wg_timer_on = 1; nfs_interval_timer_start(nfsrv_wg_timer_call, - NFSRV_WGATHERDELAY); + NFSRV_WGATHERDELAY); } } else if (slp->ns_wgq.tqe_next != SLPNOLIST) { TAILQ_REMOVE(&nfsrv_sockwg, slp, ns_wgq); @@ -1778,7 +1869,7 @@ loop1: } lck_mtx_unlock(nfsd_mutex); - return (0); + return 0; } /* @@ -1799,27 +1890,32 @@ nfsrv_wg_coalesce(struct nfsrv_descript *owp, struct nfsrv_descript *nd) LIST_REMOVE(nd, nd_tq); if (owp->nd_eoff < nd->nd_eoff) { overlap = owp->nd_eoff - nd->nd_off; - if (overlap < 0) - return (EIO); - if (overlap > 0) + if (overlap < 0) { + return EIO; + } + if (overlap > 0) { mbuf_adj(nd->nd_nmreq.nmc_mhead, overlap); + } mp = owp->nd_nmreq.nmc_mhead; - while ((mpnext = mbuf_next(mp))) + while ((mpnext = mbuf_next(mp))) { mp = mpnext; + } error = mbuf_setnext(mp, nd->nd_nmreq.nmc_mhead); - if (error) - return (error); + if (error) { + return error; + } owp->nd_eoff = nd->nd_eoff; } else { mbuf_freem(nd->nd_nmreq.nmc_mhead); } nd->nd_nmreq.nmc_mhead = NULL; nd->nd_nmreq.nmc_mcur = NULL; - if (nd->nd_stable == NFS_WRITE_FILESYNC) + if (nd->nd_stable == NFS_WRITE_FILESYNC) { owp->nd_stable = NFS_WRITE_FILESYNC; - else if ((nd->nd_stable == NFS_WRITE_DATASYNC) && - (owp->nd_stable == NFS_WRITE_UNSTABLE)) + } else if ((nd->nd_stable == NFS_WRITE_DATASYNC) && + (owp->nd_stable == NFS_WRITE_UNSTABLE)) { owp->nd_stable = NFS_WRITE_DATASYNC; + } LIST_INSERT_HEAD(&owp->nd_coalesce, nd, nd_tq); /* @@ -1830,7 +1926,7 @@ nfsrv_wg_coalesce(struct nfsrv_descript *owp, struct nfsrv_descript *nd) LIST_REMOVE(p, nd_tq); LIST_INSERT_HEAD(&owp->nd_coalesce, p, nd_tq); } - return (0); + return 0; } /* @@ -1861,8 +1957,9 @@ nfsrv_wg_timer(__unused void *param0, __unused void *param1) nfsrv_wakenfsd(slp); continue; } - if (slp->ns_wgtime < next_usec) + if (slp->ns_wgtime < next_usec) { next_usec = slp->ns_wgtime; + } } } @@ -1877,8 +1974,9 @@ nfsrv_wg_timer(__unused void *param0, __unused void *param1) * Return the number of msec to wait again */ interval = (next_usec - cur_usec) / 1000; - if (interval < 1) + if (interval < 1) { interval = 1; + } nfs_interval_timer_start(nfsrv_wg_timer_call, interval); } @@ -1897,8 +1995,9 @@ nfsrv_group_sort(gid_t *list, int num) for (i = 1; i < num; i++) { v = list[i]; /* find correct slot for value v, moving others up */ - for (j = i; --j >= 0 && v < list[j];) + for (j = i; --j >= 0 && v < list[j];) { list[j + 1] = list[j]; + } list[j + 1] = v; } } @@ -1997,19 +2096,22 @@ nfsrv_create( case NFS_CREATE_EXCLUSIVE: nfsm_chain_get_opaque(error, nmreq, NFSX_V3CREATEVERF, cverf); exclusive_flag = 1; - if (vp == NULL) + if (vp == NULL) { VATTR_SET(vap, va_mode, 0); + } break; - }; + } + ; VATTR_SET(vap, va_type, VREG); } else { - enum vtype v_type; + enum vtype v_type; error = nfsm_chain_get_sattr(nd, nmreq, vap); nfsmerr_if(error); v_type = vap->va_type; - if (v_type == VNON) + if (v_type == VNON) { v_type = VREG; + } VATTR_SET(vap, va_type, v_type); switch (v_type) { @@ -2021,7 +2123,8 @@ nfsrv_create( break; default: break; - }; + } + ; } nfsmerr_if(error); @@ -2031,7 +2134,7 @@ nfsrv_create( * should I set the mode too ?? 
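nfsrv_group_sort() above is a plain insertion sort over the RPC credential's group list. The same routine, lifted into a self-contained user-space form (gid_t comes from <sys/types.h>):

#include <sys/types.h>
#include <stdio.h>

static void
group_sort(gid_t *list, int num)
{
    int i, j;
    gid_t v;

    for (i = 1; i < num; i++) {
        v = list[i];
        /* find the slot for v, shifting larger entries up */
        for (j = i; --j >= 0 && v < list[j];) {
            list[j + 1] = list[j];
        }
        list[j + 1] = v;
    }
}

int
main(void)
{
    gid_t g[] = { 20, 5, 80, 5, 12 };
    int i;

    group_sort(g, 5);
    for (i = 0; i < 5; i++) {
        printf("%d ", (int)g[i]);   /* prints 5 5 12 20 80 */
    }
    printf("\n");
    return 0;
}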
*/ if (vp == NULL) { - kauth_acl_t xacl = NULL; + kauth_acl_t xacl = NULL; /* authorize before creating */ error = nfsrv_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx, nxo, 0); @@ -2044,13 +2147,14 @@ nfsrv_create( 0 /* !isdir */, ctx); - if (!error && xacl != NULL) - VATTR_SET(vap, va_acl, xacl); + if (!error && xacl != NULL) { + VATTR_SET(vap, va_acl, xacl); + } } VATTR_CLEAR_ACTIVE(vap, va_data_size); VATTR_CLEAR_ACTIVE(vap, va_access_time); /* - * Server policy is to alway use the mapped rpc credential for + * Server policy is to alway use the mapped rpc credential for * file system object creation. This has the nice side effect of * enforcing BSD creation semantics */ @@ -2058,36 +2162,40 @@ nfsrv_create( VATTR_CLEAR_ACTIVE(vap, va_gid); /* validate new-file security information */ - if (!error) + if (!error) { error = vnode_authattr_new(dvp, vap, 0, ctx); + } if (!error) { error = vn_authorize_create(dvp, &ni.ni_cnd, vap, ctx, NULL); - if (error) + if (error) { error = EACCES; + } } if (vap->va_type == VREG || vap->va_type == VSOCK) { - - if (!error) + if (!error) { error = VNOP_CREATE(dvp, &vp, &ni.ni_cnd, vap, ctx); + } - if (!error && !VATTR_ALL_SUPPORTED(vap)) - /* + if (!error && !VATTR_ALL_SUPPORTED(vap)) { + /* * If some of the requested attributes weren't handled by the VNOP, * use our fallback code. */ error = vnode_setattr_fallback(vp, vap, ctx); + } - if (xacl != NULL) + if (xacl != NULL) { kauth_acl_free(xacl); + } if (!error) { if (exclusive_flag) { exclusive_flag = 0; VATTR_INIT(vap); bcopy(cverf, (caddr_t)&vap->va_access_time, - NFSX_V3CREATEVERF); + NFSX_V3CREATEVERF); VATTR_SET_ACTIVE(vap, va_access_time); // skip authorization, as this is an // NFS internal implementation detail. @@ -2096,17 +2204,17 @@ nfsrv_create( #if CONFIG_FSE if (nfsrv_fsevents_enabled && need_fsevent(FSE_CREATE_FILE, vp)) { - add_fsevent(FSE_CREATE_FILE, ctx, - FSE_ARG_VNODE, vp, - FSE_ARG_DONE); + add_fsevent(FSE_CREATE_FILE, ctx, + FSE_ARG_VNODE, vp, + FSE_ARG_DONE); } #endif } - } else if (vap->va_type == VCHR || vap->va_type == VBLK || - vap->va_type == VFIFO) { - if (vap->va_type == VCHR && rdev == (int)0xffffffff) + vap->va_type == VFIFO) { + if (vap->va_type == VCHR && rdev == (int)0xffffffff) { VATTR_SET(vap, va_type, VFIFO); + } if (vap->va_type != VFIFO) { error = suser(nd->nd_cr, NULL); nfsmerr_if(error); @@ -2115,8 +2223,9 @@ nfsrv_create( error = VNOP_MKNOD(dvp, &vp, &ni.ni_cnd, vap, ctx); - if (xacl != NULL) + if (xacl != NULL) { kauth_acl_free(xacl); + } nfsmerr_if(error); @@ -2141,8 +2250,9 @@ nfsrv_create( ni.ni_usedvp = ni.ni_dvp = ni.ni_startdir = dvp; } if (!error) { - if (ni.ni_cnd.cn_flags & ISSYMLINK) + if (ni.ni_cnd.cn_flags & ISSYMLINK) { error = EINVAL; + } vp = ni.ni_vp; } nfsmerr_if(error); @@ -2158,11 +2268,11 @@ nfsrv_create( vnode_put(dvp); } else { - /* + /* * nameidone has to happen before we vnode_put(dvp) * since it may need to release the fs_nodelock on the dvp */ - nameidone(&ni); + nameidone(&ni); ni.ni_cnd.cn_nameiop = 0; vnode_put(dvp); @@ -2171,8 +2281,9 @@ nfsrv_create( if (!error && VATTR_IS_ACTIVE(vap, va_data_size)) { /* NOTE: File has not been open for NFS case, so NOCRED for filecred */ error = mac_vnode_check_truncate(ctx, NOCRED, vp); - if (error) + if (error) { error = EACCES; + } } #endif if (!error && VATTR_IS_ACTIVE(vap, va_data_size)) { @@ -2191,17 +2302,20 @@ nfsrv_create( if (!error) { nfsm_srv_vattr_init(&postattr, nd->nd_vers); postattrerr = vnode_getattr(vp, &postattr, ctx); - if (nd->nd_vers == NFS_VER2) + if (nd->nd_vers == 
NFS_VER2) { error = postattrerr; + } } } - if (vp) - vnode_put(vp); + if (vp) { + vnode_put(vp); + } if (nd->nd_vers == NFS_VER3) { if (exclusive_flag && !error && - bcmp(cverf, &postattr.va_access_time, NFSX_V3CREATEVERF)) + bcmp(cverf, &postattr.va_access_time, NFSX_V3CREATEVERF)) { error = EEXIST; + } nfsm_srv_vattr_init(&dpostattr, NFS_VER3); dpostattrerr = vnode_getattr(dirp, &dpostattr, ctx); vnode_put(dirp); @@ -2212,7 +2326,7 @@ nfsmerr: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_SRVFH(nd->nd_vers, &nfh) + - NFSX_FATTR(nd->nd_vers) + NFSX_WCCDATA(nd->nd_vers)); + NFSX_FATTR(nd->nd_vers) + NFSX_WCCDATA(nd->nd_vers)); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); @@ -2222,32 +2336,35 @@ nfsmerr: nfsm_chain_add_postop_attr(error, nd, &nmrep, postattrerr, &postattr); } nfsm_chain_add_wcc_data(error, nd, &nmrep, - dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); } else { nfsm_chain_add_fh(error, &nmrep, NFS_VER2, nfh.nfh_fhp, nfh.nfh_len); - if (!error) + if (!error) { error = nfsm_chain_add_fattr(nd, &nmrep, &postattr); + } } nfsmout: nfsm_chain_build_done(error, &nmrep); if (ni.ni_cnd.cn_nameiop) { - /* + /* * nameidone has to happen before we vnode_put(dvp) * since it may need to release the fs_nodelock on the dvp */ nameidone(&ni); - if (vp) + if (vp) { vnode_put(vp); + } vnode_put(dvp); } - if (dirp) + if (dirp) { vnode_put(dirp); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -2357,13 +2474,14 @@ nfsrv_mknod( 0 /* !isdir */, ctx); - if (!error && xacl != NULL) - VATTR_SET(vap, va_acl, xacl); + if (!error && xacl != NULL) { + VATTR_SET(vap, va_acl, xacl); + } } VATTR_CLEAR_ACTIVE(vap, va_data_size); VATTR_CLEAR_ACTIVE(vap, va_access_time); /* - * Server policy is to alway use the mapped rpc credential for + * Server policy is to alway use the mapped rpc credential for * file system object creation. This has the nice side effect of * enforcing BSD creation semantics */ @@ -2371,30 +2489,36 @@ nfsrv_mknod( VATTR_CLEAR_ACTIVE(vap, va_gid); /* validate new-file security information */ - if (!error) + if (!error) { error = vnode_authattr_new(dvp, vap, 0, ctx); + } if (!error) { error = vn_authorize_create(dvp, &ni.ni_cnd, vap, ctx, NULL); - if (error) + if (error) { error = EACCES; + } } - if (error) + if (error) { goto out1; + } if (vtyp == VSOCK) { error = VNOP_CREATE(dvp, &vp, &ni.ni_cnd, vap, ctx); - if (!error && !VATTR_ALL_SUPPORTED(vap)) - /* + if (!error && !VATTR_ALL_SUPPORTED(vap)) { + /* * If some of the requested attributes weren't handled by the VNOP, * use our fallback code. 
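For NFSv3 exclusive CREATE, the hunks above stash the client's 8-byte create verifier in the new file's access time (bcopy into va_access_time) and, on a later attempt against an existing file, bcmp it against the on-disk atime: a match means the request is a retransmission of the create that made the file, a mismatch is a genuine collision and earns EEXIST. A sketch of that comparison, using struct timespec as a stand-in for the kernel's va_access_time:

#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <time.h>

#define CREATEVERF_LEN 8

/* Returns 0 if the verifier matches the stored atime, EEXIST otherwise. */
static int
check_exclusive_verifier(const unsigned char verf[CREATEVERF_LEN],
    const struct timespec *stored_atime)
{
    return memcmp(verf, stored_atime, CREATEVERF_LEN) ? EEXIST : 0;
}

int
main(void)
{
    unsigned char verf[CREATEVERF_LEN] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    struct timespec atime;

    memcpy(&atime, verf, CREATEVERF_LEN);   /* what the create stored */
    printf("%d\n", check_exclusive_verifier(verf, &atime));   /* 0: replay */
    return 0;
}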
*/ error = vnode_setattr_fallback(vp, vap, ctx); + } } else { - if (vtyp != VFIFO && (error = suser(nd->nd_cr, (u_short *)0))) + if (vtyp != VFIFO && (error = suser(nd->nd_cr, (u_short *)0))) { goto out1; - if ((error = VNOP_MKNOD(dvp, &vp, &ni.ni_cnd, vap, ctx))) + } + if ((error = VNOP_MKNOD(dvp, &vp, &ni.ni_cnd, vap, ctx))) { goto out1; + } if (vp) { vnode_recycle(vp); vnode_put(vp); @@ -2416,14 +2540,16 @@ nfsrv_mknod( ni.ni_usedvp = ni.ni_dvp = ni.ni_startdir = dvp; } if (!error) { - vp = ni.ni_vp; - if (ni.ni_cnd.cn_flags & ISSYMLINK) - error = EINVAL; + vp = ni.ni_vp; + if (ni.ni_cnd.cn_flags & ISSYMLINK) { + error = EINVAL; + } } } out1: - if (xacl != NULL) + if (xacl != NULL) { kauth_acl_free(xacl); + } out: /* * nameidone has to happen before we vnode_put(dvp) @@ -2456,7 +2582,7 @@ nfsmerr: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_SRVFH(NFS_VER3, &nfh) + - NFSX_POSTOPATTR(NFS_VER3) + NFSX_WCCDATA(NFS_VER3)); + NFSX_POSTOPATTR(NFS_VER3) + NFSX_WCCDATA(NFS_VER3)); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); @@ -2465,31 +2591,35 @@ nfsmerr: nfsm_chain_add_postop_attr(error, nd, &nmrep, postattrerr, &postattr); } nfsm_chain_add_wcc_data(error, nd, &nmrep, - dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); nfsmout: nfsm_chain_build_done(error, &nmrep); if (ni.ni_cnd.cn_nameiop) { - /* + /* * nameidone has to happen before we vnode_put(dvp) * since it may need to release the fs_nodelock on the dvp */ nameidone(&ni); - if (vp) + if (vp) { vnode_put(vp); + } vnode_put(dvp); } - if (dvp) + if (dvp) { vnode_put(dvp); - if (vp) + } + if (vp) { vnode_put(vp); - if (dirp) + } + if (dirp) { vnode_put(dirp); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -2543,7 +2673,7 @@ nfsrv_remove( } } if (dirp) { - if (nd->nd_vers == NFS_VER3) { + if (nd->nd_vers == NFS_VER3) { nfsm_srv_pre_vattr_init(&dpreattr); dpreattrerr = vnode_getattr(dirp, &dpreattr, ctx); } else { @@ -2556,20 +2686,22 @@ nfsrv_remove( dvp = ni.ni_dvp; vp = ni.ni_vp; - if (vnode_vtype(vp) == VDIR) - error = EPERM; /* POSIX */ - else if (vnode_isvroot(vp)) - /* + if (vnode_vtype(vp) == VDIR) { + error = EPERM; /* POSIX */ + } else if (vnode_isvroot(vp)) { + /* * The root of a mounted filesystem cannot be deleted. 
*/ error = EBUSY; - else + } else { error = nfsrv_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx, nxo, 0); + } if (!error) { error = vn_authorize_unlink(dvp, vp, &ni.ni_cnd, ctx, NULL); - if (error) + if (error) { error = EACCES; + } } if (!error) { @@ -2577,7 +2709,7 @@ nfsrv_remove( char *path = NULL; int plen; fse_info finfo; - + if (nfsrv_fsevents_enabled && need_fsevent(FSE_DELETE, dvp)) { plen = MAXPATHLEN; if ((path = get_pathbuff()) && !vn_getpath(vp, path, &plen)) { @@ -2588,16 +2720,17 @@ nfsrv_remove( } } #endif - error = VNOP_REMOVE(dvp, vp, &ni.ni_cnd, 0, ctx); - + error = VNOP_REMOVE(dvp, vp, &ni.ni_cnd, 0, ctx); + #if CONFIG_FSE if (path) { - if (!error) + if (!error) { add_fsevent(FSE_DELETE, ctx, - FSE_ARG_STRING, plen, path, - FSE_ARG_FINFO, &finfo, - FSE_ARG_DONE); - release_pathbuff(path); + FSE_ARG_STRING, plen, path, + FSE_ARG_FINFO, &finfo, + FSE_ARG_DONE); + } + release_pathbuff(path); } #endif } @@ -2609,13 +2742,13 @@ nfsrv_remove( nameidone(&ni); vnode_put(vp); - vnode_put(dvp); + vnode_put(dvp); } nfsmerr: if (dirp) { nfsm_srv_vattr_init(&dpostattr, nd->nd_vers); - dpostattrerr = vnode_getattr(dirp, &dpostattr, ctx); + dpostattrerr = vnode_getattr(dirp, &dpostattr, ctx); vnode_put(dirp); } @@ -2625,16 +2758,17 @@ nfsmerr: nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_wcc_data(error, nd, &nmrep, - dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + } nfsmout: nfsm_chain_build_done(error, &nmrep); if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -2667,7 +2801,7 @@ nfsrv_rename( struct nfsm_chain *nmreq, nmrep; char *from_name, *to_name; #if CONFIG_FSE - int from_len=0, to_len=0; + int from_len = 0, to_len = 0; fse_info from_finfo, to_finfo; #endif u_char didstats = 0; @@ -2728,8 +2862,9 @@ retry: fromni.ni_cnd.cn_ndp = &fromni; error = nfsrv_namei(nd, ctx, &fromni, &fnfh, &fdirp, &fnx, &fnxo); - if (error) + if (error) { goto out; + } fdvp = fromni.ni_dvp; fvp = fromni.ni_vp; @@ -2763,8 +2898,9 @@ retry: toni.ni_cnd.cn_flags |= HASBUF; toni.ni_cnd.cn_ndp = &toni; - if (fvtype == VDIR) + if (fvtype == VDIR) { toni.ni_cnd.cn_flags |= WILLBEDIR; + } tnx = NULL; error = nfsrv_namei(nd, ctx, &toni, &tnfh, &tdirp, &tnx, &tnxo); @@ -2772,11 +2908,12 @@ retry: /* * Translate error code for rename("dir1", "dir2/."). 
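Several of the rename corner cases below pick their errno by protocol version, since NFSv2 lacks distinct codes for some of these failures. A compact sketch of that version-dependent translation; the helper names are illustrative, not from the source:

#include <errno.h>
#include <stdio.h>

#define VERS2 2
#define VERS3 3

/* directory/non-directory mismatch at the rename target */
static int
rename_type_mismatch_err(int vers, int from_is_dir)
{
    if (vers == VERS3) {
        return EEXIST;
    }
    return from_is_dir ? EISDIR : ENOTDIR;
}

/* crossing a mount point (or other unsupported tree reshaping) */
static int
rename_xdev_err(int vers)
{
    return (vers == VERS3) ? EXDEV : ENOTEMPTY;
}

int
main(void)
{
    printf("%d %d\n",
        rename_type_mismatch_err(VERS2, 1),   /* EISDIR for v2 */
        rename_xdev_err(VERS3));              /* EXDEV for v3 */
    return 0;
}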
*/ - if (error == EISDIR && fvtype == VDIR) { - if (nd->nd_vers == NFS_VER3) - error = EINVAL; - else - error = ENOTEMPTY; + if (error == EISDIR && fvtype == VDIR) { + if (nd->nd_vers == NFS_VER3) { + error = EINVAL; + } else { + error = ENOTEMPTY; + } } goto out; } @@ -2809,31 +2946,35 @@ retry: tvtype = vnode_vtype(tvp); if (fvtype == VDIR && tvtype != VDIR) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EEXIST; - else + } else { error = EISDIR; + } goto out; } else if (fvtype != VDIR && tvtype == VDIR) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EEXIST; - else + } else { error = ENOTDIR; + } goto out; } if (tvtype == VDIR && vnode_mountedhere(tvp)) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EXDEV; - else + } else { error = ENOTEMPTY; + } goto out; } } if (fvp == tdvp) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EINVAL; - else + } else { error = ENOTEMPTY; + } goto out; } @@ -2854,53 +2995,61 @@ retry: error = 0; if ((tvp != NULL) && vnode_isdir(tvp)) { - if (tvp != fdvp) + if (tvp != fdvp) { moving = 1; + } } else if (tdvp != fdvp) { moving = 1; } if (moving) { /* moving out of fdvp, must have delete rights */ - if ((error = nfsrv_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx, fnxo, 0)) != 0) + if ((error = nfsrv_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx, fnxo, 0)) != 0) { goto auth_exit; + } /* moving into tdvp or tvp, must have rights to add */ if ((error = nfsrv_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp, - NULL, - vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, - ctx, tnxo, 0)) != 0) + NULL, + vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, + ctx, tnxo, 0)) != 0) { goto auth_exit; + } } else { /* node staying in same directory, must be allowed to add new name */ if ((error = nfsrv_authorize(fdvp, NULL, - vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, - ctx, fnxo, 0)) != 0) + vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, + ctx, fnxo, 0)) != 0) { goto auth_exit; + } } /* overwriting tvp */ if ((tvp != NULL) && !vnode_isdir(tvp) && - ((error = nfsrv_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx, tnxo, 0)) != 0)) + ((error = nfsrv_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx, tnxo, 0)) != 0)) { goto auth_exit; + } if (!error && - ((error = vn_authorize_rename(fdvp, fvp, &fromni.ni_cnd , tdvp, tvp, &toni.ni_cnd , ctx, NULL)) != 0)) { - if (error) + ((error = vn_authorize_rename(fdvp, fvp, &fromni.ni_cnd, tdvp, tvp, &toni.ni_cnd, ctx, NULL)) != 0)) { + if (error) { error = EACCES; + } goto auth_exit; } /* XXX more checks? 
*/ auth_exit: /* authorization denied */ - if (error != 0) + if (error != 0) { goto out; + } } if ((vnode_mount(fvp) != vnode_mount(tdvp)) || (tvp && (vnode_mount(fvp) != vnode_mount(tvp)))) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EXDEV; - else + } else { error = ENOTEMPTY; + } goto out; } /* @@ -2919,17 +3068,19 @@ auth_exit: * o tvp */ if (tdvp->v_parent == fvp) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EXDEV; - else + } else { error = ENOTEMPTY; + } goto out; } if (fvtype == VDIR && vnode_mountedhere(fvp)) { - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = EXDEV; - else + } else { error = ENOTEMPTY; + } goto out; } /* @@ -2951,24 +3102,24 @@ auth_exit: */ if ((fvp == tvp) && (fdvp == tdvp)) { if (fromni.ni_cnd.cn_namelen == toni.ni_cnd.cn_namelen && - !bcmp(fromni.ni_cnd.cn_nameptr, toni.ni_cnd.cn_nameptr, - fromni.ni_cnd.cn_namelen)) { + !bcmp(fromni.ni_cnd.cn_nameptr, toni.ni_cnd.cn_nameptr, + fromni.ni_cnd.cn_namelen)) { goto out; } } if (holding_mntlock && vnode_mount(fvp) != locked_mp) { - /* + /* * we're holding a reference and lock * on locked_mp, but it no longer matches * what we want to do... so drop our hold */ mount_unlock_renames(locked_mp); mount_drop(locked_mp, 0); - holding_mntlock = 0; + holding_mntlock = 0; } if (tdvp != fdvp && fvtype == VDIR) { - /* + /* * serialize renames that re-shape * the tree... if holding_mntlock is * set, then we're ready to go... @@ -2979,8 +3130,8 @@ auth_exit: * then finally start the lookup * process over with the lock held */ - if (!holding_mntlock) { - /* + if (!holding_mntlock) { + /* * need to grab a reference on * the mount point before we * drop all the iocounts... once @@ -2992,8 +3143,9 @@ auth_exit: /* make a copy of to path to pass to nfsrv_namei() again */ MALLOC_ZONE(topath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (topath) + if (topath) { bcopy(toni.ni_cnd.cn_pnbuf, topath, tolen + 1); + } /* * nameidone has to happen before we vnode_put(tdvp) @@ -3001,14 +3153,16 @@ auth_exit: */ nameidone(&toni); - if (tvp) - vnode_put(tvp); + if (tvp) { + vnode_put(tvp); + } vnode_put(tdvp); /* make a copy of from path to pass to nfsrv_namei() again */ MALLOC_ZONE(frompath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (frompath) + if (frompath) { bcopy(fromni.ni_cnd.cn_pnbuf, frompath, fromlen + 1); + } /* * nameidone has to happen before we vnode_put(fdvp) @@ -3020,11 +3174,11 @@ auth_exit: vnode_put(fdvp); if (fdirp) { - vnode_put(fdirp); + vnode_put(fdirp); fdirp = NULL; } if (tdirp) { - vnode_put(tdirp); + vnode_put(tdirp); tdirp = NULL; } mount_lock_renames(locked_mp); @@ -3051,7 +3205,7 @@ auth_exit: goto retry; } } else { - /* + /* * when we dropped the iocounts to take * the lock, we allowed the identity of * the various vnodes to change... 
if they did, @@ -3061,10 +3215,10 @@ auth_exit: * so we're free to drop the lock at this point * and continue on */ - if (holding_mntlock) { + if (holding_mntlock) { mount_unlock_renames(locked_mp); mount_drop(locked_mp, 0); - holding_mntlock = 0; + holding_mntlock = 0; } } @@ -3080,16 +3234,17 @@ auth_exit: #if CONFIG_FSE if (nfsrv_fsevents_enabled && need_fsevent(FSE_RENAME, fvp)) { int from_truncated = 0, to_truncated = 0; - - get_fse_info(fvp, &from_finfo, ctx); - if (tvp) - get_fse_info(tvp, &to_finfo, ctx); - from_name = get_pathbuff(); + get_fse_info(fvp, &from_finfo, ctx); + if (tvp) { + get_fse_info(tvp, &to_finfo, ctx); + } + + from_name = get_pathbuff(); if (from_name) { from_len = safe_getpath(fdvp, fromni.ni_cnd.cn_nameptr, from_name, MAXPATHLEN, &from_truncated); } - + to_name = from_name ? get_pathbuff() : NULL; if (to_name) { to_len = safe_getpath(tdvp, toni.ni_cnd.cn_nameptr, to_name, MAXPATHLEN, &to_truncated); @@ -3098,10 +3253,9 @@ auth_exit: if (from_truncated || to_truncated) { from_finfo.mode |= FSE_TRUNCATED_PATH; } - } else { - from_name = NULL; - to_name = NULL; + from_name = NULL; + to_name = NULL; } #else /* CONFIG_FSE */ from_name = NULL; @@ -3109,9 +3263,9 @@ auth_exit: #endif /* CONFIG_FSE */ error = VNOP_RENAME(fromni.ni_dvp, fromni.ni_vp, &fromni.ni_cnd, - toni.ni_dvp, toni.ni_vp, &toni.ni_cnd, ctx); + toni.ni_dvp, toni.ni_vp, &toni.ni_cnd, ctx); /* - * fix up name & parent pointers. note that we first + * fix up name & parent pointers. note that we first * check that fvp has the same name/parent pointers it * had before the rename call... this is a 'weak' check * at best... @@ -3119,10 +3273,11 @@ auth_exit: if (oname == fvp->v_name && oparent == fvp->v_parent) { int update_flags; update_flags = VNODE_UPDATE_NAME; - if (fdvp != tdvp) + if (fdvp != tdvp) { update_flags |= VNODE_UPDATE_PARENT; + } vnode_update_identity(fvp, tdvp, toni.ni_cnd.cn_nameptr, - toni.ni_cnd.cn_namelen, toni.ni_cnd.cn_hash, update_flags); + toni.ni_cnd.cn_namelen, toni.ni_cnd.cn_hash, update_flags); } /* @@ -3131,43 +3286,46 @@ auth_exit: */ #if CONFIG_FSE if (nfsrv_fsevents_enabled && !error && from_name && to_name) { - if (tvp) { - add_fsevent(FSE_RENAME, ctx, - FSE_ARG_STRING, from_len, from_name, - FSE_ARG_FINFO, &from_finfo, - FSE_ARG_STRING, to_len, to_name, - FSE_ARG_FINFO, &to_finfo, - FSE_ARG_DONE); + if (tvp) { + add_fsevent(FSE_RENAME, ctx, + FSE_ARG_STRING, from_len, from_name, + FSE_ARG_FINFO, &from_finfo, + FSE_ARG_STRING, to_len, to_name, + FSE_ARG_FINFO, &to_finfo, + FSE_ARG_DONE); } else { - add_fsevent(FSE_RENAME, ctx, - FSE_ARG_STRING, from_len, from_name, - FSE_ARG_FINFO, &from_finfo, - FSE_ARG_STRING, to_len, to_name, - FSE_ARG_DONE); + add_fsevent(FSE_RENAME, ctx, + FSE_ARG_STRING, from_len, from_name, + FSE_ARG_FINFO, &from_finfo, + FSE_ARG_STRING, to_len, to_name, + FSE_ARG_DONE); } } - if (from_name) - release_pathbuff(from_name); - if (to_name) - release_pathbuff(to_name); + if (from_name) { + release_pathbuff(from_name); + } + if (to_name) { + release_pathbuff(to_name); + } #endif /* CONFIG_FSE */ from_name = to_name = NULL; - + out: if (holding_mntlock) { - mount_unlock_renames(locked_mp); + mount_unlock_renames(locked_mp); mount_drop(locked_mp, 0); holding_mntlock = 0; } if (tdvp) { - /* + /* * nameidone has to happen before we vnode_put(tdvp) * since it may need to release the fs_nodelock on the tdvp */ nameidone(&toni); - if (tvp) - vnode_put(tvp); - vnode_put(tdvp); + if (tvp) { + vnode_put(tvp); + } + vnode_put(tdvp); tdvp = NULL; } @@ -3178,9 
+3336,10 @@ out: */ nameidone(&fromni); - if (fvp) - vnode_put(fvp); - vnode_put(fdvp); + if (fvp) { + vnode_put(fvp); + } + vnode_put(fdvp); fdvp = NULL; } @@ -3206,14 +3365,14 @@ nfsmerr: nfsmout_on_status(nd, error); if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_wcc_data(error, nd, &nmrep, - fdpreattrerr, &fdpreattr, fdpostattrerr, &fdpostattr); + fdpreattrerr, &fdpreattr, fdpostattrerr, &fdpostattr); nfsm_chain_add_wcc_data(error, nd, &nmrep, - tdpreattrerr, &tdpreattr, tdpostattrerr, &tdpostattr); + tdpreattrerr, &tdpreattr, tdpostattrerr, &tdpostattr); } nfsmout: nfsm_chain_build_done(error, &nmrep); if (holding_mntlock) { - mount_unlock_renames(locked_mp); + mount_unlock_renames(locked_mp); mount_drop(locked_mp, 0); } if (tdvp) { @@ -3223,9 +3382,10 @@ nfsmout: */ nameidone(&toni); - if (tvp) - vnode_put(tvp); - vnode_put(tdvp); + if (tvp) { + vnode_put(tvp); + } + vnode_put(tdvp); } if (fdvp) { /* @@ -3234,25 +3394,31 @@ nfsmout: */ nameidone(&fromni); - if (fvp) - vnode_put(fvp); - vnode_put(fdvp); + if (fvp) { + vnode_put(fvp); + } + vnode_put(fdvp); } - if (fdirp) + if (fdirp) { vnode_put(fdirp); - if (tdirp) + } + if (tdirp) { vnode_put(tdirp); - if (frompath) + } + if (frompath) { FREE_ZONE(frompath, MAXPATHLEN, M_NAMEI); - if (topath) + } + if (topath) { FREE_ZONE(topath, MAXPATHLEN, M_NAMEI); - if (saved_cred) + } + if (saved_cred) { kauth_cred_unref(&saved_cred); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -3300,13 +3466,14 @@ nfsrv_link( /* we're not allowed to link to directories... */ if (vnode_vtype(vp) == VDIR) { - error = EPERM; /* POSIX */ + error = EPERM; /* POSIX */ goto out; } /* ...or to anything that kauth doesn't want us to (eg. immutable items) */ - if ((error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_LINKTARGET, ctx, nxo, 0)) != 0) + if ((error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_LINKTARGET, ctx, nxo, 0)) != 0) { goto out; + } ni.ni_cnd.cn_nameiop = CREATE; #if CONFIG_TRIGGERS @@ -3314,8 +3481,9 @@ nfsrv_link( #endif ni.ni_cnd.cn_flags = LOCKPARENT; error = nfsm_chain_get_path_namei(nmreq, len, &ni); - if (!error) + if (!error) { error = nfsrv_namei(nd, ctx, &ni, &dnfh, &dirp, &nx, &nxo); + } if (dirp) { if (nd->nd_vers == NFS_VER3) { nfsm_srv_pre_vattr_init(&dpreattr); @@ -3325,32 +3493,36 @@ nfsrv_link( dirp = NULL; } } - if (error) + if (error) { goto out; + } dvp = ni.ni_dvp; xp = ni.ni_vp; - if (xp != NULL) + if (xp != NULL) { error = EEXIST; - else if (vnode_mount(vp) != vnode_mount(dvp)) + } else if (vnode_mount(vp) != vnode_mount(dvp)) { error = EXDEV; - else + } else { error = nfsrv_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx, nxo, 0); + } #if CONFIG_MACF if (!error) { error = mac_vnode_check_link(ctx, dvp, vp, &ni.ni_cnd); - if (error) + if (error) { error = EACCES; + } } #endif - if (!error) + if (!error) { error = VNOP_LINK(vp, dvp, &ni.ni_cnd, ctx); + } #if CONFIG_FSE if (nfsrv_fsevents_enabled && !error && need_fsevent(FSE_CREATE_FILE, dvp)) { char *target_path = NULL; - int plen, truncated=0; + int plen, truncated = 0; fse_info finfo; /* build the path to the new link file */ @@ -3363,9 +3535,9 @@ nfsrv_link( finfo.mode |= FSE_TRUNCATED_PATH; } add_fsevent(FSE_CREATE_FILE, ctx, - FSE_ARG_STRING, plen, target_path, - FSE_ARG_FINFO, &finfo, - FSE_ARG_DONE); + FSE_ARG_STRING, plen, target_path, + FSE_ARG_FINFO, &finfo, + FSE_ARG_DONE); } release_pathbuff(target_path); @@ -3379,8 +3551,9 @@ nfsrv_link( */ nameidone(&ni); - if (xp) + if (xp) { vnode_put(xp); + } vnode_put(dvp); out: if 
(nd->nd_vers == NFS_VER3) { @@ -3406,17 +3579,18 @@ nfsmerr: if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_postop_attr(error, nd, &nmrep, attrerr, &attr); nfsm_chain_add_wcc_data(error, nd, &nmrep, - dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); } nfsmout: nfsm_chain_build_done(error, &nmrep); - if (vp) + if (vp) { vnode_put(vp); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -3441,7 +3615,7 @@ nfsrv_symlink( struct nfs_export *nx = NULL; struct nfs_export_options *nxo; uio_t auio = NULL; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; struct nfsm_chain *nmreq, nmrep; error = 0; @@ -3496,25 +3670,29 @@ nfsrv_symlink( vp = ni.ni_vp; VATTR_INIT(vap); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { error = nfsm_chain_get_sattr(nd, nmreq, vap); + } nfsm_chain_get_32(error, nmreq, linkdatalen); if (!error && (((nd->nd_vers == NFS_VER2) && (linkdatalen > NFS_MAXPATHLEN)) || - ((nd->nd_vers == NFS_VER3) && (linkdatalen > MAXPATHLEN)))) + ((nd->nd_vers == NFS_VER3) && (linkdatalen > MAXPATHLEN)))) { error = NFSERR_NAMETOL; + } nfsmerr_if(error); MALLOC(linkdata, caddr_t, linkdatalen + 1, M_TEMP, M_WAITOK); - if (linkdata) + if (linkdata) { auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); + } if (!linkdata || !auio) { error = ENOMEM; goto out; } uio_addiov(auio, CAST_USER_ADDR_T(linkdata), linkdatalen); error = nfsm_chain_get_uio(nmreq, linkdatalen, auio); - if (!error && (nd->nd_vers == NFS_VER2)) + if (!error && (nd->nd_vers == NFS_VER2)) { error = nfsm_chain_get_sattr(nd, nmreq, vap); + } nfsmerr_if(error); *(linkdata + linkdatalen) = '\0'; if (vp) { @@ -3526,7 +3704,7 @@ nfsrv_symlink( VATTR_CLEAR_ACTIVE(vap, va_data_size); VATTR_CLEAR_ACTIVE(vap, va_access_time); /* - * Server policy is to alway use the mapped rpc credential for + * Server policy is to alway use the mapped rpc credential for * file system object creation. 
This has the nice side effect of * enforcing BSD creation semantics */ @@ -3537,16 +3715,19 @@ nfsrv_symlink( error = nfsrv_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx, nxo, 0); /* validate given attributes */ - if (!error) + if (!error) { error = vnode_authattr_new(dvp, vap, 0, ctx); + } if (!error) { error = vn_authorize_create(dvp, &ni.ni_cnd, vap, ctx, NULL); - if (error) + if (error) { error = EACCES; + } } - if (!error) + if (!error) { error = VNOP_SYMLINK(dvp, &vp, &ni.ni_cnd, vap, linkdata, ctx); + } if (!error && (nd->nd_vers == NFS_VER3)) { if (vp == NULL) { @@ -3566,8 +3747,9 @@ nfsrv_symlink( ni.ni_cnd.cn_nameptr = ni.ni_cnd.cn_pnbuf; ni.ni_usedvp = ni.ni_dvp = ni.ni_startdir = dvp; } - if (!error) - vp = ni.ni_vp; + if (!error) { + vp = ni.ni_vp; + } } if (!error) { error = nfsrv_vptofh(nx, NFS_VER3, NULL, vp, ctx, &nfh); @@ -3581,8 +3763,8 @@ nfsrv_symlink( #if CONFIG_FSE if (nfsrv_fsevents_enabled && !error && vp) { add_fsevent(FSE_CREATE_FILE, ctx, - FSE_ARG_VNODE, vp, - FSE_ARG_DONE); + FSE_ARG_VNODE, vp, + FSE_ARG_DONE); } #endif out: @@ -3592,8 +3774,9 @@ out: */ nameidone(&ni); ni.ni_cnd.cn_nameiop = 0; - if (vp) - vnode_put(vp); + if (vp) { + vnode_put(vp); + } vnode_put(dvp); out1: if (linkdata) { @@ -3611,7 +3794,7 @@ nfsmerr: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_SRVFH(nd->nd_vers, &nfh) + - NFSX_POSTOPATTR(nd->nd_vers) + NFSX_WCCDATA(nd->nd_vers)); + NFSX_POSTOPATTR(nd->nd_vers) + NFSX_WCCDATA(nd->nd_vers)); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); @@ -3621,36 +3804,39 @@ nfsmerr: nfsm_chain_add_postop_attr(error, nd, &nmrep, postattrerr, &postattr); } nfsm_chain_add_wcc_data(error, nd, &nmrep, - dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); } nfsmout: nfsm_chain_build_done(error, &nmrep); if (ni.ni_cnd.cn_nameiop) { - /* + /* * nameidone has to happen before we vnode_put(dvp) * since it may need to release the fs_nodelock on the dvp */ nameidone(&ni); - if (vp) + if (vp) { vnode_put(vp); + } vnode_put(dvp); } - if (dirp) + if (dirp) { vnode_put(dirp); - if (linkdata) + } + if (linkdata) { FREE(linkdata, M_TEMP); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* * nfs mkdir service */ - + int nfsrv_mkdir( struct nfsrv_descript *nd, @@ -3725,11 +3911,11 @@ nfsrv_mkdir( VATTR_SET(vap, va_type, VDIR); if (vp != NULL) { - /* + /* * nameidone has to happen before we vnode_put(dvp) * since it may need to release the fs_nodelock on the dvp */ - nameidone(&ni); + nameidone(&ni); vnode_put(dvp); vnode_put(vp); error = EEXIST; @@ -3742,12 +3928,13 @@ nfsrv_mkdir( if (!error) { error = kauth_acl_inherit(dvp, NULL, - &xacl, /* isdir */ + &xacl, /* isdir */ 1, ctx); - - if (!error && xacl != NULL) - VATTR_SET(vap, va_acl, xacl); + + if (!error && xacl != NULL) { + VATTR_SET(vap, va_acl, xacl); + } } VATTR_CLEAR_ACTIVE(vap, va_data_size); @@ -3755,12 +3942,13 @@ nfsrv_mkdir( /* * We don't support the S_ISGID bit for directories. Solaris and other * SRV4 derived systems might set this to get BSD semantics, which we enforce - * any ways. + * any ways. */ - if (VATTR_IS_ACTIVE(vap, va_mode)) + if (VATTR_IS_ACTIVE(vap, va_mode)) { vap->va_mode &= ~S_ISGID; + } /* - * Server policy is to alway use the mapped rpc credential for + * Server policy is to alway use the mapped rpc credential for * file system object creation. 
This has the nice side effect of * enforcing BSD creation semantics */ @@ -3768,46 +3956,54 @@ nfsrv_mkdir( VATTR_CLEAR_ACTIVE(vap, va_gid); /* validate new-file security information */ - if (!error) + if (!error) { error = vnode_authattr_new(dvp, vap, 0, ctx); + } /* - * vnode_authattr_new can return errors other than EPERM, but that's not going to + * vnode_authattr_new can return errors other than EPERM, but that's not going to * sit well with our clients so we map all errors to EPERM. - */ - if (error) + */ + if (error) { error = EPERM; + } - if(!error) { + if (!error) { error = vn_authorize_mkdir(dvp, &ni.ni_cnd, vap, ctx, NULL); - if (error) + if (error) { error = EACCES; + } } - if (!error) + if (!error) { error = VNOP_MKDIR(dvp, &vp, &ni.ni_cnd, vap, ctx); + } #if CONFIG_FSE - if (nfsrv_fsevents_enabled && !error) + if (nfsrv_fsevents_enabled && !error) { add_fsevent(FSE_CREATE_DIR, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); + } #endif - if (!error && !VATTR_ALL_SUPPORTED(vap)) - /* + if (!error && !VATTR_ALL_SUPPORTED(vap)) { + /* * If some of the requested attributes weren't handled by the VNOP, * use our fallback code. */ error = vnode_setattr_fallback(vp, vap, ctx); + } - if (xacl != NULL) + if (xacl != NULL) { kauth_acl_free(xacl); - + } + if (!error) { error = nfsrv_vptofh(nx, nd->nd_vers, NULL, vp, ctx, &nfh); if (!error) { nfsm_srv_vattr_init(&postattr, nd->nd_vers); postattrerr = vnode_getattr(vp, &postattr, ctx); - if (nd->nd_vers == NFS_VER2) + if (nd->nd_vers == NFS_VER2) { error = postattrerr; + } } vnode_put(vp); vp = NULL; @@ -3832,7 +4028,7 @@ nfsmerr: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_SRVFH(nd->nd_vers, &nfh) + - NFSX_POSTOPATTR(nd->nd_vers) + NFSX_WCCDATA(nd->nd_vers)); + NFSX_POSTOPATTR(nd->nd_vers) + NFSX_WCCDATA(nd->nd_vers)); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); @@ -3842,31 +4038,34 @@ nfsmerr: nfsm_chain_add_postop_attr(error, nd, &nmrep, postattrerr, &postattr); } nfsm_chain_add_wcc_data(error, nd, &nmrep, - dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); } else { nfsm_chain_add_fh(error, &nmrep, NFS_VER2, nfh.nfh_fhp, nfh.nfh_len); - if (!error) + if (!error) { error = nfsm_chain_add_fattr(nd, &nmrep, &postattr); + } } nfsmout: nfsm_chain_build_done(error, &nmrep); if (ni.ni_cnd.cn_nameiop) { - /* + /* * nameidone has to happen before we vnode_put(dvp) * since it may need to release the fs_nodelock on the dvp */ nameidone(&ni); vnode_put(dvp); - if (vp) + if (vp) { vnode_put(vp); + } } - if (dirp) + if (dirp) { vnode_put(dirp); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -3921,7 +4120,7 @@ nfsrv_rmdir( } } if (dirp) { - if (nd->nd_vers == NFS_VER3) { + if (nd->nd_vers == NFS_VER3) { nfsm_srv_pre_vattr_init(&dpreattr); dpreattrerr = vnode_getattr(dirp, &dpreattr, ctx); } else { @@ -3948,14 +4147,17 @@ nfsrv_rmdir( /* * The root of a mounted filesystem cannot be deleted. 
*/ - if (vnode_isvroot(vp)) + if (vnode_isvroot(vp)) { error = EBUSY; - if (!error) + } + if (!error) { error = nfsrv_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx, nxo, 0); + } if (!error) { error = vn_authorize_rmdir(dvp, vp, &ni.ni_cnd, ctx, NULL); - if (error) + if (error) { error = EACCES; + } } if (!error) { @@ -3963,10 +4165,10 @@ nfsrv_rmdir( char *path = NULL; int plen; fse_info finfo; - + if (nfsrv_fsevents_enabled && need_fsevent(FSE_DELETE, dvp)) { plen = MAXPATHLEN; - if ((path = get_pathbuff()) && !vn_getpath(vp, path, &plen)) { + if ((path = get_pathbuff()) && !vn_getpath(vp, path, &plen)) { get_fse_info(vp, &finfo, ctx); } else if (path) { release_pathbuff(path); @@ -3979,12 +4181,13 @@ nfsrv_rmdir( #if CONFIG_FSE if (path) { - if (!error) + if (!error) { add_fsevent(FSE_DELETE, ctx, - FSE_ARG_STRING, plen, path, - FSE_ARG_FINFO, &finfo, - FSE_ARG_DONE); - release_pathbuff(path); + FSE_ARG_STRING, plen, path, + FSE_ARG_FINFO, &finfo, + FSE_ARG_DONE); + } + release_pathbuff(path); } #endif /* CONFIG_FSE */ } @@ -4012,18 +4215,20 @@ nfsmerr: nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_wcc_data(error, nd, &nmrep, - dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + dpreattrerr, &dpreattr, dpostattrerr, &dpostattr); + } nfsmout: nfsm_chain_build_done(error, &nmrep); - if (dirp) + if (dirp) { vnode_put(dirp); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -4068,7 +4273,7 @@ nfsrv_readdir( struct nfs_export *nx; struct nfs_export_options *nxo; uio_t auio = NULL; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; int len, nlen, rem, xfer, error, attrerr; int siz, count, fullsiz, eofflag, nentries; u_quad_t off, toff, verf; @@ -4098,8 +4303,9 @@ nfsrv_readdir( off = toff; siz = ((count + DIRBLKSIZ - 1) & ~(DIRBLKSIZ - 1)); xfer = NFSRV_NDMAXDATA(nd); - if (siz > xfer) + if (siz > xfer) { siz = xfer; + } fullsiz = siz; error = nfsrv_fhtovp(&nfh, nd, &vp, &nx, &nxo); @@ -4114,35 +4320,42 @@ nfsrv_readdir( error = nfsrv_credcheck(nd, ctx, nx, nxo); nfsmerr_if(error); - if (nxo->nxo_flags & NX_MANGLEDNAMES || nd->nd_vers == NFS_VER2) + if (nxo->nxo_flags & NX_MANGLEDNAMES || nd->nd_vers == NFS_VER2) { vnopflag |= VNODE_READDIR_NAMEMAX; + } - if ((nd->nd_vers == NFS_VER2) || (nxo->nxo_flags & NX_32BITCLIENTS)) + if ((nd->nd_vers == NFS_VER2) || (nxo->nxo_flags & NX_32BITCLIENTS)) { vnopflag |= VNODE_READDIR_SEEKOFF32; + } if (nd->nd_vers == NFS_VER3) { nfsm_srv_vattr_init(&attr, NFS_VER3); error = attrerr = vnode_getattr(vp, &attr, ctx); - if (!error && toff && verf && (verf != attr.va_filerev)) + if (!error && toff && verf && (verf != attr.va_filerev)) { error = NFSERR_BAD_COOKIE; + } } - if (!error) + if (!error) { error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_LIST_DIRECTORY, ctx, nxo, 0); + } #if CONFIG_MACF if (!error) { - if (!error && mac_vnode_check_open(ctx, vp, FREAD)) + if (!error && mac_vnode_check_open(ctx, vp, FREAD)) { error = EACCES; + } - if (!error) + if (!error) { error = mac_vnode_check_readdir(ctx, vp); + } } #endif nfsmerr_if(error); MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); - if (rbuf) + if (rbuf) { auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); + } if (!rbuf || !auio) { error = ENOMEM; goto nfsmerr; @@ -4171,7 +4384,7 @@ again: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, 
&nmrep, NFSX_POSTOPATTR(nd->nd_vers) + - NFSX_COOKIEVERF(nd->nd_vers) + 2 * NFSX_UNSIGNED); + NFSX_COOKIEVERF(nd->nd_vers) + 2 * NFSX_UNSIGNED); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); @@ -4182,7 +4395,7 @@ again: nfsm_chain_add_32(error, &nmrep, FALSE); nfsm_chain_add_32(error, &nmrep, TRUE); nfsm_chain_build_done(error, &nmrep); - return (error); + return error; } } @@ -4210,7 +4423,7 @@ again: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_POSTOPATTR(nd->nd_vers) + - NFSX_COOKIEVERF(nd->nd_vers) + siz); + NFSX_COOKIEVERF(nd->nd_vers) + siz); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); @@ -4228,12 +4441,14 @@ again: while ((cpos < cend) && (nentries > 0)) { if (dp->d_fileno != 0) { nlen = dp->d_namlen; - if ((nd->nd_vers == NFS_VER2) && (nlen > NFS_MAXNAMLEN)) + if ((nd->nd_vers == NFS_VER2) && (nlen > NFS_MAXNAMLEN)) { nlen = NFS_MAXNAMLEN; - rem = nfsm_rndup(nlen)-nlen; + } + rem = nfsm_rndup(nlen) - nlen; len += (4 * NFSX_UNSIGNED + nlen + rem); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { len += 2 * NFSX_UNSIGNED; + } if (len > count) { eofflag = 0; break; @@ -4247,8 +4462,9 @@ again: } nfsm_chain_add_string(error, &nmrep, dp->d_name, nlen); if (nd->nd_vers == NFS_VER3) { - if (vnopflag & VNODE_READDIR_SEEKOFF32) + if (vnopflag & VNODE_READDIR_SEEKOFF32) { dp->d_seekoff &= 0x00000000ffffffffULL; + } nfsm_chain_add_64(error, &nmrep, dp->d_seekoff); } else { nfsm_chain_add_32(error, &nmrep, dp->d_seekoff); @@ -4264,24 +4480,27 @@ again: FREE(rbuf, M_TEMP); goto nfsmout; nfsmerr: - if (rbuf) + if (rbuf) { FREE(rbuf, M_TEMP); - if (vp) + } + if (vp) { vnode_put(vp); + } nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_POSTOPATTR(nd->nd_vers)); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_postop_attr(error, nd, &nmrep, attrerr, &attr); + } nfsmout: nfsm_chain_build_done(error, &nmrep); if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } int @@ -4298,7 +4517,7 @@ nfsrv_readdirplus( struct nfs_export *nx; struct nfs_export_options *nxo; uio_t auio = NULL; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; struct vnode_attr attr, va, *vap = &va; int len, nlen, rem, xfer, error, attrerr, gotfh, gotattr; int siz, dircount, maxcount, fullsiz, eofflag, dirlen, nentries, isdotdot; @@ -4327,12 +4546,14 @@ nfsrv_readdirplus( off = toff; xfer = NFSRV_NDMAXDATA(nd); dircount = ((dircount + DIRBLKSIZ - 1) & ~(DIRBLKSIZ - 1)); - if (dircount > xfer) + if (dircount > xfer) { dircount = xfer; + } fullsiz = siz = dircount; maxcount = ((maxcount + DIRBLKSIZ - 1) & ~(DIRBLKSIZ - 1)); - if (maxcount > xfer) + if (maxcount > xfer) { maxcount = xfer; + } error = nfsrv_fhtovp(&dnfh, nd, &vp, &nx, &nxo); nfsmerr_if(error); @@ -4346,33 +4567,40 @@ nfsrv_readdirplus( error = nfsrv_credcheck(nd, ctx, nx, nxo); nfsmerr_if(error); - if (nxo->nxo_flags & NX_32BITCLIENTS) + if (nxo->nxo_flags & NX_32BITCLIENTS) { vnopflag |= VNODE_READDIR_SEEKOFF32; + } - if (nxo->nxo_flags & NX_MANGLEDNAMES) + if (nxo->nxo_flags & NX_MANGLEDNAMES) { vnopflag |= VNODE_READDIR_NAMEMAX; + } nfsm_srv_vattr_init(&attr, NFS_VER3); error = attrerr = vnode_getattr(vp, &attr, ctx); - if (!error && toff && verf && (verf != attr.va_filerev)) + if (!error && toff && verf && (verf != attr.va_filerev)) { error = NFSERR_BAD_COOKIE; - if (!error) 
+ } + if (!error) { error = nfsrv_authorize(vp, NULL, KAUTH_VNODE_LIST_DIRECTORY, ctx, nxo, 0); + } #if CONFIG_MACF if (!error) { - if (!error && mac_vnode_check_open(ctx, vp, FREAD)) + if (!error && mac_vnode_check_open(ctx, vp, FREAD)) { error = EACCES; + } - if (!error) + if (!error) { error = mac_vnode_check_readdir(ctx, vp); + } } #endif nfsmerr_if(error); MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); - if (rbuf) + if (rbuf) { auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); + } if (!rbuf || !auio) { error = ENOMEM; goto nfsmerr; @@ -4399,7 +4627,7 @@ again: /* assemble reply */ nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_V3POSTOPATTR + - NFSX_V3COOKIEVERF + 2 * NFSX_UNSIGNED); + NFSX_V3COOKIEVERF + 2 * NFSX_UNSIGNED); nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); @@ -4408,7 +4636,7 @@ again: nfsm_chain_add_32(error, &nmrep, FALSE); nfsm_chain_add_32(error, &nmrep, TRUE); nfsm_chain_build_done(error, &nmrep); - return (error); + return error; } } @@ -4435,8 +4663,9 @@ again: * supports VGET. */ if ((error = VFS_VGET(vnode_mount(vp), (ino64_t)dp->d_fileno, &nvp, ctx))) { - if (error == ENOTSUP) /* let others get passed back */ + if (error == ENOTSUP) { /* let others get passed back */ error = NFSERR_NOTSUPP; + } goto nfsmerr; } vnode_put(nvp); @@ -4458,7 +4687,7 @@ again: while ((cpos < cend) && (nentries > 0)) { if (dp->d_fileno != 0) { nlen = dp->d_namlen; - rem = nfsm_rndup(nlen)-nlen; + rem = nfsm_rndup(nlen) - nlen; gotfh = gotattr = 1; /* Got to get the vnode for lookup per entry. */ @@ -4467,12 +4696,14 @@ again: gotfh = gotattr = 0; } else { isdotdot = ((dp->d_namlen == 2) && - (dp->d_name[0] == '.') && (dp->d_name[1] == '.')); - if (nfsrv_vptofh(nx, 0, (isdotdot ? &dnfh : NULL), nvp, ctx, &nfh)) + (dp->d_name[0] == '.') && (dp->d_name[1] == '.')); + if (nfsrv_vptofh(nx, 0, (isdotdot ? &dnfh : NULL), nvp, ctx, &nfh)) { gotfh = 0; + } nfsm_srv_vattr_init(vap, NFS_VER3); - if (vnode_getattr(nvp, vap, ctx)) + if (vnode_getattr(nvp, vap, ctx)) { gotattr = 0; + } vnode_put(nvp); } @@ -4483,10 +4714,12 @@ again: * XDR overheads. */ len += 8 * NFSX_UNSIGNED + nlen + rem; - if (gotattr) + if (gotattr) { len += NFSX_V3FATTR; - if (gotfh) + } + if (gotfh) { len += NFSX_UNSIGNED + nfsm_rndup(nfh.nfh_len); + } dirlen += 6 * NFSX_UNSIGNED + nlen + rem; if ((len > maxcount) || (dirlen > dircount)) { eofflag = 0; @@ -4497,14 +4730,16 @@ again: nfsm_chain_add_32(error, &nmrep, TRUE); nfsm_chain_add_64(error, &nmrep, dp->d_fileno); nfsm_chain_add_string(error, &nmrep, dp->d_name, nlen); - if (vnopflag & VNODE_READDIR_SEEKOFF32) + if (vnopflag & VNODE_READDIR_SEEKOFF32) { dp->d_seekoff &= 0x00000000ffffffffULL; + } nfsm_chain_add_64(error, &nmrep, dp->d_seekoff); nfsm_chain_add_postop_attr(error, nd, &nmrep, (gotattr ? 
0 : ENOENT), vap); - if (gotfh) + if (gotfh) { nfsm_chain_add_postop_fh(error, &nmrep, nfh.nfh_fhp, nfh.nfh_len); - else + } else { nfsm_chain_add_32(error, &nmrep, FALSE); + } nfsmerr_if(error); } cpos += dp->d_reclen; @@ -4518,8 +4753,9 @@ again: FREE(rbuf, M_TEMP); goto nfsmout; nfsmerr: - if (rbuf) + if (rbuf) { FREE(rbuf, M_TEMP); + } nd->nd_repstat = error; error = nfsrv_rephead(nd, slp, &nmrep, NFSX_V3POSTOPATTR); nfsmout_if(error); @@ -4528,13 +4764,14 @@ nfsmerr: nfsm_chain_add_postop_attr(error, nd, &nmrep, attrerr, &attr); nfsmout: nfsm_chain_build_done(error, &nmrep); - if (vp) + if (vp) { vnode_put(vp); + } if (error) { nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -4593,8 +4830,9 @@ nfsrv_commit( postattrerr = vnode_getattr(vp, &postattr, ctx); nfsmerr: - if (vp) + if (vp) { vnode_put(vp); + } /* assemble reply */ nd->nd_repstat = error; @@ -4603,7 +4841,7 @@ nfsmerr: *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); nfsm_chain_add_wcc_data(error, nd, &nmrep, - preattrerr, &preattr, postattrerr, &postattr); + preattrerr, &preattr, postattrerr, &postattr); if (!nd->nd_repstat) { nfsm_chain_add_32(error, &nmrep, nx->nx_exptime.tv_sec); nfsm_chain_add_32(error, &nmrep, nx->nx_exptime.tv_usec); @@ -4614,7 +4852,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -4672,8 +4910,9 @@ nfsrv_statfs( } nfsmerr: - if (vp) + if (vp) { vnode_put(vp); + } /* assemble reply */ nd->nd_repstat = error; @@ -4681,8 +4920,9 @@ nfsmerr: nfsmout_if(error); *mrepp = nmrep.nmc_mhead; nfsmout_on_status(nd, error); - if (nd->nd_vers == NFS_VER3) + if (nd->nd_vers == NFS_VER3) { nfsm_chain_add_postop_attr(error, nd, &nmrep, attrerr, &attr); + } nfsmout_if(nd->nd_repstat); if (nd->nd_vers == NFS_VER3) { @@ -4706,7 +4946,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -4751,8 +4991,9 @@ nfsrv_fsinfo( attrerr = vnode_getattr(vp, &attr, ctx); nfsmerr: - if (vp) + if (vp) { vnode_put(vp); + } /* assemble reply */ nd->nd_repstat = error; @@ -4770,8 +5011,9 @@ nfsmerr: if (slp->ns_sotype == SOCK_DGRAM) { maxsize = NFS_MAXDGRAMDATA; prefsize = NFS_PREFDGRAMDATA; - } else + } else { maxsize = prefsize = NFSRV_MAXDATA; + } nfsm_chain_add_32(error, &nmrep, maxsize); nfsm_chain_add_32(error, &nmrep, prefsize); @@ -4785,8 +5027,8 @@ nfsmerr: nfsm_chain_add_32(error, &nmrep, 1); /* XXX link/symlink support should be taken from volume capabilities */ nfsm_chain_add_32(error, &nmrep, - NFSV3FSINFO_LINK | NFSV3FSINFO_SYMLINK | - NFSV3FSINFO_HOMOGENEOUS | NFSV3FSINFO_CANSETTIME); + NFSV3FSINFO_LINK | NFSV3FSINFO_SYMLINK | + NFSV3FSINFO_HOMOGENEOUS | NFSV3FSINFO_CANSETTIME); nfsmout: nfsm_chain_build_done(error, &nmrep); @@ -4794,7 +5036,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -4837,23 +5079,29 @@ nfsrv_pathconf( nfsmerr_if(error); error = VNOP_PATHCONF(vp, _PC_LINK_MAX, &linkmax, ctx); - if (!error) + if (!error) { error = VNOP_PATHCONF(vp, _PC_NAME_MAX, &namemax, ctx); - if (!error) + } + if (!error) { error = VNOP_PATHCONF(vp, _PC_CHOWN_RESTRICTED, &chownres, ctx); - if (!error) + } + if (!error) { error = VNOP_PATHCONF(vp, _PC_NO_TRUNC, ¬runc, ctx); - if (!error) + } + if (!error) { error = VNOP_PATHCONF(vp, _PC_CASE_SENSITIVE, &case_sensitive, ctx); - if (!error) + } + if (!error) { error = VNOP_PATHCONF(vp, _PC_CASE_PRESERVING, &case_preserving, ctx); + } nfsm_srv_vattr_init(&attr, NFS_VER3); attrerr = 
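The PATHCONF handler above uses the stop-at-first-error chaining idiom that this patch braces throughout. Purely as an illustration (the kernel source keeps the explicit chain), the same behavior can be written table-driven; the _PC_* names and the VNOP_PATHCONF signature are as in the surrounding file:

    /* Sketch only: loop-form equivalent of the if (!error) chain. */
    static const int pc_query[] = {
        _PC_LINK_MAX, _PC_NAME_MAX, _PC_CHOWN_RESTRICTED,
        _PC_NO_TRUNC, _PC_CASE_SENSITIVE, _PC_CASE_PRESERVING,
    };
    int32_t pc_val[6];
    int i, error = 0;
    for (i = 0; !error && i < 6; i++)
        error = VNOP_PATHCONF(vp, pc_query[i], &pc_val[i], ctx);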
vnode_getattr(vp, &attr, ctx); nfsmerr: - if (vp) + if (vp) { vnode_put(vp); + } /* assemble reply */ nd->nd_repstat = error; @@ -4877,7 +5125,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -4897,8 +5145,9 @@ nfsrv_null( /* * RPCSEC_GSS context setup ? */ - if (nd->nd_gss_context) - return(nfs_gss_svc_ctx_init(nd, slp, mrepp)); + if (nd->nd_gss_context) { + return nfs_gss_svc_ctx_init(nd, slp, mrepp); + } nfsm_chain_null(&nmrep); @@ -4913,7 +5162,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } /* @@ -4932,10 +5181,11 @@ nfsrv_noop( nfsm_chain_null(&nmrep); - if (nd->nd_repstat) + if (nd->nd_repstat) { error = nd->nd_repstat; - else + } else { error = EPROCUNAVAIL; + } /* assemble reply */ nd->nd_repstat = error; @@ -4948,7 +5198,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); *mrepp = NULL; } - return (error); + return error; } const nfsrv_proc_t nfsrv_procs[NFS_NPROCS] = { @@ -4986,7 +5236,7 @@ const nfsrv_proc_t nfsrv_procs[NFS_NPROCS] = { * processes that chmod after opening a file don't break. I don't like * this because it opens a security hole, but since the nfs server opens * a security hole the size of a barn door anyhow, what the heck. - * + * * The exception to rule 2 is EPERM. If a file is IMMUTABLE, vnode_authorize() * will return EPERM instead of EACCESS. EPERM is always an error. */ @@ -5012,7 +5262,7 @@ nfsrv_authorize( if (nxo->nxo_flags & NX_READONLY) { switch (vnode_vtype(vp)) { case VREG: case VDIR: case VLNK: case VCPLX: - return (EROFS); + return EROFS; default: break; } @@ -5027,11 +5277,11 @@ nfsrv_authorize( VATTR_INIT(&vattr); VATTR_WANTED(&vattr, va_uid); if ((vnode_getattr(vp, &vattr, ctx) == 0) && - (kauth_cred_getuid(vfs_context_ucred(ctx)) == vattr.va_uid)) + (kauth_cred_getuid(vfs_context_ucred(ctx)) == vattr.va_uid)) { error = 0; + } } return error; } #endif /* NFSSERVER */ - diff --git a/bsd/nfs/nfs_socket.c b/bsd/nfs/nfs_socket.c index 8e3562c64..55ba36619 100644 --- a/bsd/nfs/nfs_socket.c +++ b/bsd/nfs/nfs_socket.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -110,14 +110,14 @@ #define NFS_SOCK_DBG(...) 
NFS_DBG(NFS_FAC_SOCK, 7, ## __VA_ARGS__) /* XXX */ -boolean_t current_thread_aborted(void); -kern_return_t thread_terminate(thread_t); +boolean_t current_thread_aborted(void); +kern_return_t thread_terminate(thread_t); #if NFSSERVER int nfsrv_sock_max_rec_queue_length = 128; /* max # RPC records queued on (UDP) socket */ -int nfsrv_getstream(struct nfsrv_sock *,int); +int nfsrv_getstream(struct nfsrv_sock *, int); int nfsrv_getreq(struct nfsrv_descript *); extern int nfsv3_procid[NFS_NPROCS]; #endif /* NFSSERVER */ @@ -128,46 +128,52 @@ extern int nfsv3_procid[NFS_NPROCS]; int nfs_sockaddr_cmp(struct sockaddr *sa1, struct sockaddr *sa2) { - if (!sa1) - return (-1); - if (!sa2) - return (1); - if (sa1->sa_family != sa2->sa_family) - return ((sa1->sa_family < sa2->sa_family) ? -1 : 1); - if (sa1->sa_len != sa2->sa_len) - return ((sa1->sa_len < sa2->sa_len) ? -1 : 1); - if (sa1->sa_family == AF_INET) - return (bcmp(&((struct sockaddr_in*)sa1)->sin_addr, - &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr))); - if (sa1->sa_family == AF_INET6) - return (bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr, - &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr))); - return (-1); + if (!sa1) { + return -1; + } + if (!sa2) { + return 1; + } + if (sa1->sa_family != sa2->sa_family) { + return (sa1->sa_family < sa2->sa_family) ? -1 : 1; + } + if (sa1->sa_len != sa2->sa_len) { + return (sa1->sa_len < sa2->sa_len) ? -1 : 1; + } + if (sa1->sa_family == AF_INET) { + return bcmp(&((struct sockaddr_in*)sa1)->sin_addr, + &((struct sockaddr_in*)sa2)->sin_addr, sizeof(((struct sockaddr_in*)sa1)->sin_addr)); + } + if (sa1->sa_family == AF_INET6) { + return bcmp(&((struct sockaddr_in6*)sa1)->sin6_addr, + &((struct sockaddr_in6*)sa2)->sin6_addr, sizeof(((struct sockaddr_in6*)sa1)->sin6_addr)); + } + return -1; } #if NFSCLIENT -int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *); -int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int); -int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *); -void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *); -void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *); -int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *); -int nfs_reconnect(struct nfsmount *); -int nfs_connect_setup(struct nfsmount *); -void nfs_mount_sock_thread(void *, wait_result_t); -void nfs_udp_rcv(socket_t, void*, int); -void nfs_tcp_rcv(socket_t, void*, int); -void nfs_sock_poke(struct nfsmount *); -void nfs_request_match_reply(struct nfsmount *, mbuf_t); -void nfs_reqdequeue(struct nfsreq *); -void nfs_reqbusy(struct nfsreq *); +int nfs_connect_search_new_socket(struct nfsmount *, struct nfs_socket_search *, struct timeval *); +int nfs_connect_search_socket_connect(struct nfsmount *, struct nfs_socket *, int); +int nfs_connect_search_ping(struct nfsmount *, struct nfs_socket *, struct timeval *); +void nfs_connect_search_socket_found(struct nfsmount *, struct nfs_socket_search *, struct nfs_socket *); +void nfs_connect_search_socket_reap(struct nfsmount *, struct nfs_socket_search *, struct timeval *); +int nfs_connect_search_check(struct nfsmount *, struct nfs_socket_search *, struct timeval *); +int nfs_reconnect(struct nfsmount *); +int nfs_connect_setup(struct nfsmount *); +void 
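nfs_sockaddr_cmp() above defines a total order -- family first, then sockaddr length, then raw address bytes -- and notably compares only the address, never the port. A small usage sketch (the wrapper name here is ours, not from the source):

    /* Sketch: equality test built on the comparator above. */
    static int
    nfs_sockaddr_same_host(struct sockaddr *sa1, struct sockaddr *sa2)
    {
        /* 0 means same family, length, and address; ports are ignored. */
        return nfs_sockaddr_cmp(sa1, sa2) == 0;
    }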
nfs_mount_sock_thread(void *, wait_result_t); +void nfs_udp_rcv(socket_t, void*, int); +void nfs_tcp_rcv(socket_t, void*, int); +void nfs_sock_poke(struct nfsmount *); +void nfs_request_match_reply(struct nfsmount *, mbuf_t); +void nfs_reqdequeue(struct nfsreq *); +void nfs_reqbusy(struct nfsreq *); struct nfsreq *nfs_reqnext(struct nfsreq *); -int nfs_wait_reply(struct nfsreq *); -void nfs_softterm(struct nfsreq *); -int nfs_can_squish(struct nfsmount *); -int nfs_is_squishy(struct nfsmount *); -int nfs_is_dead(int, struct nfsmount *); +int nfs_wait_reply(struct nfsreq *); +void nfs_softterm(struct nfsreq *); +int nfs_can_squish(struct nfsmount *); +int nfs_is_squishy(struct nfsmount *); +int nfs_is_dead(int, struct nfsmount *); /* * Estimate rto for an nfs rpc sent via. an unreliable datagram. @@ -181,13 +187,13 @@ int nfs_is_dead(int, struct nfsmount *); * read, write - A+4D * other - nm_timeo */ -#define NFS_RTO(n, t) \ +#define NFS_RTO(n, t) \ ((t) == 0 ? (n)->nm_timeo : \ ((t) < 3 ? \ (((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \ ((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1))) -#define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1] -#define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1] +#define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1] +#define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1] /* * Defines which timer to use for the procnum. @@ -214,8 +220,8 @@ static int proct[NFS_NPROCS] = { * performance hit (ave. rtt 3 times larger), * I suspect due to the large rtt that nfs rpcs have. */ -#define NFS_CWNDSCALE 256 -#define NFS_MAXCWND (NFS_CWNDSCALE * 32) +#define NFS_CWNDSCALE 256 +#define NFS_MAXCWND (NFS_CWNDSCALE * 32) static int nfs_backoff[8] = { 2, 4, 8, 16, 32, 64, 128, 256, }; /* @@ -239,8 +245,9 @@ next_server: /* no more servers on current location, go to first server of next location */ serv = 0; loc++; - if (loc >= nlp->nl_numlocs) + if (loc >= nlp->nl_numlocs) { loc = 0; /* after last location, wrap back around to first location */ + } } } /* @@ -250,10 +257,12 @@ next_server: * location that was passed in. (That would mean no servers had any * addresses. And we don't want to spin here forever.) 
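NFS_RTO above is a smoothed-RTT-plus-deviation retransmit estimator kept in scaled fixed point: assuming nm_srtt is kept at 8x and nm_sdrtt at 4x scale, as the shifts suggest, timer classes 1-2 work out to about A+2D and class 3 (read/write) to A+4D, matching the comment, while class 0 falls back to the mount's nm_timeo. The macro written as a function, same arithmetic, for readability (field names as in struct nfsmount):

    /* Sketch: functional form of NFS_RTO. */
    static int
    nfs_rto_sketch(struct nfsmount *n, int t)
    {
        if (t == 0)
            return n->nm_timeo;              /* no timer class: default */
        if (t < 3)                           /* frequent rpcs: ~A+2D */
            return ((((n->nm_srtt[t-1] + 3) >> 2) + n->nm_sdrtt[t-1] + 1) >> 1);
        return (((n->nm_srtt[t-1] + 7) >> 3) + n->nm_sdrtt[t-1] + 1);  /* ~A+4D */
    }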
*/ - if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) + if ((loc == nlip->nli_loc) && (serv == nlip->nli_serv) && (addr == nlip->nli_addr)) { return; - if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) + } + if (addr >= nlp->nl_locations[loc]->nl_servers[serv]->ns_addrcount) { goto next_server; + } nlip->nli_loc = loc; nlip->nli_serv = serv; @@ -266,11 +275,13 @@ next_server: int nfs_location_index_cmp(struct nfs_location_index *nlip1, struct nfs_location_index *nlip2) { - if (nlip1->nli_loc != nlip2->nli_loc) - return (nlip1->nli_loc - nlip2->nli_loc); - if (nlip1->nli_serv != nlip2->nli_serv) - return (nlip1->nli_serv - nlip2->nli_serv); - return (nlip1->nli_addr - nlip2->nli_addr); + if (nlip1->nli_loc != nlip2->nli_loc) { + return nlip1->nli_loc - nlip2->nli_loc; + } + if (nlip1->nli_serv != nlip2->nli_serv) { + return nlip1->nli_serv - nlip2->nli_serv; + } + return nlip1->nli_addr - nlip2->nli_addr; } /* @@ -298,7 +309,7 @@ nfs_location_mntfromname(struct nfs_fs_locations *locs, struct nfs_location_inde return; } /* append each server path component */ - for (i=0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) { + for (i = 0; (size > 0) && (i < (int)fsl->nl_path.np_compcount); i++) { cnt = snprintf(p, size, "/%s", fsl->nl_path.np_components[i]); p += cnt; size -= cnt; @@ -324,7 +335,7 @@ nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag) } lck_mtx_lock(&nso->nso_lock); - if ((nso->nso_flags & (NSO_UPCALL|NSO_DISCONNECTING|NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) { + if ((nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) || !(nso->nso_flags & NSO_PINGING)) { NFS_SOCK_DBG("nfs connect - socket %p upcall - nevermind\n", nso); lck_mtx_unlock(&nso->nso_lock); return; @@ -335,8 +346,9 @@ nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag) /* loop while we make error-free progress */ while (!error && recv) { /* make sure we're still interested in this socket */ - if (nso->nso_flags & (NSO_DISCONNECTING|NSO_DEAD)) + if (nso->nso_flags & (NSO_DISCONNECTING | NSO_DEAD)) { break; + } lck_mtx_unlock(&nso->nso_lock); m = NULL; if (nso->nso_sotype == SOCK_STREAM) { @@ -356,19 +368,22 @@ nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag) nfsm_chain_dissect_init(error, &nmrep, m); nfsm_chain_get_32(error, &nmrep, rxid); nfsm_chain_get_32(error, &nmrep, reply); - if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) + if (!error && ((reply != RPC_REPLY) || (rxid != nso->nso_pingxid))) { error = EBADRPC; + } nfsm_chain_get_32(error, &nmrep, reply_status); if (!error && (reply_status == RPC_MSGDENIED)) { nfsm_chain_get_32(error, &nmrep, rejected_status); - if (!error) + if (!error) { error = (rejected_status == RPC_MISMATCH) ? 
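nfs_location_index_cmp() orders indices lexicographically by (location, server, address), which lets the search code detect a full wrap-around with a single equality test. A sketch of that use, with names taken from the patch:

    /* Sketch: "tried everything" == we are back at the starting index. */
    if (nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc) == 0) {
        /* every address of every server in every location was visited */
    }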
ERPCMISMATCH : EACCES; + } } nfsm_chain_get_32(error, &nmrep, verf_type); /* verifier flavor */ nfsm_chain_get_32(error, &nmrep, verf_len); /* verifier length */ nfsmout_if(error); - if (verf_len) + if (verf_len) { nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len)); + } nfsm_chain_get_32(error, &nmrep, accepted_status); nfsmout_if(error); if ((accepted_status == RPC_PROGMISMATCH) && !nso->nso_version) { @@ -377,16 +392,17 @@ nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag) nfsm_chain_get_32(error, &nmrep, maxvers); nfsmout_if(error); if (nso->nso_protocol == PMAPPROG) { - if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) + if ((minvers > RPCBVERS4) || (maxvers < PMAPVERS)) { error = EPROGMISMATCH; - else if ((nso->nso_saddr->sa_family == AF_INET) && - (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) + } else if ((nso->nso_saddr->sa_family == AF_INET) && + (PMAPVERS >= minvers) && (PMAPVERS <= maxvers)) { nso->nso_version = PMAPVERS; - else if (nso->nso_saddr->sa_family == AF_INET6) { - if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) + } else if (nso->nso_saddr->sa_family == AF_INET6) { + if ((RPCBVERS4 >= minvers) && (RPCBVERS4 <= maxvers)) { nso->nso_version = RPCBVERS4; - else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) + } else if ((RPCBVERS3 >= minvers) && (RPCBVERS3 <= maxvers)) { nso->nso_version = RPCBVERS3; + } } } else if (nso->nso_protocol == NFS_PROG) { int vers; @@ -402,13 +418,15 @@ nfs_connect_upcall(socket_t so, void *arg, __unused int waitflag) * here if that was successful. */ for (vers = nso->nso_nfs_max_vers; vers >= (int)nso->nso_nfs_min_vers; vers--) { - if (vers >= (int)minvers && vers <= (int)maxvers) - break; + if (vers >= (int)minvers && vers <= (int)maxvers) { + break; + } } nso->nso_version = (vers < (int)nso->nso_nfs_min_vers) ? 0 : vers; } - if (!error && nso->nso_version) + if (!error && nso->nso_version) { accepted_status = RPC_SUCCESS; + } } if (!error) { switch (accepted_status) { @@ -455,8 +473,9 @@ nfsmout: nso->nso_flags |= NSO_DEAD; wakeup(nso->nso_wake); } - if (nso->nso_flags & NSO_DISCONNECTING) + if (nso->nso_flags & NSO_DISCONNECTING) { wakeup(&nso->nso_flags); + } lck_mtx_unlock(&nso->nso_lock); } @@ -481,12 +500,14 @@ nfs_socket_create( char naddr[MAX_IPv6_STR_LEN]; void *sinaddr; - if (sa->sa_family == AF_INET) + if (sa->sa_family == AF_INET) { sinaddr = &((struct sockaddr_in*)sa)->sin_addr; - else + } else { sinaddr = &((struct sockaddr_in6*)sa)->sin6_addr; - if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) + } + if (inet_ntop(sa->sa_family, sinaddr, naddr, sizeof(naddr)) != naddr) { strlcpy(naddr, "", sizeof(naddr)); + } #else char naddr[1] = { 0 }; #endif @@ -494,25 +515,29 @@ nfs_socket_create( *nsop = NULL; /* Create the socket. 
*/ - MALLOC(nso, struct nfs_socket *, sizeof(struct nfs_socket), M_TEMP, M_WAITOK|M_ZERO); - if (nso) - MALLOC(nso->nso_saddr, struct sockaddr *, sa->sa_len, M_SONAME, M_WAITOK|M_ZERO); + MALLOC(nso, struct nfs_socket *, sizeof(struct nfs_socket), M_TEMP, M_WAITOK | M_ZERO); + if (nso) { + MALLOC(nso->nso_saddr, struct sockaddr *, sa->sa_len, M_SONAME, M_WAITOK | M_ZERO); + } if (!nso || !nso->nso_saddr) { - if (nso) + if (nso) { FREE(nso, M_TEMP); - return (ENOMEM); + } + return ENOMEM; } lck_mtx_init(&nso->nso_lock, nfs_request_grp, LCK_ATTR_NULL); nso->nso_sotype = sotype; - if (nso->nso_sotype == SOCK_STREAM) + if (nso->nso_sotype == SOCK_STREAM) { nfs_rpc_record_state_init(&nso->nso_rrs); + } microuptime(&now); nso->nso_timestamp = now.tv_sec; bcopy(sa, nso->nso_saddr, sa->sa_len); - if (sa->sa_family == AF_INET) + if (sa->sa_family == AF_INET) { ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port); - else if (sa->sa_family == AF_INET6) + } else if (sa->sa_family == AF_INET6) { ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port); + } nso->nso_protocol = protocol; nso->nso_version = vers; nso->nso_nfs_min_vers = PVER2MAJOR(nmp->nm_min_vers); @@ -528,7 +553,7 @@ nfs_socket_create( int portrange = IP_PORTRANGE_LOW; error = sock_setsockopt(nso->nso_so, level, optname, &portrange, sizeof(portrange)); - if (!error) { /* bind now to check for failure */ + if (!error) { /* bind now to check for failure */ ss.ss_len = sa->sa_len; ss.ss_family = sa->sa_family; if (ss.ss_family == AF_INET) { @@ -540,23 +565,24 @@ nfs_socket_create( } else { error = EINVAL; } - if (!error) + if (!error) { error = sock_bind(nso->nso_so, (struct sockaddr*)&ss); + } } } if (error) { NFS_SOCK_DBG("nfs connect %s error %d creating socket %p %s type %d%s port %d prot %d %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype, - resvport ? "r" : "", port, protocol, vers); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nso, naddr, sotype, + resvport ? "r" : "", port, protocol, vers); nfs_socket_destroy(nso); } else { NFS_SOCK_DBG("nfs connect %s created socket %p %s type %d%s port %d prot %d %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr, - sotype, resvport ? "r" : "", port, protocol, vers); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, naddr, + sotype, resvport ? 
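For "resvport" mounts, the socket-create path above asks the stack for a privileged source port and then binds immediately so a failure surfaces at creation time rather than at connect. A condensed sketch; level and optname are chosen per address family earlier in the function, and the IPv4 pair is assumed here:

    /* Sketch: reserve a low (<1024) local port, then bind to "any". */
    int portrange = IP_PORTRANGE_LOW;
    error = sock_setsockopt(nso->nso_so, IPPROTO_IP, IP_PORTRANGE,
        &portrange, sizeof(portrange));
    if (!error) {
        struct sockaddr_in sin = {
            .sin_len = sizeof(sin),
            .sin_family = AF_INET,
            .sin_port = htons(0),        /* stack picks the low port */
        };
        error = sock_bind(nso->nso_so, (struct sockaddr *)&sin);
    }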
"r" : "", port, protocol, vers); *nsop = nso; } - return (error); + return error; } /* @@ -569,18 +595,22 @@ nfs_socket_destroy(struct nfs_socket *nso) lck_mtx_lock(&nso->nso_lock); nso->nso_flags |= NSO_DISCONNECTING; - if (nso->nso_flags & NSO_UPCALL) /* give upcall a chance to complete */ - msleep(&nso->nso_flags, &nso->nso_lock, PZERO-1, "nfswaitupcall", &ts); + if (nso->nso_flags & NSO_UPCALL) { /* give upcall a chance to complete */ + msleep(&nso->nso_flags, &nso->nso_lock, PZERO - 1, "nfswaitupcall", &ts); + } lck_mtx_unlock(&nso->nso_lock); sock_shutdown(nso->nso_so, SHUT_RDWR); sock_close(nso->nso_so); - if (nso->nso_sotype == SOCK_STREAM) + if (nso->nso_sotype == SOCK_STREAM) { nfs_rpc_record_state_cleanup(&nso->nso_rrs); + } lck_mtx_destroy(&nso->nso_lock, nfs_request_grp); - if (nso->nso_saddr) + if (nso->nso_saddr) { FREE(nso->nso_saddr, M_SONAME); - if (nso->nso_saddr2) + } + if (nso->nso_saddr2) { FREE(nso->nso_saddr2, M_SONAME); + } NFS_SOCK_DBG("nfs connect - socket %p destroyed\n", nso); FREE(nso, M_TEMP); } @@ -610,8 +640,9 @@ nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso) sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)); /* set nodelay for TCP */ sock_gettype(nso->nso_so, NULL, NULL, &proto); - if (proto == IPPROTO_TCP) + if (proto == IPPROTO_TCP) { sock_setsockopt(nso->nso_so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); + } } if (nso->nso_sotype == SOCK_DGRAM) { /* set socket buffer sizes for UDP */ int reserve = NFS_UDPSOCKBUF; @@ -623,8 +654,9 @@ nfs_socket_options(struct nfsmount *nmp, struct nfs_socket *nso) /* just playin' it safe with upcalls */ sock_setsockopt(nso->nso_so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on)); /* socket should be interruptible if the mount is */ - if (!NMFLAG(nmp, INTR)) + if (!NMFLAG(nmp, INTR)) { sock_nointerrupt(nso->nso_so, 1); + } } /* @@ -655,10 +687,10 @@ nfs_connect_error_class(int error) { switch (error) { case 0: - return (0); + return 0; case ETIMEDOUT: case EAGAIN: - return (1); + return 1; case EPIPE: case EADDRNOTAVAIL: case ENETDOWN: @@ -672,16 +704,16 @@ nfs_connect_error_class(int error) case ECONNREFUSED: case EHOSTDOWN: case EHOSTUNREACH: - return (2); + return 2; case ERPCMISMATCH: case EPROCUNAVAIL: case EPROGMISMATCH: case EPROGUNAVAIL: - return (3); + return 3; case EBADRPC: - return (4); + return 4; default: - return (5); + return 5; } } @@ -691,12 +723,13 @@ nfs_connect_error_class(int error) void nfs_socket_search_update_error(struct nfs_socket_search *nss, int error) { - if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) + if (nfs_connect_error_class(error) >= nfs_connect_error_class(nss->nss_error)) { nss->nss_error = error; + } } /* nfs_connect_search_new_socket: - * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified + * Given a socket search structure for an nfs mount try to find a new socket from the set of addresses specified * by nss. * * nss_last is set to -1 at initialization to indicate the first time. 
Its set to -2 if address was found but @@ -711,10 +744,10 @@ nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *ns struct nfs_socket *nso; char *addrstr; int error = 0; - + NFS_SOCK_DBG("nfs connect %s nss_addrcnt = %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss->nss_addrcnt); /* * while there are addresses and: @@ -724,9 +757,10 @@ nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *ns * then attempt to create a socket with the current address. */ while (nss->nss_addrcnt > 0 && ((nss->nss_last < 0) || (nss->nss_sockcnt == 0) || - ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) { - if (nmp->nm_sockflags & NMSOCK_UNMOUNT) - return (EINTR); + ((nss->nss_sockcnt < 4) && (now->tv_sec >= (nss->nss_last + 2))))) { + if (nmp->nm_sockflags & NMSOCK_UNMOUNT) { + return EINTR; + } /* Can we convert the address to a sockaddr? */ fsl = nmp->nm_locations.nl_locations[nss->nss_nextloc.nli_loc]; fss = fsl->nl_servers[nss->nss_nextloc.nli_serv]; @@ -747,10 +781,11 @@ nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *ns /* Create the socket. */ error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nss->nss_sotype, - nss->nss_port, nss->nss_protocol, nss->nss_version, - ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso); - if (error) - return (error); + nss->nss_port, nss->nss_protocol, nss->nss_version, + ((nss->nss_protocol == NFS_PROG) && NMFLAG(nmp, RESVPORT)), &nso); + if (error) { + return error; + } nso->nso_location = nss->nss_nextloc; nso->nso_wake = nss; @@ -766,14 +801,15 @@ nfs_connect_search_new_socket(struct nfsmount *nmp, struct nfs_socket_search *ns nss->nss_sockcnt++; nfs_location_next(&nmp->nm_locations, &nss->nss_nextloc); nss->nss_addrcnt -= 1; - + nss->nss_last = now->tv_sec; } - if (nss->nss_addrcnt == 0 && nss->nss_last < 0) + if (nss->nss_addrcnt == 0 && nss->nss_last < 0) { nss->nss_last = now->tv_sec; - - return (error); + } + + return error; } /* @@ -786,55 +822,56 @@ int nfs_connect_search_socket_connect(struct nfsmount *nmp, struct nfs_socket *nso, int verbose) { int error; - + if ((nso->nso_sotype != SOCK_STREAM) && NMFLAG(nmp, NOCONNECT)) { /* no connection needed, just say it's already connected */ NFS_SOCK_DBG("nfs connect %s UDP socket %p noconnect\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); nso->nso_flags |= NSO_CONNECTED; nfs_socket_options(nmp, nso); - return (1); /* Socket is connected and setup */ + return 1; /* Socket is connected and setup */ } else if (!(nso->nso_flags & NSO_CONNECTING)) { /* initiate the connection */ nso->nso_flags |= NSO_CONNECTING; lck_mtx_unlock(&nso->nso_lock); NFS_SOCK_DBG("nfs connect %s connecting socket %p\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); error = sock_connect(nso->nso_so, nso->nso_saddr, MSG_DONTWAIT); lck_mtx_lock(&nso->nso_lock); if (error && (error != EINPROGRESS)) { nso->nso_error = error; nso->nso_flags |= NSO_DEAD; - return (0); + return 0; } } if (nso->nso_flags & NSO_CONNECTING) { /* check the connection */ if (sock_isconnected(nso->nso_so)) { NFS_SOCK_DBG("nfs connect %s socket %p is connected\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); nso->nso_flags &= ~NSO_CONNECTING; nso->nso_flags |= NSO_CONNECTED; nfs_socket_options(nmp, nso); - return (1); /* 
Socket is connected and setup */ + return 1; /* Socket is connected and setup */ } else { int optlen = sizeof(error); error = 0; sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &error, &optlen); if (error) { /* we got an error on the socket */ NFS_SOCK_DBG("nfs connect %s socket %p connection error %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); - if (verbose) + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); + if (verbose) { printf("nfs connect socket error %d for %s\n", error, vfs_statfs(nmp->nm_mountp)->f_mntfromname); + } nso->nso_error = error; nso->nso_flags |= NSO_DEAD; - return (0); + return 0; } } } - - return (0); /* Waiting to be connected */ + + return 0; /* Waiting to be connected */ } /* @@ -852,14 +889,15 @@ nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct tim int error; if (!vers) { - if (nso->nso_protocol == PMAPPROG) + if (nso->nso_protocol == PMAPPROG) { vers = (nso->nso_saddr->sa_family == AF_INET) ? PMAPVERS : RPCBVERS4; - else if (nso->nso_protocol == NFS_PROG) + } else if (nso->nso_protocol == NFS_PROG) { vers = PVER2MAJOR(nmp->nm_max_vers); + } } lck_mtx_unlock(&nso->nso_lock); error = nfsm_rpchead2(nmp, nso->nso_sotype, nso->nso_protocol, vers, 0, RPCAUTH_SYS, - vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq); + vfs_context_ucred(vfs_context_kernel()), NULL, NULL, &xid, &mreq); lck_mtx_lock(&nso->nso_lock); if (!error) { nso->nso_flags |= NSO_PINGING; @@ -870,40 +908,44 @@ nfs_connect_search_ping(struct nfsmount *nmp, struct nfs_socket *nso, struct tim msg.msg_name = nso->nso_saddr; msg.msg_namelen = nso->nso_saddr->sa_len; } - for (reqlen=0, m=mreq; m; m = mbuf_next(m)) + for (reqlen = 0, m = mreq; m; m = mbuf_next(m)) { reqlen += mbuf_len(m); + } lck_mtx_unlock(&nso->nso_lock); error = sock_sendmbuf(nso->nso_so, &msg, mreq, 0, &sentlen); NFS_SOCK_DBG("nfs connect %s verifying socket %p send rv %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); lck_mtx_lock(&nso->nso_lock); - if (!error && (sentlen != reqlen)) + if (!error && (sentlen != reqlen)) { error = ETIMEDOUT; + } } if (error) { nso->nso_error = error; nso->nso_flags |= NSO_DEAD; - return (0); + return 0; } - return (1); + return 1; } /* * nfs_connect_search_socket_found: Take the found socket of the socket search list and assign it to the searched socket. - * Set the nfs socket protocol and version if needed. + * Set the nfs socket protocol and version if needed. */ void nfs_connect_search_socket_found(struct nfsmount *nmp, struct nfs_socket_search *nss, struct nfs_socket *nso) { NFS_SOCK_DBG("nfs connect %s socket %p verified\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); if (!nso->nso_version) { /* If the version isn't set, the default must have worked. */ - if (nso->nso_protocol == PMAPPROG) + if (nso->nso_protocol == PMAPPROG) { nso->nso_version = (nso->nso_saddr->sa_family == AF_INET) ? 
PMAPVERS : RPCBVERS4; - if (nso->nso_protocol == NFS_PROG) + } + if (nso->nso_protocol == NFS_PROG) { nso->nso_version = PVER2MAJOR(nmp->nm_max_vers); + } } TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link); nss->nss_sockcnt--; @@ -918,13 +960,13 @@ void nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_search *nss, struct timeval *now) { struct nfs_socket *nso, *nsonext; - + TAILQ_FOREACH_SAFE(nso, &nss->nss_socklist, nso_link, nsonext) { lck_mtx_lock(&nso->nso_lock); if (now->tv_sec >= (nso->nso_timestamp + nss->nss_timeo)) { /* took too long */ NFS_SOCK_DBG("nfs connect %s socket %p timed out\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); nso->nso_error = ETIMEDOUT; nso->nso_flags |= NSO_DEAD; } @@ -934,14 +976,15 @@ nfs_connect_search_socket_reap(struct nfsmount *nmp __unused, struct nfs_socket_ } lck_mtx_unlock(&nso->nso_lock); NFS_SOCK_DBG("nfs connect %s reaping socket %p %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, nso->nso_error); nfs_socket_search_update_error(nss, nso->nso_error); TAILQ_REMOVE(&nss->nss_socklist, nso, nso_link); nss->nss_sockcnt--; nfs_socket_destroy(nso); /* If there are more sockets to try, force the starting of another socket */ - if (nss->nss_addrcnt > 0) + if (nss->nss_addrcnt > 0) { nss->nss_last = -2; + } } } @@ -954,20 +997,23 @@ nfs_connect_search_check(struct nfsmount *nmp, struct nfs_socket_search *nss, st int error; /* log a warning if connect is taking a while */ - if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE|NSS_WARNED)) == NSS_VERBOSE)) { + if (((now->tv_sec - nss->nss_timestamp) >= 8) && ((nss->nss_flags & (NSS_VERBOSE | NSS_WARNED)) == NSS_VERBOSE)) { printf("nfs_connect: socket connect taking a while for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname); nss->nss_flags |= NSS_WARNED; } - if (nmp->nm_sockflags & NMSOCK_UNMOUNT) - return (EINTR); - if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) - return (error); + if (nmp->nm_sockflags & NMSOCK_UNMOUNT) { + return EINTR; + } + if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) { + return error; + } /* If we were succesfull at sending a ping, wait up to a second for a reply */ - if (nss->nss_last >= 0) + if (nss->nss_last >= 0) { tsleep(nss, PSOCK, "nfs_connect_search_wait", hz); - - return (0); + } + + return 0; } @@ -981,7 +1027,7 @@ nfs_connect_search_loop(struct nfsmount *nmp, struct nfs_socket_search *nss) struct timeval now; int error; int verbose = (nss->nss_flags & NSS_VERBOSE); - + loop: microuptime(&now); NFS_SOCK_DBG("nfs connect %s search %ld\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, now.tv_sec); @@ -990,9 +1036,9 @@ loop: error = nfs_connect_search_new_socket(nmp, nss, &now); if (error) { NFS_SOCK_DBG("nfs connect returned %d\n", error); - return (error); + return error; } - + /* check each active socket on the list and try to push it along */ TAILQ_FOREACH(nso, &nss->nss_socklist, nso_link) { lck_mtx_lock(&nso->nso_lock); @@ -1006,8 +1052,8 @@ loop: } /* If the socket hasn't been verified or in a ping, ping it. 
We also handle UDP retransmits */ - if (!(nso->nso_flags & (NSO_PINGING|NSO_VERIFIED)) || - ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp+2))) { + if (!(nso->nso_flags & (NSO_PINGING | NSO_VERIFIED)) || + ((nso->nso_sotype == SOCK_DGRAM) && (now.tv_sec >= nso->nso_reqtimestamp + 2))) { if (!nfs_connect_search_ping(nmp, nso, &now)) { lck_mtx_unlock(&nso->nso_lock); continue; @@ -1023,10 +1069,10 @@ loop: } lck_mtx_unlock(&nso->nso_lock); } - + /* Check for timed out sockets and mark as dead and then remove all dead sockets. */ nfs_connect_search_socket_reap(nmp, nss, &now); - + /* * Keep looping if we haven't found a socket yet and we have more * sockets to (continue to) try. @@ -1034,12 +1080,13 @@ loop: error = 0; if (!nss->nss_sock && (!TAILQ_EMPTY(&nss->nss_socklist) || nss->nss_addrcnt)) { error = nfs_connect_search_check(nmp, nss, &now); - if (!error) + if (!error) { goto loop; + } } NFS_SOCK_DBG("nfs connect %s returning %d\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, error); - return (error); + return error; } /* @@ -1069,31 +1116,33 @@ nfs_connect(struct nfsmount *nmp, int verbose, int timeo) struct timeval now, start; int error, savederror, nfsvers; int tryv4 = 1; - uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM; + uint8_t sotype = nmp->nm_sotype ? nmp->nm_sotype : SOCK_STREAM; fhandle_t *fh = NULL; char *path = NULL; in_port_t port; int addrtotal = 0; - + /* paranoia... check that we have at least one address in the locations */ uint32_t loc, serv; - for (loc=0; loc < nmp->nm_locations.nl_numlocs; loc++) { - for (serv=0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) { + for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) { + for (serv = 0; serv < nmp->nm_locations.nl_locations[loc]->nl_servcount; serv++) { addrtotal += nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount; - if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) + if (nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount == 0) { NFS_SOCK_DBG("nfs connect %s search, server %s has no addresses\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, - nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, + nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name); + } } } if (addrtotal == 0) { NFS_SOCK_DBG("nfs connect %s search failed, no addresses\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname); - return (EINVAL); - } else + vfs_statfs(nmp->nm_mountp)->f_mntfromname); + return EINVAL; + } else { NFS_SOCK_DBG("nfs connect %s has %d addresses\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, addrtotal); + } lck_mtx_lock(&nmp->nm_lock); nmp->nm_sockflags |= NMSOCK_CONNECTING; @@ -1112,8 +1161,9 @@ tryagain: nss.nss_startloc = nmp->nm_locations.nl_current; nss.nss_timestamp = start.tv_sec; nss.nss_timeo = timeo; - if (verbose) + if (verbose) { nss.nss_flags |= NSS_VERBOSE; + } /* First time connecting, we may need to negotiate some things */ if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) { @@ -1148,8 +1198,9 @@ tryagain: * if no port is specified on the mount; * Note nm_vers is set so we will only try NFS_VER4. 
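Taken together, the connect logic here implements a fallback ladder: an unspecified-port NFSv4 mount first tries TCP directly to the well-known NFS port, falls back to the portmapper (NSS_FALLBACK2PMAP), and a TCP failure on a mount with no fixed socket type retries the whole search over UDP. A compressed control-flow sketch, not the real code; search(), flags, and mount_fixed_sotype are hypothetical stand-ins:

    /* Sketch: order of fallbacks in nfs_connect(). */
    for (;;) {
        error = search(sotype, port);        /* hypothetical helper */
        if (!error)
            break;
        if (flags & NSS_FALLBACK2PMAP) {     /* v4 direct-port attempt failed */
            flags &= ~NSS_FALLBACK2PMAP;
            port = PMAPPORT;                 /* ask the portmapper instead */
            continue;
        }
        if (sotype == SOCK_STREAM && !mount_fixed_sotype) {
            sotype = SOCK_DGRAM;             /* last resort: retry over UDP */
            continue;
        }
        break;                               /* give up, report the error */
    }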
*/ - if (!nmp->nm_nfsport) + if (!nmp->nm_nfsport) { nss.nss_flags |= NSS_FALLBACK2PMAP; + } } else { nss.nss_port = PMAPPORT; nss.nss_protocol = PMAPPROG; @@ -1170,8 +1221,8 @@ tryagain: } } NFS_SOCK_DBG("nfs connect first %s, so type %d port %d prot %d %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port, - nss.nss_protocol, nss.nss_version); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port, + nss.nss_protocol, nss.nss_version); } else { /* we've connected before, just connect to NFS port */ if (!nmp->nm_nfsport) { @@ -1185,8 +1236,8 @@ tryagain: nss.nss_version = nmp->nm_vers; } NFS_SOCK_DBG("nfs connect %s, so type %d port %d prot %d %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port, - nss.nss_protocol, nss.nss_version); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nss.nss_sotype, nss.nss_port, + nss.nss_protocol, nss.nss_version); } /* Set next location to first valid location. */ @@ -1197,8 +1248,8 @@ tryagain: nfs_location_next(&nmp->nm_locations, &nss.nss_nextloc); if (!nfs_location_index_cmp(&nss.nss_nextloc, &nss.nss_startloc)) { NFS_SOCK_DBG("nfs connect %s search failed, couldn't find a valid location index\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname); - return (ENOENT); + vfs_statfs(nmp->nm_mountp)->f_mntfromname); + return ENOENT; } } nss.nss_last = -1; @@ -1212,7 +1263,7 @@ keepsearching: if (nss.nss_flags & NSS_FALLBACK2PMAP) { tryv4 = 0; NFS_SOCK_DBG("nfs connect %s TCP failed for V4 %d %d, trying PORTMAP\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error); goto tryagain; } @@ -1221,25 +1272,29 @@ keepsearching: sotype = SOCK_DGRAM; savederror = nss.nss_error; NFS_SOCK_DBG("nfs connect %s TCP failed %d %d, trying UDP\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, error, nss.nss_error); goto tryagain; } - if (!error) + if (!error) { error = nss.nss_error ? nss.nss_error : ETIMEDOUT; + } lck_mtx_lock(&nmp->nm_lock); nmp->nm_sockflags &= ~NMSOCK_CONNECTING; nmp->nm_nss = NULL; lck_mtx_unlock(&nmp->nm_lock); - if (nss.nss_flags & NSS_WARNED) + if (nss.nss_flags & NSS_WARNED) { log(LOG_INFO, "nfs_connect: socket connect aborted for %s\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname); - if (fh) + vfs_statfs(nmp->nm_mountp)->f_mntfromname); + } + if (fh) { FREE(fh, M_TEMP); - if (path) + } + if (path) { FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + } NFS_SOCK_DBG("nfs connect %s search failed, returning %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, error); - return (error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, error); + return error; } /* try to use nss_sock */ @@ -1247,14 +1302,15 @@ keepsearching: nss.nss_sock = NULL; /* We may be speaking to portmap first... to determine port(s). */ - if (nso->nso_saddr->sa_family == AF_INET) + if (nso->nso_saddr->sa_family == AF_INET) { port = ntohs(((struct sockaddr_in*)nso->nso_saddr)->sin_port); - else + } else { port = ntohs(((struct sockaddr_in6*)nso->nso_saddr)->sin6_port); + } if (port == PMAPPORT) { /* Use this portmapper port to get the port #s we need. 
*/ NFS_SOCK_DBG("nfs connect %s got portmapper socket %p\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso); /* remove the connect upcall so nfs_portmap_lookup() can use this socket */ sock_setupcall(nso->nso_so, NULL, NULL); @@ -1265,33 +1321,41 @@ keepsearching: /* If NFS version not set, try nm_max_vers down to nm_min_vers */ nfsvers = nmp->nm_vers ? nmp->nm_vers : PVER2MAJOR(nmp->nm_max_vers); if (!(port = nmp->nm_nfsport)) { - if (ss.ss_family == AF_INET) + if (ss.ss_family == AF_INET) { ((struct sockaddr_in*)&ss)->sin_port = htons(0); - else if (ss.ss_family == AF_INET6) + } else if (ss.ss_family == AF_INET6) { ((struct sockaddr_in6*)&ss)->sin6_port = htons(0); + } for (; nfsvers >= (int)PVER2MAJOR(nmp->nm_min_vers); nfsvers--) { - if (nmp->nm_vers && nmp->nm_vers != nfsvers) + if (nmp->nm_vers && nmp->nm_vers != nfsvers) { continue; /* Wrong version */ - if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) + } + if (nfsvers == NFS_VER4 && nso->nso_sotype == SOCK_DGRAM) { continue; /* NFSv4 does not do UDP */ + } error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, - nso->nso_so, NFS_PROG, nfsvers, - (nso->nso_sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP, timeo); + nso->nso_so, NFS_PROG, nfsvers, + (nso->nso_sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP, timeo); if (!error) { - if (ss.ss_family == AF_INET) + if (ss.ss_family == AF_INET) { port = ntohs(((struct sockaddr_in*)&ss)->sin_port); - else if (ss.ss_family == AF_INET6) + } else if (ss.ss_family == AF_INET6) { port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port); - if (!port) + } + if (!port) { error = EPROGUNAVAIL; - if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) + } + if (port == NFS_PORT && nfsvers == NFS_VER4 && tryv4 == 0) { continue; /* We already tried this */ + } } - if (!error) + if (!error) { break; + } } - if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) + if (nfsvers < (int)PVER2MAJOR(nmp->nm_min_vers) && error == 0) { error = EPROGUNAVAIL; + } if (error) { nfs_socket_search_update_error(&nss, error); nfs_socket_destroy(nso); @@ -1301,7 +1365,7 @@ keepsearching: /* Create NFS protocol socket and add it to the list of sockets. */ /* N.B. If nfsvers is NFS_VER4 at this point then we're on a non standard port */ error = nfs_socket_create(nmp, (struct sockaddr*)&ss, nso->nso_sotype, port, - NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs); + NFS_PROG, nfsvers, NMFLAG(nmp, RESVPORT), &nsonfs); if (error) { nfs_socket_search_update_error(&nss, error); nfs_socket_destroy(nso); @@ -1323,33 +1387,39 @@ keepsearching: error = 0; bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len); port = nmp->nm_mountport; - if (ss.ss_family == AF_INET) + if (ss.ss_family == AF_INET) { ((struct sockaddr_in*)&ss)->sin_port = htons(port); - else if (ss.ss_family == AF_INET6) + } else if (ss.ss_family == AF_INET6) { ((struct sockaddr_in6*)&ss)->sin6_port = htons(port); + } if (!port) { /* Get port/sockaddr for MOUNT version corresponding to NFS version. */ /* If NFS version is unknown, optimistically choose for NFSv3. */ int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3; int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? 
IPPROTO_UDP : IPPROTO_TCP; error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, - nso->nso_so, RPCPROG_MNT, mntvers, mntproto, timeo); + nso->nso_so, RPCPROG_MNT, mntvers, mntproto, timeo); } if (!error) { - if (ss.ss_family == AF_INET) + if (ss.ss_family == AF_INET) { port = ntohs(((struct sockaddr_in*)&ss)->sin_port); - else if (ss.ss_family == AF_INET6) + } else if (ss.ss_family == AF_INET6) { port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port); - if (!port) + } + if (!port) { error = EPROGUNAVAIL; + } } /* create sockaddr for MOUNT */ - if (!error) - MALLOC(nsonfs->nso_saddr2, struct sockaddr *, ss.ss_len, M_SONAME, M_WAITOK|M_ZERO); - if (!error && !nsonfs->nso_saddr2) + if (!error) { + MALLOC(nsonfs->nso_saddr2, struct sockaddr *, ss.ss_len, M_SONAME, M_WAITOK | M_ZERO); + } + if (!error && !nsonfs->nso_saddr2) { error = ENOMEM; - if (!error) + } + if (!error) { bcopy(&ss, nsonfs->nso_saddr2, ss.ss_len); + } if (error) { lck_mtx_lock(&nsonfs->nso_lock); nsonfs->nso_error = error; @@ -1375,39 +1445,47 @@ keepsearching: /* Need sockaddr for MOUNT port */ bcopy(nso->nso_saddr, &ss, nso->nso_saddr->sa_len); port = nmp->nm_mountport; - if (ss.ss_family == AF_INET) + if (ss.ss_family == AF_INET) { ((struct sockaddr_in*)&ss)->sin_port = htons(port); - else if (ss.ss_family == AF_INET6) + } else if (ss.ss_family == AF_INET6) { ((struct sockaddr_in6*)&ss)->sin6_port = htons(port); + } if (!port) { /* Get port/sockaddr for MOUNT version corresponding to NFS version. */ int mntvers = (nfsvers == NFS_VER2) ? RPCMNT_VER1 : RPCMNT_VER3; int mntproto = (NM_OMFLAG(nmp, MNTUDP) || (nso->nso_sotype == SOCK_DGRAM)) ? IPPROTO_UDP : IPPROTO_TCP; error = nfs_portmap_lookup(nmp, vfs_context_current(), (struct sockaddr*)&ss, - NULL, RPCPROG_MNT, mntvers, mntproto, timeo); - if (ss.ss_family == AF_INET) + NULL, RPCPROG_MNT, mntvers, mntproto, timeo); + if (ss.ss_family == AF_INET) { port = ntohs(((struct sockaddr_in*)&ss)->sin_port); - else if (ss.ss_family == AF_INET6) + } else if (ss.ss_family == AF_INET6) { port = ntohs(((struct sockaddr_in6*)&ss)->sin6_port); + } } if (!error) { - if (port) + if (port) { saddr = (struct sockaddr*)&ss; - else + } else { error = EPROGUNAVAIL; + } } } - if (saddr) - MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK|M_ZERO); - if (saddr && fh) + if (saddr) { + MALLOC(fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO); + } + if (saddr && fh) { MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + } if (!saddr || !fh || !path) { - if (!error) + if (!error) { error = ENOMEM; - if (fh) + } + if (fh) { FREE(fh, M_TEMP); - if (path) + } + if (path) { FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + } fh = NULL; path = NULL; nfs_socket_search_update_error(&nss, error); @@ -1416,9 +1494,9 @@ keepsearching: } nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, path, MAXPATHLEN, 1); error = nfs3_mount_rpc(nmp, saddr, nso->nso_sotype, nfsvers, - path, vfs_context_current(), timeo, fh, &nmp->nm_servsec); + path, vfs_context_current(), timeo, fh, &nmp->nm_servsec); NFS_SOCK_DBG("nfs connect %s socket %p mount %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); if (!error) { /* Make sure we can agree on a security flavor. 
*/ int o, s; /* indices into mount option and server security flavor lists */ @@ -1436,24 +1514,27 @@ keepsearching: nmp->nm_auth = nmp->nm_sec.flavors[0]; found = 1; } - for (o=0; !found && (o < nmp->nm_sec.count); o++) - for (s=0; !found && (s < nmp->nm_servsec.count); s++) + for (o = 0; !found && (o < nmp->nm_sec.count); o++) { + for (s = 0; !found && (s < nmp->nm_servsec.count); s++) { if (nmp->nm_sec.flavors[o] == nmp->nm_servsec.flavors[s]) { nmp->nm_auth = nmp->nm_sec.flavors[o]; found = 1; } + } + } } else { /* Choose the first one we support from the server's list. */ if (!nmp->nm_servsec.count) { nmp->nm_auth = RPCAUTH_SYS; found = 1; } - for (s=0; s < nmp->nm_servsec.count; s++) + for (s = 0; s < nmp->nm_servsec.count; s++) { switch (nmp->nm_servsec.flavors[s]) { case RPCAUTH_SYS: /* prefer RPCAUTH_SYS to RPCAUTH_NONE */ - if (found && (nmp->nm_auth == RPCAUTH_NONE)) + if (found && (nmp->nm_auth == RPCAUTH_NONE)) { found = 0; + } case RPCAUTH_NONE: case RPCAUTH_KRB5: case RPCAUTH_KRB5I: @@ -1464,6 +1545,7 @@ keepsearching: } break; } + } } error = !found ? EAUTH : 0; } @@ -1476,8 +1558,9 @@ keepsearching: nfs_socket_destroy(nso); goto keepsearching; } - if (nmp->nm_fh) + if (nmp->nm_fh) { FREE(nmp->nm_fh, M_TEMP); + } nmp->nm_fh = fh; fh = NULL; NFS_BITMAP_SET(nmp->nm_flags, NFS_MFLAG_CALLUMNT); @@ -1494,44 +1577,49 @@ keepsearching: if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) { /* set mntfromname to this location */ - if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) + if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) { nfs_location_mntfromname(&nmp->nm_locations, nso->nso_location, - vfs_statfs(nmp->nm_mountp)->f_mntfromname, - sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, + sizeof(vfs_statfs(nmp->nm_mountp)->f_mntfromname), 0); + } /* some negotiated values need to remain unchanged for the life of the mount */ - if (!nmp->nm_sotype) + if (!nmp->nm_sotype) { nmp->nm_sotype = nso->nso_sotype; + } if (!nmp->nm_vers) { nmp->nm_vers = nfsvers; /* If we negotiated NFSv4, set nm_nfsport if we ended up on the standard NFS port */ if ((nfsvers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) { - if (nso->nso_saddr->sa_family == AF_INET) + if (nso->nso_saddr->sa_family == AF_INET) { port = ((struct sockaddr_in*)nso->nso_saddr)->sin_port = htons(port); - else if (nso->nso_saddr->sa_family == AF_INET6) + } else if (nso->nso_saddr->sa_family == AF_INET6) { port = ((struct sockaddr_in6*)nso->nso_saddr)->sin6_port = htons(port); - else + } else { port = 0; - if (port == NFS_PORT) + } + if (port == NFS_PORT) { nmp->nm_nfsport = NFS_PORT; + } } } /* do some version-specific pre-mount set up */ if (nmp->nm_vers >= NFS_VER4) { microtime(&now); nmp->nm_mounttime = ((uint64_t)now.tv_sec << 32) | now.tv_usec; - if (!NMFLAG(nmp, NOCALLBACK)) + if (!NMFLAG(nmp, NOCALLBACK)) { nfs4_mount_callback_setup(nmp); + } } } /* Initialize NFS socket state variables */ lck_mtx_lock(&nmp->nm_lock); nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] = - nmp->nm_srtt[3] = (NFS_TIMEO << 3); + nmp->nm_srtt[3] = (NFS_TIMEO << 3); nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] = - nmp->nm_sdrtt[3] = 0; + nmp->nm_sdrtt[3] = 0; if (nso->nso_sotype == SOCK_DGRAM) { - nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */ + nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */ nmp->nm_sent = 0; } else if (nso->nso_sotype == SOCK_STREAM) { nmp->nm_timeouts = 0; @@ -1552,21 +1640,25 @@ keepsearching: } if (error) { NFS_SOCK_DBG("nfs connect %s socket %p 
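The flavor negotiation above is a first-match intersection: the mount options' flavor list is scanned in order against what the server offered, and if the mount specified nothing, the first supported server flavor wins (with RPCAUTH_SYS preferred over RPCAUTH_NONE). A standalone sketch of the first case:

    /* Sketch: first mount-option flavor the server also offers, else EAUTH. */
    static int
    pick_flavor(const uint32_t *want, int nwant,
        const uint32_t *have, int nhave, uint32_t *out)
    {
        for (int o = 0; o < nwant; o++)
            for (int s = 0; s < nhave; s++)
                if (want[o] == have[s]) {
                    *out = want[o];          /* mount-option order wins */
                    return 0;
                }
        return EAUTH;                        /* no common flavor */
    }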
setup failed %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, nso, error); nfs_socket_search_update_error(&nss, error); nmp->nm_saddr = oldsaddr; if (!(nmp->nm_sockflags & NMSOCK_HASCONNECTED)) { /* undo settings made prior to setup */ - if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) + if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_SOCKET_TYPE)) { nmp->nm_sotype = 0; + } if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_VERSION)) { if (nmp->nm_vers >= NFS_VER4) { - if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) + if (!NFS_BITMAP_ISSET(nmp->nm_mattrs, NFS_MATTR_NFS_PORT)) { nmp->nm_nfsport = 0; - if (nmp->nm_cbid) + } + if (nmp->nm_cbid) { nfs4_mount_callback_shutdown(nmp); - if (IS_VALID_CRED(nmp->nm_mcred)) + } + if (IS_VALID_CRED(nmp->nm_mcred)) { kauth_cred_unref(&nmp->nm_mcred); + } bzero(&nmp->nm_un, sizeof(nmp->nm_un)); } nmp->nm_vers = 0; @@ -1593,21 +1685,25 @@ keepsearching: } lck_mtx_unlock(&nmp->nm_lock); - if (oldsaddr) + if (oldsaddr) { FREE(oldsaddr, M_SONAME); + } - if (nss.nss_flags & NSS_WARNED) + if (nss.nss_flags & NSS_WARNED) { log(LOG_INFO, "nfs_connect: socket connect completed for %s\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname); + vfs_statfs(nmp->nm_mountp)->f_mntfromname); + } nmp->nm_nss = NULL; nfs_socket_search_cleanup(&nss); - if (fh) + if (fh) { FREE(fh, M_TEMP); - if (path) + } + if (path) { FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + } NFS_SOCK_DBG("nfs connect %s success\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname); - return (0); + return 0; } @@ -1633,7 +1729,7 @@ nfs_connect_setup(struct nfsmount *nmp) } error = nfs4_setclientid(nmp); } - return (error); + return error; } /* @@ -1667,10 +1763,12 @@ nfs_reconnect(struct nfsmount *nmp) while ((error = nfs_connect(nmp, verbose, timeo))) { verbose = 0; nfs_disconnect(nmp); - if ((error == EINTR) || (error == ERESTART)) - return (EINTR); - if (error == EIO) - return (EIO); + if ((error == EINTR) || (error == ERESTART)) { + return EINTR; + } + if (error == EIO) { + return EIO; + } microuptime(&now); if ((lastmsg + nmp->nm_tprintf_delay) < now.tv_sec) { lastmsg = now.tv_sec; @@ -1683,27 +1781,29 @@ nfs_reconnect(struct nfsmount *nmp) /* we can't reconnect, so we fail */ lck_mtx_unlock(&nmp->nm_lock); NFS_SOCK_DBG("Not mounted returning %d\n", error); - return (error); + return error; } if (nfs_mount_check_dead_timeout(nmp)) { nfs_mount_make_zombie(nmp); lck_mtx_unlock(&nmp->nm_lock); - return (ENXIO); + return ENXIO; } - + if ((error = nfs_sigintr(nmp, NULL, thd, 1))) { lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } lck_mtx_unlock(&nmp->nm_lock); - tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2*hz); - if ((error = nfs_sigintr(nmp, NULL, thd, 0))) - return (error); + tsleep(nfs_reconnect, PSOCK, "nfs_reconnect_delay", 2 * hz); + if ((error = nfs_sigintr(nmp, NULL, thd, 0))) { + return error; + } } - if (wentdown) + if (wentdown) { nfs_up(nmp, thd, NFSSTA_TIMEO, "connected"); + } /* * Loop through outstanding request list and mark all requests @@ -1718,14 +1818,15 @@ nfs_reconnect(struct nfsmount *nmp) rq->r_flags |= R_MUSTRESEND; rq->r_rtt = -1; wakeup(rq); - if ((rq->r_flags & (R_IOD|R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC) + if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) { nfs_asyncio_resend(rq); + } } lck_mtx_unlock(&rq->r_mtx); } } lck_mtx_unlock(nfs_request_mutex); - return (0); + return 0; } /* @@ -1742,19 +1843,20 @@ tryagain: struct timespec ts = { 1, 0 }; 
if (nmp->nm_state & NFSSTA_SENDING) { /* wait for sending to complete */ nmp->nm_state |= NFSSTA_WANTSND; - msleep(&nmp->nm_state, &nmp->nm_lock, PZERO-1, "nfswaitsending", &ts); + msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitsending", &ts); goto tryagain; } if (nmp->nm_sockflags & NMSOCK_POKE) { /* wait for poking to complete */ - msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO-1, "nfswaitpoke", &ts); + msleep(&nmp->nm_sockflags, &nmp->nm_lock, PZERO - 1, "nfswaitpoke", &ts); goto tryagain; } nmp->nm_sockflags |= NMSOCK_DISCONNECTING; nmp->nm_sockflags &= ~NMSOCK_READY; nso = nmp->nm_nso; nmp->nm_nso = NULL; - if (nso->nso_saddr == nmp->nm_saddr) + if (nso->nso_saddr == nmp->nm_saddr) { nso->nso_saddr = NULL; + } lck_mtx_unlock(&nmp->nm_lock); nfs_socket_destroy(nso); lck_mtx_lock(&nmp->nm_lock); @@ -1774,7 +1876,7 @@ nfs_need_reconnect(struct nfsmount *nmp) struct nfsreq *rq; lck_mtx_lock(&nmp->nm_lock); - nmp->nm_sockflags &= ~(NMSOCK_READY|NMSOCK_SETUP); + nmp->nm_sockflags &= ~(NMSOCK_READY | NMSOCK_SETUP); lck_mtx_unlock(&nmp->nm_lock); /* @@ -1789,8 +1891,9 @@ nfs_need_reconnect(struct nfsmount *nmp) rq->r_flags |= R_MUSTRESEND; rq->r_rtt = -1; wakeup(rq); - if ((rq->r_flags & (R_IOD|R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC) + if ((rq->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) { nfs_asyncio_resend(rq); + } } lck_mtx_unlock(&rq->r_mtx); } @@ -1816,16 +1919,16 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) lck_mtx_lock(&nmp->nm_lock); while (!(nmp->nm_sockflags & NMSOCK_READY) || - !TAILQ_EMPTY(&nmp->nm_resendq) || - !LIST_EMPTY(&nmp->nm_monlist) || - nmp->nm_deadto_start || - (nmp->nm_state & NFSSTA_RECOVER) || - ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) - { - if (nmp->nm_sockflags & NMSOCK_UNMOUNT) + !TAILQ_EMPTY(&nmp->nm_resendq) || + !LIST_EMPTY(&nmp->nm_monlist) || + nmp->nm_deadto_start || + (nmp->nm_state & NFSSTA_RECOVER) || + ((nmp->nm_vers >= NFS_VER4) && !TAILQ_EMPTY(&nmp->nm_dreturnq))) { + if (nmp->nm_sockflags & NMSOCK_UNMOUNT) { break; + } /* do reconnect, if necessary */ - if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) { + if (!(nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) { if (nmp->nm_reconnect_start <= 0) { microuptime(&now); nmp->nm_reconnect_start = now.tv_sec; @@ -1833,12 +1936,13 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) lck_mtx_unlock(&nmp->nm_lock); NFS_SOCK_DBG("nfs reconnect %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname); /* - * XXX We don't want to call reconnect again right away if returned errors + * XXX We don't want to call reconnect again right away if returned errors * before that may not have blocked. This has caused spamming null procs * from machines in the pass. */ - if (do_reconnect_sleep) + if (do_reconnect_sleep) { tsleep(nfs_mount_sock_thread, PSOCK, "nfs_reconnect_sock_thread_delay", hz); + } error = nfs_reconnect(nmp); if (error) { int lvl = 7; @@ -1846,7 +1950,7 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) lvl = (do_reconnect_sleep++ % 600) ? 
7 : 0; } nfs_printf(NFS_FAC_SOCK, lvl, "nfs reconnect %s: returned %d\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, error); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, error); } else { nmp->nm_reconnect_start = 0; do_reconnect_sleep = 0; @@ -1856,30 +1960,33 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) if ((nmp->nm_sockflags & NMSOCK_READY) && (nmp->nm_state & NFSSTA_RECOVER) && !(nmp->nm_sockflags & NMSOCK_UNMOUNT) && - !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) { + !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) { /* perform state recovery */ lck_mtx_unlock(&nmp->nm_lock); nfs_recover(nmp); lck_mtx_lock(&nmp->nm_lock); } /* handle NFSv4 delegation returns */ - while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) && - (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) && - ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) { + while ((nmp->nm_vers >= NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && + (nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER) && + ((np = TAILQ_FIRST(&nmp->nm_dreturnq)))) { lck_mtx_unlock(&nmp->nm_lock); nfs4_delegation_return(np, R_RECOVER, thd, nmp->nm_mcred); lck_mtx_lock(&nmp->nm_lock); } /* do resends, if necessary/possible */ while ((((nmp->nm_sockflags & NMSOCK_READY) && !(nmp->nm_state & NFSSTA_RECOVER)) || - (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) && - ((req = TAILQ_FIRST(&nmp->nm_resendq)))) { - if (req->r_resendtime) + (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) && + ((req = TAILQ_FIRST(&nmp->nm_resendq)))) { + if (req->r_resendtime) { microuptime(&now); - while (req && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) + } + while (req && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && req->r_resendtime && (now.tv_sec < req->r_resendtime)) { req = TAILQ_NEXT(req, r_rchain); - if (!req) + } + if (!req) { break; + } TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); req->r_rchain.tqe_next = NFSREQNOLIST; lck_mtx_unlock(&nmp->nm_lock); @@ -1890,8 +1997,9 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) req->r_flags &= ~R_RESENDQ; wakeup(req); lck_mtx_unlock(&req->r_mtx); - if (dofinish) + if (dofinish) { nfs_asyncio_finish(req); + } nfs_request_rele(req); lck_mtx_lock(&nmp->nm_lock); continue; @@ -1905,34 +2013,40 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) if (nfs_request_using_gss(req)) { nfs_gss_clnt_rpcdone(req); error = nfs_gss_clnt_args_restore(req); - if (error == ENEEDAUTH) + if (error == ENEEDAUTH) { req->r_xid = 0; + } } NFS_SOCK_DBG("nfs async%s restart: p %d x 0x%llx f 0x%x rtt %d\n", - nfs_request_using_gss(req) ? " gss" : "", req->r_procnum, req->r_xid, - req->r_flags, req->r_rtt); + nfs_request_using_gss(req) ? 
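The resend handling above walks nm_resendq in order but skips any request whose r_resendtime is still in the future (UDP retransmit pacing), so only requests that are actually due get resent. The selection step, condensed from the loop above:

    /* Sketch: find the first request eligible to resend right now. */
    req = TAILQ_FIRST(&nmp->nm_resendq);
    if (req && req->r_resendtime)
        microuptime(&now);
    while (req && req->r_resendtime && (now.tv_sec < req->r_resendtime))
        req = TAILQ_NEXT(req, r_rchain);     /* not due yet: look further */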
" gss" : "", req->r_procnum, req->r_xid, + req->r_flags, req->r_rtt); error = nfs_sigintr(nmp, req, req->r_thread, 0); - if (!error) + if (!error) { error = nfs_request_add_header(req); - if (!error) + } + if (!error) { error = nfs_request_send(req, 0); + } lck_mtx_lock(&req->r_mtx); - if (req->r_flags & R_RESENDQ) + if (req->r_flags & R_RESENDQ) { req->r_flags &= ~R_RESENDQ; - if (error) + } + if (error) { req->r_error = error; + } wakeup(req); dofinish = error && req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT); lck_mtx_unlock(&req->r_mtx); - if (dofinish) + if (dofinish) { nfs_asyncio_finish(req); + } nfs_request_rele(req); lck_mtx_lock(&nmp->nm_lock); error = 0; continue; } NFS_SOCK_DBG("nfs async resend: p %d x 0x%llx f 0x%x rtt %d\n", - req->r_procnum, req->r_xid, req->r_flags, req->r_rtt); + req->r_procnum, req->r_xid, req->r_flags, req->r_rtt); error = nfs_sigintr(nmp, req, req->r_thread, 0); if (!error) { req->r_flags |= R_SENDING; @@ -1940,8 +2054,9 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) error = nfs_send(req, 0); lck_mtx_lock(&req->r_mtx); if (!error) { - if (req->r_flags & R_RESENDQ) + if (req->r_flags & R_RESENDQ) { req->r_flags &= ~R_RESENDQ; + } wakeup(req); lck_mtx_unlock(&req->r_mtx); nfs_request_rele(req); @@ -1950,13 +2065,15 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) } } req->r_error = error; - if (req->r_flags & R_RESENDQ) + if (req->r_flags & R_RESENDQ) { req->r_flags &= ~R_RESENDQ; + } wakeup(req); dofinish = req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT); lck_mtx_unlock(&req->r_mtx); - if (dofinish) + if (dofinish) { nfs_asyncio_finish(req); + } nfs_request_rele(req); lck_mtx_lock(&nmp->nm_lock); } @@ -1964,21 +2081,24 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) nfs_mount_make_zombie(nmp); break; } - - if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) + + if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) { break; + } /* check monitored nodes, if necessary/possible */ if (!LIST_EMPTY(&nmp->nm_monlist)) { nmp->nm_state |= NFSSTA_MONITOR_SCAN; LIST_FOREACH(np, &nmp->nm_monlist, n_monlink) { if (!(nmp->nm_sockflags & NMSOCK_READY) || - (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE|NFSSTA_DEAD))) + (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) { break; + } np->n_mflag |= NMMONSCANINPROG; lck_mtx_unlock(&nmp->nm_lock); - error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED|NGA_MONITOR)); - if (!error && ISSET(np->n_flag, NUPDATESIZE)) /* update quickly to avoid multiple events */ + error = nfs_getattr(np, NULL, vfs_context_kernel(), (NGA_UNCACHED | NGA_MONITOR)); + if (!error && ISSET(np->n_flag, NUPDATESIZE)) { /* update quickly to avoid multiple events */ nfs_data_update_size(np, 0); + } lck_mtx_lock(&nmp->nm_lock); np->n_mflag &= ~NMMONSCANINPROG; if (np->n_mflag & NMMONSCANWANT) { @@ -1986,19 +2106,22 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) wakeup(&np->n_mflag); } if (error || !(nmp->nm_sockflags & NMSOCK_READY) || - (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING|NFSSTA_FORCE|NFSSTA_DEAD))) + (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING | NFSSTA_FORCE | NFSSTA_DEAD))) { break; + } } nmp->nm_state &= ~NFSSTA_MONITOR_SCAN; - if (nmp->nm_state & NFSSTA_UNMOUNTING) + if (nmp->nm_state & NFSSTA_UNMOUNTING) { wakeup(&nmp->nm_state); /* let unmounting thread know scan is done */ + } } - if ((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER|NFSSTA_UNMOUNTING))) { + if 
((nmp->nm_sockflags & NMSOCK_READY) || (nmp->nm_state & (NFSSTA_RECOVER | NFSSTA_UNMOUNTING))) { if (nmp->nm_deadto_start || !TAILQ_EMPTY(&nmp->nm_resendq) || - (nmp->nm_state & NFSSTA_RECOVER)) + (nmp->nm_state & NFSSTA_RECOVER)) { ts.tv_sec = 1; - else + } else { ts.tv_sec = 5; + } msleep(&nmp->nm_sockthd, &nmp->nm_lock, PSOCK, "nfssockthread", &ts); } } @@ -2006,15 +2129,16 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) /* If we're unmounting, send the unmount RPC, if requested/appropriate. */ if ((nmp->nm_sockflags & NMSOCK_UNMOUNT) && (nmp->nm_state & NFSSTA_MOUNTED) && NMFLAG(nmp, CALLUMNT) && - (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) { + (nmp->nm_vers < NFS_VER4) && !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) { lck_mtx_unlock(&nmp->nm_lock); nfs3_umount_rpc(nmp, vfs_context_kernel(), - (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2); + (nmp->nm_sockflags & NMSOCK_READY) ? 6 : 2); lck_mtx_lock(&nmp->nm_lock); } - if (nmp->nm_sockthd == thd) + if (nmp->nm_sockthd == thd) { nmp->nm_sockthd = NULL; + } lck_mtx_unlock(&nmp->nm_lock); wakeup(&nmp->nm_sockthd); thread_terminate(thd); @@ -2024,10 +2148,11 @@ nfs_mount_sock_thread(void *arg, __unused wait_result_t wr) void nfs_mount_sock_thread_wake(struct nfsmount *nmp) { - if (nmp->nm_sockthd) + if (nmp->nm_sockthd) { wakeup(&nmp->nm_sockthd); - else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) + } else if (kernel_thread_start(nfs_mount_sock_thread, nmp, &nmp->nm_sockthd) == KERN_SUCCESS) { thread_deallocate(nmp->nm_sockthd); + } } /* @@ -2040,16 +2165,20 @@ nfs_mount_check_dead_timeout(struct nfsmount *nmp) { struct timeval now; - if (nmp->nm_state & NFSSTA_DEAD) + if (nmp->nm_state & NFSSTA_DEAD) { return 1; - if (nmp->nm_deadto_start == 0) + } + if (nmp->nm_deadto_start == 0) { return 0; + } nfs_is_squishy(nmp); - if (nmp->nm_curdeadtimeout <= 0) + if (nmp->nm_curdeadtimeout <= 0) { return 0; + } microuptime(&now); - if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) + if ((now.tv_sec - nmp->nm_deadto_start) < nmp->nm_curdeadtimeout) { return 0; + } return 1; } @@ -2064,15 +2193,17 @@ void nfs_mount_make_zombie(struct nfsmount *nmp) { fsid_t fsid; - - if (!nmp) + + if (!nmp) { return; + } - if (nmp->nm_state & NFSSTA_DEAD) + if (nmp->nm_state & NFSSTA_DEAD) { return; + } printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, - (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : ""); + (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? 
"squished " : ""); fsid = vfs_statfs(nmp->nm_mountp)->f_fsid; lck_mtx_unlock(&nmp->nm_lock); nfs_mount_zombie(nmp, NFSSTA_DEAD); @@ -2084,18 +2215,17 @@ nfs_mount_make_zombie(struct nfsmount *nmp) /* * NFS callback channel socket state */ -struct nfs_callback_socket -{ +struct nfs_callback_socket { TAILQ_ENTRY(nfs_callback_socket) ncbs_link; - socket_t ncbs_so; /* the socket */ - struct sockaddr_storage ncbs_saddr; /* socket address */ - struct nfs_rpc_record_state ncbs_rrs; /* RPC record parsing state */ - time_t ncbs_stamp; /* last accessed at */ - uint32_t ncbs_flags; /* see below */ + socket_t ncbs_so; /* the socket */ + struct sockaddr_storage ncbs_saddr; /* socket address */ + struct nfs_rpc_record_state ncbs_rrs; /* RPC record parsing state */ + time_t ncbs_stamp; /* last accessed at */ + uint32_t ncbs_flags; /* see below */ }; -#define NCBSOCK_UPCALL 0x0001 -#define NCBSOCK_UPCALLWANT 0x0002 -#define NCBSOCK_DEAD 0x0004 +#define NCBSOCK_UPCALL 0x0001 +#define NCBSOCK_UPCALLWANT 0x0002 +#define NCBSOCK_DEAD 0x0004 /* * NFS callback channel state @@ -2112,8 +2242,8 @@ in_port_t nfs4_cb_port = 0; in_port_t nfs4_cb_port6 = 0; uint32_t nfs4_cb_id = 0; uint32_t nfs4_cb_so_usecount = 0; -TAILQ_HEAD(nfs4_cb_sock_list,nfs_callback_socket) nfs4_cb_socks; -TAILQ_HEAD(nfs4_cb_mount_list,nfsmount) nfs4_cb_mounts; +TAILQ_HEAD(nfs4_cb_sock_list, nfs_callback_socket) nfs4_cb_socks; +TAILQ_HEAD(nfs4_cb_mount_list, nfsmount) nfs4_cb_mounts; int nfs4_cb_handler(struct nfs_callback_socket *, mbuf_t); @@ -2141,8 +2271,9 @@ nfs4_mount_callback_setup(struct nfsmount *nmp) nfs4_cb_id++; } nmp->nm_cbid = nfs4_cb_id++; - if (nmp->nm_cbid == 0) + if (nmp->nm_cbid == 0) { nmp->nm_cbid = nfs4_cb_id++; + } nfs4_cb_so_usecount++; TAILQ_INSERT_HEAD(&nfs4_cb_mounts, nmp, nm_cblink); @@ -2186,11 +2317,13 @@ nfs4_mount_callback_setup(struct nfsmount *nmp) timeo.tv_usec = 0; timeo.tv_sec = 60; error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)); - if (error) + if (error) { log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket rx timeout\n", error); + } error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo)); - if (error) + if (error) { log(LOG_INFO, "nfs callback setup: error %d setting IPv4 socket tx timeout\n", error); + } sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); sock_setsockopt(so, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on)); sock_setsockopt(so, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on)); @@ -2240,11 +2373,13 @@ ipv6_bind_again: timeo.tv_usec = 0; timeo.tv_sec = 60; error = sock_setsockopt(so6, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)); - if (error) + if (error) { log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket rx timeout\n", error); + } error = sock_setsockopt(so6, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo)); - if (error) + if (error) { log(LOG_INFO, "nfs callback setup: error %d setting IPv6 socket tx timeout\n", error); + } sock_setsockopt(so6, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); sock_setsockopt(so6, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on)); sock_setsockopt(so6, SOL_SOCKET, SO_UPCALLCLOSEWAIT, &on, sizeof(on)); @@ -2280,13 +2415,14 @@ nfs4_mount_callback_shutdown(struct nfsmount *nmp) struct nfs_callback_socket *ncbsp; socket_t so, so6; struct nfs4_cb_sock_list cb_socks; - struct timespec ts = {1,0}; + struct timespec ts = {1, 0}; lck_mtx_lock(nfs_global_mutex); TAILQ_REMOVE(&nfs4_cb_mounts, nmp, nm_cblink); /* wait for any callbacks in progress to complete */ - while (nmp->nm_cbrefs) + while 
(nmp->nm_cbrefs) { msleep(&nmp->nm_cbrefs, nfs_global_mutex, PSOCK, "cbshutwait", &ts); + } nmp->nm_cbid = 0; if (--nfs4_cb_so_usecount) { lck_mtx_unlock(nfs_global_mutex); @@ -2318,8 +2454,8 @@ nfs4_mount_callback_shutdown(struct nfsmount *nmp) /* * Check periodically for stale/unused nfs callback sockets */ -#define NFS4_CB_TIMER_PERIOD 30 -#define NFS4_CB_IDLE_MAX 300 +#define NFS4_CB_TIMER_PERIOD 30 +#define NFS4_CB_IDLE_MAX 300 void nfs4_callback_timer(__unused void *param0, __unused void *param1) { @@ -2336,8 +2472,9 @@ loop: microuptime(&now); TAILQ_FOREACH_SAFE(ncbsp, &nfs4_cb_socks, ncbs_link, nextncbsp) { if (!(ncbsp->ncbs_flags & NCBSOCK_DEAD) && - (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) + (now.tv_sec < (ncbsp->ncbs_stamp + NFS4_CB_IDLE_MAX))) { continue; + } TAILQ_REMOVE(&nfs4_cb_socks, ncbsp, ncbs_link); lck_mtx_unlock(nfs_global_mutex); sock_shutdown(ncbsp->ncbs_so, SHUT_RDWR); @@ -2348,7 +2485,7 @@ loop: } nfs4_callback_timer_on = 1; nfs_interval_timer_start(nfs4_callback_timer_call, - NFS4_CB_TIMER_PERIOD * 1000); + NFS4_CB_TIMER_PERIOD * 1000); lck_mtx_unlock(nfs_global_mutex); } @@ -2364,12 +2501,13 @@ nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag) struct timeval timeo, now; int error, on = 1, ip; - if (so == nfs4_cb_so) + if (so == nfs4_cb_so) { ip = 4; - else if (so == nfs4_cb_so6) + } else if (so == nfs4_cb_so6) { ip = 6; - else + } else { return; + } /* allocate/initialize a new nfs_callback_socket */ MALLOC(ncbsp, struct nfs_callback_socket *, sizeof(struct nfs_callback_socket), M_TEMP, M_WAITOK); @@ -2383,8 +2521,8 @@ nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag) /* accept a new socket */ error = sock_accept(so, (struct sockaddr*)&ncbsp->ncbs_saddr, - ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT, - nfs4_cb_rcv, ncbsp, &newso); + ncbsp->ncbs_saddr.ss_len, MSG_DONTWAIT, + nfs4_cb_rcv, ncbsp, &newso); if (error) { log(LOG_INFO, "nfs callback accept: error %d accepting IPv%d socket\n", error, ip); FREE(ncbsp, M_TEMP); @@ -2396,11 +2534,13 @@ nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag) timeo.tv_usec = 0; timeo.tv_sec = 60; error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo)); - if (error) + if (error) { log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket rx timeout\n", error, ip); + } error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo)); - if (error) + if (error) { log(LOG_INFO, "nfs callback socket: error %d setting IPv%d socket tx timeout\n", error, ip); + } sock_setsockopt(newso, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); sock_setsockopt(newso, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on)); @@ -2418,20 +2558,23 @@ nfs4_cb_accept(socket_t so, __unused void *arg, __unused int waitflag) /* verify it's from a host we have mounted */ TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) { /* check if socket's source address matches this mount's server address */ - if (!nmp->nm_saddr) + if (!nmp->nm_saddr) { continue; - if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) + } + if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) { break; + } } - if (!nmp) /* we don't want this socket, mark it dead */ + if (!nmp) { /* we don't want this socket, mark it dead */ ncbsp->ncbs_flags |= NCBSOCK_DEAD; + } /* make sure the callback socket cleanup timer is running */ /* (shorten the timer if we've got a socket we don't want) 
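 * [Editorial note, not part of the patch: the callback setup and accept
 * paths around these hunks repeat the same receive/send timeout
 * boilerplate three times. A minimal sketch of that pattern as a helper;
 * nfs4_cb_set_timeouts() is a hypothetical name, while sock_setsockopt()
 * is the KPI already used here:]
 *
 *	static int
 *	nfs4_cb_set_timeouts(socket_t so, int secs)
 *	{
 *		struct timeval timeo = { secs, 0 };
 *		int error;
 *
 *		// failures are advisory; callers log them and carry on
 *		error = sock_setsockopt(so, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
 *		if (!error) {
 *			error = sock_setsockopt(so, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
 *		}
 *		return error;
 *	}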
*/ if (!nfs4_callback_timer_on) { nfs4_callback_timer_on = 1; nfs_interval_timer_start(nfs4_callback_timer_call, - !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000)); + !nmp ? 500 : (NFS4_CB_TIMER_PERIOD * 1000)); } else if (!nmp && (nfs4_callback_timer_on < 2)) { nfs4_callback_timer_on = 2; thread_call_cancel(nfs4_callback_timer_call); @@ -2449,7 +2592,7 @@ void nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag) { struct nfs_callback_socket *ncbsp = arg; - struct timespec ts = {1,0}; + struct timespec ts = {1, 0}; struct timeval now; mbuf_t m; int error = 0, recv = 1; @@ -2466,8 +2609,9 @@ nfs4_cb_rcv(socket_t so, void *arg, __unused int waitflag) /* loop while we make error-free progress */ while (!error && recv) { error = nfs_rpc_record_read(so, &ncbsp->ncbs_rrs, MSG_DONTWAIT, &recv, &m); - if (m) /* handle the request */ + if (m) { /* handle the request */ error = nfs4_cb_handler(ncbsp, m); + } } /* note: no error and no data indicates server closed its end */ @@ -2514,30 +2658,32 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) xid = numops = op = status = procnum = taglen = cbid = 0; nfsm_chain_dissect_init(error, &nmreq, mreq); - nfsm_chain_get_32(error, &nmreq, xid); // RPC XID - nfsm_chain_get_32(error, &nmreq, val); // RPC Call + nfsm_chain_get_32(error, &nmreq, xid); // RPC XID + nfsm_chain_get_32(error, &nmreq, val); // RPC Call nfsm_assert(error, (val == RPC_CALL), EBADRPC); - nfsm_chain_get_32(error, &nmreq, val); // RPC Version + nfsm_chain_get_32(error, &nmreq, val); // RPC Version nfsm_assert(error, (val == RPC_VER2), ERPCMISMATCH); - nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number + nfsm_chain_get_32(error, &nmreq, val); // RPC Program Number nfsm_assert(error, (val == NFS4_CALLBACK_PROG), EPROGUNAVAIL); - nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number + nfsm_chain_get_32(error, &nmreq, val); // NFS Callback Program Version Number nfsm_assert(error, (val == NFS4_CALLBACK_PROG_VERSION), EPROGMISMATCH); - nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number + nfsm_chain_get_32(error, &nmreq, procnum); // NFS Callback Procedure Number nfsm_assert(error, (procnum <= NFSPROC4_CB_COMPOUND), EPROCUNAVAIL); /* Handle authentication */ /* XXX just ignore auth for now - handling kerberos may be tricky */ - nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor - nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length + nfsm_chain_get_32(error, &nmreq, auth_type); // RPC Auth Flavor + nfsm_chain_get_32(error, &nmreq, auth_len); // RPC Auth Length nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC); - if (!error && (auth_len > 0)) + if (!error && (auth_len > 0)) { nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len)); - nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE) - nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length + } + nfsm_chain_adv(error, &nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE) + nfsm_chain_get_32(error, &nmreq, auth_len); // verifier length nfsm_assert(error, (auth_len <= RPCAUTH_MAXSIZ), EBADRPC); - if (!error && (auth_len > 0)) + if (!error && (auth_len > 0)) { nfsm_chain_adv(error, &nmreq, nfsm_rndup(auth_len)); + } if (error) { status = error; error = 0; @@ -2550,15 +2696,15 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) break; case NFSPROC4_CB_COMPOUND: /* tag, minorversion, cb ident, numops, op array */ - nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */ + 
nfsm_chain_get_32(error, &nmreq, taglen); /* tag length */ nfsm_assert(error, (val <= NFS4_OPAQUE_LIMIT), EBADRPC); /* start building the body of the response */ - nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5*NFSX_UNSIGNED); + nfsm_mbuf_get(error, &mrest, nfsm_rndup(taglen) + 5 * NFSX_UNSIGNED); nfsm_chain_init(&nmrep, mrest); /* copy tag from request to response */ - nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */ + nfsm_chain_add_32(error, &nmrep, taglen); /* tag length */ for (len = (int)taglen; !error && (len > 0); len -= NFSX_UNSIGNED) { nfsm_chain_get_32(error, &nmreq, val); nfsm_chain_add_32(error, &nmrep, val); @@ -2569,17 +2715,18 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) nfsm_chain_add_32(error, &nmrep, numres); pnumres = (uint32_t*)(nmrep.nmc_ptr - NFSX_UNSIGNED); - nfsm_chain_get_32(error, &nmreq, val); /* minorversion */ + nfsm_chain_get_32(error, &nmreq, val); /* minorversion */ nfsm_assert(error, (val == 0), NFSERR_MINOR_VERS_MISMATCH); - nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */ - nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */ + nfsm_chain_get_32(error, &nmreq, cbid); /* callback ID */ + nfsm_chain_get_32(error, &nmreq, numops); /* number of operations */ if (error) { - if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) + if ((error == EBADRPC) || (error == NFSERR_MINOR_VERS_MISMATCH)) { status = error; - else if ((error == ENOBUFS) || (error == ENOMEM)) + } else if ((error == ENOBUFS) || (error == ENOMEM)) { status = NFSERR_RESOURCE; - else + } else { status = NFSERR_SERVERFAULT; + } error = 0; nfsm_chain_null(&nmrep); goto nfsmout; @@ -2587,17 +2734,21 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) /* match the callback ID to a registered mount */ lck_mtx_lock(nfs_global_mutex); TAILQ_FOREACH(nmp, &nfs4_cb_mounts, nm_cblink) { - if (nmp->nm_cbid != cbid) + if (nmp->nm_cbid != cbid) { continue; + } /* verify socket's source address matches this mount's server address */ - if (!nmp->nm_saddr) + if (!nmp->nm_saddr) { continue; - if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) + } + if (nfs_sockaddr_cmp((struct sockaddr*)&ncbsp->ncbs_saddr, nmp->nm_saddr) == 0) { break; + } } /* mark the NFS mount as busy */ - if (nmp) + if (nmp) { nmp->nm_cbrefs++; + } lck_mtx_unlock(nfs_global_mutex); if (!nmp) { /* if no mount match, just drop socket. */ @@ -2610,8 +2761,9 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) while (numops > 0) { numops--; nfsm_chain_get_32(error, &nmreq, op); - if (error) + if (error) { break; + } switch (op) { case NFS_OP_CB_GETATTR: // (FH, BITMAP) -> (STATUS, BITMAP, ATTRS) @@ -2635,8 +2787,9 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) } nfsm_chain_add_32(error, &nmrep, op); nfsm_chain_add_32(error, &nmrep, status); - if (!error && (status == EBADRPC)) + if (!error && (status == EBADRPC)) { error = status; + } if (np) { /* only allow returning size, change, and mtime attrs */ NFS_CLEAR_ATTRIBUTES(&rbitmap); @@ -2655,11 +2808,13 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) } nfsm_chain_add_bitmap(error, &nmrep, rbitmap, NFS_ATTR_BITMAP_LEN); nfsm_chain_add_32(error, &nmrep, attrbytes); - if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) + if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_CHANGE)) { nfsm_chain_add_64(error, &nmrep, - np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 
1 : 0)); - if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) + np->n_vattr.nva_change + ((np->n_flag & NMODIFIED) ? 1 : 0)); + } + if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_SIZE)) { nfsm_chain_add_64(error, &nmrep, np->n_size); + } if (NFS_BITMAP_ISSET(&bitmap, NFS_FATTR_TIME_MODIFY)) { nfsm_chain_add_64(error, &nmrep, np->n_vattr.nva_timesec[NFSTIME_MODIFY]); nfsm_chain_add_32(error, &nmrep, np->n_vattr.nva_timensec[NFSTIME_MODIFY]); @@ -2693,13 +2848,14 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) np = NULL; numops = 0; /* don't process any more ops */ } else if (!(np->n_openflags & N_DELEG_MASK) || - bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) { + bcmp(&np->n_dstateid, &stateid, sizeof(stateid))) { /* delegation stateid state doesn't match */ status = NFSERR_BAD_STATEID; numops = 0; /* don't process any more ops */ } - if (!status) /* add node to recall queue, and wake socket thread */ + if (!status) { /* add node to recall queue, and wake socket thread */ nfs4_delegation_return_enqueue(np); + } if (np) { nfs_node_unlock(np); vnode_put(NFSTOV(np)); @@ -2707,8 +2863,9 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) } nfsm_chain_add_32(error, &nmrep, op); nfsm_chain_add_32(error, &nmrep, status); - if (!error && (status == EBADRPC)) + if (!error && (status == EBADRPC)) { error = status; + } break; case NFS_OP_CB_ILLEGAL: default: @@ -2722,12 +2879,13 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) } if (!status && error) { - if (error == EBADRPC) + if (error == EBADRPC) { status = error; - else if ((error == ENOBUFS) || (error == ENOMEM)) + } else if ((error == ENOBUFS) || (error == ENOMEM)) { status = NFSERR_RESOURCE; - else + } else { status = NFSERR_SERVERFAULT; + } error = 0; } @@ -2739,15 +2897,17 @@ nfs4_cb_handler(struct nfs_callback_socket *ncbsp, mbuf_t mreq) /* drop the callback reference on the mount */ lck_mtx_lock(nfs_global_mutex); nmp->nm_cbrefs--; - if (!nmp->nm_cbid) + if (!nmp->nm_cbid) { wakeup(&nmp->nm_cbrefs); + } lck_mtx_unlock(nfs_global_mutex); break; } nfsmout: - if (status == EBADRPC) + if (status == EBADRPC) { OSAddAtomic64(1, &nfsstats.rpcinvalid); + } /* build reply header */ error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mhead); @@ -2789,8 +2949,9 @@ nfsmout: break; default: nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS); - if (status != NFSERR_RETVOID) + if (status != NFSERR_RETVOID) { nfsm_chain_add_32(error, &nmrep, status); + } break; } } @@ -2807,8 +2968,9 @@ nfsmout: mrest = NULL; /* Calculate the size of the reply */ replen = 0; - for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) + for (m = nmrep.nmc_mhead; m; m = mbuf_next(m)) { replen += mbuf_len(m); + } mbuf_pkthdr_setlen(mhead, replen); error = mbuf_pkthdr_setrcvif(mhead, NULL); nfsm_chain_set_recmark(error, &nmrep, (replen - NFSX_UNSIGNED) | 0x80000000); @@ -2818,20 +2980,26 @@ nfsmout: bzero(&msg, sizeof(msg)); error = sock_sendmbuf(so, &msg, mhead, 0, &sentlen); mhead = NULL; - if (!error && ((int)sentlen != replen)) + if (!error && ((int)sentlen != replen)) { error = EWOULDBLOCK; - if (error == EWOULDBLOCK) /* inability to send response is considered fatal */ + } + if (error == EWOULDBLOCK) { /* inability to send response is considered fatal */ error = ETIMEDOUT; + } out: - if (error) + if (error) { nfsm_chain_cleanup(&nmrep); - if (mhead) + } + if (mhead) { mbuf_freem(mhead); - if (mrest) + } + if (mrest) { mbuf_freem(mrest); - if (mreq) + } + if (mreq) { mbuf_freem(mreq); - return (error); + } + return error; } @@ -2878,18 +3046,20 
@@ nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, /* read the TCP RPC record marker */ while (!error && nrrsp->nrrs_markerleft) { aio.iov_base = ((char*)&nrrsp->nrrs_fragleft + - sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft); + sizeof(nrrsp->nrrs_fragleft) - nrrsp->nrrs_markerleft); aio.iov_len = nrrsp->nrrs_markerleft; bzero(&msg, sizeof(msg)); msg.msg_iov = &aio; msg.msg_iovlen = 1; error = sock_receive(so, &msg, flags, &rcvlen); - if (error || !rcvlen) + if (error || !rcvlen) { break; + } *recvp = 1; nrrsp->nrrs_markerleft -= rcvlen; - if (nrrsp->nrrs_markerleft) + if (nrrsp->nrrs_markerleft) { continue; + } /* record marker complete */ nrrsp->nrrs_fragleft = ntohl(nrrsp->nrrs_fragleft); if (nrrsp->nrrs_fragleft & 0x80000000) { @@ -2909,8 +3079,9 @@ nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, m = NULL; rcvlen = nrrsp->nrrs_fragleft; error = sock_receivembuf(so, NULL, &m, flags, &rcvlen); - if (error || !rcvlen || !m) + if (error || !rcvlen || !m) { break; + } *recvp = 1; /* append mbufs to list */ nrrsp->nrrs_fragleft -= rcvlen; @@ -2924,8 +3095,9 @@ nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, break; } } - while (mbuf_next(m)) + while (mbuf_next(m)) { m = mbuf_next(m); + } nrrsp->nrrs_mlast = m; } @@ -2943,7 +3115,7 @@ nfs_rpc_record_read(socket_t so, struct nfs_rpc_record_state *nrrsp, int flags, } } - return (error); + return error; } @@ -2992,7 +3164,7 @@ again: req->r_error = error; req->r_flags &= ~R_SENDING; lck_mtx_unlock(&req->r_mtx); - return (error); + return error; } error = nfs_sigintr(req->r_nmp, req, NULL, 0); @@ -3002,7 +3174,7 @@ again: req->r_error = error; req->r_flags &= ~R_SENDING; lck_mtx_unlock(&req->r_mtx); - return (error); + return error; } nmp = req->r_nmp; sotype = nmp->nm_sotype; @@ -3019,7 +3191,7 @@ again: req->r_error = error; req->r_flags &= ~R_SENDING; lck_mtx_unlock(&req->r_mtx); - return (error); + return error; } /* If the socket needs reconnection, do that now. 
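 * [Editorial sketch, illustration only, of the behavior the next hunks
 * reformat: a caller that cannot block flags the request for resend and
 * reports success, while a blocking caller sleeps until the socket thread
 * reports NMSOCK_READY, checking nfs_sigintr() each pass so a signal or a
 * forced unmount can break the wait:]
 *
 *	if (!(nmp->nm_sockflags & NMSOCK_READY)) {
 *		if (!wait) {
 *			req->r_flags |= R_MUSTRESEND;	// sock thread will resend
 *			return 0;
 *		}
 *		while (!(nmp->nm_sockflags & NMSOCK_READY)) {
 *			if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) {
 *				break;
 *			}
 *			msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts);
 *		}
 *	}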
*/ @@ -3027,8 +3199,9 @@ again: lck_mtx_lock(&nmp->nm_lock); if (!(nmp->nm_sockflags & NMSOCK_READY) && !((nmp->nm_sockflags & NMSOCK_SETUP) && (req->r_flags & R_SETUP))) { - if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) + if (NMFLAG(nmp, INTR) && !(req->r_flags & R_NOINTR)) { slpflag |= PCATCH; + } lck_mtx_unlock(&nmp->nm_lock); nfs_sndunlock(req); if (!wait) { @@ -3037,7 +3210,7 @@ again: req->r_flags |= R_MUSTRESEND; req->r_rtt = 0; lck_mtx_unlock(&req->r_mtx); - return (0); + return 0; } NFS_SOCK_DBG("nfs_send: 0x%llx wait reconnect\n", req->r_xid); lck_mtx_lock(&req->r_mtx); @@ -3047,7 +3220,7 @@ again: lck_mtx_lock(&nmp->nm_lock); while (!(nmp->nm_sockflags & NMSOCK_READY)) { /* don't bother waiting if the socket thread won't be reconnecting it */ - if (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) { + if (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) { error = EIO; break; } @@ -3064,9 +3237,10 @@ again: } /* make sure socket thread is running, then wait */ nfs_mount_sock_thread_wake(nmp); - if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) + if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) { break; - msleep(req, &nmp->nm_lock, slpflag|PSOCK, "nfsconnectwait", &ts); + } + msleep(req, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectwait", &ts); slpflag = 0; } lck_mtx_unlock(&nmp->nm_lock); @@ -3075,7 +3249,7 @@ again: req->r_error = error; req->r_flags &= ~R_SENDING; lck_mtx_unlock(&req->r_mtx); - return (error); + return error; } goto again; } @@ -3090,7 +3264,7 @@ again: req->r_flags |= R_MUSTRESEND; req->r_rtt = 0; lck_mtx_unlock(&req->r_mtx); - return (0); + return 0; } lck_mtx_lock(&req->r_mtx); @@ -3108,12 +3282,13 @@ again: lck_mtx_unlock(&req->r_mtx); if (!wait) { req->r_rtt = 0; - return (0); + return 0; } lck_mtx_lock(&nmp->nm_lock); while (nmp->nm_sent >= nmp->nm_cwnd) { - if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) + if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 1))) { break; + } TAILQ_INSERT_TAIL(&nmp->nm_cwndq, req, r_cchain); msleep(req, &nmp->nm_lock, slpflag | (PZERO - 1), "nfswaitcwnd", &ts); slpflag = 0; @@ -3140,8 +3315,9 @@ again: */ req->r_flags &= ~R_TIMING; nmp->nm_cwnd >>= 1; - if (nmp->nm_cwnd < NFS_CWNDSCALE) + if (nmp->nm_cwnd < NFS_CWNDSCALE) { nmp->nm_cwnd = NFS_CWNDSCALE; + } } lck_mtx_unlock(&nmp->nm_lock); } @@ -3150,17 +3326,18 @@ again: lck_mtx_unlock(&req->r_mtx); error = mbuf_copym(req->r_mhead, 0, MBUF_COPYALL, - wait ? MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy); + wait ? 
MBUF_WAITOK : MBUF_DONTWAIT, &mreqcopy); if (error) { - if (wait) + if (wait) { log(LOG_INFO, "nfs_send: mbuf copy failed %d\n", error); + } nfs_sndunlock(req); lck_mtx_lock(&req->r_mtx); req->r_flags &= ~R_SENDING; req->r_flags |= R_MUSTRESEND; req->r_rtt = 0; lck_mtx_unlock(&req->r_mtx); - return (0); + return 0; } bzero(&msg, sizeof(msg)); @@ -3171,24 +3348,27 @@ again: error = sock_sendmbuf(nso->nso_so, &msg, mreqcopy, 0, &sentlen); if (error || (sentlen != req->r_mreqlen)) { NFS_SOCK_DBG("nfs_send: 0x%llx sent %d/%d error %d\n", - req->r_xid, (int)sentlen, (int)req->r_mreqlen, error); + req->r_xid, (int)sentlen, (int)req->r_mreqlen, error); } - - if (!error && (sentlen != req->r_mreqlen)) + + if (!error && (sentlen != req->r_mreqlen)) { error = EWOULDBLOCK; + } needrecon = ((sotype == SOCK_STREAM) && sentlen && (sentlen != req->r_mreqlen)); lck_mtx_lock(&req->r_mtx); req->r_flags &= ~R_SENDING; req->r_rtt = 0; - if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) + if (rexmit && (++req->r_rexmit > NFS_MAXREXMIT)) { req->r_rexmit = NFS_MAXREXMIT; + } if (!error) { /* SUCCESS */ req->r_flags &= ~R_RESENDERR; - if (rexmit) + if (rexmit) { OSAddAtomic64(1, &nfsstats.rpcretries); + } req->r_flags |= R_SENT; if (req->r_flags & R_WAITSENT) { req->r_flags &= ~R_WAITSENT; @@ -3196,15 +3376,17 @@ again: } nfs_sndunlock(req); lck_mtx_unlock(&req->r_mtx); - return (0); + return 0; } /* send failed */ req->r_flags |= R_MUSTRESEND; - if (rexmit) + if (rexmit) { req->r_flags |= R_RESENDERR; - if ((error == EINTR) || (error == ERESTART)) + } + if ((error == EINTR) || (error == ERESTART)) { req->r_error = error; + } lck_mtx_unlock(&req->r_mtx); if (sotype == SOCK_DGRAM) { @@ -3222,9 +3404,10 @@ again: int clearerror = 0, optlen = sizeof(clearerror); sock_getsockopt(nso->nso_so, SOL_SOCKET, SO_ERROR, &clearerror, &optlen); #ifdef NFS_SOCKET_DEBUGGING - if (clearerror) + if (clearerror) { NFS_SOCK_DBG("nfs_send: ignoring UDP socket error %d so %d\n", - error, clearerror); + error, clearerror); + } #endif } } @@ -3233,8 +3416,9 @@ again: switch (error) { case EWOULDBLOCK: /* if send timed out, reconnect if on TCP */ - if (sotype != SOCK_STREAM) + if (sotype != SOCK_STREAM) { break; + } case EPIPE: case EADDRNOTAVAIL: case ENETDOWN: @@ -3247,7 +3431,7 @@ again: case ECONNREFUSED: case EHOSTDOWN: case EHOSTUNREACH: - /* case ECANCELED??? */ + /* case ECANCELED??? */ needrecon = 1; break; } @@ -3258,8 +3442,9 @@ again: nfs_sndunlock(req); - if (nfs_is_dead(error, nmp)) + if (nfs_is_dead(error, nmp)) { error = EIO; + } /* * Don't log some errors: @@ -3267,21 +3452,24 @@ again: * EADDRNOTAVAIL may occur on network transitions. * ENOTCONN may occur under some network conditions. */ - if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) + if ((error == EPIPE) || (error == EADDRNOTAVAIL) || (error == ENOTCONN)) { error = 0; - if (error && (error != EINTR) && (error != ERESTART)) + } + if (error && (error != EINTR) && (error != ERESTART)) { log(LOG_INFO, "nfs send error %d for server %s\n", error, - !req->r_nmp ? "" : - vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname); + !req->r_nmp ? 
"" : + vfs_statfs(req->r_nmp->nm_mountp)->f_mntfromname); + } /* prefer request termination error over other errors */ error2 = nfs_sigintr(req->r_nmp, req, req->r_thread, 0); - if (error2) + if (error2) { error = error2; + } /* only allow the following errors to be returned */ if ((error != EINTR) && (error != ERESTART) && (error != EIO) && - (error != ENXIO) && (error != ETIMEDOUT)) + (error != ENXIO) && (error != ETIMEDOUT)) { /* * We got some error we don't know what do do with, * i.e., we're not reconnecting, we map it to @@ -3291,7 +3479,8 @@ again: * return 0 and the request will be resent. */ error = needrecon ? 0 : EIO; - return (error); + } + return error; } /* @@ -3317,19 +3506,22 @@ nfs_udp_rcv(socket_t so, void *arg, __unused int waitflag) mbuf_t m; int error = 0; - if (nmp->nm_sockflags & NMSOCK_CONNECTING) + if (nmp->nm_sockflags & NMSOCK_CONNECTING) { return; + } do { /* make sure we're on the current socket */ - if (!nso || (nso->nso_so != so)) + if (!nso || (nso->nso_so != so)) { return; + } m = NULL; rcvlen = 1000000; error = sock_receivembuf(so, NULL, &m, MSG_DONTWAIT, &rcvlen); - if (m) + if (m) { nfs_request_match_reply(nmp, m); + } } while (m && !error); if (error && (error != EWOULDBLOCK)) { @@ -3351,8 +3543,9 @@ nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag) int recv = 1; int wup = 0; - if (nmp->nm_sockflags & NMSOCK_CONNECTING) + if (nmp->nm_sockflags & NMSOCK_CONNECTING) { return; + } /* make sure we're on the current socket */ lck_mtx_lock(&nmp->nm_lock); @@ -3365,7 +3558,7 @@ nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag) /* make sure this upcall should be trying to do work */ lck_mtx_lock(&nso->nso_lock); - if (nso->nso_flags & (NSO_UPCALL|NSO_DISCONNECTING|NSO_DEAD)) { + if (nso->nso_flags & (NSO_UPCALL | NSO_DISCONNECTING | NSO_DEAD)) { lck_mtx_unlock(&nso->nso_lock); return; } @@ -3376,23 +3569,27 @@ nfs_tcp_rcv(socket_t so, void *arg, __unused int waitflag) /* loop while we make error-free progress */ while (!error && recv) { error = nfs_rpc_record_read(so, &nrrs, MSG_DONTWAIT, &recv, &m); - if (m) /* match completed response with request */ + if (m) { /* match completed response with request */ nfs_request_match_reply(nmp, m); + } } /* Update the sockets's rpc parsing state */ lck_mtx_lock(&nso->nso_lock); nso->nso_rrs = nrrs; - if (nso->nso_flags & NSO_DISCONNECTING) + if (nso->nso_flags & NSO_DISCONNECTING) { wup = 1; + } nso->nso_flags &= ~NSO_UPCALL; lck_mtx_unlock(&nso->nso_lock); - if (wup) + if (wup) { wakeup(&nso->nso_flags); + } #ifdef NFS_SOCKET_DEBUGGING - if (!recv && (error != EWOULDBLOCK)) + if (!recv && (error != EWOULDBLOCK)) { NFS_SOCK_DBG("nfs_tcp_rcv: got nothing, error %d, got FIN?\n", error); + } #endif /* note: no error and no data indicates server closed its end */ if ((error != EWOULDBLOCK) && (error || !recv)) { @@ -3466,8 +3663,9 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep) */ lck_mtx_lock(nfs_request_mutex); TAILQ_FOREACH(req, &nfs_reqq, r_chain) { - if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) + if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) { continue; + } /* looks like we have it, grab lock and double check */ lck_mtx_lock(&req->r_mtx); if (req->r_nmrep.nmc_mhead || (rxid != R_XID32(req->r_xid))) { @@ -3485,10 +3683,11 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep) FSDBG(530, R_XID32(req->r_xid), req, nmp->nm_sent, nmp->nm_cwnd); if (nmp->nm_cwnd <= nmp->nm_sent) { nmp->nm_cwnd += - ((NFS_CWNDSCALE * NFS_CWNDSCALE) + + ((NFS_CWNDSCALE * 
NFS_CWNDSCALE) + (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd; - if (nmp->nm_cwnd > NFS_MAXCWND) + if (nmp->nm_cwnd > NFS_MAXCWND) { nmp->nm_cwnd = NFS_MAXCWND; + } } if (req->r_flags & R_CWND) { nmp->nm_sent -= NFS_CWNDSCALE; @@ -3515,13 +3714,15 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep) * rtt is between N+dt and N+2-dt ticks, * add 1. */ - if (proct[req->r_procnum] == 0) + if (proct[req->r_procnum] == 0) { panic("nfs_request_match_reply: proct[%d] is zero", req->r_procnum); + } t1 = req->r_rtt + 1; t1 -= (NFS_SRTT(req) >> 3); NFS_SRTT(req) += t1; - if (t1 < 0) + if (t1 < 0) { t1 = -t1; + } t1 -= (NFS_SDRTT(req) >> 2); NFS_SDRTT(req) += t1; } @@ -3530,13 +3731,15 @@ nfs_request_match_reply(struct nfsmount *nmp, mbuf_t mrep) /* signal anyone waiting on this request */ wakeup(req); asyncioq = (req->r_callback.rcb_func != NULL); - if (nfs_request_using_gss(req)) + if (nfs_request_using_gss(req)) { nfs_gss_clnt_rpcdone(req); + } lck_mtx_unlock(&req->r_mtx); lck_mtx_unlock(nfs_request_mutex); /* if it's an async RPC with a callback, queue it up */ - if (asyncioq) + if (asyncioq) { nfs_asyncio_finish(req); + } break; } @@ -3558,21 +3761,24 @@ nfs_wait_reply(struct nfsreq *req) struct timespec ts = { 2, 0 }; int error = 0, slpflag, first = 1; - if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) + if (req->r_nmp && NMFLAG(req->r_nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) { slpflag = PCATCH; - else + } else { slpflag = 0; + } lck_mtx_lock(&req->r_mtx); while (!req->r_nmrep.nmc_mhead) { - if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) + if ((error = nfs_sigintr(req->r_nmp, req, first ? NULL : req->r_thread, 0))) { break; - if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) + } + if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) { break; + } /* check if we need to resend */ if (req->r_flags & R_MUSTRESEND) { NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d\n", - req->r_procnum, req->r_xid, req->r_flags, req->r_rtt); + req->r_procnum, req->r_xid, req->r_flags, req->r_rtt); req->r_flags |= R_SENDING; lck_mtx_unlock(&req->r_mtx); if (nfs_request_using_gss(req)) { @@ -3585,26 +3791,29 @@ nfs_wait_reply(struct nfsreq *req) lck_mtx_lock(&req->r_mtx); req->r_flags &= ~R_SENDING; lck_mtx_unlock(&req->r_mtx); - return (EAGAIN); + return EAGAIN; } error = nfs_send(req, 1); lck_mtx_lock(&req->r_mtx); NFS_SOCK_DBG("nfs wait resend: p %d x 0x%llx f 0x%x rtt %d err %d\n", - req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error); - if (error) + req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, error); + if (error) { break; - if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) + } + if (((error = req->r_error)) || req->r_nmrep.nmc_mhead) { break; + } } /* need to poll if we're P_NOREMOTEHANG */ - if (nfs_noremotehang(req->r_thread)) + if (nfs_noremotehang(req->r_thread)) { ts.tv_sec = 1; + } msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitreply", &ts); first = slpflag = 0; } lck_mtx_unlock(&req->r_mtx); - return (error); + return error; } /* @@ -3635,7 +3844,7 @@ nfs_wait_reply(struct nfsreq *req) int nfs_request_create( nfsnode_t np, - mount_t mp, /* used only if !np */ + mount_t mp, /* used only if !np */ struct nfsm_chain *nmrest, int procnum, thread_t thd, @@ -3652,51 +3861,58 @@ nfs_request_create( if (!newreq) { mbuf_freem(nmrest->nmc_mhead); nmrest->nmc_mhead = NULL; - return (ENOMEM); + return ENOMEM; } req = newreq; } bzero(req, sizeof(*req)); - if (req == newreq) + if 
(req == newreq) { req->r_flags = R_ALLOCATED; + } nmp = VFSTONFS(np ? NFSTOMP(np) : mp); if (nfs_mount_gone(nmp)) { - if (newreq) + if (newreq) { FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ); - return (ENXIO); + } + return ENXIO; } lck_mtx_lock(&nmp->nm_lock); - if ((nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) && + if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && (nmp->nm_state & NFSSTA_TIMEO)) { lck_mtx_unlock(&nmp->nm_lock); mbuf_freem(nmrest->nmc_mhead); nmrest->nmc_mhead = NULL; - if (newreq) + if (newreq) { FREE_ZONE(newreq, sizeof(*newreq), M_NFSREQ); - return (ENXIO); + } + return ENXIO; } - - if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) + + if ((nmp->nm_vers != NFS_VER4) && (procnum >= 0) && (procnum < NFS_NPROCS)) { OSAddAtomic64(1, &nfsstats.rpccnt[procnum]); - if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL)) + } + if ((nmp->nm_vers == NFS_VER4) && (procnum != NFSPROC4_COMPOUND) && (procnum != NFSPROC4_NULL)) { panic("nfs_request: invalid NFSv4 RPC request %d\n", procnum); + } lck_mtx_init(&req->r_mtx, nfs_request_grp, LCK_ATTR_NULL); req->r_nmp = nmp; nmp->nm_ref++; req->r_np = np; req->r_thread = thd; - if (!thd) + if (!thd) { req->r_flags |= R_NOINTR; + } if (IS_VALID_CRED(cred)) { kauth_cred_ref(cred); req->r_cred = cred; } req->r_procnum = procnum; - if (proct[procnum] > 0) + if (proct[procnum] > 0) { req->r_flags |= R_TIMING; + } req->r_nmrep.nmc_mhead = NULL; SLIST_INIT(&req->r_gss_seqlist); req->r_achain.tqe_next = NFSREQNOLIST; @@ -3704,12 +3920,13 @@ nfs_request_create( req->r_cchain.tqe_next = NFSREQNOLIST; /* set auth flavor to use for request */ - if (!req->r_cred) + if (!req->r_cred) { req->r_auth = RPCAUTH_NONE; - else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) + } else if (req->r_np && (req->r_np->n_auth != RPCAUTH_INVALID)) { req->r_auth = req->r_np->n_auth; - else + } else { req->r_auth = nmp->nm_auth; + } lck_mtx_unlock(&nmp->nm_lock); @@ -3719,9 +3936,10 @@ nfs_request_create( req->r_flags |= R_INITTED; req->r_refs = 1; - if (newreq) + if (newreq) { *reqp = req; - return (0); + } + return 0; } /* @@ -3734,15 +3952,17 @@ nfs_request_destroy(struct nfsreq *req) struct gss_seq *gsp, *ngsp; int clearjbtimeo = 0; - if (!req || !(req->r_flags & R_INITTED)) + if (!req || !(req->r_flags & R_INITTED)) { return; + } nmp = req->r_nmp; req->r_flags &= ~R_INITTED; - if (req->r_lflags & RL_QUEUED) + if (req->r_lflags & RL_QUEUED) { nfs_reqdequeue(req); + } if (req->r_achain.tqe_next != NFSREQNOLIST) { - /* + /* * Still on an async I/O queue? * %%% But which one, we may be on a local iod. 
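 * [Editorial note, not part of the patch: a request can sit on several
 * lists at once, and nfs_request_destroy() unlinks it from each before the
 * final free. The NFSREQNOLIST sentinel stored in tqe_next marks "not on
 * this list"; the resend- and cwnd-queue steps from the code that follows:]
 *
 *	if (req->r_rchain.tqe_next != NFSREQNOLIST) {	// resend queue
 *		TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain);
 *		req->r_rchain.tqe_next = NFSREQNOLIST;
 *	}
 *	if (req->r_cchain.tqe_next != NFSREQNOLIST) {	// congestion-window queue
 *		TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain);
 *		req->r_cchain.tqe_next = NFSREQNOLIST;
 *	}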
*/ @@ -3774,8 +3994,9 @@ nfs_request_destroy(struct nfsreq *req) if (req->r_rchain.tqe_next != NFSREQNOLIST) { TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); req->r_rchain.tqe_next = NFSREQNOLIST; - if (req->r_flags & R_RESENDQ) + if (req->r_flags & R_RESENDQ) { req->r_flags &= ~R_RESENDQ; + } } if (req->r_cchain.tqe_next != NFSREQNOLIST) { TAILQ_REMOVE(&nmp->nm_cwndq, req, r_cchain); @@ -3790,41 +4011,53 @@ nfs_request_destroy(struct nfsreq *req) } lck_mtx_unlock(&req->r_mtx); - if (clearjbtimeo) + if (clearjbtimeo) { nfs_up(nmp, req->r_thread, clearjbtimeo, NULL); - if (req->r_mhead) + } + if (req->r_mhead) { mbuf_freem(req->r_mhead); - else if (req->r_mrest) + } else if (req->r_mrest) { mbuf_freem(req->r_mrest); - if (req->r_nmrep.nmc_mhead) + } + if (req->r_nmrep.nmc_mhead) { mbuf_freem(req->r_nmrep.nmc_mhead); - if (IS_VALID_CRED(req->r_cred)) + } + if (IS_VALID_CRED(req->r_cred)) { kauth_cred_unref(&req->r_cred); - if (nfs_request_using_gss(req)) + } + if (nfs_request_using_gss(req)) { nfs_gss_clnt_rpcdone(req); + } SLIST_FOREACH_SAFE(gsp, &req->r_gss_seqlist, gss_seqnext, ngsp) - FREE(gsp, M_TEMP); - if (req->r_gss_ctx) + FREE(gsp, M_TEMP); + if (req->r_gss_ctx) { nfs_gss_clnt_ctx_unref(req); - if (req->r_wrongsec) + } + if (req->r_wrongsec) { FREE(req->r_wrongsec, M_TEMP); - if (nmp) + } + if (nmp) { nfs_mount_rele(nmp); + } lck_mtx_destroy(&req->r_mtx, nfs_request_grp); - if (req->r_flags & R_ALLOCATED) + if (req->r_flags & R_ALLOCATED) { FREE_ZONE(req, sizeof(*req), M_NFSREQ); + } } void nfs_request_ref(struct nfsreq *req, int locked) { - if (!locked) + if (!locked) { lck_mtx_lock(&req->r_mtx); - if (req->r_refs <= 0) + } + if (req->r_refs <= 0) { panic("nfsreq reference error"); + } req->r_refs++; - if (!locked) + if (!locked) { lck_mtx_unlock(&req->r_mtx); + } } void @@ -3833,13 +4066,15 @@ nfs_request_rele(struct nfsreq *req) int destroy; lck_mtx_lock(&req->r_mtx); - if (req->r_refs <= 0) + if (req->r_refs <= 0) { panic("nfsreq reference underflow"); + } req->r_refs--; destroy = (req->r_refs == 0); lck_mtx_unlock(&req->r_mtx); - if (destroy) + if (destroy) { nfs_request_destroy(req); + } } @@ -3855,31 +4090,36 @@ nfs_request_add_header(struct nfsreq *req) /* free up any previous header */ if ((m = req->r_mhead)) { - while (m && (m != req->r_mrest)) + while (m && (m != req->r_mrest)) { m = mbuf_free(m); + } req->r_mhead = NULL; } nmp = req->r_nmp; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } error = nfsm_rpchead(req, req->r_mrest, &req->r_xid, &req->r_mhead); - if (error) - return (error); + if (error) { + return error; + } req->r_mreqlen = mbuf_pkthdr_len(req->r_mhead); nmp = req->r_nmp; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } lck_mtx_lock(&nmp->nm_lock); - if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) + if (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) { req->r_retry = nmp->nm_retry; - else - req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */ + } else { + req->r_retry = NFS_MAXREXMIT + 1; /* past clip limit */ + } lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } @@ -3901,7 +4141,7 @@ nfs_request_send(struct nfsreq *req, int wait) nmp = req->r_nmp; if (nfs_mount_gone(nmp)) { lck_mtx_unlock(nfs_request_mutex); - return (ENXIO); + return ENXIO; } microuptime(&now); @@ -3924,12 +4164,12 @@ nfs_request_send(struct nfsreq *req, int wait) if (!nfs_request_timer_on) { nfs_request_timer_on = 1; nfs_interval_timer_start(nfs_request_timer_call, - 
NFS_REQUESTDELAY); + NFS_REQUESTDELAY); } lck_mtx_unlock(nfs_request_mutex); /* Send the request... */ - return (nfs_send(req, wait)); + return nfs_send(req, wait); } /* @@ -3964,8 +4204,9 @@ nfs_request_finish( error = req->r_error; - if (nmrepp) + if (nmrepp) { nmrepp->nmc_mhead = NULL; + } /* RPC done, unlink the request. */ nfs_reqdequeue(req); @@ -4007,13 +4248,14 @@ nfs_request_finish( */ if (error == EAGAIN) { req->r_error = 0; - if (mrep) + if (mrep) { mbuf_freem(mrep); - error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs + } + error = nfs_gss_clnt_args_restore(req); // remove any trailer mbufs req->r_nmrep.nmc_mhead = NULL; req->r_flags |= R_RESTART; if (error == ENEEDAUTH) { - req->r_xid = 0; // get a new XID + req->r_xid = 0; // get a new XID error = 0; } goto nfsmout; @@ -4028,13 +4270,15 @@ nfs_request_finish( if (!error) { if ((req->r_flags & R_TPRINTFMSG) || (nmp && (NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && - ((nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_FORCE|NFSSTA_DEAD)) == NFSSTA_TIMEO))) + ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_FORCE | NFSSTA_DEAD)) == NFSSTA_TIMEO))) { nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, "is alive again"); - else + } else { nfs_up(nmp, req->r_thread, NFSSTA_TIMEO, NULL); + } } - if (!error && !nmp) + if (!error && !nmp) { error = ENXIO; + } nfsmout_if(error); /* @@ -4062,17 +4306,19 @@ nfs_request_finish( * and set up for a resend. */ error = nfs_gss_clnt_args_restore(req); - if (error && error != ENEEDAUTH) + if (error && error != ENEEDAUTH) { break; + } if (!error) { error = nfs_gss_clnt_ctx_renew(req); - if (error) + if (error) { break; + } } mbuf_freem(mrep); req->r_nmrep.nmc_mhead = NULL; - req->r_xid = 0; // get a new XID + req->r_xid = 0; // get a new XID req->r_flags |= R_RESTART; goto nfsmout; default: @@ -4091,15 +4337,16 @@ nfs_request_finish( case RPCAUTH_NONE: case RPCAUTH_SYS: /* Any AUTH_SYS verifier is ignored */ - if (verf_len > 0) + if (verf_len > 0) { nfsm_chain_adv(error, &nmrep, nfsm_rndup(verf_len)); + } nfsm_chain_get_32(error, &nmrep, accepted_status); break; case RPCAUTH_KRB5: case RPCAUTH_KRB5I: case RPCAUTH_KRB5P: error = nfs_gss_clnt_verf_get(req, &nmrep, - verf_type, verf_len, &accepted_status); + verf_type, verf_len, &accepted_status); break; } nfsmout_if(error); @@ -4134,8 +4381,9 @@ nfs_request_finish( goto nfsmout; } req->r_delay = !req->r_delay ? 
NFS_TRYLATERDEL : (req->r_delay * 2); - if (req->r_delay > 30) + if (req->r_delay > 30) { req->r_delay = 30; + } if (nmp->nm_tprintf_initial_delay && (req->r_delay >= nmp->nm_tprintf_initial_delay)) { if (!(req->r_flags & R_JBTPRINTFMSG)) { req->r_flags |= R_JBTPRINTFMSG; @@ -4144,10 +4392,10 @@ nfs_request_finish( lck_mtx_unlock(&nmp->nm_lock); } nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_JUKEBOXTIMEO, - "resource temporarily unavailable (jukebox)", 0); + "resource temporarily unavailable (jukebox)", 0); } if ((NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && (req->r_delay == 30) && - !(req->r_flags & R_NOINTR)) { + !(req->r_flags & R_NOINTR)) { /* for soft mounts, just give up after a short while */ OSAddAtomic64(1, &nfsstats.rpctimeouts); nfs_softterm(req); @@ -4161,17 +4409,18 @@ nfs_request_finish( req->r_resendtime = now.tv_sec + delay; } else { do { - if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) + if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) { goto nfsmout; - tsleep(nfs_request_finish, PSOCK|slpflag, "nfs_jukebox_trylater", hz); + } + tsleep(nfs_request_finish, PSOCK | slpflag, "nfs_jukebox_trylater", hz); slpflag = 0; } while (--delay > 0); } - req->r_xid = 0; // get a new XID + req->r_xid = 0; // get a new XID req->r_flags |= R_RESTART; req->r_start = 0; FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_TRYLATER); - return (0); + return 0; } if (req->r_flags & R_JBTPRINTFMSG) { @@ -4200,18 +4449,20 @@ nfs_request_finish( if (!req->r_wrongsec) { /* first time... set up flavor array */ - MALLOC(req->r_wrongsec, uint32_t*, NX_MAX_SEC_FLAVORS*sizeof(uint32_t), M_TEMP, M_WAITOK); + MALLOC(req->r_wrongsec, uint32_t*, NX_MAX_SEC_FLAVORS * sizeof(uint32_t), M_TEMP, M_WAITOK); if (!req->r_wrongsec) { error = EACCES; goto nfsmout; } - i=0; + i = 0; if (nmp->nm_sec.count) { /* use the mount's preferred list of flavors */ - for(; i < nmp->nm_sec.count; i++) + for (; i < nmp->nm_sec.count; i++) { req->r_wrongsec[i] = nmp->nm_sec.flavors[i]; + } } else if (srvcount) { /* otherwise use the server's list of flavors */ - for(; i < srvcount; i++) + for (; i < srvcount; i++) { req->r_wrongsec[i] = srvflavors[i]; + } } else { /* otherwise, just try the flavors we support. 
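 * [Editorial worked example, illustration only: with no mount preference
 * and no server-supplied list, the fallback array is filled with the
 * flavors the client supports — KRB5P, KRB5I, KRB5, SYS, NONE (the middle
 * of the list falls outside this hunk's context). Slots already tried are
 * stamped RPCAUTH_INVALID so the scan below skips them; e.g. if AUTH_SYS
 * just drew NFSERR_WRONGSEC:]
 *
 *	// filled:        { KRB5P, KRB5I, KRB5, SYS,     NONE, INVALID, ... }
 *	// clear current: { KRB5P, KRB5I, KRB5, INVALID, NONE, INVALID, ... }
 *	// next flavor the scan selects: RPCAUTH_KRB5P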
*/ req->r_wrongsec[i++] = RPCAUTH_KRB5P; req->r_wrongsec[i++] = RPCAUTH_KRB5I; @@ -4219,29 +4470,37 @@ nfs_request_finish( req->r_wrongsec[i++] = RPCAUTH_SYS; req->r_wrongsec[i++] = RPCAUTH_NONE; } - for(; i < NX_MAX_SEC_FLAVORS; i++) /* invalidate any remaining slots */ + for (; i < NX_MAX_SEC_FLAVORS; i++) { /* invalidate any remaining slots */ req->r_wrongsec[i] = RPCAUTH_INVALID; + } } /* clear the current flavor from the list */ - for(i=0; i < NX_MAX_SEC_FLAVORS; i++) - if (req->r_wrongsec[i] == req->r_auth) + for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) { + if (req->r_wrongsec[i] == req->r_auth) { req->r_wrongsec[i] = RPCAUTH_INVALID; + } + } /* find the next flavor to try */ - for(i=0; i < NX_MAX_SEC_FLAVORS; i++) + for (i = 0; i < NX_MAX_SEC_FLAVORS; i++) { if (req->r_wrongsec[i] != RPCAUTH_INVALID) { - if (!srvcount) /* no server list, just try it */ + if (!srvcount) { /* no server list, just try it */ break; + } /* check that it's in the server's list */ - for(j=0; j < srvcount; j++) - if (req->r_wrongsec[i] == srvflavors[j]) + for (j = 0; j < srvcount; j++) { + if (req->r_wrongsec[i] == srvflavors[j]) { break; - if (j < srvcount) /* found */ + } + } + if (j < srvcount) { /* found */ break; + } /* not found in server list */ req->r_wrongsec[i] = RPCAUTH_INVALID; } + } if (i == NX_MAX_SEC_FLAVORS) { /* nothing left to try! */ error = EACCES; @@ -4250,19 +4509,20 @@ nfs_request_finish( /* retry with the next auth flavor */ req->r_auth = req->r_wrongsec[i]; - req->r_xid = 0; // get a new XID + req->r_xid = 0; // get a new XID req->r_flags |= R_RESTART; req->r_start = 0; FSDBG(273, R_XID32(req->r_xid), nmp, req, NFSERR_WRONGSEC); - return (0); + return 0; } if ((nmp->nm_vers >= NFS_VER4) && req->r_wrongsec) { /* * We renegotiated security for this request; so update the * default security flavor for the associated node. */ - if (req->r_np) + if (req->r_np) { req->r_np->n_auth = req->r_auth; + } } if (*status == NFS_OK) { @@ -4282,13 +4542,15 @@ nfs_request_finish( if ((*status == ESTALE) && req->r_np) { cache_purge(NFSTOV(req->r_np)); /* if monitored, also send delete event */ - if (vnode_ismonitored(NFSTOV(req->r_np))) - nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB|VNODE_EVENT_DELETE)); + if (vnode_ismonitored(NFSTOV(req->r_np))) { + nfs_vnode_notify(req->r_np, (VNODE_EVENT_ATTRIB | VNODE_EVENT_DELETE)); + } } - if (nmp->nm_vers == NFS_VER2) + if (nmp->nm_vers == NFS_VER2) { mbuf_freem(mrep); - else + } else { *nmrepp = nmrep; + } req->r_nmrep.nmc_mhead = NULL; error = 0; break; @@ -4316,12 +4578,13 @@ nfsmout: nmp->nm_jbreqs--; clearjbtimeo = (nmp->nm_jbreqs == 0) ? NFSSTA_JUKEBOXTIMEO : 0; lck_mtx_unlock(&nmp->nm_lock); - if (clearjbtimeo) + if (clearjbtimeo) { nfs_up(nmp, req->r_thread, clearjbtimeo, NULL); + } } FSDBG(273, R_XID32(req->r_xid), nmp, req, - (!error && (*status == NFS_OK)) ? 0xf0f0f0f0 : error); - return (error); + (!error && (*status == NFS_OK)) ? 
0xf0f0f0f0 : error); + return error; } /* @@ -4330,15 +4593,16 @@ nfsmout: int nfs_request_using_gss(struct nfsreq *req) { - if (!req->r_gss_ctx) - return (0); + if (!req->r_gss_ctx) { + return 0; + } switch (req->r_auth) { - case RPCAUTH_KRB5: - case RPCAUTH_KRB5I: - case RPCAUTH_KRB5P: - return (1); + case RPCAUTH_KRB5: + case RPCAUTH_KRB5I: + case RPCAUTH_KRB5P: + return 1; } - return (0); + return 0; } /* @@ -4348,7 +4612,7 @@ nfs_request_using_gss(struct nfsreq *req) int nfs_request( nfsnode_t np, - mount_t mp, /* used only if !np */ + mount_t mp, /* used only if !np */ struct nfsm_chain *nmrest, int procnum, vfs_context_t ctx, @@ -4358,14 +4622,14 @@ nfs_request( int *status) { return nfs_request2(np, mp, nmrest, procnum, - vfs_context_thread(ctx), vfs_context_ucred(ctx), - si, 0, nmrepp, xidp, status); + vfs_context_thread(ctx), vfs_context_ucred(ctx), + si, 0, nmrepp, xidp, status); } int nfs_request2( nfsnode_t np, - mount_t mp, /* used only if !np */ + mount_t mp, /* used only if !np */ struct nfsm_chain *nmrest, int procnum, thread_t thd, @@ -4379,30 +4643,36 @@ nfs_request2( struct nfsreq rq, *req = &rq; int error; - if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) - return (error); + if ((error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, &req))) { + return error; + } req->r_flags |= (flags & (R_OPTMASK | R_SOFT)); - if (si) + if (si) { req->r_secinfo = *si; + } FSDBG_TOP(273, R_XID32(req->r_xid), np, procnum, 0); do { req->r_error = 0; req->r_flags &= ~R_RESTART; - if ((error = nfs_request_add_header(req))) + if ((error = nfs_request_add_header(req))) { break; - if (xidp) + } + if (xidp) { *xidp = req->r_xid; - if ((error = nfs_request_send(req, 1))) + } + if ((error = nfs_request_send(req, 1))) { break; + } nfs_request_wait(req); - if ((error = nfs_request_finish(req, nmrepp, status))) + if ((error = nfs_request_finish(req, nmrepp, status))) { break; + } } while (req->r_flags & R_RESTART); FSDBG_BOT(273, R_XID32(req->r_xid), np, procnum, error); nfs_request_rele(req); - return (error); + return error; } @@ -4414,26 +4684,27 @@ nfs_request2( int nfs_request_gss( - mount_t mp, - struct nfsm_chain *nmrest, - thread_t thd, - kauth_cred_t cred, - int flags, - struct nfs_gss_clnt_ctx *cp, /* Set to gss context to renew or setup */ - struct nfsm_chain *nmrepp, - int *status) + mount_t mp, + struct nfsm_chain *nmrest, + thread_t thd, + kauth_cred_t cred, + int flags, + struct nfs_gss_clnt_ctx *cp, /* Set to gss context to renew or setup */ + struct nfsm_chain *nmrepp, + int *status) { struct nfsreq rq, *req = &rq; int error, wait = 1; - if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) - return (error); + if ((error = nfs_request_create(NULL, mp, nmrest, NFSPROC_NULL, thd, cred, &req))) { + return error; + } req->r_flags |= (flags & R_OPTMASK); if (cp == NULL) { printf("nfs_request_gss request has no context\n"); nfs_request_rele(req); - return (NFSERR_EAUTH); + return NFSERR_EAUTH; } nfs_gss_clnt_ctx_ref(req, cp); @@ -4441,24 +4712,29 @@ nfs_request_gss( * Don't wait for a reply to a context destroy advisory * to avoid hanging on a dead server. 
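 * [Editorial sketch of the send loop below, illustration only: when wait
 * is 0 — the RPCSEC_GSS_DESTROY case — the request is sent once and the
 * loop exits without nfs_request_wait()/nfs_request_finish(), so a dead
 * server cannot stall context teardown:]
 *
 *	do {
 *		req->r_error = 0;
 *		req->r_flags &= ~R_RESTART;
 *		if ((error = nfs_request_add_header(req))) {
 *			break;
 *		}
 *		if ((error = nfs_request_send(req, wait))) {
 *			break;
 *		}
 *		if (!wait) {
 *			break;		// advisory only; don't block on a reply
 *		}
 *		nfs_request_wait(req);
 *		if ((error = nfs_request_finish(req, nmrepp, status))) {
 *			break;
 *		}
 *	} while (req->r_flags & R_RESTART);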
*/ - if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) + if (cp->gss_clnt_proc == RPCSEC_GSS_DESTROY) { wait = 0; + } FSDBG_TOP(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, 0); do { req->r_error = 0; req->r_flags &= ~R_RESTART; - if ((error = nfs_request_add_header(req))) + if ((error = nfs_request_add_header(req))) { break; + } - if ((error = nfs_request_send(req, wait))) + if ((error = nfs_request_send(req, wait))) { break; - if (!wait) + } + if (!wait) { break; + } nfs_request_wait(req); - if ((error = nfs_request_finish(req, nmrepp, status))) + if ((error = nfs_request_finish(req, nmrepp, status))) { break; + } } while (req->r_flags & R_RESTART); FSDBG_BOT(273, R_XID32(req->r_xid), NULL, NFSPROC_NULL, error); @@ -4466,7 +4742,7 @@ nfs_request_gss( nfs_gss_clnt_ctx_unref(req); nfs_request_rele(req); - return (error); + return error; } /* @@ -4475,7 +4751,7 @@ nfs_request_gss( int nfs_request_async( nfsnode_t np, - mount_t mp, /* used only if !np */ + mount_t mp, /* used only if !np */ struct nfsm_chain *nmrest, int procnum, thread_t thd, @@ -4492,19 +4768,23 @@ nfs_request_async( error = nfs_request_create(np, mp, nmrest, procnum, thd, cred, reqp); req = *reqp; FSDBG(274, (req ? R_XID32(req->r_xid) : 0), np, procnum, error); - if (error) - return (error); + if (error) { + return error; + } req->r_flags |= (flags & R_OPTMASK); req->r_flags |= R_ASYNC; - if (si) + if (si) { req->r_secinfo = *si; - if (cb) + } + if (cb) { req->r_callback = *cb; + } error = nfs_request_add_header(req); if (!error) { req->r_flags |= R_WAITSENT; - if (req->r_callback.rcb_func) + if (req->r_callback.rcb_func) { nfs_request_ref(req, 0); + } error = nfs_request_send(req, 1); lck_mtx_lock(&req->r_mtx); if (!error && !(req->r_flags & R_SENT) && req->r_callback.rcb_func) { @@ -4523,8 +4803,9 @@ nfs_request_async( */ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); req->r_rchain.tqe_next = NFSREQNOLIST; - if (req->r_flags & R_RESENDQ) + if (req->r_flags & R_RESENDQ) { req->r_flags &= ~R_RESENDQ; + } lck_mtx_unlock(&nmp->nm_lock); req->r_flags |= R_SENDING; lck_mtx_unlock(&req->r_mtx); @@ -4532,14 +4813,16 @@ nfs_request_async( /* Remove the R_RESENDQ reference */ nfs_request_rele(req); lck_mtx_lock(&req->r_mtx); - if (error) + if (error) { break; + } continue; } lck_mtx_unlock(&nmp->nm_lock); } - if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) + if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) { break; + } msleep(req, &req->r_mtx, slpflag | (PZERO - 1), "nfswaitsent", &ts); slpflag = 0; } @@ -4551,10 +4834,11 @@ nfs_request_async( } } FSDBG(274, R_XID32(req->r_xid), np, procnum, error); - if (error || req->r_callback.rcb_func) + if (error || req->r_callback.rcb_func) { nfs_request_rele(req); + } - return (error); + return error; } /* @@ -4571,8 +4855,9 @@ nfs_request_async_finish( struct nfsmount *nmp; lck_mtx_lock(&req->r_mtx); - if (!asyncio) + if (!asyncio) { req->r_flags |= R_ASYNCWAIT; + } while (req->r_flags & R_RESENDQ) { /* wait until the request is off the resend queue */ struct timespec ts = { 2, 0 }; @@ -4586,8 +4871,9 @@ nfs_request_async_finish( */ TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); req->r_rchain.tqe_next = NFSREQNOLIST; - if (req->r_flags & R_RESENDQ) + if (req->r_flags & R_RESENDQ) { req->r_flags &= ~R_RESENDQ; + } /* Remove the R_RESENDQ reference */ assert(req->r_refs > 0); req->r_refs--; @@ -4596,9 +4882,10 @@ nfs_request_async_finish( } lck_mtx_unlock(&nmp->nm_lock); } - if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) + if ((error = 
nfs_sigintr(req->r_nmp, req, req->r_thread, 0))) { break; - msleep(req, &req->r_mtx, PZERO-1, "nfsresendqwait", &ts); + } + msleep(req, &req->r_mtx, PZERO - 1, "nfsresendqwait", &ts); } lck_mtx_unlock(&req->r_mtx); @@ -4615,28 +4902,33 @@ nfs_request_async_finish( if (req->r_resendtime) { /* send later */ nfs_asyncio_resend(req); lck_mtx_unlock(&req->r_mtx); - return (EINPROGRESS); + return EINPROGRESS; } lck_mtx_unlock(&req->r_mtx); } req->r_error = 0; req->r_flags &= ~R_RESTART; - if ((error = nfs_request_add_header(req))) + if ((error = nfs_request_add_header(req))) { break; - if ((error = nfs_request_send(req, !asyncio))) + } + if ((error = nfs_request_send(req, !asyncio))) { break; - if (asyncio) - return (EINPROGRESS); + } + if (asyncio) { + return EINPROGRESS; + } nfs_request_wait(req); - if ((error = nfs_request_finish(req, nmrepp, status))) + if ((error = nfs_request_finish(req, nmrepp, status))) { break; + } } - if (xidp) + if (xidp) { *xidp = req->r_xid; + } FSDBG(275, R_XID32(req->r_xid), req->r_np, req->r_procnum, error); nfs_request_rele(req); - return (error); + return error; } /* @@ -4658,8 +4950,9 @@ nfs_softterm(struct nfsreq *req) struct nfsmount *nmp = req->r_nmp; req->r_flags |= R_SOFTTERM; req->r_error = ETIMEDOUT; - if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) + if (!(req->r_flags & R_CWND) || nfs_mount_gone(nmp)) { return; + } /* update congestion window */ req->r_flags &= ~R_CWND; lck_mtx_lock(&nmp->nm_lock); @@ -4700,8 +4993,9 @@ nfs_reqdequeue(struct nfsreq *req) void nfs_reqbusy(struct nfsreq *req) { - if (req->r_lflags & RL_BUSY) + if (req->r_lflags & RL_BUSY) { panic("req locked"); + } req->r_lflags |= RL_BUSY; } @@ -4713,23 +5007,25 @@ nfs_reqnext(struct nfsreq *req) { struct nfsreq * nextreq; - if (req == NULL) - return (NULL); + if (req == NULL) { + return NULL; + } /* * We need to get and busy the next req before signalling the * current one, otherwise wakeup() may block us and we'll race to * grab the next req. */ nextreq = TAILQ_NEXT(req, r_chain); - if (nextreq != NULL) + if (nextreq != NULL) { nfs_reqbusy(nextreq); + } /* unbusy and signal. 
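/*
 * A user-space sketch of the "wait until the request leaves the resend queue"
 * loop in nfs_request_async_finish() above, using POSIX primitives in place
 * of msleep()/wakeup(); the type and field names are illustrative only. The
 * wait is bounded (2 seconds, like the ts = {2, 0} used above) so the loop
 * can periodically re-check for an interrupt, mirroring nfs_sigintr().
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct waitable_req {
	pthread_mutex_t lock;
	pthread_cond_t  cv;		/* signalled when on_resendq clears */
	bool            on_resendq;
	bool            interrupted;	/* stands in for nfs_sigintr() */
};

/* Returns 0 once the request is off the queue, EINTR if interrupted. */
static int
wait_off_resendq(struct waitable_req *req)
{
	int error = 0;

	pthread_mutex_lock(&req->lock);
	while (req->on_resendq) {
		if (req->interrupted) {
			error = EINTR;
			break;
		}
		struct timespec ts;
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 2;		/* bounded sleep, then re-check */
		pthread_cond_timedwait(&req->cv, &req->lock, &ts);
	}
	pthread_mutex_unlock(&req->lock);
	return error;
}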
*/ req->r_lflags &= ~RL_BUSY; if (req->r_lflags & RL_WAITING) { req->r_lflags &= ~RL_WAITING; wakeup(&req->r_lflags); } - return (nextreq); + return nextreq; } /* @@ -4754,7 +5050,7 @@ nfs_request_timer(__unused void *param0, __unused void *param1) restart: lck_mtx_lock(nfs_request_mutex); req = TAILQ_FIRST(&nfs_reqq); - if (req == NULL) { /* no requests - turn timer off */ + if (req == NULL) { /* no requests - turn timer off */ nfs_request_timer_on = 0; lck_mtx_unlock(nfs_request_mutex); return; @@ -4763,14 +5059,15 @@ restart: nfs_reqbusy(req); microuptime(&now); - for ( ; req != NULL ; req = nfs_reqnext(req)) { + for (; req != NULL; req = nfs_reqnext(req)) { nmp = req->r_nmp; if (nmp == NULL) { NFS_SOCK_DBG("Found a request without a mount!\n"); continue; } - if (req->r_error || req->r_nmrep.nmc_mhead) + if (req->r_error || req->r_nmrep.nmc_mhead) { continue; + } if ((error = nfs_sigintr(nmp, req, req->r_thread, 0))) { if (req->r_callback.rcb_func != NULL) { /* async I/O RPC needs to be finished */ @@ -4779,8 +5076,9 @@ restart: finish_asyncio = !(req->r_flags & R_WAITSENT); wakeup(req); lck_mtx_unlock(&req->r_mtx); - if (finish_asyncio) + if (finish_asyncio) { nfs_asyncio_finish(req); + } } continue; } @@ -4792,7 +5090,7 @@ restart: ((req->r_lastmsg + nmp->nm_tprintf_delay) < now.tv_sec)) { req->r_lastmsg = now.tv_sec; nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO, - "not responding", 1); + "not responding", 1); req->r_flags |= R_TPRINTFMSG; lck_mtx_lock(&nmp->nm_lock); if (!(nmp->nm_state & NFSSTA_MOUNTED)) { @@ -4804,8 +5102,9 @@ restart: finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT)); wakeup(req); lck_mtx_unlock(&req->r_mtx); - if (finish_asyncio) + if (finish_asyncio) { nfs_asyncio_finish(req); + } continue; } lck_mtx_unlock(&nmp->nm_lock); @@ -4815,13 +5114,14 @@ restart: * Put a reasonable limit on the maximum timeout, * and reduce that limit when soft mounts get timeouts or are in reconnect. */ - if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) + if (!(NMFLAG(nmp, SOFT) || (req->r_flags & R_SOFT)) && !nfs_can_squish(nmp)) { maxtime = NFS_MAXTIMEO; - else if ((req->r_flags & (R_SETUP|R_RECOVER)) || - ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) - maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts+1))/2; - else - maxtime = NFS_MINTIMEO/4; + } else if ((req->r_flags & (R_SETUP | R_RECOVER)) || + ((nmp->nm_reconnect_start <= 0) || ((now.tv_sec - nmp->nm_reconnect_start) < 8))) { + maxtime = (NFS_MAXTIMEO / (nmp->nm_timeouts + 1)) / 2; + } else { + maxtime = NFS_MINTIMEO / 4; + } /* * Check for request timeout. 
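/*
 * The queue walk above depends on nfs_reqnext()'s hand-over-hand discipline:
 * busy the successor *before* unbusying (and waking waiters on) the current
 * entry, so nobody can slip in and free the successor mid-scan. A simplified
 * single-list sketch of that ordering, with illustrative names:
 */
#include <assert.h>
#include <stddef.h>

struct scan_req {
	struct scan_req *next;
	int              busy;	/* RL_BUSY analogue: entry may not be freed */
};

static void
scan_req_busy(struct scan_req *r)
{
	assert(!r->busy);	/* panic("req locked") analogue */
	r->busy = 1;
}

static struct scan_req *
scan_req_next(struct scan_req *r)
{
	struct scan_req *nxt;

	if (r == NULL) {
		return NULL;
	}
	nxt = r->next;
	if (nxt != NULL) {
		scan_req_busy(nxt);	/* pin the successor first... */
	}
	r->busy = 0;			/* ...then release the current entry */
	return nxt;
}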
@@ -4831,24 +5131,28 @@ restart: lck_mtx_lock(&nmp->nm_lock); if (req->r_flags & R_RESENDERR) { /* with resend errors, retry every few seconds */ - timeo = 4*hz; + timeo = 4 * hz; } else { - if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) + if (req->r_procnum == NFSPROC_NULL && req->r_gss_ctx != NULL) { timeo = NFS_MINIDEMTIMEO; // gss context setup - else if (NMFLAG(nmp, DUMBTIMER)) + } else if (NMFLAG(nmp, DUMBTIMER)) { timeo = nmp->nm_timeo; - else + } else { timeo = NFS_RTO(nmp, proct[req->r_procnum]); + } /* ensure 62.5 ms floor */ - while (16 * timeo < hz) + while (16 * timeo < hz) { timeo *= 2; - if (nmp->nm_timeouts > 0) + } + if (nmp->nm_timeouts > 0) { timeo *= nfs_backoff[nmp->nm_timeouts - 1]; + } } /* limit timeout to max */ - if (timeo > maxtime) + if (timeo > maxtime) { timeo = maxtime; + } if (req->r_rtt <= timeo) { NFS_SOCK_DBG("nfs timeout: req time %d and timeo is %d continue\n", req->r_rtt, timeo); lck_mtx_unlock(&nmp->nm_lock); @@ -4857,11 +5161,12 @@ restart: } /* The request has timed out */ NFS_SOCK_DBG("nfs timeout: proc %d %d xid %llx rtt %d to %d # %d, t %ld/%d\n", - req->r_procnum, proct[req->r_procnum], - req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts, - (now.tv_sec - req->r_start)*NFS_HZ, maxtime); - if (nmp->nm_timeouts < 8) + req->r_procnum, proct[req->r_procnum], + req->r_xid, req->r_rtt, timeo, nmp->nm_timeouts, + (now.tv_sec - req->r_start) * NFS_HZ, maxtime); + if (nmp->nm_timeouts < 8) { nmp->nm_timeouts++; + } if (nfs_mount_check_dead_timeout(nmp)) { /* Unbusy this request */ req->r_lflags &= ~RL_BUSY; @@ -4871,7 +5176,7 @@ restart: } lck_mtx_unlock(&req->r_mtx); - /* No need to poke this mount */ + /* No need to poke this mount */ if (nmp->nm_sockflags & NMSOCK_POKE) { nmp->nm_sockflags &= ~NMSOCK_POKE; TAILQ_REMOVE(&nfs_mount_poke_queue, nmp, nm_pokeq); @@ -4903,11 +5208,11 @@ restart: */ goto restart; } - + /* if it's been a few seconds, try poking the socket */ if ((nmp->nm_sotype == SOCK_STREAM) && ((now.tv_sec - req->r_start) >= 3) && - !(nmp->nm_sockflags & (NMSOCK_POKE|NMSOCK_UNMOUNT)) && + !(nmp->nm_sockflags & (NMSOCK_POKE | NMSOCK_UNMOUNT)) && (nmp->nm_sockflags & NMSOCK_READY)) { nmp->nm_sockflags |= NMSOCK_POKE; /* @@ -4923,9 +5228,9 @@ restart: } /* For soft mounts (& SETUPs/RECOVERs), check for too many retransmits/timeout. 
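/*
 * A worked sketch of the retransmit-timeout computation above; timeo is in
 * clock ticks (hz per second). Start from the estimated RTO, double until
 * 16 * timeo >= hz to enforce the 62.5 ms (hz/16 tick) floor, apply an
 * exponential backoff factor once the mount has accumulated timeouts (the
 * caller caps nm_timeouts at 8; the table below follows nfs_backoff's
 * doubling shape), and clamp to the adaptive maximum.
 */
static const int backoff_tab[8] = { 2, 4, 8, 16, 32, 64, 128, 256 };

static int
retrans_timeo(int base_rto, int hz, int ntimeouts, int maxtime)
{
	int timeo = base_rto;

	while (16 * timeo < hz) {	/* floor: at least hz/16 ticks */
		timeo *= 2;
	}
	if (ntimeouts > 0 && ntimeouts <= 8) {
		timeo *= backoff_tab[ntimeouts - 1];
	}
	if (timeo > maxtime) {		/* never beyond the adaptive maximum */
		timeo = maxtime;
	}
	return timeo;
}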
*/ - if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP|R_RECOVER|R_SOFT))) && + if ((NMFLAG(nmp, SOFT) || (req->r_flags & (R_SETUP | R_RECOVER | R_SOFT))) && ((req->r_rexmit >= req->r_retry) || /* too many */ - ((now.tv_sec - req->r_start)*NFS_HZ > maxtime))) { /* too long */ + ((now.tv_sec - req->r_start) * NFS_HZ > maxtime))) { /* too long */ OSAddAtomic64(1, &nfsstats.rpctimeouts); lck_mtx_lock(&nmp->nm_lock); if (!(nmp->nm_state & NFSSTA_TIMEO)) { @@ -4933,7 +5238,7 @@ restart: /* make sure we note the unresponsive server */ /* (maxtime may be less than tprintf delay) */ nfs_down(req->r_nmp, req->r_thread, 0, NFSSTA_TIMEO, - "not responding", 1); + "not responding", 1); req->r_lastmsg = now.tv_sec; req->r_flags |= R_TPRINTFMSG; } else { @@ -4945,21 +5250,23 @@ restart: continue; } NFS_SOCK_DBG("nfs timer TERMINATE: p %d x 0x%llx f 0x%x rtt %d t %ld\n", - req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, - now.tv_sec - req->r_start); + req->r_procnum, req->r_xid, req->r_flags, req->r_rtt, + now.tv_sec - req->r_start); nfs_softterm(req); finish_asyncio = ((req->r_callback.rcb_func != NULL) && !(req->r_flags & R_WAITSENT)); wakeup(req); lck_mtx_unlock(&req->r_mtx); - if (finish_asyncio) + if (finish_asyncio) { nfs_asyncio_finish(req); + } continue; } /* for TCP, only resend if explicitly requested */ if ((nmp->nm_sotype == SOCK_STREAM) && !(req->r_flags & R_MUSTRESEND)) { - if (++req->r_rexmit > NFS_MAXREXMIT) + if (++req->r_rexmit > NFS_MAXREXMIT) { req->r_rexmit = NFS_MAXREXMIT; + } req->r_rtt = 0; lck_mtx_unlock(&req->r_mtx); continue; @@ -4974,12 +5281,13 @@ restart: continue; } NFS_SOCK_DBG("nfs timer mark resend: p %d x 0x%llx f 0x%x rtt %d\n", - req->r_procnum, req->r_xid, req->r_flags, req->r_rtt); + req->r_procnum, req->r_xid, req->r_flags, req->r_rtt); req->r_flags |= R_MUSTRESEND; req->r_rtt = -1; wakeup(req); - if ((req->r_flags & (R_IOD|R_ASYNC|R_ASYNCWAIT|R_SENDING)) == R_ASYNC) + if ((req->r_flags & (R_IOD | R_ASYNC | R_ASYNCWAIT | R_SENDING)) == R_ASYNC) { nfs_asyncio_resend(req); + } lck_mtx_unlock(&req->r_mtx); } @@ -5002,7 +5310,7 @@ int nfs_noremotehang(thread_t thd) { proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL; - return (p && proc_noremotehang(p)); + return p && proc_noremotehang(p); } /* @@ -5013,23 +5321,26 @@ nfs_noremotehang(thread_t thd) * and the mount is interruptable, or if we are a thread that is in the process * of cancellation (also SIGKILL posted). */ -extern int sigprop[NSIG+1]; +extern int sigprop[NSIG + 1]; int nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked) { proc_t p; int error = 0; - if (!nmp) - return (ENXIO); + if (!nmp) { + return ENXIO; + } - if (req && (req->r_flags & R_SOFTTERM)) - return (ETIMEDOUT); /* request has been terminated. */ - if (req && (req->r_flags & R_NOINTR)) + if (req && (req->r_flags & R_SOFTTERM)) { + return ETIMEDOUT; /* request has been terminated. */ + } + if (req && (req->r_flags & R_NOINTR)) { thd = NULL; /* don't check for signal on R_NOINTR */ - - if (!nmplocked) + } + if (!nmplocked) { lck_mtx_lock(&nmp->nm_lock); + } if (nmp->nm_state & NFSSTA_FORCE) { /* If a force unmount is in progress then fail. */ error = EIO; @@ -5040,25 +5351,30 @@ nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocke } /* Check if the mount is marked dead. */ - if (!error && (nmp->nm_state & NFSSTA_DEAD)) + if (!error && (nmp->nm_state & NFSSTA_DEAD)) { error = ENXIO; + } /* * If the mount is hung and we've requested not to hang * on remote filesystems, then bail now. 
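/*
 * The soft-mount give-up test above distills to a two-part predicate: the
 * request has either exhausted its retransmit budget or been outstanding
 * longer than the adaptive maximum. A sketch with illustrative names
 * (elapsed time is scaled by NFS_HZ-style ticks, as above):
 */
#include <stdbool.h>
#include <time.h>

struct soft_req {
	int    rexmit;	/* retransmits so far */
	int    retry;	/* retransmit budget */
	time_t start;	/* when the request was first sent */
};

static bool
soft_req_expired(const struct soft_req *r, time_t now, int maxtime, int nfs_hz)
{
	return r->rexmit >= r->retry ||			/* too many */
	       (now - r->start) * nfs_hz > maxtime;	/* too long */
}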
*/ if (current_proc() != kernproc && - !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) + !error && (nmp->nm_state & NFSSTA_TIMEO) && nfs_noremotehang(thd)) { error = EIO; + } - if (!nmplocked) + if (!nmplocked) { lck_mtx_unlock(&nmp->nm_lock); - if (error) - return (error); + } + if (error) { + return error; + } /* may not have a thread for async I/O */ - if (thd == NULL || current_proc() == kernproc) - return (0); + if (thd == NULL || current_proc() == kernproc) { + return 0; + } /* * Check if the process is aborted, but don't interrupt if we @@ -5067,16 +5383,18 @@ nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocke */ if (((p = current_proc()) != kernproc) && current_thread_aborted() && (!(p->p_acflag & AXSIG) || (p->exit_thread != current_thread()) || - (p->p_sigacts == NULL) || - (p->p_sigacts->ps_sig < 1) || (p->p_sigacts->ps_sig > NSIG) || - !(sigprop[p->p_sigacts->ps_sig] & SA_CORE))) - return (EINTR); + (p->p_sigacts == NULL) || + (p->p_sigacts->ps_sig < 1) || (p->p_sigacts->ps_sig > NSIG) || + !(sigprop[p->p_sigacts->ps_sig] & SA_CORE))) { + return EINTR; + } /* mask off thread and process blocked signals. */ if (NMFLAG(nmp, INTR) && ((p = get_bsdthreadtask_info(thd))) && - proc_pendingsignals(p, NFSINT_SIGMASK)) - return (EINTR); - return (0); + proc_pendingsignals(p, NFSINT_SIGMASK)) { + return EINTR; + } + return 0; } /* @@ -5093,30 +5411,35 @@ nfs_sndlock(struct nfsreq *req) int error = 0, slpflag = 0; struct timespec ts = { 0, 0 }; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } lck_mtx_lock(&nmp->nm_lock); statep = &nmp->nm_state; - if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) + if (NMFLAG(nmp, INTR) && req->r_thread && !(req->r_flags & R_NOINTR)) { slpflag = PCATCH; + } while (*statep & NFSSTA_SNDLOCK) { - if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) + if ((error = nfs_sigintr(nmp, req, req->r_thread, 1))) { break; + } *statep |= NFSSTA_WANTSND; - if (nfs_noremotehang(req->r_thread)) + if (nfs_noremotehang(req->r_thread)) { ts.tv_sec = 1; + } msleep(statep, &nmp->nm_lock, slpflag | (PZERO - 1), "nfsndlck", &ts); if (slpflag == PCATCH) { slpflag = 0; ts.tv_sec = 2; } } - if (!error) + if (!error) { *statep |= NFSSTA_SNDLOCK; + } lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } /* @@ -5128,20 +5451,23 @@ nfs_sndunlock(struct nfsreq *req) struct nfsmount *nmp = req->r_nmp; int *statep, wake = 0; - if (!nmp) + if (!nmp) { return; + } lck_mtx_lock(&nmp->nm_lock); statep = &nmp->nm_state; - if ((*statep & NFSSTA_SNDLOCK) == 0) + if ((*statep & NFSSTA_SNDLOCK) == 0) { panic("nfs sndunlock"); - *statep &= ~(NFSSTA_SNDLOCK|NFSSTA_SENDING); + } + *statep &= ~(NFSSTA_SNDLOCK | NFSSTA_SENDING); if (*statep & NFSSTA_WANTSND) { *statep &= ~NFSSTA_WANTSND; wake = 1; } lck_mtx_unlock(&nmp->nm_lock); - if (wake) + if (wake) { wakeup(statep); + } } int @@ -5171,8 +5497,9 @@ nfs_aux_request( if (!so) { /* create socket and set options */ soproto = (sotype == SOCK_DGRAM) ? IPPROTO_UDP : IPPROTO_TCP; - if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) + if ((error = sock_socket(saddr->sa_family, sotype, soproto, NULL, NULL, &newso))) { goto nfsmout; + } if (bindresv) { int level = (saddr->sa_family == AF_INET) ? 
IPPROTO_IP : IPPROTO_IPV6; @@ -5191,18 +5518,20 @@ nfs_aux_request( } else { error = EINVAL; } - if (!error) + if (!error) { error = sock_bind(newso, (struct sockaddr *)&ss); + } nfsmout_if(error); } if (sotype == SOCK_STREAM) { -# define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */ +# define NFS_AUX_CONNECTION_TIMEOUT 4 /* 4 second timeout for connections */ int count = 0; - + error = sock_connect(newso, saddr, MSG_DONTWAIT); - if (error == EINPROGRESS) + if (error == EINPROGRESS) { error = 0; + } nfsmout_if(error); while ((error = sock_connectwait(newso, &tv)) == EINPROGRESS) { @@ -5216,8 +5545,9 @@ nfs_aux_request( } if (((error = sock_setsockopt(newso, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))) || ((error = sock_setsockopt(newso, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))) || - ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) + ((error = sock_setsockopt(newso, SOL_SOCKET, SO_NOADDRERR, &on, sizeof(on))))) { goto nfsmout; + } so = newso; } else { /* make sure socket is using a one second timeout in this function */ @@ -5239,23 +5569,27 @@ nfs_aux_request( nfs_rpc_record_state_init(&nrrs); } - for (try=0; try < timeo; try++) { - if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) + for (try = 0; try < timeo; try++) { + if ((error = nfs_sigintr(nmp, NULL, !try ? NULL : thd, 0))) { break; + } if (!try || (try == sendat)) { /* send the request (resending periodically for UDP) */ - if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) + if ((error = mbuf_copym(mreq, 0, MBUF_COPYALL, MBUF_WAITOK, &m))) { goto nfsmout; + } bzero(&msg, sizeof(msg)); if ((sotype == SOCK_DGRAM) && !sock_isconnected(so)) { msg.msg_name = saddr; msg.msg_namelen = saddr->sa_len; } - if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) + if ((error = sock_sendmbuf(so, &msg, m, 0, &sentlen))) { goto nfsmout; + } sendat *= 2; - if (sendat > 30) + if (sendat > 30) { sendat = 30; + } } /* wait for the response */ if (sotype == SOCK_STREAM) { @@ -5263,20 +5597,22 @@ nfs_aux_request( error = nfs_rpc_record_read(so, &nrrs, 0, &recv, &mrep); /* if we don't have the whole record yet, we'll keep trying */ } else { - readlen = 1<<18; + readlen = 1 << 18; bzero(&msg, sizeof(msg)); error = sock_receivembuf(so, &msg, &mrep, 0, &readlen); } - if (error == EWOULDBLOCK) + if (error == EWOULDBLOCK) { continue; + } nfsmout_if(error); /* parse the response */ nfsm_chain_dissect_init(error, nmrep, mrep); nfsm_chain_get_32(error, nmrep, rxid); nfsm_chain_get_32(error, nmrep, reply); nfsmout_if(error); - if ((rxid != xid) || (reply != RPC_REPLY)) + if ((rxid != xid) || (reply != RPC_REPLY)) { error = EBADRPC; + } nfsm_chain_get_32(error, nmrep, reply_status); nfsmout_if(error); if (reply_status == RPC_MSGDENIED) { @@ -5288,8 +5624,9 @@ nfs_aux_request( nfsm_chain_get_32(error, nmrep, verf_type); /* verifier flavor */ nfsm_chain_get_32(error, nmrep, verf_len); /* verifier length */ nfsmout_if(error); - if (verf_len) + if (verf_len) { nfsm_chain_adv(error, nmrep, nfsm_rndup(verf_len)); + } nfsm_chain_get_32(error, nmrep, accepted_status); nfsmout_if(error); switch (accepted_status) { @@ -5325,7 +5662,7 @@ nfsmout: sock_close(newso); } mbuf_freem(mreq); - return (error); + return error; } int @@ -5349,7 +5686,7 @@ nfs_portmap_lookup( uint32_t ualen = 0; uint32_t port; uint64_t xid = 0; - char uaddr[MAX_IPv6_STR_LEN+16]; + char uaddr[MAX_IPv6_STR_LEN + 16]; bcopy(sa, saddr, min(sizeof(ss), sa->sa_len)); if (saddr->sa_family == AF_INET) { @@ -5363,52 +5700,56 @@ 
nfs_portmap_lookup( pmvers = RPCBVERS4; pmproc = RPCBPROC_GETVERSADDR; } else { - return (EINVAL); + return EINVAL; } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); tryagain: /* send portmapper request to get port/uaddr */ - if (ip == 4) + if (ip == 4) { ((struct sockaddr_in*)saddr)->sin_port = htons(PMAPPORT); - else + } else { ((struct sockaddr_in6*)saddr)->sin6_port = htons(PMAPPORT); - nfsm_chain_build_alloc_init(error, &nmreq, 8*NFSX_UNSIGNED); + } + nfsm_chain_build_alloc_init(error, &nmreq, 8 * NFSX_UNSIGNED); nfsm_chain_add_32(error, &nmreq, protocol); nfsm_chain_add_32(error, &nmreq, vers); if (ip == 4) { nfsm_chain_add_32(error, &nmreq, ipproto); nfsm_chain_add_32(error, &nmreq, 0); } else { - if (ipproto == IPPROTO_TCP) + if (ipproto == IPPROTO_TCP) { nfsm_chain_add_string(error, &nmreq, "tcp6", 4); - else + } else { nfsm_chain_add_string(error, &nmreq, "udp6", 4); + } nfsm_chain_add_string(error, &nmreq, "", 0); /* uaddr */ nfsm_chain_add_string(error, &nmreq, "", 0); /* owner */ } nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfsm_rpchead2(nmp, (ipproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM, - pmprog, pmvers, pmproc, RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, - &xid, &mreq); + pmprog, pmvers, pmproc, RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, + &xid, &mreq); nfsmout_if(error); nmreq.nmc_mhead = NULL; error = nfs_aux_request(nmp, thd, saddr, so, (ipproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM, - mreq, R_XID32(xid), 0, timeo, &nmrep); + mreq, R_XID32(xid), 0, timeo, &nmrep); /* grab port from portmap response */ if (ip == 4) { nfsm_chain_get_32(error, &nmrep, port); - if (!error) + if (!error) { ((struct sockaddr_in*)sa)->sin_port = htons(port); + } } else { /* get uaddr string and convert to sockaddr */ nfsm_chain_get_32(error, &nmrep, ualen); if (!error) { - if (ualen > (sizeof(uaddr)-1)) + if (ualen > (sizeof(uaddr) - 1)) { error = EIO; + } if (ualen < 1) { /* program is not available, just return a zero port */ bcopy(sa, saddr, min(sizeof(ss), sa->sa_len)); @@ -5417,8 +5758,9 @@ tryagain: nfsm_chain_get_opaque(error, &nmrep, ualen, uaddr); if (!error) { uaddr[ualen] = '\0'; - if (!nfs_uaddr2sockaddr(uaddr, saddr)) + if (!nfs_uaddr2sockaddr(uaddr, saddr)) { error = EIO; + } } } } @@ -5436,47 +5778,50 @@ tryagain: goto tryagain; } } - if (!error) + if (!error) { bcopy(saddr, sa, min(saddr->sa_len, sa->sa_len)); + } } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int nfs_msg(thread_t thd, - const char *server, - const char *msg, - int error) + const char *server, + const char *msg, + int error) { proc_t p = thd ? get_bsdthreadtask_info(thd) : NULL; tpr_t tpr; - if (p) + if (p) { tpr = tprintf_open(p); - else + } else { tpr = NULL; - if (error) + } + if (error) { tprintf(tpr, "nfs server %s: %s, error %d\n", server, msg, error); - else + } else { tprintf(tpr, "nfs server %s: %s\n", server, msg); + } tprintf_close(tpr); - return (0); + return 0; } -#define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */ -#define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if the are automounted */ -#define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */ -#define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */ -#define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. 
Currently not implemented */ +#define NFS_SQUISH_MOBILE_ONLY 0x0001 /* Squish mounts only on mobile machines */ +#define NFS_SQUISH_AUTOMOUNTED_ONLY 0x0002 /* Squish mounts only if they are automounted */ +#define NFS_SQUISH_SOFT 0x0004 /* Treat all soft mounts as though they were on a mobile machine */ +#define NFS_SQUISH_QUICK 0x0008 /* Try to squish mounts more quickly. */ +#define NFS_SQUISH_SHUTDOWN 0x1000 /* Squish all mounts on shutdown. Currently not implemented */ uint32_t nfs_squishy_flags = NFS_SQUISH_MOBILE_ONLY | NFS_SQUISH_AUTOMOUNTED_ONLY | NFS_SQUISH_QUICK; int32_t nfs_is_mobile; -#define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead time out for squishy mounts */ -#define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead time out when nfs_squish_flags NFS_SQUISH_QUICK bit is set*/ +#define NFS_SQUISHY_DEADTIMEOUT 8 /* Dead timeout for squishy mounts */ +#define NFS_SQUISHY_QUICKTIMEOUT 4 /* Quicker dead timeout when the nfs_squishy_flags NFS_SQUISH_QUICK bit is set */ /* * Could this mount be squished? @@ -5487,13 +5832,15 @@ nfs_can_squish(struct nfsmount *nmp) uint64_t flags = vfs_flags(nmp->nm_mountp); int softsquish = ((nfs_squishy_flags & NFS_SQUISH_SOFT) & NMFLAG(nmp, SOFT)); - if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) - return (0); + if (!softsquish && (nfs_squishy_flags & NFS_SQUISH_MOBILE_ONLY) && nfs_is_mobile == 0) { + return 0; + } - if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) - return (0); + if ((nfs_squishy_flags & NFS_SQUISH_AUTOMOUNTED_ONLY) && (flags & MNT_AUTOMOUNTED) == 0) { + return 0; + } - return (1); + return 1; } /* @@ -5535,38 +5882,41 @@ nfs_is_squishy(struct nfsmount *nmp) int timeo = (nfs_squishy_flags & NFS_SQUISH_QUICK) ? NFS_SQUISHY_QUICKTIMEOUT : NFS_SQUISHY_DEADTIMEOUT; NFS_SOCK_DBG("%s: nm_curdeadtimeout = %d, nfs_is_mobile = %d\n", - vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile); + vfs_statfs(mp)->f_mntfromname, nmp->nm_curdeadtimeout, nfs_is_mobile); - if (!nfs_can_squish(nmp)) + if (!nfs_can_squish(nmp)) { goto out; + } - timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout/8, timeo) : timeo; + timeo = (nmp->nm_deadtimeout > timeo) ? max(nmp->nm_deadtimeout / 8, timeo) : timeo; NFS_SOCK_DBG("nm_writers = %d nm_mappers = %d timeo = %d\n", nmp->nm_writers, nmp->nm_mappers, timeo); if (nmp->nm_writers == 0 && nmp->nm_mappers == 0) { uint64_t flags = mp ? vfs_flags(mp) : 0; squishy = 1; - - /* - * Walk the nfs nodes and check for dirty buffers it we're not + + /* + * Walk the nfs nodes and check for dirty buffers if we're not * RDONLY and we've not already been declared as squishy since * this can be a bit expensive. */ - if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) + if (!(flags & MNT_RDONLY) && !(nmp->nm_state & NFSSTA_SQUISHY)) { squishy = !nfs_mount_is_dirty(mp); + } } out: - if (squishy) + if (squishy) { nmp->nm_state |= NFSSTA_SQUISHY; - else + } else { nmp->nm_state &= ~NFSSTA_SQUISHY; + } nmp->nm_curdeadtimeout = squishy ? 
timeo : nmp->nm_deadtimeout; - + NFS_SOCK_DBG("nm_curdeadtimeout = %d\n", nmp->nm_curdeadtimeout); - return (squishy); + return squishy; } /* @@ -5582,13 +5932,13 @@ nfs_is_dead(int error, struct nfsmount *nmp) lck_mtx_lock(&nmp->nm_lock); if (nmp->nm_state & NFSSTA_DEAD) { lck_mtx_unlock(&nmp->nm_lock); - return (1); + return 1; } if ((error != ENETUNREACH && error != EHOSTUNREACH && error != EADDRNOTAVAIL) || !(nmp->nm_locations.nl_numlocs == 1 && nmp->nm_locations.nl_locations[0]->nl_servcount == 1)) { lck_mtx_unlock(&nmp->nm_lock); - return (0); + return 0; } if ((nfs_squishy_flags & NFS_SQUISH_QUICK) && nfs_is_squishy(nmp)) { @@ -5597,10 +5947,10 @@ nfs_is_dead(int error, struct nfsmount *nmp) lck_mtx_unlock(&nmp->nm_lock); nfs_mount_zombie(nmp, NFSSTA_DEAD); vfs_event_signal(&fsid, VQ_DEAD, 0); - return (1); + return 1; } lck_mtx_unlock(&nmp->nm_lock); - return (0); + return 0; } /* @@ -5617,7 +5967,7 @@ nfs_use_cache(struct nfsmount *nmp) * a timeout? */ int cache_ok = (nfs_is_mobile || NMFLAG(nmp, SOFT) || - nfs_can_squish(nmp) || nmp->nm_deadtimeout); + nfs_can_squish(nmp) || nmp->nm_deadtimeout); int timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO; @@ -5626,7 +5976,7 @@ nfs_use_cache(struct nfsmount *nmp) * return 1 to not get things out of the cache. */ - return ((nmp->nm_state & timeoutmask) && cache_ok); + return (nmp->nm_state & timeoutmask) && cache_ok; } /* @@ -5644,25 +5994,30 @@ nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *m uint32_t do_vfs_signal = 0; struct timeval now; - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { return; + } lck_mtx_lock(&nmp->nm_lock); timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO; - if (NMFLAG(nmp, MUTEJUKEBOX)) /* jukebox timeouts don't count as unresponsive if muted */ - timeoutmask &= ~NFSSTA_JUKEBOXTIMEO; + if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */ + timeoutmask &= ~NFSSTA_JUKEBOXTIMEO; + } wasunresponsive = (nmp->nm_state & timeoutmask); /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */ softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE)); - if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) + if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) { nmp->nm_state |= NFSSTA_TIMEO; - if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) + } + if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) { nmp->nm_state |= NFSSTA_LOCKTIMEO; - if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) + } + if ((flags & NFSSTA_JUKEBOXTIMEO) && !(nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) { nmp->nm_state |= NFSSTA_JUKEBOXTIMEO; + } unresponsive = (nmp->nm_state & timeoutmask); @@ -5674,22 +6029,25 @@ nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *m nmp->nm_deadto_start = now.tv_sec; nfs_mount_sock_thread_wake(nmp); } else if ((now.tv_sec - nmp->nm_deadto_start) > nmp->nm_curdeadtimeout && !holding_state) { - if (!(nmp->nm_state & NFSSTA_DEAD)) + if (!(nmp->nm_state & NFSSTA_DEAD)) { printf("nfs server %s: %sdead\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname, - (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? "squished " : ""); + (nmp->nm_curdeadtimeout != nmp->nm_deadtimeout) ? 
"squished " : ""); + } do_vfs_signal = VQ_DEAD; } } lck_mtx_unlock(&nmp->nm_lock); - if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) + if (do_vfs_signal == VQ_DEAD && !(nmp->nm_state & NFSSTA_DEAD)) { nfs_mount_zombie(nmp, NFSSTA_DEAD); - else if (softnobrowse || wasunresponsive || !unresponsive) + } else if (softnobrowse || wasunresponsive || !unresponsive) { do_vfs_signal = 0; - else + } else { do_vfs_signal = VQ_NOTRESP; - if (do_vfs_signal) + } + if (do_vfs_signal) { vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, do_vfs_signal, 0); + } nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error); } @@ -5700,28 +6058,34 @@ nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg) int timeoutmask, wasunresponsive, unresponsive, softnobrowse; int do_vfs_signal; - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { return; + } - if (msg) + if (msg) { nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, 0); + } lck_mtx_lock(&nmp->nm_lock); timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO; - if (NMFLAG(nmp, MUTEJUKEBOX)) /* jukebox timeouts don't count as unresponsive if muted */ - timeoutmask &= ~NFSSTA_JUKEBOXTIMEO; + if (NMFLAG(nmp, MUTEJUKEBOX)) { /* jukebox timeouts don't count as unresponsive if muted */ + timeoutmask &= ~NFSSTA_JUKEBOXTIMEO; + } wasunresponsive = (nmp->nm_state & timeoutmask); /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */ softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE)); - if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) + if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) { nmp->nm_state &= ~NFSSTA_TIMEO; - if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) + } + if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) { nmp->nm_state &= ~NFSSTA_LOCKTIMEO; - if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) + } + if ((flags & NFSSTA_JUKEBOXTIMEO) && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO)) { nmp->nm_state &= ~NFSSTA_JUKEBOXTIMEO; + } unresponsive = (nmp->nm_state & timeoutmask); @@ -5730,12 +6094,14 @@ nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg) nmp->nm_state &= ~NFSSTA_SQUISHY; lck_mtx_unlock(&nmp->nm_lock); - if (softnobrowse) + if (softnobrowse) { do_vfs_signal = 0; - else + } else { do_vfs_signal = (wasunresponsive && !unresponsive); - if (do_vfs_signal) + } + if (do_vfs_signal) { vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1); + } } @@ -5760,8 +6126,9 @@ nfsrv_rephead( int err, error; err = nd->nd_repstat; - if (err && (nd->nd_vers == NFS_VER2)) + if (err && (nd->nd_vers == NFS_VER2)) { siz = 0; + } /* * If this is a big reply, use a cluster else @@ -5776,12 +6143,12 @@ nfsrv_rephead( if (error) { /* unable to allocate packet */ /* XXX should we keep statistics for these errors? */ - return (error); + return error; } if (siz < nfs_mbuf_minclsize) { /* leave space for lower level headers */ tl = mbuf_data(mrep); - tl += 80/sizeof(*tl); /* XXX max_hdr? XXX */ + tl += 80 / sizeof(*tl); /* XXX max_hdr? 
XXX */ mbuf_setdata(mrep, tl, 6 * NFSX_UNSIGNED); } nfsm_chain_init(&nmrep, mrep); @@ -5831,11 +6198,13 @@ nfsrv_rephead( break; default: nfsm_chain_add_32(error, &nmrep, RPC_SUCCESS); - if (nd->nd_gss_context != NULL) + if (nd->nd_gss_context != NULL) { error = nfs_gss_svc_prepare_reply(nd, &nmrep); - if (err != NFSERR_RETVOID) + } + if (err != NFSERR_RETVOID) { nfsm_chain_add_32(error, &nmrep, - (err ? nfsrv_errmap(nd, err) : 0)); + (err ? nfsrv_errmap(nd, err) : 0)); + } break; } } @@ -5846,13 +6215,14 @@ done: /* error composing reply header */ /* XXX should we keep statistics for these errors? */ mbuf_freem(mrep); - return (error); + return error; } *nmrepp = nmrep; - if ((err != 0) && (err != NFSERR_RETVOID)) + if ((err != 0) && (err != NFSERR_RETVOID)) { OSAddAtomic64(1, &nfsstats.srvrpc_errs); - return (0); + } + return 0; } /* @@ -5878,19 +6248,21 @@ nfsrv_send(struct nfsrv_sock *slp, mbuf_t nam, mbuf_t top) } } error = sock_sendmbuf(so, &msg, top, 0, NULL); - if (!error) - return (0); + if (!error) { + return 0; + } log(LOG_INFO, "nfsd send error %d\n", error); - if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) + if ((error == EWOULDBLOCK) && (slp->ns_sotype == SOCK_STREAM)) { error = EPIPE; /* zap TCP sockets if they time out on send */ - + } /* Handle any recoverable (soft) socket errors here. (???) */ if (error != EINTR && error != ERESTART && error != EIO && - error != EWOULDBLOCK && error != EPIPE) + error != EWOULDBLOCK && error != EPIPE) { error = 0; + } - return (error); + return error; } /* @@ -5904,8 +6276,9 @@ nfsrv_rcv(socket_t so, void *arg, int waitflag) { struct nfsrv_sock *slp = arg; - if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) + if (!nfsd_thread_count || !(slp->ns_flag & SLP_VALID)) { return; + } lck_rw_lock_exclusive(&slp->ns_rwlock); nfsrv_rcv_locked(so, slp, waitflag); @@ -5915,13 +6288,14 @@ void nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag) { mbuf_t m, mp, mhck, m2; - int ns_flag=0, error; - struct msghdr msg; + int ns_flag = 0, error; + struct msghdr msg; size_t bytes_read; if ((slp->ns_flag & SLP_VALID) == 0) { - if (waitflag == MBUF_DONTWAIT) + if (waitflag == MBUF_DONTWAIT) { lck_rw_done(&slp->ns_rwlock); + } return; } @@ -5951,23 +6325,26 @@ nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag) bytes_read = 1000000000; error = sock_receivembuf(so, NULL, &mp, MSG_DONTWAIT, &bytes_read); if (error || mp == NULL) { - if (error == EWOULDBLOCK) + if (error == EWOULDBLOCK) { ns_flag = (waitflag == MBUF_DONTWAIT) ? 
SLP_NEEDQ : 0; - else + } else { ns_flag = SLP_DISCONN; + } goto dorecs; } m = mp; if (slp->ns_rawend) { - if ((error = mbuf_setnext(slp->ns_rawend, m))) + if ((error = mbuf_setnext(slp->ns_rawend, m))) { panic("nfsrv_rcv: mbuf_setnext failed %d\n", error); + } slp->ns_cc += bytes_read; } else { slp->ns_raw = m; slp->ns_cc = bytes_read; } - while ((m2 = mbuf_next(m))) + while ((m2 = mbuf_next(m))) { m = m2; + } slp->ns_rawend = m; /* @@ -5975,13 +6352,14 @@ nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag) */ error = nfsrv_getstream(slp, waitflag); if (error) { - if (error == EPERM) + if (error == EPERM) { ns_flag = SLP_DISCONN; - else + } else { ns_flag = SLP_NEEDQ; + } } } else { - struct sockaddr_storage nam; + struct sockaddr_storage nam; if (slp->ns_reccnt >= nfsrv_sock_max_rec_queue_length) { /* already have max # RPC records queued on this socket */ @@ -6010,9 +6388,9 @@ nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag) } else { m = mp; } - if (slp->ns_recend) + if (slp->ns_recend) { mbuf_setnextpkt(slp->ns_recend, m); - else { + } else { slp->ns_rec = m; slp->ns_flag |= SLP_DOREC; } @@ -6027,8 +6405,9 @@ nfsrv_rcv_locked(socket_t so, struct nfsrv_sock *slp, int waitflag) * Now try and process the request records, non-blocking. */ dorecs: - if (ns_flag) + if (ns_flag) { slp->ns_flag |= ns_flag; + } if (waitflag == MBUF_DONTWAIT) { int wake = (slp->ns_flag & SLP_WORKTODO); lck_rw_done(&slp->ns_rwlock); @@ -6054,139 +6433,143 @@ nfsrv_getstream(struct nfsrv_sock *slp, int waitflag) mbuf_t om, m2, recm; u_int32_t recmark; - if (slp->ns_flag & SLP_GETSTREAM) + if (slp->ns_flag & SLP_GETSTREAM) { panic("nfs getstream"); + } slp->ns_flag |= SLP_GETSTREAM; for (;;) { - if (slp->ns_reclen == 0) { - if (slp->ns_cc < NFSX_UNSIGNED) { - slp->ns_flag &= ~SLP_GETSTREAM; - return (0); - } - m = slp->ns_raw; - mdata = mbuf_data(m); - mlen = mbuf_len(m); - if (mlen >= NFSX_UNSIGNED) { - bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED); - mdata += NFSX_UNSIGNED; - mlen -= NFSX_UNSIGNED; - mbuf_setdata(m, mdata, mlen); - } else { - cp1 = (caddr_t)&recmark; - cp2 = mdata; - while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) { - while (mlen == 0) { - m = mbuf_next(m); - cp2 = mbuf_data(m); - mlen = mbuf_len(m); - } - *cp1++ = *cp2++; - mlen--; - mbuf_setdata(m, cp2, mlen); - } - } - slp->ns_cc -= NFSX_UNSIGNED; - recmark = ntohl(recmark); - slp->ns_reclen = recmark & ~0x80000000; - if (recmark & 0x80000000) - slp->ns_flag |= SLP_LASTFRAG; - else - slp->ns_flag &= ~SLP_LASTFRAG; - if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) { - slp->ns_flag &= ~SLP_GETSTREAM; - return (EPERM); - } - } - - /* - * Now get the record part. - * - * Note that slp->ns_reclen may be 0. 
Linux sometimes - * generates 0-length RPCs - */ - recm = NULL; - if (slp->ns_cc == slp->ns_reclen) { - recm = slp->ns_raw; - slp->ns_raw = slp->ns_rawend = NULL; - slp->ns_cc = slp->ns_reclen = 0; - } else if (slp->ns_cc > slp->ns_reclen) { - len = 0; - m = slp->ns_raw; - mlen = mbuf_len(m); - mdata = mbuf_data(m); - om = NULL; - while (len < slp->ns_reclen) { - if ((len + mlen) > slp->ns_reclen) { - if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) { - slp->ns_flag &= ~SLP_GETSTREAM; - return (EWOULDBLOCK); + if (slp->ns_reclen == 0) { + if (slp->ns_cc < NFSX_UNSIGNED) { + slp->ns_flag &= ~SLP_GETSTREAM; + return 0; + } + m = slp->ns_raw; + mdata = mbuf_data(m); + mlen = mbuf_len(m); + if (mlen >= NFSX_UNSIGNED) { + bcopy(mdata, (caddr_t)&recmark, NFSX_UNSIGNED); + mdata += NFSX_UNSIGNED; + mlen -= NFSX_UNSIGNED; + mbuf_setdata(m, mdata, mlen); + } else { + cp1 = (caddr_t)&recmark; + cp2 = mdata; + while (cp1 < ((caddr_t)&recmark) + NFSX_UNSIGNED) { + while (mlen == 0) { + m = mbuf_next(m); + cp2 = mbuf_data(m); + mlen = mbuf_len(m); + } + *cp1++ = *cp2++; + mlen--; + mbuf_setdata(m, cp2, mlen); } - if (om) { - if (mbuf_setnext(om, m2)) { - /* trouble... just drop it */ - printf("nfsrv_getstream: mbuf_setnext failed\n"); - mbuf_freem(m2); + } + slp->ns_cc -= NFSX_UNSIGNED; + recmark = ntohl(recmark); + slp->ns_reclen = recmark & ~0x80000000; + if (recmark & 0x80000000) { + slp->ns_flag |= SLP_LASTFRAG; + } else { + slp->ns_flag &= ~SLP_LASTFRAG; + } + if (slp->ns_reclen <= 0 || slp->ns_reclen > NFS_MAXPACKET) { + slp->ns_flag &= ~SLP_GETSTREAM; + return EPERM; + } + } + + /* + * Now get the record part. + * + * Note that slp->ns_reclen may be 0. Linux sometimes + * generates 0-length RPCs + */ + recm = NULL; + if (slp->ns_cc == slp->ns_reclen) { + recm = slp->ns_raw; + slp->ns_raw = slp->ns_rawend = NULL; + slp->ns_cc = slp->ns_reclen = 0; + } else if (slp->ns_cc > slp->ns_reclen) { + len = 0; + m = slp->ns_raw; + mlen = mbuf_len(m); + mdata = mbuf_data(m); + om = NULL; + while (len < slp->ns_reclen) { + if ((len + mlen) > slp->ns_reclen) { + if (mbuf_copym(m, 0, slp->ns_reclen - len, waitflag, &m2)) { slp->ns_flag &= ~SLP_GETSTREAM; - return (EWOULDBLOCK); + return EWOULDBLOCK; + } + if (om) { + if (mbuf_setnext(om, m2)) { + /* trouble... 
just drop it */ + printf("nfsrv_getstream: mbuf_setnext failed\n"); + mbuf_freem(m2); + slp->ns_flag &= ~SLP_GETSTREAM; + return EWOULDBLOCK; + } + recm = slp->ns_raw; + } else { + recm = m2; } + mdata += slp->ns_reclen - len; + mlen -= slp->ns_reclen - len; + mbuf_setdata(m, mdata, mlen); + len = slp->ns_reclen; + } else if ((len + mlen) == slp->ns_reclen) { + om = m; + len += mlen; + m = mbuf_next(m); recm = slp->ns_raw; + if (mbuf_setnext(om, NULL)) { + printf("nfsrv_getstream: mbuf_setnext failed 2\n"); + slp->ns_flag &= ~SLP_GETSTREAM; + return EWOULDBLOCK; + } + mlen = mbuf_len(m); + mdata = mbuf_data(m); } else { - recm = m2; - } - mdata += slp->ns_reclen - len; - mlen -= slp->ns_reclen - len; - mbuf_setdata(m, mdata, mlen); - len = slp->ns_reclen; - } else if ((len + mlen) == slp->ns_reclen) { - om = m; - len += mlen; - m = mbuf_next(m); - recm = slp->ns_raw; - if (mbuf_setnext(om, NULL)) { - printf("nfsrv_getstream: mbuf_setnext failed 2\n"); - slp->ns_flag &= ~SLP_GETSTREAM; - return (EWOULDBLOCK); + om = m; + len += mlen; + m = mbuf_next(m); + mlen = mbuf_len(m); + mdata = mbuf_data(m); } - mlen = mbuf_len(m); - mdata = mbuf_data(m); + } + slp->ns_raw = m; + slp->ns_cc -= len; + slp->ns_reclen = 0; + } else { + slp->ns_flag &= ~SLP_GETSTREAM; + return 0; + } + + /* + * Accumulate the fragments into a record. + */ + if (slp->ns_frag == NULL) { + slp->ns_frag = recm; + } else { + m = slp->ns_frag; + while ((m2 = mbuf_next(m))) { + m = m2; + } + if ((error = mbuf_setnext(m, recm))) { + panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error); + } + } + if (slp->ns_flag & SLP_LASTFRAG) { + if (slp->ns_recend) { + mbuf_setnextpkt(slp->ns_recend, slp->ns_frag); } else { - om = m; - len += mlen; - m = mbuf_next(m); - mlen = mbuf_len(m); - mdata = mbuf_data(m); - } - } - slp->ns_raw = m; - slp->ns_cc -= len; - slp->ns_reclen = 0; - } else { - slp->ns_flag &= ~SLP_GETSTREAM; - return (0); - } - - /* - * Accumulate the fragments into a record. 
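/*
 * The framing that nfsrv_getstream() reassembles above is standard RPC
 * record marking (RFC 5531): every TCP fragment is preceded by a 4-byte
 * big-endian mark whose high bit flags the last fragment of a record and
 * whose low 31 bits give the fragment length. A self-contained sketch of
 * the mark parse, mirroring the EPERM framing check above (the names and
 * maximum are illustrative):
 */
#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_RPC_RECORD	(64 * 1024)	/* stand-in for NFS_MAXPACKET */

struct rec_mark {
	uint32_t len;	/* fragment length */
	bool     last;	/* last fragment of this record? */
};

/* Returns 0 on success, -1 on a framing error. 'wire' is the raw mark. */
static int
parse_rec_mark(const unsigned char wire[4], struct rec_mark *out)
{
	uint32_t mark;

	memcpy(&mark, wire, sizeof(mark));
	mark = ntohl(mark);
	out->last = (mark & 0x80000000u) != 0;
	out->len = mark & ~0x80000000u;
	return (out->len == 0 || out->len > MAX_RPC_RECORD) ? -1 : 0;
}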
- */ - if (slp->ns_frag == NULL) { - slp->ns_frag = recm; - } else { - m = slp->ns_frag; - while ((m2 = mbuf_next(m))) - m = m2; - if ((error = mbuf_setnext(m, recm))) - panic("nfsrv_getstream: mbuf_setnext failed 3, %d\n", error); - } - if (slp->ns_flag & SLP_LASTFRAG) { - if (slp->ns_recend) - mbuf_setnextpkt(slp->ns_recend, slp->ns_frag); - else { - slp->ns_rec = slp->ns_frag; - slp->ns_flag |= SLP_DOREC; - } - slp->ns_recend = slp->ns_frag; - slp->ns_frag = NULL; - } + slp->ns_rec = slp->ns_frag; + slp->ns_flag |= SLP_DOREC; + } + slp->ns_recend = slp->ns_frag; + slp->ns_frag = NULL; + } } } @@ -6205,17 +6588,19 @@ nfsrv_dorec( int error = 0; *ndp = NULL; - if (!(slp->ns_flag & (SLP_VALID|SLP_DOREC)) || (slp->ns_rec == NULL)) - return (ENOBUFS); + if (!(slp->ns_flag & (SLP_VALID | SLP_DOREC)) || (slp->ns_rec == NULL)) { + return ENOBUFS; + } MALLOC_ZONE(nd, struct nfsrv_descript *, - sizeof (struct nfsrv_descript), M_NFSRVDESC, M_WAITOK); - if (!nd) - return (ENOMEM); + sizeof(struct nfsrv_descript), M_NFSRVDESC, M_WAITOK); + if (!nd) { + return ENOMEM; + } m = slp->ns_rec; slp->ns_rec = mbuf_nextpkt(m); - if (slp->ns_rec) + if (slp->ns_rec) { mbuf_setnextpkt(m, NULL); - else { + } else { slp->ns_flag &= ~SLP_DOREC; slp->ns_recend = NULL; } @@ -6223,26 +6608,31 @@ nfsrv_dorec( if (mbuf_type(m) == MBUF_TYPE_SONAME) { nam = m; m = mbuf_next(m); - if ((error = mbuf_setnext(nam, NULL))) + if ((error = mbuf_setnext(nam, NULL))) { panic("nfsrv_dorec: mbuf_setnext failed %d\n", error); - } else + } + } else { nam = NULL; + } nd->nd_nam2 = nam; nfsm_chain_dissect_init(error, &nd->nd_nmreq, m); - if (!error) + if (!error) { error = nfsrv_getreq(nd); + } if (error) { - if (nam) + if (nam) { mbuf_freem(nam); - if (nd->nd_gss_context) + } + if (nd->nd_gss_context) { nfs_gss_svc_ctx_deref(nd->nd_gss_context); + } FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); - return (error); + return error; } nd->nd_mrep = NULL; *ndp = nd; nfsd->nfsd_nd = nd; - return (0); + return 0; } /* @@ -6271,61 +6661,66 @@ nfsrv_getreq(struct nfsrv_descript *nd) val = auth_type = len = 0; nmreq = &nd->nd_nmreq; - nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID - nfsm_chain_get_32(error, nmreq, val); // RPC Call - if (!error && (val != RPC_CALL)) + nfsm_chain_get_32(error, nmreq, nd->nd_retxid); // XID + nfsm_chain_get_32(error, nmreq, val); // RPC Call + if (!error && (val != RPC_CALL)) { error = EBADRPC; + } nfsmout_if(error); nd->nd_repstat = 0; - nfsm_chain_get_32(error, nmreq, val); // RPC Version + nfsm_chain_get_32(error, nmreq, val); // RPC Version nfsmout_if(error); if (val != RPC_VER2) { nd->nd_repstat = ERPCMISMATCH; nd->nd_procnum = NFSPROC_NOOP; - return (0); + return 0; } - nfsm_chain_get_32(error, nmreq, val); // RPC Program Number + nfsm_chain_get_32(error, nmreq, val); // RPC Program Number nfsmout_if(error); if (val != NFS_PROG) { nd->nd_repstat = EPROGUNAVAIL; nd->nd_procnum = NFSPROC_NOOP; - return (0); + return 0; } nfsm_chain_get_32(error, nmreq, nfsvers);// NFS Version Number nfsmout_if(error); if ((nfsvers < NFS_VER2) || (nfsvers > NFS_VER3)) { nd->nd_repstat = EPROGMISMATCH; nd->nd_procnum = NFSPROC_NOOP; - return (0); + return 0; } nd->nd_vers = nfsvers; nfsm_chain_get_32(error, nmreq, nd->nd_procnum);// NFS Procedure Number nfsmout_if(error); if ((nd->nd_procnum >= NFS_NPROCS) || - ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) { + ((nd->nd_vers == NFS_VER2) && (nd->nd_procnum > NFSV2PROC_STATFS))) { nd->nd_repstat = EPROCUNAVAIL; nd->nd_procnum = NFSPROC_NOOP; - return (0); 
+ return 0; } - if (nfsvers != NFS_VER3) + if (nfsvers != NFS_VER3) { nd->nd_procnum = nfsv3_procid[nd->nd_procnum]; - nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor - nfsm_chain_get_32(error, nmreq, len); // Auth Length - if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) + } + nfsm_chain_get_32(error, nmreq, auth_type); // Auth Flavor + nfsm_chain_get_32(error, nmreq, len); // Auth Length + if (!error && (len < 0 || len > RPCAUTH_MAXSIZ)) { error = EBADRPC; + } nfsmout_if(error); /* Handle authentication */ if (auth_type == RPCAUTH_SYS) { struct posix_cred temp_pcred; - if (nd->nd_procnum == NFSPROC_NULL) - return (0); + if (nd->nd_procnum == NFSPROC_NULL) { + return 0; + } nd->nd_sec = RPCAUTH_SYS; - nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp - nfsm_chain_get_32(error, nmreq, len); // hostname length - if (len < 0 || len > NFS_MAXNAMLEN) + nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // skip stamp + nfsm_chain_get_32(error, nmreq, len); // hostname length + if (len < 0 || len > NFS_MAXNAMLEN) { error = EBADRPC; - nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname + } + nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); // skip hostname nfsmout_if(error); /* create a temporary credential using the bits from the wire */ @@ -6333,25 +6728,31 @@ nfsrv_getreq(struct nfsrv_descript *nd) nfsm_chain_get_32(error, nmreq, user_id); nfsm_chain_get_32(error, nmreq, group_id); temp_pcred.cr_groups[0] = group_id; - nfsm_chain_get_32(error, nmreq, len); // extra GID count - if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) + nfsm_chain_get_32(error, nmreq, len); // extra GID count + if ((len < 0) || (len > RPCAUTH_UNIXGIDS)) { error = EBADRPC; + } nfsmout_if(error); - for (i = 1; i <= len; i++) - if (i < NGROUPS) + for (i = 1; i <= len; i++) { + if (i < NGROUPS) { nfsm_chain_get_32(error, nmreq, temp_pcred.cr_groups[i]); - else + } else { nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); + } + } nfsmout_if(error); ngroups = (len >= NGROUPS) ? 
NGROUPS : (len + 1); - if (ngroups > 1) + if (ngroups > 1) { nfsrv_group_sort(&temp_pcred.cr_groups[0], ngroups); - nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE) - nfsm_chain_get_32(error, nmreq, len); // verifier length - if (len < 0 || len > RPCAUTH_MAXSIZ) + } + nfsm_chain_adv(error, nmreq, NFSX_UNSIGNED); // verifier flavor (should be AUTH_NONE) + nfsm_chain_get_32(error, nmreq, len); // verifier length + if (len < 0 || len > RPCAUTH_MAXSIZ) { error = EBADRPC; - if (len > 0) + } + if (len > 0) { nfsm_chain_adv(error, nmreq, nfsm_rndup(len)); + } /* request creation of a real credential */ temp_pcred.cr_uid = user_id; @@ -6360,30 +6761,33 @@ nfsrv_getreq(struct nfsrv_descript *nd) if (nd->nd_cr == NULL) { nd->nd_repstat = ENOMEM; nd->nd_procnum = NFSPROC_NOOP; - return (0); + return 0; } } else if (auth_type == RPCSEC_GSS) { error = nfs_gss_svc_cred_get(nd, nmreq); if (error) { - if (error == EINVAL) - goto nfsmout; // drop the request + if (error == EINVAL) { + goto nfsmout; // drop the request + } nd->nd_repstat = error; nd->nd_procnum = NFSPROC_NOOP; - return (0); + return 0; } } else { - if (nd->nd_procnum == NFSPROC_NULL) // assume it's AUTH_NONE - return (0); + if (nd->nd_procnum == NFSPROC_NULL) { // assume it's AUTH_NONE + return 0; + } nd->nd_repstat = (NFSERR_AUTHERR | AUTH_REJECTCRED); nd->nd_procnum = NFSPROC_NOOP; - return (0); + return 0; } - return (0); + return 0; nfsmout: - if (IS_VALID_CRED(nd->nd_cr)) + if (IS_VALID_CRED(nd->nd_cr)) { kauth_cred_unref(&nd->nd_cr); + } nfsm_chain_cleanup(nmreq); - return (error); + return error; } /* @@ -6397,8 +6801,9 @@ nfsrv_wakenfsd(struct nfsrv_sock *slp) { struct nfsd *nd; - if ((slp->ns_flag & SLP_VALID) == 0) + if ((slp->ns_flag & SLP_VALID) == 0) { return; + } lck_rw_lock_exclusive(&slp->ns_rwlock); /* if there's work to do on this socket, make sure it's queued up */ @@ -6410,8 +6815,9 @@ nfsrv_wakenfsd(struct nfsrv_sock *slp) /* wake up a waiting nfsd, if possible */ nd = TAILQ_FIRST(&nfsd_queue); - if (!nd) + if (!nd) { return; + } TAILQ_REMOVE(&nfsd_queue, nd, nfsd_queue); nd->nfsd_flag &= ~NFSD_WAITING; diff --git a/bsd/nfs/nfs_srvcache.c b/bsd/nfs/nfs_srvcache.c index b0eb21d73..639cca075 100644 --- a/bsd/nfs/nfs_srvcache.c +++ b/bsd/nfs/nfs_srvcache.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -92,9 +92,9 @@ extern int nfsv2_procid[NFS_NPROCS]; static int nfsrv_reqcache_count; int nfsrv_reqcache_size = NFSRVCACHESIZ; -#define NFSRCHASH(xid) \ +#define NFSRCHASH(xid) \ (&nfsrv_reqcache_hashtbl[((xid) + ((xid) >> 24)) & nfsrv_reqcache_hash]) -LIST_HEAD(nfsrv_reqcache_hash, nfsrvcache) *nfsrv_reqcache_hashtbl; +LIST_HEAD(nfsrv_reqcache_hash, nfsrvcache) * nfsrv_reqcache_hashtbl; TAILQ_HEAD(nfsrv_reqcache_lru, nfsrvcache) nfsrv_reqcache_lruhead; u_long nfsrv_reqcache_hash; @@ -158,8 +158,9 @@ static int nfsv2_repstat[NFS_NPROCS] = { void nfsrv_initcache(void) { - if (nfsrv_reqcache_size <= 0) + if (nfsrv_reqcache_size <= 0) { return; + } lck_mtx_lock(nfsrv_reqcache_mutex); /* init nfs server request cache hash table */ @@ -189,17 +190,19 @@ netaddr_match( case AF_INET: inetaddr = mbuf_data(nam); if ((inetaddr->sin_family == AF_INET) && - (inetaddr->sin_addr.s_addr == haddr->had_inetaddr)) - return (1); + (inetaddr->sin_addr.s_addr == haddr->had_inetaddr)) { + return 1; + } break; case AF_INET6: inet6addr = mbuf_data(nam); if ((inet6addr->sin6_family == AF_INET6) && - !bcmp(&inet6addr->sin6_addr, &haddr->had_inet6addr, sizeof(inet6addr->sin6_addr))) - return (1); + !bcmp(&inet6addr->sin6_addr, &haddr->had_inet6addr, sizeof(inet6addr->sin6_addr))) { + return 1; + } break; } - return (0); + return 0; } /* @@ -231,17 +234,18 @@ nfsrv_getcache( * Don't cache recent requests for reliable transport protocols. * (Maybe we should for the case of a reconnect, but..) */ - if (!nd->nd_nam2) - return (RC_DOIT); + if (!nd->nd_nam2) { + return RC_DOIT; + } lck_mtx_lock(nfsrv_reqcache_mutex); loop: for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0; rp = rp->rc_hash.le_next) { - if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc && - netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) { + if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc && + netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) { if ((rp->rc_flag & RC_LOCKED) != 0) { rp->rc_flag |= RC_WANTED; - msleep(rp, nfsrv_reqcache_mutex, PZERO-1, "nfsrc", NULL); + msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL); goto loop; } rp->rc_flag |= RC_LOCKED; @@ -250,8 +254,9 @@ loop: TAILQ_REMOVE(&nfsrv_reqcache_lruhead, rp, rc_lru); TAILQ_INSERT_TAIL(&nfsrv_reqcache_lruhead, rp, rc_lru); } - if (rp->rc_state == RC_UNUSED) + if (rp->rc_state == RC_UNUSED) { panic("nfsrv cache"); + } if (rp->rc_state == RC_INPROG) { OSAddAtomic64(1, &nfsstats.srvcache_inproghits); ret = RC_DROPIT; @@ -287,7 +292,7 @@ loop: wakeup(rp); } lck_mtx_unlock(nfsrv_reqcache_mutex); - return (ret); + return ret; } } OSAddAtomic64(1, &nfsstats.srvcache_misses); @@ -309,20 +314,22 @@ loop: /* no entry to reuse? 
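/*
 * A compact sketch of the duplicate-request cache keying used above: entries
 * hash on the XID with the high byte folded in (NFSRCHASH's
 * (xid + (xid >> 24)) mix), and a hit additionally requires a matching
 * procedure number and caller address (netaddr_match), since XIDs are only
 * unique per client. The table size and key layout here are illustrative.
 */
#include <stdint.h>

#define DRC_HASH_SIZE	64	/* illustrative; must be a power of two */

struct drc_key {
	uint32_t xid;
	int      proc;
	uint32_t addr;	/* simplified: IPv4 caller address */
};

static unsigned
drc_hash(uint32_t xid)
{
	return (xid + (xid >> 24)) & (DRC_HASH_SIZE - 1);
}

static int
drc_key_match(const struct drc_key *a, const struct drc_key *b)
{
	return a->xid == b->xid && a->proc == b->proc && a->addr == b->addr;
}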
*/ /* OK, we just won't be able to cache this request */ lck_mtx_unlock(nfsrv_reqcache_mutex); - return (RC_DOIT); + return RC_DOIT; } while ((rp->rc_flag & RC_LOCKED) != 0) { rp->rc_flag |= RC_WANTED; - msleep(rp, nfsrv_reqcache_mutex, PZERO-1, "nfsrc", NULL); + msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL); rp = nfsrv_reqcache_lruhead.tqh_first; } rp->rc_flag |= RC_LOCKED; LIST_REMOVE(rp, rc_hash); TAILQ_REMOVE(&nfsrv_reqcache_lruhead, rp, rc_lru); - if (rp->rc_flag & RC_REPMBUF) + if (rp->rc_flag & RC_REPMBUF) { mbuf_freem(rp->rc_reply); - if (rp->rc_flag & RC_NAM) + } + if (rp->rc_flag & RC_NAM) { mbuf_freem(rp->rc_nam); + } rp->rc_flag &= (RC_LOCKED | RC_WANTED); } TAILQ_INSERT_TAIL(&nfsrv_reqcache_lruhead, rp, rc_lru); @@ -341,12 +348,14 @@ loop: break; default: error = mbuf_copym(nd->nd_nam, 0, MBUF_COPYALL, MBUF_WAITOK, &rp->rc_nam); - if (error) + if (error) { printf("nfsrv cache: nam copym failed\n"); - else + } else { rp->rc_flag |= RC_NAM; + } break; - }; + } + ; rp->rc_proc = nd->nd_procnum; LIST_INSERT_HEAD(NFSRCHASH(nd->nd_retxid), rp, rc_hash); rp->rc_flag &= ~RC_LOCKED; @@ -355,7 +364,7 @@ loop: wakeup(rp); } lck_mtx_unlock(nfsrv_reqcache_mutex); - return (RC_DOIT); + return RC_DOIT; } /* @@ -370,31 +379,32 @@ nfsrv_updatecache( struct nfsrvcache *rp; int error; - if (!nd->nd_nam2) + if (!nd->nd_nam2) { return; + } lck_mtx_lock(nfsrv_reqcache_mutex); loop: for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0; rp = rp->rc_hash.le_next) { - if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc && - netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) { + if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc && + netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) { if ((rp->rc_flag & RC_LOCKED) != 0) { rp->rc_flag |= RC_WANTED; - msleep(rp, nfsrv_reqcache_mutex, PZERO-1, "nfsrc", NULL); + msleep(rp, nfsrv_reqcache_mutex, PZERO - 1, "nfsrc", NULL); goto loop; } rp->rc_flag |= RC_LOCKED; - if (rp->rc_state == RC_DONE) { - /* - * This can occur if the cache is too small. - * Retransmits of the same request aren't - * dropped so we may see the operation - * complete more then once. - */ - if (rp->rc_flag & RC_REPMBUF) { - mbuf_freem(rp->rc_reply); - rp->rc_flag &= ~RC_REPMBUF; - } + if (rp->rc_state == RC_DONE) { + /* + * This can occur if the cache is too small. + * Retransmits of the same request aren't + * dropped so we may see the operation + * complete more than once. + */ + if (rp->rc_flag & RC_REPMBUF) { + mbuf_freem(rp->rc_reply); + rp->rc_flag &= ~RC_REPMBUF; + } } rp->rc_state = RC_DONE; /* @@ -403,13 +413,14 @@ loop: */ if (repvalid && nonidempotent[nd->nd_procnum]) { if ((nd->nd_vers == NFS_VER2) && - nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) { + nfsv2_repstat[nfsv2_procid[nd->nd_procnum]]) { rp->rc_status = nd->nd_repstat; rp->rc_flag |= RC_REPSTATUS; } else { error = mbuf_copym(repmbuf, 0, MBUF_COPYALL, MBUF_WAITOK, &rp->rc_reply); - if (!error) + if (!error) { rp->rc_flag |= RC_REPMBUF; + } } } rp->rc_flag &= ~RC_LOCKED; diff --git a/bsd/nfs/nfs_subs.c b/bsd/nfs/nfs_subs.c index 0702fbed9..6b8cf9140 100644 --- a/bsd/nfs/nfs_subs.c +++ b/bsd/nfs/nfs_subs.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -118,7 +118,7 @@ /* * NFS globals */ -struct nfsstats __attribute__((aligned(8))) nfsstats; +struct nfsstats __attribute__((aligned(8))) nfsstats; size_t nfs_mbuf_mhlen = 0, nfs_mbuf_minclsize = 0; /* @@ -141,11 +141,13 @@ vtonfs_type(enum vtype vtype, int nfsvers) case VLNK: return NFLNK; case VSOCK: - if (nfsvers > NFS_VER2) + if (nfsvers > NFS_VER2) { return NFSOCK; + } case VFIFO: - if (nfsvers > NFS_VER2) + if (nfsvers > NFS_VER2) { return NFFIFO; + } case VBAD: case VSTR: case VCPLX: @@ -171,17 +173,21 @@ nfstov_type(nfstype nvtype, int nfsvers) case NFLNK: return VLNK; case NFSOCK: - if (nfsvers > NFS_VER2) + if (nfsvers > NFS_VER2) { return VSOCK; + } case NFFIFO: - if (nfsvers > NFS_VER2) + if (nfsvers > NFS_VER2) { return VFIFO; + } case NFATTRDIR: - if (nfsvers > NFS_VER3) + if (nfsvers > NFS_VER3) { return VDIR; + } case NFNAMEDATTR: - if (nfsvers > NFS_VER3) + if (nfsvers > NFS_VER3) { return VREG; + } default: return VNON; } @@ -303,17 +309,20 @@ nfsm_mbuf_get_list(size_t size, mbuf_t *mp, int *mbcnt) while (len < size) { nfsm_mbuf_get(error, &m, (size - len)); - if (error) + if (error) { break; - if (!mhead) + } + if (!mhead) { mhead = m; + } if (mlast && ((error = mbuf_setnext(mlast, m)))) { mbuf_free(m); break; } mlen = mbuf_maxlen(m); - if ((len + mlen) > size) + if ((len + mlen) > size) { mlen = size - len; + } mbuf_setlen(m, mlen); len += mlen; cnt++; @@ -324,7 +333,7 @@ nfsm_mbuf_get_list(size_t size, mbuf_t *mp, int *mbcnt) *mp = mhead; *mbcnt = cnt; } - return (error); + return error; } #endif /* NFSSERVER */ @@ -340,15 +349,18 @@ nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint) mbuf_t mb; int error = 0; - if (nmc->nmc_flags & NFSM_CHAIN_FLAG_ADD_CLUSTERS) + if (nmc->nmc_flags & NFSM_CHAIN_FLAG_ADD_CLUSTERS) { sizehint = nfs_mbuf_minclsize; + } /* allocate a new mbuf */ nfsm_mbuf_get(error, &mb, sizehint); - if (error) - return (error); - if (mb == NULL) + if (error) { + return error; + } + if (mb == NULL) { panic("got NULL mbuf?"); + } /* do we have a current mbuf? 
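/*
 * A user-space sketch of the nfsm_mbuf_get_list() pattern above: allocate
 * fixed-capacity buffers and link them until the requested size is covered,
 * trimming the last buffer's length to the remainder and unwinding on
 * allocation failure. buf_t and BUF_CAP are illustrative stand-ins for
 * mbuf_t and the cluster size.
 */
#include <stdlib.h>
#include <stddef.h>

#define BUF_CAP	2048

typedef struct buf {
	struct buf *next;
	size_t      len;	/* valid bytes in data[] */
	char        data[BUF_CAP];
} buf_t;

/* Returns the head of a chain covering 'size' bytes, or NULL on ENOMEM. */
static buf_t *
buf_chain_alloc(size_t size)
{
	buf_t *head = NULL, *last = NULL;
	size_t got = 0;

	while (got < size) {
		buf_t *b = calloc(1, sizeof(*b));
		if (b == NULL) {
			while (head != NULL) {	/* unwind the partial chain */
				buf_t *n = head->next;
				free(head);
				head = n;
			}
			return NULL;
		}
		b->len = (size - got < BUF_CAP) ? size - got : BUF_CAP;
		if (last != NULL) {
			last->next = b;
		} else {
			head = b;
		}
		last = b;
		got += b->len;
	}
	return head;
}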
*/ if (nmc->nmc_mcur) { @@ -358,7 +370,7 @@ nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint) error = mbuf_setnext(nmc->nmc_mcur, mb); if (error) { mbuf_free(mb); - return (error); + return error; } } @@ -367,7 +379,7 @@ nfsm_chain_new_mbuf(struct nfsm_chain *nmc, size_t sizehint) nmc->nmc_ptr = mbuf_data(mb); nmc->nmc_left = mbuf_trailingspace(mb); - return (0); + return 0; } /* @@ -386,14 +398,16 @@ nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len) while (paddedlen) { if (!nmc->nmc_left) { error = nfsm_chain_new_mbuf(nmc, paddedlen); - if (error) - return (error); + if (error) { + return error; + } } tlen = MIN(nmc->nmc_left, paddedlen); if (tlen) { if (len) { - if (tlen > len) + if (tlen > len) { tlen = len; + } bcopy(buf, nmc->nmc_ptr, tlen); } else { bzero(nmc->nmc_ptr, tlen); @@ -407,7 +421,7 @@ nfsm_chain_add_opaque_f(struct nfsm_chain *nmc, const u_char *buf, uint32_t len) } } } - return (0); + return 0; } /* @@ -425,8 +439,9 @@ nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, uint32_ while (len > 0) { if (nmc->nmc_left <= 0) { error = nfsm_chain_new_mbuf(nmc, len); - if (error) - return (error); + if (error) { + return error; + } } tlen = MIN(nmc->nmc_left, len); bcopy(buf, nmc->nmc_ptr, tlen); @@ -435,7 +450,7 @@ nfsm_chain_add_opaque_nopad_f(struct nfsm_chain *nmc, const u_char *buf, uint32_ len -= tlen; buf += tlen; } - return (0); + return 0; } /* @@ -454,14 +469,16 @@ nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, uint32_t len) while (paddedlen) { if (!nmc->nmc_left) { error = nfsm_chain_new_mbuf(nmc, paddedlen); - if (error) - return (error); + if (error) { + return error; + } } tlen = MIN(nmc->nmc_left, paddedlen); if (tlen) { if (len) { - if (tlen > len) + if (tlen > len) { tlen = len; + } uiomove(nmc->nmc_ptr, tlen, uio); } else { bzero(nmc->nmc_ptr, tlen); @@ -469,11 +486,12 @@ nfsm_chain_add_uio(struct nfsm_chain *nmc, uio_t uio, uint32_t len) nmc->nmc_ptr += tlen; nmc->nmc_left -= tlen; paddedlen -= tlen; - if (len) + if (len) { len -= tlen; + } } } - return (0); + return 0; } /* @@ -487,12 +505,13 @@ nfsm_chain_offset(struct nfsm_chain *nmc) int len = 0; for (mb = nmc->nmc_mhead; mb; mb = mbuf_next(mb)) { - if (mb == nmc->nmc_mcur) - return (len + (nmc->nmc_ptr - (caddr_t) mbuf_data(mb))); + if (mb == nmc->nmc_mcur) { + return len + (nmc->nmc_ptr - (caddr_t) mbuf_data(mb)); + } len += mbuf_len(mb); } - return (len); + return len; } /* @@ -509,17 +528,18 @@ nfsm_chain_advance(struct nfsm_chain *nmc, uint32_t len) if (nmc->nmc_left >= len) { nmc->nmc_left -= len; nmc->nmc_ptr += len; - return (0); + return 0; } len -= nmc->nmc_left; nmc->nmc_mcur = mb = mbuf_next(nmc->nmc_mcur); - if (!mb) - return (EBADRPC); + if (!mb) { + return EBADRPC; + } nmc->nmc_ptr = mbuf_data(mb); nmc->nmc_left = mbuf_len(mb); } - return (0); + return 0; } /* @@ -537,15 +557,16 @@ nfsm_chain_reverse(struct nfsm_chain *nmc, uint32_t len) if (len <= mlen) { nmc->nmc_ptr -= len; nmc->nmc_left += len; - return (0); + return 0; } new_offset = nfsm_chain_offset(nmc) - len; nfsm_chain_dissect_init(error, nmc, nmc->nmc_mhead); - if (error) - return (error); + if (error) { + return error; + } - return (nfsm_chain_advance(nmc, new_offset)); + return nfsm_chain_advance(nmc, new_offset); } /* @@ -569,21 +590,23 @@ nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **p while (nmc->nmc_mcur && (nmc->nmc_left == 0)) { mb = mbuf_next(nmc->nmc_mcur); nmc->nmc_mcur = mb; - if (!mb) + if (!mb) { break; + } 
nmc->nmc_ptr = mbuf_data(mb); nmc->nmc_left = mbuf_len(mb); } /* check if we've run out of data */ - if (!nmc->nmc_mcur) - return (EBADRPC); + if (!nmc->nmc_mcur) { + return EBADRPC; + } /* do we already have a contiguous buffer? */ if (nmc->nmc_left >= len) { /* the returned pointer will be the current pointer */ *pptr = (u_char*)nmc->nmc_ptr; error = nfsm_chain_advance(nmc, nfsm_rndup(len)); - return (error); + return error; } padlen = nfsm_rndup(len) - len; @@ -599,12 +622,13 @@ nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **p * allocate a new mbuf to hold the contiguous range of data. */ nfsm_mbuf_get(error, &mb, len); - if (error) - return (error); + if (error) { + return error; + } /* double check that this mbuf can hold all the data */ if (mbuf_maxlen(mb) < len) { mbuf_free(mb); - return (EOVERFLOW); + return EOVERFLOW; } /* the returned pointer will be the new mbuf's data pointer */ @@ -617,11 +641,12 @@ nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **p /* insert the new mbuf between the current and next mbufs */ error = mbuf_setnext(mb, mbuf_next(mbcur)); - if (!error) + if (!error) { error = mbuf_setnext(mbcur, mb); + } if (error) { mbuf_free(mb); - return (error); + return error; } /* reduce current mbuf's length by "left" */ @@ -673,21 +698,22 @@ nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **p error = mbuf_setdata(mb, ptr + cplen, mblen - cplen); if (error) { mbuf_setlen(mbcur, mbuf_len(mbcur) - need); - return (error); + return error; } /* update pointer/need */ nmc->nmc_ptr += cplen; need -= cplen; } /* if more needed, go to next mbuf */ - if (need) + if (need) { mb = mbuf_next(mb); + } } /* did we run out of data in the mbuf chain? */ if (need) { mbuf_setlen(mbcur, mbuf_len(mbcur) - need); - return (EBADRPC); + return EBADRPC; } /* @@ -701,10 +727,11 @@ nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *nmc, uint32_t len, u_char **p nmc->nmc_left = mbuf_len(mb); /* move past any padding */ - if (padlen) + if (padlen) { error = nfsm_chain_advance(nmc, padlen); + } - return (error); + return error; } /* @@ -742,13 +769,15 @@ nfsm_chain_get_opaque_f(struct nfsm_chain *nmc, uint32_t len, u_char *buf) } /* did we run out of data in the mbuf chain? */ - if (len) - return (EBADRPC); + if (len) { + return EBADRPC; + } - if (padlen) + if (padlen) { nfsm_chain_adv(error, nmc, padlen); + } - return (error); + return error; } /* @@ -771,8 +800,9 @@ nfsm_chain_get_uio(struct nfsm_chain *nmc, uint32_t len, uio_t uio) cplen = MIN(nmc->nmc_left, len); if (cplen) { error = uiomove(nmc->nmc_ptr, cplen, uio); - if (error) - return (error); + if (error) { + return error; + } nmc->nmc_ptr += cplen; nmc->nmc_left -= cplen; len -= cplen; @@ -787,13 +817,15 @@ nfsm_chain_get_uio(struct nfsm_chain *nmc, uint32_t len, uio_t uio) } /* did we run out of data in the mbuf chain? 
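nfsm_chain_get_opaque_f() and nfsm_chain_get_uio() in these hunks both walk the mbuf chain copying the requested bytes and then step over the XDR pad, failing with EBADRPC when the chain ends early. A user-space model of that walk, assuming struct chunk as a stand-in for an mbuf and EBADMSG for the kernel's EBADRPC:

#include <stddef.h>
#include <string.h>
#include <errno.h>

struct chunk {                    /* illustrative stand-in for an mbuf */
    struct chunk *next;
    const char   *data;
    size_t        len;
};

static int
chain_get_opaque(struct chunk **cp, size_t *off, size_t len, char *buf)
{
    size_t pad  = ((len + 3) & ~(size_t)3) - len;   /* XDR pad bytes */
    size_t want = len + pad;

    while (want && *cp) {
        size_t avail = (*cp)->len - *off;
        size_t t = avail < want ? avail : want;
        if (len) {
            size_t c = t < len ? t : len;           /* payload portion */
            memcpy(buf, (*cp)->data + *off, c);
            buf += c;
            len -= c;
        }
        *off += t;                                  /* pad is skipped */
        want -= t;
        if (*off == (*cp)->len) {                   /* chunk exhausted */
            *cp = (*cp)->next;
            *off = 0;
        }
    }
    return want ? EBADMSG : 0;    /* ran out of data mid-opaque */
}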
*/ - if (len) - return (EBADRPC); + if (len) { + return EBADRPC; + } - if (padlen) + if (padlen) { nfsm_chain_adv(error, nmc, padlen); + } - return (error); + return error; } #if NFSCLIENT @@ -806,23 +838,26 @@ nfsm_chain_add_string_nfc(struct nfsm_chain *nmc, const uint8_t *s, uint32_t sle size_t buflen = sizeof(smallbuf), nfclen; int error; - error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED|UTF_NO_NULL_TERM); + error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM); if (error == ENAMETOOLONG) { buflen = MAXPATHLEN; MALLOC_ZONE(nfcname, uint8_t *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (nfcname) - error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED|UTF_NO_NULL_TERM); + if (nfcname) { + error = utf8_normalizestr(s, slen, nfcname, &nfclen, buflen, UTF_PRECOMPOSED | UTF_NO_NULL_TERM); + } } /* if we got an error, just use the original string */ - if (error) + if (error) { nfsm_chain_add_string(error, nmc, s, slen); - else + } else { nfsm_chain_add_string(error, nmc, nfcname, nfclen); + } - if (nfcname && (nfcname != smallbuf)) + if (nfcname && (nfcname != smallbuf)) { FREE_ZONE(nfcname, MAXPATHLEN, M_NAMEI); - return (error); + } + return error; } /* @@ -834,20 +869,20 @@ nfsm_chain_add_v2sattr_f(struct nfsm_chain *nmc, struct vnode_attr *vap, uint32_ int error = 0; nfsm_chain_add_32(error, nmc, vtonfsv2_mode(vap->va_type, - (VATTR_IS_ACTIVE(vap, va_mode) ? vap->va_mode : 0600))); + (VATTR_IS_ACTIVE(vap, va_mode) ? vap->va_mode : 0600))); nfsm_chain_add_32(error, nmc, - VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uint32_t)-1); + VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uint32_t)-1); nfsm_chain_add_32(error, nmc, - VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (uint32_t)-1); + VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (uint32_t)-1); nfsm_chain_add_32(error, nmc, szrdev); nfsm_chain_add_v2time(error, nmc, - VATTR_IS_ACTIVE(vap, va_access_time) ? - &vap->va_access_time : NULL); + VATTR_IS_ACTIVE(vap, va_access_time) ? + &vap->va_access_time : NULL); nfsm_chain_add_v2time(error, nmc, - VATTR_IS_ACTIVE(vap, va_modify_time) ? - &vap->va_modify_time : NULL); + VATTR_IS_ACTIVE(vap, va_modify_time) ? 
+ &vap->va_modify_time : NULL); - return (error); + return error; } /* @@ -902,7 +937,7 @@ nfsm_chain_add_v3sattr_f(struct nfsm_chain *nmc, struct vnode_attr *vap) } } - return (error); + return error; } @@ -925,20 +960,24 @@ nfsm_chain_get_fh_attr( gotfh = gotattr = 1; - if (nfsvers == NFS_VER3) /* check for file handle */ + if (nfsvers == NFS_VER3) { /* check for file handle */ nfsm_chain_get_32(error, nmc, gotfh); - if (!error && gotfh) /* get file handle */ + } + if (!error && gotfh) { /* get file handle */ nfsm_chain_get_fh(error, nmc, nfsvers, fhp); - else + } else { fhp->fh_len = 0; - if (nfsvers == NFS_VER3) /* check for file attributes */ + } + if (nfsvers == NFS_VER3) { /* check for file attributes */ nfsm_chain_get_32(error, nmc, gotattr); + } nfsmout_if(error); if (gotattr) { - if (!gotfh) /* skip attributes */ + if (!gotfh) { /* skip attributes */ nfsm_chain_adv(error, nmc, NFSX_V3FATTR); - else /* get attributes */ + } else { /* get attributes */ error = nfs_parsefattr(nmc, nfsvers, nvap); + } } else if (gotfh) { /* we need valid attributes in order to call nfs_nget() */ if (nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, 0, ctx, nvap, xidp)) { @@ -947,7 +986,7 @@ nfsm_chain_get_fh_attr( } } nfsmout: - return (error); + return error; } /* @@ -976,7 +1015,7 @@ nfsm_chain_get_wcc_data_f( } nfsm_chain_postop_attr_update_flag(error, nmc, np, *newpostattr, xidp); - return (error); + return error; } /* @@ -1026,7 +1065,7 @@ nfsm_rpchead( int proc = ((nfsvers == NFS_VER2) ? nfsv2_procid[req->r_procnum] : (int)req->r_procnum); return nfsm_rpchead2(nmp, nmp->nm_sotype, NFS_PROG, nfsvers, proc, - req->r_auth, req->r_cred, req, mrest, xidp, mreqp); + req->r_auth, req->r_cred, req, mrest, xidp, mreqp); } /* @@ -1036,10 +1075,10 @@ nfsm_rpchead( * OUT: groups: An array of gids of NGROUPS size. * IN: count: The number of groups to get; i.e.; the number of groups the server supports * - * returns: The number of groups found. + * returns: The number of groups found. * * Just a wrapper around kauth_cred_getgroups to handle the case of a server supporting less - * than NGROUPS. + * than NGROUPS. */ static int get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], int count) @@ -1047,14 +1086,15 @@ get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], int count) gid_t pgid; int maxcount = count < NGROUPS ? count + 1 : NGROUPS; int i; - - for (i = 0; i < NGROUPS; i++) - groups[i] = -2; /* Initialize to the nobody group */ + for (i = 0; i < NGROUPS; i++) { + groups[i] = -2; /* Initialize to the nobody group */ + } (void)kauth_cred_getgroups(cred, groups, &maxcount); - if (maxcount < 1) - return (maxcount); - + if (maxcount < 1) { + return maxcount; + } + /* * kauth_get_groups returns the primary group followed by the * users auxiliary groups. 
If the number of groups the server supports @@ -1067,18 +1107,18 @@ get_auxiliary_groups(kauth_cred_t cred, gid_t groups[NGROUPS], int count) pgid = kauth_cred_getgid(cred); if (pgid == groups[0]) { maxcount -= 1; - for (i = 0; i < maxcount; i++) { - groups[i] = groups[i+1]; + for (i = 0; i < maxcount; i++) { + groups[i] = groups[i + 1]; } } } - - return (maxcount); + + return maxcount; } int nfsm_rpchead2(struct nfsmount *nmp, int sotype, int prog, int vers, int proc, int auth_type, - kauth_cred_t cred, struct nfsreq *req, mbuf_t mrest, u_int64_t *xidp, mbuf_t *mreqp) + kauth_cred_t cred, struct nfsreq *req, mbuf_t mrest, u_int64_t *xidp, mbuf_t *mreqp) { mbuf_t mreq, mb; int error, i, auth_len = 0, authsiz, reqlen; @@ -1089,52 +1129,57 @@ nfsm_rpchead2(struct nfsmount *nmp, int sotype, int prog, int vers, int proc, in /* calculate expected auth length */ switch (auth_type) { - case RPCAUTH_NONE: - auth_len = 0; - break; - case RPCAUTH_SYS: - { - int count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS; + case RPCAUTH_NONE: + auth_len = 0; + break; + case RPCAUTH_SYS: + { + int count = nmp->nm_numgrps < NGROUPS ? nmp->nm_numgrps : NGROUPS; - if (!cred) - return (EINVAL); - groupcount = get_auxiliary_groups(cred, grouplist, count); - if (groupcount < 0) - return (EINVAL); - auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED; - break; - } - case RPCAUTH_KRB5: - case RPCAUTH_KRB5I: - case RPCAUTH_KRB5P: - if (!req || !cred) - return (EINVAL); - auth_len = 5 * NFSX_UNSIGNED + 0; // zero context handle for now - break; - default: - return (EINVAL); + if (!cred) { + return EINVAL; + } + groupcount = get_auxiliary_groups(cred, grouplist, count); + if (groupcount < 0) { + return EINVAL; + } + auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED; + break; + } + case RPCAUTH_KRB5: + case RPCAUTH_KRB5I: + case RPCAUTH_KRB5P: + if (!req || !cred) { + return EINVAL; } + auth_len = 5 * NFSX_UNSIGNED + 0; // zero context handle for now + break; + default: + return EINVAL; + } authsiz = nfsm_rndup(auth_len); /* allocate the packet */ headlen = authsiz + 10 * NFSX_UNSIGNED; - if (sotype == SOCK_STREAM) /* also include room for any RPC Record Mark */ + if (sotype == SOCK_STREAM) { /* also include room for any RPC Record Mark */ headlen += NFSX_UNSIGNED; + } if (headlen >= nfs_mbuf_minclsize) { error = mbuf_getpacket(MBUF_WAITOK, &mreq); } else { error = mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &mreq); if (!error) { - if (headlen < nfs_mbuf_mhlen) + if (headlen < nfs_mbuf_mhlen) { mbuf_align_32(mreq, headlen); - else + } else { mbuf_align_32(mreq, 8 * NFSX_UNSIGNED); + } } } if (error) { /* unable to allocate packet */ /* XXX should we keep statistics for these errors? */ - return (error); + return error; } /* @@ -1142,15 +1187,17 @@ nfsm_rpchead2(struct nfsmount *nmp, int sotype, int prog, int vers, int proc, in * it may be a higher-level resend with a GSSAPI credential. * Otherwise, allocate a new one. */ - if (*xidp == 0) + if (*xidp == 0) { nfs_get_xid(xidp); + } /* build the header(s) */ nfsm_chain_init(&nmreq, mreq); /* First, if it's a TCP stream insert space for an RPC record mark */ - if (sotype == SOCK_STREAM) + if (sotype == SOCK_STREAM) { nfsm_chain_add_32(error, &nmreq, 0); + } /* Then the RPC header. 
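nfsm_rpchead2() emits a standard ONC RPC call header (RFC 5531) word by word, preceded on TCP by a record mark whose high bit marks the last fragment, exactly the (reqlen - NFSX_UNSIGNED) | 0x80000000 computed later in the function. A sketch of the fixed part of that header:

#include <stdint.h>
#include <arpa/inet.h>   /* htonl */

/* Fixed prefix of an ONC RPC call message, as the code emits it with
 * successive nfsm_chain_add_32() calls. */
struct rpc_call_hdr {
    uint32_t xid;        /* transaction ID */
    uint32_t mtype;      /* 0 == CALL */
    uint32_t rpcvers;    /* always 2 */
    uint32_t prog;       /* NFS_PROG == 100003 */
    uint32_t vers;       /* 2 or 3 here */
    uint32_t proc;
    /* credential and verifier (flavor, length, body...) follow */
};

/* TCP record mark: payload length with the last-fragment bit set. */
static uint32_t
rpc_record_mark(uint32_t payload_len)
{
    return htonl(payload_len | 0x80000000u);
}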
*/ nfsm_chain_add_32(error, &nmreq, (*xidp & 0xffffffff)); @@ -1164,33 +1211,36 @@ add_cred: switch (auth_type) { case RPCAUTH_NONE: nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* auth */ - nfsm_chain_add_32(error, &nmreq, 0); /* length */ - nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* verf */ - nfsm_chain_add_32(error, &nmreq, 0); /* length */ + nfsm_chain_add_32(error, &nmreq, 0); /* length */ + nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* verf */ + nfsm_chain_add_32(error, &nmreq, 0); /* length */ nfsm_chain_build_done(error, &nmreq); /* Append the args mbufs */ - if (!error) + if (!error) { error = mbuf_setnext(nmreq.nmc_mcur, mrest); + } break; case RPCAUTH_SYS: { nfsm_chain_add_32(error, &nmreq, RPCAUTH_SYS); nfsm_chain_add_32(error, &nmreq, authsiz); - nfsm_chain_add_32(error, &nmreq, 0); /* stamp */ - nfsm_chain_add_32(error, &nmreq, 0); /* zero-length hostname */ - nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(cred)); /* UID */ - nfsm_chain_add_32(error, &nmreq, kauth_cred_getgid(cred)); /* GID */ + nfsm_chain_add_32(error, &nmreq, 0); /* stamp */ + nfsm_chain_add_32(error, &nmreq, 0); /* zero-length hostname */ + nfsm_chain_add_32(error, &nmreq, kauth_cred_getuid(cred)); /* UID */ + nfsm_chain_add_32(error, &nmreq, kauth_cred_getgid(cred)); /* GID */ nfsm_chain_add_32(error, &nmreq, groupcount);/* additional GIDs */ - for (i = 0; i < groupcount; i++) + for (i = 0; i < groupcount; i++) { nfsm_chain_add_32(error, &nmreq, grouplist[i]); + } /* And the verifier... */ - nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* flavor */ - nfsm_chain_add_32(error, &nmreq, 0); /* length */ + nfsm_chain_add_32(error, &nmreq, RPCAUTH_NONE); /* flavor */ + nfsm_chain_add_32(error, &nmreq, 0); /* length */ nfsm_chain_build_done(error, &nmreq); /* Append the args mbufs */ - if (!error) + if (!error) { error = mbuf_setnext(nmreq.nmc_mcur, mrest); + } break; } case RPCAUTH_KRB5: @@ -1205,29 +1255,33 @@ add_cred: */ error = 0; req->r_auth = auth_type = RPCAUTH_SYS; - groupcount = get_auxiliary_groups(cred, grouplist, count); - if (groupcount < 0) - return (EINVAL); - auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED; + groupcount = get_auxiliary_groups(cred, grouplist, count); + if (groupcount < 0) { + return EINVAL; + } + auth_len = ((uint32_t)groupcount + 5) * NFSX_UNSIGNED; authsiz = nfsm_rndup(auth_len); goto add_cred; } break; - }; + } + ; /* finish setting up the packet */ - if (!error) + if (!error) { error = mbuf_pkthdr_setrcvif(mreq, 0); + } if (error) { mbuf_freem(mreq); - return (error); + return error; } /* Calculate the size of the request */ reqlen = 0; - for (mb = nmreq.nmc_mhead; mb; mb = mbuf_next(mb)) + for (mb = nmreq.nmc_mhead; mb; mb = mbuf_next(mb)) { reqlen += mbuf_len(mb); + } mbuf_pkthdr_setlen(mreq, reqlen); @@ -1237,12 +1291,13 @@ add_cred: * The record mark count doesn't include itself * and the last fragment bit is set. */ - if (sotype == SOCK_STREAM) + if (sotype == SOCK_STREAM) { nfsm_chain_set_recmark(error, &nmreq, - (reqlen - NFSX_UNSIGNED) | 0x80000000); + (reqlen - NFSX_UNSIGNED) | 0x80000000); + } *mreqp = mreq; - return (0); + return 0; } /* @@ -1300,8 +1355,9 @@ nfs_parsefattr(struct nfsm_chain *nmc, int nfsvers, struct nfs_vattr *nvap) * sockets and FIFOs for fa_type). 
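The code that follows implements this comment: when the NFSv2 server sent an unusable fa_type (VNON, or "regular" while the mode bits say otherwise), the S_IFMT field of the mode decides the vnode type via IFTOVT(). A user-space equivalent using plain stat.h macros; the returned strings just name the kernel's enum vtype values:

#include <sys/stat.h>

static const char *
v2_type_from_mode(mode_t vmode)
{
    switch (vmode & S_IFMT) {
    case S_IFDIR:  return "VDIR";
    case S_IFCHR:  return "VCHR";
    case S_IFBLK:  return "VBLK";
    case S_IFLNK:  return "VLNK";
    case S_IFSOCK: return "VSOCK";
    case S_IFIFO:  return "VFIFO";
    default:       return "VREG";
    }
}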
*/ vtype = nfstov_type(nvtype, nfsvers); - if ((vtype == VNON) || ((vtype == VREG) && ((vmode & S_IFMT) != 0))) + if ((vtype == VNON) || ((vtype == VREG) && ((vmode & S_IFMT) != 0))) { vtype = IFTOVT(vmode); + } nvap->nva_type = vtype; } @@ -1338,20 +1394,21 @@ nfs_parsefattr(struct nfsm_chain *nmc, int nfsvers, struct nfs_vattr *nvap) nfsmout_if(error); nvap->nva_fileid = (uint64_t)val; /* Really ugly NFSv2 kludge. */ - if ((vtype == VCHR) && (rdev == (dev_t)0xffffffff)) + if ((vtype == VCHR) && (rdev == (dev_t)0xffffffff)) { nvap->nva_type = VFIFO; + } } nfsm_chain_get_time(error, nmc, nfsvers, - nvap->nva_timesec[NFSTIME_ACCESS], - nvap->nva_timensec[NFSTIME_ACCESS]); + nvap->nva_timesec[NFSTIME_ACCESS], + nvap->nva_timensec[NFSTIME_ACCESS]); nfsm_chain_get_time(error, nmc, nfsvers, - nvap->nva_timesec[NFSTIME_MODIFY], - nvap->nva_timensec[NFSTIME_MODIFY]); + nvap->nva_timesec[NFSTIME_MODIFY], + nvap->nva_timensec[NFSTIME_MODIFY]); nfsm_chain_get_time(error, nmc, nfsvers, - nvap->nva_timesec[NFSTIME_CHANGE], - nvap->nva_timensec[NFSTIME_CHANGE]); + nvap->nva_timesec[NFSTIME_CHANGE], + nvap->nva_timensec[NFSTIME_CHANGE]); nfsmout: - return (error); + return error; } /* @@ -1394,7 +1451,7 @@ nfs_loadattrcache( if (!((nmp = VFSTONFS(mp)))) { FSDBG_BOT(527, ENXIO, 1, 0, *xidp); - return (ENXIO); + return ENXIO; } if (*xidp < np->n_xid) { @@ -1411,7 +1468,7 @@ nfs_loadattrcache( NATTRINVALIDATE(np); FSDBG_BOT(527, 0, np, np->n_xid, *xidp); *xidp = 0; - return (0); + return 0; } if (vp && (nvap->nva_type != vnode_vtype(vp))) { @@ -1437,10 +1494,11 @@ nfs_loadattrcache( * object type. */ printf("nfs loadattrcache vnode changed type, was %d now %d\n", - vnode_vtype(vp), nvap->nva_type); + vnode_vtype(vp), nvap->nva_type); error = ESTALE; - if (monitored) + if (monitored) { events |= VNODE_EVENT_DELETE; + } goto out; } @@ -1459,58 +1517,67 @@ nfs_loadattrcache( * For monitored nodes, check for attribute changes that should generate events. 
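The long chain of comparisons that follows accumulates a VNODE_EVENT_* mask from attribute deltas between the cached and freshly parsed attributes. Reduced to its shape, with illustrative field names and flag values that are not xnu's:

#include <stdint.h>

#define EV_ATTRIB 0x01   /* illustrative stand-ins for VNODE_EVENT_* */
#define EV_LINK   0x02
#define EV_WRITE  0x04

struct attrs { uint32_t nlink; uint64_t mtime_sec; };

static uint32_t
attr_events(const struct attrs *old, const struct attrs *cur)
{
    uint32_t ev = 0;
    if (old->nlink != cur->nlink) {
        ev |= EV_ATTRIB | EV_LINK;     /* link count changed */
    }
    if (old->mtime_sec != cur->mtime_sec) {
        ev |= EV_ATTRIB | EV_WRITE;    /* modification time changed */
    }
    return ev;                         /* caller notifies only if nonzero */
}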
*/ if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_NUMLINKS) && - (nvap->nva_nlink != npnvap->nva_nlink)) + (nvap->nva_nlink != npnvap->nva_nlink)) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_LINK; - if (events & VNODE_EVENT_PERMS) + } + if (events & VNODE_EVENT_PERMS) { /* no need to do all the checking if it's already set */; - else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_MODE) && - (nvap->nva_mode != npnvap->nva_mode)) + } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_MODE) && + (nvap->nva_mode != npnvap->nva_mode)) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS; - else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) && - (nvap->nva_uid != npnvap->nva_uid)) + } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) && + (nvap->nva_uid != npnvap->nva_uid)) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS; - else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) && - (nvap->nva_gid != npnvap->nva_gid)) + } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) && + (nvap->nva_gid != npnvap->nva_gid)) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS; - else if (nmp->nm_vers >= NFS_VER4) { + } else if (nmp->nm_vers >= NFS_VER4) { if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER) && - !kauth_guid_equal(&nvap->nva_uuuid, &npnvap->nva_uuuid)) + !kauth_guid_equal(&nvap->nva_uuuid, &npnvap->nva_uuuid)) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS; - else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) && - !kauth_guid_equal(&nvap->nva_guuid, &npnvap->nva_guuid)) + } else if (NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_OWNER_GROUP) && + !kauth_guid_equal(&nvap->nva_guuid, &npnvap->nva_guuid)) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS; - else if ((NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL) && - nvap->nva_acl && npnvap->nva_acl && - ((nvap->nva_acl->acl_entrycount != npnvap->nva_acl->acl_entrycount) || - bcmp(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl))))) + } else if ((NFS_BITMAP_ISSET(nvap->nva_bitmap, NFS_FATTR_ACL) && + nvap->nva_acl && npnvap->nva_acl && + ((nvap->nva_acl->acl_entrycount != npnvap->nva_acl->acl_entrycount) || + bcmp(nvap->nva_acl, npnvap->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl))))) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_PERMS; + } } if (((nmp->nm_vers >= NFS_VER4) && (nvap->nva_change != npnvap->nva_change)) || - (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_MODIFY) && + (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_MODIFY) && ((nvap->nva_timesec[NFSTIME_MODIFY] != npnvap->nva_timesec[NFSTIME_MODIFY]) || - (nvap->nva_timensec[NFSTIME_MODIFY] != npnvap->nva_timensec[NFSTIME_MODIFY])))) + (nvap->nva_timensec[NFSTIME_MODIFY] != npnvap->nva_timensec[NFSTIME_MODIFY])))) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_WRITE; + } if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_RAWDEV) && ((nvap->nva_rawdev.specdata1 != npnvap->nva_rawdev.specdata1) || - (nvap->nva_rawdev.specdata2 != npnvap->nva_rawdev.specdata2))) + (nvap->nva_rawdev.specdata2 != npnvap->nva_rawdev.specdata2))) { events |= VNODE_EVENT_ATTRIB; + } if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_FILEID) && - (nvap->nva_fileid != npnvap->nva_fileid)) + (nvap->nva_fileid != npnvap->nva_fileid)) { events |= VNODE_EVENT_ATTRIB; + } if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ARCHIVE) && - ((nvap->nva_flags & NFS_FFLAG_ARCHIVED) != (npnvap->nva_flags & NFS_FFLAG_ARCHIVED))) + ((nvap->nva_flags & NFS_FFLAG_ARCHIVED) != 
(npnvap->nva_flags & NFS_FFLAG_ARCHIVED))) { events |= VNODE_EVENT_ATTRIB; + } if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_HIDDEN) && - ((nvap->nva_flags & NFS_FFLAG_HIDDEN) != (npnvap->nva_flags & NFS_FFLAG_HIDDEN))) + ((nvap->nva_flags & NFS_FFLAG_HIDDEN) != (npnvap->nva_flags & NFS_FFLAG_HIDDEN))) { events |= VNODE_EVENT_ATTRIB; + } if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_CREATE) && ((nvap->nva_timesec[NFSTIME_CREATE] != npnvap->nva_timesec[NFSTIME_CREATE]) || - (nvap->nva_timensec[NFSTIME_CREATE] != npnvap->nva_timensec[NFSTIME_CREATE]))) + (nvap->nva_timensec[NFSTIME_CREATE] != npnvap->nva_timensec[NFSTIME_CREATE]))) { events |= VNODE_EVENT_ATTRIB; + } if (!events && NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_TIME_BACKUP) && ((nvap->nva_timesec[NFSTIME_BACKUP] != npnvap->nva_timesec[NFSTIME_BACKUP]) || - (nvap->nva_timensec[NFSTIME_BACKUP] != npnvap->nva_timensec[NFSTIME_BACKUP]))) + (nvap->nva_timensec[NFSTIME_BACKUP] != npnvap->nva_timensec[NFSTIME_BACKUP]))) { events |= VNODE_EVENT_ATTRIB; + } } /* Copy the attributes to the attribute cache */ @@ -1520,10 +1587,12 @@ nfs_loadattrcache( np->n_attrstamp = now.tv_sec; np->n_xid = *xidp; /* NFS_FFLAG_IS_ATTR and NFS_FFLAG_TRIGGER_REFERRAL need to be sticky... */ - if (vp && xattr) + if (vp && xattr) { nvap->nva_flags |= xattr; - if (vp && referral) + } + if (vp && referral) { nvap->nva_flags |= referral; + } if (NFS_BITMAP_ISSET(npnvap->nva_bitmap, NFS_FATTR_ACL)) { /* we're updating the ACL */ @@ -1549,8 +1618,9 @@ nfs_loadattrcache( np->n_aclstamp = now.tv_sec; } else { /* we aren't updating the ACL, so restore original values */ - if (aclbit) + if (aclbit) { NFS_BITMAP_SET(npnvap->nva_bitmap, NFS_FATTR_ACL); + } npnvap->nva_acl = acl; } @@ -1562,8 +1632,9 @@ nfs_loadattrcache( */ if ((nmp->nm_vers >= NFS_VER4) && (nvap->nva_type == VDIR) && ((np->n_vattr.nva_fsid.major != nmp->nm_fsid.major) || - (np->n_vattr.nva_fsid.minor != nmp->nm_fsid.minor))) + (np->n_vattr.nva_fsid.minor != nmp->nm_fsid.minor))) { np->n_vattr.nva_flags |= NFS_FFLAG_TRIGGER; + } #endif if (!vp || (nvap->nva_type != VREG)) { @@ -1589,8 +1660,9 @@ nfs_loadattrcache( */ np->n_newsize = nvap->nva_size; SET(np->n_flag, NUPDATESIZE); - if (monitored) + if (monitored) { events |= VNODE_EVENT_ATTRIB | VNODE_EVENT_EXTEND; + } } } @@ -1606,10 +1678,11 @@ nfs_loadattrcache( } out: - if (monitored && events) + if (monitored && events) { nfs_vnode_notify(np, events); + } FSDBG_BOT(527, error, np, np->n_size, *xidp); - return (error); + return error; } /* @@ -1625,8 +1698,9 @@ nfs_attrcachetimeout(nfsnode_t np) uint32_t timeo; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (0); + if (nfs_mount_gone(nmp)) { + return 0; + } isdir = vnode_isdir(NFSTOV(np)); @@ -1643,19 +1717,21 @@ nfs_attrcachetimeout(nfsnode_t np) microtime(&now); timeo = (now.tv_sec - (np)->n_vattr.nva_timesec[NFSTIME_MODIFY]) / 10; if (isdir) { - if (timeo < nmp->nm_acdirmin) + if (timeo < nmp->nm_acdirmin) { timeo = nmp->nm_acdirmin; - else if (timeo > nmp->nm_acdirmax) + } else if (timeo > nmp->nm_acdirmax) { timeo = nmp->nm_acdirmax; + } } else { - if (timeo < nmp->nm_acregmin) + if (timeo < nmp->nm_acregmin) { timeo = nmp->nm_acregmin; - else if (timeo > nmp->nm_acregmax) + } else if (timeo > nmp->nm_acregmax) { timeo = nmp->nm_acregmax; + } } } - return (timeo); + return timeo; } /* @@ -1676,12 +1752,13 @@ nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int flags) if (!NATTRVALID(np) || ((flags & NGA_ACL) && !NACLVALID(np))) { 
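nfs_attrcachetimeout() in the hunk above scales the cache lifetime with the file's age: one tenth of the time since last modification, clamped to the mount's minimum and maximum (acregmin/acregmax, or the acdir pair for directories). The arithmetic, extracted into a sketch that keeps the brace style the patch introduces:

#include <time.h>
#include <stdint.h>

static uint32_t
attrcache_timeout(time_t now, time_t mtime, uint32_t acmin, uint32_t acmax)
{
    /* a file untouched for 100 seconds gets a 10-second lifetime, etc. */
    uint32_t timeo = (uint32_t)((now - mtime) / 10);

    if (timeo < acmin) {
        timeo = acmin;
    } else if (timeo > acmax) {
        timeo = acmax;
    }
    return timeo;
}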
FSDBG(528, np, 0, 0xffffff01, ENOENT); OSAddAtomic64(1, &nfsstats.attrcache_misses); - return (ENOENT); + return ENOENT; } nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } /* * Verify the cached attributes haven't timed out. * If the server isn't responding, skip the check @@ -1691,21 +1768,21 @@ nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int flags) microuptime(&nowup); if (np->n_attrstamp > nowup.tv_sec) { printf("NFS: Attribute time stamp is in the future by %ld seconds. Invalidating cache\n", - np->n_attrstamp - nowup.tv_sec); + np->n_attrstamp - nowup.tv_sec); NATTRINVALIDATE(np); NACCESSINVALIDATE(np); - return (ENOENT); + return ENOENT; } timeo = nfs_attrcachetimeout(np); if ((nowup.tv_sec - np->n_attrstamp) >= timeo) { FSDBG(528, np, 0, 0xffffff02, ENOENT); OSAddAtomic64(1, &nfsstats.attrcache_misses); - return (ENOENT); + return ENOENT; } if ((flags & NGA_ACL) && ((nowup.tv_sec - np->n_aclstamp) >= timeo)) { FSDBG(528, np, 0, 0xffffff02, ENOENT); OSAddAtomic64(1, &nfsstats.attrcache_misses); - return (ENOENT); + return ENOENT; } } @@ -1746,14 +1823,15 @@ nfs_getattrcache(nfsnode_t np, struct nfs_vattr *nvaper, int flags) if (nvap->nva_acl) { if (flags & NGA_ACL) { nvaper->nva_acl = kauth_acl_alloc(nvap->nva_acl->acl_entrycount); - if (!nvaper->nva_acl) - return (ENOMEM); + if (!nvaper->nva_acl) { + return ENOMEM; + } bcopy(nvap->nva_acl, nvaper->nva_acl, KAUTH_ACL_COPYSIZE(nvap->nva_acl)); } else { nvaper->nva_acl = NULL; } } - return (0); + return 0; } /* @@ -1823,36 +1901,36 @@ nfs_avoid_needless_id_setting_on_create(nfsnode_t dnp, struct vnode_attr *vap, v int nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) { - const char *p, *pd; /* pointers to current character in scan */ - const char *pnum; /* pointer to current number to decode */ - const char *pscope; /* pointer to IPv6 scope ID */ - uint8_t a[18]; /* octet array to store address bytes */ - int i; /* index of next octet to decode */ - int dci; /* index of octet to insert double-colon zeroes */ - int dcount, xdcount; /* count of digits in current number */ - int needmore; /* set when we know we need more input (e.g. after colon, period) */ - int dots; /* # of dots */ - int hex; /* contains hex values */ - unsigned long val; /* decoded value */ - int s; /* index used for sliding array to insert elided zeroes */ - -#define HEXVALUE 0 -#define DECIMALVALUE 1 + const char *p, *pd; /* pointers to current character in scan */ + const char *pnum; /* pointer to current number to decode */ + const char *pscope; /* pointer to IPv6 scope ID */ + uint8_t a[18]; /* octet array to store address bytes */ + int i; /* index of next octet to decode */ + int dci; /* index of octet to insert double-colon zeroes */ + int dcount, xdcount; /* count of digits in current number */ + int needmore; /* set when we know we need more input (e.g. after colon, period) */ + int dots; /* # of dots */ + int hex; /* contains hex values */ + unsigned long val; /* decoded value */ + int s; /* index used for sliding array to insert elided zeroes */ + +#define HEXVALUE 0 +#define DECIMALVALUE 1 #define GET(TYPE) \ do { \ - if ((dcount <= 0) || (dcount > (((TYPE) == DECIMALVALUE) ? 3 : 4))) \ - return (0); \ - if (((TYPE) == DECIMALVALUE) && xdcount) \ - return (0); \ - val = strtoul(pnum, NULL, ((TYPE) == DECIMALVALUE) ? 
10 : 16); \ - if (((TYPE) == DECIMALVALUE) && (val >= 256)) \ - return (0); \ - /* check if there is room left in the array */ \ - if (i > (int)(sizeof(a) - (((TYPE) == HEXVALUE) ? 2 : 1) - ((dci != -1) ? 2 : 0))) \ - return (0); \ - if ((TYPE) == HEXVALUE) \ - a[i++] = ((val >> 8) & 0xff); \ - a[i++] = (val & 0xff); \ + if ((dcount <= 0) || (dcount > (((TYPE) == DECIMALVALUE) ? 3 : 4))) \ + return (0); \ + if (((TYPE) == DECIMALVALUE) && xdcount) \ + return (0); \ + val = strtoul(pnum, NULL, ((TYPE) == DECIMALVALUE) ? 10 : 16); \ + if (((TYPE) == DECIMALVALUE) && (val >= 256)) \ + return (0); \ + /* check if there is room left in the array */ \ + if (i > (int)(sizeof(a) - (((TYPE) == HEXVALUE) ? 2 : 1) - ((dci != -1) ? 2 : 0))) \ + return (0); \ + if ((TYPE) == HEXVALUE) \ + a[i++] = ((val >> 8) & 0xff); \ + a[i++] = (val & 0xff); \ } while (0) hex = 0; @@ -1862,14 +1940,16 @@ nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) pnum = p = uaddr; pscope = NULL; needmore = 1; - if ((*p == ':') && (*++p != ':')) /* if it starts with colon, gotta be a double */ - return (0); + if ((*p == ':') && (*++p != ':')) { /* if it starts with colon, gotta be a double */ + return 0; + } while (*p) { if (IS_XDIGIT(*p)) { dcount++; - if (!IS_DIGIT(*p)) + if (!IS_DIGIT(*p)) { xdcount++; + } needmore = 0; p++; } else if (*p == '.') { @@ -1878,21 +1958,25 @@ nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) /* this is the first, so count them */ for (pd = p; *pd; pd++) { if (*pd == '.') { - if (++dots > 5) - return (0); + if (++dots > 5) { + return 0; + } } else if (hex && (*pd == '%')) { break; } else if ((*pd < '0') || (*pd > '9')) { - return (0); + return 0; } } - if ((dots != 2) && (dots != 3) && (dots != 5)) - return (0); + if ((dots != 2) && (dots != 3) && (dots != 5)) { + return 0; + } if (hex && (dots == 2)) { /* hex+port */ - if (!dcount && needmore) - return (0); - if (dcount) /* last hex may be elided zero */ + if (!dcount && needmore) { + return 0; + } + if (dcount) { /* last hex may be elided zero */ GET(HEXVALUE); + } } else { GET(DECIMALVALUE); } @@ -1904,11 +1988,13 @@ nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) pnum = ++p; } else if (*p == ':') { hex = 1; - if (dots) - return (0); + if (dots) { + return 0; + } if (!dcount) { /* missing number, probably double colon */ - if (dci >= 0) /* can only have one double colon */ - return (0); + if (dci >= 0) { /* can only have one double colon */ + return 0; + } dci = i; needmore = 0; } else { @@ -1918,58 +2004,71 @@ nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) } pnum = ++p; } else if (*p == '%') { /* scope ID delimiter */ - if (!hex) - return (0); + if (!hex) { + return 0; + } p++; pscope = p; break; } else { /* unexpected character */ - return (0); + return 0; } } - if (needmore && !dcount) - return (0); - if (dcount) /* decode trailing number */ + if (needmore && !dcount) { + return 0; + } + if (dcount) { /* decode trailing number */ GET(dots ? DECIMALVALUE : HEXVALUE); + } if (dci >= 0) { /* got a double-colon at i, need to insert a range of zeroes */ /* if we got a port, slide to end of array */ /* otherwise, slide to end of address (non-port) values */ int end = ((dots == 2) || (dots == 5)) ? sizeof(a) : (sizeof(a) - 2); - if (i % 2) /* length of zero range must be multiple of 2 */ - return (0); - if (i >= end) /* no room? */ - return (0); + if (i % 2) { /* length of zero range must be multiple of 2 */ + return 0; + } + if (i >= end) { /* no room? 
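The slide-and-zero-fill that follows implements IPv6 "::" elision for universal addresses: everything parsed after the double colon moves to the end of the octet array and the gap is zeroed. A sketch for the plain 16-byte case, noting that the kernel also handles an 18-byte array when a port is appended (and checks i % 2 directly, which is equivalent here since dci lands on a group boundary):

#include <stdint.h>
#include <string.h>

/* a[0..i) holds the parsed octets; dci is where "::" appeared, or -1. */
static int
expand_double_colon(uint8_t a[16], int i, int dci)
{
    if (dci < 0) {
        return i == 16 ? 0 : -1;   /* no "::": address must be complete */
    }
    if ((i - dci) % 2 || i >= 16) {
        return -1;                 /* zero run must be whole 16-bit groups */
    }
    /* slide the post-"::" octets to the end of the address... */
    memmove(a + 16 - (i - dci), a + dci, i - dci);
    /* ...and zero-fill the elided middle */
    memset(a + dci, 0, 16 - i);
    return 0;
}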
*/ + return 0; + } /* slide (i-dci) numbers up from index dci */ - for (s=0; s < (i - dci); s++) - a[end-1-s] = a[i-1-s]; + for (s = 0; s < (i - dci); s++) { + a[end - 1 - s] = a[i - 1 - s]; + } /* zero (end-i) numbers at index dci */ - for (s=0; s < (end - i); s++) - a[dci+s] = 0; + for (s = 0; s < (end - i); s++) { + a[dci + s] = 0; + } i = end; } /* copy out resulting socket address */ if (hex) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)addr; - if ((((dots == 0) || (dots == 3)) && (i != (sizeof(a)-2)))) - return (0); - if ((((dots == 2) || (dots == 5)) && (i != sizeof(a)))) - return (0); + if ((((dots == 0) || (dots == 3)) && (i != (sizeof(a) - 2)))) { + return 0; + } + if ((((dots == 2) || (dots == 5)) && (i != sizeof(a)))) { + return 0; + } bzero(sin6, sizeof(struct sockaddr_in6)); sin6->sin6_len = sizeof(struct sockaddr_in6); sin6->sin6_family = AF_INET6; bcopy(a, &sin6->sin6_addr.s6_addr, sizeof(struct in6_addr)); - if ((dots == 5) || (dots == 2)) + if ((dots == 5) || (dots == 2)) { sin6->sin6_port = htons((a[16] << 8) | a[17]); + } if (pscope) { - for (p=pscope; IS_DIGIT(*p); p++) + for (p = pscope; IS_DIGIT(*p); p++) { ; + } if (*p && !IS_DIGIT(*p)) { /* name */ ifnet_t interface = NULL; - if (ifnet_find_by_name(pscope, &interface) == 0) + if (ifnet_find_by_name(pscope, &interface) == 0) { sin6->sin6_scope_id = ifnet_index(interface); - if (interface) + } + if (interface) { ifnet_release(interface); + } } else { /* decimal number */ sin6->sin6_scope_id = strtoul(pscope, NULL, 10); } @@ -1977,20 +2076,24 @@ nfs_uaddr2sockaddr(const char *uaddr, struct sockaddr *addr) } } else { struct sockaddr_in *sin = (struct sockaddr_in*)addr; - if ((dots != 3) && (dots != 5)) - return (0); - if ((dots == 3) && (i != 4)) - return (0); - if ((dots == 5) && (i != 6)) - return (0); + if ((dots != 3) && (dots != 5)) { + return 0; + } + if ((dots == 3) && (i != 4)) { + return 0; + } + if ((dots == 5) && (i != 6)) { + return 0; + } bzero(sin, sizeof(struct sockaddr_in)); sin->sin_len = sizeof(struct sockaddr_in); sin->sin_family = AF_INET; bcopy(a, &sin->sin_addr.s_addr, sizeof(struct in_addr)); - if (dots == 5) + if (dots == 5) { sin->sin_port = htons((a[4] << 8) | a[5]); + } } - return (1); + return 1; } @@ -2004,12 +2107,14 @@ void nfs_printf(int facility, int level, const char *fmt, ...) { va_list ap; - - if ((uint32_t)level > NFS_DEBUG_LEVEL) + + if ((uint32_t)level > NFS_DEBUG_LEVEL) { return; - if (NFS_DEBUG_FACILITY && !((uint32_t)facility & NFS_DEBUG_FACILITY)) + } + if (NFS_DEBUG_FACILITY && !((uint32_t)facility & NFS_DEBUG_FACILITY)) { return; - + } + va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); @@ -2019,7 +2124,7 @@ nfs_printf(int facility, int level, const char *fmt, ...) int nfs_mount_gone(struct nfsmount *nmp) { - return (!nmp || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))); + return !nmp || vfs_isforce(nmp->nm_mountp) || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)); } /* @@ -2032,21 +2137,21 @@ nfs_mountopts(struct nfsmount *nmp, char *buf, int buflen) int c; c = snprintf(buf, buflen, "%s,%s,%s,%s,vers=%d,sec=%s,%sdeadtimeout=%d", - (vfs_flags(nmp->nm_mountp) & MNT_RDONLY) ? "ro" : "rw", - NMFLAG(nmp, SOFT) ? "soft" : "hard", - NMFLAG(nmp, INTR) ? "intr" : "nointr", - nmp->nm_sotype == SOCK_STREAM ? "tcp" : "udp", - nmp->nm_vers, - nmp->nm_auth == RPCAUTH_KRB5 ? "krb5" : - nmp->nm_auth == RPCAUTH_KRB5I ? "krb5i" : - nmp->nm_auth == RPCAUTH_KRB5P ? "krb5p" : - nmp->nm_auth == RPCAUTH_SYS ? 
"sys" : "none", - nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED ? "locks," : - nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED ? "nolocks," : - nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL ? "locallocks," : "", - nmp->nm_deadtimeout); - - return (c > buflen ? ENOMEM : 0); + (vfs_flags(nmp->nm_mountp) & MNT_RDONLY) ? "ro" : "rw", + NMFLAG(nmp, SOFT) ? "soft" : "hard", + NMFLAG(nmp, INTR) ? "intr" : "nointr", + nmp->nm_sotype == SOCK_STREAM ? "tcp" : "udp", + nmp->nm_vers, + nmp->nm_auth == RPCAUTH_KRB5 ? "krb5" : + nmp->nm_auth == RPCAUTH_KRB5I ? "krb5i" : + nmp->nm_auth == RPCAUTH_KRB5P ? "krb5p" : + nmp->nm_auth == RPCAUTH_SYS ? "sys" : "none", + nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED ? "locks," : + nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED ? "nolocks," : + nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL ? "locallocks," : "", + nmp->nm_deadtimeout); + + return c > buflen ? ENOMEM : 0; } #endif /* NFSCLIENT */ @@ -2101,7 +2206,7 @@ nfsm_chain_add_wcc_data_f( } nfsm_chain_add_postop_attr(error, nd, nmc, postattrerr, postvap); - return (error); + return error; } /* @@ -2118,38 +2223,43 @@ nfsm_chain_get_path_namei( int error = 0; char *cp; - if (len > (MAXPATHLEN - 1)) - return (ENAMETOOLONG); + if (len > (MAXPATHLEN - 1)) { + return ENAMETOOLONG; + } /* * Get a buffer for the name to be translated, and copy the * name into the buffer. */ MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!cnp->cn_pnbuf) - return (ENOMEM); + if (!cnp->cn_pnbuf) { + return ENOMEM; + } cnp->cn_pnlen = MAXPATHLEN; cnp->cn_flags |= HASBUF; /* Copy the name from the mbuf list to the string */ cp = cnp->cn_pnbuf; nfsm_chain_get_opaque(error, nmc, len, cp); - if (error) + if (error) { goto out; + } cnp->cn_pnbuf[len] = '\0'; /* sanity check the string */ - if ((strlen(cp) != len) || strchr(cp, '/')) + if ((strlen(cp) != len) || strchr(cp, '/')) { error = EACCES; + } out: if (error) { - if (cnp->cn_pnbuf) + if (cnp->cn_pnbuf) { FREE_ZONE(cnp->cn_pnbuf, MAXPATHLEN, M_NAMEI); + } cnp->cn_flags &= ~HASBUF; } else { nip->ni_pathlen = len; } - return (error); + return error; } /* @@ -2177,8 +2287,9 @@ nfsrv_namei( * Extract and set starting directory. 
*/ error = nfsrv_fhtovp(nfhp, nd, &dp, nxp, nxop); - if (error) + if (error) { goto out; + } error = nfsrv_credcheck(nd, ctx, *nxp, *nxop); if (error || (vnode_vtype(dp) != VDIR)) { vnode_put(dp); @@ -2189,8 +2300,9 @@ nfsrv_namei( nip->ni_cnd.cn_context = ctx; - if (*nxop && ((*nxop)->nxo_flags & NX_READONLY)) + if (*nxop && ((*nxop)->nxo_flags & NX_READONLY)) { cnp->cn_flags |= RDONLY; + } cnp->cn_flags |= NOCROSSMOUNT; cnp->cn_nameptr = cnp->cn_pnbuf; @@ -2206,13 +2318,15 @@ nfsrv_namei( cnp->cn_nameptr = cnp->cn_pnbuf; nip->ni_usedvp = nip->ni_dvp = nip->ni_startdir = dp; } - if (error) + if (error) { goto out; + } /* Check for encountering a symbolic link */ if (cnp->cn_flags & ISSYMLINK) { - if (cnp->cn_flags & (LOCKPARENT | WANTPARENT)) + if (cnp->cn_flags & (LOCKPARENT | WANTPARENT)) { vnode_put(nip->ni_dvp); + } if (nip->ni_vp) { vnode_put(nip->ni_vp); nip->ni_vp = NULL; @@ -2226,7 +2340,7 @@ out: cnp->cn_flags &= ~HASBUF; FREE_ZONE(tmppn, cnp->cn_pnlen, M_NAMEI); } - return (error); + return error; } /* @@ -2253,8 +2367,9 @@ nfsm_adj(mbuf_t mp, int len, int nul) mlen = mbuf_len(m); count += mlen; mnext = mbuf_next(m); - if (mnext == NULL) + if (mnext == NULL) { break; + } m = mnext; } if (mlen > len) { @@ -2262,14 +2377,16 @@ nfsm_adj(mbuf_t mp, int len, int nul) mbuf_setlen(m, mlen); if (nul > 0) { cp = (caddr_t)mbuf_data(m) + mlen - nul; - for (i = 0; i < nul; i++) + for (i = 0; i < nul; i++) { *cp++ = '\0'; + } } return; } count -= len; - if (count < 0) + if (count < 0) { count = 0; + } /* * Correct length for chain is "count". * Find the mbuf with last data, adjust its length, @@ -2282,15 +2399,17 @@ nfsm_adj(mbuf_t mp, int len, int nul) mbuf_setlen(m, count); if (nul > 0) { cp = (caddr_t)mbuf_data(m) + mlen - nul; - for (i = 0; i < nul; i++) + for (i = 0; i < nul; i++) { *cp++ = '\0'; + } } break; } count -= mlen; } - for (m = mbuf_next(m); m; m = mbuf_next(m)) + for (m = mbuf_next(m); m; m = mbuf_next(m)) { mbuf_setlen(m, 0); + } } /* @@ -2304,14 +2423,17 @@ nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen) caddr_t data; mbuf_t m; - if (mlen) + if (mlen) { *mlen = 0; + } /* trim header */ - for (m = nmc->nmc_mhead; m && (m != nmc->nmc_mcur); m = mbuf_next(m)) + for (m = nmc->nmc_mhead; m && (m != nmc->nmc_mcur); m = mbuf_next(m)) { mbuf_setlen(m, 0); - if (!m) - return (EIO); + } + if (!m) { + return EIO; + } /* trim current mbuf */ data = mbuf_data(m); @@ -2319,10 +2441,12 @@ nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen) adjust = nmc->nmc_ptr - data; dlen -= adjust; if ((dlen > 0) && (adjust > 0)) { - if (mbuf_setdata(m, nmc->nmc_ptr, dlen)) - return(EIO); - } else + if (mbuf_setdata(m, nmc->nmc_ptr, dlen)) { + return EIO; + } + } else { mbuf_setlen(m, dlen); + } /* skip next len bytes */ for (; m && (cnt < len); m = mbuf_next(m)) { @@ -2331,21 +2455,25 @@ nfsm_chain_trim_data(struct nfsm_chain *nmc, int len, int *mlen) if (cnt > len) { /* truncate to end of data */ mbuf_setlen(m, dlen - (cnt - len)); - if (m == nmc->nmc_mcur) + if (m == nmc->nmc_mcur) { nmc->nmc_left -= (cnt - len); + } cnt = len; } } - if (mlen) + if (mlen) { *mlen = cnt; + } /* trim any trailing data */ - if (m == nmc->nmc_mcur) + if (m == nmc->nmc_mcur) { nmc->nmc_left = 0; - for (; m; m = mbuf_next(m)) + } + for (; m; m = mbuf_next(m)) { mbuf_setlen(m, 0); + } - return (0); + return 0; } int @@ -2377,10 +2505,11 @@ nfsm_chain_add_fattr( } else { nfsm_chain_add_32(error, nmc, vap->va_data_size); nfsm_chain_add_32(error, nmc, NFS_FABLKSIZE); - if (vap->va_type == VFIFO) 
+ if (vap->va_type == VFIFO) { nfsm_chain_add_32(error, nmc, 0xffffffff); - else + } else { nfsm_chain_add_32(error, nmc, vap->va_rdev); + } nfsm_chain_add_32(error, nmc, vap->va_data_alloc / NFS_FABLKSIZE); nfsm_chain_add_32(error, nmc, vap->va_fsid); nfsm_chain_add_32(error, nmc, vap->va_fileid); @@ -2389,7 +2518,7 @@ nfsm_chain_add_fattr( nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_modify_time); nfsm_chain_add_time(error, nmc, nd->nd_vers, &vap->va_change_time); - return (error); + return error; } int @@ -2417,27 +2546,32 @@ nfsm_chain_get_sattr( VATTR_CLEAR_ACTIVE(vap, va_type); } nfsm_chain_get_32(error, nmc, val); - if (val != (uint32_t)-1) + if (val != (uint32_t)-1) { VATTR_SET(vap, va_uid, val); + } nfsm_chain_get_32(error, nmc, val); - if (val != (uint32_t)-1) + if (val != (uint32_t)-1) { VATTR_SET(vap, va_gid, val); + } /* save the "size" bits for NFSv2 create (even if they appear unset) */ nfsm_chain_get_32(error, nmc, val); VATTR_SET(vap, va_data_size, val); - if (val == (uint32_t)-1) + if (val == (uint32_t)-1) { VATTR_CLEAR_ACTIVE(vap, va_data_size); + } nfsm_chain_get_time(error, nmc, NFS_VER2, - vap->va_access_time.tv_sec, - vap->va_access_time.tv_nsec); - if (vap->va_access_time.tv_sec != -1) + vap->va_access_time.tv_sec, + vap->va_access_time.tv_nsec); + if (vap->va_access_time.tv_sec != -1) { VATTR_SET_ACTIVE(vap, va_access_time); + } nfsm_chain_get_time(error, nmc, NFS_VER2, - vap->va_modify_time.tv_sec, - vap->va_modify_time.tv_nsec); - if (vap->va_modify_time.tv_sec != -1) + vap->va_modify_time.tv_sec, + vap->va_modify_time.tv_nsec); + if (vap->va_modify_time.tv_sec != -1) { VATTR_SET_ACTIVE(vap, va_modify_time); - return (error); + } + return error; } /* NFSv3 */ @@ -2466,8 +2600,8 @@ nfsm_chain_get_sattr( switch (val) { case NFS_TIME_SET_TO_CLIENT: nfsm_chain_get_time(error, nmc, nd->nd_vers, - vap->va_access_time.tv_sec, - vap->va_access_time.tv_nsec); + vap->va_access_time.tv_sec, + vap->va_access_time.tv_nsec); VATTR_SET_ACTIVE(vap, va_access_time); vap->va_vaflags &= ~VA_UTIMES_NULL; break; @@ -2480,19 +2614,20 @@ nfsm_chain_get_sattr( switch (val) { case NFS_TIME_SET_TO_CLIENT: nfsm_chain_get_time(error, nmc, nd->nd_vers, - vap->va_modify_time.tv_sec, - vap->va_modify_time.tv_nsec); + vap->va_modify_time.tv_sec, + vap->va_modify_time.tv_nsec); VATTR_SET_ACTIVE(vap, va_modify_time); vap->va_vaflags &= ~VA_UTIMES_NULL; break; case NFS_TIME_SET_TO_SERVER: VATTR_SET(vap, va_modify_time, now); - if (!VATTR_IS_ACTIVE(vap, va_access_time)) + if (!VATTR_IS_ACTIVE(vap, va_access_time)) { vap->va_vaflags |= VA_UTIMES_NULL; + } break; } - return (error); + return error; } /* @@ -2503,11 +2638,14 @@ nfsrv_cmp_secflavs(struct nfs_sec *sf1, struct nfs_sec *sf2) { int i; - if (sf1->count != sf2->count) + if (sf1->count != sf2->count) { return 1; - for (i = 0; i < sf1->count; i++) - if (sf1->flavors[i] != sf2->flavors[i]) + } + for (i = 0; i < sf1->count; i++) { + if (sf1->flavors[i] != sf2->flavors[i]) { return 1; + } + } return 0; } @@ -2532,25 +2670,29 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) uaddr = unxa->nxa_nets; for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) { error = copyin(uaddr, &nxna, sizeof(nxna)); - if (error) - return (error); + if (error) { + return error; + } if (nxna.nxna_addr.ss_len > sizeof(struct sockaddr_storage) || nxna.nxna_mask.ss_len > sizeof(struct sockaddr_storage) || nxna.nxna_addr.ss_family > AF_MAX || - nxna.nxna_mask.ss_family > AF_MAX) - return (EINVAL); + 
nxna.nxna_mask.ss_family > AF_MAX) { + return EINVAL; + } - if (nxna.nxna_flags & (NX_MAPROOT|NX_MAPALL)) { + if (nxna.nxna_flags & (NX_MAPROOT | NX_MAPALL)) { struct posix_cred temp_pcred; - bzero(&temp_pcred, sizeof(temp_pcred)); + bzero(&temp_pcred, sizeof(temp_pcred)); temp_pcred.cr_uid = nxna.nxna_cred.cr_uid; temp_pcred.cr_ngroups = nxna.nxna_cred.cr_ngroups; - for (i=0; i < nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++) + for (i = 0; i < nxna.nxna_cred.cr_ngroups && i < NGROUPS; i++) { temp_pcred.cr_groups[i] = nxna.nxna_cred.cr_groups[i]; + } cred = posix_cred_create(&temp_pcred); - if (!IS_VALID_CRED(cred)) - return (ENOMEM); + if (!IS_VALID_CRED(cred)) { + return ENOMEM; + } } else { cred = NOCRED; } @@ -2558,9 +2700,10 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) if (nxna.nxna_addr.ss_len == 0) { /* No address means this is a default/world export */ if (nx->nx_flags & NX_DEFAULTEXPORT) { - if (IS_VALID_CRED(cred)) - kauth_cred_unref(&cred); - return (EEXIST); + if (IS_VALID_CRED(cred)) { + kauth_cred_unref(&cred); + } + return EEXIST; } nx->nx_flags |= NX_DEFAULTEXPORT; nx->nx_defopt.nxo_flags = nxna.nxna_flags; @@ -2574,9 +2717,10 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) i += nxna.nxna_addr.ss_len + nxna.nxna_mask.ss_len; MALLOC(no, struct nfs_netopt *, i, M_NETADDR, M_WAITOK); if (!no) { - if (IS_VALID_CRED(cred)) + if (IS_VALID_CRED(cred)) { kauth_cred_unref(&cred); - return (ENOMEM); + } + return ENOMEM; } bzero(no, sizeof(struct nfs_netopt)); no->no_opt.nxo_flags = nxna.nxna_flags; @@ -2600,15 +2744,16 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) TAILQ_FOREACH(dom, &domains, dom_entry) { if (dom->dom_family == i && dom->dom_rtattach) { dom->dom_rtattach((void **)&nx->nx_rtable[i], - dom->dom_rtoffset); + dom->dom_rtoffset); break; } } if ((rnh = nx->nx_rtable[i]) == 0) { - if (IS_VALID_CRED(cred)) - kauth_cred_unref(&cred); + if (IS_VALID_CRED(cred)) { + kauth_cred_unref(&cred); + } _FREE(no, M_NETADDR); - return (ENOBUFS); + return ENOBUFS; } } rn = (*rnh->rnh_addaddr)((caddr_t)saddr, (caddr_t)smask, rnh, no->no_rnodes); @@ -2632,46 +2777,51 @@ nfsrv_hang_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) /* creds are same (or both NULL) */ matched = 1; } else if (cred && cred2 && (kauth_cred_getuid(cred) == kauth_cred_getuid(cred2))) { - /* - * Now compare the effective and - * supplementary groups... - * - * Note: This comparison, as written, - * does not correctly indicate that - * the groups are equivalent, since - * other than the first supplementary - * group, which is also the effective - * group, order on the remaining groups - * doesn't matter, and this is an - * ordered compare. - */ - gid_t groups[NGROUPS]; - gid_t groups2[NGROUPS]; - int groupcount = NGROUPS; - int group2count = NGROUPS; - - if (!kauth_cred_getgroups(cred, groups, &groupcount) && - !kauth_cred_getgroups(cred2, groups2, &group2count) && - groupcount == group2count) { - for (i=0; i < group2count; i++) - if (groups[i] != groups2[i]) - break; - if (i >= group2count || i >= NGROUPS) - matched = 1; - } + /* + * Now compare the effective and + * supplementary groups... + * + * Note: This comparison, as written, + * does not correctly indicate that + * the groups are equivalent, since + * other than the first supplementary + * group, which is also the effective + * group, order on the remaining groups + * doesn't matter, and this is an + * ordered compare. 
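The comment ending here flags the compare below as order-sensitive past the effective group. For contrast, a minimal order-insensitive equivalence check that sorts copies of both lists; this is illustrative only (the kernel keeps the ordered compare, and strictly the first, effective group would still need to match in place):

#include <sys/types.h>
#include <stdlib.h>
#include <string.h>

static int
gid_cmp(const void *a, const void *b)
{
    gid_t x = *(const gid_t *)a, y = *(const gid_t *)b;
    return (x > y) - (x < y);
}

/* assumes n <= 16, matching the historical NGROUPS used above */
static int
groups_equivalent(const gid_t *g1, const gid_t *g2, int n)
{
    gid_t a[16], b[16];

    if (n < 0 || n > 16) {
        return 0;
    }
    memcpy(a, g1, n * sizeof(gid_t));
    memcpy(b, g2, n * sizeof(gid_t));
    qsort(a, n, sizeof(gid_t), gid_cmp);
    qsort(b, n, sizeof(gid_t), gid_cmp);
    return memcmp(a, b, n * sizeof(gid_t)) == 0;
}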
+ */ + gid_t groups[NGROUPS]; + gid_t groups2[NGROUPS]; + int groupcount = NGROUPS; + int group2count = NGROUPS; + + if (!kauth_cred_getgroups(cred, groups, &groupcount) && + !kauth_cred_getgroups(cred2, groups2, &group2count) && + groupcount == group2count) { + for (i = 0; i < group2count; i++) { + if (groups[i] != groups2[i]) { + break; + } + } + if (i >= group2count || i >= NGROUPS) { + matched = 1; + } + } } } - if (IS_VALID_CRED(cred)) - kauth_cred_unref(&cred); + if (IS_VALID_CRED(cred)) { + kauth_cred_unref(&cred); + } _FREE(no, M_NETADDR); - if (matched) + if (matched) { continue; - return (EPERM); + } + return EPERM; } nx->nx_expcnt++; } - return (0); + return 0; } /* @@ -2693,11 +2843,12 @@ nfsrv_free_netopt(struct radix_node *rn, void *w) struct nfs_netopt *nno = (struct nfs_netopt *)rn; (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh); - if (IS_VALID_CRED(nno->no_opt.nxo_cred)) + if (IS_VALID_CRED(nno->no_opt.nxo_cred)) { kauth_cred_unref(&nno->no_opt.nxo_cred); + } _FREE((caddr_t)rn, M_NETADDR); *cnt -= 1; - return (0); + return 0; } /* @@ -2717,23 +2868,25 @@ nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) if (!unxa || !unxa->nxa_netcount) { /* delete everything */ - for (i = 0; i <= AF_MAX; i++) - if ( (rnh = nx->nx_rtable[i]) ) { + for (i = 0; i <= AF_MAX; i++) { + if ((rnh = nx->nx_rtable[i])) { fna.rnh = rnh; fna.cnt = &nx->nx_expcnt; (*rnh->rnh_walktree)(rnh, nfsrv_free_netopt, (caddr_t)&fna); _FREE((caddr_t)rnh, M_RTABLE); nx->nx_rtable[i] = 0; } - return (0); + } + return 0; } /* delete only the exports specified */ uaddr = unxa->nxa_nets; for (net = 0; net < unxa->nxa_netcount; net++, uaddr += sizeof(nxna)) { error = copyin(uaddr, &nxna, sizeof(nxna)); - if (error) - return (error); + if (error) { + return error; + } if (nxna.nxna_addr.ss_len == 0) { /* No address means this is a default/world export */ @@ -2749,23 +2902,26 @@ nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) if ((rnh = nx->nx_rtable[nxna.nxna_addr.ss_family]) == 0) { /* AF not initialized? */ - if (!(unxa->nxa_flags & NXA_ADD)) + if (!(unxa->nxa_flags & NXA_ADD)) { printf("nfsrv_free_addrlist: address not found (0)\n"); + } continue; } rn = (*rnh->rnh_lookup)(&nxna.nxna_addr, - nxna.nxna_mask.ss_len ? &nxna.nxna_mask : NULL, rnh); + nxna.nxna_mask.ss_len ? 
&nxna.nxna_mask : NULL, rnh); if (!rn || (rn->rn_flags & RNF_ROOT)) { - if (!(unxa->nxa_flags & NXA_ADD)) + if (!(unxa->nxa_flags & NXA_ADD)) { printf("nfsrv_free_addrlist: address not found (1)\n"); + } continue; } (*rnh->rnh_deladdr)(rn->rn_key, rn->rn_mask, rnh); nno = (struct nfs_netopt *)rn; - if (IS_VALID_CRED(nno->no_opt.nxo_cred)) + if (IS_VALID_CRED(nno->no_opt.nxo_cred)) { kauth_cred_unref(&nno->no_opt.nxo_cred); + } _FREE((caddr_t)rn, M_NETADDR); nx->nx_expcnt--; @@ -2776,7 +2932,7 @@ nfsrv_free_addrlist(struct nfs_export *nx, struct user_nfs_export_args *unxa) } } - return (0); + return 0; } void enablequotas(struct mount *mp, vfs_context_t ctx); // XXX @@ -2798,35 +2954,41 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) if (unxa->nxa_flags == NXA_CHECK) { /* just check if the path is an NFS-exportable file system */ error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen); - if (error) - return (error); + if (error) { + return error; + } NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, - UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); + UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); error = namei(&mnd); - if (error) - return (error); + if (error) { + return error; + } mvp = mnd.ni_vp; mp = vnode_mount(mvp); /* make sure it's the root of a file system */ - if (!vnode_isvroot(mvp)) + if (!vnode_isvroot(mvp)) { error = EINVAL; + } /* make sure the file system is NFS-exportable */ if (!error) { nfh.nfh_len = NFSV3_MAX_FID_SIZE; error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL); } - if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) + if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) { error = EIO; - if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) + } + if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) { error = EISDIR; + } vnode_put(mvp); nameidone(&mnd); - return (error); + return error; } /* all other operations: must be super user */ - if ((error = vfs_context_suser(ctx))) - return (error); + if ((error = vfs_context_suser(ctx))) { + return error; + } if (unxa->nxa_flags & NXA_DELETE_ALL) { /* delete all exports on all file systems */ @@ -2863,26 +3025,29 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) nfsrv_export_hashtbl = NULL; } lck_rw_done(&nfsrv_export_rwlock); - return (0); + return 0; } error = copyinstr(unxa->nxa_fspath, path, MAXPATHLEN, &pathlen); - if (error) - return (error); + if (error) { + return error; + } lck_rw_lock_exclusive(&nfsrv_export_rwlock); /* init export hash table if not already */ if (!nfsrv_export_hashtbl) { - if (nfsrv_export_hash_size <= 0) + if (nfsrv_export_hash_size <= 0) { nfsrv_export_hash_size = NFSRVEXPHASHSZ; + } nfsrv_export_hashtbl = hashinit(nfsrv_export_hash_size, M_TEMP, &nfsrv_export_hash); } // first check if we've already got an exportfs with the given ID LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) { - if (nxfs->nxfs_id == unxa->nxa_fsid) + if (nxfs->nxfs_id == unxa->nxa_fsid) { break; + } } if (nxfs) { /* verify exported FS path matches given path */ @@ -2890,7 +3055,7 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) error = EEXIST; goto unlock_out; } - if ((unxa->nxa_flags & (NXA_ADD|NXA_OFFLINE)) == NXA_ADD) { + if ((unxa->nxa_flags & (NXA_ADD | NXA_OFFLINE)) == NXA_ADD) { /* if adding, verify that the mount is still what we expect */ mp = vfs_getvfs_by_mntonname(nxfs->nxfs_path); if (mp) { @@ -2899,10 +3064,11 @@ nfsrv_export(struct user_nfs_export_args *unxa, 
vfs_context_t ctx) } /* find exported FS root vnode */ NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, - UIO_SYSSPACE, CAST_USER_ADDR_T(nxfs->nxfs_path), ctx); + UIO_SYSSPACE, CAST_USER_ADDR_T(nxfs->nxfs_path), ctx); error = namei(&mnd); - if (error) + if (error) { goto unlock_out; + } mvp = mnd.ni_vp; /* make sure it's (still) the root of a file system */ if (!vnode_isvroot(mvp)) { @@ -2924,11 +3090,12 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) /* find exported FS root vnode */ NDINIT(&mnd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, - UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); + UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); error = namei(&mnd); if (error) { - if (!(unxa->nxa_flags & NXA_OFFLINE)) + if (!(unxa->nxa_flags & NXA_OFFLINE)) { goto unlock_out; + } } else { mvp = mnd.ni_vp; /* make sure it's the root of a file system */ @@ -2948,12 +3115,15 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) /* make sure the file system is NFS-exportable */ nfh.nfh_len = NFSV3_MAX_FID_SIZE; error = VFS_VPTOFH(mvp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL); - if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) + if (!error && (nfh.nfh_len > (int)NFSV3_MAX_FID_SIZE)) { error = EIO; - if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) + } + if (!error && !(mp->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED)) { error = EISDIR; - if (error) + } + if (error) { goto out; + } } } @@ -2975,29 +3145,34 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) /* insert into list in reverse-sorted order */ nxfs3 = NULL; LIST_FOREACH(nxfs2, &nfsrv_exports, nxfs_next) { - if (strncmp(nxfs->nxfs_path, nxfs2->nxfs_path, MAXPATHLEN) > 0) + if (strncmp(nxfs->nxfs_path, nxfs2->nxfs_path, MAXPATHLEN) > 0) { break; + } nxfs3 = nxfs2; } - if (nxfs2) + if (nxfs2) { LIST_INSERT_BEFORE(nxfs2, nxfs, nxfs_next); - else if (nxfs3) + } else if (nxfs3) { LIST_INSERT_AFTER(nxfs3, nxfs, nxfs_next); - else + } else { LIST_INSERT_HEAD(&nfsrv_exports, nxfs, nxfs_next); + } /* make sure any quotas are enabled before we export the file system */ - if (mp) + if (mp) { enablequotas(mp, ctx); + } } if (unxa->nxa_exppath) { error = copyinstr(unxa->nxa_exppath, path, MAXPATHLEN, &pathlen); - if (error) + if (error) { goto out; + } LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) { - if (nx->nx_id == unxa->nxa_expid) + if (nx->nx_id == unxa->nxa_expid) { break; + } } if (nx) { /* verify exported FS path matches given path */ @@ -3034,16 +3209,18 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) /* insert into list in reverse-sorted order */ nx3 = NULL; LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) { - if (strncmp(nx->nx_path, nx2->nx_path, MAXPATHLEN) > 0) + if (strncmp(nx->nx_path, nx2->nx_path, MAXPATHLEN) > 0) { break; + } nx3 = nx2; } - if (nx2) + if (nx2) { LIST_INSERT_BEFORE(nx2, nx, nx_next); - else if (nx3) + } else if (nx3) { LIST_INSERT_AFTER(nx3, nx, nx_next); - else + } else { LIST_INSERT_HEAD(&nxfs->nxfs_exports, nx, nx_next); + } /* insert into hash */ LIST_INSERT_HEAD(NFSRVEXPHASH(nxfs->nxfs_id, nx->nx_id), nx, nx_hash); @@ -3054,25 +3231,29 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) */ error = 0; if ((nx3 && !strncmp(nx3->nx_path, nx->nx_path, pathlen - 1) && - (nx3->nx_path[pathlen-1] == '/')) || + (nx3->nx_path[pathlen - 1] == '/')) || (nx2 && !strncmp(nx2->nx_path, nx->nx_path, strlen(nx2->nx_path)) && - (nx->nx_path[strlen(nx2->nx_path)] == '/'))) + 
(nx->nx_path[strlen(nx2->nx_path)] == '/'))) { error = EINVAL; + } if (!error) { /* check export conflict with fs root export and vice versa */ expisroot = !nx->nx_path[0] || - ((nx->nx_path[0] == '.') && !nx->nx_path[1]); + ((nx->nx_path[0] == '.') && !nx->nx_path[1]); LIST_FOREACH(nx2, &nxfs->nxfs_exports, nx_next) { if (expisroot) { - if (nx2 != nx) + if (nx2 != nx) { break; - } else if (!nx2->nx_path[0]) + } + } else if (!nx2->nx_path[0]) { break; - else if ((nx2->nx_path[0] == '.') && !nx2->nx_path[1]) + } else if ((nx2->nx_path[0] == '.') && !nx2->nx_path[1]) { break; + } } - if (nx2) + if (nx2) { error = EINVAL; + } } if (error) { /* @@ -3085,7 +3266,7 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) * complexity of mountd for very little overall benefit. */ printf("nfsrv_export: warning: nested exports: %s/%s\n", - nxfs->nxfs_path, nx->nx_path); + nxfs->nxfs_path, nx->nx_path); error = 0; } nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH; @@ -3124,8 +3305,9 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) xnd.ni_cnd.cn_nameptr = xnd.ni_cnd.cn_pnbuf; xnd.ni_usedvp = xnd.ni_dvp = xnd.ni_startdir = mvp; } - if (error) + if (error) { goto out1; + } xvp = xnd.ni_vp; } @@ -3146,8 +3328,9 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) } vnode_put(xvp); - if (error) + if (error) { goto out1; + } } else { nx->nx_fh.nfh_xh.nxh_flags = NXHF_INVALIDFH; nx->nx_fh.nfh_xh.nxh_fidlen = 0; @@ -3187,22 +3370,25 @@ nfsrv_export(struct user_nfs_export_args *unxa, vfs_context_t ctx) } else { /* delete only the netopts for the given addresses */ error = nfsrv_free_addrlist(nx, unxa); - if (error) + if (error) { goto out1; + } } } if (unxa->nxa_flags & NXA_ADD) { - /* + /* * If going offline set the export time so that when * coming back on line we will present a new write verifier * to the client. */ - if (unxa->nxa_flags & NXA_OFFLINE) + if (unxa->nxa_flags & NXA_OFFLINE) { microtime(&nx->nx_exptime); + } error = nfsrv_hang_addrlist(nx, unxa); - if (!error && mp) + if (!error && mp) { vfs_setflags(mp, MNT_EXPORTED); + } } out1: @@ -3220,8 +3406,9 @@ out1: LIST_REMOVE(nxfs, nxfs_next); FREE(nxfs->nxfs_path, M_TEMP); FREE(nxfs, M_TEMP); - if (mp) + if (mp) { vfs_clearflags(mp, MNT_EXPORTED); + } } out: @@ -3230,10 +3417,11 @@ out: nameidone(&mnd); } unlock_out: - if (mp) + if (mp) { mount_drop(mp, 0); + } lck_rw_done(&nfsrv_export_rwlock); - return (error); + return error; } /* @@ -3245,27 +3433,30 @@ unlock_out: int nfsrv_check_exports_allow_address(mbuf_t nam) { - struct nfs_exportfs *nxfs; - struct nfs_export *nx; - struct nfs_export_options *nxo = NULL; + struct nfs_exportfs *nxfs; + struct nfs_export *nx; + struct nfs_export_options *nxo = NULL; - if (nam == NULL) - return (EACCES); + if (nam == NULL) { + return EACCES; + } lck_rw_lock_shared(&nfsrv_export_rwlock); LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) { LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) { /* A little optimizing by checking for the default first */ - if (nx->nx_flags & NX_DEFAULTEXPORT) + if (nx->nx_flags & NX_DEFAULTEXPORT) { nxo = &nx->nx_defopt; - if (nxo || (nxo = nfsrv_export_lookup(nx, nam))) + } + if (nxo || (nxo = nfsrv_export_lookup(nx, nam))) { goto found; + } } } found: lck_rw_done(&nfsrv_export_rwlock); - return (nxo ? 0 : EACCES); + return nxo ? 0 : EACCES; } struct nfs_export_options * @@ -3281,22 +3472,25 @@ nfsrv_export_lookup(struct nfs_export *nx, mbuf_t nam) saddr = mbuf_data(nam); if (saddr->sa_family > AF_MAX) { /* Bogus sockaddr? 
Don't match anything. */ - return (NULL); + return NULL; } rnh = nx->nx_rtable[saddr->sa_family]; if (rnh != NULL) { no = (struct nfs_netopt *) - (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh); - if (no && no->no_rnodes->rn_flags & RNF_ROOT) + (*rnh->rnh_matchaddr)((caddr_t)saddr, rnh); + if (no && no->no_rnodes->rn_flags & RNF_ROOT) { no = NULL; - if (no) + } + if (no) { nxo = &no->no_opt; + } } } /* If no address match, use the default if it exists. */ - if ((nxo == NULL) && (nx->nx_flags & NX_DEFAULTEXPORT)) + if ((nxo == NULL) && (nx->nx_flags & NX_DEFAULTEXPORT)) { nxo = &nx->nx_defopt; - return (nxo); + } + return nxo; } /* find an export for the given handle */ @@ -3307,19 +3501,22 @@ nfsrv_fhtoexport(struct nfs_filehandle *nfhp) struct nfs_export *nx; uint32_t fsid, expid; - if (!nfsrv_export_hashtbl) - return (NULL); + if (!nfsrv_export_hashtbl) { + return NULL; + } fsid = ntohl(nxh->nxh_fsid); expid = ntohl(nxh->nxh_expid); nx = NFSRVEXPHASH(fsid, expid)->lh_first; for (; nx; nx = LIST_NEXT(nx, nx_hash)) { - if (nx->nx_fs->nxfs_id != fsid) + if (nx->nx_fs->nxfs_id != fsid) { continue; - if (nx->nx_id != expid) + } + if (nx->nx_id != expid) { continue; + } break; } - return (nx); + return nx; } /* @@ -3346,30 +3543,36 @@ nfsrv_fhtovp( *nxp = NULL; *nxop = NULL; - if (nd != NULL) + if (nd != NULL) { nam = nd->nd_nam; + } v = ntohl(nxh->nxh_version); if (v != NFS_FH_VERSION) { /* file handle format not supported */ - return (ESTALE); + return ESTALE; + } + if (nfhp->nfh_len > NFSV3_MAX_FH_SIZE) { + return EBADRPC; + } + if (nfhp->nfh_len < (int)sizeof(struct nfs_exphandle)) { + return ESTALE; } - if (nfhp->nfh_len > NFSV3_MAX_FH_SIZE) - return (EBADRPC); - if (nfhp->nfh_len < (int)sizeof(struct nfs_exphandle)) - return (ESTALE); v = ntohs(nxh->nxh_flags); - if (v & NXHF_INVALIDFH) - return (ESTALE); + if (v & NXHF_INVALIDFH) { + return ESTALE; + } *nxp = nfsrv_fhtoexport(nfhp); - if (!*nxp) - return (ESTALE); + if (!*nxp) { + return ESTALE; + } /* Get the export option structure for this tuple. */ *nxop = nxo = nfsrv_export_lookup(*nxp, nam); - if (nam && (*nxop == NULL)) - return (EACCES); + if (nam && (*nxop == NULL)) { + return EACCES; + } if (nd != NULL) { /* Validate the security flavor of the request */ @@ -3386,47 +3589,56 @@ nfsrv_fhtovp( * This allows an unauthenticated superuser on the client * to do mounts for the benefit of authenticated users. */ - if (nd->nd_vers == NFS_VER2) + if (nd->nd_vers == NFS_VER2) { if (nd->nd_procnum == NFSV2PROC_GETATTR || - nd->nd_procnum == NFSV2PROC_STATFS) + nd->nd_procnum == NFSV2PROC_STATFS) { valid = 1; - if (nd->nd_vers == NFS_VER3) - if (nd->nd_procnum == NFSPROC_FSINFO) + } + } + if (nd->nd_vers == NFS_VER3) { + if (nd->nd_procnum == NFSPROC_FSINFO) { valid = 1; + } + } - if (!valid) - return (NFSERR_AUTHERR | AUTH_REJECTCRED); + if (!valid) { + return NFSERR_AUTHERR | AUTH_REJECTCRED; + } } } - if (nxo && (nxo->nxo_flags & NX_OFFLINE)) - return ((nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER); + if (nxo && (nxo->nxo_flags & NX_OFFLINE)) { + return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER; + } /* find mount structure */ mp = vfs_getvfs_by_mntonname((*nxp)->nx_fs->nxfs_path); if (mp) { error = vfs_busy(mp, LK_NOWAIT); mount_iterdrop(mp); - if (error) + if (error) { mp = NULL; + } } if (!mp) { /* * We have an export, but no mount? * Perhaps the export just hasn't been marked offline yet. */ - return ((nd == NULL || nd->nd_vers == NFS_VER2) ? 
ESTALE : NFSERR_TRYLATER); + return (nd == NULL || nd->nd_vers == NFS_VER2) ? ESTALE : NFSERR_TRYLATER; } fidp = nfhp->nfh_fhp + sizeof(*nxh); error = VFS_FHTOVP(mp, nxh->nxh_fidlen, fidp, vpp, NULL); vfs_unbusy(mp); - if (error) - return (error); + if (error) { + return error; + } /* vnode pointer should be good at this point or ... */ - if (*vpp == NULL) - return (ESTALE); - return (0); + if (*vpp == NULL) { + return ESTALE; + } + return 0; } /* @@ -3449,7 +3661,7 @@ nfsrv_credcheck( } } ctx->vc_ucred = nd->nd_cr; - return (0); + return 0; } /* @@ -3479,37 +3691,43 @@ nfsrv_vptofh( nfhp->nfh_xh.nxh_flags = 0; nfhp->nfh_xh.nxh_reserved = 0; - if (nfsvers == NFS_VER2) + if (nfsvers == NFS_VER2) { bzero(&nfhp->nfh_fid[0], NFSV2_MAX_FID_SIZE); + } /* if directory FH matches export root, return invalid FH */ if (dnfhp && nfsrv_fhmatch(dnfhp, &nx->nx_fh)) { - if (nfsvers == NFS_VER2) + if (nfsvers == NFS_VER2) { nfhp->nfh_len = NFSX_V2FH; - else + } else { nfhp->nfh_len = sizeof(nfhp->nfh_xh); + } nfhp->nfh_xh.nxh_fidlen = 0; nfhp->nfh_xh.nxh_flags = htons(NXHF_INVALIDFH); - return (0); + return 0; } - if (nfsvers == NFS_VER2) + if (nfsvers == NFS_VER2) { maxfidsize = NFSV2_MAX_FID_SIZE; - else + } else { maxfidsize = NFSV3_MAX_FID_SIZE; + } nfhp->nfh_len = maxfidsize; error = VFS_VPTOFH(vp, (int*)&nfhp->nfh_len, &nfhp->nfh_fid[0], ctx); - if (error) - return (error); - if (nfhp->nfh_len > maxfidsize) - return (EOVERFLOW); + if (error) { + return error; + } + if (nfhp->nfh_len > maxfidsize) { + return EOVERFLOW; + } nfhp->nfh_xh.nxh_fidlen = nfhp->nfh_len; nfhp->nfh_len += sizeof(nfhp->nfh_xh); - if ((nfsvers == NFS_VER2) && (nfhp->nfh_len < NFSX_V2FH)) + if ((nfsvers == NFS_VER2) && (nfhp->nfh_len < NFSX_V2FH)) { nfhp->nfh_len = NFSX_V2FH; + } - return (0); + return 0; } /* @@ -3527,11 +3745,13 @@ nfsrv_fhmatch(struct nfs_filehandle *fh1, struct nfs_filehandle *fh2) nxh2 = (struct nfs_exphandle *)fh2->nfh_fhp; len1 = sizeof(fh1->nfh_xh) + nxh1->nxh_fidlen; len2 = sizeof(fh2->nfh_xh) + nxh2->nxh_fidlen; - if (len1 != len2) - return (0); - if (bcmp(nxh1, nxh2, len1)) - return (0); - return (1); + if (len1 != len2) { + return 0; + } + if (bcmp(nxh1, nxh2, len1)) { + return 0; + } + return 1; } /* @@ -3550,9 +3770,9 @@ nfsrv_fhmatch(struct nfs_filehandle *fh1, struct nfs_filehandle *fh2) struct nfs_user_stat_node * nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *saddr, uid_t uid) { - struct nfs_user_stat_node *unode; - struct timeval now; - struct nfs_user_stat_hashtbl_head *head; + struct nfs_user_stat_node *unode; + struct timeval now; + struct nfs_user_stat_hashtbl_head *head; /* seach the hash table */ head = NFS_USER_STAT_HASH(list->user_hashtbl, uid); @@ -3577,10 +3797,11 @@ nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *sad if (list->node_count < nfsrv_user_stat_max_nodes) { /* Allocate a new node */ MALLOC(unode, struct nfs_user_stat_node *, sizeof(struct nfs_user_stat_node), - M_TEMP, M_WAITOK | M_ZERO); + M_TEMP, M_WAITOK | M_ZERO); - if (!unode) + if (!unode) { return NULL; + } /* increment node count */ OSAddAtomic(1, &nfsrv_user_stat_node_count); @@ -3589,8 +3810,9 @@ nfsrv_get_user_stat_node(struct nfs_active_user_list *list, struct sockaddr *sad /* reuse the oldest node in the lru list */ unode = TAILQ_FIRST(&list->user_lru); - if (!unode) + if (!unode) { return NULL; + } /* Remove the node */ TAILQ_REMOVE(&list->user_lru, unode, lru_link); @@ -3617,18 +3839,20 @@ nfsrv_get_user_stat_node(struct nfs_active_user_list *list, 
struct sockaddr *sad void nfsrv_update_user_stat(struct nfs_export *nx, struct nfsrv_descript *nd, uid_t uid, u_int ops, u_int rd_bytes, u_int wr_bytes) { - struct nfs_user_stat_node *unode; - struct nfs_active_user_list *ulist; - struct sockaddr *saddr; + struct nfs_user_stat_node *unode; + struct nfs_active_user_list *ulist; + struct sockaddr *saddr; - if ((!nfsrv_user_stat_enabled) || (!nx) || (!nd) || (!nd->nd_nam)) + if ((!nfsrv_user_stat_enabled) || (!nx) || (!nd) || (!nd->nd_nam)) { return; + } saddr = (struct sockaddr *)mbuf_data(nd->nd_nam); /* check address family before going any further */ - if ((saddr->sa_family != AF_INET) && (saddr->sa_family != AF_INET6)) + if ((saddr->sa_family != AF_INET) && (saddr->sa_family != AF_INET6)) { return; + } ulist = &nx->nx_user_list; @@ -3662,8 +3886,9 @@ nfsrv_init_user_list(struct nfs_active_user_list *ulist) TAILQ_INIT(&ulist->user_lru); /* initialize the hash table */ - for(i = 0; i < NFS_USER_STAT_HASH_SIZE; i++) + for (i = 0; i < NFS_USER_STAT_HASH_SIZE; i++) { LIST_INIT(&ulist->user_hashtbl[i]); + } ulist->node_count = 0; lck_mtx_init(&ulist->user_mutex, nfsrv_active_user_mutex_group, LCK_ATTR_NULL); @@ -3675,8 +3900,9 @@ nfsrv_free_user_list(struct nfs_active_user_list *ulist) { struct nfs_user_stat_node *unode; - if (!ulist) + if (!ulist) { return; + } while ((unode = TAILQ_FIRST(&ulist->user_lru))) { /* Remove node and free */ @@ -3696,13 +3922,13 @@ nfsrv_free_user_list(struct nfs_active_user_list *ulist) void nfsrv_active_user_list_reclaim(void) { - struct nfs_exportfs *nxfs; - struct nfs_export *nx; - struct nfs_active_user_list *ulist; - struct nfs_user_stat_hashtbl_head oldlist; - struct nfs_user_stat_node *unode, *unode_next; - struct timeval now; - uint32_t tstale; + struct nfs_exportfs *nxfs; + struct nfs_export *nx; + struct nfs_active_user_list *ulist; + struct nfs_user_stat_hashtbl_head oldlist; + struct nfs_user_stat_node *unode, *unode_next; + struct timeval now; + uint32_t tstale; LIST_INIT(&oldlist); @@ -3718,8 +3944,9 @@ nfsrv_active_user_list_reclaim(void) unode_next = TAILQ_NEXT(unode, lru_link); /* check if this node has expired */ - if (unode->tm_last >= tstale) + if (unode->tm_last >= tstale) { break; + } /* Remove node from the active user list */ TAILQ_REMOVE(&ulist->user_lru, unode, lru_link); @@ -3736,10 +3963,10 @@ nfsrv_active_user_list_reclaim(void) lck_mtx_unlock(&ulist->user_mutex); } } - lck_rw_done(&nfsrv_export_rwlock); + lck_rw_done(&nfsrv_export_rwlock); /* Free expired nodes */ - while ((unode = LIST_FIRST(&oldlist))) { + while ((unode = LIST_FIRST(&oldlist))) { LIST_REMOVE(unode, hash_link); FREE(unode, M_TEMP); } @@ -3751,20 +3978,20 @@ nfsrv_active_user_list_reclaim(void) * RFC 1094. 
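 * (Editorial aside, not part of the patch: the table below is indexed by
 * errno - 1, and nfsrv_errmap() further down falls back to NFSERR_IO for
 * anything past its end. A worked lookup, assuming the usual BSD errno
 * values:
 *
 *     nfsrv_v2errmap[EPERM  - 1] == NFSERR_PERM    // EPERM  == 1
 *     nfsrv_v2errmap[EACCES - 1] == NFSERR_ACCES   // EACCES == 13
 *     nfsrv_v2errmap[ESTALE - 1] == NFSERR_STALE   // ESTALE == 70
 *
 * so the err - 1 in nfsrv_errmap() is what lines the table up with errno.)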
*/ static u_char nfsrv_v2errmap[] = { - NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR, - NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS, - NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, - NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO, - NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE, + NFSERR_PERM, NFSERR_NOENT, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_NXIO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_ACCES, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_EXIST, NFSERR_IO, NFSERR_NODEV, NFSERR_NOTDIR, + NFSERR_ISDIR, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_FBIG, NFSERR_NOSPC, NFSERR_IO, NFSERR_ROFS, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, NFSERR_IO, + NFSERR_IO, NFSERR_IO, NFSERR_NAMETOL, NFSERR_IO, NFSERR_IO, + NFSERR_NOTEMPTY, NFSERR_IO, NFSERR_IO, NFSERR_DQUOT, NFSERR_STALE, }; /* @@ -4130,22 +4357,24 @@ nfsrv_errmap(struct nfsrv_descript *nd, int err) short *defaulterrp, *errp; if (nd->nd_vers == NFS_VER2) { - if (err <= (int)sizeof(nfsrv_v2errmap)) - return ((int)nfsrv_v2errmap[err - 1]); - return (NFSERR_IO); + if (err <= (int)sizeof(nfsrv_v2errmap)) { + return (int)nfsrv_v2errmap[err - 1]; + } + return NFSERR_IO; } /* NFSv3 */ - if (nd->nd_procnum > NFSPROC_COMMIT) - return (err & 0xffff); + if (nd->nd_procnum > NFSPROC_COMMIT) { + return err & 0xffff; + } errp = defaulterrp = nfsrv_v3errmap[nd->nd_procnum]; while (*++errp) { - if (*errp == err) - return (err); - else if (*errp > err) + if (*errp == err) { + return err; + } else if (*errp > err) { break; + } } - return ((int)*defaulterrp); + return (int)*defaulterrp; } #endif /* NFSSERVER */ - diff --git a/bsd/nfs/nfs_syscalls.c b/bsd/nfs/nfs_syscalls.c index 2e43de4b1..a27683203 100644 --- a/bsd/nfs/nfs_syscalls.c +++ b/bsd/nfs/nfs_syscalls.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -119,7 +119,7 @@ #include #endif -kern_return_t thread_terminate(thread_t); /* XXX */ +kern_return_t thread_terminate(thread_t); /* XXX */ #if NFSSERVER @@ -134,12 +134,12 @@ static int nfsrv_sock_tcp_cnt = 0; #define NFSD_MIN_IDLE_TIMEOUT 30 static int nfsrv_sock_idle_timeout = 3600; /* One hour */ -int nfssvc_export(user_addr_t argp); -int nfssvc_nfsd(void); -int nfssvc_addsock(socket_t, mbuf_t); -void nfsrv_zapsock(struct nfsrv_sock *); -void nfsrv_slpderef(struct nfsrv_sock *); -void nfsrv_slpfree(struct nfsrv_sock *); +int nfssvc_export(user_addr_t argp); +int nfssvc_nfsd(void); +int nfssvc_addsock(socket_t, mbuf_t); +void nfsrv_zapsock(struct nfsrv_sock *); +void nfsrv_slpderef(struct nfsrv_sock *); +void nfsrv_slpfree(struct nfsrv_sock *); #endif /* NFSSERVER */ @@ -147,10 +147,10 @@ void nfsrv_slpfree(struct nfsrv_sock *); * sysctl stuff */ SYSCTL_DECL(_vfs_generic); -SYSCTL_NODE(_vfs_generic, OID_AUTO, nfs, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "nfs hinge"); +SYSCTL_NODE(_vfs_generic, OID_AUTO, nfs, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "nfs hinge"); #if NFSCLIENT -SYSCTL_NODE(_vfs_generic_nfs, OID_AUTO, client, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "nfs client hinge"); +SYSCTL_NODE(_vfs_generic_nfs, OID_AUTO, client, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "nfs client hinge"); SYSCTL_INT(_vfs_generic_nfs_client, OID_AUTO, initialdowndelay, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_tprintf_initial_delay, 0, ""); SYSCTL_INT(_vfs_generic_nfs_client, OID_AUTO, nextdowndelay, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_tprintf_delay, 0, ""); SYSCTL_INT(_vfs_generic_nfs_client, OID_AUTO, iosize, CTLFLAG_RW | CTLFLAG_LOCKED, &nfs_iosize, 0, ""); @@ -175,7 +175,7 @@ SYSCTL_STRING(_vfs_generic_nfs_client, OID_AUTO, default_nfs4domain, CTLFLAG_RW #endif /* NFSCLIENT */ #if NFSSERVER -SYSCTL_NODE(_vfs_generic_nfs, OID_AUTO, server, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "nfs server hinge"); +SYSCTL_NODE(_vfs_generic_nfs, OID_AUTO, server, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "nfs server hinge"); SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, wg_delay, CTLFLAG_RW | CTLFLAG_LOCKED, &nfsrv_wg_delay, 0, ""); SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, wg_delay_v3, CTLFLAG_RW | CTLFLAG_LOCKED, &nfsrv_wg_delay_v3, 0, ""); SYSCTL_INT(_vfs_generic_nfs_server, OID_AUTO, require_resv_port, CTLFLAG_RW | CTLFLAG_LOCKED, &nfsrv_require_resv_port, 0, ""); @@ -209,15 +209,17 @@ mapname2id(struct nfs_testmapid *map) int error; error = nfs4_id2guid(map->ntm_name, &map->ntm_guid, map->ntm_grpflag); - if (error) - return (error); + if (error) { + return error; + } - if (map->ntm_grpflag) + if (map->ntm_grpflag) { error = kauth_cred_guid2gid(&map->ntm_guid, (gid_t *)&map->ntm_id); - else + } else { error = kauth_cred_guid2uid(&map->ntm_guid, (uid_t *)&map->ntm_id); + } - return (error); + return error; } static int @@ -225,19 +227,20 @@ mapid2name(struct nfs_testmapid *map) { int error; size_t len = sizeof(map->ntm_name); - - if (map->ntm_grpflag) + + if (map->ntm_grpflag) { error = kauth_cred_gid2guid((gid_t)map->ntm_id, &map->ntm_guid); - 
else + } else { error = kauth_cred_uid2guid((uid_t)map->ntm_id, &map->ntm_guid); + } + + if (error) { + return error; + } - if (error) - return (error); - error = nfs4_guid2id(&map->ntm_guid, map->ntm_name, &len, map->ntm_grpflag); - return (error); - + return error; } static int @@ -246,15 +249,17 @@ nfsclnt_testidmap(proc_t p, user_addr_t argp) struct nfs_testmapid mapid; int error, coerror; size_t len = sizeof(mapid.ntm_name); - - /* Let root make this call. */ + + /* Let root make this call. */ error = proc_suser(p); - if (error) - return (error); + if (error) { + return error; + } error = copyin(argp, &mapid, sizeof(mapid)); - if (error) - return (error); + if (error) { + return error; + } switch (mapid.ntm_lookup) { case NTM_NAME2ID: error = mapname2id(&mapid); @@ -269,12 +274,12 @@ nfsclnt_testidmap(proc_t p, user_addr_t argp) error = nfs4_guid2id(&mapid.ntm_guid, mapid.ntm_name, &len, mapid.ntm_grpflag); break; default: - return (EINVAL); + return EINVAL; } coerror = copyout(&mapid, argp, sizeof(mapid)); - return (error ? error : coerror); + return error ? error : coerror; } int @@ -286,8 +291,9 @@ nfsclnt(proc_t p, struct nfsclnt_args *uap, __unused int *retval) switch (uap->flag) { case NFSCLNT_LOCKDANS: error = copyin(uap->argp, &la, sizeof(la)); - if (!error) + if (!error) { error = nfslockdans(p, &la); + } break; case NFSCLNT_LOCKDNOTIFY: error = nfslockdnotify(p, uap->argp); @@ -298,7 +304,7 @@ nfsclnt(proc_t p, struct nfsclnt_args *uap, __unused int *retval) default: error = EINVAL; } - return (error); + return error; } @@ -327,10 +333,11 @@ nfsiod_terminate(struct nfsiod *niod) { nfsiod_thread_count--; lck_mtx_unlock(nfsiod_mutex); - if (niod) + if (niod) { FREE(niod, M_TEMP); - else + } else { printf("nfsiod: terminating without niod\n"); + } thread_terminate(current_thread()); /*NOTREACHED*/ } @@ -355,12 +362,13 @@ nfsiod_thread(void) lck_mtx_lock(nfsiod_mutex); TAILQ_INSERT_HEAD(&nfsiodfree, niod, niod_link); wakeup(current_thread()); - error = msleep0(niod, nfsiod_mutex, PWAIT | PDROP, "nfsiod", NFS_ASYNCTHREADMAXIDLE*hz, nfsiod_continue); + error = msleep0(niod, nfsiod_mutex, PWAIT | PDROP, "nfsiod", NFS_ASYNCTHREADMAXIDLE * hz, nfsiod_continue); /* shouldn't return... 
so we have an error */ /* remove an old nfsiod struct and terminate */ lck_mtx_lock(nfsiod_mutex); - if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist))) + if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist))) { TAILQ_REMOVE(&nfsiodfree, niod, niod_link); + } nfsiod_terminate(niod); /*NOTREACHED*/ } @@ -377,17 +385,17 @@ nfsiod_start(void) lck_mtx_lock(nfsiod_mutex); if ((nfsiod_thread_count >= NFSIOD_MAX) && (nfsiod_thread_count > 0)) { lck_mtx_unlock(nfsiod_mutex); - return (EBUSY); + return EBUSY; } nfsiod_thread_count++; if (kernel_thread_start((thread_continue_t)nfsiod_thread, NULL, &thd) != KERN_SUCCESS) { lck_mtx_unlock(nfsiod_mutex); - return (EBUSY); + return EBUSY; } /* wait for the thread to complete startup */ msleep(thd, nfsiod_mutex, PWAIT | PDROP, "nfsiodw", NULL); thread_deallocate(thd); - return (0); + return 0; } /* @@ -409,8 +417,9 @@ nfsiod_continue(int error) if (!niod) { /* there's no work queued up */ /* remove an old nfsiod struct and terminate */ - if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist))) + if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist))) { TAILQ_REMOVE(&nfsiodfree, niod, niod_link); + } nfsiod_terminate(niod); /*NOTREACHED*/ } @@ -418,12 +427,12 @@ nfsiod_continue(int error) worktodo: while ((nmp = niod->niod_nmp)) { - if (nmp == NULL){ + if (nmp == NULL) { niod->niod_nmp = NULL; break; } - /* + /* * Service this mount's async I/O queue. * * In order to ensure some level of fairness between mounts, @@ -456,12 +465,13 @@ worktodo: lck_mtx_lock(nfsiod_mutex); morework = !TAILQ_EMPTY(&nmp->nm_iodq); if (!morework || !TAILQ_EMPTY(&nfsiodmounts)) { - /* - * we're going to stop working on this mount but if the + /* + * we're going to stop working on this mount but if the * mount still needs more work so queue it up */ - if (morework && nmp->nm_iodlink.tqe_next == NFSNOLIST) + if (morework && nmp->nm_iodlink.tqe_next == NFSNOLIST) { TAILQ_INSERT_TAIL(&nfsiodmounts, nmp, nm_iodlink); + } nmp->nm_niod = NULL; niod->niod_nmp = NULL; } @@ -473,22 +483,24 @@ worktodo: TAILQ_REMOVE(&nfsiodmounts, niod->niod_nmp, nm_iodlink); niod->niod_nmp->nm_iodlink.tqe_next = NFSNOLIST; } - if (niod->niod_nmp) + if (niod->niod_nmp) { goto worktodo; + } /* queue ourselves back up - if there aren't too many threads running */ if (nfsiod_thread_count <= NFSIOD_MAX) { TAILQ_INSERT_HEAD(&nfsiodfree, niod, niod_link); - error = msleep0(niod, nfsiod_mutex, PWAIT | PDROP, "nfsiod", NFS_ASYNCTHREADMAXIDLE*hz, nfsiod_continue); + error = msleep0(niod, nfsiod_mutex, PWAIT | PDROP, "nfsiod", NFS_ASYNCTHREADMAXIDLE * hz, nfsiod_continue); /* shouldn't return... 
so we have an error */ /* remove an old nfsiod struct and terminate */ lck_mtx_lock(nfsiod_mutex); - if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist))) + if ((niod = TAILQ_LAST(&nfsiodfree, nfsiodlist))) { TAILQ_REMOVE(&nfsiodfree, niod, niod_link); + } } nfsiod_terminate(niod); /*NOTREACHED*/ - return (0); + return 0; } #endif /* NFSCLIENT */ @@ -520,27 +532,33 @@ getfh(proc_t p, struct getfh_args *uap, __unused int *retval) * Must be super user */ error = proc_suser(p); - if (error) - return (error); + if (error) { + return error; + } error = copyinstr(uap->fname, path, MAXPATHLEN, &pathlen); - if (!error) + if (!error) { error = copyin(uap->fhp, &fhlen, sizeof(fhlen)); - if (error) - return (error); + } + if (error) { + return error; + } /* limit fh size to length specified (or v3 size by default) */ - if ((fhlen != NFSV2_MAX_FH_SIZE) && (fhlen != NFSV3_MAX_FH_SIZE)) + if ((fhlen != NFSV2_MAX_FH_SIZE) && (fhlen != NFSV3_MAX_FH_SIZE)) { fhlen = NFSV3_MAX_FH_SIZE; + } fidlen = fhlen - sizeof(struct nfs_exphandle); - if (!nfsrv_is_initialized()) - return (EINVAL); + if (!nfsrv_is_initialized()) { + return EINVAL; + } - NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, - UIO_SYSSPACE, CAST_USER_ADDR_T(path), vfs_context_current()); + NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, + UIO_SYSSPACE, CAST_USER_ADDR_T(path), vfs_context_current()); error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } nameidone(&nd); vp = nd.ni_vp; @@ -549,8 +567,9 @@ getfh(proc_t p, struct getfh_args *uap, __unused int *retval) lck_rw_lock_shared(&nfsrv_export_rwlock); ptr = vnode_mount(vp)->mnt_vfsstat.f_mntonname; LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) { - if (!strncmp(nxfs->nxfs_path, ptr, MAXPATHLEN)) + if (!strncmp(nxfs->nxfs_path, ptr, MAXPATHLEN)) { break; + } } if (!nxfs || strncmp(nxfs->nxfs_path, path, strlen(nxfs->nxfs_path))) { error = EINVAL; @@ -558,14 +577,17 @@ getfh(proc_t p, struct getfh_args *uap, __unused int *retval) } // find export that best matches remainder of path ptr = path + strlen(nxfs->nxfs_path); - while (*ptr && (*ptr == '/')) + while (*ptr && (*ptr == '/')) { ptr++; + } LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) { int len = strlen(nx->nx_path); - if (len == 0) // we've hit the export entry for the root directory + if (len == 0) { // we've hit the export entry for the root directory break; - if (!strncmp(nx->nx_path, ptr, len)) + } + if (!strncmp(nx->nx_path, ptr, len)) { break; + } } if (!nx) { error = EINVAL; @@ -580,8 +602,9 @@ getfh(proc_t p, struct getfh_args *uap, __unused int *retval) nfh.nfh_xh.nxh_reserved = 0; nfh.nfh_len = fidlen; error = VFS_VPTOFH(vp, (int*)&nfh.nfh_len, &nfh.nfh_fid[0], NULL); - if (nfh.nfh_len > (uint32_t)fidlen) + if (nfh.nfh_len > (uint32_t)fidlen) { error = EOVERFLOW; + } nfh.nfh_xh.nxh_fidlen = nfh.nfh_len; nfh.nfh_len += sizeof(nfh.nfh_xh); nfh.nfh_fhp = (u_char*)&nfh.nfh_xh; @@ -589,15 +612,16 @@ getfh(proc_t p, struct getfh_args *uap, __unused int *retval) out: lck_rw_done(&nfsrv_export_rwlock); vnode_put(vp); - if (error) - return (error); + if (error) { + return error; + } /* * At first blush, this may appear to leak a kernel stack * address, but the copyout() never reaches &nfh.nfh_fhp * (sizeof(fhandle_t) < sizeof(nfh)). 
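 * A hedged illustration (not part of the patch): the layout assumption
 * could be made compiler-checked with something like
 *
 *     _Static_assert(offsetof(struct nfs_filehandle, nfh_fhp) >=
 *         sizeof(fhandle_t),
 *         "copyout() of sizeof(fhandle_t) bytes must stop short of nfh_fhp");
 *
 * which encodes exactly the claim above.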
*/ error = copyout((caddr_t)&nfh, uap->fhp, sizeof(fhandle_t)); - return (error); + return error; } extern const struct fileops vnops; @@ -611,8 +635,8 @@ extern const struct fileops vnops; */ int fhopen( proc_t p, - struct fhopen_args *uap, - int32_t *retval) + struct fhopen_args *uap, + int32_t *retval) { vnode_t vp; struct nfs_filehandle nfh; @@ -630,27 +654,31 @@ fhopen( proc_t p, */ error = suser(vfs_context_ucred(ctx), 0); if (error) { - return (error); + return error; } if (!nfsrv_is_initialized()) { - return (EINVAL); + return EINVAL; } fmode = FFLAGS(uap->flags); /* why not allow a non-read/write open for our lockd? */ - if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT)) - return (EINVAL); + if (((fmode & (FREAD | FWRITE)) == 0) || (fmode & O_CREAT)) { + return EINVAL; + } error = copyin(uap->u_fhp, &nfh.nfh_len, sizeof(nfh.nfh_len)); - if (error) - return (error); + if (error) { + return error; + } if ((nfh.nfh_len < (int)sizeof(struct nfs_exphandle)) || - (nfh.nfh_len > (int)NFSV3_MAX_FH_SIZE)) - return (EINVAL); + (nfh.nfh_len > (int)NFSV3_MAX_FH_SIZE)) { + return EINVAL; + } error = copyin(uap->u_fhp, &nfh, sizeof(nfh.nfh_len) + nfh.nfh_len); - if (error) - return (error); + if (error) { + return error; + } nfh.nfh_fhp = (u_char*)&nfh.nfh_xh; lck_rw_lock_shared(&nfsrv_export_rwlock); @@ -658,9 +686,10 @@ fhopen( proc_t p, error = nfsrv_fhtovp(&nfh, NULL, &vp, &nx, &nxo); lck_rw_done(&nfsrv_export_rwlock); if (error) { - if (error == NFSERR_TRYLATER) + if (error == NFSERR_TRYLATER) { error = EAGAIN; // XXX EBUSY? Or just leave as TRYLATER? - return (error); + } + return error; } /* @@ -671,11 +700,11 @@ fhopen( proc_t p, */ /* - * from vn_open - */ + * from vn_open + */ if (vnode_vtype(vp) == VSOCK) { error = EOPNOTSUPP; - goto bad; + goto bad; } /* disallow write operations on directories */ @@ -685,23 +714,29 @@ fhopen( proc_t p, } #if CONFIG_MACF - if ((error = mac_vnode_check_open(ctx, vp, fmode))) + if ((error = mac_vnode_check_open(ctx, vp, fmode))) { goto bad; + } #endif /* compute action to be authorized */ action = 0; - if (fmode & FREAD) + if (fmode & FREAD) { action |= KAUTH_VNODE_READ_DATA; - if (fmode & (FWRITE | O_TRUNC)) + } + if (fmode & (FWRITE | O_TRUNC)) { action |= KAUTH_VNODE_WRITE_DATA; - if ((error = vnode_authorize(vp, NULL, action, ctx)) != 0) + } + if ((error = vnode_authorize(vp, NULL, action, ctx)) != 0) { goto bad; + } - if ((error = VNOP_OPEN(vp, fmode, ctx))) + if ((error = VNOP_OPEN(vp, fmode, ctx))) { goto bad; - if ((error = vnode_ref_ext(vp, fmode, 0))) + } + if ((error = vnode_ref_ext(vp, fmode, 0))) { goto bad; + } /* * end of vn_open code @@ -723,13 +758,15 @@ fhopen( proc_t p, lf.l_whence = SEEK_SET; lf.l_start = 0; lf.l_len = 0; - if (fmode & O_EXLOCK) + if (fmode & O_EXLOCK) { lf.l_type = F_WRLCK; - else + } else { lf.l_type = F_RDLCK; + } type = F_FLOCK; - if ((fmode & FNONBLOCK) == 0) + if ((fmode & FNONBLOCK) == 0) { type |= F_WAIT; + } if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, ctx, NULL))) { struct vfs_context context = *vfs_context_current(); /* Modify local copy (to not damage thread copy) */ @@ -737,7 +774,7 @@ fhopen( proc_t p, vn_close(vp, fp->f_fglob->fg_flag, &context); fp_free(p, indx, fp); - return (error); + return error; } fp->f_fglob->fg_flag |= FHASLOCK; } @@ -750,11 +787,11 @@ fhopen( proc_t p, proc_fdunlock(p); *retval = indx; - return (0); + return 0; bad: vnode_put(vp); - return (error); + return error; } /* @@ -773,12 +810,14 @@ nfssvc(proc_t p, struct nfssvc_args *uap, 
__unused int *retval) /* * Must be super user for most operations (export ops checked later). */ - if ((uap->flag != NFSSVC_EXPORT) && ((error = proc_suser(p)))) - return (error); + if ((uap->flag != NFSSVC_EXPORT) && ((error = proc_suser(p)))) { + return error; + } #if CONFIG_MACF error = mac_system_check_nfsd(kauth_cred_get()); - if (error) - return (error); + if (error) { + return error; + } #endif /* make sure NFS server data structures have been initialized */ @@ -796,12 +835,14 @@ nfssvc(proc_t p, struct nfssvc_args *uap, __unused int *retval) user_nfsdarg.namelen = tmp_args.namelen; } } - if (error) - return (error); + if (error) { + return error; + } /* get the socket */ error = file_socket(user_nfsdarg.sock, &so); - if (error) - return (error); + if (error) { + return error; + } /* Get the client address for connected sockets. */ if (user_nfsdarg.name == USER_ADDR_NULL || user_nfsdarg.namelen == 0) { nam = NULL; @@ -810,7 +851,7 @@ nfssvc(proc_t p, struct nfssvc_args *uap, __unused int *retval) if (error) { /* drop the iocount file_socket() grabbed on the file descriptor */ file_drop(user_nfsdarg.sock); - return (error); + return error; } } /* @@ -828,9 +869,10 @@ nfssvc(proc_t p, struct nfssvc_args *uap, __unused int *retval) } else { error = EINVAL; } - if (error == EINTR || error == ERESTART) + if (error == EINTR || error == ERESTART) { error = 0; - return (error); + } + return error; } /* @@ -845,30 +887,33 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) struct timeval timeo; /* make sure mbuf constants are set up */ - if (!nfs_mbuf_mhlen) + if (!nfs_mbuf_mhlen) { nfs_mbuf_init(); + } sock_gettype(so, &sodomain, &sotype, &soprotocol); /* There should be only one UDP socket for each of IPv4 and IPv6 */ if ((sodomain == AF_INET) && (soprotocol == IPPROTO_UDP) && nfsrv_udpsock) { mbuf_freem(mynam); - return (EEXIST); + return EEXIST; } if ((sodomain == AF_INET6) && (soprotocol == IPPROTO_UDP) && nfsrv_udp6sock) { mbuf_freem(mynam); - return (EEXIST); + return EEXIST; } /* Set protocol options and reserve some space (for UDP). 
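 * (Editorial aside, not part of the patch: the in-kernel sock_setsockopt()
 * calls here correspond to the familiar userspace tuning
 *
 *     int on = 1, sz = ...;   // sz would be NFS_UDPSOCKBUF
 *     setsockopt(fd, SOL_SOCKET,  SO_KEEPALIVE, &on, sizeof(on));  // TCP
 *     setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,  &on, sizeof(on));  // TCP
 *     setsockopt(fd, SOL_SOCKET,  SO_SNDBUF,    &sz, sizeof(sz));  // UDP
 *     setsockopt(fd, SOL_SOCKET,  SO_RCVBUF,    &sz, sizeof(sz));  // UDP
 *
 * except that they operate on a socket_t rather than a file descriptor.)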
*/ if (sotype == SOCK_STREAM) { error = nfsrv_check_exports_allow_address(mynam); - if (error) - return (error); + if (error) { + return error; + } sock_setsockopt(so, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)); } - if ((sodomain == AF_INET) && (soprotocol == IPPROTO_TCP)) + if ((sodomain == AF_INET) && (soprotocol == IPPROTO_TCP)) { sock_setsockopt(so, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); + } if (sotype == SOCK_DGRAM) { /* set socket buffer sizes for UDP */ int reserve = NFS_UDPSOCKBUF; error |= sock_setsockopt(so, SOL_SOCKET, SO_SNDBUF, &reserve, sizeof(reserve)); @@ -898,9 +943,9 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) MALLOC(slp, struct nfsrv_sock *, sizeof(struct nfsrv_sock), M_NFSSVC, M_WAITOK); if (!slp) { mbuf_freem(mynam); - return (ENOMEM); + return ENOMEM; } - bzero((caddr_t)slp, sizeof (struct nfsrv_sock)); + bzero((caddr_t)slp, sizeof(struct nfsrv_sock)); lck_rw_init(&slp->ns_rwlock, nfsrv_slp_rwlock_group, LCK_ATTR_NULL); lck_mtx_init(&slp->ns_wgmutex, nfsrv_slp_mutex_group, LCK_ATTR_NULL); @@ -913,7 +958,7 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) lck_mtx_unlock(nfsd_mutex); nfsrv_slpfree(slp); mbuf_freem(mynam); - return (EEXIST); + return EEXIST; } nfsrv_udpsock = slp; } @@ -923,7 +968,7 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) lck_mtx_unlock(nfsd_mutex); nfsrv_slpfree(slp); mbuf_freem(mynam); - return (EEXIST); + return EEXIST; } nfsrv_udp6sock = slp; } @@ -934,10 +979,12 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) TAILQ_INSERT_TAIL(&nfsrv_socklist, slp, ns_chain); if (soprotocol == IPPROTO_TCP) { nfsrv_sock_tcp_cnt++; - if (nfsrv_sock_idle_timeout < 0) + if (nfsrv_sock_idle_timeout < 0) { nfsrv_sock_idle_timeout = 0; - if (nfsrv_sock_idle_timeout && (nfsrv_sock_idle_timeout < NFSD_MIN_IDLE_TIMEOUT)) + } + if (nfsrv_sock_idle_timeout && (nfsrv_sock_idle_timeout < NFSD_MIN_IDLE_TIMEOUT)) { nfsrv_sock_idle_timeout = NFSD_MIN_IDLE_TIMEOUT; + } /* * Possibly start or stop the idle timer. We only start the idle timer when * we have more than 2 * nfsd_thread_max connections. If the idle timer is @@ -963,8 +1010,9 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) if (old_slp->ns_sotype == SOCK_STREAM) { microuptime(&now); time_to_wait -= now.tv_sec - old_slp->ns_timestamp; - if (time_to_wait < 1) + if (time_to_wait < 1) { time_to_wait = 1; + } break; } } @@ -1001,7 +1049,7 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) nfsrv_wakenfsd(slp); lck_mtx_unlock(nfsd_mutex); - return (0); + return 0; } /* @@ -1013,7 +1061,7 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) * which are then added via the "addsock" call. The rest of the nfsd threads * simply call into the kernel and remain there in a loop handling NFS * requests until killed by a signal. - * + * * There's a list of nfsd threads (nfsd_head). * There's an nfsd queue that contains only those nfsds that are * waiting for work to do (nfsd_queue). @@ -1035,7 +1083,7 @@ nfssvc_addsock(socket_t so, mbuf_t mynam) * then check the "work" queue. * When an nfsd starts working on a socket, it removes it from the head of * the queue it's currently on and moves it to the end of the "work" queue. - * When nfsds are checking the queues for work, any sockets found not to + * When nfsds are checking the queues for work, any sockets found not to * have any work are simply dropped from the queue. 
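 * A minimal sketch of that queue discipline (editorial illustration only;
 * the real loop below also handles wakeups, reference counts, timestamps
 * and socket validity):
 *
 *     struct nfsrv_sock *slp;
 *     if ((slp = TAILQ_FIRST(&nfsrv_sockwait)))       // new sockets first
 *             TAILQ_REMOVE(&nfsrv_sockwait, slp, ns_svcq);
 *     else if ((slp = TAILQ_FIRST(&nfsrv_sockwork)))  // then known workers
 *             TAILQ_REMOVE(&nfsrv_sockwork, slp, ns_svcq);
 *     if (slp)                                        // work it at the tail
 *             TAILQ_INSERT_TAIL(&nfsrv_sockwork, slp, ns_svcq);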
* */ @@ -1059,13 +1107,14 @@ nfssvc_nfsd(void) #endif MALLOC(nfsd, struct nfsd *, sizeof(struct nfsd), M_NFSD, M_WAITOK); - if (!nfsd) - return (ENOMEM); + if (!nfsd) { + return ENOMEM; + } bzero(nfsd, sizeof(struct nfsd)); lck_mtx_lock(nfsd_mutex); - if (nfsd_thread_count++ == 0) - nfsrv_initcache(); /* Init the server request cache */ - + if (nfsd_thread_count++ == 0) { + nfsrv_initcache(); /* Init the server request cache */ + } TAILQ_INSERT_TAIL(&nfsd_head, nfsd, nfsd_chain); lck_mtx_unlock(nfsd_mutex); @@ -1108,8 +1157,9 @@ nfssvc_nfsd(void) TAILQ_REMOVE(&nfsd_queue, nfsd, nfsd_queue); nfsd->nfsd_flag &= ~NFSD_WAITING; } - if (error == EWOULDBLOCK) + if (error == EWOULDBLOCK) { continue; + } goto done; } } @@ -1121,8 +1171,9 @@ nfssvc_nfsd(void) /* remove from the head of the queue */ TAILQ_REMOVE(&nfsrv_sockwait, slp, ns_svcq); slp->ns_flag &= ~SLP_WAITQ; - if ((slp->ns_flag & SLP_VALID) && (slp->ns_flag & SLP_WORKTODO)) + if ((slp->ns_flag & SLP_VALID) && (slp->ns_flag & SLP_WORKTODO)) { break; + } /* nothing to do, so skip this socket */ lck_rw_done(&slp->ns_rwlock); } @@ -1134,8 +1185,9 @@ nfssvc_nfsd(void) /* remove from the head of the queue */ TAILQ_REMOVE(&nfsrv_sockwork, slp, ns_svcq); slp->ns_flag &= ~SLP_WORKQ; - if ((slp->ns_flag & SLP_VALID) && (slp->ns_flag & SLP_WORKTODO)) + if ((slp->ns_flag & SLP_VALID) && (slp->ns_flag & SLP_WORKTODO)) { break; + } /* nothing to do, so skip this socket */ lck_rw_done(&slp->ns_rwlock); } @@ -1156,26 +1208,29 @@ nfssvc_nfsd(void) lck_rw_done(&slp->ns_rwlock); } lck_mtx_unlock(nfsd_mutex); - if (!slp) + if (!slp) { continue; + } lck_rw_lock_exclusive(&slp->ns_rwlock); if (slp->ns_flag & SLP_VALID) { - if ((slp->ns_flag & (SLP_NEEDQ|SLP_DISCONN)) == SLP_NEEDQ) { + if ((slp->ns_flag & (SLP_NEEDQ | SLP_DISCONN)) == SLP_NEEDQ) { slp->ns_flag &= ~SLP_NEEDQ; nfsrv_rcv_locked(slp->ns_so, slp, MBUF_WAITOK); } - if (slp->ns_flag & SLP_DISCONN) + if (slp->ns_flag & SLP_DISCONN) { nfsrv_zapsock(slp); + } error = nfsrv_dorec(slp, nfsd, &nd); - if (error == EINVAL) { // RPCSEC_GSS drop - if (slp->ns_sotype == SOCK_STREAM) + if (error == EINVAL) { // RPCSEC_GSS drop + if (slp->ns_sotype == SOCK_STREAM) { nfsrv_zapsock(slp); // drop connection + } } writes_todo = 0; if (error && (slp->ns_wgtime || (slp->ns_flag & SLP_DOWRITES))) { microuptime(&now); cur_usec = (u_quad_t)now.tv_sec * 1000000 + - (u_quad_t)now.tv_usec; + (u_quad_t)now.tv_usec; if (slp->ns_wgtime <= cur_usec) { error = 0; cacherep = RC_DOIT; @@ -1190,48 +1245,54 @@ nfssvc_nfsd(void) if (error || (slp && !(slp->ns_flag & SLP_VALID))) { if (nd) { nfsm_chain_cleanup(&nd->nd_nmreq); - if (nd->nd_nam2) + if (nd->nd_nam2) { mbuf_freem(nd->nd_nam2); - if (IS_VALID_CRED(nd->nd_cr)) + } + if (IS_VALID_CRED(nd->nd_cr)) { kauth_cred_unref(&nd->nd_cr); - if (nd->nd_gss_context) + } + if (nd->nd_gss_context) { nfs_gss_svc_ctx_deref(nd->nd_gss_context); + } FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); nd = NULL; } nfsd->nfsd_slp = NULL; nfsd->nfsd_flag &= ~NFSD_REQINPROG; - if (slp) + if (slp) { nfsrv_slpderef(slp); - if (nfsd_thread_max <= 0) + } + if (nfsd_thread_max <= 0) { break; + } continue; } if (nd) { - microuptime(&nd->nd_starttime); - if (nd->nd_nam2) - nd->nd_nam = nd->nd_nam2; - else - nd->nd_nam = slp->ns_nam; - - cacherep = nfsrv_getcache(nd, slp, &mrep); - - if (nfsrv_require_resv_port) { - /* Check if source port is a reserved port */ - in_port_t port = 0; - struct sockaddr *saddr = mbuf_data(nd->nd_nam); - - if (saddr->sa_family == AF_INET) - port = ntohs(((struct 
sockaddr_in*)saddr)->sin_port); - else if (saddr->sa_family == AF_INET6) - port = ntohs(((struct sockaddr_in6*)saddr)->sin6_port); - if ((port >= IPPORT_RESERVED) && (nd->nd_procnum != NFSPROC_NULL)) { - nd->nd_procnum = NFSPROC_NOOP; - nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK); - cacherep = RC_DOIT; + microuptime(&nd->nd_starttime); + if (nd->nd_nam2) { + nd->nd_nam = nd->nd_nam2; + } else { + nd->nd_nam = slp->ns_nam; } - } + cacherep = nfsrv_getcache(nd, slp, &mrep); + + if (nfsrv_require_resv_port) { + /* Check if source port is a reserved port */ + in_port_t port = 0; + struct sockaddr *saddr = mbuf_data(nd->nd_nam); + + if (saddr->sa_family == AF_INET) { + port = ntohs(((struct sockaddr_in*)saddr)->sin_port); + } else if (saddr->sa_family == AF_INET6) { + port = ntohs(((struct sockaddr_in6*)saddr)->sin6_port); + } + if ((port >= IPPORT_RESERVED) && (nd->nd_procnum != NFSPROC_NULL)) { + nd->nd_procnum = NFSPROC_NOOP; + nd->nd_repstat = (NFSERR_AUTHERR | AUTH_TOOWEAK); + cacherep = RC_DOIT; + } + } } /* @@ -1239,160 +1300,170 @@ nfssvc_nfsd(void) * gathered together. */ do { - switch (cacherep) { - case RC_DOIT: - if (nd && (nd->nd_vers == NFS_VER3)) - procrastinate = nfsrv_wg_delay_v3; - else - procrastinate = nfsrv_wg_delay; - lck_rw_lock_shared(&nfsrv_export_rwlock); - context.vc_ucred = NULL; - if (writes_todo || ((nd->nd_procnum == NFSPROC_WRITE) && (procrastinate > 0))) - error = nfsrv_writegather(&nd, slp, &context, &mrep); - else - error = (*(nfsrv_procs[nd->nd_procnum]))(nd, slp, &context, &mrep); - lck_rw_done(&nfsrv_export_rwlock); - if (mrep == NULL) { - /* - * If this is a stream socket and we are not going - * to send a reply we better close the connection - * so the client doesn't hang. - */ - if (error && slp->ns_sotype == SOCK_STREAM) { - lck_rw_lock_exclusive(&slp->ns_rwlock); - nfsrv_zapsock(slp); - lck_rw_done(&slp->ns_rwlock); - printf("NFS server: NULL reply from proc = %d error = %d\n", - nd->nd_procnum, error); + switch (cacherep) { + case RC_DOIT: + if (nd && (nd->nd_vers == NFS_VER3)) { + procrastinate = nfsrv_wg_delay_v3; + } else { + procrastinate = nfsrv_wg_delay; } - break; - - } - if (error) { - OSAddAtomic64(1, &nfsstats.srv_errs); - nfsrv_updatecache(nd, FALSE, mrep); - if (nd->nd_nam2) { - mbuf_freem(nd->nd_nam2); - nd->nd_nam2 = NULL; + lck_rw_lock_shared(&nfsrv_export_rwlock); + context.vc_ucred = NULL; + if (writes_todo || ((nd->nd_procnum == NFSPROC_WRITE) && (procrastinate > 0))) { + error = nfsrv_writegather(&nd, slp, &context, &mrep); + } else { + error = (*(nfsrv_procs[nd->nd_procnum]))(nd, slp, &context, &mrep); } - break; - } - OSAddAtomic64(1, &nfsstats.srvrpccnt[nd->nd_procnum]); - nfsrv_updatecache(nd, TRUE, mrep); + lck_rw_done(&nfsrv_export_rwlock); + if (mrep == NULL) { + /* + * If this is a stream socket and we are not going + * to send a reply we better close the connection + * so the client doesn't hang. 
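 * (Editorial note: on a TCP transport the client blocks waiting for the
 * next record on the stream, so silently dropping a request would wedge
 * it; tearing the socket down instead forces a reconnect and retry.)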
+ */ + if (error && slp->ns_sotype == SOCK_STREAM) { + lck_rw_lock_exclusive(&slp->ns_rwlock); + nfsrv_zapsock(slp); + lck_rw_done(&slp->ns_rwlock); + printf("NFS server: NULL reply from proc = %d error = %d\n", + nd->nd_procnum, error); + } + break; + } + if (error) { + OSAddAtomic64(1, &nfsstats.srv_errs); + nfsrv_updatecache(nd, FALSE, mrep); + if (nd->nd_nam2) { + mbuf_freem(nd->nd_nam2); + nd->nd_nam2 = NULL; + } + break; + } + OSAddAtomic64(1, &nfsstats.srvrpccnt[nd->nd_procnum]); + nfsrv_updatecache(nd, TRUE, mrep); /* FALLTHRU */ - case RC_REPLY: - if (nd->nd_gss_mb != NULL) { // It's RPCSEC_GSS + case RC_REPLY: + if (nd->nd_gss_mb != NULL) { // It's RPCSEC_GSS + /* + * Need to checksum or encrypt the reply + */ + error = nfs_gss_svc_protect_reply(nd, mrep); + if (error) { + mbuf_freem(mrep); + break; + } + } + /* - * Need to checksum or encrypt the reply + * Get the total size of the reply */ - error = nfs_gss_svc_protect_reply(nd, mrep); + m = mrep; + siz = 0; + while (m) { + siz += mbuf_len(m); + m = mbuf_next(m); + } + if (siz <= 0 || siz > NFS_MAXPACKET) { + printf("mbuf siz=%d\n", siz); + panic("Bad nfs svc reply"); + } + m = mrep; + mbuf_pkthdr_setlen(m, siz); + error = mbuf_pkthdr_setrcvif(m, NULL); if (error) { - mbuf_freem(mrep); - break; + panic("nfsd setrcvif failed: %d", error); } - } - - /* - * Get the total size of the reply - */ - m = mrep; - siz = 0; - while (m) { - siz += mbuf_len(m); - m = mbuf_next(m); - } - if (siz <= 0 || siz > NFS_MAXPACKET) { - printf("mbuf siz=%d\n",siz); - panic("Bad nfs svc reply"); - } - m = mrep; - mbuf_pkthdr_setlen(m, siz); - error = mbuf_pkthdr_setrcvif(m, NULL); - if (error) - panic("nfsd setrcvif failed: %d", error); - /* - * For stream protocols, prepend a Sun RPC - * Record Mark. - */ - if (slp->ns_sotype == SOCK_STREAM) { - error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK); - if (!error) - *(u_int32_t*)mbuf_data(m) = htonl(0x80000000 | siz); - } - if (!error) { - if (slp->ns_flag & SLP_VALID) { - error = nfsrv_send(slp, nd->nd_nam2, m); + /* + * For stream protocols, prepend a Sun RPC + * Record Mark. 
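 * (Editorial aside, not part of the patch: this is RFC 1831 record
 * marking. The high bit of the 4-byte mark flags the last fragment and
 * the low 31 bits carry the fragment length, which is why the code
 * below prepends
 *
 *     *(u_int32_t*)mbuf_data(m) = htonl(0x80000000 | siz);
 *
 * i.e. every reply is sent as a single, final fragment.)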
+ */ + if (slp->ns_sotype == SOCK_STREAM) { + error = mbuf_prepend(&m, NFSX_UNSIGNED, MBUF_WAITOK); + if (!error) { + *(u_int32_t*)mbuf_data(m) = htonl(0x80000000 | siz); + } + } + if (!error) { + if (slp->ns_flag & SLP_VALID) { + error = nfsrv_send(slp, nd->nd_nam2, m); + } else { + error = EPIPE; + mbuf_freem(m); + } } else { - error = EPIPE; - mbuf_freem(m); + mbuf_freem(m); } - } else { - mbuf_freem(m); - } - mrep = NULL; - if (nd->nd_nam2) { + mrep = NULL; + if (nd->nd_nam2) { + mbuf_freem(nd->nd_nam2); + nd->nd_nam2 = NULL; + } + if (error == EPIPE) { + lck_rw_lock_exclusive(&slp->ns_rwlock); + nfsrv_zapsock(slp); + lck_rw_done(&slp->ns_rwlock); + } + if (error == EINTR || error == ERESTART) { + nfsm_chain_cleanup(&nd->nd_nmreq); + if (IS_VALID_CRED(nd->nd_cr)) { + kauth_cred_unref(&nd->nd_cr); + } + if (nd->nd_gss_context) { + nfs_gss_svc_ctx_deref(nd->nd_gss_context); + } + FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); + nfsrv_slpderef(slp); + lck_mtx_lock(nfsd_mutex); + goto done; + } + break; + case RC_DROPIT: mbuf_freem(nd->nd_nam2); nd->nd_nam2 = NULL; + break; } - if (error == EPIPE) { - lck_rw_lock_exclusive(&slp->ns_rwlock); - nfsrv_zapsock(slp); - lck_rw_done(&slp->ns_rwlock); - } - if (error == EINTR || error == ERESTART) { + ; + opcnt++; + if (nd) { nfsm_chain_cleanup(&nd->nd_nmreq); - if (IS_VALID_CRED(nd->nd_cr)) + if (nd->nd_nam2) { + mbuf_freem(nd->nd_nam2); + } + if (IS_VALID_CRED(nd->nd_cr)) { kauth_cred_unref(&nd->nd_cr); - if (nd->nd_gss_context) + } + if (nd->nd_gss_context) { nfs_gss_svc_ctx_deref(nd->nd_gss_context); + } FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); - nfsrv_slpderef(slp); - lck_mtx_lock(nfsd_mutex); - goto done; + nd = NULL; } - break; - case RC_DROPIT: - mbuf_freem(nd->nd_nam2); - nd->nd_nam2 = NULL; - break; - }; - opcnt++; - if (nd) { - nfsm_chain_cleanup(&nd->nd_nmreq); - if (nd->nd_nam2) - mbuf_freem(nd->nd_nam2); - if (IS_VALID_CRED(nd->nd_cr)) - kauth_cred_unref(&nd->nd_cr); - if (nd->nd_gss_context) - nfs_gss_svc_ctx_deref(nd->nd_gss_context); - FREE_ZONE(nd, sizeof(*nd), M_NFSRVDESC); - nd = NULL; - } - - /* - * Check to see if there are outstanding writes that - * need to be serviced. - */ - writes_todo = 0; - if (slp->ns_wgtime) { - microuptime(&now); - cur_usec = (u_quad_t)now.tv_sec * 1000000 + - (u_quad_t)now.tv_usec; - if (slp->ns_wgtime <= cur_usec) { - cacherep = RC_DOIT; - writes_todo = 1; + + /* + * Check to see if there are outstanding writes that + * need to be serviced. 
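 * (Editorial note: ns_wgtime holds the gathered-write deadline in
 * microseconds of uptime, so the code below flattens microuptime() into
 * cur_usec = tv_sec * 1000000 + tv_usec and the "writes due" test
 * becomes a single integer comparison.)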
+ */ + writes_todo = 0; + if (slp->ns_wgtime) { + microuptime(&now); + cur_usec = (u_quad_t)now.tv_sec * 1000000 + + (u_quad_t)now.tv_usec; + if (slp->ns_wgtime <= cur_usec) { + cacherep = RC_DOIT; + writes_todo = 1; + } } - } } while (writes_todo); nd = NULL; if (TAILQ_EMPTY(&nfsrv_sockwait) && (opcnt < 8)) { lck_rw_lock_exclusive(&slp->ns_rwlock); error = nfsrv_dorec(slp, nfsd, &nd); - if (error == EINVAL) { // RPCSEC_GSS drop - if (slp->ns_sotype == SOCK_STREAM) + if (error == EINVAL) { // RPCSEC_GSS drop + if (slp->ns_sotype == SOCK_STREAM) { nfsrv_zapsock(slp); // drop connection + } } lck_rw_done(&slp->ns_rwlock); } @@ -1407,10 +1478,11 @@ nfssvc_nfsd(void) done: TAILQ_REMOVE(&nfsd_head, nfsd, nfsd_chain); FREE(nfsd, M_NFSD); - if (--nfsd_thread_count == 0) + if (--nfsd_thread_count == 0) { nfsrv_cleanup(); + } lck_mtx_unlock(nfsd_mutex); - return (error); + return error; } int @@ -1439,12 +1511,13 @@ nfssvc_export(user_addr_t argp) unxa.nxa_nets = CAST_USER_ADDR_T(tnxa.nxa_nets); } } - if (error) - return (error); + if (error) { + return error; + } error = nfsrv_export(&unxa, ctx); - return (error); + return error; } /* @@ -1459,13 +1532,15 @@ nfsrv_zapsock(struct nfsrv_sock *slp) { socket_t so; - if ((slp->ns_flag & SLP_VALID) == 0) + if ((slp->ns_flag & SLP_VALID) == 0) { return; + } slp->ns_flag &= ~SLP_ALLFLAGS; so = slp->ns_so; - if (so == NULL) + if (so == NULL) { return; + } sock_setupcall(so, NULL, NULL); sock_shutdown(so, SHUT_RDWR); @@ -1488,14 +1563,18 @@ nfsrv_slpfree(struct nfsrv_sock *slp) sock_release(slp->ns_so); slp->ns_so = NULL; } - if (slp->ns_nam) + if (slp->ns_nam) { mbuf_free(slp->ns_nam); - if (slp->ns_raw) + } + if (slp->ns_raw) { mbuf_freem(slp->ns_raw); - if (slp->ns_rec) + } + if (slp->ns_rec) { mbuf_freem(slp->ns_rec); - if (slp->ns_frag) + } + if (slp->ns_frag) { mbuf_freem(slp->ns_frag); + } slp->ns_nam = slp->ns_raw = slp->ns_rec = slp->ns_frag = NULL; slp->ns_reccnt = 0; @@ -1503,14 +1582,18 @@ nfsrv_slpfree(struct nfsrv_sock *slp) nnwp = nwp->nd_tq.le_next; LIST_REMOVE(nwp, nd_tq); nfsm_chain_cleanup(&nwp->nd_nmreq); - if (nwp->nd_mrep) + if (nwp->nd_mrep) { mbuf_freem(nwp->nd_mrep); - if (nwp->nd_nam2) + } + if (nwp->nd_nam2) { mbuf_freem(nwp->nd_nam2); - if (IS_VALID_CRED(nwp->nd_cr)) + } + if (IS_VALID_CRED(nwp->nd_cr)) { kauth_cred_unref(&nwp->nd_cr); - if (nwp->nd_gss_context) + } + if (nwp->nd_gss_context) { nfs_gss_svc_ctx_deref(nwp->nd_gss_context); + } FREE_ZONE(nwp, sizeof(*nwp), M_NFSRVDESC); } LIST_INIT(&slp->ns_tq); @@ -1533,10 +1616,11 @@ nfsrv_slpderef_locked(struct nfsrv_sock *slp) if (slp->ns_sref || (slp->ns_flag & SLP_VALID)) { if ((slp->ns_flag & SLP_QUEUED) && !(slp->ns_flag & SLP_WORKTODO)) { /* remove socket from queue since there's no work */ - if (slp->ns_flag & SLP_WAITQ) + if (slp->ns_flag & SLP_WAITQ) { TAILQ_REMOVE(&nfsrv_sockwait, slp, ns_svcq); - else + } else { TAILQ_REMOVE(&nfsrv_sockwork, slp, ns_svcq); + } slp->ns_flag &= ~SLP_QUEUED; } lck_rw_done(&slp->ns_rwlock); @@ -1546,19 +1630,21 @@ nfsrv_slpderef_locked(struct nfsrv_sock *slp) /* This socket is no longer valid, so we'll get rid of it */ if (slp->ns_flag & SLP_QUEUED) { - if (slp->ns_flag & SLP_WAITQ) + if (slp->ns_flag & SLP_WAITQ) { TAILQ_REMOVE(&nfsrv_sockwait, slp, ns_svcq); - else + } else { TAILQ_REMOVE(&nfsrv_sockwork, slp, ns_svcq); + } slp->ns_flag &= ~SLP_QUEUED; } lck_rw_done(&slp->ns_rwlock); TAILQ_REMOVE(&nfsrv_socklist, slp, ns_chain); - if (slp->ns_sotype == SOCK_STREAM) + if (slp->ns_sotype == SOCK_STREAM) { nfsrv_sock_tcp_cnt--; + } - /* now 
remove from the write gather socket list */ + /* now remove from the write gather socket list */ if (slp->ns_wgq.tqe_next != SLPNOLIST) { TAILQ_REMOVE(&nfsrv_sockwg, slp, ns_wgq); slp->ns_wgq.tqe_next = SLPNOLIST; @@ -1589,8 +1675,9 @@ nfsrv_idlesock_timer(__unused void *param0, __unused void *param1) lck_mtx_lock(nfsd_mutex); /* Turn off the timer if we're suppose to and get out */ - if (nfsrv_sock_idle_timeout < NFSD_MIN_IDLE_TIMEOUT) - nfsrv_sock_idle_timeout = 0; + if (nfsrv_sock_idle_timeout < NFSD_MIN_IDLE_TIMEOUT) { + nfsrv_sock_idle_timeout = 0; + } if ((nfsrv_sock_tcp_cnt <= 2 * nfsd_thread_max) || (nfsrv_sock_idle_timeout == 0)) { nfsrv_idlesock_timer_on = 0; lck_mtx_unlock(nfsd_mutex); @@ -1611,11 +1698,12 @@ nfsrv_idlesock_timer(__unused void *param0, __unused void *param1) * is sorted oldest access to newest. Once we find the first one, * we're done and break out of the loop. */ - if (((slp->ns_timestamp + nfsrv_sock_idle_timeout) > now.tv_sec) || - nfsrv_sock_tcp_cnt <= 2 * nfsd_thread_max) { + if (((slp->ns_timestamp + nfsrv_sock_idle_timeout) > now.tv_sec) || + nfsrv_sock_tcp_cnt <= 2 * nfsd_thread_max) { time_to_wait -= now.tv_sec - slp->ns_timestamp; - if (time_to_wait < 1) + if (time_to_wait < 1) { time_to_wait = 1; + } lck_rw_done(&slp->ns_rwlock); break; } @@ -1654,8 +1742,9 @@ nfsrv_cleanup(void) nslp = TAILQ_NEXT(slp, ns_chain); lck_rw_lock_exclusive(&slp->ns_rwlock); slp->ns_sref++; - if (slp->ns_flag & SLP_VALID) + if (slp->ns_flag & SLP_VALID) { nfsrv_zapsock(slp); + } lck_rw_done(&slp->ns_rwlock); nfsrv_slpderef_locked(slp); } @@ -1674,8 +1763,8 @@ nfsrv_cleanup(void) if (nfsrv_fsevents_enabled) { fp->fm_context.vc_thread = current_thread(); add_fsevent(FSE_CONTENT_MODIFIED, &fp->fm_context, - FSE_ARG_VNODE, fp->fm_vp, - FSE_ARG_DONE); + FSE_ARG_VNODE, fp->fm_vp, + FSE_ARG_DONE); } vnode_put(fp->fm_vp); kauth_cred_unref(&fp->fm_context.vc_ucred); @@ -1689,10 +1778,10 @@ nfsrv_cleanup(void) #endif nfsrv_uc_cleanup(); /* Stop nfs socket up-call threads */ - - nfs_gss_svc_cleanup(); /* Remove any RPCSEC_GSS contexts */ - nfsrv_cleancache(); /* And clear out server cache */ + nfs_gss_svc_cleanup(); /* Remove any RPCSEC_GSS contexts */ + + nfsrv_cleancache(); /* And clear out server cache */ nfsrv_udpsock = NULL; nfsrv_udp6sock = NULL; diff --git a/bsd/nfs/nfs_upcall.c b/bsd/nfs/nfs_upcall.c index bc71aa0a9..b6dced906 100644 --- a/bsd/nfs/nfs_upcall.c +++ b/bsd/nfs/nfs_upcall.c @@ -54,7 +54,7 @@ struct nfsrv_uc_arg { uint32_t nua_flags; uint32_t nua_qi; }; -#define NFS_UC_QUEUED 0x0001 +#define NFS_UC_QUEUED 0x0001 #define NFS_UC_HASH_SZ 7 #define NFS_UC_HASH(x) ((((uint32_t)(uintptr_t)(x)) >> 3) % nfsrv_uc_thread_count) @@ -62,12 +62,12 @@ struct nfsrv_uc_arg { TAILQ_HEAD(nfsrv_uc_q, nfsrv_uc_arg); static struct nfsrv_uc_queue { - lck_mtx_t *ucq_lock; - struct nfsrv_uc_q ucq_queue[1]; - thread_t ucq_thd; - uint32_t ucq_flags; + lck_mtx_t *ucq_lock; + struct nfsrv_uc_q ucq_queue[1]; + thread_t ucq_thd; + uint32_t ucq_flags; } nfsrv_uc_queue_tbl[NFS_UC_HASH_SZ]; -#define NFS_UC_QUEUE_SLEEPING 0x0001 +#define NFS_UC_QUEUE_SLEEPING 0x0001 static lck_grp_t *nfsrv_uc_group; static lck_mtx_t *nfsrv_uc_shutdown_lock; @@ -152,8 +152,9 @@ nfsrv_uc_dequeue(struct nfsrv_sock *slp) * is shutting down so no need for acquiring the lock to check that * the flag is cleared. 
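 * (Editorial note: this is the unlocked fast-path test followed by a
 * locked re-check. A clear NFS_UC_QUEUED read without the lock is stable
 * here because nothing can queue the entry once the up-call threads are
 * shutting down, while a set flag must be re-tested under ucq_lock
 * before the entry is unlinked.)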
*/ - if (ap == NULL || (ap->nua_flags & NFS_UC_QUEUED) == 0) + if (ap == NULL || (ap->nua_flags & NFS_UC_QUEUED) == 0) { return; + } /* If we're queued we might race with nfsrv_uc_thread */ lck_mtx_lock(myqueue->ucq_lock); if (ap->nua_flags & NFS_UC_QUEUED) { @@ -162,7 +163,7 @@ nfsrv_uc_dequeue(struct nfsrv_sock *slp) ap->nua_flags &= ~NFS_UC_QUEUED; #ifdef NFS_UC_Q_DEBUG OSDecrementAtomic(&nfsrv_uc_queue_count); -#endif +#endif } FREE(slp->ns_ua, M_TEMP); slp->ns_ua = NULL; @@ -198,15 +199,17 @@ nfsrv_uc_start(void) int error; #ifdef NFS_UC_Q_DEBUG - if (!nfsrv_uc_use_proxy) + if (!nfsrv_uc_use_proxy) { return; + } #endif DPRINT("nfsrv_uc_start\n"); /* Wait until previous shutdown finishes */ lck_mtx_lock(nfsrv_uc_shutdown_lock); - while (nfsrv_uc_shutdown || nfsrv_uc_thread_count > 0) + while (nfsrv_uc_shutdown || nfsrv_uc_thread_count > 0) { msleep(&nfsrv_uc_thread_count, nfsrv_uc_shutdown_lock, PSOCK, "nfsd_upcall_shutdown_wait", NULL); + } /* Start up-call threads */ for (i = 0; i < NFS_UC_HASH_SZ; i++) { @@ -252,13 +255,15 @@ nfsrv_uc_stop(void) /* Wait until they are done shutting down */ lck_mtx_lock(nfsrv_uc_shutdown_lock); - while (nfsrv_uc_thread_count > 0) + while (nfsrv_uc_thread_count > 0) { msleep(&nfsrv_uc_thread_count, nfsrv_uc_shutdown_lock, PSOCK, "nfsd_upcall_shutdown_stop", NULL); + } /* Deallocate old threads */ for (i = 0; i < nfsrv_uc_thread_count; i++) { - if (nfsrv_uc_queue_tbl[i].ucq_thd != THREAD_NULL) + if (nfsrv_uc_queue_tbl[i].ucq_thd != THREAD_NULL) { thread_deallocate(nfsrv_uc_queue_tbl[i].ucq_thd); + } nfsrv_uc_queue_tbl[i].ucq_thd = THREAD_NULL; } @@ -328,16 +333,18 @@ nfsrv_uc_proxy(socket_t so, void *arg, int waitflag) TAILQ_INSERT_TAIL(myqueue->ucq_queue, uap, nua_svcq); uap->nua_flags |= NFS_UC_QUEUED; - if (myqueue->ucq_flags | NFS_UC_QUEUE_SLEEPING) + if (myqueue->ucq_flags | NFS_UC_QUEUE_SLEEPING) { wakeup(myqueue); + } #ifdef NFS_UC_Q_DEBUG { uint32_t count = OSIncrementAtomic(&nfsrv_uc_queue_count); - + /* This is a bit racey but just for debug */ - if (count > nfsrv_uc_queue_max_seen) + if (count > nfsrv_uc_queue_max_seen) { nfsrv_uc_queue_max_seen = count; + } if (nfsrv_uc_queue_limit && count > nfsrv_uc_queue_limit) { panic("nfsd up-call queue limit exceeded\n"); @@ -359,8 +366,9 @@ nfsrv_uc_addsock(struct nfsrv_sock *slp, int start) int on = 1; struct nfsrv_uc_arg *arg; - if (start && nfsrv_uc_thread_count == 0) + if (start && nfsrv_uc_thread_count == 0) { nfsrv_uc_start(); + } /* * We don't take a lock since once we're up nfsrv_uc_thread_count does @@ -368,9 +376,10 @@ nfsrv_uc_addsock(struct nfsrv_sock *slp, int start) * generate up-calls. */ if (nfsrv_uc_thread_count) { - MALLOC(arg, struct nfsrv_uc_arg *, sizeof (struct nfsrv_uc_arg), M_TEMP, M_WAITOK | M_ZERO); - if (arg == NULL) + MALLOC(arg, struct nfsrv_uc_arg *, sizeof(struct nfsrv_uc_arg), M_TEMP, M_WAITOK | M_ZERO); + if (arg == NULL) { goto direct; + } slp->ns_ua = arg; arg->nua_slp = slp; @@ -389,4 +398,3 @@ direct: return; } - diff --git a/bsd/nfs/nfs_vfsops.c b/bsd/nfs/nfs_vfsops.c index 17c51b7da..1ac2b3bd5 100644 --- a/bsd/nfs/nfs_vfsops.c +++ b/bsd/nfs/nfs_vfsops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -140,7 +140,7 @@ lck_mtx_t *nfs_request_mutex; thread_call_t nfs_request_timer_call; int nfs_request_timer_on; u_int32_t nfs_xid = 0; -u_int32_t nfs_xidwrap = 0; /* to build a (non-wrapping) 64 bit xid */ +u_int32_t nfs_xidwrap = 0; /* to build a (non-wrapping) 64 bit xid */ thread_call_t nfs_buf_timer_call; @@ -177,32 +177,32 @@ int nfs_tprintf_initial_delay = NFS_TPRINTF_INITIAL_DELAY; int nfs_tprintf_delay = NFS_TPRINTF_DELAY; -int mountnfs(char *, mount_t, vfs_context_t, vnode_t *); -static int nfs_mount_diskless(struct nfs_dlmount *, const char *, int, vnode_t *, mount_t *, vfs_context_t); +int mountnfs(char *, mount_t, vfs_context_t, vnode_t *); +static int nfs_mount_diskless(struct nfs_dlmount *, const char *, int, vnode_t *, mount_t *, vfs_context_t); #if !defined(NO_MOUNT_PRIVATE) -static int nfs_mount_diskless_private(struct nfs_dlmount *, const char *, int, vnode_t *, mount_t *, vfs_context_t); +static int nfs_mount_diskless_private(struct nfs_dlmount *, const char *, int, vnode_t *, mount_t *, vfs_context_t); #endif /* NO_MOUNT_PRIVATE */ -int nfs_mount_connect(struct nfsmount *); -void nfs_mount_drain_and_cleanup(struct nfsmount *); -void nfs_mount_cleanup(struct nfsmount *); -int nfs_mountinfo_assemble(struct nfsmount *, struct xdrbuf *); -int nfs4_mount_update_path_with_symlink(struct nfsmount *, struct nfs_fs_path *, uint32_t, fhandle_t *, int *, fhandle_t *, vfs_context_t); +int nfs_mount_connect(struct nfsmount *); +void nfs_mount_drain_and_cleanup(struct nfsmount *); +void nfs_mount_cleanup(struct nfsmount *); +int nfs_mountinfo_assemble(struct nfsmount *, struct xdrbuf *); +int nfs4_mount_update_path_with_symlink(struct nfsmount *, struct nfs_fs_path *, uint32_t, fhandle_t *, int *, fhandle_t *, vfs_context_t); /* * NFS VFS operations. 
*/ -int nfs_vfs_mount(mount_t, vnode_t, user_addr_t, vfs_context_t); -int nfs_vfs_start(mount_t, int, vfs_context_t); -int nfs_vfs_unmount(mount_t, int, vfs_context_t); -int nfs_vfs_root(mount_t, vnode_t *, vfs_context_t); -int nfs_vfs_quotactl(mount_t, int, uid_t, caddr_t, vfs_context_t); -int nfs_vfs_getattr(mount_t, struct vfs_attr *, vfs_context_t); -int nfs_vfs_sync(mount_t, int, vfs_context_t); -int nfs_vfs_vget(mount_t, ino64_t, vnode_t *, vfs_context_t); -int nfs_vfs_vptofh(vnode_t, int *, unsigned char *, vfs_context_t); -int nfs_vfs_fhtovp(mount_t, int, unsigned char *, vnode_t *, vfs_context_t); -int nfs_vfs_init(struct vfsconf *); -int nfs_vfs_sysctl(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t); +int nfs_vfs_mount(mount_t, vnode_t, user_addr_t, vfs_context_t); +int nfs_vfs_start(mount_t, int, vfs_context_t); +int nfs_vfs_unmount(mount_t, int, vfs_context_t); +int nfs_vfs_root(mount_t, vnode_t *, vfs_context_t); +int nfs_vfs_quotactl(mount_t, int, uid_t, caddr_t, vfs_context_t); +int nfs_vfs_getattr(mount_t, struct vfs_attr *, vfs_context_t); +int nfs_vfs_sync(mount_t, int, vfs_context_t); +int nfs_vfs_vget(mount_t, ino64_t, vnode_t *, vfs_context_t); +int nfs_vfs_vptofh(vnode_t, int *, unsigned char *, vfs_context_t); +int nfs_vfs_fhtovp(mount_t, int, unsigned char *, vnode_t *, vfs_context_t); +int nfs_vfs_init(struct vfsconf *); +int nfs_vfs_sysctl(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t); const struct vfsops nfs_vfsops = { .vfs_mount = nfs_vfs_mount, @@ -230,8 +230,8 @@ int nfs3_fsinfo(struct nfsmount *, nfsnode_t, vfs_context_t); int nfs3_update_statfs(struct nfsmount *, vfs_context_t); int nfs4_update_statfs(struct nfsmount *, vfs_context_t); #if !QUOTA -#define nfs3_getquota NULL -#define nfs4_getquota NULL +#define nfs3_getquota NULL +#define nfs4_getquota NULL #else int nfs3_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *); int nfs4_getquota(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *); @@ -257,7 +257,7 @@ const struct nfs_funcs nfs3_funcs = { nfs3_setlock_rpc, nfs3_unlock_rpc, nfs3_getlock_rpc - }; +}; const struct nfs_funcs nfs4_funcs = { nfs4_mount, nfs4_update_statfs, @@ -278,7 +278,7 @@ const struct nfs_funcs nfs4_funcs = { nfs4_setlock_rpc, nfs4_unlock_rpc, nfs4_getlock_rpc - }; +}; /* * Called once to initialize data structures... @@ -291,16 +291,18 @@ nfs_vfs_init(__unused struct vfsconf *vfsp) /* * Check to see if major data structures haven't bloated. 
*/ - if (sizeof (struct nfsnode) > NFS_NODEALLOC) { + if (sizeof(struct nfsnode) > NFS_NODEALLOC) { printf("struct nfsnode bloated (> %dbytes)\n", NFS_NODEALLOC); printf("Try reducing NFS_SMALLFH\n"); } - if (sizeof (struct nfsmount) > NFS_MNTALLOC) + if (sizeof(struct nfsmount) > NFS_MNTALLOC) { printf("struct nfsmount bloated (> %dbytes)\n", NFS_MNTALLOC); + } nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000; - if (nfs_ticks < 1) + if (nfs_ticks < 1) { nfs_ticks = 1; + } /* init async I/O thread pool state */ TAILQ_INIT(&nfsiodfree); @@ -323,17 +325,18 @@ nfs_vfs_init(__unused struct vfsconf *vfsp) /* initialize NFS request list */ TAILQ_INIT(&nfs_reqq); - nfs_nbinit(); /* Init the nfsbuf table */ - nfs_nhinit(); /* Init the nfsnode table */ - nfs_lockinit(); /* Init the nfs lock state */ - nfs_gss_init(); /* Init RPCSEC_GSS security */ + nfs_nbinit(); /* Init the nfsbuf table */ + nfs_nhinit(); /* Init the nfsnode table */ + nfs_lockinit(); /* Init the nfs lock state */ + nfs_gss_init(); /* Init RPCSEC_GSS security */ /* NFSv4 stuff */ NFS4_PER_FS_ATTRIBUTES(nfs_fs_attr_bitmap); NFS4_PER_OBJECT_ATTRIBUTES(nfs_object_attr_bitmap); NFS4_DEFAULT_ATTRIBUTES(nfs_getattr_bitmap); - for (i=0; i < NFS_ATTR_BITMAP_LEN; i++) + for (i = 0; i < NFS_ATTR_BITMAP_LEN; i++) { nfs_getattr_bitmap[i] &= nfs_object_attr_bitmap[i]; + } TAILQ_INIT(&nfsclientids); /* initialize NFS timer callouts */ @@ -341,7 +344,7 @@ nfs_vfs_init(__unused struct vfsconf *vfsp) nfs_buf_timer_call = thread_call_allocate(nfs_buf_timer, NULL); nfs4_callback_timer_call = thread_call_allocate(nfs4_callback_timer, NULL); - return (0); + return 0; } /* @@ -358,10 +361,12 @@ nfs3_update_statfs(struct nfsmount *nmp, vfs_context_t ctx) nfsvers = nmp->nm_vers; np = nmp->nm_dnp; - if (!np) - return (ENXIO); - if ((error = vnode_get(NFSTOV(np)))) - return (error); + if (!np) { + return ENXIO; + } + if ((error = vnode_get(NFSTOV(np)))) { + return error; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -371,17 +376,22 @@ nfs3_update_statfs(struct nfsmount *nmp, vfs_context_t ctx) nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request2(np, NULL, &nmreq, NFSPROC_FSSTAT, vfs_context_thread(ctx), - vfs_context_ucred(ctx), NULL, R_SOFT, &nmrep, &xid, &status); - if (error == ETIMEDOUT) + vfs_context_ucred(ctx), NULL, R_SOFT, &nmrep, &xid, &status); + if (error == ETIMEDOUT) { goto nfsmout; - if ((lockerror = nfs_node_lock(np))) + } + if ((lockerror = nfs_node_lock(np))) { error = lockerror; - if (nfsvers == NFS_VER3) + } + if (nfsvers == NFS_VER3) { nfsm_chain_postop_attr_update(error, &nmrep, np, &xid); - if (!lockerror) + } + if (!lockerror) { nfs_node_unlock(np); - if (!error) + } + if (!error) { error = status; + } nfsm_assert(error, NFSTONMP(np), ENXIO); nfsmout_if(error); lck_mtx_lock(&nmp->nm_lock); @@ -405,8 +415,9 @@ nfs3_update_statfs(struct nfsmount *nmp, vfs_context_t ctx) nfsm_chain_get_32(error, &nmrep, nmp->nm_fsattr.nfsa_bsize); nfsm_chain_get_32(error, &nmrep, val); nfsmout_if(error); - if (nmp->nm_fsattr.nfsa_bsize <= 0) + if (nmp->nm_fsattr.nfsa_bsize <= 0) { nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE; + } nmp->nm_fsattr.nfsa_space_total = (uint64_t)val * nmp->nm_fsattr.nfsa_bsize; nfsm_chain_get_32(error, &nmrep, val); nfsmout_if(error); @@ -420,7 +431,7 @@ nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); vnode_put(NFSTOV(np)); - return (error); + return error; } int @@ -436,10 +447,12 @@ nfs4_update_statfs(struct nfsmount *nmp, vfs_context_t ctx) nfsvers = nmp->nm_vers; np = 
nmp->nm_dnp; - if (!np) - return (ENXIO); - if ((error = vnode_get(NFSTOV(np)))) - return (error); + if (!np) { + return ENXIO; + } + if ((error = vnode_get(NFSTOV(np)))) { + return error; + } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); NVATTR_INIT(&nvattr); @@ -462,8 +475,8 @@ nfs4_update_statfs(struct nfsmount *nmp, vfs_context_t ctx) nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request2(np, NULL, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), - NULL, R_SOFT, &nmrep, &xid, &status); + vfs_context_thread(ctx), vfs_context_ucred(ctx), + NULL, R_SOFT, &nmrep, &xid, &status); nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -474,12 +487,15 @@ nfs4_update_statfs(struct nfsmount *nmp, vfs_context_t ctx) error = nfs4_parsefattr(&nmrep, &nmp->nm_fsattr, &nvattr, NULL, NULL, NULL); lck_mtx_unlock(&nmp->nm_lock); nfsmout_if(error); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; - if (!error) + } + if (!error) { nfs_loadattrcache(np, &nvattr, &xid, 0); - if (!lockerror) + } + if (!lockerror) { nfs_node_unlock(np); + } nfsm_assert(error, NFSTONMP(np), ENXIO); nfsmout_if(error); nmp->nm_fsattr.nfsa_bsize = NFS_FABLKSIZE; @@ -488,7 +504,7 @@ nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); vnode_put(NFSTOV(np)); - return (error); + return error; } /* @@ -499,7 +515,7 @@ nfs_get_volname(struct mount *mp, char *volname, size_t len) { const char *ptr, *cptr; const char *mntfrom = mp->mnt_vfsstat.f_mntfromname; - size_t mflen = strnlen(mntfrom, MAXPATHLEN+1); + size_t mflen = strnlen(mntfrom, MAXPATHLEN + 1); if (mflen > MAXPATHLEN || mflen == 0) { strlcpy(volname, "Bad volname", len); @@ -507,32 +523,36 @@ nfs_get_volname(struct mount *mp, char *volname, size_t len) } /* Move back over trailing slashes */ - for (ptr = &mntfrom[mflen-1]; ptr != mntfrom && *ptr == '/'; ptr--) { + for (ptr = &mntfrom[mflen - 1]; ptr != mntfrom && *ptr == '/'; ptr--) { mflen--; } /* Find first character after the last slash */ cptr = ptr = NULL; - for(size_t i = 0; i < mflen; i++) { - if (mntfrom[i] == '/') - ptr = &mntfrom[i+1]; + for (size_t i = 0; i < mflen; i++) { + if (mntfrom[i] == '/') { + ptr = &mntfrom[i + 1]; + } /* And the first character after the first colon */ - else if (cptr == NULL && mntfrom[i] == ':') - cptr = &mntfrom[i+1]; + else if (cptr == NULL && mntfrom[i] == ':') { + cptr = &mntfrom[i + 1]; + } } /* * No slash or nothing after the last slash * use everything past the first colon */ - if (ptr == NULL || *ptr == '\0') + if (ptr == NULL || *ptr == '\0') { ptr = cptr; + } /* Otherwise use the mntfrom name */ - if (ptr == NULL) + if (ptr == NULL) { ptr = mntfrom; + } mflen = &mntfrom[mflen] - ptr; - len = mflen+1 < len ? mflen+1 : len; + len = mflen + 1 < len ? 
mflen + 1 : len; strlcpy(volname, ptr, len); } @@ -550,17 +570,18 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) int error = 0, nfsvers; nmp = VFSTONFS(mp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (VFSATTR_IS_ACTIVE(fsap, f_bsize) || + if (VFSATTR_IS_ACTIVE(fsap, f_bsize) || VFSATTR_IS_ACTIVE(fsap, f_iosize) || VFSATTR_IS_ACTIVE(fsap, f_blocks) || - VFSATTR_IS_ACTIVE(fsap, f_bfree) || + VFSATTR_IS_ACTIVE(fsap, f_bfree) || VFSATTR_IS_ACTIVE(fsap, f_bavail) || - VFSATTR_IS_ACTIVE(fsap, f_bused) || - VFSATTR_IS_ACTIVE(fsap, f_files) || + VFSATTR_IS_ACTIVE(fsap, f_bused) || + VFSATTR_IS_ACTIVE(fsap, f_files) || VFSATTR_IS_ACTIVE(fsap, f_ffree)) { int statfsrate = nfs_statfs_rate_limit; int refresh = 1; @@ -575,7 +596,7 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) microuptime(&now); lck_mtx_lock(&nmp->nm_lock); - stamp = (now.tv_sec * statfsrate) + (now.tv_usec / (1000000/statfsrate)); + stamp = (now.tv_sec * statfsrate) + (now.tv_usec / (1000000 / statfsrate)); if (stamp != nmp->nm_fsattrstamp) { refresh = 1; nmp->nm_fsattrstamp = stamp; @@ -585,32 +606,41 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) lck_mtx_unlock(&nmp->nm_lock); } - if (refresh && !nfs_use_cache(nmp)) + if (refresh && !nfs_use_cache(nmp)) { error = nmp->nm_funcs->nf_update_statfs(nmp, ctx); - if ((error == ESTALE) || (error == ETIMEDOUT)) + } + if ((error == ESTALE) || (error == ETIMEDOUT)) { error = 0; - if (error) - return (error); + } + if (error) { + return error; + } lck_mtx_lock(&nmp->nm_lock); VFSATTR_RETURN(fsap, f_iosize, nfs_iosize); VFSATTR_RETURN(fsap, f_bsize, nmp->nm_fsattr.nfsa_bsize); bsize = nmp->nm_fsattr.nfsa_bsize; - if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL)) + if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL)) { VFSATTR_RETURN(fsap, f_blocks, nmp->nm_fsattr.nfsa_space_total / bsize); - if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE)) + } + if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE)) { VFSATTR_RETURN(fsap, f_bfree, nmp->nm_fsattr.nfsa_space_free / bsize); - if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_AVAIL)) + } + if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_AVAIL)) { VFSATTR_RETURN(fsap, f_bavail, nmp->nm_fsattr.nfsa_space_avail / bsize); + } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_TOTAL) && - NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE)) + NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SPACE_FREE)) { VFSATTR_RETURN(fsap, f_bused, - (nmp->nm_fsattr.nfsa_space_total / bsize) - - (nmp->nm_fsattr.nfsa_space_free / bsize)); - if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_TOTAL)) + (nmp->nm_fsattr.nfsa_space_total / bsize) - + (nmp->nm_fsattr.nfsa_space_free / bsize)); + } + if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_TOTAL)) { VFSATTR_RETURN(fsap, f_files, nmp->nm_fsattr.nfsa_files_total); - if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_FREE)) + } + if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_FILES_FREE)) { VFSATTR_RETURN(fsap, f_ffree, nmp->nm_fsattr.nfsa_files_free); + } lck_mtx_unlock(&nmp->nm_lock); } @@ -624,8 +654,9 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) nfsnode_t np = nmp->nm_dnp; nfsm_assert(error, VFSTONFS(mp) && np, ENXIO); - if 
(error) - return (error); + if (error) { + return error; + } lck_mtx_lock(&nmp->nm_lock); /* @@ -640,29 +671,34 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) caps = valid = 0; if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_SYMLINK_SUPPORT)) { valid |= VOL_CAP_FMT_SYMBOLICLINKS; - if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_SYMLINK) + if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_SYMLINK) { caps |= VOL_CAP_FMT_SYMBOLICLINKS; + } } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_LINK_SUPPORT)) { valid |= VOL_CAP_FMT_HARDLINKS; - if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_LINK) + if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_LINK) { caps |= VOL_CAP_FMT_HARDLINKS; + } } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE)) { valid |= VOL_CAP_FMT_CASE_SENSITIVE; - if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE)) + if (!(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE)) { caps |= VOL_CAP_FMT_CASE_SENSITIVE; + } } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CASE_PRESERVING)) { valid |= VOL_CAP_FMT_CASE_PRESERVING; - if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_PRESERVING) + if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_CASE_PRESERVING) { caps |= VOL_CAP_FMT_CASE_PRESERVING; + } } /* Note: VOL_CAP_FMT_2TB_FILESIZE is actually used to test for "large file support" */ if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXFILESIZE)) { /* Is server's max file size at least 4GB? */ - if (nmp->nm_fsattr.nfsa_maxfilesize >= 0x100000000ULL) + if (nmp->nm_fsattr.nfsa_maxfilesize >= 0x100000000ULL) { caps |= VOL_CAP_FMT_2TB_FILESIZE; + } } else if (nfsvers >= NFS_VER3) { /* * NFSv3 and up supports 64 bits of file size. @@ -682,37 +718,37 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) valid |= VOL_CAP_FMT_NO_IMMUTABLE_FILES; fsap->f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] = - // VOL_CAP_FMT_PERSISTENTOBJECTIDS | - // VOL_CAP_FMT_SYMBOLICLINKS | - // VOL_CAP_FMT_HARDLINKS | - // VOL_CAP_FMT_JOURNAL | - // VOL_CAP_FMT_JOURNAL_ACTIVE | - // VOL_CAP_FMT_NO_ROOT_TIMES | - // VOL_CAP_FMT_SPARSE_FILES | - // VOL_CAP_FMT_ZERO_RUNS | - // VOL_CAP_FMT_CASE_SENSITIVE | - // VOL_CAP_FMT_CASE_PRESERVING | - // VOL_CAP_FMT_FAST_STATFS | - // VOL_CAP_FMT_2TB_FILESIZE | - // VOL_CAP_FMT_OPENDENYMODES | - // VOL_CAP_FMT_HIDDEN_FILES | - caps; + // VOL_CAP_FMT_PERSISTENTOBJECTIDS | + // VOL_CAP_FMT_SYMBOLICLINKS | + // VOL_CAP_FMT_HARDLINKS | + // VOL_CAP_FMT_JOURNAL | + // VOL_CAP_FMT_JOURNAL_ACTIVE | + // VOL_CAP_FMT_NO_ROOT_TIMES | + // VOL_CAP_FMT_SPARSE_FILES | + // VOL_CAP_FMT_ZERO_RUNS | + // VOL_CAP_FMT_CASE_SENSITIVE | + // VOL_CAP_FMT_CASE_PRESERVING | + // VOL_CAP_FMT_FAST_STATFS | + // VOL_CAP_FMT_2TB_FILESIZE | + // VOL_CAP_FMT_OPENDENYMODES | + // VOL_CAP_FMT_HIDDEN_FILES | + caps; fsap->f_capabilities.valid[VOL_CAPABILITIES_FORMAT] = - VOL_CAP_FMT_PERSISTENTOBJECTIDS | - // VOL_CAP_FMT_SYMBOLICLINKS | - // VOL_CAP_FMT_HARDLINKS | - // VOL_CAP_FMT_JOURNAL | - // VOL_CAP_FMT_JOURNAL_ACTIVE | - // VOL_CAP_FMT_NO_ROOT_TIMES | - // VOL_CAP_FMT_SPARSE_FILES | - // VOL_CAP_FMT_ZERO_RUNS | - // VOL_CAP_FMT_CASE_SENSITIVE | - // VOL_CAP_FMT_CASE_PRESERVING | - VOL_CAP_FMT_FAST_STATFS | - VOL_CAP_FMT_2TB_FILESIZE | - // VOL_CAP_FMT_OPENDENYMODES | - // VOL_CAP_FMT_HIDDEN_FILES | - valid; + VOL_CAP_FMT_PERSISTENTOBJECTIDS | + // VOL_CAP_FMT_SYMBOLICLINKS | + // VOL_CAP_FMT_HARDLINKS | + // VOL_CAP_FMT_JOURNAL | + // VOL_CAP_FMT_JOURNAL_ACTIVE | + // VOL_CAP_FMT_NO_ROOT_TIMES 
| + // VOL_CAP_FMT_SPARSE_FILES | + // VOL_CAP_FMT_ZERO_RUNS | + // VOL_CAP_FMT_CASE_SENSITIVE | + // VOL_CAP_FMT_CASE_PRESERVING | + VOL_CAP_FMT_FAST_STATFS | + VOL_CAP_FMT_2TB_FILESIZE | + // VOL_CAP_FMT_OPENDENYMODES | + // VOL_CAP_FMT_HIDDEN_FILES | + valid; /* * We don't support most of the interfaces. @@ -730,15 +766,18 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) if (nfsvers >= NFS_VER4) { caps = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK; valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK; - if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL) + if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL) { caps |= VOL_CAP_INT_EXTENDED_SECURITY; + } valid |= VOL_CAP_INT_EXTENDED_SECURITY; - if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) + if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) { caps |= VOL_CAP_INT_EXTENDED_ATTR; + } valid |= VOL_CAP_INT_EXTENDED_ATTR; #if NAMEDSTREAMS - if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) + if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) { caps |= VOL_CAP_INT_NAMEDSTREAMS; + } valid |= VOL_CAP_INT_NAMEDSTREAMS; #endif } else if (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) { @@ -749,41 +788,41 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) valid = VOL_CAP_INT_ADVLOCK | VOL_CAP_INT_FLOCK; } fsap->f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] = - // VOL_CAP_INT_SEARCHFS | - // VOL_CAP_INT_ATTRLIST | - // VOL_CAP_INT_NFSEXPORT | - // VOL_CAP_INT_READDIRATTR | - // VOL_CAP_INT_EXCHANGEDATA | - // VOL_CAP_INT_COPYFILE | - // VOL_CAP_INT_ALLOCATE | - // VOL_CAP_INT_VOL_RENAME | - // VOL_CAP_INT_ADVLOCK | - // VOL_CAP_INT_FLOCK | - // VOL_CAP_INT_EXTENDED_SECURITY | - // VOL_CAP_INT_USERACCESS | - // VOL_CAP_INT_MANLOCK | - // VOL_CAP_INT_NAMEDSTREAMS | - // VOL_CAP_INT_EXTENDED_ATTR | - VOL_CAP_INT_REMOTE_EVENT | - caps; + // VOL_CAP_INT_SEARCHFS | + // VOL_CAP_INT_ATTRLIST | + // VOL_CAP_INT_NFSEXPORT | + // VOL_CAP_INT_READDIRATTR | + // VOL_CAP_INT_EXCHANGEDATA | + // VOL_CAP_INT_COPYFILE | + // VOL_CAP_INT_ALLOCATE | + // VOL_CAP_INT_VOL_RENAME | + // VOL_CAP_INT_ADVLOCK | + // VOL_CAP_INT_FLOCK | + // VOL_CAP_INT_EXTENDED_SECURITY | + // VOL_CAP_INT_USERACCESS | + // VOL_CAP_INT_MANLOCK | + // VOL_CAP_INT_NAMEDSTREAMS | + // VOL_CAP_INT_EXTENDED_ATTR | + VOL_CAP_INT_REMOTE_EVENT | + caps; fsap->f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] = - VOL_CAP_INT_SEARCHFS | - VOL_CAP_INT_ATTRLIST | - VOL_CAP_INT_NFSEXPORT | - VOL_CAP_INT_READDIRATTR | - VOL_CAP_INT_EXCHANGEDATA | - VOL_CAP_INT_COPYFILE | - VOL_CAP_INT_ALLOCATE | - VOL_CAP_INT_VOL_RENAME | - // VOL_CAP_INT_ADVLOCK | - // VOL_CAP_INT_FLOCK | - // VOL_CAP_INT_EXTENDED_SECURITY | - // VOL_CAP_INT_USERACCESS | - // VOL_CAP_INT_MANLOCK | - // VOL_CAP_INT_NAMEDSTREAMS | - // VOL_CAP_INT_EXTENDED_ATTR | - VOL_CAP_INT_REMOTE_EVENT | - valid; + VOL_CAP_INT_SEARCHFS | + VOL_CAP_INT_ATTRLIST | + VOL_CAP_INT_NFSEXPORT | + VOL_CAP_INT_READDIRATTR | + VOL_CAP_INT_EXCHANGEDATA | + VOL_CAP_INT_COPYFILE | + VOL_CAP_INT_ALLOCATE | + VOL_CAP_INT_VOL_RENAME | + // VOL_CAP_INT_ADVLOCK | + // VOL_CAP_INT_FLOCK | + // VOL_CAP_INT_EXTENDED_SECURITY | + // VOL_CAP_INT_USERACCESS | + // VOL_CAP_INT_MANLOCK | + // VOL_CAP_INT_NAMEDSTREAMS | + // VOL_CAP_INT_EXTENDED_ATTR | + VOL_CAP_INT_REMOTE_EVENT | + valid; fsap->f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1] = 0; fsap->f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0; @@ -798,14 +837,14 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) if 
(VFSATTR_IS_ACTIVE(fsap, f_attributes)) { fsap->f_attributes.validattr.commonattr = 0; fsap->f_attributes.validattr.volattr = - ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES; + ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES; fsap->f_attributes.validattr.dirattr = 0; fsap->f_attributes.validattr.fileattr = 0; fsap->f_attributes.validattr.forkattr = 0; fsap->f_attributes.nativeattr.commonattr = 0; fsap->f_attributes.nativeattr.volattr = - ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES; + ATTR_VOL_NAME | ATTR_VOL_CAPABILITIES | ATTR_VOL_ATTRIBUTES; fsap->f_attributes.nativeattr.dirattr = 0; fsap->f_attributes.nativeattr.fileattr = 0; fsap->f_attributes.nativeattr.forkattr = 0; @@ -813,7 +852,7 @@ nfs_vfs_getattr(mount_t mp, struct vfs_attr *fsap, vfs_context_t ctx) VFSATTR_SET_SUPPORTED(fsap, f_attributes); } - return (error); + return error; } /* @@ -835,13 +874,16 @@ nfs3_fsinfo(struct nfsmount *nmp, nfsnode_t np, vfs_context_t ctx) nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request(np, NULL, &nmreq, NFSPROC_FSINFO, ctx, NULL, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_postop_attr_update(error, &nmrep, np, &xid); - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); - if (!error) + } + if (!error) { error = status; + } nfsmout_if(error); lck_mtx_lock(&nmp->nm_lock); @@ -851,13 +893,15 @@ nfs3_fsinfo(struct nfsmount *nmp, nfsnode_t np, vfs_context_t ctx) nfsm_chain_get_32(error, &nmrep, prefsize); nfsmout_if(error); nmp->nm_fsattr.nfsa_maxread = maxsize; - if (prefsize < nmp->nm_rsize) + if (prefsize < nmp->nm_rsize) { nmp->nm_rsize = (prefsize + NFS_FABLKSIZE - 1) & - ~(NFS_FABLKSIZE - 1); + ~(NFS_FABLKSIZE - 1); + } if ((maxsize > 0) && (maxsize < nmp->nm_rsize)) { nmp->nm_rsize = maxsize & ~(NFS_FABLKSIZE - 1); - if (nmp->nm_rsize == 0) + if (nmp->nm_rsize == 0) { nmp->nm_rsize = maxsize; + } } nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip rtmult @@ -865,23 +909,27 @@ nfs3_fsinfo(struct nfsmount *nmp, nfsnode_t np, vfs_context_t ctx) nfsm_chain_get_32(error, &nmrep, prefsize); nfsmout_if(error); nmp->nm_fsattr.nfsa_maxwrite = maxsize; - if (prefsize < nmp->nm_wsize) + if (prefsize < nmp->nm_wsize) { nmp->nm_wsize = (prefsize + NFS_FABLKSIZE - 1) & - ~(NFS_FABLKSIZE - 1); + ~(NFS_FABLKSIZE - 1); + } if ((maxsize > 0) && (maxsize < nmp->nm_wsize)) { nmp->nm_wsize = maxsize & ~(NFS_FABLKSIZE - 1); - if (nmp->nm_wsize == 0) + if (nmp->nm_wsize == 0) { nmp->nm_wsize = maxsize; + } } nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); // skip wtmult nfsm_chain_get_32(error, &nmrep, prefsize); nfsmout_if(error); - if ((prefsize > 0) && (prefsize < nmp->nm_readdirsize)) + if ((prefsize > 0) && (prefsize < nmp->nm_readdirsize)) { nmp->nm_readdirsize = prefsize; + } if ((nmp->nm_fsattr.nfsa_maxread > 0) && - (nmp->nm_fsattr.nfsa_maxread < nmp->nm_readdirsize)) + (nmp->nm_fsattr.nfsa_maxread < nmp->nm_readdirsize)) { nmp->nm_readdirsize = nmp->nm_fsattr.nfsa_maxread; + } nfsm_chain_get_64(error, &nmrep, nmp->nm_fsattr.nfsa_maxfilesize); @@ -890,14 +938,18 @@ nfs3_fsinfo(struct nfsmount *nmp, nfsnode_t np, vfs_context_t ctx) /* convert FS properties to our own flags */ nfsm_chain_get_32(error, &nmrep, val); nfsmout_if(error); - if (val & NFSV3FSINFO_LINK) + if (val & NFSV3FSINFO_LINK) { nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_LINK; - if (val & NFSV3FSINFO_SYMLINK) + } + if (val & NFSV3FSINFO_SYMLINK) { nmp->nm_fsattr.nfsa_flags |= 
NFS_FSFLAG_SYMLINK; - if (val & NFSV3FSINFO_HOMOGENEOUS) + } + if (val & NFSV3FSINFO_HOMOGENEOUS) { nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_HOMOGENEOUS; - if (val & NFSV3FSINFO_CANSETTIME) + } + if (val & NFSV3FSINFO_CANSETTIME) { nmp->nm_fsattr.nfsa_flags |= NFS_FSFLAG_SET_TIME; + } nmp->nm_state |= NFSSTA_GOTFSINFO; NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXREAD); NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXWRITE); @@ -907,11 +959,12 @@ nfs3_fsinfo(struct nfsmount *nmp, nfsnode_t np, vfs_context_t ctx) NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_HOMOGENEOUS); NFS_BITMAP_SET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_CANSETTIME); nfsmout: - if (nmlocked) + if (nmlocked) { lck_mtx_unlock(&nmp->nm_lock); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -948,9 +1001,10 @@ nfs_mountroot(void) */ bzero((caddr_t) &nd, sizeof(nd)); error = nfs_boot_init(&nd); - if (error) + if (error) { panic("nfs_boot_init: unable to initialize NFS root system information, " - "error %d, check configuration: %s\n", error, PE_boot_args()); + "error %d, check configuration: %s\n", error, PE_boot_args()); + } /* * Try NFSv3 first, then fallback to NFSv2. @@ -963,19 +1017,23 @@ tryagain: error = nfs_boot_getfh(&nd, v3, sotype); if (error) { if (error == EHOSTDOWN || error == EHOSTUNREACH) { - if (nd.nd_root.ndm_mntfrom) + if (nd.nd_root.ndm_mntfrom) { FREE_ZONE(nd.nd_root.ndm_mntfrom, - MAXPATHLEN, M_NAMEI); - if (nd.nd_root.ndm_path) + MAXPATHLEN, M_NAMEI); + } + if (nd.nd_root.ndm_path) { FREE_ZONE(nd.nd_root.ndm_path, - MAXPATHLEN, M_NAMEI); - if (nd.nd_private.ndm_mntfrom) + MAXPATHLEN, M_NAMEI); + } + if (nd.nd_private.ndm_mntfrom) { FREE_ZONE(nd.nd_private.ndm_mntfrom, - MAXPATHLEN, M_NAMEI); - if (nd.nd_private.ndm_path) + MAXPATHLEN, M_NAMEI); + } + if (nd.nd_private.ndm_path) { FREE_ZONE(nd.nd_private.ndm_path, - MAXPATHLEN, M_NAMEI); - return (error); + MAXPATHLEN, M_NAMEI); + } + return error; } if (v3) { if (sotype == SOCK_STREAM) { @@ -994,7 +1052,7 @@ tryagain: } else { printf("NFS mount (v2,UDP) failed with error %d, giving up...\n", error); } - switch(error) { + switch (error) { case EPROGUNAVAIL: panic("NFS mount failed: NFS server mountd not responding, check server configuration: %s", PE_boot_args()); case EACCES: @@ -1013,74 +1071,79 @@ tryagain: #if !defined(NO_MOUNT_PRIVATE) { //PWC hack until we have a real "mount" tool to remount root rw - int rw_root=0; - int flags = MNT_ROOTFS|MNT_RDONLY; - PE_parse_boot_argn("-rwroot_hack", &rw_root, sizeof (rw_root)); - if(rw_root) - { + int rw_root = 0; + int flags = MNT_ROOTFS | MNT_RDONLY; + PE_parse_boot_argn("-rwroot_hack", &rw_root, sizeof(rw_root)); + if (rw_root) { flags = MNT_ROOTFS; kprintf("-rwroot_hack in effect: mounting root fs read/write\n"); } - - if ((error = nfs_mount_diskless(&nd.nd_root, "/", flags, &vp, &mp, ctx))) + + if ((error = nfs_mount_diskless(&nd.nd_root, "/", flags, &vp, &mp, ctx))) #else if ((error = nfs_mount_diskless(&nd.nd_root, "/", MNT_ROOTFS, &vp, &mp, ctx))) #endif /* NO_MOUNT_PRIVATE */ - { - if (v3) { - if (sotype == SOCK_STREAM) { - printf("NFS root mount (v3,TCP) failed with %d, trying UDP...\n", error); + { + if (v3) { + if (sotype == SOCK_STREAM) { + printf("NFS root mount (v3,TCP) failed with %d, trying UDP...\n", error); + sotype = SOCK_DGRAM; + goto tryagain; + } + printf("NFS root mount (v3,UDP) failed with %d, trying v2...\n", error); + v3 = 0; + sotype = SOCK_STREAM; + goto tryagain; + } else if (sotype == SOCK_STREAM) { + 
printf("NFS root mount (v2,TCP) failed with %d, trying UDP...\n", error); sotype = SOCK_DGRAM; goto tryagain; + } else { + printf("NFS root mount (v2,UDP) failed with error %d, giving up...\n", error); } - printf("NFS root mount (v3,UDP) failed with %d, trying v2...\n", error); - v3 = 0; - sotype = SOCK_STREAM; - goto tryagain; - } else if (sotype == SOCK_STREAM) { - printf("NFS root mount (v2,TCP) failed with %d, trying UDP...\n", error); - sotype = SOCK_DGRAM; - goto tryagain; - } else { - printf("NFS root mount (v2,UDP) failed with error %d, giving up...\n", error); + panic("NFS root mount failed with error %d, check configuration: %s\n", error, PE_boot_args()); } - panic("NFS root mount failed with error %d, check configuration: %s\n", error, PE_boot_args()); - } } printf("root on %s\n", nd.nd_root.ndm_mntfrom); vfs_unbusy(mp); mount_list_add(mp); rootvp = vp; - + #if !defined(NO_MOUNT_PRIVATE) if (nd.nd_private.ndm_saddr.sin_addr.s_addr) { - error = nfs_mount_diskless_private(&nd.nd_private, "/private", - 0, &vppriv, &mppriv, ctx); - if (error) - panic("NFS /private mount failed with error %d, check configuration: %s\n", error, PE_boot_args()); - printf("private on %s\n", nd.nd_private.ndm_mntfrom); + error = nfs_mount_diskless_private(&nd.nd_private, "/private", + 0, &vppriv, &mppriv, ctx); + if (error) { + panic("NFS /private mount failed with error %d, check configuration: %s\n", error, PE_boot_args()); + } + printf("private on %s\n", nd.nd_private.ndm_mntfrom); - vfs_unbusy(mppriv); - mount_list_add(mppriv); + vfs_unbusy(mppriv); + mount_list_add(mppriv); } #endif /* NO_MOUNT_PRIVATE */ - if (nd.nd_root.ndm_mntfrom) + if (nd.nd_root.ndm_mntfrom) { FREE_ZONE(nd.nd_root.ndm_mntfrom, MAXPATHLEN, M_NAMEI); - if (nd.nd_root.ndm_path) + } + if (nd.nd_root.ndm_path) { FREE_ZONE(nd.nd_root.ndm_path, MAXPATHLEN, M_NAMEI); - if (nd.nd_private.ndm_mntfrom) + } + if (nd.nd_private.ndm_mntfrom) { FREE_ZONE(nd.nd_private.ndm_mntfrom, MAXPATHLEN, M_NAMEI); - if (nd.nd_private.ndm_path) + } + if (nd.nd_private.ndm_path) { FREE_ZONE(nd.nd_private.ndm_path, MAXPATHLEN, M_NAMEI); + } /* Get root attributes (for the time). 
*/ error = nfs_getattr(VTONFS(vp), NULL, ctx, NGA_UNCACHED); - if (error) + if (error) { panic("NFS mount: failed to get attributes for root directory, error %d, check server", error); - return (0); + } + return 0; } /* @@ -1107,43 +1170,51 @@ nfs_mount_diskless( if ((error = vfs_rootmountalloc("nfs", ndmntp->ndm_mntfrom, &mp))) { printf("nfs_mount_diskless: NFS not configured\n"); - return (error); + return error; } mp->mnt_flag |= mntflag; - if (!(mntflag & MNT_RDONLY)) + if (!(mntflag & MNT_RDONLY)) { mp->mnt_flag &= ~MNT_RDONLY; + } /* find the server-side path being mounted */ frompath = ndmntp->ndm_mntfrom; if (*frompath == '[') { /* skip IPv6 literal address */ - while (*frompath && (*frompath != ']')) + while (*frompath && (*frompath != ']')) { frompath++; - if (*frompath == ']') + } + if (*frompath == ']') { frompath++; + } } - while (*frompath && (*frompath != ':')) + while (*frompath && (*frompath != ':')) { frompath++; + } endserverp = frompath; - while (*frompath && (*frompath == ':')) + while (*frompath && (*frompath == ':')) { frompath++; + } /* count fs location path components */ p = frompath; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } numcomps = 0; while (*p) { numcomps++; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; - while (*p && (*p == '/')) + } + while (*p && (*p == '/')) { p++; + } } /* convert address to universal address string */ if (inet_ntop(AF_INET, &ndmntp->ndm_saddr.sin_addr, uaddr, sizeof(uaddr)) != uaddr) { printf("nfs_mount_diskless: bad address\n"); - return (EINVAL); + return EINVAL; } /* prepare mount attributes */ @@ -1183,17 +1254,21 @@ nfs_mount_diskless( xb_add_32(error, &xb, 0); /* empty server info */ xb_add_32(error, &xb, numcomps); /* pathname component count */ p = frompath; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } while (*p) { cp = p; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; + } xb_add_string(error, &xb, cp, (p - cp)); /* component */ - if (error) + if (error) { break; - while (*p && (*p == '/')) + } + while (*p && (*p == '/')) { p++; + } } xb_add_32(error, &xb, 0); /* empty fsl info */ xb_add_32(error, &xb, mntflag); /* MNT flags */ @@ -1203,16 +1278,16 @@ nfs_mount_diskless( end_offset = xb_offset(&xb); if (!error) { error = xb_seek(&xb, argslength_offset); - xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD/*version*/); + xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD /*version*/); } if (!error) { error = xb_seek(&xb, attrslength_offset); - xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD/*don't include length field*/); + xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD /*don't include length field*/); } if (error) { printf("nfs_mount_diskless: error %d assembling mount args\n", error); xb_cleanup(&xb); - return (error); + return error; } /* grab the assembled buffer */ xdrbuf = xb_buffer_base(&xb); @@ -1235,7 +1310,7 @@ nfs_mount_diskless( *mpp = mp; } xb_cleanup(&xb); - return (error); + return error; } #if !defined(NO_MOUNT_PRIVATE) @@ -1269,17 +1344,18 @@ nfs_mount_diskless_private( xb_init(&xb, 0); { - /* - * mimic main()!. Temporarily set up rootvnode and other stuff so - * that namei works. Need to undo this because main() does it, too - */ - struct filedesc *fdp; /* pointer to file descriptor state */ + /* + * mimic main()!. Temporarily set up rootvnode and other stuff so + * that namei works. 
Need to undo this because main() does it, too + */ + struct filedesc *fdp; /* pointer to file descriptor state */ fdp = procp->p_fd; mountlist.tqh_first->mnt_flag |= MNT_ROOTFS; /* Get the vnode for '/'. Set fdp->fd_cdir to reference it. */ - if (VFS_ROOT(mountlist.tqh_first, &rootvnode, NULL)) + if (VFS_ROOT(mountlist.tqh_first, &rootvnode, NULL)) { panic("cannot find root vnode"); + } error = vnode_ref(rootvnode); if (error) { printf("nfs_mountroot: vnode_ref() failed on root vnode!\n"); @@ -1315,9 +1391,11 @@ nfs_mount_diskless_private( error = ENOTDIR; goto out; } - for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) - if (!strncmp(vfsp->vfc_name, "nfs", sizeof(vfsp->vfc_name))) + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { + if (!strncmp(vfsp->vfc_name, "nfs", sizeof(vfsp->vfc_name))) { break; + } + } if (vfsp == NULL) { printf("nfs_mountroot: private NFS not configured\n"); vnode_put(vp); @@ -1363,7 +1441,7 @@ nfs_mount_diskless_private( // mp->mnt_stat.f_type = vfsp->vfc_typenum; mp->mnt_flag = mntflag; mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK; - strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSNAMELEN-1); + strncpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSNAMELEN - 1); vp->v_mountedhere = mp; mp->mnt_vnodecovered = vp; vp = NULLVP; @@ -1378,27 +1456,34 @@ nfs_mount_diskless_private( /* find the server-side path being mounted */ frompath = ndmntp->ndm_mntfrom; if (*frompath == '[') { /* skip IPv6 literal address */ - while (*frompath && (*frompath != ']')) + while (*frompath && (*frompath != ']')) { frompath++; - if (*frompath == ']') + } + if (*frompath == ']') { frompath++; + } } - while (*frompath && (*frompath != ':')) + while (*frompath && (*frompath != ':')) { frompath++; + } endserverp = frompath; - while (*frompath && (*frompath == ':')) + while (*frompath && (*frompath == ':')) { frompath++; + } /* count fs location path components */ p = frompath; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } numcomps = 0; while (*p) { numcomps++; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; - while (*p && (*p == '/')) + } + while (*p && (*p == '/')) { p++; + } } /* convert address to universal address string */ @@ -1445,17 +1530,21 @@ nfs_mount_diskless_private( xb_add_32(error, &xb, 0); /* empty server info */ xb_add_32(error, &xb, numcomps); /* pathname component count */ p = frompath; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } while (*p) { cp = p; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; + } xb_add_string(error, &xb, cp, (p - cp)); /* component */ - if (error) + if (error) { break; - while (*p && (*p == '/')) + } + while (*p && (*p == '/')) { p++; + } } xb_add_32(error, &xb, 0); /* empty fsl info */ xb_add_32(error, &xb, mntflag); /* MNT flags */ @@ -1465,11 +1554,11 @@ nfs_mount_diskless_private( end_offset = xb_offset(&xb); if (!error) { error = xb_seek(&xb, argslength_offset); - xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD/*version*/); + xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD /*version*/); } if (!error) { error = xb_seek(&xb, attrslength_offset); - xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD/*don't include length field*/); + xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD /*don't include length field*/); } if (error) { printf("nfs_mountroot: error %d assembling mount args\n", error); @@ -1491,7 +1580,7 @@ nfs_mount_diskless_private( #if CONFIG_MACF mac_mount_label_destroy(mp); #endif - 
FREE_ZONE(mp, sizeof (struct mount), M_MOUNT); + FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); goto out; } @@ -1499,7 +1588,7 @@ nfs_mount_diskless_private( *vpp = vp; out: xb_cleanup(&xb); - return (error); + return error; } #endif /* NO_MOUNT_PRIVATE */ @@ -1528,8 +1617,9 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar /* allocate a temporary buffer for mntfrom */ MALLOC_ZONE(mntfrom, char*, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!mntfrom) - return (ENOMEM); + if (!mntfrom) { + return ENOMEM; + } args64bit = (inkernel || vfs_context_is64bit(ctx)); argsp = args64bit ? (void*)&args : (void*)&tempargs; @@ -1550,10 +1640,11 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar } /* read in the structure */ - if (inkernel) + if (inkernel) { bcopy(CAST_DOWN(void *, data), argsp, argsize); - else + } else { error = copyin(data, argsp, argsize); + } nfsmout_if(error); if (!args64bit) { @@ -1580,10 +1671,12 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar args.acdirmin = tempargs.acdirmin; args.acdirmax = tempargs.acdirmax; } - if (args.version >= 5) + if (args.version >= 5) { args.auth = tempargs.auth; - if (args.version >= 6) + } + if (args.version >= 6) { args.deadtimeout = tempargs.deadtimeout; + } } if ((args.fhsize < 0) || (args.fhsize > NFS4_FHSIZE)) { @@ -1591,65 +1684,76 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar goto nfsmout; } if (args.fhsize > 0) { - if (inkernel) + if (inkernel) { bcopy(CAST_DOWN(void *, args.fh), (caddr_t)nfh, args.fhsize); - else + } else { error = copyin(args.fh, (caddr_t)nfh, args.fhsize); + } nfsmout_if(error); } - if (inkernel) - error = copystr(CAST_DOWN(void *, args.hostname), mntfrom, MAXPATHLEN-1, &len); - else - error = copyinstr(args.hostname, mntfrom, MAXPATHLEN-1, &len); + if (inkernel) { + error = copystr(CAST_DOWN(void *, args.hostname), mntfrom, MAXPATHLEN - 1, &len); + } else { + error = copyinstr(args.hostname, mntfrom, MAXPATHLEN - 1, &len); + } nfsmout_if(error); bzero(&mntfrom[len], MAXPATHLEN - len); /* find the server-side path being mounted */ frompath = mntfrom; if (*frompath == '[') { /* skip IPv6 literal address */ - while (*frompath && (*frompath != ']')) + while (*frompath && (*frompath != ']')) { frompath++; - if (*frompath == ']') + } + if (*frompath == ']') { frompath++; + } } - while (*frompath && (*frompath != ':')) + while (*frompath && (*frompath != ':')) { frompath++; + } endserverp = frompath; - while (*frompath && (*frompath == ':')) + while (*frompath && (*frompath == ':')) { frompath++; + } /* count fs location path components */ p = frompath; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } numcomps = 0; while (*p) { numcomps++; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; - while (*p && (*p == '/')) + } + while (*p && (*p == '/')) { p++; + } } /* copy socket address */ - if (inkernel) + if (inkernel) { bcopy(CAST_DOWN(void *, args.addr), &ss, args.addrlen); - else { - if ((size_t)args.addrlen > sizeof (struct sockaddr_storage)) + } else { + if ((size_t)args.addrlen > sizeof(struct sockaddr_storage)) { error = EINVAL; - else + } else { error = copyin(args.addr, &ss, args.addrlen); + } } nfsmout_if(error); ss.ss_len = args.addrlen; /* convert address to universal address string */ - if (ss.ss_family == AF_INET) + if (ss.ss_family == AF_INET) { sinaddr = &((struct sockaddr_in*)&ss)->sin_addr; - else if (ss.ss_family == AF_INET6) + } else if 
(ss.ss_family == AF_INET6) { sinaddr = &((struct sockaddr_in6*)&ss)->sin6_addr; - else + } else { sinaddr = NULL; + } if (!sinaddr || (inet_ntop(ss.ss_family, sinaddr, uaddr, sizeof(uaddr)) != uaddr)) { error = EINVAL; goto nfsmout; @@ -1668,26 +1772,36 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONEGNAMECACHE); NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MUTEJUKEBOX); NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOQUOTA); - if (args.flags & NFSMNT_SOFT) + if (args.flags & NFSMNT_SOFT) { NFS_BITMAP_SET(mflags, NFS_MFLAG_SOFT); - if (args.flags & NFSMNT_INT) + } + if (args.flags & NFSMNT_INT) { NFS_BITMAP_SET(mflags, NFS_MFLAG_INTR); - if (args.flags & NFSMNT_RESVPORT) + } + if (args.flags & NFSMNT_RESVPORT) { NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT); - if (args.flags & NFSMNT_NOCONN) + } + if (args.flags & NFSMNT_NOCONN) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCONNECT); - if (args.flags & NFSMNT_DUMBTIMR) + } + if (args.flags & NFSMNT_DUMBTIMR) { NFS_BITMAP_SET(mflags, NFS_MFLAG_DUMBTIMER); - if (args.flags & NFSMNT_CALLUMNT) + } + if (args.flags & NFSMNT_CALLUMNT) { NFS_BITMAP_SET(mflags, NFS_MFLAG_CALLUMNT); - if (args.flags & NFSMNT_RDIRPLUS) + } + if (args.flags & NFSMNT_RDIRPLUS) { NFS_BITMAP_SET(mflags, NFS_MFLAG_RDIRPLUS); - if (args.flags & NFSMNT_NONEGNAMECACHE) + } + if (args.flags & NFSMNT_NONEGNAMECACHE) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NONEGNAMECACHE); - if (args.flags & NFSMNT_MUTEJUKEBOX) + } + if (args.flags & NFSMNT_MUTEJUKEBOX) { NFS_BITMAP_SET(mflags, NFS_MFLAG_MUTEJUKEBOX); - if (args.flags & NFSMNT_NOQUOTA) + } + if (args.flags & NFSMNT_NOQUOTA) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NOQUOTA); + } /* prepare mount attributes */ NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN); @@ -1699,53 +1813,68 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS); NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS); NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFROM); - if (args.flags & NFSMNT_NFSV4) + if (args.flags & NFSMNT_NFSV4) { nfsvers = 4; - else if (args.flags & NFSMNT_NFSV3) + } else if (args.flags & NFSMNT_NFSV3) { nfsvers = 3; - else + } else { nfsvers = 2; - if ((args.flags & NFSMNT_RSIZE) && (args.rsize > 0)) + } + if ((args.flags & NFSMNT_RSIZE) && (args.rsize > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_READ_SIZE); - if ((args.flags & NFSMNT_WSIZE) && (args.wsize > 0)) + } + if ((args.flags & NFSMNT_WSIZE) && (args.wsize > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_WRITE_SIZE); - if ((args.flags & NFSMNT_TIMEO) && (args.timeo > 0)) + } + if ((args.flags & NFSMNT_TIMEO) && (args.timeo > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_REQUEST_TIMEOUT); - if ((args.flags & NFSMNT_RETRANS) && (args.retrans > 0)) + } + if ((args.flags & NFSMNT_RETRANS) && (args.retrans > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT); - if ((args.flags & NFSMNT_MAXGRPS) && (args.maxgrouplist > 0)) + } + if ((args.flags & NFSMNT_MAXGRPS) && (args.maxgrouplist > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST); - if ((args.flags & NFSMNT_READAHEAD) && (args.readahead > 0)) + } + if ((args.flags & NFSMNT_READAHEAD) && (args.readahead > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_READAHEAD); - if ((args.flags & NFSMNT_READDIRSIZE) && (args.readdirsize > 0)) + } + if ((args.flags & NFSMNT_READDIRSIZE) && (args.readdirsize > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_READDIR_SIZE); + } if ((args.flags & NFSMNT_NOLOCKS) || (args.flags & NFSMNT_LOCALLOCKS)) { NFS_BITMAP_SET(mattrs, 
NFS_MATTR_LOCK_MODE); - if (args.flags & NFSMNT_NOLOCKS) + if (args.flags & NFSMNT_NOLOCKS) { nfslockmode = NFS_LOCK_MODE_DISABLED; - else if (args.flags & NFSMNT_LOCALLOCKS) + } else if (args.flags & NFSMNT_LOCALLOCKS) { nfslockmode = NFS_LOCK_MODE_LOCAL; - else + } else { nfslockmode = NFS_LOCK_MODE_ENABLED; + } } if (args.version >= 4) { - if ((args.flags & NFSMNT_ACREGMIN) && (args.acregmin > 0)) + if ((args.flags & NFSMNT_ACREGMIN) && (args.acregmin > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN); - if ((args.flags & NFSMNT_ACREGMAX) && (args.acregmax > 0)) + } + if ((args.flags & NFSMNT_ACREGMAX) && (args.acregmax > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_REG_MAX); - if ((args.flags & NFSMNT_ACDIRMIN) && (args.acdirmin > 0)) + } + if ((args.flags & NFSMNT_ACDIRMIN) && (args.acdirmin > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MIN); - if ((args.flags & NFSMNT_ACDIRMAX) && (args.acdirmax > 0)) + } + if ((args.flags & NFSMNT_ACDIRMAX) && (args.acdirmax > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX); + } } if (args.version >= 5) { - if ((args.flags & NFSMNT_SECFLAVOR) || (args.flags & NFSMNT_SECSYSOK)) + if ((args.flags & NFSMNT_SECFLAVOR) || (args.flags & NFSMNT_SECSYSOK)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY); + } } if (args.version >= 6) { - if ((args.flags & NFSMNT_DEADTIMEOUT) && (args.deadtimeout > 0)) + if ((args.flags & NFSMNT_DEADTIMEOUT) && (args.deadtimeout > 0)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_DEAD_TIMEOUT); + } } /* build xdr buffer */ @@ -1760,14 +1889,18 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar xb_add_bitmap(error, &xb, mflags_mask, NFS_MFLAG_BITMAP_LEN); /* mask */ xb_add_bitmap(error, &xb, mflags, NFS_MFLAG_BITMAP_LEN); /* value */ xb_add_32(error, &xb, nfsvers); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) { xb_add_32(error, &xb, args.rsize); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) { xb_add_32(error, &xb, args.wsize); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) { xb_add_32(error, &xb, args.readdirsize); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) { xb_add_32(error, &xb, args.readahead); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) { xb_add_32(error, &xb, args.acregmin); xb_add_32(error, &xb, 0); @@ -1784,38 +1917,46 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar xb_add_32(error, &xb, args.acdirmax); xb_add_32(error, &xb, 0); } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) { xb_add_32(error, &xb, nfslockmode); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) { - uint32_t flavors[2], i=0; - if (args.flags & NFSMNT_SECFLAVOR) + uint32_t flavors[2], i = 0; + if (args.flags & NFSMNT_SECFLAVOR) { flavors[i++] = args.auth; - if ((args.flags & NFSMNT_SECSYSOK) && ((i == 0) || (flavors[0] != RPCAUTH_SYS))) + } + if ((args.flags & NFSMNT_SECSYSOK) && ((i == 0) || (flavors[0] != RPCAUTH_SYS))) { flavors[i++] = RPCAUTH_SYS; + } xb_add_word_array(error, &xb, flavors, i); } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) { xb_add_32(error, &xb, args.maxgrouplist); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) + } + if 
(NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) { xb_add_string(error, &xb, ((args.sotype == SOCK_DGRAM) ? "udp" : "tcp"), 3); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) - xb_add_32(error, &xb, ((ss.ss_family == AF_INET) ? - ntohs(((struct sockaddr_in*)&ss)->sin_port) : - ntohs(((struct sockaddr_in6*)&ss)->sin6_port))); + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) { + xb_add_32(error, &xb, ((ss.ss_family == AF_INET) ? + ntohs(((struct sockaddr_in*)&ss)->sin_port) : + ntohs(((struct sockaddr_in6*)&ss)->sin6_port))); + } /* NFS_MATTR_MOUNT_PORT (not available in old args) */ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) { /* convert from .1s increments to time */ - xb_add_32(error, &xb, args.timeo/10); - xb_add_32(error, &xb, (args.timeo%10)*100000000); + xb_add_32(error, &xb, args.timeo / 10); + xb_add_32(error, &xb, (args.timeo % 10) * 100000000); } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) { xb_add_32(error, &xb, args.retrans); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) { xb_add_32(error, &xb, args.deadtimeout); xb_add_32(error, &xb, 0); } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) { xb_add_fh(error, &xb, &nfh[0], args.fhsize); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) { xb_add_32(error, &xb, 1); /* fs location count */ xb_add_32(error, &xb, 1); /* server count */ @@ -1826,32 +1967,37 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar xb_add_32(error, &xb, numcomps); /* pathname component count */ nfsmout_if(error); p = frompath; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } while (*p) { cp = p; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; + } xb_add_string(error, &xb, cp, (p - cp)); /* component */ nfsmout_if(error); - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } } xb_add_32(error, &xb, 0); /* empty fsl info */ } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) { xb_add_32(error, &xb, (vfs_flags(mp) & MNT_VISFLAGMASK)); /* VFS MNT_* flags */ - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) { xb_add_string(error, &xb, mntfrom, strlen(mntfrom)); /* fixed f_mntfromname */ + } xb_build_done(error, &xb); /* update opaque counts */ end_offset = xb_offset(&xb); error = xb_seek(&xb, argslength_offset); - xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD/*version*/); + xb_add_32(error, &xb, end_offset - argslength_offset + XDRWORD /*version*/); nfsmout_if(error); error = xb_seek(&xb, attrslength_offset); - xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD/*don't include length field*/); + xb_add_32(error, &xb, end_offset - attrslength_offset - XDRWORD /*don't include length field*/); if (!error) { /* grab the assembled buffer */ @@ -1861,7 +2007,7 @@ nfs_convert_old_nfs_args(mount_t mp, user_addr_t data, vfs_context_t ctx, int ar nfsmout: xb_cleanup(&xb); FREE_ZONE(mntfrom, MAXPATHLEN, M_NAMEI); - return (error); + return error; } /* @@ -1877,14 +2023,16 @@ nfs_vfs_mount(mount_t mp, vnode_t vp, user_addr_t data, vfs_context_t ctx) char *xdrbuf = NULL; /* read in version */ - if (inkernel) + if (inkernel) { bcopy(CAST_DOWN(void *, data), &argsversion, sizeof(argsversion)); - else if ((error = copyin(data, &argsversion, sizeof(argsversion)))) - return (error); + } else if ((error = 
copyin(data, &argsversion, sizeof(argsversion)))) { + return error; + } /* If we have XDR args, then all values in the buffer are in network order */ - if (argsversion == htonl(NFS_ARGSVERSION_XDR)) + if (argsversion == htonl(NFS_ARGSVERSION_XDR)) { argsversion = NFS_ARGSVERSION_XDR; + } switch (argsversion) { case 3: @@ -1896,15 +2044,17 @@ nfs_vfs_mount(mount_t mp, vnode_t vp, user_addr_t data, vfs_context_t ctx) break; case NFS_ARGSVERSION_XDR: /* copy in xdr buffer */ - if (inkernel) + if (inkernel) { bcopy(CAST_DOWN(void *, (data + XDRWORD)), &argslength, XDRWORD); - else + } else { error = copyin((data + XDRWORD), &argslength, XDRWORD); - if (error) + } + if (error) { break; + } argslength = ntohl(argslength); /* put a reasonable limit on the size of the XDR args */ - if (argslength > 16*1024) { + if (argslength > 16 * 1024) { error = E2BIG; break; } @@ -1914,22 +2064,24 @@ nfs_vfs_mount(mount_t mp, vnode_t vp, user_addr_t data, vfs_context_t ctx) error = ENOMEM; break; } - if (inkernel) + if (inkernel) { bcopy(CAST_DOWN(void *, data), xdrbuf, argslength); - else + } else { error = copyin(data, xdrbuf, argslength); + } break; default: error = EPROGMISMATCH; } if (error) { - if (xdrbuf) + if (xdrbuf) { xb_free(xdrbuf); - return (error); + } + return error; } error = mountnfs(xdrbuf, mp, ctx, &vp); - return (error); + return error; } /* @@ -1949,24 +2101,28 @@ nfs3_mount( *npp = NULL; - if (!nmp->nm_fh) - return (EINVAL); + if (!nmp->nm_fh) { + return EINVAL; + } /* * Get file attributes for the mountpoint. These are needed * in order to properly create the root vnode. */ error = nfs3_getattr_rpc(NULL, nmp->nm_mountp, nmp->nm_fh->fh_data, nmp->nm_fh->fh_len, 0, - ctx, &nvattr, &xid); - if (error) + ctx, &nvattr, &xid); + if (error) { goto out; + } error = nfs_nget(nmp->nm_mountp, NULL, NULL, nmp->nm_fh->fh_data, nmp->nm_fh->fh_len, - &nvattr, &xid, RPCAUTH_UNKNOWN, NG_MARKROOT, npp); - if (*npp) + &nvattr, &xid, RPCAUTH_UNKNOWN, NG_MARKROOT, npp); + if (*npp) { nfs_node_unlock(*npp); - if (error) + } + if (error) { goto out; + } /* * Try to make sure we have all the general info from the server. 
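The nfs_vfs_mount() hunks above show how XDR-format mount arguments are recognized: the first 32-bit word carries the args version in network byte order, the second word carries the total argument length, and anything over 16 KiB is rejected with E2BIG before the buffer is copied in. A minimal user-space sketch of that header check follows; it is an approximation under stated assumptions, not the kernel code itself (check_xdr_args_header() is a hypothetical helper, and the NFS_ARGSVERSION_XDR value of 88 is taken from memory of the xnu headers):

/*
 * Sketch of the XDR mount-args header validation seen in nfs_vfs_mount().
 * Standalone user-space approximation; names and constants as noted above.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

#define NFS_ARGSVERSION_XDR	88	/* assumed value from xnu's nfs headers */
#define XDRWORD			4	/* XDR word size, as in the hunks above */

static int
check_xdr_args_header(const unsigned char *data, size_t datalen, uint32_t *lenp)
{
	uint32_t version, length;

	if (datalen < 2 * XDRWORD) {
		return -1;
	}
	memcpy(&version, data, XDRWORD);
	/* XDR args arrive in network order; old-style args do not */
	if (version != htonl(NFS_ARGSVERSION_XDR)) {
		return -1;
	}
	memcpy(&length, data + XDRWORD, XDRWORD);
	length = ntohl(length);
	/* the kernel caps XDR args at 16 KiB and fails with E2BIG beyond that */
	if (length > 16 * 1024) {
		return -1;
	}
	*lenp = length;
	return 0;
}

In the kernel the same two words are fetched with bcopy() when called in-kernel and copyin() otherwise, as the hunk shows; the sketch only models the byte-order and bounds logic.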
@@ -1977,8 +2133,9 @@ nfs3_mount( } else if (nmp->nm_vers == NFS_VER3) { /* get the NFSv3 FSINFO */ error = nfs3_fsinfo(nmp, *npp, ctx); - if (error) + if (error) { goto out; + } /* If the server indicates all pathconf info is */ /* the same, grab a copy of that info now */ if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_HOMOGENEOUS) && @@ -1998,7 +2155,7 @@ out: vnode_recycle(NFSTOV(*npp)); *npp = NULL; } - return (error); + return error; } /* @@ -2020,16 +2177,18 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf struct nfs_fs_path nfsp2; bzero(&nfsp2, sizeof(nfsp2)); - if (dirfhp->fh_len) + if (dirfhp->fh_len) { NFSREQ_SECINFO_SET(&si, NULL, dirfhp->fh_data, dirfhp->fh_len, nfsp->np_components[curcomp], 0); - else + } else { NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, nfsp->np_components[curcomp], 0); + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); - MALLOC_ZONE(link, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (!link) + MALLOC_ZONE(link, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + if (!link) { error = ENOMEM; + } // PUTFH, READLINK numops = 2; @@ -2045,9 +2204,10 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf nfsmout_if(error); error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); + if (!error) { error = nfs_request_async_finish(req, &nmrep, &xid, &status); + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); @@ -2055,10 +2215,11 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf nfsm_chain_op_check(error, &nmrep, NFS_OP_READLINK); nfsm_chain_get_32(error, &nmrep, len); nfsmout_if(error); - if (len == 0) + if (len == 0) { error = ENOENT; - else if (len >= MAXPATHLEN) + } else if (len >= MAXPATHLEN) { len = MAXPATHLEN - 1; + } nfsm_chain_get_opaque(error, &nmrep, len, link); nfsmout_if(error); /* make sure link string is terminated properly */ @@ -2066,19 +2227,22 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf /* count the number of components in link */ p = link; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } linkcompcount = 0; while (*p) { linkcompcount++; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; - while (*p && (*p == '/')) + } + while (*p && (*p == '/')) { p++; + } } /* free up used components */ - for (comp=0; comp <= curcomp; comp++) { + for (comp = 0; comp <= curcomp; comp++) { if (nfsp->np_components[comp]) { FREE(nfsp->np_components[comp], M_TEMP); nfsp->np_components[comp] = NULL; @@ -2087,7 +2251,7 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf /* set up new path */ nfsp2.np_compcount = nfsp->np_compcount - curcomp - 1 + linkcompcount; - MALLOC(nfsp2.np_components, char **, nfsp2.np_compcount*sizeof(char*), M_TEMP, M_WAITOK|M_ZERO); + MALLOC(nfsp2.np_components, char **, nfsp2.np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO); if (!nfsp2.np_components) { error = ENOMEM; goto nfsmout; @@ -2095,30 +2259,33 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf /* add link components */ p = link; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; - for (newcomp=0; newcomp < linkcompcount; newcomp++) { + } + for (newcomp = 0; newcomp < linkcompcount; newcomp++) { /* find end of component */ q = p; - 
while (*q && (*q != '/')) + while (*q && (*q != '/')) { q++; - MALLOC(nfsp2.np_components[newcomp], char *, q-p+1, M_TEMP, M_WAITOK|M_ZERO); + } + MALLOC(nfsp2.np_components[newcomp], char *, q - p + 1, M_TEMP, M_WAITOK | M_ZERO); if (!nfsp2.np_components[newcomp]) { error = ENOMEM; break; } ch = *q; *q = '\0'; - strlcpy(nfsp2.np_components[newcomp], p, q-p+1); + strlcpy(nfsp2.np_components[newcomp], p, q - p + 1); *q = ch; p = q; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } } nfsmout_if(error); /* add remaining components */ - for(comp = curcomp + 1; comp < nfsp->np_compcount; comp++,newcomp++) { + for (comp = curcomp + 1; comp < nfsp->np_compcount; comp++, newcomp++) { nfsp2.np_components[newcomp] = nfsp->np_components[comp]; nfsp->np_components[comp] = NULL; } @@ -2135,17 +2302,20 @@ nfs4_mount_update_path_with_symlink(struct nfsmount *nmp, struct nfs_fs_path *nf *depthp = 0; } nfsmout: - if (link) + if (link) { FREE_ZONE(link, MAXPATHLEN, M_NAMEI); + } if (nfsp2.np_components) { - for (comp=0; comp < nfsp2.np_compcount; comp++) - if (nfsp2.np_components[comp]) + for (comp = 0; comp < nfsp2.np_compcount; comp++) { + if (nfsp2.np_components[comp]) { FREE(nfsp2.np_components[comp], M_TEMP); + } + } FREE(nfsp2.np_components, M_TEMP); } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* Set up an NFSv4 mount */ @@ -2185,30 +2355,32 @@ nfs4_mount( * mounting. If we are mounting the server's root, we'll need to defer the * SECINFO call to the first successful LOOKUP request. */ - if (!nmp->nm_sec.count) + if (!nmp->nm_sec.count) { nmp->nm_state |= NFSSTA_NEEDSECINFO; + } /* make a copy of the current location's path */ nfsp = &nmp->nm_locations.nl_locations[nmp->nm_locations.nl_current.nli_loc]->nl_path; bzero(&fspath, sizeof(fspath)); fspath.np_compcount = nfsp->np_compcount; if (fspath.np_compcount > 0) { - MALLOC(fspath.np_components, char **, fspath.np_compcount*sizeof(char*), M_TEMP, M_WAITOK|M_ZERO); + MALLOC(fspath.np_components, char **, fspath.np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO); if (!fspath.np_components) { error = ENOMEM; goto nfsmout; } - for (comp=0; comp < nfsp->np_compcount; comp++) { + for (comp = 0; comp < nfsp->np_compcount; comp++) { int slen = strlen(nfsp->np_components[comp]); - MALLOC(fspath.np_components[comp], char *, slen+1, M_TEMP, M_WAITOK|M_ZERO); + MALLOC(fspath.np_components[comp], char *, slen + 1, M_TEMP, M_WAITOK | M_ZERO); if (!fspath.np_components[comp]) { error = ENOMEM; break; } - strlcpy(fspath.np_components[comp], nfsp->np_components[comp], slen+1); + strlcpy(fspath.np_components[comp], nfsp->np_components[comp], slen + 1); } - if (error) + if (error) { goto nfsmout; + } } /* for mirror mounts, we can just use the file handle passed in */ @@ -2241,9 +2413,10 @@ nocomponents: nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); + if (!error) { error = nfs_request_async_finish(req, &nmrep, &xid, &status); + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTROOTFH); @@ -2263,7 +2436,7 @@ nocomponents: } /* look up each path component */ - for (comp=0; comp < fspath.np_compcount; ) { + for (comp = 0; comp < fspath.np_compcount;) { isdotdot = 0; if 
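/*
 * A self-contained sketch of the component-splitting loop above, which
 * carves each piece of the link string out with a bounded copy (the
 * strlcpy of q - p + 1 bytes). POSIX strndup stands in for the kernel's
 * MALLOC + strlcpy pair; names are illustrative.
 */
#include <stdlib.h>
#include <string.h>

static char **
split_path(const char *p, int *countp)
{
    const char *q;
    char **comps;
    int n = 0, i;

    for (q = p; *q;) {                /* first pass: count components */
        while (*q == '/') q++;
        if (*q) n++;
        while (*q && *q != '/') q++;
    }
    if ((comps = calloc((size_t)n, sizeof(*comps))) == NULL)
        return NULL;
    while (*p == '/')
        p++;
    for (i = 0; i < n; i++) {
        q = p;
        while (*q && *q != '/')
            q++;                      /* find end of component */
        if ((comps[i] = strndup(p, (size_t)(q - p))) == NULL)
            return NULL;              /* caller frees the partial array */
        for (p = q; *p == '/'; p++)
            ;                         /* advance to next component */
    }
    *countp = n;
    return comps;
}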
(fspath.np_components[comp][0] == '.') { if (fspath.np_components[comp][1] == '\0') { @@ -2273,8 +2446,9 @@ nocomponents: } /* treat ".." specially */ if ((fspath.np_components[comp][1] == '.') && - (fspath.np_components[comp][2] == '\0')) - isdotdot = 1; + (fspath.np_components[comp][2] == '\0')) { + isdotdot = 1; + } if (isdotdot && (dirfh.fh_len == 0)) { /* ".." in root directory is same as "." */ comp++; @@ -2282,10 +2456,11 @@ nocomponents: } } // PUT(ROOT)FH + LOOKUP(P) + GETFH + GETATTR - if (dirfh.fh_len == 0) + if (dirfh.fh_len == 0) { NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, isdotdot ? NULL : fspath.np_components[comp], 0); - else + } else { NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0); + } numops = 4; nfsm_chain_build_alloc_init(error, &nmreq, 18 * NFSX_UNSIGNED); nfsm_chain_add_compound_header(error, &nmreq, "mount", nmp->nm_minor_vers, numops); @@ -2302,7 +2477,7 @@ nocomponents: } else { nfsm_chain_add_32(error, &nmreq, NFS_OP_LOOKUP); nfsm_chain_add_name(error, &nmreq, - fspath.np_components[comp], strlen(fspath.np_components[comp]), nmp); + fspath.np_components[comp], strlen(fspath.np_components[comp]), nmp); } numops--; nfsm_chain_add_32(error, &nmreq, NFS_OP_GETFH); @@ -2311,16 +2486,18 @@ nocomponents: NFS_CLEAR_ATTRIBUTES(bitmap); NFS4_DEFAULT_ATTRIBUTES(bitmap); /* if no namedattr support or component is ".zfs", clear NFS_FATTR_NAMED_ATTR */ - if (!NMFLAG(nmp, NAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs")) + if (!NMFLAG(nmp, NAMEDATTR) || !strcmp(fspath.np_components[comp], ".zfs")) { NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR); + } nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN); nfsm_chain_build_done(error, &nmreq); nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); + if (!error) { error = nfs_request_async_finish(req, &nmrep, &xid, &status); + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, dirfh.fh_len ? NFS_OP_PUTFH : NFS_OP_PUTROOTFH); @@ -2328,8 +2505,9 @@ nocomponents: nfsmout_if(error); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH); nfsm_chain_get_32(error, &nmrep, fh.fh_len); - if (fh.fh_len > sizeof(fh.fh_data)) + if (fh.fh_len > sizeof(fh.fh_data)) { error = EBADRPC; + } nfsmout_if(error); nfsm_chain_get_opaque(error, &nmrep, fh.fh_len, fh.fh_data); nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR); @@ -2344,8 +2522,9 @@ nocomponents: /* Try the lookup again with a getattr for fs_locations. 
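/*
 * The traversal above treats "." as a no-op and ".." at the root as a
 * no-op too; any other ".." becomes a LOOKUPP operation and everything
 * else a plain LOOKUP. A sketch of that decision, factored out purely
 * for illustration:
 */
#include <string.h>

enum lookup_step { STEP_SKIP, STEP_LOOKUPP, STEP_LOOKUP };

static enum lookup_step
classify_component(const char *comp, int at_root)
{
    if (strcmp(comp, ".") == 0)
        return STEP_SKIP;                           /* "." never moves */
    if (strcmp(comp, "..") == 0)
        return at_root ? STEP_SKIP : STEP_LOOKUPP;  /* ".." at root == "." */
    return STEP_LOOKUP;
}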
*/ nfs_fs_locations_cleanup(&nfsls); error = nfs4_get_fs_locations(nmp, NULL, dirfh.fh_data, dirfh.fh_len, fspath.np_components[comp], ctx, &nfsls); - if (!error && (nfsls.nl_numlocs < 1)) + if (!error && (nfsls.nl_numlocs < 1)) { error = ENOENT; + } nfsmout_if(error); if (++loopcnt > MAXSYMLINKS) { /* too many symlink/referral redirections */ @@ -2360,22 +2539,23 @@ nocomponents: bzero(&nfsls, sizeof(nfsls)); /* initiate a connection using the new fs locations */ error = nfs_mount_connect(nmp); - if (!error && !(nmp->nm_locations.nl_current.nli_flags & NLI_VALID)) + if (!error && !(nmp->nm_locations.nl_current.nli_flags & NLI_VALID)) { error = EIO; + } nfsmout_if(error); /* add new server's remote path to beginning of our path and continue */ nfsp = &nmp->nm_locations.nl_locations[nmp->nm_locations.nl_current.nli_loc]->nl_path; bzero(&fspath2, sizeof(fspath2)); fspath2.np_compcount = (fspath.np_compcount - comp - 1) + nfsp->np_compcount; if (fspath2.np_compcount > 0) { - MALLOC(fspath2.np_components, char **, fspath2.np_compcount*sizeof(char*), M_TEMP, M_WAITOK|M_ZERO); + MALLOC(fspath2.np_components, char **, fspath2.np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO); if (!fspath2.np_components) { error = ENOMEM; goto nfsmout; } - for (comp2=0; comp2 < nfsp->np_compcount; comp2++) { + for (comp2 = 0; comp2 < nfsp->np_compcount; comp2++) { int slen = strlen(nfsp->np_components[comp2]); - MALLOC(fspath2.np_components[comp2], char *, slen+1, M_TEMP, M_WAITOK|M_ZERO); + MALLOC(fspath2.np_components[comp2], char *, slen + 1, M_TEMP, M_WAITOK | M_ZERO); if (!fspath2.np_components[comp2]) { /* clean up fspath2, then error out */ while (comp2 > 0) { @@ -2386,10 +2566,11 @@ nocomponents: error = ENOMEM; goto nfsmout; } - strlcpy(fspath2.np_components[comp2], nfsp->np_components[comp2], slen+1); + strlcpy(fspath2.np_components[comp2], nfsp->np_components[comp2], slen + 1); + } + if ((fspath.np_compcount - comp - 1) > 0) { + bcopy(&fspath.np_components[comp + 1], &fspath2.np_components[nfsp->np_compcount], (fspath.np_compcount - comp - 1) * sizeof(char*)); } - if ((fspath.np_compcount - comp - 1) > 0) - bcopy(&fspath.np_components[comp+1], &fspath2.np_components[nfsp->np_compcount], (fspath.np_compcount - comp - 1)*sizeof(char*)); /* free up unused parts of old path (prior components and component array) */ do { FREE(fspath.np_components[comp], M_TEMP); @@ -2402,17 +2583,19 @@ nocomponents: dirfh.fh_len = 0; comp = 0; NVATTR_CLEANUP(&nvattr); - if (fspath.np_compcount == 0) + if (fspath.np_compcount == 0) { goto nocomponents; + } continue; } nfsmout_if(error); /* if file handle is for a symlink, then update the path with the symlink contents */ if (NFS_BITMAP_ISSET(&nvattr.nva_bitmap, NFS_FATTR_TYPE) && (nvattr.nva_type == VLNK)) { - if (++loopcnt > MAXSYMLINKS) + if (++loopcnt > MAXSYMLINKS) { error = ELOOP; - else + } else { error = nfs4_mount_update_path_with_symlink(nmp, &fspath, comp, &dirfh, &depth, &fh, ctx); + } nfsmout_if(error); /* directory file handle is either left the same or reset to root (if link was absolute) */ /* path traversal starts at beginning of the path again */ @@ -2424,30 +2607,34 @@ nocomponents: NVATTR_CLEANUP(&nvattr); nfs_fs_locations_cleanup(&nfsls); /* not a symlink... 
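/*
 * After a referral, the hunks above build a new component array from the
 * referral target's own path followed by the components not yet
 * traversed, stealing the old pointers rather than copying strings (the
 * bcopy plus NULLing of old slots). A simplified sketch of that splice;
 * the ownership rules here are illustrative, not the kernel's exact ones.
 */
#include <stdlib.h>

static char **
splice_paths(char **base, int nbase,            /* referral target's path */
             char **old, int oldcount, int next, /* next untraversed index */
             int *outcount)
{
    int remaining = oldcount - next;
    char **out = calloc((size_t)(nbase + remaining), sizeof(char *));
    int i;

    if (out == NULL)
        return NULL;
    for (i = 0; i < nbase; i++)
        out[i] = base[i];                       /* assumes caller duplicated */
    for (i = 0; i < remaining; i++) {
        out[nbase + i] = old[next + i];         /* steal, as the bcopy does */
        old[next + i] = NULL;
    }
    *outcount = nbase + remaining;
    return out;
}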
*/ - if ((nmp->nm_state & NFSSTA_NEEDSECINFO) && (comp == (fspath.np_compcount-1)) && !isdotdot) { + if ((nmp->nm_state & NFSSTA_NEEDSECINFO) && (comp == (fspath.np_compcount - 1)) && !isdotdot) { /* need to get SECINFO for the directory being mounted */ - if (dirfh.fh_len == 0) + if (dirfh.fh_len == 0) { NFSREQ_SECINFO_SET(&si, NULL, NULL, 0, isdotdot ? NULL : fspath.np_components[comp], 0); - else + } else { NFSREQ_SECINFO_SET(&si, NULL, dirfh.fh_data, dirfh.fh_len, isdotdot ? NULL : fspath.np_components[comp], 0); + } sec.count = NX_MAX_SEC_FLAVORS; error = nfs4_secinfo_rpc(nmp, &si, vfs_context_ucred(ctx), sec.flavors, &sec.count); /* [sigh] some implementations return "illegal" error for unsupported ops */ - if (error == NFSERR_OP_ILLEGAL) + if (error == NFSERR_OP_ILLEGAL) { error = 0; + } nfsmout_if(error); /* set our default security flavor to the first in the list */ - if (sec.count) + if (sec.count) { nmp->nm_auth = sec.flavors[0]; + } nmp->nm_state &= ~NFSSTA_NEEDSECINFO; } /* advance directory file handle, component index, & update depth */ dirfh = fh; comp++; - if (!isdotdot) /* going down the hierarchy */ + if (!isdotdot) { /* going down the hierarchy */ depth++; - else if (--depth <= 0) /* going up the hierarchy */ + } else if (--depth <= 0) { /* going up the hierarchy */ dirfh.fh_len = 0; /* clear dirfh when we hit root */ + } } gotfh: @@ -2463,8 +2650,9 @@ gotfh: NFS_CLEAR_ATTRIBUTES(bitmap); NFS4_DEFAULT_ATTRIBUTES(bitmap); /* if no namedattr support or last component is ".zfs", clear NFS_FATTR_NAMED_ATTR */ - if (!NMFLAG(nmp, NAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount-1], ".zfs"))) + if (!NMFLAG(nmp, NAMEDATTR) || ((fspath.np_compcount > 0) && !strcmp(fspath.np_components[fspath.np_compcount - 1], ".zfs"))) { NFS_BITMAP_CLR(bitmap, NFS_FATTR_NAMED_ATTR); + } nfsm_chain_add_bitmap(error, &nmreq, bitmap, NFS_ATTR_BITMAP_LEN); if (NMFLAG(nmp, NAMEDATTR)) { numops--; @@ -2475,9 +2663,10 @@ gotfh: nfsm_assert(error, (numops == 0), EPROTO); nfsmout_if(error); error = nfs_request_async(NULL, nmp->nm_mountp, &nmreq, NFSPROC4_COMPOUND, - vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), &si, 0, NULL, &req); + if (!error) { error = nfs_request_async_finish(req, &nmrep, &xid, &status); + } nfsm_chain_skip_tag(error, &nmrep); nfsm_chain_get_32(error, &nmrep, numops); nfsm_chain_op_check(error, &nmrep, NFS_OP_PUTFH); @@ -2488,8 +2677,9 @@ gotfh: nfsmout_if(error); if (NMFLAG(nmp, NAMEDATTR)) { nfsm_chain_op_check(error, &nmrep, NFS_OP_OPENATTR); - if (error == ENOENT) + if (error == ENOENT) { error = 0; + } /* [sigh] some implementations return "illegal" error for unsupported ops */ if (error || !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_NAMED_ATTR)) { nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_NAMED_ATTR; @@ -2499,14 +2689,17 @@ gotfh: } else { nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_NAMED_ATTR; } - if (NMFLAG(nmp, NOACL)) /* make sure ACL support is turned off */ + if (NMFLAG(nmp, NOACL)) { /* make sure ACL support is turned off */ nmp->nm_fsattr.nfsa_flags &= ~NFS_FSFLAG_ACL; - if (NMFLAG(nmp, ACLONLY) && !(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)) + } + if (NMFLAG(nmp, ACLONLY) && !(nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)) { NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_ACLONLY); + } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_FH_EXPIRE_TYPE)) { uint32_t fhtype = ((nmp->nm_fsattr.nfsa_flags & 
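/*
 * The tail of the lookup loop above tracks how far below the root the
 * traversal currently is: a normal component descends, ".." ascends, and
 * when the depth reaches zero the directory handle is cleared so the
 * next round starts from PUTROOTFH again. A sketch of that bookkeeping:
 */
static void
advance_depth(int isdotdot, int *depthp, unsigned int *dirfh_lenp)
{
    if (!isdotdot) {
        (*depthp)++;               /* going down the hierarchy */
    } else if (--(*depthp) <= 0) {
        *dirfh_lenp = 0;           /* hit the root: clear the dir handle */
    }
}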
NFS_FSFLAG_FHTYPE_MASK) >> NFS_FSFLAG_FHTYPE_SHIFT); - if (fhtype != NFS_FH_PERSISTENT) + if (fhtype != NFS_FH_PERSISTENT) { printf("nfs: warning: non-persistent file handles! for %s\n", vfs_statfs(nmp->nm_mountp)->f_mntfromname); + } } /* make sure it's a directory */ @@ -2522,46 +2715,53 @@ gotfh: error = nfs_nget(nmp->nm_mountp, NULL, NULL, dirfh.fh_data, dirfh.fh_len, &nvattr, &xid, rq.r_auth, NG_MARKROOT, npp); nfsmout_if(error); - if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL) + if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL) { vfs_setextendedsecurity(nmp->nm_mountp); + } /* adjust I/O sizes to server limits */ if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXREAD) && (nmp->nm_fsattr.nfsa_maxread > 0)) { if (nmp->nm_fsattr.nfsa_maxread < (uint64_t)nmp->nm_rsize) { nmp->nm_rsize = nmp->nm_fsattr.nfsa_maxread & ~(NFS_FABLKSIZE - 1); - if (nmp->nm_rsize == 0) + if (nmp->nm_rsize == 0) { nmp->nm_rsize = nmp->nm_fsattr.nfsa_maxread; + } } } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXWRITE) && (nmp->nm_fsattr.nfsa_maxwrite > 0)) { if (nmp->nm_fsattr.nfsa_maxwrite < (uint64_t)nmp->nm_wsize) { nmp->nm_wsize = nmp->nm_fsattr.nfsa_maxwrite & ~(NFS_FABLKSIZE - 1); - if (nmp->nm_wsize == 0) + if (nmp->nm_wsize == 0) { nmp->nm_wsize = nmp->nm_fsattr.nfsa_maxwrite; + } } } /* set up lease renew timer */ nmp->nm_renew_timer = thread_call_allocate(nfs4_renew_timer, nmp); interval = nmp->nm_fsattr.nfsa_lease / 2; - if (interval < 1) + if (interval < 1) { interval = 1; + } nfs_interval_timer_start(nmp->nm_renew_timer, interval * 1000); nfsmout: if (fspath.np_components) { - for (comp=0; comp < fspath.np_compcount; comp++) - if (fspath.np_components[comp]) + for (comp = 0; comp < fspath.np_compcount; comp++) { + if (fspath.np_components[comp]) { FREE(fspath.np_components[comp], M_TEMP); + } + } FREE(fspath.np_components, M_TEMP); } NVATTR_CLEANUP(&nvattr); nfs_fs_locations_cleanup(&nfsls); - if (*npp) + if (*npp) { nfs_node_unlock(*npp); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -2610,26 +2810,30 @@ nfs_mount_connect_thread(void *arg, __unused wait_result_t wr) } /* save the best error */ - if (nfs_connect_error_class(error) >= nfs_connect_error_class(savederror)) + if (nfs_connect_error_class(error) >= nfs_connect_error_class(savederror)) { savederror = error; + } if (done) { error = savederror; break; } /* pause before next attempt */ - if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) + if ((error = nfs_sigintr(nmp, NULL, current_thread(), 0))) { break; - error = tsleep(nmp, PSOCK|slpflag, "nfs_mount_connect_retry", 2*hz); - if (error && (error != EWOULDBLOCK)) + } + error = tsleep(nmp, PSOCK | slpflag, "nfs_mount_connect_retry", 2 * hz); + if (error && (error != EWOULDBLOCK)) { break; + } error = savederror; } /* update status of mount connect */ lck_mtx_lock(&nmp->nm_lock); - if (!nmp->nm_mounterror) + if (!nmp->nm_mounterror) { nmp->nm_mounterror = error; + } nmp->nm_state &= ~NFSSTA_MOUNT_THREAD; lck_mtx_unlock(&nmp->nm_lock); wakeup(&nmp->nm_nss); @@ -2661,15 +2865,17 @@ nfs_mount_connect(struct nfsmount *nmp) /* wait until mount connect thread is finished/gone */ while (nmp->nm_state & NFSSTA_MOUNT_THREAD) { - error = msleep(&nmp->nm_nss, &nmp->nm_lock, slpflag|PSOCK, "nfsconnectthread", &ts); + error = msleep(&nmp->nm_nss, &nmp->nm_lock, slpflag | PSOCK, "nfsconnectthread", &ts); if ((error && (error != EWOULDBLOCK)) || ((error = nfs_sigintr(nmp, NULL, current_thread(), 1)))) { /* record 
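/*
 * The I/O-size adjustment above rounds the server's advertised maximum
 * down to a multiple of NFS_FABLKSIZE, falling back to the raw maximum
 * if that rounds to zero; the mask trick relies on the block size being
 * a power of two. A sketch, with FABLKSIZE as a stand-in constant:
 */
#include <stdint.h>

#define FABLKSIZE 512u                       /* stand-in for NFS_FABLKSIZE */

static uint32_t
clamp_iosize(uint32_t wanted, uint64_t server_max)
{
    if (server_max > 0 && server_max < wanted) {
        uint32_t r = (uint32_t)server_max & ~(FABLKSIZE - 1);
        return r ? r : (uint32_t)server_max; /* never round down to zero */
    }
    return wanted;                           /* server imposes no tighter cap */
}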
error */ - if (!nmp->nm_mounterror) + if (!nmp->nm_mounterror) { nmp->nm_mounterror = error; + } /* signal the thread that we are aborting */ nmp->nm_sockflags |= NMSOCK_UNMOUNT; - if (nmp->nm_nss) + if (nmp->nm_nss) { wakeup(nmp->nm_nss); + } /* and continue waiting on it to finish */ slpflag = 0; } @@ -2679,7 +2885,7 @@ nfs_mount_connect(struct nfsmount *nmp) /* grab mount connect status */ error = nmp->nm_mounterror; - return (error); + return error; } /* Table of maximum minor version for a given version */ @@ -2723,27 +2929,27 @@ mountnfs( .selected = NFS_MAX_ETYPES, .etypes = { NFS_AES256_CTS_HMAC_SHA1_96, NFS_AES128_CTS_HMAC_SHA1_96, - NFS_DES3_CBC_SHA1_KD - } + NFS_DES3_CBC_SHA1_KD} }; /* make sure mbuf constants are set up */ - if (!nfs_mbuf_mhlen) + if (!nfs_mbuf_mhlen) { nfs_mbuf_init(); + } if (vfs_flags(mp) & MNT_UPDATE) { nmp = VFSTONFS(mp); /* update paths, file handles, etc, here XXX */ xb_free(xdrbuf); - return (0); + return 0; } else { /* allocate an NFS mount structure for this mount */ MALLOC_ZONE(nmp, struct nfsmount *, - sizeof (struct nfsmount), M_NFSMNT, M_WAITOK); + sizeof(struct nfsmount), M_NFSMNT, M_WAITOK); if (!nmp) { xb_free(xdrbuf); - return (ENOMEM); + return ENOMEM; } - bzero((caddr_t)nmp, sizeof (struct nfsmount)); + bzero((caddr_t)nmp, sizeof(struct nfsmount)); lck_mtx_init(&nmp->nm_lock, nfs_mount_grp, LCK_ATTR_NULL); TAILQ_INIT(&nmp->nm_resendq); TAILQ_INIT(&nmp->nm_iodq); @@ -2781,11 +2987,13 @@ mountnfs( nmp->nm_numgrps = NFS_MAXGRPS; nmp->nm_readahead = NFS_DEFRAHEAD; nmp->nm_tprintf_delay = nfs_tprintf_delay; - if (nmp->nm_tprintf_delay < 0) + if (nmp->nm_tprintf_delay < 0) { nmp->nm_tprintf_delay = 0; + } nmp->nm_tprintf_initial_delay = nfs_tprintf_initial_delay; - if (nmp->nm_tprintf_initial_delay < 0) + if (nmp->nm_tprintf_initial_delay < 0) { nmp->nm_tprintf_initial_delay = 0; + } nmp->nm_acregmin = NFS_MINATTRTIMO; nmp->nm_acregmax = NFS_MAXATTRTIMO; nmp->nm_acdirmin = NFS_MINDIRATTRTIMO; @@ -2806,11 +3014,11 @@ mountnfs( mflags_mask = nmp->nm_mflags_mask; /* set up NFS mount with args */ - xb_init_buffer(&xb, xdrbuf, 2*XDRWORD); + xb_init_buffer(&xb, xdrbuf, 2 * XDRWORD); xb_get_32(error, &xb, val); /* version */ xb_get_32(error, &xb, argslength); /* args length */ nfsmerr_if(error); - xb_init_buffer(&xb, xdrbuf, argslength); /* restart parsing with actual buffer length */ + xb_init_buffer(&xb, xdrbuf, argslength); /* restart parsing with actual buffer length */ xb_get_32(error, &xb, val); /* version */ xb_get_32(error, &xb, argslength); /* args length */ xb_get_32(error, &xb, val); /* XDR args version */ @@ -2821,8 +3029,9 @@ mountnfs( xb_get_bitmap(error, &xb, mattrs, len); /* mount attribute bitmap */ attrslength = 0; xb_get_32(error, &xb, attrslength); /* attrs length */ - if (!error && (attrslength > (argslength - ((4+NFS_MATTR_BITMAP_LEN+1)*XDRWORD)))) + if (!error && (attrslength > (argslength - ((4 + NFS_MATTR_BITMAP_LEN + 1) * XDRWORD)))) { error = EINVAL; + } nfsmerr_if(error); if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FLAGS)) { len = NFS_MFLAG_BITMAP_LEN; @@ -2837,25 +3046,30 @@ mountnfs( } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) { /* Can't specify a single version and a range */ - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) { error = EINVAL; + } xb_get_32(error, &xb, nmp->nm_vers); if (nmp->nm_vers > NFS_MAX_SUPPORTED_VERSION || - nmp->nm_vers < NFS_VER2) + nmp->nm_vers < NFS_VER2) { error = EINVAL; - if (NFS_BITMAP_ISSET(mattrs, 
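/*
 * The connect loop above remembers the "best" error across retries so a
 * later transient failure does not mask a more meaningful one. The
 * ordering lives in nfs_connect_error_class(), whose internals are not
 * shown in this excerpt; error_class below is a stand-in for it.
 */
static int
prefer_error(int saved, int latest, int (*error_class)(int))
{
    return (error_class(latest) >= error_class(saved)) ? latest : saved;
}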
NFS_MATTR_NFS_MINOR_VERSION)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) { xb_get_32(error, &xb, nmp->nm_minor_vers); - else + } else { nmp->nm_minor_vers = maxminorverstab[nmp->nm_vers]; - if (nmp->nm_minor_vers > maxminorverstab[nmp->nm_vers]) + } + if (nmp->nm_minor_vers > maxminorverstab[nmp->nm_vers]) { error = EINVAL; - nmp->nm_max_vers = nmp->nm_min_vers = - VER2PVER(nmp->nm_vers, nmp->nm_minor_vers); - } + } + nmp->nm_max_vers = nmp->nm_min_vers = + VER2PVER(nmp->nm_vers, nmp->nm_minor_vers); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) { /* should have also gotten NFS version (and already gotten minor version) */ - if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) + if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) { error = EINVAL; + } } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) { xb_get_32(error, &xb, nmp->nm_min_vers); @@ -2863,17 +3077,22 @@ mountnfs( if ((nmp->nm_min_vers > nmp->nm_max_vers) || (PVER2MAJOR(nmp->nm_max_vers) > NFS_MAX_SUPPORTED_VERSION) || (PVER2MINOR(nmp->nm_min_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_min_vers)]) || - (PVER2MINOR(nmp->nm_max_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_max_vers)])) + (PVER2MINOR(nmp->nm_max_vers) > maxminorverstab[PVER2MAJOR(nmp->nm_max_vers)])) { error = EINVAL; + } } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) { xb_get_32(error, &xb, nmp->nm_rsize); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) { xb_get_32(error, &xb, nmp->nm_wsize); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) { xb_get_32(error, &xb, nmp->nm_readdirsize); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) { xb_get_32(error, &xb, nmp->nm_readahead); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) { xb_get_32(error, &xb, nmp->nm_acregmin); xb_skip(error, &xb, XDRWORD); @@ -2901,7 +3120,7 @@ mountnfs( error = EINVAL; break; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case NFS_LOCK_MODE_ENABLED: nmp->nm_lockmode = val; break; @@ -2913,11 +3132,12 @@ mountnfs( if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) { uint32_t seccnt; xb_get_32(error, &xb, seccnt); - if (!error && ((seccnt < 1) || (seccnt > NX_MAX_SEC_FLAVORS))) + if (!error && ((seccnt < 1) || (seccnt > NX_MAX_SEC_FLAVORS))) { error = EINVAL; + } nfsmerr_if(error); nmp->nm_sec.count = seccnt; - for (i=0; i < seccnt; i++) { + for (i = 0; i < seccnt; i++) { xb_get_32(error, &xb, nmp->nm_sec.flavors[i]); /* Check for valid security flavor */ switch (nmp->nm_sec.flavors[i]) { @@ -2937,15 +3157,16 @@ mountnfs( if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) { uint32_t etypecnt; xb_get_32(error, &xb, etypecnt); - if (!error && ((etypecnt < 1) || (etypecnt > NFS_MAX_ETYPES))) + if (!error && ((etypecnt < 1) || (etypecnt > NFS_MAX_ETYPES))) { error = EINVAL; + } nfsmerr_if(error); nmp->nm_etype.count = etypecnt; xb_get_32(error, &xb, nmp->nm_etype.selected); nfsmerr_if(error); if (etypecnt) { nmp->nm_etype.selected = etypecnt; /* Nothing is selected yet, so set selected to count */ - for (i=0; i < etypecnt; i++) { + for (i = 0; i < etypecnt; i++) { xb_get_32(error, &xb, nmp->nm_etype.etypes[i]); /* Check for valid encryption type */ switch (nmp->nm_etype.etypes[i]) { @@ -2959,14 +3180,16 @@ mountnfs( } } } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) + if 
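/*
 * The version handling above folds a (major, minor) pair into a single
 * packed value via VER2PVER and unpacks it with PVER2MAJOR/PVER2MINOR.
 * Assuming (unverified here) a high-half/low-half layout, the range
 * check reads like this sketch:
 */
#include <stdint.h>

#define PVER(maj, min)  (((uint32_t)(maj) << 16) | (uint16_t)(min))
#define PVER_MAJOR(v)   ((v) >> 16)
#define PVER_MINOR(v)   ((v) & 0xffffu)

static int
version_range_ok(uint32_t minv, uint32_t maxv,
                 const uint32_t *maxminor, uint32_t maxmajor)
{
    return minv <= maxv &&
        PVER_MAJOR(maxv) <= maxmajor &&      /* major within support */
        PVER_MINOR(minv) <= maxminor[PVER_MAJOR(minv)] &&
        PVER_MINOR(maxv) <= maxminor[PVER_MAJOR(maxv)];
}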
(NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) { xb_get_32(error, &xb, nmp->nm_numgrps); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) { char sotype[6]; xb_get_32(error, &xb, val); - if (!error && ((val < 3) || (val > 5))) + if (!error && ((val < 3) || (val > 5))) { error = EINVAL; + } nfsmerr_if(error); error = xb_get_bytes(&xb, sotype, val, 0); nfsmerr_if(error); @@ -2997,31 +3220,36 @@ mountnfs( error = EINVAL; } if (!error && (nmp->nm_vers >= NFS_VER4) && nmp->nm_sotype && - (nmp->nm_sotype != SOCK_STREAM)) - error = EINVAL; /* NFSv4 is only allowed over TCP. */ + (nmp->nm_sotype != SOCK_STREAM)) { + error = EINVAL; /* NFSv4 is only allowed over TCP. */ + } nfsmerr_if(error); } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) { xb_get_32(error, &xb, nmp->nm_nfsport); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) { xb_get_32(error, &xb, nmp->nm_mountport); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) { /* convert from time to 0.1s units */ xb_get_32(error, &xb, nmp->nm_timeo); xb_get_32(error, &xb, val); nfsmerr_if(error); - if (val >= 1000000000) + if (val >= 1000000000) { error = EINVAL; + } nfsmerr_if(error); nmp->nm_timeo *= 10; - nmp->nm_timeo += (val+100000000-1)/100000000; + nmp->nm_timeo += (val + 100000000 - 1) / 100000000; /* now convert to ticks */ nmp->nm_timeo = (nmp->nm_timeo * NFS_HZ + 5) / 10; } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) { xb_get_32(error, &xb, val); - if (!error && (val > 1)) + if (!error && (val > 1)) { nmp->nm_retry = val; + } } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) { xb_get_32(error, &xb, nmp->nm_deadtimeout); @@ -3029,15 +3257,17 @@ mountnfs( } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FH)) { nfsmerr_if(error); - MALLOC(nmp->nm_fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK|M_ZERO); - if (!nmp->nm_fh) + MALLOC(nmp->nm_fh, fhandle_t *, sizeof(fhandle_t), M_TEMP, M_WAITOK | M_ZERO); + if (!nmp->nm_fh) { error = ENOMEM; + } xb_get_32(error, &xb, nmp->nm_fh->fh_len); nfsmerr_if(error); - if ((size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data)) + if ((size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data)) { error = EINVAL; - else + } else { error = xb_get_bytes(&xb, (char*)&nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len, 0); + } } nfsmerr_if(error); if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) { @@ -3048,60 +3278,72 @@ mountnfs( xb_get_32(error, &xb, nmp->nm_locations.nl_numlocs); /* fs location count */ /* sanity check location count */ - if (!error && ((nmp->nm_locations.nl_numlocs < 1) || (nmp->nm_locations.nl_numlocs > 256))) + if (!error && ((nmp->nm_locations.nl_numlocs < 1) || (nmp->nm_locations.nl_numlocs > 256))) { error = EINVAL; + } nfsmerr_if(error); - MALLOC(nmp->nm_locations.nl_locations, struct nfs_fs_location **, nmp->nm_locations.nl_numlocs * sizeof(struct nfs_fs_location*), M_TEMP, M_WAITOK|M_ZERO); - if (!nmp->nm_locations.nl_locations) + MALLOC(nmp->nm_locations.nl_locations, struct nfs_fs_location **, nmp->nm_locations.nl_numlocs * sizeof(struct nfs_fs_location*), M_TEMP, M_WAITOK | M_ZERO); + if (!nmp->nm_locations.nl_locations) { error = ENOMEM; + } for (loc = 0; loc < nmp->nm_locations.nl_numlocs; loc++) { nfsmerr_if(error); - MALLOC(fsl, struct nfs_fs_location *, sizeof(struct nfs_fs_location), M_TEMP, M_WAITOK|M_ZERO); - if (!fsl) + MALLOC(fsl, struct nfs_fs_location *, sizeof(struct nfs_fs_location), M_TEMP, M_WAITOK | M_ZERO); + 
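/*
 * The request-timeout hunk above converts a (seconds, nanoseconds) pair
 * to tenths of a second, rounding the nanoseconds up, and then to
 * scheduler ticks, rounding to nearest. The same arithmetic as a
 * sketch, with HZ standing in for NFS_HZ:
 */
#include <stdint.h>

#define HZ 10u                                /* illustrative tick rate */

static uint32_t
timeout_to_ticks(uint32_t sec, uint32_t nsec)
{
    uint32_t tenths = sec * 10 + (nsec + 100000000u - 1) / 100000000u;

    return (tenths * HZ + 5) / 10;            /* round to nearest tick */
}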
if (!fsl) { error = ENOMEM; + } nmp->nm_locations.nl_locations[loc] = fsl; xb_get_32(error, &xb, fsl->nl_servcount); /* server count */ /* sanity check server count */ - if (!error && ((fsl->nl_servcount < 1) || (fsl->nl_servcount > 256))) + if (!error && ((fsl->nl_servcount < 1) || (fsl->nl_servcount > 256))) { error = EINVAL; + } nfsmerr_if(error); - MALLOC(fsl->nl_servers, struct nfs_fs_server **, fsl->nl_servcount * sizeof(struct nfs_fs_server*), M_TEMP, M_WAITOK|M_ZERO); - if (!fsl->nl_servers) + MALLOC(fsl->nl_servers, struct nfs_fs_server **, fsl->nl_servcount * sizeof(struct nfs_fs_server*), M_TEMP, M_WAITOK | M_ZERO); + if (!fsl->nl_servers) { error = ENOMEM; + } for (serv = 0; serv < fsl->nl_servcount; serv++) { nfsmerr_if(error); - MALLOC(fss, struct nfs_fs_server *, sizeof(struct nfs_fs_server), M_TEMP, M_WAITOK|M_ZERO); - if (!fss) + MALLOC(fss, struct nfs_fs_server *, sizeof(struct nfs_fs_server), M_TEMP, M_WAITOK | M_ZERO); + if (!fss) { error = ENOMEM; + } fsl->nl_servers[serv] = fss; xb_get_32(error, &xb, val); /* server name length */ /* sanity check server name length */ - if (!error && ((val < 1) || (val > MAXPATHLEN))) + if (!error && ((val < 1) || (val > MAXPATHLEN))) { error = EINVAL; + } nfsmerr_if(error); - MALLOC(fss->ns_name, char *, val+1, M_TEMP, M_WAITOK|M_ZERO); - if (!fss->ns_name) + MALLOC(fss->ns_name, char *, val + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!fss->ns_name) { error = ENOMEM; + } nfsmerr_if(error); error = xb_get_bytes(&xb, fss->ns_name, val, 0); /* server name */ xb_get_32(error, &xb, fss->ns_addrcount); /* address count */ /* sanity check address count (OK to be zero) */ - if (!error && (fss->ns_addrcount > 256)) + if (!error && (fss->ns_addrcount > 256)) { error = EINVAL; + } nfsmerr_if(error); if (fss->ns_addrcount > 0) { - MALLOC(fss->ns_addresses, char **, fss->ns_addrcount * sizeof(char *), M_TEMP, M_WAITOK|M_ZERO); - if (!fss->ns_addresses) + MALLOC(fss->ns_addresses, char **, fss->ns_addrcount * sizeof(char *), M_TEMP, M_WAITOK | M_ZERO); + if (!fss->ns_addresses) { error = ENOMEM; + } for (addr = 0; addr < fss->ns_addrcount; addr++) { xb_get_32(error, &xb, val); /* address length */ /* sanity check address length */ - if (!error && ((val < 1) || (val > 128))) + if (!error && ((val < 1) || (val > 128))) { error = EINVAL; + } nfsmerr_if(error); - MALLOC(fss->ns_addresses[addr], char *, val+1, M_TEMP, M_WAITOK|M_ZERO); - if (!fss->ns_addresses[addr]) + MALLOC(fss->ns_addresses[addr], char *, val + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!fss->ns_addresses[addr]) { error = ENOMEM; + } nfsmerr_if(error); error = xb_get_bytes(&xb, fss->ns_addresses[addr], val, 0); /* address */ } @@ -3113,13 +3355,15 @@ mountnfs( fsp = &fsl->nl_path; xb_get_32(error, &xb, fsp->np_compcount); /* component count */ /* sanity check component count */ - if (!error && (fsp->np_compcount > MAXPATHLEN)) + if (!error && (fsp->np_compcount > MAXPATHLEN)) { error = EINVAL; + } nfsmerr_if(error); if (fsp->np_compcount) { - MALLOC(fsp->np_components, char **, fsp->np_compcount * sizeof(char*), M_TEMP, M_WAITOK|M_ZERO); - if (!fsp->np_components) + MALLOC(fsp->np_components, char **, fsp->np_compcount * sizeof(char*), M_TEMP, M_WAITOK | M_ZERO); + if (!fsp->np_components) { error = ENOMEM; + } } for (comp = 0; comp < fsp->np_compcount; comp++) { xb_get_32(error, &xb, val); /* component length */ @@ -3138,12 +3382,14 @@ mountnfs( } continue; } - if (!error && ((val < 1) || (val > MAXPATHLEN))) + if (!error && ((val < 1) || (val > MAXPATHLEN))) { error = EINVAL; + } 
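/*
 * Every count parsed from the XDR buffer in the hunks above (locations,
 * servers, addresses, name lengths) is bounds-checked before it sizes an
 * allocation, so a corrupt buffer cannot drive an oversized calloc. A
 * sketch of that recurring pattern, with illustrative names:
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

static void *
alloc_counted(uint32_t count, uint32_t minc, uint32_t maxc,
              size_t elemsize, int *errp)
{
    void *arr;

    if (count < minc || count > maxc) {
        *errp = EINVAL;                  /* reject implausible counts */
        return NULL;
    }
    if ((arr = calloc(count, elemsize)) == NULL)
        *errp = ENOMEM;
    return arr;
}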
nfsmerr_if(error); - MALLOC(fsp->np_components[comp], char *, val+1, M_TEMP, M_WAITOK|M_ZERO); - if (!fsp->np_components[comp]) + MALLOC(fsp->np_components[comp], char *, val + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!fsp->np_components[comp]) { error = ENOMEM; + } nfsmerr_if(error); error = xb_get_bytes(&xb, fsp->np_components[comp], val, 0); /* component */ } @@ -3151,17 +3397,20 @@ mountnfs( xb_skip(error, &xb, val); /* skip fs location info */ } } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) { xb_skip(error, &xb, XDRWORD); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFROM)) { xb_get_32(error, &xb, len); nfsmerr_if(error); val = len; - if (val >= sizeof(vfs_statfs(mp)->f_mntfromname)) + if (val >= sizeof(vfs_statfs(mp)->f_mntfromname)) { val = sizeof(vfs_statfs(mp)->f_mntfromname) - 1; + } error = xb_get_bytes(&xb, vfs_statfs(mp)->f_mntfromname, val, 0); - if ((len - val) > 0) + if ((len - val) > 0) { xb_skip(error, &xb, len - val); + } nfsmerr_if(error); vfs_statfs(mp)->f_mntfromname[val] = '\0'; } @@ -3169,13 +3418,15 @@ mountnfs( if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) { xb_get_32(error, &xb, len); - if (!error && ((len < 1) || (len > MAXPATHLEN))) - error=EINVAL; + if (!error && ((len < 1) || (len > MAXPATHLEN))) { + error = EINVAL; + } nfsmerr_if(error); /* allocate an extra byte for a leading '@' if its not already prepended to the realm */ - MALLOC(nmp->nm_realm, char *, len+2, M_TEMP, M_WAITOK|M_ZERO); - if (!nmp->nm_realm) + MALLOC(nmp->nm_realm, char *, len + 2, M_TEMP, M_WAITOK | M_ZERO); + if (!nmp->nm_realm) { error = ENOMEM; + } nfsmerr_if(error); error = xb_get_bytes(&xb, nmp->nm_realm, len, 0); if (error == 0 && *nmp->nm_realm != '@') { @@ -3187,12 +3438,14 @@ mountnfs( if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) { xb_get_32(error, &xb, len); - if (!error && ((len < 1) || (len > MAXPATHLEN))) - error=EINVAL; + if (!error && ((len < 1) || (len > MAXPATHLEN))) { + error = EINVAL; + } nfsmerr_if(error); - MALLOC(nmp->nm_principal, char *, len+1, M_TEMP, M_WAITOK|M_ZERO); - if (!nmp->nm_principal) + MALLOC(nmp->nm_principal, char *, len + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!nmp->nm_principal) { error = ENOMEM; + } nfsmerr_if(error); error = xb_get_bytes(&xb, nmp->nm_principal, len, 0); } @@ -3200,12 +3453,14 @@ mountnfs( if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) { xb_get_32(error, &xb, len); - if (!error && ((len < 1) || (len > MAXPATHLEN))) - error=EINVAL; + if (!error && ((len < 1) || (len > MAXPATHLEN))) { + error = EINVAL; + } nfsmerr_if(error); - MALLOC(nmp->nm_sprinc, char *, len+1, M_TEMP, M_WAITOK|M_ZERO); - if (!nmp->nm_sprinc) + MALLOC(nmp->nm_sprinc, char *, len + 1, M_TEMP, M_WAITOK | M_ZERO); + if (!nmp->nm_sprinc) { error = ENOMEM; + } nfsmerr_if(error); error = xb_get_bytes(&xb, nmp->nm_sprinc, len, 0); } @@ -3215,44 +3470,54 @@ mountnfs( * Sanity check/finalize settings. 
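/*
 * The realm hunk above allocates len + 2 bytes so a leading '@' can be
 * prepended when the caller passed a bare realm name. The shift itself
 * falls outside this excerpt, so the memmove below is an assumption
 * about the intent, sketched in user-space terms:
 */
#include <string.h>

static void
ensure_leading_at(char *realm, size_t buflen)  /* buflen >= strlen(realm)+2 */
{
    size_t n = strlen(realm);

    if (realm[0] != '@' && n + 2 <= buflen) {
        memmove(realm + 1, realm, n + 1);      /* shift, including the NUL */
        realm[0] = '@';
    }
}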
*/ - if (nmp->nm_timeo < NFS_MINTIMEO) + if (nmp->nm_timeo < NFS_MINTIMEO) { nmp->nm_timeo = NFS_MINTIMEO; - else if (nmp->nm_timeo > NFS_MAXTIMEO) + } else if (nmp->nm_timeo > NFS_MAXTIMEO) { nmp->nm_timeo = NFS_MAXTIMEO; - if (nmp->nm_retry > NFS_MAXREXMIT) + } + if (nmp->nm_retry > NFS_MAXREXMIT) { nmp->nm_retry = NFS_MAXREXMIT; + } - if (nmp->nm_numgrps > NFS_MAXGRPS) + if (nmp->nm_numgrps > NFS_MAXGRPS) { nmp->nm_numgrps = NFS_MAXGRPS; - if (nmp->nm_readahead > NFS_MAXRAHEAD) + } + if (nmp->nm_readahead > NFS_MAXRAHEAD) { nmp->nm_readahead = NFS_MAXRAHEAD; - if (nmp->nm_acregmin > nmp->nm_acregmax) + } + if (nmp->nm_acregmin > nmp->nm_acregmax) { nmp->nm_acregmin = nmp->nm_acregmax; - if (nmp->nm_acdirmin > nmp->nm_acdirmax) + } + if (nmp->nm_acdirmin > nmp->nm_acdirmax) { nmp->nm_acdirmin = nmp->nm_acdirmax; + } /* need at least one fs location */ - if (nmp->nm_locations.nl_numlocs < 1) + if (nmp->nm_locations.nl_numlocs < 1) { error = EINVAL; + } nfsmerr_if(error); /* init mount's mntfromname to first location */ - if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) + if (!NM_OMATTR_GIVEN(nmp, MNTFROM)) { nfs_location_mntfromname(&nmp->nm_locations, firstloc, - vfs_statfs(mp)->f_mntfromname, sizeof(vfs_statfs(mp)->f_mntfromname), 0); + vfs_statfs(mp)->f_mntfromname, sizeof(vfs_statfs(mp)->f_mntfromname), 0); + } /* Need to save the mounting credential for v4. */ nmp->nm_mcred = vfs_context_ucred(ctx); - if (IS_VALID_CRED(nmp->nm_mcred)) + if (IS_VALID_CRED(nmp->nm_mcred)) { kauth_cred_ref(nmp->nm_mcred); + } /* * If a reserved port is required, check for that privilege. * (Note that mirror mounts are exempt because the privilege was * already checked for the original mount.) */ - if (NMFLAG(nmp, RESVPORT) && !vfs_iskernelmount(mp)) + if (NMFLAG(nmp, RESVPORT) && !vfs_iskernelmount(mp)) { error = priv_check_cred(nmp->nm_mcred, PRIV_NETINET_RESERVEDPORT, 0); + } nfsmerr_if(error); /* do mount's initial socket connection */ @@ -3260,20 +3525,24 @@ mountnfs( nfsmerr_if(error); /* set up the version-specific function tables */ - if (nmp->nm_vers < NFS_VER4) + if (nmp->nm_vers < NFS_VER4) { nmp->nm_funcs = &nfs3_funcs; - else + } else { nmp->nm_funcs = &nfs4_funcs; + } /* sanity check settings now that version/connection is set */ - if (nmp->nm_vers == NFS_VER2) /* ignore RDIRPLUS on NFSv2 */ + if (nmp->nm_vers == NFS_VER2) { /* ignore RDIRPLUS on NFSv2 */ NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_RDIRPLUS); + } if (nmp->nm_vers >= NFS_VER4) { - if (NFS_BITMAP_ISSET(nmp->nm_flags, NFS_MFLAG_ACLONLY)) /* aclonly trumps noacl */ + if (NFS_BITMAP_ISSET(nmp->nm_flags, NFS_MFLAG_ACLONLY)) { /* aclonly trumps noacl */ NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOACL); + } NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_CALLUMNT); - if (nmp->nm_lockmode != NFS_LOCK_MODE_ENABLED) + if (nmp->nm_lockmode != NFS_LOCK_MODE_ENABLED) { error = EINVAL; /* disabled/local lock mode only allowed on v2/v3 */ + } } else { /* ignore these if not v4 */ NFS_BITMAP_CLR(nmp->nm_flags, NFS_MFLAG_NOCALLBACK); @@ -3285,38 +3554,48 @@ mountnfs( if (nmp->nm_sotype == SOCK_DGRAM) { /* I/O size defaults for UDP are different */ - if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) + if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) { nmp->nm_rsize = NFS_DGRAM_RSIZE; - if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) + } + if (!NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) { nmp->nm_wsize = NFS_DGRAM_WSIZE; + } } /* round down I/O sizes to multiple of NFS_FABLKSIZE */ nmp->nm_rsize &= ~(NFS_FABLKSIZE - 1); - if (nmp->nm_rsize <= 0) + if 
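/*
 * The finalization hunks above clamp each tunable into its legal range
 * (timeout between NFS_MINTIMEO and NFS_MAXTIMEO, retry and group counts
 * to their maxima, and so on). All of them are instances of one clamp:
 */
#include <stdint.h>

static uint32_t
clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

/* e.g. (illustrative): nm_timeo = clamp_u32(nm_timeo, MINTIMEO, MAXTIMEO); */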
(nmp->nm_rsize <= 0) { nmp->nm_rsize = NFS_FABLKSIZE; + } nmp->nm_wsize &= ~(NFS_FABLKSIZE - 1); - if (nmp->nm_wsize <= 0) + if (nmp->nm_wsize <= 0) { nmp->nm_wsize = NFS_FABLKSIZE; + } /* and limit I/O sizes to maximum allowed */ maxio = (nmp->nm_vers == NFS_VER2) ? NFS_V2MAXDATA : - (nmp->nm_sotype == SOCK_DGRAM) ? NFS_MAXDGRAMDATA : NFS_MAXDATA; - if (maxio > NFS_MAXBSIZE) + (nmp->nm_sotype == SOCK_DGRAM) ? NFS_MAXDGRAMDATA : NFS_MAXDATA; + if (maxio > NFS_MAXBSIZE) { maxio = NFS_MAXBSIZE; - if (nmp->nm_rsize > maxio) + } + if (nmp->nm_rsize > maxio) { nmp->nm_rsize = maxio; - if (nmp->nm_wsize > maxio) + } + if (nmp->nm_wsize > maxio) { nmp->nm_wsize = maxio; + } - if (nmp->nm_readdirsize > maxio) + if (nmp->nm_readdirsize > maxio) { nmp->nm_readdirsize = maxio; - if (nmp->nm_readdirsize > nmp->nm_rsize) + } + if (nmp->nm_readdirsize > nmp->nm_rsize) { nmp->nm_readdirsize = nmp->nm_rsize; + } /* Set up the sockets and related info */ - if (nmp->nm_sotype == SOCK_DGRAM) + if (nmp->nm_sotype == SOCK_DGRAM) { TAILQ_INIT(&nmp->nm_cwndq); + } /* * Get the root node/attributes from the NFS server and @@ -3347,8 +3626,9 @@ mountnfs( if ((error = nmp->nm_funcs->nf_update_statfs(nmp, ctx))) { int error2 = vnode_getwithref(*vpp); vnode_rele(*vpp); - if (!error2) + if (!error2) { vnode_put(*vpp); + } vnode_recycle(*vpp); goto nfsmerr; } @@ -3358,7 +3638,7 @@ mountnfs( sbp->f_bfree = nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize; sbp->f_bavail = nmp->nm_fsattr.nfsa_space_avail / sbp->f_bsize; sbp->f_bused = (nmp->nm_fsattr.nfsa_space_total / sbp->f_bsize) - - (nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize); + (nmp->nm_fsattr.nfsa_space_free / sbp->f_bsize); sbp->f_files = nmp->nm_fsattr.nfsa_files_total; sbp->f_ffree = nmp->nm_fsattr.nfsa_files_free; sbp->f_iosize = nfs_iosize; @@ -3372,18 +3652,20 @@ mountnfs( * buffers into multiple requests if the buffer size is * larger than the I/O size. */ -#ifndef CONFIG_EMBEDDED +#ifndef CONFIG_EMBEDDED iosize = max(nmp->nm_rsize, nmp->nm_wsize); - if (iosize < PAGE_SIZE) + if (iosize < PAGE_SIZE) { iosize = PAGE_SIZE; + } #else iosize = PAGE_SIZE; #endif nmp->nm_biosize = trunc_page_32(iosize); /* For NFSv3 and greater, there is a (relatively) reliable ACCESS call. 
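/*
 * On non-embedded builds the hunk above sets the buffer I/O size to the
 * larger of the read and write sizes, at least one page, truncated to a
 * page multiple (trunc_page_32). The same computation as a sketch, with
 * a stand-in page size:
 */
#include <stdint.h>

#define PAGE_SZ 4096u                    /* illustrative page size */

static uint32_t
compute_biosize(uint32_t rsize, uint32_t wsize)
{
    uint32_t io = rsize > wsize ? rsize : wsize;

    if (io < PAGE_SZ)
        io = PAGE_SZ;                    /* never below one page */
    return io & ~(PAGE_SZ - 1);          /* trunc_page_32 analogue */
}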
*/ - if (nmp->nm_vers > NFS_VER2) + if (nmp->nm_vers > NFS_VER2) { vfs_setauthopaqueaccess(mp); + } switch (nmp->nm_lockmode) { case NFS_LOCK_MODE_DISABLED: @@ -3393,8 +3675,9 @@ mountnfs( break; case NFS_LOCK_MODE_ENABLED: default: - if (nmp->nm_vers <= NFS_VER3) + if (nmp->nm_vers <= NFS_VER3) { nfs_lockd_mount_register(nmp); + } break; } @@ -3402,10 +3685,10 @@ mountnfs( lck_mtx_lock(&nmp->nm_lock); nmp->nm_state |= NFSSTA_MOUNTED; lck_mtx_unlock(&nmp->nm_lock); - return (0); + return 0; nfsmerr: nfs_mount_drain_and_cleanup(nmp); - return (error); + return error; } #if CONFIG_TRIGGERS @@ -3438,21 +3721,23 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) struct nfs_fs_locations nfsls; referral = (np->n_vattr.nva_flags & NFS_FFLAG_TRIGGER_REFERRAL); - if (referral) + if (referral) { bzero(&nfsls, sizeof(nfsls)); + } xb_init(&xbnew, 0); - if (!nmp || (nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD))) - return (ENXIO); + if (!nmp || (nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD))) { + return ENXIO; + } /* allocate a couple path buffers we need */ - MALLOC_ZONE(mntfromname, char *, pathbuflen, M_NAMEI, M_WAITOK); + MALLOC_ZONE(mntfromname, char *, pathbuflen, M_NAMEI, M_WAITOK); if (!mntfromname) { error = ENOMEM; goto nfsmerr; } - MALLOC_ZONE(path, char *, pathbuflen, M_NAMEI, M_WAITOK); + MALLOC_ZONE(path, char *, pathbuflen, M_NAMEI, M_WAITOK); if (!path) { error = ENOMEM; goto nfsmerr; @@ -3474,8 +3759,8 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) * We'll be adding those to each fs location path in the new args. */ nlen = strlcpy(mntfromname, vfs_statfs(nmp->nm_mountp)->f_mntfromname, MAXPATHLEN); - if ((nlen > 0) && (mntfromname[nlen-1] == '/')) { /* avoid double '/' in new name */ - mntfromname[nlen-1] = '\0'; + if ((nlen > 0) && (mntfromname[nlen - 1] == '/')) { /* avoid double '/' in new name */ + mntfromname[nlen - 1] = '\0'; nlen--; } relpath = mntfromname + nlen; @@ -3486,15 +3771,18 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) } /* count the number of components in relpath */ p = relpath; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } relpathcomps = 0; while (*p) { relpathcomps++; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; - while (*p && (*p == '/')) + } + while (*p && (*p == '/')) { p++; + } } /* grab a copy of the file system type */ @@ -3508,8 +3796,9 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) } else { error = nfs4_get_fs_locations(nmp, dnp, NULL, 0, vname, ctx, &nfsls); vnode_putname(vname); - if (!error && (nfsls.nl_numlocs < 1)) + if (!error && (nfsls.nl_numlocs < 1)) { error = ENOENT; + } } nfsmerr_if(error); } @@ -3518,23 +3807,23 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) #define xb_copy_32(E, XBSRC, XBDST, V) \ do { \ - if (E) break; \ - xb_get_32((E), (XBSRC), (V)); \ - if (skipcopy) break; \ - xb_add_32((E), (XBDST), (V)); \ + if (E) break; \ + xb_get_32((E), (XBSRC), (V)); \ + if (skipcopy) break; \ + xb_add_32((E), (XBDST), (V)); \ } while (0) #define xb_copy_opaque(E, XBSRC, XBDST) \ do { \ - uint32_t __count, __val; \ - xb_copy_32((E), (XBSRC), (XBDST), __count); \ - if (E) break; \ - __count = nfsm_rndup(__count); \ - __count /= XDRWORD; \ - while (__count-- > 0) \ - xb_copy_32((E), (XBSRC), (XBDST), __val); \ + uint32_t __count, __val; \ + xb_copy_32((E), (XBSRC), (XBDST), __count); \ + if (E) break; \ + __count = nfsm_rndup(__count); \ + __count /= XDRWORD; \ + while (__count-- > 0) \ + 
xb_copy_32((E), (XBSRC), (XBDST), __val); \ } while (0) - xb_init_buffer(&xb, nmp->nm_args, 2*XDRWORD); + xb_init_buffer(&xb, nmp->nm_args, 2 * XDRWORD); xb_get_32(error, &xb, val); /* version */ xb_get_32(error, &xb, argslength); /* args length */ xb_init_buffer(&xb, nmp->nm_args, argslength); @@ -3547,12 +3836,14 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) count = NFS_MATTR_BITMAP_LEN; xb_get_bitmap(error, &xb, mattrs, count); /* mount attribute bitmap */ nfsmerr_if(error); - for (i = 0; i < NFS_MATTR_BITMAP_LEN; i++) + for (i = 0; i < NFS_MATTR_BITMAP_LEN; i++) { newmattrs[i] = mattrs[i]; - if (referral) + } + if (referral) { NFS_BITMAP_SET(newmattrs, NFS_MATTR_FS_LOCATIONS); - else + } else { NFS_BITMAP_SET(newmattrs, NFS_MATTR_FH); + } NFS_BITMAP_SET(newmattrs, NFS_MATTR_FLAGS); NFS_BITMAP_SET(newmattrs, NFS_MATTR_MNTFLAGS); NFS_BITMAP_CLR(newmattrs, NFS_MATTR_MNTFROM); @@ -3571,22 +3862,28 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) NFS_BITMAP_SET(newmflags, NFS_MFLAG_EPHEMERAL); xb_add_bitmap(error, &xbnew, newmflags_mask, NFS_MFLAG_BITMAP_LEN); xb_add_bitmap(error, &xbnew, newmflags, NFS_MFLAG_BITMAP_LEN); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION)) { xb_copy_32(error, &xb, &xbnew, val); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_MINOR_VERSION)) { xb_copy_32(error, &xb, &xbnew, val); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_VERSION_RANGE)) { xb_copy_32(error, &xb, &xbnew, val); xb_copy_32(error, &xb, &xbnew, val); } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READ_SIZE)) { xb_copy_32(error, &xb, &xbnew, val); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_WRITE_SIZE)) { xb_copy_32(error, &xb, &xbnew, val); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READDIR_SIZE)) { xb_copy_32(error, &xb, &xbnew, val); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_READAHEAD)) { xb_copy_32(error, &xb, &xbnew, val); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_ATTRCACHE_REG_MIN)) { xb_copy_32(error, &xb, &xbnew, val); xb_copy_32(error, &xb, &xbnew, val); @@ -3603,33 +3900,41 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) xb_copy_32(error, &xb, &xbnew, val); xb_copy_32(error, &xb, &xbnew, val); } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_LOCK_MODE)) { xb_copy_32(error, &xb, &xbnew, val); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SECURITY)) { xb_copy_32(error, &xb, &xbnew, count); - while (!error && (count-- > 0)) + while (!error && (count-- > 0)) { xb_copy_32(error, &xb, &xbnew, val); + } } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_KERB_ETYPE)) { xb_copy_32(error, &xb, &xbnew, count); xb_add_32(error, &xbnew, -1); - while (!error && (count-- > 0)) + while (!error && (count-- > 0)) { xb_copy_32(error, &xb, &xbnew, val); + } } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MAX_GROUP_LIST)) { xb_copy_32(error, &xb, &xbnew, val); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOCKET_TYPE)) { xb_copy_opaque(error, &xb, &xbnew); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_NFS_PORT)) 
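/*
 * The xb_copy_opaque macro above copies an XDR opaque by rounding its
 * byte length up to the 4-byte XDR word size and moving whole words,
 * which carries the pad bytes along for free. A sketch of the word
 * count it iterates over:
 */
#include <stdint.h>

#define XDR_WORD 4u

static uint32_t
xdr_opaque_words(uint32_t bytelen)
{
    /* nfsm_rndup analogue: round up to a word multiple, then count words */
    return ((bytelen + XDR_WORD - 1) & ~(XDR_WORD - 1)) / XDR_WORD;
}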
{ xb_copy_32(error, &xb, &xbnew, val); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MOUNT_PORT)) { xb_copy_32(error, &xb, &xbnew, val); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REQUEST_TIMEOUT)) { xb_copy_32(error, &xb, &xbnew, val); xb_copy_32(error, &xb, &xbnew, val); } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT)) { xb_copy_32(error, &xb, &xbnew, val); + } if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_DEAD_TIMEOUT)) { xb_copy_32(error, &xb, &xbnew, val); xb_copy_32(error, &xb, &xbnew, val); @@ -3645,66 +3950,84 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) /* copy/extend/skip fs locations */ if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_FS_LOCATIONS)) { numlocs = numserv = numaddr = numcomp = 0; - if (referral) /* don't copy the fs locations for a referral */ + if (referral) { /* don't copy the fs locations for a referral */ skipcopy = 1; + } xb_copy_32(error, &xb, &xbnew, numlocs); /* location count */ for (loc = 0; !error && (loc < numlocs); loc++) { xb_copy_32(error, &xb, &xbnew, numserv); /* server count */ for (serv = 0; !error && (serv < numserv); serv++) { xb_copy_opaque(error, &xb, &xbnew); /* server name */ xb_copy_32(error, &xb, &xbnew, numaddr); /* address count */ - for (addr = 0; !error && (addr < numaddr); addr++) + for (addr = 0; !error && (addr < numaddr); addr++) { xb_copy_opaque(error, &xb, &xbnew); /* address */ + } xb_copy_opaque(error, &xb, &xbnew); /* server info */ } /* pathname */ xb_get_32(error, &xb, numcomp); /* component count */ - if (!skipcopy) - xb_add_32(error, &xbnew, numcomp+relpathcomps); /* new component count */ - for (comp = 0; !error && (comp < numcomp); comp++) + if (!skipcopy) { + uint64_t totalcomps = numcomp + relpathcomps; + + /* set error to ERANGE in the event of overflow */ + if (totalcomps > UINT32_MAX) { + nfsmerr_if((error = ERANGE)); + } + + xb_add_32(error, &xbnew, (uint32_t) totalcomps); /* new component count */ + } + for (comp = 0; !error && (comp < numcomp); comp++) { xb_copy_opaque(error, &xb, &xbnew); /* component */ + } /* add additional components */ for (comp = 0; !skipcopy && !error && (comp < relpathcomps); comp++) { p = relpath; - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } while (*p && !error) { cp = p; - while (*p && (*p != '/')) + while (*p && (*p != '/')) { p++; + } xb_add_string(error, &xbnew, cp, (p - cp)); /* component */ - while (*p && (*p == '/')) + while (*p && (*p == '/')) { p++; + } } } xb_copy_opaque(error, &xb, &xbnew); /* fs location info */ } - if (referral) + if (referral) { skipcopy = 0; + } } if (referral) { /* add referral's fs locations */ - xb_add_32(error, &xbnew, nfsls.nl_numlocs); /* FS_LOCATIONS */ + xb_add_32(error, &xbnew, nfsls.nl_numlocs); /* FS_LOCATIONS */ for (loc = 0; !error && (loc < nfsls.nl_numlocs); loc++) { xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_servcount); for (serv = 0; !error && (serv < nfsls.nl_locations[loc]->nl_servcount); serv++) { xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_name, - strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_name)); + strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_name)); xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount); - for (addr = 0; !error && (addr < nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++) + for (addr = 0; !error && (addr < 
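/*
 * The long run of hunks above copies each mount attribute only when its
 * bit is set in the attribute bitmap, keeping the source and destination
 * XDR streams in lock-step. A sketch assuming the usual 32-bit-word
 * bitmap layout behind NFS_BITMAP_ISSET:
 */
#include <stdint.h>

static inline int
attr_present(const uint32_t *bitmap, unsigned int bit)
{
    return (bitmap[bit / 32] >> (bit % 32)) & 1u;
}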
nfsls.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++) { xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr], - strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr])); + strlen(nfsls.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr])); + } xb_add_32(error, &xbnew, 0); /* empty server info */ } xb_add_32(error, &xbnew, nfsls.nl_locations[loc]->nl_path.np_compcount); - for (comp = 0; !error && (comp < nfsls.nl_locations[loc]->nl_path.np_compcount); comp++) + for (comp = 0; !error && (comp < nfsls.nl_locations[loc]->nl_path.np_compcount); comp++) { xb_add_string(error, &xbnew, nfsls.nl_locations[loc]->nl_path.np_components[comp], - strlen(nfsls.nl_locations[loc]->nl_path.np_components[comp])); + strlen(nfsls.nl_locations[loc]->nl_path.np_components[comp])); + } xb_add_32(error, &xbnew, 0); /* empty fs location info */ } } - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_MNTFLAGS)) { xb_get_32(error, &xb, mntflags); + } /* * We add the following mount flags to the ones for the mounted-on mount: * MNT_DONTBROWSE - to keep the mount from showing up as a separate volume @@ -3720,14 +4043,16 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) nfsmerr_if(error); mlen2 = mlen + ((relpath[0] != '/') ? 1 : 0) + rlen; xb_add_32(error, &xbnew, mlen2); - count = mlen/XDRWORD; + count = mlen / XDRWORD; /* copy the original string */ - while (count-- > 0) + while (count-- > 0) { xb_copy_32(error, &xb, &xbnew, val); + } if (!error && (mlen % XDRWORD)) { - error = xb_get_bytes(&xb, buf, mlen%XDRWORD, 0); - if (!error) - error = xb_add_bytes(&xbnew, buf, mlen%XDRWORD, 1); + error = xb_get_bytes(&xb, buf, mlen % XDRWORD, 0); + if (!error) { + error = xb_add_bytes(&xbnew, buf, mlen % XDRWORD, 1); + } } /* insert a '/' if the relative path doesn't start with one */ if (!error && (relpath[0] != '/')) { @@ -3735,8 +4060,9 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) error = xb_add_bytes(&xbnew, buf, 1, 1); } /* add the additional relative path */ - if (!error) + if (!error) { error = xb_add_bytes(&xbnew, relpath, rlen, 1); + } /* make sure the resulting string has the right number of pad bytes */ if (!error && (mlen2 != nfsm_rndup(mlen2))) { bzero(buf, sizeof(buf)); @@ -3750,12 +4076,12 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) end_offset = xb_offset(&xbnew); if (!error) { error = xb_seek(&xbnew, argslength_offset); - argslength = end_offset - argslength_offset + XDRWORD/*version*/; + argslength = end_offset - argslength_offset + XDRWORD /*version*/; xb_add_32(error, &xbnew, argslength); } if (!error) { error = xb_seek(&xbnew, attrslength_offset); - xb_add_32(error, &xbnew, end_offset - attrslength_offset - XDRWORD/*don't include length field*/); + xb_add_32(error, &xbnew, end_offset - attrslength_offset - XDRWORD /*don't include length field*/); } nfsmerr_if(error); @@ -3769,23 +4095,28 @@ nfs_mirror_mount_domount(vnode_t dvp, vnode_t vp, vfs_context_t ctx) /* do the mount */ error = kernel_mount(fstype, dvp, vp, path, xb_buffer_base(&xbnew), argslength, - mntflags, KERNEL_MOUNT_PERMIT_UNMOUNT | KERNEL_MOUNT_NOAUTH, ctx); + mntflags, KERNEL_MOUNT_PERMIT_UNMOUNT | KERNEL_MOUNT_NOAUTH, ctx); nfsmerr: - if (error) + if (error) { printf("nfs: mirror mount of %s on %s failed (%d)\n", - mntfromname, path, error); + mntfromname, path, error); + } /* clean up */ xb_cleanup(&xbnew); - if (referral) + if (referral) { 
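/*
 * The component-count hunk above is a genuine fix, not just style: the
 * sum of the copied and appended component counts is formed in 64 bits
 * and rejected with ERANGE if it cannot round-trip through the 32-bit
 * XDR field. The same check as a small helper:
 */
#include <errno.h>
#include <stdint.h>

static int
add_u32_checked(uint32_t a, uint32_t b, uint32_t *out)
{
    uint64_t sum = (uint64_t)a + b;

    if (sum > UINT32_MAX)
        return ERANGE;               /* would truncate on the wire */
    *out = (uint32_t)sum;
    return 0;
}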
nfs_fs_locations_cleanup(&nfsls); - if (path) + } + if (path) { FREE_ZONE(path, MAXPATHLEN, M_NAMEI); - if (mntfromname) + } + if (mntfromname) { FREE_ZONE(mntfromname, MAXPATHLEN, M_NAMEI); - if (!error) + } + if (!error) { nfs_ephemeral_mount_harvester_start(); - return (error); + } + return error; } /* @@ -3834,9 +4165,9 @@ nfs_mirror_mount_trigger_resolve( result = vfs_resolver_result(np->n_trigseq, RESOLVER_NOCHANGE, 0); #ifdef NFS_TRIGGER_DEBUG NP(np, "nfs trigger RESOLVE: no change, last %d nameiop %d, seq %d", - (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq); + (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq); #endif - return (result); + return result; case OP_OPEN: case OP_CHDIR: case OP_CHROOT: @@ -3869,30 +4200,34 @@ nfs_mirror_mount_trigger_resolve( result = vfs_resolver_result(np->n_trigseq, RESOLVER_ERROR, error); #ifdef NFS_TRIGGER_DEBUG NP(np, "nfs trigger RESOLVE: busy error %d, last %d nameiop %d, seq %d", - error, (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq); + error, (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq); #endif - return (result); + return result; } pvp = vnode_getparent(vp); - if (pvp == NULLVP) + if (pvp == NULLVP) { error = EINVAL; - if (!error) + } + if (!error) { error = nfs_mirror_mount_domount(pvp, vp, ctx); + } skipmount: - if (!error) + if (!error) { np->n_trigseq++; + } result = vfs_resolver_result(np->n_trigseq, error ? RESOLVER_ERROR : RESOLVER_RESOLVED, error); #ifdef NFS_TRIGGER_DEBUG NP(np, "nfs trigger RESOLVE: %s %d, last %d nameiop %d, seq %d", - error ? "error" : "resolved", error, - (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq); + error ? "error" : "resolved", error, + (cnp->cn_flags & ISLASTCN) ? 1 : 0, cnp->cn_nameiop, np->n_trigseq); #endif - if (pvp != NULLVP) + if (pvp != NULLVP) { vnode_put(pvp); + } nfs_node_clear_busy(np); - return (result); + return result; } resolver_result_t @@ -3912,23 +4247,26 @@ nfs_mirror_mount_trigger_unresolve( #ifdef NFS_TRIGGER_DEBUG NP(np, "nfs trigger UNRESOLVE: busy error %d, seq %d", error, np->n_trigseq); #endif - return (result); + return result; } mp = vnode_mountedhere(vp); - if (!mp) + if (!mp) { error = EINVAL; - if (!error) + } + if (!error) { error = vfs_unmountbyfsid(&(vfs_statfs(mp)->f_fsid), flags, ctx); - if (!error) + } + if (!error) { np->n_trigseq++; + } result = vfs_resolver_result(np->n_trigseq, error ? RESOLVER_ERROR : RESOLVER_UNRESOLVED, error); #ifdef NFS_TRIGGER_DEBUG NP(np, "nfs trigger UNRESOLVE: %s %d, seq %d", - error ? "error" : "unresolved", error, np->n_trigseq); + error ? "error" : "unresolved", error, np->n_trigseq); #endif nfs_node_clear_busy(np); - return (result); + return result; } resolver_result_t @@ -3947,18 +4285,18 @@ nfs_mirror_mount_trigger_rearm( #ifdef NFS_TRIGGER_DEBUG NP(np, "nfs trigger REARM: busy error %d, seq %d", error, np->n_trigseq); #endif - return (result); + return result; } np->n_trigseq++; result = vfs_resolver_result(np->n_trigseq, - vnode_mountedhere(vp) ? RESOLVER_RESOLVED : RESOLVER_UNRESOLVED, 0); + vnode_mountedhere(vp) ? RESOLVER_RESOLVED : RESOLVER_UNRESOLVED, 0); #ifdef NFS_TRIGGER_DEBUG NP(np, "nfs trigger REARM: %s, seq %d", - vnode_mountedhere(vp) ? "resolved" : "unresolved", np->n_trigseq); + vnode_mountedhere(vp) ? "resolved" : "unresolved", np->n_trigseq); #endif nfs_node_clear_busy(np); - return (result); + return result; } /* @@ -3966,11 +4304,11 @@ nfs_mirror_mount_trigger_rearm( * the number of unused mounts. 
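/*
 * The harvester callback above skips any ephemeral mount triggered
 * within the last harvest interval; the ">> 32" implies nm_mounttime
 * packs seconds into its high 32 bits, which this sketch assumes:
 */
#include <stdint.h>

#define HARVEST_INTERVAL 120u        /* seconds, mirroring the #define above */

static int
harvestable(uint64_t mounttime_packed, uint32_t now_sec)
{
    uint32_t mounted_sec = (uint32_t)(mounttime_packed >> 32);

    return mounted_sec <= now_sec - HARVEST_INTERVAL;
}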
*/ -#define NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL 120 /* how often the harvester runs */ +#define NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL 120 /* how often the harvester runs */ struct nfs_ephemeral_mount_harvester_info { - fsid_t fsid; /* FSID that we need to try to unmount */ - uint32_t mountcount; /* count of ephemeral mounts seen in scan */ - }; + fsid_t fsid; /* FSID that we need to try to unmount */ + uint32_t mountcount; /* count of ephemeral mounts seen in scan */ +}; /* various globals for the harvester */ static thread_call_t nfs_ephemeral_mount_harvester_timer = NULL; static int nfs_ephemeral_mount_harvester_on = 0; @@ -3984,17 +4322,20 @@ nfs_ephemeral_mount_harvester_callback(mount_t mp, void *arg) struct nfsmount *nmp; struct timeval now; - if (strcmp(mp->mnt_vfsstat.f_fstypename, "nfs")) - return (VFS_RETURNED); + if (strcmp(mp->mnt_vfsstat.f_fstypename, "nfs")) { + return VFS_RETURNED; + } nmp = VFSTONFS(mp); - if (!nmp || !NMFLAG(nmp, EPHEMERAL)) - return (VFS_RETURNED); + if (!nmp || !NMFLAG(nmp, EPHEMERAL)) { + return VFS_RETURNED; + } hinfo->mountcount++; /* avoid unmounting mounts that have been triggered within the last harvest interval */ microtime(&now); - if ((nmp->nm_mounttime >> 32) > ((uint32_t)now.tv_sec - NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL)) - return (VFS_RETURNED); + if ((nmp->nm_mounttime >> 32) > ((uint32_t)now.tv_sec - NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL)) { + return VFS_RETURNED; + } if (hinfo->fsid.val[0] || hinfo->fsid.val[1]) { /* attempt to unmount previously-found ephemeral mount */ @@ -4009,7 +4350,7 @@ nfs_ephemeral_mount_harvester_callback(mount_t mp, void *arg) hinfo->fsid.val[0] = mp->mnt_vfsstat.f_fsid.val[0]; hinfo->fsid.val[1] = mp->mnt_vfsstat.f_fsid.val[1]; - return (VFS_RETURNED); + return VFS_RETURNED; } /* @@ -4020,8 +4361,9 @@ nfs_ephemeral_mount_harvester_timer_func(void) { thread_t thd; - if (kernel_thread_start(nfs_ephemeral_mount_harvester, NULL, &thd) == KERN_SUCCESS) + if (kernel_thread_start(nfs_ephemeral_mount_harvester, NULL, &thd) == KERN_SUCCESS) { thread_deallocate(thd); + } } /* @@ -4070,8 +4412,9 @@ nfs_ephemeral_mount_harvester_start(void) lck_mtx_unlock(nfs_global_mutex); return; } - if (nfs_ephemeral_mount_harvester_timer == NULL) + if (nfs_ephemeral_mount_harvester_timer == NULL) { nfs_ephemeral_mount_harvester_timer = thread_call_allocate((thread_call_func_t)nfs_ephemeral_mount_harvester_timer_func, NULL); + } clock_interval_to_deadline(NFS_EPHEMERAL_MOUNT_HARVEST_INTERVAL, NSEC_PER_SEC, &deadline); thread_call_enter_delayed(nfs_ephemeral_mount_harvester_timer, deadline); nfs_ephemeral_mount_harvester_on = 1; @@ -4105,22 +4448,25 @@ nfs3_mount_rpc(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int nfsver bcopy(sa, saddr, min(sizeof(ss), sa->sa_len)); if (saddr->sa_family == AF_INET) { - if (nmp->nm_mountport) + if (nmp->nm_mountport) { ((struct sockaddr_in*)saddr)->sin_port = htons(nmp->nm_mountport); + } mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port); } else { - if (nmp->nm_mountport) + if (nmp->nm_mountport) { ((struct sockaddr_in6*)saddr)->sin6_port = htons(nmp->nm_mountport); + } mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port); } while (!mntport) { error = nfs_portmap_lookup(nmp, ctx, saddr, NULL, RPCPROG_MNT, mntvers, mntproto, timeo); nfsmout_if(error); - if (saddr->sa_family == AF_INET) + if (saddr->sa_family == AF_INET) { mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port); - else + } else { mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port); + } if (!mntport) { /* if not 
found and TCP, then retry with UDP */ if (mntproto == IPPROTO_UDP) { @@ -4140,17 +4486,18 @@ nfs3_mount_rpc(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int nfsver nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfsm_rpchead2(nmp, (mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM, - RPCPROG_MNT, mntvers, RPCMNT_MOUNT, - RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq); + RPCPROG_MNT, mntvers, RPCMNT_MOUNT, + RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq); nfsmout_if(error); nmreq.nmc_mhead = NULL; error = nfs_aux_request(nmp, thd, saddr, NULL, - ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM), - mreq, R_XID32(xid), 1, timeo, &nmrep); + ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM), + mreq, R_XID32(xid), 1, timeo, &nmrep); nfsmout_if(error); nfsm_chain_get_32(error, &nmrep, val); - if (!error && val) + if (!error && val) { error = val; + } nfsm_chain_get_fh(error, &nmrep, nfsvers, fh); if (!error && (nfsvers > NFS_VER2)) { sec->count = NX_MAX_SEC_FLAVORS; @@ -4159,7 +4506,7 @@ nfs3_mount_rpc(struct nfsmount *nmp, struct sockaddr *sa, int sotype, int nfsver nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } @@ -4180,8 +4527,9 @@ nfs3_umount_rpc(struct nfsmount *nmp, vfs_context_t ctx, int timeo) struct sockaddr_storage ss; struct sockaddr *saddr = (struct sockaddr*)&ss; - if (!nmp->nm_saddr) + if (!nmp->nm_saddr) { return; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -4191,18 +4539,20 @@ nfs3_umount_rpc(struct nfsmount *nmp, vfs_context_t ctx, int timeo) mntport = nmp->nm_mountport; bcopy(nmp->nm_saddr, saddr, min(sizeof(ss), nmp->nm_saddr->sa_len)); - if (saddr->sa_family == AF_INET) + if (saddr->sa_family == AF_INET) { ((struct sockaddr_in*)saddr)->sin_port = htons(mntport); - else + } else { ((struct sockaddr_in6*)saddr)->sin6_port = htons(mntport); + } while (!mntport) { error = nfs_portmap_lookup(nmp, ctx, saddr, NULL, RPCPROG_MNT, mntvers, mntproto, timeo); - nfsmout_if(error); - if (saddr->sa_family == AF_INET) + nfsmout_if(error); + if (saddr->sa_family == AF_INET) { mntport = ntohs(((struct sockaddr_in*)saddr)->sin_port); - else + } else { mntport = ntohs(((struct sockaddr_in6*)saddr)->sin6_port); + } /* if not found and mntvers > VER1, then retry with VER1 */ if (!mntport) { if (mntvers > RPCMNT_VER1) { @@ -4220,21 +4570,22 @@ nfs3_umount_rpc(struct nfsmount *nmp, vfs_context_t ctx, int timeo) /* MOUNT protocol UNMOUNT request */ path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0]; - while (*path && (*path != '/')) + while (*path && (*path != '/')) { path++; + } slen = strlen(path); nfsm_chain_build_alloc_init(error, &nmreq, NFSX_UNSIGNED + nfsm_rndup(slen)); nfsm_chain_add_name(error, &nmreq, path, slen, nmp); nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfsm_rpchead2(nmp, (mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM, - RPCPROG_MNT, RPCMNT_VER1, RPCMNT_UMOUNT, - RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq); + RPCPROG_MNT, RPCMNT_VER1, RPCMNT_UMOUNT, + RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq); nfsmout_if(error); nmreq.nmc_mhead = NULL; error = nfs_aux_request(nmp, thd, saddr, NULL, - ((mntproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM), - mreq, R_XID32(xid), 1, timeo, &nmrep); + ((mntproto == IPPROTO_UDP) ? 
SOCK_DGRAM : SOCK_STREAM), + mreq, R_XID32(xid), 1, timeo, &nmrep); nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -4273,8 +4624,9 @@ nfs_vfs_unmount( /* * Wait for any in-progress monitored node scan to complete. */ - while (nmp->nm_state & NFSSTA_MONITOR_SCAN) - msleep(&nmp->nm_state, &nmp->nm_lock, PZERO-1, "nfswaitmonscan", &ts); + while (nmp->nm_state & NFSSTA_MONITOR_SCAN) { + msleep(&nmp->nm_state, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts); + } /* * Goes something like this.. * - Call vflush() to clear out vnodes for this file system, @@ -4294,16 +4646,17 @@ nfs_vfs_unmount( if (mntflags & MNT_FORCE) { error = vflush(mp, NULLVP, flags); /* locks vp in the process */ } else { - if (vnode_isinuse(vp, 1)) + if (vnode_isinuse(vp, 1)) { error = EBUSY; - else + } else { error = vflush(mp, vp, flags); + } } if (error) { lck_mtx_lock(&nmp->nm_lock); nmp->nm_state &= ~NFSSTA_UNMOUNTING; lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } lck_mtx_lock(&nmp->nm_lock); @@ -4315,15 +4668,16 @@ nfs_vfs_unmount( */ error = vnode_get(vp); vnode_rele(vp); - if (!error) + if (!error) { vnode_put(vp); + } vflush(mp, NULLVP, FORCECLOSE); /* Wait for all other references to be released and free the mount */ nfs_mount_drain_and_cleanup(nmp); - - return (0); + + return 0; } /* @@ -4338,21 +4692,25 @@ nfs_fs_locations_cleanup(struct nfs_fs_locations *nfslsp) uint32_t loc, serv, addr, comp; /* free up fs locations */ - if (!nfslsp->nl_numlocs || !nfslsp->nl_locations) + if (!nfslsp->nl_numlocs || !nfslsp->nl_locations) { return; + } for (loc = 0; loc < nfslsp->nl_numlocs; loc++) { fsl = nfslsp->nl_locations[loc]; - if (!fsl) + if (!fsl) { continue; + } if ((fsl->nl_servcount > 0) && fsl->nl_servers) { for (serv = 0; serv < fsl->nl_servcount; serv++) { fss = fsl->nl_servers[serv]; - if (!fss) + if (!fss) { continue; + } if ((fss->ns_addrcount > 0) && fss->ns_addresses) { - for (addr = 0; addr < fss->ns_addrcount; addr++) + for (addr = 0; addr < fss->ns_addrcount; addr++) { FREE(fss->ns_addresses[addr], M_TEMP); + } FREE(fss->ns_addresses, M_TEMP); } FREE(fss->ns_name, M_TEMP); @@ -4362,9 +4720,11 @@ nfs_fs_locations_cleanup(struct nfs_fs_locations *nfslsp) } fsp = &fsl->nl_path; if (fsp->np_compcount && fsp->np_components) { - for (comp = 0; comp < fsp->np_compcount; comp++) - if (fsp->np_components[comp]) + for (comp = 0; comp < fsp->np_compcount; comp++) { + if (fsp->np_components[comp]) { FREE(fsp->np_components[comp], M_TEMP); + } + } FREE(fsp->np_components, M_TEMP); } FREE(fsl, M_TEMP); @@ -4380,14 +4740,17 @@ nfs_mount_rele(struct nfsmount *nmp) int wup = 0; lck_mtx_lock(&nmp->nm_lock); - if (nmp->nm_ref < 1) + if (nmp->nm_ref < 1) { panic("nfs zombie mount underflow\n"); + } nmp->nm_ref--; - if (nmp->nm_ref == 0) + if (nmp->nm_ref == 0) { wup = nmp->nm_state & NFSSTA_MOUNT_DRAIN; + } lck_mtx_unlock(&nmp->nm_lock); - if (wup) + if (wup) { wakeup(&nmp->nm_ref); + } } void @@ -4396,7 +4759,7 @@ nfs_mount_drain_and_cleanup(struct nfsmount *nmp) lck_mtx_lock(&nmp->nm_lock); nmp->nm_state |= NFSSTA_MOUNT_DRAIN; while (nmp->nm_ref > 0) { - msleep(&nmp->nm_ref, &nmp->nm_lock, PZERO-1, "nfs_mount_drain", NULL); + msleep(&nmp->nm_ref, &nmp->nm_lock, PZERO - 1, "nfs_mount_drain", NULL); } assert(nmp->nm_ref == 0); lck_mtx_unlock(&nmp->nm_lock); @@ -4420,10 +4783,11 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) nmp->nm_state |= nm_state_flags; nmp->nm_ref++; lck_mtx_unlock(&nmp->nm_lock); - + /* stop callbacks */ - if ((nmp->nm_vers >= NFS_VER4) && 
!NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid) + if ((nmp->nm_vers >= NFS_VER4) && !NMFLAG(nmp, NOCALLBACK) && nmp->nm_cbid) { nfs4_mount_callback_shutdown(nmp); + } /* Destroy any RPCSEC_GSS contexts */ nfs_gss_clnt_ctx_unmount(nmp); @@ -4434,13 +4798,14 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) /* Have the socket thread send the unmount RPC, if requested/appropriate. */ if ((nmp->nm_vers < NFS_VER4) && (nmp->nm_state & NFSSTA_MOUNTED) && - !(nmp->nm_state & (NFSSTA_FORCE|NFSSTA_DEAD)) && NMFLAG(nmp, CALLUMNT)) + !(nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) && NMFLAG(nmp, CALLUMNT)) { nfs_mount_sock_thread_wake(nmp); + } /* wait for the socket thread to terminate */ while (nmp->nm_sockthd && current_thread() != nmp->nm_sockthd) { wakeup(&nmp->nm_sockthd); - msleep(&nmp->nm_sockthd, &nmp->nm_lock, PZERO-1, "nfswaitsockthd", &ts); + msleep(&nmp->nm_sockthd, &nmp->nm_lock, PZERO - 1, "nfswaitsockthd", &ts); } lck_mtx_unlock(&nmp->nm_lock); @@ -4466,7 +4831,7 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) lck_mtx_unlock(&nmp->nm_lock); - if (nmp->nm_state & NFSSTA_MOUNTED) + if (nmp->nm_state & NFSSTA_MOUNTED) { switch (nmp->nm_lockmode) { case NFS_LOCK_MODE_DISABLED: case NFS_LOCK_MODE_LOCAL: @@ -4479,13 +4844,15 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) } break; } + } if ((nmp->nm_vers >= NFS_VER4) && nmp->nm_longid) { /* remove/deallocate the client ID data */ lck_mtx_lock(nfs_global_mutex); TAILQ_REMOVE(&nfsclientids, nmp->nm_longid, nci_link); - if (nmp->nm_longid->nci_id) + if (nmp->nm_longid->nci_id) { FREE(nmp->nm_longid->nci_id, M_TEMP); + } FREE(nmp->nm_longid, M_TEMP); nmp->nm_longid = NULL; lck_mtx_unlock(nfs_global_mutex); @@ -4500,15 +4867,16 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) TAILQ_FOREACH(req, &nfs_reqq, r_chain) { if (req->r_nmp == nmp) { lck_mtx_lock(&req->r_mtx); - if (!req->r_error && req->r_nmrep.nmc_mhead == NULL) + if (!req->r_error && req->r_nmrep.nmc_mhead == NULL) { req->r_error = EIO; + } if (req->r_flags & R_RESENDQ) { lck_mtx_lock(&nmp->nm_lock); req->r_flags &= ~R_RESENDQ; if (req->r_rchain.tqe_next != NFSREQNOLIST) { TAILQ_REMOVE(&nmp->nm_resendq, req, r_rchain); /* - * Queue up the request so that we can unreference them + * Queue up the request so that we can unreference them * without holding nfs_request_mutex */ TAILQ_INSERT_TAIL(&resendq, req, r_rchain); @@ -4543,7 +4911,7 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) lck_mtx_lock(&req->r_mtx); if (req->r_callback.rcb_func && !(req->r_flags & R_WAITSENT) && !(req->r_flags & R_IOD)) { - /* + /* * Since R_IOD is not set then we need to handle it. If * we're not on a list add it to our iod queue. 
Otherwise * we must already be on nm_iodq which is added to our @@ -4561,8 +4929,9 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) } /* finish any async I/O RPCs queued up */ - if (nmp->nm_iodlink.tqe_next != NFSNOLIST) + if (nmp->nm_iodlink.tqe_next != NFSNOLIST) { TAILQ_REMOVE(&nfsiodmounts, nmp, nm_iodlink); + } TAILQ_CONCAT(&iodq, &nmp->nm_iodq, r_achain); lck_mtx_unlock(nfsiod_mutex); lck_mtx_unlock(nfs_request_mutex); @@ -4573,21 +4942,27 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) lck_mtx_lock(&req->r_mtx); docallback = !(req->r_flags & R_WAITSENT); lck_mtx_unlock(&req->r_mtx); - if (docallback) + if (docallback) { req->r_callback.rcb_func(req); + } } /* clean up common state */ lck_mtx_lock(&nmp->nm_lock); - while ((np = LIST_FIRST(&nmp->nm_monlist))) { - LIST_REMOVE(np, n_monlink); - np->n_monlink.le_next = NFSNOLIST; - } + while ((np = LIST_FIRST(&nmp->nm_monlist))) { + LIST_REMOVE(np, n_monlink); + np->n_monlink.le_next = NFSNOLIST; + } TAILQ_FOREACH_SAFE(noop, &nmp->nm_open_owners, noo_link, nextnoop) { + os_ref_count_t newcount; + TAILQ_REMOVE(&nmp->nm_open_owners, noop, noo_link); noop->noo_flags &= ~NFS_OPEN_OWNER_LINK; - if (noop->noo_refcnt) + newcount = os_ref_release_locked(&noop->noo_refcnt); + + if (newcount) { continue; + } nfs_open_owner_destroy(noop); } lck_mtx_unlock(&nmp->nm_lock); @@ -4611,52 +4986,63 @@ nfs_mount_zombie(struct nfsmount *nmp, int nm_state_flags) void nfs_mount_cleanup(struct nfsmount *nmp) { - if (!nmp) + if (!nmp) { return; + } nfs_mount_zombie(nmp, 0); NFS_VFS_DBG("Unmounting %s from %s\n", - vfs_statfs(nmp->nm_mountp)->f_mntfromname, - vfs_statfs(nmp->nm_mountp)->f_mntonname); + vfs_statfs(nmp->nm_mountp)->f_mntfromname, + vfs_statfs(nmp->nm_mountp)->f_mntonname); NFS_VFS_DBG("nfs state = 0x%8.8x\n", nmp->nm_state); NFS_VFS_DBG("nfs socket flags = 0x%8.8x\n", nmp->nm_sockflags); NFS_VFS_DBG("nfs mount ref count is %d\n", nmp->nm_ref); NFS_VFS_DBG("mount ref count is %d\n", nmp->nm_mountp->mnt_count); - - if (nmp->nm_mountp) + + if (nmp->nm_mountp) { vfs_setfsprivate(nmp->nm_mountp, NULL); + } lck_mtx_lock(&nmp->nm_lock); - if (nmp->nm_ref) + if (nmp->nm_ref) { panic("Someone has grabbed a ref %d state flags = 0x%8.8x\n", nmp->nm_ref, nmp->nm_state); + } - if (nmp->nm_saddr) + if (nmp->nm_saddr) { FREE(nmp->nm_saddr, M_SONAME); - if ((nmp->nm_vers < NFS_VER4) && nmp->nm_rqsaddr) + } + if ((nmp->nm_vers < NFS_VER4) && nmp->nm_rqsaddr) { FREE(nmp->nm_rqsaddr, M_SONAME); + } - if (IS_VALID_CRED(nmp->nm_mcred)) + if (IS_VALID_CRED(nmp->nm_mcred)) { kauth_cred_unref(&nmp->nm_mcred); + } nfs_fs_locations_cleanup(&nmp->nm_locations); - if (nmp->nm_realm) + if (nmp->nm_realm) { FREE(nmp->nm_realm, M_TEMP); - if (nmp->nm_principal) + } + if (nmp->nm_principal) { FREE(nmp->nm_principal, M_TEMP); - if (nmp->nm_sprinc) + } + if (nmp->nm_sprinc) { FREE(nmp->nm_sprinc, M_TEMP); - - if (nmp->nm_args) + } + + if (nmp->nm_args) { xb_free(nmp->nm_args); + } lck_mtx_unlock(&nmp->nm_lock); - + lck_mtx_destroy(&nmp->nm_lock, nfs_mount_grp); - if (nmp->nm_fh) + if (nmp->nm_fh) { FREE(nmp->nm_fh, M_TEMP); - FREE_ZONE((caddr_t)nmp, sizeof (struct nfsmount), M_NFSMNT); + } + FREE_ZONE(nmp, sizeof(struct nfsmount), M_NFSMNT); } /* @@ -4671,19 +5057,21 @@ nfs_vfs_root(mount_t mp, vnode_t *vpp, __unused vfs_context_t ctx) u_int32_t vpid; nmp = VFSTONFS(mp); - if (!nmp || !nmp->nm_dnp) - return (ENXIO); + if (!nmp || !nmp->nm_dnp) { + return ENXIO; + } vp = NFSTOV(nmp->nm_dnp); vpid = vnode_vid(vp); while ((error = vnode_getwithvid(vp, 
vpid))) { /* vnode_get() may return ENOENT if the dir changes. */ /* If that happens, just try it again, else return the error. */ - if ((error != ENOENT) || (vnode_vid(vp) == vpid)) - return (error); + if ((error != ENOENT) || (vnode_vid(vp) == vpid)) { + return error; + } vpid = vnode_vid(vp); } *vpp = vp; - return (0); + return 0; } /* @@ -4698,7 +5086,7 @@ nfs_vfs_quotactl( __unused caddr_t datap, __unused vfs_context_t context) { - return (ENOTSUP); + return ENOTSUP; } #else @@ -4707,12 +5095,13 @@ nfs_sa_getport(struct sockaddr *sa, int *error) { int port = 0; - if (sa->sa_family == AF_INET6) + if (sa->sa_family == AF_INET6) { port = ntohs(((struct sockaddr_in6*)sa)->sin6_port); - else if (sa->sa_family == AF_INET) + } else if (sa->sa_family == AF_INET) { port = ntohs(((struct sockaddr_in*)sa)->sin_port); - else if (error) + } else if (error) { *error = EIO; + } return port; } @@ -4720,10 +5109,11 @@ nfs_sa_getport(struct sockaddr *sa, int *error) static void nfs_sa_setport(struct sockaddr *sa, int port) { - if (sa->sa_family == AF_INET6) + if (sa->sa_family == AF_INET6) { ((struct sockaddr_in6*)sa)->sin6_port = htons(port); - else if (sa->sa_family == AF_INET) + } else if (sa->sa_family == AF_INET) { ((struct sockaddr_in*)sa)->sin_port = htons(port); + } } int @@ -4742,11 +5132,13 @@ nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc struct timeval now; struct timespec ts = { 1, 0 }; - if (!nmp->nm_saddr) - return (ENXIO); + if (!nmp->nm_saddr) { + return ENXIO; + } - if (NMFLAG(nmp, NOQUOTA)) - return (ENOTSUP); + if (NMFLAG(nmp, NOQUOTA)) { + return ENOTSUP; + } /* * Allocate an address for rquotad if needed @@ -4754,7 +5146,7 @@ nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc if (!nmp->nm_rqsaddr) { int need_free = 0; - MALLOC(rqsaddr, struct sockaddr *, sizeof(struct sockaddr_storage), M_SONAME, M_WAITOK|M_ZERO); + MALLOC(rqsaddr, struct sockaddr *, sizeof(struct sockaddr_storage), M_SONAME, M_WAITOK | M_ZERO); bcopy(nmp->nm_saddr, rqsaddr, min(sizeof(struct sockaddr_storage), nmp->nm_saddr->sa_len)); /* Set the port to zero, will call rpcbind to get the port below */ nfs_sa_setport(rqsaddr, 0); @@ -4768,8 +5160,9 @@ nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc need_free = 1; } lck_mtx_unlock(&nmp->nm_lock); - if (need_free) + if (need_free) { FREE(rqsaddr, M_SONAME); + } } timeo = NMFLAG(nmp, SOFT) ? 
10 : 60; @@ -4784,11 +5177,11 @@ nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc error = nfs_sigintr(nmp, NULL, thd, 1); if (error) { lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } if (nmp->nm_state & NFSSTA_RQUOTAINPROG) { nmp->nm_state |= NFSSTA_WANTRQUOTA; - msleep(&nmp->nm_rqsaddr, &nmp->nm_lock, PZERO-1, "nfswaitrquotaaddr", &ts); + msleep(&nmp->nm_rqsaddr, &nmp->nm_lock, PZERO - 1, "nfswaitrquotaaddr", &ts); rqport = nfs_sa_getport(rqsaddr, &error); continue; } @@ -4797,11 +5190,13 @@ nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc /* send portmap request to get rquota port */ error = nfs_portmap_lookup(nmp, ctx, rqsaddr, NULL, RPCPROG_RQUOTA, rqvers, rqproto, timeo); - if (error) + if (error) { goto out; + } rqport = nfs_sa_getport(rqsaddr, &error); - if (error) + if (error) { goto out; + } if (!rqport) { /* @@ -4818,7 +5213,7 @@ nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc } microuptime(&now); nmp->nm_rqsaddrstamp = now.tv_sec; - out: +out: lck_mtx_lock(&nmp->nm_lock); nmp->nm_state &= ~NFSSTA_RQUOTAINPROG; if (nmp->nm_state & NFSSTA_WANTRQUOTA) { @@ -4827,46 +5222,51 @@ nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc } } lck_mtx_unlock(&nmp->nm_lock); - if (error) - return (error); + if (error) { + return error; + } /* Using PMAPPORT for unavailable rquota service */ - if (rqport == PMAPPORT) - return (ENOTSUP); + if (rqport == PMAPPORT) { + return ENOTSUP; + } /* rquota request */ nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); path = &vfs_statfs(nmp->nm_mountp)->f_mntfromname[0]; - while (*path && (*path != '/')) + while (*path && (*path != '/')) { path++; + } slen = strlen(path); nfsm_chain_build_alloc_init(error, &nmreq, 3 * NFSX_UNSIGNED + nfsm_rndup(slen)); nfsm_chain_add_name(error, &nmreq, path, slen, nmp); - if (type == GRPQUOTA) + if (type == GRPQUOTA) { nfsm_chain_add_32(error, &nmreq, type); + } nfsm_chain_add_32(error, &nmreq, id); nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfsm_rpchead2(nmp, (rqproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM, - RPCPROG_RQUOTA, rqvers, RPCRQUOTA_GET, - RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq); + RPCPROG_RQUOTA, rqvers, RPCRQUOTA_GET, + RPCAUTH_SYS, cred, NULL, nmreq.nmc_mhead, &xid, &mreq); nfsmout_if(error); nmreq.nmc_mhead = NULL; error = nfs_aux_request(nmp, thd, rqsaddr, NULL, - (rqproto == IPPROTO_UDP) ? SOCK_DGRAM : SOCK_STREAM, - mreq, R_XID32(xid), 0, timeo, &nmrep); + (rqproto == IPPROTO_UDP) ? 
SOCK_DGRAM : SOCK_STREAM, + mreq, R_XID32(xid), 0, timeo, &nmrep); nfsmout_if(error); /* parse rquota response */ nfsm_chain_get_32(error, &nmrep, val); if (!error && (val != RQUOTA_STAT_OK)) { - if (val == RQUOTA_STAT_NOQUOTA) + if (val == RQUOTA_STAT_NOQUOTA) { error = ENOENT; - else if (val == RQUOTA_STAT_EPERM) + } else if (val == RQUOTA_STAT_EPERM) { error = EPERM; - else + } else { error = EIO; + } } nfsm_chain_get_32(error, &nmrep, bsize); nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); @@ -4889,7 +5289,7 @@ nfs3_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int @@ -4904,14 +5304,16 @@ nfs4_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc kauth_cred_t cred = vfs_context_ucred(ctx); struct nfsreq_secinfo_args si; - if (type != USRQUOTA) /* NFSv4 only supports user quotas */ - return (ENOTSUP); + if (type != USRQUOTA) { /* NFSv4 only supports user quotas */ + return ENOTSUP; + } /* first check that the server supports any of the quota attributes */ if (!NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) && !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) && - !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED)) - return (ENOTSUP); + !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED)) { + return ENOTSUP; + } /* * The credential passed to the server needs to have @@ -4925,19 +5327,21 @@ nfs4_getquota(struct nfsmount *nmp, vfs_context_t ctx, uid_t id, int type, struc temp_pcred.cr_ngroups = pcred->cr_ngroups; bcopy(pcred->cr_groups, temp_pcred.cr_groups, sizeof(temp_pcred.cr_groups)); cred = posix_cred_create(&temp_pcred); - if (!IS_VALID_CRED(cred)) - return (ENOMEM); + if (!IS_VALID_CRED(cred)) { + return ENOMEM; + } } else { kauth_cred_ref(cred); } nfsvers = nmp->nm_vers; np = nmp->nm_dnp; - if (!np) + if (!np) { error = ENXIO; + } if (error || ((error = vnode_get(NFSTOV(np))))) { kauth_cred_unref(&cred); - return(error); + return error; } NFSREQ_SECINFO_SET(&si, np, NULL, 0, NULL, 0); @@ -4976,7 +5380,7 @@ nfsmout: nfsm_chain_cleanup(&nmrep); vnode_put(NFSTOV(np)); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -4988,12 +5392,14 @@ nfs_vfs_quotactl(mount_t mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t c struct dqblk *dqb = (struct dqblk*)datap; nmp = VFSTONFS(mp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (uid == ~0U) + if (uid == ~0U) { uid = euid; + } /* we can only support Q_GETQUOTA */ cmd = cmds >> SUBCMDSHIFT; @@ -5006,23 +5412,26 @@ nfs_vfs_quotactl(mount_t mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t c case Q_SETUSE: case Q_SYNC: case Q_QUOTASTAT: - return (ENOTSUP); + return ENOTSUP; default: - return (EINVAL); + return EINVAL; } type = cmds & SUBCMDMASK; - if ((u_int)type >= MAXQUOTAS) - return (EINVAL); - if ((uid != euid) && ((error = vfs_context_suser(ctx)))) - return (error); + if ((u_int)type >= MAXQUOTAS) { + return EINVAL; + } + if ((uid != euid) && ((error = vfs_context_suser(ctx)))) { + return error; + } - if (vfs_busy(mp, LK_NOWAIT)) - return (0); + if (vfs_busy(mp, LK_NOWAIT)) { + return 0; + } bzero(dqb, sizeof(*dqb)); error = nmp->nm_funcs->nf_getquota(nmp, ctx, uid, type, dqb); vfs_unbusy(mp); - return (error); + return error; } #endif @@ -5032,9 +5441,9 @@ nfs_vfs_quotactl(mount_t mp, int cmds, 
uid_t uid, caddr_t datap, vfs_context_t c int nfs_sync_callout(vnode_t, void *); struct nfs_sync_cargs { - vfs_context_t ctx; - int waitfor; - int error; + vfs_context_t ctx; + int waitfor; + int error; }; int @@ -5046,21 +5455,25 @@ nfs_sync_callout(vnode_t vp, void *arg) if (np->n_flag & NREVOKE) { vn_revoke(vp, REVOKEALL, cargs->ctx); - return (VNODE_RETURNED); + return VNODE_RETURNED; } - if (LIST_EMPTY(&np->n_dirtyblkhd)) - return (VNODE_RETURNED); - if (np->n_wrbusy > 0) - return (VNODE_RETURNED); - if (np->n_bflag & (NBFLUSHINPROG|NBINVALINPROG)) - return (VNODE_RETURNED); + if (LIST_EMPTY(&np->n_dirtyblkhd)) { + return VNODE_RETURNED; + } + if (np->n_wrbusy > 0) { + return VNODE_RETURNED; + } + if (np->n_bflag & (NBFLUSHINPROG | NBINVALINPROG)) { + return VNODE_RETURNED; + } error = nfs_flush(np, cargs->waitfor, vfs_context_thread(cargs->ctx), 0); - if (error) + if (error) { cargs->error = error; + } - return (VNODE_RETURNED); + return VNODE_RETURNED; } int @@ -5074,7 +5487,7 @@ nfs_vfs_sync(mount_t mp, int waitfor, vfs_context_t ctx) vnode_iterate(mp, 0, nfs_sync_callout, &cargs); - return (cargs.error); + return cargs.error; } /* @@ -5089,8 +5502,7 @@ nfs_vfs_vget( __unused vnode_t *vpp, __unused vfs_context_t ctx) { - - return (ENOTSUP); + return ENOTSUP; } /* @@ -5105,8 +5517,7 @@ nfs_vfs_fhtovp( __unused vnode_t *vpp, __unused vfs_context_t ctx) { - - return (ENOTSUP); + return ENOTSUP; } /* @@ -5120,8 +5531,7 @@ nfs_vfs_vptofh( __unused unsigned char *fhp, __unused vfs_context_t ctx) { - - return (ENOTSUP); + return ENOTSUP; } /* @@ -5134,8 +5544,7 @@ nfs_vfs_start( __unused int flags, __unused vfs_context_t ctx) { - - return (0); + return 0; } /* @@ -5168,16 +5577,19 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_DEAD); NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_NOTRESP); NFS_BITMAP_SET(miflags_mask, NFS_MIFLAG_RECOVERY); - if (nmp->nm_state & NFSSTA_DEAD) + if (nmp->nm_state & NFSSTA_DEAD) { NFS_BITMAP_SET(miflags, NFS_MIFLAG_DEAD); - if ((nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_JUKEBOXTIMEO)) || - ((nmp->nm_state & NFSSTA_LOCKTIMEO) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED))) + } + if ((nmp->nm_state & (NFSSTA_TIMEO | NFSSTA_JUKEBOXTIMEO)) || + ((nmp->nm_state & NFSSTA_LOCKTIMEO) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED))) { NFS_BITMAP_SET(miflags, NFS_MIFLAG_NOTRESP); - if (nmp->nm_state & NFSSTA_RECOVER) + } + if (nmp->nm_state & NFSSTA_RECOVER) { NFS_BITMAP_SET(miflags, NFS_MIFLAG_RECOVERY); + } /* get original mount args length */ - xb_init_buffer(&xborig, nmp->nm_args, 2*XDRWORD); + xb_init_buffer(&xborig, nmp->nm_args, 2 * XDRWORD); xb_get_32(error, &xborig, origargsvers); /* version */ xb_get_32(error, &xborig, origargslength); /* args length */ nfsmerr_if(error); @@ -5186,8 +5598,9 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) NFS_BITMAP_ZERO(mattrs, NFS_MATTR_BITMAP_LEN); NFS_BITMAP_SET(mattrs, NFS_MATTR_FLAGS); NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_VERSION); - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_MINOR_VERSION); + } NFS_BITMAP_SET(mattrs, NFS_MATTR_READ_SIZE); NFS_BITMAP_SET(mattrs, NFS_MATTR_WRITE_SIZE); NFS_BITMAP_SET(mattrs, NFS_MATTR_READDIR_SIZE); @@ -5198,44 +5611,56 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) NFS_BITMAP_SET(mattrs, NFS_MATTR_ATTRCACHE_DIR_MAX); NFS_BITMAP_SET(mattrs, NFS_MATTR_LOCK_MODE); NFS_BITMAP_SET(mattrs, NFS_MATTR_SECURITY); - if (nmp->nm_etype.selected < 
nmp->nm_etype.count) + if (nmp->nm_etype.selected < nmp->nm_etype.count) { NFS_BITMAP_SET(mattrs, NFS_MATTR_KERB_ETYPE); + } NFS_BITMAP_SET(mattrs, NFS_MATTR_MAX_GROUP_LIST); NFS_BITMAP_SET(mattrs, NFS_MATTR_SOCKET_TYPE); NFS_BITMAP_SET(mattrs, NFS_MATTR_NFS_PORT); - if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mountport) + if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mountport) { NFS_BITMAP_SET(mattrs, NFS_MATTR_MOUNT_PORT); + } NFS_BITMAP_SET(mattrs, NFS_MATTR_REQUEST_TIMEOUT); - if (NMFLAG(nmp, SOFT)) + if (NMFLAG(nmp, SOFT)) { NFS_BITMAP_SET(mattrs, NFS_MATTR_SOFT_RETRY_COUNT); - if (nmp->nm_deadtimeout) + } + if (nmp->nm_deadtimeout) { NFS_BITMAP_SET(mattrs, NFS_MATTR_DEAD_TIMEOUT); - if (nmp->nm_fh) + } + if (nmp->nm_fh) { NFS_BITMAP_SET(mattrs, NFS_MATTR_FH); + } NFS_BITMAP_SET(mattrs, NFS_MATTR_FS_LOCATIONS); NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFLAGS); - if (origargsvers < NFS_ARGSVERSION_XDR) + if (origargsvers < NFS_ARGSVERSION_XDR) { NFS_BITMAP_SET(mattrs, NFS_MATTR_MNTFROM); - if (nmp->nm_realm) + } + if (nmp->nm_realm) { NFS_BITMAP_SET(mattrs, NFS_MATTR_REALM); - if (nmp->nm_principal) + } + if (nmp->nm_principal) { NFS_BITMAP_SET(mattrs, NFS_MATTR_PRINCIPAL); - if (nmp->nm_sprinc) + } + if (nmp->nm_sprinc) { NFS_BITMAP_SET(mattrs, NFS_MATTR_SVCPRINCIPAL); - + } + /* set up current mount flags bitmap */ /* first set the flags that we will be setting - either on OR off */ NFS_BITMAP_ZERO(mflags_mask, NFS_MFLAG_BITMAP_LEN); NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_SOFT); NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_INTR); NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RESVPORT); - if (nmp->nm_sotype == SOCK_DGRAM) + if (nmp->nm_sotype == SOCK_DGRAM) { NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOCONNECT); + } NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_DUMBTIMER); - if (nmp->nm_vers < NFS_VER4) + if (nmp->nm_vers < NFS_VER4) { NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_CALLUMNT); - if (nmp->nm_vers >= NFS_VER3) + } + if (nmp->nm_vers >= NFS_VER3) { NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_RDIRPLUS); + } NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NONEGNAMECACHE); NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MUTEJUKEBOX); if (nmp->nm_vers >= NFS_VER4) { @@ -5247,52 +5672,71 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) } NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NFC); NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_NOQUOTA); - if (nmp->nm_vers < NFS_VER4) + if (nmp->nm_vers < NFS_VER4) { NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MNTUDP); + } NFS_BITMAP_SET(mflags_mask, NFS_MFLAG_MNTQUICK); /* now set the flags that should be set */ NFS_BITMAP_ZERO(mflags, NFS_MFLAG_BITMAP_LEN); - if (NMFLAG(nmp, SOFT)) + if (NMFLAG(nmp, SOFT)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_SOFT); - if (NMFLAG(nmp, INTR)) + } + if (NMFLAG(nmp, INTR)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_INTR); - if (NMFLAG(nmp, RESVPORT)) + } + if (NMFLAG(nmp, RESVPORT)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_RESVPORT); - if ((nmp->nm_sotype == SOCK_DGRAM) && NMFLAG(nmp, NOCONNECT)) + } + if ((nmp->nm_sotype == SOCK_DGRAM) && NMFLAG(nmp, NOCONNECT)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCONNECT); - if (NMFLAG(nmp, DUMBTIMER)) + } + if (NMFLAG(nmp, DUMBTIMER)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_DUMBTIMER); - if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, CALLUMNT)) + } + if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, CALLUMNT)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_CALLUMNT); - if ((nmp->nm_vers >= NFS_VER3) && NMFLAG(nmp, RDIRPLUS)) + } + if ((nmp->nm_vers >= NFS_VER3) && NMFLAG(nmp, RDIRPLUS)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_RDIRPLUS); - if (NMFLAG(nmp, NONEGNAMECACHE)) + } + if 
(NMFLAG(nmp, NONEGNAMECACHE)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NONEGNAMECACHE); - if (NMFLAG(nmp, MUTEJUKEBOX)) + } + if (NMFLAG(nmp, MUTEJUKEBOX)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_MUTEJUKEBOX); + } if (nmp->nm_vers >= NFS_VER4) { - if (NMFLAG(nmp, EPHEMERAL)) + if (NMFLAG(nmp, EPHEMERAL)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_EPHEMERAL); - if (NMFLAG(nmp, NOCALLBACK)) + } + if (NMFLAG(nmp, NOCALLBACK)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NOCALLBACK); - if (NMFLAG(nmp, NAMEDATTR)) + } + if (NMFLAG(nmp, NAMEDATTR)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NAMEDATTR); - if (NMFLAG(nmp, NOACL)) + } + if (NMFLAG(nmp, NOACL)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NOACL); - if (NMFLAG(nmp, ACLONLY)) + } + if (NMFLAG(nmp, ACLONLY)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_ACLONLY); + } } - if (NMFLAG(nmp, NFC)) + if (NMFLAG(nmp, NFC)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NFC); + } if (NMFLAG(nmp, NOQUOTA) || ((nmp->nm_vers >= NFS_VER4) && !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_HARD) && !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_AVAIL_SOFT) && - !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED))) + !NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_supp_attr, NFS_FATTR_QUOTA_USED))) { NFS_BITMAP_SET(mflags, NFS_MFLAG_NOQUOTA); - if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, MNTUDP)) + } + if ((nmp->nm_vers < NFS_VER4) && NMFLAG(nmp, MNTUDP)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_MNTUDP); - if (NMFLAG(nmp, MNTQUICK)) + } + if (NMFLAG(nmp, MNTQUICK)) { NFS_BITMAP_SET(mflags, NFS_MFLAG_MNTQUICK); + } /* assemble info buffer: */ xb_init_buffer(&xbinfo, NULL, 0); @@ -5302,8 +5746,9 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) xb_add_bitmap(error, &xbinfo, miattrs, NFS_MIATTR_BITMAP_LEN); xb_add_bitmap(error, &xbinfo, miflags, NFS_MIFLAG_BITMAP_LEN); xb_add_32(error, &xbinfo, origargslength); - if (!error) + if (!error) { error = xb_add_bytes(&xbinfo, nmp->nm_args, origargslength, 0); + } /* the opaque byte count for the current mount args values: */ curargsopaquelength_offset = xb_offset(&xbinfo); @@ -5319,90 +5764,103 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) xb_add_32(error, &xbinfo, 0); xb_add_bitmap(error, &xbinfo, mflags_mask, NFS_MFLAG_BITMAP_LEN); xb_add_bitmap(error, &xbinfo, mflags, NFS_MFLAG_BITMAP_LEN); - xb_add_32(error, &xbinfo, nmp->nm_vers); /* NFS_VERSION */ - if (nmp->nm_vers >= NFS_VER4) - xb_add_32(error, &xbinfo, nmp->nm_minor_vers); /* NFS_MINOR_VERSION */ - xb_add_32(error, &xbinfo, nmp->nm_rsize); /* READ_SIZE */ - xb_add_32(error, &xbinfo, nmp->nm_wsize); /* WRITE_SIZE */ - xb_add_32(error, &xbinfo, nmp->nm_readdirsize); /* READDIR_SIZE */ - xb_add_32(error, &xbinfo, nmp->nm_readahead); /* READAHEAD */ - xb_add_32(error, &xbinfo, nmp->nm_acregmin); /* ATTRCACHE_REG_MIN */ - xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_REG_MIN */ - xb_add_32(error, &xbinfo, nmp->nm_acregmax); /* ATTRCACHE_REG_MAX */ - xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_REG_MAX */ - xb_add_32(error, &xbinfo, nmp->nm_acdirmin); /* ATTRCACHE_DIR_MIN */ - xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_DIR_MIN */ - xb_add_32(error, &xbinfo, nmp->nm_acdirmax); /* ATTRCACHE_DIR_MAX */ - xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_DIR_MAX */ - xb_add_32(error, &xbinfo, nmp->nm_lockmode); /* LOCK_MODE */ + xb_add_32(error, &xbinfo, nmp->nm_vers); /* NFS_VERSION */ + if (nmp->nm_vers >= NFS_VER4) { + xb_add_32(error, &xbinfo, nmp->nm_minor_vers); /* NFS_MINOR_VERSION */ + } + xb_add_32(error, &xbinfo, nmp->nm_rsize); /* READ_SIZE */ + 
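/*
 * Illustrative aside (editor's sketch, not xnu code): nfs_mountinfo_assemble()
 * above encodes mount attributes as a bitmap followed by values for exactly
 * the attributes whose bits are set, emitted in ascending bit order.  A tiny
 * demonstration of that convention; the SK_ macros and attribute numbers are
 * simplified stand-ins for the NFS_BITMAP_ and NFS_MATTR_ definitions in nfs.h.
 */
#include <stdint.h>
#include <stdio.h>

#define SK_BITMAP_LEN		2
#define SK_BITMAP_SET(b, i)	((b)[(i) / 32] |= 1U << ((i) % 32))
#define SK_BITMAP_ISSET(b, i)	((b)[(i) / 32] & (1U << ((i) % 32)))

enum { SK_ATTR_FLAGS = 0, SK_ATTR_VERSION = 1, SK_ATTR_READ_SIZE = 5 };

int
main(void)
{
	uint32_t attrs[SK_BITMAP_LEN] = { 0 };

	/* sender: mark which attributes will follow ... */
	SK_BITMAP_SET(attrs, SK_ATTR_VERSION);
	SK_BITMAP_SET(attrs, SK_ATTR_READ_SIZE);

	/* ... then emit a value for each set bit, lowest bit first */
	for (int i = 0; i < SK_BITMAP_LEN * 32; i++) {
		if (SK_BITMAP_ISSET(attrs, i)) {
			printf("would emit value for attribute %d\n", i);
		}
	}
	return 0;
}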
xb_add_32(error, &xbinfo, nmp->nm_wsize); /* WRITE_SIZE */ + xb_add_32(error, &xbinfo, nmp->nm_readdirsize); /* READDIR_SIZE */ + xb_add_32(error, &xbinfo, nmp->nm_readahead); /* READAHEAD */ + xb_add_32(error, &xbinfo, nmp->nm_acregmin); /* ATTRCACHE_REG_MIN */ + xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_REG_MIN */ + xb_add_32(error, &xbinfo, nmp->nm_acregmax); /* ATTRCACHE_REG_MAX */ + xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_REG_MAX */ + xb_add_32(error, &xbinfo, nmp->nm_acdirmin); /* ATTRCACHE_DIR_MIN */ + xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_DIR_MIN */ + xb_add_32(error, &xbinfo, nmp->nm_acdirmax); /* ATTRCACHE_DIR_MAX */ + xb_add_32(error, &xbinfo, 0); /* ATTRCACHE_DIR_MAX */ + xb_add_32(error, &xbinfo, nmp->nm_lockmode); /* LOCK_MODE */ if (nmp->nm_sec.count) { - xb_add_32(error, &xbinfo, nmp->nm_sec.count); /* SECURITY */ + xb_add_32(error, &xbinfo, nmp->nm_sec.count); /* SECURITY */ nfsmerr_if(error); - for (i=0; i < nmp->nm_sec.count; i++) + for (i = 0; i < nmp->nm_sec.count; i++) { xb_add_32(error, &xbinfo, nmp->nm_sec.flavors[i]); + } } else if (nmp->nm_servsec.count) { - xb_add_32(error, &xbinfo, nmp->nm_servsec.count); /* SECURITY */ + xb_add_32(error, &xbinfo, nmp->nm_servsec.count); /* SECURITY */ nfsmerr_if(error); - for (i=0; i < nmp->nm_servsec.count; i++) + for (i = 0; i < nmp->nm_servsec.count; i++) { xb_add_32(error, &xbinfo, nmp->nm_servsec.flavors[i]); + } } else { - xb_add_32(error, &xbinfo, 1); /* SECURITY */ + xb_add_32(error, &xbinfo, 1); /* SECURITY */ xb_add_32(error, &xbinfo, nmp->nm_auth); } if (nmp->nm_etype.selected < nmp->nm_etype.count) { xb_add_32(error, &xbinfo, nmp->nm_etype.count); xb_add_32(error, &xbinfo, nmp->nm_etype.selected); - for (uint32_t j=0; j < nmp->nm_etype.count; j++) + for (uint32_t j = 0; j < nmp->nm_etype.count; j++) { xb_add_32(error, &xbinfo, nmp->nm_etype.etypes[j]); + } nfsmerr_if(error); } - xb_add_32(error, &xbinfo, nmp->nm_numgrps); /* MAX_GROUP_LIST */ + xb_add_32(error, &xbinfo, nmp->nm_numgrps); /* MAX_GROUP_LIST */ nfsmerr_if(error); snprintf(sotype, sizeof(sotype), "%s%s", (nmp->nm_sotype == SOCK_DGRAM) ? "udp" : "tcp", - nmp->nm_sofamily ? (nmp->nm_sofamily == AF_INET) ? "4" : "6" : ""); - xb_add_string(error, &xbinfo, sotype, strlen(sotype)); /* SOCKET_TYPE */ + nmp->nm_sofamily ? (nmp->nm_sofamily == AF_INET) ? 
"4" : "6" : ""); + xb_add_string(error, &xbinfo, sotype, strlen(sotype)); /* SOCKET_TYPE */ xb_add_32(error, &xbinfo, ntohs(((struct sockaddr_in*)nmp->nm_saddr)->sin_port)); /* NFS_PORT */ - if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mountport) - xb_add_32(error, &xbinfo, nmp->nm_mountport); /* MOUNT_PORT */ + if ((nmp->nm_vers < NFS_VER4) && nmp->nm_mountport) { + xb_add_32(error, &xbinfo, nmp->nm_mountport); /* MOUNT_PORT */ + } timeo = (nmp->nm_timeo * 10) / NFS_HZ; - xb_add_32(error, &xbinfo, timeo/10); /* REQUEST_TIMEOUT */ - xb_add_32(error, &xbinfo, (timeo%10)*100000000); /* REQUEST_TIMEOUT */ - if (NMFLAG(nmp, SOFT)) - xb_add_32(error, &xbinfo, nmp->nm_retry); /* SOFT_RETRY_COUNT */ + xb_add_32(error, &xbinfo, timeo / 10); /* REQUEST_TIMEOUT */ + xb_add_32(error, &xbinfo, (timeo % 10) * 100000000); /* REQUEST_TIMEOUT */ + if (NMFLAG(nmp, SOFT)) { + xb_add_32(error, &xbinfo, nmp->nm_retry); /* SOFT_RETRY_COUNT */ + } if (nmp->nm_deadtimeout) { - xb_add_32(error, &xbinfo, nmp->nm_deadtimeout); /* DEAD_TIMEOUT */ - xb_add_32(error, &xbinfo, 0); /* DEAD_TIMEOUT */ + xb_add_32(error, &xbinfo, nmp->nm_deadtimeout); /* DEAD_TIMEOUT */ + xb_add_32(error, &xbinfo, 0); /* DEAD_TIMEOUT */ } - if (nmp->nm_fh) + if (nmp->nm_fh) { xb_add_fh(error, &xbinfo, &nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len); /* FH */ - xb_add_32(error, &xbinfo, nmp->nm_locations.nl_numlocs); /* FS_LOCATIONS */ + } + xb_add_32(error, &xbinfo, nmp->nm_locations.nl_numlocs); /* FS_LOCATIONS */ for (loc = 0; !error && (loc < nmp->nm_locations.nl_numlocs); loc++) { xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servcount); for (serv = 0; !error && (serv < nmp->nm_locations.nl_locations[loc]->nl_servcount); serv++) { xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name, - strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name)); + strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_name)); xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount); - for (addr = 0; !error && (addr < nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++) + for (addr = 0; !error && (addr < nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addrcount); addr++) { xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr], - strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr])); + strlen(nmp->nm_locations.nl_locations[loc]->nl_servers[serv]->ns_addresses[addr])); + } xb_add_32(error, &xbinfo, 0); /* empty server info */ } xb_add_32(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount); - for (comp = 0; !error && (comp < nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount); comp++) + for (comp = 0; !error && (comp < nmp->nm_locations.nl_locations[loc]->nl_path.np_compcount); comp++) { xb_add_string(error, &xbinfo, nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp], - strlen(nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp])); + strlen(nmp->nm_locations.nl_locations[loc]->nl_path.np_components[comp])); + } xb_add_32(error, &xbinfo, 0); /* empty fs location info */ } - xb_add_32(error, &xbinfo, vfs_flags(nmp->nm_mountp)); /* MNTFLAGS */ - if (origargsvers < NFS_ARGSVERSION_XDR) + xb_add_32(error, &xbinfo, vfs_flags(nmp->nm_mountp)); /* MNTFLAGS */ + if (origargsvers < NFS_ARGSVERSION_XDR) { xb_add_string(error, &xbinfo, vfs_statfs(nmp->nm_mountp)->f_mntfromname, - 
strlen(vfs_statfs(nmp->nm_mountp)->f_mntfromname)); /* MNTFROM */ - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) + strlen(vfs_statfs(nmp->nm_mountp)->f_mntfromname)); /* MNTFROM */ + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_REALM)) { xb_add_string(error, &xbinfo, nmp->nm_realm, strlen(nmp->nm_realm)); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_PRINCIPAL)) { xb_add_string(error, &xbinfo, nmp->nm_principal, strlen(nmp->nm_principal)); - if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) + } + if (NFS_BITMAP_ISSET(mattrs, NFS_MATTR_SVCPRINCIPAL)) { xb_add_string(error, &xbinfo, nmp->nm_sprinc, strlen(nmp->nm_sprinc)); + } curargs_end_offset = xb_offset(&xbinfo); @@ -5418,19 +5876,19 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) end_offset = xb_offset(&xbinfo); if (!error) { error = xb_seek(&xbinfo, attrslength_offset); - xb_add_32(error, &xbinfo, curargs_end_offset - attrslength_offset - XDRWORD/*don't include length field*/); + xb_add_32(error, &xbinfo, curargs_end_offset - attrslength_offset - XDRWORD /*don't include length field*/); } if (!error) { error = xb_seek(&xbinfo, curargslength_offset); - xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD/*version*/); + xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD /*version*/); } if (!error) { error = xb_seek(&xbinfo, curargsopaquelength_offset); - xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD/*version*/); + xb_add_32(error, &xbinfo, curargs_end_offset - curargslength_offset + XDRWORD /*version*/); } if (!error) { error = xb_seek(&xbinfo, infolength_offset); - xb_add_32(error, &xbinfo, end_offset - infolength_offset + XDRWORD/*version*/); + xb_add_32(error, &xbinfo, end_offset - infolength_offset + XDRWORD /*version*/); } nfsmerr_if(error); @@ -5441,7 +5899,7 @@ nfs_mountinfo_assemble(struct nfsmount *nmp, struct xdrbuf *xb) xbinfo.xb_flags &= ~XB_CLEANUP; nfsmerr: xb_cleanup(&xbinfo); - return (error); + return error; } /* @@ -5449,12 +5907,12 @@ nfsmerr: */ int nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, - user_addr_t newp, size_t newlen, vfs_context_t ctx) + user_addr_t newp, size_t newlen, vfs_context_t ctx) { int error = 0, val; #ifndef CONFIG_EMBEDDED int softnobrowse; -#endif +#endif struct sysctl_req *req = NULL; union union_vfsidctl vc; mount_t mp; @@ -5484,9 +5942,9 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, /* * All names at this level are terminal. */ - if (namelen > 1) - return (ENOTDIR); /* overloaded */ - + if (namelen > 1) { + return ENOTDIR; /* overloaded */ + } is_64_bit = vfs_context_is64bit(ctx); /* common code for "new style" VFS_CTL sysctl, get the mount. */ @@ -5496,20 +5954,23 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, case VFS_CTL_NSTATUS: #ifndef CONFIG_EMBEDDED case VFS_CTL_QUERY: -#endif +#endif req = CAST_DOWN(struct sysctl_req *, oldp); if (req == NULL) { return EFAULT; } error = SYSCTL_IN(req, &vc, is_64_bit? 
sizeof(vc.vc64):sizeof(vc.vc32)); - if (error) - return (error); + if (error) { + return error; + } mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */ - if (mp == NULL) - return (ENOENT); + if (mp == NULL) { + return ENOENT; + } nmp = VFSTONFS(mp); - if (!nmp) - return (ENOENT); + if (!nmp) { + return ENOENT; + } bzero(&vq, sizeof(vq)); req->newidx = 0; if (is_64_bit) { @@ -5526,51 +5987,61 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, #endif } - switch(name[0]) { + switch (name[0]) { case NFS_NFSSTATS: if (!oldp) { *oldlenp = sizeof nfsstats; - return (0); + return 0; } if (*oldlenp < sizeof nfsstats) { *oldlenp = sizeof nfsstats; - return (ENOMEM); + return ENOMEM; } error = copyout(&nfsstats, oldp, sizeof nfsstats); - if (error) - return (error); + if (error) { + return error; + } - if (newp && newlen != sizeof nfsstats) - return (EINVAL); + if (newp && newlen != sizeof nfsstats) { + return EINVAL; + } - if (newp) + if (newp) { return copyin(newp, &nfsstats, sizeof nfsstats); - return (0); + } + return 0; case NFS_MOUNTINFO: /* read in the fsid */ - if (*oldlenp < sizeof(fsid)) - return (EINVAL); - if ((error = copyin(oldp, &fsid, sizeof(fsid)))) - return (error); + if (*oldlenp < sizeof(fsid)) { + return EINVAL; + } + if ((error = copyin(oldp, &fsid, sizeof(fsid)))) { + return error; + } /* swizzle it back to host order */ fsid.val[0] = ntohl(fsid.val[0]); fsid.val[1] = ntohl(fsid.val[1]); /* find mount and make sure it's NFS */ - if (((mp = vfs_getvfs(&fsid))) == NULL) - return (ENOENT); - if (strcmp(mp->mnt_vfsstat.f_fstypename, "nfs")) - return (EINVAL); - if (((nmp = VFSTONFS(mp))) == NULL) - return (ENOENT); + if (((mp = vfs_getvfs(&fsid))) == NULL) { + return ENOENT; + } + if (strcmp(mp->mnt_vfsstat.f_fstypename, "nfs")) { + return EINVAL; + } + if (((nmp = VFSTONFS(mp))) == NULL) { + return ENOENT; + } xb_init(&xb, 0); - if ((error = nfs_mountinfo_assemble(nmp, &xb))) - return (error); - if (*oldlenp < xb.xb_u.xb_buffer.xbb_len) + if ((error = nfs_mountinfo_assemble(nmp, &xb))) { + return error; + } + if (*oldlenp < xb.xb_u.xb_buffer.xbb_len) { error = ENOMEM; - else + } else { error = copyout(xb_buffer_base(&xb), oldp, xb.xb_u.xb_buffer.xbb_len); + } *oldlenp = xb.xb_u.xb_buffer.xbb_len; xb_cleanup(&xb); break; @@ -5581,18 +6052,19 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, if (!nfsrv_is_initialized()) { stat_desc.rec_count = 0; - if (oldp && (*oldlenp >= sizeof(struct nfs_export_stat_desc))) + if (oldp && (*oldlenp >= sizeof(struct nfs_export_stat_desc))) { error = copyout(&stat_desc, oldp, sizeof(struct nfs_export_stat_desc)); + } *oldlenp = sizeof(struct nfs_export_stat_desc); - return (error); + return error; } /* Count the number of exported directories */ lck_rw_lock_shared(&nfsrv_export_rwlock); numExports = 0; LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) - LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) - numExports += 1; + LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) + numExports += 1; /* update stat descriptor's export record count */ stat_desc.rec_count = numExports; @@ -5605,7 +6077,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, lck_rw_done(&nfsrv_export_rwlock); /* indicate required buffer len */ *oldlenp = totlen; - return (0); + return 0; } /* We require the caller's buffer to be at least large enough to hold the descriptor */ @@ -5613,7 +6085,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, 
lck_rw_done(&nfsrv_export_rwlock); /* indicate required buffer len */ *oldlenp = totlen; - return (ENOMEM); + return ENOMEM; } /* indicate required buffer len */ @@ -5623,7 +6095,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, if (!numExports) { lck_rw_done(&nfsrv_export_rwlock); error = copyout(&stat_desc, oldp, sizeof(struct nfs_export_stat_desc)); - return (error); + return error; } /* calculate how many actual export stat records fit into caller's buffer */ @@ -5634,12 +6106,13 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, lck_rw_done(&nfsrv_export_rwlock); stat_desc.rec_count = 0; error = copyout(&stat_desc, oldp, sizeof(struct nfs_export_stat_desc)); - return (error); + return error; } /* adjust to actual number of records to copyout to caller's buffer */ - if (numRecs > numExports) + if (numRecs > numExports) { numRecs = numExports; + } /* set actual number of records we are returning */ stat_desc.rec_count = numRecs; @@ -5649,7 +6122,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, error = copyout(&stat_desc, oldp + pos, sizeof(struct nfs_export_stat_desc)); if (error) { lck_rw_done(&nfsrv_export_rwlock); - return (error); + return error; } pos += sizeof(struct nfs_export_stat_desc); @@ -5657,27 +6130,27 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, count = 0; LIST_FOREACH(nxfs, &nfsrv_exports, nxfs_next) { LIST_FOREACH(nx, &nxfs->nxfs_exports, nx_next) { - - if (count >= numRecs) + if (count >= numRecs) { break; + } /* build exported filesystem path */ memset(statrec.path, 0, sizeof(statrec.path)); snprintf(statrec.path, sizeof(statrec.path), "%s%s%s", - nxfs->nxfs_path, ((nxfs->nxfs_path[1] && nx->nx_path[0]) ? "/" : ""), - nx->nx_path); + nxfs->nxfs_path, ((nxfs->nxfs_path[1] && nx->nx_path[0]) ? 
"/" : ""), + nx->nx_path); /* build the 64-bit export stat counters */ statrec.ops = ((uint64_t)nx->nx_stats.ops.hi << 32) | - nx->nx_stats.ops.lo; + nx->nx_stats.ops.lo; statrec.bytes_read = ((uint64_t)nx->nx_stats.bytes_read.hi << 32) | - nx->nx_stats.bytes_read.lo; + nx->nx_stats.bytes_read.lo; statrec.bytes_written = ((uint64_t)nx->nx_stats.bytes_written.hi << 32) | - nx->nx_stats.bytes_written.lo; + nx->nx_stats.bytes_written.lo; error = copyout(&statrec, oldp + pos, sizeof(statrec)); if (error) { lck_rw_done(&nfsrv_export_rwlock); - return (error); + return error; } /* advance buffer position */ pos += sizeof(statrec); @@ -5696,17 +6169,19 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, bytes_avail = *oldlenp; recs_copied = 0; - if (!nfsrv_is_initialized()) /* NFS server not initialized, so no stats */ + if (!nfsrv_is_initialized()) { /* NFS server not initialized, so no stats */ goto ustat_skip; + } /* reclaim old expired user nodes */ nfsrv_active_user_list_reclaim(); /* reserve space for the buffer descriptor */ - if (bytes_avail >= sizeof(struct nfs_user_stat_desc)) + if (bytes_avail >= sizeof(struct nfs_user_stat_desc)) { bytes_avail -= sizeof(struct nfs_user_stat_desc); - else + } else { bytes_avail = 0; + } /* put buffer position past the buffer descriptor */ pos = sizeof(struct nfs_user_stat_desc); @@ -5731,8 +6206,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, pos += sizeof(struct nfs_user_stat_path_rec); bytes_avail -= sizeof(struct nfs_user_stat_path_rec); recs_copied++; - } - else { + } else { /* Caller's buffer is exhausted */ bytes_avail = 0; } @@ -5768,8 +6242,7 @@ nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp, pos += sizeof(struct nfs_user_stat_user_rec); bytes_avail -= sizeof(struct nfs_user_stat_user_rec); recs_copied++; - } - else { + } else { /* Caller's buffer is exhausted */ bytes_avail = 0; } @@ -5790,10 +6263,11 @@ ustat_skip: if (!error) { /* check if there was enough room for the buffer descriptor */ - if (*oldlenp >= sizeof(struct nfs_user_stat_desc)) + if (*oldlenp >= sizeof(struct nfs_user_stat_desc)) { error = copyout(&ustat_desc, oldp, sizeof(struct nfs_user_stat_desc)); - else + } else { error = ENOMEM; + } /* always indicate required buffer size */ *oldlenp = bytes_total; @@ -5802,12 +6276,12 @@ ustat_skip: case NFS_USERCOUNT: if (!oldp) { *oldlenp = sizeof(nfsrv_user_stat_node_count); - return (0); + return 0; } if (*oldlenp < sizeof(nfsrv_user_stat_node_count)) { *oldlenp = sizeof(nfsrv_user_stat_node_count); - return (ENOMEM); + return ENOMEM; } if (nfsrv_is_initialized()) { @@ -5819,18 +6293,20 @@ ustat_skip: break; #endif /* NFSSERVER */ case VFS_CTL_NOLOCKS: - if (req->oldptr != USER_ADDR_NULL) { + if (req->oldptr != USER_ADDR_NULL) { lck_mtx_lock(&nmp->nm_lock); val = (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED) ? 
1 : 0; lck_mtx_unlock(&nmp->nm_lock); - error = SYSCTL_OUT(req, &val, sizeof(val)); - if (error) - return (error); - } - if (req->newptr != USER_ADDR_NULL) { - error = SYSCTL_IN(req, &val, sizeof(val)); - if (error) - return (error); + error = SYSCTL_OUT(req, &val, sizeof(val)); + if (error) { + return error; + } + } + if (req->newptr != USER_ADDR_NULL) { + error = SYSCTL_IN(req, &val, sizeof(val)); + if (error) { + return error; + } lck_mtx_lock(&nmp->nm_lock); if (nmp->nm_lockmode == NFS_LOCK_MODE_LOCAL) { /* can't toggle locks when using local locks */ @@ -5839,56 +6315,65 @@ ustat_skip: /* can't disable locks for NFSv4 */ error = EINVAL; } else if (val) { - if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)) + if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)) { nfs_lockd_mount_unregister(nmp); + } nmp->nm_lockmode = NFS_LOCK_MODE_DISABLED; nmp->nm_state &= ~NFSSTA_LOCKTIMEO; } else { - if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) + if ((nmp->nm_vers <= NFS_VER3) && (nmp->nm_lockmode == NFS_LOCK_MODE_DISABLED)) { nfs_lockd_mount_register(nmp); + } nmp->nm_lockmode = NFS_LOCK_MODE_ENABLED; } lck_mtx_unlock(&nmp->nm_lock); - } + } break; #ifndef CONFIG_EMBEDDED case VFS_CTL_QUERY: lck_mtx_lock(&nmp->nm_lock); /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */ softnobrowse = (NMFLAG(nmp, SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE)); - if (!softnobrowse && (nmp->nm_state & NFSSTA_TIMEO)) + if (!softnobrowse && (nmp->nm_state & NFSSTA_TIMEO)) { vq.vq_flags |= VQ_NOTRESP; - if (!softnobrowse && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO) && !NMFLAG(nmp, MUTEJUKEBOX)) + } + if (!softnobrowse && (nmp->nm_state & NFSSTA_JUKEBOXTIMEO) && !NMFLAG(nmp, MUTEJUKEBOX)) { vq.vq_flags |= VQ_NOTRESP; + } if (!softnobrowse && (nmp->nm_state & NFSSTA_LOCKTIMEO) && - (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)) + (nmp->nm_lockmode == NFS_LOCK_MODE_ENABLED)) { vq.vq_flags |= VQ_NOTRESP; - if (nmp->nm_state & NFSSTA_DEAD) + } + if (nmp->nm_state & NFSSTA_DEAD) { vq.vq_flags |= VQ_DEAD; + } lck_mtx_unlock(&nmp->nm_lock); error = SYSCTL_OUT(req, &vq, sizeof(vq)); break; #endif - case VFS_CTL_TIMEO: - if (req->oldptr != USER_ADDR_NULL) { + case VFS_CTL_TIMEO: + if (req->oldptr != USER_ADDR_NULL) { lck_mtx_lock(&nmp->nm_lock); val = nmp->nm_tprintf_initial_delay; lck_mtx_unlock(&nmp->nm_lock); - error = SYSCTL_OUT(req, &val, sizeof(val)); - if (error) - return (error); - } - if (req->newptr != USER_ADDR_NULL) { - error = SYSCTL_IN(req, &val, sizeof(val)); - if (error) - return (error); + error = SYSCTL_OUT(req, &val, sizeof(val)); + if (error) { + return error; + } + } + if (req->newptr != USER_ADDR_NULL) { + error = SYSCTL_IN(req, &val, sizeof(val)); + if (error) { + return error; + } lck_mtx_lock(&nmp->nm_lock); - if (val < 0) - nmp->nm_tprintf_initial_delay = 0; - else + if (val < 0) { + nmp->nm_tprintf_initial_delay = 0; + } else { nmp->nm_tprintf_initial_delay = val; + } lck_mtx_unlock(&nmp->nm_lock); - } + } break; case VFS_CTL_NSTATUS: /* @@ -5908,39 +6393,42 @@ ustat_skip: */ numThreads = 0; TAILQ_FOREACH(rq, &nfs_reqq, r_chain) { - if (rq->r_nmp == nmp) + if (rq->r_nmp == nmp) { numThreads++; + } } /* Calculate total size of result buffer */ totlen = sizeof(struct netfs_status) + (numThreads * sizeof(uint64_t)); - if (req->oldptr == USER_ADDR_NULL) { // Caller is querying buffer size + if (req->oldptr == USER_ADDR_NULL) { // Caller is querying buffer size 
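/*
 * Illustrative aside (editor's sketch, not xnu code): the VFS_CTL_NSTATUS
 * hunks above follow the usual sysctl sizing handshake -- a NULL old pointer
 * means the caller is only asking how large a buffer to allocate, and a
 * buffer smaller than the computed total gets ERANGE.  A user-space sketch
 * of the two-call pattern; sketch_status_sysctl() is a hypothetical helper,
 * and its payload stands in for the struct netfs_status record.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static int
sketch_status_sysctl(void *oldp, size_t *oldlenp)
{
	const char payload[] = "status record";	/* stand-in for netfs_status */
	size_t totlen = sizeof(payload);

	if (oldp == NULL) {		/* size query: report required length */
		*oldlenp = totlen;
		return 0;
	}
	if (*oldlenp < totlen) {	/* caller's buffer is too small */
		return ERANGE;
	}
	memcpy(oldp, payload, totlen);
	*oldlenp = totlen;
	return 0;
}

int
main(void)
{
	size_t len = 0;
	char buf[64];

	sketch_status_sysctl(NULL, &len);	/* first call: learn the size */
	if (len <= sizeof(buf) && sketch_status_sysctl(buf, &len) == 0) {
		printf("got %zu bytes: %s\n", len, buf);
	}
	return 0;
}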
lck_mtx_unlock(&nmp->nm_lock); lck_mtx_unlock(nfs_request_mutex); return SYSCTL_OUT(req, NULL, totlen); } - if (req->oldlen < totlen) { // Check if caller's buffer is big enough + if (req->oldlen < totlen) { // Check if caller's buffer is big enough lck_mtx_unlock(&nmp->nm_lock); lck_mtx_unlock(nfs_request_mutex); - return (ERANGE); + return ERANGE; } - MALLOC(nsp, struct netfs_status *, totlen, M_TEMP, M_WAITOK|M_ZERO); + MALLOC(nsp, struct netfs_status *, totlen, M_TEMP, M_WAITOK | M_ZERO); if (nsp == NULL) { lck_mtx_unlock(&nmp->nm_lock); lck_mtx_unlock(nfs_request_mutex); - return (ENOMEM); + return ENOMEM; } timeoutmask = NFSSTA_TIMEO | NFSSTA_LOCKTIMEO | NFSSTA_JUKEBOXTIMEO; - if (nmp->nm_state & timeoutmask) + if (nmp->nm_state & timeoutmask) { nsp->ns_status |= VQ_NOTRESP; - if (nmp->nm_state & NFSSTA_DEAD) + } + if (nmp->nm_state & NFSSTA_DEAD) { nsp->ns_status |= VQ_DEAD; + } (void) nfs_mountopts(nmp, nsp->ns_mountopts, sizeof(nsp->ns_mountopts)); nsp->ns_threadcount = numThreads; - + /* * Get the thread ids of threads waiting for a reply * and find the longest wait time. @@ -5954,13 +6442,15 @@ ustat_skip: sendtime = now.tv_sec; TAILQ_FOREACH(rq, &nfs_reqq, r_chain) { if (rq->r_nmp == nmp) { - if (rq->r_start < sendtime) + if (rq->r_start < sendtime) { sendtime = rq->r_start; - // A thread_id of zero is used to represent an async I/O request. + } + // A thread_id of zero is used to represent an async I/O request. nsp->ns_threadids[count] = - rq->r_thread ? thread_tid(rq->r_thread) : 0; - if (++count >= numThreads) + rq->r_thread ? thread_tid(rq->r_thread) : 0; + if (++count >= numThreads) { break; + } } } nsp->ns_waittime = now.tv_sec - sendtime; @@ -5969,11 +6459,11 @@ ustat_skip: lck_mtx_unlock(&nmp->nm_lock); lck_mtx_unlock(nfs_request_mutex); - error = SYSCTL_OUT(req, nsp, totlen); + error = SYSCTL_OUT(req, nsp, totlen); FREE(nsp, M_TEMP); break; default: - return (ENOTSUP); + return ENOTSUP; } - return (error); + return error; } diff --git a/bsd/nfs/nfs_vnops.c b/bsd/nfs/nfs_vnops.c index 5753b6ea8..0991a5373 100644 --- a/bsd/nfs/nfs_vnops.c +++ b/bsd/nfs/nfs_vnops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -125,136 +125,136 @@ /* * NFS vnode ops */ -int nfs_vnop_lookup(struct vnop_lookup_args *); -int nfsspec_vnop_read(struct vnop_read_args *); -int nfsspec_vnop_write(struct vnop_write_args *); -int nfsspec_vnop_close(struct vnop_close_args *); +int nfs_vnop_lookup(struct vnop_lookup_args *); +int nfsspec_vnop_read(struct vnop_read_args *); +int nfsspec_vnop_write(struct vnop_write_args *); +int nfsspec_vnop_close(struct vnop_close_args *); #if FIFO -int nfsfifo_vnop_read(struct vnop_read_args *); -int nfsfifo_vnop_write(struct vnop_write_args *); -int nfsfifo_vnop_close(struct vnop_close_args *); +int nfsfifo_vnop_read(struct vnop_read_args *); +int nfsfifo_vnop_write(struct vnop_write_args *); +int nfsfifo_vnop_close(struct vnop_close_args *); #endif -int nfs_vnop_ioctl(struct vnop_ioctl_args *); -int nfs_vnop_select(struct vnop_select_args *); -int nfs_vnop_setattr(struct vnop_setattr_args *); -int nfs_vnop_fsync(struct vnop_fsync_args *); -int nfs_vnop_rename(struct vnop_rename_args *); -int nfs_vnop_readdir(struct vnop_readdir_args *); -int nfs_vnop_readlink(struct vnop_readlink_args *); -int nfs_vnop_pathconf(struct vnop_pathconf_args *); -int nfs_vnop_pagein(struct vnop_pagein_args *); -int nfs_vnop_pageout(struct vnop_pageout_args *); -int nfs_vnop_blktooff(struct vnop_blktooff_args *); -int nfs_vnop_offtoblk(struct vnop_offtoblk_args *); -int nfs_vnop_blockmap(struct vnop_blockmap_args *); -int nfs_vnop_monitor(struct vnop_monitor_args *); - -int nfs3_vnop_create(struct vnop_create_args *); -int nfs3_vnop_mknod(struct vnop_mknod_args *); -int nfs3_vnop_getattr(struct vnop_getattr_args *); -int nfs3_vnop_link(struct vnop_link_args *); -int nfs3_vnop_mkdir(struct vnop_mkdir_args *); -int nfs3_vnop_rmdir(struct vnop_rmdir_args *); -int nfs3_vnop_symlink(struct vnop_symlink_args *); +int nfs_vnop_ioctl(struct vnop_ioctl_args *); +int nfs_vnop_select(struct vnop_select_args *); +int nfs_vnop_setattr(struct vnop_setattr_args *); +int nfs_vnop_fsync(struct vnop_fsync_args *); +int nfs_vnop_rename(struct vnop_rename_args *); +int nfs_vnop_readdir(struct vnop_readdir_args *); +int nfs_vnop_readlink(struct vnop_readlink_args *); +int nfs_vnop_pathconf(struct vnop_pathconf_args *); +int nfs_vnop_pagein(struct vnop_pagein_args *); +int nfs_vnop_pageout(struct vnop_pageout_args *); +int nfs_vnop_blktooff(struct vnop_blktooff_args *); +int nfs_vnop_offtoblk(struct vnop_offtoblk_args *); +int nfs_vnop_blockmap(struct vnop_blockmap_args *); +int nfs_vnop_monitor(struct vnop_monitor_args *); + +int nfs3_vnop_create(struct vnop_create_args *); +int nfs3_vnop_mknod(struct vnop_mknod_args *); +int nfs3_vnop_getattr(struct vnop_getattr_args *); +int nfs3_vnop_link(struct vnop_link_args *); +int nfs3_vnop_mkdir(struct vnop_mkdir_args *); +int nfs3_vnop_rmdir(struct vnop_rmdir_args *); +int nfs3_vnop_symlink(struct vnop_symlink_args *); vnop_t **nfsv2_vnodeop_p; static struct vnodeopv_entry_desc nfsv2_vnodeop_entries[] = { { &vnop_default_desc, (vnop_t *)vn_default_error }, - { &vnop_lookup_desc, (vnop_t *)nfs_vnop_lookup }, /* lookup */ - { &vnop_create_desc, (vnop_t *)nfs3_vnop_create }, /* create */ - { &vnop_mknod_desc, (vnop_t *)nfs3_vnop_mknod }, /* mknod */ - { &vnop_open_desc, (vnop_t *)nfs_vnop_open }, /* open */ - { &vnop_close_desc, (vnop_t *)nfs_vnop_close }, /* close */ - { &vnop_access_desc, (vnop_t *)nfs_vnop_access }, /* access */ - { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */ - { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr 
*/ - { &vnop_read_desc, (vnop_t *)nfs_vnop_read }, /* read */ - { &vnop_write_desc, (vnop_t *)nfs_vnop_write }, /* write */ - { &vnop_ioctl_desc, (vnop_t *)nfs_vnop_ioctl }, /* ioctl */ - { &vnop_select_desc, (vnop_t *)nfs_vnop_select }, /* select */ - { &vnop_revoke_desc, (vnop_t *)nfs_vnop_revoke }, /* revoke */ - { &vnop_mmap_desc, (vnop_t *)nfs_vnop_mmap }, /* mmap */ - { &vnop_mnomap_desc, (vnop_t *)nfs_vnop_mnomap }, /* mnomap */ - { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ - { &vnop_remove_desc, (vnop_t *)nfs_vnop_remove }, /* remove */ - { &vnop_link_desc, (vnop_t *)nfs3_vnop_link }, /* link */ - { &vnop_rename_desc, (vnop_t *)nfs_vnop_rename }, /* rename */ - { &vnop_mkdir_desc, (vnop_t *)nfs3_vnop_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (vnop_t *)nfs3_vnop_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (vnop_t *)nfs3_vnop_symlink }, /* symlink */ - { &vnop_readdir_desc, (vnop_t *)nfs_vnop_readdir }, /* readdir */ - { &vnop_readlink_desc, (vnop_t *)nfs_vnop_readlink }, /* readlink */ - { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (vnop_t *)err_strategy }, /* strategy */ - { &vnop_pathconf_desc, (vnop_t *)nfs_vnop_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (vnop_t *)nfs_vnop_advlock }, /* advlock */ - { &vnop_bwrite_desc, (vnop_t *)err_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ - { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (vnop_t *)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ - { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ + { &vnop_lookup_desc, (vnop_t *)nfs_vnop_lookup }, /* lookup */ + { &vnop_create_desc, (vnop_t *)nfs3_vnop_create }, /* create */ + { &vnop_mknod_desc, (vnop_t *)nfs3_vnop_mknod }, /* mknod */ + { &vnop_open_desc, (vnop_t *)nfs_vnop_open }, /* open */ + { &vnop_close_desc, (vnop_t *)nfs_vnop_close }, /* close */ + { &vnop_access_desc, (vnop_t *)nfs_vnop_access }, /* access */ + { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */ + { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ + { &vnop_read_desc, (vnop_t *)nfs_vnop_read }, /* read */ + { &vnop_write_desc, (vnop_t *)nfs_vnop_write }, /* write */ + { &vnop_ioctl_desc, (vnop_t *)nfs_vnop_ioctl }, /* ioctl */ + { &vnop_select_desc, (vnop_t *)nfs_vnop_select }, /* select */ + { &vnop_revoke_desc, (vnop_t *)nfs_vnop_revoke }, /* revoke */ + { &vnop_mmap_desc, (vnop_t *)nfs_vnop_mmap }, /* mmap */ + { &vnop_mnomap_desc, (vnop_t *)nfs_vnop_mnomap }, /* mnomap */ + { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ + { &vnop_remove_desc, (vnop_t *)nfs_vnop_remove }, /* remove */ + { &vnop_link_desc, (vnop_t *)nfs3_vnop_link }, /* link */ + { &vnop_rename_desc, (vnop_t *)nfs_vnop_rename }, /* rename */ + { &vnop_mkdir_desc, (vnop_t *)nfs3_vnop_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (vnop_t *)nfs3_vnop_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (vnop_t *)nfs3_vnop_symlink }, /* symlink */ + { &vnop_readdir_desc, (vnop_t *)nfs_vnop_readdir }, /* readdir */ + { &vnop_readlink_desc, (vnop_t *)nfs_vnop_readlink }, /* readlink */ + { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ + { &vnop_reclaim_desc, (vnop_t 
*)nfs_vnop_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (vnop_t *)err_strategy }, /* strategy */ + { &vnop_pathconf_desc, (vnop_t *)nfs_vnop_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (vnop_t *)nfs_vnop_advlock }, /* advlock */ + { &vnop_bwrite_desc, (vnop_t *)err_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ + { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (vnop_t *)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ + { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ { NULL, NULL } }; struct vnodeopv_desc nfsv2_vnodeop_opv_desc = - { &nfsv2_vnodeop_p, nfsv2_vnodeop_entries }; +{ &nfsv2_vnodeop_p, nfsv2_vnodeop_entries }; vnop_t **nfsv4_vnodeop_p; static struct vnodeopv_entry_desc nfsv4_vnodeop_entries[] = { { &vnop_default_desc, (vnop_t *)vn_default_error }, - { &vnop_lookup_desc, (vnop_t *)nfs_vnop_lookup }, /* lookup */ - { &vnop_create_desc, (vnop_t *)nfs4_vnop_create }, /* create */ - { &vnop_mknod_desc, (vnop_t *)nfs4_vnop_mknod }, /* mknod */ - { &vnop_open_desc, (vnop_t *)nfs_vnop_open }, /* open */ - { &vnop_close_desc, (vnop_t *)nfs_vnop_close }, /* close */ - { &vnop_access_desc, (vnop_t *)nfs_vnop_access }, /* access */ - { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */ - { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ - { &vnop_read_desc, (vnop_t *)nfs_vnop_read }, /* read */ - { &vnop_write_desc, (vnop_t *)nfs_vnop_write }, /* write */ - { &vnop_ioctl_desc, (vnop_t *)nfs_vnop_ioctl }, /* ioctl */ - { &vnop_select_desc, (vnop_t *)nfs_vnop_select }, /* select */ - { &vnop_revoke_desc, (vnop_t *)nfs_vnop_revoke }, /* revoke */ - { &vnop_mmap_desc, (vnop_t *)nfs_vnop_mmap }, /* mmap */ - { &vnop_mnomap_desc, (vnop_t *)nfs_vnop_mnomap }, /* mnomap */ - { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ - { &vnop_remove_desc, (vnop_t *)nfs_vnop_remove }, /* remove */ - { &vnop_link_desc, (vnop_t *)nfs4_vnop_link }, /* link */ - { &vnop_rename_desc, (vnop_t *)nfs_vnop_rename }, /* rename */ - { &vnop_mkdir_desc, (vnop_t *)nfs4_vnop_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (vnop_t *)nfs4_vnop_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (vnop_t *)nfs4_vnop_symlink }, /* symlink */ - { &vnop_readdir_desc, (vnop_t *)nfs_vnop_readdir }, /* readdir */ - { &vnop_readlink_desc, (vnop_t *)nfs_vnop_readlink }, /* readlink */ - { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (vnop_t *)err_strategy }, /* strategy */ - { &vnop_pathconf_desc, (vnop_t *)nfs_vnop_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (vnop_t *)nfs_vnop_advlock }, /* advlock */ - { &vnop_bwrite_desc, (vnop_t *)err_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ - { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ - { &vnop_copyfile_desc, (vnop_t *)err_copyfile }, /* Copyfile */ - { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ - { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */ - { &vnop_setxattr_desc, (vnop_t 
*)nfs4_vnop_setxattr }, /* setxattr */ + { &vnop_lookup_desc, (vnop_t *)nfs_vnop_lookup }, /* lookup */ + { &vnop_create_desc, (vnop_t *)nfs4_vnop_create }, /* create */ + { &vnop_mknod_desc, (vnop_t *)nfs4_vnop_mknod }, /* mknod */ + { &vnop_open_desc, (vnop_t *)nfs_vnop_open }, /* open */ + { &vnop_close_desc, (vnop_t *)nfs_vnop_close }, /* close */ + { &vnop_access_desc, (vnop_t *)nfs_vnop_access }, /* access */ + { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */ + { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ + { &vnop_read_desc, (vnop_t *)nfs_vnop_read }, /* read */ + { &vnop_write_desc, (vnop_t *)nfs_vnop_write }, /* write */ + { &vnop_ioctl_desc, (vnop_t *)nfs_vnop_ioctl }, /* ioctl */ + { &vnop_select_desc, (vnop_t *)nfs_vnop_select }, /* select */ + { &vnop_revoke_desc, (vnop_t *)nfs_vnop_revoke }, /* revoke */ + { &vnop_mmap_desc, (vnop_t *)nfs_vnop_mmap }, /* mmap */ + { &vnop_mnomap_desc, (vnop_t *)nfs_vnop_mnomap }, /* mnomap */ + { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ + { &vnop_remove_desc, (vnop_t *)nfs_vnop_remove }, /* remove */ + { &vnop_link_desc, (vnop_t *)nfs4_vnop_link }, /* link */ + { &vnop_rename_desc, (vnop_t *)nfs_vnop_rename }, /* rename */ + { &vnop_mkdir_desc, (vnop_t *)nfs4_vnop_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (vnop_t *)nfs4_vnop_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (vnop_t *)nfs4_vnop_symlink }, /* symlink */ + { &vnop_readdir_desc, (vnop_t *)nfs_vnop_readdir }, /* readdir */ + { &vnop_readlink_desc, (vnop_t *)nfs_vnop_readlink }, /* readlink */ + { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ + { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (vnop_t *)err_strategy }, /* strategy */ + { &vnop_pathconf_desc, (vnop_t *)nfs_vnop_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (vnop_t *)nfs_vnop_advlock }, /* advlock */ + { &vnop_bwrite_desc, (vnop_t *)err_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ + { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ + { &vnop_copyfile_desc, (vnop_t *)err_copyfile }, /* Copyfile */ + { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ + { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */ + { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */ { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */ { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */ #if NAMEDSTREAMS - { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */ - { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */ + { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */ + { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */ { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */ #endif - { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ + { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ { NULL, NULL } }; struct vnodeopv_desc nfsv4_vnodeop_opv_desc = - { &nfsv4_vnodeop_p, nfsv4_vnodeop_entries }; +{ &nfsv4_vnodeop_p, nfsv4_vnodeop_entries }; /* * Special device vnode ops @@ -262,193 +262,193 @@ struct vnodeopv_desc 
nfsv4_vnodeop_opv_desc = vnop_t **spec_nfsv2nodeop_p; static struct vnodeopv_entry_desc spec_nfsv2nodeop_entries[] = { { &vnop_default_desc, (vnop_t *)vn_default_error }, - { &vnop_lookup_desc, (vnop_t *)spec_lookup }, /* lookup */ - { &vnop_create_desc, (vnop_t *)spec_create }, /* create */ - { &vnop_mknod_desc, (vnop_t *)spec_mknod }, /* mknod */ - { &vnop_open_desc, (vnop_t *)spec_open }, /* open */ - { &vnop_close_desc, (vnop_t *)nfsspec_vnop_close }, /* close */ - { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */ - { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ - { &vnop_read_desc, (vnop_t *)nfsspec_vnop_read }, /* read */ - { &vnop_write_desc, (vnop_t *)nfsspec_vnop_write }, /* write */ - { &vnop_ioctl_desc, (vnop_t *)spec_ioctl }, /* ioctl */ - { &vnop_select_desc, (vnop_t *)spec_select }, /* select */ - { &vnop_revoke_desc, (vnop_t *)spec_revoke }, /* revoke */ - { &vnop_mmap_desc, (vnop_t *)spec_mmap }, /* mmap */ - { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ - { &vnop_remove_desc, (vnop_t *)spec_remove }, /* remove */ - { &vnop_link_desc, (vnop_t *)spec_link }, /* link */ - { &vnop_rename_desc, (vnop_t *)spec_rename }, /* rename */ - { &vnop_mkdir_desc, (vnop_t *)spec_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (vnop_t *)spec_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (vnop_t *)spec_symlink }, /* symlink */ - { &vnop_readdir_desc, (vnop_t *)spec_readdir }, /* readdir */ - { &vnop_readlink_desc, (vnop_t *)spec_readlink }, /* readlink */ - { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (vnop_t *)spec_strategy }, /* strategy */ - { &vnop_pathconf_desc, (vnop_t *)spec_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (vnop_t *)spec_advlock }, /* advlock */ - { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ - { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ - { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ - { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ + { &vnop_lookup_desc, (vnop_t *)spec_lookup }, /* lookup */ + { &vnop_create_desc, (vnop_t *)spec_create }, /* create */ + { &vnop_mknod_desc, (vnop_t *)spec_mknod }, /* mknod */ + { &vnop_open_desc, (vnop_t *)spec_open }, /* open */ + { &vnop_close_desc, (vnop_t *)nfsspec_vnop_close }, /* close */ + { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */ + { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ + { &vnop_read_desc, (vnop_t *)nfsspec_vnop_read }, /* read */ + { &vnop_write_desc, (vnop_t *)nfsspec_vnop_write }, /* write */ + { &vnop_ioctl_desc, (vnop_t *)spec_ioctl }, /* ioctl */ + { &vnop_select_desc, (vnop_t *)spec_select }, /* select */ + { &vnop_revoke_desc, (vnop_t *)spec_revoke }, /* revoke */ + { &vnop_mmap_desc, (vnop_t *)spec_mmap }, /* mmap */ + { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ + { &vnop_remove_desc, (vnop_t *)spec_remove }, /* remove */ + { &vnop_link_desc, (vnop_t *)spec_link }, /* link */ + { &vnop_rename_desc, (vnop_t *)spec_rename }, /* rename */ + { &vnop_mkdir_desc, (vnop_t *)spec_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (vnop_t *)spec_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (vnop_t *)spec_symlink }, /* symlink */ 
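All of the operation tables being re-indented in this file share one shape: an array of vnodeopv_entry_desc entries mapping each vnode operation descriptor to its handler, with vn_default_error as the fallback for anything unlisted, a mandatory { NULL, NULL } terminator, and a vnodeopv_desc wrapper telling the VFS where to store the generated dispatch vector. A minimal sketch of the pattern for a hypothetical filesystem (the myfs_* names are illustrative; the descriptor and helper symbols are the same ones used above):

	#include <sys/vnode.h>	/* vnop_t, vnodeopv_entry_desc, vn_default_error */

	static int myfs_vnop_lookup(struct vnop_lookup_args *);	/* hypothetical handlers */
	static int myfs_vnop_open(struct vnop_open_args *);

	vnop_t **myfs_vnodeop_p;	/* filled in by the VFS at registration time */

	static struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
		{ &vnop_default_desc, (vnop_t *)vn_default_error },	/* default */
		{ &vnop_lookup_desc, (vnop_t *)myfs_vnop_lookup },	/* lookup */
		{ &vnop_open_desc, (vnop_t *)myfs_vnop_open },		/* open */
		{ NULL, NULL }
	};

	struct vnodeopv_desc myfs_vnodeop_opv_desc =
	{ &myfs_vnodeop_p, myfs_vnodeop_entries };

The patch's only change to these tables is whitespace: one tab between descriptor and handler cast, and the wrapper initializer moved to column zero.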
+ { &vnop_readdir_desc, (vnop_t *)spec_readdir }, /* readdir */ + { &vnop_readlink_desc, (vnop_t *)spec_readlink }, /* readlink */ + { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ + { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (vnop_t *)spec_strategy }, /* strategy */ + { &vnop_pathconf_desc, (vnop_t *)spec_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (vnop_t *)spec_advlock }, /* advlock */ + { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ + { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ + { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ + { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ { NULL, NULL } }; struct vnodeopv_desc spec_nfsv2nodeop_opv_desc = - { &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries }; +{ &spec_nfsv2nodeop_p, spec_nfsv2nodeop_entries }; vnop_t **spec_nfsv4nodeop_p; static struct vnodeopv_entry_desc spec_nfsv4nodeop_entries[] = { { &vnop_default_desc, (vnop_t *)vn_default_error }, - { &vnop_lookup_desc, (vnop_t *)spec_lookup }, /* lookup */ - { &vnop_create_desc, (vnop_t *)spec_create }, /* create */ - { &vnop_mknod_desc, (vnop_t *)spec_mknod }, /* mknod */ - { &vnop_open_desc, (vnop_t *)spec_open }, /* open */ - { &vnop_close_desc, (vnop_t *)nfsspec_vnop_close }, /* close */ - { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */ - { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ - { &vnop_read_desc, (vnop_t *)nfsspec_vnop_read }, /* read */ - { &vnop_write_desc, (vnop_t *)nfsspec_vnop_write }, /* write */ - { &vnop_ioctl_desc, (vnop_t *)spec_ioctl }, /* ioctl */ - { &vnop_select_desc, (vnop_t *)spec_select }, /* select */ - { &vnop_revoke_desc, (vnop_t *)spec_revoke }, /* revoke */ - { &vnop_mmap_desc, (vnop_t *)spec_mmap }, /* mmap */ - { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ - { &vnop_remove_desc, (vnop_t *)spec_remove }, /* remove */ - { &vnop_link_desc, (vnop_t *)spec_link }, /* link */ - { &vnop_rename_desc, (vnop_t *)spec_rename }, /* rename */ - { &vnop_mkdir_desc, (vnop_t *)spec_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (vnop_t *)spec_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (vnop_t *)spec_symlink }, /* symlink */ - { &vnop_readdir_desc, (vnop_t *)spec_readdir }, /* readdir */ - { &vnop_readlink_desc, (vnop_t *)spec_readlink }, /* readlink */ - { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (vnop_t *)spec_strategy }, /* strategy */ - { &vnop_pathconf_desc, (vnop_t *)spec_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (vnop_t *)spec_advlock }, /* advlock */ - { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ - { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ - { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ - { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */ - { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */ + { &vnop_lookup_desc, (vnop_t *)spec_lookup }, /* lookup 
*/ + { &vnop_create_desc, (vnop_t *)spec_create }, /* create */ + { &vnop_mknod_desc, (vnop_t *)spec_mknod }, /* mknod */ + { &vnop_open_desc, (vnop_t *)spec_open }, /* open */ + { &vnop_close_desc, (vnop_t *)nfsspec_vnop_close }, /* close */ + { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */ + { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ + { &vnop_read_desc, (vnop_t *)nfsspec_vnop_read }, /* read */ + { &vnop_write_desc, (vnop_t *)nfsspec_vnop_write }, /* write */ + { &vnop_ioctl_desc, (vnop_t *)spec_ioctl }, /* ioctl */ + { &vnop_select_desc, (vnop_t *)spec_select }, /* select */ + { &vnop_revoke_desc, (vnop_t *)spec_revoke }, /* revoke */ + { &vnop_mmap_desc, (vnop_t *)spec_mmap }, /* mmap */ + { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ + { &vnop_remove_desc, (vnop_t *)spec_remove }, /* remove */ + { &vnop_link_desc, (vnop_t *)spec_link }, /* link */ + { &vnop_rename_desc, (vnop_t *)spec_rename }, /* rename */ + { &vnop_mkdir_desc, (vnop_t *)spec_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (vnop_t *)spec_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (vnop_t *)spec_symlink }, /* symlink */ + { &vnop_readdir_desc, (vnop_t *)spec_readdir }, /* readdir */ + { &vnop_readlink_desc, (vnop_t *)spec_readlink }, /* readlink */ + { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ + { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (vnop_t *)spec_strategy }, /* strategy */ + { &vnop_pathconf_desc, (vnop_t *)spec_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (vnop_t *)spec_advlock }, /* advlock */ + { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ + { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ + { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ + { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */ + { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */ { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */ { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */ #if NAMEDSTREAMS - { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */ - { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */ + { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */ + { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */ { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */ #endif - { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ + { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ { NULL, NULL } }; struct vnodeopv_desc spec_nfsv4nodeop_opv_desc = - { &spec_nfsv4nodeop_p, spec_nfsv4nodeop_entries }; +{ &spec_nfsv4nodeop_p, spec_nfsv4nodeop_entries }; #if FIFO vnop_t **fifo_nfsv2nodeop_p; static struct vnodeopv_entry_desc fifo_nfsv2nodeop_entries[] = { { &vnop_default_desc, (vnop_t *)vn_default_error }, - { &vnop_lookup_desc, (vnop_t *)fifo_lookup }, /* lookup */ - { &vnop_create_desc, (vnop_t *)fifo_create }, /* create */ - { &vnop_mknod_desc, (vnop_t *)fifo_mknod }, /* mknod */ - { &vnop_open_desc, (vnop_t *)fifo_open }, /* open */ - { &vnop_close_desc, (vnop_t 
*)nfsfifo_vnop_close }, /* close */ - { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */ - { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ - { &vnop_read_desc, (vnop_t *)nfsfifo_vnop_read }, /* read */ - { &vnop_write_desc, (vnop_t *)nfsfifo_vnop_write }, /* write */ - { &vnop_ioctl_desc, (vnop_t *)fifo_ioctl }, /* ioctl */ - { &vnop_select_desc, (vnop_t *)fifo_select }, /* select */ - { &vnop_revoke_desc, (vnop_t *)fifo_revoke }, /* revoke */ - { &vnop_mmap_desc, (vnop_t *)fifo_mmap }, /* mmap */ - { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ - { &vnop_remove_desc, (vnop_t *)fifo_remove }, /* remove */ - { &vnop_link_desc, (vnop_t *)fifo_link }, /* link */ - { &vnop_rename_desc, (vnop_t *)fifo_rename }, /* rename */ - { &vnop_mkdir_desc, (vnop_t *)fifo_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (vnop_t *)fifo_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (vnop_t *)fifo_symlink }, /* symlink */ - { &vnop_readdir_desc, (vnop_t *)fifo_readdir }, /* readdir */ - { &vnop_readlink_desc, (vnop_t *)fifo_readlink }, /* readlink */ - { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (vnop_t *)fifo_strategy }, /* strategy */ - { &vnop_pathconf_desc, (vnop_t *)fifo_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (vnop_t *)fifo_advlock }, /* advlock */ - { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ - { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ - { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ - { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ + { &vnop_lookup_desc, (vnop_t *)fifo_lookup }, /* lookup */ + { &vnop_create_desc, (vnop_t *)fifo_create }, /* create */ + { &vnop_mknod_desc, (vnop_t *)fifo_mknod }, /* mknod */ + { &vnop_open_desc, (vnop_t *)fifo_open }, /* open */ + { &vnop_close_desc, (vnop_t *)nfsfifo_vnop_close }, /* close */ + { &vnop_getattr_desc, (vnop_t *)nfs3_vnop_getattr }, /* getattr */ + { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ + { &vnop_read_desc, (vnop_t *)nfsfifo_vnop_read }, /* read */ + { &vnop_write_desc, (vnop_t *)nfsfifo_vnop_write }, /* write */ + { &vnop_ioctl_desc, (vnop_t *)fifo_ioctl }, /* ioctl */ + { &vnop_select_desc, (vnop_t *)fifo_select }, /* select */ + { &vnop_revoke_desc, (vnop_t *)fifo_revoke }, /* revoke */ + { &vnop_mmap_desc, (vnop_t *)fifo_mmap }, /* mmap */ + { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ + { &vnop_remove_desc, (vnop_t *)fifo_remove }, /* remove */ + { &vnop_link_desc, (vnop_t *)fifo_link }, /* link */ + { &vnop_rename_desc, (vnop_t *)fifo_rename }, /* rename */ + { &vnop_mkdir_desc, (vnop_t *)fifo_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (vnop_t *)fifo_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (vnop_t *)fifo_symlink }, /* symlink */ + { &vnop_readdir_desc, (vnop_t *)fifo_readdir }, /* readdir */ + { &vnop_readlink_desc, (vnop_t *)fifo_readlink }, /* readlink */ + { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ + { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (vnop_t *)fifo_strategy }, /* strategy */ + { &vnop_pathconf_desc, (vnop_t *)fifo_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (vnop_t 
*)fifo_advlock }, /* advlock */ + { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ + { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ + { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ + { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ { NULL, NULL } }; struct vnodeopv_desc fifo_nfsv2nodeop_opv_desc = - { &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries }; +{ &fifo_nfsv2nodeop_p, fifo_nfsv2nodeop_entries }; vnop_t **fifo_nfsv4nodeop_p; static struct vnodeopv_entry_desc fifo_nfsv4nodeop_entries[] = { { &vnop_default_desc, (vnop_t *)vn_default_error }, - { &vnop_lookup_desc, (vnop_t *)fifo_lookup }, /* lookup */ - { &vnop_create_desc, (vnop_t *)fifo_create }, /* create */ - { &vnop_mknod_desc, (vnop_t *)fifo_mknod }, /* mknod */ - { &vnop_open_desc, (vnop_t *)fifo_open }, /* open */ - { &vnop_close_desc, (vnop_t *)nfsfifo_vnop_close }, /* close */ - { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */ - { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ - { &vnop_read_desc, (vnop_t *)nfsfifo_vnop_read }, /* read */ - { &vnop_write_desc, (vnop_t *)nfsfifo_vnop_write }, /* write */ - { &vnop_ioctl_desc, (vnop_t *)fifo_ioctl }, /* ioctl */ - { &vnop_select_desc, (vnop_t *)fifo_select }, /* select */ - { &vnop_revoke_desc, (vnop_t *)fifo_revoke }, /* revoke */ - { &vnop_mmap_desc, (vnop_t *)fifo_mmap }, /* mmap */ - { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ - { &vnop_remove_desc, (vnop_t *)fifo_remove }, /* remove */ - { &vnop_link_desc, (vnop_t *)fifo_link }, /* link */ - { &vnop_rename_desc, (vnop_t *)fifo_rename }, /* rename */ - { &vnop_mkdir_desc, (vnop_t *)fifo_mkdir }, /* mkdir */ - { &vnop_rmdir_desc, (vnop_t *)fifo_rmdir }, /* rmdir */ - { &vnop_symlink_desc, (vnop_t *)fifo_symlink }, /* symlink */ - { &vnop_readdir_desc, (vnop_t *)fifo_readdir }, /* readdir */ - { &vnop_readlink_desc, (vnop_t *)fifo_readlink }, /* readlink */ - { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ - { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ - { &vnop_strategy_desc, (vnop_t *)fifo_strategy }, /* strategy */ - { &vnop_pathconf_desc, (vnop_t *)fifo_pathconf }, /* pathconf */ - { &vnop_advlock_desc, (vnop_t *)fifo_advlock }, /* advlock */ - { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */ - { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ - { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ - { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ - { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ - { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ - { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */ - { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */ + { &vnop_lookup_desc, (vnop_t *)fifo_lookup }, /* lookup */ + { &vnop_create_desc, (vnop_t *)fifo_create }, /* create */ + { &vnop_mknod_desc, (vnop_t *)fifo_mknod }, /* mknod */ + { &vnop_open_desc, (vnop_t *)fifo_open }, /* open */ + { &vnop_close_desc, (vnop_t *)nfsfifo_vnop_close }, /* close */ + { &vnop_getattr_desc, (vnop_t *)nfs4_vnop_getattr }, /* getattr */ + { &vnop_setattr_desc, (vnop_t *)nfs_vnop_setattr }, /* setattr */ + { &vnop_read_desc, (vnop_t *)nfsfifo_vnop_read }, /* 
read */ + { &vnop_write_desc, (vnop_t *)nfsfifo_vnop_write }, /* write */ + { &vnop_ioctl_desc, (vnop_t *)fifo_ioctl }, /* ioctl */ + { &vnop_select_desc, (vnop_t *)fifo_select }, /* select */ + { &vnop_revoke_desc, (vnop_t *)fifo_revoke }, /* revoke */ + { &vnop_mmap_desc, (vnop_t *)fifo_mmap }, /* mmap */ + { &vnop_fsync_desc, (vnop_t *)nfs_vnop_fsync }, /* fsync */ + { &vnop_remove_desc, (vnop_t *)fifo_remove }, /* remove */ + { &vnop_link_desc, (vnop_t *)fifo_link }, /* link */ + { &vnop_rename_desc, (vnop_t *)fifo_rename }, /* rename */ + { &vnop_mkdir_desc, (vnop_t *)fifo_mkdir }, /* mkdir */ + { &vnop_rmdir_desc, (vnop_t *)fifo_rmdir }, /* rmdir */ + { &vnop_symlink_desc, (vnop_t *)fifo_symlink }, /* symlink */ + { &vnop_readdir_desc, (vnop_t *)fifo_readdir }, /* readdir */ + { &vnop_readlink_desc, (vnop_t *)fifo_readlink }, /* readlink */ + { &vnop_inactive_desc, (vnop_t *)nfs_vnop_inactive }, /* inactive */ + { &vnop_reclaim_desc, (vnop_t *)nfs_vnop_reclaim }, /* reclaim */ + { &vnop_strategy_desc, (vnop_t *)fifo_strategy }, /* strategy */ + { &vnop_pathconf_desc, (vnop_t *)fifo_pathconf }, /* pathconf */ + { &vnop_advlock_desc, (vnop_t *)fifo_advlock }, /* advlock */ + { &vnop_bwrite_desc, (vnop_t *)vn_bwrite }, /* bwrite */ + { &vnop_pagein_desc, (vnop_t *)nfs_vnop_pagein }, /* Pagein */ + { &vnop_pageout_desc, (vnop_t *)nfs_vnop_pageout }, /* Pageout */ + { &vnop_blktooff_desc, (vnop_t *)nfs_vnop_blktooff }, /* blktooff */ + { &vnop_offtoblk_desc, (vnop_t *)nfs_vnop_offtoblk }, /* offtoblk */ + { &vnop_blockmap_desc, (vnop_t *)nfs_vnop_blockmap }, /* blockmap */ + { &vnop_getxattr_desc, (vnop_t *)nfs4_vnop_getxattr }, /* getxattr */ + { &vnop_setxattr_desc, (vnop_t *)nfs4_vnop_setxattr }, /* setxattr */ { &vnop_removexattr_desc, (vnop_t *)nfs4_vnop_removexattr },/* removexattr */ { &vnop_listxattr_desc, (vnop_t *)nfs4_vnop_listxattr },/* listxattr */ #if NAMEDSTREAMS - { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */ - { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */ + { &vnop_getnamedstream_desc, (vnop_t *)nfs4_vnop_getnamedstream }, /* getnamedstream */ + { &vnop_makenamedstream_desc, (vnop_t *)nfs4_vnop_makenamedstream }, /* makenamedstream */ { &vnop_removenamedstream_desc, (vnop_t *)nfs4_vnop_removenamedstream },/* removenamedstream */ #endif - { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ + { &vnop_monitor_desc, (vnop_t *)nfs_vnop_monitor }, /* monitor */ { NULL, NULL } }; struct vnodeopv_desc fifo_nfsv4nodeop_opv_desc = - { &fifo_nfsv4nodeop_p, fifo_nfsv4nodeop_entries }; +{ &fifo_nfsv4nodeop_p, fifo_nfsv4nodeop_entries }; #endif /* FIFO */ -int nfs_sillyrename(nfsnode_t,nfsnode_t,struct componentname *,vfs_context_t); -int nfs_getattr_internal(nfsnode_t, struct nfs_vattr *, vfs_context_t, int); -int nfs_refresh_fh(nfsnode_t, vfs_context_t); +int nfs_sillyrename(nfsnode_t, nfsnode_t, struct componentname *, vfs_context_t); +int nfs_getattr_internal(nfsnode_t, struct nfs_vattr *, vfs_context_t, int); +int nfs_refresh_fh(nfsnode_t, vfs_context_t); /* * Find the slot in the access cache for this UID. 
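The hunk below reformats nfs_node_access_slot, a tiny per-node cache keyed by UID: a linear scan over NFS_ACCESS_CACHE_SIZE slots, plus one extra array element that serves as a round-robin rotor choosing which slot to recycle on a claiming miss. A self-contained sketch of the same idea, assuming an illustrative cache size and struct in place of the nfsnode fields:

	#include <stdint.h>

	#define ACCESS_CACHE_SIZE 3	/* illustrative; NFS_ACCESS_CACHE_SIZE in the real code */

	struct access_cache {
		uint32_t uid[ACCESS_CACHE_SIZE];	/* which UID owns each slot */
		uint8_t  mode[ACCESS_CACHE_SIZE + 1];	/* extra element is the eviction rotor */
	};

	/*
	 * Return the slot caching rights for `uid`, or -1 on a miss when the
	 * caller doesn't want to claim one. On a claiming miss, recycle the
	 * slot named by the rotor and advance the rotor round-robin.
	 */
	static int
	access_slot(struct access_cache *c, uint32_t uid, int add)
	{
		int slot;

		for (slot = 0; slot < ACCESS_CACHE_SIZE; slot++) {
			if (c->uid[slot] == uid) {
				break;
			}
		}
		if (slot == ACCESS_CACHE_SIZE) {
			if (!add) {
				return -1;
			}
			slot = c->mode[ACCESS_CACHE_SIZE];
			c->mode[ACCESS_CACHE_SIZE] = (slot + 1) % ACCESS_CACHE_SIZE;
		}
		return slot;
	}

Calling access_slot(c, uid, 0) is a pure probe; passing add = 1 guarantees a slot is returned, at the cost of evicting whatever entry the rotor pointed at.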
@@ -460,16 +460,19 @@ nfs_node_access_slot(nfsnode_t np, uid_t uid, int add) { int slot; - for (slot=0; slot < NFS_ACCESS_CACHE_SIZE; slot++) - if (np->n_accessuid[slot] == uid) + for (slot = 0; slot < NFS_ACCESS_CACHE_SIZE; slot++) { + if (np->n_accessuid[slot] == uid) { break; + } + } if (slot == NFS_ACCESS_CACHE_SIZE) { - if (!add) - return (-1); + if (!add) { + return -1; + } slot = np->n_access[NFS_ACCESS_CACHE_SIZE]; np->n_access[NFS_ACCESS_CACHE_SIZE] = (slot + 1) % NFS_ACCESS_CACHE_SIZE; } - return (slot); + return slot; } int @@ -492,13 +495,15 @@ nfs3_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request2(np, NULL, &nmreq, NFSPROC_ACCESS, - vfs_context_thread(ctx), vfs_context_ucred(ctx), - NULL, rpcflags, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + vfs_context_thread(ctx), vfs_context_ucred(ctx), + NULL, rpcflags, &nmrep, &xid, &status); + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_postop_attr_update(error, &nmrep, np, &xid); - if (!error) + if (!error) { error = status; + } nfsm_chain_get_32(error, &nmrep, access_result); nfsmout_if(error); @@ -528,19 +533,22 @@ nfs3_access_rpc(nfsnode_t np, u_int32_t *access, int rpcflags, vfs_context_t ctx * really isn't deletable. */ if ((*access & NFS_ACCESS_DELETE) && - !(np->n_access[slot] & NFS_ACCESS_DELETE)) + !(np->n_access[slot] & NFS_ACCESS_DELETE)) { np->n_access[slot] |= NFS_ACCESS_DELETE; + } /* ".zfs" subdirectories may erroneously give a denied answer for add/remove */ - if (nfs_access_dotzfs && (np->n_flag & NISDOTZFSCHILD)) - np->n_access[slot] |= (NFS_ACCESS_MODIFY|NFS_ACCESS_EXTEND|NFS_ACCESS_DELETE); + if (nfs_access_dotzfs && (np->n_flag & NISDOTZFSCHILD)) { + np->n_access[slot] |= (NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND | NFS_ACCESS_DELETE); + } /* pass back the access returned with this request */ *access = np->n_access[slot]; nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -552,11 +560,11 @@ nfsmout: int nfs_vnop_access( struct vnop_access_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_action; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_action; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -569,15 +577,17 @@ nfs_vnop_access( uid_t uid; nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; if (nfsvers == NFS_VER2) { if ((ap->a_action & KAUTH_VNODE_WRITE_RIGHTS) && - vfs_isrdonly(vnode_mount(vp))) - return (EROFS); - return (0); + vfs_isrdonly(vnode_mount(vp))) { + return EROFS; + } + return 0; } /* @@ -597,61 +607,74 @@ nfs_vnop_access( /* directory */ if (ap->a_action & (KAUTH_VNODE_LIST_DIRECTORY | - KAUTH_VNODE_READ_EXTATTRIBUTES)) + KAUTH_VNODE_READ_EXTATTRIBUTES)) { access |= NFS_ACCESS_READ; - if (ap->a_action & KAUTH_VNODE_SEARCH) + } + if (ap->a_action & KAUTH_VNODE_SEARCH) { access |= NFS_ACCESS_LOOKUP; + } if (ap->a_action & (KAUTH_VNODE_ADD_FILE | - KAUTH_VNODE_ADD_SUBDIRECTORY)) + KAUTH_VNODE_ADD_SUBDIRECTORY)) { access |= NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND; - if (ap->a_action & KAUTH_VNODE_DELETE_CHILD) + } + if (ap->a_action & KAUTH_VNODE_DELETE_CHILD) { access |= NFS_ACCESS_MODIFY; + } } else { /* file */ if (ap->a_action & 
(KAUTH_VNODE_READ_DATA | - KAUTH_VNODE_READ_EXTATTRIBUTES)) + KAUTH_VNODE_READ_EXTATTRIBUTES)) { access |= NFS_ACCESS_READ; - if (ap->a_action & KAUTH_VNODE_WRITE_DATA) + } + if (ap->a_action & KAUTH_VNODE_WRITE_DATA) { access |= NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND; - if (ap->a_action & KAUTH_VNODE_APPEND_DATA) + } + if (ap->a_action & KAUTH_VNODE_APPEND_DATA) { access |= NFS_ACCESS_EXTEND; - if (ap->a_action & KAUTH_VNODE_EXECUTE) + } + if (ap->a_action & KAUTH_VNODE_EXECUTE) { access |= NFS_ACCESS_EXECUTE; + } } /* common */ - if (ap->a_action & KAUTH_VNODE_DELETE) + if (ap->a_action & KAUTH_VNODE_DELETE) { access |= NFS_ACCESS_DELETE; + } if (ap->a_action & (KAUTH_VNODE_WRITE_ATTRIBUTES | KAUTH_VNODE_WRITE_EXTATTRIBUTES | - KAUTH_VNODE_WRITE_SECURITY)) + KAUTH_VNODE_WRITE_SECURITY)) { access |= NFS_ACCESS_MODIFY; + } /* XXX this is pretty dubious */ - if (ap->a_action & KAUTH_VNODE_CHANGE_OWNER) + if (ap->a_action & KAUTH_VNODE_CHANGE_OWNER) { access |= NFS_ACCESS_MODIFY; + } /* if caching, always ask for every right */ if (nfs_access_cache_timeout > 0) { waccess = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | - NFS_ACCESS_EXTEND | NFS_ACCESS_EXECUTE | - NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP; + NFS_ACCESS_EXTEND | NFS_ACCESS_EXECUTE | + NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP; } else { waccess = access; } - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } /* * Does our cached result allow us to give a definite yes to * this request? */ - if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) + if (auth_is_kerberized(np->n_auth) || auth_is_kerberized(nmp->nm_auth)) { uid = nfs_cred_getasid2uid(vfs_context_ucred(ctx)); - else + } else { uid = kauth_cred_getuid(vfs_context_ucred(ctx)); + } slot = nfs_node_access_slot(np, uid, 0); dorpc = 1; if (access == 0) { @@ -677,8 +700,9 @@ nfs_vnop_access( * Allow an access call to timeout if we have it cached * so we won't hang if the server isn't responding. 
*/ - if (NACCESSVALID(np, slot)) + if (NACCESSVALID(np, slot)) { rpcflags |= R_SOFT; + } error = nmp->nm_funcs->nf_access_rpc(np, &waccess, rpcflags, ctx); @@ -690,10 +714,11 @@ nfs_vnop_access( waccess = np->n_access[slot]; } } - if (!error && ((waccess & access) != access)) + if (!error && ((waccess & access) != access)) { error = EACCES; + } - return (error); + return error; } @@ -707,11 +732,11 @@ nfs_vnop_access( int nfs_vnop_open( struct vnop_open_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_mode; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_mode; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -722,87 +747,103 @@ nfs_vnop_open( struct nfs_open_file *nofp = NULL; enum vtype vtype; - if (!(ap->a_mode & (FREAD|FWRITE))) - return (EINVAL); + if (!(ap->a_mode & (FREAD | FWRITE))) { + return EINVAL; + } nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (np->n_flag & NREVOKE) - return (EIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (np->n_flag & NREVOKE) { + return EIO; + } vtype = vnode_vtype(vp); - if ((vtype != VREG) && (vtype != VDIR) && (vtype != VLNK)) - return (EACCES); + if ((vtype != VREG) && (vtype != VDIR) && (vtype != VLNK)) { + return EACCES; + } /* First, check if we need to update/invalidate */ - if (ISSET(np->n_flag, NUPDATESIZE)) + if (ISSET(np->n_flag, NUPDATESIZE)) { nfs_data_update_size(np, 0); - if ((error = nfs_node_lock(np))) - return (error); + } + if ((error = nfs_node_lock(np))) { + return error; + } if (np->n_flag & NNEEDINVALIDATE) { np->n_flag &= ~NNEEDINVALIDATE; - if (vtype == VDIR) + if (vtype == VDIR) { nfs_invaldir(np); + } nfs_node_unlock(np); - nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1); - if ((error = nfs_node_lock(np))) - return (error); + nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1); + if ((error = nfs_node_lock(np))) { + return error; + } } - if (vtype == VREG) + if (vtype == VREG) { np->n_lastrahead = -1; + } if (np->n_flag & NMODIFIED) { - if (vtype == VDIR) + if (vtype == VDIR) { nfs_invaldir(np); + } nfs_node_unlock(np); - if ((error = nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1))) - return (error); + if ((error = nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1))) { + return error; + } } else { nfs_node_unlock(np); } /* nfs_getattr() will check changed and purge caches */ - if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) - return (error); + if ((error = nfs_getattr(np, NULL, ctx, NGA_UNCACHED))) { + return error; + } if (vtype != VREG) { /* Just mark that it was opened */ lck_mtx_lock(&np->n_openlock); np->n_openrefcnt++; lck_mtx_unlock(&np->n_openlock); - return (0); + return 0; } /* mode contains some combination of: FREAD, FWRITE, O_SHLOCK, O_EXLOCK */ accessMode = 0; - if (ap->a_mode & FREAD) + if (ap->a_mode & FREAD) { accessMode |= NFS_OPEN_SHARE_ACCESS_READ; - if (ap->a_mode & FWRITE) + } + if (ap->a_mode & FWRITE) { accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE; - if (ap->a_mode & O_EXLOCK) + } + if (ap->a_mode & O_EXLOCK) { denyMode = NFS_OPEN_SHARE_DENY_BOTH; - else if (ap->a_mode & O_SHLOCK) + } else if (ap->a_mode & O_SHLOCK) { denyMode = NFS_OPEN_SHARE_DENY_WRITE; - else + } else { denyMode = NFS_OPEN_SHARE_DENY_NONE; + } // XXX don't do deny modes just yet (and never do it for !v4) denyMode = NFS_OPEN_SHARE_DENY_NONE; noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1); - if (!noop) - return (ENOMEM); + if (!noop) { + return ENOMEM; 
+ } restart: error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)); if (error) { nfs_open_owner_rele(noop); - return (error); + return error; } if (np->n_flag & NREVOKE) { error = EIO; nfs_mount_state_in_use_end(nmp, 0); nfs_open_owner_rele(noop); - return (error); + return error; } error = nfs_open_file_find(np, noop, &nofp, accessMode, denyMode, 1); @@ -814,11 +855,13 @@ restart: nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, vfs_context_thread(ctx)); nofp = NULL; - if (!error) + if (!error) { goto restart; + } } - if (!error) + if (!error) { error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx)); + } if (error) { nofp = NULL; goto out; @@ -843,8 +886,9 @@ restart: nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE; nofp->nof_creator = NULL; } else { - if (!opened) + if (!opened) { error = nfs4_open(np, nofp, accessMode, denyMode, ctx); + } if ((error == EACCES) && (nofp->nof_flags & NFS_OPEN_FILE_CREATE) && (nofp->nof_creator == current_thread())) { /* @@ -856,13 +900,15 @@ restart: * We may have access we don't need or we may not have a requested * deny mode. We may log complaints later, but we'll try to avoid it. */ - if (denyMode != NFS_OPEN_SHARE_DENY_NONE) + if (denyMode != NFS_OPEN_SHARE_DENY_NONE) { NP(np, "nfs_vnop_open: deny mode foregone on create, %d", kauth_cred_getuid(nofp->nof_owner->noo_cred)); + } nofp->nof_creator = NULL; error = 0; } - if (error) + if (error) { goto out; + } opened = 1; /* * If we had just created the file, we already had it open. @@ -872,8 +918,9 @@ restart: if ((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && (nofp->nof_creator == current_thread())) { error = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_BOTH, NFS_OPEN_SHARE_DENY_NONE, ctx); - if (error) + if (error) { NP(np, "nfs_vnop_open: create close error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); + } if (!nfs_mount_state_error_should_restart(error)) { error = 0; nofp->nof_flags &= ~NFS_OPEN_FILE_CREATE; @@ -882,27 +929,31 @@ restart: } out: - if (nofp) + if (nofp) { nfs_open_file_clear_busy(nofp); + } if (nfs_mount_state_in_use_end(nmp, error)) { nofp = NULL; goto restart; } - if (error) + if (error) { NP(np, "nfs_vnop_open: error %d, %d", error, kauth_cred_getuid(noop->noo_cred)); - if (noop) + } + if (noop) { nfs_open_owner_rele(noop); + } if (!error && vtype == VREG && (ap->a_mode & FWRITE)) { lck_mtx_lock(&nmp->nm_lock); nmp->nm_state &= ~NFSSTA_SQUISHY; nmp->nm_curdeadtimeout = nmp->nm_deadtimeout; - if (nmp->nm_curdeadtimeout <= 0) + if (nmp->nm_curdeadtimeout <= 0) { nmp->nm_deadto_start = 0; + } nmp->nm_writers++; lck_mtx_unlock(&nmp->nm_lock); } - - return (error); + + return error; } static uint32_t @@ -911,14 +962,14 @@ nfs_no_of_open_file_writers(nfsnode_t np) uint32_t writers = 0; struct nfs_open_file *nofp; - TAILQ_FOREACH(nofp, &np->n_opens, nof_link) { + TAILQ_FOREACH(nofp, &np->n_opens, nof_link) { writers += nofp->nof_w + nofp->nof_rw + nofp->nof_w_dw + nofp->nof_rw_dw + - nofp->nof_w_drw + nofp->nof_rw_drw + nofp->nof_d_w_dw + - nofp->nof_d_rw_dw + nofp->nof_d_w_drw + nofp->nof_d_rw_drw + - nofp->nof_d_w + nofp->nof_d_rw; + nofp->nof_w_drw + nofp->nof_rw_drw + nofp->nof_d_w_dw + + nofp->nof_d_rw_dw + nofp->nof_d_w_drw + nofp->nof_d_rw_drw + + nofp->nof_d_w + nofp->nof_d_rw; } - - return (writers); + + return writers; } /* @@ -947,11 +998,11 @@ nfs_no_of_open_file_writers(nfsnode_t np) int nfs_vnop_close( struct vnop_close_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_fflag; - vfs_context_t a_context; - } */ *ap) + * 
struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_fflag; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -965,28 +1016,31 @@ nfs_vnop_close( struct nfs_open_file *nofp = NULL; nmp = VTONMP(vp); - if (!nmp) - return (ENXIO); + if (!nmp) { + return ENXIO; + } nfsvers = nmp->nm_vers; vtype = vnode_vtype(vp); /* First, check if we need to update/flush/invalidate */ - if (ISSET(np->n_flag, NUPDATESIZE)) + if (ISSET(np->n_flag, NUPDATESIZE)) { nfs_data_update_size(np, 0); + } nfs_node_lock_force(np); if (np->n_flag & NNEEDINVALIDATE) { np->n_flag &= ~NNEEDINVALIDATE; nfs_node_unlock(np); - nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1); + nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1); nfs_node_lock_force(np); } if ((vtype == VREG) && (np->n_flag & NMODIFIED) && (fflag & FWRITE)) { /* we're closing an open for write and the file is modified, so flush it */ nfs_node_unlock(np); - if (nfsvers != NFS_VER2) + if (nfsvers != NFS_VER2) { error = nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), 0); - else + } else { error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1); + } nfs_node_lock_force(np); NATTRINVALIDATE(np); } @@ -1000,27 +1054,29 @@ nfs_vnop_close( /* Just mark that it was closed */ lck_mtx_lock(&np->n_openlock); if (np->n_openrefcnt == 0) { - if (fflag & (FREAD|FWRITE)) { + if (fflag & (FREAD | FWRITE)) { NP(np, "nfs_vnop_close: open reference underrun"); error = EINVAL; } - } else if (fflag & (FREAD|FWRITE)) { + } else if (fflag & (FREAD | FWRITE)) { np->n_openrefcnt--; } else { /* No FREAD/FWRITE set - probably the final close */ np->n_openrefcnt = 0; } lck_mtx_unlock(&np->n_openlock); - return (error); + return error; } error1 = error; /* fflag should contain some combination of: FREAD, FWRITE, FHASLOCK */ accessMode = 0; - if (fflag & FREAD) + if (fflag & FREAD) { accessMode |= NFS_OPEN_SHARE_ACCESS_READ; - if (fflag & FWRITE) + } + if (fflag & FWRITE) { accessMode |= NFS_OPEN_SHARE_ACCESS_WRITE; + } // XXX It would be nice if we still had the O_EXLOCK/O_SHLOCK flags that were on the open // if (fflag & O_EXLOCK) // denyMode = NFS_OPEN_SHARE_DENY_BOTH; @@ -1032,14 +1088,15 @@ nfs_vnop_close( if (fflag & FHASLOCK) { /* XXX assume FHASLOCK is for the deny mode and not flock */ /* FHASLOCK flock will be unlocked in the close path, but the flag is not cleared. */ - if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) + if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_READ) { denyMode = NFS_OPEN_SHARE_DENY_BOTH; - else if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) + } else if (nofp->nof_deny & NFS_OPEN_SHARE_DENY_WRITE) { denyMode = NFS_OPEN_SHARE_DENY_WRITE; - else + } else { denyMode = NFS_OPEN_SHARE_DENY_NONE; + } } else { - denyMode = NFS_OPEN_SHARE_DENY_NONE; + denyMode = NFS_OPEN_SHARE_DENY_NONE; } #else // XXX don't do deny modes just yet (and never do it for !v4) @@ -1062,16 +1119,16 @@ nfs_vnop_close( lck_mtx_lock(&nmp->nm_lock); if (writers > nmp->nm_writers) { NP(np, "nfs_vnop_close: number of write opens for mount underrun. Node has %d" - " opens for write. Mount has total of %d opens for write\n", - writers, nmp->nm_writers); + " opens for write. 
Mount has total of %d opens for write\n", + writers, nmp->nm_writers); nmp->nm_writers = 0; } else { nmp->nm_writers -= writers; } lck_mtx_unlock(&nmp->nm_lock); } - - return (error); + + return error; } else if (fflag & FWRITE) { lck_mtx_lock(&nmp->nm_lock); if (nmp->nm_writers == 0) { @@ -1081,19 +1138,19 @@ nfs_vnop_close( } lck_mtx_unlock(&nmp->nm_lock); } - + noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 0); if (!noop) { // printf("nfs_vnop_close: can't get open owner!\n"); - return (EIO); + return EIO; } restart: error = nfs_mount_state_in_use_start(nmp, NULL); if (error) { nfs_open_owner_rele(noop); - return (error); + return error; } error = nfs_open_file_find(np, noop, &nofp, 0, 0, 0); @@ -1101,8 +1158,9 @@ restart: nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, NULL); nofp = NULL; - if (!error) + if (!error) { goto restart; + } } if (error) { NP(np, "nfs_vnop_close: no open file for owner, error %d, %d", error, kauth_cred_getuid(noop->noo_cred)); @@ -1116,23 +1174,28 @@ restart: } error = nfs_close(np, nofp, accessMode, denyMode, ctx); - if (error) + if (error) { NP(np, "nfs_vnop_close: close error %d, %d", error, kauth_cred_getuid(noop->noo_cred)); + } out: - if (nofp) + if (nofp) { nfs_open_file_clear_busy(nofp); + } if (nfs_mount_state_in_use_end(nmp, error)) { nofp = NULL; goto restart; } - if (!error) + if (!error) { error = error1; - if (error) + } + if (error) { NP(np, "nfs_vnop_close: error %d, %d", error, kauth_cred_getuid(noop->noo_cred)); - if (noop) + } + if (noop) { nfs_open_owner_rele(noop); - return (error); + } + return error; } /* @@ -1152,12 +1215,13 @@ nfs_close( struct nfs_lock_owner *nlop; int error = 0, changed = 0, delegated = 0, closed = 0, downgrade = 0; uint32_t newAccessMode, newDenyMode; - + /* warn if modes don't match current state */ - if (((accessMode & nofp->nof_access) != accessMode) || ((denyMode & nofp->nof_deny) != denyMode)) + if (((accessMode & nofp->nof_access) != accessMode) || ((denyMode & nofp->nof_deny) != denyMode)) { NP(np, "nfs_close: mode mismatch %d %d, current %d %d, %d", - accessMode, denyMode, nofp->nof_access, nofp->nof_deny, - kauth_cred_getuid(nofp->nof_owner->noo_cred)); + accessMode, denyMode, nofp->nof_access, nofp->nof_deny, + kauth_cred_getuid(nofp->nof_owner->noo_cred)); + } /* * If we're closing a write-only open, we may not have a write-only count @@ -1166,28 +1230,33 @@ nfs_close( if (denyMode == NFS_OPEN_SHARE_DENY_NONE) { if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) && (nofp->nof_w == 0) && (nofp->nof_d_w == 0) && - (nofp->nof_rw || nofp->nof_d_rw)) + (nofp->nof_rw || nofp->nof_d_rw)) { accessMode = NFS_OPEN_SHARE_ACCESS_BOTH; + } } else if (denyMode == NFS_OPEN_SHARE_DENY_WRITE) { if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) && (nofp->nof_w_dw == 0) && (nofp->nof_d_w_dw == 0) && - (nofp->nof_rw_dw || nofp->nof_d_rw_dw)) + (nofp->nof_rw_dw || nofp->nof_d_rw_dw)) { accessMode = NFS_OPEN_SHARE_ACCESS_BOTH; + } } else { /* NFS_OPEN_SHARE_DENY_BOTH */ if ((accessMode == NFS_OPEN_SHARE_ACCESS_WRITE) && (nofp->nof_w_drw == 0) && (nofp->nof_d_w_drw == 0) && - (nofp->nof_rw_drw || nofp->nof_d_rw_drw)) + (nofp->nof_rw_drw || nofp->nof_d_rw_drw)) { accessMode = NFS_OPEN_SHARE_ACCESS_BOTH; + } } nfs_open_file_remove_open_find(nofp, accessMode, denyMode, &newAccessMode, &newDenyMode, &delegated); - if ((newAccessMode != nofp->nof_access) || (newDenyMode != nofp->nof_deny)) + if ((newAccessMode != nofp->nof_access) || (newDenyMode != nofp->nof_deny)) { changed = 1; - else + } else { changed = 0; + 
} - if (NFSTONMP(np)->nm_vers < NFS_VER4) /* NFS v2/v3 closes simply need to remove the open. */ + if (NFSTONMP(np)->nm_vers < NFS_VER4) { /* NFS v2/v3 closes simply need to remove the open. */ goto v3close; + } if ((newAccessMode == 0) || (nofp->nof_opencnt == 1)) { /* @@ -1196,8 +1265,9 @@ nfs_close( */ nfs_wait_bufs(np); closed = 1; - if (!delegated && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) + if (!delegated && !(nofp->nof_flags & NFS_OPEN_FILE_LOST)) { error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0); + } if (error == NFSERR_LOCKS_HELD) { /* * Hmm... the server says we have locks we need to release first @@ -1206,7 +1276,7 @@ nfs_close( nlop = nfs_lock_owner_find(np, vfs_context_proc(ctx), 0); if (nlop) { nfs4_unlock_rpc(np, nlop, F_WRLCK, 0, UINT64_MAX, - 0, vfs_context_thread(ctx), vfs_context_ucred(ctx)); + 0, vfs_context_thread(ctx), vfs_context_ucred(ctx)); nfs_lock_owner_rele(nlop); } error = nfs4_close_rpc(np, nofp, vfs_context_thread(ctx), vfs_context_ucred(ctx), 0); @@ -1224,24 +1294,27 @@ nfs_close( */ if (nofp->nof_d_rw_drw || nofp->nof_d_w_drw || nofp->nof_d_r_drw || nofp->nof_d_rw_dw || nofp->nof_d_w_dw || nofp->nof_d_r_dw || - nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r) + nofp->nof_d_rw || nofp->nof_d_w || nofp->nof_d_r) { nfs4_claim_delegated_state_for_open_file(nofp, 0); + } /* need to remove the open before sending the downgrade */ nfs_open_file_remove_open(nofp, accessMode, denyMode); error = nfs4_open_downgrade_rpc(np, nofp, ctx); - if (error) /* Hmm.. that didn't work. Add the open back in. */ + if (error) { /* Hmm.. that didn't work. Add the open back in. */ nfs_open_file_add_open(nofp, accessMode, denyMode, delegated); + } } } if (error) { NP(np, "nfs_close: error %d, %d", error, kauth_cred_getuid(nofp->nof_owner->noo_cred)); - return (error); + return error; } v3close: - if (!downgrade) + if (!downgrade) { nfs_open_file_remove_open(nofp, accessMode, denyMode); + } if (closed) { lck_mtx_lock(&nofp->nof_lock); @@ -1249,14 +1322,15 @@ v3close: (nofp->nof_rw && !((nofp->nof_flags & NFS_OPEN_FILE_CREATE) && !nofp->nof_creator && (nofp->nof_rw == 1))) || nofp->nof_r_dw || nofp->nof_d_r_dw || nofp->nof_w_dw || nofp->nof_d_w_dw || nofp->nof_rw_dw || nofp->nof_d_rw_dw || nofp->nof_r_drw || nofp->nof_d_r_drw || - nofp->nof_w_drw || nofp->nof_d_w_drw || nofp->nof_rw_drw || nofp->nof_d_rw_drw) + nofp->nof_w_drw || nofp->nof_d_w_drw || nofp->nof_rw_drw || nofp->nof_d_rw_drw) { NP(np, "nfs_close: unexpected count: %u.%u %u.%u %u.%u dw %u.%u %u.%u %u.%u drw %u.%u %u.%u %u.%u flags 0x%x, %d", - nofp->nof_r, nofp->nof_d_r, nofp->nof_w, nofp->nof_d_w, - nofp->nof_rw, nofp->nof_d_rw, nofp->nof_r_dw, nofp->nof_d_r_dw, - nofp->nof_w_dw, nofp->nof_d_w_dw, nofp->nof_rw_dw, nofp->nof_d_rw_dw, - nofp->nof_r_drw, nofp->nof_d_r_drw, nofp->nof_w_drw, nofp->nof_d_w_drw, - nofp->nof_rw_drw, nofp->nof_d_rw_drw, nofp->nof_flags, - kauth_cred_getuid(nofp->nof_owner->noo_cred)); + nofp->nof_r, nofp->nof_d_r, nofp->nof_w, nofp->nof_d_w, + nofp->nof_rw, nofp->nof_d_rw, nofp->nof_r_dw, nofp->nof_d_r_dw, + nofp->nof_w_dw, nofp->nof_d_w_dw, nofp->nof_rw_dw, nofp->nof_d_rw_dw, + nofp->nof_r_drw, nofp->nof_d_r_drw, nofp->nof_w_drw, nofp->nof_d_w_drw, + nofp->nof_rw_drw, nofp->nof_d_rw_drw, nofp->nof_flags, + kauth_cred_getuid(nofp->nof_owner->noo_cred)); + } /* clear out all open info, just to be safe */ nofp->nof_access = nofp->nof_deny = 0; nofp->nof_mmap_access = nofp->nof_mmap_deny = 0; @@ -1276,10 +1350,10 @@ v3close: if (nofp->nof_flags & 
NFS_OPEN_FILE_LOST) { error = EIO; NP(np, "nfs_close: LOST%s, %d", !nofp->nof_opencnt ? " (last)" : "", - kauth_cred_getuid(nofp->nof_owner->noo_cred)); + kauth_cred_getuid(nofp->nof_owner->noo_cred)); } - - return (error); + + return error; } @@ -1298,36 +1372,41 @@ nfs3_getattr_rpc( int error = 0, status, nfsvers, rpcflags = 0; struct nfsm_chain nmreq, nmrep; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (flags & NGA_MONITOR) /* vnode monitor requests should be soft */ + if (flags & NGA_MONITOR) { /* vnode monitor requests should be soft */ rpcflags = R_RECOVER; + } - if (flags & NGA_SOFT) /* Return ETIMEDOUT if server not responding */ + if (flags & NGA_SOFT) { /* Return ETIMEDOUT if server not responding */ rpcflags |= R_SOFT; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, NFSX_FH(nfsvers)); - if (nfsvers != NFS_VER2) + if (nfsvers != NFS_VER2) { nfsm_chain_add_32(error, &nmreq, fhsize); + } nfsm_chain_add_opaque(error, &nmreq, fhp, fhsize); nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); - error = nfs_request2(np, mp, &nmreq, NFSPROC_GETATTR, - vfs_context_thread(ctx), vfs_context_ucred(ctx), - NULL, rpcflags, &nmrep, xidp, &status); - if (!error) + error = nfs_request2(np, mp, &nmreq, NFSPROC_GETATTR, + vfs_context_thread(ctx), vfs_context_ucred(ctx), + NULL, rpcflags, &nmrep, xidp, &status); + if (!error) { error = status; + } nfsmout_if(error); error = nfs_parsefattr(&nmrep, nfsvers, nvap); nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -1336,7 +1415,7 @@ nfsmout: * It only does this for symbolic links and regular files that are not currently opened. * * On Success returns 0 and the nodes file handle is updated, or ESTALE on failure. - */ + */ int nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) { @@ -1354,19 +1433,20 @@ nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) dvp = vnode_parent(vp); if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VLNK) || v_name == NULL || *v_name == '\0' || dvp == NULL) { - if (v_name != NULL) + if (v_name != NULL) { vnode_putname(v_name); - return (ESTALE); + } + return ESTALE; } dnp = VTONFS(dvp); - + namelen = strlen(v_name); MALLOC(name, char *, namelen + 1, M_TEMP, M_WAITOK); if (name == NULL) { vnode_putname(v_name); - return (ESTALE); + return ESTALE; } - bcopy(v_name, name, namelen+1); + bcopy(v_name, name, namelen + 1); NFS_VNOP_DBG("Trying to refresh %s : %s\n", v_name, name); vnode_putname(v_name); @@ -1374,52 +1454,54 @@ nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) MALLOC(fhp, uint8_t *, NFS4_FHSIZE, M_TEMP, M_WAITOK); if (fhp == NULL) { FREE(name, M_TEMP); - return (ESTALE); + return ESTALE; } - + if ((error = nfs_node_lock(np))) { FREE(name, M_TEMP); FREE(fhp, M_TEMP); - return (ESTALE); + return ESTALE; } - + fhsize = np->n_fhsize; bcopy(np->n_fhp, fhp, fhsize); while (ISSET(np->n_flag, NREFRESH)) { SET(np->n_flag, NREFRESHWANT); NFS_VNOP_DBG("Waiting for refresh of %s\n", name); - msleep(np, &np->n_lock, PZERO-1, "nfsrefreshwant", &ts); - if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0))) + msleep(np, &np->n_lock, PZERO - 1, "nfsrefreshwant", &ts); + if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0))) { break; + } } refreshed = error ? 
0 : !NFS_CMPFH(np, fhp, fhsize); SET(np->n_flag, NREFRESH); nfs_node_unlock(np); NFS_VNOP_DBG("error = %d, refreshed = %d\n", error, refreshed); - if (error || refreshed) + if (error || refreshed) { goto nfsmout; - + } + /* Check that there are no open references for this file */ lck_mtx_lock(&np->n_openlock); if (np->n_openrefcnt || !TAILQ_EMPTY(&np->n_opens) || !TAILQ_EMPTY(&np->n_lock_owners)) { int cnt = 0; struct nfs_open_file *ofp; - + TAILQ_FOREACH(ofp, &np->n_opens, nof_link) { cnt += ofp->nof_opencnt; } if (cnt) { lck_mtx_unlock(&np->n_openlock); NFS_VNOP_DBG("Can not refresh file handle for %s with open state\n", name); - NFS_VNOP_DBG("\topenrefcnt = %d, opens = %d lock_owners = %d\n", - np->n_openrefcnt, cnt, !TAILQ_EMPTY(&np->n_lock_owners)); + NFS_VNOP_DBG("\topenrefcnt = %d, opens = %d lock_owners = %d\n", + np->n_openrefcnt, cnt, !TAILQ_EMPTY(&np->n_lock_owners)); error = ESTALE; goto nfsmout; } } lck_mtx_unlock(&np->n_openlock); - /* + /* * Since the FH is currently stale we should not be able to * establish any open state until the FH is refreshed. */ @@ -1437,8 +1519,9 @@ nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) np->n_flag &= ~NNEEDINVALIDATE; nfs_node_unlock(np); error = nfs_vinvalbuf(vp, V_IGNORE_WRITEERR, ctx, 1); - if (error) + if (error) { NFS_VNOP_DBG("nfs_vinvalbuf returned %d\n", error); + } nfsmout_if(error); } else { nfs_node_unlock(np); @@ -1446,39 +1529,43 @@ nfs_refresh_fh(nfsnode_t np, vfs_context_t ctx) NFS_VNOP_DBG("Looking up %s\n", name); error = nfs_lookitup(dnp, name, namelen, ctx, &np); - if (error) + if (error) { NFS_VNOP_DBG("nfs_lookitup returned %d\n", error); + } nfsmout: nfs_node_lock_force(np); wanted = ISSET(np->n_flag, NREFRESHWANT); - CLR(np->n_flag, NREFRESH|NREFRESHWANT); + CLR(np->n_flag, NREFRESH | NREFRESHWANT); nfs_node_unlock(np); - if (wanted) + if (wanted) { wakeup(np); - - if (error == 0) + } + + if (error == 0) { NFS_VNOP_DBG("%s refreshed file handle\n", name); + } FREE(name, M_TEMP); FREE(fhp, M_TEMP); - - return (error ? ESTALE : 0); + + return error ? ESTALE : 0; } int nfs_getattr(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, int flags) { int error; - + retry: error = nfs_getattr_internal(np, nvap, ctx, flags); if (error == ESTALE) { error = nfs_refresh_fh(np, ctx); - if (!error) + if (!error) { goto retry; + } } - return (error); + return error; } int @@ -1493,13 +1580,15 @@ nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, in FSDBG_TOP(513, np->n_size, np, np->n_vattr.nva_size, np->n_flag); nmp = NFSTONMP(np); - - if (nfs_mount_gone(nmp)) - return (ENXIO); + + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (!nvap) + if (!nvap) { nvap = &nvattr; + } NVATTR_INIT(nvap); /* Update local times for special files. 
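The NREFRESH/NREFRESHWANT dance above is a single-flight gate: one thread refreshes the stale file handle while the others sleep and get woken when it finishes. The same shape in userspace pthreads, purely as a model (the kernel uses a node flag with msleep()/wakeup(), not a condvar):

#include <pthread.h>
#include <stdbool.h>

struct refresh_gate {
    pthread_mutex_t lock;   /* init with PTHREAD_MUTEX_INITIALIZER */
    pthread_cond_t  done;   /* init with PTHREAD_COND_INITIALIZER */
    bool            in_progress;
};

void refresh_enter(struct refresh_gate *g)
{
    pthread_mutex_lock(&g->lock);
    while (g->in_progress)              /* someone else is refreshing */
        pthread_cond_wait(&g->done, &g->lock);
    g->in_progress = true;              /* we own the refresh now */
    pthread_mutex_unlock(&g->lock);
}

void refresh_exit(struct refresh_gate *g)
{
    pthread_mutex_lock(&g->lock);
    g->in_progress = false;
    pthread_cond_broadcast(&g->done);   /* wake every waiter */
    pthread_mutex_unlock(&g->lock);
}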
*/ @@ -1509,12 +1598,13 @@ nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, in nfs_node_unlock(np); } /* Update size, if necessary */ - if (ISSET(np->n_flag, NUPDATESIZE)) + if (ISSET(np->n_flag, NUPDATESIZE)) { nfs_data_update_size(np, 0); + } error = nfs_node_lock(np); nfsmout_if(error); - if (!(flags & (NGA_UNCACHED|NGA_MONITOR)) || ((nfsvers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK))) { + if (!(flags & (NGA_UNCACHED | NGA_MONITOR)) || ((nfsvers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK))) { /* * Use the cache or wait for any getattr in progress if: * - it's a cached request, or @@ -1528,8 +1618,9 @@ nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, in goto nfsmout; } error = 0; - if (!ISSET(np->n_flag, NGETATTRINPROG)) + if (!ISSET(np->n_flag, NGETATTRINPROG)) { break; + } if (flags & NGA_MONITOR) { /* no need to wait if a request is pending */ error = EINPROGRESS; @@ -1537,7 +1628,7 @@ nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, in goto nfsmout; } SET(np->n_flag, NGETATTRWANT); - msleep(np, &np->n_lock, PZERO-1, "nfsgetattrwant", &ts); + msleep(np, &np->n_lock, PZERO - 1, "nfsgetattrwant", &ts); if ((error = nfs_sigintr(NFSTONMP(np), NULL, vfs_context_thread(ctx), 0))) { nfs_node_unlock(np); goto nfsmout; @@ -1555,18 +1646,21 @@ nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, in nfs_node_unlock(np); nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { error = ENXIO; - if (error) + } + if (error) { goto nfsmout; + } /* * Return cached attributes if they are valid, * if the server doesn't respond, and this is * some softened up style of mount. */ - if (NATTRVALID(np) && nfs_use_cache(nmp)) + if (NATTRVALID(np) && nfs_use_cache(nmp)) { flags |= NGA_SOFT; + } /* * We might want to try to get both the attributes and access info by @@ -1581,21 +1675,25 @@ nfs_getattr_internal(nfsnode_t np, struct nfs_vattr *nvap, vfs_context_t ctx, in int rpcflags = 0; /* Return cached attrs if server doesn't respond */ - if (flags & NGA_SOFT) + if (flags & NGA_SOFT) { rpcflags |= R_SOFT; + } error = nmp->nm_funcs->nf_access_rpc(np, &access, rpcflags, ctx); - if (error == ETIMEDOUT) + if (error == ETIMEDOUT) { goto returncached; + } - if (error) + if (error) { goto nfsmout; + } nfs_node_lock_force(np); error = nfs_getattrcache(np, nvap, flags); nfs_node_unlock(np); - if (!error || (error != ENOENT)) + if (!error || (error != ENOENT)) { goto nfsmout; + } /* Well, that didn't work... just do a getattr... */ error = 0; } @@ -1628,8 +1726,9 @@ returncached: if (!xid) { /* out-of-order rpc - attributes were dropped */ FSDBG(513, -1, np, np->n_xid >> 32, np->n_xid); - if (avoidfloods++ < 20) + if (avoidfloods++ < 20) { goto tryagain; + } /* avoidfloods>1 is bizarre. 
at 20 pull the plug */ /* just return the last attributes we got */ } @@ -1649,19 +1748,20 @@ nfsmout: cache_purge(vp); np->n_ncgen++; NFS_CHANGED_UPDATE_NC(nfsvers, np, nvap); - NFS_VNOP_DBG("Purge directory 0x%llx\n", - (uint64_t)VM_KERNEL_ADDRPERM(vp)); + NFS_VNOP_DBG("Purge directory 0x%llx\n", + (uint64_t)VM_KERNEL_ADDRPERM(vp)); } if (NFS_CHANGED(nfsvers, np, nvap)) { FSDBG(513, -1, np, -1, np); if (vtype == VDIR) { - NFS_VNOP_DBG("Invalidate directory 0x%llx\n", - (uint64_t)VM_KERNEL_ADDRPERM(vp)); + NFS_VNOP_DBG("Invalidate directory 0x%llx\n", + (uint64_t)VM_KERNEL_ADDRPERM(vp)); nfs_invaldir(np); } nfs_node_unlock(np); - if (wanted) + if (wanted) { wakeup(np); + } error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1); FSDBG(513, -1, np, -2, error); if (!error) { @@ -1671,13 +1771,15 @@ nfsmout: } } else { nfs_node_unlock(np); - if (wanted) + if (wanted) { wakeup(np); + } } } else { nfs_node_unlock(np); - if (wanted) + if (wanted) { wakeup(np); + } } if (nvap == &nvattr) { @@ -1691,7 +1793,7 @@ nfsmout: } } FSDBG_BOT(513, np->n_size, error, np->n_vattr.nva_size, np->n_flag); - return (error); + return error; } /* @@ -1700,33 +1802,33 @@ nfsmout: /* * The attributes we support over the wire. - * We also get fsid but the vfs layer gets it out of the mount + * We also get fsid but the vfs layer gets it out of the mount * structure after this calling us so there's no need to return it, * and Finder expects to call getattrlist just looking for the FSID * with out hanging on a non responsive server. */ #define NFS3_SUPPORTED_VATTRS \ - (VNODE_ATTR_va_rdev | \ - VNODE_ATTR_va_nlink | \ - VNODE_ATTR_va_data_size | \ - VNODE_ATTR_va_data_alloc | \ - VNODE_ATTR_va_uid | \ - VNODE_ATTR_va_gid | \ - VNODE_ATTR_va_mode | \ - VNODE_ATTR_va_modify_time | \ - VNODE_ATTR_va_change_time | \ - VNODE_ATTR_va_access_time | \ - VNODE_ATTR_va_fileid | \ + (VNODE_ATTR_va_rdev | \ + VNODE_ATTR_va_nlink | \ + VNODE_ATTR_va_data_size | \ + VNODE_ATTR_va_data_alloc | \ + VNODE_ATTR_va_uid | \ + VNODE_ATTR_va_gid | \ + VNODE_ATTR_va_mode | \ + VNODE_ATTR_va_modify_time | \ + VNODE_ATTR_va_change_time | \ + VNODE_ATTR_va_access_time | \ + VNODE_ATTR_va_fileid | \ VNODE_ATTR_va_type) int nfs3_vnop_getattr( struct vnop_getattr_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { int error; struct nfs_vattr nva; @@ -1740,20 +1842,22 @@ nfs3_vnop_getattr( */ /* Return the io size no matter what, since we don't go over the wire for this */ VATTR_RETURN(vap, va_iosize, nfs_iosize); - if ((vap->va_active & NFS3_SUPPORTED_VATTRS) == 0) - return (0); + if ((vap->va_active & NFS3_SUPPORTED_VATTRS) == 0) { + return 0; + } - if (VATTR_IS_ACTIVE(ap->a_vap, va_name)) - NFS_VNOP_DBG("Getting attrs for 0x%llx, vname is %s\n", - (uint64_t)VM_KERNEL_ADDRPERM(ap->a_vp), - ap->a_vp->v_name ? ap->a_vp->v_name : "empty"); + if (VATTR_IS_ACTIVE(ap->a_vap, va_name)) { + NFS_VNOP_DBG("Getting attrs for 0x%llx, vname is %s\n", + (uint64_t)VM_KERNEL_ADDRPERM(ap->a_vp), + ap->a_vp->v_name ? ap->a_vp->v_name : "empty"); + } error = nfs_getattr(VTONFS(ap->a_vp), &nva, ap->a_context, NGA_CACHED); - if (error) - return (error); + if (error) { + return error; + } /* copy nva to *a_vap */ nmp = VTONMP(ap->a_vp); - vap->va_flags |= nmp ? (nmp->nm_vers > 2 ? 
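The avoidfloods/tryagain logic above hinges on transaction-id ordering: a GETATTR reply stamped with an older xid than what the node already cached was superseded in flight and must be dropped rather than applied. A sketch of that guard (struct attr is illustrative):

#include <stdint.h>
#include <string.h>

struct attr { uint64_t xid; int valid; /* ... attribute fields ... */ };

/* Replies can arrive out of order; applying a stale one would let an
 * old GETATTR overwrite newer cached state. */
int accept_attrs(struct attr *cached, const struct attr *reply)
{
    if (reply->xid < cached->xid)
        return 0;                       /* stale reply: drop it */
    memcpy(cached, reply, sizeof(*cached));
    cached->valid = 1;
    return 1;
}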
VA_64BITOBJIDS : 0) : 0; VATTR_RETURN(vap, va_type, nva.nva_type); VATTR_RETURN(vap, va_mode, nva.nva_mode); rdev = makedev(nva.nva_rawdev.specdata1, nva.nva_rawdev.specdata2); @@ -1775,7 +1879,7 @@ nfs3_vnop_getattr( VATTR_SET_SUPPORTED(vap, va_change_time); // VATTR_RETURN(vap, va_encoding, 0xffff /* kTextEncodingUnknown */); - return (error); + return error; } /* @@ -1784,11 +1888,11 @@ nfs3_vnop_getattr( int nfs_vnop_setattr( struct vnop_setattr_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -1807,21 +1911,23 @@ nfs_vnop_setattr( struct nfs_open_file *nofp = NULL; nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR); biosize = nmp->nm_biosize; /* Disallow write attempts if the filesystem is mounted read-only. */ - if (vnode_vfsisrdonly(vp)) - return (EROFS); + if (vnode_vfsisrdonly(vp)) { + return EROFS; + } origsize = np->n_size; if (VATTR_IS_ACTIVE(vap, va_data_size)) { switch (vnode_vtype(vp)) { case VDIR: - return (EISDIR); + return EISDIR; case VCHR: case VBLK: case VSOCK: @@ -1831,7 +1937,7 @@ nfs_vnop_setattr( !VATTR_IS_ACTIVE(vap, va_mode) && !VATTR_IS_ACTIVE(vap, va_uid) && !VATTR_IS_ACTIVE(vap, va_gid)) { - return (0); + return 0; } VATTR_CLEAR_ACTIVE(vap, va_data_size); break; @@ -1840,64 +1946,75 @@ nfs_vnop_setattr( * Disallow write attempts if the filesystem is * mounted read-only. */ - if (vnode_vfsisrdonly(vp)) - return (EROFS); + if (vnode_vfsisrdonly(vp)) { + return EROFS; + } FSDBG_TOP(512, np->n_size, vap->va_data_size, - np->n_vattr.nva_size, np->n_flag); + np->n_vattr.nva_size, np->n_flag); /* clear NNEEDINVALIDATE, if set */ - if ((error = nfs_node_lock(np))) - return (error); - if (np->n_flag & NNEEDINVALIDATE) + if ((error = nfs_node_lock(np))) { + return error; + } + if (np->n_flag & NNEEDINVALIDATE) { np->n_flag &= ~NNEEDINVALIDATE; + } nfs_node_unlock(np); /* flush everything */ - error = nfs_vinvalbuf(vp, (vap->va_data_size ? V_SAVE : 0) , ctx, 1); + error = nfs_vinvalbuf(vp, (vap->va_data_size ? 
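nfs3_vnop_getattr() above short-circuits when none of the requested attributes fall in NFS3_SUPPORTED_VATTRS, since va_iosize is answered locally and nothing else would need the wire. The same early-out, modeled with illustrative attribute bits:

#include <stdint.h>

#define ATTR_SIZE  (1u << 0)
#define ATTR_MODE  (1u << 1)
#define ATTR_UID   (1u << 2)
/* ... */
#define WIRE_ATTRS (ATTR_SIZE | ATTR_MODE | ATTR_UID)

/* Skip the over-the-wire GETATTR entirely when the caller asked only
 * for attributes that can be answered locally. */
int needs_rpc(uint32_t requested)
{
    return (requested & WIRE_ATTRS) != 0;
}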
V_SAVE : 0), ctx, 1); if (error) { NP(np, "nfs_setattr: nfs_vinvalbuf %d", error); FSDBG_BOT(512, np->n_size, vap->va_data_size, np->n_vattr.nva_size, -1); - return (error); + return error; } if (nfsvers >= NFS_VER4) { /* setting file size requires having the file open for write access */ - if (np->n_flag & NREVOKE) - return (EIO); + if (np->n_flag & NREVOKE) { + return EIO; + } noop = nfs_open_owner_find(nmp, vfs_context_ucred(ctx), 1); - if (!noop) - return (ENOMEM); + if (!noop) { + return ENOMEM; + } restart: error = nfs_mount_state_in_use_start(nmp, vfs_context_thread(ctx)); - if (error) - return (error); + if (error) { + return error; + } if (np->n_flag & NREVOKE) { nfs_mount_state_in_use_end(nmp, 0); - return (EIO); + return EIO; } error = nfs_open_file_find(np, noop, &nofp, 0, 0, 1); - if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) + if (!error && (nofp->nof_flags & NFS_OPEN_FILE_LOST)) { error = EIO; + } if (!error && (nofp->nof_flags & NFS_OPEN_FILE_REOPEN)) { nfs_mount_state_in_use_end(nmp, 0); error = nfs4_reopen(nofp, vfs_context_thread(ctx)); nofp = NULL; - if (!error) + if (!error) { goto restart; + } } - if (!error) + if (!error) { error = nfs_open_file_set_busy(nofp, vfs_context_thread(ctx)); + } if (error) { nfs_open_owner_rele(noop); - return (error); + return error; } if (!(nofp->nof_access & NFS_OPEN_SHARE_ACCESS_WRITE)) { /* we don't have the file open for write access, so open it */ error = nfs4_open(np, nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, ctx); - if (!error) + if (!error) { nofp->nof_flags |= NFS_OPEN_FILE_SETATTR; + } if (nfs_mount_state_error_should_restart(error)) { nfs_open_file_clear_busy(nofp); nofp = NULL; - if (nfs_mount_state_in_use_end(nmp, error)) + if (nfs_mount_state_in_use_end(nmp, error)) { goto restart; + } } } } @@ -1909,12 +2026,14 @@ restart: obn = (np->n_size - 1) / biosize; bn = vap->va_data_size / biosize; - for ( ; obn >= bn; obn--) { - if (!nfs_buf_is_incore(np, obn)) + for (; obn >= bn; obn--) { + if (!nfs_buf_is_incore(np, obn)) { continue; + } error = nfs_buf_get(np, obn, biosize, NULL, NBLK_READ, &bp); - if (error) + if (error) { continue; + } if (obn != bn) { FSDBG(512, bp, bp->nb_flags, 0, obn); SET(bp->nb_flags, NB_INVAL); @@ -1928,15 +2047,18 @@ restart: /* clip dirty range to EOF */ if (bp->nb_dirtyend > neweofoff) { bp->nb_dirtyend = neweofoff; - if (bp->nb_dirtyoff >= bp->nb_dirtyend) + if (bp->nb_dirtyoff >= bp->nb_dirtyend) { bp->nb_dirtyoff = bp->nb_dirtyend = 0; + } } - if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < neweofoff)) + if ((bp->nb_dirtyend > 0) && (bp->nb_dirtyoff < neweofoff)) { mustwrite++; + } } - bp->nb_dirty &= (1 << round_page_32(neweofoff)/PAGE_SIZE) - 1; - if (bp->nb_dirty) + bp->nb_dirty &= (1 << round_page_32(neweofoff) / PAGE_SIZE) - 1; + if (bp->nb_dirty) { mustwrite++; + } if (!mustwrite) { FSDBG(512, bp, bp->nb_flags, 0, obn); SET(bp->nb_flags, NB_INVAL); @@ -1970,14 +2092,15 @@ restart: NATTRINVALIDATE(np); nfs_node_unlock(np); nfs_data_unlock(np); - nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1); + nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1); nfs_data_lock(np, NFS_DATA_LOCK_EXCLUSIVE); error = 0; } } } - if (vap->va_data_size != np->n_size) + if (vap->va_data_size != np->n_size) { ubc_setsize(vp, (off_t)vap->va_data_size); /* XXX error? 
*/ + } origsize = np->n_size; np->n_size = np->n_vattr.nva_size = vap->va_data_size; nfs_node_lock_force(np); @@ -1986,21 +2109,23 @@ restart: FSDBG(512, np, np->n_size, np->n_vattr.nva_size, 0xf00d0001); } } else if (VATTR_IS_ACTIVE(vap, va_modify_time) || - VATTR_IS_ACTIVE(vap, va_access_time) || - (vap->va_vaflags & VA_UTIMES_NULL)) { - if ((error = nfs_node_lock(np))) - return (error); + VATTR_IS_ACTIVE(vap, va_access_time) || + (vap->va_vaflags & VA_UTIMES_NULL)) { + if ((error = nfs_node_lock(np))) { + return error; + } if ((np->n_flag & NMODIFIED) && (vnode_vtype(vp) == VREG)) { nfs_node_unlock(np); error = nfs_vinvalbuf(vp, V_SAVE, ctx, 1); - if (error == EINTR) - return (error); + if (error == EINTR) { + return error; + } } else { nfs_node_unlock(np); } } if ((VATTR_IS_ACTIVE(vap, va_mode) || VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid) || - VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid)) && + VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid)) && !(error = nfs_node_lock(np))) { NACCESSINVALIDATE(np); nfs_node_unlock(np); @@ -2018,16 +2143,19 @@ restart: dul_in_progress = 1; } } else { - if (dvp) + if (dvp) { vnode_put(dvp); - if (vname) + } + if (vname) { vnode_putname(vname); + } } } } - if (!error) + if (!error) { error = nmp->nm_funcs->nf_setattr_rpc(np, vap, ctx); + } if (dul_in_progress) { nfs_dulookup_finish(&dul, dnp, ctx); @@ -2052,8 +2180,9 @@ restart: vapsize = vap->va_data_size; vap->va_data_size = origsize; err = nmp->nm_funcs->nf_setattr_rpc(np, vap, ctx); - if (err) + if (err) { NP(np, "nfs_vnop_setattr: nfs%d_setattr_rpc %d %d", nfsvers, error, err); + } vap->va_data_size = vapsize; } nfs_node_lock_force(np); @@ -2078,19 +2207,21 @@ restart: if (!nfs_mount_state_error_should_restart(error) && (nofp->nof_flags & NFS_OPEN_FILE_SETATTR)) { int err = nfs_close(np, nofp, NFS_OPEN_SHARE_ACCESS_WRITE, NFS_OPEN_SHARE_DENY_NONE, ctx); - if (err) + if (err) { NP(np, "nfs_vnop_setattr: close error: %d", err); + } nofp->nof_flags &= ~NFS_OPEN_FILE_SETATTR; } nfs_open_file_clear_busy(nofp); nofp = NULL; } - if (nfs_mount_state_in_use_end(nmp, error)) + if (nfs_mount_state_in_use_end(nmp, error)) { goto restart; + } nfs_open_owner_rele(noop); } } - return (error); + return error; } /* @@ -2107,8 +2238,9 @@ nfs3_setattr_rpc( u_int64_t xid, nextxid; struct nfsm_chain nmreq, nmrep; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; VATTR_SET_SUPPORTED(vap, va_mode); @@ -2119,22 +2251,24 @@ nfs3_setattr_rpc( VATTR_SET_SUPPORTED(vap, va_modify_time); if (VATTR_IS_ACTIVE(vap, va_flags)) { - if (vap->va_flags) { /* we don't support setting flags */ - if (vap->va_active & ~VNODE_ATTR_va_flags) - return (EINVAL); /* return EINVAL if other attributes also set */ - else - return (ENOTSUP); /* return ENOTSUP for chflags(2) */ + if (vap->va_flags) { /* we don't support setting flags */ + if (vap->va_active & ~VNODE_ATTR_va_flags) { + return EINVAL; /* return EINVAL if other attributes also set */ + } else { + return ENOTSUP; /* return ENOTSUP for chflags(2) */ + } } /* no flags set, so we'll just ignore it */ - if (!(vap->va_active & ~VNODE_ATTR_va_flags)) - return (0); /* no (other) attributes to set, so nothing to do */ + if (!(vap->va_active & ~VNODE_ATTR_va_flags)) { + return 0; /* no (other) attributes to set, so nothing to do */ + } } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); 
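Truncation clips the buffer's dirty-page bitmap so only pages still inside the new EOF stay dirty, mirroring the nb_dirty masking in the hunk above. A standalone version of that computation (assuming, as the buffer code does, a 32-bit per-page bitmap):

#include <stdint.h>

#define PAGE_SIZE 4096u
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Keep only the dirty bits for pages that still fall inside the file
 * after truncating to neweofoff bytes within this block. */
uint32_t clip_dirty_mask(uint32_t dirty, uint32_t neweofoff)
{
    uint32_t pages_kept = round_page(neweofoff) / PAGE_SIZE;
    if (pages_kept >= 32)
        return dirty;                   /* new EOF beyond this block */
    return dirty & ((1u << pages_kept) - 1);
}

For example, truncating to offset 0x2100 in a 4 KB-page block keeps pages 0 through 2, a mask of 0x7.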
nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + NFSX_SATTR(nfsvers)); + NFSX_FH(nfsvers) + NFSX_SATTR(nfsvers)); nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize); if (nfsvers == NFS_VER3) { if (VATTR_IS_ACTIVE(vap, va_mode)) { @@ -2183,17 +2317,17 @@ nfs3_setattr_rpc( nfsm_chain_add_32(error, &nmreq, FALSE); } else { nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_mode) ? - vtonfsv2_mode(vnode_vtype(NFSTOV(np)), vap->va_mode) : -1); + vtonfsv2_mode(vnode_vtype(NFSTOV(np)), vap->va_mode) : -1); nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_uid) ? - vap->va_uid : (uint32_t)-1); + vap->va_uid : (uint32_t)-1); nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_gid) ? - vap->va_gid : (uint32_t)-1); + vap->va_gid : (uint32_t)-1); nfsm_chain_add_32(error, &nmreq, VATTR_IS_ACTIVE(vap, va_data_size) ? - vap->va_data_size : (uint32_t)-1); + vap->va_data_size : (uint32_t)-1); if (VATTR_IS_ACTIVE(vap, va_access_time)) { nfsm_chain_add_32(error, &nmreq, vap->va_access_time.tv_sec); nfsm_chain_add_32(error, &nmreq, (vap->va_access_time.tv_nsec != -1) ? - ((uint32_t)vap->va_access_time.tv_nsec / 1000) : 0xffffffff); + ((uint32_t)vap->va_access_time.tv_nsec / 1000) : 0xffffffff); } else { nfsm_chain_add_32(error, &nmreq, -1); nfsm_chain_add_32(error, &nmreq, -1); @@ -2201,7 +2335,7 @@ nfs3_setattr_rpc( if (VATTR_IS_ACTIVE(vap, va_modify_time)) { nfsm_chain_add_32(error, &nmreq, vap->va_modify_time.tv_sec); nfsm_chain_add_32(error, &nmreq, (vap->va_modify_time.tv_nsec != -1) ? - ((uint32_t)vap->va_modify_time.tv_nsec / 1000) : 0xffffffff); + ((uint32_t)vap->va_modify_time.tv_nsec / 1000) : 0xffffffff); } else { nfsm_chain_add_32(error, &nmreq, -1); nfsm_chain_add_32(error, &nmreq, -1); @@ -2210,25 +2344,30 @@ nfs3_setattr_rpc( nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request(np, NULL, &nmreq, NFSPROC_SETATTR, ctx, NULL, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } if (nfsvers == NFS_VER3) { struct timespec premtime = { 0, 0 }; nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid); nfsmout_if(error); /* if file hadn't changed, update cached mtime */ - if (nfstimespeccmp(&np->n_mtime, &premtime, ==)) + if (nfstimespeccmp(&np->n_mtime, &premtime, ==)) { NFS_CHANGED_UPDATE(nfsvers, np, &np->n_vattr); + } /* if directory hadn't changed, update namecache mtime */ if ((vnode_vtype(NFSTOV(np)) == VDIR) && - nfstimespeccmp(&np->n_ncmtime, &premtime, ==)) + nfstimespeccmp(&np->n_ncmtime, &premtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, np, &np->n_vattr); - if (!wccpostattr) + } + if (!wccpostattr) { NATTRINVALIDATE(np); + } error = status; } else { - if (!error) + if (!error) { error = status; + } nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); } /* @@ -2247,11 +2386,12 @@ nfs3_setattr_rpc( NATTRINVALIDATE(np); } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -2262,12 +2402,12 @@ nfsmout: int nfs_vnop_lookup( struct vnop_lookup_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; struct componentname *cnp = ap->a_cnp; @@ -2300,11 
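The NFSv2 SETATTR arguments carried no discriminators: "don't change" is the all-ones sentinel, and timestamps travel as seconds plus microseconds, with an unknown nanoseconds field also mapping to the sentinel, exactly the encoding in the v2 branch above. A self-contained encoder for one such timestamp:

#include <stdint.h>

#define SATTR_UNSET 0xffffffffu         /* v2 "don't change" sentinel */

struct v2_time { uint32_t sec, usec; };

/* Encode an optional timestamp for a v2 SETATTR request. */
struct v2_time encode_v2_time(int is_set, int64_t sec, long nsec)
{
    struct v2_time t = { SATTR_UNSET, SATTR_UNSET };
    if (is_set) {
        t.sec  = (uint32_t)sec;
        t.usec = (nsec != -1) ? (uint32_t)(nsec / 1000) : SATTR_UNSET;
    }
    return t;
}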
+2440,13 @@ nfs_vnop_lookup( nfsvers = nmp->nm_vers; negnamecache = !NMFLAG(nmp, NONEGNAMECACHE); - if ((error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)))) + if ((error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)))) { goto error_return; + } /* nfs_getattr() will check changed and purge caches */ - if ((error = nfs_getattr(dnp, NULL, ctx, NGA_CACHED))) + if ((error = nfs_getattr(dnp, NULL, ctx, NGA_CACHED))) { goto error_return; + } error = cache_lookup(dvp, vpp, cnp); switch (error) { @@ -2322,9 +2464,10 @@ nfs_vnop_lookup( error = -1; } } - if (error != -1) /* cache miss */ + if (error != -1) { /* cache miss */ break; - /* FALLTHROUGH */ + } + /* FALLTHROUGH */ case -1: /* cache hit, not really an error */ OSAddAtomic64(1, &nfsstats.lookupcache_hits); @@ -2340,7 +2483,7 @@ nfs_vnop_lookup( /* compute actual success/failure based on accessibility */ error = nfs_vnop_access(&naa); - /* FALLTHROUGH */ + /* FALLTHROUGH */ default: /* unexpected error from cache_lookup */ goto error_return; @@ -2349,10 +2492,12 @@ nfs_vnop_lookup( /* skip lookup, if we know who we are: "." or ".." */ isdot = isdotdot = 0; if (cnp->cn_nameptr[0] == '.') { - if (cnp->cn_namelen == 1) + if (cnp->cn_namelen == 1) { isdot = 1; - if ((cnp->cn_namelen == 2) && (cnp->cn_nameptr[1] == '.')) + } + if ((cnp->cn_namelen == 2) && (cnp->cn_nameptr[1] == '.')) { isdotdot = 1; + } } if (isdotdot || isdot) { fh.fh_len = 0; @@ -2371,7 +2516,7 @@ nfs_vnop_lookup( goto error_return; } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) && - (cnp->cn_namelen > (int)nmp->nm_fsattr.nfsa_maxname)) { + (cnp->cn_namelen > (int)nmp->nm_fsattr.nfsa_maxname)) { error = ENAMETOOLONG; goto error_return; } @@ -2413,18 +2558,21 @@ found: } } else if (isdot) { error = vnode_get(dvp); - if (error) + if (error) { goto error_return; + } newvp = dvp; nfs_node_lock_force(dnp); - if (fh.fh_len && (dnp->n_xid <= xid)) + if (fh.fh_len && (dnp->n_xid <= xid)) { nfs_loadattrcache(dnp, &nvattr, &xid, 0); + } nfs_node_unlock(dnp); } else { ngflags = (cnp->cn_flags & MAKEENTRY) ? 
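The lookup fast path above never goes over the wire for "." or "..", since the client already holds both vnodes. The classification it uses is just:

#include <string.h>

void classify_name(const char *name, size_t len, int *isdot, int *isdotdot)
{
    *isdot = *isdotdot = 0;
    if (len >= 1 && name[0] == '.') {
        if (len == 1)
            *isdot = 1;
        else if (len == 2 && name[1] == '.')
            *isdotdot = 1;
    }
}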
NG_MAKEENTRY : 0; error = nfs_nget(mp, dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, ngflags, &np); - if (error) + if (error) { goto error_return; + } newvp = NFSTOV(np); nfs_node_unlock(np); } @@ -2434,10 +2582,11 @@ nfsmout: if (error) { if (((cnp->cn_nameiop == CREATE) || (cnp->cn_nameiop == RENAME)) && (flags & ISLASTCN) && (error == ENOENT)) { - if (vnode_mount(dvp) && vnode_vfsisrdonly(dvp)) + if (vnode_mount(dvp) && vnode_vfsisrdonly(dvp)) { error = EROFS; - else + } else { error = EJUSTRETURN; + } } } if ((error == ENOENT) && (cnp->cn_flags & MAKEENTRY) && @@ -2450,13 +2599,14 @@ nfsmout: } error_return: NVATTR_CLEANUP(&nvattr); - if (!busyerror) + if (!busyerror) { nfs_node_clear_busy(dnp); + } if (error && *vpp) { - vnode_put(*vpp); + vnode_put(*vpp); *vpp = NULLVP; } - return (error); + return error; } int nfs_readlink_nocache = DEFAULT_READLINK_NOCACHE; @@ -2467,11 +2617,11 @@ int nfs_readlink_nocache = DEFAULT_READLINK_NOCACHE; int nfs_vnop_readlink( struct vnop_readlink_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct uio *a_uio; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct uio *a_uio; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; nfsnode_t np = VTONFS(ap->a_vp); @@ -2483,45 +2633,49 @@ nfs_vnop_readlink( struct timespec ts; int timeo; - if (vnode_vtype(ap->a_vp) != VLNK) - return (EPERM); + if (vnode_vtype(ap->a_vp) != VLNK) { + return EPERM; + } - if (uio_resid(uio) == 0) - return (0); - if (uio_offset(uio) < 0) - return (EINVAL); + if (uio_resid(uio) == 0) { + return 0; + } + if (uio_offset(uio) < 0) { + return EINVAL; + } nmp = VTONMP(ap->a_vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - + /* nfs_getattr() will check changed and purge caches */ if ((error = nfs_getattr(np, NULL, ctx, nfs_readlink_nocache ? 
NGA_UNCACHED : NGA_CACHED))) { FSDBG(531, np, 0xd1e0001, 0, error); - return (error); + return error; } if (nfs_readlink_nocache) { timeo = nfs_attrcachetimeout(np); nanouptime(&ts); } - + retry: OSAddAtomic64(1, &nfsstats.biocache_readlinks); error = nfs_buf_get(np, 0, NFS_MAXPATHLEN, vfs_context_thread(ctx), NBLK_META, &bp); if (error) { FSDBG(531, np, 0xd1e0002, 0, error); - return (error); + return error; } if (nfs_readlink_nocache) { NFS_VNOP_DBG("timeo = %d ts.tv_sec = %ld need refresh = %d cached = %d\n", timeo, ts.tv_sec, - (np->n_rltim.tv_sec + timeo) < ts.tv_sec || nfs_readlink_nocache > 1, - ISSET(bp->nb_flags, NB_CACHE) == NB_CACHE); + (np->n_rltim.tv_sec + timeo) < ts.tv_sec || nfs_readlink_nocache > 1, + ISSET(bp->nb_flags, NB_CACHE) == NB_CACHE); /* n_rltim is synchronized by the associated nfs buf */ - if (ISSET(bp->nb_flags, NB_CACHE) && ((nfs_readlink_nocache > 1) || ((np->n_rltim.tv_sec + timeo) < ts.tv_sec))) { + if (ISSET(bp->nb_flags, NB_CACHE) && ((nfs_readlink_nocache > 1) || ((np->n_rltim.tv_sec + timeo) < ts.tv_sec))) { SET(bp->nb_flags, NB_INVAL); nfs_buf_release(bp, 0); goto retry; @@ -2536,8 +2690,9 @@ readagain: if (error == ESTALE) { NFS_VNOP_DBG("Stale FH from readlink rpc\n"); error = nfs_refresh_fh(np, ctx); - if (error == 0) + if (error == 0) { goto readagain; + } } SET(bp->nb_flags, NB_ERROR); bp->nb_error = error; @@ -2551,12 +2706,13 @@ readagain: } else { NFS_VNOP_DBG("got cached link of %.*s\n", bp->nb_validend, (char *)bp->nb_data); } - - if (!error && (bp->nb_validend > 0)) + + if (!error && (bp->nb_validend > 0)) { error = uiomove(bp->nb_data, bp->nb_validend, uio); + } FSDBG(531, np, bp->nb_validend, 0, error); nfs_buf_release(bp, 1); - return (error); + return error; } /* @@ -2572,8 +2728,9 @@ nfs3_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx) struct nfsm_chain nmreq, nmrep; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -2583,12 +2740,15 @@ nfs3_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx) nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request(np, NULL, &nmreq, NFSPROC_READLINK, ctx, NULL, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; - if (nfsvers == NFS_VER3) + } + if (nfsvers == NFS_VER3) { nfsm_chain_postop_attr_update(error, &nmrep, np, &xid); - if (!error) + } + if (!error) { error = status; + } nfsm_chain_get_32(error, &nmrep, len); nfsmout_if(error); if ((nfsvers == NFS_VER2) && (len > *buflenp)) { @@ -2596,20 +2756,23 @@ nfs3_readlink_rpc(nfsnode_t np, char *buf, uint32_t *buflenp, vfs_context_t ctx) goto nfsmout; } if (len >= *buflenp) { - if (np->n_size && (np->n_size < *buflenp)) + if (np->n_size && (np->n_size < *buflenp)) { len = np->n_size; - else + } else { len = *buflenp - 1; + } } nfsm_chain_get_opaque(error, &nmrep, len, buf); - if (!error) + if (!error) { *buflenp = len; + } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -2629,8 +2792,9 @@ nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx) FSDBG_TOP(536, np, uio_offset(uio), uio_resid(uio), 0); nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nmrsize = 
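Cached symlink contents are revalidated against the attribute-cache timeout stamped in n_rltim, and a nocache setting above 1 defeats the cache outright, as the retry branch above shows. The staleness test, pulled out on its own (parameter names are illustrative):

#include <time.h>

int link_cache_stale(time_t cached_at, time_t timeout, time_t now,
                     int nocache_mode)
{
    if (nocache_mode > 1)
        return 1;                       /* caching disabled entirely */
    return (cached_at + timeout) < now; /* past the attr-cache window */
}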
nmp->nm_rsize; @@ -2638,7 +2802,7 @@ nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx) tsiz = uio_resid(uio); if ((nfsvers == NFS_VER2) && ((uint64_t)(txoffset + tsiz) > 0xffffffffULL)) { FSDBG_BOT(536, np, uio_offset(uio), uio_resid(uio), EFBIG); - return (EFBIG); + return EFBIG; } while (tsiz > 0) { @@ -2648,12 +2812,14 @@ nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx) error = EIO; break; } - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { stategenid = nmp->nm_stategenid; + } error = nmp->nm_funcs->nf_read_rpc_async(np, txoffset, len, - vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, &req); + if (!error) { error = nmp->nm_funcs->nf_read_rpc_async_finish(np, req, uio, &retlen, &eof); + } if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && (++restart <= nfs_mount_state_max_restarts(nmp))) { /* guard against no progress */ lck_mtx_lock(&nmp->nm_lock); @@ -2665,25 +2831,30 @@ nfs_read_rpc(nfsnode_t np, uio_t uio, vfs_context_t ctx) if (np->n_flag & NREVOKE) { error = EIO; } else { - if (error == NFSERR_GRACE) - tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); - if (!(error = nfs_mount_state_wait_for_recovery(nmp))) + if (error == NFSERR_GRACE) { + tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz); + } + if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { continue; + } } } - if (error) + if (error) { break; + } txoffset += retlen; tsiz -= retlen; if (nfsvers != NFS_VER2) { - if (eof || (retlen == 0)) + if (eof || (retlen == 0)) { tsiz = 0; - } else if (retlen < len) + } + } else if (retlen < len) { tsiz = 0; + } } FSDBG_BOT(536, np, eof, uio_resid(uio), error); - return (error); + return error; } int @@ -2701,8 +2872,9 @@ nfs3_read_rpc_async( struct nfsm_chain nmreq; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmreq); @@ -2721,7 +2893,7 @@ nfs3_read_rpc_async( error = nfs_request_async(np, NULL, &nmreq, NFSPROC_READ, thd, cred, NULL, 0, cb, reqp); nfsmout: nfsm_chain_cleanup(&nmreq); - return (error); + return error; } int @@ -2741,39 +2913,46 @@ nfs3_read_rpc_async_finish( nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { nfs_request_async_cancel(req); - return (ENXIO); + return ENXIO; } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmrep); error = nfs_request_async_finish(req, &nmrep, &xid, &status); - if (error == EINPROGRESS) /* async request restarted */ - return (error); + if (error == EINPROGRESS) { /* async request restarted */ + return error; + } - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; - if (nfsvers == NFS_VER3) + } + if (nfsvers == NFS_VER3) { nfsm_chain_postop_attr_update(error, &nmrep, np, &xid); - if (!error) + } + if (!error) { error = status; + } if (nfsvers == NFS_VER3) { nfsm_chain_adv(error, &nmrep, NFSX_UNSIGNED); nfsm_chain_get_32(error, &nmrep, eof); } else { nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); } - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_get_32(error, &nmrep, retlen); - if ((nfsvers == NFS_VER2) && (retlen > *lenp)) + if ((nfsvers == NFS_VER2) && (retlen > *lenp)) { error = EBADRPC; + } nfsmout_if(error); error = nfsm_chain_get_uio(&nmrep, MIN(retlen, *lenp), uio); if (eofp) { if (nfsvers == NFS_VER3) { - if (!eof && !retlen) + if (!eof && !retlen) { eof = 1; + } } else if (retlen < *lenp) { eof = 1; } 
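nfs_read_rpc() above splits a large request into rsize-bounded RPCs and walks the offset forward by the returned length, stopping at EOF or when the range is exhausted. A minimal model of that loop; the struct rpc_read transport is hypothetical:

#include <stddef.h>
#include <stdint.h>

struct rpc_read {
    size_t (*read)(uint64_t off, size_t len, int *eof);
    size_t max_rpc;                     /* the server's rsize */
};

uint64_t chunked_read(struct rpc_read *t, uint64_t off, size_t resid)
{
    uint64_t done = 0;
    while (resid > 0) {
        int eof = 0;
        size_t len = resid < t->max_rpc ? resid : t->max_rpc;
        size_t got = t->read(off + done, len, &eof);
        done += got;
        resid -= got;
        if (eof || got == 0)
            break;                      /* hit end of file or no progress */
    }
    return done;
}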
@@ -2782,7 +2961,7 @@ nfs3_read_rpc_async_finish( *lenp = MIN(retlen, *lenp); nfsmout: nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -2791,12 +2970,12 @@ nfsmout: int nfs_vnop_write( struct vnop_write_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; uio_t uio = ap->a_uio; @@ -2810,7 +2989,7 @@ nfs_vnop_write( int n, on, error = 0; off_t boff, start, end; uio_t auio; - char auio_buf [ UIO_SIZEOF(1) ]; + char auio_buf[UIO_SIZEOF(1)]; thread_t thd; kauth_cred_t cred; @@ -2818,7 +2997,7 @@ nfs_vnop_write( if (vnode_vtype(vp) != VREG) { FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), EIO); - return (EIO); + return EIO; } thd = vfs_context_thread(ctx); @@ -2829,7 +3008,7 @@ nfs_vnop_write( if ((error = nfs_node_lock(np))) { nfs_data_unlock(np); FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), error); - return (error); + return error; } np->n_wrbusy++; @@ -2841,13 +3020,14 @@ nfs_vnop_write( np->n_flag &= ~NNEEDINVALIDATE; nfs_node_unlock(np); nfs_data_unlock(np); - nfs_vinvalbuf(vp, V_SAVE|V_IGNORE_WRITEERR, ctx, 1); + nfs_vinvalbuf(vp, V_SAVE | V_IGNORE_WRITEERR, ctx, 1); nfs_data_lock(np, NFS_DATA_LOCK_SHARED); } else { nfs_node_unlock(np); } - if (error) + if (error) { goto out; + } biosize = nmp->nm_biosize; @@ -2884,8 +3064,9 @@ nfs_vnop_write( FSDBG_BOT(515, np, uio_offset(uio), 0xbad0ff, error); goto out; } - if (uio_resid(uio) == 0) + if (uio_resid(uio) == 0) { goto out; + } if (((uio_offset(uio) + uio_resid(uio)) > (off_t)np->n_size) && !(ioflag & IO_APPEND)) { /* @@ -2904,8 +3085,9 @@ nfs_vnop_write( lbn = uio_offset(uio) / biosize; if (eofoff && (eofbn < lbn)) { - if ((error = nfs_buf_get(np, eofbn, biosize, thd, NBLK_WRITE|NBLK_ONLYVALID, &eofbp))) + if ((error = nfs_buf_get(np, eofbn, biosize, thd, NBLK_WRITE | NBLK_ONLYVALID, &eofbp))) { goto out; + } np->n_size += (biosize - eofoff); nfs_node_lock_force(np); CLR(np->n_flag, NUPDATESIZE); @@ -2923,15 +3105,16 @@ nfs_vnop_write( */ char *d; int i; - if (ioflag & IO_NOCACHE) + if (ioflag & IO_NOCACHE) { SET(eofbp->nb_flags, NB_NOCACHE); + } NFS_BUF_MAP(eofbp); FSDBG(516, eofbp, eofoff, biosize - eofoff, 0xe0fff01e); d = eofbp->nb_data; - i = eofoff/PAGE_SIZE; + i = eofoff / PAGE_SIZE; while (eofoff < biosize) { int poff = eofoff & PAGE_MASK; - if (!poff || NBPGVALID(eofbp,i)) { + if (!poff || NBPGVALID(eofbp, i)) { bzero(d + eofoff, PAGE_SIZE - poff); NBPGVALID_SET(eofbp, i); } @@ -2948,8 +3131,9 @@ nfs_vnop_write( lbn = uio_offset(uio) / biosize; on = uio_offset(uio) % biosize; n = biosize - on; - if (uio_resid(uio) < n) + if (uio_resid(uio) < n) { n = uio_resid(uio); + } again: /* * Get a cache block for writing. The range to be written is @@ -2958,13 +3142,15 @@ again: * contiguous with the existing dirty region. */ error = nfs_buf_get(np, lbn, biosize, thd, NBLK_WRITE, &bp); - if (error) + if (error) { goto out; + } /* map the block because we know we're going to write to it */ NFS_BUF_MAP(bp); - if (ioflag & IO_NOCACHE) + if (ioflag & IO_NOCACHE) { SET(bp->nb_flags, NB_NOCACHE); + } if (!IS_VALID_CRED(bp->nb_wcred)) { kauth_cred_ref(cred); @@ -2985,52 +3171,56 @@ again: * that's just masquerading as new written data.) 
*/ if (bp->nb_dirtyend > 0) { - if (on > bp->nb_dirtyend || (on + n) < bp->nb_dirtyoff || bp->nb_dirty) { - FSDBG(515, np, uio_offset(uio), bp, 0xd15c001); - /* write/commit buffer "synchronously" */ - /* (NB_STABLE indicates that data writes should be FILESYNC) */ - CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); - SET(bp->nb_flags, (NB_ASYNC | NB_STABLE)); - error = nfs_buf_write(bp); - if (error) - goto out; - goto again; - } + if (on > bp->nb_dirtyend || (on + n) < bp->nb_dirtyoff || bp->nb_dirty) { + FSDBG(515, np, uio_offset(uio), bp, 0xd15c001); + /* write/commit buffer "synchronously" */ + /* (NB_STABLE indicates that data writes should be FILESYNC) */ + CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); + SET(bp->nb_flags, (NB_ASYNC | NB_STABLE)); + error = nfs_buf_write(bp); + if (error) { + goto out; + } + goto again; + } } else if (bp->nb_dirty) { - int firstpg, lastpg; - u_int32_t pagemask; - /* calculate write range pagemask */ - firstpg = on/PAGE_SIZE; - lastpg = (on+n-1)/PAGE_SIZE; - pagemask = ((1 << (lastpg+1)) - 1) & ~((1 << firstpg) - 1); - /* check if there are dirty pages outside the write range */ - if (bp->nb_dirty & ~pagemask) { - FSDBG(515, np, uio_offset(uio), bp, 0xd15c002); - /* write/commit buffer "synchronously" */ - /* (NB_STABLE indicates that data writes should be FILESYNC) */ - CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); - SET(bp->nb_flags, (NB_ASYNC | NB_STABLE)); - error = nfs_buf_write(bp); - if (error) - goto out; - goto again; - } - /* if the first or last pages are already dirty */ - /* make sure that the dirty range encompasses those pages */ - if (NBPGDIRTY(bp,firstpg) || NBPGDIRTY(bp,lastpg)) { - FSDBG(515, np, uio_offset(uio), bp, 0xd15c003); - bp->nb_dirtyoff = min(on, firstpg * PAGE_SIZE); - if (NBPGDIRTY(bp,lastpg)) { - bp->nb_dirtyend = (lastpg+1) * PAGE_SIZE; - /* clip to EOF */ - if (NBOFF(bp) + bp->nb_dirtyend > (off_t)np->n_size) { - bp->nb_dirtyend = np->n_size - NBOFF(bp); - if (bp->nb_dirtyoff >= bp->nb_dirtyend) - bp->nb_dirtyoff = bp->nb_dirtyend = 0; - } - } else - bp->nb_dirtyend = on+n; - } + int firstpg, lastpg; + u_int32_t pagemask; + /* calculate write range pagemask */ + firstpg = on / PAGE_SIZE; + lastpg = (on + n - 1) / PAGE_SIZE; + pagemask = ((1 << (lastpg + 1)) - 1) & ~((1 << firstpg) - 1); + /* check if there are dirty pages outside the write range */ + if (bp->nb_dirty & ~pagemask) { + FSDBG(515, np, uio_offset(uio), bp, 0xd15c002); + /* write/commit buffer "synchronously" */ + /* (NB_STABLE indicates that data writes should be FILESYNC) */ + CLR(bp->nb_flags, (NB_DONE | NB_ERROR | NB_INVAL)); + SET(bp->nb_flags, (NB_ASYNC | NB_STABLE)); + error = nfs_buf_write(bp); + if (error) { + goto out; + } + goto again; + } + /* if the first or last pages are already dirty */ + /* make sure that the dirty range encompasses those pages */ + if (NBPGDIRTY(bp, firstpg) || NBPGDIRTY(bp, lastpg)) { + FSDBG(515, np, uio_offset(uio), bp, 0xd15c003); + bp->nb_dirtyoff = min(on, firstpg * PAGE_SIZE); + if (NBPGDIRTY(bp, lastpg)) { + bp->nb_dirtyend = (lastpg + 1) * PAGE_SIZE; + /* clip to EOF */ + if (NBOFF(bp) + bp->nb_dirtyend > (off_t)np->n_size) { + bp->nb_dirtyend = np->n_size - NBOFF(bp); + if (bp->nb_dirtyoff >= bp->nb_dirtyend) { + bp->nb_dirtyoff = bp->nb_dirtyend = 0; + } + } + } else { + bp->nb_dirtyend = on + n; + } + } } /* @@ -3049,28 +3239,29 @@ again: /* and the block is flagged as being cached... 
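Both dirty-page checks above reduce to one mask: the pages a [on, on+n) write touches within the block. Pulled out as a standalone function:

#include <stdint.h>

#define PAGE_SIZE 4096u

/* Bitmask of pages covered by a write of n bytes at offset on inside
 * one buffer (n > 0); widened to 64-bit to keep the shifts defined. */
uint32_t write_pagemask(uint32_t on, uint32_t n)
{
    uint32_t firstpg = on / PAGE_SIZE;
    uint32_t lastpg  = (on + n - 1) / PAGE_SIZE;
    return (uint32_t)(((1ull << (lastpg + 1)) - 1) &
                     ~((1ull << firstpg) - 1));
}

For on = 4096 and n = 8192 with 4 KB pages this yields 0x6, pages 1 and 2; any nb_dirty bit outside that mask forces the synchronous flush above.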
*/ if ((lbn == eofbn) && ISSET(bp->nb_flags, NB_CACHE)) { /* ...check that all pages in buffer are valid */ - int endpg = ((neweofoff ? neweofoff : biosize) - 1)/PAGE_SIZE; + int endpg = ((neweofoff ? neweofoff : biosize) - 1) / PAGE_SIZE; u_int32_t pagemask; /* pagemask only has to extend to last page being written to */ - pagemask = (1 << (endpg+1)) - 1; + pagemask = (1 << (endpg + 1)) - 1; FSDBG(515, 0xb1ffa001, bp->nb_valid, pagemask, 0); if ((bp->nb_valid & pagemask) != pagemask) { /* zerofill any hole */ if (on > bp->nb_validend) { int i; - for (i=bp->nb_validend/PAGE_SIZE; i <= (on - 1)/PAGE_SIZE; i++) + for (i = bp->nb_validend / PAGE_SIZE; i <= (on - 1) / PAGE_SIZE; i++) { NBPGVALID_SET(bp, i); + } NFS_BUF_MAP(bp); FSDBG(516, bp, bp->nb_validend, on - bp->nb_validend, 0xf01e); bzero((char *)bp->nb_data + bp->nb_validend, - on - bp->nb_validend); + on - bp->nb_validend); } /* zerofill any trailing data in the last page */ if (neweofoff) { NFS_BUF_MAP(bp); FSDBG(516, bp, neweofoff, PAGE_SIZE - (neweofoff & PAGE_MASK), 0xe0f); bzero((char *)bp->nb_data + neweofoff, - PAGE_SIZE - (neweofoff & PAGE_MASK)); + PAGE_SIZE - (neweofoff & PAGE_MASK)); } } } @@ -3088,8 +3279,9 @@ again: */ if (NBOFF(bp) + bp->nb_dirtyend > (off_t)np->n_size) { bp->nb_dirtyend = np->n_size - NBOFF(bp); - if (bp->nb_dirtyoff >= bp->nb_dirtyend) + if (bp->nb_dirtyoff >= bp->nb_dirtyend) { bp->nb_dirtyoff = bp->nb_dirtyend = 0; + } } /* * UBC doesn't handle partial pages, so we need to make sure @@ -3118,19 +3310,20 @@ again: int firstpg, lastpg, dirtypg; int firstpgoff, lastpgoff; start = end = -1; - firstpg = on/PAGE_SIZE; + firstpg = on / PAGE_SIZE; firstpgoff = on & PAGE_MASK; - lastpg = (on+n-1)/PAGE_SIZE; - lastpgoff = (on+n) & PAGE_MASK; - if (firstpgoff && !NBPGVALID(bp,firstpg)) { + lastpg = (on + n - 1) / PAGE_SIZE; + lastpgoff = (on + n) & PAGE_MASK; + if (firstpgoff && !NBPGVALID(bp, firstpg)) { /* need to read start of first page */ start = firstpg * PAGE_SIZE; end = start + firstpgoff; } - if (lastpgoff && !NBPGVALID(bp,lastpg)) { + if (lastpgoff && !NBPGVALID(bp, lastpg)) { /* need to read end of last page */ - if (start < 0) + if (start < 0) { start = (lastpg * PAGE_SIZE) + lastpgoff; + } end = (lastpg + 1) * PAGE_SIZE; } if (ISSET(bp->nb_flags, NB_NOCACHE)) { @@ -3145,8 +3338,9 @@ again: * and the COMMIT. * (NB_STABLE indicates that data writes should be FILESYNC) */ - if (end > start) + if (end > start) { SET(bp->nb_flags, NB_STABLE); + } goto skipread; } if (end > start) { @@ -3155,26 +3349,30 @@ again: /* first, check for dirty pages in between */ /* if there are, we'll have to do two reads because */ /* we don't want to overwrite the dirty pages. */ - for (dirtypg=start/PAGE_SIZE; dirtypg <= (end-1)/PAGE_SIZE; dirtypg++) - if (NBPGDIRTY(bp,dirtypg)) + for (dirtypg = start / PAGE_SIZE; dirtypg <= (end - 1) / PAGE_SIZE; dirtypg++) { + if (NBPGDIRTY(bp, dirtypg)) { break; + } + } /* if start is at beginning of page, try */ /* to get any preceeding pages as well. 
*/ if (!(start & PAGE_MASK)) { /* stop at next dirty/valid page or start of block */ - for (; start > 0; start-=PAGE_SIZE) - if (NBPGVALID(bp,((start-1)/PAGE_SIZE))) + for (; start > 0; start -= PAGE_SIZE) { + if (NBPGVALID(bp, ((start - 1) / PAGE_SIZE))) { break; + } + } } NFS_BUF_MAP(bp); /* setup uio for read(s) */ boff = NBOFF(bp); auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, - &auio_buf, sizeof(auio_buf)); + &auio_buf, sizeof(auio_buf)); - if (dirtypg <= (end-1)/PAGE_SIZE) { + if (dirtypg <= (end - 1) / PAGE_SIZE) { /* there's a dirty page in the way, so just do two reads */ /* we'll read the preceding data here */ uio_reset(auio, boff + start, UIO_SYSSPACE, UIO_READ); @@ -3182,7 +3380,7 @@ again: error = nfs_read_rpc(np, auio, ctx); if (error) { /* couldn't read the data, so treat buffer as synchronous NOCACHE */ - SET(bp->nb_flags, (NB_NOCACHE|NB_STABLE)); + SET(bp->nb_flags, (NB_NOCACHE | NB_STABLE)); goto skipread; } if (uio_resid(auio) > 0) { @@ -3191,30 +3389,36 @@ again: } if (!error) { /* update validoff/validend if necessary */ - if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) + if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) { bp->nb_validoff = start; - if ((bp->nb_validend < 0) || (bp->nb_validend < on)) + } + if ((bp->nb_validend < 0) || (bp->nb_validend < on)) { bp->nb_validend = on; - if ((off_t)np->n_size > boff + bp->nb_validend) + } + if ((off_t)np->n_size > boff + bp->nb_validend) { bp->nb_validend = min(np->n_size - (boff + start), biosize); + } /* validate any pages before the write offset */ - for (; start < on/PAGE_SIZE; start+=PAGE_SIZE) - NBPGVALID_SET(bp, start/PAGE_SIZE); + for (; start < on / PAGE_SIZE; start += PAGE_SIZE) { + NBPGVALID_SET(bp, start / PAGE_SIZE); + } } /* adjust start to read any trailing data */ - start = on+n; + start = on + n; } /* if end is at end of page, try to */ /* get any following pages as well. 
*/ if (!(end & PAGE_MASK)) { /* stop at next valid page or end of block */ - for (; end < biosize; end+=PAGE_SIZE) - if (NBPGVALID(bp,end/PAGE_SIZE)) + for (; end < biosize; end += PAGE_SIZE) { + if (NBPGVALID(bp, end / PAGE_SIZE)) { break; + } + } } - if (((boff+start) >= (off_t)np->n_size) || + if (((boff + start) >= (off_t)np->n_size) || ((start >= on) && ((boff + on + n) >= (off_t)np->n_size))) { /* * Either this entire read is beyond the current EOF @@ -3233,7 +3437,7 @@ again: error = nfs_read_rpc(np, auio, ctx); if (error) { /* couldn't read the data, so treat buffer as synchronous NOCACHE */ - SET(bp->nb_flags, (NB_NOCACHE|NB_STABLE)); + SET(bp->nb_flags, (NB_NOCACHE | NB_STABLE)); goto skipread; } if (uio_resid(auio) > 0) { @@ -3243,18 +3447,23 @@ again: } if (!error) { /* update validoff/validend if necessary */ - if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) + if ((bp->nb_validoff < 0) || (bp->nb_validoff > start)) { bp->nb_validoff = start; - if ((bp->nb_validend < 0) || (bp->nb_validend < end)) + } + if ((bp->nb_validend < 0) || (bp->nb_validend < end)) { bp->nb_validend = end; - if ((off_t)np->n_size > boff + bp->nb_validend) + } + if ((off_t)np->n_size > boff + bp->nb_validend) { bp->nb_validend = min(np->n_size - (boff + start), biosize); + } /* validate any pages before the write offset's page */ - for (; start < (off_t)trunc_page_32(on); start+=PAGE_SIZE) - NBPGVALID_SET(bp, start/PAGE_SIZE); + for (; start < (off_t)trunc_page_32(on); start += PAGE_SIZE) { + NBPGVALID_SET(bp, start / PAGE_SIZE); + } /* validate any pages after the range of pages being written to */ - for (; (end - 1) > (off_t)round_page_32(on+n-1); end-=PAGE_SIZE) - NBPGVALID_SET(bp, (end-1)/PAGE_SIZE); + for (; (end - 1) > (off_t)round_page_32(on + n - 1); end -= PAGE_SIZE) { + NBPGVALID_SET(bp, (end - 1) / PAGE_SIZE); + } } /* Note: pages being written to will be validated when written */ } @@ -3281,8 +3490,8 @@ skipread: /* validate any pages written to */ start = on & ~PAGE_MASK; - for (; start < on+n; start += PAGE_SIZE) { - NBPGVALID_SET(bp, start/PAGE_SIZE); + for (; start < on + n; start += PAGE_SIZE) { + NBPGVALID_SET(bp, start / PAGE_SIZE); /* * This may seem a little weird, but we don't actually set the * dirty bits for writes. 
This is because we keep the dirty range @@ -3308,8 +3517,9 @@ skipread: bp->nb_validoff = min(bp->nb_validoff, bp->nb_dirtyoff); bp->nb_validend = max(bp->nb_validend, bp->nb_dirtyend); } - if (!ISSET(bp->nb_flags, NB_CACHE)) + if (!ISSET(bp->nb_flags, NB_CACHE)) { nfs_buf_normalize_valid_range(np, bp); + } /* * Since this block is being modified, it must be written @@ -3327,26 +3537,29 @@ skipread: if (ioflag & IO_SYNC) { error = nfs_buf_write(bp); - if (error) + if (error) { goto out; + } } else if (((n + on) == biosize) || (ioflag & IO_APPEND) || - (ioflag & IO_NOCACHE) || ISSET(bp->nb_flags, NB_NOCACHE)) { + (ioflag & IO_NOCACHE) || ISSET(bp->nb_flags, NB_NOCACHE)) { SET(bp->nb_flags, NB_ASYNC); error = nfs_buf_write(bp); - if (error) + if (error) { goto out; + } } else { /* If the block wasn't already delayed: charge for the write */ if (!ISSET(bp->nb_flags, NB_DELWRI)) { proc_t p = vfs_context_proc(ctx); - if (p && p->p_stats) + if (p && p->p_stats) { OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); + } } nfs_buf_write_delayed(bp); } - if (np->n_needcommitcnt >= NFS_A_LOT_OF_NEEDCOMMITS) - nfs_flushcommits(np, 1); - + if (np->n_needcommitcnt >= NFS_A_LOT_OF_NEEDCOMMITS) { + nfs_flushcommits(np, 1); + } } while (uio_resid(uio) > 0 && n > 0); out: @@ -3355,7 +3568,7 @@ out: nfs_node_unlock(np); nfs_data_unlock(np); FSDBG_BOT(515, np, uio_offset(uio), uio_resid(uio), error); - return (error); + return error; } @@ -3393,13 +3606,15 @@ nfs_write_rpc2( #if DIAGNOSTIC /* XXX limitation based on need to back up uio on short write */ - if (uio_iovcnt(uio) != 1) + if (uio_iovcnt(uio) != 1) { panic("nfs3_write_rpc: iovcnt > 1"); + } #endif FSDBG_TOP(537, np, uio_offset(uio), uio_resid(uio), *iomodep); nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nmwsize = nmp->nm_wsize; @@ -3409,12 +3624,12 @@ nfs_write_rpc2( totalsize = tsiz = uio_resid(uio); if ((nfsvers == NFS_VER2) && ((uint64_t)(uio_offset(uio) + tsiz) > 0xffffffffULL)) { FSDBG_BOT(537, np, uio_offset(uio), uio_resid(uio), EFBIG); - return (EFBIG); + return EFBIG; } uio_save = uio_duplicate(uio); if (uio_save == NULL) { - return (EIO); + return EIO; } while (tsiz > 0) { @@ -3424,14 +3639,17 @@ nfs_write_rpc2( error = EIO; break; } - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { stategenid = nmp->nm_stategenid; + } error = nmp->nm_funcs->nf_write_rpc_async(np, uio, len, thd, cred, *iomodep, NULL, &req); - if (!error) + if (!error) { error = nmp->nm_funcs->nf_write_rpc_async_finish(np, req, &commit, &rlen, &wverf2); + } nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { error = ENXIO; + } if ((nmp->nm_vers >= NFS_VER4) && nfs_mount_state_error_should_restart(error) && (++restart <= nfs_mount_state_max_restarts(nmp))) { /* guard against no progress */ lck_mtx_lock(&nmp->nm_lock); @@ -3443,14 +3661,17 @@ nfs_write_rpc2( if (np->n_flag & NREVOKE) { error = EIO; } else { - if (error == NFSERR_GRACE) - tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); - if (!(error = nfs_mount_state_wait_for_recovery(nmp))) + if (error == NFSERR_GRACE) { + tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz); + } + if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { continue; + } } } - if (error) + if (error) { break; + } if (nfsvers == NFS_VER2) { tsiz -= len; continue; @@ -3465,8 +3686,9 @@ nfs_write_rpc2( } /* return lowest commit level returned */ - if (commit < committed) + if (commit < committed) { committed 
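The tail of the write path picks one of three fates for the buffer: a synchronous write for IO_SYNC, an immediate asynchronous write when the block is full (or the write is append/nocache), and a delayed write otherwise. As a small decision function, with illustrative flag values:

enum wr_policy { WR_SYNC, WR_ASYNC, WR_DELAYED };

#define IO_SYNC    0x1
#define IO_APPEND  0x2
#define IO_NOCACHE 0x4

enum wr_policy pick_write_policy(int ioflag, int block_full, int nocache_buf)
{
    if (ioflag & IO_SYNC)
        return WR_SYNC;                 /* caller wants stable storage now */
    if (block_full || (ioflag & (IO_APPEND | IO_NOCACHE)) || nocache_buf)
        return WR_ASYNC;                /* push it out without waiting */
    return WR_DELAYED;                  /* leave dirty for later clustering */
}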
= commit; + } tsiz -= len; @@ -3481,21 +3703,24 @@ nfs_write_rpc2( error = EIO; break; } - *uio = *uio_save; // Reset the uio back to the start + *uio = *uio_save; // Reset the uio back to the start committed = NFS_WRITE_FILESYNC; wverfset = 0; tsiz = totalsize; } } - if (uio_save) + if (uio_save) { uio_free(uio_save); - if (wverfset && wverfp) + } + if (wverfset && wverfp) { *wverfp = wverf; + } *iomodep = committed; - if (error) + if (error) { uio_setresid(uio, tsiz); + } FSDBG_BOT(537, np, committed, uio_resid(uio), error); - return (error); + return error; } int @@ -3515,18 +3740,20 @@ nfs3_write_rpc_async( struct nfsm_chain nmreq; nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; /* for async mounts, don't bother sending sync write requests */ if ((iomode != NFS_WRITE_UNSTABLE) && nfs_allow_async && - ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) + ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) { iomode = NFS_WRITE_UNSTABLE; + } nfsm_chain_null(&nmreq); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + 5 * NFSX_UNSIGNED + nfsm_rndup(len)); + NFSX_FH(nfsvers) + 5 * NFSX_UNSIGNED + nfsm_rndup(len)); nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize); if (nfsvers == NFS_VER3) { nfsm_chain_add_64(error, &nmreq, uio_offset(uio)); @@ -3545,7 +3772,7 @@ nfs3_write_rpc_async( error = nfs_request_async(np, NULL, &nmreq, NFSPROC_WRITE, thd, cred, NULL, 0, cb, reqp); nfsmout: nfsm_chain_cleanup(&nmreq); - return (error); + return error; } int @@ -3566,37 +3793,44 @@ nfs3_write_rpc_async_finish( nmp = NFSTONMP(np); if (nfs_mount_gone(nmp)) { nfs_request_async_cancel(req); - return (ENXIO); + return ENXIO; } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmrep); error = nfs_request_async_finish(req, &nmrep, &xid, &status); - if (error == EINPROGRESS) /* async request restarted */ - return (error); + if (error == EINPROGRESS) { /* async request restarted */ + return error; + } nmp = NFSTONMP(np); - if (nfs_mount_gone(nmp)) + if (nfs_mount_gone(nmp)) { error = ENXIO; - if (!error && (lockerror = nfs_node_lock(np))) + } + if (!error && (lockerror = nfs_node_lock(np))) { error = lockerror; + } if (nfsvers == NFS_VER3) { struct timespec premtime = { 0, 0 }; nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid); - if (nfstimespeccmp(&np->n_mtime, &premtime, ==)) + if (nfstimespeccmp(&np->n_mtime, &premtime, ==)) { updatemtime = 1; - if (!error) + } + if (!error) { error = status; + } nfsm_chain_get_32(error, &nmrep, rlen); nfsmout_if(error); *rlenp = rlen; - if (rlen <= 0) + if (rlen <= 0) { error = NFSERR_IO; + } nfsm_chain_get_32(error, &nmrep, committed); nfsm_chain_get_64(error, &nmrep, wverf); nfsmout_if(error); - if (wverfp) + if (wverfp) { *wverfp = wverf; + } lck_mtx_lock(&nmp->nm_lock); if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) { nmp->nm_verf = wverf; @@ -3606,22 +3840,26 @@ nfs3_write_rpc_async_finish( } lck_mtx_unlock(&nmp->nm_lock); } else { - if (!error) + if (!error) { error = status; + } nfsm_chain_loadattr(error, &nmrep, np, nfsvers, &xid); nfsmout_if(error); } - if (updatemtime) + if (updatemtime) { NFS_CHANGED_UPDATE(nfsvers, np, &np->n_vattr); + } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); + } nfsm_chain_cleanup(&nmrep); if ((committed != NFS_WRITE_FILESYNC) && nfs_allow_async && - ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) + ((mp = NFSTOMP(np))) && (vfs_flags(mp) & MNT_ASYNC)) { committed = 
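The uio_save bookkeeping above exists for one reason: unstable writes are only as good as the server's write verifier, so if the verifier changes mid-transfer (say the server rebooted), every byte written so far is suspect and the whole transfer is replayed from the saved position at the pessimistic FILESYNC commit level. A sketch of the verifier check (the types are illustrative):

#include <stdint.h>

struct xfer { uint64_t off, resid; };

/* Returns 1 when the caller must rewind to *start and rewrite. */
int check_verifier(uint64_t *saved, int *have, uint64_t got,
                   struct xfer *cur, const struct xfer *start)
{
    if (!*have) {
        *saved = got;                   /* first reply: record the verifier */
        *have = 1;
    } else if (got != *saved) {
        *cur = *start;                  /* verifier changed: replay it all */
        *saved = got;
        return 1;
    }
    return 0;
}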
NFS_WRITE_FILESYNC; + } *iomodep = committed; - return (error); + return error; } /* @@ -3633,13 +3871,13 @@ nfsmout: int nfs3_vnop_mknod( struct vnop_mknod_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { vnode_t dvp = ap->a_dvp; vnode_t *vpp = ap->a_vpp; @@ -3661,23 +3899,27 @@ nfs3_vnop_mknod( struct nfsreq rq, *req = &rq; nmp = VTONMP(dvp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if (!VATTR_IS_ACTIVE(vap, va_type)) - return (EINVAL); + if (!VATTR_IS_ACTIVE(vap, va_type)) { + return EINVAL; + } if (vap->va_type == VCHR || vap->va_type == VBLK) { - if (!VATTR_IS_ACTIVE(vap, va_rdev)) - return (EINVAL); + if (!VATTR_IS_ACTIVE(vap, va_rdev)) { + return EINVAL; + } rdev = vap->va_rdev; - } else if (vap->va_type == VFIFO || vap->va_type == VSOCK) + } else if (vap->va_type == VFIFO || vap->va_type == VSOCK) { rdev = 0xffffffff; - else { - return (ENOTSUP); + } else { + return ENOTSUP; + } + if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) { + return ENAMETOOLONG; } - if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) - return (ENAMETOOLONG); nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx); @@ -3694,8 +3936,8 @@ nfs3_vnop_mknod( nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + 4 * NFSX_UNSIGNED + - nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers)); + NFSX_FH(nfsvers) + 4 * NFSX_UNSIGNED + + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers)); nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp); if (nfsvers == NFS_VER3) { @@ -3709,17 +3951,20 @@ nfs3_vnop_mknod( nfsm_chain_add_v2sattr(error, &nmreq, vap, rdev); } nfsm_chain_build_done(error, &nmreq); - if (!error) + if (!error) { error = busyerror = nfs_node_set_busy(dnp, vfs_context_thread(ctx)); + } nfsmout_if(error); error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_MKNOD, - vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); - if (!error) + vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); + if (!error) { error = nfs_request_async_finish(req, &nmrep, &xid, &status); + } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } /* XXX no EEXIST kludge here? */ dxid = xid; if (!error && !status) { @@ -3729,10 +3974,12 @@ nfs3_vnop_mknod( } error = nfsm_chain_get_fh_attr(&nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr); } - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid); - if (!error) + } + if (!error) { error = status; + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -3740,26 +3987,31 @@ nfsmout: if (!lockerror) { dnp->n_flag |= NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) + if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr); + } nfs_node_unlock(dnp); /* nfs_getattr() will check changed and purge caches */ nfs_getattr(dnp, NULL, ctx, wccpostattr ? 
NGA_CACHED : NGA_UNCACHED); } - if (!error && fh.fh_len) + if (!error && fh.fh_len) { error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); - if (!error && !np) + } + if (!error && !np) { error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np); - if (!error && np) + } + if (!error && np) { newvp = NFSTOV(np); - if (!busyerror) + } + if (!busyerror) { nfs_node_clear_busy(dnp); + } if (!error && (gotuid || gotgid) && (!newvp || nfs_getattrcache(np, &nvattr, 0) || - (gotuid && (nvattr.nva_uid != vap->va_uid)) || - (gotgid && (nvattr.nva_gid != vap->va_gid)))) { + (gotuid && (nvattr.nva_uid != vap->va_uid)) || + (gotgid && (nvattr.nva_gid != vap->va_gid)))) { /* clear ID bits if server didn't use them (or we can't tell) */ VATTR_CLEAR_SUPPORTED(vap, va_uid); VATTR_CLEAR_SUPPORTED(vap, va_gid); @@ -3773,7 +4025,7 @@ nfsmout: *vpp = newvp; nfs_node_unlock(np); } - return (error); + return error; } static uint32_t create_verf; @@ -3783,13 +4035,13 @@ static uint32_t create_verf; int nfs3_vnop_create( struct vnop_create_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t dvp = ap->a_dvp; @@ -3811,12 +4063,14 @@ nfs3_vnop_create( struct nfs_dulookup dul; nmp = VTONMP(dvp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) - return (ENAMETOOLONG); + if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) { + return ENAMETOOLONG; + } nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx); @@ -3831,8 +4085,9 @@ nfs3_vnop_create( if (vap->va_vaflags & VA_EXCLUSIVE) { fmode |= O_EXCL; - if (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)) + if (!VATTR_IS_ACTIVE(vap, va_access_time) || !VATTR_IS_ACTIVE(vap, va_modify_time)) { vap->va_vaflags |= VA_UTIMES_NULL; + } } again: @@ -3843,18 +4098,19 @@ again: nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + 2 * NFSX_UNSIGNED + - nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers)); + NFSX_FH(nfsvers) + 2 * NFSX_UNSIGNED + + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers)); nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp); if (nfsvers == NFS_VER3) { if (fmode & O_EXCL) { nfsm_chain_add_32(error, &nmreq, NFS_CREATE_EXCLUSIVE); lck_rw_lock_shared(in_ifaddr_rwlock); - if (!TAILQ_EMPTY(&in_ifaddrhead)) + if (!TAILQ_EMPTY(&in_ifaddrhead)) { val = IA_SIN(in_ifaddrhead.tqh_first)->sin_addr.s_addr; - else + } else { val = create_verf; + } lck_rw_done(in_ifaddr_rwlock); nfsm_chain_add_32(error, &nmreq, val); ++create_verf; @@ -3870,14 +4126,15 @@ again: nfsmout_if(error); error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_CREATE, - vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); + vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); if (!error) { nfs_dulookup_start(&dul, dnp, ctx); error = nfs_request_async_finish(req, &nmrep, &xid, &status); } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = 
nfs_node_lock(dnp))) { error = lockerror; + } dxid = xid; if (!error && !status) { if (dnp->n_flag & NNEGNCENTRIES) { @@ -3886,10 +4143,12 @@ again: } error = nfsm_chain_get_fh_attr(&nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr); } - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid); - if (!error) + } + if (!error) { error = status; + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -3897,23 +4156,28 @@ nfsmout: if (!lockerror) { dnp->n_flag |= NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) + if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr); + } nfs_node_unlock(dnp); /* nfs_getattr() will check changed and purge caches */ nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED); } - if (!error && fh.fh_len) + if (!error && fh.fh_len) { error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); - if (!error && !np) + } + if (!error && !np) { error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np); - if (!error && np) + } + if (!error && np) { newvp = NFSTOV(np); + } nfs_dulookup_finish(&dul, dnp, ctx); - if (!busyerror) + if (!busyerror) { nfs_node_clear_busy(dnp); + } if (error) { if ((nfsvers == NFS_VER3) && (fmode & O_EXCL) && (error == NFSERR_NOTSUPP)) { @@ -3934,24 +4198,27 @@ nfsmout: VATTR_CLEAR_ACTIVE(vap, va_gid); error = nfs3_setattr_rpc(np, vap, ctx); } - if (error) + if (error) { vnode_put(newvp); - else + } else { nfs_node_lock_force(np); + } } - if (!error) + if (!error) { *ap->a_vpp = newvp; + } if (!error && (gotuid || gotgid) && (!newvp || nfs_getattrcache(np, &nvattr, 0) || - (gotuid && (nvattr.nva_uid != vap->va_uid)) || - (gotgid && (nvattr.nva_gid != vap->va_gid)))) { + (gotuid && (nvattr.nva_uid != vap->va_uid)) || + (gotgid && (nvattr.nva_gid != vap->va_gid)))) { /* clear ID bits if server didn't use them (or we can't tell) */ VATTR_CLEAR_SUPPORTED(vap, va_uid); VATTR_CLEAR_SUPPORTED(vap, va_gid); } - if (!error) + if (!error) { nfs_node_unlock(np); - return (error); + } + return error; } /* @@ -3968,13 +4235,13 @@ nfsmout: int nfs_vnop_remove( struct vnop_remove_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t a_vp; - struct componentname *a_cnp; - int a_flags; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t a_vp; + * struct componentname *a_cnp; + * int a_flags; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -3990,15 +4257,17 @@ nfs_vnop_remove( /* XXX prevent removing a sillyrenamed file? 
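 *
 * (Editorial note, not part of the original change: "sillyrename" is
 * the classic NFS-client workaround for removing a file that is still
 * open locally.  Since the server keeps no open-file state, the client
 * renames the victim to a hidden temporary name and defers the real
 * REMOVE until the last local reference is dropped.  A minimal sketch
 * of the name generation, using an illustrative format string rather
 * than the real NFS_SILLYNAME_FORMAT:
 *
 *	num = OSAddAtomic(1, &nfs_sillyrename_number);
 *	snprintf(name, sizeof(name), ".nfs_%08x_%04x", num, pid & 0xffff);
 *	// retry with a new num while nfs_lookitup() says the name exists
 *
 * nfs_vnop_remove() below picks between a direct REMOVE and
 * nfs_sillyrename() based on vnode_isinuse() and the link count.)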
*/ nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; namedattrs = (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR); again_relock: error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)); - if (error) - return (error); + if (error) { + return error; + } /* lock the node while we remove the file */ lck_mtx_lock(nfs_node_hash_mutex); @@ -4009,8 +4278,9 @@ again_relock: np->n_hflag |= NHLOCKED; lck_mtx_unlock(nfs_node_hash_mutex); - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); + } again: inuse = vnode_isinuse(vp, 0); if ((ap->a_flags & VNODE_REMOVE_NODELETEBUSY) && inuse) { @@ -4019,13 +4289,13 @@ again: goto out; } if (inuse && !gotattr) { - if (nfs_getattr(np, &nvattr, ctx, NGA_CACHED)) + if (nfs_getattr(np, &nvattr, ctx, NGA_CACHED)) { nvattr.nva_nlink = 1; + } gotattr = 1; goto again; } if (!inuse || (np->n_sillyrename && (nvattr.nva_nlink > 1))) { - if (!inuse && !flushed) { /* flush all the buffers first */ /* unlock the node */ lck_mtx_lock(nfs_node_hash_mutex); @@ -4043,15 +4313,17 @@ again: nfs_node_lock_force(np); NATTRINVALIDATE(np); nfs_node_unlock(np); - return (error); + return error; } - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_finish(&dul, dnp, ctx); + } goto again_relock; } - if ((nmp->nm_vers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK)) + if ((nmp->nm_vers >= NFS_VER4) && (np->n_openflags & N_DELEG_MASK)) { nfs4_delegation_return(np, 0, vfs_context_thread(ctx), vfs_context_ucred(ctx)); + } /* * Purge the name cache so that the chance of a lookup for @@ -4060,12 +4332,13 @@ again: */ nfs_name_cache_purge(dnp, np, cnp, ctx); - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_start(&dul, dnp, ctx); + } /* Do the rpc */ error = nmp->nm_funcs->nf_remove_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen, - vfs_context_thread(ctx), vfs_context_ucred(ctx)); + vfs_context_thread(ctx), vfs_context_ucred(ctx)); /* * Kludge City: If the first reply to the remove rpc is lost.. @@ -4073,8 +4346,9 @@ again: * since the file was in fact removed * Therefore, we cheat and return success. 
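 *
 * (Editorial note, not part of the original change: REMOVE is not
 * idempotent, so when the first reply is lost the retransmitted
 * request can legitimately fail even though the operation succeeded:
 *
 *	client                          server
 *	REMOVE "foo"         -->        unlinks foo; reply is lost
 *	REMOVE "foo" (retry) -->        ENOENT
 *
 * Treating that ENOENT as success is the standard NFS-client
 * heuristic; a server-side duplicate request cache avoids the race,
 * but the client cannot assume one is present.)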
*/ - if (error == ENOENT) + if (error == ENOENT) { error = 0; + } if (!error && !inuse && !np->n_sillyrename) { /* @@ -4104,8 +4378,9 @@ again: nfs_node_unlock(np); } } else if (!np->n_sillyrename) { - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_start(&dul, dnp, ctx); + } error = nfs_sillyrename(dnp, np, cnp, ctx); nfs_node_lock_force(np); NATTRINVALIDATE(np); @@ -4114,14 +4389,16 @@ again: nfs_node_lock_force(np); NATTRINVALIDATE(np); nfs_node_unlock(np); - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_start(&dul, dnp, ctx); + } } /* nfs_getattr() will check changed and purge caches */ nfs_getattr(dnp, NULL, ctx, NGA_CACHED); - if (!namedattrs) + if (!namedattrs) { nfs_dulookup_finish(&dul, dnp, ctx); + } out: /* unlock the node */ lck_mtx_lock(nfs_node_hash_mutex); @@ -4132,9 +4409,10 @@ out: } lck_mtx_unlock(nfs_node_hash_mutex); nfs_node_clear_busy2(dnp, np); - if (setsize) + if (setsize) { ubc_setsize(vp, 0); - return (error); + } + return error; } /* @@ -4144,8 +4422,9 @@ int nfs_removeit(struct nfs_sillyrename *nsp) { struct nfsmount *nmp = NFSTONMP(nsp->nsr_dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } return nmp->nm_funcs->nf_remove_rpc(nsp->nsr_dnp, nsp->nsr_name, nsp->nsr_namlen, NULL, nsp->nsr_cred); } @@ -4168,17 +4447,19 @@ nfs3_remove_rpc( struct nfsm_chain nmreq, nmrep; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((nfsvers == NFS_VER2) && (namelen > NFS_MAXNAMLEN)) - return (ENAMETOOLONG); + if ((nfsvers == NFS_VER2) && (namelen > NFS_MAXNAMLEN)) { + return ENAMETOOLONG; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(namelen)); + NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(namelen)); nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, name, namelen, nmp); nfsm_chain_build_done(error, &nmreq); @@ -4186,25 +4467,31 @@ nfs3_remove_rpc( error = nfs_request2(dnp, NULL, &nmreq, NFSPROC_REMOVE, thd, cred, NULL, 0, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; - if (nfsvers == NFS_VER3) + } + if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &xid); + } nfsmout_if(error); dnp->n_flag |= NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) + if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr); - if (!wccpostattr) + } + if (!wccpostattr) { NATTRINVALIDATE(dnp); - if (!error) + } + if (!error) { error = status; + } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(dnp); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -4213,15 +4500,15 @@ nfsmout: int nfs_vnop_rename( struct vnop_rename_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_fdvp; - vnode_t a_fvp; - struct componentname *a_fcnp; - vnode_t a_tdvp; - vnode_t a_tvp; - struct componentname *a_tcnp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_fdvp; + * vnode_t a_fvp; + * struct componentname *a_fcnp; + * vnode_t a_tdvp; + * vnode_t a_tvp; + * struct componentname *a_tcnp; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t fdvp = 
ap->a_fdvp; @@ -4231,7 +4518,7 @@ nfs_vnop_rename( nfsnode_t fdnp, fnp, tdnp, tnp; struct componentname *tcnp = ap->a_tcnp; struct componentname *fcnp = ap->a_fcnp; - int error, nfsvers, inuse=0, tvprecycle=0, locked=0; + int error, nfsvers, inuse = 0, tvprecycle = 0, locked = 0; mount_t fmp, tdmp, tmp; struct nfs_vattr nvattr; struct nfsmount *nmp; @@ -4242,13 +4529,15 @@ nfs_vnop_rename( tnp = tvp ? VTONFS(tvp) : NULL; nmp = NFSTONMP(fdnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; error = nfs_node_set_busy4(fdnp, fnp, tdnp, tnp, vfs_context_thread(ctx)); - if (error) - return (error); + if (error) { + return error; + } if (tvp && (tvp != fvp)) { /* lock the node while we rename over the existing file */ @@ -4280,8 +4569,9 @@ nfs_vnop_rename( * Don't sillyrename if source and target are same vnode (hard * links or case-variants) */ - if (tvp && (tvp != fvp)) + if (tvp && (tvp != fvp)) { inuse = vnode_isinuse(tvp, 0); + } if (inuse && !tnp->n_sillyrename && (vnode_vtype(tvp) != VDIR)) { error = nfs_sillyrename(tdnp, tnp, tcnp, ctx); if (error) { @@ -4296,13 +4586,14 @@ nfs_vnop_rename( } error = nmp->nm_funcs->nf_rename_rpc(fdnp, fcnp->cn_nameptr, fcnp->cn_namelen, - tdnp, tcnp->cn_nameptr, tcnp->cn_namelen, ctx); + tdnp, tcnp->cn_nameptr, tcnp->cn_namelen, ctx); /* * Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. */ - if (error == ENOENT) + if (error == ENOENT) { error = 0; + } if (tvp && (tvp != fvp) && !tnp->n_sillyrename) { nfs_node_lock_force(tnp); @@ -4345,7 +4636,7 @@ nfs_vnop_rename( nfs_node_unlock(tdnp); nfs_node_lock_force(fnp); cache_enter(tdvp, fvp, tcnp); - if (tdvp != fdvp) { /* update parent pointer */ + if (tdvp != fdvp) { /* update parent pointer */ if (fnp->n_parent && !vnode_get(fnp->n_parent)) { /* remove ref from old parent */ vnode_rele(fnp->n_parent); @@ -4377,7 +4668,7 @@ out: lck_mtx_unlock(nfs_node_hash_mutex); } nfs_node_clear_busy4(fdnp, fnp, tdnp, tnp); - return (error); + return error; } /* @@ -4401,19 +4692,21 @@ nfs3_rename_rpc( struct nfsm_chain nmreq, nmrep; nmp = NFSTONMP(fdnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; if ((nfsvers == NFS_VER2) && - ((fnamelen > NFS_MAXNAMLEN) || (tnamelen > NFS_MAXNAMLEN))) - return (ENAMETOOLONG); + ((fnamelen > NFS_MAXNAMLEN) || (tnamelen > NFS_MAXNAMLEN))) { + return ENAMETOOLONG; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, - (NFSX_FH(nfsvers) + NFSX_UNSIGNED) * 2 + - nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen)); + (NFSX_FH(nfsvers) + NFSX_UNSIGNED) * 2 + + nfsm_rndup(fnamelen) + nfsm_rndup(tnamelen)); nfsm_chain_add_fh(error, &nmreq, nfsvers, fdnp->n_fhp, fdnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, fnameptr, fnamelen, nmp); nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize); @@ -4423,34 +4716,40 @@ nfs3_rename_rpc( error = nfs_request(fdnp, NULL, &nmreq, NFSPROC_RENAME, ctx, NULL, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock2(fdnp, tdnp))) + if ((lockerror = nfs_node_lock2(fdnp, tdnp))) { error = lockerror; + } if (nfsvers == NFS_VER3) { txid = xid; nfsm_chain_get_wcc_data(error, &nmrep, fdnp, &fpremtime, &fwccpostattr, &xid); nfsm_chain_get_wcc_data(error, &nmrep, tdnp, &tpremtime, &twccpostattr, &txid); } - if (!error) + if (!error) { error = status; + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); if (!lockerror) { fdnp->n_flag |= 
NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&fdnp->n_ncmtime, &fpremtime, ==)) + if (nfstimespeccmp(&fdnp->n_ncmtime, &fpremtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, fdnp, &fdnp->n_vattr); - if (!fwccpostattr) + } + if (!fwccpostattr) { NATTRINVALIDATE(fdnp); + } tdnp->n_flag |= NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&tdnp->n_ncmtime, &tpremtime, ==)) + if (nfstimespeccmp(&tdnp->n_ncmtime, &tpremtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, tdnp, &tdnp->n_vattr); - if (!twccpostattr) + } + if (!twccpostattr) { NATTRINVALIDATE(tdnp); + } nfs_node_unlock2(fdnp, tdnp); } - return (error); + return error; } /* @@ -4459,12 +4758,12 @@ nfsmout: int nfs3_vnop_link( struct vnop_link_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - vnode_t a_tdvp; - struct componentname *a_cnp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * vnode_t a_tdvp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -4479,15 +4778,18 @@ nfs3_vnop_link( u_int64_t xid, txid; struct nfsm_chain nmreq, nmrep; - if (vnode_mount(vp) != vnode_mount(tdvp)) - return (EXDEV); + if (vnode_mount(vp) != vnode_mount(tdvp)) { + return EXDEV; + } nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) - return (ENAMETOOLONG); + if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) { + return ENAMETOOLONG; + } /* * Push all writes to the server, so that the attribute cache @@ -4497,14 +4799,15 @@ nfs3_vnop_link( nfs_flush(np, MNT_WAIT, vfs_context_thread(ctx), V_IGNORE_WRITEERR); error = nfs_node_set_busy2(tdnp, np, vfs_context_thread(ctx)); - if (error) - return (error); + if (error) { + return error; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers)*2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); + NFSX_FH(nfsvers) * 2 + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); nfsm_chain_add_fh(error, &nmreq, nfsvers, np->n_fhp, np->n_fhsize); nfsm_chain_add_fh(error, &nmreq, nfsvers, tdnp->n_fhp, tdnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp); @@ -4521,20 +4824,24 @@ nfs3_vnop_link( nfsm_chain_postop_attr_update_flag(error, &nmrep, np, attrflag, &xid); nfsm_chain_get_wcc_data(error, &nmrep, tdnp, &premtime, &wccpostattr, &txid); } - if (!error) + if (!error) { error = status; + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); if (!lockerror) { - if (!attrflag) + if (!attrflag) { NATTRINVALIDATE(np); + } tdnp->n_flag |= NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&tdnp->n_ncmtime, &premtime, ==)) + if (nfstimespeccmp(&tdnp->n_ncmtime, &premtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, tdnp, &tdnp->n_vattr); - if (!wccpostattr) + } + if (!wccpostattr) { NATTRINVALIDATE(tdnp); + } if (!error && (tdnp->n_flag & NNEGNCENTRIES)) { tdnp->n_flag &= ~NNEGNCENTRIES; cache_purge_negatives(tdvp); @@ -4545,9 +4852,10 @@ nfsmout: /* * Kludge: Map EEXIST => 0 assuming that it is a reply to a retry. 
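 *
 * (Editorial note, not part of the original change: this is the LINK
 * counterpart of the ENOENT mapping in nfs_vnop_remove() above.
 * LINK is not idempotent either, so if the first reply is lost the
 * retransmitted request finds the link already in place and returns
 * EEXIST, which the client optimistically treats as success.)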
*/ - if (error == EEXIST) + if (error == EEXIST) { error = 0; - return (error); + } + return error; } /* @@ -4556,14 +4864,14 @@ nfsmout: int nfs3_vnop_symlink( struct vnop_symlink_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - char *a_target; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * char *a_target; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t dvp = ap->a_dvp; @@ -4584,14 +4892,16 @@ nfs3_vnop_symlink( struct nfs_dulookup dul; nmp = VTONMP(dvp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; slen = strlen(ap->a_target); if ((nfsvers == NFS_VER2) && - ((cnp->cn_namelen > NFS_MAXNAMLEN) || (slen > NFS_MAXPATHLEN))) - return (ENAMETOOLONG); + ((cnp->cn_namelen > NFS_MAXNAMLEN) || (slen > NFS_MAXPATHLEN))) { + return ENAMETOOLONG; + } nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx); @@ -4611,42 +4921,48 @@ nfs3_vnop_symlink( nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + 2 * NFSX_UNSIGNED + - nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(nfsvers)); + NFSX_FH(nfsvers) + 2 * NFSX_UNSIGNED + + nfsm_rndup(cnp->cn_namelen) + nfsm_rndup(slen) + NFSX_SATTR(nfsvers)); nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp); - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_add_v3sattr(error, &nmreq, vap); + } nfsm_chain_add_name(error, &nmreq, ap->a_target, slen, nmp); - if (nfsvers == NFS_VER2) + if (nfsvers == NFS_VER2) { nfsm_chain_add_v2sattr(error, &nmreq, vap, -1); + } nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_SYMLINK, - vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); + vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); if (!error) { nfs_dulookup_start(&dul, dnp, ctx); error = nfs_request_async_finish(req, &nmrep, &xid, &status); } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } dxid = xid; if (!error && !status) { if (dnp->n_flag & NNEGNCENTRIES) { dnp->n_flag &= ~NNEGNCENTRIES; cache_purge_negatives(dvp); } - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { error = nfsm_chain_get_fh_attr(&nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr); - else + } else { fh.fh_len = 0; + } } - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid); - if (!error) + } + if (!error) { error = status; + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -4654,17 +4970,20 @@ nfsmout: if (!lockerror) { dnp->n_flag |= NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) + if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr); + } nfs_node_unlock(dnp); /* nfs_getattr() will check changed and purge caches */ nfs_getattr(dnp, NULL, ctx, wccpostattr ? 
NGA_CACHED : NGA_UNCACHED); } - if (!error && fh.fh_len) + if (!error && fh.fh_len) { error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); - if (!error && np) + } + if (!error && np) { newvp = NFSTOV(np); + } nfs_dulookup_finish(&dul, dnp, ctx); @@ -4681,16 +5000,18 @@ nfsmout: error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np); if (!error) { newvp = NFSTOV(np); - if (vnode_vtype(newvp) != VLNK) + if (vnode_vtype(newvp) != VLNK) { error = EEXIST; + } } } - if (!busyerror) + if (!busyerror) { nfs_node_clear_busy(dnp); + } if (!error && (gotuid || gotgid) && (!newvp || nfs_getattrcache(np, &nvattr, 0) || - (gotuid && (nvattr.nva_uid != vap->va_uid)) || - (gotgid && (nvattr.nva_gid != vap->va_gid)))) { + (gotuid && (nvattr.nva_uid != vap->va_uid)) || + (gotgid && (nvattr.nva_gid != vap->va_gid)))) { /* clear ID bits if server didn't use them (or we can't tell) */ VATTR_CLEAR_SUPPORTED(vap, va_uid); VATTR_CLEAR_SUPPORTED(vap, va_gid); @@ -4704,7 +5025,7 @@ nfsmout: nfs_node_unlock(np); *ap->a_vpp = newvp; } - return (error); + return error; } /* @@ -4713,13 +5034,13 @@ nfsmout: int nfs3_vnop_mkdir( struct vnop_mkdir_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t *a_vpp; + * struct componentname *a_cnp; + * struct vnode_attr *a_vap; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t dvp = ap->a_dvp; @@ -4733,18 +5054,20 @@ nfs3_vnop_mkdir( int error = 0, lockerror = ENOENT, busyerror = ENOENT, status, wccpostattr = 0; struct timespec premtime = { 0, 0 }; int nfsvers, gotuid, gotgid; - u_int64_t xid= 0, dxid; + u_int64_t xid = 0, dxid; fhandle_t fh; struct nfsm_chain nmreq, nmrep; struct nfsreq rq, *req = &rq; struct nfs_dulookup dul; nmp = VTONMP(dvp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) - return (ENAMETOOLONG); + if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) { + return ENAMETOOLONG; + } nfs_avoid_needless_id_setting_on_create(dnp, vap, ctx); @@ -4764,26 +5087,28 @@ nfs3_vnop_mkdir( nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + NFSX_UNSIGNED + - nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers)); + NFSX_FH(nfsvers) + NFSX_UNSIGNED + + nfsm_rndup(cnp->cn_namelen) + NFSX_SATTR(nfsvers)); nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp); - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_add_v3sattr(error, &nmreq, vap); - else + } else { nfsm_chain_add_v2sattr(error, &nmreq, vap, -1); + } nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_MKDIR, - vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); + vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); if (!error) { nfs_dulookup_start(&dul, dnp, ctx); error = nfs_request_async_finish(req, &nmrep, &xid, &status); } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } dxid = xid; if (!error && !status) { if (dnp->n_flag & NNEGNCENTRIES) { @@ -4792,10 +5117,12 @@ nfs3_vnop_mkdir( } error = 
nfsm_chain_get_fh_attr(&nmrep, dnp, ctx, nfsvers, &xid, &fh, &nvattr); } - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &dxid); - if (!error) + } + if (!error) { error = status; + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -4803,17 +5130,20 @@ nfsmout: if (!lockerror) { dnp->n_flag |= NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) + if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr); + } nfs_node_unlock(dnp); /* nfs_getattr() will check changed and purge caches */ nfs_getattr(dnp, NULL, ctx, wccpostattr ? NGA_CACHED : NGA_UNCACHED); } - if (!error && fh.fh_len) + if (!error && fh.fh_len) { error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); - if (!error && np) + } + if (!error && np) { newvp = NFSTOV(np); + } nfs_dulookup_finish(&dul, dnp, ctx); @@ -4830,16 +5160,18 @@ nfsmout: error = nfs_lookitup(dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx, &np); if (!error) { newvp = NFSTOV(np); - if (vnode_vtype(newvp) != VDIR) + if (vnode_vtype(newvp) != VDIR) { error = EEXIST; + } } } - if (!busyerror) + if (!busyerror) { nfs_node_clear_busy(dnp); + } if (!error && (gotuid || gotgid) && (!newvp || nfs_getattrcache(np, &nvattr, 0) || - (gotuid && (nvattr.nva_uid != vap->va_uid)) || - (gotgid && (nvattr.nva_gid != vap->va_gid)))) { + (gotuid && (nvattr.nva_uid != vap->va_uid)) || + (gotgid && (nvattr.nva_gid != vap->va_gid)))) { /* clear ID bits if server didn't use them (or we can't tell) */ VATTR_CLEAR_SUPPORTED(vap, va_uid); VATTR_CLEAR_SUPPORTED(vap, va_gid); @@ -4853,7 +5185,7 @@ nfsmout: nfs_node_unlock(np); *ap->a_vpp = newvp; } - return (error); + return error; } /* @@ -4862,12 +5194,12 @@ nfsmout: int nfs3_vnop_rmdir( struct vnop_rmdir_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t a_vp; - struct componentname *a_cnp; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_dvp; + * vnode_t a_vp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -4885,14 +5217,17 @@ nfs3_vnop_rmdir( struct nfs_dulookup dul; nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; - if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) - return (ENAMETOOLONG); + if ((nfsvers == NFS_VER2) && (cnp->cn_namelen > NFS_MAXNAMLEN)) { + return ENAMETOOLONG; + } - if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) - return (error); + if ((error = nfs_node_set_busy2(dnp, np, vfs_context_thread(ctx)))) { + return error; + } nfs_dulookup_init(&dul, dnp, cnp->cn_nameptr, cnp->cn_namelen, ctx); @@ -4900,25 +5235,28 @@ nfs3_vnop_rmdir( nfsm_chain_null(&nmrep); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); + NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(cnp->cn_namelen)); nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, cnp->cn_nameptr, cnp->cn_namelen, nmp); nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_RMDIR, - vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, &req); + vfs_context_thread(ctx), 
vfs_context_ucred(ctx), NULL, 0, NULL, &req); if (!error) { nfs_dulookup_start(&dul, dnp, ctx); error = nfs_request_async_finish(req, &nmrep, &xid, &status); } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; - if (nfsvers == NFS_VER3) + } + if (nfsvers == NFS_VER3) { nfsm_chain_get_wcc_data(error, &nmrep, dnp, &premtime, &wccpostattr, &xid); - if (!error) + } + if (!error) { error = status; + } nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); @@ -4926,8 +5264,9 @@ nfsmout: if (!lockerror) { dnp->n_flag |= NMODIFIED; /* if directory hadn't changed, update namecache mtime */ - if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) + if (nfstimespeccmp(&dnp->n_ncmtime, &premtime, ==)) { NFS_CHANGED_UPDATE_NC(nfsvers, dnp, &dnp->n_vattr); + } nfs_node_unlock(dnp); nfs_name_cache_purge(dnp, np, cnp, ctx); /* nfs_getattr() will check changed and purge caches */ @@ -4939,8 +5278,9 @@ nfsmout: /* * Kludge: Map ENOENT => 0 assuming that you have a reply to a retry. */ - if (error == ENOENT) + if (error == ENOENT) { error = 0; + } if (!error) { /* * remove nfsnode from hash now so we can't accidentally find it @@ -4955,7 +5295,7 @@ nfsmout: } lck_mtx_unlock(nfs_node_hash_mutex); } - return (error); + return error; } /* @@ -4964,7 +5304,7 @@ nfsmout: * The incoming "offset" is a directory cookie indicating where in the * directory entries should be read from. A zero cookie means start at * the beginning of the directory. Any other cookie will be a cookie - * returned from the server. + * returned from the server. * * Using that cookie, determine which buffer (and where in that buffer) * to start returning entries from. Buffer logical block numbers are @@ -4983,14 +5323,14 @@ nfsmout: int nfs_vnop_readdir( struct vnop_readdir_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct uio *a_uio; - int a_flags; - int *a_eofflag; - int *a_numdirent; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct uio *a_uio; + * int a_flags; + * int *a_eofflag; + * int *a_numdirent; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t dvp = ap->a_vp; @@ -5008,24 +5348,28 @@ nfs_vnop_readdir( thread_t thd; nmp = VTONMP(dvp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; bigcookies = (nmp->nm_state & NFSSTA_BIGCOOKIES); extended = (ap->a_flags & VNODE_READDIR_EXTENDED); - if (vnode_vtype(dvp) != VDIR) - return (EPERM); + if (vnode_vtype(dvp) != VDIR) { + return EPERM; + } - if (ap->a_eofflag) + if (ap->a_eofflag) { *ap->a_eofflag = 0; + } - if (uio_resid(uio) == 0) - return (0); + if (uio_resid(uio) == 0) { + return 0; + } if ((nfsvers >= NFS_VER4) && (dnp->n_vattr.nva_flags & NFS_FFLAG_TRIGGER)) { /* trigger directories should never be read, return nothing */ - return (0); + return 0; } thd = vfs_context_thread(ctx); @@ -5033,18 +5377,21 @@ nfs_vnop_readdir( nextcookie = uio_offset(uio); ptc = bigcookies && NFS_DIR_COOKIE_POTENTIALLY_TRUNCATED(nextcookie); - if ((error = nfs_node_lock(dnp))) + if ((error = nfs_node_lock(dnp))) { goto out; + } if (dnp->n_flag & NNEEDINVALIDATE) { dnp->n_flag &= ~NNEEDINVALIDATE; nfs_invaldir(dnp); nfs_node_unlock(dnp); error = nfs_vinvalbuf(dvp, 0, ctx, 1); - if (!error) + if (!error) { error = nfs_node_lock(dnp); - if (error) + } + if (error) { goto out; + } } /* @@ -5054,14 +5401,16 @@ nfs_vnop_readdir( if (dnp->n_flag & NMODIFIED) { nfs_invaldir(dnp); 
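/*
 * (Editorial sketch, not part of the original change: nfs_vnop_readdir()
 * treats the uio offset as an opaque NFS directory cookie.  Cookie 0
 * means "start of directory"; anything else must be mapped back to the
 * logical block holding the next entry, roughly:
 *
 *	uint64_t lbn;
 *	int ptc;	// "potentially truncated cookie" hint
 *	error = nfs_dir_cookie_to_lbn(dnp, cookie, &ptc, &lbn);
 *	// -1 => cookie was the cached EOF cookie, 0 => lbn is valid
 *
 * nfs_dir_cookie_to_lbn() appears later in this file; the ptc hint
 * matters on servers that hand out 64-bit cookies which may have been
 * truncated to 32 bits on an earlier pass.)
 */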
nfs_node_unlock(dnp); - if ((error = nfs_vinvalbuf(dvp, 0, ctx, 1))) + if ((error = nfs_vinvalbuf(dvp, 0, ctx, 1))) { goto out; + } } else { nfs_node_unlock(dnp); } /* nfs_getattr() will check changed and purge caches */ - if ((error = nfs_getattr(dnp, NULL, ctx, NGA_UNCACHED))) + if ((error = nfs_getattr(dnp, NULL, ctx, NGA_UNCACHED))) { goto out; + } } else { nfs_node_unlock(dnp); } @@ -5072,8 +5421,9 @@ nfs_vnop_readdir( done = 1; error = 0; } - if (ap->a_eofflag) + if (ap->a_eofflag) { *ap->a_eofflag = 1; + } } while (!error && !done) { @@ -5081,8 +5431,9 @@ nfs_vnop_readdir( cookie = nextcookie; getbuffer: error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ, &bp); - if (error) + if (error) { goto out; + } ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; if (!ISSET(bp->nb_flags, NB_CACHE) || !ISSET(ndbhp->ndbh_flags, NDB_FULL)) { if (!ISSET(bp->nb_flags, NB_CACHE)) { /* initialize the buffer */ @@ -5092,21 +5443,25 @@ getbuffer: ndbhp->ndbh_ncgen = dnp->n_ncgen; } error = nfs_buf_readdir(bp, ctx); - if (error == NFSERR_DIRBUFDROPPED) + if (error == NFSERR_DIRBUFDROPPED) { goto getbuffer; - if (error) + } + if (error) { nfs_buf_release(bp, 1); + } if (error && (error != ENXIO) && (error != ETIMEDOUT) && (error != EINTR) && (error != ERESTART)) { if (!nfs_node_lock(dnp)) { nfs_invaldir(dnp); nfs_node_unlock(dnp); } nfs_vinvalbuf(dvp, 0, ctx, 1); - if (error == NFSERR_BAD_COOKIE) + if (error == NFSERR_BAD_COOKIE) { error = ENOENT; + } } - if (error) + if (error) { goto out; + } } /* find next entry to return */ @@ -5145,10 +5500,11 @@ getbuffer: cp = (char*)&dent; bzero(cp, sizeof(dent)); } - if (dp->d_namlen > (sizeof(dent.d_name) - 1)) + if (dp->d_namlen > (sizeof(dent.d_name) - 1)) { nlen = sizeof(dent.d_name) - 1; - else + } else { nlen = dp->d_namlen; + } rlen = NFS_DIRENT_LEN(nlen); dent.d_reclen = rlen; dent.d_ino = dp->d_ino; @@ -5161,8 +5517,9 @@ getbuffer: done = 1; break; } - if ((error = uiomove(cp, rlen, uio))) + if ((error = uiomove(cp, rlen, uio))) { break; + } numdirent++; nextcookie = dp->d_seekoff; dp = NFS_DIRENTRY_NEXT(dp); @@ -5174,12 +5531,14 @@ getbuffer: /* if we also hit EOF, we're done */ if (ISSET(ndbhp->ndbh_flags, NDB_EOF)) { done = 1; - if (ap->a_eofflag) + if (ap->a_eofflag) { *ap->a_eofflag = 1; + } } } - if (!error) + if (!error) { uio_setoffset(uio, nextcookie); + } if (!error && !done && (nextcookie == cookie)) { printf("nfs readdir cookie didn't change 0x%llx, %d/%d\n", cookie, i, ndbhp->ndbh_count); error = EIO; @@ -5187,13 +5546,15 @@ getbuffer: nfs_buf_release(bp, 1); } - if (!error) + if (!error) { nfs_dir_cookie_cache(dnp, nextcookie, lbn); + } - if (ap->a_numdirent) + if (ap->a_numdirent) { *ap->a_numdirent = numdirent; + } out: - return (error); + return error; } @@ -5204,12 +5565,14 @@ out: void nfs_invaldir(nfsnode_t dnp) { - if (vnode_vtype(NFSTOV(dnp)) != VDIR) + if (vnode_vtype(NFSTOV(dnp)) != VDIR) { return; + } dnp->n_eofcookie = 0; dnp->n_cookieverf = 0; - if (!dnp->n_cookiecache) + if (!dnp->n_cookiecache) { return; + } dnp->n_cookiecache->free = 0; dnp->n_cookiecache->mru = -1; memset(dnp->n_cookiecache->next, -1, NFSNUMCOOKIES); @@ -5224,12 +5587,14 @@ nfs_dir_buf_freespace(struct nfsbuf *bp, int rdirplus) struct nfs_dir_buf_header *ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; uint32_t space; - if (!ndbhp) - return (0); + if (!ndbhp) { + return 0; + } space = bp->nb_bufsize - ndbhp->ndbh_entry_end; - if (rdirplus) + if (rdirplus) { space -= ndbhp->ndbh_count * sizeof(struct nfs_vattr); - return (space); + } + return space; 
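/*
 * (Editorial sketch, not part of the original change: an NFS directory
 * buffer packs its header and direntry records from the front and, for
 * readdirplus, an array of struct nfs_vattr from the back, so the free
 * space is whatever remains in between:
 *
 *	+------------+------------------+--------+-------------------+
 *	| buf header | direntries  -->  | (free) | <--  nfs_vattrs   |
 *	+------------+------------------+--------+-------------------+
 *	^ nb_data      ^ ndbh_entry_end             indexed from end
 *
 * which is exactly what the subtraction above computes.)
 */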
} /* @@ -5241,11 +5606,13 @@ nfs_dir_cookie_cache(nfsnode_t dnp, uint64_t cookie, uint64_t lbn) struct nfsdmap *ndcc; int8_t i, prev; - if (!cookie) + if (!cookie) { return; + } - if (nfs_node_lock(dnp)) + if (nfs_node_lock(dnp)) { return; + } if (cookie == dnp->n_eofcookie) { /* EOF cookie */ nfs_node_unlock(dnp); @@ -5256,7 +5623,7 @@ nfs_dir_cookie_cache(nfsnode_t dnp, uint64_t cookie, uint64_t lbn) if (!ndcc) { /* allocate the cookie cache structure */ MALLOC_ZONE(dnp->n_cookiecache, struct nfsdmap *, - sizeof(struct nfsdmap), M_NFSDIROFF, M_WAITOK); + sizeof(struct nfsdmap), M_NFSDIROFF, M_WAITOK); if (!dnp->n_cookiecache) { nfs_node_unlock(dnp); return; @@ -5274,23 +5641,26 @@ nfs_dir_cookie_cache(nfsnode_t dnp, uint64_t cookie, uint64_t lbn) prev = -1; i = ndcc->mru; while ((i != -1) && (cookie != ndcc->cookies[i].key)) { - if (ndcc->next[i] == -1) /* stop on last entry so we can reuse */ + if (ndcc->next[i] == -1) { /* stop on last entry so we can reuse */ break; + } prev = i; i = ndcc->next[i]; } if ((i != -1) && (cookie == ndcc->cookies[i].key)) { /* found it, remove from list */ - if (prev != -1) + if (prev != -1) { ndcc->next[prev] = ndcc->next[i]; - else + } else { ndcc->mru = ndcc->next[i]; + } } else { /* not found, use next free entry or reuse last entry */ - if (ndcc->free != NFSNUMCOOKIES) + if (ndcc->free != NFSNUMCOOKIES) { i = ndcc->free++; - else + } else { ndcc->next[prev] = -1; + } ndcc->cookies[i].key = cookie; ndcc->cookies[i].lbn = lbn; } @@ -5319,17 +5689,18 @@ nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp) if (!cookie) { /* initial cookie */ *lbnp = 0; *ptc = 0; - return (0); + return 0; } - if (nfs_node_lock(dnp)) - return (ENOENT); + if (nfs_node_lock(dnp)) { + return ENOENT; + } if (cookie == dnp->n_eofcookie) { /* EOF cookie */ nfs_node_unlock(dnp); OSAddAtomic64(1, &nfsstats.direofcache_hits); *ptc = 0; - return (-1); + return -1; } /* note if cookie is a 32-bit match with the EOF cookie */ eofptc = *ptc ? NFS_DIR_COOKIE_SAME32(cookie, dnp->n_eofcookie) : 0; @@ -5343,25 +5714,26 @@ nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp) nfs_node_unlock(dnp); OSAddAtomic64(1, &nfsstats.direofcache_hits); *ptc = 0; - return (0); + return 0; } /* check for 32-bit match */ - if (*ptc && (iptc == -1) && NFS_DIR_COOKIE_SAME32(ndcc->cookies[i].key, cookie)) + if (*ptc && (iptc == -1) && NFS_DIR_COOKIE_SAME32(ndcc->cookies[i].key, cookie)) { iptc = i; + } } /* exact match not found */ if (eofptc) { /* but 32-bit match hit the EOF cookie */ nfs_node_unlock(dnp); OSAddAtomic64(1, &nfsstats.direofcache_hits); - return (-1); + return -1; } if (iptc >= 0) { /* but 32-bit match got a hit */ *lbnp = ndcc->cookies[iptc].lbn; nfs_node_unlock(dnp); OSAddAtomic64(1, &nfsstats.direofcache_hits); - return (0); + return 0; } nfs_node_unlock(dnp); @@ -5370,8 +5742,9 @@ nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp) * Let's search the directory's buffers for the cookie. 
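 *
 * (Editorial note, not part of the original change: the cache checked
 * above is a small MRU list embedded in struct nfsdmap -- parallel
 * {key, lbn} cookie slots threaded through next[], with mru as the
 * list head, free counting the slots handed out, and the tail slot
 * recycled once all NFSNUMCOOKIES entries are in use.  Only when
 * neither the EOF cookie nor a cached cookie (nor a 32-bit truncated
 * match) hits do we fall back to scanning the buffers below.)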
*/ nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } dpptc = NULL; found = 0; @@ -5389,13 +5762,15 @@ nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp) lastbp = NULL; while ((bp = LIST_FIRST(&blist))) { LIST_REMOVE(bp, nb_vnbufs); - if (!lastbp) + if (!lastbp) { LIST_INSERT_HEAD(&dnp->n_cleanblkhd, bp, nb_vnbufs); - else + } else { LIST_INSERT_AFTER(lastbp, bp, nb_vnbufs); + } lastbp = bp; - if (found) + if (found) { continue; + } nfs_buf_refget(bp); if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) { /* just skip this buffer */ @@ -5408,7 +5783,7 @@ nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp) ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp); dpptc = NULL; - for (i=0; (i < ndbhp->ndbh_count) && (cookie != dp->d_seekoff); i++) { + for (i = 0; (i < ndbhp->ndbh_count) && (cookie != dp->d_seekoff); i++) { if (*ptc && !dpptc && NFS_DIR_COOKIE_SAME32(cookie, dp->d_seekoff)) { dpptc = dp; iptc = i; @@ -5422,11 +5797,11 @@ nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp) } else if (i < ndbhp->ndbh_count) { *ptc = 0; } - if (i < (ndbhp->ndbh_count-1)) { + if (i < (ndbhp->ndbh_count - 1)) { /* next entry is *in* this buffer: return this block */ *lbnp = bp->nb_lblkno; found = 1; - } else if (i == (ndbhp->ndbh_count-1)) { + } else if (i == (ndbhp->ndbh_count - 1)) { /* next entry refers to *next* buffer: return next block */ *lbnp = dp->d_seekoff; found = 1; @@ -5438,13 +5813,13 @@ nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp) lck_mtx_unlock(nfs_buf_mutex); if (found) { OSAddAtomic64(1, &nfsstats.direofcache_hits); - return (0); + return 0; } /* still not found... 
oh well, just start a new block */ *lbnp = cookie; OSAddAtomic64(1, &nfsstats.direofcache_misses); - return (0); + return 0; } /* @@ -5453,8 +5828,8 @@ nfs_dir_cookie_to_lbn(nfsnode_t dnp, uint64_t cookie, int *ptc, uint64_t *lbnp) * Note: should only be called with RDIRPLUS directory buffers */ -#define NDBS_PURGE 1 -#define NDBS_UPDATE 2 +#define NDBS_PURGE 1 +#define NDBS_UPDATE 2 int nfs_dir_buf_search( @@ -5477,10 +5852,10 @@ nfs_dir_buf_search( /* scan the buffer for the name */ ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp); - for (i=0; i < ndbhp->ndbh_count; i++) { + for (i = 0; i < ndbhp->ndbh_count; i++) { nextlbn = dp->d_seekoff; if ((cnp->cn_namelen == dp->d_namlen) && !strcmp(cnp->cn_nameptr, dp->d_name)) { - fhlen = dp->d_name[dp->d_namlen+1]; + fhlen = dp->d_name[dp->d_namlen + 1]; nvattrp = NFS_DIR_BUF_NVATTR(bp, i); if ((ndbhp->ndbh_ncgen != bp->nb_np->n_ncgen) || (fhp->fh_len == 0) || (nvattrp->nva_type == VNON) || (nvattrp->nva_fileid == 0)) { @@ -5496,19 +5871,19 @@ nfs_dir_buf_search( } if (flags == NDBS_UPDATE) { /* update direntry's attrs if fh matches */ - if ((fhp->fh_len == fhlen) && !bcmp(&dp->d_name[dp->d_namlen+2], fhp->fh_data, fhlen)) { + if ((fhp->fh_len == fhlen) && !bcmp(&dp->d_name[dp->d_namlen + 2], fhp->fh_data, fhlen)) { bcopy(nvap, nvattrp, sizeof(*nvap)); dp->d_fileno = nvattrp->nva_fileid; nvattrp->nva_fileid = *xidp; - *(time_t*)(&dp->d_name[dp->d_namlen+2+fhp->fh_len]) = *attrstampp; + *(time_t*)(&dp->d_name[dp->d_namlen + 2 + fhp->fh_len]) = *attrstampp; } error = 0; break; } /* copy out fh, attrs, attrstamp, and xid */ fhp->fh_len = fhlen; - bcopy(&dp->d_name[dp->d_namlen+2], fhp->fh_data, MAX(fhp->fh_len, (int)sizeof(fhp->fh_data))); - *attrstampp = *(time_t*)(&dp->d_name[dp->d_namlen+2+fhp->fh_len]); + bcopy(&dp->d_name[dp->d_namlen + 2], fhp->fh_data, MAX(fhp->fh_len, (int)sizeof(fhp->fh_data))); + *attrstampp = *(time_t*)(&dp->d_name[dp->d_namlen + 2 + fhp->fh_len]); bcopy(nvattrp, nvap, sizeof(*nvap)); *xidp = nvap->nva_fileid; nvap->nva_fileid = dp->d_fileno; @@ -5517,9 +5892,10 @@ nfs_dir_buf_search( } dp = NFS_DIRENTRY_NEXT(dp); } - if (nextlbnp) + if (nextlbnp) { *nextlbnp = nextlbn; - return (error); + } + return error; } /* @@ -5543,18 +5919,22 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn int dotunder = (cnp->cn_namelen > 2) && (cnp->cn_nameptr[0] == '.') && (cnp->cn_nameptr[1] == '_'); nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (!purge) + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (!purge) { *npp = NULL; + } /* first check most recent buffer (and next one too) */ lbn = dnp->n_lastdbl; - for (i=0; i < 2; i++) { - if ((error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ|NBLK_ONLYVALID, &bp))) - return (error); - if (!bp) + for (i = 0; i < 2; i++) { + if ((error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ | NBLK_ONLYVALID, &bp))) { + return error; + } + if (!bp) { break; + } count++; error = nfs_dir_buf_search(bp, cnp, &fh, &nvattr, &xid, &attrstamp, &nextlbn, purge ? 
NDBS_PURGE : 0); nfs_buf_release(bp, 0); @@ -5586,15 +5966,18 @@ nfs_dir_buf_cache_lookup(nfsnode_t dnp, nfsnode_t *npp, struct componentname *cn lastbp = foundbp = NULL; while ((bp = LIST_FIRST(&blist))) { LIST_REMOVE(bp, nb_vnbufs); - if (!lastbp) + if (!lastbp) { LIST_INSERT_HEAD(&dnp->n_cleanblkhd, bp, nb_vnbufs); - else + } else { LIST_INSERT_AFTER(lastbp, bp, nb_vnbufs); + } lastbp = bp; - if (error || found) + if (error || found) { continue; - if (!purge && dotunder && (count > 100)) /* don't waste too much time looking for ._ files */ + } + if (!purge && dotunder && (count > 100)) { /* don't waste too much time looking for ._ files */ continue; + } nfs_buf_refget(bp); lbn = bp->nb_lblkno; if (nfs_buf_acquire(bp, NBAC_NOWAIT, 0, 0)) { @@ -5625,9 +6008,10 @@ done: if (!error && found && !purge) { error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, - &nvattr, &xid, dnp->n_auth, NG_MAKEENTRY, &newnp); - if (error) - return (error); + &nvattr, &xid, dnp->n_auth, NG_MAKEENTRY, &newnp); + if (error) { + return error; + } newnp->n_attrstamp = attrstamp; *npp = newnp; nfs_node_unlock(newnp); @@ -5635,7 +6019,7 @@ done: if (!nfs_getattr(newnp, &nvattr, ctx, NGA_CACHED) && (newnp->n_attrstamp != attrstamp)) { /* they are, so update them */ - error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ|NBLK_ONLYVALID, &bp); + error = nfs_buf_get(dnp, lbn, NFS_DIRBLKSIZ, thd, NBLK_READ | NBLK_ONLYVALID, &bp); if (!error && bp) { attrstamp = newnp->n_attrstamp; xid = newnp->n_xid; @@ -5646,7 +6030,7 @@ done: } } - return (error); + return error; } /* @@ -5659,8 +6043,9 @@ nfs_name_cache_purge(nfsnode_t dnp, nfsnode_t np, struct componentname *cnp, vfs struct nfsmount *nmp = NFSTONMP(dnp); cache_purge(NFSTOV(np)); - if (nmp && (nmp->nm_vers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) + if (nmp && (nmp->nm_vers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) { nfs_dir_buf_cache_lookup(dnp, NULL, cnp, ctx, 1); + } } /* @@ -5684,8 +6069,9 @@ nfs3_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) struct timeval now; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nmreaddirsize = nmp->nm_readdirsize; nmrsize = nmp->nm_rsize; @@ -5693,15 +6079,17 @@ nfs3_readdir_rpc(nfsnode_t dnp, struct nfsbuf *bp, vfs_context_t ctx) noplus: rdirplus = ((nfsvers > NFS_VER2) && NMFLAG(nmp, RDIRPLUS)) ? 
1 : 0; - if ((lockerror = nfs_node_lock(dnp))) - return (lockerror); + if ((lockerror = nfs_node_lock(dnp))) { + return lockerror; + } /* determine cookie to use, and move dp to the right offset */ ndbhp = (struct nfs_dir_buf_header*)bp->nb_data; dp = NFS_DIR_BUF_FIRST_DIRENTRY(bp); if (ndbhp->ndbh_count) { - for (i=0; i < ndbhp->ndbh_count-1; i++) + for (i = 0; i < ndbhp->ndbh_count - 1; i++) { dp = NFS_DIRENTRY_NEXT(dp); + } cookie = dp->d_seekoff; dp = NFS_DIRENTRY_NEXT(dp); } else { @@ -5720,7 +6108,7 @@ noplus: nfsm_chain_null(&nmrep); while (nfs_dir_buf_freespace(bp, rdirplus) && !(ndbhp->ndbh_flags & NDB_FULL)) { nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + NFSX_READDIR(nfsvers) + NFSX_UNSIGNED); + NFSX_FH(nfsvers) + NFSX_READDIR(nfsvers) + NFSX_UNSIGNED); nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize); if (nfsvers == NFS_VER3) { /* opaque values don't need swapping, but as long */ @@ -5731,27 +6119,32 @@ noplus: nfsm_chain_add_32(error, &nmreq, cookie); } nfsm_chain_add_32(error, &nmreq, nmreaddirsize); - if (rdirplus) + if (rdirplus) { nfsm_chain_add_32(error, &nmreq, nmrsize); + } nfsm_chain_build_done(error, &nmreq); nfs_node_unlock(dnp); lockerror = ENOENT; nfsmout_if(error); error = nfs_request(dnp, NULL, &nmreq, - rdirplus ? NFSPROC_READDIRPLUS : NFSPROC_READDIR, - ctx, NULL, &nmrep, &xid, &status); + rdirplus ? NFSPROC_READDIRPLUS : NFSPROC_READDIR, + ctx, NULL, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } savedxid = xid; - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid); - if (!error) + } + if (!error) { error = status; - if (nfsvers == NFS_VER3) + } + if (nfsvers == NFS_VER3) { nfsm_chain_get_64(error, &nmrep, dnp->n_cookieverf); + } nfsm_chain_get_32(error, &nmrep, more_entries); if (!lockerror) { @@ -5767,15 +6160,17 @@ noplus: } nfsmout_if(error); - if (rdirplus) + if (rdirplus) { microuptime(&now); + } /* loop through the entries packing them into the buffer */ while (more_entries) { - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_get_64(error, &nmrep, fileno); - else + } else { nfsm_chain_get_32(error, &nmrep, fileno); + } nfsm_chain_get_32(error, &nmrep, namlen); nfsmout_if(error); /* just truncate names that don't fit in direntry.d_name */ @@ -5783,7 +6178,7 @@ noplus: error = EBADRPC; goto nfsmout; } - if (namlen > (sizeof(dp->d_name)-1)) { + if (namlen > (sizeof(dp->d_name) - 1)) { skiplen = namlen - sizeof(dp->d_name) + 1; namlen = sizeof(dp->d_name) - 1; } else { @@ -5830,13 +6225,15 @@ nextbuffer: nfsm_chain_get_opaque(error, &nmrep, namlen, dp->d_name); nfsmout_if(error); dp->d_name[namlen] = '\0'; - if (skiplen) + if (skiplen) { nfsm_chain_adv(error, &nmrep, - nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen)); - if (nfsvers == NFS_VER3) + nfsm_rndup(namlen + skiplen) - nfsm_rndup(namlen)); + } + if (nfsvers == NFS_VER3) { nfsm_chain_get_64(error, &nmrep, cookie); - else + } else { nfsm_chain_get_32(error, &nmrep, cookie); + } nfsmout_if(error); dp->d_seekoff = cookie; if (!bigcookies && (cookie >> 32) && (nmp == NFSTONMP(dnp))) { @@ -5878,17 +6275,17 @@ nextbuffer: goto nextbuffer; } /* pack the file handle into the record */ - dp->d_name[dp->d_namlen+1] = fh.fh_len; - bcopy(fh.fh_data, &dp->d_name[dp->d_namlen+2], fh.fh_len); + dp->d_name[dp->d_namlen + 1] = fh.fh_len; + bcopy(fh.fh_data, &dp->d_name[dp->d_namlen + 2], fh.fh_len); } else { /* mark the 
file handle invalid */ fh.fh_len = 0; fhlen = fh.fh_len + 1; xlen = fhlen + sizeof(time_t); reclen = NFS_DIRENTRY_LEN(namlen + xlen); - bzero(&dp->d_name[dp->d_namlen+1], fhlen); + bzero(&dp->d_name[dp->d_namlen + 1], fhlen); } - *(time_t*)(&dp->d_name[dp->d_namlen+1+fhlen]) = now.tv_sec; + *(time_t*)(&dp->d_name[dp->d_namlen + 1 + fhlen]) = now.tv_sec; dp->d_reclen = reclen; } padstart = dp->d_name + dp->d_namlen + 1 + xlen; @@ -5899,8 +6296,9 @@ nextbuffer: ndbhp->ndbh_entry_end = (char*)dp - bp->nb_data; /* zero out the pad bytes */ padlen = (char*)dp - padstart; - if (padlen > 0) + if (padlen > 0) { bzero(padstart, padlen); + } /* check for more entries */ nfsm_chain_get_32(error, &nmrep, more_entries); nfsmout_if(error); @@ -5909,7 +6307,7 @@ nextbuffer: nfsm_chain_get_32(error, &nmrep, eof); nfsmout_if(error); if (eof) { - ndbhp->ndbh_flags |= (NDB_FULL|NDB_EOF); + ndbhp->ndbh_flags |= (NDB_FULL | NDB_EOF); nfs_node_lock_force(dnp); dnp->n_eofcookie = lastcookie; nfs_node_unlock(dnp); @@ -5921,20 +6319,23 @@ nextbuffer: bp = NULL; break; } - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } nfsmout_if(error); nfsm_chain_cleanup(&nmrep); nfsm_chain_null(&nmreq); } nfsmout: - if (bp_dropped && bp) + if (bp_dropped && bp) { nfs_buf_release(bp, 0); - if (!lockerror) + } + if (!lockerror) { nfs_node_unlock(dnp); + } nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (bp_dropped ? NFSERR_DIRBUFDROPPED : error); + return bp_dropped ? NFSERR_DIRBUFDROPPED : error; } /* @@ -5967,47 +6368,53 @@ nfs_sillyrename( struct nfsmount *nmp; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfs_name_cache_purge(dnp, np, cnp, ctx); MALLOC_ZONE(nsp, struct nfs_sillyrename *, - sizeof (struct nfs_sillyrename), M_NFSREQ, M_WAITOK); - if (!nsp) - return (ENOMEM); + sizeof(struct nfs_sillyrename), M_NFSREQ, M_WAITOK); + if (!nsp) { + return ENOMEM; + } cred = vfs_context_ucred(ctx); kauth_cred_ref(cred); nsp->nsr_cred = cred; nsp->nsr_dnp = dnp; error = vnode_ref(NFSTOV(dnp)); - if (error) + if (error) { goto bad_norele; + } /* Fudge together a funny name */ pid = vfs_context_pid(ctx); num = OSAddAtomic(1, &nfs_sillyrename_number); nsp->nsr_namlen = snprintf(nsp->nsr_name, sizeof(nsp->nsr_name), - NFS_SILLYNAME_FORMAT, num, (pid & 0xffff)); - if (nsp->nsr_namlen >= (int)sizeof(nsp->nsr_name)) + NFS_SILLYNAME_FORMAT, num, (pid & 0xffff)); + if (nsp->nsr_namlen >= (int)sizeof(nsp->nsr_name)) { nsp->nsr_namlen = sizeof(nsp->nsr_name) - 1; + } /* Try lookitups until we get one that isn't there */ while (nfs_lookitup(dnp, nsp->nsr_name, nsp->nsr_namlen, ctx, NULL) == 0) { num = OSAddAtomic(1, &nfs_sillyrename_number); nsp->nsr_namlen = snprintf(nsp->nsr_name, sizeof(nsp->nsr_name), - NFS_SILLYNAME_FORMAT, num, (pid & 0xffff)); - if (nsp->nsr_namlen >= (int)sizeof(nsp->nsr_name)) + NFS_SILLYNAME_FORMAT, num, (pid & 0xffff)); + if (nsp->nsr_namlen >= (int)sizeof(nsp->nsr_name)) { nsp->nsr_namlen = sizeof(nsp->nsr_name) - 1; + } } /* now, do the rename */ error = nmp->nm_funcs->nf_rename_rpc(dnp, cnp->cn_nameptr, cnp->cn_namelen, - dnp, nsp->nsr_name, nsp->nsr_namlen, ctx); + dnp, nsp->nsr_name, nsp->nsr_namlen, ctx); /* Kludge: Map ENOENT => 0 assuming that it is a reply to a retry. 
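 *
 * (Editorial note, not part of the original change: the same retry
 * heuristic as in nfs_vnop_remove() and nfs3_vnop_link() -- RENAME is
 * not idempotent.  On success the nfs_sillyrename record is attached
 * to the node (np->n_sillyrename below) and the hidden name is finally
 * removed via nfs_removeit() once the last reference to the node goes
 * away.)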
*/ - if (error == ENOENT) + if (error == ENOENT) { error = 0; + } if (!error) { nfs_node_lock_force(dnp); if (dnp->n_flag & NNEGNCENTRIES) { @@ -6017,20 +6424,21 @@ nfs_sillyrename( nfs_node_unlock(dnp); } FSDBG(267, dnp, np, num, error); - if (error) + if (error) { goto bad; + } error = nfs_lookitup(dnp, nsp->nsr_name, nsp->nsr_namlen, ctx, &np); nfs_node_lock_force(np); np->n_sillyrename = nsp; nfs_node_unlock(np); - return (0); + return 0; bad: vnode_rele(NFSTOV(dnp)); bad_norele: nsp->nsr_cred = NOCRED; kauth_cred_unref(&cred); FREE_ZONE(nsp, sizeof(*nsp), M_NFSREQ); - return (error); + return error; } int @@ -6046,23 +6454,24 @@ nfs3_lookup_rpc_async( int error = 0, nfsvers; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmreq); nfsm_chain_build_alloc_init(error, &nmreq, - NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(namelen)); + NFSX_FH(nfsvers) + NFSX_UNSIGNED + nfsm_rndup(namelen)); nfsm_chain_add_fh(error, &nmreq, nfsvers, dnp->n_fhp, dnp->n_fhsize); nfsm_chain_add_name(error, &nmreq, name, namelen, nmp); nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request_async(dnp, NULL, &nmreq, NFSPROC_LOOKUP, - vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, reqp); + vfs_context_thread(ctx), vfs_context_ucred(ctx), NULL, 0, NULL, reqp); nfsmout: nfsm_chain_cleanup(&nmreq); - return (error); + return error; } int @@ -6082,22 +6491,26 @@ nfs3_lookup_rpc_async_finish( struct nfsm_chain nmrep; nmp = NFSTONMP(dnp); - if (nmp == NULL) - return (ENXIO); + if (nmp == NULL) { + return ENXIO; + } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmrep); error = nfs_request_async_finish(req, &nmrep, xidp, &status); - if ((lockerror = nfs_node_lock(dnp))) + if ((lockerror = nfs_node_lock(dnp))) { error = lockerror; + } xid = *xidp; if (error || status) { - if (nfsvers == NFS_VER3) + if (nfsvers == NFS_VER3) { nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid); - if (!error) + } + if (!error) { error = status; + } goto nfsmout; } @@ -6110,16 +6523,18 @@ nfs3_lookup_rpc_async_finish( if (nfsvers == NFS_VER3) { nfsm_chain_postop_attr_get(error, &nmrep, attrflag, nvap); nfsm_chain_postop_attr_update(error, &nmrep, dnp, &xid); - if (!error && !attrflag) + if (!error && !attrflag) { error = nfs3_getattr_rpc(NULL, NFSTOMP(dnp), fhp->fh_data, fhp->fh_len, 0, ctx, nvap, xidp); + } } else { error = nfs_parsefattr(&nmrep, nfsvers, nvap); } nfsmout: - if (!lockerror) + if (!lockerror) { nfs_node_unlock(dnp); + } nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* @@ -6147,12 +6562,14 @@ nfs_lookitup( struct nfsreq rq, *req = &rq; nmp = NFSTONMP(dnp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } if (NFS_BITMAP_ISSET(nmp->nm_fsattr.nfsa_bitmap, NFS_FATTR_MAXNAME) && - (namelen > (int)nmp->nm_fsattr.nfsa_maxname)) - return (ENAMETOOLONG); + (namelen > (int)nmp->nm_fsattr.nfsa_maxname)) { + return ENAMETOOLONG; + } NVATTR_INIT(&nvattr); @@ -6176,15 +6593,16 @@ nfs_lookitup( if (fh.fh_len > NFS_SMALLFH) { MALLOC_ZONE(np->n_fhp, u_char *, fh.fh_len, M_NFSBIGFH, M_WAITOK); if (!np->n_fhp) { - np->n_fhp = oldbuf; - error = ENOMEM; - goto nfsmout; + np->n_fhp = oldbuf; + error = ENOMEM; + goto nfsmout; } } else { np->n_fhp = &np->n_fh[0]; } - if (oldbuf) + if (oldbuf) { FREE_ZONE(oldbuf, np->n_fhsize, M_NFSBIGFH); + } } bcopy(fh.fh_data, np->n_fhp, fh.fh_len); np->n_fhsize = fh.fh_len; @@ -6195,8 +6613,9 @@ 
nfs_lookitup( newnp = np; } else if (NFS_CMPFH(dnp, fh.fh_data, fh.fh_len)) { nfs_node_lock_force(dnp); - if (dnp->n_xid <= xid) + if (dnp->n_xid <= xid) { error = nfs_loadattrcache(dnp, &nvattr, &xid, 0); + } nfs_node_unlock(dnp); nfsmout_if(error); newnp = dnp; @@ -6206,16 +6625,17 @@ nfs_lookitup( cnp->cn_nameptr = name; cnp->cn_namelen = namelen; error = nfs_nget(NFSTOMP(dnp), dnp, cnp, fh.fh_data, fh.fh_len, - &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); + &nvattr, &xid, rq.r_auth, NG_MAKEENTRY, &np); nfsmout_if(error); newnp = np; } nfsmout: - if (npp && !*npp && !error) + if (npp && !*npp && !error) { *npp = newnp; + } NVATTR_CLEANUP(&nvattr); - return (error); + return error; } /* @@ -6233,16 +6653,20 @@ nfs_dulookup_init(struct nfs_dulookup *dulp, nfsnode_t dnp, const char *name, in dulp->du_flags = 0; bzero(&dulp->du_cn, sizeof(dulp->du_cn)); du_namelen = namelen + 2; - if (!nmp || NMFLAG(nmp, NONEGNAMECACHE)) + if (!nmp || NMFLAG(nmp, NONEGNAMECACHE)) { return; - if ((namelen >= 2) && (name[0] == '.') && (name[1] == '_')) + } + if ((namelen >= 2) && (name[0] == '.') && (name[1] == '_')) { return; - if (du_namelen >= (int)sizeof(dulp->du_smallname)) + } + if (du_namelen >= (int)sizeof(dulp->du_smallname)) { MALLOC(dulp->du_cn.cn_nameptr, char *, du_namelen + 1, M_TEMP, M_WAITOK); - else + } else { dulp->du_cn.cn_nameptr = dulp->du_smallname; - if (!dulp->du_cn.cn_nameptr) + } + if (!dulp->du_cn.cn_nameptr) { return; + } dulp->du_cn.cn_namelen = du_namelen; snprintf(dulp->du_cn.cn_nameptr, du_namelen + 1, "._%s", name); dulp->du_cn.cn_nameptr[du_namelen] = '\0'; @@ -6264,8 +6688,9 @@ nfs_dulookup_init(struct nfs_dulookup *dulp, nfsnode_t dnp, const char *name, in error = -1; } } - if (!error) + if (!error) { dulp->du_flags |= NFS_DULOOKUP_DOIT; + } } } @@ -6278,11 +6703,13 @@ nfs_dulookup_start(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx) struct nfsmount *nmp = NFSTONMP(dnp); struct nfsreq *req = &dulp->du_req; - if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_DOIT) || (dulp->du_flags & NFS_DULOOKUP_INPROG)) + if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_DOIT) || (dulp->du_flags & NFS_DULOOKUP_INPROG)) { return; + } if (!nmp->nm_funcs->nf_lookup_rpc_async(dnp, dulp->du_cn.cn_nameptr, - dulp->du_cn.cn_namelen, ctx, &req)) + dulp->du_cn.cn_namelen, ctx, &req)) { dulp->du_flags |= NFS_DULOOKUP_INPROG; + } } /* @@ -6298,12 +6725,13 @@ nfs_dulookup_finish(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx) fhandle_t fh; struct nfs_vattr nvattr; - if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_INPROG)) + if (!nmp || !(dulp->du_flags & NFS_DULOOKUP_INPROG)) { goto out; + } NVATTR_INIT(&nvattr); error = nmp->nm_funcs->nf_lookup_rpc_async_finish(dnp, dulp->du_cn.cn_nameptr, - dulp->du_cn.cn_namelen, ctx, &dulp->du_req, &xid, &fh, &nvattr); + dulp->du_cn.cn_namelen, ctx, &dulp->du_req, &xid, &fh, &nvattr); dulp->du_flags &= ~NFS_DULOOKUP_INPROG; if (error == ENOENT) { /* add a negative entry in the name cache */ @@ -6313,7 +6741,7 @@ nfs_dulookup_finish(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx) nfs_node_unlock(dnp); } else if (!error) { error = nfs_nget(NFSTOMP(dnp), dnp, &dulp->du_cn, fh.fh_data, fh.fh_len, - &nvattr, &xid, dulp->du_req.r_auth, NG_MAKEENTRY, &du_np); + &nvattr, &xid, dulp->du_req.r_auth, NG_MAKEENTRY, &du_np); if (!error) { nfs_node_unlock(du_np); vnode_put(NFSTOV(du_np)); @@ -6321,10 +6749,12 @@ nfs_dulookup_finish(struct nfs_dulookup *dulp, nfsnode_t dnp, vfs_context_t ctx) } NVATTR_CLEANUP(&nvattr); out: - if (dulp->du_flags & 
NFS_DULOOKUP_INPROG) + if (dulp->du_flags & NFS_DULOOKUP_INPROG) { nfs_request_async_cancel(&dulp->du_req); - if (dulp->du_cn.cn_nameptr && (dulp->du_cn.cn_nameptr != dulp->du_smallname)) + } + if (dulp->du_cn.cn_nameptr && (dulp->du_cn.cn_nameptr != dulp->du_smallname)) { FREE(dulp->du_cn.cn_nameptr, M_TEMP); + } } @@ -6348,16 +6778,19 @@ nfs3_commit_rpc( nmp = NFSTONMP(np); FSDBG(521, np, offset, count, nmp ? nmp->nm_state : 0); - if (nfs_mount_gone(nmp)) - return (ENXIO); - if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) - return (0); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } + if (!(nmp->nm_state & NFSSTA_HASWRITEVERF)) { + return 0; + } nfsvers = nmp->nm_vers; - if (count > UINT32_MAX) + if (count > UINT32_MAX) { count32 = 0; - else + } else { count32 = count; + } nfsm_chain_null(&nmreq); nfsm_chain_null(&nmrep); @@ -6369,44 +6802,49 @@ nfs3_commit_rpc( nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request2(np, NULL, &nmreq, NFSPROC_COMMIT, - current_thread(), cred, NULL, 0, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + current_thread(), cred, NULL, 0, &nmrep, &xid, &status); + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } /* can we do anything useful with the wcc info? */ nfsm_chain_get_wcc_data(error, &nmrep, np, &premtime, &wccpostattr, &xid); - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); - if (!error) + } + if (!error) { error = status; + } nfsm_chain_get_64(error, &nmrep, newwverf); nfsmout_if(error); lck_mtx_lock(&nmp->nm_lock); - if (nmp->nm_verf != newwverf) + if (nmp->nm_verf != newwverf) { nmp->nm_verf = newwverf; - if (wverf != newwverf) + } + if (wverf != newwverf) { error = NFSERR_STALEWRITEVERF; + } lck_mtx_unlock(&nmp->nm_lock); nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } int nfs_vnop_blockmap( __unused struct vnop_blockmap_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - off_t a_foffset; - size_t a_size; - daddr64_t *a_bpn; - size_t *a_run; - void *a_poff; - int a_flags; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * off_t a_foffset; + * size_t a_size; + * daddr64_t *a_bpn; + * size_t *a_run; + * void *a_poff; + * int a_flags; + * } */*ap) { - return (ENOTSUP); + return ENOTSUP; } @@ -6417,13 +6855,13 @@ nfs_vnop_blockmap( int nfs_vnop_fsync( struct vnop_fsync_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_waitfor; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_waitfor; + * vfs_context_t a_context; + * } */*ap) { - return (nfs_flush(VTONFS(ap->a_vp), ap->a_waitfor, vfs_context_thread(ap->a_context), 0)); + return nfs_flush(VTONFS(ap->a_vp), ap->a_waitfor, vfs_context_thread(ap->a_context), 0); } @@ -6442,8 +6880,9 @@ nfs3_pathconf_rpc( struct nfsmount *nmp = NFSTONMP(np); uint32_t val = 0; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } nfsvers = nmp->nm_vers; nfsm_chain_null(&nmreq); @@ -6455,28 +6894,35 @@ nfs3_pathconf_rpc( nfsm_chain_build_done(error, &nmreq); nfsmout_if(error); error = nfs_request(np, NULL, &nmreq, NFSPROC_PATHCONF, ctx, NULL, &nmrep, &xid, &status); - if ((lockerror = nfs_node_lock(np))) + if ((lockerror = nfs_node_lock(np))) { error = lockerror; + } nfsm_chain_postop_attr_update(error, &nmrep, np, &xid); - if (!lockerror) + if (!lockerror) { nfs_node_unlock(np); - if (!error) + } + if (!error) { error = status; + } nfsm_chain_get_32(error, &nmrep, nfsap->nfsa_maxlink); 
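/*
 * A minimal sketch (not part of the patch) of the PATHCONF decode pattern
 * that continues just below: the NFSv3 reply carries four 32-bit XDR
 * booleans (no_trunc, chown_restricted, case_insensitive, case_preserving),
 * and each nonzero word sets a single capability bit. This standalone
 * version assumes the words have already been pulled out of the mbuf
 * chain; decode_pathconf_flags() and the SK_* bits are hypothetical
 * stand-ins, not xnu APIs.
 */
#include <stdint.h>

#define SK_NO_TRUNC          0x01
#define SK_CHOWN_RESTRICTED  0x02
#define SK_CASE_INSENSITIVE  0x04
#define SK_CASE_PRESERVING   0x08

static uint32_t
decode_pathconf_flags(const uint32_t xdr_bool[4])
{
	/* table keeps the word order of the reply in one obvious place */
	static const uint32_t map[4] = {
		SK_NO_TRUNC, SK_CHOWN_RESTRICTED,
		SK_CASE_INSENSITIVE, SK_CASE_PRESERVING,
	};
	uint32_t flags = 0;

	for (int i = 0; i < 4; i++) {
		if (xdr_bool[i] != 0) {	/* any nonzero XDR boolean is "true" */
			flags |= map[i];
		}
	}
	return flags;
}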
nfsm_chain_get_32(error, &nmrep, nfsap->nfsa_maxname); - nfsap->nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC|NFS_FSFLAG_CHOWN_RESTRICTED|NFS_FSFLAG_CASE_INSENSITIVE|NFS_FSFLAG_CASE_PRESERVING); + nfsap->nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC | NFS_FSFLAG_CHOWN_RESTRICTED | NFS_FSFLAG_CASE_INSENSITIVE | NFS_FSFLAG_CASE_PRESERVING); nfsm_chain_get_32(error, &nmrep, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_NO_TRUNC; + } nfsm_chain_get_32(error, &nmrep, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_CHOWN_RESTRICTED; + } nfsm_chain_get_32(error, &nmrep, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_CASE_INSENSITIVE; + } nfsm_chain_get_32(error, &nmrep, val); - if (val) + if (val) { nfsap->nfsa_flags |= NFS_FSFLAG_CASE_PRESERVING; + } NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_MAXLINK); NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_MAXNAME); NFS_BITMAP_SET(nfsap->nfsa_bitmap, NFS_FATTR_NO_TRUNC); @@ -6486,7 +6932,7 @@ nfs3_pathconf_rpc( nfsmout: nfsm_chain_cleanup(&nmreq); nfsm_chain_cleanup(&nmrep); - return (error); + return error; } /* save pathconf info for NFSv3 mount */ @@ -6495,7 +6941,7 @@ nfs3_pathconf_cache(struct nfsmount *nmp, struct nfs_fsattr *nfsap) { nmp->nm_fsattr.nfsa_maxlink = nfsap->nfsa_maxlink; nmp->nm_fsattr.nfsa_maxname = nfsap->nfsa_maxname; - nmp->nm_fsattr.nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC|NFS_FSFLAG_CHOWN_RESTRICTED|NFS_FSFLAG_CASE_INSENSITIVE|NFS_FSFLAG_CASE_PRESERVING); + nmp->nm_fsattr.nfsa_flags &= ~(NFS_FSFLAG_NO_TRUNC | NFS_FSFLAG_CHOWN_RESTRICTED | NFS_FSFLAG_CASE_INSENSITIVE | NFS_FSFLAG_CASE_PRESERVING); nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_NO_TRUNC; nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CHOWN_RESTRICTED; nmp->nm_fsattr.nfsa_flags |= nfsap->nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE; @@ -6519,12 +6965,12 @@ nfs3_pathconf_cache(struct nfsmount *nmp, struct nfs_fsattr *nfsap) int nfs_vnop_pathconf( struct vnop_pathconf_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_name; - int32_t *a_retval; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_name; + * int32_t *a_retval; + * vfs_context_t a_context; + * } */*ap) { vnode_t vp = ap->a_vp; nfsnode_t np = VTONFS(vp); @@ -6535,8 +6981,9 @@ nfs_vnop_pathconf( uint nbits; nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } switch (ap->a_name) { case _PC_LINK_MAX: @@ -6549,22 +6996,24 @@ nfs_vnop_pathconf( case _PC_FILESIZEBITS: if (nmp->nm_vers == NFS_VER2) { *ap->a_retval = 32; - return (0); + return 0; } break; case _PC_XATTR_SIZE_BITS: /* Do we support xattrs natively? */ - if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) + if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) { break; /* Yes */ - /* No... so just return an error */ - /* FALLTHROUGH */ + } + /* No... 
so just return an error */ + /* FALLTHROUGH */ default: /* don't bother contacting the server if we know the answer */ - return (EINVAL); + return EINVAL; } - if (nmp->nm_vers == NFS_VER2) - return (EINVAL); + if (nmp->nm_vers == NFS_VER2) { + return EINVAL; + } lck_mtx_lock(&nmp->nm_lock); if (nmp->nm_vers == NFS_VER3) { @@ -6573,11 +7022,13 @@ nfs_vnop_pathconf( lck_mtx_unlock(&nmp->nm_lock); NFS_CLEAR_ATTRIBUTES(nfsa.nfsa_bitmap); error = nfs3_pathconf_rpc(np, &nfsa, ap->a_context); - if (error) - return (error); + if (error) { + return error; + } nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } lck_mtx_lock(&nmp->nm_lock); if (nmp->nm_fsattr.nfsa_flags & NFS_FSFLAG_HOMOGENEOUS) { /* all files have the same pathconf info, */ @@ -6593,11 +7044,13 @@ nfs_vnop_pathconf( lck_mtx_unlock(&nmp->nm_lock); NFS_CLEAR_ATTRIBUTES(nfsa.nfsa_bitmap); error = nfs4_pathconf_rpc(np, &nfsa, ap->a_context); - if (error) - return (error); + if (error) { + return error; + } nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } lck_mtx_lock(&nmp->nm_lock); nfsap = &nfsa; } else { @@ -6606,42 +7059,48 @@ nfs_vnop_pathconf( switch (ap->a_name) { case _PC_LINK_MAX: - if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXLINK)) + if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXLINK)) { *ap->a_retval = nfsap->nfsa_maxlink; - else if ((nmp->nm_vers == NFS_VER4) && NFS_BITMAP_ISSET(np->n_vattr.nva_bitmap, NFS_FATTR_MAXLINK)) + } else if ((nmp->nm_vers == NFS_VER4) && NFS_BITMAP_ISSET(np->n_vattr.nva_bitmap, NFS_FATTR_MAXLINK)) { *ap->a_retval = np->n_vattr.nva_maxlink; - else + } else { error = EINVAL; + } break; case _PC_NAME_MAX: - if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXNAME)) + if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_MAXNAME)) { *ap->a_retval = nfsap->nfsa_maxname; - else + } else { error = EINVAL; + } break; case _PC_CHOWN_RESTRICTED: - if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CHOWN_RESTRICTED)) + if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CHOWN_RESTRICTED)) { *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CHOWN_RESTRICTED) ? 200112 /* _POSIX_CHOWN_RESTRICTED */ : 0; - else + } else { error = EINVAL; + } break; case _PC_NO_TRUNC: - if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_NO_TRUNC)) + if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_NO_TRUNC)) { *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_NO_TRUNC) ? 200112 /* _POSIX_NO_TRUNC */ : 0; - else + } else { error = EINVAL; + } break; case _PC_CASE_SENSITIVE: - if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE)) + if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_INSENSITIVE)) { *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CASE_INSENSITIVE) ? 0 : 1; - else + } else { error = EINVAL; + } break; case _PC_CASE_PRESERVING: - if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_PRESERVING)) + if (NFS_BITMAP_ISSET(nfsap->nfsa_bitmap, NFS_FATTR_CASE_PRESERVING)) { *ap->a_retval = (nfsap->nfsa_flags & NFS_FSFLAG_CASE_PRESERVING) ? 
1 : 0; - else + } else { error = EINVAL; + } break; case _PC_XATTR_SIZE_BITS: /* same as file size bits if named attrs supported */ case _PC_FILESIZEBITS: @@ -6683,7 +7142,7 @@ nfs_vnop_pathconf( lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } /* @@ -6692,12 +7151,12 @@ nfs_vnop_pathconf( int nfsspec_vnop_read( struct vnop_read_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */*ap) { nfsnode_t np = VTONFS(ap->a_vp); struct timespec now; @@ -6706,14 +7165,15 @@ nfsspec_vnop_read( /* * Set access flag. */ - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } np->n_flag |= NACC; nanotime(&now); np->n_atim.tv_sec = now.tv_sec; np->n_atim.tv_nsec = now.tv_nsec; nfs_node_unlock(np); - return (VOCALL(spec_vnodeop_p, VOFFSET(vnop_read), ap)); + return VOCALL(spec_vnodeop_p, VOFFSET(vnop_read), ap); } /* @@ -6722,12 +7182,12 @@ nfsspec_vnop_read( int nfsspec_vnop_write( struct vnop_write_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */*ap) { nfsnode_t np = VTONFS(ap->a_vp); struct timespec now; @@ -6736,14 +7196,15 @@ nfsspec_vnop_write( /* * Set update flag. */ - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } np->n_flag |= NUPD; nanotime(&now); np->n_mtim.tv_sec = now.tv_sec; np->n_mtim.tv_nsec = now.tv_nsec; nfs_node_unlock(np); - return (VOCALL(spec_vnodeop_p, VOFFSET(vnop_write), ap)); + return VOCALL(spec_vnodeop_p, VOFFSET(vnop_write), ap); } /* @@ -6754,11 +7215,11 @@ nfsspec_vnop_write( int nfsspec_vnop_close( struct vnop_close_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_fflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_fflag; + * vfs_context_t a_context; + * } */*ap) { vnode_t vp = ap->a_vp; nfsnode_t np = VTONFS(vp); @@ -6766,8 +7227,9 @@ nfsspec_vnop_close( mount_t mp; int error; - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } if (np->n_flag & (NACC | NUPD)) { np->n_flag |= NCHG; if (!vnode_isinuse(vp, 0) && (mp = vnode_mount(vp)) && !vfs_isrdonly(mp)) { @@ -6788,7 +7250,7 @@ nfsspec_vnop_close( } else { nfs_node_unlock(np); } - return (VOCALL(spec_vnodeop_p, VOFFSET(vnop_close), ap)); + return VOCALL(spec_vnodeop_p, VOFFSET(vnop_close), ap); } #if FIFO @@ -6800,12 +7262,12 @@ extern vnop_t **fifo_vnodeop_p; int nfsfifo_vnop_read( struct vnop_read_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */*ap) { nfsnode_t np = VTONFS(ap->a_vp); struct timespec now; @@ -6814,14 +7276,15 @@ nfsfifo_vnop_read( /* * Set access flag. 
*/ - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } np->n_flag |= NACC; nanotime(&now); np->n_atim.tv_sec = now.tv_sec; np->n_atim.tv_nsec = now.tv_nsec; nfs_node_unlock(np); - return (VOCALL(fifo_vnodeop_p, VOFFSET(vnop_read), ap)); + return VOCALL(fifo_vnodeop_p, VOFFSET(vnop_read), ap); } /* @@ -6830,12 +7293,12 @@ nfsfifo_vnop_read( int nfsfifo_vnop_write( struct vnop_write_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */*ap) { nfsnode_t np = VTONFS(ap->a_vp); struct timespec now; @@ -6844,14 +7307,15 @@ nfsfifo_vnop_write( /* * Set update flag. */ - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } np->n_flag |= NUPD; nanotime(&now); np->n_mtim.tv_sec = now.tv_sec; np->n_mtim.tv_nsec = now.tv_nsec; nfs_node_unlock(np); - return (VOCALL(fifo_vnodeop_p, VOFFSET(vnop_write), ap)); + return VOCALL(fifo_vnodeop_p, VOFFSET(vnop_write), ap); } /* @@ -6862,11 +7326,11 @@ nfsfifo_vnop_write( int nfsfifo_vnop_close( struct vnop_close_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_fflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_fflag; + * vfs_context_t a_context; + * } */*ap) { vnode_t vp = ap->a_vp; nfsnode_t np = VTONFS(vp); @@ -6875,8 +7339,9 @@ nfsfifo_vnop_close( mount_t mp; int error; - if ((error = nfs_node_lock(np))) - return (error); + if ((error = nfs_node_lock(np))) { + return error; + } if (np->n_flag & (NACC | NUPD)) { nanotime(&now); if (np->n_flag & NACC) { @@ -6906,7 +7371,7 @@ nfsfifo_vnop_close( } else { nfs_node_unlock(np); } - return (VOCALL(fifo_vnodeop_p, VOFFSET(vnop_close), ap)); + return VOCALL(fifo_vnodeop_p, VOFFSET(vnop_close), ap); } #endif /* FIFO */ @@ -6914,13 +7379,13 @@ nfsfifo_vnop_close( int nfs_vnop_ioctl( struct vnop_ioctl_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - u_int32_t a_command; - caddr_t a_data; - int a_fflag; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * u_int32_t a_command; + * caddr_t a_data; + * int a_fflag; + * vfs_context_t a_context; + * } */*ap) { vfs_context_t ctx = ap->a_context; vnode_t vp = ap->a_vp; @@ -6929,28 +7394,32 @@ nfs_vnop_ioctl( uint32_t len; int error = ENOTTY; - if (mp == NULL) - return (ENXIO); - - switch (ap->a_command) { + if (mp == NULL) { + return ENXIO; + } + switch (ap->a_command) { case F_FULLFSYNC: - if (vnode_vfsisrdonly(vp)) - return (EROFS); + if (vnode_vfsisrdonly(vp)) { + return EROFS; + } error = nfs_flush(VTONFS(vp), MNT_WAIT, vfs_context_thread(ctx), 0); break; case NFS_IOC_DESTROY_CRED: - if (!auth_is_kerberized(mp->nm_auth)) - return (ENOTSUP); + if (!auth_is_kerberized(mp->nm_auth)) { + return ENOTSUP; + } error = nfs_gss_clnt_ctx_remove(mp, vfs_context_ucred(ctx)); break; case NFS_IOC_SET_CRED: case NFS_IOC_SET_CRED64: - if (!auth_is_kerberized(mp->nm_auth)) - return (ENOTSUP); + if (!auth_is_kerberized(mp->nm_auth)) { + return ENOTSUP; + } if ((ap->a_command == NFS_IOC_SET_CRED && vfs_context_is64bit(ctx)) || - (ap->a_command == NFS_IOC_SET_CRED64 && !vfs_context_is64bit(ctx))) - return (EINVAL); + (ap->a_command == NFS_IOC_SET_CRED64 && !vfs_context_is64bit(ctx))) { + return EINVAL; + } if (vfs_context_is64bit(ctx)) { gprinc = *(struct 
user_nfs_gss_principal *)ap->a_data; } else { @@ -6961,18 +7430,20 @@ nfs_vnop_ioctl( gprinc.principal = CAST_USER_ADDR_T(tp->principal); } NFS_DBG(NFS_FAC_GSS, 7, "Enter NFS_FSCTL_SET_CRED (64-bit=%d): principal length %d name type %d usr pointer 0x%llx\n", vfs_context_is64bit(ctx), gprinc.princlen, gprinc.nametype, (unsigned long long)gprinc.principal); - if (gprinc.princlen > MAXPATHLEN) - return (EINVAL); + if (gprinc.princlen > MAXPATHLEN) { + return EINVAL; + } uint8_t *p; - MALLOC(p, uint8_t *, gprinc.princlen+1, M_TEMP, M_WAITOK|M_ZERO); - if (p == NULL) - return (ENOMEM); + MALLOC(p, uint8_t *, gprinc.princlen + 1, M_TEMP, M_WAITOK | M_ZERO); + if (p == NULL) { + return ENOMEM; + } error = copyin(gprinc.principal, p, gprinc.princlen); if (error) { NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_SET_CRED could not copy in princiapl data of len %d: %d\n", - gprinc.princlen, error); + gprinc.princlen, error); FREE(p, M_TEMP); - return (error); + return error; } NFS_DBG(NFS_FAC_GSS, 7, "Seting credential to principal %s\n", p); error = nfs_gss_clnt_ctx_set_principal(mp, ctx, p, gprinc.princlen, gprinc.nametype); @@ -6981,67 +7452,74 @@ nfs_vnop_ioctl( break; case NFS_IOC_GET_CRED: case NFS_IOC_GET_CRED64: - if (!auth_is_kerberized(mp->nm_auth)) - return (ENOTSUP); + if (!auth_is_kerberized(mp->nm_auth)) { + return ENOTSUP; + } if ((ap->a_command == NFS_IOC_GET_CRED && vfs_context_is64bit(ctx)) || - (ap->a_command == NFS_IOC_GET_CRED64 && !vfs_context_is64bit(ctx))) - return (EINVAL); + (ap->a_command == NFS_IOC_GET_CRED64 && !vfs_context_is64bit(ctx))) { + return EINVAL; + } error = nfs_gss_clnt_ctx_get_principal(mp, ctx, &gprinc); - if (error) + if (error) { break; + } if (vfs_context_is64bit(ctx)) { struct user_nfs_gss_principal *upp = (struct user_nfs_gss_principal *)ap->a_data; len = upp->princlen; - if (gprinc.princlen < len) + if (gprinc.princlen < len) { len = gprinc.princlen; + } upp->princlen = gprinc.princlen; upp->nametype = gprinc.nametype; upp->flags = gprinc.flags; - if (gprinc.principal) + if (gprinc.principal) { error = copyout((void *)gprinc.principal, upp->principal, len); - else + } else { upp->principal = USER_ADDR_NULL; + } } else { struct nfs_gss_principal *u32pp = (struct nfs_gss_principal *)ap->a_data; len = u32pp->princlen; - if (gprinc.princlen < len) + if (gprinc.princlen < len) { len = gprinc.princlen; + } u32pp->princlen = gprinc.princlen; u32pp->nametype = gprinc.nametype; u32pp->flags = gprinc.flags; - if (gprinc.principal) + if (gprinc.principal) { error = copyout((void *)gprinc.principal, u32pp->principal, len); - else + } else { u32pp->principal = (user32_addr_t)0; + } } if (error) { NFS_DBG(NFS_FAC_GSS, 7, "NFS_FSCTL_GET_CRED could not copy out princiapl data of len %d: %d\n", - gprinc.princlen, error); + gprinc.princlen, error); } - if (gprinc.principal) + if (gprinc.principal) { FREE(gprinc.principal, M_TEMP); + } } - return (error); + return error; } /*ARGSUSED*/ int nfs_vnop_select( __unused struct vnop_select_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - int a_which; - int a_fflags; - void *a_wql; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * int a_which; + * int a_fflags; + * void *a_wql; + * vfs_context_t a_context; + * } */*ap) { - /* * We were once bogusly seltrue() which returns 1. Is this right? 
*/ - return (1); + return 1; } /* @@ -7052,15 +7530,15 @@ nfs_vnop_select( int nfs_vnop_pagein( struct vnop_pagein_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - upl_t a_pl; - vm_offset_t a_pl_offset; - off_t a_f_offset; - size_t a_size; - int a_flags; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * upl_t a_pl; + * vm_offset_t a_pl_offset; + * off_t a_f_offset; + * size_t a_size; + * int a_flags; + * vfs_context_t a_context; + * } */*ap) { vnode_t vp = ap->a_vp; upl_t pl = ap->a_pl; @@ -7077,58 +7555,65 @@ nfs_vnop_pagein( int error = 0; vm_offset_t ioaddr, rxaddr; uio_t uio; - char uio_buf [ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; int nofreeupl = flags & UPL_NOCOMMIT; upl_page_info_t *plinfo; -#define MAXPAGINGREQS 16 /* max outstanding RPCs for pagein/pageout */ +#define MAXPAGINGREQS 16 /* max outstanding RPCs for pagein/pageout */ struct nfsreq *req[MAXPAGINGREQS]; int nextsend, nextwait; uint32_t stategenid = 0, restart = 0; kern_return_t kret; FSDBG(322, np, f_offset, size, flags); - if (pl == (upl_t)NULL) + if (pl == (upl_t)NULL) { panic("nfs_pagein: no upl"); + } if (size <= 0) { printf("nfs_pagein: invalid size %ld", size); - if (!nofreeupl) + if (!nofreeupl) { (void) ubc_upl_abort_range(pl, pl_offset, size, 0); - return (EINVAL); + } + return EINVAL; } if (f_offset < 0 || f_offset >= (off_t)np->n_size || (f_offset & PAGE_MASK_64)) { - if (!nofreeupl) + if (!nofreeupl) { ubc_upl_abort_range(pl, pl_offset, size, - UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); - return (EINVAL); + UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + } + return EINVAL; } thd = vfs_context_thread(ap->a_context); cred = ubc_getcred(vp); - if (!IS_VALID_CRED(cred)) + if (!IS_VALID_CRED(cred)) { cred = vfs_context_ucred(ap->a_context); + } uio = uio_createwithbuffer(1, f_offset, UIO_SYSSPACE, UIO_READ, - &uio_buf, sizeof(uio_buf)); + &uio_buf, sizeof(uio_buf)); nmp = VTONMP(vp); if (nfs_mount_gone(nmp)) { - if (!nofreeupl) + if (!nofreeupl) { ubc_upl_abort_range(pl, pl_offset, size, - UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); - return (ENXIO); + UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY); + } + return ENXIO; } nmrsize = nmp->nm_rsize; plinfo = ubc_upl_pageinfo(pl); kret = ubc_upl_map(pl, &ioaddr); - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { panic("nfs_vnop_pagein: ubc_upl_map() failed with (%d)", kret); + } ioaddr += pl_offset; tryagain: - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { stategenid = nmp->nm_stategenid; + } txsize = rxsize = size; txoffset = f_offset; rxaddr = ioaddr; @@ -7185,10 +7670,11 @@ tryagain: FSDBG(324, uio_offset(uio), retsize, zcnt, rxaddr); uio_update(uio, zcnt); } - rxaddr += iosize; + rxaddr += iosize; rxsize -= iosize; - if (txsize) + if (txsize) { break; + } } } while (!error && (txsize || rxsize)); @@ -7206,10 +7692,12 @@ cancel: error = EIO; } else if (restart) { if (restart <= nfs_mount_state_max_restarts(nmp)) { /* guard against no progress */ - if (error == NFSERR_GRACE) - tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); - if (!(error = nfs_mount_state_wait_for_recovery(nmp))) + if (error == NFSERR_GRACE) { + tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz); + } + if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { goto tryagain; + } } else { NP(np, "nfs_pagein: too many restarts, aborting"); } @@ -7219,16 +7707,17 @@ cancel: ubc_upl_unmap(pl); if (!nofreeupl) { - if (error) + if (error) { ubc_upl_abort_range(pl, pl_offset, size, - UPL_ABORT_ERROR | - 
UPL_ABORT_FREE_ON_EMPTY); - else + UPL_ABORT_ERROR | + UPL_ABORT_FREE_ON_EMPTY); + } else { ubc_upl_commit_range(pl, pl_offset, size, - UPL_COMMIT_CLEAR_DIRTY | - UPL_COMMIT_FREE_ON_EMPTY); + UPL_COMMIT_CLEAR_DIRTY | + UPL_COMMIT_FREE_ON_EMPTY); + } } - return (error); + return error; } @@ -7242,115 +7731,116 @@ cancel: char nfs_pageouterrorhandler(int); enum actiontype {NOACTION, DUMP, DUMPANDLOG, RETRY, SEVER}; #define NFS_ELAST 88 -static u_char errorcount[NFS_ELAST+1]; /* better be zeros when initialized */ -static const char errortooutcome[NFS_ELAST+1] = { +static u_char errorcount[NFS_ELAST + 1]; /* better be zeros when initialized */ +static const char errortooutcome[NFS_ELAST + 1] = { NOACTION, - DUMP, /* EPERM 1 Operation not permitted */ - DUMP, /* ENOENT 2 No such file or directory */ - DUMPANDLOG, /* ESRCH 3 No such process */ - RETRY, /* EINTR 4 Interrupted system call */ - DUMP, /* EIO 5 Input/output error */ - DUMP, /* ENXIO 6 Device not configured */ - DUMPANDLOG, /* E2BIG 7 Argument list too long */ - DUMPANDLOG, /* ENOEXEC 8 Exec format error */ - DUMPANDLOG, /* EBADF 9 Bad file descriptor */ - DUMPANDLOG, /* ECHILD 10 No child processes */ - DUMPANDLOG, /* EDEADLK 11 Resource deadlock avoided - was EAGAIN */ - RETRY, /* ENOMEM 12 Cannot allocate memory */ - DUMP, /* EACCES 13 Permission denied */ - DUMPANDLOG, /* EFAULT 14 Bad address */ - DUMPANDLOG, /* ENOTBLK 15 POSIX - Block device required */ - RETRY, /* EBUSY 16 Device busy */ - DUMP, /* EEXIST 17 File exists */ - DUMP, /* EXDEV 18 Cross-device link */ - DUMP, /* ENODEV 19 Operation not supported by device */ - DUMP, /* ENOTDIR 20 Not a directory */ - DUMP, /* EISDIR 21 Is a directory */ - DUMP, /* EINVAL 22 Invalid argument */ - DUMPANDLOG, /* ENFILE 23 Too many open files in system */ - DUMPANDLOG, /* EMFILE 24 Too many open files */ - DUMPANDLOG, /* ENOTTY 25 Inappropriate ioctl for device */ - DUMPANDLOG, /* ETXTBSY 26 Text file busy - POSIX */ - DUMP, /* EFBIG 27 File too large */ - DUMP, /* ENOSPC 28 No space left on device */ - DUMPANDLOG, /* ESPIPE 29 Illegal seek */ - DUMP, /* EROFS 30 Read-only file system */ - DUMP, /* EMLINK 31 Too many links */ - RETRY, /* EPIPE 32 Broken pipe */ + DUMP, /* EPERM 1 Operation not permitted */ + DUMP, /* ENOENT 2 No such file or directory */ + DUMPANDLOG, /* ESRCH 3 No such process */ + RETRY, /* EINTR 4 Interrupted system call */ + DUMP, /* EIO 5 Input/output error */ + DUMP, /* ENXIO 6 Device not configured */ + DUMPANDLOG, /* E2BIG 7 Argument list too long */ + DUMPANDLOG, /* ENOEXEC 8 Exec format error */ + DUMPANDLOG, /* EBADF 9 Bad file descriptor */ + DUMPANDLOG, /* ECHILD 10 No child processes */ + DUMPANDLOG, /* EDEADLK 11 Resource deadlock avoided - was EAGAIN */ + RETRY, /* ENOMEM 12 Cannot allocate memory */ + DUMP, /* EACCES 13 Permission denied */ + DUMPANDLOG, /* EFAULT 14 Bad address */ + DUMPANDLOG, /* ENOTBLK 15 POSIX - Block device required */ + RETRY, /* EBUSY 16 Device busy */ + DUMP, /* EEXIST 17 File exists */ + DUMP, /* EXDEV 18 Cross-device link */ + DUMP, /* ENODEV 19 Operation not supported by device */ + DUMP, /* ENOTDIR 20 Not a directory */ + DUMP, /* EISDIR 21 Is a directory */ + DUMP, /* EINVAL 22 Invalid argument */ + DUMPANDLOG, /* ENFILE 23 Too many open files in system */ + DUMPANDLOG, /* EMFILE 24 Too many open files */ + DUMPANDLOG, /* ENOTTY 25 Inappropriate ioctl for device */ + DUMPANDLOG, /* ETXTBSY 26 Text file busy - POSIX */ + DUMP, /* EFBIG 27 File too large */ + DUMP, /* ENOSPC 28 No space left on device */ + DUMPANDLOG, /* 
ESPIPE 29 Illegal seek */ + DUMP, /* EROFS 30 Read-only file system */ + DUMP, /* EMLINK 31 Too many links */ + RETRY, /* EPIPE 32 Broken pipe */ /* math software */ - DUMPANDLOG, /* EDOM 33 Numerical argument out of domain */ - DUMPANDLOG, /* ERANGE 34 Result too large */ - RETRY, /* EAGAIN/EWOULDBLOCK 35 Resource temporarily unavailable */ - DUMPANDLOG, /* EINPROGRESS 36 Operation now in progress */ - DUMPANDLOG, /* EALREADY 37 Operation already in progress */ + DUMPANDLOG, /* EDOM 33 Numerical argument out of domain */ + DUMPANDLOG, /* ERANGE 34 Result too large */ + RETRY, /* EAGAIN/EWOULDBLOCK 35 Resource temporarily unavailable */ + DUMPANDLOG, /* EINPROGRESS 36 Operation now in progress */ + DUMPANDLOG, /* EALREADY 37 Operation already in progress */ /* ipc/network software -- argument errors */ - DUMPANDLOG, /* ENOTSOC 38 Socket operation on non-socket */ - DUMPANDLOG, /* EDESTADDRREQ 39 Destination address required */ - DUMPANDLOG, /* EMSGSIZE 40 Message too long */ - DUMPANDLOG, /* EPROTOTYPE 41 Protocol wrong type for socket */ - DUMPANDLOG, /* ENOPROTOOPT 42 Protocol not available */ - DUMPANDLOG, /* EPROTONOSUPPORT 43 Protocol not supported */ - DUMPANDLOG, /* ESOCKTNOSUPPORT 44 Socket type not supported */ - DUMPANDLOG, /* ENOTSUP 45 Operation not supported */ - DUMPANDLOG, /* EPFNOSUPPORT 46 Protocol family not supported */ - DUMPANDLOG, /* EAFNOSUPPORT 47 Address family not supported by protocol family */ - DUMPANDLOG, /* EADDRINUSE 48 Address already in use */ - DUMPANDLOG, /* EADDRNOTAVAIL 49 Can't assign requested address */ + DUMPANDLOG, /* ENOTSOC 38 Socket operation on non-socket */ + DUMPANDLOG, /* EDESTADDRREQ 39 Destination address required */ + DUMPANDLOG, /* EMSGSIZE 40 Message too long */ + DUMPANDLOG, /* EPROTOTYPE 41 Protocol wrong type for socket */ + DUMPANDLOG, /* ENOPROTOOPT 42 Protocol not available */ + DUMPANDLOG, /* EPROTONOSUPPORT 43 Protocol not supported */ + DUMPANDLOG, /* ESOCKTNOSUPPORT 44 Socket type not supported */ + DUMPANDLOG, /* ENOTSUP 45 Operation not supported */ + DUMPANDLOG, /* EPFNOSUPPORT 46 Protocol family not supported */ + DUMPANDLOG, /* EAFNOSUPPORT 47 Address family not supported by protocol family */ + DUMPANDLOG, /* EADDRINUSE 48 Address already in use */ + DUMPANDLOG, /* EADDRNOTAVAIL 49 Can't assign requested address */ /* ipc/network software -- operational errors */ - RETRY, /* ENETDOWN 50 Network is down */ - RETRY, /* ENETUNREACH 51 Network is unreachable */ - RETRY, /* ENETRESET 52 Network dropped connection on reset */ - RETRY, /* ECONNABORTED 53 Software caused connection abort */ - RETRY, /* ECONNRESET 54 Connection reset by peer */ - RETRY, /* ENOBUFS 55 No buffer space available */ - RETRY, /* EISCONN 56 Socket is already connected */ - RETRY, /* ENOTCONN 57 Socket is not connected */ - RETRY, /* ESHUTDOWN 58 Can't send after socket shutdown */ - RETRY, /* ETOOMANYREFS 59 Too many references: can't splice */ - RETRY, /* ETIMEDOUT 60 Operation timed out */ - RETRY, /* ECONNREFUSED 61 Connection refused */ - - DUMPANDLOG, /* ELOOP 62 Too many levels of symbolic links */ - DUMP, /* ENAMETOOLONG 63 File name too long */ - RETRY, /* EHOSTDOWN 64 Host is down */ - RETRY, /* EHOSTUNREACH 65 No route to host */ - DUMP, /* ENOTEMPTY 66 Directory not empty */ + RETRY, /* ENETDOWN 50 Network is down */ + RETRY, /* ENETUNREACH 51 Network is unreachable */ + RETRY, /* ENETRESET 52 Network dropped connection on reset */ + RETRY, /* ECONNABORTED 53 Software caused connection abort */ + RETRY, /* ECONNRESET 54 Connection reset by 
peer */ + RETRY, /* ENOBUFS 55 No buffer space available */ + RETRY, /* EISCONN 56 Socket is already connected */ + RETRY, /* ENOTCONN 57 Socket is not connected */ + RETRY, /* ESHUTDOWN 58 Can't send after socket shutdown */ + RETRY, /* ETOOMANYREFS 59 Too many references: can't splice */ + RETRY, /* ETIMEDOUT 60 Operation timed out */ + RETRY, /* ECONNREFUSED 61 Connection refused */ + + DUMPANDLOG, /* ELOOP 62 Too many levels of symbolic links */ + DUMP, /* ENAMETOOLONG 63 File name too long */ + RETRY, /* EHOSTDOWN 64 Host is down */ + RETRY, /* EHOSTUNREACH 65 No route to host */ + DUMP, /* ENOTEMPTY 66 Directory not empty */ /* quotas & mush */ - DUMPANDLOG, /* PROCLIM 67 Too many processes */ - DUMPANDLOG, /* EUSERS 68 Too many users */ - DUMPANDLOG, /* EDQUOT 69 Disc quota exceeded */ + DUMPANDLOG, /* PROCLIM 67 Too many processes */ + DUMPANDLOG, /* EUSERS 68 Too many users */ + DUMPANDLOG, /* EDQUOT 69 Disc quota exceeded */ /* Network File System */ - DUMP, /* ESTALE 70 Stale NFS file handle */ - DUMP, /* EREMOTE 71 Too many levels of remote in path */ - DUMPANDLOG, /* EBADRPC 72 RPC struct is bad */ - DUMPANDLOG, /* ERPCMISMATCH 73 RPC version wrong */ - DUMPANDLOG, /* EPROGUNAVAIL 74 RPC prog. not avail */ - DUMPANDLOG, /* EPROGMISMATCH 75 Program version wrong */ - DUMPANDLOG, /* EPROCUNAVAIL 76 Bad procedure for program */ - - DUMPANDLOG, /* ENOLCK 77 No locks available */ - DUMPANDLOG, /* ENOSYS 78 Function not implemented */ - DUMPANDLOG, /* EFTYPE 79 Inappropriate file type or format */ - DUMPANDLOG, /* EAUTH 80 Authentication error */ - DUMPANDLOG, /* ENEEDAUTH 81 Need authenticator */ + DUMP, /* ESTALE 70 Stale NFS file handle */ + DUMP, /* EREMOTE 71 Too many levels of remote in path */ + DUMPANDLOG, /* EBADRPC 72 RPC struct is bad */ + DUMPANDLOG, /* ERPCMISMATCH 73 RPC version wrong */ + DUMPANDLOG, /* EPROGUNAVAIL 74 RPC prog. not avail */ + DUMPANDLOG, /* EPROGMISMATCH 75 Program version wrong */ + DUMPANDLOG, /* EPROCUNAVAIL 76 Bad procedure for program */ + + DUMPANDLOG, /* ENOLCK 77 No locks available */ + DUMPANDLOG, /* ENOSYS 78 Function not implemented */ + DUMPANDLOG, /* EFTYPE 79 Inappropriate file type or format */ + DUMPANDLOG, /* EAUTH 80 Authentication error */ + DUMPANDLOG, /* ENEEDAUTH 81 Need authenticator */ /* Intelligent device errors */ - DUMPANDLOG, /* EPWROFF 82 Device power is off */ - DUMPANDLOG, /* EDEVERR 83 Device error, e.g. paper out */ - DUMPANDLOG, /* EOVERFLOW 84 Value too large to be stored in data type */ + DUMPANDLOG, /* EPWROFF 82 Device power is off */ + DUMPANDLOG, /* EDEVERR 83 Device error, e.g. 
paper out */ + DUMPANDLOG, /* EOVERFLOW 84 Value too large to be stored in data type */ /* Program loading errors */ - DUMPANDLOG, /* EBADEXEC 85 Bad executable */ - DUMPANDLOG, /* EBADARCH 86 Bad CPU type in executable */ - DUMPANDLOG, /* ESHLIBVERS 87 Shared library version mismatch */ - DUMPANDLOG, /* EBADMACHO 88 Malformed Macho file */ + DUMPANDLOG, /* EBADEXEC 85 Bad executable */ + DUMPANDLOG, /* EBADARCH 86 Bad CPU type in executable */ + DUMPANDLOG, /* ESHLIBVERS 87 Shared library version mismatch */ + DUMPANDLOG, /* EBADMACHO 88 Malformed Macho file */ }; char nfs_pageouterrorhandler(int error) { - if (error > NFS_ELAST) - return(DUMP); - else - return(errortooutcome[error]); + if (error > NFS_ELAST) { + return DUMP; + } else { + return errortooutcome[error]; + } } @@ -7363,15 +7853,15 @@ nfs_pageouterrorhandler(int error) int nfs_vnop_pageout( struct vnop_pageout_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - upl_t a_pl; - vm_offset_t a_pl_offset; - off_t a_f_offset; - size_t a_size; - int a_flags; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * upl_t a_pl; + * vm_offset_t a_pl_offset; + * off_t a_f_offset; + * size_t a_size; + * int a_flags; + * vfs_context_t a_context; + * } */*ap) { vnode_t vp = ap->a_vp; upl_t pl = ap->a_pl; @@ -7389,7 +7879,7 @@ nfs_vnop_pageout( off_t off, txoffset, rxoffset; vm_offset_t ioaddr, txaddr, rxaddr; uio_t auio; - char uio_buf [ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; int nofreeupl = flags & UPL_NOCOMMIT; size_t nmwsize, biosize, iosize, pgsize, txsize, rxsize, xsize, remsize; struct nfsreq *req[MAXPAGINGREQS]; @@ -7400,20 +7890,23 @@ nfs_vnop_pageout( FSDBG(323, f_offset, size, pl, pl_offset); - if (pl == (upl_t)NULL) + if (pl == (upl_t)NULL) { panic("nfs_pageout: no upl"); + } if (size <= 0) { printf("nfs_pageout: invalid size %ld", size); - if (!nofreeupl) + if (!nofreeupl) { ubc_upl_abort_range(pl, pl_offset, size, 0); - return (EINVAL); + } + return EINVAL; } if (!nmp) { - if (!nofreeupl) - ubc_upl_abort(pl, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY); - return (ENXIO); + if (!nofreeupl) { + ubc_upl_abort(pl, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); + } + return ENXIO; } biosize = nmp->nm_biosize; nmwsize = nmp->nm_wsize; @@ -7428,8 +7921,9 @@ nfs_vnop_pageout( off = f_offset + iosize; /* need make sure we do things on block boundaries */ xsize = biosize - (off % biosize); - if (off + xsize > f_offset + size) + if (off + xsize > f_offset + size) { xsize = f_offset + size - off; + } lbn = (daddr64_t)(off / biosize); lck_mtx_lock(nfs_buf_mutex); if ((bp = nfs_buf_incore(np, lbn))) { @@ -7438,9 +7932,10 @@ nfs_vnop_pageout( lck_mtx_unlock(nfs_buf_mutex); nfs_data_unlock_noupdate(np); /* no panic. just tell vm we are busy */ - if (!nofreeupl) + if (!nofreeupl) { ubc_upl_abort_range(pl, pl_offset, size, 0); - return (EBUSY); + } + return EBUSY; } if (bp->nb_dirtyend > 0) { /* @@ -7463,44 +7958,48 @@ nfs_vnop_pageout( start = off; end = off + xsize; /* clip end to EOF */ - if (end > (off_t)np->n_size) + if (end > (off_t)np->n_size) { end = np->n_size; + } start -= boff; end -= boff; if ((bp->nb_dirtyoff < start) && (bp->nb_dirtyend > end)) { - /* - * not gonna be able to clip the dirty region - * - * But before returning the bad news, move the - * buffer to the start of the delwri list and - * give the list a push to try to flush the - * buffer out. 
- */ - FSDBG(323, np, bp, 0xd00deebc, EBUSY); - nfs_buf_remfree(bp); - TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free); - nfsbufdelwricnt++; - nfs_buf_drop(bp); - nfs_buf_delwri_push(1); - lck_mtx_unlock(nfs_buf_mutex); - nfs_data_unlock_noupdate(np); - if (!nofreeupl) - ubc_upl_abort_range(pl, pl_offset, size, 0); - return (EBUSY); + /* + * not gonna be able to clip the dirty region + * + * But before returning the bad news, move the + * buffer to the start of the delwri list and + * give the list a push to try to flush the + * buffer out. + */ + FSDBG(323, np, bp, 0xd00deebc, EBUSY); + nfs_buf_remfree(bp); + TAILQ_INSERT_HEAD(&nfsbufdelwri, bp, nb_free); + nfsbufdelwricnt++; + nfs_buf_drop(bp); + nfs_buf_delwri_push(1); + lck_mtx_unlock(nfs_buf_mutex); + nfs_data_unlock_noupdate(np); + if (!nofreeupl) { + ubc_upl_abort_range(pl, pl_offset, size, 0); + } + return EBUSY; } if ((bp->nb_dirtyoff < start) || (bp->nb_dirtyend > end)) { - /* clip dirty region, if necessary */ - if (bp->nb_dirtyoff < start) - bp->nb_dirtyend = min(bp->nb_dirtyend, start); - if (bp->nb_dirtyend > end) - bp->nb_dirtyoff = max(bp->nb_dirtyoff, end); - FSDBG(323, bp, bp->nb_dirtyoff, bp->nb_dirtyend, 0xd00dee00); - /* we're leaving this block dirty */ - nfs_buf_drop(bp); - lck_mtx_unlock(nfs_buf_mutex); - continue; + /* clip dirty region, if necessary */ + if (bp->nb_dirtyoff < start) { + bp->nb_dirtyend = min(bp->nb_dirtyend, start); + } + if (bp->nb_dirtyend > end) { + bp->nb_dirtyoff = max(bp->nb_dirtyoff, end); + } + FSDBG(323, bp, bp->nb_dirtyoff, bp->nb_dirtyend, 0xd00dee00); + /* we're leaving this block dirty */ + nfs_buf_drop(bp); + lck_mtx_unlock(nfs_buf_mutex); + continue; } } nfs_buf_remfree(bp); @@ -7521,44 +8020,50 @@ nfs_vnop_pageout( thd = vfs_context_thread(ap->a_context); cred = ubc_getcred(vp); - if (!IS_VALID_CRED(cred)) + if (!IS_VALID_CRED(cred)) { cred = vfs_context_ucred(ap->a_context); + } nfs_node_lock_force(np); if (np->n_flag & NWRITEERR) { error = np->n_error; nfs_node_unlock(np); nfs_data_unlock_noupdate(np); - if (!nofreeupl) + if (!nofreeupl) { ubc_upl_abort_range(pl, pl_offset, size, - UPL_ABORT_FREE_ON_EMPTY); - return (error); + UPL_ABORT_FREE_ON_EMPTY); + } + return error; } nfs_node_unlock(np); if (f_offset < 0 || f_offset >= (off_t)np->n_size || f_offset & PAGE_MASK_64 || size & PAGE_MASK_64) { nfs_data_unlock_noupdate(np); - if (!nofreeupl) + if (!nofreeupl) { ubc_upl_abort_range(pl, pl_offset, size, - UPL_ABORT_FREE_ON_EMPTY); - return (EINVAL); + UPL_ABORT_FREE_ON_EMPTY); + } + return EINVAL; } kret = ubc_upl_map(pl, &ioaddr); - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { panic("nfs_vnop_pageout: ubc_upl_map() failed with (%d)", kret); + } ioaddr += pl_offset; - if ((u_quad_t)f_offset + size > np->n_size) + if ((u_quad_t)f_offset + size > np->n_size) { xsize = np->n_size - f_offset; - else + } else { xsize = size; + } pgsize = round_page_64(xsize); - if ((size > pgsize) && !nofreeupl) + if ((size > pgsize) && !nofreeupl) { ubc_upl_abort_range(pl, pl_offset + pgsize, size - pgsize, - UPL_ABORT_FREE_ON_EMPTY); + UPL_ABORT_FREE_ON_EMPTY); + } /* * check for partial page and clear the @@ -7573,11 +8078,12 @@ nfs_vnop_pageout( nfs_data_unlock_noupdate(np); auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, - &uio_buf, sizeof(uio_buf)); + &uio_buf, sizeof(uio_buf)); tryagain: - if (nmp->nm_vers >= NFS_VER4) + if (nmp->nm_vers >= NFS_VER4) { stategenid = nmp->nm_stategenid; + } wverf = wverf2 = wverfset = 0; txsize = rxsize = xsize; txoffset = rxoffset = f_offset; 
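/*
 * The tryagain/cancel paths on both sides of this hunk share one recovery
 * idiom: on an NFSv4 state error, sleep out the server's grace period,
 * wait for state recovery, and retry, but only within a fixed restart
 * budget so a server that never recovers cannot wedge the pager. A compact
 * sketch of that idiom follows; io_attempt(), grace_sleep(),
 * wait_for_recovery() and the SK_* constants are hypothetical stand-ins
 * for the RPC loop, tsleep(), nfs_mount_state_wait_for_recovery() and
 * nfs_mount_state_max_restarts(), respectively.
 */
#include <errno.h>
#include <stdbool.h>

#define SK_MAX_RESTARTS   8	/* stand-in for nfs_mount_state_max_restarts() */
#define SK_ERR_GRACE      (-1)	/* stand-in for NFSERR_GRACE */
#define SK_ERR_STALESTATE (-2)	/* stand-in for a recoverable state error */

static int
paged_io_with_recovery(int (*io_attempt)(void *), void *arg,
    void (*grace_sleep)(void), bool (*wait_for_recovery)(void))
{
	int error, restarts = 0;

	for (;;) {
		error = io_attempt(arg);
		if (error != SK_ERR_GRACE && error != SK_ERR_STALESTATE) {
			return error;	/* success, or a non-recoverable error */
		}
		if (++restarts > SK_MAX_RESTARTS) {
			return EIO;	/* guard against no progress */
		}
		if (error == SK_ERR_GRACE) {
			grace_sleep();	/* let the server finish its grace period */
		}
		if (!wait_for_recovery()) {
			return EIO;	/* recovery itself failed: give up */
		}
		/* state recovered: retry the whole transfer */
	}
}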
@@ -7649,10 +8155,11 @@ tryagain: goto cancel; } /* Retain the lowest commitment level returned. */ - if (iomode < commit) + if (iomode < commit) { commit = iomode; - rxaddr += iosize; - rxoffset += iosize; + } + rxaddr += iosize; + rxoffset += iosize; rxsize -= iosize; remsize -= iosize; if (remsize > 0) { @@ -7682,14 +8189,16 @@ tryagain: vrestart = 1; goto cancel; } - if (iomode < commit) + if (iomode < commit) { commit = iomode; - rxaddr += iosize; - rxoffset += iosize; + } + rxaddr += iosize; + rxoffset += iosize; rxsize -= iosize; } - if (txsize) + if (txsize) { break; + } } } while (!error && (txsize || rxsize)); @@ -7719,17 +8228,20 @@ cancel: error = EIO; } else { if (vrestart) { - if (++vrestarts <= 100) /* guard against no progress */ + if (++vrestarts <= 100) { /* guard against no progress */ goto tryagain; + } NP(np, "nfs_pageout: too many restarts, aborting"); FSDBG(323, f_offset, xsize, ERESTART, -1); } if (restart) { if (restarts <= nfs_mount_state_max_restarts(nmp)) { /* guard against no progress */ - if (error == NFSERR_GRACE) - tsleep(&nmp->nm_state, (PZERO-1), "nfsgrace", 2*hz); - if (!(error = nfs_mount_state_wait_for_recovery(nmp))) + if (error == NFSERR_GRACE) { + tsleep(&nmp->nm_state, (PZERO - 1), "nfsgrace", 2 * hz); + } + if (!(error = nfs_mount_state_wait_for_recovery(nmp))) { goto tryagain; + } } else { NP(np, "nfs_pageout: too many restarts, aborting"); FSDBG(323, f_offset, xsize, ERESTART, -1); @@ -7767,83 +8279,85 @@ cancel: if (error) { int abortflags = 0; char action = nfs_pageouterrorhandler(error); - + switch (action) { - case DUMP: - abortflags = UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY; - break; - case DUMPANDLOG: - abortflags = UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY; - if (error <= NFS_ELAST) { - if ((errorcount[error] % 100) == 0) - NP(np, "nfs_pageout: unexpected error %d. dumping vm page", error); - errorcount[error]++; + case DUMP: + abortflags = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY; + break; + case DUMPANDLOG: + abortflags = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY; + if (error <= NFS_ELAST) { + if ((errorcount[error] % 100) == 0) { + NP(np, "nfs_pageout: unexpected error %d. 
dumping vm page", error); } - break; - case RETRY: - abortflags = UPL_ABORT_FREE_ON_EMPTY; - break; - case SEVER: /* not implemented */ - default: - NP(np, "nfs_pageout: action %d not expected", action); - break; + errorcount[error]++; + } + break; + case RETRY: + abortflags = UPL_ABORT_FREE_ON_EMPTY; + break; + case SEVER: /* not implemented */ + default: + NP(np, "nfs_pageout: action %d not expected", action); + break; } ubc_upl_abort_range(pl, pl_offset, pgsize, abortflags); /* return error in all cases above */ - } else { ubc_upl_commit_range(pl, pl_offset, pgsize, - UPL_COMMIT_CLEAR_DIRTY | - UPL_COMMIT_FREE_ON_EMPTY); + UPL_COMMIT_CLEAR_DIRTY | + UPL_COMMIT_FREE_ON_EMPTY); } } - return (error); + return error; } /* Blktooff derives file offset given a logical block number */ int nfs_vnop_blktooff( struct vnop_blktooff_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - daddr64_t a_lblkno; - off_t *a_offset; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * daddr64_t a_lblkno; + * off_t *a_offset; + * } */*ap) { int biosize; vnode_t vp = ap->a_vp; struct nfsmount *nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } biosize = nmp->nm_biosize; *ap->a_offset = (off_t)(ap->a_lblkno * biosize); - return (0); + return 0; } int nfs_vnop_offtoblk( struct vnop_offtoblk_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - off_t a_offset; - daddr64_t *a_lblkno; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * off_t a_offset; + * daddr64_t *a_lblkno; + * } */*ap) { int biosize; vnode_t vp = ap->a_vp; struct nfsmount *nmp = VTONMP(vp); - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } biosize = nmp->nm_biosize; *ap->a_lblkno = (daddr64_t)(ap->a_offset / biosize); - return (0); + return 0; } /* @@ -7852,20 +8366,21 @@ nfs_vnop_offtoblk( int nfs_vnop_monitor( struct vnop_monitor_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - uint32_t a_events; - uint32_t a_flags; - void *a_handle; - vfs_context_t a_context; - } */ *ap) + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * uint32_t a_events; + * uint32_t a_flags; + * void *a_handle; + * vfs_context_t a_context; + * } */*ap) { nfsnode_t np = VTONFS(ap->a_vp); struct nfsmount *nmp = VTONMP(ap->a_vp); int error = 0; - if (nfs_mount_gone(nmp)) - return (ENXIO); + if (nfs_mount_gone(nmp)) { + return ENXIO; + } /* make sure that the vnode's monitoring status is up to date */ lck_mtx_lock(&nmp->nm_lock); @@ -7881,7 +8396,7 @@ nfs_vnop_monitor( while (np->n_mflag & NMMONSCANINPROG) { struct timespec ts = { 1, 0 }; np->n_mflag |= NMMONSCANWANT; - msleep(&np->n_mflag, &nmp->nm_lock, PZERO-1, "nfswaitmonscan", &ts); + msleep(&np->n_mflag, &nmp->nm_lock, PZERO - 1, "nfswaitmonscan", &ts); } if (np->n_monlink.le_next != NFSNOLIST) { LIST_REMOVE(np, n_monlink); @@ -7890,7 +8405,7 @@ nfs_vnop_monitor( } lck_mtx_unlock(&nmp->nm_lock); - return (error); + return error; } /* @@ -7919,7 +8434,6 @@ nfs_vnode_notify(nfsnode_t np, uint32_t events) vap = &vattr; VATTR_INIT(vap); - vap->va_flags |= nmp->nm_vers > 2 ? VA_64BITOBJIDS : 0; VATTR_RETURN(vap, va_fsid, vfs_statfs(nmp->nm_mountp)->f_fsid.val[0]); VATTR_RETURN(vap, va_fileid, nvattr.nva_fileid); VATTR_RETURN(vap, va_mode, nvattr.nva_mode); diff --git a/bsd/nfs/nfsdiskless.h b/bsd/nfs/nfsdiskless.h index 69f03e73a..3ed0195e1 100644 --- a/bsd/nfs/nfsdiskless.h +++ b/bsd/nfs/nfsdiskless.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -74,19 +74,19 @@ #ifdef __APPLE_API_PRIVATE struct nfs_dlmount { - struct sockaddr_in ndm_saddr; /* Address of file server */ - char ndm_host[MAXHOSTNAMELEN];/* Host name for mount pt */ - char *ndm_path; /* path name for mount pt */ - char *ndm_mntfrom; /* mntfromname for mount pt */ - u_int32_t ndm_nfsv3; /* NFSv3 or NFSv2? */ - u_int32_t ndm_sotype; /* SOCK_STREAM or SOCK_DGRAM? */ - u_int32_t ndm_fhlen; /* length of file handle */ - u_char ndm_fh[NFSX_V3FHMAX]; /* The file's file handle */ + struct sockaddr_in ndm_saddr; /* Address of file server */ + char ndm_host[MAXHOSTNAMELEN];/* Host name for mount pt */ + char *ndm_path; /* path name for mount pt */ + char *ndm_mntfrom; /* mntfromname for mount pt */ + u_int32_t ndm_nfsv3; /* NFSv3 or NFSv2? */ + u_int32_t ndm_sotype; /* SOCK_STREAM or SOCK_DGRAM? */ + u_int32_t ndm_fhlen; /* length of file handle */ + u_char ndm_fh[NFSX_V3FHMAX]; /* The file's file handle */ }; struct nfs_diskless { - struct nfs_dlmount nd_root; /* Mount info for root */ - struct nfs_dlmount nd_private; /* Mount info for private */ + struct nfs_dlmount nd_root; /* Mount info for root */ + struct nfs_dlmount nd_private; /* Mount info for private */ }; #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/nfs/nfsm_subs.h b/bsd/nfs/nfsm_subs.h index dd09a5bbb..6f348ac36 100644 --- a/bsd/nfs/nfsm_subs.h +++ b/bsd/nfs/nfsm_subs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -92,7 +92,7 @@ int nfsm_chain_get_opaque_pointer_f(struct nfsm_chain *, uint32_t, u_char **); int nfsm_chain_get_opaque_f(struct nfsm_chain *, uint32_t, u_char *); int nfsm_chain_get_uio(struct nfsm_chain *, uint32_t, uio_t); int nfsm_chain_get_fh_attr(struct nfsm_chain *, nfsnode_t, - vfs_context_t, int, uint64_t *, fhandle_t *, struct nfs_vattr *); + vfs_context_t, int, uint64_t *, fhandle_t *, struct nfs_vattr *); int nfsm_chain_get_wcc_data_f(struct nfsm_chain *, nfsnode_t, struct timespec *, int *, u_int64_t *); int nfsm_chain_get_secinfo(struct nfsm_chain *, uint32_t *, int *); @@ -102,7 +102,7 @@ int nfsm_mbuf_get_list(size_t, mbuf_t *, int *); int nfsm_chain_add_fattr(struct nfsrv_descript *, struct nfsm_chain *, struct vnode_attr *); int nfsm_chain_add_wcc_data_f(struct nfsrv_descript *, struct nfsm_chain *, int, - struct vnode_attr *, int, struct vnode_attr *); + struct vnode_attr *, int, struct vnode_attr *); int nfsm_chain_get_path_namei(struct nfsm_chain *, uint32_t, struct nameidata *); int nfsm_chain_get_sattr(struct nfsrv_descript *, struct nfsm_chain *, struct vnode_attr *); int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); @@ -111,63 +111,63 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); /* check name length */ #define nfsm_name_len_check(E, ND, LEN) \ do { \ - if (E) break; \ - if (((ND)->nd_vers == NFS_VER2) && ((LEN) > NFS_MAXNAMLEN)) \ - (E) = NFSERR_NAMETOL; \ - if ((LEN) <= 0) \ - error = EBADRPC; \ + if (E) break; \ + if (((ND)->nd_vers == NFS_VER2) && ((LEN) > NFS_MAXNAMLEN)) \ + (E) = NFSERR_NAMETOL; \ + if ((LEN) <= 0) \ + error = EBADRPC; \ } while (0) #define nfsm_assert(E, COND, ERR) \ do { \ - if (E) break; \ - if (!(COND)) \ - (E) = (ERR); \ + if (E) break; \ + if (!(COND)) \ + (E) = (ERR); \ } while (0) /* Initialize a vnode_attr to retrieve attributes for the NFS server. */ #define nfsm_srv_vattr_init(VAP, VERS) \ do { \ - VATTR_INIT(VAP); \ - VATTR_WANTED((VAP), va_type); \ - VATTR_WANTED((VAP), va_mode); \ - VATTR_WANTED((VAP), va_nlink); \ - VATTR_WANTED((VAP), va_uid); \ - VATTR_WANTED((VAP), va_gid); \ - VATTR_WANTED((VAP), va_data_size); \ - VATTR_WANTED((VAP), va_data_alloc); \ - VATTR_WANTED((VAP), va_rdev); \ - VATTR_WANTED((VAP), va_fsid); \ - VATTR_WANTED((VAP), va_fileid); \ - VATTR_WANTED((VAP), va_access_time); \ - VATTR_WANTED((VAP), va_modify_time); \ - VATTR_WANTED((VAP), va_change_time); \ - if ((VERS) == NFS_VER2) \ - VATTR_WANTED((VAP), va_iosize); \ - if ((VERS) == NFS_VER3) \ - VATTR_WANTED((VAP), va_filerev); \ + VATTR_INIT(VAP); \ + VATTR_WANTED((VAP), va_type); \ + VATTR_WANTED((VAP), va_mode); \ + VATTR_WANTED((VAP), va_nlink); \ + VATTR_WANTED((VAP), va_uid); \ + VATTR_WANTED((VAP), va_gid); \ + VATTR_WANTED((VAP), va_data_size); \ + VATTR_WANTED((VAP), va_data_alloc); \ + VATTR_WANTED((VAP), va_rdev); \ + VATTR_WANTED((VAP), va_fsid); \ + VATTR_WANTED((VAP), va_fileid); \ + VATTR_WANTED((VAP), va_access_time); \ + VATTR_WANTED((VAP), va_modify_time); \ + VATTR_WANTED((VAP), va_change_time); \ + if ((VERS) == NFS_VER2) \ + VATTR_WANTED((VAP), va_iosize); \ + if ((VERS) == NFS_VER3) \ + VATTR_WANTED((VAP), va_filerev); \ } while (0) /* Initialize a vnode_attr to retrieve pre-operation attributes for the NFS server. 
*/ #define nfsm_srv_pre_vattr_init(VAP) \ do { \ - VATTR_INIT(VAP); \ - VATTR_WANTED((VAP), va_data_size); \ - VATTR_WANTED((VAP), va_modify_time); \ - VATTR_WANTED((VAP), va_change_time); \ + VATTR_INIT(VAP); \ + VATTR_WANTED((VAP), va_data_size); \ + VATTR_WANTED((VAP), va_modify_time); \ + VATTR_WANTED((VAP), va_change_time); \ } while (0) /* round up to a multiple of 4 */ -#define nfsm_rndup(a) (((a)+3)&(~0x3)) +#define nfsm_rndup(a) (((a)+3)&(~0x3)) -#define nfsm_pad(a) (nfsm_rndup(a) - (a)) +#define nfsm_pad(a) (nfsm_rndup(a) - (a)) /* * control flow macros: * go to the appropriate label on condition */ -#define nfsmout_if(E) do { if (E) goto nfsmout; } while (0) -#define nfsmerr_if(E) do { if (E) goto nfsmerr; } while (0) +#define nfsmout_if(E) do { if (E) goto nfsmout; } while (0) +#define nfsmerr_if(E) do { if (E) goto nfsmerr; } while (0) /* * For NFS v2 errors and EBADRPC, the reply contains only the error. @@ -176,36 +176,36 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); */ #define nfsmout_on_status(ND, E) \ do { \ - if (((ND)->nd_repstat == EBADRPC) || \ - ((ND)->nd_repstat && ((ND)->nd_vers == NFS_VER2))) { \ - (E) = 0; \ - goto nfsmout; \ - } \ + if (((ND)->nd_repstat == EBADRPC) || \ + ((ND)->nd_repstat && ((ND)->nd_vers == NFS_VER2))) { \ + (E) = 0; \ + goto nfsmout; \ + } \ } while (0) /* initialize an mbuf chain */ #define nfsm_chain_null(NMC) \ do { \ - (NMC)->nmc_mhead = (NMC)->nmc_mcur = NULL; \ - (NMC)->nmc_ptr = NULL; \ + (NMC)->nmc_mhead = (NMC)->nmc_mcur = NULL; \ + (NMC)->nmc_ptr = NULL; \ } while (0) /* cleanup an mbuf chain */ #define nfsm_chain_cleanup(NMC) \ do { \ - if (!(NMC)->nmc_mhead) break; \ - mbuf_freem((NMC)->nmc_mhead); \ - nfsm_chain_null(NMC); \ + if (!(NMC)->nmc_mhead) break; \ + mbuf_freem((NMC)->nmc_mhead); \ + nfsm_chain_null(NMC); \ } while (0) /* get an mbuf given a size hint */ #define nfsm_mbuf_get(E, MBP, SIZEHINT) \ do { \ - *(MBP) = NULL; \ - if ((size_t)(SIZEHINT) >= nfs_mbuf_minclsize) \ - (E) = mbuf_mclget(MBUF_WAITOK, MBUF_TYPE_DATA, (MBP)); \ - else \ - (E) = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, (MBP)); \ + *(MBP) = NULL; \ + if ((size_t)(SIZEHINT) >= nfs_mbuf_minclsize) \ + (E) = mbuf_mclget(MBUF_WAITOK, MBUF_TYPE_DATA, (MBP)); \ + else \ + (E) = mbuf_get(MBUF_WAITOK, MBUF_TYPE_DATA, (MBP)); \ } while (0) @@ -216,29 +216,29 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); /* prepare an mbuf chain for building starting with the given mbuf */ #define nfsm_chain_init(NMC, MB) \ do { \ - (NMC)->nmc_mhead = (MB); \ - (NMC)->nmc_mcur = (NMC)->nmc_mhead; \ - (NMC)->nmc_ptr = mbuf_data((NMC)->nmc_mcur); \ - (NMC)->nmc_left = mbuf_trailingspace((NMC)->nmc_mcur); \ - (NMC)->nmc_flags = 0; \ + (NMC)->nmc_mhead = (MB); \ + (NMC)->nmc_mcur = (NMC)->nmc_mhead; \ + (NMC)->nmc_ptr = mbuf_data((NMC)->nmc_mcur); \ + (NMC)->nmc_left = mbuf_trailingspace((NMC)->nmc_mcur); \ + (NMC)->nmc_flags = 0; \ } while (0) /* prepare an mbuf chain for building starting with a newly allocated mbuf */ #define nfsm_chain_build_alloc_init(E, NMC, SIZEHINT) \ do { \ - mbuf_t ncbimb; \ - nfsm_mbuf_get((E), &ncbimb, (SIZEHINT)); \ - if (E) break; \ - nfsm_chain_init((NMC), ncbimb); \ + mbuf_t ncbimb; \ + nfsm_mbuf_get((E), &ncbimb, (SIZEHINT)); \ + if (E) break; \ + nfsm_chain_init((NMC), ncbimb); \ } while (0) /* done building an mbuf chain */ #define nfsm_chain_build_done(E, NMC) \ do { \ - if ((E) || !(NMC)->nmc_mcur) break; \ - /* cap off current mbuf */ \ - mbuf_setlen((NMC)->nmc_mcur, \ - (NMC)->nmc_ptr - 
(caddr_t)mbuf_data((NMC)->nmc_mcur)); \ + if ((E) || !(NMC)->nmc_mcur) break; \ + /* cap off current mbuf */ \ + mbuf_setlen((NMC)->nmc_mcur, \ + (NMC)->nmc_ptr - (caddr_t)mbuf_data((NMC)->nmc_mcur)); \ } while (0) /* @@ -247,278 +247,278 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); */ #define nfsm_chain_set_recmark(E, NMC, VAL) \ do { \ - if (E) break; \ - *((uint32_t*)mbuf_data((NMC)->nmc_mhead)) \ - = txdr_unsigned(VAL); \ + if (E) break; \ + *((uint32_t*)mbuf_data((NMC)->nmc_mhead)) \ + = txdr_unsigned(VAL); \ } while (0) /* make sure there's room for size bytes in current mbuf */ #define nfsm_chain_check_size(E, NMC, SIZE) \ do { \ - if (E) break; \ - if ((NMC)->nmc_left < (SIZE)) { \ - (E) = nfsm_chain_new_mbuf((NMC), (SIZE)); \ - if (!(E) && ((NMC)->nmc_left < (SIZE))) \ - (E) = ENOMEM; \ - } \ + if (E) break; \ + if ((NMC)->nmc_left < (SIZE)) { \ + (E) = nfsm_chain_new_mbuf((NMC), (SIZE)); \ + if (!(E) && ((NMC)->nmc_left < (SIZE))) \ + (E) = ENOMEM; \ + } \ } while (0) /* add a 32bit value to an mbuf chain extending if necessary */ #define nfsm_chain_add_32(E, NMC, VAL) \ do { \ - nfsm_chain_check_size((E), (NMC), NFSX_UNSIGNED); \ - if (E) break; \ - *((uint32_t*)(NMC)->nmc_ptr) = txdr_unsigned(VAL); \ - (NMC)->nmc_ptr += NFSX_UNSIGNED; \ - (NMC)->nmc_left -= NFSX_UNSIGNED; \ + nfsm_chain_check_size((E), (NMC), NFSX_UNSIGNED); \ + if (E) break; \ + *((uint32_t*)(NMC)->nmc_ptr) = txdr_unsigned(VAL); \ + (NMC)->nmc_ptr += NFSX_UNSIGNED; \ + (NMC)->nmc_left -= NFSX_UNSIGNED; \ } while (0) /* add a 64bit value to an mbuf chain */ #define nfsm_chain_add_64(E, NMC, VAL) \ do { \ - uint64_t __tmp64; \ - nfsm_chain_check_size((E), (NMC), 2 * NFSX_UNSIGNED); \ - if (E) break; \ - __tmp64 = (VAL); \ - txdr_hyper(&__tmp64, (NMC)->nmc_ptr); \ - (NMC)->nmc_ptr += 2 * NFSX_UNSIGNED; \ - (NMC)->nmc_left -= 2 * NFSX_UNSIGNED; \ + uint64_t __tmp64; \ + nfsm_chain_check_size((E), (NMC), 2 * NFSX_UNSIGNED); \ + if (E) break; \ + __tmp64 = (VAL); \ + txdr_hyper(&__tmp64, (NMC)->nmc_ptr); \ + (NMC)->nmc_ptr += 2 * NFSX_UNSIGNED; \ + (NMC)->nmc_left -= 2 * NFSX_UNSIGNED; \ } while (0) /* zero the last 4 bytes for a range of opaque */ /* data to make sure any pad bytes will be zero. 
*/ #define nfsm_chain_zero_opaque_pad(BUF, LEN) \ do { \ - if ((LEN) > 0) \ - *(((uint32_t*)(BUF))+((nfsm_rndup(LEN)>>2)-1)) = 0; \ + if ((LEN) > 0) \ + *(((uint32_t*)(BUF))+((nfsm_rndup(LEN)>>2)-1)) = 0; \ } while (0) /* add buffer of opaque data to an mbuf chain */ #define nfsm_chain_add_opaque(E, NMC, BUF, LEN) \ do { \ - uint32_t rndlen = nfsm_rndup(LEN); \ - if (E) break; \ - if ((NMC)->nmc_left < rndlen) { \ - (E) = nfsm_chain_add_opaque_f((NMC), (const u_char*)(BUF), (LEN)); \ - break; \ - } \ - nfsm_chain_zero_opaque_pad((NMC)->nmc_ptr, (LEN)); \ - bcopy((BUF), (NMC)->nmc_ptr, (LEN)); \ - (NMC)->nmc_ptr += rndlen; \ - (NMC)->nmc_left -= rndlen; \ + uint32_t rndlen = nfsm_rndup(LEN); \ + if (E) break; \ + if ((NMC)->nmc_left < rndlen) { \ + (E) = nfsm_chain_add_opaque_f((NMC), (const u_char*)(BUF), (LEN)); \ + break; \ + } \ + nfsm_chain_zero_opaque_pad((NMC)->nmc_ptr, (LEN)); \ + bcopy((BUF), (NMC)->nmc_ptr, (LEN)); \ + (NMC)->nmc_ptr += rndlen; \ + (NMC)->nmc_left -= rndlen; \ } while (0) /* add buffer of opaque data to an mbuf chain without padding */ #define nfsm_chain_add_opaque_nopad(E, NMC, BUF, LEN) \ do { \ - if (E) break; \ - if ((NMC)->nmc_left < (uint32_t) (LEN)) { \ - (E) = nfsm_chain_add_opaque_nopad_f((NMC), (const u_char*)(BUF), (LEN)); \ - break; \ - } \ - bcopy((BUF), (NMC)->nmc_ptr, (LEN)); \ - (NMC)->nmc_ptr += (LEN); \ - (NMC)->nmc_left -= (LEN); \ + if (E) break; \ + if ((NMC)->nmc_left < (uint32_t) (LEN)) { \ + (E) = nfsm_chain_add_opaque_nopad_f((NMC), (const u_char*)(BUF), (LEN)); \ + break; \ + } \ + bcopy((BUF), (NMC)->nmc_ptr, (LEN)); \ + (NMC)->nmc_ptr += (LEN); \ + (NMC)->nmc_left -= (LEN); \ } while (0) /* finish an mbuf in a chain to allow subsequent insertion */ #define nfsm_chain_finish_mbuf(E, NMC) \ do { \ - if (E) break; \ - mbuf_setlen((NMC)->nmc_mcur, \ - (NMC)->nmc_ptr - (caddr_t)mbuf_data((NMC)->nmc_mcur)); \ - (NMC)->nmc_left = 0; \ + if (E) break; \ + mbuf_setlen((NMC)->nmc_mcur, \ + (NMC)->nmc_ptr - (caddr_t)mbuf_data((NMC)->nmc_mcur)); \ + (NMC)->nmc_left = 0; \ } while (0) /* add a file handle to an mbuf chain */ #define nfsm_chain_add_fh(E, NMC, VERS, FHP, FHLEN) \ do { \ - if (E) break; \ - if ((VERS) != NFS_VER2) \ - nfsm_chain_add_32((E), (NMC), (FHLEN)); \ - nfsm_chain_add_opaque((E), (NMC), (FHP), (FHLEN)); \ + if (E) break; \ + if ((VERS) != NFS_VER2) \ + nfsm_chain_add_32((E), (NMC), (FHLEN)); \ + nfsm_chain_add_opaque((E), (NMC), (FHP), (FHLEN)); \ } while (0) /* add a string to an mbuf chain */ #define nfsm_chain_add_string(E, NMC, STR, LEN) \ do { \ - nfsm_chain_add_32((E), (NMC), (LEN)); \ - nfsm_chain_add_opaque((E), (NMC), (STR), (LEN)); \ + nfsm_chain_add_32((E), (NMC), (LEN)); \ + nfsm_chain_add_opaque((E), (NMC), (STR), (LEN)); \ } while (0) /* add a name to an mbuf chain */ #define nfsm_chain_add_name(E, NMC, STR, LEN, NMP) \ do { \ - if (E) break; \ - if (NMFLAG((NMP), NFC)) \ - (E) = nfsm_chain_add_string_nfc((NMC), (const uint8_t*)(STR), (LEN)); \ - else \ - nfsm_chain_add_string((E), (NMC), (STR), (LEN)); \ + if (E) break; \ + if (NMFLAG((NMP), NFC)) \ + (E) = nfsm_chain_add_string_nfc((NMC), (const uint8_t*)(STR), (LEN)); \ + else \ + nfsm_chain_add_string((E), (NMC), (STR), (LEN)); \ } while (0) /* add an NFSv2 time to an mbuf chain */ #define nfsm_chain_add_v2time(E, NMC, TVP) \ do { \ - if (TVP) { \ - nfsm_chain_add_32((E), (NMC), (TVP)->tv_sec); \ - nfsm_chain_add_32((E), (NMC), ((TVP)->tv_nsec != -1) ? \ - ((uint32_t)(TVP)->tv_nsec / 1000) : 0xffffffff); \ - } else { \ - /* no time... 
use -1 */ \ - nfsm_chain_add_32((E), (NMC), -1); \ - nfsm_chain_add_32((E), (NMC), -1); \ - } \ + if (TVP) { \ + nfsm_chain_add_32((E), (NMC), (TVP)->tv_sec); \ + nfsm_chain_add_32((E), (NMC), ((TVP)->tv_nsec != -1) ? \ + ((uint32_t)(TVP)->tv_nsec / 1000) : 0xffffffff); \ + } else { \ + /* no time... use -1 */ \ + nfsm_chain_add_32((E), (NMC), -1); \ + nfsm_chain_add_32((E), (NMC), -1); \ + } \ } while (0) /* add an NFSv3 time to an mbuf chain */ #define nfsm_chain_add_v3time(E, NMC, TVP) \ do { \ - nfsm_chain_add_32((E), (NMC), (TVP)->tv_sec); \ - nfsm_chain_add_32((E), (NMC), (TVP)->tv_nsec); \ + nfsm_chain_add_32((E), (NMC), (TVP)->tv_sec); \ + nfsm_chain_add_32((E), (NMC), (TVP)->tv_nsec); \ } while (0) /* add an NFS v2 or v3 time to an mbuf chain */ #define nfsm_chain_add_time(E, NMC, VERS, TVP) \ do { \ - if ((VERS) == NFS_VER2) { \ - nfsm_chain_add_v2time((E), (NMC), (TVP)); \ - } else { \ - nfsm_chain_add_v3time((E), (NMC), (TVP)); \ - } \ + if ((VERS) == NFS_VER2) { \ + nfsm_chain_add_v2time((E), (NMC), (TVP)); \ + } else { \ + nfsm_chain_add_v3time((E), (NMC), (TVP)); \ + } \ } while (0) /* add an NFSv3 postop file handle to an mbuf chain */ #define nfsm_chain_add_postop_fh(E, NMC, FHP, FHLEN) \ do { \ - nfsm_chain_add_32((E), (NMC), TRUE); \ - nfsm_chain_add_fh((E), (NMC), NFS_VER3, (FHP), (FHLEN)); \ + nfsm_chain_add_32((E), (NMC), TRUE); \ + nfsm_chain_add_fh((E), (NMC), NFS_VER3, (FHP), (FHLEN)); \ } while (0) /* add NFSv3 postop attributes to an mbuf chain */ #define nfsm_chain_add_postop_attr(E, ND, NMC, ATTRERR, VAP) \ do { \ - if (E) break; \ - if (ATTRERR) { \ - nfsm_chain_add_32((E), (NMC), FALSE); \ - break; \ - } \ - nfsm_chain_add_32((E), (NMC), TRUE); \ - if (E) break; \ - (E) = nfsm_chain_add_fattr((ND), (NMC), (VAP)); \ + if (E) break; \ + if (ATTRERR) { \ + nfsm_chain_add_32((E), (NMC), FALSE); \ + break; \ + } \ + nfsm_chain_add_32((E), (NMC), TRUE); \ + if (E) break; \ + (E) = nfsm_chain_add_fattr((ND), (NMC), (VAP)); \ } while (0) /* Add an NFSv2 "sattr" structure to an mbuf chain */ #define nfsm_chain_add_v2sattr(E, NMC, VAP, SZRDEV) \ do { \ - if (E) break; \ - (E) = nfsm_chain_add_v2sattr_f((NMC), (VAP), (SZRDEV)); \ + if (E) break; \ + (E) = nfsm_chain_add_v2sattr_f((NMC), (VAP), (SZRDEV)); \ } while (0) /* Add an NFSv3 "sattr" structure to an mbuf chain */ #define nfsm_chain_add_v3sattr(E, NMC, VAP) \ do { \ - if (E) break; \ - (E) = nfsm_chain_add_v3sattr_f((NMC), (VAP)); \ + if (E) break; \ + (E) = nfsm_chain_add_v3sattr_f((NMC), (VAP)); \ } while (0) /* Add an NFSv4 "fattr" structure to an mbuf chain */ #define nfsm_chain_add_fattr4(E, NMC, VAP, NMP) \ do { \ - if (E) break; \ - (E) = nfsm_chain_add_fattr4_f((NMC), (VAP), (NMP)); \ + if (E) break; \ + (E) = nfsm_chain_add_fattr4_f((NMC), (VAP), (NMP)); \ } while (0) /* add NFSv3 WCC data to an mbuf chain */ #define nfsm_chain_add_wcc_data(E, ND, NMC, PREERR, PREVAP, POSTERR, POSTVAP) \ do { \ - if (E) break; \ - (E) = nfsm_chain_add_wcc_data_f((ND), (NMC), \ - (PREERR), (PREVAP), (POSTERR), (POSTVAP)); \ + if (E) break; \ + (E) = nfsm_chain_add_wcc_data_f((ND), (NMC), \ + (PREERR), (PREVAP), (POSTERR), (POSTVAP)); \ } while (0) /* add NFSv4 COMPOUND header */ -#define NFS4_TAG_LENGTH 12 +#define NFS4_TAG_LENGTH 12 #define nfsm_chain_add_compound_header(E, NMC, TAG, MINOR, NUMOPS) \ do { \ - if ((TAG) && strlen(TAG)) { \ - /* put tags into a fixed-length space-padded field */ \ - char __nfstag[NFS4_TAG_LENGTH+1]; \ - snprintf(__nfstag, sizeof(__nfstag), "%-*s", NFS4_TAG_LENGTH, (TAG)); \ - 
nfsm_chain_add_32((E), (NMC), NFS4_TAG_LENGTH); \ - nfsm_chain_add_opaque((E), (NMC), __nfstag, NFS4_TAG_LENGTH); \ - } else { \ - nfsm_chain_add_32((E), (NMC), 0); \ - } \ - nfsm_chain_add_32((E), (NMC), (MINOR)); /*minorversion*/ \ - nfsm_chain_add_32((E), (NMC), (NUMOPS)); \ + if ((TAG) && strlen(TAG)) { \ + /* put tags into a fixed-length space-padded field */ \ + char __nfstag[NFS4_TAG_LENGTH+1]; \ + snprintf(__nfstag, sizeof(__nfstag), "%-*s", NFS4_TAG_LENGTH, (TAG)); \ + nfsm_chain_add_32((E), (NMC), NFS4_TAG_LENGTH); \ + nfsm_chain_add_opaque((E), (NMC), __nfstag, NFS4_TAG_LENGTH); \ + } else { \ + nfsm_chain_add_32((E), (NMC), 0); \ + } \ + nfsm_chain_add_32((E), (NMC), (MINOR)); /*minorversion*/ \ + nfsm_chain_add_32((E), (NMC), (NUMOPS)); \ } while (0) /* add NFSv4 attr bitmap */ #define nfsm_chain_add_bitmap(E, NMC, B, LEN) \ do { \ - int __i; \ - nfsm_chain_add_32((E), (NMC), (LEN)); \ - for (__i=0; __i < (LEN); __i++) \ - nfsm_chain_add_32((E), (NMC), (B)[__i]); \ + int __i; \ + nfsm_chain_add_32((E), (NMC), (LEN)); \ + for (__i=0; __i < (LEN); __i++) \ + nfsm_chain_add_32((E), (NMC), (B)[__i]); \ } while (0) /* add NFSv4 attr bitmap masked with the given mask */ #define nfsm_chain_add_bitmap_masked(E, NMC, B, LEN, MASK) \ do { \ - int __i; \ - nfsm_chain_add_32((E), (NMC), (LEN)); \ - for (__i=0; __i < (LEN); __i++) \ - nfsm_chain_add_32((E), (NMC), ((B)[__i] & (MASK)[__i])); \ + int __i; \ + nfsm_chain_add_32((E), (NMC), (LEN)); \ + for (__i=0; __i < (LEN); __i++) \ + nfsm_chain_add_32((E), (NMC), ((B)[__i] & (MASK)[__i])); \ } while (0) /* add NFSv4 attr bitmap masked with the supported attributes for this mount/node */ #define nfsm_chain_add_bitmap_supported(E, NMC, B, NMP, NP) \ do { \ - uint32_t __bitmap[NFS_ATTR_BITMAP_LEN], *__bmp = (B); \ - int __nonamedattr = 0, __noacl = 0, __nomode = 0; \ - if (!((NMP)->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) || \ - ((NP) && (((nfsnode_t)(NP))->n_flag & (NISDOTZFS|NISDOTZFSCHILD)))) \ - __nonamedattr = 1; \ - if (!((NMP)->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)) \ - __noacl = 1; \ - if (NMFLAG((NMP), ACLONLY)) \ - __nomode = 1; \ - if (__nonamedattr || __noacl || __nomode) { \ - /* don't ask for attrs we're not supporting */ \ - /* some ".zfs" directories can't handle being asked for some attributes */ \ - int __ii; \ - NFS_CLEAR_ATTRIBUTES(__bitmap); \ - for (__ii=0; __ii < NFS_ATTR_BITMAP_LEN; __ii++) \ - __bitmap[__ii] = (B)[__ii]; \ - if (__nonamedattr) \ - NFS_BITMAP_CLR(__bitmap, NFS_FATTR_NAMED_ATTR); \ - if (__noacl) \ - NFS_BITMAP_CLR(__bitmap, NFS_FATTR_ACL); \ - if (__nomode) \ - NFS_BITMAP_CLR(__bitmap, NFS_FATTR_MODE); \ - __bmp = __bitmap; \ - } \ - nfsm_chain_add_bitmap_masked((E), (NMC), __bmp, NFS_ATTR_BITMAP_LEN, (NMP)->nm_fsattr.nfsa_supp_attr); \ + uint32_t __bitmap[NFS_ATTR_BITMAP_LEN], *__bmp = (B); \ + int __nonamedattr = 0, __noacl = 0, __nomode = 0; \ + if (!((NMP)->nm_fsattr.nfsa_flags & NFS_FSFLAG_NAMED_ATTR) || \ + ((NP) && (((nfsnode_t)(NP))->n_flag & (NISDOTZFS|NISDOTZFSCHILD)))) \ + __nonamedattr = 1; \ + if (!((NMP)->nm_fsattr.nfsa_flags & NFS_FSFLAG_ACL)) \ + __noacl = 1; \ + if (NMFLAG((NMP), ACLONLY)) \ + __nomode = 1; \ + if (__nonamedattr || __noacl || __nomode) { \ + /* don't ask for attrs we're not supporting */ \ + /* some ".zfs" directories can't handle being asked for some attributes */ \ + int __ii; \ + NFS_CLEAR_ATTRIBUTES(__bitmap); \ + for (__ii=0; __ii < NFS_ATTR_BITMAP_LEN; __ii++) \ + __bitmap[__ii] = (B)[__ii]; \ + if (__nonamedattr) \ + NFS_BITMAP_CLR(__bitmap, 
NFS_FATTR_NAMED_ATTR); \ + if (__noacl) \ + NFS_BITMAP_CLR(__bitmap, NFS_FATTR_ACL); \ + if (__nomode) \ + NFS_BITMAP_CLR(__bitmap, NFS_FATTR_MODE); \ + __bmp = __bitmap; \ + } \ + nfsm_chain_add_bitmap_masked((E), (NMC), __bmp, NFS_ATTR_BITMAP_LEN, (NMP)->nm_fsattr.nfsa_supp_attr); \ } while (0) /* Add an NFSv4 "stateid" structure to an mbuf chain */ #define nfsm_chain_add_stateid(E, NMC, SID) \ do { \ - nfsm_chain_add_32((E), (NMC), (SID)->seqid); \ - nfsm_chain_add_32((E), (NMC), (SID)->other[0]); \ - nfsm_chain_add_32((E), (NMC), (SID)->other[1]); \ - nfsm_chain_add_32((E), (NMC), (SID)->other[2]); \ + nfsm_chain_add_32((E), (NMC), (SID)->seqid); \ + nfsm_chain_add_32((E), (NMC), (SID)->other[0]); \ + nfsm_chain_add_32((E), (NMC), (SID)->other[1]); \ + nfsm_chain_add_32((E), (NMC), (SID)->other[2]); \ } while (0) /* add an NFSv4 lock owner structure to an mbuf chain */ #define nfsm_chain_add_lock_owner4(E, NMC, NMP, NLOP) \ do { \ - nfsm_chain_add_64((E), (NMC), (NMP)->nm_clientid); \ - nfsm_chain_add_32((E), (NMC), 5*NFSX_UNSIGNED); \ - nfsm_chain_add_32((E), (NMC), (NLOP)->nlo_name); \ - nfsm_chain_add_32((E), (NMC), (NLOP)->nlo_pid); \ - nfsm_chain_add_64((E), (NMC), (NLOP)->nlo_pid_start.tv_sec); \ - nfsm_chain_add_32((E), (NMC), (NLOP)->nlo_pid_start.tv_usec); \ + nfsm_chain_add_64((E), (NMC), (NMP)->nm_clientid); \ + nfsm_chain_add_32((E), (NMC), 5*NFSX_UNSIGNED); \ + nfsm_chain_add_32((E), (NMC), (NLOP)->nlo_name); \ + nfsm_chain_add_32((E), (NMC), (NLOP)->nlo_pid); \ + nfsm_chain_add_64((E), (NMC), (NLOP)->nlo_pid_start.tv_sec); \ + nfsm_chain_add_32((E), (NMC), (NLOP)->nlo_pid_start.tv_usec); \ } while (0) /* @@ -528,252 +528,252 @@ int nfsm_chain_trim_data(struct nfsm_chain *, int, int *); /* prepare an mbuf chain for dissection starting with the given mbuf */ #define nfsm_chain_dissect_init(E, NMC, H) \ do { \ - if (!(H)) { \ - (E) = EINVAL; \ - break; \ - } \ - (NMC)->nmc_mcur = (NMC)->nmc_mhead = (H); \ - (NMC)->nmc_ptr = mbuf_data(H); \ - (NMC)->nmc_left = mbuf_len(H); \ + if (!(H)) { \ + (E) = EINVAL; \ + break; \ + } \ + (NMC)->nmc_mcur = (NMC)->nmc_mhead = (H); \ + (NMC)->nmc_ptr = mbuf_data(H); \ + (NMC)->nmc_left = mbuf_len(H); \ } while (0) /* skip a number of bytes in an mbuf chain */ #define nfsm_chain_adv(E, NMC, LEN) \ do { \ - if (E) break; \ - if ((NMC)->nmc_left >= (uint32_t)(LEN)) { \ - (NMC)->nmc_left -= (LEN); \ - (NMC)->nmc_ptr += (LEN); \ - } else { \ - (E) = nfsm_chain_advance((NMC), (LEN)); \ - } \ + if (E) break; \ + if ((NMC)->nmc_left >= (uint32_t)(LEN)) { \ + (NMC)->nmc_left -= (LEN); \ + (NMC)->nmc_ptr += (LEN); \ + } else { \ + (E) = nfsm_chain_advance((NMC), (LEN)); \ + } \ } while (0) /* get a 32bit value from an mbuf chain */ #define nfsm_chain_get_32(E, NMC, LVAL) \ do { \ - uint32_t __tmp32, *__tmpptr; \ - if (E) break; \ - if ((NMC)->nmc_left >= NFSX_UNSIGNED) { \ - __tmpptr = (uint32_t*)(NMC)->nmc_ptr; \ - (NMC)->nmc_left -= NFSX_UNSIGNED; \ - (NMC)->nmc_ptr += NFSX_UNSIGNED; \ - } else { \ - __tmpptr = &__tmp32; \ - (E) = nfsm_chain_get_opaque_f((NMC), NFSX_UNSIGNED, (u_char*)__tmpptr); \ - if (E) break; \ - } \ - (LVAL) = fxdr_unsigned(uint32_t, *__tmpptr); \ + uint32_t __tmp32, *__tmpptr; \ + if (E) break; \ + if ((NMC)->nmc_left >= NFSX_UNSIGNED) { \ + __tmpptr = (uint32_t*)(NMC)->nmc_ptr; \ + (NMC)->nmc_left -= NFSX_UNSIGNED; \ + (NMC)->nmc_ptr += NFSX_UNSIGNED; \ + } else { \ + __tmpptr = &__tmp32; \ + (E) = nfsm_chain_get_opaque_f((NMC), NFSX_UNSIGNED, (u_char*)__tmpptr); \ + if (E) break; \ + } \ + (LVAL) = fxdr_unsigned(uint32_t, 
*__tmpptr); \ } while (0) /* get a 64bit value from an mbuf chain */ #define nfsm_chain_get_64(E, NMC, LVAL) \ do { \ - uint64_t __tmp64, *__tmpptr; \ - if (E) break; \ - if ((NMC)->nmc_left >= 2 * NFSX_UNSIGNED) { \ - __tmpptr = (uint64_t*)(NMC)->nmc_ptr; \ - (NMC)->nmc_left -= 2 * NFSX_UNSIGNED; \ - (NMC)->nmc_ptr += 2 * NFSX_UNSIGNED; \ - } else { \ - __tmpptr = &__tmp64; \ - (E) = nfsm_chain_get_opaque_f((NMC), 2 * NFSX_UNSIGNED, (u_char*)__tmpptr); \ - if (E) break; \ - } \ - fxdr_hyper(__tmpptr, &(LVAL)); \ + uint64_t __tmp64, *__tmpptr; \ + if (E) break; \ + if ((NMC)->nmc_left >= 2 * NFSX_UNSIGNED) { \ + __tmpptr = (uint64_t*)(NMC)->nmc_ptr; \ + (NMC)->nmc_left -= 2 * NFSX_UNSIGNED; \ + (NMC)->nmc_ptr += 2 * NFSX_UNSIGNED; \ + } else { \ + __tmpptr = &__tmp64; \ + (E) = nfsm_chain_get_opaque_f((NMC), 2 * NFSX_UNSIGNED, (u_char*)__tmpptr); \ + if (E) break; \ + } \ + fxdr_hyper(__tmpptr, &(LVAL)); \ } while (0) /* get a pointer to the next consecutive bytes in an mbuf chain */ #define nfsm_chain_get_opaque_pointer(E, NMC, LEN, PTR) \ do { \ - uint32_t rndlen; \ - if (E) break; \ - rndlen = nfsm_rndup(LEN); \ - if (rndlen < (LEN)) { \ - (E) = EBADRPC; \ - break; \ - } \ - if ((NMC)->nmc_left >= rndlen) { \ - (PTR) = (void*)(NMC)->nmc_ptr; \ - (NMC)->nmc_left -= rndlen; \ - (NMC)->nmc_ptr += rndlen; \ - } else { \ - (E) = nfsm_chain_get_opaque_pointer_f((NMC), (LEN), (u_char**)&(PTR)); \ - } \ + uint32_t rndlen; \ + if (E) break; \ + rndlen = nfsm_rndup(LEN); \ + if (rndlen < (LEN)) { \ + (E) = EBADRPC; \ + break; \ + } \ + if ((NMC)->nmc_left >= rndlen) { \ + (PTR) = (void*)(NMC)->nmc_ptr; \ + (NMC)->nmc_left -= rndlen; \ + (NMC)->nmc_ptr += rndlen; \ + } else { \ + (E) = nfsm_chain_get_opaque_pointer_f((NMC), (LEN), (u_char**)&(PTR)); \ + } \ } while (0) /* copy the next consecutive bytes of opaque data from an mbuf chain */ #define nfsm_chain_get_opaque(E, NMC, LEN, PTR) \ do { \ - uint32_t rndlen; \ - if (E) break; \ - rndlen = nfsm_rndup(LEN); \ - if (rndlen < (LEN)) { \ - (E) = EBADRPC; \ - break; \ - } \ - if ((NMC)->nmc_left >= rndlen) { \ - u_char *__tmpptr = (u_char*)(NMC)->nmc_ptr; \ - (NMC)->nmc_left -= rndlen; \ - (NMC)->nmc_ptr += rndlen; \ - bcopy(__tmpptr, (PTR), (LEN)); \ - } else { \ - (E) = nfsm_chain_get_opaque_f((NMC), (LEN), (u_char*)(PTR)); \ - } \ + uint32_t rndlen; \ + if (E) break; \ + rndlen = nfsm_rndup(LEN); \ + if (rndlen < (LEN)) { \ + (E) = EBADRPC; \ + break; \ + } \ + if ((NMC)->nmc_left >= rndlen) { \ + u_char *__tmpptr = (u_char*)(NMC)->nmc_ptr; \ + (NMC)->nmc_left -= rndlen; \ + (NMC)->nmc_ptr += rndlen; \ + bcopy(__tmpptr, (PTR), (LEN)); \ + } else { \ + (E) = nfsm_chain_get_opaque_f((NMC), (LEN), (u_char*)(PTR)); \ + } \ } while (0) /* get the size of and a pointer to a file handle in an mbuf chain */ #define nfsm_chain_get_fh_ptr(E, NMC, VERS, FHP, FHSIZE) \ do { \ - if ((VERS) != NFS_VER2) { \ - nfsm_chain_get_32((E), (NMC), (FHSIZE)); \ - if (E) break; \ - if ((FHSIZE) > NFS_MAX_FH_SIZE) \ - (E) = EBADRPC; \ - } else \ - (FHSIZE) = NFSX_V2FH;\ - if ((E) == 0) \ - nfsm_chain_get_opaque_pointer((E), (NMC), (FHSIZE), (FHP));\ + if ((VERS) != NFS_VER2) { \ + nfsm_chain_get_32((E), (NMC), (FHSIZE)); \ + if (E) break; \ + if ((FHSIZE) > NFS_MAX_FH_SIZE) \ + (E) = EBADRPC; \ + } else \ + (FHSIZE) = NFSX_V2FH;\ + if ((E) == 0) \ + nfsm_chain_get_opaque_pointer((E), (NMC), (FHSIZE), (FHP));\ } while (0) /* get the size of and data for a file handle in an mbuf chain */ #define nfsm_chain_get_fh(E, NMC, VERS, FHP) \ do { \ - if ((VERS) != NFS_VER2) { \ - 
nfsm_chain_get_32((E), (NMC), (FHP)->fh_len); \ - if ((FHP)->fh_len > sizeof((FHP)->fh_data)) \ - (E) = EBADRPC; \ - } else \ - (FHP)->fh_len = NFSX_V2FH;\ - if ((E) == 0) \ - nfsm_chain_get_opaque((E), (NMC), (uint32_t)(FHP)->fh_len, (FHP)->fh_data);\ - else \ - (FHP)->fh_len = 0;\ + if ((VERS) != NFS_VER2) { \ + nfsm_chain_get_32((E), (NMC), (FHP)->fh_len); \ + if ((FHP)->fh_len > sizeof((FHP)->fh_data)) \ + (E) = EBADRPC; \ + } else \ + (FHP)->fh_len = NFSX_V2FH;\ + if ((E) == 0) \ + nfsm_chain_get_opaque((E), (NMC), (uint32_t)(FHP)->fh_len, (FHP)->fh_data);\ + else \ + (FHP)->fh_len = 0;\ } while (0) /* get an NFS v2 or v3 time from an mbuf chain */ #define nfsm_chain_get_time(E, NMC, VERS, TSEC, TNSEC) \ do { \ - nfsm_chain_get_32((E), (NMC), (TSEC)); \ - nfsm_chain_get_32((E), (NMC), (TNSEC)); \ - if ((E) || ((VERS) != NFS_VER2)) break; \ - if ((uint32_t)(TNSEC) == 0xffffffff) \ - (TNSEC) = 0; \ - else \ - (TNSEC) *= 1000; \ + nfsm_chain_get_32((E), (NMC), (TSEC)); \ + nfsm_chain_get_32((E), (NMC), (TNSEC)); \ + if ((E) || ((VERS) != NFS_VER2)) break; \ + if ((uint32_t)(TNSEC) == 0xffffffff) \ + (TNSEC) = 0; \ + else \ + (TNSEC) *= 1000; \ } while (0) /* get postop attributes from an mbuf chain */ #define nfsm_chain_postop_attr_get(E, NMC, F, VAP) \ do { \ - (F) = 0; \ - if ((E) || !(NMC)->nmc_mhead) break; \ - nfsm_chain_get_32((E), (NMC), (F)); \ - if ((E) || !(F)) break; \ - if (((E) = nfs_parsefattr((NMC), NFS_VER3, (VAP)))) \ - (F) = 0; \ + (F) = 0; \ + if ((E) || !(NMC)->nmc_mhead) break; \ + nfsm_chain_get_32((E), (NMC), (F)); \ + if ((E) || !(F)) break; \ + if (((E) = nfs_parsefattr((NMC), NFS_VER3, (VAP)))) \ + (F) = 0; \ } while (0) /* update a node's attribute cache with postop attributes from an mbuf chain */ /* (F returns whether the attributes were updated or not) */ #define nfsm_chain_postop_attr_update_flag(E, NMC, NP, F, X) \ do { \ - struct nfs_vattr ttvattr; \ - nfsm_chain_postop_attr_get((E), (NMC), (F), &ttvattr); \ - if ((E) || !(F)) break; \ - if (((E) = nfs_loadattrcache((NP), &ttvattr, (X), 1))) { \ - (F) = 0; \ - break; \ - } \ - if (*(X) == 0) \ - (F) = 0; \ + struct nfs_vattr ttvattr; \ + nfsm_chain_postop_attr_get((E), (NMC), (F), &ttvattr); \ + if ((E) || !(F)) break; \ + if (((E) = nfs_loadattrcache((NP), &ttvattr, (X), 1))) { \ + (F) = 0; \ + break; \ + } \ + if (*(X) == 0) \ + (F) = 0; \ } while (0) /* update a node's attribute cache with postop attributes from an mbuf chain */ #define nfsm_chain_postop_attr_update(E, NMC, NP, X) \ do { \ - int __dummy_flag = 0; \ - nfsm_chain_postop_attr_update_flag((E), (NMC), (NP), __dummy_flag, (X)); \ + int __dummy_flag = 0; \ + nfsm_chain_postop_attr_update_flag((E), (NMC), (NP), __dummy_flag, (X)); \ } while (0) /* get and process NFSv3 WCC data from an mbuf chain */ #define nfsm_chain_get_wcc_data(E, NMC, NP, PREMTIME, NEWPOSTATTR, X) \ do { \ - if (E) break; \ - (E) = nfsm_chain_get_wcc_data_f((NMC), (NP), (PREMTIME), (NEWPOSTATTR), (X)); \ + if (E) break; \ + (E) = nfsm_chain_get_wcc_data_f((NMC), (NP), (PREMTIME), (NEWPOSTATTR), (X)); \ } while (0) /* update a node's attribute cache with attributes from an mbuf chain */ #define nfsm_chain_loadattr(E, NMC, NP, VERS, X) \ do { \ - struct nfs_vattr ttvattr; \ - if (E) break; \ - if ((VERS) == NFS_VER4) { \ - (E) = nfs4_parsefattr((NMC), NULL, &ttvattr, NULL, NULL, NULL); \ - } else { \ - (E) = nfs_parsefattr((NMC), (VERS), &ttvattr); \ - } \ - if (!(E) && (NP)) \ - (E) = nfs_loadattrcache((NP), &ttvattr, (X), 0); \ - NVATTR_CLEANUP(&ttvattr); \ + struct 
nfs_vattr ttvattr; \ + if (E) break; \ + if ((VERS) == NFS_VER4) { \ + (E) = nfs4_parsefattr((NMC), NULL, &ttvattr, NULL, NULL, NULL); \ + } else { \ + (E) = nfs_parsefattr((NMC), (VERS), &ttvattr); \ + } \ + if (!(E) && (NP)) \ + (E) = nfs_loadattrcache((NP), &ttvattr, (X), 0); \ + NVATTR_CLEANUP(&ttvattr); \ } while (0) /* get NFSv4 attr bitmap */ #define nfsm_chain_get_bitmap(E, NMC, B, LEN) \ do { \ - uint32_t __len = 0, __i; \ - nfsm_chain_get_32((E), (NMC), __len); \ - if (E) break; \ - for (__i=0; __i < MIN(__len, (LEN)); __i++) \ - nfsm_chain_get_32((E), (NMC), (B)[__i]); \ - if (E) break; \ - for (; __i < __len; __i++) \ - nfsm_chain_adv((E), (NMC), NFSX_UNSIGNED); \ - for (; __i < (LEN); __i++) \ - (B)[__i] = 0; \ - (LEN) = __len; \ + uint32_t __len = 0, __i; \ + nfsm_chain_get_32((E), (NMC), __len); \ + if (E) break; \ + for (__i=0; __i < MIN(__len, (LEN)); __i++) \ + nfsm_chain_get_32((E), (NMC), (B)[__i]); \ + if (E) break; \ + for (; __i < __len; __i++) \ + nfsm_chain_adv((E), (NMC), NFSX_UNSIGNED); \ + for (; __i < (LEN); __i++) \ + (B)[__i] = 0; \ + (LEN) = __len; \ } while (0) /* get an NFSv4 "stateid" structure from an mbuf chain */ #define nfsm_chain_get_stateid(E, NMC, SID) \ do { \ - nfsm_chain_get_32((E), (NMC), (SID)->seqid); \ - nfsm_chain_get_32((E), (NMC), (SID)->other[0]); \ - nfsm_chain_get_32((E), (NMC), (SID)->other[1]); \ - nfsm_chain_get_32((E), (NMC), (SID)->other[2]); \ + nfsm_chain_get_32((E), (NMC), (SID)->seqid); \ + nfsm_chain_get_32((E), (NMC), (SID)->other[0]); \ + nfsm_chain_get_32((E), (NMC), (SID)->other[1]); \ + nfsm_chain_get_32((E), (NMC), (SID)->other[2]); \ } while (0) #define nfsm_chain_skip_tag(E, NMC) \ do { \ - uint32_t __val = 0; \ - nfsm_chain_get_32((E), (NMC), __val); \ - nfsm_chain_adv((E), (NMC), nfsm_rndup(__val)); \ + uint32_t __val = 0; \ + nfsm_chain_get_32((E), (NMC), __val); \ + nfsm_chain_adv((E), (NMC), nfsm_rndup(__val)); \ } while (0) #define nfsm_chain_op_check(E, NMC, OP) \ do { \ - uint32_t __val = 0; \ - nfsm_chain_get_32((E), (NMC), __val); \ - /* [sigh] some implementations return the "illegal" op for unsupported ops */ \ - nfsm_assert((E), ((__val == (OP)) || (__val == NFS_OP_ILLEGAL)), EBADRPC); \ - nfsm_chain_get_32((E), (NMC), __val); \ - nfsm_assert((E), (__val == NFS_OK), __val); \ + uint32_t __val = 0; \ + nfsm_chain_get_32((E), (NMC), __val); \ + /* [sigh] some implementations return the "illegal" op for unsupported ops */ \ + nfsm_assert((E), ((__val == (OP)) || (__val == NFS_OP_ILLEGAL)), EBADRPC); \ + nfsm_chain_get_32((E), (NMC), __val); \ + nfsm_assert((E), (__val == NFS_OK), __val); \ } while (0) #define nfsm_chain_check_change_info(E, NMC, DNP) \ do { \ - uint64_t __ci_before, __ci_after; \ - uint32_t __ci_atomic = 0; \ - nfsm_chain_get_32((E), (NMC), __ci_atomic); \ - nfsm_chain_get_64((E), (NMC), __ci_before); \ - nfsm_chain_get_64((E), (NMC), __ci_after); \ - if ((E) || !(DNP)) break; \ - if (__ci_atomic && (__ci_before == (DNP)->n_ncchange)) { \ - (DNP)->n_ncchange = __ci_after; \ - } else { \ - cache_purge(NFSTOV(DNP)); \ - (DNP)->n_ncgen++; \ - } \ + uint64_t __ci_before, __ci_after; \ + uint32_t __ci_atomic = 0; \ + nfsm_chain_get_32((E), (NMC), __ci_atomic); \ + nfsm_chain_get_64((E), (NMC), __ci_before); \ + nfsm_chain_get_64((E), (NMC), __ci_after); \ + if ((E) || !(DNP)) break; \ + if (__ci_atomic && (__ci_before == (DNP)->n_ncchange)) { \ + (DNP)->n_ncchange = __ci_after; \ + } else { \ + cache_purge(NFSTOV(DNP)); \ + (DNP)->n_ncgen++; \ + } \ } while (0) #endif /* __APPLE_API_PRIVATE */ 
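[Editor's note on the macros reindented above: every nfsm_chain_* macro follows the same error-threading convention. Each body is wrapped in do { ... } while (0) so the macro behaves as a single statement, and each begins with "if (E) break;" so it becomes a no-op once the error variable is set. Callers can therefore chain many XDR encode/decode steps with no per-call branching and let nfsmout_if()/nfsmerr_if() jump to a cleanup label at the end. The following is a minimal standalone sketch of that convention only; buf_t, emit_u32(), chain_add_32(), and chain_out_if() are hypothetical stand-ins for the real mbuf-chain types, and it deliberately omits the XDR byte-order handling (txdr_unsigned, NFSX_UNSIGNED) the kernel macros perform.]

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical fixed-size buffer standing in for an mbuf chain. */
typedef struct {
	uint8_t data[64];
	size_t  off;
} buf_t;

/* Append a 32-bit value; non-zero return models an ENOMEM-style failure. */
static int
emit_u32(buf_t *b, uint32_t v)
{
	if (b->off + sizeof(v) > sizeof(b->data))
		return 1;                       /* out of space */
	memcpy(b->data + b->off, &v, sizeof(v));
	b->off += sizeof(v);
	return 0;
}

/* Same shape as nfsm_chain_add_32: one statement, no-op on prior error. */
#define chain_add_32(E, B, V) \
	do { \
		if (E) break;                   /* earlier step failed: skip */ \
		(E) = emit_u32((B), (V)); \
	} while (0)

/* Same shape as nfsmout_if: branch to the cleanup label on error. */
#define chain_out_if(E) do { if (E) goto out; } while (0)

int
main(void)
{
	buf_t b = { .off = 0 };
	int error = 0;

	/* Steps chain without per-call checks; the first failure sticks. */
	chain_add_32(error, &b, 0xdeadbeef);
	chain_add_32(error, &b, 42);
	chain_out_if(error);

	printf("encoded %zu bytes\n", b.off);
	return 0;
out:
	printf("encode failed: %d\n", error);
	return 1;
}

[The kernel macros add byte-swapping and mbuf extension on top of this shape, but the control flow is identical: each macro consumes and updates E, so straight-line request-building code stays linear and the single nfsmout label handles every failure path.]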
diff --git a/bsd/nfs/nfsmount.h b/bsd/nfs/nfsmount.h index 90b75a548..c9dc924de 100644 --- a/bsd/nfs/nfsmount.h +++ b/bsd/nfs/nfsmount.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -77,171 +77,171 @@ * NFS mount file system attributes */ struct nfs_fsattr { - uint32_t nfsa_flags; /* file system flags */ - uint32_t nfsa_lease; /* lease time in seconds */ - uint32_t nfsa_maxname; /* maximum filename size */ - uint32_t nfsa_maxlink; /* maximum # links */ - uint32_t nfsa_bsize; /* block size */ - uint32_t nfsa_pad; /* UNUSED */ - uint64_t nfsa_maxfilesize; /* maximum file size */ - uint64_t nfsa_maxread; /* maximum read size */ - uint64_t nfsa_maxwrite; /* maximum write size */ - uint64_t nfsa_files_avail; /* file slots available */ - uint64_t nfsa_files_free; /* file slots free */ - uint64_t nfsa_files_total; /* file slots total */ - uint64_t nfsa_space_avail; /* disk space available */ - uint64_t nfsa_space_free; /* disk space free */ - uint64_t nfsa_space_total; /* disk space total */ - uint32_t nfsa_supp_attr[NFS_ATTR_BITMAP_LEN]; /* attributes supported on this file system */ - uint32_t nfsa_bitmap[NFS_ATTR_BITMAP_LEN]; /* valid attributes */ + uint32_t nfsa_flags; /* file system flags */ + uint32_t nfsa_lease; /* lease time in seconds */ + uint32_t nfsa_maxname; /* maximum filename size */ + uint32_t nfsa_maxlink; /* maximum # links */ + uint32_t nfsa_bsize; /* block size */ + uint32_t nfsa_pad; /* UNUSED */ + uint64_t nfsa_maxfilesize; /* maximum file size */ + uint64_t nfsa_maxread; /* maximum read size */ + uint64_t nfsa_maxwrite; /* maximum write size */ + uint64_t nfsa_files_avail; /* file slots available */ + uint64_t nfsa_files_free; /* file slots free */ + uint64_t nfsa_files_total; /* file slots total */ + uint64_t nfsa_space_avail; /* disk space available */ + uint64_t nfsa_space_free; /* disk space free */ + uint64_t nfsa_space_total; /* disk space total */ + uint32_t nfsa_supp_attr[NFS_ATTR_BITMAP_LEN]; /* attributes supported on this file system */ + uint32_t nfsa_bitmap[NFS_ATTR_BITMAP_LEN]; /* valid attributes */ }; -#define NFS_FSFLAG_LINK 0x00000001 -#define NFS_FSFLAG_SYMLINK 0x00000002 -#define NFS_FSFLAG_UNIQUE_FH 0x00000004 -#define NFS_FSFLAG_ACL 0x00000008 -#define NFS_FSFLAG_SET_TIME 0x00000010 -#define NFS_FSFLAG_CASE_INSENSITIVE 0x00000020 -#define NFS_FSFLAG_CASE_PRESERVING 0x00000040 -#define 
NFS_FSFLAG_CHOWN_RESTRICTED 0x00000080 -#define NFS_FSFLAG_HOMOGENEOUS 0x00000100 -#define NFS_FSFLAG_NO_TRUNC 0x00000200 -#define NFS_FSFLAG_NAMED_ATTR 0x00000400 -#define NFS_FSFLAG_FHTYPE_MASK 0xFF000000 -#define NFS_FSFLAG_FHTYPE_SHIFT 24 +#define NFS_FSFLAG_LINK 0x00000001 +#define NFS_FSFLAG_SYMLINK 0x00000002 +#define NFS_FSFLAG_UNIQUE_FH 0x00000004 +#define NFS_FSFLAG_ACL 0x00000008 +#define NFS_FSFLAG_SET_TIME 0x00000010 +#define NFS_FSFLAG_CASE_INSENSITIVE 0x00000020 +#define NFS_FSFLAG_CASE_PRESERVING 0x00000040 +#define NFS_FSFLAG_CHOWN_RESTRICTED 0x00000080 +#define NFS_FSFLAG_HOMOGENEOUS 0x00000100 +#define NFS_FSFLAG_NO_TRUNC 0x00000200 +#define NFS_FSFLAG_NAMED_ATTR 0x00000400 +#define NFS_FSFLAG_FHTYPE_MASK 0xFF000000 +#define NFS_FSFLAG_FHTYPE_SHIFT 24 /* * NFS file system location structures */ struct nfs_fs_server { - char * ns_name; /* name of server */ - char ** ns_addresses; /* array of addresses for server */ - uint32_t ns_addrcount; /* # of addresses */ + char * ns_name; /* name of server */ + char ** ns_addresses; /* array of addresses for server */ + uint32_t ns_addrcount; /* # of addresses */ }; struct nfs_fs_path { - char ** np_components; /* array of component pointers */ - uint32_t np_compcount; /* # components in path */ + char ** np_components; /* array of component pointers */ + uint32_t np_compcount; /* # components in path */ }; struct nfs_fs_location { - struct nfs_fs_server ** nl_servers; /* array of server pointers */ - struct nfs_fs_path nl_path; /* file system path */ - uint32_t nl_servcount; /* # of servers */ + struct nfs_fs_server ** nl_servers; /* array of server pointers */ + struct nfs_fs_path nl_path; /* file system path */ + uint32_t nl_servcount; /* # of servers */ }; struct nfs_location_index { - uint8_t nli_flags; /* misc flags */ - uint8_t nli_loc; /* location index */ - uint8_t nli_serv; /* server index */ - uint8_t nli_addr; /* address index */ + uint8_t nli_flags; /* misc flags */ + uint8_t nli_loc; /* location index */ + uint8_t nli_serv; /* server index */ + uint8_t nli_addr; /* address index */ }; -#define NLI_VALID 0x01 /* index is valid */ +#define NLI_VALID 0x01 /* index is valid */ struct nfs_fs_locations { - struct nfs_fs_path nl_root; /* current server's root file system path */ - uint32_t nl_numlocs; /* # of locations */ - struct nfs_location_index nl_current; /* index of current location/server/address */ - struct nfs_fs_location **nl_locations; /* array of fs locations */ + struct nfs_fs_path nl_root; /* current server's root file system path */ + uint32_t nl_numlocs; /* # of locations */ + struct nfs_location_index nl_current; /* index of current location/server/address */ + struct nfs_fs_location **nl_locations; /* array of fs locations */ }; /* * RPC record marker parsing state */ struct nfs_rpc_record_state { - mbuf_t nrrs_m; /* mbufs for current record */ - mbuf_t nrrs_mlast; - uint16_t nrrs_lastfrag; /* last fragment of record */ - uint16_t nrrs_markerleft; /* marker bytes remaining */ - uint32_t nrrs_fragleft; /* fragment bytes remaining */ - uint32_t nrrs_reclen; /* length of RPC record */ + mbuf_t nrrs_m; /* mbufs for current record */ + mbuf_t nrrs_mlast; + uint16_t nrrs_lastfrag; /* last fragment of record */ + uint16_t nrrs_markerleft; /* marker bytes remaining */ + uint32_t nrrs_fragleft; /* fragment bytes remaining */ + uint32_t nrrs_reclen; /* length of RPC record */ }; /* * NFS socket structures */ struct nfs_socket { - lck_mtx_t nso_lock; /* nfs socket lock */ - TAILQ_ENTRY(nfs_socket) nso_link; /* list of 
sockets */ - struct sockaddr * nso_saddr; /* socket address */ - struct sockaddr * nso_saddr2; /* additional socket address */ - void * nso_wake; /* address to wake up */ - time_t nso_timestamp; - time_t nso_reqtimestamp; /* last request sent */ - socket_t nso_so; /* socket */ - uint8_t nso_sotype; /* Type of socket */ - uint16_t nso_flags; /* NSO_* flags */ - struct nfs_location_index nso_location; /* location index */ - uint32_t nso_protocol; /* RPC protocol */ - uint32_t nso_version; /* RPC protocol version */ - uint32_t nso_pingxid; /* RPC XID of NULL ping request */ - uint32_t nso_nfs_min_vers; /* minimum nfs version for connecting sockets */ - uint32_t nso_nfs_max_vers; /* maximum nfs version for connecting sockets */ - int nso_error; /* saved error/status */ - struct nfs_rpc_record_state nso_rrs; /* RPC record parsing state (TCP) */ + lck_mtx_t nso_lock; /* nfs socket lock */ + TAILQ_ENTRY(nfs_socket) nso_link; /* list of sockets */ + struct sockaddr * nso_saddr; /* socket address */ + struct sockaddr * nso_saddr2; /* additional socket address */ + void * nso_wake; /* address to wake up */ + time_t nso_timestamp; + time_t nso_reqtimestamp; /* last request sent */ + socket_t nso_so; /* socket */ + uint8_t nso_sotype; /* Type of socket */ + uint16_t nso_flags; /* NSO_* flags */ + struct nfs_location_index nso_location; /* location index */ + uint32_t nso_protocol; /* RPC protocol */ + uint32_t nso_version; /* RPC protocol version */ + uint32_t nso_pingxid; /* RPC XID of NULL ping request */ + uint32_t nso_nfs_min_vers; /* minimum nfs version for connecting sockets */ + uint32_t nso_nfs_max_vers; /* maximum nfs version for connecting sockets */ + int nso_error; /* saved error/status */ + struct nfs_rpc_record_state nso_rrs; /* RPC record parsing state (TCP) */ }; TAILQ_HEAD(nfssocketlist, nfs_socket); /* nso_flags */ -#define NSO_UPCALL 0x0001 /* socket upcall in progress */ -#define NSO_DEAD 0x0002 /* socket is dead */ -#define NSO_CONNECTING 0x0004 /* socket is being connected */ -#define NSO_CONNECTED 0x0008 /* socket connection complete */ -#define NSO_PINGING 0x0010 /* socket is being tested */ -#define NSO_VERIFIED 0x0020 /* socket appears functional */ -#define NSO_DISCONNECTING 0x0040 /* socket is being disconnected */ +#define NSO_UPCALL 0x0001 /* socket upcall in progress */ +#define NSO_DEAD 0x0002 /* socket is dead */ +#define NSO_CONNECTING 0x0004 /* socket is being connected */ +#define NSO_CONNECTED 0x0008 /* socket connection complete */ +#define NSO_PINGING 0x0010 /* socket is being tested */ +#define NSO_VERIFIED 0x0020 /* socket appears functional */ +#define NSO_DISCONNECTING 0x0040 /* socket is being disconnected */ /* NFS connect socket search state */ struct nfs_socket_search { - struct nfs_location_index nss_startloc; /* starting location index */ - struct nfs_location_index nss_nextloc; /* next location index */ - struct nfssocketlist nss_socklist; /* list of active sockets */ - time_t nss_timestamp; /* search start time */ - time_t nss_last; /* timestamp of last socket */ - struct nfs_socket * nss_sock; /* found socket */ - uint8_t nss_sotype; /* TCP/UDP */ - uint8_t nss_sockcnt; /* # of active sockets */ - in_port_t nss_port; /* port # to connect to */ - uint32_t nss_protocol; /* RPC protocol */ - uint32_t nss_version; /* RPC protocol version */ - uint32_t nss_flags; /* (see below) */ - int nss_addrcnt; /* Number addresses to try or left */ - int nss_timeo; /* how long we are willing to wait */ - int nss_error; /* best error we've gotten so far */ + struct 
nfs_location_index nss_startloc; /* starting location index */ + struct nfs_location_index nss_nextloc; /* next location index */ + struct nfssocketlist nss_socklist; /* list of active sockets */ + time_t nss_timestamp; /* search start time */ + time_t nss_last; /* timestamp of last socket */ + struct nfs_socket * nss_sock; /* found socket */ + uint8_t nss_sotype; /* TCP/UDP */ + uint8_t nss_sockcnt; /* # of active sockets */ + in_port_t nss_port; /* port # to connect to */ + uint32_t nss_protocol; /* RPC protocol */ + uint32_t nss_version; /* RPC protocol version */ + uint32_t nss_flags; /* (see below) */ + int nss_addrcnt; /* Number addresses to try or left */ + int nss_timeo; /* how long we are willing to wait */ + int nss_error; /* best error we've gotten so far */ }; /* nss_flags */ -#define NSS_VERBOSE 0x00000001 /* OK to log info about socket search */ -#define NSS_WARNED 0x00000002 /* logged warning about socket search taking a while */ -#define NSS_FALLBACK2PMAP 0x00000004 /* Try V4 on NFS_PORT first, if that fails fall back to portmapper */ +#define NSS_VERBOSE 0x00000001 /* OK to log info about socket search */ +#define NSS_WARNED 0x00000002 /* logged warning about socket search taking a while */ +#define NSS_FALLBACK2PMAP 0x00000004 /* Try V4 on NFS_PORT first, if that fails fall back to portmapper */ /* * function table for calling version-specific NFS functions */ struct nfs_funcs { - int (*nf_mount)(struct nfsmount *, vfs_context_t, nfsnode_t *); - int (*nf_update_statfs)(struct nfsmount *, vfs_context_t); - int (*nf_getquota)(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *); - int (*nf_access_rpc)(nfsnode_t, u_int32_t *, int, vfs_context_t); - int (*nf_getattr_rpc)(nfsnode_t, mount_t, u_char *, size_t, int, vfs_context_t, struct nfs_vattr *, u_int64_t *); - int (*nf_setattr_rpc)(nfsnode_t, struct vnode_attr *, vfs_context_t); - int (*nf_read_rpc_async)(nfsnode_t, off_t, size_t, thread_t, kauth_cred_t, struct nfsreq_cbinfo *, struct nfsreq **); - int (*nf_read_rpc_async_finish)(nfsnode_t, struct nfsreq *, uio_t, size_t *, int *); - int (*nf_readlink_rpc)(nfsnode_t, char *, uint32_t *, vfs_context_t); - int (*nf_write_rpc_async)(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct nfsreq_cbinfo *, struct nfsreq **); - int (*nf_write_rpc_async_finish)(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); - int (*nf_commit_rpc)(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); - int (*nf_lookup_rpc_async)(nfsnode_t, char *, int, vfs_context_t, struct nfsreq **); - int (*nf_lookup_rpc_async_finish)(nfsnode_t, char *, int, vfs_context_t, struct nfsreq *, u_int64_t *, fhandle_t *, struct nfs_vattr *); - int (*nf_remove_rpc)(nfsnode_t, char *, int, thread_t, kauth_cred_t); - int (*nf_rename_rpc)(nfsnode_t, char *, int, nfsnode_t, char *, int, vfs_context_t); - int (*nf_setlock_rpc)(nfsnode_t, struct nfs_open_file *, struct nfs_file_lock *, int, int, thread_t, kauth_cred_t); - int (*nf_unlock_rpc)(nfsnode_t, struct nfs_lock_owner *, int, uint64_t, uint64_t, int, thread_t, kauth_cred_t); - int (*nf_getlock_rpc)(nfsnode_t, struct nfs_lock_owner *, struct flock *, uint64_t, uint64_t, vfs_context_t); + int (*nf_mount)(struct nfsmount *, vfs_context_t, nfsnode_t *); + int (*nf_update_statfs)(struct nfsmount *, vfs_context_t); + int (*nf_getquota)(struct nfsmount *, vfs_context_t, uid_t, int, struct dqblk *); + int (*nf_access_rpc)(nfsnode_t, u_int32_t *, int, vfs_context_t); + int (*nf_getattr_rpc)(nfsnode_t, mount_t, u_char *, size_t, int, vfs_context_t, 
struct nfs_vattr *, u_int64_t *); + int (*nf_setattr_rpc)(nfsnode_t, struct vnode_attr *, vfs_context_t); + int (*nf_read_rpc_async)(nfsnode_t, off_t, size_t, thread_t, kauth_cred_t, struct nfsreq_cbinfo *, struct nfsreq **); + int (*nf_read_rpc_async_finish)(nfsnode_t, struct nfsreq *, uio_t, size_t *, int *); + int (*nf_readlink_rpc)(nfsnode_t, char *, uint32_t *, vfs_context_t); + int (*nf_write_rpc_async)(nfsnode_t, uio_t, size_t, thread_t, kauth_cred_t, int, struct nfsreq_cbinfo *, struct nfsreq **); + int (*nf_write_rpc_async_finish)(nfsnode_t, struct nfsreq *, int *, size_t *, uint64_t *); + int (*nf_commit_rpc)(nfsnode_t, uint64_t, uint64_t, kauth_cred_t, uint64_t); + int (*nf_lookup_rpc_async)(nfsnode_t, char *, int, vfs_context_t, struct nfsreq **); + int (*nf_lookup_rpc_async_finish)(nfsnode_t, char *, int, vfs_context_t, struct nfsreq *, u_int64_t *, fhandle_t *, struct nfs_vattr *); + int (*nf_remove_rpc)(nfsnode_t, char *, int, thread_t, kauth_cred_t); + int (*nf_rename_rpc)(nfsnode_t, char *, int, nfsnode_t, char *, int, vfs_context_t); + int (*nf_setlock_rpc)(nfsnode_t, struct nfs_open_file *, struct nfs_file_lock *, int, int, thread_t, kauth_cred_t); + int (*nf_unlock_rpc)(nfsnode_t, struct nfs_lock_owner *, int, uint64_t, uint64_t, int, thread_t, kauth_cred_t); + int (*nf_getlock_rpc)(nfsnode_t, struct nfs_lock_owner *, struct flock *, uint64_t, uint64_t, vfs_context_t); }; /* * The long form of the NFSv4 client ID. */ struct nfs_client_id { - TAILQ_ENTRY(nfs_client_id) nci_link; /* list of client IDs */ - char *nci_id; /* client id buffer */ - int nci_idlen; /* length of client id buffer */ + TAILQ_ENTRY(nfs_client_id) nci_link; /* list of client IDs */ + char *nci_id; /* client id buffer */ + int nci_idlen; /* length of client id buffer */ }; TAILQ_HEAD(nfsclientidlist, nfs_client_id); extern struct nfsclientidlist nfsclientids; @@ -252,183 +252,183 @@ extern struct nfsclientidlist nfsclientids; * Holds NFS specific information for mount. */ struct nfsmount { - lck_mtx_t nm_lock; /* nfs mount lock */ - char * nm_args; /* NFS mount args (XDR) */ + lck_mtx_t nm_lock; /* nfs mount lock */ + char * nm_args; /* NFS mount args (XDR) */ uint32_t nm_mattrs[NFS_MATTR_BITMAP_LEN]; /* mount attributes in mount args */ uint32_t nm_mflags_mask[NFS_MFLAG_BITMAP_LEN]; /* mount flags mask in mount args */ uint32_t nm_mflags[NFS_MFLAG_BITMAP_LEN]; /* mount flags in mount args */ uint32_t nm_flags[NFS_MFLAG_BITMAP_LEN]; /* current mount flags (soft, intr, etc...) 
*/ - char * nm_realm; /* Kerberos realm to use */ - char * nm_principal; /* GSS principal to use on initial mount */ - char * nm_sprinc; /* Kerberos principal of the server */ - int nm_ref; /* Reference count on this mount */ - int nm_state; /* Internal state flags */ - int nm_vers; /* NFS version */ - uint32_t nm_minor_vers; /* minor version of above */ - uint32_t nm_min_vers; /* minimum packed version to try */ - uint32_t nm_max_vers; /* maximum packed version to try */ + char * nm_realm; /* Kerberos realm to use */ + char * nm_principal; /* GSS principal to use on initial mount */ + char * nm_sprinc; /* Kerberos principal of the server */ + int nm_ref; /* Reference count on this mount */ + int nm_state; /* Internal state flags */ + int nm_vers; /* NFS version */ + uint32_t nm_minor_vers; /* minor version of above */ + uint32_t nm_min_vers; /* minimum packed version to try */ + uint32_t nm_max_vers; /* maximum packed version to try */ const struct nfs_funcs *nm_funcs;/* version-specific functions */ - kauth_cred_t nm_mcred; /* credential used for the mount */ - mount_t nm_mountp; /* VFS structure for this filesystem */ - nfsnode_t nm_dnp; /* root directory nfsnode pointer */ + kauth_cred_t nm_mcred; /* credential used for the mount */ + mount_t nm_mountp; /* VFS structure for this filesystem */ + nfsnode_t nm_dnp; /* root directory nfsnode pointer */ struct nfs_fs_locations nm_locations; /* file system locations */ - uint32_t nm_numgrps; /* Max. size of groupslist */ - TAILQ_HEAD(, nfs_gss_clnt_ctx) nm_gsscl; /* GSS user contexts */ - uint32_t nm_ncentries; /* GSS expired negative cache entries */ - int nm_timeo; /* Init timer for NFSMNT_DUMBTIMR */ - int nm_retry; /* Max retries */ - uint32_t nm_rsize; /* Max size of read rpc */ - uint32_t nm_wsize; /* Max size of write rpc */ - uint32_t nm_biosize; /* buffer I/O size */ - uint32_t nm_readdirsize; /* Size of a readdir rpc */ - uint32_t nm_readahead; /* Num. of blocks to readahead */ - uint32_t nm_acregmin; /* reg file min attr cache timeout */ - uint32_t nm_acregmax; /* reg file max attr cache timeout */ - uint32_t nm_acdirmin; /* dir min attr cache timeout */ - uint32_t nm_acdirmax; /* dir max attr cache timeout */ - uint32_t nm_auth; /* security mechanism flavor being used */ - uint32_t nm_writers; /* Number of nodes open for writing */ - uint32_t nm_mappers; /* Number of nodes that have mmapped */ - struct nfs_sec nm_sec; /* acceptable security mechanism flavors */ - struct nfs_sec nm_servsec; /* server's acceptable security mechanism flavors */ - struct nfs_etype nm_etype; /* If using kerberos, the support session key encryption types */ - fhandle_t *nm_fh; /* initial file handle */ - uint8_t nm_lockmode; /* advisory file locking mode */ + uint32_t nm_numgrps; /* Max. size of groupslist */ + TAILQ_HEAD(, nfs_gss_clnt_ctx) nm_gsscl; /* GSS user contexts */ + uint32_t nm_ncentries; /* GSS expired negative cache entries */ + int nm_timeo; /* Init timer for NFSMNT_DUMBTIMR */ + int nm_retry; /* Max retries */ + uint32_t nm_rsize; /* Max size of read rpc */ + uint32_t nm_wsize; /* Max size of write rpc */ + uint32_t nm_biosize; /* buffer I/O size */ + uint32_t nm_readdirsize; /* Size of a readdir rpc */ + uint32_t nm_readahead; /* Num. 
of blocks to readahead */ + uint32_t nm_acregmin; /* reg file min attr cache timeout */ + uint32_t nm_acregmax; /* reg file max attr cache timeout */ + uint32_t nm_acdirmin; /* dir min attr cache timeout */ + uint32_t nm_acdirmax; /* dir max attr cache timeout */ + uint32_t nm_auth; /* security mechanism flavor being used */ + uint32_t nm_writers; /* Number of nodes open for writing */ + uint32_t nm_mappers; /* Number of nodes that have mmapped */ + struct nfs_sec nm_sec; /* acceptable security mechanism flavors */ + struct nfs_sec nm_servsec; /* server's acceptable security mechanism flavors */ + struct nfs_etype nm_etype; /* If using kerberos, the support session key encryption types */ + fhandle_t *nm_fh; /* initial file handle */ + uint8_t nm_lockmode; /* advisory file locking mode */ /* mount info */ - uint32_t nm_fsattrstamp; /* timestamp for fs attrs */ - struct nfs_fsattr nm_fsattr; /* file system attributes */ - uint64_t nm_verf; /* v3/v4 write verifier */ + uint32_t nm_fsattrstamp; /* timestamp for fs attrs */ + struct nfs_fsattr nm_fsattr; /* file system attributes */ + uint64_t nm_verf; /* v3/v4 write verifier */ union { - struct { /* v2/v3 specific fields */ - TAILQ_ENTRY(nfsmount) ldlink; /* chain of mounts registered for lockd use */ - int udp_sent; /* UDP request send count */ - int udp_cwnd; /* UDP request congestion window */ - struct nfs_reqqhead udp_cwndq; /* requests waiting on cwnd */ - struct sockaddr *rqsaddr;/* cached rquota socket address */ - uint32_t rqsaddrstamp; /* timestamp of rquota socket address */ - } v3; - struct { /* v4 specific fields */ - struct nfs_client_id *longid; /* client ID, long form */ - uint64_t mounttime; /* used as client ID verifier */ - uint64_t clientid; /* client ID, short form */ - thread_call_t renew_timer; /* RENEW timer call */ - nfs_fsid fsid; /* NFS file system id */ - TAILQ_HEAD(, nfsnode) delegations; /* list of nodes with delegations */ - TAILQ_HEAD(, nfsnode) dreturnq; /* list of nodes with delegations to return */ - TAILQ_ENTRY(nfsmount) cblink; /* chain of mounts registered for callbacks */ - uint32_t cbid; /* callback channel identifier */ - uint32_t cbrefs; /* # callbacks using this mount */ - } v4; + struct { /* v2/v3 specific fields */ + TAILQ_ENTRY(nfsmount) ldlink; /* chain of mounts registered for lockd use */ + int udp_sent; /* UDP request send count */ + int udp_cwnd; /* UDP request congestion window */ + struct nfs_reqqhead udp_cwndq; /* requests waiting on cwnd */ + struct sockaddr *rqsaddr;/* cached rquota socket address */ + uint32_t rqsaddrstamp; /* timestamp of rquota socket address */ + } v3; + struct { /* v4 specific fields */ + struct nfs_client_id *longid; /* client ID, long form */ + uint64_t mounttime; /* used as client ID verifier */ + uint64_t clientid; /* client ID, short form */ + thread_call_t renew_timer; /* RENEW timer call */ + nfs_fsid fsid; /* NFS file system id */ + TAILQ_HEAD(, nfsnode) delegations; /* list of nodes with delegations */ + TAILQ_HEAD(, nfsnode) dreturnq; /* list of nodes with delegations to return */ + TAILQ_ENTRY(nfsmount) cblink; /* chain of mounts registered for callbacks */ + uint32_t cbid; /* callback channel identifier */ + uint32_t cbrefs; /* # callbacks using this mount */ + } v4; } nm_un; /* common state */ TAILQ_HEAD(, nfs_open_owner) nm_open_owners; /* list of open owners */ - uint32_t nm_stateinuse; /* state in use counter */ - uint32_t nm_stategenid; /* state generation counter */ - time_t nm_recover_start; /* recover start time */ - LIST_HEAD(, nfsnode) 
nm_monlist; /* list of nodes being monitored */ + uint32_t nm_stateinuse; /* state in use counter */ + uint32_t nm_stategenid; /* state generation counter */ + time_t nm_recover_start; /* recover start time */ + LIST_HEAD(, nfsnode) nm_monlist; /* list of nodes being monitored */ /* async I/O queue */ - struct nfs_reqqhead nm_resendq; /* async I/O resend queue */ - struct nfs_reqqhead nm_iodq; /* async I/O request queue */ - struct nfsiod *nm_niod; /* nfsiod processing this mount */ + struct nfs_reqqhead nm_resendq; /* async I/O resend queue */ + struct nfs_reqqhead nm_iodq; /* async I/O request queue */ + struct nfsiod *nm_niod; /* nfsiod processing this mount */ TAILQ_ENTRY(nfsmount) nm_iodlink; /* chain of mounts awaiting nfsiod */ - int nm_asyncwrites; /* outstanding async I/O writes */ + int nm_asyncwrites; /* outstanding async I/O writes */ /* socket state */ - uint8_t nm_sofamily; /* (preferred) protocol family of socket */ - uint8_t nm_sotype; /* (preferred) type of socket */ - in_port_t nm_nfsport; /* NFS protocol port */ - in_port_t nm_mountport; /* MOUNT protocol port (v2/v3) */ + uint8_t nm_sofamily; /* (preferred) protocol family of socket */ + uint8_t nm_sotype; /* (preferred) type of socket */ + in_port_t nm_nfsport; /* NFS protocol port */ + in_port_t nm_mountport; /* MOUNT protocol port (v2/v3) */ struct nfs_socket_search *nm_nss; /* current socket search structure */ - struct nfs_socket *nm_nso; /* current socket */ - struct sockaddr *nm_saddr; /* Address of server */ - u_short nm_sockflags; /* socket state flags */ - time_t nm_deadto_start; /* dead timeout start time */ - time_t nm_reconnect_start; /* reconnect start time */ - int nm_tprintf_initial_delay; /* delay first "server down" */ - int nm_tprintf_delay; /* delay between "server down" */ - int nm_deadtimeout; /* delay between first "server down" and dead set at mount time */ - int nm_curdeadtimeout; /* current dead timeout. Adjusted by mount state and mobility */ - int nm_srtt[4]; /* Timers for RPCs */ - int nm_sdrtt[4]; - int nm_timeouts; /* Request timeouts */ - int nm_jbreqs; /* # R_JBTPRINTFMSG requests */ - int nm_mounterror; /* status of mount connect */ - TAILQ_ENTRY(nfsmount) nm_pokeq; /* mount poke queue chain */ - thread_t nm_sockthd; /* socket thread for this mount */ + struct nfs_socket *nm_nso; /* current socket */ + struct sockaddr *nm_saddr; /* Address of server */ + u_short nm_sockflags; /* socket state flags */ + time_t nm_deadto_start; /* dead timeout start time */ + time_t nm_reconnect_start; /* reconnect start time */ + int nm_tprintf_initial_delay; /* delay first "server down" */ + int nm_tprintf_delay; /* delay between "server down" */ + int nm_deadtimeout; /* delay between first "server down" and dead set at mount time */ + int nm_curdeadtimeout; /* current dead timeout. 
Adjusted by mount state and mobility */ + int nm_srtt[4]; /* Timers for RPCs */ + int nm_sdrtt[4]; + int nm_timeouts; /* Request timeouts */ + int nm_jbreqs; /* # R_JBTPRINTFMSG requests */ + int nm_mounterror; /* status of mount connect */ + TAILQ_ENTRY(nfsmount) nm_pokeq; /* mount poke queue chain */ + thread_t nm_sockthd; /* socket thread for this mount */ }; /* macro for checking current mount flags */ -#define NMFLAG(NMP, F) NFS_BITMAP_ISSET((NMP)->nm_flags, NFS_MFLAG_ ## F) +#define NMFLAG(NMP, F) NFS_BITMAP_ISSET((NMP)->nm_flags, NFS_MFLAG_ ## F) /* macros for checking (original) mount attributes/flags */ -#define NM_OMATTR_GIVEN(NMP, F) NFS_BITMAP_ISSET((NMP)->nm_mattrs, NFS_MATTR_ ## F) -#define NM_OMFLAG_GIVEN(NMP, F) NFS_BITMAP_ISSET((NMP)->nm_mflags_mask, NFS_MFLAG_ ## F) -#define NM_OMFLAG(NMP, F) NFS_BITMAP_ISSET((NMP)->nm_mflags, NFS_MFLAG_ ## F) +#define NM_OMATTR_GIVEN(NMP, F) NFS_BITMAP_ISSET((NMP)->nm_mattrs, NFS_MATTR_ ## F) +#define NM_OMFLAG_GIVEN(NMP, F) NFS_BITMAP_ISSET((NMP)->nm_mflags_mask, NFS_MFLAG_ ## F) +#define NM_OMFLAG(NMP, F) NFS_BITMAP_ISSET((NMP)->nm_mflags, NFS_MFLAG_ ## F) /* * NFS mount state flags (nm_state) */ -#define NFSSTA_MOUNT_THREAD 0x00000040 /* nfs_mount_connect_thread running */ -#define NFSSTA_MONITOR_SCAN 0x00000080 /* scan of monitored nodes in progress */ -#define NFSSTA_UNMOUNTING 0x00000100 /* an unmount attempt is in progress */ -#define NFSSTA_NEEDSECINFO 0x00000200 /* need to fetch security info */ -#define NFSSTA_CLIENTID 0x00000400 /* short client ID is valid */ -#define NFSSTA_BIGCOOKIES 0x00000800 /* have seen >32bit dir cookies */ -#define NFSSTA_JUKEBOXTIMEO 0x00001000 /* experienced a jukebox timeout */ -#define NFSSTA_LOCKTIMEO 0x00002000 /* experienced a lock req timeout */ -#define NFSSTA_MOUNTED 0x00004000 /* completely mounted */ -#define NFSSTA_LOCKSWORK 0x00008000 /* lock ops have worked. */ -#define NFSSTA_TIMEO 0x00010000 /* experienced a timeout. */ -#define NFSSTA_FORCE 0x00020000 /* doing a forced unmount. 
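 *
 * Illustrative note (not part of this patch): nm_state bits such as the
 * NFSSTA_* values here are tested with plain bitwise AND, while mount
 * options go through the NMFLAG() bitmap macro above.  A hypothetical
 * "may we still issue RPCs?" check could read (NFS_MFLAG_SOFT assumed
 * to be the soft-mount option bit):
 *
 *	if ((nmp->nm_state & (NFSSTA_FORCE | NFSSTA_DEAD)) ||
 *	    (NMFLAG(nmp, SOFT) && (nmp->nm_state & NFSSTA_TIMEO)))
 *		return (ETIMEDOUT);
 *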
*/ -#define NFSSTA_HASWRITEVERF 0x00040000 /* Has write verifier for V3 */ -#define NFSSTA_GOTPATHCONF 0x00080000 /* Got the V3 pathconf info */ -#define NFSSTA_GOTFSINFO 0x00100000 /* Got the V3 fsinfo */ -#define NFSSTA_WANTRQUOTA 0x00200000 /* Want rquota address */ -#define NFSSTA_RQUOTAINPROG 0x00400000 /* Getting rquota address */ -#define NFSSTA_SENDING 0x00800000 /* Sending on socket */ -#define NFSSTA_SNDLOCK 0x01000000 /* Send socket lock */ -#define NFSSTA_WANTSND 0x02000000 /* Want above */ -#define NFSSTA_DEAD 0x04000000 /* mount is dead */ -#define NFSSTA_RECOVER 0x08000000 /* mount state needs to be recovered */ -#define NFSSTA_RECOVER_EXPIRED 0x10000000 /* mount state expired */ -#define NFSSTA_REVOKE 0x20000000 /* need to scan for revoked nodes */ -#define NFSSTA_SQUISHY 0x40000000 /* we can ask to be forcibly unmounted */ -#define NFSSTA_MOUNT_DRAIN 0x80000000 /* mount is draining references */ +#define NFSSTA_MOUNT_THREAD 0x00000040 /* nfs_mount_connect_thread running */ +#define NFSSTA_MONITOR_SCAN 0x00000080 /* scan of monitored nodes in progress */ +#define NFSSTA_UNMOUNTING 0x00000100 /* an unmount attempt is in progress */ +#define NFSSTA_NEEDSECINFO 0x00000200 /* need to fetch security info */ +#define NFSSTA_CLIENTID 0x00000400 /* short client ID is valid */ +#define NFSSTA_BIGCOOKIES 0x00000800 /* have seen >32bit dir cookies */ +#define NFSSTA_JUKEBOXTIMEO 0x00001000 /* experienced a jukebox timeout */ +#define NFSSTA_LOCKTIMEO 0x00002000 /* experienced a lock req timeout */ +#define NFSSTA_MOUNTED 0x00004000 /* completely mounted */ +#define NFSSTA_LOCKSWORK 0x00008000 /* lock ops have worked. */ +#define NFSSTA_TIMEO 0x00010000 /* experienced a timeout. */ +#define NFSSTA_FORCE 0x00020000 /* doing a forced unmount. */ +#define NFSSTA_HASWRITEVERF 0x00040000 /* Has write verifier for V3 */ +#define NFSSTA_GOTPATHCONF 0x00080000 /* Got the V3 pathconf info */ +#define NFSSTA_GOTFSINFO 0x00100000 /* Got the V3 fsinfo */ +#define NFSSTA_WANTRQUOTA 0x00200000 /* Want rquota address */ +#define NFSSTA_RQUOTAINPROG 0x00400000 /* Getting rquota address */ +#define NFSSTA_SENDING 0x00800000 /* Sending on socket */ +#define NFSSTA_SNDLOCK 0x01000000 /* Send socket lock */ +#define NFSSTA_WANTSND 0x02000000 /* Want above */ +#define NFSSTA_DEAD 0x04000000 /* mount is dead */ +#define NFSSTA_RECOVER 0x08000000 /* mount state needs to be recovered */ +#define NFSSTA_RECOVER_EXPIRED 0x10000000 /* mount state expired */ +#define NFSSTA_REVOKE 0x20000000 /* need to scan for revoked nodes */ +#define NFSSTA_SQUISHY 0x40000000 /* we can ask to be forcibly unmounted */ +#define NFSSTA_MOUNT_DRAIN 0x80000000 /* mount is draining references */ /* flags for nm_sockflags */ -#define NMSOCK_READY 0x0001 /* socket is ready for use */ -#define NMSOCK_CONNECTING 0x0002 /* socket is being connect()ed */ -#define NMSOCK_SETUP 0x0004 /* socket/connection is being set up */ -#define NMSOCK_UNMOUNT 0x0008 /* unmounted, no more socket activity */ -#define NMSOCK_HASCONNECTED 0x0010 /* socket has connected before */ -#define NMSOCK_POKE 0x0020 /* socket needs to be poked */ -#define NMSOCK_DISCONNECTING 0x0080 /* socket is being disconnected */ +#define NMSOCK_READY 0x0001 /* socket is ready for use */ +#define NMSOCK_CONNECTING 0x0002 /* socket is being connect()ed */ +#define NMSOCK_SETUP 0x0004 /* socket/connection is being set up */ +#define NMSOCK_UNMOUNT 0x0008 /* unmounted, no more socket activity */ +#define NMSOCK_HASCONNECTED 0x0010 /* socket has connected before */ +#define 
NMSOCK_POKE 0x0020 /* socket needs to be poked */ +#define NMSOCK_DISCONNECTING 0x0080 /* socket is being disconnected */ /* aliases for version-specific fields */ -#define nm_ldlink nm_un.v3.ldlink -#define nm_sent nm_un.v3.udp_sent -#define nm_cwnd nm_un.v3.udp_cwnd -#define nm_cwndq nm_un.v3.udp_cwndq -#define nm_rqproto nm_un.v3.rqproto -#define nm_rqsaddr nm_un.v3.rqsaddr -#define nm_rqsaddrstamp nm_un.v3.rqsaddrstamp -#define nm_longid nm_un.v4.longid -#define nm_clientid nm_un.v4.clientid -#define nm_mounttime nm_un.v4.mounttime -#define nm_fsid nm_un.v4.fsid -#define nm_renew_timer nm_un.v4.renew_timer -#define nm_cbid nm_un.v4.cbid -#define nm_cblink nm_un.v4.cblink -#define nm_cbrefs nm_un.v4.cbrefs -#define nm_delegations nm_un.v4.delegations -#define nm_dreturnq nm_un.v4.dreturnq +#define nm_ldlink nm_un.v3.ldlink +#define nm_sent nm_un.v3.udp_sent +#define nm_cwnd nm_un.v3.udp_cwnd +#define nm_cwndq nm_un.v3.udp_cwndq +#define nm_rqproto nm_un.v3.rqproto +#define nm_rqsaddr nm_un.v3.rqsaddr +#define nm_rqsaddrstamp nm_un.v3.rqsaddrstamp +#define nm_longid nm_un.v4.longid +#define nm_clientid nm_un.v4.clientid +#define nm_mounttime nm_un.v4.mounttime +#define nm_fsid nm_un.v4.fsid +#define nm_renew_timer nm_un.v4.renew_timer +#define nm_cbid nm_un.v4.cbid +#define nm_cblink nm_un.v4.cblink +#define nm_cbrefs nm_un.v4.cbrefs +#define nm_delegations nm_un.v4.delegations +#define nm_dreturnq nm_un.v4.dreturnq #if defined(KERNEL) /* * Macros to convert from various things to mount structures. */ -#define VFSTONFS(mp) ((mp) ? ((struct nfsmount *)vfs_fsprivate(mp)) : NULL) -#define VTONMP(vp) VFSTONFS(vnode_mount(vp)) -#define NFSTONMP(np) VTONMP(NFSTOV(np)) -#define NFSTOMP(np) (vnode_mount(NFSTOV(np))) +#define VFSTONFS(mp) ((mp) ? ((struct nfsmount *)vfs_fsprivate(mp)) : NULL) +#define VTONMP(vp) VFSTONFS(vnode_mount(vp)) +#define NFSTONMP(np) VTONMP(NFSTOV(np)) +#define NFSTOMP(np) (vnode_mount(NFSTOV(np))) #endif /* KERNEL */ diff --git a/bsd/nfs/nfsnode.h b/bsd/nfs/nfsnode.h index de4913f33..81341cc91 100644 --- a/bsd/nfs/nfsnode.h +++ b/bsd/nfs/nfsnode.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */
@@ -76,120 +76,121 @@
 #include <nfs/nfs.h>
 #endif
 #include <sys/kernel_types.h>
+#include <os/refcnt.h>
 /*
 * Silly rename structure that hangs off the nfsnode until the name
 * can be removed by nfs_vnop_inactive()
 */
 struct nfs_sillyrename {
- kauth_cred_t nsr_cred;
- struct nfsnode *nsr_dnp;
- int nsr_namlen;
- char nsr_name[20];
+ kauth_cred_t nsr_cred;
+ struct nfsnode *nsr_dnp;
+ int nsr_namlen;
+ char nsr_name[20];
 };
 /*
 * The nfsbuf is the nfs equivalent to a struct buf.
 */
 struct nfsbuf {
- LIST_ENTRY(nfsbuf) nb_hash; /* hash chain */
- LIST_ENTRY(nfsbuf) nb_vnbufs; /* nfsnode's nfsbuf chain */
- TAILQ_ENTRY(nfsbuf) nb_free; /* free list position if not active. */
- volatile uint32_t nb_flags; /* NB_* flags. */
- volatile uint32_t nb_lflags; /* NBL_* flags. */
- volatile uint32_t nb_refs; /* outstanding references. */
- uint32_t nb_bufsize; /* buffer size */
- daddr64_t nb_lblkno; /* logical block number. */
- uint64_t nb_verf; /* V3 write verifier */
- int nb_commitlevel; /* lowest write commit level */
- time_t nb_timestamp; /* buffer timestamp */
- int nb_error; /* errno value. */
- u_int32_t nb_valid; /* valid pages in buf */
- u_int32_t nb_dirty; /* dirty pages in buf */
- int nb_validoff; /* offset in buffer of valid region. */
- int nb_validend; /* offset of end of valid region. */
- int nb_dirtyoff; /* offset in buffer of dirty region. */
- int nb_dirtyend; /* offset of end of dirty region. */
- int nb_offio; /* offset in buffer of I/O region. */
- int nb_endio; /* offset of end of I/O region. */
- int nb_rpcs; /* Count of RPCs remaining for this buffer. */
- caddr_t nb_data; /* mapped buffer */
- nfsnode_t nb_np; /* nfsnode buffer belongs to */
- kauth_cred_t nb_rcred; /* read credentials reference */
- kauth_cred_t nb_wcred; /* write credentials reference */
- void * nb_pagelist; /* upl */
+ LIST_ENTRY(nfsbuf) nb_hash; /* hash chain */
+ LIST_ENTRY(nfsbuf) nb_vnbufs; /* nfsnode's nfsbuf chain */
+ TAILQ_ENTRY(nfsbuf) nb_free; /* free list position if not active. */
+ volatile uint32_t nb_flags; /* NB_* flags. */
+ volatile uint32_t nb_lflags; /* NBL_* flags. */
+ volatile uint32_t nb_refs; /* outstanding references. */
+ uint32_t nb_bufsize; /* buffer size */
+ daddr64_t nb_lblkno; /* logical block number. */
+ uint64_t nb_verf; /* V3 write verifier */
+ int nb_commitlevel; /* lowest write commit level */
+ time_t nb_timestamp; /* buffer timestamp */
+ int nb_error; /* errno value. */
+ u_int32_t nb_valid; /* valid pages in buf */
+ u_int32_t nb_dirty; /* dirty pages in buf */
+ int nb_validoff; /* offset in buffer of valid region. */
+ int nb_validend; /* offset of end of valid region. */
+ int nb_dirtyoff; /* offset in buffer of dirty region. */
+ int nb_dirtyend; /* offset of end of dirty region. */
+ int nb_offio; /* offset in buffer of I/O region. */
+ int nb_endio; /* offset of end of I/O region. */
+ int nb_rpcs; /* Count of RPCs remaining for this buffer. 
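 *
 * Illustrative note (not part of this patch): nb_valid and nb_dirty
 * above are per-page bitmasks, one bit per PAGE_SIZE page of the
 * buffer; a u_int32_t holds 32 such bits, which is why NFS_MAXBSIZE
 * below is capped at 32 * PAGE_SIZE.  The bit math mirrors the
 * NBPGVALID()/NBPGDIRTY() macros, for a hypothetical byte offset:
 *
 *	int page  = (int)(offset / PAGE_SIZE);
 *	int valid = (bp->nb_valid >> page) & 0x1;
 *	int dirty = (bp->nb_dirty >> page) & 0x1;
 *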
*/ + caddr_t nb_data; /* mapped buffer */ + nfsnode_t nb_np; /* nfsnode buffer belongs to */ + kauth_cred_t nb_rcred; /* read credentials reference */ + kauth_cred_t nb_wcred; /* write credentials reference */ + void * nb_pagelist; /* upl */ }; -#define NFS_MAXBSIZE (32 * PAGE_SIZE) /* valid/dirty page masks limit buffer size */ +#define NFS_MAXBSIZE (32 * PAGE_SIZE) /* valid/dirty page masks limit buffer size */ -#define NFS_A_LOT_OF_NEEDCOMMITS 256 /* max# uncommitted buffers for a node */ -#define NFS_A_LOT_OF_DELAYED_WRITES MAX(nfsbufcnt/8,512) /* max# "delwri" buffers in system */ +#define NFS_A_LOT_OF_NEEDCOMMITS 256 /* max# uncommitted buffers for a node */ +#define NFS_A_LOT_OF_DELAYED_WRITES MAX(nfsbufcnt/8,512) /* max# "delwri" buffers in system */ /* * These flags are kept in b_lflags... * nfs_buf_mutex must be held before examining/updating */ -#define NBL_BUSY 0x00000001 /* I/O in progress. */ -#define NBL_WANTED 0x00000002 /* Process wants this buffer. */ +#define NBL_BUSY 0x00000001 /* I/O in progress. */ +#define NBL_WANTED 0x00000002 /* Process wants this buffer. */ /* * These flags are kept in nb_flags and they're (purposefully) * very similar to the B_* flags for struct buf. * nfs_buf_mutex is not needed to examine/update these. */ -#define NB_STALEWVERF 0x00000001 /* write verifier changed on us */ -#define NB_NEEDCOMMIT 0x00000002 /* buffer needs to be committed */ -#define NB_ASYNC 0x00000004 /* Start I/O, do not wait. */ -#define NB_CACHE 0x00000020 /* buffer data found in the cache */ -#define NB_STABLE 0x00000040 /* write FILESYNC not UNSTABLE */ -#define NB_DELWRI 0x00000080 /* delayed write: dirty range needs to be written */ -#define NB_DONE 0x00000200 /* I/O completed. */ -#define NB_EINTR 0x00000400 /* I/O was interrupted */ -#define NB_ERROR 0x00000800 /* I/O error occurred. */ -#define NB_INVAL 0x00002000 /* Does not contain valid info. */ -#define NB_NCRDAHEAD 0x00004000 /* "nocache readahead" data */ -#define NB_NOCACHE 0x00008000 /* Do not cache block after use. */ -#define NB_WRITE 0x00000000 /* Write buffer (pseudo flag). */ -#define NB_READ 0x00100000 /* Read buffer. */ -#define NB_MULTASYNCRPC 0x00200000 /* multiple async RPCs issued for buffer */ -#define NB_PAGELIST 0x00400000 /* Buffer describes pagelist I/O. */ -#define NB_WRITEINPROG 0x01000000 /* Write in progress. */ -#define NB_META 0x40000000 /* buffer contains meta-data. */ +#define NB_STALEWVERF 0x00000001 /* write verifier changed on us */ +#define NB_NEEDCOMMIT 0x00000002 /* buffer needs to be committed */ +#define NB_ASYNC 0x00000004 /* Start I/O, do not wait. */ +#define NB_CACHE 0x00000020 /* buffer data found in the cache */ +#define NB_STABLE 0x00000040 /* write FILESYNC not UNSTABLE */ +#define NB_DELWRI 0x00000080 /* delayed write: dirty range needs to be written */ +#define NB_DONE 0x00000200 /* I/O completed. */ +#define NB_EINTR 0x00000400 /* I/O was interrupted */ +#define NB_ERROR 0x00000800 /* I/O error occurred. */ +#define NB_INVAL 0x00002000 /* Does not contain valid info. */ +#define NB_NCRDAHEAD 0x00004000 /* "nocache readahead" data */ +#define NB_NOCACHE 0x00008000 /* Do not cache block after use. */ +#define NB_WRITE 0x00000000 /* Write buffer (pseudo flag). */ +#define NB_READ 0x00100000 /* Read buffer. */ +#define NB_MULTASYNCRPC 0x00200000 /* multiple async RPCs issued for buffer */ +#define NB_PAGELIST 0x00400000 /* Buffer describes pagelist I/O. */ +#define NB_WRITEINPROG 0x01000000 /* Write in progress. 
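 *
 * Illustrative sketch (not part of this patch): nb_flags is examined
 * with plain bit tests (the nfs buffer code uses ISSET()-style
 * helpers), so pushing a healthy delayed-write buffer might look like
 * this hypothetical fragment:
 *
 *	if (ISSET(bp->nb_flags, NB_DELWRI) && !ISSET(bp->nb_flags, NB_ERROR))
 *		nfs_buf_write_delayed(bp);
 *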
*/ +#define NB_META 0x40000000 /* buffer contains meta-data. */ /* Flags for operation type in nfs_buf_get() */ -#define NBLK_READ 0x00000001 /* buffer for read */ -#define NBLK_WRITE 0x00000002 /* buffer for write */ -#define NBLK_META 0x00000004 /* buffer for metadata */ -#define NBLK_OPMASK 0x00000007 /* operation mask */ +#define NBLK_READ 0x00000001 /* buffer for read */ +#define NBLK_WRITE 0x00000002 /* buffer for write */ +#define NBLK_META 0x00000004 /* buffer for metadata */ +#define NBLK_OPMASK 0x00000007 /* operation mask */ /* modifiers for above flags... */ -#define NBLK_NOWAIT 0x40000000 /* don't wait on busy buffer */ -#define NBLK_ONLYVALID 0x80000000 /* only return cached buffer */ +#define NBLK_NOWAIT 0x40000000 /* don't wait on busy buffer */ +#define NBLK_ONLYVALID 0x80000000 /* only return cached buffer */ /* These flags are used for nfsbuf iterating */ -#define NBI_ITER 0x01 /* iteration in progress */ -#define NBI_ITERWANT 0x02 /* waiting to iterate */ -#define NBI_CLEAN 0x04 /* requesting clean buffers */ -#define NBI_DIRTY 0x08 /* requesting dirty buffers */ -#define NBI_NOWAIT 0x10 /* don't block on NBI_ITER */ +#define NBI_ITER 0x01 /* iteration in progress */ +#define NBI_ITERWANT 0x02 /* waiting to iterate */ +#define NBI_CLEAN 0x04 /* requesting clean buffers */ +#define NBI_DIRTY 0x08 /* requesting dirty buffers */ +#define NBI_NOWAIT 0x10 /* don't block on NBI_ITER */ /* Flags for nfs_buf_acquire */ -#define NBAC_NOWAIT 0x01 /* Don't wait if buffer is busy */ -#define NBAC_REMOVE 0x02 /* Remove from free list once buffer is acquired */ +#define NBAC_NOWAIT 0x01 /* Don't wait if buffer is busy */ +#define NBAC_REMOVE 0x02 /* Remove from free list once buffer is acquired */ /* some convenience macros... */ -#define NBOFF(BP) ((off_t)(BP)->nb_lblkno * (off_t)(BP)->nb_bufsize) -#define NBPGVALID(BP,P) (((BP)->nb_valid >> (P)) & 0x1) -#define NBPGDIRTY(BP,P) (((BP)->nb_dirty >> (P)) & 0x1) -#define NBPGVALID_SET(BP,P) ((BP)->nb_valid |= (1 << (P))) -#define NBPGDIRTY_SET(BP,P) ((BP)->nb_dirty |= (1 << (P))) +#define NBOFF(BP) ((off_t)(BP)->nb_lblkno * (off_t)(BP)->nb_bufsize) +#define NBPGVALID(BP, P) (((BP)->nb_valid >> (P)) & 0x1) +#define NBPGDIRTY(BP, P) (((BP)->nb_dirty >> (P)) & 0x1) +#define NBPGVALID_SET(BP, P) ((BP)->nb_valid |= (1 << (P))) +#define NBPGDIRTY_SET(BP, P) ((BP)->nb_dirty |= (1 << (P))) -#define NBUFSTAMPVALID(BP) ((BP)->nb_timestamp != ~0) -#define NBUFSTAMPINVALIDATE(BP) ((BP)->nb_timestamp = ~0) +#define NBUFSTAMPVALID(BP) ((BP)->nb_timestamp != ~0) +#define NBUFSTAMPINVALIDATE(BP) ((BP)->nb_timestamp = ~0) #define NFS_BUF_MAP(BP) \ do { \ - if (!(BP)->nb_data && nfs_buf_map(BP)) \ - panic("nfs_buf_map failed"); \ + if (!(BP)->nb_data && nfs_buf_map(BP)) \ + panic("nfs_buf_map failed"); \ } while (0) LIST_HEAD(nfsbuflists, nfsbuf); @@ -205,27 +206,27 @@ extern struct nfsbuffreehead nfsbuffree, nfsbufdelwri; #define NFSBUFCNTCHK() \ do { \ if ( (nfsbufcnt < 0) || \ - (nfsbufcnt > nfsbufmax) || \ - (nfsbufmetacnt < 0) || \ - (nfsbufmetacnt > nfsbufmetamax) || \ - (nfsbufmetacnt > nfsbufcnt) || \ - (nfsbuffreecnt < 0) || \ - (nfsbuffreecnt > nfsbufmax) || \ - (nfsbuffreecnt > nfsbufcnt) || \ - (nfsbuffreemetacnt < 0) || \ - (nfsbuffreemetacnt > nfsbufmax) || \ - (nfsbuffreemetacnt > nfsbufcnt) || \ - (nfsbuffreemetacnt > nfsbufmetamax) || \ - (nfsbuffreemetacnt > nfsbufmetacnt) || \ - (nfsbufdelwricnt < 0) || \ - (nfsbufdelwricnt > nfsbufmax) || \ - (nfsbufdelwricnt > nfsbufcnt) || \ - (nfs_nbdwrite < 0) || \ - (nfs_nbdwrite > nfsbufcnt) || 
\ - 0) \ - panic("nfsbuf count error: max %d meta %d cnt %d meta %d free %d meta %d delwr %d bdw %d\n", \ - nfsbufmax, nfsbufmetamax, nfsbufcnt, nfsbufmetacnt, nfsbuffreecnt, nfsbuffreemetacnt, \ - nfsbufdelwricnt, nfs_nbdwrite); \ + (nfsbufcnt > nfsbufmax) || \ + (nfsbufmetacnt < 0) || \ + (nfsbufmetacnt > nfsbufmetamax) || \ + (nfsbufmetacnt > nfsbufcnt) || \ + (nfsbuffreecnt < 0) || \ + (nfsbuffreecnt > nfsbufmax) || \ + (nfsbuffreecnt > nfsbufcnt) || \ + (nfsbuffreemetacnt < 0) || \ + (nfsbuffreemetacnt > nfsbufmax) || \ + (nfsbuffreemetacnt > nfsbufcnt) || \ + (nfsbuffreemetacnt > nfsbufmetamax) || \ + (nfsbuffreemetacnt > nfsbufmetacnt) || \ + (nfsbufdelwricnt < 0) || \ + (nfsbufdelwricnt > nfsbufmax) || \ + (nfsbufdelwricnt > nfsbufcnt) || \ + (nfs_nbdwrite < 0) || \ + (nfs_nbdwrite > nfsbufcnt) || \ + 0) \ + panic("nfsbuf count error: max %d meta %d cnt %d meta %d free %d meta %d delwr %d bdw %d\n", \ + nfsbufmax, nfsbufmetamax, nfsbufcnt, nfsbufmetacnt, nfsbuffreecnt, nfsbuffreemetacnt, \ + nfsbufdelwricnt, nfs_nbdwrite); \ } while (0) #else #define NFSBUFCNTCHK() @@ -245,16 +246,16 @@ extern struct nfsbuffreehead nfsbuffree, nfsbufdelwri; * that is indexed backwards from the end of the buffer. */ struct nfs_dir_buf_header { - uint16_t ndbh_flags; /* flags (see below) */ - uint16_t ndbh_count; /* # of entries */ - uint32_t ndbh_entry_end; /* end offset of direntry data */ - uint32_t ndbh_ncgen; /* name cache generation# */ - uint32_t ndbh_pad; /* reserved */ + uint16_t ndbh_flags; /* flags (see below) */ + uint16_t ndbh_count; /* # of entries */ + uint32_t ndbh_entry_end; /* end offset of direntry data */ + uint32_t ndbh_ncgen; /* name cache generation# */ + uint32_t ndbh_pad; /* reserved */ }; /* ndbh_flags */ -#define NDB_FULL 0x0001 /* buffer has been filled */ -#define NDB_EOF 0x0002 /* buffer contains EOF */ -#define NDB_PLUS 0x0004 /* buffer contains RDIRPLUS data */ +#define NDB_FULL 0x0001 /* buffer has been filled */ +#define NDB_EOF 0x0002 /* buffer contains EOF */ +#define NDB_PLUS 0x0004 /* buffer contains RDIRPLUS data */ #define NFS_DIR_BUF_FIRST_DIRENTRY(BP) \ ((struct direntry*)((char*)((BP)->nb_data) + sizeof(*ndbhp))) @@ -280,30 +281,30 @@ struct nfs_dir_buf_header { * index "mru". The index of the next entry in the list is kept in the * "next" array. (An index value of -1 marks an invalid entry.) 
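 *
 * Illustrative sketch (not part of this patch): a cache lookup walks the
 * MRU list through the "next" links; assuming a hypothetical struct
 * nfsdmap *ndcp and target cookie, a hit returns the cached lbn:
 *
 *	int8_t i;
 *	for (i = ndcp->mru; i != -1; i = ndcp->next[i])
 *		if (ndcp->cookies[i].key == cookie)
 *			return (ndcp->cookies[i].lbn);
 *	(on a miss the caller falls back to scanning directory buffers)
 *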
*/ -#define NFSNUMCOOKIES 14 +#define NFSNUMCOOKIES 14 struct nfsdmap { - int8_t free; /* next unused slot */ - int8_t mru; /* head of MRU list */ - int8_t next[NFSNUMCOOKIES]; /* MRU list links */ + int8_t free; /* next unused slot */ + int8_t mru; /* head of MRU list */ + int8_t next[NFSNUMCOOKIES]; /* MRU list links */ struct { - uint64_t key; /* cookie */ - uint64_t lbn; /* lbn of buffer */ - } cookies[NFSNUMCOOKIES]; /* MRU list entries */ + uint64_t key; /* cookie */ + uint64_t lbn; /* lbn of buffer */ + } cookies[NFSNUMCOOKIES]; /* MRU list entries */ }; /* * NFS vnode attribute structure */ -#define NFSTIME_ACCESS 0 /* time of last access */ -#define NFSTIME_MODIFY 1 /* time of last modification */ -#define NFSTIME_CHANGE 2 /* time file changed */ -#define NFSTIME_CREATE 3 /* time file created */ -#define NFSTIME_BACKUP 4 /* time of last backup */ -#define NFSTIME_COUNT 5 +#define NFSTIME_ACCESS 0 /* time of last access */ +#define NFSTIME_MODIFY 1 /* time of last modification */ +#define NFSTIME_CHANGE 2 /* time file changed */ +#define NFSTIME_CREATE 3 /* time file created */ +#define NFSTIME_BACKUP 4 /* time of last backup */ +#define NFSTIME_COUNT 5 #define NFS_COMPARE_MTIME(TVP, NVAP, CMP) \ - (((TVP)->tv_sec == (NVAP)->nva_timesec[NFSTIME_MODIFY]) ? \ - ((TVP)->tv_nsec CMP (NVAP)->nva_timensec[NFSTIME_MODIFY]) : \ + (((TVP)->tv_sec == (NVAP)->nva_timesec[NFSTIME_MODIFY]) ? \ + ((TVP)->tv_nsec CMP (NVAP)->nva_timensec[NFSTIME_MODIFY]) : \ ((TVP)->tv_sec CMP (NVAP)->nva_timesec[NFSTIME_MODIFY])) #define NFS_COPY_TIME(TVP, NVAP, WHICH) \ do { \ @@ -312,56 +313,56 @@ struct nfsdmap { } while (0) struct nfs_vattr { - enum vtype nva_type; /* vnode type (for create) */ - uint32_t nva_mode; /* file's access mode (and type) */ - uid_t nva_uid; /* owner user id */ - gid_t nva_gid; /* owner group id */ - guid_t nva_uuuid; /* owner user UUID */ - guid_t nva_guuid; /* owner group UUID */ - kauth_acl_t nva_acl; /* access control list */ - nfs_specdata nva_rawdev; /* device the special file represents */ - uint32_t nva_flags; /* file flags (see below) */ - uint32_t nva_maxlink; /* maximum # of links (v4) */ - uint64_t nva_nlink; /* number of references to file */ - uint64_t nva_fileid; /* file id */ - nfs_fsid nva_fsid; /* file system id */ - uint64_t nva_size; /* file size in bytes */ - uint64_t nva_bytes; /* bytes of disk space held by file */ - uint64_t nva_change; /* change attribute */ - int64_t nva_timesec[NFSTIME_COUNT]; - int32_t nva_timensec[NFSTIME_COUNT]; - uint32_t nva_bitmap[NFS_ATTR_BITMAP_LEN]; /* attributes that are valid */ + enum vtype nva_type; /* vnode type (for create) */ + uint32_t nva_mode; /* file's access mode (and type) */ + uid_t nva_uid; /* owner user id */ + gid_t nva_gid; /* owner group id */ + guid_t nva_uuuid; /* owner user UUID */ + guid_t nva_guuid; /* owner group UUID */ + kauth_acl_t nva_acl; /* access control list */ + nfs_specdata nva_rawdev; /* device the special file represents */ + uint32_t nva_flags; /* file flags (see below) */ + uint32_t nva_maxlink; /* maximum # of links (v4) */ + uint64_t nva_nlink; /* number of references to file */ + uint64_t nva_fileid; /* file id */ + nfs_fsid nva_fsid; /* file system id */ + uint64_t nva_size; /* file size in bytes */ + uint64_t nva_bytes; /* bytes of disk space held by file */ + uint64_t nva_change; /* change attribute */ + int64_t nva_timesec[NFSTIME_COUNT]; + int32_t nva_timensec[NFSTIME_COUNT]; + uint32_t nva_bitmap[NFS_ATTR_BITMAP_LEN]; /* attributes that are valid */ }; /* nva_flags */ -#define 
NFS_FFLAG_ARCHIVED 0x0001 -#define NFS_FFLAG_HIDDEN 0x0002 -#define NFS_FFLAG_HAS_NAMED_ATTRS 0x0004 /* file has named attributes */ -#define NFS_FFLAG_TRIGGER 0x0008 /* node is a trigger/mirror mount point */ -#define NFS_FFLAG_TRIGGER_REFERRAL 0x0010 /* trigger is a referral */ -#define NFS_FFLAG_IS_ATTR 0x8000 /* file is a named attribute file/directory */ +#define NFS_FFLAG_ARCHIVED 0x0001 +#define NFS_FFLAG_HIDDEN 0x0002 +#define NFS_FFLAG_HAS_NAMED_ATTRS 0x0004 /* file has named attributes */ +#define NFS_FFLAG_TRIGGER 0x0008 /* node is a trigger/mirror mount point */ +#define NFS_FFLAG_TRIGGER_REFERRAL 0x0010 /* trigger is a referral */ +#define NFS_FFLAG_IS_ATTR 0x8000 /* file is a named attribute file/directory */ /* flags for nfs_getattr() */ -#define NGA_CACHED 0x0001 /* use cached attributes (if still valid) */ -#define NGA_UNCACHED 0x0002 /* fetch new attributes */ -#define NGA_ACL 0x0004 /* fetch ACL */ -#define NGA_MONITOR 0x0008 /* vnode monitor attr update poll */ -#define NGA_SOFT 0x0010 /* use cached attributes if ETIMEOUT */ +#define NGA_CACHED 0x0001 /* use cached attributes (if still valid) */ +#define NGA_UNCACHED 0x0002 /* fetch new attributes */ +#define NGA_ACL 0x0004 /* fetch ACL */ +#define NGA_MONITOR 0x0008 /* vnode monitor attr update poll */ +#define NGA_SOFT 0x0010 /* use cached attributes if ETIMEOUT */ /* macros for initting/cleaning up nfs_vattr structures */ -#define NVATTR_INIT(NVAP) \ +#define NVATTR_INIT(NVAP) \ do { \ - NFS_CLEAR_ATTRIBUTES((NVAP)->nva_bitmap); \ - (NVAP)->nva_flags = 0; \ - (NVAP)->nva_acl = NULL; \ + NFS_CLEAR_ATTRIBUTES((NVAP)->nva_bitmap); \ + (NVAP)->nva_flags = 0; \ + (NVAP)->nva_acl = NULL; \ } while (0) -#define NVATTR_CLEANUP(NVAP) \ +#define NVATTR_CLEANUP(NVAP) \ do { \ - NFS_CLEAR_ATTRIBUTES((NVAP)->nva_bitmap); \ - if ((NVAP)->nva_acl) { \ - kauth_acl_free((NVAP)->nva_acl); \ - (NVAP)->nva_acl = NULL; \ - } \ + NFS_CLEAR_ATTRIBUTES((NVAP)->nva_bitmap); \ + if ((NVAP)->nva_acl) { \ + kauth_acl_free((NVAP)->nva_acl); \ + (NVAP)->nva_acl = NULL; \ + } \ } while (0) /* @@ -374,26 +375,26 @@ struct nfs_vattr { * For NFSv4, the change attribute is used. */ #define NFS_CHANGED(VERS, NP, NVAP) \ - (((VERS) >= NFS_VER4) ? \ - ((NP)->n_change != (NVAP)->nva_change) : \ - NFS_COMPARE_MTIME(&(NP)->n_mtime, (NVAP), !=)) + (((VERS) >= NFS_VER4) ? \ + ((NP)->n_change != (NVAP)->nva_change) : \ + NFS_COMPARE_MTIME(&(NP)->n_mtime, (NVAP), !=)) #define NFS_CHANGED_NC(VERS, NP, NVAP) \ - (((VERS) >= NFS_VER4) ? \ - ((NP)->n_ncchange != (NVAP)->nva_change) : \ - NFS_COMPARE_MTIME(&(NP)->n_ncmtime, (NVAP), !=)) + (((VERS) >= NFS_VER4) ? 
\ + ((NP)->n_ncchange != (NVAP)->nva_change) : \ + NFS_COMPARE_MTIME(&(NP)->n_ncmtime, (NVAP), !=)) #define NFS_CHANGED_UPDATE(VERS, NP, NVAP) \ do { \ - if ((VERS) >= NFS_VER4) \ - (NP)->n_change = (NVAP)->nva_change; \ - else \ - NFS_COPY_TIME(&(NP)->n_mtime, (NVAP), MODIFY); \ + if ((VERS) >= NFS_VER4) \ + (NP)->n_change = (NVAP)->nva_change; \ + else \ + NFS_COPY_TIME(&(NP)->n_mtime, (NVAP), MODIFY); \ } while (0) #define NFS_CHANGED_UPDATE_NC(VERS, NP, NVAP) \ do { \ - if ((VERS) >= NFS_VER4) \ - (NP)->n_ncchange = (NVAP)->nva_change; \ - else \ - NFS_COPY_TIME(&(NP)->n_ncmtime, (NVAP), MODIFY); \ + if ((VERS) >= NFS_VER4) \ + (NP)->n_ncchange = (NVAP)->nva_change; \ + else \ + NFS_COPY_TIME(&(NP)->n_ncmtime, (NVAP), MODIFY); \ } while (0) @@ -404,70 +405,70 @@ extern uint32_t nfs_open_owner_seqnum, nfs_lock_owner_seqnum; * NFSv4 open owner structure - one per cred per mount */ struct nfs_open_owner { - TAILQ_ENTRY(nfs_open_owner) noo_link; /* List of open owners (on mount) */ - lck_mtx_t noo_lock; /* owner mutex */ - struct nfsmount * noo_mount; /* NFS mount */ - uint32_t noo_refcnt; /* # outstanding references */ - uint32_t noo_flags; /* see below */ - kauth_cred_t noo_cred; /* credentials of open owner */ - uint32_t noo_name; /* unique name used otw */ - uint32_t noo_seqid; /* client-side sequence ID */ - TAILQ_HEAD(,nfs_open_file) noo_opens; /* list of open files */ + TAILQ_ENTRY(nfs_open_owner) noo_link; /* List of open owners (on mount) */ + lck_mtx_t noo_lock; /* owner mutex */ + struct nfsmount * noo_mount; /* NFS mount */ + os_refcnt_t noo_refcnt; /* # outstanding references */ + uint32_t noo_flags; /* see below */ + kauth_cred_t noo_cred; /* credentials of open owner */ + uint32_t noo_name; /* unique name used otw */ + uint32_t noo_seqid; /* client-side sequence ID */ + TAILQ_HEAD(, nfs_open_file) noo_opens; /* list of open files */ }; /* noo_flags */ -#define NFS_OPEN_OWNER_LINK 0x1 /* linked into mount's open owner list */ -#define NFS_OPEN_OWNER_BUSY 0x2 /* open state-modifying operation in progress */ -#define NFS_OPEN_OWNER_WANT 0x4 /* someone else wants to mark busy */ +#define NFS_OPEN_OWNER_LINK 0x1 /* linked into mount's open owner list */ +#define NFS_OPEN_OWNER_BUSY 0x2 /* open state-modifying operation in progress */ +#define NFS_OPEN_OWNER_WANT 0x4 /* someone else wants to mark busy */ /* * NFS open file structure - one per open owner per nfsnode */ struct nfs_open_file { - lck_mtx_t nof_lock; /* open file mutex */ - TAILQ_ENTRY(nfs_open_file) nof_link; /* list of open files */ - TAILQ_ENTRY(nfs_open_file) nof_oolink; /* list of open owner's open files */ - struct nfs_open_owner * nof_owner; /* open owner */ - nfsnode_t nof_np; /* nfsnode this open is for */ - nfs_stateid nof_stateid; /* open stateid */ - thread_t nof_creator; /* thread that created file */ - uint32_t nof_opencnt; /* open file count */ - uint16_t nof_flags; /* see below */ - uint8_t nof_access:4; /* access mode for this open */ - uint8_t nof_deny:4; /* deny mode for this open */ - uint8_t nof_mmap_access:4; /* mmap open access mode */ - uint8_t nof_mmap_deny:4; /* mmap open deny mode */ + lck_mtx_t nof_lock; /* open file mutex */ + TAILQ_ENTRY(nfs_open_file) nof_link; /* list of open files */ + TAILQ_ENTRY(nfs_open_file) nof_oolink; /* list of open owner's open files */ + struct nfs_open_owner * nof_owner; /* open owner */ + nfsnode_t nof_np; /* nfsnode this open is for */ + nfs_stateid nof_stateid; /* open stateid */ + thread_t nof_creator; /* thread that created file */ + uint32_t nof_opencnt; 
/* open file count */ + uint16_t nof_flags; /* see below */ + uint8_t nof_access:4; /* access mode for this open */ + uint8_t nof_deny:4; /* deny mode for this open */ + uint8_t nof_mmap_access:4; /* mmap open access mode */ + uint8_t nof_mmap_deny:4; /* mmap open deny mode */ /* counts of access/deny mode open combinations */ - uint32_t nof_r; /* read opens (deny none) */ - uint32_t nof_w; /* write opens (deny none) */ - uint32_t nof_rw; /* read/write opens (deny none) */ - uint32_t nof_r_dw; /* read deny-write opens */ + uint32_t nof_r; /* read opens (deny none) */ + uint32_t nof_w; /* write opens (deny none) */ + uint32_t nof_rw; /* read/write opens (deny none) */ + uint32_t nof_r_dw; /* read deny-write opens */ /* the rest of the counts have a max of 2 (1 for open + 1 for mmap) */ - uint32_t nof_w_dw:2; /* write deny-write opens (max 2) */ - uint32_t nof_rw_dw:2; /* read/write deny-write opens (max 2) */ - uint32_t nof_r_drw:2; /* read deny-read/write opens (max 2) */ - uint32_t nof_w_drw:2; /* write deny-read/write opens (max 2) */ - uint32_t nof_rw_drw:2; /* read/write deny-read/write opens (max 2) */ + uint32_t nof_w_dw:2; /* write deny-write opens (max 2) */ + uint32_t nof_rw_dw:2; /* read/write deny-write opens (max 2) */ + uint32_t nof_r_drw:2; /* read deny-read/write opens (max 2) */ + uint32_t nof_w_drw:2; /* write deny-read/write opens (max 2) */ + uint32_t nof_rw_drw:2; /* read/write deny-read/write opens (max 2) */ /* counts of DELEGATED access/deny mode open combinations */ - uint32_t nof_d_w_dw:2; /* write deny-write opens (max 2) */ - uint32_t nof_d_rw_dw:2; /* read/write deny-write opens (max 2) */ - uint32_t nof_d_r_drw:2; /* read deny-read/write opens (max 2) */ - uint32_t nof_d_w_drw:2; /* write deny-read/write opens (max 2) */ - uint32_t nof_d_rw_drw:2; /* read/write deny-read/write opens (max 2) */ - uint32_t nof_d_r; /* read opens (deny none) */ - uint32_t nof_d_w; /* write opens (deny none) */ - uint32_t nof_d_rw; /* read/write opens (deny none) */ - uint32_t nof_d_r_dw; /* read deny-write opens */ + uint32_t nof_d_w_dw:2; /* write deny-write opens (max 2) */ + uint32_t nof_d_rw_dw:2; /* read/write deny-write opens (max 2) */ + uint32_t nof_d_r_drw:2; /* read deny-read/write opens (max 2) */ + uint32_t nof_d_w_drw:2; /* write deny-read/write opens (max 2) */ + uint32_t nof_d_rw_drw:2; /* read/write deny-read/write opens (max 2) */ + uint32_t nof_d_r; /* read opens (deny none) */ + uint32_t nof_d_w; /* write opens (deny none) */ + uint32_t nof_d_rw; /* read/write opens (deny none) */ + uint32_t nof_d_r_dw; /* read deny-write opens */ }; /* nof_flags */ -#define NFS_OPEN_FILE_BUSY 0x0001 /* open state-modifying operation in progress */ -#define NFS_OPEN_FILE_WANT 0x0002 /* someone else wants to mark busy */ -#define NFS_OPEN_FILE_CREATE 0x0004 /* has an open(RW) from a "CREATE" call */ -#define NFS_OPEN_FILE_NEEDCLOSE 0x0008 /* has an open(R) from an (unopen) VNOP_READ or VNOP_MMAP call */ -#define NFS_OPEN_FILE_SETATTR 0x0020 /* has an open(W) to perform a SETATTR(size) */ -#define NFS_OPEN_FILE_POSIXLOCK 0x0040 /* server supports POSIX locking semantics */ -#define NFS_OPEN_FILE_LOST 0x0080 /* open state has been lost */ -#define NFS_OPEN_FILE_REOPEN 0x0100 /* file needs to be reopened */ -#define NFS_OPEN_FILE_REOPENING 0x0200 /* file is being reopened */ +#define NFS_OPEN_FILE_BUSY 0x0001 /* open state-modifying operation in progress */ +#define NFS_OPEN_FILE_WANT 0x0002 /* someone else wants to mark busy */ +#define NFS_OPEN_FILE_CREATE 0x0004 /* has an 
open(RW) from a "CREATE" call */ +#define NFS_OPEN_FILE_NEEDCLOSE 0x0008 /* has an open(R) from an (unopen) VNOP_READ or VNOP_MMAP call */ +#define NFS_OPEN_FILE_SETATTR 0x0020 /* has an open(W) to perform a SETATTR(size) */ +#define NFS_OPEN_FILE_POSIXLOCK 0x0040 /* server supports POSIX locking semantics */ +#define NFS_OPEN_FILE_LOST 0x0080 /* open state has been lost */ +#define NFS_OPEN_FILE_REOPEN 0x0100 /* file needs to be reopened */ +#define NFS_OPEN_FILE_REOPENING 0x0200 /* file is being reopened */ struct nfs_lock_owner; /* @@ -477,24 +478,24 @@ struct nfs_lock_owner; * nfs_file_lock structure representing its state. */ struct nfs_file_lock { - TAILQ_ENTRY(nfs_file_lock) nfl_link; /* List of locks on nfsnode */ - TAILQ_ENTRY(nfs_file_lock) nfl_lolink; /* List of locks held by locker */ - struct nfs_lock_owner * nfl_owner; /* lock owner that holds this lock */ - uint64_t nfl_start; /* starting offset */ - uint64_t nfl_end; /* ending offset (inclusive) */ - uint32_t nfl_blockcnt; /* # locks blocked on this lock */ - uint16_t nfl_flags; /* see below */ - uint8_t nfl_type; /* lock type: read/write */ + TAILQ_ENTRY(nfs_file_lock) nfl_link; /* List of locks on nfsnode */ + TAILQ_ENTRY(nfs_file_lock) nfl_lolink; /* List of locks held by locker */ + struct nfs_lock_owner * nfl_owner; /* lock owner that holds this lock */ + uint64_t nfl_start; /* starting offset */ + uint64_t nfl_end; /* ending offset (inclusive) */ + uint32_t nfl_blockcnt; /* # locks blocked on this lock */ + uint16_t nfl_flags; /* see below */ + uint8_t nfl_type; /* lock type: read/write */ }; /* nfl_flags */ -#define NFS_FILE_LOCK_ALLOC 0x01 /* lock was allocated */ -#define NFS_FILE_LOCK_STYLE_POSIX 0x02 /* POSIX-style fcntl() lock */ -#define NFS_FILE_LOCK_STYLE_FLOCK 0x04 /* flock(2)-style lock */ -#define NFS_FILE_LOCK_STYLE_MASK 0x06 /* lock style mask */ -#define NFS_FILE_LOCK_WAIT 0x08 /* may block on conflicting locks */ -#define NFS_FILE_LOCK_BLOCKED 0x10 /* request is blocked */ -#define NFS_FILE_LOCK_DEAD 0x20 /* lock (request) no longer exists */ -#define NFS_FILE_LOCK_DELEGATED 0x40 /* lock acquired via delegation */ +#define NFS_FILE_LOCK_ALLOC 0x01 /* lock was allocated */ +#define NFS_FILE_LOCK_STYLE_POSIX 0x02 /* POSIX-style fcntl() lock */ +#define NFS_FILE_LOCK_STYLE_FLOCK 0x04 /* flock(2)-style lock */ +#define NFS_FILE_LOCK_STYLE_MASK 0x06 /* lock style mask */ +#define NFS_FILE_LOCK_WAIT 0x08 /* may block on conflicting locks */ +#define NFS_FILE_LOCK_BLOCKED 0x10 /* request is blocked */ +#define NFS_FILE_LOCK_DEAD 0x20 /* lock (request) no longer exists */ +#define NFS_FILE_LOCK_DELEGATED 0x40 /* lock acquired via delegation */ TAILQ_HEAD(nfs_file_lock_queue, nfs_file_lock); @@ -503,8 +504,8 @@ TAILQ_HEAD(nfs_file_lock_queue, nfs_file_lock); * Note that struct flock has "to EOF" reported as 0 but * the NFSv4 protocol has "to EOF" reported as UINT64_MAX. */ -#define NFS_FLOCK_LENGTH(S, E) (((E) == UINT64_MAX) ? 0 : ((E) - (S) + 1)) -#define NFS_LOCK_LENGTH(S, E) (((E) == UINT64_MAX) ? UINT64_MAX : ((E) - (S) + 1)) +#define NFS_FLOCK_LENGTH(S, E) (((E) == UINT64_MAX) ? 0 : ((E) - (S) + 1)) +#define NFS_LOCK_LENGTH(S, E) (((E) == UINT64_MAX) ? UINT64_MAX : ((E) - (S) + 1)) /* * NFSv4 lock owner structure - per open owner per process per nfsnode @@ -517,24 +518,24 @@ TAILQ_HEAD(nfs_file_lock_queue, nfs_file_lock); * essentially treated like whole-file POSIX locks. 
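 *
 * Illustrative sketch (not part of this patch): for a whole-file lock
 * the two length encodings above diverge exactly as the macros state:
 *
 *	uint64_t start = 0, end = UINT64_MAX;		(whole file)
 *	NFS_LOCK_LENGTH(start, end)  == UINT64_MAX	(NFSv4 on-the-wire length)
 *	NFS_FLOCK_LENGTH(start, end) == 0		(struct flock "to EOF")
 *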
*/ struct nfs_lock_owner { - lck_mtx_t nlo_lock; /* owner mutex */ - TAILQ_ENTRY(nfs_lock_owner) nlo_link; /* List of lock owners (on nfsnode) */ - struct nfs_open_owner * nlo_open_owner; /* corresponding open owner */ - struct nfs_file_lock_queue nlo_locks; /* list of locks held */ - struct nfs_file_lock nlo_alock; /* most lockers will only ever have one */ - struct timeval nlo_pid_start; /* Start time of process id */ - pid_t nlo_pid; /* lock-owning process ID */ - uint32_t nlo_refcnt; /* # outstanding references */ - uint32_t nlo_flags; /* see below */ - uint32_t nlo_name; /* unique name used otw */ - uint32_t nlo_seqid; /* client-side sequence ID */ - uint32_t nlo_stategenid; /* mount state generation ID */ - nfs_stateid nlo_stateid; /* lock stateid */ + lck_mtx_t nlo_lock; /* owner mutex */ + TAILQ_ENTRY(nfs_lock_owner) nlo_link; /* List of lock owners (on nfsnode) */ + struct nfs_open_owner * nlo_open_owner; /* corresponding open owner */ + struct nfs_file_lock_queue nlo_locks; /* list of locks held */ + struct nfs_file_lock nlo_alock; /* most lockers will only ever have one */ + struct timeval nlo_pid_start; /* Start time of process id */ + pid_t nlo_pid; /* lock-owning process ID */ + os_refcnt_t nlo_refcnt; /* # outstanding references */ + uint32_t nlo_flags; /* see below */ + uint32_t nlo_name; /* unique name used otw */ + uint32_t nlo_seqid; /* client-side sequence ID */ + uint32_t nlo_stategenid; /* mount state generation ID */ + nfs_stateid nlo_stateid; /* lock stateid */ }; /* nlo_flags */ -#define NFS_LOCK_OWNER_LINK 0x1 /* linked into mount's lock owner list */ -#define NFS_LOCK_OWNER_BUSY 0x2 /* lock state-modifying operation in progress */ -#define NFS_LOCK_OWNER_WANT 0x4 /* someone else wants to mark busy */ +#define NFS_LOCK_OWNER_LINK 0x1 /* linked into mount's lock owner list */ +#define NFS_LOCK_OWNER_BUSY 0x2 /* lock state-modifying operation in progress */ +#define NFS_LOCK_OWNER_WANT 0x4 /* someone else wants to mark busy */ /* * The nfsnode is the NFS equivalent of an inode. @@ -544,222 +545,222 @@ struct nfs_lock_owner { * be well aligned and, therefore, tightly packed. */ -#define NFS_ACCESS_CACHE_SIZE 3 +#define NFS_ACCESS_CACHE_SIZE 3 struct nfsnode { - lck_mtx_t n_lock; /* nfs node lock */ - lck_rw_t n_datalock; /* nfs node data lock */ - void *n_datalockowner;/* nfs node data lock owner (exclusive) */ - LIST_ENTRY(nfsnode) n_hash; /* Hash chain */ - LIST_ENTRY(nfsnode) n_monlink; /* list of monitored nodes */ - u_quad_t n_size; /* Current size of file */ - u_quad_t n_newsize; /* new size of file (pending update) */ - u_int64_t n_xid; /* last xid to loadattr */ - struct nfs_vattr n_vattr; /* Vnode attribute cache */ - time_t n_attrstamp; /* Attr. 
cache timestamp */ - time_t n_aclstamp; /* ACL cache timestamp */ - time_t n_evtstamp; /* last vnode event timestamp */ - uint32_t n_events; /* pending vnode events */ - u_int8_t n_access[NFS_ACCESS_CACHE_SIZE+1]; /* ACCESS cache */ - uid_t n_accessuid[NFS_ACCESS_CACHE_SIZE]; /* credentials having access */ - time_t n_accessstamp[NFS_ACCESS_CACHE_SIZE]; /* access cache timestamp */ + lck_mtx_t n_lock; /* nfs node lock */ + lck_rw_t n_datalock; /* nfs node data lock */ + void *n_datalockowner;/* nfs node data lock owner (exclusive) */ + LIST_ENTRY(nfsnode) n_hash; /* Hash chain */ + LIST_ENTRY(nfsnode) n_monlink; /* list of monitored nodes */ + u_quad_t n_size; /* Current size of file */ + u_quad_t n_newsize; /* new size of file (pending update) */ + u_int64_t n_xid; /* last xid to loadattr */ + struct nfs_vattr n_vattr; /* Vnode attribute cache */ + time_t n_attrstamp; /* Attr. cache timestamp */ + time_t n_aclstamp; /* ACL cache timestamp */ + time_t n_evtstamp; /* last vnode event timestamp */ + uint32_t n_events; /* pending vnode events */ + u_int8_t n_access[NFS_ACCESS_CACHE_SIZE + 1]; /* ACCESS cache */ + uid_t n_accessuid[NFS_ACCESS_CACHE_SIZE]; /* credentials having access */ + time_t n_accessstamp[NFS_ACCESS_CACHE_SIZE]; /* access cache timestamp */ union { - struct { - struct timespec n3_mtime; /* Prev modify time. */ - struct timespec n3_ncmtime; /* namecache modify time. */ - } v3; - struct { - uint64_t n4_change; /* prev change attribute */ - uint64_t n4_ncchange; /* namecache change attribute */ - u_char *n4_attrdirfh; /* associated attr directory fh */ - struct timeval n4_lastio; /* time of most recent I/O on attr */ - } v4; + struct { + struct timespec n3_mtime; /* Prev modify time. */ + struct timespec n3_ncmtime; /* namecache modify time. */ + } v3; + struct { + uint64_t n4_change;/* prev change attribute */ + uint64_t n4_ncchange;/* namecache change attribute */ + u_char *n4_attrdirfh;/* associated attr directory fh */ + struct timeval n4_lastio;/* time of most recent I/O on attr */ + } v4; } n_un4; - vnode_t n_parent; /* this node's parent */ - u_char *n_fhp; /* NFS File Handle */ - vnode_t n_vnode; /* associated vnode */ - mount_t n_mount; /* associated mount (NHINIT) */ - int n_error; /* Save write error value */ + vnode_t n_parent; /* this node's parent */ + u_char *n_fhp; /* NFS File Handle */ + vnode_t n_vnode; /* associated vnode */ + mount_t n_mount; /* associated mount (NHINIT) */ + int n_error; /* Save write error value */ union { - struct timespec ns_atim; /* Special file times */ - struct timespec nl_rltim; /* Time of last readlink */ - daddr64_t nf_lastread; /* last block# read from (for readahead) */ - uint64_t nd_cookieverf; /* Cookie verifier (dir only) */ + struct timespec ns_atim; /* Special file times */ + struct timespec nl_rltim; /* Time of last readlink */ + daddr64_t nf_lastread; /* last block# read from (for readahead) */ + uint64_t nd_cookieverf; /* Cookie verifier (dir only) */ } n_un1; union { - struct timespec ns_mtim; /* Special file times */ - daddr64_t nf_lastrahead; /* last block# read ahead */ - uint64_t nd_eofcookie; /* Dir. EOF cookie cache */ + struct timespec ns_mtim; /* Special file times */ + daddr64_t nf_lastrahead; /* last block# read ahead */ + uint64_t nd_eofcookie; /* Dir. 
EOF cookie cache */ } n_un2; union { struct nfs_sillyrename *nf_silly;/* Ptr to silly rename struct */ struct nfsdmap *nd_cookiecache; /* dir cookie cache */ } n_un3; - uint32_t n_flag; /* node flags */ - u_short n_fhsize; /* size in bytes, of fh */ - u_short n_hflag; /* node hash flags */ - u_short n_bflag; /* node buffer flags */ - u_short n_mflag; /* node mount flags */ - u_char n_fh[NFS_SMALLFH];/* Small File Handle */ - uint32_t n_auth; /* security flavor used for this node */ - struct nfsbuflists n_cleanblkhd; /* clean blocklist head */ - struct nfsbuflists n_dirtyblkhd; /* dirty blocklist head */ + uint32_t n_flag; /* node flags */ + u_short n_fhsize; /* size in bytes, of fh */ + u_short n_hflag; /* node hash flags */ + u_short n_bflag; /* node buffer flags */ + u_short n_mflag; /* node mount flags */ + u_char n_fh[NFS_SMALLFH];/* Small File Handle */ + uint32_t n_auth; /* security flavor used for this node */ + struct nfsbuflists n_cleanblkhd; /* clean blocklist head */ + struct nfsbuflists n_dirtyblkhd; /* dirty blocklist head */ union { - int nf_wrbusy; /* # threads in write/fsync */ - uint32_t nd_ncgen; /* dir name cache generation# */ + int nf_wrbusy; /* # threads in write/fsync */ + uint32_t nd_ncgen; /* dir name cache generation# */ } n_un5; union { - int nf_needcommitcnt;/* # bufs that need committing */ - daddr64_t nd_lastdbl; /* last dir buf lookup block# */ + int nf_needcommitcnt;/* # bufs that need committing */ + daddr64_t nd_lastdbl; /* last dir buf lookup block# */ } n_un6; - int n_bufiterflags; /* buf iterator flags */ + int n_bufiterflags; /* buf iterator flags */ union { - int nf_numoutput; /* write I/Os in progress */ - int nd_trigseq; /* vnode trigger seq# */ + int nf_numoutput; /* write I/Os in progress */ + int nd_trigseq; /* vnode trigger seq# */ } n_un7; /* open state */ - lck_mtx_t n_openlock; /* nfs node open lock */ - uint32_t n_openflags; /* open state flags */ - uint32_t n_openrefcnt; /* # non-file opens */ - TAILQ_HEAD(,nfs_open_file) n_opens; /* list of open files */ + lck_mtx_t n_openlock; /* nfs node open lock */ + uint32_t n_openflags; /* open state flags */ + uint32_t n_openrefcnt; /* # non-file opens */ + TAILQ_HEAD(, nfs_open_file) n_opens; /* list of open files */ /* lock state */ TAILQ_HEAD(, nfs_lock_owner) n_lock_owners; /* list of lock owners */ - struct nfs_file_lock_queue n_locks; /* list of locks */ + struct nfs_file_lock_queue n_locks; /* list of locks */ /* delegation state */ - nfs_stateid n_dstateid; /* delegation stateid */ - TAILQ_ENTRY(nfsnode) n_dlink; /* delegation list link */ - TAILQ_ENTRY(nfsnode) n_dreturn; /* delegation return list link */ - struct kauth_ace n_dace; /* delegation ACE */ + nfs_stateid n_dstateid; /* delegation stateid */ + TAILQ_ENTRY(nfsnode) n_dlink; /* delegation list link */ + TAILQ_ENTRY(nfsnode) n_dreturn; /* delegation return list link */ + struct kauth_ace n_dace; /* delegation ACE */ }; -#define NFS_DATA_LOCK_SHARED 1 -#define NFS_DATA_LOCK_EXCLUSIVE 2 +#define NFS_DATA_LOCK_SHARED 1 +#define NFS_DATA_LOCK_EXCLUSIVE 2 -#define nfstimespeccmp(tvp, uvp, cmp) \ - (((tvp)->tv_sec == (uvp)->tv_sec) ? \ - ((tvp)->tv_nsec cmp (uvp)->tv_nsec) : \ +#define nfstimespeccmp(tvp, uvp, cmp) \ + (((tvp)->tv_sec == (uvp)->tv_sec) ? 
\ + ((tvp)->tv_nsec cmp (uvp)->tv_nsec) : \ ((tvp)->tv_sec cmp (uvp)->tv_sec)) #define CHECK_NEEDCOMMITCNT(np) \ do { \ - if ((np)->n_needcommitcnt < 0) { \ - printf("nfs: n_needcommitcnt negative\n"); \ - (np)->n_needcommitcnt = 0; \ - } \ + if ((np)->n_needcommitcnt < 0) { \ + printf("nfs: n_needcommitcnt negative\n"); \ + (np)->n_needcommitcnt = 0; \ + } \ } while (0) -#define n_atim n_un1.ns_atim -#define n_mtim n_un2.ns_mtim -#define n_rltim n_un1.nl_rltim -#define n_lastread n_un1.nf_lastread -#define n_lastrahead n_un2.nf_lastrahead -#define n_sillyrename n_un3.nf_silly -#define n_wrbusy n_un5.nf_wrbusy -#define n_needcommitcnt n_un6.nf_needcommitcnt -#define n_numoutput n_un7.nf_numoutput -#define n_cookieverf n_un1.nd_cookieverf -#define n_eofcookie n_un2.nd_eofcookie -#define n_cookiecache n_un3.nd_cookiecache -#define n_ncgen n_un5.nd_ncgen -#define n_lastdbl n_un6.nd_lastdbl -#define n_trigseq n_un7.nd_trigseq -#define n_mtime n_un4.v3.n3_mtime -#define n_ncmtime n_un4.v3.n3_ncmtime -#define n_change n_un4.v4.n4_change -#define n_ncchange n_un4.v4.n4_ncchange -#define n_attrdirfh n_un4.v4.n4_attrdirfh -#define n_lastio n_un4.v4.n4_lastio +#define n_atim n_un1.ns_atim +#define n_mtim n_un2.ns_mtim +#define n_rltim n_un1.nl_rltim +#define n_lastread n_un1.nf_lastread +#define n_lastrahead n_un2.nf_lastrahead +#define n_sillyrename n_un3.nf_silly +#define n_wrbusy n_un5.nf_wrbusy +#define n_needcommitcnt n_un6.nf_needcommitcnt +#define n_numoutput n_un7.nf_numoutput +#define n_cookieverf n_un1.nd_cookieverf +#define n_eofcookie n_un2.nd_eofcookie +#define n_cookiecache n_un3.nd_cookiecache +#define n_ncgen n_un5.nd_ncgen +#define n_lastdbl n_un6.nd_lastdbl +#define n_trigseq n_un7.nd_trigseq +#define n_mtime n_un4.v3.n3_mtime +#define n_ncmtime n_un4.v3.n3_ncmtime +#define n_change n_un4.v4.n4_change +#define n_ncchange n_un4.v4.n4_ncchange +#define n_attrdirfh n_un4.v4.n4_attrdirfh +#define n_lastio n_un4.v4.n4_lastio /* * Flags for n_flag */ -#define NUPDATESIZE 0x00001 /* size of file needs updating */ -#define NREVOKE 0x00002 /* node revoked */ -#define NMODIFIED 0x00004 /* Might have a modified buffer in bio */ -#define NWRITEERR 0x00008 /* Flag write errors so close will know */ -#define NNEEDINVALIDATE 0x00010 /* need to call vinvalbuf() */ -#define NGETATTRINPROG 0x00020 /* GETATTR RPC in progress */ -#define NGETATTRWANT 0x00040 /* waiting for GETATTR RPC */ -#define NACC 0x00100 /* Special file accessed */ -#define NUPD 0x00200 /* Special file updated */ -#define NCHG 0x00400 /* Special file times changed */ -#define NNEGNCENTRIES 0x00800 /* directory has negative name cache entries */ -#define NBUSY 0x01000 /* node is busy */ -#define NBUSYWANT 0x02000 /* waiting on busy node */ -#define NISDOTZFS 0x04000 /* a ".zfs" directory */ -#define NISDOTZFSCHILD 0x08000 /* a child of a ".zfs" directory */ -#define NISMAPPED 0x10000 /* node is mmapped */ -#define NREFRESH 0x20000 /* node's fh needs to be refreshed */ -#define NREFRESHWANT 0x40000 /* Waiting for fh to be refreshed */ +#define NUPDATESIZE 0x00001 /* size of file needs updating */ +#define NREVOKE 0x00002 /* node revoked */ +#define NMODIFIED 0x00004 /* Might have a modified buffer in bio */ +#define NWRITEERR 0x00008 /* Flag write errors so close will know */ +#define NNEEDINVALIDATE 0x00010 /* need to call vinvalbuf() */ +#define NGETATTRINPROG 0x00020 /* GETATTR RPC in progress */ +#define NGETATTRWANT 0x00040 /* waiting for GETATTR RPC */ +#define NACC 0x00100 /* Special file accessed */ +#define NUPD 0x00200 /* 
Special file updated */ +#define NCHG 0x00400 /* Special file times changed */ +#define NNEGNCENTRIES 0x00800 /* directory has negative name cache entries */ +#define NBUSY 0x01000 /* node is busy */ +#define NBUSYWANT 0x02000 /* waiting on busy node */ +#define NISDOTZFS 0x04000 /* a ".zfs" directory */ +#define NISDOTZFSCHILD 0x08000 /* a child of a ".zfs" directory */ +#define NISMAPPED 0x10000 /* node is mmapped */ +#define NREFRESH 0x20000 /* node's fh needs to be refreshed */ +#define NREFRESHWANT 0x40000 /* Waiting for fh to be refreshed */ /* * Flags for n_hflag * Note: protected by nfs_node_hash_mutex */ -#define NHHASHED 0x0001 /* node is in hash table */ -#define NHINIT 0x0002 /* node is being initialized */ -#define NHLOCKED 0x0004 /* node is locked (initting or deleting) */ -#define NHLOCKWANT 0x0008 /* someone wants to lock */ +#define NHHASHED 0x0001 /* node is in hash table */ +#define NHINIT 0x0002 /* node is being initialized */ +#define NHLOCKED 0x0004 /* node is locked (initting or deleting) */ +#define NHLOCKWANT 0x0008 /* someone wants to lock */ /* * Flags for n_bflag * Note: protected by nfs_buf_mutex */ -#define NBFLUSHINPROG 0x0001 /* Avoid multiple calls to nfs_flush() */ -#define NBFLUSHWANT 0x0002 /* waiting for nfs_flush() to complete */ -#define NBINVALINPROG 0x0004 /* Avoid multiple calls to nfs_vinvalbuf() */ -#define NBINVALWANT 0x0008 /* waiting for nfs_vinvalbuf() to complete */ +#define NBFLUSHINPROG 0x0001 /* Avoid multiple calls to nfs_flush() */ +#define NBFLUSHWANT 0x0002 /* waiting for nfs_flush() to complete */ +#define NBINVALINPROG 0x0004 /* Avoid multiple calls to nfs_vinvalbuf() */ +#define NBINVALWANT 0x0008 /* waiting for nfs_vinvalbuf() to complete */ /* * Flags for n_mflag * Note: protected by nfsmount's nm_lock */ -#define NMMONSCANINPROG 0x0001 /* monitored node is currently updating attributes */ -#define NMMONSCANWANT 0x0002 /* waiting for attribute update to complete */ +#define NMMONSCANINPROG 0x0001 /* monitored node is currently updating attributes */ +#define NMMONSCANWANT 0x0002 /* waiting for attribute update to complete */ /* * n_openflags * Note: protected by n_openlock */ -#define N_OPENBUSY 0x0001 /* open state is busy - being updated */ -#define N_OPENWANT 0x0002 /* someone wants to mark busy */ -#define N_DELEG_READ 0x0004 /* we have a read delegation */ -#define N_DELEG_WRITE 0x0008 /* we have a write delegation */ -#define N_DELEG_MASK 0x000c /* delegation mask */ -#define N_DELEG_RETURN 0x0010 /* delegation queued for return */ -#define N_DELEG_RETURNING 0x0020 /* delegation being returned */ +#define N_OPENBUSY 0x0001 /* open state is busy - being updated */ +#define N_OPENWANT 0x0002 /* someone wants to mark busy */ +#define N_DELEG_READ 0x0004 /* we have a read delegation */ +#define N_DELEG_WRITE 0x0008 /* we have a write delegation */ +#define N_DELEG_MASK 0x000c /* delegation mask */ +#define N_DELEG_RETURN 0x0010 /* delegation queued for return */ +#define N_DELEG_RETURNING 0x0020 /* delegation being returned */ /* attr/access/ACL cache timestamp macros */ -#define NATTRVALID(np) ((np)->n_attrstamp != ~0) -#define NATTRINVALIDATE(np) ((np)->n_attrstamp = ~0) -#define NACCESSVALID(np, slot) (((slot) >= 0) && ((slot) < NFS_ACCESS_CACHE_SIZE) && ((np)->n_accessstamp[(slot)] != ~0)) +#define NATTRVALID(np) ((np)->n_attrstamp != ~0) +#define NATTRINVALIDATE(np) ((np)->n_attrstamp = ~0) +#define NACCESSVALID(np, slot) (((slot) >= 0) && ((slot) < NFS_ACCESS_CACHE_SIZE) && ((np)->n_accessstamp[(slot)] != ~0)) #define 
NACCESSINVALIDATE(np) \ do { \ - int __i; \ - for (__i=0; __i < NFS_ACCESS_CACHE_SIZE; __i++) \ - (np)->n_accessstamp[__i] = ~0; \ - (np)->n_access[NFS_ACCESS_CACHE_SIZE] = 0; \ + int __i; \ + for (__i=0; __i < NFS_ACCESS_CACHE_SIZE; __i++) \ + (np)->n_accessstamp[__i] = ~0; \ + (np)->n_access[NFS_ACCESS_CACHE_SIZE] = 0; \ } while (0) -#define NACLVALID(np) ((np)->n_aclstamp != ~0) -#define NACLINVALIDATE(np) ((np)->n_aclstamp = ~0) +#define NACLVALID(np) ((np)->n_aclstamp != ~0) +#define NACLINVALIDATE(np) ((np)->n_aclstamp = ~0) /* * NFS-specific flags for nfs_vinvalbuf/nfs_flush */ -#define V_IGNORE_WRITEERR 0x8000 +#define V_IGNORE_WRITEERR 0x8000 /* * Flags for nfs_nget() */ -#define NG_MARKROOT 0x0001 /* mark vnode as root of FS */ -#define NG_MAKEENTRY 0x0002 /* add name cache entry for vnode */ -#define NG_NOCREATE 0x0004 /* don't create a new node, return existing one */ +#define NG_MARKROOT 0x0001 /* mark vnode as root of FS */ +#define NG_MAKEENTRY 0x0002 /* add name cache entry for vnode */ +#define NG_NOCREATE 0x0004 /* don't create a new node, return existing one */ /* * Convert between nfsnode pointers and vnode pointers */ -#define VTONFS(vp) ((nfsnode_t)vnode_fsnode(vp)) -#define NFSTOV(np) ((np)->n_vnode) +#define VTONFS(vp) ((nfsnode_t)vnode_fsnode(vp)) +#define NFSTOV(np) ((np)->n_vnode) /* nfsnode hash table mutex */ extern lck_mtx_t *nfs_node_hash_mutex; @@ -769,17 +770,17 @@ extern lck_mtx_t *nfs_node_hash_mutex; */ #define NP(NP, FMT, ...) \ do { \ - const char *__vname = (NP) ? vnode_getname(NFSTOV(NP)) : NULL; \ - printf(FMT " %s\n", ##__VA_ARGS__, __vname ? __vname : "???"); \ - if (__vname) vnode_putname(__vname); \ + const char *__vname = (NP) ? vnode_getname(NFSTOV(NP)) : NULL; \ + printf(FMT " %s\n", ##__VA_ARGS__, __vname ? 
__vname : "???"); \ + if (__vname) vnode_putname(__vname); \ } while (0) /* * nfsiod structures */ struct nfsiod { - TAILQ_ENTRY(nfsiod) niod_link; /* List of nfsiods */ - struct nfsmount * niod_nmp; /* mount point for this nfsiod */ + TAILQ_ENTRY(nfsiod) niod_link; /* List of nfsiods */ + struct nfsmount * niod_nmp; /* mount point for this nfsiod */ }; TAILQ_HEAD(nfsiodlist, nfsiod); TAILQ_HEAD(nfsiodmountlist, nfsmount); @@ -790,19 +791,19 @@ extern lck_mtx_t *nfsiod_mutex; #if defined(KERNEL) typedef int vnop_t(void *); -extern vnop_t **fifo_nfsv2nodeop_p; -extern vnop_t **nfsv2_vnodeop_p; -extern vnop_t **spec_nfsv2nodeop_p; -extern vnop_t **fifo_nfsv4nodeop_p; -extern vnop_t **nfsv4_vnodeop_p; -extern vnop_t **spec_nfsv4nodeop_p; +extern vnop_t **fifo_nfsv2nodeop_p; +extern vnop_t **nfsv2_vnodeop_p; +extern vnop_t **spec_nfsv2nodeop_p; +extern vnop_t **fifo_nfsv4nodeop_p; +extern vnop_t **nfsv4_vnodeop_p; +extern vnop_t **spec_nfsv4nodeop_p; /* * Prototypes for NFS vnode operations */ #define nfs_vnop_revoke nop_revoke -int nfs_vnop_inactive(struct vnop_inactive_args *); -int nfs_vnop_reclaim(struct vnop_reclaim_args *); +int nfs_vnop_inactive(struct vnop_inactive_args *); +int nfs_vnop_reclaim(struct vnop_reclaim_args *); int nfs_node_lock(nfsnode_t); int nfs_node_lock_internal(nfsnode_t, int); @@ -826,7 +827,7 @@ void nfs_data_update_size(nfsnode_t, int); /* other stuff */ int nfs_removeit(struct nfs_sillyrename *); -int nfs_nget(mount_t,nfsnode_t,struct componentname *,u_char *,int,struct nfs_vattr *,u_int64_t *,uint32_t,int,nfsnode_t*); +int nfs_nget(mount_t, nfsnode_t, struct componentname *, u_char *, int, struct nfs_vattr *, u_int64_t *, uint32_t, int, nfsnode_t*); int nfs_mount_is_dirty(mount_t); void nfs_dir_cookie_cache(nfsnode_t, uint64_t, uint64_t); int nfs_dir_cookie_to_lbn(nfsnode_t, uint64_t, int *, uint64_t *); diff --git a/bsd/nfs/nfsproto.h b/bsd/nfs/nfsproto.h index f081170df..b45f35145 100644 --- a/bsd/nfs/nfsproto.h +++ b/bsd/nfs/nfsproto.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -81,255 +81,255 @@ /* Only define these if nfs_prot.h hasn't been included */ #ifndef NFS_PROGRAM -#define NFS_PORT 2049 -#define NFS_PROG 100003 -#define NFS_VER2 2 -#define NFS_VER3 3 -#define NFS_VER4 4 -#define NFS_V2MAXDATA 8192 -#define NFS_MAXDGRAMDATA 16384 -#define NFS_PREFDGRAMDATA 8192 -#define NFS_MAXDATA (64*1024) // XXX not ready for >64K -#define NFSRV_MAXDATA (64*1024) // XXX not ready for >64K -#define NFS_MAXPATHLEN 1024 -#define NFS_MAXNAMLEN 255 -#define NFS_MAXPACKET (16*1024*1024) -#define NFS_UDPSOCKBUF (224*1024) -#define NFS_FABLKSIZE 512 /* Size in bytes of a block wrt fa_blocks */ - -#define NFS4_CALLBACK_PROG 0x4E465343 /* "NFSC" */ -#define NFS4_CALLBACK_PROG_VERSION 1 +#define NFS_PORT 2049 +#define NFS_PROG 100003 +#define NFS_VER2 2 +#define NFS_VER3 3 +#define NFS_VER4 4 +#define NFS_V2MAXDATA 8192 +#define NFS_MAXDGRAMDATA 16384 +#define NFS_PREFDGRAMDATA 8192 +#define NFS_MAXDATA (64*1024) // XXX not ready for >64K +#define NFSRV_MAXDATA (64*1024) // XXX not ready for >64K +#define NFS_MAXPATHLEN 1024 +#define NFS_MAXNAMLEN 255 +#define NFS_MAXPACKET (16*1024*1024) +#define NFS_UDPSOCKBUF (224*1024) +#define NFS_FABLKSIZE 512 /* Size in bytes of a block wrt fa_blocks */ + +#define NFS4_CALLBACK_PROG 0x4E465343 /* "NFSC" */ +#define NFS4_CALLBACK_PROG_VERSION 1 /* Stat numbers for NFS RPC returns */ -#define NFS_OK 0 -#define NFSERR_PERM 1 -#define NFSERR_NOENT 2 -#define NFSERR_IO 5 -#define NFSERR_NXIO 6 -#define NFSERR_ACCES 13 -#define NFSERR_EXIST 17 -#define NFSERR_XDEV 18 /* Version 3 only */ -#define NFSERR_NODEV 19 -#define NFSERR_NOTDIR 20 -#define NFSERR_ISDIR 21 -#define NFSERR_INVAL 22 /* Version 3 only */ -#define NFSERR_FBIG 27 -#define NFSERR_NOSPC 28 -#define NFSERR_ROFS 30 -#define NFSERR_MLINK 31 /* Version 3 only */ -#define NFSERR_NAMETOL 63 -#define NFSERR_NOTEMPTY 66 -#define NFSERR_DQUOT 69 -#define NFSERR_STALE 70 -#define NFSERR_REMOTE 71 /* Version 3 only */ -#define NFSERR_WFLUSH 99 /* Version 2 only */ -#define NFSERR_BADHANDLE 10001 /* The rest Version 3 only */ -#define NFSERR_NOT_SYNC 10002 -#define NFSERR_BAD_COOKIE 10003 -#define NFSERR_NOTSUPP 10004 -#define NFSERR_TOOSMALL 10005 -#define NFSERR_SERVERFAULT 10006 -#define NFSERR_BADTYPE 10007 -#define NFSERR_JUKEBOX 10008 -#define NFSERR_TRYLATER NFSERR_JUKEBOX -#define NFSERR_DELAY NFSERR_JUKEBOX -#define NFSERR_SAME 10009 /* The rest Version 4 only */ -#define NFSERR_DENIED 10010 -#define NFSERR_EXPIRED 10011 -#define NFSERR_LOCKED 10012 -#define NFSERR_GRACE 10013 -#define NFSERR_FHEXPIRED 10014 -#define NFSERR_SHARE_DENIED 10015 -#define NFSERR_WRONGSEC 10016 -#define NFSERR_CLID_INUSE 10017 -#define NFSERR_RESOURCE 10018 -#define NFSERR_MOVED 10019 -#define NFSERR_NOFILEHANDLE 10020 -#define NFSERR_MINOR_VERS_MISMATCH 10021 -#define NFSERR_STALE_CLIENTID 10022 -#define NFSERR_STALE_STATEID 10023 -#define NFSERR_OLD_STATEID 10024 -#define NFSERR_BAD_STATEID 10025 -#define NFSERR_BAD_SEQID 10026 -#define NFSERR_NOT_SAME 10027 -#define NFSERR_LOCK_RANGE 10028 -#define NFSERR_SYMLINK 10029 -#define NFSERR_RESTOREFH 10030 -#define NFSERR_LEASE_MOVED 10031 -#define NFSERR_ATTRNOTSUPP 10032 -#define NFSERR_NO_GRACE 10033 -#define NFSERR_RECLAIM_BAD 10034 -#define NFSERR_RECLAIM_CONFLICT 10035 -#define NFSERR_BADXDR 10036 -#define NFSERR_LOCKS_HELD 10037 -#define NFSERR_OPENMODE 10038 -#define NFSERR_BADOWNER 10039 -#define NFSERR_BADCHAR 10040 -#define NFSERR_BADNAME 10041 -#define NFSERR_BAD_RANGE 10042 -#define NFSERR_LOCK_NOTSUPP 10043 -#define 
NFSERR_OP_ILLEGAL 10044 -#define NFSERR_DEADLOCK 10045 -#define NFSERR_FILE_OPEN 10046 -#define NFSERR_ADMIN_REVOKED 10047 -#define NFSERR_CB_PATH_DOWN 10048 - -#define NFSERR_STALEWRITEVERF 30001 /* Fake return for nfs_commit() */ -#define NFSERR_DIRBUFDROPPED 30002 /* Fake return for nfs*_readdir_rpc() */ +#define NFS_OK 0 +#define NFSERR_PERM 1 +#define NFSERR_NOENT 2 +#define NFSERR_IO 5 +#define NFSERR_NXIO 6 +#define NFSERR_ACCES 13 +#define NFSERR_EXIST 17 +#define NFSERR_XDEV 18 /* Version 3 only */ +#define NFSERR_NODEV 19 +#define NFSERR_NOTDIR 20 +#define NFSERR_ISDIR 21 +#define NFSERR_INVAL 22 /* Version 3 only */ +#define NFSERR_FBIG 27 +#define NFSERR_NOSPC 28 +#define NFSERR_ROFS 30 +#define NFSERR_MLINK 31 /* Version 3 only */ +#define NFSERR_NAMETOL 63 +#define NFSERR_NOTEMPTY 66 +#define NFSERR_DQUOT 69 +#define NFSERR_STALE 70 +#define NFSERR_REMOTE 71 /* Version 3 only */ +#define NFSERR_WFLUSH 99 /* Version 2 only */ +#define NFSERR_BADHANDLE 10001 /* The rest Version 3 only */ +#define NFSERR_NOT_SYNC 10002 +#define NFSERR_BAD_COOKIE 10003 +#define NFSERR_NOTSUPP 10004 +#define NFSERR_TOOSMALL 10005 +#define NFSERR_SERVERFAULT 10006 +#define NFSERR_BADTYPE 10007 +#define NFSERR_JUKEBOX 10008 +#define NFSERR_TRYLATER NFSERR_JUKEBOX +#define NFSERR_DELAY NFSERR_JUKEBOX +#define NFSERR_SAME 10009 /* The rest Version 4 only */ +#define NFSERR_DENIED 10010 +#define NFSERR_EXPIRED 10011 +#define NFSERR_LOCKED 10012 +#define NFSERR_GRACE 10013 +#define NFSERR_FHEXPIRED 10014 +#define NFSERR_SHARE_DENIED 10015 +#define NFSERR_WRONGSEC 10016 +#define NFSERR_CLID_INUSE 10017 +#define NFSERR_RESOURCE 10018 +#define NFSERR_MOVED 10019 +#define NFSERR_NOFILEHANDLE 10020 +#define NFSERR_MINOR_VERS_MISMATCH 10021 +#define NFSERR_STALE_CLIENTID 10022 +#define NFSERR_STALE_STATEID 10023 +#define NFSERR_OLD_STATEID 10024 +#define NFSERR_BAD_STATEID 10025 +#define NFSERR_BAD_SEQID 10026 +#define NFSERR_NOT_SAME 10027 +#define NFSERR_LOCK_RANGE 10028 +#define NFSERR_SYMLINK 10029 +#define NFSERR_RESTOREFH 10030 +#define NFSERR_LEASE_MOVED 10031 +#define NFSERR_ATTRNOTSUPP 10032 +#define NFSERR_NO_GRACE 10033 +#define NFSERR_RECLAIM_BAD 10034 +#define NFSERR_RECLAIM_CONFLICT 10035 +#define NFSERR_BADXDR 10036 +#define NFSERR_LOCKS_HELD 10037 +#define NFSERR_OPENMODE 10038 +#define NFSERR_BADOWNER 10039 +#define NFSERR_BADCHAR 10040 +#define NFSERR_BADNAME 10041 +#define NFSERR_BAD_RANGE 10042 +#define NFSERR_LOCK_NOTSUPP 10043 +#define NFSERR_OP_ILLEGAL 10044 +#define NFSERR_DEADLOCK 10045 +#define NFSERR_FILE_OPEN 10046 +#define NFSERR_ADMIN_REVOKED 10047 +#define NFSERR_CB_PATH_DOWN 10048 + +#define NFSERR_STALEWRITEVERF 30001 /* Fake return for nfs_commit() */ +#define NFSERR_DIRBUFDROPPED 30002 /* Fake return for nfs*_readdir_rpc() */ /* * For gss we would like to return EAUTH when we don't have or can't get credentials, * but some callers don't know what to do with it, so we define our own version * of EAUTH to be EACCES */ -#define NFSERR_EAUTH EACCES +#define NFSERR_EAUTH EACCES -#define NFSERR_RETVOID 0x20000000 /* Return void, not error */ -#define NFSERR_AUTHERR 0x40000000 /* Mark an authentication error */ -#define NFSERR_RETERR 0x80000000 /* Mark an error return for V3 */ +#define NFSERR_RETVOID 0x20000000 /* Return void, not error */ +#define NFSERR_AUTHERR 0x40000000 /* Mark an authentication error */ +#define NFSERR_RETERR 0x80000000 /* Mark an error return for V3 */ #endif /* !NFS_PROGRAM */ /* Sizes in bytes of various nfs rpc components */ -#define NFSX_UNSIGNED 4 
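The NFSERR_* values above are on-the-wire protocol statuses, while NFSERR_RETVOID, NFSERR_AUTHERR, and NFSERR_RETERR are purely local marker bits OR'd into a server-side return value to steer the reply-encoding path. A minimal sketch of splitting such a composite status back apart (illustrative only; the helper name is hypothetical and not part of this patch):

/* Strip the local marker bits to recover the wire status; real NFS
 * statuses all sit far below 0x20000000, so the bits cannot collide. */
static inline uint32_t
example_nfs_wire_status(uint32_t composite)
{
	return composite & ~(NFSERR_RETVOID | NFSERR_AUTHERR | NFSERR_RETERR);
}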
+#define NFSX_UNSIGNED 4 /* specific to NFS Version 2 */ -#define NFSX_V2FH 32 -#define NFSX_V2FATTR 68 -#define NFSX_V2SATTR 32 -#define NFSX_V2COOKIE 4 -#define NFSX_V2STATFS 20 +#define NFSX_V2FH 32 +#define NFSX_V2FATTR 68 +#define NFSX_V2SATTR 32 +#define NFSX_V2COOKIE 4 +#define NFSX_V2STATFS 20 /* specific to NFS Version 3 */ -#define NFSX_V3FHMAX 64 /* max. allowed by protocol */ -#define NFSX_V3FATTR 84 -#define NFSX_V3SATTR 60 /* max. all fields filled in */ -#define NFSX_V3POSTOPATTR (NFSX_V3FATTR + NFSX_UNSIGNED) -#define NFSX_V3WCCDATA (NFSX_V3POSTOPATTR + 8 * NFSX_UNSIGNED) -#define NFSX_V3COOKIEVERF 8 -#define NFSX_V3WRITEVERF 8 -#define NFSX_V3CREATEVERF 8 -#define NFSX_V3STATFS 52 -#define NFSX_V3FSINFO 48 -#define NFSX_V3PATHCONF 24 +#define NFSX_V3FHMAX 64 /* max. allowed by protocol */ +#define NFSX_V3FATTR 84 +#define NFSX_V3SATTR 60 /* max. all fields filled in */ +#define NFSX_V3POSTOPATTR (NFSX_V3FATTR + NFSX_UNSIGNED) +#define NFSX_V3WCCDATA (NFSX_V3POSTOPATTR + 8 * NFSX_UNSIGNED) +#define NFSX_V3COOKIEVERF 8 +#define NFSX_V3WRITEVERF 8 +#define NFSX_V3CREATEVERF 8 +#define NFSX_V3STATFS 52 +#define NFSX_V3FSINFO 48 +#define NFSX_V3PATHCONF 24 /* specific to NFS Version 4 */ -#define NFS4_FHSIZE 128 -#define NFS4_VERIFIER_SIZE 8 -#define NFS4_OPAQUE_LIMIT 1024 +#define NFS4_FHSIZE 128 +#define NFS4_VERIFIER_SIZE 8 +#define NFS4_OPAQUE_LIMIT 1024 /* variants for multiple versions */ -#define NFSX_FH(V) (((V) == NFS_VER2) ? NFSX_V2FH : (NFSX_UNSIGNED + \ - (((V) == NFS_VER3) ? NFSX_V3FHMAX : NFS4_FHSIZE))) -#define NFSX_SRVFH(V,FH) (((V) == NFS_VER2) ? NFSX_V2FH : (FH)->nfh_len) -#define NFSX_FATTR(V) (((V) == NFS_VER3) ? NFSX_V3FATTR : NFSX_V2FATTR) -#define NFSX_PREOPATTR(V) (((V) == NFS_VER3) ? (7 * NFSX_UNSIGNED) : 0) -#define NFSX_POSTOPATTR(V) (((V) == NFS_VER3) ? (NFSX_V3FATTR + NFSX_UNSIGNED) : 0) -#define NFSX_POSTOPORFATTR(V) (((V) == NFS_VER3) ? (NFSX_V3FATTR + NFSX_UNSIGNED) : NFSX_V2FATTR) -#define NFSX_WCCDATA(V) (((V) == NFS_VER3) ? NFSX_V3WCCDATA : 0) -#define NFSX_WCCORFATTR(V) (((V) == NFS_VER3) ? NFSX_V3WCCDATA : NFSX_V2FATTR) -#define NFSX_SATTR(V) (((V) == NFS_VER3) ? NFSX_V3SATTR : NFSX_V2SATTR) -#define NFSX_COOKIEVERF(V) (((V) == NFS_VER3) ? NFSX_V3COOKIEVERF : 0) -#define NFSX_WRITEVERF(V) (((V) == NFS_VER3) ? NFSX_V3WRITEVERF : 0) -#define NFSX_READDIR(V) (((V) == NFS_VER3) ? (5 * NFSX_UNSIGNED) : \ - (2 * NFSX_UNSIGNED)) -#define NFSX_STATFS(V) (((V) == NFS_VER3) ? NFSX_V3STATFS : NFSX_V2STATFS) +#define NFSX_FH(V) (((V) == NFS_VER2) ? NFSX_V2FH : (NFSX_UNSIGNED + \ + (((V) == NFS_VER3) ? NFSX_V3FHMAX : NFS4_FHSIZE))) +#define NFSX_SRVFH(V, FH) (((V) == NFS_VER2) ? NFSX_V2FH : (FH)->nfh_len) +#define NFSX_FATTR(V) (((V) == NFS_VER3) ? NFSX_V3FATTR : NFSX_V2FATTR) +#define NFSX_PREOPATTR(V) (((V) == NFS_VER3) ? (7 * NFSX_UNSIGNED) : 0) +#define NFSX_POSTOPATTR(V) (((V) == NFS_VER3) ? (NFSX_V3FATTR + NFSX_UNSIGNED) : 0) +#define NFSX_POSTOPORFATTR(V) (((V) == NFS_VER3) ? (NFSX_V3FATTR + NFSX_UNSIGNED) : NFSX_V2FATTR) +#define NFSX_WCCDATA(V) (((V) == NFS_VER3) ? NFSX_V3WCCDATA : 0) +#define NFSX_WCCORFATTR(V) (((V) == NFS_VER3) ? NFSX_V3WCCDATA : NFSX_V2FATTR) +#define NFSX_SATTR(V) (((V) == NFS_VER3) ? NFSX_V3SATTR : NFSX_V2SATTR) +#define NFSX_COOKIEVERF(V) (((V) == NFS_VER3) ? NFSX_V3COOKIEVERF : 0) +#define NFSX_WRITEVERF(V) (((V) == NFS_VER3) ? NFSX_V3WRITEVERF : 0) +#define NFSX_READDIR(V) (((V) == NFS_VER3) ? (5 * NFSX_UNSIGNED) : \ + (2 * NFSX_UNSIGNED)) +#define NFSX_STATFS(V) (((V) == NFS_VER3) ? 
NFSX_V3STATFS : NFSX_V2STATFS) /* Only define these if nfs_prot.h hasn't been included */ #ifndef NFS_PROGRAM /* nfs rpc procedure numbers (before version mapping) */ -#define NFSPROC_NULL 0 -#define NFSPROC_GETATTR 1 -#define NFSPROC_SETATTR 2 -#define NFSPROC_LOOKUP 3 -#define NFSPROC_ACCESS 4 -#define NFSPROC_READLINK 5 -#define NFSPROC_READ 6 -#define NFSPROC_WRITE 7 -#define NFSPROC_CREATE 8 -#define NFSPROC_MKDIR 9 -#define NFSPROC_SYMLINK 10 -#define NFSPROC_MKNOD 11 -#define NFSPROC_REMOVE 12 -#define NFSPROC_RMDIR 13 -#define NFSPROC_RENAME 14 -#define NFSPROC_LINK 15 -#define NFSPROC_READDIR 16 -#define NFSPROC_READDIRPLUS 17 -#define NFSPROC_FSSTAT 18 -#define NFSPROC_FSINFO 19 -#define NFSPROC_PATHCONF 20 -#define NFSPROC_COMMIT 21 +#define NFSPROC_NULL 0 +#define NFSPROC_GETATTR 1 +#define NFSPROC_SETATTR 2 +#define NFSPROC_LOOKUP 3 +#define NFSPROC_ACCESS 4 +#define NFSPROC_READLINK 5 +#define NFSPROC_READ 6 +#define NFSPROC_WRITE 7 +#define NFSPROC_CREATE 8 +#define NFSPROC_MKDIR 9 +#define NFSPROC_SYMLINK 10 +#define NFSPROC_MKNOD 11 +#define NFSPROC_REMOVE 12 +#define NFSPROC_RMDIR 13 +#define NFSPROC_RENAME 14 +#define NFSPROC_LINK 15 +#define NFSPROC_READDIR 16 +#define NFSPROC_READDIRPLUS 17 +#define NFSPROC_FSSTAT 18 +#define NFSPROC_FSINFO 19 +#define NFSPROC_PATHCONF 20 +#define NFSPROC_COMMIT 21 #endif /* !NFS_PROGRAM */ -#define NFSPROC_NOOP 22 -#define NFS_NPROCS 23 +#define NFSPROC_NOOP 22 +#define NFS_NPROCS 23 /* Actual Version 2 procedure numbers */ -#define NFSV2PROC_NULL 0 -#define NFSV2PROC_GETATTR 1 -#define NFSV2PROC_SETATTR 2 -#define NFSV2PROC_NOOP 3 -#define NFSV2PROC_ROOT NFSV2PROC_NOOP /* Obsolete */ -#define NFSV2PROC_LOOKUP 4 -#define NFSV2PROC_READLINK 5 -#define NFSV2PROC_READ 6 -#define NFSV2PROC_WRITECACHE NFSV2PROC_NOOP /* Obsolete */ -#define NFSV2PROC_WRITE 8 -#define NFSV2PROC_CREATE 9 -#define NFSV2PROC_REMOVE 10 -#define NFSV2PROC_RENAME 11 -#define NFSV2PROC_LINK 12 -#define NFSV2PROC_SYMLINK 13 -#define NFSV2PROC_MKDIR 14 -#define NFSV2PROC_RMDIR 15 -#define NFSV2PROC_READDIR 16 -#define NFSV2PROC_STATFS 17 +#define NFSV2PROC_NULL 0 +#define NFSV2PROC_GETATTR 1 +#define NFSV2PROC_SETATTR 2 +#define NFSV2PROC_NOOP 3 +#define NFSV2PROC_ROOT NFSV2PROC_NOOP /* Obsolete */ +#define NFSV2PROC_LOOKUP 4 +#define NFSV2PROC_READLINK 5 +#define NFSV2PROC_READ 6 +#define NFSV2PROC_WRITECACHE NFSV2PROC_NOOP /* Obsolete */ +#define NFSV2PROC_WRITE 8 +#define NFSV2PROC_CREATE 9 +#define NFSV2PROC_REMOVE 10 +#define NFSV2PROC_RENAME 11 +#define NFSV2PROC_LINK 12 +#define NFSV2PROC_SYMLINK 13 +#define NFSV2PROC_MKDIR 14 +#define NFSV2PROC_RMDIR 15 +#define NFSV2PROC_READDIR 16 +#define NFSV2PROC_STATFS 17 /* * Constants used by the Version 3 protocol for various RPCs */ -#define NFSV3FSINFO_LINK 0x01 -#define NFSV3FSINFO_SYMLINK 0x02 -#define NFSV3FSINFO_HOMOGENEOUS 0x08 -#define NFSV3FSINFO_CANSETTIME 0x10 +#define NFSV3FSINFO_LINK 0x01 +#define NFSV3FSINFO_SYMLINK 0x02 +#define NFSV3FSINFO_HOMOGENEOUS 0x08 +#define NFSV3FSINFO_CANSETTIME 0x10 /* time setting constants */ -#define NFS_TIME_DONT_CHANGE 0 -#define NFS_TIME_SET_TO_SERVER 1 -#define NFS_TIME_SET_TO_CLIENT 2 -#define NFS4_TIME_SET_TO_SERVER 0 -#define NFS4_TIME_SET_TO_CLIENT 1 +#define NFS_TIME_DONT_CHANGE 0 +#define NFS_TIME_SET_TO_SERVER 1 +#define NFS_TIME_SET_TO_CLIENT 2 +#define NFS4_TIME_SET_TO_SERVER 0 +#define NFS4_TIME_SET_TO_CLIENT 1 /* access() constants */ -#define NFS_ACCESS_READ 0x01 -#define NFS_ACCESS_LOOKUP 0x02 -#define NFS_ACCESS_MODIFY 0x04 -#define NFS_ACCESS_EXTEND 
0x08 -#define NFS_ACCESS_DELETE 0x10 -#define NFS_ACCESS_EXECUTE 0x20 -#define NFS_ACCESS_ALL (NFS_ACCESS_READ | NFS_ACCESS_MODIFY \ - | NFS_ACCESS_EXTEND | NFS_ACCESS_EXECUTE \ - | NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP) +#define NFS_ACCESS_READ 0x01 +#define NFS_ACCESS_LOOKUP 0x02 +#define NFS_ACCESS_MODIFY 0x04 +#define NFS_ACCESS_EXTEND 0x08 +#define NFS_ACCESS_DELETE 0x10 +#define NFS_ACCESS_EXECUTE 0x20 +#define NFS_ACCESS_ALL (NFS_ACCESS_READ | NFS_ACCESS_MODIFY \ + | NFS_ACCESS_EXTEND | NFS_ACCESS_EXECUTE \ + | NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP) /* NFS WRITE how constants */ -#define NFS_WRITE_UNSTABLE 0 -#define NFS_WRITE_DATASYNC 1 -#define NFS_WRITE_FILESYNC 2 +#define NFS_WRITE_UNSTABLE 0 +#define NFS_WRITE_DATASYNC 1 +#define NFS_WRITE_FILESYNC 2 /* NFS CREATE types */ -#define NFS_CREATE_UNCHECKED 0 -#define NFS_CREATE_GUARDED 1 -#define NFS_CREATE_EXCLUSIVE 2 +#define NFS_CREATE_UNCHECKED 0 +#define NFS_CREATE_GUARDED 1 +#define NFS_CREATE_EXCLUSIVE 2 /* Only define these if nfs_prot.h hasn't been included */ #ifndef NFS_PROGRAM /* NFS object types */ typedef enum { NFNON=0, NFREG=1, NFDIR=2, NFBLK=3, NFCHR=4, NFLNK=5, - NFSOCK=6, NFFIFO=7, NFATTRDIR=8, NFNAMEDATTR=9 } nfstype; + NFSOCK=6, NFFIFO=7, NFATTRDIR=8, NFNAMEDATTR=9 } nfstype; #endif /* !NFS_PROGRAM */ /* @@ -341,92 +341,92 @@ typedef enum { NFNON=0, NFREG=1, NFDIR=2, NFBLK=3, NFCHR=4, NFLNK=5, * NFS_SMALLFH should be in the range of 32 to 64 and be divisible by 4. */ #ifndef NFS_SMALLFH -#define NFS_SMALLFH 64 +#define NFS_SMALLFH 64 #endif /* * NFS attribute management stuff */ -#define NFS_ATTR_BITMAP_LEN 2 -#define NFS_BITMAP_SET(B, I) (((uint32_t *)(B))[(I)/32] |= 1<<((I)%32)) -#define NFS_BITMAP_CLR(B, I) (((uint32_t *)(B))[(I)/32] &= ~(1<<((I)%32))) -#define NFS_BITMAP_ISSET(B, I) (((uint32_t *)(B))[(I)/32] & (1<<((I)%32))) +#define NFS_ATTR_BITMAP_LEN 2 +#define NFS_BITMAP_SET(B, I) (((uint32_t *)(B))[(I)/32] |= 1<<((I)%32)) +#define NFS_BITMAP_CLR(B, I) (((uint32_t *)(B))[(I)/32] &= ~(1<<((I)%32))) +#define NFS_BITMAP_ISSET(B, I) (((uint32_t *)(B))[(I)/32] & (1<<((I)%32))) #define NFS_BITMAP_ZERO(B, L) \ do { \ - int __i; \ - for (__i=0; __i < (L); __i++) \ - ((uint32_t*)(B))[__i] = 0; \ + int __i; \ + for (__i=0; __i < (L); __i++) \ + ((uint32_t*)(B))[__i] = 0; \ } while (0) extern uint32_t nfs_fs_attr_bitmap[NFS_ATTR_BITMAP_LEN]; extern uint32_t nfs_object_attr_bitmap[NFS_ATTR_BITMAP_LEN]; extern uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN]; -#define NFS_CLEAR_ATTRIBUTES(A) NFS_BITMAP_ZERO((A), NFS_ATTR_BITMAP_LEN) +#define NFS_CLEAR_ATTRIBUTES(A) NFS_BITMAP_ZERO((A), NFS_ATTR_BITMAP_LEN) #define NFS_COPY_ATTRIBUTES(SRC, DST) \ do { \ int __i; \ for (__i=0; __i < NFS_ATTR_BITMAP_LEN; __i++) \ - ((uint32_t*)(DST))[__i] = ((uint32_t*)(SRC))[__i]; \ + ((uint32_t*)(DST))[__i] = ((uint32_t*)(SRC))[__i]; \ } while (0) /* NFS attributes */ -#define NFS_FATTR_SUPPORTED_ATTRS 0 -#define NFS_FATTR_TYPE 1 -#define NFS_FATTR_FH_EXPIRE_TYPE 2 -#define NFS_FATTR_CHANGE 3 -#define NFS_FATTR_SIZE 4 -#define NFS_FATTR_LINK_SUPPORT 5 -#define NFS_FATTR_SYMLINK_SUPPORT 6 -#define NFS_FATTR_NAMED_ATTR 7 -#define NFS_FATTR_FSID 8 -#define NFS_FATTR_UNIQUE_HANDLES 9 -#define NFS_FATTR_LEASE_TIME 10 -#define NFS_FATTR_RDATTR_ERROR 11 -#define NFS_FATTR_FILEHANDLE 19 -#define NFS_FATTR_ACL 12 -#define NFS_FATTR_ACLSUPPORT 13 -#define NFS_FATTR_ARCHIVE 14 -#define NFS_FATTR_CANSETTIME 15 -#define NFS_FATTR_CASE_INSENSITIVE 16 -#define NFS_FATTR_CASE_PRESERVING 17 -#define NFS_FATTR_CHOWN_RESTRICTED 18 -#define 
NFS_FATTR_FILEID 20 -#define NFS_FATTR_FILES_AVAIL 21 -#define NFS_FATTR_FILES_FREE 22 -#define NFS_FATTR_FILES_TOTAL 23 -#define NFS_FATTR_FS_LOCATIONS 24 -#define NFS_FATTR_HIDDEN 25 -#define NFS_FATTR_HOMOGENEOUS 26 -#define NFS_FATTR_MAXFILESIZE 27 -#define NFS_FATTR_MAXLINK 28 -#define NFS_FATTR_MAXNAME 29 -#define NFS_FATTR_MAXREAD 30 -#define NFS_FATTR_MAXWRITE 31 -#define NFS_FATTR_MIMETYPE 32 -#define NFS_FATTR_MODE 33 -#define NFS_FATTR_NO_TRUNC 34 -#define NFS_FATTR_NUMLINKS 35 -#define NFS_FATTR_OWNER 36 -#define NFS_FATTR_OWNER_GROUP 37 -#define NFS_FATTR_QUOTA_AVAIL_HARD 38 -#define NFS_FATTR_QUOTA_AVAIL_SOFT 39 -#define NFS_FATTR_QUOTA_USED 40 -#define NFS_FATTR_RAWDEV 41 -#define NFS_FATTR_SPACE_AVAIL 42 -#define NFS_FATTR_SPACE_FREE 43 -#define NFS_FATTR_SPACE_TOTAL 44 -#define NFS_FATTR_SPACE_USED 45 -#define NFS_FATTR_SYSTEM 46 -#define NFS_FATTR_TIME_ACCESS 47 -#define NFS_FATTR_TIME_ACCESS_SET 48 -#define NFS_FATTR_TIME_BACKUP 49 -#define NFS_FATTR_TIME_CREATE 50 -#define NFS_FATTR_TIME_DELTA 51 -#define NFS_FATTR_TIME_METADATA 52 -#define NFS_FATTR_TIME_MODIFY 53 -#define NFS_FATTR_TIME_MODIFY_SET 54 -#define NFS_FATTR_MOUNTED_ON_FILEID 55 +#define NFS_FATTR_SUPPORTED_ATTRS 0 +#define NFS_FATTR_TYPE 1 +#define NFS_FATTR_FH_EXPIRE_TYPE 2 +#define NFS_FATTR_CHANGE 3 +#define NFS_FATTR_SIZE 4 +#define NFS_FATTR_LINK_SUPPORT 5 +#define NFS_FATTR_SYMLINK_SUPPORT 6 +#define NFS_FATTR_NAMED_ATTR 7 +#define NFS_FATTR_FSID 8 +#define NFS_FATTR_UNIQUE_HANDLES 9 +#define NFS_FATTR_LEASE_TIME 10 +#define NFS_FATTR_RDATTR_ERROR 11 +#define NFS_FATTR_FILEHANDLE 19 +#define NFS_FATTR_ACL 12 +#define NFS_FATTR_ACLSUPPORT 13 +#define NFS_FATTR_ARCHIVE 14 +#define NFS_FATTR_CANSETTIME 15 +#define NFS_FATTR_CASE_INSENSITIVE 16 +#define NFS_FATTR_CASE_PRESERVING 17 +#define NFS_FATTR_CHOWN_RESTRICTED 18 +#define NFS_FATTR_FILEID 20 +#define NFS_FATTR_FILES_AVAIL 21 +#define NFS_FATTR_FILES_FREE 22 +#define NFS_FATTR_FILES_TOTAL 23 +#define NFS_FATTR_FS_LOCATIONS 24 +#define NFS_FATTR_HIDDEN 25 +#define NFS_FATTR_HOMOGENEOUS 26 +#define NFS_FATTR_MAXFILESIZE 27 +#define NFS_FATTR_MAXLINK 28 +#define NFS_FATTR_MAXNAME 29 +#define NFS_FATTR_MAXREAD 30 +#define NFS_FATTR_MAXWRITE 31 +#define NFS_FATTR_MIMETYPE 32 +#define NFS_FATTR_MODE 33 +#define NFS_FATTR_NO_TRUNC 34 +#define NFS_FATTR_NUMLINKS 35 +#define NFS_FATTR_OWNER 36 +#define NFS_FATTR_OWNER_GROUP 37 +#define NFS_FATTR_QUOTA_AVAIL_HARD 38 +#define NFS_FATTR_QUOTA_AVAIL_SOFT 39 +#define NFS_FATTR_QUOTA_USED 40 +#define NFS_FATTR_RAWDEV 41 +#define NFS_FATTR_SPACE_AVAIL 42 +#define NFS_FATTR_SPACE_FREE 43 +#define NFS_FATTR_SPACE_TOTAL 44 +#define NFS_FATTR_SPACE_USED 45 +#define NFS_FATTR_SYSTEM 46 +#define NFS_FATTR_TIME_ACCESS 47 +#define NFS_FATTR_TIME_ACCESS_SET 48 +#define NFS_FATTR_TIME_BACKUP 49 +#define NFS_FATTR_TIME_CREATE 50 +#define NFS_FATTR_TIME_DELTA 51 +#define NFS_FATTR_TIME_METADATA 52 +#define NFS_FATTR_TIME_MODIFY 53 +#define NFS_FATTR_TIME_MODIFY_SET 54 +#define NFS_FATTR_MOUNTED_ON_FILEID 55 #define NFS4_ALL_ATTRIBUTES(A) \ do { \ @@ -631,138 +631,138 @@ extern uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN]; * NFS OPEN constants */ /* open type */ -#define NFS_OPEN_NOCREATE 0 -#define NFS_OPEN_CREATE 1 +#define NFS_OPEN_NOCREATE 0 +#define NFS_OPEN_CREATE 1 /* delegation space limit */ -#define NFS_LIMIT_SIZE 1 -#define NFS_LIMIT_BLOCKS 2 +#define NFS_LIMIT_SIZE 1 +#define NFS_LIMIT_BLOCKS 2 /* access/deny modes */ -#define NFS_OPEN_SHARE_ACCESS_NONE 0x00000000 -#define NFS_OPEN_SHARE_ACCESS_READ 0x00000001 
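The NFS_BITMAP_* macros above treat an array of uint32_t as a flat bit vector indexed by the NFS_FATTR_* attribute numbers; with NFS_ATTR_BITMAP_LEN == 2 they cover attributes 0 through 63. A minimal usage sketch (the function is hypothetical, not from the patch; it assumes the definitions above are in scope):

#include <stdint.h>

static void
example_attr_bitmap(void)
{
	uint32_t bitmap[NFS_ATTR_BITMAP_LEN];

	NFS_BITMAP_ZERO(bitmap, NFS_ATTR_BITMAP_LEN);
	NFS_BITMAP_SET(bitmap, NFS_FATTR_TYPE);        /* word 0, bit 1 */
	NFS_BITMAP_SET(bitmap, NFS_FATTR_SIZE);        /* word 0, bit 4 */
	NFS_BITMAP_SET(bitmap, NFS_FATTR_TIME_MODIFY); /* 53 -> word 1, bit 21 */

	if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_TIME_MODIFY)) {
		NFS_BITMAP_CLR(bitmap, NFS_FATTR_TIME_MODIFY);
	}
}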
-#define NFS_OPEN_SHARE_ACCESS_WRITE 0x00000002 -#define NFS_OPEN_SHARE_ACCESS_BOTH 0x00000003 -#define NFS_OPEN_SHARE_DENY_NONE 0x00000000 -#define NFS_OPEN_SHARE_DENY_READ 0x00000001 -#define NFS_OPEN_SHARE_DENY_WRITE 0x00000002 -#define NFS_OPEN_SHARE_DENY_BOTH 0x00000003 +#define NFS_OPEN_SHARE_ACCESS_NONE 0x00000000 +#define NFS_OPEN_SHARE_ACCESS_READ 0x00000001 +#define NFS_OPEN_SHARE_ACCESS_WRITE 0x00000002 +#define NFS_OPEN_SHARE_ACCESS_BOTH 0x00000003 +#define NFS_OPEN_SHARE_DENY_NONE 0x00000000 +#define NFS_OPEN_SHARE_DENY_READ 0x00000001 +#define NFS_OPEN_SHARE_DENY_WRITE 0x00000002 +#define NFS_OPEN_SHARE_DENY_BOTH 0x00000003 /* delegation types */ -#define NFS_OPEN_DELEGATE_NONE 0 -#define NFS_OPEN_DELEGATE_READ 1 -#define NFS_OPEN_DELEGATE_WRITE 2 +#define NFS_OPEN_DELEGATE_NONE 0 +#define NFS_OPEN_DELEGATE_READ 1 +#define NFS_OPEN_DELEGATE_WRITE 2 /* delegation claim types */ -#define NFS_CLAIM_NULL 0 -#define NFS_CLAIM_PREVIOUS 1 -#define NFS_CLAIM_DELEGATE_CUR 2 -#define NFS_CLAIM_DELEGATE_PREV 3 +#define NFS_CLAIM_NULL 0 +#define NFS_CLAIM_PREVIOUS 1 +#define NFS_CLAIM_DELEGATE_CUR 2 +#define NFS_CLAIM_DELEGATE_PREV 3 /* open result flags */ -#define NFS_OPEN_RESULT_CONFIRM 0x00000002 -#define NFS_OPEN_RESULT_LOCKTYPE_POSIX 0x00000004 +#define NFS_OPEN_RESULT_CONFIRM 0x00000002 +#define NFS_OPEN_RESULT_LOCKTYPE_POSIX 0x00000004 /* NFS lock types */ -#define NFS_LOCK_TYPE_READ 1 -#define NFS_LOCK_TYPE_WRITE 2 -#define NFS_LOCK_TYPE_READW 3 /* "blocking" */ -#define NFS_LOCK_TYPE_WRITEW 4 /* "blocking" */ +#define NFS_LOCK_TYPE_READ 1 +#define NFS_LOCK_TYPE_WRITE 2 +#define NFS_LOCK_TYPE_READW 3 /* "blocking" */ +#define NFS_LOCK_TYPE_WRITEW 4 /* "blocking" */ /* NFSv4 RPC procedures */ -#define NFSPROC4_NULL 0 -#define NFSPROC4_COMPOUND 1 -#define NFSPROC4_CB_NULL 0 -#define NFSPROC4_CB_COMPOUND 1 +#define NFSPROC4_NULL 0 +#define NFSPROC4_COMPOUND 1 +#define NFSPROC4_CB_NULL 0 +#define NFSPROC4_CB_COMPOUND 1 /* NFSv4 opcodes */ -#define NFS_OP_ACCESS 3 -#define NFS_OP_CLOSE 4 -#define NFS_OP_COMMIT 5 -#define NFS_OP_CREATE 6 -#define NFS_OP_DELEGPURGE 7 -#define NFS_OP_DELEGRETURN 8 -#define NFS_OP_GETATTR 9 -#define NFS_OP_GETFH 10 -#define NFS_OP_LINK 11 -#define NFS_OP_LOCK 12 -#define NFS_OP_LOCKT 13 -#define NFS_OP_LOCKU 14 -#define NFS_OP_LOOKUP 15 -#define NFS_OP_LOOKUPP 16 -#define NFS_OP_NVERIFY 17 -#define NFS_OP_OPEN 18 -#define NFS_OP_OPENATTR 19 -#define NFS_OP_OPEN_CONFIRM 20 -#define NFS_OP_OPEN_DOWNGRADE 21 -#define NFS_OP_PUTFH 22 -#define NFS_OP_PUTPUBFH 23 -#define NFS_OP_PUTROOTFH 24 -#define NFS_OP_READ 25 -#define NFS_OP_READDIR 26 -#define NFS_OP_READLINK 27 -#define NFS_OP_REMOVE 28 -#define NFS_OP_RENAME 29 -#define NFS_OP_RENEW 30 -#define NFS_OP_RESTOREFH 31 -#define NFS_OP_SAVEFH 32 -#define NFS_OP_SECINFO 33 -#define NFS_OP_SETATTR 34 -#define NFS_OP_SETCLIENTID 35 -#define NFS_OP_SETCLIENTID_CONFIRM 36 -#define NFS_OP_VERIFY 37 -#define NFS_OP_WRITE 38 -#define NFS_OP_RELEASE_LOCKOWNER 39 -#define NFS_OP_ILLEGAL 10044 +#define NFS_OP_ACCESS 3 +#define NFS_OP_CLOSE 4 +#define NFS_OP_COMMIT 5 +#define NFS_OP_CREATE 6 +#define NFS_OP_DELEGPURGE 7 +#define NFS_OP_DELEGRETURN 8 +#define NFS_OP_GETATTR 9 +#define NFS_OP_GETFH 10 +#define NFS_OP_LINK 11 +#define NFS_OP_LOCK 12 +#define NFS_OP_LOCKT 13 +#define NFS_OP_LOCKU 14 +#define NFS_OP_LOOKUP 15 +#define NFS_OP_LOOKUPP 16 +#define NFS_OP_NVERIFY 17 +#define NFS_OP_OPEN 18 +#define NFS_OP_OPENATTR 19 +#define NFS_OP_OPEN_CONFIRM 20 +#define NFS_OP_OPEN_DOWNGRADE 21 +#define NFS_OP_PUTFH 22 
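An NFSv4 request is a COMPOUND (NFSPROC4_COMPOUND) carrying a sequence of the NFS_OP_* opcodes above, each evaluated against an implicit current filehandle. A hypothetical illustration (not from the patch) of the opcode sequence a simple attribute fetch would encode:

/* PUTFH establishes the current fh from the one sent in the request;
 * GETATTR then returns the requested attribute bitmap for that fh. */
static const uint32_t example_getattr_compound[] = {
	NFS_OP_PUTFH,   /* 22 */
	NFS_OP_GETATTR, /*  9 */
};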
+#define NFS_OP_PUTPUBFH 23 +#define NFS_OP_PUTROOTFH 24 +#define NFS_OP_READ 25 +#define NFS_OP_READDIR 26 +#define NFS_OP_READLINK 27 +#define NFS_OP_REMOVE 28 +#define NFS_OP_RENAME 29 +#define NFS_OP_RENEW 30 +#define NFS_OP_RESTOREFH 31 +#define NFS_OP_SAVEFH 32 +#define NFS_OP_SECINFO 33 +#define NFS_OP_SETATTR 34 +#define NFS_OP_SETCLIENTID 35 +#define NFS_OP_SETCLIENTID_CONFIRM 36 +#define NFS_OP_VERIFY 37 +#define NFS_OP_WRITE 38 +#define NFS_OP_RELEASE_LOCKOWNER 39 +#define NFS_OP_ILLEGAL 10044 /* NFSv4 callback opcodes */ -#define NFS_OP_CB_GETATTR 3 -#define NFS_OP_CB_RECALL 4 -#define NFS_OP_CB_ILLEGAL 10044 +#define NFS_OP_CB_GETATTR 3 +#define NFS_OP_CB_RECALL 4 +#define NFS_OP_CB_ILLEGAL 10044 /* NFSv4 file handle type flags */ -#define NFS_FH_PERSISTENT 0x00000000 -#define NFS_FH_NOEXPIRE_WITH_OPEN 0x00000001 -#define NFS_FH_VOLATILE_ANY 0x00000002 -#define NFS_FH_VOL_MIGRATION 0x00000004 -#define NFS_FH_VOL_RENAME 0x00000008 +#define NFS_FH_PERSISTENT 0x00000000 +#define NFS_FH_NOEXPIRE_WITH_OPEN 0x00000001 +#define NFS_FH_VOLATILE_ANY 0x00000002 +#define NFS_FH_VOL_MIGRATION 0x00000004 +#define NFS_FH_VOL_RENAME 0x00000008 /* * NFSv4 ACL constants */ /* ACE support mask bits */ -#define NFS_ACL_SUPPORT_ALLOW_ACL 0x00000001 -#define NFS_ACL_SUPPORT_DENY_ACL 0x00000002 -#define NFS_ACL_SUPPORT_AUDIT_ACL 0x00000004 -#define NFS_ACL_SUPPORT_ALARM_ACL 0x00000008 +#define NFS_ACL_SUPPORT_ALLOW_ACL 0x00000001 +#define NFS_ACL_SUPPORT_DENY_ACL 0x00000002 +#define NFS_ACL_SUPPORT_AUDIT_ACL 0x00000004 +#define NFS_ACL_SUPPORT_ALARM_ACL 0x00000008 /* ACE types */ -#define NFS_ACE_ACCESS_ALLOWED_ACE_TYPE 0x00000000 -#define NFS_ACE_ACCESS_DENIED_ACE_TYPE 0x00000001 -#define NFS_ACE_SYSTEM_AUDIT_ACE_TYPE 0x00000002 -#define NFS_ACE_SYSTEM_ALARM_ACE_TYPE 0x00000003 +#define NFS_ACE_ACCESS_ALLOWED_ACE_TYPE 0x00000000 +#define NFS_ACE_ACCESS_DENIED_ACE_TYPE 0x00000001 +#define NFS_ACE_SYSTEM_AUDIT_ACE_TYPE 0x00000002 +#define NFS_ACE_SYSTEM_ALARM_ACE_TYPE 0x00000003 /* ACE flags */ -#define NFS_ACE_FILE_INHERIT_ACE 0x00000001 -#define NFS_ACE_DIRECTORY_INHERIT_ACE 0x00000002 -#define NFS_ACE_NO_PROPAGATE_INHERIT_ACE 0x00000004 -#define NFS_ACE_INHERIT_ONLY_ACE 0x00000008 -#define NFS_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010 -#define NFS_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020 -#define NFS_ACE_IDENTIFIER_GROUP 0x00000040 -#define NFS_ACE_INHERITED_ACE 0x00000080 +#define NFS_ACE_FILE_INHERIT_ACE 0x00000001 +#define NFS_ACE_DIRECTORY_INHERIT_ACE 0x00000002 +#define NFS_ACE_NO_PROPAGATE_INHERIT_ACE 0x00000004 +#define NFS_ACE_INHERIT_ONLY_ACE 0x00000008 +#define NFS_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010 +#define NFS_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020 +#define NFS_ACE_IDENTIFIER_GROUP 0x00000040 +#define NFS_ACE_INHERITED_ACE 0x00000080 /* ACE mask flags */ -#define NFS_ACE_READ_DATA 0x00000001 -#define NFS_ACE_LIST_DIRECTORY 0x00000001 -#define NFS_ACE_WRITE_DATA 0x00000002 -#define NFS_ACE_ADD_FILE 0x00000002 -#define NFS_ACE_APPEND_DATA 0x00000004 -#define NFS_ACE_ADD_SUBDIRECTORY 0x00000004 -#define NFS_ACE_READ_NAMED_ATTRS 0x00000008 -#define NFS_ACE_WRITE_NAMED_ATTRS 0x00000010 -#define NFS_ACE_EXECUTE 0x00000020 -#define NFS_ACE_DELETE_CHILD 0x00000040 -#define NFS_ACE_READ_ATTRIBUTES 0x00000080 -#define NFS_ACE_WRITE_ATTRIBUTES 0x00000100 -#define NFS_ACE_DELETE 0x00010000 -#define NFS_ACE_READ_ACL 0x00020000 -#define NFS_ACE_WRITE_ACL 0x00040000 -#define NFS_ACE_WRITE_OWNER 0x00080000 -#define NFS_ACE_SYNCHRONIZE 0x00100000 -#define NFS_ACE_GENERIC_READ 0x00120081 -#define 
NFS_ACE_GENERIC_WRITE 0x00160106 -#define NFS_ACE_GENERIC_EXECUTE 0x001200A0 +#define NFS_ACE_READ_DATA 0x00000001 +#define NFS_ACE_LIST_DIRECTORY 0x00000001 +#define NFS_ACE_WRITE_DATA 0x00000002 +#define NFS_ACE_ADD_FILE 0x00000002 +#define NFS_ACE_APPEND_DATA 0x00000004 +#define NFS_ACE_ADD_SUBDIRECTORY 0x00000004 +#define NFS_ACE_READ_NAMED_ATTRS 0x00000008 +#define NFS_ACE_WRITE_NAMED_ATTRS 0x00000010 +#define NFS_ACE_EXECUTE 0x00000020 +#define NFS_ACE_DELETE_CHILD 0x00000040 +#define NFS_ACE_READ_ATTRIBUTES 0x00000080 +#define NFS_ACE_WRITE_ATTRIBUTES 0x00000100 +#define NFS_ACE_DELETE 0x00010000 +#define NFS_ACE_READ_ACL 0x00020000 +#define NFS_ACE_WRITE_ACL 0x00040000 +#define NFS_ACE_WRITE_OWNER 0x00080000 +#define NFS_ACE_SYNCHRONIZE 0x00100000 +#define NFS_ACE_GENERIC_READ 0x00120081 +#define NFS_ACE_GENERIC_WRITE 0x00160106 +#define NFS_ACE_GENERIC_EXECUTE 0x001200A0 /* @@ -770,25 +770,25 @@ extern uint32_t nfs_getattr_bitmap[NFS_ATTR_BITMAP_LEN]; * for the protocol and to facilitate xdr conversion. */ struct nfs_uquad { - u_int32_t nfsuquad[2]; + u_int32_t nfsuquad[2]; }; -typedef struct nfs_uquad nfsuint64; +typedef struct nfs_uquad nfsuint64; /* * Used to convert between two u_int32_ts and a u_quad_t. */ union nfs_quadconvert { - u_int32_t lval[2]; - u_quad_t qval; + u_int32_t lval[2]; + u_quad_t qval; }; -typedef union nfs_quadconvert nfsquad_t; +typedef union nfs_quadconvert nfsquad_t; /* * special data/attribute associated with NFBLK/NFCHR */ struct nfs_specdata { - uint32_t specdata1; /* major device number */ - uint32_t specdata2; /* minor device number */ + uint32_t specdata1; /* major device number */ + uint32_t specdata2; /* minor device number */ }; typedef struct nfs_specdata nfs_specdata; @@ -805,8 +805,8 @@ typedef struct nfs_fsid nfs_fsid; * NFSv4 stateid structure */ struct nfs_stateid { - uint32_t seqid; - uint32_t other[3]; + uint32_t seqid; + uint32_t other[3]; }; typedef struct nfs_stateid nfs_stateid; diff --git a/bsd/nfs/nfsrvcache.h b/bsd/nfs/nfsrvcache.h index 5d3e311b5..9c92b00c1 100644 --- a/bsd/nfs/nfsrvcache.h +++ b/bsd/nfs/nfsrvcache.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -83,47 +83,47 @@ union nethostaddr { mbuf_t had_nam; }; -#define NFSRVCACHESIZ 64 +#define NFSRVCACHESIZ 64 struct nfsrvcache { - TAILQ_ENTRY(nfsrvcache) rc_lru; /* LRU chain */ - LIST_ENTRY(nfsrvcache) rc_hash; /* Hash chain */ - u_int32_t rc_xid; /* rpc id number */ + TAILQ_ENTRY(nfsrvcache) rc_lru; /* LRU chain */ + LIST_ENTRY(nfsrvcache) rc_hash; /* Hash chain */ + u_int32_t rc_xid; /* rpc id number */ union { - mbuf_t ru_repmb; /* Reply mbuf list OR */ - int ru_repstat; /* Reply status */ + mbuf_t ru_repmb; /* Reply mbuf list OR */ + int ru_repstat; /* Reply status */ } rc_un; - sa_family_t rc_family; /* address family */ - union nethostaddr rc_haddr; /* Host address */ - u_int32_t rc_proc; /* rpc proc number */ - u_char rc_state; /* Current state of request */ - u_char rc_flag; /* Flag bits */ + sa_family_t rc_family; /* address family */ + union nethostaddr rc_haddr; /* Host address */ + u_int32_t rc_proc; /* rpc proc number */ + u_char rc_state; /* Current state of request */ + u_char rc_flag; /* Flag bits */ }; -#define rc_reply rc_un.ru_repmb -#define rc_status rc_un.ru_repstat -#define rc_inetaddr rc_haddr.had_inetaddr -#define rc_inet6addr rc_haddr.had_inet6addr -#define rc_nam rc_haddr.had_nam +#define rc_reply rc_un.ru_repmb +#define rc_status rc_un.ru_repstat +#define rc_inetaddr rc_haddr.had_inetaddr +#define rc_inet6addr rc_haddr.had_inet6addr +#define rc_nam rc_haddr.had_nam /* Cache entry states */ -#define RC_UNUSED 0 -#define RC_INPROG 1 -#define RC_DONE 2 +#define RC_UNUSED 0 +#define RC_INPROG 1 +#define RC_DONE 2 /* Return values */ -#define RC_DROPIT 0 -#define RC_REPLY 1 -#define RC_DOIT 2 -#define RC_CHECKIT 3 +#define RC_DROPIT 0 +#define RC_REPLY 1 +#define RC_DOIT 2 +#define RC_CHECKIT 3 /* Flag bits */ -#define RC_LOCKED 0x01 -#define RC_WANTED 0x02 -#define RC_REPSTATUS 0x04 -#define RC_REPMBUF 0x08 -#define RC_INETADDR 0x20 -#define RC_NAM 0x40 +#define RC_LOCKED 0x01 +#define RC_WANTED 0x02 +#define RC_REPSTATUS 0x04 +#define RC_REPMBUF 0x08 +#define RC_INETADDR 0x20 +#define RC_NAM 0x40 extern lck_grp_t *nfsrv_reqcache_lck_grp; extern lck_mtx_t *nfsrv_reqcache_mutex; diff --git a/bsd/nfs/rpcv2.h b/bsd/nfs/rpcv2.h index 3a288f203..2a5bc9c6f 100644 --- a/bsd/nfs/rpcv2.h +++ b/bsd/nfs/rpcv2.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -78,84 +78,84 @@ */ /* Version # */ -#define RPC_VER2 2 +#define RPC_VER2 2 /* Authentication */ -#define RPCAUTH_NULL 0 -#define RPCAUTH_NONE RPCAUTH_NULL -#define RPCAUTH_UNIX 1 -#define RPCAUTH_SYS RPCAUTH_UNIX -#define RPCAUTH_SHORT 2 -#define RPCAUTH_KERB4 4 -#define RPCAUTH_KRB5 390003 -#define RPCAUTH_KRB5I 390004 -#define RPCAUTH_KRB5P 390005 -#define RPCAUTH_INVALID ~0U -#define RPCAUTH_UNKNOWN RPCAUTH_INVALID +#define RPCAUTH_NULL 0 +#define RPCAUTH_NONE RPCAUTH_NULL +#define RPCAUTH_UNIX 1 +#define RPCAUTH_SYS RPCAUTH_UNIX +#define RPCAUTH_SHORT 2 +#define RPCAUTH_KERB4 4 +#define RPCAUTH_KRB5 390003 +#define RPCAUTH_KRB5I 390004 +#define RPCAUTH_KRB5P 390005 +#define RPCAUTH_INVALID ~0U +#define RPCAUTH_UNKNOWN RPCAUTH_INVALID -#define RPCAUTH_MAXSIZ 400 -#define RPCAUTH_UNIXGIDS 16 +#define RPCAUTH_MAXSIZ 400 +#define RPCAUTH_UNIXGIDS 16 /* * Constants associated with authentication flavours. */ -#define RPCAKN_FULLNAME 0 -#define RPCAKN_NICKNAME 1 +#define RPCAKN_FULLNAME 0 +#define RPCAKN_NICKNAME 1 /* Rpc Constants */ -#define RPC_CALL 0 -#define RPC_REPLY 1 -#define RPC_MSGACCEPTED 0 -#define RPC_MSGDENIED 1 -#define RPC_SUCCESS 0 -#define RPC_PROGUNAVAIL 1 -#define RPC_PROGMISMATCH 2 -#define RPC_PROCUNAVAIL 3 -#define RPC_GARBAGE 4 /* I like this one */ -#define RPC_SYSTEM_ERR 5 -#define RPC_MISMATCH 0 -#define RPC_AUTHERR 1 +#define RPC_CALL 0 +#define RPC_REPLY 1 +#define RPC_MSGACCEPTED 0 +#define RPC_MSGDENIED 1 +#define RPC_SUCCESS 0 +#define RPC_PROGUNAVAIL 1 +#define RPC_PROGMISMATCH 2 +#define RPC_PROCUNAVAIL 3 +#define RPC_GARBAGE 4 /* I like this one */ +#define RPC_SYSTEM_ERR 5 +#define RPC_MISMATCH 0 +#define RPC_AUTHERR 1 /* Authentication failures */ -#define AUTH_BADCRED 1 -#define AUTH_REJECTCRED 2 -#define AUTH_BADVERF 3 -#define AUTH_REJECTVERF 4 -#define AUTH_TOOWEAK 5 /* Give em wheaties */ -#define AUTH_INVALIDRESP 6 -#define AUTH_FAILED 7 -#define AUTH_KERB_GENERIC 8 -#define AUTH_TIMEEXPIRE 9 -#define AUTH_TKT_FILE 10 -#define AUTH_DECODE 11 -#define AUTH_NET_ADDR 12 -#define RPCSEC_GSS_CREDPROBLEM 13 -#define RPCSEC_GSS_CTXPROBLEM 14 +#define AUTH_BADCRED 1 +#define AUTH_REJECTCRED 2 +#define AUTH_BADVERF 3 +#define AUTH_REJECTVERF 4 +#define AUTH_TOOWEAK 5 /* Give em wheaties */ +#define AUTH_INVALIDRESP 6 +#define AUTH_FAILED 7 +#define AUTH_KERB_GENERIC 8 +#define AUTH_TIMEEXPIRE 9 +#define AUTH_TKT_FILE 10 +#define AUTH_DECODE 11 +#define AUTH_NET_ADDR 12 +#define RPCSEC_GSS_CREDPROBLEM 13 +#define RPCSEC_GSS_CTXPROBLEM 14 /* Sizes of rpc header parts */ -#define RPC_SIZ 24 -#define RPC_REPLYSIZ 28 +#define RPC_SIZ 24 +#define RPC_REPLYSIZ 28 /* RPC Prog definitions */ -#define RPCPROG_MNT 100005 -#define RPCMNT_VER1 1 -#define RPCMNT_VER3 3 -#define RPCMNT_MOUNT 1 -#define RPCMNT_DUMP 2 -#define RPCMNT_UMOUNT 3 -#define RPCMNT_UMNTALL 4 -#define RPCMNT_EXPORT 5 -#define RPCMNT_NAMELEN 255 -#define RPCMNT_PATHLEN 1024 -#define RPCPROG_NFS 100003 +#define RPCPROG_MNT 100005 +#define RPCMNT_VER1 1 +#define RPCMNT_VER3 3 +#define RPCMNT_MOUNT 1 +#define RPCMNT_DUMP 2 +#define RPCMNT_UMOUNT 3 +#define RPCMNT_UMNTALL 4 +#define RPCMNT_EXPORT 5 +#define RPCMNT_NAMELEN 255 +#define RPCMNT_PATHLEN 1024 +#define RPCPROG_NFS 100003 -#define RPCPROG_RQUOTA 100011 -#define RPCRQUOTA_VER 1 -#define RPCRQUOTA_EXT_VER 2 -#define RPCRQUOTA_GET 1 -#define RQUOTA_STAT_OK 1 -#define RQUOTA_STAT_NOQUOTA 2 -#define RQUOTA_STAT_EPERM 3 +#define RPCPROG_RQUOTA 100011 +#define RPCRQUOTA_VER 1 +#define RPCRQUOTA_EXT_VER 2 +#define 
RPCRQUOTA_GET 1 +#define RQUOTA_STAT_OK 1 +#define RQUOTA_STAT_NOQUOTA 2 +#define RQUOTA_STAT_EPERM 3 #endif /* __APPLE_API_PRIVATE */ #endif /* _NFS_RPCV2_H_ */ diff --git a/bsd/nfs/xdr_subs.h b/bsd/nfs/xdr_subs.h index 77590a33b..24295f487 100644 --- a/bsd/nfs/xdr_subs.h +++ b/bsd/nfs/xdr_subs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -82,14 +82,14 @@ * but we cannot count on their alignment anyway. */ -#define fxdr_unsigned(t, v) ((t)ntohl((uint32_t)(v))) -#define txdr_unsigned(v) (htonl((uint32_t)(v))) +#define fxdr_unsigned(t, v) ((t)ntohl((uint32_t)(v))) +#define txdr_unsigned(v) (htonl((uint32_t)(v))) -#define fxdr_hyper(f, t) { \ +#define fxdr_hyper(f, t) { \ ((uint32_t *)(t))[_QUAD_HIGHWORD] = ntohl(((uint32_t *)(f))[0]); \ ((uint32_t *)(t))[_QUAD_LOWWORD] = ntohl(((uint32_t *)(f))[1]); \ } -#define txdr_hyper(f, t) { \ +#define txdr_hyper(f, t) { \ ((uint32_t *)(t))[0] = htonl(((uint32_t *)(f))[_QUAD_HIGHWORD]); \ ((uint32_t *)(t))[1] = htonl(((uint32_t *)(f))[_QUAD_LOWWORD]); \ } @@ -105,23 +105,23 @@ typedef enum xdrbuf_type { XDRBUF_BUFFER=1 } xdrbuf_type; struct xdrbuf { union { struct { - char * xbb_base; /* base address of buffer */ - uint32_t xbb_size; /* size of buffer */ - uint32_t xbb_len; /* length of data in buffer */ + char * xbb_base; /* base address of buffer */ + uint32_t xbb_size; /* size of buffer */ + uint32_t xbb_len; /* length of data in buffer */ } xb_buffer; } xb_u; - char * xb_ptr; /* pointer to current position */ - size_t xb_left; /* bytes remaining in current buffer */ - size_t xb_growsize; /* bytes to allocate when growing */ - xdrbuf_type xb_type; /* type of xdr buffer */ - uint32_t xb_flags; /* XB_* (see below) */ + char * xb_ptr; /* pointer to current position */ + size_t xb_left; /* bytes remaining in current buffer */ + size_t xb_growsize; /* bytes to allocate when growing */ + xdrbuf_type xb_type; /* type of xdr buffer */ + uint32_t xb_flags; /* XB_* (see below) */ }; -#define XB_CLEANUP 0x0001 /* needs cleanup */ +#define XB_CLEANUP 0x0001 /* needs cleanup */ -#define XDRWORD 4 /* the basic XDR building block is a 4 byte (32 bit) word */ -#define xdr_rndup(a) (((a)+3)&(~0x3)) /* round up to XDRWORD size */ -#define xdr_pad(a) (xdr_rndup(a) - (a)) /* calculate round up padding */ +#define XDRWORD 4 /* the basic XDR building block is a 4 byte (32 bit) word */ +#define xdr_rndup(a) 
(((a)+3)&(~0x3)) /* round up to XDRWORD size */ +#define xdr_pad(a) (xdr_rndup(a) - (a)) /* calculate round up padding */ void xb_init(struct xdrbuf *, xdrbuf_type); void xb_init_buffer(struct xdrbuf *, char *, size_t); @@ -163,8 +163,9 @@ xb_init_buffer(struct xdrbuf *xbp, char *buf, size_t buflen) xbp->xb_growsize = 512; xbp->xb_ptr = buf; xbp->xb_left = buflen; - if (buf) /* when using an existing buffer, xb code should skip cleanup */ + if (buf) { /* when using an existing buffer, xb code should skip cleanup */ xbp->xb_flags &= ~XB_CLEANUP; + } } /* @@ -173,7 +174,7 @@ xb_init_buffer(struct xdrbuf *xbp, char *buf, size_t buflen) char * xb_buffer_base(struct xdrbuf *xbp) { - return (xbp->xb_u.xb_buffer.xbb_base); + return xbp->xb_u.xb_buffer.xbb_base; } /* @@ -182,12 +183,14 @@ xb_buffer_base(struct xdrbuf *xbp) void xb_cleanup(struct xdrbuf *xbp) { - if (!(xbp->xb_flags & XB_CLEANUP)) + if (!(xbp->xb_flags & XB_CLEANUP)) { return; + } switch (xbp->xb_type) { case XDRBUF_BUFFER: - if (xbp->xb_u.xb_buffer.xbb_base) + if (xbp->xb_u.xb_buffer.xbb_base) { xb_free(xbp->xb_u.xb_buffer.xbb_base); + } break; } xbp->xb_flags &= ~XB_CLEANUP; @@ -216,8 +219,9 @@ xb_advance(struct xdrbuf *xbp, uint32_t len) uint32_t tlen; while (len) { - if (xbp->xb_left <= 0) - return (EBADRPC); + if (xbp->xb_left <= 0) { + return EBADRPC; + } tlen = MIN(xbp->xb_left, len); if (tlen) { xbp->xb_ptr += tlen; @@ -225,7 +229,7 @@ xb_advance(struct xdrbuf *xbp, uint32_t len) len -= tlen; } } - return (0); + return 0; } /* @@ -242,7 +246,7 @@ xb_offset(struct xdrbuf *xbp) break; } - return (offset); + return offset; } /* @@ -251,7 +255,6 @@ xb_offset(struct xdrbuf *xbp) int xb_seek(struct xdrbuf *xbp, uint32_t offset) { - switch (xbp->xb_type) { case XDRBUF_BUFFER: xbp->xb_ptr = xbp->xb_u.xb_buffer.xbb_base + offset; @@ -259,7 +262,7 @@ xb_seek(struct xdrbuf *xbp, uint32_t offset) break; } - return (0); + return 0; } /* @@ -275,7 +278,7 @@ xb_malloc(size_t size) #else buf = malloc(size); #endif - return (buf); + return buf; } /* * free a chunk of memory allocated with xb_malloc() @@ -304,11 +307,13 @@ xb_grow(struct xdrbuf *xbp) oldsize = xbp->xb_u.xb_buffer.xbb_size; oldbuf = xbp->xb_u.xb_buffer.xbb_base; newsize = oldsize + xbp->xb_growsize; - if (newsize < oldsize) - return (ENOMEM); + if (newsize < oldsize) { + return ENOMEM; + } newbuf = xb_malloc(newsize); - if (newbuf == NULL) - return (ENOMEM); + if (newbuf == NULL) { + return ENOMEM; + } if (oldbuf != NULL) { bcopy(oldbuf, newbuf, oldsize); xb_free(oldbuf); @@ -320,7 +325,7 @@ xb_grow(struct xdrbuf *xbp) break; } - return (0); + return 0; } /* @@ -340,16 +345,19 @@ xb_add_bytes(struct xdrbuf *xbp, const char *buf, uint32_t count, int nopad) while (len) { if (xbp->xb_left <= 0) { /* need more space */ - if ((error = xb_grow(xbp))) - return (error); - if (xbp->xb_left <= 0) - return (ENOMEM); + if ((error = xb_grow(xbp))) { + return error; + } + if (xbp->xb_left <= 0) { + return ENOMEM; + } } tlen = MIN(xbp->xb_left, len); if (tlen) { if (count) { - if (tlen > count) + if (tlen > count) { tlen = count; + } bcopy(buf, xbp->xb_ptr, tlen); } else { bzero(xbp->xb_ptr, tlen); @@ -363,7 +371,7 @@ xb_add_bytes(struct xdrbuf *xbp, const char *buf, uint32_t count, int nopad) } } } - return (0); + return 0; } /* @@ -380,13 +388,15 @@ xb_get_bytes(struct xdrbuf *xbp, char *buf, uint32_t count, int nopad) /* copy in "count" bytes and zero out any pad bytes */ while (len) { - if (xbp->xb_left <= 0) - return (ENOMEM); + if (xbp->xb_left <= 0) { + return ENOMEM; + } tlen = 
MIN(xbp->xb_left, len); if (tlen) { if (count) { - if (tlen > count) + if (tlen > count) { tlen = count; + } bcopy(xbp->xb_ptr, buf, tlen); } xbp->xb_ptr += tlen; @@ -398,7 +408,7 @@ xb_get_bytes(struct xdrbuf *xbp, char *buf, uint32_t count, int nopad) } } } - return (0); + return 0; } #endif /* _NFS_XDR_SUBS_FUNCS_ */ @@ -411,53 +421,53 @@ xb_get_bytes(struct xdrbuf *xbp, char *buf, uint32_t count, int nopad) /* finalize the data that has been added to the buffer */ #define xb_build_done(E, XB) \ do { \ - if (E) break; \ - xb_set_cur_buf_len(XB); \ + if (E) break; \ + xb_set_cur_buf_len(XB); \ } while (0) /* add a 32-bit value */ #define xb_add_32(E, XB, VAL) \ do { \ - uint32_t __tmp; \ - if (E) break; \ - __tmp = txdr_unsigned(VAL); \ - (E) = xb_add_bytes((XB), (void*)&__tmp, XDRWORD, 0); \ + uint32_t __tmp; \ + if (E) break; \ + __tmp = txdr_unsigned(VAL); \ + (E) = xb_add_bytes((XB), (void*)&__tmp, XDRWORD, 0); \ } while (0) /* add a 64-bit value */ #define xb_add_64(E, XB, VAL) \ do { \ - uint64_t __tmp1, __tmp2; \ - if (E) break; \ - __tmp1 = (VAL); \ - txdr_hyper(&__tmp1, &__tmp2); \ - (E) = xb_add_bytes((XB), (char*)&__tmp2, 2 * XDRWORD, 0); \ + uint64_t __tmp1, __tmp2; \ + if (E) break; \ + __tmp1 = (VAL); \ + txdr_hyper(&__tmp1, &__tmp2); \ + (E) = xb_add_bytes((XB), (char*)&__tmp2, 2 * XDRWORD, 0); \ } while (0) /* add an array of XDR words */ #define xb_add_word_array(E, XB, A, LEN) \ do { \ - uint32_t __i; \ - xb_add_32((E), (XB), (LEN)); \ - for (__i=0; __i < (uint32_t)(LEN); __i++) \ - xb_add_32((E), (XB), (A)[__i]); \ + uint32_t __i; \ + xb_add_32((E), (XB), (LEN)); \ + for (__i=0; __i < (uint32_t)(LEN); __i++) \ + xb_add_32((E), (XB), (A)[__i]); \ } while (0) -#define xb_add_bitmap(E, XB, B, LEN) xb_add_word_array((E), (XB), (B), (LEN)) +#define xb_add_bitmap(E, XB, B, LEN) xb_add_word_array((E), (XB), (B), (LEN)) /* add a file handle */ #define xb_add_fh(E, XB, FHP, FHLEN) \ do { \ - xb_add_32((E), (XB), (FHLEN)); \ - if (E) break; \ - (E) = xb_add_bytes((XB), (char*)(FHP), (FHLEN), 0); \ + xb_add_32((E), (XB), (FHLEN)); \ + if (E) break; \ + (E) = xb_add_bytes((XB), (char*)(FHP), (FHLEN), 0); \ } while (0) /* add a string */ #define xb_add_string(E, XB, S, LEN) \ do { \ - xb_add_32((E), (XB), (LEN)); \ - if (E) break; \ - (E) = xb_add_bytes((XB), (const char*)(S), (LEN), 0); \ + xb_add_32((E), (XB), (LEN)); \ + if (E) break; \ + (E) = xb_add_bytes((XB), (const char*)(S), (LEN), 0); \ } while (0) @@ -468,46 +478,46 @@ xb_get_bytes(struct xdrbuf *xbp, char *buf, uint32_t count, int nopad) /* skip past data in the buffer */ #define xb_skip(E, XB, LEN) \ do { \ - if (E) break; \ - (E) = xb_advance((XB), (LEN)); \ + if (E) break; \ + (E) = xb_advance((XB), (LEN)); \ } while (0) /* get a 32-bit value */ #define xb_get_32(E, XB, LVAL) \ do { \ - uint32_t __tmp; \ - if (E) break; \ - (E) = xb_get_bytes((XB), (char*)&__tmp, XDRWORD, 0); \ - if (E) break; \ - (LVAL) = fxdr_unsigned(uint32_t, __tmp); \ + uint32_t __tmp; \ + if (E) break; \ + (E) = xb_get_bytes((XB), (char*)&__tmp, XDRWORD, 0); \ + if (E) break; \ + (LVAL) = fxdr_unsigned(uint32_t, __tmp); \ } while (0) /* get a 64-bit value */ #define xb_get_64(E, XB, LVAL) \ do { \ - uint64_t __tmp; \ - if (E) break; \ - (E) = xb_get_bytes((XB), (char*)&__tmp, 2 * XDRWORD, 0); \ - if (E) break; \ - fxdr_hyper(&__tmp, &(LVAL)); \ + uint64_t __tmp; \ + if (E) break; \ + (E) = xb_get_bytes((XB), (char*)&__tmp, 2 * XDRWORD, 0); \ + if (E) break; \ + fxdr_hyper(&__tmp, &(LVAL)); \ } while (0) /* get an array of XDR words (of a given 
expected/maximum length) */ #define xb_get_word_array(E, XB, A, LEN) \ do { \ - uint32_t __len = 0, __i; \ - xb_get_32((E), (XB), __len); \ - if (E) break; \ - for (__i=0; __i < MIN(__len, (uint32_t)(LEN)); __i++) \ - xb_get_32((E), (XB), (A)[__i]); \ - if (E) break; \ - for (; __i < __len; __i++) \ - xb_skip((E), (XB), XDRWORD); \ - for (; __i < (uint32_t)(LEN); __i++) \ - (A)[__i] = 0; \ - (LEN) = __len; \ + uint32_t __len = 0, __i; \ + xb_get_32((E), (XB), __len); \ + if (E) break; \ + for (__i=0; __i < MIN(__len, (uint32_t)(LEN)); __i++) \ + xb_get_32((E), (XB), (A)[__i]); \ + if (E) break; \ + for (; __i < __len; __i++) \ + xb_skip((E), (XB), XDRWORD); \ + for (; __i < (uint32_t)(LEN); __i++) \ + (A)[__i] = 0; \ + (LEN) = __len; \ } while (0) -#define xb_get_bitmap(E, XB, B, LEN) xb_get_word_array((E), (XB), (B), (LEN)) +#define xb_get_bitmap(E, XB, B, LEN) xb_get_word_array((E), (XB), (B), (LEN)) #endif /* __APPLE_API_PRIVATE */ #endif /* _NFS_XDR_SUBS_H_ */ diff --git a/bsd/pgo/profile_runtime.c b/bsd/pgo/profile_runtime.c index 4c115151b..8a0def8f0 100644 --- a/bsd/pgo/profile_runtime.c +++ b/bsd/pgo/profile_runtime.c @@ -44,18 +44,18 @@ uint64_t __llvm_profile_get_size_for_buffer(void); int __llvm_profile_write_buffer(char *Buffer); uint64_t __llvm_profile_get_size_for_buffer_internal(const char *DataBegin, - const char *DataEnd, - const char *CountersBegin, - const char *CountersEnd , - const char *NamesBegin, - const char *NamesEnd); + const char *DataEnd, + const char *CountersBegin, + const char *CountersEnd, + const char *NamesBegin, + const char *NamesEnd); int __llvm_profile_write_buffer_internal(char *Buffer, - const char *DataBegin, - const char *DataEnd, - const char *CountersBegin, - const char *CountersEnd , - const char *NamesBegin, - const char *NamesEnd); + const char *DataBegin, + const char *DataEnd, + const char *CountersBegin, + const char *CountersEnd, + const char *NamesBegin, + const char *NamesEnd); extern char __pgo_hib_DataStart __asm("section$start$__HIB$__llvm_prf_data"); extern char __pgo_hib_DataEnd __asm("section$end$__HIB$__llvm_prf_data"); @@ -65,30 +65,32 @@ extern char __pgo_hib_CountersStart __asm("section$start$__HIB$__llvm_prf_cnts") extern char __pgo_hib_CountersEnd __asm("section$end$__HIB$__llvm_prf_cnts"); -static uint64_t get_size_for_buffer(int flags) +static uint64_t +get_size_for_buffer(int flags) { - if (flags & PGO_HIB) { - return __llvm_profile_get_size_for_buffer_internal( - &__pgo_hib_DataStart, &__pgo_hib_DataEnd, - &__pgo_hib_CountersStart, &__pgo_hib_CountersEnd, - &__pgo_hib_NamesStart, &__pgo_hib_NamesEnd); - } else { - return __llvm_profile_get_size_for_buffer(); - } + if (flags & PGO_HIB) { + return __llvm_profile_get_size_for_buffer_internal( + &__pgo_hib_DataStart, &__pgo_hib_DataEnd, + &__pgo_hib_CountersStart, &__pgo_hib_CountersEnd, + &__pgo_hib_NamesStart, &__pgo_hib_NamesEnd); + } else { + return __llvm_profile_get_size_for_buffer(); + } } -static int write_buffer(int flags, char *buffer) +static int +write_buffer(int flags, char *buffer) { - if (flags & PGO_HIB) { - return __llvm_profile_write_buffer_internal( - buffer, - &__pgo_hib_DataStart, &__pgo_hib_DataEnd, - &__pgo_hib_CountersStart, &__pgo_hib_CountersEnd, - &__pgo_hib_NamesStart, &__pgo_hib_NamesEnd); - } else { - return __llvm_profile_write_buffer(buffer); - } + if (flags & PGO_HIB) { + return __llvm_profile_write_buffer_internal( + buffer, + &__pgo_hib_DataStart, &__pgo_hib_DataEnd, + &__pgo_hib_CountersStart, &__pgo_hib_CountersEnd, + 
&__pgo_hib_NamesStart, &__pgo_hib_NamesEnd); + } else { + return __llvm_profile_write_buffer(buffer); + } } @@ -99,40 +101,41 @@ static int write_buffer(int flags, char *buffer) int kdp_pgo_reset_counters = 0; /* called in debugger context */ -kern_return_t do_pgo_reset_counters() +kern_return_t +do_pgo_reset_counters() { #ifdef PROFILE - memset(&__pgo_hib_CountersStart, 0, - ((uintptr_t)(&__pgo_hib_CountersEnd)) - ((uintptr_t)(&__pgo_hib_CountersStart))); + memset(&__pgo_hib_CountersStart, 0, + ((uintptr_t)(&__pgo_hib_CountersEnd)) - ((uintptr_t)(&__pgo_hib_CountersStart))); #endif - OSKextResetPgoCounters(); - kdp_pgo_reset_counters = 0; - return KERN_SUCCESS; + OSKextResetPgoCounters(); + kdp_pgo_reset_counters = 0; + return KERN_SUCCESS; } static kern_return_t kextpgo_trap() { - return DebuggerTrapWithState(DBOP_RESET_PGO_COUNTERS, NULL, NULL, NULL, 0, NULL, FALSE, 0); + return DebuggerTrapWithState(DBOP_RESET_PGO_COUNTERS, NULL, NULL, NULL, 0, NULL, FALSE, 0); } static kern_return_t pgo_reset_counters() { - kern_return_t r; - boolean_t istate; + kern_return_t r; + boolean_t istate; - OSKextResetPgoCountersLock(); + OSKextResetPgoCountersLock(); - istate = ml_set_interrupts_enabled(FALSE); + istate = ml_set_interrupts_enabled(FALSE); - kdp_pgo_reset_counters = 1; - r = kextpgo_trap(); + kdp_pgo_reset_counters = 1; + r = kextpgo_trap(); - ml_set_interrupts_enabled(istate); + ml_set_interrupts_enabled(istate); - OSKextResetPgoCountersUnlock(); - return r; + OSKextResetPgoCountersUnlock(); + return r; } @@ -147,211 +150,203 @@ pgo_reset_counters() * EIO llvm returned an error. shouldn't ever happen. */ -int grab_pgo_data(struct proc *p, - struct grab_pgo_data_args *uap, - register_t *retval) +int +grab_pgo_data(struct proc *p, + struct grab_pgo_data_args *uap, + register_t *retval) { - char *buffer = NULL; - int err = 0; + char *buffer = NULL; + int err = 0; - (void) p; + (void) p; - if (!kauth_cred_issuser(kauth_cred_get())) { - err = EPERM; - goto out; - } + if (!kauth_cred_issuser(kauth_cred_get())) { + err = EPERM; + goto out; + } #if CONFIG_MACF - err = mac_system_check_info(kauth_cred_get(), "kern.profiling_data"); - if (err) { - goto out; - } + err = mac_system_check_info(kauth_cred_get(), "kern.profiling_data"); + if (err) { + goto out; + } #endif - if ( uap->flags & ~PGO_ALL_FLAGS || - uap->size < 0 || - (uap->size > 0 && uap->buffer == 0)) - { - err = EINVAL; - goto out; - } - - if ( uap->flags & PGO_RESET_ALL ) { - if (uap->flags != PGO_RESET_ALL || uap->uuid || uap->buffer || uap->size ) { - err = EINVAL; - } else { - kern_return_t r = pgo_reset_counters(); - switch (r) { - case KERN_SUCCESS: - err = 0; - break; - case KERN_OPERATION_TIMED_OUT: - err = ETIMEDOUT; - break; - default: - err = EIO; - break; - } - } - goto out; - } - - *retval = 0; - - if (uap->uuid) { - uuid_t uuid; - err = copyin(uap->uuid, &uuid, sizeof(uuid)); - if (err) { - goto out; - } - - if (uap->buffer == 0 && uap->size == 0) { - uint64_t size64; - - if (uap->flags & PGO_WAIT_FOR_UNLOAD) { - err = EINVAL; - goto out; - } - - err = OSKextGrabPgoData(uuid, &size64, NULL, 0, 0, !!(uap->flags & PGO_METADATA)); - if (size64 == 0 && err == 0) { - err = EIO; - } - if (err) { - goto out; - } - - ssize_t size = size64; - if ( ((uint64_t) size) != size64 || - size < 0 ) - { - err = ERANGE; - goto out; - } - - *retval = size; - err = 0; - goto out; - - } else if (!uap->buffer || uap->size <= 0) { - - err = EINVAL; - goto out; - - } else { - - uint64_t size64 = 0 ; - - err = OSKextGrabPgoData(uuid, &size64, NULL, 0, 
- false, - !!(uap->flags & PGO_METADATA)); - - if (size64 == 0 && err == 0) { - err = EIO; - } - if (err) { - goto out; - } - - if (uap->size < 0 || (uint64_t)uap->size < size64) { - err = EINVAL; - goto out; - } - - MALLOC(buffer, char *, size64, M_TEMP, M_WAITOK | M_ZERO); - if (!buffer) { - err = ENOMEM; - goto out; - } - - err = OSKextGrabPgoData(uuid, &size64, buffer, size64, - !!(uap->flags & PGO_WAIT_FOR_UNLOAD), - !!(uap->flags & PGO_METADATA)); - if (err) { - goto out; - } - - ssize_t size = size64; - if ( ((uint64_t) size) != size64 || - size < 0 ) - { - err = ERANGE; - goto out; - } - - err = copyout(buffer, uap->buffer, size); - if (err) { - goto out; - } - - *retval = size; - goto out; - } - } + if (uap->flags & ~PGO_ALL_FLAGS || + uap->size < 0 || + (uap->size > 0 && uap->buffer == 0)) { + err = EINVAL; + goto out; + } + + if (uap->flags & PGO_RESET_ALL) { + if (uap->flags != PGO_RESET_ALL || uap->uuid || uap->buffer || uap->size) { + err = EINVAL; + } else { + kern_return_t r = pgo_reset_counters(); + switch (r) { + case KERN_SUCCESS: + err = 0; + break; + case KERN_OPERATION_TIMED_OUT: + err = ETIMEDOUT; + break; + default: + err = EIO; + break; + } + } + goto out; + } + + *retval = 0; + + if (uap->uuid) { + uuid_t uuid; + err = copyin(uap->uuid, &uuid, sizeof(uuid)); + if (err) { + goto out; + } + + if (uap->buffer == 0 && uap->size == 0) { + uint64_t size64; + + if (uap->flags & PGO_WAIT_FOR_UNLOAD) { + err = EINVAL; + goto out; + } + + err = OSKextGrabPgoData(uuid, &size64, NULL, 0, 0, !!(uap->flags & PGO_METADATA)); + if (size64 == 0 && err == 0) { + err = EIO; + } + if (err) { + goto out; + } + + ssize_t size = size64; + if (((uint64_t) size) != size64 || + size < 0) { + err = ERANGE; + goto out; + } + + *retval = size; + err = 0; + goto out; + } else if (!uap->buffer || uap->size <= 0) { + err = EINVAL; + goto out; + } else { + uint64_t size64 = 0; + + err = OSKextGrabPgoData(uuid, &size64, NULL, 0, + false, + !!(uap->flags & PGO_METADATA)); + + if (size64 == 0 && err == 0) { + err = EIO; + } + if (err) { + goto out; + } + + if (uap->size < 0 || (uint64_t)uap->size < size64) { + err = EINVAL; + goto out; + } + + MALLOC(buffer, char *, size64, M_TEMP, M_WAITOK | M_ZERO); + if (!buffer) { + err = ENOMEM; + goto out; + } + + err = OSKextGrabPgoData(uuid, &size64, buffer, size64, + !!(uap->flags & PGO_WAIT_FOR_UNLOAD), + !!(uap->flags & PGO_METADATA)); + if (err) { + goto out; + } + + ssize_t size = size64; + if (((uint64_t) size) != size64 || + size < 0) { + err = ERANGE; + goto out; + } + + err = copyout(buffer, uap->buffer, size); + if (err) { + goto out; + } + + *retval = size; + goto out; + } + } #ifdef PROFILE - uint64_t size64 = get_size_for_buffer(uap->flags); - ssize_t size = size64; - - if (uap->flags & (PGO_WAIT_FOR_UNLOAD | PGO_METADATA)) { - err = EINVAL; - goto out; - } - - if ( ((uint64_t) size) != size64 || - size < 0 ) - { - err = ERANGE; - goto out; - } - - - if (uap->buffer == 0 && uap->size == 0) { - *retval = size; - err = 0; - goto out; - } else if (uap->size < size) { - err = EINVAL; - goto out; - } else { - MALLOC(buffer, char *, size, M_TEMP, M_WAITOK | M_ZERO); - if (!buffer) { - err = ENOMEM; - goto out; - } - - err = write_buffer(uap->flags, buffer); - if (err) - { - err = EIO; - goto out; - } - - err = copyout(buffer, uap->buffer, size); - if (err) { - goto out; - } - - *retval = size; - goto out; - } + uint64_t size64 = get_size_for_buffer(uap->flags); + ssize_t size = size64; + + if (uap->flags & (PGO_WAIT_FOR_UNLOAD | PGO_METADATA)) { + err 
= EINVAL; + goto out; + } + + if (((uint64_t) size) != size64 || + size < 0) { + err = ERANGE; + goto out; + } + + + if (uap->buffer == 0 && uap->size == 0) { + *retval = size; + err = 0; + goto out; + } else if (uap->size < size) { + err = EINVAL; + goto out; + } else { + MALLOC(buffer, char *, size, M_TEMP, M_WAITOK | M_ZERO); + if (!buffer) { + err = ENOMEM; + goto out; + } + + err = write_buffer(uap->flags, buffer); + if (err) { + err = EIO; + goto out; + } + + err = copyout(buffer, uap->buffer, size); + if (err) { + goto out; + } + + *retval = size; + goto out; + } #else - *retval = -1; - err = ENOSYS; - goto out; + *retval = -1; + err = ENOSYS; + goto out; #endif out: - if (buffer) { - FREE(buffer, M_TEMP); - } - if (err) { - *retval = -1; - } - return err; + if (buffer) { + FREE(buffer, M_TEMP); + } + if (err) { + *retval = -1; + } + return err; } diff --git a/bsd/pthread/bsdthread_private.h b/bsd/pthread/bsdthread_private.h index af854feb5..add1853ba 100644 --- a/bsd/pthread/bsdthread_private.h +++ b/bsd/pthread/bsdthread_private.h @@ -37,25 +37,25 @@ /* pthread bsdthread_ctl sysctl commands */ /* bsdthread_ctl(BSDTHREAD_CTL_SET_QOS, thread_port, tsd_entry_addr, 0) */ -#define BSDTHREAD_CTL_SET_QOS 0x10 +#define BSDTHREAD_CTL_SET_QOS 0x10 /* bsdthread_ctl(BSDTHREAD_CTL_GET_QOS, thread_port, 0, 0) */ -#define BSDTHREAD_CTL_GET_QOS 0x20 +#define BSDTHREAD_CTL_GET_QOS 0x20 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread_port, priority, 0) */ -#define BSDTHREAD_CTL_QOS_OVERRIDE_START 0x40 +#define BSDTHREAD_CTL_QOS_OVERRIDE_START 0x40 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread_port, 0, 0) */ -#define BSDTHREAD_CTL_QOS_OVERRIDE_END 0x80 +#define BSDTHREAD_CTL_QOS_OVERRIDE_END 0x80 /* bsdthread_ctl(BSDTHREAD_CTL_SET_SELF, priority, voucher, flags) */ -#define BSDTHREAD_CTL_SET_SELF 0x100 +#define BSDTHREAD_CTL_SET_SELF 0x100 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_RESET, 0, 0, 0) */ -#define BSDTHREAD_CTL_QOS_OVERRIDE_RESET 0x200 +#define BSDTHREAD_CTL_QOS_OVERRIDE_RESET 0x200 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread_port, priority, 0) */ -#define BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH 0x400 +#define BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH 0x400 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread_port, priority, resource) */ -#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD 0x401 +#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD 0x401 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET, 0|1 (?reset_all), resource, 0) */ -#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET 0x402 +#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET 0x402 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_MAX_PARALLELISM, priority, flags, 0) */ -#define BSDTHREAD_CTL_QOS_MAX_PARALLELISM 0x800 +#define BSDTHREAD_CTL_QOS_MAX_PARALLELISM 0x800 #define _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL 0x1 #define _PTHREAD_QOS_PARALLELISM_REALTIME 0x2 diff --git a/bsd/pthread/priority_private.h b/bsd/pthread/priority_private.h index 5d20e08b3..b73c0ad95 100644 --- a/bsd/pthread/priority_private.h +++ b/bsd/pthread/priority_private.h @@ -93,34 +93,34 @@ */ typedef unsigned long pthread_priority_t; -#define _PTHREAD_PRIORITY_FLAGS_MASK 0xff000000 -#define _PTHREAD_PRIORITY_FLAGS_SHIFT (24ull) - -#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 -#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 /* dispatch only */ -#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 /* dispatch only */ -#define 
_PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 -#define _PTHREAD_PRIORITY_SCHED_PRI_MASK 0x0000ffff -#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 /* dispatch only */ -#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 /* unused */ -#define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 -#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 -#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 -#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG _PTHREAD_PRIORITY_FALLBACK_FLAG // compat - -#define _PTHREAD_PRIORITY_ENCODING_MASK 0x00a00000 -#define _PTHREAD_PRIORITY_ENCODING_SHIFT (22ull) -#define _PTHREAD_PRIORITY_ENCODING_V0 0x00000000 -#define _PTHREAD_PRIORITY_ENCODING_V1 0x00400000 /* unused */ -#define _PTHREAD_PRIORITY_ENCODING_V2 0x00800000 /* unused */ -#define _PTHREAD_PRIORITY_ENCODING_V3 0x00a00000 /* unused */ - -#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00 -#define _PTHREAD_PRIORITY_VALID_QOS_CLASS_MASK 0x00003f00 -#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) - -#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff -#define _PTHREAD_PRIORITY_PRIORITY_SHIFT (0) +#define _PTHREAD_PRIORITY_FLAGS_MASK 0xff000000 +#define _PTHREAD_PRIORITY_FLAGS_SHIFT (24ull) + +#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000 +#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000 /* dispatch only */ +#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000 /* dispatch only */ +#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000 +#define _PTHREAD_PRIORITY_SCHED_PRI_MASK 0x0000ffff +#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000 /* dispatch only */ +#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000 /* unused */ +#define _PTHREAD_PRIORITY_FALLBACK_FLAG 0x04000000 +#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000 +#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000 +#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG _PTHREAD_PRIORITY_FALLBACK_FLAG // compat + +#define _PTHREAD_PRIORITY_ENCODING_MASK 0x00a00000 +#define _PTHREAD_PRIORITY_ENCODING_SHIFT (22ull) +#define _PTHREAD_PRIORITY_ENCODING_V0 0x00000000 +#define _PTHREAD_PRIORITY_ENCODING_V1 0x00400000 /* unused */ +#define _PTHREAD_PRIORITY_ENCODING_V2 0x00800000 /* unused */ +#define _PTHREAD_PRIORITY_ENCODING_V3 0x00a00000 /* unused */ + +#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00 +#define _PTHREAD_PRIORITY_VALID_QOS_CLASS_MASK 0x00003f00 +#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull) + +#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff +#define _PTHREAD_PRIORITY_PRIORITY_SHIFT (0) #if PRIVATE #if XNU_KERNEL_PRIVATE && !defined(__PTHREAD_EXPOSE_INTERNALS__) @@ -139,16 +139,16 @@ static inline bool _pthread_priority_has_qos(pthread_priority_t pp) { return (pp & (_PTHREAD_PRIORITY_SCHED_PRI_FLAG | - _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) == 0 && - (pp & (_PTHREAD_PRIORITY_QOS_CLASS_MASK & - ~_PTHREAD_PRIORITY_VALID_QOS_CLASS_MASK)) == 0 && - (pp & _PTHREAD_PRIORITY_VALID_QOS_CLASS_MASK) != 0; + _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) == 0 && + (pp & (_PTHREAD_PRIORITY_QOS_CLASS_MASK & + ~_PTHREAD_PRIORITY_VALID_QOS_CLASS_MASK)) == 0 && + (pp & _PTHREAD_PRIORITY_VALID_QOS_CLASS_MASK) != 0; } __attribute__((always_inline, const)) static inline pthread_priority_t _pthread_priority_make_from_thread_qos(thread_qos_t qos, int relpri, - unsigned long flags) + unsigned long flags) { pthread_priority_t pp = (flags & _PTHREAD_PRIORITY_FLAGS_MASK); if (qos && qos < THREAD_QOS_LAST) { diff --git a/bsd/pthread/pthread_priority.c b/bsd/pthread/pthread_priority.c index 53cda953a..05fef52c6 100644 --- a/bsd/pthread/pthread_priority.c +++ 
b/bsd/pthread/pthread_priority.c @@ -45,9 +45,9 @@ _pthread_priority_normalize(pthread_priority_t pp) pp |= _PTHREAD_PRIORITY_PRIORITY_MASK; } return pp & (_PTHREAD_PRIORITY_OVERCOMMIT_FLAG | - _PTHREAD_PRIORITY_FALLBACK_FLAG | - _PTHREAD_PRIORITY_QOS_CLASS_MASK | - _PTHREAD_PRIORITY_PRIORITY_MASK); + _PTHREAD_PRIORITY_FALLBACK_FLAG | + _PTHREAD_PRIORITY_QOS_CLASS_MASK | + _PTHREAD_PRIORITY_PRIORITY_MASK); } return _pthread_unspecified_priority(); } @@ -61,7 +61,7 @@ _pthread_priority_normalize_for_ipc(pthread_priority_t pp) pp |= _PTHREAD_PRIORITY_PRIORITY_MASK; } return pp & (_PTHREAD_PRIORITY_QOS_CLASS_MASK | - _PTHREAD_PRIORITY_PRIORITY_MASK); + _PTHREAD_PRIORITY_PRIORITY_MASK); } return _pthread_unspecified_priority(); } @@ -82,5 +82,5 @@ _pthread_priority_combine(pthread_priority_t base_pp, thread_qos_t qos) } return _pthread_priority_make_from_thread_qos(qos, 0, - base_pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); + base_pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG); } diff --git a/bsd/pthread/pthread_shims.c b/bsd/pthread/pthread_shims.c index b23487ec0..2f3aadbf3 100644 --- a/bsd/pthread/pthread_shims.c +++ b/bsd/pthread/pthread_shims.c @@ -76,11 +76,11 @@ extern void thread_deallocate_safe(thread_t thread); #define PTHREAD_STRUCT_ACCESSOR(get, set, rettype, structtype, member) \ static rettype \ get(structtype x) { \ - return (x)->member; \ + return (x)->member; \ } \ static void \ set(structtype x, rettype y) { \ - (x)->member = y; \ + (x)->member = y; \ } PTHREAD_STRUCT_ACCESSOR(proc_get_threadstart, proc_set_threadstart, user_addr_t, struct proc*, p_threadstart); @@ -132,17 +132,20 @@ pthread_bootstrap_return(void) } static uint32_t -get_task_threadmax(void) { +get_task_threadmax(void) +{ return task_threadmax; } static uint64_t -proc_get_register(struct proc *p) { - return (p->p_lflag & P_LREGISTER); +proc_get_register(struct proc *p) +{ + return p->p_lflag & P_LREGISTER; } static void -proc_set_register(struct proc *p) { +proc_set_register(struct proc *p) +{ proc_setregister(p); } @@ -170,10 +173,11 @@ qos_main_thread_active(void) return TRUE; } -static int proc_usynch_get_requested_thread_qos(struct uthread *uth) +static int +proc_usynch_get_requested_thread_qos(struct uthread *uth) { - thread_t thread = uth ? uth->uu_thread : current_thread(); - int requested_qos; + thread_t thread = uth ? uth->uu_thread : current_thread(); + int requested_qos; requested_qos = proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS); @@ -192,49 +196,49 @@ static int proc_usynch_get_requested_thread_qos(struct uthread *uth) static boolean_t proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth, - uint64_t tid, int override_qos, boolean_t first_override_for_resource, - user_addr_t resource, int resource_type) + uint64_t tid, int override_qos, boolean_t first_override_for_resource, + user_addr_t resource, int resource_type) { thread_t thread = uth ? uth->uu_thread : THREAD_NULL; return proc_thread_qos_add_override(task, thread, tid, override_qos, - first_override_for_resource, resource, resource_type) == 0; + first_override_for_resource, resource, resource_type) == 0; } static boolean_t proc_usynch_thread_qos_remove_override_for_resource(task_t task, - struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type) + struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type) { thread_t thread = uth ? 
uth->uu_thread : THREAD_NULL; return proc_thread_qos_remove_override(task, thread, tid, resource, - resource_type) == 0; + resource_type) == 0; } static wait_result_t psynch_wait_prepare(uintptr_t kwq, struct turnstile **tstore, - thread_t owner, block_hint_t block_hint, uint64_t deadline) + thread_t owner, block_hint_t block_hint, uint64_t deadline) { struct turnstile *ts; wait_result_t wr; if (tstore) { ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL, - TURNSTILE_PTHREAD_MUTEX); + TURNSTILE_PTHREAD_MUTEX); turnstile_update_inheritor(ts, owner, - (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD)); + (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD)); thread_set_pending_block_hint(current_thread(), block_hint); wr = waitq_assert_wait64_leeway(&ts->ts_waitq, (event64_t)kwq, - THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, deadline, 0); + THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, deadline, 0); } else { thread_set_pending_block_hint(current_thread(), block_hint); wr = assert_wait_deadline_with_leeway((event_t)kwq, THREAD_ABORTSAFE, - TIMEOUT_URGENCY_USER_NORMAL, deadline, 0); + TIMEOUT_URGENCY_USER_NORMAL, deadline, 0); } return wr; @@ -256,15 +260,15 @@ psynch_wait_complete(uintptr_t kwq, struct turnstile **tstore) static void psynch_wait_update_owner(uintptr_t kwq, thread_t owner, - struct turnstile **tstore) + struct turnstile **tstore) { struct turnstile *ts; ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL, - TURNSTILE_PTHREAD_MUTEX); + TURNSTILE_PTHREAD_MUTEX); turnstile_update_inheritor(ts, owner, - (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); + (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); turnstile_complete(kwq, tstore, NULL); } @@ -277,7 +281,7 @@ psynch_wait_cleanup(void) static kern_return_t psynch_wait_wakeup(uintptr_t kwq, struct ksyn_waitq_element *kwe, - struct turnstile **tstore) + struct turnstile **tstore) { struct uthread *uth; struct turnstile *ts; @@ -288,12 +292,12 @@ psynch_wait_wakeup(uintptr_t kwq, struct ksyn_waitq_element *kwe, if (tstore) { ts = turnstile_prepare(kwq, tstore, TURNSTILE_NULL, - TURNSTILE_PTHREAD_MUTEX); + TURNSTILE_PTHREAD_MUTEX); turnstile_update_inheritor(ts, uth->uu_thread, - (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); + (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); kr = waitq_wakeup64_thread(&ts->ts_waitq, (event64_t)kwq, - uth->uu_thread, THREAD_AWAKENED); + uth->uu_thread, THREAD_AWAKENED); turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); turnstile_complete(kwq, tstore, NULL); @@ -339,20 +343,20 @@ bsdthread_register(struct proc *p, struct bsdthread_register_args *uap, __unused { kern_return_t kr; static_assert(offsetof(struct bsdthread_register_args, threadstart) + sizeof(user_addr_t) == - offsetof(struct bsdthread_register_args, wqthread)); + offsetof(struct bsdthread_register_args, wqthread)); kr = machine_thread_function_pointers_convert_from_user(current_thread(), &uap->threadstart, 2); assert(kr == KERN_SUCCESS); if (pthread_functions->version >= 1) { return pthread_functions->bsdthread_register2(p, uap->threadstart, - uap->wqthread, uap->flags, uap->stack_addr_hint, - uap->targetconc_ptr, uap->dispatchqueue_offset, - uap->tsd_offset, retval); + uap->wqthread, uap->flags, uap->stack_addr_hint, + uap->targetconc_ptr, uap->dispatchqueue_offset, + uap->tsd_offset, retval); } else { return pthread_functions->bsdthread_register(p, uap->threadstart, - uap->wqthread, uap->flags, 
uap->stack_addr_hint, - uap->targetconc_ptr, uap->dispatchqueue_offset, - retval); + uap->wqthread, uap->flags, uap->stack_addr_hint, + uap->targetconc_ptr, uap->dispatchqueue_offset, + retval); } } @@ -411,7 +415,7 @@ psynch_cvclrprepost(proc_t p, struct psynch_cvclrprepost_args * uap, int *retval } int -psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t *retval) +psynch_rw_longrdlock(proc_t p, struct psynch_rw_longrdlock_args * uap, uint32_t *retval) { return pthread_functions->psynch_rw_longrdlock(p, uap->rwlock, uap->lgenval, uap->ugenval, uap->rw_wc, uap->flags, retval); } @@ -461,15 +465,17 @@ psynch_rw_downgrade(__unused proc_t p, __unused struct psynch_rw_downgrade_args void kdp_pthread_find_owner(thread_t thread, struct stackshot_thread_waitinfo *waitinfo) { - if (pthread_functions->pthread_find_owner) + if (pthread_functions->pthread_find_owner) { pthread_functions->pthread_find_owner(thread, waitinfo); + } } void * kdp_pthread_get_thread_kwq(thread_t thread) { - if (pthread_functions->pthread_get_thread_kwq) + if (pthread_functions->pthread_get_thread_kwq) { return pthread_functions->pthread_get_thread_kwq(thread); + } return NULL; } diff --git a/bsd/pthread/pthread_workqueue.c b/bsd/pthread/pthread_workqueue.c index 0e8aee8cb..0ad001488 100644 --- a/bsd/pthread/pthread_workqueue.c +++ b/bsd/pthread/pthread_workqueue.c @@ -30,7 +30,7 @@ #include // panic() should be marked noreturn -extern void panic(const char *string, ...) __printflike(1,2) __dead2; +extern void panic(const char *string, ...) __printflike(1, 2) __dead2; #include #include @@ -39,7 +39,7 @@ extern void panic(const char *string, ...) __printflike(1,2) __dead2; #include #include #include -#include /* for thread_exception_return */ +#include /* for thread_exception_return */ #include #include #include @@ -65,7 +65,7 @@ extern void panic(const char *string, ...) __printflike(1,2) __dead2; #include #include #include -#include /* for fill_procworkqueue */ +#include /* for fill_procworkqueue */ #include #include #include @@ -82,19 +82,19 @@ extern void panic(const char *string, ...) 
__printflike(1,2) __dead2; #include -extern thread_t port_name_to_thread(mach_port_name_t port_name); /* osfmk/kern/ipc_tt.h */ +extern thread_t port_name_to_thread(mach_port_name_t port_name); /* osfmk/kern/ipc_tt.h */ static void workq_unpark_continue(void *uth, wait_result_t wr) __dead2; static void workq_schedule_creator(proc_t p, struct workqueue *wq, int flags); static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth, - workq_threadreq_t req); + workq_threadreq_t req); static uint32_t workq_constrained_allowance(struct workqueue *wq, - thread_qos_t at_qos, struct uthread *uth, bool may_start_timer); + thread_qos_t at_qos, struct uthread *uth, bool may_start_timer); static bool workq_thread_is_busy(uint64_t cur_ts, - _Atomic uint64_t *lastblocked_tsp); + _Atomic uint64_t *lastblocked_tsp); static int workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS; @@ -106,10 +106,10 @@ struct workq_usec_var { }; #define WORKQ_SYSCTL_USECS(var, init) \ - static struct workq_usec_var var = { .usecs = init }; \ - SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \ - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \ - workq_sysctl_handle_usecs, "I", "") + static struct workq_usec_var var = { .usecs = init }; \ + SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \ + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \ + workq_sysctl_handle_usecs, "I", "") static lck_grp_t *workq_lck_grp; static lck_attr_t *workq_lck_attr; @@ -119,7 +119,7 @@ os_refgrp_decl(static, workq_refgrp, "workq", NULL); static zone_t workq_zone_workqueue; static zone_t workq_zone_threadreq; -WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS); +WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS); WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS); WORKQ_SYSCTL_USECS(wq_max_timer_interval, WQ_MAX_TIMER_INTERVAL_USECS); static uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS; @@ -136,18 +136,19 @@ workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS #pragma unused(arg2) struct workq_usec_var *v = arg1; int error = sysctl_handle_int(oidp, &v->usecs, 0, req); - if (error || !req->newptr) + if (error || !req->newptr) { return error; + } clock_interval_to_absolutetime_interval(v->usecs, NSEC_PER_USEC, - &v->abstime); + &v->abstime); return 0; } SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED, - &wq_max_threads, 0, ""); + &wq_max_threads, 0, ""); SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED, - &wq_max_constrained_threads, 0, ""); + &wq_max_constrained_threads, 0, ""); #pragma mark p_wqptr @@ -236,7 +237,7 @@ workq_thread_wakeup(struct uthread *uth) #define WQ_THACTIVE_BUCKET_HALF (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1)) static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3, - "Make sure we have space to encode a QoS"); + "Make sure we have space to encode a QoS"); static inline wq_thactive_t _wq_thactive(struct workqueue *wq) @@ -258,7 +259,7 @@ _wq_bucket(thread_qos_t qos) } #define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \ - ((tha) >> WQ_THACTIVE_QOS_SHIFT) + ((tha) >> WQ_THACTIVE_QOS_SHIFT) static inline thread_qos_t _wq_thactive_best_constrained_req_qos(struct workqueue *wq) @@ -276,7 +277,7 @@ _wq_thactive_refresh_best_constrained_req_qos(struct workqueue *wq) workq_threadreq_t req; req = priority_queue_max(&wq->wq_constrained_queue, - struct workq_threadreq_s, tr_entry); + struct workq_threadreq_s, tr_entry); new_qos = req ? 
req->tr_qos : THREAD_QOS_UNSPECIFIED; old_qos = _wq_thactive_best_constrained_req_qos(wq); if (old_qos != new_qos) { @@ -289,7 +290,7 @@ _wq_thactive_refresh_best_constrained_req_qos(struct workqueue *wq) v = os_atomic_add(&wq->wq_thactive, v, relaxed); #ifdef __LP64__ WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, (uint64_t)v, - (uint64_t)(v >> 64), 0, 0); + (uint64_t)(v >> 64), 0, 0); #else WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, v, 0, 0, 0); #endif @@ -318,10 +319,10 @@ _wq_thactive_dec(struct workqueue *wq, thread_qos_t qos) static inline void _wq_thactive_move(struct workqueue *wq, - thread_qos_t old_qos, thread_qos_t new_qos) + thread_qos_t old_qos, thread_qos_t new_qos) { wq_thactive_t v = _wq_thactive_offset_for_qos(new_qos) - - _wq_thactive_offset_for_qos(old_qos); + _wq_thactive_offset_for_qos(old_qos); os_atomic_add_orig(&wq->wq_thactive, v, relaxed); wq->wq_thscheduled_count[_wq_bucket(old_qos)]--; wq->wq_thscheduled_count[_wq_bucket(new_qos)]++; @@ -329,7 +330,7 @@ _wq_thactive_move(struct workqueue *wq, static inline uint32_t _wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v, - thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount) + thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount) { uint32_t count = 0, active; uint64_t curtime; @@ -405,7 +406,7 @@ workq_lock_spin_is_acquired_kdp(struct workqueue *wq) static inline void workq_lock_spin(struct workqueue *wq) { - lck_spin_lock(&wq->wq_lock); + lck_spin_lock_grp(&wq->wq_lock, workq_lck_grp); } static inline void @@ -417,7 +418,7 @@ workq_lock_held(__assert_only struct workqueue *wq) static inline bool workq_lock_try(struct workqueue *wq) { - return lck_spin_try_lock(&wq->wq_lock); + return lck_spin_try_lock_grp(&wq->wq_lock, workq_lck_grp); } static inline void @@ -429,7 +430,7 @@ workq_unlock(struct workqueue *wq) #pragma mark idle thread lists #define WORKQ_POLICY_INIT(qos) \ - (struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos } + (struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos } static inline thread_qos_t workq_pri_bucket(struct uu_workq_policy req) @@ -491,8 +492,8 @@ workq_thread_needs_priority_change(workq_threadreq_t req, struct uthread *uth) static void workq_thread_update_bucket(proc_t p, struct workqueue *wq, struct uthread *uth, - struct uu_workq_policy old_pri, struct uu_workq_policy new_pri, - bool force_run) + struct uu_workq_policy old_pri, struct uu_workq_policy new_pri, + bool force_run) { thread_qos_t old_bucket = old_pri.qos_bucket; thread_qos_t new_bucket = workq_pri_bucket(new_pri); @@ -552,14 +553,14 @@ workq_thread_reset_cpupercent(workq_threadreq_t req, struct uthread *uth) if (trp.trp_flags & TRP_CPUPERCENT) { thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, trp.trp_cpupercent, - (uint64_t)trp.trp_refillms * NSEC_PER_SEC); + (uint64_t)trp.trp_refillms * NSEC_PER_SEC); uth->uu_workq_flags |= UT_WORKQ_CPUPERCENT; } } static void workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth, - workq_threadreq_t req) + workq_threadreq_t req) { thread_t th = uth->uu_thread; thread_qos_t qos = req ? 
req->tr_qos : WORKQ_THREAD_QOS_CLEANUP; @@ -585,7 +586,7 @@ workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth, if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) { mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK; thread_set_workq_pri(th, THREAD_QOS_UNSPECIFIED, mgr_pri, - POLICY_TIMESHARE); + POLICY_TIMESHARE); return; } @@ -617,8 +618,9 @@ workq_thread_set_max_qos(struct proc *p, struct kqrequest *kqr) struct workqueue *wq = proc_get_wqptr_fast(p); thread_qos_t qos = kqr->kqr_qos_index; - if (uth->uu_workq_pri.qos_max == qos) + if (uth->uu_workq_pri.qos_max == qos) { return; + } workq_lock_spin(wq); old_pri = new_pri = uth->uu_workq_pri; @@ -667,7 +669,7 @@ workq_kill_delay_for_idle_thread(struct workqueue *wq) static inline bool workq_should_kill_idle_thread(struct workqueue *wq, struct uthread *uth, - uint64_t now) + uint64_t now) { uint64_t delay = workq_kill_delay_for_idle_thread(wq); return now - uth->uu_save.uus_workq_park_data.idle_stamp > delay; @@ -692,8 +694,8 @@ workq_death_call_schedule(struct workqueue *wq, uint64_t deadline) * fall into long-term timer list shenanigans. */ thread_call_enter_delayed_with_leeway(wq->wq_death_call, NULL, deadline, - wq_reduce_pool_window.abstime / 10, - THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND); + wq_reduce_pool_window.abstime / 10, + THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND); } /* @@ -707,21 +709,24 @@ workq_death_policy_evaluate(struct workqueue *wq, uint16_t decrement) struct uthread *uth; assert(wq->wq_thdying_count >= decrement); - if ((wq->wq_thdying_count -= decrement) > 0) + if ((wq->wq_thdying_count -= decrement) > 0) { return; + } - if (wq->wq_thidlecount <= 1) + if (wq->wq_thidlecount <= 1) { return; + } - if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) + if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) { return; + } uint64_t now = mach_absolute_time(); uint64_t delay = workq_kill_delay_for_idle_thread(wq); if (now - uth->uu_save.uus_workq_park_data.idle_stamp > delay) { WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START, - wq, wq->wq_thidlecount, 0, 0, 0); + wq, wq->wq_thidlecount, 0, 0, 0); wq->wq_thdying_count++; uth->uu_workq_flags |= UT_WORKQ_DYING; workq_thread_wakeup(uth); @@ -729,7 +734,7 @@ workq_death_policy_evaluate(struct workqueue *wq, uint16_t decrement) } workq_death_call_schedule(wq, - uth->uu_save.uus_workq_park_data.idle_stamp + delay); + uth->uu_save.uus_workq_park_data.idle_stamp + delay); } void @@ -741,7 +746,7 @@ workq_thread_terminate(struct proc *p, struct uthread *uth) TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry); if (uth->uu_workq_flags & UT_WORKQ_DYING) { WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_END, - wq, wq->wq_thidlecount, 0, 0, 0); + wq, wq->wq_thidlecount, 0, 0, 0); workq_death_policy_evaluate(wq, 1); } if (wq->wq_nthreads-- == wq_max_threads) { @@ -840,14 +845,14 @@ workq_add_new_idle_thread(proc_t p, struct workqueue *wq) kret = pthread_functions->workq_create_threadstack(p, vmap, &th_stackaddr); if (kret != KERN_SUCCESS) { WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, - kret, 1, 0, 0); + kret, 1, 0, 0); goto out; } kret = thread_create_workq_waiting(p->task, workq_unpark_continue, &th); if (kret != KERN_SUCCESS) { WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, - kret, 0, 0, 0); + kret, 0, 0, 0); pthread_functions->workq_destroy_threadstack(p, vmap, th_stackaddr); goto out; } @@ -881,7 +886,7 @@ out: __attribute__((noreturn, noinline)) static void 
workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq, - struct uthread *uth, uint32_t death_flags) + struct uthread *uth, uint32_t death_flags) { thread_qos_t qos = workq_pri_override(uth->uu_workq_pri); bool first_use = uth->uu_workq_flags & UT_WORKQ_NEW; @@ -910,10 +915,12 @@ workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq, thread_t th = uth->uu_thread; vm_map_t vmap = get_task_map(p->task); - if (!first_use) flags |= WQ_FLAG_THREAD_REUSE; + if (!first_use) { + flags |= WQ_FLAG_THREAD_REUSE; + } pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr, - uth->uu_workq_thport, 0, setup_flags, flags); + uth->uu_workq_thport, 0, setup_flags, flags); __builtin_unreachable(); } @@ -926,7 +933,7 @@ workq_is_current_thread_updating_turnstile(struct workqueue *wq) __attribute__((always_inline)) static inline void workq_perform_turnstile_operation_locked(struct workqueue *wq, - void (^operation)(void)) + void (^operation)(void)) { workq_lock_held(wq); wq->wq_turnstile_updater = current_thread(); @@ -936,14 +943,14 @@ workq_perform_turnstile_operation_locked(struct workqueue *wq, static void workq_turnstile_update_inheritor(struct workqueue *wq, - turnstile_inheritor_t inheritor, - turnstile_update_flags_t flags) + turnstile_inheritor_t inheritor, + turnstile_update_flags_t flags) { workq_perform_turnstile_operation_locked(wq, ^{ turnstile_update_inheritor(wq->wq_turnstile, inheritor, - flags | TURNSTILE_IMMEDIATE_UPDATE); + flags | TURNSTILE_IMMEDIATE_UPDATE); turnstile_update_inheritor_complete(wq->wq_turnstile, - TURNSTILE_INTERLOCK_HELD); + TURNSTILE_INTERLOCK_HELD); }); } @@ -961,7 +968,7 @@ workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth) if (wq->wq_creator == uth) { WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0, - uth->uu_save.uus_workq_park_data.yields, 0); + uth->uu_save.uus_workq_park_data.yields, 0); wq->wq_creator = NULL; if (wq->wq_reqcount) { workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ); @@ -986,8 +993,8 @@ workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth) uint16_t cur_idle = wq->wq_thidlecount; if (cur_idle >= wq_max_constrained_threads || - (wq->wq_thdying_count == 0 && oldest && - workq_should_kill_idle_thread(wq, oldest, now))) { + (wq->wq_thdying_count == 0 && oldest && + workq_should_kill_idle_thread(wq, oldest, now))) { /* * Immediately kill threads if we have too many of them.
* @@ -1003,7 +1010,7 @@ workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth) } WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START, - wq, cur_idle, 0, 0, 0); + wq, cur_idle, 0, 0, 0); wq->wq_thdying_count++; uth->uu_workq_flags |= UT_WORKQ_DYING; uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP; @@ -1017,7 +1024,7 @@ workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth) wq->wq_thidlecount = cur_idle; if (cur_idle >= wq_death_max_load && tail && - tail->uu_save.uus_workq_park_data.has_stack) { + tail->uu_save.uus_workq_park_data.has_stack) { uth->uu_save.uus_workq_park_data.has_stack = false; TAILQ_INSERT_TAIL(&wq->wq_thidlelist, uth, uu_workq_entry); } else { @@ -1078,8 +1085,8 @@ workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req) return true; } if (priority_queue_insert(workq_priority_queue_for_req(wq, req), - &req->tr_entry, workq_priority_for_req(req), - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &req->tr_entry, workq_priority_for_req(req), + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) { _wq_thactive_refresh_best_constrained_req_qos(wq); } @@ -1105,7 +1112,7 @@ workq_threadreq_dequeue(struct workqueue *wq, workq_threadreq_t req) return true; } if (priority_queue_remove(workq_priority_queue_for_req(wq, req), - &req->tr_entry, PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &req->tr_entry, PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) { _wq_thactive_refresh_best_constrained_req_qos(wq); } @@ -1134,7 +1141,7 @@ workq_threadreq_destroy(proc_t p, workq_threadreq_t req) */ static void workq_threadreq_bind_and_unlock(proc_t p, struct workqueue *wq, - workq_threadreq_t req, struct uthread *uth) + workq_threadreq_t req, struct uthread *uth) { uint8_t tr_flags = req->tr_flags; bool needs_commit = false; @@ -1149,9 +1156,9 @@ workq_threadreq_bind_and_unlock(proc_t p, struct workqueue *wq, if (wq->wq_creator == uth) { WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0, - uth->uu_save.uus_workq_park_data.yields, 0); + uth->uu_save.uus_workq_park_data.yields, 0); creator_flags = WORKQ_THREADREQ_CAN_CREATE_THREADS | - WORKQ_THREADREQ_CREATOR_TRANSFER; + WORKQ_THREADREQ_CREATOR_TRANSFER; wq->wq_creator = NULL; _wq_thactive_inc(wq, req->tr_qos); wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++; @@ -1229,18 +1236,18 @@ workq_threadreq_bind_and_unlock(proc_t p, struct workqueue *wq, static inline bool workq_thread_call_prepost(struct workqueue *wq, uint32_t sched, uint32_t pend, - uint32_t fail_mask) + uint32_t fail_mask) { uint32_t old_flags, new_flags; os_atomic_rmw_loop(&wq->wq_flags, old_flags, new_flags, acquire, { if (__improbable(old_flags & (WQ_EXITING | sched | pend | fail_mask))) { - os_atomic_rmw_loop_give_up(return false); + os_atomic_rmw_loop_give_up(return false); } if (__improbable(old_flags & WQ_PROC_SUSPENDED)) { - new_flags = old_flags | pend; + new_flags = old_flags | pend; } else { - new_flags = old_flags | sched; + new_flags = old_flags | sched; } }); @@ -1255,8 +1262,8 @@ workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags) assert(!preemption_enabled()); if (!workq_thread_call_prepost(wq, WQ_DELAYED_CALL_SCHEDULED, - WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED | - WQ_IMMEDIATE_CALL_SCHEDULED)) { + WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED | + WQ_IMMEDIATE_CALL_SCHEDULED)) { return false; } @@ -1277,7 +1284,7 @@ workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags) } 
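[Editor's note on the workq_thread_call_prepost() hunk above: a single atomic read-modify-write either claims the "scheduled" bit or, when the owning process is suspended, records only a "pended" bit that workq_proc_resumed() replays later (see the hunk further below). What follows is a minimal user-space sketch of that pattern, using standard C11 atomics in place of XNU's os_atomic_rmw_loop(); the demo_* names are hypothetical and the code is illustrative, not the kernel implementation.]

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_EXITING         0x01u  /* workqueue is tearing down */
    #define DEMO_PROC_SUSPENDED  0x02u  /* owning process is suspended */

    /*
     * Returns true when the caller won the right to arm the thread call.
     * While the process is suspended, only the "pended" bit is recorded,
     * so the resume path can re-issue the request.
     */
    static bool
    demo_call_prepost(_Atomic uint32_t *flags, uint32_t sched, uint32_t pend,
        uint32_t fail_mask)
    {
        uint32_t oldf = atomic_load_explicit(flags, memory_order_relaxed);
        uint32_t newf;
        do {
            if (oldf & (DEMO_EXITING | sched | pend | fail_mask)) {
                return false;   /* already queued, pended, or exiting */
            }
            if (oldf & DEMO_PROC_SUSPENDED) {
                newf = oldf | pend;   /* remember the request for resume time */
            } else {
                newf = oldf | sched;  /* we own arming the thread call */
            }
        } while (!atomic_compare_exchange_weak_explicit(flags, &oldf, newf,
            memory_order_acquire, memory_order_relaxed));
        return (newf & sched) != 0;
    }

[Under this scheme the resume path only has to inspect the pended bits to decide whether to restart thread creation, which is what the workq_proc_resumed() hunk below does.]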
WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount, - _wq_flags(wq), wq->wq_timer_interval, 0); + _wq_flags(wq), wq->wq_timer_interval, 0); thread_call_t call = wq->wq_delayed_call; uintptr_t arg = WQ_DELAYED_CALL_SCHEDULED; @@ -1294,9 +1301,9 @@ workq_schedule_immediate_thread_creation(struct workqueue *wq) assert(!preemption_enabled()); if (workq_thread_call_prepost(wq, WQ_IMMEDIATE_CALL_SCHEDULED, - WQ_IMMEDIATE_CALL_PENDED, 0)) { + WQ_IMMEDIATE_CALL_PENDED, 0)) { WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount, - _wq_flags(wq), 0, 0); + _wq_flags(wq), 0, 0); uintptr_t arg = WQ_IMMEDIATE_CALL_SCHEDULED; if (thread_call_enter1(wq->wq_immediate_call, (void *)arg)) { @@ -1310,7 +1317,9 @@ workq_proc_suspended(struct proc *p) { struct workqueue *wq = proc_get_wqptr(p); - if (wq) os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed); + if (wq) { + os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed); + } } void @@ -1319,17 +1328,19 @@ workq_proc_resumed(struct proc *p) struct workqueue *wq = proc_get_wqptr(p); uint32_t wq_flags; - if (!wq) return; + if (!wq) { + return; + } wq_flags = os_atomic_and_orig(&wq->wq_flags, ~(WQ_PROC_SUSPENDED | - WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED), relaxed); + WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED), relaxed); if ((wq_flags & WQ_EXITING) == 0) { disable_preemption(); if (wq_flags & WQ_IMMEDIATE_CALL_PENDED) { workq_schedule_immediate_thread_creation(wq); } else if (wq_flags & WQ_DELAYED_CALL_PENDED) { workq_schedule_delayed_thread_creation(wq, - WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART); + WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART); } enable_preemption(); } @@ -1368,13 +1379,15 @@ workq_add_new_threads_call(void *_p, void *flags) * workq_exit() will set the workqueue to NULL before * it cancels thread calls. */ - if (!wq) return; + if (!wq) { + return; + } assert((my_flag == WQ_DELAYED_CALL_SCHEDULED) || - (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED)); + (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED)); WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, _wq_flags(wq), - wq->wq_nthreads, wq->wq_thidlecount, 0); + wq->wq_nthreads, wq->wq_thidlecount, 0); workq_lock_spin(wq); @@ -1387,7 +1400,7 @@ workq_add_new_threads_call(void *_p, void *flags) workq_unlock(wq); WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0, - wq->wq_nthreads, wq->wq_thidlecount, 0); + wq->wq_nthreads, wq->wq_thidlecount, 0); } #pragma mark thread state tracking @@ -1422,7 +1435,7 @@ workq_sched_callback(int type, thread_t thread) * not a problem. 
Either timestamp is adequate, so no need to retry */ os_atomic_store(&wq->wq_lastblocked_ts[_wq_bucket(qos)], - thread_last_run_time(thread), relaxed); + thread_last_run_time(thread), relaxed); if (req_qos == THREAD_QOS_UNSPECIFIED) { /* @@ -1436,7 +1449,7 @@ workq_sched_callback(int type, thread_t thread) } else { uint32_t max_busycount, old_req_count; old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive, - req_qos, NULL, &max_busycount); + req_qos, NULL, &max_busycount); /* * If it is possible that may_start_constrained_thread had refused * admission due to being over the max concurrency, we may need to @@ -1456,10 +1469,10 @@ workq_sched_callback(int type, thread_t thread) } if (__improbable(kdebug_enable)) { __unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq, - old_thactive, qos, NULL, NULL); + old_thactive, qos, NULL, NULL); WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq, - old - 1, qos | (req_qos << 8), - wq->wq_reqcount << 1 | start_timer, 0); + old - 1, qos | (req_qos << 8), + wq->wq_reqcount << 1 | start_timer, 0); } break; @@ -1475,11 +1488,11 @@ workq_sched_callback(int type, thread_t thread) old_thactive = _wq_thactive_inc(wq, qos); if (__improbable(kdebug_enable)) { __unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq, - old_thactive, qos, NULL, NULL); + old_thactive, qos, NULL, NULL); req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive); WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq, - old + 1, qos | (req_qos << 8), - wq->wq_threads_scheduled, 0); + old + 1, qos | (req_qos << 8), + wq->wq_threads_scheduled, 0); } break; } @@ -1528,7 +1541,7 @@ workq_deallocate_safe(struct workqueue *wq) */ int workq_open(struct proc *p, __unused struct workq_open_args *uap, - __unused int32_t *retval) + __unused int32_t *retval) { struct workqueue *wq; int error = 0; @@ -1547,8 +1560,9 @@ workq_open(struct proc *p, __unused struct workq_open_args *uap, */ limit = num_cpus * WORKQUEUE_CONSTRAINED_FACTOR; - if (limit > wq_max_constrained_threads) + if (limit > wq_max_constrained_threads) { wq_max_constrained_threads = limit; + } if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) { wq_max_threads = WQ_THACTIVE_BUCKET_HALF; @@ -1561,7 +1575,7 @@ workq_open(struct proc *p, __unused struct workq_open_args *uap, for (thread_qos_t qos = WORKQ_THREAD_QOS_MIN; qos <= WORKQ_THREAD_QOS_MAX; qos++) { wq_max_parallelism[_wq_bucket(qos)] = - qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL); + qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL); } wq_init_constrained_limit = 0; @@ -1585,32 +1599,32 @@ workq_open(struct proc *p, __unused struct workq_open_args *uap, wq->wq_timer_interval = wq_stalled_window.abstime; wq->wq_proc = p; turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(), - TURNSTILE_WORKQS); + TURNSTILE_WORKQS); TAILQ_INIT(&wq->wq_thrunlist); TAILQ_INIT(&wq->wq_thnewlist); TAILQ_INIT(&wq->wq_thidlelist); priority_queue_init(&wq->wq_overcommit_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + PRIORITY_QUEUE_BUILTIN_MAX_HEAP); priority_queue_init(&wq->wq_constrained_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + PRIORITY_QUEUE_BUILTIN_MAX_HEAP); priority_queue_init(&wq->wq_special_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + PRIORITY_QUEUE_BUILTIN_MAX_HEAP); wq->wq_delayed_call = thread_call_allocate_with_options( - workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL, - THREAD_CALL_OPTIONS_ONCE); + workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL, + THREAD_CALL_OPTIONS_ONCE); wq->wq_immediate_call = 
thread_call_allocate_with_options( - workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL, - THREAD_CALL_OPTIONS_ONCE); + workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL, + THREAD_CALL_OPTIONS_ONCE); wq->wq_death_call = thread_call_allocate_with_options( - workq_kill_old_threads_call, wq, - THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE); + workq_kill_old_threads_call, wq, + THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE); lck_spin_init(&wq->wq_lock, workq_lck_grp, workq_lck_attr); WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq, - VM_KERNEL_ADDRHIDE(wq), 0, 0, 0); + VM_KERNEL_ADDRHIDE(wq), 0, 0, 0); proc_set_wqptr(p, wq); } out: @@ -1633,9 +1647,11 @@ workq_mark_exiting(struct proc *p) uint32_t wq_flags; workq_threadreq_t mgr_req; - if (!wq) return; + if (!wq) { + return; + } - WQ_TRACE_WQ(TRACE_wq_pthread_exit|DBG_FUNC_START, wq, 0, 0, 0, 0); + WQ_TRACE_WQ(TRACE_wq_pthread_exit | DBG_FUNC_START, wq, 0, 0, 0, 0); workq_lock_spin(wq); @@ -1673,19 +1689,19 @@ workq_mark_exiting(struct proc *p) * It is hence safe to do the tear down without holding any lock. */ priority_queue_destroy(&wq->wq_overcommit_queue, - struct workq_threadreq_s, tr_entry, ^(void *e){ + struct workq_threadreq_s, tr_entry, ^(void *e){ workq_threadreq_destroy(p, e); }); priority_queue_destroy(&wq->wq_constrained_queue, - struct workq_threadreq_s, tr_entry, ^(void *e){ + struct workq_threadreq_s, tr_entry, ^(void *e){ workq_threadreq_destroy(p, e); }); priority_queue_destroy(&wq->wq_special_queue, - struct workq_threadreq_s, tr_entry, ^(void *e){ + struct workq_threadreq_s, tr_entry, ^(void *e){ workq_threadreq_destroy(p, e); }); - WQ_TRACE(TRACE_wq_pthread_exit|DBG_FUNC_END, 0, 0, 0, 0, 0); + WQ_TRACE(TRACE_wq_pthread_exit | DBG_FUNC_END, 0, 0, 0, 0, 0); } /* @@ -1707,7 +1723,7 @@ workq_exit(struct proc *p) if (wq != NULL) { thread_t th = current_thread(); - WQ_TRACE_WQ(TRACE_wq_workqueue_exit|DBG_FUNC_START, wq, 0, 0, 0, 0); + WQ_TRACE_WQ(TRACE_wq_workqueue_exit | DBG_FUNC_START, wq, 0, 0, 0, 0); if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) { /* @@ -1747,11 +1763,11 @@ workq_exit(struct proc *p) assert(TAILQ_EMPTY(&wq->wq_thidlelist)); WQ_TRACE_WQ(TRACE_wq_destroy | DBG_FUNC_END, wq, - VM_KERNEL_ADDRHIDE(wq), 0, 0, 0); + VM_KERNEL_ADDRHIDE(wq), 0, 0, 0); workq_deallocate(wq); - WQ_TRACE(TRACE_wq_workqueue_exit|DBG_FUNC_END, 0, 0, 0, 0, 0); + WQ_TRACE(TRACE_wq_workqueue_exit | DBG_FUNC_END, 0, 0, 0, 0, 0); } } @@ -1760,12 +1776,12 @@ workq_exit(struct proc *p) static bool _pthread_priority_to_policy(pthread_priority_t priority, - thread_qos_policy_data_t *data) + thread_qos_policy_data_t *data) { data->qos_tier = _pthread_priority_thread_qos(priority); data->tier_importance = _pthread_priority_relpri(priority); if (data->qos_tier == THREAD_QOS_UNSPECIFIED || data->tier_importance > 0 || - data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) { + data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) { return false; } return true; @@ -1773,7 +1789,7 @@ _pthread_priority_to_policy(pthread_priority_t priority, static int bsdthread_set_self(proc_t p, thread_t th, pthread_priority_t priority, - mach_port_name_t voucher, enum workq_set_self_flags flags) + mach_port_name_t voucher, enum workq_set_self_flags flags) { struct uthread *uth = get_bsdthread_info(th); struct workqueue *wq = proc_get_wqptr(p); @@ -1847,7 +1863,7 @@ qos: if (old_overcommit) { wq->wq_constrained_threads_scheduled++; } else if (wq->wq_constrained_threads_scheduled-- == - wq_max_constrained_threads) { + 
wq_max_constrained_threads) { force_run = true; } } @@ -1859,7 +1875,7 @@ qos: } kr = thread_policy_set_internal(th, THREAD_QOS_POLICY, - (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT); + (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT); if (kr != KERN_SUCCESS) { qos_rv = EINVAL; } @@ -1875,7 +1891,9 @@ voucher: } fixedpri: - if (qos_rv) goto done; + if (qos_rv) { + goto done; + } if (flags & WORKQ_SET_SELF_FIXEDPRIORITY_FLAG) { thread_extended_policy_data_t extpol = {.timeshare = 0}; @@ -1886,7 +1904,7 @@ fixedpri: } kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY, - (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT); + (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT); if (kr != KERN_SUCCESS) { fixedpri_rv = EINVAL; goto done; @@ -1901,7 +1919,7 @@ fixedpri: } kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY, - (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT); + (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT); if (kr != KERN_SUCCESS) { fixedpri_rv = EINVAL; goto done; @@ -1935,7 +1953,7 @@ done: static int bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport, - pthread_priority_t pp, user_addr_t resource) + pthread_priority_t pp, user_addr_t resource) { thread_qos_t qos = _pthread_priority_thread_qos(pp); if (qos == THREAD_QOS_UNSPECIFIED) { @@ -1948,7 +1966,7 @@ bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport, } int rv = proc_thread_qos_add_override(p->task, th, 0, qos, TRUE, - resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE); + resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE); thread_deallocate(th); return rv; @@ -1956,7 +1974,7 @@ bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport, static int bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport, - user_addr_t resource) + user_addr_t resource) { thread_t th = port_name_to_thread(kport); if (th == THREAD_NULL) { @@ -1964,7 +1982,7 @@ bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport, } int rv = proc_thread_qos_remove_override(p->task, th, 0, resource, - THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE); + THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE); thread_deallocate(th); return rv; @@ -1972,7 +1990,7 @@ bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport, static int workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport, - pthread_priority_t pp, user_addr_t ulock_addr) + pthread_priority_t pp, user_addr_t ulock_addr) { struct uu_workq_policy old_pri, new_pri; struct workqueue *wq = proc_get_wqptr(p); @@ -1994,7 +2012,7 @@ workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport, } WQ_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE, - wq, thread_tid(thread), 1, pp, 0); + wq, thread_tid(thread), 1, pp, 0); thread_mtx_lock(thread); @@ -2060,12 +2078,12 @@ workq_thread_reset_dispatch_override(proc_t p, thread_t thread) static int bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags, - int *retval) + int *retval) { static_assert(QOS_PARALLELISM_COUNT_LOGICAL == - _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical"); + _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical"); static_assert(QOS_PARALLELISM_REALTIME == - _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime"); + _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime"); if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL)) { return EINVAL; @@ -2084,7 +2102,7 @@ bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags, } #define ENSURE_UNUSED(arg) 
\ - ({ if ((arg) != 0) { return EINVAL; } }) + ({ if ((arg) != 0) { return EINVAL; } }) int bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval) @@ -2092,27 +2110,27 @@ bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval) switch (uap->cmd) { case BSDTHREAD_CTL_QOS_OVERRIDE_START: return bsdthread_add_explicit_override(p, (mach_port_name_t)uap->arg1, - (pthread_priority_t)uap->arg2, uap->arg3); + (pthread_priority_t)uap->arg2, uap->arg3); case BSDTHREAD_CTL_QOS_OVERRIDE_END: ENSURE_UNUSED(uap->arg3); return bsdthread_remove_explicit_override(p, (mach_port_name_t)uap->arg1, - (user_addr_t)uap->arg2); + (user_addr_t)uap->arg2); case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH: return workq_thread_add_dispatch_override(p, (mach_port_name_t)uap->arg1, - (pthread_priority_t)uap->arg2, uap->arg3); + (pthread_priority_t)uap->arg2, uap->arg3); case BSDTHREAD_CTL_QOS_OVERRIDE_RESET: return workq_thread_reset_dispatch_override(p, current_thread()); case BSDTHREAD_CTL_SET_SELF: return bsdthread_set_self(p, current_thread(), - (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2, - (enum workq_set_self_flags)uap->arg3); + (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2, + (enum workq_set_self_flags)uap->arg3); case BSDTHREAD_CTL_QOS_MAX_PARALLELISM: ENSURE_UNUSED(uap->arg3); return bsdthread_get_max_parallelism((thread_qos_t)uap->arg1, - (unsigned long)uap->arg2, retval); + (unsigned long)uap->arg2, retval); case BSDTHREAD_CTL_SET_QOS: case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD: @@ -2129,7 +2147,7 @@ bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval) static void __dead2 workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq, - struct uthread *uth); + struct uthread *uth); static void workq_setup_and_run(proc_t p, struct uthread *uth, int flags) __dead2; @@ -2158,12 +2176,12 @@ workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp) uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI; if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX || - qos == THREAD_QOS_UNSPECIFIED) { + qos == THREAD_QOS_UNSPECIFIED) { return EINVAL; } WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE, - wq, reqcount, pp, 0, 0); + wq, reqcount, pp, 0, 0); workq_threadreq_t req = zalloc(workq_zone_threadreq); priority_queue_entry_init(&req->tr_entry); @@ -2177,7 +2195,7 @@ workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp) } WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, - wq, workq_trace_req_id(req), req->tr_qos, reqcount, 0); + wq, workq_trace_req_id(req), req->tr_qos, reqcount, 0); workq_lock_spin(wq); do { @@ -2233,7 +2251,7 @@ workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp) reqcount--; } } while (unpaced && wq->wq_nthreads < wq_max_threads && - workq_add_new_idle_thread(p, wq)); + workq_add_new_idle_thread(p, wq)); if (_wq_exiting(wq)) { goto exiting; @@ -2255,7 +2273,7 @@ exiting: bool workq_kern_threadreq_initiate(struct proc *p, struct kqrequest *kqr, - struct turnstile *workloop_ts, thread_qos_t qos, int flags) + struct turnstile *workloop_ts, thread_qos_t qos, int flags) { struct workqueue *wq = proc_get_wqptr_fast(p); workq_threadreq_t req = &kqr->kqr_req; @@ -2280,7 +2298,7 @@ workq_kern_threadreq_initiate(struct proc *p, struct kqrequest *kqr, tr_flags = TR_FLAG_KEVENT; } if (qos != WORKQ_THREAD_QOS_MANAGER && - (kqr->kqr_state & KQR_THOVERCOMMIT)) { + (kqr->kqr_state & KQR_THOVERCOMMIT)) { tr_flags |= 
TR_FLAG_OVERCOMMIT; } @@ -2292,7 +2310,7 @@ workq_kern_threadreq_initiate(struct proc *p, struct kqrequest *kqr, req->tr_qos = qos; WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, wq, - workq_trace_req_id(req), qos, 1, 0); + workq_trace_req_id(req), qos, 1, 0); if (flags & WORKQ_THREADREQ_ATTEMPT_REBIND) { /* @@ -2317,9 +2335,9 @@ workq_kern_threadreq_initiate(struct proc *p, struct kqrequest *kqr, if (workloop_ts) { workq_perform_turnstile_operation_locked(wq, ^{ turnstile_update_inheritor(workloop_ts, wq->wq_turnstile, - TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE); + TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE); turnstile_update_inheritor_complete(workloop_ts, - TURNSTILE_INTERLOCK_HELD); + TURNSTILE_INTERLOCK_HELD); }); } if (workq_threadreq_enqueue(wq, req)) { @@ -2333,7 +2351,7 @@ workq_kern_threadreq_initiate(struct proc *p, struct kqrequest *kqr, void workq_kern_threadreq_modify(struct proc *p, struct kqrequest *kqr, - thread_qos_t qos, int flags) + thread_qos_t qos, int flags) { struct workqueue *wq = proc_get_wqptr_fast(p); workq_threadreq_t req = &kqr->kqr_req; @@ -2356,7 +2374,7 @@ workq_kern_threadreq_modify(struct proc *p, struct kqrequest *kqr, } change_overcommit = (bool)(kqr->kqr_state & KQR_THOVERCOMMIT) != - (bool)(req->tr_flags & TR_FLAG_OVERCOMMIT); + (bool)(req->tr_flags & TR_FLAG_OVERCOMMIT); if (_wq_exiting(wq) || (req->tr_qos == qos && !change_overcommit)) { workq_unlock(wq); @@ -2369,7 +2387,7 @@ workq_kern_threadreq_modify(struct proc *p, struct kqrequest *kqr, } WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq, - workq_trace_req_id(req), qos, 0, 0); + workq_trace_req_id(req), qos, 0, 0); struct priority_queue *pq = workq_priority_queue_for_req(wq, req); workq_threadreq_t req_max; @@ -2381,7 +2399,7 @@ workq_kern_threadreq_modify(struct proc *p, struct kqrequest *kqr, * maintain the best constrained request qos invariant. 
*/ if (priority_queue_remove(pq, &req->tr_entry, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) { _wq_thactive_refresh_best_constrained_req_qos(wq); } @@ -2402,7 +2420,7 @@ workq_kern_threadreq_modify(struct proc *p, struct kqrequest *kqr, req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry); if (req_max && req_max->tr_qos >= qos) { priority_queue_insert(pq, &req->tr_entry, workq_priority_for_req(req), - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); workq_unlock(wq); return; } @@ -2437,8 +2455,8 @@ workq_kern_threadreq_unlock(struct proc *p) void workq_kern_threadreq_update_inheritor(struct proc *p, struct kqrequest *kqr, - thread_t owner, struct turnstile *wl_ts, - turnstile_update_flags_t flags) + thread_t owner, struct turnstile *wl_ts, + turnstile_update_flags_t flags) { struct workqueue *wq = proc_get_wqptr_fast(p); workq_threadreq_t req = &kqr->kqr_req; @@ -2450,7 +2468,7 @@ workq_kern_threadreq_update_inheritor(struct proc *p, struct kqrequest *kqr, if (req->tr_state == TR_STATE_BINDING) { kqueue_threadreq_bind(p, req, req->tr_binding_thread, - KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE); + KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE); return; } @@ -2488,14 +2506,18 @@ workq_kern_threadreq_redrive(struct proc *p, int flags) void workq_schedule_creator_turnstile_redrive(struct workqueue *wq, bool locked) { - if (!locked) workq_lock_spin(wq); + if (!locked) { + workq_lock_spin(wq); + } workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_CREATOR_SYNC_UPDATE); - if (!locked) workq_unlock(wq); + if (!locked) { + workq_unlock(wq); + } } static int workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap, - struct workqueue *wq) + struct workqueue *wq) { thread_t th = current_thread(); struct uthread *uth = get_bsdthread_info(th); @@ -2505,7 +2527,7 @@ workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap, user_addr_t eventlist = uap->item; if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) || - (uth->uu_workq_flags & UT_WORKQ_DYING)) { + (uth->uu_workq_flags & UT_WORKQ_DYING)) { return EINVAL; } @@ -2545,14 +2567,16 @@ workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap, upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS; } else { upcall_flags |= uth->uu_workq_pri.qos_req | - WQ_FLAG_THREAD_PRIO_QOS; + WQ_FLAG_THREAD_PRIO_QOS; } } error = pthread_functions->workq_handle_stack_events(p, th, - get_task_map(p->task), uth->uu_workq_stackaddr, - uth->uu_workq_thport, eventlist, nevents, upcall_flags); - if (error) return error; + get_task_map(p->task), uth->uu_workq_stackaddr, + uth->uu_workq_thport, eventlist, nevents, upcall_flags); + if (error) { + return error; + } // pthread is supposed to pass KEVENT_FLAG_PARKING here // which should cause the above call to either: @@ -2600,7 +2624,7 @@ workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *ret * arg3 = kevent support */ int offset = arg2; - if (arg3 & 0x01){ + if (arg3 & 0x01) { // If we get here, then userspace has indicated support for kevent delivery. 
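[Editor's note: the next hunk validates a pthread_priority_t arriving from userspace, so it helps to recall the bit layout from priority_private.h earlier in this patch: flags occupy the top byte, the valid QoS class is a one-hot field in bits 8..13, and the relative priority sits in the low byte. Below is a minimal decoding sketch built only from those mask values; the pp_* helper names are hypothetical, and the relpri bias shown (stored value is relpri - 1) is an assumption based on the conventional decode, not quoted from this patch.]

    #include <stdint.h>
    #include <strings.h>  /* ffs() */

    #define PP_QOS_CLASS_MASK 0x003fff00u  /* _PTHREAD_PRIORITY_QOS_CLASS_MASK */
    #define PP_QOS_SHIFT      8
    #define PP_PRIORITY_MASK  0x000000ffu  /* _PTHREAD_PRIORITY_PRIORITY_MASK */

    /* One-hot QoS class bit -> class index (0 means unspecified). */
    static unsigned
    pp_thread_qos(uint32_t pp)
    {
        uint32_t bits = (pp & PP_QOS_CLASS_MASK) >> PP_QOS_SHIFT;
        return bits ? (unsigned)ffs((int)bits) : 0;
    }

    /* Relative priority: a value in [-15, 0], stored biased by -1. */
    static int
    pp_relpri(uint32_t pp)
    {
        return (int8_t)(pp & PP_PRIORITY_MASK) + 1;
    }

[With this bias, a zero priority byte decodes to +1, which is exactly the kind of malformed input the relpri > 0 rejection in the hunk that follows would catch.]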
} @@ -2635,12 +2659,12 @@ workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *ret */ if (pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) { pri &= (_PTHREAD_PRIORITY_SCHED_PRI_MASK | - _PTHREAD_PRIORITY_SCHED_PRI_FLAG); + _PTHREAD_PRIORITY_SCHED_PRI_FLAG); } else { thread_qos_t qos = _pthread_priority_thread_qos(pri); int relpri = _pthread_priority_relpri(pri); if (relpri > 0 || relpri < THREAD_QOS_MIN_TIER_IMPORTANCE || - qos == THREAD_QOS_UNSPECIFIED) { + qos == THREAD_QOS_UNSPECIFIED) { error = EINVAL; break; } @@ -2673,7 +2697,7 @@ workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *ret thread_t th = current_thread(); struct uthread *uth = get_bsdthread_info(th); if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) || - (uth->uu_workq_flags & (UT_WORKQ_DYING|UT_WORKQ_OVERCOMMIT))) { + (uth->uu_workq_flags & (UT_WORKQ_DYING | UT_WORKQ_OVERCOMMIT))) { error = EINVAL; break; } @@ -2695,7 +2719,7 @@ workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *ret break; } - return (error); + return error; } /* @@ -2722,7 +2746,7 @@ workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth) */ if (!uth->uu_save.uus_workq_park_data.has_stack) { pthread_functions->workq_markfree_threadstack(p, uth->uu_thread, - get_task_map(p->task), uth->uu_workq_stackaddr); + get_task_map(p->task), uth->uu_workq_stackaddr); } /* @@ -2754,7 +2778,7 @@ workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth) if (uth->uu_workq_flags & UT_WORKQ_DYING) { workq_unpark_for_death_and_unlock(p, wq, uth, - WORKQ_UNPARK_FOR_DEATH_WAS_IDLE); + WORKQ_UNPARK_FOR_DEATH_WAS_IDLE); __builtin_unreachable(); } @@ -2775,12 +2799,12 @@ workq_may_start_event_mgr_thread(struct workqueue *wq, struct uthread *uth) * - we are re-using the event manager */ return wq->wq_thscheduled_count[_wq_bucket(WORKQ_THREAD_QOS_MANAGER)] == 0 || - (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER); + (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER); } static uint32_t workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos, - struct uthread *uth, bool may_start_timer) + struct uthread *uth, bool may_start_timer) { assert(at_qos != WORKQ_THREAD_QOS_MANAGER); uint32_t count = 0; @@ -2795,8 +2819,8 @@ workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos, } if (max_count >= wq_max_constrained_threads) { WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1, - wq->wq_constrained_threads_scheduled, - wq_max_constrained_threads, 0); + wq->wq_constrained_threads_scheduled, + wq_max_constrained_threads, 0); /* * we need 1 or more constrained threads to return to the kernel before * we can dispatch additional work @@ -2817,10 +2841,10 @@ workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos, uint32_t busycount, thactive_count; thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq), - at_qos, &busycount, NULL); + at_qos, &busycount, NULL); if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER && - at_qos <= uth->uu_workq_pri.qos_bucket) { + at_qos <= uth->uu_workq_pri.qos_bucket) { /* * Don't count this thread as currently active, but only if it's not * a manager thread, as _wq_thactive_aggregate_downto_qos ignores active @@ -2834,11 +2858,11 @@ workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos, if (count > thactive_count + busycount) { count -= thactive_count + busycount; WQ_TRACE_WQ(TRACE_wq_constrained_admission | 
DBG_FUNC_NONE, wq, 2, - thactive_count, busycount, 0); + thactive_count, busycount, 0); return MIN(count, max_count); } else { WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3, - thactive_count, busycount, 0); + thactive_count, busycount, 0); } if (busycount && may_start_timer) { @@ -2854,7 +2878,7 @@ workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos, static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth, - workq_threadreq_t req) + workq_threadreq_t req) { if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) { return workq_may_start_event_mgr_thread(wq, uth); @@ -2882,7 +2906,7 @@ workq_threadreq_select_for_creator(struct workqueue *wq) */ req_pri = priority_queue_max(&wq->wq_special_queue, - struct workq_threadreq_s, tr_entry); + struct workq_threadreq_s, tr_entry); if (req_pri) { pri = priority_queue_entry_key(&wq->wq_special_queue, &req_pri->tr_entry); } @@ -2892,13 +2916,13 @@ workq_threadreq_select_for_creator(struct workqueue *wq) */ req_qos = priority_queue_max(&wq->wq_overcommit_queue, - struct workq_threadreq_s, tr_entry); + struct workq_threadreq_s, tr_entry); if (req_qos) { qos = req_qos->tr_qos; } req_tmp = priority_queue_max(&wq->wq_constrained_queue, - struct workq_threadreq_s, tr_entry); + struct workq_threadreq_s, tr_entry); if (req_tmp && qos < req_tmp->tr_qos) { if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) { @@ -2949,7 +2973,9 @@ workq_threadreq_select(struct workqueue *wq, struct uthread *uth) thread_qos_t qos = THREAD_QOS_UNSPECIFIED; uint8_t pri = 0; - if (uth == wq->wq_creator) uth = NULL; + if (uth == wq->wq_creator) { + uth = NULL; + } req_tmp = wq->wq_event_manager_threadreq; if (req_tmp && workq_may_start_event_mgr_thread(wq, uth)) { @@ -2961,22 +2987,22 @@ workq_threadreq_select(struct workqueue *wq, struct uthread *uth) */ pri = turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, - &proprietor); + &proprietor); if (pri) { struct kqworkloop *kqwl = (struct kqworkloop *)proprietor; req_pri = &kqwl->kqwl_request.kqr_req; if (req_pri->tr_state != TR_STATE_QUEUED) { panic("Invalid thread request (%p) state %d", - req_pri, req_pri->tr_state); + req_pri, req_pri->tr_state); } } else { req_pri = NULL; } req_tmp = priority_queue_max(&wq->wq_special_queue, - struct workq_threadreq_s, tr_entry); + struct workq_threadreq_s, tr_entry); if (req_tmp && pri < priority_queue_entry_key(&wq->wq_special_queue, - &req_tmp->tr_entry)) { + &req_tmp->tr_entry)) { req_pri = req_tmp; pri = priority_queue_entry_key(&wq->wq_special_queue, &req_tmp->tr_entry); } @@ -2986,13 +3012,13 @@ workq_threadreq_select(struct workqueue *wq, struct uthread *uth) */ req_qos = priority_queue_max(&wq->wq_overcommit_queue, - struct workq_threadreq_s, tr_entry); + struct workq_threadreq_s, tr_entry); if (req_qos) { qos = req_qos->tr_qos; } req_tmp = priority_queue_max(&wq->wq_constrained_queue, - struct workq_threadreq_s, tr_entry); + struct workq_threadreq_s, tr_entry); if (req_tmp && qos < req_tmp->tr_qos) { if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) { @@ -3075,7 +3101,7 @@ again: */ if (workq_thread_needs_priority_change(req, uth)) { WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE, - wq, 1, thread_tid(uth->uu_thread), req->tr_qos, 0); + wq, 1, thread_tid(uth->uu_thread), req->tr_qos, 0); workq_thread_reset_pri(wq, uth, req); } } else if (wq->wq_thidlecount) { @@ -3087,9 +3113,9 @@ again: workq_thread_reset_pri(wq, uth, req); } workq_turnstile_update_inheritor(wq, uth->uu_thread, - 
TURNSTILE_INHERITOR_THREAD); + TURNSTILE_INHERITOR_THREAD); WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE, - wq, 2, thread_tid(uth->uu_thread), req->tr_qos, 0); + wq, 2, thread_tid(uth->uu_thread), req->tr_qos, 0); uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled; uth->uu_save.uus_workq_park_data.yields = 0; workq_thread_wakeup(uth); @@ -3140,7 +3166,7 @@ again: __attribute__((noreturn, noinline)) static void workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq, - struct uthread *uth) + struct uthread *uth) { uint32_t setup_flags = 0; workq_threadreq_t req; @@ -3156,7 +3182,7 @@ workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq, req = uth->uu_save.uus_workq_park_data.thread_request; workq_unlock(wq); WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq, - VM_KERNEL_ADDRHIDE(req), 0, 0, 0); + VM_KERNEL_ADDRHIDE(req), 0, 0, 0); goto run; } else if (_wq_exiting(wq)) { WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0, 0); @@ -3166,7 +3192,7 @@ workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq, WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0, 0); } else { WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq, - workq_trace_req_id(req), 0, 0, 0); + workq_trace_req_id(req), 0, 0, 0); if (uth->uu_workq_flags & UT_WORKQ_NEW) { uth->uu_workq_flags ^= UT_WORKQ_NEW; setup_flags |= WQ_SETUP_FIRST_USE; @@ -3200,7 +3226,7 @@ workq_creator_should_yield(struct workqueue *wq, struct uthread *uth) if (wq->wq_fulfilled - snapshot > conc) { /* we fulfilled more than NCPU requests since being dispatched */ WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 1, - wq->wq_fulfilled, snapshot, 0); + wq->wq_fulfilled, snapshot, 0); return true; } @@ -3210,7 +3236,7 @@ workq_creator_should_yield(struct workqueue *wq, struct uthread *uth) if (conc <= cnt) { /* We fulfilled requests and have more than NCPU scheduled threads */ WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 2, - wq->wq_fulfilled, snapshot, 0); + wq->wq_fulfilled, snapshot, 0); return true; } @@ -3268,7 +3294,7 @@ workq_unpark_continue(void *parameter __unused, wait_result_t wr __unused) } workq_unpark_for_death_and_unlock(p, wq, uth, - WORKQ_UNPARK_FOR_DEATH_WAS_IDLE); + WORKQ_UNPARK_FOR_DEATH_WAS_IDLE); __builtin_unreachable(); } @@ -3314,7 +3340,7 @@ workq_setup_and_run(proc_t p, struct uthread *uth, int setup_flags) * anyway. */ upcall_flags |= uth->uu_save.uus_workq_park_data.qos | - WQ_FLAG_THREAD_PRIO_QOS; + WQ_FLAG_THREAD_PRIO_QOS; } if (uth->uu_workq_thport == MACH_PORT_NULL) { @@ -3329,10 +3355,10 @@ workq_setup_and_run(proc_t p, struct uthread *uth, int setup_flags) * onto the stack, sets up the thread state and then returns to userspace. 
*/ WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START, - proc_get_wqptr_fast(p), 0, 0, 0, 0); + proc_get_wqptr_fast(p), 0, 0, 0, 0); thread_sched_call(th, workq_sched_callback); pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr, - uth->uu_workq_thport, 0, setup_flags, upcall_flags); + uth->uu_workq_thport, 0, setup_flags, upcall_flags); __builtin_unreachable(); } @@ -3344,7 +3370,7 @@ fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo) { struct workqueue *wq = proc_get_wqptr(p); int error = 0; - int activecount; + int activecount; if (wq == NULL) { return EINVAL; @@ -3364,7 +3390,7 @@ fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo) wq_thactive_t act = _wq_thactive(wq); activecount = _wq_thactive_aggregate_downto_qos(wq, act, - WORKQ_THREAD_QOS_MIN, NULL, NULL); + WORKQ_THREAD_QOS_MIN, NULL, NULL); if (act & _wq_thactive_offset_for_qos(WORKQ_THREAD_QOS_MANAGER)) { activecount++; } @@ -3387,7 +3413,7 @@ fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo) boolean_t workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total, - boolean_t *exceeded_constrained) + boolean_t *exceeded_constrained) { proc_t p = v; struct proc_workqueueinfo pwqinfo; @@ -3415,12 +3441,12 @@ uint32_t workqueue_get_pwq_state_kdp(void * v) { static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) == - kTaskWqExceededConstrainedThreadLimit); + kTaskWqExceededConstrainedThreadLimit); static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) == - kTaskWqExceededTotalThreadLimit); + kTaskWqExceededTotalThreadLimit); static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable); static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT | - WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7); + WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7); if (v == NULL) { return 0; @@ -3454,14 +3480,14 @@ workq_init(void) workq_lck_grp = lck_grp_alloc_init("workq", workq_lck_grp_attr); workq_zone_workqueue = zinit(sizeof(struct workqueue), - 1024 * sizeof(struct workqueue), 8192, "workq.wq"); + 1024 * sizeof(struct workqueue), 8192, "workq.wq"); workq_zone_threadreq = zinit(sizeof(struct workq_threadreq_s), - 1024 * sizeof(struct workq_threadreq_s), 8192, "workq.threadreq"); + 1024 * sizeof(struct workq_threadreq_s), 8192, "workq.threadreq"); clock_interval_to_absolutetime_interval(wq_stalled_window.usecs, - NSEC_PER_USEC, &wq_stalled_window.abstime); + NSEC_PER_USEC, &wq_stalled_window.abstime); clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs, - NSEC_PER_USEC, &wq_reduce_pool_window.abstime); + NSEC_PER_USEC, &wq_reduce_pool_window.abstime); clock_interval_to_absolutetime_interval(wq_max_timer_interval.usecs, - NSEC_PER_USEC, &wq_max_timer_interval.abstime); + NSEC_PER_USEC, &wq_max_timer_interval.abstime); } diff --git a/bsd/pthread/workqueue_internal.h b/bsd/pthread/workqueue_internal.h index a072d35ef..c2a67f5e7 100644 --- a/bsd/pthread/workqueue_internal.h +++ b/bsd/pthread/workqueue_internal.h @@ -90,15 +90,15 @@ typedef union workq_threadreq_param_s { uint8_t trp_pri; uint8_t trp_pol; uint32_t trp_cpupercent: 8, - trp_refillms: 24; + trp_refillms: 24; }; uint64_t trp_value; } workq_threadreq_param_t; -#define TRP_PRIORITY 0x1 -#define TRP_POLICY 0x2 -#define TRP_CPUPERCENT 0x4 -#define TRP_RELEASED 0x8000 +#define TRP_PRIORITY 0x1 +#define TRP_POLICY 0x2 +#define TRP_CPUPERCENT 0x4 +#define TRP_RELEASED 0x8000 typedef struct workq_threadreq_s { union { @@ -113,16 +113,16 @@ typedef struct workq_threadreq_s { TAILQ_HEAD(threadreq_head, 
workq_threadreq_s); -#define TR_STATE_IDLE 0 /* request isn't in flight */ -#define TR_STATE_NEW 1 /* request is being initiated */ -#define TR_STATE_QUEUED 2 /* request is being queued */ -#define TR_STATE_BINDING 4 /* request is preposted for bind */ +#define TR_STATE_IDLE 0 /* request isn't in flight */ +#define TR_STATE_NEW 1 /* request is being initiated */ +#define TR_STATE_QUEUED 2 /* request is being queued */ +#define TR_STATE_BINDING 4 /* request is preposted for bind */ -#define TR_FLAG_KEVENT 0x01 -#define TR_FLAG_WORKLOOP 0x02 -#define TR_FLAG_OVERCOMMIT 0x04 -#define TR_FLAG_WL_PARAMS 0x08 -#define TR_FLAG_WL_OUTSIDE_QOS 0x10 +#define TR_FLAG_KEVENT 0x01 +#define TR_FLAG_WORKLOOP 0x02 +#define TR_FLAG_OVERCOMMIT 0x04 +#define TR_FLAG_WL_PARAMS 0x08 +#define TR_FLAG_WL_OUTSIDE_QOS 0x10 #if defined(__LP64__) typedef unsigned __int128 wq_thactive_t; @@ -144,27 +144,27 @@ typedef enum { TAILQ_HEAD(workq_uthread_head, uthread); struct workqueue { - thread_call_t wq_delayed_call; - thread_call_t wq_immediate_call; + thread_call_t wq_delayed_call; + thread_call_t wq_immediate_call; thread_call_t wq_death_call; struct turnstile *wq_turnstile; - lck_spin_t wq_lock; + lck_spin_t wq_lock; - uint64_t wq_thread_call_last_run; + uint64_t wq_thread_call_last_run; struct os_refcnt wq_refcnt; workq_state_flags_t _Atomic wq_flags; - uint32_t wq_fulfilled; - uint32_t wq_creations; - uint32_t wq_timer_interval; - uint32_t wq_event_manager_priority; - uint32_t wq_reqcount; /* number of elements on the wq_*_reqlists */ - uint16_t wq_thdying_count; - uint16_t wq_threads_scheduled; - uint16_t wq_constrained_threads_scheduled; - uint16_t wq_nthreads; - uint16_t wq_thidlecount; - uint16_t wq_thscheduled_count[WORKQ_NUM_BUCKETS]; // incl. manager + uint32_t wq_fulfilled; + uint32_t wq_creations; + uint32_t wq_timer_interval; + uint32_t wq_event_manager_priority; + uint32_t wq_reqcount; /* number of elements on the wq_*_reqlists */ + uint16_t wq_thdying_count; + uint16_t wq_threads_scheduled; + uint16_t wq_constrained_threads_scheduled; + uint16_t wq_nthreads; + uint16_t wq_thidlecount; + uint16_t wq_thscheduled_count[WORKQ_NUM_BUCKETS]; // incl. 
manager _Atomic wq_thactive_t wq_thactive; _Atomic uint64_t wq_lastblocked_ts[WORKQ_NUM_QOS_BUCKETS]; @@ -183,12 +183,12 @@ struct workqueue { }; static_assert(offsetof(struct workqueue, wq_lock) >= sizeof(struct queue_entry), - "Make sure workq_deallocate_enqueue can cast the workqueue"); + "Make sure workq_deallocate_enqueue can cast the workqueue"); -#define WORKQUEUE_MAXTHREADS 512 -#define WQ_STALLED_WINDOW_USECS 200 -#define WQ_REDUCE_POOL_WINDOW_USECS 5000000 -#define WQ_MAX_TIMER_INTERVAL_USECS 50000 +#define WORKQUEUE_MAXTHREADS 512 +#define WQ_STALLED_WINDOW_USECS 200 +#define WQ_REDUCE_POOL_WINDOW_USECS 5000000 +#define WQ_MAX_TIMER_INTERVAL_USECS 50000 #pragma mark definitions @@ -214,15 +214,15 @@ void workq_thread_terminate(struct proc *p, struct uthread *uth); // called with the kq req lock held bool workq_kern_threadreq_initiate(struct proc *p, struct kqrequest *kqr, - struct turnstile *ts, thread_qos_t qos, int flags); + struct turnstile *ts, thread_qos_t qos, int flags); // called with the kq req lock held void workq_kern_threadreq_modify(struct proc *p, struct kqrequest *kqr, - thread_qos_t qos, int flags); + thread_qos_t qos, int flags); // called with the kq req lock held void workq_kern_threadreq_update_inheritor(struct proc *p, struct kqrequest *kqr, - thread_t owner, struct turnstile *ts, turnstile_update_flags_t flags); + thread_t owner, struct turnstile *ts, turnstile_update_flags_t flags); void workq_kern_threadreq_lock(struct proc *p); void workq_kern_threadreq_unlock(struct proc *p); diff --git a/bsd/pthread/workqueue_syscalls.h b/bsd/pthread/workqueue_syscalls.h index e8604193e..f12656aac 100644 --- a/bsd/pthread/workqueue_syscalls.h +++ b/bsd/pthread/workqueue_syscalls.h @@ -35,40 +35,40 @@ #ifdef __PTHREAD_EXPOSE_INTERNALS__ /* workq_kernreturn commands */ -#define WQOPS_THREAD_RETURN 0x04 /* parks the thread back into the kernel */ -#define WQOPS_QUEUE_NEWSPISUPP 0x10 /* this is to check for newer SPI support */ -#define WQOPS_QUEUE_REQTHREADS 0x20 /* request number of threads of a prio */ -#define WQOPS_QUEUE_REQTHREADS2 0x30 /* request a number of threads in a given priority bucket */ -#define WQOPS_THREAD_KEVENT_RETURN 0x40 /* parks the thread after delivering the passed kevent array */ -#define WQOPS_SET_EVENT_MANAGER_PRIORITY 0x80 /* max() in the provided priority in the the priority of the event manager */ -#define WQOPS_THREAD_WORKLOOP_RETURN 0x100 /* parks the thread after delivering the passed kevent array */ -#define WQOPS_SHOULD_NARROW 0x200 /* checks whether we should narrow our concurrency */ +#define WQOPS_THREAD_RETURN 0x04 /* parks the thread back into the kernel */ +#define WQOPS_QUEUE_NEWSPISUPP 0x10 /* this is to check for newer SPI support */ +#define WQOPS_QUEUE_REQTHREADS 0x20 /* request number of threads of a prio */ +#define WQOPS_QUEUE_REQTHREADS2 0x30 /* request a number of threads in a given priority bucket */ +#define WQOPS_THREAD_KEVENT_RETURN 0x40 /* parks the thread after delivering the passed kevent array */ +#define WQOPS_SET_EVENT_MANAGER_PRIORITY 0x80 /* max() the provided priority with the priority of the event manager */ +#define WQOPS_THREAD_WORKLOOP_RETURN 0x100 /* parks the thread after delivering the passed kevent array */ +#define WQOPS_SHOULD_NARROW 0x200 /* checks whether we should narrow our concurrency */ /* flag values for upcall flags field, only 8 bits per struct threadlist */ -#define WQ_FLAG_THREAD_PRIO_SCHED 0x00008000 -#define WQ_FLAG_THREAD_PRIO_QOS 0x00004000 -#define WQ_FLAG_THREAD_PRIO_MASK
0x00000fff +#define WQ_FLAG_THREAD_PRIO_SCHED 0x00008000 +#define WQ_FLAG_THREAD_PRIO_QOS 0x00004000 +#define WQ_FLAG_THREAD_PRIO_MASK 0x00000fff -#define WQ_FLAG_THREAD_OVERCOMMIT 0x00010000 /* thread is with overcommit prio */ -#define WQ_FLAG_THREAD_REUSE 0x00020000 /* thread is being reused */ -#define WQ_FLAG_THREAD_NEWSPI 0x00040000 /* the call is with new SPIs */ -#define WQ_FLAG_THREAD_KEVENT 0x00080000 /* thread is response to kevent req */ -#define WQ_FLAG_THREAD_EVENT_MANAGER 0x00100000 /* event manager thread */ -#define WQ_FLAG_THREAD_TSD_BASE_SET 0x00200000 /* tsd base has already been set */ -#define WQ_FLAG_THREAD_WORKLOOP 0x00400000 /* workloop thread */ -#define WQ_FLAG_THREAD_OUTSIDEQOS 0x00800000 /* thread qos changes should not be sent to kernel */ +#define WQ_FLAG_THREAD_OVERCOMMIT 0x00010000 /* thread is with overcommit prio */ +#define WQ_FLAG_THREAD_REUSE 0x00020000 /* thread is being reused */ +#define WQ_FLAG_THREAD_NEWSPI 0x00040000 /* the call is with new SPIs */ +#define WQ_FLAG_THREAD_KEVENT 0x00080000 /* thread is response to kevent req */ +#define WQ_FLAG_THREAD_EVENT_MANAGER 0x00100000 /* event manager thread */ +#define WQ_FLAG_THREAD_TSD_BASE_SET 0x00200000 /* tsd base has already been set */ +#define WQ_FLAG_THREAD_WORKLOOP 0x00400000 /* workloop thread */ +#define WQ_FLAG_THREAD_OUTSIDEQOS 0x00800000 /* thread qos changes should not be sent to kernel */ #define WQ_KEVENT_LIST_LEN 16 // WORKQ_KEVENT_EVENT_BUFFER_LEN #define WQ_KEVENT_DATA_SIZE (32 * 1024) /* kqueue_workloop_ctl commands */ -#define KQ_WORKLOOP_CREATE 0x01 -#define KQ_WORKLOOP_DESTROY 0x02 +#define KQ_WORKLOOP_CREATE 0x01 +#define KQ_WORKLOOP_DESTROY 0x02 /* indicate which fields of kq_workloop_create params are valid */ -#define KQ_WORKLOOP_CREATE_SCHED_PRI 0x01 -#define KQ_WORKLOOP_CREATE_SCHED_POL 0x02 -#define KQ_WORKLOOP_CREATE_CPU_PERCENT 0x04 +#define KQ_WORKLOOP_CREATE_SCHED_PRI 0x01 +#define KQ_WORKLOOP_CREATE_SCHED_POL 0x02 +#define KQ_WORKLOOP_CREATE_CPU_PERCENT 0x04 struct kqueue_workloop_params { int kqwlp_version; @@ -81,7 +81,7 @@ struct kqueue_workloop_params { } __attribute__((packed)); _Static_assert(offsetof(struct kqueue_workloop_params, kqwlp_version) == 0, - "kqwlp_version should be first"); + "kqwlp_version should be first"); int __workq_open(void); diff --git a/bsd/pthread/workqueue_trace.h b/bsd/pthread/workqueue_trace.h index 6625798c4..f844067ca 100644 --- a/bsd/pthread/workqueue_trace.h +++ b/bsd/pthread/workqueue_trace.h @@ -37,62 +37,62 @@ #define WQ_TRACE_BSDTHREAD_SUBCLASS 16 #define TRACE_wq_pthread_exit \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x01) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x01) #define TRACE_wq_workqueue_exit \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x02) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x02) #define TRACE_wq_runthread \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x03) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x03) #define TRACE_wq_death_call \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x05) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x05) #define TRACE_wq_thread_block \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x09) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x09) #define TRACE_wq_thactive_update \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x0a) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x0a) #define TRACE_wq_add_timer \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 
0x0b) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x0b) #define TRACE_wq_start_add_timer \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x0c) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x0c) #define TRACE_wq_override_dispatch \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x14) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x14) #define TRACE_wq_override_reset \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x15) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x15) #define TRACE_wq_thread_create_failed \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x1d) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x1d) #define TRACE_wq_thread_terminate \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x1e) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x1e) #define TRACE_wq_thread_create \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x1f) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x1f) #define TRACE_wq_select_threadreq \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x20) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x20) #define TRACE_wq_creator_select \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x23) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x23) #define TRACE_wq_creator_yield \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x24) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x24) #define TRACE_wq_constrained_admission \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x25) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x25) #define TRACE_wq_wqops_reqthreads \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x26) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_WORKQUEUE_SUBCLASS, 0x26) #define TRACE_wq_create \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x01) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x01) #define TRACE_wq_destroy \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x02) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x02) #define TRACE_wq_thread_logical_run \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x03) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x03) #define TRACE_wq_thread_request_initiate \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x05) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x05) #define TRACE_wq_thread_request_modify \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x06) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x06) #define TRACE_wq_thread_request_fulfill \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x08) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_REQUESTS_SUBCLASS, 0x08) #define TRACE_bsdthread_set_qos_self \ - KDBG_CODE(DBG_PTHREAD, WQ_TRACE_BSDTHREAD_SUBCLASS, 0x1) + KDBG_CODE(DBG_PTHREAD, WQ_TRACE_BSDTHREAD_SUBCLASS, 0x1) -#define WQ_TRACE(x,a,b,c,d,e) \ - ({ KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e); }) -#define WQ_TRACE_WQ(x,wq,b,c,d,e) \ - ({ KERNEL_DEBUG_CONSTANT(x, (wq)->wq_proc->p_pid, b, c, d, e); }) +#define WQ_TRACE(x, a, b, c, d, e) \ + ({ KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e); }) +#define WQ_TRACE_WQ(x, wq, b, c, d, e) \ + ({ KERNEL_DEBUG_CONSTANT(x, (wq)->wq_proc->p_pid, b, c, d, e); }) #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) #define __wq_trace_only diff --git a/bsd/security/audit/audit.c b/bsd/security/audit/audit.c index e91adedf4..d6c156a1b 100644 --- a/bsd/security/audit/audit.c +++ b/bsd/security/audit/audit.c @@ -94,11 +94,11 @@ MALLOC_DEFINE(M_AUDITTEXT, "audit_text", "Audit 
text storage"); * * Define the audit control flags. */ -int audit_enabled; -int audit_suspended; +int audit_enabled; +int audit_suspended; -int audit_syscalls; -au_class_t audit_kevent_mask; +int audit_syscalls; +au_class_t audit_kevent_mask; /* * The audit control mode is used to ensure configuration settings are only @@ -111,31 +111,31 @@ au_expire_after_t audit_expire_after; * Flags controlling behavior in low storage situations. Should we panic if * a write fails? Should we fail stop if we're out of disk space? */ -int audit_panic_on_write_fail; -int audit_fail_stop; -int audit_argv; -int audit_arge; +int audit_panic_on_write_fail; +int audit_fail_stop; +int audit_argv; +int audit_arge; /* * Are we currently "failing stop" due to out of disk space? */ -int audit_in_failure; +int audit_in_failure; /* * Global audit statistics. */ -struct audit_fstat audit_fstat; +struct audit_fstat audit_fstat; /* * Preselection mask for non-attributable events. */ -struct au_mask audit_nae_mask; +struct au_mask audit_nae_mask; /* * Mutex to protect global variables shared between various threads and * processes. */ -struct mtx audit_mtx; +struct mtx audit_mtx; /* * Queue of audit records ready for delivery to disk. We insert new records @@ -145,42 +145,42 @@ struct mtx audit_mtx; * needed to estimate the total size of the combined set of records * outstanding in the system. */ -struct kaudit_queue audit_q; -int audit_q_len; -int audit_pre_q_len; +struct kaudit_queue audit_q; +int audit_q_len; +int audit_pre_q_len; /* * Audit queue control settings (minimum free, low/high water marks, etc.) */ -struct au_qctrl audit_qctrl; +struct au_qctrl audit_qctrl; /* * Condition variable to signal to the worker that it has work to do: either * new records are in the queue, or a log replacement is taking place. */ -struct cv audit_worker_cv; +struct cv audit_worker_cv; /* * Condition variable to signal when the worker is done draining the audit * queue. */ -struct cv audit_drain_cv; +struct cv audit_drain_cv; /* * Condition variable to flag when crossing the low watermark, meaning that * threads blocked due to hitting the high watermark can wake up and continue * to commit records. */ -struct cv audit_watermark_cv; +struct cv audit_watermark_cv; /* * Condition variable for auditing threads wait on when in fail-stop mode. * Threads wait on this CV forever (and ever), never seeing the light of day * again. */ -static struct cv audit_fail_cv; +static struct cv audit_fail_cv; -static zone_t audit_record_zone; +static zone_t audit_record_zone; /* * Kernel audit information. This will store the current audit address @@ -188,20 +188,19 @@ static zone_t audit_record_zone; * audit records. This data is modified by the A_GET{SET}KAUDIT auditon(2) * command. 
*/ -static struct auditinfo_addr audit_kinfo; -static struct rwlock audit_kinfo_lock; +static struct auditinfo_addr audit_kinfo; +static struct rwlock audit_kinfo_lock; -#define KINFO_LOCK_INIT() rw_init(&audit_kinfo_lock, \ - "audit_kinfo_lock") -#define KINFO_RLOCK() rw_rlock(&audit_kinfo_lock) -#define KINFO_WLOCK() rw_wlock(&audit_kinfo_lock) -#define KINFO_RUNLOCK() rw_runlock(&audit_kinfo_lock) -#define KINFO_WUNLOCK() rw_wunlock(&audit_kinfo_lock) +#define KINFO_LOCK_INIT() rw_init(&audit_kinfo_lock, \ + "audit_kinfo_lock") +#define KINFO_RLOCK() rw_rlock(&audit_kinfo_lock) +#define KINFO_WLOCK() rw_wlock(&audit_kinfo_lock) +#define KINFO_RUNLOCK() rw_runlock(&audit_kinfo_lock) +#define KINFO_WUNLOCK() rw_wunlock(&audit_kinfo_lock) void audit_set_kinfo(struct auditinfo_addr *ak) { - KASSERT(ak->ai_termid.at_type == AU_IPv4 || ak->ai_termid.at_type == AU_IPv6, ("audit_set_kinfo: invalid address type")); @@ -214,7 +213,6 @@ audit_set_kinfo(struct auditinfo_addr *ak) void audit_get_kinfo(struct auditinfo_addr *ak) { - KASSERT(audit_kinfo.ai_termid.at_type == AU_IPv4 || audit_kinfo.ai_termid.at_type == AU_IPv6, ("audit_set_kinfo: invalid address type")); @@ -240,8 +238,8 @@ audit_record_ctor(proc_t p, struct kaudit_record *ar) cred = kauth_cred_proc_ref(p); /* - * Export the subject credential. - */ + * Export the subject credential. + */ cru2x(cred, &ar->k_ar.ar_subj_cred); ar->k_ar.ar_subj_ruid = kauth_cred_getruid(cred); ar->k_ar.ar_subj_rgid = kauth_cred_getrgid(cred); @@ -250,7 +248,7 @@ audit_record_ctor(proc_t p, struct kaudit_record *ar) ar->k_ar.ar_subj_auid = cred->cr_audit.as_aia_p->ai_auid; ar->k_ar.ar_subj_asid = cred->cr_audit.as_aia_p->ai_asid; bcopy(&cred->cr_audit.as_mask, &ar->k_ar.ar_subj_amask, - sizeof(struct au_mask)); + sizeof(struct au_mask)); bcopy(&cred->cr_audit.as_aia_p->ai_termid, &ar->k_ar.ar_subj_term_addr, sizeof(struct au_tid_addr)); kauth_cred_unref(&cred); @@ -260,27 +258,36 @@ audit_record_ctor(proc_t p, struct kaudit_record *ar) static void audit_record_dtor(struct kaudit_record *ar) { - - if (ar->k_ar.ar_arg_upath1 != NULL) + if (ar->k_ar.ar_arg_upath1 != NULL) { free(ar->k_ar.ar_arg_upath1, M_AUDITPATH); - if (ar->k_ar.ar_arg_upath2 != NULL) + } + if (ar->k_ar.ar_arg_upath2 != NULL) { free(ar->k_ar.ar_arg_upath2, M_AUDITPATH); - if (ar->k_ar.ar_arg_kpath1 != NULL) + } + if (ar->k_ar.ar_arg_kpath1 != NULL) { free(ar->k_ar.ar_arg_kpath1, M_AUDITPATH); - if (ar->k_ar.ar_arg_kpath2 != NULL) + } + if (ar->k_ar.ar_arg_kpath2 != NULL) { free(ar->k_ar.ar_arg_kpath2, M_AUDITPATH); - if (ar->k_ar.ar_arg_text != NULL) + } + if (ar->k_ar.ar_arg_text != NULL) { free(ar->k_ar.ar_arg_text, M_AUDITTEXT); - if (ar->k_ar.ar_arg_opaque != NULL) + } + if (ar->k_ar.ar_arg_opaque != NULL) { free(ar->k_ar.ar_arg_opaque, M_AUDITDATA); - if (ar->k_ar.ar_arg_data != NULL) + } + if (ar->k_ar.ar_arg_data != NULL) { free(ar->k_ar.ar_arg_data, M_AUDITDATA); - if (ar->k_udata != NULL) + } + if (ar->k_udata != NULL) { free(ar->k_udata, M_AUDITDATA); - if (ar->k_ar.ar_arg_argv != NULL) + } + if (ar->k_ar.ar_arg_argv != NULL) { free(ar->k_ar.ar_arg_argv, M_AUDITTEXT); - if (ar->k_ar.ar_arg_envv != NULL) + } + if (ar->k_ar.ar_arg_envv != NULL) { free(ar->k_ar.ar_arg_envv, M_AUDITTEXT); + } audit_identity_info_destruct(&ar->k_ar.ar_arg_identity); } @@ -292,7 +299,6 @@ audit_record_dtor(struct kaudit_record *ar) void audit_init(void) { - audit_enabled = 0; audit_syscalls = 0; audit_kevent_mask = 0; @@ -307,7 +313,7 @@ audit_init(void) audit_expire_after.size = 0; 
audit_expire_after.op_type = AUDIT_EXPIRE_OP_AND; - audit_fstat.af_filesz = 0; /* '0' means unset, unbounded. */ + audit_fstat.af_filesz = 0; /* '0' means unset, unbounded. */ audit_fstat.af_currsz = 0; audit_nae_mask.am_success = 0; audit_nae_mask.am_failure = 0; @@ -332,7 +338,7 @@ audit_init(void) cv_init(&audit_fail_cv, "audit_fail_cv"); audit_record_zone = zinit(sizeof(struct kaudit_record), - AQ_HIWATER*sizeof(struct kaudit_record), 8192, "audit_zone"); + AQ_HIWATER * sizeof(struct kaudit_record), 8192, "audit_zone"); #if CONFIG_MACF audit_mac_init(); #endif @@ -359,7 +365,6 @@ audit_init(void) void audit_shutdown(void) { - audit_rotate_vnode(NULL, NULL); } @@ -369,8 +374,7 @@ audit_shutdown(void) struct kaudit_record * currecord(void) { - - return (curthread()->uu_ar); + return curthread()->uu_ar; } /* @@ -399,12 +403,13 @@ audit_new(int event, proc_t p, __unused struct uthread *uthread) audit_override = (AUE_SESSION_START == event || AUE_SESSION_UPDATE == event || AUE_SESSION_END == event || AUE_SESSION_CLOSE == event); - + mtx_lock(&audit_mtx); no_record = (audit_suspended || !audit_enabled); mtx_unlock(&audit_mtx); - if (!audit_override && no_record) - return (NULL); + if (!audit_override && no_record) { + return NULL; + } /* * Initialize the audit record header. @@ -415,8 +420,9 @@ audit_new(int event, proc_t p, __unused struct uthread *uthread) * in the kernel. */ ar = zalloc(audit_record_zone); - if (ar == NULL) + if (ar == NULL) { return NULL; + } audit_record_ctor(p, ar); ar->k_ar.ar_event = event; @@ -424,27 +430,28 @@ audit_new(int event, proc_t p, __unused struct uthread *uthread) if (PROC_NULL != p) { if (audit_mac_new(p, ar) != 0) { zfree(audit_record_zone, ar); - return (NULL); + return NULL; } - } else + } else { ar->k_ar.ar_mac_records = NULL; + } #endif mtx_lock(&audit_mtx); audit_pre_q_len++; mtx_unlock(&audit_mtx); - return (ar); + return ar; } void audit_free(struct kaudit_record *ar) { - audit_record_dtor(ar); #if CONFIG_MACF - if (NULL != ar->k_ar.ar_mac_records) + if (NULL != ar->k_ar.ar_mac_records) { audit_mac_free(ar); + } #endif zfree(audit_record_zone, ar); } @@ -459,24 +466,27 @@ audit_commit(struct kaudit_record *ar, int error, int retval) struct au_mask *aumask; int audit_override; - if (ar == NULL) + if (ar == NULL) { return; + } /* * Decide whether to commit the audit record by checking the error * value from the system call and using the appropriate audit mask. */ - if (ar->k_ar.ar_subj_auid == AU_DEFAUDITID) + if (ar->k_ar.ar_subj_auid == AU_DEFAUDITID) { aumask = &audit_nae_mask; - else + } else { aumask = &ar->k_ar.ar_subj_amask; + } - if (error) + if (error) { sorf = AU_PRS_FAILURE; - else + } else { sorf = AU_PRS_SUCCESS; + } - switch(ar->k_ar.ar_event) { + switch (ar->k_ar.ar_event) { case AUE_OPEN_RWTC: /* * The open syscall always writes a AUE_OPEN_RWTC event; @@ -484,7 +494,7 @@ audit_commit(struct kaudit_record *ar, int error, int retval) * and the error value. */ ar->k_ar.ar_event = audit_flags_and_error_to_openevent( - ar->k_ar.ar_arg_fflags, error); + ar->k_ar.ar_arg_fflags, error); break; case AUE_OPEN_EXTENDED_RWTC: @@ -494,7 +504,7 @@ audit_commit(struct kaudit_record *ar, int error, int retval) * event based on the flags and the error value. */ ar->k_ar.ar_event = audit_flags_and_error_to_openextendedevent( - ar->k_ar.ar_arg_fflags, error); + ar->k_ar.ar_arg_fflags, error); break; case AUE_OPENAT_RWTC: @@ -504,7 +514,7 @@ audit_commit(struct kaudit_record *ar, int error, int retval) * event based on the flags and the error value. 
*/ ar->k_ar.ar_event = audit_flags_and_error_to_openatevent( - ar->k_ar.ar_arg_fflags, error); + ar->k_ar.ar_arg_fflags, error); break; case AUE_OPENBYID_RWT: @@ -514,12 +524,12 @@ audit_commit(struct kaudit_record *ar, int error, int retval) * event based on the flags and the error value. */ ar->k_ar.ar_event = audit_flags_and_error_to_openbyidevent( - ar->k_ar.ar_arg_fflags, error); + ar->k_ar.ar_arg_fflags, error); break; case AUE_SYSCTL: ar->k_ar.ar_event = audit_ctlname_to_sysctlevent( - ar->k_ar.ar_arg_ctlname, ar->k_ar.ar_valid_arg); + ar->k_ar.ar_arg_ctlname, ar->k_ar.ar_valid_arg); break; case AUE_AUDITON: @@ -530,7 +540,7 @@ audit_commit(struct kaudit_record *ar, int error, int retval) case AUE_FCNTL: /* Convert some fcntl() commands to their own events. */ ar->k_ar.ar_event = audit_fcntl_command_event( - ar->k_ar.ar_arg_cmd, ar->k_ar.ar_arg_fflags, error); + ar->k_ar.ar_arg_cmd, ar->k_ar.ar_arg_fflags, error); break; } @@ -550,11 +560,13 @@ audit_commit(struct kaudit_record *ar, int error, int retval) AUE_SESSION_CLOSE == event); ar->k_ar_commit |= AR_COMMIT_KERNEL; - if (au_preselect(event, class, aumask, sorf) != 0) + if (au_preselect(event, class, aumask, sorf) != 0) { ar->k_ar_commit |= AR_PRESELECT_TRAIL; + } if (audit_pipe_preselect(auid, event, class, sorf, - ar->k_ar_commit & AR_PRESELECT_TRAIL) != 0) + ar->k_ar_commit & AR_PRESELECT_TRAIL) != 0) { ar->k_ar_commit |= AR_PRESELECT_PIPE; + } if ((ar->k_ar_commit & (AR_PRESELECT_TRAIL | AR_PRESELECT_PIPE | AR_PRESELECT_USER_TRAIL | AR_PRESELECT_USER_PIPE | AR_PRESELECT_FILTER)) == 0) { @@ -585,8 +597,9 @@ audit_commit(struct kaudit_record *ar, int error, int retval) * Constrain the number of committed audit records based on the * configurable parameter. */ - while (audit_q_len >= audit_qctrl.aq_hiwater) + while (audit_q_len >= audit_qctrl.aq_hiwater) { cv_wait(&audit_watermark_cv, &audit_mtx); + } TAILQ_INSERT_TAIL(&audit_q, ar, k_q); audit_q_len++; @@ -622,11 +635,13 @@ audit_syscall_enter(unsigned int code, proc_t proc, struct uthread *uthread) * the syscall table(s). This table is generated by makesyscalls.sh * from syscalls.master and stored in audit_kevents.c. */ - if (code >= nsysent) + if (code >= nsysent) { return; + } event = sys_au_event[code]; - if (event == AUE_NULL) + if (event == AUE_NULL) { return; + } KASSERT(uthread->uu_ar == NULL, ("audit_syscall_enter: uthread->uu_ar != NULL")); @@ -637,10 +652,11 @@ audit_syscall_enter(unsigned int code, proc_t proc, struct uthread *uthread) */ cred = kauth_cred_proc_ref(proc); auid = cred->cr_audit.as_aia_p->ai_auid; - if (auid == AU_DEFAUDITID) + if (auid == AU_DEFAUDITID) { aumask = &audit_nae_mask; - else + } else { aumask = &cred->cr_audit.as_mask; + } /* * Allocate an audit record, if preselection allows it, and store in @@ -652,8 +668,9 @@ audit_syscall_enter(unsigned int code, proc_t proc, struct uthread *uthread) * Note: audit_mac_syscall_enter() may call audit_new() and allocate * memory for the audit record (uu_ar). 
*/ - if (audit_mac_syscall_enter(code, proc, uthread, cred, event) == 0) + if (audit_mac_syscall_enter(code, proc, uthread, cred, event) == 0) { goto out; + } #endif if (au_preselect(event, class, aumask, AU_PRS_BOTH)) { /* @@ -673,12 +690,14 @@ audit_syscall_enter(unsigned int code, proc_t proc, struct uthread *uthread) cv_wait(&audit_fail_cv, &audit_mtx); panic("audit_failing_stop: thread continued"); } - if (uthread->uu_ar == NULL) + if (uthread->uu_ar == NULL) { uthread->uu_ar = audit_new(event, proc, uthread); + } } else if (audit_pipe_preselect(auid, event, class, AU_PRS_BOTH, 0)) { - if (uthread->uu_ar == NULL) + if (uthread->uu_ar == NULL) { uthread->uu_ar = audit_new(event, proc, uthread); - } + } + } /* * All audited events will contain an identity @@ -718,14 +737,16 @@ audit_syscall_exit(int error, __unused proc_t proc, struct uthread *uthread) * If there was an error, the return value is set to -1, imitating * the behavior of the cerror routine. */ - if (error) + if (error) { retval = -1; - else + } else { retval = uthread->uu_rval[0]; + } #if CONFIG_MACF - if (audit_mac_syscall_exit(code, uthread, error, retval) != 0) + if (audit_mac_syscall_exit(code, uthread, error, retval) != 0) { goto out; + } #endif audit_commit(uthread->uu_ar, error, retval); @@ -747,16 +768,19 @@ audit_mach_syscall_enter(unsigned short event) au_class_t class; au_id_t auid; - if (event == AUE_NULL) + if (event == AUE_NULL) { return; + } uthread = curthread(); - if (uthread == NULL) + if (uthread == NULL) { return; + } proc = current_proc(); - if (proc == NULL) + if (proc == NULL) { return; + } KASSERT(uthread->uu_ar == NULL, ("audit_mach_syscall_enter: uthread->uu_ar != NULL")); @@ -768,22 +792,24 @@ audit_mach_syscall_enter(unsigned short event) * Check which audit mask to use; either the kernel non-attributable * event mask or the process audit mask. */ - if (auid == AU_DEFAUDITID) + if (auid == AU_DEFAUDITID) { aumask = &audit_nae_mask; - else + } else { aumask = &cred->cr_audit.as_mask; + } /* * Allocate an audit record, if desired, and store in the BSD thread * for later use. */ class = au_event_class(event); - if (au_preselect(event, class, aumask, AU_PRS_BOTH)) + if (au_preselect(event, class, aumask, AU_PRS_BOTH)) { uthread->uu_ar = audit_new(event, proc, uthread); - else if (audit_pipe_preselect(auid, event, class, AU_PRS_BOTH, 0)) + } else if (audit_pipe_preselect(auid, event, class, AU_PRS_BOTH, 0)) { uthread->uu_ar = audit_new(event, proc, uthread); - else + } else { uthread->uu_ar = NULL; + } kauth_cred_unref(&cred); } @@ -808,8 +834,7 @@ audit_mach_syscall_exit(int retval, struct uthread *uthread) int kau_will_audit(void) { - - return (audit_enabled && currecord() != NULL); + return audit_enabled && currecord() != NULL; } #if CONFIG_COREDUMP @@ -832,23 +857,26 @@ audit_proc_coredump(proc_t proc, char *path, int errcode) */ my_cred = kauth_cred_proc_ref(proc); auid = my_cred->cr_audit.as_aia_p->ai_auid; - if (auid == AU_DEFAUDITID) + if (auid == AU_DEFAUDITID) { aumask = &audit_nae_mask; - else + } else { aumask = &my_cred->cr_audit.as_mask; + } kauth_cred_unref(&my_cred); /* * It's possible for coredump(9) generation to fail. Make sure that * we handle this case correctly for preselection.
*/ - if (errcode != 0) + if (errcode != 0) { sorf = AU_PRS_FAILURE; - else + } else { sorf = AU_PRS_SUCCESS; + } class = au_event_class(AUE_CORE); if (au_preselect(AUE_CORE, class, aumask, sorf) == 0 && - audit_pipe_preselect(auid, AUE_CORE, class, sorf, 0) == 0) + audit_pipe_preselect(auid, AUE_CORE, class, sorf, 0) == 0) { return; + } /* * If we are interested in seeing this audit record, allocate it. * Where possible coredump records should contain a pathname and arg32 @@ -860,15 +888,17 @@ audit_proc_coredump(proc_t proc, char *path, int errcode) pathp = &ar->k_ar.ar_arg_upath1; *pathp = malloc(MAXPATHLEN, M_AUDITPATH, M_WAITOK); if (audit_canon_path(vfs_context_cwd(vfs_context_current()), path, - *pathp)) + *pathp)) { free(*pathp, M_AUDITPATH); - else + } else { ARG_SET_VALID(ar, ARG_UPATH1); + } } ar->k_ar.ar_arg_signum = proc->p_sigacts->ps_sig; ARG_SET_VALID(ar, ARG_SIGNUM); - if (errcode != 0) + if (errcode != 0) { ret = 1; + } audit_commit(ar, errcode, ret); } #endif /* CONFIG_COREDUMP */ diff --git a/bsd/security/audit/audit.h b/bsd/security/audit/audit.h index 08766aa00..13a1b8c24 100644 --- a/bsd/security/audit/audit.h +++ b/bsd/security/audit/audit.h @@ -40,7 +40,7 @@ */ #ifndef _SECURITY_AUDIT_AUDIT_H -#define _SECURITY_AUDIT_AUDIT_H +#define _SECURITY_AUDIT_AUDIT_H #if defined(_KERNEL) || defined(KERNEL) @@ -60,9 +60,9 @@ * performance so an event class map table lookup doesn't have be done for * every system call if only user events are being audited. */ -extern int audit_enabled; -extern int audit_suspended; -extern int audit_syscalls; +extern int audit_enabled; +extern int audit_suspended; +extern int audit_syscalls; /* * Define the masks for the audited arguments. @@ -72,67 +72,67 @@ extern int audit_syscalls; * vnode is being logged. These should move to audit_private.h when that is * fixed. 
*/ -#define ARG_EUID 0x0000000000000001ULL -#define ARG_RUID 0x0000000000000002ULL -#define ARG_SUID 0x0000000000000004ULL -#define ARG_EGID 0x0000000000000008ULL -#define ARG_RGID 0x0000000000000010ULL -#define ARG_SGID 0x0000000000000020ULL -#define ARG_PID 0x0000000000000040ULL -#define ARG_UID 0x0000000000000080ULL -#define ARG_AUID 0x0000000000000100ULL -#define ARG_GID 0x0000000000000200ULL -#define ARG_FD 0x0000000000000400ULL -#define ARG_FD1 ARG_FD -#define ARG_POSIX_IPC_PERM 0x0000000000000800ULL -#define ARG_FFLAGS 0x0000000000001000ULL -#define ARG_MODE 0x0000000000002000ULL -#define ARG_VALUE32 0x0000000000004000ULL -#define ARG_ADDR32 0x0000000000008000ULL -#define ARG_ADDR ARG_ADDR32 -#define ARG_LEN 0x0000000000010000ULL -#define ARG_MASK 0x0000000000020000ULL -#define ARG_SIGNUM 0x0000000000040000ULL -#define ARG_LOGIN 0x0000000000080000ULL -#define ARG_SADDRINET 0x0000000000100000ULL -#define ARG_SADDRINET6 0x0000000000200000ULL -#define ARG_SADDRUNIX 0x0000000000400000ULL -#define ARG_TERMID_ADDR ARG_SADDRUNIX -#define ARG_KPATH1 0x0000000000800000ULL /* darwin-only */ -#define ARG_KPATH2 0x0000000001000000ULL /* darwin-only */ -#define ARG_UPATH1 0x0000000002000000ULL -#define ARG_UPATH2 0x0000000004000000ULL -#define ARG_TEXT 0x0000000008000000ULL -#define ARG_VNODE1 0x0000000010000000ULL -#define ARG_VNODE2 0x0000000020000000ULL -#define ARG_SVIPC_CMD 0x0000000040000000ULL -#define ARG_SVIPC_PERM 0x0000000080000000ULL -#define ARG_SVIPC_ID 0x0000000100000000ULL -#define ARG_SVIPC_ADDR 0x0000000200000000ULL -#define ARG_GROUPSET 0x0000000400000000ULL -#define ARG_CMD 0x0000000800000000ULL -#define ARG_SOCKINFO 0x0000001000000000ULL -#define ARG_ASID 0x0000002000000000ULL -#define ARG_TERMID 0x0000004000000000ULL -#define ARG_AUDITON 0x0000008000000000ULL -#define ARG_VALUE64 0x0000010000000000ULL /* darwin-only */ -#define ARG_AMASK 0x0000020000000000ULL -#define ARG_CTLNAME 0x0000040000000000ULL -#define ARG_PROCESS 0x0000080000000000ULL -#define ARG_MACHPORT1 0x0000100000000000ULL -#define ARG_MACHPORT2 0x0000200000000000ULL -#define ARG_MAC_STRING 0x0000400000000000ULL -#define ARG_EXIT 0x0000800000000000ULL -#define ARG_IOVECSTR 0x0001000000000000ULL -#define ARG_ARGV 0x0002000000000000ULL -#define ARG_ENVV 0x0004000000000000ULL -#define ARG_OPAQUE 0x0008000000000000ULL /* darwin-only */ -#define ARG_DATA 0x0010000000000000ULL /* darwin-only */ -#define ARG_ADDR64 0x0020000000000000ULL /* darwin-only */ -#define ARG_FD2 0x0040000000000000ULL /* darwin-only */ -#define ARG_IDENTITY 0x0080000000000000ULL /* darwin-only */ -#define ARG_NONE 0x0000000000000000ULL -#define ARG_ALL 0xFFFFFFFFFFFFFFFFULL +#define ARG_EUID 0x0000000000000001ULL +#define ARG_RUID 0x0000000000000002ULL +#define ARG_SUID 0x0000000000000004ULL +#define ARG_EGID 0x0000000000000008ULL +#define ARG_RGID 0x0000000000000010ULL +#define ARG_SGID 0x0000000000000020ULL +#define ARG_PID 0x0000000000000040ULL +#define ARG_UID 0x0000000000000080ULL +#define ARG_AUID 0x0000000000000100ULL +#define ARG_GID 0x0000000000000200ULL +#define ARG_FD 0x0000000000000400ULL +#define ARG_FD1 ARG_FD +#define ARG_POSIX_IPC_PERM 0x0000000000000800ULL +#define ARG_FFLAGS 0x0000000000001000ULL +#define ARG_MODE 0x0000000000002000ULL +#define ARG_VALUE32 0x0000000000004000ULL +#define ARG_ADDR32 0x0000000000008000ULL +#define ARG_ADDR ARG_ADDR32 +#define ARG_LEN 0x0000000000010000ULL +#define ARG_MASK 0x0000000000020000ULL +#define ARG_SIGNUM 0x0000000000040000ULL +#define ARG_LOGIN 0x0000000000080000ULL +#define 
ARG_SADDRINET 0x0000000000100000ULL +#define ARG_SADDRINET6 0x0000000000200000ULL +#define ARG_SADDRUNIX 0x0000000000400000ULL +#define ARG_TERMID_ADDR ARG_SADDRUNIX +#define ARG_KPATH1 0x0000000000800000ULL /* darwin-only */ +#define ARG_KPATH2 0x0000000001000000ULL /* darwin-only */ +#define ARG_UPATH1 0x0000000002000000ULL +#define ARG_UPATH2 0x0000000004000000ULL +#define ARG_TEXT 0x0000000008000000ULL +#define ARG_VNODE1 0x0000000010000000ULL +#define ARG_VNODE2 0x0000000020000000ULL +#define ARG_SVIPC_CMD 0x0000000040000000ULL +#define ARG_SVIPC_PERM 0x0000000080000000ULL +#define ARG_SVIPC_ID 0x0000000100000000ULL +#define ARG_SVIPC_ADDR 0x0000000200000000ULL +#define ARG_GROUPSET 0x0000000400000000ULL +#define ARG_CMD 0x0000000800000000ULL +#define ARG_SOCKINFO 0x0000001000000000ULL +#define ARG_ASID 0x0000002000000000ULL +#define ARG_TERMID 0x0000004000000000ULL +#define ARG_AUDITON 0x0000008000000000ULL +#define ARG_VALUE64 0x0000010000000000ULL /* darwin-only */ +#define ARG_AMASK 0x0000020000000000ULL +#define ARG_CTLNAME 0x0000040000000000ULL +#define ARG_PROCESS 0x0000080000000000ULL +#define ARG_MACHPORT1 0x0000100000000000ULL +#define ARG_MACHPORT2 0x0000200000000000ULL +#define ARG_MAC_STRING 0x0000400000000000ULL +#define ARG_EXIT 0x0000800000000000ULL +#define ARG_IOVECSTR 0x0001000000000000ULL +#define ARG_ARGV 0x0002000000000000ULL +#define ARG_ENVV 0x0004000000000000ULL +#define ARG_OPAQUE 0x0008000000000000ULL /* darwin-only */ +#define ARG_DATA 0x0010000000000000ULL /* darwin-only */ +#define ARG_ADDR64 0x0020000000000000ULL /* darwin-only */ +#define ARG_FD2 0x0040000000000000ULL /* darwin-only */ +#define ARG_IDENTITY 0x0080000000000000ULL /* darwin-only */ +#define ARG_NONE 0x0000000000000000ULL +#define ARG_ALL 0xFFFFFFFFFFFFFFFFULL #if CONFIG_MACF @@ -141,10 +141,10 @@ extern int audit_syscalls; #define MAC_AUDIT_TEXT_TYPE 1 struct mac_audit_record { - int type; /* one of the types defined above */ - int length; /* byte length of the data field */ - u_char *data; /* the payload */ - LIST_ENTRY(mac_audit_record) records; + int type; /* one of the types defined above */ + int length; /* byte length of the data field */ + u_char *data; /* the payload */ + LIST_ENTRY(mac_audit_record) records; }; #endif @@ -153,24 +153,24 @@ struct proc; struct vnode; struct componentname; -int kau_will_audit(void); -void audit_init(void); -void audit_shutdown(void); -void audit_syscall_enter(unsigned int code, - struct proc *proc, struct uthread *uthread); +int kau_will_audit(void); +void audit_init(void); +void audit_shutdown(void); +void audit_syscall_enter(unsigned int code, + struct proc *proc, struct uthread *uthread); #if CONFIG_MACF /* * The parameter list of audit_syscall_exit() was modified to also take the * Darwin syscall number, which is required by mac_audit_check_postselect(). 
*/ -void audit_syscall_exit(unsigned int code, int error, - struct proc *proc, struct uthread *uthread); +void audit_syscall_exit(unsigned int code, int error, + struct proc *proc, struct uthread *uthread); #else -void audit_syscall_exit(int error, struct proc *proc, - struct uthread *uthread); +void audit_syscall_exit(int error, struct proc *proc, + struct uthread *uthread); #endif -void audit_mach_syscall_enter(unsigned short audit_event); -void audit_mach_syscall_exit(int retval, struct uthread *uthread); +void audit_mach_syscall_enter(unsigned short audit_event); +void audit_mach_syscall_exit(int retval, struct uthread *uthread); extern struct auditinfo_addr *audit_default_aia_p; @@ -183,139 +183,139 @@ extern struct auditinfo_addr *audit_default_aia_p; struct ipc_perm; struct sockaddr; union auditon_udata; -void audit_arg_addr(struct kaudit_record *ar, user_addr_t addr); -void audit_arg_exit(struct kaudit_record *ar, int status, int retval); -void audit_arg_len(struct kaudit_record *ar, user_size_t len); -void audit_arg_fd(struct kaudit_record *ar, int fd); -void audit_arg_fd2(struct kaudit_record *ar, int fd); -void audit_arg_fflags(struct kaudit_record *ar, int fflags); -void audit_arg_gid(struct kaudit_record *ar, gid_t gid); -void audit_arg_uid(struct kaudit_record *ar, uid_t uid); -void audit_arg_egid(struct kaudit_record *ar, gid_t egid); -void audit_arg_euid(struct kaudit_record *ar, uid_t euid); -void audit_arg_rgid(struct kaudit_record *ar, gid_t rgid); -void audit_arg_ruid(struct kaudit_record *ar, uid_t ruid); -void audit_arg_sgid(struct kaudit_record *ar, gid_t sgid); -void audit_arg_suid(struct kaudit_record *ar, uid_t suid); -void audit_arg_groupset(struct kaudit_record *ar, gid_t *gidset, - u_int gidset_size); -void audit_arg_login(struct kaudit_record *ar, char *login); -void audit_arg_ctlname(struct kaudit_record *ar, int *name, int namelen); -void audit_arg_mask(struct kaudit_record *ar, int mask); -void audit_arg_mode(struct kaudit_record *ar, mode_t mode); -void audit_arg_value32(struct kaudit_record *ar, uint32_t value32); -void audit_arg_value64(struct kaudit_record *ar, uint64_t value64); -void audit_arg_owner(struct kaudit_record *ar, uid_t uid, gid_t gid); -void audit_arg_pid(struct kaudit_record *ar, pid_t pid); -void audit_arg_process(struct kaudit_record *ar, proc_t p); -void audit_arg_signum(struct kaudit_record *ar, u_int signum); -void audit_arg_socket(struct kaudit_record *ar, int sodomain, int sotype, - int soprotocol); -void audit_arg_sockaddr(struct kaudit_record *ar, struct vnode *cwd_vp, - struct sockaddr *so); -void audit_arg_auid(struct kaudit_record *ar, uid_t auid); -void audit_arg_auditinfo(struct kaudit_record *ar, - struct auditinfo *au_info); -void audit_arg_auditinfo_addr(struct kaudit_record *ar, - struct auditinfo_addr *au_info); -void audit_arg_upath(struct kaudit_record *ar, struct vnode *cwd_vp, - char *upath, u_int64_t flags); -void audit_arg_kpath(struct kaudit_record *ar, - char *kpath, u_int64_t flags); -void audit_arg_vnpath(struct kaudit_record *ar, struct vnode *vp, - u_int64_t flags); -void audit_arg_vnpath_withref(struct kaudit_record *ar, struct vnode *vp, - u_int64_t flags); -void audit_arg_text(struct kaudit_record *ar, char *text); -void audit_arg_opaque(struct kaudit_record *ar, void *data, size_t size); -void audit_arg_data(struct kaudit_record *ar, void *data, size_t size, - size_t number); -void audit_arg_cmd(struct kaudit_record *ar, int cmd); -void audit_arg_svipc_cmd(struct kaudit_record *ar, int cmd); -void 
audit_arg_svipc_perm(struct kaudit_record *ar, struct ipc_perm *perm); -void audit_arg_svipc_id(struct kaudit_record *ar, int id); -void audit_arg_svipc_addr(struct kaudit_record *ar, user_addr_t addr); -void audit_arg_posix_ipc_perm(struct kaudit_record *ar, uid_t uid, - gid_t gid, mode_t mode); -void audit_arg_auditon(struct kaudit_record *ar, - union auditon_udata *udata); -void audit_arg_file(struct kaudit_record *ar, struct proc *p, - struct fileproc *fp); -void audit_arg_argv(struct kaudit_record *ar, char *argv, int argc, - int length); -void audit_arg_envv(struct kaudit_record *ar, char *envv, int envc, - int length); -void audit_arg_identity(struct kaudit_record *ar); - -void audit_arg_mach_port1(struct kaudit_record *ar, mach_port_name_t port); -void audit_arg_mach_port2(struct kaudit_record *ar, mach_port_name_t port); -void audit_sysclose(struct kaudit_record *ar, struct proc *p, int fd); +void audit_arg_addr(struct kaudit_record *ar, user_addr_t addr); +void audit_arg_exit(struct kaudit_record *ar, int status, int retval); +void audit_arg_len(struct kaudit_record *ar, user_size_t len); +void audit_arg_fd(struct kaudit_record *ar, int fd); +void audit_arg_fd2(struct kaudit_record *ar, int fd); +void audit_arg_fflags(struct kaudit_record *ar, int fflags); +void audit_arg_gid(struct kaudit_record *ar, gid_t gid); +void audit_arg_uid(struct kaudit_record *ar, uid_t uid); +void audit_arg_egid(struct kaudit_record *ar, gid_t egid); +void audit_arg_euid(struct kaudit_record *ar, uid_t euid); +void audit_arg_rgid(struct kaudit_record *ar, gid_t rgid); +void audit_arg_ruid(struct kaudit_record *ar, uid_t ruid); +void audit_arg_sgid(struct kaudit_record *ar, gid_t sgid); +void audit_arg_suid(struct kaudit_record *ar, uid_t suid); +void audit_arg_groupset(struct kaudit_record *ar, gid_t *gidset, + u_int gidset_size); +void audit_arg_login(struct kaudit_record *ar, char *login); +void audit_arg_ctlname(struct kaudit_record *ar, int *name, int namelen); +void audit_arg_mask(struct kaudit_record *ar, int mask); +void audit_arg_mode(struct kaudit_record *ar, mode_t mode); +void audit_arg_value32(struct kaudit_record *ar, uint32_t value32); +void audit_arg_value64(struct kaudit_record *ar, uint64_t value64); +void audit_arg_owner(struct kaudit_record *ar, uid_t uid, gid_t gid); +void audit_arg_pid(struct kaudit_record *ar, pid_t pid); +void audit_arg_process(struct kaudit_record *ar, proc_t p); +void audit_arg_signum(struct kaudit_record *ar, u_int signum); +void audit_arg_socket(struct kaudit_record *ar, int sodomain, int sotype, + int soprotocol); +void audit_arg_sockaddr(struct kaudit_record *ar, struct vnode *cwd_vp, + struct sockaddr *so); +void audit_arg_auid(struct kaudit_record *ar, uid_t auid); +void audit_arg_auditinfo(struct kaudit_record *ar, + struct auditinfo *au_info); +void audit_arg_auditinfo_addr(struct kaudit_record *ar, + struct auditinfo_addr *au_info); +void audit_arg_upath(struct kaudit_record *ar, struct vnode *cwd_vp, + char *upath, u_int64_t flags); +void audit_arg_kpath(struct kaudit_record *ar, + char *kpath, u_int64_t flags); +void audit_arg_vnpath(struct kaudit_record *ar, struct vnode *vp, + u_int64_t flags); +void audit_arg_vnpath_withref(struct kaudit_record *ar, struct vnode *vp, + u_int64_t flags); +void audit_arg_text(struct kaudit_record *ar, char *text); +void audit_arg_opaque(struct kaudit_record *ar, void *data, size_t size); +void audit_arg_data(struct kaudit_record *ar, void *data, size_t size, + size_t number); +void audit_arg_cmd(struct 
kaudit_record *ar, int cmd); +void audit_arg_svipc_cmd(struct kaudit_record *ar, int cmd); +void audit_arg_svipc_perm(struct kaudit_record *ar, struct ipc_perm *perm); +void audit_arg_svipc_id(struct kaudit_record *ar, int id); +void audit_arg_svipc_addr(struct kaudit_record *ar, user_addr_t addr); +void audit_arg_posix_ipc_perm(struct kaudit_record *ar, uid_t uid, + gid_t gid, mode_t mode); +void audit_arg_auditon(struct kaudit_record *ar, + union auditon_udata *udata); +void audit_arg_file(struct kaudit_record *ar, struct proc *p, + struct fileproc *fp); +void audit_arg_argv(struct kaudit_record *ar, char *argv, int argc, + int length); +void audit_arg_envv(struct kaudit_record *ar, char *envv, int envc, + int length); +void audit_arg_identity(struct kaudit_record *ar); + +void audit_arg_mach_port1(struct kaudit_record *ar, mach_port_name_t port); +void audit_arg_mach_port2(struct kaudit_record *ar, mach_port_name_t port); +void audit_sysclose(struct kaudit_record *ar, struct proc *p, int fd); void audit_proc_coredump(proc_t proc, char *path, int errcode); -void audit_proc_init(struct proc *p); -void audit_proc_fork(struct proc *parent, struct proc *child); -void audit_proc_free(struct proc *p); +void audit_proc_init(struct proc *p); +void audit_proc_fork(struct proc *parent, struct proc *child); +void audit_proc_free(struct proc *p); #ifndef _KAUTH_CRED_T -#define _KAUTH_CRED_T +#define _KAUTH_CRED_T struct ucred; typedef struct ucred *kauth_cred_t; #endif /* !_KAUTH_CRED_T */ -void audit_session_ref(kauth_cred_t cred); -void audit_session_unref(kauth_cred_t cred); -void audit_session_procnew(proc_t p); -void audit_session_procexit(proc_t p); -int audit_session_spawnjoin(proc_t p, task_t task, ipc_port_t port); +void audit_session_ref(kauth_cred_t cred); +void audit_session_unref(kauth_cred_t cred); +void audit_session_procnew(proc_t p); +void audit_session_procexit(proc_t p); +int audit_session_spawnjoin(proc_t p, task_t task, ipc_port_t port); -void audit_sdev_submit(au_id_t auid, au_asid_t asid, void *record, - u_int record_len); +void audit_sdev_submit(au_id_t auid, au_asid_t asid, void *record, + u_int record_len); /* - * Audit session macros. + * Audit session macros. */ -#define IS_VALID_SESSION(a) ((a) != NULL && (a) != audit_default_aia_p) +#define IS_VALID_SESSION(a) ((a) != NULL && (a) != audit_default_aia_p) -#define AUDIT_SESSION_REF(cred) audit_session_ref(cred) -#define AUDIT_SESSION_UNREF(cred) audit_session_unref(cred) +#define AUDIT_SESSION_REF(cred) audit_session_ref(cred) +#define AUDIT_SESSION_UNREF(cred) audit_session_unref(cred) -#define AUDIT_SESSION_PROCNEW(p) audit_session_procnew(p) -#define AUDIT_SESSION_PROCEXIT(p) audit_session_procexit(p) +#define AUDIT_SESSION_PROCNEW(p) audit_session_procnew(p) +#define AUDIT_SESSION_PROCEXIT(p) audit_session_procexit(p) #if CONFIG_MACF -/* +/* * audit_mac_data() is the MAC Framework's entry point to the audit subsystem. * It currently creates only text and data audit tokens. 
*/ -int audit_mac_data(int type, int len, u_char *data); -void audit_arg_mac_string(struct kaudit_record *ar, char *string); +int audit_mac_data(int type, int len, u_char *data); +void audit_arg_mac_string(struct kaudit_record *ar, char *string); #endif extern au_event_t sys_au_event[]; -#define AUDIT_RECORD() \ +#define AUDIT_RECORD() \ ((struct uthread*)get_bsdthread_info(current_thread()))->uu_ar -#ifndef AUDIT_USE_BUILTIN_EXPECT -#define AUDIT_USE_BUILTIN_EXPECT +#ifndef AUDIT_USE_BUILTIN_EXPECT +#define AUDIT_USE_BUILTIN_EXPECT #endif -#ifdef AUDIT_USE_BUILTIN_EXPECT +#ifdef AUDIT_USE_BUILTIN_EXPECT /* - * Use branch prediction for the case of auditing enabled but not + * Use branch prediction for the case of auditing enabled but not * auditing system calls. */ -#define AUDIT_SYSCALLS() __builtin_expect(audit_syscalls, 0) -#define AUDIT_ENABLED() __builtin_expect(audit_syscalls && \ - audit_enabled, 0) -#define AUDIT_AUDITING(x) __builtin_expect(NULL != (x), 0) +#define AUDIT_SYSCALLS() __builtin_expect(audit_syscalls, 0) +#define AUDIT_ENABLED() __builtin_expect(audit_syscalls && \ + audit_enabled, 0) +#define AUDIT_AUDITING(x) __builtin_expect(NULL != (x), 0) #else -#define AUDIT_SYSCALLS() (audit_syscalls) -#define AUDIT_ENABLED() (audit_syscalls && audit_enabled) -#define AUDIT_AUDITING(x) (NULL != (x)) +#define AUDIT_SYSCALLS() (audit_syscalls) +#define AUDIT_ENABLED() (audit_syscalls && audit_enabled) +#define AUDIT_AUDITING(x) (NULL != (x)) #endif /* AUDIT_USE_BUILTIN_EXPECT */ @@ -325,102 +325,102 @@ extern au_event_t sys_au_event[]; * Define a macro to wrap the audit_arg_* calls by checking the global * audit_enabled flag before performing the actual call. */ -#define AUDIT_ARG(op, args...) do { \ - if (AUDIT_SYSCALLS()) { \ - struct kaudit_record *__ar = AUDIT_RECORD(); \ - if (AUDIT_AUDITING(__ar)) \ - audit_arg_ ## op (__ar, ## args); \ - } \ +#define AUDIT_ARG(op, args...) do { \ + if (AUDIT_SYSCALLS()) { \ + struct kaudit_record *__ar = AUDIT_RECORD(); \ + if (AUDIT_AUDITING(__ar)) \ + audit_arg_ ## op (__ar, ## args); \ + } \ } while (0) -#define AUDIT_SYSCALL_ENTER(args...) do { \ - if (AUDIT_ENABLED()) { \ - audit_syscall_enter(args); \ - } \ +#define AUDIT_SYSCALL_ENTER(args...) do { \ + if (AUDIT_ENABLED()) { \ + audit_syscall_enter(args); \ + } \ } while (0) /* * Wrap the audit_syscall_exit() function so that it is called only when - * we have a audit record on the thread. Audit records can persist after - * auditing is disabled, so we don't just check audit_enabled here. + * we have a audit record on the thread. Audit records can persist after + * auditing is disabled, so we don't just check audit_enabled here. */ -#define AUDIT_SYSCALL_EXIT(code, proc, uthread, error) do { \ - if (AUDIT_AUDITING(uthread->uu_ar)) \ - audit_syscall_exit(code, error, proc, uthread); \ +#define AUDIT_SYSCALL_EXIT(code, proc, uthread, error) do { \ + if (AUDIT_AUDITING(uthread->uu_ar)) \ + audit_syscall_exit(code, error, proc, uthread); \ } while (0) /* * Wrap the audit_mach_syscall_enter() and audit_mach_syscall_exit() * functions in a manner similar to other system call enter/exit functions. */ -#define AUDIT_MACH_SYSCALL_ENTER(args...) do { \ - if (AUDIT_ENABLED()) { \ - audit_mach_syscall_enter(args); \ - } \ +#define AUDIT_MACH_SYSCALL_ENTER(args...) 
do { \ + if (AUDIT_ENABLED()) { \ + audit_mach_syscall_enter(args); \ + } \ } while (0) -#define AUDIT_MACH_SYSCALL_EXIT(retval) do { \ - if (AUDIT_SYSCALLS()) { \ - struct uthread *__uthread = \ - get_bsdthread_info(current_thread()); \ - if (AUDIT_AUDITING(__uthread->uu_ar)) \ - audit_mach_syscall_exit(retval, __uthread); \ - } \ +#define AUDIT_MACH_SYSCALL_EXIT(retval) do { \ + if (AUDIT_SYSCALLS()) { \ + struct uthread *__uthread = \ + get_bsdthread_info(current_thread()); \ + if (AUDIT_AUDITING(__uthread->uu_ar)) \ + audit_mach_syscall_exit(retval, __uthread); \ + } \ } while (0) /* * A Macro to wrap the audit_sysclose() function. */ -#define AUDIT_SYSCLOSE(args...) do { \ - if (AUDIT_SYSCALLS()) { \ - struct kaudit_record *__ar = AUDIT_RECORD(); \ - if (AUDIT_AUDITING(__ar)) \ - audit_sysclose(__ar, args); \ - } \ +#define AUDIT_SYSCLOSE(args...) do { \ + if (AUDIT_SYSCALLS()) { \ + struct kaudit_record *__ar = AUDIT_RECORD(); \ + if (AUDIT_AUDITING(__ar)) \ + audit_sysclose(__ar, args); \ + } \ } while (0) #else /* !CONFIG_AUDIT */ -#define AUDIT_ARG(op, args...) do { \ +#define AUDIT_ARG(op, args...) do { \ } while (0) -#define AUDIT_SYSCALL_ENTER(args...) do { \ +#define AUDIT_SYSCALL_ENTER(args...) do { \ } while (0) -#define AUDIT_SYSCALL_EXIT(code, proc, uthread, error) do { \ +#define AUDIT_SYSCALL_EXIT(code, proc, uthread, error) do { \ } while (0) -#define AUDIT_MACH_SYSCALL_ENTER(args...) do { \ +#define AUDIT_MACH_SYSCALL_ENTER(args...) do { \ } while (0) -#define AUDIT_MACH_SYSCALL_EXIT(retval) do { \ +#define AUDIT_MACH_SYSCALL_EXIT(retval) do { \ } while (0) -#define AUDIT_SYSCLOSE(op, args...) do { \ +#define AUDIT_SYSCLOSE(op, args...) do { \ } while (0) -#define AUDIT_SESSION_REF(cred) do { \ +#define AUDIT_SESSION_REF(cred) do { \ } while (0) -#define AUDIT_SESSION_UNREF(cred) do { \ +#define AUDIT_SESSION_UNREF(cred) do { \ } while (0) -#define AUDIT_SESSION_PROCNEW(cred) do { \ +#define AUDIT_SESSION_PROCNEW(cred) do { \ } while (0) -#define AUDIT_SESSION_PROCEXIT(cred) do { \ +#define AUDIT_SESSION_PROCEXIT(cred) do { \ } while (0) -#define AUDIT_SESSION_REF(cred) do { \ +#define AUDIT_SESSION_REF(cred) do { \ } while (0) -#define AUDIT_SESSION_UNREF(cred) do { \ +#define AUDIT_SESSION_UNREF(cred) do { \ } while (0) -#define AUDIT_SESSION_PROCNEW(cred) do { \ +#define AUDIT_SESSION_PROCNEW(cred) do { \ } while (0) -#define AUDIT_SESSION_PROCEXIT(cred) do { \ +#define AUDIT_SESSION_PROCEXIT(cred) do { \ } while (0) #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_arg.c b/bsd/security/audit/audit_arg.c index 919b3dc68..f667243a1 100644 --- a/bsd/security/audit/audit_arg.c +++ b/bsd/security/audit/audit_arg.c @@ -113,16 +113,16 @@ audit_arg_addr(struct kaudit_record *ar, user_addr_t addr) /* * If the process is 64-bit then flag the address as such. 
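
The AUDIT_SYSCALLS()/AUDIT_ENABLED()/AUDIT_AUDITING() wrappers above hinge on __builtin_expect: the compiler lays out the "auditing off" case as the straight-line path, so a disabled audit subsystem costs one predicted-not-taken branch per syscall. A minimal userspace sketch of the same shape, with hypothetical example_* names, assuming a GCC/Clang compiler and standard C99 variadic macros in place of the kernel's GCC-style args...:

#include <stdio.h>

/* Hypothetical globals standing in for audit_syscalls and the per-thread
 * audit record; in xnu the record hangs off the uthread. */
static int example_syscalls_enabled;
static void *example_thread_record;

/* Bias both branches so "no auditing" is the predicted fall-through. */
#define EXAMPLE_SYSCALLS()  __builtin_expect(example_syscalls_enabled, 0)
#define EXAMPLE_AUDITING(x) __builtin_expect((x) != NULL, 0)

/* Wrapper in the style of AUDIT_ARG(): the ## paste routes to the
 * matching example_arg_<op>() helper, as AUDIT_ARG(op, ...) routes to
 * audit_arg_<op>(). */
#define EXAMPLE_ARG(op, ...) do {                               \
    if (EXAMPLE_SYSCALLS()) {                                   \
        void *__r = example_thread_record;                      \
        if (EXAMPLE_AUDITING(__r))                              \
            example_arg_ ## op (__r, __VA_ARGS__);              \
    }                                                           \
} while (0)

static void example_arg_fd(void *rec, int fd)
{
    printf("record %p: fd=%d\n", rec, fd);
}

int main(void)
{
    EXAMPLE_ARG(fd, 3);             /* auditing off: helper never runs */
    example_syscalls_enabled = 1;
    example_thread_record = &example_syscalls_enabled;
    EXAMPLE_ARG(fd, 3);             /* now the helper is called */
    return 0;
}
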
*/ - if (proc_is64bit(p)) + if (proc_is64bit(p)) { ARG_SET_VALID(ar, ARG_ADDR64); - else + } else { ARG_SET_VALID(ar, ARG_ADDR32); + } } void audit_arg_exit(struct kaudit_record *ar, int status, int retval) { - ar->k_ar.ar_arg_exitstatus = status; ar->k_ar.ar_arg_exitretval = retval; ARG_SET_VALID(ar, ARG_EXIT); @@ -131,7 +131,6 @@ audit_arg_exit(struct kaudit_record *ar, int status, int retval) void audit_arg_len(struct kaudit_record *ar, user_size_t len) { - ar->k_ar.ar_arg_len = len; ARG_SET_VALID(ar, ARG_LEN); } @@ -139,7 +138,6 @@ audit_arg_len(struct kaudit_record *ar, user_size_t len) void audit_arg_fd2(struct kaudit_record *ar, int fd) { - ar->k_ar.ar_arg_fd2 = fd; ARG_SET_VALID(ar, ARG_FD2); } @@ -147,7 +145,6 @@ audit_arg_fd2(struct kaudit_record *ar, int fd) void audit_arg_fd(struct kaudit_record *ar, int fd) { - ar->k_ar.ar_arg_fd = fd; ARG_SET_VALID(ar, ARG_FD); } @@ -155,7 +152,6 @@ audit_arg_fd(struct kaudit_record *ar, int fd) void audit_arg_fflags(struct kaudit_record *ar, int fflags) { - ar->k_ar.ar_arg_fflags = fflags; ARG_SET_VALID(ar, ARG_FFLAGS); } @@ -163,7 +159,6 @@ audit_arg_fflags(struct kaudit_record *ar, int fflags) void audit_arg_gid(struct kaudit_record *ar, gid_t gid) { - ar->k_ar.ar_arg_gid = gid; ARG_SET_VALID(ar, ARG_GID); } @@ -171,7 +166,6 @@ audit_arg_gid(struct kaudit_record *ar, gid_t gid) void audit_arg_uid(struct kaudit_record *ar, uid_t uid) { - ar->k_ar.ar_arg_uid = uid; ARG_SET_VALID(ar, ARG_UID); } @@ -179,7 +173,6 @@ audit_arg_uid(struct kaudit_record *ar, uid_t uid) void audit_arg_egid(struct kaudit_record *ar, gid_t egid) { - ar->k_ar.ar_arg_egid = egid; ARG_SET_VALID(ar, ARG_EGID); } @@ -187,7 +180,6 @@ audit_arg_egid(struct kaudit_record *ar, gid_t egid) void audit_arg_euid(struct kaudit_record *ar, uid_t euid) { - ar->k_ar.ar_arg_euid = euid; ARG_SET_VALID(ar, ARG_EUID); } @@ -195,7 +187,6 @@ audit_arg_euid(struct kaudit_record *ar, uid_t euid) void audit_arg_rgid(struct kaudit_record *ar, gid_t rgid) { - ar->k_ar.ar_arg_rgid = rgid; ARG_SET_VALID(ar, ARG_RGID); } @@ -203,7 +194,6 @@ audit_arg_rgid(struct kaudit_record *ar, gid_t rgid) void audit_arg_ruid(struct kaudit_record *ar, uid_t ruid) { - ar->k_ar.ar_arg_ruid = ruid; ARG_SET_VALID(ar, ARG_RUID); } @@ -211,7 +201,6 @@ audit_arg_ruid(struct kaudit_record *ar, uid_t ruid) void audit_arg_sgid(struct kaudit_record *ar, gid_t sgid) { - ar->k_ar.ar_arg_sgid = sgid; ARG_SET_VALID(ar, ARG_SGID); } @@ -219,7 +208,6 @@ audit_arg_sgid(struct kaudit_record *ar, gid_t sgid) void audit_arg_suid(struct kaudit_record *ar, uid_t suid) { - ar->k_ar.ar_arg_suid = suid; ARG_SET_VALID(ar, ARG_SUID); } @@ -229,8 +217,9 @@ audit_arg_groupset(struct kaudit_record *ar, gid_t *gidset, u_int gidset_size) { u_int i; - for (i = 0; i < gidset_size; i++) + for (i = 0; i < gidset_size; i++) { ar->k_ar.ar_arg_groups.gidset[i] = gidset[i]; + } ar->k_ar.ar_arg_groups.gidset_size = gidset_size; ARG_SET_VALID(ar, ARG_GROUPSET); } @@ -238,7 +227,6 @@ audit_arg_groupset(struct kaudit_record *ar, gid_t *gidset, u_int gidset_size) void audit_arg_login(struct kaudit_record *ar, char *login) { - strlcpy(ar->k_ar.ar_arg_login, login, MAXLOGNAME); ARG_SET_VALID(ar, ARG_LOGIN); } @@ -246,7 +234,6 @@ audit_arg_login(struct kaudit_record *ar, char *login) void audit_arg_ctlname(struct kaudit_record *ar, int *name, int namelen) { - bcopy(name, &ar->k_ar.ar_arg_ctlname, namelen * sizeof(int)); ar->k_ar.ar_arg_len = namelen; ARG_SET_VALID(ar, ARG_CTLNAME | ARG_LEN); @@ -255,7 +242,6 @@ audit_arg_ctlname(struct kaudit_record *ar, int 
*name, int namelen) void audit_arg_mask(struct kaudit_record *ar, int mask) { - ar->k_ar.ar_arg_mask = mask; ARG_SET_VALID(ar, ARG_MASK); } @@ -263,7 +249,6 @@ audit_arg_mask(struct kaudit_record *ar, int mask) void audit_arg_mode(struct kaudit_record *ar, mode_t mode) { - ar->k_ar.ar_arg_mode = mode; ARG_SET_VALID(ar, ARG_MODE); } @@ -271,7 +256,6 @@ audit_arg_mode(struct kaudit_record *ar, mode_t mode) void audit_arg_value32(struct kaudit_record *ar, uint32_t value32) { - ar->k_ar.ar_arg_value32 = value32; ARG_SET_VALID(ar, ARG_VALUE32); } @@ -279,7 +263,6 @@ audit_arg_value32(struct kaudit_record *ar, uint32_t value32) void audit_arg_value64(struct kaudit_record *ar, uint64_t value64) { - ar->k_ar.ar_arg_value64 = value64; ARG_SET_VALID(ar, ARG_VALUE64); } @@ -287,7 +270,6 @@ audit_arg_value64(struct kaudit_record *ar, uint64_t value64) void audit_arg_owner(struct kaudit_record *ar, uid_t uid, gid_t gid) { - ar->k_ar.ar_arg_uid = uid; ar->k_ar.ar_arg_gid = gid; ARG_SET_VALID(ar, ARG_UID | ARG_GID); @@ -296,7 +278,6 @@ audit_arg_owner(struct kaudit_record *ar, uid_t uid, gid_t gid) void audit_arg_pid(struct kaudit_record *ar, pid_t pid) { - ar->k_ar.ar_arg_pid = pid; ARG_SET_VALID(ar, ARG_PID); } @@ -308,8 +289,9 @@ audit_arg_process(struct kaudit_record *ar, proc_t p) KASSERT(p != NULL, ("audit_arg_process: p == NULL")); - if ( p == NULL) + if (p == NULL) { return; + } my_cred = kauth_cred_proc_ref(p); ar->k_ar.ar_arg_auid = my_cred->cr_audit.as_aia_p->ai_auid; @@ -329,7 +311,6 @@ audit_arg_process(struct kaudit_record *ar, proc_t p) void audit_arg_signum(struct kaudit_record *ar, u_int signum) { - ar->k_ar.ar_arg_signum = signum; ARG_SET_VALID(ar, ARG_SIGNUM); } @@ -338,7 +319,6 @@ void audit_arg_socket(struct kaudit_record *ar, int sodomain, int sotype, int soprotocol) { - ar->k_ar.ar_arg_sockinfo.sai_domain = sodomain; ar->k_ar.ar_arg_sockinfo.sai_type = sotype; ar->k_ar.ar_arg_sockinfo.sai_protocol = soprotocol; @@ -362,13 +342,15 @@ audit_arg_sockaddr(struct kaudit_record *ar, struct vnode *cwd_vp, KASSERT(sa != NULL, ("audit_arg_sockaddr: sa == NULL")); - if (cwd_vp == NULL || sa == NULL) + if (cwd_vp == NULL || sa == NULL) { return; + } - if (sa->sa_len > sizeof(ar->k_ar.ar_arg_sockaddr)) + if (sa->sa_len > sizeof(ar->k_ar.ar_arg_sockaddr)) { bcopy(sa, &ar->k_ar.ar_arg_sockaddr, sizeof(ar->k_ar.ar_arg_sockaddr)); - else + } else { bcopy(sa, &ar->k_ar.ar_arg_sockaddr, sa->sa_len); + } switch (sa->sa_family) { case AF_INET: @@ -392,14 +374,13 @@ audit_arg_sockaddr(struct kaudit_record *ar, struct vnode *cwd_vp, } ARG_SET_VALID(ar, ARG_SADDRUNIX); break; - /* XXXAUDIT: default:? */ + /* XXXAUDIT: default:? 
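
audit_arg_sockaddr() above trusts the caller-supplied sa->sa_len only up to the size of the embedded destination buffer, truncating oversized values rather than overrunning ar_arg_sockaddr. A standalone sketch of that clamped copy, with illustrative example_* names and a shrunken address struct:

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins; the real code copies a struct sockaddr into the
 * fixed-size ar_arg_sockaddr field of the audit record. */
struct example_addr { unsigned char sa_len; char data[15]; };
struct example_record { struct example_addr arg_sockaddr; };

static void example_copy_sockaddr(struct example_record *r,
    const struct example_addr *sa)
{
    /* Clamp the copy to the destination size; sa_len comes from the
     * caller and may be larger than the buffer. */
    size_t n = sa->sa_len;

    if (n > sizeof(r->arg_sockaddr))
        n = sizeof(r->arg_sockaddr);
    memcpy(&r->arg_sockaddr, sa, n);
}

int main(void)
{
    struct example_record r = {0};
    struct example_addr a = { .sa_len = 200 };  /* bogus oversized length */

    example_copy_sockaddr(&r, &a);
    printf("copied at most %zu bytes\n", sizeof(r.arg_sockaddr));
    return 0;
}
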
*/ } } void audit_arg_auid(struct kaudit_record *ar, uid_t auid) { - ar->k_ar.ar_arg_auid = auid; ARG_SET_VALID(ar, ARG_AUID); } @@ -407,7 +388,6 @@ audit_arg_auid(struct kaudit_record *ar, uid_t auid) void audit_arg_auditinfo(struct kaudit_record *ar, struct auditinfo *au_info) { - ar->k_ar.ar_arg_auid = au_info->ai_auid; ar->k_ar.ar_arg_asid = au_info->ai_asid; ar->k_ar.ar_arg_amask.am_success = au_info->ai_mask.am_success; @@ -421,7 +401,6 @@ void audit_arg_auditinfo_addr(struct kaudit_record *ar, struct auditinfo_addr *au_info) { - ar->k_ar.ar_arg_auid = au_info->ai_auid; ar->k_ar.ar_arg_asid = au_info->ai_asid; ar->k_ar.ar_arg_amask.am_success = au_info->ai_mask.am_success; @@ -438,17 +417,18 @@ audit_arg_auditinfo_addr(struct kaudit_record *ar, void audit_arg_text(struct kaudit_record *ar, char *text) { - KASSERT(text != NULL, ("audit_arg_text: text == NULL")); /* Invalidate the text string */ ar->k_ar.ar_valid_arg &= (ARG_ALL ^ ARG_TEXT); - if (text == NULL) + if (text == NULL) { return; + } - if (ar->k_ar.ar_arg_text == NULL) - ar->k_ar.ar_arg_text = malloc(MAXPATHLEN, M_AUDITTEXT, + if (ar->k_ar.ar_arg_text == NULL) { + ar->k_ar.ar_arg_text = malloc(MAXPATHLEN, M_AUDITTEXT, M_WAITOK); + } strncpy(ar->k_ar.ar_arg_text, text, MAXPATHLEN); ARG_SET_VALID(ar, ARG_TEXT); @@ -457,17 +437,18 @@ audit_arg_text(struct kaudit_record *ar, char *text) void audit_arg_opaque(struct kaudit_record *ar, void *data, size_t size) { - KASSERT(data != NULL, ("audit_arg_opaque: data == NULL")); KASSERT(size <= UINT16_MAX, ("audit_arg_opaque: size > UINT16_MAX")); - if (data == NULL || size > UINT16_MAX) + if (data == NULL || size > UINT16_MAX) { return; + } - if (ar->k_ar.ar_arg_opaque == NULL) + if (ar->k_ar.ar_arg_opaque == NULL) { ar->k_ar.ar_arg_opaque = malloc(size, M_AUDITDATA, M_WAITOK); - else + } else { return; + } memcpy(ar->k_ar.ar_arg_opaque, data, size); ar->k_ar.ar_arg_opq_size = (u_int16_t) size; @@ -486,19 +467,21 @@ audit_arg_data(struct kaudit_record *ar, void *data, size_t size, size_t number) ("audit_arg_data: number > UINT8_MAX")); if (data == NULL || size < AUR_BYTE_SIZE || size > AUR_INT64_SIZE || - number > UINT8_MAX) + number > UINT8_MAX) { return; + } sz = size * number; - if (ar->k_ar.ar_arg_data == NULL) + if (ar->k_ar.ar_arg_data == NULL) { ar->k_ar.ar_arg_data = malloc(sz, M_AUDITDATA, M_WAITOK); - else + } else { return; + } memcpy(ar->k_ar.ar_arg_data, data, sz); - switch(size) { + switch (size) { case AUR_BYTE_SIZE: ar->k_ar.ar_arg_data_type = AUR_BYTE; break; @@ -529,7 +512,6 @@ audit_arg_data(struct kaudit_record *ar, void *data, size_t size, size_t number) void audit_arg_cmd(struct kaudit_record *ar, int cmd) { - ar->k_ar.ar_arg_cmd = cmd; ARG_SET_VALID(ar, ARG_CMD); } @@ -537,7 +519,6 @@ audit_arg_cmd(struct kaudit_record *ar, int cmd) void audit_arg_svipc_cmd(struct kaudit_record *ar, int cmd) { - ar->k_ar.ar_arg_svipc_cmd = cmd; ARG_SET_VALID(ar, ARG_SVIPC_CMD); } @@ -545,7 +526,6 @@ audit_arg_svipc_cmd(struct kaudit_record *ar, int cmd) void audit_arg_svipc_perm(struct kaudit_record *ar, struct ipc_perm *perm) { - bcopy(perm, &ar->k_ar.ar_arg_svipc_perm, sizeof(ar->k_ar.ar_arg_svipc_perm)); ARG_SET_VALID(ar, ARG_SVIPC_PERM); @@ -554,7 +534,6 @@ audit_arg_svipc_perm(struct kaudit_record *ar, struct ipc_perm *perm) void audit_arg_svipc_id(struct kaudit_record *ar, int id) { - ar->k_ar.ar_arg_svipc_id = id; ARG_SET_VALID(ar, ARG_SVIPC_ID); } @@ -562,7 +541,6 @@ audit_arg_svipc_id(struct kaudit_record *ar, int id) void audit_arg_svipc_addr(struct kaudit_record *ar, 
user_addr_t addr) { - ar->k_ar.ar_arg_svipc_addr = addr; ARG_SET_VALID(ar, ARG_SVIPC_ADDR); } @@ -571,7 +549,6 @@ void audit_arg_posix_ipc_perm(struct kaudit_record *ar, uid_t uid, gid_t gid, mode_t mode) { - ar->k_ar.ar_arg_pipc_perm.pipc_uid = uid; ar->k_ar.ar_arg_pipc_perm.pipc_gid = gid; ar->k_ar.ar_arg_pipc_perm.pipc_mode = mode; @@ -581,7 +558,6 @@ audit_arg_posix_ipc_perm(struct kaudit_record *ar, uid_t uid, gid_t gid, void audit_arg_auditon(struct kaudit_record *ar, union auditon_udata *udata) { - bcopy((void *)udata, &ar->k_ar.ar_arg_auditon, sizeof(ar->k_ar.ar_arg_auditon)); ARG_SET_VALID(ar, ARG_AUDITON); @@ -602,7 +578,7 @@ audit_arg_file(struct kaudit_record *ar, __unused proc_t p, switch (FILEGLOB_DTYPE(fp->f_fglob)) { case DTYPE_VNODE: - /* case DTYPE_FIFO: */ + /* case DTYPE_FIFO: */ audit_arg_vnpath_withref(ar, (struct vnode *)fp->f_fglob->fg_data, ARG_VNODE1); break; @@ -610,8 +586,9 @@ audit_arg_file(struct kaudit_record *ar, __unused proc_t p, case DTYPE_SOCKET: so = (struct socket *)fp->f_fglob->fg_data; if (SOCK_CHECK_DOM(so, PF_INET)) { - if (so->so_pcb == NULL) + if (so->so_pcb == NULL) { break; + } ar->k_ar.ar_arg_sockinfo.sai_type = so->so_type; ar->k_ar.ar_arg_sockinfo.sai_domain = SOCK_DOM(so); @@ -628,8 +605,9 @@ audit_arg_file(struct kaudit_record *ar, __unused proc_t p, ARG_SET_VALID(ar, ARG_SOCKINFO); } if (SOCK_CHECK_DOM(so, PF_INET6)) { - if (so->so_pcb == NULL) + if (so->so_pcb == NULL) { break; + } ar->k_ar.ar_arg_sockinfo.sai_type = so->so_type; ar->k_ar.ar_arg_sockinfo.sai_domain = SOCK_DOM(so); @@ -658,7 +636,7 @@ audit_arg_file(struct kaudit_record *ar, __unused proc_t p, * record stored on the user thread. This function will allocate the memory * to store the path info if not already available. This memory will be * freed when the audit record is freed. - * + * * Note that the current working directory vp must be supplied at the audit call * site to permit per thread current working directories, and that it must take * a upath starting with '/' into account for chroot if the path is absolute. @@ -678,19 +656,21 @@ audit_arg_upath(struct kaudit_record *ar, struct vnode *cwd_vp, char *upath, u_i KASSERT((flag != ARG_UPATH1) || (flag != ARG_UPATH2), ("audit_arg_upath: flag %llu", (unsigned long long)flag)); - if (flag == ARG_UPATH1) + if (flag == ARG_UPATH1) { pathp = &ar->k_ar.ar_arg_upath1; - else + } else { pathp = &ar->k_ar.ar_arg_upath2; + } - if (*pathp == NULL) + if (*pathp == NULL) { *pathp = malloc(MAXPATHLEN, M_AUDITPATH, M_WAITOK); - else + } else { return; + } - if (audit_canon_path(cwd_vp, upath, *pathp) == 0) + if (audit_canon_path(cwd_vp, upath, *pathp) == 0) { ARG_SET_VALID(ar, flag); - else { + } else { free(*pathp, M_AUDITPATH); *pathp = NULL; } @@ -707,15 +687,17 @@ audit_arg_kpath(struct kaudit_record *ar, char *kpath, u_int64_t flag) KASSERT((flag != ARG_KPATH1) || (flag != ARG_KPATH2), ("audit_arg_kpath: flag %llu", (unsigned long long)flag)); - if (flag == ARG_KPATH1) + if (flag == ARG_KPATH1) { pathp = &ar->k_ar.ar_arg_kpath1; - else + } else { pathp = &ar->k_ar.ar_arg_kpath2; + } - if (*pathp == NULL) + if (*pathp == NULL) { *pathp = malloc(MAXPATHLEN, M_AUDITPATH, M_WAITOK); - else + } else { return; + } strlcpy(*pathp, kpath, MAXPATHLEN); ARG_SET_VALID(ar, flag); @@ -759,7 +741,7 @@ audit_arg_vnpath(struct kaudit_record *ar, struct vnode *vp, u_int64_t flags) p = current_proc(); - /* + /* * XXXAUDIT: The below clears, and then resets the flags for valid * arguments. 
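
audit_arg_upath() and audit_arg_kpath() above share one pattern: pick the slot by flag, allocate a MAXPATHLEN buffer once per record, and return early if the slot is already populated so a repeated path argument can neither leak nor overwrite the first. A userspace sketch of that allocate-once slot, with illustrative names (snprintf stands in for the kernel's strlcpy/audit_canon_path):

#include <stdio.h>
#include <stdlib.h>

#define EXAMPLE_MAXPATHLEN 1024

struct example_record { char *upath1; char *upath2; };

static void example_arg_path(struct example_record *r, int first,
    const char *p)
{
    char **slot = first ? &r->upath1 : &r->upath2;

    if (*slot != NULL)
        return;                     /* already captured for this record */
    *slot = malloc(EXAMPLE_MAXPATHLEN);
    if (*slot == NULL)
        return;
    snprintf(*slot, EXAMPLE_MAXPATHLEN, "%s", p);
}

int main(void)
{
    struct example_record r = { NULL, NULL };

    example_arg_path(&r, 1, "/tmp/a");
    example_arg_path(&r, 1, "/tmp/b");   /* ignored: slot already set */
    printf("upath1 = %s\n", r.upath1);
    free(r.upath1);
    return 0;
}
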
Ideally, either the new vnode is used, or the old one * would be. @@ -782,10 +764,11 @@ audit_arg_vnpath(struct kaudit_record *ar, struct vnode *vp, u_int64_t flags) #endif } - if (*pathp == NULL) + if (*pathp == NULL) { *pathp = malloc(MAXPATHLEN, M_AUDITPATH, M_WAITOK); - else + } else { return; + } /* * If vn_getpath() succeeds, place it in a string buffer @@ -794,10 +777,11 @@ audit_arg_vnpath(struct kaudit_record *ar, struct vnode *vp, u_int64_t flags) */ len = MAXPATHLEN; if (vn_getpath(vp, *pathp, &len) == 0) { - if (flags & ARG_VNODE1) + if (flags & ARG_VNODE1) { ARG_SET_VALID(ar, ARG_KPATH1); - else + } else { ARG_SET_VALID(ar, ARG_KPATH2); + } } else { free(*pathp, M_AUDITPATH); *pathp = NULL; @@ -838,17 +822,19 @@ audit_arg_vnpath(struct kaudit_record *ar, struct vnode *vp, u_int64_t flags) vnp->vn_fsid = va.va_fsid; vnp->vn_fileid = (u_int32_t)va.va_fileid; vnp->vn_gen = va.va_gen; - if (flags & ARG_VNODE1) + if (flags & ARG_VNODE1) { ARG_SET_VALID(ar, ARG_VNODE1); - else + } else { ARG_SET_VALID(ar, ARG_VNODE2); + } } void audit_arg_vnpath_withref(struct kaudit_record *ar, struct vnode *vp, u_int64_t flags) { - if (vp == NULL || vnode_getwithref(vp)) + if (vp == NULL || vnode_getwithref(vp)) { return; + } audit_arg_vnpath(ar, vp, flags); (void)vnode_put(vp); } @@ -856,7 +842,6 @@ audit_arg_vnpath_withref(struct kaudit_record *ar, struct vnode *vp, u_int64_t f void audit_arg_mach_port1(struct kaudit_record *ar, mach_port_name_t port) { - ar->k_ar.ar_arg_mach_port1 = port; ARG_SET_VALID(ar, ARG_MACHPORT1); } @@ -864,7 +849,6 @@ audit_arg_mach_port1(struct kaudit_record *ar, mach_port_name_t port) void audit_arg_mach_port2(struct kaudit_record *ar, mach_port_name_t port) { - ar->k_ar.ar_arg_mach_port2 = port; ARG_SET_VALID(ar, ARG_MACHPORT2); } @@ -876,12 +860,13 @@ audit_arg_mach_port2(struct kaudit_record *ar, mach_port_name_t port) void audit_arg_argv(struct kaudit_record *ar, char *argv, int argc, int length) { - - if (audit_argv == 0 || argc == 0) + if (audit_argv == 0 || argc == 0) { return; + } - if (ar->k_ar.ar_arg_argv == NULL) + if (ar->k_ar.ar_arg_argv == NULL) { ar->k_ar.ar_arg_argv = malloc(length, M_AUDITTEXT, M_WAITOK); + } bcopy(argv, ar->k_ar.ar_arg_argv, length); ar->k_ar.ar_arg_argc = argc; ARG_SET_VALID(ar, ARG_ARGV); @@ -893,12 +878,13 @@ audit_arg_argv(struct kaudit_record *ar, char *argv, int argc, int length) void audit_arg_envv(struct kaudit_record *ar, char *envv, int envc, int length) { - - if (audit_arge == 0 || envc == 0) + if (audit_arge == 0 || envc == 0) { return; + } - if (ar->k_ar.ar_arg_envv == NULL) + if (ar->k_ar.ar_arg_envv == NULL) { ar->k_ar.ar_arg_envv = malloc(length, M_AUDITTEXT, M_WAITOK); + } bcopy(envv, ar->k_ar.ar_arg_envv, length); ar->k_ar.ar_arg_envc = envc; ARG_SET_VALID(ar, ARG_ENVV); @@ -919,8 +905,9 @@ audit_sysclose(struct kaudit_record *ar, proc_t p, int fd) audit_arg_fd(ar, fd); - if (fp_getfvp(p, fd, &fp, &vp) != 0) + if (fp_getfvp(p, fd, &fp, &vp) != 0) { return; + } audit_arg_vnpath_withref(ar, (struct vnode *)fp->f_fglob->fg_data, ARG_VNODE1); @@ -974,10 +961,10 @@ audit_identity_info_construct(struct au_identity_info *id_info) if (id_info->signing_id == NULL && signing_id != NULL) { id_info->signing_id = malloc( MAX_AU_IDENTITY_SIGNING_ID_LENGTH, - M_AUDITTEXT, M_WAITOK); + M_AUDITTEXT, M_WAITOK); if (id_info->signing_id != NULL) { src_len = strlcpy(id_info->signing_id, - signing_id, MAX_AU_IDENTITY_SIGNING_ID_LENGTH); + signing_id, MAX_AU_IDENTITY_SIGNING_ID_LENGTH); if (src_len >= MAX_AU_IDENTITY_SIGNING_ID_LENGTH) { 
id_info->signing_id_trunc = 1; @@ -987,10 +974,10 @@ audit_identity_info_construct(struct au_identity_info *id_info) if (id_info->team_id == NULL && team_id != NULL) { id_info->team_id = malloc(MAX_AU_IDENTITY_TEAM_ID_LENGTH, - M_AUDITTEXT, M_WAITOK); + M_AUDITTEXT, M_WAITOK); if (id_info->team_id != NULL) { src_len = strlcpy(id_info->team_id, team_id, - MAX_AU_IDENTITY_TEAM_ID_LENGTH); + MAX_AU_IDENTITY_TEAM_ID_LENGTH); if (src_len >= MAX_AU_IDENTITY_TEAM_ID_LENGTH) { id_info->team_id_trunc = 1; diff --git a/bsd/security/audit/audit_bsd.c b/bsd/security/audit/audit_bsd.c index 5a6ea3750..5cfc16778 100644 --- a/bsd/security/audit/audit_bsd.c +++ b/bsd/security/audit/audit_bsd.c @@ -57,35 +57,35 @@ extern void ipc_port_release_send(ipc_port_t port); #if CONFIG_AUDIT struct mhdr { - size_t mh_size; - au_malloc_type_t *mh_type; - u_long mh_magic; - char mh_data[0]; + size_t mh_size; + au_malloc_type_t *mh_type; + u_long mh_magic; + char mh_data[0]; }; /* - * The lock group for the audit subsystem. + * The lock group for the audit subsystem. */ static lck_grp_t *audit_lck_grp = NULL; -#define AUDIT_MHMAGIC 0x4D656C53 +#define AUDIT_MHMAGIC 0x4D656C53 #if AUDIT_MALLOC_DEBUG -#define AU_MAX_SHORTDESC 20 -#define AU_MAX_LASTCALLER 20 +#define AU_MAX_SHORTDESC 20 +#define AU_MAX_LASTCALLER 20 struct au_malloc_debug_info { - SInt64 md_size; - SInt64 md_maxsize; - SInt32 md_inuse; - SInt32 md_maxused; - unsigned md_type; - unsigned md_magic; - char md_shortdesc[AU_MAX_SHORTDESC]; - char md_lastcaller[AU_MAX_LASTCALLER]; + SInt64 md_size; + SInt64 md_maxsize; + SInt32 md_inuse; + SInt32 md_maxused; + unsigned md_type; + unsigned md_magic; + char md_shortdesc[AU_MAX_SHORTDESC]; + char md_lastcaller[AU_MAX_LASTCALLER]; }; typedef struct au_malloc_debug_info au_malloc_debug_info_t; -au_malloc_type_t *audit_malloc_types[NUM_MALLOC_TYPES]; +au_malloc_type_t *audit_malloc_types[NUM_MALLOC_TYPES]; static int audit_sysctl_malloc_debug(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req); @@ -94,11 +94,11 @@ SYSCTL_PROC(_kern, OID_AUTO, audit_malloc_debug, CTLFLAG_RD, NULL, 0, audit_sysctl_malloc_debug, "S,audit_malloc_debug", "Current malloc debug info for auditing."); -#define AU_MALLOC_DBINFO_SZ \ +#define AU_MALLOC_DBINFO_SZ \ (NUM_MALLOC_TYPES * sizeof(au_malloc_debug_info_t)) /* - * Copy out the malloc debug info via the sysctl interface. The userland code + * Copy out the malloc debug info via the sysctl interface. The userland code * is something like the following: * * error = sysctlbyname("kern.audit_malloc_debug", buffer_ptr, &buffer_len, @@ -116,35 +116,39 @@ audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1, /* * This provides a read-only node. */ - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } /* - * If just querying then return the space required. + * If just querying then return the space required. */ if (req->oldptr == USER_ADDR_NULL) { - req->oldidx = AU_MALLOC_DBINFO_SZ; - return (0); + req->oldidx = AU_MALLOC_DBINFO_SZ; + return 0; } /* * Alloc a temporary buffer. */ - if (req->oldlen < AU_MALLOC_DBINFO_SZ) - return (ENOMEM); + if (req->oldlen < AU_MALLOC_DBINFO_SZ) { + return ENOMEM; + } amdi_ptr = (au_malloc_debug_info_t *)kalloc(AU_MALLOC_DBINFO_SZ); - if (amdi_ptr == NULL) - return (ENOMEM); + if (amdi_ptr == NULL) { + return ENOMEM; + } bzero(amdi_ptr, AU_MALLOC_DBINFO_SZ); /* - * Build the record array. + * Build the record array. 
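
The read-only sysctl handler above follows the standard two-phase protocol: a write attempt gets EPERM, a NULL oldptr means "report the required size in oldidx", and only a large-enough buffer gets the copy-out. From userland the same protocol looks like the sketch below, shown against kern.version (present on any macOS/BSD system; kern.audit_malloc_debug exists only when AUDIT_MALLOC_DEBUG builds are enabled):

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int main(void)
{
    size_t len = 0;
    char *buf;

    /* Phase one: NULL buffer asks the kernel for the required length. */
    if (sysctlbyname("kern.version", NULL, &len, NULL, 0) != 0) {
        perror("sysctlbyname(size)");
        return 1;
    }
    buf = malloc(len);
    if (buf == NULL)
        return 1;
    /* Phase two: read into a buffer of the reported size. */
    if (sysctlbyname("kern.version", buf, &len, NULL, 0) != 0) {
        perror("sysctlbyname(read)");
        free(buf);
        return 1;
    }
    printf("%.*s\n", (int)len, buf);
    free(buf);
    return 0;
}
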
*/ sz = 0; nxt_ptr = amdi_ptr; - for(i = 0; i < NUM_MALLOC_TYPES; i++) { - if (audit_malloc_types[i] == NULL) + for (i = 0; i < NUM_MALLOC_TYPES; i++) { + if (audit_malloc_types[i] == NULL) { continue; + } if (audit_malloc_types[i]->mt_magic != M_MAGIC) { nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic; continue; @@ -157,7 +161,7 @@ audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1, strlcpy(nxt_ptr->md_shortdesc, audit_malloc_types[i]->mt_shortdesc, AU_MAX_SHORTDESC - 1); strlcpy(nxt_ptr->md_lastcaller, - audit_malloc_types[i]->mt_lastcaller, AU_MAX_LASTCALLER-1); + audit_malloc_types[i]->mt_lastcaller, AU_MAX_LASTCALLER - 1); sz += sizeof(au_malloc_debug_info_t); nxt_ptr++; } @@ -166,13 +170,13 @@ audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1, err = SYSCTL_OUT(req, amdi_ptr, sz); kfree(amdi_ptr, AU_MALLOC_DBINFO_SZ); - return (err); + return err; } #endif /* AUDIT_MALLOC_DEBUG */ - + /* * BSD malloc() - * + * * If the M_NOWAIT flag is set then it may not block and return NULL. * If the M_ZERO flag is set then zero out the buffer. */ @@ -180,31 +184,35 @@ void * #if AUDIT_MALLOC_DEBUG _audit_malloc(size_t size, au_malloc_type_t *type, int flags, const char *fn) #else -_audit_malloc(size_t size, au_malloc_type_t *type, int flags) +_audit_malloc(size_t size, au_malloc_type_t * type, int flags) #endif { - struct mhdr *hdr; - size_t memsize; + struct mhdr *hdr; + size_t memsize; if (os_add_overflow(sizeof(*hdr), size, &memsize)) { - return (NULL); + return NULL; } - if (size == 0) - return (NULL); + if (size == 0) { + return NULL; + } if (flags & M_NOWAIT) { hdr = (void *)kalloc_noblock(memsize); } else { hdr = (void *)kalloc(memsize); - if (hdr == NULL) + if (hdr == NULL) { panic("_audit_malloc: kernel memory exhausted"); + } + } + if (hdr == NULL) { + return NULL; } - if (hdr == NULL) - return (NULL); hdr->mh_size = memsize; hdr->mh_type = type; hdr->mh_magic = AUDIT_MHMAGIC; - if (flags & M_ZERO) + if (flags & M_ZERO) { memset(hdr->mh_data, 0, size); + } #if AUDIT_MALLOC_DEBUG if (type != NULL && type->mt_type < NUM_MALLOC_TYPES) { OSAddAtomic64(memsize, &type->mt_size); @@ -215,7 +223,7 @@ _audit_malloc(size_t size, au_malloc_type_t *type, int flags) audit_malloc_types[type->mt_type] = type; } #endif /* AUDIT_MALLOC_DEBUG */ - return (hdr->mh_data); + return hdr->mh_data; } /* @@ -229,13 +237,14 @@ _audit_free(void *addr, __unused au_malloc_type_t *type) #endif { struct mhdr *hdr; - - if (addr == NULL) + + if (addr == NULL) { return; + } hdr = addr; hdr--; if (hdr->mh_magic != AUDIT_MHMAGIC) { - panic("_audit_free(): hdr->mh_magic (%lx) != AUDIT_MHMAGIC", hdr->mh_magic); + panic("_audit_free(): hdr->mh_magic (%lx) != AUDIT_MHMAGIC", hdr->mh_magic); } #if AUDIT_MALLOC_DEBUG @@ -253,11 +262,11 @@ _audit_free(void *addr, __unused au_malloc_type_t *type) void _audit_cv_init(struct cv *cvp, const char *desc) { - - if (desc == NULL) + if (desc == NULL) { cvp->cv_description = "UNKNOWN"; - else + } else { cvp->cv_description = desc; + } cvp->cv_waiters = 0; } @@ -267,7 +276,6 @@ _audit_cv_init(struct cv *cvp, const char *desc) void _audit_cv_destroy(struct cv *cvp) { - cvp->cv_description = NULL; cvp->cv_waiters = 0; } @@ -278,7 +286,6 @@ _audit_cv_destroy(struct cv *cvp) void _audit_cv_signal(struct cv *cvp) { - if (cvp->cv_waiters > 0) { wakeup_one((caddr_t)cvp); cvp->cv_waiters--; @@ -291,7 +298,6 @@ _audit_cv_signal(struct cv *cvp) void _audit_cv_broadcast(struct cv *cvp) { - if (cvp->cv_waiters > 0) { wakeup((caddr_t)cvp); 
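
_audit_malloc() above over-allocates by the size of struct mhdr, stamps the header with the allocation size, type, and AUDIT_MHMAGIC, and hands the caller mh_data; _audit_free() steps back to the header and panics on a magic mismatch, catching wrong-pointer frees and some heap corruption. A userspace sketch of the scheme (the os_add_overflow() size check xnu performs is omitted; example_* names are illustrative):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define EXAMPLE_MHMAGIC 0x4D656C53UL    /* same value as AUDIT_MHMAGIC */

struct example_mhdr {
    size_t        mh_size;
    unsigned long mh_magic;
    char          mh_data[];            /* xnu uses mh_data[0] */
};

static void *example_malloc(size_t size)
{
    size_t total = offsetof(struct example_mhdr, mh_data) + size;
    struct example_mhdr *hdr = malloc(total);

    if (hdr == NULL)
        return NULL;
    hdr->mh_size = total;
    hdr->mh_magic = EXAMPLE_MHMAGIC;
    return hdr->mh_data;                /* hand out only the payload */
}

static void example_free(void *addr)
{
    struct example_mhdr *hdr;

    if (addr == NULL)
        return;
    hdr = (struct example_mhdr *)
        ((char *)addr - offsetof(struct example_mhdr, mh_data));
    if (hdr->mh_magic != EXAMPLE_MHMAGIC) {
        fprintf(stderr, "bad magic: wrong pointer or corruption\n");
        abort();                        /* xnu panics here */
    }
    free(hdr);
}

int main(void)
{
    char *p = example_malloc(32);

    example_free(p);
    return 0;
}
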
cvp->cv_waiters = 0; @@ -306,7 +312,6 @@ _audit_cv_broadcast(struct cv *cvp) void _audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc) { - cvp->cv_waiters++; (void) msleep(cvp, mp, PZERO, desc, 0); } @@ -320,9 +325,8 @@ _audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc) int _audit_cv_wait_sig(struct cv *cvp, lck_mtx_t *mp, const char *desc) { - cvp->cv_waiters++; - return (msleep(cvp, mp, PSOCK | PCATCH, desc, 0)); + return msleep(cvp, mp, PSOCK | PCATCH, desc, 0); } /* @@ -336,17 +340,16 @@ _audit_mtx_init(struct mtx *mp, __unused const char *lckname) #endif { mp->mtx_lock = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL); - KASSERT(mp->mtx_lock != NULL, + KASSERT(mp->mtx_lock != NULL, ("_audit_mtx_init: Could not allocate a mutex.")); #if DIAGNOSTIC - strlcpy(mp->mtx_name, lckname, AU_MAX_LCK_NAME); + strlcpy(mp->mtx_name, lckname, AU_MAX_LCK_NAME); #endif } void _audit_mtx_destroy(struct mtx *mp) { - if (mp->mtx_lock) { lck_mtx_free(mp->mtx_lock, audit_lck_grp); mp->mtx_lock = NULL; @@ -364,17 +367,16 @@ _audit_rw_init(struct rwlock *lp, __unused const char *lckname) #endif { lp->rw_lock = lck_rw_alloc_init(audit_lck_grp, LCK_ATTR_NULL); - KASSERT(lp->rw_lock != NULL, + KASSERT(lp->rw_lock != NULL, ("_audit_rw_init: Could not allocate a rw lock.")); #if DIAGNOSTIC - strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME); + strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME); #endif } void _audit_rw_destroy(struct rwlock *lp) { - if (lp->rw_lock) { lck_rw_free(lp->rw_lock, audit_lck_grp); lp->rw_lock = NULL; @@ -403,7 +405,7 @@ _audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp, thread_continue_t fun } /* - * Simple recursive lock. + * Simple recursive lock. */ void #if DIAGNOSTIC @@ -412,12 +414,11 @@ _audit_rlck_init(struct rlck *lp, const char *lckname) _audit_rlck_init(struct rlck *lp, __unused const char *lckname) #endif { - lp->rl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL); - KASSERT(lp->rl_mtx != NULL, + KASSERT(lp->rl_mtx != NULL, ("_audit_rlck_init: Could not allocate a recursive lock.")); #if DIAGNOSTIC - strlcpy(lp->rl_name, lckname, AU_MAX_LCK_NAME); + strlcpy(lp->rl_name, lckname, AU_MAX_LCK_NAME); #endif lp->rl_thread = 0; lp->rl_recurse = 0; @@ -425,11 +426,10 @@ _audit_rlck_init(struct rlck *lp, __unused const char *lckname) /* * Recursive lock. Allow same thread to recursively lock the same lock. - */ + */ void _audit_rlck_lock(struct rlck *lp) { - if (lp->rl_thread == current_thread()) { OSAddAtomic(1, &lp->rl_recurse); KASSERT(lp->rl_recurse < 10000, @@ -447,7 +447,7 @@ _audit_rlck_lock(struct rlck *lp) void _audit_rlck_unlock(struct rlck *lp) { - KASSERT(lp->rl_thread == current_thread(), + KASSERT(lp->rl_thread == current_thread(), ("_audit_rlck_unlock(): Don't own lock.")); /* Note: OSAddAtomic returns old value. 
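
_audit_rlck_lock() builds recursion on top of a plain mutex: if the calling thread already owns the lock it only bumps rl_recurse, and only the outermost unlock releases the mutex. The unlocked owner check is safe because it can only ever match for the thread that previously stored its own identity. A pthread sketch of the same idea (pthreads also offer PTHREAD_MUTEX_RECURSIVE, which packages this up):

#include <pthread.h>
#include <stdio.h>

struct example_rlck {
    pthread_mutex_t mtx;
    pthread_t       owner;
    int             owned;    /* xnu uses rl_thread == 0 for "unowned" */
    unsigned        recurse;
};

static void example_rlck_lock(struct example_rlck *l)
{
    /* Unsynchronized check, as in _audit_rlck_lock(): it can only
     * match the thread that wrote its own identity into owner. */
    if (l->owned && pthread_equal(l->owner, pthread_self())) {
        l->recurse++;
        return;
    }
    pthread_mutex_lock(&l->mtx);
    l->owner = pthread_self();
    l->owned = 1;
}

static void example_rlck_unlock(struct example_rlck *l)
{
    if (l->recurse > 0) {
        l->recurse--;          /* inner unlock: keep holding the mutex */
        return;
    }
    l->owned = 0;
    pthread_mutex_unlock(&l->mtx);
}

int main(void)
{
    struct example_rlck l = { .mtx = PTHREAD_MUTEX_INITIALIZER };

    example_rlck_lock(&l);
    example_rlck_lock(&l);     /* re-entry by the owner just counts */
    example_rlck_unlock(&l);
    example_rlck_unlock(&l);   /* outermost unlock releases the mutex */
    puts("ok");
    return 0;
}
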
*/ @@ -456,11 +456,10 @@ _audit_rlck_unlock(struct rlck *lp) lck_mtx_unlock(lp->rl_mtx); } } - + void _audit_rlck_destroy(struct rlck *lp) { - if (lp->rl_mtx) { lck_mtx_free(lp->rl_mtx, audit_lck_grp); lp->rl_mtx = NULL; @@ -474,13 +473,15 @@ void _audit_rlck_assert(struct rlck *lp, u_int assert) { thread_t cthd = current_thread(); - - if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread == cthd) + + if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread == cthd) { panic("recursive lock (%p) not held by this thread (%p).", lp, cthd); - if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0) + } + if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0) { panic("recursive lock (%p) held by thread (%p).", lp, cthd); + } } /* @@ -493,12 +494,11 @@ _audit_slck_init(struct slck *lp, const char *lckname) _audit_slck_init(struct slck *lp, __unused const char *lckname) #endif { - lp->sl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL); - KASSERT(lp->sl_mtx != NULL, + KASSERT(lp->sl_mtx != NULL, ("_audit_slck_init: Could not allocate a sleep lock.")); #if DIAGNOSTIC - strlcpy(lp->sl_name, lckname, AU_MAX_LCK_NAME); + strlcpy(lp->sl_name, lckname, AU_MAX_LCK_NAME); #endif lp->sl_locked = 0; lp->sl_waiting = 0; @@ -506,7 +506,7 @@ _audit_slck_init(struct slck *lp, __unused const char *lckname) /* * Sleep lock lock. The 'intr' flag determines if the lock is interruptible. - * If 'intr' is true then signals or other events can interrupt the sleep lock. + * If 'intr' is true then signals or other events can interrupt the sleep lock. */ wait_result_t _audit_slck_lock(struct slck *lp, int intr) @@ -517,13 +517,14 @@ _audit_slck_lock(struct slck *lp, int intr) while (lp->sl_locked && res == THREAD_AWAKENED) { lp->sl_waiting = 1; res = lck_mtx_sleep(lp->sl_mtx, LCK_SLEEP_DEFAULT, - (event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT); + (event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT); } - if (res == THREAD_AWAKENED) + if (res == THREAD_AWAKENED) { lp->sl_locked = 1; + } lck_mtx_unlock(lp->sl_mtx); - - return (res); + + return res; } /* @@ -532,7 +533,6 @@ _audit_slck_lock(struct slck *lp, int intr) void _audit_slck_unlock(struct slck *lp) { - lck_mtx_lock(lp->sl_mtx); lp->sl_locked = 0; if (lp->sl_waiting) { @@ -545,7 +545,7 @@ _audit_slck_unlock(struct slck *lp) } /* - * Sleep lock try. Don't sleep if it doesn't get the lock. + * Sleep lock try. Don't sleep if it doesn't get the lock. 
*/ int _audit_slck_trylock(struct slck *lp) @@ -554,11 +554,12 @@ _audit_slck_trylock(struct slck *lp) lck_mtx_lock(lp->sl_mtx); result = !lp->sl_locked; - if (result) + if (result) { lp->sl_locked = 1; + } lck_mtx_unlock(lp->sl_mtx); - return (result); + return result; } /* @@ -567,17 +568,17 @@ _audit_slck_trylock(struct slck *lp) void _audit_slck_assert(struct slck *lp, u_int assert) { - - if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0) + if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0) { panic("sleep lock (%p) not held.", lp); - if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1) + } + if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1) { panic("sleep lock (%p) held.", lp); + } } void _audit_slck_destroy(struct slck *lp) { - if (lp->sl_mtx) { lck_mtx_free(lp->sl_mtx, audit_lck_grp); lp->sl_mtx = NULL; @@ -590,14 +591,14 @@ _audit_slck_destroy(struct slck *lp) */ #ifndef timersub #define timersub(tvp, uvp, vvp) \ - do { \ - (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ - (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ - if ((vvp)->tv_usec < 0) { \ - (vvp)->tv_sec--; \ - (vvp)->tv_usec += 1000000; \ - } \ - } while (0) + do { \ + (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ + (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ + if ((vvp)->tv_usec < 0) { \ + (vvp)->tv_sec--; \ + (vvp)->tv_usec += 1000000; \ + } \ + } while (0) #endif /* @@ -626,16 +627,18 @@ _audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps) *lasttime = tv; *curpps = 0; rv = 1; - } else if (maxpps < 0) + } else if (maxpps < 0) { rv = 1; - else if (*curpps < maxpps) + } else if (*curpps < maxpps) { rv = 1; - else + } else { rv = 0; - if (*curpps + 1 > 0) + } + if (*curpps + 1 > 0) { *curpps = *curpps + 1; + } - return (rv); + return rv; } /* @@ -660,10 +663,10 @@ audit_send_trigger(unsigned int trigger) if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) { (void)audit_triggers(audit_port, trigger); ipc_port_release_send(audit_port); - return (0); + return 0; } else { printf("Cannot get audit control port\n"); - return (error); + return error; } } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_bsd.h b/bsd/security/audit/audit_bsd.h index 4293b781b..74425201c 100644 --- a/bsd/security/audit/audit_bsd.h +++ b/bsd/security/audit/audit_bsd.h @@ -35,27 +35,27 @@ #if defined(_KERNEL) || defined(KERNEL) -#if DIAGNOSTIC +#if DIAGNOSTIC #ifdef KASSERT #undef KASSERT #endif #ifdef AUDIT_KASSERT_DEBUG -#define KASSERT(exp, msg) do { \ - if (__builtin_expect(!(exp), 0)) { \ - printf("%s:%d KASSERT failed: ", __FILE__, __LINE__); \ - printf msg; \ - printf("\n"); \ - } \ +#define KASSERT(exp, msg) do { \ + if (__builtin_expect(!(exp), 0)) { \ + printf("%s:%d KASSERT failed: ", __FILE__, __LINE__); \ + printf msg; \ + printf("\n"); \ + } \ } while (0) #else -#define KASSERT(exp, msg) do { \ - if (__builtin_expect(!(exp), 0)) \ - panic msg; \ +#define KASSERT(exp, msg) do { \ + if (__builtin_expect(!(exp), 0)) \ + panic msg; \ } while (0) #endif -#endif /* DIAGNOSTIC */ +#endif /* DIAGNOSTIC */ -#define AU_MAX_LCK_NAME 32 +#define AU_MAX_LCK_NAME 32 #if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN #define be16enc(p, d) *(p) = (d) @@ -72,114 +72,114 @@ #endif /* __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN */ /* - * BSD kernel memory allocation. + * BSD kernel memory allocation. */ -#define AUDIT_MALLOC_DEBUG 0 /* Change to 1 for malloc debugging. 
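
_audit_ppsratecheck() above is the classic BSD events-per-second limiter: when the one-second window rolls over (measured with timersub() on timevals), reset the counter; otherwise admit at most maxpps events, with a negative maxpps meaning unlimited. A coarser standalone sketch using whole-second time() in place of the timeval arithmetic:

#include <stdio.h>
#include <time.h>

/* Returns 1 if the event may proceed, 0 if it should be suppressed. */
static int example_ppsratecheck(time_t *lasttime, int *curpps, int maxpps)
{
    time_t now = time(NULL);

    if (now != *lasttime) {     /* new second: reset the window */
        *lasttime = now;
        *curpps = 0;
    }
    if (maxpps < 0)
        return 1;               /* negative limit means "unlimited" */
    (*curpps)++;                /* xnu also guards this against overflow */
    return *curpps <= maxpps;
}

int main(void)
{
    time_t last = 0;
    int curpps = 0, allowed = 0, i;

    for (i = 0; i < 100; i++)
        allowed += example_ppsratecheck(&last, &curpps, 5);
    printf("allowed %d of 100 events this second\n", allowed);  /* ~5 */
    return 0;
}
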
*/ - -#define M_AUDITUNKNOWN 0 -#define M_AUDITDATA 1 -#define M_AUDITPATH 2 -#define M_AUDITTEXT 3 -#define M_AUDITBSM 4 -#define M_AUDITEVCLASS 5 -#define M_AUDIT_PIPE 6 -#define M_AUDIT_PIPE_ENTRY 7 -#define M_AUDIT_PIPE_PRESELECT 8 -#define M_AU_SESSION 9 -#define M_AU_EV_PLIST 10 - -#define NUM_MALLOC_TYPES 11 - -#ifdef M_WAITOK -#undef M_WAITOK -#define M_WAITOK 0x0000 /* ok to block */ +#define AUDIT_MALLOC_DEBUG 0 /* Change to 1 for malloc debugging. */ + +#define M_AUDITUNKNOWN 0 +#define M_AUDITDATA 1 +#define M_AUDITPATH 2 +#define M_AUDITTEXT 3 +#define M_AUDITBSM 4 +#define M_AUDITEVCLASS 5 +#define M_AUDIT_PIPE 6 +#define M_AUDIT_PIPE_ENTRY 7 +#define M_AUDIT_PIPE_PRESELECT 8 +#define M_AU_SESSION 9 +#define M_AU_EV_PLIST 10 + +#define NUM_MALLOC_TYPES 11 + +#ifdef M_WAITOK +#undef M_WAITOK +#define M_WAITOK 0x0000 /* ok to block */ #endif -#ifdef M_NOWAIT -#undef M_NOWAIT +#ifdef M_NOWAIT +#undef M_NOWAIT #endif -#define M_NOWAIT 0x0001 /* do not block */ -#ifdef M_ZERO -#undef M_ZERO +#define M_NOWAIT 0x0001 /* do not block */ +#ifdef M_ZERO +#undef M_ZERO #endif -#define M_ZERO 0x0004 /* bzero the allocation */ +#define M_ZERO 0x0004 /* bzero the allocation */ -#ifdef M_MAGIC -#undef M_MAGIC +#ifdef M_MAGIC +#undef M_MAGIC #endif -#define M_MAGIC 877983977 +#define M_MAGIC 877983977 -#ifdef MALLOC_DEFINE -#undef MALLOC_DEFINE +#ifdef MALLOC_DEFINE +#undef MALLOC_DEFINE #endif #if AUDIT_MALLOC_DEBUG -struct au_malloc_type { - SInt64 mt_size; - SInt64 mt_maxsize; - SInt32 mt_inuse; - SInt32 mt_maxused; - unsigned mt_type; - unsigned mt_magic; - const char *mt_shortdesc; - const char *mt_lastcaller; +struct au_malloc_type { + SInt64 mt_size; + SInt64 mt_maxsize; + SInt32 mt_inuse; + SInt32 mt_maxused; + unsigned mt_type; + unsigned mt_magic; + const char *mt_shortdesc; + const char *mt_lastcaller; }; -typedef struct au_malloc_type au_malloc_type_t; +typedef struct au_malloc_type au_malloc_type_t; -#define MALLOC_DEFINE(type, shortdesc, longdesc) \ - au_malloc_type_t audit_##type[1] = { \ - { 0, 0, 0, 0, (type < NUM_MALLOC_TYPES) ? type :\ - M_AUDITUNKNOWN, M_MAGIC, shortdesc, NULL } \ +#define MALLOC_DEFINE(type, shortdesc, longdesc) \ + au_malloc_type_t audit_##type[1] = { \ + { 0, 0, 0, 0, (type < NUM_MALLOC_TYPES) ? 
type :\ + M_AUDITUNKNOWN, M_MAGIC, shortdesc, NULL } \ } -extern au_malloc_type_t *audit_malloc_types[]; +extern au_malloc_type_t *audit_malloc_types[]; #else -struct au_malloc_type { - uint32_t mt_magic; - const char *mt_shortdesc; +struct au_malloc_type { + uint32_t mt_magic; + const char *mt_shortdesc; }; -typedef struct au_malloc_type au_malloc_type_t; +typedef struct au_malloc_type au_malloc_type_t; -#define MALLOC_DEFINE(type, shortdesc, longdesc) \ - au_malloc_type_t audit_##type[1] = { \ - {M_MAGIC, shortdesc } \ +#define MALLOC_DEFINE(type, shortdesc, longdesc) \ + au_malloc_type_t audit_##type[1] = { \ + {M_MAGIC, shortdesc } \ } #endif /* AUDIT_MALLOC_DEBUG */ -#ifdef MALLOC_DECLARE -#undef MALLOC_DECLARE +#ifdef MALLOC_DECLARE +#undef MALLOC_DECLARE #endif -#define MALLOC_DECLARE(type) \ +#define MALLOC_DECLARE(type) \ extern au_malloc_type_t audit_##type[] #if AUDIT_MALLOC_DEBUG -#define malloc(sz, tp, fl) _audit_malloc(sz, audit_##tp, fl, __FUNCTION__) +#define malloc(sz, tp, fl) _audit_malloc(sz, audit_##tp, fl, __FUNCTION__) void *_audit_malloc(size_t size, au_malloc_type_t *type, int flags, const char *fn); #else -#define malloc(sz, tp, fl) _audit_malloc(sz, audit_##tp, fl) +#define malloc(sz, tp, fl) _audit_malloc(sz, audit_##tp, fl) void *_audit_malloc(size_t size, au_malloc_type_t *type, int flags); #endif -#define free(ad, tp) _audit_free(ad, audit_##tp) +#define free(ad, tp) _audit_free(ad, audit_##tp) void _audit_free(void *addr, au_malloc_type_t *type); /* * BSD condition variable. */ struct cv { - const char *cv_description; - int cv_waiters; + const char *cv_description; + int cv_waiters; }; /* * BSD mutex. */ struct mtx { - lck_mtx_t *mtx_lock; + lck_mtx_t *mtx_lock; #if DIAGNOSTIC - char mtx_name[AU_MAX_LCK_NAME]; + char mtx_name[AU_MAX_LCK_NAME]; #endif }; @@ -187,9 +187,9 @@ struct mtx { * BSD rw lock. */ struct rwlock { - lck_rw_t *rw_lock; + lck_rw_t *rw_lock; #if DIAGNOSTIC - char rw_name[AU_MAX_LCK_NAME]; + char rw_name[AU_MAX_LCK_NAME]; #endif }; @@ -197,11 +197,11 @@ struct rwlock { * Sleep lock. */ struct slck { - lck_mtx_t *sl_mtx; - int sl_locked; - int sl_waiting; + lck_mtx_t *sl_mtx; + int sl_locked; + int sl_waiting; #if DIAGNOSTIC - char sl_name[AU_MAX_LCK_NAME]; + char sl_name[AU_MAX_LCK_NAME]; #endif }; @@ -209,14 +209,14 @@ struct slck { * Recursive lock. */ struct rlck { - lck_mtx_t *rl_mtx; - uint32_t rl_recurse; - thread_t rl_thread; + lck_mtx_t *rl_mtx; + uint32_t rl_recurse; + thread_t rl_thread; #if DIAGNOSTIC - char rl_name[AU_MAX_LCK_NAME]; + char rl_name[AU_MAX_LCK_NAME]; #endif }; - + /* * BSD condition variables functions. 
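
MALLOC_DEFINE(type, ...) above expands to a one-element au_malloc_type_t array named audit_<type>, and the malloc(sz, tp, fl) macro pastes the same audit_ prefix onto its type argument, so every allocation site carries a typed, magic-stamped tag with no runtime registration step. A sketch of that token-pasting registry, with illustrative example_* names:

#include <stdio.h>
#include <stdlib.h>

struct example_malloc_type { unsigned magic; const char *shortdesc; };

#define EXAMPLE_MAGIC 877983977u    /* same value as M_MAGIC */

/* Expands to a one-element array named example_<type>, mirroring
 * MALLOC_DEFINE() producing audit_<type>. */
#define EXAMPLE_MALLOC_DEFINE(type, shortdesc) \
    struct example_malloc_type example_##type[1] = \
        {{ EXAMPLE_MAGIC, shortdesc }}

/* The call-site macro pastes the same prefix, like malloc(sz, tp, fl). */
#define example_malloc(sz, tp) example_malloc_impl((sz), example_##tp)

static void *example_malloc_impl(size_t size,
    struct example_malloc_type *t)
{
    printf("alloc %zu bytes for \"%s\"\n", size, t->shortdesc);
    return malloc(size);
}

EXAMPLE_MALLOC_DEFINE(M_DEMOTEXT, "demo text buffers");

int main(void)
{
    void *p = example_malloc(64, M_DEMOTEXT);

    free(p);
    return 0;
}
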
*/ @@ -226,102 +226,102 @@ void _audit_cv_signal(struct cv *cvp); void _audit_cv_broadcast(struct cv *cvp); void _audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc); int _audit_cv_wait_sig(struct cv *cvp, lck_mtx_t *mp, const char *desc); -int _audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp, - thread_continue_t function); -#define cv_init(cvp, desc) _audit_cv_init(cvp, desc) -#define cv_destroy(cvp) _audit_cv_destroy(cvp) -#define cv_signal(cvp) _audit_cv_signal(cvp) -#define cv_broadcast(cvp) _audit_cv_broadcast(cvp) +int _audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp, + thread_continue_t function); +#define cv_init(cvp, desc) _audit_cv_init(cvp, desc) +#define cv_destroy(cvp) _audit_cv_destroy(cvp) +#define cv_signal(cvp) _audit_cv_signal(cvp) +#define cv_broadcast(cvp) _audit_cv_broadcast(cvp) #define cv_broadcastpri(cvp, pri) _audit_cv_broadcast(cvp) -#define cv_wait(cvp, mp) _audit_cv_wait(cvp, (mp)->mtx_lock, #cvp) -#define cv_wait_sig(cvp, mp) _audit_cv_wait_sig(cvp, (mp)->mtx_lock, #cvp) -#define cv_wait_continuation(cvp,mp,f) \ +#define cv_wait(cvp, mp) _audit_cv_wait(cvp, (mp)->mtx_lock, #cvp) +#define cv_wait_sig(cvp, mp) _audit_cv_wait_sig(cvp, (mp)->mtx_lock, #cvp) +#define cv_wait_continuation(cvp, mp, f) \ _audit_cv_wait_continuation(cvp, (mp)->mtx_lock, f) /* * BSD Mutexes. */ -void _audit_mtx_init(struct mtx *mp, const char *name); -void _audit_mtx_destroy(struct mtx *mp); -#define mtx_init(mp, name, type, opts) \ - _audit_mtx_init(mp, name) -#define mtx_lock(mp) lck_mtx_lock((mp)->mtx_lock) -#define mtx_unlock(mp) lck_mtx_unlock((mp)->mtx_lock) -#define mtx_destroy(mp) _audit_mtx_destroy(mp) -#define mtx_yield(mp) lck_mtx_yield((mp)->mtx_lock) +void _audit_mtx_init(struct mtx *mp, const char *name); +void _audit_mtx_destroy(struct mtx *mp); +#define mtx_init(mp, name, type, opts) \ + _audit_mtx_init(mp, name) +#define mtx_lock(mp) lck_mtx_lock((mp)->mtx_lock) +#define mtx_unlock(mp) lck_mtx_unlock((mp)->mtx_lock) +#define mtx_destroy(mp) _audit_mtx_destroy(mp) +#define mtx_yield(mp) lck_mtx_yield((mp)->mtx_lock) /* * Sleep lock functions. */ -void _audit_slck_init(struct slck *lp, const char *grpname); -wait_result_t _audit_slck_lock(struct slck *lp, int intr); -void _audit_slck_unlock(struct slck *lp); -int _audit_slck_trylock(struct slck *lp); -void _audit_slck_assert(struct slck *lp, u_int assert); -void _audit_slck_destroy(struct slck *lp); -#define slck_init(lp, name) _audit_slck_init((lp), (name)) -#define slck_lock(lp) _audit_slck_lock((lp), 0) -#define slck_lock_sig(lp) (_audit_slck_lock((lp), 1) != THREAD_AWAKENED) -#define slck_unlock(lp) _audit_slck_unlock((lp)) -#define slck_destroy(lp) _audit_slck_destroy((lp)) +void _audit_slck_init(struct slck *lp, const char *grpname); +wait_result_t _audit_slck_lock(struct slck *lp, int intr); +void _audit_slck_unlock(struct slck *lp); +int _audit_slck_trylock(struct slck *lp); +void _audit_slck_assert(struct slck *lp, u_int assert); +void _audit_slck_destroy(struct slck *lp); +#define slck_init(lp, name) _audit_slck_init((lp), (name)) +#define slck_lock(lp) _audit_slck_lock((lp), 0) +#define slck_lock_sig(lp) (_audit_slck_lock((lp), 1) != THREAD_AWAKENED) +#define slck_unlock(lp) _audit_slck_unlock((lp)) +#define slck_destroy(lp) _audit_slck_destroy((lp)) /* * Recursive lock functions. 
*/ -void _audit_rlck_init(struct rlck *lp, const char *grpname); -void _audit_rlck_lock(struct rlck *lp); -void _audit_rlck_unlock(struct rlck *lp); -void _audit_rlck_assert(struct rlck *lp, u_int assert); -void _audit_rlck_destroy(struct rlck *lp); -#define rlck_init(lp, name) _audit_rlck_init((lp), (name)) -#define rlck_lock(lp) _audit_rlck_lock((lp)) -#define rlck_unlock(lp) _audit_rlck_unlock((lp)) -#define rlck_destroy(lp) _audit_rlck_destroy((lp)) +void _audit_rlck_init(struct rlck *lp, const char *grpname); +void _audit_rlck_lock(struct rlck *lp); +void _audit_rlck_unlock(struct rlck *lp); +void _audit_rlck_assert(struct rlck *lp, u_int assert); +void _audit_rlck_destroy(struct rlck *lp); +#define rlck_init(lp, name) _audit_rlck_init((lp), (name)) +#define rlck_lock(lp) _audit_rlck_lock((lp)) +#define rlck_unlock(lp) _audit_rlck_unlock((lp)) +#define rlck_destroy(lp) _audit_rlck_destroy((lp)) /* * BSD rw locks. */ -void _audit_rw_init(struct rwlock *lp, const char *name); -void _audit_rw_destroy(struct rwlock *lp); -#define rw_init(lp, name) _audit_rw_init(lp, name) -#define rw_rlock(lp) lck_rw_lock_shared((lp)->rw_lock) -#define rw_runlock(lp) lck_rw_unlock_shared((lp)->rw_lock) -#define rw_wlock(lp) lck_rw_lock_exclusive((lp)->rw_lock) -#define rw_wunlock(lp) lck_rw_unlock_exclusive((lp)->rw_lock) -#define rw_destroy(lp) _audit_rw_destroy(lp) - -#define MA_OWNED LCK_MTX_ASSERT_OWNED -#define RA_LOCKED LCK_RW_ASSERT_HELD -#define RA_RLOCKED LCK_RW_ASSERT_SHARED -#define RA_WLOCKED LCK_RW_ASSERT_EXCLUSIVE -#define SA_LOCKED LCK_RW_ASSERT_HELD -#define SA_XLOCKED LCK_RW_ASSERT_EXCLUSIVE -#define SL_OWNED LCK_MTX_ASSERT_OWNED -#define SL_NOTOWNED LCK_MTX_ASSERT_NOTOWNED +void _audit_rw_init(struct rwlock *lp, const char *name); +void _audit_rw_destroy(struct rwlock *lp); +#define rw_init(lp, name) _audit_rw_init(lp, name) +#define rw_rlock(lp) lck_rw_lock_shared((lp)->rw_lock) +#define rw_runlock(lp) lck_rw_unlock_shared((lp)->rw_lock) +#define rw_wlock(lp) lck_rw_lock_exclusive((lp)->rw_lock) +#define rw_wunlock(lp) lck_rw_unlock_exclusive((lp)->rw_lock) +#define rw_destroy(lp) _audit_rw_destroy(lp) + +#define MA_OWNED LCK_MTX_ASSERT_OWNED +#define RA_LOCKED LCK_RW_ASSERT_HELD +#define RA_RLOCKED LCK_RW_ASSERT_SHARED +#define RA_WLOCKED LCK_RW_ASSERT_EXCLUSIVE +#define SA_LOCKED LCK_RW_ASSERT_HELD +#define SA_XLOCKED LCK_RW_ASSERT_EXCLUSIVE +#define SL_OWNED LCK_MTX_ASSERT_OWNED +#define SL_NOTOWNED LCK_MTX_ASSERT_NOTOWNED #if DIAGNOSTIC -#define mtx_assert(mp, wht) lck_mtx_assert((mp)->mtx_lock, wht) -#define rw_assert(lp, wht) lck_rw_assert((lp)->rw_lock, wht) -#define sx_assert(lp, wht) lck_rw_assert((lp)->sx_lock, wht) -#define rlck_assert(lp, wht) _audit_rlck_assert((lp), wht) -#define slck_assert(lp, wht) _audit_slck_assert((lp), wht) +#define mtx_assert(mp, wht) lck_mtx_assert((mp)->mtx_lock, wht) +#define rw_assert(lp, wht) lck_rw_assert((lp)->rw_lock, wht) +#define sx_assert(lp, wht) lck_rw_assert((lp)->sx_lock, wht) +#define rlck_assert(lp, wht) _audit_rlck_assert((lp), wht) +#define slck_assert(lp, wht) _audit_slck_assert((lp), wht) #else -#define mtx_assert(mp, wht) -#define rw_assert(lp, wht) -#define sx_assert(lp, wht) -#define rlck_assert(lp, wht) -#define slck_assert(lp, wht) +#define mtx_assert(mp, wht) +#define rw_assert(lp, wht) +#define sx_assert(lp, wht) +#define rlck_assert(lp, wht) +#define slck_assert(lp, wht) #endif /* DIAGNOSTIC */ /* * Synchronization initialization. 
*/ -void _audit_lck_grp_init(void); +void _audit_lck_grp_init(void); /* * BSD (IPv6) event rate limiter. - */ + */ int _audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps); -#define ppsratecheck(tv, cr, mr) _audit_ppsratecheck(tv, cr, mr) +#define ppsratecheck(tv, cr, mr) _audit_ppsratecheck(tv, cr, mr) #endif /* defined(_KERNEL) || defined(KERNEL) */ #endif /* _SECURITY_AUDIT_AUDIT_BSD_H */ diff --git a/bsd/security/audit/audit_bsm.c b/bsd/security/audit/audit_bsm.c index 876833e4b..9610b52dd 100644 --- a/bsd/security/audit/audit_bsm.c +++ b/bsd/security/audit/audit_bsm.c @@ -64,10 +64,10 @@ MALLOC_DEFINE(M_AUDITBSM, "audit_bsm", "Audit BSM data"); #include #endif -static void audit_sys_auditon(struct audit_record *ar, - struct au_record *rec); -static void audit_sys_fcntl(struct kaudit_record *kar, - struct au_record *rec); +static void audit_sys_auditon(struct audit_record *ar, + struct au_record *rec); +static void audit_sys_fcntl(struct kaudit_record *kar, + struct au_record *rec); /* * Initialize the BSM auditing subsystem. @@ -75,7 +75,6 @@ static void audit_sys_fcntl(struct kaudit_record *kar, void kau_init(void) { - au_evclassmap_init(); } @@ -96,7 +95,7 @@ kau_open(void) rec->len = 0; rec->used = 1; - return (rec); + return rec; } /* @@ -105,7 +104,6 @@ kau_open(void) static void kau_write(struct au_record *rec, struct au_token *tok) { - KASSERT(tok != NULL, ("kau_write: tok == NULL")); TAILQ_INSERT_TAIL(&rec->token_q, tok, tokens); @@ -147,10 +145,11 @@ kau_close(struct au_record *rec, struct timespec *ctime, short event) tm.tv_usec = ctime->tv_nsec / 1000; tm.tv_sec = ctime->tv_sec; - if (hdrsize != AUDIT_HEADER_SIZE) + if (hdrsize != AUDIT_HEADER_SIZE) { hdr = au_to_header32_ex_tm(tot_rec_size, event, 0, tm, &ak); - else + } else { hdr = au_to_header32_tm(tot_rec_size, event, 0, tm); + } TAILQ_INSERT_HEAD(&rec->token_q, hdr, tokens); trail = au_to_trailer(tot_rec_size); @@ -194,181 +193,181 @@ kau_free(struct au_record *rec) * caller are OK with this. 
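
kau_open()/kau_write()/kau_close() above assemble a BSM record by queueing tokens and deferring the header and trailer until close, when the total record length is finally known. A sketch of that queue-then-close shape, with a plain singly linked list standing in for the token TAILQ and a printed length standing in for the BSM header/trailer encoding:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct example_tok {
    struct example_tok *next;
    size_t len;
    char data[32];
};

struct example_rec {
    struct example_tok *head;
    struct example_tok **tail;
    size_t len;
};

static void example_open(struct example_rec *r)
{
    r->head = NULL;
    r->tail = &r->head;
    r->len = 0;
}

static void example_write(struct example_rec *r, const char *s)
{
    struct example_tok *t = calloc(1, sizeof(*t));

    if (t == NULL)
        return;
    t->len = strnlen(s, sizeof(t->data) - 1);
    memcpy(t->data, s, t->len);     /* calloc left a terminating NUL */
    *r->tail = t;                   /* append, like TAILQ_INSERT_TAIL */
    r->tail = &t->next;
    r->len += t->len;
}

static void example_close(struct example_rec *r)
{
    struct example_tok *t, *n;

    /* Only now is the total size known, as in kau_close() computing
     * tot_rec_size before emitting the header and trailer tokens. */
    printf("record: %zu token bytes + header + trailer\n", r->len);
    for (t = r->head; t != NULL; t = n) {
        n = t->next;
        printf("  token \"%s\"\n", t->data);
        free(t);
    }
}

int main(void)
{
    struct example_rec r;

    example_open(&r);
    example_write(&r, "subject");
    example_write(&r, "path");
    example_write(&r, "return");
    example_close(&r);
    return 0;
}
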
*/ #if CONFIG_MACF -#define MAC_VNODE1_LABEL_TOKEN do { \ - if (ar->ar_vnode1_mac_labels != NULL && \ - strlen(ar->ar_vnode1_mac_labels) != 0) { \ - tok = au_to_text(ar->ar_vnode1_mac_labels); \ - kau_write(rec, tok); \ - } \ +#define MAC_VNODE1_LABEL_TOKEN do { \ + if (ar->ar_vnode1_mac_labels != NULL && \ + strlen(ar->ar_vnode1_mac_labels) != 0) { \ + tok = au_to_text(ar->ar_vnode1_mac_labels); \ + kau_write(rec, tok); \ + } \ } while (0) -#define MAC_VNODE2_LABEL_TOKEN do { \ - if (ar->ar_vnode2_mac_labels != NULL && \ - strlen(ar->ar_vnode2_mac_labels) != 0) { \ - tok = au_to_text(ar->ar_vnode2_mac_labels); \ - kau_write(rec, tok); \ - } \ +#define MAC_VNODE2_LABEL_TOKEN do { \ + if (ar->ar_vnode2_mac_labels != NULL && \ + strlen(ar->ar_vnode2_mac_labels) != 0) { \ + tok = au_to_text(ar->ar_vnode2_mac_labels); \ + kau_write(rec, tok); \ + } \ } while (0) #else -#define MAC_VNODE1_LABEL_TOKEN -#define MAC_VNODE2_LABEL_TOKEN +#define MAC_VNODE1_LABEL_TOKEN +#define MAC_VNODE2_LABEL_TOKEN #endif -#define UPATH1_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_UPATH1)) { \ - tok = au_to_path(ar->ar_arg_upath1); \ - kau_write(rec, tok); \ - } \ +#define UPATH1_TOKENS do { \ + if (ARG_IS_VALID(kar, ARG_UPATH1)) { \ + tok = au_to_path(ar->ar_arg_upath1); \ + kau_write(rec, tok); \ + } \ } while (0) -#define UPATH2_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_UPATH2)) { \ - tok = au_to_path(ar->ar_arg_upath2); \ - kau_write(rec, tok); \ - } \ +#define UPATH2_TOKENS do { \ + if (ARG_IS_VALID(kar, ARG_UPATH2)) { \ + tok = au_to_path(ar->ar_arg_upath2); \ + kau_write(rec, tok); \ + } \ } while (0) -#define KPATH2_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_KPATH2)) { \ - tok = au_to_path(ar->ar_arg_kpath2); \ - kau_write(rec, tok); \ - } \ +#define KPATH2_TOKENS do { \ + if (ARG_IS_VALID(kar, ARG_KPATH2)) { \ + tok = au_to_path(ar->ar_arg_kpath2); \ + kau_write(rec, tok); \ + } \ } while (0) -#define VNODE1_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_KPATH1)) { \ - tok = au_to_path(ar->ar_arg_kpath1); \ - kau_write(rec, tok); \ - } \ - if (ARG_IS_VALID(kar, ARG_VNODE1)) { \ - tok = au_to_attr32(&ar->ar_arg_vnode1); \ - kau_write(rec, tok); \ - MAC_VNODE1_LABEL_TOKEN; \ - } \ +#define VNODE1_TOKENS do { \ + if (ARG_IS_VALID(kar, ARG_KPATH1)) { \ + tok = au_to_path(ar->ar_arg_kpath1); \ + kau_write(rec, tok); \ + } \ + if (ARG_IS_VALID(kar, ARG_VNODE1)) { \ + tok = au_to_attr32(&ar->ar_arg_vnode1); \ + kau_write(rec, tok); \ + MAC_VNODE1_LABEL_TOKEN; \ + } \ } while (0) -#define UPATH1_VNODE1_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_UPATH1)) { \ - tok = au_to_path(ar->ar_arg_upath1); \ - kau_write(rec, tok); \ - } \ - if (ARG_IS_VALID(kar, ARG_KPATH1)) { \ - tok = au_to_path(ar->ar_arg_kpath1); \ - kau_write(rec, tok); \ - } \ - if (ARG_IS_VALID(kar, ARG_VNODE1)) { \ - tok = au_to_attr32(&ar->ar_arg_vnode1); \ - kau_write(rec, tok); \ - MAC_VNODE1_LABEL_TOKEN; \ - } \ +#define UPATH1_VNODE1_TOKENS do { \ + if (ARG_IS_VALID(kar, ARG_UPATH1)) { \ + tok = au_to_path(ar->ar_arg_upath1); \ + kau_write(rec, tok); \ + } \ + if (ARG_IS_VALID(kar, ARG_KPATH1)) { \ + tok = au_to_path(ar->ar_arg_kpath1); \ + kau_write(rec, tok); \ + } \ + if (ARG_IS_VALID(kar, ARG_VNODE1)) { \ + tok = au_to_attr32(&ar->ar_arg_vnode1); \ + kau_write(rec, tok); \ + MAC_VNODE1_LABEL_TOKEN; \ + } \ } while (0) -#define VNODE2_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_VNODE2)) { \ - tok = au_to_attr32(&ar->ar_arg_vnode2); \ - kau_write(rec, tok); \ - MAC_VNODE2_LABEL_TOKEN; \ - } \ +#define VNODE2_TOKENS do { \ + if (ARG_IS_VALID(kar, 
ARG_VNODE2)) { \ + tok = au_to_attr32(&ar->ar_arg_vnode2); \ + kau_write(rec, tok); \ + MAC_VNODE2_LABEL_TOKEN; \ + } \ } while (0) -#define VNODE2_PATH_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_KPATH2)) { \ - tok = au_to_path(ar->ar_arg_kpath2); \ - kau_write(rec, tok); \ - } \ - if (ARG_IS_VALID(kar, ARG_VNODE2)) { \ - tok = au_to_attr32(&ar->ar_arg_vnode2); \ - kau_write(rec, tok); \ - MAC_VNODE2_LABEL_TOKEN; \ - } \ +#define VNODE2_PATH_TOKENS do { \ + if (ARG_IS_VALID(kar, ARG_KPATH2)) { \ + tok = au_to_path(ar->ar_arg_kpath2); \ + kau_write(rec, tok); \ + } \ + if (ARG_IS_VALID(kar, ARG_VNODE2)) { \ + tok = au_to_attr32(&ar->ar_arg_vnode2); \ + kau_write(rec, tok); \ + MAC_VNODE2_LABEL_TOKEN; \ + } \ } while (0) -#define FD_VNODE1_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_VNODE1)) { \ - if (ARG_IS_VALID(kar, ARG_KPATH1)) { \ - tok = au_to_path(ar->ar_arg_kpath1); \ - kau_write(rec, tok); \ - } \ - if (ARG_IS_VALID(kar, ARG_FD)) { \ - tok = au_to_arg32(1, "fd", ar->ar_arg_fd); \ - kau_write(rec, tok); \ - MAC_VNODE1_LABEL_TOKEN; \ - } \ - tok = au_to_attr32(&ar->ar_arg_vnode1); \ - kau_write(rec, tok); \ - } else { \ - if (ARG_IS_VALID(kar, ARG_FD)) { \ - tok = au_to_arg32(1, "fd", \ - ar->ar_arg_fd); \ - kau_write(rec, tok); \ - MAC_VNODE1_LABEL_TOKEN; \ - } \ - } \ +#define FD_VNODE1_TOKENS do { \ + if (ARG_IS_VALID(kar, ARG_VNODE1)) { \ + if (ARG_IS_VALID(kar, ARG_KPATH1)) { \ + tok = au_to_path(ar->ar_arg_kpath1); \ + kau_write(rec, tok); \ + } \ + if (ARG_IS_VALID(kar, ARG_FD)) { \ + tok = au_to_arg32(1, "fd", ar->ar_arg_fd); \ + kau_write(rec, tok); \ + MAC_VNODE1_LABEL_TOKEN; \ + } \ + tok = au_to_attr32(&ar->ar_arg_vnode1); \ + kau_write(rec, tok); \ + } else { \ + if (ARG_IS_VALID(kar, ARG_FD)) { \ + tok = au_to_arg32(1, "fd", \ + ar->ar_arg_fd); \ + kau_write(rec, tok); \ + MAC_VNODE1_LABEL_TOKEN; \ + } \ + } \ } while (0) -#define PROCESS_PID_TOKENS(argn) do { \ - if ((ar->ar_arg_pid > 0) /* Reference a single process */ \ - && (ARG_IS_VALID(kar, ARG_PROCESS))) { \ - tok = au_to_process32_ex(ar->ar_arg_auid, \ - ar->ar_arg_euid, ar->ar_arg_egid, \ - ar->ar_arg_ruid, ar->ar_arg_rgid, \ - ar->ar_arg_pid, ar->ar_arg_asid, \ - &ar->ar_arg_termid_addr); \ - kau_write(rec, tok); \ - } else if (ARG_IS_VALID(kar, ARG_PID)) { \ - tok = au_to_arg32(argn, "process", ar->ar_arg_pid); \ - kau_write(rec, tok); \ - } \ +#define PROCESS_PID_TOKENS(argn) do { \ + if ((ar->ar_arg_pid > 0) /* Reference a single process */ \ + && (ARG_IS_VALID(kar, ARG_PROCESS))) { \ + tok = au_to_process32_ex(ar->ar_arg_auid, \ + ar->ar_arg_euid, ar->ar_arg_egid, \ + ar->ar_arg_ruid, ar->ar_arg_rgid, \ + ar->ar_arg_pid, ar->ar_arg_asid, \ + &ar->ar_arg_termid_addr); \ + kau_write(rec, tok); \ + } else if (ARG_IS_VALID(kar, ARG_PID)) { \ + tok = au_to_arg32(argn, "process", ar->ar_arg_pid); \ + kau_write(rec, tok); \ + } \ } while (0) -#define EXTATTR_TOKENS do { \ - if (ARG_IS_VALID(kar, ARG_VALUE32)) { \ - switch (ar->ar_arg_value32) { \ - case EXTATTR_NAMESPACE_USER: \ - tok = au_to_text(EXTATTR_NAMESPACE_USER_STRING);\ - break; \ - case EXTATTR_NAMESPACE_SYSTEM: \ - tok = au_to_text(EXTATTR_NAMESPACE_SYSTEM_STRING);\ - break; \ - default: \ - tok = au_to_arg32(3, "attrnamespace", \ - ar->ar_arg_value32); \ - break; \ - } \ - kau_write(rec, tok); \ - } \ - /* attrname is in the text field */ \ - if (ARG_IS_VALID(kar, ARG_TEXT)) { \ - tok = au_to_text(ar->ar_arg_text); \ - kau_write(rec, tok); \ - } \ +#define EXTATTR_TOKENS do { \ + if (ARG_IS_VALID(kar, ARG_VALUE32)) { \ + switch (ar->ar_arg_value32) { \ + 
case EXTATTR_NAMESPACE_USER: \ + tok = au_to_text(EXTATTR_NAMESPACE_USER_STRING);\ + break; \ + case EXTATTR_NAMESPACE_SYSTEM: \ + tok = au_to_text(EXTATTR_NAMESPACE_SYSTEM_STRING);\ + break; \ + default: \ + tok = au_to_arg32(3, "attrnamespace", \ + ar->ar_arg_value32); \ + break; \ + } \ + kau_write(rec, tok); \ + } \ + /* attrname is in the text field */ \ + if (ARG_IS_VALID(kar, ARG_TEXT)) { \ + tok = au_to_text(ar->ar_arg_text); \ + kau_write(rec, tok); \ + } \ } while (0) -#define EXTENDED_TOKENS(n) do { \ - /* ACL data */ \ - if (ARG_IS_VALID(kar, ARG_OPAQUE)) { \ - tok = au_to_opaque(ar->ar_arg_opaque, \ - ar->ar_arg_opq_size); \ - kau_write(rec, tok); \ - } \ - if (ARG_IS_VALID(kar, ARG_MODE)) { \ - tok = au_to_arg32(n+2, "mode", ar->ar_arg_mode);\ - kau_write(rec, tok); \ - } \ - if (ARG_IS_VALID(kar, ARG_GID)) { \ - tok = au_to_arg32(n+1, "gid", ar->ar_arg_gid); \ - kau_write(rec, tok); \ - } \ - if (ARG_IS_VALID(kar, ARG_UID)) { \ - tok = au_to_arg32(n, "uid", ar->ar_arg_uid); \ - kau_write(rec, tok); \ - } \ +#define EXTENDED_TOKENS(n) do { \ + /* ACL data */ \ + if (ARG_IS_VALID(kar, ARG_OPAQUE)) { \ + tok = au_to_opaque(ar->ar_arg_opaque, \ + ar->ar_arg_opq_size); \ + kau_write(rec, tok); \ + } \ + if (ARG_IS_VALID(kar, ARG_MODE)) { \ + tok = au_to_arg32(n+2, "mode", ar->ar_arg_mode);\ + kau_write(rec, tok); \ + } \ + if (ARG_IS_VALID(kar, ARG_GID)) { \ + tok = au_to_arg32(n+1, "gid", ar->ar_arg_gid); \ + kau_write(rec, tok); \ + } \ + if (ARG_IS_VALID(kar, ARG_UID)) { \ + tok = au_to_arg32(n, "uid", ar->ar_arg_uid); \ + kau_write(rec, tok); \ + } \ } while (0) -#define PROCESS_MAC_TOKENS do { \ - if (ar->ar_valid_arg & ARG_MAC_STRING) { \ - tok = au_to_text(ar->ar_arg_mac_string); \ - kau_write(rec, tok); \ - } \ +#define PROCESS_MAC_TOKENS do { \ + if (ar->ar_valid_arg & ARG_MAC_STRING) { \ + tok = au_to_text(ar->ar_arg_mac_string); \ + kau_write(rec, tok); \ + } \ } while (0) /* @@ -391,7 +390,7 @@ audit_sys_auditon(struct audit_record *ar, struct au_record *rec) kau_write(rec, tok); break; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case A_SETPOLICY: tok = au_to_arg32(3, "length", ar->ar_arg_len); kau_write(rec, tok); @@ -431,7 +430,7 @@ audit_sys_auditon(struct audit_record *ar, struct au_record *rec) kau_write(rec, tok); break; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case A_SETQCTRL: tok = au_to_arg32(3, "length", ar->ar_arg_len); kau_write(rec, tok); @@ -483,7 +482,7 @@ audit_sys_auditon(struct audit_record *ar, struct au_record *rec) kau_write(rec, tok); break; } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case A_SETCOND: tok = au_to_arg32(3, "length", ar->ar_arg_len); kau_write(rec, tok); @@ -540,7 +539,6 @@ audit_sys_fcntl(struct kaudit_record *kar, struct au_record *rec) struct audit_record *ar = &kar->k_ar; switch (ar->ar_arg_cmd) { - case F_DUPFD: if (ARG_IS_VALID(kar, ARG_VALUE32)) { tok = au_to_arg32(3, "min fd", ar->ar_arg_value32); @@ -570,7 +568,7 @@ audit_sys_fcntl(struct kaudit_record *kar, struct au_record *rec) } break; -#ifdef F_SETSIZE +#ifdef F_SETSIZE case F_SETSIZE: if (ARG_IS_VALID(kar, ARG_VALUE64)) { tok = au_to_arg64(3, "offset", ar->ar_arg_value64); @@ -579,7 +577,7 @@ audit_sys_fcntl(struct kaudit_record *kar, struct au_record *rec) break; #endif /* F_SETSIZE */ -#ifdef F_PATHPKG_CHECK +#ifdef F_PATHPKG_CHECK case F_PATHPKG_CHECK: if (ARG_IS_VALID(kar, ARG_TEXT)) { tok = au_to_text(ar->ar_arg_text); @@ -630,11 +628,11 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tid.machine = ar->ar_subj_term_addr.at_addr[0]; subj_tok = 
au_to_subject32(ar->ar_subj_auid, /* audit ID */ ar->ar_subj_cred.cr_uid, /* eff uid */ - ar->ar_subj_egid, /* eff group id */ - ar->ar_subj_ruid, /* real uid */ - ar->ar_subj_rgid, /* real group id */ - ar->ar_subj_pid, /* process id */ - ar->ar_subj_asid, /* session ID */ + ar->ar_subj_egid, /* eff group id */ + ar->ar_subj_ruid, /* real uid */ + ar->ar_subj_rgid, /* real group id */ + ar->ar_subj_pid, /* process id */ + ar->ar_subj_asid, /* session ID */ &tid); break; case AU_IPv6: @@ -665,14 +663,14 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) * header and trailer tokens are added by the kau_close() function. * The return token is added outside of the switch statement. */ - switch(ar->ar_event) { + switch (ar->ar_event) { case AUE_SENDFILE: /* For sendfile the file and socket descriptor are both saved */ if (ARG_IS_VALID(kar, ARG_VALUE32)) { tok = au_to_arg32(2, "sd", ar->ar_arg_value32); kau_write(rec, tok); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_ACCEPT: case AUE_BIND: case AUE_LISTEN: @@ -701,7 +699,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) } if (ARG_IS_VALID(kar, ARG_SADDRINET6)) { tok = au_to_sock_inet128((struct sockaddr_in6 *) - &ar->ar_arg_sockaddr); + &ar->ar_arg_sockaddr); kau_write(rec, tok); } break; @@ -709,13 +707,13 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) case AUE_SOCKET: case AUE_SOCKETPAIR: if (ARG_IS_VALID(kar, ARG_SOCKINFO)) { - tok = au_to_arg32(1,"domain", + tok = au_to_arg32(1, "domain", au_domain_to_bsm(ar->ar_arg_sockinfo.sai_domain)); kau_write(rec, tok); - tok = au_to_arg32(2,"type", + tok = au_to_arg32(2, "type", au_socket_type_to_bsm(ar->ar_arg_sockinfo.sai_type)); kau_write(rec, tok); - tok = au_to_arg32(3,"protocol", + tok = au_to_arg32(3, "protocol", ar->ar_arg_sockinfo.sai_protocol); kau_write(rec, tok); } @@ -794,12 +792,14 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(1, "setaudit_addr:port", ar->ar_arg_termid_addr.at_port); kau_write(rec, tok); - if (ar->ar_arg_termid_addr.at_type == AU_IPv6) + if (ar->ar_arg_termid_addr.at_type == AU_IPv6) { tok = au_to_in_addr_ex((struct in6_addr *) &ar->ar_arg_termid_addr.at_addr[0]); - if (ar->ar_arg_termid_addr.at_type == AU_IPv4) + } + if (ar->ar_arg_termid_addr.at_type == AU_IPv4) { tok = au_to_in_addr((struct in_addr *) &ar->ar_arg_termid_addr.at_addr[0]); + } kau_write(rec, tok); } break; @@ -812,7 +812,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(1, "cmd", ar->ar_arg_cmd); kau_write(rec, tok); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_AUDITON_GETCAR: case AUE_AUDITON_GETCLASS: @@ -830,8 +830,9 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) case AUE_AUDITON_SETUMASK: case AUE_AUDITON_SPOLICY: case AUE_AUDITON_SQCTRL: - if (ARG_IS_VALID(kar, ARG_AUDITON)) + if (ARG_IS_VALID(kar, ARG_AUDITON)) { audit_sys_auditon(ar, rec); + } break; case AUE_AUDITCTL: @@ -855,7 +856,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) case AUE_GETFSSTAT: case AUE_KQUEUE: case AUE_LSEEK: -#if 0 +#if 0 /* XXXss replace with kext */ case AUE_MODLOAD: case AUE_MODUNLOAD: @@ -988,7 +989,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(0, "child PID", ar->ar_arg_pid); kau_write(rec, tok); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_EXECVE: if (ARG_IS_VALID(kar, ARG_ARGV)) { @@ -1034,11 +1035,11 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) /* * 
XXXRW: Some of these need to handle non-vnode cases as well. - */ + */ case AUE_FSTAT_EXTENDED: case AUE_FCHDIR: case AUE_FPATHCONF: - case AUE_FSTAT: /* XXX Need to handle sockets and shm */ + case AUE_FSTAT: /* XXX Need to handle sockets and shm */ case AUE_FSTATFS: case AUE_FSYNC: case AUE_FTRUNCATE: @@ -1071,8 +1072,9 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) break; case AUE_FCNTL: - if (ARG_IS_VALID(kar, ARG_CMD)) + if (ARG_IS_VALID(kar, ARG_CMD)) { audit_sys_fcntl(kar, rec); + } FD_VNODE1_TOKENS; break; @@ -1161,17 +1163,17 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) (u_int32_t)ar->ar_arg_addr); kau_write(rec, tok); } - if (ARG_IS_VALID(kar, ARG_VNODE1)) + if (ARG_IS_VALID(kar, ARG_VNODE1)) { FD_VNODE1_TOKENS; - else { + } else { if (ARG_IS_VALID(kar, ARG_SOCKINFO)) { tok = au_to_socket_ex( - ar->ar_arg_sockinfo.sai_domain, - ar->ar_arg_sockinfo.sai_type, - (struct sockaddr *) - &ar->ar_arg_sockinfo.sai_laddr, - (struct sockaddr *) - &ar->ar_arg_sockinfo.sai_faddr); + ar->ar_arg_sockinfo.sai_domain, + ar->ar_arg_sockinfo.sai_type, + (struct sockaddr *) + &ar->ar_arg_sockinfo.sai_laddr, + (struct sockaddr *) + &ar->ar_arg_sockinfo.sai_faddr); kau_write(rec, tok); } else { if (ARG_IS_VALID(kar, ARG_FD)) { @@ -1243,8 +1245,9 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg64(2, "len", ar->ar_arg_len); kau_write(rec, tok); } - if (ar->ar_event == AUE_MMAP) + if (ar->ar_event == AUE_MMAP) { FD_VNODE1_TOKENS; + } if (ar->ar_event == AUE_MPROTECT) { if (ARG_IS_VALID(kar, ARG_VALUE32)) { tok = au_to_arg32(3, "protection", @@ -1276,7 +1279,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_text(ar->ar_arg_text); kau_write(rec, tok); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_UMOUNT: case AUE_UNMOUNT: @@ -1299,7 +1302,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) case AUE_MSGCTL: ar->ar_event = audit_msgctl_to_event(ar->ar_arg_svipc_cmd); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_MSGRCV: case AUE_MSGSND: @@ -1531,7 +1534,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) case AUE_SEMCTL: ar->ar_event = audit_semctl_to_event(ar->ar_arg_svipc_cmd); - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_SEMOP: if (ARG_IS_VALID(kar, ARG_SVIPC_ID)) { @@ -1687,7 +1690,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) } break; default: - break; /* We will audit a bad command */ + break; /* We will audit a bad command */ } break; @@ -1724,7 +1727,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(3, "mode", ar->ar_arg_mode); kau_write(rec, tok); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_SHMUNLINK: if (ARG_IS_VALID(kar, ARG_TEXT)) { @@ -1759,7 +1762,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) tok = au_to_arg32(4, "value", ar->ar_arg_value32); kau_write(rec, tok); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_SEMUNLINK: if (ARG_IS_VALID(kar, ARG_TEXT)) { @@ -1822,7 +1825,7 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) ar->ar_arg_opq_size); kau_write(rec, tok); } - /* FALLTHROUGH */ + /* FALLTHROUGH */ case AUE_UMASK: if (ARG_IS_VALID(kar, ARG_MASK)) { @@ -1877,8 +1880,8 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) break; /************************ - * Mach system calls * - ************************/ + * Mach system calls * + ************************/ case AUE_INITPROCESS: break; @@ -1997,7 +2000,7 @@ 
kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) */ kau_write(rec, subj_tok); kau_free(rec); - return (BSM_NOAUDIT); + return BSM_NOAUDIT; } #if CONFIG_MACF @@ -2008,24 +2011,24 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) LIST_FOREACH(mar, ar->ar_mac_records, records) { switch (mar->type) { case MAC_AUDIT_DATA_TYPE: - tok = au_to_data(AUP_BINARY, AUR_BYTE, - mar->length, - (const char *)mar->data); - break; - case MAC_AUDIT_TEXT_TYPE: - tok = au_to_text((char*) mar->data); - break; - default: - /* - * XXX: we can either continue, - * skipping this particular entry, - * or we can pre-verify the list and - * abort before writing any records - */ - printf("kaudit_to_bsm(): " - "BSM conversion requested for" - "unknown mac_audit data type %d\n", - mar->type); + tok = au_to_data(AUP_BINARY, AUR_BYTE, + mar->length, + (const char *)mar->data); + break; + case MAC_AUDIT_TEXT_TYPE: + tok = au_to_text((char*) mar->data); + break; + default: + /* + * XXX: we can either continue, + * skipping this particular entry, + * or we can pre-verify the list and + * abort before writing any records + */ + printf("kaudit_to_bsm(): " + "BSM conversion requested for" + "unknown mac_audit data type %d\n", + mar->type); } kau_write(rec, tok); @@ -2049,15 +2052,15 @@ kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau) if (ARG_IS_VALID(kar, ARG_IDENTITY)) { struct au_identity_info *id = &ar->ar_arg_identity; tok = au_to_identity(id->signer_type, id->signing_id, - id->signing_id_trunc, id->team_id, id->team_id_trunc, - id->cdhash, id->cdhash_len); + id->signing_id_trunc, id->team_id, id->team_id_trunc, + id->cdhash, id->cdhash_len); kau_write(rec, tok); } kau_close(rec, &ar->ar_endtime, ar->ar_event); *pau = rec; - return (BSM_SUCCESS); + return BSM_SUCCESS; } /* @@ -2073,14 +2076,14 @@ bsm_rec_verify(void *rec, int length) /* A record requires a complete header and trailer token */ if (length < (AUDIT_HEADER_SIZE + AUDIT_TRAILER_SIZE)) { - return (0); + return 0; } hdr = (struct hdr_tok_partial*)rec; /* Ensure the provided length matches what the record shows */ if ((uint32_t)length != ntohl(hdr->len)) { - return (0); + return 0; } trl = (struct trl_tok_partial*)(rec + (length - AUDIT_TRAILER_SIZE)); @@ -2089,19 +2092,19 @@ bsm_rec_verify(void *rec, int length) if (((hdr->type != AUT_HEADER32) && (hdr->type != AUT_HEADER32_EX) && (hdr->type != AUT_HEADER64) && (hdr->type != AUT_HEADER64_EX)) || (trl->type != AUT_TRAILER)) { - return (0); + return 0; } /* Ensure the header and trailer agree on the length */ if (hdr->len != trl->len) { - return (0); + return 0; } /* Ensure the trailer token has a proper magic value */ if (ntohs(trl->magic) != AUT_TRAILER_MAGIC) { - return (0); + return 0; } - return (1); + return 1; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_bsm_domain.c b/bsd/security/audit/audit_bsm_domain.c index 998f65050..88626e09a 100644 --- a/bsd/security/audit/audit_bsm_domain.c +++ b/bsd/security/audit/audit_bsm_domain.c @@ -38,11 +38,11 @@ #if CONFIG_AUDIT struct bsm_domain { - u_short bd_bsm_domain; - int bd_local_domain; + u_short bd_bsm_domain; + int bd_local_domain; }; -#define PF_NO_LOCAL_MAPPING -600 +#define PF_NO_LOCAL_MAPPING -600 static const struct bsm_domain bsm_domains[] = { { BSM_PF_UNSPEC, PF_UNSPEC }, @@ -50,406 +50,406 @@ static const struct bsm_domain bsm_domains[] = { { BSM_PF_INET, PF_INET }, { BSM_PF_IMPLINK, #ifdef PF_IMPLINK - PF_IMPLINK + PF_IMPLINK #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { 
BSM_PF_PUP, #ifdef PF_PUP - PF_PUP + PF_PUP #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_CHAOS, #ifdef PF_CHAOS - PF_CHAOS + PF_CHAOS #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_NS, #ifdef PF_NS - PF_NS + PF_NS #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_NBS, #ifdef PF_NBS - PF_NBS + PF_NBS #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ECMA, #ifdef PF_ECMA - PF_ECMA + PF_ECMA #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_DATAKIT, #ifdef PF_DATAKIT - PF_DATAKIT + PF_DATAKIT #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_CCITT, #ifdef PF_CCITT - PF_CCITT + PF_CCITT #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_SNA, PF_SNA }, { BSM_PF_DECnet, PF_DECnet }, { BSM_PF_DLI, #ifdef PF_DLI - PF_DLI + PF_DLI #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_LAT, #ifdef PF_LAT - PF_LAT + PF_LAT #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_HYLINK, #ifdef PF_HYLINK - PF_HYLINK + PF_HYLINK #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_APPLETALK, PF_APPLETALK }, { BSM_PF_NIT, #ifdef PF_NIT - PF_NIT + PF_NIT #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_802, #ifdef PF_802 - PF_802 + PF_802 #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_OSI, #ifdef PF_OSI - PF_OSI + PF_OSI #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_X25, #ifdef PF_X25 - PF_X25 + PF_X25 #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_OSINET, #ifdef PF_OSINET - PF_OSINET + PF_OSINET #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_GOSIP, #ifdef PF_GOSIP - PF_GOSIP + PF_GOSIP #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_IPX, PF_IPX }, { BSM_PF_ROUTE, PF_ROUTE }, { BSM_PF_LINK, #ifdef PF_LINK - PF_LINK + PF_LINK #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_INET6, PF_INET6 }, { BSM_PF_KEY, PF_KEY }, { BSM_PF_NCA, #ifdef PF_NCA - PF_NCA + PF_NCA #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_POLICY, #ifdef PF_POLICY - PF_POLICY + PF_POLICY #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_INET_OFFLOAD, #ifdef PF_INET_OFFLOAD - PF_INET_OFFLOAD + PF_INET_OFFLOAD #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_NETBIOS, #ifdef PF_NETBIOS - PF_NETBIOS + PF_NETBIOS #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ISO, #ifdef PF_ISO - PF_ISO + PF_ISO #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_XTP, #ifdef PF_XTP - PF_XTP + PF_XTP #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_COIP, #ifdef PF_COIP - PF_COIP + PF_COIP #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_CNT, #ifdef PF_CNT - PF_CNT + PF_CNT #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_RTIP, #ifdef PF_RTIP - PF_RTIP + PF_RTIP #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_SIP, #ifdef PF_SIP - PF_SIP + PF_SIP #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_PIP, #ifdef PF_PIP - PF_PIP + PF_PIP #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ISDN, #ifdef PF_ISDN - PF_ISDN + PF_ISDN #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_E164, #ifdef PF_E164 - PF_E164 + PF_E164 #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_NATM, #ifdef PF_NATM - 
PF_NATM + PF_NATM #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ATM, #ifdef PF_ATM - PF_ATM + PF_ATM #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_NETGRAPH, #ifdef PF_NETGRAPH - PF_NETGRAPH + PF_NETGRAPH #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_SLOW, #ifdef PF_SLOW - PF_SLOW + PF_SLOW #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_SCLUSTER, #ifdef PF_SCLUSTER - PF_SCLUSTER + PF_SCLUSTER #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ARP, #ifdef PF_ARP - PF_ARP + PF_ARP #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_BLUETOOTH, #ifdef PF_BLUETOOTH - PF_BLUETOOTH + PF_BLUETOOTH #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_IEEE80211, #ifdef PF_IEEE80211 - PF_IEEE80211 + PF_IEEE80211 #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_AX25, #ifdef PF_AX25 - PF_AX25 + PF_AX25 #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ROSE, #ifdef PF_ROSE - PF_ROSE + PF_ROSE #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_NETBEUI, #ifdef PF_NETBEUI - PF_NETBEUI + PF_NETBEUI #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_SECURITY, #ifdef PF_SECURITY - PF_SECURITY + PF_SECURITY #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_PACKET, #ifdef PF_PACKET - PF_PACKET + PF_PACKET #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ASH, #ifdef PF_ASH - PF_ASH + PF_ASH #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ECONET, #ifdef PF_ECONET - PF_ECONET + PF_ECONET #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_ATMSVC, #ifdef PF_ATMSVC - PF_ATMSVC + PF_ATMSVC #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_IRDA, #ifdef PF_IRDA - PF_IRDA + PF_IRDA #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_PPPOX, #ifdef PF_PPPOX - PF_PPPOX + PF_PPPOX #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_WANPIPE, #ifdef PF_WANPIPE - PF_WANPIPE + PF_WANPIPE #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_LLC, #ifdef PF_LLC - PF_LLC + PF_LLC #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_CAN, #ifdef PF_CAN - PF_CAN + PF_CAN #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_TIPC, #ifdef PF_TIPC - PF_TIPC + PF_TIPC #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_IUCV, #ifdef PF_IUCV - PF_IUCV + PF_IUCV #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_RXRPC, #ifdef PF_RXRPC - PF_RXRPC + PF_RXRPC #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, { BSM_PF_PHONET, #ifdef PF_PHONET - PF_PHONET + PF_PHONET #else - PF_NO_LOCAL_MAPPING + PF_NO_LOCAL_MAPPING #endif }, }; static const int bsm_domains_count = sizeof(bsm_domains) / - sizeof(bsm_domains[0]); + sizeof(bsm_domains[0]); static const struct bsm_domain * bsm_lookup_local_domain(int local_domain) @@ -457,10 +457,11 @@ bsm_lookup_local_domain(int local_domain) int i; for (i = 0; i < bsm_domains_count; i++) { - if (bsm_domains[i].bd_local_domain == local_domain) - return (&bsm_domains[i]); + if (bsm_domains[i].bd_local_domain == local_domain) { + return &bsm_domains[i]; + } } - return (NULL); + return NULL; } u_short @@ -469,9 +470,10 @@ au_domain_to_bsm(int local_domain) const struct bsm_domain *bstp; bstp = bsm_lookup_local_domain(local_domain); - if (bstp == NULL) - return (BSM_PF_UNKNOWN); - return 
(bstp->bd_bsm_domain); + if (bstp == NULL) { + return BSM_PF_UNKNOWN; + } + return bstp->bd_bsm_domain; } static const struct bsm_domain * @@ -480,10 +482,11 @@ bsm_lookup_bsm_domain(u_short bsm_domain) int i; for (i = 0; i < bsm_domains_count; i++) { - if (bsm_domains[i].bd_bsm_domain == bsm_domain) - return (&bsm_domains[i]); + if (bsm_domains[i].bd_bsm_domain == bsm_domain) { + return &bsm_domains[i]; + } } - return (NULL); + return NULL; } int @@ -492,9 +495,10 @@ au_bsm_to_domain(u_short bsm_domain, int *local_domainp) const struct bsm_domain *bstp; bstp = bsm_lookup_bsm_domain(bsm_domain); - if (bstp == NULL || bstp->bd_local_domain) - return (-1); + if (bstp == NULL || bstp->bd_local_domain) { + return -1; + } *local_domainp = bstp->bd_local_domain; - return (0); + return 0; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_bsm_errno.c b/bsd/security/audit/audit_bsm_errno.c index 70c87ae2b..ecdbc71dc 100644 --- a/bsd/security/audit/audit_bsm_errno.c +++ b/bsd/security/audit/audit_bsm_errno.c @@ -48,19 +48,19 @@ * Don't include string definitions when this code is compiled into a kernel. */ struct bsm_errno { - int be_bsm_errno; - int be_local_errno; + int be_bsm_errno; + int be_local_errno; #if !defined(KERNEL) && !defined(_KERNEL) - const char *be_strerror; + const char *be_strerror; #endif }; -#define ERRNO_NO_LOCAL_MAPPING -600 +#define ERRNO_NO_LOCAL_MAPPING -600 #if !defined(KERNEL) && !defined(_KERNEL) -#define ES(x) x +#define ES(x) x #else -#define ES(x) +#define ES(x) #endif /* @@ -118,60 +118,60 @@ static const struct bsm_errno bsm_errnos[] = { { BSM_ERRNO_EIDRM, EIDRM, ES("Identifier removed") }, { BSM_ERRNO_ECHRNG, #ifdef ECHRNG - ECHRNG, + ECHRNG, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Channel number out of range") }, + ES("Channel number out of range") }, { BSM_ERRNO_EL2NSYNC, #ifdef EL2NSYNC - EL2NSYNC, + EL2NSYNC, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Level 2 not synchronized") }, + ES("Level 2 not synchronized") }, { BSM_ERRNO_EL3HLT, #ifdef EL3HLT - EL3HLT, + EL3HLT, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Level 3 halted") }, + ES("Level 3 halted") }, { BSM_ERRNO_EL3RST, #ifdef EL3RST - EL3RST, + EL3RST, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Level 3 reset") }, + ES("Level 3 reset") }, { BSM_ERRNO_ELNRNG, #ifdef ELNRNG - ELNRNG, + ELNRNG, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Link number out of range") }, + ES("Link number out of range") }, { BSM_ERRNO_EUNATCH, #ifdef EUNATCH - EUNATCH, + EUNATCH, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Protocol driver not attached") }, + ES("Protocol driver not attached") }, { BSM_ERRNO_ENOCSI, #ifdef ENOCSI - ENOCSI, + ENOCSI, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("No CSI structure available") }, + ES("No CSI structure available") }, { BSM_ERRNO_EL2HLT, #ifdef EL2HLT - EL2HLT, + EL2HLT, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Level 2 halted") }, + ES("Level 2 halted") }, { BSM_ERRNO_EDEADLK, EDEADLK, ES("Resource deadlock avoided") }, { BSM_ERRNO_ENOLCK, ENOLCK, ES("No locks available") }, { BSM_ERRNO_ECANCELED, ECANCELED, ES("Operation canceled") }, @@ -179,413 +179,413 @@ static const struct bsm_errno bsm_errnos[] = { { BSM_ERRNO_EDQUOT, EDQUOT, ES("Disc quota exceeded") }, { BSM_ERRNO_EBADE, #ifdef EBADE - EBADE, + EBADE, #else - ERRNO_NO_LOCAL_MAPPING, + 
ERRNO_NO_LOCAL_MAPPING, #endif - ES("Invalid exchange") }, + ES("Invalid exchange") }, { BSM_ERRNO_EBADR, #ifdef EBADR - EBADR, + EBADR, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Invalid request descriptor") }, + ES("Invalid request descriptor") }, { BSM_ERRNO_EXFULL, #ifdef EXFULL - EXFULL, + EXFULL, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Exchange full") }, + ES("Exchange full") }, { BSM_ERRNO_ENOANO, #ifdef ENOANO - ENOANO, + ENOANO, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("No anode") }, + ES("No anode") }, { BSM_ERRNO_EBADRQC, #ifdef EBADRQC - EBADRQC, + EBADRQC, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Invalid request descriptor") }, + ES("Invalid request descriptor") }, { BSM_ERRNO_EBADSLT, #ifdef EBADSLT - EBADSLT, + EBADSLT, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Invalid slot") }, + ES("Invalid slot") }, { BSM_ERRNO_EDEADLOCK, #ifdef EDEADLOCK - EDEADLOCK, + EDEADLOCK, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Resource deadlock avoided") }, + ES("Resource deadlock avoided") }, { BSM_ERRNO_EBFONT, #ifdef EBFONT - EBFONT, + EBFONT, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Bad font file format") }, + ES("Bad font file format") }, { BSM_ERRNO_EOWNERDEAD, #ifdef EOWNERDEAD - EOWNERDEAD, + EOWNERDEAD, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Process died with the lock") }, + ES("Process died with the lock") }, { BSM_ERRNO_ENOTRECOVERABLE, #ifdef ENOTRECOVERABLE - ENOTRECOVERABLE, + ENOTRECOVERABLE, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Lock is not recoverable") }, + ES("Lock is not recoverable") }, { BSM_ERRNO_ENOSTR, #ifdef ENOSTR - ENOSTR, + ENOSTR, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Device not a stream") }, + ES("Device not a stream") }, { BSM_ERRNO_ENONET, #ifdef ENONET - ENONET, + ENONET, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Machine is not on the network") }, + ES("Machine is not on the network") }, { BSM_ERRNO_ENOPKG, #ifdef ENOPKG - ENOPKG, + ENOPKG, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Package not installed") }, + ES("Package not installed") }, { BSM_ERRNO_EREMOTE, EREMOTE, - ES("Too many levels of remote in path") }, + ES("Too many levels of remote in path") }, { BSM_ERRNO_ENOLINK, #ifdef ENOLINK - ENOLINK, + ENOLINK, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Link has been severed") }, + ES("Link has been severed") }, { BSM_ERRNO_EADV, #ifdef EADV - EADV, + EADV, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Advertise error") }, + ES("Advertise error") }, { BSM_ERRNO_ESRMNT, #ifdef ESRMNT - ESRMNT, + ESRMNT, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("srmount error") }, + ES("srmount error") }, { BSM_ERRNO_ECOMM, #ifdef ECOMM - ECOMM, + ECOMM, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Communication error on send") }, + ES("Communication error on send") }, { BSM_ERRNO_EPROTO, #ifdef EPROTO - EPROTO, + EPROTO, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Protocol error") }, + ES("Protocol error") }, { BSM_ERRNO_ELOCKUNMAPPED, #ifdef ELOCKUNMAPPED - ELOCKUNMAPPED, + ELOCKUNMAPPED, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Locked lock was unmapped") 
}, + ES("Locked lock was unmapped") }, { BSM_ERRNO_ENOTACTIVE, #ifdef ENOTACTIVE - ENOTACTIVE, + ENOTACTIVE, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Facility is not active") }, + ES("Facility is not active") }, { BSM_ERRNO_EMULTIHOP, #ifdef EMULTIHOP - EMULTIHOP, + EMULTIHOP, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Multihop attempted") }, + ES("Multihop attempted") }, { BSM_ERRNO_EBADMSG, #ifdef EBADMSG - EBADMSG, + EBADMSG, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Bad message") }, + ES("Bad message") }, { BSM_ERRNO_ENAMETOOLONG, ENAMETOOLONG, ES("File name too long") }, { BSM_ERRNO_EOVERFLOW, EOVERFLOW, - ES("Value too large to be stored in data type") }, + ES("Value too large to be stored in data type") }, { BSM_ERRNO_ENOTUNIQ, #ifdef ENOTUNIQ - ENOTUNIQ, + ENOTUNIQ, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Given log name not unique") }, + ES("Given log name not unique") }, { BSM_ERRNO_EBADFD, #ifdef EBADFD - EBADFD, + EBADFD, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Given f.d. invalid for this operation") }, + ES("Given f.d. invalid for this operation") }, { BSM_ERRNO_EREMCHG, #ifdef EREMCHG - EREMCHG, + EREMCHG, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Remote address changed") }, + ES("Remote address changed") }, { BSM_ERRNO_ELIBACC, #ifdef ELIBACC - ELIBACC, + ELIBACC, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Can't access a needed shared lib") }, + ES("Can't access a needed shared lib") }, { BSM_ERRNO_ELIBBAD, #ifdef ELIBBAD - ELIBBAD, + ELIBBAD, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Accessing a corrupted shared lib") }, + ES("Accessing a corrupted shared lib") }, { BSM_ERRNO_ELIBSCN, #ifdef ELIBSCN - ELIBSCN, + ELIBSCN, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES(".lib section in a.out corrupted") }, + ES(".lib section in a.out corrupted") }, { BSM_ERRNO_ELIBMAX, #ifdef ELIBMAX - ELIBMAX, + ELIBMAX, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Attempting to link in too many libs") }, + ES("Attempting to link in too many libs") }, { BSM_ERRNO_ELIBEXEC, #ifdef ELIBEXEC - ELIBEXEC, + ELIBEXEC, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Attempting to exec a shared library") }, + ES("Attempting to exec a shared library") }, { BSM_ERRNO_EILSEQ, EILSEQ, ES("Illegal byte sequence") }, { BSM_ERRNO_ENOSYS, ENOSYS, ES("Function not implemented") }, { BSM_ERRNO_ELOOP, ELOOP, ES("Too many levels of symbolic links") }, { BSM_ERRNO_ERESTART, #ifdef ERESTART - ERESTART, + ERESTART, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Restart syscall") }, + ES("Restart syscall") }, { BSM_ERRNO_ESTRPIPE, #ifdef ESTRPIPE - ESTRPIPE, + ESTRPIPE, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("If pipe/FIFO, don't sleep in stream head") }, + ES("If pipe/FIFO, don't sleep in stream head") }, { BSM_ERRNO_ENOTEMPTY, ENOTEMPTY, ES("Directory not empty") }, { BSM_ERRNO_EUSERS, EUSERS, ES("Too many users") }, { BSM_ERRNO_ENOTSOCK, ENOTSOCK, - ES("Socket operation on non-socket") }, + ES("Socket operation on non-socket") }, { BSM_ERRNO_EDESTADDRREQ, EDESTADDRREQ, - ES("Destination address required") }, + ES("Destination address required") }, { BSM_ERRNO_EMSGSIZE, EMSGSIZE, ES("Message too long") }, { BSM_ERRNO_EPROTOTYPE, EPROTOTYPE, - ES("Protocol 
wrong type for socket") }, + ES("Protocol wrong type for socket") }, { BSM_ERRNO_ENOPROTOOPT, ENOPROTOOPT, ES("Protocol not available") }, { BSM_ERRNO_EPROTONOSUPPORT, EPROTONOSUPPORT, - ES("Protocol not supported") }, + ES("Protocol not supported") }, { BSM_ERRNO_ESOCKTNOSUPPORT, ESOCKTNOSUPPORT, - ES("Socket type not supported") }, + ES("Socket type not supported") }, { BSM_ERRNO_EOPNOTSUPP, EOPNOTSUPP, ES("Operation not supported") }, { BSM_ERRNO_EPFNOSUPPORT, EPFNOSUPPORT, - ES("Protocol family not supported") }, + ES("Protocol family not supported") }, { BSM_ERRNO_EAFNOSUPPORT, EAFNOSUPPORT, - ES("Address family not supported by protocol family") }, + ES("Address family not supported by protocol family") }, { BSM_ERRNO_EADDRINUSE, EADDRINUSE, ES("Address already in use") }, { BSM_ERRNO_EADDRNOTAVAIL, EADDRNOTAVAIL, - ES("Can't assign requested address") }, + ES("Can't assign requested address") }, { BSM_ERRNO_ENETDOWN, ENETDOWN, ES("Network is down") }, { BSM_ERRNO_ENETRESET, ENETRESET, - ES("Network dropped connection on reset") }, + ES("Network dropped connection on reset") }, { BSM_ERRNO_ECONNABORTED, ECONNABORTED, - ES("Software caused connection abort") }, + ES("Software caused connection abort") }, { BSM_ERRNO_ECONNRESET, ECONNRESET, ES("Connection reset by peer") }, { BSM_ERRNO_ENOBUFS, ENOBUFS, ES("No buffer space available") }, { BSM_ERRNO_EISCONN, EISCONN, ES("Socket is already connected") }, { BSM_ERRNO_ENOTCONN, ENOTCONN, ES("Socket is not connected") }, { BSM_ERRNO_ESHUTDOWN, ESHUTDOWN, - ES("Can't send after socket shutdown") }, + ES("Can't send after socket shutdown") }, { BSM_ERRNO_ETOOMANYREFS, ETOOMANYREFS, - ES("Too many references: can't splice") }, + ES("Too many references: can't splice") }, { BSM_ERRNO_ETIMEDOUT, ETIMEDOUT, ES("Operation timed out") }, { BSM_ERRNO_ECONNREFUSED, ECONNREFUSED, ES("Connection refused") }, { BSM_ERRNO_EHOSTDOWN, EHOSTDOWN, ES("Host is down") }, { BSM_ERRNO_EHOSTUNREACH, EHOSTUNREACH, ES("No route to host") }, { BSM_ERRNO_EALREADY, EALREADY, ES("Operation already in progress") }, { BSM_ERRNO_EINPROGRESS, EINPROGRESS, - ES("Operation now in progress") }, + ES("Operation now in progress") }, { BSM_ERRNO_ESTALE, ESTALE, ES("Stale NFS file handle") }, { BSM_ERRNO_EQFULL, EQFULL, ES("Interface output queue is full") }, { BSM_ERRNO_EPWROFF, #ifdef EPWROFF - EPWROFF, + EPWROFF, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Device power is off") }, + ES("Device power is off") }, { BSM_ERRNO_EDEVERR, #ifdef EDEVERR - EDEVERR, + EDEVERR, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Device error") }, + ES("Device error") }, { BSM_ERRNO_EBADEXEC, #ifdef EBADEXEC - EBADEXEC, + EBADEXEC, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Bad executable") }, + ES("Bad executable") }, { BSM_ERRNO_EBADARCH, #ifdef EBADARCH - EBADARCH, + EBADARCH, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Bad CPU type in executable") }, + ES("Bad CPU type in executable") }, { BSM_ERRNO_ESHLIBVERS, #ifdef ESHLIBVERS - ESHLIBVERS, + ESHLIBVERS, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Shared library version mismatch") }, + ES("Shared library version mismatch") }, { BSM_ERRNO_EBADMACHO, #ifdef EBADMACHO - EBADMACHO, + EBADMACHO, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Malformed Macho file") }, + ES("Malformed Macho file") }, { BSM_ERRNO_EPOLICY, #ifdef EPOLICY - EPOLICY, + EPOLICY, #else - ERRNO_NO_LOCAL_MAPPING, 
+ ERRNO_NO_LOCAL_MAPPING, #endif - ES("Operation failed by policy") }, + ES("Operation failed by policy") }, { BSM_ERRNO_EDOTDOT, #ifdef EDOTDOT - EDOTDOT, + EDOTDOT, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("RFS specific error") }, + ES("RFS specific error") }, { BSM_ERRNO_EUCLEAN, #ifdef EUCLEAN - EUCLEAN, + EUCLEAN, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Structure needs cleaning") }, + ES("Structure needs cleaning") }, { BSM_ERRNO_ENOTNAM, #ifdef ENOTNAM - ENOTNAM, + ENOTNAM, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Not a XENIX named type file") }, + ES("Not a XENIX named type file") }, { BSM_ERRNO_ENAVAIL, #ifdef ENAVAIL - ENAVAIL, + ENAVAIL, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("No XENIX semaphores available") }, + ES("No XENIX semaphores available") }, { BSM_ERRNO_EISNAM, #ifdef EISNAM - EISNAM, + EISNAM, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Is a named type file") }, + ES("Is a named type file") }, { BSM_ERRNO_EREMOTEIO, #ifdef EREMOTEIO - EREMOTEIO, + EREMOTEIO, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Remote I/O error") }, + ES("Remote I/O error") }, { BSM_ERRNO_ENOMEDIUM, #ifdef ENOMEDIUM - ENOMEDIUM, + ENOMEDIUM, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("No medium found") }, + ES("No medium found") }, { BSM_ERRNO_EMEDIUMTYPE, #ifdef EMEDIUMTYPE - EMEDIUMTYPE, + EMEDIUMTYPE, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Wrong medium type") }, + ES("Wrong medium type") }, { BSM_ERRNO_ENOKEY, #ifdef ENOKEY - ENOKEY, + ENOKEY, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Required key not available") }, + ES("Required key not available") }, { BSM_ERRNO_EKEYEXPIRED, #ifdef EKEEXPIRED - EKEYEXPIRED, + EKEYEXPIRED, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Key has expired") }, + ES("Key has expired") }, { BSM_ERRNO_EKEYREVOKED, #ifdef EKEYREVOKED - EKEYREVOKED, + EKEYREVOKED, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Key has been revoked") }, + ES("Key has been revoked") }, { BSM_ERRNO_EKEYREJECTED, #ifdef EKEREJECTED - EKEYREJECTED, + EKEYREJECTED, #else - ERRNO_NO_LOCAL_MAPPING, + ERRNO_NO_LOCAL_MAPPING, #endif - ES("Key was rejected by service") }, + ES("Key was rejected by service") }, }; static const int bsm_errnos_count = sizeof(bsm_errnos) / sizeof(bsm_errnos[0]); @@ -595,10 +595,11 @@ bsm_lookup_errno_local(int local_errno) int i; for (i = 0; i < bsm_errnos_count; i++) { - if (bsm_errnos[i].be_local_errno == local_errno) - return (&bsm_errnos[i]); + if (bsm_errnos[i].be_local_errno == local_errno) { + return &bsm_errnos[i]; + } } - return (NULL); + return NULL; } /* @@ -611,9 +612,10 @@ au_errno_to_bsm(int local_errno) const struct bsm_errno *bsme; bsme = bsm_lookup_errno_local(local_errno); - if (bsme == NULL) - return (BSM_ERRNO_UNKNOWN); - return (bsme->be_bsm_errno); + if (bsme == NULL) { + return BSM_ERRNO_UNKNOWN; + } + return bsme->be_bsm_errno; } static const struct bsm_errno * @@ -622,10 +624,11 @@ bsm_lookup_errno_bsm(u_char bsm_errno) int i; for (i = 0; i < bsm_errnos_count; i++) { - if (bsm_errnos[i].be_bsm_errno == bsm_errno) - return (&bsm_errnos[i]); + if (bsm_errnos[i].be_bsm_errno == bsm_errno) { + return &bsm_errnos[i]; + } } - return (NULL); + return NULL; } /* @@ -639,10 +642,11 @@ au_bsm_to_errno(u_char bsm_errno, int *errorp) const struct 
bsm_errno *bsme; bsme = bsm_lookup_errno_bsm(bsm_errno); - if (bsme == NULL || bsme->be_local_errno == ERRNO_NO_LOCAL_MAPPING) - return (-1); + if (bsme == NULL || bsme->be_local_errno == ERRNO_NO_LOCAL_MAPPING) { + return -1; + } *errorp = bsme->be_local_errno; - return (0); + return 0; } #if !defined(KERNEL) && !defined(_KERNEL) @@ -652,11 +656,13 @@ au_strerror(u_char bsm_errno) const struct bsm_errno *bsme; bsme = bsm_lookup_errno_bsm(bsm_errno); - if (bsme == NULL) - return ("Unrecognized BSM error"); - if (bsme->be_local_errno != ERRNO_NO_LOCAL_MAPPING) - return (strerror(bsme->be_local_errno)); - return (bsme->be_strerror); + if (bsme == NULL) { + return "Unrecognized BSM error"; + } + if (bsme->be_local_errno != ERRNO_NO_LOCAL_MAPPING) { + return strerror(bsme->be_local_errno); + } + return bsme->be_strerror; } #endif #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_bsm_fcntl.c b/bsd/security/audit/audit_bsm_fcntl.c index 15c03f66b..11454fbf4 100644 --- a/bsd/security/audit/audit_bsm_fcntl.c +++ b/bsd/security/audit/audit_bsm_fcntl.c @@ -39,215 +39,215 @@ #if CONFIG_AUDIT struct bsm_fcntl_cmd { - u_short bfc_bsm_fcntl_cmd; - int bfc_local_fcntl_cmd; + u_short bfc_bsm_fcntl_cmd; + int bfc_local_fcntl_cmd; }; -typedef struct bsm_fcntl_cmd bsm_fcntl_cmd_t; +typedef struct bsm_fcntl_cmd bsm_fcntl_cmd_t; static const bsm_fcntl_cmd_t bsm_fcntl_cmdtab[] = { - { BSM_F_DUPFD, F_DUPFD }, - { BSM_F_GETFD, F_GETFD }, - { BSM_F_SETFD, F_SETFD }, - { BSM_F_GETFL, F_GETFL }, - { BSM_F_SETFL, F_SETFL }, -#ifdef F_O_GETLK - { BSM_F_O_GETLK, F_O_GETLK }, + { BSM_F_DUPFD, F_DUPFD }, + { BSM_F_GETFD, F_GETFD }, + { BSM_F_SETFD, F_SETFD }, + { BSM_F_GETFL, F_GETFL }, + { BSM_F_SETFL, F_SETFL }, +#ifdef F_O_GETLK + { BSM_F_O_GETLK, F_O_GETLK }, #endif - { BSM_F_SETLK, F_SETLK }, - { BSM_F_SETLKW, F_SETLK }, -#ifdef F_CHFL - { BSM_F_CHKFL, F_CHKFL }, + { BSM_F_SETLK, F_SETLK }, + { BSM_F_SETLKW, F_SETLK }, +#ifdef F_CHFL + { BSM_F_CHKFL, F_CHKFL }, #endif -#ifdef F_DUP2FD - { BSM_F_DUP2FD, F_DUP2FD }, +#ifdef F_DUP2FD + { BSM_F_DUP2FD, F_DUP2FD }, #endif -#ifdef F_ALLOCSP - { BSM_F_ALLOCSP, F_ALLOCSP }, +#ifdef F_ALLOCSP + { BSM_F_ALLOCSP, F_ALLOCSP }, #endif -#ifdef F_FREESP - { BSM_F_FREESP, F_FREESP }, +#ifdef F_FREESP + { BSM_F_FREESP, F_FREESP }, #endif -#ifdef F_ISSTREAM - { BSM_F_ISSTREAM, F_ISSTREAM}, +#ifdef F_ISSTREAM + { BSM_F_ISSTREAM, F_ISSTREAM}, #endif - { BSM_F_GETLK, F_GETLK }, -#ifdef F_PRIV - { BSM_F_PRIV, F_PRIV }, + { BSM_F_GETLK, F_GETLK }, +#ifdef F_PRIV + { BSM_F_PRIV, F_PRIV }, #endif -#ifdef F_NPRIV - { BSM_F_NPRIV, F_NPRIV }, +#ifdef F_NPRIV + { BSM_F_NPRIV, F_NPRIV }, #endif -#ifdef F_QUOTACTL - { BSM_F_QUOTACTL, F_QUOTACTL }, +#ifdef F_QUOTACTL + { BSM_F_QUOTACTL, F_QUOTACTL }, #endif -#ifdef F_BLOCKS - { BSM_F_BLOCKS, F_BLOCKS }, +#ifdef F_BLOCKS + { BSM_F_BLOCKS, F_BLOCKS }, #endif -#ifdef F_BLKSIZE - { BSM_F_BLKSIZE, F_BLKSIZE }, +#ifdef F_BLKSIZE + { BSM_F_BLKSIZE, F_BLKSIZE }, #endif - { BSM_F_GETOWN, F_GETOWN }, - { BSM_F_SETOWN, F_SETOWN }, -#ifdef F_REVOKE - { BSM_F_REVOKE, F_REVOKE }, + { BSM_F_GETOWN, F_GETOWN }, + { BSM_F_SETOWN, F_SETOWN }, +#ifdef F_REVOKE + { BSM_F_REVOKE, F_REVOKE }, #endif -#ifdef F_HASREMOTEBLOCKS +#ifdef F_HASREMOTEBLOCKS { BSM_F_HASREMOTEBLOCKS, - F_HASREMOTEBLOCKS }, + F_HASREMOTEBLOCKS }, #endif -#ifdef F_FREESP - { BSM_F_FREESP, F_FREESP }, +#ifdef F_FREESP + { BSM_F_FREESP, F_FREESP }, #endif -#ifdef F_ALLOCSP - { BSM_F_ALLOCSP, F_ALLOCSP }, +#ifdef F_ALLOCSP + { BSM_F_ALLOCSP, F_ALLOCSP }, #endif -#ifdef F_FREESP64 - { 
BSM_F_FREESP64, F_FREESP64 }, +#ifdef F_FREESP64 + { BSM_F_FREESP64, F_FREESP64 }, #endif -#ifdef F_ALLOCSP64 - { BSM_F_ALLOCSP64, F_ALLOCSP64 }, +#ifdef F_ALLOCSP64 + { BSM_F_ALLOCSP64, F_ALLOCSP64 }, #endif -#ifdef F_GETLK64 - { BSM_F_GETLK64, F_GETLK64 }, +#ifdef F_GETLK64 + { BSM_F_GETLK64, F_GETLK64 }, #endif -#ifdef F_SETLK64 - { BSM_F_SETLK64, F_SETLK64 }, +#ifdef F_SETLK64 + { BSM_F_SETLK64, F_SETLK64 }, #endif -#ifdef F_SETLKW64 - { BSM_F_SETLKW64, F_SETLKW64 }, +#ifdef F_SETLKW64 + { BSM_F_SETLKW64, F_SETLKW64 }, #endif -#ifdef F_SHARE - { BSM_F_SHARE, F_SHARE }, +#ifdef F_SHARE + { BSM_F_SHARE, F_SHARE }, #endif -#ifdef F_UNSHARE - { BSM_F_UNSHARE, F_UNSHARE }, +#ifdef F_UNSHARE + { BSM_F_UNSHARE, F_UNSHARE }, #endif -#ifdef F_SETLK_NBMAND - { BSM_F_SETLK_NBMAND, F_SETLK_NBMAND }, +#ifdef F_SETLK_NBMAND + { BSM_F_SETLK_NBMAND, F_SETLK_NBMAND }, #endif -#ifdef F_SHARE_NBMAND - { BSM_F_SHARE_NBMAND, F_SHARE_NBMAND }, +#ifdef F_SHARE_NBMAND + { BSM_F_SHARE_NBMAND, F_SHARE_NBMAND }, #endif -#ifdef F_SETLK64_NBMAND - { BSM_F_SETLK64_NBMAND, F_SETLK64_NBMAND }, +#ifdef F_SETLK64_NBMAND + { BSM_F_SETLK64_NBMAND, F_SETLK64_NBMAND }, #endif -#ifdef F_GETXFL - { BSM_F_GETXFL, F_GETXFL }, +#ifdef F_GETXFL + { BSM_F_GETXFL, F_GETXFL }, #endif -#ifdef F_BADFD - { BSM_F_BADFD, F_BADFD }, +#ifdef F_BADFD + { BSM_F_BADFD, F_BADFD }, #endif -#ifdef F_OGETLK - { BSM_F_OGETLK, F_OGETLK }, +#ifdef F_OGETLK + { BSM_F_OGETLK, F_OGETLK }, #endif -#ifdef F_OSETLK - { BSM_F_OSETLK, F_OSETLK }, +#ifdef F_OSETLK + { BSM_F_OSETLK, F_OSETLK }, #endif -#ifdef F_OSETLKW - { BSM_F_OSETLKW, F_OSETLKW }, +#ifdef F_OSETLKW + { BSM_F_OSETLKW, F_OSETLKW }, #endif -#ifdef F_SETLK_REMOTE - { BSM_F_SETLK_REMOTE, F_SETLK_REMOTE }, +#ifdef F_SETLK_REMOTE + { BSM_F_SETLK_REMOTE, F_SETLK_REMOTE }, #endif -#ifdef F_SETSIG - { BSM_F_SETSIG, F_SETSIG }, +#ifdef F_SETSIG + { BSM_F_SETSIG, F_SETSIG }, #endif -#ifdef F_GETSIG - { BSM_F_GETSIG, F_GETSIG }, +#ifdef F_GETSIG + { BSM_F_GETSIG, F_GETSIG }, #endif -#ifdef F_CHKCLEAN - { BSM_F_CHKCLEAN, F_CHKCLEAN }, +#ifdef F_CHKCLEAN + { BSM_F_CHKCLEAN, F_CHKCLEAN }, #endif -#ifdef F_PREALLOCATE - { BSM_F_PREALLOCATE, F_PREALLOCATE }, +#ifdef F_PREALLOCATE + { BSM_F_PREALLOCATE, F_PREALLOCATE }, #endif -#ifdef F_SETSIZE - { BSM_F_SETSIZE, F_SETSIZE }, +#ifdef F_SETSIZE + { BSM_F_SETSIZE, F_SETSIZE }, #endif -#ifdef F_RDADVISE - { BSM_F_RDADVISE, F_RDADVISE }, +#ifdef F_RDADVISE + { BSM_F_RDADVISE, F_RDADVISE }, #endif -#ifdef F_RDAHEAD - { BSM_F_RDAHEAD, F_RDAHEAD }, +#ifdef F_RDAHEAD + { BSM_F_RDAHEAD, F_RDAHEAD }, #endif -#ifdef F_READBOOTSTRAP - { BSM_F_READBOOTSTRAP, F_READBOOTSTRAP }, +#ifdef F_READBOOTSTRAP + { BSM_F_READBOOTSTRAP, F_READBOOTSTRAP }, #endif -#ifdef F_WRITEBOOTSTRAP - { BSM_F_WRITEBOOTSTRAP, F_WRITEBOOTSTRAP }, +#ifdef F_WRITEBOOTSTRAP + { BSM_F_WRITEBOOTSTRAP, F_WRITEBOOTSTRAP }, #endif -#ifdef F_NOCACHE - { BSM_F_NOCACHE, F_NOCACHE }, +#ifdef F_NOCACHE + { BSM_F_NOCACHE, F_NOCACHE }, #endif -#ifdef F_LOG2PHYS - { BSM_F_LOG2PHYS, F_LOG2PHYS }, +#ifdef F_LOG2PHYS + { BSM_F_LOG2PHYS, F_LOG2PHYS }, #endif -#ifdef F_GETPATH - { BSM_F_GETPATH, F_GETPATH }, +#ifdef F_GETPATH + { BSM_F_GETPATH, F_GETPATH }, #endif -#ifdef F_FULLFSYNC - { BSM_F_FULLFSYNC, F_FULLFSYNC }, +#ifdef F_FULLFSYNC + { BSM_F_FULLFSYNC, F_FULLFSYNC }, #endif -#ifdef F_PATHPKG_CHECK - { BSM_F_PATHPKG_CHECK, F_PATHPKG_CHECK }, +#ifdef F_PATHPKG_CHECK + { BSM_F_PATHPKG_CHECK, F_PATHPKG_CHECK }, #endif -#ifdef F_FREEZE_FS - { BSM_F_FREEZE_FS, F_FREEZE_FS }, +#ifdef F_FREEZE_FS + { BSM_F_FREEZE_FS, 
F_FREEZE_FS }, #endif -#ifdef F_THAW_FS - { BSM_F_THAW_FS, F_THAW_FS }, +#ifdef F_THAW_FS + { BSM_F_THAW_FS, F_THAW_FS }, #endif -#ifdef F_GLOBAL_NOCACHE - { BSM_F_GLOBAL_NOCACHE, F_GLOBAL_NOCACHE }, +#ifdef F_GLOBAL_NOCACHE + { BSM_F_GLOBAL_NOCACHE, F_GLOBAL_NOCACHE }, #endif -#ifdef F_OPENFROM - { BSM_F_OPENFROM, F_OPENFROM }, +#ifdef F_OPENFROM + { BSM_F_OPENFROM, F_OPENFROM }, #endif -#ifdef F_UNLINKFROM - { BSM_F_UNLINKFROM, F_UNLINKFROM }, +#ifdef F_UNLINKFROM + { BSM_F_UNLINKFROM, F_UNLINKFROM }, #endif -#ifdef F_CHECK_OPENEVT - { BSM_F_CHECK_OPENEVT, F_CHECK_OPENEVT }, +#ifdef F_CHECK_OPENEVT + { BSM_F_CHECK_OPENEVT, F_CHECK_OPENEVT }, #endif -#ifdef F_ADDSIGS - { BSM_F_ADDSIGS, F_ADDSIGS }, +#ifdef F_ADDSIGS + { BSM_F_ADDSIGS, F_ADDSIGS }, #endif -#ifdef F_MARKDEPENDENCY - { BSM_F_MARKDEPENDENCY, F_MARKDEPENDENCY }, +#ifdef F_MARKDEPENDENCY + { BSM_F_MARKDEPENDENCY, F_MARKDEPENDENCY }, #endif -#ifdef F_BARRIERFSYNC - { BSM_F_BARRIERFSYNC, F_BARRIERFSYNC }, +#ifdef F_BARRIERFSYNC + { BSM_F_BARRIERFSYNC, F_BARRIERFSYNC }, #endif -#ifdef F_PUNCHHOLE - { BSM_F_PUNCHHOLE, F_PUNCHHOLE }, +#ifdef F_PUNCHHOLE + { BSM_F_PUNCHHOLE, F_PUNCHHOLE }, #endif -#ifdef F_TRIM_ACTIVE_FILE - { BSM_F_TRIM_ACTIVE_FILE, F_TRIM_ACTIVE_FILE }, +#ifdef F_TRIM_ACTIVE_FILE + { BSM_F_TRIM_ACTIVE_FILE, F_TRIM_ACTIVE_FILE }, #endif -#ifdef FCNTL_FS_SPECIFIC_BASE - { BSM_F_FS_SPECIFIC_0, FCNTL_FS_SPECIFIC_BASE}, - { BSM_F_FS_SPECIFIC_1, FCNTL_FS_SPECIFIC_BASE + 1}, - { BSM_F_FS_SPECIFIC_2, FCNTL_FS_SPECIFIC_BASE + 2}, - { BSM_F_FS_SPECIFIC_3, FCNTL_FS_SPECIFIC_BASE + 3}, - { BSM_F_FS_SPECIFIC_4, FCNTL_FS_SPECIFIC_BASE + 4}, - { BSM_F_FS_SPECIFIC_5, FCNTL_FS_SPECIFIC_BASE + 5}, - { BSM_F_FS_SPECIFIC_6, FCNTL_FS_SPECIFIC_BASE + 6}, - { BSM_F_FS_SPECIFIC_7, FCNTL_FS_SPECIFIC_BASE + 7}, - { BSM_F_FS_SPECIFIC_8, FCNTL_FS_SPECIFIC_BASE + 8}, - { BSM_F_FS_SPECIFIC_9, FCNTL_FS_SPECIFIC_BASE + 9}, - { BSM_F_FS_SPECIFIC_10, FCNTL_FS_SPECIFIC_BASE + 10}, - { BSM_F_FS_SPECIFIC_11, FCNTL_FS_SPECIFIC_BASE + 11}, - { BSM_F_FS_SPECIFIC_12, FCNTL_FS_SPECIFIC_BASE + 12}, - { BSM_F_FS_SPECIFIC_13, FCNTL_FS_SPECIFIC_BASE + 13}, - { BSM_F_FS_SPECIFIC_14, FCNTL_FS_SPECIFIC_BASE + 14}, - { BSM_F_FS_SPECIFIC_15, FCNTL_FS_SPECIFIC_BASE + 15}, -#endif /* FCNTL_FS_SPECIFIC_BASE */ +#ifdef FCNTL_FS_SPECIFIC_BASE + { BSM_F_FS_SPECIFIC_0, FCNTL_FS_SPECIFIC_BASE}, + { BSM_F_FS_SPECIFIC_1, FCNTL_FS_SPECIFIC_BASE + 1}, + { BSM_F_FS_SPECIFIC_2, FCNTL_FS_SPECIFIC_BASE + 2}, + { BSM_F_FS_SPECIFIC_3, FCNTL_FS_SPECIFIC_BASE + 3}, + { BSM_F_FS_SPECIFIC_4, FCNTL_FS_SPECIFIC_BASE + 4}, + { BSM_F_FS_SPECIFIC_5, FCNTL_FS_SPECIFIC_BASE + 5}, + { BSM_F_FS_SPECIFIC_6, FCNTL_FS_SPECIFIC_BASE + 6}, + { BSM_F_FS_SPECIFIC_7, FCNTL_FS_SPECIFIC_BASE + 7}, + { BSM_F_FS_SPECIFIC_8, FCNTL_FS_SPECIFIC_BASE + 8}, + { BSM_F_FS_SPECIFIC_9, FCNTL_FS_SPECIFIC_BASE + 9}, + { BSM_F_FS_SPECIFIC_10, FCNTL_FS_SPECIFIC_BASE + 10}, + { BSM_F_FS_SPECIFIC_11, FCNTL_FS_SPECIFIC_BASE + 11}, + { BSM_F_FS_SPECIFIC_12, FCNTL_FS_SPECIFIC_BASE + 12}, + { BSM_F_FS_SPECIFIC_13, FCNTL_FS_SPECIFIC_BASE + 13}, + { BSM_F_FS_SPECIFIC_14, FCNTL_FS_SPECIFIC_BASE + 14}, + { BSM_F_FS_SPECIFIC_15, FCNTL_FS_SPECIFIC_BASE + 15}, +#endif /* FCNTL_FS_SPECIFIC_BASE */ }; static const int bsm_fcntl_cmd_count = sizeof(bsm_fcntl_cmdtab) / - sizeof(bsm_fcntl_cmdtab[0]); + sizeof(bsm_fcntl_cmdtab[0]); static const bsm_fcntl_cmd_t * bsm_lookup_local_fcntl_cmd(int local_fcntl_cmd) @@ -256,10 +256,11 @@ bsm_lookup_local_fcntl_cmd(int local_fcntl_cmd) for (i = 0; i < bsm_fcntl_cmd_count; i++) { if 
(bsm_fcntl_cmdtab[i].bfc_local_fcntl_cmd == - local_fcntl_cmd) - return (&bsm_fcntl_cmdtab[i]); + local_fcntl_cmd) { + return &bsm_fcntl_cmdtab[i]; + } } - return (NULL); + return NULL; } u_short @@ -268,9 +269,10 @@ au_fcntl_cmd_to_bsm(int local_fcntl_cmd) const bsm_fcntl_cmd_t *bfcp; bfcp = bsm_lookup_local_fcntl_cmd(local_fcntl_cmd); - if (bfcp == NULL) - return (BSM_F_UNKNOWN); - return (bfcp->bfc_bsm_fcntl_cmd); + if (bfcp == NULL) { + return BSM_F_UNKNOWN; + } + return bfcp->bfc_bsm_fcntl_cmd; } static const bsm_fcntl_cmd_t * @@ -280,10 +282,11 @@ bsm_lookup_bsm_fcntl_cmd(u_short bsm_fcntl_cmd) for (i = 0; i < bsm_fcntl_cmd_count; i++) { if (bsm_fcntl_cmdtab[i].bfc_bsm_fcntl_cmd == - bsm_fcntl_cmd) - return (&bsm_fcntl_cmdtab[i]); + bsm_fcntl_cmd) { + return &bsm_fcntl_cmdtab[i]; + } } - return (NULL); + return NULL; } int @@ -292,9 +295,10 @@ au_bsm_to_fcntl_cmd(u_short bsm_fcntl_cmd, int *local_fcntl_cmdp) const bsm_fcntl_cmd_t *bfcp; bfcp = bsm_lookup_bsm_fcntl_cmd(bsm_fcntl_cmd); - if (bfcp == NULL || bfcp->bfc_local_fcntl_cmd) - return (-1); + if (bfcp == NULL || bfcp->bfc_local_fcntl_cmd) { + return -1; + } *local_fcntl_cmdp = bfcp->bfc_local_fcntl_cmd; - return (0); + return 0; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_bsm_klib.c b/bsd/security/audit/audit_bsm_klib.c index 30787a14c..1db785758 100644 --- a/bsd/security/audit/audit_bsm_klib.c +++ b/bsd/security/audit/audit_bsm_klib.c @@ -60,7 +60,7 @@ * Hash table functions for the audit event number to event class mask * mapping. */ -#define EVCLASSMAP_HASH_TABLE_SIZE 251 +#define EVCLASSMAP_HASH_TABLE_SIZE 251 struct evclass_elem { au_event_t event; au_class_t class; @@ -71,14 +71,14 @@ struct evclass_list { }; static MALLOC_DEFINE(M_AUDITEVCLASS, "audit_evclass", "Audit event class"); -static struct rwlock evclass_lock; -static struct evclass_list evclass_hash[EVCLASSMAP_HASH_TABLE_SIZE]; +static struct rwlock evclass_lock; +static struct evclass_list evclass_hash[EVCLASSMAP_HASH_TABLE_SIZE]; -#define EVCLASS_LOCK_INIT() rw_init(&evclass_lock, "evclass_lock") -#define EVCLASS_RLOCK() rw_rlock(&evclass_lock) -#define EVCLASS_RUNLOCK() rw_runlock(&evclass_lock) -#define EVCLASS_WLOCK() rw_wlock(&evclass_lock) -#define EVCLASS_WUNLOCK() rw_wunlock(&evclass_lock) +#define EVCLASS_LOCK_INIT() rw_init(&evclass_lock, "evclass_lock") +#define EVCLASS_RLOCK() rw_rlock(&evclass_lock) +#define EVCLASS_RUNLOCK() rw_runlock(&evclass_lock) +#define EVCLASS_WLOCK() rw_wlock(&evclass_lock) +#define EVCLASS_WUNLOCK() rw_wunlock(&evclass_lock) /* * Look up the class for an audit event in the class mapping table. 
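
[Editorial aside: the `*_TOKENS` helper macros reindented near the top of this patch (VNODE2_PATH_TOKENS, FD_VNODE1_TOKENS, EXTENDED_TOKENS and friends) are all wrapped in `do { ... } while (0)`. A minimal standalone sketch of why that wrapper is used for multi-statement macros — hypothetical names, not kernel code:

        #include <stdio.h>

        /*
         * Hypothetical two-statement macro in the style of the *_TOKENS
         * helpers: the do/while (0) wrapper makes the expansion a single
         * statement, so it nests safely in if/else and takes a trailing ';'.
         */
        #define EMIT_PAIR(a, b) do {                    \
                printf("first: %d\n", (a));             \
                printf("second: %d\n", (b));            \
        } while (0)

        int
        main(void)
        {
                int have_args = 1;

                if (have_args)
                        EMIT_PAIR(1, 2);        /* expands to one statement */
                else
                        printf("nothing to emit\n");
                return 0;
        }

Without the wrapper, only the first statement would be governed by the `if`, and the semicolon after the expansion would break `if`/`else` chains.]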
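
[Editorial aside: the `audit_bsm_domain.c`, `audit_bsm_errno.c` and `audit_bsm_fcntl.c` hunks above all follow one table-driven pattern: a static array pairing a portable BSM wire constant with the local platform constant (or a `*_NO_LOCAL_MAPPING` sentinel when the platform lacks it, with `#ifdef` guards per entry), plus linear-search lookups in both directions. A minimal userspace sketch of that pattern — the names `struct bsm_map`, `local_to_bsm`, `bsm_to_local` and the table values are illustrative stand-ins, not the kernel's:

        #include <stdio.h>

        /* Sentinel meaning "no local equivalent", mirroring the kernel's
         * PF_NO_LOCAL_MAPPING / ERRNO_NO_LOCAL_MAPPING / ST_NO_LOCAL_MAPPING. */
        #define NO_LOCAL_MAPPING        (-600)

        struct bsm_map {
                unsigned short  bm_bsm_value;   /* portable BSM constant */
                int             bm_local_value; /* local constant, or sentinel */
        };

        /* Hypothetical table; the real tables guard entries with #ifdef. */
        static const struct bsm_map maps[] = {
                { 1, 10 },
                { 2, NO_LOCAL_MAPPING },        /* unsupported locally */
                { 3, 30 },
        };
        static const int maps_count = sizeof(maps) / sizeof(maps[0]);

        /* Local -> BSM: unknown locals fall back to a catch-all value. */
        static unsigned short
        local_to_bsm(int local)
        {
                int i;

                for (i = 0; i < maps_count; i++) {
                        if (maps[i].bm_local_value == local) {
                                return maps[i].bm_bsm_value;
                        }
                }
                return 0;       /* stand-in for BSM_*_UNKNOWN */
        }

        /* BSM -> local: fails if the entry is absent or is the sentinel. */
        static int
        bsm_to_local(unsigned short bsm, int *localp)
        {
                int i;

                for (i = 0; i < maps_count; i++) {
                        if (maps[i].bm_bsm_value == bsm) {
                                if (maps[i].bm_local_value == NO_LOCAL_MAPPING) {
                                        return -1;
                                }
                                *localp = maps[i].bm_local_value;
                                return 0;
                        }
                }
                return -1;
        }

        int
        main(void)
        {
                int local;

                printf("local 30 -> bsm %u\n", (unsigned)local_to_bsm(30));
                printf("bsm 2 mappable? %s\n",
                    bsm_to_local(2, &local) == 0 ? "yes" : "no");
                return 0;
        }

One wrinkle visible in the hunks: `au_bsm_to_errno()` compares the local value against `ERRNO_NO_LOCAL_MAPPING` explicitly, while `au_bsm_to_domain()`, `au_bsm_to_fcntl_cmd()` and `au_bsm_to_socket_type()` test the local value's truthiness (e.g. `bstp->bd_local_domain`), which rejects any nonzero local mapping as well; the sketch follows the explicit comparison.]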
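
[Editorial aside: the `audit_bsm_klib.c` declarations just above keep the event-to-class mapping in a fixed-size hash table (EVCLASSMAP_HASH_TABLE_SIZE, a prime, 251) of linked lists keyed by `event % size`, guarded by an rwlock, and the insert path replaces an existing mapping for the same event. A minimal sketch of that layout, dropping the kernel's queue(3) LIST macros, MALLOC zone, and locking — `evc_insert`/`evc_lookup` and the sample values are hypothetical:

        #include <stdlib.h>
        #include <stdio.h>

        /* Prime-sized bucket array, as in EVCLASSMAP_HASH_TABLE_SIZE (251). */
        #define EVC_TABLE_SIZE  251

        struct evc_elem {
                unsigned int    event;  /* audit event number */
                unsigned int    class;  /* class mask for that event */
                struct evc_elem *next;  /* chain for bucket collisions */
        };

        static struct evc_elem *evc_hash[EVC_TABLE_SIZE];

        /* Insert or update a mapping; the kernel takes a write lock here. */
        static int
        evc_insert(unsigned int event, unsigned int class)
        {
                struct evc_elem **bucket = &evc_hash[event % EVC_TABLE_SIZE];
                struct evc_elem *e;

                for (e = *bucket; e != NULL; e = e->next) {
                        if (e->event == event) {
                                e->class = class;       /* replace existing */
                                return 0;
                        }
                }
                e = malloc(sizeof(*e));
                if (e == NULL) {
                        return -1;
                }
                e->event = event;
                e->class = class;
                e->next = *bucket;
                *bucket = e;
                return 0;
        }

        /* Look up a class mask; unmapped events fall back to 0 here. */
        static unsigned int
        evc_lookup(unsigned int event)
        {
                struct evc_elem *e;

                for (e = evc_hash[event % EVC_TABLE_SIZE]; e != NULL; e = e->next) {
                        if (e->event == event) {
                                return e->class;
                        }
                }
                return 0;
        }

        int
        main(void)
        {
                evc_insert(43029, 0x1);         /* hypothetical event/class */
                printf("class mask: %#x\n", evc_lookup(43029));
                return 0;
        }

The `au_preselect()` hunk further down then ANDs the looked-up class mask against the per-process success/failure preselection masks (`am_success`/`am_failure`, selected by `sorf`) to decide whether the event is audited.]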
@@ -101,7 +101,7 @@ au_event_class(au_event_t event) } out: EVCLASS_RUNLOCK(); - return (class); + return class; } /* @@ -118,11 +118,10 @@ au_class_protect(au_class_t old_class, au_class_t new_class) /* Check if the reserved class bit has been flipped */ if ((old_class & AU_CLASS_MASK_RESERVED) != - (new_class & AU_CLASS_MASK_RESERVED)) { - + (new_class & AU_CLASS_MASK_RESERVED)) { task_t task = current_task(); if (task != kernel_task && - !IOTaskHasEntitlement(task, AU_CLASS_RESERVED_ENTITLEMENT)) { + !IOTaskHasEntitlement(task, AU_CLASS_RESERVED_ENTITLEMENT)) { /* * If the caller isn't entitled, revert the class bit: * - First remove the reserved bit from the new_class mask @@ -130,7 +129,7 @@ au_class_protect(au_class_t old_class, au_class_t new_class) * - Finally, OR the result from the first two operations */ result = (new_class & ~AU_CLASS_MASK_RESERVED) | - (old_class & AU_CLASS_MASK_RESERVED); + (old_class & AU_CLASS_MASK_RESERVED); } } @@ -161,15 +160,16 @@ au_evclassmap_insert(au_event_t event, au_class_t class) * if the audit syscalls flag needs to be set when preselection masks * are set. */ - if (AUE_IS_A_KEVENT(event)) - audit_kevent_mask |= class; + if (AUE_IS_A_KEVENT(event)) { + audit_kevent_mask |= class; + } /* * Pessimistically, always allocate storage before acquiring mutex. * Free if there is already a mapping for this event. */ evc_new = malloc(sizeof(*evc), M_AUDITEVCLASS, M_WAITOK); - + EVCLASS_WLOCK(); evcl = &evclass_hash[event % EVCLASSMAP_HASH_TABLE_SIZE]; LIST_FOREACH(evc, &evcl->head, entry) { @@ -197,16 +197,17 @@ au_evclassmap_init(void) unsigned int i; EVCLASS_LOCK_INIT(); - for (i = 0; i < EVCLASSMAP_HASH_TABLE_SIZE; i++) + for (i = 0; i < EVCLASSMAP_HASH_TABLE_SIZE; i++) { LIST_INIT(&evclass_hash[i].head); + } /* * Set up the initial event to class mapping for system calls. */ for (i = 0; i < nsysent; i++) { - if (sys_au_event[i] != AUE_NULL) + if (sys_au_event[i] != AUE_NULL) { au_evclassmap_insert(sys_au_event[i], 0); - + } } /* @@ -230,22 +231,26 @@ au_preselect(__unused au_event_t event, au_class_t class, au_mask_t *mask_p, { au_class_t effmask = 0; - if (mask_p == NULL) - return (-1); + if (mask_p == NULL) { + return -1; + } /* * Perform the actual check of the masks against the event. */ - if (sorf & AU_PRS_SUCCESS) + if (sorf & AU_PRS_SUCCESS) { effmask |= (mask_p->am_success & class); + } - if (sorf & AU_PRS_FAILURE) + if (sorf & AU_PRS_FAILURE) { effmask |= (mask_p->am_failure & class); + } - if (effmask) - return (1); - else - return (0); + if (effmask) { + return 1; + } else { + return 0; + } } /* @@ -254,10 +259,10 @@ au_preselect(__unused au_event_t event, au_class_t class, au_mask_t *mask_p, au_event_t audit_ctlname_to_sysctlevent(int name[], uint64_t valid_arg) { - /* can't parse it - so return the worst case */ - if ((valid_arg & (ARG_CTLNAME | ARG_LEN)) != (ARG_CTLNAME | ARG_LEN)) - return (AUE_SYSCTL); + if ((valid_arg & (ARG_CTLNAME | ARG_LEN)) != (ARG_CTLNAME | ARG_LEN)) { + return AUE_SYSCTL; + } switch (name[0]) { /* non-admin "lookups" treat them special */ @@ -277,7 +282,7 @@ audit_ctlname_to_sysctlevent(int name[], uint64_t valid_arg) case KERN_SYMFILE: case KERN_SHREG_PRIVATIZABLE: case KERN_OSVERSION: - return (AUE_SYSCTL_NONADMIN); + return AUE_SYSCTL_NONADMIN; /* only treat the changeable controls as admin */ case KERN_MAXVNODES: @@ -292,11 +297,11 @@ audit_ctlname_to_sysctlevent(int name[], uint64_t valid_arg) case KERN_COREDUMP: case KERN_SUGID_COREDUMP: case KERN_NX_PROTECTION: - return ((valid_arg & ARG_VALUE32) ? 
- AUE_SYSCTL : AUE_SYSCTL_NONADMIN); + return (valid_arg & ARG_VALUE32) ? + AUE_SYSCTL : AUE_SYSCTL_NONADMIN; default: - return (AUE_SYSCTL); + return AUE_SYSCTL; } /* NOTREACHED */ } @@ -392,10 +397,11 @@ audit_flags_and_error_to_openevent(int oflags, int error) case AUE_OPEN_RWT: case AUE_OPEN_W: case AUE_OPEN_WT: - if (error == ENOENT) + if (error == ENOENT) { aevent = AUE_OPEN; + } } - return (aevent); + return aevent; } /* @@ -489,10 +495,11 @@ audit_flags_and_error_to_openextendedevent(int oflags, int error) case AUE_OPEN_EXTENDED_RWT: case AUE_OPEN_EXTENDED_W: case AUE_OPEN_EXTENDED_WT: - if (error == ENOENT) + if (error == ENOENT) { aevent = AUE_OPEN_EXTENDED; + } } - return (aevent); + return aevent; } /* @@ -586,10 +593,11 @@ audit_flags_and_error_to_openatevent(int oflags, int error) case AUE_OPENAT_RWT: case AUE_OPENAT_W: case AUE_OPENAT_WT: - if (error == ENOENT) + if (error == ENOENT) { aevent = AUE_OPENAT; + } } - return (aevent); + return aevent; } /* @@ -652,10 +660,11 @@ audit_flags_and_error_to_openbyidevent(int oflags, int error) case AUE_OPENBYID_RWT: case AUE_OPENBYID_W: case AUE_OPENBYID_WT: - if (error == ENOENT) + if (error == ENOENT) { aevent = AUE_OPENBYID; + } } - return (aevent); + return aevent; } /* @@ -664,20 +673,19 @@ audit_flags_and_error_to_openbyidevent(int oflags, int error) au_event_t audit_msgctl_to_event(int cmd) { - switch (cmd) { case IPC_RMID: - return (AUE_MSGCTL_RMID); + return AUE_MSGCTL_RMID; case IPC_SET: - return (AUE_MSGCTL_SET); + return AUE_MSGCTL_SET; case IPC_STAT: - return (AUE_MSGCTL_STAT); + return AUE_MSGCTL_STAT; default: /* We will audit a bad command. */ - return (AUE_MSGCTL); + return AUE_MSGCTL; } } @@ -687,41 +695,40 @@ audit_msgctl_to_event(int cmd) au_event_t audit_semctl_to_event(int cmd) { - switch (cmd) { case GETALL: - return (AUE_SEMCTL_GETALL); + return AUE_SEMCTL_GETALL; case GETNCNT: - return (AUE_SEMCTL_GETNCNT); + return AUE_SEMCTL_GETNCNT; case GETPID: - return (AUE_SEMCTL_GETPID); + return AUE_SEMCTL_GETPID; case GETVAL: - return (AUE_SEMCTL_GETVAL); + return AUE_SEMCTL_GETVAL; case GETZCNT: - return (AUE_SEMCTL_GETZCNT); + return AUE_SEMCTL_GETZCNT; case IPC_RMID: - return (AUE_SEMCTL_RMID); + return AUE_SEMCTL_RMID; case IPC_SET: - return (AUE_SEMCTL_SET); + return AUE_SEMCTL_SET; case SETALL: - return (AUE_SEMCTL_SETALL); + return AUE_SEMCTL_SETALL; case SETVAL: - return (AUE_SEMCTL_SETVAL); + return AUE_SEMCTL_SETVAL; case IPC_STAT: - return (AUE_SEMCTL_STAT); + return AUE_SEMCTL_STAT; default: /* We will audit a bad command. 
*/ - return (AUE_SEMCTL); + return AUE_SEMCTL; } } @@ -731,55 +738,54 @@ audit_semctl_to_event(int cmd) au_event_t auditon_command_event(int cmd) { - - switch(cmd) { + switch (cmd) { case A_GETPOLICY: - return (AUE_AUDITON_GPOLICY); + return AUE_AUDITON_GPOLICY; case A_SETPOLICY: - return (AUE_AUDITON_SPOLICY); + return AUE_AUDITON_SPOLICY; case A_GETKMASK: - return (AUE_AUDITON_GETKMASK); + return AUE_AUDITON_GETKMASK; case A_SETKMASK: - return (AUE_AUDITON_SETKMASK); + return AUE_AUDITON_SETKMASK; case A_GETQCTRL: - return (AUE_AUDITON_GQCTRL); + return AUE_AUDITON_GQCTRL; case A_SETQCTRL: - return (AUE_AUDITON_SQCTRL); + return AUE_AUDITON_SQCTRL; case A_GETCWD: - return (AUE_AUDITON_GETCWD); + return AUE_AUDITON_GETCWD; case A_GETCAR: - return (AUE_AUDITON_GETCAR); + return AUE_AUDITON_GETCAR; case A_GETSTAT: - return (AUE_AUDITON_GETSTAT); + return AUE_AUDITON_GETSTAT; case A_SETSTAT: - return (AUE_AUDITON_SETSTAT); + return AUE_AUDITON_SETSTAT; case A_SETUMASK: - return (AUE_AUDITON_SETUMASK); + return AUE_AUDITON_SETUMASK; case A_SETSMASK: - return (AUE_AUDITON_SETSMASK); + return AUE_AUDITON_SETSMASK; case A_GETCOND: - return (AUE_AUDITON_GETCOND); + return AUE_AUDITON_GETCOND; case A_SETCOND: - return (AUE_AUDITON_SETCOND); + return AUE_AUDITON_SETCOND; case A_GETCLASS: - return (AUE_AUDITON_GETCLASS); + return AUE_AUDITON_GETCLASS; case A_SETCLASS: - return (AUE_AUDITON_SETCLASS); + return AUE_AUDITON_SETCLASS; case A_GETPINFO: case A_SETPMASK: @@ -790,33 +796,33 @@ auditon_command_event(int cmd) case A_SETKAUDIT: case A_GETSINFO_ADDR: default: - return (AUE_AUDITON); /* No special record */ + return AUE_AUDITON; /* No special record */ } } /* * For darwin we rewrite events generated by fcntl(F_OPENFROM,...) and * fcntl(F_UNLINKFROM,...) system calls to AUE_OPENAT_* and AUE_UNLINKAT audit - * events. + * events. */ au_event_t audit_fcntl_command_event(int cmd, int oflags, int error) { - switch(cmd) { + switch (cmd) { case F_OPENFROM: - return (audit_flags_and_error_to_openatevent(oflags, error)); - + return audit_flags_and_error_to_openatevent(oflags, error); + case F_UNLINKFROM: - return (AUE_UNLINKAT); + return AUE_UNLINKAT; default: - return (AUE_FCNTL); /* Don't change from AUE_FCNTL. */ + return AUE_FCNTL; /* Don't change from AUE_FCNTL. */ } } /* * Create a canonical path from given path by prefixing either the root - * directory, or the current working directory. + * directory, or the current working directory. */ int audit_canon_path(struct vnode *cwd_vp, char *path, char *cpath) @@ -835,24 +841,27 @@ audit_canon_path(struct vnode *cwd_vp, char *path, char *cpath) * than implicit. 
*/ if (*(path) == '/') { - while (*(bufp) == '/') - bufp++; /* skip leading '/'s */ - if (cwd_vp == NULL) - bufp--; /* restore one '/' */ + while (*(bufp) == '/') { + bufp++; /* skip leading '/'s */ + } + if (cwd_vp == NULL) { + bufp--; /* restore one '/' */ + } } if (cwd_vp != NULL) { len = MAXPATHLEN; ret = vn_getpath(cwd_vp, cpath, &len); if (ret != 0) { cpath[0] = '\0'; - return (ret); + return ret; + } + if (len < MAXPATHLEN) { + cpath[len - 1] = '/'; } - if (len < MAXPATHLEN) - cpath[len-1] = '/'; strlcpy(cpath + len, bufp, MAXPATHLEN - len); } else { strlcpy(cpath, bufp, MAXPATHLEN); } - return (0); + return 0; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_bsm_socket_type.c b/bsd/security/audit/audit_bsm_socket_type.c index 6b373b27d..06838b09b 100644 --- a/bsd/security/audit/audit_bsm_socket_type.c +++ b/bsd/security/audit/audit_bsm_socket_type.c @@ -38,11 +38,11 @@ #if CONFIG_AUDIT struct bsm_socket_type { - u_short bst_bsm_socket_type; - int bst_local_socket_type; + u_short bst_bsm_socket_type; + int bst_local_socket_type; }; -#define ST_NO_LOCAL_MAPPING -600 +#define ST_NO_LOCAL_MAPPING -600 static const struct bsm_socket_type bsm_socket_types[] = { { BSM_SOCK_DGRAM, SOCK_DGRAM }, @@ -52,7 +52,7 @@ static const struct bsm_socket_type bsm_socket_types[] = { { BSM_SOCK_SEQPACKET, SOCK_SEQPACKET }, }; static const int bsm_socket_types_count = sizeof(bsm_socket_types) / - sizeof(bsm_socket_types[0]); + sizeof(bsm_socket_types[0]); static const struct bsm_socket_type * bsm_lookup_local_socket_type(int local_socket_type) @@ -61,10 +61,11 @@ bsm_lookup_local_socket_type(int local_socket_type) for (i = 0; i < bsm_socket_types_count; i++) { if (bsm_socket_types[i].bst_local_socket_type == - local_socket_type) - return (&bsm_socket_types[i]); + local_socket_type) { + return &bsm_socket_types[i]; + } } - return (NULL); + return NULL; } u_short @@ -73,9 +74,10 @@ au_socket_type_to_bsm(int local_socket_type) const struct bsm_socket_type *bstp; bstp = bsm_lookup_local_socket_type(local_socket_type); - if (bstp == NULL) - return (BSM_SOCK_UNKNOWN); - return (bstp->bst_bsm_socket_type); + if (bstp == NULL) { + return BSM_SOCK_UNKNOWN; + } + return bstp->bst_bsm_socket_type; } static const struct bsm_socket_type * @@ -85,10 +87,11 @@ bsm_lookup_bsm_socket_type(u_short bsm_socket_type) for (i = 0; i < bsm_socket_types_count; i++) { if (bsm_socket_types[i].bst_bsm_socket_type == - bsm_socket_type) - return (&bsm_socket_types[i]); + bsm_socket_type) { + return &bsm_socket_types[i]; + } } - return (NULL); + return NULL; } int @@ -97,9 +100,10 @@ au_bsm_to_socket_type(u_short bsm_socket_type, int *local_socket_typep) const struct bsm_socket_type *bstp; bstp = bsm_lookup_bsm_socket_type(bsm_socket_type); - if (bstp == NULL || bstp->bst_local_socket_type) - return (-1); + if (bstp == NULL || bstp->bst_local_socket_type) { + return -1; + } *local_socket_typep = bstp->bst_local_socket_type; - return (0); + return 0; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_bsm_token.c b/bsd/security/audit/audit_bsm_token.c index 487349595..0e38a6405 100644 --- a/bsd/security/audit/audit_bsm_token.c +++ b/bsd/security/audit/audit_bsm_token.c @@ -56,11 +56,11 @@ #include #if CONFIG_AUDIT -#define GET_TOKEN_AREA(t, dptr, length) do { \ - t = malloc(sizeof(token_t), M_AUDITBSM, M_WAITOK); \ - t->t_data = malloc(length, M_AUDITBSM, M_WAITOK | M_ZERO); \ - t->len = length; \ - dptr = t->t_data; \ +#define GET_TOKEN_AREA(t, dptr, length) do { \ + t = malloc(sizeof(token_t), 
M_AUDITBSM, M_WAITOK); \ + t->t_data = malloc(length, M_AUDITBSM, M_WAITOK | M_ZERO); \ + t->len = length; \ + dptr = t->t_data; \ } while (0) /* @@ -89,7 +89,7 @@ au_to_arg32(char n, const char *text, u_int32_t v) ADD_U_INT16(dptr, textlen); ADD_STRING(dptr, text, textlen); - return (t); + return t; } token_t * @@ -111,14 +111,13 @@ au_to_arg64(char n, const char *text, u_int64_t v) ADD_U_INT16(dptr, textlen); ADD_STRING(dptr, text, textlen); - return (t); + return t; } token_t * au_to_arg(char n, const char *text, u_int32_t v) { - - return (au_to_arg32(n, text, v)); + return au_to_arg32(n, text, v); } #if defined(_KERNEL) || defined(KERNEL) @@ -164,14 +163,15 @@ au_to_attr32(struct vnode_au_info *vni) if (sizeof(vni->vn_fileid) == sizeof(uint32_t)) { ADD_U_INT32(dptr, pad0_32); ADD_U_INT32(dptr, vni->vn_fileid); - } else if (sizeof(vni->vn_fileid) == sizeof(uint64_t)) + } else if (sizeof(vni->vn_fileid) == sizeof(uint64_t)) { ADD_U_INT64(dptr, vni->vn_fileid); - else + } else { ADD_U_INT64(dptr, 0LL); + } ADD_U_INT32(dptr, vni->vn_dev); - return (t); + return t; } token_t * @@ -188,7 +188,7 @@ au_to_attr64(struct vnode_au_info *vni) ADD_U_CHAR(dptr, AUT_ATTR64); /* - * Darwin defines the size for the file mode + * Darwin defines the size for the file mode * as 2 bytes; BSM defines 4 so pad with 0 */ ADD_U_INT16(dptr, pad0_16); @@ -207,21 +207,21 @@ au_to_attr64(struct vnode_au_info *vni) if (sizeof(vni->vn_fileid) == sizeof(uint32_t)) { ADD_U_INT32(dptr, pad0_32); ADD_U_INT32(dptr, vni->vn_fileid); - } else if (sizeof(vni->vn_fileid) == sizeof(uint64_t)) + } else if (sizeof(vni->vn_fileid) == sizeof(uint64_t)) { ADD_U_INT64(dptr, vni->vn_fileid); - else + } else { ADD_U_INT64(dptr, 0LL); + } ADD_U_INT64(dptr, vni->vn_dev); - return (t); + return t; } token_t * au_to_attr(struct vnode_au_info *vni) { - - return (au_to_attr32(vni)); + return au_to_attr32(vni); } #endif /* defined(_KERNEL) || defined(KERNEL) */ @@ -242,7 +242,7 @@ au_to_data(char unit_print, char unit_type, char unit_count, const char *p) /* Determine the size of the basic unit. 
*/ switch (unit_type) { case AUR_BYTE: - /* case AUR_CHAR: */ + /* case AUR_CHAR: */ datasize = AUR_BYTE_SIZE; break; @@ -251,7 +251,7 @@ au_to_data(char unit_print, char unit_type, char unit_count, const char *p) break; case AUR_INT32: - /* case AUR_INT: */ + /* case AUR_INT: */ datasize = AUR_INT32_SIZE; break; @@ -275,7 +275,7 @@ au_to_data(char unit_print, char unit_type, char unit_count, const char *p) ADD_U_CHAR(dptr, unit_count); ADD_MEM(dptr, p, totdata); - return (t); + return t; } /* @@ -295,7 +295,7 @@ au_to_exit(int retval, int err) ADD_U_INT32(dptr, err); ADD_U_INT32(dptr, retval); - return (t); + return t; } /* @@ -303,8 +303,7 @@ au_to_exit(int retval, int err) token_t * au_to_groups(int *groups) { - - return (au_to_newgroups(AUDIT_MAX_GROUPS, (gid_t *)groups)); + return au_to_newgroups(AUDIT_MAX_GROUPS, (gid_t *)groups); } /* @@ -324,10 +323,11 @@ au_to_newgroups(u_int16_t n, gid_t *groups) ADD_U_CHAR(dptr, AUT_NEWGROUPS); ADD_U_INT16(dptr, n); - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { ADD_U_INT32(dptr, groups[i]); + } - return (t); + return t; } /* @@ -345,7 +345,7 @@ au_to_in_addr(struct in_addr *internet_addr) ADD_U_CHAR(dptr, AUT_IN_ADDR); ADD_MEM(dptr, &internet_addr->s_addr, sizeof(uint32_t)); - return (t); + return t; } /* @@ -366,7 +366,7 @@ au_to_in_addr_ex(struct in6_addr *internet_addr) ADD_U_INT32(dptr, type); ADD_MEM(dptr, internet_addr, 4 * sizeof(uint32_t)); - return (t); + return t; } /* @@ -386,7 +386,7 @@ au_to_ip(struct ip *ip) ADD_U_CHAR(dptr, AUT_IP); ADD_MEM(dptr, ip, sizeof(struct ip)); - return (t); + return t; } /* @@ -406,7 +406,7 @@ au_to_ipc(char type, int id) ADD_U_CHAR(dptr, type); ADD_U_INT32(dptr, id); - return (t); + return t; } /* @@ -426,8 +426,9 @@ au_to_ipc_perm(struct ipc_perm *perm) u_char *dptr = NULL; u_int16_t pad0 = 0; - if (perm == NULL) + if (perm == NULL) { return NULL; + } GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 12 * sizeof(u_int16_t) + sizeof(u_int32_t)); @@ -435,7 +436,7 @@ au_to_ipc_perm(struct ipc_perm *perm) ADD_U_CHAR(dptr, AUT_IPC_PERM); /* - * Darwin defines the size for the file mode + * Darwin defines the size for the file mode * as 2 bytes; BSM defines 4 so pad with 0 */ ADD_U_INT32(dptr, perm->uid); @@ -452,7 +453,7 @@ au_to_ipc_perm(struct ipc_perm *perm) ADD_U_INT16(dptr, pad0); ADD_U_INT16(dptr, perm->_key); - return (t); + return t; } /* @@ -470,7 +471,7 @@ au_to_iport(u_int16_t iport) ADD_U_CHAR(dptr, AUT_IPORT); ADD_U_INT16(dptr, iport); - return (t); + return t; } /* @@ -490,7 +491,7 @@ au_to_opaque(const char *data, uint16_t bytes) ADD_U_INT16(dptr, bytes); ADD_MEM(dptr, data, bytes); - return (t); + return t; } /* @@ -514,15 +515,15 @@ au_to_file(const char *file, struct timeval tm) GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 2 * sizeof(u_int32_t) + sizeof(u_int16_t) + filelen); - timems = tm.tv_usec/1000; + timems = tm.tv_usec / 1000; ADD_U_CHAR(dptr, AUT_OTHER_FILE32); ADD_U_INT32(dptr, tm.tv_sec); - ADD_U_INT32(dptr, timems); /* We need time in ms. */ + ADD_U_INT32(dptr, timems); /* We need time in ms. 
*/ ADD_U_INT16(dptr, filelen); ADD_STRING(dptr, file, filelen); - return (t); + return t; } /* @@ -546,7 +547,7 @@ au_to_text(const char *text) ADD_U_INT16(dptr, textlen); ADD_STRING(dptr, text, textlen); - return (t); + return t; } /* @@ -570,7 +571,7 @@ au_to_path(const char *text) ADD_U_INT16(dptr, textlen); ADD_STRING(dptr, text, textlen); - return (t); + return t; } /* @@ -606,7 +607,7 @@ au_to_process32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, ADD_U_INT32(dptr, tid->port); ADD_MEM(dptr, &tid->machine, sizeof(u_int32_t)); - return (t); + return t; } token_t * @@ -630,16 +631,15 @@ au_to_process64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, ADD_U_INT64(dptr, tid->port); ADD_MEM(dptr, &tid->machine, sizeof(u_int32_t)); - return (t); + return t; } token_t * au_to_process(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid) { - - return (au_to_process32(auid, euid, egid, ruid, rgid, pid, sid, - tid)); + return au_to_process32(auid, euid, egid, ruid, rgid, pid, sid, + tid); } /* @@ -665,12 +665,13 @@ au_to_process32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, KASSERT((tid->at_type == AU_IPv4) || (tid->at_type == AU_IPv6), ("au_to_process32_ex: type %u", (unsigned int)tid->at_type)); - if (tid->at_type == AU_IPv6) + if (tid->at_type == AU_IPv6) { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 13 * sizeof(u_int32_t)); - else + } else { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 10 * sizeof(u_int32_t)); + } ADD_U_CHAR(dptr, AUT_PROCESS32_EX); ADD_U_INT32(dptr, auid); @@ -682,12 +683,13 @@ au_to_process32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, ADD_U_INT32(dptr, sid); ADD_U_INT32(dptr, tid->at_port); ADD_U_INT32(dptr, tid->at_type); - if (tid->at_type == AU_IPv6) + if (tid->at_type == AU_IPv6) { ADD_MEM(dptr, &tid->at_addr[0], 4 * sizeof(u_int32_t)); - else + } else { ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); + } - return (t); + return t; } token_t * @@ -697,17 +699,18 @@ au_to_process64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, token_t *t = NULL; u_char *dptr = NULL; - if (tid->at_type == AU_IPv4) + if (tid->at_type == AU_IPv4) { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 7 * sizeof(u_int32_t) + sizeof(u_int64_t) + 2 * sizeof(u_int32_t)); - else if (tid->at_type == AU_IPv6) + } else if (tid->at_type == AU_IPv6) { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 7 * sizeof(u_int32_t) + sizeof(u_int64_t) + 5 * sizeof(u_int32_t)); - else + } else { panic("au_to_process64_ex: invalidate at_type (%d)", tid->at_type); + } ADD_U_CHAR(dptr, AUT_PROCESS64_EX); ADD_U_INT32(dptr, auid); @@ -726,16 +729,15 @@ au_to_process64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, ADD_MEM(dptr, &tid->at_addr[3], sizeof(u_int32_t)); } - return (t); + return t; } token_t * au_to_process_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) { - - return (au_to_process32_ex(auid, euid, egid, ruid, rgid, pid, sid, - tid)); + return au_to_process32_ex(auid, euid, egid, ruid, rgid, pid, sid, + tid); } /* @@ -755,7 +757,7 @@ au_to_return32(char status, u_int32_t ret) ADD_U_CHAR(dptr, status); ADD_U_INT32(dptr, ret); - return (t); + return t; } token_t * @@ -770,14 +772,13 @@ au_to_return64(char status, u_int64_t ret) ADD_U_CHAR(dptr, status); ADD_U_INT64(dptr, ret); - return (t); + return t; } token_t * au_to_return(char status, u_int32_t ret) { - - return (au_to_return32(status, ret)); + return au_to_return32(status, ret); } /* @@ -795,7 +796,7 @@ 
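/*
 * Illustrative aside, not part of this patch: the AU_IPv6 checks in the
 * au_to_process32_ex hunks above and below size and fill the extended
 * token from the terminal-ID address width -- four 32-bit words for an
 * IPv6 address versus one for IPv4, which is exactly the 13-word versus
 * 10-word difference in the GET_TOKEN_AREA calls. A hypothetical helper
 * (name and placement are assumptions) expressing that rule:
 */
static inline size_t
at_addr_token_bytes(u_int32_t at_type)
{
	/* AU_IPv6 carries a 16-byte address; AU_IPv4 a 4-byte one. */
	return (at_type == AU_IPv6) ? 4 * sizeof(u_int32_t) : sizeof(u_int32_t);
}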
au_to_seq(long audit_count) ADD_U_CHAR(dptr, AUT_SEQ); ADD_U_INT32(dptr, (u_int32_t) audit_count); - return (t); + return t; } /* @@ -817,14 +818,15 @@ au_to_socket_ex(u_short so_domain, u_short so_type, struct sockaddr_in *sin; struct sockaddr_in6 *sin6; - if (so_domain == AF_INET) + if (so_domain == AF_INET) { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 5 * sizeof(u_int16_t) + 2 * sizeof(u_int32_t)); - else if (so_domain == AF_INET6) + } else if (so_domain == AF_INET6) { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 5 * sizeof(u_int16_t) + 8 * sizeof(u_int32_t)); - else - return (NULL); + } else { + return NULL; + } ADD_U_CHAR(dptr, AUT_SOCKET_EX); ADD_U_INT16(dptr, au_domain_to_bsm(so_domain)); @@ -832,12 +834,12 @@ au_to_socket_ex(u_short so_domain, u_short so_type, if (so_domain == AF_INET) { ADD_U_INT16(dptr, AU_IPv4); sin = (struct sockaddr_in *)sa_local; - ADD_MEM(dptr, &sin->sin_port, sizeof(uint16_t)); + ADD_MEM(dptr, &sin->sin_port, sizeof(uint16_t)); ADD_MEM(dptr, &sin->sin_addr.s_addr, sizeof(uint32_t)); sin = (struct sockaddr_in *)sa_remote; ADD_MEM(dptr, &sin->sin_port, sizeof(uint16_t)); ADD_MEM(dptr, &sin->sin_addr.s_addr, sizeof(uint32_t)); - } else /* if (so_domain == AF_INET6) */ { + } else { /* if (so_domain == AF_INET6) */ ADD_U_INT16(dptr, AU_IPv6); sin6 = (struct sockaddr_in6 *)sa_local; ADD_MEM(dptr, &sin6->sin6_port, sizeof(uint16_t)); @@ -847,7 +849,7 @@ au_to_socket_ex(u_short so_domain, u_short so_type, ADD_MEM(dptr, &sin6->sin6_addr, 4 * sizeof(uint32_t)); } - return (t); + return t; } /* @@ -866,11 +868,12 @@ au_to_sock_unix(struct sockaddr_un *so) * Please note that sun_len may not be correctly set and sun_path may * not be NULL terminated. */ - if (so->sun_len >= offsetof(struct sockaddr_un, sun_path)) + if (so->sun_len >= offsetof(struct sockaddr_un, sun_path)) { slen = min(so->sun_len - offsetof(struct sockaddr_un, sun_path), strnlen(so->sun_path, sizeof(so->sun_path))); - else + } else { slen = strnlen(so->sun_path, sizeof(so->sun_path)); + } GET_TOKEN_AREA(t, dptr, 3 * sizeof(u_char) + slen + 1); @@ -878,11 +881,12 @@ au_to_sock_unix(struct sockaddr_un *so) /* BSM token has two bytes for family */ ADD_U_CHAR(dptr, 0); ADD_U_CHAR(dptr, so->sun_family); - if (slen) + if (slen) { ADD_MEM(dptr, so->sun_path, slen); + } ADD_U_CHAR(dptr, '\0'); /* make the path a null-terminated string */ - return (t); + return t; } /* @@ -910,7 +914,7 @@ au_to_sock_inet32(struct sockaddr_in *so) ADD_MEM(dptr, &so->sin_port, sizeof(uint16_t)); ADD_MEM(dptr, &so->sin_addr.s_addr, sizeof(uint32_t)); - return (t); + return t; } /* @@ -934,14 +938,13 @@ au_to_sock_inet128(struct sockaddr_in6 *so) ADD_U_INT16(dptr, so->sin6_port); ADD_MEM(dptr, &so->sin6_addr, 4 * sizeof(uint32_t)); - return (t); + return t; } token_t * au_to_sock_inet(struct sockaddr_in *so) { - - return (au_to_sock_inet32(so)); + return au_to_sock_inet32(so); } /* @@ -977,7 +980,7 @@ au_to_subject32(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, ADD_U_INT32(dptr, tid->port); ADD_MEM(dptr, &tid->machine, sizeof(u_int32_t)); - return (t); + return t; } token_t * @@ -1001,16 +1004,15 @@ au_to_subject64(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, ADD_U_INT64(dptr, tid->port); ADD_MEM(dptr, &tid->machine, sizeof(u_int32_t)); - return (t); + return t; } token_t * au_to_subject(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, au_asid_t sid, au_tid_t *tid) { - - return (au_to_subject32(auid, euid, egid, ruid, rgid, pid, sid, - tid)); + return au_to_subject32(auid, euid, 
egid, ruid, rgid, pid, sid, + tid); } /* @@ -1036,12 +1038,13 @@ au_to_subject32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, KASSERT((tid->at_type == AU_IPv4) || (tid->at_type == AU_IPv6), ("au_to_subject32_ex: type %u", (unsigned int)tid->at_type)); - if (tid->at_type == AU_IPv6) + if (tid->at_type == AU_IPv6) { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 13 * sizeof(u_int32_t)); - else + } else { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 10 * sizeof(u_int32_t)); + } ADD_U_CHAR(dptr, AUT_SUBJECT32_EX); ADD_U_INT32(dptr, auid); @@ -1053,12 +1056,13 @@ au_to_subject32_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, ADD_U_INT32(dptr, sid); ADD_U_INT32(dptr, tid->at_port); ADD_U_INT32(dptr, tid->at_type); - if (tid->at_type == AU_IPv6) + if (tid->at_type == AU_IPv6) { ADD_MEM(dptr, &tid->at_addr[0], 4 * sizeof(u_int32_t)); - else + } else { ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); + } - return (t); + return t; } token_t * @@ -1068,17 +1072,18 @@ au_to_subject64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, token_t *t = NULL; u_char *dptr = NULL; - if (tid->at_type == AU_IPv4) + if (tid->at_type == AU_IPv4) { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 7 * sizeof(u_int32_t) + sizeof(u_int64_t) + 2 * sizeof(u_int32_t)); - else if (tid->at_type == AU_IPv6) + } else if (tid->at_type == AU_IPv6) { GET_TOKEN_AREA(t, dptr, sizeof(u_char) + 7 * sizeof(u_int32_t) + sizeof(u_int64_t) + 5 * sizeof(u_int32_t)); - else + } else { panic("au_to_subject64_ex: invalid at_type (%d)", tid->at_type); + } ADD_U_CHAR(dptr, AUT_SUBJECT64_EX); ADD_U_INT32(dptr, auid); @@ -1090,21 +1095,21 @@ au_to_subject64_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, ADD_U_INT32(dptr, sid); ADD_U_INT64(dptr, tid->at_port); ADD_U_INT32(dptr, tid->at_type); - if (tid->at_type == AU_IPv6) + if (tid->at_type == AU_IPv6) { ADD_MEM(dptr, &tid->at_addr[0], 4 * sizeof(u_int32_t)); - else + } else { ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); + } - return (t); + return t; } token_t * au_to_subject_ex(au_id_t auid, uid_t euid, gid_t egid, uid_t ruid, gid_t rgid, pid_t pid, au_asid_t sid, au_tid_addr_t *tid) { - - return (au_to_subject32_ex(auid, euid, egid, ruid, rgid, pid, sid, - tid)); + return au_to_subject32_ex(auid, euid, egid, ruid, rgid, pid, sid, + tid); } #if !defined(_KERNEL) && !defined(KERNEL) && defined(HAVE_AUDIT_SYSCALLS) @@ -1117,11 +1122,12 @@ au_to_me(void) { auditinfo_t auinfo; - if (getaudit(&auinfo) != 0) - return (NULL); + if (getaudit(&auinfo) != 0) { + return NULL; + } - return (au_to_subject32(auinfo.ai_auid, geteuid(), getegid(), - getuid(), getgid(), getpid(), auinfo.ai_asid, &auinfo.ai_termid)); + return au_to_subject32(auinfo.ai_auid, geteuid(), getegid(), + getuid(), getgid(), getpid(), auinfo.ai_asid, &auinfo.ai_termid); } #endif @@ -1147,7 +1153,7 @@ au_to_exec_strings(const char *strs, int count, u_char type) ADD_U_INT32(dptr, count); ADD_STRING(dptr, strs, totlen); - return (t); + return t; } /* @@ -1158,7 +1164,7 @@ au_to_exec_strings(const char *strs, int count, u_char type) token_t * au_to_exec_args(char *args, int argc) { - return (au_to_exec_strings(args, argc, AUT_EXEC_ARGS)); + return au_to_exec_strings(args, argc, AUT_EXEC_ARGS); } /* @@ -1169,7 +1175,7 @@ au_to_exec_args(char *args, int argc) token_t * au_to_exec_env(char *envs, int envc) { - return (au_to_exec_strings(envs, envc, AUT_EXEC_ENV)); + return au_to_exec_strings(envs, envc, AUT_EXEC_ENV); } /* @@ -1180,7 +1186,7 @@ au_to_exec_env(char *envs, int envc) token_t * au_to_certificate_hash(char *hashes, 
int hashc) { - return (au_to_exec_strings(hashes, hashc, AUT_CERT_HASH)); + return au_to_exec_strings(hashes, hashc, AUT_CERT_HASH); } /* @@ -1191,7 +1197,7 @@ au_to_certificate_hash(char *hashes, int hashc) token_t * au_to_krb5_principal(char *principals, int princ) { - return (au_to_exec_strings(principals, princ, AUT_KRB5_PRINCIPAL)); + return au_to_exec_strings(principals, princ, AUT_KRB5_PRINCIPAL); } #else /* @@ -1219,7 +1225,7 @@ au_to_exec_args(char **argv) nextarg = *(argv + count); } - totlen += count * sizeof(char); /* nul terminations. */ + totlen += count * sizeof(char); /* nul terminations. */ GET_TOKEN_AREA(t, dptr, sizeof(u_char) + sizeof(u_int32_t) + totlen); ADD_U_CHAR(dptr, AUT_EXEC_ARGS); @@ -1230,7 +1236,7 @@ au_to_exec_args(char **argv) ADD_MEM(dptr, nextarg, strlen(nextarg) + 1); } - return (t); + return t; } /* @@ -1251,7 +1257,7 @@ au_to_zonename(char *zonename) ADD_U_CHAR(dptr, AUT_ZONENAME); ADD_U_INT16(dptr, textlen); ADD_STRING(dptr, zonename, textlen); - return (t); + return t; } /* @@ -1289,7 +1295,7 @@ au_to_exec_env(char **envp) ADD_MEM(dptr, nextenv, strlen(nextenv) + 1); } - return (t); + return t; } #endif /* !(defined(_KERNEL) || defined(KERNEL)) */ @@ -1307,8 +1313,8 @@ au_to_exec_env(char **envp) */ token_t* au_to_identity(uint32_t signer_type, const char* signing_id, - u_char signing_id_trunc, const char* team_id, u_char team_id_trunc, - uint8_t* cdhash, uint16_t cdhash_len) + u_char signing_id_trunc, const char* team_id, u_char team_id_trunc, + uint8_t* cdhash, uint16_t cdhash_len) { token_t *t = NULL; u_char *dptr = NULL; @@ -1325,18 +1331,18 @@ au_to_identity(uint32_t signer_type, const char* signing_id, } totlen = - sizeof(u_char) + // token id - sizeof(uint32_t) + // signer type - sizeof(uint16_t) + // singing id length - signing_id_len + // length of signing id to copy - sizeof(u_char) + // null terminator for signing id - sizeof(u_char) + // if signing id truncated - sizeof(uint16_t) + // team id length - team_id_len + // length of team id to copy - sizeof(u_char) + // null terminator for team id - sizeof(u_char) + // if team id truncated - sizeof(uint16_t) + // cdhash length - cdhash_len; // cdhash buffer + sizeof(u_char) + // token id + sizeof(uint32_t) + // signer type + sizeof(uint16_t) + // signing id length + signing_id_len + // length of signing id to copy + sizeof(u_char) + // null terminator for signing id + sizeof(u_char) + // if signing id truncated + sizeof(uint16_t) + // team id length + team_id_len + // length of team id to copy + sizeof(u_char) + // null terminator for team id + sizeof(u_char) + // if team id truncated + sizeof(uint16_t) + // cdhash length + cdhash_len; // cdhash buffer GET_TOKEN_AREA(t, dptr, totlen); @@ -1353,7 +1359,7 @@ au_to_identity(uint32_t signer_type, const char* signing_id, ADD_U_INT16(dptr, cdhash_len); // cdhash length ADD_MEM(dptr, cdhash, cdhash_len); // cdhash - return (t); + return t; } /* @@ -1365,7 +1371,7 @@ au_to_identity(uint32_t signer_type, const char* signing_id, * address type/length 4 bytes * machine address 4 bytes/16 bytes (IPv4/IPv6 address) * seconds of time 4 bytes/8 bytes (32/64-bits) - * milliseconds of time 4 bytes/8 bytes (32/64-bits) + * milliseconds of time 4 bytes/8 bytes (32/64-bits) */ token_t * au_to_header32_ex_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, @@ -1390,15 +1396,16 @@ au_to_header32_ex_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, ADD_U_INT16(dptr, e_type); ADD_U_INT16(dptr, e_mod); ADD_U_INT32(dptr, tid->at_type); - if (tid->at_type == AU_IPv6) 
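/*
 * Illustrative aside, not part of this patch: per the field list in the
 * au_to_header32_ex_tm comment above, only the machine address varies --
 * 4 bytes under AU_IPv4, 16 under AU_IPv6 -- while the surrounding
 * address type, seconds, and milliseconds fields are fixed 32-bit words
 * in the 32-bit header. A hypothetical helper (name is an assumption)
 * for the variable tail of the 32-bit extended header:
 */
static inline size_t
header32_ex_tail_bytes(u_int32_t at_type)
{
	size_t addrlen = (at_type == AU_IPv6) ? 16 : 4; /* machine address */

	return sizeof(u_int32_t)	/* address type/length */
	    + addrlen			/* machine address */
	    + sizeof(u_int32_t)		/* seconds of time */
	    + sizeof(u_int32_t);	/* milliseconds of time */
}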
+ if (tid->at_type == AU_IPv6) { ADD_MEM(dptr, &tid->at_addr[0], 4 * sizeof(u_int32_t)); - else + } else { ADD_MEM(dptr, &tid->at_addr[0], sizeof(u_int32_t)); + } timems = tm.tv_usec / 1000; /* Add the timestamp */ ADD_U_INT32(dptr, tm.tv_sec); - ADD_U_INT32(dptr, timems); /* We need time in ms. */ - return (t); + ADD_U_INT32(dptr, timems); /* We need time in ms. */ + return t; } /* @@ -1427,12 +1434,12 @@ au_to_header32_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, ADD_U_INT16(dptr, e_type); ADD_U_INT16(dptr, e_mod); - timems = tm.tv_usec/1000; + timems = tm.tv_usec / 1000; /* Add the timestamp */ ADD_U_INT32(dptr, tm.tv_sec); - ADD_U_INT32(dptr, timems); /* We need time in ms. */ + ADD_U_INT32(dptr, timems); /* We need time in ms. */ - return (t); + return t; } token_t * @@ -1452,12 +1459,12 @@ au_to_header64_tm(int rec_size, au_event_t e_type, au_emod_t e_mod, ADD_U_INT16(dptr, e_type); ADD_U_INT16(dptr, e_mod); - timems = tm.tv_usec/1000; + timems = tm.tv_usec / 1000; /* Add the timestamp */ ADD_U_INT64(dptr, tm.tv_sec); - ADD_U_INT64(dptr, timems); /* We need time in ms. */ + ADD_U_INT64(dptr, timems); /* We need time in ms. */ - return (t); + return t; } /* @@ -1479,6 +1486,6 @@ au_to_trailer(int rec_size) ADD_U_INT16(dptr, magic); ADD_U_INT32(dptr, rec_size); - return (t); + return t; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_ioctl.h b/bsd/security/audit/audit_ioctl.h index 1059532b9..a300394f7 100644 --- a/bsd/security/audit/audit_ioctl.h +++ b/bsd/security/audit/audit_ioctl.h @@ -26,12 +26,12 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ - + #ifndef _SECURITY_AUDIT_AUDIT_IOCTL_H_ -#define _SECURITY_AUDIT_AUDIT_IOCTL_H_ +#define _SECURITY_AUDIT_AUDIT_IOCTL_H_ -#define AUDITPIPE_IOBASE 'A' -#define AUDITSDEV_IOBASE 'S' +#define AUDITPIPE_IOBASE 'A' +#define AUDITSDEV_IOBASE 'S' /* * Data structures used for complex ioctl arguments. Do not change existing @@ -39,69 +39,69 @@ * old structures and ioctls for backwards compatibility. */ struct auditpipe_ioctl_preselect { - au_id_t aip_auid; - au_mask_t aip_mask; + au_id_t aip_auid; + au_mask_t aip_mask; }; /* * Possible modes of operation for audit pipe preselection. */ -#define AUDITPIPE_PRESELECT_MODE_TRAIL 1 /* Global audit trail. */ -#define AUDITPIPE_PRESELECT_MODE_LOCAL 2 /* Local audit trail. */ +#define AUDITPIPE_PRESELECT_MODE_TRAIL 1 /* Global audit trail. */ +#define AUDITPIPE_PRESELECT_MODE_LOCAL 2 /* Local audit trail. */ /* * Ioctls to read and control the behavior of individual audit pipe devices. 
*/ -#define AUDITPIPE_GET_QLEN _IOR(AUDITPIPE_IOBASE, 1, u_int) -#define AUDITPIPE_GET_QLIMIT _IOR(AUDITPIPE_IOBASE, 2, u_int) -#define AUDITPIPE_SET_QLIMIT _IOW(AUDITPIPE_IOBASE, 3, u_int) -#define AUDITPIPE_GET_QLIMIT_MIN _IOR(AUDITPIPE_IOBASE, 4, u_int) -#define AUDITPIPE_GET_QLIMIT_MAX _IOR(AUDITPIPE_IOBASE, 5, u_int) -#define AUDITPIPE_GET_PRESELECT_FLAGS _IOR(AUDITPIPE_IOBASE, 6, au_mask_t) -#define AUDITPIPE_SET_PRESELECT_FLAGS _IOW(AUDITPIPE_IOBASE, 7, au_mask_t) -#define AUDITPIPE_GET_PRESELECT_NAFLAGS _IOR(AUDITPIPE_IOBASE, 8, au_mask_t) -#define AUDITPIPE_SET_PRESELECT_NAFLAGS _IOW(AUDITPIPE_IOBASE, 9, au_mask_t) -#define AUDITPIPE_GET_PRESELECT_AUID _IOR(AUDITPIPE_IOBASE, 10, \ - struct auditpipe_ioctl_preselect) -#define AUDITPIPE_SET_PRESELECT_AUID _IOW(AUDITPIPE_IOBASE, 11, \ - struct auditpipe_ioctl_preselect) -#define AUDITPIPE_DELETE_PRESELECT_AUID _IOW(AUDITPIPE_IOBASE, 12, au_id_t) -#define AUDITPIPE_FLUSH_PRESELECT_AUID _IO(AUDITPIPE_IOBASE, 13) -#define AUDITPIPE_GET_PRESELECT_MODE _IOR(AUDITPIPE_IOBASE, 14, int) -#define AUDITPIPE_SET_PRESELECT_MODE _IOW(AUDITPIPE_IOBASE, 15, int) -#define AUDITPIPE_FLUSH _IO(AUDITPIPE_IOBASE, 16) -#define AUDITPIPE_GET_MAXAUDITDATA _IOR(AUDITPIPE_IOBASE, 17, u_int) +#define AUDITPIPE_GET_QLEN _IOR(AUDITPIPE_IOBASE, 1, u_int) +#define AUDITPIPE_GET_QLIMIT _IOR(AUDITPIPE_IOBASE, 2, u_int) +#define AUDITPIPE_SET_QLIMIT _IOW(AUDITPIPE_IOBASE, 3, u_int) +#define AUDITPIPE_GET_QLIMIT_MIN _IOR(AUDITPIPE_IOBASE, 4, u_int) +#define AUDITPIPE_GET_QLIMIT_MAX _IOR(AUDITPIPE_IOBASE, 5, u_int) +#define AUDITPIPE_GET_PRESELECT_FLAGS _IOR(AUDITPIPE_IOBASE, 6, au_mask_t) +#define AUDITPIPE_SET_PRESELECT_FLAGS _IOW(AUDITPIPE_IOBASE, 7, au_mask_t) +#define AUDITPIPE_GET_PRESELECT_NAFLAGS _IOR(AUDITPIPE_IOBASE, 8, au_mask_t) +#define AUDITPIPE_SET_PRESELECT_NAFLAGS _IOW(AUDITPIPE_IOBASE, 9, au_mask_t) +#define AUDITPIPE_GET_PRESELECT_AUID _IOR(AUDITPIPE_IOBASE, 10, \ + struct auditpipe_ioctl_preselect) +#define AUDITPIPE_SET_PRESELECT_AUID _IOW(AUDITPIPE_IOBASE, 11, \ + struct auditpipe_ioctl_preselect) +#define AUDITPIPE_DELETE_PRESELECT_AUID _IOW(AUDITPIPE_IOBASE, 12, au_id_t) +#define AUDITPIPE_FLUSH_PRESELECT_AUID _IO(AUDITPIPE_IOBASE, 13) +#define AUDITPIPE_GET_PRESELECT_MODE _IOR(AUDITPIPE_IOBASE, 14, int) +#define AUDITPIPE_SET_PRESELECT_MODE _IOW(AUDITPIPE_IOBASE, 15, int) +#define AUDITPIPE_FLUSH _IO(AUDITPIPE_IOBASE, 16) +#define AUDITPIPE_GET_MAXAUDITDATA _IOR(AUDITPIPE_IOBASE, 17, u_int) /* * Ioctls to retrieve audit pipe statistics. */ -#define AUDITPIPE_GET_INSERTS _IOR(AUDITPIPE_IOBASE, 100, u_int64_t) -#define AUDITPIPE_GET_READS _IOR(AUDITPIPE_IOBASE, 101, u_int64_t) -#define AUDITPIPE_GET_DROPS _IOR(AUDITPIPE_IOBASE, 102, u_int64_t) -#define AUDITPIPE_GET_TRUNCATES _IOR(AUDITPIPE_IOBASE, 103, u_int64_t) +#define AUDITPIPE_GET_INSERTS _IOR(AUDITPIPE_IOBASE, 100, u_int64_t) +#define AUDITPIPE_GET_READS _IOR(AUDITPIPE_IOBASE, 101, u_int64_t) +#define AUDITPIPE_GET_DROPS _IOR(AUDITPIPE_IOBASE, 102, u_int64_t) +#define AUDITPIPE_GET_TRUNCATES _IOR(AUDITPIPE_IOBASE, 103, u_int64_t) /* * Ioctls for the audit session device. 
*/ -#define AUDITSDEV_GET_QLEN _IOR(AUDITSDEV_IOBASE, 1, u_int) -#define AUDITSDEV_GET_QLIMIT _IOR(AUDITSDEV_IOBASE, 2, u_int) -#define AUDITSDEV_SET_QLIMIT _IOW(AUDITSDEV_IOBASE, 3, u_int) -#define AUDITSDEV_GET_QLIMIT_MIN _IOR(AUDITSDEV_IOBASE, 4, u_int) -#define AUDITSDEV_GET_QLIMIT_MAX _IOR(AUDITSDEV_IOBASE, 5, u_int) -#define AUDITSDEV_FLUSH _IO(AUDITSDEV_IOBASE, 6) -#define AUDITSDEV_GET_MAXDATA _IOR(AUDITSDEV_IOBASE, 7, u_int) +#define AUDITSDEV_GET_QLEN _IOR(AUDITSDEV_IOBASE, 1, u_int) +#define AUDITSDEV_GET_QLIMIT _IOR(AUDITSDEV_IOBASE, 2, u_int) +#define AUDITSDEV_SET_QLIMIT _IOW(AUDITSDEV_IOBASE, 3, u_int) +#define AUDITSDEV_GET_QLIMIT_MIN _IOR(AUDITSDEV_IOBASE, 4, u_int) +#define AUDITSDEV_GET_QLIMIT_MAX _IOR(AUDITSDEV_IOBASE, 5, u_int) +#define AUDITSDEV_FLUSH _IO(AUDITSDEV_IOBASE, 6) +#define AUDITSDEV_GET_MAXDATA _IOR(AUDITSDEV_IOBASE, 7, u_int) /* * Ioctls to retrieve and set the ALLSESSIONS flag in the audit session device. */ -#define AUDITSDEV_GET_ALLSESSIONS _IOR(AUDITSDEV_IOBASE, 100, u_int) -#define AUDITSDEV_SET_ALLSESSIONS _IOW(AUDITSDEV_IOBASE, 101, u_int) +#define AUDITSDEV_GET_ALLSESSIONS _IOR(AUDITSDEV_IOBASE, 100, u_int) +#define AUDITSDEV_SET_ALLSESSIONS _IOW(AUDITSDEV_IOBASE, 101, u_int) /* * Ioctls to retrieve audit sessions device statistics. */ -#define AUDITSDEV_GET_INSERTS _IOR(AUDITSDEV_IOBASE, 200, u_int64_t) -#define AUDITSDEV_GET_READS _IOR(AUDITSDEV_IOBASE, 201, u_int64_t) -#define AUDITSDEV_GET_DROPS _IOR(AUDITSDEV_IOBASE, 202, u_int64_t) +#define AUDITSDEV_GET_INSERTS _IOR(AUDITSDEV_IOBASE, 200, u_int64_t) +#define AUDITSDEV_GET_READS _IOR(AUDITSDEV_IOBASE, 201, u_int64_t) +#define AUDITSDEV_GET_DROPS _IOR(AUDITSDEV_IOBASE, 202, u_int64_t) #endif /* _SECURITY_AUDIT_AUDIT_IOCTL_H_ */ diff --git a/bsd/security/audit/audit_mac.c b/bsd/security/audit/audit_mac.c index f80c948ba..705dad083 100644 --- a/bsd/security/audit/audit_mac.c +++ b/bsd/security/audit/audit_mac.c @@ -69,8 +69,8 @@ #define MAC_ARG_PREFIX "arg: " #define MAC_ARG_PREFIX_LEN 5 -zone_t audit_mac_label_zone; -extern zone_t mac_audit_data_zone; +zone_t audit_mac_label_zone; +extern zone_t mac_audit_data_zone; void audit_mac_init(void) @@ -79,7 +79,7 @@ audit_mac_init(void) * one for creds. */ audit_mac_label_zone = zinit(MAC_AUDIT_LABEL_LEN, - AQ_HIWATER * 3*MAC_AUDIT_LABEL_LEN, 8192, "audit_mac_label_zone"); + AQ_HIWATER * 3 * MAC_AUDIT_LABEL_LEN, 8192, "audit_mac_label_zone"); } int @@ -87,12 +87,13 @@ audit_mac_new(proc_t p, struct kaudit_record *ar) { struct mac mac; - /* + /* * Retrieve the MAC labels for the process. 
*/ ar->k_ar.ar_cred_mac_labels = (char *)zalloc(audit_mac_label_zone); - if (ar->k_ar.ar_cred_mac_labels == NULL) - return (1); + if (ar->k_ar.ar_cred_mac_labels == NULL) { + return 1; + } mac.m_buflen = MAC_AUDIT_LABEL_LEN; mac.m_string = ar->k_ar.ar_cred_mac_labels; mac_cred_label_externalize_audit(p, &mac); @@ -104,12 +105,12 @@ audit_mac_new(proc_t p, struct kaudit_record *ar) kalloc(sizeof(*ar->k_ar.ar_mac_records)); if (ar->k_ar.ar_mac_records == NULL) { zfree(audit_mac_label_zone, ar->k_ar.ar_cred_mac_labels); - return (1); + return 1; } LIST_INIT(ar->k_ar.ar_mac_records); ar->k_ar.ar_forced_by_mac = 0; - - return (0); + + return 0; } void @@ -117,15 +118,19 @@ audit_mac_free(struct kaudit_record *ar) { struct mac_audit_record *head, *next; - if (ar->k_ar.ar_vnode1_mac_labels != NULL) + if (ar->k_ar.ar_vnode1_mac_labels != NULL) { zfree(audit_mac_label_zone, ar->k_ar.ar_vnode1_mac_labels); - if (ar->k_ar.ar_vnode2_mac_labels != NULL) + } + if (ar->k_ar.ar_vnode2_mac_labels != NULL) { zfree(audit_mac_label_zone, ar->k_ar.ar_vnode2_mac_labels); - if (ar->k_ar.ar_cred_mac_labels != NULL) + } + if (ar->k_ar.ar_cred_mac_labels != NULL) { zfree(audit_mac_label_zone, ar->k_ar.ar_cred_mac_labels); - if (ar->k_ar.ar_arg_mac_string != NULL) + } + if (ar->k_ar.ar_arg_mac_string != NULL) { kfree(ar->k_ar.ar_arg_mac_string, MAC_MAX_LABEL_BUF_LEN + MAC_ARG_PREFIX_LEN); + } /* * Free the audit data from the MAC policies. @@ -147,19 +152,19 @@ audit_mac_syscall_enter(unsigned short code, proc_t p, struct uthread *uthread, int error; error = mac_audit_check_preselect(my_cred, code, - (void *)uthread->uu_arg); + (void *)uthread->uu_arg); if (error == MAC_AUDIT_YES) { uthread->uu_ar = audit_new(event, p, uthread); uthread->uu_ar->k_ar.ar_forced_by_mac = 1; au_to_text("Forced by a MAC policy"); - return (1); + return 1; } else if (error == MAC_AUDIT_NO) { - return (0); + return 0; } else if (error == MAC_AUDIT_DEFAULT) { - return (1); + return 1; } - return (0); + return 0; } int @@ -168,11 +173,12 @@ audit_mac_syscall_exit(unsigned short code, struct uthread *uthread, int error, { int mac_error; - if (uthread->uu_ar == NULL) /* syscall wasn't audited */ - return (1); + if (uthread->uu_ar == NULL) { /* syscall wasn't audited */ + return 1; + } /* - * Note, no other postselect mechanism exists. If + * Note, no other postselect mechanism exists. If * mac_audit_check_postselect returns MAC_AUDIT_NO, the record will be * suppressed. Other values at this point result in the audit record * being committed. This suppression behavior will probably go away in @@ -182,13 +188,13 @@ audit_mac_syscall_exit(unsigned short code, struct uthread *uthread, int error, (void *) uthread->uu_arg, error, retval, uthread->uu_ar->k_ar.ar_forced_by_mac); - if (mac_error == MAC_AUDIT_YES) + if (mac_error == MAC_AUDIT_YES) { uthread->uu_ar->k_ar_commit |= AR_COMMIT_KERNEL; - else if (mac_error == MAC_AUDIT_NO) { + } else if (mac_error == MAC_AUDIT_NO) { audit_free(uthread->uu_ar); - return (1); + return 1; } - return (0); + return 0; } /* @@ -196,19 +202,20 @@ audit_mac_syscall_exit(unsigned short code, struct uthread *uthread, int error, * from a policy to the current audit record. 
*/ int -audit_mac_data(int type, int len, u_char *data) { +audit_mac_data(int type, int len, u_char *data) +{ struct kaudit_record *cur; struct mac_audit_record *record; if (audit_enabled == 0) { kfree(data, len); - return (ENOTSUP); + return ENOTSUP; } cur = currecord(); if (cur == NULL) { kfree(data, len); - return (ENOTSUP); + return ENOTSUP; } /* @@ -219,7 +226,7 @@ audit_mac_data(int type, int len, u_char *data) { record = kalloc(sizeof(*record)); if (record == NULL) { kfree(data, len); - return (0); + return 0; } record->type = type; @@ -227,16 +234,16 @@ audit_mac_data(int type, int len, u_char *data) { record->data = data; LIST_INSERT_HEAD(cur->k_ar.ar_mac_records, record, records); - return (0); + return 0; } void audit_arg_mac_string(struct kaudit_record *ar, char *string) { - - if (ar->k_ar.ar_arg_mac_string == NULL) + if (ar->k_ar.ar_arg_mac_string == NULL) { ar->k_ar.ar_arg_mac_string = - kalloc(MAC_MAX_LABEL_BUF_LEN + MAC_ARG_PREFIX_LEN); + kalloc(MAC_MAX_LABEL_BUF_LEN + MAC_ARG_PREFIX_LEN); + } /* * XXX This should be a rare event. If kalloc() returns NULL, @@ -244,9 +251,11 @@ audit_arg_mac_string(struct kaudit_record *ar, char *string) * consistent with the rest of audit, just return * (may need to panic if required to for audit). */ - if (ar->k_ar.ar_arg_mac_string == NULL) - if (ar->k_ar.ar_arg_mac_string == NULL) + if (ar->k_ar.ar_arg_mac_string == NULL) { + if (ar->k_ar.ar_arg_mac_string == NULL) { return; + } + } strncpy(ar->k_ar.ar_arg_mac_string, MAC_ARG_PREFIX, MAC_ARG_PREFIX_LEN); diff --git a/bsd/security/audit/audit_pipe.c b/bsd/security/audit/audit_pipe.c index 7d7501a55..8b34ab598 100644 --- a/bsd/security/audit/audit_pipe.c +++ b/bsd/security/audit/audit_pipe.c @@ -65,17 +65,17 @@ static MALLOC_DEFINE(M_AUDIT_PIPE_PRESELECT, "audit_pipe_presel", /* * Audit pipe buffer parameters. */ -#define AUDIT_PIPE_QLIMIT_DEFAULT (128) -#define AUDIT_PIPE_QLIMIT_MIN (1) -#define AUDIT_PIPE_QLIMIT_MAX (1024) +#define AUDIT_PIPE_QLIMIT_DEFAULT (128) +#define AUDIT_PIPE_QLIMIT_MIN (1) +#define AUDIT_PIPE_QLIMIT_MAX (1024) /* * Description of an entry in an audit_pipe. */ struct audit_pipe_entry { - void *ape_record; - u_int ape_record_len; - TAILQ_ENTRY(audit_pipe_entry) ape_queue; + void *ape_record; + u_int ape_record_len; + TAILQ_ENTRY(audit_pipe_entry) ape_queue; }; /* @@ -90,28 +90,28 @@ struct audit_pipe_entry { * usage patterns for per-auid specifications are clear. */ struct audit_pipe_preselect { - au_id_t app_auid; - au_mask_t app_mask; - TAILQ_ENTRY(audit_pipe_preselect) app_list; + au_id_t app_auid; + au_mask_t app_mask; + TAILQ_ENTRY(audit_pipe_preselect) app_list; }; /* * Description of an individual audit_pipe. Consists largely of a bounded * length queue. */ -#define AUDIT_PIPE_ASYNC 0x00000001 -#define AUDIT_PIPE_NBIO 0x00000002 +#define AUDIT_PIPE_ASYNC 0x00000001 +#define AUDIT_PIPE_NBIO 0x00000002 struct audit_pipe { - int ap_open; /* Device open? */ - u_int ap_flags; + int ap_open; /* Device open? */ + u_int ap_flags; - struct selinfo ap_selinfo; - pid_t ap_sigio; + struct selinfo ap_selinfo; + pid_t ap_sigio; /* * Per-pipe mutex protecting most fields in this data structure. */ - struct mtx ap_mtx; + struct mtx ap_mtx; /* * Per-pipe sleep lock serializing user-generated reads and flushes. @@ -119,13 +119,13 @@ struct audit_pipe { * while the record remains in the queue, so we prevent other threads * from removing it using this lock. */ - struct slck ap_sx; + struct slck ap_sx; /* * Condition variable to signal when data has been delivered to a * pipe. 
*/ - struct cv ap_cv; + struct cv ap_cv; /* * Various queue-related variables: qlen and qlimit are a count of @@ -134,27 +134,27 @@ struct audit_pipe { * first record in the queue. The number of bytes available for * reading in the queue is qbyteslen - qoffset. */ - u_int ap_qlen; - u_int ap_qlimit; - u_int ap_qbyteslen; - u_int ap_qoffset; + u_int ap_qlen; + u_int ap_qlimit; + u_int ap_qbyteslen; + u_int ap_qoffset; /* * Per-pipe operation statistics. */ - u_int64_t ap_inserts; /* Records added. */ - u_int64_t ap_reads; /* Records read. */ - u_int64_t ap_drops; /* Records dropped. */ + u_int64_t ap_inserts; /* Records added. */ + u_int64_t ap_reads; /* Records read. */ + u_int64_t ap_drops; /* Records dropped. */ /* * Fields relating to pipe interest: global masks for unmatched * processes (attributable, non-attributable), and a list of specific * interest specifications by auid. */ - int ap_preselect_mode; - au_mask_t ap_preselect_flags; - au_mask_t ap_preselect_naflags; - TAILQ_HEAD(, audit_pipe_preselect) ap_preselect_list; + int ap_preselect_mode; + au_mask_t ap_preselect_flags; + au_mask_t ap_preselect_naflags; + TAILQ_HEAD(, audit_pipe_preselect) ap_preselect_list; /* * Current pending record list. Protected by a combination of ap_mtx @@ -162,27 +162,27 @@ struct audit_pipe { * remove a record from the head of the queue, as an in-progress read * may sleep while copying and therefore cannot hold ap_mtx. */ - TAILQ_HEAD(, audit_pipe_entry) ap_queue; + TAILQ_HEAD(, audit_pipe_entry) ap_queue; /* * Global pipe list. */ - TAILQ_ENTRY(audit_pipe) ap_list; + TAILQ_ENTRY(audit_pipe) ap_list; }; -#define AUDIT_PIPE_LOCK(ap) mtx_lock(&(ap)->ap_mtx) -#define AUDIT_PIPE_LOCK_ASSERT(ap) mtx_assert(&(ap)->ap_mtx, MA_OWNED) -#define AUDIT_PIPE_LOCK_DESTROY(ap) mtx_destroy(&(ap)->ap_mtx) -#define AUDIT_PIPE_LOCK_INIT(ap) mtx_init(&(ap)->ap_mtx, \ - "audit_pipe_mtx", NULL, MTX_DEF) -#define AUDIT_PIPE_UNLOCK(ap) mtx_unlock(&(ap)->ap_mtx) -#define AUDIT_PIPE_MTX(ap) (&(ap)->ap_mtx) +#define AUDIT_PIPE_LOCK(ap) mtx_lock(&(ap)->ap_mtx) +#define AUDIT_PIPE_LOCK_ASSERT(ap) mtx_assert(&(ap)->ap_mtx, MA_OWNED) +#define AUDIT_PIPE_LOCK_DESTROY(ap) mtx_destroy(&(ap)->ap_mtx) +#define AUDIT_PIPE_LOCK_INIT(ap) mtx_init(&(ap)->ap_mtx, \ + "audit_pipe_mtx", NULL, MTX_DEF) +#define AUDIT_PIPE_UNLOCK(ap) mtx_unlock(&(ap)->ap_mtx) +#define AUDIT_PIPE_MTX(ap) (&(ap)->ap_mtx) -#define AUDIT_PIPE_SX_LOCK_DESTROY(ap) slck_destroy(&(ap)->ap_sx) -#define AUDIT_PIPE_SX_LOCK_INIT(ap) slck_init(&(ap)->ap_sx, "audit_pipe_sx") -#define AUDIT_PIPE_SX_XLOCK_ASSERT(ap) slck_assert(&(ap)->ap_sx, SA_XLOCKED) -#define AUDIT_PIPE_SX_XLOCK_SIG(ap) slck_lock_sig(&(ap)->ap_sx) -#define AUDIT_PIPE_SX_XUNLOCK(ap) slck_unlock(&(ap)->ap_sx) +#define AUDIT_PIPE_SX_LOCK_DESTROY(ap) slck_destroy(&(ap)->ap_sx) +#define AUDIT_PIPE_SX_LOCK_INIT(ap) slck_init(&(ap)->ap_sx, "audit_pipe_sx") +#define AUDIT_PIPE_SX_XLOCK_ASSERT(ap) slck_assert(&(ap)->ap_sx, SA_XLOCKED) +#define AUDIT_PIPE_SX_XLOCK_SIG(ap) slck_lock_sig(&(ap)->ap_sx) +#define AUDIT_PIPE_SX_XUNLOCK(ap) slck_unlock(&(ap)->ap_sx) /* @@ -191,23 +191,23 @@ struct audit_pipe { * between threads walking the list to deliver to individual pipes and add/ * remove of pipes, and are mostly acquired for read. 
*/ -static TAILQ_HEAD(, audit_pipe) audit_pipe_list; -static struct rwlock audit_pipe_lock; - -#define AUDIT_PIPE_LIST_LOCK_INIT() rw_init(&audit_pipe_lock, \ - "audit_pipe_list_lock") -#define AUDIT_PIPE_LIST_RLOCK() rw_rlock(&audit_pipe_lock) -#define AUDIT_PIPE_LIST_RUNLOCK() rw_runlock(&audit_pipe_lock) -#define AUDIT_PIPE_LIST_WLOCK() rw_wlock(&audit_pipe_lock) -#define AUDIT_PIPE_LIST_WLOCK_ASSERT() rw_assert(&audit_pipe_lock, \ - RA_WLOCKED) -#define AUDIT_PIPE_LIST_WUNLOCK() rw_wunlock(&audit_pipe_lock) +static TAILQ_HEAD(, audit_pipe) audit_pipe_list; +static struct rwlock audit_pipe_lock; + +#define AUDIT_PIPE_LIST_LOCK_INIT() rw_init(&audit_pipe_lock, \ + "audit_pipe_list_lock") +#define AUDIT_PIPE_LIST_RLOCK() rw_rlock(&audit_pipe_lock) +#define AUDIT_PIPE_LIST_RUNLOCK() rw_runlock(&audit_pipe_lock) +#define AUDIT_PIPE_LIST_WLOCK() rw_wlock(&audit_pipe_lock) +#define AUDIT_PIPE_LIST_WLOCK_ASSERT() rw_assert(&audit_pipe_lock, \ + RA_WLOCKED) +#define AUDIT_PIPE_LIST_WUNLOCK() rw_wunlock(&audit_pipe_lock) /* * Cloning related variables and constants. */ -#define AUDIT_PIPE_NAME "auditpipe" -#define MAX_AUDIT_PIPES 32 +#define AUDIT_PIPE_NAME "auditpipe" +#define MAX_AUDIT_PIPES 32 static int audit_pipe_major; /* @@ -216,17 +216,17 @@ static int audit_pipe_major; * * XXX We may want to dynamically grow this as needed. */ -static struct audit_pipe *audit_pipe_dtab[MAX_AUDIT_PIPES]; +static struct audit_pipe *audit_pipe_dtab[MAX_AUDIT_PIPES]; /* * Special device methods and definition. */ -static open_close_fcn_t audit_pipe_open; -static open_close_fcn_t audit_pipe_close; -static read_write_fcn_t audit_pipe_read; -static ioctl_fcn_t audit_pipe_ioctl; -static select_fcn_t audit_pipe_poll; +static open_close_fcn_t audit_pipe_open; +static open_close_fcn_t audit_pipe_close; +static read_write_fcn_t audit_pipe_read; +static ioctl_fcn_t audit_pipe_ioctl; +static select_fcn_t audit_pipe_poll; static struct cdevsw audit_pipe_cdevsw = { .d_open = audit_pipe_open, @@ -246,10 +246,10 @@ static struct cdevsw audit_pipe_cdevsw = { /* * Some global statistics on audit pipes. */ -static int audit_pipe_count; /* Current number of pipes. */ -static u_int64_t audit_pipe_ever; /* Pipes ever allocated. */ -static u_int64_t audit_pipe_records; /* Records seen. */ -static u_int64_t audit_pipe_drops; /* Global record drop count. */ +static int audit_pipe_count; /* Current number of pipes. */ +static u_int64_t audit_pipe_ever; /* Pipes ever allocated. */ +static u_int64_t audit_pipe_records; /* Records seen. */ +static u_int64_t audit_pipe_drops; /* Global record drop count. */ /* * Free an audit pipe entry. @@ -257,7 +257,6 @@ static u_int64_t audit_pipe_drops; /* Global record drop count. 
*/ static void audit_pipe_entry_free(struct audit_pipe_entry *ape) { - free(ape->ape_record, M_AUDIT_PIPE_ENTRY); free(ape, M_AUDIT_PIPE_ENTRY); } @@ -273,10 +272,11 @@ audit_pipe_preselect_find(struct audit_pipe *ap, au_id_t auid) AUDIT_PIPE_LOCK_ASSERT(ap); TAILQ_FOREACH(app, &ap->ap_preselect_list, app_list) { - if (app->app_auid == auid) - return (app); + if (app->app_auid == auid) { + return app; + } } - return (NULL); + return NULL; } /* @@ -294,10 +294,11 @@ audit_pipe_preselect_get(struct audit_pipe *ap, au_id_t auid, if (app != NULL) { *maskp = app->app_mask; error = 0; - } else + } else { error = ENOENT; + } AUDIT_PIPE_UNLOCK(ap); - return (error); + return error; } /* @@ -324,8 +325,9 @@ audit_pipe_preselect_set(struct audit_pipe *ap, au_id_t auid, au_mask_t mask) } app->app_mask = mask; AUDIT_PIPE_UNLOCK(ap); - if (app_new != NULL) + if (app_new != NULL) { free(app_new, M_AUDIT_PIPE_PRESELECT); + } } /* @@ -342,12 +344,14 @@ audit_pipe_preselect_delete(struct audit_pipe *ap, au_id_t auid) if (app != NULL) { TAILQ_REMOVE(&ap->ap_preselect_list, app, app_list); error = 0; - } else + } else { error = ENOENT; + } AUDIT_PIPE_UNLOCK(ap); - if (app != NULL) + if (app != NULL) { free(app, M_AUDIT_PIPE_PRESELECT); - return (error); + } + return error; } /* @@ -369,7 +373,6 @@ audit_pipe_preselect_flush_locked(struct audit_pipe *ap) static void audit_pipe_preselect_flush(struct audit_pipe *ap) { - AUDIT_PIPE_LOCK(ap); audit_pipe_preselect_flush_locked(ap); AUDIT_PIPE_UNLOCK(ap); @@ -395,27 +398,29 @@ audit_pipe_preselect_check(struct audit_pipe *ap, au_id_t auid, switch (ap->ap_preselect_mode) { case AUDITPIPE_PRESELECT_MODE_TRAIL: - return (trail_preselect); + return trail_preselect; case AUDITPIPE_PRESELECT_MODE_LOCAL: app = audit_pipe_preselect_find(ap, auid); if (app == NULL) { - if (auid == (uid_t)AU_DEFAUDITID) - return (au_preselect(event, class, - &ap->ap_preselect_naflags, sorf)); - else - return (au_preselect(event, class, - &ap->ap_preselect_flags, sorf)); - } else - return (au_preselect(event, class, &app->app_mask, - sorf)); + if (auid == (uid_t)AU_DEFAUDITID) { + return au_preselect(event, class, + &ap->ap_preselect_naflags, sorf); + } else { + return au_preselect(event, class, + &ap->ap_preselect_flags, sorf); + } + } else { + return au_preselect(event, class, &app->app_mask, + sorf); + } default: panic("audit_pipe_preselect_check: mode %d", ap->ap_preselect_mode); } - return (0); + return 0; } /* @@ -429,22 +434,23 @@ audit_pipe_preselect(au_id_t auid, au_event_t event, au_class_t class, struct audit_pipe *ap; /* Lockless read to avoid acquiring the global lock if not needed. 
*/ - if (TAILQ_EMPTY(&audit_pipe_list)) - return (0); + if (TAILQ_EMPTY(&audit_pipe_list)) { + return 0; + } AUDIT_PIPE_LIST_RLOCK(); TAILQ_FOREACH(ap, &audit_pipe_list, ap_list) { - AUDIT_PIPE_LOCK(ap); + AUDIT_PIPE_LOCK(ap); if (audit_pipe_preselect_check(ap, auid, event, class, sorf, trail_preselect)) { AUDIT_PIPE_UNLOCK(ap); AUDIT_PIPE_LIST_RUNLOCK(); - return (1); + return 1; } AUDIT_PIPE_UNLOCK(ap); } AUDIT_PIPE_LIST_RUNLOCK(); - return (0); + return 0; } /* @@ -488,13 +494,15 @@ audit_pipe_append(struct audit_pipe *ap, void *record, u_int record_len) ap->ap_qlen++; ap->ap_qbyteslen += ape->ape_record_len; selwakeup(&ap->ap_selinfo); - if (ap->ap_flags & AUDIT_PIPE_ASYNC) + if (ap->ap_flags & AUDIT_PIPE_ASYNC) { pgsigio(ap->ap_sigio, SIGIO); + } #if 0 /* XXX - fix select */ selwakeuppri(&ap->ap_selinfo, PSOCK); KNOTE_LOCKED(&ap->ap_selinfo.si_note, 0); - if (ap->ap_flags & AUDIT_PIPE_ASYNC) + if (ap->ap_flags & AUDIT_PIPE_ASYNC) { pgsigio(&ap->ap_sigio, SIGIO, 0); + } #endif cv_broadcast(&ap->ap_cv); } @@ -512,15 +520,17 @@ audit_pipe_submit(au_id_t auid, au_event_t event, au_class_t class, int sorf, /* * Lockless read to avoid lock overhead if pipes are not in use. */ - if (TAILQ_FIRST(&audit_pipe_list) == NULL) + if (TAILQ_FIRST(&audit_pipe_list) == NULL) { return; + } AUDIT_PIPE_LIST_RLOCK(); TAILQ_FOREACH(ap, &audit_pipe_list, ap_list) { AUDIT_PIPE_LOCK(ap); if (audit_pipe_preselect_check(ap, auid, event, class, sorf, - trail_select)) + trail_select)) { audit_pipe_append(ap, record, record_len); + } AUDIT_PIPE_UNLOCK(ap); } AUDIT_PIPE_LIST_RUNLOCK(); @@ -545,8 +555,9 @@ audit_pipe_submit_user(void *record, u_int record_len) /* * Lockless read to avoid lock overhead if pipes are not in use. */ - if (TAILQ_FIRST(&audit_pipe_list) == NULL) + if (TAILQ_FIRST(&audit_pipe_list) == NULL) { return; + } AUDIT_PIPE_LIST_RLOCK(); TAILQ_FOREACH(ap, &audit_pipe_list, ap_list) { @@ -572,8 +583,9 @@ audit_pipe_alloc(void) AUDIT_PIPE_LIST_WLOCK_ASSERT(); ap = malloc(sizeof(*ap), M_AUDIT_PIPE, M_WAITOK | M_ZERO); - if (ap == NULL) - return (NULL); + if (ap == NULL) { + return NULL; + } ap->ap_qlimit = AUDIT_PIPE_QLIMIT_DEFAULT; TAILQ_INIT(&ap->ap_queue); @@ -604,7 +616,7 @@ audit_pipe_alloc(void) audit_pipe_count++; audit_pipe_ever++; - return (ap); + return ap; } /* @@ -638,7 +650,6 @@ audit_pipe_flush(struct audit_pipe *ap) static void audit_pipe_free(struct audit_pipe *ap) { - AUDIT_PIPE_LIST_WLOCK_ASSERT(); AUDIT_PIPE_LOCK_ASSERT(ap); @@ -666,19 +677,21 @@ audit_pipe_clone(__unused dev_t dev, int action) int i; if (action == DEVFS_CLONE_ALLOC) { - for(i = 0; i < MAX_AUDIT_PIPES; i++) - if (audit_pipe_dtab[i] == NULL) - return (i); + for (i = 0; i < MAX_AUDIT_PIPES; i++) { + if (audit_pipe_dtab[i] == NULL) { + return i; + } + } /* * XXX Should really return -1 here but that seems to hang * things in devfs. Instead return 0 and let _open() tell * userland the bad news. */ - return (0); + return 0; } - return (-1); + return -1; } /* @@ -687,15 +700,16 @@ audit_pipe_clone(__unused dev_t dev, int action) * review access. Those file permissions should be managed carefully. 
*/ static int -audit_pipe_open(dev_t dev, __unused int flags, __unused int devtype, +audit_pipe_open(dev_t dev, __unused int flags, __unused int devtype, __unused proc_t p) { struct audit_pipe *ap; int u; u = minor(dev); - if (u < 0 || u >= MAX_AUDIT_PIPES) - return (ENXIO); + if (u < 0 || u >= MAX_AUDIT_PIPES) { + return ENXIO; + } AUDIT_PIPE_LIST_WLOCK(); ap = audit_pipe_dtab[u]; @@ -703,20 +717,20 @@ audit_pipe_open(dev_t dev, __unused int flags, __unused int devtype, ap = audit_pipe_alloc(); if (ap == NULL) { AUDIT_PIPE_LIST_WUNLOCK(); - return (ENOMEM); + return ENOMEM; } audit_pipe_dtab[u] = ap; } else { KASSERT(ap->ap_open, ("audit_pipe_open: ap && !ap_open")); AUDIT_PIPE_LIST_WUNLOCK(); - return (EBUSY); + return EBUSY; } ap->ap_open = 1; AUDIT_PIPE_LIST_WUNLOCK(); #ifndef __APPLE__ fsetown(td->td_proc->p_pid, &ap->ap_sigio); #endif - return (0); + return 0; } /* @@ -743,7 +757,7 @@ audit_pipe_close(dev_t dev, __unused int flags, __unused int devtype, audit_pipe_free(ap); audit_pipe_dtab[u] = NULL; AUDIT_PIPE_LIST_WUNLOCK(); - return (0); + return 0; } /* @@ -771,10 +785,11 @@ audit_pipe_ioctl(dev_t dev, u_long cmd, caddr_t data, switch (cmd) { case FIONBIO: AUDIT_PIPE_LOCK(ap); - if (*(int *)data) + if (*(int *)data) { ap->ap_flags |= AUDIT_PIPE_NBIO; - else + } else { ap->ap_flags &= ~AUDIT_PIPE_NBIO; + } AUDIT_PIPE_UNLOCK(ap); error = 0; break; @@ -788,10 +803,11 @@ audit_pipe_ioctl(dev_t dev, u_long cmd, caddr_t data, case FIOASYNC: AUDIT_PIPE_LOCK(ap); - if (*(int *)data) + if (*(int *)data) { ap->ap_flags |= AUDIT_PIPE_ASYNC; - else + } else { ap->ap_flags &= ~AUDIT_PIPE_ASYNC; + } AUDIT_PIPE_UNLOCK(ap); error = 0; break; @@ -823,8 +839,9 @@ audit_pipe_ioctl(dev_t dev, u_long cmd, caddr_t data, *(u_int *)data <= AUDIT_PIPE_QLIMIT_MAX) { ap->ap_qlimit = *(u_int *)data; error = 0; - } else + } else { error = EINVAL; + } break; case AUDITPIPE_GET_QLIMIT_MIN: @@ -917,8 +934,9 @@ audit_pipe_ioctl(dev_t dev, u_long cmd, caddr_t data, break; case AUDITPIPE_FLUSH: - if (AUDIT_PIPE_SX_XLOCK_SIG(ap) != 0) - return (EINTR); + if (AUDIT_PIPE_SX_XLOCK_SIG(ap) != 0) { + return EINTR; + } AUDIT_PIPE_LOCK(ap); audit_pipe_flush(ap); AUDIT_PIPE_UNLOCK(ap); @@ -954,7 +972,7 @@ audit_pipe_ioctl(dev_t dev, u_long cmd, caddr_t data, default: error = ENOTTY; } - return (error); + return error; } /* @@ -976,20 +994,21 @@ audit_pipe_read(dev_t dev, struct uio *uio, __unused int flag) * We hold a sleep lock over read and flush because we rely on the * stability of a record in the queue during uiomove(9). 
*/ - if (AUDIT_PIPE_SX_XLOCK_SIG(ap) != 0) - return (EINTR); + if (AUDIT_PIPE_SX_XLOCK_SIG(ap) != 0) { + return EINTR; + } AUDIT_PIPE_LOCK(ap); while (TAILQ_EMPTY(&ap->ap_queue)) { if (ap->ap_flags & AUDIT_PIPE_NBIO) { AUDIT_PIPE_UNLOCK(ap); AUDIT_PIPE_SX_XUNLOCK(ap); - return (EAGAIN); + return EAGAIN; } error = cv_wait_sig(&ap->ap_cv, AUDIT_PIPE_MTX(ap)); if (error) { AUDIT_PIPE_UNLOCK(ap); AUDIT_PIPE_SX_XUNLOCK(ap); - return (error); + return error; } } @@ -1014,7 +1033,7 @@ audit_pipe_read(dev_t dev, struct uio *uio, __unused int flag) toread, uio); if (error) { AUDIT_PIPE_SX_XUNLOCK(ap); - return (error); + return error; } /* @@ -1037,7 +1056,7 @@ audit_pipe_read(dev_t dev, struct uio *uio, __unused int flag) } AUDIT_PIPE_UNLOCK(ap); AUDIT_PIPE_SX_XUNLOCK(ap); - return (0); + return 0; } /* @@ -1055,13 +1074,14 @@ audit_pipe_poll(dev_t dev, int events, void *wql, struct proc *p) if (events & (POLLIN | POLLRDNORM)) { AUDIT_PIPE_LOCK(ap); - if (TAILQ_FIRST(&ap->ap_queue) != NULL) + if (TAILQ_FIRST(&ap->ap_queue) != NULL) { revents |= events & (POLLIN | POLLRDNORM); - else + } else { selrecord(p, &ap->ap_selinfo, wql); + } AUDIT_PIPE_UNLOCK(ap); } - return (revents); + return revents; } #ifndef __APPLE__ @@ -1079,10 +1099,10 @@ audit_pipe_kqread(struct knote *kn, long hint) if (ap->ap_qlen != 0) { kn->kn_data = ap->ap_qbyteslen - ap->ap_qoffset; - return (1); + return 1; } else { kn->kn_data = 0; - return (0); + return 0; } } @@ -1114,28 +1134,29 @@ audit_pipe_init(void) AUDIT_PIPE_LIST_LOCK_INIT(); audit_pipe_major = cdevsw_add(-1, &audit_pipe_cdevsw); - if (audit_pipe_major < 0) - return (KERN_FAILURE); + if (audit_pipe_major < 0) { + return KERN_FAILURE; + } dev = makedev(audit_pipe_major, 0); devnode = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600, audit_pipe_clone, "auditpipe", 0); - if (devnode == NULL) - return (KERN_FAILURE); + if (devnode == NULL) { + return KERN_FAILURE; + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } int audit_pipe_shutdown(void) { - /* unwind everything */ devfs_remove(devnode); (void) cdevsw_remove(audit_pipe_major, &audit_pipe_cdevsw); - return (KERN_SUCCESS); + return KERN_SUCCESS; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_private.h b/bsd/security/audit/audit_private.h index 8b58b79c1..4da799daa 100644 --- a/bsd/security/audit/audit_private.h +++ b/bsd/security/audit/audit_private.h @@ -39,7 +39,7 @@ */ #ifndef _SECURITY_AUDIT_PRIVATE_H_ -#define _SECURITY_AUDIT_PRIVATE_H_ +#define _SECURITY_AUDIT_PRIVATE_H_ #if defined(_KERNEL) || defined(KERNEL) @@ -49,7 +49,7 @@ #endif #include -#include /* for PID_MAX */ +#include /* for PID_MAX */ #include #include @@ -64,60 +64,60 @@ MALLOC_DECLARE(M_AUDITTEXT); * Audit control variables that are usually set/read via system calls and * used to control various aspects of auditing. 
*/ -extern struct au_qctrl audit_qctrl; -extern struct audit_fstat audit_fstat; -extern struct au_mask audit_nae_mask; -extern int audit_panic_on_write_fail; -extern int audit_fail_stop; -extern int audit_argv; -extern int audit_arge; -extern au_ctlmode_t audit_ctl_mode; -extern au_expire_after_t audit_expire_after; +extern struct au_qctrl audit_qctrl; +extern struct audit_fstat audit_fstat; +extern struct au_mask audit_nae_mask; +extern int audit_panic_on_write_fail; +extern int audit_fail_stop; +extern int audit_argv; +extern int audit_arge; +extern au_ctlmode_t audit_ctl_mode; +extern au_expire_after_t audit_expire_after; /* * Kernel mask that is used to check to see if system calls need to be audited. */ -extern au_class_t audit_kevent_mask; +extern au_class_t audit_kevent_mask; /* - * The macro used to check to see if the system calls need to be auditing. + * The macro used to check to see if the system calls need to be auditing. * This will pessimisticly set the audit syscalls flag if the audit kevent * mask has not been created yet. User code should build the event/class * mapping table before setting preselection masks to avoid this. */ -#define AUDIT_CHECK_IF_KEVENTS_MASK(m) do { \ - if ((m).am_success || (m).am_failure) \ - if (!audit_kevent_mask || \ - (audit_kevent_mask & (m).am_success) || \ - (audit_kevent_mask & (m).am_failure)) \ - audit_syscalls = 1; \ +#define AUDIT_CHECK_IF_KEVENTS_MASK(m) do { \ + if ((m).am_success || (m).am_failure) \ + if (!audit_kevent_mask || \ + (audit_kevent_mask & (m).am_success) || \ + (audit_kevent_mask & (m).am_failure)) \ + audit_syscalls = 1; \ } while (0) /* * Success/failure conditions for the conversion of a kernel audit record to * BSM format. */ -#define BSM_SUCCESS 0 -#define BSM_FAILURE 1 -#define BSM_NOAUDIT 2 +#define BSM_SUCCESS 0 +#define BSM_FAILURE 1 +#define BSM_NOAUDIT 2 /* * Defines for the kernel audit record k_ar_commit field. Flags are set to * indicate what sort of record it is, and which preselection mechanism * selected it. */ -#define AR_COMMIT_KERNEL 0x00000001U -#define AR_COMMIT_USER 0x00000010U +#define AR_COMMIT_KERNEL 0x00000001U +#define AR_COMMIT_USER 0x00000010U -#define AR_PRESELECT_TRAIL 0x00001000U -#define AR_PRESELECT_PIPE 0x00002000U +#define AR_PRESELECT_TRAIL 0x00001000U +#define AR_PRESELECT_PIPE 0x00002000U -#define AR_PRESELECT_USER_TRAIL 0x00004000U -#define AR_PRESELECT_USER_PIPE 0x00008000U +#define AR_PRESELECT_USER_TRAIL 0x00004000U +#define AR_PRESELECT_USER_PIPE 0x00008000U -#define AR_PRESELECT_FILTER 0x00010000U +#define AR_PRESELECT_FILTER 0x00010000U -#define AR_DRAIN_QUEUE 0x80000000U +#define AR_DRAIN_QUEUE 0x80000000U /* * Audit data is generated as a stream of struct audit_record structures, @@ -127,179 +127,179 @@ extern au_class_t audit_kevent_mask; * This structure is converted to BSM format before being written to disk. */ struct vnode_au_info { - mode_t vn_mode; - uid_t vn_uid; - gid_t vn_gid; - dev_t vn_dev; - long vn_fsid; - long vn_fileid; - long vn_gen; + mode_t vn_mode; + uid_t vn_uid; + gid_t vn_gid; + dev_t vn_dev; + long vn_fsid; + long vn_fileid; + long vn_gen; }; struct groupset { - gid_t gidset[NGROUPS]; - u_int gidset_size; + gid_t gidset[NGROUPS]; + u_int gidset_size; }; struct socket_au_info { - int sai_domain; - int sai_type; - int sai_protocol; + int sai_domain; + int sai_type; + int sai_protocol; /* Foreign (remote) address/port. */ - struct sockaddr_storage sai_faddr; + struct sockaddr_storage sai_faddr; /* Local address/port. 
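The AUDIT_CHECK_IF_KEVENTS_MASK macro above, restated as a standalone function to make the pessimistic branch explicit. The types here are simplified stand-ins for the BSM ones, not the kernel definitions.

typedef unsigned int au_class_bits_t;           /* stand-in for au_class_t */
struct mask_model { au_class_bits_t am_success, am_failure; };

static int
needs_syscall_auditing(const struct mask_model *m, au_class_bits_t kevent_mask)
{
	if (m->am_success == 0 && m->am_failure == 0) {
		return 0;       /* nothing preselected at all */
	}
	if (kevent_mask == 0) {
		return 1;       /* event/class map not built yet: be pessimistic */
	}
	return (kevent_mask & m->am_success) != 0 ||
	       (kevent_mask & m->am_failure) != 0;
}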
*/ - struct sockaddr_storage sai_laddr; + struct sockaddr_storage sai_laddr; }; /* * The following is used for A_OLDSETQCTRL and A_OLDGETQCTRL and a 64-bit * userland. */ -struct au_qctrl64 { - u_int64_t aq64_hiwater; - u_int64_t aq64_lowater; - u_int64_t aq64_bufsz; - u_int64_t aq64_delay; - int64_t aq64_minfree; +struct au_qctrl64 { + u_int64_t aq64_hiwater; + u_int64_t aq64_lowater; + u_int64_t aq64_bufsz; + u_int64_t aq64_delay; + int64_t aq64_minfree; }; -typedef struct au_qctrl64 au_qctrl64_t; +typedef struct au_qctrl64 au_qctrl64_t; union auditon_udata { - char *au_path; - int au_cond; - int au_policy; - int64_t au_cond64; - int64_t au_policy64; - int au_trigger; - au_evclass_map_t au_evclass; - au_mask_t au_mask; - au_asflgs_t au_flags; - auditinfo_t au_auinfo; - auditpinfo_t au_aupinfo; - auditpinfo_addr_t au_aupinfo_addr; - au_qctrl_t au_qctrl; - au_qctrl64_t au_qctrl64; - au_stat_t au_stat; - au_fstat_t au_fstat; - auditinfo_addr_t au_kau_info; - au_ctlmode_t au_ctl_mode; - au_expire_after_t au_expire_after; + char *au_path; + int au_cond; + int au_policy; + int64_t au_cond64; + int64_t au_policy64; + int au_trigger; + au_evclass_map_t au_evclass; + au_mask_t au_mask; + au_asflgs_t au_flags; + auditinfo_t au_auinfo; + auditpinfo_t au_aupinfo; + auditpinfo_addr_t au_aupinfo_addr; + au_qctrl_t au_qctrl; + au_qctrl64_t au_qctrl64; + au_stat_t au_stat; + au_fstat_t au_fstat; + auditinfo_addr_t au_kau_info; + au_ctlmode_t au_ctl_mode; + au_expire_after_t au_expire_after; }; struct posix_ipc_perm { - uid_t pipc_uid; - gid_t pipc_gid; - mode_t pipc_mode; + uid_t pipc_uid; + gid_t pipc_gid; + mode_t pipc_mode; }; struct au_identity_info { - u_int32_t signer_type; - char *signing_id; - u_char signing_id_trunc; - char *team_id; - u_char team_id_trunc; - u_int8_t *cdhash; - u_int16_t cdhash_len; + u_int32_t signer_type; + char *signing_id; + u_char signing_id_trunc; + char *team_id; + u_char team_id_trunc; + u_int8_t *cdhash; + u_int16_t cdhash_len; }; struct audit_record { /* Audit record header. */ - u_int32_t ar_magic; - int ar_event; - int ar_retval; /* value returned to the process */ - int ar_errno; /* return status of system call */ - struct timespec ar_starttime; - struct timespec ar_endtime; - u_int64_t ar_valid_arg; /* Bitmask of valid arguments */ + u_int32_t ar_magic; + int ar_event; + int ar_retval; /* value returned to the process */ + int ar_errno; /* return status of system call */ + struct timespec ar_starttime; + struct timespec ar_endtime; + u_int64_t ar_valid_arg; /* Bitmask of valid arguments */ /* Audit subject information. */ - struct xucred ar_subj_cred; - uid_t ar_subj_ruid; - gid_t ar_subj_rgid; - gid_t ar_subj_egid; - uid_t ar_subj_auid; /* Audit user ID */ - pid_t ar_subj_asid; /* Audit session ID */ - pid_t ar_subj_pid; - struct au_tid ar_subj_term; - struct au_tid_addr ar_subj_term_addr; - struct au_mask ar_subj_amask; + struct xucred ar_subj_cred; + uid_t ar_subj_ruid; + gid_t ar_subj_rgid; + gid_t ar_subj_egid; + uid_t ar_subj_auid; /* Audit user ID */ + pid_t ar_subj_asid; /* Audit session ID */ + pid_t ar_subj_pid; + struct au_tid ar_subj_term; + struct au_tid_addr ar_subj_term_addr; + struct au_mask ar_subj_amask; /* Operation arguments. 
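One arm of union auditon_udata in use: a sketch that reads the queue controls through auditon(2) with A_GETQCTRL, which on a 64-bit process is the path the au_qctrl64 handling above exists to serve. Requires audit privilege; field casts are for portable printing only.

#include <bsm/audit.h>
#include <stdio.h>

static void
show_queue_limits(void)
{
	struct au_qctrl qc;

	if (auditon(A_GETQCTRL, &qc, sizeof(qc)) == 0) {
		printf("hiwater=%d lowater=%d bufsz=%d\n",
		    (int)qc.aq_hiwater, (int)qc.aq_lowater, (int)qc.aq_bufsz);
	}
}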
*/ - uid_t ar_arg_euid; - uid_t ar_arg_ruid; - uid_t ar_arg_suid; - gid_t ar_arg_egid; - gid_t ar_arg_rgid; - gid_t ar_arg_sgid; - pid_t ar_arg_pid; - pid_t ar_arg_asid; - struct au_tid ar_arg_termid; - struct au_tid_addr ar_arg_termid_addr; - uid_t ar_arg_uid; - uid_t ar_arg_auid; - gid_t ar_arg_gid; - struct groupset ar_arg_groups; - int ar_arg_fd; - int ar_arg_fflags; - mode_t ar_arg_mode; - uint32_t ar_arg_value32; - uint64_t ar_arg_value64; - user_addr_t ar_arg_addr; - user_size_t ar_arg_len; - int ar_arg_mask; - u_int ar_arg_signum; - char ar_arg_login[MAXLOGNAME]; - int ar_arg_ctlname[CTL_MAXNAME]; - struct socket_au_info ar_arg_sockinfo; - char *ar_arg_upath1; - char *ar_arg_upath2; - char *ar_arg_kpath1; /* darwin-only */ - char *ar_arg_kpath2; /* darwin-only */ + uid_t ar_arg_euid; + uid_t ar_arg_ruid; + uid_t ar_arg_suid; + gid_t ar_arg_egid; + gid_t ar_arg_rgid; + gid_t ar_arg_sgid; + pid_t ar_arg_pid; + pid_t ar_arg_asid; + struct au_tid ar_arg_termid; + struct au_tid_addr ar_arg_termid_addr; + uid_t ar_arg_uid; + uid_t ar_arg_auid; + gid_t ar_arg_gid; + struct groupset ar_arg_groups; + int ar_arg_fd; + int ar_arg_fflags; + mode_t ar_arg_mode; + uint32_t ar_arg_value32; + uint64_t ar_arg_value64; + user_addr_t ar_arg_addr; + user_size_t ar_arg_len; + int ar_arg_mask; + u_int ar_arg_signum; + char ar_arg_login[MAXLOGNAME]; + int ar_arg_ctlname[CTL_MAXNAME]; + struct socket_au_info ar_arg_sockinfo; + char *ar_arg_upath1; + char *ar_arg_upath2; + char *ar_arg_kpath1; /* darwin-only */ + char *ar_arg_kpath2; /* darwin-only */ #if CONFIG_MACF - char *ar_vnode1_mac_labels; - char *ar_vnode2_mac_labels; - char *ar_cred_mac_labels; - char *ar_arg_mac_string; + char *ar_vnode1_mac_labels; + char *ar_vnode2_mac_labels; + char *ar_cred_mac_labels; + char *ar_arg_mac_string; #endif - char *ar_arg_text; - void *ar_arg_opaque; /* darwin-only */ - void *ar_arg_data; /* darwin-only */ - u_int16_t ar_arg_opq_size; /* darwin-only */ - u_char ar_arg_data_type; /* darwin-only */ - u_char ar_arg_data_count; /* darwin-only */ - struct au_mask ar_arg_amask; - struct vnode_au_info ar_arg_vnode1; - struct vnode_au_info ar_arg_vnode2; - int ar_arg_cmd; - int ar_arg_svipc_cmd; - struct ipc_perm ar_arg_svipc_perm; - int ar_arg_svipc_id; - user_addr_t ar_arg_svipc_addr; - struct posix_ipc_perm ar_arg_pipc_perm; - mach_port_name_t ar_arg_mach_port1; /* darwin-only */ - mach_port_name_t ar_arg_mach_port2; /* darwin-only */ - union auditon_udata ar_arg_auditon; - char *ar_arg_argv; - int ar_arg_argc; - char *ar_arg_envv; - int ar_arg_envc; - int ar_arg_exitstatus; - int ar_arg_exitretval; + char *ar_arg_text; + void *ar_arg_opaque; /* darwin-only */ + void *ar_arg_data; /* darwin-only */ + u_int16_t ar_arg_opq_size; /* darwin-only */ + u_char ar_arg_data_type; /* darwin-only */ + u_char ar_arg_data_count; /* darwin-only */ + struct au_mask ar_arg_amask; + struct vnode_au_info ar_arg_vnode1; + struct vnode_au_info ar_arg_vnode2; + int ar_arg_cmd; + int ar_arg_svipc_cmd; + struct ipc_perm ar_arg_svipc_perm; + int ar_arg_svipc_id; + user_addr_t ar_arg_svipc_addr; + struct posix_ipc_perm ar_arg_pipc_perm; + mach_port_name_t ar_arg_mach_port1; /* darwin-only */ + mach_port_name_t ar_arg_mach_port2; /* darwin-only */ + union auditon_udata ar_arg_auditon; + char *ar_arg_argv; + int ar_arg_argc; + char *ar_arg_envv; + int ar_arg_envc; + int ar_arg_exitstatus; + int ar_arg_exitretval; struct sockaddr_storage ar_arg_sockaddr; - int ar_arg_fd2; + int ar_arg_fd2; #if CONFIG_MACF /* * MAC security related fields added by 
MAC policies ar_forced_by_mac * is 1 if mac_audit_check_preselect() forced this call to be audited, * 0 otherwise. - */ - LIST_HEAD(mac_audit_record_list_t, mac_audit_record) *ar_mac_records; - int ar_forced_by_mac; + */ + LIST_HEAD(mac_audit_record_list_t, mac_audit_record) * ar_mac_records; + int ar_forced_by_mac; #endif - struct au_identity_info ar_arg_identity; + struct au_identity_info ar_arg_identity; }; /* @@ -307,16 +307,16 @@ struct audit_record { * indicate if they are present so they can be included in the audit log * stream only if defined. */ -#define ARG_IS_VALID(kar, arg) ((kar)->k_ar.ar_valid_arg & (arg)) -#define ARG_SET_VALID(kar, arg) do { \ - (kar)->k_ar.ar_valid_arg |= (arg); \ +#define ARG_IS_VALID(kar, arg) ((kar)->k_ar.ar_valid_arg & (arg)) +#define ARG_SET_VALID(kar, arg) do { \ + (kar)->k_ar.ar_valid_arg |= (arg); \ } while (0) /* * Current thread macro. get_bsdthread_info() returns a void ptr for some * reason. */ -#define curthread() ((struct uthread *)get_bsdthread_info(current_thread())) +#define curthread() ((struct uthread *)get_bsdthread_info(current_thread())) /* * In-kernel version of audit record; the basic record plus queue meta-data. @@ -324,12 +324,12 @@ struct audit_record { * passed through to the audit writing mechanism. */ struct kaudit_record { - struct audit_record k_ar; - u_int32_t k_ar_commit; - void *k_udata; /* User data. */ - u_int k_ulen; /* User data length. */ - struct uthread *k_uthread; /* Audited thread. */ - TAILQ_ENTRY(kaudit_record) k_q; + struct audit_record k_ar; + u_int32_t k_ar_commit; + void *k_udata; /* User data. */ + u_int k_ulen; /* User data length. */ + struct uthread *k_uthread; /* Audited thread. */ + TAILQ_ENTRY(kaudit_record) k_q; }; TAILQ_HEAD(kaudit_queue, kaudit_record); @@ -337,50 +337,50 @@ TAILQ_HEAD(kaudit_queue, kaudit_record); * Functions to manage the allocation, release, and commit of kernel audit * records. */ -void audit_abort(struct kaudit_record *ar); -void audit_commit(struct kaudit_record *ar, int error, - int retval); -struct kaudit_record *audit_new(int event, proc_t p, struct uthread *td); +void audit_abort(struct kaudit_record *ar); +void audit_commit(struct kaudit_record *ar, int error, + int retval); +struct kaudit_record *audit_new(int event, proc_t p, struct uthread *td); /* * Functions relating to the conversion of internal kernel audit records to * the BSM file format. */ struct au_record; -int kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau); -int bsm_rec_verify(void *rec, int length); +int kaudit_to_bsm(struct kaudit_record *kar, struct au_record **pau); +int bsm_rec_verify(void *rec, int length); /* * Kernel versions of the libbsm audit record functions. */ -void kau_free(struct au_record *rec); -void kau_init(void); +void kau_free(struct au_record *rec); +void kau_init(void); /* * Return values for pre-selection and post-selection decisions. */ -#define AU_PRS_SUCCESS 1 -#define AU_PRS_FAILURE 2 -#define AU_PRS_BOTH (AU_PRS_SUCCESS|AU_PRS_FAILURE) +#define AU_PRS_SUCCESS 1 +#define AU_PRS_FAILURE 2 +#define AU_PRS_BOTH (AU_PRS_SUCCESS|AU_PRS_FAILURE) /* * Data structures relating to the kernel audit queue. Ideally, these might * be abstracted so that only accessor methods are exposed. 
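A small model of the ar_valid_arg bitmask pattern behind ARG_SET_VALID/ARG_IS_VALID above. ARG_FD_BIT and the struct are illustrative stand-ins; the kernel's real bit values live elsewhere in the audit headers.

#include <stdint.h>

#define ARG_FD_BIT (1ULL << 0)                  /* stand-in bit, not the real value */

struct audit_record_model {
	uint64_t ar_valid_arg;
	int      ar_arg_fd;
};

static void
record_fd(struct audit_record_model *ar, int fd)
{
	ar->ar_arg_fd = fd;
	ar->ar_valid_arg |= ARG_FD_BIT;         /* ARG_SET_VALID */
}

static int
has_fd(const struct audit_record_model *ar)
{
	return (ar->ar_valid_arg & ARG_FD_BIT) != 0;    /* ARG_IS_VALID */
}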
*/ -extern struct mtx audit_mtx; -extern struct cv audit_watermark_cv; -extern struct cv audit_worker_cv; -extern struct cv audit_drain_cv; -extern struct kaudit_queue audit_q; -extern int audit_q_len; -extern int audit_pre_q_len; -extern int audit_in_failure; +extern struct mtx audit_mtx; +extern struct cv audit_watermark_cv; +extern struct cv audit_worker_cv; +extern struct cv audit_drain_cv; +extern struct kaudit_queue audit_q; +extern int audit_q_len; +extern int audit_pre_q_len; +extern int audit_in_failure; /* * Flags to use on audit files when opening and closing. */ -#define AUDIT_OPEN_FLAGS (FWRITE | O_APPEND) -#define AUDIT_CLOSE_FLAGS (FWRITE | O_APPEND) +#define AUDIT_OPEN_FLAGS (FWRITE | O_APPEND) +#define AUDIT_CLOSE_FLAGS (FWRITE | O_APPEND) #include #include @@ -391,92 +391,92 @@ extern int audit_in_failure; * kernel implementations in order to save the copying of large kernel data * structures. The prototypes of these functions are declared here. */ -token_t *kau_to_socket(struct socket_au_info *soi); +token_t *kau_to_socket(struct socket_au_info *soi); /* * audit_klib prototypes */ -int au_preselect(au_event_t event, au_class_t class, - au_mask_t *mask_p, int sorf); -void au_evclassmap_init(void); -void au_evclassmap_insert(au_event_t event, au_class_t class); -au_class_t au_event_class(au_event_t event); -au_event_t audit_ctlname_to_sysctlevent(int name[], uint64_t valid_arg); -au_event_t audit_flags_and_error_to_openevent(int oflags, int error); -au_event_t audit_flags_and_error_to_openextendedevent(int oflags, - int error); -au_event_t audit_flags_and_error_to_openatevent(int oflags, - int error); -au_event_t audit_flags_and_error_to_openbyidevent(int oflags, - int error); -au_event_t audit_msgctl_to_event(int cmd); -au_event_t audit_semctl_to_event(int cmr); -int audit_canon_path(struct vnode *cwd_vp, char *path, - char *cpath); -au_event_t auditon_command_event(int cmd); -au_event_t audit_fcntl_command_event(int cmd, int oflags, int error); +int au_preselect(au_event_t event, au_class_t class, + au_mask_t *mask_p, int sorf); +void au_evclassmap_init(void); +void au_evclassmap_insert(au_event_t event, au_class_t class); +au_class_t au_event_class(au_event_t event); +au_event_t audit_ctlname_to_sysctlevent(int name[], uint64_t valid_arg); +au_event_t audit_flags_and_error_to_openevent(int oflags, int error); +au_event_t audit_flags_and_error_to_openextendedevent(int oflags, + int error); +au_event_t audit_flags_and_error_to_openatevent(int oflags, + int error); +au_event_t audit_flags_and_error_to_openbyidevent(int oflags, + int error); +au_event_t audit_msgctl_to_event(int cmd); +au_event_t audit_semctl_to_event(int cmr); +int audit_canon_path(struct vnode *cwd_vp, char *path, + char *cpath); +au_event_t auditon_command_event(int cmd); +au_event_t audit_fcntl_command_event(int cmd, int oflags, int error); /* * Audit trigger events notify user space of kernel audit conditions * asynchronously. */ -int audit_send_trigger(unsigned int trigger); +int audit_send_trigger(unsigned int trigger); /* * Accessor functions to manage global audit state. */ -void audit_set_kinfo(struct auditinfo_addr *); -void audit_get_kinfo(struct auditinfo_addr *); +void audit_set_kinfo(struct auditinfo_addr *); +void audit_get_kinfo(struct auditinfo_addr *); /* * General audit related functions. 
*/ -struct kaudit_record *currecord(void); -void audit_free(struct kaudit_record *ar); -void audit_rotate_vnode(struct ucred *cred, - struct vnode *vp); -void audit_worker_init(void); -void audit_identity_info_construct( - struct au_identity_info *id_info); -void audit_identity_info_destruct( - struct au_identity_info *id_info); +struct kaudit_record *currecord(void); +void audit_free(struct kaudit_record *ar); +void audit_rotate_vnode(struct ucred *cred, + struct vnode *vp); +void audit_worker_init(void); +void audit_identity_info_construct( + struct au_identity_info *id_info); +void audit_identity_info_destruct( + struct au_identity_info *id_info); /* * Audit pipe functions. */ -int audit_pipe_init(void); -int audit_pipe_shutdown(void); -int audit_pipe_preselect(au_id_t auid, au_event_t event, - au_class_t class, int sorf, int trail_select); -void audit_pipe_submit(au_id_t auid, au_event_t event, au_class_t class, - int sorf, int trail_select, void *record, u_int record_len); -void audit_pipe_submit_user(void *record, u_int record_len); +int audit_pipe_init(void); +int audit_pipe_shutdown(void); +int audit_pipe_preselect(au_id_t auid, au_event_t event, + au_class_t class, int sorf, int trail_select); +void audit_pipe_submit(au_id_t auid, au_event_t event, au_class_t class, + int sorf, int trail_select, void *record, u_int record_len); +void audit_pipe_submit_user(void *record, u_int record_len); /* * Audit MAC prototypes. */ -void audit_mac_init(void); -int audit_mac_new(proc_t p, struct kaudit_record *ar); -void audit_mac_free(struct kaudit_record *ar); -int audit_mac_syscall_enter(unsigned short code, proc_t p, - struct uthread *uthread, kauth_cred_t my_cred, au_event_t event); -int audit_mac_syscall_exit(unsigned short code, struct uthread *uthread, - int error, int retval); +void audit_mac_init(void); +int audit_mac_new(proc_t p, struct kaudit_record *ar); +void audit_mac_free(struct kaudit_record *ar); +int audit_mac_syscall_enter(unsigned short code, proc_t p, + struct uthread *uthread, kauth_cred_t my_cred, au_event_t event); +int audit_mac_syscall_exit(unsigned short code, struct uthread *uthread, + int error, int retval); /* * Audit Session. */ -void audit_session_init(void); -int audit_session_setaia(proc_t p, auditinfo_addr_t *aia_p); +void audit_session_init(void); +int audit_session_setaia(proc_t p, auditinfo_addr_t *aia_p); auditinfo_addr_t *audit_session_update(auditinfo_addr_t *new_aia); -int audit_session_lookup(au_asid_t asid, auditinfo_addr_t *ret_aia); +int audit_session_lookup(au_asid_t asid, auditinfo_addr_t *ret_aia); /* * Kernel assigned audit session IDs start at PID_MAX + 1 and ends at * ASSIGNED_ASID_MAX. */ -#define ASSIGNED_ASID_MIN (PID_MAX + 1) -#define ASSIGNED_ASID_MAX (0xFFFFFFFF - 1) +#define ASSIGNED_ASID_MIN (PID_MAX + 1) +#define ASSIGNED_ASID_MAX (0xFFFFFFFF - 1) /* * Entitlement required to control various audit subsystem settings diff --git a/bsd/security/audit/audit_session.c b/bsd/security/audit/audit_session.c index f7e3ac51c..d99b186fa 100644 --- a/bsd/security/audit/audit_session.c +++ b/bsd/security/audit/audit_session.c @@ -68,33 +68,33 @@ * needs to be the first entry. */ struct au_sentry { - auditinfo_addr_t se_auinfo; /* Public audit session data. */ -#define se_asid se_auinfo.ai_asid -#define se_auid se_auinfo.ai_auid -#define se_mask se_auinfo.ai_mask -#define se_termid se_auinfo.ai_termid -#define se_flags se_auinfo.ai_flags - - long se_refcnt; /* Reference count. */ - long se_procnt; /* Processes in session. 
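The kernel-assigned session ID range defined above, as a predicate. PID_MAX_MODEL is an illustrative constant standing in for the kernel's PID_MAX.

#include <stdbool.h>
#include <stdint.h>

#define PID_MAX_MODEL      99999U               /* stand-in for PID_MAX */
#define ASID_ASSIGNED_MIN  (PID_MAX_MODEL + 1)
#define ASID_ASSIGNED_MAX  (0xFFFFFFFFU - 1)

static bool
asid_is_kernel_assigned(uint32_t asid)
{
	return asid >= ASID_ASSIGNED_MIN && asid <= ASID_ASSIGNED_MAX;
}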
*/ - ipc_port_t se_port; /* Session port. */ - LIST_ENTRY(au_sentry) se_link; /* Hash bucket link list (1) */ + auditinfo_addr_t se_auinfo; /* Public audit session data. */ +#define se_asid se_auinfo.ai_asid +#define se_auid se_auinfo.ai_auid +#define se_mask se_auinfo.ai_mask +#define se_termid se_auinfo.ai_termid +#define se_flags se_auinfo.ai_flags + + long se_refcnt; /* Reference count. */ + long se_procnt; /* Processes in session. */ + ipc_port_t se_port; /* Session port. */ + LIST_ENTRY(au_sentry) se_link; /* Hash bucket link list (1) */ }; typedef struct au_sentry au_sentry_t; -#define AU_SENTRY_PTR(aia_p) ((au_sentry_t *)(aia_p)) +#define AU_SENTRY_PTR(aia_p) ((au_sentry_t *)(aia_p)) /* - * The default au_sentry/auditinfo_addr entry for ucred. + * The default au_sentry/auditinfo_addr entry for ucred. */ static au_sentry_t audit_default_se = { .se_auinfo = { - .ai_auid = AU_DEFAUDITID, - .ai_asid = AU_DEFAUDITSID, - .ai_termid = { .at_type = AU_IPv4, }, + .ai_auid = AU_DEFAUDITID, + .ai_asid = AU_DEFAUDITSID, + .ai_termid = { .at_type = AU_IPv4, }, }, - .se_refcnt = 1, + .se_refcnt = 1, .se_procnt = 1, }; @@ -110,10 +110,10 @@ void ipc_port_release_send(ipc_port_t); /* * Currently the hash table is a fixed size. */ -#define HASH_TABLE_SIZE 97 -#define HASH_ASID(asid) (audit_session_hash(asid) % HASH_TABLE_SIZE) +#define HASH_TABLE_SIZE 97 +#define HASH_ASID(asid) (audit_session_hash(asid) % HASH_TABLE_SIZE) -static struct rwlock se_entry_lck; /* (1) lock for se_link above */ +static struct rwlock se_entry_lck; /* (1) lock for se_link above */ LIST_HEAD(au_sentry_head, au_sentry); static struct au_sentry_head *au_sentry_bucket = NULL; @@ -132,24 +132,25 @@ typedef enum au_history_event { #define AU_HISTORY_MAX_STACK_DEPTH 8 struct au_history { - struct au_sentry *ptr; - struct au_sentry se; - void *stack[AU_HISTORY_MAX_STACK_DEPTH]; - unsigned int stack_depth; - au_history_event_t event; + struct au_sentry *ptr; + struct au_sentry se; + void *stack[AU_HISTORY_MAX_STACK_DEPTH]; + unsigned int stack_depth; + au_history_event_t event; }; static struct au_history *au_history; -static size_t au_history_size = 65536; -static unsigned int au_history_index; +static size_t au_history_size = 65536; +static unsigned int au_history_index; static inline unsigned int au_history_entries(void) { - if (au_history_index >= au_history_size) + if (au_history_index >= au_history_size) { return au_history_size; - else + } else { return au_history_index; + } } static inline void @@ -173,10 +174,10 @@ au_history_record(au_sentry_t *se, au_history_event_t event) MALLOC_DEFINE(M_AU_SESSION, "audit_session", "Audit session data"); -static void audit_ref_session(au_sentry_t *se); -static void audit_unref_session(au_sentry_t *se); +static void audit_ref_session(au_sentry_t *se); +static void audit_unref_session(au_sentry_t *se); -static void audit_session_event(int event, auditinfo_addr_t *aia_p); +static void audit_session_event(int event, auditinfo_addr_t *aia_p); /* * Audit session device. @@ -189,40 +190,40 @@ static MALLOC_DEFINE(M_AUDIT_SDEV_ENTRY, "audit_sdevent", /* * Default audit sdev buffer parameters. */ -#define AUDIT_SDEV_QLIMIT_DEFAULT 128 -#define AUDIT_SDEV_QLIMIT_MIN 1 -#define AUDIT_SDEV_QLIMIT_MAX 1024 +#define AUDIT_SDEV_QLIMIT_DEFAULT 128 +#define AUDIT_SDEV_QLIMIT_MIN 1 +#define AUDIT_SDEV_QLIMIT_MAX 1024 /* * Entry structure. 
*/ -struct audit_sdev_entry { - void *ase_record; - u_int ase_record_len; - TAILQ_ENTRY(audit_sdev_entry) ase_queue; +struct audit_sdev_entry { + void *ase_record; + u_int ase_record_len; + TAILQ_ENTRY(audit_sdev_entry) ase_queue; }; /* - * Per audit sdev structure. + * Per audit sdev structure. */ struct audit_sdev { - int asdev_open; + int asdev_open; -#define AUDIT_SDEV_ASYNC 0x00000001 -#define AUDIT_SDEV_NBIO 0x00000002 +#define AUDIT_SDEV_ASYNC 0x00000001 +#define AUDIT_SDEV_NBIO 0x00000002 -#define AUDIT_SDEV_ALLSESSIONS 0x00010000 - u_int asdev_flags; +#define AUDIT_SDEV_ALLSESSIONS 0x00010000 + u_int asdev_flags; - struct selinfo asdev_selinfo; - pid_t asdev_sigio; + struct selinfo asdev_selinfo; + pid_t asdev_sigio; - au_id_t asdev_auid; - au_asid_t asdev_asid; + au_id_t asdev_auid; + au_asid_t asdev_asid; /* Per-sdev mutex for most fields in this struct. */ - struct mtx asdev_mtx; + struct mtx asdev_mtx; /* * Per-sdev sleep lock serializing user-generated reads and @@ -230,34 +231,34 @@ struct audit_sdev { * record's data whie the record remains in the queue, so we * prevent other threads from removing it using this lock. */ - struct slck asdev_sx; + struct slck asdev_sx; /* - * Condition variable to signal when data has been delivered to + * Condition variable to signal when data has been delivered to * a sdev. */ - struct cv asdev_cv; + struct cv asdev_cv; /* Count and bound of records in the queue. */ - u_int asdev_qlen; - u_int asdev_qlimit; + u_int asdev_qlen; + u_int asdev_qlimit; /* The number of bytes of data across all records. */ - u_int asdev_qbyteslen; - - /* + u_int asdev_qbyteslen; + + /* * The amount read so far of the first record in the queue. * (The number of bytes available for reading in the queue is * qbyteslen - qoffset.) */ - u_int asdev_qoffset; + u_int asdev_qoffset; /* * Per-sdev operation statistics. */ - u_int64_t asdev_inserts; /* Records added. */ - u_int64_t asdev_reads; /* Records read. */ - u_int64_t asdev_drops; /* Records dropped. */ + u_int64_t asdev_inserts; /* Records added. */ + u_int64_t asdev_reads; /* Records read. */ + u_int64_t asdev_drops; /* Records dropped. */ /* * Current pending record list. This is protected by a @@ -266,34 +267,34 @@ struct audit_sdev { * queue, as an in-progress read may sleep while copying and, * therefore, cannot hold asdev_mtx. */ - TAILQ_HEAD(, audit_sdev_entry) asdev_queue; + TAILQ_HEAD(, audit_sdev_entry) asdev_queue; /* Global sdev list. 
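The readable-byte accounting described in the asdev_qoffset comment above, in function form; the struct is a two-field model, not the kernel layout.

#include <sys/types.h>

struct sdev_counts { u_int qbyteslen, qoffset; };

static u_int
sdev_bytes_available(const struct sdev_counts *c)
{
	/* qoffset counts bytes of the head record already consumed. */
	return c->qbyteslen - c->qoffset;
}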
*/ - TAILQ_ENTRY(audit_sdev) asdev_list; + TAILQ_ENTRY(audit_sdev) asdev_list; }; -#define AUDIT_SDEV_LOCK(asdev) mtx_lock(&(asdev)->asdev_mtx) -#define AUDIT_SDEV_LOCK_ASSERT(asdev) mtx_assert(&(asdev)->asdev_mtx, \ - MA_OWNED) -#define AUDIT_SDEV_LOCK_DESTROY(asdev) mtx_destroy(&(asdev)->asdev_mtx) -#define AUDIT_SDEV_LOCK_INIT(asdev) mtx_init(&(asdev)->asdev_mtx, \ - "audit_sdev_mtx", NULL, MTX_DEF) -#define AUDIT_SDEV_UNLOCK(asdev) mtx_unlock(&(asdev)->asdev_mtx) -#define AUDIT_SDEV_MTX(asdev) (&(asdev)->asdev_mtx) - -#define AUDIT_SDEV_SX_LOCK_DESTROY(asd) slck_destroy(&(asd)->asdev_sx) -#define AUDIT_SDEV_SX_LOCK_INIT(asd) slck_init(&(asd)->asdev_sx, \ - "audit_sdev_sx") -#define AUDIT_SDEV_SX_XLOCK_ASSERT(asd) slck_assert(&(asd)->asdev_sx, \ - SA_XLOCKED) -#define AUDIT_SDEV_SX_XLOCK_SIG(asd) slck_lock_sig(&(asd)->asdev_sx) -#define AUDIT_SDEV_SX_XUNLOCK(asd) slck_unlock(&(asd)->asdev_sx) +#define AUDIT_SDEV_LOCK(asdev) mtx_lock(&(asdev)->asdev_mtx) +#define AUDIT_SDEV_LOCK_ASSERT(asdev) mtx_assert(&(asdev)->asdev_mtx, \ + MA_OWNED) +#define AUDIT_SDEV_LOCK_DESTROY(asdev) mtx_destroy(&(asdev)->asdev_mtx) +#define AUDIT_SDEV_LOCK_INIT(asdev) mtx_init(&(asdev)->asdev_mtx, \ + "audit_sdev_mtx", NULL, MTX_DEF) +#define AUDIT_SDEV_UNLOCK(asdev) mtx_unlock(&(asdev)->asdev_mtx) +#define AUDIT_SDEV_MTX(asdev) (&(asdev)->asdev_mtx) + +#define AUDIT_SDEV_SX_LOCK_DESTROY(asd) slck_destroy(&(asd)->asdev_sx) +#define AUDIT_SDEV_SX_LOCK_INIT(asd) slck_init(&(asd)->asdev_sx, \ + "audit_sdev_sx") +#define AUDIT_SDEV_SX_XLOCK_ASSERT(asd) slck_assert(&(asd)->asdev_sx, \ + SA_XLOCKED) +#define AUDIT_SDEV_SX_XLOCK_SIG(asd) slck_lock_sig(&(asd)->asdev_sx) +#define AUDIT_SDEV_SX_XUNLOCK(asd) slck_unlock(&(asd)->asdev_sx) /* * Cloning variables and constants. */ -#define AUDIT_SDEV_NAME "auditsessions" -#define MAX_AUDIT_SDEVS 32 +#define AUDIT_SDEV_NAME "auditsessions" +#define MAX_AUDIT_SDEVS 32 static int audit_sdev_major; static void *devnode; @@ -301,20 +302,20 @@ static void *devnode; /* * Global list of audit sdevs. The list is protected by a rw lock. * Individaul record queues are protected by per-sdev locks. These - * locks synchronize between threads walking the list to deliver to + * locks synchronize between threads walking the list to deliver to * individual sdevs and adds/removes of sdevs. */ static TAILQ_HEAD(, audit_sdev) audit_sdev_list; -static struct rwlock audit_sdev_lock; +static struct rwlock audit_sdev_lock; -#define AUDIT_SDEV_LIST_LOCK_INIT() rw_init(&audit_sdev_lock, \ - "audit_sdev_list_lock") -#define AUDIT_SDEV_LIST_RLOCK() rw_rlock(&audit_sdev_lock) -#define AUDIT_SDEV_LIST_RUNLOCK() rw_runlock(&audit_sdev_lock) -#define AUDIT_SDEV_LIST_WLOCK() rw_wlock(&audit_sdev_lock) -#define AUDIT_SDEV_LIST_WLOCK_ASSERT() rw_assert(&audit_sdev_lock, \ - RA_WLOCKED) -#define AUDIT_SDEV_LIST_WUNLOCK() rw_wunlock(&audit_sdev_lock) +#define AUDIT_SDEV_LIST_LOCK_INIT() rw_init(&audit_sdev_lock, \ + "audit_sdev_list_lock") +#define AUDIT_SDEV_LIST_RLOCK() rw_rlock(&audit_sdev_lock) +#define AUDIT_SDEV_LIST_RUNLOCK() rw_runlock(&audit_sdev_lock) +#define AUDIT_SDEV_LIST_WLOCK() rw_wlock(&audit_sdev_lock) +#define AUDIT_SDEV_LIST_WLOCK_ASSERT() rw_assert(&audit_sdev_lock, \ + RA_WLOCKED) +#define AUDIT_SDEV_LIST_WUNLOCK() rw_wunlock(&audit_sdev_lock) /* * dev_t doesn't have a pointer for "softc" data so we have to keep track of @@ -322,16 +323,16 @@ static struct rwlock audit_sdev_lock; * * XXX We may want to dynamically grow this as need. 
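The sx-then-mutex discipline those macros pair up, modeled with pthreads as an assumption-laden sketch: the outer lock serializes whole reads so the head record stays stable across a sleeping copy, while the inner lock guards the queue fields and is dropped before any long copy.

#include <pthread.h>

static pthread_mutex_t read_serializer = PTHREAD_MUTEX_INITIALIZER; /* ~asdev_sx */
static pthread_mutex_t queue_mtx       = PTHREAD_MUTEX_INITIALIZER; /* ~asdev_mtx */

static void
read_one_record(void (*copy_out)(void))
{
	pthread_mutex_lock(&read_serializer);   /* AUDIT_SDEV_SX_XLOCK_SIG */
	pthread_mutex_lock(&queue_mtx);         /* AUDIT_SDEV_LOCK */
	/* ... pick head record, update qoffset ... */
	pthread_mutex_unlock(&queue_mtx);       /* drop before the sleeping copy */
	copy_out();                             /* may sleep; head stays pinned */
	pthread_mutex_unlock(&read_serializer); /* AUDIT_SDEV_SX_XUNLOCK */
}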
*/ -static struct audit_sdev *audit_sdev_dtab[MAX_AUDIT_SDEVS]; +static struct audit_sdev *audit_sdev_dtab[MAX_AUDIT_SDEVS]; /* * Special device methods and definition. */ -static open_close_fcn_t audit_sdev_open; -static open_close_fcn_t audit_sdev_close; -static read_write_fcn_t audit_sdev_read; -static ioctl_fcn_t audit_sdev_ioctl; -static select_fcn_t audit_sdev_poll; +static open_close_fcn_t audit_sdev_open; +static open_close_fcn_t audit_sdev_close; +static read_write_fcn_t audit_sdev_read; +static ioctl_fcn_t audit_sdev_ioctl; +static select_fcn_t audit_sdev_poll; static struct cdevsw audit_sdev_cdevsw = { .d_open = audit_sdev_open, @@ -351,28 +352,28 @@ static struct cdevsw audit_sdev_cdevsw = { /* * Global statistics on audit sdevs. */ -static int audit_sdev_count; /* Current number of sdevs. */ -static u_int64_t audit_sdev_ever; /* Sdevs ever allocated. */ -static u_int64_t audit_sdev_records; /* Total records seen. */ -static u_int64_t audit_sdev_drops; /* Global record drop count. */ +static int audit_sdev_count; /* Current number of sdevs. */ +static u_int64_t audit_sdev_ever; /* Sdevs ever allocated. */ +static u_int64_t audit_sdev_records; /* Total records seen. */ +static u_int64_t audit_sdev_drops; /* Global record drop count. */ static int audit_sdev_init(void); -#define AUDIT_SENTRY_RWLOCK_INIT() rw_init(&se_entry_lck, \ - "se_entry_lck") -#define AUDIT_SENTRY_RLOCK() rw_rlock(&se_entry_lck) -#define AUDIT_SENTRY_WLOCK() rw_wlock(&se_entry_lck) -#define AUDIT_SENTRY_RWLOCK_ASSERT() rw_assert(&se_entry_lck, RA_LOCKED) -#define AUDIT_SENTRY_RUNLOCK() rw_runlock(&se_entry_lck) -#define AUDIT_SENTRY_WUNLOCK() rw_wunlock(&se_entry_lck) +#define AUDIT_SENTRY_RWLOCK_INIT() rw_init(&se_entry_lck, \ + "se_entry_lck") +#define AUDIT_SENTRY_RLOCK() rw_rlock(&se_entry_lck) +#define AUDIT_SENTRY_WLOCK() rw_wlock(&se_entry_lck) +#define AUDIT_SENTRY_RWLOCK_ASSERT() rw_assert(&se_entry_lck, RA_LOCKED) +#define AUDIT_SENTRY_RUNLOCK() rw_runlock(&se_entry_lck) +#define AUDIT_SENTRY_WUNLOCK() rw_wunlock(&se_entry_lck) /* Access control on the auditinfo_addr.ai_flags member. */ static uint64_t audit_session_superuser_set_sflags_mask; static uint64_t audit_session_superuser_clear_sflags_mask; static uint64_t audit_session_member_set_sflags_mask; static uint64_t audit_session_member_clear_sflags_mask; -SYSCTL_NODE(, OID_AUTO, audit, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Audit controls"); -SYSCTL_NODE(_audit, OID_AUTO, session, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Audit sessions"); +SYSCTL_NODE(, OID_AUTO, audit, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Audit controls"); +SYSCTL_NODE(_audit, OID_AUTO, session, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Audit sessions"); SYSCTL_QUAD(_audit_session, OID_AUTO, superuser_set_sflags_mask, CTLFLAG_RW | CTLFLAG_LOCKED, &audit_session_superuser_set_sflags_mask, "Audit session flags settable by superuser"); @@ -388,14 +389,14 @@ SYSCTL_QUAD(_audit_session, OID_AUTO, member_clear_sflags_mask, CTLFLAG_RW | CTL extern int set_security_token_task_internal(proc_t p, void *task); -#define AUDIT_SESSION_DEBUG 0 -#if AUDIT_SESSION_DEBUG +#define AUDIT_SESSION_DEBUG 0 +#if AUDIT_SESSION_DEBUG /* - * The following is debugging code that can be used to get a snapshot of the + * The following is debugging code that can be used to get a snapshot of the * session state. 
The audit session information is read out using sysctl: * * error = sysctlbyname("kern.audit_session_debug", buffer_ptr, &buffer_len, - * NULL, 0); + * NULL, 0); */ #include @@ -403,11 +404,11 @@ extern int set_security_token_task_internal(proc_t p, void *task); * The per session record structure for the snapshot data. */ struct au_sentry_debug { - auditinfo_addr_t se_auinfo; - int64_t se_refcnt; /* refereence count */ - int64_t se_procnt; /* process count */ - int64_t se_ptcnt; /* process count from - proc table */ + auditinfo_addr_t se_auinfo; + int64_t se_refcnt; /* refereence count */ + int64_t se_procnt; /* process count */ + int64_t se_ptcnt; /* process count from + * proc table */ }; typedef struct au_sentry_debug au_sentry_debug_t; @@ -424,17 +425,16 @@ SYSCTL_PROC(_kern, OID_AUTO, audit_session_debug, CTLFLAG_RD | CTLFLAG_LOCKED, * in the filterfn while the proc_lock() is held so we really don't need the * callout() function. */ -static int +static int audit_session_debug_callout(__unused proc_t p, __unused void *arg) { - - return (PROC_RETURNED_DONE); + return PROC_RETURNED_DONE; } static int audit_session_debug_filterfn(proc_t p, void *st) { - kauth_cred_t cred = p->p_ucred; + kauth_cred_t cred = p->p_ucred; auditinfo_addr_t *aia_p = cred->cr_audit.as_aia_p; au_sentry_debug_t *sed_tab = (au_sentry_debug_t *) st; au_sentry_debug_t *sdtp; @@ -465,7 +465,7 @@ audit_session_debug_filterfn(proc_t p, void *st) sed_tab->se_ptcnt++; } - return (0); + return 0; } /* @@ -485,28 +485,31 @@ audit_sysctl_session_debug(__unused struct sysctl_oid *oidp, /* * This provides a read-only node. */ - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } /* * Walk the audit session hash table to determine the size. */ AUDIT_SENTRY_RLOCK(); - for(i = 0; i < HASH_TABLE_SIZE; i++) + for (i = 0; i < HASH_TABLE_SIZE; i++) { LIST_FOREACH(se, &au_sentry_bucket[i], se_link) - if (se != NULL) - entry_cnt++; + if (se != NULL) { + entry_cnt++; + } + } entry_cnt++; /* add one for the default entry */ /* - * If just querying then return the space required. There is an + * If just querying then return the space required. There is an * obvious race condition here so we just fudge this by 3 in case * the audit session table grows. */ if (req->oldptr == USER_ADDR_NULL) { req->oldidx = (entry_cnt + 3) * sizeof(au_sentry_debug_t); AUDIT_SENTRY_RUNLOCK(); - return (0); + return 0; } /* @@ -514,7 +517,7 @@ audit_sysctl_session_debug(__unused struct sysctl_oid *oidp, */ if (req->oldlen < (entry_cnt * sizeof(au_sentry_debug_t))) { AUDIT_SENTRY_RUNLOCK(); - return (ENOMEM); + return ENOMEM; } /* * We hold the lock over the alloc since we don't want the table to @@ -524,7 +527,7 @@ audit_sysctl_session_debug(__unused struct sysctl_oid *oidp, sizeof(au_sentry_debug_t)); if (sed_tab == NULL) { AUDIT_SENTRY_RUNLOCK(); - return (ENOMEM); + return ENOMEM; } bzero(sed_tab, entry_cnt * sizeof(au_sentry_debug_t)); @@ -534,12 +537,12 @@ audit_sysctl_session_debug(__unused struct sysctl_oid *oidp, sz = 0; next_sed = sed_tab; /* add the first entry for processes not tracked in sessions. 
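The matching userspace side of the sizing protocol implemented above: pass a NULL buffer first so the handler returns the (fudged-by-3) size via oldidx, then allocate and fetch. The node only exists when AUDIT_SESSION_DEBUG is compiled in.

#include <stdlib.h>
#include <sys/sysctl.h>

static void *
fetch_session_debug(size_t *lenp)
{
	size_t len = 0;
	void *buf;

	if (sysctlbyname("kern.audit_session_debug", NULL, &len, NULL, 0) != 0) {
		return NULL;
	}
	buf = malloc(len);
	if (buf != NULL &&
	    sysctlbyname("kern.audit_session_debug", buf, &len, NULL, 0) != 0) {
		free(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}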
*/ - bcopy(audit_default_aia_p, &next_sed->se_auinfo, sizeof (au_sentry_t)); + bcopy(audit_default_aia_p, &next_sed->se_auinfo, sizeof(au_sentry_t)); next_sed->se_refcnt = (int64_t)audit_default_se.se_refcnt; next_sed->se_procnt = (int64_t)audit_default_se.se_procnt; next_sed++; sz += sizeof(au_sentry_debug_t); - for(i = 0; i < HASH_TABLE_SIZE; i++) { + for (i = 0; i < HASH_TABLE_SIZE; i++) { LIST_FOREACH(se, &au_sentry_bucket[i], se_link) { if (se != NULL) { next_sed->se_auinfo = se->se_auinfo; @@ -562,7 +565,7 @@ audit_sysctl_session_debug(__unused struct sysctl_oid *oidp, err = SYSCTL_OUT(req, sed_tab, sz); kfree(sed_tab, entry_cnt * sizeof(au_sentry_debug_t)); - return (err); + return err; } #endif /* AUDIT_SESSION_DEBUG */ @@ -580,17 +583,19 @@ audit_session_event(int event, auditinfo_addr_t *aia_p) AUE_SESSION_END == event || AUE_SESSION_CLOSE == event, ("audit_session_event: invalid event: %d", event)); - if (NULL == aia_p) + if (NULL == aia_p) { return; + } - /* + /* * Create a new audit record. The record will contain the subject - * ruid, rgid, egid, pid, auid, asid, amask, and term_addr + * ruid, rgid, egid, pid, auid, asid, amask, and term_addr * (implicitly added by audit_new). */ ar = audit_new(event, PROC_NULL, /* Not used */ NULL); - if (NULL == ar) + if (NULL == ar) { return; + } /* * Audit session events are always generated because they are used @@ -598,7 +603,7 @@ audit_session_event(int event, auditinfo_addr_t *aia_p) */ ar->k_ar_commit |= AR_PRESELECT_FILTER; - /* + /* * Populate the subject information. Note that the ruid, rgid, * egid, and pid values are incorrect. We only need the auditinfo_addr * information. @@ -618,7 +623,7 @@ audit_session_event(int event, auditinfo_addr_t *aia_p) ARG_SET_VALID(ar, ARG_AMASK); /* Add the audit session flags to the record. */ - ar->k_ar.ar_arg_value64 = aia_p->ai_flags; + ar->k_ar.ar_arg_value64 = aia_p->ai_flags; ARG_SET_VALID(ar, ARG_VALUE64); @@ -629,41 +634,41 @@ audit_session_event(int event, auditinfo_addr_t *aia_p) /* * Hash the audit session ID using a simple 32-bit mix. */ -static inline uint32_t +static inline uint32_t audit_session_hash(au_asid_t asid) { uint32_t a = (uint32_t) asid; - a = (a - (a << 6)) ^ (a >> 17); + a = (a - (a << 6)) ^ (a >> 17); a = (a - (a << 9)) ^ (a << 4); a = (a - (a << 3)) ^ (a << 10); a = a ^ (a >> 15); - - return (a); + + return a; } /* * Do an hash lookup and find the session entry for a given ASID. Return NULL - * if not found. If the session is found then audit_session_find takes a - * reference. + * if not found. If the session is found then audit_session_find takes a + * reference. 
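The 32-bit mix from audit_session_hash(), lifted into a standalone function that also applies the HASH_ASID() bucket reduction, for anyone who wants to reproduce bucket placement offline.

#include <stdint.h>

#define BUCKETS 97      /* HASH_TABLE_SIZE */

static uint32_t
asid_bucket(uint32_t a)
{
	a = (a - (a << 6)) ^ (a >> 17);
	a = (a - (a << 9)) ^ (a << 4);
	a = (a - (a << 3)) ^ (a << 10);
	a = a ^ (a >> 15);
	return a % BUCKETS;
}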
*/ static au_sentry_t * audit_session_find(au_asid_t asid) { - uint32_t hkey; - au_sentry_t *found_se; + uint32_t hkey; + au_sentry_t *found_se; AUDIT_SENTRY_RWLOCK_ASSERT(); hkey = HASH_ASID(asid); LIST_FOREACH(found_se, &au_sentry_bucket[hkey], se_link) - if (found_se->se_asid == asid) { - au_history_record(found_se, AU_HISTORY_EVENT_FIND); - audit_ref_session(found_se); - return (found_se); - } - return (NULL); + if (found_se->se_asid == asid) { + au_history_record(found_se, AU_HISTORY_EVENT_FIND); + audit_ref_session(found_se); + return found_se; + } + return NULL; } /* @@ -672,13 +677,13 @@ audit_session_find(au_asid_t asid) static void audit_session_remove(au_sentry_t *se) { - uint32_t hkey; - au_sentry_t *found_se, *tmp_se; + uint32_t hkey; + au_sentry_t *found_se, *tmp_se; au_history_record(se, AU_HISTORY_EVENT_DEATH); - KASSERT(se->se_refcnt == 0, ("audit_session_remove: ref count != 0")); + KASSERT(se->se_refcnt == 0, ("audit_session_remove: ref count != 0")); KASSERT(se != &audit_default_se, - ("audit_session_remove: removing default session")); + ("audit_session_remove: removing default session")); hkey = HASH_ASID(se->se_asid); @@ -694,7 +699,6 @@ audit_session_remove(au_sentry_t *se) audit_session_portdestroy(&se->se_port); LIST_FOREACH_SAFE(found_se, &au_sentry_bucket[hkey], se_link, tmp_se) { if (found_se == se) { - /* * Generate an audit event to notify userland of the * session close. @@ -720,8 +724,9 @@ audit_ref_session(au_sentry_t *se) { long old_val; - if (se == NULL || se == &audit_default_se) + if (se == NULL || se == &audit_default_se) { return; + } au_history_record(se, AU_HISTORY_EVENT_REF); @@ -738,14 +743,16 @@ audit_unref_session(au_sentry_t *se) { long old_val; - if (se == NULL || se == &audit_default_se) + if (se == NULL || se == &audit_default_se) { return; + } au_history_record(se, AU_HISTORY_EVENT_UNREF); old_val = OSAddAtomicLong(-1, &se->se_refcnt); - if (old_val == 1) + if (old_val == 1) { audit_session_remove(se); + } KASSERT(old_val > 0, ("audit_unref_session: Too few references on session.")); } @@ -758,9 +765,10 @@ audit_inc_procount(au_sentry_t *se) { long old_val; - if (se == NULL || se == &audit_default_se) + if (se == NULL || se == &audit_default_se) { return; - + } + old_val = OSAddAtomicLong(1, &se->se_procnt); KASSERT(old_val <= PID_MAX, ("audit_inc_procount: proc count > PID_MAX")); @@ -775,24 +783,26 @@ audit_dec_procount(au_sentry_t *se) { long old_val; - if (se == NULL || se == &audit_default_se) + if (se == NULL || se == &audit_default_se) { return; + } old_val = OSAddAtomicLong(-1, &se->se_procnt); /* * If this was the last process generate an audit event to notify * userland of the session ending. */ - if (old_val == 1) + if (old_val == 1) { audit_session_event(AUE_SESSION_END, &se->se_auinfo); + } KASSERT(old_val >= 1, ("audit_dec_procount: proc count < 0")); -} +} /* * Update the session entry and check to see if anything was updated. * Returns: - * 0 Nothing was updated (We don't care about process preselection masks) + * 0 Nothing was updated (We don't care about process preselection masks) * 1 Something was updated. 
*/ static int @@ -801,35 +811,37 @@ audit_update_sentry(au_sentry_t *se, auditinfo_addr_t *new_aia) auditinfo_addr_t *aia = &se->se_auinfo; int update; - KASSERT(new_aia != audit_default_aia_p, - ("audit_update_sentry: Trying to update the default aia.")); + KASSERT(new_aia != audit_default_aia_p, + ("audit_update_sentry: Trying to update the default aia.")); update = (aia->ai_auid != new_aia->ai_auid || bcmp(&aia->ai_termid, &new_aia->ai_termid, - sizeof(new_aia->ai_termid)) || + sizeof(new_aia->ai_termid)) || aia->ai_flags != new_aia->ai_flags); - if (update) + if (update) { bcopy(new_aia, aia, sizeof(*aia)); + } - return (update); + return update; } /* * Return the next session ID. The range of kernel generated audit session IDs * is ASSIGNED_ASID_MIN to ASSIGNED_ASID_MAX. */ -static uint32_t +static uint32_t audit_session_nextid(void) { - static uint32_t next_asid = ASSIGNED_ASID_MIN; + static uint32_t next_asid = ASSIGNED_ASID_MIN; AUDIT_SENTRY_RWLOCK_ASSERT(); - if (next_asid > ASSIGNED_ASID_MAX) + if (next_asid > ASSIGNED_ASID_MAX) { next_asid = ASSIGNED_ASID_MIN; + } - return (next_asid++); + return next_asid++; } /* @@ -845,10 +857,10 @@ audit_session_new(auditinfo_addr_t *new_aia_p, auditinfo_addr_t *old_aia_p) au_sentry_t *se = NULL; au_sentry_t *found_se = NULL; auditinfo_addr_t *aia = NULL; - + KASSERT(new_aia_p != NULL, ("audit_session_new: new_aia_p == NULL")); - new_asid = new_aia_p->ai_asid; + new_asid = new_aia_p->ai_asid; /* * Alloc a new session entry now so we don't wait holding the lock. @@ -861,25 +873,24 @@ audit_session_new(auditinfo_addr_t *new_aia_p, auditinfo_addr_t *old_aia_p) AUDIT_SENTRY_WLOCK(); if (new_asid == AU_ASSIGN_ASID) { do { - new_asid = (au_asid_t)audit_session_nextid(); found_se = audit_session_find(new_asid); - - /* + + /* * If the session ID is currently active then drop the * reference and try again. */ - if (found_se != NULL) + if (found_se != NULL) { audit_unref_session(found_se); - else + } else { break; - } while(1); + } + } while (1); } else { - /* * Check to see if the requested ASID is already in the * hash table. If so, update it with the new auditinfo. - */ + */ if ((found_se = audit_session_find(new_asid)) != NULL) { int updated; @@ -889,18 +900,20 @@ audit_session_new(auditinfo_addr_t *new_aia_p, auditinfo_addr_t *old_aia_p) free(se, M_AU_SESSION); /* If a different session then add this process in. */ - if (new_aia_p != old_aia_p) + if (new_aia_p != old_aia_p) { audit_inc_procount(found_se); + } /* * If the session information was updated then * generate an audit event to notify userland. */ - if (updated) + if (updated) { audit_session_event(AUE_SESSION_UPDATE, &found_se->se_auinfo); + } - return (&found_se->se_auinfo); + return &found_se->se_auinfo; } } @@ -933,7 +946,7 @@ audit_session_new(auditinfo_addr_t *new_aia_p, auditinfo_addr_t *old_aia_p) */ audit_session_event(AUE_SESSION_START, aia); au_history_record(se, AU_HISTORY_EVENT_BIRTH); - return (aia); + return aia; } /* @@ -945,29 +958,30 @@ audit_session_lookup(au_asid_t asid, auditinfo_addr_t *ret_aia) { au_sentry_t *se = NULL; - if ((uint32_t)asid > ASSIGNED_ASID_MAX) - return (-1); + if ((uint32_t)asid > ASSIGNED_ASID_MAX) { + return -1; + } AUDIT_SENTRY_RLOCK(); if ((se = audit_session_find(asid)) == NULL) { AUDIT_SENTRY_RUNLOCK(); - return (1); + return 1; } /* We have a reference on the session so it is safe to drop the lock. 
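The allocate-until-unique loop in audit_session_new(), reduced to its shape. next_id() and id_is_live() are stubs standing in for audit_session_nextid() and audit_session_find(); in the kernel, a collision also requires dropping the reference that find takes before retrying.

#include <stdbool.h>
#include <stdint.h>

#define ID_MIN 100000U                  /* stands in for ASSIGNED_ASID_MIN */
#define ID_MAX (0xFFFFFFFFU - 1)        /* ASSIGNED_ASID_MAX */

static uint32_t
next_id(void)
{
	static uint32_t next = ID_MIN;

	if (next > ID_MAX) {
		next = ID_MIN;          /* wrap, as audit_session_nextid() does */
	}
	return next++;
}

static bool
id_is_live(uint32_t id)
{
	(void)id;
	return false;                   /* stub for audit_session_find() */
}

static uint32_t
alloc_unique_id(void)
{
	uint32_t id;

	do {
		id = next_id();
	} while (id_is_live(id));       /* collision: candidate still active */
	return id;
}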
*/ AUDIT_SENTRY_RUNLOCK(); - if (ret_aia != NULL) + if (ret_aia != NULL) { bcopy(&se->se_auinfo, ret_aia, sizeof(*ret_aia)); + } audit_unref_session(se); - return (0); + return 0; } void audit_session_aiaref(auditinfo_addr_t *aia_p) { - audit_ref_session(AU_SENTRY_PTR(aia_p)); } - + /* * Add a reference to the session entry. */ @@ -979,17 +993,17 @@ audit_session_ref(kauth_cred_t cred) KASSERT(IS_VALID_CRED(cred), ("audit_session_ref: Invalid kauth_cred.")); - aia_p = cred->cr_audit.as_aia_p; + aia_p = cred->cr_audit.as_aia_p; audit_session_aiaref(aia_p); } -void audit_session_aiaunref(auditinfo_addr_t *aia_p) +void +audit_session_aiaunref(auditinfo_addr_t *aia_p) { - audit_unref_session(AU_SENTRY_PTR(aia_p)); } -/* +/* * Remove a reference to the session entry. */ void @@ -1000,7 +1014,7 @@ audit_session_unref(kauth_cred_t cred) KASSERT(IS_VALID_CRED(cred), ("audit_session_unref: Invalid kauth_cred.")); - aia_p = cred->cr_audit.as_aia_p; + aia_p = cred->cr_audit.as_aia_p; audit_session_aiaunref(aia_p); } @@ -1013,11 +1027,11 @@ audit_session_procnew(proc_t p) { kauth_cred_t cred = p->p_ucred; auditinfo_addr_t *aia_p; - - KASSERT(IS_VALID_CRED(cred), + + KASSERT(IS_VALID_CRED(cred), ("audit_session_procnew: Invalid kauth_cred.")); - aia_p = cred->cr_audit.as_aia_p; + aia_p = cred->cr_audit.as_aia_p; audit_inc_procount(AU_SENTRY_PTR(aia_p)); } @@ -1032,16 +1046,16 @@ audit_session_procexit(proc_t p) kauth_cred_t cred = p->p_ucred; auditinfo_addr_t *aia_p; - KASSERT(IS_VALID_CRED(cred), + KASSERT(IS_VALID_CRED(cred), ("audit_session_procexit: Invalid kauth_cred.")); - aia_p = cred->cr_audit.as_aia_p; + aia_p = cred->cr_audit.as_aia_p; audit_dec_procount(AU_SENTRY_PTR(aia_p)); } /* - * Init the audit session code. + * Init the audit session code. */ void audit_session_init(void) @@ -1050,19 +1064,20 @@ audit_session_init(void) KASSERT((ASSIGNED_ASID_MAX - ASSIGNED_ASID_MIN) > PID_MAX, ("audit_session_init: ASSIGNED_ASID_MAX is not large enough.")); - + AUDIT_SENTRY_RWLOCK_INIT(); au_sentry_bucket = malloc( sizeof(struct au_sentry) * HASH_TABLE_SIZE, M_AU_SESSION, M_WAITOK | M_ZERO); - for (i = 0; i < HASH_TABLE_SIZE; i++) + for (i = 0; i < HASH_TABLE_SIZE; i++) { LIST_INIT(&au_sentry_bucket[i]); + } (void)audit_sdev_init(); #if AU_HISTORY_LOGGING au_history = malloc(sizeof(struct au_history) * au_history_size, - M_AU_SESSION, M_WAITOK|M_ZERO); + M_AU_SESSION, M_WAITOK | M_ZERO); #endif } @@ -1073,18 +1088,20 @@ audit_session_update_check(kauth_cred_t cred, auditinfo_addr_t *old, uint64_t n; /* If the current audit ID is not the default then it is immutable. */ - if (old->ai_auid != AU_DEFAUDITID && old->ai_auid != new->ai_auid) - return (EINVAL); + if (old->ai_auid != AU_DEFAUDITID && old->ai_auid != new->ai_auid) { + return EINVAL; + } /* If the current termid is not the default then it is immutable. */ if ((old->ai_termid.at_type != AU_IPv4 || - old->ai_termid.at_port != 0 || - old->ai_termid.at_addr[0] != 0) && + old->ai_termid.at_port != 0 || + old->ai_termid.at_addr[0] != 0) && (old->ai_termid.at_port != new->ai_termid.at_port || - old->ai_termid.at_type != new->ai_termid.at_type || - 0 != bcmp(&old->ai_termid.at_addr, &new->ai_termid.at_addr, - sizeof (old->ai_termid.at_addr)))) - return (EINVAL); + old->ai_termid.at_type != new->ai_termid.at_type || + 0 != bcmp(&old->ai_termid.at_addr, &new->ai_termid.at_addr, + sizeof(old->ai_termid.at_addr)))) { + return EINVAL; + } /* The flags may be set only according to the * audit_session_*_set_sflags_masks. 
@@ -1092,10 +1109,11 @@ audit_session_update_check(kauth_cred_t cred, auditinfo_addr_t *old, n = ~old->ai_flags & new->ai_flags; if (0 != n && !((n == (audit_session_superuser_set_sflags_mask & n) && - kauth_cred_issuser(cred)) || - (n == (audit_session_member_set_sflags_mask & n) && - old->ai_asid == new->ai_asid))) - return (EINVAL); + kauth_cred_issuser(cred)) || + (n == (audit_session_member_set_sflags_mask & n) && + old->ai_asid == new->ai_asid))) { + return EINVAL; + } /* The flags may be cleared only according to the * audit_session_*_clear_sflags_masks. @@ -1103,17 +1121,18 @@ audit_session_update_check(kauth_cred_t cred, auditinfo_addr_t *old, n = ~new->ai_flags & old->ai_flags; if (0 != n && !((n == (audit_session_superuser_clear_sflags_mask & n) && - kauth_cred_issuser(cred)) || - (n == (audit_session_member_clear_sflags_mask & n) && - old->ai_asid == new->ai_asid))) - return (EINVAL); + kauth_cred_issuser(cred)) || + (n == (audit_session_member_clear_sflags_mask & n) && + old->ai_asid == new->ai_asid))) { + return EINVAL; + } /* The audit masks are mutable. */ - return (0); + return 0; } /* - * Safely update kauth cred of the given process with new the given audit info. + * Safely update kauth cred of the given process with new the given audit info. */ int audit_session_setaia(proc_t p, auditinfo_addr_t *new_aia_p) @@ -1132,8 +1151,9 @@ audit_session_setaia(proc_t p, auditinfo_addr_t *new_aia_p) my_cred = kauth_cred_proc_ref(p); ret = audit_session_update_check(my_cred, &caia, new_aia_p); kauth_cred_unref(&my_cred); - if (ret) - return (ret); + if (ret) { + return ret; + } } my_cred = kauth_cred_proc_ref(p); @@ -1143,8 +1163,9 @@ audit_session_setaia(proc_t p, auditinfo_addr_t *new_aia_p) as.as_aia_p = audit_session_new(new_aia_p, old_aia_p); /* If the process left a session then update the process count. */ - if (old_aia_p != new_aia_p) + if (old_aia_p != new_aia_p) { audit_dec_procount(AU_SENTRY_PTR(old_aia_p)); + } /* @@ -1155,7 +1176,6 @@ audit_session_setaia(proc_t p, auditinfo_addr_t *new_aia_p) * in the target process and take a reference while we muck with it. */ for (;;) { - /* * Set the credential with new info. If there is no change, * we get back the same credential we passed in; if there is @@ -1199,23 +1219,23 @@ audit_session_setaia(proc_t p, auditinfo_addr_t *new_aia_p) /* Propagate the change from the process to the Mach task. */ set_security_token(p); - return (0); + return 0; } /* * audit_session_self (system call) * - * Description: Obtain a Mach send right for the current session. + * Description: Obtain a Mach send right for the current session. * * Parameters: p Process calling audit_session_self(). - * + * * Returns: *ret_port Named Mach send right, which may be - * MACH_PORT_NULL in the failure case. + * MACH_PORT_NULL in the failure case. * * Errno: 0 Success - * EINVAL The calling process' session has not be set. - * ESRCH Bad process, can't get valid cred for process. - * ENOMEM Port allocation failed due to no free memory. + * EINVAL The calling process' session has not be set. + * ESRCH Bad process, can't get valid cred for process. + * ENOMEM Port allocation failed due to no free memory. */ int audit_session_self(proc_t p, __unused struct audit_session_self_args *uap, @@ -1240,15 +1260,16 @@ audit_session_self(proc_t p, __unused struct audit_session_self_args *uap, goto done; } - se = AU_SENTRY_PTR(aia_p); + se = AU_SENTRY_PTR(aia_p); - /* + /* * Processes that join using this mach port will inherit this process' * pre-selection masks. 
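The flag-change check above, one direction at a time, as a pure predicate: n = ~old & new isolates the bits being set, and the change passes only if every such bit is covered by the superuser mask (for root) or the member mask (for a same-session update). Clearing is symmetric with n = ~new & old. Masks and the two booleans are parameters here rather than the kernel globals.

#include <stdbool.h>
#include <stdint.h>

static bool
may_set_flags(uint64_t oldf, uint64_t newf, uint64_t su_mask,
    uint64_t member_mask, bool is_superuser, bool same_session)
{
	uint64_t n = ~oldf & newf;      /* bits this update tries to set */

	if (n == 0) {
		return true;
	}
	return (n == (su_mask & n) && is_superuser) ||
	       (n == (member_mask & n) && same_session);
}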
*/ - if (se->se_port == IPC_PORT_NULL) + if (se->se_port == IPC_PORT_NULL) { bcopy(&cred->cr_audit.as_mask, &se->se_mask, sizeof(se->se_mask)); + } /* * Get a send right to the session's Mach port and insert it in the @@ -1258,11 +1279,13 @@ audit_session_self(proc_t p, __unused struct audit_session_self_args *uap, *ret_port = ipc_port_copyout_send(sendport, get_task_ipcspace(p->task)); done: - if (cred != NULL) - kauth_cred_unref(&cred); - if (err != 0) + if (cred != NULL) { + kauth_cred_unref(&cred); + } + if (err != 0) { *ret_port = MACH_PORT_NULL; - return (err); + } + return err; } /* @@ -1272,18 +1295,18 @@ done: * * Parameters: p Process calling audit_session_port(). * uap->asid The target audit session ID. The special - * value -1 can be used to target the process's - * own session. + * value -1 can be used to target the process's + * own session. * uap->portnamep User address at which to place port name. * * Returns: 0 Success - * EINVAL The calling process' session has not be set. - * EINVAL The given session ID could not be found. - * EINVAL The Mach port right could not be copied out. - * ESRCH Bad process, can't get valid cred for process. - * EPERM Only the superuser can reference sessions other - * than the process's own. - * ENOMEM Port allocation failed due to no free memory. + * EINVAL The calling process' session has not be set. + * EINVAL The given session ID could not be found. + * EINVAL The Mach port right could not be copied out. + * ESRCH Bad process, can't get valid cred for process. + * EPERM Only the superuser can reference sessions other + * than the process's own. + * ENOMEM Port allocation failed due to no free memory. */ int audit_session_port(proc_t p, struct audit_session_port_args *uap, @@ -1316,7 +1339,6 @@ audit_session_port(proc_t p, struct audit_session_port_args *uap, */ if (uap->asid == (au_asid_t)-1 || uap->asid == aia_p->ai_asid) { - if (!IS_VALID_SESSION(aia_p)) { /* Can't join the default session. */ err = EINVAL; @@ -1349,9 +1371,10 @@ audit_session_port(proc_t p, struct audit_session_port_args *uap, * Processes that join using this mach port will inherit this process' * pre-selection masks. 
*/ - if (se->se_port == IPC_PORT_NULL) + if (se->se_port == IPC_PORT_NULL) { bcopy(&cred->cr_audit.as_mask, &se->se_mask, sizeof(se->se_mask)); + } /* * Use the session reference to create a mach port reference for the @@ -1366,15 +1389,18 @@ audit_session_port(proc_t p, struct audit_session_port_args *uap, } err = copyout(&portname, uap->portnamep, sizeof(mach_port_name_t)); done: - if (cred != NULL) + if (cred != NULL) { kauth_cred_unref(&cred); - if (NULL != se) + } + if (NULL != se) { audit_unref_session(se); - if (MACH_PORT_VALID(portname) && 0 != err) - (void)mach_port_deallocate(get_task_ipcspace(p->task), + } + if (MACH_PORT_VALID(portname) && 0 != err) { + (void)mach_port_deallocate(get_task_ipcspace(p->task), portname); + } - return (err); + return err; } static int @@ -1396,7 +1422,7 @@ audit_session_join_internal(proc_t p, task_t task, ipc_port_t port, au_asid_t *n kauth_cred_ref(p->p_ucred); my_cred = p->p_ucred; if (!IS_VALID_CRED(my_cred)) { - kauth_cred_unref(&my_cred); + kauth_cred_unref(&my_cred); proc_ucred_unlock(p); err = ESRCH; goto done; @@ -1413,7 +1439,7 @@ audit_session_join_internal(proc_t p, task_t task, ipc_port_t port, au_asid_t *n struct au_session new_as; bcopy(&new_aia_p->ai_mask, &new_as.as_mask, - sizeof(new_as.as_mask)); + sizeof(new_as.as_mask)); new_as.as_aia_p = new_aia_p; my_new_cred = kauth_cred_setauditinfo(my_cred, &new_as); @@ -1430,16 +1456,17 @@ audit_session_join_internal(proc_t p, task_t task, ipc_port_t port, au_asid_t *n /* Decrement the process count of the former session. */ audit_dec_procount(AU_SENTRY_PTR(old_aia_p)); - } else { + } else { proc_ucred_unlock(p); } kauth_cred_unref(&my_cred); done: - if (port != IPC_PORT_NULL) + if (port != IPC_PORT_NULL) { ipc_port_release_send(port); + } - return (err); + return err; } /* @@ -1448,33 +1475,33 @@ done: * Description: posix_spawn() interface to audit_session_join_internal(). * * Returns: 0 Success - * EINVAL Invalid Mach port name. - * ESRCH Invalid calling process/cred. + * EINVAL Invalid Mach port name. + * ESRCH Invalid calling process/cred. */ int audit_session_spawnjoin(proc_t p, task_t task, ipc_port_t port) { au_asid_t new_asid; - - return (audit_session_join_internal(p, task, port, &new_asid)); + + return audit_session_join_internal(p, task, port, &new_asid); } /* * audit_session_join (system call) * * Description: Join the session for a given Mach port send right. - * + * * Parameters: p Process calling session join. - * uap->port A Mach send right. + * uap->port A Mach send right. * * Returns: *ret_asid Audit session ID of new session. * In the failure case the return value will be -1 * and 'errno' will be set to a non-zero value * described below. * - * Errno: 0 Success - * EINVAL Invalid Mach port name. - * ESRCH Invalid calling process/cred. + * Errno: 0 Success + * EINVAL Invalid Mach port name. + * ESRCH Invalid calling process/cred. 
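A userspace round-trip of the two syscalls documented above, assuming the libbsm wrappers audit_session_self() and audit_session_join() are declared in <bsm/audit_session.h>. Joining our own session is a no-op membership change; since the kernel copies the send right rather than consuming the caller's name (an assumption based on the COPY_SEND copyin in this hunk), the name is deallocated afterwards.

#include <bsm/audit.h>
#include <bsm/audit_session.h>  /* assumed home of the wrappers */
#include <mach/mach.h>
#include <stdio.h>

static void
session_port_roundtrip(void)
{
	mach_port_name_t port = audit_session_self();
	au_asid_t asid;

	if (!MACH_PORT_VALID(port)) {
		return;
	}
	asid = audit_session_join(port);
	printf("joined asid %d\n", (int)asid);
	(void)mach_port_deallocate(mach_task_self(), port);
}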
*/ int audit_session_join(proc_t p, struct audit_session_join_args *uap, @@ -1484,15 +1511,16 @@ audit_session_join(proc_t p, struct audit_session_join_args *uap, mach_port_name_t send = uap->port; int err = 0; - + if (ipc_object_copyin(get_task_ipcspace(p->task), send, - MACH_MSG_TYPE_COPY_SEND, &port) != KERN_SUCCESS) { + MACH_MSG_TYPE_COPY_SEND, &port) != KERN_SUCCESS) { *ret_asid = AU_DEFAUDITSID; err = EINVAL; - } else + } else { err = audit_session_join_internal(p, p->task, port, ret_asid); + } - return (err); + return err; } /* @@ -1505,7 +1533,6 @@ audit_session_join(proc_t p, struct audit_session_join_args *uap, static void audit_sdev_entry_free(struct audit_sdev_entry *ase) { - free(ase->ase_record, M_AUDIT_SDEV_ENTRY); free(ase, M_AUDIT_SDEV_ENTRY); } @@ -1528,7 +1555,7 @@ audit_sdev_append(struct audit_sdev *asdev, void *record, u_int record_len) return; } - ase = malloc(sizeof (*ase), M_AUDIT_SDEV_ENTRY, M_NOWAIT | M_ZERO); + ase = malloc(sizeof(*ase), M_AUDIT_SDEV_ENTRY, M_NOWAIT | M_ZERO); if (NULL == ase) { asdev->asdev_drops++; audit_sdev_drops++; @@ -1551,8 +1578,9 @@ audit_sdev_append(struct audit_sdev *asdev, void *record, u_int record_len) asdev->asdev_qlen++; asdev->asdev_qbyteslen += ase->ase_record_len; selwakeup(&asdev->asdev_selinfo); - if (asdev->asdev_flags & AUDIT_SDEV_ASYNC) + if (asdev->asdev_flags & AUDIT_SDEV_ASYNC) { pgsigio(asdev->asdev_sigio, SIGIO); + } cv_broadcast(&asdev->asdev_cv); } @@ -1570,22 +1598,24 @@ audit_sdev_submit(__unused au_id_t auid, __unused au_asid_t asid, void *record, * Lockless read to avoid lock overhead if session devices are not in * use. */ - if (NULL == TAILQ_FIRST(&audit_sdev_list)) + if (NULL == TAILQ_FIRST(&audit_sdev_list)) { return; + } AUDIT_SDEV_LIST_RLOCK(); TAILQ_FOREACH(asdev, &audit_sdev_list, asdev_list) { AUDIT_SDEV_LOCK(asdev); - - /* + + /* * Only append to the sdev queue if the AUID and ASID match that * of the process that opened this session device or if the * ALLSESSIONS flag is set. */ if ((/* XXXss auid == asdev->asdev_auid && */ - asid == asdev->asdev_asid) || - (asdev->asdev_flags & AUDIT_SDEV_ALLSESSIONS) != 0) + asid == asdev->asdev_asid) || - (asdev->asdev_flags & AUDIT_SDEV_ALLSESSIONS) != 0) { audit_sdev_append(asdev, record, record_len); + } AUDIT_SDEV_UNLOCK(asdev); } AUDIT_SDEV_LIST_RUNLOCK(); @@ -1605,9 +1635,10 @@ audit_sdev_alloc(void) AUDIT_SDEV_LIST_WLOCK_ASSERT(); - asdev = malloc(sizeof (*asdev), M_AUDIT_SDEV, M_WAITOK | M_ZERO); - if (NULL == asdev) - return (NULL); + asdev = malloc(sizeof(*asdev), M_AUDIT_SDEV, M_WAITOK | M_ZERO); + if (NULL == asdev) { + return NULL; + } asdev->asdev_qlimit = AUDIT_SDEV_QLIMIT_DEFAULT; TAILQ_INIT(&asdev->asdev_queue); @@ -1622,7 +1653,7 @@ audit_sdev_alloc(void) audit_sdev_count++; audit_sdev_ever++; - return (asdev); + return asdev; } /* @@ -1654,7 +1685,6 @@ audit_sdev_flush(struct audit_sdev *asdev) static void audit_sdev_free(struct audit_sdev *asdev) { - AUDIT_SDEV_LIST_WLOCK_ASSERT(); AUDIT_SDEV_LOCK_ASSERT(asdev); @@ -1683,26 +1713,28 @@ audit_sdev_get_aia(proc_t p, struct auditinfo_addr *aia_p) scred = kauth_cred_proc_ref(p); error = suser(scred, &p->p_acflag); - if (NULL != aia_p) - bcopy(scred->cr_audit.as_aia_p, aia_p, sizeof (*aia_p)); + if (NULL != aia_p) { + bcopy(scred->cr_audit.as_aia_p, aia_p, sizeof(*aia_p)); + } kauth_cred_unref(&scred); - return (error); + return error; } /* * Audit session dev open method.
*/ static int -audit_sdev_open(dev_t dev, __unused int flags, __unused int devtype, proc_t p) +audit_sdev_open(dev_t dev, __unused int flags, __unused int devtype, proc_t p) { struct audit_sdev *asdev; struct auditinfo_addr aia; int u; u = minor(dev); - if (u < 0 || u >= MAX_AUDIT_SDEVS) - return (ENXIO); + if (u < 0 || u >= MAX_AUDIT_SDEVS) { + return ENXIO; + } (void) audit_sdev_get_aia(p, &aia); @@ -1712,22 +1744,22 @@ audit_sdev_open(dev_t dev, __unused int flags, __unused int devtype, proc_t p) asdev = audit_sdev_alloc(); if (NULL == asdev) { AUDIT_SDEV_LIST_WUNLOCK(); - return (ENOMEM); + return ENOMEM; } audit_sdev_dtab[u] = asdev; } else { KASSERT(asdev->asdev_open, ("audit_sdev_open: Already open")); AUDIT_SDEV_LIST_WUNLOCK(); - return (EBUSY); + return EBUSY; } asdev->asdev_open = 1; asdev->asdev_auid = aia.ai_auid; asdev->asdev_asid = aia.ai_asid; - asdev->asdev_flags = 0; + asdev->asdev_flags = 0; AUDIT_SDEV_LIST_WUNLOCK(); - return (0); + return 0; } /* @@ -1753,7 +1785,7 @@ audit_sdev_close(dev_t dev, __unused int flags, __unused int devtype, audit_sdev_dtab[u] = NULL; AUDIT_SDEV_LIST_WUNLOCK(); - return (0); + return 0; } /* @@ -1774,10 +1806,11 @@ audit_sdev_ioctl(dev_t dev, u_long cmd, caddr_t data, switch (cmd) { case FIONBIO: AUDIT_SDEV_LOCK(asdev); - if (*(int *)data) + if (*(int *)data) { asdev->asdev_flags |= AUDIT_SDEV_NBIO; - else + } else { asdev->asdev_flags &= ~AUDIT_SDEV_NBIO; + } AUDIT_SDEV_UNLOCK(asdev); break; @@ -1799,8 +1832,9 @@ audit_sdev_ioctl(dev_t dev, u_long cmd, caddr_t data, if (*(u_int *)data >= AUDIT_SDEV_QLIMIT_MIN || *(u_int *)data <= AUDIT_SDEV_QLIMIT_MAX) { asdev->asdev_qlimit = *(u_int *)data; - } else + } else { error = EINVAL; + } break; case AUDITSDEV_GET_QLIMIT_MIN: @@ -1812,8 +1846,9 @@ audit_sdev_ioctl(dev_t dev, u_long cmd, caddr_t data, break; case AUDITSDEV_FLUSH: - if (AUDIT_SDEV_SX_XLOCK_SIG(asdev) != 0) - return (EINTR); + if (AUDIT_SDEV_SX_XLOCK_SIG(asdev) != 0) { + return EINTR; + } AUDIT_SDEV_LOCK(asdev); audit_sdev_flush(asdev); AUDIT_SDEV_UNLOCK(asdev); @@ -1839,22 +1874,25 @@ audit_sdev_ioctl(dev_t dev, u_long cmd, caddr_t data, case AUDITSDEV_GET_ALLSESSIONS: error = audit_sdev_get_aia(p, NULL); - if (error) + if (error) { break; + } *(u_int *)data = (asdev->asdev_flags & AUDIT_SDEV_ALLSESSIONS) ? 1 : 0; break; case AUDITSDEV_SET_ALLSESSIONS: error = audit_sdev_get_aia(p, NULL); - if (error) + if (error) { break; + } AUDIT_SDEV_LOCK(asdev); - if (*(int *)data) + if (*(int *)data) { asdev->asdev_flags |= AUDIT_SDEV_ALLSESSIONS; - else + } else { asdev->asdev_flags &= ~AUDIT_SDEV_ALLSESSIONS; + } AUDIT_SDEV_UNLOCK(asdev); break; @@ -1862,11 +1900,11 @@ audit_sdev_ioctl(dev_t dev, u_long cmd, caddr_t data, error = ENOTTY; } - return (error); + return error; } /* - * Audit session dev read method. + * Audit session dev read method. */ static int audit_sdev_read(dev_t dev, struct uio *uio, __unused int flag) @@ -1883,20 +1921,21 @@ audit_sdev_read(dev_t dev, struct uio *uio, __unused int flag) * We hold a sleep lock over read and flush because we rely on the * stability of a record in the queue during uiomove. 
*/ - if (0 != AUDIT_SDEV_SX_XLOCK_SIG(asdev)) - return (EINTR); + if (0 != AUDIT_SDEV_SX_XLOCK_SIG(asdev)) { + return EINTR; + } AUDIT_SDEV_LOCK(asdev); while (TAILQ_EMPTY(&asdev->asdev_queue)) { if (asdev->asdev_flags & AUDIT_SDEV_NBIO) { AUDIT_SDEV_UNLOCK(asdev); AUDIT_SDEV_SX_XUNLOCK(asdev); - return (EAGAIN); + return EAGAIN; } error = cv_wait_sig(&asdev->asdev_cv, AUDIT_SDEV_MTX(asdev)); if (error) { AUDIT_SDEV_UNLOCK(asdev); AUDIT_SDEV_SX_XUNLOCK(asdev); - return (error); + return error; } } @@ -1921,7 +1960,7 @@ audit_sdev_read(dev_t dev, struct uio *uio, __unused int flag) toread, uio); if (error) { AUDIT_SDEV_SX_XUNLOCK(asdev); - return (error); + return error; } /* @@ -1933,18 +1972,18 @@ audit_sdev_read(dev_t dev, struct uio *uio, __unused int flag) ("audit_sdev_read: queue out of sync after uiomove")); asdev->asdev_qoffset += toread; KASSERT(ase->ase_record_len >= asdev->asdev_qoffset, - ("audit_sdev_read: record_len >= qoffset (2)")); - if (asdev->asdev_qoffset == ase->ase_record_len) { - TAILQ_REMOVE(&asdev->asdev_queue, ase, ase_queue); - asdev->asdev_qbyteslen -= ase->ase_record_len; - audit_sdev_entry_free(ase); - asdev->asdev_qlen--; - asdev->asdev_qoffset = 0; - } + ("audit_sdev_read: record_len >= qoffset (2)")); + if (asdev->asdev_qoffset == ase->ase_record_len) { + TAILQ_REMOVE(&asdev->asdev_queue, ase, ase_queue); + asdev->asdev_qbyteslen -= ase->ase_record_len; + audit_sdev_entry_free(ase); + asdev->asdev_qlen--; + asdev->asdev_qoffset = 0; + } } AUDIT_SDEV_UNLOCK(asdev); AUDIT_SDEV_SX_XUNLOCK(asdev); - return (0); + return 0; } /* @@ -1962,13 +2001,14 @@ audit_sdev_poll(dev_t dev, int events, void *wql, struct proc *p) if (events & (POLLIN | POLLRDNORM)) { AUDIT_SDEV_LOCK(asdev); - if (NULL != TAILQ_FIRST(&asdev->asdev_queue)) + if (NULL != TAILQ_FIRST(&asdev->asdev_queue)) { revents |= events & (POLLIN | POLLRDNORM); - else + } else { selrecord(p, &asdev->asdev_selinfo, wql); + } AUDIT_SDEV_UNLOCK(asdev); } - return (revents); + return revents; } /* @@ -1981,19 +2021,21 @@ audit_sdev_clone(__unused dev_t dev, int action) int i; if (DEVFS_CLONE_ALLOC == action) { - for(i = 0; i < MAX_AUDIT_SDEVS; i++) - if (NULL == audit_sdev_dtab[i]) - return (i); + for (i = 0; i < MAX_AUDIT_SDEVS; i++) { + if (NULL == audit_sdev_dtab[i]) { + return i; + } + } - /* + /* * This really should return -1 here but that seems to * hang things in devfs. We instead return 0 and let * audit_sdev_open tell userland the bad news. 
*/ - return (0); + return 0; } - return (-1); + return -1; } static int @@ -2005,30 +2047,32 @@ audit_sdev_init(void) AUDIT_SDEV_LIST_LOCK_INIT(); audit_sdev_major = cdevsw_add(-1, &audit_sdev_cdevsw); - if (audit_sdev_major < 0) - return (KERN_FAILURE); + if (audit_sdev_major < 0) { + return KERN_FAILURE; + } dev = makedev(audit_sdev_major, 0); devnode = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0644, audit_sdev_clone, AUDIT_SDEV_NAME, 0); - if (NULL == devnode) - return (KERN_FAILURE); + if (NULL == devnode) { + return KERN_FAILURE; + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* XXXss -static int -audit_sdev_shutdown(void) -{ - - devfs_remove(devnode); - (void) cdevsw_remove(audit_sdev_major, &audit_sdev_cdevsw); - - return (KERN_SUCCESS); -} -*/ + * static int + * audit_sdev_shutdown(void) + * { + * + * devfs_remove(devnode); + * (void) cdevsw_remove(audit_sdev_major, &audit_sdev_cdevsw); + * + * return (KERN_SUCCESS); + * } + */ #else @@ -2038,7 +2082,7 @@ audit_session_self(proc_t p, struct audit_session_self_args *uap, { #pragma unused(p, uap, ret_port) - return (ENOSYS); + return ENOSYS; } int @@ -2047,7 +2091,7 @@ audit_session_join(proc_t p, struct audit_session_join_args *uap, { #pragma unused(p, uap, ret_asid) - return (ENOSYS); + return ENOSYS; } int @@ -2055,7 +2099,7 @@ audit_session_port(proc_t p, struct audit_session_port_args *uap, int *retval) { #pragma unused(p, uap, retval) - return (ENOSYS); + return ENOSYS; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_syscalls.c b/bsd/security/audit/audit_syscalls.c index 191596b5f..0df9209ce 100644 --- a/bsd/security/audit/audit_syscalls.c +++ b/bsd/security/audit/audit_syscalls.c @@ -92,47 +92,47 @@ #if CONFIG_AUDIT -#define IS_NOT_VALID_PID(p) ((p) < 1 || (p) > PID_MAX) +#define IS_NOT_VALID_PID(p) ((p) < 1 || (p) > PID_MAX) #ifdef AUDIT_API_WARNINGS /* * Macro to warn about auditinfo_addr_t/auditpinfo_addr_t changing sizes * to encourage the userland code to be recompiled and updated. */ -#define WARN_IF_AINFO_ADDR_CHANGED(sz1, sz2, scall, tp) do { \ - if ((size_t)(sz1) != (size_t)(sz2)) { \ - char pn[MAXCOMLEN + 1]; \ - \ - proc_selfname(pn, MAXCOMLEN + 1); \ - printf("Size of %s used by %s in %s is different from " \ - "kernel's. Please recompile %s.\n", (tp), \ - (scall), pn, pn); \ - } \ +#define WARN_IF_AINFO_ADDR_CHANGED(sz1, sz2, scall, tp) do { \ + if ((size_t)(sz1) != (size_t)(sz2)) { \ + char pn[MAXCOMLEN + 1]; \ + \ + proc_selfname(pn, MAXCOMLEN + 1); \ + printf("Size of %s used by %s in %s is different from " \ + "kernel's. Please recompile %s.\n", (tp), \ + (scall), pn, pn); \ + } \ } while (0) /* - * Macro to warn about using ASID's outside the range [1 to PID_MAX] to + * Macro to warn about using ASID's outside the range [1 to PID_MAX] to * encourage userland code changes. */ -#define WARN_IF_BAD_ASID(asid, scall) do { \ - if (((asid) < 1 || (asid) > PID_MAX) && \ - (asid) != AU_ASSIGN_ASID) { \ - char pn[MAXCOMLEN + 1]; \ - \ - proc_selfname(pn, MAXCOMLEN + 1); \ - printf("%s in %s is using an ASID (%u) outside the " \ - "range [1 to %d]. Please change %s to use an ASID "\ - "within this range or use AU_ASSIGN_ASID.\n", \ - (scall), pn, (uint32_t)(asid), PID_MAX, pn); \ - } \ +#define WARN_IF_BAD_ASID(asid, scall) do { \ + if (((asid) < 1 || (asid) > PID_MAX) && \ + (asid) != AU_ASSIGN_ASID) { \ + char pn[MAXCOMLEN + 1]; \ + \ + proc_selfname(pn, MAXCOMLEN + 1); \ + printf("%s in %s is using an ASID (%u) outside the " \ + "range [1 to %d]. 
Please change %s to use an ASID "\ + "within this range or use AU_ASSIGN_ASID.\n", \ + (scall), pn, (uint32_t)(asid), PID_MAX, pn); \ + } \ } while (0) #else /* ! AUDIT_API_WARNINGS */ -#define WARN_IF_AINFO_ADDR_CHANGED(sz1, sz2, scall, tp) do { \ +#define WARN_IF_AINFO_ADDR_CHANGED(sz1, sz2, scall, tp) do { \ } while (0) -#define WARN_IF_BAD_ASID(asid, scall) do { \ +#define WARN_IF_BAD_ASID(asid, scall) do { \ } while (0) #endif /* AUDIT_API_WARNINGS */ @@ -172,7 +172,7 @@ audit(proc_t p, struct audit_args *uap, __unused int32_t *retval) mtx_unlock(&audit_mtx); if (IOTaskHasEntitlement(current_task(), - AU_CLASS_RESERVED_ENTITLEMENT)) { + AU_CLASS_RESERVED_ENTITLEMENT)) { /* Entitled tasks are trusted to add appropriate identity info */ add_identity_token = 0; } else { @@ -247,8 +247,8 @@ audit(proc_t p, struct audit_args *uap, __unused int32_t *retval) /* Create a new identity token for this buffer */ audit_identity_info_construct(&id_info); id_tok = au_to_identity(id_info.signer_type, id_info.signing_id, - id_info.signing_id_trunc, id_info.team_id, id_info.team_id_trunc, - id_info.cdhash, id_info.cdhash_len); + id_info.signing_id_trunc, id_info.team_id, id_info.team_id_trunc, + id_info.cdhash, id_info.cdhash_len); if (!id_tok) { error = ENOMEM; goto free_out; } @@ -271,7 +271,7 @@ audit(proc_t p, struct audit_args *uap, __unused int32_t *retval) /* Copy the old trailer */ memcpy(full_rec + bytes_copied, - rec + (uap->length - AUDIT_TRAILER_SIZE), AUDIT_TRAILER_SIZE); + rec + (uap->length - AUDIT_TRAILER_SIZE), AUDIT_TRAILER_SIZE); bytes_copied += AUDIT_TRAILER_SIZE; /* Fix the record size stored in the header token */ @@ -280,7 +280,7 @@ audit(proc_t p, struct audit_args *uap, __unused int32_t *retval) /* Fix the record size stored in the trailer token */ trl = (struct trl_tok_partial*) - (full_rec + bytes_copied - AUDIT_TRAILER_SIZE); + (full_rec + bytes_copied - AUDIT_TRAILER_SIZE); trl->len = htonl(bytes_copied); udata = full_rec; @@ -333,7 +333,7 @@ free_out: free(id_tok, M_AUDITBSM); } - return (error); + return error; } /* @@ -353,13 +353,15 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) #if CONFIG_MACF error = mac_system_check_auditon(kauth_cred_get(), uap->cmd); - if (error) - return (error); + if (error) { + return error; + } #endif if ((uap->length <= 0) || (uap->length > - (int)sizeof(union auditon_udata))) - return (EINVAL); + (int)sizeof(union auditon_udata))) { + return EINVAL; + } memset((void *)&udata, 0, sizeof(udata)); @@ -391,8 +393,9 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_SETCTLMODE: case A_SETEXPAFTER: error = copyin(uap->data, (void *)&udata, uap->length); - if (error) - return (error); + if (error) { + return error; + } AUDIT_ARG(auditon, &udata); AUDIT_ARG(len, uap->length); break; @@ -401,15 +404,15 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) /* Check appropriate privilege. */ switch (uap->cmd) { /* - * A_GETSINFO doesn't require privilege but only superuser - * gets to see the audit masks. + * A_GETSINFO doesn't require privilege but only superuser + * gets to see the audit masks.
*/ case A_GETSINFO_ADDR: if ((sizeof(udata.au_kau_info) != uap->length) || - (audit_session_lookup(udata.au_kau_info.ai_asid, - &udata.au_kau_info) != 0)) + (audit_session_lookup(udata.au_kau_info.ai_asid, + &udata.au_kau_info) != 0)) { error = EINVAL; - else if (!kauth_cred_issuser(kauth_cred_get())) { + } else if (!kauth_cred_issuser(kauth_cred_get())) { udata.au_kau_info.ai_mask.am_success = ~0; udata.au_kau_info.ai_mask.am_failure = ~0; } @@ -424,7 +427,7 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_SETCTLMODE: case A_SETEXPAFTER: if (!IOTaskHasEntitlement(current_task(), - AU_CLASS_RESERVED_ENTITLEMENT)) { + AU_CLASS_RESERVED_ENTITLEMENT)) { error = EPERM; } break; @@ -432,8 +435,9 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) error = suser(kauth_cred_get(), &p->p_acflag); break; } - if (error) - return (error); + if (error) { + return error; + } /* * If the audit subsystem is in external control mode, additional @@ -446,13 +450,14 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_SETPOLICY: case A_SETQCTRL: if (!IOTaskHasEntitlement(current_task(), - AU_CLASS_RESERVED_ENTITLEMENT)) { + AU_CLASS_RESERVED_ENTITLEMENT)) { error = EPERM; } break; } - if (error) - return (error); + if (error) { + return error; + } } /* @@ -464,37 +469,47 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_GETPOLICY: if (sizeof(udata.au_policy64) == uap->length) { mtx_lock(&audit_mtx); - if (!audit_fail_stop) + if (!audit_fail_stop) { udata.au_policy64 |= AUDIT_CNT; - if (audit_panic_on_write_fail) + } + if (audit_panic_on_write_fail) { udata.au_policy64 |= AUDIT_AHLT; - if (audit_argv) + } + if (audit_argv) { udata.au_policy64 |= AUDIT_ARGV; - if (audit_arge) + } + if (audit_arge) { udata.au_policy64 |= AUDIT_ARGE; + } mtx_unlock(&audit_mtx); break; } - if (sizeof(udata.au_policy) != uap->length) - return (EINVAL); + if (sizeof(udata.au_policy) != uap->length) { + return EINVAL; + } mtx_lock(&audit_mtx); - if (!audit_fail_stop) + if (!audit_fail_stop) { udata.au_policy |= AUDIT_CNT; - if (audit_panic_on_write_fail) + } + if (audit_panic_on_write_fail) { udata.au_policy |= AUDIT_AHLT; - if (audit_argv) + } + if (audit_argv) { udata.au_policy |= AUDIT_ARGV; - if (audit_arge) + } + if (audit_arge) { udata.au_policy |= AUDIT_ARGE; + } mtx_unlock(&audit_mtx); break; case A_OLDSETPOLICY: case A_SETPOLICY: if (sizeof(udata.au_policy64) == uap->length) { - if (udata.au_policy64 & ~(AUDIT_CNT|AUDIT_AHLT| - AUDIT_ARGV|AUDIT_ARGE)) - return (EINVAL); + if (udata.au_policy64 & ~(AUDIT_CNT | AUDIT_AHLT | + AUDIT_ARGV | AUDIT_ARGE)) { + return EINVAL; + } mtx_lock(&audit_mtx); audit_fail_stop = ((udata.au_policy64 & AUDIT_CNT) == 0); @@ -504,11 +519,12 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) audit_arge = (udata.au_policy64 & AUDIT_ARGE); mtx_unlock(&audit_mtx); break; - } + } if ((sizeof(udata.au_policy) != uap->length) || - (udata.au_policy & ~(AUDIT_CNT|AUDIT_AHLT|AUDIT_ARGV| - AUDIT_ARGE))) - return (EINVAL); + (udata.au_policy & ~(AUDIT_CNT | AUDIT_AHLT | AUDIT_ARGV | + AUDIT_ARGE))) { + return EINVAL; + } /* * XXX - Need to wake up waiters if the policy relaxes?
*/ @@ -521,16 +537,18 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) break; case A_GETKMASK: - if (sizeof(udata.au_mask) != uap->length) - return (EINVAL); + if (sizeof(udata.au_mask) != uap->length) { + return EINVAL; + } mtx_lock(&audit_mtx); udata.au_mask = audit_nae_mask; mtx_unlock(&audit_mtx); break; case A_SETKMASK: - if (sizeof(udata.au_mask) != uap->length) - return (EINVAL); + if (sizeof(udata.au_mask) != uap->length) { + return EINVAL; + } mtx_lock(&audit_mtx); audit_nae_mask = udata.au_mask; AUDIT_CHECK_IF_KEVENTS_MASK(audit_nae_mask); @@ -549,13 +567,14 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) (u_int64_t)audit_qctrl.aq_bufsz; udata.au_qctrl64.aq64_delay = (u_int64_t)audit_qctrl.aq_delay; - udata.au_qctrl64.aq64_minfree = + udata.au_qctrl64.aq64_minfree = (int64_t)audit_qctrl.aq_minfree; mtx_unlock(&audit_mtx); break; - } - if (sizeof(udata.au_qctrl) != uap->length) - return (EINVAL); + } + if (sizeof(udata.au_qctrl) != uap->length) { + return EINVAL; + } mtx_lock(&audit_mtx); udata.au_qctrl = audit_qctrl; mtx_unlock(&audit_mtx); @@ -564,21 +583,22 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_OLDSETQCTRL: case A_SETQCTRL: if (sizeof(udata.au_qctrl64) == uap->length) { - if ((udata.au_qctrl64.aq64_hiwater > AQ_MAXHIGH) || - (udata.au_qctrl64.aq64_lowater >= - udata.au_qctrl64.aq64_hiwater) || - (udata.au_qctrl64.aq64_bufsz > AQ_MAXBUFSZ) || - (udata.au_qctrl64.aq64_minfree < 0) || - (udata.au_qctrl64.aq64_minfree > 100)) - return (EINVAL); + if ((udata.au_qctrl64.aq64_hiwater > AQ_MAXHIGH) || + (udata.au_qctrl64.aq64_lowater >= + udata.au_qctrl64.aq64_hiwater) || + (udata.au_qctrl64.aq64_bufsz > AQ_MAXBUFSZ) || + (udata.au_qctrl64.aq64_minfree < 0) || + (udata.au_qctrl64.aq64_minfree > 100)) { + return EINVAL; + } mtx_lock(&audit_mtx); audit_qctrl.aq_hiwater = - (int)udata.au_qctrl64.aq64_hiwater; + (int)udata.au_qctrl64.aq64_hiwater; audit_qctrl.aq_lowater = - (int)udata.au_qctrl64.aq64_lowater; + (int)udata.au_qctrl64.aq64_lowater; audit_qctrl.aq_bufsz = - (int)udata.au_qctrl64.aq64_bufsz; - audit_qctrl.aq_minfree = + (int)udata.au_qctrl64.aq64_bufsz; + audit_qctrl.aq_minfree = (int)udata.au_qctrl64.aq64_minfree; audit_qctrl.aq_delay = -1; /* Not used. 
*/ mtx_unlock(&audit_mtx); @@ -589,8 +609,9 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) (udata.au_qctrl.aq_lowater >= udata.au_qctrl.aq_hiwater) || (udata.au_qctrl.aq_bufsz > AQ_MAXBUFSZ) || (udata.au_qctrl.aq_minfree < 0) || - (udata.au_qctrl.aq_minfree > 100)) - return (EINVAL); + (udata.au_qctrl.aq_minfree > 100)) { + return EINVAL; + } mtx_lock(&audit_mtx); audit_qctrl = udata.au_qctrl; @@ -600,41 +621,44 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) break; case A_GETCWD: - return (ENOSYS); + return ENOSYS; case A_GETCAR: - return (ENOSYS); + return ENOSYS; case A_GETSTAT: - return (ENOSYS); + return ENOSYS; case A_SETSTAT: - return (ENOSYS); + return ENOSYS; case A_SETUMASK: - return (ENOSYS); + return ENOSYS; case A_SETSMASK: - return (ENOSYS); + return ENOSYS; case A_OLDGETCOND: case A_GETCOND: if (sizeof(udata.au_cond64) == uap->length) { mtx_lock(&audit_mtx); - if (audit_enabled && !audit_suspended) + if (audit_enabled && !audit_suspended) { udata.au_cond64 = AUC_AUDITING; - else + } else { udata.au_cond64 = AUC_NOAUDIT; + } mtx_unlock(&audit_mtx); break; } - if (sizeof(udata.au_cond) != uap->length) - return (EINVAL); + if (sizeof(udata.au_cond) != uap->length) { + return EINVAL; + } mtx_lock(&audit_mtx); - if (audit_enabled && !audit_suspended) + if (audit_enabled && !audit_suspended) { udata.au_cond = AUC_AUDITING; - else + } else { udata.au_cond = AUC_NOAUDIT; + } mtx_unlock(&audit_mtx); break; @@ -642,10 +666,12 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_SETCOND: if (sizeof(udata.au_cond64) == uap->length) { mtx_lock(&audit_mtx); - if (udata.au_cond64 == AUC_NOAUDIT) + if (udata.au_cond64 == AUC_NOAUDIT) { audit_suspended = 1; - if (udata.au_cond64 == AUC_AUDITING) + } + if (udata.au_cond64 == AUC_AUDITING) { audit_suspended = 0; + } if (udata.au_cond64 == AUC_DISABLED) { audit_suspended = 1; mtx_unlock(&audit_mtx); @@ -656,13 +682,15 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) break; } if (sizeof(udata.au_cond) != uap->length) { - return (EINVAL); + return EINVAL; } mtx_lock(&audit_mtx); - if (udata.au_cond == AUC_NOAUDIT) + if (udata.au_cond == AUC_NOAUDIT) { audit_suspended = 1; - if (udata.au_cond == AUC_AUDITING) + } + if (udata.au_cond == AUC_AUDITING) { audit_suspended = 0; + } if (udata.au_cond == AUC_DISABLED) { audit_suspended = 1; mtx_unlock(&audit_mtx); @@ -673,33 +701,37 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) break; case A_GETCLASS: - if (sizeof(udata.au_evclass) != uap->length) - return (EINVAL); + if (sizeof(udata.au_evclass) != uap->length) { + return EINVAL; + } udata.au_evclass.ec_class = au_event_class( - udata.au_evclass.ec_number); + udata.au_evclass.ec_number); break; case A_SETCLASS: - if (sizeof(udata.au_evclass) != uap->length) - return (EINVAL); + if (sizeof(udata.au_evclass) != uap->length) { + return EINVAL; + } au_evclassmap_insert(udata.au_evclass.ec_number, udata.au_evclass.ec_class); break; case A_GETPINFO: if ((sizeof(udata.au_aupinfo) != uap->length) || - IS_NOT_VALID_PID(udata.au_aupinfo.ap_pid)) - return (EINVAL); - if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL) - return (ESRCH); + IS_NOT_VALID_PID(udata.au_aupinfo.ap_pid)) { + return EINVAL; + } + if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL) { + return ESRCH; + } scred = kauth_cred_proc_ref(tp); if (scred->cr_audit.as_aia_p->ai_termid.at_type == AU_IPv6) { kauth_cred_unref(&scred); proc_rele(tp); - return (EINVAL); + 
return EINVAL; } - + udata.au_aupinfo.ap_auid = scred->cr_audit.as_aia_p->ai_auid; udata.au_aupinfo.ap_mask.am_success = @@ -719,10 +751,12 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_SETPMASK: if ((sizeof(udata.au_aupinfo) != uap->length) || - IS_NOT_VALID_PID(udata.au_aupinfo.ap_pid)) - return (EINVAL); - if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL) - return (ESRCH); + IS_NOT_VALID_PID(udata.au_aupinfo.ap_pid)) { + return EINVAL; + } + if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL) { + return ESRCH; + } scred = kauth_cred_proc_ref(tp); bcopy(scred->cr_audit.as_aia_p, &aia, sizeof(aia)); kauth_cred_unref(&scred); @@ -734,23 +768,26 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) error = audit_session_setaia(tp, &aia); proc_rele(tp); tp = PROC_NULL; - if (error) - return (error); + if (error) { + return error; + } break; case A_SETFSIZE: if ((sizeof(udata.au_fstat) != uap->length) || ((udata.au_fstat.af_filesz != 0) && - (udata.au_fstat.af_filesz < MIN_AUDIT_FILE_SIZE))) - return (EINVAL); + (udata.au_fstat.af_filesz < MIN_AUDIT_FILE_SIZE))) { + return EINVAL; + } mtx_lock(&audit_mtx); audit_fstat.af_filesz = udata.au_fstat.af_filesz; mtx_unlock(&audit_mtx); break; case A_GETFSIZE: - if (sizeof(udata.au_fstat) != uap->length) - return (EINVAL); + if (sizeof(udata.au_fstat) != uap->length) { + return EINVAL; + } mtx_lock(&audit_mtx); udata.au_fstat.af_filesz = audit_fstat.af_filesz; udata.au_fstat.af_currsz = audit_fstat.af_currsz; @@ -759,10 +796,12 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_GETPINFO_ADDR: if ((sizeof(udata.au_aupinfo_addr) != uap->length) || - IS_NOT_VALID_PID(udata.au_aupinfo_addr.ap_pid)) - return (EINVAL); - if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL) - return (ESRCH); + IS_NOT_VALID_PID(udata.au_aupinfo_addr.ap_pid)) { + return EINVAL; + } + if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL) { + return ESRCH; + } WARN_IF_AINFO_ADDR_CHANGED(uap->length, sizeof(auditpinfo_addr_t), "auditon(A_GETPINFO_ADDR,...)", "auditpinfo_addr_t"); @@ -775,7 +814,7 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) scred->cr_audit.as_mask.am_success; udata.au_aupinfo_addr.ap_mask.am_failure = scred->cr_audit.as_mask.am_failure; - bcopy(&scred->cr_audit.as_aia_p->ai_termid, + bcopy(&scred->cr_audit.as_aia_p->ai_termid, &udata.au_aupinfo_addr.ap_termid, sizeof(au_tid_addr_t)); udata.au_aupinfo_addr.ap_flags = @@ -786,50 +825,56 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) break; case A_GETKAUDIT: - if (sizeof(udata.au_kau_info) != uap->length) - return (EINVAL); + if (sizeof(udata.au_kau_info) != uap->length) { + return EINVAL; + } audit_get_kinfo(&udata.au_kau_info); break; case A_SETKAUDIT: if ((sizeof(udata.au_kau_info) != uap->length) || (udata.au_kau_info.ai_termid.at_type != AU_IPv4 && - udata.au_kau_info.ai_termid.at_type != AU_IPv6)) - return (EINVAL); + udata.au_kau_info.ai_termid.at_type != AU_IPv6)) { + return EINVAL; + } audit_set_kinfo(&udata.au_kau_info); break; case A_SENDTRIGGER: - if ((sizeof(udata.au_trigger) != uap->length) || + if ((sizeof(udata.au_trigger) != uap->length) || (udata.au_trigger < AUDIT_TRIGGER_MIN) || - (udata.au_trigger > AUDIT_TRIGGER_MAX)) - return (EINVAL); - return (audit_send_trigger(udata.au_trigger)); + (udata.au_trigger > AUDIT_TRIGGER_MAX)) { + return EINVAL; + } + return audit_send_trigger(udata.au_trigger); case A_GETSINFO_ADDR: /* Handled above before switch(). 
*/ break; case A_GETSFLAGS: - if (sizeof(udata.au_flags) != uap->length) - return (EINVAL); + if (sizeof(udata.au_flags) != uap->length) { + return EINVAL; + } bcopy(&(kauth_cred_get()->cr_audit.as_aia_p->ai_flags), &udata.au_flags, sizeof(udata.au_flags)); break; case A_SETSFLAGS: - if (sizeof(udata.au_flags) != uap->length) - return (EINVAL); + if (sizeof(udata.au_flags) != uap->length) { + return EINVAL; + } bcopy(kauth_cred_get()->cr_audit.as_aia_p, &aia, sizeof(aia)); aia.ai_flags = udata.au_flags; error = audit_session_setaia(p, &aia); - if (error) - return (error); + if (error) { + return error; + } break; case A_GETCTLMODE: if (sizeof(udata.au_ctl_mode) != uap->length) { - return (EINVAL); + return EINVAL; } mtx_lock(&audit_mtx); udata.au_ctl_mode = audit_ctl_mode; @@ -838,7 +883,7 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_SETCTLMODE: if (sizeof(udata.au_ctl_mode) != uap->length) { - return (EINVAL); + return EINVAL; } mtx_lock(&audit_mtx); @@ -849,7 +894,7 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) audit_ctl_mode = AUDIT_CTLMODE_EXTERNAL; } else { mtx_unlock(&audit_mtx); - return (EINVAL); + return EINVAL; } mtx_unlock(&audit_mtx); @@ -857,7 +902,7 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_GETEXPAFTER: if (sizeof(udata.au_expire_after) != uap->length) { - return (EINVAL); + return EINVAL; } mtx_lock(&audit_mtx); udata.au_expire_after.age = audit_expire_after.age; @@ -868,7 +913,7 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_SETEXPAFTER: if (sizeof(udata.au_expire_after) != uap->length) { - return (EINVAL); + return EINVAL; } mtx_lock(&audit_mtx); audit_expire_after.age = udata.au_expire_after.age; @@ -878,7 +923,7 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) break; default: - return (EINVAL); + return EINVAL; } /* @@ -905,12 +950,13 @@ auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval) case A_GETCTLMODE: case A_GETEXPAFTER: error = copyout((void *)&udata, uap->data, uap->length); - if (error) - return (ENOSYS); + if (error) { + return ENOSYS; + } break; } - return (0); + return 0; } /* @@ -926,18 +972,20 @@ getauid(proc_t p, struct getauid_args *uap, __unused int32_t *retval) #if CONFIG_MACF error = mac_proc_check_getauid(p); - if (error) - return (error); + if (error) { + return error; + } #endif scred = kauth_cred_proc_ref(p); id = scred->cr_audit.as_aia_p->ai_auid; kauth_cred_unref(&scred); error = copyout((void *)&id, uap->auid, sizeof(id)); - if (error) - return (error); + if (error) { + return error; + } - return (0); + return 0; } /* ARGSUSED */ @@ -945,26 +993,28 @@ int setauid(proc_t p, struct setauid_args *uap, __unused int32_t *retval) { int error; - au_id_t id; + au_id_t id; kauth_cred_t scred; struct auditinfo_addr aia; error = copyin(uap->auid, &id, sizeof(id)); - if (error) - return (error); + if (error) { + return error; + } AUDIT_ARG(auid, id); #if CONFIG_MACF error = mac_proc_check_setauid(p, id); - if (error) - return (error); + if (error) { + return error; + } #endif scred = kauth_cred_proc_ref(p); error = suser(scred, &p->p_acflag); if (error) { kauth_cred_unref(&scred); - return (error); + return error; } bcopy(scred->cr_audit.as_aia_p, &aia, sizeof(aia)); @@ -976,7 +1026,7 @@ setauid(proc_t p, struct setauid_args *uap, __unused int32_t *retval) aia.ai_auid = id; error = audit_session_setaia(p, &aia); - return (error); + return error; } static int @@ -986,7 +1036,7 @@ 
getaudit_addr_internal(proc_t p, user_addr_t user_addr, size_t length) auditinfo_addr_t aia; scred = kauth_cred_proc_ref(p); - bcopy(scred->cr_audit.as_aia_p, &aia, sizeof (auditinfo_addr_t)); + bcopy(scred->cr_audit.as_aia_p, &aia, sizeof(auditinfo_addr_t)); /* * Only superuser gets to see the real mask. */ @@ -996,7 +1046,7 @@ getaudit_addr_internal(proc_t p, user_addr_t user_addr, size_t length) } kauth_cred_unref(&scred); - return (copyout(&aia, user_addr, min(sizeof(aia), length))); + return copyout(&aia, user_addr, min(sizeof(aia), length)); } /* ARGSUSED */ @@ -1007,13 +1057,14 @@ getaudit_addr(proc_t p, struct getaudit_addr_args *uap, #if CONFIG_MACF int error = mac_proc_check_getaudit(p); - if (error) - return (error); + if (error) { + return error; + } #endif /* CONFIG_MACF */ WARN_IF_AINFO_ADDR_CHANGED(uap->length, sizeof(auditinfo_addr_t), "getaudit_addr(2)", "auditinfo_addr_t"); - - return (getaudit_addr_internal(p, uap->auditinfo_addr, uap->length)); + + return getaudit_addr_internal(p, uap->auditinfo_addr, uap->length); } /* ARGSUSED */ @@ -1026,29 +1077,33 @@ setaudit_addr(proc_t p, struct setaudit_addr_args *uap, int error; bzero(&aia, sizeof(auditinfo_addr_t)); - error = copyin(uap->auditinfo_addr, &aia, + error = copyin(uap->auditinfo_addr, &aia, min(sizeof(aia), uap->length)); - if (error) - return (error); + if (error) { + return error; + } AUDIT_ARG(auditinfo_addr, &aia); if (aia.ai_termid.at_type != AU_IPv6 && - aia.ai_termid.at_type != AU_IPv4) - return (EINVAL); - if (aia.ai_asid != AU_ASSIGN_ASID && - (uint32_t)aia.ai_asid > ASSIGNED_ASID_MAX) - return (EINVAL); + aia.ai_termid.at_type != AU_IPv4) { + return EINVAL; + } + if (aia.ai_asid != AU_ASSIGN_ASID && + (uint32_t)aia.ai_asid > ASSIGNED_ASID_MAX) { + return EINVAL; + } #if CONFIG_MACF error = mac_proc_check_setaudit(p, &aia); - if (error) - return (error); + if (error) { + return error; + } #endif scred = kauth_cred_proc_ref(p); error = suser(scred, &p->p_acflag); if (error) { kauth_cred_unref(&scred); - return (error); + return error; } WARN_IF_AINFO_ADDR_CHANGED(uap->length, sizeof(auditinfo_addr_t), @@ -1057,22 +1112,25 @@ setaudit_addr(proc_t p, struct setaudit_addr_args *uap, kauth_cred_unref(&scred); AUDIT_CHECK_IF_KEVENTS_MASK(aia.ai_mask); - if (aia.ai_asid == AU_DEFAUDITSID) + if (aia.ai_asid == AU_DEFAUDITSID) { aia.ai_asid = AU_ASSIGN_ASID; + } error = audit_session_setaia(p, &aia); - if (error) - return (error); + if (error) { + return error; + } /* * If asked to assign an ASID then let the user know what the ASID is * by copying the auditinfo_addr struct back out. 
*/ - if (aia.ai_asid == AU_ASSIGN_ASID) + if (aia.ai_asid == AU_ASSIGN_ASID) { error = getaudit_addr_internal(p, uap->auditinfo_addr, uap->length); + } - return (error); + return error; } /* @@ -1090,8 +1148,9 @@ auditctl(proc_t p, struct auditctl_args *uap, __unused int32_t *retval) au_ctlmode_t ctlmode; error = suser(kauth_cred_get(), &p->p_acflag); - if (error) - return (error); + if (error) { + return error; + } ctlmode = audit_ctl_mode; @@ -1099,8 +1158,8 @@ auditctl(proc_t p, struct auditctl_args *uap, __unused int32_t *retval) * Do not allow setting of a path when auditing is in reserved mode */ if (ctlmode == AUDIT_CTLMODE_EXTERNAL && - !IOTaskHasEntitlement(current_task(), AU_AUDITCTL_RESERVED_ENTITLEMENT)) { - return (EPERM); + !IOTaskHasEntitlement(current_task(), AU_AUDITCTL_RESERVED_ENTITLEMENT)) { + return EPERM; } vp = NULL; @@ -1114,15 +1173,17 @@ auditctl(proc_t p, struct auditctl_args *uap, __unused int32_t *retval) * XXX Changes API slightly. NULL path no longer disables audit but * returns EINVAL. */ - if (uap->path == USER_ADDR_NULL) - return (EINVAL); + if (uap->path == USER_ADDR_NULL) { + return EINVAL; + } NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF | AUDITVNPATH1, (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32), uap->path, vfs_context_current()); error = vn_open(&nd, AUDIT_OPEN_FLAGS, 0); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; #if CONFIG_MACF /* @@ -1137,13 +1198,13 @@ auditctl(proc_t p, struct auditctl_args *uap, __unused int32_t *retval) if (error) { vn_close(vp, AUDIT_CLOSE_FLAGS, vfs_context_current()); vnode_put(vp); - return (error); + return error; } #endif if (vp->v_type != VREG) { vn_close(vp, AUDIT_CLOSE_FLAGS, vfs_context_current()); vnode_put(vp); - return (EINVAL); + return EINVAL; } mtx_lock(&audit_mtx); /* @@ -1161,7 +1222,7 @@ auditctl(proc_t p, struct auditctl_args *uap, __unused int32_t *retval) audit_rotate_vnode(cred, vp); vnode_put(vp); - return (error); + return error; } #else /* !CONFIG_AUDIT */ @@ -1171,7 +1232,7 @@ audit(proc_t p, struct audit_args *uap, int32_t *retval) { #pragma unused(p, uap, retval) - return (ENOSYS); + return ENOSYS; } int @@ -1179,7 +1240,7 @@ auditon(proc_t p, struct auditon_args *uap, int32_t *retval) { #pragma unused(p, uap, retval) - return (ENOSYS); + return ENOSYS; } int @@ -1187,7 +1248,7 @@ getauid(proc_t p, struct getauid_args *uap, int32_t *retval) { #pragma unused(p, uap, retval) - return (ENOSYS); + return ENOSYS; } int @@ -1195,7 +1256,7 @@ setauid(proc_t p, struct setauid_args *uap, int32_t *retval) { #pragma unused(p, uap, retval) - return (ENOSYS); + return ENOSYS; } int @@ -1203,7 +1264,7 @@ getaudit_addr(proc_t p, struct getaudit_addr_args *uap, int32_t *retval) { #pragma unused(p, uap, retval) - return (ENOSYS); + return ENOSYS; } int @@ -1211,7 +1272,7 @@ setaudit_addr(proc_t p, struct setaudit_addr_args *uap, int32_t *retval) { #pragma unused(p, uap, retval) - return (ENOSYS); + return ENOSYS; } int @@ -1219,7 +1280,7 @@ auditctl(proc_t p, struct auditctl_args *uap, int32_t *retval) { #pragma unused(p, uap, retval) - return (ENOSYS); + return ENOSYS; } #endif /* CONFIG_AUDIT */ diff --git a/bsd/security/audit/audit_worker.c b/bsd/security/audit/audit_worker.c index 85b5c8241..1b5ab82c3 100644 --- a/bsd/security/audit/audit_worker.c +++ b/bsd/security/audit/audit_worker.c @@ -90,29 +90,29 @@ static thread_t audit_thread; * cleared when the next rotation takes place. It is also protected by * audit_worker_sl. 
*/ -static int audit_file_rotate_wait; -static struct slck audit_worker_sl; -static struct vfs_context audit_ctx; -static struct vnode *audit_vp; - -#define AUDIT_WORKER_SX_INIT() slck_init(&audit_worker_sl, \ - "audit_worker_sl") -#define AUDIT_WORKER_SX_XLOCK() slck_lock(&audit_worker_sl) -#define AUDIT_WORKER_SX_XUNLOCK() slck_unlock(&audit_worker_sl) -#define AUDIT_WORKER_SX_ASSERT() slck_assert(&audit_worker_sl, SL_OWNED) -#define AUDIT_WORKER_SX_DESTROY() slck_destroy(&audit_worker_sl) +static int audit_file_rotate_wait; +static struct slck audit_worker_sl; +static struct vfs_context audit_ctx; +static struct vnode *audit_vp; + +#define AUDIT_WORKER_SX_INIT() slck_init(&audit_worker_sl, \ + "audit_worker_sl") +#define AUDIT_WORKER_SX_XLOCK() slck_lock(&audit_worker_sl) +#define AUDIT_WORKER_SX_XUNLOCK() slck_unlock(&audit_worker_sl) +#define AUDIT_WORKER_SX_ASSERT() slck_assert(&audit_worker_sl, SL_OWNED) +#define AUDIT_WORKER_SX_DESTROY() slck_destroy(&audit_worker_sl) /* * The audit_q_draining flag is set when audit is disabled and the audit * worker queue is being drained. */ -static int audit_q_draining; +static int audit_q_draining; /* * The special kernel audit record, audit_drain_kar, is used to mark the end of * the queue when draining it. */ -static struct kaudit_record audit_drain_kar = { +static struct kaudit_record audit_drain_kar = { .k_ar = { .ar_event = AUE_NULL, }, @@ -140,13 +140,15 @@ audit_record_write(struct vnode *vp, struct vfs_context *ctx, void *data, uint64_t temp; off_t file_size; - AUDIT_WORKER_SX_ASSERT(); /* audit_file_rotate_wait. */ + AUDIT_WORKER_SX_ASSERT(); /* audit_file_rotate_wait. */ - if (vp == NULL) + if (vp == NULL) { return; + } - if (vnode_getwithref(vp)) + if (vnode_getwithref(vp)) { return /*(ENOENT)*/; + } mnt_stat = &vp->v_mount->mnt_vfsstat; @@ -156,11 +158,13 @@ audit_record_write(struct vnode *vp, struct vfs_context *ctx, void *data, * operations to indicate a future inability to write to the file. 
*/ error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT); - if (error) + if (error) { goto fail; + } error = vnode_size(vp, &file_size, ctx); - if (error) + if (error) { goto fail; + } audit_fstat.af_currsz = (u_quad_t)file_size; /* @@ -203,9 +207,10 @@ audit_record_write(struct vnode *vp, struct vfs_context *ctx, void *data, temp = mnt_stat->f_blocks / (100 / audit_qctrl.aq_minfree); if (mnt_stat->f_bfree < temp && ppsratecheck(&last_lowspace_trigger, - &cur_lowspace_trigger, 1)) - (void)audit_send_trigger( - AUDIT_TRIGGER_LOW_SPACE); + &cur_lowspace_trigger, 1)) { + (void)audit_send_trigger( + AUDIT_TRIGGER_LOW_SPACE); + } } /* @@ -236,10 +241,11 @@ audit_record_write(struct vnode *vp, struct vfs_context *ctx, void *data, if ((unsigned long)((audit_q_len + audit_pre_q_len + 1) * MAX_AUDIT_RECORD_SIZE) / mnt_stat->f_bsize >= (unsigned long)(mnt_stat->f_bfree)) { - if (ppsratecheck(&last_fail, &cur_fail, 1)) + if (ppsratecheck(&last_fail, &cur_fail, 1)) { printf("audit_record_write: free space " "below size of audit queue, failing " "stop\n"); + } audit_in_failure = 1; } else if (audit_in_failure) { /* @@ -251,12 +257,13 @@ audit_record_write(struct vnode *vp, struct vfs_context *ctx, void *data, } error = vn_rdwr(UIO_WRITE, vp, data, len, (off_t)0, UIO_SYSSPACE, - IO_APPEND|IO_UNIT, vfs_context_ucred(ctx), NULL, + IO_APPEND | IO_UNIT, vfs_context_ucred(ctx), NULL, vfs_context_proc(ctx)); - if (error == ENOSPC) + if (error == ENOSPC) { goto fail_enospc; - else if (error) + } else if (error) { goto fail; + } /* * Catch completion of a queue drain here; if we're draining and the @@ -299,8 +306,9 @@ fail: if (audit_panic_on_write_fail) { (void)VNOP_FSYNC(vp, MNT_WAIT, ctx); panic("audit_worker: write error %d\n", error); - } else if (ppsratecheck(&last_fail, &cur_fail, 1)) + } else if (ppsratecheck(&last_fail, &cur_fail, 1)) { printf("audit_worker: write error %d\n", error); + } vnode_put(vp); } @@ -331,8 +339,9 @@ audit_worker_process_record(struct kaudit_record *ar) (ar->k_ar_commit & AR_PRESELECT_TRAIL)) { AUDIT_WORKER_SX_XLOCK(); trail_locked = 1; - } else + } else { trail_locked = 0; + } /* * First, handle the user record, if any: commit to the system trail @@ -346,22 +355,25 @@ audit_worker_process_record(struct kaudit_record *ar) } if ((ar->k_ar_commit & AR_COMMIT_USER) && - (ar->k_ar_commit & AR_PRESELECT_USER_PIPE)) + (ar->k_ar_commit & AR_PRESELECT_USER_PIPE)) { audit_pipe_submit_user(ar->k_udata, ar->k_ulen); + } if (!(ar->k_ar_commit & AR_COMMIT_KERNEL) || ((ar->k_ar_commit & AR_PRESELECT_PIPE) == 0 && (ar->k_ar_commit & AR_PRESELECT_TRAIL) == 0 && - (ar->k_ar_commit & AR_PRESELECT_FILTER) == 0)) + (ar->k_ar_commit & AR_PRESELECT_FILTER) == 0)) { goto out; + } auid = ar->k_ar.ar_subj_auid; event = ar->k_ar.ar_event; class = au_event_class(event); - if (ar->k_ar.ar_errno == 0) + if (ar->k_ar.ar_errno == 0) { sorf = AU_PRS_SUCCESS; - else + } else { sorf = AU_PRS_FAILURE; + } error = kaudit_to_bsm(ar, &bsm); switch (error) { @@ -384,13 +396,13 @@ audit_worker_process_record(struct kaudit_record *ar) audit_record_write(audit_vp, &audit_ctx, bsm->data, bsm->len); } - if (ar->k_ar_commit & AR_PRESELECT_PIPE) + if (ar->k_ar_commit & AR_PRESELECT_PIPE) { audit_pipe_submit(auid, event, class, sorf, ar->k_ar_commit & AR_PRESELECT_TRAIL, bsm->data, bsm->len); + } if (ar->k_ar_commit & AR_PRESELECT_FILTER) { - /* * XXXss - This needs to be generalized so new filters can * be easily plugged in. 
@@ -401,8 +413,9 @@ audit_worker_process_record(struct kaudit_record *ar) kau_free(bsm); out: - if (trail_locked) + if (trail_locked) { AUDIT_WORKER_SX_XUNLOCK(); + } } /* @@ -422,8 +435,9 @@ audit_worker(void) struct kaudit_record *ar; int lowater_signal; - if (audit_ctx.vc_thread == NULL) + if (audit_ctx.vc_thread == NULL) { audit_ctx.vc_thread = current_thread(); + } TAILQ_INIT(&ar_worklist); mtx_lock(&audit_mtx); @@ -433,9 +447,10 @@ audit_worker(void) /* * Wait for a record. */ - while (TAILQ_EMPTY(&audit_q)) + while (TAILQ_EMPTY(&audit_q)) { cv_wait_continuation(&audit_worker_cv, &audit_mtx, (thread_continue_t)audit_worker); + } /* * If there are records in the global audit record queue, @@ -448,12 +463,14 @@ audit_worker(void) while ((ar = TAILQ_FIRST(&audit_q))) { TAILQ_REMOVE(&audit_q, ar, k_q); audit_q_len--; - if (audit_q_len == audit_qctrl.aq_lowater) + if (audit_q_len == audit_qctrl.aq_lowater) { lowater_signal++; + } TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q); } - if (lowater_signal) + if (lowater_signal) { cv_broadcast(&audit_watermark_cv); + } mtx_unlock(&audit_mtx); while ((ar = TAILQ_FIRST(&ar_worklist))) { @@ -506,16 +523,18 @@ audit_rotate_vnode(kauth_cred_t cred, struct vnode *vp) * we close the audit trail. */ audit_q_draining = 1; - while (audit_q_len >= audit_qctrl.aq_hiwater) + while (audit_q_len >= audit_qctrl.aq_hiwater) { cv_wait(&audit_watermark_cv, &audit_mtx); + } TAILQ_INSERT_TAIL(&audit_q, &audit_drain_kar, k_q); audit_q_len++; cv_signal(&audit_worker_cv); } /* If the audit queue is draining then wait here until it's done. */ - while (audit_q_draining) + while (audit_q_draining) { cv_wait(&audit_drain_cv, &audit_mtx); + } mtx_unlock(&audit_mtx); @@ -540,9 +559,10 @@ audit_rotate_vnode(kauth_cred_t cred, struct vnode *vp) vn_close(old_audit_vp, AUDIT_CLOSE_FLAGS, vfs_context_kernel()); vnode_put(old_audit_vp); - } else + } else { printf("audit_rotate_vnode: Couldn't close " "audit file.\n"); + } kauth_cred_unref(&old_audit_cred); } } @@ -550,10 +570,10 @@ audit_rotate_vnode(kauth_cred_t cred, struct vnode *vp) void audit_worker_init(void) { - AUDIT_WORKER_SX_INIT(); kernel_thread_start((thread_continue_t)audit_worker, NULL, &audit_thread); - if (audit_thread == THREAD_NULL) + if (audit_thread == THREAD_NULL) { panic("audit_worker_init: Couldn't create audit_worker thread"); + } } diff --git a/bsd/sys/_endian.h b/bsd/sys/_endian.h index 6e00b6c0b..4b8daa852 100644 --- a/bsd/sys/_endian.h +++ b/bsd/sys/_endian.h @@ -2,7 +2,7 @@ * Copyright (c) 2004, 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,7 +31,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -40,10 +40,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -51,7 +51,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -88,7 +88,7 @@ */ #ifndef _SYS__ENDIAN_H_ -#define _SYS__ENDIAN_H_ +#define _SYS__ENDIAN_H_ #include @@ -99,10 +99,10 @@ #if defined(lint) __BEGIN_DECLS -__uint16_t ntohs(__uint16_t); -__uint16_t htons(__uint16_t); -__uint32_t ntohl(__uint32_t); -__uint32_t htonl(__uint32_t); +__uint16_t ntohs(__uint16_t); +__uint16_t htons(__uint16_t); +__uint32_t ntohl(__uint32_t); +__uint32_t htonl(__uint32_t); __END_DECLS #elif __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN @@ -125,27 +125,27 @@ __END_DECLS #define HTONLL(x) (x) #endif /* defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) */ -#else /* __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN */ +#else /* __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN */ #include -#define ntohs(x) __DARWIN_OSSwapInt16(x) -#define htons(x) __DARWIN_OSSwapInt16(x) +#define ntohs(x) __DARWIN_OSSwapInt16(x) +#define htons(x) __DARWIN_OSSwapInt16(x) -#define ntohl(x) __DARWIN_OSSwapInt32(x) -#define htonl(x) __DARWIN_OSSwapInt32(x) +#define ntohl(x) __DARWIN_OSSwapInt32(x) +#define htonl(x) __DARWIN_OSSwapInt32(x) -#if defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) +#if defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) #define ntohll(x) __DARWIN_OSSwapInt64(x) #define htonll(x) __DARWIN_OSSwapInt64(x) -#define NTOHL(x) (x) = ntohl((__uint32_t)x) -#define NTOHS(x) (x) = ntohs((__uint16_t)x) -#define NTOHLL(x) (x) = ntohll((__uint64_t)x) -#define HTONL(x) (x) = htonl((__uint32_t)x) -#define HTONS(x) (x) = htons((__uint16_t)x) -#define HTONLL(x) (x) = htonll((__uint64_t)x) +#define NTOHL(x) (x) = ntohl((__uint32_t)x) +#define NTOHS(x) (x) = ntohs((__uint16_t)x) +#define NTOHLL(x) (x) = ntohll((__uint64_t)x) +#define HTONL(x) (x) = htonl((__uint32_t)x) +#define HTONS(x) (x) = htons((__uint16_t)x) +#define HTONLL(x) (x) = htonll((__uint64_t)x) #endif /* defined(KERNEL) || (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) */ #endif /* __DARWIN_BYTE_ORDER */ #endif /* !_SYS__ENDIAN_H_ */ diff --git a/bsd/sys/_select.h b/bsd/sys/_select.h index 6f709210c..567d62185 100644 --- a/bsd/sys/_select.h +++ b/bsd/sys/_select.h @@ -2,7 +2,7 @@ * Copyright (c) 2005, 2007 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,19 +34,19 @@ * FD_SETSIZE will return an error of EINVAL. */ #ifndef _SYS__SELECT_H_ -#define _SYS__SELECT_H_ +#define _SYS__SELECT_H_ -int select(int, fd_set * __restrict, fd_set * __restrict, - fd_set * __restrict, struct timeval * __restrict) +int select(int, fd_set * __restrict, fd_set * __restrict, + fd_set * __restrict, struct timeval * __restrict) #if defined(_DARWIN_C_SOURCE) || defined(_DARWIN_UNLIMITED_SELECT) - __DARWIN_EXTSN_C(select) +__DARWIN_EXTSN_C(select) #else /* !_DARWIN_C_SOURCE && !_DARWIN_UNLIMITED_SELECT */ # if defined(__LP64__) && !__DARWIN_NON_CANCELABLE - __DARWIN_1050(select) +__DARWIN_1050(select) # else /* !__LP64__ || __DARWIN_NON_CANCELABLE */ - __DARWIN_ALIAS_C(select) +__DARWIN_ALIAS_C(select) # endif /* __LP64__ && !__DARWIN_NON_CANCELABLE */ #endif /* _DARWIN_C_SOURCE || _DARWIN_UNLIMITED_SELECT */ - ; +; #endif /* !_SYS__SELECT_H_ */ diff --git a/bsd/sys/_structs.h b/bsd/sys/_structs.h index 63a41609f..5fab28aaa 100644 --- a/bsd/sys/_structs.h +++ b/bsd/sys/_structs.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/sys/_types.h b/bsd/sys/_types.h index 5a532d455..08691552c 100644 --- a/bsd/sys/_types.h +++ b/bsd/sys/_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2007 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -52,30 +52,30 @@ #define __DARWIN_NULL ((void *)0) #endif /* __cplusplus */ -typedef __int64_t __darwin_blkcnt_t; /* total blocks */ -typedef __int32_t __darwin_blksize_t; /* preferred block size */ -typedef __int32_t __darwin_dev_t; /* dev_t */ -typedef unsigned int __darwin_fsblkcnt_t; /* Used by statvfs and fstatvfs */ -typedef unsigned int __darwin_fsfilcnt_t; /* Used by statvfs and fstatvfs */ -typedef __uint32_t __darwin_gid_t; /* [???] process and group IDs */ -typedef __uint32_t __darwin_id_t; /* [XSI] pid_t, uid_t, or gid_t*/ -typedef __uint64_t __darwin_ino64_t; /* [???] Used for 64 bit inodes */ +typedef __int64_t __darwin_blkcnt_t; /* total blocks */ +typedef __int32_t __darwin_blksize_t; /* preferred block size */ +typedef __int32_t __darwin_dev_t; /* dev_t */ +typedef unsigned int __darwin_fsblkcnt_t; /* Used by statvfs and fstatvfs */ +typedef unsigned int __darwin_fsfilcnt_t; /* Used by statvfs and fstatvfs */ +typedef __uint32_t __darwin_gid_t; /* [???] process and group IDs */ +typedef __uint32_t __darwin_id_t; /* [XSI] pid_t, uid_t, or gid_t*/ +typedef __uint64_t __darwin_ino64_t; /* [???] Used for 64 bit inodes */ #if __DARWIN_64_BIT_INO_T -typedef __darwin_ino64_t __darwin_ino_t; /* [???] Used for inodes */ +typedef __darwin_ino64_t __darwin_ino_t; /* [???] Used for inodes */ #else /* !__DARWIN_64_BIT_INO_T */ -typedef __uint32_t __darwin_ino_t; /* [???] Used for inodes */ +typedef __uint32_t __darwin_ino_t; /* [???] Used for inodes */ #endif /* __DARWIN_64_BIT_INO_T */ typedef __darwin_natural_t __darwin_mach_port_name_t; /* Used by mach */ typedef __darwin_mach_port_name_t __darwin_mach_port_t; /* Used by mach */ -typedef __uint16_t __darwin_mode_t; /* [???] Some file attributes */ -typedef __int64_t __darwin_off_t; /* [???] Used for file sizes */ -typedef __int32_t __darwin_pid_t; /* [???] process and group IDs */ -typedef __uint32_t __darwin_sigset_t; /* [???] signal set */ -typedef __int32_t __darwin_suseconds_t; /* [???] microseconds */ -typedef __uint32_t __darwin_uid_t; /* [???] user IDs */ -typedef __uint32_t __darwin_useconds_t; /* [???] microseconds */ -typedef unsigned char __darwin_uuid_t[16]; -typedef char __darwin_uuid_string_t[37]; +typedef __uint16_t __darwin_mode_t; /* [???] Some file attributes */ +typedef __int64_t __darwin_off_t; /* [???] Used for file sizes */ +typedef __int32_t __darwin_pid_t; /* [???] process and group IDs */ +typedef __uint32_t __darwin_sigset_t; /* [???] 
signal set */ +typedef __int32_t __darwin_suseconds_t; /* [???] microseconds */ +typedef __uint32_t __darwin_uid_t; /* [???] user IDs */ +typedef __uint32_t __darwin_useconds_t; /* [???] microseconds */ +typedef unsigned char __darwin_uuid_t[16]; +typedef char __darwin_uuid_string_t[37]; #ifndef KERNEL #include @@ -91,4 +91,4 @@ typedef char __darwin_uuid_string_t[37]; #include #endif /* KERNEL */ -#endif /* _SYS__TYPES_H_ */ +#endif /* _SYS__TYPES_H_ */ diff --git a/bsd/sys/_types/_blkcnt_t.h b/bsd/sys/_types/_blkcnt_t.h index 30668f8d6..9d4d1ee88 100644 --- a/bsd/sys/_types/_blkcnt_t.h +++ b/bsd/sys/_types/_blkcnt_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _BLKCNT_T -#define _BLKCNT_T +#ifndef _BLKCNT_T +#define _BLKCNT_T #include /* __darwin_blkcnt_t */ -typedef __darwin_blkcnt_t blkcnt_t; +typedef __darwin_blkcnt_t blkcnt_t; #endif /* _BLKCNT_T */ diff --git a/bsd/sys/_types/_blksize_t.h b/bsd/sys/_types/_blksize_t.h index a71c373ce..82931f7bf 100644 --- a/bsd/sys/_types/_blksize_t.h +++ b/bsd/sys/_types/_blksize_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
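The widths in the bsd/sys/_types.h hunk above are ABI commitments that the public typedefs inherit. A compile-time check of a few of them (a sketch, not part of the patch; assumes a C11 compiler for _Static_assert and that <uuid/uuid.h> exposes the uuid_t/uuid_string_t wrappers):

#include <sys/types.h>   /* off_t, dev_t, mode_t */
#include <uuid/uuid.h>   /* uuid_t, uuid_string_t -- assumed wrappers */

_Static_assert(sizeof(off_t)  == 8,  "__darwin_off_t is __int64_t");
_Static_assert(sizeof(dev_t)  == 4,  "__darwin_dev_t is __int32_t");
_Static_assert(sizeof(mode_t) == 2,  "__darwin_mode_t is __uint16_t");
_Static_assert(sizeof(uuid_t) == 16, "__darwin_uuid_t is unsigned char[16]");
_Static_assert(sizeof(uuid_string_t) == 37, "36 characters plus the terminating NUL");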
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _BLKSIZE_T -#define _BLKSIZE_T +#ifndef _BLKSIZE_T +#define _BLKSIZE_T #include /* __darwin_blksize_t */ -typedef __darwin_blksize_t blksize_t; +typedef __darwin_blksize_t blksize_t; #endif /* _BLKSIZE_T */ diff --git a/bsd/sys/_types/_caddr_t.h b/bsd/sys/_types/_caddr_t.h index ad1ad5f62..159e186d3 100644 --- a/bsd/sys/_types/_caddr_t.h +++ b/bsd/sys/_types/_caddr_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _CADDR_T #define _CADDR_T -typedef char * caddr_t; +typedef char * caddr_t; #endif /* _CADDR_T */ diff --git a/bsd/sys/_types/_clock_t.h b/bsd/sys/_types/_clock_t.h index 6fcdf6ba9..991d2cd52 100644 --- a/bsd/sys/_types/_clock_t.h +++ b/bsd/sys/_types/_clock_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _CLOCK_T diff --git a/bsd/sys/_types/_ct_rune_t.h b/bsd/sys/_types/_ct_rune_t.h index ad66d423a..3878dff80 100644 --- a/bsd/sys/_types/_ct_rune_t.h +++ b/bsd/sys/_types/_ct_rune_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/sys/_types/_dev_t.h b/bsd/sys/_types/_dev_t.h index 8a1e4053f..be5c73ee5 100644 --- a/bsd/sys/_types/_dev_t.h +++ b/bsd/sys/_types/_dev_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _DEV_T -#define _DEV_T +#ifndef _DEV_T +#define _DEV_T #include /* __darwin_dev_t */ -typedef __darwin_dev_t dev_t; /* device number */ +typedef __darwin_dev_t dev_t; /* device number */ #endif /* _DEV_T */ diff --git a/bsd/sys/_types/_errno_t.h b/bsd/sys/_types/_errno_t.h index bb2b2d061..557282a2d 100644 --- a/bsd/sys/_types/_errno_t.h +++ b/bsd/sys/_types/_errno_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
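dev_t (hunk above) is a packed major/minor device number; on Darwin the major(), minor(), and makedev() macros from <sys/types.h> do the packing. A sketch (not part of the patch; the macro availability behind feature-test macros is assumed, and the numbers are arbitrary):

#include <stdio.h>
#include <sys/types.h>

int main(void) {
    dev_t dev = makedev(1, 5);       /* pack major 1, minor 5 */
    printf("major=%d minor=%d\n", major(dev), minor(dev));
    return 0;
}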
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _ERRNO_T diff --git a/bsd/sys/_types/_fd_clr.h b/bsd/sys/_types/_fd_clr.h index 52351ea2a..eeb65b362 100644 --- a/bsd/sys/_types/_fd_clr.h +++ b/bsd/sys/_types/_fd_clr.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,9 +22,9 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef FD_CLR -#define FD_CLR(n, p) __DARWIN_FD_CLR(n, p) +#define FD_CLR(n, p) __DARWIN_FD_CLR(n, p) #endif /* FD_CLR */ diff --git a/bsd/sys/_types/_fd_copy.h b/bsd/sys/_types/_fd_copy.h index 2eddb34e7..d0e9c1ec9 100644 --- a/bsd/sys/_types/_fd_copy.h +++ b/bsd/sys/_types/_fd_copy.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,9 +22,9 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef FD_COPY -#define FD_COPY(f, t) __DARWIN_FD_COPY(f, t) +#define FD_COPY(f, t) __DARWIN_FD_COPY(f, t) #endif /* FD_COPY */ diff --git a/bsd/sys/_types/_fd_def.h b/bsd/sys/_types/_fd_def.h index 51c43746c..13137df5f 100644 --- a/bsd/sys/_types/_fd_def.h +++ b/bsd/sys/_types/_fd_def.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _FD_SET @@ -37,17 +37,17 @@ * the default size. */ #ifdef FD_SETSIZE -#define __DARWIN_FD_SETSIZE FD_SETSIZE +#define __DARWIN_FD_SETSIZE FD_SETSIZE #else /* !FD_SETSIZE */ -#define __DARWIN_FD_SETSIZE 1024 +#define __DARWIN_FD_SETSIZE 1024 #endif /* FD_SETSIZE */ -#define __DARWIN_NBBY 8 /* bits in a byte */ -#define __DARWIN_NFDBITS (sizeof(__int32_t) * __DARWIN_NBBY) /* bits per mask */ -#define __DARWIN_howmany(x, y) ((((x) % (y)) == 0) ? ((x) / (y)) : (((x) / (y)) + 1)) /* # y's == x bits? */ +#define __DARWIN_NBBY 8 /* bits in a byte */ +#define __DARWIN_NFDBITS (sizeof(__int32_t) * __DARWIN_NBBY) /* bits per mask */ +#define __DARWIN_howmany(x, y) ((((x) % (y)) == 0) ? ((x) / (y)) : (((x) / (y)) + 1)) /* # y's == x bits? */ __BEGIN_DECLS -typedef struct fd_set { - __int32_t fds_bits[__DARWIN_howmany(__DARWIN_FD_SETSIZE, __DARWIN_NFDBITS)]; +typedef struct fd_set { + __int32_t fds_bits[__DARWIN_howmany(__DARWIN_FD_SETSIZE, __DARWIN_NFDBITS)]; } fd_set; __END_DECLS @@ -55,22 +55,22 @@ __END_DECLS static __inline int __darwin_fd_isset(int _n, const struct fd_set *_p) { - return (_p->fds_bits[(unsigned long)_n/__DARWIN_NFDBITS] & ((__int32_t)(((unsigned long)1)<<((unsigned long)_n % __DARWIN_NFDBITS)))); + return _p->fds_bits[(unsigned long)_n / __DARWIN_NFDBITS] & ((__int32_t)(((unsigned long)1) << ((unsigned long)_n % __DARWIN_NFDBITS))); } -#define __DARWIN_FD_SET(n, p) do { int __fd = (n); ((p)->fds_bits[(unsigned long)__fd/__DARWIN_NFDBITS] |= ((__int32_t)(((unsigned long)1)<<((unsigned long)__fd % __DARWIN_NFDBITS)))); } while(0) -#define __DARWIN_FD_CLR(n, p) do { int __fd = (n); ((p)->fds_bits[(unsigned long)__fd/__DARWIN_NFDBITS] &= ~((__int32_t)(((unsigned long)1)<<((unsigned long)__fd % __DARWIN_NFDBITS)))); } while(0) -#define __DARWIN_FD_ISSET(n, p) __darwin_fd_isset((n), (p)) +#define __DARWIN_FD_SET(n, p) do { int __fd = (n); ((p)->fds_bits[(unsigned long)__fd/__DARWIN_NFDBITS] |= ((__int32_t)(((unsigned long)1)<<((unsigned long)__fd % __DARWIN_NFDBITS)))); } while(0) +#define __DARWIN_FD_CLR(n, p) do { int __fd = (n); ((p)->fds_bits[(unsigned long)__fd/__DARWIN_NFDBITS] &= ~((__int32_t)(((unsigned long)1)<<((unsigned long)__fd % __DARWIN_NFDBITS)))); } while(0) +#define __DARWIN_FD_ISSET(n, p) __darwin_fd_isset((n), (p)) #if __GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ >= 3 /* * Use the built-in bzero function instead of the library version so that * we do not pollute the namespace or introduce prototype warnings. 
*/ -#define __DARWIN_FD_ZERO(p) __builtin_bzero(p, sizeof(*(p))) +#define __DARWIN_FD_ZERO(p) __builtin_bzero(p, sizeof(*(p))) #else -#define __DARWIN_FD_ZERO(p) bzero(p, sizeof(*(p))) +#define __DARWIN_FD_ZERO(p) bzero(p, sizeof(*(p))) #endif -#define __DARWIN_FD_COPY(f, t) bcopy(f, t, sizeof(*(f))) +#define __DARWIN_FD_COPY(f, t) bcopy(f, t, sizeof(*(f))) #endif /* _FD_SET */ diff --git a/bsd/sys/_types/_fd_isset.h b/bsd/sys/_types/_fd_isset.h index 089d0d57d..e3b3d9856 100644 --- a/bsd/sys/_types/_fd_isset.h +++ b/bsd/sys/_types/_fd_isset.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,9 +22,9 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef FD_ISSET -#define FD_ISSET(n, p) __DARWIN_FD_ISSET(n, p) +#define FD_ISSET(n, p) __DARWIN_FD_ISSET(n, p) #endif /* FD_ISSET */ diff --git a/bsd/sys/_types/_fd_set.h b/bsd/sys/_types/_fd_set.h index 1fe7da32e..67f4fa4df 100644 --- a/bsd/sys/_types/_fd_set.h +++ b/bsd/sys/_types/_fd_set.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,9 +22,9 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef FD_SET -#define FD_SET(n, p) __DARWIN_FD_SET(n, p) +#define FD_SET(n, p) __DARWIN_FD_SET(n, p) #endif /* FD_SET */ diff --git a/bsd/sys/_types/_fd_setsize.h b/bsd/sys/_types/_fd_setsize.h index 4bf02ae2c..c5c3ec9d8 100644 --- a/bsd/sys/_types/_fd_setsize.h +++ b/bsd/sys/_types/_fd_setsize.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. 
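The __DARWIN_FD_* macros above are plain bit arithmetic over 32-bit words: descriptor n lives in word n / __DARWIN_NFDBITS at bit n % __DARWIN_NFDBITS, FD_ZERO is a bzero of the whole array, and FD_COPY a bcopy. A sketch that makes the indexing concrete (not part of the patch; descriptor 69 is arbitrary):

#include <assert.h>
#include <sys/select.h>

int main(void) {
    fd_set set;
    FD_ZERO(&set);                        /* zero every fds_bits word */
    FD_SET(69, &set);                     /* word 69/32 == 2, bit 69%32 == 5 */
    assert(set.fds_bits[2] == (1 << 5));
    assert(FD_ISSET(69, &set));
    FD_CLR(69, &set);
    assert(!FD_ISSET(69, &set));
    return 0;
}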
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,9 +22,9 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef FD_SETSIZE -#define FD_SETSIZE __DARWIN_FD_SETSIZE +#ifndef FD_SETSIZE +#define FD_SETSIZE __DARWIN_FD_SETSIZE #endif /* FD_SETSIZE */ diff --git a/bsd/sys/_types/_fd_zero.h b/bsd/sys/_types/_fd_zero.h index 4fe98ae64..8363df3bd 100644 --- a/bsd/sys/_types/_fd_zero.h +++ b/bsd/sys/_types/_fd_zero.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,9 +22,9 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef FD_ZERO -#define FD_ZERO(p) __DARWIN_FD_ZERO(p) +#define FD_ZERO(p) __DARWIN_FD_ZERO(p) #endif /* FD_ZERO */ diff --git a/bsd/sys/_types/_filesec_t.h b/bsd/sys/_types/_filesec_t.h index 58870a2ef..6812eba46 100644 --- a/bsd/sys/_types/_filesec_t.h +++ b/bsd/sys/_types/_filesec_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _FILESEC_T #define _FILESEC_T struct _filesec; -typedef struct _filesec *filesec_t; +typedef struct _filesec *filesec_t; #endif /* _FILESEC_T */ diff --git a/bsd/sys/_types/_fsblkcnt_t.h b/bsd/sys/_types/_fsblkcnt_t.h index ac012b146..a80d02f67 100644 --- a/bsd/sys/_types/_fsblkcnt_t.h +++ b/bsd/sys/_types/_fsblkcnt_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _FSBLKCNT_T #define _FSBLKCNT_T #include /* __darwin_fsblkcnt_t */ -typedef __darwin_fsblkcnt_t fsblkcnt_t; +typedef __darwin_fsblkcnt_t fsblkcnt_t; #endif /* _FSBLKCNT_T */ diff --git a/bsd/sys/_types/_fsfilcnt_t.h b/bsd/sys/_types/_fsfilcnt_t.h index 80bfa76ae..be5e9b4ab 100644 --- a/bsd/sys/_types/_fsfilcnt_t.h +++ b/bsd/sys/_types/_fsfilcnt_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _FSFILCNT_T #define _FSFILCNT_T #include /* __darwin_fsfilcnt_t */ -typedef __darwin_fsfilcnt_t fsfilcnt_t; +typedef __darwin_fsfilcnt_t fsfilcnt_t; #endif /* _FSFILCNT_T */ diff --git a/bsd/sys/_types/_fsid_t.h b/bsd/sys/_types/_fsid_t.h index 5806d16ca..d4e70f299 100644 --- a/bsd/sys/_types/_fsid_t.h +++ b/bsd/sys/_types/_fsid_t.h @@ -28,5 +28,5 @@ #ifndef _FSID_T #define _FSID_T #include /* int32_t */ -typedef struct fsid { int32_t val[2]; } fsid_t; /* file system id type */ +typedef struct fsid { int32_t val[2]; } fsid_t; /* file system id type */ #endif /* _FSID_T */ diff --git a/bsd/sys/_types/_gid_t.h b/bsd/sys/_types/_gid_t.h index 402f5c219..ebf497068 100644 --- a/bsd/sys/_types/_gid_t.h +++ b/bsd/sys/_types/_gid_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _GID_T -#define _GID_T +#ifndef _GID_T +#define _GID_T #include /* __darwin_gid_t */ -typedef __darwin_gid_t gid_t; -#endif +typedef __darwin_gid_t gid_t; +#endif diff --git a/bsd/sys/_types/_guid_t.h b/bsd/sys/_types/_guid_t.h index 47e722d04..ac9cd5c76 100644 --- a/bsd/sys/_types/_guid_t.h +++ b/bsd/sys/_types/_guid_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
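fsblkcnt_t and fsfilcnt_t (hunks above) surface almost exclusively through statvfs()/fstatvfs(), as the comments in the bsd/sys/_types.h hunk note. A usage sketch (not part of the patch; the "/" path is arbitrary):

#include <stdio.h>
#include <sys/statvfs.h>

int main(void) {
    struct statvfs vfs;
    if (statvfs("/", &vfs) == 0)
        printf("free blocks: %lu, free inodes: %lu\n",
               (unsigned long)vfs.f_bfree,    /* fsblkcnt_t */
               (unsigned long)vfs.f_ffree);   /* fsfilcnt_t */
    return 0;
}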
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KAUTH_GUID #define _KAUTH_GUID /* Apple-style globally unique identifier */ typedef struct { -#define KAUTH_GUID_SIZE 16 /* 128-bit identifier */ +#define KAUTH_GUID_SIZE 16 /* 128-bit identifier */ unsigned char g_guid[KAUTH_GUID_SIZE]; } guid_t; #define _GUID_T diff --git a/bsd/sys/_types/_id_t.h b/bsd/sys/_types/_id_t.h index 79cd778da..9af9610a2 100644 --- a/bsd/sys/_types/_id_t.h +++ b/bsd/sys/_types/_id_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _ID_T -#define _ID_T +#ifndef _ID_T +#define _ID_T #include /* __darwin_id_t */ -typedef __darwin_id_t id_t; /* can hold pid_t, gid_t, or uid_t */ +typedef __darwin_id_t id_t; /* can hold pid_t, gid_t, or uid_t */ #endif /* _ID_T */ diff --git a/bsd/sys/_types/_in_addr_t.h b/bsd/sys/_types/_in_addr_t.h index aa4956a1c..edcf66e50 100644 --- a/bsd/sys/_types/_in_addr_t.h +++ b/bsd/sys/_types/_in_addr_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
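guid_t (hunk above) is just sixteen opaque bytes, so formatting one needs no kauth call, only the g_guid array. A sketch, assuming <sys/kauth.h> is the visible userspace home of guid_t and KAUTH_GUID_SIZE (that include is an assumption, not something this patch establishes):

#include <stdio.h>
#include <sys/kauth.h>   /* guid_t, KAUTH_GUID_SIZE -- assumed available here */

static void print_guid(const guid_t *g) {
    for (int i = 0; i < KAUTH_GUID_SIZE; i++)
        printf("%02x", g->g_guid[i]);    /* dump the 128-bit identifier as hex */
    printf("\n");
}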
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IN_ADDR_T #define _IN_ADDR_T #include /* __uint32_t */ -typedef __uint32_t in_addr_t; /* base type for internet address */ +typedef __uint32_t in_addr_t; /* base type for internet address */ #endif /* _IN_ADDR_T */ diff --git a/bsd/sys/_types/_in_port_t.h b/bsd/sys/_types/_in_port_t.h index 69e719e89..8b102566c 100644 --- a/bsd/sys/_types/_in_port_t.h +++ b/bsd/sys/_types/_in_port_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IN_PORT_T #define _IN_PORT_T #include /* __uint16_t */ -typedef __uint16_t in_port_t; +typedef __uint16_t in_port_t; #endif /* _IN_PORT_T */ diff --git a/bsd/sys/_types/_ino64_t.h b/bsd/sys/_types/_ino64_t.h index effe9f6e6..c142b1bae 100644 --- a/bsd/sys/_types/_ino64_t.h +++ b/bsd/sys/_types/_ino64_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _INO64_T -#define _INO64_T +#ifndef _INO64_T +#define _INO64_T #include /* __darwin_ino64_t */ -typedef __darwin_ino64_t ino64_t; /* 64bit inode number */ +typedef __darwin_ino64_t ino64_t; /* 64bit inode number */ #endif /* _INO64_T */ diff --git a/bsd/sys/_types/_ino_t.h b/bsd/sys/_types/_ino_t.h index 721f8646e..2a693ddbf 100644 --- a/bsd/sys/_types/_ino_t.h +++ b/bsd/sys/_types/_ino_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _INO_T -#define _INO_T +#ifndef _INO_T +#define _INO_T #include /* __darwin_ino_t */ -typedef __darwin_ino_t ino_t; /* inode number */ +typedef __darwin_ino_t ino_t; /* inode number */ #endif /* _INO_T */ diff --git a/bsd/sys/_types/_int16_t.h b/bsd/sys/_types/_int16_t.h index ed373d649..3bf3da068 100644 --- a/bsd/sys/_types/_int16_t.h +++ b/bsd/sys/_types/_int16_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _INT16_T #define _INT16_T -typedef short int16_t; +typedef short int16_t; #endif /* _INT16_T */ diff --git a/bsd/sys/_types/_int32_t.h b/bsd/sys/_types/_int32_t.h index 15041d498..9b1d72ba7 100644 --- a/bsd/sys/_types/_int32_t.h +++ b/bsd/sys/_types/_int32_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
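Which definition the _ino_t.h hunk above compiles down to depends on __DARWIN_64_BIT_INO_T, per the conditional in the bsd/sys/_types.h hunk earlier. A sketch of where the width becomes visible in practice (not part of the patch; the path is arbitrary):

#include <stdio.h>
#include <sys/stat.h>

int main(void) {
    struct stat st;
    if (stat("/etc/hosts", &st) == 0)
        printf("inode %llu (ino_t is %zu bytes)\n",
               (unsigned long long)st.st_ino, sizeof(ino_t));
    return 0;
}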
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _INT32_T #define _INT32_T -typedef int int32_t; +typedef int int32_t; #endif /* _INT32_T */ diff --git a/bsd/sys/_types/_int64_t.h b/bsd/sys/_types/_int64_t.h index fd14b60a9..4f3e7de38 100644 --- a/bsd/sys/_types/_int64_t.h +++ b/bsd/sys/_types/_int64_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _INT64_T #define _INT64_T -typedef long long int64_t; +typedef long long int64_t; #endif /* _INT64_T */ diff --git a/bsd/sys/_types/_int8_t.h b/bsd/sys/_types/_int8_t.h index c48ef254e..9176298a5 100644 --- a/bsd/sys/_types/_int8_t.h +++ b/bsd/sys/_types/_int8_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _INT8_T #define _INT8_T -typedef __signed char int8_t; +typedef __signed char int8_t; #endif /* _INT8_T */ diff --git a/bsd/sys/_types/_intptr_t.h b/bsd/sys/_types/_intptr_t.h index 0e050f7a0..0f494b9e5 100644 --- a/bsd/sys/_types/_intptr_t.h +++ b/bsd/sys/_types/_intptr_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _INTPTR_T #define _INTPTR_T #include /* __darwin_intptr_t */ -typedef __darwin_intptr_t intptr_t; +typedef __darwin_intptr_t intptr_t; #endif /* _INTPTR_T */ diff --git a/bsd/sys/_types/_iovec_t.h b/bsd/sys/_types/_iovec_t.h index 6905450ec..f89c7306f 100644 --- a/bsd/sys/_types/_iovec_t.h +++ b/bsd/sys/_types/_iovec_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _STRUCT_IOVEC -#define _STRUCT_IOVEC +#define _STRUCT_IOVEC #include /* size_t */ struct iovec { - void * iov_base; /* [XSI] Base address of I/O memory region */ - size_t iov_len; /* [XSI] Size of region iov_base points to */ + void * iov_base; /* [XSI] Base address of I/O memory region */ + size_t iov_len; /* [XSI] Size of region iov_base points to */ }; #endif /* _STRUCT_IOVEC */ diff --git a/bsd/sys/_types/_key_t.h b/bsd/sys/_types/_key_t.h index 1d4ca01ed..ec093d769 100644 --- a/bsd/sys/_types/_key_t.h +++ b/bsd/sys/_types/_key_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _KEY_T -#define _KEY_T +#ifndef _KEY_T +#define _KEY_T #include /* __int32_t */ -typedef __int32_t key_t; /* IPC key (for Sys V IPC) */ +typedef __int32_t key_t; /* IPC key (for Sys V IPC) */ #endif /* _KEY_T */ diff --git a/bsd/sys/_types/_mach_port_t.h b/bsd/sys/_types/_mach_port_t.h index 8920a37b2..fa96565f6 100644 --- a/bsd/sys/_types/_mach_port_t.h +++ b/bsd/sys/_types/_mach_port_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,23 +22,23 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * mach_port_t - a named port right * * In user-space, "rights" are represented by the name of the * right in the Mach port namespace. 
Even so, this type is * presented as a unique one to more clearly denote the presence - * of a right coming along with the name. + * of a right coming along with the name. * * Often, various rights for a port held in a single name space * will coalesce and are, therefore, be identified by a single name * [this is the case for send and receive rights]. But not * always [send-once rights currently get a unique name for - * each right]. + * each right]. * * This definition of mach_port_t is only for user-space. * diff --git a/bsd/sys/_types/_mbstate_t.h b/bsd/sys/_types/_mbstate_t.h index 0f51de45c..771728bfa 100644 --- a/bsd/sys/_types/_mbstate_t.h +++ b/bsd/sys/_types/_mbstate_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/sys/_types/_mode_t.h b/bsd/sys/_types/_mode_t.h index c4de010c7..36f8d2b32 100644 --- a/bsd/sys/_types/_mode_t.h +++ b/bsd/sys/_types/_mode_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
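struct iovec from the _iovec_t.h hunk above is the unit of scatter/gather I/O: readv()/writev() walk an array of {iov_base, iov_len} regions in order. A sketch (not part of the patch):

#include <sys/uio.h>
#include <unistd.h>

int main(void) {
    struct iovec iov[2] = {
        { .iov_base = "hello, ", .iov_len = 7 },
        { .iov_base = "world\n",  .iov_len = 6 },
    };
    return writev(STDOUT_FILENO, iov, 2) < 0;   /* one syscall, two regions */
}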
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _MODE_T -#define _MODE_T +#ifndef _MODE_T +#define _MODE_T #include /* __darwin_mode_t */ -typedef __darwin_mode_t mode_t; +typedef __darwin_mode_t mode_t; #endif /* _MODE_T */ diff --git a/bsd/sys/_types/_nlink_t.h b/bsd/sys/_types/_nlink_t.h index 7d066e178..c3f83365f 100644 --- a/bsd/sys/_types/_nlink_t.h +++ b/bsd/sys/_types/_nlink_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _NLINK_T -#define _NLINK_T +#ifndef _NLINK_T +#define _NLINK_T #include /* __uint16_t */ -typedef __uint16_t nlink_t; /* link count */ +typedef __uint16_t nlink_t; /* link count */ #endif /* _NLINK_T */ diff --git a/bsd/sys/_types/_null.h b/bsd/sys/_types/_null.h index 537c10a3f..9c21571ea 100644 --- a/bsd/sys/_types/_null.h +++ b/bsd/sys/_types/_null.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef NULL diff --git a/bsd/sys/_types/_o_dsync.h b/bsd/sys/_types/_o_dsync.h index fece722c7..bd4f2884f 100644 --- a/bsd/sys/_types/_o_dsync.h +++ b/bsd/sys/_types/_o_dsync.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. 
diff --git a/bsd/sys/_types/_o_dsync.h b/bsd/sys/_types/_o_dsync.h
index fece722c7..bd4f2884f 100644
--- a/bsd/sys/_types/_o_dsync.h
+++ b/bsd/sys/_types/_o_dsync.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,9 +22,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef O_DSYNC
-#define O_DSYNC 0x400000 /* synch I/O data integrity */
+#ifndef O_DSYNC
+#define O_DSYNC         0x400000        /* synch I/O data integrity */
 #endif /* O_DSYNC */
diff --git a/bsd/sys/_types/_o_sync.h b/bsd/sys/_types/_o_sync.h
index 85bdd6945..a3952cc35 100644
--- a/bsd/sys/_types/_o_sync.h
+++ b/bsd/sys/_types/_o_sync.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,9 +22,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef O_SYNC
-#define O_SYNC 0x0080 /* synch I/O file integrity */
+#ifndef O_SYNC
+#define O_SYNC          0x0080          /* synch I/O file integrity */
 #endif /* O_SYNC */
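Not part of the patch: a sketch of what these two open(2) flags buy you. O_DSYNC makes write(2) return only after the file *data* is durable; O_SYNC additionally syncs metadata (size, timestamps), so it is the stronger and slower option.

```c
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Append a record whose data is durable before write(2) returns. */
int append_record_durably(const char *path, const char *rec)
{
	int fd = open(path, O_WRONLY | O_APPEND | O_CREAT | O_DSYNC, 0644);
	if (fd < 0)
		return -1;
	ssize_t n = write(fd, rec, strlen(rec));
	close(fd);
	return n < 0 ? -1 : 0;
}
```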
diff --git a/bsd/sys/_types/_off_t.h b/bsd/sys/_types/_off_t.h
index 205207ea3..bdc3d5e38 100644
--- a/bsd/sys/_types/_off_t.h
+++ b/bsd/sys/_types/_off_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _OFF_T
-#define _OFF_T
+#ifndef _OFF_T
+#define _OFF_T
 #include <sys/_types.h> /* __darwin_off_t */
-typedef __darwin_off_t off_t;
+typedef __darwin_off_t        off_t;
 #endif /* _OFF_T */
diff --git a/bsd/sys/_types/_offsetof.h b/bsd/sys/_types/_offsetof.h
index 16832a355..fa831a511 100644
--- a/bsd/sys/_types/_offsetof.h
+++ b/bsd/sys/_types/_offsetof.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef offsetof
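Not part of the patch: a minimal demonstration of the offsetof() macro guarded above, showing where ABI padding lands members of a struct. The struct pkt type is invented for illustration.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pkt {
	char     tag;       /* padding follows so len is 4-byte aligned */
	uint32_t len;
	char     body[32];
};

int main(void)
{
	/* offsetof() reports where each member lands after alignment. */
	printf("tag=%zu len=%zu body=%zu size=%zu\n",
	    offsetof(struct pkt, tag),
	    offsetof(struct pkt, len),
	    offsetof(struct pkt, body),
	    sizeof(struct pkt));
	return 0;
}
```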
diff --git a/bsd/sys/_types/_os_inline.h b/bsd/sys/_types/_os_inline.h
index d85c91214..fd68cff47 100644
--- a/bsd/sys/_types/_os_inline.h
+++ b/bsd/sys/_types/_os_inline.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #if !defined(OS_INLINE)
diff --git a/bsd/sys/_types/_pid_t.h b/bsd/sys/_types/_pid_t.h
index 5050d5278..994f84e87 100644
--- a/bsd/sys/_types/_pid_t.h
+++ b/bsd/sys/_types/_pid_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _PID_T
-#define _PID_T
+#ifndef _PID_T
+#define _PID_T
 #include <sys/_types.h> /* __darwin_pid_t */
-typedef __darwin_pid_t pid_t;
+typedef __darwin_pid_t        pid_t;
 #endif /* _PID_T */
diff --git a/bsd/sys/_types/_posix_vdisable.h b/bsd/sys/_types/_posix_vdisable.h
index 4808c5318..970f1b5d7 100644
--- a/bsd/sys/_types/_posix_vdisable.h
+++ b/bsd/sys/_types/_posix_vdisable.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,9 +22,9 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _POSIX_VDISABLE
-#define _POSIX_VDISABLE ((unsigned char)'\377')
+#define _POSIX_VDISABLE ((unsigned char)'\377')
 #endif /* POSIX_VDISABLE */
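Not part of the patch: a sketch of the one thing _POSIX_VDISABLE is for. Assigning it to a c_cc[] slot in termios turns that terminal control character off entirely; here the SUSP character (normally ^Z) is disabled.

```c
#include <termios.h>
#include <unistd.h>

/* Disable the SUSP control character on the given terminal. */
int disable_susp(int tty_fd)
{
	struct termios t;

	if (tcgetattr(tty_fd, &t) < 0)
		return -1;
	t.c_cc[VSUSP] = _POSIX_VDISABLE;   /* no key generates SIGTSTP now */
	return tcsetattr(tty_fd, TCSANOW, &t);
}
```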
diff --git a/bsd/sys/_types/_ptrdiff_t.h b/bsd/sys/_types/_ptrdiff_t.h
index 40cba6035..31a065770 100644
--- a/bsd/sys/_types/_ptrdiff_t.h
+++ b/bsd/sys/_types/_ptrdiff_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
diff --git a/bsd/sys/_types/_rsize_t.h b/bsd/sys/_types/_rsize_t.h
index 7150c6693..6aa2f6b32 100644
--- a/bsd/sys/_types/_rsize_t.h
+++ b/bsd/sys/_types/_rsize_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _RSIZE_T
diff --git a/bsd/sys/_types/_rune_t.h b/bsd/sys/_types/_rune_t.h
index aa9d0470d..bd10ef1ba 100644
--- a/bsd/sys/_types/_rune_t.h
+++ b/bsd/sys/_types/_rune_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _RUNE_T
-#define _RUNE_T
+#ifndef _RUNE_T
+#define _RUNE_T
 #include <sys/_types.h> /* __darwin_rune_t */
-typedef __darwin_rune_t rune_t;
+typedef __darwin_rune_t        rune_t;
 #endif /* _RUNE_T */
diff --git a/bsd/sys/_types/_s_ifmt.h b/bsd/sys/_types/_s_ifmt.h
index 21a75ca2a..1139cb25b 100644
--- a/bsd/sys/_types/_s_ifmt.h
+++ b/bsd/sys/_types/_s_ifmt.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
@@ -32,43 +32,43 @@
  */
 #ifndef S_IFMT
 /* File type */
-#define S_IFMT 0170000 /* [XSI] type of file mask */
-#define S_IFIFO 0010000 /* [XSI] named pipe (fifo) */
-#define S_IFCHR 0020000 /* [XSI] character special */
-#define S_IFDIR 0040000 /* [XSI] directory */
-#define S_IFBLK 0060000 /* [XSI] block special */
-#define S_IFREG 0100000 /* [XSI] regular */
-#define S_IFLNK 0120000 /* [XSI] symbolic link */
-#define S_IFSOCK 0140000 /* [XSI] socket */
+#define S_IFMT          0170000         /* [XSI] type of file mask */
+#define S_IFIFO         0010000         /* [XSI] named pipe (fifo) */
+#define S_IFCHR         0020000         /* [XSI] character special */
+#define S_IFDIR         0040000         /* [XSI] directory */
+#define S_IFBLK         0060000         /* [XSI] block special */
+#define S_IFREG         0100000         /* [XSI] regular */
+#define S_IFLNK         0120000         /* [XSI] symbolic link */
+#define S_IFSOCK        0140000         /* [XSI] socket */
 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define S_IFWHT 0160000 /* OBSOLETE: whiteout */
+#define S_IFWHT         0160000         /* OBSOLETE: whiteout */
 #endif
 
 /* File mode */
 /* Read, write, execute/search by owner */
-#define S_IRWXU 0000700 /* [XSI] RWX mask for owner */
-#define S_IRUSR 0000400 /* [XSI] R for owner */
-#define S_IWUSR 0000200 /* [XSI] W for owner */
-#define S_IXUSR 0000100 /* [XSI] X for owner */
+#define S_IRWXU         0000700         /* [XSI] RWX mask for owner */
+#define S_IRUSR         0000400         /* [XSI] R for owner */
+#define S_IWUSR         0000200         /* [XSI] W for owner */
+#define S_IXUSR         0000100         /* [XSI] X for owner */
 /* Read, write, execute/search by group */
-#define S_IRWXG 0000070 /* [XSI] RWX mask for group */
-#define S_IRGRP 0000040 /* [XSI] R for group */
-#define S_IWGRP 0000020 /* [XSI] W for group */
-#define S_IXGRP 0000010 /* [XSI] X for group */
+#define S_IRWXG         0000070         /* [XSI] RWX mask for group */
+#define S_IRGRP         0000040         /* [XSI] R for group */
+#define S_IWGRP         0000020         /* [XSI] W for group */
+#define S_IXGRP         0000010         /* [XSI] X for group */
 /* Read, write, execute/search by others */
-#define S_IRWXO 0000007 /* [XSI] RWX mask for other */
-#define S_IROTH 0000004 /* [XSI] R for other */
-#define S_IWOTH 0000002 /* [XSI] W for other */
-#define S_IXOTH 0000001 /* [XSI] X for other */
+#define S_IRWXO         0000007         /* [XSI] RWX mask for other */
+#define S_IROTH         0000004         /* [XSI] R for other */
+#define S_IWOTH         0000002         /* [XSI] W for other */
+#define S_IXOTH         0000001         /* [XSI] X for other */
 
-#define S_ISUID 0004000 /* [XSI] set user id on execution */
-#define S_ISGID 0002000 /* [XSI] set group id on execution */
-#define S_ISVTX 0001000 /* [XSI] directory restrcted delete */
+#define S_ISUID         0004000         /* [XSI] set user id on execution */
+#define S_ISGID         0002000         /* [XSI] set group id on execution */
+#define S_ISVTX         0001000         /* [XSI] directory restrcted delete */
 
 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
-#define S_ISTXT S_ISVTX /* sticky bit: not supported */
-#define S_IREAD S_IRUSR /* backward compatability */
-#define S_IWRITE S_IWUSR /* backward compatability */
-#define S_IEXEC S_IXUSR /* backward compatability */
+#define S_ISTXT         S_ISVTX         /* sticky bit: not supported */
+#define S_IREAD         S_IRUSR         /* backward compatability */
+#define S_IWRITE        S_IWUSR         /* backward compatability */
+#define S_IEXEC         S_IXUSR         /* backward compatability */
 #endif
-#endif /* !S_IFMT */
+#endif  /* !S_IFMT */
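Not part of the patch: a user-space sketch of how these bits are consumed. (st_mode & S_IFMT) isolates the file-type field; the low octal digits are the owner/group/other permission groups.

```c
#include <stdio.h>
#include <sys/stat.h>

int describe(const char *path)
{
	struct stat st;

	if (lstat(path, &st) < 0)
		return -1;

	switch (st.st_mode & S_IFMT) {   /* mask off the file-type field */
	case S_IFREG:  printf("%s: regular file\n", path);  break;
	case S_IFDIR:  printf("%s: directory\n", path);     break;
	case S_IFLNK:  printf("%s: symbolic link\n", path); break;
	case S_IFSOCK: printf("%s: socket\n", path);        break;
	default:       printf("%s: other\n", path);         break;
	}
	printf("owner: %c%c%c\n",
	    (st.st_mode & S_IRUSR) ? 'r' : '-',
	    (st.st_mode & S_IWUSR) ? 'w' : '-',
	    (st.st_mode & S_IXUSR) ? 'x' : '-');
	return 0;
}
```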
diff --git a/bsd/sys/_types/_sa_family_t.h b/bsd/sys/_types/_sa_family_t.h
index ccd168b2c..857cdd09c 100644
--- a/bsd/sys/_types/_sa_family_t.h
+++ b/bsd/sys/_types/_sa_family_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _SA_FAMILY_T
-#define _SA_FAMILY_T
+#ifndef _SA_FAMILY_T
+#define _SA_FAMILY_T
 #include <sys/_types.h> /* __uint8_t */
-typedef __uint8_t sa_family_t;
+typedef __uint8_t        sa_family_t;
 #endif /* _SA_FAMILY_T */
diff --git a/bsd/sys/_types/_seek_set.h b/bsd/sys/_types/_seek_set.h
index 6bcdec84e..f55175ad1 100644
--- a/bsd/sys/_types/_seek_set.h
+++ b/bsd/sys/_types/_seek_set.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
@@ -30,17 +30,17 @@
 
 /* whence values for lseek(2) */
 #ifndef SEEK_SET
-#define SEEK_SET 0 /* set file offset to offset */
-#define SEEK_CUR 1 /* set file offset to current plus offset */
-#define SEEK_END 2 /* set file offset to EOF plus offset */
-#endif /* !SEEK_SET */
+#define SEEK_SET        0       /* set file offset to offset */
+#define SEEK_CUR        1       /* set file offset to current plus offset */
+#define SEEK_END        2       /* set file offset to EOF plus offset */
+#endif  /* !SEEK_SET */
 
 #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL
 #ifndef SEEK_HOLE
-#define SEEK_HOLE 3 /* set file offset to the start of the next hole greater than or equal to the supplied offset */
+#define SEEK_HOLE       3       /* set file offset to the start of the next hole greater than or equal to the supplied offset */
 #endif
 
 #ifndef SEEK_DATA
-#define SEEK_DATA 4 /* set file offset to the start of the next non-hole file region greater than or equal to the supplied offset */
+#define SEEK_DATA       4       /* set file offset to the start of the next non-hole file region greater than or equal to the supplied offset */
 #endif
 
 #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */
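Not part of the patch: a sketch of walking a sparse file with the SEEK_DATA/SEEK_HOLE whence values defined above. lseek(2) fails (ENXIO) once no data region exists past the supplied offset, which terminates the loop.

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Print the allocated (non-hole) regions of a possibly sparse file. */
void list_data_regions(int fd)
{
	off_t pos = 0;

	for (;;) {
		off_t data = lseek(fd, pos, SEEK_DATA);
		if (data < 0)
			break;                  /* ENXIO: no more data */
		off_t hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
		pos = hole;
	}
}
```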
diff --git a/bsd/sys/_types/_sigaltstack.h b/bsd/sys/_types/_sigaltstack.h
index 353cd5b98..8c3430584 100644
--- a/bsd/sys/_types/_sigaltstack.h
+++ b/bsd/sys/_types/_sigaltstack.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
@@ -32,19 +32,19 @@
 #include <sys/cdefs.h> /* __DARWIN_UNIX03 */
 
 #if __DARWIN_UNIX03
-#define _STRUCT_SIGALTSTACK struct __darwin_sigaltstack
+#define _STRUCT_SIGALTSTACK     struct __darwin_sigaltstack
 #else /* !__DARWIN_UNIX03 */
-#define _STRUCT_SIGALTSTACK struct sigaltstack
+#define _STRUCT_SIGALTSTACK     struct sigaltstack
 #endif /* __DARWIN_UNIX03 */
 
 #include <sys/_types.h> /* __darwin_size_t */
 
 _STRUCT_SIGALTSTACK
 {
-	void *ss_sp; /* signal stack base */
+	void            *ss_sp;         /* signal stack base */
 	__darwin_size_t ss_size;        /* signal stack length */
 	int ss_flags;   /* SA_DISABLE and/or SA_ONSTACK */
 };
-typedef _STRUCT_SIGALTSTACK stack_t; /* [???] signal stack */
+typedef _STRUCT_SIGALTSTACK     stack_t; /* [???] signal stack */
 
 #endif /* _STRUCT_SIGALTSTACK */
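Not part of the patch: a sketch using the stack_t typedef above. sigaltstack(2) registers a dedicated stack, and SA_ONSTACK asks the kernel to switch to it when delivering the signal; that is the only way a handler can run after the main stack itself has overflowed.

```c
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig) { (void)sig; _exit(42); }

int install_altstack_handler(void)
{
	stack_t ss = {
		.ss_sp    = malloc(SIGSTKSZ),   /* ss_sp/ss_size as above */
		.ss_size  = SIGSTKSZ,
		.ss_flags = 0,
	};
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) < 0)
		return -1;

	struct sigaction sa = { .sa_handler = on_segv, .sa_flags = SA_ONSTACK };
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGSEGV, &sa, NULL);
}
```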
diff --git a/bsd/sys/_types/_sigset_t.h b/bsd/sys/_types/_sigset_t.h
index 6bf670407..51844dddb 100644
--- a/bsd/sys/_types/_sigset_t.h
+++ b/bsd/sys/_types/_sigset_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _SIGSET_T
-#define _SIGSET_T
+#ifndef _SIGSET_T
+#define _SIGSET_T
 #include <sys/_types.h> /* __darwin_sigset_t */
-typedef __darwin_sigset_t sigset_t;
+typedef __darwin_sigset_t        sigset_t;
 #endif /* _SIGSET_T */
diff --git a/bsd/sys/_types/_size_t.h b/bsd/sys/_types/_size_t.h
index 67786d594..a14a8885f 100644
--- a/bsd/sys/_types/_size_t.h
+++ b/bsd/sys/_types/_size_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _SIZE_T
-#define _SIZE_T
+#ifndef _SIZE_T
+#define _SIZE_T
 #include <sys/_types.h> /* __darwin_size_t */
-typedef __darwin_size_t size_t;
+typedef __darwin_size_t        size_t;
 #endif /* _SIZE_T */
diff --git a/bsd/sys/_types/_socklen_t.h b/bsd/sys/_types/_socklen_t.h
index b9354fde5..a7b843156 100644
--- a/bsd/sys/_types/_socklen_t.h
+++ b/bsd/sys/_types/_socklen_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,12 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _SOCKLEN_T
 #define _SOCKLEN_T
 #include <sys/_types.h> /* __darwin_socklen_t */
-typedef __darwin_socklen_t socklen_t;
+typedef __darwin_socklen_t        socklen_t;
 #endif
-
diff --git a/bsd/sys/_types/_ssize_t.h b/bsd/sys/_types/_ssize_t.h
index fef63730f..056607814 100644
--- a/bsd/sys/_types/_ssize_t.h
+++ b/bsd/sys/_types/_ssize_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _SSIZE_T
-#define _SSIZE_T
+#ifndef _SSIZE_T
+#define _SSIZE_T
 #include <sys/_types.h> /* __darwin_ssize_t */
-typedef __darwin_ssize_t ssize_t;
+typedef __darwin_ssize_t        ssize_t;
 #endif /* _SSIZE_T */
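Not part of the patch: a sketch showing the two typedefs above in their natural habitat. socklen_t is the in/out length for socket address arguments; ssize_t is the signed byte count (or -1) returned by read(2).

```c
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int serve_one(int listen_fd, char *buf, size_t buflen)
{
	struct sockaddr_in peer;
	socklen_t peerlen = sizeof(peer);   /* in: capacity, out: actual */
	int fd = accept(listen_fd, (struct sockaddr *)&peer, &peerlen);
	if (fd < 0)
		return -1;

	ssize_t n;
	while ((n = read(fd, buf, buflen)) > 0)
		;                               /* consume until EOF or error */
	close(fd);
	return n < 0 ? -1 : 0;
}
```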
diff --git a/bsd/sys/_types/_suseconds_t.h b/bsd/sys/_types/_suseconds_t.h
index 837c4cab1..3980dfedf 100644
--- a/bsd/sys/_types/_suseconds_t.h
+++ b/bsd/sys/_types/_suseconds_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _SUSECONDS_T
-#define _SUSECONDS_T
+#ifndef _SUSECONDS_T
+#define _SUSECONDS_T
 #include <sys/_types.h> /* __darwin_suseconds_t */
-typedef __darwin_suseconds_t suseconds_t;
+typedef __darwin_suseconds_t        suseconds_t;
 #endif /* _SUSECONDS_T */
diff --git a/bsd/sys/_types/_time_t.h b/bsd/sys/_types/_time_t.h
index ae87acb6f..2a91ef225 100644
--- a/bsd/sys/_types/_time_t.h
+++ b/bsd/sys/_types/_time_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _TIME_T
-#define _TIME_T
+#ifndef _TIME_T
+#define _TIME_T
 #include <sys/_types.h> /* __darwin_time_t */
-typedef __darwin_time_t time_t;
+typedef __darwin_time_t        time_t;
 #endif /* _TIME_T */
diff --git a/bsd/sys/_types/_timespec.h b/bsd/sys/_types/_timespec.h
index 6837be1ad..82cc723ec 100644
--- a/bsd/sys/_types/_timespec.h
+++ b/bsd/sys/_types/_timespec.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,17 +22,17 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #ifndef _STRUCT_TIMESPEC
-#define _STRUCT_TIMESPEC struct timespec
+#define _STRUCT_TIMESPEC        struct timespec
 
 #include <sys/_types.h> /* __darwin_time_t */
 
 _STRUCT_TIMESPEC
 {
-	__darwin_time_t tv_sec;
+	__darwin_time_t tv_sec;
 	long tv_nsec;
 };
 
 #endif /* _STRUCT_TIMESPEC */
diff --git a/bsd/sys/_types/_timeval.h b/bsd/sys/_types/_timeval.h
index 2f854b9d9..1b9a000a6 100644
--- a/bsd/sys/_types/_timeval.h
+++ b/bsd/sys/_types/_timeval.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,18 +22,18 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #ifndef _STRUCT_TIMEVAL
-#define _STRUCT_TIMEVAL struct timeval
+#define _STRUCT_TIMEVAL         struct timeval
 
 #include <sys/_types.h> /* __darwin_time_t */
 #include <sys/_types.h> /* __darwin_suseconds_t */
 
 _STRUCT_TIMEVAL
 {
-	__darwin_time_t tv_sec; /* seconds */
+	__darwin_time_t         tv_sec;         /* seconds */
 	__darwin_suseconds_t tv_usec;   /* and microseconds */
 };
 
 #endif /* _STRUCT_TIMEVAL */
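Not part of the patch: a sketch contrasting the two time structures defined above. timeval carries microseconds (gettimeofday, select); timespec carries nanoseconds (nanosleep, stat timestamps).

```c
#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int main(void)
{
	struct timeval tv;                 /* seconds + microseconds */
	gettimeofday(&tv, NULL);

	struct timespec ts = {             /* seconds + nanoseconds */
		.tv_sec  = 0,
		.tv_nsec = 250 * 1000 * 1000,
	};
	nanosleep(&ts, NULL);              /* sleep 250 ms */

	printf("epoch: %ld.%06d\n", (long)tv.tv_sec, (int)tv.tv_usec);
	return 0;
}
```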
diff --git a/bsd/sys/_types/_timeval32.h b/bsd/sys/_types/_timeval32.h
index dbb66d36e..71518173a 100644
--- a/bsd/sys/_types/_timeval32.h
+++ b/bsd/sys/_types/_timeval32.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,17 +22,17 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #ifndef _STRUCT_TIMEVAL32
-#define _STRUCT_TIMEVAL32 struct timeval32
+#define _STRUCT_TIMEVAL32       struct timeval32
 
 #include <sys/_types.h> /* __int32_t */
 
 _STRUCT_TIMEVAL32
 {
-	__int32_t tv_sec; /* seconds */
-	__int32_t tv_usec; /* and microseconds */
+	__int32_t tv_sec;       /* seconds */
+	__int32_t tv_usec;      /* and microseconds */
 };
 
 #endif /* _STRUCT_TIMEVAL32 */
diff --git a/bsd/sys/_types/_timeval64.h b/bsd/sys/_types/_timeval64.h
index 58a3255f9..2eb3c434f 100644
--- a/bsd/sys/_types/_timeval64.h
+++ b/bsd/sys/_types/_timeval64.h
@@ -31,9 +31,8 @@
 #include <sys/_types.h> /* __int64_t */
 
-struct timeval64
-{
-	__int64_t tv_sec; /* seconds */
-	__int64_t tv_usec; /* and microseconds */
+struct timeval64 {
+	__int64_t tv_sec;       /* seconds */
+	__int64_t tv_usec;      /* and microseconds */
 };
 
 #endif /* _STRUCT_TIMEVAL32 */
diff --git a/bsd/sys/_types/_u_char.h b/bsd/sys/_types/_u_char.h
index 2a8a5b47e..b6add3feb 100644
--- a/bsd/sys/_types/_u_char.h
+++ b/bsd/sys/_types/_u_char.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _U_CHAR
 #define _U_CHAR
-typedef unsigned char u_char;
+typedef unsigned char           u_char;
 #endif /* _U_CHAR */
diff --git a/bsd/sys/_types/_u_int.h b/bsd/sys/_types/_u_int.h
index 79c36d1b4..161b3baf1 100644
--- a/bsd/sys/_types/_u_int.h
+++ b/bsd/sys/_types/_u_int.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _U_INT
 #define _U_INT
-typedef unsigned int u_int;
+typedef unsigned int            u_int;
 #endif /* _U_INT */
diff --git a/bsd/sys/_types/_u_int16_t.h b/bsd/sys/_types/_u_int16_t.h
index a29896811..5a01fc450 100644
--- a/bsd/sys/_types/_u_int16_t.h
+++ b/bsd/sys/_types/_u_int16_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _U_INT16_T
 #define _U_INT16_T
-typedef unsigned short u_int16_t;
+typedef unsigned short          u_int16_t;
 #endif /* _U_INT16_T */
diff --git a/bsd/sys/_types/_u_int32_t.h b/bsd/sys/_types/_u_int32_t.h
index 7ebf744dc..4f01b22bd 100644
--- a/bsd/sys/_types/_u_int32_t.h
+++ b/bsd/sys/_types/_u_int32_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _U_INT32_T
 #define _U_INT32_T
-typedef unsigned int u_int32_t;
+typedef unsigned int            u_int32_t;
 #endif /* _U_INT32_T */
diff --git a/bsd/sys/_types/_u_int64_t.h b/bsd/sys/_types/_u_int64_t.h
index ff097cbdc..bd866cbc2 100644
--- a/bsd/sys/_types/_u_int64_t.h
+++ b/bsd/sys/_types/_u_int64_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _U_INT64_T
 #define _U_INT64_T
-typedef unsigned long long u_int64_t;
+typedef unsigned long long      u_int64_t;
 #endif /* _U_INT64_T */
diff --git a/bsd/sys/_types/_u_int8_t.h b/bsd/sys/_types/_u_int8_t.h
index 569b529a0..ac9bf7711 100644
--- a/bsd/sys/_types/_u_int8_t.h
+++ b/bsd/sys/_types/_u_int8_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _U_INT8_T
 #define _U_INT8_T
-typedef unsigned char u_int8_t;
+typedef unsigned char           u_int8_t;
 #endif /* _U_INT8_T */
diff --git a/bsd/sys/_types/_u_short.h b/bsd/sys/_types/_u_short.h
index c610d14c8..58816d357 100644
--- a/bsd/sys/_types/_u_short.h
+++ b/bsd/sys/_types/_u_short.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _U_SHORT
 #define _U_SHORT
-typedef unsigned short u_short;
+typedef unsigned short          u_short;
 #endif /* _U_SHORT */
diff --git a/bsd/sys/_types/_ucontext.h b/bsd/sys/_types/_ucontext.h
index 56a520d7a..65184e44e 100644
--- a/bsd/sys/_types/_ucontext.h
+++ b/bsd/sys/_types/_ucontext.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _STRUCT_UCONTEXT
@@ -30,9 +30,9 @@
 #include <sys/cdefs.h> /* __DARWIN_UNIX03 */
 
 #if __DARWIN_UNIX03
-#define _STRUCT_UCONTEXT struct __darwin_ucontext
+#define _STRUCT_UCONTEXT        struct __darwin_ucontext
 #else /* !__DARWIN_UNIX03 */
-#define _STRUCT_UCONTEXT struct ucontext
+#define _STRUCT_UCONTEXT        struct ucontext
 #endif /* __DARWIN_UNIX03 */
 
 #include <sys/_types.h> /* __darwin_size_t */
@@ -45,7 +45,7 @@ _STRUCT_UCONTEXT
 	__darwin_sigset_t uc_sigmask;   /* signal mask used by this context */
 	_STRUCT_SIGALTSTACK uc_stack;   /* stack used by this context */
 	_STRUCT_UCONTEXT *uc_link;      /* pointer to resuming context */
-	__darwin_size_t uc_mcsize; /* size of the machine context passed in */
+	__darwin_size_t uc_mcsize;      /* size of the machine context passed in */
 	_STRUCT_MCONTEXT *uc_mcontext;  /* pointer to machine specific context */
 #ifdef _XOPEN_SOURCE
 	_STRUCT_MCONTEXT __mcontext_data;
@@ -53,6 +53,6 @@ _STRUCT_UCONTEXT
 };
 /* user context */
 
-typedef _STRUCT_UCONTEXT ucontext_t; /* [???] user context */
+typedef _STRUCT_UCONTEXT        ucontext_t;     /* [???] user context */
 
 #endif /* _STRUCT_UCONTEXT */
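Not part of the patch: a sketch of the ucontext_t structure above in use. These context-switching calls are deprecated on modern Darwin (and require _XOPEN_SOURCE there, which is also what exposes __mcontext_data), but they still illustrate what uc_stack and uc_link are for.

```c
#define _XOPEN_SOURCE 600   /* required for ucontext on macOS */
#include <stdio.h>
#include <ucontext.h>

static ucontext_t main_ctx, co_ctx;
static char co_stack[64 * 1024];

static void coroutine(void)
{
	puts("in coroutine");
	swapcontext(&co_ctx, &main_ctx);   /* yield back to main */
}

int main(void)
{
	getcontext(&co_ctx);
	co_ctx.uc_stack.ss_sp   = co_stack;   /* the _STRUCT_SIGALTSTACK member */
	co_ctx.uc_stack.ss_size = sizeof(co_stack);
	co_ctx.uc_link = &main_ctx;           /* resume here when it returns */
	makecontext(&co_ctx, coroutine, 0);
	swapcontext(&main_ctx, &co_ctx);
	puts("back in main");
	return 0;
}
```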
diff --git a/bsd/sys/_types/_ucontext64.h b/bsd/sys/_types/_ucontext64.h
index 1befcc9b0..028f77a5d 100644
--- a/bsd/sys/_types/_ucontext64.h
+++ b/bsd/sys/_types/_ucontext64.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _STRUCT_UCONTEXT64
@@ -30,9 +30,9 @@
 #include <sys/cdefs.h> /* __DARWIN_UNIX03 */
 
 #if __DARWIN_UNIX03
-#define _STRUCT_UCONTEXT64 struct __darwin_ucontext64
+#define _STRUCT_UCONTEXT64      struct __darwin_ucontext64
 #else /* !__DARWIN_UNIX03 */
-#define _STRUCT_UCONTEXT64 struct ucontext64
+#define _STRUCT_UCONTEXT64      struct ucontext64
 #endif /* __DARWIN_UNIX03 */
 
 #include <sys/_types.h> /* __darwin_size_t */
@@ -45,9 +45,9 @@ _STRUCT_UCONTEXT64
 	__darwin_sigset_t uc_sigmask;   /* signal mask used by this context */
 	_STRUCT_SIGALTSTACK uc_stack;   /* stack used by this context */
 	_STRUCT_UCONTEXT64 *uc_link;    /* pointer to resuming context */
-	__darwin_size_t uc_mcsize; /* size of the machine context passed in */
-	_STRUCT_MCONTEXT64 *uc_mcontext64; /* pointer to machine specific context */
+	__darwin_size_t uc_mcsize;      /* size of the machine context passed in */
+	_STRUCT_MCONTEXT64 *uc_mcontext64;      /* pointer to machine specific context */
 };
 
-typedef _STRUCT_UCONTEXT64 ucontext64_t; /* [???] user context */
+typedef _STRUCT_UCONTEXT64      ucontext64_t;   /* [???] user context */
 
 #endif /* _STRUCT_UCONTEXT64 */
diff --git a/bsd/sys/_types/_uid_t.h b/bsd/sys/_types/_uid_t.h
index a4ca9cb6f..a9769db30 100644
--- a/bsd/sys/_types/_uid_t.h
+++ b/bsd/sys/_types/_uid_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#ifndef _UID_T
-#define _UID_T
+#ifndef _UID_T
+#define _UID_T
 #include <sys/_types.h> /* __darwin_uid_t */
-typedef __darwin_uid_t uid_t;
+typedef __darwin_uid_t        uid_t;
 #endif /* _UID_T */
diff --git a/bsd/sys/_types/_uintptr_t.h b/bsd/sys/_types/_uintptr_t.h
index 3b0bcce69..c22d02b1c 100644
--- a/bsd/sys/_types/_uintptr_t.h
+++ b/bsd/sys/_types/_uintptr_t.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,10 +22,10 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _UINTPTR_T
 #define _UINTPTR_T
-typedef unsigned long uintptr_t;
+typedef unsigned long           uintptr_t;
 #endif /* _UINTPTR_T */
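Not part of the patch: a sketch of the guarantee the uintptr_t typedef above provides. It is an unsigned integer (unsigned long here) wide enough to hold a pointer, so a pointer survives a round trip through it, and integer arithmetic such as alignment tests becomes legal.

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int x = 7;
	uintptr_t bits = (uintptr_t)&x;   /* pointer -> integer */
	int *p = (int *)bits;             /* integer -> pointer */

	printf("16-byte aligned: %s\n", (bits & 0xF) == 0 ? "yes" : "no");
	printf("round trip ok:   %s\n", p == &x ? "yes" : "no");
	return 0;
}
```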
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _USECONDS_T #define _USECONDS_T #include /* __darwin_useconds_t */ -typedef __darwin_useconds_t useconds_t; +typedef __darwin_useconds_t useconds_t; #endif /* _USECONDS_T */ diff --git a/bsd/sys/_types/_user32_itimerval.h b/bsd/sys/_types/_user32_itimerval.h index 130b19c39..074644eac 100644 --- a/bsd/sys/_types/_user32_itimerval.h +++ b/bsd/sys/_types/_user32_itimerval.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL #ifndef _STRUCT_USER32_ITIMERVAL -#define _STRUCT_USER32_ITIMERVAL struct user32_itimerval +#define _STRUCT_USER32_ITIMERVAL struct user32_itimerval _STRUCT_USER32_ITIMERVAL { _STRUCT_USER32_TIMEVAL it_interval; /* timer interval */ diff --git a/bsd/sys/_types/_user32_ntptimeval.h b/bsd/sys/_types/_user32_ntptimeval.h index cb69d5949..fb1268ac2 100644 --- a/bsd/sys/_types/_user32_ntptimeval.h +++ b/bsd/sys/_types/_user32_ntptimeval.h @@ -27,7 +27,7 @@ */ #ifdef KERNEL #ifndef _STRUCT_USER32_NTPTIMEVAL -#define _STRUCT_USER32_NTPTIMEVAL struct user32_ntptimeval +#define _STRUCT_USER32_NTPTIMEVAL struct user32_ntptimeval _STRUCT_USER32_NTPTIMEVAL { struct user32_timespec time; @@ -35,7 +35,6 @@ _STRUCT_USER32_NTPTIMEVAL user32_long_t esterror; user32_long_t tai; __int32_t time_state; - }; #endif /* _STRUCT_USER32_NTPTIMEVAL */ #endif /* KERNEL */ diff --git a/bsd/sys/_types/_user32_timespec.h b/bsd/sys/_types/_user32_timespec.h index 981360755..67e09e2d0 100644 --- a/bsd/sys/_types/_user32_timespec.h +++ b/bsd/sys/_types/_user32_timespec.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL #ifndef _STRUCT_USER32_TIMESPEC -#define _STRUCT_USER32_TIMESPEC struct user32_timespec +#define _STRUCT_USER32_TIMESPEC struct user32_timespec _STRUCT_USER32_TIMESPEC { - user32_time_t tv_sec; /* seconds */ + user32_time_t tv_sec; /* seconds */ user32_long_t tv_nsec; /* and nanoseconds */ }; #endif /* _STRUCT_USER32_TIMESPEC */ diff --git a/bsd/sys/_types/_user32_timeval.h b/bsd/sys/_types/_user32_timeval.h index fb3ef222f..8baa2c5d7 100644 --- a/bsd/sys/_types/_user32_timeval.h +++ b/bsd/sys/_types/_user32_timeval.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL #ifndef _STRUCT_USER32_TIMEVAL -#define _STRUCT_USER32_TIMEVAL struct user32_timeval +#define _STRUCT_USER32_TIMEVAL struct user32_timeval _STRUCT_USER32_TIMEVAL { - user32_time_t tv_sec; /* seconds */ + user32_time_t tv_sec; /* seconds */ __int32_t tv_usec; /* and microseconds */ }; #endif /* _STRUCT_USER32_TIMEVAL */ diff --git a/bsd/sys/_types/_user32_timex.h b/bsd/sys/_types/_user32_timex.h index 5627982d6..2a6d5a9bf 100644 --- a/bsd/sys/_types/_user32_timex.h +++ b/bsd/sys/_types/_user32_timex.h @@ -27,28 +27,27 @@ */ #ifdef KERNEL #ifndef _STRUCT_USER32_TIMEX -#define _STRUCT_USER32_TIMEX struct user32_timex +#define _STRUCT_USER32_TIMEX struct user32_timex _STRUCT_USER32_TIMEX { u_int32_t modes; - user32_long_t offset; - user32_long_t freq; - user32_long_t maxerror; - user32_long_t esterror; - __int32_t status; - user32_long_t constant; - user32_long_t precision; - user32_long_t tolerance; - - user32_long_t ppsfreq; - user32_long_t jitter; - __int32_t shift; - user32_long_t stabil; - user32_long_t jitcnt; - user32_long_t calcnt; - user32_long_t errcnt; - user32_long_t stbcnt; + user32_long_t offset; + user32_long_t freq; + user32_long_t maxerror; + user32_long_t esterror; + __int32_t status; + user32_long_t constant; + user32_long_t precision; + user32_long_t tolerance; + user32_long_t ppsfreq; + user32_long_t jitter; + __int32_t shift; + user32_long_t stabil; + user32_long_t jitcnt; + user32_long_t calcnt; + user32_long_t errcnt; + user32_long_t stbcnt; }; #endif /* _STRUCT_USER32_TIMEX */ #endif /* KERNEL */ diff --git a/bsd/sys/_types/_user64_itimerval.h b/bsd/sys/_types/_user64_itimerval.h index 4c58fece9..c824130c2 100644 --- a/bsd/sys/_types/_user64_itimerval.h +++ b/bsd/sys/_types/_user64_itimerval.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL #ifndef _STRUCT_USER64_ITIMERVAL -#define _STRUCT_USER64_ITIMERVAL struct user64_itimerval +#define _STRUCT_USER64_ITIMERVAL struct user64_itimerval _STRUCT_USER64_ITIMERVAL { _STRUCT_USER64_TIMEVAL it_interval; /* timer interval */ diff --git a/bsd/sys/_types/_user64_ntptimeval.h b/bsd/sys/_types/_user64_ntptimeval.h index 3c3c557fd..d85123676 100644 --- a/bsd/sys/_types/_user64_ntptimeval.h +++ b/bsd/sys/_types/_user64_ntptimeval.h @@ -27,7 +27,7 @@ */ #ifdef KERNEL #ifndef _STRUCT_USER64_NTPTIMEVAL -#define _STRUCT_USER64_NTPTIMEVAL struct user64_ntptimeval +#define _STRUCT_USER64_NTPTIMEVAL struct user64_ntptimeval _STRUCT_USER64_NTPTIMEVAL { struct user64_timespec time; @@ -35,7 +35,6 @@ _STRUCT_USER64_NTPTIMEVAL user64_long_t esterror; user64_long_t tai; __int64_t time_state; - }; #endif /* _STRUCT_USER64_NTPTIMEVAL */ #endif /* KERNEL */ diff --git a/bsd/sys/_types/_user64_timespec.h b/bsd/sys/_types/_user64_timespec.h index d80b1cee1..488ecaed2 100644 --- a/bsd/sys/_types/_user64_timespec.h +++ b/bsd/sys/_types/_user64_timespec.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL #ifndef _STRUCT_USER64_TIMESPEC -#define _STRUCT_USER64_TIMESPEC struct user64_timespec +#define _STRUCT_USER64_TIMESPEC struct user64_timespec _STRUCT_USER64_TIMESPEC { - user64_time_t tv_sec; /* seconds */ + user64_time_t tv_sec; /* seconds */ user64_long_t tv_nsec; /* and nanoseconds */ }; #endif /* _STRUCT_USER64_TIMESPEC */ diff --git a/bsd/sys/_types/_user64_timeval.h b/bsd/sys/_types/_user64_timeval.h index 38b1fca8e..b80339ef8 100644 --- a/bsd/sys/_types/_user64_timeval.h +++ b/bsd/sys/_types/_user64_timeval.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL #ifndef _STRUCT_USER64_TIMEVAL -#define _STRUCT_USER64_TIMEVAL struct user64_timeval +#define _STRUCT_USER64_TIMEVAL struct user64_timeval _STRUCT_USER64_TIMEVAL { user64_time_t tv_sec; /* seconds */ diff --git a/bsd/sys/_types/_user64_timex.h b/bsd/sys/_types/_user64_timex.h index eb1422e5e..3d69f1b7c 100644 --- a/bsd/sys/_types/_user64_timex.h +++ b/bsd/sys/_types/_user64_timex.h @@ -27,28 +27,27 @@ */ #ifdef KERNEL #ifndef _STRUCT_USER64_TIMEX -#define _STRUCT_USER64_TIMEX struct user64_timex +#define _STRUCT_USER64_TIMEX struct user64_timex _STRUCT_USER64_TIMEX { u_int32_t modes; - user64_long_t offset; - user64_long_t freq; - user64_long_t maxerror; - user64_long_t esterror; - __int32_t status; - user64_long_t constant; - user64_long_t precision; - user64_long_t tolerance; - - user64_long_t ppsfreq; - user64_long_t jitter; - __int32_t shift; - user64_long_t stabil; - user64_long_t jitcnt; - user64_long_t calcnt; - user64_long_t errcnt; - user64_long_t stbcnt; + user64_long_t offset; + user64_long_t freq; + user64_long_t maxerror; + user64_long_t esterror; + __int32_t status; + user64_long_t constant; + user64_long_t precision; + user64_long_t tolerance; + user64_long_t ppsfreq; + user64_long_t jitter; + __int32_t shift; + user64_long_t stabil; + user64_long_t jitcnt; + user64_long_t calcnt; + user64_long_t errcnt; + user64_long_t stbcnt; }; #endif /* _STRUCT_USER64_TIMEX */ #endif /* KERNEL */ diff --git a/bsd/sys/_types/_user_timespec.h b/bsd/sys/_types/_user_timespec.h index 9c25c3eb7..5a1a8444c 100644 --- a/bsd/sys/_types/_user_timespec.h +++ b/bsd/sys/_types/_user_timespec.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,20 +22,20 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL -/* LP64 version of struct timespec. time_t is a long and must grow when +/* LP64 version of struct timespec. time_t is a long and must grow when * we're dealing with a 64-bit process. 
* WARNING - keep in sync with struct timespec */ #ifndef _STRUCT_USER_TIMESPEC -#define _STRUCT_USER_TIMESPEC struct user_timespec +#define _STRUCT_USER_TIMESPEC struct user_timespec _STRUCT_USER_TIMESPEC { - user_time_t tv_sec; /* seconds */ + user_time_t tv_sec; /* seconds */ user_long_t tv_nsec; /* and nanoseconds */ }; #endif /* _STRUCT_USER_TIMESPEC */ diff --git a/bsd/sys/_types/_user_timeval.h b/bsd/sys/_types/_user_timeval.h index 01ae2404e..1bf72a6fa 100644 --- a/bsd/sys/_types/_user_timeval.h +++ b/bsd/sys/_types/_user_timeval.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL #ifndef _STRUCT_USER_TIMEVAL -#define _STRUCT_USER_TIMEVAL struct user_timeval +#define _STRUCT_USER_TIMEVAL struct user_timeval _STRUCT_USER_TIMEVAL { user_time_t tv_sec; /* seconds */ diff --git a/bsd/sys/_types/_uuid_t.h b/bsd/sys/_types/_uuid_t.h index e459143cd..66e7da794 100644 --- a/bsd/sys/_types/_uuid_t.h +++ b/bsd/sys/_types/_uuid_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
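The user32_*/user64_* shadow structures being retabbed here exist so the kernel can copy a fixed-layout timespec/timeval/timex in from either a 32-bit or a 64-bit process and widen it to its own layout, per the "time_t is a long and must grow" comment above. A minimal sketch of that widening step, with int32_t/int64_t standing in for the user32_*/user_* typedefs and a hypothetical helper name:

#include <stdint.h>

/* Stand-ins for the layouts defined above: user32_time_t/user32_long_t
 * are 32-bit, user_time_t/user_long_t are 64-bit in an LP64 kernel. */
struct user32_timespec { int32_t tv_sec; int32_t tv_nsec; };
struct user_timespec   { int64_t tv_sec; int64_t tv_nsec; };

/* Hypothetical helper: widen a 32-bit caller's timespec after copyin(). */
static struct user_timespec
widen_timespec32(struct user32_timespec ts32)
{
    struct user_timespec ts;
    ts.tv_sec  = ts32.tv_sec;    /* sign-extends to 64 bits */
    ts.tv_nsec = ts32.tv_nsec;
    return ts;
}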
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _UUID_T #define _UUID_T #include /* __darwin_uuid_t */ -typedef __darwin_uuid_t uuid_t; +typedef __darwin_uuid_t uuid_t; #endif /* _UUID_T */ diff --git a/bsd/sys/_types/_va_list.h b/bsd/sys/_types/_va_list.h index 48a2b9969..f7687baea 100644 --- a/bsd/sys/_types/_va_list.h +++ b/bsd/sys/_types/_va_list.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/sys/_types/_wchar_t.h b/bsd/sys/_types/_wchar_t.h index a452a5fac..d67cfcdd2 100644 --- a/bsd/sys/_types/_wchar_t.h +++ b/bsd/sys/_types/_wchar_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/sys/_types/_wint_t.h b/bsd/sys/_types/_wint_t.h index 66dd7c37f..caad07fdb 100644 --- a/bsd/sys/_types/_wint_t.h +++ b/bsd/sys/_types/_wint_t.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/sys/acct.h b/bsd/sys/acct.h index 8162eb91f..2f86e79bc 100644 --- a/bsd/sys/acct.h +++ b/bsd/sys/acct.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -82,37 +82,37 @@ typedef u_int16_t comp_t; struct acct { - char ac_comm[10]; /* command name */ - comp_t ac_utime; /* user time */ - comp_t ac_stime; /* system time */ - comp_t ac_etime; /* elapsed time */ - u_int32_t ac_btime; /* starting time */ - uid_t ac_uid; /* user id */ - gid_t ac_gid; /* group id */ - u_int16_t ac_mem; /* average memory usage */ - comp_t ac_io; /* count of IO blocks */ - dev_t ac_tty; /* controlling tty */ + char ac_comm[10]; /* command name */ + comp_t ac_utime; /* user time */ + comp_t ac_stime; /* system time */ + comp_t ac_etime; /* elapsed time */ + u_int32_t ac_btime; /* starting time */ + uid_t ac_uid; /* user id */ + gid_t ac_gid; /* group id */ + u_int16_t ac_mem; /* average memory usage */ + comp_t ac_io; /* count of IO blocks */ + dev_t ac_tty; /* controlling tty */ -#define AFORK 0x01 /* fork'd but not exec'd */ -#define ASU 0x02 /* used super-user permissions */ -#define ACOMPAT 0x04 /* used compatibility mode */ -#define ACORE 0x08 /* dumped core */ -#define AXSIG 0x10 /* killed by a signal */ - u_int8_t ac_flag; /* accounting flags */ +#define AFORK 0x01 /* fork'd but not exec'd */ +#define ASU 0x02 /* used super-user permissions */ +#define ACOMPAT 0x04 /* used compatibility mode */ +#define ACORE 0x08 /* dumped core */ +#define AXSIG 0x10 /* killed by a signal */ + u_int8_t ac_flag; /* accounting flags */ }; /* * 1/AHZ is the granularity of the data encoded in the comp_t fields. * This is not necessarily equal to hz. */ -#define AHZ 64 +#define AHZ 64 #ifdef KERNEL #ifdef __APPLE_API_PRIVATE -extern struct vnode *acctp; +extern struct vnode *acctp; __BEGIN_DECLS -int acct_process(struct proc *p); +int acct_process(struct proc *p); __END_DECLS #endif /* __APPLE_API_PRIVATE */ diff --git a/bsd/sys/aio.h b/bsd/sys/aio.h index 7a887288f..7f4feaa5c 100644 --- a/bsd/sys/aio.h +++ b/bsd/sys/aio.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,20 +22,20 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * File: sys/aio.h * Author: Umesh Vaishampayan [umeshv@apple.com] * 05-Feb-2003 umeshv Created. * * Header file for POSIX Asynchronous IO APIs * - */ + */ #ifndef _SYS_AIO_H_ -#define _SYS_AIO_H_ +#define _SYS_AIO_H_ #include #include @@ -44,7 +44,7 @@ /* * [XSI] Inclusion of the header may make visible symbols defined * in the headers , , , and . - * + * * In our case, this is limited to struct timespec, off_t and ssize_t. 
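The AHZ definition above fixes the granularity of the comp_t accounting fields at 1/64 second. A hedged decoding sketch follows; it assumes the traditional BSD comp_t layout (a 3-bit base-8 exponent over a 13-bit fraction), which is not shown in this hunk:

#include <stdint.h>

#define AHZ 64   /* granularity of comp_t values, from the header above */

/* Assumed traditional BSD layout: 3-bit base-8 exponent, 13-bit fraction. */
static double
comp_t_to_seconds(uint16_t c)
{
    uint32_t ticks = c & 0x1fff;      /* low 13 bits: fraction */
    unsigned exp   = (c >> 13) & 0x7; /* high 3 bits: base-8 exponent */
    while (exp--) {
        ticks <<= 3;                  /* scale by 8 per exponent step */
    }
    return (double)ticks / AHZ;       /* comp_t counts 1/AHZ-second ticks */
}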
*/ #include @@ -68,46 +68,46 @@ #ifndef KERNEL struct aiocb { - int aio_fildes; /* File descriptor */ - off_t aio_offset; /* File offset */ - volatile void *aio_buf; /* Location of buffer */ - size_t aio_nbytes; /* Length of transfer */ - int aio_reqprio; /* Request priority offset */ - struct sigevent aio_sigevent; /* Signal number and value */ - int aio_lio_opcode; /* Operation to be performed */ + int aio_fildes; /* File descriptor */ + off_t aio_offset; /* File offset */ + volatile void *aio_buf; /* Location of buffer */ + size_t aio_nbytes; /* Length of transfer */ + int aio_reqprio; /* Request priority offset */ + struct sigevent aio_sigevent; /* Signal number and value */ + int aio_lio_opcode; /* Operation to be performed */ }; #endif /* KERNEL */ #ifdef KERNEL struct user_aiocb { - int aio_fildes; /* File descriptor */ - off_t aio_offset; /* File offset */ - user_addr_t aio_buf; /* Location of buffer */ - user_size_t aio_nbytes; /* Length of transfer */ - int aio_reqprio; /* Request priority offset */ - struct user_sigevent aio_sigevent; /* Signal number and value */ - int aio_lio_opcode; /* Operation to be performed */ + int aio_fildes; /* File descriptor */ + off_t aio_offset; /* File offset */ + user_addr_t aio_buf; /* Location of buffer */ + user_size_t aio_nbytes; /* Length of transfer */ + int aio_reqprio; /* Request priority offset */ + struct user_sigevent aio_sigevent; /* Signal number and value */ + int aio_lio_opcode; /* Operation to be performed */ }; struct user64_aiocb { - int aio_fildes; /* File descriptor */ - user64_off_t aio_offset; /* File offset */ - user64_addr_t aio_buf; /* Location of buffer */ - user64_size_t aio_nbytes; /* Length of transfer */ - int aio_reqprio; /* Request priority offset */ - struct user64_sigevent aio_sigevent; /* Signal number and value */ - int aio_lio_opcode; /* Operation to be performed */ + int aio_fildes; /* File descriptor */ + user64_off_t aio_offset; /* File offset */ + user64_addr_t aio_buf; /* Location of buffer */ + user64_size_t aio_nbytes; /* Length of transfer */ + int aio_reqprio; /* Request priority offset */ + struct user64_sigevent aio_sigevent; /* Signal number and value */ + int aio_lio_opcode; /* Operation to be performed */ }; struct user32_aiocb { - int aio_fildes; /* File descriptor */ - user32_off_t aio_offset; /* File offset */ - user32_addr_t aio_buf; /* Location of buffer */ - user32_size_t aio_nbytes; /* Length of transfer */ - int aio_reqprio; /* Request priority offset */ - struct user32_sigevent aio_sigevent; /* Signal number and value */ - int aio_lio_opcode; /* Operation to be performed */ + int aio_fildes; /* File descriptor */ + user32_off_t aio_offset; /* File offset */ + user32_addr_t aio_buf; /* Location of buffer */ + user32_size_t aio_nbytes; /* Length of transfer */ + int aio_reqprio; /* Request priority offset */ + struct user32_sigevent aio_sigevent; /* Signal number and value */ + int aio_lio_opcode; /* Operation to be performed */ }; #endif // KERNEL @@ -120,25 +120,25 @@ struct user32_aiocb { * none of the requested operations could be canceled since they are * already complete. 
*/ -#define AIO_ALLDONE 0x1 +#define AIO_ALLDONE 0x1 /* all requested operations have been canceled */ -#define AIO_CANCELED 0x2 +#define AIO_CANCELED 0x2 /* * some of the requested operations could not be canceled since * they are in progress */ -#define AIO_NOTCANCELED 0x4 +#define AIO_NOTCANCELED 0x4 /* * lio_listio operation options */ -#define LIO_NOP 0x0 /* option indicating that no transfer is requested */ -#define LIO_READ 0x1 /* option requesting a read */ -#define LIO_WRITE 0x2 /* option requesting a write */ +#define LIO_NOP 0x0 /* option indicating that no transfer is requested */ +#define LIO_READ 0x1 /* option requesting a read */ +#define LIO_WRITE 0x2 /* option requesting a write */ /* * lio_listio() modes @@ -150,19 +150,19 @@ struct user32_aiocb { * the lio_listio() operation is being performed, and no * notification is given when the operation is complete */ -#define LIO_NOWAIT 0x1 +#define LIO_NOWAIT 0x1 /* * A lio_listio() synchronization operation indicating * that the calling thread is to suspend until the * lio_listio() operation is complete. */ -#define LIO_WAIT 0x2 +#define LIO_WAIT 0x2 /* * Maximum number of operations in single lio_listio call */ -#define AIO_LISTIO_MAX 16 +#define AIO_LISTIO_MAX 16 #ifndef KERNEL @@ -173,116 +173,116 @@ struct user32_aiocb { __BEGIN_DECLS /* - * Attempt to cancel one or more asynchronous I/O requests currently outstanding - * against file descriptor fd. The aiocbp argument points to the asynchronous I/O - * control block for a particular request to be canceled. If aiocbp is NULL, then + * Attempt to cancel one or more asynchronous I/O requests currently outstanding + * against file descriptor fd. The aiocbp argument points to the asynchronous I/O + * control block for a particular request to be canceled. If aiocbp is NULL, then * all outstanding cancelable asynchronous I/O requests against fd shall be canceled. */ -int aio_cancel( int fd, - struct aiocb * aiocbp ); - +int aio_cancel( int fd, + struct aiocb * aiocbp ); + /* - * Return the error status associated with the aiocb structure referenced by the - * aiocbp argument. The error status for an asynchronous I/O operation is the errno + * Return the error status associated with the aiocb structure referenced by the + * aiocbp argument. The error status for an asynchronous I/O operation is the errno * value that would be set by the corresponding read(), write(), or fsync() - * operation. If the operation has not yet completed, then the error status shall + * operation. If the operation has not yet completed, then the error status shall * be equal to [EINPROGRESS]. */ -int aio_error( const struct aiocb * aiocbp ); +int aio_error( const struct aiocb * aiocbp ); /* - * Asynchronously force all I/O operations associated with the file indicated by - * the file descriptor aio_fildes member of the aiocb structure referenced by the - * aiocbp argument and queued at the time of the call to aio_fsync() to the + * Asynchronously force all I/O operations associated with the file indicated by + * the file descriptor aio_fildes member of the aiocb structure referenced by the + * aiocbp argument and queued at the time of the call to aio_fsync() to the * synchronized I/O completion state. The function call shall return when the * synchronization request has been initiated or queued. op O_SYNC is the only * supported operation at this time. - * The aiocbp argument refers to an asynchronous I/O control block.
The aiocbp - * value may be used as an argument to aio_error() and aio_return() in order to - * determine the error status and return status, respectively, of the asynchronous - * operation while it is proceeding. When the request is queued, the error status - * for the operation is [EINPROGRESS]. When all data has been successfully - * transferred, the error status shall be reset to reflect the success or failure + * The aiocbp argument refers to an asynchronous I/O control block. The aiocbp + * value may be used as an argument to aio_error() and aio_return() in order to + * determine the error status and return status, respectively, of the asynchronous + * operation while it is proceeding. When the request is queued, the error status + * for the operation is [EINPROGRESS]. When all data has been successfully + * transferred, the error status shall be reset to reflect the success or failure * of the operation. */ -int aio_fsync( int op, - struct aiocb * aiocbp ); - +int aio_fsync( int op, + struct aiocb * aiocbp ); + /* - * Read aiocbp->aio_nbytes from the file associated with aiocbp->aio_fildes into - * the buffer pointed to by aiocbp->aio_buf. The function call shall return when + * Read aiocbp->aio_nbytes from the file associated with aiocbp->aio_fildes into + * the buffer pointed to by aiocbp->aio_buf. The function call shall return when * the read request has been initiated or queued. - * The aiocbp value may be used as an argument to aio_error() and aio_return() in - * order to determine the error status and return status, respectively, of the - * asynchronous operation while it is proceeding. If an error condition is - * encountered during queuing, the function call shall return without having - * initiated or queued the request. The requested operation takes place at the - * absolute position in the file as given by aio_offset, as if lseek() were called - * immediately prior to the operation with an offset equal to aio_offset and a - * whence equal to SEEK_SET. After a successful call to enqueue an asynchronous + * The aiocbp value may be used as an argument to aio_error() and aio_return() in + * order to determine the error status and return status, respectively, of the + * asynchronous operation while it is proceeding. If an error condition is + * encountered during queuing, the function call shall return without having + * initiated or queued the request. The requested operation takes place at the + * absolute position in the file as given by aio_offset, as if lseek() were called + * immediately prior to the operation with an offset equal to aio_offset and a + * whence equal to SEEK_SET. After a successful call to enqueue an asynchronous * I/O operation, the value of the file offset for the file is unspecified. */ -int aio_read( struct aiocb * aiocbp ); +int aio_read( struct aiocb * aiocbp ); /* - * Return the return status associated with the aiocb structure referenced by - * the aiocbp argument. The return status for an asynchronous I/O operation is - * the value that would be returned by the corresponding read(), write(), or - * fsync() function call. If the error status for the operation is equal to - * [EINPROGRESS], then the return status for the operation is undefined. The - * aio_return() function may be called exactly once to retrieve the return status - * of a given asynchronous operation; thereafter, if the same aiocb structure - * is used in a call to aio_return() or aio_error(), an error may be returned. 
+ * Return the return status associated with the aiocb structure referenced by + * the aiocbp argument. The return status for an asynchronous I/O operation is + * the value that would be returned by the corresponding read(), write(), or + * fsync() function call. If the error status for the operation is equal to + * [EINPROGRESS], then the return status for the operation is undefined. The + * aio_return() function may be called exactly once to retrieve the return status + * of a given asynchronous operation; thereafter, if the same aiocb structure + * is used in a call to aio_return() or aio_error(), an error may be returned. * When the aiocb structure referred to by aiocbp is used to submit another - * asynchronous operation, then aio_return() may be successfully used to + * asynchronous operation, then aio_return() may be successfully used to * retrieve the return status of that operation. */ -ssize_t aio_return( struct aiocb * aiocbp ); +ssize_t aio_return( struct aiocb * aiocbp ); /* - * Suspend the calling thread until at least one of the asynchronous I/O - * operations referenced by the aiocblist argument has completed, until a signal - * interrupts the function, or, if timeout is not NULL, until the time - * interval specified by timeout has passed. If any of the aiocb structures - * in the aiocblist correspond to completed asynchronous I/O operations (that is, - * the error status for the operation is not equal to [EINPROGRESS]) at the - * time of the call, the function shall return without suspending the calling - * thread. The aiocblist argument is an array of pointers to asynchronous I/O - * control blocks. The nent argument indicates the number of elements in the - * array. Each aiocb structure pointed to has been used in initiating an - * asynchronous I/O request via aio_read(), aio_write(), or lio_listio(). This + * Suspend the calling thread until at least one of the asynchronous I/O + * operations referenced by the aiocblist argument has completed, until a signal + * interrupts the function, or, if timeout is not NULL, until the time + * interval specified by timeout has passed. If any of the aiocb structures + * in the aiocblist correspond to completed asynchronous I/O operations (that is, + * the error status for the operation is not equal to [EINPROGRESS]) at the + * time of the call, the function shall return without suspending the calling + * thread. The aiocblist argument is an array of pointers to asynchronous I/O + * control blocks. The nent argument indicates the number of elements in the + * array. Each aiocb structure pointed to has been used in initiating an + * asynchronous I/O request via aio_read(), aio_write(), or lio_listio(). This * array may contain NULL pointers, which are ignored. */ -int aio_suspend( const struct aiocb *const aiocblist[], - int nent, - const struct timespec * timeoutp ) __DARWIN_ALIAS_C(aio_suspend); - +int aio_suspend( const struct aiocb *const aiocblist[], + int nent, + const struct timespec * timeoutp ) __DARWIN_ALIAS_C(aio_suspend); + /* - * Write aiocbp->aio_nbytes to the file associated with aiocbp->aio_fildes from - * the buffer pointed to by aiocbp->aio_buf. The function shall return when the + * Write aiocbp->aio_nbytes to the file associated with aiocbp->aio_fildes from + * the buffer pointed to by aiocbp->aio_buf. The function shall return when the * write request has been initiated or, at a minimum, queued. 
- * The aiocbp argument may be used as an argument to aio_error() and aio_return() - * in order to determine the error status and return status, respectively, of the + * The aiocbp argument may be used as an argument to aio_error() and aio_return() + * in order to determine the error status and return status, respectively, of the * asynchronous operation while it is proceeding. */ -int aio_write( struct aiocb * aiocbp ); +int aio_write( struct aiocb * aiocbp ); /* - * Initiate a list of I/O requests with a single function call. The mode - * argument takes one of the values LIO_WAIT or LIO_NOWAIT and determines whether - * the function returns when the I/O operations have been completed, or as soon - * as the operations have been queued. If the mode argument is LIO_WAIT, the - * function shall wait until all I/O is complete and the sig argument shall be - * ignored. - * If the mode argument is LIO_NOWAIT, the function shall return immediately, and - * asynchronous notification shall occur, according to the sig argument, when all + * Initiate a list of I/O requests with a single function call. The mode + * argument takes one of the values LIO_WAIT or LIO_NOWAIT and determines whether + * the function returns when the I/O operations have been completed, or as soon + * as the operations have been queued. If the mode argument is LIO_WAIT, the + * function shall wait until all I/O is complete and the sig argument shall be + * ignored. + * If the mode argument is LIO_NOWAIT, the function shall return immediately, and + * asynchronous notification shall occur, according to the sig argument, when all * the I/O operations complete. If sig is NULL, then no asynchronous notification * shall occur. */ -int lio_listio( int mode, - struct aiocb *const aiocblist[], - int nent, - struct sigevent *sigp ); +int lio_listio( int mode, + struct aiocb *const aiocblist[], + int nent, + struct sigevent *sigp ); __END_DECLS #endif /* KERNEL */ diff --git a/bsd/sys/aio_kern.h b/bsd/sys/aio_kern.h index 4b08724b1..8412a2c48 100644 --- a/bsd/sys/aio_kern.h +++ b/bsd/sys/aio_kern.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,88 +22,87 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
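The comment block above spells out the user-space contract for the POSIX AIO calls: enqueue with aio_read()/aio_write(), wait with aio_suspend(), poll aio_error() until it stops returning [EINPROGRESS], then harvest the result exactly once with aio_return(). A minimal sketch of that protocol (not part of the patch; /etc/hosts is just a convenient input file):

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    static char buf[512];
    int fd = open("/etc/hosts", O_RDONLY);
    if (fd < 0) {
        return 1;
    }

    struct aiocb cb;
    memset(&cb, 0, sizeof cb);
    cb.aio_fildes = fd;
    cb.aio_buf    = buf;
    cb.aio_nbytes = sizeof buf;
    cb.aio_offset = 0;                 /* absolute offset, per the comments */

    if (aio_read(&cb) != 0) {          /* enqueue the asynchronous read */
        return 1;
    }

    /* aio_suspend() blocks until a request in the list completes. */
    const struct aiocb *list[1] = { &cb };
    int err;
    while ((err = aio_error(&cb)) == EINPROGRESS) {
        aio_suspend(list, 1, NULL);
    }

    /* aio_return() is valid exactly once per request, as documented above. */
    ssize_t n = aio_return(&cb);
    printf("aio_error=%d aio_return=%zd\n", err, n);
    close(fd);
    return 0;
}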
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * File: sys/aio_kern.h * Author: Jerry Cottingham [jerryc@apple.com] * * Header file for kernel only portion of POSIX Asynchronous IO APIs * - */ + */ #include #ifndef _SYS_AIO_KERN_H_ -#define _SYS_AIO_KERN_H_ +#define _SYS_AIO_KERN_H_ #ifdef KERNEL -struct aio_workq_entry -{ - TAILQ_ENTRY( aio_workq_entry ) aio_workq_link; /* Protected by queue lock */ +struct aio_workq_entry { + TAILQ_ENTRY( aio_workq_entry ) aio_workq_link; /* Protected by queue lock */ TAILQ_ENTRY( aio_workq_entry ) aio_proc_link; /* Protected by proc's aio lock */ /* Proc lock */ - void *group_tag; /* identifier used to group IO requests */ + void *group_tag; /* identifier used to group IO requests */ /* Initialized and never changed, safe to access */ - struct proc *procp; /* user proc that queued this request */ - user_addr_t uaiocbp; /* pointer passed in from user land */ - struct user_aiocb aiocb; /* copy of aiocb from user land */ - vm_map_t aio_map; /* user land map we have a reference to */ - thread_t thread; /* thread that queued this request */ + struct proc *procp; /* user proc that queued this request */ + user_addr_t uaiocbp; /* pointer passed in from user land */ + struct user_aiocb aiocb; /* copy of aiocb from user land */ + vm_map_t aio_map; /* user land map we have a reference to */ + thread_t thread; /* thread that queued this request */ /* Entry lock */ - int aio_refcount; - user_ssize_t returnval; /* return value from read / write request */ - int errorval; /* error value from read / write request */ - int flags; + int aio_refcount; + user_ssize_t returnval; /* return value from read / write request */ + int errorval; /* error value from read / write request */ + int flags; }; typedef struct aio_workq_entry aio_workq_entry; /* * definitions for aio_workq_entry.flags */ -#define AIO_READ 0x00000001 /* a read */ -#define AIO_WRITE 0x00000002 /* a write */ -#define AIO_FSYNC 0x00000004 /* aio_fsync with op = O_SYNC */ -#define AIO_DSYNC 0x00000008 /* aio_fsync with op = O_DSYNC (not supported yet) */ -#define AIO_LIO 0x00000010 /* lio_listio generated IO */ -#define AIO_DO_FREE 0x00000800 /* entry should be freed when last reference is dropped. */ - /* set by aio_return() and _aio_exit() */ -#define AIO_DISABLE 0x00002000 /* process is trying to exit or exec and we need */ - /* to not try to send a signal from do_aio_completion() */ -#define AIO_CLOSE_WAIT 0x00004000 /* process is trying to close and is */ - /* waiting for one or more active IO requests to */ - /* complete */ -#define AIO_EXIT_WAIT 0x00008000 /* process is trying to exit or exec and is */ - /* waiting for one or more active IO requests to */ - /* complete */ - -#define AIO_LIO_NOTIFY 0x00010000 /* wait for list complete */ +#define AIO_READ 0x00000001 /* a read */ +#define AIO_WRITE 0x00000002 /* a write */ +#define AIO_FSYNC 0x00000004 /* aio_fsync with op = O_SYNC */ +#define AIO_DSYNC 0x00000008 /* aio_fsync with op = O_DSYNC (not supported yet) */ +#define AIO_LIO 0x00000010 /* lio_listio generated IO */ +#define AIO_DO_FREE 0x00000800 /* entry should be freed when last reference is dropped.
*/ + /* set by aio_return() and _aio_exit() */ +#define AIO_DISABLE 0x00002000 /* process is trying to exit or exec and we need */ + /* to not try to send a signal from do_aio_completion() */ +#define AIO_CLOSE_WAIT 0x00004000 /* process is trying to close and is */ + /* waiting for one or more active IO requests to */ + /* complete */ +#define AIO_EXIT_WAIT 0x00008000 /* process is trying to exit or exec and is */ + /* waiting for one or more active IO requests to */ + /* complete */ + +#define AIO_LIO_NOTIFY 0x00010000 /* wait for list complete */ /* * Prototypes */ -__private_extern__ void +__private_extern__ void _aio_close(struct proc *p, int fd); -__private_extern__ void +__private_extern__ void _aio_exit(struct proc *p); -__private_extern__ void +__private_extern__ void _aio_exec(struct proc *p); -__private_extern__ void +__private_extern__ void _aio_create_worker_threads(int num); -__private_extern__ void +__private_extern__ void aio_init(void); -task_t +task_t get_aiotask(void); #endif /* KERNEL */ diff --git a/bsd/sys/appleapiopts.h b/bsd/sys/appleapiopts.h index 20557019b..92e9fd691 100644 --- a/bsd/sys/appleapiopts.h +++ b/bsd/sys/appleapiopts.h @@ -2,7 +2,7 @@ * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -59,4 +59,3 @@ #endif /* __APPLE_API_STRICT_CONFORMANCE */ #endif /* __SYS_APPLEAPIOPTS_H__ */ - diff --git a/bsd/sys/attr.h b/bsd/sys/attr.h index 45540a9aa..cdf7e13a7 100644 --- a/bsd/sys/attr.h +++ b/bsd/sys/attr.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -43,19 +43,19 @@ #include #include -#define FSOPT_NOFOLLOW 0x00000001 -#define FSOPT_NOINMEMUPDATE 0x00000002 -#define FSOPT_REPORT_FULLSIZE 0x00000004 +#define FSOPT_NOFOLLOW 0x00000001 +#define FSOPT_NOINMEMUPDATE 0x00000002 +#define FSOPT_REPORT_FULLSIZE 0x00000004 /* The following option only valid when requesting ATTR_CMN_RETURNED_ATTRS */ -#define FSOPT_PACK_INVAL_ATTRS 0x00000008 +#define FSOPT_PACK_INVAL_ATTRS 0x00000008 #ifdef PRIVATE #define FSOPT_EXCHANGE_DATA_ONLY 0x0000010 #endif -#define FSOPT_ATTR_CMN_EXTENDED 0x00000020 +#define FSOPT_ATTR_CMN_EXTENDED 0x00000020 #ifdef PRIVATE -#define FSOPT_LIST_SNAPSHOT 0x00000040 +#define FSOPT_LIST_SNAPSHOT 0x00000040 #endif /* PRIVATE */ /* we currently aren't anywhere near this amount for a valid @@ -80,22 +80,22 @@ typedef u_int32_t fsvolid_t; typedef u_int32_t attrgroup_t; struct attrlist { - u_short bitmapcount; /* number of attr. bit sets in list (should be 5) */ - u_int16_t reserved; /* (to maintain 4-byte alignment) */ - attrgroup_t commonattr; /* common attribute group */ - attrgroup_t volattr; /* Volume attribute group */ - attrgroup_t dirattr; /* directory attribute group */ - attrgroup_t fileattr; /* file attribute group */ - attrgroup_t forkattr; /* fork attribute group */ + u_short bitmapcount; /* number of attr. bit sets in list (should be 5) */ + u_int16_t reserved; /* (to maintain 4-byte alignment) */ + attrgroup_t commonattr; /* common attribute group */ + attrgroup_t volattr; /* Volume attribute group */ + attrgroup_t dirattr; /* directory attribute group */ + attrgroup_t fileattr; /* file attribute group */ + attrgroup_t forkattr; /* fork attribute group */ }; #define ATTR_BIT_MAP_COUNT 5 typedef struct attribute_set { - attrgroup_t commonattr; /* common attribute group */ - attrgroup_t volattr; /* Volume attribute group */ - attrgroup_t dirattr; /* directory attribute group */ - attrgroup_t fileattr; /* file attribute group */ - attrgroup_t forkattr; /* fork attribute group */ + attrgroup_t commonattr; /* common attribute group */ + attrgroup_t volattr; /* Volume attribute group */ + attrgroup_t dirattr; /* directory attribute group */ + attrgroup_t fileattr; /* file attribute group */ + attrgroup_t forkattr; /* fork attribute group */ } attribute_set_t; typedef struct attrreference { @@ -106,8 +106,8 @@ typedef struct attrreference { /* XXX PPD This is derived from HFSVolumePriv.h and should perhaps be referenced from there? */ struct diskextent { - u_int32_t startblock; /* first block allocated */ - u_int32_t blockcount; /* number of blocks allocated */ + u_int32_t startblock; /* first block allocated */ + u_int32_t blockcount; /* number of blocks allocated */ }; typedef struct diskextent extentrecord[8]; @@ -127,7 +127,7 @@ typedef struct vol_capabilities_attr { /* * XXX this value needs to be raised - 3893388 */ -#define ATTR_MAX_BUFFER 8192 +#define ATTR_MAX_BUFFER 8192 /* * VOL_CAP_FMT_PERSISTENTOBJECTIDS: When set, the volume has object IDs @@ -185,8 +185,8 @@ typedef struct vol_capabilities_attr { * need not be cached by those upper layers. 
A volume that caches * the statfs information in its in-memory structures should set this bit. * A volume that must always read from disk or always perform a network - * transaction should not set this bit. - * + * transaction should not set this bit. + * * VOL_CAP_FMT_2TB_FILESIZE: If this bit is set the volume format supports * file sizes larger than 4GB, and potentially up to 2TB; it does not * indicate whether the filesystem supports files larger than that. @@ -206,12 +206,12 @@ typedef struct vol_capabilities_attr { * system are persistent and not recycled. This is a very specialized * capability and it is assumed that most file systems will not support * it. Its use is for legacy non-posix APIs like ResolveFileIDRef. - * - * VOL_CAP_FMT_NO_VOLUME_SIZES: When set, the volume does not support + * + * VOL_CAP_FMT_NO_VOLUME_SIZES: When set, the volume does not support * returning values for total data blocks, available blocks, or free blocks * (as in f_blocks, f_bavail, or f_bfree in "struct statfs"). Historically, * those values were set to 0xFFFFFFFF for volumes that did not support them. - * + * * VOL_CAP_FMT_DECMPFS_COMPRESSION: When set, the volume supports transparent * decompression of compressed files using decmpfs. * @@ -236,29 +236,29 @@ typedef struct vol_capabilities_attr { * VOL_CAP_FMT_NO_PERMISSIONS: When set, the volume does not support setting * permissions. */ -#define VOL_CAP_FMT_PERSISTENTOBJECTIDS 0x00000001 -#define VOL_CAP_FMT_SYMBOLICLINKS 0x00000002 -#define VOL_CAP_FMT_HARDLINKS 0x00000004 -#define VOL_CAP_FMT_JOURNAL 0x00000008 -#define VOL_CAP_FMT_JOURNAL_ACTIVE 0x00000010 -#define VOL_CAP_FMT_NO_ROOT_TIMES 0x00000020 -#define VOL_CAP_FMT_SPARSE_FILES 0x00000040 -#define VOL_CAP_FMT_ZERO_RUNS 0x00000080 -#define VOL_CAP_FMT_CASE_SENSITIVE 0x00000100 -#define VOL_CAP_FMT_CASE_PRESERVING 0x00000200 -#define VOL_CAP_FMT_FAST_STATFS 0x00000400 -#define VOL_CAP_FMT_2TB_FILESIZE 0x00000800 -#define VOL_CAP_FMT_OPENDENYMODES 0x00001000 -#define VOL_CAP_FMT_HIDDEN_FILES 0x00002000 -#define VOL_CAP_FMT_PATH_FROM_ID 0x00004000 -#define VOL_CAP_FMT_NO_VOLUME_SIZES 0x00008000 -#define VOL_CAP_FMT_DECMPFS_COMPRESSION 0x00010000 -#define VOL_CAP_FMT_64BIT_OBJECT_IDS 0x00020000 -#define VOL_CAP_FMT_DIR_HARDLINKS 0x00040000 -#define VOL_CAP_FMT_DOCUMENT_ID 0x00080000 -#define VOL_CAP_FMT_WRITE_GENERATION_COUNT 0x00100000 -#define VOL_CAP_FMT_NO_IMMUTABLE_FILES 0x00200000 -#define VOL_CAP_FMT_NO_PERMISSIONS 0x00400000 +#define VOL_CAP_FMT_PERSISTENTOBJECTIDS 0x00000001 +#define VOL_CAP_FMT_SYMBOLICLINKS 0x00000002 +#define VOL_CAP_FMT_HARDLINKS 0x00000004 +#define VOL_CAP_FMT_JOURNAL 0x00000008 +#define VOL_CAP_FMT_JOURNAL_ACTIVE 0x00000010 +#define VOL_CAP_FMT_NO_ROOT_TIMES 0x00000020 +#define VOL_CAP_FMT_SPARSE_FILES 0x00000040 +#define VOL_CAP_FMT_ZERO_RUNS 0x00000080 +#define VOL_CAP_FMT_CASE_SENSITIVE 0x00000100 +#define VOL_CAP_FMT_CASE_PRESERVING 0x00000200 +#define VOL_CAP_FMT_FAST_STATFS 0x00000400 +#define VOL_CAP_FMT_2TB_FILESIZE 0x00000800 +#define VOL_CAP_FMT_OPENDENYMODES 0x00001000 +#define VOL_CAP_FMT_HIDDEN_FILES 0x00002000 +#define VOL_CAP_FMT_PATH_FROM_ID 0x00004000 +#define VOL_CAP_FMT_NO_VOLUME_SIZES 0x00008000 +#define VOL_CAP_FMT_DECMPFS_COMPRESSION 0x00010000 +#define VOL_CAP_FMT_64BIT_OBJECT_IDS 0x00020000 +#define VOL_CAP_FMT_DIR_HARDLINKS 0x00040000 +#define VOL_CAP_FMT_DOCUMENT_ID 0x00080000 +#define VOL_CAP_FMT_WRITE_GENERATION_COUNT 0x00100000 +#define VOL_CAP_FMT_NO_IMMUTABLE_FILES 0x00200000 +#define VOL_CAP_FMT_NO_PERMISSIONS 0x00400000 /* @@ 
-329,202 +329,202 @@ typedef struct vol_capabilities_attr { * exclusive rename operation. * */ -#define VOL_CAP_INT_SEARCHFS 0x00000001 -#define VOL_CAP_INT_ATTRLIST 0x00000002 -#define VOL_CAP_INT_NFSEXPORT 0x00000004 -#define VOL_CAP_INT_READDIRATTR 0x00000008 -#define VOL_CAP_INT_EXCHANGEDATA 0x00000010 -#define VOL_CAP_INT_COPYFILE 0x00000020 -#define VOL_CAP_INT_ALLOCATE 0x00000040 -#define VOL_CAP_INT_VOL_RENAME 0x00000080 -#define VOL_CAP_INT_ADVLOCK 0x00000100 -#define VOL_CAP_INT_FLOCK 0x00000200 -#define VOL_CAP_INT_EXTENDED_SECURITY 0x00000400 -#define VOL_CAP_INT_USERACCESS 0x00000800 -#define VOL_CAP_INT_MANLOCK 0x00001000 -#define VOL_CAP_INT_NAMEDSTREAMS 0x00002000 -#define VOL_CAP_INT_EXTENDED_ATTR 0x00004000 +#define VOL_CAP_INT_SEARCHFS 0x00000001 +#define VOL_CAP_INT_ATTRLIST 0x00000002 +#define VOL_CAP_INT_NFSEXPORT 0x00000004 +#define VOL_CAP_INT_READDIRATTR 0x00000008 +#define VOL_CAP_INT_EXCHANGEDATA 0x00000010 +#define VOL_CAP_INT_COPYFILE 0x00000020 +#define VOL_CAP_INT_ALLOCATE 0x00000040 +#define VOL_CAP_INT_VOL_RENAME 0x00000080 +#define VOL_CAP_INT_ADVLOCK 0x00000100 +#define VOL_CAP_INT_FLOCK 0x00000200 +#define VOL_CAP_INT_EXTENDED_SECURITY 0x00000400 +#define VOL_CAP_INT_USERACCESS 0x00000800 +#define VOL_CAP_INT_MANLOCK 0x00001000 +#define VOL_CAP_INT_NAMEDSTREAMS 0x00002000 +#define VOL_CAP_INT_EXTENDED_ATTR 0x00004000 #ifdef PRIVATE /* Volume supports kqueue notifications for remote events */ -#define VOL_CAP_INT_REMOTE_EVENT 0x00008000 +#define VOL_CAP_INT_REMOTE_EVENT 0x00008000 #endif /* PRIVATE */ -#define VOL_CAP_INT_CLONE 0x00010000 -#define VOL_CAP_INT_SNAPSHOT 0x00020000 -#define VOL_CAP_INT_RENAME_SWAP 0x00040000 -#define VOL_CAP_INT_RENAME_EXCL 0x00080000 +#define VOL_CAP_INT_CLONE 0x00010000 +#define VOL_CAP_INT_SNAPSHOT 0x00020000 +#define VOL_CAP_INT_RENAME_SWAP 0x00040000 +#define VOL_CAP_INT_RENAME_EXCL 0x00080000 typedef struct vol_attributes_attr { attribute_set_t validattr; attribute_set_t nativeattr; } vol_attributes_attr_t; -#define ATTR_CMN_NAME 0x00000001 -#define ATTR_CMN_DEVID 0x00000002 -#define ATTR_CMN_FSID 0x00000004 -#define ATTR_CMN_OBJTYPE 0x00000008 -#define ATTR_CMN_OBJTAG 0x00000010 -#define ATTR_CMN_OBJID 0x00000020 -#define ATTR_CMN_OBJPERMANENTID 0x00000040 -#define ATTR_CMN_PAROBJID 0x00000080 -#define ATTR_CMN_SCRIPT 0x00000100 -#define ATTR_CMN_CRTIME 0x00000200 -#define ATTR_CMN_MODTIME 0x00000400 -#define ATTR_CMN_CHGTIME 0x00000800 -#define ATTR_CMN_ACCTIME 0x00001000 -#define ATTR_CMN_BKUPTIME 0x00002000 -#define ATTR_CMN_FNDRINFO 0x00004000 -#define ATTR_CMN_OWNERID 0x00008000 -#define ATTR_CMN_GRPID 0x00010000 -#define ATTR_CMN_ACCESSMASK 0x00020000 -#define ATTR_CMN_FLAGS 0x00040000 +#define ATTR_CMN_NAME 0x00000001 +#define ATTR_CMN_DEVID 0x00000002 +#define ATTR_CMN_FSID 0x00000004 +#define ATTR_CMN_OBJTYPE 0x00000008 +#define ATTR_CMN_OBJTAG 0x00000010 +#define ATTR_CMN_OBJID 0x00000020 +#define ATTR_CMN_OBJPERMANENTID 0x00000040 +#define ATTR_CMN_PAROBJID 0x00000080 +#define ATTR_CMN_SCRIPT 0x00000100 +#define ATTR_CMN_CRTIME 0x00000200 +#define ATTR_CMN_MODTIME 0x00000400 +#define ATTR_CMN_CHGTIME 0x00000800 +#define ATTR_CMN_ACCTIME 0x00001000 +#define ATTR_CMN_BKUPTIME 0x00002000 +#define ATTR_CMN_FNDRINFO 0x00004000 +#define ATTR_CMN_OWNERID 0x00008000 +#define ATTR_CMN_GRPID 0x00010000 +#define ATTR_CMN_ACCESSMASK 0x00020000 +#define ATTR_CMN_FLAGS 0x00040000 /* The following were defined as: */ -/* #define ATTR_CMN_NAMEDATTRCOUNT 0x00080000 */ -/* #define ATTR_CMN_NAMEDATTRLIST 0x00100000 */ +/* 
#define ATTR_CMN_NAMEDATTRCOUNT 0x00080000 */ +/* #define ATTR_CMN_NAMEDATTRLIST 0x00100000 */ /* These bits have been salvaged for use as: */ /* #define ATTR_CMN_GEN_COUNT 0x00080000 */ /* #define ATTR_CMN_DOCUMENT_ID 0x00100000 */ /* They can only be used with the FSOPT_ATTR_CMN_EXTENDED */ -/* option flag. */ - -#define ATTR_CMN_GEN_COUNT 0x00080000 -#define ATTR_CMN_DOCUMENT_ID 0x00100000 - -#define ATTR_CMN_USERACCESS 0x00200000 -#define ATTR_CMN_EXTENDED_SECURITY 0x00400000 -#define ATTR_CMN_UUID 0x00800000 -#define ATTR_CMN_GRPUUID 0x01000000 -#define ATTR_CMN_FILEID 0x02000000 -#define ATTR_CMN_PARENTID 0x04000000 -#define ATTR_CMN_FULLPATH 0x08000000 -#define ATTR_CMN_ADDEDTIME 0x10000000 -#define ATTR_CMN_ERROR 0x20000000 -#define ATTR_CMN_DATA_PROTECT_FLAGS 0x40000000 +/* option flag. */ + +#define ATTR_CMN_GEN_COUNT 0x00080000 +#define ATTR_CMN_DOCUMENT_ID 0x00100000 + +#define ATTR_CMN_USERACCESS 0x00200000 +#define ATTR_CMN_EXTENDED_SECURITY 0x00400000 +#define ATTR_CMN_UUID 0x00800000 +#define ATTR_CMN_GRPUUID 0x01000000 +#define ATTR_CMN_FILEID 0x02000000 +#define ATTR_CMN_PARENTID 0x04000000 +#define ATTR_CMN_FULLPATH 0x08000000 +#define ATTR_CMN_ADDEDTIME 0x10000000 +#define ATTR_CMN_ERROR 0x20000000 +#define ATTR_CMN_DATA_PROTECT_FLAGS 0x40000000 /* * ATTR_CMN_RETURNED_ATTRS is only valid with getattrlist(2) and * getattrlistbulk(2). It is always the first attribute in the return buffer. */ -#define ATTR_CMN_RETURNED_ATTRS 0x80000000 +#define ATTR_CMN_RETURNED_ATTRS 0x80000000 -#define ATTR_CMN_VALIDMASK 0xFFFFFFFF +#define ATTR_CMN_VALIDMASK 0xFFFFFFFF /* * The settable ATTR_CMN_* attributes include the following: * ATTR_CMN_SCRIPT * ATTR_CMN_CRTIME * ATTR_CMN_MODTIME * ATTR_CMN_CHGTIME - * + * * ATTR_CMN_ACCTIME * ATTR_CMN_BKUPTIME * ATTR_CMN_FNDRINFO * ATTR_CMN_OWNERID - * + * * ATTR_CMN_GRPID * ATTR_CMN_ACCESSMASK * ATTR_CMN_FLAGS - * + * * ATTR_CMN_EXTENDED_SECURITY * ATTR_CMN_UUID - * + * * ATTR_CMN_GRPUUID - * + * * ATTR_CMN_DATA_PROTECT_FLAGS */ -#define ATTR_CMN_SETMASK 0x51C7FF00 -#define ATTR_CMN_VOLSETMASK 0x00006700 - -#define ATTR_VOL_FSTYPE 0x00000001 -#define ATTR_VOL_SIGNATURE 0x00000002 -#define ATTR_VOL_SIZE 0x00000004 -#define ATTR_VOL_SPACEFREE 0x00000008 -#define ATTR_VOL_SPACEAVAIL 0x00000010 -#define ATTR_VOL_MINALLOCATION 0x00000020 -#define ATTR_VOL_ALLOCATIONCLUMP 0x00000040 -#define ATTR_VOL_IOBLOCKSIZE 0x00000080 -#define ATTR_VOL_OBJCOUNT 0x00000100 -#define ATTR_VOL_FILECOUNT 0x00000200 -#define ATTR_VOL_DIRCOUNT 0x00000400 -#define ATTR_VOL_MAXOBJCOUNT 0x00000800 -#define ATTR_VOL_MOUNTPOINT 0x00001000 -#define ATTR_VOL_NAME 0x00002000 -#define ATTR_VOL_MOUNTFLAGS 0x00004000 -#define ATTR_VOL_MOUNTEDDEVICE 0x00008000 -#define ATTR_VOL_ENCODINGSUSED 0x00010000 -#define ATTR_VOL_CAPABILITIES 0x00020000 -#define ATTR_VOL_UUID 0x00040000 -#define ATTR_VOL_QUOTA_SIZE 0x10000000 -#define ATTR_VOL_RESERVED_SIZE 0x20000000 -#define ATTR_VOL_ATTRIBUTES 0x40000000 -#define ATTR_VOL_INFO 0x80000000 - -#define ATTR_VOL_VALIDMASK 0xF007FFFF +#define ATTR_CMN_SETMASK 0x51C7FF00 +#define ATTR_CMN_VOLSETMASK 0x00006700 + +#define ATTR_VOL_FSTYPE 0x00000001 +#define ATTR_VOL_SIGNATURE 0x00000002 +#define ATTR_VOL_SIZE 0x00000004 +#define ATTR_VOL_SPACEFREE 0x00000008 +#define ATTR_VOL_SPACEAVAIL 0x00000010 +#define ATTR_VOL_MINALLOCATION 0x00000020 +#define ATTR_VOL_ALLOCATIONCLUMP 0x00000040 +#define ATTR_VOL_IOBLOCKSIZE 0x00000080 +#define ATTR_VOL_OBJCOUNT 0x00000100 +#define ATTR_VOL_FILECOUNT 0x00000200 +#define ATTR_VOL_DIRCOUNT 0x00000400 +#define 
ATTR_VOL_MAXOBJCOUNT 0x00000800 +#define ATTR_VOL_MOUNTPOINT 0x00001000 +#define ATTR_VOL_NAME 0x00002000 +#define ATTR_VOL_MOUNTFLAGS 0x00004000 +#define ATTR_VOL_MOUNTEDDEVICE 0x00008000 +#define ATTR_VOL_ENCODINGSUSED 0x00010000 +#define ATTR_VOL_CAPABILITIES 0x00020000 +#define ATTR_VOL_UUID 0x00040000 +#define ATTR_VOL_QUOTA_SIZE 0x10000000 +#define ATTR_VOL_RESERVED_SIZE 0x20000000 +#define ATTR_VOL_ATTRIBUTES 0x40000000 +#define ATTR_VOL_INFO 0x80000000 + +#define ATTR_VOL_VALIDMASK 0xF007FFFF /* * The list of settable ATTR_VOL_* attributes include the following: * ATTR_VOL_NAME * ATTR_VOL_INFO */ -#define ATTR_VOL_SETMASK 0x80002000 +#define ATTR_VOL_SETMASK 0x80002000 /* File/directory attributes: */ -#define ATTR_DIR_LINKCOUNT 0x00000001 -#define ATTR_DIR_ENTRYCOUNT 0x00000002 -#define ATTR_DIR_MOUNTSTATUS 0x00000004 -#define ATTR_DIR_ALLOCSIZE 0x00000008 -#define ATTR_DIR_IOBLOCKSIZE 0x00000010 -#define ATTR_DIR_DATALENGTH 0x00000020 +#define ATTR_DIR_LINKCOUNT 0x00000001 +#define ATTR_DIR_ENTRYCOUNT 0x00000002 +#define ATTR_DIR_MOUNTSTATUS 0x00000004 +#define ATTR_DIR_ALLOCSIZE 0x00000008 +#define ATTR_DIR_IOBLOCKSIZE 0x00000010 +#define ATTR_DIR_DATALENGTH 0x00000020 /* ATTR_DIR_MOUNTSTATUS Flags: */ -#define DIR_MNTSTATUS_MNTPOINT 0x00000001 -#define DIR_MNTSTATUS_TRIGGER 0x00000002 - -#define ATTR_DIR_VALIDMASK 0x0000003f -#define ATTR_DIR_SETMASK 0x00000000 - -#define ATTR_FILE_LINKCOUNT 0x00000001 -#define ATTR_FILE_TOTALSIZE 0x00000002 -#define ATTR_FILE_ALLOCSIZE 0x00000004 -#define ATTR_FILE_IOBLOCKSIZE 0x00000008 -#define ATTR_FILE_DEVTYPE 0x00000020 -#define ATTR_FILE_FORKCOUNT 0x00000080 -#define ATTR_FILE_FORKLIST 0x00000100 -#define ATTR_FILE_DATALENGTH 0x00000200 -#define ATTR_FILE_DATAALLOCSIZE 0x00000400 -#define ATTR_FILE_RSRCLENGTH 0x00001000 -#define ATTR_FILE_RSRCALLOCSIZE 0x00002000 - -#define ATTR_FILE_VALIDMASK 0x000037FF -/* +#define DIR_MNTSTATUS_MNTPOINT 0x00000001 +#define DIR_MNTSTATUS_TRIGGER 0x00000002 + +#define ATTR_DIR_VALIDMASK 0x0000003f +#define ATTR_DIR_SETMASK 0x00000000 + +#define ATTR_FILE_LINKCOUNT 0x00000001 +#define ATTR_FILE_TOTALSIZE 0x00000002 +#define ATTR_FILE_ALLOCSIZE 0x00000004 +#define ATTR_FILE_IOBLOCKSIZE 0x00000008 +#define ATTR_FILE_DEVTYPE 0x00000020 +#define ATTR_FILE_FORKCOUNT 0x00000080 +#define ATTR_FILE_FORKLIST 0x00000100 +#define ATTR_FILE_DATALENGTH 0x00000200 +#define ATTR_FILE_DATAALLOCSIZE 0x00000400 +#define ATTR_FILE_RSRCLENGTH 0x00001000 +#define ATTR_FILE_RSRCALLOCSIZE 0x00002000 + +#define ATTR_FILE_VALIDMASK 0x000037FF +/* * Settable ATTR_FILE_* attributes include: * ATTR_FILE_DEVTYPE */ -#define ATTR_FILE_SETMASK 0x00000020 +#define ATTR_FILE_SETMASK 0x00000020 /* CMNEXT attributes extend the common attributes, but in the forkattr field */ #define ATTR_CMNEXT_RELPATH 0x00000004 #define ATTR_CMNEXT_PRIVATESIZE 0x00000008 -#define ATTR_CMNEXT_LINKID 0x00000010 +#define ATTR_CMNEXT_LINKID 0x00000010 -#define ATTR_CMNEXT_VALIDMASK 0x0000001c -#define ATTR_CMNEXT_SETMASK 0x00000000 +#define ATTR_CMNEXT_VALIDMASK 0x0000001c +#define ATTR_CMNEXT_SETMASK 0x00000000 /* Deprecated fork attributes */ -#define ATTR_FORK_TOTALSIZE 0x00000001 -#define ATTR_FORK_ALLOCSIZE 0x00000002 -#define ATTR_FORK_RESERVED 0xffffffff +#define ATTR_FORK_TOTALSIZE 0x00000001 +#define ATTR_FORK_ALLOCSIZE 0x00000002 +#define ATTR_FORK_RESERVED 0xffffffff -#define ATTR_FORK_VALIDMASK 0x00000003 -#define ATTR_FORK_SETMASK 0x00000000 +#define ATTR_FORK_VALIDMASK 0x00000003 +#define ATTR_FORK_SETMASK 0x00000000 /* Obsolete, implemented, 
not supported */ -#define ATTR_CMN_NAMEDATTRCOUNT 0x00080000 -#define ATTR_CMN_NAMEDATTRLIST 0x00100000 -#define ATTR_FILE_CLUMPSIZE 0x00000010 /* obsolete */ -#define ATTR_FILE_FILETYPE 0x00000040 /* always zero */ -#define ATTR_FILE_DATAEXTENTS 0x00000800 /* obsolete, HFS-specific */ -#define ATTR_FILE_RSRCEXTENTS 0x00004000 /* obsolete, HFS-specific */ +#define ATTR_CMN_NAMEDATTRCOUNT 0x00080000 +#define ATTR_CMN_NAMEDATTRLIST 0x00100000 +#define ATTR_FILE_CLUMPSIZE 0x00000010 /* obsolete */ +#define ATTR_FILE_FILETYPE 0x00000040 /* always zero */ +#define ATTR_FILE_DATAEXTENTS 0x00000800 /* obsolete, HFS-specific */ +#define ATTR_FILE_RSRCEXTENTS 0x00004000 /* obsolete, HFS-specific */ /* Required attributes for getattrlistbulk(2) */ #define ATTR_BULK_REQUIRED (ATTR_CMN_NAME | ATTR_CMN_RETURNED_ATTRS) @@ -532,29 +532,29 @@ typedef struct vol_attributes_attr { /* * Searchfs */ -#define SRCHFS_START 0x00000001 -#define SRCHFS_MATCHPARTIALNAMES 0x00000002 -#define SRCHFS_MATCHDIRS 0x00000004 -#define SRCHFS_MATCHFILES 0x00000008 -#define SRCHFS_SKIPLINKS 0x00000010 -#define SRCHFS_SKIPINVISIBLE 0x00000020 -#define SRCHFS_SKIPPACKAGES 0x00000040 -#define SRCHFS_SKIPINAPPROPRIATE 0x00000080 - -#define SRCHFS_NEGATEPARAMS 0x80000000 -#define SRCHFS_VALIDOPTIONSMASK 0x800000FF +#define SRCHFS_START 0x00000001 +#define SRCHFS_MATCHPARTIALNAMES 0x00000002 +#define SRCHFS_MATCHDIRS 0x00000004 +#define SRCHFS_MATCHFILES 0x00000008 +#define SRCHFS_SKIPLINKS 0x00000010 +#define SRCHFS_SKIPINVISIBLE 0x00000020 +#define SRCHFS_SKIPPACKAGES 0x00000040 +#define SRCHFS_SKIPINAPPROPRIATE 0x00000080 + +#define SRCHFS_NEGATEPARAMS 0x80000000 +#define SRCHFS_VALIDOPTIONSMASK 0x800000FF struct fssearchblock { - struct attrlist *returnattrs; - void *returnbuffer; - size_t returnbuffersize; - u_long maxmatches; - struct timeval timelimit; - void *searchparams1; - size_t sizeofsearchparams1; - void *searchparams2; - size_t sizeofsearchparams2; - struct attrlist searchattrs; + struct attrlist *returnattrs; + void *returnbuffer; + size_t returnbuffersize; + u_long maxmatches; + struct timeval timelimit; + void *searchparams1; + size_t sizeofsearchparams1; + void *searchparams2; + size_t sizeofsearchparams2; + struct attrlist searchattrs; }; #ifdef KERNEL @@ -568,12 +568,12 @@ struct user64_fssearchblock { user64_addr_t returnbuffer; user64_size_t returnbuffersize; user64_ulong_t maxmatches; - struct user64_timeval timelimit; + struct user64_timeval timelimit; user64_addr_t searchparams1; user64_size_t sizeofsearchparams1; user64_addr_t searchparams2; user64_size_t sizeofsearchparams2; - struct attrlist searchattrs; + struct attrlist searchattrs; }; struct user32_fssearchblock { @@ -581,23 +581,23 @@ struct user32_fssearchblock { user32_addr_t returnbuffer; user32_size_t returnbuffersize; user32_ulong_t maxmatches; - struct user32_timeval timelimit; + struct user32_timeval timelimit; user32_addr_t searchparams1; user32_size_t sizeofsearchparams1; user32_addr_t searchparams2; user32_size_t sizeofsearchparams2; - struct attrlist searchattrs; + struct attrlist searchattrs; }; #endif /* KERNEL */ struct searchstate { - uint32_t ss_union_flags; // for SRCHFS_START - uint32_t ss_union_layer; // 0 = top - u_char ss_fsstate[548]; // fs private + uint32_t ss_union_flags; // for SRCHFS_START + uint32_t ss_union_layer; // 0 = top + u_char ss_fsstate[548]; // fs private } __attribute__((packed)); -#define FST_EOF (-1) /* end-of-file offset */ +#define FST_EOF (-1) /* end-of-file offset */ #endif /* __APPLE_API_UNSTABLE */ 
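Taken together, the attrlist structure and the ATTR_CMN_*/FSOPT_* bits above drive the getattrlist(2) family. What follows is a minimal userspace sketch modeled on the getattrlist(2) man page example, assuming only the public <sys/attr.h> interface: the buffer layout (a leading u_int32_t length, then the requested attributes packed in bit order) is mandated by the API, while FInfoAttrBuf is just a local name for that layout.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/attr.h>
#include <sys/vnode.h>

struct FInfoAttrBuf {
	u_int32_t     length;          /* total packed size, always first */
	fsobj_type_t  objType;         /* ATTR_CMN_OBJTYPE */
	char          finderInfo[32];  /* ATTR_CMN_FNDRINFO */
} __attribute__((aligned(4), packed));

int
main(int argc, char *argv[])
{
	struct attrlist     attrList;
	struct FInfoAttrBuf attrBuf;

	if (argc != 2) {
		fprintf(stderr, "usage: %s path\n", argv[0]);
		return 1;
	}
	memset(&attrList, 0, sizeof(attrList));
	attrList.bitmapcount = ATTR_BIT_MAP_COUNT;   /* must be 5 */
	attrList.commonattr  = ATTR_CMN_OBJTYPE | ATTR_CMN_FNDRINFO;

	/* FSOPT_NOFOLLOW: query a symlink itself, not its target */
	if (getattrlist(argv[1], &attrList, &attrBuf, sizeof(attrBuf),
	    FSOPT_NOFOLLOW) != 0) {
		perror("getattrlist");
		return 1;
	}
	printf("%s: objtype=%u (VREG=%d VDIR=%d)\n",
	    argv[1], attrBuf.objType, VREG, VDIR);
	return 0;
}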
#endif /* !_SYS_ATTR_H_ */ diff --git a/bsd/sys/bitstring.h b/bsd/sys/bitstring.h index 3da8f42db..e69067255 100644 --- a/bsd/sys/bitstring.h +++ b/bsd/sys/bitstring.h @@ -59,121 +59,121 @@ */ #ifndef _SYS_BITSTRING_H_ -#define _SYS_BITSTRING_H_ +#define _SYS_BITSTRING_H_ #ifdef XNU_KERNEL_PRIVATE #include -typedef uint8_t bitstr_t; +typedef uint8_t bitstr_t; /* internal macros */ - /* byte of the bitstring bit is in */ -#define _bitstr_byte(bit) \ +/* byte of the bitstring bit is in */ +#define _bitstr_byte(bit) \ ((bit) >> 3) - /* mask for the bit within its byte */ -#define _bitstr_mask(bit) \ +/* mask for the bit within its byte */ +#define _bitstr_mask(bit) \ (1 << ((bit) & 0x7)) /* external macros */ - /* bytes in a bitstring of nbits bits */ -#define bitstr_size(nbits) \ +/* bytes in a bitstring of nbits bits */ +#define bitstr_size(nbits) \ (((nbits) + 7) >> 3) - /* allocate a bitstring on the stack */ -#define bit_decl(name, nbits) \ +/* allocate a bitstring on the stack */ +#define bit_decl(name, nbits) \ ((name)[bitstr_size(nbits)]) - /* is bit N of bitstring name set? */ -#define bitstr_test(name, bit) \ +/* is bit N of bitstring name set? */ +#define bitstr_test(name, bit) \ ((name)[_bitstr_byte(bit)] & _bitstr_mask(bit)) - /* set bit N of bitstring name */ -#define bitstr_set(name, bit) \ +/* set bit N of bitstring name */ +#define bitstr_set(name, bit) \ ((name)[_bitstr_byte(bit)] |= _bitstr_mask(bit)) - /* set bit N of bitstring name (atomic) */ -#define bitstr_set_atomic(name, bit) \ +/* set bit N of bitstring name (atomic) */ +#define bitstr_set_atomic(name, bit) \ atomic_bitset_8(&((name)[_bitstr_byte(bit)]), _bitstr_mask(bit)) - /* clear bit N of bitstring name */ -#define bitstr_clear(name, bit) \ +/* clear bit N of bitstring name */ +#define bitstr_clear(name, bit) \ ((name)[_bitstr_byte(bit)] &= ~_bitstr_mask(bit)) - /* clear bit N of bitstring name (atomic) */ -#define bitstr_clear_atomic(name, bit) \ +/* clear bit N of bitstring name (atomic) */ +#define bitstr_clear_atomic(name, bit) \ atomic_bitclear_8(&((name)[_bitstr_byte(bit)]), _bitstr_mask(bit)) - /* clear bits start ... stop in bitstring */ -#define bitstr_nclear(name, start, stop) do { \ - bitstr_t *_name = (name); \ - int _start = (start), _stop = (stop); \ - int _startbyte = _bitstr_byte(_start); \ - int _stopbyte = _bitstr_byte(_stop); \ - if (_startbyte == _stopbyte) { \ - _name[_startbyte] &= ((0xff >> (8 - (_start & 0x7))) | \ - (0xff << ((_stop & 0x7) + 1))); \ - } else { \ - _name[_startbyte] &= 0xff >> (8 - (_start & 0x7)); \ - while (++_startbyte < _stopbyte) \ - _name[_startbyte] = 0; \ - _name[_stopbyte] &= 0xff << ((_stop & 0x7) + 1); \ - } \ +/* clear bits start ... stop in bitstring */ +#define bitstr_nclear(name, start, stop) do { \ + bitstr_t *_name = (name); \ + int _start = (start), _stop = (stop); \ + int _startbyte = _bitstr_byte(_start); \ + int _stopbyte = _bitstr_byte(_stop); \ + if (_startbyte == _stopbyte) { \ + _name[_startbyte] &= ((0xff >> (8 - (_start & 0x7))) | \ + (0xff << ((_stop & 0x7) + 1))); \ + } else { \ + _name[_startbyte] &= 0xff >> (8 - (_start & 0x7)); \ + while (++_startbyte < _stopbyte) \ + _name[_startbyte] = 0; \ + _name[_stopbyte] &= 0xff << ((_stop & 0x7) + 1); \ + } \ } while (0) - /* set bits start ... 
stop in bitstring */ -#define bitstr_nset(name, start, stop) do { \ - bitstr_t *_name = (name); \ - int _start = (start), _stop = (stop); \ - int _startbyte = _bitstr_byte(_start); \ - int _stopbyte = _bitstr_byte(_stop); \ - if (_startbyte == _stopbyte) { \ - _name[_startbyte] |= ((0xff << (_start & 0x7)) & \ - (0xff >> (7 - (_stop & 0x7)))); \ - } else { \ - _name[_startbyte] |= 0xff << ((_start) & 0x7); \ - while (++_startbyte < _stopbyte) \ - _name[_startbyte] = 0xff; \ - _name[_stopbyte] |= 0xff >> (7 - (_stop & 0x7)); \ - } \ +/* set bits start ... stop in bitstring */ +#define bitstr_nset(name, start, stop) do { \ + bitstr_t *_name = (name); \ + int _start = (start), _stop = (stop); \ + int _startbyte = _bitstr_byte(_start); \ + int _stopbyte = _bitstr_byte(_stop); \ + if (_startbyte == _stopbyte) { \ + _name[_startbyte] |= ((0xff << (_start & 0x7)) & \ + (0xff >> (7 - (_stop & 0x7)))); \ + } else { \ + _name[_startbyte] |= 0xff << ((_start) & 0x7); \ + while (++_startbyte < _stopbyte) \ + _name[_startbyte] = 0xff; \ + _name[_stopbyte] |= 0xff >> (7 - (_stop & 0x7)); \ + } \ } while (0) - /* find first bit clear in name */ -#define bitstr_ffc(name, nbits, value) do { \ - bitstr_t *_name = (name); \ - int _byte, _nbits = (nbits); \ - int _stopbyte = _bitstr_byte(_nbits - 1), _value = -1; \ - if (_nbits > 0) \ - for (_byte = 0; _byte <= _stopbyte; ++_byte) \ - if (_name[_byte] != 0xff) { \ - bitstr_t _lb; \ - _value = _byte << 3; \ - for (_lb = _name[_byte]; (_lb & 0x1); \ - ++_value, _lb >>= 1); \ - break; \ - } \ - if (_value >= nbits) \ - _value = -1; \ - *(value) = _value; \ +/* find first bit clear in name */ +#define bitstr_ffc(name, nbits, value) do { \ + bitstr_t *_name = (name); \ + int _byte, _nbits = (nbits); \ + int _stopbyte = _bitstr_byte(_nbits - 1), _value = -1; \ + if (_nbits > 0) \ + for (_byte = 0; _byte <= _stopbyte; ++_byte) \ + if (_name[_byte] != 0xff) { \ + bitstr_t _lb; \ + _value = _byte << 3; \ + for (_lb = _name[_byte]; (_lb & 0x1); \ + ++_value, _lb >>= 1); \ + break; \ + } \ + if (_value >= nbits) \ + _value = -1; \ + *(value) = _value; \ } while (0) - /* find first bit set in name */ -#define bitstr_ffs(name, nbits, value) do { \ - bitstr_t *_name = (name); \ - int _byte, _nbits = (nbits); \ - int _stopbyte = _bitstr_byte(_nbits - 1), _value = -1; \ - if (_nbits > 0) \ - for (_byte = 0; _byte <= _stopbyte; ++_byte) \ - if (_name[_byte]) { \ - bitstr_t _lb; \ - _value = _byte << 3; \ - for (_lb = _name[_byte]; !(_lb & 0x1); \ - ++_value, _lb >>= 1); \ - break; \ - } \ - if (_value >= nbits) \ - _value = -1; \ - *(value) = _value; \ +/* find first bit set in name */ +#define bitstr_ffs(name, nbits, value) do { \ + bitstr_t *_name = (name); \ + int _byte, _nbits = (nbits); \ + int _stopbyte = _bitstr_byte(_nbits - 1), _value = -1; \ + if (_nbits > 0) \ + for (_byte = 0; _byte <= _stopbyte; ++_byte) \ + if (_name[_byte]) { \ + bitstr_t _lb; \ + _value = _byte << 3; \ + for (_lb = _name[_byte]; !(_lb & 0x1); \ + ++_value, _lb >>= 1); \ + break; \ + } \ + if (_value >= nbits) \ + _value = -1; \ + *(value) = _value; \ } while (0) #endif /* XNU_KERNEL_PRIVATE */ diff --git a/bsd/sys/bsdtask_info.h b/bsd/sys/bsdtask_info.h index 7f2edccd2..a0f182493 100644 --- a/bsd/sys/bsdtask_info.h +++ b/bsd/sys/bsdtask_info.h @@ -1,9 +1,8 @@ - /* * Copyright (c) 2005, 2015 Apple Computer, Inc. All rights reserved. 
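The bitstring macros above are visible only under XNU_KERNEL_PRIVATE; in-kernel they typically back small fixed-size allocation maps. A hedged sketch of that pattern, in which slot_map, slot_alloc, slot_free, and SLOT_COUNT are all hypothetical names, not anything defined by this header:

#include <sys/bitstring.h>   /* XNU_KERNEL_PRIVATE only */

#define SLOT_COUNT 64        /* assumed map size */

static bitstr_t bit_decl(slot_map, SLOT_COUNT);  /* static: all bits start clear */

/* Return the index of a previously free slot, now marked busy, or -1. */
static int
slot_alloc(void)
{
	int slot;

	bitstr_ffc(slot_map, SLOT_COUNT, &slot);  /* find first clear bit */
	if (slot != -1) {
		bitstr_set(slot_map, slot);
	}
	return slot;
}

static void
slot_free(int slot)
{
	bitstr_clear(slot_map, slot);
}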
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,73 +31,73 @@ #include -struct proc_taskinfo_internal { - uint64_t pti_virtual_size; /* virtual memory size (bytes) */ - uint64_t pti_resident_size; /* resident memory size (bytes) */ - uint64_t pti_total_user; /* total time */ - uint64_t pti_total_system; - uint64_t pti_threads_user; /* existing threads only */ - uint64_t pti_threads_system; - int32_t pti_policy; /* default policy for new threads */ - int32_t pti_faults; /* number of page faults */ - int32_t pti_pageins; /* number of actual pageins */ - int32_t pti_cow_faults; /* number of copy-on-write faults */ - int32_t pti_messages_sent; /* number of messages sent */ - int32_t pti_messages_received; /* number of messages received */ - int32_t pti_syscalls_mach; /* number of mach system calls */ - int32_t pti_syscalls_unix; /* number of unix system calls */ - int32_t pti_csw; /* number of context switches */ - int32_t pti_threadnum; /* number of threads in the task */ - int32_t pti_numrunning; /* number of running threads */ +struct proc_taskinfo_internal { + uint64_t pti_virtual_size; /* virtual memory size (bytes) */ + uint64_t pti_resident_size; /* resident memory size (bytes) */ + uint64_t pti_total_user; /* total time */ + uint64_t pti_total_system; + uint64_t pti_threads_user; /* existing threads only */ + uint64_t pti_threads_system; + int32_t pti_policy; /* default policy for new threads */ + int32_t pti_faults; /* number of page faults */ + int32_t pti_pageins; /* number of actual pageins */ + int32_t pti_cow_faults; /* number of copy-on-write faults */ + int32_t pti_messages_sent; /* number of messages sent */ + int32_t pti_messages_received; /* number of messages received */ + int32_t pti_syscalls_mach; /* number of mach system calls */ + int32_t pti_syscalls_unix; /* number of unix system calls */ + int32_t pti_csw; /* number of context switches */ + int32_t pti_threadnum; /* number of threads in the task */ + int32_t pti_numrunning; /* number of running threads */ int32_t pti_priority; /* task priority*/ }; #define MAXTHREADNAMESIZE 64 struct proc_threadinfo_internal { - uint64_t pth_user_time; /* user run time */ - uint64_t pth_system_time; /* system run time */ - int32_t pth_cpu_usage; /* scaled cpu usage percentage */ - int32_t pth_policy; /* scheduling policy in effect */ - int32_t pth_run_state; /* run state (see below) */ - int32_t pth_flags; /* various flags (see below) */ - int32_t pth_sleep_time; /* number of seconds that thread */ - int32_t 
pth_curpri; /* cur priority*/ - int32_t pth_priority; /* priority*/ - int32_t pth_maxpriority; /* max priority*/ - char pth_name[MAXTHREADNAMESIZE]; /* thread name, if any */ + uint64_t pth_user_time; /* user run time */ + uint64_t pth_system_time; /* system run time */ + int32_t pth_cpu_usage; /* scaled cpu usage percentage */ + int32_t pth_policy; /* scheduling policy in effect */ + int32_t pth_run_state; /* run state (see below) */ + int32_t pth_flags; /* various flags (see below) */ + int32_t pth_sleep_time; /* number of seconds that thread */ + int32_t pth_curpri; /* cur priority*/ + int32_t pth_priority; /* priority*/ + int32_t pth_maxpriority; /* max priority*/ + char pth_name[MAXTHREADNAMESIZE]; /* thread name, if any */ }; struct proc_regioninfo_internal { - uint32_t pri_protection; - uint32_t pri_max_protection; - uint32_t pri_inheritance; - uint32_t pri_flags; /* shared, external pager, is submap */ - uint64_t pri_offset; - uint32_t pri_behavior; - uint32_t pri_user_wired_count; - uint32_t pri_user_tag; - uint32_t pri_pages_resident; - uint32_t pri_pages_shared_now_private; - uint32_t pri_pages_swapped_out; - uint32_t pri_pages_dirtied; - uint32_t pri_ref_count; - uint32_t pri_shadow_depth; - uint32_t pri_share_mode; - uint32_t pri_private_pages_resident; - uint32_t pri_shared_pages_resident; - uint32_t pri_obj_id; - uint32_t pri_depth; - uint64_t pri_address; - uint64_t pri_size; + uint32_t pri_protection; + uint32_t pri_max_protection; + uint32_t pri_inheritance; + uint32_t pri_flags; /* shared, external pager, is submap */ + uint64_t pri_offset; + uint32_t pri_behavior; + uint32_t pri_user_wired_count; + uint32_t pri_user_tag; + uint32_t pri_pages_resident; + uint32_t pri_pages_shared_now_private; + uint32_t pri_pages_swapped_out; + uint32_t pri_pages_dirtied; + uint32_t pri_ref_count; + uint32_t pri_shadow_depth; + uint32_t pri_share_mode; + uint32_t pri_private_pages_resident; + uint32_t pri_shared_pages_resident; + uint32_t pri_obj_id; + uint32_t pri_depth; + uint64_t pri_address; + uint64_t pri_size; }; #ifdef MACH_KERNEL_PRIVATE -#define PROC_REGION_SUBMAP 1 -#define PROC_REGION_SHARED 2 +#define PROC_REGION_SUBMAP 1 +#define PROC_REGION_SHARED 2 extern uint32_t vnode_vid(void *vp); #if CONFIG_IOSCHED @@ -121,4 +120,3 @@ void bsd_threadcdir(void * uth, void *vptr, int *vidp); extern void bsd_copythreadname(void *dst_uth, void *src_uth); #endif /*_SYS_BSDTASK_INFO_H */ - diff --git a/bsd/sys/buf.h b/bsd/sys/buf.h index 8233ac527..fa96b304c 100644 --- a/bsd/sys/buf.h +++ b/bsd/sys/buf.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
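The proc_taskinfo_internal and proc_threadinfo_internal structures above are kernel-side staging for the proc_info interface; userspace sees the corresponding public structs from <sys/proc_info.h> through libproc. A minimal sketch using the public proc_pidinfo() wrapper (the pti_* field names mirror the internal members above; no private interfaces are assumed):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <libproc.h>
#include <sys/proc_info.h>

int
main(int argc, char *argv[])
{
	struct proc_taskinfo ti;
	pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : getpid();

	/* Fills the public mirror of proc_taskinfo_internal shown above. */
	if (proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &ti, sizeof(ti)) <
	    (int)sizeof(ti)) {
		perror("proc_pidinfo");
		return 1;
	}
	printf("pid %d: resident=%llu bytes threads=%d faults=%d csw=%d\n",
	    pid, (unsigned long long)ti.pti_resident_size,
	    ti.pti_threadnum, ti.pti_faults, ti.pti_csw);
	return 0;
}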
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -67,7 +67,7 @@ */ #ifndef _SYS_BUF_H_ -#define _SYS_BUF_H_ +#define _SYS_BUF_H_ #include #include @@ -75,23 +75,23 @@ #include -#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */ -#define B_READ 0x00000001 /* Read buffer. */ -#define B_ASYNC 0x00000002 /* Start I/O, do not wait. */ -#define B_NOCACHE 0x00000004 /* Do not cache block after use. */ -#define B_DELWRI 0x00000008 /* Delay I/O until buffer reused. */ -#define B_LOCKED 0x00000010 /* Locked in core (not reusable). */ -#define B_PHYS 0x00000020 /* I/O to user memory. */ -#define B_CLUSTER 0x00000040 /* UPL based I/O generated by cluster layer */ -#define B_PAGEIO 0x00000080 /* Page in/out */ -#define B_META 0x00000100 /* buffer contains meta-data. */ -#define B_RAW 0x00000200 /* Set by physio for raw transfers. */ -#define B_FUA 0x00000400 /* Write-through disk cache(if supported) */ -#define B_PASSIVE 0x00000800 /* PASSIVE I/Os are ignored by THROTTLE I/O */ -#define B_IOSTREAMING 0x00001000 /* sequential access pattern detected */ -#define B_THROTTLED_IO 0x00002000 /* low priority I/O (deprecated) */ -#define B_ENCRYPTED_IO 0x00004000 /* Encrypted I/O */ -#define B_STATICCONTENT 0x00008000 /* Buffer is likely to remain unaltered */ +#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */ +#define B_READ 0x00000001 /* Read buffer. */ +#define B_ASYNC 0x00000002 /* Start I/O, do not wait. */ +#define B_NOCACHE 0x00000004 /* Do not cache block after use. */ +#define B_DELWRI 0x00000008 /* Delay I/O until buffer reused. */ +#define B_LOCKED 0x00000010 /* Locked in core (not reusable). */ +#define B_PHYS 0x00000020 /* I/O to user memory. */ +#define B_CLUSTER 0x00000040 /* UPL based I/O generated by cluster layer */ +#define B_PAGEIO 0x00000080 /* Page in/out */ +#define B_META 0x00000100 /* buffer contains meta-data. */ +#define B_RAW 0x00000200 /* Set by physio for raw transfers. */ +#define B_FUA 0x00000400 /* Write-through disk cache(if supported) */ +#define B_PASSIVE 0x00000800 /* PASSIVE I/Os are ignored by THROTTLE I/O */ +#define B_IOSTREAMING 0x00001000 /* sequential access pattern detected */ +#define B_THROTTLED_IO 0x00002000 /* low priority I/O (deprecated) */ +#define B_ENCRYPTED_IO 0x00004000 /* Encrypted I/O */ +#define B_STATICCONTENT 0x00008000 /* Buffer is likely to remain unaltered */ /* * make sure to check when adding flags that @@ -102,1096 +102,1096 @@ __BEGIN_DECLS /*! - @function buf_markaged - @abstract Mark a buffer as "aged," i.e. as a good candidate to be discarded and reused after buf_brelse(). - @param bp Buffer to mark. + * @function buf_markaged + * @abstract Mark a buffer as "aged," i.e. as a good candidate to be discarded and reused after buf_brelse(). + * @param bp Buffer to mark. */ -void buf_markaged(buf_t bp); +void buf_markaged(buf_t bp); /*! - @function buf_markinvalid - @abstract Mark a buffer as not having valid data and being ready for immediate reuse after buf_brelse(). - @param bp Buffer to mark. 
+ * @function buf_markinvalid + * @abstract Mark a buffer as not having valid data and being ready for immediate reuse after buf_brelse(). + * @param bp Buffer to mark. */ -void buf_markinvalid(buf_t bp); +void buf_markinvalid(buf_t bp); /*! - @function buf_markdelayed - @abstract Mark a buffer as a delayed write: mark it dirty without actually scheduling I/O. - @discussion Data will be flushed to disk at some later time, not with brelse(). A sync()/fsync() - or pressure necessitating reuse of the buffer will cause it to be written back to disk. - @param bp Buffer to mark. + * @function buf_markdelayed + * @abstract Mark a buffer as a delayed write: mark it dirty without actually scheduling I/O. + * @discussion Data will be flushed to disk at some later time, not with brelse(). A sync()/fsync() + * or pressure necessitating reuse of the buffer will cause it to be written back to disk. + * @param bp Buffer to mark. */ -void buf_markdelayed(buf_t bp); +void buf_markdelayed(buf_t bp); -void buf_markclean(buf_t); +void buf_markclean(buf_t); /*! - @function buf_markeintr - @abstract Mark a buffer as having been interrupted during I/O. - @discussion Waiters for I/O to complete (buf_biowait()) will return with EINTR when woken up. - buf_markeintr does not itself do a wakeup. - @param bp Buffer to mark. + * @function buf_markeintr + * @abstract Mark a buffer as having been interrupted during I/O. + * @discussion Waiters for I/O to complete (buf_biowait()) will return with EINTR when woken up. + * buf_markeintr does not itself do a wakeup. + * @param bp Buffer to mark. */ -void buf_markeintr(buf_t bp); +void buf_markeintr(buf_t bp); /*! - @function buf_markfua - @abstract Mark a buffer for write through disk cache, if disk supports it. - @param bp Buffer to mark. + * @function buf_markfua + * @abstract Mark a buffer for write through disk cache, if disk supports it. + * @param bp Buffer to mark. */ -void buf_markfua(buf_t bp); +void buf_markfua(buf_t bp); /*! - @function buf_fua - @abstract Check if a buffer is marked for write through disk caches. - @param bp Buffer to test. - @return Nonzero if buffer is marked for write-through, 0 if not. + * @function buf_fua + * @abstract Check if a buffer is marked for write through disk caches. + * @param bp Buffer to test. + * @return Nonzero if buffer is marked for write-through, 0 if not. */ -int buf_fua(buf_t bp); +int buf_fua(buf_t bp); /*! - @function buf_valid - @abstract Check if a buffer contains valid data. - @param bp Buffer to test. - @return Nonzero if buffer has valid data, 0 if not. + * @function buf_valid + * @abstract Check if a buffer contains valid data. + * @param bp Buffer to test. + * @return Nonzero if buffer has valid data, 0 if not. */ -int buf_valid(buf_t bp); +int buf_valid(buf_t bp); /*! - @function buf_fromcache - @abstract Check if a buffer's data was found in core. - @discussion Will return truth after a buf_getblk that finds a valid buffer in the cache or the relevant - data in core (but not in a buffer). - @param bp Buffer to test. - @return Nonzero if we got this buffer's data without doing I/O, 0 if not. + * @function buf_fromcache + * @abstract Check if a buffer's data was found in core. + * @discussion Will return truth after a buf_getblk that finds a valid buffer in the cache or the relevant + * data in core (but not in a buffer). + * @param bp Buffer to test. + * @return Nonzero if we got this buffer's data without doing I/O, 0 if not. */ -int buf_fromcache(buf_t bp); +int buf_fromcache(buf_t bp); /*! 
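The buf_mark* calls documented here adjust buffer state without performing I/O themselves. A hedged in-kernel sketch of the common delayed-write pattern that buf_markdelayed() describes, assuming a filesystem context; myfs_touch_block is a hypothetical helper, while buf_bread() and buf_brelse() are standard KPI entry points declared elsewhere in this header:

#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/ucred.h>   /* NOCRED */

/*
 * Hypothetical metadata update: pull a block through the buffer cache,
 * dirty it, and leave it as a delayed write (no I/O issued here).
 */
static errno_t
myfs_touch_block(vnode_t vp, daddr64_t lblkno, int blksize)
{
	buf_t   bp;
	errno_t err;

	err = buf_bread(vp, lblkno, blksize, NOCRED, &bp);
	if (err) {
		buf_brelse(bp);  /* bread hands back a buffer even on error */
		return err;
	}

	((char *)buf_dataptr(bp))[0] ^= 0x01;  /* mutate the cached data */

	buf_markdelayed(bp);  /* dirty; flushed later by sync() or under pressure */
	buf_brelse(bp);       /* release without scheduling I/O */
	return 0;
}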
- @function buf_upl - @abstract Get the upl (Universal Page List) associated with a buffer. - @discussion Buffers allocated with buf_alloc() are not returned with a upl, and - traditional buffers only have a upl while an I/O is in progress. - @param bp Buffer whose upl to grab. - @return Buffer's upl if it has one, else NULL. + * @function buf_upl + * @abstract Get the upl (Universal Page List) associated with a buffer. + * @discussion Buffers allocated with buf_alloc() are not returned with a upl, and + * traditional buffers only have a upl while an I/O is in progress. + * @param bp Buffer whose upl to grab. + * @return Buffer's upl if it has one, else NULL. */ -void * buf_upl(buf_t bp); +void * buf_upl(buf_t bp); /*! - @function buf_uploffset - @abstract Get the offset into a UPL at which this buffer begins. - @discussion This function should only be called on iobufs, i.e. buffers allocated with buf_alloc(). - @param bp Buffer whose uploffset to grab. - @return Buffer's uploffset--does not check whether that value makes sense for this buffer. + * @function buf_uploffset + * @abstract Get the offset into a UPL at which this buffer begins. + * @discussion This function should only be called on iobufs, i.e. buffers allocated with buf_alloc(). + * @param bp Buffer whose uploffset to grab. + * @return Buffer's uploffset--does not check whether that value makes sense for this buffer. */ uint32_t buf_uploffset(buf_t bp); /*! - @function buf_rcred - @abstract Get the credential associated with a buffer for reading. - @discussion No reference is taken; if the credential is to be held on to persistently, an additional - reference must be taken with kauth_cred_ref. - @param bp Buffer whose credential to grab. - @return Credential if it exists, else NULL. + * @function buf_rcred + * @abstract Get the credential associated with a buffer for reading. + * @discussion No reference is taken; if the credential is to be held on to persistently, an additional + * reference must be taken with kauth_cred_ref. + * @param bp Buffer whose credential to grab. + * @return Credential if it exists, else NULL. */ kauth_cred_t buf_rcred(buf_t bp); /*! - @function buf_wcred - @abstract Get the credential associated with a buffer for writing. - @discussion No reference is taken; if the credential is to be held on to persistently, an additional - reference must be taken with kauth_cred_ref. - @param bp Buffer whose credential to grab. - @return Credential if it exists, else NULL. + * @function buf_wcred + * @abstract Get the credential associated with a buffer for writing. + * @discussion No reference is taken; if the credential is to be held on to persistently, an additional + * reference must be taken with kauth_cred_ref. + * @param bp Buffer whose credential to grab. + * @return Credential if it exists, else NULL. */ kauth_cred_t buf_wcred(buf_t bp); /*! - @function buf_proc - @abstract Get the process associated with this buffer. - @discussion buf_proc() will generally return NULL; a process is currently only associated with - a buffer in the event of a physio() call. - @param bp Buffer whose associated process to find. - @return Associated process, possibly NULL. + * @function buf_proc + * @abstract Get the process associated with this buffer. + * @discussion buf_proc() will generally return NULL; a process is currently only associated with + * a buffer in the event of a physio() call. + * @param bp Buffer whose associated process to find. + * @return Associated process, possibly NULL. 
*/ -proc_t buf_proc(buf_t bp); +proc_t buf_proc(buf_t bp); /*! - @function buf_dirtyoff - @abstract Get the starting offset of the dirty region associated with a buffer. - @discussion The dirty offset is zero unless someone explicitly calls buf_setdirtyoff() (which the kernel does not). - @param bp Buffer whose dirty offset to get. - @return Dirty offset (0 if not explicitly changed). + * @function buf_dirtyoff + * @abstract Get the starting offset of the dirty region associated with a buffer. + * @discussion The dirty offset is zero unless someone explicitly calls buf_setdirtyoff() (which the kernel does not). + * @param bp Buffer whose dirty offset to get. + * @return Dirty offset (0 if not explicitly changed). */ uint32_t buf_dirtyoff(buf_t bp); /*! - @function buf_dirtyend - @abstract Get the ending offset of the dirty region associated with a buffer. - @discussion If the buffer's data was found incore and dirty, the dirty end is the size of the block; otherwise, unless - someone outside of xnu explicitly changes it by calling buf_setdirtyend(), it will be zero. - @param bp Buffer whose dirty end to get. - @return 0 if buffer is found clean; size of buffer if found dirty. Can be set to any value by callers of buf_setdirtyend(). + * @function buf_dirtyend + * @abstract Get the ending offset of the dirty region associated with a buffer. + * @discussion If the buffer's data was found incore and dirty, the dirty end is the size of the block; otherwise, unless + * someone outside of xnu explicitly changes it by calling buf_setdirtyend(), it will be zero. + * @param bp Buffer whose dirty end to get. + * @return 0 if buffer is found clean; size of buffer if found dirty. Can be set to any value by callers of buf_setdirtyend(). */ uint32_t buf_dirtyend(buf_t bp); /*! - @function buf_setdirtyoff - @abstract Set the starting offset of the dirty region associated with a buffer. - @discussion This value is zero unless someone set it explicitly. - @param bp Buffer whose dirty end to set. + * @function buf_setdirtyoff + * @abstract Set the starting offset of the dirty region associated with a buffer. + * @discussion This value is zero unless someone set it explicitly. + * @param bp Buffer whose dirty end to set. */ -void buf_setdirtyoff(buf_t bp, uint32_t); +void buf_setdirtyoff(buf_t bp, uint32_t); /*! - @function buf_setdirtyend - @abstract Set the ending offset of the dirty region associated with a buffer. - @discussion If the buffer's data was found incore and dirty, the dirty end is the size of the block; otherwise, unless - someone outside of xnu explicitly changes it by calling buf_setdirtyend(), it will be zero. - @param bp Buffer whose dirty end to set. + * @function buf_setdirtyend + * @abstract Set the ending offset of the dirty region associated with a buffer. + * @discussion If the buffer's data was found incore and dirty, the dirty end is the size of the block; otherwise, unless + * someone outside of xnu explicitly changes it by calling buf_setdirtyend(), it will be zero. + * @param bp Buffer whose dirty end to set. */ -void buf_setdirtyend(buf_t bp, uint32_t); +void buf_setdirtyend(buf_t bp, uint32_t); /*! - @function buf_error - @abstract Get the error value associated with a buffer. - @discussion Errors are set with buf_seterror(). - @param bp Buffer whose error value to retrieve. - @return Error value, directly. + * @function buf_error + * @abstract Get the error value associated with a buffer. + * @discussion Errors are set with buf_seterror(). 
+ * @param bp Buffer whose error value to retrieve. + * @return Error value, directly. */ -errno_t buf_error(buf_t bp); +errno_t buf_error(buf_t bp); /*! - @function buf_seterror - @abstract Set an error value on a buffer. - @param bp Buffer whose error value to set. + * @function buf_seterror + * @abstract Set an error value on a buffer. + * @param bp Buffer whose error value to set. */ -void buf_seterror(buf_t bp, errno_t); +void buf_seterror(buf_t bp, errno_t); /*! - @function buf_setflags - @abstract Set flags on a buffer. - @discussion buffer_flags |= flags - @param bp Buffer whose flags to set. - @param flags Flags to add to buffer's mask. B_LOCKED/B_NOCACHE/B_ASYNC/B_READ/B_WRITE/B_PAGEIO/B_FUA + * @function buf_setflags + * @abstract Set flags on a buffer. + * @discussion buffer_flags |= flags + * @param bp Buffer whose flags to set. + * @param flags Flags to add to buffer's mask. B_LOCKED/B_NOCACHE/B_ASYNC/B_READ/B_WRITE/B_PAGEIO/B_FUA */ -void buf_setflags(buf_t bp, int32_t flags); +void buf_setflags(buf_t bp, int32_t flags); /*! - @function buf_clearflags - @abstract Clear flags on a buffer. - @discussion buffer_flags &= ~flags - @param bp Buffer whose flags to clear. - @param flags Flags to remove from buffer's mask. B_LOCKED/B_NOCACHE/B_ASYNC/B_READ/B_WRITE/B_PAGEIO/B_FUA + * @function buf_clearflags + * @abstract Clear flags on a buffer. + * @discussion buffer_flags &= ~flags + * @param bp Buffer whose flags to clear. + * @param flags Flags to remove from buffer's mask. B_LOCKED/B_NOCACHE/B_ASYNC/B_READ/B_WRITE/B_PAGEIO/B_FUA */ -void buf_clearflags(buf_t bp, int32_t flags); +void buf_clearflags(buf_t bp, int32_t flags); /*! - @function buf_flags - @abstract Get flags set on a buffer. - @discussion Valid flags are B_LOCKED/B_NOCACHE/B_ASYNC/B_READ/B_WRITE/B_PAGEIO/B_FUA. - @param bp Buffer whose flags to grab. - @return flags. + * @function buf_flags + * @abstract Get flags set on a buffer. + * @discussion Valid flags are B_LOCKED/B_NOCACHE/B_ASYNC/B_READ/B_WRITE/B_PAGEIO/B_FUA. + * @param bp Buffer whose flags to grab. + * @return flags. */ -int32_t buf_flags(buf_t bp); +int32_t buf_flags(buf_t bp); /*! - @function buf_reset - @abstract Reset I/O flag state on a buffer. - @discussion Clears current flags on a buffer (internal and external) and allows some new flags to be set. - Used perhaps to prepare an iobuf for reuse. - @param bp Buffer whose flags to grab. - @param flags Flags to set on buffer: B_READ, B_WRITE, B_ASYNC, B_NOCACHE. + * @function buf_reset + * @abstract Reset I/O flag state on a buffer. + * @discussion Clears current flags on a buffer (internal and external) and allows some new flags to be set. + * Used perhaps to prepare an iobuf for reuse. + * @param bp Buffer whose flags to grab. + * @param flags Flags to set on buffer: B_READ, B_WRITE, B_ASYNC, B_NOCACHE. */ -void buf_reset(buf_t bp, int32_t flags); +void buf_reset(buf_t bp, int32_t flags); /*! - @function buf_map - @abstract Get virtual mappings for buffer data. - @discussion For buffers created through buf_getblk() (i.e. traditional buffer cache usage), - buf_map() just returns the address at which data was mapped by but_getblk(). For a B_CLUSTER buffer, i.e. an iobuf - whose upl state is managed manually, there are two possibilities. If the buffer was created - with an underlying "real" buffer through cluster_bp(), the mapping of the "real" buffer is returned. 
- Otherwise, the buffer was created with buf_alloc() and buf_setupl() was subsequently called; buf_map() - will call ubc_upl_map() to get a mapping for the buffer's upl and return the start of that mapping - plus the buffer's upl offset (set in buf_setupl()). In the last case, buf_unmap() must later be called - to tear down the mapping. NOTE: buf_map() does not set the buffer data pointer; this must be done with buf_setdataptr(). - @param bp Buffer whose mapping to find or create. - @param io_addr Destination for mapping address. - @return 0 for success, ENOMEM if unable to map the buffer. + * @function buf_map + * @abstract Get virtual mappings for buffer data. + * @discussion For buffers created through buf_getblk() (i.e. traditional buffer cache usage), + * buf_map() just returns the address at which data was mapped by but_getblk(). For a B_CLUSTER buffer, i.e. an iobuf + * whose upl state is managed manually, there are two possibilities. If the buffer was created + * with an underlying "real" buffer through cluster_bp(), the mapping of the "real" buffer is returned. + * Otherwise, the buffer was created with buf_alloc() and buf_setupl() was subsequently called; buf_map() + * will call ubc_upl_map() to get a mapping for the buffer's upl and return the start of that mapping + * plus the buffer's upl offset (set in buf_setupl()). In the last case, buf_unmap() must later be called + * to tear down the mapping. NOTE: buf_map() does not set the buffer data pointer; this must be done with buf_setdataptr(). + * @param bp Buffer whose mapping to find or create. + * @param io_addr Destination for mapping address. + * @return 0 for success, ENOMEM if unable to map the buffer. */ -errno_t buf_map(buf_t bp, caddr_t *io_addr); +errno_t buf_map(buf_t bp, caddr_t *io_addr); /*! - @function buf_unmap - @abstract Release mappings for buffer data. - @discussion For buffers created through buf_getblk() (i.e. traditional buffer cache usage), - buf_unmap() does nothing; buf_brelse() will take care of unmapping. For a B_CLUSTER buffer, i.e. an iobuf - whose upl state is managed manually, there are two possibilities. If the buffer was created - with an underlying "real" buffer through cluster_bp(), buf_unmap() does nothing; buf_brelse() on the - underlying buffer will tear down the mapping. Otherwise, the buffer was created with buf_alloc() and - buf_setupl() was subsequently called; buf_map() created the mapping. In this case, buf_unmap() will - unmap the buffer. - @param bp Buffer whose mapping to find or create. - @return 0 for success, EINVAL if unable to unmap buffer. + * @function buf_unmap + * @abstract Release mappings for buffer data. + * @discussion For buffers created through buf_getblk() (i.e. traditional buffer cache usage), + * buf_unmap() does nothing; buf_brelse() will take care of unmapping. For a B_CLUSTER buffer, i.e. an iobuf + * whose upl state is managed manually, there are two possibilities. If the buffer was created + * with an underlying "real" buffer through cluster_bp(), buf_unmap() does nothing; buf_brelse() on the + * underlying buffer will tear down the mapping. Otherwise, the buffer was created with buf_alloc() and + * buf_setupl() was subsequently called; buf_map() created the mapping. In this case, buf_unmap() will + * unmap the buffer. + * @param bp Buffer whose mapping to find or create. + * @return 0 for success, EINVAL if unable to unmap buffer. */ -errno_t buf_unmap(buf_t bp); +errno_t buf_unmap(buf_t bp); /*! 
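The buf_map()/buf_unmap() contract just described is easiest to see with a manually managed iobuf. A hedged sketch under stated assumptions: zero_iobuf_page is hypothetical, the caller is assumed to have created (and to still own) the upl, e.g. via ubc_create_upl(), and buf_alloc()/buf_setupl()/buf_free() are taken to behave as documented in this header:

#include <sys/buf.h>
#include <string.h>          /* bzero */
#include <mach/vm_param.h>   /* PAGE_SIZE */

/* Hypothetical helper: zero one page of a manually managed iobuf. */
static errno_t
zero_iobuf_page(vnode_t vp, upl_t upl, uint32_t upl_offset)
{
	buf_t   bp;
	caddr_t va;
	errno_t err;

	bp = buf_alloc(vp);                 /* iobuf: no storage, no upl yet */
	buf_setcount(bp, PAGE_SIZE);        /* one page of valid bytes */
	err = buf_setupl(bp, upl, upl_offset);
	if (err == 0 && (err = buf_map(bp, &va)) == 0) {
		bzero(va, PAGE_SIZE);       /* va = upl mapping + uploffset */
		err = buf_unmap(bp);        /* required: buf_map created this mapping */
	}
	buf_free(bp);
	return err;
}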
- @function buf_setdrvdata - @abstract Set driver-specific data on a buffer. - @param bp Buffer whose driver-data to set. - @param drvdata Opaque driver data. + * @function buf_setdrvdata + * @abstract Set driver-specific data on a buffer. + * @param bp Buffer whose driver-data to set. + * @param drvdata Opaque driver data. */ -void buf_setdrvdata(buf_t bp, void *drvdata); +void buf_setdrvdata(buf_t bp, void *drvdata); /*! - @function buf_setdrvdata - @abstract Get driver-specific data from a buffer. - @param bp Buffer whose driver data to get. - @return Opaque driver data. + * @function buf_setdrvdata + * @abstract Get driver-specific data from a buffer. + * @param bp Buffer whose driver data to get. + * @return Opaque driver data. */ -void * buf_drvdata(buf_t bp); +void * buf_drvdata(buf_t bp); /*! - @function buf_setfsprivate - @abstract Set filesystem-specific data on a buffer. - @param bp Buffer whose filesystem data to set. - @param fsprivate Opaque filesystem data. + * @function buf_setfsprivate + * @abstract Set filesystem-specific data on a buffer. + * @param bp Buffer whose filesystem data to set. + * @param fsprivate Opaque filesystem data. */ -void buf_setfsprivate(buf_t bp, void *fsprivate); +void buf_setfsprivate(buf_t bp, void *fsprivate); /*! - @function buf_fsprivate - @abstract Get filesystem-specific data from a buffer. - @param bp Buffer whose filesystem data to get. - @return Opaque filesystem data. + * @function buf_fsprivate + * @abstract Get filesystem-specific data from a buffer. + * @param bp Buffer whose filesystem data to get. + * @return Opaque filesystem data. */ -void * buf_fsprivate(buf_t bp); +void * buf_fsprivate(buf_t bp); /*! - @function buf_blkno - @abstract Get physical block number associated with a buffer, in the sense of VNOP_BLOCKMAP. - @discussion When a buffer's physical block number is the same is its logical block number, then the physical - block number is considered uninitialized. A physical block number of -1 indicates that there is no valid - physical mapping (e.g. the logical block is invalid or corresponds to a sparse region in a file). Physical - block number is normally set by the cluster layer or by buf_getblk(). - @param bp Buffer whose physical block number to get. - @return Block number. + * @function buf_blkno + * @abstract Get physical block number associated with a buffer, in the sense of VNOP_BLOCKMAP. + * @discussion When a buffer's physical block number is the same is its logical block number, then the physical + * block number is considered uninitialized. A physical block number of -1 indicates that there is no valid + * physical mapping (e.g. the logical block is invalid or corresponds to a sparse region in a file). Physical + * block number is normally set by the cluster layer or by buf_getblk(). + * @param bp Buffer whose physical block number to get. + * @return Block number. */ daddr64_t buf_blkno(buf_t bp); /*! - @function buf_lblkno - @abstract Get logical block number associated with a buffer. - @discussion Logical block number is set on traditionally-used buffers by an argument passed to buf_getblk(), - for example by buf_bread(). - @param bp Buffer whose logical block number to get. - @return Block number. + * @function buf_lblkno + * @abstract Get logical block number associated with a buffer. + * @discussion Logical block number is set on traditionally-used buffers by an argument passed to buf_getblk(), + * for example by buf_bread(). + * @param bp Buffer whose logical block number to get. 
+ * @return Block number. */ daddr64_t buf_lblkno(buf_t bp); /*! - @function buf_setblkno - @abstract Set physical block number associated with a buffer. - @discussion Physical block number is generally set by the cluster layer or by buf_getblk(). - @param bp Buffer whose physical block number to set. - @param blkno Block number to set. + * @function buf_setblkno + * @abstract Set physical block number associated with a buffer. + * @discussion Physical block number is generally set by the cluster layer or by buf_getblk(). + * @param bp Buffer whose physical block number to set. + * @param blkno Block number to set. */ -void buf_setblkno(buf_t bp, daddr64_t blkno); +void buf_setblkno(buf_t bp, daddr64_t blkno); /*! - @function buf_setlblkno - @abstract Set logical block number associated with a buffer. - @discussion Logical block number is set on traditionally-used buffers by an argument passed to buf_getblk(), - for example by buf_bread(). - @param bp Buffer whose logical block number to set. - @param lblkno Block number to set. + * @function buf_setlblkno + * @abstract Set logical block number associated with a buffer. + * @discussion Logical block number is set on traditionally-used buffers by an argument passed to buf_getblk(), + * for example by buf_bread(). + * @param bp Buffer whose logical block number to set. + * @param lblkno Block number to set. */ -void buf_setlblkno(buf_t bp, daddr64_t lblkno); +void buf_setlblkno(buf_t bp, daddr64_t lblkno); /*! - @function buf_count - @abstract Get count of valid bytes in a buffer. This may be less than the space allocated to the buffer. - @param bp Buffer whose byte count to get. - @return Byte count. + * @function buf_count + * @abstract Get count of valid bytes in a buffer. This may be less than the space allocated to the buffer. + * @param bp Buffer whose byte count to get. + * @return Byte count. */ uint32_t buf_count(buf_t bp); /*! - @function buf_size - @abstract Get size of data region allocated to a buffer. - @discussion May be larger than amount of valid data in buffer. - @param bp Buffer whose size to get. - @return Size. + * @function buf_size + * @abstract Get size of data region allocated to a buffer. + * @discussion May be larger than amount of valid data in buffer. + * @param bp Buffer whose size to get. + * @return Size. */ uint32_t buf_size(buf_t bp); /*! - @function buf_resid - @abstract Get a count of bytes which were not consumed by an I/O on a buffer. - @discussion Set when an I/O operations completes. - @param bp Buffer whose outstanding count to get. - @return Count of unwritten/unread bytes. + * @function buf_resid + * @abstract Get a count of bytes which were not consumed by an I/O on a buffer. + * @discussion Set when an I/O operations completes. + * @param bp Buffer whose outstanding count to get. + * @return Count of unwritten/unread bytes. */ uint32_t buf_resid(buf_t bp); /*! - @function buf_setcount - @abstract Set count of valid bytes in a buffer. This may be less than the space allocated to the buffer. - @param bp Buffer whose byte count to set. - @param bcount Count to set. + * @function buf_setcount + * @abstract Set count of valid bytes in a buffer. This may be less than the space allocated to the buffer. + * @param bp Buffer whose byte count to set. + * @param bcount Count to set. */ -void buf_setcount(buf_t bp, uint32_t bcount); +void buf_setcount(buf_t bp, uint32_t bcount); /*! - @function buf_setsize - @abstract Set size of data region allocated to a buffer. 
- @discussion May be larger than amount of valid data in buffer. Should be used by - code which is manually providing storage for an iobuf, one allocated with buf_alloc(). - @param bp Buffer whose size to set. + * @function buf_setsize + * @abstract Set size of data region allocated to a buffer. + * @discussion May be larger than amount of valid data in buffer. Should be used by + * code which is manually providing storage for an iobuf, one allocated with buf_alloc(). + * @param bp Buffer whose size to set. */ -void buf_setsize(buf_t bp, uint32_t); +void buf_setsize(buf_t bp, uint32_t); /*! - @function buf_setresid - @abstract Set a count of bytes outstanding for I/O in a buffer. - @discussion Set when an I/O operations completes. Examples: called by IOStorageFamily when I/O - completes, often called on an "original" buffer when using a manipulated buffer to perform I/O - on behalf of the first. - @param bp Buffer whose outstanding count to set. + * @function buf_setresid + * @abstract Set a count of bytes outstanding for I/O in a buffer. + * @discussion Set when an I/O operations completes. Examples: called by IOStorageFamily when I/O + * completes, often called on an "original" buffer when using a manipulated buffer to perform I/O + * on behalf of the first. + * @param bp Buffer whose outstanding count to set. */ -void buf_setresid(buf_t bp, uint32_t resid); +void buf_setresid(buf_t bp, uint32_t resid); /*! - @function buf_setdataptr - @abstract Set the address at which a buffer's data will be stored. - @discussion In traditional buffer use, the data pointer will be set automatically. This routine is - useful with iobufs (allocated with buf_alloc()). - @param bp Buffer whose data pointer to set. - @param data Pointer to data region. + * @function buf_setdataptr + * @abstract Set the address at which a buffer's data will be stored. + * @discussion In traditional buffer use, the data pointer will be set automatically. This routine is + * useful with iobufs (allocated with buf_alloc()). + * @param bp Buffer whose data pointer to set. + * @param data Pointer to data region. */ -void buf_setdataptr(buf_t bp, uintptr_t data); +void buf_setdataptr(buf_t bp, uintptr_t data); /*! - @function buf_dataptr - @abstract Get the address at which a buffer's data is stored; for iobufs, this must - be set with buf_setdataptr(). See buf_map(). - @param bp Buffer whose data pointer to retrieve. - @return Data pointer; NULL if unset. + * @function buf_dataptr + * @abstract Get the address at which a buffer's data is stored; for iobufs, this must + * be set with buf_setdataptr(). See buf_map(). + * @param bp Buffer whose data pointer to retrieve. + * @return Data pointer; NULL if unset. */ uintptr_t buf_dataptr(buf_t bp); /*! - @function buf_vnode - @abstract Get the vnode associated with a buffer. - @discussion Every buffer is associated with a file. Because there is an I/O in flight, - there is an iocount on this vnode; it is returned WITHOUT an extra iocount, and vnode_put() - need NOT be called. - @param bp Buffer whose vnode to retrieve. - @return Buffer's vnode. + * @function buf_vnode + * @abstract Get the vnode associated with a buffer. + * @discussion Every buffer is associated with a file. Because there is an I/O in flight, + * there is an iocount on this vnode; it is returned WITHOUT an extra iocount, and vnode_put() + * need NOT be called. + * @param bp Buffer whose vnode to retrieve. + * @return Buffer's vnode. */ -vnode_t buf_vnode(buf_t bp); +vnode_t buf_vnode(buf_t bp); /*! 
- @function buf_setvnode
- @abstract Set the vnode associated with a buffer.
- @discussion This call need not be used on traditional buffers; it is for use with iobufs.
- @param bp Buffer whose vnode to set.
- @param vp The vnode to attach to the buffer.
+ * @function buf_setvnode
+ * @abstract Set the vnode associated with a buffer.
+ * @discussion This call need not be used on traditional buffers; it is for use with iobufs.
+ * @param bp Buffer whose vnode to set.
+ * @param vp The vnode to attach to the buffer.
 */
-void buf_setvnode(buf_t bp, vnode_t vp);
+void buf_setvnode(buf_t bp, vnode_t vp);

/*!
- @function buf_device
- @abstract Get the device ID associated with a buffer.
- @discussion In traditional buffer use, this value is NODEV until buf_strategy() is called unless
- buf_getblk() was passed a device vnode. It is set on an iobuf if buf_alloc() is passed a device
- vnode or if buf_setdevice() is called.
- @param bp Buffer whose device ID to retrieve.
- @return Device id.
+ * @function buf_device
+ * @abstract Get the device ID associated with a buffer.
+ * @discussion In traditional buffer use, this value is NODEV until buf_strategy() is called unless
+ * buf_getblk() was passed a device vnode. It is set on an iobuf if buf_alloc() is passed a device
+ * vnode or if buf_setdevice() is called.
+ * @param bp Buffer whose device ID to retrieve.
+ * @return Device id.
 */
-dev_t buf_device(buf_t bp);
+dev_t buf_device(buf_t bp);

/*!
- @function buf_setdevice
- @abstract Set the device associated with a buffer.
- @discussion A buffer's device is set in buf_strategy() (or in buf_getblk() if the file is a device).
- It is also set on an iobuf if buf_alloc() is passed a device vnode.
- @param bp Buffer whose device ID to set.
- @param vp Device to set on the buffer.
- @return 0 for success, EINVAL if vp is not a device file.
+ * @function buf_setdevice
+ * @abstract Set the device associated with a buffer.
+ * @discussion A buffer's device is set in buf_strategy() (or in buf_getblk() if the file is a device).
+ * It is also set on an iobuf if buf_alloc() is passed a device vnode.
+ * @param bp Buffer whose device ID to set.
+ * @param vp Device to set on the buffer.
+ * @return 0 for success, EINVAL if vp is not a device file.
 */
-errno_t buf_setdevice(buf_t bp, vnode_t vp);
+errno_t buf_setdevice(buf_t bp, vnode_t vp);

/*!
- @function buf_strategy
- @abstract Pass an I/O request for a buffer down to the device layer.
- @discussion This is one of the most important routines in the buffer cache layer. For buffers obtained
- through buf_getblk, it handles finding physical block numbers for the I/O (with VNOP_BLKTOOFF and
- VNOP_BLOCKMAP), packaging the I/O into page-sized chunks, and initiating I/O on the disk by calling
- the device's strategy routine. If a buffer's UPL has been set manually with buf_setupl(), it assumes
- that the request is already correctly configured with a block number and a size divisible by page size
- and will just call directly to the device.
- @param devvp Device on which to perform I/O
- @param ap vnop_strategy_args structure (most importantly, a buffer).
- @return 0 for success, or errors from filesystem or device layers.
+ * @function buf_strategy
+ * @abstract Pass an I/O request for a buffer down to the device layer.
+ * @discussion This is one of the most important routines in the buffer cache layer. For buffers obtained
+ * through buf_getblk, it handles finding physical block numbers for the I/O (with VNOP_BLKTOOFF and
+ * VNOP_BLOCKMAP), packaging the I/O into page-sized chunks, and initiating I/O on the disk by calling
+ * the device's strategy routine. If a buffer's UPL has been set manually with buf_setupl(), it assumes
+ * that the request is already correctly configured with a block number and a size divisible by page size
+ * and will just call directly to the device.
+ * @param devvp Device on which to perform I/O.
+ * @param ap vnop_strategy_args structure (most importantly, a buffer).
+ * @return 0 for success, or errors from filesystem or device layers.
 */
-errno_t buf_strategy(vnode_t devvp, void *ap);
+errno_t buf_strategy(vnode_t devvp, void *ap);
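In practice a filesystem's strategy vnop often just forwards to buf_strategy(), as in this sketch; myfs_devvp is a hypothetical accessor for the mount's device vnode, not part of this header.

extern vnode_t myfs_devvp(vnode_t vp);	/* hypothetical: file vnode -> device vnode */

static int
myfs_vnop_strategy(struct vnop_strategy_args *ap)
{
	/* Let the buffer cache map logical blocks and split the I/O. */
	vnode_t devvp = myfs_devvp(buf_vnode(ap->a_bp));

	return buf_strategy(devvp, ap);
}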
-/*
- * Flags for buf_invalblkno()
- */
-#define BUF_WAIT 0x01
-
-/*!
- @function buf_invalblkno
- @abstract Invalidate a filesystem logical block in a file.
- @discussion buf_invalblkno() tries to make the data for a given block in a file
- invalid; if the buffer for that block is found in core and is not busy, we mark it
- invalid and call buf_brelse() (see "flags" param for what happens if the buffer is busy).
- buf_brelse(), noticing that it is invalid, will
- will return the buffer to the empty-buffer list and tell the VM subsystem to abandon
- the relevant pages. Data will not be written to backing store--it will be cast aside.
- Note that this function will only work if the block in question has been
- obtained with a buf_getblk(). If data has been read into core without using
- traditional buffer cache routines, buf_invalblkno() will not be able to invalidate it--this
- includes the use of iobufs.
- @param vp vnode whose block to invalidate.
- @param lblkno Logical block number.
- @param flags BUF_WAIT: wait for busy buffers to become unbusy and invalidate them then. Otherwise,
- just return EBUSY for busy blocks.
- @return 0 for success, EINVAL if vp is not a device file.
- */
-errno_t buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags);
-
-/*!
- @function buf_callback
- @abstract Get the function set to be called when I/O on a buffer completes.
- @discussion A function returned by buf_callback was originally set with buf_setcallback().
- @param bp Buffer whose callback to get.
- @return 0 for success, or errors from filesystem or device layers.
+/*
+ * Flags for buf_invalblkno()
+ */
+#define BUF_WAIT 0x01
+
+/*!
+ * @function buf_invalblkno
+ * @abstract Invalidate a filesystem logical block in a file.
+ * @discussion buf_invalblkno() tries to make the data for a given block in a file
+ * invalid; if the buffer for that block is found in core and is not busy, we mark it
+ * invalid and call buf_brelse() (see "flags" param for what happens if the buffer is busy).
+ * buf_brelse(), noticing that it is invalid, will
+ * return the buffer to the empty-buffer list and tell the VM subsystem to abandon
+ * the relevant pages. Data will not be written to backing store--it will be cast aside.
+ * Note that this function will only work if the block in question has been
+ * obtained with a buf_getblk(). If data has been read into core without using
+ * traditional buffer cache routines, buf_invalblkno() will not be able to invalidate it--this
+ * includes the use of iobufs.
+ * @param vp vnode whose block to invalidate.
+ * @param lblkno Logical block number.
+ * @param flags BUF_WAIT: wait for busy buffers to become unbusy and invalidate them then. Otherwise,
+ * just return EBUSY for busy blocks.
+ * @return 0 for success, EBUSY for a busy block when BUF_WAIT is not set.
+ */
+errno_t buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags);
+
+/*!
+ * @function buf_callback
+ * @abstract Get the function set to be called when I/O on a buffer completes.
+ * @discussion A function returned by buf_callback was originally set with buf_setcallback().
+ * @param bp Buffer whose callback to get.
+ * @return The callback function, or NULL if no callback is set.
 */
void * buf_callback(buf_t bp);

/*!
- @function buf_setcallback
- @abstract Set a function to be called once when I/O on a buffer completes.
- @discussion A one-shot callout set with buf_setcallback() will be called from buf_biodone()
- when I/O completes. It will be passed the "transaction" argument as well as the buffer.
- buf_setcallback() also marks the buffer as B_ASYNC.
- @param bp Buffer whose callback to set.
- @param callback function to use as callback.
- @param transaction Additional argument to callback function.
- @return 0; always succeeds.
+ * @function buf_setcallback
+ * @abstract Set a function to be called once when I/O on a buffer completes.
+ * @discussion A one-shot callout set with buf_setcallback() will be called from buf_biodone()
+ * when I/O completes. It will be passed the "transaction" argument as well as the buffer.
+ * buf_setcallback() also marks the buffer as B_ASYNC.
+ * @param bp Buffer whose callback to set.
+ * @param callback function to use as callback.
+ * @param transaction Additional argument to callback function.
+ * @return 0; always succeeds.
 */
-errno_t buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction);
+errno_t buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction);

/*!
- @function buf_setupl
- @abstract Set the UPL (Universal Page List), and offset therein, on a buffer.
- @discussion buf_setupl() should only be called on buffers allocated with buf_alloc().
- A subsequent call to buf_map() will map the UPL and give back the address at which data
- begins. After buf_setupl() is called, a buffer is marked B_CLUSTER; when this is the case,
- buf_strategy() assumes that a buffer is correctly configured to be passed to the device
- layer without modification. Passing a NULL upl will clear the upl and the B_CLUSTER flag on the
- buffer.
- @param bp Buffer whose upl to set.
- @param upl UPL to set in the buffer.
- @param offset Offset within upl at which relevant data begin.
- @return 0 for success, EINVAL if the buffer was not allocated with buf_alloc().
+ * @function buf_setupl
+ * @abstract Set the UPL (Universal Page List), and offset therein, on a buffer.
+ * @discussion buf_setupl() should only be called on buffers allocated with buf_alloc().
+ * A subsequent call to buf_map() will map the UPL and give back the address at which data
+ * begins. After buf_setupl() is called, a buffer is marked B_CLUSTER; when this is the case,
+ * buf_strategy() assumes that a buffer is correctly configured to be passed to the device
+ * layer without modification. Passing a NULL upl will clear the upl and the B_CLUSTER flag on the
+ * buffer.
+ * @param bp Buffer whose upl to set.
+ * @param upl UPL to set in the buffer.
+ * @param offset Offset within upl at which relevant data begin.
+ * @return 0 for success, EINVAL if the buffer was not allocated with buf_alloc().
 */
-errno_t buf_setupl(buf_t bp, upl_t upl, uint32_t offset);
+errno_t buf_setupl(buf_t bp, upl_t upl, uint32_t offset);
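A sketch of the one-shot callout pattern buf_setcallback() describes; my_iodone and start_async_io are hypothetical. Because the callout owns the buffer at completion time, it performs the cleanup:

static void
my_iodone(buf_t bp, void *transaction)
{
	/* Runs once from buf_biodone(); the callout must clean up the iobuf. */
	buf_free(bp);
}

static void
start_async_io(buf_t bp, void *transaction)
{
	buf_setcallback(bp, my_iodone, transaction);	/* also marks bp B_ASYNC */
	/* ... hand bp to the device, e.g. through buf_strategy() ... */
}

/*!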
- @function buf_clone
- @abstract Clone a buffer with a restricted range and an optional callback.
- @discussion Generates a buffer which is identical to its "bp" argument except that
- it spans a subset of the data of the original. The buffer to be cloned should
- have been allocated with buf_alloc(). Checks its arguments to make sure
- that the data subset is coherent. Optionally, adds a callback function and argument to it
- to be called when I/O completes (as with buf_setcallback(), but B_ASYNC is not set). If the original buffer had
- a upl set through buf_setupl(), this upl is copied to the new buffer; otherwise, the original's
- data pointer is used raw. The buffer must be released with buf_free().
- @param bp Buffer to clone.
- @param io_offset Offset, relative to start of data in original buffer, at which new buffer's data will begin.
- @param io_size Size of buffer region in new buffer, in the sense of buf_count().
- @param iodone Callback to be called from buf_biodone() when I/O completes, in the sense of buf_setcallback().
- @param arg Argument to pass to iodone() callback.
- @return NULL if io_offset/io_size combination is invalid for the buffer to be cloned; otherwise, the new buffer.
+ * @function buf_clone
+ * @abstract Clone a buffer with a restricted range and an optional callback.
+ * @discussion Generates a buffer which is identical to its "bp" argument except that
+ * it spans a subset of the data of the original. The buffer to be cloned should
+ * have been allocated with buf_alloc(). Checks its arguments to make sure
+ * that the data subset is coherent. Optionally, adds a callback function and argument to it
+ * to be called when I/O completes (as with buf_setcallback(), but B_ASYNC is not set). If the original buffer had
+ * a UPL set through buf_setupl(), this UPL is copied to the new buffer; otherwise, the original's
+ * data pointer is used raw. The buffer must be released with buf_free().
+ * @param bp Buffer to clone.
+ * @param io_offset Offset, relative to start of data in original buffer, at which new buffer's data will begin.
+ * @param io_size Size of buffer region in new buffer, in the sense of buf_count().
+ * @param iodone Callback to be called from buf_biodone() when I/O completes, in the sense of buf_setcallback().
+ * @param arg Argument to pass to iodone() callback.
+ * @return NULL if io_offset/io_size combination is invalid for the buffer to be cloned; otherwise, the new buffer.
 */
-buf_t buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg);
+buf_t buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg);

/*!
- @function buf_create_shadow
- @abstract Create a shadow buffer with optional private storage and an optional callback.
- @param bp Buffer to shadow.
- @param force_copy If TRUE, do not link the shadaow to 'bp' and if 'external_storage' == NULL,
- force a copy of the data associated with 'bp'.
- @param external_storage If non-NULL, associate it with the new buffer as its storage instead of the
- storage currently associated with 'bp'.
- @param iodone Callback to be called from buf_biodone() when I/O completes, in the sense of buf_setcallback().
- @param arg Argument to pass to iodone() callback.
- @return NULL if the buffer to be shadowed is not B_META or a primary buffer (i.e. not a shadow buffer); otherwise, the new buffer.
-*/
+ * @function buf_create_shadow
+ * @abstract Create a shadow buffer with optional private storage and an optional callback.
+ * @param bp Buffer to shadow.
+ * @param force_copy If TRUE, do not link the shadow to 'bp' and if 'external_storage' == NULL,
+ * force a copy of the data associated with 'bp'.
+ * @param external_storage If non-NULL, associate it with the new buffer as its storage instead of the
+ * storage currently associated with 'bp'.
+ * @param iodone Callback to be called from buf_biodone() when I/O completes, in the sense of buf_setcallback().
+ * @param arg Argument to pass to iodone() callback.
+ * @return NULL if the buffer to be shadowed is not B_META or a primary buffer (i.e. not a shadow buffer); otherwise, the new buffer.
+ */
-buf_t buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg);
+buf_t buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg);

/*!
- @function buf_shadow
- @abstract returns true if 'bp' is a shadow of another buffer.
- @param bp Buffer to query.
- @return 1 if 'bp' is a shadow, 0 otherwise.
-*/
-int buf_shadow(buf_t bp);
+ * @function buf_shadow
+ * @abstract Returns true if 'bp' is a shadow of another buffer.
+ * @param bp Buffer to query.
+ * @return 1 if 'bp' is a shadow, 0 otherwise.
+ */
+int buf_shadow(buf_t bp);

/*!
- @function buf_alloc
- @abstract Allocate an uninitialized buffer.
- @discussion A buffer returned by buf_alloc() is marked as busy and as an iobuf; it has no storage set up and must be
- set up using buf_setdataptr() or buf_setupl()/buf_map().
- @param vp vnode to associate with the buffer: optionally NULL. If vp is a device file, then
- the buffer's associated device will be set. If vp is NULL, it can be set later with buf_setvnode().
- @return New buffer.
+ * @function buf_alloc
+ * @abstract Allocate an uninitialized buffer.
+ * @discussion A buffer returned by buf_alloc() is marked as busy and as an iobuf; it has no storage set up and must be
+ * set up using buf_setdataptr() or buf_setupl()/buf_map().
+ * @param vp vnode to associate with the buffer: optionally NULL. If vp is a device file, then
+ * the buffer's associated device will be set. If vp is NULL, it can be set later with buf_setvnode().
+ * @return New buffer.
 */
-buf_t buf_alloc(vnode_t vp);
+buf_t buf_alloc(vnode_t vp);

/*!
- @function buf_free
- @abstract Free a buffer that was allocated with buf_alloc().
- @discussion The storage (UPL, data pointer) associated with an iobuf must be freed manually.
- @param bp The buffer to free.
+ * @function buf_free
+ * @abstract Free a buffer that was allocated with buf_alloc().
+ * @discussion The storage (UPL, data pointer) associated with an iobuf must be freed manually.
+ * @param bp The buffer to free.
 */
-void buf_free(buf_t bp);
+void buf_free(buf_t bp);

/*
 * flags for buf_invalidateblks
 */
-#define BUF_WRITE_DATA 0x0001 /* write data blocks first */
-#define BUF_SKIP_META 0x0002 /* skip over metadata blocks */
-#define BUF_INVALIDATE_LOCKED 0x0004 /* force B_LOCKED blocks to be invalidated */
+#define BUF_WRITE_DATA 0x0001 /* write data blocks first */
+#define BUF_SKIP_META 0x0002 /* skip over metadata blocks */
+#define BUF_INVALIDATE_LOCKED 0x0004 /* force B_LOCKED blocks to be invalidated */
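A hedged sketch of shadow creation under the rules above (shadow_for_write is hypothetical): the original must be a B_META primary buffer, and passing 0 for external_storage lets the shadow reference the original's storage.

static buf_t
shadow_for_write(buf_t bp, void (*iodone)(buf_t, void *), void *arg)
{
	/* FALSE: stay linked to bp and reference its storage. */
	buf_t shadow = buf_create_shadow(bp, FALSE, (uintptr_t)0, iodone, arg);

	if (shadow != NULL) {
		/* buf_shadow(shadow) now returns 1. */
	}
	return shadow;	/* NULL if bp was not an eligible primary buffer */
}

/*!
- @function buf_invalidateblks
- @abstract Invalidate all the blocks associated with a vnode.
- @discussion This function does for all blocks associated with a vnode what buf_invalblkno does for one block.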
- Again, it will only be able to invalidate data which were populated with traditional buffer cache routines,
- i.e. by buf_getblk() and callers thereof. Unlike buf_invalblkno(), it can be made to write dirty data to disk
- rather than casting it aside.
- @param vp The vnode whose data to invalidate.
- @param flags BUF_WRITE_DATA: write dirty data to disk with VNOP_BWRITE() before kicking buffer cache entries out.
- BUF_SKIP_META: do not invalidate metadata blocks.
- @param slpflag Flags to pass to "msleep" while waiting to acquire busy buffers.
- @param slptimeo Timeout in "hz" (1/100 second) to wait for a buffer to become unbusy before waking from sleep
- and re-starting the scan.
- @return 0 for success, error values from msleep().
+ * @function buf_invalidateblks
+ * @abstract Invalidate all the blocks associated with a vnode.
+ * @discussion This function does for all blocks associated with a vnode what buf_invalblkno does for one block.
+ * Again, it will only be able to invalidate data which were populated with traditional buffer cache routines,
+ * i.e. by buf_getblk() and callers thereof. Unlike buf_invalblkno(), it can be made to write dirty data to disk
+ * rather than casting it aside.
+ * @param vp The vnode whose data to invalidate.
+ * @param flags BUF_WRITE_DATA: write dirty data to disk with VNOP_BWRITE() before kicking buffer cache entries out.
+ * BUF_SKIP_META: do not invalidate metadata blocks.
+ * @param slpflag Flags to pass to "msleep" while waiting to acquire busy buffers.
+ * @param slptimeo Timeout in "hz" (1/100 second) to wait for a buffer to become unbusy before waking from sleep
+ * and re-starting the scan.
+ * @return 0 for success, error values from msleep().
 */
-int buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo);
+int buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo);

/*
 * flags for buf_flushdirtyblks and buf_iterate
 */
-#define BUF_SKIP_NONLOCKED 0x01
-#define BUF_SKIP_LOCKED 0x02
-#define BUF_SCAN_CLEAN 0x04 /* scan the clean buffers */
-#define BUF_SCAN_DIRTY 0x08 /* scan the dirty buffers */
-#define BUF_NOTIFY_BUSY 0x10 /* notify the caller about the busy pages during the scan */
-
-
-#define BUF_RETURNED 0
-#define BUF_RETURNED_DONE 1
-#define BUF_CLAIMED 2
-#define BUF_CLAIMED_DONE 3
-/*!
- @function buf_flushdirtyblks
- @abstract Write dirty file blocks to disk.
- @param vp The vnode whose blocks to flush.
- @param wait Wait for writes to complete before returning.
- @param flags Can pass zero, meaning "flush all dirty buffers."
- BUF_SKIP_NONLOCKED: Skip buffers which are not busy when we encounter them.
- BUF_SKIP_LOCKED: Skip buffers which are busy when we encounter them.
- @param msg String to pass to msleep().
- */
-void buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg);
-
-/*!
- @function buf_iterate
- @abstract Perform some operation on all buffers associated with a vnode.
- @param vp The vnode whose buffers to scan.
- @param callout Function to call on each buffer. Should return one of:
- BUF_RETURNED: buf_iterate() should call buf_brelse() on the buffer.
- BUF_RETURNED_DONE: buf_iterate() should call buf_brelse() on the buffer and then stop iterating.
- BUF_CLAIMED: buf_iterate() should continue iterating (and not call buf_brelse()).
- BUF_CLAIMED_DONE: buf_iterate() should stop iterating (and not call buf_brelse()).
- @param flags
- BUF_SKIP_NONLOCKED: Skip buffers which are not busy when we encounter them. BUF_SKIP_LOCKED: Skip buffers which are busy when we encounter them.
- BUF_SCAN_CLEAN: Call out on clean buffers.
- BUF_SCAN_DIRTY: Call out on dirty buffers.
- BUF_NOTIFY_BUSY: If a buffer cannot be acquired, pass a NULL buffer to callout; otherwise,
- that buffer will be silently skipped.
- @param arg Argument to pass to callout in addition to buffer.
- */
-void buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg);
-
-/*!
- @function buf_clear
- @abstract Zero out the storage associated with a buffer.
- @discussion Calls buf_map() to get the buffer's data address; for a B_CLUSTER
- buffer (one which has had buf_setupl() called on it), it tries to map the buffer's
- UPL into memory; should only be called once during the life cycle of an iobuf (one allocated
- with buf_alloc()).
- @param bp The buffer to zero out.
- */
-void buf_clear(buf_t bp);
-
-/*!
- @function buf_bawrite
- @abstract Start an asychronous write on a buffer.
- @discussion Calls VNOP_BWRITE to start the process of propagating an asynchronous write down to the device layer.
- Callers can wait for writes to complete at their discretion using buf_biowait(). When this function is called,
- data should already have been written to the buffer's data region.
- @param bp The buffer on which to initiate I/O.
- @return EWOULDBLOCK if write count is high and "throttle" is zero; otherwise, errors from VNOP_BWRITE.
- */
-errno_t buf_bawrite(buf_t bp);
-
-/*!
- @function buf_bdwrite
- @abstract Mark a buffer for delayed write.
- @discussion Marks a buffer as waiting for delayed write and the current I/O as complete; data will be written to backing store
- before the buffer is reused, but it will not be queued for I/O immediately. Note that for buffers allocated
- with buf_alloc(), there are no such guarantees; you must take care of your own flushing to disk. If
- the number of delayed writes pending on the system is greater than an internal limit and the caller has not
- requested otherwise [see return_error] , buf_bdwrite() will unilaterally launch an asynchronous I/O with buf_bawrite() to keep the pile of
- delayed writes from getting too large.
- @param bp The buffer to mark for delayed write.
- @return EAGAIN for return_error != 0 case, 0 for succeess, errors from buf_bawrite.
- */
-errno_t buf_bdwrite(buf_t bp);
-
-/*!
- @function buf_bwrite
- @abstract Write a buffer's data to backing store.
- @discussion Once the data in a buffer has been modified, buf_bwrite() starts sending it to disk by calling
- VNOP_STRATEGY. Unless B_ASYNC has been set on the buffer (by buf_setflags() or otherwise), data will have
- been written to disk when buf_bwrite() returns. See Bach (p 56).
- @param bp The buffer to write to disk.
- @return 0 for success; errors from buf_biowait().
- */
-errno_t buf_bwrite(buf_t bp);
-
-/*!
- @function buf_biodone
- @abstract Mark an I/O as completed.
- @discussion buf_biodone() should be called by whosoever decides that an I/O on a buffer is complete; for example,
- IOStorageFamily. It clears the dirty flag on a buffer and signals on the vnode that a write has completed
- with vnode_writedone(). If a callout or filter has been set on the buffer, that function is called. In the case
- of a callout, that function is expected to take care of cleaning up and freeing the buffer.
- Otherwise, if the buffer is marked B_ASYNC (e.g. it was passed to buf_bawrite()), then buf_biodone()
- considers itself justified in calling buf_brelse() to return it to free lists--no one is waiting for it. Finally,
- waiters on the bp (e.g. in buf_biowait()) are woken up.
- @param bp The buffer to mark as done with I/O.
- */
-void buf_biodone(buf_t bp);
-
-/*!
- @function buf_biowait
- @abstract Wait for I/O on a buffer to complete.
- @discussion Waits for I/O on a buffer to finish, as marked by a buf_biodone() call.
- @param bp The buffer to wait on.
- @return 0 for a successful wait; nonzero the buffer has been marked as EINTR or had an error set on it.
- */
-errno_t buf_biowait(buf_t bp);
-
-/*!
- @function buf_brelse
- @abstract Release any claim to a buffer, sending it back to free lists.
- @discussion buf_brelse() cleans up buffer state and releases a buffer to the free lists. If the buffer
- is not marked invalid and its pages are dirty (e.g. a delayed write was made), its data will be commited
- to backing store. If it is marked invalid, its data will be discarded completely.
- A valid, cacheable buffer will be put on a list and kept in the buffer hash so it
- can be found again; otherwise, it will be dissociated from its vnode and treated as empty. Which list a valid
- buffer is placed on depends on the use of buf_markaged(), whether it is metadata, and the B_LOCKED flag. A
- B_LOCKED buffer will not be available for reuse by other files, though its data may be paged out.
- Note that buf_brelse() is intended for use with traditionally allocated buffers.
- @param bp The buffer to release.
- */
-void buf_brelse(buf_t bp);
-
-/*!
- @function buf_bread
- @abstract Synchronously read a block of a file.
- @discussion buf_bread() is the traditional way to read a single logical block of a file through the buffer cache.
- It tries to find the buffer and corresponding page(s) in core, calls VNOP_STRATEGY if necessary to bring the data
- into memory, and waits for I/O to complete. It should not be used to read blocks of greater than 4K (one VM page)
- in size; use cluster routines for large reads. Indeed, the cluster layer is a more efficient choice for reading DATA
- unless you need some finely-tuned semantics that it cannot provide.
- @param vp The file from which to read.
- @param blkno The logical (filesystem) block number to read.
- @param size Size of block; do not use for sizes > 4K.
- @param cred Credential to store and use for reading from disk if data are not already in core.
- @param bpp Destination pointer for buffer.
- @return 0 for success, or an error from buf_biowait().
- */
-errno_t buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp);
-
-/*!
- @function buf_breadn
- @abstract Read a block from a file with read-ahead.
- @discussion buf_breadn() reads one block synchronously in the style of buf_bread() and fires
- off a specified set of asynchronous reads to improve the likelihood of future cache hits.
- It should not be used to read blocks of greater than 4K (one VM page) in size; use cluster
- routines for large reads. Indeed, the cluster layer is a more efficient choice for reading DATA
- unless you need some finely-tuned semantics that it cannot provide.
- @param vp The file from which to read.
- @param blkno The logical (filesystem) block number to read synchronously.
- @param size Size of block; do not use for sizes > 4K.
- @param rablks Array of logical block numbers for asynchronous read-aheads.
- @param rasizes Array of block sizes for asynchronous read-aheads, each index corresponding to same index in "rablks."
- @param nrablks Number of entries in read-ahead arrays.
- @param cred Credential to store and use for reading from disk if data are not already in core.
- @param bpp Destination pointer for buffer.
- @return 0 for success, or an error from buf_biowait().
- */
-errno_t buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp);
-
-/*!
- @function buf_meta_bread
- @abstract Synchronously read a metadata block of a file.
- @discussion buf_meta_bread() is the traditional way to read a single logical block of a file through the buffer cache.
- It tries to find the buffer and corresponding page(s) in core, calls VNOP_STRATEGY if necessary to bring the data
- into memory, and waits for I/O to complete. It should not be used to read blocks of greater than 4K (one VM page)
- in size; use cluster routines for large reads. Reading meta-data through the traditional buffer cache, unlike
- reading data, is efficient and encouraged, especially if the blocks being read are significantly smaller than page size.
- @param vp The file from which to read.
- @param blkno The logical (filesystem) block number to read.
- @param size Size of block; do not use for sizes > 4K.
- @param cred Credential to store and use for reading from disk if data are not already in core.
- @param bpp Destination pointer for buffer.
- @return 0 for success, or an error from buf_biowait().
- */
-errno_t buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp);
-
-/*!
- @function buf_meta_breadn
- @abstract Read a metadata block from a file with read-ahead.
- @discussion buf_meta_breadn() reads one block synchronously in the style of buf_meta_bread() and fires
- off a specified set of asynchronous reads to improve the likelihood of future cache hits.
- It should not be used to read blocks of greater than 4K (one VM page) in size; use cluster
- routines for large reads.
- @param vp The file from which to read.
- @param blkno The logical (filesystem) block number to read synchronously.
- @param size Size of block; do not use for sizes > 4K.
- @param rablks Array of logical block numbers for asynchronous read-aheads.
- @param rasizes Array of block sizes for asynchronous read-aheads, each index corresponding to same index in "rablks."
- @param nrablks Number of entries in read-ahead arrays.
- @param cred Credential to store and use for reading from disk if data are not already in core.
- @param bpp Destination pointer for buffer.
- @return 0 for success, or an error from buf_biowait().
- */
-errno_t buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp);
-
-/*!
- @function minphys
- @abstract Adjust a buffer's count to be no more than maximum physical I/O transfer size for the host architecture.
- @discussion physio() takes as a parameter a function to bound transfer sizes for each VNOP_STRATEGY() call. minphys()
- is a default implementation. It calls buf_setcount() to make the buffer's count the min() of its current count
- and the max I/O size for the host architecture.
- @param bp The buffer whose byte count to modify.
- @return New byte count.
- */
-u_int minphys(buf_t bp);
-
-/*!
- @function physio
- @abstract Perform I/O on a device to/from target memory described by a uio.
- @discussion physio() allows I/O directly from a device to user-space memory. It waits
- for all I/O to complete before returning.
- @param f_strategy Strategy routine to call to initiate I/O.
- @param bp Buffer to configure and pass to strategy routine; can be NULL.
- @param dev Device on which to perform I/O.
- @param flags B_READ or B_WRITE.
- @param f_minphys Function which calls buf_setcount() to set a byte count which is suitably
- small for the device in question. Returns byte count that has been set (or unchanged) on the buffer.
- @param uio UIO describing the I/O operation.
- @param blocksize Logical block size for this vnode.
- @return 0 for success; EFAULT for an invalid uio; errors from buf_biowait().
- */
-int physio(void (*f_strategy)(buf_t), buf_t bp, dev_t dev, int flags, u_int (*f_minphys)(buf_t), struct uio *uio, int blocksize);
+#define BUF_SKIP_NONLOCKED 0x01
+#define BUF_SKIP_LOCKED 0x02
+#define BUF_SCAN_CLEAN 0x04 /* scan the clean buffers */
+#define BUF_SCAN_DIRTY 0x08 /* scan the dirty buffers */
+#define BUF_NOTIFY_BUSY 0x10 /* notify the caller about the busy pages during the scan */
+
+
+#define BUF_RETURNED 0
+#define BUF_RETURNED_DONE 1
+#define BUF_CLAIMED 2
+#define BUF_CLAIMED_DONE 3
+/*!
+ * @function buf_flushdirtyblks
+ * @abstract Write dirty file blocks to disk.
+ * @param vp The vnode whose blocks to flush.
+ * @param wait Wait for writes to complete before returning.
+ * @param flags Can pass zero, meaning "flush all dirty buffers."
+ * BUF_SKIP_NONLOCKED: Skip buffers which are not busy when we encounter them.
+ * BUF_SKIP_LOCKED: Skip buffers which are busy when we encounter them.
+ * @param msg String to pass to msleep().
+ */
+void buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg);
+
+/*!
+ * @function buf_iterate
+ * @abstract Perform some operation on all buffers associated with a vnode.
+ * @param vp The vnode whose buffers to scan.
+ * @param callout Function to call on each buffer. Should return one of:
+ * BUF_RETURNED: buf_iterate() should call buf_brelse() on the buffer.
+ * BUF_RETURNED_DONE: buf_iterate() should call buf_brelse() on the buffer and then stop iterating.
+ * BUF_CLAIMED: buf_iterate() should continue iterating (and not call buf_brelse()).
+ * BUF_CLAIMED_DONE: buf_iterate() should stop iterating (and not call buf_brelse()).
+ * @param flags
+ * BUF_SKIP_NONLOCKED: Skip buffers which are not busy when we encounter them. BUF_SKIP_LOCKED: Skip buffers which are busy when we encounter them.
+ * BUF_SCAN_CLEAN: Call out on clean buffers.
+ * BUF_SCAN_DIRTY: Call out on dirty buffers.
+ * BUF_NOTIFY_BUSY: If a buffer cannot be acquired, pass a NULL buffer to callout; otherwise,
+ * that buffer will be silently skipped.
+ * @param arg Argument to pass to callout in addition to buffer.
+ */
+void buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg);
+
+/*!
+ * @function buf_clear
+ * @abstract Zero out the storage associated with a buffer.
+ * @discussion Calls buf_map() to get the buffer's data address; for a B_CLUSTER
+ * buffer (one which has had buf_setupl() called on it), it tries to map the buffer's
+ * UPL into memory; should only be called once during the life cycle of an iobuf (one allocated
+ * with buf_alloc()).
+ * @param bp The buffer to zero out.
+ */
+void buf_clear(buf_t bp);
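The callout contract for buf_iterate() is easiest to see in code. A minimal sketch (drop_dirty and its callout are hypothetical) that walks the dirty list and lets buf_iterate() release each buffer it is handed:

static int
drop_dirty_callout(buf_t bp, void *arg)
{
	if (bp == NULL) {
		return BUF_CLAIMED;	/* busy buffer reported via BUF_NOTIFY_BUSY */
	}
	return BUF_RETURNED;		/* buf_iterate() will call buf_brelse() */
}

static void
drop_dirty(vnode_t vp)
{
	buf_iterate(vp, drop_dirty_callout, BUF_SCAN_DIRTY | BUF_NOTIFY_BUSY, NULL);
}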
+
+/*!
+ * @function buf_bawrite
+ * @abstract Start an asynchronous write on a buffer.
+ * @discussion Calls VNOP_BWRITE to start the process of propagating an asynchronous write down to the device layer.
+ * Callers can wait for writes to complete at their discretion using buf_biowait(). When this function is called,
+ * data should already have been written to the buffer's data region.
+ * @param bp The buffer on which to initiate I/O.
+ * @return EWOULDBLOCK if write count is high and "throttle" is zero; otherwise, errors from VNOP_BWRITE.
+ */
+errno_t buf_bawrite(buf_t bp);
+
+/*!
+ * @function buf_bdwrite
+ * @abstract Mark a buffer for delayed write.
+ * @discussion Marks a buffer as waiting for delayed write and the current I/O as complete; data will be written to backing store
+ * before the buffer is reused, but it will not be queued for I/O immediately. Note that for buffers allocated
+ * with buf_alloc(), there are no such guarantees; you must take care of your own flushing to disk. If
+ * the number of delayed writes pending on the system is greater than an internal limit and the caller has not
+ * requested otherwise [see return_error], buf_bdwrite() will unilaterally launch an asynchronous I/O with buf_bawrite() to keep the pile of
+ * delayed writes from getting too large.
+ * @param bp The buffer to mark for delayed write.
+ * @return EAGAIN for return_error != 0 case, 0 for success, errors from buf_bawrite.
+ */
+errno_t buf_bdwrite(buf_t bp);
+
+/*!
+ * @function buf_bwrite
+ * @abstract Write a buffer's data to backing store.
+ * @discussion Once the data in a buffer has been modified, buf_bwrite() starts sending it to disk by calling
+ * VNOP_STRATEGY. Unless B_ASYNC has been set on the buffer (by buf_setflags() or otherwise), data will have
+ * been written to disk when buf_bwrite() returns. See Bach (p 56).
+ * @param bp The buffer to write to disk.
+ * @return 0 for success; errors from buf_biowait().
+ */
+errno_t buf_bwrite(buf_t bp);
+
+/*!
+ * @function buf_biodone
+ * @abstract Mark an I/O as completed.
+ * @discussion buf_biodone() should be called by whosoever decides that an I/O on a buffer is complete; for example,
+ * IOStorageFamily. It clears the dirty flag on a buffer and signals on the vnode that a write has completed
+ * with vnode_writedone(). If a callout or filter has been set on the buffer, that function is called. In the case
+ * of a callout, that function is expected to take care of cleaning up and freeing the buffer.
+ * Otherwise, if the buffer is marked B_ASYNC (e.g. it was passed to buf_bawrite()), then buf_biodone()
+ * considers itself justified in calling buf_brelse() to return it to free lists--no one is waiting for it. Finally,
+ * waiters on the bp (e.g. in buf_biowait()) are woken up.
+ * @param bp The buffer to mark as done with I/O.
+ */
+void buf_biodone(buf_t bp);
+
+/*!
+ * @function buf_biowait
+ * @abstract Wait for I/O on a buffer to complete.
+ * @discussion Waits for I/O on a buffer to finish, as marked by a buf_biodone() call.
+ * @param bp The buffer to wait on.
+ * @return 0 for a successful wait; nonzero if the buffer has been marked as EINTR or had an error set on it.
+ */
+errno_t buf_biowait(buf_t bp);
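A sketch of the delayed-write path described for buf_bdwrite(): modify the cached block, then let the buffer cache schedule the disk write later (update_block is hypothetical; it assumes bp's data pointer is valid/mapped and len does not exceed buf_count(bp)):

static errno_t
update_block(buf_t bp, const void *src, uint32_t len)
{
	/* Modify the cached data in place... */
	memcpy((void *)buf_dataptr(bp), src, len);

	/* ...and mark it for delayed write; no device I/O is issued yet. */
	return buf_bdwrite(bp);
}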
+
+/*!
+ * @function buf_brelse
+ * @abstract Release any claim to a buffer, sending it back to free lists.
+ * @discussion buf_brelse() cleans up buffer state and releases a buffer to the free lists. If the buffer
+ * is not marked invalid and its pages are dirty (e.g. a delayed write was made), its data will be committed
+ * to backing store. If it is marked invalid, its data will be discarded completely.
+ * A valid, cacheable buffer will be put on a list and kept in the buffer hash so it
+ * can be found again; otherwise, it will be dissociated from its vnode and treated as empty. Which list a valid
+ * buffer is placed on depends on the use of buf_markaged(), whether it is metadata, and the B_LOCKED flag. A
+ * B_LOCKED buffer will not be available for reuse by other files, though its data may be paged out.
+ * Note that buf_brelse() is intended for use with traditionally allocated buffers.
+ * @param bp The buffer to release.
+ */
+void buf_brelse(buf_t bp);
+
+/*!
+ * @function buf_bread
+ * @abstract Synchronously read a block of a file.
+ * @discussion buf_bread() is the traditional way to read a single logical block of a file through the buffer cache.
+ * It tries to find the buffer and corresponding page(s) in core, calls VNOP_STRATEGY if necessary to bring the data
+ * into memory, and waits for I/O to complete. It should not be used to read blocks of greater than 4K (one VM page)
+ * in size; use cluster routines for large reads. Indeed, the cluster layer is a more efficient choice for reading DATA
+ * unless you need some finely-tuned semantics that it cannot provide.
+ * @param vp The file from which to read.
+ * @param blkno The logical (filesystem) block number to read.
+ * @param size Size of block; do not use for sizes > 4K.
+ * @param cred Credential to store and use for reading from disk if data are not already in core.
+ * @param bpp Destination pointer for buffer.
+ * @return 0 for success, or an error from buf_biowait().
+ */
+errno_t buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp);
+
+/*!
+ * @function buf_breadn
+ * @abstract Read a block from a file with read-ahead.
+ * @discussion buf_breadn() reads one block synchronously in the style of buf_bread() and fires
+ * off a specified set of asynchronous reads to improve the likelihood of future cache hits.
+ * It should not be used to read blocks of greater than 4K (one VM page) in size; use cluster
+ * routines for large reads. Indeed, the cluster layer is a more efficient choice for reading DATA
+ * unless you need some finely-tuned semantics that it cannot provide.
+ * @param vp The file from which to read.
+ * @param blkno The logical (filesystem) block number to read synchronously.
+ * @param size Size of block; do not use for sizes > 4K.
+ * @param rablks Array of logical block numbers for asynchronous read-aheads.
+ * @param rasizes Array of block sizes for asynchronous read-aheads, each index corresponding to same index in "rablks."
+ * @param nrablks Number of entries in read-ahead arrays.
+ * @param cred Credential to store and use for reading from disk if data are not already in core.
+ * @param bpp Destination pointer for buffer.
+ * @return 0 for success, or an error from buf_biowait().
+ */
+errno_t buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp);
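The classic buf_bread() pattern per the discussion above (read_one_block is hypothetical): read one logical block, inspect it, and hand it back with buf_brelse().

static errno_t
read_one_block(vnode_t vp, daddr64_t lblkno, int blksize, kauth_cred_t cred)
{
	buf_t bp = NULL;
	errno_t error = buf_bread(vp, lblkno, blksize, cred, &bp);

	if (error) {
		if (bp != NULL) {
			buf_brelse(bp);	/* a buffer may be returned even on error */
		}
		return error;
	}
	/* ... examine the data at buf_dataptr(bp) ... */
	buf_brelse(bp);			/* return the block to the cache */
	return 0;
}

+
+/*!
+ * @function buf_meta_bread
+ * @abstract Synchronously read a metadata block of a file.
+ * @discussion buf_meta_bread() is the traditional way to read a single logical block of a file through the buffer cache.
+ * It tries to find the buffer and corresponding page(s) in core, calls VNOP_STRATEGY if necessary to bring the data
+ * into memory, and waits for I/O to complete. It should not be used to read blocks of greater than 4K (one VM page)
+ * in size; use cluster routines for large reads. Reading meta-data through the traditional buffer cache, unlike
+ * reading data, is efficient and encouraged, especially if the blocks being read are significantly smaller than page size.
+ * @param vp The file from which to read.
+ * @param blkno The logical (filesystem) block number to read.
+ * @param size Size of block; do not use for sizes > 4K.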
+ * @param cred Credential to store and use for reading from disk if data are not already in core.
+ * @param bpp Destination pointer for buffer.
+ * @return 0 for success, or an error from buf_biowait().
+ */
+errno_t buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp);
+
+/*!
+ * @function buf_meta_breadn
+ * @abstract Read a metadata block from a file with read-ahead.
+ * @discussion buf_meta_breadn() reads one block synchronously in the style of buf_meta_bread() and fires
+ * off a specified set of asynchronous reads to improve the likelihood of future cache hits.
+ * It should not be used to read blocks of greater than 4K (one VM page) in size; use cluster
+ * routines for large reads.
+ * @param vp The file from which to read.
+ * @param blkno The logical (filesystem) block number to read synchronously.
+ * @param size Size of block; do not use for sizes > 4K.
+ * @param rablks Array of logical block numbers for asynchronous read-aheads.
+ * @param rasizes Array of block sizes for asynchronous read-aheads, each index corresponding to same index in "rablks."
+ * @param nrablks Number of entries in read-ahead arrays.
+ * @param cred Credential to store and use for reading from disk if data are not already in core.
+ * @param bpp Destination pointer for buffer.
+ * @return 0 for success, or an error from buf_biowait().
+ */
+errno_t buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp);
+
+/*!
+ * @function minphys
+ * @abstract Adjust a buffer's count to be no more than maximum physical I/O transfer size for the host architecture.
+ * @discussion physio() takes as a parameter a function to bound transfer sizes for each VNOP_STRATEGY() call. minphys()
+ * is a default implementation. It calls buf_setcount() to make the buffer's count the min() of its current count
+ * and the max I/O size for the host architecture.
+ * @param bp The buffer whose byte count to modify.
+ * @return New byte count.
+ */
+u_int minphys(buf_t bp);
+
+/*!
+ * @function physio
+ * @abstract Perform I/O on a device to/from target memory described by a uio.
+ * @discussion physio() allows I/O directly from a device to user-space memory. It waits
+ * for all I/O to complete before returning.
+ * @param f_strategy Strategy routine to call to initiate I/O.
+ * @param bp Buffer to configure and pass to strategy routine; can be NULL.
+ * @param dev Device on which to perform I/O.
+ * @param flags B_READ or B_WRITE.
+ * @param f_minphys Function which calls buf_setcount() to set a byte count which is suitably
+ * small for the device in question. Returns byte count that has been set (or unchanged) on the buffer.
+ * @param uio UIO describing the I/O operation.
+ * @param blocksize Logical block size for this vnode.
+ * @return 0 for success; EFAULT for an invalid uio; errors from buf_biowait().
+ */
+int physio(void (*f_strategy)(buf_t), buf_t bp, dev_t dev, int flags, u_int (*f_minphys)(buf_t), struct uio *uio, int blocksize);
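physio() is most often seen in a character device's read/write entry points, as in this sketch; mydev_read and my_strategy are hypothetical, and minphys is the default bounder declared above.

extern void my_strategy(buf_t bp);	/* hypothetical driver strategy routine */

static int
mydev_read(dev_t dev, struct uio *uio, int ioflag)
{
	/* NULL bp: physio() allocates and manages the buffer itself. */
	return physio(my_strategy, NULL, dev, B_READ, minphys, uio, DEV_BSIZE);
}

/*
 * Flags for operation type in getblk()
 */
-#define BLK_READ 0x01 /* buffer for read */
-#define BLK_WRITE 0x02 /* buffer for write */
-#define BLK_META 0x10 /* buffer for metadata */
+#define BLK_READ 0x01 /* buffer for read */
+#define BLK_WRITE 0x02 /* buffer for write */
+#define BLK_META 0x10 /* buffer for metadata */
/*
 * modifier for above flags... if set, getblk will only return
 * a bp that is already valid... i.e. found in the cache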
 */
-#define BLK_ONLYVALID 0x80000000
-
+#define BLK_ONLYVALID 0x80000000
+
/*!
- @function buf_getblk
- @abstract Traditional buffer cache routine to get a buffer corresponding to a logical block in a file.
- @discussion buf_getblk() gets a buffer, not necessarily containing valid data, representing a block in a file.
- A metadata buffer will be returned with its own zone-allocated storage, managed by the traditional buffer-cache
- layer, whereas data buffers will be returned hooked into backing by the UBC (which in fact controls the caching of data).
- buf_getblk() first looks for the buffer header in cache; if the buffer is in-core but busy, buf_getblk() will wait for it to become
- unbusy, depending on the slpflag and slptimeo parameters. If the buffer is found unbusy and is a metadata buffer,
- it must already contain valid data and will be returned directly; data buffers will have a UPL configured to
- prepare for interaction with the underlying UBC. If the buffer is found in core, it will be marked as such
- and buf_fromcache() will return truth. A buffer is allocated and initialized (but not filled with data)
- if none is found in core. buf_bread(), buf_breadn(), buf_meta_bread(), and buf_meta_breadn() all
- return buffers obtained with buf_getblk().
- @param vp File for which to get block.
- @param blkno Logical block number.
- @param size Size of block.
- @param slpflag Flag to pass to msleep() while waiting for buffer to become unbusy.
- @param slptimeo Time, in milliseconds, to wait for buffer to become unbusy. 0 means to wait indefinitely.
- @param operation BLK_READ: want a read buffer. BLK_WRITE: want a write buffer. BLK_META: want a metadata buffer. BLK_ONLYVALID:
- only return buffers which are found in core (do not allocate anew), and do not change buffer size. The last remark means
- that if a given logical block is found in core with a different size than what is requested, the buffer size will not be modified.
- @return Buffer found in core or newly allocated, either containing valid data or ready for I/O.
+ * @function buf_getblk
+ * @abstract Traditional buffer cache routine to get a buffer corresponding to a logical block in a file.
+ * @discussion buf_getblk() gets a buffer, not necessarily containing valid data, representing a block in a file.
+ * A metadata buffer will be returned with its own zone-allocated storage, managed by the traditional buffer-cache
+ * layer, whereas data buffers will be returned hooked into backing by the UBC (which in fact controls the caching of data).
+ * buf_getblk() first looks for the buffer header in cache; if the buffer is in-core but busy, buf_getblk() will wait for it to become
+ * unbusy, depending on the slpflag and slptimeo parameters. If the buffer is found unbusy and is a metadata buffer,
+ * it must already contain valid data and will be returned directly; data buffers will have a UPL configured to
+ * prepare for interaction with the underlying UBC. If the buffer is found in core, it will be marked as such
+ * and buf_fromcache() will return nonzero. A buffer is allocated and initialized (but not filled with data)
+ * if none is found in core. buf_bread(), buf_breadn(), buf_meta_bread(), and buf_meta_breadn() all
+ * return buffers obtained with buf_getblk().
+ * @param vp File for which to get block.
+ * @param blkno Logical block number.
+ * @param size Size of block.
+ * @param slpflag Flag to pass to msleep() while waiting for buffer to become unbusy.
+ * @param slptimeo Time, in milliseconds, to wait for buffer to become unbusy. 0 means to wait indefinitely.
+ * @param operation BLK_READ: want a read buffer. BLK_WRITE: want a write buffer. BLK_META: want a metadata buffer. BLK_ONLYVALID:
+ * only return buffers which are found in core (do not allocate anew), and do not change buffer size. The last remark means
+ * that if a given logical block is found in core with a different size than what is requested, the buffer size will not be modified.
+ * @return Buffer found in core or newly allocated, either containing valid data or ready for I/O.
 */
-buf_t buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation);
+buf_t buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation);

/*!
- @function buf_geteblk
- @abstract Get a metadata buffer which is marked invalid and not associated with any vnode.
- @discussion A buffer is returned with zone-allocated storage of the specified size, marked B_META and invalid.
- It has no vnode and is not visible in the buffer hash.
- @param size Size of buffer.
- @return Always returns a new buffer.
+ * @function buf_geteblk
+ * @abstract Get a metadata buffer which is marked invalid and not associated with any vnode.
+ * @discussion A buffer is returned with zone-allocated storage of the specified size, marked B_META and invalid.
+ * It has no vnode and is not visible in the buffer hash.
+ * @param size Size of buffer.
+ * @return Always returns a new buffer.
 */
-buf_t buf_geteblk(int size);
+buf_t buf_geteblk(int size);
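A sketch of the buf_getblk()/buf_fromcache() idiom for metadata (get_meta_block is hypothetical): only blocks that were not already valid in core need to be filled by the caller.

static buf_t
get_meta_block(vnode_t vp, daddr64_t lblkno, int blksize)
{
	buf_t bp = buf_getblk(vp, lblkno, blksize, 0, 0, BLK_META);

	if (!buf_fromcache(bp)) {
		/* Freshly allocated: the caller must read the block in. */
	}
	return bp;	/* release with buf_brelse() when done */
}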
/*!
- @function buf_clear_redundancy_flags
- @abstract Clear flags on a buffer.
- @discussion buffer_redundancy_flags &= ~flags
- @param bp Buffer whose flags to clear.
- @param flags Flags to remove from buffer's mask
+ * @function buf_clear_redundancy_flags
+ * @abstract Clear flags on a buffer.
+ * @discussion buffer_redundancy_flags &= ~flags
+ * @param bp Buffer whose flags to clear.
+ * @param flags Flags to remove from buffer's mask
 */
-void buf_clear_redundancy_flags(buf_t bp, uint32_t flags);
+void buf_clear_redundancy_flags(buf_t bp, uint32_t flags);

/*!
- @function buf_redundancyflags
- @abstract Get redundancy flags set on a buffer.
- @param bp Buffer whose redundancy flags to grab.
- @return flags.
+ * @function buf_redundancy_flags
+ * @abstract Get redundancy flags set on a buffer.
+ * @param bp Buffer whose redundancy flags to grab.
+ * @return flags.
 */
-uint32_t buf_redundancy_flags(buf_t bp);
+uint32_t buf_redundancy_flags(buf_t bp);

/*!
- @function buf_setredundancyflags
- @abstract Set redundancy flags on a buffer.
- @discussion buffer_redundancy_flags |= flags
- @param bp Buffer whose flags to set.
- @param flags Flags to add to buffer's redundancy flags
+ * @function buf_set_redundancy_flags
+ * @abstract Set redundancy flags on a buffer.
+ * @discussion buffer_redundancy_flags |= flags
+ * @param bp Buffer whose flags to set.
+ * @param flags Flags to add to buffer's redundancy flags
 */
-void buf_set_redundancy_flags(buf_t bp, uint32_t flags);
+void buf_set_redundancy_flags(buf_t bp, uint32_t flags);

/*!
- @function buf_attr
- @abstract Gets the attributes for this buf.
- @param bp Buffer whose attributes to get.
- @return bufattr_t.
+ * @function buf_attr
+ * @abstract Gets the attributes for this buf.
+ * @param bp Buffer whose attributes to get.
+ * @return bufattr_t.
 */
bufattr_t buf_attr(buf_t bp);

/*!
- @function buf_markstatic
- @abstract Mark a buffer as being likely to contain static data.
- @param bp Buffer to mark.
+ * @function buf_markstatic
+ * @abstract Mark a buffer as being likely to contain static data.
+ * @param bp Buffer to mark.
 */
- void buf_markstatic(buf_t bp);
+void buf_markstatic(buf_t bp);

/*!
- @function buf_static
- @abstract Check if a buffer contains static data.
- @param bp Buffer to test.
- @return Nonzero if buffer has static data, 0 otherwise.
+ * @function buf_static
+ * @abstract Check if a buffer contains static data.
+ * @param bp Buffer to test.
+ * @return Nonzero if buffer has static data, 0 otherwise.
 */
-int buf_static(buf_t bp);
+int buf_static(buf_t bp);

#ifdef KERNEL_PRIVATE
-void buf_setfilter(buf_t, void (*)(buf_t, void *), void *, void (**)(buf_t, void *), void **);
+void buf_setfilter(buf_t, void (*)(buf_t, void *), void *, void(**)(buf_t, void *), void **);

/* bufattr allocation/duplication/deallocation functions */
bufattr_t bufattr_alloc(void);
-bufattr_t bufattr_dup (bufattr_t bap);
+bufattr_t bufattr_dup(bufattr_t bap);
void bufattr_free(bufattr_t bap);

/*!
- @function bufattr_cpx
- @abstract Returns a pointer to a cpx_t structure.
- @param bap Buffer Attribute whose cpx_t structure you wish to get.
- @return Returns a cpx_t structure, or NULL if not valid
+ * @function bufattr_cpx
+ * @abstract Returns a pointer to a cpx_t structure.
+ * @param bap Buffer Attribute whose cpx_t structure you wish to get.
+ * @return Returns a cpx_t structure, or NULL if not valid
 */
struct cpx *bufattr_cpx(bufattr_t bap);

/*!
- @function bufattr_setcpx
- @abstract Set the cp_ctx on a buffer attribute.
- @param bap Buffer Attribute that you wish to change
+ * @function bufattr_setcpx
+ * @abstract Set the cp_ctx on a buffer attribute.
+ * @param bap Buffer Attribute that you wish to change
+ * @param cpx cpx_t structure to set on the buffer attribute.
 */
void bufattr_setcpx(bufattr_t bap, struct cpx *cpx);

/*!
- @function bufattr_cpoff
- @abstract Gets the file offset on the buffer.
- @param bap Buffer Attribute whose file offset value is used
+ * @function bufattr_cpoff
+ * @abstract Gets the file offset on the buffer.
+ * @param bap Buffer Attribute whose file offset value is used
 */
uint64_t bufattr_cpoff(bufattr_t bap);

/*!
- @function bufattr_setcpoff
- @abstract Set the file offset for a content protected I/O on
- a buffer attribute.
- @param bap Buffer Attribute whose cp file offset has to be set
+ * @function bufattr_setcpoff
+ * @abstract Set the file offset for a content protected I/O on
+ * a buffer attribute.
+ * @param bap Buffer Attribute whose cp file offset has to be set
 */
void bufattr_setcpoff(bufattr_t bap, uint64_t);

/*!
- @function bufattr_rawencrypted
- @abstract Check if a buffer contains raw encrypted data.
- @param bap Buffer attribute to test.
- @return Nonzero if buffer has raw encrypted data, 0 otherwise.
+ * @function bufattr_rawencrypted
+ * @abstract Check if a buffer contains raw encrypted data.
+ * @param bap Buffer attribute to test.
+ * @return Nonzero if buffer has raw encrypted data, 0 otherwise.
 */
int bufattr_rawencrypted(bufattr_t bap);
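Per-I/O attributes are reached through buf_attr(), as in this illustrative sketch of a driver classifying a request (classify_io is hypothetical; the policy comments are placeholders):

static void
classify_io(buf_t bp)
{
	bufattr_t bap = buf_attr(bp);

	if (bufattr_rawencrypted(bap)) {
		/* payload is raw encrypted data; skip content transformations */
	}
	if (bufattr_throttled(bap)) {
		/* de-prioritize this request */
	}
}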
/*!
- @function bufattr_markgreedymode
- @abstract Mark a buffer to use the greedy mode for writing.
- @param bap Buffer attributes to mark.
- @discussion Greedy Mode: request improved write performance from the underlying device at the expense of storage efficiency
+ * @function bufattr_markgreedymode
+ * @abstract Mark a buffer to use the greedy mode for writing.
+ * @param bap Buffer attributes to mark.
+ * @discussion Greedy Mode: request improved write performance from the underlying device at the expense of storage efficiency
 */
- void bufattr_markgreedymode(bufattr_t bap);
+void bufattr_markgreedymode(bufattr_t bap);

/*!
- @function bufattr_greedymode
- @abstract Check if a buffer is written using the Greedy Mode
- @param bap Buffer attributes to test.
- @discussion Greedy Mode: request improved write performance from the underlying device at the expense of storage efficiency
- @return Nonzero if buffer uses greedy mode, 0 otherwise.
+ * @function bufattr_greedymode
+ * @abstract Check if a buffer is written using the Greedy Mode
+ * @param bap Buffer attributes to test.
+ * @discussion Greedy Mode: request improved write performance from the underlying device at the expense of storage efficiency
+ * @return Nonzero if buffer uses greedy mode, 0 otherwise.
 */
-int bufattr_greedymode(bufattr_t bap);
+int bufattr_greedymode(bufattr_t bap);

/*!
- @function bufattr_markisochronous
- @abstract Mark a buffer to use the isochronous throughput mode for writing.
- @param bap Buffer attributes to mark.
- @discussion isochronous mode: request improved write performance from the underlying device at the expense of storage efficiency
+ * @function bufattr_markisochronous
+ * @abstract Mark a buffer to use the isochronous throughput mode for writing.
+ * @param bap Buffer attributes to mark.
+ * @discussion isochronous mode: request improved write performance from the underlying device at the expense of storage efficiency
 */
- void bufattr_markisochronous(bufattr_t bap);
+void bufattr_markisochronous(bufattr_t bap);

- /*!
- @function bufattr_isochronous
- @abstract Check if a buffer is written using the isochronous
- @param bap Buffer attributes to test.
- @discussion isochronous mode: request improved write performance from the underlying device at the expense of storage efficiency
- @return Nonzero if buffer uses isochronous mode, 0 otherwise.
+/*!
+ * @function bufattr_isochronous
+ * @abstract Check if a buffer is written using the isochronous throughput mode.
+ * @param bap Buffer attributes to test.
+ * @discussion isochronous mode: request improved write performance from the underlying device at the expense of storage efficiency
+ * @return Nonzero if buffer uses isochronous mode, 0 otherwise.
 */
-int bufattr_isochronous(bufattr_t bap);
+int bufattr_isochronous(bufattr_t bap);

/*!
- @function bufattr_throttled
- @abstract Check if a buffer is throttled.
- @param bap Buffer attribute to test.
- @return Nonzero if the buffer is throttled, 0 otherwise.
+ * @function bufattr_throttled
+ * @abstract Check if a buffer is throttled.
+ * @param bap Buffer attribute to test.
+ * @return Nonzero if the buffer is throttled, 0 otherwise.
 */
int bufattr_throttled(bufattr_t bap);

/*!
- @function bufattr_passive
- @abstract Check if a buffer is marked passive.
- @param bap Buffer attribute to test.
- @return Nonzero if the buffer is marked passive, 0 otherwise.
+ * @function bufattr_passive
+ * @abstract Check if a buffer is marked passive.
+ * @param bap Buffer attribute to test.
+ * @return Nonzero if the buffer is marked passive, 0 otherwise.
 */
int bufattr_passive(bufattr_t bap);
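Marking and testing these write-mode hints pair naturally, as in this sketch that opts a buffer into greedy-mode writes (mark_for_fast_write is hypothetical):

static void
mark_for_fast_write(buf_t bp)
{
	bufattr_t bap = buf_attr(bp);

	if (!bufattr_greedymode(bap)) {
		bufattr_markgreedymode(bap);	/* speed over storage efficiency */
	}
}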
+ * @return Nonzero if the buffer is not cached, 0 otherwise. */ int bufattr_nocache(bufattr_t bap); /*! - @function bufattr_meta - @abstract Check if a buffer has the bufattr meta attribute. - @param bap Buffer attribute to test. - @return Nonzero if the buffer has meta attribute, 0 otherwise. + * @function bufattr_meta + * @abstract Check if a buffer has the bufattr meta attribute. + * @param bap Buffer attribute to test. + * @return Nonzero if the buffer has meta attribute, 0 otherwise. */ int bufattr_meta(bufattr_t bap); /*! - @function bufattr_markmeta - @abstract Set the bufattr meta attribute. - @param bap Buffer attribute to manipulate. + * @function bufattr_markmeta + * @abstract Set the bufattr meta attribute. + * @param bap Buffer attribute to manipulate. */ void bufattr_markmeta(bufattr_t bap); /*! - @function bufattr_delayidlesleep - @abstract Check if a buffer is marked to delay idle sleep on disk IO. - @param bap Buffer attribute to test. - @return Nonzero if the buffer is marked to delay idle sleep on disk IO, 0 otherwise. + * @function bufattr_delayidlesleep + * @abstract Check if a buffer is marked to delay idle sleep on disk IO. + * @param bap Buffer attribute to test. + * @return Nonzero if the buffer is marked to delay idle sleep on disk IO, 0 otherwise. */ int bufattr_delayidlesleep(bufattr_t bap); /*! - @function buf_kernel_addrperm_addr - @abstract Obfuscate the buf pointers. - @param addr Buf_t pointer. - @return Obfuscated pointer if addr is non zero, 0 otherwise. + * @function buf_kernel_addrperm_addr + * @abstract Obfuscate the buf pointers. + * @param addr Buf_t pointer. + * @return Obfuscated pointer if addr is non zero, 0 otherwise. */ vm_offset_t buf_kernel_addrperm_addr(void * addr); /*! - @function bufattr_markquickcomplete - @abstract Mark a buffer to hint quick completion to the driver. - @discussion This flag hints the storage driver that some thread is waiting for this I/O to complete. - It should therefore attempt to complete it as soon as possible at the cost of device efficiency. - @param bap Buffer attributes to mark. + * @function bufattr_markquickcomplete + * @abstract Mark a buffer to hint quick completion to the driver. + * @discussion This flag hints the storage driver that some thread is waiting for this I/O to complete. + * It should therefore attempt to complete it as soon as possible at the cost of device efficiency. + * @param bap Buffer attributes to mark. */ void bufattr_markquickcomplete(bufattr_t bap); /*! - @function bufattr_quickcomplete - @abstract Check if a buffer is marked for quick completion - @discussion This flag hints the storage driver that some thread is waiting for this I/O to complete. - It should therefore attempt to complete it as soon as possible at the cost of device efficiency. - @param bap Buffer attribute to test. - @return Nonzero if the buffer is marked for quick completion, 0 otherwise. + * @function bufattr_quickcomplete + * @abstract Check if a buffer is marked for quick completion + * @discussion This flag hints the storage driver that some thread is waiting for this I/O to complete. + * It should therefore attempt to complete it as soon as possible at the cost of device efficiency. + * @param bap Buffer attribute to test. + * @return Nonzero if the buffer is marked for quick completion, 0 otherwise. 
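+ *
+ * A minimal usage sketch (illustrative only, not code from this header;
+ * it assumes buf_attr() as the accessor yielding a buffer's bufattr_t,
+ * and expedite_io() is a hypothetical driver helper):
+ *
+ *     bufattr_t bap = buf_attr(bp);
+ *     bufattr_markquickcomplete(bap);
+ *     if (bufattr_quickcomplete(bap))
+ *             expedite_io(bp);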
*/ int bufattr_quickcomplete(bufattr_t bap); -int count_lock_queue(void); +int count_lock_queue(void); /* * Flags for buf_acquire */ -#define BAC_NOWAIT 0x01 /* Don't wait if buffer is busy */ -#define BAC_REMOVE 0x02 /* Remove from free list once buffer is acquired */ -#define BAC_SKIP_NONLOCKED 0x04 /* Don't return LOCKED buffers */ -#define BAC_SKIP_LOCKED 0x08 /* Only return LOCKED buffers */ +#define BAC_NOWAIT 0x01 /* Don't wait if buffer is busy */ +#define BAC_REMOVE 0x02 /* Remove from free list once buffer is acquired */ +#define BAC_SKIP_NONLOCKED 0x04 /* Don't return LOCKED buffers */ +#define BAC_SKIP_LOCKED 0x08 /* Only return LOCKED buffers */ -errno_t buf_acquire(buf_t, int, int, int); +errno_t buf_acquire(buf_t, int, int, int); -buf_t buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg); +buf_t buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg); -void buf_drop(buf_t); +void buf_drop(buf_t); #endif /* KERNEL_PRIVATE */ @@ -1199,9 +1199,9 @@ __END_DECLS /* Macros to clear/set/test flags. */ -#define SET(t, f) (t) |= (f) -#define CLR(t, f) (t) &= ~(f) -#define ISSET(t, f) ((t) & (f)) +#define SET(t, f) (t) |= (f) +#define CLR(t, f) (t) &= ~(f) +#define ISSET(t, f) ((t) & (f)) #endif /* !_SYS_BUF_H_ */ diff --git a/bsd/sys/buf_internal.h b/bsd/sys/buf_internal.h index 23c9ecba6..c7b206823 100644 --- a/bsd/sys/buf_internal.h +++ b/bsd/sys/buf_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -67,7 +67,7 @@ */ #ifndef _SYS_BUF_INTERNAL_H_ -#define _SYS_BUF_INTERNAL_H_ +#define _SYS_BUF_INTERNAL_H_ #include @@ -93,60 +93,60 @@ struct bufattr { struct cpx *ba_cpx; uint64_t ba_cp_file_off; #endif - uint64_t ba_flags; /* flags. Some are only in-use on embedded devices */ + uint64_t ba_flags; /* flags. Some are only in-use on embedded devices */ }; /* * The buffer header describes an I/O operation in the kernel. */ struct buf { - LIST_ENTRY(buf) b_hash; /* Hash chain. */ - LIST_ENTRY(buf) b_vnbufs; /* Buffer's associated vnode.
*/ - TAILQ_ENTRY(buf) b_freelist; /* Free list position if not active. */ - int b_timestamp; /* timestamp for queuing operation */ + LIST_ENTRY(buf) b_hash; /* Hash chain. */ + LIST_ENTRY(buf) b_vnbufs; /* Buffer's associated vnode. */ + TAILQ_ENTRY(buf) b_freelist; /* Free list position if not active. */ + int b_timestamp; /* timestamp for queuing operation */ struct timeval b_timestamp_tv; /* microuptime for disk conditioner */ - int b_whichq; /* the free list the buffer belongs to */ - volatile uint32_t b_flags; /* B_* flags. */ - volatile uint32_t b_lflags; /* BL_BUSY | BL_WANTED flags... protected by buf_mtx */ - int b_error; /* errno value. */ - int b_bufsize; /* Allocated buffer size. */ - int b_bcount; /* Valid bytes in buffer. */ - int b_resid; /* Remaining I/O. */ - dev_t b_dev; /* Device associated with buffer. */ - uintptr_t b_datap; /* Memory, superblocks, indirect etc.*/ - daddr64_t b_lblkno; /* Logical block number. */ - daddr64_t b_blkno; /* Underlying physical block number. */ - void (*b_iodone)(buf_t, void *); /* Function to call upon completion. */ - vnode_t b_vp; /* File vnode for data, device vnode for metadata. */ - kauth_cred_t b_rcred; /* Read credentials reference. */ - kauth_cred_t b_wcred; /* Write credentials reference. */ - void * b_upl; /* Pointer to UPL */ - buf_t b_real_bp; /* used to track bp generated through cluster_bp */ - TAILQ_ENTRY(buf) b_act; /* Device driver queue when active */ - void * b_drvdata; /* Device driver private use */ - void * b_fsprivate; /* filesystem private use */ - void * b_transaction; /* journal private use */ - int b_dirtyoff; /* Offset in buffer of dirty region. */ - int b_dirtyend; /* Offset of end of dirty region. */ - int b_validoff; /* Offset in buffer of valid region. */ - int b_validend; /* Offset of end of valid region. */ + int b_whichq; /* the free list the buffer belongs to */ + volatile uint32_t b_flags; /* B_* flags. */ + volatile uint32_t b_lflags; /* BL_BUSY | BL_WANTED flags... protected by buf_mtx */ + int b_error; /* errno value. */ + int b_bufsize; /* Allocated buffer size. */ + int b_bcount; /* Valid bytes in buffer. */ + int b_resid; /* Remaining I/O. */ + dev_t b_dev; /* Device associated with buffer. */ + uintptr_t b_datap; /* Memory, superblocks, indirect etc.*/ + daddr64_t b_lblkno; /* Logical block number. */ + daddr64_t b_blkno; /* Underlying physical block number. */ + void (*b_iodone)(buf_t, void *); /* Function to call upon completion. */ + vnode_t b_vp; /* File vnode for data, device vnode for metadata. */ + kauth_cred_t b_rcred; /* Read credentials reference. */ + kauth_cred_t b_wcred; /* Write credentials reference. */ + void * b_upl; /* Pointer to UPL */ + buf_t b_real_bp; /* used to track bp generated through cluster_bp */ + TAILQ_ENTRY(buf) b_act; /* Device driver queue when active */ + void * b_drvdata; /* Device driver private use */ + void * b_fsprivate; /* filesystem private use */ + void * b_transaction; /* journal private use */ + int b_dirtyoff; /* Offset in buffer of dirty region. */ + int b_dirtyend; /* Offset of end of dirty region. */ + int b_validoff; /* Offset in buffer of valid region. */ + int b_validend; /* Offset of end of valid region. */ /* store extra information related to redundancy of data, such as * which redundancy copy to use, etc */ uint32_t b_redundancy_flags; - proc_t b_proc; /* Associated proc; NULL if kernel. */ + proc_t b_proc; /* Associated proc; NULL if kernel.
*/ #ifdef BUF_MAKE_PRIVATE buf_t b_data_store; #endif struct bufattr b_attr; #ifdef JOE_DEBUG - void * b_owner; - int b_tag; - void * b_lastbrelse; - void * b_stackbrelse[6]; - void * b_stackgetblk[6]; + void * b_owner; + int b_tag; + void * b_lastbrelse; + void * b_stackbrelse[6]; + void * b_stackgetblk[6]; #endif }; @@ -154,9 +154,9 @@ extern vm_offset_t buf_kernel_addrperm; /* cluster_io definitions for use with io bufs */ #define b_uploffset b_bufsize -#define b_orig b_freelist.tqe_prev +#define b_orig b_freelist.tqe_prev #define b_shadow b_freelist.tqe_next -#define b_shadow_ref b_validoff +#define b_shadow_ref b_validoff #ifdef BUF_MAKE_PRIVATE #define b_data_ref b_validend #endif @@ -169,84 +169,84 @@ extern vm_offset_t buf_kernel_addrperm; * These flags are kept in b_lflags... * buf_mtxp must be held before examining/updating */ -#define BL_BUSY 0x00000001 /* I/O in progress. */ -#define BL_WANTED 0x00000002 /* Process wants this buffer. */ -#define BL_IOBUF 0x00000004 /* buffer allocated via 'buf_alloc' */ -#define BL_WANTDEALLOC 0x00000010 /* buffer should be put on empty list when clean */ -#define BL_SHADOW 0x00000020 -#define BL_EXTERNAL 0x00000040 -#define BL_WAITSHADOW 0x00000080 -#define BL_IOBUF_ALLOC 0x00000100 -#define BL_WANTED_REF 0x00000200 -#define BL_IOBUF_VDEV 0x00000400 /* iobuf was for a diskimage */ +#define BL_BUSY 0x00000001 /* I/O in progress. */ +#define BL_WANTED 0x00000002 /* Process wants this buffer. */ +#define BL_IOBUF 0x00000004 /* buffer allocated via 'buf_alloc' */ +#define BL_WANTDEALLOC 0x00000010 /* buffer should be put on empty list when clean */ +#define BL_SHADOW 0x00000020 +#define BL_EXTERNAL 0x00000040 +#define BL_WAITSHADOW 0x00000080 +#define BL_IOBUF_ALLOC 0x00000100 +#define BL_WANTED_REF 0x00000200 +#define BL_IOBUF_VDEV 0x00000400 /* iobuf was for a diskimage */ /* - * Parameters for buffer cache garbage collection + * Parameters for buffer cache garbage collection */ -#define BUF_STALE_THRESHHOLD 30 /* Collect if untouched in the last 30 seconds */ -#define BUF_MAX_GC_BATCH_SIZE 64 /* Under a single grab of the lock */ +#define BUF_STALE_THRESHHOLD 30 /* Collect if untouched in the last 30 seconds */ +#define BUF_MAX_GC_BATCH_SIZE 64 /* Under a single grab of the lock */ /* * mask used by buf_flags... these are the readable external flags */ #define BUF_X_RDFLAGS (B_PHYS | B_RAW | B_LOCKED | B_ASYNC | B_READ | B_WRITE | B_PAGEIO |\ - B_META | B_CLUSTER | B_DELWRI | B_FUA | B_PASSIVE | B_IOSTREAMING |\ - B_ENCRYPTED_IO | B_STATICCONTENT) + B_META | B_CLUSTER | B_DELWRI | B_FUA | B_PASSIVE | B_IOSTREAMING |\ + B_ENCRYPTED_IO | B_STATICCONTENT) /* * mask used by buf_clearflags/buf_setflags... these are the writable external flags */ #define BUF_X_WRFLAGS (B_PHYS | B_RAW | B_LOCKED | B_ASYNC | B_READ | B_WRITE | B_PAGEIO |\ - B_NOCACHE | B_FUA | B_PASSIVE | B_IOSTREAMING) + B_NOCACHE | B_FUA | B_PASSIVE | B_IOSTREAMING) #if 0 /* b_flags defined in buf.h */ -#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */ -#define B_READ 0x00000001 /* Read buffer. */ -#define B_ASYNC 0x00000002 /* Start I/O, do not wait. */ -#define B_NOCACHE 0x00000004 /* Do not cache block after use. */ -#define B_DELWRI 0x00000008 /* Delay I/O until buffer reused. */ -#define B_LOCKED 0x00000010 /* Locked in core (not reusable). */ -#define B_PHYS 0x00000020 /* I/O to user memory. 
*/ -#define B_CLUSTER 0x00000040 /* UPL based I/O generated by cluster layer */ -#define B_PAGEIO 0x00000080 /* Page in/out */ -#define B_META 0x00000100 /* buffer contains meta-data. */ -#define B_RAW 0x00000200 /* Set by physio for raw transfers. */ -#define B_FUA 0x00000400 /* Write-through disk cache(if supported) */ -#define B_PASSIVE 0x00000800 /* PASSIVE I/Os are ignored by THROTTLE I/O */ -#define B_IOSTREAMING 0x00001000 /* sequential access pattern detected */ -#define B_ENCRYPTED_IO 0x00004000 /* Encrypted I/O */ -#define B_STATICCONTENT 0x00008000 /* Buffer is likely to remain unaltered */ +#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */ +#define B_READ 0x00000001 /* Read buffer. */ +#define B_ASYNC 0x00000002 /* Start I/O, do not wait. */ +#define B_NOCACHE 0x00000004 /* Do not cache block after use. */ +#define B_DELWRI 0x00000008 /* Delay I/O until buffer reused. */ +#define B_LOCKED 0x00000010 /* Locked in core (not reusable). */ +#define B_PHYS 0x00000020 /* I/O to user memory. */ +#define B_CLUSTER 0x00000040 /* UPL based I/O generated by cluster layer */ +#define B_PAGEIO 0x00000080 /* Page in/out */ +#define B_META 0x00000100 /* buffer contains meta-data. */ +#define B_RAW 0x00000200 /* Set by physio for raw transfers. */ +#define B_FUA 0x00000400 /* Write-through disk cache(if supported) */ +#define B_PASSIVE 0x00000800 /* PASSIVE I/Os are ignored by THROTTLE I/O */ +#define B_IOSTREAMING 0x00001000 /* sequential access pattern detected */ +#define B_ENCRYPTED_IO 0x00004000 /* Encrypted I/O */ +#define B_STATICCONTENT 0x00008000 /* Buffer is likely to remain unaltered */ #endif /* * These flags are kept in b_flags... access is lockless * External flags are defined in buf.h and cannot overlap * the internal flags - * + * * these flags are internal... their definition may change */ -#define B_CACHE 0x00010000 /* getblk found us in the cache. */ -#define B_DONE 0x00020000 /* I/O completed. */ -#define B_INVAL 0x00040000 /* Does not contain valid info. */ -#define B_ERROR 0x00080000 /* I/O error occurred. */ -#define B_EINTR 0x00100000 /* I/O was interrupted */ -#define B_AGE 0x00200000 /* Move to age queue when I/O done. */ -#define B_FILTER 0x00400000 /* call b_iodone from biodone as an in-line filter */ -#define B_CALL 0x00800000 /* Call b_iodone from biodone, assumes b_iodone consumes bp */ -#define B_EOT 0x01000000 /* last buffer in a transaction list created by cluster_io */ -#define B_WASDIRTY 0x02000000 /* page was found dirty in the VM cache */ -#define B_HDRALLOC 0x04000000 /* zone allocated buffer header */ -#define B_ZALLOC 0x08000000 /* b_datap is zalloc()ed */ +#define B_CACHE 0x00010000 /* getblk found us in the cache. */ +#define B_DONE 0x00020000 /* I/O completed. */ +#define B_INVAL 0x00040000 /* Does not contain valid info. */ +#define B_ERROR 0x00080000 /* I/O error occurred. */ +#define B_EINTR 0x00100000 /* I/O was interrupted */ +#define B_AGE 0x00200000 /* Move to age queue when I/O done.
*/ +#define B_FILTER 0x00400000 /* call b_iodone from biodone as an in-line filter */ +#define B_CALL 0x00800000 /* Call b_iodone from biodone, assumes b_iodone consumes bp */ +#define B_EOT 0x01000000 /* last buffer in a transaction list created by cluster_io */ +#define B_WASDIRTY 0x02000000 /* page was found dirty in the VM cache */ +#define B_HDRALLOC 0x04000000 /* zone allocated buffer header */ +#define B_ZALLOC 0x08000000 /* b_datap is zalloc()ed */ /* * private flags used by the cluster layer */ -#define B_COMMIT_UPL 0x40000000 /* commit/abort the UPL on I/O success/failure */ -#define B_TDONE 0x80000000 /* buf_t that is part of a cluster level transaction has completed */ +#define B_COMMIT_UPL 0x40000000 /* commit/abort the UPL on I/O success/failure */ +#define B_TDONE 0x80000000 /* buf_t that is part of a cluster level transaction has completed */ /* Flags to low-level allocation routines. */ -#define B_CLRBUF 0x01 /* Request allocated buffer be cleared. */ -#define B_SYNC 0x02 /* Do all allocations synchronously. */ -#define B_NOBUFF 0x04 /* Do not allocate struct buf */ +#define B_CLRBUF 0x01 /* Request allocated buffer be cleared. */ +#define B_SYNC 0x02 /* Do all allocations synchronously. */ +#define B_NOBUFF 0x04 /* Do not allocate struct buf */ /* * ba_flags (Buffer Attribute flags) @@ -254,77 +254,77 @@ extern vm_offset_t buf_kernel_addrperm; */ #define BA_RAW_ENCRYPTED_IO 0x00000001 #define BA_THROTTLED_IO 0x00000002 -#define BA_DELAYIDLESLEEP 0x00000004 /* Process is marked to delay idle sleep on disk IO */ -#define BA_NOCACHE 0x00000008 -#define BA_META 0x00000010 -#define BA_GREEDY_MODE 0x00000020 /* High speed writes that consume more storage */ -#define BA_QUICK_COMPLETE 0x00000040 /* Request quick completion at expense of storage efficiency */ -#define BA_PASSIVE 0x00000080 +#define BA_DELAYIDLESLEEP 0x00000004 /* Process is marked to delay idle sleep on disk IO */ +#define BA_NOCACHE 0x00000008 +#define BA_META 0x00000010 +#define BA_GREEDY_MODE 0x00000020 /* High speed writes that consume more storage */ +#define BA_QUICK_COMPLETE 0x00000040 /* Request quick completion at expense of storage efficiency */ +#define BA_PASSIVE 0x00000080 /* * Note: IO_TIERs consume 0x0100, 0x0200, 0x0400, 0x0800 * These are now in-use by the I/O tiering system.
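 *
 * Illustrative use of the accessor macros defined just below (a sketch,
 * not code from this file): to tag an I/O with tier 2 and read it back,
 *
 *     SET_BUFATTR_IO_TIER(bap, 2);
 *     assert(GET_BUFATTR_IO_TIER(bap) == 2);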
- */ -#define BA_IO_TIER_MASK 0x00000f00 -#define BA_IO_TIER_SHIFT 8 + */ +#define BA_IO_TIER_MASK 0x00000f00 +#define BA_IO_TIER_SHIFT 8 -#define BA_ISOCHRONOUS 0x00001000 /* device specific isochronous throughput to media */ +#define BA_ISOCHRONOUS 0x00001000 /* device specific isochronous throughput to media */ #define BA_STRATEGY_TRACKED_IO 0x00002000 /* tracked by spec_strategy */ #define BA_IO_TIER_UPGRADE 0x00004000 /* effective I/O tier is higher than BA_IO_TIER */ -#define GET_BUFATTR_IO_TIER(bap) ((bap->ba_flags & BA_IO_TIER_MASK) >> BA_IO_TIER_SHIFT) -#define SET_BUFATTR_IO_TIER(bap, tier) \ -do { \ - (bap)->ba_flags &= (~BA_IO_TIER_MASK); \ - (bap)->ba_flags |= (((tier) << BA_IO_TIER_SHIFT) & BA_IO_TIER_MASK); \ +#define GET_BUFATTR_IO_TIER(bap) ((bap->ba_flags & BA_IO_TIER_MASK) >> BA_IO_TIER_SHIFT) +#define SET_BUFATTR_IO_TIER(bap, tier) \ +do { \ + (bap)->ba_flags &= (~BA_IO_TIER_MASK); \ + (bap)->ba_flags |= (((tier) << BA_IO_TIER_SHIFT) & BA_IO_TIER_MASK); \ } while(0) -extern int niobuf_headers; /* The number of IO buffer headers for cluster IO */ -extern int nbuf_headers; /* The number of buffer headers */ -extern int max_nbuf_headers; /* The max number of buffer headers */ -extern int nbuf_hashelements; /* The number of elements in bufhash */ -extern struct buf *buf_headers; /* The buffer headers. */ +extern int niobuf_headers; /* The number of IO buffer headers for cluster IO */ +extern int nbuf_headers; /* The number of buffer headers */ +extern int max_nbuf_headers; /* The max number of buffer headers */ +extern int nbuf_hashelements; /* The number of elements in bufhash */ +extern struct buf *buf_headers; /* The buffer headers. */ /* * Definitions for the buffer free lists. */ -#define BQUEUES 6 /* number of free buffer queues */ +#define BQUEUES 6 /* number of free buffer queues */ -#define BQ_LOCKED 0 /* super-blocks &c */ -#define BQ_LRU 1 /* lru, useful buffers */ -#define BQ_AGE 2 /* rubbish */ -#define BQ_EMPTY 3 /* buffer headers with no memory */ -#define BQ_META 4 /* buffer containing metadata */ -#define BQ_LAUNDRY 5 /* buffers that need cleaning */ +#define BQ_LOCKED 0 /* super-blocks &c */ +#define BQ_LRU 1 /* lru, useful buffers */ +#define BQ_AGE 2 /* rubbish */ +#define BQ_EMPTY 3 /* buffer headers with no memory */ +#define BQ_META 4 /* buffer containing metadata */ +#define BQ_LAUNDRY 5 /* buffers that need cleaning */ __BEGIN_DECLS -buf_t alloc_io_buf(vnode_t, int); -void free_io_buf(buf_t); +buf_t alloc_io_buf(vnode_t, int); +void free_io_buf(buf_t); -int allocbuf(struct buf *, int); -void bufinit(void); +int allocbuf(struct buf *, int); +void bufinit(void); -void buf_list_lock(void); -void buf_list_unlock(void); +void buf_list_lock(void); +void buf_list_unlock(void); -void cluster_init(void); +void cluster_init(void); -int count_busy_buffers(void); +int count_busy_buffers(void); -int buf_flushdirtyblks_skipinfo (vnode_t, int, int, const char *); -void buf_wait_for_shadow_io (vnode_t, daddr64_t); +int buf_flushdirtyblks_skipinfo(vnode_t, int, int, const char *); +void buf_wait_for_shadow_io(vnode_t, daddr64_t); #ifdef BUF_MAKE_PRIVATE -errno_t buf_make_private(buf_t bp); +errno_t buf_make_private(buf_t bp); #endif #ifdef CONFIG_PROTECT -void buf_setcpoff (buf_t, uint64_t); +void buf_setcpoff(buf_t, uint64_t); #endif __END_DECLS @@ -334,17 +334,17 @@ __END_DECLS * Stats on usefulness of the buffer cache */ struct bufstats { - long bufs_incore; /* found incore */ - long bufs_busyincore; /* found incore. was busy */ - long bufs_vmhits; /* not incore. 
found in VM */ - long bufs_miss; /* not incore. not in VM */ - long bufs_sleeps; /* buffer starvation */ - long bufs_eblk; /* Calls to geteblk */ - long bufs_iobufmax; /* Max. number of IO buffers used */ - long bufs_iobufinuse; /* number of IO buffers in use */ - long bufs_iobufsleeps; /* IO buffer starvation */ - long bufs_iobufinuse_vdev; /* number of IO buffers in use by - diskimages */ + long bufs_incore; /* found incore */ + long bufs_busyincore; /* found incore. was busy */ + long bufs_vmhits; /* not incore. found in VM */ + long bufs_miss; /* not incore. not in VM */ + long bufs_sleeps; /* buffer starvation */ + long bufs_eblk; /* Calls to geteblk */ + long bufs_iobufmax; /* Max. number of IO buffers used */ + long bufs_iobufinuse; /* number of IO buffers in use */ + long bufs_iobufsleeps; /* IO buffer starvation */ + long bufs_iobufinuse_vdev; /* number of IO buffers in use by + * diskimages */ }; #endif /* KERNEL */ diff --git a/bsd/sys/callout.h b/bsd/sys/callout.h index cb4e17d78..df833aa3b 100644 --- a/bsd/sys/callout.h +++ b/bsd/sys/callout.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -74,13 +74,13 @@ #ifdef __APPLE_API_OBSOLETE -#define CALLOUT_PRI_SOFTINT0 0 -#define CALLOUT_PRI_SOFTINT1 1 -#define CALLOUT_PRI_RETRACE 2 -#define CALLOUT_PRI_DSP 3 -#define CALLOUT_PRI_THREAD 4 /* run in a thread */ -#define CALLOUT_PRI_NOW 5 /* must be last */ -#define N_CALLOUT_PRI 6 +#define CALLOUT_PRI_SOFTINT0 0 +#define CALLOUT_PRI_SOFTINT1 1 +#define CALLOUT_PRI_RETRACE 2 +#define CALLOUT_PRI_DSP 3 +#define CALLOUT_PRI_THREAD 4 /* run in a thread */ +#define CALLOUT_PRI_NOW 5 /* must be last */ +#define N_CALLOUT_PRI 6 #endif /* __APPLE_API_OBSOLETE */ #endif /* _SYS_CALLOUT_H_ */ diff --git a/bsd/sys/cdefs.h b/bsd/sys/cdefs.h index a7f6639e0..bb42543df 100644 --- a/bsd/sys/cdefs.h +++ b/bsd/sys/cdefs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright 1995 NeXT Computer, Inc. All rights reserved. */ @@ -64,15 +64,15 @@ * @(#)cdefs.h 8.8 (Berkeley) 1/9/95 */ -#ifndef _CDEFS_H_ -#define _CDEFS_H_ +#ifndef _CDEFS_H_ +#define _CDEFS_H_ #if defined(__cplusplus) -#define __BEGIN_DECLS extern "C" { -#define __END_DECLS } +#define __BEGIN_DECLS extern "C" { +#define __END_DECLS } #else -#define __BEGIN_DECLS -#define __END_DECLS +#define __BEGIN_DECLS +#define __END_DECLS #endif /* This SDK is designed to work with clang and specific versions of @@ -109,32 +109,32 @@ * strings produced by the __STRING macro, but this only works with ANSI C. */ #if defined(__STDC__) || defined(__cplusplus) -#define __P(protos) protos /* full-blown ANSI C */ -#define __CONCAT(x,y) x ## y -#define __STRING(x) #x +#define __P(protos) protos /* full-blown ANSI C */ +#define __CONCAT(x, y) x ## y +#define __STRING(x) #x -#define __const const /* define reserved names to standard */ -#define __signed signed -#define __volatile volatile +#define __const const /* define reserved names to standard */ +#define __signed signed +#define __volatile volatile #if defined(__cplusplus) -#define __inline inline /* convert to C++ keyword */ +#define __inline inline /* convert to C++ keyword */ #else #ifndef __GNUC__ -#define __inline /* delete GCC keyword */ +#define __inline /* delete GCC keyword */ #endif /* !__GNUC__ */ #endif /* !__cplusplus */ -#else /* !(__STDC__ || __cplusplus) */ -#define __P(protos) () /* traditional C preprocessor */ -#define __CONCAT(x,y) x/**/y -#define __STRING(x) "x" +#else /* !(__STDC__ || __cplusplus) */ +#define __P(protos) () /* traditional C preprocessor */ +#define __CONCAT(x, y) x /**/ y +#define __STRING(x) "x" #ifndef __GNUC__ -#define __const /* delete pseudo-ANSI C keywords */ -#define __inline -#define __signed -#define __volatile -#endif /* !__GNUC__ */ +#define __const /* delete pseudo-ANSI C keywords */ +#define __inline +#define __signed +#define __volatile +#endif /* !__GNUC__ */ /* * In non-ANSI C environments, new programs will want ANSI-only C keywords @@ -144,26 +144,26 @@ * When using "gcc -traditional", we assume that this is the intent; if * __GNUC__ is defined but __STDC__ is not, we leave the new keywords alone. */ -#ifndef NO_ANSI_KEYWORDS -#define const __const /* convert ANSI C keywords */ -#define inline __inline -#define signed __signed -#define volatile __volatile +#ifndef NO_ANSI_KEYWORDS +#define const __const /* convert ANSI C keywords */ +#define inline __inline +#define signed __signed +#define volatile __volatile #endif /* !NO_ANSI_KEYWORDS */ #endif /* !(__STDC__ || __cplusplus) */ -#define __dead2 __attribute__((noreturn)) -#define __pure2 __attribute__((const)) +#define __dead2 __attribute__((noreturn)) +#define __pure2 __attribute__((const)) /* __unused denotes variables and functions that may not be used, preventing * the compiler from warning about it if not used. 
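 *
 * For example (an illustrative declaration, not one from this header),
 * the following compiles without an unused-variable warning:
 *
 *     static int debug_knob __unused;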
*/ -#define __unused __attribute__((unused)) +#define __unused __attribute__((unused)) /* __used forces variables and functions to be included even if it appears * to the compiler that they are not used (and would thus be discarded). */ -#define __used __attribute__((used)) +#define __used __attribute__((used)) /* __deprecated causes the compiler to produce a warning when encountering * code using the deprecated functionality. @@ -173,10 +173,10 @@ * __deprecated_enum_msg() should be used on enums, and compilers that support * it will print the deprecation warning. */ -#define __deprecated __attribute__((deprecated)) +#define __deprecated __attribute__((deprecated)) #if __has_extension(attribute_deprecated_with_message) || \ - (defined(__GNUC__) && ((__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)))) + (defined(__GNUC__) && ((__GNUC__ >= 5) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)))) #define __deprecated_msg(_msg) __attribute__((deprecated(_msg))) #else #define __deprecated_msg(_msg) __attribute__((deprecated)) @@ -191,12 +191,12 @@ /* __unavailable causes the compiler to error out when encountering * code using the tagged function or variable. */ -#define __unavailable __attribute__((unavailable)) +#define __unavailable __attribute__((unavailable)) /* Delete pseudo-keywords wherever they are not available or needed. */ #ifndef __dead -#define __dead -#define __pure +#define __dead +#define __pure #endif /* @@ -206,7 +206,7 @@ #if __STDC_VERSION__ < 199901 #define __restrict #else -#define __restrict restrict +#define __restrict restrict #endif /* Compatibility with compilers and environments that don't support the @@ -239,7 +239,7 @@ * optimization inside the marked function. */ #if __has_attribute(disable_tail_calls) -#define __disable_tail_calls __attribute__((__disable_tail_calls__)) +#define __disable_tail_calls __attribute__((__disable_tail_calls__)) #else #define __disable_tail_calls #endif @@ -251,7 +251,7 @@ * "always_inline" cannot be marked as __not_tail_called. */ #if __has_attribute(not_tail_called) -#define __not_tail_called __attribute__((__not_tail_called__)) +#define __not_tail_called __attribute__((__not_tail_called__)) #else #define __not_tail_called #endif @@ -272,7 +272,7 @@ * unavailable in Swift, regardless of any other availability in C. */ #if __has_feature(attribute_availability_swift) -#define __swift_unavailable(_msg) __attribute__((__availability__(swift, unavailable, message=_msg))) +#define __swift_unavailable(_msg) __attribute__((__availability__(swift, unavailable, message=_msg))) #else #define __swift_unavailable(_msg) #endif @@ -294,27 +294,27 @@ */ #if defined(__cplusplus) || \ - (__STDC_VERSION__ >= 199901L && \ - !defined(__GNUC_GNU_INLINE__) && \ - (!defined(__GNUC__) || defined(__clang__))) + (__STDC_VERSION__ >= 199901L && \ + !defined(__GNUC_GNU_INLINE__) && \ + (!defined(__GNUC__) || defined(__clang__))) # define __header_inline inline #elif defined(__GNUC__) && defined(__GNUC_STDC_INLINE__) # define __header_inline extern __inline __attribute__((__gnu_inline__)) #elif defined(__GNUC__) # define __header_inline extern __inline #else - /* If we land here, we've encountered an unsupported compiler, - * so hopefully it understands static __inline as a fallback. - */ +/* If we land here, we've encountered an unsupported compiler, * so hopefully it understands static __inline as a fallback.
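+ *
+ * Whichever definition is selected above, headers consume the macro the
+ * same way; an illustrative (hypothetical) helper:
+ *
+ *     __header_inline int
+ *     clamp_int(int x, int lo, int hi)
+ *     {
+ *             return x < lo ? lo : (x > hi ? hi : x);
+ *     }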
+ */ # define __header_inline static __inline #endif #ifdef __GNUC__ # define __header_always_inline __header_inline __attribute__ ((__always_inline__)) #else - /* Unfortunately, we're using a compiler that we don't know how to force to - * inline. Oh well. - */ +/* Unfortunately, we're using a compiler that we don't know how to force to + * inline. Oh well. + */ # define __header_always_inline __header_inline #endif @@ -324,16 +324,16 @@ */ #if defined(__clang__) # define __unreachable_ok_push \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wunreachable-code\"") + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wunreachable-code\"") # define __unreachable_ok_pop \ - _Pragma("clang diagnostic pop") + _Pragma("clang diagnostic pop") #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) # define __unreachable_ok_push \ - _Pragma("GCC diagnostic push") \ - _Pragma("GCC diagnostic ignored \"-Wunreachable-code\"") + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wunreachable-code\"") # define __unreachable_ok_pop \ - _Pragma("GCC diagnostic pop") + _Pragma("GCC diagnostic pop") #else # define __unreachable_ok_push # define __unreachable_ok_pop @@ -348,13 +348,13 @@ * types. */ #define __printflike(fmtarg, firstvararg) \ - __attribute__((__format__ (__printf__, fmtarg, firstvararg))) + __attribute__((__format__ (__printf__, fmtarg, firstvararg))) #define __printf0like(fmtarg, firstvararg) \ - __attribute__((__format__ (__printf0__, fmtarg, firstvararg))) + __attribute__((__format__ (__printf0__, fmtarg, firstvararg))) #define __scanflike(fmtarg, firstvararg) \ - __attribute__((__format__ (__scanf__, fmtarg, firstvararg))) + __attribute__((__format__ (__scanf__, fmtarg, firstvararg))) -#define __IDSTRING(name,string) static const char name[] __used = string +#define __IDSTRING(name, string) static const char name[] __used = string #ifndef __COPYRIGHT #define __COPYRIGHT(s) __IDSTRING(copyright,s) @@ -374,19 +374,19 @@ /* Source compatibility only, ID string not emitted in object file */ #ifndef __FBSDID -#define __FBSDID(s) +#define __FBSDID(s) #endif -#ifndef __DECONST -#define __DECONST(type, var) __CAST_AWAY_QUALIFIER(var, const, type) +#ifndef __DECONST +#define __DECONST(type, var) __CAST_AWAY_QUALIFIER(var, const, type) #endif -#ifndef __DEVOLATILE -#define __DEVOLATILE(type, var) __CAST_AWAY_QUALIFIER(var, volatile, type) +#ifndef __DEVOLATILE +#define __DEVOLATILE(type, var) __CAST_AWAY_QUALIFIER(var, volatile, type) #endif -#ifndef __DEQUALIFY -#define __DEQUALIFY(type, var) __CAST_AWAY_QUALIFIER(var, const volatile, type) +#ifndef __DEQUALIFY +#define __DEQUALIFY(type, var) __CAST_AWAY_QUALIFIER(var, const volatile, type) #endif /* @@ -449,87 +449,87 @@ /* These settings are particular to each product. 
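 *
 * As a worked example (illustrative; not a declaration made in this
 * file), with __DARWIN_SUF_DARWIN14 defined as "_darwin14" below, a
 * prototype such as
 *
 *     int getrlimit(int, struct rlimit *) __DARWIN14_ALIAS(getrlimit);
 *
 * binds its callers to the versioned symbol _getrlimit_darwin14.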
*/ #ifdef KERNEL -#define __DARWIN_ONLY_64_BIT_INO_T 0 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 0 -#define __DARWIN_ONLY_VERS_1050 0 +#define __DARWIN_ONLY_64_BIT_INO_T 0 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 0 +#define __DARWIN_ONLY_VERS_1050 0 #if defined(__x86_64__) -#define __DARWIN_SUF_DARWIN14 "_darwin14" -#define __DARWIN14_ALIAS(sym) __asm("_" __STRING(sym) __DARWIN_SUF_DARWIN14) +#define __DARWIN_SUF_DARWIN14 "_darwin14" +#define __DARWIN14_ALIAS(sym) __asm("_" __STRING(sym) __DARWIN_SUF_DARWIN14) #else -#define __DARWIN14_ALIAS(sym) +#define __DARWIN14_ALIAS(sym) #endif #else /* !KERNEL */ #ifdef PLATFORM_iPhoneOS /* Platform: iPhoneOS */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_iPhoneOS */ #ifdef PLATFORM_iPhoneSimulator /* Platform: iPhoneSimulator */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_iPhoneSimulator */ #ifdef PLATFORM_tvOS /* Platform: tvOS */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_tvOS */ #ifdef PLATFORM_AppleTVOS /* Platform: AppleTVOS */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_AppleTVOS */ #ifdef PLATFORM_tvSimulator /* Platform: tvSimulator */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_tvSimulator */ #ifdef PLATFORM_AppleTVSimulator /* Platform: AppleTVSimulator */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_AppleTVSimulator */ #ifdef PLATFORM_iPhoneOSNano /* Platform: iPhoneOSNano */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_iPhoneOSNano */ #ifdef PLATFORM_iPhoneNanoSimulator /* Platform: iPhoneNanoSimulator */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_iPhoneNanoSimulator */ #ifdef PLATFORM_WatchOS /* Platform: WatchOS */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_WatchOS */ #ifdef PLATFORM_WatchSimulator /* Platform: WatchSimulator 
*/ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_WatchSimulator */ #ifdef PLATFORM_BridgeOS /* Platform: BridgeOS */ -#define __DARWIN_ONLY_64_BIT_INO_T 1 -#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 -#define __DARWIN_ONLY_VERS_1050 1 +#define __DARWIN_ONLY_64_BIT_INO_T 1 +#define __DARWIN_ONLY_UNIX_CONFORMANCE 1 +#define __DARWIN_ONLY_VERS_1050 1 #endif /* PLATFORM_BridgeOS */ #ifdef PLATFORM_MacOSX /* Platform: MacOSX */ -#define __DARWIN_ONLY_64_BIT_INO_T 0 +#define __DARWIN_ONLY_64_BIT_INO_T 0 /* #undef __DARWIN_ONLY_UNIX_CONFORMANCE (automatically set for 64-bit) */ -#define __DARWIN_ONLY_VERS_1050 0 +#define __DARWIN_ONLY_VERS_1050 0 #endif /* PLATFORM_MacOSX */ #endif /* KERNEL */ @@ -561,26 +561,26 @@ #if !defined(__DARWIN_UNIX03) # if defined(KERNEL) -# define __DARWIN_UNIX03 0 +# define __DARWIN_UNIX03 0 # elif __DARWIN_ONLY_UNIX_CONFORMANCE # if defined(_NONSTD_SOURCE) # error "Can't define _NONSTD_SOURCE when only UNIX conformance is available." # endif /* _NONSTD_SOURCE */ -# define __DARWIN_UNIX03 1 -# elif defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__-0) < 1040) -# define __DARWIN_UNIX03 0 +# define __DARWIN_UNIX03 1 +# elif defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 0) < 1040) +# define __DARWIN_UNIX03 0 # elif defined(_DARWIN_C_SOURCE) || defined(_XOPEN_SOURCE) || defined(_POSIX_C_SOURCE) # if defined(_NONSTD_SOURCE) # error "Can't define both _NONSTD_SOURCE and any of _DARWIN_C_SOURCE, _XOPEN_SOURCE or _POSIX_C_SOURCE." 
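/* Illustrative aside: compiling with -D_POSIX_C_SOURCE=200112L and no
 * _NONSTD_SOURCE skips the #error above and takes this branch, so
 * __DARWIN_UNIX03 is set to 1 and the $UNIX2003 variants are selected. */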
# endif /* _NONSTD_SOURCE */ -# define __DARWIN_UNIX03 1 +# define __DARWIN_UNIX03 1 # elif defined(_NONSTD_SOURCE) -# define __DARWIN_UNIX03 0 +# define __DARWIN_UNIX03 0 # else /* default */ -# if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__-0) < 1050) -# define __DARWIN_UNIX03 0 +# if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 0) < 1050) +# define __DARWIN_UNIX03 0 # else /* __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050 */ -# define __DARWIN_UNIX03 1 +# define __DARWIN_UNIX03 1 # endif /* __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050 */ # endif /* _DARWIN_C_SOURCE || _XOPEN_SOURCE || _POSIX_C_SOURCE || __LP64__ */ #endif /* !__DARWIN_UNIX03 */ @@ -601,7 +601,7 @@ # else /* default */ # if __DARWIN_ONLY_64_BIT_INO_T # define __DARWIN_64_BIT_INO_T 1 -# elif defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__-0) < 1060) || __DARWIN_UNIX03 == 0 +# elif defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 0) < 1060) || __DARWIN_UNIX03 == 0 # define __DARWIN_64_BIT_INO_T 0 # else /* default */ # define __DARWIN_64_BIT_INO_T 1 @@ -614,7 +614,7 @@ # define __DARWIN_VERS_1050 0 # elif __DARWIN_ONLY_VERS_1050 # define __DARWIN_VERS_1050 1 -# elif defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__-0) < 1050) || __DARWIN_UNIX03 == 0 +# elif defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && ((__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 0) < 1050) || __DARWIN_UNIX03 == 0 # define __DARWIN_VERS_1050 0 # else /* default */ # define __DARWIN_VERS_1050 1 @@ -634,63 +634,63 @@ */ #if __DARWIN_UNIX03 # if __DARWIN_ONLY_UNIX_CONFORMANCE -# define __DARWIN_SUF_UNIX03 /* nothing */ +# define __DARWIN_SUF_UNIX03 /* nothing */ # else /* !__DARWIN_ONLY_UNIX_CONFORMANCE */ -# define __DARWIN_SUF_UNIX03 "$UNIX2003" +# define __DARWIN_SUF_UNIX03 "$UNIX2003" # endif /* __DARWIN_ONLY_UNIX_CONFORMANCE */ # if __DARWIN_64_BIT_INO_T # if __DARWIN_ONLY_64_BIT_INO_T -# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ +# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ # else /* !__DARWIN_ONLY_64_BIT_INO_T */ -# define __DARWIN_SUF_64_BIT_INO_T "$INODE64" +# define __DARWIN_SUF_64_BIT_INO_T "$INODE64" # endif /* __DARWIN_ONLY_64_BIT_INO_T */ # else /* !__DARWIN_64_BIT_INO_T */ -# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ +# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ # endif /* __DARWIN_64_BIT_INO_T */ # if __DARWIN_VERS_1050 # if __DARWIN_ONLY_VERS_1050 -# define __DARWIN_SUF_1050 /* nothing */ +# define __DARWIN_SUF_1050 /* nothing */ # else /* !__DARWIN_ONLY_VERS_1050 */ -# define __DARWIN_SUF_1050 "$1050" +# define __DARWIN_SUF_1050 "$1050" # endif /* __DARWIN_ONLY_VERS_1050 */ # else /* !__DARWIN_VERS_1050 */ -# define __DARWIN_SUF_1050 /* nothing */ +# define __DARWIN_SUF_1050 /* nothing */ # endif /* __DARWIN_VERS_1050 */ # if __DARWIN_NON_CANCELABLE -# define __DARWIN_SUF_NON_CANCELABLE "$NOCANCEL" +# define __DARWIN_SUF_NON_CANCELABLE "$NOCANCEL" # else /* !__DARWIN_NON_CANCELABLE */ -# define __DARWIN_SUF_NON_CANCELABLE /* nothing */ +# define __DARWIN_SUF_NON_CANCELABLE /* nothing */ # endif /* __DARWIN_NON_CANCELABLE */ #else /* !__DARWIN_UNIX03 */ -# define __DARWIN_SUF_UNIX03 /* nothing */ -# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ -# define __DARWIN_SUF_NON_CANCELABLE /* nothing */ -# 
define __DARWIN_SUF_1050 /* nothing */ +# define __DARWIN_SUF_UNIX03 /* nothing */ +# define __DARWIN_SUF_64_BIT_INO_T /* nothing */ +# define __DARWIN_SUF_NON_CANCELABLE /* nothing */ +# define __DARWIN_SUF_1050 /* nothing */ #endif /* __DARWIN_UNIX03 */ -#define __DARWIN_SUF_EXTSN "$DARWIN_EXTSN" +#define __DARWIN_SUF_EXTSN "$DARWIN_EXTSN" /* * symbol versioning macros */ -#define __DARWIN_ALIAS(sym) __asm("_" __STRING(sym) __DARWIN_SUF_UNIX03) -#define __DARWIN_ALIAS_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_NON_CANCELABLE __DARWIN_SUF_UNIX03) -#define __DARWIN_ALIAS_I(sym) __asm("_" __STRING(sym) __DARWIN_SUF_64_BIT_INO_T __DARWIN_SUF_UNIX03) -#define __DARWIN_NOCANCEL(sym) __asm("_" __STRING(sym) __DARWIN_SUF_NON_CANCELABLE) -#define __DARWIN_INODE64(sym) __asm("_" __STRING(sym) __DARWIN_SUF_64_BIT_INO_T) +#define __DARWIN_ALIAS(sym) __asm("_" __STRING(sym) __DARWIN_SUF_UNIX03) +#define __DARWIN_ALIAS_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_NON_CANCELABLE __DARWIN_SUF_UNIX03) +#define __DARWIN_ALIAS_I(sym) __asm("_" __STRING(sym) __DARWIN_SUF_64_BIT_INO_T __DARWIN_SUF_UNIX03) +#define __DARWIN_NOCANCEL(sym) __asm("_" __STRING(sym) __DARWIN_SUF_NON_CANCELABLE) +#define __DARWIN_INODE64(sym) __asm("_" __STRING(sym) __DARWIN_SUF_64_BIT_INO_T) -#define __DARWIN_1050(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050) -#define __DARWIN_1050ALIAS(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_UNIX03) -#define __DARWIN_1050ALIAS_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_NON_CANCELABLE __DARWIN_SUF_UNIX03) -#define __DARWIN_1050ALIAS_I(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_64_BIT_INO_T __DARWIN_SUF_UNIX03) -#define __DARWIN_1050INODE64(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_64_BIT_INO_T) +#define __DARWIN_1050(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050) +#define __DARWIN_1050ALIAS(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_UNIX03) +#define __DARWIN_1050ALIAS_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_NON_CANCELABLE __DARWIN_SUF_UNIX03) +#define __DARWIN_1050ALIAS_I(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_64_BIT_INO_T __DARWIN_SUF_UNIX03) +#define __DARWIN_1050INODE64(sym) __asm("_" __STRING(sym) __DARWIN_SUF_1050 __DARWIN_SUF_64_BIT_INO_T) -#define __DARWIN_EXTSN(sym) __asm("_" __STRING(sym) __DARWIN_SUF_EXTSN) -#define __DARWIN_EXTSN_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_EXTSN __DARWIN_SUF_NON_CANCELABLE) +#define __DARWIN_EXTSN(sym) __asm("_" __STRING(sym) __DARWIN_SUF_EXTSN) +#define __DARWIN_EXTSN_C(sym) __asm("_" __STRING(sym) __DARWIN_SUF_EXTSN __DARWIN_SUF_NON_CANCELABLE) /* * symbol release macros @@ -733,13 +733,13 @@ /* Deal with IEEE Std. 1003.1-1990, in which _POSIX_C_SOURCE == 1L. */ #if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 1L #undef _POSIX_C_SOURCE -#define _POSIX_C_SOURCE 199009L +#define _POSIX_C_SOURCE 199009L #endif /* Deal with IEEE Std. 1003.2-1992, in which _POSIX_C_SOURCE == 2L. */ #if defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE == 2L #undef _POSIX_C_SOURCE -#define _POSIX_C_SOURCE 199209L +#define _POSIX_C_SOURCE 199209L #endif /* Deal with various X/Open Portability Guides and Single UNIX Spec. 
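 *
 * Worked example of the mapping below (illustrative): building with
 * -D_XOPEN_SOURCE=600 and no _POSIX_C_SOURCE leaves _POSIX_C_SOURCE
 * defined to 200112L once this block has been evaluated.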
*/ @@ -749,10 +749,10 @@ #define _POSIX_C_SOURCE 200809L #elif _XOPEN_SOURCE - 0L >= 600L && (!defined(_POSIX_C_SOURCE) || _POSIX_C_SOURCE - 0L < 200112L) #undef _POSIX_C_SOURCE -#define _POSIX_C_SOURCE 200112L +#define _POSIX_C_SOURCE 200112L #elif _XOPEN_SOURCE - 0L >= 500L && (!defined(_POSIX_C_SOURCE) || _POSIX_C_SOURCE - 0L < 199506L) #undef _POSIX_C_SOURCE -#define _POSIX_C_SOURCE 199506L +#define _POSIX_C_SOURCE 199506L #endif #endif @@ -803,22 +803,22 @@ * c99 still want long longs. While not perfect, we allow long longs for * g++. */ -#if (defined(__STRICT_ANSI__) && (__STDC_VERSION__-0 < 199901L) && !defined(__GNUG__)) +#if (defined(__STRICT_ANSI__) && (__STDC_VERSION__ - 0 < 199901L) && !defined(__GNUG__)) #define __DARWIN_NO_LONG_LONG 1 #else #define __DARWIN_NO_LONG_LONG 0 #endif /***************************************** - * Public darwin-specific feature macros - *****************************************/ +* Public darwin-specific feature macros +*****************************************/ /* * _DARWIN_FEATURE_64_BIT_INODE indicates that the ino_t type is 64-bit, and * structures modified for 64-bit inodes (like struct stat) will be used. */ #if __DARWIN_64_BIT_INO_T -#define _DARWIN_FEATURE_64_BIT_INODE 1 +#define _DARWIN_FEATURE_64_BIT_INODE 1 #endif /* @@ -828,7 +828,7 @@ * struct stat will already be the 64-bit version. */ #if __DARWIN_ONLY_64_BIT_INO_T -#define _DARWIN_FEATURE_ONLY_64_BIT_INODE 1 +#define _DARWIN_FEATURE_ONLY_64_BIT_INODE 1 #endif /* @@ -836,7 +836,7 @@ * in 10.5 exists; no pre-10.5 variants are available. */ #if __DARWIN_ONLY_VERS_1050 -#define _DARWIN_FEATURE_ONLY_VERS_1050 1 +#define _DARWIN_FEATURE_ONLY_VERS_1050 1 #endif /* @@ -844,7 +844,7 @@ * are available (the legacy BSD APIs are not available) */ #if __DARWIN_ONLY_UNIX_CONFORMANCE -#define _DARWIN_FEATURE_ONLY_UNIX_CONFORMANCE 1 +#define _DARWIN_FEATURE_ONLY_UNIX_CONFORMANCE 1 #endif /* @@ -852,10 +852,10 @@ * and specifies the conformance level (3 is SUSv3) */ #if __DARWIN_UNIX03 -#define _DARWIN_FEATURE_UNIX_CONFORMANCE 3 +#define _DARWIN_FEATURE_UNIX_CONFORMANCE 3 #endif -/* +/* * This macro casts away the qualifier from the variable * * Note: use at your own risk, removing qualifiers can result in @@ -889,9 +889,9 @@ * Selectively ignore cast alignment warnings */ #define __IGNORE_WCASTALIGN(x) _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wcast-align\"") \ - x; \ - _Pragma("clang diagnostic pop") + _Pragma("clang diagnostic ignored \"-Wcast-align\"") \ + x; \ + _Pragma("clang diagnostic pop") #endif #if defined(PRIVATE) || defined(KERNEL) @@ -901,13 +901,13 @@ * to be taken. */ #if !defined(__probable) && !defined(__improbable) -#define __probable(x) __builtin_expect(!!(x), 1) -#define __improbable(x) __builtin_expect(!!(x), 0) +#define __probable(x) __builtin_expect(!!(x), 1) +#define __improbable(x) __builtin_expect(!!(x), 0) #endif /* !defined(__probable) && !defined(__improbable) */ #define __container_of(ptr, type, field) ({ \ - const typeof(((type *)0)->field) *__ptr = (ptr); \ - (type *)((uintptr_t)__ptr - offsetof(type, field)); \ + const typeof(((type *)0)->field) *__ptr = (ptr); \ + (type *)((uintptr_t)__ptr - offsetof(type, field)); \ }) #endif /* KERNEL || PRIVATE */ diff --git a/bsd/sys/clist.h b/bsd/sys/clist.h index 98aa9427b..f4161a638 100644 --- a/bsd/sys/clist.h +++ b/bsd/sys/clist.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -61,21 +61,20 @@ * @(#)clist.h 8.1 (Berkeley) 6/4/93 */ -#ifndef _SYS_CLIST_H_ +#ifndef _SYS_CLIST_H_ #define _SYS_CLIST_H_ #ifdef KERNEL_PRIVATE struct cblock { - struct cblock *c_next; /* next cblock in queue */ - char c_quote[CBQSIZE]; /* quoted characters */ - char c_info[CBSIZE]; /* characters */ + struct cblock *c_next; /* next cblock in queue */ + char c_quote[CBQSIZE]; /* quoted characters */ + char c_info[CBSIZE]; /* characters */ }; -extern struct cblock *cfree, *cfreelist; -extern int cfreecount, nclist; +extern struct cblock *cfree, *cfreelist; +extern int cfreecount, nclist; #endif /* KERNEL_PRIVATE */ -#endif /* _SYS_CLIST_H_ */ - +#endif /* _SYS_CLIST_H_ */ diff --git a/bsd/sys/coalition.h b/bsd/sys/coalition.h index d3ab93f32..34a532a9d 100644 --- a/bsd/sys/coalition.h +++ b/bsd/sys/coalition.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -69,14 +69,14 @@ uint64_t coalition_id(coalition_t coal); * This interface is primarily to support libproc. * * Parameters: - * type : The COALITION_TYPE of the coalitions to investigate. 
- * Valid types can be found in - * coal_list : Pointer to an array of procinfo_coalinfo structures - * that will be filled with information about each - * coalition whose type matches 'type' - * NOTE: This can be NULL to perform a simple query of - * the total number of coalitions. - * list_sz : The size (in number of structures) of 'coal_list' + * type : The COALITION_TYPE of the coalitions to investigate. + * Valid types can be found in + * coal_list : Pointer to an array of procinfo_coalinfo structures + * that will be filled with information about each + * coalition whose type matches 'type' + * NOTE: This can be NULL to perform a simple query of + * the total number of coalitions. + * list_sz : The size (in number of structures) of 'coal_list' * * Returns: 0 if no coalitions matching 'type' are found * Otherwise: the number of coalitions whose type matches @@ -90,17 +90,17 @@ extern int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, in * Determine if a task is a coalition leader. * * Parameters: - * task : The task to investigate - * coal_type : The COALITION_TYPE of the coalition to investigate. - * Valid types can be found in - * coal : If 'task' is a valid task, and is a member of a coalition - * of type 'coal_type', then 'coal' will be filled in with - * the corresponding coalition_t object. - * NOTE: This will be filled in whether or not the 'task' is - * a leader in the coalition. However, if 'task' is - * not a member of a coalition of type 'coal_type' then - * 'coal' will be filled in with COALITION_NULL. - * NOTE: This can be NULL + * task : The task to investigate + * coal_type : The COALITION_TYPE of the coalition to investigate. + * Valid types can be found in + * coal : If 'task' is a valid task, and is a member of a coalition + * of type 'coal_type', then 'coal' will be filled in with + * the corresponding coalition_t object. + * NOTE: This will be filled in whether or not the 'task' is + * a leader in the coalition. However, if 'task' is + * not a member of a coalition of type 'coal_type' then + * 'coal' will be filled in with COALITION_NULL. + * NOTE: This can be NULL * * Returns: TRUE if 'task' is a coalition leader, FALSE otherwise. */ @@ -126,7 +126,7 @@ extern task_t coalition_get_leader(coalition_t coal); * Sum up the number of tasks in the given coalition * * Parameters: - * coal : The coalition to investigate + * coal : The coalition to investigate * * Returns: The number of tasks in the coalition */ @@ -137,9 +137,9 @@ extern int coalition_get_task_count(coalition_t coal); * Sum up the page count for each task in the coalition specified by 'coal' * * Parameters: - * coal : The coalition to investigate - * ntasks : If non-NULL, this will be filled in with the number of - * tasks in the coalition. + * coal : The coalition to investigate + * ntasks : If non-NULL, this will be filled in with the number of + * tasks in the coalition. * * Returns: The sum of all pages used by all members of the coalition */ @@ -151,18 +151,18 @@ extern uint64_t coalition_get_page_count(coalition_t coal, int *ntasks); * given role. * * Parameters: - * coal : The coalition to investigate - * rolemask : The set of coalition task roles used to filter the list - * of PIDs returned in 'pid_list'. Roles can be combined - * using the COALITION_ROLEMASK_* tokens found in - * . Each PID returned is guaranteed to - * be tagged with one of the task roles specified by this - * mask. 
- * sort_order : The order in which the returned PIDs should be sorted - * by default this is in descending page count. - * pid_list : Pointer to an array of PIDs that will be filled with - * members of the coalition tagged with the given 'taskrole' - * list_sz : The size (in number of PIDs) of 'pid_list' + * coal : The coalition to investigate + * rolemask : The set of coalition task roles used to filter the list + * of PIDs returned in 'pid_list'. Roles can be combined + * using the COALITION_ROLEMASK_* tokens found in + * . Each PID returned is guaranteed to + * be tagged with one of the task roles specified by this + * mask. + * sort_order : The order in which the returned PIDs should be sorted + * by default this is in descending page count. + * pid_list : Pointer to an array of PIDs that will be filled with + * members of the coalition tagged with the given 'taskrole' + * list_sz : The size (in number of PIDs) of 'pid_list' * * Note: * This function will return the list of PIDs in a sorted order. By default @@ -186,45 +186,51 @@ extern uint64_t coalition_get_page_count(coalition_t coal, int *ntasks); * */ extern int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, - int sort_order, int *pid_list, int list_sz); + int sort_order, int *pid_list, int list_sz); #else /* !CONFIG_COALITIONS */ -static inline uint64_t coalition_id(__unused coalition_t coal) +static inline uint64_t +coalition_id(__unused coalition_t coal) { return 0; } -static inline int coalitions_get_list(__unused int type, - __unused struct procinfo_coalinfo *coal_list, - __unused int list_sz) +static inline int +coalitions_get_list(__unused int type, + __unused struct procinfo_coalinfo *coal_list, + __unused int list_sz) { return 0; } -static inline boolean_t coalition_is_leader(__unused task_t task, - __unused int coal_type, - coalition_t *coal) +static inline boolean_t +coalition_is_leader(__unused task_t task, + __unused int coal_type, + coalition_t *coal) { *coal = COALITION_NULL; return FALSE; } -static inline int coalition_get_task_count(__unused coalition_t coal) +static inline int +coalition_get_task_count(__unused coalition_t coal) { return 0; } -static inline uint64_t coalition_get_page_count(__unused coalition_t coal, - __unused int *ntasks) +static inline uint64_t +coalition_get_page_count(__unused coalition_t coal, + __unused int *ntasks) { return 0; } -static inline int coalition_get_pid_list(__unused coalition_t coal, - __unused uint32_t rolemask, - __unused int sort_order, - __unused int *pid_list, - __unused int list_sz) +static inline int +coalition_get_pid_list(__unused coalition_t coal, + __unused uint32_t rolemask, + __unused int sort_order, + __unused int *pid_list, + __unused int list_sz) { return 0; } diff --git a/bsd/sys/codedir_internal.h b/bsd/sys/codedir_internal.h index 556a311d0..2ef81007c 100644 --- a/bsd/sys/codedir_internal.h +++ b/bsd/sys/codedir_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
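[Editor's note] The coalitions_get_list() contract documented in the hunks above (a NULL 'coal_list' probes the total count; a second call fills a sized array) follows the usual two-pass kernel query pattern. A minimal sketch of a kernel-side caller under CONFIG_COALITIONS — COALITION_TYPE_JETSAM and the kalloc()/kfree() calls are assumptions about the caller's environment, not APIs introduced by this patch:

	/*
	 * Two-pass use of coalitions_get_list(): first probe the
	 * coalition count with a NULL list, then fetch that many
	 * procinfo_coalinfo records. Illustrative only.
	 */
	static int
	dump_jetsam_coalitions(void)
	{
		struct procinfo_coalinfo *list;
		int ncoal, filled;

		ncoal = coalitions_get_list(COALITION_TYPE_JETSAM, NULL, 0);
		if (ncoal <= 0)
			return ncoal;           /* none found, or error */

		list = kalloc(ncoal * sizeof(*list));
		if (list == NULL)
			return -1;

		filled = coalitions_get_list(COALITION_TYPE_JETSAM, list, ncoal);
		/* ... consume 'filled' entries ... */
		kfree(list, ncoal * sizeof(*list));
		return filled;
	}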
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,17 +22,17 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * File: codesign_internal.h - * Author: Greg Kerr + * Author: Greg Kerr * 05-Dec-2013 * * Header file for Code Directory Functions. * - */ + */ #ifndef _SYS_CODEDIR_INTERNAL_H_ #define _SYS_CODEDIR_INTERNAL_H_ diff --git a/bsd/sys/codesign.h b/bsd/sys/codesign.h index 069725c7e4..26e3e1f64 100644 --- a/bsd/sys/codesign.h +++ b/bsd/sys/codesign.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -39,25 +39,25 @@ #define MAC_VNODE_CHECK_DYLD_SIM 0x1 /* tells the MAC framework that dyld-sim is being loaded */ /* csops operations */ -#define CS_OPS_STATUS 0 /* return status */ -#define CS_OPS_MARKINVALID 1 /* invalidate process */ -#define CS_OPS_MARKHARD 2 /* set HARD flag */ -#define CS_OPS_MARKKILL 3 /* set KILL flag (sticky) */ +#define CS_OPS_STATUS 0 /* return status */ +#define CS_OPS_MARKINVALID 1 /* invalidate process */ +#define CS_OPS_MARKHARD 2 /* set HARD flag */ +#define CS_OPS_MARKKILL 3 /* set KILL flag (sticky) */ #ifdef KERNEL_PRIVATE /* CS_OPS_PIDPATH 4 */ #endif -#define CS_OPS_CDHASH 5 /* get code directory hash */ -#define CS_OPS_PIDOFFSET 6 /* get offset of active Mach-o slice */ -#define CS_OPS_ENTITLEMENTS_BLOB 7 /* get entitlements blob */ -#define CS_OPS_MARKRESTRICT 8 /* set RESTRICT flag (sticky) */ -#define CS_OPS_SET_STATUS 9 /* set codesign flags */ -#define CS_OPS_BLOB 10 /* get codesign blob */ -#define CS_OPS_IDENTITY 11 /* get codesign identity */ -#define CS_OPS_CLEARINSTALLER 12 /* clear INSTALLER flag */ +#define CS_OPS_CDHASH 5 /* get code directory hash */ +#define CS_OPS_PIDOFFSET 6 /* get offset of active Mach-o slice */ +#define CS_OPS_ENTITLEMENTS_BLOB 7 /* get entitlements blob */ +#define CS_OPS_MARKRESTRICT 8 /* set RESTRICT flag (sticky) */ +#define CS_OPS_SET_STATUS 9 /* set codesign flags */ +#define CS_OPS_BLOB 10 /* get codesign blob */ +#define CS_OPS_IDENTITY 11 /* get codesign identity */ +#define CS_OPS_CLEARINSTALLER 12 /* clear INSTALLER flag */ #define CS_OPS_CLEARPLATFORM 13 /* clear platform binary status (DEVELOPMENT-only) */ #define CS_OPS_TEAMID 14 /* get team id */ -#define CS_MAX_TEAMID_LEN 64 +#define CS_MAX_TEAMID_LEN 64 #ifndef KERNEL @@ -83,34 +83,34 @@ struct cs_blob; struct fileglob; __BEGIN_DECLS -int cs_valid(struct proc *); -int cs_process_enforcement(struct proc *); +int cs_valid(struct proc *); +int cs_process_enforcement(struct proc *); int cs_process_global_enforcement(void); int cs_system_enforcement(void); -int cs_require_lv(struct proc *); +int cs_require_lv(struct proc *); int csproc_forced_lv(struct proc* p); -int cs_system_require_lv(void); +int cs_system_require_lv(void); uint32_t cs_entitlement_flags(struct proc *p); -int cs_entitlements_blob_get(struct proc *, void **, size_t *); -int cs_restricted(struct proc *); +int cs_entitlements_blob_get(struct proc *, void **, size_t *); +int cs_restricted(struct proc *); uint8_t * cs_get_cdhash(struct proc *); struct cs_blob * csproc_get_blob(struct proc *); struct cs_blob * csvnode_get_blob(struct vnode *, off_t); -void csvnode_print_debug(struct vnode *); - -off_t csblob_get_base_offset(struct cs_blob *); -vm_size_t csblob_get_size(struct cs_blob *); -vm_address_t csblob_get_addr(struct cs_blob *); -const char * csblob_get_teamid(struct cs_blob *); -const char * csblob_get_identity(struct cs_blob *); -const uint8_t * csblob_get_cdhash(struct cs_blob *); -int csblob_get_platform_binary(struct cs_blob *); -unsigned int csblob_get_flags(struct cs_blob *); -uint8_t csblob_get_hashtype(struct cs_blob const *); -unsigned int csblob_get_signer_type(struct cs_blob *); +void csvnode_print_debug(struct vnode *); + +off_t csblob_get_base_offset(struct cs_blob *); +vm_size_t csblob_get_size(struct cs_blob *); +vm_address_t csblob_get_addr(struct cs_blob *); +const char * csblob_get_teamid(struct cs_blob *); +const char * csblob_get_identity(struct cs_blob *); +const uint8_t * csblob_get_cdhash(struct cs_blob 
*); +int csblob_get_platform_binary(struct cs_blob *); +unsigned int csblob_get_flags(struct cs_blob *); +uint8_t csblob_get_hashtype(struct cs_blob const *); +unsigned int csblob_get_signer_type(struct cs_blob *); #if DEVELOPMENT || DEBUG -void csproc_clear_platform_binary(struct proc *); +void csproc_clear_platform_binary(struct proc *); #endif void csproc_disable_enforcement(struct proc* p); @@ -118,12 +118,12 @@ void csproc_mark_invalid_allowed(struct proc* p); int csproc_check_invalid_allowed(struct proc* p); int csproc_hardened_runtime(struct proc* p); -int csblob_get_entitlements(struct cs_blob *, void **, size_t *); +int csblob_get_entitlements(struct cs_blob *, void **, size_t *); const CS_GenericBlob * - csblob_find_blob(struct cs_blob *, uint32_t, uint32_t); + csblob_find_blob(struct cs_blob *, uint32_t, uint32_t); const CS_GenericBlob * - csblob_find_blob_bytes(const uint8_t *, size_t, uint32_t, uint32_t); + csblob_find_blob_bytes(const uint8_t *, size_t, uint32_t, uint32_t); void * csblob_entitlements_dictionary_copy(struct cs_blob *csblob); void csblob_entitlements_dictionary_set(struct cs_blob *csblob, void * entitlements); @@ -131,13 +131,13 @@ void csblob_entitlements_dictionary_set(struct cs_blob *csblob, void * Mostly convenience functions below */ -const char * csproc_get_teamid(struct proc *); -const char * csvnode_get_teamid(struct vnode *, off_t); -int csproc_get_platform_binary(struct proc *); +const char * csproc_get_teamid(struct proc *); +const char * csvnode_get_teamid(struct vnode *, off_t); +int csproc_get_platform_binary(struct proc *); int csproc_get_prod_signed(struct proc *); -const char * csfg_get_teamid(struct fileglob *); -int csfg_get_path(struct fileglob *, char *, int *); -int csfg_get_platform_binary(struct fileglob *); +const char * csfg_get_teamid(struct fileglob *); +int csfg_get_path(struct fileglob *, char *, int *); +int csfg_get_platform_binary(struct fileglob *); uint8_t * csfg_get_cdhash(struct fileglob *, uint64_t, size_t *); int csfg_get_prod_signed(struct fileglob *); unsigned int csfg_get_signer_type(struct fileglob *); @@ -154,16 +154,16 @@ extern unsigned int cs_debug_unsigned_exec_failures; extern unsigned int cs_debug_unsigned_mmap_failures; int cs_blob_create_validated(vm_address_t* addr, vm_size_t size, - struct cs_blob ** ret_blob, CS_CodeDirectory const **ret_cd); + struct cs_blob ** ret_blob, CS_CodeDirectory const **ret_cd); void cs_blob_free(struct cs_blob *blob); #ifdef XNU_KERNEL_PRIVATE -void cs_init(void); -int cs_allow_invalid(struct proc *); -int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed); -int csproc_get_platform_path(struct proc *); +void cs_init(void); +int cs_allow_invalid(struct proc *); +int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed); +int csproc_get_platform_path(struct proc *); #if !SECURE_KERNEL extern int cs_enforcement_panic; diff --git a/bsd/sys/commpage.h b/bsd/sys/commpage.h index 42bfe61b2..83871f07f 100644 --- a/bsd/sys/commpage.h +++ b/bsd/sys/commpage.h @@ -28,13 +28,13 @@ #ifndef _COMMPAGE_H #define _COMMPAGE_H -#ifdef PRIVATE -typedef volatile struct commpage_timeofday_data{ - uint64_t TimeStamp_tick; - uint64_t TimeStamp_sec; - uint64_t TimeStamp_frac; - uint64_t Ticks_scale; - uint64_t Ticks_per_sec; +#ifdef PRIVATE +typedef volatile struct commpage_timeofday_data { + uint64_t TimeStamp_tick; + uint64_t TimeStamp_sec; + uint64_t TimeStamp_frac; + uint64_t Ticks_scale; + uint64_t Ticks_per_sec; } new_commpage_timeofday_data_t; #endif diff --git a/bsd/sys/conf.h b/bsd/sys/conf.h 
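[Editor's note] The CS_OPS_* operations retabulated in the codesign.h hunk above are driven from user space through the csops() syscall declared in the non-KERNEL portion of the same header. A small sketch querying a process's code-signing status word (minimal error handling; flag interpretation is left to the CS_* status bits defined earlier in the header):

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/types.h>
	#include <sys/codesign.h>

	/* Fetch the code-signing status flags of 'pid' via CS_OPS_STATUS. */
	int
	print_cs_status(pid_t pid)
	{
		uint32_t status = 0;

		if (csops(pid, CS_OPS_STATUS, &status, sizeof(status)) != 0) {
			perror("csops");
			return -1;
		}
		printf("pid %d cs flags: 0x%08x\n", (int)pid, status);
		return 0;
	}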
index 273b690ad..68cec736c 100644 --- a/bsd/sys/conf.h +++ b/bsd/sys/conf.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -88,81 +88,81 @@ struct vnode; * Types for d_type. * These are returned by ioctl FIODTYPE */ -#define D_TAPE 1 -#define D_DISK 2 -#define D_TTY 3 +#define D_TAPE 1 +#define D_DISK 2 +#define D_TTY 3 #ifdef KERNEL -/* +/* * Device switch function types. */ typedef int open_close_fcn_t(dev_t dev, int flags, int devtype, - struct proc *p); + struct proc *p); typedef struct tty *d_devtotty_t(dev_t dev); -typedef void strategy_fcn_t(struct buf *bp); +typedef void strategy_fcn_t(struct buf *bp); typedef int ioctl_fcn_t(dev_t dev, u_long cmd, caddr_t data, - int fflag, struct proc *p); + int fflag, struct proc *p); typedef int dump_fcn_t(void); /* parameters vary by architecture */ -typedef int psize_fcn_t(dev_t dev); +typedef int psize_fcn_t(dev_t dev); typedef int read_write_fcn_t(dev_t dev, struct uio *uio, int ioflag); -typedef int stop_fcn_t(struct tty *tp, int rw); -typedef int reset_fcn_t(int uban); -typedef int select_fcn_t(dev_t dev, int which, void * wql, struct proc *p); -typedef int mmap_fcn_t(void); - -#define d_open_t open_close_fcn_t -#define d_close_t open_close_fcn_t -#define d_read_t read_write_fcn_t -#define d_write_t read_write_fcn_t -#define d_ioctl_t ioctl_fcn_t -#define d_stop_t stop_fcn_t -#define d_reset_t reset_fcn_t -#define d_select_t select_fcn_t -#define d_mmap_t mmap_fcn_t -#define d_strategy_t strategy_fcn_t +typedef int stop_fcn_t(struct tty *tp, int rw); +typedef int reset_fcn_t(int uban); +typedef int select_fcn_t(dev_t dev, int which, void * wql, struct proc *p); +typedef int mmap_fcn_t(void); + +#define d_open_t open_close_fcn_t +#define d_close_t open_close_fcn_t +#define d_read_t read_write_fcn_t +#define d_write_t read_write_fcn_t +#define d_ioctl_t ioctl_fcn_t +#define d_stop_t stop_fcn_t +#define d_reset_t reset_fcn_t +#define d_select_t select_fcn_t +#define d_mmap_t mmap_fcn_t +#define d_strategy_t strategy_fcn_t __BEGIN_DECLS -int enodev(void); -void enodev_strat(void); +int enodev(void); +void enodev_strat(void); __END_DECLS /* * Versions of enodev() pointer, cast to appropriate function type. For use * in empty devsw slots. 
*/ -#define eno_opcl ((open_close_fcn_t *)&enodev) -#define eno_strat ((strategy_fcn_t *)&enodev_strat) -#define eno_ioctl ((ioctl_fcn_t *)&enodev) -#define eno_dump ((dump_fcn_t *)&enodev) -#define eno_psize ((psize_fcn_t *)&enodev) -#define eno_rdwrt ((read_write_fcn_t *)&enodev) -#define eno_stop ((stop_fcn_t *)&enodev) -#define eno_reset ((reset_fcn_t *)&enodev) -#define eno_mmap ((mmap_fcn_t *)&enodev) -#define eno_select ((select_fcn_t *)&enodev) +#define eno_opcl ((open_close_fcn_t *)&enodev) +#define eno_strat ((strategy_fcn_t *)&enodev_strat) +#define eno_ioctl ((ioctl_fcn_t *)&enodev) +#define eno_dump ((dump_fcn_t *)&enodev) +#define eno_psize ((psize_fcn_t *)&enodev) +#define eno_rdwrt ((read_write_fcn_t *)&enodev) +#define eno_stop ((stop_fcn_t *)&enodev) +#define eno_reset ((reset_fcn_t *)&enodev) +#define eno_mmap ((mmap_fcn_t *)&enodev) +#define eno_select ((select_fcn_t *)&enodev) /* For source backward compatibility only! */ -#define eno_getc ((void *)&enodev) -#define eno_putc ((void *)&enodev) +#define eno_getc ((void *)&enodev) +#define eno_putc ((void *)&enodev) /* * Block device switch table */ struct bdevsw { - open_close_fcn_t *d_open; - open_close_fcn_t *d_close; - strategy_fcn_t *d_strategy; - ioctl_fcn_t *d_ioctl; - dump_fcn_t *d_dump; - psize_fcn_t *d_psize; - int d_type; + open_close_fcn_t *d_open; + open_close_fcn_t *d_close; + strategy_fcn_t *d_strategy; + ioctl_fcn_t *d_ioctl; + dump_fcn_t *d_dump; + psize_fcn_t *d_psize; + int d_type; }; d_devtotty_t nodevtotty; -d_write_t nowrite; +d_write_t nowrite; #ifdef KERNEL_PRIVATE extern struct bdevsw bdevsw[]; @@ -172,29 +172,29 @@ extern int (*bootcache_contains_block)(dev_t device, u_int64_t blkno); /* * Contents of empty bdevsw slot. */ -#define NO_BDEVICE \ - { eno_opcl, eno_opcl, eno_strat, eno_ioctl, \ - eno_dump, eno_psize, 0 } - +#define NO_BDEVICE \ + { eno_opcl, eno_opcl, eno_strat, eno_ioctl, \ + eno_dump, eno_psize, 0 } + /* * Character device switch table */ struct cdevsw { - open_close_fcn_t *d_open; - open_close_fcn_t *d_close; - read_write_fcn_t *d_read; - read_write_fcn_t *d_write; - ioctl_fcn_t *d_ioctl; - stop_fcn_t *d_stop; - reset_fcn_t *d_reset; - struct tty **d_ttys; - select_fcn_t *d_select; - mmap_fcn_t *d_mmap; - strategy_fcn_t *d_strategy; - void *d_reserved_1; - void *d_reserved_2; - int d_type; + open_close_fcn_t *d_open; + open_close_fcn_t *d_close; + read_write_fcn_t *d_read; + read_write_fcn_t *d_write; + ioctl_fcn_t *d_ioctl; + stop_fcn_t *d_stop; + reset_fcn_t *d_reset; + struct tty **d_ttys; + select_fcn_t *d_select; + mmap_fcn_t *d_mmap; + strategy_fcn_t *d_strategy; + void *d_reserved_1; + void *d_reserved_2; + int d_type; }; #ifdef BSD_KERNEL_PRIVATE @@ -208,10 +208,10 @@ extern uint64_t cdevsw_flags[]; struct thread; typedef struct devsw_lock { - TAILQ_ENTRY(devsw_lock) dl_list; - struct thread *dl_thread; - dev_t dl_dev; - int dl_mode; + TAILQ_ENTRY(devsw_lock) dl_list; + struct thread *dl_thread; + dev_t dl_dev; + int dl_mode; } *devsw_lock_t; #endif /* BSD_KERNEL_PRIVATE */ @@ -221,23 +221,23 @@ typedef struct devsw_lock { * Contents of empty cdevsw slot. 
*/ -#define NO_CDEVICE \ - { \ - eno_opcl, eno_opcl, eno_rdwrt, eno_rdwrt, \ - eno_ioctl, eno_stop, eno_reset, 0, \ - (select_fcn_t *)seltrue, eno_mmap, eno_strat, eno_getc, \ - eno_putc, 0 \ +#define NO_CDEVICE \ + { \ + eno_opcl, eno_opcl, eno_rdwrt, eno_rdwrt, \ + eno_ioctl, eno_stop, eno_reset, 0, \ + (select_fcn_t *)seltrue, eno_mmap, eno_strat, eno_getc, \ + eno_putc, 0 \ } - + #endif /* KERNEL */ - + #ifdef KERNEL_PRIVATE typedef int l_open_t (dev_t dev, struct tty *tp); typedef int l_close_t(struct tty *tp, int flags); typedef int l_read_t (struct tty *tp, struct uio *uio, int flag); typedef int l_write_t(struct tty *tp, struct uio *uio, int flag); typedef int l_ioctl_t(struct tty *tp, u_long cmd, caddr_t data, int flag, - struct proc *p); + struct proc *p); typedef int l_rint_t (int c, struct tty *tp); typedef void l_start_t(struct tty *tp); typedef int l_modem_t(struct tty *tp, int flag); @@ -246,21 +246,21 @@ typedef int l_modem_t(struct tty *tp, int flag); * Line discipline switch table */ struct linesw { - l_open_t *l_open; - l_close_t *l_close; - l_read_t *l_read; - l_write_t *l_write; - l_ioctl_t *l_ioctl; - l_rint_t *l_rint; - l_start_t *l_start; - l_modem_t *l_modem; + l_open_t *l_open; + l_close_t *l_close; + l_read_t *l_read; + l_write_t *l_write; + l_ioctl_t *l_ioctl; + l_rint_t *l_rint; + l_start_t *l_start; + l_modem_t *l_modem; }; extern struct linesw linesw[]; extern const int nlinesw; - -int ldisc_register(int , struct linesw *); + +int ldisc_register(int, struct linesw *); void ldisc_deregister(int); #define LDISC_LOAD -1 /* Loadable line discipline */ @@ -271,14 +271,14 @@ void ldisc_deregister(int); * Swap device table */ struct swdevt { - dev_t sw_dev; - int sw_flags; - int sw_nblks; - struct vnode *sw_vp; + dev_t sw_dev; + int sw_flags; + int sw_nblks; + struct vnode *sw_vp; }; -#define SW_FREED 0x01 -#define SW_SEQUENTIAL 0x02 -#define sw_freed sw_flags /* XXX compat */ +#define SW_FREED 0x01 +#define SW_SEQUENTIAL 0x02 +#define sw_freed sw_flags /* XXX compat */ extern struct swdevt swdevt[]; diff --git a/bsd/sys/content_protection.h b/bsd/sys/content_protection.h index 20eae8b31..efcebc5f2 100644 --- a/bsd/sys/content_protection.h +++ b/bsd/sys/content_protection.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,21 +22,21 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
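[Editor's note] The bdevsw/cdevsw tables realigned above are how drivers plug into the BSD device layer; unused slots take the eno_* stubs, exactly as NO_BDEVICE/NO_CDEVICE do. A hedged sketch of a character-device registration — cdevsw_add() is the registration entry point declared elsewhere in conf.h (outside the hunks shown), and its "-1 means pick a free slot" convention is an assumption here:

	/* Trivial handlers; real drivers do actual work. */
	static int
	my_open(dev_t dev, int flags, int devtype, struct proc *p)
	{
		return 0;
	}

	static int
	my_read(dev_t dev, struct uio *uio, int ioflag)
	{
		return 0;
	}

	static struct cdevsw my_cdevsw = {
		.d_open     = my_open,
		.d_close    = eno_opcl,     /* default stubs for the rest */
		.d_read     = my_read,
		.d_write    = eno_rdwrt,
		.d_ioctl    = eno_ioctl,
		.d_stop     = eno_stop,
		.d_reset    = eno_reset,
		.d_ttys     = NULL,
		.d_select   = eno_select,
		.d_mmap     = eno_mmap,
		.d_strategy = eno_strat,
		.d_type     = 0,
	};

	/* int major = cdevsw_add(-1, &my_cdevsw);  // assumed: -1 = any free slot */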
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #ifndef _SYS_CONTENT_PROTECTION_H_ #define _SYS_CONTENT_PROTECTION_H_ #ifdef PRIVATE -/* +/* * Protection classes vary in their restrictions on read/writability. A is generally * the strictest, and D is effectively no restriction. */ -/* +/* * dir_none forces new items created in the directory to pick up the mount point default * protection level. it is only allowed for directories. */ diff --git a/bsd/sys/cprotect.h b/bsd/sys/cprotect.h index cfd4c79f8..bfb333614 100644 --- a/bsd/sys/cprotect.h +++ b/bsd/sys/cprotect.h @@ -27,7 +27,7 @@ */ #ifndef _SYS_CPROTECT_H_ -#define _SYS_CPROTECT_H_ +#define _SYS_CPROTECT_H_ #ifdef KERNEL_PRIVATE @@ -42,21 +42,21 @@ __BEGIN_DECLS #define CP_CODE(code) FSDBG_CODE(DBG_CONTENT_PROT, code) -/* +/* * Class DBG_FSYSTEM == 0x03 * Subclass DBG_CONTENT_PROT == 0xCF * These debug codes are of the form 0x03CFzzzz */ enum { - CPDBG_OFFSET_IO = CP_CODE(0), /* 0x03CF0000 */ + CPDBG_OFFSET_IO = CP_CODE(0), /* 0x03CF0000 */ }; /* normally the debug events are no-ops */ -#define CP_DEBUG(x,a,b,c,d,e) do {} while (0); +#define CP_DEBUG(x, a, b, c, d, e) do {} while (0); /* dev kernels only! */ -#if !SECURE_KERNEL +#if !SECURE_KERNEL /* KDEBUG events used by content protection subsystem */ #if 0 @@ -66,12 +66,12 @@ enum { #endif -#define CP_MAX_WRAPPEDKEYSIZE 128 /* The size of the largest allowed key */ +#define CP_MAX_WRAPPEDKEYSIZE 128 /* The size of the largest allowed key */ /* lock events from AppleKeyStore */ enum { - CP_ACTION_LOCKED = 0, - CP_ACTION_UNLOCKED = 1, + CP_ACTION_LOCKED = 0, + CP_ACTION_UNLOCKED = 1, }; /* * Ideally, cp_key_store_action_t would be an enum, but we cannot fix @@ -87,8 +87,8 @@ typedef int cp_key_store_action_t; */ typedef unsigned char cp_lock_state_t; enum { - CP_LOCKED_STATE = 0, - CP_UNLOCKED_STATE = 1, + CP_LOCKED_STATE = 0, + CP_UNLOCKED_STATE = 1, }; typedef uint32_t cp_key_class_t; @@ -125,13 +125,13 @@ typedef cp_wrapped_key_s* cp_wrapped_key_t; typedef struct { union { - ino64_t inode; - cp_crypto_id_t crypto_id; + ino64_t inode; + cp_crypto_id_t crypto_id; }; - uint32_t volume; - pid_t pid; - uid_t uid; - cp_key_revision_t key_revision; + uint32_t volume; + pid_t pid; + uid_t uid; + cp_key_revision_t key_revision; } cp_cred_s; typedef cp_cred_s* cp_cred_t; @@ -143,12 +143,12 @@ typedef int new_key_t(cp_cred_t access, cp_key_class_t dp_class, cp_raw_key_t ke typedef int invalidater_t(cp_cred_t access); /* invalidates keys */ typedef int backup_key_t(cp_cred_t access, const cp_wrapped_key_t wrapped_key_in, cp_wrapped_key_t wrapped_key_out); -/* - * Flags for Interaction between AKS / Kernel +/* + * Flags for Interaction between AKS / Kernel * These are twiddled via the input/output structs in the above * wrapper/unwrapper functions. 
*/ -#define CP_RAW_KEY_WRAPPEDKEY 0x00000001 +#define CP_RAW_KEY_WRAPPEDKEY 0x00000001 /* * Function prototypes for kexts to interface with our internal cprotect @@ -184,7 +184,7 @@ int cp_key_store_action(cp_key_store_action_t); int cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action); cp_key_os_version_t cp_os_version(void); // Should be cp_key_class_t but HFS has a conflicting definition -int cp_is_valid_class (int isdir, int32_t protectionclass); +int cp_is_valid_class(int isdir, int32_t protectionclass); __END_DECLS diff --git a/bsd/sys/csr.h b/bsd/sys/csr.h index 9b6c0d0ca..7c083d461 100644 --- a/bsd/sys/csr.h +++ b/bsd/sys/csr.h @@ -39,37 +39,37 @@ typedef uint32_t csr_config_t; typedef uint32_t csr_op_t; /* Rootless configuration flags */ -#define CSR_ALLOW_UNTRUSTED_KEXTS (1 << 0) -#define CSR_ALLOW_UNRESTRICTED_FS (1 << 1) -#define CSR_ALLOW_TASK_FOR_PID (1 << 2) -#define CSR_ALLOW_KERNEL_DEBUGGER (1 << 3) -#define CSR_ALLOW_APPLE_INTERNAL (1 << 4) -#define CSR_ALLOW_DESTRUCTIVE_DTRACE (1 << 5) /* name deprecated */ -#define CSR_ALLOW_UNRESTRICTED_DTRACE (1 << 5) -#define CSR_ALLOW_UNRESTRICTED_NVRAM (1 << 6) -#define CSR_ALLOW_DEVICE_CONFIGURATION (1 << 7) -#define CSR_ALLOW_ANY_RECOVERY_OS (1 << 8) -#define CSR_ALLOW_UNAPPROVED_KEXTS (1 << 9) -#define CSR_ALLOW_EXECUTABLE_POLICY_OVERRIDE (1 << 10) +#define CSR_ALLOW_UNTRUSTED_KEXTS (1 << 0) +#define CSR_ALLOW_UNRESTRICTED_FS (1 << 1) +#define CSR_ALLOW_TASK_FOR_PID (1 << 2) +#define CSR_ALLOW_KERNEL_DEBUGGER (1 << 3) +#define CSR_ALLOW_APPLE_INTERNAL (1 << 4) +#define CSR_ALLOW_DESTRUCTIVE_DTRACE (1 << 5) /* name deprecated */ +#define CSR_ALLOW_UNRESTRICTED_DTRACE (1 << 5) +#define CSR_ALLOW_UNRESTRICTED_NVRAM (1 << 6) +#define CSR_ALLOW_DEVICE_CONFIGURATION (1 << 7) +#define CSR_ALLOW_ANY_RECOVERY_OS (1 << 8) +#define CSR_ALLOW_UNAPPROVED_KEXTS (1 << 9) +#define CSR_ALLOW_EXECUTABLE_POLICY_OVERRIDE (1 << 10) #define CSR_VALID_FLAGS (CSR_ALLOW_UNTRUSTED_KEXTS | \ - CSR_ALLOW_UNRESTRICTED_FS | \ - CSR_ALLOW_TASK_FOR_PID | \ - CSR_ALLOW_KERNEL_DEBUGGER | \ - CSR_ALLOW_APPLE_INTERNAL | \ - CSR_ALLOW_UNRESTRICTED_DTRACE | \ - CSR_ALLOW_UNRESTRICTED_NVRAM | \ - CSR_ALLOW_DEVICE_CONFIGURATION | \ - CSR_ALLOW_ANY_RECOVERY_OS | \ - CSR_ALLOW_UNAPPROVED_KEXTS | \ - CSR_ALLOW_EXECUTABLE_POLICY_OVERRIDE) + CSR_ALLOW_UNRESTRICTED_FS | \ + CSR_ALLOW_TASK_FOR_PID | \ + CSR_ALLOW_KERNEL_DEBUGGER | \ + CSR_ALLOW_APPLE_INTERNAL | \ + CSR_ALLOW_UNRESTRICTED_DTRACE | \ + CSR_ALLOW_UNRESTRICTED_NVRAM | \ + CSR_ALLOW_DEVICE_CONFIGURATION | \ + CSR_ALLOW_ANY_RECOVERY_OS | \ + CSR_ALLOW_UNAPPROVED_KEXTS | \ + CSR_ALLOW_EXECUTABLE_POLICY_OVERRIDE) #define CSR_ALWAYS_ENFORCED_FLAGS (CSR_ALLOW_DEVICE_CONFIGURATION | CSR_ALLOW_ANY_RECOVERY_OS) /* CSR capabilities that a booter can give to the system */ -#define CSR_CAPABILITY_UNLIMITED (1 << 0) -#define CSR_CAPABILITY_CONFIG (1 << 1) -#define CSR_CAPABILITY_APPLE_INTERNAL (1 << 2) +#define CSR_CAPABILITY_UNLIMITED (1 << 0) +#define CSR_CAPABILITY_CONFIG (1 << 1) +#define CSR_CAPABILITY_APPLE_INTERNAL (1 << 2) #define CSR_VALID_CAPABILITIES (CSR_CAPABILITY_UNLIMITED | CSR_CAPABILITY_CONFIG | CSR_CAPABILITY_APPLE_INTERNAL) diff --git a/bsd/sys/decmpfs.h b/bsd/sys/decmpfs.h index 1f57e93bf..e8f6f3a27 100644 --- a/bsd/sys/decmpfs.h +++ b/bsd/sys/decmpfs.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. 
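[Editor's note] The CSR_ALLOW_* rootless flags retabulated in the csr.h hunk above are consulted through csr_check(), declared later in the same header (outside the hunks shown). A sketch, assuming the convention that csr_check() returns 0 when the queried capability is permitted by the active configuration:

	#include <sys/csr.h>

	/* Nonzero when SIP permits unrestricted filesystem access. */
	static int
	fs_restrictions_lifted(void)
	{
		/* Assumed convention: csr_check() == 0 means "allowed". */
		return csr_check(CSR_ALLOW_UNRESTRICTED_FS) == 0;
	}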
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _SYS_DECMPFS_H_ @@ -41,10 +41,10 @@ * #define DECMPFS_ENABLE_KDEBUG_TRACES 1 */ #if DECMPFS_ENABLE_KDEBUG_TRACES -#define DECMPFS_EMIT_TRACE_ENTRY(D, ...)\ - KDBG_FILTERED((D) | DBG_FUNC_START, ## __VA_ARGS__) -#define DECMPFS_EMIT_TRACE_RETURN(D, ...)\ - KDBG_FILTERED((D) | DBG_FUNC_END, ##__VA_ARGS__) +#define DECMPFS_EMIT_TRACE_ENTRY(D, ...) \ + KDBG_FILTERED((D) | DBG_FUNC_START, ## __VA_ARGS__) +#define DECMPFS_EMIT_TRACE_RETURN(D, ...) \ + KDBG_FILTERED((D) | DBG_FUNC_END, ##__VA_ARGS__) #else #define DECMPFS_EMIT_TRACE_ENTRY(D, ...) do {} while (0) #define DECMPFS_EMIT_TRACE_RETURN(D, ...) do {} while (0) @@ -59,17 +59,17 @@ #define DECMPDBG_CODE(code) FSDBG_CODE(DBG_DECMP, code) enum { - DECMPDBG_DECOMPRESS_FILE = DECMPDBG_CODE(0), /* 0x03120000 */ - DECMPDBG_FETCH_COMPRESSED_HEADER = DECMPDBG_CODE(1), /* 0x03120004 */ - DECMPDBG_FETCH_UNCOMPRESSED_DATA = DECMPDBG_CODE(2), /* 0x03120008 */ - DECMPDBG_FREE_COMPRESSED_DATA = DECMPDBG_CODE(4), /* 0x03120010 */ - DECMPDBG_FILE_IS_COMPRESSED = DECMPDBG_CODE(5), /* 0x03120014 */ + DECMPDBG_DECOMPRESS_FILE = DECMPDBG_CODE(0),/* 0x03120000 */ + DECMPDBG_FETCH_COMPRESSED_HEADER = DECMPDBG_CODE(1),/* 0x03120004 */ + DECMPDBG_FETCH_UNCOMPRESSED_DATA = DECMPDBG_CODE(2),/* 0x03120008 */ + DECMPDBG_FREE_COMPRESSED_DATA = DECMPDBG_CODE(4),/* 0x03120010 */ + DECMPDBG_FILE_IS_COMPRESSED = DECMPDBG_CODE(5),/* 0x03120014 */ }; #define MAX_DECMPFS_XATTR_SIZE 3802 /* - NOTE: decmpfs can only be used by thread-safe filesystems + * NOTE: decmpfs can only be used by thread-safe filesystems */ #define DECMPFS_MAGIC 0x636d7066 /* cmpf */ @@ -77,34 +77,34 @@ enum { #define DECMPFS_XATTR_NAME "com.apple.decmpfs" /* extended attribute to use for decmpfs */ typedef struct __attribute__((packed)) { - /* this structure represents the xattr on disk; the fields below are little-endian */ - uint32_t compression_magic; - uint32_t compression_type; /* see the enum below */ - uint64_t uncompressed_size; - unsigned char attr_bytes[0]; /* the bytes of the attribute after the header */ + /* this structure represents the xattr on disk; the fields below are little-endian */ + uint32_t compression_magic; + uint32_t compression_type; /* see the enum below */ + uint64_t uncompressed_size; + unsigned char attr_bytes[0]; /* the bytes of the attribute after the header */ } decmpfs_disk_header; typedef struct __attribute__((packed)) { - /* this structure represents the xattr in memory; the fields below are 
host-endian */ - uint32_t attr_size; - uint32_t compression_magic; - uint32_t compression_type; - uint64_t uncompressed_size; - unsigned char attr_bytes[0]; /* the bytes of the attribute after the header */ + /* this structure represents the xattr in memory; the fields below are host-endian */ + uint32_t attr_size; + uint32_t compression_magic; + uint32_t compression_type; + uint64_t uncompressed_size; + unsigned char attr_bytes[0]; /* the bytes of the attribute after the header */ } decmpfs_header; /* compression_type values */ enum { - CMP_Type1 = 1, /* uncompressed data in xattr */ - - /* additional types defined in AppleFSCompression project */ - - CMP_MAX = 255 /* Highest compression_type supported */ + CMP_Type1 = 1,/* uncompressed data in xattr */ + + /* additional types defined in AppleFSCompression project */ + + CMP_MAX = 255/* Highest compression_type supported */ }; typedef struct { - void *buf; - user_ssize_t size; + void *buf; + user_ssize_t size; } decmpfs_vector; #ifdef KERNEL @@ -114,14 +114,14 @@ typedef struct { #include struct decmpfs_cnode { - uint8_t cmp_state; - uint8_t cmp_minimal_xattr; /* if non-zero, this file's com.apple.decmpfs xattr contained only the minimal decmpfs_disk_header */ - uint32_t cmp_type; - uint32_t lockcount; - void *lockowner; /* cnode's lock owner (if a thread is currently holding an exclusive lock) */ - uint64_t uncompressed_size __attribute__((aligned(8))); - uint64_t decompression_flags; - lck_rw_t compressed_data_lock; + uint8_t cmp_state; + uint8_t cmp_minimal_xattr; /* if non-zero, this file's com.apple.decmpfs xattr contained only the minimal decmpfs_disk_header */ + uint32_t cmp_type; + uint32_t lockcount; + void *lockowner; /* cnode's lock owner (if a thread is currently holding an exclusive lock) */ + uint64_t uncompressed_size __attribute__((aligned(8))); + uint64_t decompression_flags; + lck_rw_t compressed_data_lock; }; #endif // XNU_KERNEL_PRIVATE @@ -130,10 +130,10 @@ typedef struct decmpfs_cnode decmpfs_cnode; /* return values from decmpfs_file_is_compressed */ enum { - FILE_TYPE_UNKNOWN = 0, - FILE_IS_NOT_COMPRESSED = 1, - FILE_IS_COMPRESSED = 2, - FILE_IS_CONVERTING = 3 /* file is converting from compressed to decompressed */ + FILE_TYPE_UNKNOWN = 0, + FILE_IS_NOT_COMPRESSED = 1, + FILE_IS_COMPRESSED = 2, + FILE_IS_CONVERTING = 3/* file is converting from compressed to decompressed */ }; /* vfs entrypoints */ @@ -175,7 +175,7 @@ typedef int (*decmpfs_free_compressed_data_func)(vnode_t vp, vfs_context_t ctx, typedef uint64_t (*decmpfs_get_decompression_flags_func)(vnode_t vp, vfs_context_t ctx, decmpfs_header *hdr); // returns flags from the DECMPFS_FLAGS enumeration below enum { - DECMPFS_FLAGS_FORCE_FLUSH_ON_DECOMPRESS = 1 << 0, + DECMPFS_FLAGS_FORCE_FLUSH_ON_DECOMPRESS = 1 << 0, }; /* Versions that are supported for binary compatibility */ @@ -185,12 +185,12 @@ enum { #define DECMPFS_REGISTRATION_VERSION (DECMPFS_REGISTRATION_VERSION_V3) typedef struct { - int decmpfs_registration; - decmpfs_validate_compressed_file_func validate; - decmpfs_adjust_fetch_region_func adjust_fetch; - decmpfs_fetch_uncompressed_data_func fetch; - decmpfs_free_compressed_data_func free_data; - decmpfs_get_decompression_flags_func get_flags; + int decmpfs_registration; + decmpfs_validate_compressed_file_func validate; + decmpfs_adjust_fetch_region_func adjust_fetch; + decmpfs_fetch_uncompressed_data_func fetch; + decmpfs_free_compressed_data_func free_data; + decmpfs_get_decompression_flags_func get_flags; } decmpfs_registration; /* hooks for 
kexts to call */ diff --git a/bsd/sys/dir.h b/bsd/sys/dir.h index e6410375b..d499dca23 100644 --- a/bsd/sys/dir.h +++ b/bsd/sys/dir.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -67,7 +67,7 @@ */ #ifndef _SYS_DIR_H_ -#define _SYS_DIR_H_ +#define _SYS_DIR_H_ #ifdef KERNEL #include diff --git a/bsd/sys/dirent.h b/bsd/sys/dirent.h index 57df2ad9e..c6e1d8868 100644 --- a/bsd/sys/dirent.h +++ b/bsd/sys/dirent.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -72,8 +72,8 @@ * ino_t is in effect; (MAXPATHLEN - 1) when 64-bit ino_t is in effect. 
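[Editor's note] The packed decmpfs_disk_header above is stored little-endian in the "com.apple.decmpfs" xattr (DECMPFS_XATTR_NAME). A user-space sketch that fetches and sanity-checks it with getxattr(2) using the macOS position/options signature — the local struct mirrors only the fixed-size header fields, and the little-endian host assumption is noted in the code:

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/xattr.h>

	/* Mirrors the fixed part of the packed on-disk header (little-endian). */
	struct disk_hdr {
		uint32_t compression_magic;   /* DECMPFS_MAGIC, 'cmpf' */
		uint32_t compression_type;
		uint64_t uncompressed_size;
	} __attribute__((packed));

	int
	inspect_decmpfs(const char *path)
	{
		struct disk_hdr hdr;
		ssize_t n = getxattr(path, "com.apple.decmpfs", &hdr, sizeof(hdr),
		    0, XATTR_SHOWCOMPRESSION);

		if (n < (ssize_t)sizeof(hdr))
			return -1;                        /* absent or truncated */
		if (hdr.compression_magic != 0x636d7066)  /* assumes LE host */
			return -1;
		printf("type %u, uncompressed %llu bytes\n",
		    hdr.compression_type,
		    (unsigned long long)hdr.uncompressed_size);
		return 0;
	}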
*/ -#ifndef _SYS_DIRENT_H -#define _SYS_DIRENT_H +#ifndef _SYS_DIRENT_H +#define _SYS_DIRENT_H #include #include @@ -81,23 +81,23 @@ #include -#define __DARWIN_MAXNAMLEN 255 +#define __DARWIN_MAXNAMLEN 255 #pragma pack(4) #if !__DARWIN_64_BIT_INO_T struct dirent { - ino_t d_ino; /* file number of entry */ - __uint16_t d_reclen; /* length of this record */ - __uint8_t d_type; /* file type, see below */ - __uint8_t d_namlen; /* length of string in d_name */ - char d_name[__DARWIN_MAXNAMLEN + 1]; /* name must be no longer than this */ + ino_t d_ino; /* file number of entry */ + __uint16_t d_reclen; /* length of this record */ + __uint8_t d_type; /* file type, see below */ + __uint8_t d_namlen; /* length of string in d_name */ + char d_name[__DARWIN_MAXNAMLEN + 1]; /* name must be no longer than this */ }; #endif /* !__DARWIN_64_BIT_INO_T */ #pragma pack() -#define __DARWIN_MAXPATHLEN 1024 +#define __DARWIN_MAXPATHLEN 1024 #define __DARWIN_STRUCT_DIRENTRY { \ __uint64_t d_ino; /* file number of entry */ \ @@ -119,26 +119,26 @@ struct direntry __DARWIN_STRUCT_DIRENTRY; #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define d_fileno d_ino /* backward compatibility */ -#define MAXNAMLEN __DARWIN_MAXNAMLEN +#define d_fileno d_ino /* backward compatibility */ +#define MAXNAMLEN __DARWIN_MAXNAMLEN /* * File types */ -#define DT_UNKNOWN 0 -#define DT_FIFO 1 -#define DT_CHR 2 -#define DT_DIR 4 -#define DT_BLK 6 -#define DT_REG 8 -#define DT_LNK 10 -#define DT_SOCK 12 -#define DT_WHT 14 +#define DT_UNKNOWN 0 +#define DT_FIFO 1 +#define DT_CHR 2 +#define DT_DIR 4 +#define DT_BLK 6 +#define DT_REG 8 +#define DT_LNK 10 +#define DT_SOCK 12 +#define DT_WHT 14 /* * Convert between stat structure types and directory types. */ -#define IFTODT(mode) (((mode) & 0170000) >> 12) -#define DTTOIF(dirtype) ((dirtype) << 12) +#define IFTODT(mode) (((mode) & 0170000) >> 12) +#define DTTOIF(dirtype) ((dirtype) << 12) #endif #endif /* _SYS_DIRENT_H */ diff --git a/bsd/sys/dis_tables.h b/bsd/sys/dis_tables.h index fcbc46b06..cb8c04883 100644 --- a/bsd/sys/dis_tables.h +++ b/bsd/sys/dis_tables.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
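[Editor's note] The IFTODT()/DTTOIF() macros realigned in the dirent.h hunk above convert between the file-type bits of st_mode and the DT_* directory-entry types: IFTODT() shifts the 0170000 type field down by 12 bits, so S_IFDIR (0040000) maps to DT_DIR (4). A quick round-trip illustration using only those macros:

	#include <assert.h>
	#include <dirent.h>
	#include <sys/stat.h>

	static void
	dirent_type_roundtrip(void)
	{
		assert(IFTODT(S_IFDIR) == DT_DIR);   /* 0040000 >> 12 == 4  */
		assert(IFTODT(S_IFREG) == DT_REG);   /* 0100000 >> 12 == 8  */
		assert(DTTOIF(DT_LNK)  == S_IFLNK);  /* 10 << 12 == 0120000 */
	}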
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * Copyright (c) 1988 Carnegie-Mellon University @@ -36,7 +36,7 @@ * HISTORY */ -#ifndef _SYS_DIS_TABLES_H_ +#ifndef _SYS_DIS_TABLES_H_ #define _SYS_DIS_TABLES_H_ /* @@ -45,4 +45,4 @@ #include -#endif /* _SYS_DIS_TABLES_H_ */ +#endif /* _SYS_DIS_TABLES_H_ */ diff --git a/bsd/sys/disk.h b/bsd/sys/disk.h index f1c4c821e..f0a7a15da 100644 --- a/bsd/sys/disk.h +++ b/bsd/sys/disk.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _SYS_DISK_H_ -#define _SYS_DISK_H_ +#ifndef _SYS_DISK_H_ +#define _SYS_DISK_H_ #include #include @@ -88,62 +88,55 @@ #define DK_SYNCHRONIZE_OPTION_BARRIER 0x00000002 -typedef struct -{ - uint64_t offset; - uint64_t length; +typedef struct{ + uint64_t offset; + uint64_t length; } dk_extent_t; -typedef struct -{ - char path[128]; +typedef struct{ + char path[128]; } dk_firmware_path_t; -typedef struct -{ - uint64_t blockCount; - uint32_t blockSize; +typedef struct{ + uint64_t blockCount; + uint32_t blockSize; - uint8_t reserved0096[4]; /* reserved, clear to zero */ + uint8_t reserved0096[4]; /* reserved, clear to zero */ } dk_format_capacity_t; -typedef struct -{ - dk_format_capacity_t * capacities; - uint32_t capacitiesCount; /* use zero to probe count */ +typedef struct{ + dk_format_capacity_t * capacities; + uint32_t capacitiesCount; /* use zero to probe count */ #ifdef __LP64__ - uint8_t reserved0096[4]; /* reserved, clear to zero */ + uint8_t reserved0096[4]; /* reserved, clear to zero */ #else /* !__LP64__ */ - uint8_t reserved0064[8]; /* reserved, clear to zero */ + uint8_t reserved0064[8]; /* reserved, clear to zero */ #endif /* !__LP64__ */ } dk_format_capacities_t; -typedef struct -{ - uint64_t offset; - uint64_t length; +typedef struct{ + uint64_t offset; + uint64_t length; - uint32_t options; + uint32_t options; - uint8_t reserved0160[4]; /* reserved, clear to zero */ + uint8_t reserved0160[4]; /* reserved, clear to zero */ } dk_synchronize_t; -typedef struct -{ - dk_extent_t * extents; - uint32_t extentsCount; +typedef struct{ + dk_extent_t * extents; + uint32_t extentsCount; - uint32_t options; + uint32_t options; #ifndef __LP64__ - uint8_t reserved0096[4]; /* reserved, clear to zero */ + uint8_t reserved0096[4]; /* reserved, clear to zero */ #endif /* !__LP64__ */ } dk_unmap_t; -typedef struct -{ +typedef struct{ 
uint64_t flags; uint64_t hotfile_size; /* in bytes */ uint64_t hibernate_minsize; @@ -160,26 +153,23 @@ typedef struct #define DK_PROVISION_TYPE_DEALLOCATED 0x01 #define DK_PROVISION_TYPE_ANCHORED 0x02 -typedef struct -{ +typedef struct{ uint64_t offset; uint64_t length; uint8_t provisionType; uint8_t reserved[7]; } dk_provision_extent_t; -typedef struct -{ +typedef struct{ uint64_t offset; /* input: logical byte offset */ uint64_t length; /* input: byte length, 0 for whole length */ uint64_t options; /* reserved, clear to zero */ uint32_t reserved; /* not used */ uint32_t extentsCount; /* input/output: count for extents */ - dk_provision_extent_t * extents; /* output: provision extents */ + dk_provision_extent_t * extents; /* output: provision extents */ } dk_provision_status_t; -typedef struct -{ +typedef struct{ uint64_t options; /* reserved, clear to zero */ uint64_t reserved; /* reserved, clear to zero */ uint64_t description_size; @@ -249,27 +239,25 @@ typedef struct #define DK_TIER_TO_PRIORITY(tier) (((tier) << DK_TIER_SHIFT) | ~DK_TIER_MASK) #define DK_PRIORITY_TO_TIER(priority) ((priority) >> DK_TIER_SHIFT) -typedef struct -{ - uint64_t offset; - uint64_t length; +typedef struct{ + uint64_t offset; + uint64_t length; - uint8_t reserved0128[12]; /* reserved, clear to zero */ + uint8_t reserved0128[12]; /* reserved, clear to zero */ - dev_t dev; + dev_t dev; } dk_physical_extent_t; -typedef struct -{ - dk_extent_t * extents; - uint32_t extentsCount; +typedef struct{ + dk_extent_t * extents; + uint32_t extentsCount; - uint8_t tier; + uint8_t tier; #ifdef __LP64__ - uint8_t reserved0104[3]; /* reserved, clear to zero */ + uint8_t reserved0104[3]; /* reserved, clear to zero */ #else /* !__LP64__ */ - uint8_t reserved0072[7]; /* reserved, clear to zero */ + uint8_t reserved0072[7]; /* reserved, clear to zero */ #endif /* !__LP64__ */ } dk_set_tier_t; @@ -288,12 +276,11 @@ typedef struct #define DKIOCGETIOMINSATURATIONBYTECOUNT _IOR('d', 88, uint32_t) #ifdef XNU_KERNEL_PRIVATE -typedef struct -{ - boolean_t mi_mdev; /* Is this a memdev device? */ - boolean_t mi_phys; /* Physical memory? */ - uint32_t mi_base; /* Base page number of the device? */ - uint64_t mi_size; /* Size of the device (in ) */ +typedef struct{ + boolean_t mi_mdev; /* Is this a memdev device? */ + boolean_t mi_phys; /* Physical memory? */ + uint32_t mi_base; /* Base page number of the device? 
*/ + uint64_t mi_size; /* Size of the device (in ) */ } dk_memdev_info_t; typedef dk_memdev_info_t memdev_info_t; @@ -302,17 +289,17 @@ typedef dk_memdev_info_t memdev_info_t; #endif /* XNU_KERNEL_PRIVATE */ #ifdef PRIVATE typedef struct _dk_cs_pin { - dk_extent_t cp_extent; - int64_t cp_flags; + dk_extent_t cp_extent; + int64_t cp_flags; } _dk_cs_pin_t; /* The following are modifiers to _DKIOCCSPINEXTENT/cp_flags operation */ -#define _DKIOCCSPINTOFASTMEDIA (0) /* Pin extent to the fast (SSD) media */ -#define _DKIOCCSPINFORHIBERNATION (1 << 0) /* Pin of hibernation file, content not preserved */ -#define _DKIOCCSPINDISCARDBLACKLIST (1 << 1) /* Hibernation complete/error, stop blacklisting */ -#define _DKIOCCSPINTOSLOWMEDIA (1 << 2) /* Pin extent to the slow (HDD) media */ -#define _DKIOCCSTEMPORARYPIN (1 << 3) /* Relocate, but do not pin, to indicated media */ -#define _DKIOCCSHIBERNATEIMGSIZE (1 << 4) /* Anticipate/Max size of the upcoming hibernate */ -#define _DKIOCCSPINFORSWAPFILE (1 << 5) /* Pin of swap file, content not preserved */ +#define _DKIOCCSPINTOFASTMEDIA (0) /* Pin extent to the fast (SSD) media */ +#define _DKIOCCSPINFORHIBERNATION (1 << 0) /* Pin of hibernation file, content not preserved */ +#define _DKIOCCSPINDISCARDBLACKLIST (1 << 1) /* Hibernation complete/error, stop blacklisting */ +#define _DKIOCCSPINTOSLOWMEDIA (1 << 2) /* Pin extent to the slow (HDD) media */ +#define _DKIOCCSTEMPORARYPIN (1 << 3) /* Relocate, but do not pin, to indicated media */ +#define _DKIOCCSHIBERNATEIMGSIZE (1 << 4) /* Anticipate/Max size of the upcoming hibernate */ +#define _DKIOCCSPINFORSWAPFILE (1 << 5) /* Pin of swap file, content not preserved */ #define _DKIOCCSSETLVNAME _IOW('d', 198, char[256]) #define _DKIOCCSPINEXTENT _IOW('d', 199, _dk_cs_pin_t) @@ -320,26 +307,26 @@ typedef struct _dk_cs_pin { #define _DKIOCGETMIGRATIONUNITBYTESIZE _IOR('d', 201, uint32_t) typedef struct _dk_cs_map { - dk_extent_t cm_extent; - uint64_t cm_bytes_mapped; + dk_extent_t cm_extent; + uint64_t cm_bytes_mapped; } _dk_cs_map_t; typedef struct _dk_cs_unmap { - dk_extent_t *extents; + dk_extent_t *extents; uint32_t extentsCount; uint32_t options; } _dk_cs_unmap_t; #define _DKIOCCSMAP _IOWR('d', 202, _dk_cs_map_t) // No longer used: _DKIOCCSSETFSVNODE (203) & _DKIOCCSGETFREEBYTES (204) -#define _DKIOCCSUNMAP _IOWR('d', 205, _dk_cs_unmap_t) +#define _DKIOCCSUNMAP _IOWR('d', 205, _dk_cs_unmap_t) typedef enum { DK_APFS_ONE_DEVICE = 1, DK_APFS_FUSION } dk_apfs_flavour_t; -#define DKIOCGETAPFSFLAVOUR _IOR('d', 91, dk_apfs_flavour_t) +#define DKIOCGETAPFSFLAVOUR _IOR('d', 91, dk_apfs_flavour_t) // Extent's offset and length returned in bytes typedef struct dk_apfs_wbc_range { @@ -362,4 +349,4 @@ typedef struct dk_apfs_wbc_range { #endif /* TARGET_OS_EMBEDDED */ #endif /* PRIVATE */ -#endif /* _SYS_DISK_H_ */ +#endif /* _SYS_DISK_H_ */ diff --git a/bsd/sys/disklabel.h b/bsd/sys/disklabel.h index 61c14411d..de27770be 100644 --- a/bsd/sys/disklabel.h +++ b/bsd/sys/disklabel.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
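[Editor's note] The DKIOC* codes reformatted in the disk.h hunks above are issued with ioctl(2) against a /dev/diskN node. A user-space sketch sizing a disk with DKIOCGETBLOCKSIZE and DKIOCGETBLOCKCOUNT — long-standing codes from the same header, though they fall outside the hunks shown here; opening raw disk nodes typically requires privileges:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/disk.h>
	#include <sys/ioctl.h>

	/* Print a disk's capacity; pass e.g. "/dev/disk0". */
	int
	print_disk_size(const char *dev)
	{
		uint32_t blksz = 0;
		uint64_t blkcnt = 0;
		int fd = open(dev, O_RDONLY);

		if (fd < 0)
			return -1;
		if (ioctl(fd, DKIOCGETBLOCKSIZE, &blksz) == 0 &&
		    ioctl(fd, DKIOCGETBLOCKCOUNT, &blkcnt) == 0) {
			printf("%s: %llu bytes\n", dev,
			    (unsigned long long)blkcnt * blksz);
		}
		close(fd);
		return 0;
	}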
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -64,15 +64,15 @@ #define _SYS_DISKLABEL_H_ #include -#include /* for daddr_t */ +#include /* for daddr_t */ #ifdef __APPLE_API_OBSOLETE /* * Disk description table, see disktab(5) */ -#define _PATH_DISKTAB "/etc/disktab" -#define DISKTAB "/etc/disktab" /* deprecated */ +#define _PATH_DISKTAB "/etc/disktab" +#define DISKTAB "/etc/disktab" /* deprecated */ /* * Each disk has a label which includes information about the hardware @@ -88,7 +88,7 @@ * This is the maximum value of MAXPARTITIONS for which 'struct disklabel' * is <= DEV_BSIZE bytes long. If MAXPARTITIONS is greater than this, beware. */ -#define MAXMAXPARTITIONS 22 +#define MAXMAXPARTITIONS 22 #if MAXPARTITIONS > MAXMAXPARTITIONS #warning beware: MAXPARTITIONS bigger than MAXMAXPARTITIONS #endif @@ -96,21 +96,21 @@ /* * Translate between device numbers and major/disk unit/disk partition. */ -#define DISKUNIT(dev) (minor(dev) / MAXPARTITIONS) -#define DISKPART(dev) (minor(dev) % MAXPARTITIONS) -#define MAKEDISKDEV(maj, unit, part) \ +#define DISKUNIT(dev) (minor(dev) / MAXPARTITIONS) +#define DISKPART(dev) (minor(dev) % MAXPARTITIONS) +#define MAKEDISKDEV(maj, unit, part) \ (makedev((maj), ((unit) * MAXPARTITIONS) + (part))) -#define DISKMAGIC ((u_int32_t)0x82564557) /* The disk magic number */ +#define DISKMAGIC ((u_int32_t)0x82564557) /* The disk magic number */ #ifndef LOCORE struct disklabel { - u_int32_t d_magic; /* the magic number */ - u_int16_t d_type; /* drive type */ - u_int16_t d_subtype; /* controller/d_type specific */ - char d_typename[16]; /* type name, e.g. "eagle" */ + u_int32_t d_magic; /* the magic number */ + u_int16_t d_type; /* drive type */ + u_int16_t d_subtype; /* controller/d_type specific */ + char d_typename[16]; /* type name, e.g. "eagle" */ - /* + /* * d_packname contains the pack identifier and is returned when * the disklabel is read off the disk or in-core copy. * d_boot0 and d_boot1 are the (optional) names of the @@ -119,23 +119,23 @@ struct disklabel { * getdiskbyname(3) to retrieve the values from /etc/disktab. 
*/ union { - char un_d_packname[16]; /* pack identifier */ + char un_d_packname[16]; /* pack identifier */ struct { - char *un_d_boot0; /* primary bootstrap name */ - char *un_d_boot1; /* secondary bootstrap name */ - } un_b; - } d_un; -#define d_packname d_un.un_d_packname -#define d_boot0 d_un.un_b.un_d_boot0 -#define d_boot1 d_un.un_b.un_d_boot1 - - /* disk geometry: */ - u_int32_t d_secsize; /* # of bytes per sector */ - u_int32_t d_nsectors; /* # of data sectors per track */ - u_int32_t d_ntracks; /* # of tracks per cylinder */ - u_int32_t d_ncylinders; /* # of data cylinders per unit */ - u_int32_t d_secpercyl; /* # of data sectors per cylinder */ - u_int32_t d_secperunit; /* # of data sectors per unit */ + char *un_d_boot0; /* primary bootstrap name */ + char *un_d_boot1; /* secondary bootstrap name */ + } un_b; + } d_un; +#define d_packname d_un.un_d_packname +#define d_boot0 d_un.un_b.un_d_boot0 +#define d_boot1 d_un.un_b.un_d_boot1 + + /* disk geometry: */ + u_int32_t d_secsize; /* # of bytes per sector */ + u_int32_t d_nsectors; /* # of data sectors per track */ + u_int32_t d_ntracks; /* # of tracks per cylinder */ + u_int32_t d_ncylinders; /* # of data cylinders per unit */ + u_int32_t d_secpercyl; /* # of data sectors per cylinder */ + u_int32_t d_secperunit; /* # of data sectors per unit */ /* * Spares (bad sector replacements) below are not counted in @@ -143,15 +143,15 @@ struct disklabel { * be physical sectors which occupy space at the end of each * track and/or cylinder. */ - u_int16_t d_sparespertrack; /* # of spare sectors per track */ - u_int16_t d_sparespercyl; /* # of spare sectors per cylinder */ + u_int16_t d_sparespertrack; /* # of spare sectors per track */ + u_int16_t d_sparespercyl; /* # of spare sectors per cylinder */ /* * Alternate cylinders include maintenance, replacement, configuration * description areas, etc. */ - u_int32_t d_acylinders; /* # of alt. cylinders per unit */ + u_int32_t d_acylinders; /* # of alt. cylinders per unit */ - /* hardware characteristics: */ + /* hardware characteristics: */ /* * d_interleave, d_trackskew and d_cylskew describe perturbations * in the media format used to compensate for a slow controller. @@ -168,61 +168,61 @@ struct disklabel { * is the offset of sector 0 on cylinder N relative to sector 0 * on cylinder N-1. */ - u_int16_t d_rpm; /* rotational speed */ - u_int16_t d_interleave; /* hardware sector interleave */ - u_int16_t d_trackskew; /* sector 0 skew, per track */ - u_int16_t d_cylskew; /* sector 0 skew, per cylinder */ - u_int32_t d_headswitch; /* head switch time, usec */ - u_int32_t d_trkseek; /* track-to-track seek, usec */ - u_int32_t d_flags; /* generic flags */ + u_int16_t d_rpm; /* rotational speed */ + u_int16_t d_interleave; /* hardware sector interleave */ + u_int16_t d_trackskew; /* sector 0 skew, per track */ + u_int16_t d_cylskew; /* sector 0 skew, per cylinder */ + u_int32_t d_headswitch; /* head switch time, usec */ + u_int32_t d_trkseek; /* track-to-track seek, usec */ + u_int32_t d_flags; /* generic flags */ #define NDDATA 5 - u_int32_t d_drivedata[NDDATA]; /* drive-type specific information */ + u_int32_t d_drivedata[NDDATA]; /* drive-type specific information */ #define NSPARE 5 - u_int32_t d_spare[NSPARE]; /* reserved for future use */ - u_int32_t d_magic2; /* the magic number (again) */ - u_int16_t d_checksum; /* xor of data incl. 
partitions */ - - /* filesystem and partition information: */ - u_int16_t d_npartitions; /* number of partitions in following */ - u_int32_t d_bbsize; /* size of boot area at sn0, bytes */ - u_int32_t d_sbsize; /* max size of fs superblock, bytes */ - struct partition { /* the partition table */ - u_int32_t p_size; /* number of sectors in partition */ - u_int32_t p_offset; /* starting sector */ - u_int32_t p_fsize; /* filesystem basic fragment size */ - u_int8_t p_fstype; /* filesystem type, see below */ - u_int8_t p_frag; /* filesystem fragments per block */ + u_int32_t d_spare[NSPARE]; /* reserved for future use */ + u_int32_t d_magic2; /* the magic number (again) */ + u_int16_t d_checksum; /* xor of data incl. partitions */ + + /* filesystem and partition information: */ + u_int16_t d_npartitions; /* number of partitions in following */ + u_int32_t d_bbsize; /* size of boot area at sn0, bytes */ + u_int32_t d_sbsize; /* max size of fs superblock, bytes */ + struct partition { /* the partition table */ + u_int32_t p_size; /* number of sectors in partition */ + u_int32_t p_offset; /* starting sector */ + u_int32_t p_fsize; /* filesystem basic fragment size */ + u_int8_t p_fstype; /* filesystem type, see below */ + u_int8_t p_frag; /* filesystem fragments per block */ union { - u_int16_t cpg; /* UFS: FS cylinders per group */ - u_int16_t sgs; /* LFS: FS segment shift */ + u_int16_t cpg; /* UFS: FS cylinders per group */ + u_int16_t sgs; /* LFS: FS segment shift */ } __partition_u1; -#define p_cpg __partition_u1.cpg -#define p_sgs __partition_u1.sgs - } d_partitions[MAXPARTITIONS]; /* actually may be more */ +#define p_cpg __partition_u1.cpg +#define p_sgs __partition_u1.sgs + } d_partitions[MAXPARTITIONS]; /* actually may be more */ }; #else /* LOCORE */ - /* - * offsets for asm boot files. - */ - .set d_secsize,40 - .set d_nsectors,44 - .set d_ntracks,48 - .set d_ncylinders,52 - .set d_secpercyl,56 - .set d_secperunit,60 - .set d_end_,276 /* size of disk label */ +/* + * offsets for asm boot files. + */ +.set d_secsize, 40 +.set d_nsectors, 44 +.set d_ntracks, 48 +.set d_ncylinders, 52 +.set d_secpercyl, 56 +.set d_secperunit, 60 +.set d_end_, 276 /* size of disk label */ #endif /* LOCORE */ /* d_type values: */ -#define DTYPE_SMD 1 /* SMD, XSMD; VAX hp/up */ -#define DTYPE_MSCP 2 /* MSCP */ -#define DTYPE_DEC 3 /* other DEC (rk, rl) */ -#define DTYPE_SCSI 4 /* SCSI */ -#define DTYPE_ESDI 5 /* ESDI interface */ -#define DTYPE_ST506 6 /* ST506 etc. */ -#define DTYPE_HPIB 7 /* CS/80 on HP-IB */ -#define DTYPE_HPFL 8 /* HP Fiber-link */ -#define DTYPE_FLOPPY 10 /* floppy */ +#define DTYPE_SMD 1 /* SMD, XSMD; VAX hp/up */ +#define DTYPE_MSCP 2 /* MSCP */ +#define DTYPE_DEC 3 /* other DEC (rk, rl) */ +#define DTYPE_SCSI 4 /* SCSI */ +#define DTYPE_ESDI 5 /* ESDI interface */ +#define DTYPE_ST506 6 /* ST506 etc. */ +#define DTYPE_HPIB 7 /* CS/80 on HP-IB */ +#define DTYPE_HPFL 8 /* HP Fiber-link */ +#define DTYPE_FLOPPY 10 /* floppy */ #ifdef DKTYPENAMES static const char *dktypenames[] = { @@ -239,7 +239,7 @@ static const char *dktypenames[] = { "floppy", NULL }; -#define DKMAXTYPES (sizeof(dktypenames) / sizeof(dktypenames[0]) - 1) +#define DKMAXTYPES (sizeof(dktypenames) / sizeof(dktypenames[0]) - 1) #endif /* @@ -247,24 +247,24 @@ static const char *dktypenames[] = { * Used to interpret other filesystem-specific * per-partition information. 
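/*
 * Sketch of how d_type and dktypenames[] relate: d_type indexes the table,
 * and DKMAXTYPES (defined above) counts the non-NULL entries, so any value
 * at or past it has no name.  dtype_name() is a hypothetical helper;
 * compile with DKTYPENAMES defined so the table is emitted.
 */
static const char *
dtype_name(u_int16_t t)
{
	return (t < DKMAXTYPES) ? dktypenames[t] : "(unknown)";
}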
*/ -#define FS_UNUSED 0 /* unused */ -#define FS_SWAP 1 /* swap */ -#define FS_V6 2 /* Sixth Edition */ -#define FS_V7 3 /* Seventh Edition */ -#define FS_SYSV 4 /* System V */ -#define FS_V71K 5 /* V7 with 1K blocks (4.1, 2.9) */ -#define FS_V8 6 /* Eighth Edition, 4K blocks */ -#define FS_BSDFFS 7 /* 4.2BSD fast file system */ -#define FS_MSDOS 8 /* MSDOS file system */ -#define FS_BSDLFS 9 /* 4.4BSD log-structured file system */ -#define FS_OTHER 10 /* in use, but unknown/unsupported */ -#define FS_HPFS 11 /* OS/2 high-performance file system */ -#define FS_ISO9660 12 /* ISO 9660, normally CD-ROM */ -#define FS_BOOT 13 /* partition contains bootstrap */ -#define FS_ADOS 14 /* AmigaDOS fast file system */ -#define FS_HFS 15 /* Macintosh HFS */ - -#ifdef DKTYPENAMES +#define FS_UNUSED 0 /* unused */ +#define FS_SWAP 1 /* swap */ +#define FS_V6 2 /* Sixth Edition */ +#define FS_V7 3 /* Seventh Edition */ +#define FS_SYSV 4 /* System V */ +#define FS_V71K 5 /* V7 with 1K blocks (4.1, 2.9) */ +#define FS_V8 6 /* Eighth Edition, 4K blocks */ +#define FS_BSDFFS 7 /* 4.2BSD fast file system */ +#define FS_MSDOS 8 /* MSDOS file system */ +#define FS_BSDLFS 9 /* 4.4BSD log-structured file system */ +#define FS_OTHER 10 /* in use, but unknown/unsupported */ +#define FS_HPFS 11 /* OS/2 high-performance file system */ +#define FS_ISO9660 12 /* ISO 9660, normally CD-ROM */ +#define FS_BOOT 13 /* partition contains bootstrap */ +#define FS_ADOS 14 /* AmigaDOS fast file system */ +#define FS_HFS 15 /* Macintosh HFS */ + +#ifdef DKTYPENAMES static const char *fstypenames[] = { "unused", "swap", @@ -284,37 +284,37 @@ static const char *fstypenames[] = { "HFS", NULL }; -#define FSMAXTYPES (sizeof(fstypenames) / sizeof(fstypenames[0]) - 1) +#define FSMAXTYPES (sizeof(fstypenames) / sizeof(fstypenames[0]) - 1) #endif /* * flags shared by various drives: */ -#define D_REMOVABLE 0x01 /* removable media */ -#define D_ECC 0x02 /* supports ECC */ -#define D_BADSECT 0x04 /* supports bad sector forw. */ -#define D_RAMDISK 0x08 /* disk emulator */ -#define D_CHAIN 0x10 /* can do back-back transfers */ +#define D_REMOVABLE 0x01 /* removable media */ +#define D_ECC 0x02 /* supports ECC */ +#define D_BADSECT 0x04 /* supports bad sector forw. */ +#define D_RAMDISK 0x08 /* disk emulator */ +#define D_CHAIN 0x10 /* can do back-back transfers */ /* * Drive data for SMD. */ -#define d_smdflags d_drivedata[0] -#define D_SSE 0x1 /* supports skip sectoring */ -#define d_mindist d_drivedata[1] -#define d_maxdist d_drivedata[2] -#define d_sdist d_drivedata[3] +#define d_smdflags d_drivedata[0] +#define D_SSE 0x1 /* supports skip sectoring */ +#define d_mindist d_drivedata[1] +#define d_maxdist d_drivedata[2] +#define d_sdist d_drivedata[3] /* * Drive data for ST506. */ -#define d_precompcyl d_drivedata[0] -#define d_gap3 d_drivedata[1] /* used only when formatting */ +#define d_precompcyl d_drivedata[0] +#define d_gap3 d_drivedata[1] /* used only when formatting */ /* * Drive data for SCSI. */ -#define d_blind d_drivedata[0] +#define d_blind d_drivedata[0] #ifndef LOCORE /* @@ -323,10 +323,10 @@ static const char *fstypenames[] = { * are device- and driver-dependent. */ struct format_op { - char *df_buf; - int df_count; /* value-result */ - daddr_t df_startblk; - int df_reg[8]; /* result */ + char *df_buf; + int df_count; /* value-result */ + daddr_t df_startblk; + int df_reg[8]; /* result */ }; /* @@ -341,21 +341,21 @@ struct partinfo { /* * Disk-specific ioctls. 
*/ - /* get and set disklabel; DIOCGPART used internally */ -#define DIOCGDINFO _IOR('d', 101, struct disklabel)/* get */ -#define DIOCSDINFO _IOW('d', 102, struct disklabel)/* set */ -#define DIOCWDINFO _IOW('d', 103, struct disklabel)/* set, update disk */ -#define DIOCGPART _IOW('d', 104, struct partinfo) /* get partition */ +/* get and set disklabel; DIOCGPART used internally */ +#define DIOCGDINFO _IOR('d', 101, struct disklabel)/* get */ +#define DIOCSDINFO _IOW('d', 102, struct disklabel)/* set */ +#define DIOCWDINFO _IOW('d', 103, struct disklabel)/* set, update disk */ +#define DIOCGPART _IOW('d', 104, struct partinfo) /* get partition */ /* do format operation, read or write */ -#define DIOCRFORMAT _IOWR('d', 105, struct format_op) -#define DIOCWFORMAT _IOWR('d', 106, struct format_op) +#define DIOCRFORMAT _IOWR('d', 105, struct format_op) +#define DIOCWFORMAT _IOWR('d', 106, struct format_op) -#define DIOCSSTEP _IOW('d', 107, int) /* set step rate */ -#define DIOCSRETRIES _IOW('d', 108, int) /* set # of retries */ -#define DIOCWLABEL _IOW('d', 109, int) /* write en/disable label */ +#define DIOCSSTEP _IOW('d', 107, int) /* set step rate */ +#define DIOCSRETRIES _IOW('d', 108, int) /* set # of retries */ +#define DIOCWLABEL _IOW('d', 109, int) /* write en/disable label */ -#define DIOCSBAD _IOW('d', 110, struct dkbad) /* set kernel dkbad */ +#define DIOCSBAD _IOW('d', 110, struct dkbad) /* set kernel dkbad */ #endif /* LOCORE */ @@ -372,4 +372,3 @@ __END_DECLS #endif /* __APPLE_API_OBSOLETE */ #endif /* ! _SYS_DISKLABEL_H_ */ - diff --git a/bsd/sys/disktab.h b/bsd/sys/disktab.h index a7efed39a..c404a95df 100644 --- a/bsd/sys/disktab.h +++ b/bsd/sys/disktab.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -40,70 +40,70 @@ * /etc/disktab, sorry. 
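/*
 * Sketch of the disklabel ioctls defined above: read the in-core label
 * with DIOCGDINFO and sanity-check both magic fields.  Assumes a caller
 * that already opened the raw device; read_label() is a hypothetical
 * helper, and errno is left as set by ioctl(2) on failure.
 */
#include <sys/ioctl.h>

static int
read_label(int fd, struct disklabel *dl)
{
	if (ioctl(fd, DIOCGDINFO, dl) == -1)
		return -1;				/* ioctl failed */
	if (dl->d_magic != DISKMAGIC || dl->d_magic2 != DISKMAGIC)
		return -1;				/* not a valid label */
	return 0;
}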
*/ -#ifndef _SYS_DISKTAB_ -#define _SYS_DISKTAB_ +#ifndef _SYS_DISKTAB_ +#define _SYS_DISKTAB_ #include -#ifdef __APPLE_API_OBSOLETE +#ifdef __APPLE_API_OBSOLETE /* * Disk description table, see disktab(5) */ #ifndef KERNEL -#define DISKTAB "/etc/disktab" -#endif /* !KERNEL */ +#define DISKTAB "/etc/disktab" +#endif /* !KERNEL */ -#define MAXDNMLEN 24 // drive name length -#define MAXMPTLEN 16 // mount point length -#define MAXFSTLEN 8 // file system type length -#define MAXTYPLEN 24 // drive type length -#define NBOOTS 2 // # of boot blocks -#define MAXBFLEN 24 // bootfile name length -#define MAXHNLEN 32 // host name length -#define NPART 8 // # of partitions +#define MAXDNMLEN 24 // drive name length +#define MAXMPTLEN 16 // mount point length +#define MAXFSTLEN 8 // file system type length +#define MAXTYPLEN 24 // drive type length +#define NBOOTS 2 // # of boot blocks +#define MAXBFLEN 24 // bootfile name length +#define MAXHNLEN 32 // host name length +#define NPART 8 // # of partitions typedef struct partition { - int p_base; /* base sector# of partition */ - int p_size; /* #sectors in partition */ - short p_bsize; /* block size in bytes */ - short p_fsize; /* frag size in bytes */ - char p_opt; /* 's'pace/'t'ime optimization pref */ - short p_cpg; /* cylinders per group */ - short p_density; /* bytes per inode density */ - char p_minfree; /* minfree (%) */ - char p_newfs; /* run newfs during init */ - char p_mountpt[MAXMPTLEN];/* mount point */ - char p_automnt; /* auto-mount when inserted */ - char p_type[MAXFSTLEN];/* file system type */ + int p_base; /* base sector# of partition */ + int p_size; /* #sectors in partition */ + short p_bsize; /* block size in bytes */ + short p_fsize; /* frag size in bytes */ + char p_opt; /* 's'pace/'t'ime optimization pref */ + short p_cpg; /* cylinders per group */ + short p_density; /* bytes per inode density */ + char p_minfree; /* minfree (%) */ + char p_newfs; /* run newfs during init */ + char p_mountpt[MAXMPTLEN];/* mount point */ + char p_automnt; /* auto-mount when inserted */ + char p_type[MAXFSTLEN];/* file system type */ } partition_t; typedef struct disktab { - char d_name[MAXDNMLEN]; /* drive name */ - char d_type[MAXTYPLEN]; /* drive type */ - int d_secsize; /* sector size in bytes */ - int d_ntracks; /* # tracks/cylinder */ - int d_nsectors; /* # sectors/track */ - int d_ncylinders; /* # cylinders */ - int d_rpm; /* revolutions/minute */ - short d_front; /* size of front porch (sectors) */ - short d_back; /* size of back porch (sectors) */ - short d_ngroups; /* number of alt groups */ - short d_ag_size; /* alt group size (sectors) */ - short d_ag_alts; /* alternate sectors / alt group */ - short d_ag_off; /* sector offset to first alternate */ - int d_boot0_blkno[NBOOTS]; /* "blk 0" boot locations */ - char d_bootfile[MAXBFLEN]; /* default bootfile */ - char d_hostname[MAXHNLEN]; /* host name */ - char d_rootpartition; /* root partition e.g. 'a' */ - char d_rwpartition; /* r/w partition e.g. 
'b' */ + char d_name[MAXDNMLEN]; /* drive name */ + char d_type[MAXTYPLEN]; /* drive type */ + int d_secsize; /* sector size in bytes */ + int d_ntracks; /* # tracks/cylinder */ + int d_nsectors; /* # sectors/track */ + int d_ncylinders; /* # cylinders */ + int d_rpm; /* revolutions/minute */ + short d_front; /* size of front porch (sectors) */ + short d_back; /* size of back porch (sectors) */ + short d_ngroups; /* number of alt groups */ + short d_ag_size; /* alt group size (sectors) */ + short d_ag_alts; /* alternate sectors / alt group */ + short d_ag_off; /* sector offset to first alternate */ + int d_boot0_blkno[NBOOTS]; /* "blk 0" boot locations */ + char d_bootfile[MAXBFLEN]; /* default bootfile */ + char d_hostname[MAXHNLEN]; /* host name */ + char d_rootpartition; /* root partition e.g. 'a' */ + char d_rwpartition; /* r/w partition e.g. 'b' */ partition_t d_partitions[NPART]; } disktab_t; #ifndef KERNEL -struct disktab *getdiskbyname(), *getdiskbydev(); -#endif /* !KERNEL */ +struct disktab *getdiskbyname(), *getdiskbydev(); +#endif /* !KERNEL */ -#endif /* __APPLE_API_OBSOLETE */ +#endif /* __APPLE_API_OBSOLETE */ -#endif /* _SYS_DISKTAB_ */ +#endif /* _SYS_DISKTAB_ */ diff --git a/bsd/sys/dkstat.h b/bsd/sys/dkstat.h index 4fe84af4c..179611e44 100644 --- a/bsd/sys/dkstat.h +++ b/bsd/sys/dkstat.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -66,8 +66,8 @@ * @(#)dkstat.h 8.2 (Berkeley) 1/21/94 */ -#ifndef _SYS_DKSTAT_H_ -#define _SYS_DKSTAT_H_ +#ifndef _SYS_DKSTAT_H_ +#define _SYS_DKSTAT_H_ #ifdef KERNEL_PRIVATE extern long tk_cancc; @@ -76,4 +76,4 @@ extern long tk_nout; extern long tk_rawcc; #endif -#endif /* _SYS_DKSTAT_H_ */ +#endif /* _SYS_DKSTAT_H_ */ diff --git a/bsd/sys/dmap.h b/bsd/sys/dmap.h index a58593098..ff36ad478 100644 --- a/bsd/sys/dmap.h +++ b/bsd/sys/dmap.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
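/*
 * Sketch: deriving a raw byte count from the disktab_t geometry fields
 * above (sectors/track x tracks/cylinder x cylinders x bytes/sector).
 * disktab_bytes() is a hypothetical helper and ignores the front/back
 * porch fields, which reserve additional sectors on real media.
 */
static long long
disktab_bytes(const disktab_t *dt)
{
	return (long long)dt->d_nsectors * dt->d_ntracks *
	    dt->d_ncylinders * dt->d_secsize;
}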
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_DMAP_H_ -#define _SYS_DMAP_H_ +#define _SYS_DMAP_H_ #include @@ -71,12 +71,12 @@ * Definitions for the mapping of vitual swap space to the physical swap * area - the disk map. */ -#define NDMAP 38 /* size of the swap area map */ +#define NDMAP 38 /* size of the swap area map */ struct dmap { - swblk_t dm_size; /* current size used by process */ - swblk_t dm_alloc; /* amount of physical swap space allocated */ - swblk_t dm_map[NDMAP]; /* first disk block number in each chunk */ + swblk_t dm_size; /* current size used by process */ + swblk_t dm_alloc; /* amount of physical swap space allocated */ + swblk_t dm_map[NDMAP]; /* first disk block number in each chunk */ }; #ifdef KERNEL extern struct dmap zdmap; @@ -85,10 +85,10 @@ extern int dmmin, dmmax, dmtext; /* The following structure is that ``returned'' from a call to vstodb(). */ struct dblock { - swblk_t db_base; /* base of physical contig drum block */ - swblk_t db_size; /* size of block */ + swblk_t db_base; /* base of physical contig drum block */ + swblk_t db_size; /* size of block */ }; #endif /* __APPLE_API_OBSOLETE */ -#endif /* !_SYS_DMAP_H_ */ +#endif /* !_SYS_DMAP_H_ */ diff --git a/bsd/sys/doc_tombstone.h b/bsd/sys/doc_tombstone.h index 8dbe1be32..7e16df11b 100644 --- a/bsd/sys/doc_tombstone.h +++ b/bsd/sys/doc_tombstone.h @@ -45,13 +45,13 @@ * uthread structure. 
*/ struct doc_tombstone { - struct vnode *t_lastop_parent; - struct vnode *t_lastop_item; - uint32_t t_lastop_parent_vid; - uint32_t t_lastop_item_vid; + struct vnode *t_lastop_parent; + struct vnode *t_lastop_item; + uint32_t t_lastop_parent_vid; + uint32_t t_lastop_item_vid; uint64_t t_lastop_fileid; uint64_t t_lastop_document_id; - unsigned char t_lastop_filename[NAME_MAX+1]; + unsigned char t_lastop_filename[NAME_MAX + 1]; }; struct componentname; @@ -59,11 +59,11 @@ struct componentname; struct doc_tombstone *doc_tombstone_get(void); void doc_tombstone_clear(struct doc_tombstone *ut, struct vnode **old_vpp); void doc_tombstone_save(struct vnode *dvp, struct vnode *vp, - struct componentname *cnp, uint64_t doc_id, - ino64_t file_id); + struct componentname *cnp, uint64_t doc_id, + ino64_t file_id); bool doc_tombstone_should_ignore_name(const char *nameptr, int len); bool doc_tombstone_should_save(struct doc_tombstone *ut, struct vnode *vp, - struct componentname *cnp); + struct componentname *cnp); #endif // defined(KERNEL_PRIVATE) diff --git a/bsd/sys/domain.h b/bsd/sys/domain.h index 6c4a1a0b6..10ada89ca 100644 --- a/bsd/sys/domain.h +++ b/bsd/sys/domain.h @@ -78,7 +78,7 @@ /* * Forward structure declarations for function prototypes [sic]. */ -struct mbuf; +struct mbuf; #pragma pack(4) @@ -96,27 +96,27 @@ struct domain_old { #else struct domain { #endif /* !XNU_KERNEL_PRIVATE */ - int dom_family; /* AF_xxx */ + int dom_family; /* AF_xxx */ const char *dom_name; - void (*dom_init)(void); /* initialize domain data structures */ - int (*dom_externalize) /* externalize access rights */ - (struct mbuf *); - void (*dom_dispose) /* dispose of internalized rights */ - (struct mbuf *); + void (*dom_init)(void); /* initialize domain data structures */ + int (*dom_externalize) /* externalize access rights */ + (struct mbuf *); + void (*dom_dispose) /* dispose of internalized rights */ + (struct mbuf *); #ifdef XNU_KERNEL_PRIVATE - struct protosw_old *dom_protosw; /* Chain of protosw's for AF */ - struct domain_old *dom_next; + struct protosw_old *dom_protosw; /* Chain of protosw's for AF */ + struct domain_old *dom_next; #else - struct protosw *dom_protosw; /* Chain of protosw's for AF */ - struct domain *dom_next; + struct protosw *dom_protosw; /* Chain of protosw's for AF */ + struct domain *dom_next; #endif /* !XNU_KERNEL_PRIVATE */ - int (*dom_rtattach) /* initialize routing table */ - (void **, int); - int dom_rtoffset; /* an arg to rtattach, in bits */ - int dom_maxrtkey; /* for routing layer */ - int dom_protohdrlen; /* len of protocol header */ - int dom_refs; /* # socreates outstanding */ - lck_mtx_t *dom_mtx; /* domain global mutex */ + int (*dom_rtattach) /* initialize routing table */ + (void **, int); + int dom_rtoffset; /* an arg to rtattach, in bits */ + int dom_maxrtkey; /* for routing layer */ + int dom_protohdrlen; /* len of protocol header */ + int dom_refs; /* # socreates outstanding */ + lck_mtx_t *dom_mtx; /* domain global mutex */ uint32_t dom_flags; uint32_t reserved[2]; }; @@ -131,26 +131,26 @@ struct domain { * Internal, private and extendable representation of domain. 
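/*
 * Sketch of the intended doc_tombstone flow declared above, as a
 * filesystem might use it when a document-tracked file is deleted and
 * about to be recreated.  tombstone_on_delete() is a hypothetical call
 * site; all of these interfaces are KERNEL_PRIVATE.
 */
static void
tombstone_on_delete(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp, uint64_t doc_id, ino64_t file_id)
{
	struct doc_tombstone *ut = doc_tombstone_get();

	if (doc_tombstone_should_save(ut, vp, cnp))
		doc_tombstone_save(dvp, vp, cnp, doc_id, file_id);
}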
*/ struct domain { - int dom_family; /* AF_xxx */ - uint32_t dom_flags; /* domain flags (see below ) */ - uint32_t dom_refs; /* # socreates outstanding */ - lck_mtx_t *dom_mtx; /* domain global mutex */ + int dom_family; /* AF_xxx */ + uint32_t dom_flags; /* domain flags (see below ) */ + uint32_t dom_refs; /* # socreates outstanding */ + lck_mtx_t *dom_mtx; /* domain global mutex */ decl_lck_mtx_data(, dom_mtx_s); - TAILQ_ENTRY(domain) dom_entry; /* next domain in list */ + TAILQ_ENTRY(domain) dom_entry; /* next domain in list */ TAILQ_HEAD(, protosw) dom_protosw; /* protosw chain */ - void (*dom_init) /* initialize domain data structures */ - (struct domain *); - int (*dom_externalize) /* externalize access rights */ - (struct mbuf *); - void (*dom_dispose) /* dispose of internalized rights */ - (struct mbuf *); - int (*dom_rtattach) /* initialize routing table */ - (void **, int); - int dom_rtoffset; /* an arg to rtattach, in bits */ - int dom_maxrtkey; /* for routing layer */ - int dom_protohdrlen; /* len of protocol header */ + void (*dom_init) /* initialize domain data structures */ + (struct domain *); + int (*dom_externalize) /* externalize access rights */ + (struct mbuf *); + void (*dom_dispose) /* dispose of internalized rights */ + (struct mbuf *); + int (*dom_rtattach) /* initialize routing table */ + (void **, int); + int dom_rtoffset; /* an arg to rtattach, in bits */ + int dom_maxrtkey; /* for routing layer */ + int dom_protohdrlen; /* len of protocol header */ const char *dom_name; - struct domain_old *dom_old; /* domain pointer per net_add_domain */ + struct domain_old *dom_old; /* domain pointer per net_add_domain */ }; extern TAILQ_HEAD(domains_head, domain) domains; @@ -160,14 +160,14 @@ extern struct domain *localdomain; /* * Values for dom_flags */ -#define DOM_REENTRANT 0x1 +#define DOM_REENTRANT 0x1 #ifdef BSD_KERNEL_PRIVATE -#define DOM_INITIALIZED 0x2 /* domain has been initialized */ -#define DOM_ATTACHED 0x4 /* domain is in the global list */ -#define DOM_OLD 0x10000000 /* domain added via net_add_domain */ +#define DOM_INITIALIZED 0x2 /* domain has been initialized */ +#define DOM_ATTACHED 0x4 /* domain is in the global list */ +#define DOM_OLD 0x10000000 /* domain added via net_add_domain */ /* pseudo-public domain flags */ -#define DOMF_USERFLAGS (DOM_REENTRANT) +#define DOMF_USERFLAGS (DOM_REENTRANT) #endif /* BSD_KERNEL_PRIVATE */ __BEGIN_DECLS @@ -194,4 +194,4 @@ extern struct domain *pffinddomain(int); __END_DECLS #endif /* KERNEL_PRIVATE */ #endif /* PRIVATE */ -#endif /* _SYS_DOMAIN_H_ */ +#endif /* _SYS_DOMAIN_H_ */ diff --git a/bsd/sys/dtrace_glue.h b/bsd/sys/dtrace_glue.h index bf6a94092..51d9804c7 100644 --- a/bsd/sys/dtrace_glue.h +++ b/bsd/sys/dtrace_glue.h @@ -2,7 +2,7 @@ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
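/*
 * Sketch: looking up a registered protocol domain with pffinddomain()
 * (declared above) and taking its per-domain mutex before touching any
 * domain state.  AF_INET is only an example family; these are
 * KERNEL_PRIVATE interfaces.
 */
static void
domain_lookup_example(void)
{
	struct domain *dp = pffinddomain(AF_INET);

	if (dp != NULL) {
		lck_mtx_lock(dp->dom_mtx);	/* dom_mtx guards this domain */
		/* ... inspect dp->dom_flags, dp->dom_refs ... */
		lck_mtx_unlock(dp->dom_mtx);
	}
}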
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -50,8 +50,8 @@ /* * cmn_err */ -#define CE_NOTE 1 /* notice */ -#define CE_WARN 2 /* warning */ +#define CE_NOTE 1 /* notice */ +#define CE_WARN 2 /* warning */ extern void cmn_err( int, const char *, ... ); @@ -128,25 +128,25 @@ extern dtrace_cpu_t *cpu_list; * is up to the platform to assure that this is performed properly. Note that * the structure is sized to avoid false sharing. */ -#define CPU_CACHE_COHERENCE_SIZE 64 +#define CPU_CACHE_COHERENCE_SIZE 64 typedef struct cpu_core { - uint64_t cpuc_dtrace_illval; /* DTrace illegal value */ - lck_mtx_t cpuc_pid_lock; /* DTrace pid provider lock */ - uint16_t cpuc_dtrace_flags; /* DTrace flags */ - uint64_t cpuc_missing_tos; /* Addr. of top most stack frame if missing */ - uint8_t cpuc_pad[CPU_CACHE_COHERENCE_SIZE - sizeof(uint64_t) - sizeof(lck_mtx_t) - sizeof(uint16_t) - sizeof(uint64_t) ]; /* padding */ + uint64_t cpuc_dtrace_illval; /* DTrace illegal value */ + lck_mtx_t cpuc_pid_lock; /* DTrace pid provider lock */ + uint16_t cpuc_dtrace_flags; /* DTrace flags */ + uint64_t cpuc_missing_tos; /* Addr. of top most stack frame if missing */ + uint8_t cpuc_pad[CPU_CACHE_COHERENCE_SIZE - sizeof(uint64_t) - sizeof(lck_mtx_t) - sizeof(uint16_t) - sizeof(uint64_t)]; /* padding */ } cpu_core_t; extern cpu_core_t *cpu_core; -extern unsigned int dtrace_max_cpus; /* max number of enabled cpus */ -#define NCPU dtrace_max_cpus +extern unsigned int dtrace_max_cpus; /* max number of enabled cpus */ +#define NCPU dtrace_max_cpus extern int cpu_number(void); /* From #include . Called from probe context, must blacklist. */ -#define CPU (&(cpu_list[cpu_number()])) /* Pointer to current CPU */ -#define CPU_ON_INTR(cpup) ml_at_interrupt_context() /* always invoked on current cpu */ +#define CPU (&(cpu_list[cpu_number()])) /* Pointer to current CPU */ +#define CPU_ON_INTR(cpup) ml_at_interrupt_context() /* always invoked on current cpu */ /* * Routines used to register interest in cpu's being added to or removed @@ -174,26 +174,26 @@ extern void unregister_cpu_setup_func(cpu_setup_func_t *, void *); /* * DTrace flags. 
*/ -#define CPU_DTRACE_NOFAULT 0x0001 /* Don't fault */ -#define CPU_DTRACE_DROP 0x0002 /* Drop this ECB */ -#define CPU_DTRACE_BADADDR 0x0004 /* DTrace fault: bad address */ -#define CPU_DTRACE_BADALIGN 0x0008 /* DTrace fault: bad alignment */ -#define CPU_DTRACE_DIVZERO 0x0010 /* DTrace fault: divide by zero */ -#define CPU_DTRACE_ILLOP 0x0020 /* DTrace fault: illegal operation */ -#define CPU_DTRACE_NOSCRATCH 0x0040 /* DTrace fault: out of scratch */ -#define CPU_DTRACE_KPRIV 0x0080 /* DTrace fault: bad kernel access */ -#define CPU_DTRACE_UPRIV 0x0100 /* DTrace fault: bad user access */ -#define CPU_DTRACE_TUPOFLOW 0x0200 /* DTrace fault: tuple stack overflow */ -#define CPU_DTRACE_USTACK_FP 0x0400 /* pid provider hint to ustack() */ -#define CPU_DTRACE_ENTRY 0x0800 /* pid provider hint to ustack() */ +#define CPU_DTRACE_NOFAULT 0x0001 /* Don't fault */ +#define CPU_DTRACE_DROP 0x0002 /* Drop this ECB */ +#define CPU_DTRACE_BADADDR 0x0004 /* DTrace fault: bad address */ +#define CPU_DTRACE_BADALIGN 0x0008 /* DTrace fault: bad alignment */ +#define CPU_DTRACE_DIVZERO 0x0010 /* DTrace fault: divide by zero */ +#define CPU_DTRACE_ILLOP 0x0020 /* DTrace fault: illegal operation */ +#define CPU_DTRACE_NOSCRATCH 0x0040 /* DTrace fault: out of scratch */ +#define CPU_DTRACE_KPRIV 0x0080 /* DTrace fault: bad kernel access */ +#define CPU_DTRACE_UPRIV 0x0100 /* DTrace fault: bad user access */ +#define CPU_DTRACE_TUPOFLOW 0x0200 /* DTrace fault: tuple stack overflow */ +#define CPU_DTRACE_USTACK_FP 0x0400 /* pid provider hint to ustack() */ +#define CPU_DTRACE_ENTRY 0x0800 /* pid provider hint to ustack() */ #define CPU_DTRACE_BADSTACK 0x1000 /* DTrace fault: bad stack */ -#define CPU_DTRACE_FAULT (CPU_DTRACE_BADADDR | CPU_DTRACE_BADALIGN | \ - CPU_DTRACE_DIVZERO | CPU_DTRACE_ILLOP | \ - CPU_DTRACE_NOSCRATCH | CPU_DTRACE_KPRIV | \ - CPU_DTRACE_UPRIV | CPU_DTRACE_TUPOFLOW | \ - CPU_DTRACE_BADSTACK) -#define CPU_DTRACE_ERROR (CPU_DTRACE_FAULT | CPU_DTRACE_DROP) +#define CPU_DTRACE_FAULT (CPU_DTRACE_BADADDR | CPU_DTRACE_BADALIGN | \ + CPU_DTRACE_DIVZERO | CPU_DTRACE_ILLOP | \ + CPU_DTRACE_NOSCRATCH | CPU_DTRACE_KPRIV | \ + CPU_DTRACE_UPRIV | CPU_DTRACE_TUPOFLOW | \ + CPU_DTRACE_BADSTACK) +#define CPU_DTRACE_ERROR (CPU_DTRACE_FAULT | CPU_DTRACE_DROP) /* * Loadable Modules @@ -204,55 +204,55 @@ struct dtrace_module_symbols; /* Solaris' modctl structure, greatly simplified, shadowing parts of xnu kmod structure. 
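/*
 * Sketch: how probe-context code typically consults the per-CPU fault
 * bits defined above.  cpu_core[] and cpu_number() are declared earlier
 * in this header; dtrace_faulted_here() is a hypothetical helper.
 */
static int
dtrace_faulted_here(void)
{
	uint16_t flags = cpu_core[cpu_number()].cpuc_dtrace_flags;

	return (flags & CPU_DTRACE_FAULT) != 0;	/* any hard-fault bit set */
}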
*/ typedef struct modctl { - struct modctl *mod_next; - struct modctl *mod_stale; // stale module chain - uint32_t mod_id; // the kext unique identifier - char mod_modname[KMOD_MAX_NAME]; - int mod_loadcnt; - char mod_loaded; - uint16_t mod_flags; // See flags below - int mod_nenabled; // # of enabled DTrace probes in module - vm_address_t mod_address; // starting address (of Mach-o header blob) - vm_size_t mod_size; // total size (of blob) - UUID mod_uuid; + struct modctl *mod_next; + struct modctl *mod_stale; // stale module chain + uint32_t mod_id; // the kext unique identifier + char mod_modname[KMOD_MAX_NAME]; + int mod_loadcnt; + char mod_loaded; + uint16_t mod_flags; // See flags below + int mod_nenabled; // # of enabled DTrace probes in module + vm_address_t mod_address; // starting address (of Mach-o header blob) + vm_size_t mod_size; // total size (of blob) + UUID mod_uuid; struct dtrace_module_symbols* mod_user_symbols; } modctl_t; /* Definitions for mod_flags */ -#define MODCTL_IS_MACH_KERNEL 0x01 // This module represents /mach_kernel -#define MODCTL_HAS_KERNEL_SYMBOLS 0x02 // Kernel symbols (nlist) are available -#define MODCTL_FBT_PROBES_PROVIDED 0x04 // fbt probes have been provided -#define MODCTL_FBT_INVALID 0x08 // Module is invalid for fbt probes -#define MODCTL_SDT_PROBES_PROVIDED 0x10 // sdt probes have been provided -#define MODCTL_SDT_INVALID 0x20 // Module is invalid for sdt probes -#define MODCTL_HAS_UUID 0x40 // Module has UUID -#define MODCTL_FBT_PRIVATE_PROBES_PROVIDED 0x80 // fbt private probes have been provided -#define MODCTL_FBT_PROVIDE_PRIVATE_PROBES 0x100 // fbt provider must provide private probes -#define MODCTL_FBT_PROVIDE_BLACKLISTED_PROBES 0x200 // fbt provider must provide blacklisted probes -#define MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED 0x400 // fbt blacklisted probes have been provided -#define MODCTL_IS_STATIC_KEXT 0x800 // module is a static kext +#define MODCTL_IS_MACH_KERNEL 0x01 // This module represents /mach_kernel +#define MODCTL_HAS_KERNEL_SYMBOLS 0x02 // Kernel symbols (nlist) are available +#define MODCTL_FBT_PROBES_PROVIDED 0x04 // fbt probes have been provided +#define MODCTL_FBT_INVALID 0x08 // Module is invalid for fbt probes +#define MODCTL_SDT_PROBES_PROVIDED 0x10 // sdt probes have been provided +#define MODCTL_SDT_INVALID 0x20 // Module is invalid for sdt probes +#define MODCTL_HAS_UUID 0x40 // Module has UUID +#define MODCTL_FBT_PRIVATE_PROBES_PROVIDED 0x80 // fbt private probes have been provided +#define MODCTL_FBT_PROVIDE_PRIVATE_PROBES 0x100 // fbt provider must provide private probes +#define MODCTL_FBT_PROVIDE_BLACKLISTED_PROBES 0x200 // fbt provider must provide blacklisted probes +#define MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED 0x400 // fbt blacklisted probes have been provided +#define MODCTL_IS_STATIC_KEXT 0x800 // module is a static kext /* Simple/singular mod_flags accessors */ -#define MOD_IS_MACH_KERNEL(mod) (mod->mod_flags & MODCTL_IS_MACH_KERNEL) -#define MOD_HAS_KERNEL_SYMBOLS(mod) (mod->mod_flags & MODCTL_HAS_KERNEL_SYMBOLS) -#define MOD_HAS_USERSPACE_SYMBOLS(mod) (mod->mod_user_symbols) /* No point in duplicating state in the flags bits */ -#define MOD_FBT_PROBES_PROVIDED(mod) (mod->mod_flags & MODCTL_FBT_PROBES_PROVIDED) -#define MOD_FBT_INVALID(mod) (mod->mod_flags & MODCTL_FBT_INVALID) -#define MOD_SDT_PROBES_PROVIDED(mod) (mod->mod_flags & MODCTL_SDT_PROBES_PROVIDED) -#define MOD_SDT_INVALID(mod) (mod->mod_flags & MODCTL_SDT_INVALID) -#define MOD_HAS_UUID(mod) (mod->mod_flags & MODCTL_HAS_UUID) -#define 
MOD_FBT_PRIVATE_PROBES_PROVIDED(mod) (mod->mod_flags & MODCTL_FBT_PRIVATE_PROBES_PROVIDED) -#define MOD_FBT_PROVIDE_PRIVATE_PROBES(mod) (mod->mod_flags & MODCTL_FBT_PROVIDE_PRIVATE_PROBES) +#define MOD_IS_MACH_KERNEL(mod) (mod->mod_flags & MODCTL_IS_MACH_KERNEL) +#define MOD_HAS_KERNEL_SYMBOLS(mod) (mod->mod_flags & MODCTL_HAS_KERNEL_SYMBOLS) +#define MOD_HAS_USERSPACE_SYMBOLS(mod) (mod->mod_user_symbols) /* No point in duplicating state in the flags bits */ +#define MOD_FBT_PROBES_PROVIDED(mod) (mod->mod_flags & MODCTL_FBT_PROBES_PROVIDED) +#define MOD_FBT_INVALID(mod) (mod->mod_flags & MODCTL_FBT_INVALID) +#define MOD_SDT_PROBES_PROVIDED(mod) (mod->mod_flags & MODCTL_SDT_PROBES_PROVIDED) +#define MOD_SDT_INVALID(mod) (mod->mod_flags & MODCTL_SDT_INVALID) +#define MOD_HAS_UUID(mod) (mod->mod_flags & MODCTL_HAS_UUID) +#define MOD_FBT_PRIVATE_PROBES_PROVIDED(mod) (mod->mod_flags & MODCTL_FBT_PRIVATE_PROBES_PROVIDED) +#define MOD_FBT_PROVIDE_PRIVATE_PROBES(mod) (mod->mod_flags & MODCTL_FBT_PROVIDE_PRIVATE_PROBES) #define MOD_FBT_BLACKLISTED_PROBES_PROVIDED(mod) (mod->mod_flags & MODCTL_FBT_BLACKLISTED_PROBES_PROVIDED) -#define MOD_FBT_PROVIDE_BLACKLISTED_PROBES(mod) (mod->mod_flags & MODCTL_FBT_PROVIDE_BLACKLISTED_PROBES) -#define MOD_IS_STATIC_KEXT(mod) (mod->mod_flags & MODCTL_IS_STATIC_KEXT) +#define MOD_FBT_PROVIDE_BLACKLISTED_PROBES(mod) (mod->mod_flags & MODCTL_FBT_PROVIDE_BLACKLISTED_PROBES) +#define MOD_IS_STATIC_KEXT(mod) (mod->mod_flags & MODCTL_IS_STATIC_KEXT) /* Compound accessors */ -#define MOD_FBT_PRIVATE_PROBES_DONE(mod) (MOD_FBT_PRIVATE_PROBES_PROVIDED(mod) || !MOD_FBT_PROVIDE_PRIVATE_PROBES(mod)) -#define MOD_FBT_BLACKLISTED_PROBES_DONE(mod) (MOD_FBT_BLACKLISTED_PROBES_PROVIDED(mod) || !MOD_FBT_PROVIDE_BLACKLISTED_PROBES(mod)) -#define MOD_FBT_DONE(mod) ((MOD_FBT_PROBES_PROVIDED(mod) && MOD_FBT_PRIVATE_PROBES_DONE(mod) && MOD_FBT_BLACKLISTED_PROBES_DONE(mod)) || MOD_FBT_INVALID(mod)) -#define MOD_SDT_DONE(mod) (MOD_SDT_PROBES_PROVIDED(mod) || MOD_SDT_INVALID(mod)) -#define MOD_SYMBOLS_DONE(mod) (MOD_FBT_DONE(mod) && MOD_SDT_DONE(mod)) +#define MOD_FBT_PRIVATE_PROBES_DONE(mod) (MOD_FBT_PRIVATE_PROBES_PROVIDED(mod) || !MOD_FBT_PROVIDE_PRIVATE_PROBES(mod)) +#define MOD_FBT_BLACKLISTED_PROBES_DONE(mod) (MOD_FBT_BLACKLISTED_PROBES_PROVIDED(mod) || !MOD_FBT_PROVIDE_BLACKLISTED_PROBES(mod)) +#define MOD_FBT_DONE(mod) ((MOD_FBT_PROBES_PROVIDED(mod) && MOD_FBT_PRIVATE_PROBES_DONE(mod) && MOD_FBT_BLACKLISTED_PROBES_DONE(mod)) || MOD_FBT_INVALID(mod)) +#define MOD_SDT_DONE(mod) (MOD_SDT_PROBES_PROVIDED(mod) || MOD_SDT_INVALID(mod)) +#define MOD_SYMBOLS_DONE(mod) (MOD_FBT_DONE(mod) && MOD_SDT_DONE(mod)) extern modctl_t *dtrace_modctl_list; @@ -267,7 +267,7 @@ extern int dtrace_addr_in_module(void*, struct modctl*); #define PRIV_DTRACE_USER 5 #define PRIV_PROC_OWNER 30 #define PRIV_PROC_ZONE 35 -#define PRIV_ALL (-1) /* All privileges required */ +#define PRIV_ALL (-1) /* All privileges required */ /* Privilege sets */ #define PRIV_EFFECTIVE 0 @@ -286,16 +286,16 @@ extern uid_t crgetuid(const cred_t *); /* * "cyclic" */ -#define CY_LOW_LEVEL 0 -#define CY_HIGH_LEVEL 2 -#define CY_LEVELS 3 +#define CY_LOW_LEVEL 0 +#define CY_HIGH_LEVEL 2 +#define CY_LEVELS 3 typedef uintptr_t cyclic_id_t; typedef cyclic_id_t *cyclic_id_list_t; typedef uint16_t cyc_level_t; typedef void (*cyc_func_t)(void *); -#define CYCLIC_NONE ((cyclic_id_t)0) +#define CYCLIC_NONE ((cyclic_id_t)0) typedef struct cyc_time { hrtime_t cyt_when; @@ -329,8 +329,8 @@ extern void cyclic_timer_remove(cyclic_id_t); * ddi */ 
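/*
 * Sketch: the compound modctl accessors above fold several flag tests
 * into one predicate, e.g. a symbol provider can skip a module once all
 * fbt and sdt probe work is finished.  module_needs_symbols() is a
 * hypothetical helper.
 */
static int
module_needs_symbols(struct modctl *ctl)
{
	return !MOD_SYMBOLS_DONE(ctl);	/* true until fbt and sdt are done */
}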
-#define DDI_SUCCESS 0 -#define DDI_FAILURE -1 +#define DDI_SUCCESS 0 +#define DDI_FAILURE -1 #define DDI_PSEUDO "ddi_pseudo" @@ -338,10 +338,10 @@ typedef enum { DDI_DETACH = 0, DDI_SUSPEND = 1, DDI_PM_SUSPEND = 2, - DDI_HOTPLUG_DETACH = 3 /* detach, don't try to auto-unconfig */ + DDI_HOTPLUG_DETACH = 3 /* detach, don't try to auto-unconfig */ } ddi_detach_cmd_t; -#define DDI_PROP_SUCCESS 0 +#define DDI_PROP_SUCCESS 0 #define DDI_PROP_DONTPASS 1 typedef uint_t major_t; @@ -370,8 +370,8 @@ extern void debug_enter(char *); * kmem */ -#define KM_SLEEP 0x00000000 -#define KM_NOSLEEP 0x00000001 +#define KM_SLEEP 0x00000000 +#define KM_NOSLEEP 0x00000001 typedef struct vmem vmem_t; typedef struct kmem_cache kmem_cache_t; @@ -405,8 +405,8 @@ extern void *dt_kmem_zalloc_aligned_site(size_t, size_t, int, vm_allocation_site extern void dt_kmem_free_aligned(void*, size_t); extern kmem_cache_t * -kmem_cache_create(const char *, size_t, size_t, int (*)(void *, void *, int), - void (*)(void *, void *), void (*)(void *), void *, vmem_t *, int); + kmem_cache_create(const char *, size_t, size_t, int (*)(void *, void *, int), + void (*)(void *, void *), void (*)(void *), void *, vmem_t *, int); extern void *kmem_cache_alloc(kmem_cache_t *, int); extern void kmem_cache_free(kmem_cache_t *, void *); extern void kmem_cache_destroy(kmem_cache_t *); @@ -439,13 +439,13 @@ typedef unsigned int model_t; /* For dtrace_instr_size_isa() prototype in @@ -87,63 +87,63 @@ __END_DECLS * Error codes */ -#define EPERM 1 /* Operation not permitted */ -#define ENOENT 2 /* No such file or directory */ -#define ESRCH 3 /* No such process */ -#define EINTR 4 /* Interrupted system call */ -#define EIO 5 /* Input/output error */ -#define ENXIO 6 /* Device not configured */ -#define E2BIG 7 /* Argument list too long */ -#define ENOEXEC 8 /* Exec format error */ -#define EBADF 9 /* Bad file descriptor */ -#define ECHILD 10 /* No child processes */ -#define EDEADLK 11 /* Resource deadlock avoided */ - /* 11 was EAGAIN */ -#define ENOMEM 12 /* Cannot allocate memory */ -#define EACCES 13 /* Permission denied */ -#define EFAULT 14 /* Bad address */ +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* Input/output error */ +#define ENXIO 6 /* Device not configured */ +#define E2BIG 7 /* Argument list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file descriptor */ +#define ECHILD 10 /* No child processes */ +#define EDEADLK 11 /* Resource deadlock avoided */ + /* 11 was EAGAIN */ +#define ENOMEM 12 /* Cannot allocate memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define ENOTBLK 15 /* Block device required */ +#define ENOTBLK 15 /* Block device required */ #endif -#define EBUSY 16 /* Device / Resource busy */ -#define EEXIST 17 /* File exists */ -#define EXDEV 18 /* Cross-device link */ -#define ENODEV 19 /* Operation not supported by device */ -#define ENOTDIR 20 /* Not a directory */ -#define EISDIR 21 /* Is a directory */ -#define EINVAL 22 /* Invalid argument */ -#define ENFILE 23 /* Too many open files in system */ -#define EMFILE 24 /* Too many open files */ -#define ENOTTY 25 /* Inappropriate ioctl for device */ -#define ETXTBSY 26 /* Text file busy */ -#define EFBIG 27 /* File too large */ -#define ENOSPC 28 /* No space left on device */ -#define ESPIPE 29 /* Illegal 
seek */ -#define EROFS 30 /* Read-only file system */ -#define EMLINK 31 /* Too many links */ -#define EPIPE 32 /* Broken pipe */ +#define EBUSY 16 /* Device / Resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* Operation not supported by device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* Too many open files in system */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Inappropriate ioctl for device */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ /* math software */ -#define EDOM 33 /* Numerical argument out of domain */ -#define ERANGE 34 /* Result too large */ +#define EDOM 33 /* Numerical argument out of domain */ +#define ERANGE 34 /* Result too large */ /* non-blocking and interrupt i/o */ -#define EAGAIN 35 /* Resource temporarily unavailable */ -#define EWOULDBLOCK EAGAIN /* Operation would block */ -#define EINPROGRESS 36 /* Operation now in progress */ -#define EALREADY 37 /* Operation already in progress */ +#define EAGAIN 35 /* Resource temporarily unavailable */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define EINPROGRESS 36 /* Operation now in progress */ +#define EALREADY 37 /* Operation already in progress */ /* ipc/network software -- argument errors */ -#define ENOTSOCK 38 /* Socket operation on non-socket */ -#define EDESTADDRREQ 39 /* Destination address required */ -#define EMSGSIZE 40 /* Message too long */ -#define EPROTOTYPE 41 /* Protocol wrong type for socket */ -#define ENOPROTOOPT 42 /* Protocol not available */ -#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ENOTSOCK 38 /* Socket operation on non-socket */ +#define EDESTADDRREQ 39 /* Destination address required */ +#define EMSGSIZE 40 /* Message too long */ +#define EPROTOTYPE 41 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ #endif -#define ENOTSUP 45 /* Operation not supported */ +#define ENOTSUP 45 /* Operation not supported */ #if !__DARWIN_UNIX03 && !defined(KERNEL) /* * This is the same for binary and source copmpatability, unless compiling @@ -153,137 +153,137 @@ __END_DECLS * if compiling source with __DARWIN_UNIX03, the conversion in libc is not * done, and the caller gets the expected (discrete) value. 
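/*
 * Sketch: the classic userspace use of the codes above - on Darwin
 * EWOULDBLOCK is defined as EAGAIN, so a non-blocking reader tests one
 * value for "no data yet" and retries only on EINTR.  read_nonblock()
 * is a hypothetical helper.
 */
#include <errno.h>
#include <unistd.h>

static ssize_t
read_nonblock(int fd, void *buf, size_t len)
{
	ssize_t n;

	while ((n = read(fd, buf, len)) == -1) {
		if (errno == EINTR)
			continue;	/* interrupted: retry immediately */
		break;			/* EAGAIN ("no data yet") or a real error */
	}
	return n;
}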
*/ -#define EOPNOTSUPP ENOTSUP /* Operation not supported on socket */ +#define EOPNOTSUPP ENOTSUP /* Operation not supported on socket */ #endif /* !__DARWIN_UNIX03 && !KERNEL */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ #endif -#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */ -#define EADDRINUSE 48 /* Address already in use */ -#define EADDRNOTAVAIL 49 /* Can't assign requested address */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol family */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Can't assign requested address */ /* ipc/network software -- operational errors */ -#define ENETDOWN 50 /* Network is down */ -#define ENETUNREACH 51 /* Network is unreachable */ -#define ENETRESET 52 /* Network dropped connection on reset */ -#define ECONNABORTED 53 /* Software caused connection abort */ -#define ECONNRESET 54 /* Connection reset by peer */ -#define ENOBUFS 55 /* No buffer space available */ -#define EISCONN 56 /* Socket is already connected */ -#define ENOTCONN 57 /* Socket is not connected */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection on reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Socket is already connected */ +#define ENOTCONN 57 /* Socket is not connected */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define ESHUTDOWN 58 /* Can't send after socket shutdown */ -#define ETOOMANYREFS 59 /* Too many references: can't splice */ +#define ESHUTDOWN 58 /* Can't send after socket shutdown */ +#define ETOOMANYREFS 59 /* Too many references: can't splice */ #endif -#define ETIMEDOUT 60 /* Operation timed out */ -#define ECONNREFUSED 61 /* Connection refused */ +#define ETIMEDOUT 60 /* Operation timed out */ +#define ECONNREFUSED 61 /* Connection refused */ -#define ELOOP 62 /* Too many levels of symbolic links */ -#define ENAMETOOLONG 63 /* File name too long */ +#define ELOOP 62 /* Too many levels of symbolic links */ +#define ENAMETOOLONG 63 /* File name too long */ /* should be rearranged */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTDOWN 64 /* Host is down */ #endif -#define EHOSTUNREACH 65 /* No route to host */ -#define ENOTEMPTY 66 /* Directory not empty */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ /* quotas & mush */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define EPROCLIM 67 /* Too many processes */ -#define EUSERS 68 /* Too many users */ +#define EPROCLIM 67 /* Too many processes */ +#define EUSERS 68 /* Too many users */ #endif -#define EDQUOT 69 /* Disc quota exceeded */ +#define EDQUOT 69 /* Disc quota exceeded */ /* Network File System */ -#define ESTALE 70 /* Stale NFS file handle */ +#define ESTALE 70 /* Stale NFS file handle */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define EREMOTE 71 /* Too many levels of remote in path */ -#define EBADRPC 72 /* RPC struct is bad */ -#define ERPCMISMATCH 73 /* RPC version wrong */ -#define EPROGUNAVAIL 74 /* RPC prog. 
not avail */ -#define EPROGMISMATCH 75 /* Program version wrong */ -#define EPROCUNAVAIL 76 /* Bad procedure for program */ +#define EREMOTE 71 /* Too many levels of remote in path */ +#define EBADRPC 72 /* RPC struct is bad */ +#define ERPCMISMATCH 73 /* RPC version wrong */ +#define EPROGUNAVAIL 74 /* RPC prog. not avail */ +#define EPROGMISMATCH 75 /* Program version wrong */ +#define EPROCUNAVAIL 76 /* Bad procedure for program */ #endif -#define ENOLCK 77 /* No locks available */ -#define ENOSYS 78 /* Function not implemented */ +#define ENOLCK 77 /* No locks available */ +#define ENOSYS 78 /* Function not implemented */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define EFTYPE 79 /* Inappropriate file type or format */ -#define EAUTH 80 /* Authentication error */ -#define ENEEDAUTH 81 /* Need authenticator */ +#define EFTYPE 79 /* Inappropriate file type or format */ +#define EAUTH 80 /* Authentication error */ +#define ENEEDAUTH 81 /* Need authenticator */ /* Intelligent device errors */ -#define EPWROFF 82 /* Device power is off */ -#define EDEVERR 83 /* Device error, e.g. paper out */ +#define EPWROFF 82 /* Device power is off */ +#define EDEVERR 83 /* Device error, e.g. paper out */ #endif -#define EOVERFLOW 84 /* Value too large to be stored in data type */ +#define EOVERFLOW 84 /* Value too large to be stored in data type */ /* Program loading errors */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define EBADEXEC 85 /* Bad executable */ -#define EBADARCH 86 /* Bad CPU type in executable */ -#define ESHLIBVERS 87 /* Shared library version mismatch */ -#define EBADMACHO 88 /* Malformed Macho file */ +#define EBADEXEC 85 /* Bad executable */ +#define EBADARCH 86 /* Bad CPU type in executable */ +#define ESHLIBVERS 87 /* Shared library version mismatch */ +#define EBADMACHO 88 /* Malformed Macho file */ #endif -#define ECANCELED 89 /* Operation canceled */ +#define ECANCELED 89 /* Operation canceled */ -#define EIDRM 90 /* Identifier removed */ -#define ENOMSG 91 /* No message of desired type */ -#define EILSEQ 92 /* Illegal byte sequence */ +#define EIDRM 90 /* Identifier removed */ +#define ENOMSG 91 /* No message of desired type */ +#define EILSEQ 92 /* Illegal byte sequence */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define ENOATTR 93 /* Attribute not found */ +#define ENOATTR 93 /* Attribute not found */ #endif -#define EBADMSG 94 /* Bad message */ -#define EMULTIHOP 95 /* Reserved */ -#define ENODATA 96 /* No message available on STREAM */ -#define ENOLINK 97 /* Reserved */ -#define ENOSR 98 /* No STREAM resources */ -#define ENOSTR 99 /* Not a STREAM */ -#define EPROTO 100 /* Protocol error */ -#define ETIME 101 /* STREAM ioctl timeout */ +#define EBADMSG 94 /* Bad message */ +#define EMULTIHOP 95 /* Reserved */ +#define ENODATA 96 /* No message available on STREAM */ +#define ENOLINK 97 /* Reserved */ +#define ENOSR 98 /* No STREAM resources */ +#define ENOSTR 99 /* Not a STREAM */ +#define EPROTO 100 /* Protocol error */ +#define ETIME 101 /* STREAM ioctl timeout */ #if __DARWIN_UNIX03 || defined(KERNEL) /* This value is only discrete when compiling __DARWIN_UNIX03, or KERNEL */ -#define EOPNOTSUPP 102 /* Operation not supported on socket */ +#define EOPNOTSUPP 102 /* Operation not supported on socket */ #endif /* __DARWIN_UNIX03 || KERNEL */ -#define ENOPOLICY 103 /* No such policy registered */ +#define ENOPOLICY 103 /* No such policy registered */ #if __DARWIN_C_LEVEL >= 200809L -#define ENOTRECOVERABLE 104 /* State not recoverable */ -#define EOWNERDEAD 105 /* Previous 
owner died */ +#define ENOTRECOVERABLE 104 /* State not recoverable */ +#define EOWNERDEAD 105 /* Previous owner died */ #endif #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define EQFULL 106 /* Interface output queue is full */ -#define ELAST 106 /* Must be equal largest errno */ +#define EQFULL 106 /* Interface output queue is full */ +#define ELAST 106 /* Must be equal largest errno */ #endif #ifdef KERNEL /* pseudo-errors returned inside kernel to modify return to process */ -#define ERESTART (-1) /* restart syscall */ -#define EJUSTRETURN (-2) /* don't modify regs, just return */ +#define ERESTART (-1) /* restart syscall */ +#define EJUSTRETURN (-2) /* don't modify regs, just return */ #ifdef KERNEL_PRIVATE -#define ERECYCLE (-5) /* restart lookup under heavy vnode pressure/recycling */ +#define ERECYCLE (-5) /* restart lookup under heavy vnode pressure/recycling */ #endif #ifdef BSD_KERNEL_PRIVATE -#define EREDRIVEOPEN (-6) -#define EKEEPLOOKING (-7) +#define EREDRIVEOPEN (-6) +#define EKEEPLOOKING (-7) /* used for cvwait error returns to Libc */ -#define ECVCERORR 256 -#define ECVPERORR 512 +#define ECVCERORR 256 +#define ECVPERORR 512 #else /* BSD_KERNEL_PRIVATE */ /* -5, -6 and -7 and -106 are reserved for kernel internal use */ #endif /* BSD_KERNEL_PRIVATE */ #ifdef PRIVATE -#define EQSUSPENDED (-EQFULL) /* Output queue is suspended */ +#define EQSUSPENDED (-EQFULL) /* Output queue is suspended */ #endif /* PRIVATE */ #endif /* KERNEL */ #endif /* _SYS_ERRNO_H_ */ diff --git a/bsd/sys/ev.h b/bsd/sys/ev.h index 8000e9900..5f1e8813d 100644 --- a/bsd/sys/ev.h +++ b/bsd/sys/ev.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1998 Apple Computer, Inc. 
All rights reserved */ @@ -37,14 +37,14 @@ #include struct eventreq { - int er_type; + int er_type; #define EV_FD 1 // file descriptor - int er_handle; - void *er_data; - int er_rcnt; - int er_wcnt; - int er_ecnt; - int er_eventbits; + int er_handle; + void *er_data; + int er_rcnt; + int er_wcnt; + int er_ecnt; + int er_eventbits; #define EV_RE 1 #define EV_WR 2 #define EV_EX 4 @@ -71,9 +71,9 @@ typedef struct eventreq *er_t; #ifndef KERNEL __BEGIN_DECLS -int modwatch(er_t, int); -int watchevent(er_t, int); -int waitevent(er_t, struct timeval *); +int modwatch(er_t, int); +int watchevent(er_t, int); +int waitevent(er_t, struct timeval *); __END_DECLS #endif @@ -84,33 +84,33 @@ __END_DECLS struct eventreq32 { - int er_type; - int er_handle; - uint32_t er_data; - int er_rcnt; - int er_wcnt; - int er_ecnt; - int er_eventbits; + int er_type; + int er_handle; + uint32_t er_data; + int er_rcnt; + int er_wcnt; + int er_ecnt; + int er_eventbits; }; struct eventreq64 { - int er_type; - int er_handle; - user_addr_t er_data; - int er_rcnt; - int er_wcnt; - int er_ecnt; - int er_eventbits; + int er_type; + int er_handle; + user_addr_t er_data; + int er_rcnt; + int er_wcnt; + int er_ecnt; + int er_eventbits; }; struct eventqelt { - TAILQ_ENTRY(eventqelt) ee_slist; - TAILQ_ENTRY(eventqelt) ee_plist; - struct eventreq64 ee_req; - struct proc * ee_proc; - u_int ee_flags; -#define EV_QUEUED 0x01 - u_int ee_eventmask; + TAILQ_ENTRY(eventqelt) ee_slist; + TAILQ_ENTRY(eventqelt) ee_plist; + struct eventreq64 ee_req; + struct proc * ee_proc; + u_int ee_flags; +#define EV_QUEUED 0x01 + u_int ee_eventmask; }; int waitevent_close(struct proc *p, struct fileproc *); diff --git a/bsd/sys/event.h b/bsd/sys/event.h index 60eee50ab..e8c171fd9 100644 --- a/bsd/sys/event.h +++ b/bsd/sys/event.h @@ -63,67 +63,67 @@ /* * Filter types */ -#define EVFILT_READ (-1) -#define EVFILT_WRITE (-2) -#define EVFILT_AIO (-3) /* attached to aio requests */ -#define EVFILT_VNODE (-4) /* attached to vnodes */ -#define EVFILT_PROC (-5) /* attached to struct proc */ -#define EVFILT_SIGNAL (-6) /* attached to struct proc */ -#define EVFILT_TIMER (-7) /* timers */ -#define EVFILT_MACHPORT (-8) /* Mach portsets */ -#define EVFILT_FS (-9) /* Filesystem events */ +#define EVFILT_READ (-1) +#define EVFILT_WRITE (-2) +#define EVFILT_AIO (-3) /* attached to aio requests */ +#define EVFILT_VNODE (-4) /* attached to vnodes */ +#define EVFILT_PROC (-5) /* attached to struct proc */ +#define EVFILT_SIGNAL (-6) /* attached to struct proc */ +#define EVFILT_TIMER (-7) /* timers */ +#define EVFILT_MACHPORT (-8) /* Mach portsets */ +#define EVFILT_FS (-9) /* Filesystem events */ #define EVFILT_USER (-10) /* User events */ - /* (-11) unused */ -#define EVFILT_VM (-12) /* Virtual memory events */ + /* (-11) unused */ +#define EVFILT_VM (-12) /* Virtual memory events */ #ifdef PRIVATE -#define EVFILT_SOCK (-13) /* Socket events */ -#define EVFILT_MEMORYSTATUS (-14) /* Memorystatus events */ +#define EVFILT_SOCK (-13) /* Socket events */ +#define EVFILT_MEMORYSTATUS (-14) /* Memorystatus events */ #endif /* PRIVATE */ -#define EVFILT_EXCEPT (-15) /* Exception events */ +#define EVFILT_EXCEPT (-15) /* Exception events */ #ifdef PRIVATE #define EVFILT_WORKLOOP (-17) /* Workloop events */ #endif /* PRIVATE */ -#define EVFILT_SYSCOUNT 17 -#define EVFILT_THREADMARKER EVFILT_SYSCOUNT /* Internal use only */ +#define EVFILT_SYSCOUNT 17 +#define EVFILT_THREADMARKER EVFILT_SYSCOUNT /* Internal use only */ #pragma pack(4) struct kevent { - uintptr_t ident; /* 
identifier for this event */ - int16_t filter; /* filter for event */ - uint16_t flags; /* general flags */ - uint32_t fflags; /* filter-specific flags */ - intptr_t data; /* filter-specific data */ - void *udata; /* opaque user data identifier */ + uintptr_t ident; /* identifier for this event */ + int16_t filter; /* filter for event */ + uint16_t flags; /* general flags */ + uint32_t fflags; /* filter-specific flags */ + intptr_t data; /* filter-specific data */ + void *udata; /* opaque user data identifier */ }; #ifdef KERNEL_PRIVATE struct user64_kevent { - uint64_t ident; /* identifier for this event */ - int16_t filter; /* filter for event */ - uint16_t flags; /* general flags */ - uint32_t fflags; /* filter-specific flags */ - int64_t data; /* filter-specific data */ - user_addr_t udata; /* opaque user data identifier */ + uint64_t ident; /* identifier for this event */ + int16_t filter; /* filter for event */ + uint16_t flags; /* general flags */ + uint32_t fflags; /* filter-specific flags */ + int64_t data; /* filter-specific data */ + user_addr_t udata; /* opaque user data identifier */ }; struct user32_kevent { - uint32_t ident; /* identifier for this event */ - int16_t filter; /* filter for event */ - uint16_t flags; /* general flags */ - uint32_t fflags; /* filter-specific flags */ - int32_t data; /* filter-specific data */ - user32_addr_t udata; /* opaque user data identifier */ + uint32_t ident; /* identifier for this event */ + int16_t filter; /* filter for event */ + uint16_t flags; /* general flags */ + uint32_t fflags; /* filter-specific flags */ + int32_t data; /* filter-specific data */ + user32_addr_t udata; /* opaque user data identifier */ }; struct kevent_internal_s { uint64_t ident; /* identifier for this event */ int16_t filter; /* filter for event */ uint16_t flags; /* general flags */ - int32_t qos; /* quality of service */ + int32_t qos; /* quality of service */ uint32_t fflags; /* filter-specific flags */ // uint32_t xflags; /* extra filter-specific flags */ int64_t data; /* filter-specific data */ @@ -136,26 +136,26 @@ struct kevent_internal_s { #pragma pack() struct kevent64_s { - uint64_t ident; /* identifier for this event */ - int16_t filter; /* filter for event */ - uint16_t flags; /* general flags */ - uint32_t fflags; /* filter-specific flags */ - int64_t data; /* filter-specific data */ - uint64_t udata; /* opaque user data identifier */ - uint64_t ext[2]; /* filter-specific extensions */ + uint64_t ident; /* identifier for this event */ + int16_t filter; /* filter for event */ + uint16_t flags; /* general flags */ + uint32_t fflags; /* filter-specific flags */ + int64_t data; /* filter-specific data */ + uint64_t udata; /* opaque user data identifier */ + uint64_t ext[2]; /* filter-specific extensions */ }; #ifdef PRIVATE struct kevent_qos_s { - uint64_t ident; /* identifier for this event */ - int16_t filter; /* filter for event */ - uint16_t flags; /* general flags */ - int32_t qos; /* quality of service */ - uint64_t udata; /* opaque user data identifier */ - uint32_t fflags; /* filter-specific flags */ - uint32_t xflags; /* extra filter-specific flags */ - int64_t data; /* filter-specific data */ - uint64_t ext[4]; /* filter-specific extensions */ + uint64_t ident; /* identifier for this event */ + int16_t filter; /* filter for event */ + uint16_t flags; /* general flags */ + int32_t qos; /* quality of service */ + uint64_t udata; /* opaque user data identifier */ + uint32_t fflags; /* filter-specific flags */ + uint32_t xflags; /* extra 
filter-specific flags */ + int64_t data; /* filter-specific data */ + uint64_t ext[4]; /* filter-specific extensions */ }; /* @@ -165,33 +165,33 @@ typedef uint64_t kqueue_id_t; #endif /* PRIVATE */ -#define EV_SET(kevp, a, b, c, d, e, f) do { \ - struct kevent *__kevp__ = (kevp); \ - __kevp__->ident = (a); \ - __kevp__->filter = (b); \ - __kevp__->flags = (c); \ - __kevp__->fflags = (d); \ - __kevp__->data = (e); \ - __kevp__->udata = (f); \ +#define EV_SET(kevp, a, b, c, d, e, f) do { \ + struct kevent *__kevp__ = (kevp); \ + __kevp__->ident = (a); \ + __kevp__->filter = (b); \ + __kevp__->flags = (c); \ + __kevp__->fflags = (d); \ + __kevp__->data = (e); \ + __kevp__->udata = (f); \ } while(0) -#define EV_SET64(kevp, a, b, c, d, e, f, g, h) do { \ - struct kevent64_s *__kevp__ = (kevp); \ - __kevp__->ident = (a); \ - __kevp__->filter = (b); \ - __kevp__->flags = (c); \ - __kevp__->fflags = (d); \ - __kevp__->data = (e); \ - __kevp__->udata = (f); \ - __kevp__->ext[0] = (g); \ - __kevp__->ext[1] = (h); \ +#define EV_SET64(kevp, a, b, c, d, e, f, g, h) do { \ + struct kevent64_s *__kevp__ = (kevp); \ + __kevp__->ident = (a); \ + __kevp__->filter = (b); \ + __kevp__->flags = (c); \ + __kevp__->fflags = (d); \ + __kevp__->data = (e); \ + __kevp__->udata = (f); \ + __kevp__->ext[0] = (g); \ + __kevp__->ext[1] = (h); \ } while(0) /* kevent system call flags */ -#define KEVENT_FLAG_NONE 0x000000 /* no flag value */ -#define KEVENT_FLAG_IMMEDIATE 0x000001 /* immediate timeout */ -#define KEVENT_FLAG_ERROR_EVENTS 0x000002 /* output events only include change errors */ +#define KEVENT_FLAG_NONE 0x000000 /* no flag value */ +#define KEVENT_FLAG_IMMEDIATE 0x000001 /* immediate timeout */ +#define KEVENT_FLAG_ERROR_EVENTS 0x000002 /* output events only include change errors */ #ifdef PRIVATE @@ -228,15 +228,15 @@ typedef uint64_t kqueue_id_t; #define KEVENT_FLAG_DYNAMIC_KQUEUE 0x2000 /* kqueue is dynamically allocated */ #define KEVENT_FLAG_USER (KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS | \ - KEVENT_FLAG_STACK_EVENTS | KEVENT_FLAG_STACK_DATA | \ - KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP | \ - KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) + KEVENT_FLAG_STACK_EVENTS | KEVENT_FLAG_STACK_DATA | \ + KEVENT_FLAG_WORKQ | KEVENT_FLAG_WORKLOOP | \ + KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST | KEVENT_FLAG_DYNAMIC_KQ_MUST_NOT_EXIST) /* * Since some filter ops are not part of the standard sysfilt_ops, we use * kn_filtid starting from EVFILT_SYSCOUNT to identify these cases. This is to * let kn_fops() get the correct fops for all cases. -*/ + */ #define EVFILTID_KQREAD (EVFILT_SYSCOUNT) #define EVFILTID_PIPE_R (EVFILT_SYSCOUNT + 1) #define EVFILTID_PIPE_W (EVFILT_SYSCOUNT + 2) @@ -279,10 +279,10 @@ typedef uint64_t kqueue_id_t; #define EV_UDATA_SPECIFIC 0x0100 /* unique kevent per udata value */ #define EV_DISPATCH2 (EV_DISPATCH | EV_UDATA_SPECIFIC) - /* ... in combination with EV_DELETE */ - /* will defer delete until udata-specific */ - /* event enabled. EINPROGRESS will be */ - /* returned to indicate the deferral */ +/* ... in combination with EV_DELETE */ +/* will defer delete until udata-specific */ +/* event enabled. EINPROGRESS will be */ +/* returned to indicate the deferral */ #define EV_VANISHED 0x0200 /* report that source has vanished */ /* ... only valid with EV_DISPATCH2 */ @@ -320,8 +320,8 @@ typedef uint64_t kqueue_id_t; * number of bytes before the current OOB marker, else data count is the number * of bytes beyond OOB marker. 
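*
* For context, the structures and macros above are the whole user-visible
* interface. The following is a minimal, illustrative user-space sketch
* (not part of the header or this patch) of how struct kevent, EV_SET()
* and kevent(2) compose; the helper name wait_readable is hypothetical.
*/

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>

/* Illustrative helper: wait for `fd` to become readable and return the
 * byte count kqueue reports, or -1 on error. */
static long
wait_readable(int fd)
{
	struct kevent chg, ev;
	long nbytes = -1;
	int kq = kqueue();

	if (kq < 0) {
		return -1;
	}
	/* EV_SET fills ident, filter, flags, fflags, data, udata in order. */
	EV_SET(&chg, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);

	/* One kevent() call can both submit the change and collect an event. */
	if (kevent(kq, &chg, 1, &ev, 1, NULL) == 1 && !(ev.flags & EV_ERROR)) {
		/* For EVFILT_READ, ev.data is the number of bytes ready. */
		nbytes = (long)ev.data;
	}
	close(kq);
	return nbytes;
}

/*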
*/ -#define EV_POLL EV_FLAG0 -#define EV_OOBAND EV_FLAG1 +#define EV_POLL EV_FLAG0 +#define EV_OOBAND EV_FLAG1 /* * data/hint fflags for EVFILT_USER, shared with userspace @@ -330,7 +330,7 @@ typedef uint64_t kqueue_id_t; /* * On input, NOTE_TRIGGER causes the event to be triggered for output. */ -#define NOTE_TRIGGER 0x01000000 +#define NOTE_TRIGGER 0x01000000 /* * On input, the top two bits of fflags specifies how the lower twenty four @@ -344,7 +344,7 @@ typedef uint64_t kqueue_id_t; #define NOTE_FFOR 0x80000000 /* or fflags */ #define NOTE_FFCOPY 0xc0000000 /* copy fflags */ #define NOTE_FFCTRLMASK 0xc0000000 /* mask for operations */ -#define NOTE_FFLAGSMASK 0x00ffffff +#define NOTE_FFLAGSMASK 0x00ffffff #ifdef PRIVATE /* @@ -415,11 +415,11 @@ typedef uint64_t kqueue_id_t; * EVFILT_WORKLOOP ext[] array indexes/meanings. */ #define EV_EXTIDX_WL_LANE 0 /* lane identifier [in: sync waiter] - [out: thread request] */ + * [out: thread request] */ #define EV_EXTIDX_WL_ADDR 1 /* debounce address [in: NULL==no debounce] */ #define EV_EXTIDX_WL_MASK 2 /* debounce mask [in] */ #define EV_EXTIDX_WL_VALUE 3 /* debounce value [in: not current->ESTALE] - [out: new/debounce value] */ + * [out: new/debounce value] */ #endif /* PRIVATE */ /* @@ -428,23 +428,23 @@ typedef uint64_t kqueue_id_t; * The default behavior for EVFILT_READ is to make the determination * relative to the current file descriptor read pointer. */ -#define NOTE_LOWAT 0x00000001 /* low water mark */ +#define NOTE_LOWAT 0x00000001 /* low water mark */ /* data/hint flags for EVFILT_EXCEPT, shared with userspace */ -#define NOTE_OOB 0x00000002 /* OOB data */ +#define NOTE_OOB 0x00000002 /* OOB data */ /* * data/hint fflags for EVFILT_VNODE, shared with userspace */ -#define NOTE_DELETE 0x00000001 /* vnode was removed */ -#define NOTE_WRITE 0x00000002 /* data contents changed */ -#define NOTE_EXTEND 0x00000004 /* size increased */ -#define NOTE_ATTRIB 0x00000008 /* attributes changed */ -#define NOTE_LINK 0x00000010 /* link count changed */ -#define NOTE_RENAME 0x00000020 /* vnode was renamed */ -#define NOTE_REVOKE 0x00000040 /* vnode access was revoked */ -#define NOTE_NONE 0x00000080 /* No specific vnode event: to test for EVFILT_READ activation*/ -#define NOTE_FUNLOCK 0x00000100 /* vnode was unlocked by flock(2) */ +#define NOTE_DELETE 0x00000001 /* vnode was removed */ +#define NOTE_WRITE 0x00000002 /* data contents changed */ +#define NOTE_EXTEND 0x00000004 /* size increased */ +#define NOTE_ATTRIB 0x00000008 /* attributes changed */ +#define NOTE_LINK 0x00000010 /* link count changed */ +#define NOTE_RENAME 0x00000020 /* vnode was renamed */ +#define NOTE_REVOKE 0x00000040 /* vnode access was revoked */ +#define NOTE_NONE 0x00000080 /* No specific vnode event: to test for EVFILT_READ activation*/ +#define NOTE_FUNLOCK 0x00000100 /* vnode was unlocked by flock(2) */ /* * data/hint fflags for EVFILT_PROC, shared with userspace @@ -460,16 +460,16 @@ enum { eNoteReapDeprecated __deprecated_enum_msg("This kqueue(2) EVFILT_PROC flag is deprecated") = 0x10000000 }; -#define NOTE_EXIT 0x80000000 /* process exited */ -#define NOTE_FORK 0x40000000 /* process forked */ -#define NOTE_EXEC 0x20000000 /* process exec'd */ -#define NOTE_REAP ((unsigned int)eNoteReapDeprecated /* 0x10000000 */) /* process reaped */ -#define NOTE_SIGNAL 0x08000000 /* shared with EVFILT_SIGNAL */ -#define NOTE_EXITSTATUS 0x04000000 /* exit status to be returned, valid for child process only */ -#define NOTE_EXIT_DETAIL 0x02000000 /* provide details on reasons for
exit */ +#define NOTE_EXIT 0x80000000 /* process exited */ +#define NOTE_FORK 0x40000000 /* process forked */ +#define NOTE_EXEC 0x20000000 /* process exec'd */ +#define NOTE_REAP ((unsigned int)eNoteReapDeprecated /* 0x10000000 */ ) /* process reaped */ +#define NOTE_SIGNAL 0x08000000 /* shared with EVFILT_SIGNAL */ +#define NOTE_EXITSTATUS 0x04000000 /* exit status to be returned, valid for child process only */ +#define NOTE_EXIT_DETAIL 0x02000000 /* provide details on reasons for exit */ -#define NOTE_PDATAMASK 0x000fffff /* mask for signal & exit status */ -#define NOTE_PCTRLMASK (~NOTE_PDATAMASK) +#define NOTE_PDATAMASK 0x000fffff /* mask for signal & exit status */ +#define NOTE_PCTRLMASK (~NOTE_PDATAMASK) /* * If NOTE_EXITSTATUS is present, provide additional info about exiting process. @@ -477,51 +477,51 @@ enum { enum { eNoteExitReparentedDeprecated __deprecated_enum_msg("This kqueue(2) EVFILT_PROC flag is no longer sent") = 0x00080000 }; -#define NOTE_EXIT_REPARENTED ((unsigned int)eNoteExitReparentedDeprecated) /* exited while reparented */ +#define NOTE_EXIT_REPARENTED ((unsigned int)eNoteExitReparentedDeprecated) /* exited while reparented */ /* * If NOTE_EXIT_DETAIL is present, these bits indicate specific reasons for exiting. */ -#define NOTE_EXIT_DETAIL_MASK 0x00070000 -#define NOTE_EXIT_DECRYPTFAIL 0x00010000 -#define NOTE_EXIT_MEMORY 0x00020000 -#define NOTE_EXIT_CSERROR 0x00040000 +#define NOTE_EXIT_DETAIL_MASK 0x00070000 +#define NOTE_EXIT_DECRYPTFAIL 0x00010000 +#define NOTE_EXIT_MEMORY 0x00020000 +#define NOTE_EXIT_CSERROR 0x00040000 #ifdef PRIVATE /* * If NOTE_EXIT_MEMORY is present, these bits indicate specific jetsam condition. */ -#define NOTE_EXIT_MEMORY_DETAIL_MASK 0xfe000000 -#define NOTE_EXIT_MEMORY_VMPAGESHORTAGE 0x80000000 /* jetsam condition: lowest jetsam priority proc killed due to vm page shortage */ -#define NOTE_EXIT_MEMORY_VMTHRASHING 0x40000000 /* jetsam condition: lowest jetsam priority proc killed due to vm thrashing */ -#define NOTE_EXIT_MEMORY_HIWAT 0x20000000 /* jetsam condition: process reached its high water mark */ -#define NOTE_EXIT_MEMORY_PID 0x10000000 /* jetsam condition: special pid kill requested */ -#define NOTE_EXIT_MEMORY_IDLE 0x08000000 /* jetsam condition: idle process cleaned up */ -#define NOTE_EXIT_MEMORY_VNODE 0X04000000 /* jetsam condition: virtual node kill */ -#define NOTE_EXIT_MEMORY_FCTHRASHING 0x02000000 /* jetsam condition: lowest jetsam priority proc killed due to filecache thrashing */ +#define NOTE_EXIT_MEMORY_DETAIL_MASK 0xfe000000 +#define NOTE_EXIT_MEMORY_VMPAGESHORTAGE 0x80000000 /* jetsam condition: lowest jetsam priority proc killed due to vm page shortage */ +#define NOTE_EXIT_MEMORY_VMTHRASHING 0x40000000 /* jetsam condition: lowest jetsam priority proc killed due to vm thrashing */ +#define NOTE_EXIT_MEMORY_HIWAT 0x20000000 /* jetsam condition: process reached its high water mark */ +#define NOTE_EXIT_MEMORY_PID 0x10000000 /* jetsam condition: special pid kill requested */ +#define NOTE_EXIT_MEMORY_IDLE 0x08000000 /* jetsam condition: idle process cleaned up */ +#define NOTE_EXIT_MEMORY_VNODE 0X04000000 /* jetsam condition: virtual node kill */ +#define NOTE_EXIT_MEMORY_FCTHRASHING 0x02000000 /* jetsam condition: lowest jetsam priority proc killed due to filecache thrashing */ #endif /* * data/hint fflags for EVFILT_VM, shared with userspace. 
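*
* Before moving on to the VM filter: a brief, illustrative user-space sketch
* (not part of the header or this patch) of the EVFILT_PROC flags defined
* above. The helper name wait_for_exit is hypothetical.
*/

#include <sys/types.h>
#include <sys/event.h>
#include <unistd.h>

/* Illustrative helper: block until process `pid` exits. Returns 0 if the
 * NOTE_EXIT event fired, -1 on error. */
static int
wait_for_exit(pid_t pid)
{
	struct kevent chg, ev;
	int kq = kqueue();

	if (kq < 0) {
		return -1;
	}
	/* For EVFILT_PROC, ident is a pid and fflags selects the NOTE_* bits
	 * of interest; EV_ONESHOT drops the knote after the first delivery. */
	EV_SET(&chg, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, NULL);

	if (kevent(kq, &chg, 1, &ev, 1, NULL) != 1 || (ev.flags & EV_ERROR)) {
		close(kq);
		return -1;
	}
	close(kq);
	/* On delivery, fflags reports which NOTE_* conditions occurred. */
	return (ev.fflags & NOTE_EXIT) ? 0 : -1;
}

/*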
*/ -#define NOTE_VM_PRESSURE 0x80000000 /* will react on memory pressure */ -#define NOTE_VM_PRESSURE_TERMINATE 0x40000000 /* will quit on memory pressure, possibly after cleaning up dirty state */ -#define NOTE_VM_PRESSURE_SUDDEN_TERMINATE 0x20000000 /* will quit immediately on memory pressure */ -#define NOTE_VM_ERROR 0x10000000 /* there was an error */ +#define NOTE_VM_PRESSURE 0x80000000 /* will react on memory pressure */ +#define NOTE_VM_PRESSURE_TERMINATE 0x40000000 /* will quit on memory pressure, possibly after cleaning up dirty state */ +#define NOTE_VM_PRESSURE_SUDDEN_TERMINATE 0x20000000 /* will quit immediately on memory pressure */ +#define NOTE_VM_ERROR 0x10000000 /* there was an error */ #ifdef PRIVATE /* * data/hint fflags for EVFILT_MEMORYSTATUS, shared with userspace. */ -#define NOTE_MEMORYSTATUS_PRESSURE_NORMAL 0x00000001 /* system memory pressure has returned to normal */ -#define NOTE_MEMORYSTATUS_PRESSURE_WARN 0x00000002 /* system memory pressure has changed to the warning state */ -#define NOTE_MEMORYSTATUS_PRESSURE_CRITICAL 0x00000004 /* system memory pressure has changed to the critical state */ -#define NOTE_MEMORYSTATUS_LOW_SWAP 0x00000008 /* system is in a low-swap state */ -#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0x00000010 /* process memory limit has hit a warning state */ -#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0x00000020 /* process memory limit has hit a critical state - soft limit */ +#define NOTE_MEMORYSTATUS_PRESSURE_NORMAL 0x00000001 /* system memory pressure has returned to normal */ +#define NOTE_MEMORYSTATUS_PRESSURE_WARN 0x00000002 /* system memory pressure has changed to the warning state */ +#define NOTE_MEMORYSTATUS_PRESSURE_CRITICAL 0x00000004 /* system memory pressure has changed to the critical state */ +#define NOTE_MEMORYSTATUS_LOW_SWAP 0x00000008 /* system is in a low-swap state */ +#define NOTE_MEMORYSTATUS_PROC_LIMIT_WARN 0x00000010 /* process memory limit has hit a warning state */ +#define NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL 0x00000020 /* process memory limit has hit a critical state - soft limit */ #define NOTE_MEMORYSTATUS_MSL_STATUS 0xf0000000 /* bits used to request change to process MSL status */ #ifdef KERNEL_PRIVATE @@ -558,56 +558,56 @@ typedef enum vm_pressure_level { * * All timeouts are implicitly EV_CLEAR events. */ -#define NOTE_SECONDS 0x00000001 /* data is seconds */ -#define NOTE_USECONDS 0x00000002 /* data is microseconds */ -#define NOTE_NSECONDS 0x00000004 /* data is nanoseconds */ -#define NOTE_ABSOLUTE 0x00000008 /* absolute timeout */ - /* ... implicit EV_ONESHOT, timeout uses the gettimeofday epoch */ -#define NOTE_LEEWAY 0x00000010 /* ext[1] holds leeway for power aware timers */ -#define NOTE_CRITICAL 0x00000020 /* system does minimal timer coalescing */ -#define NOTE_BACKGROUND 0x00000040 /* system does maximum timer coalescing */ -#define NOTE_MACH_CONTINUOUS_TIME 0x00000080 - /* - * NOTE_MACH_CONTINUOUS_TIME: - * with NOTE_ABSOLUTE: causes the timer to continue to tick across sleep, - * still uses gettimeofday epoch - * with NOTE_MACHTIME and NOTE_ABSOLUTE: uses mach continuous time epoch - * without NOTE_ABSOLUTE (interval timer mode): continues to tick across sleep - */ +#define NOTE_SECONDS 0x00000001 /* data is seconds */ +#define NOTE_USECONDS 0x00000002 /* data is microseconds */ +#define NOTE_NSECONDS 0x00000004 /* data is nanoseconds */ +#define NOTE_ABSOLUTE 0x00000008 /* absolute timeout */ +/* ... 
implicit EV_ONESHOT, timeout uses the gettimeofday epoch */ +#define NOTE_LEEWAY 0x00000010 /* ext[1] holds leeway for power aware timers */ +#define NOTE_CRITICAL 0x00000020 /* system does minimal timer coalescing */ +#define NOTE_BACKGROUND 0x00000040 /* system does maximum timer coalescing */ +#define NOTE_MACH_CONTINUOUS_TIME 0x00000080 +/* + * NOTE_MACH_CONTINUOUS_TIME: + * with NOTE_ABSOLUTE: causes the timer to continue to tick across sleep, + * still uses gettimeofday epoch + * with NOTE_MACHTIME and NOTE_ABSOLUTE: uses mach continuous time epoch + * without NOTE_ABSOLUTE (interval timer mode): continues to tick across sleep + */ #define NOTE_MACHTIME 0x00000100 /* data is mach absolute time units */ - /* timeout uses the mach absolute time epoch */ +/* timeout uses the mach absolute time epoch */ #ifdef PRIVATE /* * data/hint fflags for EVFILT_SOCK, shared with userspace. * */ -#define NOTE_CONNRESET 0x00000001 /* Received RST */ -#define NOTE_READCLOSED 0x00000002 /* Read side is shutdown */ -#define NOTE_WRITECLOSED 0x00000004 /* Write side is shutdown */ -#define NOTE_TIMEOUT 0x00000008 /* timeout: rexmt, keep-alive or persist */ -#define NOTE_NOSRCADDR 0x00000010 /* source address not available */ -#define NOTE_IFDENIED 0x00000020 /* interface denied connection */ -#define NOTE_SUSPEND 0x00000040 /* output queue suspended */ -#define NOTE_RESUME 0x00000080 /* output queue resumed */ -#define NOTE_KEEPALIVE 0x00000100 /* TCP Keepalive received */ -#define NOTE_ADAPTIVE_WTIMO 0x00000200 /* TCP adaptive write timeout */ -#define NOTE_ADAPTIVE_RTIMO 0x00000400 /* TCP adaptive read timeout */ -#define NOTE_CONNECTED 0x00000800 /* socket is connected */ -#define NOTE_DISCONNECTED 0x00001000 /* socket is disconnected */ -#define NOTE_CONNINFO_UPDATED 0x00002000 /* connection info was updated */ -#define NOTE_NOTIFY_ACK 0x00004000 /* notify acknowledgement */ +#define NOTE_CONNRESET 0x00000001 /* Received RST */ +#define NOTE_READCLOSED 0x00000002 /* Read side is shutdown */ +#define NOTE_WRITECLOSED 0x00000004 /* Write side is shutdown */ +#define NOTE_TIMEOUT 0x00000008 /* timeout: rexmt, keep-alive or persist */ +#define NOTE_NOSRCADDR 0x00000010 /* source address not available */ +#define NOTE_IFDENIED 0x00000020 /* interface denied connection */ +#define NOTE_SUSPEND 0x00000040 /* output queue suspended */ +#define NOTE_RESUME 0x00000080 /* output queue resumed */ +#define NOTE_KEEPALIVE 0x00000100 /* TCP Keepalive received */ +#define NOTE_ADAPTIVE_WTIMO 0x00000200 /* TCP adaptive write timeout */ +#define NOTE_ADAPTIVE_RTIMO 0x00000400 /* TCP adaptive read timeout */ +#define NOTE_CONNECTED 0x00000800 /* socket is connected */ +#define NOTE_DISCONNECTED 0x00001000 /* socket is disconnected */ +#define NOTE_CONNINFO_UPDATED 0x00002000 /* connection info was updated */ +#define NOTE_NOTIFY_ACK 0x00004000 /* notify acknowledgement */ #define EVFILT_SOCK_LEVEL_TRIGGER_MASK \ - (NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_SUSPEND | NOTE_RESUME | \ - NOTE_CONNECTED | NOTE_DISCONNECTED) + (NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_SUSPEND | NOTE_RESUME | \ + NOTE_CONNECTED | NOTE_DISCONNECTED) #define EVFILT_SOCK_ALL_MASK \ - (NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | \ - NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | NOTE_RESUME | \ - NOTE_KEEPALIVE | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO | \ - NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED | \ - NOTE_NOTIFY_ACK) + (NOTE_CONNRESET | NOTE_READCLOSED | NOTE_WRITECLOSED | NOTE_TIMEOUT | \ + 
NOTE_NOSRCADDR | NOTE_IFDENIED | NOTE_SUSPEND | NOTE_RESUME | \ + NOTE_KEEPALIVE | NOTE_ADAPTIVE_WTIMO | NOTE_ADAPTIVE_RTIMO | \ + NOTE_CONNECTED | NOTE_DISCONNECTED | NOTE_CONNINFO_UPDATED | \ + NOTE_NOTIFY_ACK) #endif /* PRIVATE */ @@ -649,9 +649,9 @@ typedef enum vm_pressure_level { * NOTE_TRACK, NOTE_TRACKERR, and NOTE_CHILD are no longer supported as of 10.5 */ /* additional flags for EVFILT_PROC */ -#define NOTE_TRACK 0x00000001 /* follow across forks */ -#define NOTE_TRACKERR 0x00000002 /* could not track child */ -#define NOTE_CHILD 0x00000004 /* am a child process */ +#define NOTE_TRACK 0x00000001 /* follow across forks */ +#define NOTE_TRACKERR 0x00000002 /* could not track child */ +#define NOTE_CHILD 0x00000004 /* am a child process */ #ifdef PRIVATE @@ -677,36 +677,36 @@ SLIST_HEAD(klist, knote); MALLOC_DECLARE(M_KQUEUE); #endif -TAILQ_HEAD(kqtailq, knote); /* a list of "queued" events */ +TAILQ_HEAD(kqtailq, knote); /* a list of "queued" events */ /* index into various kq queues */ typedef uint8_t kq_index_t; typedef uint16_t kn_status_t; -#define KN_ACTIVE 0x0001 /* event has been triggered */ -#define KN_QUEUED 0x0002 /* event is on queue */ -#define KN_DISABLED 0x0004 /* event is disabled */ -#define KN_DROPPING 0x0008 /* knote is being dropped */ -#define KN_LOCKED 0x0010 /* knote is locked (kq_knlocks) */ -#define KN_ATTACHING 0x0020 /* event is pending attach */ -#define KN_STAYACTIVE 0x0040 /* force event to stay active */ -#define KN_DEFERDELETE 0x0080 /* defer delete until re-enabled */ -#define KN_ATTACHED 0x0100 /* currently attached to source */ -#define KN_DISPATCH 0x0200 /* disables as part of deliver */ -#define KN_UDATA_SPECIFIC 0x0400 /* udata is part of matching */ -#define KN_SUPPRESSED 0x0800 /* event is suppressed during delivery */ -#define KN_MERGE_QOS 0x1000 /* f_event() / f_* ran concurrently and - overrides must merge */ -#define KN_REQVANISH 0x2000 /* requested EV_VANISH */ -#define KN_VANISHED 0x4000 /* has vanished */ +#define KN_ACTIVE 0x0001 /* event has been triggered */ +#define KN_QUEUED 0x0002 /* event is on queue */ +#define KN_DISABLED 0x0004 /* event is disabled */ +#define KN_DROPPING 0x0008 /* knote is being dropped */ +#define KN_LOCKED 0x0010 /* knote is locked (kq_knlocks) */ +#define KN_ATTACHING 0x0020 /* event is pending attach */ +#define KN_STAYACTIVE 0x0040 /* force event to stay active */ +#define KN_DEFERDELETE 0x0080 /* defer delete until re-enabled */ +#define KN_ATTACHED 0x0100 /* currently attached to source */ +#define KN_DISPATCH 0x0200 /* disables as part of deliver */ +#define KN_UDATA_SPECIFIC 0x0400 /* udata is part of matching */ +#define KN_SUPPRESSED 0x0800 /* event is suppressed during delivery */ +#define KN_MERGE_QOS 0x1000 /* f_event() / f_* ran concurrently and + * overrides must merge */ +#define KN_REQVANISH 0x2000 /* requested EV_VANISH */ +#define KN_VANISHED 0x4000 /* has vanished */ // 0x8000 /* combination defines deferred-delete mode enabled */ -#define KN_DISPATCH2 (KN_DISPATCH | KN_UDATA_SPECIFIC) +#define KN_DISPATCH2 (KN_DISPATCH | KN_UDATA_SPECIFIC) #define KNOTE_KQ_BITSIZE 42 _Static_assert(KNOTE_KQ_BITSIZE >= VM_KERNEL_POINTER_SIGNIFICANT_BITS, - "Make sure sign extending kn_kq_packed is legit"); + "Make sure sign extending kn_kq_packed is legit"); struct kqueue; struct knote { @@ -714,11 +714,11 @@ struct knote { SLIST_ENTRY(knote) kn_link; /* linkage for search list */ SLIST_ENTRY(knote) kn_selnext; /* klist element chain */ uintptr_t kn_filtid:8, /* filter id to index filter ops */ - 
kn_req_index:4, /* requested qos index */ - kn_qos_index:4, /* in-use qos index */ - kn_qos_override:4, /* qos override index */ - kn_vnode_kqok:1, - kn_vnode_use_ofst:1; + kn_req_index:4, /* requested qos index */ + kn_qos_index:4, /* in-use qos index */ + kn_qos_override:4, /* qos override index */ + kn_vnode_kqok:1, + kn_vnode_use_ofst:1; #if __LP64__ intptr_t kn_kq_packed : KNOTE_KQ_BITSIZE; #else @@ -740,16 +740,16 @@ struct knote { uint16_t kn_inuse; /* inuse count */ kn_status_t kn_status; /* status bits */ -#define kn_id kn_kevent.ident -#define kn_filter kn_kevent.filter -#define kn_flags kn_kevent.flags -#define kn_qos kn_kevent.qos -#define kn_udata kn_kevent.udata -#define kn_fflags kn_kevent.fflags -#define kn_xflags kn_kevent.xflags -#define kn_data kn_kevent.data -#define kn_ext kn_kevent.ext -#define kn_fp kn_ptr.p_fp +#define kn_id kn_kevent.ident +#define kn_filter kn_kevent.filter +#define kn_flags kn_kevent.flags +#define kn_qos kn_kevent.qos +#define kn_udata kn_kevent.udata +#define kn_fflags kn_kevent.fflags +#define kn_xflags kn_kevent.xflags +#define kn_data kn_kevent.data +#define kn_ext kn_kevent.ext +#define kn_fp kn_ptr.p_fp }; static inline struct kqueue * @@ -758,7 +758,8 @@ knote_get_kq(struct knote *kn) return (struct kqueue *)kn->kn_kq_packed; } -static inline int knote_get_seltype(struct knote *kn) +static inline int +knote_get_seltype(struct knote *kn) { switch (kn->kn_filter) { case EVFILT_READ: @@ -767,12 +768,13 @@ static inline int knote_get_seltype(struct knote *kn) return FWRITE; default: panic("%s(%p): invalid filter %d\n", - __func__, kn, kn->kn_filter); + __func__, kn, kn->kn_filter); return 0; } } -static inline void knote_set_error(struct knote *kn, int error) +static inline void +knote_set_error(struct knote *kn, int error) { kn->kn_flags |= EV_ERROR; kn->kn_data = error; @@ -949,7 +951,7 @@ struct uthread; struct waitq; struct filterops { - bool f_isfd; /* true if ident == filedescriptor */ + bool f_isfd; /* true if ident == filedescriptor */ bool f_adjusts_qos; /* true if the filter can override the knote */ bool f_extended_codes; /* hooks return extended codes */ @@ -963,7 +965,7 @@ struct filterops { /* optional & advanced */ bool (*f_allow_drop)(struct knote *kn, struct kevent_internal_s *kev); void (*f_post_register_wait)(struct uthread *uth, struct knote_lock_ctx *ctx, - struct _kevent_register *ss_kr); + struct _kevent_register *ss_kr); }; /* @@ -1032,31 +1034,31 @@ struct filterops { #define FILTER_ADJUST_EVENT_QOS_MASK 0x00000070 #define FILTER_ADJUST_EVENT_QOS_SHIFT 4 #define FILTER_ADJUST_EVENT_QOS(qos) \ - (((qos) << FILTER_ADJUST_EVENT_QOS_SHIFT) | FILTER_ADJUST_EVENT_QOS_BIT) + (((qos) << FILTER_ADJUST_EVENT_QOS_SHIFT) | FILTER_ADJUST_EVENT_QOS_BIT) #define FILTER_RESET_EVENT_QOS FILTER_ADJUST_EVENT_QOS_BIT #define filter_call(_ops, call) \ - ((_ops)->f_extended_codes ? (_ops)->call : !!((_ops)->call)) + ((_ops)->f_extended_codes ? 
(_ops)->call : !!((_ops)->call)) SLIST_HEAD(klist, knote); -extern void knote_init(void); -extern void klist_init(struct klist *list); - -#define KNOTE(list, hint) knote(list, hint) -#define KNOTE_ATTACH(list, kn) knote_attach(list, kn) -#define KNOTE_DETACH(list, kn) knote_detach(list, kn) - -extern void knote(struct klist *list, long hint); -extern int knote_attach(struct klist *list, struct knote *kn); -extern int knote_detach(struct klist *list, struct knote *kn); -extern void knote_vanish(struct klist *list); -extern void knote_link_waitqset_lazy_alloc(struct knote *kn); +extern void knote_init(void); +extern void klist_init(struct klist *list); + +#define KNOTE(list, hint) knote(list, hint) +#define KNOTE_ATTACH(list, kn) knote_attach(list, kn) +#define KNOTE_DETACH(list, kn) knote_detach(list, kn) + +extern void knote(struct klist *list, long hint); +extern int knote_attach(struct klist *list, struct knote *kn); +extern int knote_detach(struct klist *list, struct knote *kn); +extern void knote_vanish(struct klist *list, bool make_active); +extern void knote_link_waitqset_lazy_alloc(struct knote *kn); extern boolean_t knote_link_waitqset_should_lazy_alloc(struct knote *kn); -extern int knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link); -extern int knote_unlink_waitq(struct knote *kn, struct waitq *wq); -extern void knote_fdclose(struct proc *p, int fd); -extern void knote_markstayactive(struct knote *kn); -extern void knote_clearstayactive(struct knote *kn); +extern int knote_link_waitq(struct knote *kn, struct waitq *wq, uint64_t *reserved_link); +extern int knote_unlink_waitq(struct knote *kn, struct waitq *wq); +extern void knote_fdclose(struct proc *p, int fd); +extern void knote_markstayactive(struct knote *kn); +extern void knote_clearstayactive(struct knote *kn); extern const struct filterops *knote_fops(struct knote *kn); extern void knote_set_error(struct knote *kn, int error); @@ -1066,11 +1068,11 @@ extern struct turnstile *kqueue_alloc_turnstile(struct kqueue *); int kevent_exit_on_workloop_ownership_leak(thread_t thread); int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize); int kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, - uint32_t ubufsize, int32_t *nkqueues_out); + uint32_t ubufsize, int32_t *nkqueues_out); int kevent_copyout_dynkqinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf, - uint32_t ubufsize, int32_t *size_out); + uint32_t ubufsize, int32_t *size_out); int kevent_copyout_dynkqextinfo(void *proc, kqueue_id_t kq_id, user_addr_t ubuf, - uint32_t ubufsize, int32_t *nknotes_out); + uint32_t ubufsize, int32_t *nknotes_out); #elif defined(KERNEL_PRIVATE) /* !XNU_KERNEL_PRIVATE: kexts still need a klist structure definition */ @@ -1085,22 +1087,22 @@ SLIST_HEAD(klist, knote); #ifdef PRIVATE /* make these private functions available to the pthread kext */ -extern int kevent_qos_internal(struct proc *p, int fd, - user_addr_t changelist, int nchanges, - user_addr_t eventlist, int nevents, - user_addr_t data_out, user_size_t *data_available, - unsigned int flags, int32_t *retval); - -extern int kevent_id_internal(struct proc *p, kqueue_id_t *id, - user_addr_t changelist, int nchanges, - user_addr_t eventlist, int nevents, - user_addr_t data_out, user_size_t *data_available, - unsigned int flags, int32_t *retval); +extern int kevent_qos_internal(struct proc *p, int fd, + user_addr_t changelist, int nchanges, + user_addr_t eventlist, int nevents, + user_addr_t data_out, user_size_t *data_available, + unsigned int 
flags, int32_t *retval); + +extern int kevent_id_internal(struct proc *p, kqueue_id_t *id, + user_addr_t changelist, int nchanges, + user_addr_t eventlist, int nevents, + user_addr_t data_out, user_size_t *data_available, + unsigned int flags, int32_t *retval); #endif /* PRIVATE */ #endif /* KERNEL_PRIVATE */ -#else /* KERNEL */ +#else /* KERNEL */ #include @@ -1109,27 +1111,27 @@ struct timespec; __BEGIN_DECLS int kqueue(void); int kevent(int kq, - const struct kevent *changelist, int nchanges, - struct kevent *eventlist, int nevents, - const struct timespec *timeout); + const struct kevent *changelist, int nchanges, + struct kevent *eventlist, int nevents, + const struct timespec *timeout); int kevent64(int kq, - const struct kevent64_s *changelist, int nchanges, - struct kevent64_s *eventlist, int nevents, - unsigned int flags, - const struct timespec *timeout); + const struct kevent64_s *changelist, int nchanges, + struct kevent64_s *eventlist, int nevents, + unsigned int flags, + const struct timespec *timeout); #ifdef PRIVATE int kevent_qos(int kq, - const struct kevent_qos_s *changelist, int nchanges, - struct kevent_qos_s *eventlist, int nevents, - void *data_out, size_t *data_available, - unsigned int flags); + const struct kevent_qos_s *changelist, int nchanges, + struct kevent_qos_s *eventlist, int nevents, + void *data_out, size_t *data_available, + unsigned int flags); int kevent_id(kqueue_id_t id, - const struct kevent_qos_s *changelist, int nchanges, - struct kevent_qos_s *eventlist, int nevents, - void *data_out, size_t *data_available, - unsigned int flags); + const struct kevent_qos_s *changelist, int nchanges, + struct kevent_qos_s *eventlist, int nevents, + void *data_out, size_t *data_available, + unsigned int flags); #endif /* PRIVATE */ __END_DECLS @@ -1140,8 +1142,8 @@ __END_DECLS #ifdef PRIVATE /* Flags for pending events notified by kernel via return-to-kernel ast */ -#define R2K_WORKLOOP_PENDING_EVENTS 0x1 -#define R2K_WORKQ_PENDING_EVENTS 0x2 +#define R2K_WORKLOOP_PENDING_EVENTS 0x1 +#define R2K_WORKQ_PENDING_EVENTS 0x2 #endif /* PRIVATE */ diff --git a/bsd/sys/eventhandler.h b/bsd/sys/eventhandler.h index e5b717bfe..82f2c8439 100644 --- a/bsd/sys/eventhandler.h +++ b/bsd/sys/eventhandler.h @@ -74,70 +74,70 @@ struct eventhandler_lists_ctxt { }; struct eventhandler_entry_arg { - uuid_t ee_fmc_uuid; /* Flow manager UUID */ - uuid_t ee_fr_uuid; /* Flow route UUID */ + uuid_t ee_fmc_uuid; /* Flow manager UUID */ + uuid_t ee_fr_uuid; /* Flow route UUID */ }; struct eventhandler_entry { - TAILQ_ENTRY(eventhandler_entry) ee_link; - int ee_priority; -#define EHE_DEAD_PRIORITY (-1) - struct eventhandler_entry_arg ee_arg; + TAILQ_ENTRY(eventhandler_entry) ee_link; + int ee_priority; +#define EHE_DEAD_PRIORITY (-1) + struct eventhandler_entry_arg ee_arg; }; -#define EVENTHANDLER_MAX_NAME 32 +#define EVENTHANDLER_MAX_NAME 32 struct eventhandler_list { - char el_name[EVENTHANDLER_MAX_NAME]; - int el_flags; -#define EHL_INITTED (1<<0) - u_int el_runcount; + char el_name[EVENTHANDLER_MAX_NAME]; + int el_flags; +#define EHL_INITTED (1<<0) + u_int el_runcount; decl_lck_mtx_data(, el_lock); - TAILQ_ENTRY(eventhandler_list) el_link; - TAILQ_HEAD(,eventhandler_entry) el_entries; + TAILQ_ENTRY(eventhandler_list) el_link; + TAILQ_HEAD(, eventhandler_entry) el_entries; }; -typedef struct eventhandler_entry *eventhandler_tag; +typedef struct eventhandler_entry *eventhandler_tag; -#define EHL_LOCK_INIT(p) lck_mtx_init(&(p)->el_lock, el_lock_grp, el_lock_attr) -#define EHL_LOCK(p) 
lck_mtx_lock(&(p)->el_lock) -#define EHL_LOCK_SPIN(p) lck_mtx_lock_spin(&(p)->el_lock) -#define EHL_LOCK_CONVERT(p) lck_mtx_convert_spin(&(p)->el_lock) -#define EHL_UNLOCK(p) lck_mtx_unlock(&(p)->el_lock) -#define EHL_LOCK_ASSERT(p, x) LCK_MTX_ASSERT(&(p)->el_lock, x) -#define EHL_LOCK_DESTROY(p) lck_mtx_destroy(&(p)->el_lock, el_lock_grp) +#define EHL_LOCK_INIT(p) lck_mtx_init(&(p)->el_lock, el_lock_grp, el_lock_attr) +#define EHL_LOCK(p) lck_mtx_lock(&(p)->el_lock) +#define EHL_LOCK_SPIN(p) lck_mtx_lock_spin(&(p)->el_lock) +#define EHL_LOCK_CONVERT(p) lck_mtx_convert_spin(&(p)->el_lock) +#define EHL_UNLOCK(p) lck_mtx_unlock(&(p)->el_lock) +#define EHL_LOCK_ASSERT(p, x) LCK_MTX_ASSERT(&(p)->el_lock, x) +#define EHL_LOCK_DESTROY(p) lck_mtx_destroy(&(p)->el_lock, el_lock_grp) #define evhlog(x) do { if (evh_debug >= 1) log x; } while (0) /* * Macro to invoke the handlers for a given event. */ -#define _EVENTHANDLER_INVOKE(name, list, ...) do { \ - struct eventhandler_entry *_ep; \ - struct eventhandler_entry_ ## name *_t; \ - \ - VERIFY((list)->el_flags & EHL_INITTED); \ - EHL_LOCK_ASSERT((list), LCK_MTX_ASSERT_OWNED); \ - (list)->el_runcount++; \ - VERIFY((list)->el_runcount > 0); \ - evhlog((LOG_DEBUG, "eventhandler_invoke(\"" __STRING(name) "\")")); \ - TAILQ_FOREACH(_ep, &((list)->el_entries), ee_link) { \ - if (_ep->ee_priority != EHE_DEAD_PRIORITY) { \ - EHL_UNLOCK((list)); \ - _t = (struct eventhandler_entry_ ## name *)_ep; \ - evhlog((LOG_DEBUG, "eventhandler_invoke: executing %p", \ - VM_KERNEL_UNSLIDE((void *)_t->eh_func))); \ - _t->eh_func(_ep->ee_arg , ## __VA_ARGS__); \ - EHL_LOCK_SPIN((list)); \ - } \ - } \ - VERIFY((list)->el_runcount > 0); \ - (list)->el_runcount--; \ - if ((list)->el_runcount == 0) { \ - EHL_LOCK_CONVERT((list)); \ - eventhandler_prune_list(list); \ - } \ - EHL_UNLOCK((list)); \ +#define _EVENTHANDLER_INVOKE(name, list, ...) do { \ + struct eventhandler_entry *_ep; \ + struct eventhandler_entry_ ## name *_t; \ + \ + VERIFY((list)->el_flags & EHL_INITTED); \ + EHL_LOCK_ASSERT((list), LCK_MTX_ASSERT_OWNED); \ + (list)->el_runcount++; \ + VERIFY((list)->el_runcount > 0); \ + evhlog((LOG_DEBUG, "eventhandler_invoke(\"" __STRING(name) "\")")); \ + TAILQ_FOREACH(_ep, &((list)->el_entries), ee_link) { \ + if (_ep->ee_priority != EHE_DEAD_PRIORITY) { \ + EHL_UNLOCK((list)); \ + _t = (struct eventhandler_entry_ ## name *)_ep; \ + evhlog((LOG_DEBUG, "eventhandler_invoke: executing %p", \ + VM_KERNEL_UNSLIDE((void *)_t->eh_func))); \ + _t->eh_func(_ep->ee_arg , ## __VA_ARGS__); \ + EHL_LOCK_SPIN((list)); \ + } \ + } \ + VERIFY((list)->el_runcount > 0); \ + (list)->el_runcount--; \ + if ((list)->el_runcount == 0) { \ + EHL_LOCK_CONVERT((list)); \ + eventhandler_prune_list(list); \ + } \ + EHL_UNLOCK((list)); \ } while (0) /* @@ -147,48 +147,48 @@ typedef struct eventhandler_entry *eventhandler_tag; * Slow handlers need to be declared, but do not need to be defined. The * declaration must be in scope wherever the handler is to be invoked. 
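*
* To make that flow concrete, here is a hypothetical kernel-side sketch (not
* part of this patch) tying the macros below together. The event name
* "linkstate", the handler, and the wrapper function are all invented, and
* declarations are abridged to what this header shows.
*/

/* A handler receives the registration's eventhandler_entry_arg first,
 * followed by the arguments passed to EVENTHANDLER_INVOKE(). */
typedef void (*linkstate_fn_t)(struct eventhandler_entry_arg, int);
EVENTHANDLER_DECLARE(linkstate, linkstate_fn_t);

static void
linkstate_logger(struct eventhandler_entry_arg arg, int up)
{
	printf("link is %s\n", up ? "up" : "down");	/* kernel printf */
}

static void
linkstate_example(struct eventhandler_lists_ctxt *ctxt,
    struct eventhandler_entry_arg arg)
{
	/* Hypothetical usage: register, fire every handler, deregister. */
	eventhandler_tag tag;

	tag = EVENTHANDLER_REGISTER(ctxt, linkstate, linkstate_logger, arg,
	    EVENTHANDLER_PRI_ANY);
	EVENTHANDLER_INVOKE(ctxt, linkstate, 1);	/* up = 1 */
	EVENTHANDLER_DEREGISTER(ctxt, linkstate, tag);
}

/*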
*/ -#define EVENTHANDLER_DECLARE(name, type) \ -struct eventhandler_entry_ ## name \ -{ \ - struct eventhandler_entry ee; \ - type eh_func; \ -}; \ +#define EVENTHANDLER_DECLARE(name, type) \ +struct eventhandler_entry_ ## name \ +{ \ + struct eventhandler_entry ee; \ + type eh_func; \ +}; \ struct __hack /* * XXX EVENTHANDLER_DEFINE by itself doesn't do much on XNU - * All it does is that it declares the static eventhandler_tag + * All it does is that it declares the static eventhandler_tag * and defines an init routine that still needs to be called to put the * event and callback on the list. - */ -#define EVENTHANDLER_DEFINE(evthdlr_ref, name, func, arg, priority) \ - static eventhandler_tag name ## _tag; \ - static void name ## _evh_init(void *ctx) \ - { \ - name ## _tag = EVENTHANDLER_REGISTER(evthdlr_ref, name, func, ctx, \ - priority); \ - } \ - SYSINIT(name ## _evh_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, \ - name ## _evh_init, arg); \ + */ +#define EVENTHANDLER_DEFINE(evthdlr_ref, name, func, arg, priority) \ + static eventhandler_tag name ## _tag; \ + static void name ## _evh_init(void *ctx) \ + { \ + name ## _tag = EVENTHANDLER_REGISTER(evthdlr_ref, name, func, ctx, \ + priority); \ + } \ + SYSINIT(name ## _evh_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, \ + name ## _evh_init, arg); \ struct __hack -#define EVENTHANDLER_INVOKE(evthdlr_ref, name, ...) \ -do { \ - struct eventhandler_list *_el; \ - \ - if ((_el = eventhandler_find_list(evthdlr_ref, #name)) != NULL) \ - _EVENTHANDLER_INVOKE(name, _el , ## __VA_ARGS__); \ +#define EVENTHANDLER_INVOKE(evthdlr_ref, name, ...) \ +do { \ + struct eventhandler_list *_el; \ + \ + if ((_el = eventhandler_find_list(evthdlr_ref, #name)) != NULL) \ + _EVENTHANDLER_INVOKE(name, _el , ## __VA_ARGS__); \ } while (0) -#define EVENTHANDLER_REGISTER(evthdlr_ref, name, func, arg, priority) \ +#define EVENTHANDLER_REGISTER(evthdlr_ref, name, func, arg, priority) \ eventhandler_register(evthdlr_ref, NULL, #name, func, arg, priority) -#define EVENTHANDLER_DEREGISTER(evthdlr_ref, name, tag) \ -do { \ - struct eventhandler_list *_el; \ - \ - if ((_el = eventhandler_find_list(evthdlr_ref, #name)) != NULL) \ - eventhandler_deregister(_el, tag); \ +#define EVENTHANDLER_DEREGISTER(evthdlr_ref, name, tag) \ +do { \ + struct eventhandler_list *_el; \ + \ + if ((_el = eventhandler_find_list(evthdlr_ref, #name)) != NULL) \ + eventhandler_deregister(_el, tag); \ } while(0) void eventhandler_init(void); @@ -198,14 +198,14 @@ eventhandler_tag eventhandler_register(struct eventhandler_lists_ctxt *evthdlr_l void eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag); struct eventhandler_list *eventhandler_find_list( - struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name); + struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name); void eventhandler_prune_list(struct eventhandler_list *list); void eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt); void eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt); /* Generic priority levels */ -#define EVENTHANDLER_PRI_FIRST 0 -#define EVENTHANDLER_PRI_ANY 10000 -#define EVENTHANDLER_PRI_LAST 20000 +#define EVENTHANDLER_PRI_FIRST 0 +#define EVENTHANDLER_PRI_ANY 10000 +#define EVENTHANDLER_PRI_LAST 20000 #endif /* _SYS_EVENTHANDLER_H_ */ diff --git a/bsd/sys/eventvar.h b/bsd/sys/eventvar.h index e60eaeb86..e15a1a757 100644 --- a/bsd/sys/eventvar.h +++ b/bsd/sys/eventvar.h @@ -98,7 +98,7 @@ typedef void
(*kqueue_continue_t)(struct kqueue *, void *, int); * XXX -> kq-waitq-set lock -> kq-request lock -> pthread kext locks -> thread lock */ -#define KQEXTENT 256 /* linear growth by this amount */ +#define KQEXTENT 256 /* linear growth by this amount */ struct knote_lock_ctx { struct knote *knlc_knote; @@ -124,14 +124,14 @@ LIST_HEAD(knote_locks, knote_lock_ctx); * the stack named `name`. In development kernels, it uses tricks to make sure * no locks are still held when exiting the C-scope that contains this context. */ -__attribute__((noinline,not_tail_called)) +__attribute__((noinline, not_tail_called)) void knote_lock_ctx_chk(struct knote_lock_ctx *ctx); #define KNOTE_LOCK_CTX(n) \ - struct knote_lock_ctx n __attribute__((cleanup(knote_lock_ctx_chk))); \ - n.knlc_state = KNOTE_LOCK_CTX_UNLOCKED + struct knote_lock_ctx n __attribute__((cleanup(knote_lock_ctx_chk))); \ + n.knlc_state = KNOTE_LOCK_CTX_UNLOCKED #else #define KNOTE_LOCK_CTX(n) \ - struct knote_lock_ctx n + struct knote_lock_ctx n #endif /* @@ -208,8 +208,8 @@ struct kqrequest { #define KQR_WORKLOOP 0x01 /* owner is a workloop */ -#define KQR_THREQUESTED 0x02 /* thread has been requested from workq */ -#define KQR_WAKEUP 0x04 /* wakeup called during processing */ +#define KQR_THREQUESTED 0x02 /* thread has been requested from workq */ +#define KQR_WAKEUP 0x04 /* wakeup called during processing */ #define KQR_THOVERCOMMIT 0x08 /* overcommit needed for thread requests */ #define KQR_R2K_NOTIF_ARMED 0x10 /* ast notifications armed */ #define KQR_ALLOCATED_TURNSTILE 0x20 /* kqwl_turnstile is allocated */ @@ -305,10 +305,10 @@ struct kqworkloop { #if CONFIG_WORKLOOP_DEBUG #define KQWL_HISTORY_COUNT 32 #define KQWL_HISTORY_WRITE_ENTRY(kqwl, ...) ({ \ - struct kqworkloop *__kqwl = (kqwl); \ - unsigned int __index = os_atomic_inc_orig(&__kqwl->kqwl_index, relaxed); \ - __kqwl->kqwl_history[__index % KQWL_HISTORY_COUNT] = \ - (struct kqwl_history)__VA_ARGS__; \ + struct kqworkloop *__kqwl = (kqwl); \ + unsigned int __index = os_atomic_inc_orig(&__kqwl->kqwl_index, relaxed); \ + __kqwl->kqwl_history[__index % KQWL_HISTORY_COUNT] = \ + (struct kqwl_history)__VA_ARGS__; \ }) struct kqwl_history { thread_t updater; /* Note: updates can be reordered */ @@ -330,7 +330,7 @@ struct kqworkloop { }; typedef union { - struct kqueue *kq; + struct kqueue *kq; struct kqworkq *kqwq; struct kqfile *kqf; struct kqworkloop *kqwl; @@ -352,7 +352,7 @@ extern void kqueue_threadreq_unbind(struct proc *p, struct kqrequest *kqr); // called with the kq req held #define KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE 0x1 extern void kqueue_threadreq_bind(struct proc *p, workq_threadreq_t req, - thread_t thread, unsigned int flags); + thread_t thread, unsigned int flags); // called with the wq lock held extern void kqueue_threadreq_bind_prepost(struct proc *p, workq_threadreq_t req, thread_t thread); @@ -372,9 +372,9 @@ extern void knotes_dealloc(struct proc *); extern void kqworkloops_dealloc(struct proc *); extern int kevent_register(struct kqueue *, struct kevent_internal_s *, - struct knote_lock_ctx *); + struct knote_lock_ctx *); extern int kqueue_scan(struct kqueue *, kevent_callback_t, kqueue_continue_t, - void *, struct filt_process_s *, struct timeval *, struct proc *); + void *, struct filt_process_s *, struct timeval *, struct proc *); extern int kqueue_stat(struct kqueue *, void *, int, proc_t); #endif /* XNU_KERNEL_PRIVATE */ diff --git a/bsd/sys/exec.h b/bsd/sys/exec.h index ee79964a3..d3138193a 100644 --- a/bsd/sys/exec.h +++ b/bsd/sys/exec.h @@ -2,7
+2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -66,8 +66,8 @@ * @(#)exec.h 8.3 (Berkeley) 1/21/94 */ -#ifndef _SYS_EXEC_H_ -#define _SYS_EXEC_H_ +#ifndef _SYS_EXEC_H_ +#define _SYS_EXEC_H_ #include @@ -80,4 +80,3 @@ #include #endif /* !_SYS_EXEC_H_ */ - diff --git a/bsd/sys/fcntl.h b/bsd/sys/fcntl.h index 02c868bba..de413f34c 100644 --- a/bsd/sys/fcntl.h +++ b/bsd/sys/fcntl.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -68,7 +68,7 @@ #ifndef _SYS_FCNTL_H_ -#define _SYS_FCNTL_H_ +#define _SYS_FCNTL_H_ /* * This file includes the definitions for open and fcntl @@ -95,10 +95,10 @@ * Open/fcntl flags begin with O_; kernel-internal flags begin with F. 
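*
* A small, illustrative user-space example (not part of the header or this
* patch) of composing the O_* flags defined below; create_lockfile is a
* hypothetical name.
*/

#include <fcntl.h>
#include <errno.h>

/* Illustrative helper: exclusive-create a lock file. O_EXCL makes open()
 * fail with EEXIST when the path already exists, so exactly one caller
 * can win; O_CLOEXEC keeps the descriptor from leaking across exec. */
static int
create_lockfile(const char *path)
{
	int fd = open(path, O_WRONLY | O_CREAT | O_EXCL | O_CLOEXEC, 0600);

	if (fd < 0 && errno == EEXIST) {
		/* somebody else already holds the lock */
	}
	return fd;
}

/*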
*/ /* open-only flags */ -#define O_RDONLY 0x0000 /* open for reading only */ -#define O_WRONLY 0x0001 /* open for writing only */ -#define O_RDWR 0x0002 /* open for reading and writing */ -#define O_ACCMODE 0x0003 /* mask for above modes */ +#define O_RDONLY 0x0000 /* open for reading only */ +#define O_WRONLY 0x0001 /* open for writing only */ +#define O_RDWR 0x0002 /* open for reading and writing */ +#define O_ACCMODE 0x0003 /* mask for above modes */ /* * Kernel encoding of open mode; separate read and write bits that are @@ -109,107 +109,107 @@ * which was documented to use FREAD/FWRITE, continues to work. */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define FREAD 0x0001 -#define FWRITE 0x0002 +#define FREAD 0x0001 +#define FWRITE 0x0002 #endif -#define O_NONBLOCK 0x0004 /* no delay */ -#define O_APPEND 0x0008 /* set append mode */ +#define O_NONBLOCK 0x0004 /* no delay */ +#define O_APPEND 0x0008 /* set append mode */ #include #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define O_SHLOCK 0x0010 /* open with shared file lock */ -#define O_EXLOCK 0x0020 /* open with exclusive file lock */ -#define O_ASYNC 0x0040 /* signal pgrp when data ready */ -#define O_FSYNC O_SYNC /* source compatibility: do not use */ +#define O_SHLOCK 0x0010 /* open with shared file lock */ +#define O_EXLOCK 0x0020 /* open with exclusive file lock */ +#define O_ASYNC 0x0040 /* signal pgrp when data ready */ +#define O_FSYNC O_SYNC /* source compatibility: do not use */ #define O_NOFOLLOW 0x0100 /* don't follow symlinks */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define O_CREAT 0x0200 /* create if nonexistant */ -#define O_TRUNC 0x0400 /* truncate to zero length */ -#define O_EXCL 0x0800 /* error if already exists */ +#define O_CREAT 0x0200 /* create if nonexistant */ +#define O_TRUNC 0x0400 /* truncate to zero length */ +#define O_EXCL 0x0800 /* error if already exists */ #ifdef KERNEL -#define FMARK 0x1000 /* mark during gc() */ -#define FDEFER 0x2000 /* defer for next gc pass */ -#define FHASLOCK 0x4000 /* descriptor holds advisory lock */ +#define FMARK 0x1000 /* mark during gc() */ +#define FDEFER 0x2000 /* defer for next gc pass */ +#define FHASLOCK 0x4000 /* descriptor holds advisory lock */ #endif -#if __DARWIN_C_LEVEL >= 200809L +#if __DARWIN_C_LEVEL >= 200809L /* * Descriptor value for the current working directory */ -#define AT_FDCWD -2 +#define AT_FDCWD -2 /* * Flags for the at functions */ -#define AT_EACCESS 0x0010 /* Use effective ids in access check */ -#define AT_SYMLINK_NOFOLLOW 0x0020 /* Act on the symlink itself not the target */ -#define AT_SYMLINK_FOLLOW 0x0040 /* Act on target of symlink */ -#define AT_REMOVEDIR 0x0080 /* Path refers to directory */ +#define AT_EACCESS 0x0010 /* Use effective ids in access check */ +#define AT_SYMLINK_NOFOLLOW 0x0020 /* Act on the symlink itself not the target */ +#define AT_SYMLINK_FOLLOW 0x0040 /* Act on target of symlink */ +#define AT_REMOVEDIR 0x0080 /* Path refers to directory */ #endif #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define O_EVTONLY 0x8000 /* descriptor requested for event notifications only */ +#define O_EVTONLY 0x8000 /* descriptor requested for event notifications only */ #endif #ifdef KERNEL -#define FWASWRITTEN 0x10000 /* descriptor was written */ +#define FWASWRITTEN 0x10000 /* descriptor was written */ #endif -#define O_NOCTTY 0x20000 /* don't assign controlling terminal */ +#define O_NOCTTY 0x20000 /* don't assign controlling terminal */ #ifdef KERNEL -#define 
FNOCACHE 0x40000 /* fcntl(F_NOCACHE, 1) */ -#define FNORDAHEAD 0x80000 /* fcntl(F_RDAHEAD, 0) */ +#define FNOCACHE 0x40000 /* fcntl(F_NOCACHE, 1) */ +#define FNORDAHEAD 0x80000 /* fcntl(F_RDAHEAD, 0) */ #endif #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define O_DIRECTORY 0x100000 -#define O_SYMLINK 0x200000 /* allow open of a symlink */ +#define O_DIRECTORY 0x100000 +#define O_SYMLINK 0x200000 /* allow open of a symlink */ #endif #include #ifdef KERNEL -#define FNODIRECT 0x800000 /* fcntl(F_NODIRECT, 1) */ +#define FNODIRECT 0x800000 /* fcntl(F_NODIRECT, 1) */ #endif #if __DARWIN_C_LEVEL >= 200809L -#define O_CLOEXEC 0x1000000 /* implicitly set FD_CLOEXEC */ +#define O_CLOEXEC 0x1000000 /* implicitly set FD_CLOEXEC */ #endif #ifdef KERNEL -#define FENCRYPTED 0x2000000 +#define FENCRYPTED 0x2000000 #endif #ifdef KERNEL -#define FSINGLE_WRITER 0x4000000 /* fcntl(F_SINGLE_WRITER, 1) */ +#define FSINGLE_WRITER 0x4000000 /* fcntl(F_SINGLE_WRITER, 1) */ #endif #ifdef KERNEL -#define O_CLOFORK 0x8000000 /* implicitly set FD_CLOFORK */ +#define O_CLOFORK 0x8000000 /* implicitly set FD_CLOFORK */ #endif #ifdef KERNEL -#define FUNENCRYPTED 0x10000000 +#define FUNENCRYPTED 0x10000000 #endif /* Data Protection Flags */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define O_DP_GETRAWENCRYPTED 0x0001 -#define O_DP_GETRAWUNENCRYPTED 0x0002 +#define O_DP_GETRAWENCRYPTED 0x0001 +#define O_DP_GETRAWUNENCRYPTED 0x0002 #endif #ifdef KERNEL /* convert from open() flags to/from fflags; convert O_RD/WR to FREAD/FWRITE */ -#define FFLAGS(oflags) ((oflags) + 1) -#define OFLAGS(fflags) ((fflags) - 1) +#define FFLAGS(oflags) ((oflags) + 1) +#define OFLAGS(fflags) ((fflags) - 1) /* bits to save after open */ -#define FMASK (FREAD|FWRITE|FAPPEND|FASYNC|FFSYNC|FFDSYNC|FNONBLOCK) +#define FMASK (FREAD|FWRITE|FAPPEND|FASYNC|FFSYNC|FFDSYNC|FNONBLOCK) /* bits settable by fcntl(F_SETFL, ...) */ -#define FCNTLFLAGS (FAPPEND|FASYNC|FFSYNC|FFDSYNC|FNONBLOCK) +#define FCNTLFLAGS (FAPPEND|FASYNC|FFSYNC|FFDSYNC|FNONBLOCK) #endif /* @@ -218,13 +218,13 @@ * and for backward compatibility for fcntl. 
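*
* The FFLAGS()/OFLAGS() conversion a few lines up deserves a worked example.
* O_RDONLY/O_WRONLY/O_RDWR encode the access mode as 0/1/2, so "open for
* read" cannot be tested with a bitmask; adding 1 maps {0,1,2} to {1,2,3},
* i.e. onto independent FREAD (0x1) and FWRITE (0x2) bits. A self-contained
* sanity check (illustrative, not part of this patch):
*/

#include <assert.h>

#define MY_FFLAGS(oflags)	((oflags) + 1)	/* mirrors FFLAGS() above */
#define MY_OFLAGS(fflags)	((fflags) - 1)	/* mirrors OFLAGS() above */

int
main(void)
{
	assert(MY_FFLAGS(0 /* O_RDONLY */) == 0x1);	/* FREAD */
	assert(MY_FFLAGS(1 /* O_WRONLY */) == 0x2);	/* FWRITE */
	assert(MY_FFLAGS(2 /* O_RDWR */) == 0x3);	/* FREAD|FWRITE */
	assert(MY_OFLAGS(MY_FFLAGS(2)) == 2);		/* round-trips */
	return 0;
}

/*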
*/ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define FAPPEND O_APPEND /* kernel/compat */ -#define FASYNC O_ASYNC /* kernel/compat */ -#define FFSYNC O_FSYNC /* kernel */ -#define FFDSYNC O_DSYNC /* kernel */ -#define FNONBLOCK O_NONBLOCK /* kernel */ -#define FNDELAY O_NONBLOCK /* compat */ -#define O_NDELAY O_NONBLOCK /* compat */ +#define FAPPEND O_APPEND /* kernel/compat */ +#define FASYNC O_ASYNC /* kernel/compat */ +#define FFSYNC O_FSYNC /* kernel */ +#define FFDSYNC O_DSYNC /* kernel */ +#define FNONBLOCK O_NONBLOCK /* kernel */ +#define FNDELAY O_NONBLOCK /* compat */ +#define O_NDELAY O_NONBLOCK /* compat */ #endif /* @@ -242,131 +242,131 @@ */ /* command values */ -#define F_DUPFD 0 /* duplicate file descriptor */ -#define F_GETFD 1 /* get file descriptor flags */ -#define F_SETFD 2 /* set file descriptor flags */ -#define F_GETFL 3 /* get file status flags */ -#define F_SETFL 4 /* set file status flags */ -#define F_GETOWN 5 /* get SIGIO/SIGURG proc/pgrp */ -#define F_SETOWN 6 /* set SIGIO/SIGURG proc/pgrp */ -#define F_GETLK 7 /* get record locking information */ -#define F_SETLK 8 /* set record locking information */ -#define F_SETLKW 9 /* F_SETLK; wait if blocked */ +#define F_DUPFD 0 /* duplicate file descriptor */ +#define F_GETFD 1 /* get file descriptor flags */ +#define F_SETFD 2 /* set file descriptor flags */ +#define F_GETFL 3 /* get file status flags */ +#define F_SETFL 4 /* set file status flags */ +#define F_GETOWN 5 /* get SIGIO/SIGURG proc/pgrp */ +#define F_SETOWN 6 /* set SIGIO/SIGURG proc/pgrp */ +#define F_GETLK 7 /* get record locking information */ +#define F_SETLK 8 /* set record locking information */ +#define F_SETLKW 9 /* F_SETLK; wait if blocked */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define F_SETLKWTIMEOUT 10 /* F_SETLK; wait if blocked, return on timeout */ +#define F_SETLKWTIMEOUT 10 /* F_SETLK; wait if blocked, return on timeout */ #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #define F_FLUSH_DATA 40 #define F_CHKCLEAN 41 /* Used for regression test */ -#define F_PREALLOCATE 42 /* Preallocate storage */ -#define F_SETSIZE 43 /* Truncate a file without zeroing space */ +#define F_PREALLOCATE 42 /* Preallocate storage */ +#define F_SETSIZE 43 /* Truncate a file without zeroing space */ #define F_RDADVISE 44 /* Issue an advisory read async with no copy to user */ #define F_RDAHEAD 45 /* turn read ahead off/on for this fd */ /* - * 46,47 used to be F_READBOOTSTRAP and F_WRITEBOOTSTRAP + * 46,47 used to be F_READBOOTSTRAP and F_WRITEBOOTSTRAP */ #define F_NOCACHE 48 /* turn data caching off/on for this fd */ -#define F_LOG2PHYS 49 /* file offset to device offset */ +#define F_LOG2PHYS 49 /* file offset to device offset */ #define F_GETPATH 50 /* return the full path of the fd */ -#define F_FULLFSYNC 51 /* fsync + ask the drive to flush to the media */ +#define F_FULLFSYNC 51 /* fsync + ask the drive to flush to the media */ #define F_PATHPKG_CHECK 52 /* find which component (if any) is a package */ #define F_FREEZE_FS 53 /* "freeze" all fs operations */ #define F_THAW_FS 54 /* "thaw" all fs operations */ -#define F_GLOBAL_NOCACHE 55 /* turn data caching off/on (globally) for this file */ +#define F_GLOBAL_NOCACHE 55 /* turn data caching off/on (globally) for this file */ #ifdef PRIVATE -#define F_OPENFROM 56 /* SPI: open a file relative to fd (must be a dir) */ -#define F_UNLINKFROM 57 /* SPI: open a file relative to fd (must be a dir) */ -#define F_CHECK_OPENEVT 58 /* 
SPI: if a process is marked OPENEVT, or in O_EVTONLY on opens of this vnode */ +#define F_OPENFROM 56 /* SPI: open a file relative to fd (must be a dir) */ +#define F_UNLINKFROM 57 /* SPI: open a file relative to fd (must be a dir) */ +#define F_CHECK_OPENEVT 58 /* SPI: if a process is marked OPENEVT, or in O_EVTONLY on opens of this vnode */ #endif /* PRIVATE */ -#define F_ADDSIGS 59 /* add detached signatures */ +#define F_ADDSIGS 59 /* add detached signatures */ #ifdef PRIVATE /* Deprecated/Removed in 10.9 */ #define F_MARKDEPENDENCY 60 /* this process hosts the device supporting the fs backing this fd */ #endif -#define F_ADDFILESIGS 61 /* add signature from same file (used by dyld for shared libs) */ +#define F_ADDFILESIGS 61 /* add signature from same file (used by dyld for shared libs) */ -#define F_NODIRECT 62 /* used in conjunction with F_NOCACHE to indicate that DIRECT, synchonous writes */ +#define F_NODIRECT 62 /* used in conjunction with F_NOCACHE to indicate that DIRECT, synchonous writes */ /* should not be used (i.e. its ok to temporaily create cached pages) */ -#define F_GETPROTECTIONCLASS 63 /* Get the protection class of a file from the EA, returns int */ -#define F_SETPROTECTIONCLASS 64 /* Set the protection class of a file for the EA, requires int */ +#define F_GETPROTECTIONCLASS 63 /* Get the protection class of a file from the EA, returns int */ +#define F_SETPROTECTIONCLASS 64 /* Set the protection class of a file for the EA, requires int */ -#define F_LOG2PHYS_EXT 65 /* file offset to device offset, extended */ +#define F_LOG2PHYS_EXT 65 /* file offset to device offset, extended */ -#define F_GETLKPID 66 /* get record locking information, per-process */ +#define F_GETLKPID 66 /* get record locking information, per-process */ /* See F_DUPFD_CLOEXEC below for 67 */ #ifdef PRIVATE -#define F_SETSTATICCONTENT 68 /* - * indicate to the filesystem/storage driver that the content to be - * written is usually static. a nonzero value enables it, 0 disables it. - */ -#define F_MOVEDATAEXTENTS 69 /* Swap only the data associated with two files */ +#define F_SETSTATICCONTENT 68 /* + * indicate to the filesystem/storage driver that the content to be + * written is usually static. a nonzero value enables it, 0 disables it. + */ +#define F_MOVEDATAEXTENTS 69 /* Swap only the data associated with two files */ #endif -#define F_SETBACKINGSTORE 70 /* Mark the file as being the backing store for another filesystem */ -#define F_GETPATH_MTMINFO 71 /* return the full path of the FD, but error in specific mtmd circumstances */ +#define F_SETBACKINGSTORE 70 /* Mark the file as being the backing store for another filesystem */ +#define F_GETPATH_MTMINFO 71 /* return the full path of the FD, but error in specific mtmd circumstances */ -#define F_GETCODEDIR 72 /* Returns the code directory, with associated hashes, to the caller */ +#define F_GETCODEDIR 72 /* Returns the code directory, with associated hashes, to the caller */ -#define F_SETNOSIGPIPE 73 /* No SIGPIPE generated on EPIPE */ -#define F_GETNOSIGPIPE 74 /* Status of SIGPIPE for this fd */ +#define F_SETNOSIGPIPE 73 /* No SIGPIPE generated on EPIPE */ +#define F_GETNOSIGPIPE 74 /* Status of SIGPIPE for this fd */ -#define F_TRANSCODEKEY 75 /* For some cases, we need to rewrap the key for AKS/MKB */ +#define F_TRANSCODEKEY 75 /* For some cases, we need to rewrap the key for AKS/MKB */ -#define F_SINGLE_WRITER 76 /* file being written to a by single writer... 
if throttling enabled, writes */ +#define F_SINGLE_WRITER 76 /* file being written to by a single writer... if throttling enabled, writes */ /* may be broken into smaller chunks with throttling in between */ -#define F_GETPROTECTIONLEVEL 77 /* Get the protection version number for this filesystem */ +#define F_GETPROTECTIONLEVEL 77 /* Get the protection version number for this filesystem */ -#define F_FINDSIGS 78 /* Add detached code signatures (used by dyld for shared libs) */ +#define F_FINDSIGS 78 /* Add detached code signatures (used by dyld for shared libs) */ #ifdef PRIVATE -#define F_GETDEFAULTPROTLEVEL 79 /* Get the default protection level for the filesystem */ -#define F_MAKECOMPRESSED 80 /* Make the file compressed; truncate & toggle BSD bits */ -#define F_SET_GREEDY_MODE 81 /* - * indicate to the filesystem/storage driver that the content to be - * written should be written in greedy mode for additional speed at - * the cost of storage efficiency. A nonzero value enables it, 0 disables it. - */ - -#define F_SETIOTYPE 82 /* - * Use parameters to describe content being written to the FD. See - * flag definitions below for argument bits. - */ +#define F_GETDEFAULTPROTLEVEL 79 /* Get the default protection level for the filesystem */ +#define F_MAKECOMPRESSED 80 /* Make the file compressed; truncate & toggle BSD bits */ +#define F_SET_GREEDY_MODE 81 /* + * indicate to the filesystem/storage driver that the content to be + * written should be written in greedy mode for additional speed at + * the cost of storage efficiency. A nonzero value enables it, 0 disables it. + */ + +#define F_SETIOTYPE 82 /* + * Use parameters to describe content being written to the FD. See + * flag definitions below for argument bits. + */ #endif -#define F_ADDFILESIGS_FOR_DYLD_SIM 83 /* Add signature from same file, only if it is signed by Apple (used by dyld for simulator) */ +#define F_ADDFILESIGS_FOR_DYLD_SIM 83 /* Add signature from same file, only if it is signed by Apple (used by dyld for simulator) */ #ifdef PRIVATE -#define F_RECYCLE 84 /* Recycle vnode; debug/development builds only */ +#define F_RECYCLE 84 /* Recycle vnode; debug/development builds only */ #endif -#define F_BARRIERFSYNC 85 /* fsync + issue barrier to drive */ +#define F_BARRIERFSYNC 85 /* fsync + issue barrier to drive */ #ifdef PRIVATE -#define F_OFD_SETLK 90 /* Acquire or release open file description lock */ -#define F_OFD_SETLKW 91 /* (as F_OFD_SETLK but blocking if conflicting lock) */ -#define F_OFD_GETLK 92 /* Examine OFD lock */ +#define F_OFD_SETLK 90 /* Acquire or release open file description lock */ +#define F_OFD_SETLKW 91 /* (as F_OFD_SETLK but blocking if conflicting lock) */ +#define F_OFD_GETLK 92 /* Examine OFD lock */ -#define F_OFD_SETLKWTIMEOUT 93 /* (as F_OFD_SETLKW but return if timeout) */ -#define F_OFD_GETLKPID 94 /* get record locking information */ +#define F_OFD_SETLKWTIMEOUT 93 /* (as F_OFD_SETLKW but return if timeout) */ +#define F_OFD_GETLKPID 94 /* get record locking information */ -#define F_SETCONFINED 95 /* "confine" OFD to process */ -#define F_GETCONFINED 96 /* is-fd-confined? */ +#define F_SETCONFINED 95 /* "confine" OFD to process */ +#define F_GETCONFINED 96 /* is-fd-confined?
*/ #endif -#define F_ADDFILESIGS_RETURN 97 /* Add signature from same file, return end offset in structure on success */ -#define F_CHECK_LV 98 /* Check if Library Validation allows this Mach-O file to be mapped into the calling process */ +#define F_ADDFILESIGS_RETURN 97 /* Add signature from same file, return end offset in structure on success */ +#define F_CHECK_LV 98 /* Check if Library Validation allows this Mach-O file to be mapped into the calling process */ -#define F_PUNCHHOLE 99 /* Deallocate a range of the file */ +#define F_PUNCHHOLE 99 /* Deallocate a range of the file */ -#define F_TRIM_ACTIVE_FILE 100 /* Trim an active file */ +#define F_TRIM_ACTIVE_FILE 100 /* Trim an active file */ // FS-specific fcntl()'s numbers begin at 0x00010000 and go up #define FCNTL_FS_SPECIFIC_BASE 0x00010000 @@ -374,35 +374,35 @@ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ #if __DARWIN_C_LEVEL >= 200809L -#define F_DUPFD_CLOEXEC 67 /* mark the dup with FD_CLOEXEC */ +#define F_DUPFD_CLOEXEC 67 /* mark the dup with FD_CLOEXEC */ #endif /* file descriptor flags (F_GETFD, F_SETFD) */ -#define FD_CLOEXEC 1 /* close-on-exec flag */ +#define FD_CLOEXEC 1 /* close-on-exec flag */ #if PRIVATE -#define FD_CLOFORK 2 /* close-on-fork flag */ +#define FD_CLOFORK 2 /* close-on-fork flag */ #endif /* record locking flags (F_GETLK, F_SETLK, F_SETLKW) */ -#define F_RDLCK 1 /* shared or read lock */ -#define F_UNLCK 2 /* unlock */ -#define F_WRLCK 3 /* exclusive or write lock */ +#define F_RDLCK 1 /* shared or read lock */ +#define F_UNLCK 2 /* unlock */ +#define F_WRLCK 3 /* exclusive or write lock */ #ifdef KERNEL -#define F_WAIT 0x010 /* Wait until lock is granted */ -#define F_FLOCK 0x020 /* Use flock(2) semantics for lock */ -#define F_POSIX 0x040 /* Use POSIX semantics for lock */ -#define F_PROV 0x080 /* Non-coalesced provisional lock */ +#define F_WAIT 0x010 /* Wait until lock is granted */ +#define F_FLOCK 0x020 /* Use flock(2) semantics for lock */ +#define F_POSIX 0x040 /* Use POSIX semantics for lock */ +#define F_PROV 0x080 /* Non-coalesced provisional lock */ #define F_WAKE1_SAFE 0x100 /* it's safe to only wake one waiter */ -#define F_ABORT 0x200 /* lock attempt aborted (force umount) */ -#define F_OFD_LOCK 0x400 /* Use "OFD" semantics for lock */ +#define F_ABORT 0x200 /* lock attempt aborted (force umount) */ +#define F_OFD_LOCK 0x400 /* Use "OFD" semantics for lock */ #endif #if PRIVATE -/* - * ISOCHRONOUS attempts to sustain a minimum platform-dependent throughput +/* + * ISOCHRONOUS attempts to sustain a minimum platform-dependent throughput * for the duration of the I/O delivered to the driver.
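These command values are consumed through the usual fcntl(2) varargs interface. A minimal user-space sketch of three of the Darwin-specific commands above (illustrative only; F_FULLFSYNC can fail with ENOTSUP on filesystems that cannot flush the drive cache, so callers commonly fall back to fsync(2)):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/param.h>  /* MAXPATHLEN */
    #include <unistd.h>

    int
    main(void)
    {
        char path[MAXPATHLEN];
        int fd = open("/tmp/example.dat", O_CREAT | O_RDWR, 0644);
        if (fd < 0)
            return 1;

        (void)fcntl(fd, F_NOCACHE, 1);          /* bypass the buffer cache */

        if (fcntl(fd, F_GETPATH, path) == 0)    /* needs a MAXPATHLEN buffer */
            printf("fd resolves to %s\n", path);

        if (fcntl(fd, F_FULLFSYNC) == -1)       /* fsync + flush to media */
            (void)fsync(fd);                    /* fall back if unsupported */

        close(fd);
        return 0;
    }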
*/ -#define F_IOTYPE_ISOCHRONOUS 0x0001 +#define F_IOTYPE_ISOCHRONOUS 0x0001 #endif /* @@ -421,13 +421,13 @@ /* allocate flags (F_PREALLOCATE) */ #define F_ALLOCATECONTIG 0x00000002 /* allocate contiguous space */ -#define F_ALLOCATEALL 0x00000004 /* allocate all requested space or no space at all */ +#define F_ALLOCATEALL 0x00000004 /* allocate all requested space or no space at all */ /* Position Modes (fst_posmode) for F_PREALLOCATE */ -#define F_PEOFPOSMODE 3 /* Make it past all of the SEEK pos modes so that */ - /* we can keep them in sync should we desire */ -#define F_VOLPOSMODE 4 /* specify volume starting position */ +#define F_PEOFPOSMODE 3 /* Make it past all of the SEEK pos modes so that */ + /* we can keep them in sync should we desire */ +#define F_VOLPOSMODE 4 /* specify volume starting position */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ /* @@ -435,11 +435,11 @@ * information passed to system by user */ struct flock { - off_t l_start; /* starting offset */ - off_t l_len; /* len = 0 means until end of file */ - pid_t l_pid; /* lock owner */ - short l_type; /* lock type: read/write, etc. */ - short l_whence; /* type of l_start */ + off_t l_start; /* starting offset */ + off_t l_len; /* len = 0 means until end of file */ + pid_t l_pid; /* lock owner */ + short l_type; /* lock type: read/write, etc. */ + short l_whence; /* type of l_start */ }; #include @@ -466,56 +466,56 @@ struct flocktimeout { #endif /* KERNEL */ struct radvisory { - off_t ra_offset; - int ra_count; + off_t ra_offset; + int ra_count; }; #ifdef KERNEL #pragma pack() #endif /* KERNEL */ -#ifndef KERNEL +#ifndef KERNEL /** Information the user passes in to get the codeblobs out of the kernel */ typedef struct fcodeblobs { - void *f_cd_hash; - size_t f_hash_size; - void *f_cd_buffer; - size_t f_cd_size; - unsigned int *f_out_size; - int f_arch; - int __padding; + void *f_cd_hash; + size_t f_hash_size; + void *f_cd_buffer; + size_t f_cd_size; + unsigned int *f_out_size; + int f_arch; + int __padding; } fcodeblobs_t; #endif /* KERNEL */ #ifdef KERNEL typedef struct user32_fcodeblobs { - user32_addr_t f_cd_hash; - user32_size_t f_hash_size; - user32_addr_t f_cd_buffer; - user32_size_t f_cd_size; - user32_addr_t f_out_size; - int f_arch; + user32_addr_t f_cd_hash; + user32_size_t f_hash_size; + user32_addr_t f_cd_buffer; + user32_size_t f_cd_size; + user32_addr_t f_out_size; + int f_arch; } user32_fcodeblobs_t; /* LP64 version of fcodeblobs */ typedef struct user64_fcodeblobs { - user64_addr_t f_cd_hash; - user64_size_t f_hash_size; - user64_addr_t f_cd_buffer; - user64_size_t f_cd_size; - user64_addr_t f_out_size; - int f_arch; - int __padding; + user64_addr_t f_cd_hash; + user64_size_t f_hash_size; + user64_addr_t f_cd_buffer; + user64_size_t f_cd_size; + user64_addr_t f_out_size; + int f_arch; + int __padding; } user64_fcodeblobs_t; /* kernel version of fcodeblobs */ typedef struct user_fcodeblobs { - user_addr_t f_cd_hash; - user_size_t f_hash_size; - user_addr_t f_cd_buffer; - user_size_t f_cd_size; - user_addr_t f_out_size; - int f_arch; + user_addr_t f_cd_hash; + user_size_t f_hash_size; + user_addr_t f_cd_buffer; + user_size_t f_cd_size; + user_addr_t f_out_size; + int f_arch; } user_fcodeblobs_t; #endif /* KERNEL */ @@ -526,9 +526,9 @@ typedef struct user_fcodeblobs { * doesn't require mapping of the file in order to load the signature.
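The struct flock defined above is the argument block for F_GETLK, F_SETLK, and F_SETLKW. A hedged sketch of taking and releasing a write lock on the first 4 KiB of a file; designated initializers sidestep the Darwin-specific field order, and a real caller would also handle EINTR:

    #include <fcntl.h>
    #include <unistd.h>

    static int
    lock_prefix(int fd)
    {
        struct flock fl = {
            .l_start  = 0,
            .l_len    = 4096,       /* 0 would mean "until end of file" */
            .l_type   = F_WRLCK,
            .l_whence = SEEK_SET,
        };

        if (fcntl(fd, F_SETLKW, &fl) == -1)     /* block until granted */
            return -1;

        /* ... critical section ... */

        fl.l_type = F_UNLCK;
        return fcntl(fd, F_SETLK, &fl);
    }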
*/ typedef struct fsignatures { - off_t fs_file_start; - void *fs_blob_start; - size_t fs_blob_size; + off_t fs_file_start; + void *fs_blob_start; + size_t fs_blob_size; } fsignatures_t; #ifdef KERNEL /* LP64 version of fsignatures. all pointers @@ -537,17 +537,17 @@ typedef struct fsignatures { */ typedef struct user32_fsignatures { - off_t fs_file_start; - user32_addr_t fs_blob_start; - user32_size_t fs_blob_size; + off_t fs_file_start; + user32_addr_t fs_blob_start; + user32_size_t fs_blob_size; } user32_fsignatures_t; typedef struct user_fsignatures { - off_t fs_file_start; /* offset of Mach-O image in FAT file */ - user_addr_t fs_blob_start; /* F_ADDSIGS: mem address of signature*/ - /* F_ADDFILESIGS: offset of signature */ - /* in Mach-O image */ - user_size_t fs_blob_size; /* size of signature blob */ + off_t fs_file_start; /* offset of Mach-O image in FAT file */ + user_addr_t fs_blob_start; /* F_ADDSIGS: mem address of signature*/ + /* F_ADDFILESIGS: offset of signature */ + /* in Mach-O image */ + user_size_t fs_blob_size; /* size of signature blob */ } user_fsignatures_t; #endif /* KERNEL */ @@ -562,9 +562,9 @@ typedef struct user_fsignatures { * a process that library validation enabled. */ typedef struct fchecklv { - off_t lv_file_start; - size_t lv_error_message_size; - void *lv_error_message; + off_t lv_file_start; + size_t lv_error_message_size; + void *lv_error_message; } fchecklv_t; #ifdef KERNEL @@ -574,34 +574,34 @@ typedef struct fchecklv { */ typedef struct user32_fchecklv { - user32_off_t lv_file_start; - user32_size_t lv_error_message_size; - user32_addr_t lv_error_message; + user32_off_t lv_file_start; + user32_size_t lv_error_message_size; + user32_addr_t lv_error_message; } user32_fchecklv_t; typedef struct user_fchecklv { - off_t lv_file_start; - user_size_t lv_error_message_size; - user_addr_t lv_error_message; + off_t lv_file_start; + user_size_t lv_error_message_size; + user_addr_t lv_error_message; } user_fchecklv_t; #endif /* KERNEL */ /* lock operations for flock(2) */ -#define LOCK_SH 0x01 /* shared file lock */ -#define LOCK_EX 0x02 /* exclusive file lock */ -#define LOCK_NB 0x04 /* don't block when locking */ -#define LOCK_UN 0x08 /* unlock file */ +#define LOCK_SH 0x01 /* shared file lock */ +#define LOCK_EX 0x02 /* exclusive file lock */ +#define LOCK_NB 0x04 /* don't block when locking */ +#define LOCK_UN 0x08 /* unlock file */ /* fstore_t type used by F_PREALLOCATE command */ typedef struct fstore { - unsigned int fst_flags; /* IN: flags word */ - int fst_posmode; /* IN: indicates use of offset field */ - off_t fst_offset; /* IN: start of the region */ - off_t fst_length; /* IN: size of the region */ - off_t fst_bytesalloc; /* OUT: number of bytes allocated */ + unsigned int fst_flags; /* IN: flags word */ + int fst_posmode; /* IN: indicates use of offset field */ + off_t fst_offset; /* IN: start of the region */ + off_t fst_length; /* IN: size of the region */ + off_t fst_bytesalloc; /* OUT: number of bytes allocated */ } fstore_t; /* fpunchhole_t used by F_PUNCHHOLE */ @@ -621,9 +621,9 @@ typedef struct ftrimactivefile { /* fbootstraptransfer_t used by F_READBOOTSTRAP and F_WRITEBOOTSTRAP commands */ typedef struct fbootstraptransfer { - off_t fbt_offset; /* IN: offset to start read/write */ - size_t fbt_length; /* IN: number of bytes to transfer */ - void *fbt_buffer; /* IN: buffer to be read/written */ + off_t fbt_offset; /* IN: offset to start read/write */ + size_t fbt_length; /* IN: number of bytes to transfer */ + void *fbt_buffer; /* IN: 
buffer to be read/written */ } fbootstraptransfer_t; #ifdef KERNEL @@ -633,15 +633,15 @@ typedef struct fbootstraptransfer { */ typedef struct user32_fbootstraptransfer { - off_t fbt_offset; /* IN: offset to start read/write */ - user32_size_t fbt_length; /* IN: number of bytes to transfer */ - user32_addr_t fbt_buffer; /* IN: buffer to be read/written */ + off_t fbt_offset; /* IN: offset to start read/write */ + user32_size_t fbt_length; /* IN: number of bytes to transfer */ + user32_addr_t fbt_buffer; /* IN: buffer to be read/written */ } user32_fbootstraptransfer_t; typedef struct user_fbootstraptransfer { - off_t fbt_offset; /* IN: offset to start read/write */ - user_size_t fbt_length; /* IN: number of bytes to transfer */ - user_addr_t fbt_buffer; /* IN: buffer to be read/written */ + off_t fbt_offset; /* IN: offset to start read/write */ + user_size_t fbt_length; /* IN: number of bytes to transfer */ + user_addr_t fbt_buffer; /* IN: buffer to be read/written */ } user_fbootstraptransfer_t; #endif // KERNEL @@ -652,7 +652,7 @@ typedef struct user_fbootstraptransfer { * result - the disk device address corresponding to the * current file offset (likely set with an lseek). * - * The flags could hold an indication of whether the # of + * The flags could hold an indication of whether the # of * contiguous bytes reflects the true extent length on disk, * or is an advisory value that indicates there is at least that * many bytes contiguous. For some filesystems it might be too @@ -670,47 +670,47 @@ typedef struct user_fbootstraptransfer { #pragma pack(4) struct log2phys { - unsigned int l2p_flags; /* unused so far */ - off_t l2p_contigbytes; /* F_LOG2PHYS: unused so far */ - /* F_LOG2PHYS_EXT: IN: number of bytes to be queried */ - /* OUT: number of contiguous bytes at this position */ - off_t l2p_devoffset; /* F_LOG2PHYS: OUT: bytes into device */ - /* F_LOG2PHYS_EXT: IN: bytes into file */ - /* OUT: bytes into device */ + unsigned int l2p_flags; /* unused so far */ + off_t l2p_contigbytes; /* F_LOG2PHYS: unused so far */ + /* F_LOG2PHYS_EXT: IN: number of bytes to be queried */ + /* OUT: number of contiguous bytes at this position */ + off_t l2p_devoffset; /* F_LOG2PHYS: OUT: bytes into device */ + /* F_LOG2PHYS_EXT: IN: bytes into file */ + /* OUT: bytes into device */ }; #pragma pack() -#define O_POPUP 0x80000000 /* force window to popup on open */ -#define O_ALERT 0x20000000 /* small, clean popup window */ +#define O_POPUP 0x80000000 /* force window to popup on open */ +#define O_ALERT 0x20000000 /* small, clean popup window */ #ifdef PRIVATE /* * SPI: Argument data for F_OPENFROM */ struct fopenfrom { - unsigned int o_flags; /* same as open(2) */ - mode_t o_mode; /* same as open(2) */ - char * o_pathname; /* relative pathname */ + unsigned int o_flags; /* same as open(2) */ + mode_t o_mode; /* same as open(2) */ + char * o_pathname; /* relative pathname */ }; #ifdef KERNEL /* - * LP64 version of fopenfrom. Memory pointers + * LP64 version of fopenfrom. Memory pointers * grow when we're dealing with a 64-bit process. 
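The fstore_t above is both input and output for F_PREALLOCATE. A sketch under two assumptions: the caller wants space reserved past the current end of file, and falls back to non-contiguous allocation when F_ALLOCATECONTIG is refused (a common pattern, since contiguous space is often unavailable):

    #include <fcntl.h>

    /* Reserve len bytes past EOF; returns bytes reserved, or -1. */
    static off_t
    prealloc_past_eof(int fd, off_t len)
    {
        fstore_t fst = {
            .fst_flags      = F_ALLOCATECONTIG | F_ALLOCATEALL,
            .fst_posmode    = F_PEOFPOSMODE,    /* offset relative to EOF */
            .fst_offset     = 0,
            .fst_length     = len,
            .fst_bytesalloc = 0,
        };

        if (fcntl(fd, F_PREALLOCATE, &fst) == -1) {
            fst.fst_flags = F_ALLOCATEALL;      /* accept fragmented extents */
            if (fcntl(fd, F_PREALLOCATE, &fst) == -1)
                return -1;
        }
        return fst.fst_bytesalloc;              /* OUT: bytes allocated */
    }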
* * WARNING - keep in sync with fopenfrom (above) */ struct user32_fopenfrom { - unsigned int o_flags; - mode_t o_mode; - user32_addr_t o_pathname; + unsigned int o_flags; + mode_t o_mode; + user32_addr_t o_pathname; }; struct user_fopenfrom { - unsigned int o_flags; - mode_t o_mode; - user_addr_t o_pathname; + unsigned int o_flags; + mode_t o_mode; + user_addr_t o_pathname; }; #endif /* KERNEL */ @@ -742,12 +742,12 @@ typedef enum { #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ __BEGIN_DECLS -int open(const char *, int, ...) __DARWIN_ALIAS_C(open); +int open(const char *, int, ...) __DARWIN_ALIAS_C(open); #if __DARWIN_C_LEVEL >= 200809L -int openat(int, const char *, int, ...) __DARWIN_NOCANCEL(openat) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int openat(int, const char *, int, ...) __DARWIN_NOCANCEL(openat) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); #endif -int creat(const char *, mode_t) __DARWIN_ALIAS_C(creat); -int fcntl(int, int, ...) __DARWIN_ALIAS_C(fcntl); +int creat(const char *, mode_t) __DARWIN_ALIAS_C(creat); +int fcntl(int, int, ...) __DARWIN_ALIAS_C(fcntl); #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #ifdef PRIVATE @@ -767,22 +767,22 @@ typedef __darwin_mach_port_t fileport_t; int fileport_makeport(int, fileport_t*); int fileport_makefd(fileport_t); #endif /* PRIVATE */ -int openx_np(const char *, int, filesec_t); -/* +int openx_np(const char *, int, filesec_t); +/* * data-protected non-portable open(2) : - int open_dprotected_np(user_addr_t path, int flags, int class, int dpflags, int mode) - */ -int open_dprotected_np ( const char *, int, int, int, ...); -int flock(int, int); + * int open_dprotected_np(user_addr_t path, int flags, int class, int dpflags, int mode) + */ +int open_dprotected_np( const char *, int, int, int, ...); +int flock(int, int); filesec_t filesec_init(void); filesec_t filesec_dup(filesec_t); -void filesec_free(filesec_t); -int filesec_get_property(filesec_t, filesec_property_t, void *); -int filesec_query_property(filesec_t, filesec_property_t, int *); -int filesec_set_property(filesec_t, filesec_property_t, const void *); -int filesec_unset_property(filesec_t, filesec_property_t) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); -#define _FILESEC_UNSET_PROPERTY ((void *)0) -#define _FILESEC_REMOVE_ACL ((void *)1) +void filesec_free(filesec_t); +int filesec_get_property(filesec_t, filesec_property_t, void *); +int filesec_query_property(filesec_t, filesec_property_t, int *); +int filesec_set_property(filesec_t, filesec_property_t, const void *); +int filesec_unset_property(filesec_t, filesec_property_t) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +#define _FILESEC_UNSET_PROPERTY ((void *)0) +#define _FILESEC_REMOVE_ACL ((void *)1) #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ __END_DECLS #endif diff --git a/bsd/sys/file.h b/bsd/sys/file.h index e79451706..123407262 100644 --- a/bsd/sys/file.h +++ b/bsd/sys/file.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
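flock(2), declared above alongside the filesec_* interfaces, applies whole-file advisory locks using the LOCK_* bits defined earlier in the header. A short sketch of a non-blocking exclusive lock:

    #include <sys/file.h>   /* flock(), LOCK_* */
    #include <errno.h>

    /* 0 = lock held, 1 = held by someone else, -1 = error. */
    static int
    try_exclusive(int fd)
    {
        if (flock(fd, LOCK_EX | LOCK_NB) == 0)
            return 0;
        return (errno == EWOULDBLOCK) ? 1 : -1;
    }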
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_FILE_H_ -#define _SYS_FILE_H_ +#define _SYS_FILE_H_ #include #include @@ -78,12 +78,12 @@ #endif #ifndef _KAUTH_CRED_T -#define _KAUTH_CRED_T +#define _KAUTH_CRED_T struct ucred; typedef struct ucred *kauth_cred_t; struct posix_cred; typedef struct posix_cred *posix_cred_t; -#endif /* !_KAUTH_CRED_T */ +#endif /* !_KAUTH_CRED_T */ __BEGIN_DECLS #ifdef KERNEL @@ -96,10 +96,10 @@ int file_drop(int); #ifdef KERNEL_PRIVATE int fd_rdwr(int fd, enum uio_rw, uint64_t base, int64_t len, enum uio_seg, - off_t offset, int io_flg, int64_t *aresid); + off_t offset, int io_flg, int64_t *aresid); struct fileproc; struct vnode; int fp_getfvp(struct proc *p, int fd, struct fileproc **resultfp, struct vnode **resultvp); -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ __END_DECLS #endif /* !_SYS_FILE_H_ */ diff --git a/bsd/sys/file_internal.h b/bsd/sys/file_internal.h index 0b9ed96b2..fbd615cbd 100644 --- a/bsd/sys/file_internal.h +++ b/bsd/sys/file_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. 
All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_FILE_INTERNAL_H_ -#define _SYS_FILE_INTERNAL_H_ +#define _SYS_FILE_INTERNAL_H_ #include #include @@ -100,97 +100,97 @@ struct fileproc { #define FILEPROC_NULL (struct fileproc *)0 -#define FP_INCREATE 0x0001 -#define FP_INCLOSE 0x0002 -#define FP_INSELECT 0x0004 -#define FP_UNUSED 0x0008 /* unused (was FP_INCHRREAD) */ -#define FP_WRITTEN 0x0010 -#define FP_CLOSING 0x0020 -#define FP_WAITCLOSE 0x0040 -#define FP_AIOISSUED 0x0080 -#define FP_WAITEVENT 0x0100 -#define FP_SELCONFLICT 0x0200 /* select conflict on an individual fp */ +#define FP_INCREATE 0x0001 +#define FP_INCLOSE 0x0002 +#define FP_INSELECT 0x0004 +#define FP_UNUSED 0x0008 /* unused (was FP_INCHRREAD) */ +#define FP_WRITTEN 0x0010 +#define FP_CLOSING 0x0020 +#define FP_WAITCLOSE 0x0040 +#define FP_AIOISSUED 0x0080 +#define FP_WAITEVENT 0x0100 +#define FP_SELCONFLICT 0x0200 /* select conflict on an individual fp */ /* squeeze a "type" value into the upper flag bits */ -#define _FP_TYPESHIFT 24 -#define FP_TYPEMASK (0x7 << _FP_TYPESHIFT) /* 8 "types" of fileproc */ +#define _FP_TYPESHIFT 24 +#define FP_TYPEMASK (0x7 << _FP_TYPESHIFT) /* 8 "types" of fileproc */ -#define FILEPROC_TYPE(fp) ((fp)->f_flags & FP_TYPEMASK) +#define FILEPROC_TYPE(fp) ((fp)->f_flags & FP_TYPEMASK) #define FP_ISGUARDED(fp, attribs) \ ((FILEPROC_TYPE(fp) == FTYPE_GUARDED) ? fp_isguarded(fp, attribs) : 0) typedef enum { - FTYPE_SIMPLE = 0, - FTYPE_GUARDED = (1 << _FP_TYPESHIFT) + FTYPE_SIMPLE = 0, + FTYPE_GUARDED = (1 << _FP_TYPESHIFT) } fileproc_type_t; -#define FP_VALID_FLAGS (FP_INCREATE | FP_INCLOSE | FP_INSELECT |\ - FP_WRITTEN | FP_CLOSING | FP_WAITCLOSE |\ - FP_AIOISSUED | FP_WAITEVENT | FP_SELCONFLICT | _FP_TYPEMASK) +#define FP_VALID_FLAGS (FP_INCREATE | FP_INCLOSE | FP_INSELECT |\ + FP_WRITTEN | FP_CLOSING | FP_WAITCLOSE |\ + FP_AIOISSUED | FP_WAITEVENT | FP_SELCONFLICT | _FP_TYPEMASK) #ifndef _KAUTH_CRED_T -#define _KAUTH_CRED_T +#define _KAUTH_CRED_T struct ucred; typedef struct ucred *kauth_cred_t; struct posix_cred; typedef struct posix_cred *posix_cred_t; -#endif /* !_KAUTH_CRED_T */ +#endif /* !_KAUTH_CRED_T */ /* file types */ typedef enum { - DTYPE_VNODE = 1, /* file */ - DTYPE_SOCKET, /* communications endpoint */ - DTYPE_PSXSHM, /* POSIX Shared memory */ - DTYPE_PSXSEM, /* POSIX Semaphores */ - DTYPE_KQUEUE, /* kqueue */ - DTYPE_PIPE, /* pipe */ - DTYPE_FSEVENTS, /* fsevents */ - DTYPE_ATALK, /* (obsolete) */ - DTYPE_NETPOLICY, /* networking policy */ + DTYPE_VNODE = 1, /* file */ + DTYPE_SOCKET, /* communications endpoint */ + DTYPE_PSXSHM, /* POSIX Shared memory */ + DTYPE_PSXSEM, /* POSIX Semaphores */ + DTYPE_KQUEUE, /* kqueue */ + DTYPE_PIPE, /* pipe */ + DTYPE_FSEVENTS, /* fsevents */ + DTYPE_ATALK, /* (obsolete) */ + DTYPE_NETPOLICY, /* networking policy */ } file_type_t; /* defines for fg_lflags */ -#define FG_TERM 0x01 /* the fileglob is terminating .. */ -#define FG_INSMSGQ 0x02 /* insert to msgqueue pending .. 
*/ -#define FG_WINSMSGQ 0x04 /* wait until the fileglob is in the msgqueue */ -#define FG_RMMSGQ 0x08 /* the fileglob is being removed from msgqueue */ -#define FG_WRMMSGQ 0x10 /* wait for the fileglob to be removed from msgqueue */ -#define FG_PORTMADE 0x20 /* a port was at some point created for this fileglob */ -#define FG_NOSIGPIPE 0x40 /* don't deliver SIGPIPE with EPIPE return */ -#define FG_OFF_LOCKED 0x80 /* Used as a mutex for offset changes (for vnodes) */ -#define FG_OFF_LOCKWANT 0x100 /* Somebody's waiting for the lock */ -#define FG_CONFINED 0x200 /* fileglob confined to process, immutably */ -#define FG_HAS_OFDLOCK 0x400 /* Has or has had an OFD lock */ +#define FG_TERM 0x01 /* the fileglob is terminating .. */ +#define FG_INSMSGQ 0x02 /* insert to msgqueue pending .. */ +#define FG_WINSMSGQ 0x04 /* wait until the fileglob is in the msgqueue */ +#define FG_RMMSGQ 0x08 /* the fileglob is being removed from msgqueue */ +#define FG_WRMMSGQ 0x10 /* wait for the fileglob to be removed from msgqueue */ +#define FG_PORTMADE 0x20 /* a port was at some point created for this fileglob */ +#define FG_NOSIGPIPE 0x40 /* don't deliver SIGPIPE with EPIPE return */ +#define FG_OFF_LOCKED 0x80 /* Used as a mutex for offset changes (for vnodes) */ +#define FG_OFF_LOCKWANT 0x100 /* Somebody's waiting for the lock */ +#define FG_CONFINED 0x200 /* fileglob confined to process, immutably */ +#define FG_HAS_OFDLOCK 0x400 /* Has or has had an OFD lock */ struct fileglob { LIST_ENTRY(fileglob) f_msglist;/* list of active files */ - int32_t fg_flag; /* see fcntl.h */ - int32_t fg_count; /* reference count */ - int32_t fg_msgcount; /* references from message queue */ - int32_t fg_lflags; /* file global flags */ - kauth_cred_t fg_cred; /* credentials associated with descriptor */ + int32_t fg_flag; /* see fcntl.h */ + int32_t fg_count; /* reference count */ + int32_t fg_msgcount; /* references from message queue */ + int32_t fg_lflags; /* file global flags */ + kauth_cred_t fg_cred; /* credentials associated with descriptor */ const struct fileops { - file_type_t fo_type; /* descriptor type */ - int (*fo_read) (struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); - int (*fo_write) (struct fileproc *fp, struct uio *uio, - int flags, vfs_context_t ctx); -#define FOF_OFFSET 0x00000001 /* offset supplied to vn_write */ -#define FOF_PCRED 0x00000002 /* cred from proc, not current thread */ - int (*fo_ioctl) (struct fileproc *fp, u_long com, - caddr_t data, vfs_context_t ctx); - int (*fo_select) (struct fileproc *fp, int which, - void *wql, vfs_context_t ctx); - int (*fo_close) (struct fileglob *fg, vfs_context_t ctx); - int (*fo_kqfilter) (struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx); - int (*fo_drain) (struct fileproc *fp, vfs_context_t ctx); + file_type_t fo_type; /* descriptor type */ + int (*fo_read) (struct fileproc *fp, struct uio *uio, + int flags, vfs_context_t ctx); + int (*fo_write) (struct fileproc *fp, struct uio *uio, + int flags, vfs_context_t ctx); +#define FOF_OFFSET 0x00000001 /* offset supplied to vn_write */ +#define FOF_PCRED 0x00000002 /* cred from proc, not current thread */ + int (*fo_ioctl)(struct fileproc *fp, u_long com, + caddr_t data, vfs_context_t ctx); + int (*fo_select) (struct fileproc *fp, int which, + void *wql, vfs_context_t ctx); + int (*fo_close) (struct fileglob *fg, vfs_context_t ctx); + int (*fo_kqfilter) (struct fileproc *fp, struct knote *kn, + struct kevent_internal_s *kev, vfs_context_t ctx); + int (*fo_drain) (struct
fileproc *fp, vfs_context_t ctx); } *fg_ops; - off_t fg_offset; - void *fg_data; /* vnode or socket or SHM or semaphore */ - void *fg_vn_data; /* Per fd vnode data, used for directories */ + off_t fg_offset; + void *fg_data; /* vnode or socket or SHM or semaphore */ + void *fg_vn_data; /* Per fd vnode data, used for directories */ lck_mtx_t fg_lock; #if CONFIG_MACF struct label *fg_label; /* JMM - use the one in the cred? */ @@ -199,24 +199,24 @@ struct fileglob { #ifdef __APPLE_API_PRIVATE LIST_HEAD(fmsglist, fileglob); -extern struct fmsglist fmsghead; /* head of list of open files */ -extern int maxfiles; /* kernel limit on number of open files */ -extern int nfiles; /* actual number of open files */ +extern struct fmsglist fmsghead; /* head of list of open files */ +extern int maxfiles; /* kernel limit on number of open files */ +extern int nfiles; /* actual number of open files */ extern int maxfilesperproc; -#define FILEGLOB_DTYPE(fg) ((const file_type_t)((fg)->fg_ops->fo_type)) +#define FILEGLOB_DTYPE(fg) ((const file_type_t)((fg)->fg_ops->fo_type)) #endif /* __APPLE_API_PRIVATE */ __BEGIN_DECLS int fo_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx); int fo_write(struct fileproc *fp, struct uio *uio, int flags, - vfs_context_t ctx); + vfs_context_t ctx); int fo_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx); int fo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx); int fo_close(struct fileglob *fg, vfs_context_t ctx); int fo_kqfilter(struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx); + struct kevent_internal_s *kev, vfs_context_t ctx); void fileproc_drain(proc_t, struct fileproc *); int fp_tryswap(proc_t, int fd, struct fileproc *nfp); int fp_drop(struct proc *p, int fd, struct fileproc *fp, int locked); diff --git a/bsd/sys/filedesc.h b/bsd/sys/filedesc.h index 16e33533a..80d91f2e4 100644 --- a/bsd/sys/filedesc.h +++ b/bsd/sys/filedesc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_FILEDESC_H_ -#define _SYS_FILEDESC_H_ +#define _SYS_FILEDESC_H_ #include @@ -80,8 +80,8 @@ * should be selected to be the biggest multiple of OFILESIZE (see below) * that will fit in a power-of-two sized piece of memory. 
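fg_ops gives each fileglob a table of I/O entry points, and the fo_read()/fo_write()/... wrappers declared above simply indirect through it. A hedged sketch of how a kernel subsystem might populate such a table for its descriptor type; the example_* names are hypothetical (definitions elided), and the real initializers live in the owning subsystems such as pipes, sockets, and kqueues:

    /* Hypothetical handlers for an in-kernel descriptor type. */
    static int example_read(struct fileproc *fp, struct uio *uio,
        int flags, vfs_context_t ctx);
    static int example_close(struct fileglob *fg, vfs_context_t ctx);

    static const struct fileops example_ops = {
        .fo_type     = DTYPE_PIPE,  /* reusing an existing type for the sketch */
        .fo_read     = example_read,
        .fo_write    = NULL,        /* unsupported operations stay NULL */
        .fo_ioctl    = NULL,
        .fo_select   = NULL,
        .fo_close    = example_close,
        .fo_kqfilter = NULL,
        .fo_drain    = NULL,
    };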
*/ -#define NDFILE 25 /* 125 bytes */ -#define NDEXTENT 50 /* 250 bytes in 256-byte alloc. */ +#define NDFILE 25 /* 125 bytes */ +#define NDEXTENT 50 /* 250 bytes in 256-byte alloc. */ #ifdef BSD_KERNEL_PRIVATE @@ -91,49 +91,49 @@ struct klist; struct kqlist; struct filedesc { - struct fileproc **fd_ofiles; /* file structures for open files */ + struct fileproc **fd_ofiles; /* file structures for open files */ lck_mtx_t fd_kqhashlock; /* lock for dynamic kqueue hash */ u_long fd_kqhashmask; /* size of dynamic kqueue hash */ struct kqlist *fd_kqhash; /* hash table for dynamic kqueues */ struct kqueue *fd_wqkqueue; /* the workq kqueue */ - char *fd_ofileflags; /* per-process open file flags */ - struct vnode *fd_cdir; /* current directory */ - struct vnode *fd_rdir; /* root directory */ - int fd_nfiles; /* number of open files allocated */ - int fd_lastfile; /* high-water mark of fd_ofiles */ - int fd_freefile; /* approx. next free file */ - u_short fd_cmask; /* mask for file creation */ - int fd_flags; + char *fd_ofileflags; /* per-process open file flags */ + struct vnode *fd_cdir; /* current directory */ + struct vnode *fd_rdir; /* root directory */ + int fd_nfiles; /* number of open files allocated */ + int fd_lastfile; /* high-water mark of fd_ofiles */ + int fd_freefile; /* approx. next free file */ + u_short fd_cmask; /* mask for file creation */ + int fd_flags; int fd_knlistsize; /* size of knlist */ struct klist *fd_knlist; /* list of attached knotes */ u_long fd_knhashmask; /* size of knhash */ struct klist *fd_knhash; /* hash table for attached knotes */ - lck_mtx_t fd_knhashlock; /* lock for hash table for attached knotes */ + lck_mtx_t fd_knhashlock; /* lock for hash table for attached knotes */ }; /* * definitions for fd_flags; */ -#define FD_CHROOT 0x01 /* process was chrooted... keep track even */ +#define FD_CHROOT 0x01 /* process was chrooted... keep track even */ /* if we're force unmounted and unable to */ /* take a vnode_ref on fd_rdir during a fork */ -#define FD_WORKLOOP 0x02 /* process has created a kqworkloop that */ +#define FD_WORKLOOP 0x02 /* process has created a kqworkloop that */ /* requires manual cleanup on exit */ /* * Per-process open flags. */ -#define UF_EXCLOSE 0x01 /* auto-close on exec */ -#define UF_FORKCLOSE 0x02 /* auto-close on fork */ -#define UF_RESERVED 0x04 /* open pending / in progress */ -#define UF_CLOSING 0x08 /* close in progress */ +#define UF_EXCLOSE 0x01 /* auto-close on exec */ +#define UF_FORKCLOSE 0x02 /* auto-close on fork */ +#define UF_RESERVED 0x04 /* open pending / in progress */ +#define UF_CLOSING 0x08 /* close in progress */ #ifdef KERNEL -#define UF_RESVWAIT 0x10 /* close in progress */ -#define UF_INHERIT 0x20 /* "inherit-on-exec" */ +#define UF_RESVWAIT 0x10 /* close in progress */ +#define UF_INHERIT 0x20 /* "inherit-on-exec" */ -#define UF_VALID_FLAGS \ +#define UF_VALID_FLAGS \ (UF_EXCLOSE | UF_FORKCLOSE | UF_RESERVED | UF_CLOSING |\ UF_RESVWAIT | UF_INHERIT) #endif /* KERNEL */ @@ -147,15 +147,15 @@ struct filedesc { /* * Kernel global variables and routines. 
*/ -extern int dupfdopen(struct filedesc *fdp, - int indx, int dfd, int mode, int error); -extern int fdalloc(proc_t p, int want, int *result); -extern void fdrelse(proc_t p, int fd); -extern int fdavail(proc_t p, int n); -#define fdfile(p, fd) \ - (&(p)->p_fd->fd_ofiles[(fd)]) -#define fdflags(p, fd) \ - (&(p)->p_fd->fd_ofileflags[(fd)]) +extern int dupfdopen(struct filedesc *fdp, + int indx, int dfd, int mode, int error); +extern int fdalloc(proc_t p, int want, int *result); +extern void fdrelse(proc_t p, int fd); +extern int fdavail(proc_t p, int n); +#define fdfile(p, fd) \ + (&(p)->p_fd->fd_ofiles[(fd)]) +#define fdflags(p, fd) \ + (&(p)->p_fd->fd_ofileflags[(fd)]) /* * Accessor macros for fd flags @@ -166,17 +166,17 @@ extern int fdavail(proc_t p, int n); #define FDFLAGS_CLR(p, fd, bits) \ (*fdflags(p, fd) &= ~((bits) & (UF_EXCLOSE|UF_FORKCLOSE))) -extern int falloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx); +extern int falloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx); #ifdef __APPLE_API_PRIVATE typedef struct fileproc *(*fp_allocfn_t)(void *); -extern int falloc_withalloc(proc_t p, struct fileproc **resultfp, +extern int falloc_withalloc(proc_t p, struct fileproc **resultfp, int *resultfd, vfs_context_t ctx, fp_allocfn_t fp_zalloc, void *crarg); -extern struct filedesc *fdcopy(proc_t p, struct vnode *uth_cdir); -extern void fdfree(proc_t p); -extern void fdexec(proc_t p, short flags, int self_exec); +extern struct filedesc *fdcopy(proc_t p, struct vnode *uth_cdir); +extern void fdfree(proc_t p); +extern void fdexec(proc_t p, short flags, int self_exec); #endif /* __APPLE_API_PRIVATE */ #endif /* KERNEL */ diff --git a/bsd/sys/fileport.h b/bsd/sys/fileport.h index 779179baf..6ce4ab40b 100644 --- a/bsd/sys/fileport.h +++ b/bsd/sys/fileport.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License.
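falloc(), declared above, pairs a freshly allocated fileproc with a free descriptor slot; on failure paths the slot is handed back with fdrelse(). A heavily hedged sketch of the kernel-side pattern only (locking and fileproc teardown elided; real call sites such as open1() do considerably more):

    /* Sketch: allocate an fd plus fileproc for some kernel object. */
    static int
    example_open_common(proc_t p, vfs_context_t ctx, int *fd_out)
    {
        struct fileproc *fp;
        int fd, error;

        error = falloc(p, &fp, &fd, ctx);   /* fileproc + descriptor slot */
        if (error)
            return error;

        /* ... point fp's fileglob at the object and its fileops ... */

        *fd_out = fd;
        return 0;   /* failure paths call fdrelse(p, fd) to return the slot */
    }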
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -41,11 +41,11 @@ __BEGIN_DECLS #ifndef _FILEPORT_T #define _FILEPORT_T typedef __darwin_mach_port_t fileport_t; -#define FILEPORT_NULL ((fileport_t)0) +#define FILEPORT_NULL ((fileport_t)0) #endif /* _FILEPORT_T */ -int fileport_makeport(int, fileport_t *); -int fileport_makefd(fileport_t); +int fileport_makeport(int, fileport_t *); +int fileport_makefd(fileport_t); #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ @@ -53,4 +53,4 @@ __END_DECLS #endif /* !KERNEL */ -#endif /* !_SYS_FILEPORT_H_ */ +#endif /* !_SYS_FILEPORT_H_ */ diff --git a/bsd/sys/filio.h b/bsd/sys/filio.h index 80e03305b..6f6076a0a 100644 --- a/bsd/sys/filio.h +++ b/bsd/sys/filio.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -66,24 +66,24 @@ * @(#)filio.h 8.1 (Berkeley) 3/28/94 */ -#ifndef _SYS_FILIO_H_ -#define _SYS_FILIO_H_ +#ifndef _SYS_FILIO_H_ +#define _SYS_FILIO_H_ #include /* Generic file-descriptor ioctl's. 
*/ -#define FIOCLEX _IO('f', 1) /* set close on exec on fd */ -#define FIONCLEX _IO('f', 2) /* remove close on exec */ -#define FIONREAD _IOR('f', 127, int) /* get # bytes to read */ -#define FIONBIO _IOW('f', 126, int) /* set/clear non-blocking i/o */ -#define FIOASYNC _IOW('f', 125, int) /* set/clear async i/o */ -#define FIOSETOWN _IOW('f', 124, int) /* set owner */ -#define FIOGETOWN _IOR('f', 123, int) /* get owner */ -#define FIODTYPE _IOR('f', 122, int) /* get d_type */ +#define FIOCLEX _IO('f', 1) /* set close on exec on fd */ +#define FIONCLEX _IO('f', 2) /* remove close on exec */ +#define FIONREAD _IOR('f', 127, int) /* get # bytes to read */ +#define FIONBIO _IOW('f', 126, int) /* set/clear non-blocking i/o */ +#define FIOASYNC _IOW('f', 125, int) /* set/clear async i/o */ +#define FIOSETOWN _IOW('f', 124, int) /* set owner */ +#define FIOGETOWN _IOR('f', 123, int) /* get owner */ +#define FIODTYPE _IOR('f', 122, int) /* get d_type */ #ifdef KERNEL_PRIVATE -#define FIODEVICELOCKED _IO('f', 121) /* device locked/unlocked */ -#define FIOPINSWAP _IO('f', 120) /* pin swap file to fast device */ +#define FIODEVICELOCKED _IO('f', 121) /* device locked/unlocked */ +#define FIOPINSWAP _IO('f', 120) /* pin swap file to fast device */ #endif #endif /* !_SYS_FILIO_H_ */ diff --git a/bsd/sys/fsctl.h b/bsd/sys/fsctl.h index eafcb9b45..8c7ec89b0 100644 --- a/bsd/sys/fsctl.h +++ b/bsd/sys/fsctl.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
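These generic ioctls work on any descriptor. FIONREAD, for instance, reports how many bytes are immediately readable, and FIONBIO toggles non-blocking I/O without a F_GETFL/F_SETFL round trip:

    #include <sys/filio.h>
    #include <sys/ioctl.h>

    /* Bytes readable from fd right now, or -1 on error. */
    static int
    bytes_pending(int fd)
    {
        int n = 0;
        if (ioctl(fd, FIONREAD, &n) == -1)
            return -1;
        return n;
    }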
All Rights Reserved */ @@ -66,8 +66,8 @@ * @(#)fsctl.h 8.6 (Berkeley) 3/28/94 */ -#ifndef _SYS_FSCTL_H_ -#define _SYS_FSCTL_H_ +#ifndef _SYS_FSCTL_H_ +#define _SYS_FSCTL_H_ #include #include @@ -225,123 +225,126 @@ typedef char fstypename_t[MFSTYPENAMELEN]; #ifdef KERNEL typedef struct user64_package_ext_info { - user64_addr_t strings; - uint32_t num_entries; - uint32_t max_width; + user64_addr_t strings; + uint32_t num_entries; + uint32_t max_width; } user64_package_ext_info; typedef struct user32_package_ext_info { - user32_addr_t strings; - uint32_t num_entries; - uint32_t max_width; + user32_addr_t strings; + uint32_t num_entries; + uint32_t max_width; } user32_package_ext_info; #endif // KERNEL typedef struct package_ext_info { - const char *strings; - uint32_t num_entries; - uint32_t max_width; + const char *strings; + uint32_t num_entries; + uint32_t max_width; } package_ext_info; /* Disk conditioner configuration */ typedef struct disk_conditioner_info { - int enabled; - uint64_t access_time_usec; // maximum latency until transfer begins - uint64_t read_throughput_mbps; // maximum throughput for reads - uint64_t write_throughput_mbps; // maximum throughput for writes - int is_ssd; // behave like an SSD - - /* revision 2 */ - uint32_t ioqueue_depth; - uint32_t maxreadcnt; - uint32_t maxwritecnt; - uint32_t segreadcnt; - uint32_t segwritecnt; + int enabled; + uint64_t access_time_usec; // maximum latency until transfer begins + uint64_t read_throughput_mbps; // maximum throughput for reads + uint64_t write_throughput_mbps; // maximum throughput for writes + int is_ssd; // behave like an SSD + + /* revision 2 */ + uint32_t ioqueue_depth; + uint32_t maxreadcnt; + uint32_t maxwritecnt; + uint32_t segreadcnt; + uint32_t segwritecnt; } disk_conditioner_info; -#define FSCTL_SYNC_FULLSYNC (1<<0) /* Flush the data fully to disk, if supported by the filesystem */ -#define FSCTL_SYNC_WAIT (1<<1) /* Wait for the sync to complete */ +#define FSCTL_SYNC_FULLSYNC (1<<0) /* Flush the data fully to disk, if supported by the filesystem */ +#define FSCTL_SYNC_WAIT (1<<1) /* Wait for the sync to complete */ -#define FSIOC_SYNC_VOLUME _IOW('A', 1, uint32_t) -#define FSCTL_SYNC_VOLUME IOCBASECMD(FSIOC_SYNC_VOLUME) +#define FSIOC_SYNC_VOLUME _IOW('A', 1, uint32_t) +#define FSCTL_SYNC_VOLUME IOCBASECMD(FSIOC_SYNC_VOLUME) -#define FSIOC_SET_PACKAGE_EXTS _IOW('A', 2, struct package_ext_info) -#define FSCTL_SET_PACKAGE_EXTS IOCBASECMD(FSIOC_SET_PACKAGE_EXTS) +#define FSIOC_SET_PACKAGE_EXTS _IOW('A', 2, struct package_ext_info) +#define FSCTL_SET_PACKAGE_EXTS IOCBASECMD(FSIOC_SET_PACKAGE_EXTS) /* Unsupported - previously FSIOC_WAIT_FOR_SYNC */ -#define FSIOC_UNSUPPORTED _IOR('A', 3, int32_t) +#define FSIOC_UNSUPPORTED _IOR('A', 3, int32_t) -#define FSIOC_NAMESPACE_HANDLER_GET _IOW('A', 4, struct namespace_handler_info) -#define FSCTL_NAMESPACE_HANDLER_GET IOCBASECMD(FSIOC_NAMESPACE_HANDLER_GET) +#define FSIOC_NAMESPACE_HANDLER_GET _IOW('A', 4, struct namespace_handler_info) +#define FSCTL_NAMESPACE_HANDLER_GET IOCBASECMD(FSIOC_NAMESPACE_HANDLER_GET) -#define FSIOC_NAMESPACE_HANDLER_UPDATE _IOW('A', 5, nspace_handler_info) -#define FSCTL_NAMESPACE_HANDLER_UPDATE IOCBASECMD(FSIOC_NAMESPACE_HANDLER_UPDATE) +#define FSIOC_NAMESPACE_HANDLER_UPDATE _IOW('A', 5, nspace_handler_info) +#define FSCTL_NAMESPACE_HANDLER_UPDATE IOCBASECMD(FSIOC_NAMESPACE_HANDLER_UPDATE) -#define FSIOC_NAMESPACE_HANDLER_UNBLOCK _IOW('A', 6, nspace_handler_info) -#define FSCTL_NAMESPACE_HANDLER_UNBLOCK 
IOCBASECMD(FSIOC_NAMESPACE_HANDLER_UNBLOCK) +#define FSIOC_NAMESPACE_HANDLER_UNBLOCK _IOW('A', 6, nspace_handler_info) +#define FSCTL_NAMESPACE_HANDLER_UNBLOCK IOCBASECMD(FSIOC_NAMESPACE_HANDLER_UNBLOCK) -#define FSIOC_NAMESPACE_HANDLER_CANCEL _IOW('A', 7, nspace_handler_info) -#define FSCTL_NAMESPACE_HANDLER_CANCEL IOCBASECMD(FSIOC_NAMESPACE_HANDLER_CANCEL) +#define FSIOC_NAMESPACE_HANDLER_CANCEL _IOW('A', 7, nspace_handler_info) +#define FSCTL_NAMESPACE_HANDLER_CANCEL IOCBASECMD(FSIOC_NAMESPACE_HANDLER_CANCEL) #define FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME _IOW('A', 8, int32_t) -#define FSCTL_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME IOCBASECMD(FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME) +#define FSCTL_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME IOCBASECMD(FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME) -#define FSIOC_OLD_SNAPSHOT_HANDLER_GET _IOW('A', 9, struct namespace_handler_info) -#define FSCTL_OLD_SNAPSHOT_HANDLER_GET IOCBASECMD(FSIOC_OLD_SNAPSHOT_HANDLER_GET) +#define FSIOC_OLD_SNAPSHOT_HANDLER_GET _IOW('A', 9, struct namespace_handler_info) +#define FSCTL_OLD_SNAPSHOT_HANDLER_GET IOCBASECMD(FSIOC_OLD_SNAPSHOT_HANDLER_GET) -#define FSIOC_SET_FSTYPENAME_OVERRIDE _IOW('A', 10, fstypename_t) -#define FSCTL_SET_FSTYPENAME_OVERRIDE IOCBASECMD(FSIOC_SET_FSTYPENAME_OVERRIDE) +#define FSIOC_SET_FSTYPENAME_OVERRIDE _IOW('A', 10, fstypename_t) +#define FSCTL_SET_FSTYPENAME_OVERRIDE IOCBASECMD(FSIOC_SET_FSTYPENAME_OVERRIDE) #define FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS _IOW('A', 11, int32_t) -#define FSCTL_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS IOCBASECMD(FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS) +#define FSCTL_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS IOCBASECMD(FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS) /* 12 was used for TRACKED_HANDLER_GET which has now been removed - as it is no longer used. */ + * as it is no longer used. */ -#define FSIOC_SNAPSHOT_HANDLER_GET_EXT _IOW('A', 13, struct namespace_handler_info_ext) -#define FSCTL_SNAPSHOT_HANDLER_GET_EXT IOCBASECMD(FSIOC_SNAPSHOT_HANDLER_GET_EXT) +#define FSIOC_SNAPSHOT_HANDLER_GET_EXT _IOW('A', 13, struct namespace_handler_info_ext) +#define FSCTL_SNAPSHOT_HANDLER_GET_EXT IOCBASECMD(FSIOC_SNAPSHOT_HANDLER_GET_EXT) /* 14 was used for NAMESPACE_HANDLER_GETDATA which has now been - removed as it is no longer used. */ + * removed as it is no longer used. 
*/ -#define FSIOC_ROUTEFS_SETROUTEID _IO('A', 15) -#define FSCTL_ROUTEFS_SETROUTEID IOCBASECMD(FSIOC_ROUTEFS_SETROUTEID) +#define FSIOC_ROUTEFS_SETROUTEID _IO('A', 15) +#define FSCTL_ROUTEFS_SETROUTEID IOCBASECMD(FSIOC_ROUTEFS_SETROUTEID) /* ioctls to support SEEK_HOLE SEEK_DATA */ -#define FSIOC_FIOSEEKHOLE _IOWR('A', 16, off_t) -#define FSCTL_FIOSEEKHOLE IOCBASECMD(FSIOC_FIOSEEKHOLE) -#define FSIOC_FIOSEEKDATA _IOWR('A', 17, off_t) -#define FSCTL_FIOSEEKDATA IOCBASECMD(FSIOC_FIOSEEKDATA) +#define FSIOC_FIOSEEKHOLE _IOWR('A', 16, off_t) +#define FSCTL_FIOSEEKHOLE IOCBASECMD(FSIOC_FIOSEEKHOLE) +#define FSIOC_FIOSEEKDATA _IOWR('A', 17, off_t) +#define FSCTL_FIOSEEKDATA IOCBASECMD(FSIOC_FIOSEEKDATA) /* Disk conditioner */ -#define DISK_CONDITIONER_IOC_GET _IOR('A', 18, disk_conditioner_info) -#define DISK_CONDITIONER_FSCTL_GET IOCBASECMD(DISK_CONDITIONER_IOC_GET) -#define DISK_CONDITIONER_IOC_SET _IOW('A', 19, disk_conditioner_info) -#define DISK_CONDITIONER_FSCTL_SET IOCBASECMD(DISK_CONDITIONER_IOC_SET) +#define DISK_CONDITIONER_IOC_GET _IOR('A', 18, disk_conditioner_info) +#define DISK_CONDITIONER_FSCTL_GET IOCBASECMD(DISK_CONDITIONER_IOC_GET) +#define DISK_CONDITIONER_IOC_SET _IOW('A', 19, disk_conditioner_info) +#define DISK_CONDITIONER_FSCTL_SET IOCBASECMD(DISK_CONDITIONER_IOC_SET) + +/* Check if a file is only open once (pass zero for the extra arg) */ +#define FSIOC_FD_ONLY_OPEN_ONCE _IOWR('A', 21, uint32_t) // -// Spotlight and fseventsd use these fsctl()'s to find out -// the mount time of a volume and the last time it was +// Spotlight and fseventsd use these fsctl()'s to find out +// the mount time of a volume and the last time it was // unmounted. Both HFS and APFS support these calls. // // NOTE: the values for these defines should _not_ be changed // or else it will break binary compatibility with mds // and fseventsd. // -#define SPOTLIGHT_IOC_GET_MOUNT_TIME _IOR('h', 18, u_int32_t) -#define SPOTLIGHT_FSCTL_GET_MOUNT_TIME IOCBASECMD(SPOTLIGHT_IOC_GET_MOUNT_TIME) -#define SPOTLIGHT_IOC_GET_LAST_MTIME _IOR('h', 19, u_int32_t) -#define SPOTLIGHT_FSCTL_GET_LAST_MTIME IOCBASECMD(SPOTLIGHT_IOC_GET_LAST_MTIME) +#define SPOTLIGHT_IOC_GET_MOUNT_TIME _IOR('h', 18, u_int32_t) +#define SPOTLIGHT_FSCTL_GET_MOUNT_TIME IOCBASECMD(SPOTLIGHT_IOC_GET_MOUNT_TIME) +#define SPOTLIGHT_IOC_GET_LAST_MTIME _IOR('h', 19, u_int32_t) +#define SPOTLIGHT_FSCTL_GET_LAST_MTIME IOCBASECMD(SPOTLIGHT_IOC_GET_LAST_MTIME) /* Mark file's extents as "frozen" because someone has references to physical address */ -#define FSIOC_FREEZE_EXTENTS _IO('h', 20) -#define FSCTL_FREEZE_EXTENTS IOCBASECMD(FSIOC_FREEZE_EXTENTS) +#define FSIOC_FREEZE_EXTENTS _IO('h', 20) +#define FSCTL_FREEZE_EXTENTS IOCBASECMD(FSIOC_FREEZE_EXTENTS) /* Clear the "frozen" status of file's extents */ -#define FSIOC_THAW_EXTENTS _IO('h', 21) -#define FSCTL_THAW_EXTENTS IOCBASECMD(FSIOC_THAW_EXTENTS) +#define FSIOC_THAW_EXTENTS _IO('h', 21) +#define FSCTL_THAW_EXTENTS IOCBASECMD(FSIOC_THAW_EXTENTS) #ifndef KERNEL @@ -349,8 +352,8 @@ typedef struct disk_conditioner_info { __BEGIN_DECLS -int fsctl(const char *,unsigned long,void*,unsigned int); -int ffsctl(int,unsigned long,void*,unsigned int); +int fsctl(const char *, unsigned long, void*, unsigned int); +int ffsctl(int, unsigned long, void*, unsigned int); __END_DECLS diff --git a/bsd/sys/fsevents.h b/bsd/sys/fsevents.h index bf338c6e0..8779bc362 100644 --- a/bsd/sys/fsevents.h +++ b/bsd/sys/fsevents.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Inc. All rights reserved. 
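fsctl(2) and ffsctl(2), declared just above, deliver these FSIOC_/FSCTL_ requests to a mounted filesystem by path or by open descriptor. A hedged sketch of a full, waited volume sync; this is a private interface, and passing the FSIOC_ spelling from user space is an assumption of the sketch:

    #include <stdint.h>
    #include <sys/fsctl.h>

    /* Ask the volume hosting volpath to flush fully and wait. */
    static int
    sync_volume(const char *volpath)
    {
        uint32_t flags = FSCTL_SYNC_WAIT | FSCTL_SYNC_FULLSYNC;
        return fsctl(volpath, FSIOC_SYNC_VOLUME, &flags, 0);
    }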
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef FSEVENT_H @@ -97,35 +97,35 @@ // field that /dev/fsevents provides. // #define FSE_MODE_HLINK (1 << 31) // notification is for a hard-link -#define FSE_MODE_LAST_HLINK (1 << 30) // link count == 0 on a hard-link delete +#define FSE_MODE_LAST_HLINK (1 << 30) // link count == 0 on a hard-link delete #define FSE_REMOTE_DIR_EVENT (1 << 29) // this is a remotely generated directory-level granularity event #define FSE_TRUNCATED_PATH (1 << 28) // the path for this item had to be truncated #define FSE_MODE_CLONE (1 << 27) // notification is for a clone // ioctl's on /dev/fsevents typedef struct fsevent_clone_args { - int8_t *event_list; - int32_t num_events; - int32_t event_queue_depth; - int32_t *fd; + int8_t *event_list; + int32_t num_events; + int32_t event_queue_depth; + int32_t *fd; } fsevent_clone_args; -#define FSEVENTS_CLONE _IOW('s', 1, fsevent_clone_args) +#define FSEVENTS_CLONE _IOW('s', 1, fsevent_clone_args) // ioctl's on the cloned fd #pragma pack(push, 4) typedef struct fsevent_dev_filter_args { - uint32_t num_devices; - dev_t *devices; + uint32_t num_devices; + dev_t *devices; } fsevent_dev_filter_args; #pragma pack(pop) -#define FSEVENTS_DEVICE_FILTER _IOW('s', 100, fsevent_dev_filter_args) -#define FSEVENTS_WANT_COMPACT_EVENTS _IO('s', 101) -#define FSEVENTS_WANT_EXTENDED_INFO _IO('s', 102) -#define FSEVENTS_GET_CURRENT_ID _IOR('s', 103, uint64_t) -#define FSEVENTS_UNMOUNT_PENDING_ACK _IOW('s', 104, dev_t) +#define FSEVENTS_DEVICE_FILTER _IOW('s', 100, fsevent_dev_filter_args) +#define FSEVENTS_WANT_COMPACT_EVENTS _IO('s', 101) +#define FSEVENTS_WANT_EXTENDED_INFO _IO('s', 102) +#define FSEVENTS_GET_CURRENT_ID _IOR('s', 103, uint64_t) +#define FSEVENTS_UNMOUNT_PENDING_ACK _IOW('s', 104, dev_t) #ifdef BSD_KERNEL_PRIVATE @@ -137,12 +137,12 @@ void create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr // misc utility functions for fsevent info and pathbuffers... 
typedef struct fse_info { - ino64_t ino; - dev_t dev; - int32_t mode; // note: this is not a mode_t (it's 32-bits, not 16) - uid_t uid; - gid_t gid; - uint64_t nlink; // only filled in if the vnode is marked as a hardlink + ino64_t ino; + dev_t dev; + int32_t mode;// note: this is not a mode_t (it's 32-bits, not 16) + uid_t uid; + gid_t gid; + uint64_t nlink;// only filled in if the vnode is marked as a hardlink } fse_info; int get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx); diff --git a/bsd/sys/fsgetpath.h b/bsd/sys/fsgetpath.h index da8e53173..bde5ce6e8 100644 --- a/bsd/sys/fsgetpath.h +++ b/bsd/sys/fsgetpath.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _FSGETPATH_H_ +#ifndef _FSGETPATH_H_ #define _FSGETPATH_H_ #ifndef KERNEL @@ -64,19 +64,19 @@ ssize_t fsgetpath(char *, size_t, fsid_t *, uint64_t) __OSX_AVAILABLE(10.13) __I /* - * openbyid_np: open a file given a file system id and a file system object id - * + * openbyid_np: open a file given a file system id and a file system object id + * * fsid : value corresponding to getattrlist ATTR_CMN_FSID attribute, or - * value of stat's st.st_dev; set fsid = {st.st_dev, 0} + * value of stat's st.st_dev; set fsid = {st.st_dev, 0} * - * objid: value (link id/node id) corresponding to getattrlist ATTR_CMN_OBJID + * objid: value (link id/node id) corresponding to getattrlist ATTR_CMN_OBJID * attribute, or * value of stat's st.st_ino (node id); set objid = st.st_ino * * For HFS the value of getattrlist ATTR_CMN_OBJID is a link id which uniquely identifies a * parent in the case of hard linked files; this allows unique path access validation. * Not all file systems support getattrlist ATTR_CMN_OBJID (link id). - * A node id does not uniquely identify a parent in the case of hard linked files and may + * A node id does not uniquely identify a parent in the case of hard linked files and may * resolve to a path for which access validation can fail. */ int openbyid_np(fsid_t* fsid, fsobj_id_t* objid, int flags); diff --git a/bsd/sys/fslog.h b/bsd/sys/fslog.h index b0a1e94b4..447bc844b 100644 --- a/bsd/sys/fslog.h +++ b/bsd/sys/fslog.h @@ -2,7 +2,7 @@ * Copyright (c) 2006-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License').
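Following the openbyid_np() comment above, the fsid/objid pair can be recovered from stat(2). A hedged sketch of that path (note the hard-link caveat above: a node id may resolve to a path that fails access validation; also note st_ino is truncated to the 32-bit fsobj_id_t object number here):

    #include <sys/attr.h>       /* fsid_t, fsobj_id_t */
    #include <sys/fsgetpath.h>  /* openbyid_np() */
    #include <sys/stat.h>
    #include <sys/types.h>

    /* Reopen path by (fsid, objid), as the header comment describes. */
    static int
    reopen_by_id(const char *path, int flags)
    {
        struct stat st;
        if (stat(path, &st) == -1)
            return -1;

        fsid_t fsid = { { (int32_t)st.st_dev, 0 } };     /* {st.st_dev, 0} */
        fsobj_id_t objid = { (u_int32_t)st.st_ino, 0 };  /* node id */

        return openbyid_np(&fsid, &objid, flags);
    }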
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _FSLOG_H_ +#ifndef _FSLOG_H_ #define _FSLOG_H_ #include diff --git a/bsd/sys/gmon.h b/bsd/sys/gmon.h index a6576006d..c50bf146a 100644 --- a/bsd/sys/gmon.h +++ b/bsd/sys/gmon.h @@ -2,7 +2,7 @@ * Copyright (c) 2000, 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -69,41 +69,41 @@ * Structure prepended to gmon.out profiling data file. 
*/ struct gmonhdr { - uint32_t lpc; /* base pc address of sample buffer */ - uint32_t hpc; /* max pc address of sampled buffer */ - uint32_t ncnt; /* size of sample buffer (plus this header) */ - int32_t version; /* version number */ - int32_t profrate; /* profiling clock rate */ - int32_t spare[3]; /* reserved */ + uint32_t lpc; /* base pc address of sample buffer */ + uint32_t hpc; /* max pc address of sampled buffer */ + uint32_t ncnt; /* size of sample buffer (plus this header) */ + int32_t version; /* version number */ + int32_t profrate; /* profiling clock rate */ + int32_t spare[3]; /* reserved */ }; -#define GMONVERSION 0x00051879 +#define GMONVERSION 0x00051879 struct gmonhdr_64 { - uint64_t lpc; /* base pc address of sample buffer */ - uint64_t hpc; /* max pc address of sampled buffer */ - uint32_t ncnt; /* size of sample buffer (plus this header) */ - int32_t version; /* version number */ - int32_t profrate; /* profiling clock rate */ - int32_t spare[3]; /* reserved */ + uint64_t lpc; /* base pc address of sample buffer */ + uint64_t hpc; /* max pc address of sampled buffer */ + uint32_t ncnt; /* size of sample buffer (plus this header) */ + int32_t version; /* version number */ + int32_t profrate; /* profiling clock rate */ + int32_t spare[3]; /* reserved */ }; typedef struct #ifndef __LP64__ - gmonhdr + gmonhdr #else - gmonhdr_64 + gmonhdr_64 #endif -gmonhdr_t; + gmonhdr_t; /* * histogram counters are unsigned shorts (according to the kernel). */ -#define HISTCOUNTER unsigned short +#define HISTCOUNTER unsigned short /* * fraction of text space to allocate for histogram counters here, 1/2 */ -#define HISTFRACTION 2 +#define HISTFRACTION 2 /* * Fraction of text space to allocate for from hash buckets. @@ -119,7 +119,7 @@ gmonhdr_t; * calls $0,(r0) * calls $0,(r0) * - * which is separated by only three bytes, thus HASHFRACTION is + * which is separated by only three bytes, thus HASHFRACTION is * calculated as: * * HASHFRACTION = 3 / (2 * 2 - 1) = 1 @@ -127,108 +127,108 @@ gmonhdr_t; * Note that the division above rounds down, thus if MIN_SUBR_FRACTION * is less than three, this algorithm will not work! * - * In practice, however, call instructions are rarely at a minimal + * In practice, however, call instructions are rarely at a minimal * distance. Hence, we will define HASHFRACTION to be 2 across all - * architectures. This saves a reasonable amount of space for + * architectures. This saves a reasonable amount of space for * profiling data structures without (in practice) sacrificing * any granularity. */ -#define HASHFRACTION 2 +#define HASHFRACTION 2 /* * percent of text space to allocate for tostructs with a minimum. */ -#define ARCDENSITY 2 -#define MINARCS 50 -#define MAXARCS ((1 << (8 * sizeof(HISTCOUNTER))) - 2) +#define ARCDENSITY 2 +#define MINARCS 50 +#define MAXARCS ((1 << (8 * sizeof(HISTCOUNTER))) - 2) struct tostruct { - uint32_t selfpc; - int32_t count; - uint16_t link; - uint16_t order; + uint32_t selfpc; + int32_t count; + uint16_t link; + uint16_t order; }; struct tostruct_64 { - uint64_t selfpc; - int32_t count; - uint16_t link; - uint16_t order; + uint64_t selfpc; + int32_t count; + uint16_t link; + uint16_t order; }; typedef struct #ifndef __LP64__ - tostruct + tostruct #else - tostruct_64 + tostruct_64 #endif -tostruct_t; + tostruct_t; /* - * a raw arc, with pointers to the calling site and + * a raw arc, with pointers to the calling site and * the called site and a count. 
*/ struct rawarc { - uint32_t raw_frompc; - uint32_t raw_selfpc; - int32_t raw_count; + uint32_t raw_frompc; + uint32_t raw_selfpc; + int32_t raw_count; }; struct rawarc_64 { - uint64_t raw_frompc; - uint64_t raw_selfpc; - int32_t raw_count; + uint64_t raw_frompc; + uint64_t raw_selfpc; + int32_t raw_count; }; typedef struct #ifndef __LP64__ - rawarc + rawarc #else - rawarc_64 + rawarc_64 #endif -rawarc_t; + rawarc_t; /* * general rounding functions. */ -#define ROUNDDOWN(x,y) (((x)/(y))*(y)) -#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y)) +#define ROUNDDOWN(x, y) (((x)/(y))*(y)) +#define ROUNDUP(x, y) ((((x)+(y)-1)/(y))*(y)) /* * The profiling data structures are housed in this structure. */ struct gmonparam { - int state; - u_short *kcount; - u_long kcountsize; - u_short *froms; - u_long fromssize; - tostruct_t *tos; - u_long tossize; - long tolimit; - u_long lowpc; - u_long highpc; - u_long textsize; - u_long hashfraction; + int state; + u_short *kcount; + u_long kcountsize; + u_short *froms; + u_long fromssize; + tostruct_t *tos; + u_long tossize; + long tolimit; + u_long lowpc; + u_long highpc; + u_long textsize; + u_long hashfraction; }; extern struct gmonparam _gmonparam; /* * Possible states of profiling. */ -#define GMON_PROF_ON 0 -#define GMON_PROF_BUSY 1 -#define GMON_PROF_ERROR 2 -#define GMON_PROF_OFF 3 +#define GMON_PROF_ON 0 +#define GMON_PROF_BUSY 1 +#define GMON_PROF_ERROR 2 +#define GMON_PROF_OFF 3 /* * Sysctl definitions for extracting profiling information from the kernel. */ -#define GPROF_STATE 0 /* int: profiling enabling variable */ -#define GPROF_COUNT 1 /* struct: profile tick count buffer */ -#define GPROF_FROMS 2 /* struct: from location hash bucket */ -#define GPROF_TOS 3 /* struct: destination/count structure */ -#define GPROF_GMONPARAM 4 /* struct: profiling parameters (see above) */ +#define GPROF_STATE 0 /* int: profiling enabling variable */ +#define GPROF_COUNT 1 /* struct: profile tick count buffer */ +#define GPROF_FROMS 2 /* struct: from location hash bucket */ +#define GPROF_TOS 3 /* struct: destination/count structure */ +#define GPROF_GMONPARAM 4 /* struct: profiling parameters (see above) */ /* @@ -255,53 +255,52 @@ void mcount(uintptr_t, uintptr_t); #define GMON_MAGIC 0xbeefbabe #define GMON_MAGIC_64 0xbeefbabf typedef struct gmon_data { - uint32_t type; /* constant for type of data following this struct */ - uint32_t size; /* size in bytes of the data following this struct */ + uint32_t type; /* constant for type of data following this struct */ + uint32_t size; /* size in bytes of the data following this struct */ } gmon_data_t; /* * The GMONTYPE_SAMPLES gmon_data.type is for the histogram counters described * above and has a gmonhdr_t followed by the counters. */ -#define GMONTYPE_SAMPLES 1 +#define GMONTYPE_SAMPLES 1 /* * The GMONTYPE_RAWARCS gmon_data.type is for the raw arcs described above. */ -#define GMONTYPE_RAWARCS 2 +#define GMONTYPE_RAWARCS 2 /* * The GMONTYPE_ARCS_ORDERS gmon_data.type is for the raw arcs with a call * order field. The order is a sequence number for the order in which each * call site was executed. Raw_order values start at 1, not zero. Other than * the raw_order field this is the same information as in the rawarc_t.
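The GPROF_* selectors above are the third level of the kern.prof sysctl node. A sketch of polling the profiling state, assuming a kernel built with profiling (GPROF); stock kernels normally reject the query:

#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>   /* CTL_KERN, KERN_PROF, sysctl() */
#include <sys/gmon.h>     /* GPROF_STATE, GMON_PROF_* */

int
main(void)
{
	int mib[3] = { CTL_KERN, KERN_PROF, GPROF_STATE };
	int state = 0;
	size_t len = sizeof(state);

	if (sysctl(mib, 3, &state, &len, NULL, 0) == -1) {
		perror("kern.prof.state");   /* expected on a non-GPROF kernel */
		return 1;
	}
	printf("profiling state: %d (%s)\n", state,
	    state == GMON_PROF_OFF ? "off" : "on/busy/error");
	return 0;
}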
*/ -#define GMONTYPE_ARCS_ORDERS 3 +#define GMONTYPE_ARCS_ORDERS 3 struct rawarc_order { - uint32_t raw_frompc; - uint32_t raw_selfpc; - uint32_t raw_count; - uint32_t raw_order; - + uint32_t raw_frompc; + uint32_t raw_selfpc; + uint32_t raw_count; + uint32_t raw_order; }; struct rawarc_order_64 { - uint64_t raw_frompc; - uint64_t raw_selfpc; - uint32_t raw_count; - uint32_t raw_order; + uint64_t raw_frompc; + uint64_t raw_selfpc; + uint32_t raw_count; + uint32_t raw_order; }; typedef struct #ifndef __LP64__ - rawarc_order + rawarc_order #else - rawarc_order_64 + rawarc_order_64 #endif -rawarc_order_t; + rawarc_order_t; /* * The GMONTYPE_DYLD_STATE gmon_data.type is for the dynamic link editor state * of the program. * The information starts with a uint32_t with the count of states: * image_count - * Then each state follows in the file. The state is made up of + * Then each state follows in the file. The state is made up of * vmaddr_slide (the amount dyld slid this image from its vmaddress) * name (the file name dyld loaded this image from) * The vmaddr_slide is a 32-bit value for 32-bit programs and 64-bit value for @@ -314,7 +313,7 @@ rawarc_order_t; * of the program. * The information starts with a uint32_t with the count of states: * image_count - * Then each state follows in the file. The state is made up of + * Then each state follows in the file. The state is made up of * image_header (the address where dyld loaded this image) * name (the file name dyld loaded this image from) * The image_header is a 32-bit value for 32-bit programs and 64-bit value for diff --git a/bsd/sys/guarded.h b/bsd/sys/guarded.h index f445d4fd6..6bd3d8e62 100644 --- a/bsd/sys/guarded.h +++ b/bsd/sys/guarded.h @@ -2,7 +2,7 @@ * Copyright (c) 2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License.
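GMON_MAGIC plus the gmon_data records described above amount to a simple tagged container. A reader-loop sketch, with the layout (magic word, then a sequence of headers each followed by its payload) assumed from the comments rather than from a formal spec:

#include <stdio.h>
#include <stdint.h>
#include <sys/gmon.h>

static void
walk_gmon(FILE *f)
{
	uint32_t magic;
	gmon_data_t d;

	if (fread(&magic, sizeof(magic), 1, f) != 1 ||
	    (magic != GMON_MAGIC && magic != GMON_MAGIC_64)) {
		return;   /* old-style gmon.out without tagged records */
	}
	while (fread(&d, sizeof(d), 1, f) == 1) {
		printf("record type %u, %u payload bytes\n", d.type, d.size);
		if (fseek(f, (long)d.size, SEEK_CUR) != 0) {   /* skip payload */
			break;
		}
	}
}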
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -45,15 +45,15 @@ typedef __uint64_t guardid_t; #endif /* _GUARDID_T */ #if !defined(KERNEL) -extern int guarded_open_np(const char *path, - const guardid_t *guard, u_int guardflags, int flags, ...); -extern int guarded_open_dprotected_np(const char *path, - const guardid_t *guard, u_int guardflags, int flags, - int dpclass, int dpflags, ...); +extern int guarded_open_np(const char *path, + const guardid_t *guard, u_int guardflags, int flags, ...); +extern int guarded_open_dprotected_np(const char *path, + const guardid_t *guard, u_int guardflags, int flags, + int dpclass, int dpflags, ...); extern int guarded_kqueue_np(const guardid_t *guard, u_int guardflags); extern int guarded_close_np(int fd, const guardid_t *guard); extern int change_fdguard_np(int fd, const guardid_t *guard, u_int guardflags, - const guardid_t *nguard, u_int nguardflags, int *fdflagsp); + const guardid_t *nguard, u_int nguardflags, int *fdflagsp); extern ssize_t guarded_write_np(int fd, const guardid_t *guard, const void *buf, size_t nbyte); extern ssize_t guarded_pwrite_np(int fd, const guardid_t *guard, const void *buf, size_t nbyte, off_t offset); extern ssize_t guarded_writev_np(int fd, const guardid_t *guard, const struct iovec *iovp, int iovcnt); @@ -61,7 +61,7 @@ extern ssize_t guarded_writev_np(int fd, const guardid_t *guard, const struct io #ifndef GUARD_TYPE_FD /* temporary source compat: use instead */ -#define GUARD_TYPE_FD 0x2 +#define GUARD_TYPE_FD 0x2 #endif /* @@ -72,54 +72,54 @@ extern ssize_t guarded_writev_np(int fd, const guardid_t *guard, const struct io * Forbid close(2), and the implicit close() that a dup2(2) may do. * Forces close-on-fork to be set immutably too. */ -#define GUARD_CLOSE (1u << 0) +#define GUARD_CLOSE (1u << 0) /* * Forbid dup(2), dup2(2), and fcntl(2) subcodes F_DUPFD, F_DUPFD_CLOEXEC * on a guarded fd. Also forbids open's of a guarded fd via /dev/fd/ * (an implicit dup.) */ -#define GUARD_DUP (1u << 1) +#define GUARD_DUP (1u << 1) /* * Forbid sending a guarded fd via a socket */ -#define GUARD_SOCKET_IPC (1u << 2) +#define GUARD_SOCKET_IPC (1u << 2) /* * Forbid creating a fileport from a guarded fd */ -#define GUARD_FILEPORT (1u << 3) +#define GUARD_FILEPORT (1u << 3) /* * Forbid writes on a guarded fd */ -#define GUARD_WRITE (1u << 4) +#define GUARD_WRITE (1u << 4) /* * Violating a guard results in an error (EPERM), and potentially * an exception with one or more of the following bits set. */ enum guard_fd_exception_codes { - kGUARD_EXC_CLOSE = 1u << 0, /* close of a guarded fd */ - kGUARD_EXC_DUP = 1u << 1, /* dup of a guarded fd */ - kGUARD_EXC_NOCLOEXEC = 1u << 2, /* clear close-on-exec */ - kGUARD_EXC_SOCKET_IPC = 1u << 3, /* sendmsg of a guarded fd */ - kGUARD_EXC_FILEPORT = 1u << 4, /* fileport_makeport .. */ - kGUARD_EXC_MISMATCH = 1u << 5, /* wrong guard for guarded fd */ - kGUARD_EXC_WRITE = 1u << 6 /* write on a guarded fd */ + kGUARD_EXC_CLOSE = 1u << 0, /* close of a guarded fd */ + kGUARD_EXC_DUP = 1u << 1, /* dup of a guarded fd */ + kGUARD_EXC_NOCLOEXEC = 1u << 2, /* clear close-on-exec */ + kGUARD_EXC_SOCKET_IPC = 1u << 3, /* sendmsg of a guarded fd */ + kGUARD_EXC_FILEPORT = 1u << 4, /* fileport_makeport .. 
*/ + kGUARD_EXC_MISMATCH = 1u << 5, /* wrong guard for guarded fd */ + kGUARD_EXC_WRITE = 1u << 6 /* write on a guarded fd */ }; /* * Experimental guarded vnode support */ -#define VNG_RENAME_TO (1u << 0) -#define VNG_RENAME_FROM (1u << 1) -#define VNG_UNLINK (1u << 2) -#define VNG_WRITE_OTHER (1u << 3) -#define VNG_TRUNC_OTHER (1u << 4) -#define VNG_LINK (1u << 5) -#define VNG_EXCHDATA (1u << 6) +#define VNG_RENAME_TO (1u << 0) +#define VNG_RENAME_FROM (1u << 1) +#define VNG_UNLINK (1u << 2) +#define VNG_WRITE_OTHER (1u << 3) +#define VNG_TRUNC_OTHER (1u << 4) +#define VNG_LINK (1u << 5) +#define VNG_EXCHDATA (1u << 6) #define VNG_ALL \ (VNG_RENAME_TO | VNG_RENAME_FROM | VNG_UNLINK | VNG_LINK | \ @@ -131,33 +131,33 @@ struct vnguard_set { guardid_t vns_guard; }; -#define VNG_SYSC_PING 0 -#define VNG_SYSC_SET_GUARD 1 +#define VNG_SYSC_PING 0 +#define VNG_SYSC_SET_GUARD 1 -#define VNG_POLICY_NAME "vnguard" +#define VNG_POLICY_NAME "vnguard" /* * Violating a guard may result in an error (EPERM), and potentially * an exception with one or more of the following bits set. */ enum guard_vn_exception_codes { - kGUARD_EXC_RENAME_TO = VNG_RENAME_TO, - kGUARD_EXC_RENAME_FROM = VNG_RENAME_FROM, - kGUARD_EXC_UNLINK = VNG_UNLINK, - kGUARD_EXC_WRITE_OTHER = VNG_WRITE_OTHER, - kGUARD_EXC_TRUNC_OTHER = VNG_TRUNC_OTHER, - kGUARD_EXC_LINK = VNG_LINK, - kGUARD_EXC_EXCHDATA = VNG_EXCHDATA, + kGUARD_EXC_RENAME_TO = VNG_RENAME_TO, + kGUARD_EXC_RENAME_FROM = VNG_RENAME_FROM, + kGUARD_EXC_UNLINK = VNG_UNLINK, + kGUARD_EXC_WRITE_OTHER = VNG_WRITE_OTHER, + kGUARD_EXC_TRUNC_OTHER = VNG_TRUNC_OTHER, + kGUARD_EXC_LINK = VNG_LINK, + kGUARD_EXC_EXCHDATA = VNG_EXCHDATA, }; /* Guard violation behaviors: not all combinations make sense */ -#define kVNG_POLICY_LOGMSG (1u << 0) -#define kVNG_POLICY_EPERM (1u << 1) -#define kVNG_POLICY_EXC (1u << 2) -#define kVNG_POLICY_EXC_CORPSE (1u << 3) -#define kVNG_POLICY_SIGKILL (1u << 4) -#define kVNG_POLICY_UPRINTMSG (1u << 5) +#define kVNG_POLICY_LOGMSG (1u << 0) +#define kVNG_POLICY_EPERM (1u << 1) +#define kVNG_POLICY_EXC (1u << 2) +#define kVNG_POLICY_EXC_CORPSE (1u << 3) +#define kVNG_POLICY_SIGKILL (1u << 4) +#define kVNG_POLICY_UPRINTMSG (1u << 5) #if defined(KERNEL) extern int vnguard_exceptions_active(void); diff --git a/bsd/sys/imageboot.h b/bsd/sys/imageboot.h index dd526f542..7b0f11d9e 100644 --- a/bsd/sys/imageboot.h +++ b/bsd/sys/imageboot.h @@ -2,7 +2,7 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,19 +22,19 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
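The guard flags and exception codes above combine like this in practice; a minimal sketch assuming the guarded_*_np SPI (declared earlier in guarded.h, absent from the public SDK). Under the default policy a violation raises EXC_GUARD, so the bare close(2) below may kill the process rather than merely fail:

#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

typedef uint64_t guardid_t;               /* mirrors the typedef above */

/* SPI prototypes from bsd/sys/guarded.h above. */
extern int guarded_open_np(const char *path, const guardid_t *guard,
    u_int guardflags, int flags, ...);
extern int guarded_close_np(int fd, const guardid_t *guard);

#define GUARD_CLOSE (1u << 0)
#define GUARD_DUP   (1u << 1)

int
main(void)
{
	const guardid_t guard = 0x1234567890abcdefULL;   /* arbitrary secret */
	int fd = guarded_open_np("/tmp/guarded-demo.log", &guard,
	    GUARD_CLOSE | GUARD_DUP, O_CREAT | O_WRONLY | O_APPEND, 0644);

	if (fd < 0) {
		perror("guarded_open_np");
		return 1;
	}
	if (close(fd) == -1) {               /* EPERM, and possibly EXC_GUARD */
		perror("close without guard");
	}
	return guarded_close_np(fd, &guard); /* the sanctioned close path */
}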
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IMAGEBOOT_H_ #define _IMAGEBOOT_H_ -int imageboot_needed(void); -void imageboot_setup(void); -int imageboot_format_is_valid(const char *root_path); -int imageboot_mount_image(const char *root_path, int height); +int imageboot_needed(void); +void imageboot_setup(void); +int imageboot_format_is_valid(const char *root_path); +int imageboot_mount_image(const char *root_path, int height); -#define IMAGEBOOT_CONTAINER_ARG "container-dmg" -#define IMAGEBOOT_ROOT_ARG "root-dmg" -#define IMAGEBOOT_AUTHROOT_ARG "auth-root-dmg" +#define IMAGEBOOT_CONTAINER_ARG "container-dmg" +#define IMAGEBOOT_ROOT_ARG "root-dmg" +#define IMAGEBOOT_AUTHROOT_ARG "auth-root-dmg" #endif diff --git a/bsd/sys/imgact.h b/bsd/sys/imgact.h index 80344fce5..8d5da2872 100644 --- a/bsd/sys/imgact.h +++ b/bsd/sys/imgact.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2005, 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -64,61 +64,61 @@ * Version 2.0. 
*/ #ifndef _SYS_IMGACT_H_ -#define _SYS_IMGACT_H_ +#define _SYS_IMGACT_H_ -#define IMG_SHSIZE 512 /* largest shell interpreter, in bytes */ +#define IMG_SHSIZE 512 /* largest shell interpreter, in bytes */ struct label; struct proc; struct nameidata; struct image_params { - user_addr_t ip_user_fname; /* argument */ - user_addr_t ip_user_argv; /* argument */ - user_addr_t ip_user_envv; /* argument */ - int ip_seg; /* segment for arguments */ - struct vnode *ip_vp; /* file */ - struct vnode_attr *ip_vattr; /* run file attributes */ - struct vnode_attr *ip_origvattr; /* invocation file attributes */ - cpu_type_t ip_origcputype; /* cputype of invocation file */ - cpu_subtype_t ip_origcpusubtype; /* subtype of invocation file */ - char *ip_vdata; /* file data (up to one page) */ - int ip_flags; /* image flags */ - int ip_argc; /* argument count */ - int ip_envc; /* environment count */ - int ip_applec; /* apple vector count */ + user_addr_t ip_user_fname; /* argument */ + user_addr_t ip_user_argv; /* argument */ + user_addr_t ip_user_envv; /* argument */ + int ip_seg; /* segment for arguments */ + struct vnode *ip_vp; /* file */ + struct vnode_attr *ip_vattr; /* run file attributes */ + struct vnode_attr *ip_origvattr; /* invocation file attributes */ + cpu_type_t ip_origcputype; /* cputype of invocation file */ + cpu_subtype_t ip_origcpusubtype; /* subtype of invocation file */ + char *ip_vdata; /* file data (up to one page) */ + int ip_flags; /* image flags */ + int ip_argc; /* argument count */ + int ip_envc; /* environment count */ + int ip_applec; /* apple vector count */ - char *ip_startargv; /* argument vector beginning */ - char *ip_endargv; /* end of argv/start of envv */ - char *ip_endenvv; /* end of envv/start of applev */ + char *ip_startargv; /* argument vector beginning */ + char *ip_endargv; /* end of argv/start of envv */ + char *ip_endenvv; /* end of envv/start of applev */ - char *ip_strings; /* base address for strings */ - char *ip_strendp; /* current end pointer */ + char *ip_strings; /* base address for strings */ + char *ip_strendp; /* current end pointer */ - int ip_argspace; /* remaining space of NCARGS limit (argv+envv) */ - int ip_strspace; /* remaining total string space */ + int ip_argspace; /* remaining space of NCARGS limit (argv+envv) */ + int ip_strspace; /* remaining total string space */ - user_size_t ip_arch_offset; /* subfile offset in ip_vp */ - user_size_t ip_arch_size; /* subfile length in ip_vp */ - char ip_interp_buffer[IMG_SHSIZE]; /* interpreter buffer space */ - int ip_interp_sugid_fd; /* fd for sugid script */ + user_size_t ip_arch_offset; /* subfile offset in ip_vp */ + user_size_t ip_arch_size; /* subfile length in ip_vp */ + char ip_interp_buffer[IMG_SHSIZE]; /* interpreter buffer space */ + int ip_interp_sugid_fd; /* fd for sugid script */ /* Next two fields are for support of architecture translation... 
*/ - struct vfs_context *ip_vfs_context; /* VFS context */ - struct nameidata *ip_ndp; /* current nameidata */ - thread_t ip_new_thread; /* thread for spawn/vfork */ + struct vfs_context *ip_vfs_context; /* VFS context */ + struct nameidata *ip_ndp; /* current nameidata */ + thread_t ip_new_thread; /* thread for spawn/vfork */ - struct label *ip_execlabelp; /* label of the executable */ - struct label *ip_scriptlabelp; /* label of the script */ - struct vnode *ip_scriptvp; /* script */ - unsigned int ip_csflags; /* code signing flags */ - int ip_mac_return; /* return code from mac policy checks */ - void *ip_px_sa; - void *ip_px_sfa; - void *ip_px_spa; - void *ip_px_smpx; /* MAC-specific spawn attrs. */ - void *ip_px_persona; /* persona args */ - void *ip_cs_error; /* codesigning error reason */ + struct label *ip_execlabelp; /* label of the executable */ + struct label *ip_scriptlabelp; /* label of the script */ + struct vnode *ip_scriptvp; /* script */ + unsigned int ip_csflags; /* code signing flags */ + int ip_mac_return; /* return code from mac policy checks */ + void *ip_px_sa; + void *ip_px_sfa; + void *ip_px_spa; + void *ip_px_smpx; /* MAC-specific spawn attrs. */ + void *ip_px_persona; /* persona args */ + void *ip_cs_error; /* codesigning error reason */ uint64_t ip_dyld_fsid; uint64_t ip_dyld_fsobjid; @@ -127,18 +127,18 @@ struct image_params { /* * Image flags */ -#define IMGPF_NONE 0x00000000 /* No flags */ -#define IMGPF_INTERPRET 0x00000001 /* Interpreter invoked */ -#define IMGPF_RESERVED 0x00000002 -#define IMGPF_WAS_64BIT_ADDR 0x00000004 /* exec from a 64Bit address space */ -#define IMGPF_IS_64BIT_ADDR 0x00000008 /* exec to a 64Bit address space */ -#define IMGPF_SPAWN 0x00000010 /* spawn (without setexec) */ -#define IMGPF_DISABLE_ASLR 0x00000020 /* disable ASLR */ -#define IMGPF_ALLOW_DATA_EXEC 0x00000040 /* forcibly allow data execution */ -#define IMGPF_VFORK_EXEC 0x00000080 /* vfork followed by exec */ -#define IMGPF_EXEC 0x00000100 /* exec */ -#define IMGPF_HIGH_BITS_ASLR 0x00000200 /* randomize high bits of ASLR slide */ -#define IMGPF_IS_64BIT_DATA 0x00000400 /* exec to a 64Bit register state */ +#define IMGPF_NONE 0x00000000 /* No flags */ +#define IMGPF_INTERPRET 0x00000001 /* Interpreter invoked */ +#define IMGPF_RESERVED 0x00000002 +#define IMGPF_WAS_64BIT_ADDR 0x00000004 /* exec from a 64Bit address space */ +#define IMGPF_IS_64BIT_ADDR 0x00000008 /* exec to a 64Bit address space */ +#define IMGPF_SPAWN 0x00000010 /* spawn (without setexec) */ +#define IMGPF_DISABLE_ASLR 0x00000020 /* disable ASLR */ +#define IMGPF_ALLOW_DATA_EXEC 0x00000040 /* forcibly allow data execution */ +#define IMGPF_VFORK_EXEC 0x00000080 /* vfork followed by exec */ +#define IMGPF_EXEC 0x00000100 /* exec */ +#define IMGPF_HIGH_BITS_ASLR 0x00000200 /* randomize high bits of ASLR slide */ +#define IMGPF_IS_64BIT_DATA 0x00000400 /* exec to a 64Bit register state */ -#endif /* !_SYS_IMGACT */ +#endif /* !_SYS_IMGACT */ diff --git a/bsd/sys/imgsrc.h b/bsd/sys/imgsrc.h index aac577176..717060ce9 100644 --- a/bsd/sys/imgsrc.h +++ b/bsd/sys/imgsrc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License').
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -64,36 +64,35 @@ #define _SYS_IMGSRC_H_ #include -/* +/* * For mount(2), defined here for easy use with System.framework/PrivateHeaders. */ -#define MNT_IMGSRC_BY_INDEX 0x20000000 +#define MNT_IMGSRC_BY_INDEX 0x20000000 -typedef struct imgsrc_info -{ - uint32_t ii_height; /* Nesting height: 0 is outermost */ - uint32_t ii_flags; /* Currently unused */ - dev_t ii_dev; /* dev_t for this volume */ - char ii_reserved[24];/* TBD */ +typedef struct imgsrc_info { + uint32_t ii_height; /* Nesting height: 0 is outermost */ + uint32_t ii_flags; /* Currently unused */ + dev_t ii_dev; /* dev_t for this volume */ + char ii_reserved[24];/* TBD */ } *imgsrc_info_t; struct mnt_imgsrc_args { - uint32_t mi_height; /* As determined from an imgsrc_info structure */ - uint32_t mi_flags; /* TBD */ - const char* mi_devpath; /* Path to devnode */ + uint32_t mi_height; /* As determined from an imgsrc_info structure */ + uint32_t mi_flags; /* TBD */ + const char* mi_devpath; /* Path to devnode */ }; #ifdef BSD_KERNEL_PRIVATE struct user64_mnt_imgsrc_args { - uint32_t mi_height; - uint32_t mi_flags; - user64_addr_t mi_devpath; -}; + uint32_t mi_height; + uint32_t mi_flags; + user64_addr_t mi_devpath; +}; struct user32_mnt_imgsrc_args { - uint32_t mi_height; - uint32_t mi_flags; - user32_addr_t mi_devpath; -}; + uint32_t mi_height; + uint32_t mi_flags; + user32_addr_t mi_devpath; +}; #endif /* XNU_KERNEL_PRIVATE */ #endif /* _SYS_IMGSRC_H_ */ diff --git a/bsd/sys/ioccom.h b/bsd/sys/ioccom.h index cc22148f0..cabce8cf7 100644 --- a/bsd/sys/ioccom.h +++ b/bsd/sys/ioccom.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -61,8 +61,8 @@ * @(#)ioccom.h 8.2 (Berkeley) 3/28/94 */ -#ifndef _SYS_IOCCOM_H_ -#define _SYS_IOCCOM_H_ +#ifndef _SYS_IOCCOM_H_ +#define _SYS_IOCCOM_H_ #include @@ -71,29 +71,29 @@ * any in or out parameters in the upper word. The high 3 bits of the * upper word are used to encode the in/out status of the parameter. */ -#define IOCPARM_MASK 0x1fff /* parameter length, at most 13 bits */ -#define IOCPARM_LEN(x) (((x) >> 16) & IOCPARM_MASK) -#define IOCBASECMD(x) ((x) & ~(IOCPARM_MASK << 16)) -#define IOCGROUP(x) (((x) >> 8) & 0xff) +#define IOCPARM_MASK 0x1fff /* parameter length, at most 13 bits */ +#define IOCPARM_LEN(x) (((x) >> 16) & IOCPARM_MASK) +#define IOCBASECMD(x) ((x) & ~(IOCPARM_MASK << 16)) +#define IOCGROUP(x) (((x) >> 8) & 0xff) -#define IOCPARM_MAX (IOCPARM_MASK + 1) /* max size of ioctl args */ - /* no parameters */ -#define IOC_VOID (__uint32_t)0x20000000 - /* copy parameters out */ -#define IOC_OUT (__uint32_t)0x40000000 - /* copy parameters in */ -#define IOC_IN (__uint32_t)0x80000000 - /* copy parameters in and out */ -#define IOC_INOUT (IOC_IN|IOC_OUT) - /* mask for IN/OUT/VOID */ -#define IOC_DIRMASK (__uint32_t)0xe0000000 +#define IOCPARM_MAX (IOCPARM_MASK + 1) /* max size of ioctl args */ +/* no parameters */ +#define IOC_VOID (__uint32_t)0x20000000 +/* copy parameters out */ +#define IOC_OUT (__uint32_t)0x40000000 +/* copy parameters in */ +#define IOC_IN (__uint32_t)0x80000000 +/* copy parameters in and out */ +#define IOC_INOUT (IOC_IN|IOC_OUT) +/* mask for IN/OUT/VOID */ +#define IOC_DIRMASK (__uint32_t)0xe0000000 -#define _IOC(inout,group,num,len) \ +#define _IOC(inout, group, num, len) \ (inout | ((len & IOCPARM_MASK) << 16) | ((group) << 8) | (num)) -#define _IO(g,n) _IOC(IOC_VOID, (g), (n), 0) -#define _IOR(g,n,t) _IOC(IOC_OUT, (g), (n), sizeof(t)) -#define _IOW(g,n,t) _IOC(IOC_IN, (g), (n), sizeof(t)) +#define _IO(g, n) _IOC(IOC_VOID, (g), (n), 0) +#define _IOR(g, n, t) _IOC(IOC_OUT, (g), (n), sizeof(t)) +#define _IOW(g, n, t) _IOC(IOC_IN, (g), (n), sizeof(t)) /* this should be _IORW, but stdio got there first */ -#define _IOWR(g,n,t) _IOC(IOC_INOUT, (g), (n), sizeof(t)) +#define _IOWR(g, n, t) _IOC(IOC_INOUT, (g), (n), sizeof(t)) #endif /* !_SYS_IOCCOM_H_ */ diff --git a/bsd/sys/ioctl.h b/bsd/sys/ioctl.h index 8c990e435..4ad481286 100644 --- a/bsd/sys/ioctl.h +++ b/bsd/sys/ioctl.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file.
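The _IOC() encoding above can be unpacked with the same header's decode macros. A sketch using the well-known TIOCGWINSZ request, which <sys/ttycom.h> defines as _IOR('t', 104, struct winsize):

#include <stdio.h>
#include <sys/ioccom.h>
#include <sys/ttycom.h>   /* TIOCGWINSZ, struct winsize */

int
main(void)
{
	unsigned long req = TIOCGWINSZ;

	printf("group '%c', number %lu, %lu-byte parameter\n",
	    (int)IOCGROUP(req), req & 0xff,
	    (unsigned long)IOCPARM_LEN(req));   /* expect 't', 104, 8 */
	printf("direction: %s\n",
	    (req & IOC_DIRMASK) == IOC_OUT ? "copies out to userland" : "other");
	return 0;
}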
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -66,8 +66,8 @@ * @(#)ioctl.h 8.6 (Berkeley) 3/28/94 */ -#ifndef _SYS_IOCTL_H_ -#define _SYS_IOCTL_H_ +#ifndef _SYS_IOCTL_H_ +#define _SYS_IOCTL_H_ #include @@ -77,13 +77,13 @@ * notwithstanding). */ struct ttysize { - unsigned short ts_lines; - unsigned short ts_cols; - unsigned short ts_xxx; - unsigned short ts_yyy; + unsigned short ts_lines; + unsigned short ts_cols; + unsigned short ts_xxx; + unsigned short ts_yyy; }; -#define TIOCGSIZE TIOCGWINSZ -#define TIOCSSIZE TIOCSWINSZ +#define TIOCGSIZE TIOCGWINSZ +#define TIOCSSIZE TIOCSWINSZ #include @@ -95,7 +95,7 @@ struct ttysize { #include __BEGIN_DECLS -int ioctl(int, unsigned long, ...); +int ioctl(int, unsigned long, ...); __END_DECLS #endif /* !KERNEL */ #endif /* !_SYS_IOCTL_H_ */ diff --git a/bsd/sys/ioctl_compat.h b/bsd/sys/ioctl_compat.h index 55537e558..729e3e33a 100644 --- a/bsd/sys/ioctl_compat.h +++ b/bsd/sys/ioctl_compat.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc.
All Rights Reserved */ @@ -67,138 +67,138 @@ */ #ifndef _SYS_IOCTL_COMPAT_H_ -#define _SYS_IOCTL_COMPAT_H_ +#define _SYS_IOCTL_COMPAT_H_ #include #include struct tchars { - char t_intrc; /* interrupt */ - char t_quitc; /* quit */ - char t_startc; /* start output */ - char t_stopc; /* stop output */ - char t_eofc; /* end-of-file */ - char t_brkc; /* input delimiter (like nl) */ + char t_intrc; /* interrupt */ + char t_quitc; /* quit */ + char t_startc; /* start output */ + char t_stopc; /* stop output */ + char t_eofc; /* end-of-file */ + char t_brkc; /* input delimiter (like nl) */ }; struct ltchars { - char t_suspc; /* stop process signal */ - char t_dsuspc; /* delayed stop process signal */ - char t_rprntc; /* reprint line */ - char t_flushc; /* flush output (toggles) */ - char t_werasc; /* word erase */ - char t_lnextc; /* literal next character */ + char t_suspc; /* stop process signal */ + char t_dsuspc; /* delayed stop process signal */ + char t_rprntc; /* reprint line */ + char t_flushc; /* flush output (toggles) */ + char t_werasc; /* word erase */ + char t_lnextc; /* literal next character */ }; /* * Structure for TIOCGETP and TIOCSETP ioctls. */ #ifndef _SGTTYB_ -#define _SGTTYB_ +#define _SGTTYB_ struct sgttyb { - char sg_ispeed; /* input speed */ - char sg_ospeed; /* output speed */ - char sg_erase; /* erase character */ - char sg_kill; /* kill character */ - short sg_flags; /* mode flags */ + char sg_ispeed; /* input speed */ + char sg_ospeed; /* output speed */ + char sg_erase; /* erase character */ + char sg_kill; /* kill character */ + short sg_flags; /* mode flags */ }; #endif #ifdef USE_OLD_TTY # undef TIOCGETD -# define TIOCGETD _IOR('t', 0, int) /* get line discipline */ +# define TIOCGETD _IOR('t', 0, int) /* get line discipline */ # undef TIOCSETD -# define TIOCSETD _IOW('t', 1, int) /* set line discipline */ +# define TIOCSETD _IOW('t', 1, int) /* set line discipline */ #else -# define OTIOCGETD _IOR('t', 0, int) /* get line discipline */ -# define OTIOCSETD _IOW('t', 1, int) /* set line discipline */ +# define OTIOCGETD _IOR('t', 0, int) /* get line discipline */ +# define OTIOCSETD _IOW('t', 1, int) /* set line discipline */ #endif -#define TIOCHPCL _IO('t', 2) /* hang up on last close */ -#define TIOCGETP _IOR('t', 8,struct sgttyb)/* get parameters -- gtty */ -#define TIOCSETP _IOW('t', 9,struct sgttyb)/* set parameters -- stty */ -#define TIOCSETN _IOW('t',10,struct sgttyb)/* as above, but no flushtty*/ -#define TIOCSETC _IOW('t',17,struct tchars)/* set special characters */ -#define TIOCGETC _IOR('t',18,struct tchars)/* get special characters */ -#define TANDEM 0x00000001 /* send stopc on out q full */ -#define CBREAK 0x00000002 /* half-cooked mode */ -#define LCASE 0x00000004 /* simulate lower case */ -#define ECHO 0x00000008 /* echo input */ -#define CRMOD 0x00000010 /* map \r to \r\n on output */ -#define RAW 0x00000020 /* no i/o processing */ -#define ODDP 0x00000040 /* get/send odd parity */ -#define EVENP 0x00000080 /* get/send even parity */ -#define ANYP 0x000000c0 /* get any parity/send none */ -#define NLDELAY 0x00000300 /* \n delay */ -#define TBDELAY 0x00000c00 /* horizontal tab delay */ -#define XTABS 0x00000c00 /* expand tabs on output */ -#define CRDELAY 0x00003000 /* \r delay */ -#define VTDELAY 0x00004000 /* vertical tab delay */ -#define BSDELAY 0x00008000 /* \b delay */ -#ifndef _SYS_TERMIOS_H_ +#define TIOCHPCL _IO('t', 2) /* hang up on last close */ +#define TIOCGETP _IOR('t', 8,struct sgttyb)/* get parameters -- gtty */ +#define TIOCSETP 
_IOW('t', 9,struct sgttyb)/* set parameters -- stty */ +#define TIOCSETN _IOW('t',10,struct sgttyb)/* as above, but no flushtty*/ +#define TIOCSETC _IOW('t',17,struct tchars)/* set special characters */ +#define TIOCGETC _IOR('t',18,struct tchars)/* get special characters */ +#define TANDEM 0x00000001 /* send stopc on out q full */ +#define CBREAK 0x00000002 /* half-cooked mode */ +#define LCASE 0x00000004 /* simulate lower case */ +#define ECHO 0x00000008 /* echo input */ +#define CRMOD 0x00000010 /* map \r to \r\n on output */ +#define RAW 0x00000020 /* no i/o processing */ +#define ODDP 0x00000040 /* get/send odd parity */ +#define EVENP 0x00000080 /* get/send even parity */ +#define ANYP 0x000000c0 /* get any parity/send none */ +#define NLDELAY 0x00000300 /* \n delay */ +#define TBDELAY 0x00000c00 /* horizontal tab delay */ +#define XTABS 0x00000c00 /* expand tabs on output */ +#define CRDELAY 0x00003000 /* \r delay */ +#define VTDELAY 0x00004000 /* vertical tab delay */ +#define BSDELAY 0x00008000 /* \b delay */ +#ifndef _SYS_TERMIOS_H_ /* * These manifest constants have the same names as those in , * so you are not permitted to have both definitions in scope simultaneously * in the same compilation unit. */ -#define NL0 0x00000000 -#define NL1 0x00000100 /* tty 37 */ -#define NL2 0x00000200 /* vt05 */ -#define NL3 0x00000300 -#define TAB0 0x00000000 -#define TAB1 0x00000400 /* tty 37 */ -#define TAB2 0x00000800 -#define CR0 0x00000000 -#define CR1 0x00001000 /* tn 300 */ -#define CR2 0x00002000 /* tty 37 */ -#define CR3 0x00003000 /* concept 100 */ -#define FF0 0x00000000 -#define FF1 0x00004000 /* tty 37 */ -#define BS0 0x00000000 -#define BS1 0x00008000 -#endif /* !_SYS_TERMIOS_H_ */ -#define ALLDELAY (NLDELAY|TBDELAY|CRDELAY|VTDELAY|BSDELAY) -#define CRTBS 0x00010000 /* do backspacing for crt */ -#define PRTERA 0x00020000 /* \ ... 
/ erase */ -#define CRTERA 0x00040000 /* " \b " to wipe out char */ -#define TILDE 0x00080000 /* hazeltine tilde kludge */ -#define MDMBUF 0x00100000 /*start/stop output on carrier*/ -#define LITOUT 0x00200000 /* literal output */ -#define TOSTOP 0x00400000 /*SIGSTOP on background output*/ -#define FLUSHO 0x00800000 /* flush output to terminal */ -#define NOHANG 0x01000000 /* (no-op) was no SIGHUP on carrier drop */ -#define L001000 0x02000000 -#define CRTKIL 0x04000000 /* kill line with " \b " */ -#define PASS8 0x08000000 -#define CTLECH 0x10000000 /* echo control chars as ^X */ -#define PENDIN 0x20000000 /* tp->t_rawq needs reread */ -#define DECCTQ 0x40000000 /* only ^Q starts after ^S */ -#define NOFLSH 0x80000000 /* no output flush on signal */ -#define TIOCLBIS _IOW('t', 127, int) /* bis local mode bits */ -#define TIOCLBIC _IOW('t', 126, int) /* bic local mode bits */ -#define TIOCLSET _IOW('t', 125, int) /* set entire local mode word */ -#define TIOCLGET _IOR('t', 124, int) /* get local modes */ -#define LCRTBS (CRTBS>>16) -#define LPRTERA (PRTERA>>16) -#define LCRTERA (CRTERA>>16) -#define LTILDE (TILDE>>16) -#define LMDMBUF (MDMBUF>>16) -#define LLITOUT (LITOUT>>16) -#define LTOSTOP (TOSTOP>>16) -#define LFLUSHO (FLUSHO>>16) -#define LNOHANG (NOHANG>>16) -#define LCRTKIL (CRTKIL>>16) -#define LPASS8 (PASS8>>16) -#define LCTLECH (CTLECH>>16) -#define LPENDIN (PENDIN>>16) -#define LDECCTQ (DECCTQ>>16) -#define LNOFLSH (NOFLSH>>16) -#define TIOCSLTC _IOW('t',117,struct ltchars)/* set local special chars*/ -#define TIOCGLTC _IOR('t',116,struct ltchars)/* get local special chars*/ -#define OTIOCCONS _IO('t', 98) /* for hp300 -- sans int arg */ -#define OTTYDISC 0 -#define NETLDISC 1 -#define NTTYDISC 2 +#define NL0 0x00000000 +#define NL1 0x00000100 /* tty 37 */ +#define NL2 0x00000200 /* vt05 */ +#define NL3 0x00000300 +#define TAB0 0x00000000 +#define TAB1 0x00000400 /* tty 37 */ +#define TAB2 0x00000800 +#define CR0 0x00000000 +#define CR1 0x00001000 /* tn 300 */ +#define CR2 0x00002000 /* tty 37 */ +#define CR3 0x00003000 /* concept 100 */ +#define FF0 0x00000000 +#define FF1 0x00004000 /* tty 37 */ +#define BS0 0x00000000 +#define BS1 0x00008000 +#endif /* !_SYS_TERMIOS_H_ */ +#define ALLDELAY (NLDELAY|TBDELAY|CRDELAY|VTDELAY|BSDELAY) +#define CRTBS 0x00010000 /* do backspacing for crt */ +#define PRTERA 0x00020000 /* \ ... 
/ erase */ +#define CRTERA 0x00040000 /* " \b " to wipe out char */ +#define TILDE 0x00080000 /* hazeltine tilde kludge */ +#define MDMBUF 0x00100000 /*start/stop output on carrier*/ +#define LITOUT 0x00200000 /* literal output */ +#define TOSTOP 0x00400000 /*SIGSTOP on background output*/ +#define FLUSHO 0x00800000 /* flush output to terminal */ +#define NOHANG 0x01000000 /* (no-op) was no SIGHUP on carrier drop */ +#define L001000 0x02000000 +#define CRTKIL 0x04000000 /* kill line with " \b " */ +#define PASS8 0x08000000 +#define CTLECH 0x10000000 /* echo control chars as ^X */ +#define PENDIN 0x20000000 /* tp->t_rawq needs reread */ +#define DECCTQ 0x40000000 /* only ^Q starts after ^S */ +#define NOFLSH 0x80000000 /* no output flush on signal */ +#define TIOCLBIS _IOW('t', 127, int) /* bis local mode bits */ +#define TIOCLBIC _IOW('t', 126, int) /* bic local mode bits */ +#define TIOCLSET _IOW('t', 125, int) /* set entire local mode word */ +#define TIOCLGET _IOR('t', 124, int) /* get local modes */ +#define LCRTBS (CRTBS>>16) +#define LPRTERA (PRTERA>>16) +#define LCRTERA (CRTERA>>16) +#define LTILDE (TILDE>>16) +#define LMDMBUF (MDMBUF>>16) +#define LLITOUT (LITOUT>>16) +#define LTOSTOP (TOSTOP>>16) +#define LFLUSHO (FLUSHO>>16) +#define LNOHANG (NOHANG>>16) +#define LCRTKIL (CRTKIL>>16) +#define LPASS8 (PASS8>>16) +#define LCTLECH (CTLECH>>16) +#define LPENDIN (PENDIN>>16) +#define LDECCTQ (DECCTQ>>16) +#define LNOFLSH (NOFLSH>>16) +#define TIOCSLTC _IOW('t',117,struct ltchars)/* set local special chars*/ +#define TIOCGLTC _IOR('t',116,struct ltchars)/* get local special chars*/ +#define OTIOCCONS _IO('t', 98) /* for hp300 -- sans int arg */ +#define OTTYDISC 0 +#define NETLDISC 1 +#define NTTYDISC 2 -#define TIOCGSID _IOR('t', 99, int) /* For svr4 -- get session id */ +#define TIOCGSID _IOR('t', 99, int) /* For svr4 -- get session id */ #endif /* !_SYS_IOCTL_COMPAT_H_ */ diff --git a/bsd/sys/ipc.h b/bsd/sys/ipc.h index 91f3492c3..3a5b7b494 100644 --- a/bsd/sys/ipc.h +++ b/bsd/sys/ipc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -104,20 +104,19 @@ * [XSI] Information used in determining permission to perform an IPC * operation */ -struct ipc_perm -{ - uid_t uid; /* [XSI] Owner's user ID */ - gid_t gid; /* [XSI] Owner's group ID */ - uid_t cuid; /* [XSI] Creator's user ID */ - gid_t cgid; /* [XSI] Creator's group ID */ - mode_t mode; /* [XSI] Read/write permission */ - unsigned short _seq; /* Reserved for internal use */ - key_t _key; /* Reserved for internal use */ +struct ipc_perm { + uid_t uid; /* [XSI] Owner's user ID */ + gid_t gid; /* [XSI] Owner's group ID */ + uid_t cuid; /* [XSI] Creator's user ID */ + gid_t cgid; /* [XSI] Creator's group ID */ + mode_t mode; /* [XSI] Read/write permission */ + unsigned short _seq; /* Reserved for internal use */ + key_t _key; /* Reserved for internal use */ }; -#define __ipc_perm_new ipc_perm -#else /* !__DARWIN_UNIX03 */ -#define ipc_perm __ipc_perm_old -#endif /* !__DARWIN_UNIX03 */ +#define __ipc_perm_new ipc_perm +#else /* !__DARWIN_UNIX03 */ +#define ipc_perm __ipc_perm_old +#endif /* !__DARWIN_UNIX03 */ #if !__DARWIN_UNIX03 /* @@ -126,15 +125,15 @@ struct ipc_perm * should not use this interface, since ID values may be truncated. */ struct __ipc_perm_old { - __uint16_t cuid; /* Creator's user ID */ - __uint16_t cgid; /* Creator's group ID */ - __uint16_t uid; /* Owner's user ID */ - __uint16_t gid; /* Owner's group ID */ - mode_t mode; /* Read/Write permission */ - __uint16_t seq; /* Reserved for internal use */ - key_t key; /* Reserved for internal use */ + __uint16_t cuid; /* Creator's user ID */ + __uint16_t cgid; /* Creator's group ID */ + __uint16_t uid; /* Owner's user ID */ + __uint16_t gid; /* Owner's group ID */ + mode_t mode; /* Read/Write permission */ + __uint16_t seq; /* Reserved for internal use */ + key_t key; /* Reserved for internal use */ }; -#endif /* !__DARWIN_UNIX03 */ +#endif /* !__DARWIN_UNIX03 */ #pragma pack() @@ -143,27 +142,27 @@ struct __ipc_perm_old { */ /* Mode bits */ -#define IPC_CREAT 001000 /* Create entry if key does not exist */ -#define IPC_EXCL 002000 /* Fail if key exists */ -#define IPC_NOWAIT 004000 /* Error if request must wait */ +#define IPC_CREAT 001000 /* Create entry if key does not exist */ +#define IPC_EXCL 002000 /* Fail if key exists */ +#define IPC_NOWAIT 004000 /* Error if request must wait */ /* Keys */ -#define IPC_PRIVATE ((key_t)0) /* Private key */ +#define IPC_PRIVATE ((key_t)0) /* Private key */ /* Control commands */ -#define IPC_RMID 0 /* Remove identifier */ -#define IPC_SET 1 /* Set options */ -#define IPC_STAT 2 /* Get options */ +#define IPC_RMID 0 /* Remove identifier */ +#define IPC_SET 1 /* Set options */ +#define IPC_STAT 2 /* Get options */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* common mode bits */ -#define IPC_R 000400 /* Read permission */ -#define IPC_W 000200 /* Write/alter permission */ -#define IPC_M 010000 /* Modify control info permission */ +#define IPC_R 000400 /* Read permission */ +#define IPC_W 000200 /* Write/alter permission */ +#define IPC_M 010000 /* Modify control info permission */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #ifdef BSD_KERNEL_PRIVATE @@ -173,22 +172,22 @@ struct __ipc_perm_old { */ /* Macros to convert between ipc ids and array indices or sequence ids */ -#define IPCID_TO_IX(id) ((id) & 0xffff) -#define IPCID_TO_SEQ(id) (((id) >> 16) & 0xffff) -#define IXSEQ_TO_IPCID(ix,perm) (((perm._seq) << 16L) | ((ix) & 0xffff)) 
+#define IPCID_TO_IX(id) ((id) & 0xffff) +#define IPCID_TO_SEQ(id) (((id) >> 16) & 0xffff) +#define IXSEQ_TO_IPCID(ix, perm) (((perm._seq) << 16L) | ((ix) & 0xffff)) struct ucred; -int ipcperm(struct ucred *, struct ipc_perm *, int); +int ipcperm(struct ucred *, struct ipc_perm *, int); #endif /* BSD_KERNEL_PRIVATE */ #ifndef KERNEL __BEGIN_DECLS /* [XSI] */ -key_t ftok(const char *, int); +key_t ftok(const char *, int); __END_DECLS -#endif /* !KERNEL */ +#endif /* !KERNEL */ #endif /* !_SYS_IPC_H_ */ diff --git a/bsd/sys/ipcs.h b/bsd/sys/ipcs.h index 48215be74..3d119bde8 100644 --- a/bsd/sys/ipcs.h +++ b/bsd/sys/ipcs.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,7 +37,7 @@ #include #include -#define IPCS_MAGIC 0x00000001 /* Version */ +#define IPCS_MAGIC 0x00000001 /* Version */ /* * IPCS_command @@ -49,30 +49,30 @@ */ struct IPCS_command { - int ipcs_magic; /* Magic number for struct layout */ - int ipcs_op; /* Operation to perform */ - int ipcs_cursor; /* Cursor for iteration functions */ - int ipcs_datalen; /* Length of ipcs_data area */ - void *ipcs_data; /* OP specific data */ + int ipcs_magic; /* Magic number for struct layout */ + int ipcs_op; /* Operation to perform */ + int ipcs_cursor; /* Cursor for iteration functions */ + int ipcs_datalen; /* Length of ipcs_data area */ + void *ipcs_data; /* OP specific data */ }; #ifdef KERNEL_PRIVATE #include struct user_IPCS_command { - int ipcs_magic; /* Magic number for struct layout */ - int ipcs_op; /* Operation to perform */ - int ipcs_cursor; /* Cursor for iteration functions */ - int ipcs_datalen; /* Length of ipcs_data area */ - user_addr_t ipcs_data; /* OP specific data */ + int ipcs_magic; /* Magic number for struct layout */ + int ipcs_op; /* Operation to perform */ + int ipcs_cursor; /* Cursor for iteration functions */ + int ipcs_datalen; /* Length of ipcs_data area */ + user_addr_t ipcs_data; /* OP specific data */ }; struct user32_IPCS_command { - int ipcs_magic; /* Magic number for struct layout */ - int ipcs_op; /* Operation to perform */ - int ipcs_cursor; /* Cursor for iteration functions */ - int ipcs_datalen; /* Length of ipcs_data area */ - user32_addr_t ipcs_data; /* OP specific data */ + int ipcs_magic; /* Magic number for struct layout */ + int ipcs_op; /* Operation to perform */ + int ipcs_cursor; /* Cursor for iteration functions */ + int ipcs_datalen; /* Length of ipcs_data area */ + user32_addr_t 
ipcs_data; /* OP specific data */ }; #endif /* KERNEL_PRIVATE */ @@ -80,21 +80,21 @@ struct user32_IPCS_command { /* * OP code values for 'ipcs_op' */ -#define IPCS_SHM_CONF 0x00000001 /* Obtain shared memory config */ -#define IPCS_SHM_ITER 0x00000002 /* Iterate shared memory info */ +#define IPCS_SHM_CONF 0x00000001 /* Obtain shared memory config */ +#define IPCS_SHM_ITER 0x00000002 /* Iterate shared memory info */ -#define IPCS_SEM_CONF 0x00000010 /* Obtain semaphore config */ -#define IPCS_SEM_ITER 0x00000020 /* Iterate semaphore info */ +#define IPCS_SEM_CONF 0x00000010 /* Obtain semaphore config */ +#define IPCS_SEM_ITER 0x00000020 /* Iterate semaphore info */ -#define IPCS_MSG_CONF 0x00000100 /* Obtain message queue config */ -#define IPCS_MSG_ITER 0x00000200 /* Iterate message queue info */ +#define IPCS_MSG_CONF 0x00000100 /* Obtain message queue config */ +#define IPCS_MSG_ITER 0x00000200 /* Iterate message queue info */ /* * Sysctl oid name values */ -#define IPCS_SHM_SYSCTL "kern.sysv.ipcs.shm" -#define IPCS_SEM_SYSCTL "kern.sysv.ipcs.sem" -#define IPCS_MSG_SYSCTL "kern.sysv.ipcs.msg" +#define IPCS_SHM_SYSCTL "kern.sysv.ipcs.shm" +#define IPCS_SEM_SYSCTL "kern.sysv.ipcs.sem" +#define IPCS_MSG_SYSCTL "kern.sysv.ipcs.msg" -#endif /* _SYS_IPCS_H_ */ +#endif /* _SYS_IPCS_H_ */ diff --git a/bsd/sys/kas_info.h b/bsd/sys/kas_info.h index d95cf420c..5818462c4 100644 --- a/bsd/sys/kas_info.h +++ b/bsd/sys/kas_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
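To ground the ipc.h pieces earlier (ftok(3), IPC_PRIVATE, and the IPC_CREAT/IPC_EXCL mode bits), a short sketch using only public APIs; the path and segment size are arbitrary:

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int
main(void)
{
	key_t key = ftok("/tmp", 'x');   /* key derived from the file's dev/ino */

	if (key == (key_t)-1) {
		perror("ftok");
		return 1;
	}
	/* IPC_CREAT | IPC_EXCL: create exactly once, fail if the key exists */
	int id = shmget(key, 4096, IPC_CREAT | IPC_EXCL | 0600);
	if (id == -1) {
		perror("shmget");
		return 1;
	}
	printf("key 0x%lx -> shm id %d\n", (unsigned long)key, id);
	shmctl(id, IPC_RMID, NULL);      /* mark the segment for removal */
	return 0;
}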
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _SYS_KAS_INFO_H_ -#define _SYS_KAS_INFO_H_ +#ifndef _SYS_KAS_INFO_H_ +#define _SYS_KAS_INFO_H_ #include #include @@ -41,8 +41,8 @@ __BEGIN_DECLS /* The slide of the main kernel compared to its static link address */ -#define KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR (0) /* returns uint64_t */ -#define KAS_INFO_MAX_SELECTOR (1) +#define KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR (0) /* returns uint64_t */ +#define KAS_INFO_MAX_SELECTOR (1) #ifndef KERNEL @@ -52,4 +52,4 @@ int kas_info(int selector, void *value, size_t *size) __OSX_AVAILABLE_STARTING(_ __END_DECLS -#endif /* !_SYS_KAS_INFO_H_ */ +#endif /* !_SYS_KAS_INFO_H_ */ diff --git a/bsd/sys/kasl.h b/bsd/sys/kasl.h index a00ec9e25..c3b9b415f 100644 --- a/bsd/sys/kasl.h +++ b/bsd/sys/kasl.h @@ -27,12 +27,12 @@ */ #ifndef _SYS_KASL_H_ -#define _SYS_KASL_H_ +#define _SYS_KASL_H_ #ifdef BSD_KERNEL_PRIVATE -#define KASL_KEY_FACILITY "Facility" /* Facility generating messages */ -#define KASL_KEY_LEVEL "Level" /* Priority level */ +#define KASL_KEY_FACILITY "Facility" /* Facility generating messages */ +#define KASL_KEY_LEVEL "Level" /* Priority level */ #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/sys/kauth.h b/bsd/sys/kauth.h index 48ae2b3f2..3a72e0b74 100644 --- a/bsd/sys/kauth.h +++ b/bsd/sys/kauth.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -38,8 +38,8 @@ #include #include #include -#include /* __offsetof() */ -#include /* NGROUPS_MAX */ +#include /* __offsetof() */ +#include /* NGROUPS_MAX */ #ifdef __APPLE_API_EVOLVING @@ -47,26 +47,26 @@ * Identities. 
*/ -#define KAUTH_UID_NONE (~(uid_t)0 - 100) /* not a valid UID */ -#define KAUTH_GID_NONE (~(gid_t)0 - 100) /* not a valid GID */ +#define KAUTH_UID_NONE (~(uid_t)0 - 100) /* not a valid UID */ +#define KAUTH_GID_NONE (~(gid_t)0 - 100) /* not a valid GID */ #include /* NT Security Identifier, structure as defined by Microsoft */ #pragma pack(1) /* push packing of 1 byte */ typedef struct { - u_int8_t sid_kind; - u_int8_t sid_authcount; - u_int8_t sid_authority[6]; + u_int8_t sid_kind; + u_int8_t sid_authcount; + u_int8_t sid_authority[6]; #define KAUTH_NTSID_MAX_AUTHORITIES 16 - u_int32_t sid_authorities[KAUTH_NTSID_MAX_AUTHORITIES]; + u_int32_t sid_authorities[KAUTH_NTSID_MAX_AUTHORITIES]; } ntsid_t; #pragma pack() /* pop packing to previous packing level */ #define _NTSID_T /* valid byte count inside a SID structure */ -#define KAUTH_NTSID_HDRSIZE (8) -#define KAUTH_NTSID_SIZE(_s) (KAUTH_NTSID_HDRSIZE + ((_s)->sid_authcount * sizeof(u_int32_t))) +#define KAUTH_NTSID_HDRSIZE (8) +#define KAUTH_NTSID_SIZE(_s) (KAUTH_NTSID_HDRSIZE + ((_s)->sid_authcount * sizeof(u_int32_t))) /* * External lookup message payload; this structure is shared between the @@ -76,53 +76,53 @@ typedef struct { * the kernel. */ struct kauth_identity_extlookup { - u_int32_t el_seqno; /* request sequence number */ - u_int32_t el_result; /* lookup result */ -#define KAUTH_EXTLOOKUP_SUCCESS 0 /* results here are good */ -#define KAUTH_EXTLOOKUP_BADRQ 1 /* request badly formatted */ -#define KAUTH_EXTLOOKUP_FAILURE 2 /* transient failure during lookup */ -#define KAUTH_EXTLOOKUP_FATAL 3 /* permanent failure during lookup */ -#define KAUTH_EXTLOOKUP_INPROG 100 /* request in progress */ - u_int32_t el_flags; -#define KAUTH_EXTLOOKUP_VALID_UID (1<<0) -#define KAUTH_EXTLOOKUP_VALID_UGUID (1<<1) -#define KAUTH_EXTLOOKUP_VALID_USID (1<<2) -#define KAUTH_EXTLOOKUP_VALID_GID (1<<3) -#define KAUTH_EXTLOOKUP_VALID_GGUID (1<<4) -#define KAUTH_EXTLOOKUP_VALID_GSID (1<<5) -#define KAUTH_EXTLOOKUP_WANT_UID (1<<6) -#define KAUTH_EXTLOOKUP_WANT_UGUID (1<<7) -#define KAUTH_EXTLOOKUP_WANT_USID (1<<8) -#define KAUTH_EXTLOOKUP_WANT_GID (1<<9) -#define KAUTH_EXTLOOKUP_WANT_GGUID (1<<10) -#define KAUTH_EXTLOOKUP_WANT_GSID (1<<11) -#define KAUTH_EXTLOOKUP_WANT_MEMBERSHIP (1<<12) + u_int32_t el_seqno; /* request sequence number */ + u_int32_t el_result; /* lookup result */ +#define KAUTH_EXTLOOKUP_SUCCESS 0 /* results here are good */ +#define KAUTH_EXTLOOKUP_BADRQ 1 /* request badly formatted */ +#define KAUTH_EXTLOOKUP_FAILURE 2 /* transient failure during lookup */ +#define KAUTH_EXTLOOKUP_FATAL 3 /* permanent failure during lookup */ +#define KAUTH_EXTLOOKUP_INPROG 100 /* request in progress */ + u_int32_t el_flags; +#define KAUTH_EXTLOOKUP_VALID_UID (1<<0) +#define KAUTH_EXTLOOKUP_VALID_UGUID (1<<1) +#define KAUTH_EXTLOOKUP_VALID_USID (1<<2) +#define KAUTH_EXTLOOKUP_VALID_GID (1<<3) +#define KAUTH_EXTLOOKUP_VALID_GGUID (1<<4) +#define KAUTH_EXTLOOKUP_VALID_GSID (1<<5) +#define KAUTH_EXTLOOKUP_WANT_UID (1<<6) +#define KAUTH_EXTLOOKUP_WANT_UGUID (1<<7) +#define KAUTH_EXTLOOKUP_WANT_USID (1<<8) +#define KAUTH_EXTLOOKUP_WANT_GID (1<<9) +#define KAUTH_EXTLOOKUP_WANT_GGUID (1<<10) +#define KAUTH_EXTLOOKUP_WANT_GSID (1<<11) +#define KAUTH_EXTLOOKUP_WANT_MEMBERSHIP (1<<12) #define KAUTH_EXTLOOKUP_VALID_MEMBERSHIP (1<<13) -#define KAUTH_EXTLOOKUP_ISMEMBER (1<<14) -#define KAUTH_EXTLOOKUP_VALID_PWNAM (1<<15) -#define KAUTH_EXTLOOKUP_WANT_PWNAM (1<<16) -#define KAUTH_EXTLOOKUP_VALID_GRNAM (1<<17) -#define KAUTH_EXTLOOKUP_WANT_GRNAM (1<<18) -#define 
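The flag pairs above encode a simple request/response protocol between the kernel and the user-space identity resolver: the kernel sets WANT_* bits for the translations it needs, and the resolver fills the corresponding fields (declared just below) and sets the matching VALID_* bits before handing the structure back. A schematic resolver-side fragment, assuming only that the request arrived through the identitysvc interface (the uid value is invented):

static void
answer_lookup(struct kauth_identity_extlookup *el)
{
	if (el->el_flags & KAUTH_EXTLOOKUP_WANT_UID) {
		el->el_uid = 501;                         /* hypothetical directory lookup result */
		el->el_flags |= KAUTH_EXTLOOKUP_VALID_UID;
	}
	el->el_result = KAUTH_EXTLOOKUP_SUCCESS;          /* results here are good */
}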
KAUTH_EXTLOOKUP_VALID_SUPGRPS (1<<19) -#define KAUTH_EXTLOOKUP_WANT_SUPGRPS (1<<20) - - __darwin_pid_t el_info_pid; /* request on behalf of PID */ - u_int64_t el_extend; /* extension field */ - u_int32_t el_info_reserved_1; /* reserved (APPLE) */ - - uid_t el_uid; /* user ID */ - guid_t el_uguid; /* user GUID */ - u_int32_t el_uguid_valid; /* TTL on translation result (seconds) */ - ntsid_t el_usid; /* user NT SID */ - u_int32_t el_usid_valid; /* TTL on translation result (seconds) */ - gid_t el_gid; /* group ID */ - guid_t el_gguid; /* group GUID */ - u_int32_t el_gguid_valid; /* TTL on translation result (seconds) */ - ntsid_t el_gsid; /* group SID */ - u_int32_t el_gsid_valid; /* TTL on translation result (seconds) */ - u_int32_t el_member_valid; /* TTL on group lookup result */ - u_int32_t el_sup_grp_cnt; /* count of supplemental groups up to NGROUPS */ - gid_t el_sup_groups[NGROUPS_MAX]; /* supplemental group list */ +#define KAUTH_EXTLOOKUP_ISMEMBER (1<<14) +#define KAUTH_EXTLOOKUP_VALID_PWNAM (1<<15) +#define KAUTH_EXTLOOKUP_WANT_PWNAM (1<<16) +#define KAUTH_EXTLOOKUP_VALID_GRNAM (1<<17) +#define KAUTH_EXTLOOKUP_WANT_GRNAM (1<<18) +#define KAUTH_EXTLOOKUP_VALID_SUPGRPS (1<<19) +#define KAUTH_EXTLOOKUP_WANT_SUPGRPS (1<<20) + + __darwin_pid_t el_info_pid; /* request on behalf of PID */ + u_int64_t el_extend; /* extension field */ + u_int32_t el_info_reserved_1; /* reserved (APPLE) */ + + uid_t el_uid; /* user ID */ + guid_t el_uguid; /* user GUID */ + u_int32_t el_uguid_valid; /* TTL on translation result (seconds) */ + ntsid_t el_usid; /* user NT SID */ + u_int32_t el_usid_valid; /* TTL on translation result (seconds) */ + gid_t el_gid; /* group ID */ + guid_t el_gguid; /* group GUID */ + u_int32_t el_gguid_valid; /* TTL on translation result (seconds) */ + ntsid_t el_gsid; /* group SID */ + u_int32_t el_gsid_valid; /* TTL on translation result (seconds) */ + u_int32_t el_member_valid; /* TTL on group lookup result */ + u_int32_t el_sup_grp_cnt; /* count of supplemental groups up to NGROUPS */ + gid_t el_sup_groups[NGROUPS_MAX]; /* supplemental group list */ }; struct kauth_cache_sizes { @@ -130,15 +130,15 @@ struct kauth_cache_sizes { u_int32_t kcs_id_size; }; -#define KAUTH_EXTLOOKUP_REGISTER (0) -#define KAUTH_EXTLOOKUP_RESULT (1<<0) -#define KAUTH_EXTLOOKUP_WORKER (1<<1) -#define KAUTH_EXTLOOKUP_DEREGISTER (1<<2) -#define KAUTH_GET_CACHE_SIZES (1<<3) -#define KAUTH_SET_CACHE_SIZES (1<<4) -#define KAUTH_CLEAR_CACHES (1<<5) +#define KAUTH_EXTLOOKUP_REGISTER (0) +#define KAUTH_EXTLOOKUP_RESULT (1<<0) +#define KAUTH_EXTLOOKUP_WORKER (1<<1) +#define KAUTH_EXTLOOKUP_DEREGISTER (1<<2) +#define KAUTH_GET_CACHE_SIZES (1<<3) +#define KAUTH_SET_CACHE_SIZES (1<<4) +#define KAUTH_CLEAR_CACHES (1<<5) -#define IDENTITYSVC_ENTITLEMENT "com.apple.private.identitysvc" +#define IDENTITYSVC_ENTITLEMENT "com.apple.private.identitysvc" #ifdef KERNEL @@ -156,40 +156,40 @@ struct kauth_cache_sizes { struct kauth_cred_supplement { TAILQ_ENTRY(kauth_cred_supplement) kcs_link; - int kcs_ref; /* reference count */ - int kcs_id; /* vended identifier */ - size_t kcs_size; /* size of data field */ - char kcs_data[0]; + int kcs_ref; /* reference count */ + int kcs_id; /* vended identifier */ + size_t kcs_size; /* size of data field */ + char kcs_data[0]; }; typedef struct kauth_cred_supplement *kauth_cred_supplement_t; struct kauth_cred { - TAILQ_ENTRY(kauth_cred) kc_link; - - int kc_ref; /* reference count */ - uid_t kc_uid; /* effective user id */ - uid_t kc_ruid; /* real user id */ - uid_t kc_svuid; /* saved 
user id */ - gid_t kc_gid; /* effective group id */ - gid_t kc_rgid; /* real group id */ - gid_t kc_svgid; /* saved group id */ - - int kc_flags; -#define KAUTH_CRED_GRPOVERRIDE (1<<0) /* private group list is authoritative */ - - int kc_npvtgroups; /* private group list, advisory or authoritative */ - gid_t kc_pvtgroups[NGROUPS]; /* based on KAUTH_CRED_GRPOVERRIDE flag */ - - int kc_nsuppgroups; /* supplementary group list */ - gid_t *kc_suppgroups; - - int kc_nwhtgroups; /* whiteout group list */ - gid_t *kc_whtgroups; - - struct au_session cr_audit; /* user auditing data */ - - int kc_nsupplement; /* entry count in supplemental data pointer array */ + TAILQ_ENTRY(kauth_cred) kc_link; + + int kc_ref; /* reference count */ + uid_t kc_uid; /* effective user id */ + uid_t kc_ruid; /* real user id */ + uid_t kc_svuid; /* saved user id */ + gid_t kc_gid; /* effective group id */ + gid_t kc_rgid; /* real group id */ + gid_t kc_svgid; /* saved group id */ + + int kc_flags; +#define KAUTH_CRED_GRPOVERRIDE (1<<0) /* private group list is authoritative */ + + int kc_npvtgroups; /* private group list, advisory or authoritative */ + gid_t kc_pvtgroups[NGROUPS]; /* based on KAUTH_CRED_GRPOVERRIDE flag */ + + int kc_nsuppgroups; /* supplementary group list */ + gid_t *kc_suppgroups; + + int kc_nwhtgroups; /* whiteout group list */ + gid_t *kc_whtgroups; + + struct au_session cr_audit; /* user auditing data */ + + int kc_nsupplement; /* entry count in supplemental data pointer array */ kauth_cred_supplement_t *kc_supplement; }; #else @@ -211,105 +211,101 @@ extern posix_cred_t posix_cred_get(kauth_cred_t cred); extern void posix_cred_label(kauth_cred_t cred, posix_cred_t pcred); extern int posix_cred_access(kauth_cred_t cred, id_t object_uid, id_t object_gid, mode_t object_mode, mode_t mode_req); -extern uid_t kauth_getuid(void); -extern uid_t kauth_getruid(void); -extern gid_t kauth_getgid(void); +extern uid_t kauth_getuid(void); +extern uid_t kauth_getruid(void); +extern gid_t kauth_getgid(void); extern kauth_cred_t kauth_cred_get(void); extern kauth_cred_t kauth_cred_get_with_ref(void); extern kauth_cred_t kauth_cred_proc_ref(proc_t procp); extern kauth_cred_t kauth_cred_create(kauth_cred_t cred); -extern void kauth_cred_ref(kauth_cred_t _cred); +extern void kauth_cred_ref(kauth_cred_t _cred); #ifndef __LP64__ /* Use kauth_cred_unref(), not kauth_cred_rele() */ -extern void kauth_cred_rele(kauth_cred_t _cred) __deprecated; +extern void kauth_cred_rele(kauth_cred_t _cred) __deprecated; #endif -extern void kauth_cred_unref(kauth_cred_t *_cred); +extern void kauth_cred_unref(kauth_cred_t *_cred); #if CONFIG_MACF struct label; -extern kauth_cred_t kauth_cred_label_update(kauth_cred_t cred, struct label *label); +extern kauth_cred_t kauth_cred_label_update(kauth_cred_t cred, struct label *label); extern int kauth_proc_label_update(struct proc *p, struct label *label); #else -/* this is a temp hack to cover us when MAC is not built in a kernel configuration. +/* this is a temp hack to cover us when MAC is not built in a kernel configuration. * Since we cannot build our export list based on the kernel configuration we need - * to define a stub. + * to define a stub. 
*/ -extern kauth_cred_t kauth_cred_label_update(kauth_cred_t cred, void *label); +extern kauth_cred_t kauth_cred_label_update(kauth_cred_t cred, void *label); extern int kauth_proc_label_update(struct proc *p, void *label); #endif +__deprecated_msg("Unsafe interface: requires lock holds that aren't exposed") extern kauth_cred_t kauth_cred_find(kauth_cred_t cred); -extern uid_t kauth_cred_getuid(kauth_cred_t _cred); -extern uid_t kauth_cred_getruid(kauth_cred_t _cred); -extern uid_t kauth_cred_getsvuid(kauth_cred_t _cred); -extern gid_t kauth_cred_getgid(kauth_cred_t _cred); -extern gid_t kauth_cred_getrgid(kauth_cred_t _cred); -extern gid_t kauth_cred_getsvgid(kauth_cred_t _cred); -extern int kauth_cred_pwnam2guid(char *pwnam, guid_t *guidp); -extern int kauth_cred_grnam2guid(char *grnam, guid_t *guidp); -extern int kauth_cred_guid2pwnam(guid_t *guidp, char *pwnam); -extern int kauth_cred_guid2grnam(guid_t *guidp, char *grnam); +extern uid_t kauth_cred_getuid(kauth_cred_t _cred); +extern uid_t kauth_cred_getruid(kauth_cred_t _cred); +extern uid_t kauth_cred_getsvuid(kauth_cred_t _cred); +extern gid_t kauth_cred_getgid(kauth_cred_t _cred); +extern gid_t kauth_cred_getrgid(kauth_cred_t _cred); +extern gid_t kauth_cred_getsvgid(kauth_cred_t _cred); +extern int kauth_cred_pwnam2guid(char *pwnam, guid_t *guidp); +extern int kauth_cred_grnam2guid(char *grnam, guid_t *guidp); +extern int kauth_cred_guid2pwnam(guid_t *guidp, char *pwnam); +extern int kauth_cred_guid2grnam(guid_t *guidp, char *grnam); extern int kauth_cred_guid2uid(guid_t *_guid, uid_t *_uidp); extern int kauth_cred_guid2gid(guid_t *_guid, gid_t *_gidp); extern int kauth_cred_ntsid2uid(ntsid_t *_sid, uid_t *_uidp); extern int kauth_cred_ntsid2gid(ntsid_t *_sid, gid_t *_gidp); extern int kauth_cred_ntsid2guid(ntsid_t *_sid, guid_t *_guidp); extern int kauth_cred_uid2guid(uid_t _uid, guid_t *_guidp); -extern int kauth_cred_getguid(kauth_cred_t _cred, guid_t *_guidp); +extern int kauth_cred_getguid(kauth_cred_t _cred, guid_t *_guidp); extern int kauth_cred_gid2guid(gid_t _gid, guid_t *_guidp); extern int kauth_cred_uid2ntsid(uid_t _uid, ntsid_t *_sidp); -extern int kauth_cred_getntsid(kauth_cred_t _cred, ntsid_t *_sidp); +extern int kauth_cred_getntsid(kauth_cred_t _cred, ntsid_t *_sidp); extern int kauth_cred_gid2ntsid(gid_t _gid, ntsid_t *_sidp); extern int kauth_cred_guid2ntsid(guid_t *_guid, ntsid_t *_sidp); -extern int kauth_cred_ismember_gid(kauth_cred_t _cred, gid_t _gid, int *_resultp); -extern int kauth_cred_ismember_guid(kauth_cred_t _cred, guid_t *_guidp, int *_resultp); -extern int kauth_cred_nfs4domain2dsnode(char *nfs4domain, char *dsnode); -extern int kauth_cred_dsnode2nfs4domain(char *dsnode, char *nfs4domain); +extern int kauth_cred_ismember_gid(kauth_cred_t _cred, gid_t _gid, int *_resultp); +extern int kauth_cred_ismember_guid(kauth_cred_t _cred, guid_t *_guidp, int *_resultp); +extern int kauth_cred_nfs4domain2dsnode(char *nfs4domain, char *dsnode); +extern int kauth_cred_dsnode2nfs4domain(char *dsnode, char *nfs4domain); -extern int groupmember(gid_t gid, kauth_cred_t cred); +extern int groupmember(gid_t gid, kauth_cred_t cred); /* currently only exported in unsupported for use by seatbelt */ -extern int kauth_cred_issuser(kauth_cred_t _cred); +extern int kauth_cred_issuser(kauth_cred_t _cred); /* GUID, NTSID helpers */ -extern guid_t kauth_null_guid; -extern int kauth_guid_equal(guid_t *_guid1, guid_t *_guid2); +extern guid_t kauth_null_guid; +extern int kauth_guid_equal(guid_t *_guid1, guid_t *_guid2); #ifdef 
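For the accessor block above, the usual consumer pattern is to borrow the current thread's credential and read ids through the getters rather than reaching into the (opaque) structure; kauth_cred_get() returns an unreferenced pointer that stays valid for the duration of the call, so no release is paired with it. A minimal kernel-side sketch:

#include <sys/kauth.h>

static int
caller_is_superuser(void)
{
	kauth_cred_t cred = kauth_cred_get();   /* borrowed; do not unref */

	/* effective uid check; kauth_cred_issuser() expresses the same test */
	return kauth_cred_getuid(cred) == 0;
}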
XNU_KERNEL_PRIVATE -extern int kauth_ntsid_equal(ntsid_t *_sid1, ntsid_t *_sid2); -#endif /* XNU_KERNEL_PRIVATE */ +extern int kauth_ntsid_equal(ntsid_t *_sid1, ntsid_t *_sid2); -#ifdef XNU_KERNEL_PRIVATE -extern int kauth_wellknown_guid(guid_t *_guid); -#define KAUTH_WKG_NOT 0 /* not a well-known GUID */ -#define KAUTH_WKG_OWNER 1 -#define KAUTH_WKG_GROUP 2 -#define KAUTH_WKG_NOBODY 3 -#define KAUTH_WKG_EVERYBODY 4 - -extern kauth_cred_t kauth_cred_dup(kauth_cred_t cred); -extern gid_t kauth_getrgid(void); -extern kauth_cred_t kauth_cred_alloc(void); -extern int cantrace(proc_t cur_procp, kauth_cred_t creds, proc_t traced_procp, int *errp); +extern int kauth_wellknown_guid(guid_t *_guid); +#define KAUTH_WKG_NOT 0 /* not a well-known GUID */ +#define KAUTH_WKG_OWNER 1 +#define KAUTH_WKG_GROUP 2 +#define KAUTH_WKG_NOBODY 3 +#define KAUTH_WKG_EVERYBODY 4 + +extern gid_t kauth_getrgid(void); +extern int cantrace(proc_t cur_procp, kauth_cred_t creds, proc_t traced_procp, int *errp); extern kauth_cred_t kauth_cred_copy_real(kauth_cred_t cred); -extern kauth_cred_t kauth_cred_setresuid(kauth_cred_t cred, uid_t ruid, uid_t euid, uid_t svuid, uid_t gmuid); -extern kauth_cred_t kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid); +extern kauth_cred_t kauth_cred_setresuid(kauth_cred_t cred, uid_t ruid, uid_t euid, uid_t svuid, uid_t gmuid); +extern kauth_cred_t kauth_cred_setresgid(kauth_cred_t cred, gid_t rgid, gid_t egid, gid_t svgid); extern kauth_cred_t kauth_cred_setuidgid(kauth_cred_t cred, uid_t uid, gid_t gid); extern kauth_cred_t kauth_cred_setsvuidgid(kauth_cred_t cred, uid_t uid, gid_t gid); -extern kauth_cred_t kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, int groupcount, uid_t gmuid); +extern kauth_cred_t kauth_cred_setgroups(kauth_cred_t cred, gid_t *groups, int groupcount, uid_t gmuid); struct uthread; -extern void kauth_cred_uthread_update(struct uthread *, proc_t); +extern void kauth_cred_uthread_update(struct uthread *, proc_t); #ifdef CONFIG_MACF extern void kauth_proc_label_update_execve(struct proc *p, struct vfs_context *ctx, struct vnode *vp, off_t offset, struct vnode *scriptvp, struct label *scriptlabel, struct label *execlabel, unsigned int *csflags, void *psattr, int *disjoint, int *update_return); #endif -extern int kauth_cred_getgroups(kauth_cred_t _cred, gid_t *_groups, int *_groupcount); -extern int kauth_cred_assume(uid_t _uid); -extern int kauth_cred_gid_subset(kauth_cred_t _cred1, kauth_cred_t _cred2, int *_resultp); +extern int kauth_cred_getgroups(kauth_cred_t _cred, gid_t *_groups, int *_groupcount); +extern int kauth_cred_gid_subset(kauth_cred_t _cred1, kauth_cred_t _cred2, int *_resultp); struct auditinfo_addr; extern kauth_cred_t kauth_cred_setauditinfo(kauth_cred_t, au_session_t *); -extern int kauth_cred_supplementary_register(const char *name, int *ident); -extern int kauth_cred_supplementary_add(kauth_cred_t cred, int ident, const void *data, size_t datasize); -extern int kauth_cred_supplementary_remove(kauth_cred_t cred, int ident); +extern int kauth_cred_supplementary_register(const char *name, int *ident); +extern int kauth_cred_supplementary_add(kauth_cred_t cred, int ident, const void *data, size_t datasize); +extern int kauth_cred_supplementary_remove(kauth_cred_t cred, int ident); #endif /* XNU_KERNEL_PRIVATE */ __END_DECLS @@ -325,33 +321,32 @@ typedef u_int32_t kauth_ace_rights_t; /* Access Control List Entry (ACE) */ struct kauth_ace { - guid_t ace_applicable; - u_int32_t ace_flags; -#define KAUTH_ACE_KINDMASK 
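One point worth making explicit about ace_flags, defined here: the low nibble is an enumeration (the ACE kind), not a set of flag bits, so it must be masked out before comparison, while the inheritance and audit bits above it are independent flags. For instance:

static int
ace_is_deny(const struct kauth_ace *ace)
{
	/* kind occupies the low nibble; inheritance bits live above it */
	return (ace->ace_flags & KAUTH_ACE_KINDMASK) == KAUTH_ACE_DENY;
}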
0xf -#define KAUTH_ACE_PERMIT 1 -#define KAUTH_ACE_DENY 2 -#define KAUTH_ACE_AUDIT 3 /* not implemented */ -#define KAUTH_ACE_ALARM 4 /* not implemented */ -#define KAUTH_ACE_INHERITED (1<<4) -#define KAUTH_ACE_FILE_INHERIT (1<<5) -#define KAUTH_ACE_DIRECTORY_INHERIT (1<<6) -#define KAUTH_ACE_LIMIT_INHERIT (1<<7) -#define KAUTH_ACE_ONLY_INHERIT (1<<8) -#define KAUTH_ACE_SUCCESS (1<<9) /* not implemented (AUDIT/ALARM) */ -#define KAUTH_ACE_FAILURE (1<<10) /* not implemented (AUDIT/ALARM) */ + guid_t ace_applicable; + u_int32_t ace_flags; +#define KAUTH_ACE_KINDMASK 0xf +#define KAUTH_ACE_PERMIT 1 +#define KAUTH_ACE_DENY 2 +#define KAUTH_ACE_AUDIT 3 /* not implemented */ +#define KAUTH_ACE_ALARM 4 /* not implemented */ +#define KAUTH_ACE_INHERITED (1<<4) +#define KAUTH_ACE_FILE_INHERIT (1<<5) +#define KAUTH_ACE_DIRECTORY_INHERIT (1<<6) +#define KAUTH_ACE_LIMIT_INHERIT (1<<7) +#define KAUTH_ACE_ONLY_INHERIT (1<<8) +#define KAUTH_ACE_SUCCESS (1<<9) /* not implemented (AUDIT/ALARM) */ +#define KAUTH_ACE_FAILURE (1<<10) /* not implemented (AUDIT/ALARM) */ /* All flag bits controlling ACE inheritance */ -#define KAUTH_ACE_INHERIT_CONTROL_FLAGS \ - (KAUTH_ACE_FILE_INHERIT | \ - KAUTH_ACE_DIRECTORY_INHERIT | \ - KAUTH_ACE_LIMIT_INHERIT | \ - KAUTH_ACE_ONLY_INHERIT) - kauth_ace_rights_t ace_rights; /* scope specific */ +#define KAUTH_ACE_INHERIT_CONTROL_FLAGS \ + (KAUTH_ACE_FILE_INHERIT | \ + KAUTH_ACE_DIRECTORY_INHERIT | \ + KAUTH_ACE_LIMIT_INHERIT | \ + KAUTH_ACE_ONLY_INHERIT) + kauth_ace_rights_t ace_rights; /* scope specific */ /* These rights are never tested, but may be present in an ACL */ -#define KAUTH_ACE_GENERIC_ALL (1<<21) -#define KAUTH_ACE_GENERIC_EXECUTE (1<<22) -#define KAUTH_ACE_GENERIC_WRITE (1<<23) -#define KAUTH_ACE_GENERIC_READ (1<<24) - +#define KAUTH_ACE_GENERIC_ALL (1<<21) +#define KAUTH_ACE_GENERIC_EXECUTE (1<<22) +#define KAUTH_ACE_GENERIC_WRITE (1<<23) +#define KAUTH_ACE_GENERIC_READ (1<<24) }; #ifndef _KAUTH_ACE @@ -362,23 +357,23 @@ typedef struct kauth_ace *kauth_ace_t; /* Access Control List */ struct kauth_acl { - u_int32_t acl_entrycount; - u_int32_t acl_flags; - + u_int32_t acl_entrycount; + u_int32_t acl_flags; + struct kauth_ace acl_ace[1]; }; /* * XXX this value needs to be raised - 3893388 */ -#define KAUTH_ACL_MAX_ENTRIES 128 +#define KAUTH_ACL_MAX_ENTRIES 128 /* * The low 16 bits of the flags field are reserved for filesystem * internal use and must be preserved by all APIs. This includes * round-tripping flags through user-space interfaces. */ -#define KAUTH_ACL_FLAGS_PRIVATE (0xffff) +#define KAUTH_ACL_FLAGS_PRIVATE (0xffff) /* * The high 16 bits of the flags are used to store attributes and @@ -386,9 +381,9 @@ struct kauth_acl { */ /* inheritance will be deferred until the first rename operation */ -#define KAUTH_ACL_DEFER_INHERIT (1<<16) +#define KAUTH_ACL_DEFER_INHERIT (1<<16) /* this ACL must not be overwritten as part of an inheritance operation */ -#define KAUTH_ACL_NO_INHERIT (1<<17) +#define KAUTH_ACL_NO_INHERIT (1<<17) /* acl_entrycount that tells us the ACL is not valid */ #define KAUTH_FILESEC_NOACL ((u_int32_t)(-1)) @@ -400,8 +395,8 @@ struct kauth_acl { * entry (Windows treats this as "deny all") from one that merely indicates a * file group and/or owner guid values. */ -#define KAUTH_ACL_SIZE(c) (__offsetof(struct kauth_acl, acl_ace) + ((u_int32_t)(c) != KAUTH_FILESEC_NOACL ? 
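Because struct kauth_acl above ends in a one-element acl_ace array, a real ACL is a variable-length object and must be allocated at KAUTH_ACL_SIZE(n) bytes, never at sizeof(struct kauth_acl). A user-space-style sketch with plain malloc standing in for the kernel allocator declared later in this header:

#include <stdlib.h>

struct kauth_acl *
acl_with_room_for(unsigned n)           /* n must not be KAUTH_FILESEC_NOACL */
{
	struct kauth_acl *acl = malloc(KAUTH_ACL_SIZE(n));  /* header + n ACEs */

	if (acl != NULL) {
		acl->acl_entrycount = n;
		acl->acl_flags = 0;
	}
	return acl;
}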
((c) * sizeof(struct kauth_ace)) : 0)) -#define KAUTH_ACL_COPYSIZE(p) KAUTH_ACL_SIZE((p)->acl_entrycount) +#define KAUTH_ACL_SIZE(c) (__offsetof(struct kauth_acl, acl_ace) + ((u_int32_t)(c) != KAUTH_FILESEC_NOACL ? ((c) * sizeof(struct kauth_ace)) : 0)) +#define KAUTH_ACL_COPYSIZE(p) KAUTH_ACL_SIZE((p)->acl_entrycount) #ifndef _KAUTH_ACL @@ -411,8 +406,8 @@ typedef struct kauth_acl *kauth_acl_t; #ifdef KERNEL __BEGIN_DECLS -kauth_acl_t kauth_acl_alloc(int size); -void kauth_acl_free(kauth_acl_t fsp); +kauth_acl_t kauth_acl_alloc(int size); +void kauth_acl_free(kauth_acl_t fsp); __END_DECLS #endif @@ -423,39 +418,39 @@ __END_DECLS /* File Security information */ struct kauth_filesec { - u_int32_t fsec_magic; -#define KAUTH_FILESEC_MAGIC 0x012cc16d - guid_t fsec_owner; - guid_t fsec_group; + u_int32_t fsec_magic; +#define KAUTH_FILESEC_MAGIC 0x012cc16d + guid_t fsec_owner; + guid_t fsec_group; struct kauth_acl fsec_acl; }; /* backwards compatibility */ #define fsec_entrycount fsec_acl.acl_entrycount -#define fsec_flags fsec_acl.acl_flags -#define fsec_ace fsec_acl.acl_ace -#define KAUTH_FILESEC_FLAGS_PRIVATE KAUTH_ACL_FLAGS_PRIVATE -#define KAUTH_FILESEC_DEFER_INHERIT KAUTH_ACL_DEFER_INHERIT -#define KAUTH_FILESEC_NO_INHERIT KAUTH_ACL_NO_INHERIT -#define KAUTH_FILESEC_NONE ((kauth_filesec_t)0) -#define KAUTH_FILESEC_WANTED ((kauth_filesec_t)1) - +#define fsec_flags fsec_acl.acl_flags +#define fsec_ace fsec_acl.acl_ace +#define KAUTH_FILESEC_FLAGS_PRIVATE KAUTH_ACL_FLAGS_PRIVATE +#define KAUTH_FILESEC_DEFER_INHERIT KAUTH_ACL_DEFER_INHERIT +#define KAUTH_FILESEC_NO_INHERIT KAUTH_ACL_NO_INHERIT +#define KAUTH_FILESEC_NONE ((kauth_filesec_t)0) +#define KAUTH_FILESEC_WANTED ((kauth_filesec_t)1) + #ifndef _KAUTH_FILESEC #define _KAUTH_FILESEC typedef struct kauth_filesec *kauth_filesec_t; #endif -#define KAUTH_FILESEC_SIZE(c) (__offsetof(struct kauth_filesec, fsec_acl) + __offsetof(struct kauth_acl, acl_ace) + (c) * sizeof(struct kauth_ace)) -#define KAUTH_FILESEC_COPYSIZE(p) KAUTH_FILESEC_SIZE(((p)->fsec_entrycount == KAUTH_FILESEC_NOACL) ? 0 : (p)->fsec_entrycount) -#define KAUTH_FILESEC_COUNT(s) (((s) - KAUTH_FILESEC_SIZE(0)) / sizeof(struct kauth_ace)) -#define KAUTH_FILESEC_VALID(s) ((s) >= KAUTH_FILESEC_SIZE(0) && (((s) - KAUTH_FILESEC_SIZE(0)) % sizeof(struct kauth_ace)) == 0) +#define KAUTH_FILESEC_SIZE(c) (__offsetof(struct kauth_filesec, fsec_acl) + __offsetof(struct kauth_acl, acl_ace) + (c) * sizeof(struct kauth_ace)) +#define KAUTH_FILESEC_COPYSIZE(p) KAUTH_FILESEC_SIZE(((p)->fsec_entrycount == KAUTH_FILESEC_NOACL) ? 
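The size macros around this point are intended for validating the raw blob read from the security xattr: KAUTH_FILESEC_VALID (defined just below) checks that a byte count covers the fixed header plus a whole number of ACEs, and KAUTH_FILESEC_COUNT recovers that ACE count. A sketch of the intended use:

#include <stddef.h>

static int
filesec_blob_entries(size_t len)
{
	if (!KAUTH_FILESEC_VALID(len))
		return -1;                            /* truncated or misaligned blob */
	return (int)KAUTH_FILESEC_COUNT(len);     /* whole ACEs carried by the blob */
}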
0 : (p)->fsec_entrycount) +#define KAUTH_FILESEC_COUNT(s) (((s) - KAUTH_FILESEC_SIZE(0)) / sizeof(struct kauth_ace)) +#define KAUTH_FILESEC_VALID(s) ((s) >= KAUTH_FILESEC_SIZE(0) && (((s) - KAUTH_FILESEC_SIZE(0)) % sizeof(struct kauth_ace)) == 0) -#define KAUTH_FILESEC_XATTR "com.apple.system.Security" +#define KAUTH_FILESEC_XATTR "com.apple.system.Security" /* Allowable first arguments to kauth_filesec_acl_setendian() */ -#define KAUTH_ENDIAN_HOST 0x00000001 /* set host endianness */ -#define KAUTH_ENDIAN_DISK 0x00000002 /* set disk endianness */ +#define KAUTH_ENDIAN_HOST 0x00000001 /* set host endianness */ +#define KAUTH_ENDIAN_DISK 0x00000002 /* set disk endianness */ #endif /* KERNEL || */ @@ -476,57 +471,57 @@ typedef int kauth_action_t; #endif typedef int (* kauth_scope_callback_t)(kauth_cred_t _credential, - void *_idata, - kauth_action_t _action, - uintptr_t _arg0, - uintptr_t _arg1, - uintptr_t _arg2, - uintptr_t _arg3); + void *_idata, + kauth_action_t _action, + uintptr_t _arg0, + uintptr_t _arg1, + uintptr_t _arg2, + uintptr_t _arg3); -#define KAUTH_RESULT_ALLOW (1) -#define KAUTH_RESULT_DENY (2) -#define KAUTH_RESULT_DEFER (3) +#define KAUTH_RESULT_ALLOW (1) +#define KAUTH_RESULT_DENY (2) +#define KAUTH_RESULT_DEFER (3) struct kauth_acl_eval { - kauth_ace_t ae_acl; - int ae_count; - kauth_ace_rights_t ae_requested; - kauth_ace_rights_t ae_residual; - int ae_result; - boolean_t ae_found_deny; - int ae_options; -#define KAUTH_AEVAL_IS_OWNER (1<<0) /* authorizing operation for owner */ -#define KAUTH_AEVAL_IN_GROUP (1<<1) /* authorizing operation for groupmember */ -#define KAUTH_AEVAL_IN_GROUP_UNKNOWN (1<<2) /* authorizing operation for unknown group membership */ + kauth_ace_t ae_acl; + int ae_count; + kauth_ace_rights_t ae_requested; + kauth_ace_rights_t ae_residual; + int ae_result; + boolean_t ae_found_deny; + int ae_options; +#define KAUTH_AEVAL_IS_OWNER (1<<0) /* authorizing operation for owner */ +#define KAUTH_AEVAL_IN_GROUP (1<<1) /* authorizing operation for groupmember */ +#define KAUTH_AEVAL_IN_GROUP_UNKNOWN (1<<2) /* authorizing operation for unknown group membership */ /* expansions for 'generic' rights bits */ - kauth_ace_rights_t ae_exp_gall; - kauth_ace_rights_t ae_exp_gread; - kauth_ace_rights_t ae_exp_gwrite; - kauth_ace_rights_t ae_exp_gexec; + kauth_ace_rights_t ae_exp_gall; + kauth_ace_rights_t ae_exp_gread; + kauth_ace_rights_t ae_exp_gwrite; + kauth_ace_rights_t ae_exp_gexec; }; typedef struct kauth_acl_eval *kauth_acl_eval_t; - + __BEGIN_DECLS -kauth_filesec_t kauth_filesec_alloc(int size); -void kauth_filesec_free(kauth_filesec_t fsp); +kauth_filesec_t kauth_filesec_alloc(int size); +void kauth_filesec_free(kauth_filesec_t fsp); extern kauth_scope_t kauth_register_scope(const char *_identifier, kauth_scope_callback_t _callback, void *_idata); -extern void kauth_deregister_scope(kauth_scope_t _scope); +extern void kauth_deregister_scope(kauth_scope_t _scope); extern kauth_listener_t kauth_listen_scope(const char *_identifier, kauth_scope_callback_t _callback, void *_idata); -extern void kauth_unlisten_scope(kauth_listener_t _scope); -extern int kauth_authorize_action(kauth_scope_t _scope, kauth_cred_t _credential, kauth_action_t _action, - uintptr_t _arg0, uintptr_t _arg1, uintptr_t _arg2, uintptr_t _arg3); +extern void kauth_unlisten_scope(kauth_listener_t _scope); +extern int kauth_authorize_action(kauth_scope_t _scope, kauth_cred_t _credential, kauth_action_t _action, + uintptr_t _arg0, uintptr_t _arg1, uintptr_t _arg2, uintptr_t _arg3); /* default 
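Tying the scope API above together: a listener registered with kauth_listen_scope() sees every authorization request in that scope and can return KAUTH_RESULT_DEFER to stay neutral, leaving the decision to the scope's own callback. A minimal observe-only listener; the scope name used in the registration comment, KAUTH_SCOPE_FILEOP, is defined further down in this header:

static int
observe_only(kauth_cred_t cred, void *idata, kauth_action_t action,
    uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
	return KAUTH_RESULT_DEFER;              /* never allow or deny outright */
}

/* e.g. at initialization:
 *   kauth_listener_t l = kauth_listen_scope(KAUTH_SCOPE_FILEOP, observe_only, NULL);
 * and later kauth_unlisten_scope(l) to detach.
 */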
scope handlers */ -extern int kauth_authorize_allow(kauth_cred_t _credential, void *_idata, kauth_action_t _action, +extern int kauth_authorize_allow(kauth_cred_t _credential, void *_idata, kauth_action_t _action, uintptr_t _arg0, uintptr_t _arg1, uintptr_t _arg2, uintptr_t _arg3); - + #ifdef XNU_KERNEL_PRIVATE -void kauth_filesec_acl_setendian(int, kauth_filesec_t, kauth_acl_t); -int kauth_copyinfilesec(user_addr_t xsecurity, kauth_filesec_t *xsecdestpp); -extern int kauth_acl_evaluate(kauth_cred_t _credential, kauth_acl_eval_t _eval); -extern int kauth_acl_inherit(vnode_t _dvp, kauth_acl_t _initial, kauth_acl_t *_product, int _isdir, vfs_context_t _ctx); +void kauth_filesec_acl_setendian(int, kauth_filesec_t, kauth_acl_t); +int kauth_copyinfilesec(user_addr_t xsecurity, kauth_filesec_t *xsecdestpp); +extern int kauth_acl_evaluate(kauth_cred_t _credential, kauth_acl_eval_t _eval); +extern int kauth_acl_inherit(vnode_t _dvp, kauth_acl_t _initial, kauth_acl_t *_product, int _isdir, vfs_context_t _ctx); #endif /* XNU_KERNEL_PRIVATE */ @@ -536,28 +531,28 @@ __END_DECLS /* * Generic scope. */ -#define KAUTH_SCOPE_GENERIC "com.apple.kauth.generic" +#define KAUTH_SCOPE_GENERIC "com.apple.kauth.generic" /* Actions */ -#define KAUTH_GENERIC_ISSUSER 1 +#define KAUTH_GENERIC_ISSUSER 1 #ifdef XNU_KERNEL_PRIVATE __BEGIN_DECLS -extern int kauth_authorize_generic(kauth_cred_t credential, kauth_action_t action); +extern int kauth_authorize_generic(kauth_cred_t credential, kauth_action_t action); __END_DECLS #endif /* XNU_KERNEL_PRIVATE */ /* * Process/task scope. */ -#define KAUTH_SCOPE_PROCESS "com.apple.kauth.process" +#define KAUTH_SCOPE_PROCESS "com.apple.kauth.process" /* Actions */ -#define KAUTH_PROCESS_CANSIGNAL 1 -#define KAUTH_PROCESS_CANTRACE 2 +#define KAUTH_PROCESS_CANSIGNAL 1 +#define KAUTH_PROCESS_CANTRACE 2 __BEGIN_DECLS -extern int kauth_authorize_process(kauth_cred_t _credential, kauth_action_t _action, +extern int kauth_authorize_process(kauth_cred_t _credential, kauth_action_t _action, struct proc *_process, uintptr_t _arg1, uintptr_t _arg2, uintptr_t _arg3); __END_DECLS @@ -566,23 +561,23 @@ __END_DECLS * * Prototype for vnode_authorize is in vnode.h */ -#define KAUTH_SCOPE_VNODE "com.apple.kauth.vnode" +#define KAUTH_SCOPE_VNODE "com.apple.kauth.vnode" /* * File system operation scope. * */ -#define KAUTH_SCOPE_FILEOP "com.apple.kauth.fileop" +#define KAUTH_SCOPE_FILEOP "com.apple.kauth.fileop" /* Actions */ -#define KAUTH_FILEOP_OPEN 1 -#define KAUTH_FILEOP_CLOSE 2 -#define KAUTH_FILEOP_RENAME 3 -#define KAUTH_FILEOP_EXCHANGE 4 -#define KAUTH_FILEOP_LINK 5 -#define KAUTH_FILEOP_EXEC 6 -#define KAUTH_FILEOP_DELETE 7 -#define KAUTH_FILEOP_WILL_RENAME 8 +#define KAUTH_FILEOP_OPEN 1 +#define KAUTH_FILEOP_CLOSE 2 +#define KAUTH_FILEOP_RENAME 3 +#define KAUTH_FILEOP_EXCHANGE 4 +#define KAUTH_FILEOP_LINK 5 +#define KAUTH_FILEOP_EXEC 6 +#define KAUTH_FILEOP_DELETE 7 +#define KAUTH_FILEOP_WILL_RENAME 8 /* * arguments passed to KAUTH_FILEOP_OPEN listeners @@ -612,15 +607,15 @@ __END_DECLS * arg0 is pointer to vnode (vnode *) of file/dir that was deleted. * arg1 is pointer to path (char *) of file/dir that was deleted. */ - + /* Flag values returned to close listeners. 
*/ -#define KAUTH_FILEOP_CLOSE_MODIFIED (1<<1) +#define KAUTH_FILEOP_CLOSE_MODIFIED (1<<1) __BEGIN_DECLS #ifdef XNU_KERNEL_PRIVATE -extern int kauth_authorize_fileop_has_listeners(void); +extern int kauth_authorize_fileop_has_listeners(void); #endif /* XNU_KERNEL_PRIVATE */ -extern int kauth_authorize_fileop(kauth_cred_t _credential, kauth_action_t _action, +extern int kauth_authorize_fileop(kauth_cred_t _credential, kauth_action_t _action, uintptr_t _arg0, uintptr_t _arg1); __END_DECLS @@ -629,29 +624,29 @@ __END_DECLS /* Actions, also rights bits in an ACE */ #if defined(KERNEL) || defined (_SYS_ACL_H) -#define KAUTH_VNODE_READ_DATA (1<<1) -#define KAUTH_VNODE_LIST_DIRECTORY KAUTH_VNODE_READ_DATA -#define KAUTH_VNODE_WRITE_DATA (1<<2) -#define KAUTH_VNODE_ADD_FILE KAUTH_VNODE_WRITE_DATA -#define KAUTH_VNODE_EXECUTE (1<<3) -#define KAUTH_VNODE_SEARCH KAUTH_VNODE_EXECUTE -#define KAUTH_VNODE_DELETE (1<<4) -#define KAUTH_VNODE_APPEND_DATA (1<<5) -#define KAUTH_VNODE_ADD_SUBDIRECTORY KAUTH_VNODE_APPEND_DATA -#define KAUTH_VNODE_DELETE_CHILD (1<<6) -#define KAUTH_VNODE_READ_ATTRIBUTES (1<<7) -#define KAUTH_VNODE_WRITE_ATTRIBUTES (1<<8) -#define KAUTH_VNODE_READ_EXTATTRIBUTES (1<<9) -#define KAUTH_VNODE_WRITE_EXTATTRIBUTES (1<<10) -#define KAUTH_VNODE_READ_SECURITY (1<<11) -#define KAUTH_VNODE_WRITE_SECURITY (1<<12) -#define KAUTH_VNODE_TAKE_OWNERSHIP (1<<13) +#define KAUTH_VNODE_READ_DATA (1<<1) +#define KAUTH_VNODE_LIST_DIRECTORY KAUTH_VNODE_READ_DATA +#define KAUTH_VNODE_WRITE_DATA (1<<2) +#define KAUTH_VNODE_ADD_FILE KAUTH_VNODE_WRITE_DATA +#define KAUTH_VNODE_EXECUTE (1<<3) +#define KAUTH_VNODE_SEARCH KAUTH_VNODE_EXECUTE +#define KAUTH_VNODE_DELETE (1<<4) +#define KAUTH_VNODE_APPEND_DATA (1<<5) +#define KAUTH_VNODE_ADD_SUBDIRECTORY KAUTH_VNODE_APPEND_DATA +#define KAUTH_VNODE_DELETE_CHILD (1<<6) +#define KAUTH_VNODE_READ_ATTRIBUTES (1<<7) +#define KAUTH_VNODE_WRITE_ATTRIBUTES (1<<8) +#define KAUTH_VNODE_READ_EXTATTRIBUTES (1<<9) +#define KAUTH_VNODE_WRITE_EXTATTRIBUTES (1<<10) +#define KAUTH_VNODE_READ_SECURITY (1<<11) +#define KAUTH_VNODE_WRITE_SECURITY (1<<12) +#define KAUTH_VNODE_TAKE_OWNERSHIP (1<<13) /* backwards compatibility only */ -#define KAUTH_VNODE_CHANGE_OWNER KAUTH_VNODE_TAKE_OWNERSHIP +#define KAUTH_VNODE_CHANGE_OWNER KAUTH_VNODE_TAKE_OWNERSHIP /* For Windows interoperability only */ -#define KAUTH_VNODE_SYNCHRONIZE (1<<20) +#define KAUTH_VNODE_SYNCHRONIZE (1<<20) /* (1<<21) - (1<<24) are reserved for generic rights bits */ @@ -659,13 +654,13 @@ __END_DECLS /* * Authorizes the vnode as the target of a hard link. */ -#define KAUTH_VNODE_LINKTARGET (1<<25) +#define KAUTH_VNODE_LINKTARGET (1<<25) /* * Indicates that other steps have been taken to authorise the action, * but authorisation should be denied for immutable objects. */ -#define KAUTH_VNODE_CHECKIMMUTABLE (1<<26) +#define KAUTH_VNODE_CHECKIMMUTABLE (1<<26) /* Action modifiers */ /* @@ -676,7 +671,7 @@ __END_DECLS * * This bit will never be present in an ACE. */ -#define KAUTH_VNODE_ACCESS (1<<31) +#define KAUTH_VNODE_ACCESS (1<<31) /* * The KAUTH_VNODE_NOIMMUTABLE bit is passed to the callback along with the @@ -686,7 +681,7 @@ __END_DECLS * The system immutable flags are only ignored when the system securelevel * is low enough to allow their removal. 
*/ -#define KAUTH_VNODE_NOIMMUTABLE (1<<30) +#define KAUTH_VNODE_NOIMMUTABLE (1<<30) /* @@ -697,59 +692,59 @@ __END_DECLS * for an exact match on the last credential to lookup * the component being acted on */ -#define KAUTH_VNODE_SEARCHBYANYONE (1<<29) +#define KAUTH_VNODE_SEARCHBYANYONE (1<<29) /* * when passed as an 'action' to "vnode_uncache_authorized_actions" * it indicates that all of the cached authorizations for that - * vnode should be invalidated + * vnode should be invalidated */ -#define KAUTH_INVALIDATE_CACHED_RIGHTS ((kauth_action_t)~0) +#define KAUTH_INVALIDATE_CACHED_RIGHTS ((kauth_action_t)~0) /* The expansions of the GENERIC bits at evaluation time */ -#define KAUTH_VNODE_GENERIC_READ_BITS (KAUTH_VNODE_READ_DATA | \ - KAUTH_VNODE_READ_ATTRIBUTES | \ - KAUTH_VNODE_READ_EXTATTRIBUTES | \ - KAUTH_VNODE_READ_SECURITY) - -#define KAUTH_VNODE_GENERIC_WRITE_BITS (KAUTH_VNODE_WRITE_DATA | \ - KAUTH_VNODE_APPEND_DATA | \ - KAUTH_VNODE_DELETE | \ - KAUTH_VNODE_DELETE_CHILD | \ - KAUTH_VNODE_WRITE_ATTRIBUTES | \ - KAUTH_VNODE_WRITE_EXTATTRIBUTES | \ - KAUTH_VNODE_WRITE_SECURITY) - +#define KAUTH_VNODE_GENERIC_READ_BITS (KAUTH_VNODE_READ_DATA | \ + KAUTH_VNODE_READ_ATTRIBUTES | \ + KAUTH_VNODE_READ_EXTATTRIBUTES | \ + KAUTH_VNODE_READ_SECURITY) + +#define KAUTH_VNODE_GENERIC_WRITE_BITS (KAUTH_VNODE_WRITE_DATA | \ + KAUTH_VNODE_APPEND_DATA | \ + KAUTH_VNODE_DELETE | \ + KAUTH_VNODE_DELETE_CHILD | \ + KAUTH_VNODE_WRITE_ATTRIBUTES | \ + KAUTH_VNODE_WRITE_EXTATTRIBUTES | \ + KAUTH_VNODE_WRITE_SECURITY) + #define KAUTH_VNODE_GENERIC_EXECUTE_BITS (KAUTH_VNODE_EXECUTE) - -#define KAUTH_VNODE_GENERIC_ALL_BITS (KAUTH_VNODE_GENERIC_READ_BITS | \ - KAUTH_VNODE_GENERIC_WRITE_BITS | \ - KAUTH_VNODE_GENERIC_EXECUTE_BITS) - + +#define KAUTH_VNODE_GENERIC_ALL_BITS (KAUTH_VNODE_GENERIC_READ_BITS | \ + KAUTH_VNODE_GENERIC_WRITE_BITS | \ + KAUTH_VNODE_GENERIC_EXECUTE_BITS) + /* * Some sets of bits, defined here for convenience. */ -#define KAUTH_VNODE_WRITE_RIGHTS (KAUTH_VNODE_ADD_FILE | \ - KAUTH_VNODE_ADD_SUBDIRECTORY | \ - KAUTH_VNODE_DELETE_CHILD | \ - KAUTH_VNODE_WRITE_DATA | \ - KAUTH_VNODE_APPEND_DATA | \ - KAUTH_VNODE_DELETE | \ - KAUTH_VNODE_WRITE_ATTRIBUTES | \ - KAUTH_VNODE_WRITE_EXTATTRIBUTES | \ - KAUTH_VNODE_WRITE_SECURITY | \ - KAUTH_VNODE_TAKE_OWNERSHIP | \ - KAUTH_VNODE_LINKTARGET | \ - KAUTH_VNODE_CHECKIMMUTABLE) +#define KAUTH_VNODE_WRITE_RIGHTS (KAUTH_VNODE_ADD_FILE | \ + KAUTH_VNODE_ADD_SUBDIRECTORY | \ + KAUTH_VNODE_DELETE_CHILD | \ + KAUTH_VNODE_WRITE_DATA | \ + KAUTH_VNODE_APPEND_DATA | \ + KAUTH_VNODE_DELETE | \ + KAUTH_VNODE_WRITE_ATTRIBUTES | \ + KAUTH_VNODE_WRITE_EXTATTRIBUTES | \ + KAUTH_VNODE_WRITE_SECURITY | \ + KAUTH_VNODE_TAKE_OWNERSHIP | \ + KAUTH_VNODE_LINKTARGET | \ + KAUTH_VNODE_CHECKIMMUTABLE) #endif /* KERNEL || */ #ifdef KERNEL -#include /* lck_grp_t */ +#include /* lck_grp_t */ /* * Debugging @@ -758,26 +753,26 @@ __END_DECLS */ #if 0 # ifndef _FN_KPRINTF -# define _FN_KPRINTF +# define _FN_KPRINTF void kprintf(const char *fmt, ...); -# endif /* !_FN_KPRINTF */ +# endif /* !_FN_KPRINTF */ # define KAUTH_DEBUG_ENABLE # define K_UUID_FMT "%08x:%08x:%08x:%08x" # define K_UUID_ARG(_u) *(int *)&_u.g_guid[0],*(int *)&_u.g_guid[4],*(int *)&_u.g_guid[8],*(int *)&_u.g_guid[12] -# define KAUTH_DEBUG(fmt, args...) do { kprintf("%s:%d: " fmt "\n", __PRETTY_FUNCTION__, __LINE__ , ##args); } while (0) -# define KAUTH_DEBUG_CTX(_c) KAUTH_DEBUG("p = %p c = %p", _c->vc_proc, _c->vc_ucred) -# define VFS_DEBUG(_ctx, _vp, fmt, args...) 
\ - do { \ - kprintf("%p '%s' %s:%d " fmt "\n", \ - _ctx, \ - (_vp != NULL && _vp->v_name != NULL) ? _vp->v_name : "????", \ - __PRETTY_FUNCTION__, __LINE__ , \ - ##args); \ +# define KAUTH_DEBUG(fmt, args...) do { kprintf("%s:%d: " fmt "\n", __PRETTY_FUNCTION__, __LINE__ , ##args); } while (0) +# define KAUTH_DEBUG_CTX(_c) KAUTH_DEBUG("p = %p c = %p", _c->vc_proc, _c->vc_ucred) +# define VFS_DEBUG(_ctx, _vp, fmt, args...) \ + do { \ + kprintf("%p '%s' %s:%d " fmt "\n", \ + _ctx, \ + (_vp != NULL && _vp->v_name != NULL) ? _vp->v_name : "????", \ + __PRETTY_FUNCTION__, __LINE__ , \ + ##args); \ } while(0) -#else /* !0 */ -# define KAUTH_DEBUG(fmt, args...) do { } while (0) -# define VFS_DEBUG(ctx, vp, fmt, args...) do { } while(0) -#endif /* !0 */ +#else /* !0 */ +# define KAUTH_DEBUG(fmt, args...) do { } while (0) +# define VFS_DEBUG(ctx, vp, fmt, args...) do { } while(0) +#endif /* !0 */ /* * Initialisation. @@ -785,17 +780,17 @@ void kprintf(const char *fmt, ...); extern lck_grp_t *kauth_lck_grp; #ifdef XNU_KERNEL_PRIVATE __BEGIN_DECLS -extern void kauth_init(void); -extern void kauth_cred_init(void); +extern void kauth_init(void); +extern void kauth_cred_init(void); #if CONFIG_EXT_RESOLVER -extern void kauth_identity_init(void); -extern void kauth_groups_init(void); -extern void kauth_resolver_init(void); +extern void kauth_identity_init(void); +extern void kauth_groups_init(void); +extern void kauth_resolver_init(void); #endif __END_DECLS #endif /* XNU_KERNEL_PRIVATE */ -#endif /* KERNEL */ +#endif /* KERNEL */ #endif /* __APPLE_API_EVOLVING */ #endif /* _SYS_KAUTH_H */ diff --git a/bsd/sys/kdebug.h b/bsd/sys/kdebug.h index 7d5f89cf8..203b8dc57 100644 --- a/bsd/sys/kdebug.h +++ b/bsd/sys/kdebug.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2016 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -91,22 +91,22 @@ __BEGIN_DECLS /* Generate an eventid corresponding to Class, SubClass, and Code. */ #define KDBG_EVENTID(Class, SubClass, Code) \ - ((((Class) & 0xff) << KDBG_CLASS_OFFSET) | \ - (((SubClass) & 0xff) << KDBG_SUBCLASS_OFFSET) | \ - (((Code) & 0x3fff) << KDBG_CODE_OFFSET)) + ((((Class) & 0xff) << KDBG_CLASS_OFFSET) | \ + (((SubClass) & 0xff) << KDBG_SUBCLASS_OFFSET) | \ + (((Code) & 0x3fff) << KDBG_CODE_OFFSET)) /* Deprecated macro using old naming convention. */ #define KDBG_CODE(Class, SubClass, Code) \ - KDBG_EVENTID(Class, SubClass, Code) + KDBG_EVENTID(Class, SubClass, Code) /* Extract pieces of the debug code. */ #define KDBG_EXTRACT_CLASS(Debugid) \ - ((uint8_t)(((Debugid) & KDBG_CLASS_MASK) >> KDBG_CLASS_OFFSET)) + ((uint8_t)(((Debugid) & KDBG_CLASS_MASK) >> KDBG_CLASS_OFFSET)) #define KDBG_EXTRACT_SUBCLASS(Debugid) \ - ((uint8_t)(((Debugid) & KDBG_SUBCLASS_MASK) >> KDBG_SUBCLASS_OFFSET)) + ((uint8_t)(((Debugid) & KDBG_SUBCLASS_MASK) >> KDBG_SUBCLASS_OFFSET)) #define KDBG_EXTRACT_CSC(Debugid) \ - ((uint16_t)(((Debugid) & KDBG_CSC_MASK) >> KDBG_CSC_OFFSET)) + ((uint16_t)(((Debugid) & KDBG_CSC_MASK) >> KDBG_CSC_OFFSET)) #define KDBG_EXTRACT_CODE(Debugid) \ - ((uint16_t)(((Debugid) & KDBG_CODE_MASK) >> KDBG_CODE_OFFSET)) + ((uint16_t)(((Debugid) & KDBG_CODE_MASK) >> KDBG_CODE_OFFSET)) /* function qualifiers */ #define DBG_FUNC_START 1 @@ -243,7 +243,7 @@ extern int kdebug_trace( uint64_t arg2, uint64_t arg3, uint64_t arg4) - __OSX_AVAILABLE(10.10.2) __IOS_AVAILABLE(8.2); +__OSX_AVAILABLE(10.10.2) __IOS_AVAILABLE(8.2); /*! 
* @function kdebug_trace_string @@ -310,8 +310,8 @@ extern int kdebug_trace( * `str` is an invalid address or NULL when `str_id` is 0. */ extern uint64_t kdebug_trace_string(uint32_t debugid, uint64_t str_id, - const char *str) - __OSX_AVAILABLE(10.11) __IOS_AVAILABLE(9.0); + const char *str) +__OSX_AVAILABLE(10.11) __IOS_AVAILABLE(9.0); /* * Although the performance impact of kdebug_trace() when kernel @@ -329,16 +329,16 @@ extern uint64_t kdebug_trace_string(uint32_t debugid, uint64_t str_id, * will be returned. Otherwise, 0 will be returned. */ extern bool kdebug_is_enabled(uint32_t code) - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) - __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); /* * Returns a pointer to the userspace typefilter, if one is available. * May return NULL. */ extern void *kdebug_typefilter(void) - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) - __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) +__WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); #endif /* !KERNEL (Private kdebug userspace API) */ #endif /* PRIVATE */ @@ -387,6 +387,15 @@ extern void kdebug_reset(void); #define DBG_MACH_THREAD_GROUP 0xA6 /* Thread groups */ #define DBG_MACH_COALITION 0xA7 /* Coalitions */ #define DBG_MACH_SHAREDREGION 0xA8 /* Shared region */ +#define DBG_MACH_IO 0xAA /* I/O */ + +/* Codes for DBG_MACH_IO */ +#define DBC_MACH_IO_MMIO_READ 0x1 +#define DBC_MACH_IO_MMIO_WRITE 0x2 +#define DBC_MACH_IO_PHYS_READ 0x3 +#define DBC_MACH_IO_PHYS_WRITE 0x4 +#define DBC_MACH_IO_PORTIO_READ 0x5 +#define DBC_MACH_IO_PORTIO_WRITE 0x6 /* Interrupt type bits for DBG_MACH_EXCP_INTR */ #define DBG_INTR_TYPE_UNKNOWN 0x0 /* default/unknown interrupt */ @@ -405,26 +414,26 @@ extern void kdebug_reset(void); #define MACH_MAKE_RUNNABLE 0x6 /* make thread runnable */ #define MACH_PROMOTE 0x7 /* promoted due to resource (replaced by MACH_PROMOTED) */ #define MACH_DEMOTE 0x8 /* promotion undone (replaced by MACH_UNPROMOTED) */ -#define MACH_IDLE 0x9 /* processor idling */ -#define MACH_STACK_DEPTH 0xa /* stack depth at switch */ -#define MACH_MOVED 0xb /* did not use original scheduling decision */ +#define MACH_IDLE 0x9 /* processor idling */ +#define MACH_STACK_DEPTH 0xa /* stack depth at switch */ +#define MACH_MOVED 0xb /* did not use original scheduling decision */ #define MACH_PSET_LOAD_AVERAGE 0xc #define MACH_AMP_DEBUG 0xd -#define MACH_FAILSAFE 0xe /* tripped fixed-pri/RT failsafe */ -#define MACH_BLOCK 0xf /* thread block */ -#define MACH_WAIT 0x10 /* thread wait assertion */ -#define MACH_GET_URGENCY 0x14 /* Urgency queried by platform */ -#define MACH_URGENCY 0x15 /* Urgency (RT/BG/NORMAL) communicated - * to platform - */ -#define MACH_REDISPATCH 0x16 /* "next thread" thread redispatched */ -#define MACH_REMOTE_AST 0x17 /* AST signal issued to remote processor */ -#define MACH_SCHED_CHOOSE_PROCESSOR 0x18 /* Result of choose_processor */ -#define MACH_DEEP_IDLE 0x19 /* deep idle on master processor */ +#define MACH_FAILSAFE 0xe /* tripped fixed-pri/RT failsafe */ +#define MACH_BLOCK 0xf /* thread block */ +#define MACH_WAIT 0x10 /* thread wait assertion */ +#define MACH_GET_URGENCY 0x14 /* Urgency queried by platform */ +#define MACH_URGENCY 0x15 /* Urgency (RT/BG/NORMAL) communicated + * to platform + */ +#define MACH_REDISPATCH 0x16 /* "next thread" thread redispatched */ +#define MACH_REMOTE_AST 0x17 /* AST signal issued to remote processor */ +#define 
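The new DBG_MACH_IO codes above slot into the usual class/subclass/code numbering, so their event ids are built with KDBG_EVENTID from earlier in this header. A sketch of composing one and of the cheap enabled-check pattern, shown with the user-space kdebug_trace()/kdebug_is_enabled() declarations above purely to illustrate the numbering (DBG_MACH is the Mach class id defined elsewhere in this file; the arguments are invented):

#include <stdint.h>

static void
trace_mmio_read(uint64_t addr, uint64_t value)
{
	uint32_t ev = KDBG_EVENTID(DBG_MACH, DBG_MACH_IO, DBC_MACH_IO_MMIO_READ);

	if (kdebug_is_enabled(ev))              /* skip argument setup when tracing is off */
		kdebug_trace(ev, addr, value, 0, 0);
}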
MACH_SCHED_CHOOSE_PROCESSOR 0x18 /* Result of choose_processor */ +#define MACH_DEEP_IDLE 0x19 /* deep idle on master processor */ /* unused 0x1a was MACH_SCHED_DECAY_PRIORITY */ -#define MACH_CPU_THROTTLE_DISABLE 0x1b /* Global CPU Throttle Disable */ -#define MACH_RW_PROMOTE 0x1c /* promoted due to RW lock promotion */ -#define MACH_RW_DEMOTE 0x1d /* promotion due to RW lock undone */ +#define MACH_CPU_THROTTLE_DISABLE 0x1b /* Global CPU Throttle Disable */ +#define MACH_RW_PROMOTE 0x1c /* promoted due to RW lock promotion */ +#define MACH_RW_DEMOTE 0x1d /* promotion due to RW lock undone */ #define MACH_SCHED_MAINTENANCE 0x1f /* periodic maintenance thread */ #define MACH_DISPATCH 0x20 /* context switch completed */ #define MACH_QUANTUM_HANDOFF 0x21 /* quantum handoff occurred */ @@ -434,7 +443,7 @@ extern void kdebug_reset(void); #define MACH_REMOTE_DEFERRED_AST 0x25 /* Deferred AST started against remote processor */ #define MACH_REMOTE_CANCEL_AST 0x26 /* Canceled deferred AST for remote processor */ #define MACH_SCHED_CHANGE_PRIORITY 0x27 /* thread sched priority changed */ -#define MACH_SCHED_UPDATE_REC_CORES 0x28 /* Change to recommended processor bitmask */ +#define MACH_SCHED_UPDATE_REC_CORES 0x28 /* Change to recommended processor bitmask */ #define MACH_STACK_WAIT 0x29 /* Thread could not be switched-to because of kernel stack shortage */ #define MACH_THREAD_BIND 0x2a /* Thread was bound (or unbound) to a processor */ #define MACH_WAITQ_PROMOTE 0x2b /* Thread promoted by waitq boost */ @@ -447,10 +456,10 @@ extern void kdebug_reset(void); #define MACH_AMP_SIGNAL_SPILL 0x32 /* AMP spill signal sent to cpuid */ #define MACH_AMP_STEAL 0x33 /* AMP thread stolen or spilled */ #define MACH_SCHED_LOAD_EFFECTIVE 0x34 /* Effective scheduler load */ -#define MACH_PROMOTED 0x35 /* thread promoted due to mutex priority promotion */ -#define MACH_UNPROMOTED 0x36 /* thread unpromoted due to mutex priority promotion */ -#define MACH_PROMOTED_UPDATE 0x37 /* thread already promoted, but promotion priority changed */ -#define MACH_QUIESCENT_COUNTER 0x38 /* quiescent counter tick */ +#define MACH_PROMOTED 0x35 /* thread promoted due to mutex priority promotion */ +#define MACH_UNPROMOTED 0x36 /* thread unpromoted due to mutex priority promotion */ +#define MACH_PROMOTED_UPDATE 0x37 /* thread already promoted, but promotion priority changed */ +#define MACH_QUIESCENT_COUNTER 0x38 /* quiescent counter tick */ /* Variants for MACH_MULTIQ_DEQUEUE */ #define MACH_MULTIQ_BOUND 1 @@ -463,26 +472,26 @@ extern void kdebug_reset(void); #define DBG_COW_FAULT 3 #define DBG_CACHE_HIT_FAULT 4 #define DBG_NZF_PAGE_FAULT 5 -#define DBG_GUARD_FAULT 6 +#define DBG_GUARD_FAULT 6 #define DBG_PAGEINV_FAULT 7 #define DBG_PAGEIND_FAULT 8 #define DBG_COMPRESSOR_FAULT 9 #define DBG_COMPRESSOR_SWAPIN_FAULT 10 /* Codes for IPC (DBG_MACH_IPC) */ -#define MACH_TASK_SUSPEND 0x0 /* Suspended a task */ -#define MACH_TASK_RESUME 0x1 /* Resumed a task */ -#define MACH_THREAD_SET_VOUCHER 0x2 -#define MACH_IPC_MSG_SEND 0x3 /* mach msg send, uniq msg info */ -#define MACH_IPC_MSG_RECV 0x4 /* mach_msg receive */ -#define MACH_IPC_MSG_RECV_VOUCHER_REFUSED 0x5 /* mach_msg receive, voucher refused */ -#define MACH_IPC_KMSG_FREE 0x6 /* kernel free of kmsg data */ -#define MACH_IPC_VOUCHER_CREATE 0x7 /* Voucher added to global voucher hashtable */ -#define MACH_IPC_VOUCHER_CREATE_ATTR_DATA 0x8 /* Attr data for newly created voucher */ -#define MACH_IPC_VOUCHER_DESTROY 0x9 /* Voucher removed from global voucher hashtable */ -#define 
MACH_IPC_KMSG_INFO 0xa /* Send/Receive info for a kmsg */ -#define MACH_IPC_KMSG_LINK 0xb /* link a kernel kmsg pointer to user mach_msg_header_t */ -#define MACH_IPC_PORT_ENTRY_MODIFY 0xc /* A port space gained or lost a port right (reference) */ +#define MACH_TASK_SUSPEND 0x0 /* Suspended a task */ +#define MACH_TASK_RESUME 0x1 /* Resumed a task */ +#define MACH_THREAD_SET_VOUCHER 0x2 +#define MACH_IPC_MSG_SEND 0x3 /* mach msg send, uniq msg info */ +#define MACH_IPC_MSG_RECV 0x4 /* mach_msg receive */ +#define MACH_IPC_MSG_RECV_VOUCHER_REFUSED 0x5 /* mach_msg receive, voucher refused */ +#define MACH_IPC_KMSG_FREE 0x6 /* kernel free of kmsg data */ +#define MACH_IPC_VOUCHER_CREATE 0x7 /* Voucher added to global voucher hashtable */ +#define MACH_IPC_VOUCHER_CREATE_ATTR_DATA 0x8 /* Attr data for newly created voucher */ +#define MACH_IPC_VOUCHER_DESTROY 0x9 /* Voucher removed from global voucher hashtable */ +#define MACH_IPC_KMSG_INFO 0xa /* Send/Receive info for a kmsg */ +#define MACH_IPC_KMSG_LINK 0xb /* link a kernel kmsg pointer to user mach_msg_header_t */ +#define MACH_IPC_PORT_ENTRY_MODIFY 0xc /* A port space gained or lost a port right (reference) */ /* Codes for thread groups (DBG_MACH_THREAD_GROUP) */ #define MACH_THREAD_GROUP_NEW 0x0 @@ -493,41 +502,48 @@ extern void kdebug_reset(void); #define MACH_THREAD_GROUP_FLAGS 0x5 /* Codes for coalitions (DBG_MACH_COALITION) */ -#define MACH_COALITION_NEW 0x0 -#define MACH_COALITION_FREE 0x1 -#define MACH_COALITION_ADOPT 0x2 -#define MACH_COALITION_REMOVE 0x3 -#define MACH_COALITION_THREAD_GROUP_SET 0x4 +#define MACH_COALITION_NEW 0x0 +#define MACH_COALITION_FREE 0x1 +#define MACH_COALITION_ADOPT 0x2 +#define MACH_COALITION_REMOVE 0x3 +#define MACH_COALITION_THREAD_GROUP_SET 0x4 /* Codes for pmap (DBG_MACH_PMAP) */ -#define PMAP__CREATE 0x0 -#define PMAP__DESTROY 0x1 -#define PMAP__PROTECT 0x2 -#define PMAP__PAGE_PROTECT 0x3 -#define PMAP__ENTER 0x4 -#define PMAP__REMOVE 0x5 -#define PMAP__NEST 0x6 -#define PMAP__UNNEST 0x7 -#define PMAP__FLUSH_TLBS 0x8 -#define PMAP__UPDATE_INTERRUPT 0x9 -#define PMAP__ATTRIBUTE_CLEAR 0xa -#define PMAP__REUSABLE 0xb /* This appears to be unused */ -#define PMAP__QUERY_RESIDENT 0xc -#define PMAP__FLUSH_KERN_TLBS 0xd -#define PMAP__FLUSH_DELAYED_TLBS 0xe -#define PMAP__FLUSH_TLBS_TO 0xf -#define PMAP__FLUSH_EPT 0x10 -#define PMAP__FAST_FAULT 0x11 -#define PMAP__SWITCH 0x12 -#define PMAP__TTE 0x13 -#define PMAP__SWITCH_USER_TTB 0x14 +#define PMAP__CREATE 0x0 +#define PMAP__DESTROY 0x1 +#define PMAP__PROTECT 0x2 +#define PMAP__PAGE_PROTECT 0x3 +#define PMAP__ENTER 0x4 +#define PMAP__REMOVE 0x5 +#define PMAP__NEST 0x6 +#define PMAP__UNNEST 0x7 +#define PMAP__FLUSH_TLBS 0x8 +#define PMAP__UPDATE_INTERRUPT 0x9 +#define PMAP__ATTRIBUTE_CLEAR 0xa +#define PMAP__REUSABLE 0xb /* This appears to be unused */ +#define PMAP__QUERY_RESIDENT 0xc +#define PMAP__FLUSH_KERN_TLBS 0xd +#define PMAP__FLUSH_DELAYED_TLBS 0xe +#define PMAP__FLUSH_TLBS_TO 0xf +#define PMAP__FLUSH_EPT 0x10 +#define PMAP__FAST_FAULT 0x11 +#define PMAP__SWITCH 0x12 +#define PMAP__TTE 0x13 +#define PMAP__SWITCH_USER_TTB 0x14 /* Codes for clock (DBG_MACH_CLOCK) */ -#define MACH_EPOCH_CHANGE 0x0 /* wake epoch change */ +#define MACH_EPOCH_CHANGE 0x0 /* wake epoch change */ +#define MACH_BRIDGE_RCV_TS 0x1 /* receive timestamp pair from interrupt handler */ +#define MACH_BRIDGE_REMOTE_TIME 0x2 /* calculate remote timestamp */ +#define MACH_BRIDGE_RESET_TS 0x3 /* reset timestamp conversion parameters */ +#define MACH_BRIDGE_TS_PARAMS 0x4 /* recompute 
timestamp conversion parameters */ +#define MACH_BRIDGE_SKIP_TS 0x5 /* skip timestamp */ +#define MACH_BRIDGE_TS_MISMATCH 0x6 /* mismatch between predicted and received remote timestamp */ +#define MACH_BRIDGE_OBSV_RATE 0x7 /* out of range observed rates */ /* Codes for Stackshot/Microstackshot (DBG_MACH_STACKSHOT) */ -#define MICROSTACKSHOT_RECORD 0x0 -#define MICROSTACKSHOT_GATHER 0x1 +#define MICROSTACKSHOT_RECORD 0x0 +#define MICROSTACKSHOT_GATHER 0x1 /* Codes for sysdiagnose (DBG_MACH_SYSDIAGNOSE) */ #define SYSDIAGNOSE_NOTIFY_USER 0x0 @@ -536,20 +552,20 @@ extern void kdebug_reset(void); #define SYSDIAGNOSE_TAILSPIN 0x3 /* Codes for Selective Forced Idle (DBG_MACH_SFI) */ -#define SFI_SET_WINDOW 0x0 -#define SFI_CANCEL_WINDOW 0x1 -#define SFI_SET_CLASS_OFFTIME 0x2 -#define SFI_CANCEL_CLASS_OFFTIME 0x3 -#define SFI_THREAD_DEFER 0x4 -#define SFI_OFF_TIMER 0x5 -#define SFI_ON_TIMER 0x6 -#define SFI_WAIT_CANCELED 0x7 -#define SFI_PID_SET_MANAGED 0x8 -#define SFI_PID_CLEAR_MANAGED 0x9 -#define SFI_GLOBAL_DEFER 0xa +#define SFI_SET_WINDOW 0x0 +#define SFI_CANCEL_WINDOW 0x1 +#define SFI_SET_CLASS_OFFTIME 0x2 +#define SFI_CANCEL_CLASS_OFFTIME 0x3 +#define SFI_THREAD_DEFER 0x4 +#define SFI_OFF_TIMER 0x5 +#define SFI_ON_TIMER 0x6 +#define SFI_WAIT_CANCELED 0x7 +#define SFI_PID_SET_MANAGED 0x8 +#define SFI_PID_CLEAR_MANAGED 0x9 +#define SFI_GLOBAL_DEFER 0xa /* Codes for Zone Allocator (DBG_MACH_ZALLOC) */ -#define ZALLOC_ZCRAM 0x0 +#define ZALLOC_ZCRAM 0x0 /* Codes for Mach resource management (DBG_MACH_RESOURCE) */ /* _K32A/B codes start at double the low nibble */ @@ -575,73 +591,73 @@ extern void kdebug_reset(void); #define RMON_DISABLE_IO_MONITOR 0x02f /* **** The Kernel Debug Sub Classes for Network (DBG_NETWORK) **** */ -#define DBG_NETIP 1 /* Internet Protocol */ -#define DBG_NETARP 2 /* Address Resolution Protocol */ -#define DBG_NETUDP 3 /* User Datagram Protocol */ -#define DBG_NETTCP 4 /* Transmission Control Protocol */ -#define DBG_NETICMP 5 /* Internet Control Message Protocol */ -#define DBG_NETIGMP 6 /* Internet Group Management Protocol */ -#define DBG_NETRIP 7 /* Routing Information Protocol */ -#define DBG_NETOSPF 8 /* Open Shortest Path First */ -#define DBG_NETISIS 9 /* Intermediate System to Intermediate System */ -#define DBG_NETSNMP 10 /* Simple Network Management Protocol */ -#define DBG_NETSOCK 11 /* Socket Layer */ +#define DBG_NETIP 1 /* Internet Protocol */ +#define DBG_NETARP 2 /* Address Resolution Protocol */ +#define DBG_NETUDP 3 /* User Datagram Protocol */ +#define DBG_NETTCP 4 /* Transmission Control Protocol */ +#define DBG_NETICMP 5 /* Internet Control Message Protocol */ +#define DBG_NETIGMP 6 /* Internet Group Management Protocol */ +#define DBG_NETRIP 7 /* Routing Information Protocol */ +#define DBG_NETOSPF 8 /* Open Shortest Path First */ +#define DBG_NETISIS 9 /* Intermediate System to Intermediate System */ +#define DBG_NETSNMP 10 /* Simple Network Management Protocol */ +#define DBG_NETSOCK 11 /* Socket Layer */ /* For Apple talk */ -#define DBG_NETAARP 100 /* Apple ARP */ -#define DBG_NETDDP 101 /* Datagram Delivery Protocol */ -#define DBG_NETNBP 102 /* Name Binding Protocol */ -#define DBG_NETZIP 103 /* Zone Information Protocol */ -#define DBG_NETADSP 104 /* Name Binding Protocol */ -#define DBG_NETATP 105 /* Apple Transaction Protocol */ -#define DBG_NETASP 106 /* Apple Session Protocol */ -#define DBG_NETAFP 107 /* Apple Filing Protocol */ -#define DBG_NETRTMP 108 /* Routing Table Maintenance Protocol */ -#define DBG_NETAURP 109 /* Apple 
Update Routing Protocol */ - -#define DBG_NETIPSEC 128 /* IPsec Protocol */ -#define DBG_NETVMNET 129 /* VMNet */ +#define DBG_NETAARP 100 /* Apple ARP */ +#define DBG_NETDDP 101 /* Datagram Delivery Protocol */ +#define DBG_NETNBP 102 /* Name Binding Protocol */ +#define DBG_NETZIP 103 /* Zone Information Protocol */ +#define DBG_NETADSP 104 /* Name Binding Protocol */ +#define DBG_NETATP 105 /* Apple Transaction Protocol */ +#define DBG_NETASP 106 /* Apple Session Protocol */ +#define DBG_NETAFP 107 /* Apple Filing Protocol */ +#define DBG_NETRTMP 108 /* Routing Table Maintenance Protocol */ +#define DBG_NETAURP 109 /* Apple Update Routing Protocol */ + +#define DBG_NETIPSEC 128 /* IPsec Protocol */ +#define DBG_NETVMNET 129 /* VMNet */ /* **** The Kernel Debug Sub Classes for IOKIT (DBG_IOKIT) **** */ -#define DBG_IOINTC 0 /* Interrupt controller */ -#define DBG_IOWORKLOOP 1 /* Work from work loop */ -#define DBG_IOINTES 2 /* Interrupt event source */ -#define DBG_IOCLKES 3 /* Clock event source */ -#define DBG_IOCMDQ 4 /* Command queue latencies */ -#define DBG_IOMCURS 5 /* Memory Cursor */ -#define DBG_IOMDESC 6 /* Memory Descriptors */ -#define DBG_IOPOWER 7 /* Power Managerment */ -#define DBG_IOSERVICE 8 /* Matching etc. */ -#define DBG_IOREGISTRY 9 /* Registry */ +#define DBG_IOINTC 0 /* Interrupt controller */ +#define DBG_IOWORKLOOP 1 /* Work from work loop */ +#define DBG_IOINTES 2 /* Interrupt event source */ +#define DBG_IOCLKES 3 /* Clock event source */ +#define DBG_IOCMDQ 4 /* Command queue latencies */ +#define DBG_IOMCURS 5 /* Memory Cursor */ +#define DBG_IOMDESC 6 /* Memory Descriptors */ +#define DBG_IOPOWER 7 /* Power Managerment */ +#define DBG_IOSERVICE 8 /* Matching etc. */ +#define DBG_IOREGISTRY 9 /* Registry */ /* **** 9-32 reserved for internal IOKit usage **** */ -#define DBG_IOSTORAGE 32 /* Storage layers */ -#define DBG_IONETWORK 33 /* Network layers */ -#define DBG_IOKEYBOARD 34 /* Keyboard */ -#define DBG_IOHID 35 /* HID Devices */ -#define DBG_IOAUDIO 36 /* Audio */ -#define DBG_IOSERIAL 37 /* Serial */ -#define DBG_IOTTY 38 /* TTY layers */ -#define DBG_IOSAM 39 /* SCSI Architecture Model layers */ -#define DBG_IOPARALLELATA 40 /* Parallel ATA */ -#define DBG_IOPARALLELSCSI 41 /* Parallel SCSI */ -#define DBG_IOSATA 42 /* Serial-ATA */ -#define DBG_IOSAS 43 /* SAS */ -#define DBG_IOFIBRECHANNEL 44 /* FiberChannel */ -#define DBG_IOUSB 45 /* USB */ -#define DBG_IOBLUETOOTH 46 /* Bluetooth */ -#define DBG_IOFIREWIRE 47 /* FireWire */ -#define DBG_IOINFINIBAND 48 /* Infiniband */ -#define DBG_IOCPUPM 49 /* CPU Power Management */ -#define DBG_IOGRAPHICS 50 /* Graphics */ -#define DBG_HIBERNATE 51 /* hibernation related events */ -#define DBG_IOTHUNDERBOLT 52 /* Thunderbolt */ -#define DBG_BOOTER 53 /* booter related events */ +#define DBG_IOSTORAGE 32 /* Storage layers */ +#define DBG_IONETWORK 33 /* Network layers */ +#define DBG_IOKEYBOARD 34 /* Keyboard */ +#define DBG_IOHID 35 /* HID Devices */ +#define DBG_IOAUDIO 36 /* Audio */ +#define DBG_IOSERIAL 37 /* Serial */ +#define DBG_IOTTY 38 /* TTY layers */ +#define DBG_IOSAM 39 /* SCSI Architecture Model layers */ +#define DBG_IOPARALLELATA 40 /* Parallel ATA */ +#define DBG_IOPARALLELSCSI 41 /* Parallel SCSI */ +#define DBG_IOSATA 42 /* Serial-ATA */ +#define DBG_IOSAS 43 /* SAS */ +#define DBG_IOFIBRECHANNEL 44 /* FiberChannel */ +#define DBG_IOUSB 45 /* USB */ +#define DBG_IOBLUETOOTH 46 /* Bluetooth */ +#define DBG_IOFIREWIRE 47 /* FireWire */ +#define DBG_IOINFINIBAND 48 /* Infiniband */ +#define 
DBG_IOCPUPM 49 /* CPU Power Management */ +#define DBG_IOGRAPHICS 50 /* Graphics */ +#define DBG_HIBERNATE 51 /* hibernation related events */ +#define DBG_IOTHUNDERBOLT 52 /* Thunderbolt */ +#define DBG_BOOTER 53 /* booter related events */ /* Backwards compatibility */ -#define DBG_IOPOINTING DBG_IOHID /* OBSOLETE: Use DBG_IOHID instead */ -#define DBG_IODISK DBG_IOSTORAGE /* OBSOLETE: Use DBG_IOSTORAGE instead */ +#define DBG_IOPOINTING DBG_IOHID /* OBSOLETE: Use DBG_IOHID instead */ +#define DBG_IODISK DBG_IOSTORAGE /* OBSOLETE: Use DBG_IOSTORAGE instead */ /* **** The Kernel Debug Sub Classes for Device Drivers (DBG_DRIVERS) **** */ #define DBG_DRVSTORAGE 1 /* Storage layers */ @@ -672,8 +688,8 @@ extern void kdebug_reset(void); #define DBG_DRVANE 27 /* ANE */ /* Backwards compatibility */ -#define DBG_DRVPOINTING DBG_DRVHID /* OBSOLETE: Use DBG_DRVHID instead */ -#define DBG_DRVDISK DBG_DRVSTORAGE /* OBSOLETE: Use DBG_DRVSTORAGE instead */ +#define DBG_DRVPOINTING DBG_DRVHID /* OBSOLETE: Use DBG_DRVHID instead */ +#define DBG_DRVDISK DBG_DRVSTORAGE /* OBSOLETE: Use DBG_DRVSTORAGE instead */ /* **** The Kernel Debug Sub Classes for the DLIL Layer (DBG_DLIL) **** */ #define DBG_DLIL_STATIC 1 /* Static DLIL code */ @@ -710,13 +726,13 @@ extern void kdebug_reset(void); * For Kernel Debug Sub Class DBG_HFS, state bits for hfs_update event */ #define DBG_HFS_UPDATE_ACCTIME 0x01 -#define DBG_HFS_UPDATE_MODTIME 0x02 -#define DBG_HFS_UPDATE_CHGTIME 0x04 -#define DBG_HFS_UPDATE_MODIFIED 0x08 +#define DBG_HFS_UPDATE_MODTIME 0x02 +#define DBG_HFS_UPDATE_CHGTIME 0x04 +#define DBG_HFS_UPDATE_MODIFIED 0x08 #define DBG_HFS_UPDATE_FORCE 0x10 #define DBG_HFS_UPDATE_DATEADDED 0x20 #define DBG_HFS_UPDATE_MINOR 0x40 -#define DBG_HFS_UPDATE_SKIPPED 0x80 +#define DBG_HFS_UPDATE_SKIPPED 0x80 /* The Kernel Debug Sub Classes for BSD */ #define DBG_BSD_PROC 0x01 /* process/signals related */ @@ -781,7 +797,7 @@ extern void kdebug_reset(void); /* The Kernel Debug Sub Classes for DBG_TRACE */ #define DBG_TRACE_DATA 0 #define DBG_TRACE_STRING 1 -#define DBG_TRACE_INFO 2 +#define DBG_TRACE_INFO 2 /* The Kernel Debug events: */ #define TRACE_DATA_NEWTHREAD (TRACEDBG_CODE(DBG_TRACE_DATA, 1)) @@ -802,7 +818,7 @@ extern void kdebug_reset(void); #define TRACE_RETROGRADE_EVENTS (TRACEDBG_CODE(DBG_TRACE_INFO, 5)) /* The Kernel Debug Sub Classes for DBG_CORESTORAGE */ -#define DBG_CS_IO 0 +#define DBG_CS_IO 0 /* The Kernel Debug Sub Classes for DBG_SECURITY */ #define DBG_SEC_KERNEL 0 /* raw entropy collected by the kernel */ @@ -842,16 +858,16 @@ extern void kdebug_reset(void); #define DBG_DYLD_UUID_SHARED_CACHE_32_C (14) /* The Kernel Debug modifiers for the DBG_DKRW sub class */ -#define DKIO_DONE 0x01 -#define DKIO_READ 0x02 -#define DKIO_ASYNC 0x04 -#define DKIO_META 0x08 -#define DKIO_PAGING 0x10 -#define DKIO_THROTTLE 0x20 /* Deprecated, still provided so fs_usage doesn't break */ -#define DKIO_PASSIVE 0x40 -#define DKIO_NOCACHE 0x80 -#define DKIO_TIER_MASK 0xF00 -#define DKIO_TIER_SHIFT 8 +#define DKIO_DONE 0x01 +#define DKIO_READ 0x02 +#define DKIO_ASYNC 0x04 +#define DKIO_META 0x08 +#define DKIO_PAGING 0x10 +#define DKIO_THROTTLE 0x20 /* Deprecated, still provided so fs_usage doesn't break */ +#define DKIO_PASSIVE 0x40 +#define DKIO_NOCACHE 0x80 +#define DKIO_TIER_MASK 0xF00 +#define DKIO_TIER_SHIFT 8 #define DKIO_TIER_UPGRADE 0x1000 /* Kernel Debug Sub Classes for Applications (DBG_APPS) */ @@ -863,14 +879,15 @@ extern void kdebug_reset(void); #define DBG_APP_UIKIT 0x0D #define DBG_APP_DFR 0x0E #define 
DBG_APP_LAYOUT 0x0F +#define DBG_APP_COREDATA 0x10 #define DBG_APP_SAMBA 0x80 #define DBG_APP_EOSSUPPORT 0x81 #define DBG_APP_MACEFIMANAGER 0x82 /* Kernel Debug codes for Throttling (DBG_THROTTLE) */ -#define OPEN_THROTTLE_WINDOW 0x1 -#define PROCESS_THROTTLED 0x2 -#define IO_THROTTLE_DISABLE 0x3 +#define OPEN_THROTTLE_WINDOW 0x1 +#define PROCESS_THROTTLED 0x2 +#define IO_THROTTLE_DISABLE 0x3 #define IO_TIER_UPL_MISMATCH 0x4 @@ -906,12 +923,12 @@ extern void kdebug_reset(void); #define IMP_UPDATE_TASK_CREATE 0x1 /* Codes for IMP_USYNCH_QOS_OVERRIDE */ -#define IMP_USYNCH_ADD_OVERRIDE 0x0 /* add override for a contended resource */ -#define IMP_USYNCH_REMOVE_OVERRIDE 0x1 /* remove override for a contended resource */ +#define IMP_USYNCH_ADD_OVERRIDE 0x0 /* add override for a contended resource */ +#define IMP_USYNCH_REMOVE_OVERRIDE 0x1 /* remove override for a contended resource */ /* Codes for IMP_DONOR_CHANGE */ -#define IMP_DONOR_UPDATE_LIVE_DONOR_STATE 0x0 -#define IMP_DONOR_INIT_DONOR_STATE 0x1 +#define IMP_DONOR_UPDATE_LIVE_DONOR_STATE 0x0 +#define IMP_DONOR_INIT_DONOR_STATE 0x1 /* Code for IMP_SYNC_IPC_QOS */ #define IMP_SYNC_IPC_QOS_APPLIED 0x0 @@ -946,42 +963,43 @@ extern void kdebug_reset(void); #define TURNSTILE_COMPLETE 0x2 /* Subclasses for MACH Bank Voucher Attribute Manager (DBG_BANK) */ -#define BANK_ACCOUNT_INFO 0x10 /* Trace points related to bank account struct */ -#define BANK_TASK_INFO 0x11 /* Trace points related to bank task struct */ +#define BANK_ACCOUNT_INFO 0x10 /* Trace points related to bank account struct */ +#define BANK_TASK_INFO 0x11 /* Trace points related to bank task struct */ /* Subclasses for MACH ATM Voucher Attribute Manager (ATM) */ -#define ATM_SUBAID_INFO 0x10 -#define ATM_GETVALUE_INFO 0x20 -#define ATM_UNREGISTER_INFO 0x30 +#define ATM_SUBAID_INFO 0x10 +#define ATM_GETVALUE_INFO 0x20 +#define ATM_UNREGISTER_INFO 0x30 /* Codes for BANK_ACCOUNT_INFO */ -#define BANK_SETTLE_CPU_TIME 0x1 /* Bank ledger(chit) rolled up to tasks. */ -#define BANK_SECURE_ORIGINATOR_CHANGED 0x2 /* Secure Originator changed. */ -#define BANK_SETTLE_ENERGY 0x3 /* Bank ledger(energy field) rolled up to tasks. */ +#define BANK_SETTLE_CPU_TIME 0x1 /* Bank ledger(chit) rolled up to tasks. */ +#define BANK_SECURE_ORIGINATOR_CHANGED 0x2 /* Secure Originator changed. */ +#define BANK_SETTLE_ENERGY 0x3 /* Bank ledger(energy field) rolled up to tasks. 
*/ /* Codes for ATM_SUBAID_INFO */ -#define ATM_MIN_CALLED 0x1 -#define ATM_LINK_LIST_TRIM 0x2 +#define ATM_MIN_CALLED 0x1 +#define ATM_LINK_LIST_TRIM 0x2 /* Codes for ATM_GETVALUE_INFO */ -#define ATM_VALUE_REPLACED 0x1 -#define ATM_VALUE_ADDED 0x2 +#define ATM_VALUE_REPLACED 0x1 +#define ATM_VALUE_ADDED 0x2 /* Codes for ATM_UNREGISTER_INFO */ -#define ATM_VALUE_UNREGISTERED 0x1 -#define ATM_VALUE_DIFF_MAILBOX 0x2 +#define ATM_VALUE_UNREGISTERED 0x1 +#define ATM_VALUE_DIFF_MAILBOX 0x2 /* Kernel Debug Sub Classes for daemons (DBG_DAEMON) */ #define DBG_DAEMON_COREDUET 0x1 #define DBG_DAEMON_POWERD 0x2 /* Subclasses for the user space allocator */ -#define DBG_UMALLOC_EXTERNAL 0x1 -#define DBG_UMALLOC_INTERNAL 0x2 +#define DBG_UMALLOC_EXTERNAL 0x1 +#define DBG_UMALLOC_INTERNAL 0x2 + /**********************************************************************/ #define KDBG_MIGCODE(msgid) ((DBG_MIG << KDBG_CLASS_OFFSET) | \ - (((msgid) & 0x3fffff) << KDBG_CODE_OFFSET)) + (((msgid) & 0x3fffff) << KDBG_CODE_OFFSET)) #define MACHDBG_CODE(SubClass, code) KDBG_CODE(DBG_MACH, SubClass, code) #define NETDBG_CODE(SubClass, code) KDBG_CODE(DBG_NETWORK, SubClass, code) @@ -989,13 +1007,13 @@ extern void kdebug_reset(void); #define BSDDBG_CODE(SubClass, code) KDBG_CODE(DBG_BSD, SubClass, code) #define IOKDBG_CODE(SubClass, code) KDBG_CODE(DBG_IOKIT, SubClass, code) #define DRVDBG_CODE(SubClass, code) KDBG_CODE(DBG_DRIVERS, SubClass, code) -#define TRACEDBG_CODE(SubClass,code) KDBG_CODE(DBG_TRACE, SubClass, code) -#define MISCDBG_CODE(SubClass,code) KDBG_CODE(DBG_MISC, SubClass, code) -#define DLILDBG_CODE(SubClass,code) KDBG_CODE(DBG_DLIL, SubClass, code) -#define SECURITYDBG_CODE(SubClass,code) KDBG_CODE(DBG_SECURITY, SubClass, code) -#define DYLDDBG_CODE(SubClass,code) KDBG_CODE(DBG_DYLD, SubClass, code) -#define QTDBG_CODE(SubClass,code) KDBG_CODE(DBG_QT, SubClass, code) -#define APPSDBG_CODE(SubClass,code) KDBG_CODE(DBG_APPS, SubClass, code) +#define TRACEDBG_CODE(SubClass, code) KDBG_CODE(DBG_TRACE, SubClass, code) +#define MISCDBG_CODE(SubClass, code) KDBG_CODE(DBG_MISC, SubClass, code) +#define DLILDBG_CODE(SubClass, code) KDBG_CODE(DBG_DLIL, SubClass, code) +#define SECURITYDBG_CODE(SubClass, code) KDBG_CODE(DBG_SECURITY, SubClass, code) +#define DYLDDBG_CODE(SubClass, code) KDBG_CODE(DBG_DYLD, SubClass, code) +#define QTDBG_CODE(SubClass, code) KDBG_CODE(DBG_QT, SubClass, code) +#define APPSDBG_CODE(SubClass, code) KDBG_CODE(DBG_APPS, SubClass, code) #define ARIADNEDBG_CODE(SubClass, code) KDBG_CODE(DBG_ARIADNE, SubClass, code) #define DAEMONDBG_CODE(SubClass, code) KDBG_CODE(DBG_DAEMON, SubClass, code) #define CPUPM_CODE(code) IOKDBG_CODE(DBG_IOCPUPM, code) @@ -1065,7 +1083,7 @@ extern void kdebug_reset(void); * process filter would reject it. */ #define KDBG_RELEASE_NOPROCFILT(x, ...) \ - KDBG_(_RELEASE_NOPROCFILT, x, ## __VA_ARGS__, 4, 3, 2, 1, 0) + KDBG_(_RELEASE_NOPROCFILT, x, ## __VA_ARGS__, 4, 3, 2, 1, 0) /* * Traced on debug, development, and release kernels. @@ -1127,7 +1145,7 @@ extern unsigned int kdebug_enable; #define KDEBUG_LEVEL KDEBUG_LEVEL_NONE #elif IST_KDEBUG #define KDEBUG_LEVEL KDEBUG_LEVEL_IST - // currently configured for the iOS release kernel +// currently configured for the iOS release kernel #elif KDEBUG #define KDEBUG_LEVEL KDEBUG_LEVEL_FULL #else @@ -1153,10 +1171,10 @@ extern unsigned int kdebug_enable; #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) #define KERNEL_DEBUG_CONSTANT_FILTERED(x, a, b, c, d, ...) 
\ do { \ - if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ - kernel_debug_filtered((x), (uintptr_t)(a), (uintptr_t)(b), \ - (uintptr_t)(c), (uintptr_t)(d)); \ - } \ + if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ + kernel_debug_filtered((x), (uintptr_t)(a), (uintptr_t)(b), \ + (uintptr_t)(c), (uintptr_t)(d)); \ + } \ } while (0) #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ #define KERNEL_DEBUG_CONSTANT_FILTERED(type, x, a, b, c, d, ...) do {} while (0) @@ -1165,10 +1183,10 @@ extern unsigned int kdebug_enable; #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) #define KERNEL_DEBUG_CONSTANT_RELEASE_NOPROCFILT(x, a, b, c, d, ...) \ do { \ - if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ - kernel_debug_flags((x), (uintptr_t)(a), (uintptr_t)(b), \ - (uintptr_t)(c), (uintptr_t)(d), KDBG_FLAG_NOPROCFILT); \ - } \ + if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ + kernel_debug_flags((x), (uintptr_t)(a), (uintptr_t)(b), \ + (uintptr_t)(c), (uintptr_t)(d), KDBG_FLAG_NOPROCFILT); \ + } \ } while (0) #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */ #define KERNEL_DEBUG_CONSTANT_RELEASE_NOPROCFILT(x, a, b, c, d, ...) \ @@ -1179,10 +1197,10 @@ extern unsigned int kdebug_enable; #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) #define KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e) \ do { \ - if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ - kernel_debug((x), (uintptr_t)(a), (uintptr_t)(b), (uintptr_t)(c), \ - (uintptr_t)(d),(uintptr_t)(e)); \ - } \ + if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ + kernel_debug((x), (uintptr_t)(a), (uintptr_t)(b), (uintptr_t)(c), \ + (uintptr_t)(d),(uintptr_t)(e)); \ + } \ } while (0) /* @@ -1192,16 +1210,16 @@ extern unsigned int kdebug_enable; */ #define KERNEL_DEBUG_CONSTANT1(x, a, b, c, d, e) \ do { \ - if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ - kernel_debug1((x), (uintptr_t)(a), (uintptr_t)(b), (uintptr_t)(c), \ - (uintptr_t)(d), (uintptr_t)(e)); \ - } \ + if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ + kernel_debug1((x), (uintptr_t)(a), (uintptr_t)(b), (uintptr_t)(c), \ + (uintptr_t)(d), (uintptr_t)(e)); \ + } \ } while (0) #define KERNEL_DEBUG_EARLY(x, a, b, c, d) \ do { \ - kernel_debug_early((uint32_t)(x), (uintptr_t)(a), (uintptr_t)(b), \ - (uintptr_t)(c), (uintptr_t)(d)); \ + kernel_debug_early((uint32_t)(x), (uintptr_t)(a), (uintptr_t)(b), \ + (uintptr_t)(c), (uintptr_t)(d)); \ } while (0) #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ #define KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e) do {} while (0) @@ -1223,17 +1241,17 @@ extern unsigned int kdebug_enable; #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) #define KERNEL_DEBUG_CONSTANT_IST(type, x, a, b, c, d, e) \ do { \ - if (KDBG_IMPROBABLE(kdebug_enable & (type))) { \ - kernel_debug((x), (uintptr_t)(a), (uintptr_t)(b), (uintptr_t)(c), \ - (uintptr_t)(d), 0); \ - } \ + if (KDBG_IMPROBABLE(kdebug_enable & (type))) { \ + kernel_debug((x), (uintptr_t)(a), (uintptr_t)(b), (uintptr_t)(c), \ + (uintptr_t)(d), 0); \ + } \ } while (0) #define KERNEL_DEBUG_CONSTANT_IST1(x, a, b, c, d, e) \ do { \ - if (KDBG_IMPROBABLE(kdebug_enable)) { \ - kernel_debug1((x), (uintptr_t)(a), (uintptr_t)(b), (uintptr_t)(c), \ - (uintptr_t)(d), (uintptr_t)(e)); \ - } \ + if (KDBG_IMPROBABLE(kdebug_enable)) { \ + kernel_debug1((x), (uintptr_t)(a), (uintptr_t)(b), (uintptr_t)(c), \ + (uintptr_t)(d), (uintptr_t)(e)); \ + } \ } while (0) #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */ #define KERNEL_DEBUG_CONSTANT_IST(type, x, a, b, c, d, e) do {} 
while (0) @@ -1255,10 +1273,10 @@ extern unsigned int kdebug_enable; #define KERNEL_DEBUG(x, a, b, c, d, e) \ do { \ - if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ - kernel_debug((uint32_t)(x), (uintptr_t)(a), (uintptr_t)(b), \ - (uintptr_t)(c), (uintptr_t)(d), (uintptr_t)(e)); \ - } \ + if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ + kernel_debug((uint32_t)(x), (uintptr_t)(a), (uintptr_t)(b), \ + (uintptr_t)(c), (uintptr_t)(d), (uintptr_t)(e)); \ + } \ } while (0) /* @@ -1266,60 +1284,60 @@ extern unsigned int kdebug_enable; */ #define KERNEL_DEBUG1(x, a, b, c, d, e) \ do { \ - if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ - kernel_debug1((uint32_t)(x), (uintptr_t)(a), (uintptr_t)(b), \ - (uintptr_t)(c), (uintptr_t)(d), (uintptr_t)(e)); \ - } \ + if (KDBG_IMPROBABLE(kdebug_enable & ~KDEBUG_ENABLE_PPT)) { \ + kernel_debug1((uint32_t)(x), (uintptr_t)(a), (uintptr_t)(b), \ + (uintptr_t)(c), (uintptr_t)(d), (uintptr_t)(e)); \ + } \ } while (0) #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_FULL) */ #define __kdebug_only __unused -#define KERNEL_DEBUG(x,a,b,c,d,e) do {} while (0) -#define KERNEL_DEBUG1(x,a,b,c,d,e) do {} while (0) +#define KERNEL_DEBUG(x, a, b, c, d, e) do {} while (0) +#define KERNEL_DEBUG1(x, a, b, c, d, e) do {} while (0) #endif /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_FULL) */ extern void kernel_debug( - uint32_t debugid, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4, - uintptr_t arg5); + uint32_t debugid, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4, + uintptr_t arg5); extern void kernel_debug1( - uint32_t debugid, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4, - uintptr_t arg5); + uint32_t debugid, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4, + uintptr_t arg5); #define KDBG_FLAG_FILTERED 0x01 #define KDBG_FLAG_NOPROCFILT 0x02 extern void kernel_debug_flags( - uint32_t debugid, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4, - uint64_t flags); + uint32_t debugid, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4, + uint64_t flags); extern void kernel_debug_filtered( - uint32_t debugid, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4); + uint32_t debugid, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4); extern void kernel_debug_early( - uint32_t debugid, - uintptr_t arg1, - uintptr_t arg2, - uintptr_t arg3, - uintptr_t arg4); + uint32_t debugid, + uintptr_t arg1, + uintptr_t arg2, + uintptr_t arg3, + uintptr_t arg4); /* * EnergyTracing macros. 
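The *DBG_CODE() helpers above pack a class, a subclass, and a code into a single 32-bit debugid, and the KERNEL_DEBUG_CONSTANT* macros compile away entirely unless the kernel is built at a sufficient KDEBUG_LEVEL; at run time they additionally test kdebug_enable before calling into kernel_debug(). As a minimal sketch, a paired tracepoint might look like the following; MACHDBG_CODE(), DBG_MACH_CLOCK, DBG_FUNC_START/END, and KERNEL_DEBUG_CONSTANT() are taken from this header, while the code value 0x20 and the traced routine are made-up examples.

    #include <sys/kdebug.h>

    static void
    traced_work(uintptr_t arg)
    {
            /* Compose DBG_MACH / DBG_MACH_CLOCK / code 0x20 (hypothetical). */
            uint32_t event = MACHDBG_CODE(DBG_MACH_CLOCK, 0x20);

            KERNEL_DEBUG_CONSTANT(event | DBG_FUNC_START, arg, 0, 0, 0, 0);
            /* ... the region being measured ... */
            KERNEL_DEBUG_CONSTANT(event | DBG_FUNC_END, arg, 0, 0, 0, 0);
    }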
@@ -1330,53 +1348,53 @@ extern void kernel_debug_early( // could change in future to see if DBG_ENERGYTRACE is active #define ENTR_SHOULDTRACE kdebug_enable // encode logical EnergyTracing into 32/64 KDebug trace -#define ENTR_KDTRACE(component, opcode, lifespan, id, quality, value) \ -do { \ - uint32_t kdcode__; \ - uintptr_t highval__, lowval__, mask__ = 0xffffffff; \ - kdcode__ = KDBG_CODE(DBG_ENERGYTRACE,component,opcode)|(lifespan); \ - highval__ = ((value) >> 32) & mask__; \ - lowval__ = (value) & mask__; \ - ENTR_KDTRACEFUNC(kdcode__, id, quality, highval__, lowval__); \ +#define ENTR_KDTRACE(component, opcode, lifespan, id, quality, value) \ +do { \ + uint32_t kdcode__; \ + uintptr_t highval__, lowval__, mask__ = 0xffffffff; \ + kdcode__ = KDBG_CODE(DBG_ENERGYTRACE,component,opcode)|(lifespan); \ + highval__ = ((value) >> 32) & mask__; \ + lowval__ = (value) & mask__; \ + ENTR_KDTRACEFUNC(kdcode__, id, quality, highval__, lowval__); \ } while(0) /* - Trace the association of two existing activations. - - An association is traced as a modification to the parent activation. - In order to fit the sub-activation's component, activation code, and - activation ID into a kdebug tracepoint, the arguments that would hold - the value are left separate, and one stores the component and opcode - of the sub-activation, while the other stores the pointer-sized - activation ID. - - arg2 arg3 arg4 - +-----------------+ +~+----+----+--------+ +----------+ - |kEnTrModAssociate| | | | | | | | - +-----------------+ +~+----+----+--------+ +----------+ - 8-bits unused sub-activation ID - 8-bit sub-component - 16-bit sub-opcode - -*/ + * Trace the association of two existing activations. + * + * An association is traced as a modification to the parent activation. + * In order to fit the sub-activation's component, activation code, and + * activation ID into a kdebug tracepoint, the arguments that would hold + * the value are left separate, and one stores the component and opcode + * of the sub-activation, while the other stores the pointer-sized + * activation ID. 
+ * + * arg2 arg3 arg4 + +-----------------+ +~+----+----+--------+ +----------+ + |kEnTrModAssociate| | | | | | | | + +-----------------+ +~+----+----+--------+ +----------+ + * 8-bits unused sub-activation ID + * 8-bit sub-component + * 16-bit sub-opcode + * + */ #define kEnTrModAssociate (1 << 28) -#define ENTR_KDASSOCIATE(par_comp, par_opcode, par_act_id, \ - sub_comp, sub_opcode, sub_act_id) \ -do { \ - unsigned sub_compcode = ((unsigned)sub_comp << 16) | sub_opcode; \ - ENTR_KDTRACEFUNC(KDBG_CODE(DBG_ENERGYTRACE,par_comp,par_opcode), \ - par_act_id, kEnTrModAssociate, sub_compcode, \ - sub_act_id); \ +#define ENTR_KDASSOCIATE(par_comp, par_opcode, par_act_id, \ + sub_comp, sub_opcode, sub_act_id) \ +do { \ + unsigned sub_compcode = ((unsigned)sub_comp << 16) | sub_opcode; \ + ENTR_KDTRACEFUNC(KDBG_CODE(DBG_ENERGYTRACE,par_comp,par_opcode), \ + par_act_id, kEnTrModAssociate, sub_compcode, \ + sub_act_id); \ } while(0) #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */ #define ENTR_SHOULDTRACE FALSE -#define ENTR_KDTRACE(component, opcode, lifespan, id, quality, value) \ - do {} while (0) -#define ENTR_KDASSOCIATE(par_comp, par_opcode, par_act_id, \ - sub_comp, sub_opcode, sub_act_id) \ - do {} while (0) +#define ENTR_KDTRACE(component, opcode, lifespan, id, quality, value) \ + do {} while (0) +#define ENTR_KDASSOCIATE(par_comp, par_opcode, par_act_id, \ + sub_comp, sub_opcode, sub_act_id) \ + do {} while (0) #endif /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */ @@ -1407,51 +1425,51 @@ extern void kernel_debug_disable(void); #define KDEBUG_COMMPAGE_ENABLE_TYPEFILTER 0x2 /* Forced to false if ENABLE_TRACE is 0 */ // for EnergyTracing user space & clients -#define kEnTrCompKernel 2 +#define kEnTrCompKernel 2 /* - EnergyTracing opcodes - - Activations use DBG_FUNC_START/END. - Events are DBG_FUNC_NONE. + * EnergyTracing opcodes + * + * Activations use DBG_FUNC_START/END. + * Events are DBG_FUNC_NONE. */ /* Socket reads and writes are uniquely identified by the (sanitized) - pointer to the socket struct in question. To associate this address - with the user space file descriptor, we have a socket activation with - the FD as its identifier and the socket struct pointer as its value. -*/ -#define kEnTrActKernSocket 1 -#define kEnTrActKernSockRead 2 -#define kEnTrActKernSockWrite 3 - -#define kEnTrActKernPoll 10 -#define kEnTrActKernSelect 11 -#define kEnTrActKernKQWait 12 + * pointer to the socket struct in question. To associate this address + * with the user space file descriptor, we have a socket activation with + * the FD as its identifier and the socket struct pointer as its value. 
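Under that convention, and using the KERNEL_ENERGYTRACE() wrapper defined further down for KERNEL_PRIVATE consumers, a socket-read activation might be bracketed as sketched below; sock_read_traced, so_id, and nbytes are illustrative names, not taken from this header.

    /* Hypothetical call site: open a kEnTrActKernSockRead activation keyed
     * by the sanitized socket pointer, then close it with the number of
     * bytes moved as its value. */
    static void
    sock_read_traced(uintptr_t so_id, int64_t nbytes)
    {
            KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_START,
                so_id, 0, 0);
            /* ... data is copied out to user space here ... */
            KERNEL_ENERGYTRACE(kEnTrActKernSockRead, DBG_FUNC_END,
                so_id, 0, nbytes);
    }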
+ */ +#define kEnTrActKernSocket 1 +#define kEnTrActKernSockRead 2 +#define kEnTrActKernSockWrite 3 + +#define kEnTrActKernPoll 10 +#define kEnTrActKernSelect 11 +#define kEnTrActKernKQWait 12 // events -#define kEnTrEvUnblocked 256 +#define kEnTrEvUnblocked 256 // EnergyTracing flags (the low-order 16 bits of 'quality') -#define kEnTrFlagNonBlocking 1 << 0 -#define kEnTrFlagNoWork 1 << 1 +#define kEnTrFlagNonBlocking 1 << 0 +#define kEnTrFlagNoWork 1 << 1 // and now the internal mechanism #ifdef KERNEL_PRIVATE // 20452597 requests that the trace macros not take an argument it throws away -#define KERNEL_DBG_IST_SANE(x, a, b, c, d) \ - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, x, a, b, c, d, \ - 0 /*__unused in kernel_debug()*/) +#define KERNEL_DBG_IST_SANE(x, a, b, c, d) \ + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, x, a, b, c, d, \ + 0 /*__unused in kernel_debug()*/ ) #define ENTR_KDTRACEFUNC KERNEL_DBG_IST_SANE // value is int64_t, quality is uint32_t #define KERNEL_ENERGYTRACE(opcode, lifespan, id, quality, value) \ - ENTR_KDTRACE(kEnTrCompKernel, opcode, lifespan, id, \ - quality, value) + ENTR_KDTRACE(kEnTrCompKernel, opcode, lifespan, id, \ + quality, value) #define KERNEL_ENTR_ASSOCIATE(par_opcode, par_act_id, sub_opcode, sub_act_id) \ - ENTR_KDASSOCIATE(kEnTrCompKernel, par_opcode, par_act_id, \ - kEnTrCompKernel, sub_opcode, sub_act_id) + ENTR_KDASSOCIATE(kEnTrCompKernel, par_opcode, par_act_id, \ + kEnTrCompKernel, sub_opcode, sub_act_id) // end EnergyTracing @@ -1482,10 +1500,10 @@ uint32_t kdebug_commpage_state(void); #define KDBG_VFS_LOOKUP_FLAG_LOOKUP 0x01 #define KDBG_VFS_LOOKUP_FLAG_NOPROCFILT 0x02 void kdebug_vfs_lookup(long *dbg_parms, int dbg_namelen, void *dp, - uint32_t flags); + uint32_t flags); void kdebug_lookup_gen_events(long *dbg_parms, int dbg_namelen, void *dp, - boolean_t lookup); + boolean_t lookup); void kdbg_trace_data(struct proc *proc, long *arg_pid, long *arg_uniqueid); @@ -1494,10 +1512,10 @@ void kdbg_trace_string(struct proc *proc, long *arg1, long *arg2, long *arg3, lo void kdbg_dump_trace_to_file(const char *); void kdebug_init(unsigned int n_events, char *filterdesc, boolean_t wrapping); void kdebug_trace_start(unsigned int n_events, const char *filterdesc, - boolean_t wrapping, boolean_t at_wake); + boolean_t wrapping, boolean_t at_wake); void kdebug_free_early_buf(void); struct task; -void release_storage_unit(int cpu, uint32_t storage_unit); +void release_storage_unit(int cpu, uint32_t storage_unit); int allocate_storage_unit(int cpu); #define KDBG_CLASS_ENCODE(Class, SubClass) KDBG_EVENTID(Class, SubClass, 0) @@ -1542,7 +1560,7 @@ typedef struct { } kd_buf; #if defined(__LP64__) || defined(__arm64__) -#define KDBG_TIMESTAMP_MASK 0xffffffffffffffffULL +#define KDBG_TIMESTAMP_MASK 0xffffffffffffffffULL static inline void kdbg_set_cpu(kd_buf *kp, int cpu) { @@ -1577,7 +1595,7 @@ static inline void kdbg_set_cpu(kd_buf *kp, int cpu) { kp->timestamp = (kp->timestamp & KDBG_TIMESTAMP_MASK) | - (((uint64_t) cpu) << KDBG_CPU_SHIFT); + (((uint64_t) cpu) << KDBG_CPU_SHIFT); } static inline int kdbg_get_cpu(kd_buf *kp) @@ -1598,7 +1616,7 @@ static inline void kdbg_set_timestamp_and_cpu(kd_buf *kp, uint64_t thetime, int cpu) { kp->timestamp = (thetime & KDBG_TIMESTAMP_MASK) | - (((uint64_t) cpu) << KDBG_CPU_SHIFT); + (((uint64_t) cpu) << KDBG_CPU_SHIFT); } #endif @@ -1687,7 +1705,7 @@ typedef struct { } kd_cpumap_header; /* cpumap flags */ -#define KDBG_CPUMAP_IS_IOP 0x1 +#define KDBG_CPUMAP_IS_IOP 0x1 typedef struct { uint32_t cpu_id; @@ -1759,9 +1777,9 @@ 
typedef struct { // that identifies the file as a version 3 trace file. The header payload is // a set of fixed fields followed by a variable number of sub-chunks: /* - ____________________________________________________________________________ + * ____________________________________________________________________________ | Offset | Size | Field | - ---------------------------------------------------------------------------- + | ---------------------------------------------------------------------------- | 0 | 4 | Tag (0x00001000) | | 4 | 4 | Sub-tag. Represents the version of the header. | | 8 | 8 | Length of header payload (40+8x) | @@ -1777,8 +1795,8 @@ typedef struct { | | | as 0 and ignored when reading. | | 56 | 8x | Variable number of sub-chunks. None are required. | | | | Ignore unknown chunks. | - ---------------------------------------------------------------------------- -*/ + | ---------------------------------------------------------------------------- + */ // NOTE: The header sub-chunks are considered part of the header chunk, // so they must be included in the header chunk’s length field. // The CPU map is an optional sub-chunk of the header chunk. It provides @@ -1803,16 +1821,16 @@ typedef struct { uint64_t length; } __attribute__((packed)) kd_chunk_header_v3; -#define RAW_VERSION0 0x55aa0000 -#define RAW_VERSION1 0x55aa0101 +#define RAW_VERSION0 0x55aa0000 +#define RAW_VERSION1 0x55aa0101 #define RAW_VERSION2 0x55aa0200 /* Only used by kperf and Instruments */ #define RAW_VERSION3 0x00001000 -#define V3_CONFIG 0x00001b00 -#define V3_CPU_MAP 0x00001c00 -#define V3_THREAD_MAP 0x00001d00 -#define V3_RAW_EVENTS 0x00001e00 -#define V3_NULL_CHUNK 0x00002000 +#define V3_CONFIG 0x00001b00 +#define V3_CPU_MAP 0x00001c00 +#define V3_THREAD_MAP 0x00001d00 +#define V3_RAW_EVENTS 0x00001e00 +#define V3_NULL_CHUNK 0x00002000 // The current version of all kernel managed chunks is 1. The // V3_CURRENT_CHUNK_VERSION is added to ease the simple case @@ -1829,8 +1847,8 @@ int kdbg_write_v3_chunk_header_to_buffer(void *buffer, uint32_t tag, uint32_t su int kdbg_write_v3_chunk_to_fd(uint32_t tag, uint32_t sub_tag, uint64_t length, void *payload, uint64_t payload_size, int fd); /* VFS lookup events for serial traces */ -#define VFS_LOOKUP (FSDBG_CODE(DBG_FSRW,36)) -#define VFS_LOOKUP_DONE (FSDBG_CODE(DBG_FSRW,39)) +#define VFS_LOOKUP (FSDBG_CODE(DBG_FSRW,36)) +#define VFS_LOOKUP_DONE (FSDBG_CODE(DBG_FSRW,39)) #if !CONFIG_EMBEDDED #if defined(XNU_KERNEL_PRIVATE) && (DEVELOPMENT || DEBUG) diff --git a/bsd/sys/kdebug_signpost.h b/bsd/sys/kdebug_signpost.h index e4332be3b..7db2d075f 100644 --- a/bsd/sys/kdebug_signpost.h +++ b/bsd/sys/kdebug_signpost.h @@ -53,20 +53,20 @@ __BEGIN_DECLS /* * When is NONE, use kdebug_signpost. */ -int kdebug_signpost(uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); +int kdebug_signpost(uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); /* * When is START, use kdebug_signpost_start. */ int kdebug_signpost_start(uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); /* * When is END, use kdebug_signpost_end. 
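Taken together, these three entry points let a user-space process emit paired trace events that tools such as Instruments can render as intervals. A short sketch follows; the code value 42 and the arguments are arbitrary examples, and arg1 through arg4 are free-form payload.

    #include <sys/kdebug_signpost.h>

    #define MY_SIGNPOST_CODE 42     /* hypothetical signpost code */

    void
    do_tracked_work(uintptr_t item)
    {
            /* Matching start/end calls with the same code bracket one
             * interval of work. */
            kdebug_signpost_start(MY_SIGNPOST_CODE, item, 0, 0, 0);
            /* ... the work being measured ... */
            kdebug_signpost_end(MY_SIGNPOST_CODE, item, 0, 0, 0);
    }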
*/ int kdebug_signpost_end(uint32_t code, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) - __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0); #endif /* !KERNEL */ diff --git a/bsd/sys/kern_control.h b/bsd/sys/kern_control.h index 51324ea65..3fac13c86 100644 --- a/bsd/sys/kern_control.h +++ b/bsd/sys/kern_control.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004, 2012-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header kern_control.h - This header defines an API to communicate between a kernel - extension and a process outside of the kernel. + * @header kern_control.h + * This header defines an API to communicate between a kernel + * extension and a process outside of the kernel. */ #ifndef KPI_KERN_CONTROL_H @@ -46,35 +46,35 @@ */ /*! - @defined KEV_CTL_SUBCLASS - @discussion The kernel event subclass for kernel control events. -*/ + * @defined KEV_CTL_SUBCLASS + * @discussion The kernel event subclass for kernel control events. + */ #define KEV_CTL_SUBCLASS 2 /*! - @defined KEV_CTL_REGISTERED - @discussion The event code indicating a new controller was - registered. The data portion will contain a ctl_event_data. -*/ + * @defined KEV_CTL_REGISTERED + * @discussion The event code indicating a new controller was + * registered. The data portion will contain a ctl_event_data. + */ #define KEV_CTL_REGISTERED 1 /* a new controller appears */ /*! - @defined KEV_CTL_DEREGISTERED - @discussion The event code indicating a controller was unregistered. - The data portion will contain a ctl_event_data. -*/ + * @defined KEV_CTL_DEREGISTERED + * @discussion The event code indicating a controller was unregistered. + * The data portion will contain a ctl_event_data. + */ #define KEV_CTL_DEREGISTERED 2 /* a controller disappears */ /*! - @struct ctl_event_data - @discussion This structure is used for KEV_CTL_SUBCLASS kernel - events. - @field ctl_id The kernel control id. - @field ctl_unit The kernel control unit. -*/ + * @struct ctl_event_data + * @discussion This structure is used for KEV_CTL_SUBCLASS kernel + * events. + * @field ctl_id The kernel control id. + * @field ctl_unit The kernel control unit. 
+ */ struct ctl_event_data { - u_int32_t ctl_id; /* Kernel Controller ID */ - u_int32_t ctl_unit; + u_int32_t ctl_id; /* Kernel Controller ID */ + u_int32_t ctl_unit; }; /* @@ -82,118 +82,118 @@ struct ctl_event_data { */ /*! - @defined CTLIOCGCOUNT - @discussion The CTLIOCGCOUNT ioctl can be used to determine the - number of kernel controllers registered. -*/ -#define CTLIOCGCOUNT _IOR('N', 2, int) /* get number of control structures registered */ + * @defined CTLIOCGCOUNT + * @discussion The CTLIOCGCOUNT ioctl can be used to determine the + * number of kernel controllers registered. + */ +#define CTLIOCGCOUNT _IOR('N', 2, int) /* get number of control structures registered */ /*! - @defined CTLIOCGINFO - @discussion The CTLIOCGINFO ioctl can be used to convert a kernel - control name to a kernel control id. -*/ -#define CTLIOCGINFO _IOWR('N', 3, struct ctl_info) /* get id from name */ + * @defined CTLIOCGINFO + * @discussion The CTLIOCGINFO ioctl can be used to convert a kernel + * control name to a kernel control id. + */ +#define CTLIOCGINFO _IOWR('N', 3, struct ctl_info) /* get id from name */ /*! - @defined MAX_KCTL_NAME - @discussion Kernel control names must be no longer than - MAX_KCTL_NAME. -*/ -#define MAX_KCTL_NAME 96 + * @defined MAX_KCTL_NAME + * @discussion Kernel control names must be no longer than + * MAX_KCTL_NAME. + */ +#define MAX_KCTL_NAME 96 /* * Controls destined to the Controller Manager. */ /*! - @struct ctl_info - @discussion This structure is used with the CTLIOCGINFO ioctl to - translate from a kernel control name to a control id. - @field ctl_id The kernel control id, filled out upon return. - @field ctl_name The kernel control name to find. -*/ + * @struct ctl_info + * @discussion This structure is used with the CTLIOCGINFO ioctl to + * translate from a kernel control name to a control id. + * @field ctl_id The kernel control id, filled out upon return. + * @field ctl_name The kernel control name to find. + */ struct ctl_info { - u_int32_t ctl_id; /* Kernel Controller ID */ - char ctl_name[MAX_KCTL_NAME]; /* Kernel Controller Name (a C string) */ + u_int32_t ctl_id; /* Kernel Controller ID */ + char ctl_name[MAX_KCTL_NAME]; /* Kernel Controller Name (a C string) */ }; /*! - @struct sockaddr_ctl - @discussion The controller address structure is used to establish - contact between a user client and a kernel controller. The - sc_id/sc_unit uniquely identify each controller. sc_id is a - unique identifier assigned to the controller. The identifier can - be assigned by the system at registration time or be a 32-bit - creator code obtained from Apple Computer. sc_unit is a unit - number for this sc_id, and is privately used by the kernel - controller to identify several instances of the controller. - @field sc_len The length of the structure. - @field sc_family AF_SYSTEM. - @field ss_sysaddr AF_SYS_KERNCONTROL. - @field sc_id Controller unique identifier. - @field sc_unit Kernel controller private unit number. - @field sc_reserved Reserved, must be set to zero. -*/ + * @struct sockaddr_ctl + * @discussion The controller address structure is used to establish + * contact between a user client and a kernel controller. The + * sc_id/sc_unit uniquely identify each controller. sc_id is a + * unique identifier assigned to the controller. The identifier can + * be assigned by the system at registration time or be a 32-bit + * creator code obtained from Apple Computer. 
sc_unit is a unit + * number for this sc_id, and is privately used by the kernel + * controller to identify several instances of the controller. + * @field sc_len The length of the structure. + * @field sc_family AF_SYSTEM. + * @field ss_sysaddr AF_SYS_KERNCONTROL. + * @field sc_id Controller unique identifier. + * @field sc_unit Kernel controller private unit number. + * @field sc_reserved Reserved, must be set to zero. + */ struct sockaddr_ctl { - u_char sc_len; /* depends on size of bundle ID string */ - u_char sc_family; /* AF_SYSTEM */ - u_int16_t ss_sysaddr; /* AF_SYS_KERNCONTROL */ - u_int32_t sc_id; /* Controller unique identifier */ - u_int32_t sc_unit; /* Developer private unit number */ - u_int32_t sc_reserved[5]; + u_char sc_len; /* depends on size of bundle ID string */ + u_char sc_family; /* AF_SYSTEM */ + u_int16_t ss_sysaddr; /* AF_SYS_KERNCONTROL */ + u_int32_t sc_id; /* Controller unique identifier */ + u_int32_t sc_unit; /* Developer private unit number */ + u_int32_t sc_reserved[5]; }; #ifdef PRIVATE struct xkctl_reg { - u_int32_t xkr_len; - u_int32_t xkr_kind; - u_int32_t xkr_id; - u_int32_t xkr_reg_unit; - u_int32_t xkr_flags; - u_int64_t xkr_kctlref; - u_int32_t xkr_recvbufsize; - u_int32_t xkr_sendbufsize; - u_int32_t xkr_lastunit; - u_int32_t xkr_pcbcount; - u_int64_t xkr_connect; - u_int64_t xkr_disconnect; - u_int64_t xkr_send; - u_int64_t xkr_send_list; - u_int64_t xkr_setopt; - u_int64_t xkr_getopt; - u_int64_t xkr_rcvd; - char xkr_name[MAX_KCTL_NAME]; + u_int32_t xkr_len; + u_int32_t xkr_kind; + u_int32_t xkr_id; + u_int32_t xkr_reg_unit; + u_int32_t xkr_flags; + u_int64_t xkr_kctlref; + u_int32_t xkr_recvbufsize; + u_int32_t xkr_sendbufsize; + u_int32_t xkr_lastunit; + u_int32_t xkr_pcbcount; + u_int64_t xkr_connect; + u_int64_t xkr_disconnect; + u_int64_t xkr_send; + u_int64_t xkr_send_list; + u_int64_t xkr_setopt; + u_int64_t xkr_getopt; + u_int64_t xkr_rcvd; + char xkr_name[MAX_KCTL_NAME]; }; struct xkctlpcb { - u_int32_t xkp_len; - u_int32_t xkp_kind; - u_int64_t xkp_kctpcb; - u_int32_t xkp_unit; - u_int32_t xkp_kctlid; - u_int64_t xkp_kctlref; - char xkp_kctlname[MAX_KCTL_NAME]; + u_int32_t xkp_len; + u_int32_t xkp_kind; + u_int64_t xkp_kctpcb; + u_int32_t xkp_unit; + u_int32_t xkp_kctlid; + u_int64_t xkp_kctlref; + char xkp_kctlname[MAX_KCTL_NAME]; }; struct kctlstat { - u_int64_t kcs_reg_total __attribute__((aligned(8))); - u_int64_t kcs_reg_count __attribute__((aligned(8))); - u_int64_t kcs_pcbcount __attribute__((aligned(8))); - u_int64_t kcs_gencnt __attribute__((aligned(8))); - u_int64_t kcs_connections __attribute__((aligned(8))); - u_int64_t kcs_conn_fail __attribute__((aligned(8))); - u_int64_t kcs_send_fail __attribute__((aligned(8))); - u_int64_t kcs_send_list_fail __attribute__((aligned(8))); - u_int64_t kcs_enqueue_fail __attribute__((aligned(8))); - u_int64_t kcs_enqueue_fullsock __attribute__((aligned(8))); - u_int64_t kcs_bad_kctlref __attribute__((aligned(8))); - u_int64_t kcs_tbl_size_too_big __attribute__((aligned(8))); - u_int64_t kcs_enqdata_mb_alloc_fail __attribute__((aligned(8))); - u_int64_t kcs_enqdata_sbappend_fail __attribute__((aligned(8))); + u_int64_t kcs_reg_total __attribute__((aligned(8))); + u_int64_t kcs_reg_count __attribute__((aligned(8))); + u_int64_t kcs_pcbcount __attribute__((aligned(8))); + u_int64_t kcs_gencnt __attribute__((aligned(8))); + u_int64_t kcs_connections __attribute__((aligned(8))); + u_int64_t kcs_conn_fail __attribute__((aligned(8))); + u_int64_t kcs_send_fail __attribute__((aligned(8))); + 
u_int64_t kcs_send_list_fail __attribute__((aligned(8))); + u_int64_t kcs_enqueue_fail __attribute__((aligned(8))); + u_int64_t kcs_enqueue_fullsock __attribute__((aligned(8))); + u_int64_t kcs_bad_kctlref __attribute__((aligned(8))); + u_int64_t kcs_tbl_size_too_big __attribute__((aligned(8))); + u_int64_t kcs_enqdata_mb_alloc_fail __attribute__((aligned(8))); + u_int64_t kcs_enqdata_sbappend_fail __attribute__((aligned(8))); }; #endif /* PRIVATE */ @@ -203,404 +203,403 @@ struct kctlstat { #include /*! - @typedef kern_ctl_ref - @discussion A control reference is used to track an attached kernel - control. Registering a kernel control will create a kernel - control reference. This reference is required for sending data - or removing the kernel control. This reference will be passed to - callbacks for that kernel control. -*/ + * @typedef kern_ctl_ref + * @discussion A control reference is used to track an attached kernel + * control. Registering a kernel control will create a kernel + * control reference. This reference is required for sending data + * or removing the kernel control. This reference will be passed to + * callbacks for that kernel control. + */ typedef void * kern_ctl_ref; /*! - @defined CTL_FLAG_PRIVILEGED - @discussion The CTL_FLAG_PRIVILEGED flag is passed in ctl_flags. If - this flag is set, only privileged processes may attach to this - kernel control. -*/ -#define CTL_FLAG_PRIVILEGED 0x1 -/*! - @defined CTL_FLAG_REG_ID_UNIT - @discussion The CTL_FLAG_REG_ID_UNIT flag is passed to indicate that - the ctl_id specified should be used. If this flag is not - present, a unique ctl_id will be dynamically assigned to your - kernel control. The CTLIOCGINFO ioctl can be used by the client - to find the dynamically assigned id based on the control name - specified in ctl_name. -*/ -#define CTL_FLAG_REG_ID_UNIT 0x2 -/*! - @defined CTL_FLAG_REG_SOCK_STREAM - @discussion Use the CTL_FLAG_REG_SOCK_STREAM flag when client need to open - socket of type SOCK_STREAM to communicate with the kernel control. - By default kernel control sockets are of type SOCK_DGRAM. -*/ -#define CTL_FLAG_REG_SOCK_STREAM 0x4 + * @defined CTL_FLAG_PRIVILEGED + * @discussion The CTL_FLAG_PRIVILEGED flag is passed in ctl_flags. If + * this flag is set, only privileged processes may attach to this + * kernel control. + */ +#define CTL_FLAG_PRIVILEGED 0x1 +/*! + * @defined CTL_FLAG_REG_ID_UNIT + * @discussion The CTL_FLAG_REG_ID_UNIT flag is passed to indicate that + * the ctl_id specified should be used. If this flag is not + * present, a unique ctl_id will be dynamically assigned to your + * kernel control. The CTLIOCGINFO ioctl can be used by the client + * to find the dynamically assigned id based on the control name + * specified in ctl_name. + */ +#define CTL_FLAG_REG_ID_UNIT 0x2 +/*! + * @defined CTL_FLAG_REG_SOCK_STREAM + * @discussion Use the CTL_FLAG_REG_SOCK_STREAM flag when client need to open + * socket of type SOCK_STREAM to communicate with the kernel control. + * By default kernel control sockets are of type SOCK_DGRAM. + */ +#define CTL_FLAG_REG_SOCK_STREAM 0x4 #ifdef KERNEL_PRIVATE /*! - @defined CTL_FLAG_REG_EXTENDED - @discussion This flag indicates that this kernel control utilizes the - the extended fields within the kern_ctl_reg structure. -*/ -#define CTL_FLAG_REG_EXTENDED 0x8 + * @defined CTL_FLAG_REG_EXTENDED + * @discussion This flag indicates that this kernel control utilizes the + * the extended fields within the kern_ctl_reg structure. + */ +#define CTL_FLAG_REG_EXTENDED 0x8 /*! 
- @defined CTL_FLAG_REG_CRIT - @discussion This flag indicates that this kernel control utilizes the - the extended fields within the kern_ctl_reg structure. -*/ -#define CTL_FLAG_REG_CRIT 0x10 + * @defined CTL_FLAG_REG_CRIT + * @discussion This flag indicates that this kernel control utilizes the + * the extended fields within the kern_ctl_reg structure. + */ +#define CTL_FLAG_REG_CRIT 0x10 #endif /* KERNEL_PRIVATE */ /* Data flags for controllers */ /*! - @defined CTL_DATA_NOWAKEUP - @discussion The CTL_DATA_NOWAKEUP flag can be used for the enqueue - data and enqueue mbuf functions to indicate that the process - should not be woken up yet. This is useful when you want to - enqueue data using more than one call but only want to wake up - the client after all of the data has been enqueued. -*/ -#define CTL_DATA_NOWAKEUP 0x1 + * @defined CTL_DATA_NOWAKEUP + * @discussion The CTL_DATA_NOWAKEUP flag can be used for the enqueue + * data and enqueue mbuf functions to indicate that the process + * should not be woken up yet. This is useful when you want to + * enqueue data using more than one call but only want to wake up + * the client after all of the data has been enqueued. + */ +#define CTL_DATA_NOWAKEUP 0x1 /*! - @defined CTL_DATA_EOR - @discussion The CTL_DATA_EOR flag can be used for the enqueue - data and enqueue mbuf functions to mark the end of a record. -*/ -#define CTL_DATA_EOR 0x2 + * @defined CTL_DATA_EOR + * @discussion The CTL_DATA_EOR flag can be used for the enqueue + * data and enqueue mbuf functions to mark the end of a record. + */ +#define CTL_DATA_EOR 0x2 #ifdef KERNEL_PRIVATE /*! - @defined CTL_DATA_CRIT - @discussion This flag indicates the data is critical to the client - and that it needs to be forced into the socket buffer - by resizing it if needed. -*/ -#define CTL_DATA_CRIT 0x4 + * @defined CTL_DATA_CRIT + * @discussion This flag indicates the data is critical to the client + * and that it needs to be forced into the socket buffer + * by resizing it if needed. + */ +#define CTL_DATA_CRIT 0x4 #endif /* KERNEL_PRIVATE */ __BEGIN_DECLS /*! - @typedef ctl_connect_func - @discussion The ctl_connect_func is used to receive - notification of a client connecting to the kernel control. - @param kctlref The control ref for the kernel control the client is - connecting to. - @param sac The address used to connect to this control. The field sc_unit - contains the unit number of the kernel control instance the client is - connecting to. If CTL_FLAG_REG_ID_UNIT was set when the kernel control - was registered, sc_unit is the ctl_unit of the kern_ctl_reg structure. - If CTL_FLAG_REG_ID_UNIT was not set when the kernel control was - registered, sc_unit is the dynamically allocated unit number of - the new kernel control instance that is used for this connection. - @param unitinfo A placeholder for a pointer to the optional user-defined - private data associated with this kernel control instance. This - opaque info will be provided to the user when the rest of the - callback routines are executed. For example, it can be used - to pass a pointer to an instance-specific data structure in - order for the user to keep track of the states related to this - kernel control instance. - */ -typedef errno_t (*ctl_connect_func)(kern_ctl_ref kctlref, - struct sockaddr_ctl *sac, - void **unitinfo); - -/*! - @typedef ctl_disconnect_func - @discussion The ctl_disconnect_func is used to receive notification - that a client has disconnected from the kernel control. 
This - usually happens when the socket is closed. If this is the last - socket attached to your kernel control, you may unregister your - kernel control from this callback. - @param kctlref The control ref for the kernel control instance the client has - disconnected from. - @param unit The unit number of the kernel control instance the client has - disconnected from. - @param unitinfo The user-defined private data initialized by the - ctl_connect_func callback. + * @typedef ctl_connect_func + * @discussion The ctl_connect_func is used to receive + * notification of a client connecting to the kernel control. + * @param kctlref The control ref for the kernel control the client is + * connecting to. + * @param sac The address used to connect to this control. The field sc_unit + * contains the unit number of the kernel control instance the client is + * connecting to. If CTL_FLAG_REG_ID_UNIT was set when the kernel control + * was registered, sc_unit is the ctl_unit of the kern_ctl_reg structure. + * If CTL_FLAG_REG_ID_UNIT was not set when the kernel control was + * registered, sc_unit is the dynamically allocated unit number of + * the new kernel control instance that is used for this connection. + * @param unitinfo A placeholder for a pointer to the optional user-defined + * private data associated with this kernel control instance. This + * opaque info will be provided to the user when the rest of the + * callback routines are executed. For example, it can be used + * to pass a pointer to an instance-specific data structure in + * order for the user to keep track of the states related to this + * kernel control instance. + */ +typedef errno_t (*ctl_connect_func)(kern_ctl_ref kctlref, + struct sockaddr_ctl *sac, + void **unitinfo); + +/*! + * @typedef ctl_disconnect_func + * @discussion The ctl_disconnect_func is used to receive notification + * that a client has disconnected from the kernel control. This + * usually happens when the socket is closed. If this is the last + * socket attached to your kernel control, you may unregister your + * kernel control from this callback. + * @param kctlref The control ref for the kernel control instance the client has + * disconnected from. + * @param unit The unit number of the kernel control instance the client has + * disconnected from. + * @param unitinfo The user-defined private data initialized by the + * ctl_connect_func callback. */ typedef errno_t (*ctl_disconnect_func)(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo); /*! - @typedef ctl_send_func - @discussion The ctl_send_func is used to receive data sent from - the client to the kernel control. - @param kctlref The control ref of the kernel control. - @param unit The unit number of the kernel control instance the client has - connected to. - @param unitinfo The user-defined private data initialized by the - ctl_connect_func callback. - @param m The data sent by the client to the kernel control in an - mbuf chain. Your function is responsible for releasing the - mbuf chain. - @param flags The flags specified by the client when calling - send/sendto/sendmsg (MSG_OOB/MSG_DONTROUTE). + * @typedef ctl_send_func + * @discussion The ctl_send_func is used to receive data sent from + * the client to the kernel control. + * @param kctlref The control ref of the kernel control. + * @param unit The unit number of the kernel control instance the client has + * connected to. + * @param unitinfo The user-defined private data initialized by the + * ctl_connect_func callback. 
+ * @param m The data sent by the client to the kernel control in an + * mbuf chain. Your function is responsible for releasing the + * mbuf chain. + * @param flags The flags specified by the client when calling + * send/sendto/sendmsg (MSG_OOB/MSG_DONTROUTE). */ typedef errno_t (*ctl_send_func)(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - mbuf_t m, int flags); - -/*! - @typedef ctl_setopt_func - @discussion The ctl_setopt_func is used to handle set socket option - calls for the SYSPROTO_CONTROL option level. - @param kctlref The control ref of the kernel control. - @param unit The unit number of the kernel control instance. - @param unitinfo The user-defined private data initialized by the - ctl_connect_func callback. - @param opt The socket option. - @param data A pointer to the socket option data. The data has - already been copied in to the kernel for you. - @param len The length of the socket option data. + mbuf_t m, int flags); + +/*! + * @typedef ctl_setopt_func + * @discussion The ctl_setopt_func is used to handle set socket option + * calls for the SYSPROTO_CONTROL option level. + * @param kctlref The control ref of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param unitinfo The user-defined private data initialized by the + * ctl_connect_func callback. + * @param opt The socket option. + * @param data A pointer to the socket option data. The data has + * already been copied in to the kernel for you. + * @param len The length of the socket option data. */ typedef errno_t (*ctl_setopt_func)(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int opt, void *data, size_t len); - -/*! - @typedef ctl_getopt_func - @discussion The ctl_getopt_func is used to handle client get socket - option requests for the SYSPROTO_CONTROL option level. A buffer - is allocated for storage and passed to your function. The length - of that buffer is also passed. Upon return, you should set *len - to length of the buffer used. In some cases, data may be NULL. - When this happens, *len should be set to the length you would - have returned had data not been NULL. If the buffer is too small, - return an error. - @param kctlref The control ref of the kernel control. - @param unit The unit number of the kernel control instance. - @param unitinfo The user-defined private data initialized by the - ctl_connect_func callback. - @param opt The socket option. - @param data A buffer to copy the results in to. May be NULL, see - discussion. - @param len A pointer to the length of the buffer. This should be set - to the length of the buffer used before returning. + int opt, void *data, size_t len); + +/*! + * @typedef ctl_getopt_func + * @discussion The ctl_getopt_func is used to handle client get socket + * option requests for the SYSPROTO_CONTROL option level. A buffer + * is allocated for storage and passed to your function. The length + * of that buffer is also passed. Upon return, you should set *len + * to length of the buffer used. In some cases, data may be NULL. + * When this happens, *len should be set to the length you would + * have returned had data not been NULL. If the buffer is too small, + * return an error. + * @param kctlref The control ref of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param unitinfo The user-defined private data initialized by the + * ctl_connect_func callback. + * @param opt The socket option. + * @param data A buffer to copy the results in to. May be NULL, see + * discussion. 
+ * @param len A pointer to the length of the buffer. This should be set + * to the length of the buffer used before returning. */ typedef errno_t (*ctl_getopt_func)(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int opt, void *data, size_t *len); + int opt, void *data, size_t *len); #ifdef KERNEL_PRIVATE /*! - @typedef ctl_rcvd_func - @discussion The ctl_rcvd_func is called when the client reads data from - the kernel control socket. The kernel control can use this callback - in combination with ctl_getenqueuespace() to avoid overflowing - the socket's receive buffer. When ctl_getenqueuespace() returns - 0 or ctl_enqueuedata()/ctl_enqueuembuf() return ENOBUFS, the - kernel control can wait until this callback is called before - trying to enqueue the data again. - @param kctlref The control ref of the kernel control. - @param unit The unit number of the kernel control instance. - @param unitinfo The user-defined private data initialized by the - ctl_connect_func callback. - @param flags The recv flags. See the recv(2) man page. + * @typedef ctl_rcvd_func + * @discussion The ctl_rcvd_func is called when the client reads data from + * the kernel control socket. The kernel control can use this callback + * in combination with ctl_getenqueuespace() to avoid overflowing + * the socket's receive buffer. When ctl_getenqueuespace() returns + * 0 or ctl_enqueuedata()/ctl_enqueuembuf() return ENOBUFS, the + * kernel control can wait until this callback is called before + * trying to enqueue the data again. + * @param kctlref The control ref of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param unitinfo The user-defined private data initialized by the + * ctl_connect_func callback. + * @param flags The recv flags. See the recv(2) man page. */ typedef void (*ctl_rcvd_func)(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - int flags); - -/*! - @typedef ctl_send_list_func - @discussion The ctl_send_list_func is used to receive data sent from - the client to the kernel control. - @param kctlref The control ref of the kernel control. - @param unit The unit number of the kernel control instance the client has - connected to. - @param unitinfo The user-defined private data initialized by the - ctl_connect_func callback. - @param m The data sent by the client to the kernel control in an - mbuf packet chain. Your function is responsible for releasing - mbuf packet chain. - @param flags The flags specified by the client when calling - send/sendto/sendmsg (MSG_OOB/MSG_DONTROUTE). + int flags); + +/*! + * @typedef ctl_send_list_func + * @discussion The ctl_send_list_func is used to receive data sent from + * the client to the kernel control. + * @param kctlref The control ref of the kernel control. + * @param unit The unit number of the kernel control instance the client has + * connected to. + * @param unitinfo The user-defined private data initialized by the + * ctl_connect_func callback. + * @param m The data sent by the client to the kernel control in an + * mbuf packet chain. Your function is responsible for releasing + * mbuf packet chain. + * @param flags The flags specified by the client when calling + * send/sendto/sendmsg (MSG_OOB/MSG_DONTROUTE). */ typedef errno_t (*ctl_send_list_func)(kern_ctl_ref kctlref, u_int32_t unit, void *unitinfo, - mbuf_t m, int flags); - -/*! - @typedef ctl_bind_func - @discussion The ctl_bind_func is an optional function that allows the client - to set up their unitinfo prior to connecting. 
- @param kctlref The control ref for the kernel control the client is - binding to. - @param sac The address used to connect to this control. The field sc_unit - contains the unit number of the kernel control instance the client is - binding to. If CTL_FLAG_REG_ID_UNIT was set when the kernel control - was registered, sc_unit is the ctl_unit of the kern_ctl_reg structure. - If CTL_FLAG_REG_ID_UNIT was not set when the kernel control was - registered, sc_unit is the dynamically allocated unit number of - the new kernel control instance that is used for this connection. - @param unitinfo A placeholder for a pointer to the optional user-defined - private data associated with this kernel control instance. This - opaque info will be provided to the user when the rest of the - callback routines are executed. For example, it can be used - to pass a pointer to an instance-specific data structure in - order for the user to keep track of the states related to this - kernel control instance. + mbuf_t m, int flags); + +/*! + * @typedef ctl_bind_func + * @discussion The ctl_bind_func is an optional function that allows the client + * to set up their unitinfo prior to connecting. + * @param kctlref The control ref for the kernel control the client is + * binding to. + * @param sac The address used to connect to this control. The field sc_unit + * contains the unit number of the kernel control instance the client is + * binding to. If CTL_FLAG_REG_ID_UNIT was set when the kernel control + * was registered, sc_unit is the ctl_unit of the kern_ctl_reg structure. + * If CTL_FLAG_REG_ID_UNIT was not set when the kernel control was + * registered, sc_unit is the dynamically allocated unit number of + * the new kernel control instance that is used for this connection. + * @param unitinfo A placeholder for a pointer to the optional user-defined + * private data associated with this kernel control instance. This + * opaque info will be provided to the user when the rest of the + * callback routines are executed. For example, it can be used + * to pass a pointer to an instance-specific data structure in + * order for the user to keep track of the states related to this + * kernel control instance. */ typedef errno_t (*ctl_bind_func)(kern_ctl_ref kctlref, - struct sockaddr_ctl *sac, - void **unitinfo); + struct sockaddr_ctl *sac, + void **unitinfo); #endif /* KERNEL_PRIVATE */ /*! - @struct kern_ctl_reg - @discussion This structure defines the properties of a kernel - control being registered. - @field ctl_name A Bundle ID string of up to MAX_KCTL_NAME bytes (including the ending zero). - This string should not be empty. - @field ctl_id The control ID may be dynamically assigned or it can be a - 32-bit creator code assigned by DTS. - For a DTS assigned creator code the CTL_FLAG_REG_ID_UNIT flag must be set. - For a dynamically assigned control ID, do not set the CTL_FLAG_REG_ID_UNIT flag. - The value of the dynamically assigned control ID is set to this field - when the registration succeeds. - @field ctl_unit A separate unit number to register multiple units that - share the same control ID with DTS assigned creator code when - the CTL_FLAG_REG_ID_UNIT flag is set. - This field is ignored for a dynamically assigned control ID. - @field ctl_flags CTL_FLAG_PRIVILEGED and/or CTL_FLAG_REG_ID_UNIT. - @field ctl_sendsize Override the default send size. If set to zero, - the default send size will be used, and this default value - is set to this field to be retrieved by the caller. 
- @field ctl_recvsize Override the default receive size. If set to - zero, the default receive size will be used, and this default value - is set to this field to be retrieved by the caller. - @field ctl_connect Specify the function to be called whenever a client - connects to the kernel control. This field must be specified. - @field ctl_disconnect Specify a function to be called whenever a - client disconnects from the kernel control. - @field ctl_send Specify a function to handle data send from the - client to the kernel control. - @field ctl_setopt Specify a function to handle set socket option - operations for the kernel control. - @field ctl_getopt Specify a function to handle get socket option - operations for the kernel control. -*/ -struct kern_ctl_reg -{ + * @struct kern_ctl_reg + * @discussion This structure defines the properties of a kernel + * control being registered. + * @field ctl_name A Bundle ID string of up to MAX_KCTL_NAME bytes (including the ending zero). + * This string should not be empty. + * @field ctl_id The control ID may be dynamically assigned or it can be a + * 32-bit creator code assigned by DTS. + * For a DTS assigned creator code the CTL_FLAG_REG_ID_UNIT flag must be set. + * For a dynamically assigned control ID, do not set the CTL_FLAG_REG_ID_UNIT flag. + * The value of the dynamically assigned control ID is set to this field + * when the registration succeeds. + * @field ctl_unit A separate unit number to register multiple units that + * share the same control ID with DTS assigned creator code when + * the CTL_FLAG_REG_ID_UNIT flag is set. + * This field is ignored for a dynamically assigned control ID. + * @field ctl_flags CTL_FLAG_PRIVILEGED and/or CTL_FLAG_REG_ID_UNIT. + * @field ctl_sendsize Override the default send size. If set to zero, + * the default send size will be used, and this default value + * is set to this field to be retrieved by the caller. + * @field ctl_recvsize Override the default receive size. If set to + * zero, the default receive size will be used, and this default value + * is set to this field to be retrieved by the caller. + * @field ctl_connect Specify the function to be called whenever a client + * connects to the kernel control. This field must be specified. + * @field ctl_disconnect Specify a function to be called whenever a + * client disconnects from the kernel control. + * @field ctl_send Specify a function to handle data sent from the + * client to the kernel control. + * @field ctl_setopt Specify a function to handle set socket option + * operations for the kernel control. + * @field ctl_getopt Specify a function to handle get socket option + * operations for the kernel control.
+ */ +struct kern_ctl_reg { /* control information */ - char ctl_name[MAX_KCTL_NAME]; - u_int32_t ctl_id; - u_int32_t ctl_unit; - - /* control settings */ - u_int32_t ctl_flags; - u_int32_t ctl_sendsize; - u_int32_t ctl_recvsize; - - /* Dispatch functions */ - ctl_connect_func ctl_connect; - ctl_disconnect_func ctl_disconnect; - ctl_send_func ctl_send; - ctl_setopt_func ctl_setopt; - ctl_getopt_func ctl_getopt; + char ctl_name[MAX_KCTL_NAME]; + u_int32_t ctl_id; + u_int32_t ctl_unit; + + /* control settings */ + u_int32_t ctl_flags; + u_int32_t ctl_sendsize; + u_int32_t ctl_recvsize; + + /* Dispatch functions */ + ctl_connect_func ctl_connect; + ctl_disconnect_func ctl_disconnect; + ctl_send_func ctl_send; + ctl_setopt_func ctl_setopt; + ctl_getopt_func ctl_getopt; #ifdef KERNEL_PRIVATE - ctl_rcvd_func ctl_rcvd; /* Only valid if CTL_FLAG_REG_EXTENDED is set */ - ctl_send_list_func ctl_send_list; /* Only valid if CTL_FLAG_REG_EXTENDED is set */ - ctl_bind_func ctl_bind; + ctl_rcvd_func ctl_rcvd; /* Only valid if CTL_FLAG_REG_EXTENDED is set */ + ctl_send_list_func ctl_send_list; /* Only valid if CTL_FLAG_REG_EXTENDED is set */ + ctl_bind_func ctl_bind; #endif /* KERNEL_PRIVATE */ }; /*! - @function ctl_register - @discussion Register a kernel control. This will enable clients to - connect to the kernel control using a PF_SYSTEM socket. - @param userkctl A structure defining the kernel control to be - attached. The ctl_connect callback must be specified, the other callbacks - are optional. If ctl_connect is set to zero, ctl_register fails with - the error code EINVAL. - @param kctlref Upon successful return, the kctlref will contain a - reference to the attached kernel control. This reference is used - to unregister the kernel control. This reference will also be - passed in to the callbacks each time they are called. - @result 0 - Kernel control was registered. - EINVAL - The registration structure was not valid. - ENOMEM - There was insufficient memory. - EEXIST - A controller with that id/unit is already registered. + * @function ctl_register + * @discussion Register a kernel control. This will enable clients to + * connect to the kernel control using a PF_SYSTEM socket. + * @param userkctl A structure defining the kernel control to be + * attached. The ctl_connect callback must be specified; the other callbacks + * are optional. If ctl_connect is set to zero, ctl_register fails with + * the error code EINVAL. + * @param kctlref Upon successful return, the kctlref will contain a + * reference to the attached kernel control. This reference is used + * to unregister the kernel control. This reference will also be + * passed in to the callbacks each time they are called. + * @result 0 - Kernel control was registered. + * EINVAL - The registration structure was not valid. + * ENOMEM - There was insufficient memory. + * EEXIST - A controller with that id/unit is already registered. + */ +errno_t +ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref); + +/*! + * @function ctl_deregister + * @discussion Unregister a kernel control. A kernel extension must + * unregister its kernel control(s) before unloading. If a kernel + * control has clients attached, this call will fail. + * @param kctlref The control reference of the control to unregister. + * @result 0 - Kernel control was unregistered. + * EINVAL - The kernel control reference was invalid. + * EBUSY - The kernel control has clients still attached. + */ +errno_t +ctl_deregister(kern_ctl_ref kctlref); +
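[Editor's example] A minimal registration sketch tying the pieces above together; the control name, function names, and global are illustrative and not part of this patch. Only ctl_connect is mandatory, and a zeroed structure requests a dynamically assigned control ID with default buffer sizes:

#include <sys/kern_control.h>
#include <libkern/libkern.h>

static kern_ctl_ref g_kctlref;

static errno_t
example_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, void **unitinfo)
{
	*unitinfo = NULL;       /* per-instance state could be allocated here */
	return 0;
}

static errno_t
example_start(void)
{
	struct kern_ctl_reg reg = { 0 };

	strlcpy(reg.ctl_name, "com.example.kctl", sizeof(reg.ctl_name));
	reg.ctl_connect = example_connect;      /* the only mandatory callback */
	return ctl_register(&reg, &g_kctlref);  /* undo with ctl_deregister() */
}

+/*!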
+ * @function ctl_enqueuedata + * @discussion Send data from the kernel control to the client. + * @param kctlref The control reference of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param data A pointer to the data to send. + * @param len The length of data to send. + * @param flags Send flags. CTL_DATA_NOWAKEUP and CTL_DATA_EOR are currently + * the only supported flags. + * @result 0 - Data was enqueued to be read by the client. + * EINVAL - Invalid parameters. + * EMSGSIZE - The buffer is too large. + * ENOBUFS - The queue is full or there are no free mbufs. */ errno_t -ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref); - -/*! - @function ctl_deregister - @discussion Unregister a kernel control. A kernel extension must - unregister it's kernel control(s) before unloading. If a kernel - control has clients attached, this call will fail. - @param kctlref The control reference of the control to unregister. - @result 0 - Kernel control was unregistered. - EINVAL - The kernel control reference was invalid. - EBUSY - The kernel control has clients still attached. - */ -errno_t -ctl_deregister(kern_ctl_ref kctlref); - -/*! - @function ctl_enqueuedata - @discussion Send data from the kernel control to the client. - @param kctlref The control reference of the kernel control. - @param unit The unit number of the kernel control instance. - @param data A pointer to the data to send. - @param len The length of data to send. - @param flags Send flags. CTL_DATA_NOWAKEUP and CTL_DATA_EOR are currently - the only supported flags. - @result 0 - Data was enqueued to be read by the client. - EINVAL - Invalid parameters. - EMSGSIZE - The buffer is too large. - ENOBUFS - The queue is full or there are no free mbufs. - */ -errno_t ctl_enqueuedata(kern_ctl_ref kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags); /*! - @function ctl_enqueuembuf - @discussion Send data stored in an mbuf chain from the kernel - control to the client. The caller is responsible for freeing - the mbuf chain if ctl_enqueuembuf returns an error. - @param kctlref The control reference of the kernel control. - @param unit The unit number of the kernel control instance. - @param m An mbuf chain containing the data to send to the client. - @param flags Send flags. CTL_DATA_NOWAKEUP and CTL_DATA_EOR are currently - the only supported flags. - @result 0 - Data was enqueued to be read by the client. - EINVAL - Invalid parameters. - ENOBUFS - The queue is full. - */ -errno_t + * @function ctl_enqueuembuf + * @discussion Send data stored in an mbuf chain from the kernel + * control to the client. The caller is responsible for freeing + * the mbuf chain if ctl_enqueuembuf returns an error. + * @param kctlref The control reference of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param m An mbuf chain containing the data to send to the client. + * @param flags Send flags. CTL_DATA_NOWAKEUP and CTL_DATA_EOR are currently + * the only supported flags. + * @result 0 - Data was enqueued to be read by the client. + * EINVAL - Invalid parameters. + * ENOBUFS - The queue is full. + */ +errno_t ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, mbuf_t m, u_int32_t flags); #ifdef PRIVATE /*! - @function ctl_enqueuembuf_list - @discussion Send data stored in an mbuf packet chain from the kernel - control to the client. The caller is responsible for freeing - the mbuf chain if ctl_enqueuembuf returns an error. 
- Not valid if ctl_flags contains CTL_FLAG_REG_SOCK_STREAM. - @param kctlref The control reference of the kernel control. - @param unit The unit number of the kernel control instance. - @param m_list An mbuf chain containing the data to send to the client. - @param flags Send flags. CTL_DATA_NOWAKEUP is - the only supported flags. - @param m_remain A pointer to the list of mbuf packets in the chain that - could not be enqueued. - @result 0 - Data was enqueued to be read by the client. - EINVAL - Invalid parameters. - ENOBUFS - The queue is full. - */ -errno_t + * @function ctl_enqueuembuf_list + * @discussion Send data stored in an mbuf packet chain from the kernel + * control to the client. The caller is responsible for freeing + * the mbuf chain if ctl_enqueuembuf_list returns an error. + * Not valid if ctl_flags contains CTL_FLAG_REG_SOCK_STREAM. + * @param kctlref The control reference of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param m_list An mbuf chain containing the data to send to the client. + * @param flags Send flags. CTL_DATA_NOWAKEUP is + * the only supported flag. + * @param m_remain A pointer to the list of mbuf packets in the chain that + * could not be enqueued. + * @result 0 - Data was enqueued to be read by the client. + * EINVAL - Invalid parameters. + * ENOBUFS - The queue is full. + */ +errno_t ctl_enqueuembuf_list(kern_ctl_ref kctlref, u_int32_t unit, mbuf_t m_list, - u_int32_t flags, mbuf_t *m_remain); + u_int32_t flags, mbuf_t *m_remain); /*! - @function ctl_getenqueuepacketcount - @discussion Retrieve the number of packets in the socket - receive buffer. - @param kctlref The control reference of the kernel control. - @param unit The unit number of the kernel control instance. - @param pcnt The address where to return the current count. - @result 0 - Success; the packet count is returned to caller. - EINVAL - Invalid parameters. + * @function ctl_getenqueuepacketcount + * @discussion Retrieve the number of packets in the socket + * receive buffer. + * @param kctlref The control reference of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param pcnt The address at which to return the current count. + * @result 0 - Success; the packet count is returned to caller. + * EINVAL - Invalid parameters. */ errno_t ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt); @@ -608,30 +607,30 @@ ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt) #endif /* PRIVATE */ /*! - @function ctl_getenqueuespace - @discussion Retrieve the amount of space currently available for data to be sent - from the kernel control to the client. - @param kctlref The control reference of the kernel control. - @param unit The unit number of the kernel control instance. - @param space The address where to return the current space available - @result 0 - Success; the amount of space is returned to caller. - EINVAL - Invalid parameters. + * @function ctl_getenqueuespace + * @discussion Retrieve the amount of space currently available for data to be sent + * from the kernel control to the client. + * @param kctlref The control reference of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param space The address at which to return the current space available. + * @result 0 - Success; the amount of space is returned to caller. + * EINVAL - Invalid parameters. */
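[Editor's example] A sketch of send-side flow control combining ctl_getenqueuespace (declared just below) with ctl_enqueuedata; the function name is illustrative and not part of this patch:

static errno_t
example_try_send(kern_ctl_ref kctlref, u_int32_t unit, void *buf, size_t buflen)
{
	size_t space = 0;
	errno_t err = ctl_getenqueuespace(kctlref, unit, &space);

	if (err != 0) {
		return err;
	}
	if (space < buflen) {
		/* Receive buffer full: retry when the ctl_rcvd callback fires. */
		return ENOBUFS;
	}
	return ctl_enqueuedata(kctlref, unit, buf, buflen, CTL_DATA_EOR);
}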
-errno_t +errno_t ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space); /*! - @function ctl_getenqueuereadable - @discussion Retrieve the difference between enqueued bytes and - low-water mark for the socket receive buffer. - @param kctlref The control reference of the kernel control. - @param unit The unit number of the kernel control instance. - @param difference The address at which to return the current difference - between the low-water mark for the socket and the number of bytes - enqueued. 0 indicates that the socket is readable by the client - (the number of bytes in the buffer is above the low-water mark). - @result 0 - Success; the difference is returned to caller. - EINVAL - Invalid parameters. + * @function ctl_getenqueuereadable + * @discussion Retrieve the difference between enqueued bytes and + * low-water mark for the socket receive buffer. + * @param kctlref The control reference of the kernel control. + * @param unit The unit number of the kernel control instance. + * @param difference The address at which to return the current difference + * between the low-water mark for the socket and the number of bytes + * enqueued. 0 indicates that the socket is readable by the client + * (the number of bytes in the buffer is above the low-water mark). + * @result 0 - Success; the difference is returned to caller. + * EINVAL - Invalid parameters. */ errno_t ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *difference); @@ -659,4 +658,3 @@ __END_DECLS #endif /* KERNEL */ #endif /* KPI_KERN_CONTROL_H */ - diff --git a/bsd/sys/kern_event.h b/bsd/sys/kern_event.h index bd65869e5..8a54be549 100644 --- a/bsd/sys/kern_event.h +++ b/bsd/sys/kern_event.h @@ -27,279 +27,278 @@ */ /* Copyright (c) 1998, 1999 Apple Computer, Inc. All Rights Reserved */ /*! - @header kern_event.h - This header defines in-kernel functions for generating kernel events as - well as functions for receiving kernel events using a kernel event - socket. + * @header kern_event.h + * This header defines in-kernel functions for generating kernel events as + * well as functions for receiving kernel events using a kernel event + * socket. */ #ifndef SYS_KERN_EVENT_H -#define SYS_KERN_EVENT_H +#define SYS_KERN_EVENT_H #include #include #include -#define KEV_SNDSPACE (4 * 1024) -#define KEV_RECVSPACE (32 * 1024) +#define KEV_SNDSPACE (4 * 1024) +#define KEV_RECVSPACE (32 * 1024) -#define KEV_ANY_VENDOR 0 -#define KEV_ANY_CLASS 0 -#define KEV_ANY_SUBCLASS 0 +#define KEV_ANY_VENDOR 0 +#define KEV_ANY_CLASS 0 +#define KEV_ANY_SUBCLASS 0 /* * Vendor Code */ /*! - @defined KEV_VENDOR_APPLE - @discussion Apple generated kernel events use the hard coded vendor code - value of 1. Third party kernel events use a dynamically allocated vendor - code. The vendor code can be found using the SIOCGKEVVENDOR ioctl. -*/ -#define KEV_VENDOR_APPLE 1 + * @defined KEV_VENDOR_APPLE + * @discussion Apple-generated kernel events use the hard-coded vendor code + * value of 1. Third party kernel events use a dynamically allocated vendor + * code. The vendor code can be found using the SIOCGKEVVENDOR ioctl. + */ +#define KEV_VENDOR_APPLE 1 /* * Definition of top-level classifications for KEV_VENDOR_APPLE */ /*! - @defined KEV_NETWORK_CLASS - @discussion Network kernel event class. + * @defined KEV_NETWORK_CLASS + * @discussion Network kernel event class. */ -#define KEV_NETWORK_CLASS 1 +#define KEV_NETWORK_CLASS 1 /*! - @defined KEV_IOKIT_CLASS - @discussion IOKit kernel event class.
+ * @defined KEV_IOKIT_CLASS + * @discussion IOKit kernel event class. */ -#define KEV_IOKIT_CLASS 2 +#define KEV_IOKIT_CLASS 2 /*! - @defined KEV_SYSTEM_CLASS - @discussion System kernel event class. + * @defined KEV_SYSTEM_CLASS + * @discussion System kernel event class. */ -#define KEV_SYSTEM_CLASS 3 +#define KEV_SYSTEM_CLASS 3 /*! - @defined KEV_APPLESHARE_CLASS - @discussion AppleShare kernel event class. + * @defined KEV_APPLESHARE_CLASS + * @discussion AppleShare kernel event class. */ -#define KEV_APPLESHARE_CLASS 4 +#define KEV_APPLESHARE_CLASS 4 /*! - @defined KEV_FIREWALL_CLASS - @discussion Firewall kernel event class. + * @defined KEV_FIREWALL_CLASS + * @discussion Firewall kernel event class. */ -#define KEV_FIREWALL_CLASS 5 +#define KEV_FIREWALL_CLASS 5 /*! - @defined KEV_IEEE80211_CLASS - @discussion IEEE 802.11 kernel event class. + * @defined KEV_IEEE80211_CLASS + * @discussion IEEE 802.11 kernel event class. */ -#define KEV_IEEE80211_CLASS 6 +#define KEV_IEEE80211_CLASS 6 /*! - @struct kern_event_msg - @discussion This structure is prepended to all kernel events. This - structure is used to determine the format of the remainder of - the kernel event. This structure will appear on all messages - received on a kernel event socket. To post a kernel event, a - slightly different structure is used. - @field total_size Total size of the kernel event message including the - header. - @field vendor_code The vendor code indicates which vendor generated the - kernel event. This gives every vendor a unique set of classes - and subclasses to use. Use the SIOCGKEVVENDOR ioctl to look up - vendor codes for vendors other than Apple. Apple uses - KEV_VENDOR_APPLE. - @field kev_class The class of the kernel event. - @field kev_subclass The subclass of the kernel event. - @field id Monotonically increasing value. - @field event_code The event code. - @field event_data Any additional data about this event. Format will - depend on the vendor_code, kev_class, kev_subclass, and - event_code. The length of the event_data can be determined - using total_size - KEV_MSG_HEADER_SIZE. + * @struct kern_event_msg + * @discussion This structure is prepended to all kernel events. This + * structure is used to determine the format of the remainder of + * the kernel event. This structure will appear on all messages + * received on a kernel event socket. To post a kernel event, a + * slightly different structure is used. + * @field total_size Total size of the kernel event message including the + * header. + * @field vendor_code The vendor code indicates which vendor generated the + * kernel event. This gives every vendor a unique set of classes + * and subclasses to use. Use the SIOCGKEVVENDOR ioctl to look up + * vendor codes for vendors other than Apple. Apple uses + * KEV_VENDOR_APPLE. + * @field kev_class The class of the kernel event. + * @field kev_subclass The subclass of the kernel event. + * @field id Monotonically increasing value. + * @field event_code The event code. + * @field event_data Any additional data about this event. Format will + * depend on the vendor_code, kev_class, kev_subclass, and + * event_code. The length of the event_data can be determined + * using total_size - KEV_MSG_HEADER_SIZE. 
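 */

[Editor's example] A user-space sketch of reading one event using the structure defined just below; the buffer size and read pattern are illustrative assumptions, not part of this patch:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/kern_event.h>

static void
example_read_one_event(int s)
{
	union {
		struct kern_event_msg ev;
		char bytes[1024];               /* illustrative buffer size */
	} u;
	ssize_t n = recv(s, &u, sizeof(u), 0);

	if (n < (ssize_t)KEV_MSG_HEADER_SIZE) {
		return;                         /* short read or error */
	}
	/* Payload length per the total_size discussion above. */
	size_t datalen = u.ev.total_size - KEV_MSG_HEADER_SIZE;
	printf("event %u class %u/%u code %u, %zu payload bytes\n",
	    u.ev.id, u.ev.kev_class, u.ev.kev_subclass, u.ev.event_code, datalen);
}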
struct kern_event_msg { - u_int32_t total_size; /* Size of entire event msg */ - u_int32_t vendor_code; /* For non-Apple extensibility */ - u_int32_t kev_class; /* Layer of event source */ - u_int32_t kev_subclass; /* Component within layer */ - u_int32_t id; /* Monotonically increasing value */ - u_int32_t event_code; /* unique code */ - u_int32_t event_data[1]; /* One or more data words */ + u_int32_t total_size; /* Size of entire event msg */ + u_int32_t vendor_code; /* For non-Apple extensibility */ + u_int32_t kev_class; /* Layer of event source */ + u_int32_t kev_subclass; /* Component within layer */ + u_int32_t id; /* Monotonically increasing value */ + u_int32_t event_code; /* unique code */ + u_int32_t event_data[1]; /* One or more data words */ }; /*! - @defined KEV_MSG_HEADER_SIZE - @discussion Size of the header portion of the kern_event_msg structure. - This accounts for everything right up to event_data. The size - of the data can be found by subtracting KEV_MSG_HEADER_SIZE - from the total size from the kern_event_msg. + * @defined KEV_MSG_HEADER_SIZE + * @discussion Size of the header portion of the kern_event_msg structure. + * This accounts for everything right up to event_data. The size + * of the data can be found by subtracting KEV_MSG_HEADER_SIZE + * from the total size of the kern_event_msg. */ -#define KEV_MSG_HEADER_SIZE (offsetof(struct kern_event_msg, event_data[0])) +#define KEV_MSG_HEADER_SIZE (offsetof(struct kern_event_msg, event_data[0])) /*! - @struct kev_request - @discussion This structure is used with the SIOCSKEVFILT and - SIOCGKEVFILT to set and get the control filter setting for a - kernel control socket. - @field total_size Total size of the kernel event message including the - header. - @field vendor_code All kernel events that don't match this vendor code - will be ignored. KEV_ANY_VENDOR can be used to receive kernel - events with any vendor code. - @field kev_class All kernel events that don't match this class will be - ignored. KEV_ANY_CLASS can be used to receive kernel events with - any class. - @field kev_subclass All kernel events that don't match this subclass - will be ignored. KEV_ANY_SUBCLASS can be used to receive kernel - events with any subclass. + * @struct kev_request + * @discussion This structure is used with the SIOCSKEVFILT and + * SIOCGKEVFILT to set and get the event filter setting for a + * kernel event socket. + * @field vendor_code All kernel events that don't match this vendor code + * will be ignored. KEV_ANY_VENDOR can be used to receive kernel + * events with any vendor code. + * @field kev_class All kernel events that don't match this class will be + * ignored. KEV_ANY_CLASS can be used to receive kernel events with + * any class. + * @field kev_subclass All kernel events that don't match this subclass + * will be ignored. KEV_ANY_SUBCLASS can be used to receive kernel + * events with any subclass. */ struct kev_request { - u_int32_t vendor_code; - u_int32_t kev_class; - u_int32_t kev_subclass; + u_int32_t vendor_code; + u_int32_t kev_class; + u_int32_t kev_subclass; }; /*! - @defined KEV_VENDOR_CODE_MAX_STR_LEN - @discussion This define sets the maximum length of a string that can be - used to identify a vendor or kext when looking up a vendor code.
+ * @defined KEV_VENDOR_CODE_MAX_STR_LEN + * @discussion This define sets the maximum length of a string that can be + * used to identify a vendor or kext when looking up a vendor code. */ -#define KEV_VENDOR_CODE_MAX_STR_LEN 200 +#define KEV_VENDOR_CODE_MAX_STR_LEN 200 /*! - @struct kev_vendor_code - @discussion This structure is used with the SIOCGKEVVENDOR ioctl to - convert from a string identifying a kext or vendor, in the - form of a bundle identifier, to a vendor code. - @field vendor_code After making the SIOCGKEVVENDOR ioctl call, this will - be filled in with the vendor code if there is one. - @field vendor_string A bundle style identifier. + * @struct kev_vendor_code + * @discussion This structure is used with the SIOCGKEVVENDOR ioctl to + * convert from a string identifying a kext or vendor, in the + * form of a bundle identifier, to a vendor code. + * @field vendor_code After making the SIOCGKEVVENDOR ioctl call, this will + * be filled in with the vendor code if there is one. + * @field vendor_string A bundle style identifier. */ #pragma pack(4) struct kev_vendor_code { - u_int32_t vendor_code; - char vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN]; + u_int32_t vendor_code; + char vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN]; }; #pragma pack() /*! - @defined SIOCGKEVID - @discussion Retrieve the current event id. Each event generated will - have a new id. The next event to be generated will have an id - of id+1. + * @defined SIOCGKEVID + * @discussion Retrieve the current event id. Each event generated will + * have a new id. The next event to be generated will have an id + * of id+1. */ -#define SIOCGKEVID _IOR('e', 1, u_int32_t) +#define SIOCGKEVID _IOR('e', 1, u_int32_t) /*! - @defined SIOCSKEVFILT - @discussion Set the kernel event filter for this socket. Kernel events - not matching this filter will not be received on this socket. + * @defined SIOCSKEVFILT + * @discussion Set the kernel event filter for this socket. Kernel events + * not matching this filter will not be received on this socket. */ -#define SIOCSKEVFILT _IOW('e', 2, struct kev_request) +#define SIOCSKEVFILT _IOW('e', 2, struct kev_request) /*! - @defined SIOCGKEVFILT - @discussion Retrieve the kernel event filter for this socket. Kernel - events not matching this filter will not be received on this - socket. + * @defined SIOCGKEVFILT + * @discussion Retrieve the kernel event filter for this socket. Kernel + * events not matching this filter will not be received on this + * socket. */ -#define SIOCGKEVFILT _IOR('e', 3, struct kev_request) +#define SIOCGKEVFILT _IOR('e', 3, struct kev_request) /*! - @defined SIOCGKEVVENDOR - @discussion Lookup the vendor code for the specified vendor. ENOENT will - be returned if a vendor code for that vendor string does not - exist. + * @defined SIOCGKEVVENDOR + * @discussion Look up the vendor code for the specified vendor. ENOENT will + * be returned if a vendor code for that vendor string does not + * exist. */
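[Editor's example] A user-space sketch of opening a kernel event socket and installing the filter described above; error handling is minimal and the function name is illustrative:

#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>

static int
example_open_event_socket(void)
{
	struct kev_request req = {
		.vendor_code  = KEV_VENDOR_APPLE,
		.kev_class    = KEV_NETWORK_CLASS,
		.kev_subclass = KEV_ANY_SUBCLASS,
	};
	int s = socket(PF_SYSTEM, SOCK_RAW, SYSPROTO_EVENT);

	if (s < 0) {
		return -1;
	}
	if (ioctl(s, SIOCSKEVFILT, &req) == -1) {
		close(s);
		return -1;
	}
	return s;               /* ready for recv() of kern_event_msg */
}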
-#define SIOCGKEVVENDOR _IOWR('e', 4, struct kev_vendor_code) +#define SIOCGKEVVENDOR _IOWR('e', 4, struct kev_vendor_code) #ifdef PRIVATE struct xkevtpcb { - u_int32_t kep_len; - u_int32_t kep_kind; - u_int64_t kep_evtpcb; - u_int32_t kep_vendor_code_filter; - u_int32_t kep_class_filter; - u_int32_t kep_subclass_filter; + u_int32_t kep_len; + u_int32_t kep_kind; + u_int64_t kep_evtpcb; + u_int32_t kep_vendor_code_filter; + u_int32_t kep_class_filter; + u_int32_t kep_subclass_filter; }; struct kevtstat { - u_int64_t kes_pcbcount __attribute__((aligned(8))); - u_int64_t kes_gencnt __attribute__((aligned(8))); - u_int64_t kes_badvendor __attribute__((aligned(8))); - u_int64_t kes_toobig __attribute__((aligned(8))); - u_int64_t kes_nomem __attribute__((aligned(8))); - u_int64_t kes_fullsock __attribute__((aligned(8))); - u_int64_t kes_posted __attribute__((aligned(8))); - + u_int64_t kes_pcbcount __attribute__((aligned(8))); + u_int64_t kes_gencnt __attribute__((aligned(8))); + u_int64_t kes_badvendor __attribute__((aligned(8))); + u_int64_t kes_toobig __attribute__((aligned(8))); + u_int64_t kes_nomem __attribute__((aligned(8))); + u_int64_t kes_fullsock __attribute__((aligned(8))); + u_int64_t kes_posted __attribute__((aligned(8))); }; #endif /* PRIVATE */ #ifdef KERNEL /*! - @define N_KEV_VECTORS - @discussion The maximum number of kev_d_vectors for a kernel event. + * @define N_KEV_VECTORS + * @discussion The maximum number of kev_d_vectors for a kernel event. */ -#define N_KEV_VECTORS 5 +#define N_KEV_VECTORS 5 /*! - @struct kev_d_vectors - @discussion This structure is used to append some data to a kernel - event. - @field data_length The length of data. - @field data_ptr A pointer to data. + * @struct kev_d_vectors + * @discussion This structure is used to append some data to a kernel + * event. + * @field data_length The length of data. + * @field data_ptr A pointer to data. */ struct kev_d_vectors { - u_int32_t data_length; /* Length of the event data */ - void *data_ptr; /* Pointer to event data */ + u_int32_t data_length; /* Length of the event data */ + void *data_ptr; /* Pointer to event data */ }; /*! - @struct kev_msg - @discussion This structure is used when posting a kernel event. - @field vendor_code The vendor code assigned by kev_vendor_code_find. - @field kev_class The event's class. - @field kev_class The event's subclass. - @field kev_class The event's code. - @field dv An array of vectors describing additional data to be appended - to the kernel event. + * @struct kev_msg + * @discussion This structure is used when posting a kernel event. + * @field vendor_code The vendor code assigned by kev_vendor_code_find. + * @field kev_class The event's class. + * @field kev_subclass The event's subclass. + * @field event_code The event's code. + * @field dv An array of vectors describing additional data to be appended + * to the kernel event. */ struct kev_msg { - u_int32_t vendor_code; /* For non-Apple extensibility */ - u_int32_t kev_class; /* Layer of event source */ - u_int32_t kev_subclass; /* Component within layer */ - u_int32_t event_code; /* The event code */ - struct kev_d_vectors dv[N_KEV_VECTORS]; /* Up to n data vectors */ + u_int32_t vendor_code; /* For non-Apple extensibility */ + u_int32_t kev_class; /* Layer of event source */ + u_int32_t kev_subclass; /* Component within layer */ + u_int32_t event_code; /* The event code */ + struct kev_d_vectors dv[N_KEV_VECTORS]; /* Up to n data vectors */ };
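[Editor's example] A kernel-side sketch of posting an event with one data vector, using kev_vendor_code_find and kev_msg_post (both documented just below); the vendor string, codes, and payload are hypothetical:

#include <sys/kern_event.h>

static errno_t
example_post_event(void)
{
	static u_int32_t payload = 42;          /* hypothetical payload */
	u_int32_t vendor = 0;
	struct kev_msg msg = { 0 };
	errno_t err = kev_vendor_code_find("com.example.driver", &vendor);

	if (err != 0) {
		return err;
	}
	msg.vendor_code = vendor;
	msg.kev_class = 1;                      /* hypothetical class */
	msg.kev_subclass = 1;                   /* hypothetical subclass */
	msg.event_code = 1;                     /* hypothetical event code */
	msg.dv[0].data_length = sizeof(payload);
	msg.dv[0].data_ptr = &payload;
	return kev_msg_post(&msg);
}

/*!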
- @function kev_vendor_code_find - @discussion Lookup a vendor_code given a unique string. If the vendor - code has not been used since launch, a unique integer will be - assigned for that string. Vendor codes will remain the same - until the machine is rebooted. - @param vendor_string A bundle style vendor identifier(i.e. com.apple). - @param vendor_code Upon return, a unique vendor code for use when - posting kernel events. - @result May return ENOMEM if memory constraints prevent allocation of a - new vendor code. + * @function kev_vendor_code_find + * @discussion Look up a vendor_code given a unique string. If the vendor + * code has not been used since launch, a unique integer will be + * assigned for that string. Vendor codes will remain the same + * until the machine is rebooted. + * @param vendor_string A bundle style vendor identifier (e.g. com.apple). + * @param vendor_code Upon return, a unique vendor code for use when + * posting kernel events. + * @result May return ENOMEM if memory constraints prevent allocation of a + * new vendor code. */ -errno_t kev_vendor_code_find(const char *vendor_string, u_int32_t *vendor_code); +errno_t kev_vendor_code_find(const char *vendor_string, u_int32_t *vendor_code); /*! - @function kev_msg_post - @discussion Post a kernel event message. - @param event_msg A structure defining the kernel event message to post. - @result Will return zero upon success. May return a number of errors - depending on the type of failure. EINVAL indicates that there - was something wrong with the kerne event. The vendor code of - the kernel event must be assigned using kev_vendor_code_find. - If the message is too large, EMSGSIZE will be returned. + * @function kev_msg_post + * @discussion Post a kernel event message. + * @param event_msg A structure defining the kernel event message to post. + * @result Will return zero upon success. May return a number of errors + * depending on the type of failure. EINVAL indicates that there + * was something wrong with the kernel event. The vendor code of + * the kernel event must be assigned using kev_vendor_code_find. + * If the message is too large, EMSGSIZE will be returned. */ errno_t kev_msg_post(struct kev_msg *event_msg); @@ -308,20 +307,20 @@ errno_t kev_msg_post(struct kev_msg *event_msg); * Internal version of kev_msg_post. Allows posting Apple vendor code kernel * events. */ -int kev_post_msg(struct kev_msg *event); +int kev_post_msg(struct kev_msg *event); LIST_HEAD(kern_event_head, kern_event_pcb); struct kern_event_pcb { - decl_lck_mtx_data(, evp_mtx); /* per-socket mutex */ - LIST_ENTRY(kern_event_pcb) evp_link; /* glue on list of all PCBs */ - struct socket *evp_socket; /* pointer back to socket */ + decl_lck_mtx_data(, evp_mtx); /* per-socket mutex */ + LIST_ENTRY(kern_event_pcb) evp_link; /* glue on list of all PCBs */ + struct socket *evp_socket; /* pointer back to socket */ u_int32_t evp_vendor_code_filter; u_int32_t evp_class_filter; u_int32_t evp_subclass_filter; }; -#define sotoevpcb(so) ((struct kern_event_pcb *)((so)->so_pcb)) +#define sotoevpcb(so) ((struct kern_event_pcb *)((so)->so_pcb)) #endif /* PRIVATE */ #endif /* KERNEL */ diff --git a/bsd/sys/kern_memorystatus.h b/bsd/sys/kern_memorystatus.h index c3f5cec27..0ef5a8132 100644 --- a/bsd/sys/kern_memorystatus.h +++ b/bsd/sys/kern_memorystatus.h @@ -2,7 +2,7 @@ * Copyright (c) 2006-2018 Apple Computer, Inc. All rights reserved.
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -42,12 +42,12 @@ #define JETSAM_PRIORITY_IDLE_HEAD -2 /* The value -1 is an alias to JETSAM_PRIORITY_DEFAULT */ #define JETSAM_PRIORITY_IDLE 0 -#define JETSAM_PRIORITY_IDLE_DEFERRED 1 /* Keeping this around till all xnu_quick_tests can be moved away from it.*/ -#define JETSAM_PRIORITY_AGING_BAND1 JETSAM_PRIORITY_IDLE_DEFERRED +#define JETSAM_PRIORITY_IDLE_DEFERRED 1 /* Keeping this around till all xnu_quick_tests can be moved away from it.*/ +#define JETSAM_PRIORITY_AGING_BAND1 JETSAM_PRIORITY_IDLE_DEFERRED #define JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC 2 -#define JETSAM_PRIORITY_AGING_BAND2 JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC +#define JETSAM_PRIORITY_AGING_BAND2 JETSAM_PRIORITY_BACKGROUND_OPPORTUNISTIC #define JETSAM_PRIORITY_BACKGROUND 3 -#define JETSAM_PRIORITY_ELEVATED_INACTIVE JETSAM_PRIORITY_BACKGROUND +#define JETSAM_PRIORITY_ELEVATED_INACTIVE JETSAM_PRIORITY_BACKGROUND #define JETSAM_PRIORITY_MAIL 4 #define JETSAM_PRIORITY_PHONE 5 #define JETSAM_PRIORITY_UI_SUPPORT 8 @@ -92,7 +92,7 @@ typedef struct memorystatus_priority_entry { pid_t pid; int32_t priority; uint64_t user_data; - int32_t limit; /* MB */ + int32_t limit; /* MB */ uint32_t state; } memorystatus_priority_entry_t; @@ -103,24 +103,24 @@ typedef struct memorystatus_priority_entry { * above has been in use for a while now. We'll have to deprecate it. * * To support new fields/properties, we will add a new structure with a - * new version and a new size. + * new version and a new size. 
*/ -#define MEMORYSTATUS_MPE_VERSION_1 1 +#define MEMORYSTATUS_MPE_VERSION_1 1 -#define MEMORYSTATUS_MPE_VERSION_1_SIZE sizeof(struct memorystatus_properties_entry_v1) +#define MEMORYSTATUS_MPE_VERSION_1_SIZE sizeof(struct memorystatus_properties_entry_v1) -typedef struct memorystatus_properties_entry_v1 { +typedef struct memorystatus_properties_entry_v1 { int version; pid_t pid; int32_t priority; int use_probability; uint64_t user_data; - int32_t limit; /* MB */ + int32_t limit; /* MB */ uint32_t state; - char proc_name[MAXCOMLEN+1]; + char proc_name[MAXCOMLEN + 1]; char __pad1[3]; } memorystatus_properties_entry_v1_t; - + typedef struct memorystatus_kernel_stats { uint32_t free_pages; uint32_t active_pages; @@ -138,7 +138,7 @@ typedef struct memorystatus_kernel_stats { uint64_t zone_map_size; uint64_t zone_map_capacity; uint64_t largest_zone_size; - char largest_zone_name[MACH_ZONE_NAME_MAX_LEN]; + char largest_zone_name[MACH_ZONE_NAME_MAX_LEN]; } memorystatus_kernel_stats_t; /* @@ -148,7 +148,7 @@ typedef struct memorystatus_kernel_stats { typedef struct jetsam_snapshot_entry { pid_t pid; - char name[(2*MAXCOMLEN)+1]; + char name[(2 * MAXCOMLEN) + 1]; int32_t priority; uint32_t state; uint32_t fds; @@ -167,28 +167,28 @@ typedef struct jetsam_snapshot_entry { uint64_t jse_iokit_mapped_pages; uint64_t jse_page_table_pages; uint64_t jse_memory_region_count; - uint64_t jse_gencount; /* memorystatus_thread generation counter */ - uint64_t jse_starttime; /* absolute time when process starts */ - uint64_t jse_killtime; /* absolute time when jetsam chooses to kill a process */ - uint64_t jse_idle_delta; /* time spent in idle band */ - uint64_t jse_coalition_jetsam_id; /* we only expose coalition id for COALITION_TYPE_JETSAM */ + uint64_t jse_gencount; /* memorystatus_thread generation counter */ + uint64_t jse_starttime; /* absolute time when process starts */ + uint64_t jse_killtime; /* absolute time when jetsam chooses to kill a process */ + uint64_t jse_idle_delta; /* time spent in idle band */ + uint64_t jse_coalition_jetsam_id; /* we only expose coalition id for COALITION_TYPE_JETSAM */ struct timeval64 cpu_time; uint64_t jse_thaw_count; } memorystatus_jetsam_snapshot_entry_t; typedef struct jetsam_snapshot { - uint64_t snapshot_time; /* absolute time snapshot was initialized */ - uint64_t notification_time; /* absolute time snapshot was consumed */ - uint64_t js_gencount; /* memorystatus_thread generation counter */ - memorystatus_kernel_stats_t stats; /* system stat when snapshot is initialized */ + uint64_t snapshot_time; /* absolute time snapshot was initialized */ + uint64_t notification_time; /* absolute time snapshot was consumed */ + uint64_t js_gencount; /* memorystatus_thread generation counter */ + memorystatus_kernel_stats_t stats; /* system stat when snapshot is initialized */ size_t entry_count; memorystatus_jetsam_snapshot_entry_t entries[]; } memorystatus_jetsam_snapshot_t; typedef struct memorystatus_freeze_entry { - int32_t pid; - uint32_t flags; - uint32_t pages; + int32_t pid; + uint32_t flags; + uint32_t pages; } memorystatus_freeze_entry_t; /* TODO - deprecate; see */ @@ -210,48 +210,48 @@ typedef struct memorystatus_freeze_entry { * kMemorystatusKilled... 
Cause enum * memorystatus_kill_cause_name[] */ -#define JETSAM_REASON_INVALID 0 -#define JETSAM_REASON_GENERIC 1 -#define JETSAM_REASON_MEMORY_HIGHWATER 2 -#define JETSAM_REASON_VNODE 3 -#define JETSAM_REASON_MEMORY_VMPAGESHORTAGE 4 -#define JETSAM_REASON_MEMORY_PROCTHRASHING 5 -#define JETSAM_REASON_MEMORY_FCTHRASHING 6 -#define JETSAM_REASON_MEMORY_PERPROCESSLIMIT 7 -#define JETSAM_REASON_MEMORY_DISK_SPACE_SHORTAGE 8 -#define JETSAM_REASON_MEMORY_IDLE_EXIT 9 -#define JETSAM_REASON_ZONE_MAP_EXHAUSTION 10 -#define JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING 11 -#define JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE 12 - -#define JETSAM_REASON_MEMORYSTATUS_MAX JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE +#define JETSAM_REASON_INVALID 0 +#define JETSAM_REASON_GENERIC 1 +#define JETSAM_REASON_MEMORY_HIGHWATER 2 +#define JETSAM_REASON_VNODE 3 +#define JETSAM_REASON_MEMORY_VMPAGESHORTAGE 4 +#define JETSAM_REASON_MEMORY_PROCTHRASHING 5 +#define JETSAM_REASON_MEMORY_FCTHRASHING 6 +#define JETSAM_REASON_MEMORY_PERPROCESSLIMIT 7 +#define JETSAM_REASON_MEMORY_DISK_SPACE_SHORTAGE 8 +#define JETSAM_REASON_MEMORY_IDLE_EXIT 9 +#define JETSAM_REASON_ZONE_MAP_EXHAUSTION 10 +#define JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING 11 +#define JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE 12 + +#define JETSAM_REASON_MEMORYSTATUS_MAX JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE /* * Jetsam exit reason definitions - not related to memorystatus */ -#define JETSAM_REASON_CPULIMIT 100 +#define JETSAM_REASON_CPULIMIT 100 /* Cause */ enum { - kMemorystatusInvalid = JETSAM_REASON_INVALID, - kMemorystatusKilled = JETSAM_REASON_GENERIC, - kMemorystatusKilledHiwat = JETSAM_REASON_MEMORY_HIGHWATER, - kMemorystatusKilledVnodes = JETSAM_REASON_VNODE, - kMemorystatusKilledVMPageShortage = JETSAM_REASON_MEMORY_VMPAGESHORTAGE, - kMemorystatusKilledProcThrashing = JETSAM_REASON_MEMORY_PROCTHRASHING, - kMemorystatusKilledFCThrashing = JETSAM_REASON_MEMORY_FCTHRASHING, - kMemorystatusKilledPerProcessLimit = JETSAM_REASON_MEMORY_PERPROCESSLIMIT, - kMemorystatusKilledDiskSpaceShortage = JETSAM_REASON_MEMORY_DISK_SPACE_SHORTAGE, - kMemorystatusKilledIdleExit = JETSAM_REASON_MEMORY_IDLE_EXIT, - kMemorystatusKilledZoneMapExhaustion = JETSAM_REASON_ZONE_MAP_EXHAUSTION, - kMemorystatusKilledVMCompressorThrashing = JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING, - kMemorystatusKilledVMCompressorSpaceShortage = JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE, + kMemorystatusInvalid = JETSAM_REASON_INVALID, + kMemorystatusKilled = JETSAM_REASON_GENERIC, + kMemorystatusKilledHiwat = JETSAM_REASON_MEMORY_HIGHWATER, + kMemorystatusKilledVnodes = JETSAM_REASON_VNODE, + kMemorystatusKilledVMPageShortage = JETSAM_REASON_MEMORY_VMPAGESHORTAGE, + kMemorystatusKilledProcThrashing = JETSAM_REASON_MEMORY_PROCTHRASHING, + kMemorystatusKilledFCThrashing = JETSAM_REASON_MEMORY_FCTHRASHING, + kMemorystatusKilledPerProcessLimit = JETSAM_REASON_MEMORY_PERPROCESSLIMIT, + kMemorystatusKilledDiskSpaceShortage = JETSAM_REASON_MEMORY_DISK_SPACE_SHORTAGE, + kMemorystatusKilledIdleExit = JETSAM_REASON_MEMORY_IDLE_EXIT, + kMemorystatusKilledZoneMapExhaustion = JETSAM_REASON_ZONE_MAP_EXHAUSTION, + kMemorystatusKilledVMCompressorThrashing = JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING, + kMemorystatusKilledVMCompressorSpaceShortage = JETSAM_REASON_MEMORY_VMCOMPRESSOR_SPACE_SHORTAGE, }; /* For backwards compatibility */ -#define kMemorystatusKilledDiagnostic kMemorystatusKilledDiskSpaceShortage -#define kMemorystatusKilledVMThrashing 
kMemorystatusKilledVMCompressorThrashing -#define JETSAM_REASON_MEMORY_VMTHRASHING JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING +#define kMemorystatusKilledDiagnostic kMemorystatusKilledDiskSpaceShortage +#define kMemorystatusKilledVMThrashing kMemorystatusKilledVMCompressorThrashing +#define JETSAM_REASON_MEMORY_VMTHRASHING JETSAM_REASON_MEMORY_VMCOMPRESSOR_THRASHING /* Memorystatus control */ #define MEMORYSTATUS_BUFFERSIZE_MAX 65536 @@ -267,7 +267,7 @@ int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags, void *bu #define MEMORYSTATUS_CMD_GET_JETSAM_SNAPSHOT 3 #define MEMORYSTATUS_CMD_GET_PRESSURE_STATUS 4 #define MEMORYSTATUS_CMD_SET_JETSAM_HIGH_WATER_MARK 5 /* Set active memory limit = inactive memory limit, both non-fatal */ -#define MEMORYSTATUS_CMD_SET_JETSAM_TASK_LIMIT 6 /* Set active memory limit = inactive memory limit, both fatal */ +#define MEMORYSTATUS_CMD_SET_JETSAM_TASK_LIMIT 6 /* Set active memory limit = inactive memory limit, both fatal */ #define MEMORYSTATUS_CMD_SET_MEMLIMIT_PROPERTIES 7 /* Set memory limits plus attributes independently */ #define MEMORYSTATUS_CMD_GET_MEMLIMIT_PROPERTIES 8 /* Get memory limits plus attributes */ #define MEMORYSTATUS_CMD_PRIVILEGED_LISTENER_ENABLE 9 /* Set the task's status as a privileged listener w.r.t memory notifications */ @@ -275,17 +275,17 @@ int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags, void *bu #define MEMORYSTATUS_CMD_AGGRESSIVE_JETSAM_LENIENT_MODE_ENABLE 11 /* Enable the 'lenient' mode for aggressive jetsam. See comments in kern_memorystatus.c near the top. */ #define MEMORYSTATUS_CMD_AGGRESSIVE_JETSAM_LENIENT_MODE_DISABLE 12 /* Disable the 'lenient' mode for aggressive jetsam. */ #define MEMORYSTATUS_CMD_GET_MEMLIMIT_EXCESS 13 /* Compute how much a process's phys_footprint exceeds inactive memory limit */ -#define MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE 14 /* Set the inactive jetsam band for a process to JETSAM_PRIORITY_ELEVATED_INACTIVE */ -#define MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_DISABLE 15 /* Reset the inactive jetsam band for a process to the default band (0)*/ +#define MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_ENABLE 14 /* Set the inactive jetsam band for a process to JETSAM_PRIORITY_ELEVATED_INACTIVE */ +#define MEMORYSTATUS_CMD_ELEVATED_INACTIVEJETSAMPRIORITY_DISABLE 15 /* Reset the inactive jetsam band for a process to the default band (0)*/ #define MEMORYSTATUS_CMD_SET_PROCESS_IS_MANAGED 16 /* (Re-)Set state on a process that marks it as (un-)managed by a system entity e.g. assertiond */ #define MEMORYSTATUS_CMD_GET_PROCESS_IS_MANAGED 17 /* Return the 'managed' status of a process */ #define MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE 18 /* Is the process eligible for freezing? Apps and extensions can pass in FALSE to opt out of freezing, i.e., - if they would prefer being jetsam'ed in the idle band to being frozen in an elevated band. */ + * if they would prefer being jetsam'ed in the idle band to being frozen in an elevated band. */ #define MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE 19 /* Return the freezable state of a process. 
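 */

[Editor's example] A user-space sketch of the two freezable commands above (a private interface). The flags-as-boolean convention for the SET command and the state-as-return-value convention for the GET command are assumptions for illustration; the wrapper declaration mirrors the prototype used in this header:

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

extern int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags,
    void *buffer, size_t buffersize);

static int
example_opt_out_of_freezing(void)
{
	pid_t pid = getpid();

	/* Pass FALSE in flags: prefer idle-band jetsam over being frozen. */
	if (memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE,
	    pid, 0, NULL, 0) != 0) {
		return -1;
	}
	/* Assumed to return the current freezable state of the process. */
	return memorystatus_control(MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE,
	    pid, 0, NULL, 0);
}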
#if CONFIG_FREEZE #if DEVELOPMENT || DEBUG -#define MEMORYSTATUS_CMD_FREEZER_CONTROL 20 +#define MEMORYSTATUS_CMD_FREEZER_CONTROL 20 #endif /* DEVELOPMENT || DEBUG */ #endif /* CONFIG_FREEZE */ @@ -296,8 +296,8 @@ int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags, void *bu /* Test commands */ /* Trigger forced jetsam */ -#define MEMORYSTATUS_CMD_TEST_JETSAM 1000 -#define MEMORYSTATUS_CMD_TEST_JETSAM_SORT 1001 +#define MEMORYSTATUS_CMD_TEST_JETSAM 1000 +#define MEMORYSTATUS_CMD_TEST_JETSAM_SORT 1001 /* Panic on jetsam options */ typedef struct memorystatus_jetsam_panic_options { @@ -308,18 +308,18 @@ typedef struct memorystatus_jetsam_panic_options { #define MEMORYSTATUS_CMD_SET_JETSAM_PANIC_BITS 1002 /* Select priority band sort order */ -#define JETSAM_SORT_NOSORT 0 -#define JETSAM_SORT_DEFAULT 1 +#define JETSAM_SORT_NOSORT 0 +#define JETSAM_SORT_DEFAULT 1 #endif /* PRIVATE */ /* memorystatus_control() flags */ -#define MEMORYSTATUS_FLAGS_SNAPSHOT_ON_DEMAND 0x1 /* A populated snapshot buffer is returned on demand */ -#define MEMORYSTATUS_FLAGS_SNAPSHOT_AT_BOOT 0x2 /* Returns a snapshot with memstats collected at boot */ -#define MEMORYSTATUS_FLAGS_SNAPSHOT_COPY 0x4 /* Returns the previously populated snapshot created by the system */ -#define MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY 0x8 /* Set jetsam priorities for a group of pids */ -#define MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY 0x10 /* Set probability of use for a group of processes */ +#define MEMORYSTATUS_FLAGS_SNAPSHOT_ON_DEMAND 0x1 /* A populated snapshot buffer is returned on demand */ +#define MEMORYSTATUS_FLAGS_SNAPSHOT_AT_BOOT 0x2 /* Returns a snapshot with memstats collected at boot */ +#define MEMORYSTATUS_FLAGS_SNAPSHOT_COPY 0x4 /* Returns the previously populated snapshot created by the system */ +#define MEMORYSTATUS_FLAGS_GRP_SET_PRIORITY 0x8 /* Set jetsam priorities for a group of pids */ +#define MEMORYSTATUS_FLAGS_GRP_SET_PROBABILITY 0x10 /* Set probability of use for a group of processes */ /* * For use with memorystatus_control: @@ -341,17 +341,17 @@ typedef struct memorystatus_jetsam_panic_options { * the snapshot entry_count is always 0. * * Copy mode - this returns the previous snapshot - * collected by the system. The current snaphshot + * collected by the system. The current snapshot * might be only half populated. * * Snapshots are inherently racey between request * for buffer size and actual data compilation.
-*/ + */ /* These definitions are required for backwards compatibility */ -#define MEMORYSTATUS_SNAPSHOT_ON_DEMAND MEMORYSTATUS_FLAGS_SNAPSHOT_ON_DEMAND -#define MEMORYSTATUS_SNAPSHOT_AT_BOOT MEMORYSTATUS_FLAGS_SNAPSHOT_AT_BOOT -#define MEMORYSTATUS_SNAPSHOT_COPY MEMORYSTATUS_FLAGS_SNAPSHOT_COPY +#define MEMORYSTATUS_SNAPSHOT_ON_DEMAND MEMORYSTATUS_FLAGS_SNAPSHOT_ON_DEMAND +#define MEMORYSTATUS_SNAPSHOT_AT_BOOT MEMORYSTATUS_FLAGS_SNAPSHOT_AT_BOOT +#define MEMORYSTATUS_SNAPSHOT_COPY MEMORYSTATUS_FLAGS_SNAPSHOT_COPY /* * For use with memorystatus_control: @@ -368,13 +368,13 @@ typedef struct memorystatus_priority_properties { * MEMORYSTATUS_CMD_GET_MEMLIMIT_PROPERTIES */ typedef struct memorystatus_memlimit_properties { - int32_t memlimit_active; /* jetsam memory limit (in MB) when process is active */ + int32_t memlimit_active; /* jetsam memory limit (in MB) when process is active */ uint32_t memlimit_active_attr; - int32_t memlimit_inactive; /* jetsam memory limit (in MB) when process is inactive */ + int32_t memlimit_inactive; /* jetsam memory limit (in MB) when process is inactive */ uint32_t memlimit_inactive_attr; } memorystatus_memlimit_properties_t; -#define MEMORYSTATUS_MEMLIMIT_ATTR_FATAL 0x1 /* if set, exceeding the memlimit is fatal */ +#define MEMORYSTATUS_MEMLIMIT_ATTR_FATAL 0x1 /* if set, exceeding the memlimit is fatal */ #ifdef XNU_KERNEL_PRIVATE @@ -421,8 +421,8 @@ typedef struct memorystatus_memlimit_properties { #define P_MEMSTAT_FATAL_MEMLIMIT 0x00002000 /* current fatal state of the process's memlimit */ #define P_MEMSTAT_MEMLIMIT_ACTIVE_FATAL 0x00004000 /* if set, exceeding limit is fatal when the process is active */ #define P_MEMSTAT_MEMLIMIT_INACTIVE_FATAL 0x00008000 /* if set, exceeding limit is fatal when the process is inactive */ -#define P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND 0x00010000 /* if set, the process will go into this band & stay there when in the background instead - of the aging bands and/or the IDLE band. */ +#define P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND 0x00010000 /* if set, the process will go into this band & stay there when in the background instead + * of the aging bands and/or the IDLE band. */ extern void memorystatus_init(void) __attribute__((section("__TEXT, initcode"))); @@ -430,8 +430,8 @@ extern void memorystatus_init_at_boot_snapshot(void); extern int memorystatus_add(proc_t p, boolean_t locked); extern int memorystatus_update(proc_t p, int priority, uint64_t user_data, boolean_t effective, - boolean_t update_memlimit, int32_t memlimit_active, boolean_t memlimit_active_is_fatal, - int32_t memlimit_inactive, boolean_t memlimit_inactive_is_fatal); + boolean_t update_memlimit, int32_t memlimit_active, boolean_t memlimit_active_is_fatal, + int32_t memlimit_inactive, boolean_t memlimit_inactive_is_fatal); extern int memorystatus_remove(proc_t p, boolean_t locked); @@ -470,7 +470,7 @@ int memorystatus_get_pressure_status_kdp(void); #if CONFIG_JETSAM typedef enum memorystatus_policy { - kPolicyDefault = 0x0, + kPolicyDefault = 0x0, kPolicyMoreFree = 0x1, kPolicyDiagnoseAll = 0x2, kPolicyDiagnoseFirst = 0x4, @@ -500,10 +500,10 @@ boolean_t memorystatus_idle_exit_from_VM(void); #define FREEZE_PAGES_MAX (32 * 1024 * 1024 / PAGE_SIZE) #define FREEZE_SUSPENDED_THRESHOLD_DEFAULT 4 -#define FREEZE_PROCESSES_MAX 20 +#define FREEZE_PROCESSES_MAX 20 -#define FREEZE_DAILY_MB_MAX_DEFAULT 1024 -#define FREEZE_DEGRADATION_BUDGET_THRESHOLD 25 //degraded perf. 
when the daily budget left falls below this threshold percentage +#define FREEZE_DAILY_MB_MAX_DEFAULT 1024 +#define FREEZE_DEGRADATION_BUDGET_THRESHOLD 25 //degraded perf. when the daily budget left falls below this threshold percentage #define MAX_FROZEN_SHARED_MB_PERCENT 10 /* max shared MB calculated as percent of system task limit. */ #define MAX_FROZEN_PROCESS_DEMOTIONS 2 /* max demotions of frozen processes into IDLE band done daily. */ @@ -525,7 +525,7 @@ extern void memorystatus_freeze_init(void) __attribute__((section("__TEXT, initc extern int memorystatus_freeze_process_sync(proc_t p); #if DEVELOPMENT || DEBUG -#define FREEZER_CONTROL_GET_STATUS (1) +#define FREEZER_CONTROL_GET_STATUS (1) #endif /* DEVELOPMENT || DEBUG */ #endif /* CONFIG_FREEZE */ diff --git a/bsd/sys/kern_overrides.h b/bsd/sys/kern_overrides.h index e2212a3c3..24072e888 100644 --- a/bsd/sys/kern_overrides.h +++ b/bsd/sys/kern_overrides.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,22 +40,22 @@ __BEGIN_DECLS * system_override() system call * * The system_override() syscall is used to modify some kernel performance mechanisms. - * The system call needs a special entitlement and should be used with extreme caution. + * The system call needs a special entitlement and should be used with extreme caution. * A misuse of this syscall could lead to severe performance and battery life issues. * - * The caller needs to specify the mask for the specific mechanisms to modify and a - * timeout. The implementation of this system call blocks the thread in the syscall - * for the duration specified in the call. Blocking a thread in the system call allows - * the kernel to revert the modification in case the calling process dies. It also - * makes the change of behavior extremely obvious due to the backtrace of the calling + * The caller needs to specify the mask for the specific mechanisms to modify and a + * timeout. The implementation of this system call blocks the thread in the syscall + * for the duration specified in the call. Blocking a thread in the system call allows + * the kernel to revert the modification in case the calling process dies. It also + * makes the change of behavior extremely obvious due to the backtrace of the calling * thread. * - * Multiple agents are allowed to call this interface at the same time. 
The behavior - * change is effective from the time the first call is made (for a specific mechanism) - * until the longest timeout specified by any agent. If the caller wishes to disable - * the behavior change caused by itself, it can call the same interface with the - * SYS_OVERRIDE_DISABLE flag and the mechanism mask from another thread in the same - * process. Note that this does not break out the original thread from the block + * Multiple agents are allowed to call this interface at the same time. The behavior + * change is effective from the time the first call is made (for a specific mechanism) + * until the longest timeout specified by any agent. If the caller wishes to disable + * the behavior change caused by itself, it can call the same interface with the + * SYS_OVERRIDE_DISABLE flag and the mechanism mask from another thread in the same + * process. Note that this does not break out the original thread from the block * immediately. It simply undoes the mechanism change underneath. * * The currently supported overrides are: @@ -66,12 +66,12 @@ __BEGIN_DECLS */ /* System Overrides Flags */ -#define SYS_OVERRIDE_DISABLE (~(~0ull >> 1)) -#define SYS_OVERRIDE_IO_THROTTLE 0x1 -#define SYS_OVERRIDE_CPU_THROTTLE 0x2 -#define SYS_OVERRIDE_FAST_JETSAM 0x4 +#define SYS_OVERRIDE_DISABLE (~(~0ull >> 1)) +#define SYS_OVERRIDE_IO_THROTTLE 0x1 +#define SYS_OVERRIDE_CPU_THROTTLE 0x2 +#define SYS_OVERRIDE_FAST_JETSAM 0x4 -#define SYS_OVERRIDE_FLAGS_MASK (SYS_OVERRIDE_DISABLE | SYS_OVERRIDE_IO_THROTTLE | SYS_OVERRIDE_CPU_THROTTLE | SYS_OVERRIDE_FAST_JETSAM) +#define SYS_OVERRIDE_FLAGS_MASK (SYS_OVERRIDE_DISABLE | SYS_OVERRIDE_IO_THROTTLE | SYS_OVERRIDE_CPU_THROTTLE | SYS_OVERRIDE_FAST_JETSAM) #ifdef BSD_KERNEL_PRIVATE void init_system_override(void); diff --git a/bsd/sys/kernel.h b/bsd/sys/kernel.h index 35555f842..efb737aa7 100644 --- a/bsd/sys/kernel.h +++ b/bsd/sys/kernel.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -65,12 +65,12 @@ * * @(#)kernel.h 8.3 (Berkeley) 1/21/94 */ -#ifndef _SYS_KERNEL_H_ +#ifndef _SYS_KERNEL_H_ #define _SYS_KERNEL_H_ #include -#ifdef KERNEL +#ifdef KERNEL #include @@ -87,21 +87,21 @@ extern char domainname[MAXHOSTNAMELEN]; extern int domainnamelen; /* 1.2 */ -extern int stathz; /* statistics clock's frequency */ -extern int profhz; /* profiling clock's frequency */ +extern int stathz; /* statistics clock's frequency */ +extern int profhz; /* profiling clock's frequency */ extern bool send_sigsys; #endif /* BSD_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE -extern struct timezone tz; /* XXX */ +extern struct timezone tz; /* XXX */ -extern int tick; /* usec per tick (1000000 / hz) */ -extern int hz; /* system clock's frequency */ +extern int tick; /* usec per tick (1000000 / hz) */ +extern int hz; /* system clock's frequency */ #endif /* KERNEL_PRIVATE */ -#endif /* KERNEL */ +#endif /* KERNEL */ -#endif /* !_SYS_KERNEL_H_ */ +#endif /* !_SYS_KERNEL_H_ */ diff --git a/bsd/sys/kernel_types.h b/bsd/sys/kernel_types.h index f73ff1570..f43d1f0c2 100644 --- a/bsd/sys/kernel_types.h +++ b/bsd/sys/kernel_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,9 +35,9 @@ #ifdef BSD_BUILD /* Macros(?) to clear/set/test flags. 
*/ -#define SET(t, f) (t) |= (f) -#define CLR(t, f) (t) &= ~(f) -#define ISSET(t, f) ((t) & (f)) +#define SET(t, f) (t) |= (f) +#define CLR(t, f) (t) &= ~(f) +#define ISSET(t, f) ((t) & (f)) #endif @@ -85,17 +85,17 @@ struct __rtentry; struct __if_clone; struct __bufattr; -typedef struct __ifnet* ifnet_t; -typedef struct __mbuf* mbuf_t; -typedef struct __pkthdr* pkthdr_t; -typedef struct __socket* socket_t; -typedef struct __sockopt* sockopt_t; -typedef struct __ifaddr* ifaddr_t; -typedef struct __ifmultiaddr* ifmultiaddr_t; -typedef struct __ifnet_filter* interface_filter_t; -typedef struct __rtentry* route_t; -typedef struct __if_clone* if_clone_t; -typedef struct __bufattr* bufattr_t; +typedef struct __ifnet* ifnet_t; +typedef struct __mbuf* mbuf_t; +typedef struct __pkthdr* pkthdr_t; +typedef struct __socket* socket_t; +typedef struct __sockopt* sockopt_t; +typedef struct __ifaddr* ifaddr_t; +typedef struct __ifmultiaddr* ifmultiaddr_t; +typedef struct __ifnet_filter* interface_filter_t; +typedef struct __rtentry* route_t; +typedef struct __if_clone* if_clone_t; +typedef struct __bufattr* bufattr_t; #else /* BSD_BUILD */ @@ -114,17 +114,17 @@ typedef struct vfstable * vfstable_t; #ifdef KERNEL_PRIVATE typedef struct kern_iovec * kern_iovec_t; -typedef struct ifnet* ifnet_t; -typedef struct mbuf* mbuf_t; -typedef struct pkthdr* pkthdr_t; -typedef struct socket* socket_t; -typedef struct sockopt* sockopt_t; -typedef struct ifaddr* ifaddr_t; -typedef struct ifmultiaddr* ifmultiaddr_t; -typedef struct ifnet_filter* interface_filter_t; -typedef struct rtentry* route_t; -typedef struct if_clone* if_clone_t; -typedef struct bufattr* bufattr_t; +typedef struct ifnet* ifnet_t; +typedef struct mbuf* mbuf_t; +typedef struct pkthdr* pkthdr_t; +typedef struct socket* socket_t; +typedef struct sockopt* sockopt_t; +typedef struct ifaddr* ifaddr_t; +typedef struct ifmultiaddr* ifmultiaddr_t; +typedef struct ifnet_filter* interface_filter_t; +typedef struct rtentry* route_t; +typedef struct if_clone* if_clone_t; +typedef struct bufattr* bufattr_t; #endif /* KERNEL_PRIVATE */ #endif /* !BSD_BUILD */ diff --git a/bsd/sys/kpi_mbuf.h b/bsd/sys/kpi_mbuf.h index 9ecef199c..76d960422 100644 --- a/bsd/sys/kpi_mbuf.h +++ b/bsd/sys/kpi_mbuf.h @@ -26,21 +26,21 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header kpi_mbuf.h - This header defines an API for interacting with mbufs. mbufs are the - primary method of storing packets in the networking stack. - - mbufs are used to store various items in the networking stack. The - most common usage of an mbuf is to store a packet or data on a - socket waiting to be sent or received. The mbuf is a contiguous - structure with some header followed by some data. To store more data - than would fit in an mbuf, external data is used. Most mbufs with - external data use clusters to store the external data. - - mbufs can be chained, contiguous data in a packet can be found by - following the m_next chain. Packets may be bundled together using - m_nextpacket. Many parts of the stack do not properly handle chains - of packets. When in doubt, don't chain packets. + * @header kpi_mbuf.h + * This header defines an API for interacting with mbufs. mbufs are the + * primary method of storing packets in the networking stack. + * + * mbufs are used to store various items in the networking stack. The + * most common usage of an mbuf is to store a packet or data on a + * socket waiting to be sent or received. 
The mbuf is a contiguous + * structure with some header followed by some data. To store more data + * than would fit in an mbuf, external data is used. Most mbufs with + * external data use clusters to store the external data. + * + * mbufs can be chained, contiguous data in a packet can be found by + * following the m_next chain. Packets may be bundled together using + * m_nextpacket. Many parts of the stack do not properly handle chains + * of packets. When in doubt, don't chain packets. */ #ifndef __KPI_MBUF__ @@ -52,308 +52,308 @@ #endif /* KERNEL_PRIVATE */ /*! - @enum mbuf_flags_t - @abstract Constants defining mbuf flags. Only the flags listed below - can be set or retrieved. - @constant MBUF_EXT Indicates this mbuf has external data. - @constant MBUF_PKTHDR Indicates this mbuf has a packet header. - @constant MBUF_EOR Indicates this mbuf is the end of a record. - @constant MBUF_LOOP Indicates this packet is looped back. - @constant MBUF_BCAST Indicates this packet will be sent or was - received as a brodcast. - @constant MBUF_MCAST Indicates this packet will be sent or was - received as a multicast. - @constant MBUF_FRAG Indicates this packet is a fragment of a larger - packet. - @constant MBUF_FIRSTFRAG Indicates this packet is the first fragment. - @constant MBUF_LASTFRAG Indicates this packet is the last fragment. - @constant MBUF_PROMISC Indicates this packet was only received - because the interface is in promiscuous mode. This should be set - by the demux function. These packets will be discarded after - being passed to any interface filters. -*/ + * @enum mbuf_flags_t + * @abstract Constants defining mbuf flags. Only the flags listed below + * can be set or retrieved. + * @constant MBUF_EXT Indicates this mbuf has external data. + * @constant MBUF_PKTHDR Indicates this mbuf has a packet header. + * @constant MBUF_EOR Indicates this mbuf is the end of a record. + * @constant MBUF_LOOP Indicates this packet is looped back. + * @constant MBUF_BCAST Indicates this packet will be sent or was + * received as a brodcast. + * @constant MBUF_MCAST Indicates this packet will be sent or was + * received as a multicast. + * @constant MBUF_FRAG Indicates this packet is a fragment of a larger + * packet. + * @constant MBUF_FIRSTFRAG Indicates this packet is the first fragment. + * @constant MBUF_LASTFRAG Indicates this packet is the last fragment. + * @constant MBUF_PROMISC Indicates this packet was only received + * because the interface is in promiscuous mode. This should be set + * by the demux function. These packets will be discarded after + * being passed to any interface filters. 
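As an illustration of how these flags are typically consumed, a sketch of a filter-style check (assuming the mbuf_flags() accessor declared later in this header; the policy itself is hypothetical):

#include <sys/kpi_mbuf.h>

/* Return nonzero for packets that were only delivered because the
 * interface is promiscuous; such packets are discarded after the
 * interface filters run anyway. */
static int
seen_only_because_promisc(mbuf_t m)
{
    mbuf_flags_t f = mbuf_flags(m);

    if ((f & MBUF_PKTHDR) == 0)
        return 0;               /* not the start of a packet */
    return (f & MBUF_PROMISC) != 0;
}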
+ */ enum { - MBUF_EXT = 0x0001, /* has associated external storage */ - MBUF_PKTHDR = 0x0002, /* start of record */ - MBUF_EOR = 0x0004, /* end of record */ - MBUF_LOOP = 0x0040, /* packet is looped back */ - - MBUF_BCAST = 0x0100, /* send/received as link-level broadcast */ - MBUF_MCAST = 0x0200, /* send/received as link-level multicast */ - MBUF_FRAG = 0x0400, /* packet is a fragment of a larger packet */ - MBUF_FIRSTFRAG = 0x0800, /* packet is first fragment */ - MBUF_LASTFRAG = 0x1000, /* packet is last fragment */ - MBUF_PROMISC = 0x2000, /* packet is promiscuous */ - MBUF_HASFCS = 0x4000 /* packet has FCS */ + MBUF_EXT = 0x0001, /* has associated external storage */ + MBUF_PKTHDR = 0x0002, /* start of record */ + MBUF_EOR = 0x0004, /* end of record */ + MBUF_LOOP = 0x0040, /* packet is looped back */ + + MBUF_BCAST = 0x0100, /* send/received as link-level broadcast */ + MBUF_MCAST = 0x0200, /* send/received as link-level multicast */ + MBUF_FRAG = 0x0400, /* packet is a fragment of a larger packet */ + MBUF_FIRSTFRAG = 0x0800, /* packet is first fragment */ + MBUF_LASTFRAG = 0x1000, /* packet is last fragment */ + MBUF_PROMISC = 0x2000, /* packet is promiscuous */ + MBUF_HASFCS = 0x4000 /* packet has FCS */ }; typedef u_int32_t mbuf_flags_t; /*! - @enum mbuf_type_t - @abstract Types of mbufs. - @discussion Some mbufs represent packets, some represnt data waiting - on sockets. Other mbufs store control data or other various - structures. The mbuf type is used to store what sort of data the - mbuf contains. - @constant MBUF_MT_FREE Indicates the mbuf is free and is - sitting on the queue of free mbufs. If you find that an mbuf you - have a reference to has this type, something has gone terribly - wrong. - @constant MBUF_MT_DATA Indicates this mbuf is being used to store - data. - @constant MBUF_MT_HEADER Indicates this mbuf has a packet header, - this is probably a packet. - @constant MBUF_MT_SOCKET Socket structure. - @constant MBUF_MT_PCB Protocol control block. - @constant MBUF_MT_RTABLE Routing table entry. - @constant MBUF_MT_HTABLE IMP host tables???. - @constant MBUF_MT_ATABLE Address resolution table data. - @constant MBUF_MT_SONAME Socket name, usually a sockaddr of some - sort. - @constant MBUF_MT_FTABLE Fragment reassembly header. - @constant MBUF_MT_RIGHTS Access rights. - @constant MBUF_MT_IFADDR Interface address. - @constant MBUF_MT_CONTROL Extra-data protocol message (control - message). - @constant MBUF_MT_OOBDATA Out of band data. -*/ + * @enum mbuf_type_t + * @abstract Types of mbufs. + * @discussion Some mbufs represent packets, some represnt data waiting + * on sockets. Other mbufs store control data or other various + * structures. The mbuf type is used to store what sort of data the + * mbuf contains. + * @constant MBUF_MT_FREE Indicates the mbuf is free and is + * sitting on the queue of free mbufs. If you find that an mbuf you + * have a reference to has this type, something has gone terribly + * wrong. + * @constant MBUF_MT_DATA Indicates this mbuf is being used to store + * data. + * @constant MBUF_MT_HEADER Indicates this mbuf has a packet header, + * this is probably a packet. + * @constant MBUF_MT_SOCKET Socket structure. + * @constant MBUF_MT_PCB Protocol control block. + * @constant MBUF_MT_RTABLE Routing table entry. + * @constant MBUF_MT_HTABLE IMP host tables???. + * @constant MBUF_MT_ATABLE Address resolution table data. + * @constant MBUF_MT_SONAME Socket name, usually a sockaddr of some + * sort. 
+ * @constant MBUF_MT_FTABLE Fragment reassembly header. + * @constant MBUF_MT_RIGHTS Access rights. + * @constant MBUF_MT_IFADDR Interface address. + * @constant MBUF_MT_CONTROL Extra-data protocol message (control + * message). + * @constant MBUF_MT_OOBDATA Out of band data. + */ enum { - MBUF_TYPE_FREE = 0, /* should be on free list */ - MBUF_TYPE_DATA = 1, /* dynamic (data) allocation */ - MBUF_TYPE_HEADER = 2, /* packet header */ - MBUF_TYPE_SOCKET = 3, /* socket structure */ - MBUF_TYPE_PCB = 4, /* protocol control block */ - MBUF_TYPE_RTABLE = 5, /* routing tables */ - MBUF_TYPE_HTABLE = 6, /* IMP host tables */ - MBUF_TYPE_ATABLE = 7, /* address resolution tables */ - MBUF_TYPE_SONAME = 8, /* socket name */ - MBUF_TYPE_SOOPTS = 10, /* socket options */ - MBUF_TYPE_FTABLE = 11, /* fragment reassembly header */ - MBUF_TYPE_RIGHTS = 12, /* access rights */ - MBUF_TYPE_IFADDR = 13, /* interface address */ - MBUF_TYPE_CONTROL = 14, /* extra-data protocol message */ - MBUF_TYPE_OOBDATA = 15 /* expedited data */ + MBUF_TYPE_FREE = 0, /* should be on free list */ + MBUF_TYPE_DATA = 1, /* dynamic (data) allocation */ + MBUF_TYPE_HEADER = 2, /* packet header */ + MBUF_TYPE_SOCKET = 3, /* socket structure */ + MBUF_TYPE_PCB = 4, /* protocol control block */ + MBUF_TYPE_RTABLE = 5, /* routing tables */ + MBUF_TYPE_HTABLE = 6, /* IMP host tables */ + MBUF_TYPE_ATABLE = 7, /* address resolution tables */ + MBUF_TYPE_SONAME = 8, /* socket name */ + MBUF_TYPE_SOOPTS = 10, /* socket options */ + MBUF_TYPE_FTABLE = 11, /* fragment reassembly header */ + MBUF_TYPE_RIGHTS = 12, /* access rights */ + MBUF_TYPE_IFADDR = 13, /* interface address */ + MBUF_TYPE_CONTROL = 14, /* extra-data protocol message */ + MBUF_TYPE_OOBDATA = 15 /* expedited data */ }; typedef u_int32_t mbuf_type_t; /*! - @enum mbuf_csum_request_flags_t - @abstract Checksum performed/requested flags. - @discussion Mbufs often contain packets. Some hardware supports - performing checksums in hardware. The stack uses these flags to - indicate to the driver what sort of checksumming should be - handled in by the driver/hardware. These flags will only be set - if the driver indicates that it supports the corresponding - checksums using ifnet_set_offload. - @constant MBUF_CSUM_REQ_IP Indicates the IP checksum has not been - calculated yet. - @constant MBUF_CSUM_REQ_TCP Indicates the TCP checksum has not been - calculated yet. - @constant MBUF_CSUM_REQ_UDP Indicates the UDP checksum has not been - calculated yet. - @constant MBUF_CSUM_REQ_TCPIPV6 Indicates the TCP checksum for IPv6 - has not been calculated yet. - @constant MBUF_CSUM_REQ_UDPIPV6 Indicates the UDP checksum for IPv6 - has not been calculated yet. -*/ + * @enum mbuf_csum_request_flags_t + * @abstract Checksum performed/requested flags. + * @discussion Mbufs often contain packets. Some hardware supports + * performing checksums in hardware. The stack uses these flags to + * indicate to the driver what sort of checksumming should be + * handled in by the driver/hardware. These flags will only be set + * if the driver indicates that it supports the corresponding + * checksums using ifnet_set_offload. + * @constant MBUF_CSUM_REQ_IP Indicates the IP checksum has not been + * calculated yet. + * @constant MBUF_CSUM_REQ_TCP Indicates the TCP checksum has not been + * calculated yet. + * @constant MBUF_CSUM_REQ_UDP Indicates the UDP checksum has not been + * calculated yet. + * @constant MBUF_CSUM_REQ_TCPIPV6 Indicates the TCP checksum for IPv6 + * has not been calculated yet. 
+ * @constant MBUF_CSUM_REQ_UDPIPV6 Indicates the UDP checksum for IPv6 + * has not been calculated yet. + */ enum { - MBUF_TSO_IPV4 = 0x100000, - MBUF_TSO_IPV6 = 0x200000 + MBUF_TSO_IPV4 = 0x100000, + MBUF_TSO_IPV6 = 0x200000 }; typedef u_int32_t mbuf_tso_request_flags_t; enum { #ifdef KERNEL_PRIVATE - MBUF_CSUM_PARTIAL = 0x1000, /* 16-bit 1's complement sum */ - MBUF_CSUM_REQ_SUM16 = MBUF_CSUM_PARTIAL, + MBUF_CSUM_PARTIAL = 0x1000, /* 16-bit 1's complement sum */ + MBUF_CSUM_REQ_SUM16 = MBUF_CSUM_PARTIAL, MBUF_CSUM_REQ_ZERO_INVERT = 0x2000, #endif /* KERNEL_PRIVATE */ - MBUF_CSUM_REQ_IP = 0x0001, - MBUF_CSUM_REQ_TCP = 0x0002, - MBUF_CSUM_REQ_UDP = 0x0004, - MBUF_CSUM_REQ_TCPIPV6 = 0x0020, - MBUF_CSUM_REQ_UDPIPV6 = 0x0040 + MBUF_CSUM_REQ_IP = 0x0001, + MBUF_CSUM_REQ_TCP = 0x0002, + MBUF_CSUM_REQ_UDP = 0x0004, + MBUF_CSUM_REQ_TCPIPV6 = 0x0020, + MBUF_CSUM_REQ_UDPIPV6 = 0x0040 }; typedef u_int32_t mbuf_csum_request_flags_t; /*! - @enum mbuf_csum_performed_flags_t - @abstract Checksum performed/requested flags. - @discussion Mbufs often contain packets. Some hardware supports - performing checksums in hardware. The driver uses these flags to - communicate to the stack the checksums that were calculated in - hardware. - @constant MBUF_CSUM_DID_IP Indicates that the driver/hardware verified - the IP checksum in hardware. - @constant MBUF_CSUM_IP_GOOD Indicates whether or not the IP checksum - was good or bad. Only valid when MBUF_CSUM_DID_IP is set. - @constant MBUF_CSUM_DID_DATA Indicates that the TCP or UDP checksum - was calculated. The value for the checksum calculated in - hardware should be passed as the second parameter of - mbuf_set_csum_performed. The hardware calculated checksum value - can be retrieved using the second parameter passed to - mbuf_get_csum_performed. This should be done for IPv4 or IPv6. - @constant MBUF_CSUM_PSEUDO_HDR If set, this indicates that the - checksum value for MBUF_CSUM_DID_DATA includes the pseudo header - value. If this is not set, the stack will calculate the pseudo - header value and add that to the checksum. The value of this bit - is only valid when MBUF_CSUM_DID_DATA is set. -*/ + * @enum mbuf_csum_performed_flags_t + * @abstract Checksum performed/requested flags. + * @discussion Mbufs often contain packets. Some hardware supports + * performing checksums in hardware. The driver uses these flags to + * communicate to the stack the checksums that were calculated in + * hardware. + * @constant MBUF_CSUM_DID_IP Indicates that the driver/hardware verified + * the IP checksum in hardware. + * @constant MBUF_CSUM_IP_GOOD Indicates whether or not the IP checksum + * was good or bad. Only valid when MBUF_CSUM_DID_IP is set. + * @constant MBUF_CSUM_DID_DATA Indicates that the TCP or UDP checksum + * was calculated. The value for the checksum calculated in + * hardware should be passed as the second parameter of + * mbuf_set_csum_performed. The hardware calculated checksum value + * can be retrieved using the second parameter passed to + * mbuf_get_csum_performed. This should be done for IPv4 or IPv6. + * @constant MBUF_CSUM_PSEUDO_HDR If set, this indicates that the + * checksum value for MBUF_CSUM_DID_DATA includes the pseudo header + * value. If this is not set, the stack will calculate the pseudo + * header value and add that to the checksum. The value of this bit + * is only valid when MBUF_CSUM_DID_DATA is set. 
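To make the driver side of this contract concrete: a receive path that validated the IPv4 header checksum in hardware and computed a full transport checksum might report it roughly as below (a sketch, assuming the mbuf_set_csum_performed() KPI declared elsewhere in this header; the hardware behavior is hypothetical):

#include <sys/kpi_mbuf.h>

/* RX completion: tell the stack what the hardware already did.
 * hw_csum is the transport checksum the hardware computed,
 * including the pseudo header in this (assumed) design. */
static void
rx_report_checksums(mbuf_t m, int ip_ok, u_int32_t hw_csum)
{
    mbuf_csum_performed_flags_t flags = MBUF_CSUM_DID_IP;

    if (ip_ok)
        flags |= MBUF_CSUM_IP_GOOD;
    flags |= MBUF_CSUM_DID_DATA | MBUF_CSUM_PSEUDO_HDR;

    (void)mbuf_set_csum_performed(m, flags, hw_csum);
}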
+ */ enum { #ifdef KERNEL_PRIVATE - MBUF_CSUM_TCP_SUM16 = MBUF_CSUM_PARTIAL, + MBUF_CSUM_TCP_SUM16 = MBUF_CSUM_PARTIAL, #endif /* KERNEL_PRIVATE */ - MBUF_CSUM_DID_IP = 0x0100, - MBUF_CSUM_IP_GOOD = 0x0200, - MBUF_CSUM_DID_DATA = 0x0400, - MBUF_CSUM_PSEUDO_HDR = 0x0800 + MBUF_CSUM_DID_IP = 0x0100, + MBUF_CSUM_IP_GOOD = 0x0200, + MBUF_CSUM_DID_DATA = 0x0400, + MBUF_CSUM_PSEUDO_HDR = 0x0800 }; typedef u_int32_t mbuf_csum_performed_flags_t; /*! - @enum mbuf_how_t - @abstract Method of allocating an mbuf. - @discussion Blocking on the input or output path can impact - performance. There are some cases where making a blocking call - is acceptable. When in doubt, use MBUF_DONTWAIT. - @constant MBUF_WAITOK Allow a call to allocate an mbuf to block. - @constant MBUF_DONTWAIT Don't allow the mbuf allocation call to - block, if blocking is necessary fail and return immediately. -*/ + * @enum mbuf_how_t + * @abstract Method of allocating an mbuf. + * @discussion Blocking on the input or output path can impact + * performance. There are some cases where making a blocking call + * is acceptable. When in doubt, use MBUF_DONTWAIT. + * @constant MBUF_WAITOK Allow a call to allocate an mbuf to block. + * @constant MBUF_DONTWAIT Don't allow the mbuf allocation call to + * block, if blocking is necessary fail and return immediately. + */ enum { - MBUF_WAITOK = 0, /* Ok to block to get memory */ - MBUF_DONTWAIT = 1 /* Don't block, fail if blocking would be required */ + MBUF_WAITOK = 0, /* Ok to block to get memory */ + MBUF_DONTWAIT = 1 /* Don't block, fail if blocking would be required */ }; typedef u_int32_t mbuf_how_t; typedef u_int32_t mbuf_tag_id_t; -typedef u_int16_t mbuf_tag_type_t; - -/*! - @struct mbuf_stat - @discussion The mbuf_stat contains mbuf statistics. - @field mbufs Number of mbufs (free or otherwise). - @field clusters Number of clusters (free or otherwise). - @field clfree Number of free clusters. - @field drops Number of times allocation failed. - @field wait Number of times allocation blocked. - @field drain Number of times protocol drain functions were called. - @field mtypes An array of counts of each type of mbuf allocated. - @field mcfail Number of times m_copym failed. - @field mpfail Number of times m_pullup failed. - @field msize Length of an mbuf. - @field mclbytes Length of an mbuf cluster. - @field minclsize Minimum length of data to allocate a cluster. - Anything smaller than this should be placed in chained mbufs. - @field mlen Length of data in an mbuf. - @field mhlen Length of data in an mbuf with a packet header. - @field bigclusters Number of big clusters. - @field bigclfree Number of unused big clusters. - @field bigmclbytes Length of a big mbuf cluster. -*/ +typedef u_int16_t mbuf_tag_type_t; + +/*! + * @struct mbuf_stat + * @discussion The mbuf_stat contains mbuf statistics. + * @field mbufs Number of mbufs (free or otherwise). + * @field clusters Number of clusters (free or otherwise). + * @field clfree Number of free clusters. + * @field drops Number of times allocation failed. + * @field wait Number of times allocation blocked. + * @field drain Number of times protocol drain functions were called. + * @field mtypes An array of counts of each type of mbuf allocated. + * @field mcfail Number of times m_copym failed. + * @field mpfail Number of times m_pullup failed. + * @field msize Length of an mbuf. + * @field mclbytes Length of an mbuf cluster. + * @field minclsize Minimum length of data to allocate a cluster. 
+ * Anything smaller than this should be placed in chained mbufs. + * @field mlen Length of data in an mbuf. + * @field mhlen Length of data in an mbuf with a packet header. + * @field bigclusters Number of big clusters. + * @field bigclfree Number of unused big clusters. + * @field bigmclbytes Length of a big mbuf cluster. + */ struct mbuf_stat { - u_int32_t mbufs; /* mbufs obtained from page pool */ - u_int32_t clusters; /* clusters obtained from page pool */ - u_int32_t clfree; /* free clusters */ - u_int32_t drops; /* times failed to find space */ - u_int32_t wait; /* times waited for space */ - u_int32_t drain; /* times drained protocols for space */ - u_short mtypes[256]; /* type specific mbuf allocations */ - u_int32_t mcfail; /* times m_copym failed */ - u_int32_t mpfail; /* times m_pullup failed */ - u_int32_t msize; /* length of an mbuf */ - u_int32_t mclbytes; /* length of an mbuf cluster */ - u_int32_t minclsize; /* min length of data to allocate a cluster */ - u_int32_t mlen; /* length of data in an mbuf */ - u_int32_t mhlen; /* length of data in a header mbuf */ - u_int32_t bigclusters; /* number of big clusters */ - u_int32_t bigclfree; /* number of big clustser free */ - u_int32_t bigmclbytes; /* length of data in a big cluster */ + u_int32_t mbufs; /* mbufs obtained from page pool */ + u_int32_t clusters; /* clusters obtained from page pool */ + u_int32_t clfree; /* free clusters */ + u_int32_t drops; /* times failed to find space */ + u_int32_t wait; /* times waited for space */ + u_int32_t drain; /* times drained protocols for space */ + u_short mtypes[256]; /* type specific mbuf allocations */ + u_int32_t mcfail; /* times m_copym failed */ + u_int32_t mpfail; /* times m_pullup failed */ + u_int32_t msize; /* length of an mbuf */ + u_int32_t mclbytes; /* length of an mbuf cluster */ + u_int32_t minclsize; /* min length of data to allocate a cluster */ + u_int32_t mlen; /* length of data in an mbuf */ + u_int32_t mhlen; /* length of data in a header mbuf */ + u_int32_t bigclusters; /* number of big clusters */ + u_int32_t bigclfree; /* number of big clustser free */ + u_int32_t bigmclbytes; /* length of data in a big cluster */ }; /* Parameter for m_copym to copy all bytes */ -#define MBUF_COPYALL 1000000000 +#define MBUF_COPYALL 1000000000 __BEGIN_DECLS /* Data access */ /*! - @function mbuf_data - @discussion Returns a pointer to the start of data in this mbuf. - There may be additional data on chained mbufs. The data you're - looking for may not be virtually contiguous if it spans more - than one mbuf. In addition, data that is virtually contiguous - might not be represented by physically contiguous pages; see - further comments in mbuf_data_to_physical. Use mbuf_len to - determine the length of data available in this mbuf. If a data - structure you want to access stradles two mbufs in a chain, - either use mbuf_pullup to get the data contiguous in one mbuf - or copy the pieces of data from each mbuf in to a contiguous - buffer. Using mbuf_pullup has the advantage of not having to - copy the data. On the other hand, if you don't make sure there - is space in the mbuf, mbuf_pullup may fail and free the mbuf. - @param mbuf The mbuf. - @result A pointer to the data in the mbuf. + * @function mbuf_data + * @discussion Returns a pointer to the start of data in this mbuf. + * There may be additional data on chained mbufs. The data you're + * looking for may not be virtually contiguous if it spans more + * than one mbuf. 
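Given the contiguity caveats above, code that merely needs to read a header is often better off copying it out of the chain than chasing mbuf_data() across mbufs. A minimal sketch, assuming the mbuf_copydata() KPI declared later in this header:

#include <sys/kpi_mbuf.h>
#include <netinet/ip.h>

/* Copy the IP header out of the chain regardless of whether it is
 * contiguous in the first mbuf; returns an errno on short chains. */
static errno_t
read_ip_header(const mbuf_t m, struct ip *iphdr)
{
    return mbuf_copydata(m, 0, sizeof(*iphdr), iphdr);
}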
In addition, data that is virtually contiguous + * might not be represented by physically contiguous pages; see + * further comments in mbuf_data_to_physical. Use mbuf_len to + * determine the length of data available in this mbuf. If a data + * structure you want to access stradles two mbufs in a chain, + * either use mbuf_pullup to get the data contiguous in one mbuf + * or copy the pieces of data from each mbuf in to a contiguous + * buffer. Using mbuf_pullup has the advantage of not having to + * copy the data. On the other hand, if you don't make sure there + * is space in the mbuf, mbuf_pullup may fail and free the mbuf. + * @param mbuf The mbuf. + * @result A pointer to the data in the mbuf. */ extern void *mbuf_data(mbuf_t mbuf); /*! - @function mbuf_datastart - @discussion Returns the start of the space set aside for storing - data in an mbuf. An mbuf's data may come from a cluster or be - embedded in the mbuf structure itself. The data pointer - retrieved by mbuf_data may not be at the start of the data - (mbuf_leadingspace will be non-zero). This function will return - a pointer that matches mbuf_data() - mbuf_leadingspace(). - @param mbuf The mbuf. - @result A pointer to smallest possible value for data. + * @function mbuf_datastart + * @discussion Returns the start of the space set aside for storing + * data in an mbuf. An mbuf's data may come from a cluster or be + * embedded in the mbuf structure itself. The data pointer + * retrieved by mbuf_data may not be at the start of the data + * (mbuf_leadingspace will be non-zero). This function will return + * a pointer that matches mbuf_data() - mbuf_leadingspace(). + * @param mbuf The mbuf. + * @result A pointer to smallest possible value for data. */ extern void *mbuf_datastart(mbuf_t mbuf); /*! - @function mbuf_setdata - @discussion Sets the data and length values for an mbuf. The data - value must be in a valid range. In the case of an mbuf with a cluster, - the data value must point to a location in the cluster and the data - value plus the length, must be less than the end of the cluster. For - data embedded directly in an mbuf (no cluster), the data value must - fall somewhere between the start and end of the data area in the - mbuf and the data + length must also be in the same range. - @param mbuf The mbuf. - @param data The new pointer value for data. - @param len The new length of data in the mbuf. - @result 0 on success, errno error on failure. + * @function mbuf_setdata + * @discussion Sets the data and length values for an mbuf. The data + * value must be in a valid range. In the case of an mbuf with a cluster, + * the data value must point to a location in the cluster and the data + * value plus the length, must be less than the end of the cluster. For + * data embedded directly in an mbuf (no cluster), the data value must + * fall somewhere between the start and end of the data area in the + * mbuf and the data + length must also be in the same range. + * @param mbuf The mbuf. + * @param data The new pointer value for data. + * @param len The new length of data in the mbuf. + * @result 0 on success, errno error on failure. */ extern errno_t mbuf_setdata(mbuf_t mbuf, void *data, size_t len); /*! - @function mbuf_align_32 - @discussion mbuf_align_32 is a replacement for M_ALIGN and MH_ALIGN. - mbuf_align_32 will set the data pointer to a location aligned on - a four byte boundry with at least 'len' bytes between the data - pointer and the end of the data block. - @param mbuf The mbuf. 
- @param len The minimum length of space that should follow the new - data location. - @result 0 on success, errno error on failure. + * @function mbuf_align_32 + * @discussion mbuf_align_32 is a replacement for M_ALIGN and MH_ALIGN. + * mbuf_align_32 will set the data pointer to a location aligned on + * a four byte boundry with at least 'len' bytes between the data + * pointer and the end of the data block. + * @param mbuf The mbuf. + * @param len The minimum length of space that should follow the new + * data location. + * @result 0 on success, errno error on failure. */ extern errno_t mbuf_align_32(mbuf_t mbuf, size_t len); /*! - @function mbuf_data_to_physical - @discussion mbuf_data_to_physical is a replacement for mcl_to_paddr. - Given a pointer returned from mbuf_data of mbuf_datastart, - mbuf_data_to_physical will return the phyical address for that - block of data. Note that even though the data is in virtually - contiguous span, the underlying physical pages might not be - physically contiguous. Because of this, callers must ensure - to call this routine for each page boundary. Device drivers - that deal with DMA are strongly encouraged to utilize the - IOMbufNaturalMemoryCursor and walk down the list of vectors - instead of using this interface to obtain the physical address. - Use of this routine is therefore discouraged. - @param ptr A pointer to data stored in an mbuf. - @result The 64 bit physical address of the mbuf data or NULL if ptr - does not point to data stored in an mbuf. + * @function mbuf_data_to_physical + * @discussion mbuf_data_to_physical is a replacement for mcl_to_paddr. + * Given a pointer returned from mbuf_data of mbuf_datastart, + * mbuf_data_to_physical will return the phyical address for that + * block of data. Note that even though the data is in virtually + * contiguous span, the underlying physical pages might not be + * physically contiguous. Because of this, callers must ensure + * to call this routine for each page boundary. Device drivers + * that deal with DMA are strongly encouraged to utilize the + * IOMbufNaturalMemoryCursor and walk down the list of vectors + * instead of using this interface to obtain the physical address. + * Use of this routine is therefore discouraged. + * @param ptr A pointer to data stored in an mbuf. + * @result The 64 bit physical address of the mbuf data or NULL if ptr + * does not point to data stored in an mbuf. */ extern addr64_t mbuf_data_to_physical(void *ptr); @@ -361,90 +361,90 @@ extern addr64_t mbuf_data_to_physical(void *ptr); /* Allocation */ /*! - @function mbuf_get - @discussion Allocates an mbuf without a cluster for external data. - @param how Blocking or non-blocking. - @param type The type of the mbuf. - @param mbuf The mbuf. - @result 0 on success, errno error on failure. + * @function mbuf_get + * @discussion Allocates an mbuf without a cluster for external data. + * @param how Blocking or non-blocking. + * @param type The type of the mbuf. + * @param mbuf The mbuf. + * @result 0 on success, errno error on failure. */ extern errno_t mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf); /*! - @function mbuf_gethdr - @discussion Allocates an mbuf without a cluster for external data. - Sets a flag to indicate there is a packet header and initializes - the packet header. - @param how Blocking or non-blocking. - @param type The type of the mbuf. - @param mbuf The mbuf. - @result 0 on success, errno error on failure. 
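For reference, a typical non-blocking allocation through mbuf_gethdr() looks like the following sketch (the helper name is hypothetical):

#include <sys/kpi_mbuf.h>

static errno_t
alloc_empty_packet(mbuf_t *out)
{
    mbuf_t m = NULL;
    errno_t err;

    /* MBUF_DONTWAIT: fail rather than sleep, which is the right
     * choice on the data path. */
    err = mbuf_gethdr(MBUF_DONTWAIT, MBUF_TYPE_DATA, &m);
    if (err != 0)
        return err;

    *out = m;       /* packet header flag and fields are set up */
    return 0;
}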
+ * @function mbuf_gethdr + * @discussion Allocates an mbuf without a cluster for external data. + * Sets a flag to indicate there is a packet header and initializes + * the packet header. + * @param how Blocking or non-blocking. + * @param type The type of the mbuf. + * @param mbuf The mbuf. + * @result 0 on success, errno error on failure. */ extern errno_t mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf); /*! - @function mbuf_attachcluster - @discussion Attach an external buffer as a cluster for an mbuf. If mbuf - points to a NULL mbuf_t, an mbuf will be allocated for you. If - mbuf points to a non-NULL mbuf_t, the user-supplied mbuf will - be used instead. The caller is responsible for allocating the - external buffer by calling mbuf_alloccluster(). - @param how Blocking or non-blocking. - @param type The type of the mbuf if mbuf is non-NULL; otherwise ignored. - @param mbuf Pointer to the address of the mbuf; if NULL, an mbuf will - be allocated, otherwise, it must point to a valid mbuf address. - If the user-supplied mbuf is already attached to a cluster, the - current cluster will be freed before the mbuf gets attached to - the supplied external buffer. Note that this routine may return - a different mbuf_t than the one you passed in. - @param extbuf Address of the external buffer. - @param extfree Free routine for the external buffer; the caller is - required to defined a routine that will be invoked when the - mbuf is freed. - @param extsize Size of the external buffer. - @param extarg Private value that will be passed to the free routine - when it is called at the time the mbuf is freed. - @result 0 on success - EINVAL - Invalid parameter - ENOMEM - Not enough memory available + * @function mbuf_attachcluster + * @discussion Attach an external buffer as a cluster for an mbuf. If mbuf + * points to a NULL mbuf_t, an mbuf will be allocated for you. If + * mbuf points to a non-NULL mbuf_t, the user-supplied mbuf will + * be used instead. The caller is responsible for allocating the + * external buffer by calling mbuf_alloccluster(). + * @param how Blocking or non-blocking. + * @param type The type of the mbuf if mbuf is non-NULL; otherwise ignored. + * @param mbuf Pointer to the address of the mbuf; if NULL, an mbuf will + * be allocated, otherwise, it must point to a valid mbuf address. + * If the user-supplied mbuf is already attached to a cluster, the + * current cluster will be freed before the mbuf gets attached to + * the supplied external buffer. Note that this routine may return + * a different mbuf_t than the one you passed in. + * @param extbuf Address of the external buffer. + * @param extfree Free routine for the external buffer; the caller is + * required to defined a routine that will be invoked when the + * mbuf is freed. + * @param extsize Size of the external buffer. + * @param extarg Private value that will be passed to the free routine + * when it is called at the time the mbuf is freed. + * @result 0 on success + * EINVAL - Invalid parameter + * ENOMEM - Not enough memory available */ extern errno_t mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, - mbuf_t *mbuf, caddr_t extbuf, void (*extfree)(caddr_t , u_int, caddr_t), + mbuf_t *mbuf, caddr_t extbuf, void (*extfree)(caddr_t, u_int, caddr_t), size_t extsize, caddr_t extarg); /*! - @function mbuf_alloccluster - @discussion Allocate a cluster that can be later attached to an - mbuf by calling mbuf_attachcluster(). 
The allocated cluster - can also be freed (without being attached to an mbuf) by - calling mbuf_freecluster(). At the moment this routine - will either return a cluster of 2048, 4096 or 16384 bytes - depending on the requested size. Note that clusters greater - than 4096 bytes might not be available in all configurations; - the caller must additionally check for ENOTSUP (see below). - @param how Blocking or non-blocking. - @param size Pointer to size of requested cluster. Sizes up to 2048 - will be rounded up to 2048; sizes greater than 2048 and up - to 4096 will be rounded up to 4096. Sizes greater than 4096 - will be rounded up to 16384. - @param addr Pointer to the address of the requested cluster. - @result 0 on success or ENOMEM if failure. If the caller requests - greater than 4096 bytes and the system is unable to fulfill - the request due to the lack of jumbo clusters support based - on the configuration, this routine will return ENOTSUP. - In this case, the caller is advised to use 4096 bytes or - smaller during subseqent requests. + * @function mbuf_alloccluster + * @discussion Allocate a cluster that can be later attached to an + * mbuf by calling mbuf_attachcluster(). The allocated cluster + * can also be freed (without being attached to an mbuf) by + * calling mbuf_freecluster(). At the moment this routine + * will either return a cluster of 2048, 4096 or 16384 bytes + * depending on the requested size. Note that clusters greater + * than 4096 bytes might not be available in all configurations; + * the caller must additionally check for ENOTSUP (see below). + * @param how Blocking or non-blocking. + * @param size Pointer to size of requested cluster. Sizes up to 2048 + * will be rounded up to 2048; sizes greater than 2048 and up + * to 4096 will be rounded up to 4096. Sizes greater than 4096 + * will be rounded up to 16384. + * @param addr Pointer to the address of the requested cluster. + * @result 0 on success or ENOMEM if failure. If the caller requests + * greater than 4096 bytes and the system is unable to fulfill + * the request due to the lack of jumbo clusters support based + * on the configuration, this routine will return ENOTSUP. + * In this case, the caller is advised to use 4096 bytes or + * smaller during subseqent requests. */ extern errno_t mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr); /*! - @function mbuf_freecluster - @discussion Free a cluster that was previously allocated by a call - to mbuf_alloccluster(). The caller must pass the actual - size of the cluster as returned by mbuf_alloccluster(), - which at this point must be either 2048, 4096 or 16384 bytes. - @param addr The address of the cluster. - @param size The actual size of the cluster. + * @function mbuf_freecluster + * @discussion Free a cluster that was previously allocated by a call + * to mbuf_alloccluster(). The caller must pass the actual + * size of the cluster as returned by mbuf_alloccluster(), + * which at this point must be either 2048, 4096 or 16384 bytes. + * @param addr The address of the cluster. + * @param size The actual size of the cluster. */ extern void mbuf_freecluster(caddr_t addr, size_t size); @@ -463,370 +463,370 @@ extern errno_t mbuf_cluster_get_prop(mbuf_t mbuf, u_int32_t *prop); #endif /* BSD_KERNEL_PRIVATE */ /*! - @function mbuf_getcluster - @discussion Allocate a cluster of the requested size and attach it to - an mbuf for use as external data. If mbuf points to a NULL - mbuf_t, an mbuf will be allocated for you. 
If mbuf points to - a non-NULL mbuf_t, mbuf_getcluster may return a different - mbuf_t than the one you passed in. - @param how Blocking or non-blocking. - @param type The type of the mbuf. - @param size The size of the cluster to be allocated. Supported sizes - for a cluster are be 2048, 4096, or 16384. Any other value - with return EINVAL. Note that clusters greater than 4096 - bytes might not be available in all configurations; the - caller must additionally check for ENOTSUP (see below). - @param mbuf The mbuf the cluster will be attached to. - @result 0 on success, errno error on failure. If you specified NULL - for the mbuf, any intermediate mbuf that may have been allocated - will be freed. If you specify an mbuf value in *mbuf, - mbuf_mclget will not free it. - EINVAL - Invalid parameter - ENOMEM - Not enough memory available - ENOTSUP - The caller had requested greater than 4096 bytes - cluster and the system is unable to fulfill it due to the - lack of jumbo clusters support based on the configuration. - In this case, the caller is advised to use 4096 bytes or - smaller during subsequent requests. + * @function mbuf_getcluster + * @discussion Allocate a cluster of the requested size and attach it to + * an mbuf for use as external data. If mbuf points to a NULL + * mbuf_t, an mbuf will be allocated for you. If mbuf points to + * a non-NULL mbuf_t, mbuf_getcluster may return a different + * mbuf_t than the one you passed in. + * @param how Blocking or non-blocking. + * @param type The type of the mbuf. + * @param size The size of the cluster to be allocated. Supported sizes + * for a cluster are be 2048, 4096, or 16384. Any other value + * with return EINVAL. Note that clusters greater than 4096 + * bytes might not be available in all configurations; the + * caller must additionally check for ENOTSUP (see below). + * @param mbuf The mbuf the cluster will be attached to. + * @result 0 on success, errno error on failure. If you specified NULL + * for the mbuf, any intermediate mbuf that may have been allocated + * will be freed. If you specify an mbuf value in *mbuf, + * mbuf_mclget will not free it. + * EINVAL - Invalid parameter + * ENOMEM - Not enough memory available + * ENOTSUP - The caller had requested greater than 4096 bytes + * cluster and the system is unable to fulfill it due to the + * lack of jumbo clusters support based on the configuration. + * In this case, the caller is advised to use 4096 bytes or + * smaller during subsequent requests. */ extern errno_t mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t *mbuf); /*! - @function mbuf_mclget - @discussion Allocate a cluster and attach it to an mbuf for use as - external data. If mbuf points to a NULL mbuf_t, an mbuf will be - allocated for you. If mbuf points to a non-NULL mbuf_t, - mbuf_mclget may return a different mbuf_t than the one you - passed in. - @param how Blocking or non-blocking. - @param type The type of the mbuf. - @param mbuf The mbuf the cluster will be attached to. - @result 0 on success, errno error on failure. If you specified NULL - for the mbuf, any intermediate mbuf that may have been allocated - will be freed. If you specify an mbuf value in *mbuf, - mbuf_mclget will not free it. + * @function mbuf_mclget + * @discussion Allocate a cluster and attach it to an mbuf for use as + * external data. If mbuf points to a NULL mbuf_t, an mbuf will be + * allocated for you. 
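A sketch of the common case for mbuf_getcluster(): request a standard 2048-byte cluster without blocking, letting the KPI allocate the mbuf as well (helper name hypothetical):

#include <sys/kpi_mbuf.h>

static errno_t
alloc_cluster_mbuf(mbuf_t *out)
{
    mbuf_t m = NULL;    /* NULL: the KPI allocates the mbuf too */
    errno_t err;

    err = mbuf_getcluster(MBUF_DONTWAIT, MBUF_TYPE_DATA, 2048, &m);
    if (err != 0)
        return err;     /* EINVAL or ENOMEM here; ENOTSUP only
                         * applies to requests above 4096 bytes */
    *out = m;
    return 0;
}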
If mbuf points to a non-NULL mbuf_t, + * mbuf_mclget may return a different mbuf_t than the one you + * passed in. + * @param how Blocking or non-blocking. + * @param type The type of the mbuf. + * @param mbuf The mbuf the cluster will be attached to. + * @result 0 on success, errno error on failure. If you specified NULL + * for the mbuf, any intermediate mbuf that may have been allocated + * will be freed. If you specify an mbuf value in *mbuf, + * mbuf_mclget will not free it. */ extern errno_t mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf); /*! - @function mbuf_allocpacket - @discussion Allocate an mbuf chain to store a single packet of the - requested length. According to the requested length, a chain - of mbufs will be created. The mbuf type will be set to - MBUF_TYPE_DATA. The caller may specify the maximum number of - buffer. - @param how Blocking or non-blocking - @param packetlen The total length of the packet mbuf to be allocated. - The length must be greater than zero. - @param maxchunks An input/output pointer to the maximum number of mbufs - segments making up the chain. On input, if maxchunks is NULL, - or the value pointed to by maxchunks is zero, the packet will - be made up of as few or as many buffer segments as necessary - to fit the length. The allocation will fail with ENOBUFS if - the number of segments requested is too small and the sum of - the maximum size of each individual segment is less than the - packet length. On output, if the allocation succeed and - maxchunks is non-NULL, it will point to the actual number - of segments allocated. - Additional notes for packetlen greater than 4096 bytes: - the caller may pass a non-NULL maxchunks and initialize it - with zero such that upon success, it can find out whether - or not the system configuration allows for larger than - 4096 bytes cluster allocations, by checking on the value - pointed to by maxchunks. E.g. a request for 9018 bytes may - result in 1 chunk when jumbo clusters are available, or - 3 chunks otherwise. - @param mbuf Upon success, *mbuf will be a reference to the new mbuf. - @result Returns 0 upon success or the following error code: - EINVAL - Invalid parameter - ENOMEM - Not enough memory available - ENOBUFS - Buffers not big enough for the maximum number of - chunks requested -*/ + * @function mbuf_allocpacket + * @discussion Allocate an mbuf chain to store a single packet of the + * requested length. According to the requested length, a chain + * of mbufs will be created. The mbuf type will be set to + * MBUF_TYPE_DATA. The caller may specify the maximum number of + * buffer. + * @param how Blocking or non-blocking + * @param packetlen The total length of the packet mbuf to be allocated. + * The length must be greater than zero. + * @param maxchunks An input/output pointer to the maximum number of mbufs + * segments making up the chain. On input, if maxchunks is NULL, + * or the value pointed to by maxchunks is zero, the packet will + * be made up of as few or as many buffer segments as necessary + * to fit the length. The allocation will fail with ENOBUFS if + * the number of segments requested is too small and the sum of + * the maximum size of each individual segment is less than the + * packet length. On output, if the allocation succeed and + * maxchunks is non-NULL, it will point to the actual number + * of segments allocated. 
+ * Additional notes for packetlen greater than 4096 bytes: + * the caller may pass a non-NULL maxchunks and initialize it + * with zero such that upon success, it can find out whether + * or not the system configuration allows for larger than + * 4096 bytes cluster allocations, by checking on the value + * pointed to by maxchunks. E.g. a request for 9018 bytes may + * result in 1 chunk when jumbo clusters are available, or + * 3 chunks otherwise. + * @param mbuf Upon success, *mbuf will be a reference to the new mbuf. + * @result Returns 0 upon success or the following error code: + * EINVAL - Invalid parameter + * ENOMEM - Not enough memory available + * ENOBUFS - Buffers not big enough for the maximum number of + * chunks requested + */ extern errno_t mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int * maxchunks, mbuf_t *mbuf); /*! - @function mbuf_allocpacket_list - @discussion Allocate a linked list of packets. According to the - requested length, each packet will made of a chain of one - or more mbufs. The mbuf type will be set to MBUF_TYPE_DATA. - The caller may specify the maximum number of element for - each mbuf chain making up a packet. - @param numpkts Number of packets to allocate - @param how Blocking or non-blocking - @param packetlen The total length of the packet mbuf to be allocated. - The length must be greater than zero. - @param maxchunks An input/output pointer to the maximum number of - mbufs segments making up the chain. On input, if maxchunks is - zero, or the value pointed to by maxchunks is zero, the packet - will be made of as few or as many buffer segments as necessary - to fit the length. The allocation will fail with ENOBUFS if - the number of segments requested is too small and the sum of - the maximum size of each individual segment is less than the - packet length. On output, if the allocation succeed and - maxchunks is non zero, it will point to the actual number - of segments allocated. - Additional notes for packetlen greater than 4096 bytes: - the caller may pass a non-NULL maxchunks and initialize it - with zero such that upon success, it can find out whether - or not the system configuration allows for larger than - 4096 bytes cluster allocations, by checking on the value - pointed to by maxchunks. E.g. a request for 9018 bytes may - result in 1 chunk when jumbo clusters are available, or - 3 chunks otherwise. - @param mbuf Upon success, *mbuf will be a reference to the new mbuf. - @result Returns 0 upon success or the following error code: - EINVAL - Invalid parameter - ENOMEM - Not enough memory available - ENOBUFS - Buffers not big enough for the maximum number of - chunks requested -*/ + * @function mbuf_allocpacket_list + * @discussion Allocate a linked list of packets. According to the + * requested length, each packet will made of a chain of one + * or more mbufs. The mbuf type will be set to MBUF_TYPE_DATA. + * The caller may specify the maximum number of element for + * each mbuf chain making up a packet. + * @param numpkts Number of packets to allocate + * @param how Blocking or non-blocking + * @param packetlen The total length of the packet mbuf to be allocated. + * The length must be greater than zero. + * @param maxchunks An input/output pointer to the maximum number of + * mbufs segments making up the chain. On input, if maxchunks is + * zero, or the value pointed to by maxchunks is zero, the packet + * will be made of as few or as many buffer segments as necessary + * to fit the length. 
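The maxchunks probing described in the notes above can be written as the following sketch: initialize the count to zero, and on success it reports how many segments were actually used (e.g. whether jumbo clusters were available on this configuration; helper name hypothetical):

#include <sys/kpi_mbuf.h>

static errno_t
alloc_9k_packet(mbuf_t *out)
{
    unsigned int chunks = 0;    /* 0 on input: no cap on segments */
    errno_t err;

    err = mbuf_allocpacket(MBUF_WAITOK, 9018, &chunks, out);
    if (err != 0)
        return err;

    /* chunks == 1 implies jumbo clusters were available;
     * 3 is typical otherwise, per the note above. */
    return 0;
}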
The allocation will fail with ENOBUFS if + * the number of segments requested is too small and the sum of + * the maximum size of each individual segment is less than the + * packet length. On output, if the allocation succeed and + * maxchunks is non zero, it will point to the actual number + * of segments allocated. + * Additional notes for packetlen greater than 4096 bytes: + * the caller may pass a non-NULL maxchunks and initialize it + * with zero such that upon success, it can find out whether + * or not the system configuration allows for larger than + * 4096 bytes cluster allocations, by checking on the value + * pointed to by maxchunks. E.g. a request for 9018 bytes may + * result in 1 chunk when jumbo clusters are available, or + * 3 chunks otherwise. + * @param mbuf Upon success, *mbuf will be a reference to the new mbuf. + * @result Returns 0 upon success or the following error code: + * EINVAL - Invalid parameter + * ENOMEM - Not enough memory available + * ENOBUFS - Buffers not big enough for the maximum number of + * chunks requested + */ extern errno_t mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen, unsigned int * maxchunks, mbuf_t *mbuf); /*! - @function mbuf_getpacket - @discussion Allocate an mbuf, allocate and attach a cluster, and set - the packet header flag. - @param how Blocking or non-blocking. - @param mbuf Upon success, *mbuf will be a reference to the new mbuf. - @result 0 on success, errno error on failure. + * @function mbuf_getpacket + * @discussion Allocate an mbuf, allocate and attach a cluster, and set + * the packet header flag. + * @param how Blocking or non-blocking. + * @param mbuf Upon success, *mbuf will be a reference to the new mbuf. + * @result 0 on success, errno error on failure. */ extern errno_t mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf); /*! - @function mbuf_free - @discussion Frees a single mbuf. Not commonly used because it - doesn't touch the rest of the mbufs on the chain. - @param mbuf The mbuf to free. - @result The next mbuf in the chain. + * @function mbuf_free + * @discussion Frees a single mbuf. Not commonly used because it + * doesn't touch the rest of the mbufs on the chain. + * @param mbuf The mbuf to free. + * @result The next mbuf in the chain. */ extern mbuf_t mbuf_free(mbuf_t mbuf); /*! - @function mbuf_freem - @discussion Frees a chain of mbufs link through mnext. - @param mbuf The first mbuf in the chain to free. + * @function mbuf_freem + * @discussion Frees a chain of mbufs link through mnext. + * @param mbuf The first mbuf in the chain to free. */ extern void mbuf_freem(mbuf_t mbuf); /*! - @function mbuf_freem_list - @discussion Frees linked list of mbuf chains. Walks through - mnextpackt and does the equivalent of mbuf_freem to each. - @param mbuf The first mbuf in the linked list to free. - @result The number of mbufs freed. + * @function mbuf_freem_list + * @discussion Frees linked list of mbuf chains. Walks through + * mnextpackt and does the equivalent of mbuf_freem to each. + * @param mbuf The first mbuf in the linked list to free. + * @result The number of mbufs freed. */ extern int mbuf_freem_list(mbuf_t mbuf); /*! - @function mbuf_leadingspace - @discussion Determines the space available in the mbuf proceeding - the current data. - @param mbuf The mbuf. - @result The number of unused bytes at the start of the mbuf. + * @function mbuf_leadingspace + * @discussion Determines the space available in the mbuf proceeding + * the current data. + * @param mbuf The mbuf. 
/*!
- @function mbuf_leadingspace
- @discussion Determines the space available in the mbuf proceeding
- the current data.
- @param mbuf The mbuf.
- @result The number of unused bytes at the start of the mbuf.
+ * @function mbuf_leadingspace
+ * @discussion Determines the space available in the mbuf preceding
+ * the current data.
+ * @param mbuf The mbuf.
+ * @result The number of unused bytes at the start of the mbuf.
 */
extern size_t mbuf_leadingspace(const mbuf_t mbuf);
/*!
- @function mbuf_trailingspace
- @discussion Determines the space available in the mbuf following
- the current data.
- @param mbuf The mbuf.
- @result The number of unused bytes following the current data.
+ * @function mbuf_trailingspace
+ * @discussion Determines the space available in the mbuf following
+ * the current data.
+ * @param mbuf The mbuf.
+ * @result The number of unused bytes following the current data.
 */
extern size_t mbuf_trailingspace(const mbuf_t mbuf);
/* Manipulation */
/*!
- @function mbuf_copym
- @discussion Copies len bytes from offset from src to a new mbuf. If
- the original mbuf contains a packet header, the new mbuf will
- contain similar packet header except for any tags which may be
- associated with the original mbuf. mbuf_dup() should be used
- instead if tags are to be copied to the new mbuf.
- @param src The source mbuf.
- @param offset The offset in the mbuf to start copying from.
- @param len The the number of bytes to copy.
- @param how To block or not to block, that is a question.
- @param new_mbuf Upon success, the newly allocated mbuf.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_copym
+ * @discussion Copies len bytes from offset from src to a new mbuf. If
+ * the original mbuf contains a packet header, the new mbuf will
+ * contain a similar packet header except for any tags which may be
+ * associated with the original mbuf. mbuf_dup() should be used
+ * instead if tags are to be copied to the new mbuf.
+ * @param src The source mbuf.
+ * @param offset The offset in the mbuf to start copying from.
+ * @param len The number of bytes to copy.
+ * @param how To block or not to block, that is a question.
+ * @param new_mbuf Upon success, the newly allocated mbuf.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_copym(const mbuf_t src, size_t offset, size_t len, mbuf_how_t how, mbuf_t *new_mbuf);
/*!
- @function mbuf_dup
- @discussion Exactly duplicates an mbuf chain. If the original mbuf
- contains a packet header (including tags), the new mbuf will have
- the same packet header contents and a copy of each tag associated
- with the original mbuf.
- @param src The source mbuf.
- @param how Blocking or non-blocking.
- @param new_mbuf Upon success, the newly allocated mbuf.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_dup
+ * @discussion Exactly duplicates an mbuf chain. If the original mbuf
+ * contains a packet header (including tags), the new mbuf will have
+ * the same packet header contents and a copy of each tag associated
+ * with the original mbuf.
+ * @param src The source mbuf.
+ * @param how Blocking or non-blocking.
+ * @param new_mbuf Upon success, the newly allocated mbuf.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf);
/*!
- @function mbuf_prepend
- @discussion Prepend len bytes to an mbuf. If there is space
- (mbuf_leadingspace >= len), the mbuf's data ptr is changed and
- the same mbuf is returned. If there is no space, a new mbuf may
- be allocated and prepended to the mbuf chain. If the operation
- fails, the mbuf may be freed (*mbuf will be NULL).
- @param mbuf The mbuf to prepend data to. This may change if a new
- mbuf must be allocated or may be NULL if the operation fails.
- @param len The length, in bytes, to be prepended to the mbuf.
- @param how Blocking or non-blocking. - @result 0 upon success otherwise the errno error. + * @function mbuf_prepend + * @discussion Prepend len bytes to an mbuf. If there is space + * (mbuf_leadingspace >= len), the mbuf's data ptr is changed and + * the same mbuf is returned. If there is no space, a new mbuf may + * be allocated and prepended to the mbuf chain. If the operation + * fails, the mbuf may be freed (*mbuf will be NULL). + * @param mbuf The mbuf to prepend data to. This may change if a new + * mbuf must be allocated or may be NULL if the operation fails. + * @param len The length, in bytes, to be prepended to the mbuf. + * @param how Blocking or non-blocking. + * @result 0 upon success otherwise the errno error. */ extern errno_t mbuf_prepend(mbuf_t *mbuf, size_t len, mbuf_how_t how); /*! - @function mbuf_split - @discussion Split an mbuf chain at a specific offset. - @param src The mbuf to be split. - @param offset The offset in the buffer where the mbuf should be - split. - @param how Blocking or non-blocking. - @param new_mbuf Upon success, the second half of the split mbuf - chain. - @result 0 upon success otherwise the errno error. In the case of - failure, the original mbuf chain passed in to src will be - preserved. + * @function mbuf_split + * @discussion Split an mbuf chain at a specific offset. + * @param src The mbuf to be split. + * @param offset The offset in the buffer where the mbuf should be + * split. + * @param how Blocking or non-blocking. + * @param new_mbuf Upon success, the second half of the split mbuf + * chain. + * @result 0 upon success otherwise the errno error. In the case of + * failure, the original mbuf chain passed in to src will be + * preserved. */ extern errno_t mbuf_split(mbuf_t src, size_t offset, mbuf_how_t how, mbuf_t *new_mbuf); /*! - @function mbuf_pullup - @discussion Move the next len bytes in to mbuf from other mbufs in - the chain. This is commonly used to get the IP and TCP or UDP - header contiguous in the first mbuf. If mbuf_pullup fails, the - entire mbuf chain will be freed. - @param mbuf The mbuf in the chain the data should be contiguous in. - @param len The number of bytes to pull from the next mbuf(s). - @result 0 upon success otherwise the errno error. In the case of an - error, the mbuf chain has been freed. + * @function mbuf_pullup + * @discussion Move the next len bytes in to mbuf from other mbufs in + * the chain. This is commonly used to get the IP and TCP or UDP + * header contiguous in the first mbuf. If mbuf_pullup fails, the + * entire mbuf chain will be freed. + * @param mbuf The mbuf in the chain the data should be contiguous in. + * @param len The number of bytes to pull from the next mbuf(s). + * @result 0 upon success otherwise the errno error. In the case of an + * error, the mbuf chain has been freed. */ extern errno_t mbuf_pullup(mbuf_t *mbuf, size_t len); /*! - @function mbuf_pulldown - @discussion Make length bytes at offset in the mbuf chain - contiguous. Nothing before offset bytes in the chain will be - modified. Upon return, location will be the mbuf the data is - contiguous in and offset will be the offset in that mbuf at - which the data is located. In the case of a failure, the mbuf - chain will be freed. - @param src The start of the mbuf chain. - @param offset Pass in a pointer to a value with the offset of the - data you're interested in making contiguous. Upon success, this - will be overwritten with the offset from the mbuf returned in - location. 
- @param length The length of data that should be made contiguous.
- @param location Upon success, *location will be the mbuf the data is
- in.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_pulldown
+ * @discussion Make length bytes at offset in the mbuf chain
+ * contiguous. Nothing before offset bytes in the chain will be
+ * modified. Upon return, location will be the mbuf the data is
+ * contiguous in and offset will be the offset in that mbuf at
+ * which the data is located. In the case of a failure, the mbuf
+ * chain will be freed.
+ * @param src The start of the mbuf chain.
+ * @param offset Pass in a pointer to a value with the offset of the
+ * data you're interested in making contiguous. Upon success, this
+ * will be overwritten with the offset from the mbuf returned in
+ * location.
+ * @param length The length of data that should be made contiguous.
+ * @param location Upon success, *location will be the mbuf the data is
+ * in.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_pulldown(mbuf_t src, size_t *offset, size_t length, mbuf_t *location);
/*!
- @function mbuf_adj
- @discussion Trims len bytes from the mbuf. If the length is greater
- than zero, the bytes are trimmed from the front of the mbuf. If
- the length is less than zero, the bytes are trimmed from the end
- of the mbuf chain.
- @param mbuf The mbuf chain to trim.
- @param len The number of bytes to trim from the mbuf chain.
+ * @function mbuf_adj
+ * @discussion Trims len bytes from the mbuf. If the length is greater
+ * than zero, the bytes are trimmed from the front of the mbuf. If
+ * the length is less than zero, the bytes are trimmed from the end
+ * of the mbuf chain.
+ * @param mbuf The mbuf chain to trim.
+ * @param len The number of bytes to trim from the mbuf chain.
 */
extern void mbuf_adj(mbuf_t mbuf, int len);
/*!
- @function mbuf_adjustlen
- @discussion Adds amount to the mbuf len. Verifies that the new
- length is valid (greater than or equal to zero and less than
- maximum amount of data that may be stored in the mbuf). This
- function will not adjust the packet header length field or
- affect any other mbufs in a chain.
- @param mbuf The mbuf to adjust.
- @param amount The number of bytes increment the length by.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_adjustlen
+ * @discussion Adds amount to the mbuf len. Verifies that the new
+ * length is valid (greater than or equal to zero and less than the
+ * maximum amount of data that may be stored in the mbuf). This
+ * function will not adjust the packet header length field or
+ * affect any other mbufs in a chain.
+ * @param mbuf The mbuf to adjust.
+ * @param amount The number of bytes to increment the length by.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_adjustlen(mbuf_t mbuf, int amount);
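To make the classic use of mbuf_pullup concrete, a sketch of validating an IPv4 header on an inbound chain (the 20-byte minimum header size and the use of struct ip from <netinet/ip.h> are assumptions of this example, not statements from this header):

static errno_t
pullup_ip_header_example(mbuf_t *pkt)
{
	errno_t err;

	/* ensure the fixed IPv4 header is contiguous in the first mbuf */
	err = mbuf_pullup(pkt, 20);
	if (err != 0) {
		/* on failure the whole chain was freed; *pkt is no longer valid */
		return err;
	}
	struct ip *iph = mbuf_data(*pkt);
	(void)iph;	/* header fields may now be read directly */
	return 0;
}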
/*!
- @function mbuf_concatenate
- @discussion Concatenate mbuf chain src to dst using m_next and return
- a chain which represents the concatenated chain. The routine
- does not prevent two chains of different mbuf types to be
- concatenated, nor does it modify any packet header in the
- destination chain. Therefore, it's the responsibility of the
- caller to ensure that the resulted concatenated mbuf chain is
- correct for further usages.
- @param dst The destination mbuf chain.
- @param src The source mbuf chain.
- @result A pointer to the head of the concatenated mbuf chain. This
- should be treated as the updated destination mbuf chain; the
- caller must no longer refer to the original src or dst mbuf
- chain. Otherwise it returns NULL if the original dst mbuf
- chain is NULL.
+ * @function mbuf_concatenate
+ * @discussion Concatenate mbuf chain src to dst using m_next and return
+ * a chain which represents the concatenated chain. The routine
+ * does not prevent two chains of different mbuf types from being
+ * concatenated, nor does it modify any packet header in the
+ * destination chain. Therefore, it's the responsibility of the
+ * caller to ensure that the resulting concatenated mbuf chain is
+ * correct for further usage.
+ * @param dst The destination mbuf chain.
+ * @param src The source mbuf chain.
+ * @result A pointer to the head of the concatenated mbuf chain. This
+ * should be treated as the updated destination mbuf chain; the
+ * caller must no longer refer to the original src or dst mbuf
+ * chain. Otherwise it returns NULL if the original dst mbuf
+ * chain is NULL.
 */
extern mbuf_t mbuf_concatenate(mbuf_t dst, mbuf_t src);
/*!
- @function mbuf_copydata
- @discussion Copies data out of an mbuf in to a specified buffer. If
- the data is stored in a chain of mbufs, the data will be copied
- from each mbuf in the chain until length bytes have been copied.
- @param mbuf The mbuf chain to copy data out of.
- @param offset The offset in to the mbuf to start copying.
- @param length The number of bytes to copy.
- @param out_data A pointer to the location where the data will be
- copied.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_copydata
+ * @discussion Copies data out of an mbuf in to a specified buffer. If
+ * the data is stored in a chain of mbufs, the data will be copied
+ * from each mbuf in the chain until length bytes have been copied.
+ * @param mbuf The mbuf chain to copy data out of.
+ * @param offset The offset in to the mbuf to start copying.
+ * @param length The number of bytes to copy.
+ * @param out_data A pointer to the location where the data will be
+ * copied.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_copydata(const mbuf_t mbuf, size_t offset, size_t length, void *out_data);
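A small sketch of mbuf_copydata pulling a fixed-size header out of a possibly fragmented chain (the 20-byte offset, which assumes a minimal IPv4 header, is illustrative):

static errno_t
peek_udp_header_example(mbuf_t pkt)
{
	uint8_t udp_hdr[8];

	/* copies 8 bytes starting past the IPv4 header, even if the */
	/* bytes span several mbufs in the chain */
	return mbuf_copydata(pkt, 20, sizeof(udp_hdr), udp_hdr);
}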
/*!
- @function mbuf_copyback
- @discussion Copies data from a buffer to an mbuf chain.
- mbuf_copyback will grow the chain to fit the specified buffer.
-
- If mbuf_copydata is unable to allocate enough mbufs to grow the
- chain, ENOBUFS will be returned. The mbuf chain will be shorter
- than expected but all of the data up to the end of the mbuf
- chain will be valid.
-
- If an offset is specified, mbuf_copyback will skip that many
- bytes in the mbuf chain before starting to write the buffer in
- to the chain. If the mbuf chain does not contain this many
- bytes, mbufs will be allocated to create the space.
- @param mbuf The first mbuf in the chain to copy the data in to.
- @param offset Offset in bytes to skip before copying data.
- @param length The length, in bytes, of the data to copy in to the mbuf
- chain.
- @param data A pointer to data in the kernel's address space.
- @param how Blocking or non-blocking.
- @result 0 upon success, EINVAL or ENOBUFS upon failure.
+ * @function mbuf_copyback
+ * @discussion Copies data from a buffer to an mbuf chain.
+ * mbuf_copyback will grow the chain to fit the specified buffer.
+ *
+ * If mbuf_copyback is unable to allocate enough mbufs to grow the
+ * chain, ENOBUFS will be returned. The mbuf chain will be shorter
+ * than expected but all of the data up to the end of the mbuf
+ * chain will be valid.
+ *
+ * If an offset is specified, mbuf_copyback will skip that many
+ * bytes in the mbuf chain before starting to write the buffer in
+ * to the chain. If the mbuf chain does not contain this many
+ * bytes, mbufs will be allocated to create the space.
+ * @param mbuf The first mbuf in the chain to copy the data in to.
+ * @param offset Offset in bytes to skip before copying data.
+ * @param length The length, in bytes, of the data to copy in to the mbuf
+ * chain.
+ * @param data A pointer to data in the kernel's address space.
+ * @param how Blocking or non-blocking.
+ * @result 0 upon success, EINVAL or ENOBUFS upon failure.
 */
extern errno_t mbuf_copyback(mbuf_t mbuf, size_t offset, size_t length, const void *data, mbuf_how_t how);
/*!
- @function mbuf_mclhasreference
- @discussion Check if a cluster of an mbuf is referenced by another mbuf.
- References may be taken, for example, as a result of a call to
- mbuf_split or mbuf_copym
- @param mbuf The mbuf with the cluster to test.
- @result 0 if there is no reference by another mbuf, 1 otherwise.
+ * @function mbuf_mclhasreference
+ * @discussion Check if a cluster of an mbuf is referenced by another mbuf.
+ * References may be taken, for example, as a result of a call to
+ * mbuf_split or mbuf_copym.
+ * @param mbuf The mbuf with the cluster to test.
+ * @result 0 if there is no reference by another mbuf, 1 otherwise.
 */
extern int mbuf_mclhasreference(mbuf_t mbuf);
@@ -834,463 +834,463 @@ extern int mbuf_mclhasreference(mbuf_t mbuf);
/* mbuf header */
/*!
- @function mbuf_next
- @discussion Returns the next mbuf in the chain.
- @param mbuf The mbuf.
- @result The next mbuf in the chain.
+ * @function mbuf_next
+ * @discussion Returns the next mbuf in the chain.
+ * @param mbuf The mbuf.
+ * @result The next mbuf in the chain.
 */
extern mbuf_t mbuf_next(const mbuf_t mbuf);
/*!
- @function mbuf_setnext
- @discussion Sets the next mbuf in the chain.
- @param mbuf The mbuf.
- @param next The new next mbuf.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_setnext
+ * @discussion Sets the next mbuf in the chain.
+ * @param mbuf The mbuf.
+ * @param next The new next mbuf.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_setnext(mbuf_t mbuf, mbuf_t next);
/*!
- @function mbuf_nextpkt
- @discussion Gets the next packet from the mbuf.
- @param mbuf The mbuf.
- @result The nextpkt.
+ * @function mbuf_nextpkt
+ * @discussion Gets the next packet from the mbuf.
+ * @param mbuf The mbuf.
+ * @result The nextpkt.
 */
extern mbuf_t mbuf_nextpkt(const mbuf_t mbuf);
/*!
- @function mbuf_setnextpkt
- @discussion Sets the next packet attached to this mbuf.
- @param mbuf The mbuf.
- @param nextpkt The new next packet.
+ * @function mbuf_setnextpkt
+ * @discussion Sets the next packet attached to this mbuf.
+ * @param mbuf The mbuf.
+ * @param nextpkt The new next packet.
 */
extern void mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt);
/*!
- @function mbuf_len
- @discussion Gets the length of data in this mbuf.
- @param mbuf The mbuf.
- @result The length.
+ * @function mbuf_len
+ * @discussion Gets the length of data in this mbuf.
+ * @param mbuf The mbuf.
+ * @result The length.
 */
extern size_t mbuf_len(const mbuf_t mbuf);
/*!
- @function mbuf_setlen
- @discussion Sets the length of data in this packet. Be careful to
- not set the length over the space available in the mbuf.
- @param mbuf The mbuf.
- @param len The new length.
+ * @function mbuf_setlen
+ * @discussion Sets the length of data in this packet. Be careful to
+ * not set the length over the space available in the mbuf.
+ * @param mbuf The mbuf.
+ * @param len The new length.
 */
extern void mbuf_setlen(mbuf_t mbuf, size_t len);
/*!
- @function mbuf_maxlen
- @discussion Retrieves the maximum length of data that may be stored
- in this mbuf. This value assumes that the data pointer was set
- to the start of the possible range for that pointer
- (mbuf_data_start).
- @param mbuf The mbuf.
- @result The maximum lenght of data for this mbuf.
+ * @function mbuf_maxlen
+ * @discussion Retrieves the maximum length of data that may be stored
+ * in this mbuf. This value assumes that the data pointer was set
+ * to the start of the possible range for that pointer
+ * (mbuf_data_start).
+ * @param mbuf The mbuf.
+ * @result The maximum length of data for this mbuf.
 */
extern size_t mbuf_maxlen(const mbuf_t mbuf);
/*!
- @function mbuf_type
- @discussion Gets the type of mbuf.
- @param mbuf The mbuf.
- @result The type.
+ * @function mbuf_type
+ * @discussion Gets the type of mbuf.
+ * @param mbuf The mbuf.
+ * @result The type.
 */
extern mbuf_type_t mbuf_type(const mbuf_t mbuf);
/*!
- @function mbuf_settype
- @discussion Sets the type of mbuf.
- @param mbuf The mbuf.
- @param new_type The new type.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_settype
+ * @discussion Sets the type of mbuf.
+ * @param mbuf The mbuf.
+ * @param new_type The new type.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type);
/*!
- @function mbuf_flags
- @discussion Returns the set flags.
- @param mbuf The mbuf.
- @result The flags.
+ * @function mbuf_flags
+ * @discussion Returns the set flags.
+ * @param mbuf The mbuf.
+ * @result The flags.
 */
extern mbuf_flags_t mbuf_flags(const mbuf_t mbuf);
/*!
- @function mbuf_setflags
- @discussion Sets the set of set flags.
- @param mbuf The mbuf.
- @param flags The flags that should be set, all other flags will be
- cleared. Certain flags such as MBUF_EXT cannot be altered.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_setflags
+ * @discussion Sets the set of flags.
+ * @param mbuf The mbuf.
+ * @param flags The flags that should be set, all other flags will be
+ * cleared. Certain flags such as MBUF_EXT cannot be altered.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags);
/*!
- @function mbuf_setflags_mask
- @discussion Useful for setting or clearing individual flags. Easier
- than calling mbuf_setflags(m, mbuf_flags(m) | M_FLAG).
- @param mbuf The mbuf.
- @param flags The flags that should be set or cleared. Certain flags
- such as MBUF_EXT cannot be altered.
- @param mask The mask controlling which flags will be modified.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_setflags_mask
+ * @discussion Useful for setting or clearing individual flags. Easier
+ * than calling mbuf_setflags(m, mbuf_flags(m) | M_FLAG).
+ * @param mbuf The mbuf.
+ * @param flags The flags that should be set or cleared. Certain flags
+ * such as MBUF_EXT cannot be altered.
+ * @param mask The mask controlling which flags will be modified.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask);
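As a worked illustration of the accessors above (a sketch, not part of the KPI): totalling the data bytes in a chain by walking the mnext links.

static size_t
chain_bytes_example(mbuf_t pkt)
{
	size_t total = 0;
	mbuf_t m;

	for (m = pkt; m != NULL; m = mbuf_next(m)) {
		total += mbuf_len(m);
	}
	/* for a packet header mbuf, this should equal mbuf_pkthdr_len(pkt) */
	return total;
}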
/*!
- @function mbuf_copy_pkthdr
- @discussion Copies the packet header from src to dest.
- @param src The mbuf from which the packet header will be copied.
- @param dest The mbuf to which the packet header will be copied.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_copy_pkthdr
+ * @discussion Copies the packet header from src to dest.
+ * @param src The mbuf from which the packet header will be copied.
+ * @param dest The mbuf to which the packet header will be copied.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src);
/*!
- @function mbuf_pkthdr_len
- @discussion Returns the length as reported by the packet header.
- @param mbuf The mbuf containing the packet header
- @result The length, in bytes, of the packet.
+ * @function mbuf_pkthdr_len
+ * @discussion Returns the length as reported by the packet header.
+ * @param mbuf The mbuf containing the packet header.
+ * @result The length, in bytes, of the packet.
 */
extern size_t mbuf_pkthdr_len(const mbuf_t mbuf);
/*!
- @function mbuf_pkthdr_setlen
- @discussion Sets the length of the packet in the packet header.
- @param mbuf The mbuf containing the packet header.
- @param len The new length of the packet.
+ * @function mbuf_pkthdr_setlen
+ * @discussion Sets the length of the packet in the packet header.
+ * @param mbuf The mbuf containing the packet header.
+ * @param len The new length of the packet.
 */
extern void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len);
#ifdef XNU_KERNEL_PRIVATE
/*!
- @function mbuf_pkthdr_maxlen
- @discussion Retrieves the maximum length of data that may be stored
- in this mbuf packet. This value assumes that the data pointer
- was set to the start of the possible range for that pointer
- for each mbuf in the packet chain
- @param mbuf The mbuf.
- @result The maximum lenght of data for this mbuf.
+ * @function mbuf_pkthdr_maxlen
+ * @discussion Retrieves the maximum length of data that may be stored
+ * in this mbuf packet. This value assumes that the data pointer
+ * was set to the start of the possible range for that pointer
+ * for each mbuf in the packet chain.
+ * @param mbuf The mbuf.
+ * @result The maximum length of data for this mbuf.
 */
extern size_t mbuf_pkthdr_maxlen(const mbuf_t mbuf);
#endif /* XNU_KERNEL_PRIVATE */
/*!
- @function mbuf_pkthdr_adjustlen
- @discussion Adjusts the length of the packet in the packet header.
- @param mbuf The mbuf containing the packet header.
- @param amount The number of bytes to adjust the packet header length
- field by.
+ * @function mbuf_pkthdr_adjustlen
+ * @discussion Adjusts the length of the packet in the packet header.
+ * @param mbuf The mbuf containing the packet header.
+ * @param amount The number of bytes to adjust the packet header length
+ * field by.
 */
extern void mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount);
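A small sketch tying the two length fields together (illustrative only): when code shortens a single-mbuf packet by hand, both the mbuf length and the packet header length should be kept in agreement.

static void
shrink_packet_example(mbuf_t pkt, size_t newlen)
{
	mbuf_setlen(pkt, newlen);		/* bytes in this mbuf */
	mbuf_pkthdr_setlen(pkt, newlen);	/* total bytes in the packet */
}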
/*!
- @function mbuf_pkthdr_rcvif
- @discussion Returns the interface the packet was received on. This
- funciton does not modify the reference count of the interface.
- The interface is only valid for as long as the mbuf is not freed
- and the rcvif for the mbuf is not changed. Take a reference on
- the interface that you will release later before doing any of
- the following: free the mbuf, change the rcvif, pass the mbuf to
- any function that may free the mbuf or change the rcvif.
- @param mbuf The mbuf containing the packet header.
- @result A reference to the interface.
+ * @function mbuf_pkthdr_rcvif
+ * @discussion Returns the interface the packet was received on. This
+ * function does not modify the reference count of the interface.
+ * The interface is only valid for as long as the mbuf is not freed
+ * and the rcvif for the mbuf is not changed. Take a reference on
+ * the interface that you will release later before doing any of
+ * the following: free the mbuf, change the rcvif, pass the mbuf to
+ * any function that may free the mbuf or change the rcvif.
+ * @param mbuf The mbuf containing the packet header.
+ * @result A reference to the interface.
 */
extern ifnet_t mbuf_pkthdr_rcvif(const mbuf_t mbuf);
/*!
- @function mbuf_pkthdr_setrcvif
- @discussion Sets the interface the packet was received on.
- @param mbuf The mbuf containing the packet header.
- @param ifp A reference to an interface.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_pkthdr_setrcvif
+ * @discussion Sets the interface the packet was received on.
+ * @param mbuf The mbuf containing the packet header.
+ * @param ifp A reference to an interface.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifp);
/*!
- @function mbuf_pkthdr_header
- @discussion Returns a pointer to the packet header.
- @param mbuf The mbuf containing the packet header.
- @result A pointer to the packet header.
+ * @function mbuf_pkthdr_header
+ * @discussion Returns a pointer to the packet header.
+ * @param mbuf The mbuf containing the packet header.
+ * @result A pointer to the packet header.
 */
extern void *mbuf_pkthdr_header(const mbuf_t mbuf);
/*!
- @function mbuf_pkthdr_setheader
- @discussion Sets the pointer to the packet header.
- @param mbuf The mbuf containing the packet header.
- @param header A pointer to the header.
+ * @function mbuf_pkthdr_setheader
+ * @discussion Sets the pointer to the packet header.
+ * @param mbuf The mbuf containing the packet header.
+ * @param header A pointer to the header.
 */
extern void mbuf_pkthdr_setheader(mbuf_t mbuf, void *header);
/* Checksums */
/*!
- @function mbuf_inbound_modified
- @discussion This function will clear the checksum flags to indicate
- that a hardware checksum should not be used. Any filter
- modifying data should call this function on an mbuf before
- passing the packet up the stack. If a filter modifies a packet
- in a way that affects any checksum, the filter is responsible
- for either modifying the checksum to compensate for the changes
- or verifying the checksum before making the changes and then
- modifying the data and calculating a new checksum only if the
- original checksum was valid.
- @param mbuf The mbuf that has been modified.
+ * @function mbuf_inbound_modified
+ * @discussion This function will clear the checksum flags to indicate
+ * that a hardware checksum should not be used. Any filter
+ * modifying data should call this function on an mbuf before
+ * passing the packet up the stack. If a filter modifies a packet
+ * in a way that affects any checksum, the filter is responsible
+ * for either modifying the checksum to compensate for the changes
+ * or verifying the checksum before making the changes and then
+ * modifying the data and calculating a new checksum only if the
+ * original checksum was valid.
+ * @param mbuf The mbuf that has been modified.
 */
extern void mbuf_inbound_modified(mbuf_t mbuf);
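For instance, a filter's inbound hook might look like this sketch (the rewrite step is hypothetical):

static void
inbound_filter_example(mbuf_t pkt)
{
	/* ... rewrite some bytes of the packet here ... */

	/* hardware checksum state no longer matches the data; clear it */
	mbuf_inbound_modified(pkt);
}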
/*!
- @function mbuf_outbound_finalize
- @discussion This function will "finalize" the packet allowing your
- code to inspect the final packet.
-
- There are a number of operations that are performed in hardware,
- such as calculating checksums. This function will perform in
- software the various opterations that were scheduled to be done
- in hardware. Future operations may include IPSec processing or
- vlan support. If you are redirecting a packet to a new interface
- which may not have the same hardware support or encapsulating
- the packet, you should call this function to force the stack to
- calculate and fill out the checksums. This will bypass hardware
- checksums but give you a complete packet to work with. If you
- need to inspect aspects of the packet which may be generated by
- hardware, you must call this function to get an aproximate final
- packet. If you plan to modify the packet in any way, you should
- call this function.
-
- This function should be called before modifying any outbound
- packets.
-
- This function may be called at various levels, in some cases
- additional headers may have already been prepended, such as the
- case of a packet seen by an interface filter. To handle this,
- the caller must pass the protocol family of the packet as well
- as the offset from the start of the packet to the protocol
- header.
- @param mbuf The mbuf that should be finalized.
- @param protocol_family The protocol family of the packet in the
- mbuf.
- @param protocol_offset The offset from the start of the mbuf to the
- protocol header. For an IP packet with an ethernet header, this
- would be the length of an ethernet header.
+ * @function mbuf_outbound_finalize
+ * @discussion This function will "finalize" the packet allowing your
+ * code to inspect the final packet.
+ *
+ * There are a number of operations that are performed in hardware,
+ * such as calculating checksums. This function will perform in
+ * software the various operations that were scheduled to be done
+ * in hardware. Future operations may include IPSec processing or
+ * vlan support. If you are redirecting a packet to a new interface
+ * which may not have the same hardware support or encapsulating
+ * the packet, you should call this function to force the stack to
+ * calculate and fill out the checksums. This will bypass hardware
+ * checksums but give you a complete packet to work with. If you
+ * need to inspect aspects of the packet which may be generated by
+ * hardware, you must call this function to get an approximate final
+ * packet. If you plan to modify the packet in any way, you should
+ * call this function.
+ *
+ * This function should be called before modifying any outbound
+ * packets.
+ *
+ * This function may be called at various levels, in some cases
+ * additional headers may have already been prepended, such as the
+ * case of a packet seen by an interface filter. To handle this,
+ * the caller must pass the protocol family of the packet as well
+ * as the offset from the start of the packet to the protocol
+ * header.
+ * @param mbuf The mbuf that should be finalized.
+ * @param protocol_family The protocol family of the packet in the
+ * mbuf.
+ * @param protocol_offset The offset from the start of the mbuf to the
+ * protocol header. For an IP packet with an ethernet header, this
+ * would be the length of an ethernet header.
 */
extern void mbuf_outbound_finalize(mbuf_t mbuf, u_int32_t protocol_family, size_t protocol_offset);
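A sketch of the redirect case described above (AF_INET from <sys/socket.h> and the 14-byte Ethernet header offset are assumptions of this example):

static void
redirect_outbound_example(mbuf_t pkt)
{
	/* complete any checksum work that was deferred to hardware */
	mbuf_outbound_finalize(pkt, AF_INET, 14);

	/* ... now safe to inspect or re-route the finalized packet ... */
}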
/*!
- @function mbuf_set_vlan_tag
- @discussion This function is used by interfaces that support vlan
- tagging in hardware. This function will set properties in the
- mbuf to indicate which vlan the packet was received for.
- @param mbuf The mbuf containing the packet.
- @param vlan The protocol family of the aux data to add.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_set_vlan_tag
+ * @discussion This function is used by interfaces that support vlan
+ * tagging in hardware. This function will set properties in the
+ * mbuf to indicate which vlan the packet was received for.
+ * @param mbuf The mbuf containing the packet.
+ * @param vlan The vlan tag to associate with the packet.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_set_vlan_tag(mbuf_t mbuf, u_int16_t vlan);
/*!
- @function mbuf_get_vlan_tag
- @discussion This function is used by drivers that support hardware
- vlan tagging to determine which vlan this packet belongs to. To
- differentiate between the case where the vlan tag is zero and
- the case where there is no vlan tag, this function will return
- ENXIO when there is no vlan.
- @param mbuf The mbuf containing the packet.
- @param vlan The protocol family of the aux data to add.
- @result 0 upon success otherwise the errno error. ENXIO indicates
- that the vlan tag is not set.
+ * @function mbuf_get_vlan_tag
+ * @discussion This function is used by drivers that support hardware
+ * vlan tagging to determine which vlan this packet belongs to. To
+ * differentiate between the case where the vlan tag is zero and
+ * the case where there is no vlan tag, this function will return
+ * ENXIO when there is no vlan.
+ * @param mbuf The mbuf containing the packet.
+ * @param vlan Upon success, the vlan tag associated with the packet.
+ * @result 0 upon success otherwise the errno error. ENXIO indicates
+ * that the vlan tag is not set.
 */
extern errno_t mbuf_get_vlan_tag(mbuf_t mbuf, u_int16_t *vlan);
/*!
- @function mbuf_clear_vlan_tag
- @discussion This function will clear any vlan tag associated with
- the mbuf.
- @param mbuf The mbuf containing the packet.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_clear_vlan_tag
+ * @discussion This function will clear any vlan tag associated with
+ * the mbuf.
+ * @param mbuf The mbuf containing the packet.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_clear_vlan_tag(mbuf_t mbuf);
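A short sketch of the driver-side pairing (names are illustrative): receive propagates the hardware-stripped tag, and transmit distinguishes an absent tag from a tag of zero via the ENXIO convention noted above.

static void
rx_vlan_example(mbuf_t pkt, u_int16_t hw_tag)
{
	(void)mbuf_set_vlan_tag(pkt, hw_tag);
}

static int
tx_vlan_example(mbuf_t pkt, u_int16_t *tag)
{
	/* returns non-zero only when a tag (possibly zero) is present */
	return mbuf_get_vlan_tag(pkt, tag) == 0;
}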
#ifdef KERNEL_PRIVATE
/*!
- @function mbuf_set_csum_requested
- @discussion This function is used by the stack to indicate which
- checksums should be calculated in hardware. The stack normally
- sets these flags as the packet is processed in the outbound
- direction. Just before send the packe to the interface, the
- stack will look at these flags and perform any checksums in
- software that are not supported by the interface.
- @param mbuf The mbuf containing the packet.
- @param request Flags indicating which checksums are being requested
- for this packet.
- @param value This parameter is currently unsupported.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_set_csum_requested
+ * @discussion This function is used by the stack to indicate which
+ * checksums should be calculated in hardware. The stack normally
+ * sets these flags as the packet is processed in the outbound
+ * direction. Just before sending the packet to the interface, the
+ * stack will look at these flags and perform any checksums in
+ * software that are not supported by the interface.
+ * @param mbuf The mbuf containing the packet.
+ * @param request Flags indicating which checksums are being requested
+ * for this packet.
+ * @param value This parameter is currently unsupported.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_set_csum_requested(mbuf_t mbuf, mbuf_csum_request_flags_t request, u_int32_t value);
#endif /* KERNEL_PRIVATE */
/*!
- @function mbuf_get_csum_requested
- @discussion This function is used by the driver to determine which
- checksum operations should be performed in hardware.
- @param mbuf The mbuf containing the packet.
- @param request Flags indicating which checksums are being requested
- for this packet.
- @param value This parameter is currently unsupported.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_get_csum_requested
+ * @discussion This function is used by the driver to determine which
+ * checksum operations should be performed in hardware.
+ * @param mbuf The mbuf containing the packet.
+ * @param request Flags indicating which checksums are being requested
+ * for this packet.
+ * @param value This parameter is currently unsupported.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_get_csum_requested(mbuf_t mbuf, mbuf_csum_request_flags_t *request, u_int32_t *value);
/*!
- @function mbuf_get_tso_requested
- @discussion This function is used by the driver to determine which
- checksum operations should be performed in hardware.
- @param mbuf The mbuf containing the packet.
- @param request Flags indicating which values are being requested
- for this packet.
- @param value The requested value.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_get_tso_requested
+ * @discussion This function is used by the driver to determine which
+ * TSO operations should be performed in hardware.
+ * @param mbuf The mbuf containing the packet.
+ * @param request Flags indicating which values are being requested
+ * for this packet.
+ * @param value The requested value.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_get_tso_requested(mbuf_t mbuf, mbuf_tso_request_flags_t *request, u_int32_t *value);
/*!
- @function mbuf_clear_csum_requested
- @discussion This function clears the checksum request flags.
- @param mbuf The mbuf containing the packet.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_clear_csum_requested
+ * @discussion This function clears the checksum request flags.
+ * @param mbuf The mbuf containing the packet.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_clear_csum_requested(mbuf_t mbuf);
/*!
- @function mbuf_set_csum_performed
- @discussion This is used by the driver to indicate to the stack which
- checksum operations were performed in hardware.
- @param mbuf The mbuf containing the packet.
- @param flags Flags indicating which hardware checksum operations
- were performed.
- @param value If the MBUF_CSUM_DID_DATA flag is set, value should be
- set to the value of the TCP or UDP header as calculated by the
- hardware.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_set_csum_performed
+ * @discussion This is used by the driver to indicate to the stack which
+ * checksum operations were performed in hardware.
+ * @param mbuf The mbuf containing the packet.
+ * @param flags Flags indicating which hardware checksum operations + * were performed. + * @param value If the MBUF_CSUM_DID_DATA flag is set, value should be + * set to the value of the TCP or UDP header as calculated by the + * hardware. + * @result 0 upon success otherwise the errno error. */ extern errno_t mbuf_set_csum_performed(mbuf_t mbuf, mbuf_csum_performed_flags_t flags, u_int32_t value); #ifdef KERNEL_PRIVATE /* - @function mbuf_get_csum_performed - @discussion This is used by the stack to determine which checksums - were calculated in hardware on the inbound path. - @param mbuf The mbuf containing the packet. - @param flags Flags indicating which hardware checksum operations - were performed. - @param value If the MBUF_CSUM_DID_DATA flag is set, value will be - set to the value of the TCP or UDP header as calculated by the - hardware. - @result 0 upon success otherwise the errno error. + * @function mbuf_get_csum_performed + * @discussion This is used by the stack to determine which checksums + * were calculated in hardware on the inbound path. + * @param mbuf The mbuf containing the packet. + * @param flags Flags indicating which hardware checksum operations + * were performed. + * @param value If the MBUF_CSUM_DID_DATA flag is set, value will be + * set to the value of the TCP or UDP header as calculated by the + * hardware. + * @result 0 upon success otherwise the errno error. */ extern errno_t mbuf_get_csum_performed(mbuf_t mbuf, mbuf_csum_performed_flags_t *flags, u_int32_t *value); #endif /* KERNEL_PRIVATE */ /*! - @function mbuf_get_mlen - @discussion This routine returns the number of data bytes in a normal - mbuf, i.e. an mbuf that is not a packet header, nor one with - an external cluster attached to it. This is equivalent to the - legacy MLEN macro. - @result The number of bytes of available data. + * @function mbuf_get_mlen + * @discussion This routine returns the number of data bytes in a normal + * mbuf, i.e. an mbuf that is not a packet header, nor one with + * an external cluster attached to it. This is equivalent to the + * legacy MLEN macro. + * @result The number of bytes of available data. */ extern u_int32_t mbuf_get_mlen(void); /*! - @function mbuf_get_mhlen - @discussion This routine returns the number of data bytes in a packet - header mbuf. This is equivalent to the legacy MHLEN macro. - @result The number of bytes of available data. + * @function mbuf_get_mhlen + * @discussion This routine returns the number of data bytes in a packet + * header mbuf. This is equivalent to the legacy MHLEN macro. + * @result The number of bytes of available data. */ extern u_int32_t mbuf_get_mhlen(void); /*! - @function mbuf_get_minclsize - @discussion This routine returns the minimum number of data bytes - before an external cluster is used. This is equivalent to the - legacy MINCLSIZE macro. - @result The minimum number of bytes before a cluster will be used. + * @function mbuf_get_minclsize + * @discussion This routine returns the minimum number of data bytes + * before an external cluster is used. This is equivalent to the + * legacy MINCLSIZE macro. + * @result The minimum number of bytes before a cluster will be used. */ extern u_int32_t mbuf_get_minclsize(void); /*! - @function mbuf_clear_csum_performed - @discussion Clears the hardware checksum flags and values. - @param mbuf The mbuf containing the packet. - @result 0 upon success otherwise the errno error. + * @function mbuf_clear_csum_performed + * @discussion Clears the hardware checksum flags and values. 
+ * @param mbuf The mbuf containing the packet. + * @result 0 upon success otherwise the errno error. */ extern errno_t mbuf_clear_csum_performed(mbuf_t mbuf); /*! - @function mbuf_inet_cksum - @discussion Calculates 16-bit 1's complement Internet checksum of the - transport segment with or without the pseudo header checksum - of a given IPv4 packet. If the caller specifies a non-zero - transport protocol, the checksum returned will also include - the pseudo header checksum for the corresponding transport - header. Otherwise, no header parsing will be done and the - caller may use this to calculate the Internet checksum of - an arbitrary span of data. - - This routine does not modify the contents of the packet. If - the caller specifies a non-zero protocol and/or offset, the - routine expects the complete protocol header to be present - at the beginning of the first mbuf. - @param mbuf The mbuf (or chain of mbufs) containing the packet. - @param protocol A zero or non-zero value. A non-zero value specifies - the transport protocol used for pseudo header checksum. - @param offset A zero or non-zero value; if the latter, it specifies - the offset of the transport header from the beginning of mbuf. - @param length The total (non-zero) length of the transport segment. - @param csum Pointer to the checksum variable; upon success, this - routine will return the calculated Internet checksum through - this variable. The caller must set it to a non-NULL value. - @result 0 upon success otherwise the errno error. + * @function mbuf_inet_cksum + * @discussion Calculates 16-bit 1's complement Internet checksum of the + * transport segment with or without the pseudo header checksum + * of a given IPv4 packet. If the caller specifies a non-zero + * transport protocol, the checksum returned will also include + * the pseudo header checksum for the corresponding transport + * header. Otherwise, no header parsing will be done and the + * caller may use this to calculate the Internet checksum of + * an arbitrary span of data. + * + * This routine does not modify the contents of the packet. If + * the caller specifies a non-zero protocol and/or offset, the + * routine expects the complete protocol header to be present + * at the beginning of the first mbuf. + * @param mbuf The mbuf (or chain of mbufs) containing the packet. + * @param protocol A zero or non-zero value. A non-zero value specifies + * the transport protocol used for pseudo header checksum. + * @param offset A zero or non-zero value; if the latter, it specifies + * the offset of the transport header from the beginning of mbuf. + * @param length The total (non-zero) length of the transport segment. + * @param csum Pointer to the checksum variable; upon success, this + * routine will return the calculated Internet checksum through + * this variable. The caller must set it to a non-NULL value. + * @result 0 upon success otherwise the errno error. */ extern errno_t mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length, u_int16_t *csum); /*! - @function mbuf_inet6_cksum - @discussion Calculates 16-bit 1's complement Internet checksum of the - transport segment with or without the pseudo header checksum - of a given IPv6 packet. If the caller specifies a non-zero - transport protocol, the checksum returned will also include - the pseudo header checksum for the corresponding transport - header. 
Otherwise, no header parsing will be done and the - caller may use this to calculate the Internet checksum of - an arbitrary span of data. - - This routine does not modify the contents of the packet. If - the caller specifies a non-zero protocol and/or offset, the - routine expects the complete protocol header(s) to be present - at the beginning of the first mbuf. - @param mbuf The mbuf (or chain of mbufs) containing the packet. - @param protocol A zero or non-zero value. A non-zero value specifies - the transport protocol used for pseudo header checksum. - @param offset A zero or non-zero value; if the latter, it specifies - the offset of the transport header from the beginning of mbuf. - @param length The total (non-zero) length of the transport segment. - @param csum Pointer to the checksum variable; upon success, this - routine will return the calculated Internet checksum through - this variable. The caller must set it to a non-NULL value. - @result 0 upon success otherwise the errno error. + * @function mbuf_inet6_cksum + * @discussion Calculates 16-bit 1's complement Internet checksum of the + * transport segment with or without the pseudo header checksum + * of a given IPv6 packet. If the caller specifies a non-zero + * transport protocol, the checksum returned will also include + * the pseudo header checksum for the corresponding transport + * header. Otherwise, no header parsing will be done and the + * caller may use this to calculate the Internet checksum of + * an arbitrary span of data. + * + * This routine does not modify the contents of the packet. If + * the caller specifies a non-zero protocol and/or offset, the + * routine expects the complete protocol header(s) to be present + * at the beginning of the first mbuf. + * @param mbuf The mbuf (or chain of mbufs) containing the packet. + * @param protocol A zero or non-zero value. A non-zero value specifies + * the transport protocol used for pseudo header checksum. + * @param offset A zero or non-zero value; if the latter, it specifies + * the offset of the transport header from the beginning of mbuf. + * @param length The total (non-zero) length of the transport segment. + * @param csum Pointer to the checksum variable; upon success, this + * routine will return the calculated Internet checksum through + * this variable. The caller must set it to a non-NULL value. + * @result 0 upon success otherwise the errno error. */ extern errno_t mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length, u_int16_t *csum); @@ -1298,128 +1298,128 @@ extern errno_t mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, /* mbuf tags */ /*! - @function mbuf_tag_id_find - @discussion Lookup the module id for a string. If there is no module - id assigned to this string, a new module id will be assigned. - The string should be the bundle id of the kext. In the case of a - tag that will be shared across multiple kexts, a common bundle - id style string should be used. - - The lookup operation is not optimized. A module should call this - function once during startup and chache the module id. The - module id will not be resassigned until the machine reboots. - @param module_string A unique string identifying your module. - Example: com.apple.nke.SharedIP. - @param module_id Upon return, a unique identifier for use with - mbuf_tag_* functions. This identifier is valid until the machine - is rebooted. - @result 0 upon success otherwise the errno error. 
+ * @function mbuf_tag_id_find
+ * @discussion Look up the module id for a string. If there is no module
+ * id assigned to this string, a new module id will be assigned.
+ * The string should be the bundle id of the kext. In the case of a
+ * tag that will be shared across multiple kexts, a common bundle
+ * id style string should be used.
+ *
+ * The lookup operation is not optimized. A module should call this
+ * function once during startup and cache the module id. The
+ * module id will not be reassigned until the machine reboots.
+ * @param module_string A unique string identifying your module.
+ * Example: com.apple.nke.SharedIP.
+ * @param module_id Upon return, a unique identifier for use with
+ * mbuf_tag_* functions. This identifier is valid until the machine
+ * is rebooted.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_tag_id_find(const char *module_string, mbuf_tag_id_t *module_id);
/*!
- @function mbuf_tag_allocate
- @discussion Allocate an mbuf tag. Mbuf tags allow various portions
- of the stack to tag mbufs with data that will travel with the
- mbuf through the stack.
-
- Tags may only be added to mbufs with packet headers
- (MBUF_PKTHDR flag is set). Mbuf tags are freed when the mbuf is
- freed or when mbuf_tag_free is called.
- @param mbuf The mbuf to attach this tag to.
- @param module_id A module identifier returned by mbuf_tag_id_find.
- @param type A 16 bit type value. For a given module_id, you can use
- a number of different tag types.
- @param length The length, in bytes, to allocate for storage that
- will be associated with this tag on this mbuf.
- @param how Indicate whether you want to block and wait for memory if
- memory is not immediately available.
- @param data_p Upon successful return, *data_p will point to the
- buffer allocated for the mtag.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_tag_allocate
+ * @discussion Allocate an mbuf tag. Mbuf tags allow various portions
+ * of the stack to tag mbufs with data that will travel with the
+ * mbuf through the stack.
+ *
+ * Tags may only be added to mbufs with packet headers
+ * (MBUF_PKTHDR flag is set). Mbuf tags are freed when the mbuf is
+ * freed or when mbuf_tag_free is called.
+ * @param mbuf The mbuf to attach this tag to.
+ * @param module_id A module identifier returned by mbuf_tag_id_find.
+ * @param type A 16-bit type value. For a given module_id, you can use
+ * a number of different tag types.
+ * @param length The length, in bytes, to allocate for storage that
+ * will be associated with this tag on this mbuf.
+ * @param how Indicate whether you want to block and wait for memory if
+ * memory is not immediately available.
+ * @param data_p Upon successful return, *data_p will point to the
+ * buffer allocated for the mtag.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_tag_allocate(mbuf_t mbuf, mbuf_tag_id_t module_id, mbuf_tag_type_t type, size_t length, mbuf_how_t how, void **data_p);
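Putting the two calls together, a sketch of stamping a packet with module-private data (the bundle id string, tag type, and marker value are hypothetical):

static errno_t
tag_packet_example(mbuf_t pkt)
{
	mbuf_tag_id_t id;
	u_int32_t *marker;
	errno_t err;

	/* in real code the id would be looked up once and cached */
	err = mbuf_tag_id_find("com.example.filter", &id);
	if (err != 0)
		return err;

	err = mbuf_tag_allocate(pkt, id, 1 /* tag type */, sizeof(*marker),
	    MBUF_WAITOK, (void **)&marker);
	if (err == 0)
		*marker = 0x1234;
	return err;
}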
/*!
- @function mbuf_tag_find
- @discussion Find the data associated with an mbuf tag.
- @param mbuf The mbuf the tag is attached to.
- @param module_id A module identifier returned by mbuf_tag_id_find.
- @param type The 16 bit type of the tag to find.
- @param length Upon success, the length of data will be store in
- *length.
- @param data_p Upon successful return, *data_p will point to the
- buffer allocated for the mtag.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_tag_find
+ * @discussion Find the data associated with an mbuf tag.
+ * @param mbuf The mbuf the tag is attached to.
+ * @param module_id A module identifier returned by mbuf_tag_id_find.
+ * @param type The 16-bit type of the tag to find.
+ * @param length Upon success, the length of data will be stored in
+ * *length.
+ * @param data_p Upon successful return, *data_p will point to the
+ * buffer allocated for the mtag.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_tag_find(mbuf_t mbuf, mbuf_tag_id_t module_id, mbuf_tag_type_t type, size_t *length, void **data_p);
/*!
- @function mbuf_tag_free
- @discussion Frees a previously allocated mbuf tag.
- @param mbuf The mbuf the tag was allocated on.
- @param module_id The ID of the tag to free.
- @param type The type of the tag to free.
+ * @function mbuf_tag_free
+ * @discussion Frees a previously allocated mbuf tag.
+ * @param mbuf The mbuf the tag was allocated on.
+ * @param module_id The ID of the tag to free.
+ * @param type The type of the tag to free.
 */
extern void mbuf_tag_free(mbuf_t mbuf, mbuf_tag_id_t module_id, mbuf_tag_type_t type);
#ifdef KERNEL_PRIVATE
/*!
- @function mbuf_add_drvaux
- @discussion Allocate space for driver auxiliary data and attach it
- to the packet (MBUF_PKTHDR is required.) This space is freed
- when the mbuf is freed or when mbuf_del_drvaux is called.
- Only one instance of driver auxiliary data may be attached to
- a packet. Any attempt to add it to a packet already associated
- with one will yield an error, and the existing one must first
- be removed via mbuf_del_drvaux. The format and length of the
- data depend largely on the family and sub-family. The system
- makes no attempt to define and/or interpret the contents of
- the data, and simply acts as a conduit between its producer
- and consumer.
- @param mbuf The mbuf to attach the auxiliary data to.
- @param how Indicate whether you are willing to block and wait for
- memory, if memory is not immediately available.
- @param family The interface family as defined in net/kpi_interface.h.
- @param subfamily The interface sub-family as defined in
- net/kpi_interface.h.
- @param length The length of the auxiliary data, must be greater than 0.
- @param data_p Upon successful return, *data_p will point to the
- space allocated for the data. Caller may set this to NULL.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_add_drvaux
+ * @discussion Allocate space for driver auxiliary data and attach it
+ * to the packet (MBUF_PKTHDR is required.) This space is freed
+ * when the mbuf is freed or when mbuf_del_drvaux is called.
+ * Only one instance of driver auxiliary data may be attached to
+ * a packet. Any attempt to add it to a packet already associated
+ * with one will yield an error, and the existing one must first
+ * be removed via mbuf_del_drvaux. The format and length of the
+ * data depend largely on the family and sub-family. The system
+ * makes no attempt to define and/or interpret the contents of
+ * the data, and simply acts as a conduit between its producer
+ * and consumer.
+ * @param mbuf The mbuf to attach the auxiliary data to.
+ * @param how Indicate whether you are willing to block and wait for
+ * memory, if memory is not immediately available.
+ * @param family The interface family as defined in net/kpi_interface.h.
+ * @param subfamily The interface sub-family as defined in
+ * net/kpi_interface.h.
+ * @param length The length of the auxiliary data, must be greater than 0.
+ * @param data_p Upon successful return, *data_p will point to the
+ * space allocated for the data. Caller may set this to NULL.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family, u_int32_t subfamily, size_t length, void **data_p);
/*!
- @function mbuf_find_drvaux
- @discussion Find the driver auxiliary data associated with a packet.
- @param mbuf The mbuf the auxiliary data is attached to.
- @param family_p Upon successful return, *family_p will contain
- the interface family associated with the data, as defined
- in net/kpi_interface.h. Caller may set this to NULL.
- @param subfamily_p Upon successful return, *subfamily_p will contain
- the interface family associated with the data, as defined
- in net/kpi_interface.h. Caller may set this to NULL.
- @param length_p Upon successful return, *length_p will contain
- the length of the driver auxiliary data. Caller may
- set this to NULL.
- @param data_p Upon successful return, *data_p will point to the
- space allocated for the data.
- @result 0 upon success otherwise the errno error.
+ * @function mbuf_find_drvaux
+ * @discussion Find the driver auxiliary data associated with a packet.
+ * @param mbuf The mbuf the auxiliary data is attached to.
+ * @param family_p Upon successful return, *family_p will contain
+ * the interface family associated with the data, as defined
+ * in net/kpi_interface.h. Caller may set this to NULL.
+ * @param subfamily_p Upon successful return, *subfamily_p will contain
+ * the interface sub-family associated with the data, as defined
+ * in net/kpi_interface.h. Caller may set this to NULL.
+ * @param length_p Upon successful return, *length_p will contain
+ * the length of the driver auxiliary data. Caller may
+ * set this to NULL.
+ * @param data_p Upon successful return, *data_p will point to the
+ * space allocated for the data.
+ * @result 0 upon success otherwise the errno error.
 */
extern errno_t mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p, u_int32_t *length_p, void **data_p);
/*!
- @function mbuf_del_drvaux
- @discussion Remove and free any driver auxility data associated
- with the packet.
- @param mbuf The mbuf the auxiliary data is attached to.
+ * @function mbuf_del_drvaux
+ * @discussion Remove and free any driver auxiliary data associated
+ * with the packet.
+ * @param mbuf The mbuf the auxiliary data is attached to.
 */
extern void mbuf_del_drvaux(mbuf_t mbuf);
#endif /* KERNEL_PRIVATE */
@@ -1427,240 +1427,240 @@ extern void mbuf_del_drvaux(mbuf_t mbuf);
/* mbuf stats */
/*!
- @function mbuf_stats
- @discussion Get the mbuf statistics.
- @param stats Storage to copy the stats in to.
+ * @function mbuf_stats
+ * @discussion Get the mbuf statistics.
+ * @param stats Storage to copy the stats in to.
 */
extern void mbuf_stats(struct mbuf_stat *stats);
/*!
- @enum mbuf_traffic_class_t
- @abstract Traffic class of a packet
- @discussion Property that represent the category of traffic of a packet.
- This information may be used by the driver and at the link level.
- @constant MBUF_TC_BE Best effort, normal class.
- @constant MBUF_TC_BK Background, low priority or bulk traffic.
- @constant MBUF_TC_VI Interactive video, constant bit rate, low latency.
- @constant MBUF_TC_VO Interactive voice, constant bit rate, lowest latency.
 
@@ -1427,240 +1427,240 @@ extern void mbuf_del_drvaux(mbuf_t mbuf);
 /* mbuf stats */
 
 /*!
- @function mbuf_stats
- @discussion Get the mbuf statistics.
- @param stats Storage to copy the stats in to.
+ * @function mbuf_stats
+ * @discussion Get the mbuf statistics.
+ * @param stats Storage to copy the stats into.
  */
 extern void mbuf_stats(struct mbuf_stat *stats);
 
 /*!
- @enum mbuf_traffic_class_t
- @abstract Traffic class of a packet
- @discussion Property that represent the category of traffic of a packet.
-       This information may be used by the driver and at the link level.
- @constant MBUF_TC_BE Best effort, normal class.
- @constant MBUF_TC_BK Background, low priority or bulk traffic.
- @constant MBUF_TC_VI Interactive video, constant bit rate, low latency.
- @constant MBUF_TC_VO Interactive voice, constant bit rate, lowest latency.
-*/
+ * @enum mbuf_traffic_class_t
+ * @abstract Traffic class of a packet
+ * @discussion Property that represents the category of traffic of a packet.
+ *       This information may be used by the driver and at the link level.
+ * @constant MBUF_TC_BE Best effort, normal class.
+ * @constant MBUF_TC_BK Background, low priority or bulk traffic.
+ * @constant MBUF_TC_VI Interactive video, constant bit rate, low latency.
+ * @constant MBUF_TC_VO Interactive voice, constant bit rate, lowest latency.
+ */
 typedef enum {
 #ifdef XNU_KERNEL_PRIVATE
-       MBUF_TC_UNSPEC = -1, /* Internal: not specified */
+       MBUF_TC_UNSPEC = -1,            /* Internal: not specified */
 #endif
-       MBUF_TC_BE = 0,
-       MBUF_TC_BK = 1,
-       MBUF_TC_VI = 2,
-       MBUF_TC_VO = 3
+       MBUF_TC_BE = 0,
+       MBUF_TC_BK = 1,
+       MBUF_TC_VI = 2,
+       MBUF_TC_VO = 3
 #ifdef XNU_KERNEL_PRIVATE
-       ,
-       MBUF_TC_MAX = 4 /* Internal: traffic class count */
+       ,
+       MBUF_TC_MAX = 4                 /* Internal: traffic class count */
 #endif
 } mbuf_traffic_class_t;
 
 /*!
- @function mbuf_get_traffic_class
- @discussion Get the traffic class of an mbuf packet
- @param mbuf The mbuf to get the traffic class of.
- @result The traffic class
-*/
+ * @function mbuf_get_traffic_class
+ * @discussion Get the traffic class of an mbuf packet
+ * @param mbuf The mbuf to get the traffic class of.
+ * @result The traffic class
+ */
 extern mbuf_traffic_class_t mbuf_get_traffic_class(mbuf_t mbuf);
 
 /*!
- @function mbuf_set_traffic_class
- @discussion Set the traffic class of an mbuf packet.
- @param mbuf The mbuf to set the traffic class on.
- @param tc The traffic class
- @result 0 on success, EINVAL if bad parameter is passed
-*/
+ * @function mbuf_set_traffic_class
+ * @discussion Set the traffic class of an mbuf packet.
+ * @param mbuf The mbuf to set the traffic class on.
+ * @param tc The traffic class
+ * @result 0 on success, EINVAL if bad parameter is passed
+ */
 extern errno_t mbuf_set_traffic_class(mbuf_t mbuf, mbuf_traffic_class_t tc);
 
 /*!
- @function mbuf_is_traffic_class_privileged
- @discussion Returns the privileged status of the traffic class
-       of the packet specified by the mbuf.
- @param mbuf The mbuf to retrieve the status from.
- @result Non-zero if privileged, 0 otherwise.
+ * @function mbuf_is_traffic_class_privileged
+ * @discussion Returns the privileged status of the traffic class
+ *       of the packet specified by the mbuf.
+ * @param mbuf The mbuf to retrieve the status from.
+ * @result Non-zero if privileged, 0 otherwise.
  */
 extern int mbuf_is_traffic_class_privileged(mbuf_t mbuf);
 
 #ifdef KERNEL_PRIVATE
 /*!
- @function mbuf_get_traffic_class_max_count
- @discussion Returns the maximum number of mbuf traffic class types
- @result The total count of mbuf traffic classes
+ * @function mbuf_get_traffic_class_max_count
+ * @discussion Returns the maximum number of mbuf traffic class types
+ * @result The total count of mbuf traffic classes
  */
 extern u_int32_t mbuf_get_traffic_class_max_count(void);
 
 /*!
- @function mbuf_get_traffic_class_index
- @discussion Returns the zero-based index of an mbuf traffic class value
- @param tc The traffic class
- @param index Pointer to the index value
- @result 0 on success, EINVAL if bad parameter is passed
+ * @function mbuf_get_traffic_class_index
+ * @discussion Returns the zero-based index of an mbuf traffic class value
+ * @param tc The traffic class
+ * @param index Pointer to the index value
+ * @result 0 on success, EINVAL if bad parameter is passed
  */
 extern errno_t mbuf_get_traffic_class_index(mbuf_traffic_class_t tc,
     u_int32_t *index);
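A minimal sketch of stamping and querying the traffic class with the KPIs above; the policy shown is illustrative only.

#include <sys/kpi_mbuf.h>

static errno_t
mark_background(mbuf_t m)
{
    /* Bulk transfers yield to interactive traffic at the link layer. */
    return (mbuf_set_traffic_class(m, MBUF_TC_BK));
}

static int
is_latency_sensitive(mbuf_t m)
{
    mbuf_traffic_class_t tc = mbuf_get_traffic_class(m);

    return (tc == MBUF_TC_VI || tc == MBUF_TC_VO);
}

 /*!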
- @enum mbuf_svc_class_t - @abstract Service class of a packet - @discussion Property that represents the category of service - of a packet. This information may be used by the driver - and at the link level. - @constant MBUF_SC_BK_SYS "Background System-Initiated", high delay - tolerant, high loss tolerant, elastic flow, variable size & - long-lived. - @constant MBUF_SC_BK "Background", user-initiated, high delay tolerant, - high loss tolerant, elastic flow, variable size. This level - corresponds to WMM access class "BG", or MBUF_TC_BK. - @constant MBUF_SC_BE "Best Effort", unclassified/standard. This is - the default service class; pretty much a mix of everything. - This level corresponds to WMM access class "BE" or MBUF_TC_BE. - @constant MBUF_SC_RD - "Responsive Data", a notch higher than "Best Effort", medium - delay tolerant, medium loss tolerant, elastic flow, bursty, - long-lived. - @constant MBUF_SC_OAM "Operations, Administration, and Management", - medium delay tolerant, low-medium loss tolerant, elastic & - inelastic flows, variable size. - @constant MBUF_SC_AV "Multimedia Audio/Video Streaming", medium delay - tolerant, low-medium loss tolerant, elastic flow, constant - packet interval, variable rate & size. - @constant MBUF_SC_RV "Responsive Multimedia Audio/Video", low delay - tolerant, low-medium loss tolerant, elastic flow, variable - packet interval, rate and size. - @constant MBUF_SC_VI "Interactive Video", low delay tolerant, low- - medium loss tolerant, elastic flow, constant packet interval, - variable rate & size. This level corresponds to WMM access - class "VI" or MBUF_TC_VI. - @constant MBUF_SC_SIG "Signaling", low delay tolerant, low loss - tolerant, inelastic flow, jitter tolerant, rate is bursty but - short, variable size. e.g. SIP. This level corresponds to WMM - access class "VI" or MBUF_TC_VI. - @constant MBUF_SC_VO "Interactive Voice", low delay tolerant, low loss - tolerant, inelastic flow, constant packet rate, somewhat fixed - size. This level corresponds to WMM access class "VO" or - MBUF_TC_VO. - @constant MBUF_SC_CTL "Network Control", low delay tolerant, low loss - tolerant, inelastic flow, rate is short & burst, variable size. -*/ + * @enum mbuf_svc_class_t + * @abstract Service class of a packet + * @discussion Property that represents the category of service + * of a packet. This information may be used by the driver + * and at the link level. + * @constant MBUF_SC_BK_SYS "Background System-Initiated", high delay + * tolerant, high loss tolerant, elastic flow, variable size & + * long-lived. + * @constant MBUF_SC_BK "Background", user-initiated, high delay tolerant, + * high loss tolerant, elastic flow, variable size. This level + * corresponds to WMM access class "BG", or MBUF_TC_BK. + * @constant MBUF_SC_BE "Best Effort", unclassified/standard. This is + * the default service class; pretty much a mix of everything. + * This level corresponds to WMM access class "BE" or MBUF_TC_BE. + * @constant MBUF_SC_RD + * "Responsive Data", a notch higher than "Best Effort", medium + * delay tolerant, medium loss tolerant, elastic flow, bursty, + * long-lived. + * @constant MBUF_SC_OAM "Operations, Administration, and Management", + * medium delay tolerant, low-medium loss tolerant, elastic & + * inelastic flows, variable size. + * @constant MBUF_SC_AV "Multimedia Audio/Video Streaming", medium delay + * tolerant, low-medium loss tolerant, elastic flow, constant + * packet interval, variable rate & size. 
+ * @constant MBUF_SC_RV "Responsive Multimedia Audio/Video", low delay + * tolerant, low-medium loss tolerant, elastic flow, variable + * packet interval, rate and size. + * @constant MBUF_SC_VI "Interactive Video", low delay tolerant, low- + * medium loss tolerant, elastic flow, constant packet interval, + * variable rate & size. This level corresponds to WMM access + * class "VI" or MBUF_TC_VI. + * @constant MBUF_SC_SIG "Signaling", low delay tolerant, low loss + * tolerant, inelastic flow, jitter tolerant, rate is bursty but + * short, variable size. e.g. SIP. This level corresponds to WMM + * access class "VI" or MBUF_TC_VI. + * @constant MBUF_SC_VO "Interactive Voice", low delay tolerant, low loss + * tolerant, inelastic flow, constant packet rate, somewhat fixed + * size. This level corresponds to WMM access class "VO" or + * MBUF_TC_VO. + * @constant MBUF_SC_CTL "Network Control", low delay tolerant, low loss + * tolerant, inelastic flow, rate is short & burst, variable size. + */ typedef enum { #ifdef XNU_KERNEL_PRIVATE - MBUF_SC_UNSPEC = -1, /* Internal: not specified */ + MBUF_SC_UNSPEC = -1, /* Internal: not specified */ #endif - MBUF_SC_BK_SYS = 0x00080090, /* lowest class */ - MBUF_SC_BK = 0x00100080, + MBUF_SC_BK_SYS = 0x00080090, /* lowest class */ + MBUF_SC_BK = 0x00100080, - MBUF_SC_BE = 0x00000000, - MBUF_SC_RD = 0x00180010, - MBUF_SC_OAM = 0x00200020, + MBUF_SC_BE = 0x00000000, + MBUF_SC_RD = 0x00180010, + MBUF_SC_OAM = 0x00200020, - MBUF_SC_AV = 0x00280120, - MBUF_SC_RV = 0x00300110, - MBUF_SC_VI = 0x00380100, - MBUF_SC_SIG = 0x00380130, + MBUF_SC_AV = 0x00280120, + MBUF_SC_RV = 0x00300110, + MBUF_SC_VI = 0x00380100, + MBUF_SC_SIG = 0x00380130, - MBUF_SC_VO = 0x00400180, - MBUF_SC_CTL = 0x00480190, /* highest class */ + MBUF_SC_VO = 0x00400180, + MBUF_SC_CTL = 0x00480190, /* highest class */ } mbuf_svc_class_t; /*! - @function mbuf_get_service_class_max_count - @discussion Returns the maximum number of mbuf service class types. - @result The total count of mbuf service classes. + * @function mbuf_get_service_class_max_count + * @discussion Returns the maximum number of mbuf service class types. + * @result The total count of mbuf service classes. */ extern u_int32_t mbuf_get_service_class_max_count(void); /*! - @function mbuf_get_service_class_index - @discussion Returns the zero-based index of an mbuf service class value - @param sc The service class - @param index Pointer to the index value - @result 0 on success, EINVAL if bad parameter is passed + * @function mbuf_get_service_class_index + * @discussion Returns the zero-based index of an mbuf service class value + * @param sc The service class + * @param index Pointer to the index value + * @result 0 on success, EINVAL if bad parameter is passed */ extern errno_t mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index); /*! - @function mbuf_get_service_class - @discussion Get the service class of an mbuf packet - @param mbuf The mbuf to get the service class of. - @result The service class -*/ + * @function mbuf_get_service_class + * @discussion Get the service class of an mbuf packet + * @param mbuf The mbuf to get the service class of. + * @result The service class + */ extern mbuf_svc_class_t mbuf_get_service_class(mbuf_t mbuf); /*! - @function mbuf_set_servicec_class - @discussion Set the service class of an mbuf packet. - @param mbuf The mbuf to set the service class on. 
- @param sc The service class
- @result 0 on success, EINVAL if bad parameter is passed
-*/
+ * @function mbuf_set_service_class
+ * @discussion Set the service class of an mbuf packet.
+ * @param mbuf The mbuf to set the service class on.
+ * @param sc The service class
+ * @result 0 on success, EINVAL if bad parameter is passed
+ */
 extern errno_t mbuf_set_service_class(mbuf_t mbuf, mbuf_svc_class_t sc);
 
 /*!
- @function mbuf_is_service_class_privileged
- @discussion Returns the privileged status of the service class
-       of the packet specified by the mbuf.
- @param mbuf The mbuf to retrieve the status from.
- @result Non-zero if privileged, 0 otherwise.
+ * @function mbuf_is_service_class_privileged
+ * @discussion Returns the privileged status of the service class
+ *       of the packet specified by the mbuf.
+ * @param mbuf The mbuf to retrieve the status from.
+ * @result Non-zero if privileged, 0 otherwise.
  */
 extern int mbuf_is_service_class_privileged(mbuf_t mbuf);
 
 /*!
- @enum mbuf_pkthdr_aux_flags_t
- @abstract Constants defining mbuf auxiliary flags. Only the flags
-       listed below can be retrieved.
- @constant MBUF_PKTAUXF_INET_RESOLVE_RTR Indicates this is an ARP
-       request packet, whose target is the address of the default
-       IPv4 router.
- @constant MBUF_PKTAUXF_INET6_RESOLVE_RTR Indicates this is an ICMPv6
-       Neighbor Solicitation packet, whose target is the address of
-       the default IPv6 router.
+ * @enum mbuf_pkthdr_aux_flags_t
+ * @abstract Constants defining mbuf auxiliary flags. Only the flags
+ *       listed below can be retrieved.
+ * @constant MBUF_PKTAUXF_INET_RESOLVE_RTR Indicates this is an ARP
+ *       request packet, whose target is the address of the default
+ *       IPv4 router.
+ * @constant MBUF_PKTAUXF_INET6_RESOLVE_RTR Indicates this is an ICMPv6
+ *       Neighbor Solicitation packet, whose target is the address of
+ *       the default IPv6 router.
  */
 enum {
-       MBUF_PKTAUXF_INET_RESOLVE_RTR = 0x0004,
-       MBUF_PKTAUXF_INET6_RESOLVE_RTR = 0x0008,
+       MBUF_PKTAUXF_INET_RESOLVE_RTR   = 0x0004,
+       MBUF_PKTAUXF_INET6_RESOLVE_RTR  = 0x0008,
 };
 typedef u_int32_t mbuf_pkthdr_aux_flags_t;
 
 /*!
- @function mbuf_pkthdr_aux_flags
- @discussion Returns the auxiliary flags of a packet.
- @param mbuf The mbuf containing the packet header.
- @param paux_flags Pointer to mbuf_pkthdr_aux_flags_t variable.
- @result 0 upon success otherwise the errno error.
-*/
+ * @function mbuf_pkthdr_aux_flags
+ * @discussion Returns the auxiliary flags of a packet.
+ * @param mbuf The mbuf containing the packet header.
+ * @param paux_flags Pointer to mbuf_pkthdr_aux_flags_t variable.
+ * @result 0 upon success otherwise the errno error.
+ */
 extern errno_t mbuf_pkthdr_aux_flags(mbuf_t mbuf,
     mbuf_pkthdr_aux_flags_t *paux_flags);
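A sketch of one plausible use of the service class: mapping packets onto hardware transmit rings in a driver. The ring numbering is invented for illustration.

#include <sys/kpi_mbuf.h>

static int
hw_ring_for_packet(mbuf_t m)
{
    switch (mbuf_get_service_class(m)) {
    case MBUF_SC_CTL:
    case MBUF_SC_VO:
        return (0);     /* highest-priority ring */
    case MBUF_SC_VI:
    case MBUF_SC_SIG:
    case MBUF_SC_RV:
    case MBUF_SC_AV:
        return (1);     /* latency-sensitive media */
    case MBUF_SC_BK:
    case MBUF_SC_BK_SYS:
        return (3);     /* bulk/background ring */
    default:
        return (2);     /* best effort (BE, RD, OAM) */
    }
}

 /*!
- @function mbuf_get_driver_scratch
- @discussion Returns a pointer to a driver specific area in the mbuf
- @param m The mbuf whose driver scratch space is to be returned
- @param area A pointer to a location to store the address of the
-       driver scratch space. This value is guaranteed to be 32-bit
-       aligned.
- @param area_ln A pointer to a location to store the total length of
-       the memory location.
-*/
+ * @function mbuf_get_driver_scratch
+ * @discussion Returns a pointer to a driver specific area in the mbuf
+ * @param m The mbuf whose driver scratch space is to be returned
+ * @param area A pointer to a location to store the address of the
+ *       driver scratch space. This value is guaranteed to be 32-bit
+ *       aligned.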
+ * @param area_ln A pointer to a location to store the total length of + * the memory location. + */ extern errno_t mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_ln); /*! - @function mbuf_get_unsent_data_bytes - @discussion Returns the amount of data that is waiting to be sent - on this interface. This is a private SPI used by cellular - interface as an indication of future activity on that - interface. - @param m The mbuf containing the packet header - @param unsent_data A pointer to an integer where the value of - unsent data will be set. - @result 0 upon success otherwise the errno error. If the mbuf - packet header does not have valid data bytes, the error - code will be EINVAL + * @function mbuf_get_unsent_data_bytes + * @discussion Returns the amount of data that is waiting to be sent + * on this interface. This is a private SPI used by cellular + * interface as an indication of future activity on that + * interface. + * @param m The mbuf containing the packet header + * @param unsent_data A pointer to an integer where the value of + * unsent data will be set. + * @result 0 upon success otherwise the errno error. If the mbuf + * packet header does not have valid data bytes, the error + * code will be EINVAL */ extern errno_t mbuf_get_unsent_data_bytes(const mbuf_t m, u_int32_t *unsent_data); @@ -1671,48 +1671,48 @@ typedef struct { } mbuf_buffer_status_t; /*! - @function mbuf_get_buffer_status - @discussion Returns the amount of data that is waiting to be sent - on this interface. This is a private SPI used by cellular - interface as an indication of future activity on that - interface. - @param m The mbuf containing the packet header - @param buf_status A pointer to the structure where the value of - unsent data will be set. - @result 0 upon success. If any of the arguments is NULL or if the - mbuf packet header does not have valid data bytes, - EINVAL will be returned + * @function mbuf_get_buffer_status + * @discussion Returns the amount of data that is waiting to be sent + * on this interface. This is a private SPI used by cellular + * interface as an indication of future activity on that + * interface. + * @param m The mbuf containing the packet header + * @param buf_status A pointer to the structure where the value of + * unsent data will be set. + * @result 0 upon success. If any of the arguments is NULL or if the + * mbuf packet header does not have valid data bytes, + * EINVAL will be returned */ extern errno_t mbuf_get_buffer_status(const mbuf_t m, - mbuf_buffer_status_t *buf_status); + mbuf_buffer_status_t *buf_status); /*! - @function mbuf_pkt_new_flow - @discussion This function is used to check if the packet is from a - new flow that can be treated with higher priority. This is - a private SPI. - @param m The mbuf containing the packet header - @param retval A pointer to an integer used as an out argument. The - value is set to 1 if the packet is from a new flow, - otherwise it is set to 0. - @result 0 upon success otherwise the errno error. If any of the - arguments is NULL or if the mbuf does not have valid packet - header, the error code will be EINVAL + * @function mbuf_pkt_new_flow + * @discussion This function is used to check if the packet is from a + * new flow that can be treated with higher priority. This is + * a private SPI. + * @param m The mbuf containing the packet header + * @param retval A pointer to an integer used as an out argument. The + * value is set to 1 if the packet is from a new flow, + * otherwise it is set to 0. 
+ * @result 0 upon success otherwise the errno error. If any of the + * arguments is NULL or if the mbuf does not have valid packet + * header, the error code will be EINVAL */ extern errno_t mbuf_pkt_new_flow(const mbuf_t m, u_int32_t *retval); /*! - @function mbuf_last_pkt - @discussion This function is used to check if the packet is the - last one sent on a TCP socket. This is an advisory - for the underlying layers. - @param m The mbuf containing the packet header - @param retval A pointer to an integer whose value will be set to - 1 if the packet is the last packet, otherwise it will - be set to 0. - @result 0 upon success otherwise the errno error. If any of the - arguments is NULL or if the mbuf does not have valid - packet header, the error code will be EINVAL + * @function mbuf_last_pkt + * @discussion This function is used to check if the packet is the + * last one sent on a TCP socket. This is an advisory + * for the underlying layers. + * @param m The mbuf containing the packet header + * @param retval A pointer to an integer whose value will be set to + * 1 if the packet is the last packet, otherwise it will + * be set to 0. + * @result 0 upon success otherwise the errno error. If any of the + * arguments is NULL or if the mbuf does not have valid + * packet header, the error code will be EINVAL */ extern errno_t mbuf_last_pkt(const mbuf_t m, u_int32_t *retval); @@ -1720,194 +1720,194 @@ extern errno_t mbuf_last_pkt(const mbuf_t m, u_int32_t *retval); #ifdef XNU_KERNEL_PRIVATE /*! - @function mbuf_pkt_list_len - @discussion Retrieves the length of the list of mbuf packets. - @param mbuf The mbuf. - @result The length of the mbuf packet list. + * @function mbuf_pkt_list_len + * @discussion Retrieves the length of the list of mbuf packets. + * @param mbuf The mbuf. + * @result The length of the mbuf packet list. */ extern size_t mbuf_pkt_list_len(const mbuf_t mbuf); /*! - @function mbuf_pkt_list_maxlen - @discussion Retrieves the maximum length of data that may be stored - in the list of mbuf packet. This value assumes that the data pointer - was set to the start of the possible range for that pointer - for each mbuf in the packet chain - @param mbuf The mbuf. - @result The maximum length of data for this mbuf. + * @function mbuf_pkt_list_maxlen + * @discussion Retrieves the maximum length of data that may be stored + * in the list of mbuf packet. This value assumes that the data pointer + * was set to the start of the possible range for that pointer + * for each mbuf in the packet chain + * @param mbuf The mbuf. + * @result The maximum length of data for this mbuf. */ extern size_t mbuf_pkt_list_maxlen(const mbuf_t mbuf); #endif /* XNU_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE /*! - @function mbuf_get_timestamp - @discussion Retrieves the timestamp of the packet. - @param mbuf The mbuf representing the packet. - @param ts A pointer where the value of the timestamp will be copied - to. - @param valid A pointer to a boolean value that indicate if the - timestamp is valid (i.e. the packet timestamp has been set). - If "false" the value of "ts" is undetermined. - @result 0 upon success otherwise the errno error. If the mbuf - packet header does not have valid data bytes, the error - code will be EINVAL + * @function mbuf_get_timestamp + * @discussion Retrieves the timestamp of the packet. + * @param mbuf The mbuf representing the packet. + * @param ts A pointer where the value of the timestamp will be copied + * to. 
+ * @param valid A pointer to a boolean value that indicates whether the
+ *       timestamp is valid (i.e. the packet timestamp has been set).
+ *       If "false" the value of "ts" is undetermined.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_get_timestamp(mbuf_t mbuf, u_int64_t *ts,
     boolean_t *valid);
 
 /*!
- @function mbuf_set_timestamp
- @discussion Set the timestamp of the packet.
- @param mbuf The mbuf representing the packet.
- @param ts The value of the timestamp to be stored in the mbuf packet
-       header
- @param valid A boolean value that indicate if the timestamp is valid.
-       Passing false clears any previous timestamp value.
- @result 0 upon success otherwise the errno error. If the mbuf
-       packet header does not have valid data bytes, the error
-       code will be EINVAL
+ * @function mbuf_set_timestamp
+ * @discussion Set the timestamp of the packet.
+ * @param mbuf The mbuf representing the packet.
+ * @param ts The value of the timestamp to be stored in the mbuf packet
+ *       header
+ * @param valid A boolean value that indicates whether the timestamp is
+ *       valid. Passing false clears any previous timestamp value.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_set_timestamp(mbuf_t mbuf, u_int64_t ts, boolean_t valid);
 
 /*!
- @typedef mbuf_tx_compl_func
- @discussion This callback is used to indicate when a driver has
-       transmitted a packet.
- @param pktid The packet indentifier that was returned by
-       mbuf_set_timestamp_requested()
- @param ifp The outgoing interface or NULL if the packet was dropped
-       before reaching the driver
- @param ts The timestamp in nanoseconds when the packet was transmitted
- @param tx_compl_arg An argument set by the driver
- @param tx_compl_data Additional data set by the driver
- @param tx_compl_val The transmission status is expected to be an
-       IOReturn value -- see
-*/
+ * @typedef mbuf_tx_compl_func
+ * @discussion This callback is used to indicate when a driver has
+ *       transmitted a packet.
+ * @param pktid The packet identifier that was returned by
+ *       mbuf_set_timestamp_requested()
+ * @param ifp The outgoing interface or NULL if the packet was dropped
+ *       before reaching the driver
+ * @param ts The timestamp in nanoseconds when the packet was transmitted
+ * @param tx_compl_arg An argument set by the driver
+ * @param tx_compl_data Additional data set by the driver
+ * @param tx_compl_val The transmission status is expected to be an
+ *       IOReturn value -- see
+ */
 typedef void (*mbuf_tx_compl_func)(uintptr_t pktid, ifnet_t ifp,
     u_int64_t ts, uintptr_t tx_compl_arg, uintptr_t tx_compl_data,
     kern_return_t tx_compl_val);
 
 /*!
- @function mbuf_register_tx_compl_callback
- @discussion Register a transmit completion callback function. The
-       callback function must be unregistered before the calling
-       module unloads.
+ * @function mbuf_register_tx_compl_callback
+ * @discussion Register a transmit completion callback function. The
+ *       callback function must be unregistered before the calling
+ *       module unloads.
+ * @param callback The completion callback function to register
+ * @result 0 upon success otherwise the errno error. ENOSPC is returned
+ *       if too many callbacks are registered. EINVAL is returned when
+ *       the function pointer is invalid. EEXIST is returned when
+ *       the function pointer is already registered.
  */
 extern errno_t mbuf_register_tx_compl_callback(
-       mbuf_tx_compl_func callback);
+    mbuf_tx_compl_func callback);
 
 /*!
- @function mbuf_unregister_tx_compl_callback
- @discussion Unregister a transmit completion callback function. The
-       callback function must be unregistered before the calling
-       module unloads.
- @param callback The completion callback function to unregister
- @result 0 upon success otherwise the errno error. EINVAL is returned
-       when the function pointer is invalid. ENOENT is returned when
-       the function pointer is not registered.
+ * @function mbuf_unregister_tx_compl_callback
+ * @discussion Unregister a transmit completion callback function. The
+ *       callback function must be unregistered before the calling
+ *       module unloads.
+ * @param callback The completion callback function to unregister
+ * @result 0 upon success otherwise the errno error. EINVAL is returned
+ *       when the function pointer is invalid. ENOENT is returned when
+ *       the function pointer is not registered.
  */
 extern errno_t mbuf_unregister_tx_compl_callback(
-       mbuf_tx_compl_func callback);
+    mbuf_tx_compl_func callback);
 
 /*!
- @function mbuf_get_timestamp_requested
- @discussion Tell if the packet timestamp needs to be set. This is meant
-       to be used by a driver on egress packets.
- @param mbuf The mbuf representing the packet.
- @param requested A pointer to a boolean value that indicate if the
-       timestamp was requested to be set.
- @result 0 upon success otherwise the errno error. If the mbuf
-       packet header does not have valid data bytes, the error
-       code will be EINVAL
+ * @function mbuf_get_timestamp_requested
+ * @discussion Tells whether the packet timestamp needs to be set. This
+ *       is meant to be used by a driver on egress packets.
+ * @param mbuf The mbuf representing the packet.
+ * @param requested A pointer to a boolean value that indicates whether
+ *       the timestamp was requested to be set.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_get_timestamp_requested(mbuf_t mbuf, boolean_t *requested);
 
 /*!
- @function mbuf_set_timestamp_requested
- @discussion Indicate the callback is expected to be called with the
-       transmission complete timestamp. This is meant to be used
-       on egress packet by the driver.
- @param mbuf The mbuf representing the packet.
- @param callback A previously registered completion callback function.
- @param pktid An output parameter with an opaque value that can be used
-       to identify the packet.
+ * @function mbuf_set_timestamp_requested
+ * @discussion Indicates that the callback is expected to be called with
+ *       the transmission complete timestamp. This is meant to be used
+ *       on egress packets by the driver.
+ * @param mbuf The mbuf representing the packet.
+ * @param callback A previously registered completion callback function.
+ * @param pktid An output parameter with an opaque value that can be used
+ *       to identify the packet.
+ * @result 0 upon success otherwise the errno error. EINVAL is returned
+ *       if the mbuf is not a valid packet or if one of the parameters
+ *       is NULL. ENOENT if the callback is not registered.
  */
-extern errno_t mbuf_set_timestamp_requested(mbuf_t mbuf,
+extern errno_t mbuf_set_timestamp_requested(mbuf_t mbuf,
     uintptr_t *pktid, mbuf_tx_compl_func callback);
 
 /*!
- @function mbuf_get_status
- @discussion Retrieves the packet completion status.
- @param mbuf The mbuf representing the packet.
- @param status A pointer where the value of the completion status will
-       be copied to.
- @result 0 upon success otherwise the errno error. If the mbuf
-       packet header does not have valid data bytes, the error
-       code will be EINVAL
+ * @function mbuf_get_status
+ * @discussion Retrieves the packet completion status.
+ * @param mbuf The mbuf representing the packet.
+ * @param status A pointer where the value of the completion status will
+ *       be copied to.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_get_status(mbuf_t mbuf, kern_return_t *status);
 
 /*!
- @function mbuf_set_status
- @discussion Store the packet completion status in the mbuf packet
-       header.
- @param mbuf The mbuf representing the packet.
- @param status The value of the completion status.
- @result 0 upon success otherwise the errno error. If the mbuf
-       packet header does not have valid data bytes, the error
-       code will be EINVAL
+ * @function mbuf_set_status
+ * @discussion Store the packet completion status in the mbuf packet
+ *       header.
+ * @param mbuf The mbuf representing the packet.
+ * @param status The value of the completion status.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_set_status(mbuf_t mbuf, kern_return_t status);
 
 /*!
- @function mbuf_get_tx_compl_data
- @discussion Retrieves the packet completion status.
- @param m The mbuf representing the packet.
- @result 0 upon success otherwise the errno error. If the mbuf
-       packet header does not have valid data bytes, the error
-       code will be EINVAL
+ * @function mbuf_get_tx_compl_data
+ * @discussion Retrieves the driver transmit completion data.
+ * @param m The mbuf representing the packet.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_get_tx_compl_data(mbuf_t m, uintptr_t *arg,
     uintptr_t *data);
 
 /*!
- @function mbuf_set_tx_compl_data
- @discussion Retrieves the packet completion status.
- @param m The mbuf representing the packet.
- @result 0 upon success otherwise the errno error. If the mbuf
-       packet header does not have valid data bytes, the error
-       code will be EINVAL
+ * @function mbuf_set_tx_compl_data
+ * @discussion Stores the driver transmit completion data.
+ * @param m The mbuf representing the packet.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_set_tx_compl_data(mbuf_t m, uintptr_t arg,
     uintptr_t data);
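A client-side sketch of the completion flow above (KERNEL_PRIVATE): register the callback once, then mark each egress packet of interest. The function names are hypothetical.

#include <sys/errno.h>
#include <sys/kpi_mbuf.h>

static void
my_tx_done(uintptr_t pktid, ifnet_t ifp, u_int64_t ts,
    uintptr_t tx_compl_arg, uintptr_t tx_compl_data,
    kern_return_t tx_compl_val)
{
    /* Runs once the driver reports transmission (or drop) of the
     * packet identified by pktid. */
}

static errno_t
track_egress_packet(mbuf_t m)
{
    uintptr_t pktid;
    errno_t err;

    /* Register once; tolerate a prior registration. */
    err = mbuf_register_tx_compl_callback(my_tx_done);
    if (err != 0 && err != EEXIST)
        return (err);

    /* Request that my_tx_done fire for this packet. */
    return (mbuf_set_timestamp_requested(m, &pktid, my_tx_done));
}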
 /*!
- @function mbuf_get_flowid
- @discussion Retrieve the flow ID of the packet .
- @param mbuf The mbuf representing the packet.
- @param flowid The flow ID of the packet.
- @result 0 upon success otherwise the errno error. If the mbuf
-       packet header does not have valid data bytes, the error
-       code will be EINVAL
+ * @function mbuf_get_flowid
+ * @discussion Retrieve the flow ID of the packet.
+ * @param mbuf The mbuf representing the packet.
+ * @param flowid The flow ID of the packet.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_get_flowid(mbuf_t mbuf, u_int16_t *flowid);
 
 /*!
- @function mbuf_set_flowid
- @discussion Set the flow ID of the packet .
- @param mbuf The mbuf representing the packet.
- @param flowid The flow ID to be set.
- @result 0 upon success otherwise the errno error. If the mbuf
-       packet header does not have valid data bytes, the error
-       code will be EINVAL
+ * @function mbuf_set_flowid
+ * @discussion Set the flow ID of the packet.
+ * @param mbuf The mbuf representing the packet.
+ * @param flowid The flow ID to be set.
+ * @result 0 upon success otherwise the errno error. If the mbuf
+ *       packet header does not have valid data bytes, the error
+ *       code will be EINVAL
  */
 extern errno_t mbuf_set_flowid(mbuf_t mbuf, u_int16_t flowid);
 
@@ -1916,32 +1916,32 @@ extern errno_t mbuf_set_flowid(mbuf_t mbuf, u_int16_t flowid);
 
 /* IF_QUEUE interaction */
 
-#define IF_ENQUEUE_MBUF(ifq, m) { \
-       mbuf_setnextpkt((m), 0); \
-       if ((ifq)->ifq_tail == 0) \
-               (ifq)->ifq_head = (m); \
-       else \
-               mbuf_setnextpkt((mbuf_t)(ifq)->ifq_tail, (m)); \
-       (ifq)->ifq_tail = (m); \
-       (ifq)->ifq_len++; \
+#define IF_ENQUEUE_MBUF(ifq, m) { \
+       mbuf_setnextpkt((m), 0); \
+       if ((ifq)->ifq_tail == 0) \
+               (ifq)->ifq_head = (m); \
+       else \
+               mbuf_setnextpkt((mbuf_t)(ifq)->ifq_tail, (m)); \
+       (ifq)->ifq_tail = (m); \
+       (ifq)->ifq_len++; \
 }
-#define IF_PREPEND_MBUF(ifq, m) { \
-       mbuf_setnextpkt((m), (ifq)->ifq_head); \
-       if ((ifq)->ifq_tail == 0) \
-               (ifq)->ifq_tail = (m); \
-       (ifq)->ifq_head = (m); \
-       (ifq)->ifq_len++; \
+#define IF_PREPEND_MBUF(ifq, m) { \
+       mbuf_setnextpkt((m), (ifq)->ifq_head); \
+       if ((ifq)->ifq_tail == 0) \
+               (ifq)->ifq_tail = (m); \
+       (ifq)->ifq_head = (m); \
+       (ifq)->ifq_len++; \
 }
-#define IF_DEQUEUE_MBUF(ifq, m) { \
-       (m) = (ifq)->ifq_head; \
-       if (m) { \
-               if (((ifq)->ifq_head = mbuf_nextpkt((m))) == 0) \
-                       (ifq)->ifq_tail = 0; \
-               mbuf_setnextpkt((m), 0); \
-               (ifq)->ifq_len--; \
-       } \
+#define IF_DEQUEUE_MBUF(ifq, m) { \
+       (m) = (ifq)->ifq_head; \
+       if (m) { \
+               if (((ifq)->ifq_head = mbuf_nextpkt((m))) == 0) \
+                       (ifq)->ifq_tail = 0; \
+               mbuf_setnextpkt((m), 0); \
+               (ifq)->ifq_len--; \
+       } \
 }
 
 __END_DECLS
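A sketch of the IF_*_MBUF helpers, assuming a queue with the ifq_head/ifq_tail/ifq_len fields these macros expect; the struct shown is illustrative.

#include <sys/kpi_mbuf.h>

struct my_ifqueue {
    mbuf_t  ifq_head;       /* oldest packet */
    mbuf_t  ifq_tail;       /* newest packet */
    int     ifq_len;        /* packets queued */
};

static void
flush_queue(struct my_ifqueue *q)
{
    mbuf_t m;

    IF_DEQUEUE_MBUF(q, m);          /* unlinks head, clears nextpkt */
    while (m != NULL) {
        mbuf_freem(m);
        IF_DEQUEUE_MBUF(q, m);
    }
}

diff --git a/bsd/sys/kpi_private.h b/bsd/sys/kpi_private.h
index bbd67a2f2..17f8a3334 100644
--- a/bsd/sys/kpi_private.h
+++ b/bsd/sys/kpi_private.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2015 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.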
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _SYS_KPI_PRIVATE_H -#define _SYS_KPI_PRIVATE_H +#define _SYS_KPI_PRIVATE_H /* - * Assorted odds and ends for exported private KPI (internal use only) + * Assorted odds and ends for exported private KPI (internal use only) */ #ifdef KERNEL @@ -41,12 +41,12 @@ __BEGIN_DECLS #ifdef KERNEL_PRIVATE /* kernel-exported qsort */ -void kx_qsort (void* array, size_t nm, size_t member_size, int (*)(const void * , const void *)); +void kx_qsort(void* array, size_t nm, size_t member_size, int (*)(const void *, const void *)); #endif /* KERNEL_PRIVATE */ __END_DECLS -#endif /* KERNEL */ +#endif /* KERNEL */ #endif /* !_SYS_KPI_PRIVATE_H */ diff --git a/bsd/sys/kpi_socket.h b/bsd/sys/kpi_socket.h index 837611b5c..aa5a89f26 100644 --- a/bsd/sys/kpi_socket.h +++ b/bsd/sys/kpi_socket.h @@ -26,13 +26,13 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header kpi_socket.h - This header defines an API for creating and interacting with sockets - in the kernel. It is possible to create sockets in the kernel - without an associated file descriptor. In some cases, a reference to - the socket may be known while the file descriptor is not. These - functions can be used for interacting with sockets in the kernel. - The API is similar to the user space socket API. + * @header kpi_socket.h + * This header defines an API for creating and interacting with sockets + * in the kernel. It is possible to create sockets in the kernel + * without an associated file descriptor. In some cases, a reference to + * the socket may be known while the file descriptor is not. These + * functions can be used for interacting with sockets in the kernel. + * The API is similar to the user space socket API. */ #ifndef __KPI_SOCKET__ #define __KPI_SOCKET__ @@ -46,70 +46,70 @@ __BEGIN_DECLS struct timeval; /*! - @typedef sock_upcall - - @discussion sock_upcall is used by a socket to notify an in kernel - client that data is waiting. Instead of making blocking calls in - the kernel, a client can specify an upcall which will be called - when data is available or the socket is ready for sending. - - Calls to your upcall function are not serialized and may be - called concurrently from multiple threads in the kernel. - - Your upcall function will be called: - when there is data more than the low water mark for reading, - or when there is space for a write, - or when there is a connection to accept, - or when a socket is connected, - or when a socket is closed or disconnected - - @param so A reference to the socket that's ready. - @param cookie The cookie passed in when the socket was created. - @param waitf Indicates whether or not it's safe to block. -*/ + * @typedef sock_upcall + * + * @discussion sock_upcall is used by a socket to notify an in kernel + * client that data is waiting. Instead of making blocking calls in + * the kernel, a client can specify an upcall which will be called + * when data is available or the socket is ready for sending. 
+ * + * Calls to your upcall function are not serialized and may be + * called concurrently from multiple threads in the kernel. + * + * Your upcall function will be called: + * when there is data more than the low water mark for reading, + * or when there is space for a write, + * or when there is a connection to accept, + * or when a socket is connected, + * or when a socket is closed or disconnected + * + * @param so A reference to the socket that's ready. + * @param cookie The cookie passed in when the socket was created. + * @param waitf Indicates whether or not it's safe to block. + */ typedef void (*sock_upcall)(socket_t so, void *cookie, int waitf); #ifdef KERNEL_PRIVATE /*! - @typedef sock_evupcall - - @discussion sock_evupcall is used by a socket to notify an in kernel - client when an event occurs. Instead of making blocking calls in - the kernel, a client can specify an upcall which will be called - when an event status is available. - @param so A reference to the socket that's ready. - @param cookie The cookie passed in when the socket was created. - @param event Indicates the event as defined by SO_FILT_HINT_* -*/ + * @typedef sock_evupcall + * + * @discussion sock_evupcall is used by a socket to notify an in kernel + * client when an event occurs. Instead of making blocking calls in + * the kernel, a client can specify an upcall which will be called + * when an event status is available. + * @param so A reference to the socket that's ready. + * @param cookie The cookie passed in when the socket was created. + * @param event Indicates the event as defined by SO_FILT_HINT_* + */ typedef void (*sock_evupcall)(socket_t so, void *cookie, u_int32_t event); #endif /* KERNEL_PRIVATE */ /*! - @function sock_accept - @discussion Accepts an incoming connection on a socket. See 'man 2 - accept' for more information. Allocating a socket in this manner - creates a socket with no associated file descriptor. - @param so The listening socket you'd like to accept a connection on. - @param from A pointer to a socket address that will be filled in - with the address the connection is from. - @param fromlen Maximum length of from. - @param flags Supports MSG_DONTWAIT and MSG_USEUPCALL. If - MSG_DONTWAIT is set, accept will return EWOULDBLOCK if there are - no connections ready to be accepted. If MSG_USEUPCALL is set, - the created socket will use the same upcall function attached to - the original socket. - @param callback A notifier function to be called when an event - occurs on the socket. This may be NULL. - @param cookie A cookie passed directly to the callback. - @param new_so Upon success, *new_so will be a reference to a new - socket for tracking the connection. - @result 0 on success otherwise the errno error. + * @function sock_accept + * @discussion Accepts an incoming connection on a socket. See 'man 2 + * accept' for more information. Allocating a socket in this manner + * creates a socket with no associated file descriptor. + * @param so The listening socket you'd like to accept a connection on. + * @param from A pointer to a socket address that will be filled in + * with the address the connection is from. + * @param fromlen Maximum length of from. + * @param flags Supports MSG_DONTWAIT and MSG_USEUPCALL. If + * MSG_DONTWAIT is set, accept will return EWOULDBLOCK if there are + * no connections ready to be accepted. If MSG_USEUPCALL is set, + * the created socket will use the same upcall function attached to + * the original socket. 
+ * @param callback A notifier function to be called when an event
+ *       occurs on the socket. This may be NULL.
+ * @param cookie A cookie passed directly to the callback.
+ * @param new_so Upon success, *new_so will be a reference to a new
+ *       socket for tracking the connection.
+ * @result 0 on success otherwise the errno error.
  */
 #ifdef KERNEL_PRIVATE
 extern errno_t sock_accept_internal(socket_t so, struct sockaddr *from,
     int fromlen, int flags, sock_upcall callback, void *cookie,
     socket_t *new_so);
 
-#define sock_accept(so, from, fromlen, flags, callback, cookie, new_so) \
+#define sock_accept(so, from, fromlen, flags, callback, cookie, new_so) \
 	sock_accept_internal((so), (from), (fromlen), (flags), (callback), \
 	    (cookie), (new_so))
 #else
@@ -118,142 +118,142 @@ extern errno_t sock_accept(socket_t so, struct sockaddr *from, int fromlen,
 #endif /* KERNEL_PRIVATE */
 
 /*!
- @function sock_bind
- @discussion Binds a socket to a specific address. See 'man 2 bind'
-       for more information.
- @param so The socket to be bound.
- @param to The local address the socket should be bound to.
- @result 0 on success otherwise the errno error.
+ * @function sock_bind
+ * @discussion Binds a socket to a specific address. See 'man 2 bind'
+ *       for more information.
+ * @param so The socket to be bound.
+ * @param to The local address the socket should be bound to.
+ * @result 0 on success otherwise the errno error.
  */
 extern errno_t sock_bind(socket_t so, const struct sockaddr *to);
 
 /*!
- @function sock_connect
- @discussion Initiates a connection on the socket. See 'man 2
-       connect' for more information.
- @param so The socket to be connect.
- @param to The remote address the socket should connect to.
- @param flags Flags for connecting. The only flag supported so far is
-       MSG_DONTWAIT. MSG_DONTWAIT will perform a non-blocking connect.
-       sock_connect will return immediately with EINPROGRESS. The
-       upcall, if supplied, will be called when the connection is
-       completed.
- @result 0 on success, EINPROGRESS for a non-blocking connect that
-       has not completed, otherwise the errno error.
+ * @function sock_connect
+ * @discussion Initiates a connection on the socket. See 'man 2
+ *       connect' for more information.
+ * @param so The socket to be connected.
+ * @param to The remote address the socket should connect to.
+ * @param flags Flags for connecting. The only flag supported so far is
+ *       MSG_DONTWAIT. MSG_DONTWAIT will perform a non-blocking connect.
+ *       sock_connect will return immediately with EINPROGRESS. The
+ *       upcall, if supplied, will be called when the connection is
+ *       completed.
+ * @result 0 on success, EINPROGRESS for a non-blocking connect that
+ *       has not completed, otherwise the errno error.
  */
 extern errno_t sock_connect(socket_t so, const struct sockaddr *to, int flags);
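A sketch of a non-blocking in-kernel connect using the upcall mechanism; sock_socket is declared later in this header, and address setup is left to the caller.

#include <sys/errno.h>
#include <sys/kpi_socket.h>
#include <sys/socket.h>

static void
my_upcall(socket_t so, void *cookie, int waitf)
{
    /* Connection progress or data readiness; upcalls are not
     * serialized and may run concurrently. */
}

static errno_t
start_connect(const struct sockaddr *to, socket_t *out_so)
{
    socket_t so;
    errno_t err;

    err = sock_socket(PF_INET, SOCK_STREAM, 0, my_upcall, NULL, &so);
    if (err != 0)
        return (err);

    /* MSG_DONTWAIT: return immediately; EINPROGRESS is expected. */
    err = sock_connect(so, to, MSG_DONTWAIT);
    if (err != 0 && err != EINPROGRESS) {
        sock_close(so);
        return (err);
    }
    *out_so = so;
    return (0);
}

 #ifdef KERNEL_PRIVATE
 /*
- This function was added to support NFS. NFS does something funny,
- setting a short timeout and checking to see if it should abort the
- connect every two seconds. Ideally, NFS would use the upcall to be
- notified when the connect is complete.
-
- If you feel you need to use this function, please contact us to
- explain why.
-
- @function sock_connectwait
- @discussion Allows a caller to wait on a socket connect.
- @param so The socket being connected.
- @param tv The amount of time to wait.
- @result 0 on success otherwise the errno error. EINPROGRESS will be
-       returned if the connection did not complete in the timeout
-       specified.
+ * This function was added to support NFS.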
NFS does something funny, + * setting a short timeout and checking to see if it should abort the + * connect every two seconds. Ideally, NFS would use the upcall to be + * notified when the connect is complete. + * + * If you feel you need to use this function, please contact us to + * explain why. + * + * @function sock_connectwait + * @discussion Allows a caller to wait on a socket connect. + * @param so The socket being connected. + * @param tv The amount of time to wait. + * @result 0 on success otherwise the errno error. EINPROGRESS will be + * returned if the connection did not complete in the timeout + * specified. */ extern errno_t sock_connectwait(socket_t so, const struct timeval *tv); #endif /* KERNEL_PRIVATE */ /*! - @function sock_getpeername - @discussion Retrieves the remote address of a connected socket. See - 'man 2 getpeername'. - @param so The socket. - @param peername Storage for the peer name. - @param peernamelen Length of storage for the peer name. - @result 0 on success otherwise the errno error. + * @function sock_getpeername + * @discussion Retrieves the remote address of a connected socket. See + * 'man 2 getpeername'. + * @param so The socket. + * @param peername Storage for the peer name. + * @param peernamelen Length of storage for the peer name. + * @result 0 on success otherwise the errno error. */ extern errno_t sock_getpeername(socket_t so, struct sockaddr *peername, int peernamelen); /*! - @function sock_getsockname - @discussion Retrieves the local address of a socket. See 'man 2 - getsockname'. - @param so The socket. - @param sockname Storage for the local name. - @param socknamelen Length of storage for the socket name. - @result 0 on success otherwise the errno error. + * @function sock_getsockname + * @discussion Retrieves the local address of a socket. See 'man 2 + * getsockname'. + * @param so The socket. + * @param sockname Storage for the local name. + * @param socknamelen Length of storage for the socket name. + * @result 0 on success otherwise the errno error. */ extern errno_t sock_getsockname(socket_t so, struct sockaddr *sockname, int socknamelen); /*! - @function sock_getsockopt - @discussion Retrieves a socket option. See 'man 2 getsockopt'. - @param so The socket. - @param level Level of the socket option. - @param optname The option name. - @param optval The option value. - @param optlen The length of optval, returns the actual length. - @result 0 on success otherwise the errno error. + * @function sock_getsockopt + * @discussion Retrieves a socket option. See 'man 2 getsockopt'. + * @param so The socket. + * @param level Level of the socket option. + * @param optname The option name. + * @param optval The option value. + * @param optlen The length of optval, returns the actual length. + * @result 0 on success otherwise the errno error. */ extern errno_t sock_getsockopt(socket_t so, int level, int optname, void *optval, int *optlen); /*! - @function sock_ioctl - @discussion Performs an ioctl operation on a socket. See 'man 2 ioctl'. - @param so The socket. - @param request The ioctl name. - @param argp The argument. - @result 0 on success otherwise the errno error. + * @function sock_ioctl + * @discussion Performs an ioctl operation on a socket. See 'man 2 ioctl'. + * @param so The socket. + * @param request The ioctl name. + * @param argp The argument. + * @result 0 on success otherwise the errno error. */ extern errno_t sock_ioctl(socket_t so, unsigned long request, void *argp); /*! 
- @function sock_setsockopt
- @discussion Sets a socket option. See 'man 2 setsockopt'.
- @param so The socket.
- @param level Level of the socket option.
- @param optname The option name.
- @param optval The option value.
- @param optlen The length of optval.
- @result 0 on success otherwise the errno error.
+ * @function sock_setsockopt
+ * @discussion Sets a socket option. See 'man 2 setsockopt'.
+ * @param so The socket.
+ * @param level Level of the socket option.
+ * @param optname The option name.
+ * @param optval The option value.
+ * @param optlen The length of optval.
+ * @result 0 on success otherwise the errno error.
  */
 extern errno_t sock_setsockopt(socket_t so, int level, int optname,
     const void *optval, int optlen);
 
 #ifdef KERNEL_PRIVATE
 /*
- This function was added to support AFP setting the traffic class
- for a backup stream within a wireless LAN or over link-local address.
-
- If you feel you need to use this function, please contact us to
- explain why.
-
- @function sock_settclassopt
- @discussion Allows a caller to set the traffic class.
- @param so The socket.
- @param optval The option value.
- @param optlen The length of optval.
- @result 0 on success otherwise the errno error.
+ * This function was added to support AFP setting the traffic class
+ * for a backup stream within a wireless LAN or over link-local address.
+ *
+ * If you feel you need to use this function, please contact us to
+ * explain why.
+ *
+ * @function sock_settclassopt
+ * @discussion Allows a caller to set the traffic class.
+ * @param so The socket.
+ * @param optval The option value.
+ * @param optlen The length of optval.
+ * @result 0 on success otherwise the errno error.
  */
 extern errno_t sock_settclassopt(socket_t so, const void* optval,
     size_t optlen);
 
 /*
- This function was added to support AFP getting the traffic class
- set on a stream.
-
- This is also a private API, please contact us if you need to use it.
-
- @function sockgettclassopt
- @discussion Allows a caller to get the traffic class.
- @param so The socket.
- @param optval The option value.
- @param optlen The length of optval, returns the actual length.
- @result 0 on success otherwise the errno error.
-*/
+ * This function was added to support AFP getting the traffic class
+ * set on a stream.
+ *
+ * This is also a private API, please contact us if you need to use it.
+ *
+ * @function sock_gettclassopt
+ * @discussion Allows a caller to get the traffic class.
+ * @param so The socket.
+ * @param optval The option value.
+ * @param optlen The length of optval, returns the actual length.
+ * @result 0 on success otherwise the errno error.
+ */
 extern errno_t sock_gettclassopt(socket_t so, void* optval, size_t* optlen);
 
 #ifdef XNU_KERNEL_PRIVATE
@@ -270,114 +270,114 @@ extern errno_t sock_receive_internal(socket_t, struct msghdr *, mbuf_t *,
 #endif /* KERNEL_PRIVATE */
 
 /*!
- @function sock_listen
- @discussion Indicate that the socket should start accepting incoming
-       connections. See 'man 2 listen'.
- @param so The socket.
- @param backlog The maximum length of the queue of pending connections.
- @result 0 on success otherwise the errno error.
+ * @function sock_listen
+ * @discussion Indicate that the socket should start accepting incoming
+ *       connections. See 'man 2 listen'.
+ * @param so The socket.
+ * @param backlog The maximum length of the queue of pending connections.
+ * @result 0 on success otherwise the errno error.
  */
 extern errno_t sock_listen(socket_t so, int backlog);
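A sketch combining the option and listen calls above into a minimal listener; the option choice and backlog value are illustrative.

#include <sys/kpi_socket.h>
#include <sys/socket.h>

static errno_t
start_listener(const struct sockaddr *local, socket_t *out_so)
{
    socket_t so;
    int on = 1;
    errno_t err;

    err = sock_socket(PF_INET, SOCK_STREAM, 0, NULL, NULL, &so);
    if (err != 0)
        return (err);

    /* Allow quick rebinding of the local address. */
    (void) sock_setsockopt(so, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));

    if ((err = sock_bind(so, local)) == 0)
        err = sock_listen(so, 5);       /* backlog of 5 */
    if (err != 0)
        sock_close(so);
    else
        *out_so = so;
    return (err);
}

 /*!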
- @function sock_receive - @discussion Receive data from a socket. Similar to recvmsg. See 'man - 2 recvmsg' for more information about receiving data. - @param so The socket. - @param msg The msg describing how the data should be received. - @param flags See 'man 2 recvmsg'. - @param recvdlen Number of bytes received, same as return value of - userland recvmsg. - @result 0 on success, EWOULDBLOCK if non-blocking and operation - would cause the thread to block, otherwise the errno error. + * @function sock_receive + * @discussion Receive data from a socket. Similar to recvmsg. See 'man + * 2 recvmsg' for more information about receiving data. + * @param so The socket. + * @param msg The msg describing how the data should be received. + * @param flags See 'man 2 recvmsg'. + * @param recvdlen Number of bytes received, same as return value of + * userland recvmsg. + * @result 0 on success, EWOULDBLOCK if non-blocking and operation + * would cause the thread to block, otherwise the errno error. */ extern errno_t sock_receive(socket_t so, struct msghdr *msg, int flags, size_t *recvdlen); /*! - @function sock_receivembuf - @discussion Receive data from a socket. Similar to sock_receive - though data is returned as a chain of mbufs. See 'man 2 recvmsg' - for more information about receiving data. - @param so The socket. - @param msg The msg describing how the data should be received. May - be NULL. The msg_iov is ignored. - @param data Upon return *data will be a reference to an mbuf chain - containing the data received. This eliminates copying the data - out of the mbufs. Caller is responsible for freeing the mbufs. - @param flags See 'man 2 recvmsg'. - @param recvlen Maximum number of bytes to receive in the mbuf chain. - Upon return, this value will be set to the number of bytes - received, same as return value of userland recvmsg. - @result 0 on success, EWOULDBLOCK if non-blocking and operation - would cause the thread to block, otherwise the errno error. + * @function sock_receivembuf + * @discussion Receive data from a socket. Similar to sock_receive + * though data is returned as a chain of mbufs. See 'man 2 recvmsg' + * for more information about receiving data. + * @param so The socket. + * @param msg The msg describing how the data should be received. May + * be NULL. The msg_iov is ignored. + * @param data Upon return *data will be a reference to an mbuf chain + * containing the data received. This eliminates copying the data + * out of the mbufs. Caller is responsible for freeing the mbufs. + * @param flags See 'man 2 recvmsg'. + * @param recvlen Maximum number of bytes to receive in the mbuf chain. + * Upon return, this value will be set to the number of bytes + * received, same as return value of userland recvmsg. + * @result 0 on success, EWOULDBLOCK if non-blocking and operation + * would cause the thread to block, otherwise the errno error. */ extern errno_t sock_receivembuf(socket_t so, struct msghdr *msg, mbuf_t *data, int flags, size_t *recvlen); /*! - @function sock_send - @discussion Send data on a socket. Similar to sendmsg. See 'man 2 - sendmsg' for more information about sending data. - @param so The socket. - @param msg The msg describing how the data should be sent. Any - pointers must point to data in the kernel. - @param flags See 'man 2 sendmsg'. - @param sentlen The number of bytes sent. - @result 0 on success, EWOULDBLOCK if non-blocking and operation - would cause the thread to block, otherwise the errno error. 
+ * @function sock_send
+ * @discussion Send data on a socket. Similar to sendmsg. See 'man 2
+ *       sendmsg' for more information about sending data.
+ * @param so The socket.
+ * @param msg The msg describing how the data should be sent. Any
+ *       pointers must point to data in the kernel.
+ * @param flags See 'man 2 sendmsg'.
+ * @param sentlen The number of bytes sent.
+ * @result 0 on success, EWOULDBLOCK if non-blocking and operation
+ *       would cause the thread to block, otherwise the errno error.
  */
 extern errno_t sock_send(socket_t so, const struct msghdr *msg, int flags,
     size_t *sentlen);
 
 /*!
- @function sock_sendmbuf
- @discussion Send data in an mbuf on a socket. Similar to sock_send
-       only the data to be sent is taken from the mbuf chain.
- @param so The socket.
- @param msg The msg describing how the data should be sent. The
-       msg_iov is ignored. msg may be NULL.
- @param data The mbuf chain of data to send.
- @param flags See 'man 2 sendmsg'.
- @param sentlen The number of bytes sent.
- @result 0 on success, EWOULDBLOCK if non-blocking and operation
-       would cause the thread to block, otherwise the errno error.
-       Regardless of return value, the mbuf chain 'data' will be freed.
+ * @function sock_sendmbuf
+ * @discussion Send data in an mbuf on a socket. Similar to sock_send
+ *       only the data to be sent is taken from the mbuf chain.
+ * @param so The socket.
+ * @param msg The msg describing how the data should be sent. The
+ *       msg_iov is ignored. msg may be NULL.
+ * @param data The mbuf chain of data to send.
+ * @param flags See 'man 2 sendmsg'.
+ * @param sentlen The number of bytes sent.
+ * @result 0 on success, EWOULDBLOCK if non-blocking and operation
+ *       would cause the thread to block, otherwise the errno error.
+ *       Regardless of return value, the mbuf chain 'data' will be freed.
  */
 extern errno_t sock_sendmbuf(socket_t so, const struct msghdr *msg,
     mbuf_t data, int flags, size_t *sentlen);
 
 /*!
- @function sock_shutdown
- @discussion Shutdown one or both directions of a connection. See
-       'man 2 shutdown' for more information.
- @param so The socket.
- @param how SHUT_RD - shutdown receive.
-       SHUT_WR - shutdown send.
-       SHUT_RDWR - shutdown both.
- @result 0 on success otherwise the errno error.
+ * @function sock_shutdown
+ * @discussion Shut down one or both directions of a connection. See
+ *       'man 2 shutdown' for more information.
+ * @param so The socket.
+ * @param how SHUT_RD - shutdown receive.
+ *       SHUT_WR - shutdown send.
+ *       SHUT_RDWR - shutdown both.
+ * @result 0 on success otherwise the errno error.
  */
 extern errno_t sock_shutdown(socket_t so, int how);
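A sketch of mbuf-based receive and send; note the ownership rule from the sock_sendmbuf description above: the chain is freed regardless of the result.

#include <sys/kpi_socket.h>
#include <sys/socket.h>

static errno_t
echo_once(socket_t so)
{
    mbuf_t data = NULL;
    size_t len = 65536;         /* upper bound on bytes to receive */
    errno_t err;

    err = sock_receivembuf(so, NULL, &data, MSG_DONTWAIT, &len);
    if (err != 0)
        return (err);

    /* data is consumed (and freed) by sock_sendmbuf regardless of
     * the result, so it must not be touched afterwards. */
    return (sock_sendmbuf(so, NULL, data, 0, &len));
}

 /*!
- @function sock_socket
- @discussion Allocate a socket. Allocating a socket in this manner
-       creates a socket with no associated file descriptor. For more
-       information, see 'man 2 socket'.
- @param domain The socket domain (PF_INET, etc...).
- @param type The socket type (SOCK_STREAM, SOCK_DGRAM, etc...).
- @param protocol The socket protocol.
- @param callback A notifier function to be called when an event
-       occurs on the socket. This may be NULL.
- @param cookie A cookie passed directly to the callback.
- @param new_so Upon success, a reference to the new socket.
- @result 0 on success otherwise the errno error.
+ * @function sock_socket
+ * @discussion Allocate a socket. Allocating a socket in this manner
+ *       creates a socket with no associated file descriptor. For more
+ *       information, see 'man 2 socket'.
+ * @param domain The socket domain (PF_INET, etc...).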
+ * @param type The socket type (SOCK_STREAM, SOCK_DGRAM, etc...). + * @param protocol The socket protocol. + * @param callback A notifier function to be called when an event + * occurs on the socket. This may be NULL. + * @param cookie A cookie passed directly to the callback. + * @param new_so Upon success, a reference to the new socket. + * @result 0 on success otherwise the errno error. */ #ifdef KERNEL_PRIVATE extern errno_t sock_socket_internal(int domain, int type, int protocol, sock_upcall callback, void *cookie, socket_t *new_so); - -#define sock_socket(domain, type, protocol, callback, cookie, new_so) \ + +#define sock_socket(domain, type, protocol, callback, cookie, new_so) \ sock_socket_internal((domain), (type), (protocol), \ (callback), (cookie), (new_so)) #else @@ -386,198 +386,198 @@ extern errno_t sock_socket(int domain, int type, int protocol, #endif /* KERNEL_PRIVATE */ /*! - @function sock_close - @discussion Close the socket. - @param so The socket to close. This should only ever be a socket - created with sock_socket. Closing a socket created in user space - using sock_close may leave a file descriptor pointing to the - closed socket, resulting in undefined behavior. + * @function sock_close + * @discussion Close the socket. + * @param so The socket to close. This should only ever be a socket + * created with sock_socket. Closing a socket created in user space + * using sock_close may leave a file descriptor pointing to the + * closed socket, resulting in undefined behavior. */ extern void sock_close(socket_t so); #ifdef KERNEL_PRIVATE /* - @function sock_retain - @discussion Prevents the socket from closing - @param so The socket to close. Increment a retain count on the - socket, preventing it from being closed when sock_close is - called. This is used when a File Descriptor is passed (and - closed) from userland and the kext wants to keep ownership of - that socket. It is used in conjunction with - sock_release(socket_t so). + * @function sock_retain + * @discussion Prevents the socket from closing + * @param so The socket to close. Increment a retain count on the + * socket, preventing it from being closed when sock_close is + * called. This is used when a File Descriptor is passed (and + * closed) from userland and the kext wants to keep ownership of + * that socket. It is used in conjunction with + * sock_release(socket_t so). */ extern void sock_retain(socket_t so); /* - @function sock_release - @discussion Decrement the retain count and close the socket if the - retain count reaches zero. - @param so The socket to release. This is used to release ownership - on a socket acquired with sock_retain. When the last retain - count is reached, this will call sock_close to close the socket. + * @function sock_release + * @discussion Decrement the retain count and close the socket if the + * retain count reaches zero. + * @param so The socket to release. This is used to release ownership + * on a socket acquired with sock_retain. When the last retain + * count is reached, this will call sock_close to close the socket. */ extern void sock_release(socket_t so); #endif /* KERNEL_PRIVATE */ /*! - @function sock_setpriv - @discussion Set the privileged bit in the socket. Allows for - operations that require root privileges. - @param so The socket on which to modify the SS_PRIV flag. - @param on Indicate whether or not the SS_PRIV flag should be set. - @result 0 on success otherwise the errno error. 
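A hedged sketch of the sock_socket()-to-sock_close() lifecycle described above (the helper name and echo logic are illustrative assumptions, and the sock_connect() step is elided):

static errno_t
example_kernel_socket(void)
{
	socket_t so = NULL;
	char buf[64];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	size_t len = 0;
	errno_t err;

	/* No file descriptor is associated with a socket created this way. */
	err = sock_socket(PF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, NULL, &so);
	if (err != 0)
		return (err);
	/* ... sock_connect(so, ...) would normally happen here ... */
	err = sock_receive(so, &msg, 0, &len);
	if (err == 0 && len > 0) {
		iov.iov_len = len;      /* echo back exactly what arrived */
		err = sock_send(so, &msg, 0, &len);
	}
	sock_shutdown(so, SHUT_RDWR);
	sock_close(so);                 /* pair every sock_socket() with sock_close() */
	return (err);
}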
+ * @function sock_setpriv + * @discussion Set the privileged bit in the socket. Allows for + * operations that require root privileges. + * @param so The socket on which to modify the SS_PRIV flag. + * @param on Indicate whether or not the SS_PRIV flag should be set. + * @result 0 on success otherwise the errno error. */ extern errno_t sock_setpriv(socket_t so, int on); /*! - @function sock_isconnected - @discussion Returns whether or not the socket is connected. - @param so The socket to check. - @result 0 - socket is not connected. 1 - socket is connected. + * @function sock_isconnected + * @discussion Returns whether or not the socket is connected. + * @param so The socket to check. + * @result 0 - socket is not connected. 1 - socket is connected. */ extern int sock_isconnected(socket_t so); /*! - @function sock_isnonblocking - @discussion Returns whether or not the socket is non-blocking. In - the context of this KPI, non-blocking means that functions to - perform operations on a socket will not wait for completion. - - To enable or disable blocking, use the FIONBIO ioctl. The - parameter is an int. If the int is zero, the socket will block. - If the parameter is non-zero, the socket will not block. - @result 0 - socket will block. 1 - socket will not block. + * @function sock_isnonblocking + * @discussion Returns whether or not the socket is non-blocking. In + * the context of this KPI, non-blocking means that functions to + * perform operations on a socket will not wait for completion. + * + * To enable or disable blocking, use the FIONBIO ioctl. The + * parameter is an int. If the int is zero, the socket will block. + * If the parameter is non-zero, the socket will not block. + * @result 0 - socket will block. 1 - socket will not block. */ extern int sock_isnonblocking(socket_t so); /*! - @function sock_gettype - @discussion Retrieves information about the socket. This is the same - information that was used to create the socket. If any of the - parameters following so are NULL, that information is not - retrieved. - @param so The socket to check. - @param domain The domain of the socket (PF_INET, ...). May be NULL. - @param type The socket type (SOCK_STREAM, SOCK_DGRAM, ...). May be NULL. - @param protocol The socket protocol. May be NULL. - @result 0 on success otherwise the errno error. + * @function sock_gettype + * @discussion Retrieves information about the socket. This is the same + * information that was used to create the socket. If any of the + * parameters following so are NULL, that information is not + * retrieved. + * @param so The socket to check. + * @param domain The domain of the socket (PF_INET, ...). May be NULL. + * @param type The socket type (SOCK_STREAM, SOCK_DGRAM, ...). May be NULL. + * @param protocol The socket protocol. May be NULL. + * @result 0 on success otherwise the errno error. */ extern errno_t sock_gettype(socket_t so, int *domain, int *type, int *protocol); #ifdef KERNEL_PRIVATE /* - @function sock_nointerrupt - @discussion Disables interrupt on socket buffers (sets SB_NOINTR on - send and receive socket buffers). - @param so The socket to modify. - @param on Indicate whether or not the SB_NOINTR flag should be set. - @result 0 on success otherwise the errno error. + * @function sock_nointerrupt + * @discussion Disables interrupt on socket buffers (sets SB_NOINTR on + * send and receive socket buffers). + * @param so The socket to modify. + * @param on Indicate whether or not the SB_NOINTR flag should be set. 
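For illustration, a sketch combining the query calls above; the helper name is hypothetical, and it assumes sock_ioctl() from this KPI as the FIONBIO path mentioned under sock_isnonblocking():

static errno_t
example_make_nonblocking(socket_t so)
{
	int domain = 0, type = 0, protocol = 0;
	int on = 1;
	errno_t err;

	err = sock_gettype(so, &domain, &type, &protocol);
	if (err != 0)
		return (err);
	if (sock_isnonblocking(so))
		return (0);             /* already non-blocking */
	/* FIONBIO with a non-zero int: operations will no longer block. */
	return (sock_ioctl(so, FIONBIO, &on));
}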
+ * @result 0 on success otherwise the errno error. */ extern errno_t sock_nointerrupt(socket_t so, int on); /* - @function sock_getlistener - @discussion Retrieves the listening socket of a pre-accepted socket, - i.e. a socket which is still in the incomplete/completed list. - Once a socket has been accepted, the information pertaining - to its listener is no longer available. Therefore, modules - interested in finding out the listening socket should install - the appropriate socket filter callback (sf_attach) which gets - invoked prior to the socket being fully accepted, and call - this routine at such a time to obtain the listener. Callers - are guaranteed that the listener socket will not go away - during the sf_attach callback, and therefore the value is - safe to be used only in that callback context. Callers should - therefore take note that the listening socket's lock will be - held throughout the duration of the callback. - @param so The pre-accepted socket. - @result Non-NULL value which indicates the listening socket; otherwise, - NULL if the socket is not in the incomplete/completed list - of a listener. + * @function sock_getlistener + * @discussion Retrieves the listening socket of a pre-accepted socket, + * i.e. a socket which is still in the incomplete/completed list. + * Once a socket has been accepted, the information pertaining + * to its listener is no longer available. Therefore, modules + * interested in finding out the listening socket should install + * the appropriate socket filter callback (sf_attach) which gets + * invoked prior to the socket being fully accepted, and call + * this routine at such a time to obtain the listener. Callers + * are guaranteed that the listener socket will not go away + * during the sf_attach callback, and therefore the value is + * safe to be used only in that callback context. Callers should + * therefore take note that the listening socket's lock will be + * held throughout the duration of the callback. + * @param so The pre-accepted socket. + * @result Non-NULL value which indicates the listening socket; otherwise, + * NULL if the socket is not in the incomplete/completed list + * of a listener. */ extern socket_t sock_getlistener(socket_t so); /* - @function sock_getaddr - @discussion Retrieves the local or remote address of a socket. - This is a composite of sock_getpeername and sock_getsockname, - except that the allocated socket address is returned to the - caller, and that the caller is reponsible for calling - sock_freeaddr once finished with it. - @param so The socket. - @param psockname Pointer to the storage for the socket name. - @param peername 0 for local address, and non-zero for peer address. - @result 0 on success otherwise the errno error. + * @function sock_getaddr + * @discussion Retrieves the local or remote address of a socket. + * This is a composite of sock_getpeername and sock_getsockname, + * except that the allocated socket address is returned to the + * caller, and that the caller is responsible for calling + * sock_freeaddr once finished with it. + * @param so The socket. + * @param psockname Pointer to the storage for the socket name. + * @param peername 0 for local address, and non-zero for peer address. + * @result 0 on success otherwise the errno error. */ extern errno_t sock_getaddr(socket_t so, struct sockaddr **psockname, int peername); /* - @function sock_freeaddr - @discussion Frees the socket address allocated by sock_getaddr. - @param sockname The socket name to be freed.
+ * @function sock_freeaddr + * @discussion Frees the socket address allocated by sock_getaddr. + * @param sockname The socket name to be freed. */ extern void sock_freeaddr(struct sockaddr *sockname); /* - @function sock_setupcall - @discussion Set the notifier function to be called when an event - occurs on the socket. This may be set to NULL to disable - further notifications. Setting the function does not - affect currently notifications about to be sent or being sent. - Note: When this function is used on a socket passed from - userspace it is crucial to call sock_retain() on the socket - otherwise a callback could be dispatched on a closed socket - and cause a crash. - @param sock The socket. - @param callback The notifier function - @param context A cookie passed directly to the callback -*/ + * @function sock_setupcall + * @discussion Set the notifier function to be called when an event + * occurs on the socket. This may be set to NULL to disable + * further notifications. Setting the function does not + * affect notifications currently about to be sent or being sent. + * Note: When this function is used on a socket passed from + * userspace it is crucial to call sock_retain() on the socket, + * otherwise a callback could be dispatched on a closed socket + * and cause a crash. + * @param sock The socket. + * @param callback The notifier function + * @param context A cookie passed directly to the callback + */ extern errno_t sock_setupcall(socket_t sock, sock_upcall callback, void *context); /* - @function sock_setupcalls - @discussion Set the notifier function to be called when an event - occurs on the socket. This may be set to NULL to disable - further notifications. Setting the function does not - affect currently notifications about to be sent or being sent. - Note: When this function is used on a socket passed from - userspace it is crucial to call sock_retain() on the socket - otherwise a callback could be dispatched on a closed socket - and cause a crash. - @param sock The socket. - @param read_callback The read notifier function - @param read_context A cookie passed directly to the read callback - @param write_callback The write notifier function - @param write_context A cookie passed directly to the write callback -*/ + * @function sock_setupcalls + * @discussion Set the notifier function to be called when an event + * occurs on the socket. This may be set to NULL to disable + * further notifications. Setting the function does not + * affect notifications currently about to be sent or being sent. + * Note: When this function is used on a socket passed from + * userspace it is crucial to call sock_retain() on the socket, + * otherwise a callback could be dispatched on a closed socket + * and cause a crash. + * @param sock The socket. + * @param read_callback The read notifier function + * @param read_context A cookie passed directly to the read callback + * @param write_callback The write notifier function + * @param write_context A cookie passed directly to the write callback + */ extern errno_t sock_setupcalls(socket_t sock, sock_upcall read_callback, void *read_context, sock_upcall write_callback, void *write_context); /* - @function sock_setupcalls_locked - @discussion The locked version of sock_setupcalls - @param locked: When sets, indicates that the callbacks expect to be - on a locked socket. Thus, no unlock is done prior to - calling the callback.
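The sock_retain()/sock_setupcall() pairing that the notes above warn about, as a hedged sketch (the upcall body and names are hypothetical):

/* Hypothetical upcall: invoked when activity occurs on the socket. */
static void
example_upcall(socket_t so, void *cookie, int waitf)
{
	/* Keep this short; defer real work to a thread call or workloop. */
}

static errno_t
example_adopt_user_socket(socket_t so)
{
	/*
	 * For a socket that arrived as a user file descriptor, take a
	 * reference first so a late upcall cannot land on a freed socket.
	 * Balance with sock_release(), not sock_close().
	 */
	sock_retain(so);
	return (sock_setupcall(so, example_upcall, NULL));
}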
+ * @function sock_setupcalls_locked + * @discussion The locked version of sock_setupcalls + * @param locked When set, indicates that the callbacks expect to be + * called on a locked socket. Thus, no unlock is done prior to + * calling the callback. */ extern void sock_setupcalls_locked(socket_t sock, sock_upcall rcallback, void *rcontext, sock_upcall wcallback, void *wcontext, int locked); /* - @function sock_catchevents - @discussion Set the notifier function to be called when an event - occurs on the socket. This may be set to NULL to disable - further notifications. Setting the function does not - affect currently notifications about to be sent or being sent. - @param sock The socket. - @param event_callback The event notifier function - @param event_context A cookie passed directly to the event callback - @param event_mask One or more SO_FILT_HINT_* values OR'ed together, - indicating the registered event(s). -*/ + * @function sock_catchevents + * @discussion Set the notifier function to be called when an event + * occurs on the socket. This may be set to NULL to disable + * further notifications. Setting the function does not + * affect notifications currently about to be sent or being sent. + * @param sock The socket. + * @param event_callback The event notifier function + * @param event_context A cookie passed directly to the event callback + * @param event_mask One or more SO_FILT_HINT_* values OR'ed together, + * indicating the registered event(s). + */ extern errno_t sock_catchevents(socket_t sock, sock_evupcall event_callback, void *event_context, u_int32_t event_mask); @@ -586,12 +586,12 @@ extern void sock_catchevents_locked(socket_t sock, sock_evupcall ecallback, /* - @function sock_iskernel - @discussion Returns true if the socket was created by the kernel or - is owned by the kernel. - @param sock The socket. - @result True if the kernel owns the socket. -*/ + * @function sock_iskernel + * @discussion Returns true if the socket was created by the kernel or + * is owned by the kernel. + * @param sock The socket. + * @result True if the kernel owns the socket. + */ extern int sock_iskernel(socket_t); #endif /* KERNEL_PRIVATE */ diff --git a/bsd/sys/kpi_socketfilter.h b/bsd/sys/kpi_socketfilter.h index d3ac71b96..e82a0f52f 100644 --- a/bsd/sys/kpi_socketfilter.h +++ b/bsd/sys/kpi_socketfilter.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2017 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,32 +22,32 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*!
- @header kpi_socketfilter.h - This header defines an API for intercepting communications at the - socket layer. - - For the most part, socket filters want to do three things: Filter - data in and out, watch for state changes, and intercept a few calls - for security. The number of function pointers supplied by a socket - filter has been significantly reduced. The filter no longer has any - knowledge of socket buffers. The filter no longer intercepts nearly - every internal socket call. There are two data filters, an in - filter, and an out filter. The in filter occurs before data is - placed in the receive socket buffer. This is done to avoid waking - the process unnecessarily. The out filter occurs before the data is - appended to the send socket buffer. This should cover inbound and - outbound data. For monitoring state changes, we've added a notify - function that will be called when various events that the filter can - not intercept occur. In addition, we've added a few functions that a - filter may use to intercept common operations. These functions are: - connect (inbound), connect (outbound), bind, set socket option, - get socket option, and listen. Bind, listen, connect in, and connect - out could be used together to build a fairly comprehensive firewall - without having to do much with individual packets. + * @header kpi_socketfilter.h + * This header defines an API for intercepting communications at the + * socket layer. + * + * For the most part, socket filters want to do three things: Filter + * data in and out, watch for state changes, and intercept a few calls + * for security. The number of function pointers supplied by a socket + * filter has been significantly reduced. The filter no longer has any + * knowledge of socket buffers. The filter no longer intercepts nearly + * every internal socket call. There are two data filters, an in + * filter, and an out filter. The in filter occurs before data is + * placed in the receive socket buffer. This is done to avoid waking + * the process unnecessarily. The out filter occurs before the data is + * appended to the send socket buffer. This should cover inbound and + * outbound data. For monitoring state changes, we've added a notify + * function that will be called when various events that the filter can + * not intercept occur. In addition, we've added a few functions that a + * filter may use to intercept common operations. These functions are: + * connect (inbound), connect (outbound), bind, set socket option, + * get socket option, and listen. Bind, listen, connect in, and connect + * out could be used together to build a fairly comprehensive firewall + * without having to do much with individual packets. */ #ifndef __KPI_SOCKETFILTER__ #define __KPI_SOCKETFILTER__ @@ -58,522 +58,522 @@ struct sockaddr; /*! - @enum sflt_flags - @abstract Constants defining mbuf flags. Only the flags listed below - can be set or retrieved. - @constant SFLT_GLOBAL Indicates this socket filter should be - attached to all new sockets when they're created. - @constant SFLT_PROG Indicates this socket filter should be attached - only when request by the application using the SO_NKE socket - option. - @constant SFLT_EXTENDED Indicates that this socket filter utilizes - the extended fields within the sflt_filter structure. - @constant SFLT_EXTENDED_REGISTRY Indicates that this socket filter - wants to attach to all the sockets already present on the - system. It will also receive notifications for these sockets. 
-*/ + * @enum sflt_flags + * @abstract Constants defining socket filter flags. Only the flags listed + * below can be set or retrieved. + * @constant SFLT_GLOBAL Indicates this socket filter should be + * attached to all new sockets when they're created. + * @constant SFLT_PROG Indicates this socket filter should be attached + * only when requested by the application using the SO_NKE socket + * option. + * @constant SFLT_EXTENDED Indicates that this socket filter utilizes + * the extended fields within the sflt_filter structure. + * @constant SFLT_EXTENDED_REGISTRY Indicates that this socket filter + * wants to attach to all the sockets already present on the + * system. It will also receive notifications for these sockets. + */ enum { - SFLT_GLOBAL = 0x01, - SFLT_PROG = 0x02, - SFLT_EXTENDED = 0x04, - SFLT_EXTENDED_REGISTRY = 0x08 + SFLT_GLOBAL = 0x01, + SFLT_PROG = 0x02, + SFLT_EXTENDED = 0x04, + SFLT_EXTENDED_REGISTRY = 0x08 }; -typedef u_int32_t sflt_flags; - -/*! - @typedef sflt_handle - @abstract A 4 byte identifier used with the SO_NKE socket option to - identify the socket filter to be attached. -*/ -typedef u_int32_t sflt_handle; - -/*! - @enum sflt_event_t - @abstract Events notify a filter of state changes and other various - events related to the socket. These events cannot be prevented - or intercepted, only observed. - @constant sock_evt_connected Indicates this socket has moved to the - connected state. - @constant sock_evt_disconnected Indicates this socket has moved to - the disconnected state. - @constant sock_evt_flush_read The read socket buffer has been - flushed. - @constant sock_evt_shutdown The read and or write side(s) of the - connection have been shutdown. The param will point to an - integer that indicates the direction that has been shutdown. See - 'man 2 shutdown' for more information. - @constant sock_evt_cantrecvmore Indicates the socket cannot receive - more data. - @constant sock_evt_cantsendmore Indicates the socket cannot send - more data. - @constant sock_evt_closing Indicates the socket is closing. - @constant sock_evt_bound Indicates this socket has moved to the - bound state (only for PF_INET/PF_INET6 domain). -*/ +typedef u_int32_t sflt_flags; + +/*! + * @typedef sflt_handle + * @abstract A 4 byte identifier used with the SO_NKE socket option to + * identify the socket filter to be attached. + */ +typedef u_int32_t sflt_handle; + +/*! + * @enum sflt_event_t + * @abstract Events notify a filter of state changes and other various + * events related to the socket. These events cannot be prevented + * or intercepted, only observed. + * @constant sock_evt_connected Indicates this socket has moved to the + * connected state. + * @constant sock_evt_disconnected Indicates this socket has moved to + * the disconnected state. + * @constant sock_evt_flush_read The read socket buffer has been + * flushed. + * @constant sock_evt_shutdown The read and/or write side(s) of the + * connection have been shut down. The param will point to an + * integer that indicates the direction that has been shut down. See + * 'man 2 shutdown' for more information. + * @constant sock_evt_cantrecvmore Indicates the socket cannot receive + * more data. + * @constant sock_evt_cantsendmore Indicates the socket cannot send + * more data. + * @constant sock_evt_closing Indicates the socket is closing. + * @constant sock_evt_bound Indicates this socket has moved to the + * bound state (only for PF_INET/PF_INET6 domain).
+ */ enum { - sock_evt_connecting = 1, - sock_evt_connected = 2, - sock_evt_disconnecting = 3, - sock_evt_disconnected = 4, - sock_evt_flush_read = 5, - sock_evt_shutdown = 6, /* param points to an integer specifying how (read, write, or both) see man 2 shutdown */ - sock_evt_cantrecvmore = 7, - sock_evt_cantsendmore = 8, - sock_evt_closing = 9, - sock_evt_bound = 10 + sock_evt_connecting = 1, + sock_evt_connected = 2, + sock_evt_disconnecting = 3, + sock_evt_disconnected = 4, + sock_evt_flush_read = 5, + sock_evt_shutdown = 6, /* param points to an integer specifying how (read, write, or both) see man 2 shutdown */ + sock_evt_cantrecvmore = 7, + sock_evt_cantsendmore = 8, + sock_evt_closing = 9, + sock_evt_bound = 10 }; -typedef u_int32_t sflt_event_t; - -/*! - @enum sflt_data_flag_t - @abstract Inbound and outbound data filters may handle many - different types of incoming and outgoing data. These flags help - distinguish between normal data, out-of-band data, and records. - @constant sock_data_filt_flag_oob Indicates this data is out-of-band - data. - @constant sock_data_filt_flag_record Indicates this data is a - record. This flag is only ever seen on inbound data. -*/ +typedef u_int32_t sflt_event_t; + +/*! + * @enum sflt_data_flag_t + * @abstract Inbound and outbound data filters may handle many + * different types of incoming and outgoing data. These flags help + * distinguish between normal data, out-of-band data, and records. + * @constant sock_data_filt_flag_oob Indicates this data is out-of-band + * data. + * @constant sock_data_filt_flag_record Indicates this data is a + * record. This flag is only ever seen on inbound data. + */ enum { - sock_data_filt_flag_oob = 1, - sock_data_filt_flag_record = 2 + sock_data_filt_flag_oob = 1, + sock_data_filt_flag_record = 2 }; -typedef u_int32_t sflt_data_flag_t; +typedef u_int32_t sflt_data_flag_t; __BEGIN_DECLS /*! - @typedef sf_unregistered_func - - @discussion sf_unregistered_func is called to notify the filter it - has been unregistered. This is the last function the stack will - call and this function will only be called once all other - function calls in to your filter have completed. Once this - function has been called, your kext may safely unload. - @param handle The socket filter handle used to identify this filter. -*/ -typedef void (*sf_unregistered_func)(sflt_handle handle); - -/*! - @typedef sf_attach_func - - @discussion sf_attach_func is called to notify the filter it has - been attached to a socket. The filter may allocate memory for - this attachment and use the cookie to track it. This filter is - called in one of two cases: - 1) You've installed a global filter and a new socket was created. - 2) Your non-global socket filter is being attached using the SO_NKE - socket option. - @param cookie Used to allow the socket filter to set the cookie for - this attachment. - @param so The socket the filter is being attached to. - @result If you return a non-zero value, your filter will not be - attached to this socket. -*/ -typedef errno_t (*sf_attach_func)(void **cookie, socket_t so); - -/*! - @typedef sf_detach_func - - @discussion sf_detach_func is called to notify the filter it has - been detached from a socket. If the filter allocated any memory - for this attachment, it should be freed. This function will - be called when the socket is disposed of. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. 
- @discussion If you return a non-zero value, your filter will not be - attached to this socket. -*/ -typedef void (*sf_detach_func)(void *cookie, socket_t so); - -/*! - @typedef sf_notify_func - - @discussion sf_notify_func is called to notify the filter of various - state changes and other events occuring on the socket. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param event The type of event that has occurred. - @param param Additional information about the event. -*/ -typedef void (*sf_notify_func)(void *cookie, socket_t so, sflt_event_t event, + * @typedef sf_unregistered_func + * + * @discussion sf_unregistered_func is called to notify the filter it + * has been unregistered. This is the last function the stack will + * call and this function will only be called once all other + * function calls in to your filter have completed. Once this + * function has been called, your kext may safely unload. + * @param handle The socket filter handle used to identify this filter. + */ +typedef void (*sf_unregistered_func)(sflt_handle handle); + +/*! + * @typedef sf_attach_func + * + * @discussion sf_attach_func is called to notify the filter it has + * been attached to a socket. The filter may allocate memory for + * this attachment and use the cookie to track it. This filter is + * called in one of two cases: + * 1) You've installed a global filter and a new socket was created. + * 2) Your non-global socket filter is being attached using the SO_NKE + * socket option. + * @param cookie Used to allow the socket filter to set the cookie for + * this attachment. + * @param so The socket the filter is being attached to. + * @result If you return a non-zero value, your filter will not be + * attached to this socket. + */ +typedef errno_t (*sf_attach_func)(void **cookie, socket_t so); + +/*! + * @typedef sf_detach_func + * + * @discussion sf_detach_func is called to notify the filter it has + * been detached from a socket. If the filter allocated any memory + * for this attachment, it should be freed. This function will + * be called when the socket is disposed of. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + */ +typedef void (*sf_detach_func)(void *cookie, socket_t so); + +/*! + * @typedef sf_notify_func + * + * @discussion sf_notify_func is called to notify the filter of various + * state changes and other events occurring on the socket. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param event The type of event that has occurred. + * @param param Additional information about the event. + */ +typedef void (*sf_notify_func)(void *cookie, socket_t so, sflt_event_t event, void *param); /*! - @typedef sf_getpeername_func - - @discussion sf_getpeername_func is called to allow a filter to - to intercept the getpeername function. When called, sa will - point to a pointer to a socket address that was malloced - in zone M_SONAME. If you want to replace this address, either - modify the currenty copy or allocate a new one and free the - old one. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param sa A pointer to a socket address pointer.
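To make the attach cookie contract above concrete, a sketch of a minimal filter lifecycle (the struct, names, and the M_TEMP tag are assumptions; MALLOC/FREE are the kernel's sys/malloc.h macros):

/* Hypothetical per-socket state tracked via the attach cookie. */
struct example_cookie {
	uint32_t bytes_in;
};

static errno_t
example_attach(void **cookie, socket_t so)
{
	struct example_cookie *c;

	MALLOC(c, struct example_cookie *, sizeof(*c), M_TEMP,
	    M_WAITOK | M_ZERO);
	if (c == NULL)
		return (ENOMEM);        /* non-zero refuses the attach */
	*cookie = c;
	return (0);
}

static void
example_detach(void *cookie, socket_t so)
{
	FREE(cookie, M_TEMP);           /* free what attach allocated */
}

static void
example_notify(void *cookie, socket_t so, sflt_event_t event, void *param)
{
	if (event == sock_evt_connected) {
		/* observe only; events cannot be blocked from here */
	}
}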
- @result If you return a non-zero value, processing will stop. If - you return EJUSTRETURN, no further filters will be called - but a result of zero will be returned to the caller of - getpeername. -*/ -typedef int (*sf_getpeername_func)(void *cookie, socket_t so, + * @typedef sf_getpeername_func + * + * @discussion sf_getpeername_func is called to allow a filter + * to intercept the getpeername function. When called, sa will + * point to a pointer to a socket address that was malloced + * in zone M_SONAME. If you want to replace this address, either + * modify the current copy or allocate a new one and free the + * old one. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param sa A pointer to a socket address pointer. + * @result If you return a non-zero value, processing will stop. If + * you return EJUSTRETURN, no further filters will be called + * but a result of zero will be returned to the caller of + * getpeername. + */ +typedef int (*sf_getpeername_func)(void *cookie, socket_t so, struct sockaddr **sa); /*! - @typedef sf_getsockname_func - - @discussion sf_getsockname_func is called to allow a filter to - to intercept the getsockname function. When called, sa will - point to a pointer to a socket address that was malloced - in zone M_SONAME. If you want to replace this address, either - modify the currenty copy or allocate a new one and free the - old one. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param sa A pointer to a socket address pointer. - @result If you return a non-zero value, processing will stop. If - you return EJUSTRETURN, no further filters will be called - but a result of zero will be returned to the caller of - getsockname. -*/ -typedef int (*sf_getsockname_func)(void *cookie, socket_t so, + * @typedef sf_getsockname_func + * + * @discussion sf_getsockname_func is called to allow a filter + * to intercept the getsockname function. When called, sa will + * point to a pointer to a socket address that was malloced + * in zone M_SONAME. If you want to replace this address, either + * modify the current copy or allocate a new one and free the + * old one. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param sa A pointer to a socket address pointer. + * @result If you return a non-zero value, processing will stop. If + * you return EJUSTRETURN, no further filters will be called + * but a result of zero will be returned to the caller of + * getsockname. + */ +typedef int (*sf_getsockname_func)(void *cookie, socket_t so, struct sockaddr **sa); /*! - @typedef sf_data_in_func - - @discussion sf_data_in_func is called to filter incoming data. If your - filter intercepts data for later reinjection, it must queue - all incoming data to preserve the order of the data. Use - sock_inject_data_in to later reinject this data if you return - EJUSTRETURN. Warning: This filter is on the data path. Do not - spend excesive time. Do not wait for data on another socket. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param from The addres the data is from, may be NULL if the socket - is connected. - @param data The data being received. Control data may appear in the - mbuf chain, be sure to check the mbuf types to find control - data.
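A hedged sketch of a pass-through sf_data_in_func under this contract, reusing the hypothetical example_cookie from the earlier sketch:

static errno_t
example_data_in(void *cookie, socket_t so, const struct sockaddr *from,
    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
{
	struct example_cookie *c = cookie;

	/* Observe only; stay fast, this runs on the data path. */
	if (data != NULL && *data != NULL)
		c->bytes_in += (uint32_t)mbuf_pkthdr_len(*data);
	return (0);     /* 0 passes the data on; EJUSTRETURN would swallow it */
}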
- @param control Control data being passed separately from the data. - @param flags Flags to indicate if this is out of band data or a - record. - @result Return: - 0 - The caller will continue with normal processing of the data. - EJUSTRETURN - The caller will stop processing the data, the - data will not be freed. - Anything Else - The caller will free the data and stop - processing. -*/ -typedef errno_t (*sf_data_in_func)(void *cookie, socket_t so, + * @typedef sf_data_in_func + * + * @discussion sf_data_in_func is called to filter incoming data. If your + * filter intercepts data for later reinjection, it must queue + * all incoming data to preserve the order of the data. Use + * sock_inject_data_in to later reinject this data if you return + * EJUSTRETURN. Warning: This filter is on the data path. Do not + * spend excessive time. Do not wait for data on another socket. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param from The address the data is from, may be NULL if the socket + * is connected. + * @param data The data being received. Control data may appear in the + * mbuf chain, be sure to check the mbuf types to find control + * data. + * @param control Control data being passed separately from the data. + * @param flags Flags to indicate if this is out of band data or a + * record. + * @result Return: + * 0 - The caller will continue with normal processing of the data. + * EJUSTRETURN - The caller will stop processing the data, the + * data will not be freed. + * Anything Else - The caller will free the data and stop + * processing. + */ +typedef errno_t (*sf_data_in_func)(void *cookie, socket_t so, const struct sockaddr *from, mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags); /*! - @typedef sf_data_out_func - - @discussion sf_data_out_func is called to filter outbound data. If - your filter intercepts data for later reinjection, it must queue - all outbound data to preserve the order of the data when - reinjecting. Use sock_inject_data_out to later reinject this - data. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param to The address the data is to, may be NULL if the socket - is connected. - @param data The data being received. Control data may appear in the - mbuf chain, be sure to check the mbuf types to find control - data. - @param control Control data being passed separately from the data. - @param flags Flags to indicate if this is out of band data or a - record. - @result Return: - 0 - The caller will continue with normal processing of the data. - EJUSTRETURN - The caller will stop processing the data, - the data will not be freed. - Anything Else - The caller will free the data and stop - processing. -*/ -typedef errno_t (*sf_data_out_func)(void *cookie, socket_t so, + * @typedef sf_data_out_func + * + * @discussion sf_data_out_func is called to filter outbound data. If + * your filter intercepts data for later reinjection, it must queue + * all outbound data to preserve the order of the data when + * reinjecting. Use sock_inject_data_out to later reinject this + * data. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param to The address the data is to, may be NULL if the socket + * is connected. + * @param data The data being sent.
Control data may appear in the + * mbuf chain, be sure to check the mbuf types to find control + * data. + * @param control Control data being passed separately from the data. + * @param flags Flags to indicate if this is out of band data or a + * record. + * @result Return: + * 0 - The caller will continue with normal processing of the data. + * EJUSTRETURN - The caller will stop processing the data, + * the data will not be freed. + * Anything Else - The caller will free the data and stop + * processing. + */ +typedef errno_t (*sf_data_out_func)(void *cookie, socket_t so, const struct sockaddr *to, mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags); /*! - @typedef sf_connect_in_func - - @discussion sf_connect_in_func is called to filter inbound connections. - A protocol will call this before accepting an incoming - connection and placing it on the queue of completed connections. - Warning: This filter is on the data path. Do not spend excesive - time. Do not wait for data on another socket. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param from The address the incoming connection is from. - @result Return: - 0 - The caller will continue with normal processing of the - connection. - Anything Else - The caller will rejecting the incoming - connection. -*/ -typedef errno_t (*sf_connect_in_func)(void *cookie, socket_t so, + * @typedef sf_connect_in_func + * + * @discussion sf_connect_in_func is called to filter inbound connections. + * A protocol will call this before accepting an incoming + * connection and placing it on the queue of completed connections. + * Warning: This filter is on the data path. Do not spend excessive + * time. Do not wait for data on another socket. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param from The address the incoming connection is from. + * @result Return: + * 0 - The caller will continue with normal processing of the + * connection. + * Anything Else - The caller will reject the incoming + * connection. + */ +typedef errno_t (*sf_connect_in_func)(void *cookie, socket_t so, const struct sockaddr *from); /*! - @typedef sf_connect_out_func - - @discussion sf_connect_out_func is called to filter outbound - connections. A protocol will call this before initiating an - outbound connection. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param to The remote address of the outbound connection. - @result Return: - 0 - The caller will continue with normal processing of the - connection. - EJUSTRETURN - The caller will return with a value of 0 (no error) - from that point without further processing the connect command. The - protocol layer will not see the call. - Anything Else - The caller will rejecting the outbound - connection. -*/ -typedef errno_t (*sf_connect_out_func)(void *cookie, socket_t so, + * @typedef sf_connect_out_func + * + * @discussion sf_connect_out_func is called to filter outbound + * connections. A protocol will call this before initiating an + * outbound connection. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param to The remote address of the outbound connection. + * @result Return: + * 0 - The caller will continue with normal processing of the + * connection.
+ * EJUSTRETURN - The caller will return with a value of 0 (no error) + * from that point without further processing the connect command. The + * protocol layer will not see the call. + * Anything Else - The caller will reject the outbound + * connection. + */ +typedef errno_t (*sf_connect_out_func)(void *cookie, socket_t so, const struct sockaddr *to); /*! - @typedef sf_bind_func - - @discussion sf_bind_func is called before performing a bind - operation on a socket. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param to The local address of the socket will be bound to. - @result Return: - 0 - The caller will continue with normal processing of the bind. - EJUSTRETURN - The caller will return with a value of 0 (no error) - from that point without further processing the bind command. The - protocol layer will not see the call. - Anything Else - The caller will rejecting the bind. -*/ -typedef errno_t (*sf_bind_func)(void *cookie, socket_t so, + * @typedef sf_bind_func + * + * @discussion sf_bind_func is called before performing a bind + * operation on a socket. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param to The local address the socket will be bound to. + * @result Return: + * 0 - The caller will continue with normal processing of the bind. + * EJUSTRETURN - The caller will return with a value of 0 (no error) + * from that point without further processing the bind command. The + * protocol layer will not see the call. + * Anything Else - The caller will reject the bind. + */ +typedef errno_t (*sf_bind_func)(void *cookie, socket_t so, const struct sockaddr *to); /*! - @typedef sf_setoption_func - - @discussion sf_setoption_func is called before performing setsockopt - on a socket. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param opt The socket option to set. - @result Return: - 0 - The caller will continue with normal processing of the - setsockopt. - EJUSTRETURN - The caller will return with a value of 0 (no error) - from that point without further propagating the set option - command. The socket and protocol layers will not see the call. - Anything Else - The caller will stop processing and return - this error. -*/ -typedef errno_t (*sf_setoption_func)(void *cookie, socket_t so, sockopt_t opt); - -/*! - @typedef sf_getoption_func - - @discussion sf_getoption_func is called before performing getsockopt - on a socket. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param opt The socket option to get. - @result Return: - 0 - The caller will continue with normal processing of the - getsockopt. - EJUSTRETURN - The caller will return with a value of 0 (no error) - from that point without further propagating the get option - command. The socket and protocol layers will not see the call. - Anything Else - The caller will stop processing and return - this error. -*/ -typedef errno_t (*sf_getoption_func)(void *cookie, socket_t so, sockopt_t opt); - -/*! - @typedef sf_listen_func - - @discussion sf_listen_func is called before performing listen - on a socket. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to.
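As the header's introduction suggests, the connect and bind hooks can express firewall-like policy. A sketch follows (the port-23 rule is purely illustrative):

static errno_t
example_connect_out(void *cookie, socket_t so, const struct sockaddr *to)
{
	/* Illustrative policy: refuse outbound connections to port 23. */
	if (to != NULL && to->sa_family == AF_INET) {
		const struct sockaddr_in *sin =
		    (const struct sockaddr_in *)to;
		if (ntohs(sin->sin_port) == 23)
			return (ECONNREFUSED);  /* any non-zero rejects */
	}
	return (0);
}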
- @result Return: - 0 - The caller will continue with normal processing of listen. - EJUSTRETURN - The caller will return with a value of 0 (no error) - from that point without further processing the listen command. The - protocol will not see the call. - Anything Else - The caller will stop processing and return - this error. -*/ -typedef errno_t (*sf_listen_func)(void *cookie, socket_t so); - -/*! - @typedef sf_ioctl_func - - @discussion sf_ioctl_func is called before performing an ioctl - on a socket. - - All undefined ioctls are reserved for future use by Apple. If - you need to communicate with your kext using an ioctl, please - use SIOCSIFKPI and SIOCGIFKPI. - @param cookie Cookie value specified when the filter attach was - called. - @param so The socket the filter is attached to. - @param request The ioctl name. - @param argp A pointer to the ioctl parameter. - @result Return: - 0 - The caller will continue with normal processing of - this ioctl. - EJUSTRETURN - The caller will return with a value of 0 (no error) - from that point without further processing or propogating - the ioctl. - Anything Else - The caller will stop processing and return - this error. -*/ -typedef errno_t (*sf_ioctl_func)(void *cookie, socket_t so, + * @typedef sf_setoption_func + * + * @discussion sf_setoption_func is called before performing setsockopt + * on a socket. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param opt The socket option to set. + * @result Return: + * 0 - The caller will continue with normal processing of the + * setsockopt. + * EJUSTRETURN - The caller will return with a value of 0 (no error) + * from that point without further propagating the set option + * command. The socket and protocol layers will not see the call. + * Anything Else - The caller will stop processing and return + * this error. + */ +typedef errno_t (*sf_setoption_func)(void *cookie, socket_t so, sockopt_t opt); + +/*! + * @typedef sf_getoption_func + * + * @discussion sf_getoption_func is called before performing getsockopt + * on a socket. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param opt The socket option to get. + * @result Return: + * 0 - The caller will continue with normal processing of the + * getsockopt. + * EJUSTRETURN - The caller will return with a value of 0 (no error) + * from that point without further propagating the get option + * command. The socket and protocol layers will not see the call. + * Anything Else - The caller will stop processing and return + * this error. + */ +typedef errno_t (*sf_getoption_func)(void *cookie, socket_t so, sockopt_t opt); + +/*! + * @typedef sf_listen_func + * + * @discussion sf_listen_func is called before performing listen + * on a socket. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @result Return: + * 0 - The caller will continue with normal processing of listen. + * EJUSTRETURN - The caller will return with a value of 0 (no error) + * from that point without further processing the listen command. The + * protocol will not see the call. + * Anything Else - The caller will stop processing and return + * this error. + */ +typedef errno_t (*sf_listen_func)(void *cookie, socket_t so); + +/*! 
+ * @typedef sf_ioctl_func + * + * @discussion sf_ioctl_func is called before performing an ioctl + * on a socket. + * + * All undefined ioctls are reserved for future use by Apple. If + * you need to communicate with your kext using an ioctl, please + * use SIOCSIFKPI and SIOCGIFKPI. + * @param cookie Cookie value specified when the filter attach was + * called. + * @param so The socket the filter is attached to. + * @param request The ioctl name. + * @param argp A pointer to the ioctl parameter. + * @result Return: + * 0 - The caller will continue with normal processing of + * this ioctl. + * EJUSTRETURN - The caller will return with a value of 0 (no error) + * from that point without further processing or propagating + * the ioctl. + * Anything Else - The caller will stop processing and return + * this error. + */ +typedef errno_t (*sf_ioctl_func)(void *cookie, socket_t so, unsigned long request, const char* argp); /*! - @typedef sf_accept_func - - @discussion sf_accept_func is called after a socket is dequeued - off the completed (incoming) connection list and before - the file descriptor is associated with it. A filter can - utilize this callback to intercept the accepted socket - in order to examine it, prior to returning the socket to - the caller of accept. Such a filter may also choose to - discard the accepted socket if it wishes to do so. - @param cookie Cookie value specified when the filter attach was called. - @param so_listen The listening socket. - @param so The socket that is about to be accepted. - @param local The local address of the about to be accepted socket. - @param remote The remote address of the about to be accepted socket. - @result Return: - 0 - The caller will continue with normal processing of accept. - EJUSTRETURN - The to be accepted socket will be disconnected - prior to being returned to the caller of accept. No further - control or data operations on the socket will be allowed. - This is the recommended return value as it has the least - amount of impact, especially to applications which don't - check the error value returned by accept. - Anything Else - The to be accepted socket will be closed and - the error will be returned to the caller of accept. - Note that socket filter developers are advised to exercise - caution when returning non-zero values to the caller, - since some applications don't check the error value - returned by accept and therefore risk breakage. + * @typedef sf_accept_func + * + * @discussion sf_accept_func is called after a socket is dequeued + * off the completed (incoming) connection list and before + * the file descriptor is associated with it. A filter can + * utilize this callback to intercept the accepted socket + * in order to examine it, prior to returning the socket to + * the caller of accept. Such a filter may also choose to + * discard the accepted socket if it wishes to do so. + * @param cookie Cookie value specified when the filter attach was called. + * @param so_listen The listening socket. + * @param so The socket that is about to be accepted. + * @param local The local address of the about to be accepted socket. + * @param remote The remote address of the about to be accepted socket. + * @result Return: + * 0 - The caller will continue with normal processing of accept. + * EJUSTRETURN - The to be accepted socket will be disconnected + * prior to being returned to the caller of accept. No further + * control or data operations on the socket will be allowed.
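A sketch of the option-intercept contract (SO_KEEPALIVE is chosen arbitrarily; sockopt_level() and sockopt_name() are the accessor functions declared with the other sockopt routines in this header):

static errno_t
example_setoption(void *cookie, socket_t so, sockopt_t opt)
{
	/* Illustrative: observe SO_KEEPALIVE sets, pass everything through. */
	if (sockopt_level(opt) == SOL_SOCKET &&
	    sockopt_name(opt) == SO_KEEPALIVE) {
		/* returning EJUSTRETURN here would consume the option silently */
	}
	return (0);
}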
+ * This is the recommended return value as it has the least + * amount of impact, especially to applications which don't + * check the error value returned by accept. + * Anything Else - The to be accepted socket will be closed and + * the error will be returned to the caller of accept. + * Note that socket filter developers are advised to exercise + * caution when returning non-zero values to the caller, + * since some applications don't check the error value + * returned by accept and therefore risk breakage. */ typedef errno_t (*sf_accept_func)(void *cookie, socket_t so_listen, socket_t so, const struct sockaddr *local, const struct sockaddr *remote); /*! - @struct sflt_filter - @discussion This structure is used to define a socket filter. - @field sf_handle A value used to find socket filters by - applications. An application can use this value to specify that - this filter should be attached when using the SO_NKE socket - option. - @field sf_flags Indicate whether this filter should be attached to - all new sockets or just those that request the filter be - attached using the SO_NKE socket option. If this filter - utilizes the socket filter extension fields, it must also - set SFLT_EXTENDED. - @field sf_name A name used for debug purposes. - @field sf_unregistered Your function for being notified when your - filter has been unregistered. - @field sf_attach Your function for handling attaches to sockets. - @field sf_detach Your function for handling detaches from sockets. - @field sf_notify Your function for handling events. May be null. - @field sf_data_in Your function for handling incoming data. May be - null. - @field sf_data_out Your function for handling outgoing data. May be - null. - @field sf_connect_in Your function for handling inbound - connections. May be null. - @field sf_connect_out Your function for handling outbound - connections. May be null. - @field sf_bind Your function for handling binds. May be null. - @field sf_setoption Your function for handling setsockopt. May be null. - @field sf_getoption Your function for handling getsockopt. May be null. - @field sf_listen Your function for handling listen. May be null. - @field sf_ioctl Your function for handling ioctls. May be null. - @field sf_len Length of socket filter extension structure; developers - must initialize this to sizeof sflt_filter_ext structure. - This field and all fields following it will only be valid - if SFLT_EXTENDED flag is set in sf_flags field. - @field sf_ext_accept Your function for handling inbound connections - at accept time. May be null. - @field sf_ext_rsvd Reserved for future use; you must initialize - the reserved fields with zeroes. -*/ + * @struct sflt_filter + * @discussion This structure is used to define a socket filter. + * @field sf_handle A value used to find socket filters by + * applications. An application can use this value to specify that + * this filter should be attached when using the SO_NKE socket + * option. + * @field sf_flags Indicate whether this filter should be attached to + * all new sockets or just those that request the filter be + * attached using the SO_NKE socket option. If this filter + * utilizes the socket filter extension fields, it must also + * set SFLT_EXTENDED. + * @field sf_name A name used for debug purposes. + * @field sf_unregistered Your function for being notified when your + * filter has been unregistered. + * @field sf_attach Your function for handling attaches to sockets. 
+ * @field sf_detach Your function for handling detaches from sockets. + * @field sf_notify Your function for handling events. May be null. + * @field sf_data_in Your function for handling incoming data. May be + * null. + * @field sf_data_out Your function for handling outgoing data. May be + * null. + * @field sf_connect_in Your function for handling inbound + * connections. May be null. + * @field sf_connect_out Your function for handling outbound + * connections. May be null. + * @field sf_bind Your function for handling binds. May be null. + * @field sf_setoption Your function for handling setsockopt. May be null. + * @field sf_getoption Your function for handling getsockopt. May be null. + * @field sf_listen Your function for handling listen. May be null. + * @field sf_ioctl Your function for handling ioctls. May be null. + * @field sf_len Length of socket filter extension structure; developers + * must initialize this to sizeof sflt_filter_ext structure. + * This field and all fields following it will only be valid + * if SFLT_EXTENDED flag is set in sf_flags field. + * @field sf_ext_accept Your function for handling inbound connections + * at accept time. May be null. + * @field sf_ext_rsvd Reserved for future use; you must initialize + * the reserved fields with zeroes. + */ struct sflt_filter { - sflt_handle sf_handle; - int sf_flags; - char *sf_name; - - sf_unregistered_func sf_unregistered; - sf_attach_func sf_attach; - sf_detach_func sf_detach; - - sf_notify_func sf_notify; - sf_getpeername_func sf_getpeername; - sf_getsockname_func sf_getsockname; - sf_data_in_func sf_data_in; - sf_data_out_func sf_data_out; - sf_connect_in_func sf_connect_in; - sf_connect_out_func sf_connect_out; - sf_bind_func sf_bind; - sf_setoption_func sf_setoption; - sf_getoption_func sf_getoption; - sf_listen_func sf_listen; - sf_ioctl_func sf_ioctl; + sflt_handle sf_handle; + int sf_flags; + char *sf_name; + + sf_unregistered_func sf_unregistered; + sf_attach_func sf_attach; + sf_detach_func sf_detach; + + sf_notify_func sf_notify; + sf_getpeername_func sf_getpeername; + sf_getsockname_func sf_getsockname; + sf_data_in_func sf_data_in; + sf_data_out_func sf_data_out; + sf_connect_in_func sf_connect_in; + sf_connect_out_func sf_connect_out; + sf_bind_func sf_bind; + sf_setoption_func sf_setoption; + sf_getoption_func sf_getoption; + sf_listen_func sf_listen; + sf_ioctl_func sf_ioctl; /* * The following are valid only if SFLT_EXTENDED flag is set. * Initialize sf_ext_len to sizeof sflt_filter_ext structure. * Filters must also initialize reserved fields with zeroes. */ struct sflt_filter_ext { - unsigned int sf_ext_len; - sf_accept_func sf_ext_accept; - void *sf_ext_rsvd[5]; /* Reserved */ + unsigned int sf_ext_len; + sf_accept_func sf_ext_accept; + void *sf_ext_rsvd[5]; /* Reserved */ } sf_ext; -#define sf_len sf_ext.sf_ext_len -#define sf_accept sf_ext.sf_ext_accept +#define sf_len sf_ext.sf_ext_len +#define sf_accept sf_ext.sf_ext_accept }; /*! - @function sflt_register - @discussion Registers a socket filter. See 'man 2 socket' for a - desciption of domain, type, and protocol. - @param filter A structure describing the filter. - @param domain The protocol domain these filters will be attached to. - Only PF_INET & PF_INET6 domains are supported. - @param type The socket type these filters will be attached to. - @param protocol The protocol these filters will be attached to. - @result 0 on success otherwise the errno error. + * @function sflt_register + * @discussion Registers a socket filter. 
See 'man 2 socket' for a
+ * description of domain, type, and protocol.
+ * @param filter A structure describing the filter.
+ * @param domain The protocol domain these filters will be attached to.
+ * Only PF_INET & PF_INET6 domains are supported.
+ * @param type The socket type these filters will be attached to.
+ * @param protocol The protocol these filters will be attached to.
+ * @result 0 on success otherwise the errno error.
 */
#ifdef KERNEL_PRIVATE
extern errno_t sflt_register_internal(const struct sflt_filter *filter,
    int domain, int type, int protocol);

-#define sflt_register(filter, domain, type, protocol) \
+#define sflt_register(filter, domain, type, protocol) \
	sflt_register_internal((filter), (domain), (type), (protocol))
#else
extern errno_t sflt_register(const struct sflt_filter *filter, int domain,
@@ -581,32 +581,32 @@ extern errno_t sflt_register(const struct sflt_filter *filter, int domain,
 #endif /* KERNEL_PRIVATE */

/*!
- @function sflt_unregister
- @discussion Unregisters a socket filter. This will not detach the
- socket filter from all sockets it may be attached to at the
- time, it will just prevent the socket filter from being attached
- to any new sockets.
- @param handle The sf_handle of the socket filter to unregister.
- @result 0 on success otherwise the errno error.
+ * @function sflt_unregister
+ * @discussion Unregisters a socket filter. This will not detach the
+ * socket filter from all sockets it may be attached to at the
+ * time; it will just prevent the socket filter from being attached
+ * to any new sockets.
+ * @param handle The sf_handle of the socket filter to unregister.
+ * @result 0 on success otherwise the errno error.
 */
extern errno_t sflt_unregister(sflt_handle handle);

/*!
- @function sflt_attach
- @discussion Attaches a socket filter to the specified socket. A
- filter must be registered before it can be attached.
- @param socket The socket the filter should be attached to.
- @param handle The handle of the registered filter to be attached.
- @result 0 on success otherwise the errno error.
+ * @function sflt_attach
+ * @discussion Attaches a socket filter to the specified socket. A
+ * filter must be registered before it can be attached.
+ * @param socket The socket the filter should be attached to.
+ * @param handle The handle of the registered filter to be attached.
+ * @result 0 on success otherwise the errno error.
 */
extern errno_t sflt_attach(socket_t socket, sflt_handle handle);

/*!
- @function sflt_detach
- @discussion Detaches a socket filter from a specified socket.
- @param socket The socket the filter should be detached from.
- @param handle The handle of the registered filter to be detached.
- @result 0 on success otherwise the errno error.
+ * @function sflt_detach
+ * @discussion Detaches a socket filter from a specified socket.
+ * @param socket The socket the filter should be detached from.
+ * @param handle The handle of the registered filter to be detached.
+ * @result 0 on success otherwise the errno error.
 */
extern errno_t sflt_detach(socket_t socket, sflt_handle handle);

@@ -619,37 +619,37 @@ extern errno_t sflt_detach(socket_t socket, sflt_handle handle);
 */

/*!
- @function sock_inject_data_in
- @discussion Inject data in to the receive buffer of the socket as if
- it had come from the network.
- @param so The socket to inject the data on.
- @param from The address the data is from, only necessary on
- un-connected sockets.
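The registration KPIs above are enough to sketch a minimal global filter. This is an illustrative sketch only, not part of the patch: the handle, reverse-DNS name, and callback bodies are placeholders, and the callback signatures follow the sf_*_func typedefs declared earlier in this header.

#include <sys/socket.h>
#include <sys/kpi_socketfilter.h>
#include <netinet/in.h>

static errno_t
myfilt_attach(void **cookie, socket_t so)
{
	*cookie = NULL;         /* no per-socket state in this sketch */
	return 0;               /* returning 0 attaches to this socket */
}

static void
myfilt_detach(void *cookie, socket_t so)
{
	/* release per-socket state allocated in attach, if any */
}

static void
myfilt_unregistered(sflt_handle handle)
{
	/* last callback after sflt_unregister; tear down global state */
}

static struct sflt_filter myfilt = {
	.sf_handle       = 0x2e6d7966,          /* placeholder handle */
	.sf_flags        = SFLT_GLOBAL,         /* attach to all new matching sockets */
	.sf_name         = (char *)"com.example.myfilt",
	.sf_unregistered = myfilt_unregistered,
	.sf_attach       = myfilt_attach,
	.sf_detach       = myfilt_detach,
};

static errno_t
myfilt_start(void)
{
	/* Cover every new IPv4 TCP socket; PF_INET6 needs a second call. */
	return sflt_register(&myfilt, PF_INET, SOCK_STREAM, IPPROTO_TCP);
}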
A copy of the address will be made, caller
- is responsible for freeing the address after calling this
- function.
- @param data The data and possibly control mbufs.
- @param control The separate control mbufs.
- @param flags Flags indicating the type of data.
- @result 0 on success otherwise the errno error. If the function
- returns an error, the caller is responsible for freeing the
- mbuf.
+ * @function sock_inject_data_in
+ * @discussion Inject data into the receive buffer of the socket as if
+ * it had come from the network.
+ * @param so The socket to inject the data on.
+ * @param from The address the data is from, only necessary on
+ * un-connected sockets. A copy of the address will be made; the caller
+ * is responsible for freeing the address after calling this
+ * function.
+ * @param data The data and possibly control mbufs.
+ * @param control The separate control mbufs.
+ * @param flags Flags indicating the type of data.
+ * @result 0 on success otherwise the errno error. If the function
+ * returns an error, the caller is responsible for freeing the
+ * mbuf.
 */
extern errno_t sock_inject_data_in(socket_t so, const struct sockaddr *from,
    mbuf_t data, mbuf_t control, sflt_data_flag_t flags);

/*!
- @function sock_inject_data_out
- @discussion Inject data in to the send buffer of the socket as if it
- had come from the client.
- @param so The socket to inject the data on.
- @param to The address the data should be sent to, only necessary on
- un-connected sockets. The caller is responsible for freeing the
- to address after sock_inject_data_out returns.
- @param data The data and possibly control mbufs.
- @param control The separate control mbufs.
- @param flags Flags indicating the type of data.
- @result 0 on success otherwise the errno error. The data and control
- values are always freed regardless of return value.
+ * @function sock_inject_data_out
+ * @discussion Inject data into the send buffer of the socket as if it
+ * had come from the client.
+ * @param so The socket to inject the data on.
+ * @param to The address the data should be sent to, only necessary on
+ * un-connected sockets. The caller is responsible for freeing the
+ * to address after sock_inject_data_out returns.
+ * @param data The data and possibly control mbufs.
+ * @param control The separate control mbufs.
+ * @param flags Flags indicating the type of data.
+ * @result 0 on success otherwise the errno error. The data and control
+ * values are always freed regardless of return value.
 */
extern errno_t sock_inject_data_out(socket_t so, const struct sockaddr *to,
    mbuf_t data, mbuf_t control, sflt_data_flag_t flags);

@@ -660,61 +660,61 @@ extern errno_t sock_inject_data_out(socket_t so, const struct sockaddr *to,
 */

enum {
- sockopt_get = 1,
- sockopt_set = 2
+ sockopt_get = 1,
+ sockopt_set = 2
};
typedef u_int8_t sockopt_dir;

/*!
- @function sockopt_direction
- @discussion Retrieves the direction of the socket option (Get or
- Set).
- @param sopt The socket option.
- @result sock_opt_get or sock_opt_set.
+ * @function sockopt_direction
+ * @discussion Retrieves the direction of the socket option (Get or
+ * Set).
+ * @param sopt The socket option.
+ * @result sockopt_get or sockopt_set.
 */
extern sockopt_dir sockopt_direction(sockopt_t sopt);

/*!
- @function sockopt_level
- @discussion Retrieves the socket option level. (SOL_SOCKET, etc).
- @param sopt The socket option.
- @result The socket option level.
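A common use of these injection routines is the swallow-and-reinject pattern. A sketch under stated assumptions: the sf_data_in_func signature is the one declared earlier in this header, defer_for_inspection is a hypothetical helper, and the error handling follows the @result contracts above (on failure the caller still owns the mbuf).

#include <sys/errno.h>
#include <sys/kpi_mbuf.h>
#include <sys/kpi_socketfilter.h>

extern void defer_for_inspection(void *cookie, mbuf_t m);  /* hypothetical */

static errno_t
myfilt_data_in(void *cookie, socket_t so, const struct sockaddr *from,
    mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
{
	/* EJUSTRETURN "swallows" the chain: the stack stops processing it
	 * and the filter becomes responsible for handing it back later. */
	defer_for_inspection(cookie, *data);
	return EJUSTRETURN;
}

static void
myfilt_resume(socket_t so, mbuf_t data)
{
	/* Re-inject as if the data had just arrived from the network. */
	if (sock_inject_data_in(so, NULL, data, NULL, 0) != 0) {
		mbuf_freem(data);       /* on error, the caller must free */
	}
}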
See man 2 setsockopt
 */
extern int sockopt_level(sockopt_t sopt);

/*!
- @function sockopt_name
- @discussion Retrieves the socket option name. (SO_SNDBUF, etc).
- @param sopt The socket option.
- @result The socket option name. See man 2 setsockopt
+ * @function sockopt_name
+ * @discussion Retrieves the socket option name. (SO_SNDBUF, etc).
+ * @param sopt The socket option.
+ * @result The socket option name. See man 2 setsockopt
 */
extern int sockopt_name(sockopt_t sopt);

/*!
- @function sockopt_valsize
- @discussion Retrieves the size of the socket option data.
- @param sopt The socket option.
- @result The length, in bytes, of the data.
+ * @function sockopt_valsize
+ * @discussion Retrieves the size of the socket option data.
+ * @param sopt The socket option.
+ * @result The length, in bytes, of the data.
 */
extern size_t sockopt_valsize(sockopt_t sopt);

/*!
- @function sockopt_copyin
- @discussion Copies the data from the socket option to a buffer.
- @param sopt The socket option.
- @param data A pointer to the buffer to copy the data in to.
- @param length The number of bytes to copy.
- @result An errno error or zero upon success.
+ * @function sockopt_copyin
+ * @discussion Copies the data from the socket option to a buffer.
+ * @param sopt The socket option.
+ * @param data A pointer to the buffer to copy the data into.
+ * @param length The number of bytes to copy.
+ * @result An errno error or zero upon success.
 */
extern errno_t sockopt_copyin(sockopt_t sopt, void *data, size_t length);

/*!
- @function sockopt_copyout
- @discussion Copies the data from a buffer to a socket option.
- @param sopt The socket option.
- @param data A pointer to the buffer to copy the data out of.
- @param length The number of bytes to copy.
- @result An errno error or zero upon success.
+ * @function sockopt_copyout
+ * @discussion Copies the data from a buffer to a socket option.
+ * @param sopt The socket option.
+ * @param data A pointer to the buffer to copy the data out of.
+ * @param length The number of bytes to copy.
+ * @result An errno error or zero upon success.
 */
extern errno_t sockopt_copyout(sockopt_t sopt, void *data, size_t length);

diff --git a/bsd/sys/ktrace.h b/bsd/sys/ktrace.h
index 735dc82c3..61d0fb3d1 100644
--- a/bsd/sys/ktrace.h
+++ b/bsd/sys/ktrace.h
@@ -2,7 +2,7 @@
 * Copyright (c) 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- *
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
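Taken together, the accessors above are what an sf_setoption or sf_getoption handler is built from. A minimal sketch, assuming the sf_setoption_func signature from earlier in this header; it only observes SO_SNDBUF changes and always lets the option through.

#include <sys/socket.h>
#include <sys/kpi_socketfilter.h>

static errno_t
myfilt_setoption(void *cookie, socket_t so, sockopt_t sopt)
{
	int val;

	if (sockopt_direction(sopt) != sockopt_set ||
	    sockopt_level(sopt) != SOL_SOCKET ||
	    sockopt_name(sopt) != SO_SNDBUF) {
		return 0;       /* not interesting; continue normally */
	}
	if (sockopt_valsize(sopt) >= sizeof(val) &&
	    sockopt_copyin(sopt, &val, sizeof(val)) == 0) {
		printf("myfilt: SO_SNDBUF set to %d\n", val);
	}
	return 0;               /* returning 0 lets setsockopt proceed */
}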
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/sys/lctx.h b/bsd/sys/lctx.h index e5205c632..302097969 100644 --- a/bsd/sys/lctx.h +++ b/bsd/sys/lctx.h @@ -26,4 +26,4 @@ setlcid(pid_t pid, pid_t lcid) #define LCID_REMOVE (-1) #define LCID_CREATE (0) -#endif /* !_SYS_LCTX_H_ */ +#endif /* !_SYS_LCTX_H_ */ diff --git a/bsd/sys/linker_set.h b/bsd/sys/linker_set.h index 4ebdcec61..820bc2e36 100644 --- a/bsd/sys/linker_set.h +++ b/bsd/sys/linker_set.h @@ -2,7 +2,7 @@ * Copyright (c) 2006-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ * * @@ -64,7 +64,7 @@ * For Mach-O, this is done by constructing a separate segment inside the * __DATA section for each set. The contents of this segment are an array * of pointers to the objects in the set. - * + * * Note that due to limitations of the Mach-O format, there cannot * be more than 255 sections in a segment, so linker set usage should be * conserved. Set names may not exceed 16 characters. @@ -100,28 +100,28 @@ struct linker_set_entry { #ifdef __LS_VA_STRCONCAT__ # undef __LS_VA_STRCONCAT__ #endif -#define __LS_VA_STRINGIFY(_x...) #_x -#define __LS_VA_STRCONCAT(_x,_y) __LS_VA_STRINGIFY(_x,_y) -#define __LINKER_MAKE_SET(_set, _sym) \ - /*__unused*/ /*static*/ const struct linker_set_entry /*const*/ __set_##_set##_sym_##_sym \ +#define __LS_VA_STRINGIFY(_x ...) #_x +#define __LS_VA_STRCONCAT(_x, _y) __LS_VA_STRINGIFY(_x,_y) +#define __LINKER_MAKE_SET(_set, _sym) \ + /*__unused*/ /*static*/ const struct linker_set_entry /*const*/ __set_##_set##_sym_##_sym \ __attribute__ ((section(__LS_VA_STRCONCAT(__DATA,_set)),used)) = { (void *)&_sym } /* the line above is very fragile - if your compiler breaks linker sets, - just play around with "static", "const", "used" etc. :-) */ + * just play around with "static", "const", "used" etc. :-) */ /* * Public macros. */ -#define LINKER_SET_ENTRY(_set, _sym) __LINKER_MAKE_SET(_set, _sym) +#define LINKER_SET_ENTRY(_set, _sym) __LINKER_MAKE_SET(_set, _sym) /* * FreeBSD compatibility. 
*/ #ifdef __APPLE_API_OBSOLETE -# define TEXT_SET(_set, _sym) __LINKER_MAKE_SET(_set, _sym) -# define DATA_SET(_set, _sym) __LINKER_MAKE_SET(_set, _sym) -# define BSS_SET(_set, _sym) __LINKER_MAKE_SET(_set, _sym) -# define ABS_SET(_set, _sym) __LINKER_MAKE_SET(_set, _sym) -# define SET_ENTRY(_set, _sym) __LINKER_MAKE_SET(_set, _sym) +# define TEXT_SET(_set, _sym) __LINKER_MAKE_SET(_set, _sym) +# define DATA_SET(_set, _sym) __LINKER_MAKE_SET(_set, _sym) +# define BSS_SET(_set, _sym) __LINKER_MAKE_SET(_set, _sym) +# define ABS_SET(_set, _sym) __LINKER_MAKE_SET(_set, _sym) +# define SET_ENTRY(_set, _sym) __LINKER_MAKE_SET(_set, _sym) #endif /* __APPLE_API_OBSOLETE */ /* @@ -149,22 +149,22 @@ struct linker_set_entry { * LINKER_SET_FOREACH((set_member_type **)_pvar, _cast, _set) * * Example of _cast: For the _pvar "struct sysctl_oid **oidpp", _cast would be - * "struct sysctl_oid **" + * "struct sysctl_oid **" * */ -#define LINKER_SET_OBJECT_BEGIN(_object, _set) __linker_set_object_begin(_object, _set) -#define LINKER_SET_OBJECT_LIMIT(_object, _set) __linker_set_object_limit(_object, _set) +#define LINKER_SET_OBJECT_BEGIN(_object, _set) __linker_set_object_begin(_object, _set) +#define LINKER_SET_OBJECT_LIMIT(_object, _set) __linker_set_object_limit(_object, _set) -#define LINKER_SET_OBJECT_FOREACH(_object, _pvar, _cast, _set) \ - for (_pvar = (_cast) LINKER_SET_OBJECT_BEGIN(_object, _set); \ - _pvar < (_cast) LINKER_SET_OBJECT_LIMIT(_object, _set); \ +#define LINKER_SET_OBJECT_FOREACH(_object, _pvar, _cast, _set) \ + for (_pvar = (_cast) LINKER_SET_OBJECT_BEGIN(_object, _set); \ + _pvar < (_cast) LINKER_SET_OBJECT_LIMIT(_object, _set); \ _pvar++) -#define LINKER_SET_OBJECT_ITEM(_object, _cast, _set, _i) \ +#define LINKER_SET_OBJECT_ITEM(_object, _cast, _set, _i) \ (((_cast)(LINKER_SET_OBJECT_BEGIN(_object, _set)))[_i]) -#define LINKER_SET_FOREACH(_pvar, _cast, _set) \ +#define LINKER_SET_FOREACH(_pvar, _cast, _set) \ LINKER_SET_OBJECT_FOREACH((kernel_mach_header_t *)&_mh_execute_header, _pvar, _cast, _set) /* @@ -178,7 +178,7 @@ struct linker_set_entry { static __inline void ** __linker_set_object_begin(kernel_mach_header_t *_header, const char *_set) - __attribute__((__const__)); +__attribute__((__const__)); static __inline void ** __linker_set_object_begin(kernel_mach_header_t *_header, const char *_set) { @@ -186,12 +186,12 @@ __linker_set_object_begin(kernel_mach_header_t *_header, const char *_set) unsigned long _size; _set_begin = getsectdatafromheader(_header, "__DATA", _set, &_size); - return( (void **) _set_begin ); + return (void **) _set_begin; } static __inline void ** __linker_set_object_limit(kernel_mach_header_t *_header, const char *_set) - __attribute__((__const__)); +__attribute__((__const__)); static __inline void ** __linker_set_object_limit(kernel_mach_header_t *_header, const char *_set) { @@ -199,12 +199,10 @@ __linker_set_object_limit(kernel_mach_header_t *_header, const char *_set) unsigned long _size; _set_begin = getsectdatafromheader(_header, "__DATA", _set, &_size); - - return ((void **) ((uintptr_t) _set_begin + _size)); + + return (void **) ((uintptr_t) _set_begin + _size); } #endif /* !KERNEL || __APPLE_API_PRIVATE */ #endif /* _SYS_LINKER_SET_H_ */ - - diff --git a/bsd/sys/loadable_fs.h b/bsd/sys/loadable_fs.h index a5e736101..6668a780a 100644 --- a/bsd/sys/loadable_fs.h +++ b/bsd/sys/loadable_fs.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
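For reference, the public macros above compose as follows; a minimal kernel-only sketch (the iteration walks the running kernel's Mach-O header, and the set name is kept well under the 16-character limit noted earlier):

#include <sys/linker_set.h>

static int foo = 1;
static int bar = 2;

/* Each entry records &foo / &bar in the __DATA,my_ints section. */
LINKER_SET_ENTRY(my_ints, foo);
LINKER_SET_ENTRY(my_ints, bar);

static int
my_ints_sum(void)
{
	int **ip, sum = 0;

	/* _cast follows the documented pattern: "int **" for "int **ip". */
	LINKER_SET_FOREACH(ip, int **, my_ints) {
		sum += **ip;
	}
	return sum;     /* 3, once both entries are linked in */
}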
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,25 +22,25 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @(#)loadable_fs.h 2.0 26/06/90 (c) 1990 NeXT */ /* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX * - * W I L L D R A S T I C A L L Y C H A N G E S O O N + * W I L L D R A S T I C A L L Y C H A N G E S O O N * U S E A T Y O U R O W N R I S K * * XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ -/* +/* * loadable_fs.h - message struct for loading and initializing loadable * file systems. */ -#ifndef _SYS_LOADABLE_FS_ +#ifndef _SYS_LOADABLE_FS_ #define _SYS_LOADABLE_FS_ @@ -49,61 +49,61 @@ * * Example of a /usr/filesystems directory * - * /usr/filesystems/dos.fs/dos.util utility with which WSM + * /usr/filesystems/dos.fs/dos.util utility with which WSM * communicates - * /usr/filesystems/dos.fs/dos.name "DOS Floppy" + * /usr/filesystems/dos.fs/dos.name "DOS Floppy" * /usr/filesystems/dos.fs/dos_reloc actual loadable filesystem - * /usr/filesystems/dos.fs/dos.openfs.tiff "open folder" icon - * /usr/filesystems/dos.fs/dos.fs.tiff "closed folder" icon + * /usr/filesystems/dos.fs/dos.openfs.tiff "open folder" icon + * /usr/filesystems/dos.fs/dos.fs.tiff "closed folder" icon */ -#define FS_DIR_LOCATION "/System/Library/Filesystems" -#define FS_DIR_SUFFIX ".fs" -#define FS_UTIL_SUFFIX ".util" +#define FS_DIR_LOCATION "/System/Library/Filesystems" +#define FS_DIR_SUFFIX ".fs" +#define FS_UTIL_SUFFIX ".util" /* * .util program commands - all sent in the form "-p" or "-m" ... as argv[1]. 
*/ -#define FSUC_PROBE 'p' /* probe FS for mount or init */ - /* example usage: foo.util -p fd0 removable writable */ +#define FSUC_PROBE 'p' /* probe FS for mount or init */ +/* example usage: foo.util -p fd0 removable writable */ -#define FSUC_PROBEFORINIT 'P' /* probe FS for init only */ - /* example usage: foo.util -P fd0 removable */ +#define FSUC_PROBEFORINIT 'P' /* probe FS for init only */ +/* example usage: foo.util -P fd0 removable */ -#define FSUC_MOUNT 'm' /* mount FS */ - /* example usage: foo.util -m fd0 /bar removable writable */ +#define FSUC_MOUNT 'm' /* mount FS */ +/* example usage: foo.util -m fd0 /bar removable writable */ -#define FSUC_REPAIR 'r' /* repair ('fsck') FS */ - /* example usage: foo.util -r fd0 removable */ +#define FSUC_REPAIR 'r' /* repair ('fsck') FS */ +/* example usage: foo.util -r fd0 removable */ -#define FSUC_UNMOUNT 'u' /* unmount FS */ - /* example usage: foo.util -u fd0 /bar */ +#define FSUC_UNMOUNT 'u' /* unmount FS */ +/* example usage: foo.util -u fd0 /bar */ /* The following is not used by Workspace Manager */ -#define FSUC_MOUNT_FORCE 'M' /* like FSUC_MOUNT, but proceed even on - * error. */ +#define FSUC_MOUNT_FORCE 'M' /* like FSUC_MOUNT, but proceed even on + * error. */ /* * Return codes from .util program */ -#define FSUR_RECOGNIZED (-1) /* response to FSUC_PROBE; implies that - * a mount is possible */ -#define FSUR_UNRECOGNIZED (-2) /* negative response to FSUC_PROBE */ -#define FSUR_IO_SUCCESS (-3) /* mount, unmount, repair succeeded */ -#define FSUR_IO_FAIL (-4) /* unrecoverable I/O error */ -#define FSUR_IO_UNCLEAN (-5) /* mount failed, file system not clean - */ -#define FSUR_INVAL (-6) /* invalid argument */ -#define FSUR_LOADERR (-7) /* kern_loader error */ -#define FSUR_INITRECOGNIZED (-8) /* response to FSUC_PROBE or - * FSUC_PROBEFORINIT, implies that - * initialization is possible */ +#define FSUR_RECOGNIZED (-1) /* response to FSUC_PROBE; implies that + * a mount is possible */ +#define FSUR_UNRECOGNIZED (-2) /* negative response to FSUC_PROBE */ +#define FSUR_IO_SUCCESS (-3) /* mount, unmount, repair succeeded */ +#define FSUR_IO_FAIL (-4) /* unrecoverable I/O error */ +#define FSUR_IO_UNCLEAN (-5) /* mount failed, file system not clean + */ +#define FSUR_INVAL (-6) /* invalid argument */ +#define FSUR_LOADERR (-7) /* kern_loader error */ +#define FSUR_INITRECOGNIZED (-8) /* response to FSUC_PROBE or + * FSUC_PROBEFORINIT, implies that + * initialization is possible */ /* * mount parameters passed from WSM to the .util program. */ -#define DEVICE_READONLY "readonly" -#define DEVICE_WRITABLE "writable" +#define DEVICE_READONLY "readonly" +#define DEVICE_WRITABLE "writable" -#define DEVICE_REMOVABLE "removable" -#define DEVICE_FIXED "fixed" +#define DEVICE_REMOVABLE "removable" +#define DEVICE_FIXED "fixed" -#endif /* _SYS_LOADABLE_FS_ */ +#endif /* _SYS_LOADABLE_FS_ */ diff --git a/bsd/sys/lock.h b/bsd/sys/lock.h index 4e2c537dc..c72bba77c 100644 --- a/bsd/sys/lock.h +++ b/bsd/sys/lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
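Putting the command letters and return codes together, a .util program is essentially a dispatcher on argv[1]. A hedged sketch (my_probe and my_mount are hypothetical helpers; note the FSUR_* codes are negative, so the exit status wraps modulo 256 and FSUR_RECOGNIZED reads back as 255):

#include <sys/loadable_fs.h>

extern int my_probe(const char *dev);                   /* hypothetical */
extern int my_mount(const char *dev, const char *dir);  /* hypothetical */

int
main(int argc, char *argv[])
{
	if (argc < 3 || argv[1][0] != '-') {
		return FSUR_INVAL;
	}
	switch (argv[1][1]) {
	case FSUC_PROBE:        /* e.g. "foo.util -p fd0 removable writable" */
		return my_probe(argv[2]) ? FSUR_RECOGNIZED : FSUR_UNRECOGNIZED;
	case FSUC_MOUNT:        /* e.g. "foo.util -m fd0 /bar removable writable" */
		if (argc < 4) {
			return FSUR_INVAL;
		}
		return my_mount(argv[2], argv[3]) == 0 ?
		    FSUR_IO_SUCCESS : FSUR_IO_FAIL;
	default:
		return FSUR_INVAL;
	}
}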
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ -/* +/* * Copyright (c) 1995 * The Regents of the University of California. All rights reserved. * @@ -65,8 +65,8 @@ * @(#)lock.h 8.12 (Berkeley) 5/19/95 */ -#ifndef _SYS_LOCK_H_ -#define _SYS_LOCK_H_ +#ifndef _SYS_LOCK_H_ +#define _SYS_LOCK_H_ #include #include @@ -78,4 +78,4 @@ #endif /* KERNEL */ -#endif /* _SYS_LOCK_H_ */ +#endif /* _SYS_LOCK_H_ */ diff --git a/bsd/sys/lockf.h b/bsd/sys/lockf.h index e20bf8733..da52967a0 100644 --- a/bsd/sys/lockf.h +++ b/bsd/sys/lockf.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- *
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
 /*
@@ -61,7 +61,7 @@
 */

 #ifndef _SYS_LOCKF_H_
-#define _SYS_LOCKF_H_
+#define _SYS_LOCKF_H_

 #include
 #include

@@ -90,20 +90,20 @@ TAILQ_HEAD(locklist, lockf);
 #pragma pack(4)
 struct lockf {
- short lf_flags; /* Semantics: F_POSIX, F_FLOCK, F_WAIT */
- short lf_type; /* Lock type: F_RDLCK, F_WRLCK */
- off_t lf_start; /* Byte # of the start of the lock */
- off_t lf_end; /* Byte # of the end of the lock (-1=EOF) */
- caddr_t lf_id; /* Id of the resource holding the lock */
- struct lockf **lf_head; /* Back pointer to the head of the locf list */
- struct vnode *lf_vnode; /* Back pointer to the inode */
- struct lockf *lf_next; /* Pointer to the next lock on this inode */
- struct locklist lf_blkhd; /* List of requests blocked on this lock */
+ short lf_flags; /* Semantics: F_POSIX, F_FLOCK, F_WAIT */
+ short lf_type; /* Lock type: F_RDLCK, F_WRLCK */
+ off_t lf_start; /* Byte # of the start of the lock */
+ off_t lf_end; /* Byte # of the end of the lock (-1=EOF) */
+ caddr_t lf_id; /* Id of the resource holding the lock */
+ struct lockf **lf_head; /* Back pointer to the head of the lockf list */
+ struct vnode *lf_vnode; /* Back pointer to the inode */
+ struct lockf *lf_next; /* Pointer to the next lock on this inode */
+ struct locklist lf_blkhd; /* List of requests blocked on this lock */
 TAILQ_ENTRY(lockf) lf_block;/* A request waiting for a lock */
 #if IMPORTANCE_INHERITANCE
 int lf_boosted; /* Is the owner of the lock boosted */
 #endif
- struct proc *lf_owner; /* The proc that did the SETLK, if known */
+ struct proc *lf_owner; /* The proc that did the SETLK, if known */
 };

 #pragma pack()
@@ -114,14 +114,14 @@ struct lockf {
 __BEGIN_DECLS

 #ifdef KERNEL_PRIVATE
-int lf_advlock(struct vnop_advlock_args *);
-int lf_assert(struct vnop_advlock_args *, void **);
-void lf_commit(void *, int);
-void lf_abort_advlocks(vnode_t);
+int lf_advlock(struct vnop_advlock_args *);
+int lf_assert(struct vnop_advlock_args *, void **);
+void lf_commit(void *, int);
+void lf_abort_advlocks(vnode_t);

 #ifdef LOCKF_DEBUG
-void lf_print(char *, struct lockf *);
-void lf_printlist(char *, struct lockf *);
+void lf_print(char *, struct lockf *);
+void lf_printlist(char *, struct lockf *);
 #endif

 #endif /* KERNEL_PRIVATE */
diff --git a/bsd/sys/lockstat.h b/bsd/sys/lockstat.h
index 870789261..327b03304 100644
--- a/bsd/sys/lockstat.h
+++ b/bsd/sys/lockstat.h
@@ -25,194 +25,71 @@
 */

 #ifndef _SYS_LOCKSTAT_H
-#define _SYS_LOCKSTAT_H
-#endif
+#define _SYS_LOCKSTAT_H

 /* #pragma ident "@(#)lockstat.h 1.6 05/06/08 SMI" */

-#ifdef __cplusplus
+#ifdef __cplusplus
 extern "C" {
 #endif

-/*
- * Spin locks - we have less variants
- */
-#define LS_LCK_SPIN_LOCK_ACQUIRE 0
-#define LS_LCK_SPIN_LOCK_SPIN 1
-#define LS_LCK_SPIN_UNLOCK_RELEASE 2
-/*
- * Mutexes can also have interlock-spin events, which are
- * unique to our lock implementation.
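The lf_start/lf_end fields above encode inclusive byte ranges, with -1 standing for end-of-file. A small self-contained sketch of that convention (an illustration of the overlap test the structure implies, not the kernel's actual algorithm):

#include <stdbool.h>
#include <sys/types.h>

static bool
lf_ranges_overlap(off_t s1, off_t e1, off_t s2, off_t e2)
{
	/* An end of -1 means the lock extends to EOF (unbounded). */
	if (e1 != -1 && s2 > e1) {
		return false;   /* second range starts after first ends */
	}
	if (e2 != -1 && s1 > e2) {
		return false;   /* first range starts after second ends */
	}
	return true;
}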
- */ -#define LS_LCK_MTX_LOCK_ACQUIRE 3 -#define LS_LCK_MTX_LOCK_BLOCK 5 -#define LS_LCK_MTX_LOCK_SPIN 6 -#define LS_LCK_MTX_LOCK_ILK_SPIN 7 -#define LS_LCK_MTX_TRY_LOCK_ACQUIRE 8 -#define LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE 9 -#define LS_LCK_MTX_UNLOCK_RELEASE 10 - -#define LS_LCK_MTX_LOCK_SPIN_ACQUIRE 39 -/* - * Provide a parallel set for indirect mutexes - */ -#define LS_LCK_MTX_EXT_LOCK_ACQUIRE 17 -#define LS_LCK_MTX_EXT_LOCK_BLOCK 18 -#define LS_LCK_MTX_EXT_LOCK_SPIN 19 -#define LS_LCK_MTX_EXT_LOCK_ILK_SPIN 20 -#define LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE 21 -#define LS_LCK_MTX_EXT_UNLOCK_RELEASE 22 -/* - * Our reader-writer locks support a blocking upgrade primitive, as - * well as the possibility of spinning on the interlock. - */ -#define LS_LCK_RW_LOCK_SHARED_ACQUIRE 23 -#define LS_LCK_RW_LOCK_SHARED_BLOCK 24 -#define LS_LCK_RW_LOCK_SHARED_SPIN 25 - -#define LS_LCK_RW_LOCK_EXCL_ACQUIRE 26 -#define LS_LCK_RW_LOCK_EXCL_BLOCK 27 -#define LS_LCK_RW_LOCK_EXCL_SPIN 28 - -#define LS_LCK_RW_DONE_RELEASE 29 - -#define LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE 30 -#define LS_LCK_RW_TRY_LOCK_SHARED_SPIN 31 - -#define LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE 32 -#define LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN 33 - -#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE 34 -#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN 35 -#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK 36 - -#define LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE 37 -#define LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN 38 - -#define LS_NPROBES 40 -#define LS_LCK_INVALID LS_NPROBES - /* * Name the various locking functions... */ -#define LS_LCK_MTX_LOCK "lck_mtx_lock" -#define LS_LCK_MTX_SPIN_LOCK "lck_mtx_spin_lock" -#define LS_LCK_MTX_UNLOCK "lck_mtx_unlock" -#define LS_LCK_MTX_TRY_LOCK "lck_mtx_try_lock" -#define LS_LCK_MTX_TRY_SPIN_LOCK "lck_mtx_try_spin_lock" -#define LS_LCK_MTX_EXT_LOCK "lck_mtx_ext_lock" -#define LS_LCK_MTX_EXT_UNLOCK "lck_mtx_ext_unlock" -#define LS_LCK_MTX_EXT_TRY_LOCK "lck_mtx_ext_try_lock" -#define LS_LCK_MTX_LOCK_SPIN_LOCK "lck_mtx_lock_spin" - -#define LS_LCK_SPIN_LOCK "lck_spin_lock" -#define LS_LCK_SPIN_TRY_LOCK "lck_spin_try_lock" -#define LS_LCK_SPIN_UNLOCK "lck_spin_unlock" -#define LS_LCK_RW_LOCK_SHARED "lck_rw_lock_shared" -#define LS_LCK_RW_LOCK_EXCL "lck_rw_lock_exclusive" -#define LS_LCK_RW_DONE "lck_rw_done" -#define LS_LCK_RW_TRY_LOCK_EXCL "lck_rw_try_lock_exclusive" -#define LS_LCK_RW_TRY_LOCK_SHARED "lck_rw_try_lock_shared" -#define LS_LCK_RW_LOCK_SHARED_TO_EXCL "lck_rw_shared_to_exclusive" -#define LS_LCK_RW_LOCK_EXCL_TO_SHARED "lck_rw_exclusive_to_shared" - -#define LS_ACQUIRE "acquire" -#define LS_RELEASE "release" -#define LS_SPIN "spin" -#define LS_BLOCK "block" -#define LS_UPGRADE "upgrade" -#define LS_DOWNGRADE "downgrade" - -#define LS_TYPE_ADAPTIVE "adaptive" -#define LS_TYPE_SPIN "spin" -#define LS_TYPE_ILK "interlock" /* OS X only */ -#define LS_TYPE_THREAD "thread" /* Solaris only */ -#define LS_TYPE_RW "rw" -#define LS_TYPE_RWUPGRADE "rwupgrade" /* OS X only */ - -#define LSA_ACQUIRE (LS_TYPE_ADAPTIVE "-" LS_ACQUIRE) -#define LSA_RELEASE (LS_TYPE_ADAPTIVE "-" LS_RELEASE) -#define LSA_SPIN (LS_TYPE_ADAPTIVE "-" LS_SPIN) -#define LSA_BLOCK (LS_TYPE_ADAPTIVE "-" LS_BLOCK) -#define LSA_ILK_SPIN (LS_TYPE_ILK "-" LS_SPIN) -#define LSS_ACQUIRE (LS_TYPE_SPIN "-" LS_ACQUIRE) -#define LSS_RELEASE (LS_TYPE_SPIN "-" LS_RELEASE) -#define LSS_SPIN (LS_TYPE_SPIN "-" LS_SPIN) -#define LSR_ACQUIRE (LS_TYPE_RW "-" LS_ACQUIRE) -#define LSR_RELEASE (LS_TYPE_RW "-" LS_RELEASE) -#define LSR_BLOCK (LS_TYPE_RW "-" LS_BLOCK) -#define LSR_SPIN (LS_TYPE_RW 
"-" LS_SPIN) -#define LSR_UPGRADE (LS_TYPE_RW "-" LS_UPGRADE) -#define LSR_UPGRADE_BLOCK (LS_TYPE_RWUPGRADE "-" LS_BLOCK) -#define LSR_DOWNGRADE (LS_TYPE_RW "-" LS_DOWNGRADE) -#define LST_SPIN (LS_TYPE_THREAD "-" LS_SPIN) - -#ifndef _ASM - -#include -#ifdef KERNEL - -#ifndef _KERNEL -#define _KERNEL /* Solaris vs. Darwin */ -#endif - -/* - * Platform-independent kernel support for the lockstat driver. - */ -#if defined(NEED_DTRACE_DEFS) -typedef uint32_t dtrace_id_t; /* probe identifier - also in dtrace.h! */ -typedef uint64_t u_longlong_t; /* also in dtrace.h! */ -#endif - -extern dtrace_id_t lockstat_probemap[LS_NPROBES]; -extern void (*lockstat_probe)(dtrace_id_t, uint64_t, uint64_t, - uint64_t, uint64_t, uint64_t); - - - -#ifdef _KERNEL - -#if CONFIG_DTRACE - -extern void (lockstat_probe_wrapper)(int, uintptr_t, int); - -/* - * Macros to record lockstat probes. - */ -#define LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3) \ - { \ - dtrace_id_t id; \ - if (__improbable(id = lockstat_probemap[(probe)])) { \ - (*lockstat_probe)(id, (uintptr_t)(lp), (arg0), \ - (arg1), (arg2), (arg3)); \ - } \ - } - -#define LOCKSTAT_RECORD2(probe, lp, arg1, arg2) \ - LOCKSTAT_RECORD4(probe, lp, arg1, arg2, 0, 0) - -#define LOCKSTAT_RECORD(probe, lp, arg) \ - LOCKSTAT_RECORD4(probe, lp, arg, 0, 0, 0) - -#define LOCKSTAT_RECORD0(probe, lp) \ - LOCKSTAT_RECORD4(probe, lp, 0, 0, 0, 0) -#else - /* No Lockstat provider */ - -#define LOCKSTAT_RECORD() -#define LOCKSTAT_RECORD0() -#define LOCKSTAT_RECORD2() -#define LOCKSTAT_RECORD4() - -#endif /* !CONFIG_DTRACE */ - -#endif /* _KERNEL */ - -#endif /* _ASM */ - -#ifdef __cplusplus +#define LS_LCK_MTX_LOCK "lck_mtx_lock" +#define LS_LCK_MTX_SPIN_LOCK "lck_mtx_spin_lock" +#define LS_LCK_MTX_UNLOCK "lck_mtx_unlock" +#define LS_LCK_MTX_TRY_LOCK "lck_mtx_try_lock" +#define LS_LCK_MTX_TRY_SPIN_LOCK "lck_mtx_try_spin_lock" +#define LS_LCK_MTX_EXT_LOCK "lck_mtx_ext_lock" +#define LS_LCK_MTX_EXT_UNLOCK "lck_mtx_ext_unlock" +#define LS_LCK_MTX_EXT_TRY_LOCK "lck_mtx_ext_try_lock" +#define LS_LCK_MTX_LOCK_SPIN_LOCK "lck_mtx_lock_spin" + +#define LS_LCK_SPIN_LOCK "lck_spin_lock" +#define LS_LCK_SPIN_TRY_LOCK "lck_spin_try_lock" +#define LS_LCK_SPIN_UNLOCK "lck_spin_unlock" +#define LS_LCK_RW_LOCK_SHARED "lck_rw_lock_shared" +#define LS_LCK_RW_LOCK_EXCL "lck_rw_lock_exclusive" +#define LS_LCK_RW_DONE "lck_rw_done" +#define LS_LCK_RW_TRY_LOCK_EXCL "lck_rw_try_lock_exclusive" +#define LS_LCK_RW_TRY_LOCK_SHARED "lck_rw_try_lock_shared" +#define LS_LCK_RW_LOCK_SHARED_TO_EXCL "lck_rw_shared_to_exclusive" +#define LS_LCK_RW_LOCK_EXCL_TO_SHARED "lck_rw_exclusive_to_shared" + +#define LS_ACQUIRE "acquire" +#define LS_RELEASE "release" +#define LS_SPIN "spin" +#define LS_BLOCK "block" +#define LS_UPGRADE "upgrade" +#define LS_DOWNGRADE "downgrade" + +#define LS_TYPE_ADAPTIVE "adaptive" +#define LS_TYPE_SPIN "spin" +#define LS_TYPE_ILK "interlock" /* OS X only */ +#define LS_TYPE_THREAD "thread" /* Solaris only */ +#define LS_TYPE_RW "rw" +#define LS_TYPE_RWUPGRADE "rwupgrade" /* OS X only */ + +#define LSA_ACQUIRE (LS_TYPE_ADAPTIVE "-" LS_ACQUIRE) +#define LSA_RELEASE (LS_TYPE_ADAPTIVE "-" LS_RELEASE) +#define LSA_SPIN (LS_TYPE_ADAPTIVE "-" LS_SPIN) +#define LSA_BLOCK (LS_TYPE_ADAPTIVE "-" LS_BLOCK) +#define LSA_ILK_SPIN (LS_TYPE_ILK "-" LS_SPIN) +#define LSS_ACQUIRE (LS_TYPE_SPIN "-" LS_ACQUIRE) +#define LSS_RELEASE (LS_TYPE_SPIN "-" LS_RELEASE) +#define LSS_SPIN (LS_TYPE_SPIN "-" LS_SPIN) +#define LSR_ACQUIRE (LS_TYPE_RW "-" LS_ACQUIRE) +#define LSR_RELEASE (LS_TYPE_RW "-" 
LS_RELEASE) +#define LSR_BLOCK (LS_TYPE_RW "-" LS_BLOCK) +#define LSR_SPIN (LS_TYPE_RW "-" LS_SPIN) +#define LSR_UPGRADE (LS_TYPE_RW "-" LS_UPGRADE) +#define LSR_UPGRADE_BLOCK (LS_TYPE_RWUPGRADE "-" LS_BLOCK) +#define LSR_DOWNGRADE (LS_TYPE_RW "-" LS_DOWNGRADE) +#define LST_SPIN (LS_TYPE_THREAD "-" LS_SPIN) + +#ifdef __cplusplus } #endif -#endif /* _SYS_LOCKSTAT_H */ - +#endif /* _SYS_LOCKSTAT_H */ diff --git a/bsd/sys/mach_swapon.h b/bsd/sys/mach_swapon.h index f0330c413..e5728fde7 100644 --- a/bsd/sys/mach_swapon.h +++ b/bsd/sys/mach_swapon.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,23 +22,23 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Copyright (c) 1989,1995 NeXT, Inc. * All rights reserved. * */ -#ifndef _MACH_SWAPON_H -#define _MACH_SWAPON_H +#ifndef _MACH_SWAPON_H +#define _MACH_SWAPON_H #include #warning obsolete header file! Please delete the include from your sources. #ifdef __APPLE_API_OBSOLETE -#define MS_PREFER 0x1 /* This device/file is preferred */ +#define MS_PREFER 0x1 /* This device/file is preferred */ #endif /* __APPLE_API_OBSOLETE */ -#endif /* _MACH_SWAPON_H */ +#endif /* _MACH_SWAPON_H */ diff --git a/bsd/sys/malloc.h b/bsd/sys/malloc.h index fea78a29c..66dccfa5b 100644 --- a/bsd/sys/malloc.h +++ b/bsd/sys/malloc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1998, 1999 Apple Computer, Inc. 
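The surviving lockstat macros above build DTrace probe names purely by string pasting, so LSA_ACQUIRE is the literal "adaptive-acquire", LSR_UPGRADE_BLOCK is "rwupgrade-block", and so on. A sketch, assuming <sys/lockstat.h> is includable from the consumer:

#include <sys/lockstat.h>

static const char *const lockstat_probe_names[] = {
	LSA_ACQUIRE,            /* "adaptive-acquire" */
	LSA_BLOCK,              /* "adaptive-block" */
	LSS_SPIN,               /* "spin-spin" */
	LSR_UPGRADE_BLOCK,      /* "rwupgrade-block" */
};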
All Rights Reserved */ @@ -69,7 +69,7 @@ */ #ifndef _SYS_MALLOC_H_ -#define _SYS_MALLOC_H_ +#define _SYS_MALLOC_H_ #include @@ -78,10 +78,10 @@ /* * flags to malloc */ -#define M_WAITOK 0x0000 -#define M_NOWAIT 0x0001 +#define M_WAITOK 0x0000 +#define M_NOWAIT 0x0001 #define M_ZERO 0x0004 /* bzero the allocation */ -#define M_NULL 0x0008 /* return NULL if space is unavailable*/ +#define M_NULL 0x0008 /* return NULL if space is unavailable*/ #ifdef BSD_KERNEL_PRIVATE @@ -90,151 +90,151 @@ /* * Types of memory to be allocated (not all are used by us) */ -#define M_FREE 0 /* should be on free list */ -#define M_MBUF 1 /* mbuf */ -#define M_DEVBUF 2 /* device driver memory */ -#define M_SOCKET 3 /* socket structure */ -#define M_PCB 4 /* protocol control block */ -#define M_RTABLE 5 /* routing tables */ -#define M_HTABLE 6 /* IMP host tables */ -#define M_FTABLE 7 /* fragment reassembly header */ -#define M_ZOMBIE 8 /* zombie proc status */ -#define M_IFADDR 9 /* interface address */ -#define M_SOOPTS 10 /* socket options */ -#define M_SONAME 11 /* socket name */ -#define M_NAMEI 12 /* namei path name buffer */ -#define M_GPROF 13 /* kernel profiling buffer */ -#define M_IOCTLOPS 14 /* ioctl data buffer */ -#define M_MAPMEM 15 /* mapped memory descriptors */ -#define M_CRED 16 /* credentials */ -#define M_PGRP 17 /* process group header */ -#define M_SESSION 18 /* session header */ -#define M_IOV32 19 /* large iov's for 32 bit process */ -#define M_MOUNT 20 /* vfs mount struct */ -#define M_FHANDLE 21 /* network file handle */ -#define M_NFSREQ 22 /* NFS request header */ -#define M_NFSMNT 23 /* NFS mount structure */ -#define M_NFSNODE 24 /* NFS vnode private part */ -#define M_VNODE 25 /* Dynamically allocated vnodes */ -#define M_CACHE 26 /* Dynamically allocated cache entries */ -#define M_DQUOT 27 /* UFS quota entries */ -#define M_PROC_UUID_POLICY 28 /* proc UUID policy entries */ -#define M_SHM 29 /* SVID compatible shared memory segments */ -#define M_PLIMIT 30 /* plimit structures */ -#define M_SIGACTS 31 /* sigacts structures */ -#define M_VMOBJ 32 /* VM object structure */ -#define M_VMOBJHASH 33 /* VM object hash structure */ -#define M_VMPMAP 34 /* VM pmap */ -#define M_VMPVENT 35 /* VM phys-virt mapping entry */ -#define M_VMPAGER 36 /* XXX: VM pager struct */ -#define M_VMPGDATA 37 /* XXX: VM pager private data */ -#define M_FILEPROC 38 /* Open file structure */ -#define M_FILEDESC 39 /* Open file descriptor table */ -#define M_LOCKF 40 /* Byte-range locking structures */ -#define M_PROC 41 /* Proc structures */ -#define M_PSTATS 42 /* pstats proc sub-structures */ -#define M_SEGMENT 43 /* Segment for LFS */ -#define M_LFSNODE 44 /* LFS vnode private part */ -#define M_FFSNODE 45 /* FFS vnode private part */ -#define M_MFSNODE 46 /* MFS vnode private part */ -#define M_NQLEASE 47 /* XXX: Nqnfs lease */ -#define M_NQMHOST 48 /* XXX: Nqnfs host address table */ -#define M_NETADDR 49 /* Export host address structure */ -#define M_NFSSVC 50 /* NFS server structure */ -#define M_NFSUID 51 /* XXX: NFS uid mapping structure */ -#define M_NFSD 52 /* NFS server daemon structure */ -#define M_IPMOPTS 53 /* internet multicast options */ -#define M_IPMADDR 54 /* internet multicast address */ -#define M_IFMADDR 55 /* link-level multicast address */ -#define M_MRTABLE 56 /* multicast routing tables */ -#define M_ISOFSMNT 57 /* ISOFS mount structure */ -#define M_ISOFSNODE 58 /* ISOFS vnode private part */ -#define M_NFSRVDESC 59 /* NFS server socket descriptor */ -#define M_NFSDIROFF 60 /* 
NFS directory offset data */ -#define M_NFSBIGFH 61 /* NFS version 3 file handle */ -#define M_MSDOSFSMNT 62 /* MSDOS FS mount structure */ -#define M_MSDOSFSFAT 63 /* MSDOS FS fat table */ -#define M_MSDOSFSNODE 64 /* MSDOS FS vnode private part */ -#define M_TTYS 65 /* allocated tty structures */ -#define M_EXEC 66 /* argument lists & other mem used by exec */ -#define M_MISCFSMNT 67 /* miscfs mount structures */ -#define M_MISCFSNODE 68 /* miscfs vnode private part */ -#define M_ADOSFSMNT 69 /* adosfs mount structures */ -#define M_ADOSFSNODE 70 /* adosfs vnode private part */ -#define M_ANODE 71 /* adosfs anode structures and tables. */ -#define M_BUFHDR 72 /* File buffer cache headers */ -#define M_OFILETABL 73 /* Open file descriptor table */ -#define M_MCLUST 74 /* mbuf cluster buffers */ +#define M_FREE 0 /* should be on free list */ +#define M_MBUF 1 /* mbuf */ +#define M_DEVBUF 2 /* device driver memory */ +#define M_SOCKET 3 /* socket structure */ +#define M_PCB 4 /* protocol control block */ +#define M_RTABLE 5 /* routing tables */ +#define M_HTABLE 6 /* IMP host tables */ +#define M_FTABLE 7 /* fragment reassembly header */ +#define M_ZOMBIE 8 /* zombie proc status */ +#define M_IFADDR 9 /* interface address */ +#define M_SOOPTS 10 /* socket options */ +#define M_SONAME 11 /* socket name */ +#define M_NAMEI 12 /* namei path name buffer */ +#define M_GPROF 13 /* kernel profiling buffer */ +#define M_IOCTLOPS 14 /* ioctl data buffer */ +#define M_MAPMEM 15 /* mapped memory descriptors */ +#define M_CRED 16 /* credentials */ +#define M_PGRP 17 /* process group header */ +#define M_SESSION 18 /* session header */ +#define M_IOV32 19 /* large iov's for 32 bit process */ +#define M_MOUNT 20 /* vfs mount struct */ +#define M_FHANDLE 21 /* network file handle */ +#define M_NFSREQ 22 /* NFS request header */ +#define M_NFSMNT 23 /* NFS mount structure */ +#define M_NFSNODE 24 /* NFS vnode private part */ +#define M_VNODE 25 /* Dynamically allocated vnodes */ +#define M_CACHE 26 /* Dynamically allocated cache entries */ +#define M_DQUOT 27 /* UFS quota entries */ +#define M_PROC_UUID_POLICY 28 /* proc UUID policy entries */ +#define M_SHM 29 /* SVID compatible shared memory segments */ +#define M_PLIMIT 30 /* plimit structures */ +#define M_SIGACTS 31 /* sigacts structures */ +#define M_VMOBJ 32 /* VM object structure */ +#define M_VMOBJHASH 33 /* VM object hash structure */ +#define M_VMPMAP 34 /* VM pmap */ +#define M_VMPVENT 35 /* VM phys-virt mapping entry */ +#define M_VMPAGER 36 /* XXX: VM pager struct */ +#define M_VMPGDATA 37 /* XXX: VM pager private data */ +#define M_FILEPROC 38 /* Open file structure */ +#define M_FILEDESC 39 /* Open file descriptor table */ +#define M_LOCKF 40 /* Byte-range locking structures */ +#define M_PROC 41 /* Proc structures */ +#define M_PSTATS 42 /* pstats proc sub-structures */ +#define M_SEGMENT 43 /* Segment for LFS */ +#define M_LFSNODE 44 /* LFS vnode private part */ +#define M_FFSNODE 45 /* FFS vnode private part */ +#define M_MFSNODE 46 /* MFS vnode private part */ +#define M_NQLEASE 47 /* XXX: Nqnfs lease */ +#define M_NQMHOST 48 /* XXX: Nqnfs host address table */ +#define M_NETADDR 49 /* Export host address structure */ +#define M_NFSSVC 50 /* NFS server structure */ +#define M_NFSUID 51 /* XXX: NFS uid mapping structure */ +#define M_NFSD 52 /* NFS server daemon structure */ +#define M_IPMOPTS 53 /* internet multicast options */ +#define M_IPMADDR 54 /* internet multicast address */ +#define M_IFMADDR 55 /* link-level multicast address 
*/ +#define M_MRTABLE 56 /* multicast routing tables */ +#define M_ISOFSMNT 57 /* ISOFS mount structure */ +#define M_ISOFSNODE 58 /* ISOFS vnode private part */ +#define M_NFSRVDESC 59 /* NFS server socket descriptor */ +#define M_NFSDIROFF 60 /* NFS directory offset data */ +#define M_NFSBIGFH 61 /* NFS version 3 file handle */ +#define M_MSDOSFSMNT 62 /* MSDOS FS mount structure */ +#define M_MSDOSFSFAT 63 /* MSDOS FS fat table */ +#define M_MSDOSFSNODE 64 /* MSDOS FS vnode private part */ +#define M_TTYS 65 /* allocated tty structures */ +#define M_EXEC 66 /* argument lists & other mem used by exec */ +#define M_MISCFSMNT 67 /* miscfs mount structures */ +#define M_MISCFSNODE 68 /* miscfs vnode private part */ +#define M_ADOSFSMNT 69 /* adosfs mount structures */ +#define M_ADOSFSNODE 70 /* adosfs vnode private part */ +#define M_ANODE 71 /* adosfs anode structures and tables. */ +#define M_BUFHDR 72 /* File buffer cache headers */ +#define M_OFILETABL 73 /* Open file descriptor table */ +#define M_MCLUST 74 /* mbuf cluster buffers */ /* unused 75 */ /* unused 76 */ /* unused 77 */ /* unused 78 */ /* unused 79 */ -#define M_TEMP 80 /* misc temporary data buffers */ -#define M_SECA 81 /* security associations, key management */ -#define M_DEVFS 82 -#define M_IPFW 83 /* IP Forwarding/NAT */ -#define M_UDFNODE 84 /* UDF inodes */ -#define M_UDFMNT 85 /* UDF mount structures */ -#define M_IP6NDP 86 /* IPv6 Neighbour Discovery*/ -#define M_IP6OPT 87 /* IPv6 options management */ -#define M_IP6MISC 88 /* IPv6 misc. memory */ +#define M_TEMP 80 /* misc temporary data buffers */ +#define M_SECA 81 /* security associations, key management */ +#define M_DEVFS 82 +#define M_IPFW 83 /* IP Forwarding/NAT */ +#define M_UDFNODE 84 /* UDF inodes */ +#define M_UDFMNT 85 /* UDF mount structures */ +#define M_IP6NDP 86 /* IPv6 Neighbour Discovery*/ +#define M_IP6OPT 87 /* IPv6 options management */ +#define M_IP6MISC 88 /* IPv6 misc. 
memory */ /* unused 89 */ -#define M_IGMP 90 +#define M_IGMP 90 /* unused 91 */ /* unused 92 */ -#define M_SPECINFO 93 /* special file node */ -#define M_KQUEUE 94 /* kqueue system */ +#define M_SPECINFO 93 /* special file node */ +#define M_KQUEUE 94 /* kqueue system */ /* unused 95 */ -#define M_CLRDAHEAD 96 /* storage for cluster read-ahead state */ -#define M_CLWRBEHIND 97 /* storage for cluster write-behind state */ -#define M_IOV64 98 /* large iov's for 64 bit process */ -#define M_FILEGLOB 99 /* fileglobal */ -#define M_KAUTH 100 /* kauth subsystem */ -#define M_DUMMYNET 101 /* dummynet */ +#define M_CLRDAHEAD 96 /* storage for cluster read-ahead state */ +#define M_CLWRBEHIND 97 /* storage for cluster write-behind state */ +#define M_IOV64 98 /* large iov's for 64 bit process */ +#define M_FILEGLOB 99 /* fileglobal */ +#define M_KAUTH 100 /* kauth subsystem */ +#define M_DUMMYNET 101 /* dummynet */ /* M_UNSAFEFS 102 */ -#define M_MACPIPELABEL 103 /* MAC pipe labels */ -#define M_MACTEMP 104 /* MAC framework */ -#define M_SBUF 105 /* string buffers */ -#define M_EXTATTR 106 /* extended attribute */ -#define M_SELECT 107 /* per-thread select memory */ +#define M_MACPIPELABEL 103 /* MAC pipe labels */ +#define M_MACTEMP 104 /* MAC framework */ +#define M_SBUF 105 /* string buffers */ +#define M_EXTATTR 106 /* extended attribute */ +#define M_SELECT 107 /* per-thread select memory */ /* M_TRAFFIC_MGT 108 */ #if FS_COMPRESSION -#define M_DECMPFS_CNODE 109 /* decmpfs cnode structures */ +#define M_DECMPFS_CNODE 109 /* decmpfs cnode structures */ #endif /* FS_COMPRESSION */ -#define M_INMFILTER 110 /* IPv4 multicast PCB-layer source filter */ -#define M_IPMSOURCE 111 /* IPv4 multicast IGMP-layer source filter */ -#define M_IN6MFILTER 112 /* IPv6 multicast PCB-layer source filter */ -#define M_IP6MOPTS 113 /* IPv6 multicast options */ -#define M_IP6MSOURCE 114 /* IPv6 multicast MLD-layer source filter */ -#define M_FLOW_DIVERT_PCB 115 /* flow divert control block */ -#define M_FLOW_DIVERT_GROUP 116 /* flow divert socket group */ -#define M_IP6CGA 117 -#define M_NECP 118 /* General NECP policy data */ +#define M_INMFILTER 110 /* IPv4 multicast PCB-layer source filter */ +#define M_IPMSOURCE 111 /* IPv4 multicast IGMP-layer source filter */ +#define M_IN6MFILTER 112 /* IPv6 multicast PCB-layer source filter */ +#define M_IP6MOPTS 113 /* IPv6 multicast options */ +#define M_IP6MSOURCE 114 /* IPv6 multicast MLD-layer source filter */ +#define M_FLOW_DIVERT_PCB 115 /* flow divert control block */ +#define M_FLOW_DIVERT_GROUP 116 /* flow divert socket group */ +#define M_IP6CGA 117 +#define M_NECP 118 /* General NECP policy data */ #define M_NECP_SESSION_POLICY 119 /* NECP session policies */ #define M_NECP_SOCKET_POLICY 120 /* NECP socket-level policies */ #define M_NECP_IP_POLICY 121 /* NECP IP-level policies */ -#define M_FD_VN_DATA 122 /* Per fd vnode data */ -#define M_FD_DIRBUF 123 /* Directory entries' buffer */ -#define M_NETAGENT 124 /* Network Agents */ -#define M_EVENTHANDLER 125 /* Eventhandler */ -#define M_LLTABLE 126 /* Link layer table */ -#define M_NWKWQ 127 /* Network work queue */ +#define M_FD_VN_DATA 122 /* Per fd vnode data */ +#define M_FD_DIRBUF 123 /* Directory entries' buffer */ +#define M_NETAGENT 124 /* Network Agents */ +#define M_EVENTHANDLER 125 /* Eventhandler */ +#define M_LLTABLE 126 /* Link layer table */ +#define M_NWKWQ 127 /* Network work queue */ #define M_CFIL 128 /* Content Filter */ -#define M_LAST 129 /* Must be last type + 1 */ +#define M_LAST 129 /* 
Must be last type + 1 */ #else /* BSD_KERNEL_PRIVATE */ -#define M_RTABLE 5 /* routing tables */ -#define M_IFADDR 9 /* interface address (IOFireWireIP)*/ -#define M_LOCKF 40 /* Byte-range locking structures (msdos) */ -#define M_TEMP 80 /* misc temporary data buffers */ -#define M_KAUTH 100 /* kauth subsystem (smb) */ -#define M_SONAME 11 /* socket name (smb) */ -#define M_PCB 4 /* protocol control block (smb) */ -#define M_UDFNODE 84 /* UDF inodes (udf)*/ -#define M_UDFMNT 85 /* UDF mount structures (udf)*/ +#define M_RTABLE 5 /* routing tables */ +#define M_IFADDR 9 /* interface address (IOFireWireIP)*/ +#define M_LOCKF 40 /* Byte-range locking structures (msdos) */ +#define M_TEMP 80 /* misc temporary data buffers */ +#define M_KAUTH 100 /* kauth subsystem (smb) */ +#define M_SONAME 11 /* socket name (smb) */ +#define M_PCB 4 /* protocol control block (smb) */ +#define M_UDFNODE 84 /* UDF inodes (udf)*/ +#define M_UDFMNT 85 /* UDF mount structures (udf)*/ #endif /* BSD_KERNEL_PRIVATE */ @@ -242,16 +242,16 @@ #if KMEMSTATS struct kmemstats { - long ks_inuse; /* # of packets of this type currently - * in use */ - long ks_calls; /* total packets of this type ever allocated */ - long ks_memuse; /* total memory held in bytes */ - u_short ks_limblocks; /* number of times blocked for hitting limit */ - u_short ks_mapblocks; /* number of times blocked for kernel map */ - long ks_maxused; /* maximum number ever used */ - long ks_limit; /* most that are allowed to exist */ - long ks_size; /* sizes of this thing that are allocated */ - long ks_spare; + long ks_inuse; /* # of packets of this type currently + * in use */ + long ks_calls; /* total packets of this type ever allocated */ + long ks_memuse; /* total memory held in bytes */ + u_short ks_limblocks; /* number of times blocked for hitting limit */ + u_short ks_mapblocks; /* number of times blocked for kernel map */ + long ks_maxused; /* maximum number ever used */ + long ks_limit; /* most that are allowed to exist */ + long ks_size; /* sizes of this thing that are allocated */ + long ks_spare; }; extern struct kmemstats kmemstats[]; @@ -267,71 +267,90 @@ extern struct kmemstats kmemstats[]; #include -#define MALLOC(space, cast, size, type, flags) \ +#define MALLOC(space, cast, size, type, flags) \ ({ VM_ALLOC_SITE_STATIC(0, 0); \ (space) = (cast)__MALLOC(size, type, flags, &site); }) -#define REALLOC(space, cast, addr, size, type, flags) \ +#define REALLOC(space, cast, addr, size, type, flags) \ ({ VM_ALLOC_SITE_STATIC(0, 0); \ (space) = (cast)__REALLOC(addr, size, type, flags, &site); }) -#define _MALLOC(size, type, flags) \ +#define _MALLOC(size, type, flags) \ ({ VM_ALLOC_SITE_STATIC(0, 0); \ __MALLOC(size, type, flags, &site); }) -#define _REALLOC(addr, size, type, flags) \ +#define _REALLOC(addr, size, type, flags) \ ({ VM_ALLOC_SITE_STATIC(0, 0); \ __REALLOC(addr, size, type, flags, &site); }) -#define _MALLOC_ZONE(size, type, flags) \ +#define _MALLOC_ZONE(size, type, flags) \ ({ VM_ALLOC_SITE_STATIC(0, 0); \ __MALLOC_ZONE(size, type, flags, &site); }) #define FREE(addr, type) \ - _FREE((void *)addr, type) +_Pragma("clang diagnostic push") \ +_Pragma("clang diagnostic ignored \"-Wshadow\"") \ + do { \ + _Static_assert(sizeof (addr) == sizeof (void *) || sizeof (addr) == sizeof (mach_vm_address_t), "addr is not a pointer"); \ + void *__tmp_addr = (void *)addr; \ + int __tmp_type = type; \ + addr = (typeof(addr)) NULL; \ + _FREE(__tmp_addr, __tmp_type); \ + } while (0) \ +_Pragma("clang diagnostic pop") #define MALLOC_ZONE(space, cast, 
size, type, flags) \ (space) = (cast)_MALLOC_ZONE(size, type, flags) #define FREE_ZONE(addr, size, type) \ - _FREE_ZONE((void *)addr, size, type) - -extern void *__MALLOC( - size_t size, - int type, - int flags, - vm_allocation_site_t *site) __attribute__((alloc_size(1))); - -extern void _FREE( - void *addr, - int type); - -extern void *__REALLOC( - void *addr, - size_t size, - int type, - int flags, - vm_allocation_site_t *site) __attribute__((alloc_size(2))); - -extern void *__MALLOC_ZONE( - size_t size, - int type, - int flags, - vm_allocation_site_t *site); - -extern void _FREE_ZONE( - void *elem, - size_t size, - int type); +_Pragma("clang diagnostic push") \ +_Pragma("clang diagnostic ignored \"-Wshadow\"") \ + do { \ + _Static_assert(sizeof (addr) == sizeof (void *) || sizeof (addr) == sizeof (mach_vm_address_t), "addr is not a pointer"); \ + void *__tmp_addr = (void *)addr; \ + size_t __tmp_size = size; \ + int __tmp_type = type; \ + addr = (typeof(addr)) NULL; \ + _FREE_ZONE(__tmp_addr, __tmp_size, __tmp_type); \ + } while (0) \ +_Pragma("clang diagnostic pop") + +extern void *__MALLOC( + size_t size, + int type, + int flags, + vm_allocation_site_t *site) __attribute__((alloc_size(1))); + +extern void _FREE( + void *addr, + int type); + +extern void *__REALLOC( + void *addr, + size_t size, + int type, + int flags, + vm_allocation_site_t *site) __attribute__((alloc_size(2))); + +extern void *__MALLOC_ZONE( + size_t size, + int type, + int flags, + vm_allocation_site_t *site); + +extern void _FREE_ZONE( + void *elem, + size_t size, + int type); #else /* XNU_KERNEL_PRIVATE */ -#define MALLOC(space, cast, size, type, flags) \ +#define MALLOC(space, cast, size, type, flags) \ (space) = (cast)_MALLOC(size, type, flags) #define FREE(addr, type) \ _FREE((void *)addr, type) -#define REALLOC(space, cast, addr, size, type, flags) \ +#define REALLOC(space, cast, addr, size, type, flags) \ (space) = (cast)_REALLOC(addr, size, type, flags) #define MALLOC_ZONE(space, cast, size, type, flags) \ @@ -340,34 +359,34 @@ extern void _FREE_ZONE( #define FREE_ZONE(addr, size, type) \ _FREE_ZONE((void *)addr, size, type) -extern void *_MALLOC( - size_t size, - int type, - int flags); +extern void *_MALLOC( + size_t size, + int type, + int flags); -extern void _FREE( - void *addr, - int type); +extern void _FREE( + void *addr, + int type); -extern void *_REALLOC( - void *addr, - size_t size, - int type, - int flags); +extern void *_REALLOC( + void *addr, + size_t size, + int type, + int flags); -extern void *_MALLOC_ZONE( - size_t size, - int type, - int flags); +extern void *_MALLOC_ZONE( + size_t size, + int type, + int flags); -extern void _FREE_ZONE( - void *elem, - size_t size, - int type); +extern void _FREE_ZONE( + void *elem, + size_t size, + int type); #endif /* !XNU_KERNEL_PRIVATE */ -#endif /* KERNEL */ +#endif /* KERNEL */ -#endif /* _SYS_MALLOC_H_ */ +#endif /* _SYS_MALLOC_H_ */ diff --git a/bsd/sys/mbuf.h b/bsd/sys/mbuf.h index b89ee1459..afa2424a8 100644 --- a/bsd/sys/mbuf.h +++ b/bsd/sys/mbuf.h @@ -76,8 +76,8 @@ * Version 2.0. */ -#ifndef _SYS_MBUF_H_ -#define _SYS_MBUF_H_ +#ifndef _SYS_MBUF_H_ +#define _SYS_MBUF_H_ #include #include @@ -106,29 +106,29 @@ * that are outside of xnu must use the mbuf_get_{mlen,mhlen} routines since * the sizes of the structures are dependent upon specific xnu configs. 
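Before moving on to mbufs, a usage sketch of the allocation macros above. Under XNU_KERNEL_PRIVATE the reworked FREE() now also resets its argument to NULL, so a stale pointer cannot be reused or double-freed with its old value; struct widget is a hypothetical type, and M_TEMP, M_WAITOK, and M_ZERO come from the definitions earlier in this header.

struct widget { int w_id; };    /* hypothetical payload type */

static void
widget_demo(void)
{
	struct widget *w = NULL;

	MALLOC(w, struct widget *, sizeof(*w), M_TEMP, M_WAITOK | M_ZERO);
	if (w != NULL) {
		w->w_id = 1;
		FREE(w, M_TEMP);        /* w is NULL after this expands */
	}
}

Because the new FREE() expands to a do/while statement, it can no longer be used as a subexpression; that is the price of the pointer-poisoning.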
*/ -#define _MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */ -#define _MHLEN (_MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */ +#define _MLEN (MSIZE - sizeof(struct m_hdr)) /* normal data len */ +#define _MHLEN (_MLEN - sizeof(struct pkthdr)) /* data len w/pkthdr */ -#define NMBPGSHIFT (PAGE_SHIFT - MSIZESHIFT) -#define NMBPG (1 << NMBPGSHIFT) /* # of mbufs per page */ +#define NMBPGSHIFT (PAGE_SHIFT - MSIZESHIFT) +#define NMBPG (1 << NMBPGSHIFT) /* # of mbufs per page */ -#define NCLPGSHIFT (PAGE_SHIFT - MCLSHIFT) -#define NCLPG (1 << NCLPGSHIFT) /* # of cl per page */ +#define NCLPGSHIFT (PAGE_SHIFT - MCLSHIFT) +#define NCLPG (1 << NCLPGSHIFT) /* # of cl per page */ -#define NBCLPGSHIFT (PAGE_SHIFT - MBIGCLSHIFT) -#define NBCLPG (1 << NBCLPGSHIFT) /* # of big cl per page */ +#define NBCLPGSHIFT (PAGE_SHIFT - MBIGCLSHIFT) +#define NBCLPG (1 << NBCLPGSHIFT) /* # of big cl per page */ -#define NMBPCLSHIFT (MCLSHIFT - MSIZESHIFT) -#define NMBPCL (1 << NMBPCLSHIFT) /* # of mbufs per cl */ +#define NMBPCLSHIFT (MCLSHIFT - MSIZESHIFT) +#define NMBPCL (1 << NMBPCLSHIFT) /* # of mbufs per cl */ -#define NCLPJCLSHIFT (M16KCLSHIFT - MCLSHIFT) -#define NCLPJCL (1 << NCLPJCLSHIFT) /* # of cl per jumbo cl */ +#define NCLPJCLSHIFT (M16KCLSHIFT - MCLSHIFT) +#define NCLPJCL (1 << NCLPJCLSHIFT) /* # of cl per jumbo cl */ -#define NCLPBGSHIFT (MBIGCLSHIFT - MCLSHIFT) -#define NCLPBG (1 << NCLPBGSHIFT) /* # of cl per big cl */ +#define NCLPBGSHIFT (MBIGCLSHIFT - MCLSHIFT) +#define NCLPBG (1 << NCLPBGSHIFT) /* # of cl per big cl */ -#define NMBPBGSHIFT (MBIGCLSHIFT - MSIZESHIFT) -#define NMBPBG (1 << NMBPBGSHIFT) /* # of mbufs per big cl */ +#define NMBPBGSHIFT (MBIGCLSHIFT - MSIZESHIFT) +#define NMBPBG (1 << NMBPBGSHIFT) /* # of mbufs per big cl */ /* * Macros for type conversion @@ -136,18 +136,18 @@ * mtodo(m, o) -- Same as above but with offset 'o' into data. * dtom(x) - convert data pointer within mbuf to mbuf pointer (XXX) */ -#define mtod(m, t) ((t)m_mtod(m)) +#define mtod(m, t) ((t)m_mtod(m)) #define mtodo(m, o) ((void *)(mtod(m, uint8_t *) + (o))) -#define dtom(x) m_dtom(x) +#define dtom(x) m_dtom(x) /* header at beginning of each mbuf: */ struct m_hdr { - struct mbuf *mh_next; /* next buffer in chain */ - struct mbuf *mh_nextpkt; /* next chain in queue/record */ - caddr_t mh_data; /* location of data */ - int32_t mh_len; /* amount of data in this mbuf */ - u_int16_t mh_type; /* type of data in this mbuf */ - u_int16_t mh_flags; /* flags; see below */ + struct mbuf *mh_next; /* next buffer in chain */ + struct mbuf *mh_nextpkt; /* next chain in queue/record */ + caddr_t mh_data; /* location of data */ + int32_t mh_len; /* amount of data in this mbuf */ + u_int16_t mh_type; /* type of data in this mbuf */ + u_int16_t mh_flags; /* flags; see below */ #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) /* This is needed because of how _MLEN is defined and used. Ideally, _MLEN * should be defined using the offsetof(struct mbuf, M_dat), since there is @@ -165,21 +165,21 @@ struct m_hdr { * Packet tag structure (see below for details). 
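The NMBPG/NCLPG/NBCLPG family above derives buffers-per-page counts purely by subtracting size shifts, which works only because MSIZE, MCLBYTES, MBIGCLBYTES and the page size are all powers of two. A stand-alone check of the arithmetic, with the shift values assumed (256-byte mbufs and 2 KB clusters are the usual xnu configuration, but as the comment above says, the sizes are config-dependent):

#include <stdio.h>

/* Assumed shifts: 256-byte mbufs, 2 KB clusters, 4 KB big clusters. */
#define MSIZESHIFT	8
#define MCLSHIFT	11
#define MBIGCLSHIFT	12

static void
report(int page_shift)
{
	int nmbpg  = 1 << (page_shift - MSIZESHIFT);	/* mbufs per page */
	int nclpg  = 1 << (page_shift - MCLSHIFT);	/* clusters per page */
	int nbclpg = 1 << (page_shift - MBIGCLSHIFT);	/* big cl per page */

	printf("%2d KB pages: %2d mbufs, %2d clusters, %2d big clusters\n",
	    (1 << page_shift) >> 10, nmbpg, nclpg, nbclpg);
}

int
main(void)
{
	report(12);	/* 4 KB pages, typical x86_64 */
	report(14);	/* 16 KB pages, typical arm64 */
	return 0;
}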
*/ struct m_tag { - u_int64_t m_tag_cookie; /* Error checking */ + u_int64_t m_tag_cookie; /* Error checking */ #ifndef __LP64__ - u_int32_t pad; /* For structure alignment */ + u_int32_t pad; /* For structure alignment */ #endif /* !__LP64__ */ - SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */ - u_int16_t m_tag_type; /* Module specific type */ - u_int16_t m_tag_len; /* Length of data */ - u_int32_t m_tag_id; /* Module ID */ + SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */ + u_int16_t m_tag_type; /* Module specific type */ + u_int16_t m_tag_len; /* Length of data */ + u_int32_t m_tag_id; /* Module ID */ }; -#define M_TAG_ALIGN(len) \ +#define M_TAG_ALIGN(len) \ (P2ROUNDUP(len, sizeof (u_int64_t)) + sizeof (struct m_tag)) -#define M_TAG_VALID_PATTERN 0xfeedfacefeedfaceULL -#define M_TAG_FREE_PATTERN 0xdeadbeefdeadbeefULL +#define M_TAG_VALID_PATTERN 0xfeedfacefeedfaceULL +#define M_TAG_FREE_PATTERN 0xdeadbeefdeadbeefULL /* * Packet tag header structure (at the top of mbuf). Pointers are @@ -187,39 +187,39 @@ struct m_tag { */ struct m_taghdr { #ifndef __LP64__ - u_int32_t pad; /* For structure alignment */ + u_int32_t pad; /* For structure alignment */ #endif /* !__LP64__ */ - u_int64_t refcnt; /* Number of tags in this mbuf */ + u_int64_t refcnt; /* Number of tags in this mbuf */ }; /* * Driver auxiliary metadata tag (KERNEL_TAG_TYPE_DRVAUX). */ struct m_drvaux_tag { - u_int32_t da_family; /* IFNET_FAMILY values */ - u_int32_t da_subfamily; /* IFNET_SUBFAMILY values */ - u_int32_t da_reserved; /* for future */ - u_int32_t da_length; /* length of following data */ + u_int32_t da_family; /* IFNET_FAMILY values */ + u_int32_t da_subfamily; /* IFNET_SUBFAMILY values */ + u_int32_t da_reserved; /* for future */ + u_int32_t da_length; /* length of following data */ }; /* Values for pftag_flags (16-bit wide) */ -#define PF_TAG_GENERATED 0x1 /* pkt generated by PF */ -#define PF_TAG_FRAGCACHE 0x2 -#define PF_TAG_TRANSLATE_LOCALHOST 0x4 +#define PF_TAG_GENERATED 0x1 /* pkt generated by PF */ +#define PF_TAG_FRAGCACHE 0x2 +#define PF_TAG_TRANSLATE_LOCALHOST 0x4 #if PF_ECN -#define PF_TAG_HDR_INET 0x8 /* hdr points to IPv4 */ -#define PF_TAG_HDR_INET6 0x10 /* hdr points to IPv6 */ +#define PF_TAG_HDR_INET 0x8 /* hdr points to IPv4 */ +#define PF_TAG_HDR_INET6 0x10 /* hdr points to IPv6 */ #endif /* PF_ECN */ /* * PF mbuf tag */ struct pf_mtag { - u_int16_t pftag_flags; /* PF_TAG flags */ - u_int16_t pftag_rtableid; /* alternate routing table id */ - u_int16_t pftag_tag; - u_int16_t pftag_routed; + u_int16_t pftag_flags; /* PF_TAG flags */ + u_int16_t pftag_rtableid; /* alternate routing table id */ + u_int16_t pftag_tag; + u_int16_t pftag_routed; #if PF_ECN - void *pftag_hdr; /* saved hdr pos in mbuf, for ECN */ + void *pftag_hdr; /* saved hdr pos in mbuf, for ECN */ #endif /* PF_ECN */ }; @@ -229,44 +229,44 @@ struct pf_mtag { struct tcp_pktinfo { union { struct { - u_int32_t segsz; /* segment size (actual MSS) */ - u_int32_t start_seq; /* start seq of this packet */ + u_int32_t segsz; /* segment size (actual MSS) */ + u_int32_t start_seq; /* start seq of this packet */ pid_t pid; pid_t e_pid; } __tx; struct { - u_int16_t lro_pktlen; /* max seg size encountered */ - u_int8_t lro_npkts; /* # of coalesced TCP pkts */ - u_int8_t lro_timediff; /* time spent in LRO */ + u_int16_t lro_pktlen; /* max seg size encountered */ + u_int8_t lro_npkts; /* # of coalesced TCP pkts */ + u_int8_t lro_timediff; /* time spent in LRO */ } __rx; } __offload; union { - u_int32_t pri; /* send msg 
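M_TAG_ALIGN() above sizes a tag allocation as the payload rounded up to an 8-byte boundary plus the m_tag header, so the payload that follows the header stays 64-bit aligned. P2ROUNDUP is defined elsewhere in xnu; the sketch below uses the usual power-of-two round-up identity and a made-up header size, so treat both as assumptions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of align (align must be a power of 2). */
#define P2ROUNDUP(x, align) (-(-(uintptr_t)(x) & -(uintptr_t)(align)))

/* Stand-in for sizeof (struct m_tag); purely illustrative. */
#define MTAG_HDR_SIZE	24

int
main(void)
{
	for (unsigned len = 0; len <= 16; len += 5) {
		size_t total = P2ROUNDUP(len, sizeof(uint64_t)) + MTAG_HDR_SIZE;
		printf("payload %2u -> allocate %zu bytes\n", len, total);
	}
	/* Sanity-check the round-up identity. */
	assert(P2ROUNDUP(1, 8) == 8 && P2ROUNDUP(8, 8) == 8);
	return 0;
}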
priority */ - u_int32_t seq; /* recv msg sequence # */ + u_int32_t pri; /* send msg priority */ + u_int32_t seq; /* recv msg sequence # */ } __msgattr; -#define tso_segsz proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.segsz -#define tx_start_seq proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.start_seq -#define tx_tcp_pid proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.pid -#define tx_tcp_e_pid proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.e_pid -#define lro_pktlen proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_pktlen -#define lro_npkts proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_npkts -#define lro_elapsed proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_timediff -#define msg_pri proto_mtag.__pr_u.tcp.tm_tcp.__msgattr.pri -#define msg_seq proto_mtag.__pr_u.tcp.tm_tcp.__msgattr.seq +#define tso_segsz proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.segsz +#define tx_start_seq proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.start_seq +#define tx_tcp_pid proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.pid +#define tx_tcp_e_pid proto_mtag.__pr_u.tcp.tm_tcp.__offload.__tx.e_pid +#define lro_pktlen proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_pktlen +#define lro_npkts proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_npkts +#define lro_elapsed proto_mtag.__pr_u.tcp.tm_tcp.__offload.__rx.lro_timediff +#define msg_pri proto_mtag.__pr_u.tcp.tm_tcp.__msgattr.pri +#define msg_seq proto_mtag.__pr_u.tcp.tm_tcp.__msgattr.seq }; /* * MPTCP mbuf tag */ struct mptcp_pktinfo { - u_int64_t mtpi_dsn; /* MPTCP Data Sequence Number */ - u_int32_t mtpi_rel_seq; /* Relative Seq Number */ - u_int16_t mtpi_length; /* Length of mapping */ - u_int16_t mtpi_csum; -#define mp_dsn proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_dsn -#define mp_rseq proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_rel_seq -#define mp_rlen proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_length -#define mp_csum proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_csum + u_int64_t mtpi_dsn; /* MPTCP Data Sequence Number */ + u_int32_t mtpi_rel_seq; /* Relative Seq Number */ + u_int16_t mtpi_length; /* Length of mapping */ + u_int16_t mtpi_csum; +#define mp_dsn proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_dsn +#define mp_rseq proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_rel_seq +#define mp_rlen proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_length +#define mp_csum proto_mtag.__pr_u.tcp.tm_mptcp.mtpi_csum }; /* @@ -277,34 +277,34 @@ struct mptcp_pktinfo { */ struct tcp_mtag { union { - struct tcp_pktinfo tm_tcp; /* TCP and below */ - struct mptcp_pktinfo tm_mptcp; /* MPTCP-TCP only */ + struct tcp_pktinfo tm_tcp; /* TCP and below */ + struct mptcp_pktinfo tm_mptcp; /* MPTCP-TCP only */ }; }; struct udp_mtag { pid_t _pid; pid_t _e_pid; -#define tx_udp_pid proto_mtag.__pr_u.udp._pid -#define tx_udp_e_pid proto_mtag.__pr_u.udp._e_pid +#define tx_udp_pid proto_mtag.__pr_u.udp._pid +#define tx_udp_e_pid proto_mtag.__pr_u.udp._e_pid }; struct rawip_mtag { pid_t _pid; pid_t _e_pid; -#define tx_rawip_pid proto_mtag.__pr_u.rawip._pid -#define tx_rawip_e_pid proto_mtag.__pr_u.rawip._e_pid +#define tx_rawip_pid proto_mtag.__pr_u.rawip._pid +#define tx_rawip_e_pid proto_mtag.__pr_u.rawip._e_pid }; struct driver_mtag_ { - uintptr_t _drv_tx_compl_arg; - uintptr_t _drv_tx_compl_data; - kern_return_t _drv_tx_status; - uint16_t _drv_flowid; -#define drv_tx_compl_arg builtin_mtag._drv_mtag._drv_tx_compl_arg -#define drv_tx_compl_data builtin_mtag._drv_mtag._drv_tx_compl_data -#define drv_tx_status builtin_mtag._drv_mtag._drv_tx_status -#define drv_flowid builtin_mtag._drv_mtag._drv_flowid + uintptr_t _drv_tx_compl_arg; + uintptr_t _drv_tx_compl_data; + 
kern_return_t _drv_tx_status; + uint16_t _drv_flowid; +#define drv_tx_compl_arg builtin_mtag._drv_mtag._drv_tx_compl_arg +#define drv_tx_compl_data builtin_mtag._drv_mtag._drv_tx_compl_data +#define drv_tx_status builtin_mtag._drv_mtag._drv_tx_status +#define drv_flowid builtin_mtag._drv_mtag._drv_flowid }; /* @@ -317,9 +317,9 @@ struct driver_mtag_ { */ struct proto_mtag_ { union { - struct tcp_mtag tcp; /* TCP specific */ - struct udp_mtag udp; /* UDP specific */ - struct rawip_mtag rawip; /* raw IPv4/IPv6 specific */ + struct tcp_mtag tcp; /* TCP specific */ + struct udp_mtag udp; /* UDP specific */ + struct rawip_mtag rawip; /* raw IPv4/IPv6 specific */ } __pr_u; }; @@ -327,17 +327,17 @@ struct proto_mtag_ { * NECP specific mbuf tag. */ struct necp_mtag_ { - u_int32_t necp_policy_id; - u_int32_t necp_skip_policy_id; - u_int32_t necp_route_rule_id; - u_int16_t necp_last_interface_index; - u_int16_t necp_app_id; + u_int32_t necp_policy_id; + u_int32_t necp_skip_policy_id; + u_int32_t necp_route_rule_id; + u_int16_t necp_last_interface_index; + u_int16_t necp_app_id; }; union builtin_mtag { struct { - struct proto_mtag_ _proto_mtag; /* built-in protocol-specific tag */ - struct pf_mtag _pf_mtag; /* built-in PF tag */ + struct proto_mtag_ _proto_mtag; /* built-in protocol-specific tag */ + struct pf_mtag _pf_mtag; /* built-in PF tag */ struct necp_mtag_ _necp_mtag; /* built-in NECP tag */ } _net_mtag; struct driver_mtag_ _drv_mtag; @@ -350,33 +350,33 @@ union builtin_mtag { * Record/packet header in first mbuf of chain; valid only if M_PKTHDR set. */ struct pkthdr { - struct ifnet *rcvif; /* rcv interface */ + struct ifnet *rcvif; /* rcv interface */ /* variables for ip and tcp reassembly */ - void *pkt_hdr; /* pointer to packet header */ - int32_t len; /* total packet length */ + void *pkt_hdr; /* pointer to packet header */ + int32_t len; /* total packet length */ /* variables for hardware checksum */ /* Note: csum_flags is used for hardware checksum and VLAN */ - u_int32_t csum_flags; /* flags regarding checksum */ + u_int32_t csum_flags; /* flags regarding checksum */ union { struct { - u_int16_t val; /* checksum value */ + u_int16_t val; /* checksum value */ u_int16_t start; /* checksum start offset */ } _csum_rx; -#define csum_rx_val _csum_rx.val -#define csum_rx_start _csum_rx.start +#define csum_rx_val _csum_rx.val +#define csum_rx_start _csum_rx.start struct { u_int16_t start; /* checksum start offset */ u_int16_t stuff; /* checksum stuff offset */ } _csum_tx; -#define csum_tx_start _csum_tx.start -#define csum_tx_stuff _csum_tx.stuff +#define csum_tx_start _csum_tx.start +#define csum_tx_stuff _csum_tx.stuff /* * Generic data field used by csum routines. * It gets used differently in different contexts. */ u_int32_t csum_data; }; - u_int16_t vlan_tag; /* VLAN tag, host byte order */ + u_int16_t vlan_tag; /* VLAN tag, host byte order */ /* * Packet classifier info * @@ -406,34 +406,34 @@ struct pkthdr { * to achieve this. For now, we will just rely on the address family * related code paths examining this mbuf to interpret the flags. 
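In struct pkthdr above, the receive-side checksum fields (csum_rx_val, csum_rx_start) and the transmit-side ones (csum_tx_start, csum_tx_stuff) share one anonymous union: a given packet is only ever on one path at a time, and csum_flags records which view is live. A compressed illustration of the idiom, with field names borrowed from the header and a single flag bit standing in for the full CSUM_* set:

#include <stdint.h>
#include <stdio.h>

#define CSUM_DATA_VALID	0x0400	/* RX: checksum fields hold a value */

struct pkt {
	uint32_t csum_flags;
	union {
		struct { uint16_t val;   uint16_t start; } csum_rx;
		struct { uint16_t start; uint16_t stuff; } csum_tx;
	};
};

int
main(void)
{
	struct pkt rx = { .csum_flags = CSUM_DATA_VALID,
	    .csum_rx = { .val = 0xbeef, .start = 14 } };
	struct pkt tx = { .csum_flags = 0,
	    .csum_tx = { .start = 34, .stuff = 50 } };

	if (rx.csum_flags & CSUM_DATA_VALID)	/* RX view is live */
		printf("rx csum 0x%04x from offset %u\n",
		    rx.csum_rx.val, rx.csum_rx.start);
	printf("tx: checksum starts at %u, stuff offset %u\n",
	    tx.csum_tx.start, tx.csum_tx.stuff);
	return 0;
}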
*/ - u_int8_t pkt_proto; /* IPPROTO value */ - u_int8_t pkt_flowsrc; /* FLOWSRC values */ - u_int32_t pkt_flowid; /* flow ID */ - u_int32_t pkt_flags; /* PKTF flags (see below) */ - u_int32_t pkt_svc; /* MBUF_SVC value */ + u_int8_t pkt_proto; /* IPPROTO value */ + u_int8_t pkt_flowsrc; /* FLOWSRC values */ + u_int32_t pkt_flowid; /* flow ID */ + u_int32_t pkt_flags; /* PKTF flags (see below) */ + u_int32_t pkt_svc; /* MBUF_SVC value */ - u_int32_t pkt_compl_context; /* Packet completion context */ + u_int32_t pkt_compl_context; /* Packet completion context */ union { struct { - u_int16_t src; /* ifindex of src addr i/f */ - u_int16_t src_flags; /* src PKT_IFAIFF flags */ - u_int16_t dst; /* ifindex of dst addr i/f */ - u_int16_t dst_flags; /* dst PKT_IFAIFF flags */ + u_int16_t src; /* ifindex of src addr i/f */ + u_int16_t src_flags; /* src PKT_IFAIFF flags */ + u_int16_t dst; /* ifindex of dst addr i/f */ + u_int16_t dst_flags; /* dst PKT_IFAIFF flags */ } _pkt_iaif; -#define src_ifindex _pkt_iaif.src -#define src_iff _pkt_iaif.src_flags -#define dst_ifindex _pkt_iaif.dst -#define dst_iff _pkt_iaif.dst_flags - u_int64_t pkt_ifainfo; /* data field used by ifainfo */ +#define src_ifindex _pkt_iaif.src +#define src_iff _pkt_iaif.src_flags +#define dst_ifindex _pkt_iaif.dst +#define dst_iff _pkt_iaif.dst_flags + u_int64_t pkt_ifainfo; /* data field used by ifainfo */ struct { u_int32_t if_data; /* bytes in interface queue */ u_int32_t sndbuf_data; /* bytes in socket buffer */ - } _pkt_bsr; /* Buffer status report used by cellular interface */ -#define bufstatus_if _pkt_bsr.if_data -#define bufstatus_sndbuf _pkt_bsr.sndbuf_data + } _pkt_bsr; /* Buffer status report used by cellular interface */ +#define bufstatus_if _pkt_bsr.if_data +#define bufstatus_sndbuf _pkt_bsr.sndbuf_data }; - u_int64_t pkt_timestamp; /* enqueue time */ + u_int64_t pkt_timestamp; /* enqueue time */ /* * Tags (external and built-in) @@ -448,25 +448,25 @@ struct pkthdr { */ struct { union { - u_int8_t __mpriv8[16]; - u_int16_t __mpriv16[8]; + u_int8_t __mpriv8[16]; + u_int16_t __mpriv16[8]; struct { union { - u_int8_t __val8[4]; - u_int16_t __val16[2]; - u_int32_t __val32; + u_int8_t __val8[4]; + u_int16_t __val16[2]; + u_int32_t __val32; } __mpriv32_u; - } __mpriv32[4]; - u_int64_t __mpriv64[2]; + } __mpriv32[4]; + u_int64_t __mpriv64[2]; } __mpriv_u; } pkt_mpriv __attribute__((aligned(4))); -#define pkt_mpriv_hash pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val32 -#define pkt_mpriv_flags pkt_mpriv.__mpriv_u.__mpriv32[1].__mpriv32_u.__val32 -#define pkt_mpriv_srcid pkt_mpriv.__mpriv_u.__mpriv32[2].__mpriv32_u.__val32 -#define pkt_mpriv_fidx pkt_mpriv.__mpriv_u.__mpriv32[3].__mpriv32_u.__val32 +#define pkt_mpriv_hash pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val32 +#define pkt_mpriv_flags pkt_mpriv.__mpriv_u.__mpriv32[1].__mpriv32_u.__val32 +#define pkt_mpriv_srcid pkt_mpriv.__mpriv_u.__mpriv32[2].__mpriv32_u.__val32 +#define pkt_mpriv_fidx pkt_mpriv.__mpriv_u.__mpriv32[3].__mpriv32_u.__val32 - u_int32_t redzone; /* red zone */ - u_int32_t pkt_compl_callbacks; /* Packet completion callbacks */ + u_int32_t redzone; /* red zone */ + u_int32_t pkt_compl_callbacks; /* Packet completion callbacks */ }; /* @@ -476,10 +476,10 @@ struct pkthdr { * to identify the data source object and inform that it can resume its * transmission (in the event it was flow controlled.) 
*/ -#define FLOWSRC_INPCB 1 /* flow ID generated by INPCB */ -#define FLOWSRC_IFNET 2 /* flow ID generated by interface */ -#define FLOWSRC_PF 3 /* flow ID generated by PF */ -#define FLOWSRC_CHANNEL 4 /* flow ID generated by channel */ +#define FLOWSRC_INPCB 1 /* flow ID generated by INPCB */ +#define FLOWSRC_IFNET 2 /* flow ID generated by interface */ +#define FLOWSRC_PF 3 /* flow ID generated by PF */ +#define FLOWSRC_CHANNEL 4 /* flow ID generated by channel */ /* * Packet flags. Unlike m_flags, all packet flags are copied along when @@ -502,41 +502,41 @@ struct pkthdr { * if the packet was looped back to the system. This flag should be * used instead for newer code. */ -#define PKTF_FLOW_ID 0x1 /* pkt has valid flowid value */ -#define PKTF_FLOW_ADV 0x2 /* pkt triggers local flow advisory */ -#define PKTF_FLOW_LOCALSRC 0x4 /* pkt is locally originated */ -#define PKTF_FLOW_RAWSOCK 0x8 /* pkt locally generated by raw sock */ -#define PKTF_PRIO_PRIVILEGED 0x10 /* packet priority is privileged */ -#define PKTF_PROXY_DST 0x20 /* processed but not locally destined */ -#define PKTF_INET_RESOLVE 0x40 /* IPv4 resolver packet */ -#define PKTF_INET6_RESOLVE 0x80 /* IPv6 resolver packet */ -#define PKTF_RESOLVE_RTR 0x100 /* pkt is for resolving router */ -#define PKTF_SW_LRO_PKT 0x200 /* pkt is a large coalesced pkt */ -#define PKTF_SW_LRO_DID_CSUM 0x400 /* IP and TCP checksums done by LRO */ -#define PKTF_MPTCP 0x800 /* TCP with MPTCP metadata */ -#define PKTF_MPSO 0x1000 /* MPTCP socket meta data */ -#define PKTF_LOOP 0x2000 /* loopbacked packet */ -#define PKTF_IFAINFO 0x4000 /* pkt has valid interface addr info */ -#define PKTF_SO_BACKGROUND 0x8000 /* data is from background source */ -#define PKTF_FORWARDED 0x10000 /* pkt was forwarded from another i/f */ -#define PKTF_PRIV_GUARDED 0x20000 /* pkt_mpriv area guard enabled */ -#define PKTF_KEEPALIVE 0x40000 /* pkt is kernel-generated keepalive */ -#define PKTF_SO_REALTIME 0x80000 /* data is realtime traffic */ -#define PKTF_VALID_UNSENT_DATA 0x100000 /* unsent data is valid */ -#define PKTF_TCP_REXMT 0x200000 /* packet is TCP retransmission */ -#define PKTF_REASSEMBLED 0x400000 /* Packet was reassembled */ -#define PKTF_TX_COMPL_TS_REQ 0x800000 /* tx completion timestamp requested */ -#define PKTF_TS_VALID 0x1000000 /* pkt timestamp is valid */ -#define PKTF_DRIVER_MTAG 0x2000000 /* driver mbuf tags fields inited */ -#define PKTF_NEW_FLOW 0x4000000 /* Data from a new flow */ -#define PKTF_START_SEQ 0x8000000 /* valid start sequence */ -#define PKTF_LAST_PKT 0x10000000 /* last packet in the flow */ -#define PKTF_MPTCP_REINJ 0x20000000 /* Packet has been reinjected for MPTCP */ -#define PKTF_MPTCP_DFIN 0x40000000 /* Packet is a data-fin */ -#define PKTF_HBH_CHKED 0x80000000 /* HBH option is checked */ +#define PKTF_FLOW_ID 0x1 /* pkt has valid flowid value */ +#define PKTF_FLOW_ADV 0x2 /* pkt triggers local flow advisory */ +#define PKTF_FLOW_LOCALSRC 0x4 /* pkt is locally originated */ +#define PKTF_FLOW_RAWSOCK 0x8 /* pkt locally generated by raw sock */ +#define PKTF_PRIO_PRIVILEGED 0x10 /* packet priority is privileged */ +#define PKTF_PROXY_DST 0x20 /* processed but not locally destined */ +#define PKTF_INET_RESOLVE 0x40 /* IPv4 resolver packet */ +#define PKTF_INET6_RESOLVE 0x80 /* IPv6 resolver packet */ +#define PKTF_RESOLVE_RTR 0x100 /* pkt is for resolving router */ +#define PKTF_SW_LRO_PKT 0x200 /* pkt is a large coalesced pkt */ +#define PKTF_SW_LRO_DID_CSUM 0x400 /* IP and TCP checksums done by LRO */ +#define PKTF_MPTCP 0x800 /* 
TCP with MPTCP metadata */ +#define PKTF_MPSO 0x1000 /* MPTCP socket meta data */ +#define PKTF_LOOP 0x2000 /* loopbacked packet */ +#define PKTF_IFAINFO 0x4000 /* pkt has valid interface addr info */ +#define PKTF_SO_BACKGROUND 0x8000 /* data is from background source */ +#define PKTF_FORWARDED 0x10000 /* pkt was forwarded from another i/f */ +#define PKTF_PRIV_GUARDED 0x20000 /* pkt_mpriv area guard enabled */ +#define PKTF_KEEPALIVE 0x40000 /* pkt is kernel-generated keepalive */ +#define PKTF_SO_REALTIME 0x80000 /* data is realtime traffic */ +#define PKTF_VALID_UNSENT_DATA 0x100000 /* unsent data is valid */ +#define PKTF_TCP_REXMT 0x200000 /* packet is TCP retransmission */ +#define PKTF_REASSEMBLED 0x400000 /* Packet was reassembled */ +#define PKTF_TX_COMPL_TS_REQ 0x800000 /* tx completion timestamp requested */ +#define PKTF_TS_VALID 0x1000000 /* pkt timestamp is valid */ +#define PKTF_DRIVER_MTAG 0x2000000 /* driver mbuf tags fields inited */ +#define PKTF_NEW_FLOW 0x4000000 /* Data from a new flow */ +#define PKTF_START_SEQ 0x8000000 /* valid start sequence */ +#define PKTF_LAST_PKT 0x10000000 /* last packet in the flow */ +#define PKTF_MPTCP_REINJ 0x20000000 /* Packet has been reinjected for MPTCP */ +#define PKTF_MPTCP_DFIN 0x40000000 /* Packet is a data-fin */ +#define PKTF_HBH_CHKED 0x80000000 /* HBH option is checked */ /* flags related to flow control/advisory and identification */ -#define PKTF_FLOW_MASK \ +#define PKTF_FLOW_MASK \ (PKTF_FLOW_ID | PKTF_FLOW_ADV | PKTF_FLOW_LOCALSRC | PKTF_FLOW_RAWSOCK) /* @@ -544,10 +544,10 @@ struct pkthdr { */ typedef void (*m_ext_free_func_t)(caddr_t, u_int, caddr_t); struct m_ext { - caddr_t ext_buf; /* start of buffer */ - m_ext_free_func_t ext_free; /* free routine if not the usual */ - u_int ext_size; /* size of buffer, for ext_free */ - caddr_t ext_arg; /* additional ext_free argument */ + caddr_t ext_buf; /* start of buffer */ + m_ext_free_func_t ext_free; /* free routine if not the usual */ + u_int ext_size; /* size of buffer, for ext_free */ + caddr_t ext_arg; /* additional ext_free argument */ struct ext_ref { struct mbuf *paired; u_int16_t minref; @@ -569,90 +569,90 @@ struct mbuf { struct m_hdr m_hdr; union { struct { - struct pkthdr MH_pkthdr; /* M_PKTHDR set */ + struct pkthdr MH_pkthdr; /* M_PKTHDR set */ union { - struct m_ext MH_ext; /* M_EXT set */ - char MH_databuf[_MHLEN]; + struct m_ext MH_ext; /* M_EXT set */ + char MH_databuf[_MHLEN]; } MH_dat; } MH; - char M_databuf[_MLEN]; /* !M_PKTHDR, !M_EXT */ + char M_databuf[_MLEN]; /* !M_PKTHDR, !M_EXT */ } M_dat; }; -#define m_next m_hdr.mh_next -#define m_len m_hdr.mh_len -#define m_data m_hdr.mh_data -#define m_type m_hdr.mh_type -#define m_flags m_hdr.mh_flags -#define m_nextpkt m_hdr.mh_nextpkt -#define m_act m_nextpkt -#define m_pkthdr M_dat.MH.MH_pkthdr -#define m_ext M_dat.MH.MH_dat.MH_ext -#define m_pktdat M_dat.MH.MH_dat.MH_databuf -#define m_dat M_dat.M_databuf -#define m_pktlen(_m) ((_m)->m_pkthdr.len) -#define m_pftag(_m) (&(_m)->m_pkthdr.builtin_mtag._net_mtag._pf_mtag) +#define m_next m_hdr.mh_next +#define m_len m_hdr.mh_len +#define m_data m_hdr.mh_data +#define m_type m_hdr.mh_type +#define m_flags m_hdr.mh_flags +#define m_nextpkt m_hdr.mh_nextpkt +#define m_act m_nextpkt +#define m_pkthdr M_dat.MH.MH_pkthdr +#define m_ext M_dat.MH.MH_dat.MH_ext +#define m_pktdat M_dat.MH.MH_dat.MH_databuf +#define m_dat M_dat.M_databuf +#define m_pktlen(_m) ((_m)->m_pkthdr.len) +#define m_pftag(_m) (&(_m)->m_pkthdr.builtin_mtag._net_mtag._pf_mtag) /* mbuf flags 
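PKTF_FLOW_MASK above gathers the four flow-related bits so they can be tested or cleared in one operation, the same grouping idiom used later by M_COPYFLAGS and CSUM_TX_FLAGS/CSUM_RX_FLAGS. A short sketch of the usage, with the flag values copied from the list above:

#include <stdint.h>
#include <stdio.h>

#define PKTF_FLOW_ID		0x1
#define PKTF_FLOW_ADV		0x2
#define PKTF_FLOW_LOCALSRC	0x4
#define PKTF_FLOW_RAWSOCK	0x8
#define PKTF_FLOW_MASK \
	(PKTF_FLOW_ID | PKTF_FLOW_ADV | PKTF_FLOW_LOCALSRC | PKTF_FLOW_RAWSOCK)

int
main(void)
{
	uint32_t pkt_flags = PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC;

	if (pkt_flags & PKTF_FLOW_ID)
		printf("flow id is valid\n");

	pkt_flags &= ~PKTF_FLOW_MASK;	/* drop all flow state at once */
	printf("flags after clearing flow bits: 0x%x\n", pkt_flags);
	return 0;
}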
(private) */ -#define M_EXT 0x0001 /* has associated external storage */ -#define M_PKTHDR 0x0002 /* start of record */ -#define M_EOR 0x0004 /* end of record */ -#define M_PROTO1 0x0008 /* protocol-specific */ -#define M_PROTO2 0x0010 /* protocol-specific */ -#define M_PROTO3 0x0020 /* protocol-specific */ -#define M_LOOP 0x0040 /* packet is looped back (also see PKTF_LOOP) */ -#define M_PROTO5 0x0080 /* protocol-specific */ +#define M_EXT 0x0001 /* has associated external storage */ +#define M_PKTHDR 0x0002 /* start of record */ +#define M_EOR 0x0004 /* end of record */ +#define M_PROTO1 0x0008 /* protocol-specific */ +#define M_PROTO2 0x0010 /* protocol-specific */ +#define M_PROTO3 0x0020 /* protocol-specific */ +#define M_LOOP 0x0040 /* packet is looped back (also see PKTF_LOOP) */ +#define M_PROTO5 0x0080 /* protocol-specific */ /* mbuf pkthdr flags, also in m_flags (private) */ -#define M_BCAST 0x0100 /* send/received as link-level broadcast */ -#define M_MCAST 0x0200 /* send/received as link-level multicast */ -#define M_FRAG 0x0400 /* packet is a fragment of a larger packet */ -#define M_FIRSTFRAG 0x0800 /* packet is first fragment */ -#define M_LASTFRAG 0x1000 /* packet is last fragment */ -#define M_PROMISC 0x2000 /* packet is promiscuous (shouldn't go to stack) */ -#define M_HASFCS 0x4000 /* packet has FCS */ -#define M_TAGHDR 0x8000 /* m_tag hdr structure at top of mbuf data */ +#define M_BCAST 0x0100 /* send/received as link-level broadcast */ +#define M_MCAST 0x0200 /* send/received as link-level multicast */ +#define M_FRAG 0x0400 /* packet is a fragment of a larger packet */ +#define M_FIRSTFRAG 0x0800 /* packet is first fragment */ +#define M_LASTFRAG 0x1000 /* packet is last fragment */ +#define M_PROMISC 0x2000 /* packet is promiscuous (shouldn't go to stack) */ +#define M_HASFCS 0x4000 /* packet has FCS */ +#define M_TAGHDR 0x8000 /* m_tag hdr structure at top of mbuf data */ /* * Flags to purge when crossing layers. */ -#define M_PROTOFLAGS \ +#define M_PROTOFLAGS \ (M_PROTO1|M_PROTO2|M_PROTO3|M_PROTO5) /* flags copied when copying m_pkthdr */ -#define M_COPYFLAGS \ - (M_PKTHDR|M_EOR|M_PROTO1|M_PROTO2|M_PROTO3 | \ - M_LOOP|M_PROTO5|M_BCAST|M_MCAST|M_FRAG | \ +#define M_COPYFLAGS \ + (M_PKTHDR|M_EOR|M_PROTO1|M_PROTO2|M_PROTO3 | \ + M_LOOP|M_PROTO5|M_BCAST|M_MCAST|M_FRAG | \ M_FIRSTFRAG|M_LASTFRAG|M_PROMISC|M_HASFCS) /* flags indicating hw checksum support and sw checksum requirements */ -#define CSUM_IP 0x0001 /* will csum IP */ -#define CSUM_TCP 0x0002 /* will csum TCP */ -#define CSUM_UDP 0x0004 /* will csum UDP */ -#define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */ -#define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */ -#define CSUM_TCPIPV6 0x0020 /* will csum TCP for IPv6 */ -#define CSUM_UDPIPV6 0x0040 /* will csum UDP for IPv6 */ -#define CSUM_FRAGMENT_IPV6 0x0080 /* will do IPv6 fragmentation */ - -#define CSUM_IP_CHECKED 0x0100 /* did csum IP */ -#define CSUM_IP_VALID 0x0200 /* ... 
the csum is valid */ -#define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */ -#define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */ -#define CSUM_PARTIAL 0x1000 /* simple Sum16 computation */ -#define CSUM_ZERO_INVERT 0x2000 /* invert 0 to -0 (0xffff) */ - -#define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP) -#define CSUM_DELAY_IP (CSUM_IP) /* IPv4 only: no IPv6 IP cksum */ -#define CSUM_DELAY_IPV6_DATA (CSUM_TCPIPV6 | CSUM_UDPIPV6) -#define CSUM_DATA_IPV6_VALID CSUM_DATA_VALID /* csum_data field is valid */ - -#define CSUM_TX_FLAGS \ - (CSUM_DELAY_IP | CSUM_DELAY_DATA | CSUM_DELAY_IPV6_DATA | \ +#define CSUM_IP 0x0001 /* will csum IP */ +#define CSUM_TCP 0x0002 /* will csum TCP */ +#define CSUM_UDP 0x0004 /* will csum UDP */ +#define CSUM_IP_FRAGS 0x0008 /* will csum IP fragments */ +#define CSUM_FRAGMENT 0x0010 /* will do IP fragmentation */ +#define CSUM_TCPIPV6 0x0020 /* will csum TCP for IPv6 */ +#define CSUM_UDPIPV6 0x0040 /* will csum UDP for IPv6 */ +#define CSUM_FRAGMENT_IPV6 0x0080 /* will do IPv6 fragmentation */ + +#define CSUM_IP_CHECKED 0x0100 /* did csum IP */ +#define CSUM_IP_VALID 0x0200 /* ... the csum is valid */ +#define CSUM_DATA_VALID 0x0400 /* csum_data field is valid */ +#define CSUM_PSEUDO_HDR 0x0800 /* csum_data has pseudo hdr */ +#define CSUM_PARTIAL 0x1000 /* simple Sum16 computation */ +#define CSUM_ZERO_INVERT 0x2000 /* invert 0 to -0 (0xffff) */ + +#define CSUM_DELAY_DATA (CSUM_TCP | CSUM_UDP) +#define CSUM_DELAY_IP (CSUM_IP) /* IPv4 only: no IPv6 IP cksum */ +#define CSUM_DELAY_IPV6_DATA (CSUM_TCPIPV6 | CSUM_UDPIPV6) +#define CSUM_DATA_IPV6_VALID CSUM_DATA_VALID /* csum_data field is valid */ + +#define CSUM_TX_FLAGS \ + (CSUM_DELAY_IP | CSUM_DELAY_DATA | CSUM_DELAY_IPV6_DATA | \ CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_ZERO_INVERT) -#define CSUM_RX_FLAGS \ - (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_PSEUDO_HDR | \ +#define CSUM_RX_FLAGS \ + (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_PSEUDO_HDR | \ CSUM_DATA_VALID | CSUM_PARTIAL) /* @@ -660,48 +660,48 @@ struct mbuf { */ /* VLAN tag present */ -#define CSUM_VLAN_TAG_VALID 0x10000 /* vlan_tag field is valid */ +#define CSUM_VLAN_TAG_VALID 0x10000 /* vlan_tag field is valid */ /* TCP Segment Offloading requested on this mbuf */ -#define CSUM_TSO_IPV4 0x100000 /* This mbuf needs to be segmented by the NIC */ -#define CSUM_TSO_IPV6 0x200000 /* This mbuf needs to be segmented by the NIC */ +#define CSUM_TSO_IPV4 0x100000 /* This mbuf needs to be segmented by the NIC */ +#define CSUM_TSO_IPV6 0x200000 /* This mbuf needs to be segmented by the NIC */ -#define TSO_IPV4_OK(_ifp, _m) \ - (((_ifp)->if_hwassist & IFNET_TSO_IPV4) && \ - ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) \ +#define TSO_IPV4_OK(_ifp, _m) \ + (((_ifp)->if_hwassist & IFNET_TSO_IPV4) && \ + ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) \ -#define TSO_IPV4_NOTOK(_ifp, _m) \ - (!((_ifp)->if_hwassist & IFNET_TSO_IPV4) && \ - ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) \ +#define TSO_IPV4_NOTOK(_ifp, _m) \ + (!((_ifp)->if_hwassist & IFNET_TSO_IPV4) && \ + ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) \ -#define TSO_IPV6_OK(_ifp, _m) \ - (((_ifp)->if_hwassist & IFNET_TSO_IPV6) && \ - ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV6)) \ +#define TSO_IPV6_OK(_ifp, _m) \ + (((_ifp)->if_hwassist & IFNET_TSO_IPV6) && \ + ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV6)) \ -#define TSO_IPV6_NOTOK(_ifp, _m) \ - (!((_ifp)->if_hwassist & IFNET_TSO_IPV6) && \ - ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV6)) \ +#define TSO_IPV6_NOTOK(_ifp, _m) \ + (!((_ifp)->if_hwassist & 
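Each TSO_*_OK/NOTOK pair above combines an interface capability bit from if_hwassist with a per-packet request bit from csum_flags: OK means the NIC will segment the oversized mbuf, NOTOK means the packet asked for TSO on hardware that cannot do it, so the stack must fall back to software segmentation. A sketch with a reduced ifnet and the IPv4 pair only (the IFNET_TSO_IPV4 value here is an assumption for the demo):

#include <stdint.h>
#include <stdio.h>

#define IFNET_TSO_IPV4	0x00200000u	/* assumed capability bit */
#define CSUM_TSO_IPV4	0x100000u

struct ifnet { uint32_t if_hwassist; };
struct pkt   { uint32_t csum_flags; };

/* NIC will segment this packet. */
#define TSO_IPV4_OK(ifp, m) \
	(((ifp)->if_hwassist & IFNET_TSO_IPV4) && \
	((m)->csum_flags & CSUM_TSO_IPV4))
/* Packet wants TSO but the NIC cannot: software fallback required. */
#define TSO_IPV4_NOTOK(ifp, m) \
	(!((ifp)->if_hwassist & IFNET_TSO_IPV4) && \
	((m)->csum_flags & CSUM_TSO_IPV4))

int
main(void)
{
	struct ifnet nic = { IFNET_TSO_IPV4 }, dumb = { 0 };
	struct pkt big = { CSUM_TSO_IPV4 };

	printf("capable nic: ok=%d notok=%d\n",
	    TSO_IPV4_OK(&nic, &big), TSO_IPV4_NOTOK(&nic, &big));
	printf("no-TSO nic:  ok=%d notok=%d\n",
	    TSO_IPV4_OK(&dumb, &big), TSO_IPV4_NOTOK(&dumb, &big));
	return 0;
}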
IFNET_TSO_IPV6) && \ + ((_m)->m_pkthdr.csum_flags & CSUM_TSO_IPV6)) \ #endif /* XNU_KERNEL_PRIVATE */ /* mbuf types */ -#define MT_FREE 0 /* should be on free list */ -#define MT_DATA 1 /* dynamic (data) allocation */ -#define MT_HEADER 2 /* packet header */ -#define MT_SOCKET 3 /* socket structure */ -#define MT_PCB 4 /* protocol control block */ -#define MT_RTABLE 5 /* routing tables */ -#define MT_HTABLE 6 /* IMP host tables */ -#define MT_ATABLE 7 /* address resolution tables */ -#define MT_SONAME 8 /* socket name */ -#define MT_SOOPTS 10 /* socket options */ -#define MT_FTABLE 11 /* fragment reassembly header */ -#define MT_RIGHTS 12 /* access rights */ -#define MT_IFADDR 13 /* interface address */ -#define MT_CONTROL 14 /* extra-data protocol message */ -#define MT_OOBDATA 15 /* expedited data */ -#define MT_TAG 16 /* volatile metadata associated to pkts */ -#define MT_MAX 32 /* enough? */ +#define MT_FREE 0 /* should be on free list */ +#define MT_DATA 1 /* dynamic (data) allocation */ +#define MT_HEADER 2 /* packet header */ +#define MT_SOCKET 3 /* socket structure */ +#define MT_PCB 4 /* protocol control block */ +#define MT_RTABLE 5 /* routing tables */ +#define MT_HTABLE 6 /* IMP host tables */ +#define MT_ATABLE 7 /* address resolution tables */ +#define MT_SONAME 8 /* socket name */ +#define MT_SOOPTS 10 /* socket options */ +#define MT_FTABLE 11 /* fragment reassembly header */ +#define MT_RIGHTS 12 /* access rights */ +#define MT_IFADDR 13 /* interface address */ +#define MT_CONTROL 14 /* extra-data protocol message */ +#define MT_OOBDATA 15 /* expedited data */ +#define MT_TAG 16 /* volatile metadata associated to pkts */ +#define MT_MAX 32 /* enough? */ #ifdef XNU_KERNEL_PRIVATE /* @@ -716,14 +716,14 @@ struct mbuf { */ #if 1 -#define MCHECK(m) m_mcheck(m) +#define MCHECK(m) m_mcheck(m) #else -#define MCHECK(m) +#define MCHECK(m) #endif -#define MGET(m, how, type) ((m) = m_get((how), (type))) +#define MGET(m, how, type) ((m) = m_get((how), (type))) -#define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type))) +#define MGETHDR(m, how, type) ((m) = m_gethdr((how), (type))) /* * Mbuf cluster macros. @@ -738,33 +738,33 @@ struct mbuf { * pointer while on the free list. */ union mcluster { - union mcluster *mcl_next; - char mcl_buf[MCLBYTES]; + union mcluster *mcl_next; + char mcl_buf[MCLBYTES]; }; -#define MCLALLOC(p, how) ((p) = m_mclalloc(how)) +#define MCLALLOC(p, how) ((p) = m_mclalloc(how)) -#define MCLFREE(p) m_mclfree(p) +#define MCLFREE(p) m_mclfree(p) -#define MCLGET(m, how) ((m) = m_mclget(m, how)) +#define MCLGET(m, how) ((m) = m_mclget(m, how)) /* * Mbuf big cluster */ union mbigcluster { - union mbigcluster *mbc_next; - char mbc_buf[MBIGCLBYTES]; + union mbigcluster *mbc_next; + char mbc_buf[MBIGCLBYTES]; }; /* * Mbuf jumbo cluster */ union m16kcluster { - union m16kcluster *m16kcl_next; - char m16kcl_buf[M16KCLBYTES]; + union m16kcluster *m16kcl_next; + char m16kcl_buf[M16KCLBYTES]; }; -#define MCLHASREFERENCE(m) m_mclhasreference(m) +#define MCLHASREFERENCE(m) m_mclhasreference(m) /* * MFREE(struct mbuf *m, struct mbuf *n) @@ -772,25 +772,25 @@ union m16kcluster { * Place the successor, if any, in n. */ -#define MFREE(m, n) ((n) = m_free(m)) +#define MFREE(m, n) ((n) = m_free(m)) /* * Copy mbuf pkthdr from from to to. * from must have M_PKTHDR set, and to must be empty. * aux pointer will be moved to `to'. 
*/ -#define M_COPY_PKTHDR(to, from) m_copy_pkthdr(to, from) +#define M_COPY_PKTHDR(to, from) m_copy_pkthdr(to, from) -#define M_COPY_PFTAG(to, from) m_copy_pftag(to, from) +#define M_COPY_PFTAG(to, from) m_copy_pftag(to, from) -#define M_COPY_CLASSIFIER(to, from) m_copy_classifier(to, from) +#define M_COPY_CLASSIFIER(to, from) m_copy_classifier(to, from) /* * Evaluate TRUE if it's safe to write to the mbuf m's data region (this can * be both the local data payload, or an external buffer area, depending on * whether M_EXT is set). */ -#define M_WRITABLE(m) (((m)->m_flags & M_EXT) == 0 || !MCLHASREFERENCE(m)) +#define M_WRITABLE(m) (((m)->m_flags & M_EXT) == 0 || !MCLHASREFERENCE(m)) /* * These macros are mapped to the appropriate KPIs, so that private code @@ -805,22 +805,22 @@ union m16kcluster { * handling external storage, packet-header mbufs, and regular data mbufs. */ #define M_START(m) \ - (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf : \ - ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] : \ - &(m)->m_dat[0]) + (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_buf : \ + ((m)->m_flags & M_PKTHDR) ? &(m)->m_pktdat[0] : \ + &(m)->m_dat[0]) /* * Return the size of the buffer associated with an mbuf, handling external * storage, packet-header mbufs, and regular data mbufs. */ #define M_SIZE(m) \ - (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size : \ - ((m)->m_flags & M_PKTHDR) ? MHLEN : \ - MLEN) + (((m)->m_flags & M_EXT) ? (m)->m_ext.ext_size : \ + ((m)->m_flags & M_PKTHDR) ? MHLEN : \ + MLEN) -#define M_ALIGN(m, len) m_align(m, len) -#define MH_ALIGN(m, len) m_align(m, len) -#define MEXT_ALIGN(m, len) m_align(m, len) +#define M_ALIGN(m, len) m_align(m, len) +#define MH_ALIGN(m, len) m_align(m, len) +#define MEXT_ALIGN(m, len) m_align(m, len) /* * Compute the amount of space available before the current start of data in @@ -829,7 +829,7 @@ union m16kcluster { * The M_WRITABLE() is a temporary, conservative safety measure: the burden * of checking writability of the mbuf data area rests solely with the caller. */ -#define M_LEADINGSPACE(m) \ +#define M_LEADINGSPACE(m) \ (M_WRITABLE(m) ? ((m)->m_data - M_START(m)) : 0) /* @@ -838,8 +838,8 @@ union m16kcluster { * The M_WRITABLE() is a temporary, conservative safety measure: the burden * of checking writability of the mbuf data area rests solely with the caller. */ -#define M_TRAILINGSPACE(m) \ - (M_WRITABLE(m) ? \ +#define M_TRAILINGSPACE(m) \ + (M_WRITABLE(m) ? \ ((M_START(m) + M_SIZE(m)) - ((m)->m_data + (m)->m_len)) : 0) /* @@ -848,19 +848,19 @@ union m16kcluster { * If how is M_DONTWAIT and allocation fails, the original mbuf chain * is freed and m is set to NULL. */ -#define M_PREPEND(m, plen, how, align) \ +#define M_PREPEND(m, plen, how, align) \ ((m) = m_prepend_2((m), (plen), (how), (align))) /* change mbuf to new type */ -#define MCHTYPE(m, t) m_mchtype(m, t) +#define MCHTYPE(m, t) m_mchtype(m, t) /* compatiblity with 4.3 */ -#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT) +#define m_copy(m, o, l) m_copym((m), (o), (l), M_DONTWAIT) -#define MBSHIFT 20 /* 1MB */ -#define MBSIZE (1 << MBSHIFT) -#define GBSHIFT 30 /* 1GB */ -#define GBSIZE (1 << GBSHIFT) +#define MBSHIFT 20 /* 1MB */ +#define MBSIZE (1 << MBSHIFT) +#define GBSHIFT 30 /* 1GB */ +#define GBSIZE (1 << GBSHIFT) /* * M_STRUCT_GET ensures that intermediate protocol header (from "off" to @@ -871,59 +871,59 @@ union m16kcluster { * M_STRUCT_GET0 does the same, except that it aligns the structure at * very top of mbuf. GET0 is likely to make memory copy than GET. 
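M_LEADINGSPACE() and M_TRAILINGSPACE() above measure the writable slack on either side of the data region, using M_START()/M_SIZE() to locate the enclosing buffer; reserving leading space is what lets each layer prepend its header without copying. A toy flat-buffer model of the same arithmetic (it deliberately ignores M_EXT, M_PKTHDR and the M_WRITABLE guard):

#include <stdio.h>

#define BUFSIZE	256

/* Toy single-buffer "mbuf": data floats inside buf[] so headers can be
 * prepended (leading space) or payload appended (trailing space). */
struct tmbuf {
	char  buf[BUFSIZE];
	char *data;	/* current start of data */
	int   len;	/* bytes of data */
};

#define M_START(m)		((m)->buf)
#define M_SIZE(m)		BUFSIZE
#define M_LEADINGSPACE(m)	((m)->data - M_START(m))
#define M_TRAILINGSPACE(m)	((M_START(m) + M_SIZE(m)) - ((m)->data + (m)->len))

int
main(void)
{
	struct tmbuf m;

	m.data = m.buf + 64;	/* leave room to prepend headers */
	m.len = 100;
	printf("leading %td, trailing %td\n",
	    M_LEADINGSPACE(&m), M_TRAILINGSPACE(&m));

	m.data -= 14;		/* prepend a 14-byte Ethernet header */
	m.len += 14;
	printf("after prepend: leading %td, trailing %td\n",
	    M_LEADINGSPACE(&m), M_TRAILINGSPACE(&m));
	return 0;
}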
*/ -#define M_STRUCT_GET(val, typ, m, off, len) \ -do { \ - struct mbuf *t; \ - int tmp; \ - \ - if ((m)->m_len >= (off) + (len)) { \ - (val) = (typ)(mtod((m), caddr_t) + (off)); \ - } else { \ - t = m_pulldown((m), (off), (len), &tmp); \ - if (t != NULL) { \ - if (t->m_len < tmp + (len)) \ - panic("m_pulldown malfunction"); \ - (val) = (typ)(mtod(t, caddr_t) + tmp); \ - } else { \ - (val) = (typ)NULL; \ - (m) = NULL; \ - } \ - } \ +#define M_STRUCT_GET(val, typ, m, off, len) \ +do { \ + struct mbuf *t; \ + int tmp; \ + \ + if ((m)->m_len >= (off) + (len)) { \ + (val) = (typ)(mtod((m), caddr_t) + (off)); \ + } else { \ + t = m_pulldown((m), (off), (len), &tmp); \ + if (t != NULL) { \ + if (t->m_len < tmp + (len)) \ + panic("m_pulldown malfunction"); \ + (val) = (typ)(mtod(t, caddr_t) + tmp); \ + } else { \ + (val) = (typ)NULL; \ + (m) = NULL; \ + } \ + } \ } while (0) -#define M_STRUCT_GET0(val, typ, m, off, len) \ -do { \ - struct mbuf *t; \ - \ - if ((off) == 0 && ((m)->m_len >= (len))) { \ - (val) = (typ)(void *)mtod(m, caddr_t); \ - } else { \ - t = m_pulldown((m), (off), (len), NULL); \ - if (t != NULL) { \ - if (t->m_len < (len)) \ - panic("m_pulldown malfunction"); \ - (val) = (typ)(void *)mtod(t, caddr_t); \ - } else { \ - (val) = (typ)NULL; \ - (m) = NULL; \ - } \ - } \ +#define M_STRUCT_GET0(val, typ, m, off, len) \ +do { \ + struct mbuf *t; \ + \ + if ((off) == 0 && ((m)->m_len >= (len))) { \ + (val) = (typ)(void *)mtod(m, caddr_t); \ + } else { \ + t = m_pulldown((m), (off), (len), NULL); \ + if (t != NULL) { \ + if (t->m_len < (len)) \ + panic("m_pulldown malfunction"); \ + (val) = (typ)(void *)mtod(t, caddr_t); \ + } else { \ + (val) = (typ)NULL; \ + (m) = NULL; \ + } \ + } \ } while (0) -#define MBUF_INPUT_CHECK(m, rcvif) \ -do { \ - if (!(m->m_flags & MBUF_PKTHDR) || \ - m->m_len < 0 || \ - m->m_len > ((njcl > 0) ? njclbytes : MBIGCLBYTES) || \ - m->m_type == MT_FREE || \ - ((m->m_flags & M_EXT) != 0 && m->m_ext.ext_buf == NULL)) { \ - panic_plain("Failed mbuf validity check: mbuf %p len %d " \ - "type %d flags 0x%x data %p rcvif %s ifflags 0x%x", \ - m, m->m_len, m->m_type, m->m_flags, \ - ((m->m_flags & M_EXT) ? m->m_ext.ext_buf : m->m_data), \ - if_name(rcvif), \ - (rcvif->if_flags & 0xffff)); \ - } \ +#define MBUF_INPUT_CHECK(m, rcvif) \ +do { \ + if (!(m->m_flags & MBUF_PKTHDR) || \ + m->m_len < 0 || \ + m->m_len > ((njcl > 0) ? njclbytes : MBIGCLBYTES) || \ + m->m_type == MT_FREE || \ + ((m->m_flags & M_EXT) != 0 && m->m_ext.ext_buf == NULL)) { \ + panic_plain("Failed mbuf validity check: mbuf %p len %d " \ + "type %d flags 0x%x data %p rcvif %s ifflags 0x%x", \ + m, m->m_len, m->m_type, m->m_flags, \ + ((m->m_flags & M_EXT) ? 
m->m_ext.ext_buf : m->m_data), \ + if_name(rcvif), \ + (rcvif->if_flags & 0xffff)); \ + } \ } while (0) /* @@ -934,241 +934,241 @@ do { \ * * m_next is ignored, so queueing chains of mbufs is possible */ -#define MBUFQ_HEAD(name) \ -struct name { \ - struct mbuf *mq_first; /* first packet */ \ - struct mbuf **mq_last; /* addr of last next packet */ \ +#define MBUFQ_HEAD(name) \ +struct name { \ + struct mbuf *mq_first; /* first packet */ \ + struct mbuf **mq_last; /* addr of last next packet */ \ } -#define MBUFQ_INIT(q) do { \ - MBUFQ_FIRST(q) = NULL; \ - (q)->mq_last = &MBUFQ_FIRST(q); \ +#define MBUFQ_INIT(q) do { \ + MBUFQ_FIRST(q) = NULL; \ + (q)->mq_last = &MBUFQ_FIRST(q); \ } while (0) -#define MBUFQ_PREPEND(q, m) do { \ - if ((MBUFQ_NEXT(m) = MBUFQ_FIRST(q)) == NULL) \ - (q)->mq_last = &MBUFQ_NEXT(m); \ - MBUFQ_FIRST(q) = (m); \ +#define MBUFQ_PREPEND(q, m) do { \ + if ((MBUFQ_NEXT(m) = MBUFQ_FIRST(q)) == NULL) \ + (q)->mq_last = &MBUFQ_NEXT(m); \ + MBUFQ_FIRST(q) = (m); \ } while (0) -#define MBUFQ_ENQUEUE(q, m) do { \ - MBUFQ_NEXT(m) = NULL; \ - *(q)->mq_last = (m); \ - (q)->mq_last = &MBUFQ_NEXT(m); \ +#define MBUFQ_ENQUEUE(q, m) do { \ + MBUFQ_NEXT(m) = NULL; \ + *(q)->mq_last = (m); \ + (q)->mq_last = &MBUFQ_NEXT(m); \ } while (0) -#define MBUFQ_ENQUEUE_MULTI(q, m, n) do { \ - MBUFQ_NEXT(n) = NULL; \ - *(q)->mq_last = (m); \ - (q)->mq_last = &MBUFQ_NEXT(n); \ +#define MBUFQ_ENQUEUE_MULTI(q, m, n) do { \ + MBUFQ_NEXT(n) = NULL; \ + *(q)->mq_last = (m); \ + (q)->mq_last = &MBUFQ_NEXT(n); \ } while (0) -#define MBUFQ_DEQUEUE(q, m) do { \ - if (((m) = MBUFQ_FIRST(q)) != NULL) { \ - if ((MBUFQ_FIRST(q) = MBUFQ_NEXT(m)) == NULL) \ - (q)->mq_last = &MBUFQ_FIRST(q); \ - else \ - MBUFQ_NEXT(m) = NULL; \ - } \ +#define MBUFQ_DEQUEUE(q, m) do { \ + if (((m) = MBUFQ_FIRST(q)) != NULL) { \ + if ((MBUFQ_FIRST(q) = MBUFQ_NEXT(m)) == NULL) \ + (q)->mq_last = &MBUFQ_FIRST(q); \ + else \ + MBUFQ_NEXT(m) = NULL; \ + } \ } while (0) -#define MBUFQ_REMOVE(q, m) do { \ - if (MBUFQ_FIRST(q) == (m)) { \ - MBUFQ_DEQUEUE(q, m); \ - } else { \ - struct mbuf *_m = MBUFQ_FIRST(q); \ - while (MBUFQ_NEXT(_m) != (m)) \ - _m = MBUFQ_NEXT(_m); \ - if ((MBUFQ_NEXT(_m) = \ - MBUFQ_NEXT(MBUFQ_NEXT(_m))) == NULL) \ - (q)->mq_last = &MBUFQ_NEXT(_m); \ - } \ +#define MBUFQ_REMOVE(q, m) do { \ + if (MBUFQ_FIRST(q) == (m)) { \ + MBUFQ_DEQUEUE(q, m); \ + } else { \ + struct mbuf *_m = MBUFQ_FIRST(q); \ + while (MBUFQ_NEXT(_m) != (m)) \ + _m = MBUFQ_NEXT(_m); \ + if ((MBUFQ_NEXT(_m) = \ + MBUFQ_NEXT(MBUFQ_NEXT(_m))) == NULL) \ + (q)->mq_last = &MBUFQ_NEXT(_m); \ + } \ } while (0) -#define MBUFQ_DRAIN(q) do { \ - struct mbuf *__m0; \ - while ((__m0 = MBUFQ_FIRST(q)) != NULL) { \ - MBUFQ_FIRST(q) = MBUFQ_NEXT(__m0); \ - MBUFQ_NEXT(__m0) = NULL; \ - m_freem(__m0); \ - } \ - (q)->mq_last = &MBUFQ_FIRST(q); \ +#define MBUFQ_DRAIN(q) do { \ + struct mbuf *__m0; \ + while ((__m0 = MBUFQ_FIRST(q)) != NULL) { \ + MBUFQ_FIRST(q) = MBUFQ_NEXT(__m0); \ + MBUFQ_NEXT(__m0) = NULL; \ + m_freem(__m0); \ + } \ + (q)->mq_last = &MBUFQ_FIRST(q); \ } while (0) -#define MBUFQ_FOREACH(m, q) \ - for ((m) = MBUFQ_FIRST(q); \ - (m); \ +#define MBUFQ_FOREACH(m, q) \ + for ((m) = MBUFQ_FIRST(q); \ + (m); \ (m) = MBUFQ_NEXT(m)) -#define MBUFQ_FOREACH_SAFE(m, q, tvar) \ - for ((m) = MBUFQ_FIRST(q); \ - (m) && ((tvar) = MBUFQ_NEXT(m), 1); \ +#define MBUFQ_FOREACH_SAFE(m, q, tvar) \ + for ((m) = MBUFQ_FIRST(q); \ + (m) && ((tvar) = MBUFQ_NEXT(m), 1); \ (m) = (tvar)) -#define MBUFQ_EMPTY(q) ((q)->mq_first == NULL) -#define MBUFQ_FIRST(q) 
((q)->mq_first) -#define MBUFQ_NEXT(m) ((m)->m_nextpkt) +#define MBUFQ_EMPTY(q) ((q)->mq_first == NULL) +#define MBUFQ_FIRST(q) ((q)->mq_first) +#define MBUFQ_NEXT(m) ((m)->m_nextpkt) /* * mq_last is initialized to point to mq_first, so check if they're * equal and return NULL when the list is empty. Otherwise, we need * to subtract the offset of MBUQ_NEXT (i.e. m_nextpkt field) to get * to the base mbuf address to return to caller. */ -#define MBUFQ_LAST(head) \ - (((head)->mq_last == &MBUFQ_FIRST(head)) ? NULL : \ - ((struct mbuf *)(void *)((char *)(head)->mq_last - \ +#define MBUFQ_LAST(head) \ + (((head)->mq_last == &MBUFQ_FIRST(head)) ? NULL : \ + ((struct mbuf *)(void *)((char *)(head)->mq_last - \ (size_t)(&MBUFQ_NEXT((struct mbuf *)0))))) -#define max_linkhdr P2ROUNDUP(_max_linkhdr, sizeof (u_int32_t)) -#define max_protohdr P2ROUNDUP(_max_protohdr, sizeof (u_int32_t)) +#define max_linkhdr P2ROUNDUP(_max_linkhdr, sizeof (u_int32_t)) +#define max_protohdr P2ROUNDUP(_max_protohdr, sizeof (u_int32_t)) #endif /* XNU_KERNEL_PRIVATE */ /* * Mbuf statistics (legacy). */ struct mbstat { - u_int32_t m_mbufs; /* mbufs obtained from page pool */ - u_int32_t m_clusters; /* clusters obtained from page pool */ - u_int32_t m_spare; /* spare field */ - u_int32_t m_clfree; /* free clusters */ - u_int32_t m_drops; /* times failed to find space */ - u_int32_t m_wait; /* times waited for space */ - u_int32_t m_drain; /* times drained protocols for space */ - u_short m_mtypes[256]; /* type specific mbuf allocations */ - u_int32_t m_mcfail; /* times m_copym failed */ - u_int32_t m_mpfail; /* times m_pullup failed */ - u_int32_t m_msize; /* length of an mbuf */ - u_int32_t m_mclbytes; /* length of an mbuf cluster */ - u_int32_t m_minclsize; /* min length of data to allocate a cluster */ - u_int32_t m_mlen; /* length of data in an mbuf */ - u_int32_t m_mhlen; /* length of data in a header mbuf */ - u_int32_t m_bigclusters; /* clusters obtained from page pool */ - u_int32_t m_bigclfree; /* free clusters */ - u_int32_t m_bigmclbytes; /* length of an mbuf cluster */ + u_int32_t m_mbufs; /* mbufs obtained from page pool */ + u_int32_t m_clusters; /* clusters obtained from page pool */ + u_int32_t m_spare; /* spare field */ + u_int32_t m_clfree; /* free clusters */ + u_int32_t m_drops; /* times failed to find space */ + u_int32_t m_wait; /* times waited for space */ + u_int32_t m_drain; /* times drained protocols for space */ + u_short m_mtypes[256]; /* type specific mbuf allocations */ + u_int32_t m_mcfail; /* times m_copym failed */ + u_int32_t m_mpfail; /* times m_pullup failed */ + u_int32_t m_msize; /* length of an mbuf */ + u_int32_t m_mclbytes; /* length of an mbuf cluster */ + u_int32_t m_minclsize; /* min length of data to allocate a cluster */ + u_int32_t m_mlen; /* length of data in an mbuf */ + u_int32_t m_mhlen; /* length of data in a header mbuf */ + u_int32_t m_bigclusters; /* clusters obtained from page pool */ + u_int32_t m_bigclfree; /* free clusters */ + u_int32_t m_bigmclbytes; /* length of an mbuf cluster */ }; /* Compatibillity with 10.3 */ struct ombstat { - u_int32_t m_mbufs; /* mbufs obtained from page pool */ - u_int32_t m_clusters; /* clusters obtained from page pool */ - u_int32_t m_spare; /* spare field */ - u_int32_t m_clfree; /* free clusters */ - u_int32_t m_drops; /* times failed to find space */ - u_int32_t m_wait; /* times waited for space */ - u_int32_t m_drain; /* times drained protocols for space */ - u_short m_mtypes[256]; /* type specific mbuf allocations */ - u_int32_t 
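The MBUFQ_* family above implements an O(1) tail queue by storing not a pointer to the last mbuf but the address of the last m_nextpkt field; MBUFQ_INIT pointing mq_last at mq_first makes the empty queue a non-special case for enqueue, and MBUFQ_LAST recovers the last mbuf from that interior pointer by subtracting the field offset, the same trick offsetof()-based container-of macros use (the header spells the offset as `(size_t)(&MBUFQ_NEXT((struct mbuf *)0))`, the pre-offsetof idiom). A condensed sketch of both mechanisms:

#include <stddef.h>
#include <stdio.h>

struct node { int id; struct node *nextpkt; };

struct queue {
	struct node  *first;
	struct node **last;	/* address of the last node's nextpkt */
};

#define Q_INIT(q)	do { (q)->first = NULL; (q)->last = &(q)->first; } while (0)
#define Q_ENQUEUE(q, n) do { \
	(n)->nextpkt = NULL; \
	*(q)->last = (n); \
	(q)->last = &(n)->nextpkt; \
} while (0)
/* Recover the last node from the interior pointer held in q->last. */
#define Q_LAST(q) \
	(((q)->last == &(q)->first) ? NULL : \
	(struct node *)((char *)(q)->last - offsetof(struct node, nextpkt)))

int
main(void)
{
	struct queue q;
	struct node a = { 1, NULL }, b = { 2, NULL };

	Q_INIT(&q);
	printf("empty queue: last=%p\n", (void *)Q_LAST(&q));
	Q_ENQUEUE(&q, &a);
	Q_ENQUEUE(&q, &b);
	printf("last id = %d\n", Q_LAST(&q)->id);	/* prints 2 */
	return 0;
}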
m_mcfail; /* times m_copym failed */ - u_int32_t m_mpfail; /* times m_pullup failed */ - u_int32_t m_msize; /* length of an mbuf */ - u_int32_t m_mclbytes; /* length of an mbuf cluster */ - u_int32_t m_minclsize; /* min length of data to allocate a cluster */ - u_int32_t m_mlen; /* length of data in an mbuf */ - u_int32_t m_mhlen; /* length of data in a header mbuf */ + u_int32_t m_mbufs; /* mbufs obtained from page pool */ + u_int32_t m_clusters; /* clusters obtained from page pool */ + u_int32_t m_spare; /* spare field */ + u_int32_t m_clfree; /* free clusters */ + u_int32_t m_drops; /* times failed to find space */ + u_int32_t m_wait; /* times waited for space */ + u_int32_t m_drain; /* times drained protocols for space */ + u_short m_mtypes[256]; /* type specific mbuf allocations */ + u_int32_t m_mcfail; /* times m_copym failed */ + u_int32_t m_mpfail; /* times m_pullup failed */ + u_int32_t m_msize; /* length of an mbuf */ + u_int32_t m_mclbytes; /* length of an mbuf cluster */ + u_int32_t m_minclsize; /* min length of data to allocate a cluster */ + u_int32_t m_mlen; /* length of data in an mbuf */ + u_int32_t m_mhlen; /* length of data in a header mbuf */ }; /* * mbuf class statistics. */ -#define MAX_MBUF_CNAME 15 +#define MAX_MBUF_CNAME 15 #if defined(XNU_KERNEL_PRIVATE) /* For backwards compatibility with 32-bit userland process */ struct omb_class_stat { - char mbcl_cname[MAX_MBUF_CNAME + 1]; /* class name */ - u_int32_t mbcl_size; /* buffer size */ - u_int32_t mbcl_total; /* # of buffers created */ - u_int32_t mbcl_active; /* # of active buffers */ - u_int32_t mbcl_infree; /* # of available buffers */ - u_int32_t mbcl_slab_cnt; /* # of available slabs */ - u_int64_t mbcl_alloc_cnt; /* # of times alloc is called */ - u_int64_t mbcl_free_cnt; /* # of times free is called */ - u_int64_t mbcl_notified; /* # of notified wakeups */ - u_int64_t mbcl_purge_cnt; /* # of purges so far */ - u_int64_t mbcl_fail_cnt; /* # of allocation failures */ - u_int32_t mbcl_ctotal; /* total only for this class */ - u_int32_t mbcl_release_cnt; /* amount of memory returned */ + char mbcl_cname[MAX_MBUF_CNAME + 1]; /* class name */ + u_int32_t mbcl_size; /* buffer size */ + u_int32_t mbcl_total; /* # of buffers created */ + u_int32_t mbcl_active; /* # of active buffers */ + u_int32_t mbcl_infree; /* # of available buffers */ + u_int32_t mbcl_slab_cnt; /* # of available slabs */ + u_int64_t mbcl_alloc_cnt; /* # of times alloc is called */ + u_int64_t mbcl_free_cnt; /* # of times free is called */ + u_int64_t mbcl_notified; /* # of notified wakeups */ + u_int64_t mbcl_purge_cnt; /* # of purges so far */ + u_int64_t mbcl_fail_cnt; /* # of allocation failures */ + u_int32_t mbcl_ctotal; /* total only for this class */ + u_int32_t mbcl_release_cnt; /* amount of memory returned */ /* * Cache layer statistics */ - u_int32_t mbcl_mc_state; /* cache state (see below) */ - u_int32_t mbcl_mc_cached; /* # of cached buffers */ - u_int32_t mbcl_mc_waiter_cnt; /* # waiters on the cache */ - u_int32_t mbcl_mc_wretry_cnt; /* # of wait retries */ - u_int32_t mbcl_mc_nwretry_cnt; /* # of no-wait retry attempts */ - u_int64_t mbcl_reserved[4]; /* for future use */ + u_int32_t mbcl_mc_state; /* cache state (see below) */ + u_int32_t mbcl_mc_cached; /* # of cached buffers */ + u_int32_t mbcl_mc_waiter_cnt; /* # waiters on the cache */ + u_int32_t mbcl_mc_wretry_cnt; /* # of wait retries */ + u_int32_t mbcl_mc_nwretry_cnt; /* # of no-wait retry attempts */ + u_int64_t mbcl_reserved[4]; /* for future use */ } 
__attribute__((__packed__)); #endif /* XNU_KERNEL_PRIVATE */ typedef struct mb_class_stat { - char mbcl_cname[MAX_MBUF_CNAME + 1]; /* class name */ - u_int32_t mbcl_size; /* buffer size */ - u_int32_t mbcl_total; /* # of buffers created */ - u_int32_t mbcl_active; /* # of active buffers */ - u_int32_t mbcl_infree; /* # of available buffers */ - u_int32_t mbcl_slab_cnt; /* # of available slabs */ + char mbcl_cname[MAX_MBUF_CNAME + 1]; /* class name */ + u_int32_t mbcl_size; /* buffer size */ + u_int32_t mbcl_total; /* # of buffers created */ + u_int32_t mbcl_active; /* # of active buffers */ + u_int32_t mbcl_infree; /* # of available buffers */ + u_int32_t mbcl_slab_cnt; /* # of available slabs */ #if defined(KERNEL) || defined(__LP64__) - u_int32_t mbcl_pad; /* padding */ + u_int32_t mbcl_pad; /* padding */ #endif /* KERNEL || __LP64__ */ - u_int64_t mbcl_alloc_cnt; /* # of times alloc is called */ - u_int64_t mbcl_free_cnt; /* # of times free is called */ - u_int64_t mbcl_notified; /* # of notified wakeups */ - u_int64_t mbcl_purge_cnt; /* # of purges so far */ - u_int64_t mbcl_fail_cnt; /* # of allocation failures */ - u_int32_t mbcl_ctotal; /* total only for this class */ - u_int32_t mbcl_release_cnt; /* amount of memory returned */ + u_int64_t mbcl_alloc_cnt; /* # of times alloc is called */ + u_int64_t mbcl_free_cnt; /* # of times free is called */ + u_int64_t mbcl_notified; /* # of notified wakeups */ + u_int64_t mbcl_purge_cnt; /* # of purges so far */ + u_int64_t mbcl_fail_cnt; /* # of allocation failures */ + u_int32_t mbcl_ctotal; /* total only for this class */ + u_int32_t mbcl_release_cnt; /* amount of memory returned */ /* * Cache layer statistics */ - u_int32_t mbcl_mc_state; /* cache state (see below) */ - u_int32_t mbcl_mc_cached; /* # of cached buffers */ - u_int32_t mbcl_mc_waiter_cnt; /* # waiters on the cache */ - u_int32_t mbcl_mc_wretry_cnt; /* # of wait retries */ - u_int32_t mbcl_mc_nwretry_cnt; /* # of no-wait retry attempts */ - u_int32_t mbcl_peak_reported; /* last usage peak reported */ - u_int32_t mbcl_reserved[7]; /* for future use */ + u_int32_t mbcl_mc_state; /* cache state (see below) */ + u_int32_t mbcl_mc_cached; /* # of cached buffers */ + u_int32_t mbcl_mc_waiter_cnt; /* # waiters on the cache */ + u_int32_t mbcl_mc_wretry_cnt; /* # of wait retries */ + u_int32_t mbcl_mc_nwretry_cnt; /* # of no-wait retry attempts */ + u_int32_t mbcl_peak_reported; /* last usage peak reported */ + u_int32_t mbcl_reserved[7]; /* for future use */ } mb_class_stat_t; -#define MCS_DISABLED 0 /* cache is permanently disabled */ -#define MCS_ONLINE 1 /* cache is online */ -#define MCS_PURGING 2 /* cache is being purged */ -#define MCS_OFFLINE 3 /* cache is offline (resizing) */ +#define MCS_DISABLED 0 /* cache is permanently disabled */ +#define MCS_ONLINE 1 /* cache is online */ +#define MCS_PURGING 2 /* cache is being purged */ +#define MCS_OFFLINE 3 /* cache is offline (resizing) */ #if defined(XNU_KERNEL_PRIVATE) /* For backwards compatibility with 32-bit userland process */ struct omb_stat { - u_int32_t mbs_cnt; /* number of classes */ - struct omb_class_stat mbs_class[1]; /* class array */ + u_int32_t mbs_cnt; /* number of classes */ + struct omb_class_stat mbs_class[1]; /* class array */ } __attribute__((__packed__)); #endif /* XNU_KERNEL_PRIVATE */ typedef struct mb_stat { - u_int32_t mbs_cnt; /* number of classes */ + u_int32_t mbs_cnt; /* number of classes */ #if defined(KERNEL) || defined(__LP64__) - u_int32_t mbs_pad; /* padding */ + u_int32_t mbs_pad; /* padding 
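omb_stat and mb_stat above end in a one-element array (mbs_class[1]), the pre-C99 spelling of a flexible array member: the kernel sizes the sysctl blob as the header plus mbs_cnt elements, and userland walks that many entries. A user-space sketch of producing and consuming such a blob (strictly speaking, indexing past element 0 of a [1] array is only blessed by this long-standing idiom, which is why C99 code would declare cls[] instead):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct class_stat { char name[16]; unsigned total; };

struct stat_blob {
	unsigned          cnt;
	struct class_stat cls[1];	/* really cnt entries follow */
};

int
main(void)
{
	unsigned n = 3;
	/* Size the allocation the way a sysctl handler would. */
	size_t sz = offsetof(struct stat_blob, cls) +
	    n * sizeof(struct class_stat);
	struct stat_blob *sb = calloc(1, sz);

	sb->cnt = n;
	for (unsigned i = 0; i < n; i++) {	/* producer side */
		snprintf(sb->cls[i].name, sizeof(sb->cls[i].name), "class%u", i);
		sb->cls[i].total = 100 * (i + 1);
	}
	for (unsigned i = 0; i < sb->cnt; i++)	/* consumer side */
		printf("%s: %u\n", sb->cls[i].name, sb->cls[i].total);
	free(sb);
	return 0;
}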
*/ #endif /* KERNEL || __LP64__ */ - mb_class_stat_t mbs_class[1]; /* class array */ + mb_class_stat_t mbs_class[1]; /* class array */ } mb_stat_t; #ifdef PRIVATE -#define MLEAK_STACK_DEPTH 16 /* Max PC stack depth */ +#define MLEAK_STACK_DEPTH 16 /* Max PC stack depth */ typedef struct mleak_trace_stat { - u_int64_t mltr_collisions; - u_int64_t mltr_hitcount; - u_int64_t mltr_allocs; - u_int64_t mltr_depth; - u_int64_t mltr_addr[MLEAK_STACK_DEPTH]; + u_int64_t mltr_collisions; + u_int64_t mltr_hitcount; + u_int64_t mltr_allocs; + u_int64_t mltr_depth; + u_int64_t mltr_addr[MLEAK_STACK_DEPTH]; } mleak_trace_stat_t; typedef struct mleak_stat { - u_int32_t ml_isaddr64; /* 64-bit KVA? */ - u_int32_t ml_cnt; /* number of traces */ - mleak_trace_stat_t ml_trace[1]; /* trace array */ + u_int32_t ml_isaddr64; /* 64-bit KVA? */ + u_int32_t ml_cnt; /* number of traces */ + mleak_trace_stat_t ml_trace[1]; /* trace array */ } mleak_stat_t; struct mleak_table { - u_int32_t mleak_capture; /* sampling capture counter */ - u_int32_t mleak_sample_factor; /* sample factor */ + u_int32_t mleak_capture; /* sampling capture counter */ + u_int32_t mleak_sample_factor; /* sample factor */ /* Times two active records want to occupy the same spot */ u_int64_t alloc_collisions; @@ -1197,7 +1197,7 @@ __BEGIN_DECLS * Exported (private) */ -extern struct mbstat mbstat; /* statistics */ +extern struct mbstat mbstat; /* statistics */ __END_DECLS #endif /* KERNEL_PRIVATE */ @@ -1211,22 +1211,22 @@ __BEGIN_DECLS /* flags to m_get/MGET */ /* Need to include malloc.h to get right options for malloc */ -#include +#include struct mbuf; /* length to m_copy to copy all */ -#define M_COPYALL 1000000000 +#define M_COPYALL 1000000000 -#define M_DONTWAIT M_NOWAIT -#define M_WAIT M_WAITOK +#define M_DONTWAIT M_NOWAIT +#define M_WAIT M_WAITOK /* modes for m_copym and variants */ -#define M_COPYM_NOOP_HDR 0 /* don't copy/move pkthdr contents */ -#define M_COPYM_COPY_HDR 1 /* copy pkthdr from old to new */ -#define M_COPYM_MOVE_HDR 2 /* move pkthdr from old to new */ -#define M_COPYM_MUST_COPY_HDR 3 /* MUST copy pkthdr from old to new */ -#define M_COPYM_MUST_MOVE_HDR 4 /* MUST move pkthdr from old to new */ +#define M_COPYM_NOOP_HDR 0 /* don't copy/move pkthdr contents */ +#define M_COPYM_COPY_HDR 1 /* copy pkthdr from old to new */ +#define M_COPYM_MOVE_HDR 2 /* move pkthdr from old to new */ +#define M_COPYM_MUST_COPY_HDR 3 /* MUST copy pkthdr from old to new */ +#define M_COPYM_MUST_MOVE_HDR 4 /* MUST move pkthdr from old to new */ extern void m_freem(struct mbuf *); extern u_int64_t mcl_to_paddr(char *); @@ -1253,28 +1253,28 @@ extern void m_mclfree(caddr_t p); * headers), and assert otherwise. 
*/ #if defined(__i386__) || defined(__x86_64__) -#define MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(_m) +#define MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(_m) #else /* !__i386__ && !__x86_64__ */ -#define MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(_m) do { \ - if (!IS_P2ALIGNED((_m)->m_data, sizeof (u_int32_t))) { \ - if (((_m)->m_flags & M_PKTHDR) && \ - (_m)->m_pkthdr.rcvif != NULL) { \ - panic_plain("\n%s: mbuf %p data ptr %p is not " \ - "32-bit aligned [%s: alignerrs=%lld]\n", \ - __func__, (_m), (_m)->m_data, \ - if_name((_m)->m_pkthdr.rcvif), \ - (_m)->m_pkthdr.rcvif->if_alignerrs); \ - } else { \ - panic_plain("\n%s: mbuf %p data ptr %p is not " \ - "32-bit aligned\n", \ - __func__, (_m), (_m)->m_data); \ - } \ - } \ +#define MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(_m) do { \ + if (!IS_P2ALIGNED((_m)->m_data, sizeof (u_int32_t))) { \ + if (((_m)->m_flags & M_PKTHDR) && \ + (_m)->m_pkthdr.rcvif != NULL) { \ + panic_plain("\n%s: mbuf %p data ptr %p is not " \ + "32-bit aligned [%s: alignerrs=%lld]\n", \ + __func__, (_m), (_m)->m_data, \ + if_name((_m)->m_pkthdr.rcvif), \ + (_m)->m_pkthdr.rcvif->if_alignerrs); \ + } else { \ + panic_plain("\n%s: mbuf %p data ptr %p is not " \ + "32-bit aligned\n", \ + __func__, (_m), (_m)->m_data); \ + } \ + } \ } while (0) #endif /* !__i386__ && !__x86_64__ */ /* Maximum number of MBUF_SC values (excluding MBUF_SC_UNSPEC) */ -#define MBUF_SC_MAX_CLASSES 10 +#define MBUF_SC_MAX_CLASSES 10 /* * These conversion macros rely on the corresponding MBUF_SC and @@ -1302,71 +1302,71 @@ extern void m_mclfree(caddr_t p); * corresponding portion, and never assume that a higher class corresponds * to a higher index. */ -#define MBUF_SCVAL(x) ((x) & 0xffff) -#define MBUF_SCIDX(x) ((((x) >> 16) & 0xff) >> 3) -#define MBUF_SC2TC(_sc) (MBUF_SCVAL(_sc) >> 7) -#define MBUF_TC2SCVAL(_tc) ((_tc) << 7) +#define MBUF_SCVAL(x) ((x) & 0xffff) +#define MBUF_SCIDX(x) ((((x) >> 16) & 0xff) >> 3) +#define MBUF_SC2TC(_sc) (MBUF_SCVAL(_sc) >> 7) +#define MBUF_TC2SCVAL(_tc) ((_tc) << 7) #define IS_MBUF_SC_BACKGROUND(_sc) (((_sc) == MBUF_SC_BK_SYS) || \ ((_sc) == MBUF_SC_BK)) -#define IS_MBUF_SC_REALTIME(_sc) ((_sc) >= MBUF_SC_AV && (_sc) <= MBUF_SC_VO) -#define IS_MBUF_SC_BESTEFFORT(_sc) ((_sc) == MBUF_SC_BE || \ +#define IS_MBUF_SC_REALTIME(_sc) ((_sc) >= MBUF_SC_AV && (_sc) <= MBUF_SC_VO) +#define IS_MBUF_SC_BESTEFFORT(_sc) ((_sc) == MBUF_SC_BE || \ (_sc) == MBUF_SC_RD || (_sc) == MBUF_SC_OAM) -#define SCIDX_BK_SYS MBUF_SCIDX(MBUF_SC_BK_SYS) -#define SCIDX_BK MBUF_SCIDX(MBUF_SC_BK) -#define SCIDX_BE MBUF_SCIDX(MBUF_SC_BE) -#define SCIDX_RD MBUF_SCIDX(MBUF_SC_RD) -#define SCIDX_OAM MBUF_SCIDX(MBUF_SC_OAM) -#define SCIDX_AV MBUF_SCIDX(MBUF_SC_AV) -#define SCIDX_RV MBUF_SCIDX(MBUF_SC_RV) -#define SCIDX_VI MBUF_SCIDX(MBUF_SC_VI) -#define SCIDX_SIG MBUF_SCIDX(MBUF_SC_SIG) -#define SCIDX_VO MBUF_SCIDX(MBUF_SC_VO) -#define SCIDX_CTL MBUF_SCIDX(MBUF_SC_CTL) - -#define SCVAL_BK_SYS MBUF_SCVAL(MBUF_SC_BK_SYS) -#define SCVAL_BK MBUF_SCVAL(MBUF_SC_BK) -#define SCVAL_BE MBUF_SCVAL(MBUF_SC_BE) -#define SCVAL_RD MBUF_SCVAL(MBUF_SC_RD) -#define SCVAL_OAM MBUF_SCVAL(MBUF_SC_OAM) -#define SCVAL_AV MBUF_SCVAL(MBUF_SC_AV) -#define SCVAL_RV MBUF_SCVAL(MBUF_SC_RV) -#define SCVAL_VI MBUF_SCVAL(MBUF_SC_VI) -#define SCVAL_SIG MBUF_SCVAL(MBUF_SC_SIG) -#define SCVAL_VO MBUF_SCVAL(MBUF_SC_VO) -#define SCVAL_CTL MBUF_SCVAL(MBUF_SC_CTL) - -#define MBUF_VALID_SC(c) \ - (c == MBUF_SC_BK_SYS || c == MBUF_SC_BK || c == MBUF_SC_BE || \ - c == MBUF_SC_RD || c == MBUF_SC_OAM || c == MBUF_SC_AV || \ - c == MBUF_SC_RV || c == MBUF_SC_VI || c 
== MBUF_SC_SIG || \ +#define SCIDX_BK_SYS MBUF_SCIDX(MBUF_SC_BK_SYS) +#define SCIDX_BK MBUF_SCIDX(MBUF_SC_BK) +#define SCIDX_BE MBUF_SCIDX(MBUF_SC_BE) +#define SCIDX_RD MBUF_SCIDX(MBUF_SC_RD) +#define SCIDX_OAM MBUF_SCIDX(MBUF_SC_OAM) +#define SCIDX_AV MBUF_SCIDX(MBUF_SC_AV) +#define SCIDX_RV MBUF_SCIDX(MBUF_SC_RV) +#define SCIDX_VI MBUF_SCIDX(MBUF_SC_VI) +#define SCIDX_SIG MBUF_SCIDX(MBUF_SC_SIG) +#define SCIDX_VO MBUF_SCIDX(MBUF_SC_VO) +#define SCIDX_CTL MBUF_SCIDX(MBUF_SC_CTL) + +#define SCVAL_BK_SYS MBUF_SCVAL(MBUF_SC_BK_SYS) +#define SCVAL_BK MBUF_SCVAL(MBUF_SC_BK) +#define SCVAL_BE MBUF_SCVAL(MBUF_SC_BE) +#define SCVAL_RD MBUF_SCVAL(MBUF_SC_RD) +#define SCVAL_OAM MBUF_SCVAL(MBUF_SC_OAM) +#define SCVAL_AV MBUF_SCVAL(MBUF_SC_AV) +#define SCVAL_RV MBUF_SCVAL(MBUF_SC_RV) +#define SCVAL_VI MBUF_SCVAL(MBUF_SC_VI) +#define SCVAL_SIG MBUF_SCVAL(MBUF_SC_SIG) +#define SCVAL_VO MBUF_SCVAL(MBUF_SC_VO) +#define SCVAL_CTL MBUF_SCVAL(MBUF_SC_CTL) + +#define MBUF_VALID_SC(c) \ + (c == MBUF_SC_BK_SYS || c == MBUF_SC_BK || c == MBUF_SC_BE || \ + c == MBUF_SC_RD || c == MBUF_SC_OAM || c == MBUF_SC_AV || \ + c == MBUF_SC_RV || c == MBUF_SC_VI || c == MBUF_SC_SIG || \ c == MBUF_SC_VO || c == MBUF_SC_CTL) -#define MBUF_VALID_SCIDX(c) \ - (c == SCIDX_BK_SYS || c == SCIDX_BK || c == SCIDX_BE || \ - c == SCIDX_RD || c == SCIDX_OAM || c == SCIDX_AV || \ - c == SCIDX_RV || c == SCIDX_VI || c == SCIDX_SIG || \ +#define MBUF_VALID_SCIDX(c) \ + (c == SCIDX_BK_SYS || c == SCIDX_BK || c == SCIDX_BE || \ + c == SCIDX_RD || c == SCIDX_OAM || c == SCIDX_AV || \ + c == SCIDX_RV || c == SCIDX_VI || c == SCIDX_SIG || \ c == SCIDX_VO || c == SCIDX_CTL) -#define MBUF_VALID_SCVAL(c) \ - (c == SCVAL_BK_SYS || c == SCVAL_BK || c == SCVAL_BE || \ - c == SCVAL_RD || c == SCVAL_OAM || c == SCVAL_AV || \ - c == SCVAL_RV || c == SCVAL_VI || c == SCVAL_SIG || \ +#define MBUF_VALID_SCVAL(c) \ + (c == SCVAL_BK_SYS || c == SCVAL_BK || c == SCVAL_BE || \ + c == SCVAL_RD || c == SCVAL_OAM || c == SCVAL_AV || \ + c == SCVAL_RV || c == SCVAL_VI || c == SCVAL_SIG || \ c == SCVAL_VO || SCVAL_CTL) -extern unsigned char *mbutl; /* start VA of mbuf pool */ -extern unsigned char *embutl; /* end VA of mbuf pool */ -extern unsigned int nmbclusters; /* number of mapped clusters */ -extern int njcl; /* # of jumbo clusters */ -extern int njclbytes; /* size of a jumbo cluster */ -extern int max_hdr; /* largest link+protocol header */ -extern int max_datalen; /* MHLEN - max_hdr */ +extern unsigned char *mbutl; /* start VA of mbuf pool */ +extern unsigned char *embutl; /* end VA of mbuf pool */ +extern unsigned int nmbclusters; /* number of mapped clusters */ +extern int njcl; /* # of jumbo clusters */ +extern int njclbytes; /* size of a jumbo cluster */ +extern int max_hdr; /* largest link+protocol header */ +extern int max_datalen; /* MHLEN - max_hdr */ /* Use max_linkhdr instead of _max_linkhdr */ -extern int _max_linkhdr; /* largest link-level header */ +extern int _max_linkhdr; /* largest link-level header */ /* Use max_protohdr instead of _max_protohdr */ -extern int _max_protohdr; /* largest protocol header */ +extern int _max_protohdr; /* largest protocol header */ __private_extern__ unsigned int mbuf_default_ncl(int, u_int64_t); __private_extern__ void mbinit(void); @@ -1463,27 +1463,27 @@ __private_extern__ void mbuf_drain(boolean_t); * struct m_tag *mtag = &p->tag; */ -#define KERNEL_MODULE_TAG_ID 0 +#define KERNEL_MODULE_TAG_ID 0 enum { - KERNEL_TAG_TYPE_NONE = 0, - KERNEL_TAG_TYPE_DUMMYNET = 1, - KERNEL_TAG_TYPE_DIVERT = 2, - 
KERNEL_TAG_TYPE_IPFORWARD = 3, - KERNEL_TAG_TYPE_IPFILT = 4, - KERNEL_TAG_TYPE_MACLABEL = 5, - KERNEL_TAG_TYPE_MAC_POLICY_LABEL = 6, - KERNEL_TAG_TYPE_ENCAP = 8, - KERNEL_TAG_TYPE_INET6 = 9, - KERNEL_TAG_TYPE_IPSEC = 10, - KERNEL_TAG_TYPE_DRVAUX = 11, - KERNEL_TAG_TYPE_CFIL_UDP = 13, + KERNEL_TAG_TYPE_NONE = 0, + KERNEL_TAG_TYPE_DUMMYNET = 1, + KERNEL_TAG_TYPE_DIVERT = 2, + KERNEL_TAG_TYPE_IPFORWARD = 3, + KERNEL_TAG_TYPE_IPFILT = 4, + KERNEL_TAG_TYPE_MACLABEL = 5, + KERNEL_TAG_TYPE_MAC_POLICY_LABEL = 6, + KERNEL_TAG_TYPE_ENCAP = 8, + KERNEL_TAG_TYPE_INET6 = 9, + KERNEL_TAG_TYPE_IPSEC = 10, + KERNEL_TAG_TYPE_DRVAUX = 11, + KERNEL_TAG_TYPE_CFIL_UDP = 13, }; /* Packet tag routines */ __private_extern__ struct m_tag *m_tag_alloc(u_int32_t, u_int16_t, int, int); __private_extern__ struct m_tag *m_tag_create(u_int32_t, u_int16_t, int, int, - struct mbuf *); + struct mbuf *); __private_extern__ void m_tag_free(struct m_tag *); __private_extern__ void m_tag_prepend(struct mbuf *, struct m_tag *); __private_extern__ void m_tag_unlink(struct mbuf *, struct m_tag *); @@ -1509,9 +1509,9 @@ __private_extern__ mbuf_svc_class_t m_service_class_from_val(u_int32_t); __private_extern__ int m_set_traffic_class(struct mbuf *, mbuf_traffic_class_t); __private_extern__ mbuf_traffic_class_t m_get_traffic_class(struct mbuf *); -#define ADDCARRY(_x) do { \ - while (((_x) >> 16) != 0) \ - (_x) = ((_x) >> 16) + ((_x) & 0xffff); \ +#define ADDCARRY(_x) do { \ + while (((_x) >> 16) != 0) \ + (_x) = ((_x) >> 16) + ((_x) & 0xffff); \ } while (0) __private_extern__ u_int16_t m_adj_sum16(struct mbuf *, u_int32_t, @@ -1529,4 +1529,4 @@ __private_extern__ mbuf_tx_compl_func m_get_tx_compl_callback(u_int32_t); __END_DECLS #endif /* XNU_KERNEL_PRIVATE */ -#endif /* !_SYS_MBUF_H_ */ +#endif /* !_SYS_MBUF_H_ */ diff --git a/bsd/sys/mcache.h b/bsd/sys/mcache.h index b8007aa6d..0c5aa5255 100644 --- a/bsd/sys/mcache.h +++ b/bsd/sys/mcache.h @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _SYS_MCACHE_H -#define _SYS_MCACHE_H +#define _SYS_MCACHE_H #ifdef KERNEL_PRIVATE @@ -51,152 +51,152 @@ extern "C" { /* * Unlike VERIFY(), ASSERT() is evaluated only in DEBUG/DEVELOPMENT build. */ -#define VERIFY(EX) \ +#define VERIFY(EX) \ ((void)(__probable((EX)) || assfail(#EX, __FILE__, __LINE__))) #if (DEBUG || DEVELOPMENT) -#define ASSERT(EX) VERIFY(EX) +#define ASSERT(EX) VERIFY(EX) #else -#define ASSERT(EX) ((void)0) +#define ASSERT(EX) ((void)0) #endif /* * Compile time assert; this should be on its own someday. */ -#define _CASSERT(x) _Static_assert(x, "compile-time assertion failed") +#define _CASSERT(x) _Static_assert(x, "compile-time assertion failed") /* * Atomic macros; these should be on their own someday. 
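 *
 * For illustration, a minimal sketch of how the wrappers below are
 * typically used; "stat_drops" is a hypothetical counter, not something
 * defined in this header:
 *
 *	static volatile u_int32_t stat_drops;
 *
 *	atomic_add_32(&stat_drops, 1);	// fire-and-forget increment
 *	u_int32_t prev = atomic_add_32_ov(&stat_drops, 1); // the _ov
 *					// variants also hand back the value
 *					// reported by OSAddAtomic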
*/ -#define atomic_add_16_ov(a, n) \ +#define atomic_add_16_ov(a, n) \ ((u_int16_t) OSAddAtomic16(n, (volatile SInt16 *)a)) -#define atomic_add_16(a, n) \ +#define atomic_add_16(a, n) \ ((void) atomic_add_16_ov(a, n)) -#define atomic_add_32_ov(a, n) \ +#define atomic_add_32_ov(a, n) \ ((u_int32_t) OSAddAtomic(n, (volatile SInt32 *)a)) -#define atomic_add_32(a, n) \ +#define atomic_add_32(a, n) \ ((void) atomic_add_32_ov(a, n)) -#define atomic_add_64_ov(a, n) \ +#define atomic_add_64_ov(a, n) \ ((u_int64_t) OSAddAtomic64(n, (volatile SInt64 *)a)) -#define atomic_add_64(a, n) \ +#define atomic_add_64(a, n) \ ((void) atomic_add_64_ov(a, n)) -#define atomic_test_set_32(a, o, n) \ +#define atomic_test_set_32(a, o, n) \ OSCompareAndSwap(o, n, (volatile UInt32 *)a) -#define atomic_set_32(a, n) do { \ - while (!atomic_test_set_32(a, *a, n)) \ - ; \ +#define atomic_set_32(a, n) do { \ + while (!atomic_test_set_32(a, *a, n)) \ + ; \ } while (0) -#define atomic_test_set_64(a, o, n) \ +#define atomic_test_set_64(a, o, n) \ OSCompareAndSwap64(o, n, (volatile UInt64 *)a) -#define atomic_set_64(a, n) do { \ - while (!atomic_test_set_64(a, *a, n)) \ - ; \ +#define atomic_set_64(a, n) do { \ + while (!atomic_test_set_64(a, *a, n)) \ + ; \ } while (0) #if defined(__LP64__) -#define atomic_get_64(n, a) do { \ - (n) = *(a); \ +#define atomic_get_64(n, a) do { \ + (n) = *(a); \ } while (0) #else -#define atomic_get_64(n, a) do { \ - (n) = atomic_add_64_ov(a, 0); \ +#define atomic_get_64(n, a) do { \ + (n) = atomic_add_64_ov(a, 0); \ } while (0) #endif /* __LP64__ */ -#define atomic_test_set_ptr(a, o, n) \ +#define atomic_test_set_ptr(a, o, n) \ OSCompareAndSwapPtr(o, n, (void * volatile *)a) -#define atomic_set_ptr(a, n) do { \ - while (!atomic_test_set_ptr(a, *a, n)) \ - ; \ +#define atomic_set_ptr(a, n) do { \ + while (!atomic_test_set_ptr(a, *a, n)) \ + ; \ } while (0) -#define atomic_or_8_ov(a, n) \ +#define atomic_or_8_ov(a, n) \ ((u_int8_t) OSBitOrAtomic8(n, (volatile UInt8 *)a)) -#define atomic_or_8(a, n) \ +#define atomic_or_8(a, n) \ ((void) atomic_or_8_ov(a, n)) -#define atomic_bitset_8(a, n) \ +#define atomic_bitset_8(a, n) \ atomic_or_8(a, n) -#define atomic_or_16_ov(a, n) \ +#define atomic_or_16_ov(a, n) \ ((u_int16_t) OSBitOrAtomic16(n, (volatile UInt16 *)a)) -#define atomic_or_16(a, n) \ +#define atomic_or_16(a, n) \ ((void) atomic_or_16_ov(a, n)) -#define atomic_bitset_16(a, n) \ +#define atomic_bitset_16(a, n) \ atomic_or_16(a, n) -#define atomic_or_32_ov(a, n) \ +#define atomic_or_32_ov(a, n) \ ((u_int32_t) OSBitOrAtomic(n, (volatile UInt32 *)a)) -#define atomic_or_32(a, n) \ +#define atomic_or_32(a, n) \ ((void) atomic_or_32_ov(a, n)) -#define atomic_bitset_32(a, n) \ +#define atomic_bitset_32(a, n) \ atomic_or_32(a, n) -#define atomic_bitset_32_ov(a, n) \ +#define atomic_bitset_32_ov(a, n) \ atomic_or_32_ov(a, n) -#define atomic_and_8_ov(a, n) \ +#define atomic_and_8_ov(a, n) \ ((u_int8_t) OSBitAndAtomic8(n, (volatile UInt8 *)a)) -#define atomic_and_8(a, n) \ +#define atomic_and_8(a, n) \ ((void) atomic_and_8_ov(a, n)) -#define atomic_bitclear_8(a, n) \ +#define atomic_bitclear_8(a, n) \ atomic_and_8(a, ~(n)) -#define atomic_and_16_ov(a, n) \ +#define atomic_and_16_ov(a, n) \ ((u_int16_t) OSBitAndAtomic16(n, (volatile UInt16 *)a)) -#define atomic_and_16(a, n) \ +#define atomic_and_16(a, n) \ ((void) atomic_and_16_ov(a, n)) -#define atomic_bitclear_16(a, n) \ +#define atomic_bitclear_16(a, n) \ atomic_and_16(a, ~(n)) -#define atomic_and_32_ov(a, n) \ +#define atomic_and_32_ov(a, n) \ 
((u_int32_t) OSBitAndAtomic(n, (volatile UInt32 *)a)) -#define atomic_and_32(a, n) \ +#define atomic_and_32(a, n) \ ((void) atomic_and_32_ov(a, n)) -#define atomic_bitclear_32(a, n) \ +#define atomic_bitclear_32(a, n) \ atomic_and_32(a, ~(n)) -#define membar_sync OSMemoryBarrier +#define membar_sync OSMemoryBarrier /* * Use CPU_CACHE_LINE_SIZE instead of MAX_CPU_CACHE_LINE_SIZE, unless * wasting space is of no concern. */ -#define MAX_CPU_CACHE_LINE_SIZE 128 -#define CPU_CACHE_LINE_SIZE mcache_cache_line_size() +#define MAX_CPU_CACHE_LINE_SIZE 128 +#define CPU_CACHE_LINE_SIZE mcache_cache_line_size() #ifndef IS_P2ALIGNED -#define IS_P2ALIGNED(v, a) \ +#define IS_P2ALIGNED(v, a) \ ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0) #endif /* IS_P2ALIGNED */ #ifndef P2ROUNDUP -#define P2ROUNDUP(x, align) \ +#define P2ROUNDUP(x, align) \ (-(-((uintptr_t)(x)) & -((uintptr_t)align))) #endif /* P2ROUNDUP */ #ifndef P2ROUNDDOWN -#define P2ROUNDDOWN(x, align) \ +#define P2ROUNDDOWN(x, align) \ (((uintptr_t)(x)) & ~((uintptr_t)(align) - 1)) #endif /* P2ROUNDDOWN */ @@ -205,8 +205,8 @@ extern "C" { ((uintptr_t)(x) & -((uintptr_t)(align))) #endif /* P2ALIGN */ -#define MCACHE_FREE_PATTERN 0xdeadbeefdeadbeefULL -#define MCACHE_UNINITIALIZED_PATTERN 0xbaddcafebaddcafeULL +#define MCACHE_FREE_PATTERN 0xdeadbeefdeadbeefULL +#define MCACHE_UNINITIALIZED_PATTERN 0xbaddcafebaddcafeULL /* * mcache allocation request flags. @@ -227,13 +227,13 @@ extern "C" { * * Regular mcache clients should only use MCR_SLEEP or MCR_NOSLEEP. */ -#define MCR_SLEEP 0x0000 /* same as M_WAITOK */ -#define MCR_NOSLEEP 0x0001 /* same as M_NOWAIT */ -#define MCR_FAILOK 0x0100 /* private, for internal use only */ -#define MCR_TRYHARD 0x0200 /* private, for internal use only */ -#define MCR_USR1 0x1000 /* private, for internal use only */ +#define MCR_SLEEP 0x0000 /* same as M_WAITOK */ +#define MCR_NOSLEEP 0x0001 /* same as M_NOWAIT */ +#define MCR_FAILOK 0x0100 /* private, for internal use only */ +#define MCR_TRYHARD 0x0200 /* private, for internal use only */ +#define MCR_USR1 0x1000 /* private, for internal use only */ -#define MCR_NONBLOCKING (MCR_NOSLEEP | MCR_FAILOK | MCR_TRYHARD) +#define MCR_NONBLOCKING (MCR_NOSLEEP | MCR_FAILOK | MCR_TRYHARD) /* * Generic one-way linked list element structure. This is used to handle @@ -241,38 +241,38 @@ extern "C" { * together before returning them to the caller. 
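 *
 * For illustration only; next_object() below is a hypothetical source of
 * objects, not part of this header. A batch is built by threading the
 * obj_next field:
 *
 *	mcache_obj_t *o, *list = NULL;
 *	while ((o = next_object()) != NULL) {
 *		o->obj_next = list;	// push onto the singly-linked batch
 *		list = o;
 *	}
 *	// "list" now heads the chained batch handed back in one call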
*/ typedef struct mcache_obj { - struct mcache_obj *obj_next; + struct mcache_obj *obj_next; } mcache_obj_t; typedef struct mcache_bkt { - void *bkt_next; /* next bucket in list */ - void *bkt_obj[1]; /* one or more objects */ + void *bkt_next; /* next bucket in list */ + void *bkt_obj[1]; /* one or more objects */ } mcache_bkt_t; typedef struct mcache_bktlist { - mcache_bkt_t *bl_list; /* bucket list */ - u_int32_t bl_total; /* number of buckets */ - u_int32_t bl_min; /* min since last update */ - u_int32_t bl_reaplimit; /* max reapable buckets */ - u_int64_t bl_alloc; /* allocations from this list */ + mcache_bkt_t *bl_list; /* bucket list */ + u_int32_t bl_total; /* number of buckets */ + u_int32_t bl_min; /* min since last update */ + u_int32_t bl_reaplimit; /* max reapable buckets */ + u_int64_t bl_alloc; /* allocations from this list */ } mcache_bktlist_t; typedef struct mcache_bkttype { - int bt_bktsize; /* bucket size (number of elements) */ - size_t bt_minbuf; /* all smaller buffers qualify */ - size_t bt_maxbuf; /* no larger bfufers qualify */ - struct mcache *bt_cache; /* bucket cache */ + int bt_bktsize; /* bucket size (number of elements) */ + size_t bt_minbuf; /* all smaller buffers qualify */ + size_t bt_maxbuf; /* no larger buffers qualify */ + struct mcache *bt_cache; /* bucket cache */ } mcache_bkttype_t; typedef struct mcache_cpu { decl_lck_mtx_data(, cc_lock); - mcache_bkt_t *cc_filled; /* the currently filled bucket */ - mcache_bkt_t *cc_pfilled; /* the previously filled bucket */ - u_int64_t cc_alloc; /* allocations from this cpu */ - u_int64_t cc_free; /* frees to this cpu */ - int cc_objs; /* number of objects in filled bkt */ - int cc_pobjs; /* number of objects in previous bkt */ - int cc_bktsize; /* number of elements in a full bkt */ + mcache_bkt_t *cc_filled; /* the currently filled bucket */ + mcache_bkt_t *cc_pfilled; /* the previously filled bucket */ + u_int64_t cc_alloc; /* allocations from this cpu */ + u_int64_t cc_free; /* frees to this cpu */ + int cc_objs; /* number of objects in filled bkt */ + int cc_pobjs; /* number of objects in previous bkt */ + int cc_bktsize; /* number of elements in a full bkt */ } __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE))) mcache_cpu_t; typedef unsigned int (*mcache_allocfn_t)(void *, mcache_obj_t ***, @@ -286,90 +286,90 @@ typedef struct mcache { /* * Cache properties */ - LIST_ENTRY(mcache) mc_list; /* cache linkage */ - char mc_name[32]; /* cache name */ - struct zone *mc_slab_zone; /* backend zone allocator */ - mcache_allocfn_t mc_slab_alloc; /* slab layer allocate callback */ - mcache_freefn_t mc_slab_free; /* slab layer free callback */ - mcache_auditfn_t mc_slab_audit; /* slab layer audit callback */ - mcache_logfn_t mc_slab_log; /* slab layer log callback */ + LIST_ENTRY(mcache) mc_list; /* cache linkage */ + char mc_name[32]; /* cache name */ + struct zone *mc_slab_zone; /* backend zone allocator */ + mcache_allocfn_t mc_slab_alloc; /* slab layer allocate callback */ + mcache_freefn_t mc_slab_free; /* slab layer free callback */ + mcache_auditfn_t mc_slab_audit; /* slab layer audit callback */ + mcache_logfn_t mc_slab_log; /* slab layer log callback */ mcache_notifyfn_t mc_slab_notify; /* slab layer notify callback */ - void *mc_private; /* opaque arg to callbacks */ - size_t mc_bufsize; /* object size */ - size_t mc_align; /* object alignment */ - u_int32_t mc_flags; /* cache creation flags */ - u_int32_t mc_purge_cnt; /* # of purges requested by slab */ - u_int32_t mc_enable_cnt; /* # of reenables due to 
purges */ - u_int32_t mc_waiter_cnt; /* # of slab layer waiters */ - u_int32_t mc_wretry_cnt; /* # of wait retries */ - u_int32_t mc_nwretry_cnt; /* # of no-wait retry attempts */ - u_int32_t mc_nwfail_cnt; /* # of no-wait retries that failed */ + void *mc_private; /* opaque arg to callbacks */ + size_t mc_bufsize; /* object size */ + size_t mc_align; /* object alignment */ + u_int32_t mc_flags; /* cache creation flags */ + u_int32_t mc_purge_cnt; /* # of purges requested by slab */ + u_int32_t mc_enable_cnt; /* # of reenables due to purges */ + u_int32_t mc_waiter_cnt; /* # of slab layer waiters */ + u_int32_t mc_wretry_cnt; /* # of wait retries */ + u_int32_t mc_nwretry_cnt; /* # of no-wait retry attempts */ + u_int32_t mc_nwfail_cnt; /* # of no-wait retries that failed */ decl_lck_mtx_data(, mc_sync_lock); /* protects purges and reenables */ - lck_attr_t *mc_sync_lock_attr; - lck_grp_t *mc_sync_lock_grp; - lck_grp_attr_t *mc_sync_lock_grp_attr; + lck_attr_t *mc_sync_lock_attr; + lck_grp_t *mc_sync_lock_grp; + lck_grp_attr_t *mc_sync_lock_grp_attr; /* * Keep CPU and buckets layers lock statistics separate. */ - lck_attr_t *mc_cpu_lock_attr; - lck_grp_t *mc_cpu_lock_grp; - lck_grp_attr_t *mc_cpu_lock_grp_attr; + lck_attr_t *mc_cpu_lock_attr; + lck_grp_t *mc_cpu_lock_grp; + lck_grp_attr_t *mc_cpu_lock_grp_attr; /* * Bucket layer common to all CPUs */ decl_lck_mtx_data(, mc_bkt_lock); - lck_attr_t *mc_bkt_lock_attr; - lck_grp_t *mc_bkt_lock_grp; + lck_attr_t *mc_bkt_lock_attr; + lck_grp_t *mc_bkt_lock_grp; lck_grp_attr_t *mc_bkt_lock_grp_attr; - mcache_bkttype_t *cache_bkttype; /* bucket type */ - mcache_bktlist_t mc_full; /* full buckets */ - mcache_bktlist_t mc_empty; /* empty buckets */ - size_t mc_chunksize; /* bufsize + alignment */ - u_int32_t mc_bkt_contention; /* lock contention count */ - u_int32_t mc_bkt_contention_prev; /* previous snapshot */ + mcache_bkttype_t *cache_bkttype; /* bucket type */ + mcache_bktlist_t mc_full; /* full buckets */ + mcache_bktlist_t mc_empty; /* empty buckets */ + size_t mc_chunksize; /* bufsize + alignment */ + u_int32_t mc_bkt_contention; /* lock contention count */ + u_int32_t mc_bkt_contention_prev; /* previous snapshot */ /* * Per-CPU layer, aligned at cache line boundary */ - mcache_cpu_t mc_cpu[1] - __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE))); + mcache_cpu_t mc_cpu[1] + __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE))); } mcache_t; -#define MCACHE_ALIGN 8 /* default guaranteed alignment */ +#define MCACHE_ALIGN 8 /* default guaranteed alignment */ /* Valid values for mc_flags */ -#define MCF_VERIFY 0x00000001 /* enable verification */ -#define MCF_TRACE 0x00000002 /* enable transaction auditing */ -#define MCF_NOCPUCACHE 0x00000010 /* disable CPU layer caching */ -#define MCF_NOLEAKLOG 0x00000100 /* disable leak logging */ -#define MCF_EXPLEAKLOG 0x00000200 /* expose leak info to user space */ - -#define MCF_DEBUG (MCF_VERIFY | MCF_TRACE) -#define MCF_FLAGS_MASK \ +#define MCF_VERIFY 0x00000001 /* enable verification */ +#define MCF_TRACE 0x00000002 /* enable transaction auditing */ +#define MCF_NOCPUCACHE 0x00000010 /* disable CPU layer caching */ +#define MCF_NOLEAKLOG 0x00000100 /* disable leak logging */ +#define MCF_EXPLEAKLOG 0x00000200 /* expose leak info to user space */ + +#define MCF_DEBUG (MCF_VERIFY | MCF_TRACE) +#define MCF_FLAGS_MASK \ (MCF_DEBUG | MCF_NOCPUCACHE | MCF_NOLEAKLOG | MCF_EXPLEAKLOG) /* Valid values for notify callback */ -#define MCN_RETRYALLOC 0x00000001 /* Allocation should be retried */ +#define 
MCN_RETRYALLOC 0x00000001 /* Allocation should be retried */ -#define MCACHE_STACK_DEPTH 16 +#define MCACHE_STACK_DEPTH 16 -#define MCA_TRN_MAX 2 /* Number of transactions to record */ +#define MCA_TRN_MAX 2 /* Number of transactions to record */ typedef struct mcache_audit { - struct mcache_audit *mca_next; /* next audit struct */ - void *mca_addr; /* address of buffer */ - mcache_t *mca_cache; /* parent cache of the buffer */ - size_t mca_contents_size; /* size of saved contents */ - void *mca_contents; /* user-specific saved contents */ - void *mca_uptr; /* user-specific pointer */ - uint32_t mca_uflags; /* user-specific flags */ - uint32_t mca_next_trn; + struct mcache_audit *mca_next; /* next audit struct */ + void *mca_addr; /* address of buffer */ + mcache_t *mca_cache; /* parent cache of the buffer */ + size_t mca_contents_size; /* size of saved contents */ + void *mca_contents; /* user-specific saved contents */ + void *mca_uptr; /* user-specific pointer */ + uint32_t mca_uflags; /* user-specific flags */ + uint32_t mca_next_trn; struct mca_trn { - struct thread *mca_thread; /* thread doing transaction */ - uint32_t mca_tstamp; - uint16_t mca_depth; - void *mca_stack[MCACHE_STACK_DEPTH]; + struct thread *mca_thread; /* thread doing transaction */ + uint32_t mca_tstamp; + uint16_t mca_depth; + void *mca_stack[MCACHE_STACK_DEPTH]; } mca_trns[MCA_TRN_MAX]; } mcache_audit_t; diff --git a/bsd/sys/md5.h b/bsd/sys/md5.h index 8d581add4..3e7c05866 100644 --- a/bsd/sys/md5.h +++ b/bsd/sys/md5.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,7 +34,7 @@ * The supported interface resides in <libkern/crypto/md5.h>. */ #warning \ - "<sys/md5.h> is a legacy header file; use <libkern/crypto/md5.h> instead." + "<sys/md5.h> is a legacy header file; use <libkern/crypto/md5.h> instead." #include <libkern/crypto/md5.h> diff --git a/bsd/sys/memory_maintenance.h b/bsd/sys/memory_maintenance.h index 1de00c6eb..75d6bf5e7 100644 --- a/bsd/sys/memory_maintenance.h +++ b/bsd/sys/memory_maintenance.h @@ -43,22 +43,21 @@ * * Operating the kern.darkboot sysctl is done via using the commands below: * - * - MEMORY_MAINTENANCE_DARK_BOOT_UNSET - * Unset the kern.darkboot sysctl (kern.sysctl=0). - * - MEMORY_MAINTENANCE_DARK_BOOT_SET - * Set the kern.darkboot sysctl (kern.sysctl=1). - * - MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT - * Set the kern.darkboot sysctl (kern.sysctl=1) and save its - * value into the 'darkboot' NVRAM variable. 
+ * - MEMORY_MAINTENANCE_DARK_BOOT_UNSET + * Unset the kern.darkboot sysctl (kern.sysctl=0). + * - MEMORY_MAINTENANCE_DARK_BOOT_SET + * Set the kern.darkboot sysctl (kern.sysctl=1). + * - MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT + * Set the kern.darkboot sysctl (kern.sysctl=1) and save its + * value into the 'darkboot' NVRAM variable. * * Example: - * sysctl kern.darkboot=2 + * sysctl kern.darkboot=2 */ -#define MEMORY_MAINTENANCE_DARK_BOOT_UNSET (0) -#define MEMORY_MAINTENANCE_DARK_BOOT_SET (1) -#define MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT (2) +#define MEMORY_MAINTENANCE_DARK_BOOT_UNSET (0) +#define MEMORY_MAINTENANCE_DARK_BOOT_SET (1) +#define MEMORY_MAINTENANCE_DARK_BOOT_SET_PERSISTENT (2) -#define MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME "darkboot" +#define MEMORY_MAINTENANCE_DARK_BOOT_NVRAM_NAME "darkboot" #endif /* _SYS_MEMORY_MAINTENANCE_H_ */ - diff --git a/bsd/sys/mman.h b/bsd/sys/mman.h index 8aba6441f..bd0b0618f 100644 --- a/bsd/sys/mman.h +++ b/bsd/sys/mman.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -73,7 +73,7 @@ * [TYM] posix_typed_mem_open() */ -#ifndef _SYS_MMAN_H_ +#ifndef _SYS_MMAN_H_ #define _SYS_MMAN_H_ #include @@ -92,40 +92,40 @@ /* * Protections are chosen from these bits, or-ed together */ -#define PROT_NONE 0x00 /* [MC2] no permissions */ -#define PROT_READ 0x01 /* [MC2] pages can be read */ -#define PROT_WRITE 0x02 /* [MC2] pages can be written */ -#define PROT_EXEC 0x04 /* [MC2] pages can be executed */ +#define PROT_NONE 0x00 /* [MC2] no permissions */ +#define PROT_READ 0x01 /* [MC2] pages can be read */ +#define PROT_WRITE 0x02 /* [MC2] pages can be written */ +#define PROT_EXEC 0x04 /* [MC2] pages can be executed */ /* * Flags contain sharing type and options. * Sharing types; choose one. 
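 *
 * For example (a user-space sketch; "len" is assumed to be set up by the
 * caller), exactly one sharing type is combined with a mapping type and
 * any option flags:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_ANON | MAP_PRIVATE, -1, 0);	// no backing file
 *	if (p == MAP_FAILED)
 *		return (-1);			// errno describes the failure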
*/ -#define MAP_SHARED 0x0001 /* [MF|SHM] share changes */ -#define MAP_PRIVATE 0x0002 /* [MF|SHM] changes are private */ +#define MAP_SHARED 0x0001 /* [MF|SHM] share changes */ +#define MAP_PRIVATE 0x0002 /* [MF|SHM] changes are private */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define MAP_COPY MAP_PRIVATE /* Obsolete */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define MAP_COPY MAP_PRIVATE /* Obsolete */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Other flags */ -#define MAP_FIXED 0x0010 /* [MF|SHM] interpret addr exactly */ +#define MAP_FIXED 0x0010 /* [MF|SHM] interpret addr exactly */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define MAP_RENAME 0x0020 /* Sun: rename private pages to file */ -#define MAP_NORESERVE 0x0040 /* Sun: don't reserve needed swap area */ -#define MAP_RESERVED0080 0x0080 /* previously unimplemented MAP_INHERIT */ -#define MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change file size */ -#define MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */ -#define MAP_NOCACHE 0x0400 /* don't cache pages for this mapping */ -#define MAP_JIT 0x0800 /* Allocate a region that will be used for JIT purposes */ +#define MAP_RENAME 0x0020 /* Sun: rename private pages to file */ +#define MAP_NORESERVE 0x0040 /* Sun: don't reserve needed swap area */ +#define MAP_RESERVED0080 0x0080 /* previously unimplemented MAP_INHERIT */ +#define MAP_NOEXTEND 0x0100 /* for MAP_FILE, don't change file size */ +#define MAP_HASSEMAPHORE 0x0200 /* region may contain semaphores */ +#define MAP_NOCACHE 0x0400 /* don't cache pages for this mapping */ +#define MAP_JIT 0x0800 /* Allocate a region that will be used for JIT purposes */ /* * Mapping type */ -#define MAP_FILE 0x0000 /* map from file (default) */ -#define MAP_ANON 0x1000 /* allocated from memory, swap space */ -#define MAP_ANONYMOUS MAP_ANON +#define MAP_FILE 0x0000 /* map from file (default) */ +#define MAP_ANON 0x1000 /* allocated from memory, swap space */ +#define MAP_ANONYMOUS MAP_ANON /* * The MAP_RESILIENT_* flags can be used when the caller wants to map some @@ -135,110 +135,110 @@ * only). * * MAP_RESILIENT_CODESIGN: - * accessing this mapping will not generate code-signing violations, + * accessing this mapping will not generate code-signing violations, * even if the contents are tainted. * MAP_RESILIENT_MEDIA: * accessing this mapping will not generate an exception if the contents * are not available (unreachable removable or remote media, access beyond * end-of-file, ...). Missing contents will be replaced with zeroes. 
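 *
 * For example (a user-space sketch; "len" and "fd" are assumed to be set
 * up by the caller), a tolerant read-only mapping of a file that may live
 * on removable or remote media:
 *
 *	void *p = mmap(NULL, len, PROT_READ,
 *	    MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_MEDIA, fd, 0);
 *
 * A fault on unavailable contents then reads back zeroes rather than
 * raising an exception.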
*/ -#define MAP_RESILIENT_CODESIGN 0x2000 /* no code-signing failures */ -#define MAP_RESILIENT_MEDIA 0x4000 /* no backing-store failures */ +#define MAP_RESILIENT_CODESIGN 0x2000 /* no code-signing failures */ +#define MAP_RESILIENT_MEDIA 0x4000 /* no backing-store failures */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Process memory locking */ -#define MCL_CURRENT 0x0001 /* [ML] Lock only current memory */ -#define MCL_FUTURE 0x0002 /* [ML] Lock all future memory as well */ +#define MCL_CURRENT 0x0001 /* [ML] Lock only current memory */ +#define MCL_FUTURE 0x0002 /* [ML] Lock all future memory as well */ /* * Error return from mmap() */ -#define MAP_FAILED ((void *)-1) /* [MF|SHM] mmap failed */ +#define MAP_FAILED ((void *)-1) /* [MF|SHM] mmap failed */ /* * msync() flags */ -#define MS_ASYNC 0x0001 /* [MF|SIO] return immediately */ -#define MS_INVALIDATE 0x0002 /* [MF|SIO] invalidate all cached data */ -#define MS_SYNC 0x0010 /* [MF|SIO] msync synchronously */ +#define MS_ASYNC 0x0001 /* [MF|SIO] return immediately */ +#define MS_INVALIDATE 0x0002 /* [MF|SIO] invalidate all cached data */ +#define MS_SYNC 0x0010 /* [MF|SIO] msync synchronously */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #define MS_KILLPAGES 0x0004 /* invalidate pages, leave mapped */ #define MS_DEACTIVATE 0x0008 /* deactivate pages, leave mapped */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Advice to madvise */ -#define POSIX_MADV_NORMAL 0 /* [MC1] no further special treatment */ -#define POSIX_MADV_RANDOM 1 /* [MC1] expect random page refs */ -#define POSIX_MADV_SEQUENTIAL 2 /* [MC1] expect sequential page refs */ -#define POSIX_MADV_WILLNEED 3 /* [MC1] will need these pages */ -#define POSIX_MADV_DONTNEED 4 /* [MC1] dont need these pages */ +#define POSIX_MADV_NORMAL 0 /* [MC1] no further special treatment */ +#define POSIX_MADV_RANDOM 1 /* [MC1] expect random page refs */ +#define POSIX_MADV_SEQUENTIAL 2 /* [MC1] expect sequential page refs */ +#define POSIX_MADV_WILLNEED 3 /* [MC1] will need these pages */ +#define POSIX_MADV_DONTNEED 4 /* [MC1] dont need these pages */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define MADV_NORMAL POSIX_MADV_NORMAL -#define MADV_RANDOM POSIX_MADV_RANDOM -#define MADV_SEQUENTIAL POSIX_MADV_SEQUENTIAL -#define MADV_WILLNEED POSIX_MADV_WILLNEED -#define MADV_DONTNEED POSIX_MADV_DONTNEED -#define MADV_FREE 5 /* pages unneeded, discard contents */ -#define MADV_ZERO_WIRED_PAGES 6 /* zero the wired pages that have not been unwired before the entry is deleted */ -#define MADV_FREE_REUSABLE 7 /* pages can be reused (by anyone) */ -#define MADV_FREE_REUSE 8 /* caller wants to reuse those pages */ -#define MADV_CAN_REUSE 9 -#define MADV_PAGEOUT 10 /* page out now (internal only) */ +#define MADV_NORMAL POSIX_MADV_NORMAL +#define MADV_RANDOM POSIX_MADV_RANDOM +#define MADV_SEQUENTIAL POSIX_MADV_SEQUENTIAL +#define MADV_WILLNEED POSIX_MADV_WILLNEED +#define MADV_DONTNEED POSIX_MADV_DONTNEED +#define MADV_FREE 5 /* pages unneeded, discard contents */ +#define MADV_ZERO_WIRED_PAGES 6 /* zero the wired pages that have not been unwired before the entry is deleted */ +#define MADV_FREE_REUSABLE 7 /* pages can be reused (by anyone) */ +#define MADV_FREE_REUSE 8 /* caller wants to reuse those pages */ +#define MADV_CAN_REUSE 9 +#define MADV_PAGEOUT 10 /* page out now (internal only) */ /* * Return bits from mincore */ -#define 
MINCORE_INCORE 0x1 /* Page is incore */ -#define MINCORE_REFERENCED 0x2 /* Page has been referenced by us */ -#define MINCORE_MODIFIED 0x4 /* Page has been modified by us */ -#define MINCORE_REFERENCED_OTHER 0x8 /* Page has been referenced */ -#define MINCORE_MODIFIED_OTHER 0x10 /* Page has been modified */ +#define MINCORE_INCORE 0x1 /* Page is incore */ +#define MINCORE_REFERENCED 0x2 /* Page has been referenced by us */ +#define MINCORE_MODIFIED 0x4 /* Page has been modified by us */ +#define MINCORE_REFERENCED_OTHER 0x8 /* Page has been referenced */ +#define MINCORE_MODIFIED_OTHER 0x10 /* Page has been modified */ #define MINCORE_PAGED_OUT 0x20 /* Page has been paged out */ #define MINCORE_COPIED 0x40 /* Page has been copied */ #define MINCORE_ANONYMOUS 0x80 /* Page belongs to an anonymous object */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #ifndef KERNEL __BEGIN_DECLS /* [ML] */ -int mlockall(int); -int munlockall(void); +int mlockall(int); +int munlockall(void); /* [MR] */ -int mlock(const void *, size_t); +int mlock(const void *, size_t); #ifndef _MMAP -#define _MMAP +#define _MMAP /* [MC3]*/ -void * mmap(void *, size_t, int, int, int, off_t) __DARWIN_ALIAS(mmap); +void * mmap(void *, size_t, int, int, int, off_t) __DARWIN_ALIAS(mmap); #endif /* [MPR] */ -int mprotect(void *, size_t, int) __DARWIN_ALIAS(mprotect); +int mprotect(void *, size_t, int) __DARWIN_ALIAS(mprotect); /* [MF|SIO] */ -int msync(void *, size_t, int) __DARWIN_ALIAS_C(msync); +int msync(void *, size_t, int) __DARWIN_ALIAS_C(msync); /* [MR] */ -int munlock(const void *, size_t); +int munlock(const void *, size_t); /* [MC3]*/ -int munmap(void *, size_t) __DARWIN_ALIAS(munmap); +int munmap(void *, size_t) __DARWIN_ALIAS(munmap); /* [SHM] */ -int shm_open(const char *, int, ...); -int shm_unlink(const char *); +int shm_open(const char *, int, ...); +int shm_unlink(const char *); /* [ADV] */ -int posix_madvise(void *, size_t, int); +int posix_madvise(void *, size_t, int); #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -int madvise(void *, size_t, int); -int mincore(const void *, size_t, char *); -int minherit(void *, size_t, int); +int madvise(void *, size_t, int); +int mincore(const void *, size_t, char *); +int minherit(void *, size_t, int); #endif #ifdef PRIVATE @@ -247,9 +247,9 @@ int mremap_encrypted(void *, size_t, __uint32_t, __uint32_t, __uint32_t); __END_DECLS -#else /* KERNEL */ +#else /* KERNEL */ #ifdef XNU_KERNEL_PRIVATE -void pshm_cache_init(void); /* for bsd_init() */ +void pshm_cache_init(void); /* for bsd_init() */ void pshm_lock_init(void); /* @@ -259,7 +259,7 @@ void pshm_lock_init(void); struct mmap_args; struct fileproc; int pshm_mmap(struct proc *p, struct mmap_args *uap, user_addr_t *retval, - struct fileproc *fp, off_t pageoff); + struct fileproc *fp, off_t pageoff); /* Really need to overhaul struct fileops to avoid this... 
*/ struct pshmnode; struct stat; diff --git a/bsd/sys/monotonic.h b/bsd/sys/monotonic.h index e880a9a0a..cfca2afee 100644 --- a/bsd/sys/monotonic.h +++ b/bsd/sys/monotonic.h @@ -95,13 +95,13 @@ union monotonic_ctl_info { #define MT_KDBG_TMPCPU_(CODE, FUNC) \ do { \ - if (kdebug_enable && \ - kdebug_debugid_enabled(MT_KDBG_TMPCPU_EVT(CODE))) { \ - uint64_t __counts[MT_CORE_NFIXED]; \ - mt_fixed_counts(__counts); \ - KDBG(MT_KDBG_TMPCPU_EVT(CODE) | (FUNC), COUNTS_INSTRS, \ - __counts[MT_CORE_CYCLES]); \ - } \ + if (kdebug_enable && \ + kdebug_debugid_enabled(MT_KDBG_TMPCPU_EVT(CODE))) { \ + uint64_t __counts[MT_CORE_NFIXED]; \ + mt_fixed_counts(__counts); \ + KDBG(MT_KDBG_TMPCPU_EVT(CODE) | (FUNC), COUNTS_INSTRS, \ + __counts[MT_CORE_CYCLES]); \ + } \ } while (0) #define MT_KDBG_TMPCPU(CODE) MT_KDBG_TMPCPU_(CODE, DBG_FUNC_NONE) @@ -118,13 +118,13 @@ union monotonic_ctl_info { #define MT_KDBG_TMPTH_(CODE, FUNC) \ do { \ - if (kdebug_enable && \ - kdebug_debugid_enabled(MT_KDBG_TMPTH_EVT(CODE))) { \ - uint64_t __counts[MT_CORE_NFIXED]; \ - mt_cur_thread_fixed_counts(__counts); \ - KDBG(MT_KDBG_TMPTH_EVT(CODE) | (FUNC), COUNTS_INSTRS, \ - __counts[MT_CORE_CYCLES]); \ - } \ + if (kdebug_enable && \ + kdebug_debugid_enabled(MT_KDBG_TMPTH_EVT(CODE))) { \ + uint64_t __counts[MT_CORE_NFIXED]; \ + mt_cur_thread_fixed_counts(__counts); \ + KDBG(MT_KDBG_TMPTH_EVT(CODE) | (FUNC), COUNTS_INSTRS, \ + __counts[MT_CORE_CYCLES]); \ + } \ } while (0) #define MT_KDBG_TMPTH(CODE) MT_KDBG_TMPTH_(CODE, DBG_FUNC_NONE) @@ -133,11 +133,11 @@ union monotonic_ctl_info { struct mt_device { const char *mtd_name; - int (* const mtd_init)(struct mt_device *dev); - int (* const mtd_add)(struct monotonic_config *config, uint32_t *ctr_out); - void (* const mtd_reset)(void); - void (* const mtd_enable)(bool enable); - int (* const mtd_read)(uint64_t ctr_mask, uint64_t *counts_out); + int(*const mtd_init)(struct mt_device *dev); + int(*const mtd_add)(struct monotonic_config *config, uint32_t *ctr_out); + void(*const mtd_reset)(void); + void(*const mtd_enable)(bool enable); + int(*const mtd_read)(uint64_t ctr_mask, uint64_t *counts_out); decl_lck_mtx_data(, mtd_lock); uint8_t mtd_nmonitors; diff --git a/bsd/sys/mount.h b/bsd/sys/mount.h index dff0bb6db..bff53904b 100644 --- a/bsd/sys/mount.h +++ b/bsd/sys/mount.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -69,16 +69,16 @@ #ifndef _SYS_MOUNT_H_ -#define _SYS_MOUNT_H_ +#define _SYS_MOUNT_H_ #include #include -#include /* needed for vol_capabilities_attr_t */ +#include /* needed for vol_capabilities_attr_t */ #ifndef KERNEL #include #include -#include /* XXX needed for user builds */ +#include /* XXX needed for user builds */ #include #else #include @@ -91,32 +91,32 @@ * file system statistics */ -#define MFSNAMELEN 15 /* length of fs type name, not inc. null */ -#define MFSTYPENAMELEN 16 /* length of fs type name including null */ +#define MFSNAMELEN 15 /* length of fs type name, not inc. null */ +#define MFSTYPENAMELEN 16 /* length of fs type name including null */ #if __DARWIN_64_BIT_INO_T -#define MNAMELEN MAXPATHLEN /* length of buffer for returned name */ +#define MNAMELEN MAXPATHLEN /* length of buffer for returned name */ #else /* ! __DARWIN_64_BIT_INO_T */ -#define MNAMELEN 90 /* length of buffer for returned name */ +#define MNAMELEN 90 /* length of buffer for returned name */ #endif /* __DARWIN_64_BIT_INO_T */ #define __DARWIN_STRUCT_STATFS64 { \ - uint32_t f_bsize; /* fundamental file system block size */ \ - int32_t f_iosize; /* optimal transfer block size */ \ - uint64_t f_blocks; /* total data blocks in file system */ \ - uint64_t f_bfree; /* free blocks in fs */ \ - uint64_t f_bavail; /* free blocks avail to non-superuser */ \ - uint64_t f_files; /* total file nodes in file system */ \ - uint64_t f_ffree; /* free file nodes in fs */ \ - fsid_t f_fsid; /* file system id */ \ - uid_t f_owner; /* user that mounted the filesystem */ \ - uint32_t f_type; /* type of filesystem */ \ - uint32_t f_flags; /* copy of mount exported flags */ \ - uint32_t f_fssubtype; /* fs sub-type (flavor) */ \ - char f_fstypename[MFSTYPENAMELEN]; /* fs type name */ \ - char f_mntonname[MAXPATHLEN]; /* directory on which mounted */ \ - char f_mntfromname[MAXPATHLEN]; /* mounted filesystem */ \ - uint32_t f_reserved[8]; /* For future use */ \ + uint32_t f_bsize; /* fundamental file system block size */ \ + int32_t f_iosize; /* optimal transfer block size */ \ + uint64_t f_blocks; /* total data blocks in file system */ \ + uint64_t f_bfree; /* free blocks in fs */ \ + uint64_t f_bavail; /* free blocks avail to non-superuser */ \ + uint64_t f_files; /* total file nodes in file system */ \ + uint64_t f_ffree; /* free file nodes in fs */ \ + fsid_t f_fsid; /* file system id */ \ + uid_t f_owner; /* user that mounted the filesystem */ \ + uint32_t f_type; /* type of filesystem */ \ + uint32_t f_flags; /* copy of mount exported flags */ \ + uint32_t f_fssubtype; /* fs sub-type (flavor) */ \ + char f_fstypename[MFSTYPENAMELEN]; /* fs type name */ \ + char f_mntonname[MAXPATHLEN]; /* directory on which mounted */ \ + char f_mntfromname[MAXPATHLEN]; /* mounted filesystem */ \ + uint32_t f_reserved[8]; /* For future use */ \ } #if !__DARWIN_ONLY_64_BIT_INO_T @@ -135,26 +135,26 @@ struct statfs __DARWIN_STRUCT_STATFS64; * LP64 - WARNING - must be kept in sync with struct user_statfs in mount_internal.h. 
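 *
 * For illustration (a user-space sketch): callers do not pick a layout by
 * hand; statfs(2) fills in whichever variant the __DARWIN_64_BIT_INO_T
 * selection above implies:
 *
 *	struct statfs sfs;
 *	if (statfs("/", &sfs) == 0)
 *		printf("%s on %s\n", sfs.f_mntfromname, sfs.f_mntonname);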
*/ struct statfs { - short f_otype; /* TEMPORARY SHADOW COPY OF f_type */ - short f_oflags; /* TEMPORARY SHADOW COPY OF f_flags */ - long f_bsize; /* fundamental file system block size */ - long f_iosize; /* optimal transfer block size */ - long f_blocks; /* total data blocks in file system */ - long f_bfree; /* free blocks in fs */ - long f_bavail; /* free blocks avail to non-superuser */ - long f_files; /* total file nodes in file system */ - long f_ffree; /* free file nodes in fs */ - fsid_t f_fsid; /* file system id */ - uid_t f_owner; /* user that mounted the filesystem */ - short f_reserved1; /* spare for later */ - short f_type; /* type of filesystem */ - long f_flags; /* copy of mount exported flags */ - long f_reserved2[2]; /* reserved for future use */ - char f_fstypename[MFSNAMELEN]; /* fs type name */ - char f_mntonname[MNAMELEN]; /* directory on which mounted */ - char f_mntfromname[MNAMELEN];/* mounted filesystem */ - char f_reserved3; /* For alignment */ - long f_reserved4[4]; /* For future use */ + short f_otype; /* TEMPORARY SHADOW COPY OF f_type */ + short f_oflags; /* TEMPORARY SHADOW COPY OF f_flags */ + long f_bsize; /* fundamental file system block size */ + long f_iosize; /* optimal transfer block size */ + long f_blocks; /* total data blocks in file system */ + long f_bfree; /* free blocks in fs */ + long f_bavail; /* free blocks avail to non-superuser */ + long f_files; /* total file nodes in file system */ + long f_ffree; /* free file nodes in fs */ + fsid_t f_fsid; /* file system id */ + uid_t f_owner; /* user that mounted the filesystem */ + short f_reserved1; /* spare for later */ + short f_type; /* type of filesystem */ + long f_flags; /* copy of mount exported flags */ + long f_reserved2[2]; /* reserved for future use */ + char f_fstypename[MFSNAMELEN]; /* fs type name */ + char f_mntonname[MNAMELEN]; /* directory on which mounted */ + char f_mntfromname[MNAMELEN];/* mounted filesystem */ + char f_reserved3; /* For alignment */ + long f_reserved4[4]; /* For future use */ }; #endif /* __DARWIN_64_BIT_INO_T */ @@ -162,22 +162,22 @@ struct statfs { #pragma pack(4) struct vfsstatfs { - uint32_t f_bsize; /* fundamental file system block size */ - size_t f_iosize; /* optimal transfer block size */ - uint64_t f_blocks; /* total data blocks in file system */ - uint64_t f_bfree; /* free blocks in fs */ - uint64_t f_bavail; /* free blocks avail to non-superuser */ - uint64_t f_bused; /* free blocks avail to non-superuser */ - uint64_t f_files; /* total file nodes in file system */ - uint64_t f_ffree; /* free file nodes in fs */ - fsid_t f_fsid; /* file system id */ - uid_t f_owner; /* user that mounted the filesystem */ - uint64_t f_flags; /* copy of mount exported flags */ - char f_fstypename[MFSTYPENAMELEN];/* fs type name inclus */ - char f_mntonname[MAXPATHLEN];/* directory on which mounted */ - char f_mntfromname[MAXPATHLEN];/* mounted filesystem */ - uint32_t f_fssubtype; /* fs sub-type (flavor) */ - void *f_reserved[2]; /* For future use == 0 */ + uint32_t f_bsize; /* fundamental file system block size */ + size_t f_iosize; /* optimal transfer block size */ + uint64_t f_blocks; /* total data blocks in file system */ + uint64_t f_bfree; /* free blocks in fs */ + uint64_t f_bavail; /* free blocks avail to non-superuser */ + uint64_t f_bused; /* free blocks avail to non-superuser */ + uint64_t f_files; /* total file nodes in file system */ + uint64_t f_ffree; /* free file nodes in fs */ + fsid_t f_fsid; /* file system id */ + uid_t f_owner; /* user that mounted the 
filesystem */ + uint64_t f_flags; /* copy of mount exported flags */ + char f_fstypename[MFSTYPENAMELEN];/* fs type name inclus */ + char f_mntonname[MAXPATHLEN];/* directory on which mounted */ + char f_mntfromname[MAXPATHLEN];/* mounted filesystem */ + uint32_t f_fssubtype; /* fs sub-type (flavor) */ + void *f_reserved[2]; /* For future use == 0 */ }; #pragma pack() @@ -189,42 +189,42 @@ struct vfsstatfs { * vfs_setattr() KPIs. */ -#define VFSATTR_INIT(s) ((s)->f_supported = (s)->f_active = 0LL) -#define VFSATTR_SET_SUPPORTED(s, a) ((s)->f_supported |= VFSATTR_ ## a) -#define VFSATTR_IS_SUPPORTED(s, a) ((s)->f_supported & VFSATTR_ ## a) -#define VFSATTR_CLEAR_ACTIVE(s, a) ((s)->f_active &= ~VFSATTR_ ## a) -#define VFSATTR_IS_ACTIVE(s, a) ((s)->f_active & VFSATTR_ ## a) -#define VFSATTR_ALL_SUPPORTED(s) (((s)->f_active & (s)->f_supported) == (s)->f_active) -#define VFSATTR_WANTED(s, a) ((s)->f_active |= VFSATTR_ ## a) -#define VFSATTR_RETURN(s, a, x) do { (s)-> a = (x); VFSATTR_SET_SUPPORTED(s, a);} while(0) - -#define VFSATTR_f_objcount (1LL<< 0) -#define VFSATTR_f_filecount (1LL<< 1) -#define VFSATTR_f_dircount (1LL<< 2) -#define VFSATTR_f_maxobjcount (1LL<< 3) -#define VFSATTR_f_bsize (1LL<< 4) -#define VFSATTR_f_iosize (1LL<< 5) -#define VFSATTR_f_blocks (1LL<< 6) -#define VFSATTR_f_bfree (1LL<< 7) -#define VFSATTR_f_bavail (1LL<< 8) -#define VFSATTR_f_bused (1LL<< 9) -#define VFSATTR_f_files (1LL<< 10) -#define VFSATTR_f_ffree (1LL<< 11) -#define VFSATTR_f_fsid (1LL<< 12) -#define VFSATTR_f_owner (1LL<< 13) -#define VFSATTR_f_capabilities (1LL<< 14) -#define VFSATTR_f_attributes (1LL<< 15) -#define VFSATTR_f_create_time (1LL<< 16) -#define VFSATTR_f_modify_time (1LL<< 17) -#define VFSATTR_f_access_time (1LL<< 18) -#define VFSATTR_f_backup_time (1LL<< 19) -#define VFSATTR_f_fssubtype (1LL<< 20) -#define VFSATTR_f_vol_name (1LL<< 21) -#define VFSATTR_f_signature (1LL<< 22) -#define VFSATTR_f_carbon_fsid (1LL<< 23) -#define VFSATTR_f_uuid (1LL<< 24) -#define VFSATTR_f_quota (1LL<< 25) -#define VFSATTR_f_reserved (1LL<< 26) +#define VFSATTR_INIT(s) ((s)->f_supported = (s)->f_active = 0LL) +#define VFSATTR_SET_SUPPORTED(s, a) ((s)->f_supported |= VFSATTR_ ## a) +#define VFSATTR_IS_SUPPORTED(s, a) ((s)->f_supported & VFSATTR_ ## a) +#define VFSATTR_CLEAR_ACTIVE(s, a) ((s)->f_active &= ~VFSATTR_ ## a) +#define VFSATTR_IS_ACTIVE(s, a) ((s)->f_active & VFSATTR_ ## a) +#define VFSATTR_ALL_SUPPORTED(s) (((s)->f_active & (s)->f_supported) == (s)->f_active) +#define VFSATTR_WANTED(s, a) ((s)->f_active |= VFSATTR_ ## a) +#define VFSATTR_RETURN(s, a, x) do { (s)-> a = (x); VFSATTR_SET_SUPPORTED(s, a);} while(0) + +#define VFSATTR_f_objcount (1LL<< 0) +#define VFSATTR_f_filecount (1LL<< 1) +#define VFSATTR_f_dircount (1LL<< 2) +#define VFSATTR_f_maxobjcount (1LL<< 3) +#define VFSATTR_f_bsize (1LL<< 4) +#define VFSATTR_f_iosize (1LL<< 5) +#define VFSATTR_f_blocks (1LL<< 6) +#define VFSATTR_f_bfree (1LL<< 7) +#define VFSATTR_f_bavail (1LL<< 8) +#define VFSATTR_f_bused (1LL<< 9) +#define VFSATTR_f_files (1LL<< 10) +#define VFSATTR_f_ffree (1LL<< 11) +#define VFSATTR_f_fsid (1LL<< 12) +#define VFSATTR_f_owner (1LL<< 13) +#define VFSATTR_f_capabilities (1LL<< 14) +#define VFSATTR_f_attributes (1LL<< 15) +#define VFSATTR_f_create_time (1LL<< 16) +#define VFSATTR_f_modify_time (1LL<< 17) +#define VFSATTR_f_access_time (1LL<< 18) +#define VFSATTR_f_backup_time (1LL<< 19) +#define VFSATTR_f_fssubtype (1LL<< 20) +#define VFSATTR_f_vol_name (1LL<< 21) +#define VFSATTR_f_signature (1LL<< 22) +#define 
VFSATTR_f_carbon_fsid (1LL<< 23) +#define VFSATTR_f_uuid (1LL<< 24) +#define VFSATTR_f_quota (1LL<< 25) +#define VFSATTR_f_reserved (1LL<< 26) /* @@ -238,90 +238,90 @@ struct vfsstatfs { * size of the structure or attempt to copy it. */ struct vfs_attr { - uint64_t f_supported; - uint64_t f_active; - - uint64_t f_objcount; /* number of filesystem objects in volume */ - uint64_t f_filecount; /* ... files */ - uint64_t f_dircount; /* ... directories */ - uint64_t f_maxobjcount; /* maximum number of filesystem objects */ - - uint32_t f_bsize; /* block size for the below size values */ - size_t f_iosize; /* optimal transfer block size */ - uint64_t f_blocks; /* total data blocks in file system */ - uint64_t f_bfree; /* free blocks in fs */ - uint64_t f_bavail; /* free blocks avail to non-superuser */ - uint64_t f_bused; /* blocks in use */ - uint64_t f_files; /* total file nodes in file system */ - uint64_t f_ffree; /* free file nodes in fs */ - fsid_t f_fsid; /* file system id */ - uid_t f_owner; /* user that mounted the filesystem */ - - vol_capabilities_attr_t f_capabilities; + uint64_t f_supported; + uint64_t f_active; + + uint64_t f_objcount; /* number of filesystem objects in volume */ + uint64_t f_filecount; /* ... files */ + uint64_t f_dircount; /* ... directories */ + uint64_t f_maxobjcount; /* maximum number of filesystem objects */ + + uint32_t f_bsize; /* block size for the below size values */ + size_t f_iosize; /* optimal transfer block size */ + uint64_t f_blocks; /* total data blocks in file system */ + uint64_t f_bfree; /* free blocks in fs */ + uint64_t f_bavail; /* free blocks avail to non-superuser */ + uint64_t f_bused; /* blocks in use */ + uint64_t f_files; /* total file nodes in file system */ + uint64_t f_ffree; /* free file nodes in fs */ + fsid_t f_fsid; /* file system id */ + uid_t f_owner; /* user that mounted the filesystem */ + + vol_capabilities_attr_t f_capabilities; vol_attributes_attr_t f_attributes; - struct timespec f_create_time; /* creation time */ - struct timespec f_modify_time; /* last modification time */ - struct timespec f_access_time; /* time of last access */ - struct timespec f_backup_time; /* last backup time */ + struct timespec f_create_time; /* creation time */ + struct timespec f_modify_time; /* last modification time */ + struct timespec f_access_time; /* time of last access */ + struct timespec f_backup_time; /* last backup time */ - uint32_t f_fssubtype; /* filesystem subtype */ + uint32_t f_fssubtype; /* filesystem subtype */ - char *f_vol_name; /* volume name */ + char *f_vol_name; /* volume name */ - uint16_t f_signature; /* used for ATTR_VOL_SIGNATURE, Carbon's FSVolumeInfo.signature */ - uint16_t f_carbon_fsid; /* same as Carbon's FSVolumeInfo.filesystemID */ - uuid_t f_uuid; /* file system UUID (version 3 or 5), available in 10.6 and later */ - uint64_t f_quota; /* total quota data blocks in file system */ - uint64_t f_reserved; /* total reserved data blocks in file system */ + uint16_t f_signature; /* used for ATTR_VOL_SIGNATURE, Carbon's FSVolumeInfo.signature */ + uint16_t f_carbon_fsid; /* same as Carbon's FSVolumeInfo.filesystemID */ + uuid_t f_uuid; /* file system UUID (version 3 or 5), available in 10.6 and later */ + uint64_t f_quota; /* total quota data blocks in file system */ + uint64_t f_reserved; /* total reserved data blocks in file system */ }; #pragma pack() -#endif /* KERNEL */ +#endif /* KERNEL */ /* * User specifiable flags. * * Unmount uses MNT_FORCE flag. 
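 *
 * For example (a sketch; the fs type, path, and "args" structure are
 * hypothetical), these flags are OR-ed into the mount(2) flags argument,
 * and MNT_FORCE is handed to unmount(2):
 *
 *	mount("apfs", "/Volumes/Data", MNT_RDONLY | MNT_NOSUID, &args);
 *	unmount("/Volumes/Data", MNT_FORCE);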
*/ -#define MNT_RDONLY 0x00000001 /* read only filesystem */ -#define MNT_SYNCHRONOUS 0x00000002 /* file system written synchronously */ -#define MNT_NOEXEC 0x00000004 /* can't exec from filesystem */ -#define MNT_NOSUID 0x00000008 /* don't honor setuid bits on fs */ -#define MNT_NODEV 0x00000010 /* don't interpret special files */ -#define MNT_UNION 0x00000020 /* union with underlying filesystem */ -#define MNT_ASYNC 0x00000040 /* file system written asynchronously */ -#define MNT_CPROTECT 0x00000080 /* file system supports content protection */ +#define MNT_RDONLY 0x00000001 /* read only filesystem */ +#define MNT_SYNCHRONOUS 0x00000002 /* file system written synchronously */ +#define MNT_NOEXEC 0x00000004 /* can't exec from filesystem */ +#define MNT_NOSUID 0x00000008 /* don't honor setuid bits on fs */ +#define MNT_NODEV 0x00000010 /* don't interpret special files */ +#define MNT_UNION 0x00000020 /* union with underlying filesystem */ +#define MNT_ASYNC 0x00000040 /* file system written asynchronously */ +#define MNT_CPROTECT 0x00000080 /* file system supports content protection */ /* * NFS export related mount flags. */ -#define MNT_EXPORTED 0x00000100 /* file system is exported */ +#define MNT_EXPORTED 0x00000100 /* file system is exported */ /* * MAC labeled / "quarantined" flag */ -#define MNT_QUARANTINE 0x00000400 /* file system is quarantined */ +#define MNT_QUARANTINE 0x00000400 /* file system is quarantined */ /* * Flags set by internal operations. */ -#define MNT_LOCAL 0x00001000 /* filesystem is stored locally */ -#define MNT_QUOTA 0x00002000 /* quotas are enabled on filesystem */ -#define MNT_ROOTFS 0x00004000 /* identifies the root filesystem */ -#define MNT_DOVOLFS 0x00008000 /* FS supports volfs (deprecated flag in Mac OS X 10.5) */ +#define MNT_LOCAL 0x00001000 /* filesystem is stored locally */ +#define MNT_QUOTA 0x00002000 /* quotas are enabled on filesystem */ +#define MNT_ROOTFS 0x00004000 /* identifies the root filesystem */ +#define MNT_DOVOLFS 0x00008000 /* FS supports volfs (deprecated flag in Mac OS X 10.5) */ -#define MNT_DONTBROWSE 0x00100000 /* file system is not appropriate path to user data */ +#define MNT_DONTBROWSE 0x00100000 /* file system is not appropriate path to user data */ #define MNT_IGNORE_OWNERSHIP 0x00200000 /* VFS will ignore ownership information on filesystem objects */ -#define MNT_AUTOMOUNTED 0x00400000 /* filesystem was mounted by automounter */ -#define MNT_JOURNALED 0x00800000 /* filesystem is journaled */ -#define MNT_NOUSERXATTR 0x01000000 /* Don't allow user extended attributes */ -#define MNT_DEFWRITE 0x02000000 /* filesystem should defer writes */ -#define MNT_MULTILABEL 0x04000000 /* MAC support for individual labels */ -#define MNT_NOATIME 0x10000000 /* disable update of file access time */ -#define MNT_SNAPSHOT 0x40000000 /* The mount is a snapshot */ +#define MNT_AUTOMOUNTED 0x00400000 /* filesystem was mounted by automounter */ +#define MNT_JOURNALED 0x00800000 /* filesystem is journaled */ +#define MNT_NOUSERXATTR 0x01000000 /* Don't allow user extended attributes */ +#define MNT_DEFWRITE 0x02000000 /* filesystem should defer writes */ +#define MNT_MULTILABEL 0x04000000 /* MAC support for individual labels */ +#define MNT_NOATIME 0x10000000 /* disable update of file access time */ +#define MNT_SNAPSHOT 0x40000000 /* The mount is a snapshot */ #ifdef BSD_KERNEL_PRIVATE /* #define MNT_IMGSRC_BY_INDEX 0x20000000 see sys/imgsrc.h */ #endif /* BSD_KERNEL_PRIVATE */ @@ -334,25 +334,25 @@ struct vfs_attr { * XXX I think that this 
could now become (~(MNT_CMDFLAGS)) * but the 'mount' program may need changing to handle this. */ -#define MNT_VISFLAGMASK (MNT_RDONLY | MNT_SYNCHRONOUS | MNT_NOEXEC | \ - MNT_NOSUID | MNT_NODEV | MNT_UNION | \ - MNT_ASYNC | MNT_EXPORTED | MNT_QUARANTINE | \ - MNT_LOCAL | MNT_QUOTA | \ - MNT_ROOTFS | MNT_DOVOLFS | MNT_DONTBROWSE | \ - MNT_IGNORE_OWNERSHIP | MNT_AUTOMOUNTED | MNT_JOURNALED | \ - MNT_NOUSERXATTR | MNT_DEFWRITE | MNT_MULTILABEL | \ - MNT_NOATIME | MNT_SNAPSHOT | MNT_CPROTECT) +#define MNT_VISFLAGMASK (MNT_RDONLY | MNT_SYNCHRONOUS | MNT_NOEXEC | \ + MNT_NOSUID | MNT_NODEV | MNT_UNION | \ + MNT_ASYNC | MNT_EXPORTED | MNT_QUARANTINE | \ + MNT_LOCAL | MNT_QUOTA | \ + MNT_ROOTFS | MNT_DOVOLFS | MNT_DONTBROWSE | \ + MNT_IGNORE_OWNERSHIP | MNT_AUTOMOUNTED | MNT_JOURNALED | \ + MNT_NOUSERXATTR | MNT_DEFWRITE | MNT_MULTILABEL | \ + MNT_NOATIME | MNT_SNAPSHOT | MNT_CPROTECT) /* * External filesystem command modifier flags. * Unmount can use the MNT_FORCE flag. * XXX These are not STATES and really should be somewhere else. * External filesystem control flags. */ -#define MNT_UPDATE 0x00010000 /* not a real mount, just an update */ -#define MNT_NOBLOCK 0x00020000 /* don't block unmount if not responding */ -#define MNT_RELOAD 0x00040000 /* reload filesystem data */ -#define MNT_FORCE 0x00080000 /* force unmount or readonly change */ -#define MNT_CMDFLAGS (MNT_UPDATE|MNT_NOBLOCK|MNT_RELOAD|MNT_FORCE) +#define MNT_UPDATE 0x00010000 /* not a real mount, just an update */ +#define MNT_NOBLOCK 0x00020000 /* don't block unmount if not responding */ +#define MNT_RELOAD 0x00040000 /* reload filesystem data */ +#define MNT_FORCE 0x00080000 /* force unmount or readonly change */ +#define MNT_CMDFLAGS (MNT_UPDATE|MNT_NOBLOCK|MNT_RELOAD|MNT_FORCE) @@ -362,25 +362,25 @@ struct vfs_attr { * Second level identifier specifies which filesystem. Second level * identifier VFS_GENERIC returns information about all filesystems. */ -#define VFS_GENERIC 0 /* generic filesystem information */ -#define VFS_NUMMNTOPS 1 /* int: total num of vfs mount/unmount operations */ +#define VFS_GENERIC 0 /* generic filesystem information */ +#define VFS_NUMMNTOPS 1 /* int: total num of vfs mount/unmount operations */ /* * Third level identifiers for VFS_GENERIC are given below; third * level identifiers for specific filesystems are given in their * mount specific header files. */ -#define VFS_MAXTYPENUM 1 /* int: highest defined filesystem type */ -#define VFS_CONF 2 /* struct: vfsconf for filesystem given - as next argument */ +#define VFS_MAXTYPENUM 1 /* int: highest defined filesystem type */ +#define VFS_CONF 2 /* struct: vfsconf for filesystem given + * as next argument */ /* * Flags for various system call interfaces. 
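 *
 * For example (a user-space sketch), getfsstat(2) is typically called
 * twice: once to learn the mount count, then to fill a buffer without
 * blocking on unresponsive filesystems:
 *
 *	int n = getfsstat(NULL, 0, MNT_NOWAIT);	// count of mounts
 *	struct statfs *sf = malloc((size_t)n * sizeof(*sf));
 *	n = getfsstat(sf, n * (int)sizeof(*sf), MNT_NOWAIT);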
* * waitfor flags to vfs_sync() and getfsstat() */ -#define MNT_WAIT 1 /* synchronized I/O file integrity completion */ -#define MNT_NOWAIT 2 /* start all I/O, but do not wait for it */ -#define MNT_DWAIT 4 /* synchronized I/O data integrity completion */ +#define MNT_WAIT 1 /* synchronized I/O file integrity completion */ +#define MNT_NOWAIT 2 /* start all I/O, but do not wait for it */ +#define MNT_DWAIT 4 /* synchronized I/O data integrity completion */ #ifndef KERNEL @@ -392,47 +392,47 @@ typedef struct vnode * vnode_t; /* Reserved fields preserve binary compatibility */ struct vfsconf { - uint32_t vfc_reserved1; /* opaque */ - char vfc_name[MFSNAMELEN]; /* filesystem type name */ - int vfc_typenum; /* historic filesystem type number */ - int vfc_refcount; /* number mounted of this type */ - int vfc_flags; /* permanent flags */ - uint32_t vfc_reserved2; /* opaque */ - uint32_t vfc_reserved3; /* opaque */ + uint32_t vfc_reserved1; /* opaque */ + char vfc_name[MFSNAMELEN]; /* filesystem type name */ + int vfc_typenum; /* historic filesystem type number */ + int vfc_refcount; /* number mounted of this type */ + int vfc_flags; /* permanent flags */ + uint32_t vfc_reserved2; /* opaque */ + uint32_t vfc_reserved3; /* opaque */ }; struct vfsidctl { - int vc_vers; /* should be VFSIDCTL_VERS1 (below) */ - fsid_t vc_fsid; /* fsid to operate on. */ - void *vc_ptr; /* pointer to data structure. */ - size_t vc_len; /* sizeof said structure. */ - u_int32_t vc_spare[12]; /* spare (must be zero). */ + int vc_vers; /* should be VFSIDCTL_VERS1 (below) */ + fsid_t vc_fsid; /* fsid to operate on. */ + void *vc_ptr; /* pointer to data structure. */ + size_t vc_len; /* sizeof said structure. */ + u_int32_t vc_spare[12]; /* spare (must be zero). */ }; /* vfsidctl API version. */ -#define VFS_CTL_VERS1 0x01 +#define VFS_CTL_VERS1 0x01 #ifdef KERNEL struct user_vfsidctl { - int vc_vers; /* should be VFSIDCTL_VERS1 (below) */ - fsid_t vc_fsid; /* fsid to operate on. */ - user_addr_t vc_ptr __attribute((aligned(8))); /* pointer to data structure. */ - user_size_t vc_len; /* sizeof said structure. */ - u_int32_t vc_spare[12]; /* spare (must be zero). */ + int vc_vers; /* should be VFSIDCTL_VERS1 (below) */ + fsid_t vc_fsid; /* fsid to operate on. */ + user_addr_t vc_ptr __attribute((aligned(8))); /* pointer to data structure. */ + user_size_t vc_len; /* sizeof said structure. */ + u_int32_t vc_spare[12]; /* spare (must be zero). */ }; struct user32_vfsidctl { - int vc_vers; /* should be VFSIDCTL_VERS1 (below) */ - fsid_t vc_fsid; /* fsid to operate on. */ - user32_addr_t vc_ptr; /* pointer to data structure. */ - user32_size_t vc_len; /* sizeof said structure. */ - u_int32_t vc_spare[12]; /* spare (must be zero). */ + int vc_vers; /* should be VFSIDCTL_VERS1 (below) */ + fsid_t vc_fsid; /* fsid to operate on. */ + user32_addr_t vc_ptr; /* pointer to data structure. */ + user32_size_t vc_len; /* sizeof said structure. */ + u_int32_t vc_spare[12]; /* spare (must be zero). */ }; union union_vfsidctl { /* the fields vc_vers and vc_fsid are compatible */ - struct user32_vfsidctl vc32; - struct user_vfsidctl vc64; + struct user32_vfsidctl vc32; + struct user_vfsidctl vc64; }; #endif /* KERNEL */ @@ -441,305 +441,305 @@ union union_vfsidctl { /* the fields vc_vers and vc_fsid are compatible */ * New style VFS sysctls, do not reuse/conflict with the namespace for * private sysctls. 
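/*
 * The vfsconf structure above is what getvfsbyname(3) fills in for
 * userspace callers; a small sketch of looking up a filesystem type:
 */
#include <sys/param.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	struct vfsconf vfc;

	if (getvfsbyname("hfs", &vfc) != 0)
		return 1;
	printf("%s: typenum %d, %d active mount(s)\n",
	    vfc.vfc_name, vfc.vfc_typenum, vfc.vfc_refcount);
	return 0;
}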
*/ -#define VFS_CTL_STATFS 0x00010001 /* statfs */ -#define VFS_CTL_UMOUNT 0x00010002 /* unmount */ -#define VFS_CTL_QUERY 0x00010003 /* anything wrong? (vfsquery) */ -#define VFS_CTL_NEWADDR 0x00010004 /* reconnect to new address */ -#define VFS_CTL_TIMEO 0x00010005 /* set timeout for vfs notification */ -#define VFS_CTL_NOLOCKS 0x00010006 /* disable file locking */ -#define VFS_CTL_SADDR 0x00010007 /* get server address */ -#define VFS_CTL_DISC 0x00010008 /* server disconnected */ +#define VFS_CTL_STATFS 0x00010001 /* statfs */ +#define VFS_CTL_UMOUNT 0x00010002 /* unmount */ +#define VFS_CTL_QUERY 0x00010003 /* anything wrong? (vfsquery) */ +#define VFS_CTL_NEWADDR 0x00010004 /* reconnect to new address */ +#define VFS_CTL_TIMEO 0x00010005 /* set timeout for vfs notification */ +#define VFS_CTL_NOLOCKS 0x00010006 /* disable file locking */ +#define VFS_CTL_SADDR 0x00010007 /* get server address */ +#define VFS_CTL_DISC 0x00010008 /* server disconnected */ #define VFS_CTL_SERVERINFO 0x00010009 /* information about fs server */ -#define VFS_CTL_NSTATUS 0x0001000A /* netfs mount status */ +#define VFS_CTL_NSTATUS 0x0001000A /* netfs mount status */ struct vfsquery { - u_int32_t vq_flags; - u_int32_t vq_spare[31]; + u_int32_t vq_flags; + u_int32_t vq_spare[31]; }; struct vfs_server { - int32_t vs_minutes; /* minutes until server goes down. */ - u_int8_t vs_server_name[MAXHOSTNAMELEN*3]; /* UTF8 server name to display (null terminated) */ + int32_t vs_minutes; /* minutes until server goes down. */ + u_int8_t vs_server_name[MAXHOSTNAMELEN * 3]; /* UTF8 server name to display (null terminated) */ }; /* * NetFS mount status - returned by VFS_CTL_NSTATUS */ struct netfs_status { - u_int32_t ns_status; // Current status of mount (vfsquery flags) - char ns_mountopts[512]; // Significant mount options - uint32_t ns_waittime; // Time waiting for reply (sec) - uint32_t ns_threadcount; // Number of threads blocked on network calls - uint64_t ns_threadids[0]; // Thread IDs of those blocked threads + u_int32_t ns_status; // Current status of mount (vfsquery flags) + char ns_mountopts[512]; // Significant mount options + uint32_t ns_waittime; // Time waiting for reply (sec) + uint32_t ns_threadcount; // Number of threads blocked on network calls + uint64_t ns_threadids[0]; // Thread IDs of those blocked threads }; /* vfsquery flags */ -#define VQ_NOTRESP 0x0001 /* server down */ -#define VQ_NEEDAUTH 0x0002 /* server bad auth */ -#define VQ_LOWDISK 0x0004 /* we're low on space */ -#define VQ_MOUNT 0x0008 /* new filesystem arrived */ -#define VQ_UNMOUNT 0x0010 /* filesystem has left */ -#define VQ_DEAD 0x0020 /* filesystem is dead, needs force unmount */ -#define VQ_ASSIST 0x0040 /* filesystem needs assistance from external program */ -#define VQ_NOTRESPLOCK 0x0080 /* server lockd down */ -#define VQ_UPDATE 0x0100 /* filesystem information has changed */ -#define VQ_VERYLOWDISK 0x0200 /* file system has *very* little disk space left */ -#define VQ_SYNCEVENT 0x0400 /* a sync just happened (not set by kernel starting Mac OS X 10.9) */ +#define VQ_NOTRESP 0x0001 /* server down */ +#define VQ_NEEDAUTH 0x0002 /* server bad auth */ +#define VQ_LOWDISK 0x0004 /* we're low on space */ +#define VQ_MOUNT 0x0008 /* new filesystem arrived */ +#define VQ_UNMOUNT 0x0010 /* filesystem has left */ +#define VQ_DEAD 0x0020 /* filesystem is dead, needs force unmount */ +#define VQ_ASSIST 0x0040 /* filesystem needs assistance from external program */ +#define VQ_NOTRESPLOCK 0x0080 /* server lockd down */ +#define VQ_UPDATE 
0x0100 /* filesystem information has changed */ +#define VQ_VERYLOWDISK 0x0200 /* file system has *very* little disk space left */ +#define VQ_SYNCEVENT 0x0400 /* a sync just happened (not set by kernel starting Mac OS X 10.9) */ #define VQ_SERVEREVENT 0x0800 /* server issued notification/warning */ -#define VQ_QUOTA 0x1000 /* a user quota has been hit */ -#define VQ_NEARLOWDISK 0x2000 /* Above lowdisk and below desired disk space */ -#define VQ_DESIRED_DISK 0x4000 /* the desired disk space */ -#define VQ_FLAG8000 0x8000 /* placeholder */ +#define VQ_QUOTA 0x1000 /* a user quota has been hit */ +#define VQ_NEARLOWDISK 0x2000 /* Above lowdisk and below desired disk space */ +#define VQ_DESIRED_DISK 0x4000 /* the desired disk space */ +#define VQ_FLAG8000 0x8000 /* placeholder */ #ifdef KERNEL /* Structure for setting device IO parameters per mount point */ struct vfsioattr { - u_int32_t io_maxreadcnt; /* Max. byte count for read */ - u_int32_t io_maxwritecnt; /* Max. byte count for write */ - u_int32_t io_segreadcnt; /* Max. segment count for read */ - u_int32_t io_segwritecnt; /* Max. segment count for write */ - u_int32_t io_maxsegreadsize; /* Max. segment read size */ - u_int32_t io_maxsegwritesize; /* Max. segment write size */ - u_int32_t io_devblocksize; /* the underlying device block size */ - u_int32_t io_flags; /* flags for underlying device */ + u_int32_t io_maxreadcnt; /* Max. byte count for read */ + u_int32_t io_maxwritecnt; /* Max. byte count for write */ + u_int32_t io_segreadcnt; /* Max. segment count for read */ + u_int32_t io_segwritecnt; /* Max. segment count for write */ + u_int32_t io_maxsegreadsize; /* Max. segment read size */ + u_int32_t io_maxsegwritesize; /* Max. segment write size */ + u_int32_t io_devblocksize; /* the underlying device block size */ + u_int32_t io_flags; /* flags for underlying device */ union { - int64_t io_max_swappin_available; + int64_t io_max_swappin_available; // On 32 bit architectures, we don't have any spare void *io_reserved[2]; }; }; -#define VFS_IOATTR_FLAGS_FUA 0x00000001 /* Write-through cache supported */ -#define VFS_IOATTR_FLAGS_UNMAP 0x00000002 /* Unmap (trim) supported */ -#define VFS_IOATTR_FLAGS_SWAPPIN_SUPPORTED 0x00000010 /* Pinning swap file supported */ +#define VFS_IOATTR_FLAGS_FUA 0x00000001 /* Write-through cache supported */ +#define VFS_IOATTR_FLAGS_UNMAP 0x00000002 /* Unmap (trim) supported */ +#define VFS_IOATTR_FLAGS_SWAPPIN_SUPPORTED 0x00000010 /* Pinning swap file supported */ /* * Filesystem Registration information */ -#define VFS_TBLTHREADSAFE 0x0001 /* Only threadsafe filesystems are supported */ -#define VFS_TBLFSNODELOCK 0x0002 /* Only threadsafe filesystems are supported */ -#define VFS_TBLNOTYPENUM 0x0008 -#define VFS_TBLLOCALVOL 0x0010 -#define VFS_TBL64BITREADY 0x0020 -#define VFS_TBLNATIVEXATTR 0x0040 -#define VFS_TBLDIRLINKS 0x0080 -#define VFS_TBLUNMOUNT_PREFLIGHT 0x0100 /* does a preflight check before unmounting */ -#define VFS_TBLGENERICMNTARGS 0x0200 /* force generic mount args for local fs */ -#define VFS_TBLREADDIR_EXTENDED 0x0400 /* fs supports VNODE_READDIR_EXTENDED */ -#define VFS_TBLNOMACLABEL 0x1000 -#define VFS_TBLVNOP_PAGEINV2 0x2000 -#define VFS_TBLVNOP_PAGEOUTV2 0x4000 -#define VFS_TBLVNOP_NOUPDATEID_RENAME 0x8000 /* vfs should not call vnode_update_ident on rename */ -#define VFS_TBLVNOP_SECLUDE_RENAME 0x10000 -#define VFS_TBLCANMOUNTROOT 0x20000 +#define VFS_TBLTHREADSAFE 0x0001 /* Only threadsafe filesystems are supported */ +#define VFS_TBLFSNODELOCK 0x0002 /* Only threadsafe 
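/*
 * The VQ_* bits above reach userspace as kevent fflags on an EVFILT_FS
 * filter; a minimal sketch of a watcher for mount, unmount, and low-disk
 * notifications:
 */
#include <sys/event.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	struct kevent ev;
	int kq = kqueue();

	EV_SET(&ev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kq < 0 || kevent(kq, &ev, 1, NULL, 0, NULL) < 0)
		return 1;
	for (;;) {
		if (kevent(kq, NULL, 0, &ev, 1, NULL) < 1)
			break;
		if (ev.fflags & VQ_MOUNT)
			printf("a filesystem arrived\n");
		if (ev.fflags & VQ_UNMOUNT)
			printf("a filesystem left\n");
		if (ev.fflags & VQ_LOWDISK)
			printf("low on disk space\n");
	}
	return 0;
}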
filesystems are supported */ +#define VFS_TBLNOTYPENUM 0x0008 +#define VFS_TBLLOCALVOL 0x0010 +#define VFS_TBL64BITREADY 0x0020 +#define VFS_TBLNATIVEXATTR 0x0040 +#define VFS_TBLDIRLINKS 0x0080 +#define VFS_TBLUNMOUNT_PREFLIGHT 0x0100 /* does a preflight check before unmounting */ +#define VFS_TBLGENERICMNTARGS 0x0200 /* force generic mount args for local fs */ +#define VFS_TBLREADDIR_EXTENDED 0x0400 /* fs supports VNODE_READDIR_EXTENDED */ +#define VFS_TBLNOMACLABEL 0x1000 +#define VFS_TBLVNOP_PAGEINV2 0x2000 +#define VFS_TBLVNOP_PAGEOUTV2 0x4000 +#define VFS_TBLVNOP_NOUPDATEID_RENAME 0x8000 /* vfs should not call vnode_update_ident on rename */ +#define VFS_TBLVNOP_SECLUDE_RENAME 0x10000 +#define VFS_TBLCANMOUNTROOT 0x20000 struct vfs_fsentry { - struct vfsops * vfe_vfsops; /* vfs operations */ - int vfe_vopcnt; /* # of vnodeopv_desc being registered (reg, spec, fifo ...) */ + struct vfsops * vfe_vfsops; /* vfs operations */ + int vfe_vopcnt; /* # of vnodeopv_desc being registered (reg, spec, fifo ...) */ struct vnodeopv_desc ** vfe_opvdescs; /* null terminated; */ - int vfe_fstypenum; /* historic filesystem type number */ - char vfe_fsname[MFSNAMELEN]; /* filesystem type name */ - uint32_t vfe_flags; /* defines the FS capabilities */ - void * vfe_reserv[2]; /* reserved for future use; set this to zero*/ - }; + int vfe_fstypenum; /* historic filesystem type number */ + char vfe_fsname[MFSNAMELEN]; /* filesystem type name */ + uint32_t vfe_flags; /* defines the FS capabilities */ + void * vfe_reserv[2];/* reserved for future use; set this to zero*/ +}; struct vfsops { /*! - @function vfs_mount - @abstract Perform filesystem-specific operations required for mounting. - @discussion Typical operations include setting the mount-specific data with vfs_setfsprivate(). - Note that if a mount call fails, the filesystem must clean up any state it has constructed, because - vfs-level mount code will not clean it up. - @param mp Mount structure for the newly mounted filesystem. - @param devvp Device that the filesystem is mounted from. - @param data Filesystem-specific data passed down from userspace. - @param context Context to authenticate for mount. - @return 0 for success, else an error code. Once success is returned, the filesystem should be ready to go active; - VFS will not ask again. + * @function vfs_mount + * @abstract Perform filesystem-specific operations required for mounting. + * @discussion Typical operations include setting the mount-specific data with vfs_setfsprivate(). + * Note that if a mount call fails, the filesystem must clean up any state it has constructed, because + * vfs-level mount code will not clean it up. + * @param mp Mount structure for the newly mounted filesystem. + * @param devvp Device that the filesystem is mounted from. + * @param data Filesystem-specific data passed down from userspace. + * @param context Context to authenticate for mount. + * @return 0 for success, else an error code. Once success is returned, the filesystem should be ready to go active; + * VFS will not ask again. */ int (*vfs_mount)(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context); - + /*! - @function vfs_start - @abstract Mark a mount as ready to be used. - @discussion After receiving this calldown, a filesystem will be hooked into the mount list and should expect - calls down from the VFS layer. - @param mp Mount structure being activated. - @param flags Unused. - @param context Context to authenticate for mount. - @return Return value is ignored. 
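/*
 * A sketch of filling in the vfs_fsentry just defined for registration via
 * vfs_fsadd() (declared later in this header). All myfs_* names are
 * hypothetical; a kext supplies its own vfsops and vnodeopv_desc tables
 * from its start routine.
 */
#include <sys/mount.h>
#include <sys/vnode.h>

extern int myfs_mount(struct mount *, vnode_t, user_addr_t, vfs_context_t);
extern int myfs_unmount(struct mount *, int, vfs_context_t);
extern int myfs_root(struct mount *, struct vnode **, vfs_context_t);
extern struct vnodeopv_desc myfs_vnodeopv_desc;

static struct vfsops myfs_vfsops = {
	.vfs_mount   = myfs_mount,
	.vfs_unmount = myfs_unmount,
	.vfs_root    = myfs_root,
};
static struct vnodeopv_desc *myfs_opvdescs[] = { &myfs_vnodeopv_desc, NULL };
static vfstable_t myfs_vfstable;

static int
myfs_register(void)
{
	struct vfs_fsentry vfe = {
		.vfe_vfsops    = &myfs_vfsops,
		.vfe_vopcnt    = 1,              /* one vnodeopv_desc above */
		.vfe_opvdescs  = myfs_opvdescs,
		.vfe_fstypenum = -1,             /* ignored with VFS_TBLNOTYPENUM */
		.vfe_fsname    = "myfs",
		.vfe_flags     = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY |
		    VFS_TBLNOTYPENUM,
	};
	return vfs_fsadd(&vfe, &myfs_vfstable);
}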
+ * @function vfs_start + * @abstract Mark a mount as ready to be used. + * @discussion After receiving this calldown, a filesystem will be hooked into the mount list and should expect + * calls down from the VFS layer. + * @param mp Mount structure being activated. + * @param flags Unused. + * @param context Context to authenticate for mount. + * @return Return value is ignored. */ int (*vfs_start)(struct mount *mp, int flags, vfs_context_t context); - + /*! - @function vfs_unmount - @abstract Perform filesystem-specific cleanup as part of unmount. - @discussion If the unmount downcall succeeds, VFS considers itself authorized to destroy all - state related to the mount. - @param mp Mount structure to unmount. - @param mntflags MNT_FORCE indicates that we wish to unmount even if there are active vnodes. - @param context Context to authenticate for unmount. - @return 0 for success, else an error code. + * @function vfs_unmount + * @abstract Perform filesystem-specific cleanup as part of unmount. + * @discussion If the unmount downcall succeeds, VFS considers itself authorized to destroy all + * state related to the mount. + * @param mp Mount structure to unmount. + * @param mntflags MNT_FORCE indicates that we wish to unmount even if there are active vnodes. + * @param context Context to authenticate for unmount. + * @return 0 for success, else an error code. */ int (*vfs_unmount)(struct mount *mp, int mntflags, vfs_context_t context); - + /*! - @function vfs_root - @abstract Get the root vnode of a filesystem. - @discussion Upon success, should return with an iocount held on the root vnode which the caller will - drop with vnode_put(). - @param mp Mount for which to get the root. - @param vpp Destination for root vnode. - @param context Context to authenticate for getting the root. - @return 0 for success, else an error code. + * @function vfs_root + * @abstract Get the root vnode of a filesystem. + * @discussion Upon success, should return with an iocount held on the root vnode which the caller will + * drop with vnode_put(). + * @param mp Mount for which to get the root. + * @param vpp Destination for root vnode. + * @param context Context to authenticate for getting the root. + * @return 0 for success, else an error code. */ int (*vfs_root)(struct mount *mp, struct vnode **vpp, vfs_context_t context); - + /*! - @function vfs_quotactl - @abstract Manipulate quotas for a volume. - @param mp Mount for which to manipulate quotas. - @param cmds Detailed in "quotactl" manual page. - @param uid Detailed in "quotactl" manual page. - @param arg Detailed in "quotactl" manual page. - @param context Context to authenticate for changing quotas. - @return 0 for success, else an error code. + * @function vfs_quotactl + * @abstract Manipulate quotas for a volume. + * @param mp Mount for which to manipulate quotas. + * @param cmds Detailed in "quotactl" manual page. + * @param uid Detailed in "quotactl" manual page. + * @param arg Detailed in "quotactl" manual page. + * @param context Context to authenticate for changing quotas. + * @return 0 for success, else an error code. */ int (*vfs_quotactl)(struct mount *mp, int cmds, uid_t uid, caddr_t arg, vfs_context_t context); /*! - @function vfs_getattr - @abstract Get filesystem attributes. - @discussion See VFSATTR_RETURN, VFSATTR_ACTIVE, VFSATTR_SET_SUPPORTED, VFSATTR_WANTED macros. - @param mp Mount for which to get parameters. 
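/*
 * A sketch of the vfs_mount contract described above, assuming a
 * hypothetical myfs_mountfs() helper that reads the superblock and builds
 * the in-memory mount state. Note the cleanup rule: on failure nothing may
 * be left behind, since VFS will not clean up for the filesystem.
 */
#include <sys/mount.h>
#include <sys/vnode.h>

struct myfs_mount;
extern int myfs_mountfs(struct mount *, vnode_t, vfs_context_t,
    struct myfs_mount **);

static int
myfs_mount(struct mount *mp, vnode_t devvp, __unused user_addr_t data,
    vfs_context_t ctx)
{
	struct myfs_mount *mmp;
	int error;

	if (vfs_isupdate(mp)) {
		/* MNT_UPDATE: adjust the existing mount (e.g. r/w upgrade). */
		return 0;
	}
	error = myfs_mountfs(mp, devvp, ctx, &mmp);
	if (error) {
		return error; /* myfs_mountfs released anything it built */
	}
	vfs_setfsprivate(mp, mmp);   /* stash state for later calldowns */
	vfs_setflags(mp, MNT_LOCAL); /* advertise as a local filesystem */
	return 0;
}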
- @param vfa Container for specifying which attributes are desired and which attributes the filesystem - supports, as well as for returning results. - @param ctx Context to authenticate for getting filesystem attributes. - @return 0 for success, else an error code. + * @function vfs_getattr + * @abstract Get filesystem attributes. + * @discussion See VFSATTR_RETURN, VFSATTR_ACTIVE, VFSATTR_SET_SUPPORTED, VFSATTR_WANTED macros. + * @param mp Mount for which to get parameters. + * @param vfa Container for specifying which attributes are desired and which attributes the filesystem + * supports, as well as for returning results. + * @param ctx Context to authenticate for getting filesystem attributes. + * @return 0 for success, else an error code. */ int (*vfs_getattr)(struct mount *mp, struct vfs_attr *, vfs_context_t context); /* int (*vfs_statfs)(struct mount *mp, struct vfsstatfs *sbp, vfs_context_t context);*/ /*! - @function vfs_sync - @abstract Flush all filesystem data to backing store. - @discussion vfs_sync will be called as part of the sync() system call and during unmount. - @param mp Mountpoint to sync. - @param waitfor MNT_WAIT: flush synchronously, waiting for all data to be written before returning. MNT_NOWAIT: start I/O but do not wait for it. - @param ctx Context to authenticate for the sync. - @return 0 for success, else an error code. + * @function vfs_sync + * @abstract Flush all filesystem data to backing store. + * @discussion vfs_sync will be called as part of the sync() system call and during unmount. + * @param mp Mountpoint to sync. + * @param waitfor MNT_WAIT: flush synchronously, waiting for all data to be written before returning. MNT_NOWAIT: start I/O but do not wait for it. + * @param ctx Context to authenticate for the sync. + * @return 0 for success, else an error code. */ int (*vfs_sync)(struct mount *mp, int waitfor, vfs_context_t context); - + /*! - @function vfs_vget - @abstract Get a vnode by file id (inode number). - @discussion This routine is chiefly used to build paths to vnodes. Result should be turned with an iocount that the - caller will drop with vnode_put(). - @param mp Mount against which to look up inode number. - @param ino File ID for desired file, as found through a readdir. - @param vpp Destination for vnode. - @return 0 for success, else an error code. + * @function vfs_vget + * @abstract Get a vnode by file id (inode number). + * @discussion This routine is chiefly used to build paths to vnodes. Result should be turned with an iocount that the + * caller will drop with vnode_put(). + * @param mp Mount against which to look up inode number. + * @param ino File ID for desired file, as found through a readdir. + * @param vpp Destination for vnode. + * @return 0 for success, else an error code. */ int (*vfs_vget)(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context); - + /*! - @function vfs_fhtovp - @abstract Get the vnode corresponding to a file handle. - @discussion Filesystems can return handles to files which are independent of their (transient) vnode identities. - vfs_thtovp converts that persistent handle back to a vnode. The vnode should be returned with an iocount which - the caller will drop with vnode_put(). - @param mp Mount against which to look up file handle. - @param fhlen Size of file handle structure, as returned by vfs_vptofh. - @param fhp Pointer to handle. - @param vpp Destination for vnode. - @param ctx Context against which to authenticate the file-handle conversion. 
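/*
 * A sketch of the vfs_sync shape just described. vnode_iterate() (from
 * <sys/vnode.h>) is the usual way to visit a mount's vnodes; the
 * myfs_fsync_internal() flush helper is hypothetical. MNT_NOWAIT callers
 * only start I/O, MNT_WAIT callers wait for it.
 */
#include <sys/mount.h>
#include <sys/vnode.h>

extern void myfs_fsync_internal(vnode_t, int wait);

static int
myfs_sync_callback(vnode_t vp, void *arg)
{
	int waitfor = *(int *)arg;

	myfs_fsync_internal(vp, waitfor == MNT_WAIT);
	return VNODE_RETURNED; /* drop the iteration reference, continue */
}

static int
myfs_sync(struct mount *mp, int waitfor, __unused vfs_context_t ctx)
{
	return vnode_iterate(mp, 0, myfs_sync_callback, &waitfor);
}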
- @return 0 for success, else an error code. + * @function vfs_fhtovp + * @abstract Get the vnode corresponding to a file handle. + * @discussion Filesystems can return handles to files which are independent of their (transient) vnode identities. + * vfs_thtovp converts that persistent handle back to a vnode. The vnode should be returned with an iocount which + * the caller will drop with vnode_put(). + * @param mp Mount against which to look up file handle. + * @param fhlen Size of file handle structure, as returned by vfs_vptofh. + * @param fhp Pointer to handle. + * @param vpp Destination for vnode. + * @param ctx Context against which to authenticate the file-handle conversion. + * @return 0 for success, else an error code. */ int (*vfs_fhtovp)(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, - vfs_context_t context); + vfs_context_t context); /*! - @function vfs_vptofh - @abstract Get a persistent handle corresponding to a vnode. - @param mp Mount against which to convert the vnode to a handle. - @param fhlen Size of buffer provided for handle; set to size of actual handle returned. - @param fhp Pointer to buffer in which to place handle data. - @param ctx Context against which to authenticate the file-handle request. - @return 0 for success, else an error code. + * @function vfs_vptofh + * @abstract Get a persistent handle corresponding to a vnode. + * @param mp Mount against which to convert the vnode to a handle. + * @param fhlen Size of buffer provided for handle; set to size of actual handle returned. + * @param fhp Pointer to buffer in which to place handle data. + * @param ctx Context against which to authenticate the file-handle request. + * @return 0 for success, else an error code. */ int (*vfs_vptofh)(struct vnode *vp, int *fhlen, unsigned char *fhp, vfs_context_t context); /*! - @function vfs_init - @abstract Prepare a filesystem for having instances mounted. - @discussion This routine is called once, before any particular instance of a filesystem - is mounted; it allows the filesystem to initialize whatever global data structures - are shared across all mounts. If this returns successfully, a filesystem should be ready to have - instances mounted. - @param vfsconf Configuration information. Currently, the only useful data are the filesystem name, - typenum, and flags. The flags field will be either 0 or MNT_LOCAL. Many filesystems ignore this - parameter. - @return 0 for success, else an error code. + * @function vfs_init + * @abstract Prepare a filesystem for having instances mounted. + * @discussion This routine is called once, before any particular instance of a filesystem + * is mounted; it allows the filesystem to initialize whatever global data structures + * are shared across all mounts. If this returns successfully, a filesystem should be ready to have + * instances mounted. + * @param vfsconf Configuration information. Currently, the only useful data are the filesystem name, + * typenum, and flags. The flags field will be either 0 or MNT_LOCAL. Many filesystems ignore this + * parameter. + * @return 0 for success, else an error code. */ int (*vfs_init)(struct vfsconf *); - + /*! - @function vfs_sysctl - @abstract Broad interface for querying and controlling filesystem. - @discussion VFS defines VFS_CTL_QUERY as a generic status request which is answered - with the VQ_* macros in a "struct vfsquery." - A filesystem may also define implementation-specific commands. See "man 3 sysctl" - for the meaning of sysctl parameters. 
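/*
 * A sketch of the persistent file-handle pair described above, encoding
 * only a 64-bit file ID; real filesystems usually add a generation count
 * so stale handles can be detected. myfs_fileid() and myfs_vget_internal()
 * are hypothetical helpers.
 */
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/errno.h>
#include <string.h>

extern uint64_t myfs_fileid(vnode_t);
extern int myfs_vget_internal(struct mount *, uint64_t, struct vnode **,
    vfs_context_t);

static int
myfs_vptofh(vnode_t vp, int *fhlenp, unsigned char *fhp,
    __unused vfs_context_t ctx)
{
	uint64_t fileid = myfs_fileid(vp);

	if (*fhlenp < (int)sizeof(fileid))
		return EOVERFLOW;
	memcpy(fhp, &fileid, sizeof(fileid));
	*fhlenp = (int)sizeof(fileid);
	return 0;
}

static int
myfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp,
    struct vnode **vpp, vfs_context_t ctx)
{
	uint64_t fileid;

	if (fhlen < (int)sizeof(fileid))
		return EINVAL;
	memcpy(&fileid, fhp, sizeof(fileid));
	/* Must return *vpp with an iocount, per the contract above. */
	return myfs_vget_internal(mp, fileid, vpp, ctx);
}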
- @param context Context against which to authenticate command. - @return 0 for success, else an error code. + * @function vfs_sysctl + * @abstract Broad interface for querying and controlling filesystem. + * @discussion VFS defines VFS_CTL_QUERY as a generic status request which is answered + * with the VQ_* macros in a "struct vfsquery." + * A filesystem may also define implementation-specific commands. See "man 3 sysctl" + * for the meaning of sysctl parameters. + * @param context Context against which to authenticate command. + * @return 0 for success, else an error code. */ int (*vfs_sysctl)(int *, u_int, user_addr_t, size_t *, user_addr_t, size_t, vfs_context_t context); /*! - @function vfs_setattr - @abstract Set filesystem attributes. - @discussion The other side of the vfs_getattr coin. Currently only called to set volume name. - @param mp Mount on which to set attributes. - @param vfa VFS attribute structure containing requested attributes to set and their values. Currently - will only be called with f_vol_name set. - @param context Context against which to authenticate attribute change. - @return 0 for success, else an error code. + * @function vfs_setattr + * @abstract Set filesystem attributes. + * @discussion The other side of the vfs_getattr coin. Currently only called to set volume name. + * @param mp Mount on which to set attributes. + * @param vfa VFS attribute structure containing requested attributes to set and their values. Currently + * will only be called with f_vol_name set. + * @param context Context against which to authenticate attribute change. + * @return 0 for success, else an error code. */ int (*vfs_setattr)(struct mount *mp, struct vfs_attr *, vfs_context_t context); /*! - @function vfs_ioctl - @abstract File system control operations. - @discussion Unlike vfs_sysctl, this is specific to a particular volume. - @param mp The mount to execute the command on. - @param command Identifier for action to take. The command used here - should be in the same namespace as VNOP ioctl commands. - @param data Pointer to data; this can be an integer constant (of 32 bits - only) or an address to be read from or written to, depending on "command." - If it is an address, it is valid and resides in the kernel; callers of - VFS_IOCTL() are responsible for copying to and from userland. - @param flags Reserved for future use, set to zero - @param ctx Context against which to authenticate ioctl request. - @return 0 for success, else an error code. + * @function vfs_ioctl + * @abstract File system control operations. + * @discussion Unlike vfs_sysctl, this is specific to a particular volume. + * @param mp The mount to execute the command on. + * @param command Identifier for action to take. The command used here + * should be in the same namespace as VNOP ioctl commands. + * @param data Pointer to data; this can be an integer constant (of 32 bits + * only) or an address to be read from or written to, depending on "command." + * If it is an address, it is valid and resides in the kernel; callers of + * VFS_IOCTL() are responsible for copying to and from userland. + * @param flags Reserved for future use, set to zero + * @param ctx Context against which to authenticate ioctl request. + * @return 0 for success, else an error code. */ int (*vfs_ioctl)(struct mount *mp, u_long command, caddr_t data, - int flags, vfs_context_t context); + int flags, vfs_context_t context); /*! - @function vfs_vget_snapdir - @abstract Get the vnode for the snapshot directory of a filesystem. 
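/*
 * A sketch of the narrow vfs_setattr contract described above: only
 * f_vol_name is currently set this way. VFSATTR_IS_ACTIVE and
 * VFSATTR_SET_SUPPORTED are the macros this header provides for the
 * handshake; myfs_write_volname() is a hypothetical helper.
 */
#include <sys/mount.h>

extern int myfs_write_volname(struct mount *, const char *);

static int
myfs_setattr(struct mount *mp, struct vfs_attr *vfa,
    __unused vfs_context_t ctx)
{
	int error = 0;

	if (VFSATTR_IS_ACTIVE(vfa, f_vol_name)) {
		error = myfs_write_volname(mp, vfa->f_vol_name);
		if (error == 0) {
			VFSATTR_SET_SUPPORTED(vfa, f_vol_name);
		}
	}
	return error;
}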
- @discussion Upon success, should return with an iocount held on the root vnode which the caller will - drop with vnode_put(). - @param mp Mount for which to get the root. - @param vpp Destination for snapshot directory vnode. - @param context Context to authenticate for getting the snapshot directory. - @return 0 for success, else an error code. + * @function vfs_vget_snapdir + * @abstract Get the vnode for the snapshot directory of a filesystem. + * @discussion Upon success, should return with an iocount held on the root vnode which the caller will + * drop with vnode_put(). + * @param mp Mount for which to get the root. + * @param vpp Destination for snapshot directory vnode. + * @param context Context to authenticate for getting the snapshot directory. + * @return 0 for success, else an error code. */ int (*vfs_vget_snapdir)(struct mount *mp, struct vnode **vpp, vfs_context_t context); void *vfs_reserved5; @@ -763,13 +763,13 @@ struct fs_snapshot_mount_args { #define VFSIOC_MOUNT_SNAPSHOT _IOW('V', 1, struct fs_snapshot_mount_args) struct fs_snapshot_revert_args { - struct componentname *sr_cnp; + struct componentname *sr_cnp; }; #define VFSIOC_REVERT_SNAPSHOT _IOW('V', 2, struct fs_snapshot_revert_args) struct fs_snapshot_root_args { - struct componentname *sr_cnp; -}; + struct componentname *sr_cnp; +}; #define VFSIOC_ROOT_SNAPSHOT _IOW('V', 3, struct fs_snapshot_root_args) #endif /* KERNEL */ @@ -778,17 +778,17 @@ struct fs_snapshot_root_args { * flags passed into vfs_iterate */ #ifdef PRIVATE -#define VFS_ITERATE_TAIL_FIRST (1 << 0) -#define VFS_ITERATE_CB_DROPREF (1 << 1) // Callback will drop the iterref +#define VFS_ITERATE_TAIL_FIRST (1 << 0) +#define VFS_ITERATE_CB_DROPREF (1 << 1) // Callback will drop the iterref #endif /* PRIVATE */ /* * return values from callback */ -#define VFS_RETURNED 0 /* done with vnode, reference can be dropped */ -#define VFS_RETURNED_DONE 1 /* done with vnode, reference can be dropped, terminate iteration */ -#define VFS_CLAIMED 2 /* don't drop reference */ -#define VFS_CLAIMED_DONE 3 /* don't drop reference, terminate iteration */ +#define VFS_RETURNED 0 /* done with vnode, reference can be dropped */ +#define VFS_RETURNED_DONE 1 /* done with vnode, reference can be dropped, terminate iteration */ +#define VFS_CLAIMED 2 /* don't drop reference */ +#define VFS_CLAIMED_DONE 3 /* don't drop reference, terminate iteration */ __BEGIN_DECLS @@ -805,7 +805,7 @@ extern int VFS_VGET(mount_t, ino64_t, vnode_t *, vfs_context_t); extern int VFS_FHTOVP(mount_t, int, unsigned char *, vnode_t *, vfs_context_t); extern int VFS_VPTOFH(vnode_t, int *, unsigned char *, vfs_context_t); extern int VFS_IOCTL(mount_t mp, u_long command, caddr_t data, - int flags, vfs_context_t context); + int flags, vfs_context_t context); extern int VFS_VGET_SNAPDIR(mount_t, vnode_t *, vfs_context_t); #endif /* BSD_KERNEL_PRIVATE */ /* @@ -813,252 +813,252 @@ extern int VFS_VGET_SNAPDIR(mount_t, vnode_t *, vfs_context_t); */ /*! - @function vfs_fsadd - @abstract Register a filesystem with VFS. - @discussion Typically called by a filesystem Kernel Extension when it is loaded. - @param vfe Filesystem information: table of vfs operations, list of vnode operation tables, - filesystem type number (can be omitted with VFS_TBLNOTYPENUM flag), name, flags. - @param handle Opaque handle which will be passed to vfs_fsremove. - @return 0 for success, else an error code. - */ + * @function vfs_fsadd + * @abstract Register a filesystem with VFS. 
+ * @discussion Typically called by a filesystem Kernel Extension when it is loaded. + * @param vfe Filesystem information: table of vfs operations, list of vnode operation tables, + * filesystem type number (can be omitted with VFS_TBLNOTYPENUM flag), name, flags. + * @param handle Opaque handle which will be passed to vfs_fsremove. + * @return 0 for success, else an error code. + */ int vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle); /*! - @function vfs_fsremove - @abstract Unregister a filesystem with VFS. - @discussion Typically called by a filesystem Kernel Extension when it is unloaded. - @param handle Handle which was returned by vfs_fsadd. - @return 0 for success, else an error code. - */ + * @function vfs_fsremove + * @abstract Unregister a filesystem with VFS. + * @discussion Typically called by a filesystem Kernel Extension when it is unloaded. + * @param handle Handle which was returned by vfs_fsadd. + * @return 0 for success, else an error code. + */ int vfs_fsremove(vfstable_t handle); /*! - @function vfs_iterate - @abstract Iterate over all mountpoints with a callback. Used, for example, by sync(). - @param flags Unused. - @param callout Function which takes a mount and arbitrary passed-in "arg," and returns one of VFS_RETURNED_DONE or VFS_CLAIMED_DONE: end - iteration and return success. VFS_RETURNED or VFS_CLAIMED: continue iterating. Anything else: continue iterating. - @param arg Arbitrary data to pass to callback. - @return 0 for success, else an error code. - */ -int vfs_iterate(int flags, int (*callout)(struct mount *, void *), void *arg); + * @function vfs_iterate + * @abstract Iterate over all mountpoints with a callback. Used, for example, by sync(). + * @param flags Unused. + * @param callout Function which takes a mount and arbitrary passed-in "arg," and returns one of VFS_RETURNED_DONE or VFS_CLAIMED_DONE: end + * iteration and return success. VFS_RETURNED or VFS_CLAIMED: continue iterating. Anything else: continue iterating. + * @param arg Arbitrary data to pass to callback. + * @return 0 for success, else an error code. + */ +int vfs_iterate(int flags, int (*callout)(struct mount *, void *), void *arg); /*! - @function vfs_init_io_attributes - @abstract Set I/O attributes on a mountpoint based on device properties. - @param devvp Block device vnode from which a filesystem is being mounted. - @param mp Mountpoint whose I/O parameters to initialize. - @return 0 for success, else an error code. - */ -int vfs_init_io_attributes(vnode_t devvp, mount_t mp); + * @function vfs_init_io_attributes + * @abstract Set I/O attributes on a mountpoint based on device properties. + * @param devvp Block device vnode from which a filesystem is being mounted. + * @param mp Mountpoint whose I/O parameters to initialize. + * @return 0 for success, else an error code. + */ +int vfs_init_io_attributes(vnode_t devvp, mount_t mp); /*! - @function vfs_flags - @abstract Retrieve mount flags. - @discussion Results will be in the bitwise "OR" of MNT_VISFLAGMASK and MNT_CMDFLAGS. - @param mp Mount whose flags to grab. - @return Flags. - */ + * @function vfs_flags + * @abstract Retrieve mount flags. + * @discussion Results will be in the bitwise "OR" of MNT_VISFLAGMASK and MNT_CMDFLAGS. + * @param mp Mount whose flags to grab. + * @return Flags. + */ uint64_t vfs_flags(mount_t mp); /*! - @function vfs_setflags - @abstract Set flags on a mount. - @discussion Sets mount flags to the bitwise "OR" of their current value and the specified bits. 
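/*
 * A sketch of the vfs_iterate() callback contract just described: the
 * VFS_RETURNED family of return values (defined earlier in this header)
 * steers both reference handling and whether iteration continues.
 */
#include <sys/mount.h>

static int
count_local_mounts(struct mount *mp, void *arg)
{
	int *count = arg;

	if (vfs_flags(mp) & MNT_LOCAL) {
		(*count)++;
	}
	return VFS_RETURNED; /* done with this mount; keep iterating */
}

static int
local_mount_count(void)
{
	int count = 0;

	(void)vfs_iterate(0, count_local_mounts, &count);
	return count;
}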
Often - used by a filesystem as part of the mount process. - @param mp Mount whose flags to set. - @param flags Flags to activate. Must be in the bitwise "OR" of MNT_VISFLAGMASK and MNT_CMDFLAGS. - */ -void vfs_setflags(mount_t mp, uint64_t flags); + * @function vfs_setflags + * @abstract Set flags on a mount. + * @discussion Sets mount flags to the bitwise "OR" of their current value and the specified bits. Often + * used by a filesystem as part of the mount process. + * @param mp Mount whose flags to set. + * @param flags Flags to activate. Must be in the bitwise "OR" of MNT_VISFLAGMASK and MNT_CMDFLAGS. + */ +void vfs_setflags(mount_t mp, uint64_t flags); /*! - @function vfs_clearflags - @abstract Clear flags on a mount. - @discussion Sets mount flags to the bitwise "AND" of their current value and the complement of the specified bits. - @param mp Mount whose flags to set. - @param flags Flags to deactivate. Must be in the bitwise "OR" of MNT_VISFLAGMASK and MNT_CMDFLAGS. - */ -void vfs_clearflags(mount_t mp, uint64_t flags); + * @function vfs_clearflags + * @abstract Clear flags on a mount. + * @discussion Sets mount flags to the bitwise "AND" of their current value and the complement of the specified bits. + * @param mp Mount whose flags to set. + * @param flags Flags to deactivate. Must be in the bitwise "OR" of MNT_VISFLAGMASK and MNT_CMDFLAGS. + */ +void vfs_clearflags(mount_t mp, uint64_t flags); /*! - @function vfs_issynchronous - @abstract Determine if writes to a filesystem occur synchronously. - @param mp Mount to test. - @return Nonzero if writes occur synchronously, else 0. - */ -int vfs_issynchronous(mount_t mp); + * @function vfs_issynchronous + * @abstract Determine if writes to a filesystem occur synchronously. + * @param mp Mount to test. + * @return Nonzero if writes occur synchronously, else 0. + */ +int vfs_issynchronous(mount_t mp); /*! - @function vfs_iswriteupgrade - @abstract Determine if a filesystem is mounted read-only but a request has been made to upgrade - to read-write. - @param mp Mount to test. - @return Nonzero if a request has been made to update from read-only to read-write, else 0. - */ -int vfs_iswriteupgrade(mount_t mp); + * @function vfs_iswriteupgrade + * @abstract Determine if a filesystem is mounted read-only but a request has been made to upgrade + * to read-write. + * @param mp Mount to test. + * @return Nonzero if a request has been made to update from read-only to read-write, else 0. + */ +int vfs_iswriteupgrade(mount_t mp); /*! - @function vfs_isupdate - @abstract Determine if a mount update is in progress. - @param mp Mount to test. - @return Nonzero if a mount update is in progress, 0 otherwise. - */ -int vfs_isupdate(mount_t mp); + * @function vfs_isupdate + * @abstract Determine if a mount update is in progress. + * @param mp Mount to test. + * @return Nonzero if a mount update is in progress, 0 otherwise. + */ +int vfs_isupdate(mount_t mp); /*! - @function vfs_isreload - @abstract Determine if a reload of filesystem data is in progress. This can only be the case - for a read-only filesystem; all data is brought in from secondary storage. - @param mp Mount to test. - @return Nonzero if a request has been made to reload data, else 0. - */ -int vfs_isreload(mount_t mp); + * @function vfs_isreload + * @abstract Determine if a reload of filesystem data is in progress. This can only be the case + * for a read-only filesystem; all data is brought in from secondary storage. + * @param mp Mount to test. 
+ * @return Nonzero if a request has been made to reload data, else 0. + */ +int vfs_isreload(mount_t mp); /*! - @function vfs_isforce - @abstract Determine if a forced unmount is in progress. - @discussion A forced unmount invalidates open files. - @param mp Mount to test. - @return Nonzero if a request has been made to forcibly unmount, else 0. - */ -int vfs_isforce(mount_t mp); + * @function vfs_isforce + * @abstract Determine if a forced unmount is in progress. + * @discussion A forced unmount invalidates open files. + * @param mp Mount to test. + * @return Nonzero if a request has been made to forcibly unmount, else 0. + */ +int vfs_isforce(mount_t mp); /*! - @function vfs_isunmount - @abstract Determine if an unmount is in progress. - @discussion This is an unsynchronized snapshot of the mount state. It should only be called - if the mount is known to be valid, e.g. there are known to be live files on that volume. - @param mp Mount to test. - @return Nonzero if an unmount is in progress, else zero. - */ -int vfs_isunmount(mount_t mp); + * @function vfs_isunmount + * @abstract Determine if an unmount is in progress. + * @discussion This is an unsynchronized snapshot of the mount state. It should only be called + * if the mount is known to be valid, e.g. there are known to be live files on that volume. + * @param mp Mount to test. + * @return Nonzero if an unmount is in progress, else zero. + */ +int vfs_isunmount(mount_t mp); /*! - @function vfs_isrdonly - @abstract Determine if a filesystem is mounted read-only. - @param mp Mount to test. - @return Nonzero if filesystem is mounted read-only, else 0. - */ -int vfs_isrdonly(mount_t mp); + * @function vfs_isrdonly + * @abstract Determine if a filesystem is mounted read-only. + * @param mp Mount to test. + * @return Nonzero if filesystem is mounted read-only, else 0. + */ +int vfs_isrdonly(mount_t mp); /*! - @function vfs_isrdwr - @abstract Determine if a filesystem is mounted with writes enabled. - @param mp Mount to test. - @return Nonzero if filesystem is mounted read-write, else 0. - */ -int vfs_isrdwr(mount_t mp); + * @function vfs_isrdwr + * @abstract Determine if a filesystem is mounted with writes enabled. + * @param mp Mount to test. + * @return Nonzero if filesystem is mounted read-write, else 0. + */ +int vfs_isrdwr(mount_t mp); /*! - @function vfs_authopaque - @abstract Determine if a filesystem's authorization decisions occur remotely. - @param mp Mount to test. - @return Nonzero if filesystem authorization is controlled remotely, else 0. - */ -int vfs_authopaque(mount_t mp); + * @function vfs_authopaque + * @abstract Determine if a filesystem's authorization decisions occur remotely. + * @param mp Mount to test. + * @return Nonzero if filesystem authorization is controlled remotely, else 0. + */ +int vfs_authopaque(mount_t mp); /*! - @function vfs_authopaqueaccess - @abstract Check if a filesystem is marked as having reliable remote VNOP_ACCESS support. - @param mp Mount to test. - @return Nonzero if VNOP_ACCESS is supported remotely, else 0. - */ -int vfs_authopaqueaccess(mount_t mp); + * @function vfs_authopaqueaccess + * @abstract Check if a filesystem is marked as having reliable remote VNOP_ACCESS support. + * @param mp Mount to test. + * @return Nonzero if VNOP_ACCESS is supported remotely, else 0. + */ +int vfs_authopaqueaccess(mount_t mp); /*! - @function vfs_setauthopaque - @abstract Mark a filesystem as having authorization decisions controlled remotely. - @param mp Mount to mark. 
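/*
 * The query helpers above make cheap guards; a sketch of a write-path
 * check (the function and the choice of errnos are illustrative):
 */
#include <sys/mount.h>
#include <sys/errno.h>

static int
myfs_start_write(mount_t mp)
{
	if (vfs_isrdonly(mp)) {
		return EROFS; /* no writes on a read-only mount */
	}
	if (vfs_isforce(mp)) {
		return ENXIO; /* forced unmount in progress; give up early */
	}
	return 0;
}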
- */ -void vfs_setauthopaque(mount_t mp); + * @function vfs_setauthopaque + * @abstract Mark a filesystem as having authorization decisions controlled remotely. + * @param mp Mount to mark. + */ +void vfs_setauthopaque(mount_t mp); /*! - @function vfs_setauthopaqueaccess - @abstract Mark a filesystem as having remote VNOP_ACCESS support. - @param mp Mount to mark. - */ -void vfs_setauthopaqueaccess(mount_t mp); + * @function vfs_setauthopaqueaccess + * @abstract Mark a filesystem as having remote VNOP_ACCESS support. + * @param mp Mount to mark. + */ +void vfs_setauthopaqueaccess(mount_t mp); /*! - @function vfs_clearauthopaque - @abstract Mark a filesystem as not having remote authorization decisions. - @param mp Mount to mark. - */ -void vfs_clearauthopaque(mount_t mp); + * @function vfs_clearauthopaque + * @abstract Mark a filesystem as not having remote authorization decisions. + * @param mp Mount to mark. + */ +void vfs_clearauthopaque(mount_t mp); /*! - @function vfs_clearauthopaque - @abstract Mark a filesystem as not having remote VNOP_ACCESS support. - @param mp Mount to mark. - */ -void vfs_clearauthopaqueaccess(mount_t mp); + * @function vfs_clearauthopaque + * @abstract Mark a filesystem as not having remote VNOP_ACCESS support. + * @param mp Mount to mark. + */ +void vfs_clearauthopaqueaccess(mount_t mp); /*! - @function vfs_setextendedsecurity - @abstract Mark a filesystem as supporting security controls beyond POSIX permissions. - @discussion Specific controls include ACLs, file owner UUIDs, and group UUIDs. - @param mp Mount to test. - */ -void vfs_setextendedsecurity(mount_t mp); + * @function vfs_setextendedsecurity + * @abstract Mark a filesystem as supporting security controls beyond POSIX permissions. + * @discussion Specific controls include ACLs, file owner UUIDs, and group UUIDs. + * @param mp Mount to test. + */ +void vfs_setextendedsecurity(mount_t mp); /*! - @function vfs_clearextendedsecurity - @abstract Mark a filesystem as NOT supporting security controls beyond POSIX permissions. - @discussion Specific controls include ACLs, file owner UUIDs, and group UUIDs. - @param mp Mount to test. - */ -void vfs_clearextendedsecurity(mount_t mp); + * @function vfs_clearextendedsecurity + * @abstract Mark a filesystem as NOT supporting security controls beyond POSIX permissions. + * @discussion Specific controls include ACLs, file owner UUIDs, and group UUIDs. + * @param mp Mount to test. + */ +void vfs_clearextendedsecurity(mount_t mp); /*! - @function vfs_setnoswap - @abstract Mark a filesystem as unable to use swap files. - @param mp Mount to mark. - */ -void vfs_setnoswap(mount_t mp); + * @function vfs_setnoswap + * @abstract Mark a filesystem as unable to use swap files. + * @param mp Mount to mark. + */ +void vfs_setnoswap(mount_t mp); /*! - @function vfs_clearnoswap - @abstract Mark a filesystem as capable of using swap files. - @param mp Mount to mark. - */ -void vfs_clearnoswap(mount_t mp); + * @function vfs_clearnoswap + * @abstract Mark a filesystem as capable of using swap files. + * @param mp Mount to mark. + */ +void vfs_clearnoswap(mount_t mp); /*! - @function vfs_setlocklocal - @abstract Mark a filesystem as using VFS-level advisory locking support. - @discussion Advisory locking operations will not call down to the filesystem if this flag is set. - @param mp Mount to mark. - */ -void vfs_setlocklocal(mount_t mp); + * @function vfs_setlocklocal + * @abstract Mark a filesystem as using VFS-level advisory locking support. 
+ * @discussion Advisory locking operations will not call down to the filesystem if this flag is set. + * @param mp Mount to mark. + */ +void vfs_setlocklocal(mount_t mp); /*! - @function vfs_authcache_ttl - @abstract Determine the time-to-live of cached authorized credentials for files in this filesystem. - @discussion If a filesystem is set to allow caching credentials, the VFS layer can authorize - previously-authorized actions from the same vfs_context_t without calling down to the filesystem (though - it will not deny based on the cache). - @param mp Mount for which to check cache lifetime. - @return Cache lifetime in seconds. CACHED_RIGHT_INFINITE_TTL indicates that credentials never expire. - */ -int vfs_authcache_ttl(mount_t mp); + * @function vfs_authcache_ttl + * @abstract Determine the time-to-live of cached authorized credentials for files in this filesystem. + * @discussion If a filesystem is set to allow caching credentials, the VFS layer can authorize + * previously-authorized actions from the same vfs_context_t without calling down to the filesystem (though + * it will not deny based on the cache). + * @param mp Mount for which to check cache lifetime. + * @return Cache lifetime in seconds. CACHED_RIGHT_INFINITE_TTL indicates that credentials never expire. + */ +int vfs_authcache_ttl(mount_t mp); /*! - @function vfs_setauthcache_ttl - @abstract Enable credential caching and set time-to-live of cached authorized credentials for files in this filesystem. - @discussion If a filesystem is set to allow caching credentials, the VFS layer can authorize - previously-authorized actions from the same vfs_context_t without calling down to the filesystem (though - it will not deny based on the cache). - @param mp Mount for which to set cache lifetime. - */ -void vfs_setauthcache_ttl(mount_t mp, int ttl); + * @function vfs_setauthcache_ttl + * @abstract Enable credential caching and set time-to-live of cached authorized credentials for files in this filesystem. + * @discussion If a filesystem is set to allow caching credentials, the VFS layer can authorize + * previously-authorized actions from the same vfs_context_t without calling down to the filesystem (though + * it will not deny based on the cache). + * @param mp Mount for which to set cache lifetime. + */ +void vfs_setauthcache_ttl(mount_t mp, int ttl); /*! - @function vfs_clearauthcache_ttl - @abstract Remove time-to-live controls for cached credentials on a filesytem. Filesystems with remote authorization - decisions (opaque) will still have KAUTH_VNODE_SEARCH rights cached for a default of CACHED_LOOKUP_RIGHT_TTL seconds. - @param mp Mount for which to clear cache lifetime. - */ -void vfs_clearauthcache_ttl(mount_t mp); + * @function vfs_clearauthcache_ttl + * @abstract Remove time-to-live controls for cached credentials on a filesytem. Filesystems with remote authorization + * decisions (opaque) will still have KAUTH_VNODE_SEARCH rights cached for a default of CACHED_LOOKUP_RIGHT_TTL seconds. + * @param mp Mount for which to clear cache lifetime. + */ +void vfs_clearauthcache_ttl(mount_t mp); /* * return value from vfs_cachedrights_ttl if @@ -1066,294 +1066,294 @@ void vfs_clearauthcache_ttl(mount_t mp); * is set in mnt_kern_flag.. it indicates * that no TTL is being applied to the vnode rights cache */ -#define CACHED_RIGHT_INFINITE_TTL ~0 +#define CACHED_RIGHT_INFINITE_TTL ~0 /*! - @function vfs_maxsymlen - @abstract Get the maximum length of a symbolic link on a filesystem. 
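/*
 * A sketch of how a network filesystem might combine the authorization
 * knobs above at mount time: mark decisions as remote, but let VFS cache
 * approvals briefly. The 60-second TTL is an arbitrary illustration.
 */
#include <sys/mount.h>

static void
myfs_setup_auth(mount_t mp)
{
	vfs_setauthopaque(mp);        /* the server makes the real decisions */
	vfs_setauthcache_ttl(mp, 60); /* ...but cache approvals for 60s */
}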
- @param mp Mount from which to get symlink length cap. - @return Max symlink length. - */ + * @function vfs_maxsymlen + * @abstract Get the maximum length of a symbolic link on a filesystem. + * @param mp Mount from which to get symlink length cap. + * @return Max symlink length. + */ uint32_t vfs_maxsymlen(mount_t mp); /*! - @function vfs_setmaxsymlen - @abstract Set the maximum length of a symbolic link on a filesystem. - @param mp Mount on which to set symlink length cap. - @param symlen Length to set. - */ -void vfs_setmaxsymlen(mount_t mp, uint32_t symlen); + * @function vfs_setmaxsymlen + * @abstract Set the maximum length of a symbolic link on a filesystem. + * @param mp Mount on which to set symlink length cap. + * @param symlen Length to set. + */ +void vfs_setmaxsymlen(mount_t mp, uint32_t symlen); /*! - @function vfs_fsprivate - @abstract Get filesystem-private mount data. - @discussion A filesystem generally has an internal mount structure which it attaches to the VFS-level mount structure - as part of the mounting process. - @param mp Mount for which to get private data. - @return Private data. - */ -void * vfs_fsprivate(mount_t mp); + * @function vfs_fsprivate + * @abstract Get filesystem-private mount data. + * @discussion A filesystem generally has an internal mount structure which it attaches to the VFS-level mount structure + * as part of the mounting process. + * @param mp Mount for which to get private data. + * @return Private data. + */ +void * vfs_fsprivate(mount_t mp); /*! - @function vfs_setfsprivate - @abstract Set filesystem-private mount data. - @discussion A filesystem generally has an internal mount structure which it attaches to the VFS-level mount structure - as part of the mounting process. - @param mp Mount for which to set private data. - */ -void vfs_setfsprivate(mount_t mp, void *mntdata); + * @function vfs_setfsprivate + * @abstract Set filesystem-private mount data. + * @discussion A filesystem generally has an internal mount structure which it attaches to the VFS-level mount structure + * as part of the mounting process. + * @param mp Mount for which to set private data. + */ +void vfs_setfsprivate(mount_t mp, void *mntdata); /*! - @function vfs_statfs - @abstract Get information about filesystem status. - @discussion Each filesystem has a struct vfsstatfs associated with it which is updated as events occur; this function - returns a pointer to it. Note that the data in the structure will continue to change over time and also that it may - be quite stale if vfs_update_vfsstat has not been called recently. - @param mp Mount for which to get vfsstatfs pointer. - @return Pointer to vfsstatfs. - */ -struct vfsstatfs * vfs_statfs(mount_t mp); -#define VFS_USER_EVENT 0 -#define VFS_KERNEL_EVENT 1 + * @function vfs_statfs + * @abstract Get information about filesystem status. + * @discussion Each filesystem has a struct vfsstatfs associated with it which is updated as events occur; this function + * returns a pointer to it. Note that the data in the structure will continue to change over time and also that it may + * be quite stale if vfs_update_vfsstat has not been called recently. + * @param mp Mount for which to get vfsstatfs pointer. + * @return Pointer to vfsstatfs. + */ +struct vfsstatfs * vfs_statfs(mount_t mp); +#define VFS_USER_EVENT 0 +#define VFS_KERNEL_EVENT 1 /*! - @function vfs_update_vfsstat - @abstract Update cached filesystem status information in the VFS mount structure. 
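/*
 * A sketch pairing the two accessors above as filesystems commonly do:
 * recover the per-mount state stored with vfs_setfsprivate(), then refresh
 * the cached vfsstatfs that statfs(2) callers see. struct myfs_mount and
 * its fields are hypothetical.
 */
#include <sys/mount.h>

struct myfs_mount {
	uint32_t mf_blocksize;
	uint64_t mf_blockcount;
	uint64_t mf_freeblocks;
};

static void
myfs_fill_statfs(mount_t mp)
{
	struct myfs_mount *mmp = vfs_fsprivate(mp); /* set during mount */
	struct vfsstatfs *sp = vfs_statfs(mp);      /* VFS-owned, cached */

	sp->f_bsize  = mmp->mf_blocksize;
	sp->f_blocks = mmp->mf_blockcount;
	sp->f_bfree  = mmp->mf_freeblocks;
	sp->f_bavail = mmp->mf_freeblocks;
}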
- @discussion Each filesystem has a struct vfsstatfs associated with it which is updated as events occur; this function - updates it so that the structure pointer returned by vfs_statfs() returns a pointer to fairly recent data. - @param mp Mount for which to update cached status information. - @param ctx Context to authenticate against for call down to filesystem. - @param eventtype VFS_USER_EVENT: need for update is driven by user-level request; perform additional authentication. - VFS_KERNEL_EVENT: need for update is driven by in-kernel events. Skip extra authentication. - @return 0 for success, or an error code for authentication failure or problem with call to filesystem to - request information. - */ -int vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, int eventtype); + * @function vfs_update_vfsstat + * @abstract Update cached filesystem status information in the VFS mount structure. + * @discussion Each filesystem has a struct vfsstatfs associated with it which is updated as events occur; this function + * updates it so that the structure pointer returned by vfs_statfs() returns a pointer to fairly recent data. + * @param mp Mount for which to update cached status information. + * @param ctx Context to authenticate against for call down to filesystem. + * @param eventtype VFS_USER_EVENT: need for update is driven by user-level request; perform additional authentication. + * VFS_KERNEL_EVENT: need for update is driven by in-kernel events. Skip extra authentication. + * @return 0 for success, or an error code for authentication failure or problem with call to filesystem to + * request information. + */ +int vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, int eventtype); /*! - @function vfs_typenum - @abstract Get (archaic) filesystem type number. - @discussion Filesystem type numbers are an old construct; most filesystems just get a number assigned based on - the order in which they are registered with the system. - @param mp Mount for which to get type number. - @return Type number. - */ -int vfs_typenum(mount_t mp); + * @function vfs_typenum + * @abstract Get (archaic) filesystem type number. + * @discussion Filesystem type numbers are an old construct; most filesystems just get a number assigned based on + * the order in which they are registered with the system. + * @param mp Mount for which to get type number. + * @return Type number. + */ +int vfs_typenum(mount_t mp); /*! - @function vfs_name - @abstract Copy filesystem name into a buffer. - @discussion Get filesystem name; this refers to the filesystem type of which a mount is an instantiation, - rather than a name specific to the mountpoint. - @param mp Mount for which to get name. - @param buffer Destination for name; length should be at least MFSNAMELEN. - */ -void vfs_name(mount_t mp, char *buffer); + * @function vfs_name + * @abstract Copy filesystem name into a buffer. + * @discussion Get filesystem name; this refers to the filesystem type of which a mount is an instantiation, + * rather than a name specific to the mountpoint. + * @param mp Mount for which to get name. + * @param buffer Destination for name; length should be at least MFSNAMELEN. + */ +void vfs_name(mount_t mp, char *buffer); /*! - @function vfs_devblocksize - @abstract Get the block size of the device underlying a mount. - @param mp Mount for which to get block size. - @return Block size. - */ -int vfs_devblocksize(mount_t mp); + * @function vfs_devblocksize + * @abstract Get the block size of the device underlying a mount. 
+ * @param mp Mount for which to get block size. + * @return Block size. + */ +int vfs_devblocksize(mount_t mp); /*! - @function vfs_ioattr - @abstract Get I/O attributes associated with a mounpoint. - @param mp Mount for which to get attributes. If NULL, system defaults are filled into ioattrp. - @param ioattrp Destination for results. - */ -void vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp); + * @function vfs_ioattr + * @abstract Get I/O attributes associated with a mounpoint. + * @param mp Mount for which to get attributes. If NULL, system defaults are filled into ioattrp. + * @param ioattrp Destination for results. + */ +void vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp); /*! - @function vfs_setioattr - @abstract Set I/O attributes associated with a mounpoint. - @param mp Mount for which to set attributes. - @param ioattrp Structure containing I/O parameters; all fields must be filled in. - */ -void vfs_setioattr(mount_t mp, struct vfsioattr *ioattrp); + * @function vfs_setioattr + * @abstract Set I/O attributes associated with a mounpoint. + * @param mp Mount for which to set attributes. + * @param ioattrp Structure containing I/O parameters; all fields must be filled in. + */ +void vfs_setioattr(mount_t mp, struct vfsioattr *ioattrp); /*! - @function vfs_64bitready - @abstract Check if the filesystem associated with a mountpoint is marked ready for interaction with 64-bit user processes. - @param mp Mount to test. - @return Nonzero if filesystem is ready for 64-bit; 0 otherwise. - */ -int vfs_64bitready(mount_t mp); + * @function vfs_64bitready + * @abstract Check if the filesystem associated with a mountpoint is marked ready for interaction with 64-bit user processes. + * @param mp Mount to test. + * @return Nonzero if filesystem is ready for 64-bit; 0 otherwise. + */ +int vfs_64bitready(mount_t mp); #define LK_NOWAIT 1 /*! - @function vfs_busy - @abstract "Busy" a mountpoint. - @discussion vfs_busy() will "busy" a mountpoint, preventing unmounts from taking off, by taking its reader-writer lock - in a shared manner. If a mount is dead, - it will fail; if an unmount is in progress, depending on flags, it will either fail immediately or block - until the unmount completes (then failing if the unmount has succeeded, or potentially succeeding if unmounting failed). - A successful vfs_busy() must be followed by a vfs_unbusy() to release the lock on the mount. - @param mp Mount to busy. - @param flags LK_NOWAIT: fail with ENOENT if an unmount is in progress. - @return 0 for success, with a lock held; an error code otherwise, with no lock held. - */ -int vfs_busy(mount_t mp, int flags); + * @function vfs_busy + * @abstract "Busy" a mountpoint. + * @discussion vfs_busy() will "busy" a mountpoint, preventing unmounts from taking off, by taking its reader-writer lock + * in a shared manner. If a mount is dead, + * it will fail; if an unmount is in progress, depending on flags, it will either fail immediately or block + * until the unmount completes (then failing if the unmount has succeeded, or potentially succeeding if unmounting failed). + * A successful vfs_busy() must be followed by a vfs_unbusy() to release the lock on the mount. + * @param mp Mount to busy. + * @param flags LK_NOWAIT: fail with ENOENT if an unmount is in progress. + * @return 0 for success, with a lock held; an error code otherwise, with no lock held. + */ +int vfs_busy(mount_t mp, int flags); /*! - @function vfs_unbusy - @abstract "Unbusy" a mountpoint by releasing its read-write lock. 
- @discussion A successful vfs_busy() must be followed by a vfs_unbusy() to release the lock on the mount. - @param mp Mount to unbusy. - */ -void vfs_unbusy(mount_t mp); + * @function vfs_unbusy + * @abstract "Unbusy" a mountpoint by releasing its read-write lock. + * @discussion A successful vfs_busy() must be followed by a vfs_unbusy() to release the lock on the mount. + * @param mp Mount to unbusy. + */ +void vfs_unbusy(mount_t mp); /*! - @function vfs_getnewfsid - @abstract Generate a unique filesystem ID for a mount and store it in the mount structure. - @discussion Filesystem IDs are returned as part of "struct statfs." This function is typically - called as part of file-system specific mount code (i.e. through VFS_MOUNT). - @param mp Mount to set an ID for. - */ -void vfs_getnewfsid(struct mount *mp); + * @function vfs_getnewfsid + * @abstract Generate a unique filesystem ID for a mount and store it in the mount structure. + * @discussion Filesystem IDs are returned as part of "struct statfs." This function is typically + * called as part of file-system specific mount code (i.e. through VFS_MOUNT). + * @param mp Mount to set an ID for. + */ +void vfs_getnewfsid(struct mount *mp); /*! - @function vfs_getvfs - @abstract Given a filesystem ID, look up a mount structure. - @param fsid Filesystem ID to look up. - @return Mountpoint if found, else NULL. Note unmounting mountpoints can be returned. - */ -mount_t vfs_getvfs(fsid_t *fsid); + * @function vfs_getvfs + * @abstract Given a filesystem ID, look up a mount structure. + * @param fsid Filesystem ID to look up. + * @return Mountpoint if found, else NULL. Note unmounting mountpoints can be returned. + */ +mount_t vfs_getvfs(fsid_t *fsid); /*! - @function vfs_mountedon - @abstract Check whether a given block device has a filesystem mounted on it. - @discussion Note that this is NOT a check for a covered vnode (the directory upon which - a filesystem is mounted)--it is a test for whether a block device is being used as the source - of a filesystem. Note that a block device marked as being mounted on cannot be opened. - @param vp The vnode to test. - @return EBUSY if vnode is indeed the source of a filesystem; 0 if it is not. - */ -int vfs_mountedon(struct vnode *vp); + * @function vfs_mountedon + * @abstract Check whether a given block device has a filesystem mounted on it. + * @discussion Note that this is NOT a check for a covered vnode (the directory upon which + * a filesystem is mounted)--it is a test for whether a block device is being used as the source + * of a filesystem. Note that a block device marked as being mounted on cannot be opened. + * @param vp The vnode to test. + * @return EBUSY if vnode is indeed the source of a filesystem; 0 if it is not. + */ +int vfs_mountedon(struct vnode *vp); /*! - @function vfs_unmountbyfsid - @abstract Find a filesystem by ID and unmount it. - @param fsid ID of filesystem to unmount, as found through (for example) statfs. - @param flags MNT_FORCE: forcibly invalidate files open on the mount (though in-flight I/O operations - will be allowed to complete). - @param ctx Context against which to authenticate unmount operation. - @return 0 for succcess, nonero for failure. - */ -int vfs_unmountbyfsid(fsid_t *fsid, int flags, vfs_context_t ctx); + * @function vfs_unmountbyfsid + * @abstract Find a filesystem by ID and unmount it. + * @param fsid ID of filesystem to unmount, as found through (for example) statfs. 
+ * @param flags MNT_FORCE: forcibly invalidate files open on the mount (though in-flight I/O operations + * will be allowed to complete). + * @param ctx Context against which to authenticate unmount operation. + * @return 0 for success, nonzero for failure. + */ +int vfs_unmountbyfsid(fsid_t *fsid, int flags, vfs_context_t ctx); /*! - @function vfs_event_signal - @abstract Post a kqueue-style event on a filesystem (EVFILT_FS). - @param fsid Unused. - @param event Events to post. - @param data Unused. - */ -void vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data); + * @function vfs_event_signal + * @abstract Post a kqueue-style event on a filesystem (EVFILT_FS). + * @param fsid Unused. + * @param event Events to post. + * @param data Unused. + */ +void vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data); /*! - @function vfs_event_init - @abstract This function should not be called by kexts. - */ -void vfs_event_init(void); /* XXX We should not export this */ + * @function vfs_event_init + * @abstract This function should not be called by kexts. + */ +void vfs_event_init(void); /* XXX We should not export this */ /*! - @function vfs_set_root_unmount_cleanly - @abstract This function should be called by the root file system - when it is being mounted if the file system state is consistent. -*/ + * @function vfs_set_root_unmounted_cleanly + * @abstract This function should be called by the root file system + * when it is being mounted if the file system state is consistent. + */ void vfs_set_root_unmounted_cleanly(void); #ifdef KERNEL_PRIVATE -int vfs_getbyid(fsid_t *fsid, ino64_t ino, vnode_t *vpp, vfs_context_t ctx); -int vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx); -int vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx); -int vfs_extendedsecurity(mount_t); -mount_t vfs_getvfs_by_mntonname(char *); +int vfs_getbyid(fsid_t *fsid, ino64_t ino, vnode_t *vpp, vfs_context_t ctx); +int vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx); +int vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx); +int vfs_extendedsecurity(mount_t); +mount_t vfs_getvfs_by_mntonname(char *); vnode_t vfs_vnodecovered(mount_t mp); /* Returns vnode with an iocount that must be released with vnode_put() */ vnode_t vfs_devvp(mount_t mp); /* Please see block comment with implementation */ -int vfs_nativexattrs (mount_t mp); /* whether or not the FS supports EAs natively */ +int vfs_nativexattrs(mount_t mp); /* whether or not the FS supports EAs natively */ void * vfs_mntlabel(mount_t mp); /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */ -void vfs_setcompoundopen(mount_t mp); +void vfs_setcompoundopen(mount_t mp); uint64_t vfs_throttle_mask(mount_t mp); int vfs_isswapmount(mount_t mp); struct vnode_trigger_info; /*! - @function vfs_addtrigger - @abstract Create an "external" trigger vnode: look up a vnode and mark it as - a trigger. Can only safely be called in the context of a callback set by - vfs_settriggercallback(). May only be used on a file which is not already - marked as a trigger. - @param relpath Path relative to root of mountpoint at which to mark trigger. - @param vtip Information about trigger; analogous to "vnode_trigger_param" - argument to vnode_create. - @param ctx Authorization context. + * @function vfs_addtrigger + * @abstract Create an "external" trigger vnode: look up a vnode and mark it as + * a trigger.
Can only safely be called in the context of a callback set by + * vfs_settriggercallback(). May only be used on a file which is not already + * marked as a trigger. + * @param relpath Path relative to root of mountpoint at which to mark trigger. + * @param vtip Information about trigger; analogous to "vnode_trigger_param" + * argument to vnode_create. + * @param ctx Authorization context. */ -int vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx); +int vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx); /*! - @enum vfs_trigger_callback_op_t - @abstract Operation to perform after an attempted unmount (successful or otherwise). - @constant VTC_REPLACE Unmount failed: attempt to replace triggers. Only valid - VFS operation to perform in this context is vfs_addtrigger(). - @constant VTC_RELEASE Unmount succeeded: release external triggering context. + * @enum vfs_trigger_callback_op_t + * @abstract Operation to perform after an attempted unmount (successful or otherwise). + * @constant VTC_REPLACE Unmount failed: attempt to replace triggers. Only valid + * VFS operation to perform in this context is vfs_addtrigger(). + * @constant VTC_RELEASE Unmount succeeded: release external triggering context. */ -typedef enum { +typedef enum { VTC_REPLACE, VTC_RELEASE } vfs_trigger_callback_op_t; /*! - @typedef vfs_trigger_callback_t - @abstract Callback to be passed to vfs_settriggercallback() and invoked from - unmount context. - @param mp Mountpoint on which unmount is occurring. - @param op Operation (see vfs_trigger_callback_op_t) - @param data Context passed to vfs_settriggercallback() - @param ctx Authorization context in which unmount is occurring. + * @typedef vfs_trigger_callback_t + * @abstract Callback to be passed to vfs_settriggercallback() and invoked from + * unmount context. + * @param mp Mountpoint on which unmount is occurring. + * @param op Operation (see vfs_trigger_callback_op_t) + * @param data Context passed to vfs_settriggercallback() + * @param ctx Authorization context in which unmount is occurring. */ typedef void vfs_trigger_callback_t(mount_t mp, vfs_trigger_callback_op_t op, void *data, vfs_context_t ctx); /*! - @function vfs_settriggercallback - @abstract Install a callback to be called after unmount attempts on a volume, - to restore triggers for failed unmounts and release state for successful ones. - @discussion Installs a callback which will be called in two situations: a - failed unmount where vnodes may have been reclaimed and a successful unmount. - Gives an external trigger-marking entity an opportunity to replace triggers - which may have been reclaimed. The callback can only be installed (not - cleared), and only one callback can be installed. The callback will be called - with a read-write lock held on the mount point; in the VTC_REPLACE case, the - only valid VFS operation to perform in the context of the callback is - vfs_addtrigger() on the mountpoint in question. This rwlock is held in order - to attempt to provide some modicum of coverage from lookups which might find - missing trigger vnodes and receive spurious ENOENTs. Note that this - protection is incomplete--current working directories, or traversals up into a - volume via ".." may still find missing triggers. As of this writing, no - serialization mechanism exists to do better than this. 
- When the "op" is VTC_RELEASE, the mountpoint is going away, and the only valid - VFS operation is to free the private data pointer if needed. The callback - will be called immediately, with VTC_REPLACE, from vfs_settriggercallback(), - if installation is successful. - @param fsid FSID for filesystem in question. - @param vtc Callback pointer. - @param data Context pointer to be passed to callback. - @param flags Currently unused. - @param ctx Authorization context. - @return 0 for success. EBUSY if a trigger has already been installed. - */ -int vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags, vfs_context_t ctx); + * @function vfs_settriggercallback + * @abstract Install a callback to be called after unmount attempts on a volume, + * to restore triggers for failed unmounts and release state for successful ones. + * @discussion Installs a callback which will be called in two situations: a + * failed unmount where vnodes may have been reclaimed and a successful unmount. + * Gives an external trigger-marking entity an opportunity to replace triggers + * which may have been reclaimed. The callback can only be installed (not + * cleared), and only one callback can be installed. The callback will be called + * with a read-write lock held on the mount point; in the VTC_REPLACE case, the + * only valid VFS operation to perform in the context of the callback is + * vfs_addtrigger() on the mountpoint in question. This rwlock is held in order + * to attempt to provide some modicum of coverage from lookups which might find + * missing trigger vnodes and receive spurious ENOENTs. Note that this + * protection is incomplete--current working directories, or traversals up into a + * volume via ".." may still find missing triggers. As of this writing, no + * serialization mechanism exists to do better than this. + * When the "op" is VTC_RELEASE, the mountpoint is going away, and the only valid + * VFS operation is to free the private data pointer if needed. The callback + * will be called immediately, with VTC_REPLACE, from vfs_settriggercallback(), + * if installation is successful. + * @param fsid FSID for filesystem in question. + * @param vtc Callback pointer. + * @param data Context pointer to be passed to callback. + * @param flags Currently unused. + * @param ctx Authorization context. + * @return 0 for success. EBUSY if a trigger has already been installed. 
+ */ +int vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags, vfs_context_t ctx); /* tags a volume as not supporting extended readdir for NFS exports */ -void mount_set_noreaddirext (mount_t); +void mount_set_noreaddirext(mount_t); -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ __END_DECLS #endif /* KERNEL */ @@ -1363,43 +1363,43 @@ __END_DECLS /* * Generic file handle */ -#define NFS_MAX_FH_SIZE NFSV4_MAX_FH_SIZE -#define NFSV4_MAX_FH_SIZE 128 -#define NFSV3_MAX_FH_SIZE 64 -#define NFSV2_MAX_FH_SIZE 32 +#define NFS_MAX_FH_SIZE NFSV4_MAX_FH_SIZE +#define NFSV4_MAX_FH_SIZE 128 +#define NFSV3_MAX_FH_SIZE 64 +#define NFSV2_MAX_FH_SIZE 32 struct fhandle { - unsigned int fh_len; /* length of file handle */ - unsigned char fh_data[NFS_MAX_FH_SIZE]; /* file handle value */ + unsigned int fh_len; /* length of file handle */ + unsigned char fh_data[NFS_MAX_FH_SIZE]; /* file handle value */ }; -typedef struct fhandle fhandle_t; +typedef struct fhandle fhandle_t; __BEGIN_DECLS -int fhopen(const struct fhandle *, int); -int fstatfs(int, struct statfs *) __DARWIN_INODE64(fstatfs); +int fhopen(const struct fhandle *, int); +int fstatfs(int, struct statfs *) __DARWIN_INODE64(fstatfs); #if !__DARWIN_ONLY_64_BIT_INO_T -int fstatfs64(int, struct statfs64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); +int fstatfs64(int, struct statfs64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); #endif /* !__DARWIN_ONLY_64_BIT_INO_T */ -int getfh(const char *, fhandle_t *); -int getfsstat(struct statfs *, int, int) __DARWIN_INODE64(getfsstat); +int getfh(const char *, fhandle_t *); +int getfsstat(struct statfs *, int, int) __DARWIN_INODE64(getfsstat); #if !__DARWIN_ONLY_64_BIT_INO_T -int getfsstat64(struct statfs64 *, int, int) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); +int getfsstat64(struct statfs64 *, int, int) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); #endif /* !__DARWIN_ONLY_64_BIT_INO_T */ -int getmntinfo(struct statfs **, int) __DARWIN_INODE64(getmntinfo); -int getmntinfo_r_np(struct statfs **, int) __DARWIN_INODE64(getmntinfo_r_np) - __OSX_AVAILABLE(10.13) __IOS_AVAILABLE(11.0) - __TVOS_AVAILABLE(11.0) __WATCHOS_AVAILABLE(4.0); +int getmntinfo(struct statfs **, int) __DARWIN_INODE64(getmntinfo); +int getmntinfo_r_np(struct statfs **, int) __DARWIN_INODE64(getmntinfo_r_np) +__OSX_AVAILABLE(10.13) __IOS_AVAILABLE(11.0) +__TVOS_AVAILABLE(11.0) __WATCHOS_AVAILABLE(4.0); #if !__DARWIN_ONLY_64_BIT_INO_T -int getmntinfo64(struct statfs64 **, int) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); +int getmntinfo64(struct statfs64 **, int) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); #endif /* !__DARWIN_ONLY_64_BIT_INO_T */ -int mount(const char *, const char *, int, void *); -int fmount(const char *, int, int, void *) __OSX_AVAILABLE(10.13) __IOS_AVAILABLE(11.0) __TVOS_AVAILABLE(11.0) __WATCHOS_AVAILABLE(4.0); -int statfs(const char *, struct statfs *) __DARWIN_INODE64(statfs); +int mount(const char *, const char *, int, void *); +int fmount(const char *, int, int, void *) __OSX_AVAILABLE(10.13) __IOS_AVAILABLE(11.0) __TVOS_AVAILABLE(11.0) __WATCHOS_AVAILABLE(4.0); +int statfs(const char *, struct statfs *) __DARWIN_INODE64(statfs); #if !__DARWIN_ONLY_64_BIT_INO_T -int statfs64(const char *, struct statfs64 *) 
__OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); +int statfs64(const char *, struct statfs64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); #endif /* !__DARWIN_ONLY_64_BIT_INO_T */ -int unmount(const char *, int); -int getvfsbyname(const char *, struct vfsconf *); +int unmount(const char *, int); +int getvfsbyname(const char *, struct vfsconf *); __END_DECLS #endif /* KERNEL */ diff --git a/bsd/sys/mount_internal.h b/bsd/sys/mount_internal.h index 243c75e02..2bb9d8d11 100644 --- a/bsd/sys/mount_internal.h +++ b/bsd/sys/mount_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
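The user-space half of mount.h above is easiest to see in a small program; a minimal example (error handling elided) that walks the mount table with getmntinfo(3), using only the declarations shown:

#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
#include <stdio.h>

int
main(void)
{
	struct statfs *mntbuf;
	int i, count;

	/* MNT_NOWAIT: return possibly-stale statistics rather than
	 * blocking on each mounted filesystem. */
	count = getmntinfo(&mntbuf, MNT_NOWAIT);
	for (i = 0; i < count; i++) {
		printf("%s on %s (%s)\n", mntbuf[i].f_mntfromname,
		    mntbuf[i].f_mntonname, mntbuf[i].f_fstypename);
	}
	return 0;
}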
All Rights Reserved */ @@ -68,7 +68,7 @@ */ #ifndef _SYS_MOUNT_INTERNAL_H_ -#define _SYS_MOUNT_INTERNAL_H_ +#define _SYS_MOUNT_INTERNAL_H_ #include #ifndef KERNEL @@ -80,8 +80,8 @@ #include #include #include -#include /* XXX for AF_MAX */ -#include /* XXX for AF_MAX */ +#include /* XXX for AF_MAX */ +#include /* XXX for AF_MAX */ #include #include #include @@ -105,60 +105,60 @@ typedef uint32_t pending_io_t; TAILQ_HEAD(vnodelst, vnode); struct mount { - TAILQ_ENTRY(mount) mnt_list; /* mount list */ - int32_t mnt_count; /* reference on the mount */ - lck_mtx_t mnt_mlock; /* mutex that protects mount point */ - struct vfsops *mnt_op; /* operations on fs */ - struct vfstable *mnt_vtable; /* configuration info */ - struct vnode *mnt_vnodecovered; /* vnode we mounted on */ - struct vnodelst mnt_vnodelist; /* list of vnodes this mount */ - struct vnodelst mnt_workerqueue; /* list of vnodes this mount */ - struct vnodelst mnt_newvnodes; /* list of vnodes this mount */ - uint32_t mnt_flag; /* flags */ - uint32_t mnt_kern_flag; /* kernel only flags */ - uint32_t mnt_compound_ops; /* Available compound operations */ - uint32_t mnt_lflag; /* mount life cycle flags */ - uint32_t mnt_maxsymlinklen; /* max size of short symlink */ - struct vfsstatfs mnt_vfsstat; /* cache of filesystem stats */ - qaddr_t mnt_data; /* private data */ + TAILQ_ENTRY(mount) mnt_list; /* mount list */ + int32_t mnt_count; /* reference on the mount */ + lck_mtx_t mnt_mlock; /* mutex that protects mount point */ + struct vfsops *mnt_op; /* operations on fs */ + struct vfstable *mnt_vtable; /* configuration info */ + struct vnode *mnt_vnodecovered; /* vnode we mounted on */ + struct vnodelst mnt_vnodelist; /* list of vnodes this mount */ + struct vnodelst mnt_workerqueue; /* list of vnodes this mount */ + struct vnodelst mnt_newvnodes; /* list of vnodes this mount */ + uint32_t mnt_flag; /* flags */ + uint32_t mnt_kern_flag; /* kernel only flags */ + uint32_t mnt_compound_ops; /* Available compound operations */ + uint32_t mnt_lflag; /* mount life cycle flags */ + uint32_t mnt_maxsymlinklen; /* max size of short symlink */ + struct vfsstatfs mnt_vfsstat; /* cache of filesystem stats */ + qaddr_t mnt_data; /* private data */ /* Cached values of the IO constraints for the device */ - uint32_t mnt_maxreadcnt; /* Max. byte count for read */ - uint32_t mnt_maxwritecnt; /* Max. byte count for write */ - uint32_t mnt_segreadcnt; /* Max. segment count for read */ - uint32_t mnt_segwritecnt; /* Max. segment count for write */ - uint32_t mnt_maxsegreadsize; /* Max. segment read size */ - uint32_t mnt_maxsegwritesize; /* Max. 
segment write size */ - uint32_t mnt_alignmentmask; /* Mask of bits that aren't addressable via DMA */ - uint32_t mnt_devblocksize; /* the underlying device block size */ - uint32_t mnt_ioqueue_depth; /* the maxiumum number of commands a device can accept */ - uint32_t mnt_ioscale; /* scale the various throttles/limits imposed on the amount of I/O in flight */ - uint32_t mnt_ioflags; /* flags for underlying device */ - uint32_t mnt_minsaturationbytecount; /* if non-zero, mininum amount of writes (in bytes) needed to max out throughput */ - pending_io_t mnt_pending_write_size __attribute__((aligned(sizeof(pending_io_t)))); /* byte count of pending writes */ - pending_io_t mnt_pending_read_size __attribute__((aligned(sizeof(pending_io_t)))); /* byte count of pending reads */ - struct timeval mnt_last_write_issued_timestamp; - struct timeval mnt_last_write_completed_timestamp; - int64_t mnt_max_swappin_available; - - lck_rw_t mnt_rwlock; /* mutex readwrite lock */ - lck_mtx_t mnt_renamelock; /* mutex that serializes renames that change shape of tree */ - vnode_t mnt_devvp; /* the device mounted on for local file systems */ - uint32_t mnt_devbsdunit; /* the BSD unit number of the device */ - uint64_t mnt_throttle_mask; /* the throttle mask of what devices will be affected by I/O from this mnt */ - void *mnt_throttle_info; /* used by the throttle code */ - int32_t mnt_crossref; /* refernces to cover lookups crossing into mp */ - int32_t mnt_iterref; /* refernces to cover iterations; drained makes it -ve */ + uint32_t mnt_maxreadcnt; /* Max. byte count for read */ + uint32_t mnt_maxwritecnt; /* Max. byte count for write */ + uint32_t mnt_segreadcnt; /* Max. segment count for read */ + uint32_t mnt_segwritecnt; /* Max. segment count for write */ + uint32_t mnt_maxsegreadsize; /* Max. segment read size */ + uint32_t mnt_maxsegwritesize; /* Max. 
segment write size */ + uint32_t mnt_alignmentmask; /* Mask of bits that aren't addressable via DMA */ + uint32_t mnt_devblocksize; /* the underlying device block size */ + uint32_t mnt_ioqueue_depth; /* the maximum number of commands a device can accept */ + uint32_t mnt_ioscale; /* scale the various throttles/limits imposed on the amount of I/O in flight */ + uint32_t mnt_ioflags; /* flags for underlying device */ + uint32_t mnt_minsaturationbytecount; /* if non-zero, minimum amount of writes (in bytes) needed to max out throughput */ + pending_io_t mnt_pending_write_size __attribute__((aligned(sizeof(pending_io_t)))); /* byte count of pending writes */ + pending_io_t mnt_pending_read_size __attribute__((aligned(sizeof(pending_io_t)))); /* byte count of pending reads */ + struct timeval mnt_last_write_issued_timestamp; + struct timeval mnt_last_write_completed_timestamp; + int64_t mnt_max_swappin_available; + + lck_rw_t mnt_rwlock; /* mutex readwrite lock */ + lck_mtx_t mnt_renamelock; /* mutex that serializes renames that change shape of tree */ + vnode_t mnt_devvp; /* the device mounted on for local file systems */ + uint32_t mnt_devbsdunit; /* the BSD unit number of the device */ + uint64_t mnt_throttle_mask; /* the throttle mask of what devices will be affected by I/O from this mnt */ + void *mnt_throttle_info; /* used by the throttle code */ + int32_t mnt_crossref; /* references to cover lookups crossing into mp */ + int32_t mnt_iterref; /* references to cover iterations; drained makes it -ve */ #if CONFIG_TRIGGERS - int32_t mnt_numtriggers; /* num of trigger vnodes for this mount */ + int32_t mnt_numtriggers; /* num of trigger vnodes for this mount */ vfs_trigger_callback_t *mnt_triggercallback; - void *mnt_triggerdata; + void *mnt_triggerdata; #endif - /* XXX 3762912 hack to support HFS filesystem 'owner' */ - uid_t mnt_fsowner; - gid_t mnt_fsgroup; + /* XXX 3762912 hack to support HFS filesystem 'owner' */ + uid_t mnt_fsowner; + gid_t mnt_fsgroup; - struct label *mnt_mntlabel; /* MAC mount label */ - struct label *mnt_fslabel; /* MAC default fs label */ + struct label *mnt_mntlabel; /* MAC mount label */ + struct label *mnt_fslabel; /* MAC default fs label */ /* * cache the rootvp of the last mount point @@ -174,16 +174,16 @@ struct mount { * we don't take an explicit long term reference * on it when we mount it */ - vnode_t mnt_realrootvp; - uint32_t mnt_realrootvp_vid; + vnode_t mnt_realrootvp; + uint32_t mnt_realrootvp_vid; /* * bumped each time a mount or unmount * occurs... its used to invalidate * 'mnt_realrootvp' from the cache */ uint32_t mnt_generation; - /* - * if 'MNTK_AUTH_CACHE_TIMEOUT' is + /* + * if 'MNTK_AUTH_CACHE_TIMEOUT' is * set, then 'mnt_authcache_ttl' is * the time-to-live for the per-vnode authentication cache * on this mount... if zero, no cache is maintained... @@ -191,41 +191,41 @@ struct mount { * time-to-live for the cached lookup right for * volumes marked 'MNTK_AUTH_OPAQUE'.
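A brief aside on mnt_ioscale above: it is derived from the device queue depth via the MNT_IOSCALE() macro defined a few lines further down. A stand-alone illustration of that arithmetic with invented depths, assuming the MNT_IOSCALE and MNT_DEFAULT_IOQUEUE_DEPTH definitions below are in scope:

#include <assert.h>

int
main(void)
{
	/* MNT_IOSCALE(d) == (d + 31) / 32, since MNT_DEFAULT_IOQUEUE_DEPTH is 32. */
	assert(MNT_IOSCALE(1) == 1);    /* shallow queue: no extra scaling */
	assert(MNT_IOSCALE(32) == 1);   /* exactly the default depth */
	assert(MNT_IOSCALE(256) == 8);  /* deep queue: 8x the in-flight I/O limits */
	return 0;
}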
*/ - int mnt_authcache_ttl; - char fstypename_override[MFSTYPENAMELEN]; + int mnt_authcache_ttl; + char fstypename_override[MFSTYPENAMELEN]; - uint32_t mnt_iobufinuse; + uint32_t mnt_iobufinuse; void *mnt_disk_conditioner_info; - lck_mtx_t mnt_iter_lock; /* mutex that protects iteration of vnodes */ + lck_mtx_t mnt_iter_lock; /* mutex that protects iteration of vnodes */ }; /* * default number of seconds to keep cached lookup * rights valid on mounts marked MNTK_AUTH_OPAQUE */ -#define CACHED_LOOKUP_RIGHT_TTL 2 +#define CACHED_LOOKUP_RIGHT_TTL 2 /* * ioflags */ -#define MNT_IOFLAGS_FUA_SUPPORTED 0x00000001 -#define MNT_IOFLAGS_UNMAP_SUPPORTED 0x00000002 -#define MNT_IOFLAGS_IOSCHED_SUPPORTED 0x00000004 -#define MNT_IOFLAGS_CSUNMAP_SUPPORTED 0x00000008 -#define MNT_IOFLAGS_SWAPPIN_SUPPORTED 0x00000010 -#define MNT_IOFLAGS_FUSION_DRIVE 0x00000020 +#define MNT_IOFLAGS_FUA_SUPPORTED 0x00000001 +#define MNT_IOFLAGS_UNMAP_SUPPORTED 0x00000002 +#define MNT_IOFLAGS_IOSCHED_SUPPORTED 0x00000004 +#define MNT_IOFLAGS_CSUNMAP_SUPPORTED 0x00000008 +#define MNT_IOFLAGS_SWAPPIN_SUPPORTED 0x00000010 +#define MNT_IOFLAGS_FUSION_DRIVE 0x00000020 /* * ioqueue depth for devices that don't report one */ -#define MNT_DEFAULT_IOQUEUE_DEPTH 32 +#define MNT_DEFAULT_IOQUEUE_DEPTH 32 /* * mnt_ioscale value for the given ioqueue depth */ -#define MNT_IOSCALE(ioqueue_depth) ((ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH) +#define MNT_IOSCALE(ioqueue_depth) ((ioqueue_depth + (MNT_DEFAULT_IOQUEUE_DEPTH - 1)) / MNT_DEFAULT_IOQUEUE_DEPTH) /* mount point to which dead vps point to */ extern struct mount * dead_mountp; @@ -241,62 +241,62 @@ extern struct mount * dead_mountp; * because the bits here were broken out from the high bits * of the mount flags. */ -#define MNTK_NOSWAP 0x00000080 /* swap files cannot be used on this mount */ -#define MNTK_SWAP_MOUNT 0x00000100 /* we are swapping to this mount */ +#define MNTK_NOSWAP 0x00000080 /* swap files cannot be used on this mount */ +#define MNTK_SWAP_MOUNT 0x00000100 /* we are swapping to this mount */ #define MNTK_DENY_READDIREXT 0x00000200 /* Deny Extended-style readdir's for this volume */ -#define MNTK_PERMIT_UNMOUNT 0x00000400 /* Allow (non-forced) unmounts by UIDs other than the one that mounted the volume */ +#define MNTK_PERMIT_UNMOUNT 0x00000400 /* Allow (non-forced) unmounts by UIDs other than the one that mounted the volume */ #ifdef NFSCLIENT #define MNTK_TYPENAME_OVERRIDE 0x00000800 /* override the fstypename for statfs() */ #endif /* NFSCLIENT */ -#define MNTK_KERNEL_MOUNT 0x00001000 /* mount came from kernel side */ +#define MNTK_KERNEL_MOUNT 0x00001000 /* mount came from kernel side */ #ifdef CONFIG_IMGSRC_ACCESS -#define MNTK_HAS_MOVED 0x00002000 -#define MNTK_BACKS_ROOT 0x00004000 +#define MNTK_HAS_MOVED 0x00002000 +#define MNTK_BACKS_ROOT 0x00004000 #endif /* CONFIG_IMGSRC_ACCESS */ -#define MNTK_AUTH_CACHE_TTL 0x00008000 /* rights cache has TTL - TTL of 0 disables cache */ -#define MNTK_PATH_FROM_ID 0x00010000 /* mounted file system supports id-to-path lookups */ -#define MNTK_UNMOUNT_PREFLIGHT 0x00020000 /* mounted file system wants preflight check during unmount */ -#define MNTK_NAMED_STREAMS 0x00040000 /* mounted file system supports Named Streams VNOPs */ -#define MNTK_EXTENDED_ATTRS 0x00080000 /* mounted file system supports Extended Attributes VNOPs */ -#define MNTK_LOCK_LOCAL 0x00100000 /* advisory locking is done above the VFS itself */ -#define MNTK_VIRTUALDEV 0x00200000 /* mounted on a virtual device i.e. 
a disk image */ -#define MNTK_ROOTDEV 0x00400000 /* this filesystem resides on the same device as the root */ -#define MNTK_SSD 0x00800000 /* underlying device is of the solid state variety */ -#define MNTK_UNMOUNT 0x01000000 /* unmount in progress */ -#define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish */ -#define MNTK_WANTRDWR 0x04000000 /* upgrade to read/write requested */ +#define MNTK_AUTH_CACHE_TTL 0x00008000 /* rights cache has TTL - TTL of 0 disables cache */ +#define MNTK_PATH_FROM_ID 0x00010000 /* mounted file system supports id-to-path lookups */ +#define MNTK_UNMOUNT_PREFLIGHT 0x00020000 /* mounted file system wants preflight check during unmount */ +#define MNTK_NAMED_STREAMS 0x00040000 /* mounted file system supports Named Streams VNOPs */ +#define MNTK_EXTENDED_ATTRS 0x00080000 /* mounted file system supports Extended Attributes VNOPs */ +#define MNTK_LOCK_LOCAL 0x00100000 /* advisory locking is done above the VFS itself */ +#define MNTK_VIRTUALDEV 0x00200000 /* mounted on a virtual device i.e. a disk image */ +#define MNTK_ROOTDEV 0x00400000 /* this filesystem resides on the same device as the root */ +#define MNTK_SSD 0x00800000 /* underlying device is of the solid state variety */ +#define MNTK_UNMOUNT 0x01000000 /* unmount in progress */ +#define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish */ +#define MNTK_WANTRDWR 0x04000000 /* upgrade to read/write requested */ #if REV_ENDIAN_FS -#define MNT_REVEND 0x08000000 /* Reverse endian FS */ +#define MNT_REVEND 0x08000000 /* Reverse endian FS */ #endif /* REV_ENDIAN_FS */ -#define MNTK_DIR_HARDLINKS 0x10000000 /* mounted file system supports directory hard links */ +#define MNTK_DIR_HARDLINKS 0x10000000 /* mounted file system supports directory hard links */ #define MNTK_AUTH_OPAQUE 0x20000000 /* authorisation decisions are not made locally */ #define MNTK_AUTH_OPAQUE_ACCESS 0x40000000 /* VNOP_ACCESS is reliable for remote auth */ -#define MNTK_EXTENDED_SECURITY 0x80000000 /* extended security supported */ +#define MNTK_EXTENDED_SECURITY 0x80000000 /* extended security supported */ -#define MNT_LNOTRESP 0x00000001 /* mount not responding */ -#define MNT_LUNMOUNT 0x00000002 /* mount in unmount */ -#define MNT_LFORCE 0x00000004 /* mount in forced unmount */ -#define MNT_LDRAIN 0x00000008 /* mount in drain */ -#define MNT_LITER 0x00000010 /* mount in iteration */ -#define MNT_LNEWVN 0x00000020 /* mount has new vnodes created */ -#define MNT_LWAIT 0x00000040 /* wait for unmount op */ -#define MNT_LUNUSED 0x00000080 /* available flag bit, used to be MNT_LITERWAIT */ -#define MNT_LDEAD 0x00000100 /* mount already unmounted*/ -#define MNT_LNOSUB 0x00000200 /* submount - no recursion */ +#define MNT_LNOTRESP 0x00000001 /* mount not responding */ +#define MNT_LUNMOUNT 0x00000002 /* mount in unmount */ +#define MNT_LFORCE 0x00000004 /* mount in forced unmount */ +#define MNT_LDRAIN 0x00000008 /* mount in drain */ +#define MNT_LITER 0x00000010 /* mount in iteration */ +#define MNT_LNEWVN 0x00000020 /* mount has new vnodes created */ +#define MNT_LWAIT 0x00000040 /* wait for unmount op */ +#define MNT_LUNUSED 0x00000080 /* available flag bit, used to be MNT_LITERWAIT */ +#define MNT_LDEAD 0x00000100 /* mount already unmounted*/ +#define MNT_LNOSUB 0x00000200 /* submount - no recursion */ /* * Generic file handle */ -#define NFS_MAX_FH_SIZE NFSV4_MAX_FH_SIZE -#define NFSV4_MAX_FH_SIZE 128 -#define NFSV3_MAX_FH_SIZE 64 -#define NFSV2_MAX_FH_SIZE 32 +#define NFS_MAX_FH_SIZE NFSV4_MAX_FH_SIZE +#define NFSV4_MAX_FH_SIZE 
128 +#define NFSV3_MAX_FH_SIZE 64 +#define NFSV2_MAX_FH_SIZE 32 struct fhandle { - unsigned int fh_len; /* length of file handle */ - unsigned char fh_data[NFS_MAX_FH_SIZE]; /* file handle value */ + unsigned int fh_len; /* length of file handle */ + unsigned char fh_data[NFS_MAX_FH_SIZE]; /* file handle value */ }; -typedef struct fhandle fhandle_t; +typedef struct fhandle fhandle_t; @@ -306,44 +306,44 @@ typedef struct fhandle fhandle_t; * mount time to identify the requested filesystem. */ struct vfstable { - struct vfsops *vfc_vfsops; /* filesystem operations vector */ - char vfc_name[MFSNAMELEN]; /* filesystem type name */ - int vfc_typenum; /* historic filesystem type number */ - int vfc_refcount; /* number mounted of this type */ - int vfc_flags; /* permanent flags */ - int (*vfc_mountroot)(mount_t, vnode_t, vfs_context_t); /* if != NULL, routine to mount root */ - struct vfstable *vfc_next; /* next in list */ - int32_t vfc_reserved1; + struct vfsops *vfc_vfsops; /* filesystem operations vector */ + char vfc_name[MFSNAMELEN]; /* filesystem type name */ + int vfc_typenum; /* historic filesystem type number */ + int vfc_refcount; /* number mounted of this type */ + int vfc_flags; /* permanent flags */ + int (*vfc_mountroot)(mount_t, vnode_t, vfs_context_t); /* if != NULL, routine to mount root */ + struct vfstable *vfc_next; /* next in list */ + int32_t vfc_reserved1; int32_t vfc_reserved2; - int vfc_vfsflags; /* for optional types */ - void * vfc_descptr; /* desc table allocated address */ - int vfc_descsize; /* size allocated for desc table */ - struct sysctl_oid *vfc_sysctl; /* dynamically registered sysctl node */ + int vfc_vfsflags; /* for optional types */ + void * vfc_descptr; /* desc table allocated address */ + int vfc_descsize; /* size allocated for desc table */ + struct sysctl_oid *vfc_sysctl; /* dynamically registered sysctl node */ }; /* vfc_vfsflags: */ -#define VFC_VFSLOCALARGS 0x002 -#define VFC_VFSGENERICARGS 0x004 -#define VFC_VFSNATIVEXATTR 0x010 +#define VFC_VFSLOCALARGS 0x002 +#define VFC_VFSGENERICARGS 0x004 +#define VFC_VFSNATIVEXATTR 0x010 #define VFC_VFSCANMOUNTROOT 0x020 -#define VFC_VFSPREFLIGHT 0x040 -#define VFC_VFSREADDIR_EXTENDED 0x080 -#define VFC_VFS64BITREADY 0x100 -#define VFC_VFSNOMACLABEL 0x1000 -#define VFC_VFSVNOP_PAGEINV2 0x2000 -#define VFC_VFSVNOP_PAGEOUTV2 0x4000 -#define VFC_VFSVNOP_NOUPDATEID_RENAME 0x8000 -#define VFC_VFSVNOP_SECLUDE_RENAME 0x10000 - -extern int maxvfstypenum; /* highest defined filesystem type */ -extern struct vfstable *vfsconf; /* head of list of filesystem types */ -extern const int maxvfsslots; /* Maximum statically allocated slots available to be used */ -extern int numused_vfsslots; /* number of statically allocated slots already used */ -extern int numregistered_fses; /* number of total registered filesystems */ +#define VFC_VFSPREFLIGHT 0x040 +#define VFC_VFSREADDIR_EXTENDED 0x080 +#define VFC_VFS64BITREADY 0x100 +#define VFC_VFSNOMACLABEL 0x1000 +#define VFC_VFSVNOP_PAGEINV2 0x2000 +#define VFC_VFSVNOP_PAGEOUTV2 0x4000 +#define VFC_VFSVNOP_NOUPDATEID_RENAME 0x8000 +#define VFC_VFSVNOP_SECLUDE_RENAME 0x10000 + +extern int maxvfstypenum; /* highest defined filesystem type */ +extern struct vfstable *vfsconf; /* head of list of filesystem types */ +extern const int maxvfsslots; /* Maximum statically allocated slots available to be used */ +extern int numused_vfsslots; /* number of statically allocated slots already used */ +extern int numregistered_fses; /* number of total registered filesystems */ /* the following 
two are xnu private */ -struct vfstable * vfstable_add(struct vfstable *); -int vfstable_del(struct vfstable *); +struct vfstable * vfstable_add(struct vfstable *); +int vfstable_del(struct vfstable *); struct vfsmount_args { @@ -353,7 +353,7 @@ struct vfsmount_args { void * mnt_fsdata; } mnt_localfs_args; struct { - void * mnt_fsdata; /* FS specific */ + void * mnt_fsdata; /* FS specific */ } mnt_remotefs_args; } mountfs_args; }; @@ -364,26 +364,26 @@ struct vfsmount_args { * NOTE - must be kept in sync with struct statfs in mount.h */ struct user64_statfs { - short f_otype; /* TEMPORARY SHADOW COPY OF f_type */ - short f_oflags; /* TEMPORARY SHADOW COPY OF f_flags */ - user64_long_t f_bsize; /* fundamental file system block size */ - user64_long_t f_iosize; /* optimal transfer block size */ - user64_long_t f_blocks; /* total data blocks in file system */ - user64_long_t f_bfree; /* free blocks in fs */ - user64_long_t f_bavail; /* free blocks avail to non-superuser */ - user64_long_t f_files; /* total file nodes in file system */ - user64_long_t f_ffree; /* free file nodes in fs */ - fsid_t f_fsid; /* file system id */ - uid_t f_owner; /* user that mounted the filesystem */ - short f_reserved1; /* spare for later */ - short f_type; /* type of filesystem */ - user64_long_t f_flags; /* copy of mount exported flags */ - user64_long_t f_reserved2[2]; /* reserved for future use */ - char f_fstypename[MFSNAMELEN]; /* fs type name */ - char f_mntonname[MNAMELEN]; /* directory on which mounted */ - char f_mntfromname[MNAMELEN];/* mounted filesystem */ - char f_reserved3; /* For alignment */ - user64_long_t f_reserved4[4]; /* For future use */ + short f_otype; /* TEMPORARY SHADOW COPY OF f_type */ + short f_oflags; /* TEMPORARY SHADOW COPY OF f_flags */ + user64_long_t f_bsize; /* fundamental file system block size */ + user64_long_t f_iosize; /* optimal transfer block size */ + user64_long_t f_blocks; /* total data blocks in file system */ + user64_long_t f_bfree; /* free blocks in fs */ + user64_long_t f_bavail; /* free blocks avail to non-superuser */ + user64_long_t f_files; /* total file nodes in file system */ + user64_long_t f_ffree; /* free file nodes in fs */ + fsid_t f_fsid; /* file system id */ + uid_t f_owner; /* user that mounted the filesystem */ + short f_reserved1; /* spare for later */ + short f_type; /* type of filesystem */ + user64_long_t f_flags; /* copy of mount exported flags */ + user64_long_t f_reserved2[2]; /* reserved for future use */ + char f_fstypename[MFSNAMELEN]; /* fs type name */ + char f_mntonname[MNAMELEN]; /* directory on which mounted */ + char f_mntfromname[MNAMELEN];/* mounted filesystem */ + char f_reserved3; /* For alignment */ + user64_long_t f_reserved4[4]; /* For future use */ }; /* @@ -391,26 +391,26 @@ struct user64_statfs { * NOTE - must be kept in sync with struct statfs in mount.h */ struct user32_statfs { - short f_otype; /* TEMPORARY SHADOW COPY OF f_type */ - short f_oflags; /* TEMPORARY SHADOW COPY OF f_flags */ - user32_long_t f_bsize; /* fundamental file system block size */ - user32_long_t f_iosize; /* optimal transfer block size */ - user32_long_t f_blocks; /* total data blocks in file system */ - user32_long_t f_bfree; /* free blocks in fs */ - user32_long_t f_bavail; /* free blocks avail to non-superuser */ - user32_long_t f_files; /* total file nodes in file system */ - user32_long_t f_ffree; /* free file nodes in fs */ - fsid_t f_fsid; /* file system id */ - uid_t f_owner; /* user that mounted the filesystem */ - short f_reserved1; /* 
spare for later */ - short f_type; /* type of filesystem */ - user32_long_t f_flags; /* copy of mount exported flags */ - user32_long_t f_reserved2[2]; /* reserved for future use */ - char f_fstypename[MFSNAMELEN]; /* fs type name */ - char f_mntonname[MNAMELEN]; /* directory on which mounted */ - char f_mntfromname[MNAMELEN];/* mounted filesystem */ - char f_reserved3; /* For alignment */ - user32_long_t f_reserved4[4]; /* For future use */ + short f_otype; /* TEMPORARY SHADOW COPY OF f_type */ + short f_oflags; /* TEMPORARY SHADOW COPY OF f_flags */ + user32_long_t f_bsize; /* fundamental file system block size */ + user32_long_t f_iosize; /* optimal transfer block size */ + user32_long_t f_blocks; /* total data blocks in file system */ + user32_long_t f_bfree; /* free blocks in fs */ + user32_long_t f_bavail; /* free blocks avail to non-superuser */ + user32_long_t f_files; /* total file nodes in file system */ + user32_long_t f_ffree; /* free file nodes in fs */ + fsid_t f_fsid; /* file system id */ + uid_t f_owner; /* user that mounted the filesystem */ + short f_reserved1; /* spare for later */ + short f_type; /* type of filesystem */ + user32_long_t f_flags; /* copy of mount exported flags */ + user32_long_t f_reserved2[2]; /* reserved for future use */ + char f_fstypename[MFSNAMELEN]; /* fs type name */ + char f_mntonname[MNAMELEN]; /* directory on which mounted */ + char f_mntfromname[MNAMELEN];/* mounted filesystem */ + char f_reserved3; /* For alignment */ + user32_long_t f_reserved4[4]; /* For future use */ }; /* @@ -442,11 +442,11 @@ int mount_refdrain(mount_t); /* vfs_rootmountalloc should be kept as a private api */ errno_t vfs_rootmountalloc(const char *, const char *, mount_t *mpp); -int vfs_mountroot(void); -void vfs_unmountall(void); -int safedounmount(struct mount *, int, vfs_context_t); -int dounmount(struct mount *, int, int, vfs_context_t); -void dounmount_submounts(struct mount *, int, vfs_context_t); +int vfs_mountroot(void); +void vfs_unmountall(void); +int safedounmount(struct mount *, int, vfs_context_t); +int dounmount(struct mount *, int, int, vfs_context_t); +void dounmount_submounts(struct mount *, int, vfs_context_t); /* xnu internal api */ void mount_dropcrossref(mount_t, vnode_t, int); @@ -461,10 +461,10 @@ void mount_iterdrain(mount_t); void mount_iterreset(mount_t); /* Private NFS spi */ -#define KERNEL_MOUNT_NOAUTH 0x01 /* Don't check the UID of the directory we are mounting on */ -#define KERNEL_MOUNT_PERMIT_UNMOUNT 0x02 /* Allow (non-forced) unmounts by users other the one who mounted the volume */ +#define KERNEL_MOUNT_NOAUTH 0x01 /* Don't check the UID of the directory we are mounting on */ +#define KERNEL_MOUNT_PERMIT_UNMOUNT 0x02 /* Allow (non-forced) unmounts by users other than the one who mounted the volume */ /* used by snapshot mounting SPI */ -#define KERNEL_MOUNT_SNAPSHOT 0x04 /* Mounting a snapshot */ +#define KERNEL_MOUNT_SNAPSHOT 0x04 /* Mounting a snapshot */ #if NFSCLIENT || DEVFS || ROUTEFS /* * NOTE: kernel_mount() does not force MNT_NOSUID, MNT_NOEXEC, or MNT_NODEC for non-privileged diff --git a/bsd/sys/msg.h b/bsd/sys/msg.h index 6f530fa66..64e0dd90f 100644 --- a/bsd/sys/msg.h +++ b/bsd/sys/msg.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License').
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: msg.h,v 1.4 1994/06/29 06:44:43 cgd Exp $ */ @@ -80,16 +80,16 @@ #include /* [XSI] Used for the number of messages in the message queue */ -typedef unsigned long msgqnum_t; +typedef unsigned long msgqnum_t; /* [XSI] Used for the number of bytes allowed in a message queue */ -typedef unsigned long msglen_t; +typedef unsigned long msglen_t; /* * Possible values for the fifth parameter to msgrcv(), in addition to the * IPC_NOWAIT flag, which is permitted. */ -#define MSG_NOERROR 010000 /* [XSI] No error if big message */ +#define MSG_NOERROR 010000 /* [XSI] No error if big message */ /* @@ -102,7 +102,7 @@ typedef unsigned long msglen_t; #pragma pack(4) /* * Structure used internally. - * + * * Structure whose address is passed as the third parameter to msgctl() * when the second parameter is IPC_SET or IPC_STAT. In the case of the * IPC_SET command, only the msg_perm.{uid|gid|perm} and msg_qbytes are @@ -115,50 +115,50 @@ typedef unsigned long msglen_t; #if (defined(_POSIX_C_SOURCE) && !defined(_DARWIN_C_SOURCE)) struct msqid_ds #else -#define msqid_ds __msqid_ds_new +#define msqid_ds __msqid_ds_new struct __msqid_ds_new #endif { - struct __ipc_perm_new msg_perm; /* [XSI] msg queue permissions */ - __int32_t msg_first; /* RESERVED: kernel use only */ - __int32_t msg_last; /* RESERVED: kernel use only */ - msglen_t msg_cbytes; /* # of bytes on the queue */ - msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ - msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ - pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ - pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ - time_t msg_stime; /* [XSI] time of last msgsnd() */ - __int32_t msg_pad1; /* RESERVED: DO NOT USE */ - time_t msg_rtime; /* [XSI] time of last msgrcv() */ - __int32_t msg_pad2; /* RESERVED: DO NOT USE */ - time_t msg_ctime; /* [XSI] time of last msgctl() */ - __int32_t msg_pad3; /* RESERVED: DO NOT USE */ - __int32_t msg_pad4[4]; /* RESERVED: DO NOT USE */ + struct __ipc_perm_new msg_perm; /* [XSI] msg queue permissions */ + __int32_t msg_first; /* RESERVED: kernel use only */ + __int32_t msg_last; /* RESERVED: kernel use only */ + msglen_t msg_cbytes; /* # of bytes on the queue */ + msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ + msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ + pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ + pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ + time_t msg_stime; /* [XSI] time of last msgsnd() */ + __int32_t msg_pad1; /* RESERVED: DO NOT USE */ + time_t msg_rtime; /* [XSI] time of last msgrcv() */ + __int32_t msg_pad2; /* RESERVED: DO NOT USE */ + time_t msg_ctime; /* [XSI] time of last msgctl() */ + __int32_t 
msg_pad3; /* RESERVED: DO NOT USE */ + __int32_t msg_pad4[4]; /* RESERVED: DO NOT USE */ }; #pragma pack() -#else /* !__DARWIN_UNIX03 */ -#define msqid_ds __msqid_ds_old -#endif /* !__DARWIN_UNIX03 */ +#else /* !__DARWIN_UNIX03 */ +#define msqid_ds __msqid_ds_old +#endif /* !__DARWIN_UNIX03 */ #if !__DARWIN_UNIX03 struct __msqid_ds_old { - struct __ipc_perm_old msg_perm; /* [XSI] msg queue permissions */ - __int32_t msg_first; /* RESERVED: kernel use only */ - __int32_t msg_last; /* RESERVED: kernel use only */ - msglen_t msg_cbytes; /* # of bytes on the queue */ - msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ - msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ - pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ - pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ - time_t msg_stime; /* [XSI] time of last msgsnd() */ - __int32_t msg_pad1; /* RESERVED: DO NOT USE */ - time_t msg_rtime; /* [XSI] time of last msgrcv() */ - __int32_t msg_pad2; /* RESERVED: DO NOT USE */ - time_t msg_ctime; /* [XSI] time of last msgctl() */ - __int32_t msg_pad3; /* RESERVED: DO NOT USE */ - __int32_t msg_pad4[4]; /* RESERVED: DO NOT USE */ + struct __ipc_perm_old msg_perm; /* [XSI] msg queue permissions */ + __int32_t msg_first; /* RESERVED: kernel use only */ + __int32_t msg_last; /* RESERVED: kernel use only */ + msglen_t msg_cbytes; /* # of bytes on the queue */ + msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ + msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ + pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ + pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ + time_t msg_stime; /* [XSI] time of last msgsnd() */ + __int32_t msg_pad1; /* RESERVED: DO NOT USE */ + time_t msg_rtime; /* [XSI] time of last msgrcv() */ + __int32_t msg_pad2; /* RESERVED: DO NOT USE */ + time_t msg_ctime; /* [XSI] time of last msgctl() */ + __int32_t msg_pad3; /* RESERVED: DO NOT USE */ + __int32_t msg_pad4[4]; /* RESERVED: DO NOT USE */ }; -#endif /* !__DARWIN_UNIX03 */ +#endif /* !__DARWIN_UNIX03 */ #ifdef KERNEL #ifdef __APPLE_API_PRIVATE @@ -168,31 +168,31 @@ struct __msqid_ds_old { #pragma options align=natural #endif -typedef user_ulong_t user_msgqnum_t; -typedef user64_ulong_t user64_msgqnum_t; -typedef user32_ulong_t user32_msgqnum_t; +typedef user_ulong_t user_msgqnum_t; +typedef user64_ulong_t user64_msgqnum_t; +typedef user32_ulong_t user32_msgqnum_t; -typedef user_ulong_t user_msglen_t; -typedef user64_ulong_t user64_msglen_t; -typedef user32_ulong_t user32_msglen_t; +typedef user_ulong_t user_msglen_t; +typedef user64_ulong_t user64_msglen_t; +typedef user32_ulong_t user32_msglen_t; /* kernel version */ struct user_msqid_ds { - struct ipc_perm msg_perm; /* [XSI] msg queue permissions */ - struct msg *msg_first; /* first message in the queue */ - struct msg *msg_last; /* last message in the queue */ - user_msglen_t msg_cbytes; /* # of bytes on the queue */ - user_msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ - user_msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ - pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ - pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ - user_time_t msg_stime; /* [XSI] time of last msgsnd() */ - __int32_t msg_pad1; /* RESERVED: DO NOT USE */ - user_time_t msg_rtime; /* [XSI] time of last msgrcv() */ - __int32_t msg_pad2; /* RESERVED: DO NOT USE */ - user_time_t msg_ctime; /* [XSI] time of last msgctl() */ - __int32_t msg_pad3; /* RESERVED: DO NOT USE */ - __int32_t msg_pad4[4]; + struct ipc_perm msg_perm; /* [XSI] msg queue 
permissions */ + struct msg *msg_first; /* first message in the queue */ + struct msg *msg_last; /* last message in the queue */ + user_msglen_t msg_cbytes; /* # of bytes on the queue */ + user_msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ + user_msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ + pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ + pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ + user_time_t msg_stime; /* [XSI] time of last msgsnd() */ + __int32_t msg_pad1; /* RESERVED: DO NOT USE */ + user_time_t msg_rtime; /* [XSI] time of last msgrcv() */ + __int32_t msg_pad2; /* RESERVED: DO NOT USE */ + user_time_t msg_ctime; /* [XSI] time of last msgctl() */ + __int32_t msg_pad3; /* RESERVED: DO NOT USE */ + __int32_t msg_pad4[4]; }; /* @@ -200,40 +200,39 @@ struct user_msqid_ds { * compiled LP64, because the 32 bit kernel doesn't need it */ struct user64_msqid_ds { - struct ipc_perm msg_perm; /* [XSI] msg queue permissions */ - __int32_t msg_first; /* RESERVED: kernel use only */ - __int32_t msg_last; /* RESERVED: kernel use only */ - user64_msglen_t msg_cbytes; /* # of bytes on the queue */ - user64_msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ - user64_msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ - pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ - pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ - user64_time_t msg_stime; /* [XSI] time of last msgsnd() */ - __int32_t msg_pad1; /* RESERVED: DO NOT USE */ - user64_time_t msg_rtime; /* [XSI] time of last msgrcv() */ - __int32_t msg_pad2; /* RESERVED: DO NOT USE */ - user64_time_t msg_ctime; /* [XSI] time of last msgctl() */ - __int32_t msg_pad3; /* RESERVED: DO NOT USE */ - __int32_t msg_pad4[4]; + struct ipc_perm msg_perm; /* [XSI] msg queue permissions */ + __int32_t msg_first; /* RESERVED: kernel use only */ + __int32_t msg_last; /* RESERVED: kernel use only */ + user64_msglen_t msg_cbytes; /* # of bytes on the queue */ + user64_msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ + user64_msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ + pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ + pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ + user64_time_t msg_stime; /* [XSI] time of last msgsnd() */ + __int32_t msg_pad1; /* RESERVED: DO NOT USE */ + user64_time_t msg_rtime; /* [XSI] time of last msgrcv() */ + __int32_t msg_pad2; /* RESERVED: DO NOT USE */ + user64_time_t msg_ctime; /* [XSI] time of last msgctl() */ + __int32_t msg_pad3; /* RESERVED: DO NOT USE */ + __int32_t msg_pad4[4]; } __attribute__((__packed__)); -struct user32_msqid_ds -{ - struct __ipc_perm_new msg_perm; /* [XSI] msg queue permissions */ - __int32_t msg_first; /* RESERVED: kernel use only */ - __int32_t msg_last; /* RESERVED: kernel use only */ - user32_msglen_t msg_cbytes; /* # of bytes on the queue */ - user32_msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ - user32_msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ - pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ - pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ - user32_time_t msg_stime; /* [XSI] time of last msgsnd() */ - __int32_t msg_pad1; /* RESERVED: DO NOT USE */ - user32_time_t msg_rtime; /* [XSI] time of last msgrcv() */ - __int32_t msg_pad2; /* RESERVED: DO NOT USE */ - user32_time_t msg_ctime; /* [XSI] time of last msgctl() */ - __int32_t msg_pad3; /* RESERVED: DO NOT USE */ - __int32_t msg_pad4[4]; /* RESERVED: DO NOT USE */ +struct user32_msqid_ds { + struct __ipc_perm_new msg_perm; /* [XSI] msg queue 
permissions */ + __int32_t msg_first; /* RESERVED: kernel use only */ + __int32_t msg_last; /* RESERVED: kernel use only */ + user32_msglen_t msg_cbytes; /* # of bytes on the queue */ + user32_msgqnum_t msg_qnum; /* [XSI] number of msgs on the queue */ + user32_msglen_t msg_qbytes; /* [XSI] max bytes on the queue */ + pid_t msg_lspid; /* [XSI] pid of last msgsnd() */ + pid_t msg_lrpid; /* [XSI] pid of last msgrcv() */ + user32_time_t msg_stime; /* [XSI] time of last msgsnd() */ + __int32_t msg_pad1; /* RESERVED: DO NOT USE */ + user32_time_t msg_rtime; /* [XSI] time of last msgrcv() */ + __int32_t msg_pad2; /* RESERVED: DO NOT USE */ + user32_time_t msg_ctime; /* [XSI] time of last msgctl() */ + __int32_t msg_pad3; /* RESERVED: DO NOT USE */ + __int32_t msg_pad4[4]; /* RESERVED: DO NOT USE */ }; #if __DARWIN_ALIGN_NATURAL @@ -246,12 +245,12 @@ struct label; * Kernel wrapper for the user-level structure */ struct msqid_kernel { - struct user_msqid_ds u; - struct label *label; /* MAC framework label */ + struct user_msqid_ds u; + struct label *label; /* MAC framework label */ }; -#endif /* __APPLE_API_PRIVATE */ -#endif /* KERNEL */ +#endif /* __APPLE_API_PRIVATE */ +#endif /* KERNEL */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) @@ -259,13 +258,13 @@ struct msqid_kernel { /* XXX kernel only; protect with macro later */ struct msg { - struct msg *msg_next; /* next msg in the chain */ - long msg_type; /* type of this message */ - /* >0 -> type of this message */ - /* 0 -> free header */ - unsigned short msg_ts; /* size of this message */ - short msg_spot; /* location of msg start in buffer */ - struct label *label; /* MAC label */ + struct msg *msg_next; /* next msg in the chain */ + long msg_type; /* type of this message */ + /* >0 -> type of this message */ + /* 0 -> free header */ + unsigned short msg_ts; /* size of this message */ + short msg_spot; /* location of msg start in buffer */ + struct label *label; /* MAC label */ }; /* @@ -277,8 +276,8 @@ struct msg { * backward compatability with existing source code. */ struct mymsg { - long mtype; /* message type (+ve integer) */ - char mtext[1]; /* message body */ + long mtype; /* message type (+ve integer) */ + char mtext[1]; /* message body */ }; /* @@ -292,40 +291,40 @@ struct mymsg { * two between 8 and 1024 inclusive (and panic's if it isn't). 
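The mymsg template above maps directly onto the msgsnd(2)/msgrcv(2) calling convention; a minimal user-space round trip over a private queue (the buffer size is invented, error handling elided):

#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>
#include <string.h>

struct textmsg {                /* same shape as struct mymsg */
	long mtype;             /* message type (+ve integer) */
	char mtext[64];         /* message body */
};

int
main(void)
{
	int qid = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	struct textmsg m = { .mtype = 1 };

	strlcpy(m.mtext, "hello", sizeof(m.mtext));
	msgsnd(qid, &m, strlen(m.mtext) + 1, 0);    /* size excludes mtype */

	msgrcv(qid, &m, sizeof(m.mtext), 1, 0);     /* fetch type-1 message */
	printf("received: %s\n", m.mtext);

	msgctl(qid, IPC_RMID, NULL);                /* tear the queue down */
	return 0;
}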
*/ struct msginfo { - int msgmax, /* max chars in a message */ - msgmni, /* max message queue identifiers */ - msgmnb, /* max chars in a queue */ - msgtql, /* max messages in system */ - msgssz, /* size of a message segment (see notes above) */ - msgseg; /* number of message segments */ + int msgmax, /* max chars in a message */ + msgmni, /* max message queue identifiers */ + msgmnb, /* max chars in a queue */ + msgtql, /* max messages in system */ + msgssz, /* size of a message segment (see notes above) */ + msgseg; /* number of message segments */ }; #ifdef KERNEL -extern struct msginfo msginfo; +extern struct msginfo msginfo; #ifndef MSGSSZ -#define MSGSSZ 8 /* Each segment must be 2^N long */ +#define MSGSSZ 8 /* Each segment must be 2^N long */ #endif #ifndef MSGSEG -#define MSGSEG 2048 /* must be less than 32767 */ +#define MSGSEG 2048 /* must be less than 32767 */ #endif -#define MSGMAX (MSGSSZ*MSGSEG) +#define MSGMAX (MSGSSZ*MSGSEG) #ifndef MSGMNB -#define MSGMNB 2048 /* max # of bytes in a queue */ +#define MSGMNB 2048 /* max # of bytes in a queue */ #endif #ifndef MSGMNI -#define MSGMNI 40 +#define MSGMNI 40 #endif #ifndef MSGTQL -#define MSGTQL 40 +#define MSGTQL 40 #endif /* * macros to convert between msqid_ds's and msqid's. * (specific to this implementation) */ -#define MSQID(ix,ds) ((ix) & 0xffff | (((ds).msg_perm.seq << 16) & 0xffff0000)) -#define MSQID_IX(id) ((id) & 0xffff) -#define MSQID_SEQ(id) (((id) >> 16) & 0xffff) +#define MSQID(ix, ds) ((ix) & 0xffff | (((ds).msg_perm.seq << 16) & 0xffff0000)) +#define MSQID_IX(id) ((id) & 0xffff) +#define MSQID_SEQ(id) (((id) >> 16) & 0xffff) /* * The rest of this file is specific to this particular implementation. @@ -336,29 +335,29 @@ extern struct msginfo msginfo; * Stuff allocated in machdep.h */ struct msgmap { - short next; /* next segment in buffer */ - /* -1 -> available */ - /* 0..(MSGSEG-1) -> index of next segment */ + short next; /* next segment in buffer */ + /* -1 -> available */ + /* 0..(MSGSEG-1) -> index of next segment */ }; /* The following four externs really, really need to die; should be static */ -extern char *msgpool; /* MSGMAX byte long msg buffer pool */ -extern struct msgmap *msgmaps; /* MSGSEG msgmap structures */ -extern struct msg *msghdrs; /* MSGTQL msg headers */ -extern struct msqid_kernel *msqids; /* MSGMNI user_msqid_ds struct's */ +extern char *msgpool; /* MSGMAX byte long msg buffer pool */ +extern struct msgmap *msgmaps; /* MSGSEG msgmap structures */ +extern struct msg *msghdrs; /* MSGTQL msg headers */ +extern struct msqid_kernel *msqids; /* MSGMNI user_msqid_ds struct's */ -#define MSG_LOCKED 01000 /* Is this msqid_ds locked? */ +#define MSG_LOCKED 01000 /* Is this msqid_ds locked? 
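The MSQID macros above pack a table slot and that slot's sequence number into a single identifier, so a stale id naming a deleted-and-recycled queue can be rejected. A stand-alone illustration of the same arithmetic (demo macro and values invented, mirroring the kernel definitions without its msqids table):

#include <stdio.h>

#define DEMO_MSQID(ix, seq)  (((ix) & 0xffff) | (((seq) << 16) & 0xffff0000))

int
main(void)
{
	int id = DEMO_MSQID(5, 3);

	printf("id  = %#010x\n", id);                /* 0x00030005 */
	printf("ix  = %d\n", id & 0xffff);           /* 5: slot index (MSQID_IX) */
	printf("seq = %d\n", (id >> 16) & 0xffff);   /* 3: generation (MSQID_SEQ) */
	return 0;
}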
*/ -#endif /* KERNEL */ -#endif /* __APPLE_API_UNSTABLE */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* KERNEL */ +#endif /* __APPLE_API_UNSTABLE */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #ifndef KERNEL __BEGIN_DECLS #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) int msgsys(int, ...); -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ int msgctl(int, int, struct msqid_ds *) __DARWIN_ALIAS(msgctl); int msgget(key_t, int); ssize_t msgrcv(int, void *, size_t, long, int) __DARWIN_ALIAS_C(msgrcv); @@ -368,5 +367,3 @@ __END_DECLS #endif /* !KERNEL */ #endif /* !_SYS_MSG_H_ */ - - diff --git a/bsd/sys/msgbuf.h b/bsd/sys/msgbuf.h index 2bceb084e..a4bfa51e0 100644 --- a/bsd/sys/msgbuf.h +++ b/bsd/sys/msgbuf.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -60,25 +60,25 @@ * * @(#)msgbuf.h 8.1 (Berkeley) 6/2/93 */ -#ifndef _SYS_MSGBUF_H_ +#ifndef _SYS_MSGBUF_H_ #define _SYS_MSGBUF_H_ #include -#define MAX_MSG_BSIZE (1*1024*1024) -struct msgbuf { -#define MSG_MAGIC 0x063061 - int msg_magic; - int msg_size; - int msg_bufx; /* write pointer */ - int msg_bufr; /* read pointer */ - char *msg_bufc; /* buffer */ +#define MAX_MSG_BSIZE (1*1024*1024) +struct msgbuf { +#define MSG_MAGIC 0x063061 + int msg_magic; + int msg_size; + int msg_bufx; /* write pointer */ + int msg_bufr; /* read pointer */ + char *msg_bufc; /* buffer */ }; #ifdef XNU_KERNEL_PRIVATE __BEGIN_DECLS -extern struct msgbuf *msgbufp; -extern struct msgbuf *aslbufp; +extern struct msgbuf *msgbufp; +extern struct msgbuf *aslbufp; extern void log_putc(char); extern void log_putc_locked(struct msgbuf *, char); extern int log_setsize(int size); @@ -86,4 +86,4 @@ extern int log_dmesg(user_addr_t, uint32_t, int32_t *); __END_DECLS #endif /* XNU_KERNEL_PRIVATE */ -#endif /* !_SYS_MSGBUF_H_ */ +#endif /* !_SYS_MSGBUF_H_ */ diff --git a/bsd/sys/munge.h b/bsd/sys/munge.h index b7e55762e..5ad78a7c1 100644 --- a/bsd/sys/munge.h +++ b/bsd/sys/munge.h @@ -2,7 +2,7 @@ * Coyright (c) 2005-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
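The msg.h declarations above are the user-visible System V/XSI message-queue interface. A minimal user-space sketch of their use (the queue key, message size, and type values here are illustrative, not taken from the patch):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/msg.h>

    int main(void)
    {
        /* Same shape as struct mymsg: a long type followed by the body. */
        struct { long mtype; char mtext[64]; } out = { 1, "hello" }, in;

        int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);  /* new private queue */
        if (id < 0) { perror("msgget"); return 1; }

        /* Per the MSQID macros above, the low 16 bits of id are a table
         * index and the high 16 bits a sequence number, so identifiers of
         * deleted queues are not silently reused. */
        if (msgsnd(id, &out, strlen(out.mtext) + 1, 0) < 0) perror("msgsnd");
        if (msgrcv(id, &in, sizeof(in.mtext), 1, 0) >= 0)
            printf("received: %s\n", in.mtext);

        msgctl(id, IPC_RMID, NULL);  /* remove the queue */
        return 0;
    }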
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/sys/namei.h b/bsd/sys/namei.h index 26a5c707b..ea81e399c 100644 --- a/bsd/sys/namei.h +++ b/bsd/sys/namei.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,14 +62,14 @@ */ #ifndef _SYS_NAMEI_H_ -#define _SYS_NAMEI_H_ +#define _SYS_NAMEI_H_ #include #ifdef KERNEL -#define LOCKLEAF 0x0004 /* lock inode on return */ -#define LOCKPARENT 0x0008 /* want parent vnode returned */ -#define WANTPARENT 0x0010 /* want parent vnode returned */ +#define LOCKLEAF 0x0004 /* lock inode on return */ +#define LOCKPARENT 0x0008 /* want parent vnode returned */ +#define WANTPARENT 0x0010 /* want parent vnode returned */ #ifdef KERNEL_PRIVATE #define CN_SECLUDE_RENAME 0x10000000 /*rename iff ¬(hard-linked ∨ opened ∨ mmaped)*/ @@ -81,7 +81,7 @@ #ifdef BSD_KERNEL_PRIVATE /* VFS Supports "/..namedfork/rsrc" access. */ -#define NAMEDRSRCFORK NAMEDSTREAMS +#define NAMEDRSRCFORK NAMEDSTREAMS #include @@ -90,57 +90,57 @@ #include #include -#define PATHBUFLEN 256 +#define PATHBUFLEN 256 /* * Encapsulation of namei parameters. */ -struct nameidata { +struct nameidata { /* * Arguments to namei/lookup. 
*/ - user_addr_t ni_dirp; /* pathname pointer */ - enum uio_seg ni_segflg; /* location of pathname */ + user_addr_t ni_dirp; /* pathname pointer */ + enum uio_seg ni_segflg; /* location of pathname */ #if CONFIG_TRIGGERS - enum path_operation ni_op; /* intended operation, see enum path_operation in vnode.h */ + enum path_operation ni_op; /* intended operation, see enum path_operation in vnode.h */ #endif /* CONFIG_TRIGGERS */ /* * Arguments to lookup. */ - struct vnode *ni_startdir; /* starting directory */ - struct vnode *ni_rootdir; /* logical root directory */ - struct vnode *ni_usedvp; /* directory passed in via USEDVP */ + struct vnode *ni_startdir; /* starting directory */ + struct vnode *ni_rootdir; /* logical root directory */ + struct vnode *ni_usedvp; /* directory passed in via USEDVP */ /* * Results: returned from/manipulated by lookup */ - struct vnode *ni_vp; /* vnode of result */ - struct vnode *ni_dvp; /* vnode of intermediate directory */ + struct vnode *ni_vp; /* vnode of result */ + struct vnode *ni_dvp; /* vnode of intermediate directory */ /* * Shared between namei and lookup/commit routines. */ - u_int ni_pathlen; /* remaining chars in path */ - char *ni_next; /* next location in pathname */ - char ni_pathbuf[PATHBUFLEN]; - u_long ni_loopcnt; /* count of symlinks encountered */ + u_int ni_pathlen; /* remaining chars in path */ + char *ni_next; /* next location in pathname */ + char ni_pathbuf[PATHBUFLEN]; + u_long ni_loopcnt; /* count of symlinks encountered */ struct componentname ni_cnd; int32_t ni_flag; - int ni_ncgeneration; /* For a batched vnop, grab generation beforehand */ + int ni_ncgeneration; /* For a batched vnop, grab generation beforehand */ }; -#define NAMEI_CONTLOOKUP 0x002 /* Continue processing a lookup which was partially processed in a compound VNOP */ -#define NAMEI_TRAILINGSLASH 0x004 /* There was at least one trailing slash after last component */ -#define NAMEI_UNFINISHED 0x008 /* We broke off a lookup to do a compound op */ -/* - * XXX Hack: we need to encode the intended VNOP in order to +#define NAMEI_CONTLOOKUP 0x002 /* Continue processing a lookup which was partially processed in a compound VNOP */ +#define NAMEI_TRAILINGSLASH 0x004 /* There was at least one trailing slash after last component */ +#define NAMEI_UNFINISHED 0x008 /* We broke off a lookup to do a compound op */ +/* + * XXX Hack: we need to encode the intended VNOP in order to * be able to include information about which operations a filesystem * supports in the decision to break off a lookup early. */ -#define NAMEI_COMPOUNDOPEN 0x010 -#define NAMEI_COMPOUNDREMOVE 0x020 -#define NAMEI_COMPOUNDMKDIR 0x040 -#define NAMEI_COMPOUNDRMDIR 0x080 -#define NAMEI_COMPOUNDRENAME 0x100 +#define NAMEI_COMPOUNDOPEN 0x010 +#define NAMEI_COMPOUNDREMOVE 0x020 +#define NAMEI_COMPOUNDMKDIR 0x040 +#define NAMEI_COMPOUNDRMDIR 0x080 +#define NAMEI_COMPOUNDRENAME 0x100 #define NAMEI_COMPOUND_OP_MASK (NAMEI_COMPOUNDOPEN | NAMEI_COMPOUNDREMOVE | NAMEI_COMPOUNDMKDIR | NAMEI_COMPOUNDRMDIR | NAMEI_COMPOUNDRENAME) #ifdef KERNEL @@ -148,11 +148,11 @@ struct nameidata { * namei operational modifier flags, stored in ni_cnd.flags * Also includes LOCKLEAF, LOCKPARENT, and WANTPARENT flags, defined above. 
*/ -#define NOCACHE 0x00000020 /* name must not be left in cache */ -#define NOFOLLOW 0x00000000 /* do not follow symbolic links (pseudo) */ +#define NOCACHE 0x00000020 /* name must not be left in cache */ +#define NOFOLLOW 0x00000000 /* do not follow symbolic links (pseudo) */ /* public FOLLOW 0x00000040 see vnode.h */ -#define SHAREDLEAF 0x00000080 /* OK to have shared leaf lock */ -#define MODMASK 0x100000fc /* mask of operational modifiers */ +#define SHAREDLEAF 0x00000080 /* OK to have shared leaf lock */ +#define MODMASK 0x100000fc /* mask of operational modifiers */ /* * Namei parameter descriptors. * @@ -162,32 +162,32 @@ struct nameidata { * name being sought. The caller is responsible for releasing the * buffer and for vrele'ing ni_startdir. */ -#define SAVENAME 0 /* save pathanme buffer ***obsolete */ -#define NOCROSSMOUNT 0x00000100 /* do not cross mount points */ -#define RDONLY 0x00000200 /* lookup with read-only semantics */ -#define HASBUF 0x00000400 /* has allocated pathname buffer */ -#define DONOTAUTH 0x00000800 /* do not authorize during lookup */ -#define SAVESTART 0x00001000 /* save starting directory */ +#define SAVENAME 0 /* save pathanme buffer ***obsolete */ +#define NOCROSSMOUNT 0x00000100 /* do not cross mount points */ +#define RDONLY 0x00000200 /* lookup with read-only semantics */ +#define HASBUF 0x00000400 /* has allocated pathname buffer */ +#define DONOTAUTH 0x00000800 /* do not authorize during lookup */ +#define SAVESTART 0x00001000 /* save starting directory */ /* public ISDOTDOT 0x00002000 see vnode.h */ /* public MAKEENTRY 0x00004000 see vnode.h */ /* public ISLASTCN 0x00008000 see vnode.h */ -#define ISSYMLINK 0x00010000 /* symlink needs interpretation */ +#define ISSYMLINK 0x00010000 /* symlink needs interpretation */ /* public ISWHITEOUT 0x00020000 see vnode.h */ /* public DOWHITEOUT 0x00040000 see vnode.h */ -#define WILLBEDIR 0x00080000 /* new files will be dirs; allow trailing / */ -#define AUDITVNPATH1 0x00100000 /* audit the path/vnode info */ -#define AUDITVNPATH2 0x00200000 /* audit the path/vnode info */ -#define USEDVP 0x00400000 /* start the lookup at ndp.ni_dvp */ -#define CN_VOLFSPATH 0x00800000 /* user path was a volfs style path */ -#define UNIONCREATED 0x02000000 /* union fs creation of vnode */ +#define WILLBEDIR 0x00080000 /* new files will be dirs; allow trailing / */ +#define AUDITVNPATH1 0x00100000 /* audit the path/vnode info */ +#define AUDITVNPATH2 0x00200000 /* audit the path/vnode info */ +#define USEDVP 0x00400000 /* start the lookup at ndp.ni_dvp */ +#define CN_VOLFSPATH 0x00800000 /* user path was a volfs style path */ +#define UNIONCREATED 0x02000000 /* union fs creation of vnode */ #if NAMEDRSRCFORK #define CN_WANTSRSRCFORK 0x04000000 #define CN_ALLOWRSRCFORK 0x08000000 #endif // NAMEDRSRCFORK // CN_SECLUDE_RENAME is defined above as 0x10000000 (SPI) -#define CN_NBMOUNTLOOK 0x20000000 /* do not block for cross mount lookups */ +#define CN_NBMOUNTLOOK 0x20000000 /* do not block for cross mount lookups */ #ifdef BSD_KERNEL_PRIVATE -#define CN_SKIPNAMECACHE 0x40000000 /* skip cache during lookup(), allow FS to handle all components */ +#define CN_SKIPNAMECACHE 0x40000000 /* skip cache during lookup(), allow FS to handle all components */ #endif // CN_RAW_ENCRYPTED is defined above as 0x80000000 (SPI) @@ -196,17 +196,17 @@ struct nameidata { */ #if CONFIG_TRIGGERS -/* Note: vnode triggers require more precise path operation (ni_op) */ +/* Note: vnode triggers require more precise path operation (ni_op) */ #define 
NDINIT(ndp, op, pop, flags, segflg, namep, ctx) { \ (ndp)->ni_cnd.cn_nameiop = op; \ (ndp)->ni_op = pop; \ (ndp)->ni_cnd.cn_flags = flags; \ if ((segflg) == UIO_USERSPACE) { \ - (ndp)->ni_segflg = ((IS_64BIT_PROCESS(vfs_context_proc(ctx))) ? UIO_USERSPACE64 : UIO_USERSPACE32); \ + (ndp)->ni_segflg = ((IS_64BIT_PROCESS(vfs_context_proc(ctx))) ? UIO_USERSPACE64 : UIO_USERSPACE32); \ } \ else { \ - (ndp)->ni_segflg = segflg; \ + (ndp)->ni_segflg = segflg; \ } \ (ndp)->ni_dirp = namep; \ (ndp)->ni_cnd.cn_context = ctx; \ @@ -218,10 +218,10 @@ struct nameidata { (ndp)->ni_cnd.cn_nameiop = op; \ (ndp)->ni_cnd.cn_flags = flags; \ if ((segflg) == UIO_USERSPACE) { \ - (ndp)->ni_segflg = ((IS_64BIT_PROCESS(vfs_context_proc(ctx))) ? UIO_USERSPACE64 : UIO_USERSPACE32); \ + (ndp)->ni_segflg = ((IS_64BIT_PROCESS(vfs_context_proc(ctx))) ? UIO_USERSPACE64 : UIO_USERSPACE32); \ } \ else { \ - (ndp)->ni_segflg = segflg; \ + (ndp)->ni_segflg = segflg; \ } \ (ndp)->ni_dirp = namep; \ (ndp)->ni_cnd.cn_context = ctx; \ @@ -236,62 +236,62 @@ struct nameidata { * This structure describes the elements in the cache of recent * names looked up by namei. */ -struct namecache { - TAILQ_ENTRY(namecache) nc_entry; /* chain of all entries */ - TAILQ_ENTRY(namecache) nc_child; /* chain of ncp's that are children of a vp */ - union { - LIST_ENTRY(namecache) nc_link; /* chain of ncp's that 'name' a vp */ - TAILQ_ENTRY(namecache) nc_negentry; /* chain of ncp's that 'name' a vp */ +struct namecache { + TAILQ_ENTRY(namecache) nc_entry; /* chain of all entries */ + TAILQ_ENTRY(namecache) nc_child; /* chain of ncp's that are children of a vp */ + union { + LIST_ENTRY(namecache) nc_link; /* chain of ncp's that 'name' a vp */ + TAILQ_ENTRY(namecache) nc_negentry; /* chain of ncp's that 'name' a vp */ } nc_un; - LIST_ENTRY(namecache) nc_hash; /* hash chain */ - vnode_t nc_dvp; /* vnode of parent of name */ - vnode_t nc_vp; /* vnode the name refers to */ - unsigned int nc_hashval; /* hashval of stringname */ - const char *nc_name; /* pointer to segment name in string cache */ + LIST_ENTRY(namecache) nc_hash; /* hash chain */ + vnode_t nc_dvp; /* vnode of parent of name */ + vnode_t nc_vp; /* vnode the name refers to */ + unsigned int nc_hashval; /* hashval of stringname */ + const char *nc_name; /* pointer to segment name in string cache */ }; #ifdef KERNEL -int namei(struct nameidata *ndp); -void nameidone(struct nameidata *); -int lookup(struct nameidata *ndp); -int relookup(struct vnode *dvp, struct vnode **vpp, - struct componentname *cnp); -int lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx); -void lookup_compound_vnop_post_hook(int error, vnode_t dvp, vnode_t vp, struct nameidata *ndp, int did_create); -void kdebug_lookup(struct vnode *dp, struct componentname *cnp); +int namei(struct nameidata *ndp); +void nameidone(struct nameidata *); +int lookup(struct nameidata *ndp); +int relookup(struct vnode *dvp, struct vnode **vpp, + struct componentname *cnp); +int lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx); +void lookup_compound_vnop_post_hook(int error, vnode_t dvp, vnode_t vp, struct nameidata *ndp, int did_create); +void kdebug_lookup(struct vnode *dp, struct componentname *cnp); /* * namecache function prototypes */ void cache_purgevfs(mount_t mp); -int cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, - vfs_context_t context, int *dp_authorized, vnode_t last_dp); +int cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t 
dp, + vfs_context_t context, int *dp_authorized, vnode_t last_dp); -void vnode_cache_authorized_action(vnode_t vp, vfs_context_t context, kauth_action_t action); -void vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action); -boolean_t vnode_cache_is_stale(vnode_t vp); -boolean_t vnode_cache_is_authorized(vnode_t vp, vfs_context_t context, kauth_action_t action); -int lookup_validate_creation_path(struct nameidata *ndp); -int namei_compound_available(vnode_t dp, struct nameidata *ndp); +void vnode_cache_authorized_action(vnode_t vp, vfs_context_t context, kauth_action_t action); +void vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action); +boolean_t vnode_cache_is_stale(vnode_t vp); +boolean_t vnode_cache_is_authorized(vnode_t vp, vfs_context_t context, kauth_action_t action); +int lookup_validate_creation_path(struct nameidata *ndp); +int namei_compound_available(vnode_t dp, struct nameidata *ndp); #endif /* KERNEL */ /* * Stats on usefulness of namei caches. */ -struct nchstats { - long ncs_goodhits; /* hits that we can really use */ - long ncs_neghits; /* negative hits that we can use */ - long ncs_badhits; /* hits we must drop */ - long ncs_miss; /* misses */ - long ncs_pass2; /* names found with passes == 2 */ - long ncs_2passes; /* number of times we attempt it */ - long ncs_stolen; - long ncs_enters; - long ncs_deletes; - long ncs_badvid; +struct nchstats { + long ncs_goodhits; /* hits that we can really use */ + long ncs_neghits; /* negative hits that we can use */ + long ncs_badhits; /* hits we must drop */ + long ncs_miss; /* misses */ + long ncs_pass2; /* names found with passes == 2 */ + long ncs_2passes; /* number of times we attempt it */ + long ncs_stolen; + long ncs_enters; + long ncs_deletes; + long ncs_badvid; }; #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/sys/netboot.h b/bsd/sys/netboot.h index 717100d7f..d9317bbc6 100644 --- a/bsd/sys/netboot.h +++ b/bsd/sys/netboot.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
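For orientation, the usual in-kernel pattern for the namei machinery declared above looks roughly like this. This is a sketch, not buildable outside an XNU kernel context; `path` and `ctx` are assumed to come from the caller:

    struct nameidata nd;
    vnode_t vp;
    int error;

    NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | AUDITVNPATH1,
        UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
    error = namei(&nd);
    if (error) {
        return error;
    }
    vp = nd.ni_vp;      /* result vnode; namei() leaves an iocount on it */
    nameidone(&nd);     /* always pair with a successful namei() */
    /* ... operate on vp ... */
    vnode_put(vp);      /* drop the iocount when done */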
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * netboot.h * - definitions for network booting/rooting */ @@ -37,14 +37,14 @@ #include #include -int netboot_setup(void); -int netboot_mountroot(void); -int netboot_root(void); +int netboot_setup(void); +int netboot_mountroot(void); +int netboot_root(void); -boolean_t netboot_iaddr(struct in_addr * iaddr_p); +boolean_t netboot_iaddr(struct in_addr * iaddr_p); -boolean_t netboot_rootpath(struct in_addr * server_ip, - char * name, int name_len, - char * path, int path_len); +boolean_t netboot_rootpath(struct in_addr * server_ip, + char * name, int name_len, + char * path, int path_len); #endif /* _SYS_NETBOOT_H */ diff --git a/bsd/sys/netport.h b/bsd/sys/netport.h index 0095d9dda..14ebe566b 100644 --- a/bsd/sys/netport.h +++ b/bsd/sys/netport.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,33 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ -/* +/* * Copyright (c) 1987,1988,1989 Carnegie-Mellon University All rights reserved. */ -#ifndef _SYS_NETPORT_H_ +#ifndef _SYS_NETPORT_H_ #define _SYS_NETPORT_H_ #include <_types/_uint32_t.h> /* uint32_t */ -typedef uint32_t netaddr_t; +typedef uint32_t netaddr_t; /* * Network Port structure. */ typedef struct { - long np_uid_high; - long np_uid_low; + long np_uid_high; + long np_uid_low; } np_uid_t; typedef struct { - netaddr_t np_receiver; - netaddr_t np_owner; - np_uid_t np_puid; - np_uid_t np_sid; + netaddr_t np_receiver; + netaddr_t np_owner; + np_uid_t np_puid; + np_uid_t np_sid; } network_port_t; -#endif /* !_SYS_NETPORT_H_ */ - +#endif /* !_SYS_NETPORT_H_ */ diff --git a/bsd/sys/param.h b/bsd/sys/param.h index 012e75630..ca57a6725 100644 --- a/bsd/sys/param.h +++ b/bsd/sys/param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ @@ -66,15 +66,15 @@ * @(#)param.h 8.3 (Berkeley) 4/4/95 */ -#ifndef _SYS_PARAM_H_ +#ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ -#define BSD 199506 /* System version (year & month). */ -#define BSD4_3 1 -#define BSD4_4 1 +#define BSD 199506 /* System version (year & month). */ +#define BSD4_3 1 +#define BSD4_4 1 -#define NeXTBSD 1995064 /* NeXTBSD version (year, month, release) */ -#define NeXTBSD4_0 0 /* NeXTBSD 4.0 */ +#define NeXTBSD 1995064 /* NeXTBSD version (year, month, release) */ +#define NeXTBSD4_0 0 /* NeXTBSD 4.0 */ #include #include @@ -92,16 +92,16 @@ */ #include -#define MAXCOMLEN 16 /* max command name remembered */ -#define MAXINTERP 64 /* max interpreter file name length */ -#define MAXLOGNAME 255 /* max login name length */ -#define MAXUPRC CHILD_MAX /* max simultaneous processes */ -#define NCARGS ARG_MAX /* max bytes for an exec function */ -#define NGROUPS NGROUPS_MAX /* max number groups */ -#define NOFILE 256 /* default max open files per process */ -#define NOGROUP 65535 /* marker for empty group set member */ -#define MAXHOSTNAMELEN 256 /* max hostname size */ -#define MAXDOMNAMELEN 256 /* maximum domain name length */ +#define MAXCOMLEN 16 /* max command name remembered */ +#define MAXINTERP 64 /* max interpreter file name length */ +#define MAXLOGNAME 255 /* max login name length */ +#define MAXUPRC CHILD_MAX /* max simultaneous processes */ +#define NCARGS ARG_MAX /* max bytes for an exec function */ +#define NGROUPS NGROUPS_MAX /* max number groups */ +#define NOFILE 256 /* default max open files per process */ +#define NOGROUP 65535 /* marker for empty group set member */ +#define MAXHOSTNAMELEN 256 /* max hostname size */ +#define MAXDOMNAMELEN 256 /* maximum domain name length */ /* Machine type dependent parameters. */ #include @@ -126,56 +126,56 @@ * Priorities. Note that with 32 run queues, differences less than 4 are * insignificant. */ -#define PSWP 0 -#define PVM 4 -#define PINOD 8 -#define PRIBIO 16 -#define PVFS 20 -#define PZERO 22 /* No longer magic, shouldn't be here. XXX */ -#define PSOCK 24 -#define PWAIT 32 -#define PLOCK 36 -#define PPAUSE 40 -#define PUSER 50 -#define MAXPRI 127 /* Priorities range from 0 through MAXPRI. */ +#define PSWP 0 +#define PVM 4 +#define PINOD 8 +#define PRIBIO 16 +#define PVFS 20 +#define PZERO 22 /* No longer magic, shouldn't be here. XXX */ +#define PSOCK 24 +#define PWAIT 32 +#define PLOCK 36 +#define PPAUSE 40 +#define PUSER 50 +#define MAXPRI 127 /* Priorities range from 0 through MAXPRI. 
*/ -#define PRIMASK 0x0ff -#define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ -#define PTTYBLOCK 0x200 /* for tty SIGTTOU and SIGTTIN blocking */ -#define PDROP 0x400 /* OR'd with pri to stop re-aquistion of mutex upon wakeup */ -#define PSPIN 0x800 /* OR'd with pri to require mutex in spin mode upon wakeup */ +#define PRIMASK 0x0ff +#define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ +#define PTTYBLOCK 0x200 /* for tty SIGTTOU and SIGTTIN blocking */ +#define PDROP 0x400 /* OR'd with pri to stop re-aquistion of mutex upon wakeup */ +#define PSPIN 0x800 /* OR'd with pri to require mutex in spin mode upon wakeup */ -#define NBPW sizeof(int) /* number of bytes per word (integer) */ +#define NBPW sizeof(int) /* number of bytes per word (integer) */ -#define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ -#define NODEV (dev_t)(-1) /* non-existent device */ +#define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ +#define NODEV (dev_t)(-1) /* non-existent device */ /* * Clustering of hardware pages on machines with ridiculously small * page sizes is done here. The paging subsystem deals with units of * CLSIZE pte's describing NBPG (from machine/param.h) pages each. */ -#define CLBYTES (CLSIZE*NBPG) -#define CLOFSET (CLSIZE*NBPG-1) /* for clusters, like PGOFSET */ -#define claligned(x) ((((int)(x))&CLOFSET)==0) -#define CLOFF CLOFSET -#define CLSHIFT (PGSHIFT+CLSIZELOG2) +#define CLBYTES (CLSIZE*NBPG) +#define CLOFSET (CLSIZE*NBPG-1) /* for clusters, like PGOFSET */ +#define claligned(x) ((((int)(x))&CLOFSET)==0) +#define CLOFF CLOFSET +#define CLSHIFT (PGSHIFT+CLSIZELOG2) -#if CLSIZE==1 -#define clbase(i) (i) -#define clrnd(i) (i) +#if CLSIZE == 1 +#define clbase(i) (i) +#define clrnd(i) (i) #else /* Give the base virtual address (first of CLSIZE). */ -#define clbase(i) ((i) &~ (CLSIZE-1)) +#define clbase(i) ((i) &~ (CLSIZE-1)) /* Round a number of clicks up to a whole cluster. */ -#define clrnd(i) (((i) + (CLSIZE-1)) &~ (CLSIZE-1)) +#define clrnd(i) (((i) + (CLSIZE-1)) &~ (CLSIZE-1)) #endif -#define CBLOCK 64 /* Clist block size, must be a power of 2. */ -#define CBQSIZE (CBLOCK/NBBY) /* Quote bytes/cblock - can do better. */ - /* Data chars/clist. */ -#define CBSIZE (CBLOCK - sizeof(struct cblock *) - CBQSIZE) -#define CROUND (CBLOCK - 1) /* Clist rounding. */ +#define CBLOCK 64 /* Clist block size, must be a power of 2. */ +#define CBQSIZE (CBLOCK/NBBY) /* Quote bytes/cblock - can do better. */ + /* Data chars/clist. */ +#define CBSIZE (CBLOCK - sizeof(struct cblock *) - CBQSIZE) +#define CROUND (CBLOCK - 1) /* Clist rounding. */ /* * File system parameters and macros. @@ -188,11 +188,11 @@ * We set this to track the value of MAX_UPL_TRANSFER_BYTES from * osfmk/mach/memory_object_types.h to bound it at the maximum UPL size. */ -#define MAXBSIZE (256 * 4096) -#define MAXPHYSIO MAXPHYS -#define MAXFRAG 8 +#define MAXBSIZE (256 * 4096) +#define MAXPHYSIO MAXPHYS +#define MAXFRAG 8 -#define MAXPHYSIO_WIRED (16 * 1024 * 1024) +#define MAXPHYSIO_WIRED (16 * 1024 * 1024) /* * MAXPATHLEN defines the longest permissable path length after expanding @@ -203,30 +203,30 @@ * It should be set high enough to allow all legitimate uses, but halt * infinite loops reasonably quickly. */ -#define MAXPATHLEN PATH_MAX -#define MAXSYMLINKS 32 +#define MAXPATHLEN PATH_MAX +#define MAXSYMLINKS 32 /* Bit map related macros. 
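The priority levels above (PSWP through PUSER) and the modifier bits OR'd into them (PCATCH, PDROP, PSPIN) are consumed by the BSD sleep interfaces. A hedged sketch of the common pattern, assuming kernel context with an event channel `chan`:

    /* Sleep on chan at socket priority; PCATCH lets a signal end the
     * sleep early, and hz ticks (one second) bounds it. */
    int error = tsleep(chan, PSOCK | PCATCH, "exmplwt", hz);
    if (error == EWOULDBLOCK) {
        /* timed out */
    } else if (error == EINTR || error == ERESTART) {
        /* interrupted by a signal, allowed because PCATCH was set */
    }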
*/ -#define setbit(a,i) (((char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) -#define clrbit(a,i) (((char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) -#define isset(a,i) (((char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) -#define isclr(a,i) ((((char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) +#define setbit(a, i) (((char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) +#define clrbit(a, i) (((char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) +#define isset(a, i) (((char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) +#define isclr(a, i) ((((char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany -#define howmany(x, y) ((((x) % (y)) == 0) ? ((x) / (y)) : (((x) / (y)) + 1)) +#define howmany(x, y) ((((x) % (y)) == 0) ? ((x) / (y)) : (((x) / (y)) + 1)) #endif -#define roundup(x, y) ((((x) % (y)) == 0) ? \ - (x) : ((x) + ((y) - ((x) % (y))))) -#define powerof2(x) ((((x)-1)&(x))==0) +#define roundup(x, y) ((((x) % (y)) == 0) ? \ + (x) : ((x) + ((y) - ((x) % (y))))) +#define powerof2(x) ((((x)-1)&(x))==0) /* Macros for min/max. */ #ifndef MIN -#define MIN(a,b) (((a)<(b))?(a):(b)) +#define MIN(a, b) (((a)<(b))?(a):(b)) #endif /* MIN */ #ifndef MAX -#define MAX(a,b) (((a)>(b))?(a):(b)) -#endif /* MAX */ +#define MAX(a, b) (((a)>(b))?(a):(b)) +#endif /* MAX */ /* * Constants for setting the parameters of the kernel memory allocator. @@ -243,8 +243,8 @@ * Constraints: CLBYTES <= MAXALLOCSAVE <= 2 ** (MINBUCKET + 14), and * MAXALLOCSIZE must be a power of two. */ -#define MINBUCKET 4 /* 4 => min allocation of 16 bytes */ -#define MAXALLOCSAVE (2 * CLBYTES) +#define MINBUCKET 4 /* 4 => min allocation of 16 bytes */ +#define MAXALLOCSAVE (2 * CLBYTES) /* * Scale factor for scaled integers used to count %cpu time and load avgs. @@ -257,7 +257,7 @@ * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024. */ -#define FSHIFT 11 /* bits to right of fixed binary point */ -#define FSCALE (1<<FSHIFT) +#define FSHIFT 11 /* bits to right of fixed binary point */ +#define FSCALE (1<<FSHIFT) #endif /* _SYS_PARAM_H_ */ diff --git a/bsd/sys/persona.h b/bsd/sys/persona.h index 64d135168..c01074897 100644 --- a/bsd/sys/persona.h +++ b/bsd/sys/persona.h @@ -52,7 +52,7 @@ struct kpersona_info { uint32_t persona_ngroups; gid_t persona_groups[NGROUPS]; uid_t persona_gmuid; - char persona_name[MAXLOGNAME+1]; + char persona_name[MAXLOGNAME + 1]; /* TODO: MAC policies?! 
*/ }; @@ -200,7 +200,7 @@ struct persona { uid_t pna_id; int pna_type; - char pna_login[MAXLOGNAME+1]; + char pna_login[MAXLOGNAME + 1]; kauth_cred_t pna_cred; uid_t pna_pgid; @@ -230,38 +230,46 @@ struct persona { LCK_MTX_ASSERT(&(persona)->pna_lock, LCK_MTX_ASSERT_OWNED) #ifdef PERSONA_DEBUG -static inline const char *persona_desc(struct persona *persona, int locked) +static inline const char * +persona_desc(struct persona *persona, int locked) { - if (!persona) + if (!persona) { return ""; + } - if (persona->pna_desc[0] != 0) + if (persona->pna_desc[0] != 0) { return persona->pna_desc; + } - if (!locked) + if (!locked) { persona_lock(persona); - if (persona->pna_desc[0] != 0) + } + if (persona->pna_desc[0] != 0) { goto out_unlock; + } char *p = &persona->pna_desc[0]; char *end = p + sizeof(persona->pna_desc) - 1; *end = 0; p += snprintf(p, end - p, "%s/%d:%d", - persona->pna_login, - kauth_cred_getuid(persona->pna_cred), - kauth_cred_getgid(persona->pna_cred)); + persona->pna_login, + kauth_cred_getuid(persona->pna_cred), + kauth_cred_getgid(persona->pna_cred)); - if (p <= end) + if (p <= end) { *p = 0; + } out_unlock: - if (!locked) + if (!locked) { persona_unlock(persona); + } return persona->pna_desc; } #else /* !PERSONA_DEBUG */ -static inline const char *persona_desc(struct persona *persona, int locked) +static inline const char * +persona_desc(struct persona *persona, int locked) { (void)persona; (void)locked; @@ -277,9 +285,9 @@ struct persona; __BEGIN_DECLS #ifndef _KAUTH_CRED_T -#define _KAUTH_CRED_T +#define _KAUTH_CRED_T typedef struct ucred *kauth_cred_t; -#endif /* !_KAUTH_CRED_T */ +#endif /* !_KAUTH_CRED_T */ /* returns the persona ID for the given pesona structure */ uid_t persona_get_id(struct persona *persona); @@ -298,7 +306,7 @@ struct persona *persona_lookup(uid_t id); * total found (could be more than original value of 'plen') */ int persona_find(const char *login, uid_t uid, - struct persona **persona, size_t *plen); + struct persona **persona, size_t *plen); /* returns a reference to the persona tied to the current thread */ struct persona *current_persona_get(void); @@ -323,33 +331,37 @@ extern struct persona *g_system_persona; void personas_bootstrap(void); struct persona *persona_alloc(uid_t id, const char *login, - int type, int *error); + int type, int *error); int persona_init_begin(struct persona *persona); void persona_init_end(struct persona *persona, int error); struct persona *persona_lookup_and_invalidate(uid_t id); -static inline int proc_has_persona(proc_t p) +static inline int +proc_has_persona(proc_t p) { - if (p && p->p_persona) + if (p && p->p_persona) { return 1; + } return 0; } -static inline uid_t persona_id_from_proc(proc_t p) +static inline uid_t +persona_id_from_proc(proc_t p) { - if (p && p->p_persona) + if (p && p->p_persona) { return p->p_persona->pna_id; + } return PERSONA_ID_NONE; } int persona_proc_inherit(proc_t child, proc_t parent); int persona_proc_adopt_id(proc_t p, uid_t id, - kauth_cred_t auth_override); + kauth_cred_t auth_override); int persona_proc_adopt(proc_t p, struct persona *persona, - kauth_cred_t auth_override); + kauth_cred_t auth_override); int persona_proc_drop(proc_t p); int persona_set_cred(struct persona *persona, kauth_cred_t cred); @@ -365,19 +377,21 @@ int persona_get_groups(struct persona *persona, unsigned *ngroups, gid_t *groups uid_t persona_get_gmuid(struct persona *persona); -int persona_get_login(struct persona *persona, char login[MAXLOGNAME+1]); +int persona_get_login(struct persona *persona, char 
login[MAXLOGNAME + 1]); /* returns a reference that must be released with persona_put() */ struct persona *persona_proc_get(pid_t pid); #else /* !CONFIG_PERSONAS */ -static inline int proc_has_persona(__unused proc_t p) +static inline int +proc_has_persona(__unused proc_t p) { return 0; } -static inline uid_t persona_id_from_proc(__unused proc_t p) +static inline uid_t +persona_id_from_proc(__unused proc_t p) { return PERSONA_ID_NONE; } diff --git a/bsd/sys/pgo.h b/bsd/sys/pgo.h index fcd669b51..35ddf7e70 100644 --- a/bsd/sys/pgo.h +++ b/bsd/sys/pgo.h @@ -56,28 +56,28 @@ * All members are in network byte order. */ struct pgo_metadata_footer { - /** - * number of pairs. - * - * This should be htonl(n), where n is the number of key-value pairs in the - * metadata buffer - */ - uint32_t number_of_pairs; + /** + * number of pairs. + * + * This should be htonl(n), where n is the number of key-value pairs in the + * metadata buffer + */ + uint32_t number_of_pairs; - /** - * pointer to the metadata buffer - * - * This should be htonl(offset), where offset is the backwards offset from - * the end of the file to the metadata buffer. - */ - uint32_t offset_to_pairs; + /** + * pointer to the metadata buffer + * + * This should be htonl(offset), where offset is the backwards offset from + * the end of the file to the metadata buffer. + */ + uint32_t offset_to_pairs; - /** - * magic number - * - * This should be htonl(0x6d657461); - */ - uint32_t magic; + /** + * magic number + * + * This should be htonl(0x6d657461); + */ + uint32_t magic; }; #ifndef KERNEL diff --git a/bsd/sys/pipe.h b/bsd/sys/pipe.h index 09b25dbce..294be47be 100644 --- a/bsd/sys/pipe.h +++ b/bsd/sys/pipe.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -57,10 +57,10 @@ #ifndef _SYS_PIPE_H_ #define _SYS_PIPE_H_ -#ifdef KERNEL +#ifdef KERNEL #include #endif -#include /* for TAILQ macros */ +#include /* for TAILQ macros */ #include #include #include @@ -70,17 +70,17 @@ * Pipe buffer size, keep moderate in value, pipes take kva space. */ #ifndef PIPE_SIZE -#define PIPE_SIZE 16384 +#define PIPE_SIZE 16384 #endif -#define PIPE_KVAMAX (1024 * 1024 * 16) +#define PIPE_KVAMAX (1024 * 1024 * 16) #ifndef BIG_PIPE_SIZE -#define BIG_PIPE_SIZE (64*1024) +#define BIG_PIPE_SIZE (64*1024) #endif #ifndef SMALL_PIPE_SIZE -#define SMALL_PIPE_SIZE PAGE_SIZE +#define SMALL_PIPE_SIZE PAGE_SIZE #endif /* @@ -88,10 +88,10 @@ * than PIPE_BUF. 
*/ #ifndef PIPE_MINDIRECT -#define PIPE_MINDIRECT 8192 +#define PIPE_MINDIRECT 8192 #endif -#define PIPENPAGES (BIG_PIPE_SIZE / PAGE_SIZE + 1) +#define PIPENPAGES (BIG_PIPE_SIZE / PAGE_SIZE + 1) /* * Pipe buffer information. @@ -99,11 +99,11 @@ * Buffered write is active when the buffer.cnt field is set. */ struct pipebuf { - u_int cnt; /* number of chars currently in buffer */ - u_int in; /* in pointer */ - u_int out; /* out pointer */ - u_int size; /* size of buffer */ - caddr_t buffer; /* kva of buffer */ + u_int cnt; /* number of chars currently in buffer */ + u_int in; /* in pointer */ + u_int out; /* out pointer */ + u_int size; /* size of buffer */ + caddr_t buffer; /* kva of buffer */ }; @@ -112,34 +112,34 @@ struct pipebuf { * Information to support direct transfers between processes for pipes. */ struct pipemapping { - vm_offset_t kva; /* kernel virtual address */ - vm_size_t cnt; /* number of chars in buffer */ - vm_size_t pos; /* current position of transfer */ - int npages; /* number of pages */ - vm_page_t ms[PIPENPAGES]; /* pages in source process */ + vm_offset_t kva; /* kernel virtual address */ + vm_size_t cnt; /* number of chars in buffer */ + vm_size_t pos; /* current position of transfer */ + int npages; /* number of pages */ + vm_page_t ms[PIPENPAGES]; /* pages in source process */ }; #endif /* * Bits in pipe_state. */ -#define PIPE_ASYNC 0x004 /* Async? I/O. */ -#define PIPE_WANTR 0x008 /* Reader wants some characters. */ -#define PIPE_WANTW 0x010 /* Writer wants space to put characters. */ -#define PIPE_WANT 0x020 /* Pipe is wanted to be run-down. */ -#define PIPE_SEL 0x040 /* Pipe has a select active. */ -#define PIPE_EOF 0x080 /* Pipe is in EOF condition. */ -#define PIPE_LOCKFL 0x100 /* Process has exclusive access to pointers/data. */ -#define PIPE_LWANT 0x200 /* Process wants exclusive access to pointers/data. */ -#define PIPE_DIRECTW 0x400 /* Pipe direct write active. */ -#define PIPE_DIRECTOK 0x800 /* Direct mode ok. */ -#define PIPE_KNOTE 0x1000 /* Pipe has kernel events activated */ -#define PIPE_DRAIN 0x2000 /* Waiting for I/O to drop for a close. Treated like EOF; - only separate for easier debugging. */ -#define PIPE_WSELECT 0x4000 /* Some thread has done an FWRITE select on the pipe */ -#define PIPE_DEAD 0x8000 /* Pipe is dead and needs garbage collection */ - -#ifdef KERNEL +#define PIPE_ASYNC 0x004 /* Async? I/O. */ +#define PIPE_WANTR 0x008 /* Reader wants some characters. */ +#define PIPE_WANTW 0x010 /* Writer wants space to put characters. */ +#define PIPE_WANT 0x020 /* Pipe is wanted to be run-down. */ +#define PIPE_SEL 0x040 /* Pipe has a select active. */ +#define PIPE_EOF 0x080 /* Pipe is in EOF condition. */ +#define PIPE_LOCKFL 0x100 /* Process has exclusive access to pointers/data. */ +#define PIPE_LWANT 0x200 /* Process wants exclusive access to pointers/data. */ +#define PIPE_DIRECTW 0x400 /* Pipe direct write active. */ +#define PIPE_DIRECTOK 0x800 /* Direct mode ok. */ +#define PIPE_KNOTE 0x1000 /* Pipe has kernel events activated */ +#define PIPE_DRAIN 0x2000 /* Waiting for I/O to drop for a close. Treated like EOF; + * only separate for easier debugging. */ +#define PIPE_WSELECT 0x4000 /* Some thread has done an FWRITE select on the pipe */ +#define PIPE_DEAD 0x8000 /* Pipe is dead and needs garbage collection */ + +#ifdef KERNEL struct label; @@ -148,27 +148,27 @@ struct label; * Two of these are linked together to produce bi-directional pipes. 
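struct pipebuf above is ordinary ring-buffer bookkeeping: `in` and `out` are offsets that wrap modulo `size`, and `cnt` tracks the bytes in flight. A small self-contained model of that arithmetic (field names mirror the struct; this is an illustration, not kernel code):

    #include <stdio.h>

    struct ring { unsigned cnt, in, out, size; };  /* mirrors struct pipebuf */

    /* Bytes a writer may add before the buffer is full. */
    static unsigned ring_space(const struct ring *r) { return r->size - r->cnt; }

    /* Largest write that fits without wrapping past the end of the buffer. */
    static unsigned ring_contig(const struct ring *r)
    {
        unsigned space = ring_space(r), to_end = r->size - r->in;
        return space < to_end ? space : to_end;
    }

    int main(void)
    {
        /* 300 bytes queued in a 16384-byte (PIPE_SIZE) buffer, near the end. */
        struct ring r = { .cnt = 300, .in = 16300, .out = 16000, .size = 16384 };
        printf("space=%u contiguous=%u\n", ring_space(&r), ring_contig(&r));
        return 0;
    }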
*/ struct pipe { - struct pipebuf pipe_buffer; /* data storage */ + struct pipebuf pipe_buffer; /* data storage */ #ifdef PIPE_DIRECT - struct pipemapping pipe_map; /* pipe mapping for direct I/O */ + struct pipemapping pipe_map; /* pipe mapping for direct I/O */ #endif - struct selinfo pipe_sel; /* for compat with select */ - pid_t pipe_pgid; /* information for async I/O */ - struct pipe *pipe_peer; /* link with other direction */ - u_int pipe_state; /* pipe status info */ - int pipe_busy; /* busy flag, mostly to handle rundown sanely */ - TAILQ_HEAD(,eventqelt) pipe_evlist; - lck_mtx_t *pipe_mtxp; /* shared mutex between both pipes */ - struct timespec st_atimespec; /* time of last access */ - struct timespec st_mtimespec; /* time of last data modification */ - struct timespec st_ctimespec; /* time of last status change */ - struct label *pipe_label; /* pipe MAC label - shared */ + struct selinfo pipe_sel; /* for compat with select */ + pid_t pipe_pgid; /* information for async I/O */ + struct pipe *pipe_peer; /* link with other direction */ + u_int pipe_state; /* pipe status info */ + int pipe_busy; /* busy flag, mostly to handle rundown sanely */ + TAILQ_HEAD(, eventqelt) pipe_evlist; + lck_mtx_t *pipe_mtxp; /* shared mutex between both pipes */ + struct timespec st_atimespec; /* time of last access */ + struct timespec st_mtimespec; /* time of last data modification */ + struct timespec st_ctimespec; /* time of last status change */ + struct label *pipe_label; /* pipe MAC label - shared */ }; -#define PIPE_MTX(pipe) ((pipe)->pipe_mtxp) +#define PIPE_MTX(pipe) ((pipe)->pipe_mtxp) -#define PIPE_LOCK(pipe) lck_mtx_lock(PIPE_MTX(pipe)) -#define PIPE_UNLOCK(pipe) lck_mtx_unlock(PIPE_MTX(pipe)) +#define PIPE_LOCK(pipe) lck_mtx_lock(PIPE_MTX(pipe)) +#define PIPE_UNLOCK(pipe) lck_mtx_unlock(PIPE_MTX(pipe)) #define PIPE_LOCK_ASSERT(pipe, type) LCK_MTX_ASSERT(PIPE_MTX(pipe), (type)) __BEGIN_DECLS @@ -176,6 +176,6 @@ void pipeinit(void); extern int pipe_stat(struct pipe *, void *, int); __END_DECLS -#endif /* KERNEL */ +#endif /* KERNEL */ #endif /* !_SYS_PIPE_H_ */ diff --git a/bsd/sys/poll.h b/bsd/sys/poll.h index 03e44923a..6558df603 100644 --- a/bsd/sys/poll.h +++ b/bsd/sys/poll.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -55,7 +55,7 @@ */ #ifndef _SYS_POLL_H_ -#define _SYS_POLL_H_ +#define _SYS_POLL_H_ /* * This file is intended to be compatible with the traditional poll.h. 
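Both endpoints of a pipe share one mutex (`pipe_mtxp`), which is why the PIPE_LOCK/PIPE_UNLOCK macros above may be called on either side. A sketch of the usual state check, assuming kernel context with a write endpoint `wpipe`:

    PIPE_LOCK(wpipe);                 /* also covers wpipe->pipe_peer */
    if (wpipe->pipe_state & (PIPE_EOF | PIPE_DRAIN)) {
        PIPE_UNLOCK(wpipe);
        return EPIPE;                 /* peer gone or pipe shutting down */
    }
    wpipe->pipe_state |= PIPE_WANTW;  /* record that the writer wants space */
    PIPE_UNLOCK(wpipe);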
@@ -65,36 +65,35 @@ * Requestable events. If poll(2) finds any of these set, they are * copied to revents on return. */ -#define POLLIN 0x0001 /* any readable data available */ -#define POLLPRI 0x0002 /* OOB/Urgent readable data */ -#define POLLOUT 0x0004 /* file descriptor is writeable */ -#define POLLRDNORM 0x0040 /* non-OOB/URG data available */ -#define POLLWRNORM POLLOUT /* no write type differentiation */ -#define POLLRDBAND 0x0080 /* OOB/Urgent readable data */ -#define POLLWRBAND 0x0100 /* OOB/Urgent data can be written */ +#define POLLIN 0x0001 /* any readable data available */ +#define POLLPRI 0x0002 /* OOB/Urgent readable data */ +#define POLLOUT 0x0004 /* file descriptor is writeable */ +#define POLLRDNORM 0x0040 /* non-OOB/URG data available */ +#define POLLWRNORM POLLOUT /* no write type differentiation */ +#define POLLRDBAND 0x0080 /* OOB/Urgent readable data */ +#define POLLWRBAND 0x0100 /* OOB/Urgent data can be written */ /* * FreeBSD extensions: polling on a regular file might return one * of these events (currently only supported on local filesystems). */ -#define POLLEXTEND 0x0200 /* file may have been extended */ -#define POLLATTRIB 0x0400 /* file attributes may have changed */ -#define POLLNLINK 0x0800 /* (un)link/rename may have happened */ -#define POLLWRITE 0x1000 /* file's contents may have changed */ +#define POLLEXTEND 0x0200 /* file may have been extended */ +#define POLLATTRIB 0x0400 /* file attributes may have changed */ +#define POLLNLINK 0x0800 /* (un)link/rename may have happened */ +#define POLLWRITE 0x1000 /* file's contents may have changed */ /* * These events are set if they occur regardless of whether they were * requested. */ -#define POLLERR 0x0008 /* some poll error occurred */ -#define POLLHUP 0x0010 /* file descriptor was "hung up" */ -#define POLLNVAL 0x0020 /* requested events "invalid" */ +#define POLLERR 0x0008 /* some poll error occurred */ +#define POLLHUP 0x0010 /* file descriptor was "hung up" */ +#define POLLNVAL 0x0020 /* requested events "invalid" */ -#define POLLSTANDARD (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\ - POLLWRBAND|POLLERR|POLLHUP|POLLNVAL) +#define POLLSTANDARD (POLLIN|POLLPRI|POLLOUT|POLLRDNORM|POLLRDBAND|\ + POLLWRBAND|POLLERR|POLLHUP|POLLNVAL) -struct pollfd -{ +struct pollfd { int fd; short events; short revents; @@ -112,7 +111,7 @@ __BEGIN_DECLS * This is defined here (instead of ) because this is where * traditional SVR4 code will look to find it. */ -extern int poll (struct pollfd *, nfds_t, int) __DARWIN_ALIAS_C(poll); +extern int poll(struct pollfd *, nfds_t, int) __DARWIN_ALIAS_C(poll); __END_DECLS diff --git a/bsd/sys/posix_sem.h b/bsd/sys/posix_sem.h index 1d416d12f..793511a70 100644 --- a/bsd/sys/posix_sem.h +++ b/bsd/sys/posix_sem.h @@ -1,9 +1,8 @@ - /* * Copyright (c) 1999-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
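User code consumes the event bits above through poll(2). A minimal runnable example that waits up to one second for stdin to become readable:

    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };

        int n = poll(&pfd, 1, 1000);  /* timeout in milliseconds */
        if (n < 0)
            perror("poll");
        else if (n == 0)
            puts("timed out");
        else if (pfd.revents & POLLIN)
            puts("stdin readable");
        else if (pfd.revents & (POLLERR | POLLHUP | POLLNVAL))
            puts("error condition");  /* delivered even when not requested */
        return 0;
    }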
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -42,7 +41,7 @@ * */ -#ifndef _SYS_POSIX_SEM_H_ +#ifndef _SYS_POSIX_SEM_H_ #define _SYS_POSIX_SEM_H_ #include @@ -51,30 +50,30 @@ struct label; -#define PSEMNAMLEN 31 /* maximum name segment length we bother with */ +#define PSEMNAMLEN 31 /* maximum name segment length we bother with */ struct pseminfo { - unsigned int psem_flags; - unsigned int psem_usecount; - mode_t psem_mode; - uid_t psem_uid; - gid_t psem_gid; - char psem_name[PSEMNAMLEN + 1]; /* segment name */ - void * psem_semobject; + unsigned int psem_flags; + unsigned int psem_usecount; + mode_t psem_mode; + uid_t psem_uid; + gid_t psem_gid; + char psem_name[PSEMNAMLEN + 1]; /* segment name */ + void * psem_semobject; struct label * psem_label; - pid_t psem_creator_pid; - uint64_t psem_creator_uniqueid; + pid_t psem_creator_pid; + uint64_t psem_creator_uniqueid; }; #define PSEMINFO_NULL (struct pseminfo *)0 -#define PSEM_NONE 1 -#define PSEM_DEFINED 2 -#define PSEM_ALLOCATED 4 -#define PSEM_MAPPED 8 -#define PSEM_INUSE 0x10 -#define PSEM_REMOVED 0x20 -#define PSEM_INCREATE 0x40 -#define PSEM_INDELETE 0x80 +#define PSEM_NONE 1 +#define PSEM_DEFINED 2 +#define PSEM_ALLOCATED 4 +#define PSEM_MAPPED 8 +#define PSEM_INUSE 0x10 +#define PSEM_REMOVED 0x20 +#define PSEM_INCREATE 0x40 +#define PSEM_INDELETE 0x80 #endif diff --git a/bsd/sys/posix_shm.h b/bsd/sys/posix_shm.h index 068fdee73..72e7b0c01 100644 --- a/bsd/sys/posix_shm.h +++ b/bsd/sys/posix_shm.h @@ -2,7 +2,7 @@ * Copyright (c) 1999-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
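Each struct pseminfo above is the kernel-side record behind one POSIX named semaphore, and the PSEM_* bits track its lifecycle (PSEM_DEFINED, PSEM_ALLOCATED, PSEM_INUSE, PSEM_INDELETE, and so on). The matching user-space calls, as a minimal sketch (the semaphore name is illustrative):

    #include <fcntl.h>
    #include <semaphore.h>
    #include <stdio.h>

    int main(void)
    {
        /* First open creates the kernel object; later opens share it. */
        sem_t *sem = sem_open("/demo.sem", O_CREAT, 0644, 1);
        if (sem == SEM_FAILED) { perror("sem_open"); return 1; }

        sem_wait(sem);   /* decrement, blocking while the count is zero */
        /* ... critical section ... */
        sem_post(sem);   /* increment and wake one waiter */

        sem_close(sem);          /* drop this process's use count */
        sem_unlink("/demo.sem"); /* remove the name; object dies on last close */
        return 0;
    }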
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -41,7 +41,7 @@ * */ -#ifndef _SYS_POSIX_SHM_H_ +#ifndef _SYS_POSIX_SHM_H_ #define _SYS_POSIX_SHM_H_ #include @@ -50,33 +50,18 @@ struct label; -#define PSHMNAMLEN 31 /* maximum name segment length we bother with */ +#define PSHMNAMLEN 31 /* maximum name segment length we bother with */ struct pshminfo { - unsigned int pshm_flags; - unsigned int pshm_usecount; - off_t pshm_length; - mode_t pshm_mode; - uid_t pshm_uid; - gid_t pshm_gid; - char pshm_name[PSHMNAMLEN + 1]; /* segment name */ - void * pshm_memobject; -#if DIAGNOSTIC - unsigned int pshm_readcount; - unsigned int pshm_writecount; - struct proc * pshm_proc; -#endif /* DIAGNOSTIC */ - struct label * pshm_label; + unsigned int pshm_flags; + unsigned int pshm_usecount; + off_t pshm_length; + mode_t pshm_mode; + uid_t pshm_uid; + gid_t pshm_gid; + char pshm_name[PSHMNAMLEN + 1]; + void *pshm_memobject; + struct label *pshm_label; }; -#define PSHMINFO_NULL (struct pshminfo *)0 - -#define PSHM_NONE 1 -#define PSHM_DEFINED 2 -#define PSHM_ALLOCATED 4 -#define PSHM_MAPPED 8 -#define PSHM_INUSE 0x10 -#define PSHM_REMOVED 0x20 -#define PSHM_INCREATE 0x40 -#define PSHM_INDELETE 0x80 #endif diff --git a/bsd/sys/priv.h b/bsd/sys/priv.h index 688da6449..cdeb994a4 100644 --- a/bsd/sys/priv.h +++ b/bsd/sys/priv.h @@ -60,7 +60,7 @@ * Kernel privilege checking interface. */ #ifndef _SYS_PRIV_H_ -#define _SYS_PRIV_H_ +#define _SYS_PRIV_H_ /* * Privilege list, sorted loosely by kernel subsystem. @@ -77,62 +77,62 @@ * privileges, such as the ability to reboot, and then loosely by * subsystem, indicated by a subsystem name. */ -#define PRIV_ADJTIME 1000 /* Set time adjustment. */ -#define PRIV_PROC_UUID_POLICY 1001 /* Change process uuid policy table. */ -#define PRIV_GLOBAL_PROC_INFO 1002 /* Query information for processes owned by other users */ -#define PRIV_SYSTEM_OVERRIDE 1003 /* Override global system settings for various subsystems for a limited duration/system-mode */ -#define PRIV_HW_DEBUG_DATA 1004 /* Extract hw-specific debug data (e.g. ECC data) */ -#define PRIV_SELECTIVE_FORCED_IDLE 1005 /* Configure and control Selective Forced Idle (SFI) subsystem */ -#define PRIV_PROC_TRACE_INSPECT 1006 /* Request trace memory of arbitrary process to be inspected */ -#define PRIV_DARKBOOT 1007 /* Manipulate the darkboot flag */ -#define PRIV_WORK_INTERVAL 1008 /* Express details about a work interval */ -#define PRIV_SMB_TIMEMACHINE_CONTROL 1009 /* Control Time Machine properties of an SMB share */ -#define PRIV_AUDIO_LATENCY 1010 /* set audio latency requirements for background tracing */ -#define PRIV_KTRACE_BACKGROUND 1011 /* Operate ktrace in the background */ -#define PRIV_SETPRIORITY_DARWIN_ROLE 1012 /* Allow setpriority(PRIO_DARWIN_ROLE) */ -#define PRIV_PACKAGE_EXTENSIONS 1013 /* Push package extension list used by vn_path_package_check() */ -#define PRIV_TRIM_ACTIVE_FILE 1014 /* Allow freeing space out from under an active file */ -#define PRIV_PROC_CPUMON_OVERRIDE 1015 /* Allow CPU usage monitor parameters less restrictive than default */ +#define PRIV_ADJTIME 1000 /* Set time adjustment. */ +#define PRIV_PROC_UUID_POLICY 1001 /* Change process uuid policy table. */ +#define PRIV_GLOBAL_PROC_INFO 1002 /* Query information for processes owned by other users */ +#define PRIV_SYSTEM_OVERRIDE 1003 /* Override global system settings for various subsystems for a limited duration/system-mode */ +#define PRIV_HW_DEBUG_DATA 1004 /* Extract hw-specific debug data (e.g. 
ECC data) */ +#define PRIV_SELECTIVE_FORCED_IDLE 1005 /* Configure and control Selective Forced Idle (SFI) subsystem */ +#define PRIV_PROC_TRACE_INSPECT 1006 /* Request trace memory of arbitrary process to be inspected */ +#define PRIV_DARKBOOT 1007 /* Manipulate the darkboot flag */ +#define PRIV_WORK_INTERVAL 1008 /* Express details about a work interval */ +#define PRIV_SMB_TIMEMACHINE_CONTROL 1009 /* Control Time Machine properties of an SMB share */ +#define PRIV_AUDIO_LATENCY 1010 /* set audio latency requirements for background tracing */ +#define PRIV_KTRACE_BACKGROUND 1011 /* Operate ktrace in the background */ +#define PRIV_SETPRIORITY_DARWIN_ROLE 1012 /* Allow setpriority(PRIO_DARWIN_ROLE) */ +#define PRIV_PACKAGE_EXTENSIONS 1013 /* Push package extension list used by vn_path_package_check() */ +#define PRIV_TRIM_ACTIVE_FILE 1014 /* Allow freeing space out from under an active file */ +#define PRIV_PROC_CPUMON_OVERRIDE 1015 /* Allow CPU usage monitor parameters less restrictive than default */ /* * Virtual memory privileges. */ -#define PRIV_VM_PRESSURE 6000 /* Check VM pressure. */ -#define PRIV_VM_JETSAM 6001 /* Adjust jetsam configuration. */ -#define PRIV_VM_FOOTPRINT_LIMIT 6002 /* Adjust physical footprint limit. */ +#define PRIV_VM_PRESSURE 6000 /* Check VM pressure. */ +#define PRIV_VM_JETSAM 6001 /* Adjust jetsam configuration. */ +#define PRIV_VM_FOOTPRINT_LIMIT 6002 /* Adjust physical footprint limit. */ /* * Network stack privileges. */ -#define PRIV_NET_PRIVILEGED_TRAFFIC_CLASS 10000 /* Set SO_PRIVILEGED_TRAFFIC_CLASS. */ -#define PRIV_NET_PRIVILEGED_SOCKET_DELEGATE 10001 /* Set delegate on a socket */ -#define PRIV_NET_INTERFACE_CONTROL 10002 /* Enable interface debug logging. */ -#define PRIV_NET_PRIVILEGED_NETWORK_STATISTICS 10003 /* Access to all sockets */ -#define PRIV_NET_PRIVILEGED_NECP_POLICIES 10004 /* Access to privileged Network Extension policies */ -#define PRIV_NET_RESTRICTED_AWDL 10005 /* Access to restricted AWDL mode */ -#define PRIV_NET_PRIVILEGED_NECP_MATCH 10006 /* Privilege verified by Network Extension policies */ -#define PRIV_NET_QOSMARKING_POLICY_OVERRIDE 10007 /* Privilege verified by Network Extension policies */ -#define PRIV_NET_RESTRICTED_INTCOPROC 10008 /* Access to internal co-processor network interfaces */ +#define PRIV_NET_PRIVILEGED_TRAFFIC_CLASS 10000 /* Set SO_PRIVILEGED_TRAFFIC_CLASS. */ +#define PRIV_NET_PRIVILEGED_SOCKET_DELEGATE 10001 /* Set delegate on a socket */ +#define PRIV_NET_INTERFACE_CONTROL 10002 /* Enable interface debug logging. 
*/ +#define PRIV_NET_PRIVILEGED_NETWORK_STATISTICS 10003 /* Access to all sockets */ +#define PRIV_NET_PRIVILEGED_NECP_POLICIES 10004 /* Access to privileged Network Extension policies */ +#define PRIV_NET_RESTRICTED_AWDL 10005 /* Access to restricted AWDL mode */ +#define PRIV_NET_PRIVILEGED_NECP_MATCH 10006 /* Privilege verified by Network Extension policies */ +#define PRIV_NET_QOSMARKING_POLICY_OVERRIDE 10007 /* Privilege verified by Network Extension policies */ +#define PRIV_NET_RESTRICTED_INTCOPROC 10008 /* Access to internal co-processor network interfaces */ -#define PRIV_NET_PRIVILEGED_MULTIPATH 10009 /* Multipath usage */ -#define PRIV_NET_RESTRICTED_MULTIPATH_EXTENDED 10010 /* Extended multipath (more aggressive on cell) */ -#define PRIV_NET_RESTRICTED_ROUTE_NC_READ 10011 /* Enable route neighbhor cache read operations */ +#define PRIV_NET_PRIVILEGED_MULTIPATH 10009 /* Multipath usage */ +#define PRIV_NET_RESTRICTED_MULTIPATH_EXTENDED 10010 /* Extended multipath (more aggressive on cell) */ +#define PRIV_NET_RESTRICTED_ROUTE_NC_READ 10011 /* Enable route neighbhor cache read operations */ /* * IPv4 and IPv6 privileges. */ -#define PRIV_NETINET_RESERVEDPORT 11000 /* Bind low port number. */ +#define PRIV_NETINET_RESERVEDPORT 11000 /* Bind low port number. */ /* * VFS privileges */ -#define PRIV_VFS_OPEN_BY_ID 14000 /* Allow calling openbyid_np() */ -#define PRIV_VFS_MOVE_DATA_EXTENTS 14001 /* Allow F_MOVEDATAEXTENTS fcntl */ -#define PRIV_VFS_SNAPSHOT 14002 /* Allow create/rename/delete of snapshots */ -#define PRIV_VFS_SNAPSHOT_REVERT 14003 /* Allow reverting filesystem to a previous snapshot */ +#define PRIV_VFS_OPEN_BY_ID 14000 /* Allow calling openbyid_np() */ +#define PRIV_VFS_MOVE_DATA_EXTENTS 14001 /* Allow F_MOVEDATAEXTENTS fcntl */ +#define PRIV_VFS_SNAPSHOT 14002 /* Allow create/rename/delete of snapshots */ +#define PRIV_VFS_SNAPSHOT_REVERT 14003 /* Allow reverting filesystem to a previous snapshot */ -#define PRIV_APFS_EMBED_DRIVER 14100 /* Allow embedding an EFI driver into the APFS container */ +#define PRIV_APFS_EMBED_DRIVER 14100 /* Allow embedding an EFI driver into the APFS container */ #define PRIV_APFS_FUSION_DEBUG 14101 /* Allow getting internal statistics and controlling the APFS Fusion container */ #define PRIV_APFS_FUSION_ALLOW_PIN_FASTPROMOTE 14102 /* Allow changing pinned/fastPromote inode flags in APFS Fusion container */ @@ -149,7 +149,7 @@ #define PRIVCHECK_DEFAULT_UNPRIVILEGED_FLAG (1) /* Don't grant root privilege by default */ __BEGIN_DECLS -int priv_check_cred(kauth_cred_t cred, int priv, int flags); +int priv_check_cred(kauth_cred_t cred, int priv, int flags); __END_DECLS #endif diff --git a/bsd/sys/proc.h b/bsd/sys/proc.h index 46536dee4..ef8015554 100644 --- a/bsd/sys/proc.h +++ b/bsd/sys/proc.h @@ -67,11 +67,11 @@ */ #ifndef _SYS_PROC_H_ -#define _SYS_PROC_H_ +#define _SYS_PROC_H_ #include #include -#include /* For struct selinfo. */ +#include /* For struct selinfo. */ #include #include #include @@ -84,7 +84,7 @@ #include #ifdef XNU_KERNEL_PRIVATE -#include /* COALITION_NUM_TYPES */ +#include /* COALITION_NUM_TYPES */ #endif #if defined(XNU_KERNEL_PRIVATE) || !defined(KERNEL) @@ -97,117 +97,117 @@ struct proc; struct extern_proc { union { struct { - struct proc *__p_forw; /* Doubly-linked run/sleep queue. */ - struct proc *__p_back; + struct proc *__p_forw; /* Doubly-linked run/sleep queue. 
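
The consumer of this table is priv_check_cred(), declared at the bottom of priv.h just above: a subsystem passes the caller's credential and one PRIV_* identifier, and a non-zero return is treated as a denial (typically surfaced as EPERM). A minimal kernel-side sketch, assuming a context where kauth_cred_get() is available; the wrapper name is illustrative:

    #include <sys/priv.h>
    #include <sys/kauth.h>

    /* Returns 0 iff the calling credential holds PRIV_VM_PRESSURE. */
    static int
    vm_pressure_query_allowed(void)
    {
    	return priv_check_cred(kauth_cred_get(), PRIV_VM_PRESSURE, 0);
    }
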
*/ + struct proc *__p_back; } p_st1; - struct timeval __p_starttime; /* process start time */ + struct timeval __p_starttime; /* process start time */ } p_un; #define p_forw p_un.p_st1.__p_forw #define p_back p_un.p_st1.__p_back #define p_starttime p_un.__p_starttime - struct vmspace *p_vmspace; /* Address space. */ - struct sigacts *p_sigacts; /* Signal actions, state (PROC ONLY). */ - int p_flag; /* P_* flags. */ - char p_stat; /* S* process status. */ - pid_t p_pid; /* Process identifier. */ - pid_t p_oppid; /* Save parent pid during ptrace. XXX */ - int p_dupfd; /* Sideways return value from fdopen. XXX */ + struct vmspace *p_vmspace; /* Address space. */ + struct sigacts *p_sigacts; /* Signal actions, state (PROC ONLY). */ + int p_flag; /* P_* flags. */ + char p_stat; /* S* process status. */ + pid_t p_pid; /* Process identifier. */ + pid_t p_oppid; /* Save parent pid during ptrace. XXX */ + int p_dupfd; /* Sideways return value from fdopen. XXX */ /* Mach related */ - caddr_t user_stack; /* where user stack was allocated */ - void *exit_thread; /* XXX Which thread is exiting? */ - int p_debugger; /* allow to debug */ - boolean_t sigwait; /* indication to suspend */ + caddr_t user_stack; /* where user stack was allocated */ + void *exit_thread; /* XXX Which thread is exiting? */ + int p_debugger; /* allow to debug */ + boolean_t sigwait; /* indication to suspend */ /* scheduling */ - u_int p_estcpu; /* Time averaged value of p_cpticks. */ - int p_cpticks; /* Ticks of cpu time. */ - fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ - void *p_wchan; /* Sleep address. */ - char *p_wmesg; /* Reason for sleep. */ - u_int p_swtime; /* Time swapped in or out. */ - u_int p_slptime; /* Time since last blocked. */ - struct itimerval p_realtimer; /* Alarm timer. */ - struct timeval p_rtime; /* Real time. */ - u_quad_t p_uticks; /* Statclock hits in user mode. */ - u_quad_t p_sticks; /* Statclock hits in system mode. */ - u_quad_t p_iticks; /* Statclock hits processing intr. */ - int p_traceflag; /* Kernel trace points. */ - struct vnode *p_tracep; /* Trace to vnode. */ - int p_siglist; /* DEPRECATED. */ - struct vnode *p_textvp; /* Vnode of executable. */ - int p_holdcnt; /* If non-zero, don't swap. */ - sigset_t p_sigmask; /* DEPRECATED. */ - sigset_t p_sigignore; /* Signals being ignored. */ - sigset_t p_sigcatch; /* Signals being caught by user. */ - u_char p_priority; /* Process priority. */ - u_char p_usrpri; /* User-priority based on p_cpu and p_nice. */ - char p_nice; /* Process "nice" value. */ - char p_comm[MAXCOMLEN+1]; - struct pgrp *p_pgrp; /* Pointer to process group. */ - struct user *p_addr; /* Kernel virtual addr of u-area (PROC ONLY). */ - u_short p_xstat; /* Exit status for wait; also stop signal. */ - u_short p_acflag; /* Accounting flags. */ - struct rusage *p_ru; /* Exit information. XXX */ + u_int p_estcpu; /* Time averaged value of p_cpticks. */ + int p_cpticks; /* Ticks of cpu time. */ + fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ + void *p_wchan; /* Sleep address. */ + char *p_wmesg; /* Reason for sleep. */ + u_int p_swtime; /* Time swapped in or out. */ + u_int p_slptime; /* Time since last blocked. */ + struct itimerval p_realtimer; /* Alarm timer. */ + struct timeval p_rtime; /* Real time. */ + u_quad_t p_uticks; /* Statclock hits in user mode. */ + u_quad_t p_sticks; /* Statclock hits in system mode. */ + u_quad_t p_iticks; /* Statclock hits processing intr. */ + int p_traceflag; /* Kernel trace points. 
*/ + struct vnode *p_tracep; /* Trace to vnode. */ + int p_siglist; /* DEPRECATED. */ + struct vnode *p_textvp; /* Vnode of executable. */ + int p_holdcnt; /* If non-zero, don't swap. */ + sigset_t p_sigmask; /* DEPRECATED. */ + sigset_t p_sigignore; /* Signals being ignored. */ + sigset_t p_sigcatch; /* Signals being caught by user. */ + u_char p_priority; /* Process priority. */ + u_char p_usrpri; /* User-priority based on p_cpu and p_nice. */ + char p_nice; /* Process "nice" value. */ + char p_comm[MAXCOMLEN + 1]; + struct pgrp *p_pgrp; /* Pointer to process group. */ + struct user *p_addr; /* Kernel virtual addr of u-area (PROC ONLY). */ + u_short p_xstat; /* Exit status for wait; also stop signal. */ + u_short p_acflag; /* Accounting flags. */ + struct rusage *p_ru; /* Exit information. XXX */ }; /* Status values. */ -#define SIDL 1 /* Process being created by fork. */ -#define SRUN 2 /* Currently runnable. */ -#define SSLEEP 3 /* Sleeping on an address. */ -#define SSTOP 4 /* Process debugging or suspension. */ -#define SZOMB 5 /* Awaiting collection by parent. */ +#define SIDL 1 /* Process being created by fork. */ +#define SRUN 2 /* Currently runnable. */ +#define SSLEEP 3 /* Sleeping on an address. */ +#define SSTOP 4 /* Process debugging or suspension. */ +#define SZOMB 5 /* Awaiting collection by parent. */ /* These flags are kept in extern_proc.p_flag. */ -#define P_ADVLOCK 0x00000001 /* Process may hold POSIX adv. lock */ -#define P_CONTROLT 0x00000002 /* Has a controlling terminal */ -#define P_LP64 0x00000004 /* Process is LP64 */ -#define P_NOCLDSTOP 0x00000008 /* No SIGCHLD when children stop */ +#define P_ADVLOCK 0x00000001 /* Process may hold POSIX adv. lock */ +#define P_CONTROLT 0x00000002 /* Has a controlling terminal */ +#define P_LP64 0x00000004 /* Process is LP64 */ +#define P_NOCLDSTOP 0x00000008 /* No SIGCHLD when children stop */ -#define P_PPWAIT 0x00000010 /* Parent waiting for chld exec/exit */ -#define P_PROFIL 0x00000020 /* Has started profiling */ -#define P_SELECT 0x00000040 /* Selecting; wakeup/waiting danger */ -#define P_CONTINUED 0x00000080 /* Process was stopped and continued */ +#define P_PPWAIT 0x00000010 /* Parent waiting for chld exec/exit */ +#define P_PROFIL 0x00000020 /* Has started profiling */ +#define P_SELECT 0x00000040 /* Selecting; wakeup/waiting danger */ +#define P_CONTINUED 0x00000080 /* Process was stopped and continued */ -#define P_SUGID 0x00000100 /* Has set privileges since last exec */ -#define P_SYSTEM 0x00000200 /* Sys proc: no sigs, stats or swap */ -#define P_TIMEOUT 0x00000400 /* Timing out during sleep */ -#define P_TRACED 0x00000800 /* Debugged process being traced */ +#define P_SUGID 0x00000100 /* Has set privileges since last exec */ +#define P_SYSTEM 0x00000200 /* Sys proc: no sigs, stats or swap */ +#define P_TIMEOUT 0x00000400 /* Timing out during sleep */ +#define P_TRACED 0x00000800 /* Debugged process being traced */ -#define P_DISABLE_ASLR 0x00001000 /* Disable address space layout randomization */ -#define P_WEXIT 0x00002000 /* Working on exiting */ -#define P_EXEC 0x00004000 /* Process called exec. */ +#define P_DISABLE_ASLR 0x00001000 /* Disable address space layout randomization */ +#define P_WEXIT 0x00002000 /* Working on exiting */ +#define P_EXEC 0x00004000 /* Process called exec. */ /* Should be moved to machine-dependent areas. */ -#define P_OWEUPC 0x00008000 /* Owe process an addupc() call at next ast. */ +#define P_OWEUPC 0x00008000 /* Owe process an addupc() call at next ast. 
*/ -#define P_AFFINITY 0x00010000 /* xxx */ -#define P_TRANSLATED 0x00020000 /* xxx */ -#define P_CLASSIC P_TRANSLATED /* xxx */ +#define P_AFFINITY 0x00010000 /* xxx */ +#define P_TRANSLATED 0x00020000 /* xxx */ +#define P_CLASSIC P_TRANSLATED /* xxx */ -#define P_DELAYIDLESLEEP 0x00040000 /* Process is marked to delay idle sleep on disk IO */ -#define P_CHECKOPENEVT 0x00080000 /* check if a vnode has the OPENEVT flag set on open */ +#define P_DELAYIDLESLEEP 0x00040000 /* Process is marked to delay idle sleep on disk IO */ +#define P_CHECKOPENEVT 0x00080000 /* check if a vnode has the OPENEVT flag set on open */ -#define P_DEPENDENCY_CAPABLE 0x00100000 /* process is ok to call vfs_markdependency() */ -#define P_REBOOT 0x00200000 /* Process called reboot() */ -#define P_RESV6 0x00400000 /* used to be P_TBE */ -#define P_RESV7 0x00800000 /* (P_SIGEXC)signal exceptions */ +#define P_DEPENDENCY_CAPABLE 0x00100000 /* process is ok to call vfs_markdependency() */ +#define P_REBOOT 0x00200000 /* Process called reboot() */ +#define P_RESV6 0x00400000 /* used to be P_TBE */ +#define P_RESV7 0x00800000 /* (P_SIGEXC)signal exceptions */ -#define P_THCWD 0x01000000 /* process has thread cwd */ -#define P_RESV9 0x02000000 /* (P_VFORK)process has vfork children */ -#define P_ADOPTPERSONA 0x04000000 /* process adopted a persona (used to be P_NOATTACH) */ -#define P_RESV11 0x08000000 /* (P_INVFORK) proc in vfork */ +#define P_THCWD 0x01000000 /* process has thread cwd */ +#define P_RESV9 0x02000000 /* (P_VFORK)process has vfork children */ +#define P_ADOPTPERSONA 0x04000000 /* process adopted a persona (used to be P_NOATTACH) */ +#define P_RESV11 0x08000000 /* (P_INVFORK) proc in vfork */ -#define P_NOSHLIB 0x10000000 /* no shared libs are in use for proc */ - /* flag set on exec */ -#define P_FORCEQUOTA 0x20000000 /* Force quota for root */ -#define P_NOCLDWAIT 0x40000000 /* No zombies when chil procs exit */ -#define P_NOREMOTEHANG 0x80000000 /* Don't hang on remote FS ops */ +#define P_NOSHLIB 0x10000000 /* no shared libs are in use for proc */ + /* flag set on exec */ +#define P_FORCEQUOTA 0x20000000 /* Force quota for root */ +#define P_NOCLDWAIT 0x40000000 /* No zombies when chil procs exit */ +#define P_NOREMOTEHANG 0x80000000 /* Don't hang on remote FS ops */ -#define P_INMEM 0 /* Obsolete: retained for compilation */ -#define P_NOSWAP 0 /* Obsolete: retained for compilation */ -#define P_PHYSIO 0 /* Obsolete: retained for compilation */ -#define P_FSTRACE 0 /* Obsolete: retained for compilation */ -#define P_SSTEP 0 /* Obsolete: retained for compilation */ +#define P_INMEM 0 /* Obsolete: retained for compilation */ +#define P_NOSWAP 0 /* Obsolete: retained for compilation */ +#define P_PHYSIO 0 /* Obsolete: retained for compilation */ +#define P_FSTRACE 0 /* Obsolete: retained for compilation */ +#define P_SSTEP 0 /* Obsolete: retained for compilation */ #define P_DIRTY_TRACK 0x00000001 /* track dirty state */ #define P_DIRTY_ALLOW_IDLE_EXIT 0x00000002 /* process can be idle-exited when clean */ @@ -220,7 +220,7 @@ struct extern_proc { #define P_DIRTY_AGING_IN_PROGRESS 0x00000100 /* aging in one of the 'aging bands' */ #define P_DIRTY_LAUNCH_IN_PROGRESS 0x00000200 /* launch is in progress */ #define P_DIRTY_DEFER_ALWAYS 0x00000400 /* defer going to idle-exit after every dirty->clean transition. - * For legacy jetsam policy only. This is the default with the other policies.*/ + * For legacy jetsam policy only. 
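
In user space, struct extern_proc is visible as the kp_proc member of struct kinfo_proc, returned by the CTL_KERN/KERN_PROC sysctl; that is the main consumer of the S* status values and P_* flags above. A minimal sketch using only the standard sysctl(3) interface, with error handling abbreviated:

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <stdio.h>

    /* Print p_stat and a couple of P_* flag bits for one pid. */
    static int
    print_proc_state(pid_t pid)
    {
    	struct kinfo_proc kp;
    	size_t len = sizeof(kp);
    	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };

    	if (sysctl(mib, 4, &kp, &len, NULL, 0) != 0 || len == 0)
    		return -1;
    	printf("stat=%d flags=0x%x lp64=%d traced=%d\n",
    	    kp.kp_proc.p_stat, (unsigned)kp.kp_proc.p_flag,
    	    !!(kp.kp_proc.p_flag & P_LP64),
    	    !!(kp.kp_proc.p_flag & P_TRACED));
    	return 0;
    }
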
This is the default with the other policies.*/ #define P_DIRTY_IS_DIRTY (P_DIRTY | P_DIRTY_SHUTDOWN) #define P_DIRTY_IDLE_EXIT_ENABLED (P_DIRTY_TRACK|P_DIRTY_ALLOW_IDLE_EXIT) @@ -235,8 +235,8 @@ extern proc_t kernproc; extern int proc_is_classic(proc_t p); proc_t current_proc_EXTERNAL(void); -extern int msleep(void *chan, lck_mtx_t *mtx, int pri, const char *wmesg, struct timespec * ts ); -extern void wakeup(void *chan); +extern int msleep(void *chan, lck_mtx_t *mtx, int pri, const char *wmesg, struct timespec * ts ); +extern void wakeup(void *chan); extern void wakeup_one(caddr_t chan); /* proc kpis */ @@ -299,31 +299,35 @@ extern int proc_issetugid(proc_t p); extern int proc_tbe(proc_t); /*! - @function proc_selfpgrpid - @abstract Get the process group id for the current process, as with proc_pgrpid(). - @return pgrpid of current process. + * @function proc_selfpgrpid + * @abstract Get the process group id for the current process, as with proc_pgrpid(). + * @return pgrpid of current process. */ pid_t proc_selfpgrpid(void); /*! - @function proc_pgrpid - @abstract Get the process group id for the passed-in process. - @param p Process whose pgrpid to grab. - @return pgrpid for "p". + * @function proc_pgrpid + * @abstract Get the process group id for the passed-in process. + * @param p Process whose pgrpid to grab. + * @return pgrpid for "p". */ pid_t proc_pgrpid(proc_t p); #ifdef KERNEL_PRIVATE // mark a process as being allowed to call vfs_markdependency() void bsd_set_dependency_capable(task_t task); -#ifdef __arm__ -static inline int IS_64BIT_PROCESS(__unused proc_t p) { return 0; } +#ifdef __arm__ +static inline int +IS_64BIT_PROCESS(__unused proc_t p) +{ + return 0; +} #else extern int IS_64BIT_PROCESS(proc_t); #endif /* __arm__ */ -extern int tsleep(void *chan, int pri, const char *wmesg, int timo); -extern int msleep1(void *chan, lck_mtx_t *mtx, int pri, const char *wmesg, u_int64_t timo); +extern int tsleep(void *chan, int pri, const char *wmesg, int timo); +extern int msleep1(void *chan, lck_mtx_t *mtx, int pri, const char *wmesg, u_int64_t timo); task_t proc_task(proc_t); extern int proc_pidversion(proc_t); @@ -333,16 +337,16 @@ extern uint32_t proc_getgid(proc_t); extern int proc_getcdhash(proc_t, unsigned char *); /*! - @function proc_pidbackgrounded - @abstract KPI to determine if a process is currently backgrounded. - @discussion The process may move into or out of background state at any time, - so be prepared for this value to be outdated immediately. - @param pid PID of the process to be queried. - @param state Pointer to a value which will be set to 1 if the process - is currently backgrounded, 0 otherwise. - @return ESRCH if pid cannot be found or has started exiting. - - EINVAL if state is NULL. + * @function proc_pidbackgrounded + * @abstract KPI to determine if a process is currently backgrounded. + * @discussion The process may move into or out of background state at any time, + * so be prepared for this value to be outdated immediately. + * @param pid PID of the process to be queried. + * @param state Pointer to a value which will be set to 1 if the process + * is currently backgrounded, 0 otherwise. + * @return ESRCH if pid cannot be found or has started exiting. + * + * EINVAL if state is NULL. 
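
Given the staleness caveat in the discussion above, callers can only treat the answer as a hint. A minimal sketch of the calling convention; the surrounding policy decision is hypothetical:

    uint32_t bg = 0;

    /* 0 on success; the state may be stale as soon as it is read. */
    if (proc_pidbackgrounded(pid, &bg) == 0 && bg == 1) {
    	/* de-prioritize work attributed to this pid */
    }
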
*/ extern int proc_pidbackgrounded(pid_t pid, uint32_t* state); @@ -374,7 +378,7 @@ extern int proc_pidoriginatoruuid(uuid_t uuid_buf, uint32_t buffersize); extern uint64_t proc_was_throttled(proc_t); extern uint64_t proc_did_throttle(proc_t); -extern void proc_coalitionids(proc_t, uint64_t [COALITION_NUM_TYPES]); +extern void proc_coalitionids(proc_t, uint64_t[COALITION_NUM_TYPES]); #ifdef CONFIG_32BIT_TELEMETRY extern void proc_log_32bit_telemetry(proc_t p); @@ -389,18 +393,18 @@ extern int networking_memstatus_callout(proc_t p, uint32_t); __END_DECLS -#endif /* KERNEL */ +#endif /* KERNEL */ #ifdef PRIVATE /* Values for pid_shutdown_sockets */ -#define SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC 0x00000001 -#define SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL 0x00000002 +#define SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC 0x00000001 +#define SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL 0x00000002 #ifdef KERNEL -#define SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL 0x10000000 -#define SHUTDOWN_SOCKET_LEVEL_NECP 0x20000000 -#define SHUTDOWN_SOCKET_LEVEL_CONTENT_FILTER 0x40000000 +#define SHUTDOWN_SOCKET_LEVEL_DISCONNECT_INTERNAL 0x10000000 +#define SHUTDOWN_SOCKET_LEVEL_NECP 0x20000000 +#define SHUTDOWN_SOCKET_LEVEL_CONTENT_FILTER 0x40000000 #endif #ifndef KERNEL @@ -420,4 +424,4 @@ __END_DECLS #endif /* !KERNEL */ #endif /* PRIVATE */ -#endif /* !_SYS_PROC_H_ */ +#endif /* !_SYS_PROC_H_ */ diff --git a/bsd/sys/proc_info.h b/bsd/sys/proc_info.h index 8e247fcf1..15dc50f70 100644 --- a/bsd/sys/proc_info.h +++ b/bsd/sys/proc_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2005-2017 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -52,65 +52,65 @@ __BEGIN_DECLS -#define PROC_ALL_PIDS 1 -#define PROC_PGRP_ONLY 2 -#define PROC_TTY_ONLY 3 -#define PROC_UID_ONLY 4 -#define PROC_RUID_ONLY 5 -#define PROC_PPID_ONLY 6 -#define PROC_KDBG_ONLY 7 +#define PROC_ALL_PIDS 1 +#define PROC_PGRP_ONLY 2 +#define PROC_TTY_ONLY 3 +#define PROC_UID_ONLY 4 +#define PROC_RUID_ONLY 5 +#define PROC_PPID_ONLY 6 +#define PROC_KDBG_ONLY 7 struct proc_bsdinfo { - uint32_t pbi_flags; /* 64bit; emulated etc */ - uint32_t pbi_status; - uint32_t pbi_xstatus; - uint32_t pbi_pid; - uint32_t pbi_ppid; - uid_t pbi_uid; - gid_t pbi_gid; - uid_t pbi_ruid; - gid_t pbi_rgid; - uid_t pbi_svuid; - gid_t pbi_svgid; - uint32_t rfu_1; /* reserved */ - char pbi_comm[MAXCOMLEN]; - char pbi_name[2*MAXCOMLEN]; /* empty if no name is registered */ - uint32_t pbi_nfiles; - uint32_t pbi_pgid; - uint32_t pbi_pjobc; - uint32_t e_tdev; /* controlling tty dev */ - uint32_t e_tpgid; /* tty process group id */ - int32_t pbi_nice; - uint64_t pbi_start_tvsec; - uint64_t pbi_start_tvusec; + uint32_t pbi_flags; /* 64bit; emulated etc */ + uint32_t pbi_status; + uint32_t pbi_xstatus; + uint32_t pbi_pid; + uint32_t pbi_ppid; + uid_t pbi_uid; + gid_t pbi_gid; + uid_t pbi_ruid; + gid_t pbi_rgid; + uid_t pbi_svuid; + gid_t pbi_svgid; + uint32_t rfu_1; /* reserved */ + char pbi_comm[MAXCOMLEN]; + char pbi_name[2 * MAXCOMLEN]; /* empty if no name is registered */ + uint32_t pbi_nfiles; + uint32_t pbi_pgid; + uint32_t pbi_pjobc; + uint32_t e_tdev; /* controlling tty dev */ + uint32_t e_tpgid; /* tty process group id */ + int32_t pbi_nice; + uint64_t pbi_start_tvsec; + uint64_t pbi_start_tvusec; }; struct proc_bsdshortinfo { - uint32_t pbsi_pid; /* process id */ - uint32_t pbsi_ppid; /* process parent id */ - uint32_t pbsi_pgid; /* process perp id */ - uint32_t pbsi_status; /* p_stat value, SZOMB, SRUN, etc */ - char pbsi_comm[MAXCOMLEN]; /* upto 16 characters of process name */ + uint32_t pbsi_pid; /* process id */ + uint32_t pbsi_ppid; /* process parent id */ + uint32_t pbsi_pgid; /* process perp id */ + uint32_t pbsi_status; /* p_stat value, SZOMB, SRUN, etc */ + char pbsi_comm[MAXCOMLEN]; /* upto 16 characters of process name */ uint32_t pbsi_flags; /* 64bit; emulated etc */ - uid_t pbsi_uid; /* current uid on process */ - gid_t pbsi_gid; /* current gid on process */ - uid_t pbsi_ruid; /* current ruid on process */ - gid_t pbsi_rgid; /* current tgid on process */ - uid_t pbsi_svuid; /* current svuid on process */ - gid_t pbsi_svgid; /* current svgid on process */ - uint32_t pbsi_rfu; /* reserved for future use*/ + uid_t pbsi_uid; /* current uid on process */ + gid_t pbsi_gid; /* current gid on process */ + uid_t pbsi_ruid; /* current ruid on process */ + gid_t pbsi_rgid; /* current tgid on process */ + uid_t pbsi_svuid; /* current svuid on process */ + gid_t pbsi_svgid; /* current svgid on process */ + uint32_t pbsi_rfu; /* reserved for future use*/ }; #ifdef PRIVATE struct proc_uniqidentifierinfo { - uint8_t p_uuid[16]; /* UUID of the main executable */ - uint64_t p_uniqueid; /* 64 bit unique identifier for process */ - uint64_t p_puniqueid; /* unique identifier for process's parent */ - uint64_t p_reserve2; /* reserved for future use */ - uint64_t p_reserve3; /* reserved for future use */ - uint64_t p_reserve4; /* reserved for future use */ + uint8_t p_uuid[16]; /* UUID of the main executable */ + uint64_t p_uniqueid; /* 64 bit unique identifier for process */ + uint64_t p_puniqueid; /* unique identifier for process's parent 
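
The proc_bsdinfo and proc_bsdshortinfo records above are filled in by proc_pidinfo() from libproc; the matching flavor constants (PROC_PIDTBSDINFO, PROC_PIDT_SHORTBSDINFO) are defined further down in this header. A minimal user-space sketch using the short form:

    #include <libproc.h>
    #include <stdio.h>

    /* Fetch the short BSD info; pbsi_comm may not be NUL-terminated,
     * so print at most MAXCOMLEN (16) bytes of it. */
    static int
    print_short_info(pid_t pid)
    {
    	struct proc_bsdshortinfo bsi;

    	if (proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 0,
    	    &bsi, (int)sizeof(bsi)) != (int)sizeof(bsi))
    		return -1;	/* ESRCH, EPERM, or short copy */
    	printf("%u %u %.16s\n", bsi.pbsi_pid, bsi.pbsi_ppid, bsi.pbsi_comm);
    	return 0;
    }
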
*/ + uint64_t p_reserve2; /* reserved for future use */ + uint64_t p_reserve3; /* reserved for future use */ + uint64_t p_reserve4; /* reserved for future use */ }; @@ -120,8 +120,8 @@ struct proc_bsdinfowithuniqid { }; struct proc_archinfo { - cpu_type_t p_cputype; - cpu_subtype_t p_cpusubtype; + cpu_type_t p_cputype; + cpu_subtype_t p_cpusubtype; }; struct proc_pidcoalitioninfo { @@ -143,108 +143,108 @@ struct proc_originatorinfo { /* pbi_flags values */ -#define PROC_FLAG_SYSTEM 1 /* System process */ -#define PROC_FLAG_TRACED 2 /* process currently being traced, possibly by gdb */ -#define PROC_FLAG_INEXIT 4 /* process is working its way in exit() */ -#define PROC_FLAG_PPWAIT 8 -#define PROC_FLAG_LP64 0x10 /* 64bit process */ -#define PROC_FLAG_SLEADER 0x20 /* The process is the session leader */ -#define PROC_FLAG_CTTY 0x40 /* process has a control tty */ -#define PROC_FLAG_CONTROLT 0x80 /* Has a controlling terminal */ -#define PROC_FLAG_THCWD 0x100 /* process has a thread with cwd */ +#define PROC_FLAG_SYSTEM 1 /* System process */ +#define PROC_FLAG_TRACED 2 /* process currently being traced, possibly by gdb */ +#define PROC_FLAG_INEXIT 4 /* process is working its way in exit() */ +#define PROC_FLAG_PPWAIT 8 +#define PROC_FLAG_LP64 0x10 /* 64bit process */ +#define PROC_FLAG_SLEADER 0x20 /* The process is the session leader */ +#define PROC_FLAG_CTTY 0x40 /* process has a control tty */ +#define PROC_FLAG_CONTROLT 0x80 /* Has a controlling terminal */ +#define PROC_FLAG_THCWD 0x100 /* process has a thread with cwd */ /* process control bits for resource starvation */ -#define PROC_FLAG_PC_THROTTLE 0x200 /* In resource starvation situations, this process is to be throttled */ -#define PROC_FLAG_PC_SUSP 0x400 /* In resource starvation situations, this process is to be suspended */ -#define PROC_FLAG_PC_KILL 0x600 /* In resource starvation situations, this process is to be terminated */ -#define PROC_FLAG_PC_MASK 0x600 +#define PROC_FLAG_PC_THROTTLE 0x200 /* In resource starvation situations, this process is to be throttled */ +#define PROC_FLAG_PC_SUSP 0x400 /* In resource starvation situations, this process is to be suspended */ +#define PROC_FLAG_PC_KILL 0x600 /* In resource starvation situations, this process is to be terminated */ +#define PROC_FLAG_PC_MASK 0x600 /* process action bits for resource starvation */ -#define PROC_FLAG_PA_THROTTLE 0x800 /* The process is currently throttled due to resource starvation */ -#define PROC_FLAG_PA_SUSP 0x1000 /* The process is currently suspended due to resource starvation */ -#define PROC_FLAG_PSUGID 0x2000 /* process has set privileges since last exec */ -#define PROC_FLAG_EXEC 0x4000 /* process has called exec */ +#define PROC_FLAG_PA_THROTTLE 0x800 /* The process is currently throttled due to resource starvation */ +#define PROC_FLAG_PA_SUSP 0x1000 /* The process is currently suspended due to resource starvation */ +#define PROC_FLAG_PSUGID 0x2000 /* process has set privileges since last exec */ +#define PROC_FLAG_EXEC 0x4000 /* process has called exec */ #ifdef PRIVATE -#define PROC_FLAG_DARWINBG 0x8000 /* process in darwin background */ -#define PROC_FLAG_EXT_DARWINBG 0x10000 /* process in darwin background - external enforcement */ -#define PROC_FLAG_IOS_APPLEDAEMON 0x20000 /* Process is apple daemon */ -#define PROC_FLAG_DELAYIDLESLEEP 0x40000 /* Process is marked to delay idle sleep on disk IO */ -#define PROC_FLAG_IOS_IMPPROMOTION 0x80000 /* Process is daemon which receives importane donation */ +#define PROC_FLAG_DARWINBG 0x8000 
/* process in darwin background */ +#define PROC_FLAG_EXT_DARWINBG 0x10000 /* process in darwin background - external enforcement */ +#define PROC_FLAG_IOS_APPLEDAEMON 0x20000 /* Process is apple daemon */ +#define PROC_FLAG_DELAYIDLESLEEP 0x40000 /* Process is marked to delay idle sleep on disk IO */ +#define PROC_FLAG_IOS_IMPPROMOTION 0x80000 /* Process is daemon which receives importane donation */ #define PROC_FLAG_ADAPTIVE 0x100000 /* Process is adaptive */ #define PROC_FLAG_ADAPTIVE_IMPORTANT 0x200000 /* Process is adaptive, and is currently important */ #define PROC_FLAG_IMPORTANCE_DONOR 0x400000 /* Process is marked as an importance donor */ #define PROC_FLAG_SUPPRESSED 0x800000 /* Process is suppressed */ -#define PROC_FLAG_APPLICATION 0x1000000 /* Process is an application */ -#define PROC_FLAG_IOS_APPLICATION PROC_FLAG_APPLICATION /* Process is an application */ +#define PROC_FLAG_APPLICATION 0x1000000 /* Process is an application */ +#define PROC_FLAG_IOS_APPLICATION PROC_FLAG_APPLICATION /* Process is an application */ #endif struct proc_taskinfo { - uint64_t pti_virtual_size; /* virtual memory size (bytes) */ - uint64_t pti_resident_size; /* resident memory size (bytes) */ - uint64_t pti_total_user; /* total time */ - uint64_t pti_total_system; - uint64_t pti_threads_user; /* existing threads only */ - uint64_t pti_threads_system; - int32_t pti_policy; /* default policy for new threads */ - int32_t pti_faults; /* number of page faults */ - int32_t pti_pageins; /* number of actual pageins */ - int32_t pti_cow_faults; /* number of copy-on-write faults */ - int32_t pti_messages_sent; /* number of messages sent */ - int32_t pti_messages_received; /* number of messages received */ - int32_t pti_syscalls_mach; /* number of mach system calls */ - int32_t pti_syscalls_unix; /* number of unix system calls */ - int32_t pti_csw; /* number of context switches */ - int32_t pti_threadnum; /* number of threads in the task */ - int32_t pti_numrunning; /* number of running threads */ - int32_t pti_priority; /* task priority*/ + uint64_t pti_virtual_size; /* virtual memory size (bytes) */ + uint64_t pti_resident_size; /* resident memory size (bytes) */ + uint64_t pti_total_user; /* total time */ + uint64_t pti_total_system; + uint64_t pti_threads_user; /* existing threads only */ + uint64_t pti_threads_system; + int32_t pti_policy; /* default policy for new threads */ + int32_t pti_faults; /* number of page faults */ + int32_t pti_pageins; /* number of actual pageins */ + int32_t pti_cow_faults; /* number of copy-on-write faults */ + int32_t pti_messages_sent; /* number of messages sent */ + int32_t pti_messages_received; /* number of messages received */ + int32_t pti_syscalls_mach; /* number of mach system calls */ + int32_t pti_syscalls_unix; /* number of unix system calls */ + int32_t pti_csw; /* number of context switches */ + int32_t pti_threadnum; /* number of threads in the task */ + int32_t pti_numrunning; /* number of running threads */ + int32_t pti_priority; /* task priority*/ }; struct proc_taskallinfo { - struct proc_bsdinfo pbsd; - struct proc_taskinfo ptinfo; + struct proc_bsdinfo pbsd; + struct proc_taskinfo ptinfo; }; #define MAXTHREADNAMESIZE 64 struct proc_threadinfo { - uint64_t pth_user_time; /* user run time */ - uint64_t pth_system_time; /* system run time */ - int32_t pth_cpu_usage; /* scaled cpu usage percentage */ - int32_t pth_policy; /* scheduling policy in effect */ - int32_t pth_run_state; /* run state (see below) */ - int32_t pth_flags; /* various flags (see 
below) */ - int32_t pth_sleep_time; /* number of seconds that thread */ - int32_t pth_curpri; /* cur priority*/ - int32_t pth_priority; /* priority*/ - int32_t pth_maxpriority; /* max priority*/ - char pth_name[MAXTHREADNAMESIZE]; /* thread name, if any */ + uint64_t pth_user_time; /* user run time */ + uint64_t pth_system_time; /* system run time */ + int32_t pth_cpu_usage; /* scaled cpu usage percentage */ + int32_t pth_policy; /* scheduling policy in effect */ + int32_t pth_run_state; /* run state (see below) */ + int32_t pth_flags; /* various flags (see below) */ + int32_t pth_sleep_time; /* number of seconds that thread */ + int32_t pth_curpri; /* cur priority*/ + int32_t pth_priority; /* priority*/ + int32_t pth_maxpriority; /* max priority*/ + char pth_name[MAXTHREADNAMESIZE]; /* thread name, if any */ }; struct proc_regioninfo { - uint32_t pri_protection; - uint32_t pri_max_protection; - uint32_t pri_inheritance; - uint32_t pri_flags; /* shared, external pager, is submap */ - uint64_t pri_offset; - uint32_t pri_behavior; - uint32_t pri_user_wired_count; - uint32_t pri_user_tag; - uint32_t pri_pages_resident; - uint32_t pri_pages_shared_now_private; - uint32_t pri_pages_swapped_out; - uint32_t pri_pages_dirtied; - uint32_t pri_ref_count; - uint32_t pri_shadow_depth; - uint32_t pri_share_mode; - uint32_t pri_private_pages_resident; - uint32_t pri_shared_pages_resident; - uint32_t pri_obj_id; - uint32_t pri_depth; - uint64_t pri_address; - uint64_t pri_size; -}; - -#define PROC_REGION_SUBMAP 1 -#define PROC_REGION_SHARED 2 + uint32_t pri_protection; + uint32_t pri_max_protection; + uint32_t pri_inheritance; + uint32_t pri_flags; /* shared, external pager, is submap */ + uint64_t pri_offset; + uint32_t pri_behavior; + uint32_t pri_user_wired_count; + uint32_t pri_user_tag; + uint32_t pri_pages_resident; + uint32_t pri_pages_shared_now_private; + uint32_t pri_pages_swapped_out; + uint32_t pri_pages_dirtied; + uint32_t pri_ref_count; + uint32_t pri_shadow_depth; + uint32_t pri_share_mode; + uint32_t pri_private_pages_resident; + uint32_t pri_shared_pages_resident; + uint32_t pri_obj_id; + uint32_t pri_depth; + uint64_t pri_address; + uint64_t pri_size; +}; + +#define PROC_REGION_SUBMAP 1 +#define PROC_REGION_SHARED 2 #define SM_COW 1 #define SM_PRIVATE 2 @@ -260,26 +260,26 @@ struct proc_regioninfo { * Thread run states (state field). */ -#define TH_STATE_RUNNING 1 /* thread is running normally */ -#define TH_STATE_STOPPED 2 /* thread is stopped */ -#define TH_STATE_WAITING 3 /* thread is waiting normally */ -#define TH_STATE_UNINTERRUPTIBLE 4 /* thread is in an uninterruptible - wait */ -#define TH_STATE_HALTED 5 /* thread is halted at a - clean point */ +#define TH_STATE_RUNNING 1 /* thread is running normally */ +#define TH_STATE_STOPPED 2 /* thread is stopped */ +#define TH_STATE_WAITING 3 /* thread is waiting normally */ +#define TH_STATE_UNINTERRUPTIBLE 4 /* thread is in an uninterruptible + * wait */ +#define TH_STATE_HALTED 5 /* thread is halted at a + * clean point */ /* * Thread flags (flags field). 
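
proc_regioninfo is consumed through proc_pidinfo() as well: PROC_PIDREGIONINFO (defined below) takes a probe address in the uint64_t argument and fills in the region at or above it, which lets a tool walk the whole map. A minimal sketch under that assumption (behavior inferred from common vmmap-style usage, not documented in this header):

    #include <libproc.h>

    static void
    walk_regions(pid_t pid)
    {
    	struct proc_regioninfo ri;
    	uint64_t addr = 0;

    	while (proc_pidinfo(pid, PROC_PIDREGIONINFO, addr,
    	    &ri, (int)sizeof(ri)) == (int)sizeof(ri)) {
    		/* ri.pri_address/pri_size/pri_protection: one region */
    		addr = ri.pri_address + ri.pri_size;
    	}
    }
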
*/ -#define TH_FLAGS_SWAPPED 0x1 /* thread is swapped out */ -#define TH_FLAGS_IDLE 0x2 /* thread is an idle thread */ +#define TH_FLAGS_SWAPPED 0x1 /* thread is swapped out */ +#define TH_FLAGS_IDLE 0x2 /* thread is an idle thread */ struct proc_workqueueinfo { - uint32_t pwq_nthreads; /* total number of workqueue threads */ - uint32_t pwq_runthreads; /* total number of running workqueue threads */ - uint32_t pwq_blockedthreads; /* total number of blocked workqueue threads */ - uint32_t pwq_state; + uint32_t pwq_nthreads; /* total number of workqueue threads */ + uint32_t pwq_runthreads; /* total number of running workqueue threads */ + uint32_t pwq_blockedthreads; /* total number of blocked workqueue threads */ + uint32_t pwq_state; }; /* @@ -290,105 +290,105 @@ struct proc_workqueueinfo { #define WQ_FLAGS_AVAILABLE 0x4 struct proc_fileinfo { - uint32_t fi_openflags; - uint32_t fi_status; - off_t fi_offset; - int32_t fi_type; - uint32_t fi_guardflags; + uint32_t fi_openflags; + uint32_t fi_status; + off_t fi_offset; + int32_t fi_type; + uint32_t fi_guardflags; }; /* stats flags in proc_fileinfo */ -#define PROC_FP_SHARED 1 /* shared by more than one fd */ -#define PROC_FP_CLEXEC 2 /* close on exec */ -#define PROC_FP_GUARDED 4 /* guarded fd */ -#define PROC_FP_CLFORK 8 /* close on fork */ +#define PROC_FP_SHARED 1 /* shared by more than one fd */ +#define PROC_FP_CLEXEC 2 /* close on exec */ +#define PROC_FP_GUARDED 4 /* guarded fd */ +#define PROC_FP_CLFORK 8 /* close on fork */ -#define PROC_FI_GUARD_CLOSE (1u << 0) -#define PROC_FI_GUARD_DUP (1u << 1) -#define PROC_FI_GUARD_SOCKET_IPC (1u << 2) -#define PROC_FI_GUARD_FILEPORT (1u << 3) +#define PROC_FI_GUARD_CLOSE (1u << 0) +#define PROC_FI_GUARD_DUP (1u << 1) +#define PROC_FI_GUARD_SOCKET_IPC (1u << 2) +#define PROC_FI_GUARD_FILEPORT (1u << 3) struct proc_exitreasonbasicinfo { - uint32_t beri_namespace; - uint64_t beri_code; - uint64_t beri_flags; - uint32_t beri_reason_buf_size; + uint32_t beri_namespace; + uint64_t beri_code; + uint64_t beri_flags; + uint32_t beri_reason_buf_size; } __attribute__((packed)); struct proc_exitreasoninfo { - uint32_t eri_namespace; - uint64_t eri_code; - uint64_t eri_flags; - uint32_t eri_reason_buf_size; - uint64_t eri_kcd_buf; + uint32_t eri_namespace; + uint64_t eri_code; + uint64_t eri_flags; + uint32_t eri_reason_buf_size; + uint64_t eri_kcd_buf; } __attribute__((packed)); /* * A copy of stat64 with static sized fields. 
*/ struct vinfo_stat { - uint32_t vst_dev; /* [XSI] ID of device containing file */ - uint16_t vst_mode; /* [XSI] Mode of file (see below) */ - uint16_t vst_nlink; /* [XSI] Number of hard links */ - uint64_t vst_ino; /* [XSI] File serial number */ - uid_t vst_uid; /* [XSI] User ID of the file */ - gid_t vst_gid; /* [XSI] Group ID of the file */ - int64_t vst_atime; /* [XSI] Time of last access */ - int64_t vst_atimensec; /* nsec of last access */ - int64_t vst_mtime; /* [XSI] Last data modification time */ - int64_t vst_mtimensec; /* last data modification nsec */ - int64_t vst_ctime; /* [XSI] Time of last status change */ - int64_t vst_ctimensec; /* nsec of last status change */ - int64_t vst_birthtime; /* File creation time(birth) */ - int64_t vst_birthtimensec; /* nsec of File creation time */ - off_t vst_size; /* [XSI] file size, in bytes */ - int64_t vst_blocks; /* [XSI] blocks allocated for file */ - int32_t vst_blksize; /* [XSI] optimal blocksize for I/O */ - uint32_t vst_flags; /* user defined flags for file */ - uint32_t vst_gen; /* file generation number */ - uint32_t vst_rdev; /* [XSI] Device ID */ - int64_t vst_qspare[2]; /* RESERVED: DO NOT USE! */ + uint32_t vst_dev; /* [XSI] ID of device containing file */ + uint16_t vst_mode; /* [XSI] Mode of file (see below) */ + uint16_t vst_nlink; /* [XSI] Number of hard links */ + uint64_t vst_ino; /* [XSI] File serial number */ + uid_t vst_uid; /* [XSI] User ID of the file */ + gid_t vst_gid; /* [XSI] Group ID of the file */ + int64_t vst_atime; /* [XSI] Time of last access */ + int64_t vst_atimensec; /* nsec of last access */ + int64_t vst_mtime; /* [XSI] Last data modification time */ + int64_t vst_mtimensec; /* last data modification nsec */ + int64_t vst_ctime; /* [XSI] Time of last status change */ + int64_t vst_ctimensec; /* nsec of last status change */ + int64_t vst_birthtime; /* File creation time(birth) */ + int64_t vst_birthtimensec; /* nsec of File creation time */ + off_t vst_size; /* [XSI] file size, in bytes */ + int64_t vst_blocks; /* [XSI] blocks allocated for file */ + int32_t vst_blksize; /* [XSI] optimal blocksize for I/O */ + uint32_t vst_flags; /* user defined flags for file */ + uint32_t vst_gen; /* file generation number */ + uint32_t vst_rdev; /* [XSI] Device ID */ + int64_t vst_qspare[2]; /* RESERVED: DO NOT USE! 
*/ }; struct vnode_info { - struct vinfo_stat vi_stat; - int vi_type; - int vi_pad; - fsid_t vi_fsid; + struct vinfo_stat vi_stat; + int vi_type; + int vi_pad; + fsid_t vi_fsid; }; struct vnode_info_path { - struct vnode_info vip_vi; - char vip_path[MAXPATHLEN]; /* tail end of it */ + struct vnode_info vip_vi; + char vip_path[MAXPATHLEN]; /* tail end of it */ }; struct vnode_fdinfo { - struct proc_fileinfo pfi; - struct vnode_info pvi; + struct proc_fileinfo pfi; + struct vnode_info pvi; }; struct vnode_fdinfowithpath { - struct proc_fileinfo pfi; - struct vnode_info_path pvip; + struct proc_fileinfo pfi; + struct vnode_info_path pvip; }; struct proc_regionwithpathinfo { - struct proc_regioninfo prp_prinfo; - struct vnode_info_path prp_vip; + struct proc_regioninfo prp_prinfo; + struct vnode_info_path prp_vip; }; struct proc_vnodepathinfo { - struct vnode_info_path pvi_cdir; - struct vnode_info_path pvi_rdir; + struct vnode_info_path pvi_cdir; + struct vnode_info_path pvi_rdir; }; struct proc_threadwithpathinfo { - struct proc_threadinfo pt; - struct vnode_info_path pvip; + struct proc_threadinfo pt; + struct vnode_info_path pvip; }; /* - * Socket + * Socket */ @@ -400,71 +400,71 @@ struct proc_threadwithpathinfo { #define INI_IPV6 0x2 struct in4in6_addr { - u_int32_t i46a_pad32[3]; - struct in_addr i46a_addr4; + u_int32_t i46a_pad32[3]; + struct in_addr i46a_addr4; }; struct in_sockinfo { - int insi_fport; /* foreign port */ - int insi_lport; /* local port */ - uint64_t insi_gencnt; /* generation count of this instance */ - uint32_t insi_flags; /* generic IP/datagram flags */ - uint32_t insi_flow; - - uint8_t insi_vflag; /* ini_IPV4 or ini_IPV6 */ - uint8_t insi_ip_ttl; /* time to live proto */ - uint32_t rfu_1; /* reserved */ + int insi_fport; /* foreign port */ + int insi_lport; /* local port */ + uint64_t insi_gencnt; /* generation count of this instance */ + uint32_t insi_flags; /* generic IP/datagram flags */ + uint32_t insi_flow; + + uint8_t insi_vflag; /* ini_IPV4 or ini_IPV6 */ + uint8_t insi_ip_ttl; /* time to live proto */ + uint32_t rfu_1; /* reserved */ /* protocol dependent part */ union { - struct in4in6_addr ina_46; - struct in6_addr ina_6; - } insi_faddr; /* foreign host table entry */ + struct in4in6_addr ina_46; + struct in6_addr ina_6; + } insi_faddr; /* foreign host table entry */ union { - struct in4in6_addr ina_46; - struct in6_addr ina_6; - } insi_laddr; /* local host table entry */ + struct in4in6_addr ina_46; + struct in6_addr ina_6; + } insi_laddr; /* local host table entry */ struct { - u_char in4_tos; /* type of service */ - } insi_v4; + u_char in4_tos; /* type of service */ + } insi_v4; struct { - uint8_t in6_hlim; - int in6_cksum; - u_short in6_ifindex; - short in6_hops; - } insi_v6; + uint8_t in6_hlim; + int in6_cksum; + u_short in6_ifindex; + short in6_hops; + } insi_v6; }; /* * TCP Sockets */ -#define TSI_T_REXMT 0 /* retransmit */ -#define TSI_T_PERSIST 1 /* retransmit persistence */ -#define TSI_T_KEEP 2 /* keep alive */ -#define TSI_T_2MSL 3 /* 2*msl quiet time timer */ -#define TSI_T_NTIMERS 4 - -#define TSI_S_CLOSED 0 /* closed */ -#define TSI_S_LISTEN 1 /* listening for connection */ -#define TSI_S_SYN_SENT 2 /* active, have sent syn */ -#define TSI_S_SYN_RECEIVED 3 /* have send and received syn */ -#define TSI_S_ESTABLISHED 4 /* established */ -#define TSI_S__CLOSE_WAIT 5 /* rcvd fin, waiting for close */ -#define TSI_S_FIN_WAIT_1 6 /* have closed, sent fin */ -#define TSI_S_CLOSING 7 /* closed xchd FIN; await FIN ACK */ -#define TSI_S_LAST_ACK 8 /* 
had fin and close; await FIN ACK */ -#define TSI_S_FIN_WAIT_2 9 /* have closed, fin is acked */ -#define TSI_S_TIME_WAIT 10 /* in 2*msl quiet wait after close */ -#define TSI_S_RESERVED 11 /* pseudo state: reserved */ +#define TSI_T_REXMT 0 /* retransmit */ +#define TSI_T_PERSIST 1 /* retransmit persistence */ +#define TSI_T_KEEP 2 /* keep alive */ +#define TSI_T_2MSL 3 /* 2*msl quiet time timer */ +#define TSI_T_NTIMERS 4 + +#define TSI_S_CLOSED 0 /* closed */ +#define TSI_S_LISTEN 1 /* listening for connection */ +#define TSI_S_SYN_SENT 2 /* active, have sent syn */ +#define TSI_S_SYN_RECEIVED 3 /* have send and received syn */ +#define TSI_S_ESTABLISHED 4 /* established */ +#define TSI_S__CLOSE_WAIT 5 /* rcvd fin, waiting for close */ +#define TSI_S_FIN_WAIT_1 6 /* have closed, sent fin */ +#define TSI_S_CLOSING 7 /* closed xchd FIN; await FIN ACK */ +#define TSI_S_LAST_ACK 8 /* had fin and close; await FIN ACK */ +#define TSI_S_FIN_WAIT_2 9 /* have closed, fin is acked */ +#define TSI_S_TIME_WAIT 10 /* in 2*msl quiet wait after close */ +#define TSI_S_RESERVED 11 /* pseudo state: reserved */ struct tcp_sockinfo { - struct in_sockinfo tcpsi_ini; - int tcpsi_state; - int tcpsi_timer[TSI_T_NTIMERS]; - int tcpsi_mss; - uint32_t tcpsi_flags; - uint32_t rfu_1; /* reserved */ - uint64_t tcpsi_tp; /* opaque handle of TCP protocol control block */ + struct in_sockinfo tcpsi_ini; + int tcpsi_state; + int tcpsi_timer[TSI_T_NTIMERS]; + int tcpsi_mss; + uint32_t tcpsi_flags; + uint32_t rfu_1; /* reserved */ + uint64_t tcpsi_tp; /* opaque handle of TCP protocol control block */ }; /* @@ -473,16 +473,16 @@ struct tcp_sockinfo { struct un_sockinfo { - uint64_t unsi_conn_so; /* opaque handle of connected socket */ - uint64_t unsi_conn_pcb; /* opaque handle of connected protocol control block */ + uint64_t unsi_conn_so; /* opaque handle of connected socket */ + uint64_t unsi_conn_pcb; /* opaque handle of connected protocol control block */ union { - struct sockaddr_un ua_sun; - char ua_dummy[SOCK_MAXADDRLEN]; - } unsi_addr; /* bound address */ + struct sockaddr_un ua_sun; + char ua_dummy[SOCK_MAXADDRLEN]; + } unsi_addr; /* bound address */ union { - struct sockaddr_un ua_sun; - char ua_dummy[SOCK_MAXADDRLEN]; - } unsi_caddr; /* address of socket connected to */ + struct sockaddr_un ua_sun; + char ua_dummy[SOCK_MAXADDRLEN]; + } unsi_caddr; /* address of socket connected to */ }; /* @@ -490,9 +490,9 @@ struct un_sockinfo { */ struct ndrv_info { - uint32_t ndrvsi_if_family; - uint32_t ndrvsi_if_unit; - char ndrvsi_if_name[IF_NAMESIZE]; + uint32_t ndrvsi_if_family; + uint32_t ndrvsi_if_unit; + char ndrvsi_if_name[IF_NAMESIZE]; }; /* @@ -500,141 +500,141 @@ struct ndrv_info { */ struct kern_event_info { - uint32_t kesi_vendor_code_filter; - uint32_t kesi_class_filter; - uint32_t kesi_subclass_filter; -}; + uint32_t kesi_vendor_code_filter; + uint32_t kesi_class_filter; + uint32_t kesi_subclass_filter; +}; /* * Kernel Control Sockets */ struct kern_ctl_info { - uint32_t kcsi_id; - uint32_t kcsi_reg_unit; - uint32_t kcsi_flags; /* support flags */ - uint32_t kcsi_recvbufsize; /* request more than the default buffer size */ - uint32_t kcsi_sendbufsize; /* request more than the default buffer size */ - uint32_t kcsi_unit; - char kcsi_name[MAX_KCTL_NAME]; /* unique nke identifier, provided by DTS */ + uint32_t kcsi_id; + uint32_t kcsi_reg_unit; + uint32_t kcsi_flags; /* support flags */ + uint32_t kcsi_recvbufsize; /* request more than the default buffer size */ + uint32_t kcsi_sendbufsize; /* request more than 
the default buffer size */ + uint32_t kcsi_unit; + char kcsi_name[MAX_KCTL_NAME]; /* unique nke identifier, provided by DTS */ }; /* soi_state */ -#define SOI_S_NOFDREF 0x0001 /* no file table ref any more */ -#define SOI_S_ISCONNECTED 0x0002 /* socket connected to a peer */ -#define SOI_S_ISCONNECTING 0x0004 /* in process of connecting to peer */ -#define SOI_S_ISDISCONNECTING 0x0008 /* in process of disconnecting */ -#define SOI_S_CANTSENDMORE 0x0010 /* can't send more data to peer */ -#define SOI_S_CANTRCVMORE 0x0020 /* can't receive more data from peer */ -#define SOI_S_RCVATMARK 0x0040 /* at mark on input */ -#define SOI_S_PRIV 0x0080 /* privileged for broadcast, raw... */ -#define SOI_S_NBIO 0x0100 /* non-blocking ops */ -#define SOI_S_ASYNC 0x0200 /* async i/o notify */ -#define SOI_S_INCOMP 0x0800 /* Unaccepted, incomplete connection */ -#define SOI_S_COMP 0x1000 /* unaccepted, complete connection */ -#define SOI_S_ISDISCONNECTED 0x2000 /* socket disconnected from peer */ -#define SOI_S_DRAINING 0x4000 /* close waiting for blocked system calls to drain */ +#define SOI_S_NOFDREF 0x0001 /* no file table ref any more */ +#define SOI_S_ISCONNECTED 0x0002 /* socket connected to a peer */ +#define SOI_S_ISCONNECTING 0x0004 /* in process of connecting to peer */ +#define SOI_S_ISDISCONNECTING 0x0008 /* in process of disconnecting */ +#define SOI_S_CANTSENDMORE 0x0010 /* can't send more data to peer */ +#define SOI_S_CANTRCVMORE 0x0020 /* can't receive more data from peer */ +#define SOI_S_RCVATMARK 0x0040 /* at mark on input */ +#define SOI_S_PRIV 0x0080 /* privileged for broadcast, raw... */ +#define SOI_S_NBIO 0x0100 /* non-blocking ops */ +#define SOI_S_ASYNC 0x0200 /* async i/o notify */ +#define SOI_S_INCOMP 0x0800 /* Unaccepted, incomplete connection */ +#define SOI_S_COMP 0x1000 /* unaccepted, complete connection */ +#define SOI_S_ISDISCONNECTED 0x2000 /* socket disconnected from peer */ +#define SOI_S_DRAINING 0x4000 /* close waiting for blocked system calls to drain */ struct sockbuf_info { - uint32_t sbi_cc; - uint32_t sbi_hiwat; /* SO_RCVBUF, SO_SNDBUF */ - uint32_t sbi_mbcnt; - uint32_t sbi_mbmax; - uint32_t sbi_lowat; - short sbi_flags; - short sbi_timeo; + uint32_t sbi_cc; + uint32_t sbi_hiwat; /* SO_RCVBUF, SO_SNDBUF */ + uint32_t sbi_mbcnt; + uint32_t sbi_mbmax; + uint32_t sbi_lowat; + short sbi_flags; + short sbi_timeo; }; enum { - SOCKINFO_GENERIC = 0, - SOCKINFO_IN = 1, - SOCKINFO_TCP = 2, - SOCKINFO_UN = 3, - SOCKINFO_NDRV = 4, - SOCKINFO_KERN_EVENT = 5, - SOCKINFO_KERN_CTL = 6 + SOCKINFO_GENERIC = 0, + SOCKINFO_IN = 1, + SOCKINFO_TCP = 2, + SOCKINFO_UN = 3, + SOCKINFO_NDRV = 4, + SOCKINFO_KERN_EVENT = 5, + SOCKINFO_KERN_CTL = 6 }; struct socket_info { - struct vinfo_stat soi_stat; - uint64_t soi_so; /* opaque handle of socket */ - uint64_t soi_pcb; /* opaque handle of protocol control block */ - int soi_type; - int soi_protocol; - int soi_family; - short soi_options; - short soi_linger; - short soi_state; - short soi_qlen; - short soi_incqlen; - short soi_qlimit; - short soi_timeo; - u_short soi_error; - uint32_t soi_oobmark; - struct sockbuf_info soi_rcv; - struct sockbuf_info soi_snd; - int soi_kind; - uint32_t rfu_1; /* reserved */ + struct vinfo_stat soi_stat; + uint64_t soi_so; /* opaque handle of socket */ + uint64_t soi_pcb; /* opaque handle of protocol control block */ + int soi_type; + int soi_protocol; + int soi_family; + short soi_options; + short soi_linger; + short soi_state; + short soi_qlen; + short soi_incqlen; + short soi_qlimit; + short soi_timeo; + 
u_short soi_error; + uint32_t soi_oobmark; + struct sockbuf_info soi_rcv; + struct sockbuf_info soi_snd; + int soi_kind; + uint32_t rfu_1; /* reserved */ union { - struct in_sockinfo pri_in; /* SOCKINFO_IN */ - struct tcp_sockinfo pri_tcp; /* SOCKINFO_TCP */ - struct un_sockinfo pri_un; /* SOCKINFO_UN */ - struct ndrv_info pri_ndrv; /* SOCKINFO_NDRV */ - struct kern_event_info pri_kern_event; /* SOCKINFO_KERN_EVENT */ - struct kern_ctl_info pri_kern_ctl; /* SOCKINFO_KERN_CTL */ - } soi_proto; + struct in_sockinfo pri_in; /* SOCKINFO_IN */ + struct tcp_sockinfo pri_tcp; /* SOCKINFO_TCP */ + struct un_sockinfo pri_un; /* SOCKINFO_UN */ + struct ndrv_info pri_ndrv; /* SOCKINFO_NDRV */ + struct kern_event_info pri_kern_event; /* SOCKINFO_KERN_EVENT */ + struct kern_ctl_info pri_kern_ctl; /* SOCKINFO_KERN_CTL */ + } soi_proto; }; struct socket_fdinfo { - struct proc_fileinfo pfi; - struct socket_info psi; + struct proc_fileinfo pfi; + struct socket_info psi; }; struct psem_info { - struct vinfo_stat psem_stat; - char psem_name[MAXPATHLEN]; + struct vinfo_stat psem_stat; + char psem_name[MAXPATHLEN]; }; struct psem_fdinfo { - struct proc_fileinfo pfi; - struct psem_info pseminfo; + struct proc_fileinfo pfi; + struct psem_info pseminfo; }; struct pshm_info { - struct vinfo_stat pshm_stat; - uint64_t pshm_mappaddr; - char pshm_name[MAXPATHLEN]; + struct vinfo_stat pshm_stat; + uint64_t pshm_mappaddr; + char pshm_name[MAXPATHLEN]; }; struct pshm_fdinfo { - struct proc_fileinfo pfi; - struct pshm_info pshminfo; + struct proc_fileinfo pfi; + struct pshm_info pshminfo; }; struct pipe_info { - struct vinfo_stat pipe_stat; - uint64_t pipe_handle; - uint64_t pipe_peerhandle; - int pipe_status; - int rfu_1; /* reserved */ + struct vinfo_stat pipe_stat; + uint64_t pipe_handle; + uint64_t pipe_peerhandle; + int pipe_status; + int rfu_1; /* reserved */ }; struct pipe_fdinfo { - struct proc_fileinfo pfi; - struct pipe_info pipeinfo; + struct proc_fileinfo pfi; + struct pipe_info pipeinfo; }; struct kqueue_info { - struct vinfo_stat kq_stat; - uint32_t kq_state; - uint32_t rfu_1; /* reserved */ + struct vinfo_stat kq_stat; + uint32_t kq_state; + uint32_t rfu_1; /* reserved */ }; struct kqueue_dyninfo { @@ -646,19 +646,19 @@ struct kqueue_dyninfo { uint8_t kqdi_async_qos; uint16_t kqdi_request_state; uint8_t kqdi_events_qos; - uint8_t kqdi_pri; - uint8_t kqdi_pol; - uint8_t kqdi_cpupercent; + uint8_t kqdi_pri; + uint8_t kqdi_pol; + uint8_t kqdi_cpupercent; uint8_t _kqdi_reserved0[4]; uint64_t _kqdi_reserved1[4]; }; /* keep in sync with KQ_* in sys/eventvar.h */ -#define PROC_KQUEUE_SELECT 0x01 -#define PROC_KQUEUE_SLEEP 0x02 -#define PROC_KQUEUE_32 0x08 -#define PROC_KQUEUE_64 0x10 -#define PROC_KQUEUE_QOS 0x20 +#define PROC_KQUEUE_SELECT 0x01 +#define PROC_KQUEUE_SLEEP 0x02 +#define PROC_KQUEUE_32 0x08 +#define PROC_KQUEUE_64 0x10 +#define PROC_KQUEUE_QOS 0x20 #ifdef PRIVATE struct kevent_extinfo { @@ -671,123 +671,123 @@ struct kevent_extinfo { #endif /* PRIVATE */ struct kqueue_fdinfo { - struct proc_fileinfo pfi; - struct kqueue_info kqueueinfo; + struct proc_fileinfo pfi; + struct kqueue_info kqueueinfo; }; struct appletalk_info { - struct vinfo_stat atalk_stat; + struct vinfo_stat atalk_stat; }; struct appletalk_fdinfo { - struct proc_fileinfo pfi; - struct appletalk_info appletalkinfo; + struct proc_fileinfo pfi; + struct appletalk_info appletalkinfo; }; typedef uint64_t proc_info_udata_t; /* defns of process file desc type */ -#define PROX_FDTYPE_ATALK 0 -#define PROX_FDTYPE_VNODE 1 -#define 
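
For descriptors, the per-fd analogue is proc_pidfdinfo(); PROC_PIDFDSOCKETINFO (defined below) returns the socket_fdinfo above, and soi_kind selects which arm of soi_proto is valid. A minimal sketch that pulls the TCP state of a socket fd:

    #include <libproc.h>

    /* Returns a TSI_S_* value, or -1 if fd is not a TCP socket. */
    static int
    tcp_state_of_fd(pid_t pid, int fd)
    {
    	struct socket_fdinfo si;

    	if (proc_pidfdinfo(pid, fd, PROC_PIDFDSOCKETINFO,
    	    &si, (int)sizeof(si)) != (int)sizeof(si))
    		return -1;
    	if (si.psi.soi_kind != SOCKINFO_TCP)
    		return -1;
    	return si.psi.soi_proto.pri_tcp.tcpsi_state;
    }
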
PROX_FDTYPE_SOCKET 2 -#define PROX_FDTYPE_PSHM 3 -#define PROX_FDTYPE_PSEM 4 -#define PROX_FDTYPE_KQUEUE 5 -#define PROX_FDTYPE_PIPE 6 -#define PROX_FDTYPE_FSEVENTS 7 -#define PROX_FDTYPE_NETPOLICY 9 +#define PROX_FDTYPE_ATALK 0 +#define PROX_FDTYPE_VNODE 1 +#define PROX_FDTYPE_SOCKET 2 +#define PROX_FDTYPE_PSHM 3 +#define PROX_FDTYPE_PSEM 4 +#define PROX_FDTYPE_KQUEUE 5 +#define PROX_FDTYPE_PIPE 6 +#define PROX_FDTYPE_FSEVENTS 7 +#define PROX_FDTYPE_NETPOLICY 9 struct proc_fdinfo { - int32_t proc_fd; - uint32_t proc_fdtype; + int32_t proc_fd; + uint32_t proc_fdtype; }; struct proc_fileportinfo { - uint32_t proc_fileport; - uint32_t proc_fdtype; + uint32_t proc_fileport; + uint32_t proc_fdtype; }; /* Flavors for proc_pidinfo() */ -#define PROC_PIDLISTFDS 1 -#define PROC_PIDLISTFD_SIZE (sizeof(struct proc_fdinfo)) +#define PROC_PIDLISTFDS 1 +#define PROC_PIDLISTFD_SIZE (sizeof(struct proc_fdinfo)) -#define PROC_PIDTASKALLINFO 2 -#define PROC_PIDTASKALLINFO_SIZE (sizeof(struct proc_taskallinfo)) +#define PROC_PIDTASKALLINFO 2 +#define PROC_PIDTASKALLINFO_SIZE (sizeof(struct proc_taskallinfo)) -#define PROC_PIDTBSDINFO 3 -#define PROC_PIDTBSDINFO_SIZE (sizeof(struct proc_bsdinfo)) +#define PROC_PIDTBSDINFO 3 +#define PROC_PIDTBSDINFO_SIZE (sizeof(struct proc_bsdinfo)) -#define PROC_PIDTASKINFO 4 -#define PROC_PIDTASKINFO_SIZE (sizeof(struct proc_taskinfo)) +#define PROC_PIDTASKINFO 4 +#define PROC_PIDTASKINFO_SIZE (sizeof(struct proc_taskinfo)) -#define PROC_PIDTHREADINFO 5 -#define PROC_PIDTHREADINFO_SIZE (sizeof(struct proc_threadinfo)) +#define PROC_PIDTHREADINFO 5 +#define PROC_PIDTHREADINFO_SIZE (sizeof(struct proc_threadinfo)) -#define PROC_PIDLISTTHREADS 6 -#define PROC_PIDLISTTHREADS_SIZE (2* sizeof(uint32_t)) +#define PROC_PIDLISTTHREADS 6 +#define PROC_PIDLISTTHREADS_SIZE (2* sizeof(uint32_t)) -#define PROC_PIDREGIONINFO 7 -#define PROC_PIDREGIONINFO_SIZE (sizeof(struct proc_regioninfo)) +#define PROC_PIDREGIONINFO 7 +#define PROC_PIDREGIONINFO_SIZE (sizeof(struct proc_regioninfo)) -#define PROC_PIDREGIONPATHINFO 8 -#define PROC_PIDREGIONPATHINFO_SIZE (sizeof(struct proc_regionwithpathinfo)) +#define PROC_PIDREGIONPATHINFO 8 +#define PROC_PIDREGIONPATHINFO_SIZE (sizeof(struct proc_regionwithpathinfo)) -#define PROC_PIDVNODEPATHINFO 9 -#define PROC_PIDVNODEPATHINFO_SIZE (sizeof(struct proc_vnodepathinfo)) +#define PROC_PIDVNODEPATHINFO 9 +#define PROC_PIDVNODEPATHINFO_SIZE (sizeof(struct proc_vnodepathinfo)) -#define PROC_PIDTHREADPATHINFO 10 -#define PROC_PIDTHREADPATHINFO_SIZE (sizeof(struct proc_threadwithpathinfo)) +#define PROC_PIDTHREADPATHINFO 10 +#define PROC_PIDTHREADPATHINFO_SIZE (sizeof(struct proc_threadwithpathinfo)) -#define PROC_PIDPATHINFO 11 -#define PROC_PIDPATHINFO_SIZE (MAXPATHLEN) -#define PROC_PIDPATHINFO_MAXSIZE (4*MAXPATHLEN) +#define PROC_PIDPATHINFO 11 +#define PROC_PIDPATHINFO_SIZE (MAXPATHLEN) +#define PROC_PIDPATHINFO_MAXSIZE (4*MAXPATHLEN) -#define PROC_PIDWORKQUEUEINFO 12 -#define PROC_PIDWORKQUEUEINFO_SIZE (sizeof(struct proc_workqueueinfo)) +#define PROC_PIDWORKQUEUEINFO 12 +#define PROC_PIDWORKQUEUEINFO_SIZE (sizeof(struct proc_workqueueinfo)) -#define PROC_PIDT_SHORTBSDINFO 13 -#define PROC_PIDT_SHORTBSDINFO_SIZE (sizeof(struct proc_bsdshortinfo)) +#define PROC_PIDT_SHORTBSDINFO 13 +#define PROC_PIDT_SHORTBSDINFO_SIZE (sizeof(struct proc_bsdshortinfo)) -#define PROC_PIDLISTFILEPORTS 14 -#define PROC_PIDLISTFILEPORTS_SIZE (sizeof(struct proc_fileportinfo)) +#define PROC_PIDLISTFILEPORTS 14 +#define PROC_PIDLISTFILEPORTS_SIZE (sizeof(struct 
proc_fileportinfo)) -#define PROC_PIDTHREADID64INFO 15 -#define PROC_PIDTHREADID64INFO_SIZE (sizeof(struct proc_threadinfo)) +#define PROC_PIDTHREADID64INFO 15 +#define PROC_PIDTHREADID64INFO_SIZE (sizeof(struct proc_threadinfo)) -#define PROC_PID_RUSAGE 16 -#define PROC_PID_RUSAGE_SIZE 0 +#define PROC_PID_RUSAGE 16 +#define PROC_PID_RUSAGE_SIZE 0 #ifdef PRIVATE -#define PROC_PIDUNIQIDENTIFIERINFO 17 +#define PROC_PIDUNIQIDENTIFIERINFO 17 #define PROC_PIDUNIQIDENTIFIERINFO_SIZE \ - (sizeof(struct proc_uniqidentifierinfo)) + (sizeof(struct proc_uniqidentifierinfo)) -#define PROC_PIDT_BSDINFOWITHUNIQID 18 +#define PROC_PIDT_BSDINFOWITHUNIQID 18 #define PROC_PIDT_BSDINFOWITHUNIQID_SIZE \ - (sizeof(struct proc_bsdinfowithuniqid)) + (sizeof(struct proc_bsdinfowithuniqid)) -#define PROC_PIDARCHINFO 19 -#define PROC_PIDARCHINFO_SIZE \ - (sizeof(struct proc_archinfo)) +#define PROC_PIDARCHINFO 19 +#define PROC_PIDARCHINFO_SIZE \ + (sizeof(struct proc_archinfo)) -#define PROC_PIDCOALITIONINFO 20 -#define PROC_PIDCOALITIONINFO_SIZE (sizeof(struct proc_pidcoalitioninfo)) +#define PROC_PIDCOALITIONINFO 20 +#define PROC_PIDCOALITIONINFO_SIZE (sizeof(struct proc_pidcoalitioninfo)) -#define PROC_PIDNOTEEXIT 21 -#define PROC_PIDNOTEEXIT_SIZE (sizeof(uint32_t)) +#define PROC_PIDNOTEEXIT 21 +#define PROC_PIDNOTEEXIT_SIZE (sizeof(uint32_t)) -#define PROC_PIDREGIONPATHINFO2 22 -#define PROC_PIDREGIONPATHINFO2_SIZE (sizeof(struct proc_regionwithpathinfo)) +#define PROC_PIDREGIONPATHINFO2 22 +#define PROC_PIDREGIONPATHINFO2_SIZE (sizeof(struct proc_regionwithpathinfo)) -#define PROC_PIDREGIONPATHINFO3 23 -#define PROC_PIDREGIONPATHINFO3_SIZE (sizeof(struct proc_regionwithpathinfo)) +#define PROC_PIDREGIONPATHINFO3 23 +#define PROC_PIDREGIONPATHINFO3_SIZE (sizeof(struct proc_regionwithpathinfo)) -#define PROC_PIDEXITREASONINFO 24 -#define PROC_PIDEXITREASONINFO_SIZE (sizeof(struct proc_exitreasoninfo)) +#define PROC_PIDEXITREASONINFO 24 +#define PROC_PIDEXITREASONINFO_SIZE (sizeof(struct proc_exitreasoninfo)) -#define PROC_PIDEXITREASONBASICINFO 25 -#define PROC_PIDEXITREASONBASICINFOSIZE (sizeof(struct proc_exitreasonbasicinfo)) +#define PROC_PIDEXITREASONBASICINFO 25 +#define PROC_PIDEXITREASONBASICINFOSIZE (sizeof(struct proc_exitreasonbasicinfo)) #define PROC_PIDLISTUPTRS 26 #define PROC_PIDLISTUPTRS_SIZE (sizeof(uint64_t)) @@ -795,70 +795,70 @@ struct proc_fileportinfo { #define PROC_PIDLISTDYNKQUEUES 27 #define PROC_PIDLISTDYNKQUEUES_SIZE (sizeof(kqueue_id_t)) -#define PROC_PIDLISTTHREADIDS 28 -#define PROC_PIDLISTTHREADIDS_SIZE (2* sizeof(uint32_t)) +#define PROC_PIDLISTTHREADIDS 28 +#define PROC_PIDLISTTHREADIDS_SIZE (2* sizeof(uint32_t)) -#define PROC_PIDVMRTFAULTINFO 29 +#define PROC_PIDVMRTFAULTINFO 29 #define PROC_PIDVMRTFAULTINFO_SIZE (7 * sizeof(uint64_t)) #endif /* PRIVATE */ /* Flavors for proc_pidfdinfo */ -#define PROC_PIDFDVNODEINFO 1 -#define PROC_PIDFDVNODEINFO_SIZE (sizeof(struct vnode_fdinfo)) +#define PROC_PIDFDVNODEINFO 1 +#define PROC_PIDFDVNODEINFO_SIZE (sizeof(struct vnode_fdinfo)) -#define PROC_PIDFDVNODEPATHINFO 2 -#define PROC_PIDFDVNODEPATHINFO_SIZE (sizeof(struct vnode_fdinfowithpath)) +#define PROC_PIDFDVNODEPATHINFO 2 +#define PROC_PIDFDVNODEPATHINFO_SIZE (sizeof(struct vnode_fdinfowithpath)) -#define PROC_PIDFDSOCKETINFO 3 -#define PROC_PIDFDSOCKETINFO_SIZE (sizeof(struct socket_fdinfo)) +#define PROC_PIDFDSOCKETINFO 3 +#define PROC_PIDFDSOCKETINFO_SIZE (sizeof(struct socket_fdinfo)) -#define PROC_PIDFDPSEMINFO 4 -#define PROC_PIDFDPSEMINFO_SIZE (sizeof(struct psem_fdinfo)) 
+#define PROC_PIDFDPSEMINFO 4 +#define PROC_PIDFDPSEMINFO_SIZE (sizeof(struct psem_fdinfo)) -#define PROC_PIDFDPSHMINFO 5 -#define PROC_PIDFDPSHMINFO_SIZE (sizeof(struct pshm_fdinfo)) +#define PROC_PIDFDPSHMINFO 5 +#define PROC_PIDFDPSHMINFO_SIZE (sizeof(struct pshm_fdinfo)) -#define PROC_PIDFDPIPEINFO 6 -#define PROC_PIDFDPIPEINFO_SIZE (sizeof(struct pipe_fdinfo)) +#define PROC_PIDFDPIPEINFO 6 +#define PROC_PIDFDPIPEINFO_SIZE (sizeof(struct pipe_fdinfo)) -#define PROC_PIDFDKQUEUEINFO 7 -#define PROC_PIDFDKQUEUEINFO_SIZE (sizeof(struct kqueue_fdinfo)) +#define PROC_PIDFDKQUEUEINFO 7 +#define PROC_PIDFDKQUEUEINFO_SIZE (sizeof(struct kqueue_fdinfo)) -#define PROC_PIDFDATALKINFO 8 -#define PROC_PIDFDATALKINFO_SIZE (sizeof(struct appletalk_fdinfo)) +#define PROC_PIDFDATALKINFO 8 +#define PROC_PIDFDATALKINFO_SIZE (sizeof(struct appletalk_fdinfo)) #ifdef PRIVATE -#define PROC_PIDFDKQUEUE_EXTINFO 9 -#define PROC_PIDFDKQUEUE_EXTINFO_SIZE (sizeof(struct kevent_extinfo)) -#define PROC_PIDFDKQUEUE_KNOTES_MAX (1024 * 128) -#define PROC_PIDDYNKQUEUES_MAX (1024 * 128) +#define PROC_PIDFDKQUEUE_EXTINFO 9 +#define PROC_PIDFDKQUEUE_EXTINFO_SIZE (sizeof(struct kevent_extinfo)) +#define PROC_PIDFDKQUEUE_KNOTES_MAX (1024 * 128) +#define PROC_PIDDYNKQUEUES_MAX (1024 * 128) #endif /* PRIVATE */ /* Flavors for proc_pidfileportinfo */ -#define PROC_PIDFILEPORTVNODEPATHINFO 2 /* out: vnode_fdinfowithpath */ -#define PROC_PIDFILEPORTVNODEPATHINFO_SIZE \ - PROC_PIDFDVNODEPATHINFO_SIZE +#define PROC_PIDFILEPORTVNODEPATHINFO 2 /* out: vnode_fdinfowithpath */ +#define PROC_PIDFILEPORTVNODEPATHINFO_SIZE \ + PROC_PIDFDVNODEPATHINFO_SIZE -#define PROC_PIDFILEPORTSOCKETINFO 3 /* out: socket_fdinfo */ -#define PROC_PIDFILEPORTSOCKETINFO_SIZE PROC_PIDFDSOCKETINFO_SIZE +#define PROC_PIDFILEPORTSOCKETINFO 3 /* out: socket_fdinfo */ +#define PROC_PIDFILEPORTSOCKETINFO_SIZE PROC_PIDFDSOCKETINFO_SIZE -#define PROC_PIDFILEPORTPSHMINFO 5 /* out: pshm_fdinfo */ -#define PROC_PIDFILEPORTPSHMINFO_SIZE PROC_PIDFDPSHMINFO_SIZE +#define PROC_PIDFILEPORTPSHMINFO 5 /* out: pshm_fdinfo */ +#define PROC_PIDFILEPORTPSHMINFO_SIZE PROC_PIDFDPSHMINFO_SIZE -#define PROC_PIDFILEPORTPIPEINFO 6 /* out: pipe_fdinfo */ -#define PROC_PIDFILEPORTPIPEINFO_SIZE PROC_PIDFDPIPEINFO_SIZE +#define PROC_PIDFILEPORTPIPEINFO 6 /* out: pipe_fdinfo */ +#define PROC_PIDFILEPORTPIPEINFO_SIZE PROC_PIDFDPIPEINFO_SIZE /* used for proc_setcontrol */ -#define PROC_SELFSET_PCONTROL 1 +#define PROC_SELFSET_PCONTROL 1 -#define PROC_SELFSET_THREADNAME 2 -#define PROC_SELFSET_THREADNAME_SIZE (MAXTHREADNAMESIZE -1) +#define PROC_SELFSET_THREADNAME 2 +#define PROC_SELFSET_THREADNAME_SIZE (MAXTHREADNAMESIZE -1) -#define PROC_SELFSET_VMRSRCOWNER 3 +#define PROC_SELFSET_VMRSRCOWNER 3 -#define PROC_SELFSET_DELAYIDLESLEEP 4 +#define PROC_SELFSET_DELAYIDLESLEEP 4 /* used for proc_dirtycontrol */ #define PROC_DIRTYCONTROL_TRACK 1 @@ -880,26 +880,26 @@ struct proc_fileportinfo { #define PROC_DIRTY_LAUNCH_IS_IN_PROGRESS 0x8 /* Flavors for proc_udata_info */ -#define PROC_UDATA_INFO_GET 1 -#define PROC_UDATA_INFO_SET 2 +#define PROC_UDATA_INFO_GET 1 +#define PROC_UDATA_INFO_SET 2 #ifdef PRIVATE /* Flavors for proc_pidoriginatorinfo */ -#define PROC_PIDORIGINATOR_UUID 0x1 -#define PROC_PIDORIGINATOR_UUID_SIZE (sizeof(uuid_t)) +#define PROC_PIDORIGINATOR_UUID 0x1 +#define PROC_PIDORIGINATOR_UUID_SIZE (sizeof(uuid_t)) -#define PROC_PIDORIGINATOR_BGSTATE 0x2 +#define PROC_PIDORIGINATOR_BGSTATE 0x2 #define PROC_PIDORIGINATOR_BGSTATE_SIZE (sizeof(uint32_t)) #define PROC_PIDORIGINATOR_PID_UUID 
0x3 #define PROC_PIDORIGINATOR_PID_UUID_SIZE (sizeof(struct proc_originatorinfo)) /* Flavors for proc_listcoalitions */ -#define LISTCOALITIONS_ALL_COALS 1 +#define LISTCOALITIONS_ALL_COALS 1 #define LISTCOALITIONS_ALL_COALS_SIZE (sizeof(struct procinfo_coalinfo)) -#define LISTCOALITIONS_SINGLE_TYPE 2 +#define LISTCOALITIONS_SINGLE_TYPE 2 #define LISTCOALITIONS_SINGLE_TYPE_SIZE (sizeof(struct procinfo_coalinfo)) /* reasons for proc_can_use_foreground_hw */ @@ -944,7 +944,7 @@ struct pshmnode; #endif #ifndef psemnode -struct psemnode ; +struct psemnode; #endif #ifndef pipe @@ -957,16 +957,16 @@ extern int fill_pseminfo(struct psemnode * psem, struct psem_info * pinfo); extern int fill_pipeinfo(struct pipe * cpipe, struct pipe_info * pinfo); extern int fill_kqueueinfo(struct kqueue * kq, struct kqueue_info * kinfo); extern int pid_kqueue_extinfo(proc_t, struct kqueue * kq, user_addr_t buffer, - uint32_t buffersize, int32_t * retval); + uint32_t buffersize, int32_t * retval); extern int pid_kqueue_udatainfo(proc_t p, struct kqueue *kq, uint64_t *buf, - uint32_t bufsize); + uint32_t bufsize); extern int pid_kqueue_listdynamickqueues(proc_t p, user_addr_t ubuf, - uint32_t bufsize, int32_t *retval); + uint32_t bufsize, int32_t *retval); extern int pid_dynamickqueue_extinfo(proc_t p, kqueue_id_t kq_id, - user_addr_t ubuf, uint32_t bufsize, int32_t *retval); + user_addr_t ubuf, uint32_t bufsize, int32_t *retval); extern int fill_procworkqueue(proc_t, struct proc_workqueueinfo *); extern boolean_t workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total, - boolean_t *exceeded_constrained); + boolean_t *exceeded_constrained); extern uint32_t workqueue_get_pwq_state_kdp(void *proc); #endif /* XNU_KERNEL_PRIVATE */ diff --git a/bsd/sys/proc_internal.h b/bsd/sys/proc_internal.h index c2aacbc96..adaef95ff 100644 --- a/bsd/sys/proc_internal.h +++ b/bsd/sys/proc_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ @@ -73,7 +73,7 @@ */ #ifndef _SYS_PROC_INTERNAL_H_ -#define _SYS_PROC_INTERNAL_H_ +#define _SYS_PROC_INTERNAL_H_ #include #include @@ -88,10 +88,10 @@ __BEGIN_DECLS __END_DECLS #if DEBUG -#define __PROC_INTERNAL_DEBUG 1 +#define __PROC_INTERNAL_DEBUG 1 #endif -/* +/* * The short form for various locks that protect fields in the data structures. 
* PL = Process Lock * PGL = Process Group Lock @@ -100,25 +100,25 @@ __END_DECLS * PSL = Process Spin Lock * LL = List Lock * SL = Session Lock -*/ + */ struct label; /* * One structure allocated per session. */ -struct session { - int s_count; /* Ref cnt; pgrps in session. (LL) */ - struct proc * s_leader; /* Session leader.(static) */ - struct vnode * s_ttyvp; /* Vnode of controlling terminal.(SL) */ - int s_ttyvid; /* Vnode id of the controlling terminal (SL) */ - struct tty * s_ttyp; /* Controlling terminal. (SL + ttyvp != NULL) */ - pid_t s_ttypgrpid; /* tty's pgrp id */ - pid_t s_sid; /* Session ID (static) */ - char s_login[MAXLOGNAME]; /* Setlogin() name.(SL) */ - int s_flags; /* Session flags (s_mlock) */ - LIST_ENTRY(session) s_hash; /* Hash chain.(LL) */ - lck_mtx_t s_mlock; /* mutex lock to protect session */ - int s_listflags; +struct session { + int s_count; /* Ref cnt; pgrps in session. (LL) */ + struct proc * s_leader; /* Session leader.(static) */ + struct vnode * s_ttyvp; /* Vnode of controlling terminal.(SL) */ + int s_ttyvid; /* Vnode id of the controlling terminal (SL) */ + struct tty * s_ttyp; /* Controlling terminal. (SL + ttyvp != NULL) */ + pid_t s_ttypgrpid; /* tty's pgrp id */ + pid_t s_sid; /* Session ID (static) */ + char s_login[MAXLOGNAME]; /* Setlogin() name.(SL) */ + int s_flags; /* Session flags (s_mlock) */ + LIST_ENTRY(session) s_hash; /* Hash chain.(LL) */ + lck_mtx_t s_mlock; /* mutex lock to protect session */ + int s_listflags; }; #define SESSION_NULL (struct session *)0 @@ -133,39 +133,39 @@ struct session { * NB: is not in scope and there is not typedef type enforcement, * or '0' below would be 'TTY_NULL'. */ -#define SESSION_TP(sp) (((sp)->s_ttyvp != 0) ? (sp)->s_ttyp : 0) +#define SESSION_TP(sp) (((sp)->s_ttyvp != 0) ? (sp)->s_ttyp : 0) /* * Session flags; used to tunnel information to lower layers and line * disciplines, etc. */ -#define S_DEFAULT 0x00000000 /* No flags set */ -#define S_NOCTTY 0x00000001 /* Do not associate controlling tty */ -#define S_CTTYREF 0x00000010 /* vnode ref taken by cttyopen */ +#define S_DEFAULT 0x00000000 /* No flags set */ +#define S_NOCTTY 0x00000001 /* Do not associate controlling tty */ +#define S_CTTYREF 0x00000010 /* vnode ref taken by cttyopen */ -#define S_LIST_TERM 1 /* marked for termination */ -#define S_LIST_DEAD 2 /* already dead */ +#define S_LIST_TERM 1 /* marked for termination */ +#define S_LIST_DEAD 2 /* already dead */ /* * One structure allocated per process group. */ -struct pgrp { - LIST_ENTRY(pgrp) pg_hash; /* Hash chain. (LL) */ - LIST_HEAD(, proc) pg_members; /* Pointer to pgrp members. (PGL) */ - struct session * pg_session; /* Pointer to session. (LL ) */ - pid_t pg_id; /* Pgrp id. (static) */ - int pg_jobc; /* # procs qualifying pgrp for job control (PGL) */ - int pg_membercnt; /* Number of processes in the pgrocess group (PGL) */ - int pg_refcount; /* number of current iterators (LL) */ - unsigned int pg_listflags; /* (LL) */ - lck_mtx_t pg_mlock; /* mutex lock to protect pgrp */ +struct pgrp { + LIST_ENTRY(pgrp) pg_hash; /* Hash chain. (LL) */ + LIST_HEAD(, proc) pg_members; /* Pointer to pgrp members. (PGL) */ + struct session * pg_session; /* Pointer to session. (LL ) */ + pid_t pg_id; /* Pgrp id. 
(static) */ + int pg_jobc; /* # procs qualifying pgrp for job control (PGL) */ + int pg_membercnt; /* Number of processes in the pgrocess group (PGL) */ + int pg_refcount; /* number of current iterators (LL) */ + unsigned int pg_listflags; /* (LL) */ + lck_mtx_t pg_mlock; /* mutex lock to protect pgrp */ }; -#define PGRP_FLAG_TERMINATE 1 +#define PGRP_FLAG_TERMINATE 1 #define PGRP_FLAG_WAITTERMINATE 2 -#define PGRP_FLAG_DEAD 4 -#define PGRP_FLAG_ITERABEGIN 8 -#define PGRP_FLAG_ITERWAIT 0x10 +#define PGRP_FLAG_DEAD 4 +#define PGRP_FLAG_ITERABEGIN 8 +#define PGRP_FLAG_ITERWAIT 0x10 #define PGRP_NULL (struct pgrp *)0 struct proc; @@ -191,176 +191,176 @@ struct proc; * which might be addressible only on a processor on which the process * is running. */ -struct proc { - LIST_ENTRY(proc) p_list; /* List of all processes. */ - - void * task; /* corresponding task (static)*/ - struct proc * p_pptr; /* Pointer to parent process.(LL) */ - pid_t p_ppid; /* process's parent pid number */ - pid_t p_pgrpid; /* process group id of the process (LL)*/ - uid_t p_uid; - gid_t p_gid; - uid_t p_ruid; - gid_t p_rgid; - uid_t p_svuid; - gid_t p_svgid; - uint64_t p_uniqueid; /* process unique ID - incremented on fork/spawn/vfork, remains same across exec. */ - uint64_t p_puniqueid; /* parent's unique ID - set on fork/spawn/vfork, doesn't change if reparented. */ - - lck_mtx_t p_mlock; /* mutex lock for proc */ - pid_t p_pid; /* Process identifier. (static)*/ - char p_stat; /* S* process status. (PL)*/ - char p_shutdownstate; - char p_kdebug; /* P_KDEBUG eq (CC)*/ - char p_btrace; /* P_BTRACE eq (CC)*/ - - LIST_ENTRY(proc) p_pglist; /* List of processes in pgrp.(PGL) */ - LIST_ENTRY(proc) p_sibling; /* List of sibling processes. (LL)*/ - LIST_HEAD(, proc) p_children; /* Pointer to list of children. (LL)*/ - TAILQ_HEAD( , uthread) p_uthlist; /* List of uthreads (PL) */ - - LIST_ENTRY(proc) p_hash; /* Hash chain. (LL)*/ - TAILQ_HEAD( ,eventqelt) p_evlist; /* (PL) */ +struct proc { + LIST_ENTRY(proc) p_list; /* List of all processes. */ + + void * task; /* corresponding task (static)*/ + struct proc * p_pptr; /* Pointer to parent process.(LL) */ + pid_t p_ppid; /* process's parent pid number */ + pid_t p_pgrpid; /* process group id of the process (LL)*/ + uid_t p_uid; + gid_t p_gid; + uid_t p_ruid; + gid_t p_rgid; + uid_t p_svuid; + gid_t p_svgid; + uint64_t p_uniqueid; /* process unique ID - incremented on fork/spawn/vfork, remains same across exec. */ + uint64_t p_puniqueid; /* parent's unique ID - set on fork/spawn/vfork, doesn't change if reparented. */ + + lck_mtx_t p_mlock; /* mutex lock for proc */ + pid_t p_pid; /* Process identifier. (static)*/ + char p_stat; /* S* process status. (PL)*/ + char p_shutdownstate; + char p_kdebug; /* P_KDEBUG eq (CC)*/ + char p_btrace; /* P_BTRACE eq (CC)*/ + + LIST_ENTRY(proc) p_pglist; /* List of processes in pgrp.(PGL) */ + LIST_ENTRY(proc) p_sibling; /* List of sibling processes. (LL)*/ + LIST_HEAD(, proc) p_children; /* Pointer to list of children. (LL)*/ + TAILQ_HEAD(, uthread) p_uthlist; /* List of uthreads (PL) */ + + LIST_ENTRY(proc) p_hash; /* Hash chain. 
(LL)*/ + TAILQ_HEAD(, eventqelt) p_evlist; /* (PL) */ #if CONFIG_PERSONAS struct persona *p_persona; LIST_ENTRY(proc) p_persona_list; #endif - lck_mtx_t p_fdmlock; /* proc lock to protect fdesc */ - lck_mtx_t p_ucred_mlock; /* mutex lock to protect p_ucred */ + lck_mtx_t p_fdmlock; /* proc lock to protect fdesc */ + lck_mtx_t p_ucred_mlock; /* mutex lock to protect p_ucred */ /* substructures: */ - kauth_cred_t p_ucred; /* Process owner's identity. (PUCL) */ - struct filedesc *p_fd; /* Ptr to open files structure. (PFDL) */ - struct pstats *p_stats; /* Accounting/statistics (PL). */ - struct plimit *p_limit; /* Process limits.(PL) */ - - struct sigacts *p_sigacts; /* Signal actions, state (PL) */ - lck_spin_t p_slock; /* spin lock for itimer/profil protection */ - -#define p_rlimit p_limit->pl_rlimit - - struct plimit *p_olimit; /* old process limits - not inherited by child (PL) */ - int p_siglist; /* signals captured back from threads */ - unsigned int p_flag; /* P_* flags. (atomic bit ops) */ - unsigned int p_lflag; /* local flags (PL) */ - unsigned int p_listflag; /* list flags (LL) */ - unsigned int p_ladvflag; /* local adv flags (atomic) */ - int p_refcount; /* number of outstanding users(LL) */ - int p_childrencnt; /* children holding ref on parent (LL) */ - int p_parentref; /* children lookup ref on parent (LL) */ - pid_t p_oppid; /* Save parent pid during ptrace. XXX */ - u_int p_xstat; /* Exit status for wait; also stop signal. */ + kauth_cred_t p_ucred; /* Process owner's identity. (PUCL) */ + struct filedesc *p_fd; /* Ptr to open files structure. (PFDL) */ + struct pstats *p_stats; /* Accounting/statistics (PL). */ + struct plimit *p_limit; /* Process limits.(PL) */ + + struct sigacts *p_sigacts; /* Signal actions, state (PL) */ + lck_spin_t p_slock; /* spin lock for itimer/profil protection */ + +#define p_rlimit p_limit->pl_rlimit + + struct plimit *p_olimit; /* old process limits - not inherited by child (PL) */ + int p_siglist; /* signals captured back from threads */ + unsigned int p_flag; /* P_* flags. (atomic bit ops) */ + unsigned int p_lflag; /* local flags (PL) */ + unsigned int p_listflag; /* list flags (LL) */ + unsigned int p_ladvflag; /* local adv flags (atomic) */ + int p_refcount; /* number of outstanding users(LL) */ + int p_childrencnt; /* children holding ref on parent (LL) */ + int p_parentref; /* children lookup ref on parent (LL) */ + pid_t p_oppid; /* Save parent pid during ptrace. XXX */ + u_int p_xstat; /* Exit status for wait; also stop signal. */ #ifdef _PROC_HAS_SCHEDINFO_ /* may need cleanup, not used */ - u_int p_estcpu; /* Time averaged value of p_cpticks.(used by aio and proc_comapre) */ - fixpt_t p_pctcpu; /* %cpu for this process during p_swtime (used by aio)*/ - u_int p_slptime; /* used by proc_compare */ + u_int p_estcpu; /* Time averaged value of p_cpticks.(used by aio and proc_comapre) */ + fixpt_t p_pctcpu; /* %cpu for this process during p_swtime (used by aio)*/ + u_int p_slptime; /* used by proc_compare */ #endif /* _PROC_HAS_SCHEDINFO_ */ - struct itimerval p_realtimer; /* Alarm timer. 
(PSL) */ - struct timeval p_rtime; /* Real time.(PSL) */ - struct itimerval p_vtimer_user; /* Virtual timers.(PSL) */ - struct itimerval p_vtimer_prof; /* (PSL) */ - - struct timeval p_rlim_cpu; /* Remaining rlim cpu value.(PSL) */ - int p_debugger; /* NU 1: can exec set-bit programs if suser */ - boolean_t sigwait; /* indication to suspend (PL) */ - void *sigwait_thread; /* 'thread' holding sigwait(PL) */ - void *exit_thread; /* Which thread is exiting(PL) */ - void * p_vforkact; /* activation running this vfork proc)(static) */ - int p_vforkcnt; /* number of outstanding vforks(PL) */ - int p_fpdrainwait; /* (PFDL) */ + struct itimerval p_realtimer; /* Alarm timer. (PSL) */ + struct timeval p_rtime; /* Real time.(PSL) */ + struct itimerval p_vtimer_user; /* Virtual timers.(PSL) */ + struct itimerval p_vtimer_prof; /* (PSL) */ + + struct timeval p_rlim_cpu; /* Remaining rlim cpu value.(PSL) */ + int p_debugger; /* NU 1: can exec set-bit programs if suser */ + boolean_t sigwait; /* indication to suspend (PL) */ + void *sigwait_thread; /* 'thread' holding sigwait(PL) */ + void *exit_thread; /* Which thread is exiting(PL) */ + void * p_vforkact; /* activation running this vfork proc)(static) */ + int p_vforkcnt; /* number of outstanding vforks(PL) */ + int p_fpdrainwait; /* (PFDL) */ /* Following fields are info from SIGCHLD (PL) */ - pid_t si_pid; /* (PL) */ - u_int si_status; /* (PL) */ - u_int si_code; /* (PL) */ - uid_t si_uid; /* (PL) */ + pid_t si_pid; /* (PL) */ + u_int si_status; /* (PL) */ + u_int si_code; /* (PL) */ + uid_t si_uid; /* (PL) */ - void * vm_shm; /* (SYSV SHM Lock) for sysV shared memory */ + void * vm_shm; /* (SYSV SHM Lock) for sysV shared memory */ #if CONFIG_DTRACE - user_addr_t p_dtrace_argv; /* (write once, read only after that) */ - user_addr_t p_dtrace_envp; /* (write once, read only after that) */ - lck_mtx_t p_dtrace_sprlock; /* sun proc lock emulation */ + user_addr_t p_dtrace_argv; /* (write once, read only after that) */ + user_addr_t p_dtrace_envp; /* (write once, read only after that) */ + lck_mtx_t p_dtrace_sprlock; /* sun proc lock emulation */ uint8_t p_dtrace_stop; /* indicates a DTrace-desired stop */ - int p_dtrace_probes; /* (PL) are there probes for this proc? */ - u_int p_dtrace_count; /* (sprlock) number of DTrace tracepoints */ - struct dtrace_ptss_page* p_dtrace_ptss_pages; /* (sprlock) list of user ptss pages */ - struct dtrace_ptss_page_entry* p_dtrace_ptss_free_list; /* (atomic) list of individual ptss entries */ - struct dtrace_helpers* p_dtrace_helpers; /* (dtrace_lock) DTrace per-proc private */ - struct dof_ioctl_data* p_dtrace_lazy_dofs; /* (sprlock) unloaded dof_helper_t's */ + int p_dtrace_probes; /* (PL) are there probes for this proc? */ + u_int p_dtrace_count; /* (sprlock) number of DTrace tracepoints */ + struct dtrace_ptss_page* p_dtrace_ptss_pages; /* (sprlock) list of user ptss pages */ + struct dtrace_ptss_page_entry* p_dtrace_ptss_free_list; /* (atomic) list of individual ptss entries */ + struct dtrace_helpers* p_dtrace_helpers; /* (dtrace_lock) DTrace per-proc private */ + struct dof_ioctl_data* p_dtrace_lazy_dofs; /* (sprlock) unloaded dof_helper_t's */ #endif /* CONFIG_DTRACE */ /* XXXXXXXXXXXXX BCOPY'ed on fork XXXXXXXXXXXXXXXX */ /* The following fields are all copied upon creation in fork. */ -#define p_startcopy p_argslen +#define p_startcopy p_argslen - u_int p_argslen; /* Length of process arguments. 
*/ - int p_argc; /* saved argc for sysctl_procargs() */ - user_addr_t user_stack; /* where user stack was allocated */ - struct vnode *p_textvp; /* Vnode of executable. */ - off_t p_textoff; /* offset in executable vnode */ + u_int p_argslen; /* Length of process arguments. */ + int p_argc; /* saved argc for sysctl_procargs() */ + user_addr_t user_stack; /* where user stack was allocated */ + struct vnode *p_textvp; /* Vnode of executable. */ + off_t p_textoff; /* offset in executable vnode */ - sigset_t p_sigmask; /* DEPRECATED */ - sigset_t p_sigignore; /* Signals being ignored. (PL) */ - sigset_t p_sigcatch; /* Signals being caught by user.(PL) */ + sigset_t p_sigmask; /* DEPRECATED */ + sigset_t p_sigignore; /* Signals being ignored. (PL) */ + sigset_t p_sigcatch; /* Signals being caught by user.(PL) */ - u_char p_priority; /* (NU) Process priority. */ - u_char p_resv0; /* (NU) User-priority based on p_cpu and p_nice. */ - char p_nice; /* Process "nice" value.(PL) */ - u_char p_resv1; /* (NU) User-priority based on p_cpu and p_nice. */ + u_char p_priority; /* (NU) Process priority. */ + u_char p_resv0; /* (NU) User-priority based on p_cpu and p_nice. */ + char p_nice; /* Process "nice" value.(PL) */ + u_char p_resv1; /* (NU) User-priority based on p_cpu and p_nice. */ // types currently in sys/param.h command_t p_comm; - proc_name_t p_name; /* can be changed by the process */ - uint8_t p_xhighbits; /* Stores the top byte of exit status to avoid truncation*/ - pid_t p_contproc; /* last PID to send us a SIGCONT (PL) */ + proc_name_t p_name; /* can be changed by the process */ + uint8_t p_xhighbits; /* Stores the top byte of exit status to avoid truncation*/ + pid_t p_contproc; /* last PID to send us a SIGCONT (PL) */ - struct pgrp *p_pgrp; /* Pointer to process group. (LL) */ - uint32_t p_csflags; /* flags for codesign (PL) */ - uint32_t p_pcaction; /* action for process control on starvation */ - uint8_t p_uuid[16]; /* from LC_UUID load command */ + struct pgrp *p_pgrp; /* Pointer to process group. (LL) */ + uint32_t p_csflags; /* flags for codesign (PL) */ + uint32_t p_pcaction; /* action for process control on starvation */ + uint8_t p_uuid[16]; /* from LC_UUID load command */ - /* + /* * CPU type and subtype of binary slice executed in * this process. Protected by proc lock. */ - cpu_type_t p_cputype; - cpu_subtype_t p_cpusubtype; + cpu_type_t p_cputype; + cpu_subtype_t p_cpusubtype; /* End area that is copied on creation. */ /* XXXXXXXXXXXXX End of BCOPY'ed on fork (AIOLOCK)XXXXXXXXXXXXXXXX */ -#define p_endcopy p_aio_total_count - int p_aio_total_count; /* all allocated AIO requests for this proc */ - int p_aio_active_count; /* all unfinished AIO requests for this proc */ - TAILQ_HEAD( , aio_workq_entry ) p_aio_activeq; /* active async IO requests */ - TAILQ_HEAD( , aio_workq_entry ) p_aio_doneq; /* completed async IO requests */ +#define p_endcopy p_aio_total_count + int p_aio_total_count; /* all allocated AIO requests for this proc */ + int p_aio_active_count; /* all unfinished AIO requests for this proc */ + TAILQ_HEAD(, aio_workq_entry ) p_aio_activeq; /* active async IO requests */ + TAILQ_HEAD(, aio_workq_entry ) p_aio_doneq; /* completed async IO requests */ struct klist p_klist; /* knote list (PL ?)*/ - struct rusage_superset *p_ru; /* Exit information. (PL) */ - thread_t p_signalholder; - thread_t p_transholder; - int p_sigwaitcnt; + struct rusage_superset *p_ru; /* Exit information. 
(PL) */ + thread_t p_signalholder; + thread_t p_transholder; + int p_sigwaitcnt; /* DEPRECATE following field */ - u_short p_acflag; /* Accounting flags. */ - volatile u_short p_vfs_iopolicy; /* VFS iopolicy flags. (atomic bit ops) */ - - user_addr_t p_threadstart; /* pthread start fn */ - user_addr_t p_wqthread; /* pthread workqueue fn */ - int p_pthsize; /* pthread size */ - uint32_t p_pth_tsd_offset; /* offset from pthread_t to TSD for new threads */ - user_addr_t p_stack_addr_hint; /* stack allocation hint for wq threads */ - struct workqueue *_Atomic p_wqptr; /* workq ptr */ - - struct timeval p_start; /* starting time */ - void * p_rcall; - int p_ractive; - int p_idversion; /* version of process identity */ - void * p_pthhash; /* pthread waitqueue hash */ + u_short p_acflag; /* Accounting flags. */ + volatile u_short p_vfs_iopolicy; /* VFS iopolicy flags. (atomic bit ops) */ + + user_addr_t p_threadstart; /* pthread start fn */ + user_addr_t p_wqthread; /* pthread workqueue fn */ + int p_pthsize; /* pthread size */ + uint32_t p_pth_tsd_offset; /* offset from pthread_t to TSD for new threads */ + user_addr_t p_stack_addr_hint; /* stack allocation hint for wq threads */ + struct workqueue *_Atomic p_wqptr; /* workq ptr */ + + struct timeval p_start; /* starting time */ + void * p_rcall; + int p_ractive; + int p_idversion; /* version of process identity */ + void * p_pthhash; /* pthread waitqueue hash */ volatile uint64_t was_throttled __attribute__((aligned(8))); /* Counter for number of throttled I/Os */ volatile uint64_t did_throttle __attribute__((aligned(8))); /* Counter for number of I/Os this proc throttled */ @@ -372,12 +372,12 @@ struct proc { unsigned int unlockpc[8]; #endif /* SIGNAL_DEBUG */ #endif /* DIAGNOSTIC */ - uint64_t p_dispatchqueue_offset; - uint64_t p_dispatchqueue_serialno_offset; - uint64_t p_return_to_kernel_offset; - uint64_t p_mach_thread_self_offset; + uint64_t p_dispatchqueue_offset; + uint64_t p_dispatchqueue_serialno_offset; + uint64_t p_return_to_kernel_offset; + uint64_t p_mach_thread_self_offset; #if VM_PRESSURE_EVENTS - struct timeval vm_pressure_last_notify_tstamp; + struct timeval vm_pressure_last_notify_tstamp; #endif #if CONFIG_MEMORYSTATUS @@ -390,94 +390,95 @@ struct proc { uint64_t p_memstat_userdata; /* user state */ uint64_t p_memstat_idledeadline; /* time at which process became clean */ uint64_t p_memstat_idle_start; /* abstime process transitions into the idle band */ - uint64_t p_memstat_idle_delta; /* abstime delta spent in idle band */ + uint64_t p_memstat_idle_delta; /* abstime delta spent in idle band */ int32_t p_memstat_memlimit; /* cached memory limit, toggles between active and inactive limits */ - int32_t p_memstat_memlimit_active; /* memory limit enforced when process is in active jetsam state */ - int32_t p_memstat_memlimit_inactive; /* memory limit enforced when process is in inactive jetsam state */ + int32_t p_memstat_memlimit_active; /* memory limit enforced when process is in active jetsam state */ + int32_t p_memstat_memlimit_inactive; /* memory limit enforced when process is in inactive jetsam state */ #if CONFIG_FREEZE uint32_t p_memstat_freeze_sharedanon_pages; /* shared pages left behind after freeze */ - uint32_t p_memstat_frozen_count; - uint32_t p_memstat_thaw_count; + uint32_t p_memstat_frozen_count; + uint32_t p_memstat_thaw_count; #endif /* CONFIG_FREEZE */ #endif /* CONFIG_MEMORYSTATUS */ /* cached proc-specific data required for corpse inspection */ - pid_t p_responsible_pid; /* pid resonsible for this process 
*/ + pid_t p_responsible_pid; /* pid resonsible for this process */ _Atomic uint32_t p_user_faults; /* count the number of user faults generated */ struct os_reason *p_exit_reason; #if !CONFIG_EMBEDDED - uint64_t p_user_data; /* general-purpose storage for userland-provided data */ + uint64_t p_user_data; /* general-purpose storage for userland-provided data */ #endif /* !CONFIG_EMBEDDED */ }; #define PGRPID_DEAD 0xdeaddead -/* p_listflag */ -#define P_LIST_DRAIN 0x00000001 -#define P_LIST_DRAINWAIT 0x00000002 -#define P_LIST_DRAINED 0x00000004 -#define P_LIST_DEAD 0x00000008 -#define P_LIST_WAITING 0x00000010 -#define P_LIST_EXITED 0x00000040 -#define P_LIST_CHILDDRSTART 0x00000080 -#define P_LIST_CHILDDRAINED 0x00000100 -#define P_LIST_CHILDDRWAIT 0x00000200 -#define P_LIST_CHILDLKWAIT 0x00000400 -#define P_LIST_DEADPARENT 0x00000800 -#define P_LIST_PARENTREFWAIT 0x00001000 -#define P_LIST_INCREATE 0x00002000 +/* p_listflag */ +#define P_LIST_DRAIN 0x00000001 +#define P_LIST_DRAINWAIT 0x00000002 +#define P_LIST_DRAINED 0x00000004 +#define P_LIST_DEAD 0x00000008 +#define P_LIST_WAITING 0x00000010 +#define P_LIST_EXITED 0x00000040 +#define P_LIST_CHILDDRSTART 0x00000080 +#define P_LIST_CHILDDRAINED 0x00000100 +#define P_LIST_CHILDDRWAIT 0x00000200 +#define P_LIST_CHILDLKWAIT 0x00000400 +#define P_LIST_DEADPARENT 0x00000800 +#define P_LIST_PARENTREFWAIT 0x00001000 +#define P_LIST_INCREATE 0x00002000 /* 0x4000 & 0x8000 Not used */ -#define P_LIST_INHASH 0x00010000 /* process is in hash */ -#define P_LIST_INPGRP 0x00020000 /* process is in pgrp */ -#define P_LIST_PGRPTRANS 0x00040000 /* pgrp is getting replaced */ -#define P_LIST_PGRPTRWAIT 0x00080000 /* wait for pgrp replacement */ -#define P_LIST_EXITCOUNT 0x00100000 /* counted for process exit */ -#define P_LIST_REFWAIT 0x00200000 /* wait to take a ref */ - +#define P_LIST_INHASH 0x00010000 /* process is in hash */ +#define P_LIST_INPGRP 0x00020000 /* process is in pgrp */ +#define P_LIST_PGRPTRANS 0x00040000 /* pgrp is getting replaced */ +#define P_LIST_PGRPTRWAIT 0x00080000 /* wait for pgrp replacement */ +#define P_LIST_EXITCOUNT 0x00100000 /* counted for process exit */ +#define P_LIST_REFWAIT 0x00200000 /* wait to take a ref */ + /* local flags */ -#define P_LDELAYTERM 0x00000001 /* */ -#define P_LNOZOMB 0x00000002 /* */ -#define P_LTERM 0x00000004 /* */ -#define P_LEXIT 0x00000008 /* */ -#define P_LPEXIT 0x00000010 -#define P_LTRANSCOMMIT 0x00000020 /* process is committed to trans */ -#define P_LINTRANSIT 0x00000040 /* process in exec or in creation */ -#define P_LTRANSWAIT 0x00000080 /* waiting for trans to complete */ +#define P_LDELAYTERM 0x00000001 /* */ +#define P_LNOZOMB 0x00000002 /* */ +#define P_LTERM 0x00000004 /* */ +#define P_LEXIT 0x00000008 /* */ +#define P_LPEXIT 0x00000010 +#define P_LTRANSCOMMIT 0x00000020 /* process is committed to trans */ +#define P_LINTRANSIT 0x00000040 /* process in exec or in creation */ +#define P_LTRANSWAIT 0x00000080 /* waiting for trans to complete */ #define P_LVFORK 0x00000100 /* parent proc of a vfork */ #define P_LINVFORK 0x00000200 /* child proc of a vfork */ #define P_LTRACED 0x00000400 /* */ #define P_LSIGEXC 0x00000800 /* */ #define P_LNOATTACH 0x00001000 /* */ #define P_LPPWAIT 0x00002000 /* */ -#define P_LKQWDRAIN 0x00004000 -#define P_LKQWDRAINWAIT 0x00008000 -#define P_LKQWDEAD 0x00010000 -#define P_LLIMCHANGE 0x00020000 -#define P_LLIMWAIT 0x00040000 -#define P_LWAITED 0x00080000 -#define P_LINSIGNAL 0x00100000 -#define P_LRAGE_VNODES 0x00400000 -#define P_LREGISTER 
0x00800000 /* thread start fns registered */ -#define P_LVMRSRCOWNER 0x01000000 /* can handle the resource ownership of */ -#define P_LTERM_DECRYPTFAIL 0x04000000 /* process terminating due to key failure to decrypt */ -#define P_LTERM_JETSAM 0x08000000 /* process is being jetsam'd */ -#define P_JETSAM_VMPAGESHORTAGE 0x00000000 /* jetsam: lowest jetsam priority proc, killed due to vm page shortage */ -#define P_JETSAM_VMTHRASHING 0x10000000 /* jetsam: lowest jetsam priority proc, killed due to vm thrashing */ -#define P_JETSAM_HIWAT 0x20000000 /* jetsam: high water mark */ -#define P_JETSAM_PID 0x30000000 /* jetsam: pid */ -#define P_JETSAM_IDLEEXIT 0x40000000 /* jetsam: idle exit */ -#define P_JETSAM_VNODE 0x50000000 /* jetsam: vnode kill */ -#define P_JETSAM_FCTHRASHING 0x60000000 /* jetsam: lowest jetsam priority proc, killed due to filecache thrashing */ -#define P_JETSAM_MASK 0x70000000 /* jetsam type mask */ +#define P_LKQWDRAIN 0x00004000 +#define P_LKQWDRAINWAIT 0x00008000 +#define P_LKQWDEAD 0x00010000 +#define P_LLIMCHANGE 0x00020000 +#define P_LLIMWAIT 0x00040000 +#define P_LWAITED 0x00080000 +#define P_LINSIGNAL 0x00100000 +#define P_LCUSTOM_STACK 0x00200000 /* process is using custom stack size */ +#define P_LRAGE_VNODES 0x00400000 +#define P_LREGISTER 0x00800000 /* thread start fns registered */ +#define P_LVMRSRCOWNER 0x01000000 /* can handle the resource ownership of */ +#define P_LTERM_DECRYPTFAIL 0x04000000 /* process terminating due to key failure to decrypt */ +#define P_LTERM_JETSAM 0x08000000 /* process is being jetsam'd */ +#define P_JETSAM_VMPAGESHORTAGE 0x00000000 /* jetsam: lowest jetsam priority proc, killed due to vm page shortage */ +#define P_JETSAM_VMTHRASHING 0x10000000 /* jetsam: lowest jetsam priority proc, killed due to vm thrashing */ +#define P_JETSAM_HIWAT 0x20000000 /* jetsam: high water mark */ +#define P_JETSAM_PID 0x30000000 /* jetsam: pid */ +#define P_JETSAM_IDLEEXIT 0x40000000 /* jetsam: idle exit */ +#define P_JETSAM_VNODE 0x50000000 /* jetsam: vnode kill */ +#define P_JETSAM_FCTHRASHING 0x60000000 /* jetsam: lowest jetsam priority proc, killed due to filecache thrashing */ +#define P_JETSAM_MASK 0x70000000 /* jetsam type mask */ /* Process control state for resource starvation */ -#define P_PCTHROTTLE 1 -#define P_PCSUSP 2 -#define P_PCKILL 3 -#define P_PCMAX 3 +#define P_PCTHROTTLE 1 +#define P_PCSUSP 2 +#define P_PCKILL 3 +#define P_PCMAX 3 /* Process control action state on resrouce starvation */ #define PROC_ACTION_MASK 0xffff0000; @@ -491,27 +492,27 @@ struct proc { #define PROC_EXITREASON_FLAGS(p) p->p_exit_reason->osr_flags /* additional process flags */ -#define P_LADVLOCK 0x01 -#define P_LXBKIDLEINPROG 0x02 +#define P_LADVLOCK 0x01 +#define P_LXBKIDLEINPROG 0x02 /* p_vfs_iopolicy flags */ -#define P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY 0x0001 -#define P_VFS_IOPOLICY_ATIME_UPDATES 0x0002 -#define P_VFS_IOPOLICY_VALID_MASK (P_VFS_IOPOLICY_ATIME_UPDATES | P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) +#define P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY 0x0001 +#define P_VFS_IOPOLICY_ATIME_UPDATES 0x0002 +#define P_VFS_IOPOLICY_VALID_MASK (P_VFS_IOPOLICY_ATIME_UPDATES | P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY) /* process creation arguments */ -#define PROC_CREATE_FORK 0 /* independent child (running) */ -#define PROC_CREATE_SPAWN 1 /* independent child (suspended) */ -#define PROC_CREATE_VFORK 2 /* child borrows context */ +#define PROC_CREATE_FORK 0 /* independent child (running) */ +#define PROC_CREATE_SPAWN 1 /* independent child 
(suspended) */ +#define PROC_CREATE_VFORK 2 /* child borrows context */ -/* LP64 version of extern_proc. all pointers +/* LP64 version of extern_proc. all pointers * grow when we're dealing with a 64-bit process. * WARNING - keep in sync with extern_proc * but use native alignment of 64-bit process. */ #ifdef KERNEL -#include /* user_timeval, user_itimerval */ +#include /* user_timeval, user_itimerval */ /* * This packing is required to ensure symmetry between userspace and kernelspace @@ -525,159 +526,157 @@ struct proc { struct user32_extern_proc { union { struct { - uint32_t __p_forw; /* Doubly-linked run/sleep queue. */ + uint32_t __p_forw; /* Doubly-linked run/sleep queue. */ uint32_t __p_back; } p_st1; - struct user32_timeval __p_starttime; /* process start time */ + struct user32_timeval __p_starttime; /* process start time */ } p_un; - uint32_t p_vmspace; /* Address space. */ - uint32_t p_sigacts; /* Signal actions, state (PROC ONLY). */ - int p_flag; /* P_* flags. */ - char p_stat; /* S* process status. */ - pid_t p_pid; /* Process identifier. */ - pid_t p_oppid; /* Save parent pid during ptrace. XXX */ - int p_dupfd; /* Sideways return value from fdopen. XXX */ + uint32_t p_vmspace; /* Address space. */ + uint32_t p_sigacts; /* Signal actions, state (PROC ONLY). */ + int p_flag; /* P_* flags. */ + char p_stat; /* S* process status. */ + pid_t p_pid; /* Process identifier. */ + pid_t p_oppid; /* Save parent pid during ptrace. XXX */ + int p_dupfd; /* Sideways return value from fdopen. XXX */ /* Mach related */ - uint32_t user_stack; /* where user stack was allocated */ + uint32_t user_stack; /* where user stack was allocated */ uint32_t exit_thread; /* XXX Which thread is exiting? */ - int p_debugger; /* allow to debug */ - boolean_t sigwait; /* indication to suspend */ + int p_debugger; /* allow to debug */ + boolean_t sigwait; /* indication to suspend */ /* scheduling */ - u_int p_estcpu; /* Time averaged value of p_cpticks. */ - int p_cpticks; /* Ticks of cpu time. */ - fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ - uint32_t p_wchan; /* Sleep address. */ - uint32_t p_wmesg; /* Reason for sleep. */ - u_int p_swtime; /* Time swapped in or out. */ - u_int p_slptime; /* Time since last blocked. */ - struct user32_itimerval p_realtimer; /* Alarm timer. */ - struct user32_timeval p_rtime; /* Real time. */ - u_quad_t p_uticks; /* Statclock hits in user mode. */ - u_quad_t p_sticks; /* Statclock hits in system mode. */ - u_quad_t p_iticks; /* Statclock hits processing intr. */ - int p_traceflag; /* Kernel trace points. */ - uint32_t p_tracep; /* Trace to vnode. */ - int p_siglist; /* DEPRECATED */ - uint32_t p_textvp; /* Vnode of executable. */ - int p_holdcnt; /* If non-zero, don't swap. */ - sigset_t p_sigmask; /* DEPRECATED. */ - sigset_t p_sigignore; /* Signals being ignored. */ - sigset_t p_sigcatch; /* Signals being caught by user. */ - u_char p_priority; /* Process priority. */ - u_char p_usrpri; /* User-priority based on p_cpu and p_nice. */ - char p_nice; /* Process "nice" value. */ - char p_comm[MAXCOMLEN+1]; - uint32_t p_pgrp; /* Pointer to process group. */ - uint32_t p_addr; /* Kernel virtual addr of u-area (PROC ONLY). */ - u_short p_xstat; /* Exit status for wait; also stop signal. */ - u_short p_acflag; /* Accounting flags. */ - uint32_t p_ru; /* Exit information. XXX */ + u_int p_estcpu; /* Time averaged value of p_cpticks. */ + int p_cpticks; /* Ticks of cpu time. 
*/ + fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ + uint32_t p_wchan; /* Sleep address. */ + uint32_t p_wmesg; /* Reason for sleep. */ + u_int p_swtime; /* Time swapped in or out. */ + u_int p_slptime; /* Time since last blocked. */ + struct user32_itimerval p_realtimer; /* Alarm timer. */ + struct user32_timeval p_rtime; /* Real time. */ + u_quad_t p_uticks; /* Statclock hits in user mode. */ + u_quad_t p_sticks; /* Statclock hits in system mode. */ + u_quad_t p_iticks; /* Statclock hits processing intr. */ + int p_traceflag; /* Kernel trace points. */ + uint32_t p_tracep; /* Trace to vnode. */ + int p_siglist; /* DEPRECATED */ + uint32_t p_textvp; /* Vnode of executable. */ + int p_holdcnt; /* If non-zero, don't swap. */ + sigset_t p_sigmask; /* DEPRECATED. */ + sigset_t p_sigignore; /* Signals being ignored. */ + sigset_t p_sigcatch; /* Signals being caught by user. */ + u_char p_priority; /* Process priority. */ + u_char p_usrpri; /* User-priority based on p_cpu and p_nice. */ + char p_nice; /* Process "nice" value. */ + char p_comm[MAXCOMLEN + 1]; + uint32_t p_pgrp; /* Pointer to process group. */ + uint32_t p_addr; /* Kernel virtual addr of u-area (PROC ONLY). */ + u_short p_xstat; /* Exit status for wait; also stop signal. */ + u_short p_acflag; /* Accounting flags. */ + uint32_t p_ru; /* Exit information. XXX */ }; #pragma pack() struct user64_extern_proc { union { struct { - user_addr_t __p_forw; /* Doubly-linked run/sleep queue. */ + user_addr_t __p_forw; /* Doubly-linked run/sleep queue. */ user_addr_t __p_back; } p_st1; - struct user64_timeval __p_starttime; /* process start time */ + struct user64_timeval __p_starttime; /* process start time */ } p_un; - user_addr_t p_vmspace; /* Address space. */ - user_addr_t p_sigacts; /* Signal actions, state (PROC ONLY). */ - int p_flag; /* P_* flags. */ - char p_stat; /* S* process status. */ - pid_t p_pid; /* Process identifier. */ - pid_t p_oppid; /* Save parent pid during ptrace. XXX */ - int p_dupfd; /* Sideways return value from fdopen. XXX */ + user_addr_t p_vmspace; /* Address space. */ + user_addr_t p_sigacts; /* Signal actions, state (PROC ONLY). */ + int p_flag; /* P_* flags. */ + char p_stat; /* S* process status. */ + pid_t p_pid; /* Process identifier. */ + pid_t p_oppid; /* Save parent pid during ptrace. XXX */ + int p_dupfd; /* Sideways return value from fdopen. XXX */ /* Mach related */ - user_addr_t user_stack __attribute((aligned(8))); /* where user stack was allocated */ + user_addr_t user_stack __attribute((aligned(8))); /* where user stack was allocated */ user_addr_t exit_thread; /* XXX Which thread is exiting? */ - int p_debugger; /* allow to debug */ - boolean_t sigwait; /* indication to suspend */ + int p_debugger; /* allow to debug */ + boolean_t sigwait; /* indication to suspend */ /* scheduling */ - u_int p_estcpu; /* Time averaged value of p_cpticks. */ - int p_cpticks; /* Ticks of cpu time. */ - fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ - user_addr_t p_wchan __attribute((aligned(8))); /* Sleep address. */ - user_addr_t p_wmesg; /* Reason for sleep. */ - u_int p_swtime; /* Time swapped in or out. */ - u_int p_slptime; /* Time since last blocked. */ - struct user64_itimerval p_realtimer; /* Alarm timer. */ - struct user64_timeval p_rtime; /* Real time. */ - u_quad_t p_uticks; /* Statclock hits in user mode. */ - u_quad_t p_sticks; /* Statclock hits in system mode. */ - u_quad_t p_iticks; /* Statclock hits processing intr. */ - int p_traceflag; /* Kernel trace points. 
*/ - user_addr_t p_tracep __attribute((aligned(8))); /* Trace to vnode. */ - int p_siglist; /* DEPRECATED */ - user_addr_t p_textvp __attribute((aligned(8))); /* Vnode of executable. */ - int p_holdcnt; /* If non-zero, don't swap. */ - sigset_t p_sigmask; /* DEPRECATED. */ - sigset_t p_sigignore; /* Signals being ignored. */ - sigset_t p_sigcatch; /* Signals being caught by user. */ - u_char p_priority; /* Process priority. */ - u_char p_usrpri; /* User-priority based on p_cpu and p_nice. */ - char p_nice; /* Process "nice" value. */ - char p_comm[MAXCOMLEN+1]; - user_addr_t p_pgrp __attribute((aligned(8))); /* Pointer to process group. */ - user_addr_t p_addr; /* Kernel virtual addr of u-area (PROC ONLY). */ - u_short p_xstat; /* Exit status for wait; also stop signal. */ - u_short p_acflag; /* Accounting flags. */ - user_addr_t p_ru __attribute((aligned(8))); /* Exit information. XXX */ + u_int p_estcpu; /* Time averaged value of p_cpticks. */ + int p_cpticks; /* Ticks of cpu time. */ + fixpt_t p_pctcpu; /* %cpu for this process during p_swtime */ + user_addr_t p_wchan __attribute((aligned(8))); /* Sleep address. */ + user_addr_t p_wmesg; /* Reason for sleep. */ + u_int p_swtime; /* Time swapped in or out. */ + u_int p_slptime; /* Time since last blocked. */ + struct user64_itimerval p_realtimer; /* Alarm timer. */ + struct user64_timeval p_rtime; /* Real time. */ + u_quad_t p_uticks; /* Statclock hits in user mode. */ + u_quad_t p_sticks; /* Statclock hits in system mode. */ + u_quad_t p_iticks; /* Statclock hits processing intr. */ + int p_traceflag; /* Kernel trace points. */ + user_addr_t p_tracep __attribute((aligned(8))); /* Trace to vnode. */ + int p_siglist; /* DEPRECATED */ + user_addr_t p_textvp __attribute((aligned(8))); /* Vnode of executable. */ + int p_holdcnt; /* If non-zero, don't swap. */ + sigset_t p_sigmask; /* DEPRECATED. */ + sigset_t p_sigignore; /* Signals being ignored. */ + sigset_t p_sigcatch; /* Signals being caught by user. */ + u_char p_priority; /* Process priority. */ + u_char p_usrpri; /* User-priority based on p_cpu and p_nice. */ + char p_nice; /* Process "nice" value. */ + char p_comm[MAXCOMLEN + 1]; + user_addr_t p_pgrp __attribute((aligned(8))); /* Pointer to process group. */ + user_addr_t p_addr; /* Kernel virtual addr of u-area (PROC ONLY). */ + u_short p_xstat; /* Exit status for wait; also stop signal. */ + u_short p_acflag; /* Accounting flags. */ + user_addr_t p_ru __attribute((aligned(8))); /* Exit information. XXX */ }; -#endif /* KERNEL */ +#endif /* KERNEL */ /* * We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t, * as it is used to represent "no process group". */ -extern int nprocs, maxproc; /* Current and max number of procs. */ -extern int maxprocperuid; /* Current number of procs per uid */ -extern int hard_maxproc; /* hard limit */ +extern int nprocs, maxproc; /* Current and max number of procs. 
*/ +extern int maxprocperuid; /* Current number of procs per uid */ +extern int hard_maxproc; /* hard limit */ extern unsigned int proc_shutdown_exitcount; -#define PID_MAX 99999 -#define NO_PID 100000 +#define PID_MAX 99999 +#define NO_PID 100000 extern lck_mtx_t * proc_list_mlock; extern lck_mtx_t * proc_klist_mlock; -#define BSD_SIMUL_EXECS 33 /* 32 , allow for rounding */ -#define BSD_PAGEABLE_SIZE_PER_EXEC (NCARGS + PAGE_SIZE + PAGE_SIZE) /* page for apple vars, page for executable header */ +#define BSD_SIMUL_EXECS 33 /* 32 , allow for rounding */ +#define BSD_PAGEABLE_SIZE_PER_EXEC (NCARGS + PAGE_SIZE + PAGE_SIZE) /* page for apple vars, page for executable header */ extern int execargs_cache_size; extern int execargs_free_count; extern vm_offset_t * execargs_cache; -#define SESS_LEADER(p, sessp) ((sessp)->s_leader == (p)) +#define SESS_LEADER(p, sessp) ((sessp)->s_leader == (p)) -#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) -extern LIST_HEAD(pidhashhead, proc) *pidhashtbl; +#define PIDHASH(pid) (&pidhashtbl[(pid) & pidhash]) +extern LIST_HEAD(pidhashhead, proc) * pidhashtbl; extern u_long pidhash; -#define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) -extern LIST_HEAD(pgrphashhead, pgrp) *pgrphashtbl; +#define PGRPHASH(pgid) (&pgrphashtbl[(pgid) & pgrphash]) +extern LIST_HEAD(pgrphashhead, pgrp) * pgrphashtbl; extern u_long pgrphash; -#define SESSHASH(sessid) (&sesshashtbl[(sessid) & sesshash]) -extern LIST_HEAD(sesshashhead, session) *sesshashtbl; +#define SESSHASH(sessid) (&sesshashtbl[(sessid) & sesshash]) +extern LIST_HEAD(sesshashhead, session) * sesshashtbl; extern u_long sesshash; extern lck_grp_t * proc_lck_grp; extern lck_grp_t * proc_fdmlock_grp; extern lck_grp_t * proc_kqhashlock_grp; extern lck_grp_t * proc_knhashlock_grp; -#if CONFIG_FINE_LOCK_GROUPS extern lck_grp_t * proc_mlock_grp; extern lck_grp_t * proc_ucred_mlock_grp; extern lck_grp_t * proc_slock_grp; -#endif extern lck_grp_attr_t * proc_lck_grp_attr; extern lck_attr_t * proc_lck_attr; LIST_HEAD(proclist, proc); -extern struct proclist allproc; /* List of all processes. */ -extern struct proclist zombproc; /* List of zombie processes. */ +extern struct proclist allproc; /* List of all processes. */ +extern struct proclist zombproc; /* List of zombie processes. */ extern struct proc *initproc; -extern void procinit(void); +extern void procinit(void); extern void proc_lock(struct proc *); extern void proc_unlock(struct proc *); extern void proc_spinlock(struct proc *); @@ -693,31 +692,31 @@ extern void proc_fdlock_assert(proc_t p, int assertflags); extern void proc_ucred_lock(struct proc *); extern void proc_ucred_unlock(struct proc *); __private_extern__ int proc_core_name(const char *name, uid_t uid, pid_t pid, - char *cr_name, size_t cr_name_len); + char *cr_name, size_t cr_name_len); extern int isinferior(struct proc *, struct proc *); -__private_extern__ struct proc *pzfind(pid_t); /* Find zombie by id. */ -__private_extern__ struct proc *proc_find_zombref(pid_t); /* Find zombie by id. */ -__private_extern__ void proc_drop_zombref(struct proc * p); /* Find zombie by id. 
*/ - - -extern int chgproccnt(uid_t uid, int diff); -extern void pinsertchild(struct proc *parent, struct proc *child); -extern int enterpgrp(struct proc *p, pid_t pgid, int mksess); -extern void fixjobc(struct proc *p, struct pgrp *pgrp, int entering); -extern int inferior(struct proc *p); -extern int leavepgrp(struct proc *p); -extern void resetpriority(struct proc *); -extern void setrunnable(struct proc *); -extern void setrunqueue(struct proc *); -extern int sleep(void *chan, int pri); -extern int tsleep0(void *chan, int pri, const char *wmesg, int timo, int (*continuation)(int)); -extern int tsleep1(void *chan, int pri, const char *wmesg, u_int64_t abstime, int (*continuation)(int)); -extern int msleep0(void *chan, lck_mtx_t *mtx, int pri, const char *wmesg, int timo, int (*continuation)(int)); -extern void vfork_return(struct proc *child, int32_t *retval, int rval); -extern int exit1(struct proc *, int, int *); -extern int exit1_internal(struct proc *, int, int *, boolean_t, boolean_t, int); -extern int exit_with_reason(struct proc *, int, int *, boolean_t, boolean_t, int, struct os_reason *); -extern int fork1(proc_t, thread_t *, int, coalition_t *); +__private_extern__ struct proc *pzfind(pid_t); /* Find zombie by id. */ +__private_extern__ struct proc *proc_find_zombref(pid_t); /* Find zombie by id. */ +__private_extern__ void proc_drop_zombref(struct proc * p); /* Find zombie by id. */ + + +extern int chgproccnt(uid_t uid, int diff); +extern void pinsertchild(struct proc *parent, struct proc *child); +extern int enterpgrp(struct proc *p, pid_t pgid, int mksess); +extern void fixjobc(struct proc *p, struct pgrp *pgrp, int entering); +extern int inferior(struct proc *p); +extern int leavepgrp(struct proc *p); +extern void resetpriority(struct proc *); +extern void setrunnable(struct proc *); +extern void setrunqueue(struct proc *); +extern int sleep(void *chan, int pri); +extern int tsleep0(void *chan, int pri, const char *wmesg, int timo, int (*continuation)(int)); +extern int tsleep1(void *chan, int pri, const char *wmesg, u_int64_t abstime, int (*continuation)(int)); +extern int msleep0(void *chan, lck_mtx_t *mtx, int pri, const char *wmesg, int timo, int (*continuation)(int)); +extern void vfork_return(struct proc *child, int32_t *retval, int rval); +extern int exit1(struct proc *, int, int *); +extern int exit1_internal(struct proc *, int, int *, boolean_t, boolean_t, int); +extern int exit_with_reason(struct proc *, int, int *, boolean_t, boolean_t, int, struct os_reason *); +extern int fork1(proc_t, thread_t *, int, coalition_t *); extern void vfork_exit_internal(struct proc *p, int rv, int forced); extern void proc_reparentlocked(struct proc *child, struct proc * newparent, int cansignal, int locked); @@ -743,7 +742,7 @@ extern void pgrp_unlock(struct pgrp * pgrp); extern void session_lock(struct session * sess); extern void session_unlock(struct session * sess); extern struct session * pgrp_session(struct pgrp * pgrp); -extern void session_rele(struct session *sess); +extern void session_rele(struct session *sess); extern int isbackground(proc_t p, struct tty *tp); extern proc_t proc_parent(proc_t); extern proc_t proc_parentholdref(proc_t); @@ -855,4 +854,4 @@ pid_t dtrace_proc_selfpid(void); pid_t dtrace_proc_selfppid(void); uid_t dtrace_proc_selfruid(void); -#endif /* !_SYS_PROC_INTERNAL_H_ */ +#endif /* !_SYS_PROC_INTERNAL_H_ */ diff --git a/bsd/sys/proc_uuid_policy.h b/bsd/sys/proc_uuid_policy.h index ca9a37a0b..b51feb1f2 100644 --- a/bsd/sys/proc_uuid_policy.h +++ 
b/bsd/sys/proc_uuid_policy.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -47,18 +47,18 @@ __BEGIN_DECLS * have a specified behavior for. */ -#define PROC_UUID_POLICY_OPERATION_CLEAR 0x00000000 -#define PROC_UUID_POLICY_OPERATION_ADD 0x00000001 -#define PROC_UUID_POLICY_OPERATION_REMOVE 0x00000002 +#define PROC_UUID_POLICY_OPERATION_CLEAR 0x00000000 +#define PROC_UUID_POLICY_OPERATION_ADD 0x00000001 +#define PROC_UUID_POLICY_OPERATION_REMOVE 0x00000002 /* The namespace of flags are managed by in-kernel clients */ -#define PROC_UUID_POLICY_FLAGS_NONE 0x00000000 -#define PROC_UUID_NO_CELLULAR 0x00000001 -#define PROC_UUID_NECP_APP_POLICY 0x00000002 -#define PROC_UUID_ALT_DYLD_POLICY 0x00000004 +#define PROC_UUID_POLICY_FLAGS_NONE 0x00000000 +#define PROC_UUID_NO_CELLULAR 0x00000001 +#define PROC_UUID_NECP_APP_POLICY 0x00000002 +#define PROC_UUID_ALT_DYLD_POLICY 0x00000004 /* To be removed, replaced by PROC_UUID_NECP_APP_POLICY */ -#define PROC_UUID_FLOW_DIVERT 0x00000002 +#define PROC_UUID_FLOW_DIVERT 0x00000002 #ifdef BSD_KERNEL_PRIVATE /* diff --git a/bsd/sys/process_policy.h b/bsd/sys/process_policy.h index ca679bf2f..4c2e3ce6b 100644 --- a/bsd/sys/process_policy.h +++ b/bsd/sys/process_policy.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
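/*
 * A hedged sketch of how the PROC_UUID_POLICY_OPERATION_* values and flag
 * bits above are exercised. proc_uuid_policy(2) is a private syscall with
 * no public prototype, so the raw syscall() form below -- and the
 * assumption that <sys/syscall.h> carries SYS_proc_uuid_policy -- is
 * illustrative only; unentitled callers are refused. The argument order
 * mirrors the kernel handler: (operation, uuid, uuidlen, flags).
 */
#include <sys/syscall.h>
#include <uuid/uuid.h>
#include <unistd.h>

/* Mark every future instance of the binary carrying this Mach-O LC_UUID
 * as "no cellular data" (PROC_UUID_NO_CELLULAR). Helper name is ours. */
static int deny_cellular(const uuid_t binary_uuid)
{
	return syscall(SYS_proc_uuid_policy, PROC_UUID_POLICY_OPERATION_ADD,
	    binary_uuid, sizeof(uuid_t), PROC_UUID_NO_CELLULAR);
}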
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -41,111 +41,111 @@ __BEGIN_DECLS /* defns of scope */ -#define PROC_POLICY_SCOPE_PROCESS 1 /* the policy setting is for process wide effect */ -#define PROC_POLICY_SCOPE_THREAD 2 /* the policy setting is for thread inside a proc */ +#define PROC_POLICY_SCOPE_PROCESS 1 /* the policy setting is for process wide effect */ +#define PROC_POLICY_SCOPE_THREAD 2 /* the policy setting is for thread inside a proc */ /* defns of actions with no attributes */ -#define PROC_POLICY_ACTION_APPLY 1 /* enforce the set policy */ -#define PROC_POLICY_ACTION_RESTORE 2 /* revert the applied action back */ -#define PROC_POLICY_ACTION_DENYINHERIT 3 /* set for no inheritence of the specified policy */ -#define PROC_POLICY_ACTION_DENYSELFSET 4 /* set for the process to set its own policy */ -#define PROC_POLICY_ACTION_ENABLE 5 /* enable policy and its actions */ -#define PROC_POLICY_ACTION_DISABLE 6 /* disable policy and its actions, also clears any actions that have already happened */ +#define PROC_POLICY_ACTION_APPLY 1 /* enforce the set policy */ +#define PROC_POLICY_ACTION_RESTORE 2 /* revert the applied action back */ +#define PROC_POLICY_ACTION_DENYINHERIT 3 /* set for no inheritence of the specified policy */ +#define PROC_POLICY_ACTION_DENYSELFSET 4 /* set for the process to set its own policy */ +#define PROC_POLICY_ACTION_ENABLE 5 /* enable policy and its actions */ +#define PROC_POLICY_ACTION_DISABLE 6 /* disable policy and its actions, also clears any actions that have already happened */ /* defns of actions with attributes */ -#define PROC_POLICY_ACTION_SET 10 /* set the policy attributes */ -#define PROC_POLICY_ACTION_GET 11 /* get the policy attributes */ -#define PROC_POLICY_ACTION_ADD 12 /* add a policy attribute */ -#define PROC_POLICY_ACTION_REMOVE 13 /* remove a policy attribute */ +#define PROC_POLICY_ACTION_SET 10 /* set the policy attributes */ +#define PROC_POLICY_ACTION_GET 11 /* get the policy attributes */ +#define PROC_POLICY_ACTION_ADD 12 /* add a policy attribute */ +#define PROC_POLICY_ACTION_REMOVE 13 /* remove a policy attribute */ #define PROC_POLICY_ACTION_HOLD 14 /* hold an importance boost assertion */ #define PROC_POLICY_ACTION_DROP 15 /* drop an importance boost assertion */ /* policies */ #define PROC_POLICY NONE 0 -#define PROC_POLICY_BACKGROUND 1 /* darwin background policy */ -#define PROC_POLICY_HARDWARE_ACCESS 2 /* access to various hardware */ -#define PROC_POLICY_RESOURCE_STARVATION 3 /* behavior on resource starvation */ -#define PROC_POLICY_RESOURCE_USAGE 4 /* behavior on resource consumption */ +#define PROC_POLICY_BACKGROUND 1 /* darwin background policy */ +#define PROC_POLICY_HARDWARE_ACCESS 2 /* access to various hardware */ +#define PROC_POLICY_RESOURCE_STARVATION 3 /* behavior on resource starvation */ +#define PROC_POLICY_RESOURCE_USAGE 4 /* behavior on resource consumption */ #if CONFIG_EMBEDDED || TARGET_OS_EMBEDDED -#define PROC_POLICY_APP_LIFECYCLE 5 /* app life cycle management */ +#define PROC_POLICY_APP_LIFECYCLE 5 /* app life cycle management */ #else /* CONFIG_EMBEDDED */ -#define PROC_POLICY_RESERVED 5 /* behavior on resource consumption */ +#define PROC_POLICY_RESERVED 5 /* behavior on resource consumption */ #endif /* CONFIG_EMBEDDED */ -#define PROC_POLICY_APPTYPE 6 /* behavior on resource consumption */ +#define PROC_POLICY_APPTYPE 6 /* behavior on resource consumption */ #define PROC_POLICY_BOOST 7 /* importance boost/drop */ /* sub policies for background policy */ -#define 
PROC_POLICY_BG_NONE 0 /* none */ -#define PROC_POLICY_BG_LOWCPUPRI 1 /* Low cpu priority */ -#define PROC_POLICY_BG_DISKTHROTTLE 2 /* disk accesses throttled */ -#define PROC_POLICY_BG_NETTHROTTLE 4 /* network accesses throttled */ -#define PROC_POLICY_BG_GPUDENY 8 /* no access to GPU */ +#define PROC_POLICY_BG_NONE 0 /* none */ +#define PROC_POLICY_BG_LOWCPUPRI 1 /* Low cpu priority */ +#define PROC_POLICY_BG_DISKTHROTTLE 2 /* disk accesses throttled */ +#define PROC_POLICY_BG_NETTHROTTLE 4 /* network accesses throttled */ +#define PROC_POLICY_BG_GPUDENY 8 /* no access to GPU */ #if CONFIG_EMBEDDED || TARGET_OS_EMBEDDED #define PROC_POLICY_BG_ALL 0x0F #else /* CONFIG_EMBEDDED */ #define PROC_POLICY_BG_ALL 0x07 #endif /* CONFIG_EMBEDDED */ -#define PROC_POLICY_BG_DEFAULT PROC_POLICY_BG_ALL +#define PROC_POLICY_BG_DEFAULT PROC_POLICY_BG_ALL /* sub policies for hardware */ -#define PROC_POLICY_HWACCESS_NONE 0 -#define PROC_POLICY_HWACCESS_DISK 1 /* disk access */ -#define PROC_POLICY_HWACCESS_GPU 2 /* GPU access */ -#define PROC_POLICY_HWACCESS_NETWORK 3 /* network access */ -#define PROC_POLICY_HWACCESS_CPU 4 /* cpu access */ +#define PROC_POLICY_HWACCESS_NONE 0 +#define PROC_POLICY_HWACCESS_DISK 1 /* disk access */ +#define PROC_POLICY_HWACCESS_GPU 2 /* GPU access */ +#define PROC_POLICY_HWACCESS_NETWORK 3 /* network access */ +#define PROC_POLICY_HWACCESS_CPU 4 /* cpu access */ /* attribute values for disk hardware access, a bit different as it should reflect IOPOL_XXX */ -#define PROC_POLICY_DISKACC_NONE 0 -#define PROC_POLICY_DISKACC_NORMAL 1 /* normal access to the disk */ -#define PROC_POLICY_DISKACC_FULLACCESS 1 /* normal access to the disk */ -#define PROC_POLICY_DISKACC_PASSIVE 2 /* treat the I/Os as passive */ -#define PROC_POLICY_DISKACC_THROTTLE 3 /* throttle the disk IOs */ -#define PROC_POLICY_DISKACC_DEFAULT PROC_POLICY_DISKACC_FULLACCESS +#define PROC_POLICY_DISKACC_NONE 0 +#define PROC_POLICY_DISKACC_NORMAL 1 /* normal access to the disk */ +#define PROC_POLICY_DISKACC_FULLACCESS 1 /* normal access to the disk */ +#define PROC_POLICY_DISKACC_PASSIVE 2 /* treat the I/Os as passive */ +#define PROC_POLICY_DISKACC_THROTTLE 3 /* throttle the disk IOs */ +#define PROC_POLICY_DISKACC_DEFAULT PROC_POLICY_DISKACC_FULLACCESS /* attribute values for GPU hardware access */ -#define PROC_POLICY_GPUACC_NONE 0 -#define PROC_POLICY_GPUACC_FULLACCESS 0 /* complete access to the GPU */ -#define PROC_POLICY_GPUACC_DENYACCESS 1 /* deny any access to the GPU */ -#define PROC_POLICY_GPUACC_DEFAULT PROC_POLICY_GPUACC_FULLACCESS /* default is complete access */ +#define PROC_POLICY_GPUACC_NONE 0 +#define PROC_POLICY_GPUACC_FULLACCESS 0 /* complete access to the GPU */ +#define PROC_POLICY_GPUACC_DENYACCESS 1 /* deny any access to the GPU */ +#define PROC_POLICY_GPUACC_DEFAULT PROC_POLICY_GPUACC_FULLACCESS /* default is complete access */ /* attribute values for network hardware access */ -#define PROC_POLICY_NETACC_NONE 0 -#define PROC_POLICY_NETACC_FULLACCESS 0 /* complete access to the network */ -#define PROC_POLICY_NETACC_THROTTLE 1 /* throttle access to network */ -#define PROC_POLICY_NETACC_DEFAULT PROC_POLICY_NETACC_FULLACCESS /* default is complete access */ +#define PROC_POLICY_NETACC_NONE 0 +#define PROC_POLICY_NETACC_FULLACCESS 0 /* complete access to the network */ +#define PROC_POLICY_NETACC_THROTTLE 1 /* throttle access to network */ +#define PROC_POLICY_NETACC_DEFAULT PROC_POLICY_NETACC_FULLACCESS /* default is complete access */ /* attribute values for cpu hardware access */ 
-#define PROC_POLICY_CPUACC_NONE 0 -#define PROC_POLICY_CPUACC_FULLACCESS 0 /* access to all available cpus */ -#define PROC_POLICY_CPUACC_ONE 1 /* access to only one available cpu */ -#define PROC_POLICY_CPUACC_LLCACHE 2 /* access to only one last level cache */ -#define PROC_POLICY_CPUACC_DEFAULT PROC_POLICY_CPUACC_FULLACCESS /* default is access to all cpus */ +#define PROC_POLICY_CPUACC_NONE 0 +#define PROC_POLICY_CPUACC_FULLACCESS 0 /* access to all available cpus */ +#define PROC_POLICY_CPUACC_ONE 1 /* access to only one available cpu */ +#define PROC_POLICY_CPUACC_LLCACHE 2 /* access to only one last level cache */ +#define PROC_POLICY_CPUACC_DEFAULT PROC_POLICY_CPUACC_FULLACCESS /* default is access to all cpus */ /* System Resource management (i.e. usage and starvation related) definitions */ /* sub policies for resource starvation */ -#define PROC_POLICY_RS_NONE 0 -#define PROC_POLICY_RS_VIRTUALMEM 1 /* virtual memory starvation */ +#define PROC_POLICY_RS_NONE 0 +#define PROC_POLICY_RS_VIRTUALMEM 1 /* virtual memory starvation */ /* sub policies for resource usage */ -#define PROC_POLICY_RUSAGE_NONE 0 -#define PROC_POLICY_RUSAGE_WIREDMEM 1 /* wired memory usage */ -#define PROC_POLICY_RUSAGE_VIRTMEM 2 /* virtual memory usage */ -#define PROC_POLICY_RUSAGE_CPU 3 /* amount of cpu usage */ -#define PROC_POLICY_RUSAGE_DISK 4 /* amount of disk usage */ -#define PROC_POLICY_RUSAGE_NETWORK 5 /* amount of network usage */ -#define PROC_POLICY_RUSAGE_POWER 6 /* amount of power/battery consumption */ +#define PROC_POLICY_RUSAGE_NONE 0 +#define PROC_POLICY_RUSAGE_WIREDMEM 1 /* wired memory usage */ +#define PROC_POLICY_RUSAGE_VIRTMEM 2 /* virtual memory usage */ +#define PROC_POLICY_RUSAGE_CPU 3 /* amount of cpu usage */ +#define PROC_POLICY_RUSAGE_DISK 4 /* amount of disk usage */ +#define PROC_POLICY_RUSAGE_NETWORK 5 /* amount of network usage */ +#define PROC_POLICY_RUSAGE_POWER 6 /* amount of power/battery consumption */ /* attribute values for the resource usage and low resource - MUST match corresponding task definitions */ -#define PROC_POLICY_RSRCACT_NONE 0 -#define PROC_POLICY_RSRCACT_THROTTLE 1 /* throttle on resource condition */ -#define PROC_POLICY_RSRCACT_SUSPEND 2 /* suspend on resource condition */ -#define PROC_POLICY_RSRCACT_TERMINATE 3 /* kill on resource condition */ -#define PROC_POLICY_RSRCACT_NOTIFY_KQ 4 /* send kqueue notification */ -#define PROC_POLICY_RSRCACT_NOTIFY_EXC 5 /* send exception */ +#define PROC_POLICY_RSRCACT_NONE 0 +#define PROC_POLICY_RSRCACT_THROTTLE 1 /* throttle on resource condition */ +#define PROC_POLICY_RSRCACT_SUSPEND 2 /* suspend on resource condition */ +#define PROC_POLICY_RSRCACT_TERMINATE 3 /* kill on resource condition */ +#define PROC_POLICY_RSRCACT_NOTIFY_KQ 4 /* send kqueue notification */ +#define PROC_POLICY_RSRCACT_NOTIFY_EXC 5 /* send exception */ -#define PROC_POLICY_CPUMON_DISABLE 0xFF /* Disable CPU usage monitor */ -#define PROC_POLICY_CPUMON_DEFAULTS 0xFE /* Set default CPU usage monitor params */ +#define PROC_POLICY_CPUMON_DISABLE 0xFF /* Disable CPU usage monitor */ +#define PROC_POLICY_CPUMON_DEFAULTS 0xFE /* Set default CPU usage monitor params */ /* sub policies for importance boost/drop */ #define PROC_POLICY_IMP_IMPORTANT 1 /* Important-level boost */ @@ -153,35 +153,35 @@ __BEGIN_DECLS #define PROC_POLICY_IMP_DONATION 3 /* Mark a task as an importance source */ typedef struct proc_policy_attribute { - uint32_t ppattr_attribute; /* the policy attribute to be modified or returned */ - uint32_t ppattr_resv; /* pad 
field */ - uint64_t ppattr_value1; /* 64bit policy specific attribute */ - uint64_t ppattr_value2; /* 64bit policy specific attribute */ - uint64_t ppattr_value3; /* 64bit policy specific attribute */ - uint64_t ppattr_resv1[4]; /* reserved for future use */ + uint32_t ppattr_attribute; /* the policy attribute to be modified or returned */ + uint32_t ppattr_resv; /* pad field */ + uint64_t ppattr_value1; /* 64bit policy specific attribute */ + uint64_t ppattr_value2; /* 64bit policy specific attribute */ + uint64_t ppattr_value3; /* 64bit policy specific attribute */ + uint64_t ppattr_resv1[4]; /* reserved for future use */ } proc_policy_attribute_t; typedef struct proc_policy_cpuusage_attr { - uint32_t ppattr_cpu_attr ; /* specified action as in PROC_POLICY_RSRCACT_xx */ - uint32_t ppattr_cpu_percentage; /* percentage of interval */ - uint64_t ppattr_cpu_attr_interval; /* 64bit interval in nsecs */ - uint64_t ppattr_cpu_attr_deadline; /* 64bit deadline in nsecs */ + uint32_t ppattr_cpu_attr; /* specified action as in PROC_POLICY_RSRCACT_xx */ + uint32_t ppattr_cpu_percentage; /* percentage of interval */ + uint64_t ppattr_cpu_attr_interval; /* 64bit interval in nsecs */ + uint64_t ppattr_cpu_attr_deadline; /* 64bit deadline in nsecs */ } proc_policy_cpuusage_attr_t; #if CONFIG_EMBEDDED || TARGET_OS_EMBEDDED /* sub policies for app lifecycle management */ -#define PROC_POLICY_APPLIFE_NONE 0 /* does nothing.. */ -#define PROC_POLICY_APPLIFE_STATE 1 /* sets the app to various lifecycle states */ -#define PROC_POLICY_APPLIFE_DEVSTATUS 2 /* notes the device in inactive or short/long term */ -#define PROC_POLICY_APPLIFE_PIDBIND 3 /* a thread is to be bound to another process's app state */ +#define PROC_POLICY_APPLIFE_NONE 0 /* does nothing.. */ +#define PROC_POLICY_APPLIFE_STATE 1 /* sets the app to various lifecycle states */ +#define PROC_POLICY_APPLIFE_DEVSTATUS 2 /* notes the device in inactive or short/long term */ +#define PROC_POLICY_APPLIFE_PIDBIND 3 /* a thread is to be bound to another process's app state */ #endif /* CONFIG_EMBEDDED */ /* sub policies for PROC_POLICY_APPTYPE */ -#define PROC_POLICY_APPTYPE_NONE 0 /* does nothing.. */ -#define PROC_POLICY_APPTYPE_MODIFY 1 /* sets the app to various lifecycle states */ +#define PROC_POLICY_APPTYPE_NONE 0 /* does nothing.. */ +#define PROC_POLICY_APPTYPE_MODIFY 1 /* sets the app to various lifecycle states */ #if CONFIG_EMBEDDED || TARGET_OS_EMBEDDED -#define PROC_POLICY_APPTYPE_THREADTHR 2 /* notes the device in inactive or short/long term */ +#define PROC_POLICY_APPTYPE_THREADTHR 2 /* notes the device in inactive or short/long term */ #endif /* CONFIG_EMBEDDED */ /* exported apptypes for PROC_POLICY_APPTYPE */ diff --git a/bsd/sys/protosw.h b/bsd/sys/protosw.h index 9a9311ee6..0beec9bc5 100644 --- a/bsd/sys/protosw.h +++ b/bsd/sys/protosw.h @@ -64,13 +64,13 @@ */ #ifndef _SYS_PROTOSW_H_ -#define _SYS_PROTOSW_H_ +#define _SYS_PROTOSW_H_ #include #include /* XXX: this will go away */ -#define PR_SLOWHZ 2 /* 2 slow timeouts per second */ +#define PR_SLOWHZ 2 /* 2 slow timeouts per second */ /* * The arguments to the ctlinput routine are @@ -78,32 +78,32 @@ * where cmd is one of the commands below, sa is a pointer to a sockaddr, * and arg is a `void *' argument used within a protocol family. */ -#define PRC_IFDOWN 0 /* interface transition */ -#define PRC_ROUTEDEAD 1 /* select new route if possible ??? 
*/ -#define PRC_IFUP 2 /* interface has come back up */ -#define PRC_QUENCH2 3 /* DEC congestion bit says slow down */ -#define PRC_QUENCH 4 /* someone said to slow down */ -#define PRC_MSGSIZE 5 /* message size forced drop */ -#define PRC_HOSTDEAD 6 /* host appears to be down */ -#define PRC_HOSTUNREACH 7 /* deprecated (use PRC_UNREACH_HOST) */ -#define PRC_UNREACH_NET 8 /* no route to network */ -#define PRC_UNREACH_HOST 9 /* no route to host */ -#define PRC_UNREACH_PROTOCOL 10 /* dst says bad protocol */ -#define PRC_UNREACH_PORT 11 /* bad port # */ +#define PRC_IFDOWN 0 /* interface transition */ +#define PRC_ROUTEDEAD 1 /* select new route if possible ??? */ +#define PRC_IFUP 2 /* interface has come back up */ +#define PRC_QUENCH2 3 /* DEC congestion bit says slow down */ +#define PRC_QUENCH 4 /* someone said to slow down */ +#define PRC_MSGSIZE 5 /* message size forced drop */ +#define PRC_HOSTDEAD 6 /* host appears to be down */ +#define PRC_HOSTUNREACH 7 /* deprecated (use PRC_UNREACH_HOST) */ +#define PRC_UNREACH_NET 8 /* no route to network */ +#define PRC_UNREACH_HOST 9 /* no route to host */ +#define PRC_UNREACH_PROTOCOL 10 /* dst says bad protocol */ +#define PRC_UNREACH_PORT 11 /* bad port # */ /* was PRC_UNREACH_NEEDFRAG 12 (use PRC_MSGSIZE) */ -#define PRC_UNREACH_SRCFAIL 13 /* source route failed */ -#define PRC_REDIRECT_NET 14 /* net routing redirect */ -#define PRC_REDIRECT_HOST 15 /* host routing redirect */ -#define PRC_REDIRECT_TOSNET 16 /* redirect for type of service & net */ -#define PRC_REDIRECT_TOSHOST 17 /* redirect for tos & host */ -#define PRC_TIMXCEED_INTRANS 18 /* packet lifetime expired in transit */ -#define PRC_TIMXCEED_REASS 19 /* lifetime expired on reass q */ -#define PRC_PARAMPROB 20 /* header incorrect */ -#define PRC_UNREACH_ADMIN_PROHIB 21 /* packet administratively prohibited */ +#define PRC_UNREACH_SRCFAIL 13 /* source route failed */ +#define PRC_REDIRECT_NET 14 /* net routing redirect */ +#define PRC_REDIRECT_HOST 15 /* host routing redirect */ +#define PRC_REDIRECT_TOSNET 16 /* redirect for type of service & net */ +#define PRC_REDIRECT_TOSHOST 17 /* redirect for tos & host */ +#define PRC_TIMXCEED_INTRANS 18 /* packet lifetime expired in transit */ +#define PRC_TIMXCEED_REASS 19 /* lifetime expired on reass q */ +#define PRC_PARAMPROB 20 /* header incorrect */ +#define PRC_UNREACH_ADMIN_PROHIB 21 /* packet administratively prohibited */ -#define PRC_NCMDS 22 +#define PRC_NCMDS 22 -#define PRC_IS_REDIRECT(cmd) \ +#define PRC_IS_REDIRECT(cmd) \ ((cmd) >= PRC_REDIRECT_NET && (cmd) <= PRC_REDIRECT_TOSHOST) #ifdef BSD_KERNEL_PRIVATE @@ -145,55 +145,55 @@ struct protosw_old { #else struct protosw { #endif /* !XNU_KERNEL_PRIVATE */ - short pr_type; /* socket type used for */ - struct domain *pr_domain; /* domain protocol a member of */ - short pr_protocol; /* protocol number */ - unsigned int pr_flags; /* see below */ + short pr_type; /* socket type used for */ + struct domain *pr_domain; /* domain protocol a member of */ + short pr_protocol; /* protocol number */ + unsigned int pr_flags; /* see below */ /* * protocol-protocol hooks */ - void (*pr_input) /* input to protocol (from below) */ - (struct mbuf *, int len); - int (*pr_output) /* output to protocol (from above) */ - (struct mbuf *m, struct socket *so); - void (*pr_ctlinput) /* control input (from below) */ - (int, struct sockaddr *, void *, struct ifnet *); - int (*pr_ctloutput) /* control output (from above) */ - (struct socket *, struct sockopt *); + void (*pr_input) /* input to 
protocol (from below) */ + (struct mbuf *, int len); + int (*pr_output) /* output to protocol (from above) */ + (struct mbuf *m, struct socket *so); + void (*pr_ctlinput) /* control input (from below) */ + (int, struct sockaddr *, void *, struct ifnet *); + int (*pr_ctloutput) /* control output (from above) */ + (struct socket *, struct sockopt *); /* * user-protocol hook */ - void *pr_ousrreq; + void *pr_ousrreq; /* * utility hooks */ - void (*pr_init)(void); /* initialization hook */ - void (*pr_unused)(void); /* placeholder - fasttimo is removed */ - void (*pr_unused2)(void); /* placeholder - slowtimo is removed */ - void (*pr_drain)(void); /* flush any excess space possible */ - int (*pr_sysctl) /* sysctl for protocol */ - (int *, u_int, void *, size_t *, void *, size_t); + void (*pr_init)(void); /* initialization hook */ + void (*pr_unused)(void); /* placeholder - fasttimo is removed */ + void (*pr_unused2)(void); /* placeholder - slowtimo is removed */ + void (*pr_drain)(void); /* flush any excess space possible */ + int (*pr_sysctl) /* sysctl for protocol */ + (int *, u_int, void *, size_t *, void *, size_t); #ifdef XNU_KERNEL_PRIVATE - struct pr_usrreqs_old *pr_usrreqs; /* supersedes pr_usrreq() */ + struct pr_usrreqs_old *pr_usrreqs; /* supersedes pr_usrreq() */ #else - struct pr_usrreqs *pr_usrreqs; /* supersedes pr_usrreq() */ + struct pr_usrreqs *pr_usrreqs; /* supersedes pr_usrreq() */ #endif /* !XNU_KERNEL_PRIVATE */ - int (*pr_lock) /* lock function for protocol */ - (struct socket *so, int refcnt, void *debug); - int (*pr_unlock) /* unlock for protocol */ - (struct socket *so, int refcnt, void *debug); - lck_mtx_t *(*pr_getlock) /* retrieve protocol lock */ - (struct socket *so, int flags); + int (*pr_lock) /* lock function for protocol */ + (struct socket *so, int refcnt, void *debug); + int (*pr_unlock) /* unlock for protocol */ + (struct socket *so, int refcnt, void *debug); + lck_mtx_t *(*pr_getlock) /* retrieve protocol lock */ + (struct socket *so, int flags); /* * Implant hooks */ TAILQ_HEAD(, socket_filter) pr_filter_head; #ifdef XNU_KERNEL_PRIVATE - struct protosw_old *pr_next; /* chain for domain */ + struct protosw_old *pr_next; /* chain for domain */ #else - struct protosw *pr_next; /* chain for domain */ + struct protosw *pr_next; /* chain for domain */ #endif /* !XNU_KERNEL_PRIVATE */ - u_int32_t reserved[1]; /* padding for future use */ + u_int32_t reserved[1]; /* padding for future use */ }; #pragma pack() @@ -230,40 +230,40 @@ struct protosw { */ struct protosw { TAILQ_ENTRY(protosw) pr_entry; /* chain for domain */ - struct domain *pr_domain; /* domain protocol a member of */ - struct protosw *pr_protosw; /* pointer to self */ - u_int16_t pr_type; /* socket type used for */ - u_int16_t pr_protocol; /* protocol number */ - u_int32_t pr_flags; /* see below */ + struct domain *pr_domain; /* domain protocol a member of */ + struct protosw *pr_protosw; /* pointer to self */ + u_int16_t pr_type; /* socket type used for */ + u_int16_t pr_protocol; /* protocol number */ + u_int32_t pr_flags; /* see below */ /* * protocol-protocol hooks */ - void (*pr_input) /* input to protocol (from below) */ - (struct mbuf *, int len); - int (*pr_output) /* output to protocol (from above) */ - (struct mbuf *m, struct socket *so); - void (*pr_ctlinput) /* control input (from below) */ - (int, struct sockaddr *, void *, struct ifnet *); - int (*pr_ctloutput) /* control output (from above) */ - (struct socket *, struct sockopt *); + void (*pr_input) /* input to protocol (from 
below) */ + (struct mbuf *, int len); + int (*pr_output) /* output to protocol (from above) */ + (struct mbuf *m, struct socket *so); + void (*pr_ctlinput) /* control input (from below) */ + (int, struct sockaddr *, void *, struct ifnet *); + int (*pr_ctloutput) /* control output (from above) */ + (struct socket *, struct sockopt *); /* * user-protocol hook */ - struct pr_usrreqs *pr_usrreqs; /* user request; see list below */ + struct pr_usrreqs *pr_usrreqs; /* user request; see list below */ /* * utility hooks */ - void (*pr_init) /* initialization hook */ - (struct protosw *, struct domain *); - void (*pr_drain)(void); /* flush any excess space possible */ - int (*pr_sysctl) /* sysctl for protocol */ - (int *, u_int, void *, size_t *, void *, size_t); - int (*pr_lock) /* lock function for protocol */ - (struct socket *so, int refcnt, void *debug); - int (*pr_unlock) /* unlock for protocol */ - (struct socket *so, int refcnt, void *debug); - lck_mtx_t *(*pr_getlock) /* retrieve protocol lock */ - (struct socket *so, int flags); + void (*pr_init) /* initialization hook */ + (struct protosw *, struct domain *); + void (*pr_drain)(void); /* flush any excess space possible */ + int (*pr_sysctl) /* sysctl for protocol */ + (int *, u_int, void *, size_t *, void *, size_t); + int (*pr_lock) /* lock function for protocol */ + (struct socket *so, int refcnt, void *debug); + int (*pr_unlock) /* unlock for protocol */ + (struct socket *so, int refcnt, void *debug); + lck_mtx_t *(*pr_getlock) /* retrieve protocol lock */ + (struct socket *so, int flags); /* * misc */ @@ -274,7 +274,7 @@ struct protosw { /* * Values for the flags argument of pr_getlock */ -#define PR_F_WILLUNLOCK 0x01 /* Will unlock (e.g., msleep) after the pr_getlock call */ +#define PR_F_WILLUNLOCK 0x01 /* Will unlock (e.g., msleep) after the pr_getlock call */ #endif /* XNU_KERNEL_PRIVATE */ @@ -287,34 +287,34 @@ struct protosw { * is only relevant if PR_CONNREQUIRED is set (otherwise sendto is allowed * anyhow). 
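[Editor's note: to make the struct protosw layouts above concrete, here is a hedged sketch of how a networking kext might populate the kernel-private variant with designated initializers. Every my_-prefixed name is hypothetical, the protocol number is arbitrary, and the PR_* flags used are the ones defined just below; registration would go through net_add_proto() (referred to by the PR_OLD/PRUF_OLD comments later in this header), whose exact signature is not shown in this hunk and is therefore omitted.]

/* Illustrative fragment: builds only in an XNU kernel/kext context
 * (BSD_KERNEL_PRIVATE), not against a user SDK. */
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/domain.h>

extern struct domain my_domain;            /* hypothetical domain */
extern struct pr_usrreqs my_proto_usrreqs; /* hypothetical user-request table */

static void
my_proto_input(struct mbuf *m, int len)    /* matches the pr_input signature */
{
	/* A real protocol would parse m and queue data onto a socket;
	 * this stub just releases the chain. */
	(void)len;
	m_freem(m);
}

static int
my_proto_ctloutput(struct socket *so, struct sockopt *sopt)
{
	(void)so; (void)sopt;
	return 0;                          /* accept and ignore socket options */
}

static struct protosw my_protosw = {
	.pr_type      = SOCK_DGRAM,          /* datagram-style records */
	.pr_domain    = &my_domain,
	.pr_protocol  = 222,                 /* arbitrary, illustrative number */
	.pr_flags     = PR_ATOMIC | PR_ADDR, /* atomic sends, address per message */
	.pr_input     = my_proto_input,
	.pr_ctloutput = my_proto_ctloutput,
	.pr_usrreqs   = &my_proto_usrreqs,
};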
*/ -#define PR_ATOMIC 0x01 /* exchange atomic messages only */ -#define PR_ADDR 0x02 /* addresses given with messages */ -#define PR_CONNREQUIRED 0x04 /* connection required by protocol */ -#define PR_WANTRCVD 0x08 /* want PRU_RCVD calls */ -#define PR_RIGHTS 0x10 /* passes capabilities */ -#define PR_IMPLOPCL 0x20 /* implied open/close */ -#define PR_LASTHDR 0x40 /* enforce ipsec policy; last header */ -#define PR_PROTOLOCK 0x80 /* protocol takes care of its own locking */ -#define PR_PCBLOCK 0x100 /* protocol supports per pcb locking */ -#define PR_DISPOSE 0x200 /* protocol requires late lists disposal */ +#define PR_ATOMIC 0x01 /* exchange atomic messages only */ +#define PR_ADDR 0x02 /* addresses given with messages */ +#define PR_CONNREQUIRED 0x04 /* connection required by protocol */ +#define PR_WANTRCVD 0x08 /* want PRU_RCVD calls */ +#define PR_RIGHTS 0x10 /* passes capabilities */ +#define PR_IMPLOPCL 0x20 /* implied open/close */ +#define PR_LASTHDR 0x40 /* enforce ipsec policy; last header */ +#define PR_PROTOLOCK 0x80 /* protocol takes care of its own locking */ +#define PR_PCBLOCK 0x100 /* protocol supports per pcb locking */ +#define PR_DISPOSE 0x200 /* protocol requires late lists disposal */ #ifdef BSD_KERNEL_PRIVATE -#define PR_INITIALIZED 0x400 /* protocol has been initialized */ -#define PR_ATTACHED 0x800 /* protocol is attached to a domain */ -#define PR_MULTICONN 0x1000 /* supports multiple connect calls */ -#define PR_EVCONNINFO 0x2000 /* protocol generates conninfo event */ -#define PR_PRECONN_WRITE 0x4000 /* protocol supports preconnect write */ -#define PR_DATA_IDEMPOTENT 0x8000 /* protocol supports idempotent data at connectx-time */ -#define PR_OLD 0x10000000 /* added via net_add_proto */ +#define PR_INITIALIZED 0x400 /* protocol has been initialized */ +#define PR_ATTACHED 0x800 /* protocol is attached to a domain */ +#define PR_MULTICONN 0x1000 /* supports multiple connect calls */ +#define PR_EVCONNINFO 0x2000 /* protocol generates conninfo event */ +#define PR_PRECONN_WRITE 0x4000 /* protocol supports preconnect write */ +#define PR_DATA_IDEMPOTENT 0x8000 /* protocol supports idempotent data at connectx-time */ +#define PR_OLD 0x10000000 /* added via net_add_proto */ /* pseudo-public domain flags */ -#define PRF_USERFLAGS \ - (PR_ATOMIC|PR_ADDR|PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS| \ +#define PRF_USERFLAGS \ + (PR_ATOMIC|PR_ADDR|PR_CONNREQUIRED|PR_WANTRCVD|PR_RIGHTS| \ PR_IMPLOPCL|PR_LASTHDR|PR_PROTOLOCK|PR_PCBLOCK|PR_DISPOSE) #endif /* BSD_KERNEL_PRIVATE */ #ifdef BSD_KERNEL_PRIVATE #ifdef PRCREQUESTS -char *prcrequests[] = { +char *prcrequests[] = { "IFDOWN", "ROUTEDEAD", "IFUP", "DEC-BIT-QUENCH2", "QUENCH", "MSGSIZE", "HOSTDEAD", "#7", "NET-UNREACH", "HOST-UNREACH", "PROTO-UNREACH", "PORT-UNREACH", @@ -337,13 +337,13 @@ char *prcrequests[] = { * A non-zero return from usrreq gives a * UNIX error number which should be passed to higher level software. */ -#define PRCO_GETOPT 0 -#define PRCO_SETOPT 1 +#define PRCO_GETOPT 0 +#define PRCO_SETOPT 1 -#define PRCO_NCMDS 2 +#define PRCO_NCMDS 2 #ifdef PRCOREQUESTS -char *prcorequests[] = { +char *prcorequests[] = { "GETOPT", "SETOPT", }; #endif /* PRCOREQUESTS */ @@ -355,41 +355,41 @@ char *prcorequests[] = { * which offers a number of benefits (such as type checking for arguments). * These older constants are still present in order to support TCP debugging. 
*/ -#define PRU_ATTACH 0 /* attach protocol to up */ -#define PRU_DETACH 1 /* detach protocol from up */ -#define PRU_BIND 2 /* bind socket to address */ -#define PRU_LISTEN 3 /* listen for connection */ -#define PRU_CONNECT 4 /* establish connection to peer */ -#define PRU_ACCEPT 5 /* accept connection from peer */ -#define PRU_DISCONNECT 6 /* disconnect from peer */ -#define PRU_SHUTDOWN 7 /* won't send any more data */ -#define PRU_RCVD 8 /* have taken data; more room now */ -#define PRU_SEND 9 /* send this data */ -#define PRU_ABORT 10 /* abort (fast DISCONNECT, DETACH) */ -#define PRU_CONTROL 11 /* control operations on protocol */ -#define PRU_SENSE 12 /* return status into m */ -#define PRU_RCVOOB 13 /* retrieve out of band data */ -#define PRU_SENDOOB 14 /* send out of band data */ -#define PRU_SOCKADDR 15 /* fetch socket's address */ -#define PRU_PEERADDR 16 /* fetch peer's address */ -#define PRU_CONNECT2 17 /* connect two sockets */ +#define PRU_ATTACH 0 /* attach protocol to up */ +#define PRU_DETACH 1 /* detach protocol from up */ +#define PRU_BIND 2 /* bind socket to address */ +#define PRU_LISTEN 3 /* listen for connection */ +#define PRU_CONNECT 4 /* establish connection to peer */ +#define PRU_ACCEPT 5 /* accept connection from peer */ +#define PRU_DISCONNECT 6 /* disconnect from peer */ +#define PRU_SHUTDOWN 7 /* won't send any more data */ +#define PRU_RCVD 8 /* have taken data; more room now */ +#define PRU_SEND 9 /* send this data */ +#define PRU_ABORT 10 /* abort (fast DISCONNECT, DETACH) */ +#define PRU_CONTROL 11 /* control operations on protocol */ +#define PRU_SENSE 12 /* return status into m */ +#define PRU_RCVOOB 13 /* retrieve out of band data */ +#define PRU_SENDOOB 14 /* send out of band data */ +#define PRU_SOCKADDR 15 /* fetch socket's address */ +#define PRU_PEERADDR 16 /* fetch peer's address */ +#define PRU_CONNECT2 17 /* connect two sockets */ /* begin for protocol's internal use */ -#define PRU_FASTTIMO 18 /* 200ms timeout */ -#define PRU_SLOWTIMO 19 /* 500ms timeout */ -#define PRU_PROTORCV 20 /* receive from below */ -#define PRU_PROTOSEND 21 /* send to below */ +#define PRU_FASTTIMO 18 /* 200ms timeout */ +#define PRU_SLOWTIMO 19 /* 500ms timeout */ +#define PRU_PROTORCV 20 /* receive from below */ +#define PRU_PROTOSEND 21 /* send to below */ /* end for protocol's internal use */ -#define PRU_SEND_EOF 22 /* send and close */ -#define PRU_NREQ 22 +#define PRU_SEND_EOF 22 /* send and close */ +#define PRU_NREQ 22 #ifdef PRUREQUESTS char *prurequests[] = { - "ATTACH", "DETACH", "BIND", "LISTEN", - "CONNECT", "ACCEPT", "DISCONNECT", "SHUTDOWN", - "RCVD", "SEND", "ABORT", "CONTROL", - "SENSE", "RCVOOB", "SENDOOB", "SOCKADDR", - "PEERADDR", "CONNECT2", "FASTTIMO", "SLOWTIMO", - "PROTORCV", "PROTOSEND", "SEND_EOF", + "ATTACH", "DETACH", "BIND", "LISTEN", + "CONNECT", "ACCEPT", "DISCONNECT", "SHUTDOWN", + "RCVD", "SEND", "ABORT", "CONTROL", + "SENSE", "RCVOOB", "SENDOOB", "SOCKADDR", + "PEERADDR", "CONNECT2", "FASTTIMO", "SLOWTIMO", + "PROTORCV", "PROTOSEND", "SEND_EOF", }; #endif /* PRUREQUESTS */ #endif /* BSD_KERNEL_PRIVATE */ @@ -412,36 +412,36 @@ struct pr_usrreqs_old { #else struct pr_usrreqs { #endif /* !XNU_KERNEL_PRIVATE */ - int (*pru_abort)(struct socket *so); - int (*pru_accept)(struct socket *so, struct sockaddr **nam); - int (*pru_attach)(struct socket *so, int proto, struct proc *p); - int (*pru_bind)(struct socket *so, struct sockaddr *nam, - struct proc *p); - int (*pru_connect)(struct socket *so, struct sockaddr *nam, - struct proc *p); 
- int (*pru_connect2)(struct socket *so1, struct socket *so2); - int (*pru_control)(struct socket *so, u_long cmd, caddr_t data, - struct ifnet *ifp, struct proc *p); - int (*pru_detach)(struct socket *so); - int (*pru_disconnect)(struct socket *so); - int (*pru_listen)(struct socket *so, struct proc *p); - int (*pru_peeraddr)(struct socket *so, struct sockaddr **nam); - int (*pru_rcvd)(struct socket *so, int flags); - int (*pru_rcvoob)(struct socket *so, struct mbuf *m, int flags); - int (*pru_send)(struct socket *so, int flags, struct mbuf *m, - struct sockaddr *addr, struct mbuf *control, - struct proc *p); - int (*pru_sense)(struct socket *so, void *sb, int isstat64); - int (*pru_shutdown)(struct socket *so); - int (*pru_sockaddr)(struct socket *so, struct sockaddr **nam); - int (*pru_sosend)(struct socket *so, struct sockaddr *addr, - struct uio *uio, struct mbuf *top, struct mbuf *control, - int flags); - int (*pru_soreceive)(struct socket *so, struct sockaddr **paddr, - struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, - int *flagsp); - int (*pru_sopoll)(struct socket *so, int events, - struct ucred *cred, void *); + int (*pru_abort)(struct socket *so); + int (*pru_accept)(struct socket *so, struct sockaddr **nam); + int (*pru_attach)(struct socket *so, int proto, struct proc *p); + int (*pru_bind)(struct socket *so, struct sockaddr *nam, + struct proc *p); + int (*pru_connect)(struct socket *so, struct sockaddr *nam, + struct proc *p); + int (*pru_connect2)(struct socket *so1, struct socket *so2); + int (*pru_control)(struct socket *so, u_long cmd, caddr_t data, + struct ifnet *ifp, struct proc *p); + int (*pru_detach)(struct socket *so); + int (*pru_disconnect)(struct socket *so); + int (*pru_listen)(struct socket *so, struct proc *p); + int (*pru_peeraddr)(struct socket *so, struct sockaddr **nam); + int (*pru_rcvd)(struct socket *so, int flags); + int (*pru_rcvoob)(struct socket *so, struct mbuf *m, int flags); + int (*pru_send)(struct socket *so, int flags, struct mbuf *m, + struct sockaddr *addr, struct mbuf *control, + struct proc *p); + int (*pru_sense)(struct socket *so, void *sb, int isstat64); + int (*pru_shutdown)(struct socket *so); + int (*pru_sockaddr)(struct socket *so, struct sockaddr **nam); + int (*pru_sosend)(struct socket *so, struct sockaddr *addr, + struct uio *uio, struct mbuf *top, struct mbuf *control, + int flags); + int (*pru_soreceive)(struct socket *so, struct sockaddr **paddr, + struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, + int *flagsp); + int (*pru_sopoll)(struct socket *so, int events, + struct ucred *cred, void *); }; #ifdef XNU_KERNEL_PRIVATE @@ -454,52 +454,52 @@ struct pr_usrreqs { * NOTE: When adding new ones, also add default callbacks in pru_sanitize(). 
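[Editor's note: the pr_usrreqs table above, like its kernel-internal successor just below, is typically populated sparsely. A protocol implements the hooks it cares about and points the rest at the pru_*_notsupp / pru_*_null defaults that this header declares further down (per the NOTE above, pru_sanitize() is expected to install defaults for hooks left unset). A hedged sketch follows; all my_-prefixed names are hypothetical.]

/* Illustrative fragment for an XNU kernel/kext context only. */
#include <sys/socket.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>

static int
my_attach(struct socket *so, int proto, struct proc *p)
{
	(void)so; (void)proto; (void)p;
	return 0;                         /* allocate per-socket state here */
}

static int
my_detach(struct socket *so)
{
	(void)so;
	return 0;                         /* release per-socket state here */
}

static int
my_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct proc *p)
{
	(void)so; (void)flags; (void)addr; (void)control; (void)p;
	m_freem(m);                       /* a real protocol would transmit m */
	return 0;
}

static struct pr_usrreqs my_usrreqs = {
	.pru_attach   = my_attach,
	.pru_detach   = my_detach,
	.pru_send     = my_send,
	/* Everything else defers to the defaults declared later in this header. */
	.pru_shutdown = pru_shutdown_notsupp,
	.pru_sockaddr = pru_sockaddr_notsupp,
	.pru_sense    = pru_sense_null,   /* fstat(2) on the socket reports nothing */
};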
*/ struct pr_usrreqs { - uint32_t pru_flags; /* see PRUF flags below */ - int (*pru_abort)(struct socket *); - int (*pru_accept)(struct socket *, struct sockaddr **); - int (*pru_attach)(struct socket *, int proto, struct proc *); - int (*pru_bind)(struct socket *, struct sockaddr *, struct proc *); - int (*pru_connect)(struct socket *, struct sockaddr *, - struct proc *); - int (*pru_connect2)(struct socket *, struct socket *); - int (*pru_connectx)(struct socket *, struct sockaddr *, - struct sockaddr *, struct proc *, uint32_t, - sae_associd_t, sae_connid_t *, uint32_t, void *, uint32_t, - struct uio *, user_ssize_t *); - int (*pru_control)(struct socket *, u_long, caddr_t, - struct ifnet *, struct proc *); - int (*pru_detach)(struct socket *); - int (*pru_disconnect)(struct socket *); - int (*pru_disconnectx)(struct socket *, - sae_associd_t, sae_connid_t); - int (*pru_listen)(struct socket *, struct proc *); - int (*pru_peeraddr)(struct socket *, struct sockaddr **); - int (*pru_rcvd)(struct socket *, int); - int (*pru_rcvoob)(struct socket *, struct mbuf *, int); - int (*pru_send)(struct socket *, int, struct mbuf *, - struct sockaddr *, struct mbuf *, struct proc *); - int (*pru_send_list)(struct socket *, int, struct mbuf *, - struct sockaddr *, struct mbuf *, struct proc *); -#define PRUS_OOB 0x1 -#define PRUS_EOF 0x2 -#define PRUS_MORETOCOME 0x4 - int (*pru_sense)(struct socket *, void *, int); - int (*pru_shutdown)(struct socket *); - int (*pru_sockaddr)(struct socket *, struct sockaddr **); - int (*pru_sopoll)(struct socket *, int, struct ucred *, void *); - int (*pru_soreceive)(struct socket *, struct sockaddr **, - struct uio *, struct mbuf **, struct mbuf **, int *); - int (*pru_soreceive_list)(struct socket *, struct recv_msg_elem *, u_int, - int *); - int (*pru_sosend)(struct socket *, struct sockaddr *, - struct uio *, struct mbuf *, struct mbuf *, int); - int (*pru_sosend_list)(struct socket *, struct uio **, u_int, int); - int (*pru_socheckopt)(struct socket *, struct sockopt *); - int (*pru_preconnect)(struct socket *so); + uint32_t pru_flags; /* see PRUF flags below */ + int (*pru_abort)(struct socket *); + int (*pru_accept)(struct socket *, struct sockaddr **); + int (*pru_attach)(struct socket *, int proto, struct proc *); + int (*pru_bind)(struct socket *, struct sockaddr *, struct proc *); + int (*pru_connect)(struct socket *, struct sockaddr *, + struct proc *); + int (*pru_connect2)(struct socket *, struct socket *); + int (*pru_connectx)(struct socket *, struct sockaddr *, + struct sockaddr *, struct proc *, uint32_t, + sae_associd_t, sae_connid_t *, uint32_t, void *, uint32_t, + struct uio *, user_ssize_t *); + int (*pru_control)(struct socket *, u_long, caddr_t, + struct ifnet *, struct proc *); + int (*pru_detach)(struct socket *); + int (*pru_disconnect)(struct socket *); + int (*pru_disconnectx)(struct socket *, + sae_associd_t, sae_connid_t); + int (*pru_listen)(struct socket *, struct proc *); + int (*pru_peeraddr)(struct socket *, struct sockaddr **); + int (*pru_rcvd)(struct socket *, int); + int (*pru_rcvoob)(struct socket *, struct mbuf *, int); + int (*pru_send)(struct socket *, int, struct mbuf *, + struct sockaddr *, struct mbuf *, struct proc *); + int (*pru_send_list)(struct socket *, int, struct mbuf *, + struct sockaddr *, struct mbuf *, struct proc *); +#define PRUS_OOB 0x1 +#define PRUS_EOF 0x2 +#define PRUS_MORETOCOME 0x4 + int (*pru_sense)(struct socket *, void *, int); + int (*pru_shutdown)(struct socket *); + int (*pru_sockaddr)(struct 
socket *, struct sockaddr **); + int (*pru_sopoll)(struct socket *, int, struct ucred *, void *); + int (*pru_soreceive)(struct socket *, struct sockaddr **, + struct uio *, struct mbuf **, struct mbuf **, int *); + int (*pru_soreceive_list)(struct socket *, struct recv_msg_elem *, u_int, + int *); + int (*pru_sosend)(struct socket *, struct sockaddr *, + struct uio *, struct mbuf *, struct mbuf *, int); + int (*pru_sosend_list)(struct socket *, struct uio **, u_int, int); + int (*pru_socheckopt)(struct socket *, struct sockopt *); + int (*pru_preconnect)(struct socket *so); }; /* Values for pru_flags */ -#define PRUF_OLD 0x10000000 /* added via net_add_proto */ +#define PRUF_OLD 0x10000000 /* added via net_add_proto */ #ifdef BSD_KERNEL_PRIVATE /* @@ -542,7 +542,7 @@ extern int pru_sense_null(struct socket *so, void * sb, int isstat64); extern int pru_shutdown_notsupp(struct socket *so); extern int pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam); extern int pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, - struct uio *uio, struct mbuf *top, struct mbuf *control, int flags); + struct uio *uio, struct mbuf *top, struct mbuf *control, int flags); extern int pru_sosend_list_notsupp(struct socket *so, struct uio **uio, u_int, int flags); extern int pru_soreceive_notsupp(struct socket *so, @@ -578,4 +578,4 @@ extern int net_del_proto(int, int, struct domain *); extern struct protosw *pffindproto(int family, int protocol, int type); __END_DECLS #endif /* KERNEL_PRIVATE */ -#endif /* !_SYS_PROTOSW_H_ */ +#endif /* !_SYS_PROTOSW_H_ */ diff --git a/bsd/sys/pthread_internal.h b/bsd/sys/pthread_internal.h index 3f4c3f12c..42e23d812 100644 --- a/bsd/sys/pthread_internal.h +++ b/bsd/sys/pthread_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -45,4 +45,3 @@ void workq_exit(struct proc *); void pthread_init(void); #endif /* _SYS_PTHREAD_INTERNAL_H_ */ - diff --git a/bsd/sys/pthread_shims.h b/bsd/sys/pthread_shims.h index 03b2333a1..7a2d607dd 100644 --- a/bsd/sys/pthread_shims.h +++ b/bsd/sys/pthread_shims.h @@ -88,11 +88,11 @@ typedef const struct pthread_functions_s { void *__unused_was_workq_open; /* psynch syscalls */ - int (*psynch_mutexwait)(proc_t p, user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t *retval); - int (*psynch_mutexdrop)(proc_t p, user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t *retval); + int (*psynch_mutexwait)(proc_t p, user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t *retval); + int (*psynch_mutexdrop)(proc_t p, user_addr_t mutex, uint32_t mgen, uint32_t ugen, uint64_t tid, uint32_t flags, uint32_t *retval); int (*psynch_cvbroad)(proc_t p, user_addr_t cv, uint64_t cvlsgen, uint64_t cvudgen, uint32_t flags, user_addr_t mutex, uint64_t mugen, uint64_t tid, uint32_t *retval); int (*psynch_cvsignal)(proc_t p, user_addr_t cv, uint64_t cvlsgen, uint32_t cvugen, int thread_port, user_addr_t mutex, uint64_t mugen, uint64_t tid, uint32_t flags, uint32_t *retval); - int (*psynch_cvwait)(proc_t p, user_addr_t cv, uint64_t cvlsgen, uint32_t cvugen, user_addr_t mutex, uint64_t mugen, uint32_t flags, int64_t sec, uint32_t nsec, uint32_t * retval); + int (*psynch_cvwait)(proc_t p, user_addr_t cv, uint64_t cvlsgen, uint32_t cvugen, user_addr_t mutex, uint64_t mugen, uint32_t flags, int64_t sec, uint32_t nsec, uint32_t * retval); int (*psynch_cvclrprepost)(proc_t p, user_addr_t cv, uint32_t cvgen, uint32_t cvugen, uint32_t cvsgen, uint32_t prepocnt, uint32_t preposeq, uint32_t flags, int *retval); int (*psynch_rw_longrdlock)(proc_t p, user_addr_t rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval); int (*psynch_rw_rdlock)(proc_t p, user_addr_t rwlock, uint32_t lgenval, uint32_t ugenval, uint32_t rw_wc, int flags, uint32_t *retval); @@ -120,21 +120,21 @@ typedef const struct pthread_functions_s { void *__unused_was_workq_threadreq; int (*workq_handle_stack_events)(proc_t p, thread_t th, vm_map_t map, - user_addr_t stackaddr, mach_port_name_t kport, - user_addr_t events, int nevents, int upcall_flags); + user_addr_t stackaddr, mach_port_name_t kport, + user_addr_t events, int nevents, int upcall_flags); int (*workq_create_threadstack)(proc_t p, vm_map_t vmap, - mach_vm_offset_t *out_addr); + mach_vm_offset_t *out_addr); int (*workq_destroy_threadstack)(proc_t p, vm_map_t vmap, - mach_vm_offset_t stackaddr); + mach_vm_offset_t stackaddr); void (*workq_setup_thread)(proc_t p, thread_t th, vm_map_t map, - user_addr_t stackaddr, mach_port_name_t kport, int th_qos, - int setup_flags, int upcall_flags); + user_addr_t stackaddr, mach_port_name_t kport, int th_qos, + int setup_flags, int upcall_flags); void (*workq_markfree_threadstack)(proc_t p, thread_t, vm_map_t map, - user_addr_t stackaddr); + user_addr_t stackaddr); /* padding for future */ void * _pad[83]; @@ -167,8 +167,8 @@ typedef const struct pthread_callbacks_s { void *__unused_was_proc_get_wqptr; wait_result_t (*psynch_wait_prepare)(uintptr_t kwq, - struct turnstile **tstore, thread_t owner, block_hint_t block_hint, - uint64_t deadline); + struct turnstile **tstore, thread_t owner, block_hint_t block_hint, + uint64_t deadline); void 
(*psynch_wait_update_complete)(struct turnstile *turnstile); @@ -177,10 +177,10 @@ typedef const struct pthread_callbacks_s { void (*psynch_wait_cleanup)(void); kern_return_t (*psynch_wait_wakeup)(uintptr_t kwq, - struct ksyn_waitq_element *kwe, struct turnstile **tstore); + struct ksyn_waitq_element *kwe, struct turnstile **tstore); void (*psynch_wait_update_owner)(uintptr_t kwq, thread_t owner, - struct turnstile **tstore); + struct turnstile **tstore); void* (*proc_get_pthhash)(struct proc *t); void (*proc_set_pthhash)(struct proc *t, void* ptr); @@ -267,7 +267,7 @@ typedef const struct pthread_callbacks_s { kern_return_t (*thread_set_tsd_base)(thread_t thread, mach_vm_offset_t tsd_base); - int (*proc_usynch_get_requested_thread_qos)(struct uthread *); + int (*proc_usynch_get_requested_thread_qos)(struct uthread *); uint64_t (*proc_get_mach_thread_self_tsd_offset)(struct proc *p); void (*proc_set_mach_thread_self_tsd_offset)(struct proc *p, uint64_t mach_thread_self_tsd_offset); diff --git a/bsd/sys/ptrace.h b/bsd/sys/ptrace.h index cf589929b..2f380b5dc 100644 --- a/bsd/sys/ptrace.h +++ b/bsd/sys/ptrace.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -61,8 +61,8 @@ * @(#)ptrace.h 8.2 (Berkeley) 1/4/94 */ -#ifndef _SYS_PTRACE_H_ -#define _SYS_PTRACE_H_ +#ifndef _SYS_PTRACE_H_ +#define _SYS_PTRACE_H_ #include #include @@ -72,35 +72,35 @@ enum { }; -#define PT_TRACE_ME 0 /* child declares it's being traced */ -#define PT_READ_I 1 /* read word in child's I space */ -#define PT_READ_D 2 /* read word in child's D space */ -#define PT_READ_U 3 /* read word in child's user structure */ -#define PT_WRITE_I 4 /* write word in child's I space */ -#define PT_WRITE_D 5 /* write word in child's D space */ -#define PT_WRITE_U 6 /* write word in child's user structure */ -#define PT_CONTINUE 7 /* continue the child */ -#define PT_KILL 8 /* kill the child process */ -#define PT_STEP 9 /* single step the child */ -#define PT_ATTACH ePtAttachDeprecated /* trace some running process */ -#define PT_DETACH 11 /* stop tracing a process */ -#define PT_SIGEXC 12 /* signals as exceptions for current_proc */ -#define PT_THUPDATE 13 /* signal for thread# */ -#define PT_ATTACHEXC 14 /* attach to running process with signal exception */ +#define PT_TRACE_ME 0 /* child declares it's being traced */ +#define PT_READ_I 1 /* read word in child's I space */ +#define PT_READ_D 2 /* read word in child's D space */ +#define PT_READ_U 3 /* read word in child's user structure */ +#define PT_WRITE_I 4 /* write word in child's I space */ +#define PT_WRITE_D 5 /* write word in child's D space */ +#define PT_WRITE_U 6 /* write word in child's user structure */ +#define PT_CONTINUE 7 /* continue the child */ +#define PT_KILL 8 /* kill the child process */ +#define PT_STEP 9 /* single step the child */ +#define PT_ATTACH ePtAttachDeprecated /* trace some running process */ +#define PT_DETACH 11 /* stop tracing a process */ +#define PT_SIGEXC 12 /* signals as exceptions for current_proc */ +#define PT_THUPDATE 13 /* signal for thread# */ +#define PT_ATTACHEXC 14 /* attach to running process with signal exception */ -#define PT_FORCEQUOTA 30 /* Enforce quota for root */ -#define PT_DENY_ATTACH 31 +#define PT_FORCEQUOTA 30 /* Enforce quota for root */ +#define PT_DENY_ATTACH 31 -#define PT_FIRSTMACH 32 /* for machine-specific requests */ +#define PT_FIRSTMACH 32 /* for machine-specific requests */ __BEGIN_DECLS #ifndef KERNEL -int ptrace(int _request, pid_t _pid, caddr_t _addr, int _data); +int ptrace(int _request, pid_t _pid, caddr_t _addr, int _data); #endif /* !KERNEL */ __END_DECLS -#endif /* !_SYS_PTRACE_H_ */ +#endif /* !_SYS_PTRACE_H_ */ diff --git a/bsd/sys/queue.h b/bsd/sys/queue.h index aa26d7636..8791385d7 100644 --- a/bsd/sys/queue.h +++ b/bsd/sys/queue.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
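[Editor's note: the PT_* request codes and the ptrace() prototype above are the entire user-visible surface of bsd/sys/ptrace.h. The Apple-specific PT_DENY_ATTACH request is the one most often seen in the wild; a minimal sketch, with the exact enforcement behavior left to the ptrace(2) man page:]

#include <sys/types.h>
#include <sys/ptrace.h>
#include <stdio.h>

int main(void)
{
	/* Ask the kernel to refuse future attempts by other processes to
	 * attach to us with ptrace (Apple-specific anti-debugging request). */
	if (ptrace(PT_DENY_ATTACH, 0, 0, 0) == -1) {
		perror("ptrace(PT_DENY_ATTACH)");
		return 1;
	}
	/* ... do work that should not run under a debugger ... */
	return 0;
}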
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -57,14 +57,14 @@ */ #ifndef _SYS_QUEUE_H_ -#define _SYS_QUEUE_H_ +#define _SYS_QUEUE_H_ #ifdef KERNEL_PRIVATE #include /* panic function call */ -#include /* __improbable in kernelspace */ +#include /* __improbable in kernelspace */ #else #ifndef __improbable -#define __improbable(x) (x) /* noop in userspace */ +#define __improbable(x) (x) /* noop in userspace */ #endif /* __improbable */ #endif /* KERNEL_PRIVATE */ @@ -158,29 +158,29 @@ struct qm_trace { int prevline; }; -#define TRACEBUF struct qm_trace trace; -#define TRASHIT(x) do {(x) = (void *)-1;} while (0) +#define TRACEBUF struct qm_trace trace; +#define TRASHIT(x) do {(x) = (void *)-1;} while (0) -#define QMD_TRACE_HEAD(head) do { \ - (head)->trace.prevline = (head)->trace.lastline; \ - (head)->trace.prevfile = (head)->trace.lastfile; \ - (head)->trace.lastline = __LINE__; \ - (head)->trace.lastfile = __FILE__; \ +#define QMD_TRACE_HEAD(head) do { \ + (head)->trace.prevline = (head)->trace.lastline; \ + (head)->trace.prevfile = (head)->trace.lastfile; \ + (head)->trace.lastline = __LINE__; \ + (head)->trace.lastfile = __FILE__; \ } while (0) -#define QMD_TRACE_ELEM(elem) do { \ - (elem)->trace.prevline = (elem)->trace.lastline; \ - (elem)->trace.prevfile = (elem)->trace.lastfile; \ - (elem)->trace.lastline = __LINE__; \ - (elem)->trace.lastfile = __FILE__; \ +#define QMD_TRACE_ELEM(elem) do { \ + (elem)->trace.prevline = (elem)->trace.lastline; \ + (elem)->trace.prevfile = (elem)->trace.lastfile; \ + (elem)->trace.lastline = __LINE__; \ + (elem)->trace.lastfile = __FILE__; \ } while (0) #else -#define QMD_TRACE_ELEM(elem) -#define QMD_TRACE_HEAD(head) -#define TRACEBUF -#define TRASHIT(x) -#endif /* QUEUE_MACRO_DEBUG */ +#define QMD_TRACE_ELEM(elem) +#define QMD_TRACE_HEAD(head) +#define TRACEBUF +#define TRASHIT(x) +#endif /* QUEUE_MACRO_DEBUG */ /* * Horrible macros to enable use of code that was meant to be C-specific @@ -197,10 +197,10 @@ struct qm_trace { * prepend "struct" to "type" and will cause C++ to blow up. */ #if defined(__clang__) && defined(__cplusplus) -#define __MISMATCH_TAGS_PUSH \ - _Pragma("clang diagnostic push") \ +#define __MISMATCH_TAGS_PUSH \ + _Pragma("clang diagnostic push") \ _Pragma("clang diagnostic ignored \"-Wmismatched-tags\"") -#define __MISMATCH_TAGS_POP \ +#define __MISMATCH_TAGS_POP \ _Pragma("clang diagnostic pop") #else #define __MISMATCH_TAGS_PUSH @@ -210,235 +210,235 @@ struct qm_trace { /* * Singly-linked List declarations. 
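[Editor's note: queue.h is usable from userspace as well (note the __improbable() no-op fallback above), so here is a short usage sketch of the SLIST_* macros declared and defined just below; node, node_list, and link are illustrative names:]

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	SLIST_ENTRY(node) link;              /* embeds the sle_next pointer */
};

SLIST_HEAD(node_list, node);                 /* declares struct node_list */

int main(void)
{
	struct node_list head = SLIST_HEAD_INITIALIZER(head);
	struct node *n, *tmp;

	for (int i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->value = i;
		SLIST_INSERT_HEAD(&head, n, link);
	}

	SLIST_FOREACH(n, &head, link) {
		printf("%d\n", n->value);    /* prints 2, 1, 0 */
	}

	/* The _SAFE variant caches the next pointer, so the current
	 * element may be removed and freed mid-iteration. */
	SLIST_FOREACH_SAFE(n, &head, link, tmp) {
		SLIST_REMOVE_HEAD(&head, link);
		free(n);
	}
	return 0;
}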
*/ -#define SLIST_HEAD(name, type) \ -__MISMATCH_TAGS_PUSH \ -struct name { \ - struct type *slh_first; /* first element */ \ -} \ +#define SLIST_HEAD(name, type) \ +__MISMATCH_TAGS_PUSH \ +struct name { \ + struct type *slh_first; /* first element */ \ +} \ __MISMATCH_TAGS_POP -#define SLIST_HEAD_INITIALIZER(head) \ +#define SLIST_HEAD_INITIALIZER(head) \ { NULL } -#define SLIST_ENTRY(type) \ -__MISMATCH_TAGS_PUSH \ -struct { \ - struct type *sle_next; /* next element */ \ -} \ +#define SLIST_ENTRY(type) \ +__MISMATCH_TAGS_PUSH \ +struct { \ + struct type *sle_next; /* next element */ \ +} \ __MISMATCH_TAGS_POP /* * Singly-linked List functions. */ -#define SLIST_EMPTY(head) ((head)->slh_first == NULL) +#define SLIST_EMPTY(head) ((head)->slh_first == NULL) -#define SLIST_FIRST(head) ((head)->slh_first) +#define SLIST_FIRST(head) ((head)->slh_first) -#define SLIST_FOREACH(var, head, field) \ - for ((var) = SLIST_FIRST((head)); \ - (var); \ +#define SLIST_FOREACH(var, head, field) \ + for ((var) = SLIST_FIRST((head)); \ + (var); \ (var) = SLIST_NEXT((var), field)) -#define SLIST_FOREACH_SAFE(var, head, field, tvar) \ - for ((var) = SLIST_FIRST((head)); \ - (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ +#define SLIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = SLIST_FIRST((head)); \ + (var) && ((tvar) = SLIST_NEXT((var), field), 1); \ (var) = (tvar)) -#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ - for ((varp) = &SLIST_FIRST((head)); \ - ((var) = *(varp)) != NULL; \ +#define SLIST_FOREACH_PREVPTR(var, varp, head, field) \ + for ((varp) = &SLIST_FIRST((head)); \ + ((var) = *(varp)) != NULL; \ (varp) = &SLIST_NEXT((var), field)) -#define SLIST_INIT(head) do { \ - SLIST_FIRST((head)) = NULL; \ +#define SLIST_INIT(head) do { \ + SLIST_FIRST((head)) = NULL; \ } while (0) -#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ - SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ - SLIST_NEXT((slistelm), field) = (elm); \ +#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ + SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \ + SLIST_NEXT((slistelm), field) = (elm); \ } while (0) -#define SLIST_INSERT_HEAD(head, elm, field) do { \ - SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ - SLIST_FIRST((head)) = (elm); \ +#define SLIST_INSERT_HEAD(head, elm, field) do { \ + SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \ + SLIST_FIRST((head)) = (elm); \ } while (0) -#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) +#define SLIST_NEXT(elm, field) ((elm)->field.sle_next) -#define SLIST_REMOVE(head, elm, type, field) \ -__MISMATCH_TAGS_PUSH \ -do { \ - if (SLIST_FIRST((head)) == (elm)) { \ - SLIST_REMOVE_HEAD((head), field); \ - } \ - else { \ - struct type *curelm = SLIST_FIRST((head)); \ - while (SLIST_NEXT(curelm, field) != (elm)) \ - curelm = SLIST_NEXT(curelm, field); \ - SLIST_REMOVE_AFTER(curelm, field); \ - } \ - TRASHIT((elm)->field.sle_next); \ -} while (0) \ +#define SLIST_REMOVE(head, elm, type, field) \ +__MISMATCH_TAGS_PUSH \ +do { \ + if (SLIST_FIRST((head)) == (elm)) { \ + SLIST_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = SLIST_FIRST((head)); \ + while (SLIST_NEXT(curelm, field) != (elm)) \ + curelm = SLIST_NEXT(curelm, field); \ + SLIST_REMOVE_AFTER(curelm, field); \ + } \ + TRASHIT((elm)->field.sle_next); \ +} while (0) \ __MISMATCH_TAGS_POP -#define SLIST_REMOVE_AFTER(elm, field) do { \ - SLIST_NEXT(elm, field) = \ - SLIST_NEXT(SLIST_NEXT(elm, field), field); \ +#define SLIST_REMOVE_AFTER(elm, field) do { \ 
+ SLIST_NEXT(elm, field) = \ + SLIST_NEXT(SLIST_NEXT(elm, field), field); \ } while (0) -#define SLIST_REMOVE_HEAD(head, field) do { \ - SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ +#define SLIST_REMOVE_HEAD(head, field) do { \ + SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \ } while (0) /* * Singly-linked Tail queue declarations. */ -#define STAILQ_HEAD(name, type) \ -__MISMATCH_TAGS_PUSH \ -struct name { \ - struct type *stqh_first;/* first element */ \ - struct type **stqh_last;/* addr of last next element */ \ -} \ +#define STAILQ_HEAD(name, type) \ +__MISMATCH_TAGS_PUSH \ +struct name { \ + struct type *stqh_first;/* first element */ \ + struct type **stqh_last;/* addr of last next element */ \ +} \ __MISMATCH_TAGS_POP -#define STAILQ_HEAD_INITIALIZER(head) \ +#define STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } -#define STAILQ_ENTRY(type) \ -__MISMATCH_TAGS_PUSH \ -struct { \ - struct type *stqe_next; /* next element */ \ -} \ +#define STAILQ_ENTRY(type) \ +__MISMATCH_TAGS_PUSH \ +struct { \ + struct type *stqe_next; /* next element */ \ +} \ __MISMATCH_TAGS_POP /* * Singly-linked Tail queue functions. */ -#define STAILQ_CONCAT(head1, head2) do { \ - if (!STAILQ_EMPTY((head2))) { \ - *(head1)->stqh_last = (head2)->stqh_first; \ - (head1)->stqh_last = (head2)->stqh_last; \ - STAILQ_INIT((head2)); \ - } \ +#define STAILQ_CONCAT(head1, head2) do { \ + if (!STAILQ_EMPTY((head2))) { \ + *(head1)->stqh_last = (head2)->stqh_first; \ + (head1)->stqh_last = (head2)->stqh_last; \ + STAILQ_INIT((head2)); \ + } \ } while (0) -#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) +#define STAILQ_EMPTY(head) ((head)->stqh_first == NULL) -#define STAILQ_FIRST(head) ((head)->stqh_first) +#define STAILQ_FIRST(head) ((head)->stqh_first) -#define STAILQ_FOREACH(var, head, field) \ - for((var) = STAILQ_FIRST((head)); \ - (var); \ +#define STAILQ_FOREACH(var, head, field) \ + for((var) = STAILQ_FIRST((head)); \ + (var); \ (var) = STAILQ_NEXT((var), field)) -#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ - for ((var) = STAILQ_FIRST((head)); \ - (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ +#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = STAILQ_FIRST((head)); \ + (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \ (var) = (tvar)) -#define STAILQ_INIT(head) do { \ - STAILQ_FIRST((head)) = NULL; \ - (head)->stqh_last = &STAILQ_FIRST((head)); \ +#define STAILQ_INIT(head) do { \ + STAILQ_FIRST((head)) = NULL; \ + (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) -#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ +#define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \ if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\ - (head)->stqh_last = &STAILQ_NEXT((elm), field); \ - STAILQ_NEXT((tqelm), field) = (elm); \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ + STAILQ_NEXT((tqelm), field) = (elm); \ } while (0) -#define STAILQ_INSERT_HEAD(head, elm, field) do { \ - if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ - (head)->stqh_last = &STAILQ_NEXT((elm), field); \ - STAILQ_FIRST((head)) = (elm); \ +#define STAILQ_INSERT_HEAD(head, elm, field) do { \ + if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ + STAILQ_FIRST((head)) = (elm); \ } while (0) -#define STAILQ_INSERT_TAIL(head, elm, field) do { \ - STAILQ_NEXT((elm), field) = NULL; \ - *(head)->stqh_last = (elm); \ - (head)->stqh_last = 
&STAILQ_NEXT((elm), field); \ +#define STAILQ_INSERT_TAIL(head, elm, field) do { \ + STAILQ_NEXT((elm), field) = NULL; \ + *(head)->stqh_last = (elm); \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ } while (0) -#define STAILQ_LAST(head, type, field) \ -__MISMATCH_TAGS_PUSH \ - (STAILQ_EMPTY((head)) ? \ - NULL : \ - ((struct type *)(void *) \ - ((char *)((head)->stqh_last) - __offsetof(struct type, field))))\ +#define STAILQ_LAST(head, type, field) \ +__MISMATCH_TAGS_PUSH \ + (STAILQ_EMPTY((head)) ? \ + NULL : \ + ((struct type *)(void *) \ + ((char *)((head)->stqh_last) - __offsetof(struct type, field))))\ __MISMATCH_TAGS_POP -#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) - -#define STAILQ_REMOVE(head, elm, type, field) \ -__MISMATCH_TAGS_PUSH \ -do { \ - if (STAILQ_FIRST((head)) == (elm)) { \ - STAILQ_REMOVE_HEAD((head), field); \ - } \ - else { \ - struct type *curelm = STAILQ_FIRST((head)); \ - while (STAILQ_NEXT(curelm, field) != (elm)) \ - curelm = STAILQ_NEXT(curelm, field); \ - STAILQ_REMOVE_AFTER(head, curelm, field); \ - } \ - TRASHIT((elm)->field.stqe_next); \ -} while (0) \ +#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) + +#define STAILQ_REMOVE(head, elm, type, field) \ +__MISMATCH_TAGS_PUSH \ +do { \ + if (STAILQ_FIRST((head)) == (elm)) { \ + STAILQ_REMOVE_HEAD((head), field); \ + } \ + else { \ + struct type *curelm = STAILQ_FIRST((head)); \ + while (STAILQ_NEXT(curelm, field) != (elm)) \ + curelm = STAILQ_NEXT(curelm, field); \ + STAILQ_REMOVE_AFTER(head, curelm, field); \ + } \ + TRASHIT((elm)->field.stqe_next); \ +} while (0) \ __MISMATCH_TAGS_POP -#define STAILQ_REMOVE_HEAD(head, field) do { \ - if ((STAILQ_FIRST((head)) = \ - STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ - (head)->stqh_last = &STAILQ_FIRST((head)); \ +#define STAILQ_REMOVE_HEAD(head, field) do { \ + if ((STAILQ_FIRST((head)) = \ + STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \ + (head)->stqh_last = &STAILQ_FIRST((head)); \ } while (0) #define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \ if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \ - (head)->stqh_last = &STAILQ_FIRST((head)); \ -} while (0) - -#define STAILQ_REMOVE_AFTER(head, elm, field) do { \ - if ((STAILQ_NEXT(elm, field) = \ - STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \ - (head)->stqh_last = &STAILQ_NEXT((elm), field); \ -} while (0) - -#define STAILQ_SWAP(head1, head2, type) \ -__MISMATCH_TAGS_PUSH \ -do { \ - struct type *swap_first = STAILQ_FIRST(head1); \ - struct type **swap_last = (head1)->stqh_last; \ - STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \ - (head1)->stqh_last = (head2)->stqh_last; \ - STAILQ_FIRST(head2) = swap_first; \ - (head2)->stqh_last = swap_last; \ - if (STAILQ_EMPTY(head1)) \ - (head1)->stqh_last = &STAILQ_FIRST(head1); \ - if (STAILQ_EMPTY(head2)) \ - (head2)->stqh_last = &STAILQ_FIRST(head2); \ -} while (0) \ + (head)->stqh_last = &STAILQ_FIRST((head)); \ +} while (0) + +#define STAILQ_REMOVE_AFTER(head, elm, field) do { \ + if ((STAILQ_NEXT(elm, field) = \ + STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \ + (head)->stqh_last = &STAILQ_NEXT((elm), field); \ +} while (0) + +#define STAILQ_SWAP(head1, head2, type) \ +__MISMATCH_TAGS_PUSH \ +do { \ + struct type *swap_first = STAILQ_FIRST(head1); \ + struct type **swap_last = (head1)->stqh_last; \ + STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \ + (head1)->stqh_last = (head2)->stqh_last; \ + STAILQ_FIRST(head2) = swap_first; \ + (head2)->stqh_last = swap_last; \ + if (STAILQ_EMPTY(head1)) \ + 
(head1)->stqh_last = &STAILQ_FIRST(head1); \ + if (STAILQ_EMPTY(head2)) \ + (head2)->stqh_last = &STAILQ_FIRST(head2); \ +} while (0) \ __MISMATCH_TAGS_POP /* * List declarations. */ -#define LIST_HEAD(name, type) \ -__MISMATCH_TAGS_PUSH \ -struct name { \ - struct type *lh_first; /* first element */ \ -} \ +#define LIST_HEAD(name, type) \ +__MISMATCH_TAGS_PUSH \ +struct name { \ + struct type *lh_first; /* first element */ \ +} \ __MISMATCH_TAGS_POP -#define LIST_HEAD_INITIALIZER(head) \ +#define LIST_HEAD_INITIALIZER(head) \ { NULL } -#define LIST_ENTRY(type) \ -__MISMATCH_TAGS_PUSH \ -struct { \ - struct type *le_next; /* next element */ \ - struct type **le_prev; /* address of previous next element */ \ -} \ +#define LIST_ENTRY(type) \ +__MISMATCH_TAGS_PUSH \ +struct { \ + struct type *le_next; /* next element */ \ + struct type **le_prev; /* address of previous next element */ \ +} \ __MISMATCH_TAGS_POP /* @@ -446,270 +446,270 @@ __MISMATCH_TAGS_POP */ #ifdef KERNEL_PRIVATE -#define LIST_CHECK_HEAD(head, field) do { \ - if (__improbable( \ - LIST_FIRST((head)) != NULL && \ - LIST_FIRST((head))->field.le_prev != \ - &LIST_FIRST((head)))) \ - panic("Bad list head %p first->prev != head", (head)); \ +#define LIST_CHECK_HEAD(head, field) do { \ + if (__improbable( \ + LIST_FIRST((head)) != NULL && \ + LIST_FIRST((head))->field.le_prev != \ + &LIST_FIRST((head)))) \ + panic("Bad list head %p first->prev != head", (head)); \ } while (0) -#define LIST_CHECK_NEXT(elm, field) do { \ - if (__improbable( \ - LIST_NEXT((elm), field) != NULL && \ - LIST_NEXT((elm), field)->field.le_prev != \ - &((elm)->field.le_next))) \ - panic("Bad link elm %p next->prev != elm", (elm)); \ +#define LIST_CHECK_NEXT(elm, field) do { \ + if (__improbable( \ + LIST_NEXT((elm), field) != NULL && \ + LIST_NEXT((elm), field)->field.le_prev != \ + &((elm)->field.le_next))) \ + panic("Bad link elm %p next->prev != elm", (elm)); \ } while (0) -#define LIST_CHECK_PREV(elm, field) do { \ - if (__improbable(*(elm)->field.le_prev != (elm))) \ - panic("Bad link elm %p prev->next != elm", (elm)); \ +#define LIST_CHECK_PREV(elm, field) do { \ + if (__improbable(*(elm)->field.le_prev != (elm))) \ + panic("Bad link elm %p prev->next != elm", (elm)); \ } while (0) #else -#define LIST_CHECK_HEAD(head, field) -#define LIST_CHECK_NEXT(elm, field) -#define LIST_CHECK_PREV(elm, field) +#define LIST_CHECK_HEAD(head, field) +#define LIST_CHECK_NEXT(elm, field) +#define LIST_CHECK_PREV(elm, field) #endif /* KERNEL_PRIVATE */ -#define LIST_EMPTY(head) ((head)->lh_first == NULL) +#define LIST_EMPTY(head) ((head)->lh_first == NULL) -#define LIST_FIRST(head) ((head)->lh_first) +#define LIST_FIRST(head) ((head)->lh_first) -#define LIST_FOREACH(var, head, field) \ - for ((var) = LIST_FIRST((head)); \ - (var); \ +#define LIST_FOREACH(var, head, field) \ + for ((var) = LIST_FIRST((head)); \ + (var); \ (var) = LIST_NEXT((var), field)) -#define LIST_FOREACH_SAFE(var, head, field, tvar) \ - for ((var) = LIST_FIRST((head)); \ - (var) && ((tvar) = LIST_NEXT((var), field), 1); \ +#define LIST_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = LIST_FIRST((head)); \ + (var) && ((tvar) = LIST_NEXT((var), field), 1); \ (var) = (tvar)) -#define LIST_INIT(head) do { \ - LIST_FIRST((head)) = NULL; \ +#define LIST_INIT(head) do { \ + LIST_FIRST((head)) = NULL; \ } while (0) -#define LIST_INSERT_AFTER(listelm, elm, field) do { \ - LIST_CHECK_NEXT(listelm, field); \ +#define LIST_INSERT_AFTER(listelm, elm, field) do { \ + LIST_CHECK_NEXT(listelm, field); \ if 
((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\ - LIST_NEXT((listelm), field)->field.le_prev = \ - &LIST_NEXT((elm), field); \ - LIST_NEXT((listelm), field) = (elm); \ - (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ -} while (0) - -#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ - LIST_CHECK_PREV(listelm, field); \ - (elm)->field.le_prev = (listelm)->field.le_prev; \ - LIST_NEXT((elm), field) = (listelm); \ - *(listelm)->field.le_prev = (elm); \ - (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ -} while (0) - -#define LIST_INSERT_HEAD(head, elm, field) do { \ - LIST_CHECK_HEAD((head), field); \ - if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ - LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ - LIST_FIRST((head)) = (elm); \ - (elm)->field.le_prev = &LIST_FIRST((head)); \ -} while (0) - -#define LIST_NEXT(elm, field) ((elm)->field.le_next) - -#define LIST_REMOVE(elm, field) do { \ - LIST_CHECK_NEXT(elm, field); \ - LIST_CHECK_PREV(elm, field); \ - if (LIST_NEXT((elm), field) != NULL) \ - LIST_NEXT((elm), field)->field.le_prev = \ - (elm)->field.le_prev; \ - *(elm)->field.le_prev = LIST_NEXT((elm), field); \ - TRASHIT((elm)->field.le_next); \ - TRASHIT((elm)->field.le_prev); \ -} while (0) - -#define LIST_SWAP(head1, head2, type, field) \ -__MISMATCH_TAGS_PUSH \ -do { \ - struct type *swap_tmp = LIST_FIRST((head1)); \ - LIST_FIRST((head1)) = LIST_FIRST((head2)); \ - LIST_FIRST((head2)) = swap_tmp; \ - if ((swap_tmp = LIST_FIRST((head1))) != NULL) \ - swap_tmp->field.le_prev = &LIST_FIRST((head1)); \ - if ((swap_tmp = LIST_FIRST((head2))) != NULL) \ - swap_tmp->field.le_prev = &LIST_FIRST((head2)); \ -} while (0) \ + LIST_NEXT((listelm), field)->field.le_prev = \ + &LIST_NEXT((elm), field); \ + LIST_NEXT((listelm), field) = (elm); \ + (elm)->field.le_prev = &LIST_NEXT((listelm), field); \ +} while (0) + +#define LIST_INSERT_BEFORE(listelm, elm, field) do { \ + LIST_CHECK_PREV(listelm, field); \ + (elm)->field.le_prev = (listelm)->field.le_prev; \ + LIST_NEXT((elm), field) = (listelm); \ + *(listelm)->field.le_prev = (elm); \ + (listelm)->field.le_prev = &LIST_NEXT((elm), field); \ +} while (0) + +#define LIST_INSERT_HEAD(head, elm, field) do { \ + LIST_CHECK_HEAD((head), field); \ + if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \ + LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\ + LIST_FIRST((head)) = (elm); \ + (elm)->field.le_prev = &LIST_FIRST((head)); \ +} while (0) + +#define LIST_NEXT(elm, field) ((elm)->field.le_next) + +#define LIST_REMOVE(elm, field) do { \ + LIST_CHECK_NEXT(elm, field); \ + LIST_CHECK_PREV(elm, field); \ + if (LIST_NEXT((elm), field) != NULL) \ + LIST_NEXT((elm), field)->field.le_prev = \ + (elm)->field.le_prev; \ + *(elm)->field.le_prev = LIST_NEXT((elm), field); \ + TRASHIT((elm)->field.le_next); \ + TRASHIT((elm)->field.le_prev); \ +} while (0) + +#define LIST_SWAP(head1, head2, type, field) \ +__MISMATCH_TAGS_PUSH \ +do { \ + struct type *swap_tmp = LIST_FIRST((head1)); \ + LIST_FIRST((head1)) = LIST_FIRST((head2)); \ + LIST_FIRST((head2)) = swap_tmp; \ + if ((swap_tmp = LIST_FIRST((head1))) != NULL) \ + swap_tmp->field.le_prev = &LIST_FIRST((head1)); \ + if ((swap_tmp = LIST_FIRST((head2))) != NULL) \ + swap_tmp->field.le_prev = &LIST_FIRST((head2)); \ +} while (0) \ __MISMATCH_TAGS_POP /* * Tail queue declarations. 
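The LIST_* macros above form Darwin's intrusive doubly-linked list: each element embeds its own LIST_ENTRY, so insert and remove are O(1) with no separate node allocation, though unlike STAILQ the head keeps no tail pointer. A minimal userland sketch of the pattern (struct node and the nodes head are illustrative names, not taken from this header):

    #include <sys/queue.h>
    #include <stdlib.h>

    struct node {
        int value;
        LIST_ENTRY(node) link;      /* linkage lives inside the element */
    };

    LIST_HEAD(node_list, node);     /* declares struct node_list */

    int
    main(void)
    {
        struct node_list nodes = LIST_HEAD_INITIALIZER(nodes);
        struct node *n, *tmp;

        for (int i = 0; i < 3; i++) {
            n = malloc(sizeof(*n));     /* error handling elided */
            n->value = i;
            LIST_INSERT_HEAD(&nodes, n, link);
        }
        /* the _SAFE variant tolerates removing the current element */
        LIST_FOREACH_SAFE(n, &nodes, link, tmp) {
            LIST_REMOVE(n, link);
            free(n);
        }
        return 0;
    }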
*/ -#define TAILQ_HEAD(name, type) \ -__MISMATCH_TAGS_PUSH \ -struct name { \ - struct type *tqh_first; /* first element */ \ - struct type **tqh_last; /* addr of last next element */ \ - TRACEBUF \ -} \ +#define TAILQ_HEAD(name, type) \ +__MISMATCH_TAGS_PUSH \ +struct name { \ + struct type *tqh_first; /* first element */ \ + struct type **tqh_last; /* addr of last next element */ \ + TRACEBUF \ +} \ __MISMATCH_TAGS_POP -#define TAILQ_HEAD_INITIALIZER(head) \ +#define TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } -#define TAILQ_ENTRY(type) \ -__MISMATCH_TAGS_PUSH \ -struct { \ - struct type *tqe_next; /* next element */ \ - struct type **tqe_prev; /* address of previous next element */ \ - TRACEBUF \ -} \ +#define TAILQ_ENTRY(type) \ +__MISMATCH_TAGS_PUSH \ +struct { \ + struct type *tqe_next; /* next element */ \ + struct type **tqe_prev; /* address of previous next element */ \ + TRACEBUF \ +} \ __MISMATCH_TAGS_POP /* * Tail queue functions. */ #ifdef KERNEL_PRIVATE -#define TAILQ_CHECK_HEAD(head, field) do { \ - if (__improbable( \ - TAILQ_FIRST((head)) != NULL && \ - TAILQ_FIRST((head))->field.tqe_prev != \ - &TAILQ_FIRST((head)))) \ - panic("Bad tailq head %p first->prev != head", (head)); \ -} while (0) - -#define TAILQ_CHECK_NEXT(elm, field) do { \ - if (__improbable( \ - TAILQ_NEXT((elm), field) != NULL && \ - TAILQ_NEXT((elm), field)->field.tqe_prev != \ - &((elm)->field.tqe_next))) \ - panic("Bad tailq elm %p next->prev != elm", (elm)); \ +#define TAILQ_CHECK_HEAD(head, field) do { \ + if (__improbable( \ + TAILQ_FIRST((head)) != NULL && \ + TAILQ_FIRST((head))->field.tqe_prev != \ + &TAILQ_FIRST((head)))) \ + panic("Bad tailq head %p first->prev != head", (head)); \ +} while (0) + +#define TAILQ_CHECK_NEXT(elm, field) do { \ + if (__improbable( \ + TAILQ_NEXT((elm), field) != NULL && \ + TAILQ_NEXT((elm), field)->field.tqe_prev != \ + &((elm)->field.tqe_next))) \ + panic("Bad tailq elm %p next->prev != elm", (elm)); \ } while(0) -#define TAILQ_CHECK_PREV(elm, field) do { \ - if (__improbable(*(elm)->field.tqe_prev != (elm))) \ - panic("Bad tailq elm %p prev->next != elm", (elm)); \ +#define TAILQ_CHECK_PREV(elm, field) do { \ + if (__improbable(*(elm)->field.tqe_prev != (elm))) \ + panic("Bad tailq elm %p prev->next != elm", (elm)); \ } while(0) #else -#define TAILQ_CHECK_HEAD(head, field) -#define TAILQ_CHECK_NEXT(elm, field) -#define TAILQ_CHECK_PREV(elm, field) +#define TAILQ_CHECK_HEAD(head, field) +#define TAILQ_CHECK_NEXT(elm, field) +#define TAILQ_CHECK_PREV(elm, field) #endif /* KERNEL_PRIVATE */ -#define TAILQ_CONCAT(head1, head2, field) do { \ - if (!TAILQ_EMPTY(head2)) { \ - *(head1)->tqh_last = (head2)->tqh_first; \ - (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ - (head1)->tqh_last = (head2)->tqh_last; \ - TAILQ_INIT((head2)); \ - QMD_TRACE_HEAD(head1); \ - QMD_TRACE_HEAD(head2); \ - } \ +#define TAILQ_CONCAT(head1, head2, field) do { \ + if (!TAILQ_EMPTY(head2)) { \ + *(head1)->tqh_last = (head2)->tqh_first; \ + (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ + (head1)->tqh_last = (head2)->tqh_last; \ + TAILQ_INIT((head2)); \ + QMD_TRACE_HEAD(head1); \ + QMD_TRACE_HEAD(head2); \ + } \ } while (0) -#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) +#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) -#define TAILQ_FIRST(head) ((head)->tqh_first) +#define TAILQ_FIRST(head) ((head)->tqh_first) -#define TAILQ_FOREACH(var, head, field) \ - for ((var) = TAILQ_FIRST((head)); \ - (var); \ +#define TAILQ_FOREACH(var, head, field) \ 
+ for ((var) = TAILQ_FIRST((head)); \ + (var); \ (var) = TAILQ_NEXT((var), field)) -#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ - for ((var) = TAILQ_FIRST((head)); \ - (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ +#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ + for ((var) = TAILQ_FIRST((head)); \ + (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \ (var) = (tvar)) -#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ - for ((var) = TAILQ_LAST((head), headname); \ - (var); \ +#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ + for ((var) = TAILQ_LAST((head), headname); \ + (var); \ (var) = TAILQ_PREV((var), headname, field)) -#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ - for ((var) = TAILQ_LAST((head), headname); \ - (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ +#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ + for ((var) = TAILQ_LAST((head), headname); \ + (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \ (var) = (tvar)) -#define TAILQ_INIT(head) do { \ - TAILQ_FIRST((head)) = NULL; \ - (head)->tqh_last = &TAILQ_FIRST((head)); \ - QMD_TRACE_HEAD(head); \ +#define TAILQ_INIT(head) do { \ + TAILQ_FIRST((head)) = NULL; \ + (head)->tqh_last = &TAILQ_FIRST((head)); \ + QMD_TRACE_HEAD(head); \ } while (0) -#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ - TAILQ_CHECK_NEXT(listelm, field); \ +#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ + TAILQ_CHECK_NEXT(listelm, field); \ if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ - TAILQ_NEXT((elm), field)->field.tqe_prev = \ - &TAILQ_NEXT((elm), field); \ - else { \ - (head)->tqh_last = &TAILQ_NEXT((elm), field); \ - QMD_TRACE_HEAD(head); \ - } \ - TAILQ_NEXT((listelm), field) = (elm); \ - (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ - QMD_TRACE_ELEM(&(elm)->field); \ - QMD_TRACE_ELEM(&listelm->field); \ -} while (0) - -#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ - TAILQ_CHECK_PREV(listelm, field); \ - (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ - TAILQ_NEXT((elm), field) = (listelm); \ - *(listelm)->field.tqe_prev = (elm); \ - (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ - QMD_TRACE_ELEM(&(elm)->field); \ - QMD_TRACE_ELEM(&listelm->field); \ -} while (0) - -#define TAILQ_INSERT_HEAD(head, elm, field) do { \ - TAILQ_CHECK_HEAD(head, field); \ - if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ - TAILQ_FIRST((head))->field.tqe_prev = \ - &TAILQ_NEXT((elm), field); \ - else \ - (head)->tqh_last = &TAILQ_NEXT((elm), field); \ - TAILQ_FIRST((head)) = (elm); \ - (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ - QMD_TRACE_HEAD(head); \ - QMD_TRACE_ELEM(&(elm)->field); \ -} while (0) - -#define TAILQ_INSERT_TAIL(head, elm, field) do { \ - TAILQ_NEXT((elm), field) = NULL; \ - (elm)->field.tqe_prev = (head)->tqh_last; \ - *(head)->tqh_last = (elm); \ - (head)->tqh_last = &TAILQ_NEXT((elm), field); \ - QMD_TRACE_HEAD(head); \ - QMD_TRACE_ELEM(&(elm)->field); \ -} while (0) - -#define TAILQ_LAST(head, headname) \ -__MISMATCH_TAGS_PUSH \ - (*(((struct headname *)((head)->tqh_last))->tqh_last)) \ + TAILQ_NEXT((elm), field)->field.tqe_prev = \ + &TAILQ_NEXT((elm), field); \ + else { \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ + QMD_TRACE_HEAD(head); \ + } \ + TAILQ_NEXT((listelm), field) = (elm); \ + (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ + QMD_TRACE_ELEM(&(elm)->field); \ + QMD_TRACE_ELEM(&listelm->field); \ +} while 
(0) + +#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ + TAILQ_CHECK_PREV(listelm, field); \ + (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ + TAILQ_NEXT((elm), field) = (listelm); \ + *(listelm)->field.tqe_prev = (elm); \ + (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \ + QMD_TRACE_ELEM(&(elm)->field); \ + QMD_TRACE_ELEM(&listelm->field); \ +} while (0) + +#define TAILQ_INSERT_HEAD(head, elm, field) do { \ + TAILQ_CHECK_HEAD(head, field); \ + if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ + TAILQ_FIRST((head))->field.tqe_prev = \ + &TAILQ_NEXT((elm), field); \ + else \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ + TAILQ_FIRST((head)) = (elm); \ + (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ + QMD_TRACE_HEAD(head); \ + QMD_TRACE_ELEM(&(elm)->field); \ +} while (0) + +#define TAILQ_INSERT_TAIL(head, elm, field) do { \ + TAILQ_NEXT((elm), field) = NULL; \ + (elm)->field.tqe_prev = (head)->tqh_last; \ + *(head)->tqh_last = (elm); \ + (head)->tqh_last = &TAILQ_NEXT((elm), field); \ + QMD_TRACE_HEAD(head); \ + QMD_TRACE_ELEM(&(elm)->field); \ +} while (0) + +#define TAILQ_LAST(head, headname) \ +__MISMATCH_TAGS_PUSH \ + (*(((struct headname *)((head)->tqh_last))->tqh_last)) \ __MISMATCH_TAGS_POP -#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) +#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) -#define TAILQ_PREV(elm, headname, field) \ -__MISMATCH_TAGS_PUSH \ - (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) \ +#define TAILQ_PREV(elm, headname, field) \ +__MISMATCH_TAGS_PUSH \ + (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) \ __MISMATCH_TAGS_POP -#define TAILQ_REMOVE(head, elm, field) do { \ - TAILQ_CHECK_NEXT(elm, field); \ - TAILQ_CHECK_PREV(elm, field); \ - if ((TAILQ_NEXT((elm), field)) != NULL) \ - TAILQ_NEXT((elm), field)->field.tqe_prev = \ - (elm)->field.tqe_prev; \ - else { \ - (head)->tqh_last = (elm)->field.tqe_prev; \ - QMD_TRACE_HEAD(head); \ - } \ - *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ - TRASHIT((elm)->field.tqe_next); \ - TRASHIT((elm)->field.tqe_prev); \ - QMD_TRACE_ELEM(&(elm)->field); \ +#define TAILQ_REMOVE(head, elm, field) do { \ + TAILQ_CHECK_NEXT(elm, field); \ + TAILQ_CHECK_PREV(elm, field); \ + if ((TAILQ_NEXT((elm), field)) != NULL) \ + TAILQ_NEXT((elm), field)->field.tqe_prev = \ + (elm)->field.tqe_prev; \ + else { \ + (head)->tqh_last = (elm)->field.tqe_prev; \ + QMD_TRACE_HEAD(head); \ + } \ + *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ + TRASHIT((elm)->field.tqe_next); \ + TRASHIT((elm)->field.tqe_prev); \ + QMD_TRACE_ELEM(&(elm)->field); \ } while (0) /* @@ -725,139 +725,139 @@ do { \ (head2)->tqh_first = swap_first; \ (head2)->tqh_last = swap_last; \ if ((swap_first = (head1)->tqh_first) != NULL) \ - swap_first->field.tqe_prev = &(head1)->tqh_first; \ + swap_first->field.tqe_prev = &(head1)->tqh_first; \ else \ - (head1)->tqh_last = &(head1)->tqh_first; \ + (head1)->tqh_last = &(head1)->tqh_first; \ if ((swap_first = (head2)->tqh_first) != NULL) \ - swap_first->field.tqe_prev = &(head2)->tqh_first; \ + swap_first->field.tqe_prev = &(head2)->tqh_first; \ else \ - (head2)->tqh_last = &(head2)->tqh_first; \ + (head2)->tqh_last = &(head2)->tqh_first; \ } while (0) \ __MISMATCH_TAGS_POP /* * Circular queue definitions. 
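Because a TAILQ head also stores the address of the last element's next pointer (tqh_last), tail insertion is O(1) and the queue can be walked in reverse; that combination is what makes TAILQ the usual choice for FIFO work lists. A small FIFO sketch under the same conventions (struct task and the function names are illustrative):

    #include <stddef.h>
    #include <sys/queue.h>

    struct task {
        void (*fn)(void *);
        void *arg;
        TAILQ_ENTRY(task) entries;
    };

    TAILQ_HEAD(task_queue, task);

    static struct task_queue pending = TAILQ_HEAD_INITIALIZER(pending);

    static void
    enqueue(struct task *t)
    {
        TAILQ_INSERT_TAIL(&pending, t, entries);    /* O(1) via tqh_last */
    }

    static struct task *
    dequeue(void)
    {
        struct task *t = TAILQ_FIRST(&pending);
        if (t != NULL)
            TAILQ_REMOVE(&pending, t, entries);
        return t;       /* NULL when the queue is empty */
    }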
*/ -#define CIRCLEQ_HEAD(name, type) \ -__MISMATCH_TAGS_PUSH \ -struct name { \ - struct type *cqh_first; /* first element */ \ - struct type *cqh_last; /* last element */ \ -} \ +#define CIRCLEQ_HEAD(name, type) \ +__MISMATCH_TAGS_PUSH \ +struct name { \ + struct type *cqh_first; /* first element */ \ + struct type *cqh_last; /* last element */ \ +} \ __MISMATCH_TAGS_POP -#define CIRCLEQ_ENTRY(type) \ -__MISMATCH_TAGS_PUSH \ -struct { \ - struct type *cqe_next; /* next element */ \ - struct type *cqe_prev; /* previous element */ \ -} \ +#define CIRCLEQ_ENTRY(type) \ +__MISMATCH_TAGS_PUSH \ +struct { \ + struct type *cqe_next; /* next element */ \ + struct type *cqe_prev; /* previous element */ \ +} \ __MISMATCH_TAGS_POP /* * Circular queue functions. */ #ifdef KERNEL_PRIVATE -#define CIRCLEQ_CHECK_HEAD(head, field) do { \ - if (__improbable( \ - CIRCLEQ_FIRST((head)) != ((void*)(head)) && \ +#define CIRCLEQ_CHECK_HEAD(head, field) do { \ + if (__improbable( \ + CIRCLEQ_FIRST((head)) != ((void*)(head)) && \ CIRCLEQ_FIRST((head))->field.cqe_prev != ((void*)(head))))\ - panic("Bad circleq head %p first->prev != head", (head)); \ + panic("Bad circleq head %p first->prev != head", (head)); \ } while(0) -#define CIRCLEQ_CHECK_NEXT(head, elm, field) do { \ - if (__improbable( \ - CIRCLEQ_NEXT((elm), field) != ((void*)(head)) && \ - CIRCLEQ_NEXT((elm), field)->field.cqe_prev != (elm))) \ - panic("Bad circleq elm %p next->prev != elm", (elm)); \ +#define CIRCLEQ_CHECK_NEXT(head, elm, field) do { \ + if (__improbable( \ + CIRCLEQ_NEXT((elm), field) != ((void*)(head)) && \ + CIRCLEQ_NEXT((elm), field)->field.cqe_prev != (elm))) \ + panic("Bad circleq elm %p next->prev != elm", (elm)); \ } while(0) -#define CIRCLEQ_CHECK_PREV(head, elm, field) do { \ - if (__improbable( \ - CIRCLEQ_PREV((elm), field) != ((void*)(head)) && \ - CIRCLEQ_PREV((elm), field)->field.cqe_next != (elm))) \ - panic("Bad circleq elm %p prev->next != elm", (elm)); \ +#define CIRCLEQ_CHECK_PREV(head, elm, field) do { \ + if (__improbable( \ + CIRCLEQ_PREV((elm), field) != ((void*)(head)) && \ + CIRCLEQ_PREV((elm), field)->field.cqe_next != (elm))) \ + panic("Bad circleq elm %p prev->next != elm", (elm)); \ } while(0) #else -#define CIRCLEQ_CHECK_HEAD(head, field) -#define CIRCLEQ_CHECK_NEXT(head, elm, field) -#define CIRCLEQ_CHECK_PREV(head, elm, field) +#define CIRCLEQ_CHECK_HEAD(head, field) +#define CIRCLEQ_CHECK_NEXT(head, elm, field) +#define CIRCLEQ_CHECK_PREV(head, elm, field) #endif /* KERNEL_PRIVATE */ #define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) #define CIRCLEQ_FIRST(head) ((head)->cqh_first) -#define CIRCLEQ_FOREACH(var, head, field) \ - for((var) = (head)->cqh_first; \ - (var) != (void *)(head); \ +#define CIRCLEQ_FOREACH(var, head, field) \ + for((var) = (head)->cqh_first; \ + (var) != (void *)(head); \ (var) = (var)->field.cqe_next) -#define CIRCLEQ_INIT(head) do { \ - (head)->cqh_first = (void *)(head); \ - (head)->cqh_last = (void *)(head); \ -} while (0) - -#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ - CIRCLEQ_CHECK_NEXT(head, listelm, field); \ - (elm)->field.cqe_next = (listelm)->field.cqe_next; \ - (elm)->field.cqe_prev = (listelm); \ - if ((listelm)->field.cqe_next == (void *)(head)) \ - (head)->cqh_last = (elm); \ - else \ - (listelm)->field.cqe_next->field.cqe_prev = (elm); \ - (listelm)->field.cqe_next = (elm); \ -} while (0) - -#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ - CIRCLEQ_CHECK_PREV(head, listelm, field); \ - (elm)->field.cqe_next = 
(listelm); \ - (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ - if ((listelm)->field.cqe_prev == (void *)(head)) \ - (head)->cqh_first = (elm); \ - else \ - (listelm)->field.cqe_prev->field.cqe_next = (elm); \ - (listelm)->field.cqe_prev = (elm); \ -} while (0) - -#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ - CIRCLEQ_CHECK_HEAD(head, field); \ - (elm)->field.cqe_next = (head)->cqh_first; \ - (elm)->field.cqe_prev = (void *)(head); \ - if ((head)->cqh_last == (void *)(head)) \ - (head)->cqh_last = (elm); \ - else \ - (head)->cqh_first->field.cqe_prev = (elm); \ - (head)->cqh_first = (elm); \ -} while (0) - -#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ - (elm)->field.cqe_next = (void *)(head); \ - (elm)->field.cqe_prev = (head)->cqh_last; \ - if ((head)->cqh_first == (void *)(head)) \ - (head)->cqh_first = (elm); \ - else \ - (head)->cqh_last->field.cqe_next = (elm); \ - (head)->cqh_last = (elm); \ +#define CIRCLEQ_INIT(head) do { \ + (head)->cqh_first = (void *)(head); \ + (head)->cqh_last = (void *)(head); \ +} while (0) + +#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ + CIRCLEQ_CHECK_NEXT(head, listelm, field); \ + (elm)->field.cqe_next = (listelm)->field.cqe_next; \ + (elm)->field.cqe_prev = (listelm); \ + if ((listelm)->field.cqe_next == (void *)(head)) \ + (head)->cqh_last = (elm); \ + else \ + (listelm)->field.cqe_next->field.cqe_prev = (elm); \ + (listelm)->field.cqe_next = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ + CIRCLEQ_CHECK_PREV(head, listelm, field); \ + (elm)->field.cqe_next = (listelm); \ + (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ + if ((listelm)->field.cqe_prev == (void *)(head)) \ + (head)->cqh_first = (elm); \ + else \ + (listelm)->field.cqe_prev->field.cqe_next = (elm); \ + (listelm)->field.cqe_prev = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ + CIRCLEQ_CHECK_HEAD(head, field); \ + (elm)->field.cqe_next = (head)->cqh_first; \ + (elm)->field.cqe_prev = (void *)(head); \ + if ((head)->cqh_last == (void *)(head)) \ + (head)->cqh_last = (elm); \ + else \ + (head)->cqh_first->field.cqe_prev = (elm); \ + (head)->cqh_first = (elm); \ +} while (0) + +#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ + (elm)->field.cqe_next = (void *)(head); \ + (elm)->field.cqe_prev = (head)->cqh_last; \ + if ((head)->cqh_first == (void *)(head)) \ + (head)->cqh_first = (elm); \ + else \ + (head)->cqh_last->field.cqe_next = (elm); \ + (head)->cqh_last = (elm); \ } while (0) #define CIRCLEQ_LAST(head) ((head)->cqh_last) -#define CIRCLEQ_NEXT(elm,field) ((elm)->field.cqe_next) +#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) -#define CIRCLEQ_PREV(elm,field) ((elm)->field.cqe_prev) +#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) -#define CIRCLEQ_REMOVE(head, elm, field) do { \ - CIRCLEQ_CHECK_NEXT(head, elm, field); \ - CIRCLEQ_CHECK_PREV(head, elm, field); \ - if ((elm)->field.cqe_next == (void *)(head)) \ - (head)->cqh_last = (elm)->field.cqe_prev; \ - else \ - (elm)->field.cqe_next->field.cqe_prev = \ - (elm)->field.cqe_prev; \ - if ((elm)->field.cqe_prev == (void *)(head)) \ - (head)->cqh_first = (elm)->field.cqe_next; \ - else \ - (elm)->field.cqe_prev->field.cqe_next = \ - (elm)->field.cqe_next; \ +#define CIRCLEQ_REMOVE(head, elm, field) do { \ + CIRCLEQ_CHECK_NEXT(head, elm, field); \ + CIRCLEQ_CHECK_PREV(head, elm, field); \ + if ((elm)->field.cqe_next == (void *)(head)) \ + (head)->cqh_last = (elm)->field.cqe_prev; \ + else \ + 
(elm)->field.cqe_next->field.cqe_prev = \ + (elm)->field.cqe_prev; \ + if ((elm)->field.cqe_prev == (void *)(head)) \ + (head)->cqh_first = (elm)->field.cqe_next; \ + else \ + (elm)->field.cqe_prev->field.cqe_next = \ + (elm)->field.cqe_next; \ } while (0) #ifdef _KERNEL @@ -879,21 +879,21 @@ struct quehead { static __inline void chkquenext(void *a) { - struct quehead *element = (struct quehead *)a; - if (__improbable(element->qh_link != NULL && - element->qh_link->qh_rlink != element)) { - panic("Bad que elm %p next->prev != elm", a); - } + struct quehead *element = (struct quehead *)a; + if (__improbable(element->qh_link != NULL && + element->qh_link->qh_rlink != element)) { + panic("Bad que elm %p next->prev != elm", a); + } } static __inline void chkqueprev(void *a) { - struct quehead *element = (struct quehead *)a; - if (__improbable(element->qh_rlink != NULL && - element->qh_rlink->qh_link != element)) { - panic("Bad que elm %p prev->next != elm", a); - } + struct quehead *element = (struct quehead *)a; + if (__improbable(element->qh_rlink != NULL && + element->qh_rlink->qh_link != element)) { + panic("Bad que elm %p prev->next != elm", a); + } } #else /* !KERNEL_PRIVATE */ #define chkquenext(a) @@ -904,7 +904,7 @@ static __inline void insque(void *a, void *b) { struct quehead *element = (struct quehead *)a, - *head = (struct quehead *)b; + *head = (struct quehead *)b; chkquenext(head); element->qh_link = head->qh_link; @@ -927,8 +927,8 @@ remque(void *a) #else /* !__GNUC__ */ -void insque(void *a, void *b); -void remque(void *a); +void insque(void *a, void *b); +void remque(void *a); #endif /* __GNUC__ */ diff --git a/bsd/sys/quota.h b/bsd/sys/quota.h index d0022c0d9..bc9da9345 100644 --- a/bsd/sys/quota.h +++ b/bsd/sys/quota.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -86,8 +86,8 @@ * failure). The timer is started when the user crosses their soft limit, it * is reset when they go below their soft limit. */ -#define MAX_IQ_TIME (7*24*60*60) /* seconds in 1 week */ -#define MAX_DQ_TIME (7*24*60*60) /* seconds in 1 week */ +#define MAX_IQ_TIME (7*24*60*60) /* seconds in 1 week */ +#define MAX_DQ_TIME (7*24*60*60) /* seconds in 1 week */ /* * The following constants define the usage of the quota file array in the @@ -96,38 +96,38 @@ * the remainder of the quota code treats them generically and need not be * inspected when changing the size of the array. 
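The insque()/remque() pair above is the legacy VAX-style interface: both act on any structure whose first two members are the forward and back links, splicing the element in directly after a given predecessor or unlinking it in place. A hedged userland sketch (struct item is illustrative; macOS declares the userland prototypes in <search.h>):

    #include <stddef.h>
    #include <search.h>     /* userland insque()/remque() prototypes */

    struct item {
        struct item *next;      /* forward link: must be first */
        struct item *prev;      /* back link: must be second */
        int payload;
    };

    static struct item head = { &head, &head, 0 };  /* self-linked == empty */

    static void
    demo(void)
    {
        struct item a = { NULL, NULL, 42 };

        insque(&a, &head);  /* a now sits between head and its old successor */
        remque(&a);         /* unlinked; head is self-linked again */
    }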
*/ -#define MAXQUOTAS 2 -#define USRQUOTA 0 /* element used for user quotas */ -#define GRPQUOTA 1 /* element used for group quotas */ +#define MAXQUOTAS 2 +#define USRQUOTA 0 /* element used for user quotas */ +#define GRPQUOTA 1 /* element used for group quotas */ /* * Definitions for the default names of the quotas files. */ #define INITQFNAMES { \ - "user", /* USRQUOTA */ \ - "group", /* GRPQUOTA */ \ + "user", /* USRQUOTA */ \ + "group", /* GRPQUOTA */ \ "undefined", \ }; -#define QUOTAFILENAME ".quota" +#define QUOTAFILENAME ".quota" #define QUOTAOPSNAME ".quota.ops" -#define QUOTAGROUP "operator" +#define QUOTAGROUP "operator" /* * Command definitions for the 'quotactl' system call. The commands are * broken into a main command defined below and a subcommand that is used * to convey the type of quota that is being manipulated (see above). */ -#define SUBCMDMASK 0x00ff -#define SUBCMDSHIFT 8 -#define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK)) - -#define Q_QUOTAON 0x0100 /* enable quotas */ -#define Q_QUOTAOFF 0x0200 /* disable quotas */ -#define Q_GETQUOTA 0x0300 /* get limits and usage */ -#define Q_SETQUOTA 0x0400 /* set limits and usage */ -#define Q_SETUSE 0x0500 /* set usage */ -#define Q_SYNC 0x0600 /* sync disk copy of a filesystems quotas */ -#define Q_QUOTASTAT 0x0700 /* get quota on/off status */ +#define SUBCMDMASK 0x00ff +#define SUBCMDSHIFT 8 +#define QCMD(cmd, type) (((cmd) << SUBCMDSHIFT) | ((type) & SUBCMDMASK)) + +#define Q_QUOTAON 0x0100 /* enable quotas */ +#define Q_QUOTAOFF 0x0200 /* disable quotas */ +#define Q_GETQUOTA 0x0300 /* get limits and usage */ +#define Q_SETQUOTA 0x0400 /* set limits and usage */ +#define Q_SETUSE 0x0500 /* set usage */ +#define Q_SYNC 0x0600 /* sync disk copy of a filesystems quotas */ +#define Q_QUOTASTAT 0x0700 /* get quota on/off status */ /* * The following two structures define the format of the disk @@ -147,55 +147,55 @@ * file (a pointer is retained in the filesystem mount structure). 
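QCMD() packs the main command into the upper bits and the quota type into the low byte, so QCMD(Q_GETQUOTA, USRQUOTA) evaluates to 0x030000 and a single int selects both. A hedged sketch of reading the current user's limits through quotactl(2) (the "/" mount path is illustrative and error handling is elided):

    #include <sys/types.h>
    #include <sys/quota.h>
    #include <unistd.h>

    static void
    show_quota(void)
    {
        struct dqblk dqb;

        if (quotactl("/", QCMD(Q_GETQUOTA, USRQUOTA),
            (int)getuid(), (caddr_t)&dqb) == 0) {
            /* dqb_curbytes is current usage; dqb_bsoftlimit and
             * dqb_bhardlimit are the byte limits from the quota file */
        }
    }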
*/ struct dqfilehdr { - u_int32_t dqh_magic; - u_int32_t dqh_version; /* == QF_VERSION */ - u_int32_t dqh_maxentries; /* must be a power of 2 */ - u_int32_t dqh_entrycnt; /* count of active entries */ - u_int32_t dqh_flags; /* reserved for now (0) */ - u_int32_t dqh_chktime; /* time of last quota check */ - u_int32_t dqh_btime; /* time limit for excessive disk use */ - u_int32_t dqh_itime; /* time limit for excessive files */ - char dqh_string[16]; /* tag string */ - u_int32_t dqh_spare[4]; /* pad struct to power of 2 */ + u_int32_t dqh_magic; + u_int32_t dqh_version; /* == QF_VERSION */ + u_int32_t dqh_maxentries; /* must be a power of 2 */ + u_int32_t dqh_entrycnt; /* count of active entries */ + u_int32_t dqh_flags; /* reserved for now (0) */ + u_int32_t dqh_chktime; /* time of last quota check */ + u_int32_t dqh_btime; /* time limit for excessive disk use */ + u_int32_t dqh_itime; /* time limit for excessive files */ + char dqh_string[16]; /* tag string */ + u_int32_t dqh_spare[4]; /* pad struct to power of 2 */ }; struct dqblk { - u_int64_t dqb_bhardlimit; /* absolute limit on disk bytes alloc */ - u_int64_t dqb_bsoftlimit; /* preferred limit on disk bytes */ - u_int64_t dqb_curbytes; /* current byte count */ - u_int32_t dqb_ihardlimit; /* maximum # allocated inodes + 1 */ - u_int32_t dqb_isoftlimit; /* preferred inode limit */ - u_int32_t dqb_curinodes; /* current # allocated inodes */ - u_int32_t dqb_btime; /* time limit for excessive disk use */ - u_int32_t dqb_itime; /* time limit for excessive files */ - u_int32_t dqb_id; /* identifier (0 for empty entries) */ - u_int32_t dqb_spare[4]; /* pad struct to power of 2 */ + u_int64_t dqb_bhardlimit; /* absolute limit on disk bytes alloc */ + u_int64_t dqb_bsoftlimit; /* preferred limit on disk bytes */ + u_int64_t dqb_curbytes; /* current byte count */ + u_int32_t dqb_ihardlimit; /* maximum # allocated inodes + 1 */ + u_int32_t dqb_isoftlimit; /* preferred inode limit */ + u_int32_t dqb_curinodes; /* current # allocated inodes */ + u_int32_t dqb_btime; /* time limit for excessive disk use */ + u_int32_t dqb_itime; /* time limit for excessive files */ + u_int32_t dqb_id; /* identifier (0 for empty entries) */ + u_int32_t dqb_spare[4]; /* pad struct to power of 2 */ }; #ifdef KERNEL_PRIVATE -#include /* user_time_t */ -/* LP64 version of struct dqblk. time_t is a long and must grow when +#include /* user_time_t */ +/* LP64 version of struct dqblk. time_t is a long and must grow when * we're dealing with a 64-bit process. 
* WARNING - keep in sync with struct dqblk */ struct user_dqblk { - u_int64_t dqb_bhardlimit; /* absolute limit on disk bytes alloc */ - u_int64_t dqb_bsoftlimit; /* preferred limit on disk bytes */ - u_int64_t dqb_curbytes; /* current byte count */ - u_int32_t dqb_ihardlimit; /* maximum # allocated inodes + 1 */ - u_int32_t dqb_isoftlimit; /* preferred inode limit */ - u_int32_t dqb_curinodes; /* current # allocated inodes */ - u_int32_t dqb_btime; /* time limit for excessive disk use */ - u_int32_t dqb_itime; /* time limit for excessive files */ - u_int32_t dqb_id; /* identifier (0 for empty entries) */ - u_int32_t dqb_spare[4]; /* pad struct to power of 2 */ + u_int64_t dqb_bhardlimit; /* absolute limit on disk bytes alloc */ + u_int64_t dqb_bsoftlimit; /* preferred limit on disk bytes */ + u_int64_t dqb_curbytes; /* current byte count */ + u_int32_t dqb_ihardlimit; /* maximum # allocated inodes + 1 */ + u_int32_t dqb_isoftlimit; /* preferred inode limit */ + u_int32_t dqb_curinodes; /* current # allocated inodes */ + u_int32_t dqb_btime; /* time limit for excessive disk use */ + u_int32_t dqb_itime; /* time limit for excessive files */ + u_int32_t dqb_id; /* identifier (0 for empty entries) */ + u_int32_t dqb_spare[4]; /* pad struct to power of 2 */ }; #endif /* KERNEL_PRIVATE */ #define INITQMAGICS { \ - 0xff31ff35, /* USRQUOTA */ \ - 0xff31ff27, /* GRPQUOTA */ \ + 0xff31ff35, /* USRQUOTA */ \ + 0xff31ff27, /* GRPQUOTA */ \ }; #define QF_VERSION 1 @@ -237,9 +237,10 @@ dqhashshift(u_int32_t size) { int shift; - for (shift = 32; size > 1; size >>= 1, --shift) + for (shift = 32; size > 1; size >>= 1, --shift) { continue; - return (shift); + } + return shift; } @@ -257,7 +258,7 @@ __END_DECLS /* Quota file info */ struct quotafile { - lck_mtx_t qf_lock; /* quota file mutex */ + lck_mtx_t qf_lock; /* quota file mutex */ struct vnode *qf_vp; /* quota file vnode */ kauth_cred_t qf_cred; /* quota file access cred */ int qf_shift; /* primary hash shift */ @@ -266,19 +267,19 @@ struct quotafile { u_int32_t qf_btime; /* block quota time limit */ u_int32_t qf_itime; /* inode quota time limit */ - /* the following 2 fields are protected */ - /* by the quota list lock */ + /* the following 2 fields are protected */ + /* by the quota list lock */ char qf_qflags; /* quota specific flags */ - int qf_refcnt; /* count of dquot refs on this file */ + int qf_refcnt; /* count of dquot refs on this file */ }; /* * Flags describing the runtime state of quotas. * (in qf_qflags) */ -#define QTF_OPENING 0x01 /* Q_QUOTAON in progress */ -#define QTF_CLOSING 0x02 /* Q_QUOTAOFF in progress */ -#define QTF_WANTED 0x04 /* waiting for change of state */ +#define QTF_OPENING 0x01 /* Q_QUOTAON in progress */ +#define QTF_CLOSING 0x02 /* Q_QUOTAOFF in progress */ +#define QTF_WANTED 0x04 /* waiting for change of state */ /* @@ -288,57 +289,57 @@ struct quotafile { * used entries. 
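Because dqh_maxentries must be a power of two, dqhashshift() above reduces to 32 - log2(size): for size 1024 the loop halves size ten times and returns 22. A right shift by that amount turns a 32-bit multiplicative hash into an index that keeps only the top log2(size) bits, roughly like this toy (not the kernel's actual hash function):

    /* with shift == 22 this yields an index in [0, 1024) */
    static u_int32_t
    toy_dqhash(u_int32_t id, int shift)
    {
        return (id * 2654435761U) >> shift;     /* keep the top 32 - shift bits */
    }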
*/ struct dquot { - LIST_ENTRY(dquot) dq_hash; /* hash list */ - TAILQ_ENTRY(dquot) dq_freelist; /* free list */ - u_int16_t dq_flags; /* flags, see below */ - u_int16_t dq_cnt_unused; /* Replaced by dq_cnt below */ - u_int16_t dq_lflags; /* protected by the quota list lock */ - u_int16_t dq_type; /* quota type of this dquot */ - u_int32_t dq_id; /* identifier this applies to */ - u_int32_t dq_index; /* index into quota file */ - struct quotafile *dq_qfile; /* quota file that this is taken from */ - struct dqblk dq_dqb; /* actual usage & quotas */ - uint32_t dq_cnt; /* count of active references */ + LIST_ENTRY(dquot) dq_hash; /* hash list */ + TAILQ_ENTRY(dquot) dq_freelist; /* free list */ + u_int16_t dq_flags; /* flags, see below */ + u_int16_t dq_cnt_unused; /* Replaced by dq_cnt below */ + u_int16_t dq_lflags; /* protected by the quota list lock */ + u_int16_t dq_type; /* quota type of this dquot */ + u_int32_t dq_id; /* identifier this applies to */ + u_int32_t dq_index; /* index into quota file */ + struct quotafile *dq_qfile; /* quota file that this is taken from */ + struct dqblk dq_dqb; /* actual usage & quotas */ + uint32_t dq_cnt; /* count of active references */ }; /* * dq_lflags values */ -#define DQ_LLOCK 0x01 /* this quota locked (no MODS) */ -#define DQ_LWANT 0x02 /* wakeup on unlock */ +#define DQ_LLOCK 0x01 /* this quota locked (no MODS) */ +#define DQ_LWANT 0x02 /* wakeup on unlock */ /* * dq_flags values */ -#define DQ_MOD 0x01 /* this quota modified since read */ -#define DQ_FAKE 0x02 /* no limits here, just usage */ -#define DQ_BLKS 0x04 /* has been warned about blk limit */ -#define DQ_INODS 0x08 /* has been warned about inode limit */ +#define DQ_MOD 0x01 /* this quota modified since read */ +#define DQ_FAKE 0x02 /* no limits here, just usage */ +#define DQ_BLKS 0x04 /* has been warned about blk limit */ +#define DQ_INODS 0x08 /* has been warned about inode limit */ /* * Shorthand notation. */ -#define dq_bhardlimit dq_dqb.dqb_bhardlimit -#define dq_bsoftlimit dq_dqb.dqb_bsoftlimit -#define dq_curbytes dq_dqb.dqb_curbytes -#define dq_ihardlimit dq_dqb.dqb_ihardlimit -#define dq_isoftlimit dq_dqb.dqb_isoftlimit -#define dq_curinodes dq_dqb.dqb_curinodes -#define dq_btime dq_dqb.dqb_btime -#define dq_itime dq_dqb.dqb_itime +#define dq_bhardlimit dq_dqb.dqb_bhardlimit +#define dq_bsoftlimit dq_dqb.dqb_bsoftlimit +#define dq_curbytes dq_dqb.dqb_curbytes +#define dq_ihardlimit dq_dqb.dqb_ihardlimit +#define dq_isoftlimit dq_dqb.dqb_isoftlimit +#define dq_curinodes dq_dqb.dqb_curinodes +#define dq_btime dq_dqb.dqb_btime +#define dq_itime dq_dqb.dqb_itime /* * If the system has never checked for a quota for this file, then it is * set to NODQUOT. Once a write attempt is made the inode pointer is set * to reference a dquot structure. */ -#define NODQUOT NULL +#define NODQUOT NULL /* * Flags to chkdq() and chkiq() */ -#define FORCE 0x01 /* force usage changes independent of limits */ -#define CHOWN 0x02 /* (advisory) change initiated by chown */ +#define FORCE 0x01 /* force usage changes independent of limits */ +#define CHOWN 0x02 /* (advisory) change initiated by chown */ /* @@ -346,24 +347,24 @@ struct dquot { * on-disk dqblk data structures. 
*/ __BEGIN_DECLS -void dqfileinit(struct quotafile *); -int dqfileopen(struct quotafile *, int); -void dqfileclose(struct quotafile *, int); -void dqflush(struct vnode *); -int dqget(u_int32_t, struct quotafile *, int, struct dquot **); -void dqhashinit(void); -void dqinit(void); -int dqisinitialized(void); -void dqref(struct dquot *); -void dqrele(struct dquot *); -void dqreclaim(struct dquot *); -int dqsync(struct dquot *); -void dqsync_orphans(struct quotafile *); -void dqlock(struct dquot *); -void dqunlock(struct dquot *); - -int qf_get(struct quotafile *, int type); -void qf_put(struct quotafile *, int type); +void dqfileinit(struct quotafile *); +int dqfileopen(struct quotafile *, int); +void dqfileclose(struct quotafile *, int); +void dqflush(struct vnode *); +int dqget(u_int32_t, struct quotafile *, int, struct dquot **); +void dqhashinit(void); +void dqinit(void); +int dqisinitialized(void); +void dqref(struct dquot *); +void dqrele(struct dquot *); +void dqreclaim(struct dquot *); +int dqsync(struct dquot *); +void dqsync_orphans(struct quotafile *); +void dqlock(struct dquot *); +void dqunlock(struct dquot *); + +int qf_get(struct quotafile *, int type); +void qf_put(struct quotafile *, int type); __private_extern__ void munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64); __END_DECLS diff --git a/bsd/sys/random.h b/bsd/sys/random.h index 5ba451a5f..1d5b00556 100644 --- a/bsd/sys/random.h +++ b/bsd/sys/random.h @@ -2,7 +2,7 @@ * Copyright (c) 1999, 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,7 +34,7 @@ #ifndef KERNEL __BEGIN_DECLS -__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) + __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) int getentropy(void* buffer, size_t size); __END_DECLS @@ -49,4 +49,3 @@ __END_DECLS #endif /* KERNEL */ #endif /* __SYS_RANDOM_H__ */ - diff --git a/bsd/sys/reason.h b/bsd/sys/reason.h index ce2d47670..c69534091 100644 --- a/bsd/sys/reason.h +++ b/bsd/sys/reason.h @@ -41,14 +41,14 @@ __BEGIN_DECLS #include typedef struct os_reason { - decl_lck_mtx_data(, osr_lock) - unsigned int osr_refcount; - uint32_t osr_namespace; - uint64_t osr_code; - uint64_t osr_flags; - uint32_t osr_bufsize; - struct kcdata_descriptor osr_kcd_descriptor; - char *osr_kcd_buf; + decl_lck_mtx_data(, osr_lock) + unsigned int osr_refcount; + uint32_t osr_namespace; + uint64_t osr_code; + uint64_t osr_flags; + uint32_t osr_bufsize; + struct kcdata_descriptor osr_kcd_descriptor; + char *osr_kcd_buf; } *os_reason_t; #define OS_REASON_NULL ((os_reason_t) 0) @@ -59,7 +59,7 @@ typedef struct os_reason { void os_reason_init(void); os_reason_t build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reason_code, user_addr_t payload, uint32_t payload_size, - user_addr_t reason_string, uint64_t reason_flags); + user_addr_t reason_string, uint64_t reason_flags); char *launchd_exit_reason_get_string_desc(os_reason_t exit_reason); /* The blocking allocation is currently not exported to KEXTs */ @@ -171,7 +171,7 @@ void abort_with_reason(uint32_t reason_namespace, uint64_t reason_code, const ch * Outputs: Does not return. */ void abort_with_payload(uint32_t reason_namespace, uint64_t reason_code, void *payload, uint32_t payload_size, const char *reason_string, - uint64_t reason_flags) __attribute__((noreturn)); + uint64_t reason_flags) __attribute__((noreturn)); /* * terminate_with_reason: Used to terminate a specific process and pass along @@ -210,7 +210,7 @@ int terminate_with_reason(int pid, uint32_t reason_namespace, uint64_t reason_co * returns 0 otherwise */ int terminate_with_payload(int pid, uint32_t reason_namespace, uint64_t reason_code, void *payload, uint32_t payload_size, - const char *reason_string, uint64_t reason_flags); + const char *reason_string, uint64_t reason_flags); #endif /* KERNEL */ /* diff --git a/bsd/sys/reboot.h b/bsd/sys/reboot.h index fcdc2fe55..38088aa9b 100644 --- a/bsd/sys/reboot.h +++ b/bsd/sys/reboot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
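The getentropy() declaration above is the entire userland surface of random.h: it fills a buffer of at most 256 bytes from the kernel's random source and either succeeds completely or fails with no partial fill. A minimal sketch:

    #include <sys/random.h>
    #include <stdint.h>

    static int
    make_key(uint8_t key[32])
    {
        /* requests larger than 256 bytes fail with EINVAL; on success
         * the buffer is fully initialized */
        return getentropy(key, 32);
    }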
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -61,7 +61,7 @@ * @(#)reboot.h 8.3 (Berkeley) 12/13/94 */ -#ifndef _SYS_REBOOT_H_ +#ifndef _SYS_REBOOT_H_ #define _SYS_REBOOT_H_ #include @@ -73,21 +73,21 @@ */ #ifdef __APPLE_API_PRIVATE -#define RB_AUTOBOOT 0 /* flags for system auto-booting itself */ - -#define RB_ASKNAME 0x01 /* ask for file name to reboot from */ -#define RB_SINGLE 0x02 /* reboot to single user only */ -#define RB_NOSYNC 0x04 /* dont sync before reboot */ -#define RB_HALT 0x08 /* don't reboot, just halt */ -#define RB_INITNAME 0x10 /* name given for /etc/init */ -#define RB_DFLTROOT 0x20 /* use compiled-in rootdev */ -#define RB_ALTBOOT 0x40 /* use /boot.old vs /boot */ -#define RB_UNIPROC 0x80 /* don't start slaves */ -#define RB_SAFEBOOT 0x100 /* booting safe */ +#define RB_AUTOBOOT 0 /* flags for system auto-booting itself */ + +#define RB_ASKNAME 0x01 /* ask for file name to reboot from */ +#define RB_SINGLE 0x02 /* reboot to single user only */ +#define RB_NOSYNC 0x04 /* dont sync before reboot */ +#define RB_HALT 0x08 /* don't reboot, just halt */ +#define RB_INITNAME 0x10 /* name given for /etc/init */ +#define RB_DFLTROOT 0x20 /* use compiled-in rootdev */ +#define RB_ALTBOOT 0x40 /* use /boot.old vs /boot */ +#define RB_UNIPROC 0x80 /* don't start slaves */ +#define RB_SAFEBOOT 0x100 /* booting safe */ #define RB_UPSDELAY 0x200 /* Delays restart by 5 minutes */ -#define RB_QUICK 0x400 /* quick and ungraceful reboot with file system caches flushed*/ -#define RB_PANIC 0x800 /* panic the kernel */ -#define RB_PANIC_ZPRINT 0x1000 /* add zprint info to panic string */ +#define RB_QUICK 0x400 /* quick and ungraceful reboot with file system caches flushed*/ +#define RB_PANIC 0x800 /* panic the kernel */ +#define RB_PANIC_ZPRINT 0x1000 /* add zprint info to panic string */ #ifndef KERNEL __BEGIN_DECLS @@ -116,23 +116,23 @@ __END_DECLS * |MA | AD| CT| UN| PART | TYPE | * -------------------------------- */ -#define B_ADAPTORSHIFT 24 -#define B_ADAPTORMASK 0x0f -#define B_ADAPTOR(val) (((val) >> B_ADAPTORSHIFT) & B_ADAPTORMASK) -#define B_CONTROLLERSHIFT 20 -#define B_CONTROLLERMASK 0xf -#define B_CONTROLLER(val) (((val)>>B_CONTROLLERSHIFT) & B_CONTROLLERMASK) -#define B_UNITSHIFT 16 -#define B_UNITMASK 0xff -#define B_UNIT(val) (((val) >> B_UNITSHIFT) & B_UNITMASK) +#define B_ADAPTORSHIFT 24 +#define B_ADAPTORMASK 0x0f +#define B_ADAPTOR(val) (((val) >> B_ADAPTORSHIFT) & B_ADAPTORMASK) +#define B_CONTROLLERSHIFT 20 +#define B_CONTROLLERMASK 0xf +#define B_CONTROLLER(val) (((val)>>B_CONTROLLERSHIFT) & B_CONTROLLERMASK) +#define B_UNITSHIFT 16 +#define B_UNITMASK 0xff +#define B_UNIT(val) (((val) >> B_UNITSHIFT) & B_UNITMASK) #define B_PARTITIONSHIFT 8 -#define B_PARTITIONMASK 0xff -#define B_PARTITION(val) (((val) >> B_PARTITIONSHIFT) & B_PARTITIONMASK) -#define B_TYPESHIFT 0 -#define B_TYPEMASK 0xff -#define B_TYPE(val) (((val) >> B_TYPESHIFT) & B_TYPEMASK) -#define B_MAGICMASK 0xf0000000 -#define B_DEVMAGIC 0xa0000000 +#define B_PARTITIONMASK 0xff +#define B_PARTITION(val) (((val) >> B_PARTITIONSHIFT) & B_PARTITIONMASK) +#define B_TYPESHIFT 0 +#define B_TYPEMASK 0xff +#define B_TYPE(val) (((val) >> B_TYPESHIFT) & B_TYPEMASK) +#define B_MAGICMASK 0xf0000000 +#define B_DEVMAGIC 0xa0000000 #define MAKEBOOTDEV(type, adaptor, controller, unit, partition) \ (((type) << B_TYPESHIFT) | ((adaptor) << 
B_ADAPTORSHIFT) | \ @@ -145,7 +145,7 @@ __END_DECLS #include __BEGIN_DECLS -int reboot_kernel(int, char *); +int reboot_kernel(int, char *); __END_DECLS #define PROC_SHUTDOWN_LOG "/var/log/kernel-shutdown.log" @@ -158,4 +158,4 @@ int get_system_inshutdown(void); __END_DECLS #endif /* KERNEL_PRIVATE */ -#endif /* _SYS_REBOOT_H_ */ +#endif /* _SYS_REBOOT_H_ */ diff --git a/bsd/sys/resource.h b/bsd/sys/resource.h index 55b553a1b..357768313 100644 --- a/bsd/sys/resource.h +++ b/bsd/sys/resource.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_RESOURCE_H_ -#define _SYS_RESOURCE_H_ +#define _SYS_RESOURCE_H_ #include #include @@ -92,7 +92,7 @@ /* * Resource limit type (low 63 bits, excluding the sign bit) */ -typedef __uint64_t rlim_t; +typedef __uint64_t rlim_t; /***** @@ -103,17 +103,17 @@ typedef __uint64_t rlim_t; * Possible values of the first parameter to getpriority()/setpriority(), * used to indicate the type of the second parameter. */ -#define PRIO_PROCESS 0 /* Second argument is a PID */ -#define PRIO_PGRP 1 /* Second argument is a GID */ -#define PRIO_USER 2 /* Second argument is a UID */ +#define PRIO_PROCESS 0 /* Second argument is a PID */ +#define PRIO_PGRP 1 /* Second argument is a GID */ +#define PRIO_USER 2 /* Second argument is a UID */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define PRIO_DARWIN_THREAD 3 /* Second argument is always 0 (current thread) */ -#define PRIO_DARWIN_PROCESS 4 /* Second argument is a PID */ +#define PRIO_DARWIN_THREAD 3 /* Second argument is always 0 (current thread) */ +#define PRIO_DARWIN_PROCESS 4 /* Second argument is a PID */ #ifdef PRIVATE -#define PRIO_DARWIN_GPU 5 /* Second argument is a PID */ +#define PRIO_DARWIN_GPU 5 /* Second argument is a PID */ #define PRIO_DARWIN_GPU_ALLOW 0x1 #define PRIO_DARWIN_GPU_DENY 0x2 @@ -133,10 +133,10 @@ typedef __uint64_t rlim_t; /* * Range limitations for the value of the third parameter to setpriority(). 
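The PRIO_DARWIN_* selectors extend the classic getpriority()/setpriority() interface: PRIO_DARWIN_THREAD always targets the calling thread (its who argument must be 0), and, as the PRIO_DARWIN_BG notes just below describe, passing PRIO_DARWIN_BG as the priority throttles CPU, disk I/O, and networking until it is revoked. A hedged sketch:

    #include <sys/resource.h>

    static void
    demo_priorities(void)
    {
        /* classic POSIX use: renice the current process to +10 */
        setpriority(PRIO_PROCESS, 0, 10);

        /* Darwin extension: drop the calling thread into background
         * state, then restore it (a priority of 0 revokes it) */
        setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
        setpriority(PRIO_DARWIN_THREAD, 0, 0);
    }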
*/ -#define PRIO_MIN -20 -#define PRIO_MAX 20 +#define PRIO_MIN -20 +#define PRIO_MAX 20 -/* +/* * use PRIO_DARWIN_BG to set the current thread into "background" state * which lowers CPU, disk IO, and networking priorites until thread terminates * or "background" state is revoked @@ -149,7 +149,7 @@ typedef __uint64_t rlim_t; */ #define PRIO_DARWIN_NONUI 0x1001 -#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ +#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ @@ -161,8 +161,8 @@ typedef __uint64_t rlim_t; * Possible values of the first parameter to getrusage(), used to indicate * the scope of the information to be returned. */ -#define RUSAGE_SELF 0 /* Current process information */ -#define RUSAGE_CHILDREN -1 /* Current process' children */ +#define RUSAGE_SELF 0 /* Current process information */ +#define RUSAGE_CHILDREN -1 /* Current process' children */ /* * A structure representing an accounting of resource utilization. The @@ -173,46 +173,46 @@ typedef __uint64_t rlim_t; * defined and subject to change in a future release. Their use * is discouraged for standards compliant programs. */ -struct rusage { - struct timeval ru_utime; /* user time used (PL) */ - struct timeval ru_stime; /* system time used (PL) */ +struct rusage { + struct timeval ru_utime; /* user time used (PL) */ + struct timeval ru_stime; /* system time used (PL) */ #if __DARWIN_C_LEVEL < __DARWIN_C_FULL - long ru_opaque[14]; /* implementation defined */ + long ru_opaque[14]; /* implementation defined */ #else /* * Informational aliases for source compatibility with programs * that need more information than that provided by standards, * and which do not mind being OS-dependent. */ - long ru_maxrss; /* max resident set size (PL) */ -#define ru_first ru_ixrss /* internal: ruadd() range start */ - long ru_ixrss; /* integral shared memory size (NU) */ - long ru_idrss; /* integral unshared data (NU) */ - long ru_isrss; /* integral unshared stack (NU) */ - long ru_minflt; /* page reclaims (NU) */ - long ru_majflt; /* page faults (NU) */ - long ru_nswap; /* swaps (NU) */ - long ru_inblock; /* block input operations (atomic) */ - long ru_oublock; /* block output operations (atomic) */ - long ru_msgsnd; /* messages sent (atomic) */ - long ru_msgrcv; /* messages received (atomic) */ - long ru_nsignals; /* signals received (atomic) */ - long ru_nvcsw; /* voluntary context switches (atomic) */ - long ru_nivcsw; /* involuntary " */ -#define ru_last ru_nivcsw /* internal: ruadd() range end */ -#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ + long ru_maxrss; /* max resident set size (PL) */ +#define ru_first ru_ixrss /* internal: ruadd() range start */ + long ru_ixrss; /* integral shared memory size (NU) */ + long ru_idrss; /* integral unshared data (NU) */ + long ru_isrss; /* integral unshared stack (NU) */ + long ru_minflt; /* page reclaims (NU) */ + long ru_majflt; /* page faults (NU) */ + long ru_nswap; /* swaps (NU) */ + long ru_inblock; /* block input operations (atomic) */ + long ru_oublock; /* block output operations (atomic) */ + long ru_msgsnd; /* messages sent (atomic) */ + long ru_msgrcv; /* messages received (atomic) */ + long ru_nsignals; /* signals received (atomic) */ + long ru_nvcsw; /* voluntary context switches (atomic) */ + long ru_nivcsw; /* involuntary " */ +#define ru_last ru_nivcsw /* internal: ruadd() range end */ +#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ }; #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL /* * Flavors for proc_pid_rusage(). 
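The standards-visible half of struct rusage is read with getrusage(); everything past ru_stime is the Darwin-specific block annotated above. A minimal sketch (note that macOS reports ru_maxrss in bytes, not kilobytes):

    #include <sys/resource.h>
    #include <stdio.h>

    static void
    report_usage(void)
    {
        struct rusage ru;

        if (getrusage(RUSAGE_SELF, &ru) == 0) {
            printf("user %lds sys %lds maxrss %ld bytes\n",
                (long)ru.ru_utime.tv_sec, (long)ru.ru_stime.tv_sec,
                ru.ru_maxrss);
        }
    }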
*/ -#define RUSAGE_INFO_V0 0 -#define RUSAGE_INFO_V1 1 -#define RUSAGE_INFO_V2 2 -#define RUSAGE_INFO_V3 3 -#define RUSAGE_INFO_V4 4 -#define RUSAGE_INFO_CURRENT RUSAGE_INFO_V4 +#define RUSAGE_INFO_V0 0 +#define RUSAGE_INFO_V1 1 +#define RUSAGE_INFO_V2 2 +#define RUSAGE_INFO_V3 3 +#define RUSAGE_INFO_V4 4 +#define RUSAGE_INFO_CURRENT RUSAGE_INFO_V4 typedef void *rusage_info_t; @@ -338,7 +338,7 @@ struct rusage_info_v4 { uint64_t ri_cycles; uint64_t ri_billed_energy; uint64_t ri_serviced_energy; - uint64_t ri_interval_max_phys_footprint; + uint64_t ri_interval_max_phys_footprint; // 1 reserve counter(s) remaining for future extension uint64_t ri_unused[1]; }; @@ -350,8 +350,8 @@ typedef struct rusage_info_v4 rusage_info_current; #ifdef KERNEL struct rusage_superset { - struct rusage ru; - rusage_info_current ri; + struct rusage ru; + rusage_info_current ri; }; struct rusage_info_child { @@ -363,42 +363,42 @@ struct rusage_info_child { uint64_t ri_child_elapsed_abstime; }; -struct user64_rusage { - struct user64_timeval ru_utime; /* user time used */ - struct user64_timeval ru_stime; /* system time used */ - user64_long_t ru_maxrss; /* max resident set size */ - user64_long_t ru_ixrss; /* integral shared memory size */ - user64_long_t ru_idrss; /* integral unshared data " */ - user64_long_t ru_isrss; /* integral unshared stack " */ - user64_long_t ru_minflt; /* page reclaims */ - user64_long_t ru_majflt; /* page faults */ - user64_long_t ru_nswap; /* swaps */ - user64_long_t ru_inblock; /* block input operations */ - user64_long_t ru_oublock; /* block output operations */ - user64_long_t ru_msgsnd; /* messages sent */ - user64_long_t ru_msgrcv; /* messages received */ - user64_long_t ru_nsignals; /* signals received */ - user64_long_t ru_nvcsw; /* voluntary context switches */ - user64_long_t ru_nivcsw; /* involuntary " */ +struct user64_rusage { + struct user64_timeval ru_utime; /* user time used */ + struct user64_timeval ru_stime; /* system time used */ + user64_long_t ru_maxrss; /* max resident set size */ + user64_long_t ru_ixrss; /* integral shared memory size */ + user64_long_t ru_idrss; /* integral unshared data " */ + user64_long_t ru_isrss; /* integral unshared stack " */ + user64_long_t ru_minflt; /* page reclaims */ + user64_long_t ru_majflt; /* page faults */ + user64_long_t ru_nswap; /* swaps */ + user64_long_t ru_inblock; /* block input operations */ + user64_long_t ru_oublock; /* block output operations */ + user64_long_t ru_msgsnd; /* messages sent */ + user64_long_t ru_msgrcv; /* messages received */ + user64_long_t ru_nsignals; /* signals received */ + user64_long_t ru_nvcsw; /* voluntary context switches */ + user64_long_t ru_nivcsw; /* involuntary " */ }; -struct user32_rusage { - struct user32_timeval ru_utime; /* user time used */ - struct user32_timeval ru_stime; /* system time used */ - user32_long_t ru_maxrss; /* max resident set size */ - user32_long_t ru_ixrss; /* integral shared memory size */ - user32_long_t ru_idrss; /* integral unshared data " */ - user32_long_t ru_isrss; /* integral unshared stack " */ - user32_long_t ru_minflt; /* page reclaims */ - user32_long_t ru_majflt; /* page faults */ - user32_long_t ru_nswap; /* swaps */ - user32_long_t ru_inblock; /* block input operations */ - user32_long_t ru_oublock; /* block output operations */ - user32_long_t ru_msgsnd; /* messages sent */ - user32_long_t ru_msgrcv; /* messages received */ - user32_long_t ru_nsignals; /* signals received */ - user32_long_t ru_nvcsw; /* voluntary context switches */ - 
user32_long_t ru_nivcsw; /* involuntary " */ +struct user32_rusage { + struct user32_timeval ru_utime; /* user time used */ + struct user32_timeval ru_stime; /* system time used */ + user32_long_t ru_maxrss; /* max resident set size */ + user32_long_t ru_ixrss; /* integral shared memory size */ + user32_long_t ru_idrss; /* integral unshared data " */ + user32_long_t ru_isrss; /* integral unshared stack " */ + user32_long_t ru_minflt; /* page reclaims */ + user32_long_t ru_majflt; /* page faults */ + user32_long_t ru_nswap; /* swaps */ + user32_long_t ru_inblock; /* block input operations */ + user32_long_t ru_oublock; /* block output operations */ + user32_long_t ru_msgsnd; /* messages sent */ + user32_long_t ru_msgrcv; /* messages received */ + user32_long_t ru_nsignals; /* signals received */ + user32_long_t ru_nvcsw; /* voluntary context switches */ + user32_long_t ru_nivcsw; /* involuntary " */ }; #endif /* KERNEL */ @@ -413,38 +413,38 @@ struct user32_rusage { * as a type rlim_t, we are permitted to define RLIM_SAVED_* in terms of * RLIM_INFINITY. */ -#define RLIM_INFINITY (((__uint64_t)1 << 63) - 1) /* no limit */ -#define RLIM_SAVED_MAX RLIM_INFINITY /* Unrepresentable hard limit */ -#define RLIM_SAVED_CUR RLIM_INFINITY /* Unrepresentable soft limit */ +#define RLIM_INFINITY (((__uint64_t)1 << 63) - 1) /* no limit */ +#define RLIM_SAVED_MAX RLIM_INFINITY /* Unrepresentable hard limit */ +#define RLIM_SAVED_CUR RLIM_INFINITY /* Unrepresentable soft limit */ /* * Possible values of the first parameter to getrlimit()/setrlimit(), to * indicate for which resource the operation is being performed. */ -#define RLIMIT_CPU 0 /* cpu time per process */ -#define RLIMIT_FSIZE 1 /* file size */ -#define RLIMIT_DATA 2 /* data segment size */ -#define RLIMIT_STACK 3 /* stack size */ -#define RLIMIT_CORE 4 /* core file size */ -#define RLIMIT_AS 5 /* address space (resident set size) */ +#define RLIMIT_CPU 0 /* cpu time per process */ +#define RLIMIT_FSIZE 1 /* file size */ +#define RLIMIT_DATA 2 /* data segment size */ +#define RLIMIT_STACK 3 /* stack size */ +#define RLIMIT_CORE 4 /* core file size */ +#define RLIMIT_AS 5 /* address space (resident set size) */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define RLIMIT_RSS RLIMIT_AS /* source compatibility alias */ -#define RLIMIT_MEMLOCK 6 /* locked-in-memory address space */ -#define RLIMIT_NPROC 7 /* number of processes */ -#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ -#define RLIMIT_NOFILE 8 /* number of open files */ +#define RLIMIT_RSS RLIMIT_AS /* source compatibility alias */ +#define RLIMIT_MEMLOCK 6 /* locked-in-memory address space */ +#define RLIMIT_NPROC 7 /* number of processes */ +#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ +#define RLIMIT_NOFILE 8 /* number of open files */ #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define RLIM_NLIMITS 9 /* total number of resource limits */ -#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ -#define _RLIMIT_POSIX_FLAG 0x1000 /* Set bit for strict POSIX */ +#define RLIM_NLIMITS 9 /* total number of resource limits */ +#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ +#define _RLIMIT_POSIX_FLAG 0x1000 /* Set bit for strict POSIX */ /* * A structure representing a resource limit. The address of an instance * of this structure is the second parameter to getrlimit()/setrlimit(). 
*/ struct rlimit { - rlim_t rlim_cur; /* current (soft) limit */ - rlim_t rlim_max; /* maximum value for rlim_cur */ + rlim_t rlim_cur; /* current (soft) limit */ + rlim_t rlim_max; /* maximum value for rlim_cur */ }; #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL @@ -453,29 +453,29 @@ struct rlimit { * * Resource limit flavors */ -#define RLIMIT_WAKEUPS_MONITOR 0x1 /* Configure the wakeups monitor. */ -#define RLIMIT_CPU_USAGE_MONITOR 0x2 /* Configure the CPU usage monitor. */ -#define RLIMIT_THREAD_CPULIMITS 0x3 /* Configure a blocking, per-thread, CPU limits. */ -#define RLIMIT_FOOTPRINT_INTERVAL 0x4 /* Configure memory footprint interval tracking */ +#define RLIMIT_WAKEUPS_MONITOR 0x1 /* Configure the wakeups monitor. */ +#define RLIMIT_CPU_USAGE_MONITOR 0x2 /* Configure the CPU usage monitor. */ +#define RLIMIT_THREAD_CPULIMITS 0x3 /* Configure a blocking, per-thread, CPU limits. */ +#define RLIMIT_FOOTPRINT_INTERVAL 0x4 /* Configure memory footprint interval tracking */ /* * Flags for wakeups monitor control. */ -#define WAKEMON_ENABLE 0x01 -#define WAKEMON_DISABLE 0x02 -#define WAKEMON_GET_PARAMS 0x04 -#define WAKEMON_SET_DEFAULTS 0x08 -#define WAKEMON_MAKE_FATAL 0x10 /* Configure the task so that violations are fatal. */ +#define WAKEMON_ENABLE 0x01 +#define WAKEMON_DISABLE 0x02 +#define WAKEMON_GET_PARAMS 0x04 +#define WAKEMON_SET_DEFAULTS 0x08 +#define WAKEMON_MAKE_FATAL 0x10 /* Configure the task so that violations are fatal. */ /* * Flags for CPU usage monitor control. */ -#define CPUMON_MAKE_FATAL 0x1000 +#define CPUMON_MAKE_FATAL 0x1000 /* * Flags for memory footprint interval tracking. */ -#define FOOTPRINT_INTERVAL_RESET 0x1 /* Reset the footprint interval counter to zero */ +#define FOOTPRINT_INTERVAL_RESET 0x1 /* Reset the footprint interval counter to zero */ struct proc_rlimit_control_wakeupmon { uint32_t wm_flags; @@ -483,17 +483,17 @@ struct proc_rlimit_control_wakeupmon { }; #if PRIVATE -/* +/* * Flags for I/O monitor control. 
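
[Aside: struct rlimit and the RLIMIT_* selectors above back the getrlimit()/setrlimit() pair. A minimal sketch raising the soft open-file limit; the OPEN_MAX clamp reflects Darwin's <sys/syslimits.h> ceiling for RLIMIT_NOFILE and is an assumption about the target platform:]

    #include <stdio.h>
    #include <limits.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) != 0) {
            perror("getrlimit");
            return 1;
        }
        /* unprivileged code may raise rlim_cur only up to rlim_max;
         * on Darwin an RLIM_INFINITY hard limit is still capped by OPEN_MAX */
        rl.rlim_cur = (rl.rlim_max == RLIM_INFINITY) ? OPEN_MAX : rl.rlim_max;
        if (setrlimit(RLIMIT_NOFILE, &rl) != 0) {
            perror("setrlimit");
            return 1;
        }
        printf("soft=%llu hard=%llu\n",
               (unsigned long long)rl.rlim_cur,
               (unsigned long long)rl.rlim_max);
        return 0;
    }
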
*/ -#define IOMON_ENABLE 0x01 -#define IOMON_DISABLE 0x02 +#define IOMON_ENABLE 0x01 +#define IOMON_DISABLE 0x02 #endif /* PRIVATE */ /* I/O type */ -#define IOPOL_TYPE_DISK 0 +#define IOPOL_TYPE_DISK 0 #if PRIVATE #define IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY 1 #endif @@ -505,24 +505,24 @@ struct proc_rlimit_control_wakeupmon { #define IOPOL_SCOPE_DARWIN_BG 2 /* I/O Priority */ -#define IOPOL_DEFAULT 0 -#define IOPOL_IMPORTANT 1 -#define IOPOL_PASSIVE 2 -#define IOPOL_THROTTLE 3 -#define IOPOL_UTILITY 4 -#define IOPOL_STANDARD 5 +#define IOPOL_DEFAULT 0 +#define IOPOL_IMPORTANT 1 +#define IOPOL_PASSIVE 2 +#define IOPOL_THROTTLE 3 +#define IOPOL_UTILITY 4 +#define IOPOL_STANDARD 5 /* compatibility with older names */ #define IOPOL_APPLICATION IOPOL_STANDARD #define IOPOL_NORMAL IOPOL_IMPORTANT #if PRIVATE -#define IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT 0 -#define IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE 1 +#define IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT 0 +#define IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE 1 #endif -#define IOPOL_ATIME_UPDATES_DEFAULT 0 -#define IOPOL_ATIME_UPDATES_OFF 1 +#define IOPOL_ATIME_UPDATES_DEFAULT 0 +#define IOPOL_ATIME_UPDATES_OFF 1 #ifdef PRIVATE /* @@ -533,36 +533,36 @@ struct proc_rlimit_control_wakeupmon { /* * the command to iopolicysys() */ -#define IOPOL_CMD_GET 0x00000001 /* Get I/O policy */ -#define IOPOL_CMD_SET 0x00000002 /* Set I/O policy */ +#define IOPOL_CMD_GET 0x00000001 /* Get I/O policy */ +#define IOPOL_CMD_SET 0x00000002 /* Set I/O policy */ /* * Second parameter to iopolicysys() */ struct _iopol_param_t { - int iop_scope; /* current process or a thread */ + int iop_scope; /* current process or a thread */ int iop_iotype; int iop_policy; }; -#endif /* PRIVATE */ +#endif /* PRIVATE */ #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ #ifndef KERNEL __BEGIN_DECLS -int getpriority(int, id_t); +int getpriority(int, id_t); #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -int getiopolicy_np(int, int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int getiopolicy_np(int, int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ -int getrlimit(int, struct rlimit *) __DARWIN_ALIAS(getrlimit); -int getrusage(int, struct rusage *); -int setpriority(int, id_t, int); +int getrlimit(int, struct rlimit *) __DARWIN_ALIAS(getrlimit); +int getrusage(int, struct rusage *); +int setpriority(int, id_t, int); #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -int setiopolicy_np(int, int, int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int setiopolicy_np(int, int, int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL */ -int setrlimit(int, const struct rlimit *) __DARWIN_ALIAS(setrlimit); +int setrlimit(int, const struct rlimit *) __DARWIN_ALIAS(setrlimit); __END_DECLS -#endif /* !KERNEL */ -#endif /* !_SYS_RESOURCE_H_ */ +#endif /* !KERNEL */ +#endif /* !_SYS_RESOURCE_H_ */ diff --git a/bsd/sys/resourcevar.h b/bsd/sys/resourcevar.h index 6a21d2b6d..9637ead44 100644 --- a/bsd/sys/resourcevar.h +++ b/bsd/sys/resourcevar.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
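
[Aside: the getiopolicy_np()/setiopolicy_np() declarations in this hunk act on the IOPOL_* values above. A sketch throttling disk I/O for the whole process; IOPOL_SCOPE_PROCESS is defined in a part of this header not shown in the hunk context:]

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        /* lower disk I/O priority for every thread in this process */
        if (setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS,
                           IOPOL_THROTTLE) != 0) {
            perror("setiopolicy_np");
            return 1;
        }
        /* returns the policy value, or -1 on error */
        printf("disk policy now: %d\n",
               getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS));
        return 0;
    }
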
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ @@ -61,8 +61,8 @@ * @(#)resourcevar.h 8.4 (Berkeley) 1/9/95 */ -#ifndef _SYS_RESOURCEVAR_H_ -#define _SYS_RESOURCEVAR_H_ +#ifndef _SYS_RESOURCEVAR_H_ +#define _SYS_RESOURCEVAR_H_ #include #include @@ -72,30 +72,30 @@ * (not necessarily resident except when running). */ struct pstats { - struct rusage p_ru; /* stats for this proc */ - struct rusage p_cru; /* (PL) sum of stats for reaped children */ + struct rusage p_ru; /* stats for this proc */ + struct rusage p_cru; /* (PL) sum of stats for reaped children */ - struct uprof { /* profile arguments */ + struct uprof { /* profile arguments */ struct uprof *pr_next; /* multiple prof buffers allowed */ - caddr_t pr_base; /* buffer base */ - u_int32_t pr_size; /* buffer size */ - u_int32_t pr_off; /* pc offset */ - u_int32_t pr_scale; /* pc scaling */ - u_int32_t pr_addr; /* temp storage for addr until AST */ - u_int32_t pr_ticks; /* temp storage for ticks until AST */ + caddr_t pr_base; /* buffer base */ + u_int32_t pr_size; /* buffer size */ + u_int32_t pr_off; /* pc offset */ + u_int32_t pr_scale; /* pc scaling */ + u_int32_t pr_addr; /* temp storage for addr until AST */ + u_int32_t pr_ticks; /* temp storage for ticks until AST */ } p_prof; - - uint64_t ps_start; /* starting time ; compat only */ + + uint64_t ps_start; /* starting time ; compat only */ #ifdef KERNEL - struct rusage_info_child ri_child; /* (PL) sum of additional stats for reaped children (proc_pid_rusage) */ - struct user_uprof { /* profile arguments */ + struct rusage_info_child ri_child; /* (PL) sum of additional stats for reaped children (proc_pid_rusage) */ + struct user_uprof { /* profile arguments */ struct user_uprof *pr_next; /* multiple prof buffers allowed */ - user_addr_t pr_base; /* buffer base */ - user_size_t pr_size; /* buffer size */ - user_ulong_t pr_off; /* pc offset */ - user_ulong_t pr_scale; /* pc scaling */ - user_ulong_t pr_addr; /* temp storage for addr until AST */ - user_ulong_t pr_ticks; /* temp storage for ticks until AST */ + user_addr_t pr_base; /* buffer base */ + user_size_t pr_size; /* buffer size */ + user_ulong_t pr_off; /* pc offset */ + user_ulong_t pr_scale; /* pc scaling */ + user_ulong_t pr_addr; /* temp storage for addr until AST */ + user_ulong_t pr_ticks; /* temp storage for ticks until AST */ } user_p_prof; #endif // KERNEL }; @@ -108,31 +108,31 @@ struct pstats { * and a copy must be made for the child of a new fork that isn't * sharing modifications to the limits. 
*/ -/* - * Modifications are done with the list lock held (p_limit as well)and access indv - * limits can be done without limit as we keep the old copy in p_olimit. Which is +/* + * Modifications are done with the list lock held (p_limit as well)and access indv + * limits can be done without limit as we keep the old copy in p_olimit. Which is * dropped in proc_exit. This way all access will have a valid kernel address */ struct plimit { - struct rlimit pl_rlimit[RLIM_NLIMITS]; - int pl_refcnt; /* number of references */ + struct rlimit pl_rlimit[RLIM_NLIMITS]; + int pl_refcnt; /* number of references */ }; #ifdef KERNEL /* add user profiling from AST */ -#define ADDUPROF(p) \ - addupc_task(p, \ - (proc_is64bit((p)) ? (p)->p_stats->user_p_prof.pr_addr \ - : CAST_USER_ADDR_T((p)->p_stats->p_prof.pr_addr)), \ - (proc_is64bit((p)) ? (p)->p_stats->user_p_prof.pr_ticks \ - : (p)->p_stats->p_prof.pr_ticks)) +#define ADDUPROF(p) \ + addupc_task(p, \ + (proc_is64bit((p)) ? (p)->p_stats->user_p_prof.pr_addr \ + : CAST_USER_ADDR_T((p)->p_stats->p_prof.pr_addr)), \ + (proc_is64bit((p)) ? (p)->p_stats->user_p_prof.pr_ticks \ + : (p)->p_stats->p_prof.pr_ticks)) -void addupc_intr(struct proc *p, uint32_t pc, u_int ticks); -void addupc_task(struct proc *p, user_addr_t pc, u_int ticks); -void calcru(struct proc *p, struct timeval *up, struct timeval *sp, - struct timeval *ip); -void ruadd(struct rusage *ru, struct rusage *ru2); -void update_rusage_info_child(struct rusage_info_child *ru, rusage_info_current *ru_current); +void addupc_intr(struct proc *p, uint32_t pc, u_int ticks); +void addupc_task(struct proc *p, user_addr_t pc, u_int ticks); +void calcru(struct proc *p, struct timeval *up, struct timeval *sp, + struct timeval *ip); +void ruadd(struct rusage *ru, struct rusage *ru2); +void update_rusage_info_child(struct rusage_info_child *ru, rusage_info_current *ru_current); void proc_limitget(proc_t p, int whichi, struct rlimit * limp); void proc_limitdrop(proc_t p, int exiting); void proc_limitfork(proc_t parent, proc_t child); @@ -142,4 +142,4 @@ void proc_limitunblock(proc_t); #endif /* KERNEL */ -#endif /* !_SYS_RESOURCEVAR_H_ */ +#endif /* !_SYS_RESOURCEVAR_H_ */ diff --git a/bsd/sys/sbuf.h b/bsd/sys/sbuf.h index 86ff6eebd..0a874635e 100644 --- a/bsd/sys/sbuf.h +++ b/bsd/sys/sbuf.h @@ -29,7 +29,7 @@ */ #ifndef _SYS_SBUF_H_ -#define _SYS_SBUF_H_ +#define _SYS_SBUF_H_ #include #include @@ -38,47 +38,47 @@ * Structure definition */ struct sbuf { - char *s_buf; /* storage buffer */ - void *s_unused; /* binary compatibility. */ - int s_size; /* size of storage buffer */ - int s_len; /* current length of string */ -#define SBUF_FIXEDLEN 0x00000000 /* fixed length buffer (default) */ -#define SBUF_AUTOEXTEND 0x00000001 /* automatically extend buffer */ -#define SBUF_USRFLAGMSK 0x0000ffff /* mask of flags the user may specify */ -#define SBUF_DYNAMIC 0x00010000 /* s_buf must be freed */ -#define SBUF_FINISHED 0x00020000 /* set by sbuf_finish() */ -#define SBUF_OVERFLOWED 0x00040000 /* sbuf overflowed */ -#define SBUF_DYNSTRUCT 0x00080000 /* sbuf must be freed */ - int s_flags; /* flags */ + char *s_buf; /* storage buffer */ + void *s_unused; /* binary compatibility. 
*/ + int s_size; /* size of storage buffer */ + int s_len; /* current length of string */ +#define SBUF_FIXEDLEN 0x00000000 /* fixed length buffer (default) */ +#define SBUF_AUTOEXTEND 0x00000001 /* automatically extend buffer */ +#define SBUF_USRFLAGMSK 0x0000ffff /* mask of flags the user may specify */ +#define SBUF_DYNAMIC 0x00010000 /* s_buf must be freed */ +#define SBUF_FINISHED 0x00020000 /* set by sbuf_finish() */ +#define SBUF_OVERFLOWED 0x00040000 /* sbuf overflowed */ +#define SBUF_DYNSTRUCT 0x00080000 /* sbuf must be freed */ + int s_flags; /* flags */ }; __BEGIN_DECLS /* * API functions */ -struct sbuf *sbuf_new(struct sbuf *, char *, int, int); -void sbuf_clear(struct sbuf *); -int sbuf_setpos(struct sbuf *, int); -int sbuf_bcat(struct sbuf *, const void *, size_t); -int sbuf_bcpy(struct sbuf *, const void *, size_t); -int sbuf_cat(struct sbuf *, const char *); -int sbuf_cpy(struct sbuf *, const char *); -int sbuf_printf(struct sbuf *, const char *, ...) __printflike(2, 3); -int sbuf_vprintf(struct sbuf *, const char *, va_list) __printflike(2, 0); -int sbuf_putc(struct sbuf *, int); -int sbuf_trim(struct sbuf *); -int sbuf_overflowed(struct sbuf *); -void sbuf_finish(struct sbuf *); -char *sbuf_data(struct sbuf *); -int sbuf_len(struct sbuf *); -int sbuf_done(struct sbuf *); -void sbuf_delete(struct sbuf *); +struct sbuf *sbuf_new(struct sbuf *, char *, int, int); +void sbuf_clear(struct sbuf *); +int sbuf_setpos(struct sbuf *, int); +int sbuf_bcat(struct sbuf *, const void *, size_t); +int sbuf_bcpy(struct sbuf *, const void *, size_t); +int sbuf_cat(struct sbuf *, const char *); +int sbuf_cpy(struct sbuf *, const char *); +int sbuf_printf(struct sbuf *, const char *, ...) __printflike(2, 3); +int sbuf_vprintf(struct sbuf *, const char *, va_list) __printflike(2, 0); +int sbuf_putc(struct sbuf *, int); +int sbuf_trim(struct sbuf *); +int sbuf_overflowed(struct sbuf *); +void sbuf_finish(struct sbuf *); +char *sbuf_data(struct sbuf *); +int sbuf_len(struct sbuf *); +int sbuf_done(struct sbuf *); +void sbuf_delete(struct sbuf *); #ifdef KERNEL struct uio; -struct sbuf *sbuf_uionew(struct sbuf *, struct uio *, int *); -int sbuf_bcopyin(struct sbuf *, const void *, size_t); -int sbuf_copyin(struct sbuf *, const void *, size_t); +struct sbuf *sbuf_uionew(struct sbuf *, struct uio *, int *); +int sbuf_bcopyin(struct sbuf *, const void *, size_t); +int sbuf_copyin(struct sbuf *, const void *, size_t); #endif __END_DECLS diff --git a/bsd/sys/sdt.h b/bsd/sys/sdt.h index 31acf140a..9799cb843 100644 --- a/bsd/sys/sdt.h +++ b/bsd/sys/sdt.h @@ -25,7 +25,7 @@ */ #ifndef _SYS_SDT_H -#define _SYS_SDT_H +#define _SYS_SDT_H /* * This is a wrapper header that wraps the mach visible sdt.h header so that @@ -44,4 +44,4 @@ __BEGIN_DECLS #include __END_DECLS -#endif /* _SYS_SDT_H */ +#endif /* _SYS_SDT_H */ diff --git a/bsd/sys/sdt_impl.h b/bsd/sys/sdt_impl.h index f48f83e50..f0d840c38 100644 --- a/bsd/sys/sdt_impl.h +++ b/bsd/sys/sdt_impl.h @@ -25,7 +25,7 @@ */ #ifndef _SDT_IMPL_H -#define _SDT_IMPL_H +#define _SDT_IMPL_H /* * This file has been created by splitting up the original DTrace sdt.h @@ -34,26 +34,26 @@ /* #pragma ident "@(#)sdt.h 1.7 05/06/08 SMI" */ -#ifdef __cplusplus +#ifdef __cplusplus extern "C" { #endif extern const char *sdt_prefix; typedef struct sdt_probedesc { - char *sdpd_name; /* name of this probe */ - char *sdpd_func; /* APPLE NOTE: function name */ - unsigned long sdpd_offset; /* offset of call in text */ - struct sdt_probedesc *sdpd_next; /* next static probe */ + 
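
[Aside: the sbuf API above is the kernel's safe string builder. A hedged kernel-side sketch of typical use — emit_pair is a hypothetical helper, and printf here is the kernel's; with NULL storage plus SBUF_AUTOEXTEND, sbuf_new() allocates and grows s_buf itself:]

    #include <sys/sbuf.h>

    /* hypothetical helper: format "name=value"; returns 0 on success */
    static int
    emit_pair(const char *name, int value)
    {
        struct sbuf *sb;
        int err = -1;

        sb = sbuf_new(NULL, NULL, 64, SBUF_AUTOEXTEND);
        if (sb == NULL)
            return -1;

        sbuf_printf(sb, "%s=%d", name, value);
        sbuf_finish(sb);                 /* NUL-terminate, set SBUF_FINISHED */
        if (!sbuf_overflowed(sb)) {      /* checks SBUF_OVERFLOWED */
            printf("%s\n", sbuf_data(sb));
            err = 0;
        }
        sbuf_delete(sb);                 /* frees s_buf too (SBUF_DYNAMIC) */
        return err;
    }
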
char *sdpd_name; /* name of this probe */ + char *sdpd_func; /* APPLE NOTE: function name */ + unsigned long sdpd_offset; /* offset of call in text */ + struct sdt_probedesc *sdpd_next; /* next static probe */ } sdt_probedesc_t; -#ifdef __cplusplus +#ifdef __cplusplus } #endif /* #pragma ident "@(#)sdt_impl.h 1.3 05/06/08 SMI" */ -#ifdef __cplusplus +#ifdef __cplusplus extern "C" { #endif @@ -87,42 +87,42 @@ typedef uint32_t sdt_instr_t; #endif typedef struct sdt_provider { - const char *sdtp_name; /* name of provider */ - const char *sdtp_prefix; /* prefix for probe names */ - dtrace_pattr_t *sdtp_attr; /* stability attributes */ - dtrace_provider_id_t sdtp_id; /* provider ID */ + const char *sdtp_name; /* name of provider */ + const char *sdtp_prefix; /* prefix for probe names */ + dtrace_pattr_t *sdtp_attr; /* stability attributes */ + dtrace_provider_id_t sdtp_id; /* provider ID */ } sdt_provider_t; -extern sdt_provider_t sdt_providers[]; /* array of providers */ +extern sdt_provider_t sdt_providers[]; /* array of providers */ typedef struct sdt_probe { - sdt_provider_t *sdp_provider; /* provider */ - char *sdp_name; /* name of probe */ - int sdp_namelen; /* length of allocated name */ - dtrace_id_t sdp_id; /* probe ID */ - struct modctl *sdp_ctl; /* modctl for module */ - int sdp_loadcnt; /* load count for module */ - int sdp_primary; /* non-zero if primary mod */ - sdt_instr_t *sdp_patchpoint; /* patch point */ - sdt_instr_t sdp_patchval; /* instruction to patch */ - sdt_instr_t sdp_savedval; /* saved instruction value */ - struct sdt_probe *sdp_next; /* next probe */ - struct sdt_probe *sdp_hashnext; /* next on hash */ + sdt_provider_t *sdp_provider; /* provider */ + char *sdp_name; /* name of probe */ + int sdp_namelen; /* length of allocated name */ + dtrace_id_t sdp_id; /* probe ID */ + struct modctl *sdp_ctl; /* modctl for module */ + int sdp_loadcnt; /* load count for module */ + int sdp_primary; /* non-zero if primary mod */ + sdt_instr_t *sdp_patchpoint; /* patch point */ + sdt_instr_t sdp_patchval; /* instruction to patch */ + sdt_instr_t sdp_savedval; /* saved instruction value */ + struct sdt_probe *sdp_next; /* next probe */ + struct sdt_probe *sdp_hashnext; /* next on hash */ } sdt_probe_t; typedef struct sdt_argdesc { - const char *sda_provider; /* provider for arg */ - const char *sda_name; /* name of probe */ - const int sda_ndx; /* argument index */ - const int sda_mapping; /* mapping of argument */ - const char *sda_native; /* native type of argument */ - const char *sda_xlate; /* translated type of arg */ + const char *sda_provider; /* provider for arg */ + const char *sda_name; /* name of probe */ + const int sda_ndx; /* argument index */ + const int sda_mapping; /* mapping of argument */ + const char *sda_native; /* native type of argument */ + const char *sda_xlate; /* translated type of arg */ } sdt_argdesc_t; extern void sdt_getargdesc(void *, dtrace_id_t, void *, dtrace_argdesc_t *); -#ifdef __cplusplus +#ifdef __cplusplus } #endif -#endif /* _SDT_IMPL_H */ +#endif /* _SDT_IMPL_H */ diff --git a/bsd/sys/select.h b/bsd/sys/select.h index 1fa3f7605..1ffa4c0b0 100644 --- a/bsd/sys/select.h +++ b/bsd/sys/select.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -61,7 +61,7 @@ */ #ifndef _SYS_SELECT_H_ -#define _SYS_SELECT_H_ +#define _SYS_SELECT_H_ #include #include @@ -105,7 +105,7 @@ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #include -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #ifdef KERNEL #include @@ -118,15 +118,15 @@ */ #ifdef KERNEL_PRIVATE struct selinfo { - struct waitq si_waitq; /* waitq for wait/wakeup */ - struct klist si_note; /* JMM - temporary separation */ - u_int si_flags; /* see below */ + struct waitq si_waitq; /* waitq for wait/wakeup */ + struct klist si_note; /* JMM - temporary separation */ + u_int si_flags; /* see below */ }; -#define SI_COLL 0x0001 /* collision occurred */ -#define SI_RECORDED 0x0004 /* select has been recorded */ -#define SI_INITED 0x0008 /* selinfo has been inited */ -#define SI_CLEAR 0x0010 /* selinfo has been cleared */ +#define SI_COLL 0x0001 /* collision occurred */ +#define SI_RECORDED 0x0004 /* select has been recorded */ +#define SI_INITED 0x0008 /* selinfo has been inited */ +#define SI_CLEAR 0x0010 /* selinfo has been cleared */ #else struct selinfo; @@ -135,9 +135,9 @@ struct selinfo; __BEGIN_DECLS extern int selwait; -void selrecord(proc_t selector, struct selinfo *, void *); -void selwakeup(struct selinfo *); -void selthreadclear(struct selinfo *); +void selrecord(proc_t selector, struct selinfo *, void *); +void selwakeup(struct selinfo *); +void selthreadclear(struct selinfo *); __END_DECLS @@ -146,22 +146,22 @@ __END_DECLS __BEGIN_DECLS #ifndef __MWERKS__ -int pselect(int, fd_set * __restrict, fd_set * __restrict, - fd_set * __restrict, const struct timespec * __restrict, - const sigset_t * __restrict) +int pselect(int, fd_set * __restrict, fd_set * __restrict, + fd_set * __restrict, const struct timespec * __restrict, + const sigset_t * __restrict) #if defined(_DARWIN_C_SOURCE) || defined(_DARWIN_UNLIMITED_SELECT) - __DARWIN_EXTSN_C(pselect) +__DARWIN_EXTSN_C(pselect) #else /* !_DARWIN_C_SOURCE && !_DARWIN_UNLIMITED_SELECT */ # if defined(__LP64__) && !__DARWIN_NON_CANCELABLE - __DARWIN_1050(pselect) +__DARWIN_1050(pselect) # else /* !__LP64__ || __DARWIN_NON_CANCELABLE */ - __DARWIN_ALIAS_C(pselect) +__DARWIN_ALIAS_C(pselect) # endif /* __LP64__ && !__DARWIN_NON_CANCELABLE */ #endif /* _DARWIN_C_SOURCE || _DARWIN_UNLIMITED_SELECT */ - ; +; #endif /* __MWERKS__ */ -#include /* select() prototype */ +#include /* select() prototype */ __END_DECLS diff --git a/bsd/sys/sem.h b/bsd/sys/sem.h index 4c19918c5..44fafe891 100644 --- a/bsd/sys/sem.h +++ b/bsd/sys/sem.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. 
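
[Aside on the select.h hunk just above: the selinfo/selrecord/selwakeup machinery is the kernel-private half; userspace consumes it through select()/pselect(). A minimal standard-POSIX sketch waiting on stdin with a timeout:]

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/select.h>

    int main(void)
    {
        fd_set rfds;
        struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
        int n;

        FD_ZERO(&rfds);
        FD_SET(STDIN_FILENO, &rfds);

        /* first argument is the highest fd plus one */
        n = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);
        if (n < 0)
            perror("select");
        else if (n == 0)
            printf("timeout\n");
        else if (FD_ISSET(STDIN_FILENO, &rfds))
            printf("stdin readable\n");
        return 0;
    }
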
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: sem.h,v 1.5 1994/06/29 06:45:15 cgd Exp $ */ @@ -70,7 +70,7 @@ #pragma pack(4) /* * Structure used internally. - * + * * This structure is exposed because standards dictate that it is used as * the semun union member 'buf' as the fourth argment to semctl() when the * third argument is IPC_STAT or IPC_SET. @@ -81,59 +81,59 @@ #if (defined(_POSIX_C_SOURCE) && !defined(_DARWIN_C_SOURCE)) struct semid_ds #else -#define semid_ds __semid_ds_new +#define semid_ds __semid_ds_new struct __semid_ds_new #endif { - struct __ipc_perm_new sem_perm; /* [XSI] operation permission struct */ - __int32_t sem_base; /* 32 bit base ptr for semaphore set */ - unsigned short sem_nsems; /* [XSI] number of sems in set */ - time_t sem_otime; /* [XSI] last operation time */ - __int32_t sem_pad1; /* RESERVED: DO NOT USE! */ - time_t sem_ctime; /* [XSI] last change time */ - /* Times measured in secs since */ - /* 00:00:00 GMT, Jan. 1, 1970 */ - __int32_t sem_pad2; /* RESERVED: DO NOT USE! */ - __int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ + struct __ipc_perm_new sem_perm; /* [XSI] operation permission struct */ + __int32_t sem_base; /* 32 bit base ptr for semaphore set */ + unsigned short sem_nsems; /* [XSI] number of sems in set */ + time_t sem_otime; /* [XSI] last operation time */ + __int32_t sem_pad1; /* RESERVED: DO NOT USE! */ + time_t sem_ctime; /* [XSI] last change time */ + /* Times measured in secs since */ + /* 00:00:00 GMT, Jan. 1, 1970 */ + __int32_t sem_pad2; /* RESERVED: DO NOT USE! */ + __int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ }; #pragma pack() -#else /* !__DARWIN_UNIX03 */ -#define semid_ds __semid_ds_old -#endif /* __DARWIN_UNIX03 */ +#else /* !__DARWIN_UNIX03 */ +#define semid_ds __semid_ds_old +#endif /* __DARWIN_UNIX03 */ #if !__DARWIN_UNIX03 struct __semid_ds_old { - struct __ipc_perm_old sem_perm; /* [XSI] operation permission struct */ - __int32_t sem_base; /* 32 bit base ptr for semaphore set */ - unsigned short sem_nsems; /* [XSI] number of sems in set */ - time_t sem_otime; /* [XSI] last operation time */ - __int32_t sem_pad1; /* RESERVED: DO NOT USE! */ - time_t sem_ctime; /* [XSI] last change time */ - /* Times measured in secs since */ - /* 00:00:00 GMT, Jan. 1, 1970 */ - __int32_t sem_pad2; /* RESERVED: DO NOT USE! */ - __int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! 
*/ + struct __ipc_perm_old sem_perm; /* [XSI] operation permission struct */ + __int32_t sem_base; /* 32 bit base ptr for semaphore set */ + unsigned short sem_nsems; /* [XSI] number of sems in set */ + time_t sem_otime; /* [XSI] last operation time */ + __int32_t sem_pad1; /* RESERVED: DO NOT USE! */ + time_t sem_ctime; /* [XSI] last change time */ + /* Times measured in secs since */ + /* 00:00:00 GMT, Jan. 1, 1970 */ + __int32_t sem_pad2; /* RESERVED: DO NOT USE! */ + __int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ }; -#endif /* !__DARWIN_UNIX03 */ +#endif /* !__DARWIN_UNIX03 */ /* * Possible values for the third argument to semctl() */ -#define GETNCNT 3 /* [XSI] Return the value of semncnt {READ} */ -#define GETPID 4 /* [XSI] Return the value of sempid {READ} */ -#define GETVAL 5 /* [XSI] Return the value of semval {READ} */ -#define GETALL 6 /* [XSI] Return semvals into arg.array {READ} */ -#define GETZCNT 7 /* [XSI] Return the value of semzcnt {READ} */ -#define SETVAL 8 /* [XSI] Set the value of semval to arg.val {ALTER} */ -#define SETALL 9 /* [XSI] Set semvals from arg.array {ALTER} */ +#define GETNCNT 3 /* [XSI] Return the value of semncnt {READ} */ +#define GETPID 4 /* [XSI] Return the value of sempid {READ} */ +#define GETVAL 5 /* [XSI] Return the value of semval {READ} */ +#define GETALL 6 /* [XSI] Return semvals into arg.array {READ} */ +#define GETZCNT 7 /* [XSI] Return the value of semzcnt {READ} */ +#define SETVAL 8 /* [XSI] Set the value of semval to arg.val {ALTER} */ +#define SETALL 9 /* [XSI] Set semvals from arg.array {ALTER} */ /* A semaphore; this is an anonymous structure, not for external use */ struct sem { - unsigned short semval; /* semaphore value */ - pid_t sempid; /* pid of last operation */ - unsigned short semncnt; /* # awaiting semval > cval */ - unsigned short semzcnt; /* # awaiting semval == 0 */ + unsigned short semval; /* semaphore value */ + pid_t sempid; /* pid of last operation */ + unsigned short semncnt; /* # awaiting semval > cval */ + unsigned short semzcnt; /* # awaiting semval == 0 */ }; @@ -141,15 +141,15 @@ struct sem { * Structure of array element for second argument to semop() */ struct sembuf { - unsigned short sem_num; /* [XSI] semaphore # */ - short sem_op; /* [XSI] semaphore operation */ - short sem_flg; /* [XSI] operation flags */ + unsigned short sem_num; /* [XSI] semaphore # */ + short sem_op; /* [XSI] semaphore operation */ + short sem_flg; /* [XSI] operation flags */ }; /* * Possible flag values for sem_flg */ -#define SEM_UNDO 010000 /* [XSI] Set up adjust on exit entry */ +#define SEM_UNDO 010000 /* [XSI] Set up adjust on exit entry */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) @@ -157,7 +157,7 @@ struct sembuf { /* * Union used as the fourth argment to semctl() in all cases. 
Specific * member values are used for different values of the third parameter: - * + * * Command Member * ------------------------------------------- ------ * GETALL, SETALL array @@ -166,7 +166,7 @@ struct sembuf { * * The union definition is intended to be defined by the user application * in conforming applications; it is provided here for two reasons: - * + * * 1) Historical source compatability for non-conforming applications * expecting this header to declare the union type on their behalf * @@ -175,9 +175,9 @@ struct sembuf { * not function correctly */ union semun { - int val; /* value for SETVAL */ - struct semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */ - unsigned short *array; /* array for GETALL & SETALL */ + int val; /* value for SETVAL */ + struct semid_ds *buf; /* buffer for IPC_STAT & IPC_SET */ + unsigned short *array; /* array for GETALL & SETALL */ }; typedef union semun semun_t; @@ -185,10 +185,10 @@ typedef union semun semun_t; /* * Permissions */ -#define SEM_A 0200 /* alter permission */ -#define SEM_R 0400 /* read permission */ +#define SEM_A 0200 /* alter permission */ +#define SEM_R 0400 /* read permission */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ @@ -196,11 +196,11 @@ typedef union semun semun_t; __BEGIN_DECLS #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -int semsys(int, ...); +int semsys(int, ...); #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -int semctl(int, int, int, ...) __DARWIN_ALIAS(semctl); -int semget(key_t, int, int); -int semop(int, struct sembuf *, size_t); +int semctl(int, int, int, ...) __DARWIN_ALIAS(semctl); +int semget(key_t, int, int); +int semop(int, struct sembuf *, size_t); __END_DECLS #endif /* !KERNEL */ diff --git a/bsd/sys/sem_internal.h b/bsd/sys/sem_internal.h index bf697a1ea..14ef51653 100644 --- a/bsd/sys/sem_internal.h +++ b/bsd/sys/sem_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -58,49 +58,49 @@ #endif struct user_semid_ds { - struct ipc_perm sem_perm; /* [XSI] operation permission struct */ - struct sem *sem_base; /* 32 bit base ptr for semaphore set */ - unsigned short sem_nsems; /* [XSI] number of sems in set */ - user_time_t sem_otime; /* [XSI] last operation time */ - __int32_t sem_pad1; /* RESERVED: DO NOT USE! */ - user_time_t sem_ctime; /* [XSI] last change time */ - /* Times measured in secs since */ - /* 00:00:00 GMT, Jan. 
1, 1970 */ - __int32_t sem_pad2; /* RESERVED: DO NOT USE! */ - __int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ + struct ipc_perm sem_perm; /* [XSI] operation permission struct */ + struct sem *sem_base; /* 32 bit base ptr for semaphore set */ + unsigned short sem_nsems; /* [XSI] number of sems in set */ + user_time_t sem_otime; /* [XSI] last operation time */ + __int32_t sem_pad1; /* RESERVED: DO NOT USE! */ + user_time_t sem_ctime; /* [XSI] last change time */ + /* Times measured in secs since */ + /* 00:00:00 GMT, Jan. 1, 1970 */ + __int32_t sem_pad2; /* RESERVED: DO NOT USE! */ + __int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ }; #pragma pack(4) struct user64_semid_ds { - struct ipc_perm sem_perm; /* [XSI] operation permission struct */ - int32_t sem_base; /* 32 bit base ptr for semaphore set */ - unsigned short sem_nsems; /* [XSI] number of sems in set */ - user64_time_t sem_otime; /* [XSI] last operation time */ - int32_t sem_pad1; /* RESERVED: DO NOT USE! */ - user64_time_t sem_ctime; /* [XSI] last change time */ - /* Times measured in secs since */ - /* 00:00:00 GMT, Jan. 1, 1970 */ - int32_t sem_pad2; /* RESERVED: DO NOT USE! */ - int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ + struct ipc_perm sem_perm; /* [XSI] operation permission struct */ + int32_t sem_base; /* 32 bit base ptr for semaphore set */ + unsigned short sem_nsems; /* [XSI] number of sems in set */ + user64_time_t sem_otime; /* [XSI] last operation time */ + int32_t sem_pad1; /* RESERVED: DO NOT USE! */ + user64_time_t sem_ctime; /* [XSI] last change time */ + /* Times measured in secs since */ + /* 00:00:00 GMT, Jan. 1, 1970 */ + int32_t sem_pad2; /* RESERVED: DO NOT USE! */ + int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ }; struct user32_semid_ds { - struct ipc_perm sem_perm; /* [XSI] operation permission struct */ - int32_t sem_base; /* 32 bit base ptr for semaphore set */ - unsigned short sem_nsems; /* [XSI] number of sems in set */ - user32_time_t sem_otime; /* [XSI] last operation time */ - int32_t sem_pad1; /* RESERVED: DO NOT USE! */ - user32_time_t sem_ctime; /* [XSI] last change time */ - /* Times measured in secs since */ - /* 00:00:00 GMT, Jan. 1, 1970 */ - int32_t sem_pad2; /* RESERVED: DO NOT USE! */ - int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ + struct ipc_perm sem_perm; /* [XSI] operation permission struct */ + int32_t sem_base; /* 32 bit base ptr for semaphore set */ + unsigned short sem_nsems; /* [XSI] number of sems in set */ + user32_time_t sem_otime; /* [XSI] last operation time */ + int32_t sem_pad1; /* RESERVED: DO NOT USE! */ + user32_time_t sem_ctime; /* [XSI] last change time */ + /* Times measured in secs since */ + /* 00:00:00 GMT, Jan. 1, 1970 */ + int32_t sem_pad2; /* RESERVED: DO NOT USE! */ + int32_t sem_pad3[4]; /* RESERVED: DO NOT USE! */ }; #pragma pack() union user_semun { - user_addr_t buf; /* buffer for IPC_STAT & IPC_SET */ - user_addr_t array; /* array for GETALL & SETALL */ + user_addr_t buf; /* buffer for IPC_STAT & IPC_SET */ + user_addr_t array; /* array for GETALL & SETALL */ }; typedef union user_semun user_semun_t; @@ -112,8 +112,8 @@ typedef union user_semun user_semun_t; /* * Kernel implementation stuff */ -#define SEMVMX 32767 /* semaphore maximum value */ -#define SEMAEM 16384 /* adjust on exit max value */ +#define SEMVMX 32767 /* semaphore maximum value */ +#define SEMAEM 16384 /* adjust on exit max value */ /* * Configuration parameters. SEMMNI, SEMMNS, and SEMMNU are hard limits. 
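
[Aside tying the sem.h pieces above together — semid_ds, struct sembuf, the GETVAL/SETVAL commands, and union semun (which this header supplies on Darwin for source compatibility): a minimal userspace sketch creating a one-semaphore set, initializing it, and performing a P operation:]

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/sem.h>

    int main(void)
    {
        union semun arg;                 /* provided by sys/sem.h on Darwin */
        struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
        int id;

        /* private set with one semaphore, owner read/alter */
        id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
        if (id < 0) { perror("semget"); return 1; }

        arg.val = 1;                     /* start as a binary semaphore */
        if (semctl(id, 0, SETVAL, arg) < 0) { perror("semctl"); return 1; }

        if (semop(id, &op, 1) < 0)       /* P: decrement, may block */
            perror("semop");

        (void)semctl(id, 0, IPC_RMID);   /* destroy the set */
        return 0;
    }
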
@@ -125,28 +125,28 @@ typedef union user_semun user_semun_t; /* * Configuration parameters */ -#ifndef SEMMNS /* # of semaphores in system */ -#define SEMMNS (1048576/sizeof(struct sem)) -#endif /* no more than 1M of semaphore data */ -#ifndef SEMMNI /* # of semaphore identifiers */ -#define SEMMNI SEMMNS /* max of 1 for each semaphore */ +#ifndef SEMMNS /* # of semaphores in system */ +#define SEMMNS (1048576/sizeof(struct sem)) +#endif /* no more than 1M of semaphore data */ +#ifndef SEMMNI /* # of semaphore identifiers */ +#define SEMMNI SEMMNS /* max of 1 for each semaphore */ #endif #ifndef SEMUME -#define SEMUME 10 /* max # of undo entries per process */ +#define SEMUME 10 /* max # of undo entries per process */ #endif -#ifndef SEMMNU /* # of undo structures in system */ -#define SEMMNU SEMMNS /* 1 for each semaphore. This is quite large */ -#endif /* This should be max 1 for each process */ +#ifndef SEMMNU /* # of undo structures in system */ +#define SEMMNU SEMMNS /* 1 for each semaphore. This is quite large */ +#endif /* This should be max 1 for each process */ /* shouldn't need tuning */ #ifndef SEMMAP -#define SEMMAP 30 /* # of entries in semaphore map */ +#define SEMMAP 30 /* # of entries in semaphore map */ #endif #ifndef SEMMSL -#define SEMMSL SEMMNS /* max # of semaphores per id */ +#define SEMMSL SEMMNS /* max # of semaphores per id */ #endif #ifndef SEMOPM -#define SEMOPM 5 /* max # of operations per semop call */ +#define SEMOPM 5 /* max # of operations per semop call */ #endif @@ -154,50 +154,50 @@ typedef union user_semun user_semun_t; * Undo structure (internal: one per process) */ struct sem_undo { - int un_next_idx; /* index of next active undo structure */ - struct proc *un_proc; /* owner of this structure */ - short un_cnt; /* # of active entries */ + int un_next_idx; /* index of next active undo structure */ + struct proc *un_proc; /* owner of this structure */ + short un_cnt; /* # of active entries */ struct undo { - short une_adjval; /* adjust on exit values */ - short une_num; /* semaphore # */ - int une_id; /* semid */ - struct undo *une_next; /* next undo entry */ - } *un_ent; /* undo entries */ + short une_adjval; /* adjust on exit values */ + short une_num; /* semaphore # */ + int une_id; /* semid */ + struct undo *une_next; /* next undo entry */ + } *un_ent; /* undo entries */ }; /* * semaphore info struct (internal; for administrative limits and ipcs) */ struct seminfo { - int semmap, /* # of entries in semaphore map */ - semmni, /* # of semaphore identifiers */ - semmns, /* # of semaphores in system */ - semmnu, /* # of undo structures in system */ - semmsl, /* max # of semaphores per id */ - semopm, /* max # of operations per semop call */ - semume, /* max # of undo entries per process */ - semusz, /* size in bytes of undo structure */ - semvmx, /* semaphore maximum value */ - semaem; /* adjust on exit max value */ + int semmap, /* # of entries in semaphore map */ + semmni, /* # of semaphore identifiers */ + semmns, /* # of semaphores in system */ + semmnu, /* # of undo structures in system */ + semmsl, /* max # of semaphores per id */ + semopm, /* max # of operations per semop call */ + semume, /* max # of undo entries per process */ + semusz, /* size in bytes of undo structure */ + semvmx, /* semaphore maximum value */ + semaem; /* adjust on exit max value */ }; -extern struct seminfo seminfo; +extern struct seminfo seminfo; /* * Kernel wrapper for the user-level structure */ struct semid_kernel { - struct user_semid_ds u; - struct label *label; /* 
MAC framework label */ + struct user_semid_ds u; + struct label *label; /* MAC framework label */ }; /* internal "mode" bits */ -#define SEM_ALLOC 01000 /* semaphore is allocated */ -#define SEM_DEST 02000 /* semaphore will be destroyed on last detach */ +#define SEM_ALLOC 01000 /* semaphore is allocated */ +#define SEM_DEST 02000 /* semaphore will be destroyed on last detach */ -#define SEMMNI_INC 8 /* increment value for semaphore identifiers */ -#define SEMMNS_INC 64 /* increment value for semaphores */ -#define SEMMNU_INC 32 /* increment value for undo structures */ +#define SEMMNI_INC 8 /* increment value for semaphore identifiers */ +#define SEMMNS_INC 64 /* increment value for semaphores */ +#define SEMMNU_INC 32 /* increment value for undo structures */ /* * Due to the way semaphore memory is allocated, we have to ensure that @@ -215,14 +215,14 @@ struct semid_kernel { /* * #define SEMUSZ SEM_ALIGN(offsetof(struct sem_undo, un_ent[SEMUME])) */ -#define SEMUSZ sizeof(struct sem_undo) +#define SEMUSZ sizeof(struct sem_undo) -extern struct semid_kernel *sema; /* semaphore id pool */ -extern struct sem *sem_pool; /* semaphore pool */ -/* This is now a struct sem_undo with the new memory allocation +extern struct semid_kernel *sema; /* semaphore id pool */ +extern struct sem *sem_pool; /* semaphore pool */ +/* This is now a struct sem_undo with the new memory allocation * extern int *semu; // undo structure pool */ -extern struct sem_undo *semu; /* undo structure pool */ +extern struct sem_undo *semu; /* undo structure pool */ /* * Macro to find a particular sem_undo vector @@ -235,7 +235,7 @@ extern struct sem_undo *semu; /* undo structure pool */ /* * This macro doesn't work because we are using a staticly allocated array * for semu now. - * #define SEMU(ix) ((struct sem_undo *)(((intptr_t)semu)+ix * SEMUSZ)) + * #define SEMU(ix) ((struct sem_undo *)(((intptr_t)semu)+ix * SEMUSZ)) */ #define SEMU(ix) (&semu[ix]) @@ -243,6 +243,6 @@ extern struct sem_undo *semu; /* undo structure pool */ /* * Process sem_undo vectors at proc exit. */ -void semexit(struct proc *p); +void semexit(struct proc *p); #endif /* !_SYS_SEM__INTERNALH_ */ diff --git a/bsd/sys/semaphore.h b/bsd/sys/semaphore.h index 96e2535fe..b55852012 100644 --- a/bsd/sys/semaphore.h +++ b/bsd/sys/semaphore.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
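
[Aside on the semaphore.h hunk above (sem_t, sem_unlink, sem_wait): this is the user half of POSIX semaphores, and macOS supports only the named variant, so sem_open() is used rather than sem_init(). A minimal sketch; "/demo.sem" is a hypothetical name:]

    #include <stdio.h>
    #include <fcntl.h>
    #include <semaphore.h>

    int main(void)
    {
        /* O_CREAT with mode 0600 and an initial count of 1 */
        sem_t *s = sem_open("/demo.sem", O_CREAT, 0600, 1);
        if (s == SEM_FAILED) { perror("sem_open"); return 1; }

        if (sem_wait(s) == 0) {          /* P */
            /* ... critical section ... */
            sem_post(s);                 /* V */
        }
        sem_close(s);
        sem_unlink("/demo.sem");         /* remove the name */
        return 0;
    }
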
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @(#)semaphore.h 1.0 2/29/00 */ -/* +/* * semaphore.h - POSIX semaphores * * HISTORY @@ -37,7 +37,7 @@ * Created for Mac OS X */ -#ifndef _SYS_SEMAPHORE_H_ +#ifndef _SYS_SEMAPHORE_H_ #define _SYS_SEMAPHORE_H_ typedef int sem_t; @@ -61,9 +61,9 @@ int sem_unlink(const char *); int sem_wait(sem_t *) __DARWIN_ALIAS_C(sem_wait); __END_DECLS -#else /* KERNEL */ +#else /* KERNEL */ void psem_lock_init(void); void psem_cache_init(void); -#endif /* KERNEL */ +#endif /* KERNEL */ -#endif /* _SYS_SEMAPHORE_H_ */ +#endif /* _SYS_SEMAPHORE_H_ */ diff --git a/bsd/sys/sfi.h b/bsd/sys/sfi.h index bd3d22aea..f9799ce0c 100644 --- a/bsd/sys/sfi.h +++ b/bsd/sys/sfi.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,10 +38,10 @@ */ /* Flags for use with sfi_process_set_flags() */ -#define SFI_PROCESS_SET_MANAGED 0x00000001 -#define SFI_PROCESS_SET_UNMANAGED 0x00000002 +#define SFI_PROCESS_SET_MANAGED 0x00000001 +#define SFI_PROCESS_SET_UNMANAGED 0x00000002 -#define SFI_PROCESS_SET_MANAGED_MASK 0x00000003 +#define SFI_PROCESS_SET_MANAGED_MASK 0x00000003 #ifndef KERNEL /* @@ -72,7 +72,7 @@ int system_get_sfi_window(uint64_t *sfi_window_usec); * used is implementation dependent and may be * longer. sfi_get_class_offtime() can be used to determine the actual * value. - * + * * A value of 0 for offtime_usec can be used to disable "Selective * Forced Idle" for all the threads placed in class_id. 
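
[Aside: the SFI flags and window/offtime discussion above surface in userspace through private Libsyscall wrappers declared in this header (system_get_sfi_window, sfi_process_get_flags). A hedged, illustration-only sketch — these interfaces are Apple-private and not for third-party use:]

    #include <stdio.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/sfi.h>

    int main(void)
    {
        uint64_t window_usec = 0;
        uint32_t flags = 0;

        if (system_get_sfi_window(&window_usec) == 0)
            printf("SFI window: %llu usec\n",
                   (unsigned long long)window_usec);

        if (sfi_process_get_flags(getpid(), &flags) == 0)
            printf("managed: %s\n",
                   (flags & SFI_PROCESS_SET_MANAGED) ? "yes" : "no");
        return 0;
    }
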
* @@ -98,17 +98,17 @@ int sfi_process_get_flags(pid_t pid, uint32_t *flags); #if PRIVATE /* This is the private system call interface between Libsyscall and xnu */ -#define SFI_CTL_OPERATION_SFI_SET_WINDOW 0x00000001 -#define SFI_CTL_OPERATION_SFI_GET_WINDOW 0x00000002 -#define SFI_CTL_OPERATION_SET_CLASS_OFFTIME 0x00000003 -#define SFI_CTL_OPERATION_GET_CLASS_OFFTIME 0x00000004 +#define SFI_CTL_OPERATION_SFI_SET_WINDOW 0x00000001 +#define SFI_CTL_OPERATION_SFI_GET_WINDOW 0x00000002 +#define SFI_CTL_OPERATION_SET_CLASS_OFFTIME 0x00000003 +#define SFI_CTL_OPERATION_GET_CLASS_OFFTIME 0x00000004 -#define SFI_PIDCTL_OPERATION_PID_SET_FLAGS 0x00000001 -#define SFI_PIDCTL_OPERATION_PID_GET_FLAGS 0x00000002 +#define SFI_PIDCTL_OPERATION_PID_SET_FLAGS 0x00000001 +#define SFI_PIDCTL_OPERATION_PID_GET_FLAGS 0x00000002 int __sfi_ctl(uint32_t operation, uint32_t sfi_class, uint64_t time, uint64_t *out_time); int __sfi_pidctl(uint32_t operation, pid_t pid, uint32_t sfi_flags, uint32_t *out_sfi_flags); #endif /* PRIVATE */ -#endif /* _SYS_SFI_H_ */ +#endif /* _SYS_SFI_H_ */ diff --git a/bsd/sys/shm.h b/bsd/sys/shm.h index 150261adc..226a5fea3 100644 --- a/bsd/sys/shm.h +++ b/bsd/sys/shm.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: shm.h,v 1.15 1994/06/29 06:45:17 cgd Exp $ */ @@ -90,15 +90,15 @@ * that MUST be able to store values at least as large as a type unsigned * short. */ -typedef unsigned short shmatt_t; +typedef unsigned short shmatt_t; /* * Possible flag values which may be OR'ed into the third argument to * shmat() */ -#define SHM_RDONLY 010000 /* [XSI] Attach read-only (else read-write) */ -#define SHM_RND 020000 /* [XSI] Round attach address to SHMLBA */ +#define SHM_RDONLY 010000 /* [XSI] Attach read-only (else read-write) */ +#define SHM_RND 020000 /* [XSI] Round attach address to SHMLBA */ /* * This value is symbolic, and generally not expected to be sed by user @@ -109,10 +109,10 @@ typedef unsigned short shmatt_t; * headers at this time, to avoid the resulting namespace * pollution, which is why we discourages its use. 
*/ -#define SHMLBA 4096 /* [XSI] Segment low boundary address multiple*/ +#define SHMLBA 4096 /* [XSI] Segment low boundary address multiple*/ /* "official" access mode definitions; somewhat braindead since you have - to specify (SHM_* >> 3) for group and (SHM_* >> 6) for world permissions */ + * to specify (SHM_* >> 3) for group and (SHM_* >> 6) for world permissions */ #define SHM_R (IPC_R) #define SHM_W (IPC_W) @@ -127,7 +127,7 @@ typedef unsigned short shmatt_t; #if __DARWIN_UNIX03 || defined(KERNEL) /* * Structure used internally. - * + * * This structure is exposed because standards dictate that it is used as * the third argment to shmctl(). * @@ -137,37 +137,37 @@ typedef unsigned short shmatt_t; #if (defined(_POSIX_C_SOURCE) && !defined(_DARWIN_C_SOURCE)) struct shmid_ds #else -#define shmid_ds __shmid_ds_new +#define shmid_ds __shmid_ds_new struct __shmid_ds_new #endif { - struct __ipc_perm_new shm_perm; /* [XSI] Operation permission value */ - size_t shm_segsz; /* [XSI] Size of segment in bytes */ - pid_t shm_lpid; /* [XSI] PID of last shared memory op */ - pid_t shm_cpid; /* [XSI] PID of creator */ - shmatt_t shm_nattch; /* [XSI] Number of current attaches */ - time_t shm_atime; /* [XSI] Time of last shmat() */ - time_t shm_dtime; /* [XSI] Time of last shmdt() */ - time_t shm_ctime; /* [XSI] Time of last shmctl() change */ - void *shm_internal; /* reserved for kernel use */ + struct __ipc_perm_new shm_perm; /* [XSI] Operation permission value */ + size_t shm_segsz; /* [XSI] Size of segment in bytes */ + pid_t shm_lpid; /* [XSI] PID of last shared memory op */ + pid_t shm_cpid; /* [XSI] PID of creator */ + shmatt_t shm_nattch; /* [XSI] Number of current attaches */ + time_t shm_atime; /* [XSI] Time of last shmat() */ + time_t shm_dtime; /* [XSI] Time of last shmdt() */ + time_t shm_ctime; /* [XSI] Time of last shmctl() change */ + void *shm_internal; /* reserved for kernel use */ }; -#else /* !__DARWIN_UNIX03 */ -#define shmid_ds __shmid_ds_old -#endif /* !__DARWIN_UNIX03 */ +#else /* !__DARWIN_UNIX03 */ +#define shmid_ds __shmid_ds_old +#endif /* !__DARWIN_UNIX03 */ #if !__DARWIN_UNIX03 struct __shmid_ds_old { - struct __ipc_perm_old shm_perm; /* [XSI] Operation permission value */ - size_t shm_segsz; /* [XSI] Size of segment in bytes */ - pid_t shm_lpid; /* [XSI] PID of last shared memory op */ - pid_t shm_cpid; /* [XSI] PID of creator */ - shmatt_t shm_nattch; /* [XSI] Number of current attaches */ - time_t shm_atime; /* [XSI] Time of last shmat() */ - time_t shm_dtime; /* [XSI] Time of last shmdt() */ - time_t shm_ctime; /* [XSI] Time of last shmctl() change */ - void *shm_internal; /* reserved for kernel use */ + struct __ipc_perm_old shm_perm; /* [XSI] Operation permission value */ + size_t shm_segsz; /* [XSI] Size of segment in bytes */ + pid_t shm_lpid; /* [XSI] PID of last shared memory op */ + pid_t shm_cpid; /* [XSI] PID of creator */ + shmatt_t shm_nattch; /* [XSI] Number of current attaches */ + time_t shm_atime; /* [XSI] Time of last shmat() */ + time_t shm_dtime; /* [XSI] Time of last shmdt() */ + time_t shm_ctime; /* [XSI] Time of last shmctl() change */ + void *shm_internal; /* reserved for kernel use */ }; -#endif /* !__DARWIN_UNIX03 */ +#endif /* !__DARWIN_UNIX03 */ #pragma pack() @@ -175,12 +175,12 @@ struct __shmid_ds_old { __BEGIN_DECLS #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -int shmsys(int, ...); +int shmsys(int, ...); #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -void *shmat (int, const void *, int); -int shmctl(int, int, struct 
shmid_ds *) __DARWIN_ALIAS(shmctl); -int shmdt(const void *); -int shmget(key_t, size_t, int); +void *shmat(int, const void *, int); +int shmctl(int, int, struct shmid_ds *) __DARWIN_ALIAS(shmctl); +int shmdt(const void *); +int shmget(key_t, size_t, int); __END_DECLS #endif /* !KERNEL */ diff --git a/bsd/sys/shm_internal.h b/bsd/sys/shm_internal.h index 86a785e97..5a2c941c4 100644 --- a/bsd/sys/shm_internal.h +++ b/bsd/sys/shm_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: shm.h,v 1.15 1994/06/29 06:45:17 cgd Exp $ */ @@ -79,27 +79,27 @@ #pragma pack(4) struct user_shmid_ds { - struct ipc_perm shm_perm; /* operation permission structure */ - user_size_t shm_segsz; /* size of segment in bytes */ - pid_t shm_lpid; /* PID of last shared memory op */ - pid_t shm_cpid; /* PID of creator */ - short shm_nattch; /* number of current attaches */ - user_time_t shm_atime; /* time of last shmat() */ - user_time_t shm_dtime; /* time of last shmdt() */ - user_time_t shm_ctime; /* time of last change by shmctl() */ - user_addr_t shm_internal; /* reserved for kernel use */ + struct ipc_perm shm_perm; /* operation permission structure */ + user_size_t shm_segsz; /* size of segment in bytes */ + pid_t shm_lpid; /* PID of last shared memory op */ + pid_t shm_cpid; /* PID of creator */ + short shm_nattch; /* number of current attaches */ + user_time_t shm_atime; /* time of last shmat() */ + user_time_t shm_dtime; /* time of last shmdt() */ + user_time_t shm_ctime; /* time of last change by shmctl() */ + user_addr_t shm_internal; /* reserved for kernel use */ }; struct user32_shmid_ds { - struct ipc_perm shm_perm; /* operation permission structure */ - uint32_t shm_segsz; /* size of segment in bytes */ - pid_t shm_lpid; /* PID of last shared memory op */ - pid_t shm_cpid; /* PID of creator */ - short shm_nattch; /* number of current attaches */ - uint32_t shm_atime; /* time of last shmat() */ - uint32_t shm_dtime; /* time of last shmdt() */ - uint32_t shm_ctime; /* time of last change by shmctl() */ - user32_addr_t shm_internal; /* reserved for kernel use */ + struct ipc_perm shm_perm; /* operation permission structure */ + uint32_t shm_segsz; /* size of segment in bytes */ + pid_t shm_lpid; /* PID of last shared memory op */ + pid_t shm_cpid; /* PID of creator */ + short shm_nattch; /* number of current attaches */ + uint32_t shm_atime; /* time of last shmat() */ + uint32_t shm_dtime; 
/* time of last shmdt() */ + uint32_t shm_ctime; /* time of last change by shmctl() */ + user32_addr_t shm_internal; /* reserved for kernel use */ }; #pragma pack() @@ -113,24 +113,24 @@ struct user32_shmid_ds { * so let's use int64_t explicitely... */ struct shminfo { - int64_t shmmax; /* max shm segment size (bytes) */ - int64_t shmmin; /* min shm segment size (bytes) */ - int64_t shmmni; /* max number of shm identifiers */ - int64_t shmseg; /* max shm segments per process */ - int64_t shmall; /* max amount of shm (pages) */ + int64_t shmmax; /* max shm segment size (bytes) */ + int64_t shmmin; /* min shm segment size (bytes) */ + int64_t shmmni; /* max number of shm identifiers */ + int64_t shmseg; /* max shm segments per process */ + int64_t shmall; /* max amount of shm (pages) */ }; #ifdef KERNEL struct label; -/* +/* * Add a kernel wrapper to the shmid_ds struct so that private info (like the * MAC label) can be added to it, without changing the user interface. */ struct shmid_kernel { struct user_shmid_ds u; - struct label *label; /* MAC label */ + struct label *label; /* MAC label */ }; extern struct shminfo shminfo; @@ -140,9 +140,9 @@ struct proc; __BEGIN_DECLS -void shmexit(struct proc *); -int shmfork(struct proc *, struct proc *); -__private_extern__ void shmexec(struct proc *); +void shmexit(struct proc *); +int shmfork(struct proc *, struct proc *); +__private_extern__ void shmexec(struct proc *); __END_DECLS diff --git a/bsd/sys/signal.h b/bsd/sys/signal.h index e7218b566..594d6a670 100644 --- a/bsd/sys/signal.h +++ b/bsd/sys/signal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
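[Editor's sketch, not part of the patch: a minimal user-space exercise of the shm*() prototypes and shmid_ds bookkeeping shown above. Error handling is trimmed and the flow is illustrative only.]

    #include <sys/ipc.h>
    #include <sys/shm.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Create an anonymous 4 KiB segment, owner read/write. */
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | SHM_R | SHM_W);
        if (id == -1) {
            perror("shmget");
            return 1;
        }

        void *p = shmat(id, NULL, 0);       /* kernel picks the address */
        if (p == (void *)-1) {
            perror("shmat");
            return 1;
        }

        struct shmid_ds ds;
        if (shmctl(id, IPC_STAT, &ds) == 0) {
            /* shm_nattch is shmatt_t (unsigned short) per the header. */
            printf("segsz=%zu nattch=%hu cpid=%d\n",
                ds.shm_segsz, ds.shm_nattch, (int)ds.shm_cpid);
        }

        (void)shmdt(p);
        (void)shmctl(id, IPC_RMID, NULL);   /* mark segment for removal */
        return 0;
    }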
All Rights Reserved */ @@ -66,61 +66,61 @@ * @(#)signal.h 8.2 (Berkeley) 1/21/94 */ -#ifndef _SYS_SIGNAL_H_ -#define _SYS_SIGNAL_H_ +#ifndef _SYS_SIGNAL_H_ +#define _SYS_SIGNAL_H_ #include #include #include -#define __DARWIN_NSIG 32 /* counting 0; could be 33 (mask is 1-32) */ +#define __DARWIN_NSIG 32 /* counting 0; could be 33 (mask is 1-32) */ #if !defined(_ANSI_SOURCE) && (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -#define NSIG __DARWIN_NSIG +#define NSIG __DARWIN_NSIG #endif -#include /* sigcontext; codes for SIGILL, SIGFPE */ +#include /* sigcontext; codes for SIGILL, SIGFPE */ -#define SIGHUP 1 /* hangup */ -#define SIGINT 2 /* interrupt */ -#define SIGQUIT 3 /* quit */ -#define SIGILL 4 /* illegal instruction (not reset when caught) */ -#define SIGTRAP 5 /* trace trap (not reset when caught) */ -#define SIGABRT 6 /* abort() */ +#define SIGHUP 1 /* hangup */ +#define SIGINT 2 /* interrupt */ +#define SIGQUIT 3 /* quit */ +#define SIGILL 4 /* illegal instruction (not reset when caught) */ +#define SIGTRAP 5 /* trace trap (not reset when caught) */ +#define SIGABRT 6 /* abort() */ #if (defined(_POSIX_C_SOURCE) && !defined(_DARWIN_C_SOURCE)) -#define SIGPOLL 7 /* pollable event ([XSR] generated, not supported) */ -#else /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define SIGIOT SIGABRT /* compatibility */ -#define SIGEMT 7 /* EMT instruction */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define SIGFPE 8 /* floating point exception */ -#define SIGKILL 9 /* kill (cannot be caught or ignored) */ -#define SIGBUS 10 /* bus error */ -#define SIGSEGV 11 /* segmentation violation */ -#define SIGSYS 12 /* bad argument to system call */ -#define SIGPIPE 13 /* write on a pipe with no one to read it */ -#define SIGALRM 14 /* alarm clock */ -#define SIGTERM 15 /* software termination signal from kill */ -#define SIGURG 16 /* urgent condition on IO channel */ -#define SIGSTOP 17 /* sendable stop signal not from tty */ -#define SIGTSTP 18 /* stop signal from tty */ -#define SIGCONT 19 /* continue a stopped process */ -#define SIGCHLD 20 /* to parent on child stop or exit */ -#define SIGTTIN 21 /* to readers pgrp upon background tty read */ -#define SIGTTOU 22 /* like TTIN for output if (tp->t_local<OSTOP) */ +#define SIGPOLL 7 /* pollable event ([XSR] generated, not supported) */ +#else /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define SIGIOT SIGABRT /* compatibility */ +#define SIGEMT 7 /* EMT instruction */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define SIGFPE 8 /* floating point exception */ +#define SIGKILL 9 /* kill (cannot be caught or ignored) */ +#define SIGBUS 10 /* bus error */ +#define SIGSEGV 11 /* segmentation violation */ +#define SIGSYS 12 /* bad argument to system call */ +#define SIGPIPE 13 /* write on a pipe with no one to read it */ +#define SIGALRM 14 /* alarm clock */ +#define SIGTERM 15 /* software termination signal from kill */ +#define SIGURG 16 /* urgent condition on IO channel */ +#define SIGSTOP 17 /* sendable stop signal not from tty */ +#define SIGTSTP 18 /* stop signal from tty */ +#define SIGCONT 19 /* continue a stopped process */ +#define SIGCHLD 20 /* to parent on child stop or exit */ +#define SIGTTIN 21 /* to readers pgrp upon background tty read */ +#define SIGTTOU 22 /* like TTIN for output if (tp->t_local<OSTOP) */ #if (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -#define SIGIO 23 /* input/output possible signal */ +#define SIGIO 23 /* input/output possible signal */ #endif -#define SIGXCPU 24 /* 
exceeded CPU time limit */ -#define SIGXFSZ 25 /* exceeded file size limit */ -#define SIGVTALRM 26 /* virtual time alarm */ -#define SIGPROF 27 /* profiling time alarm */ +#define SIGXCPU 24 /* exceeded CPU time limit */ +#define SIGXFSZ 25 /* exceeded file size limit */ +#define SIGVTALRM 26 /* virtual time alarm */ +#define SIGPROF 27 /* profiling time alarm */ #if (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -#define SIGWINCH 28 /* window size changes */ -#define SIGINFO 29 /* information request */ +#define SIGWINCH 28 /* window size changes */ +#define SIGINFO 29 /* information request */ #endif -#define SIGUSR1 30 /* user defined signal 1 */ -#define SIGUSR2 31 /* user defined signal 2 */ +#define SIGUSR1 30 /* user defined signal 1 */ +#define SIGUSR2 31 /* user defined signal 2 */ #if defined(_ANSI_SOURCE) || __DARWIN_UNIX03 || defined(__cplusplus) /* @@ -128,16 +128,16 @@ * actually supply three. Ugh! * SIG_HOLD is chosen to avoid KERN_SIG_* values in */ -#define SIG_DFL (void (*)(int))0 -#define SIG_IGN (void (*)(int))1 -#define SIG_HOLD (void (*)(int))5 -#define SIG_ERR ((void (*)(int))-1) +#define SIG_DFL (void (*)(int))0 +#define SIG_IGN (void (*)(int))1 +#define SIG_HOLD (void (*)(int))5 +#define SIG_ERR ((void (*)(int))-1) #else /* DO NOT REMOVE THE COMMENTED OUT int: fixincludes needs to see them */ -#define SIG_DFL (void (*)(/*int*/))0 -#define SIG_IGN (void (*)(/*int*/))1 -#define SIG_HOLD (void (*)(/*int*/))5 -#define SIG_ERR ((void (*)(/*int*/))-1) +#define SIG_DFL (void (*)( /*int*/ ))0 +#define SIG_IGN (void (*)( /*int*/ ))1 +#define SIG_HOLD (void (*)( /*int*/ ))5 +#define SIG_ERR ((void (*)( /*int*/ ))-1) #endif #ifndef _ANSI_SOURCE @@ -159,21 +159,21 @@ union sigval { /* Members as suggested by Annex C of POSIX 1003.1b. */ - int sival_int; - void *sival_ptr; + int sival_int; + void *sival_ptr; }; -#define SIGEV_NONE 0 /* No async notification */ -#define SIGEV_SIGNAL 1 /* aio - completion notification */ -#define SIGEV_THREAD 3 /* [NOTIMP] [RTS] call notification function */ +#define SIGEV_NONE 0 /* No async notification */ +#define SIGEV_SIGNAL 1 /* aio - completion notification */ +#define SIGEV_THREAD 3 /* [NOTIMP] [RTS] call notification function */ #ifndef KERNEL struct sigevent { - int sigev_notify; /* Notification type */ - int sigev_signo; /* Signal number */ - union sigval sigev_value; /* Signal value */ - void (*sigev_notify_function)(union sigval); /* Notification function */ - pthread_attr_t *sigev_notify_attributes; /* Notification attributes */ + int sigev_notify; /* Notification type */ + int sigev_signo; /* Signal number */ + union sigval sigev_value; /* Signal value */ + void (*sigev_notify_function)(union sigval); /* Notification function */ + pthread_attr_t *sigev_notify_attributes; /* Notification attributes */ }; #endif /* KERNEL */ @@ -181,117 +181,117 @@ struct sigevent { union user64_sigval { struct { - uint32_t pad; /* assumes Motorola byte order */ - int32_t sival_int; + uint32_t pad; /* assumes Motorola byte order */ + int32_t sival_int; } size_equivalent; - user64_addr_t sival_ptr; + user64_addr_t sival_ptr; }; union user32_sigval { /* Members as suggested by Annex C of POSIX 1003.1b. 
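[Editor's sketch, not part of the patch: on Darwin the main consumer of struct sigevent is POSIX AIO, where SIGEV_SIGNAL requests a completion signal (SIGEV_THREAD is marked NOTIMP above). The helper name submit_read is ours.]

    #include <aio.h>
    #include <signal.h>

    static char buf[512];
    static struct aiocb cb;   /* must stay live until the I/O completes */

    /* Queue an asynchronous read and request SIGUSR1 on completion,
     * tagging the request with a sigval the handler sees as si_value. */
    int
    submit_read(int fd)
    {
        cb.aio_fildes = fd;
        cb.aio_buf = buf;
        cb.aio_nbytes = sizeof(buf);
        cb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
        cb.aio_sigevent.sigev_signo = SIGUSR1;
        cb.aio_sigevent.sigev_value.sival_int = 42;
        return aio_read(&cb);
    }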
*/ - int32_t sival_int; + int32_t sival_int; user32_addr_t sival_ptr; }; union user_sigval { struct { - uint32_t pad; /* assumes Motorola byte order */ - int32_t sival_int; + uint32_t pad; /* assumes Motorola byte order */ + int32_t sival_int; } size_equivalent; user_addr_t sival_ptr; }; struct user64_sigevent { - int sigev_notify; /* Notification type */ - int sigev_signo; /* Signal number */ - union user64_sigval sigev_value; /* Signal value */ - user64_addr_t sigev_notify_function; /* Notify function */ - user64_addr_t sigev_notify_attributes; /* Notify attributes */ + int sigev_notify; /* Notification type */ + int sigev_signo; /* Signal number */ + union user64_sigval sigev_value; /* Signal value */ + user64_addr_t sigev_notify_function; /* Notify function */ + user64_addr_t sigev_notify_attributes; /* Notify attributes */ }; struct user32_sigevent { - int sigev_notify; /* Notification type */ - int sigev_signo; /* Signal number */ - union user32_sigval sigev_value; /* Signal value */ - user32_addr_t sigev_notify_function; /* Notify function */ - user32_addr_t sigev_notify_attributes; /* Notify attributes */ + int sigev_notify; /* Notification type */ + int sigev_signo; /* Signal number */ + union user32_sigval sigev_value; /* Signal value */ + user32_addr_t sigev_notify_function; /* Notify function */ + user32_addr_t sigev_notify_attributes; /* Notify attributes */ }; struct user_sigevent { - int sigev_notify; /* Notification type */ - int sigev_signo; /* Signal number */ - union user_sigval sigev_value; /* Signal value */ - user_addr_t sigev_notify_function; /* Notify function */ - user_addr_t sigev_notify_attributes; /* Notify attributes */ + int sigev_notify; /* Notification type */ + int sigev_signo; /* Signal number */ + union user_sigval sigev_value; /* Signal value */ + user_addr_t sigev_notify_function; /* Notify function */ + user_addr_t sigev_notify_attributes; /* Notify attributes */ }; -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ typedef struct __siginfo { - int si_signo; /* signal number */ - int si_errno; /* errno association */ - int si_code; /* signal code */ - pid_t si_pid; /* sending process */ - uid_t si_uid; /* sender's ruid */ - int si_status; /* exit value */ - void *si_addr; /* faulting instruction */ - union sigval si_value; /* signal value */ - long si_band; /* band event for SIGPOLL */ - unsigned long __pad[7]; /* Reserved for Future Use */ + int si_signo; /* signal number */ + int si_errno; /* errno association */ + int si_code; /* signal code */ + pid_t si_pid; /* sending process */ + uid_t si_uid; /* sender's ruid */ + int si_status; /* exit value */ + void *si_addr; /* faulting instruction */ + union sigval si_value; /* signal value */ + long si_band; /* band event for SIGPOLL */ + unsigned long __pad[7]; /* Reserved for Future Use */ } siginfo_t; #ifdef BSD_KERNEL_PRIVATE typedef struct user_siginfo { - int si_signo; /* signal number */ - int si_errno; /* errno association */ - int si_code; /* signal code */ - pid_t si_pid; /* sending process */ - uid_t si_uid; /* sender's ruid */ - int si_status; /* exit value */ - user_addr_t si_addr; /* faulting instruction (see below) */ - union user_sigval si_value; /* signal value */ - user_long_t si_band; /* band event for SIGPOLL */ - user_ulong_t pad[7]; /* Reserved for Future Use */ + int si_signo; /* signal number */ + int si_errno; /* errno association */ + int si_code; /* signal code */ + pid_t si_pid; /* sending process */ + uid_t si_uid; /* sender's ruid */ + int si_status; /* exit 
value */ + user_addr_t si_addr; /* faulting instruction (see below) */ + union user_sigval si_value; /* signal value */ + user_long_t si_band; /* band event for SIGPOLL */ + user_ulong_t pad[7]; /* Reserved for Future Use */ } user_siginfo_t; typedef struct user64_siginfo { - int si_signo; /* signal number */ - int si_errno; /* errno association */ - int si_code; /* signal code */ - pid_t si_pid; /* sending process */ - uid_t si_uid; /* sender's ruid */ - int si_status; /* exit value */ - user64_addr_t si_addr; /* faulting instruction (see below) */ - union user64_sigval si_value; /* signal value */ - user64_long_t si_band; /* band event for SIGPOLL */ - user64_ulong_t __pad[7]; /* Reserved for Future Use */ + int si_signo; /* signal number */ + int si_errno; /* errno association */ + int si_code; /* signal code */ + pid_t si_pid; /* sending process */ + uid_t si_uid; /* sender's ruid */ + int si_status; /* exit value */ + user64_addr_t si_addr; /* faulting instruction (see below) */ + union user64_sigval si_value; /* signal value */ + user64_long_t si_band; /* band event for SIGPOLL */ + user64_ulong_t __pad[7]; /* Reserved for Future Use */ } user64_siginfo_t; typedef struct user32_siginfo { - int si_signo; /* signal number */ - int si_errno; /* errno association */ - int si_code; /* signal code */ - pid_t si_pid; /* sending process */ - uid_t si_uid; /* sender's ruid */ - int si_status; /* exit value */ - user32_addr_t si_addr; /* faulting instruction (see below) */ - union user32_sigval si_value; /* signal value */ - user32_long_t si_band; /* band event for SIGPOLL */ - user32_ulong_t __pad[7]; /* Reserved for Future Use */ + int si_signo; /* signal number */ + int si_errno; /* errno association */ + int si_code; /* signal code */ + pid_t si_pid; /* sending process */ + uid_t si_uid; /* sender's ruid */ + int si_status; /* exit value */ + user32_addr_t si_addr; /* faulting instruction (see below) */ + union user32_sigval si_value; /* signal value */ + user32_long_t si_band; /* band event for SIGPOLL */ + user32_ulong_t __pad[7]; /* Reserved for Future Use */ } user32_siginfo_t; void siginfo_user_to_user32(user_siginfo_t *, user32_siginfo_t *); void siginfo_user_to_user64(user_siginfo_t *, user64_siginfo_t *); -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ -/* - * When the signal is SIGILL or SIGFPE, si_addr contains the address of +/* + * When the signal is SIGILL or SIGFPE, si_addr contains the address of * the faulting instruction. - * When the signal is SIGSEGV or SIGBUS, si_addr contains the address of + * When the signal is SIGSEGV or SIGBUS, si_addr contains the address of * the faulting memory reference. Although for x86 there are cases of SIGSEGV - * for which si_addr cannot be determined and is NULL. + * for which si_addr cannot be determined and is NULL. * If the signal is SIGCHLD, the si_pid field will contain the child process ID, * si_status contains the exit value or signal and * si_uid contains the real user ID of the process that sent the signal. @@ -301,93 +301,93 @@ void siginfo_user_to_user64(user_siginfo_t *, user64_siginfo_t *); /* Codes for SIGILL */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define ILL_NOOP 0 /* if only I knew... */ +#define ILL_NOOP 0 /* if only I knew... 
*/ #endif -#define ILL_ILLOPC 1 /* [XSI] illegal opcode */ -#define ILL_ILLTRP 2 /* [XSI] illegal trap */ -#define ILL_PRVOPC 3 /* [XSI] privileged opcode */ -#define ILL_ILLOPN 4 /* [XSI] illegal operand -NOTIMP */ -#define ILL_ILLADR 5 /* [XSI] illegal addressing mode -NOTIMP */ -#define ILL_PRVREG 6 /* [XSI] privileged register -NOTIMP */ -#define ILL_COPROC 7 /* [XSI] coprocessor error -NOTIMP */ -#define ILL_BADSTK 8 /* [XSI] internal stack error -NOTIMP */ +#define ILL_ILLOPC 1 /* [XSI] illegal opcode */ +#define ILL_ILLTRP 2 /* [XSI] illegal trap */ +#define ILL_PRVOPC 3 /* [XSI] privileged opcode */ +#define ILL_ILLOPN 4 /* [XSI] illegal operand -NOTIMP */ +#define ILL_ILLADR 5 /* [XSI] illegal addressing mode -NOTIMP */ +#define ILL_PRVREG 6 /* [XSI] privileged register -NOTIMP */ +#define ILL_COPROC 7 /* [XSI] coprocessor error -NOTIMP */ +#define ILL_BADSTK 8 /* [XSI] internal stack error -NOTIMP */ /* Codes for SIGFPE */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define FPE_NOOP 0 /* if only I knew... */ +#define FPE_NOOP 0 /* if only I knew... */ #endif -#define FPE_FLTDIV 1 /* [XSI] floating point divide by zero */ -#define FPE_FLTOVF 2 /* [XSI] floating point overflow */ -#define FPE_FLTUND 3 /* [XSI] floating point underflow */ -#define FPE_FLTRES 4 /* [XSI] floating point inexact result */ -#define FPE_FLTINV 5 /* [XSI] invalid floating point operation */ -#define FPE_FLTSUB 6 /* [XSI] subscript out of range -NOTIMP */ -#define FPE_INTDIV 7 /* [XSI] integer divide by zero */ -#define FPE_INTOVF 8 /* [XSI] integer overflow */ +#define FPE_FLTDIV 1 /* [XSI] floating point divide by zero */ +#define FPE_FLTOVF 2 /* [XSI] floating point overflow */ +#define FPE_FLTUND 3 /* [XSI] floating point underflow */ +#define FPE_FLTRES 4 /* [XSI] floating point inexact result */ +#define FPE_FLTINV 5 /* [XSI] invalid floating point operation */ +#define FPE_FLTSUB 6 /* [XSI] subscript out of range -NOTIMP */ +#define FPE_INTDIV 7 /* [XSI] integer divide by zero */ +#define FPE_INTOVF 8 /* [XSI] integer overflow */ /* Codes for SIGSEGV */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SEGV_NOOP 0 /* if only I knew... */ +#define SEGV_NOOP 0 /* if only I knew... */ #endif -#define SEGV_MAPERR 1 /* [XSI] address not mapped to object */ -#define SEGV_ACCERR 2 /* [XSI] invalid permission for mapped object */ +#define SEGV_MAPERR 1 /* [XSI] address not mapped to object */ +#define SEGV_ACCERR 2 /* [XSI] invalid permission for mapped object */ /* Codes for SIGBUS */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define BUS_NOOP 0 /* if only I knew... */ +#define BUS_NOOP 0 /* if only I knew... */ #endif -#define BUS_ADRALN 1 /* [XSI] Invalid address alignment */ -#define BUS_ADRERR 2 /* [XSI] Nonexistent physical address -NOTIMP */ -#define BUS_OBJERR 3 /* [XSI] Object-specific HW error - NOTIMP */ +#define BUS_ADRALN 1 /* [XSI] Invalid address alignment */ +#define BUS_ADRERR 2 /* [XSI] Nonexistent physical address -NOTIMP */ +#define BUS_OBJERR 3 /* [XSI] Object-specific HW error - NOTIMP */ /* Codes for SIGTRAP */ -#define TRAP_BRKPT 1 /* [XSI] Process breakpoint -NOTIMP */ -#define TRAP_TRACE 2 /* [XSI] Process trace trap -NOTIMP */ +#define TRAP_BRKPT 1 /* [XSI] Process breakpoint -NOTIMP */ +#define TRAP_TRACE 2 /* [XSI] Process trace trap -NOTIMP */ /* Codes for SIGCHLD */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define CLD_NOOP 0 /* if only I knew... */ +#define CLD_NOOP 0 /* if only I knew... 
*/ #endif -#define CLD_EXITED 1 /* [XSI] child has exited */ -#define CLD_KILLED 2 /* [XSI] terminated abnormally, no core file */ -#define CLD_DUMPED 3 /* [XSI] terminated abnormally, core file */ -#define CLD_TRAPPED 4 /* [XSI] traced child has trapped */ -#define CLD_STOPPED 5 /* [XSI] child has stopped */ -#define CLD_CONTINUED 6 /* [XSI] stopped child has continued */ +#define CLD_EXITED 1 /* [XSI] child has exited */ +#define CLD_KILLED 2 /* [XSI] terminated abnormally, no core file */ +#define CLD_DUMPED 3 /* [XSI] terminated abnormally, core file */ +#define CLD_TRAPPED 4 /* [XSI] traced child has trapped */ +#define CLD_STOPPED 5 /* [XSI] child has stopped */ +#define CLD_CONTINUED 6 /* [XSI] stopped child has continued */ /* Codes for SIGPOLL */ -#define POLL_IN 1 /* [XSR] Data input available */ -#define POLL_OUT 2 /* [XSR] Output buffers available */ -#define POLL_MSG 3 /* [XSR] Input message available */ -#define POLL_ERR 4 /* [XSR] I/O error */ -#define POLL_PRI 5 /* [XSR] High priority input available */ -#define POLL_HUP 6 /* [XSR] Device disconnected */ +#define POLL_IN 1 /* [XSR] Data input available */ +#define POLL_OUT 2 /* [XSR] Output buffers available */ +#define POLL_MSG 3 /* [XSR] Input message available */ +#define POLL_ERR 4 /* [XSR] I/O error */ +#define POLL_PRI 5 /* [XSR] High priority input available */ +#define POLL_HUP 6 /* [XSR] Device disconnected */ /* union for signal handlers */ union __sigaction_u { void (*__sa_handler)(int); void (*__sa_sigaction)(int, struct __siginfo *, - void *); + void *); }; /* Signal vector template for Kernel user boundary */ -struct __sigaction { +struct __sigaction { union __sigaction_u __sigaction_u; /* signal handler */ void (*sa_tramp)(void *, int, int, siginfo_t *, void *); - sigset_t sa_mask; /* signal mask to apply */ - int sa_flags; /* see signal options below */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ }; /* * Signal vector "template" used in sigaction call. 
*/ -struct sigaction { +struct sigaction { union __sigaction_u __sigaction_u; /* signal handler */ - sigset_t sa_mask; /* signal mask to apply */ - int sa_flags; /* see signal options below */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ }; -#ifdef BSD_KERNEL_PRIVATE +#ifdef BSD_KERNEL_PRIVATE #include union __user32_sigaction_u { @@ -395,89 +395,89 @@ union __user32_sigaction_u { user32_addr_t __sa_sigaction; }; -struct user32_sigaction { +struct user32_sigaction { union __user32_sigaction_u __sigaction_u; /* signal handler */ - sigset_t sa_mask; /* signal mask to apply */ - int sa_flags; /* see signal options below */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ }; -struct __user32_sigaction { +struct __user32_sigaction { union __user32_sigaction_u __sigaction_u; /* signal handler */ user32_addr_t sa_tramp; - sigset_t sa_mask; /* signal mask to apply */ - int sa_flags; /* see signal options below */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ }; union __user64_sigaction_u { - user64_addr_t __sa_handler; - user64_addr_t __sa_sigaction; + user64_addr_t __sa_handler; + user64_addr_t __sa_sigaction; }; -struct user64_sigaction { +struct user64_sigaction { union __user64_sigaction_u __sigaction_u; /* signal handler */ - sigset_t sa_mask; /* signal mask to apply */ - int sa_flags; /* see signal options below */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ }; -struct __user64_sigaction { +struct __user64_sigaction { union __user64_sigaction_u __sigaction_u; /* signal handler */ - user64_addr_t sa_tramp; /* signal mask to apply */ - sigset_t sa_mask; /* signal mask to apply */ - int sa_flags; /* see signal options below */ + user64_addr_t sa_tramp; /* signal mask to apply */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ }; union __kern_sigaction_u { - user_addr_t __sa_handler; - user_addr_t __sa_sigaction; + user_addr_t __sa_handler; + user_addr_t __sa_sigaction; }; -struct kern_sigaction { +struct kern_sigaction { union __kern_sigaction_u __sigaction_u; /* signal handler */ - sigset_t sa_mask; /* signal mask to apply */ - int sa_flags; /* see signal options below */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ }; -struct __kern_sigaction { +struct __kern_sigaction { union __kern_sigaction_u __sigaction_u; /* signal handler */ - user_addr_t sa_tramp; /* signal mask to apply */ - sigset_t sa_mask; /* signal mask to apply */ - int sa_flags; /* see signal options below */ + user_addr_t sa_tramp; /* signal mask to apply */ + sigset_t sa_mask; /* signal mask to apply */ + int sa_flags; /* see signal options below */ }; #undef SIG_DFL #undef SIG_IGN #undef SIG_ERR -#define SIG_DFL ((user_addr_t)0LL) -#define SIG_IGN ((user_addr_t)1LL) -#define SIG_ERR ((user_addr_t)-1LL) +#define SIG_DFL ((user_addr_t)0LL) +#define SIG_IGN ((user_addr_t)1LL) +#define SIG_ERR ((user_addr_t)-1LL) -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ /* if SA_SIGINFO is set, sa_sigaction is to be used instead of sa_handler. 
*/ -#define sa_handler __sigaction_u.__sa_handler -#define sa_sigaction __sigaction_u.__sa_sigaction - -#define SA_ONSTACK 0x0001 /* take signal on signal stack */ -#define SA_RESTART 0x0002 /* restart system on signal return */ -#ifdef BSD_KERNEL_PRIVATE -#define SA_DISABLE 0x0004 /* disable taking signals on alternate stack - for user_sigaltstack.ss_flags only */ -#endif /* BSD_KERNEL_PRIVATE */ -#define SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */ -#define SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */ -#define SA_NODEFER 0x0010 /* don't mask the signal we're delivering */ -#define SA_NOCLDWAIT 0x0020 /* don't keep zombies around */ -#define SA_SIGINFO 0x0040 /* signal handler with SA_SIGINFO args */ +#define sa_handler __sigaction_u.__sa_handler +#define sa_sigaction __sigaction_u.__sa_sigaction + +#define SA_ONSTACK 0x0001 /* take signal on signal stack */ +#define SA_RESTART 0x0002 /* restart system on signal return */ +#ifdef BSD_KERNEL_PRIVATE +#define SA_DISABLE 0x0004 /* disable taking signals on alternate stack - for user_sigaltstack.ss_flags only */ +#endif /* BSD_KERNEL_PRIVATE */ +#define SA_RESETHAND 0x0004 /* reset to SIG_DFL when taking signal */ +#define SA_NOCLDSTOP 0x0008 /* do not generate SIGCHLD on child stop */ +#define SA_NODEFER 0x0010 /* don't mask the signal we're delivering */ +#define SA_NOCLDWAIT 0x0020 /* don't keep zombies around */ +#define SA_SIGINFO 0x0040 /* signal handler with SA_SIGINFO args */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */ +#define SA_USERTRAMP 0x0100 /* do not bounce off kernel's sigtramp */ /* This will provide 64bit register set in a 32bit user address space */ -#define SA_64REGSET 0x0200 /* signal handler with SA_SIGINFO args with 64bit regs information */ +#define SA_64REGSET 0x0200 /* signal handler with SA_SIGINFO args with 64bit regs information */ #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#ifdef BSD_KERNEL_PRIVATE -#define SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP 0x0400 /* use token to validate sigreturn was called from matching sigtramp */ -#endif /* BSD_KERNEL_PRIVATE */ +#ifdef BSD_KERNEL_PRIVATE +#define SA_VALIDATE_SIGRETURN_FROM_SIGTRAMP 0x0400 /* use token to validate sigreturn was called from matching sigtramp */ +#endif /* BSD_KERNEL_PRIVATE */ -/* the following are the only bits we support from user space, the +/* the following are the only bits we support from user space, the * rest are for kernel use only. */ #define SA_USERSPACE_MASK (SA_ONSTACK | SA_RESTART | SA_RESETHAND | SA_NOCLDSTOP | SA_NODEFER | SA_NOCLDWAIT | SA_SIGINFO) @@ -485,78 +485,78 @@ struct __kern_sigaction { /* * Flags for sigprocmask: */ -#define SIG_BLOCK 1 /* block specified signal set */ -#define SIG_UNBLOCK 2 /* unblock specified signal set */ -#define SIG_SETMASK 3 /* set specified signal set */ +#define SIG_BLOCK 1 /* block specified signal set */ +#define SIG_UNBLOCK 2 /* unblock specified signal set */ +#define SIG_SETMASK 3 /* set specified signal set */ /* POSIX 1003.1b required values. 
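[Editor's sketch, not part of the patch: how the pieces above fit together from user space. SA_SIGINFO selects the three-argument handler through the __sigaction_u union, and sigprocmask() takes the SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK verbs just defined.]

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static void
    on_segv(int sig, siginfo_t *si, void *uap)
    {
        /* si_addr carries the faulting address for SIGSEGV/SIGBUS.
         * (printf is not async-signal-safe; demo only.) */
        (void)uap;
        printf("sig %d at %p, code %d\n", sig, si->si_addr, si->si_code);
        _exit(1);
    }

    int
    main(void)
    {
        struct sigaction sa = { 0 };
        sa.sa_sigaction = on_segv;      /* via the __sigaction_u union */
        sa.sa_flags = SA_SIGINFO;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGSEGV, &sa, NULL);

        /* Temporarily hold SIGINT across a critical section. */
        sigset_t set, old;
        sigemptyset(&set);
        sigaddset(&set, SIGINT);
        sigprocmask(SIG_BLOCK, &set, &old);
        /* ... critical section ... */
        sigprocmask(SIG_SETMASK, &old, NULL);
        return 0;
    }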
*/ -#define SI_USER 0x10001 /* [CX] signal from kill() */ -#define SI_QUEUE 0x10002 /* [CX] signal from sigqueue() */ -#define SI_TIMER 0x10003 /* [CX] timer expiration */ -#define SI_ASYNCIO 0x10004 /* [CX] aio request completion */ -#define SI_MESGQ 0x10005 /* [CX] from message arrival on empty queue */ +#define SI_USER 0x10001 /* [CX] signal from kill() */ +#define SI_QUEUE 0x10002 /* [CX] signal from sigqueue() */ +#define SI_TIMER 0x10003 /* [CX] timer expiration */ +#define SI_ASYNCIO 0x10004 /* [CX] aio request completion */ +#define SI_MESGQ 0x10005 /* [CX] from message arrival on empty queue */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -typedef void (*sig_t)(int); /* type of signal function */ +typedef void (*sig_t)(int); /* type of signal function */ #endif /* * Structure used in sigaltstack call. */ -#ifdef BSD_KERNEL_PRIVATE +#ifdef BSD_KERNEL_PRIVATE struct user32_sigaltstack { - user32_addr_t ss_sp; /* signal stack base */ - user32_size_t ss_size; /* signal stack length */ - int ss_flags; /* SA_DISABLE and/or SA_ONSTACK */ + user32_addr_t ss_sp; /* signal stack base */ + user32_size_t ss_size; /* signal stack length */ + int ss_flags; /* SA_DISABLE and/or SA_ONSTACK */ }; struct user64_sigaltstack { - user64_addr_t ss_sp; /* signal stack base */ - user64_size_t ss_size; /* signal stack length */ - int ss_flags; /* SA_DISABLE and/or SA_ONSTACK */ + user64_addr_t ss_sp; /* signal stack base */ + user64_size_t ss_size; /* signal stack length */ + int ss_flags; /* SA_DISABLE and/or SA_ONSTACK */ }; struct kern_sigaltstack { - user_addr_t ss_sp; /* signal stack base */ - user_size_t ss_size; /* signal stack length */ - int ss_flags; /* SA_DISABLE and/or SA_ONSTACK */ + user_addr_t ss_sp; /* signal stack base */ + user_size_t ss_size; /* signal stack length */ + int ss_flags; /* SA_DISABLE and/or SA_ONSTACK */ }; -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ -#define SS_ONSTACK 0x0001 /* take signal on signal stack */ -#define SS_DISABLE 0x0004 /* disable taking signals on alternate stack */ -#define MINSIGSTKSZ 32768 /* (32K)minimum allowable stack */ -#define SIGSTKSZ 131072 /* (128K)recommended stack size */ +#define SS_ONSTACK 0x0001 /* take signal on signal stack */ +#define SS_DISABLE 0x0004 /* disable taking signals on alternate stack */ +#define MINSIGSTKSZ 32768 /* (32K)minimum allowable stack */ +#define SIGSTKSZ 131072 /* (128K)recommended stack size */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* * 4.3 compatibility: * Signal vector "template" used in sigvec call. */ -struct sigvec { - void (*sv_handler)(int); /* signal handler */ - int sv_mask; /* signal mask to apply */ - int sv_flags; /* see signal options below */ +struct sigvec { + void (*sv_handler)(int); /* signal handler */ + int sv_mask; /* signal mask to apply */ + int sv_flags; /* see signal options below */ }; -#define SV_ONSTACK SA_ONSTACK -#define SV_INTERRUPT SA_RESTART /* same bit, opposite sense */ -#define SV_RESETHAND SA_RESETHAND -#define SV_NODEFER SA_NODEFER -#define SV_NOCLDSTOP SA_NOCLDSTOP -#define SV_SIGINFO SA_SIGINFO +#define SV_ONSTACK SA_ONSTACK +#define SV_INTERRUPT SA_RESTART /* same bit, opposite sense */ +#define SV_RESETHAND SA_RESETHAND +#define SV_NODEFER SA_NODEFER +#define SV_NOCLDSTOP SA_NOCLDSTOP +#define SV_SIGINFO SA_SIGINFO -#define sv_onstack sv_flags /* isn't compatibility wonderful! */ +#define sv_onstack sv_flags /* isn't compatibility wonderful! 
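[Editor's sketch, not part of the patch: the SS_*/SIGSTKSZ constants above pair with SA_ONSTACK; a common use is giving a stack-overflow handler its own stack. The helper name install_altstack is ours.]

    #include <signal.h>
    #include <stdlib.h>

    /* Run the SIGSEGV handler on a dedicated stack so it survives
     * overflow of the main thread's stack. */
    int
    install_altstack(void (*handler)(int, siginfo_t *, void *))
    {
        stack_t ss;
        ss.ss_sp = malloc(SIGSTKSZ);    /* 128K recommended size above */
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) != 0)
            return -1;

        struct sigaction sa = { 0 };
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO | SA_ONSTACK;  /* deliver on the alternate stack */
        sigemptyset(&sa.sa_mask);
        return sigaction(SIGSEGV, &sa, NULL);
    }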
*/ #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Structure used in sigstack call. */ -struct sigstack { - char *ss_sp; /* signal stack pointer */ - int ss_onstack; /* current status */ +struct sigstack { + char *ss_sp; /* signal stack pointer */ + int ss_onstack; /* current status */ }; #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) @@ -564,17 +564,17 @@ struct sigstack { * Macro for converting signal number to a mask suitable for * sigblock(). */ -#define sigmask(m) (1 << ((m)-1)) +#define sigmask(m) (1 << ((m)-1)) -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE /* * signals delivered on a per-thread basis. */ #define threadmask (sigmask(SIGILL)|sigmask(SIGTRAP)|\ - sigmask(SIGABRT)|sigmask(SIGEMT)|\ - sigmask(SIGFPE)|sigmask(SIGBUS)|\ - sigmask(SIGSEGV)|sigmask(SIGSYS)|\ - sigmask(SIGPIPE)|sigmask(SIGKILL)) + sigmask(SIGABRT)|sigmask(SIGEMT)|\ + sigmask(SIGFPE)|sigmask(SIGBUS)|\ + sigmask(SIGSEGV)|sigmask(SIGSYS)|\ + sigmask(SIGPIPE)|sigmask(SIGKILL)) #define workq_threadmask ((threadmask | sigcantmask | sigmask(SIGPROF)) & ~sigmask(SIGABRT)) @@ -582,24 +582,24 @@ struct sigstack { * Signals carried across exec. */ #define execmask (sigmask(SIGHUP)|sigmask(SIGINT)|\ - sigmask(SIGQUIT)|sigmask(SIGKILL)|\ - sigmask(SIGTERM)|sigmask(SIGSTOP)|\ - sigmask(SIGTSTP)|sigmask(SIGCONT)|\ - sigmask(SIGTTIN)|sigmask(SIGTTOU)|\ - sigmask(SIGUSR1)|sigmask(SIGUSR2)) + sigmask(SIGQUIT)|sigmask(SIGKILL)|\ + sigmask(SIGTERM)|sigmask(SIGSTOP)|\ + sigmask(SIGTSTP)|sigmask(SIGCONT)|\ + sigmask(SIGTTIN)|sigmask(SIGTTOU)|\ + sigmask(SIGUSR1)|sigmask(SIGUSR2)) -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -#define BADSIG SIG_ERR +#define BADSIG SIG_ERR -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#endif /* !_ANSI_SOURCE */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* !_ANSI_SOURCE */ /* * For historical reasons; programs expect signal's return value to be * defined by . */ __BEGIN_DECLS -void (*signal(int, void (*)(int)))(int); + void(*signal(int, void (*)(int)))(int); __END_DECLS -#endif /* !_SYS_SIGNAL_H_ */ +#endif /* !_SYS_SIGNAL_H_ */ diff --git a/bsd/sys/signalvar.h b/bsd/sys/signalvar.h index 3419e8c23..e96bfab71 100644 --- a/bsd/sys/signalvar.h +++ b/bsd/sys/signalvar.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -61,8 +61,8 @@ * @(#)signalvar.h 8.3 (Berkeley) 1/4/94 */ -#ifndef _SYS_SIGNALVAR_H_ /* tmp for user.h */ -#define _SYS_SIGNALVAR_H_ +#ifndef _SYS_SIGNALVAR_H_ /* tmp for user.h */ +#define _SYS_SIGNALVAR_H_ #include @@ -79,37 +79,37 @@ * Process signal actions and state, needed only within the process * (not necessarily resident). */ -struct sigacts { - user_addr_t ps_sigact[NSIG]; /* disposition of signals */ - user_addr_t ps_trampact[NSIG]; /* disposition of signals */ - sigset_t ps_catchmask[NSIG]; /* signals to be blocked */ - sigset_t ps_sigonstack; /* signals to take on sigstack */ - sigset_t ps_sigintr; /* signals that interrupt syscalls */ - sigset_t ps_sigreset; /* signals that reset when caught */ - sigset_t ps_signodefer; /* signals not masked while handled */ - sigset_t ps_siginfo; /* signals that want SA_SIGINFO args */ - sigset_t ps_oldmask; /* saved mask from before sigpause */ +struct sigacts { + user_addr_t ps_sigact[NSIG]; /* disposition of signals */ + user_addr_t ps_trampact[NSIG]; /* disposition of signals */ + sigset_t ps_catchmask[NSIG]; /* signals to be blocked */ + sigset_t ps_sigonstack; /* signals to take on sigstack */ + sigset_t ps_sigintr; /* signals that interrupt syscalls */ + sigset_t ps_sigreset; /* signals that reset when caught */ + sigset_t ps_signodefer; /* signals not masked while handled */ + sigset_t ps_siginfo; /* signals that want SA_SIGINFO args */ + sigset_t ps_oldmask; /* saved mask from before sigpause */ user_addr_t ps_sigreturn_token; /* random token used to validate sigreturn arguments */ _Atomic uint32_t ps_sigreturn_validation; /* sigreturn argument validation state */ - int ps_flags; /* signal flags, below */ - struct kern_sigaltstack ps_sigstk; /* sp, length & flags */ - int ps_sig; /* for core dump/debugger XXX */ - int ps_code; /* for core dump/debugger XXX */ - int ps_addr; /* for core dump/debugger XXX */ + int ps_flags; /* signal flags, below */ + struct kern_sigaltstack ps_sigstk; /* sp, length & flags */ + int ps_sig; /* for core dump/debugger XXX */ + int ps_code; /* for core dump/debugger XXX */ + int ps_addr; /* for core dump/debugger XXX */ }; /* signal flags */ -#define SAS_OLDMASK 0x01 /* need to restore mask before pause */ -#define SAS_ALTSTACK 0x02 /* have alternate signal stack */ +#define SAS_OLDMASK 0x01 /* need to restore mask before pause */ +#define SAS_ALTSTACK 0x02 /* have alternate signal stack */ /* * Additional signal action values, used only temporarily/internally; these * values should be non-intersecting with values defined in signal.h, e.g.: * SIG_IGN, SIG_DFL, SIG_ERR, SIG_IGN. */ -#define KERN_SIG_CATCH CAST_USER_ADDR_T(2) -#define KERN_SIG_HOLD CAST_USER_ADDR_T(3) -#define KERN_SIG_WAIT CAST_USER_ADDR_T(4) +#define KERN_SIG_CATCH CAST_USER_ADDR_T(2) +#define KERN_SIG_HOLD CAST_USER_ADDR_T(3) +#define KERN_SIG_WAIT CAST_USER_ADDR_T(4) /* Values for ps_sigreturn_validation */ #define PS_SIGRETURN_VALIDATION_DEFAULT 0x0u @@ -119,24 +119,24 @@ struct sigacts { /* * get signal action for process and signal; currently only for current process */ -#define SIGACTION(p, sig) (p->p_sigacts->ps_sigact[(sig)]) +#define SIGACTION(p, sig) (p->p_sigacts->ps_sigact[(sig)]) /* * Check for per-process and per thread signals. */ -#define SHOULDissignal(p,uthreadp) \ - (((uthreadp)->uu_siglist) \ +#define SHOULDissignal(p, uthreadp) \ + (((uthreadp)->uu_siglist) \ & ~((((uthreadp)->uu_sigmask) \ | (((p)->p_lflag & P_LTRACED) ? 
0 : (p)->p_sigignore)) \ & ~sigcantmask)) /* - * Check for signals and per-thread signals. + * Check for signals and per-thread signals. * Use in trap() and syscall() before * exiting kernel. */ -#define CHECK_SIGNALS(p, thread, uthreadp) \ - (!thread_should_halt(thread) \ +#define CHECK_SIGNALS(p, thread, uthreadp) \ + (!thread_should_halt(thread) \ && (SHOULDissignal(p,uthreadp))) /* @@ -144,61 +144,61 @@ struct sigacts { * The array below categorizes the signals and their default actions * according to the following properties: */ -#define SA_KILL 0x01 /* terminates process by default */ -#define SA_CORE 0x02 /* ditto and coredumps */ -#define SA_STOP 0x04 /* suspend process */ -#define SA_TTYSTOP 0x08 /* ditto, from tty */ -#define SA_IGNORE 0x10 /* ignore by default */ -#define SA_CONT 0x20 /* continue if suspended */ -#define SA_CANTMASK 0x40 /* non-maskable, catchable */ - -#ifdef SIGPROP +#define SA_KILL 0x01 /* terminates process by default */ +#define SA_CORE 0x02 /* ditto and coredumps */ +#define SA_STOP 0x04 /* suspend process */ +#define SA_TTYSTOP 0x08 /* ditto, from tty */ +#define SA_IGNORE 0x10 /* ignore by default */ +#define SA_CONT 0x20 /* continue if suspended */ +#define SA_CANTMASK 0x40 /* non-maskable, catchable */ + +#ifdef SIGPROP int sigprop[NSIG] = { - 0, /* unused */ - SA_KILL, /* SIGHUP */ - SA_KILL, /* SIGINT */ - SA_KILL|SA_CORE, /* SIGQUIT */ - SA_KILL|SA_CORE, /* SIGILL */ - SA_KILL|SA_CORE, /* SIGTRAP */ - SA_KILL|SA_CORE, /* SIGABRT */ - SA_KILL|SA_CORE, /* SIGEMT */ - SA_KILL|SA_CORE, /* SIGFPE */ - SA_KILL, /* SIGKILL */ - SA_KILL|SA_CORE, /* SIGBUS */ - SA_KILL|SA_CORE, /* SIGSEGV */ - SA_KILL|SA_CORE, /* SIGSYS */ - SA_KILL, /* SIGPIPE */ - SA_KILL, /* SIGALRM */ - SA_KILL, /* SIGTERM */ - SA_IGNORE, /* SIGURG */ - SA_STOP, /* SIGSTOP */ - SA_STOP|SA_TTYSTOP, /* SIGTSTP */ - SA_IGNORE|SA_CONT, /* SIGCONT */ - SA_IGNORE, /* SIGCHLD */ - SA_STOP|SA_TTYSTOP, /* SIGTTIN */ - SA_STOP|SA_TTYSTOP, /* SIGTTOU */ - SA_IGNORE, /* SIGIO */ - SA_KILL, /* SIGXCPU */ - SA_KILL, /* SIGXFSZ */ - SA_KILL, /* SIGVTALRM */ - SA_KILL, /* SIGPROF */ - SA_IGNORE, /* SIGWINCH */ - SA_IGNORE, /* SIGINFO */ - SA_KILL, /* SIGUSR1 */ - SA_KILL, /* SIGUSR2 */ + 0, /* unused */ + SA_KILL, /* SIGHUP */ + SA_KILL, /* SIGINT */ + SA_KILL | SA_CORE, /* SIGQUIT */ + SA_KILL | SA_CORE, /* SIGILL */ + SA_KILL | SA_CORE, /* SIGTRAP */ + SA_KILL | SA_CORE, /* SIGABRT */ + SA_KILL | SA_CORE, /* SIGEMT */ + SA_KILL | SA_CORE, /* SIGFPE */ + SA_KILL, /* SIGKILL */ + SA_KILL | SA_CORE, /* SIGBUS */ + SA_KILL | SA_CORE, /* SIGSEGV */ + SA_KILL | SA_CORE, /* SIGSYS */ + SA_KILL, /* SIGPIPE */ + SA_KILL, /* SIGALRM */ + SA_KILL, /* SIGTERM */ + SA_IGNORE, /* SIGURG */ + SA_STOP, /* SIGSTOP */ + SA_STOP | SA_TTYSTOP, /* SIGTSTP */ + SA_IGNORE | SA_CONT, /* SIGCONT */ + SA_IGNORE, /* SIGCHLD */ + SA_STOP | SA_TTYSTOP, /* SIGTTIN */ + SA_STOP | SA_TTYSTOP, /* SIGTTOU */ + SA_IGNORE, /* SIGIO */ + SA_KILL, /* SIGXCPU */ + SA_KILL, /* SIGXFSZ */ + SA_KILL, /* SIGVTALRM */ + SA_KILL, /* SIGPROF */ + SA_IGNORE, /* SIGWINCH */ + SA_IGNORE, /* SIGINFO */ + SA_KILL, /* SIGUSR1 */ + SA_KILL, /* SIGUSR2 */ }; -#define contsigmask (sigmask(SIGCONT)) -#define stopsigmask (sigmask(SIGSTOP) | sigmask(SIGTSTP) | \ - sigmask(SIGTTIN) | sigmask(SIGTTOU)) +#define contsigmask (sigmask(SIGCONT)) +#define stopsigmask (sigmask(SIGSTOP) | sigmask(SIGTSTP) | \ + sigmask(SIGTTIN) | sigmask(SIGTTOU)) #endif /* SIGPROP */ -#define sigcantmask (sigmask(SIGKILL) | sigmask(SIGSTOP)) +#define sigcantmask (sigmask(SIGKILL) | 
sigmask(SIGSTOP)) #define SIGRESTRICTMASK (sigmask(SIGILL) | sigmask(SIGTRAP) | sigmask(SIGABRT) | \ - sigmask(SIGFPE) | sigmask(SIGBUS) | sigmask(SIGSEGV) | \ - sigmask(SIGSYS)) + sigmask(SIGFPE) | sigmask(SIGBUS) | sigmask(SIGSEGV) | \ + sigmask(SIGSYS)) extern unsigned sigrestrict_arg; @@ -206,49 +206,49 @@ extern unsigned sigrestrict_arg; * Machine-independent functions: */ -void execsigs(struct proc *p, thread_t thread); -void gsignal(int pgid, int sig); -int issignal_locked(struct proc *p); -int CURSIG(struct proc *p); +void execsigs(struct proc *p, thread_t thread); +void gsignal(int pgid, int sig); +int issignal_locked(struct proc *p); +int CURSIG(struct proc *p); int clear_procsiglist(struct proc *p, int bit, int in_signalstart); int set_procsigmask(struct proc *p, int bit); -void postsig_locked(int sig); -void siginit(struct proc *p); -void trapsignal(struct proc *p, int sig, unsigned code); -void pt_setrunnable(struct proc *p); -int hassigprop(int sig, int prop); +void postsig_locked(int sig); +void siginit(struct proc *p); +void trapsignal(struct proc *p, int sig, unsigned code); +void pt_setrunnable(struct proc *p); +int hassigprop(int sig, int prop); int setsigvec(proc_t, thread_t, int signum, struct __kern_sigaction *, boolean_t in_sigstart); struct os_reason; /* * Machine-dependent functions: */ -void sendsig(struct proc *, /*sig_t*/ user_addr_t action, int sig, - int returnmask, uint32_t code); - -void psignal(struct proc *p, int sig); -void psignal_with_reason(struct proc *p, int sig, struct os_reason *signal_reason); -void psignal_locked(struct proc *, int); -void psignal_try_thread(proc_t, thread_t, int signum); -void psignal_try_thread_with_reason(proc_t, thread_t, int, struct os_reason*); -void psignal_thread_with_reason(proc_t, thread_t, int, struct os_reason*); -void psignal_uthread(thread_t, int); -void pgsignal(struct pgrp *pgrp, int sig, int checkctty); -void tty_pgsignal(struct tty * tp, int sig, int checkctty); -void threadsignal(thread_t sig_actthread, int signum, - mach_exception_code_t code, boolean_t set_exitreason); -int thread_issignal(proc_t p, thread_t th, sigset_t mask); -void psignal_vfork(struct proc *p, task_t new_task, thread_t thread, - int signum); +void sendsig(struct proc *, /*sig_t*/ user_addr_t action, int sig, + int returnmask, uint32_t code); + +void psignal(struct proc *p, int sig); +void psignal_with_reason(struct proc *p, int sig, struct os_reason *signal_reason); +void psignal_locked(struct proc *, int); +void psignal_try_thread(proc_t, thread_t, int signum); +void psignal_try_thread_with_reason(proc_t, thread_t, int, struct os_reason*); +void psignal_thread_with_reason(proc_t, thread_t, int, struct os_reason*); +void psignal_uthread(thread_t, int); +void pgsignal(struct pgrp *pgrp, int sig, int checkctty); +void tty_pgsignal(struct tty * tp, int sig, int checkctty); +void threadsignal(thread_t sig_actthread, int signum, + mach_exception_code_t code, boolean_t set_exitreason); +int thread_issignal(proc_t p, thread_t th, sigset_t mask); +void psignal_vfork(struct proc *p, task_t new_task, thread_t thread, + int signum); void psignal_vfork_with_reason(proc_t p, task_t new_task, thread_t thread, - int signum, struct os_reason *signal_reason); -void signal_setast(thread_t sig_actthread); -void pgsigio(pid_t pgid, int signalnum); + int signum, struct os_reason *signal_reason); +void signal_setast(thread_t sig_actthread); +void pgsigio(pid_t pgid, int signalnum); void sig_lock_to_exit(struct proc *p); int sig_try_locked(struct proc *p); 
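[Editor's note: the mask macros above are plain bit arithmetic; sigcantmask resolves to sigmask(SIGKILL) | sigmask(SIGSTOP). A quick user-space check of that arithmetic, illustrative only:]

    #include <assert.h>
    #include <signal.h>

    int
    main(void)
    {
        /* sigmask(m) == 1 << (m - 1), so with SIGKILL == 9 and
         * SIGSTOP == 17 the "can't mask" set is 0x100 | 0x10000. */
        assert(sigmask(SIGKILL) == 0x00000100);
        assert(sigmask(SIGSTOP) == 0x00010000);
        assert((sigmask(SIGKILL) | sigmask(SIGSTOP)) == 0x00010100);
        return 0;
    }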
-#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ #ifdef XNU_KERNEL_PRIVATE @@ -258,10 +258,10 @@ int sig_try_locked(struct proc *p); #define COREDUMP_IGNORE_ULIMIT 0x0001 /* Ignore the process's core file ulimit. */ #define COREDUMP_FULLFSYNC 0x0002 /* Run F_FULLFSYNC on the core file's vnode */ -int coredump(struct proc *p, uint32_t reserve_mb, int coredump_flags); +int coredump(struct proc *p, uint32_t reserve_mb, int coredump_flags); void set_thread_exit_reason(void *th, void *reason, boolean_t proc_locked); #endif /* XNU_KERNEL_PRIVATE */ -#endif /* !_SYS_SIGNALVAR_H_ */ +#endif /* !_SYS_SIGNALVAR_H_ */ diff --git a/bsd/sys/snapshot.h b/bsd/sys/snapshot.h index 3953eab3e..f097beb36 100644 --- a/bsd/sys/snapshot.h +++ b/bsd/sys/snapshot.h @@ -27,7 +27,7 @@ */ #ifndef _SYS_SNAPSHOT_H_ -#define _SYS_SNAPSHOT_H_ +#define _SYS_SNAPSHOT_H_ #ifndef KERNEL diff --git a/bsd/sys/socket.h b/bsd/sys/socket.h index f6bafa632..9f68d473f 100644 --- a/bsd/sys/socket.h +++ b/bsd/sys/socket.h @@ -70,7 +70,7 @@ */ #ifndef _SYS_SOCKET_H_ -#define _SYS_SOCKET_H_ +#define _SYS_SOCKET_H_ #include #include @@ -114,90 +114,90 @@ /* * Types */ -#define SOCK_STREAM 1 /* stream socket */ -#define SOCK_DGRAM 2 /* datagram socket */ -#define SOCK_RAW 3 /* raw-protocol interface */ +#define SOCK_STREAM 1 /* stream socket */ +#define SOCK_DGRAM 2 /* datagram socket */ +#define SOCK_RAW 3 /* raw-protocol interface */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SOCK_RDM 4 /* reliably-delivered message */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define SOCK_SEQPACKET 5 /* sequenced packet stream */ +#define SOCK_RDM 4 /* reliably-delivered message */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define SOCK_SEQPACKET 5 /* sequenced packet stream */ /* * Option flags per-socket. 
*/ -#define SO_DEBUG 0x0001 /* turn on debugging info recording */ -#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */ -#define SO_REUSEADDR 0x0004 /* allow local address reuse */ -#define SO_KEEPALIVE 0x0008 /* keep connections alive */ -#define SO_DONTROUTE 0x0010 /* just use interface addresses */ -#define SO_BROADCAST 0x0020 /* permit sending of broadcast msgs */ +#define SO_DEBUG 0x0001 /* turn on debugging info recording */ +#define SO_ACCEPTCONN 0x0002 /* socket has had listen() */ +#define SO_REUSEADDR 0x0004 /* allow local address reuse */ +#define SO_KEEPALIVE 0x0008 /* keep connections alive */ +#define SO_DONTROUTE 0x0010 /* just use interface addresses */ +#define SO_BROADCAST 0x0020 /* permit sending of broadcast msgs */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SO_USELOOPBACK 0x0040 /* bypass hardware when possible */ -#define SO_LINGER 0x0080 /* linger on close if data present (in ticks) */ +#define SO_USELOOPBACK 0x0040 /* bypass hardware when possible */ +#define SO_LINGER 0x0080 /* linger on close if data present (in ticks) */ #else -#define SO_LINGER 0x1080 /* linger on close if data present (in seconds) */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define SO_OOBINLINE 0x0100 /* leave received OOB data in line */ +#define SO_LINGER 0x1080 /* linger on close if data present (in seconds) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define SO_OOBINLINE 0x0100 /* leave received OOB data in line */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SO_REUSEPORT 0x0200 /* allow local address & port reuse */ -#define SO_TIMESTAMP 0x0400 /* timestamp received dgram traffic */ -#define SO_TIMESTAMP_MONOTONIC 0x0800 /* Monotonically increasing timestamp on rcvd dgram */ +#define SO_REUSEPORT 0x0200 /* allow local address & port reuse */ +#define SO_TIMESTAMP 0x0400 /* timestamp received dgram traffic */ +#define SO_TIMESTAMP_MONOTONIC 0x0800 /* Monotonically increasing timestamp on rcvd dgram */ #ifndef __APPLE__ -#define SO_ACCEPTFILTER 0x1000 /* there is an accept filter */ +#define SO_ACCEPTFILTER 0x1000 /* there is an accept filter */ #else -#define SO_DONTTRUNC 0x2000 /* APPLE: Retain unread data */ - /* (ATOMIC proto) */ -#define SO_WANTMORE 0x4000 /* APPLE: Give hint when more data ready */ -#define SO_WANTOOBFLAG 0x8000 /* APPLE: Want OOB in MSG_FLAG on receive */ +#define SO_DONTTRUNC 0x2000 /* APPLE: Retain unread data */ + /* (ATOMIC proto) */ +#define SO_WANTMORE 0x4000 /* APPLE: Give hint when more data ready */ +#define SO_WANTOOBFLAG 0x8000 /* APPLE: Want OOB in MSG_FLAG on receive */ #ifdef PRIVATE -#define SO_NOWAKEFROMSLEEP 0x10000 /* Don't wake for traffic to this socket */ -#define SO_NOAPNFALLBK 0x20000 /* Don't attempt APN fallback for the socket */ -#define SO_TIMESTAMP_CONTINUOUS 0x40000 /* Continuous monotonic timestamp on rcvd dgram */ +#define SO_NOWAKEFROMSLEEP 0x10000 /* Don't wake for traffic to this socket */ +#define SO_NOAPNFALLBK 0x20000 /* Don't attempt APN fallback for the socket */ +#define SO_TIMESTAMP_CONTINUOUS 0x40000 /* Continuous monotonic timestamp on rcvd dgram */ #endif #endif /* (!__APPLE__) */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Additional options, not kept in so_options. 
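[Editor's sketch, not part of the patch: these option bits are toggled with setsockopt() at SOL_SOCKET level (the level constant is defined later in this header). The helper name make_listener_opts is ours.]

    #include <sys/socket.h>

    /* Make a TCP listener tolerant of quick restarts and enable
     * keepalive probes; both flags live in so_options. */
    int
    make_listener_opts(int fd)
    {
        int on = 1;
        if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) != 0)
            return -1;
        return setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
    }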
*/ -#define SO_SNDBUF 0x1001 /* send buffer size */ -#define SO_RCVBUF 0x1002 /* receive buffer size */ -#define SO_SNDLOWAT 0x1003 /* send low-water mark */ -#define SO_RCVLOWAT 0x1004 /* receive low-water mark */ -#define SO_SNDTIMEO 0x1005 /* send timeout */ -#define SO_RCVTIMEO 0x1006 /* receive timeout */ -#define SO_ERROR 0x1007 /* get error status and clear */ -#define SO_TYPE 0x1008 /* get socket type */ +#define SO_SNDBUF 0x1001 /* send buffer size */ +#define SO_RCVBUF 0x1002 /* receive buffer size */ +#define SO_SNDLOWAT 0x1003 /* send low-water mark */ +#define SO_RCVLOWAT 0x1004 /* receive low-water mark */ +#define SO_SNDTIMEO 0x1005 /* send timeout */ +#define SO_RCVTIMEO 0x1006 /* receive timeout */ +#define SO_ERROR 0x1007 /* get error status and clear */ +#define SO_TYPE 0x1008 /* get socket type */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SO_LABEL 0x1010 /* socket's MAC label */ -#define SO_PEERLABEL 0x1011 /* socket's peer MAC label */ +#define SO_LABEL 0x1010 /* socket's MAC label */ +#define SO_PEERLABEL 0x1011 /* socket's peer MAC label */ #ifdef __APPLE__ -#define SO_NREAD 0x1020 /* APPLE: get 1st-packet byte count */ -#define SO_NKE 0x1021 /* APPLE: Install socket-level NKE */ -#define SO_NOSIGPIPE 0x1022 /* APPLE: No SIGPIPE on EPIPE */ -#define SO_NOADDRERR 0x1023 /* APPLE: Returns EADDRNOTAVAIL when src is not available anymore */ -#define SO_NWRITE 0x1024 /* APPLE: Get number of bytes currently in send socket buffer */ -#define SO_REUSESHAREUID 0x1025 /* APPLE: Allow reuse of port/socket by different userids */ +#define SO_NREAD 0x1020 /* APPLE: get 1st-packet byte count */ +#define SO_NKE 0x1021 /* APPLE: Install socket-level NKE */ +#define SO_NOSIGPIPE 0x1022 /* APPLE: No SIGPIPE on EPIPE */ +#define SO_NOADDRERR 0x1023 /* APPLE: Returns EADDRNOTAVAIL when src is not available anymore */ +#define SO_NWRITE 0x1024 /* APPLE: Get number of bytes currently in send socket buffer */ +#define SO_REUSESHAREUID 0x1025 /* APPLE: Allow reuse of port/socket by different userids */ #ifdef __APPLE_API_PRIVATE -#define SO_NOTIFYCONFLICT 0x1026 /* APPLE: send notification if there is a bind on a port which is already in use */ -#define SO_UPCALLCLOSEWAIT 0x1027 /* APPLE: block on close until an upcall returns */ +#define SO_NOTIFYCONFLICT 0x1026 /* APPLE: send notification if there is a bind on a port which is already in use */ +#define SO_UPCALLCLOSEWAIT 0x1027 /* APPLE: block on close until an upcall returns */ #endif -#define SO_LINGER_SEC 0x1080 /* linger on close if data present (in seconds) */ +#define SO_LINGER_SEC 0x1080 /* linger on close if data present (in seconds) */ #ifdef PRIVATE -#define SO_RESTRICTIONS 0x1081 /* APPLE: deny flag set */ -#define SO_RESTRICT_DENY_IN 0x1 /* deny inbound (trapdoor) */ -#define SO_RESTRICT_DENY_OUT 0x2 /* deny outbound (trapdoor) */ -#define SO_RESTRICT_DENY_CELLULAR 0x4 /* deny use of cellular (trapdoor) */ -#define SO_RESTRICT_DENY_EXPENSIVE 0x8 /* deny use of expensive if (trapdoor) */ +#define SO_RESTRICTIONS 0x1081 /* APPLE: deny flag set */ +#define SO_RESTRICT_DENY_IN 0x1 /* deny inbound (trapdoor) */ +#define SO_RESTRICT_DENY_OUT 0x2 /* deny outbound (trapdoor) */ +#define SO_RESTRICT_DENY_CELLULAR 0x4 /* deny use of cellular (trapdoor) */ +#define SO_RESTRICT_DENY_EXPENSIVE 0x8 /* deny use of expensive if (trapdoor) */ #endif /* PRIVATE */ -#define SO_RANDOMPORT 0x1082 /* APPLE: request local port randomization */ -#define SO_NP_EXTENSIONS 0x1083 /* To turn off some POSIX behavior */ +#define 
SO_RANDOMPORT 0x1082 /* APPLE: request local port randomization */ +#define SO_NP_EXTENSIONS 0x1083 /* To turn off some POSIX behavior */ #endif #ifdef PRIVATE -#define SO_EXECPATH 0x1085 /* Application Firewall Socket option */ +#define SO_EXECPATH 0x1085 /* Application Firewall Socket option */ /* * Traffic service class definitions (lowest to highest): @@ -255,84 +255,84 @@ * certain types of locally-originated ICMP, ICMPv6; IGMP/MLD join/leave, * ARP. */ -#define SO_TRAFFIC_CLASS 0x1086 /* Traffic service class (int) */ -#define SO_TC_BK_SYS 100 /* lowest class */ -#define SO_TC_BK 200 -#define SO_TC_BE 0 -#define SO_TC_RD 300 -#define SO_TC_OAM 400 -#define SO_TC_AV 500 -#define SO_TC_RV 600 -#define SO_TC_VI 700 -#define SO_TC_VO 800 -#define SO_TC_CTL 900 /* highest class */ -#define SO_TC_MAX 10 /* Total # of traffic classes */ +#define SO_TRAFFIC_CLASS 0x1086 /* Traffic service class (int) */ +#define SO_TC_BK_SYS 100 /* lowest class */ +#define SO_TC_BK 200 +#define SO_TC_BE 0 +#define SO_TC_RD 300 +#define SO_TC_OAM 400 +#define SO_TC_AV 500 +#define SO_TC_RV 600 +#define SO_TC_VI 700 +#define SO_TC_VO 800 +#define SO_TC_CTL 900 /* highest class */ +#define SO_TC_MAX 10 /* Total # of traffic classes */ #ifdef XNU_KERNEL_PRIVATE -#define _SO_TC_BK 1 /* deprecated */ -#define _SO_TC_VI 2 /* deprecated */ -#define _SO_TC_VO 3 /* deprecated */ -#define _SO_TC_MAX 4 /* deprecated */ - -#define SO_VALID_TC(c) \ - (c == SO_TC_BK_SYS || c == SO_TC_BK || c == SO_TC_BE || \ - c == SO_TC_RD || c == SO_TC_OAM || c == SO_TC_AV || \ - c == SO_TC_RV || c == SO_TC_VI || c == SO_TC_VO || \ +#define _SO_TC_BK 1 /* deprecated */ +#define _SO_TC_VI 2 /* deprecated */ +#define _SO_TC_VO 3 /* deprecated */ +#define _SO_TC_MAX 4 /* deprecated */ + +#define SO_VALID_TC(c) \ + (c == SO_TC_BK_SYS || c == SO_TC_BK || c == SO_TC_BE || \ + c == SO_TC_RD || c == SO_TC_OAM || c == SO_TC_AV || \ + c == SO_TC_RV || c == SO_TC_VI || c == SO_TC_VO || \ c == SO_TC_CTL || c == SO_TC_NETSVC_SIG) -#define SO_TC_UNSPEC ((int)-1) /* Traffic class not specified */ +#define SO_TC_UNSPEC ((int)-1) /* Traffic class not specified */ -#define SO_TC_SIG SO_TC_VI /* to be removed XXX */ +#define SO_TC_SIG SO_TC_VI /* to be removed XXX */ -#define SOTCIX_BK_SYS 0 -#define SOTCIX_BK 1 -#define SOTCIX_BE 2 -#define SOTCIX_RD 3 -#define SOTCIX_OAM 4 -#define SOTCIX_AV 5 -#define SOTCIX_RV 6 -#define SOTCIX_VI 7 -#define SOTCIX_VO 8 -#define SOTCIX_CTL 9 +#define SOTCIX_BK_SYS 0 +#define SOTCIX_BK 1 +#define SOTCIX_BE 2 +#define SOTCIX_RD 3 +#define SOTCIX_OAM 4 +#define SOTCIX_AV 5 +#define SOTCIX_RV 6 +#define SOTCIX_VI 7 +#define SOTCIX_VO 8 +#define SOTCIX_CTL 9 #endif /* XNU_KERNEL_PRIVATE */ /* Background socket configuration flags */ -#define TRAFFIC_MGT_SO_BACKGROUND 0x0001 /* background socket */ -#define TRAFFIC_MGT_TCP_RECVBG 0x0002 /* Only TCP sockets, receiver throttling */ +#define TRAFFIC_MGT_SO_BACKGROUND 0x0001 /* background socket */ +#define TRAFFIC_MGT_TCP_RECVBG 0x0002 /* Only TCP sockets, receiver throttling */ -#define SO_RECV_TRAFFIC_CLASS 0x1087 /* Receive traffic class (bool) */ -#define SO_TRAFFIC_CLASS_DBG 0x1088 /* Debug traffic class (struct so_tcdbg) */ -#define SO_TRAFFIC_CLASS_STATS 0x1089 /* Traffic class statistics */ -#define SO_PRIVILEGED_TRAFFIC_CLASS 0x1090 /* Privileged traffic class (bool) */ -#define SO_DEFUNCTIT 0x1091 /* Defunct a socket (only in internal builds) */ -#define SO_DEFUNCTOK 0x1100 /* can be defunct'd */ -#define SO_ISDEFUNCT 0x1101 /* get defunct status */ +#define 
SO_RECV_TRAFFIC_CLASS 0x1087 /* Receive traffic class (bool) */ +#define SO_TRAFFIC_CLASS_DBG 0x1088 /* Debug traffic class (struct so_tcdbg) */ +#define SO_TRAFFIC_CLASS_STATS 0x1089 /* Traffic class statistics */ +#define SO_PRIVILEGED_TRAFFIC_CLASS 0x1090 /* Privileged traffic class (bool) */ +#define SO_DEFUNCTIT 0x1091 /* Defunct a socket (only in internal builds) */ +#define SO_DEFUNCTOK 0x1100 /* can be defunct'd */ +#define SO_ISDEFUNCT 0x1101 /* get defunct status */ -#define SO_OPPORTUNISTIC 0x1102 /* deprecated; use SO_TRAFFIC_CLASS */ +#define SO_OPPORTUNISTIC 0x1102 /* deprecated; use SO_TRAFFIC_CLASS */ /* * SO_FLUSH flushes any unsent data generated by a given socket. It takes * an integer parameter, which can be any of the SO_TC traffic class values, * or the special SO_TC_ALL value. */ -#define SO_FLUSH 0x1103 /* flush unsent data (int) */ -#define SO_TC_ALL (-1) +#define SO_FLUSH 0x1103 /* flush unsent data (int) */ +#define SO_TC_ALL (-1) -#define SO_RECV_ANYIF 0x1104 /* unrestricted inbound processing */ -#define SO_TRAFFIC_MGT_BACKGROUND 0x1105 /* Background traffic management */ +#define SO_RECV_ANYIF 0x1104 /* unrestricted inbound processing */ +#define SO_TRAFFIC_MGT_BACKGROUND 0x1105 /* Background traffic management */ -#define SO_FLOW_DIVERT_TOKEN 0x1106 /* flow divert token */ +#define SO_FLOW_DIVERT_TOKEN 0x1106 /* flow divert token */ -#define SO_DELEGATED 0x1107 /* set socket as delegate (pid_t) */ -#define SO_DELEGATED_UUID 0x1108 /* set socket as delegate (uuid_t) */ -#define SO_NECP_ATTRIBUTES 0x1109 /* NECP socket attributes (domain, account, etc.) */ -#define SO_CFIL_SOCK_ID 0x1110 /* get content filter socket ID (cfil_sock_id_t) */ -#define SO_NECP_CLIENTUUID 0x1111 /* NECP Client uuid */ +#define SO_DELEGATED 0x1107 /* set socket as delegate (pid_t) */ +#define SO_DELEGATED_UUID 0x1108 /* set socket as delegate (uuid_t) */ +#define SO_NECP_ATTRIBUTES 0x1109 /* NECP socket attributes (domain, account, etc.) */ +#define SO_CFIL_SOCK_ID 0x1110 /* get content filter socket ID (cfil_sock_id_t) */ +#define SO_NECP_CLIENTUUID 0x1111 /* NECP Client uuid */ #endif /* PRIVATE */ -#define SO_NUMRCVPKT 0x1112 /* number of datagrams in receive socket buffer */ +#define SO_NUMRCVPKT 0x1112 /* number of datagrams in receive socket buffer */ #ifdef PRIVATE -#define SO_AWDL_UNRESTRICTED 0x1113 /* try to use AWDL in restricted mode */ -#define SO_EXTENDED_BK_IDLE 0x1114 /* extended time to keep socket idle after app is suspended (int) */ -#define SO_MARK_CELLFALLBACK 0x1115 /* Mark as initiated by cell fallback */ +#define SO_AWDL_UNRESTRICTED 0x1113 /* try to use AWDL in restricted mode */ +#define SO_EXTENDED_BK_IDLE 0x1114 /* extended time to keep socket idle after app is suspended (int) */ +#define SO_MARK_CELLFALLBACK 0x1115 /* Mark as initiated by cell fallback */ #endif /* PRIVATE */ /* @@ -417,26 +417,26 @@ * inelastic flow, constant packet rate, somewhat fixed size. * E.g. VoIP. 
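 *
 * Illustrative sketch only: user space selects one of the
 * NET_SERVICE_TYPE_* classes below with setsockopt(2), here marking an
 * assumed socket descriptor "s" as interactive voice:
 *
 *	int nst = NET_SERVICE_TYPE_VO;
 *	if (setsockopt(s, SOL_SOCKET, SO_NET_SERVICE_TYPE,
 *	    &nst, sizeof (nst)) == -1)
 *		perror("setsockopt(SO_NET_SERVICE_TYPE)");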
*/ -#define SO_NET_SERVICE_TYPE 0x1116 /* Network service type */ - -#define NET_SERVICE_TYPE_BE 0 /* Best effort */ -#define NET_SERVICE_TYPE_BK 1 /* Background system initiated */ -#define NET_SERVICE_TYPE_SIG 2 /* Signaling */ -#define NET_SERVICE_TYPE_VI 3 /* Interactive Video */ -#define NET_SERVICE_TYPE_VO 4 /* Interactive Voice */ -#define NET_SERVICE_TYPE_RV 5 /* Responsive Multimedia Audio/Video */ -#define NET_SERVICE_TYPE_AV 6 /* Multimedia Audio/Video Streaming */ -#define NET_SERVICE_TYPE_OAM 7 /* Operations, Administration, and Management */ -#define NET_SERVICE_TYPE_RD 8 /* Responsive Data */ +#define SO_NET_SERVICE_TYPE 0x1116 /* Network service type */ + +#define NET_SERVICE_TYPE_BE 0 /* Best effort */ +#define NET_SERVICE_TYPE_BK 1 /* Background system initiated */ +#define NET_SERVICE_TYPE_SIG 2 /* Signaling */ +#define NET_SERVICE_TYPE_VI 3 /* Interactive Video */ +#define NET_SERVICE_TYPE_VO 4 /* Interactive Voice */ +#define NET_SERVICE_TYPE_RV 5 /* Responsive Multimedia Audio/Video */ +#define NET_SERVICE_TYPE_AV 6 /* Multimedia Audio/Video Streaming */ +#define NET_SERVICE_TYPE_OAM 7 /* Operations, Administration, and Management */ +#define NET_SERVICE_TYPE_RD 8 /* Responsive Data */ #if PRIVATE -#define SO_QOSMARKING_POLICY_OVERRIDE 0x1117 /* int */ -#define SO_INTCOPROC_ALLOW 0x1118 /* Try to use internal co-processor interfaces. */ +#define SO_QOSMARKING_POLICY_OVERRIDE 0x1117 /* int */ +#define SO_INTCOPROC_ALLOW 0x1118 /* Try to use internal co-processor interfaces. */ -#define _NET_SERVICE_TYPE_COUNT 9 -#define _NET_SERVICE_TYPE_UNSPEC ((int)-1) +#define _NET_SERVICE_TYPE_COUNT 9 +#define _NET_SERVICE_TYPE_UNSPEC ((int)-1) -#define IS_VALID_NET_SERVICE_TYPE(c) \ +#define IS_VALID_NET_SERVICE_TYPE(c) \ (c >= NET_SERVICE_TYPE_BE && c <= NET_SERVICE_TYPE_RD) extern const int sotc_by_netservicetype[_NET_SERVICE_TYPE_COUNT]; @@ -446,53 +446,53 @@ extern const int sotc_by_netservicetype[_NET_SERVICE_TYPE_COUNT]; * Mostly useful to simplify implementation of frameworks to adopt the new * Network Service Type values for Signaling. 
*/ -#define SO_TC_NET_SERVICE_OFFSET 10000 -#define SO_TC_NETSVC_SIG (SO_TC_NET_SERVICE_OFFSET + NET_SERVICE_TYPE_SIG) +#define SO_TC_NET_SERVICE_OFFSET 10000 +#define SO_TC_NETSVC_SIG (SO_TC_NET_SERVICE_OFFSET + NET_SERVICE_TYPE_SIG) #endif /* PRIVATE */ -#define SO_NETSVC_MARKING_LEVEL 0x1119 /* Get QoS marking in effect for socket */ +#define SO_NETSVC_MARKING_LEVEL 0x1119 /* Get QoS marking in effect for socket */ -#define NETSVC_MRKNG_UNKNOWN 0 /* The outgoing network interface is not known */ -#define NETSVC_MRKNG_LVL_L2 1 /* Default marking at layer 2 (for example Wi-Fi WMM) */ -#define NETSVC_MRKNG_LVL_L3L2_ALL 2 /* Layer 3 DSCP marking and layer 2 marking for all Network Service Types */ -#define NETSVC_MRKNG_LVL_L3L2_BK 3 /* The system policy limits layer 3 DSCP marking and layer 2 marking - * to background Network Service Types */ +#define NETSVC_MRKNG_UNKNOWN 0 /* The outgoing network interface is not known */ +#define NETSVC_MRKNG_LVL_L2 1 /* Default marking at layer 2 (for example Wi-Fi WMM) */ +#define NETSVC_MRKNG_LVL_L3L2_ALL 2 /* Layer 3 DSCP marking and layer 2 marking for all Network Service Types */ +#define NETSVC_MRKNG_LVL_L3L2_BK 3 /* The system policy limits layer 3 DSCP marking and layer 2 marking + * to background Network Service Types */ typedef __uint32_t sae_associd_t; -#define SAE_ASSOCID_ANY 0 -#define SAE_ASSOCID_ALL ((sae_associd_t)(-1ULL)) +#define SAE_ASSOCID_ANY 0 +#define SAE_ASSOCID_ALL ((sae_associd_t)(-1ULL)) typedef __uint32_t sae_connid_t; -#define SAE_CONNID_ANY 0 -#define SAE_CONNID_ALL ((sae_connid_t)(-1ULL)) +#define SAE_CONNID_ANY 0 +#define SAE_CONNID_ALL ((sae_connid_t)(-1ULL)) /* connectx() flag parameters */ -#define CONNECT_RESUME_ON_READ_WRITE 0x1 /* resume connect() on read/write */ -#define CONNECT_DATA_IDEMPOTENT 0x2 /* data is idempotent */ -#define CONNECT_DATA_AUTHENTICATED 0x4 /* data includes security that replaces the TFO-cookie */ +#define CONNECT_RESUME_ON_READ_WRITE 0x1 /* resume connect() on read/write */ +#define CONNECT_DATA_IDEMPOTENT 0x2 /* data is idempotent */ +#define CONNECT_DATA_AUTHENTICATED 0x4 /* data includes security that replaces the TFO-cookie */ /* sockaddr endpoints */ typedef struct sa_endpoints { - unsigned int sae_srcif; /* optional source interface */ - const struct sockaddr *sae_srcaddr; /* optional source address */ - socklen_t sae_srcaddrlen; /* size of source address */ - const struct sockaddr *sae_dstaddr; /* destination address */ - socklen_t sae_dstaddrlen; /* size of destination address */ + unsigned int sae_srcif; /* optional source interface */ + const struct sockaddr *sae_srcaddr; /* optional source address */ + socklen_t sae_srcaddrlen; /* size of source address */ + const struct sockaddr *sae_dstaddr; /* destination address */ + socklen_t sae_dstaddrlen; /* size of destination address */ } sa_endpoints_t; -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Structure used for manipulating linger option. 
*/ -struct linger { - int l_onoff; /* option on/off */ - int l_linger; /* linger time */ +struct linger { + int l_onoff; /* option on/off */ + int l_linger; /* linger time */ }; #ifndef __APPLE__ -struct accept_filter_arg { - char af_name[16]; - char af_arg[256-16]; +struct accept_filter_arg { + char af_name[16]; + char af_arg[256 - 16]; }; #endif @@ -503,17 +503,17 @@ struct accept_filter_arg { * Structure to control non-portable Sockets extension to POSIX */ struct so_np_extensions { - u_int32_t npx_flags; - u_int32_t npx_mask; + u_int32_t npx_flags; + u_int32_t npx_mask; }; -#define SONPX_SETOPTSHUT 0x000000001 /* flag for allowing setsockopt after shutdown */ +#define SONPX_SETOPTSHUT 0x000000001 /* flag for allowing setsockopt after shutdown */ #ifdef KERNEL_PRIVATE -#define SONPX_MASK_VALID (SONPX_SETOPTSHUT) -#define IS_SO_TC_BACKGROUND(_tc_) ((_tc_) == SO_TC_BK || (_tc_) == SO_TC_BK_SYS) -#define IS_SO_TC_BACKGROUNDSYSTEM(_tc_) ((_tc_) == SO_TC_BK_SYS) +#define SONPX_MASK_VALID (SONPX_SETOPTSHUT) +#define IS_SO_TC_BACKGROUND(_tc_) ((_tc_) == SO_TC_BK || (_tc_) == SO_TC_BK_SYS) +#define IS_SO_TC_BACKGROUNDSYSTEM(_tc_) ((_tc_) == SO_TC_BK_SYS) #endif /* KERNEL_PRIVATE */ #endif @@ -522,170 +522,170 @@ struct so_np_extensions { /* * Level number for (get/set)sockopt() to apply to socket itself. */ -#define SOL_SOCKET 0xffff /* options for socket level */ +#define SOL_SOCKET 0xffff /* options for socket level */ /* * Address families. */ -#define AF_UNSPEC 0 /* unspecified */ -#define AF_UNIX 1 /* local to host (pipes) */ +#define AF_UNSPEC 0 /* unspecified */ +#define AF_UNIX 1 /* local to host (pipes) */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define AF_LOCAL AF_UNIX /* backward compatibility */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define AF_INET 2 /* internetwork: UDP, TCP, etc. */ +#define AF_LOCAL AF_UNIX /* backward compatibility */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define AF_INET 2 /* internetwork: UDP, TCP, etc. */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define AF_IMPLINK 3 /* arpanet imp addresses */ -#define AF_PUP 4 /* pup protocols: e.g. 
BSP */ -#define AF_CHAOS 5 /* mit CHAOS protocols */ -#define AF_NS 6 /* XEROX NS protocols */ -#define AF_ISO 7 /* ISO protocols */ -#define AF_OSI AF_ISO -#define AF_ECMA 8 /* European computer manufacturers */ -#define AF_DATAKIT 9 /* datakit protocols */ -#define AF_CCITT 10 /* CCITT protocols, X.25 etc */ -#define AF_SNA 11 /* IBM SNA */ -#define AF_DECnet 12 /* DECnet */ -#define AF_DLI 13 /* DEC Direct data link interface */ -#define AF_LAT 14 /* LAT */ -#define AF_HYLINK 15 /* NSC Hyperchannel */ -#define AF_APPLETALK 16 /* Apple Talk */ -#define AF_ROUTE 17 /* Internal Routing Protocol */ -#define AF_LINK 18 /* Link layer interface */ -#define pseudo_AF_XTP 19 /* eXpress Transfer Protocol (no AF) */ -#define AF_COIP 20 /* connection-oriented IP, aka ST II */ -#define AF_CNT 21 /* Computer Network Technology */ -#define pseudo_AF_RTIP 22 /* Help Identify RTIP packets */ -#define AF_IPX 23 /* Novell Internet Protocol */ -#define AF_SIP 24 /* Simple Internet Protocol */ -#define pseudo_AF_PIP 25 /* Help Identify PIP packets */ -#define AF_NDRV 27 /* Network Driver 'raw' access */ -#define AF_ISDN 28 /* Integrated Services Digital Network */ -#define AF_E164 AF_ISDN /* CCITT E.164 recommendation */ -#define pseudo_AF_KEY 29 /* Internal key-management function */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#define AF_INET6 30 /* IPv6 */ +#define AF_IMPLINK 3 /* arpanet imp addresses */ +#define AF_PUP 4 /* pup protocols: e.g. BSP */ +#define AF_CHAOS 5 /* mit CHAOS protocols */ +#define AF_NS 6 /* XEROX NS protocols */ +#define AF_ISO 7 /* ISO protocols */ +#define AF_OSI AF_ISO +#define AF_ECMA 8 /* European computer manufacturers */ +#define AF_DATAKIT 9 /* datakit protocols */ +#define AF_CCITT 10 /* CCITT protocols, X.25 etc */ +#define AF_SNA 11 /* IBM SNA */ +#define AF_DECnet 12 /* DECnet */ +#define AF_DLI 13 /* DEC Direct data link interface */ +#define AF_LAT 14 /* LAT */ +#define AF_HYLINK 15 /* NSC Hyperchannel */ +#define AF_APPLETALK 16 /* Apple Talk */ +#define AF_ROUTE 17 /* Internal Routing Protocol */ +#define AF_LINK 18 /* Link layer interface */ +#define pseudo_AF_XTP 19 /* eXpress Transfer Protocol (no AF) */ +#define AF_COIP 20 /* connection-oriented IP, aka ST II */ +#define AF_CNT 21 /* Computer Network Technology */ +#define pseudo_AF_RTIP 22 /* Help Identify RTIP packets */ +#define AF_IPX 23 /* Novell Internet Protocol */ +#define AF_SIP 24 /* Simple Internet Protocol */ +#define pseudo_AF_PIP 25 /* Help Identify PIP packets */ +#define AF_NDRV 27 /* Network Driver 'raw' access */ +#define AF_ISDN 28 /* Integrated Services Digital Network */ +#define AF_E164 AF_ISDN /* CCITT E.164 recommendation */ +#define pseudo_AF_KEY 29 /* Internal key-management function */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define AF_INET6 30 /* IPv6 */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define AF_NATM 31 /* native ATM access */ -#define AF_SYSTEM 32 /* Kernel event messages */ -#define AF_NETBIOS 33 /* NetBIOS */ -#define AF_PPP 34 /* PPP communication protocol */ -#define pseudo_AF_HDRCMPLT 35 /* Used by BPF to not rewrite headers - in interface output routine */ +#define AF_NATM 31 /* native ATM access */ +#define AF_SYSTEM 32 /* Kernel event messages */ +#define AF_NETBIOS 33 /* NetBIOS */ +#define AF_PPP 34 /* PPP communication protocol */ +#define pseudo_AF_HDRCMPLT 35 /* Used by BPF to not rewrite headers + * in interface output routine */ #ifdef PRIVATE -#define AF_AFP 36 /* Used by AFP */ +#define AF_AFP 36 /* Used by AFP 
*/ #else -#define AF_RESERVED_36 36 /* Reserved for internal usage */ +#define AF_RESERVED_36 36 /* Reserved for internal usage */ #endif -#define AF_IEEE80211 37 /* IEEE 802.11 protocol */ -#define AF_UTUN 38 +#define AF_IEEE80211 37 /* IEEE 802.11 protocol */ +#define AF_UTUN 38 #ifdef PRIVATE -#define AF_MULTIPATH 39 +#define AF_MULTIPATH 39 #endif /* PRIVATE */ -#define AF_MAX 40 -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#define AF_MAX 40 +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * [XSI] Structure used by kernel to store most addresses. */ struct sockaddr { - __uint8_t sa_len; /* total length */ - sa_family_t sa_family; /* [XSI] address family */ - char sa_data[14]; /* [XSI] addr value (actually larger) */ + __uint8_t sa_len; /* total length */ + sa_family_t sa_family; /* [XSI] address family */ + char sa_data[14]; /* [XSI] addr value (actually larger) */ }; #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SOCK_MAXADDRLEN 255 /* longest possible addresses */ +#define SOCK_MAXADDRLEN 255 /* longest possible addresses */ /* * Structure used by kernel to pass protocol * information in raw sockets. */ struct sockproto { - __uint16_t sp_family; /* address family */ - __uint16_t sp_protocol; /* protocol */ + __uint16_t sp_family; /* address family */ + __uint16_t sp_protocol; /* protocol */ }; -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * RFC 2553: protocol-independent placeholder for socket addresses */ -#define _SS_MAXSIZE 128 -#define _SS_ALIGNSIZE (sizeof(__int64_t)) -#define _SS_PAD1SIZE \ - (_SS_ALIGNSIZE - sizeof(__uint8_t) - sizeof(sa_family_t)) -#define _SS_PAD2SIZE \ - (_SS_MAXSIZE - sizeof(__uint8_t) - sizeof(sa_family_t) - \ - _SS_PAD1SIZE - _SS_ALIGNSIZE) +#define _SS_MAXSIZE 128 +#define _SS_ALIGNSIZE (sizeof(__int64_t)) +#define _SS_PAD1SIZE \ + (_SS_ALIGNSIZE - sizeof(__uint8_t) - sizeof(sa_family_t)) +#define _SS_PAD2SIZE \ + (_SS_MAXSIZE - sizeof(__uint8_t) - sizeof(sa_family_t) - \ + _SS_PAD1SIZE - _SS_ALIGNSIZE) /* * [XSI] sockaddr_storage */ struct sockaddr_storage { - __uint8_t ss_len; /* address length */ - sa_family_t ss_family; /* [XSI] address family */ - char __ss_pad1[_SS_PAD1SIZE]; - __int64_t __ss_align; /* force structure storage alignment */ - char __ss_pad2[_SS_PAD2SIZE]; + __uint8_t ss_len; /* address length */ + sa_family_t ss_family; /* [XSI] address family */ + char __ss_pad1[_SS_PAD1SIZE]; + __int64_t __ss_align; /* force structure storage alignment */ + char __ss_pad2[_SS_PAD2SIZE]; }; /* * Protocol families, same as address families for now. 
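 *
 * Since each PF_* constant below aliases its AF_* counterpart, the two
 * spellings are interchangeable in practice; e.g. (illustrative)
 *
 *	int s = socket(PF_INET6, SOCK_STREAM, 0);
 *
 * opens the same socket as passing AF_INET6.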
*/ -#define PF_UNSPEC AF_UNSPEC -#define PF_LOCAL AF_LOCAL -#define PF_UNIX PF_LOCAL /* backward compatibility */ -#define PF_INET AF_INET -#define PF_IMPLINK AF_IMPLINK -#define PF_PUP AF_PUP -#define PF_CHAOS AF_CHAOS -#define PF_NS AF_NS -#define PF_ISO AF_ISO -#define PF_OSI AF_ISO -#define PF_ECMA AF_ECMA -#define PF_DATAKIT AF_DATAKIT -#define PF_CCITT AF_CCITT -#define PF_SNA AF_SNA -#define PF_DECnet AF_DECnet -#define PF_DLI AF_DLI -#define PF_LAT AF_LAT -#define PF_HYLINK AF_HYLINK -#define PF_APPLETALK AF_APPLETALK -#define PF_ROUTE AF_ROUTE -#define PF_LINK AF_LINK -#define PF_XTP pseudo_AF_XTP /* really just proto family, no AF */ -#define PF_COIP AF_COIP -#define PF_CNT AF_CNT -#define PF_SIP AF_SIP -#define PF_IPX AF_IPX /* same format as AF_NS */ -#define PF_RTIP pseudo_AF_RTIP /* same format as AF_INET */ -#define PF_PIP pseudo_AF_PIP -#define PF_NDRV AF_NDRV -#define PF_ISDN AF_ISDN -#define PF_KEY pseudo_AF_KEY -#define PF_INET6 AF_INET6 -#define PF_NATM AF_NATM -#define PF_SYSTEM AF_SYSTEM -#define PF_NETBIOS AF_NETBIOS -#define PF_PPP AF_PPP +#define PF_UNSPEC AF_UNSPEC +#define PF_LOCAL AF_LOCAL +#define PF_UNIX PF_LOCAL /* backward compatibility */ +#define PF_INET AF_INET +#define PF_IMPLINK AF_IMPLINK +#define PF_PUP AF_PUP +#define PF_CHAOS AF_CHAOS +#define PF_NS AF_NS +#define PF_ISO AF_ISO +#define PF_OSI AF_ISO +#define PF_ECMA AF_ECMA +#define PF_DATAKIT AF_DATAKIT +#define PF_CCITT AF_CCITT +#define PF_SNA AF_SNA +#define PF_DECnet AF_DECnet +#define PF_DLI AF_DLI +#define PF_LAT AF_LAT +#define PF_HYLINK AF_HYLINK +#define PF_APPLETALK AF_APPLETALK +#define PF_ROUTE AF_ROUTE +#define PF_LINK AF_LINK +#define PF_XTP pseudo_AF_XTP /* really just proto family, no AF */ +#define PF_COIP AF_COIP +#define PF_CNT AF_CNT +#define PF_SIP AF_SIP +#define PF_IPX AF_IPX /* same format as AF_NS */ +#define PF_RTIP pseudo_AF_RTIP /* same format as AF_INET */ +#define PF_PIP pseudo_AF_PIP +#define PF_NDRV AF_NDRV +#define PF_ISDN AF_ISDN +#define PF_KEY pseudo_AF_KEY +#define PF_INET6 AF_INET6 +#define PF_NATM AF_NATM +#define PF_SYSTEM AF_SYSTEM +#define PF_NETBIOS AF_NETBIOS +#define PF_PPP AF_PPP #ifdef PRIVATE -#define PF_AFP AF_AFP +#define PF_AFP AF_AFP #else -#define PF_RESERVED_36 AF_RESERVED_36 +#define PF_RESERVED_36 AF_RESERVED_36 #endif -#define PF_UTUN AF_UTUN +#define PF_UTUN AF_UTUN #ifdef PRIVATE -#define PF_MULTIPATH AF_MULTIPATH +#define PF_MULTIPATH AF_MULTIPATH #endif /* PRIVATE */ -#define PF_MAX AF_MAX +#define PF_MAX AF_MAX /* * These do not have socket-layer support: */ -#define PF_VLAN ((uint32_t)0x766c616e) /* 'vlan' */ -#define PF_BOND ((uint32_t)0x626f6e64) /* 'bond' */ +#define PF_VLAN ((uint32_t)0x766c616e) /* 'vlan' */ +#define PF_BOND ((uint32_t)0x626f6e64) /* 'bond' */ #ifdef KERNEL_PRIVATE -#define PF_BRIDGE ((uint32_t)0x62726467) /* 'brdg' */ +#define PF_BRIDGE ((uint32_t)0x62726467) /* 'brdg' */ #endif /* KERNEL_PRIVATE */ /* @@ -697,11 +697,11 @@ struct sockaddr_storage { * Further levels are defined by the individual families below. */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define NET_MAXID AF_MAX +#define NET_MAXID AF_MAX #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ #ifdef KERNEL_PRIVATE -#define CTL_NET_NAMES { \ +#define CTL_NET_NAMES { \ { 0, 0 }, \ { "local", CTLTYPE_NODE }, \ { "inet", CTLTYPE_NODE }, \ @@ -750,27 +750,27 @@ struct sockaddr_storage { * Fifth: type of info, defined below * Sixth: flag(s) to mask with for NET_RT_FLAGS */ -#define NET_RT_DUMP 1 /* dump; may limit to a.f. 
*/ -#define NET_RT_FLAGS 2 /* by flags, e.g. RESOLVING */ -#define NET_RT_IFLIST 3 /* survey interface list */ -#define NET_RT_STAT 4 /* routing statistics */ -#define NET_RT_TRASH 5 /* routes not in table but not freed */ -#define NET_RT_IFLIST2 6 /* interface list with addresses */ -#define NET_RT_DUMP2 7 /* dump; may limit to a.f. */ +#define NET_RT_DUMP 1 /* dump; may limit to a.f. */ +#define NET_RT_FLAGS 2 /* by flags, e.g. RESOLVING */ +#define NET_RT_IFLIST 3 /* survey interface list */ +#define NET_RT_STAT 4 /* routing statistics */ +#define NET_RT_TRASH 5 /* routes not in table but not freed */ +#define NET_RT_IFLIST2 6 /* interface list with addresses */ +#define NET_RT_DUMP2 7 /* dump; may limit to a.f. */ #ifdef PRIVATE -#define NET_RT_DUMPX 8 /* private */ -#define NET_RT_DUMPX_FLAGS 9 /* private */ +#define NET_RT_DUMPX 8 /* private */ +#define NET_RT_DUMPX_FLAGS 9 /* private */ #endif /* PRIVATE */ /* * Allows read access non-local host's MAC address * if the process has neighbor cache entitlement. */ -#define NET_RT_FLAGS_PRIV 10 -#define NET_RT_MAXID 11 +#define NET_RT_FLAGS_PRIV 10 +#define NET_RT_MAXID 11 #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ #ifdef KERNEL_PRIVATE -#define CTL_NET_RT_NAMES { \ +#define CTL_NET_RT_NAMES { \ { 0, 0 }, \ { "dump", CTLTYPE_STRUCT }, \ { "flags", CTLTYPE_STRUCT }, \ @@ -788,20 +788,20 @@ struct sockaddr_storage { /* * Maximum queue length specifiable by listen. */ -#define SOMAXCONN 128 +#define SOMAXCONN 128 /* * [XSI] Message header for recvmsg and sendmsg calls. * Used value-result for recvmsg, value only for sendmsg. */ struct msghdr { - void *msg_name; /* [XSI] optional address */ - socklen_t msg_namelen; /* [XSI] size of address */ - struct iovec *msg_iov; /* [XSI] scatter/gather array */ - int msg_iovlen; /* [XSI] # elements in msg_iov */ - void *msg_control; /* [XSI] ancillary data, see below */ - socklen_t msg_controllen; /* [XSI] ancillary data buffer len */ - int msg_flags; /* [XSI] flags on received message */ + void *msg_name; /* [XSI] optional address */ + socklen_t msg_namelen; /* [XSI] size of address */ + struct iovec *msg_iov; /* [XSI] scatter/gather array */ + int msg_iovlen; /* [XSI] # elements in msg_iov */ + void *msg_control; /* [XSI] ancillary data, see below */ + socklen_t msg_controllen; /* [XSI] ancillary data buffer len */ + int msg_flags; /* [XSI] flags on received message */ }; #ifdef PRIVATE @@ -815,14 +815,14 @@ struct msghdr { * the iovec array -- like sendmsg(). The field msg_datalen is ignored. 
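 *
 * Usage sketch (illustrative; these are PRIVATE interfaces): sending two
 * datagrams in a single sendmsg_x() call, one iovec per message, where
 * "s", "buf0"/"len0" and "buf1"/"len1" are assumed to exist:
 *
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf0, .iov_len = len0 },
 *		{ .iov_base = buf1, .iov_len = len1 },
 *	};
 *	struct msghdr_x msgs[2] = {
 *		{ .msg_iov = &iov[0], .msg_iovlen = 1 },
 *		{ .msg_iov = &iov[1], .msg_iovlen = 1 },
 *	};
 *	ssize_t nsent = sendmsg_x(s, msgs, 2, 0);
 *
 * On success nsent counts messages sent, not bytes.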
*/ struct msghdr_x { - void *msg_name; /* optional address */ - socklen_t msg_namelen; /* size of address */ - struct iovec *msg_iov; /* scatter/gather array */ - int msg_iovlen; /* # elements in msg_iov */ - void *msg_control; /* ancillary data, see below */ - socklen_t msg_controllen; /* ancillary data buffer len */ - int msg_flags; /* flags on received message */ - size_t msg_datalen; /* byte length of buffer in msg_iov */ + void *msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + struct iovec *msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + void *msg_control; /* ancillary data, see below */ + socklen_t msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ + size_t msg_datalen; /* byte length of buffer in msg_iov */ }; #endif /* PRIVATE */ @@ -834,13 +834,13 @@ struct msghdr_x { */ struct user_msghdr { - user_addr_t msg_name; /* optional address */ - socklen_t msg_namelen; /* size of address */ - user_addr_t msg_iov; /* scatter/gather array */ - int msg_iovlen; /* # elements in msg_iov */ - user_addr_t msg_control; /* ancillary data, see below */ - socklen_t msg_controllen; /* ancillary data buffer len */ - int msg_flags; /* flags on received message */ + user_addr_t msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + user_addr_t msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + user_addr_t msg_control; /* ancillary data, see below */ + socklen_t msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ }; /* @@ -849,13 +849,13 @@ struct user_msghdr { */ struct user64_msghdr { - user64_addr_t msg_name; /* optional address */ - socklen_t msg_namelen; /* size of address */ - user64_addr_t msg_iov; /* scatter/gather array */ - int msg_iovlen; /* # elements in msg_iov */ - user64_addr_t msg_control; /* ancillary data, see below */ - socklen_t msg_controllen; /* ancillary data buffer len */ - int msg_flags; /* flags on received message */ + user64_addr_t msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + user64_addr_t msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + user64_addr_t msg_control; /* ancillary data, see below */ + socklen_t msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ }; /* @@ -864,13 +864,13 @@ struct user64_msghdr { */ struct user32_msghdr { - user32_addr_t msg_name; /* optional address */ - socklen_t msg_namelen; /* size of address */ - user32_addr_t msg_iov; /* scatter/gather array */ - int msg_iovlen; /* # elements in msg_iov */ - user32_addr_t msg_control; /* ancillary data, see below */ - socklen_t msg_controllen; /* ancillary data buffer len */ - int msg_flags; /* flags on received message */ + user32_addr_t msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + user32_addr_t msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + user32_addr_t msg_control; /* ancillary data, see below */ + socklen_t msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ }; /* @@ -880,14 +880,14 @@ struct user32_msghdr { */ struct user_msghdr_x { - user_addr_t msg_name; /* optional address */ - socklen_t msg_namelen; /* size of address */ - user_addr_t msg_iov; /* scatter/gather array */ - int msg_iovlen; /* # elements in msg_iov */ - user_addr_t msg_control; /* 
ancillary data, see below */ - socklen_t msg_controllen; /* ancillary data buffer len */ - int msg_flags; /* flags on received message */ - size_t msg_datalen; /* byte length of buffer in msg_iov */ + user_addr_t msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + user_addr_t msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + user_addr_t msg_control; /* ancillary data, see below */ + socklen_t msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ + size_t msg_datalen; /* byte length of buffer in msg_iov */ }; /* @@ -896,14 +896,14 @@ struct user_msghdr_x { */ struct user64_msghdr_x { - user64_addr_t msg_name; /* optional address */ - socklen_t msg_namelen; /* size of address */ - user64_addr_t msg_iov; /* scatter/gather array */ - int msg_iovlen; /* # elements in msg_iov */ - user64_addr_t msg_control; /* ancillary data, see below */ - socklen_t msg_controllen; /* ancillary data buffer len */ - int msg_flags; /* flags on received message */ - user64_size_t msg_datalen; /* byte length of buffer in msg_iov */ + user64_addr_t msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + user64_addr_t msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + user64_addr_t msg_control; /* ancillary data, see below */ + socklen_t msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ + user64_size_t msg_datalen; /* byte length of buffer in msg_iov */ }; /* @@ -912,14 +912,14 @@ struct user64_msghdr_x { */ struct user32_msghdr_x { - user32_addr_t msg_name; /* optional address */ - socklen_t msg_namelen; /* size of address */ - user32_addr_t msg_iov; /* scatter/gather array */ - int msg_iovlen; /* # elements in msg_iov */ - user32_addr_t msg_control; /* ancillary data, see below */ - socklen_t msg_controllen; /* ancillary data buffer len */ - int msg_flags; /* flags on received message */ - user32_size_t msg_datalen; /* byte length of buffer in msg_iov */ + user32_addr_t msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + user32_addr_t msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + user32_addr_t msg_control; /* ancillary data, see below */ + socklen_t msg_controllen; /* ancillary data buffer len */ + int msg_flags; /* flags on received message */ + user32_size_t msg_datalen; /* byte length of buffer in msg_iov */ }; /* @@ -929,11 +929,11 @@ struct user32_msghdr_x { */ struct user_sa_endpoints { - unsigned int sae_srcif; /* optional source interface */ - user_addr_t sae_srcaddr; /* optional source address */ - socklen_t sae_srcaddrlen; /* size of source address */ - user_addr_t sae_dstaddr; /* destination address */ - socklen_t sae_dstaddrlen; /* size of destination address */ + unsigned int sae_srcif; /* optional source interface */ + user_addr_t sae_srcaddr; /* optional source address */ + socklen_t sae_srcaddrlen; /* size of source address */ + user_addr_t sae_dstaddr; /* destination address */ + socklen_t sae_dstaddrlen; /* size of destination address */ }; /* @@ -942,11 +942,11 @@ struct user_sa_endpoints { */ struct user64_sa_endpoints { - unsigned int sae_srcif; /* optional source interface */ - user64_addr_t sae_srcaddr; /* optional source address */ - socklen_t sae_srcaddrlen; /* size of source address */ - user64_addr_t sae_dstaddr; /* destination address */ - socklen_t sae_dstaddrlen; /* size of destination address */ + unsigned 
int sae_srcif; /* optional source interface */ + user64_addr_t sae_srcaddr; /* optional source address */ + socklen_t sae_srcaddrlen; /* size of source address */ + user64_addr_t sae_dstaddr; /* destination address */ + socklen_t sae_dstaddrlen; /* size of destination address */ }; /* @@ -955,51 +955,51 @@ struct user64_sa_endpoints { */ struct user32_sa_endpoints { - unsigned int sae_srcif; /* optional source interface */ - user32_addr_t sae_srcaddr; /* optional source address */ - socklen_t sae_srcaddrlen; /* size of source address */ - user32_addr_t sae_dstaddr; /* destination address */ - socklen_t sae_dstaddrlen; /* size of destination address */ + unsigned int sae_srcif; /* optional source interface */ + user32_addr_t sae_srcaddr; /* optional source address */ + socklen_t sae_srcaddrlen; /* size of source address */ + user32_addr_t sae_dstaddr; /* destination address */ + socklen_t sae_dstaddrlen; /* size of destination address */ }; #endif /* XNU_KERNEL_PRIVATE */ -#define MSG_OOB 0x1 /* process out-of-band data */ -#define MSG_PEEK 0x2 /* peek at incoming message */ -#define MSG_DONTROUTE 0x4 /* send without using routing tables */ -#define MSG_EOR 0x8 /* data completes record */ -#define MSG_TRUNC 0x10 /* data discarded before delivery */ -#define MSG_CTRUNC 0x20 /* control data lost before delivery */ -#define MSG_WAITALL 0x40 /* wait for full request or error */ +#define MSG_OOB 0x1 /* process out-of-band data */ +#define MSG_PEEK 0x2 /* peek at incoming message */ +#define MSG_DONTROUTE 0x4 /* send without using routing tables */ +#define MSG_EOR 0x8 /* data completes record */ +#define MSG_TRUNC 0x10 /* data discarded before delivery */ +#define MSG_CTRUNC 0x20 /* control data lost before delivery */ +#define MSG_WAITALL 0x40 /* wait for full request or error */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define MSG_DONTWAIT 0x80 /* this message should be nonblocking */ -#define MSG_EOF 0x100 /* data completes connection */ +#define MSG_DONTWAIT 0x80 /* this message should be nonblocking */ +#define MSG_EOF 0x100 /* data completes connection */ #ifdef __APPLE__ #ifndef PRIVATE #ifdef __APPLE_API_OBSOLETE -#define MSG_WAITSTREAM 0x200 /* wait up to full request.. may return partial */ +#define MSG_WAITSTREAM 0x200 /* wait up to full request.. may return partial */ #endif #else -#define MSG_WAITSTREAM 0x200 /* wait up to full request.. may return partial */ +#define MSG_WAITSTREAM 0x200 /* wait up to full request.. 
may return partial */ #endif -#define MSG_FLUSH 0x400 /* Start of 'hold' seq; dump so_temp */ -#define MSG_HOLD 0x800 /* Hold frag in so_temp */ -#define MSG_SEND 0x1000 /* Send the packet in so_temp */ -#define MSG_HAVEMORE 0x2000 /* Data ready to be read */ -#define MSG_RCVMORE 0x4000 /* Data remains in current pkt */ +#define MSG_FLUSH 0x400 /* Start of 'hold' seq; dump so_temp */ +#define MSG_HOLD 0x800 /* Hold frag in so_temp */ +#define MSG_SEND 0x1000 /* Send the packet in so_temp */ +#define MSG_HAVEMORE 0x2000 /* Data ready to be read */ +#define MSG_RCVMORE 0x4000 /* Data remains in current pkt */ #endif #ifdef KERNEL_PRIVATE -#define MSG_COMPAT 0x8000 /* deprecated */ +#define MSG_COMPAT 0x8000 /* deprecated */ #endif /* KERNEL_PRIVATE */ -#define MSG_NEEDSA 0x10000 /* Fail receive if socket address cannot be allocated */ +#define MSG_NEEDSA 0x10000 /* Fail receive if socket address cannot be allocated */ #ifdef KERNEL_PRIVATE -#define MSG_NBIO 0x20000 /* FIONBIO mode, used by fifofs */ -#define MSG_SKIPCFIL 0x40000 /* skip pass content filter */ +#define MSG_NBIO 0x20000 /* FIONBIO mode, used by fifofs */ +#define MSG_SKIPCFIL 0x40000 /* skip pass content filter */ #endif -#ifdef KERNEL -#define MSG_USEUPCALL 0x80000000 /* Inherit upcall in sock_accept */ +#ifdef KERNEL +#define MSG_USEUPCALL 0x80000000 /* Inherit upcall in sock_accept */ #endif -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * Header for ancillary data objects in msg_control buffer. @@ -1008,9 +1008,9 @@ struct user32_sa_endpoints { * of message elements headed by cmsghdr structures. */ struct cmsghdr { - socklen_t cmsg_len; /* [XSI] data byte count, including hdr */ - int cmsg_level; /* [XSI] originating protocol */ - int cmsg_type; /* [XSI] protocol-specific type */ + socklen_t cmsg_len; /* [XSI] data byte count, including hdr */ + int cmsg_level; /* [XSI] originating protocol */ + int cmsg_type; /* [XSI] protocol-specific type */ /* followed by unsigned char cmsg_data[]; */ }; @@ -1021,7 +1021,7 @@ struct cmsghdr { * be able to fit in an mbuf, and NGROUPS_MAX is too large to allow * this. */ -#define CMGROUP_MAX 16 +#define CMGROUP_MAX 16 /* * Credentials structure, used to verify the identity of a peer @@ -1031,25 +1031,25 @@ struct cmsghdr { * is the effective GID.) */ struct cmsgcred { - pid_t cmcred_pid; /* PID of sending process */ - uid_t cmcred_uid; /* real UID of sending process */ - uid_t cmcred_euid; /* effective UID of sending process */ - gid_t cmcred_gid; /* real GID of sending process */ - short cmcred_ngroups; /* number or groups */ - gid_t cmcred_groups[CMGROUP_MAX]; /* groups */ + pid_t cmcred_pid; /* PID of sending process */ + uid_t cmcred_uid; /* real UID of sending process */ + uid_t cmcred_euid; /* effective UID of sending process */ + gid_t cmcred_gid; /* real GID of sending process */ + short cmcred_ngroups; /* number or groups */ + gid_t cmcred_groups[CMGROUP_MAX]; /* groups */ }; #endif -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* given pointer to struct cmsghdr, return pointer to data */ -#define CMSG_DATA(cmsg) ((unsigned char *)(cmsg) + \ +#define CMSG_DATA(cmsg) ((unsigned char *)(cmsg) + \ __DARWIN_ALIGN32(sizeof(struct cmsghdr))) /* * RFC 2292 requires to check msg_controllen, in case that the kernel returns * an empty list for some reasons. 
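 *
 * The canonical traversal, for illustration (assuming "mhdr" is a struct
 * msghdr filled in by recvmsg(2)):
 *
 *	struct cmsghdr *cm;
 *	for (cm = CMSG_FIRSTHDR(&mhdr); cm != NULL;
 *	    cm = CMSG_NXTHDR(&mhdr, cm)) {
 *		if (cm->cmsg_level == SOL_SOCKET &&
 *		    cm->cmsg_type == SCM_RIGHTS)
 *			memcpy(&fd, CMSG_DATA(cm), sizeof (fd));
 *	}
 *
 * with "int fd" declared by the caller; memcpy() sidesteps alignment
 * assumptions about the ancillary payload.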
*/ -#define CMSG_FIRSTHDR(mhdr) \ +#define CMSG_FIRSTHDR(mhdr) \ ((mhdr)->msg_controllen >= sizeof(struct cmsghdr) ? \ (struct cmsghdr *)(mhdr)->msg_control : \ (struct cmsghdr *)0L) @@ -1059,38 +1059,38 @@ struct cmsgcred { * Given pointer to struct cmsghdr, return pointer to next cmsghdr * RFC 2292 says that CMSG_NXTHDR(mhdr, NULL) is equivalent to CMSG_FIRSTHDR(mhdr) */ -#define CMSG_NXTHDR(mhdr, cmsg) \ - ((char *)(cmsg) == (char *)0L ? CMSG_FIRSTHDR(mhdr) : \ - ((((unsigned char *)(cmsg) + \ - __DARWIN_ALIGN32((__uint32_t)(cmsg)->cmsg_len) + \ - __DARWIN_ALIGN32(sizeof(struct cmsghdr))) > \ - ((unsigned char *)(mhdr)->msg_control + \ - (mhdr)->msg_controllen)) ? \ - (struct cmsghdr *)0L /* NULL */ : \ - (struct cmsghdr *)(void *)((unsigned char *)(cmsg) + \ - __DARWIN_ALIGN32((__uint32_t)(cmsg)->cmsg_len)))) +#define CMSG_NXTHDR(mhdr, cmsg) \ + ((char *)(cmsg) == (char *)0L ? CMSG_FIRSTHDR(mhdr) : \ + ((((unsigned char *)(cmsg) + \ + __DARWIN_ALIGN32((__uint32_t)(cmsg)->cmsg_len) + \ + __DARWIN_ALIGN32(sizeof(struct cmsghdr))) > \ + ((unsigned char *)(mhdr)->msg_control + \ + (mhdr)->msg_controllen)) ? \ + (struct cmsghdr *)0L /* NULL */ : \ + (struct cmsghdr *)(void *)((unsigned char *)(cmsg) + \ + __DARWIN_ALIGN32((__uint32_t)(cmsg)->cmsg_len)))) #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* RFC 2292 additions */ -#define CMSG_SPACE(l) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + __DARWIN_ALIGN32(l)) -#define CMSG_LEN(l) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + (l)) +#define CMSG_SPACE(l) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + __DARWIN_ALIGN32(l)) +#define CMSG_LEN(l) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + (l)) #ifdef KERNEL -#define CMSG_ALIGN(n) __DARWIN_ALIGN32(n) +#define CMSG_ALIGN(n) __DARWIN_ALIGN32(n) #endif -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* "Socket"-level control message types: */ -#define SCM_RIGHTS 0x01 /* access rights (array of int) */ +#define SCM_RIGHTS 0x01 /* access rights (array of int) */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define SCM_TIMESTAMP 0x02 /* timestamp (struct timeval) */ -#define SCM_CREDS 0x03 /* process creds (struct cmsgcred) */ -#define SCM_TIMESTAMP_MONOTONIC 0x04 /* timestamp (uint64_t) */ +#define SCM_TIMESTAMP 0x02 /* timestamp (struct timeval) */ +#define SCM_CREDS 0x03 /* process creds (struct cmsgcred) */ +#define SCM_TIMESTAMP_MONOTONIC 0x04 /* timestamp (uint64_t) */ #ifdef PRIVATE -#define SCM_SEQNUM 0x05 /* TCP unordered recv seq no */ -#define SCM_MSG_PRIORITY 0x06 /* TCP unordered snd priority */ -#define SCM_TIMESTAMP_CONTINUOUS 0x07 /* timestamp (uint64_t) */ +#define SCM_SEQNUM 0x05 /* TCP unordered recv seq no */ +#define SCM_MSG_PRIORITY 0x06 /* TCP unordered snd priority */ +#define SCM_TIMESTAMP_CONTINUOUS 0x07 /* timestamp (uint64_t) */ #endif /* PRIVATE */ #ifdef KERNEL_PRIVATE @@ -1098,73 +1098,73 @@ struct cmsgcred { * 4.3 compat sockaddr (deprecated) */ struct osockaddr { - __uint16_t sa_family; /* address family */ - char sa_data[14]; /* up to 14 bytes of direct address */ + __uint16_t sa_family; /* address family */ + char sa_data[14]; /* up to 14 bytes of direct address */ }; /* * 4.3-compat message header (deprecated) */ struct omsghdr { - void *msg_name; /* optional address */ - socklen_t msg_namelen; /* size of address */ - struct iovec *msg_iov; /* scatter/gather array */ - int msg_iovlen; /* # elements in msg_iov */ - void *msg_accrights; /* access rights sent/rcvd */ - int msg_accrightslen; 
+ void *msg_name; /* optional address */ + socklen_t msg_namelen; /* size of address */ + struct iovec *msg_iov; /* scatter/gather array */ + int msg_iovlen; /* # elements in msg_iov */ + void *msg_accrights; /* access rights sent/rcvd */ + int msg_accrightslen; }; -#define SA(s) ((struct sockaddr *)(void *)(s)) +#define SA(s) ((struct sockaddr *)(void *)(s)) #endif /* KERNEL_PRIVATE */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ /* * howto arguments for shutdown(2), specified by Posix.1g. */ -#define SHUT_RD 0 /* shut down the reading side */ -#define SHUT_WR 1 /* shut down the writing side */ -#define SHUT_RDWR 2 /* shut down both sides */ +#define SHUT_RD 0 /* shut down the reading side */ +#define SHUT_WR 1 /* shut down the writing side */ +#define SHUT_RDWR 2 /* shut down both sides */ #if !defined(_POSIX_C_SOURCE) /* * sendfile(2) header/trailer struct */ struct sf_hdtr { - struct iovec *headers; /* pointer to an array of header struct iovec's */ - int hdr_cnt; /* number of header iovec's */ - struct iovec *trailers; /* pointer to an array of trailer struct iovec's */ - int trl_cnt; /* number of trailer iovec's */ + struct iovec *headers; /* pointer to an array of header struct iovec's */ + int hdr_cnt; /* number of header iovec's */ + struct iovec *trailers; /* pointer to an array of trailer struct iovec's */ + int trl_cnt; /* number of trailer iovec's */ }; #ifdef KERNEL /* In-kernel representation */ struct user_sf_hdtr { - user_addr_t headers; /* pointer to an array of header struct iovec's */ - int hdr_cnt; /* number of header iovec's */ - user_addr_t trailers; /* pointer to an array of trailer struct iovec's */ - int trl_cnt; /* number of trailer iovec's */ + user_addr_t headers; /* pointer to an array of header struct iovec's */ + int hdr_cnt; /* number of header iovec's */ + user_addr_t trailers; /* pointer to an array of trailer struct iovec's */ + int trl_cnt; /* number of trailer iovec's */ }; /* LP64 user version of struct sf_hdtr */ struct user64_sf_hdtr { - user64_addr_t headers; /* pointer to an array of header struct iovec's */ - int hdr_cnt; /* number of header iovec's */ - user64_addr_t trailers; /* pointer to an array of trailer struct iovec's */ - int trl_cnt; /* number of trailer iovec's */ + user64_addr_t headers; /* pointer to an array of header struct iovec's */ + int hdr_cnt; /* number of header iovec's */ + user64_addr_t trailers; /* pointer to an array of trailer struct iovec's */ + int trl_cnt; /* number of trailer iovec's */ }; /* ILP32 user version of struct sf_hdtr */ struct user32_sf_hdtr { - user32_addr_t headers; /* pointer to an array of header struct iovec's */ - int hdr_cnt; /* number of header iovec's */ - user32_addr_t trailers; /* pointer to an array of trailer struct iovec's */ - int trl_cnt; /* number of trailer iovec's */ + user32_addr_t headers; /* pointer to an array of header struct iovec's */ + int hdr_cnt; /* number of header iovec's */ + user32_addr_t trailers; /* pointer to an array of trailer struct iovec's */ + int trl_cnt; /* number of trailer iovec's */ }; #endif /* KERNEL */ -#endif /* !_POSIX_C_SOURCE */ +#endif /* !_POSIX_C_SOURCE */ #ifdef PRIVATE #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) @@ -1173,19 +1173,19 @@ struct user32_sf_hdtr { * Structure for SIOCGASSOCIDS */ struct so_aidreq { - __uint32_t sar_cnt; /* number of associations */ - sae_associd_t *sar_aidp; /* array of association IDs */ + __uint32_t sar_cnt; /* number of associations */ + 
sae_associd_t *sar_aidp; /* array of association IDs */ }; #ifdef BSD_KERNEL_PRIVATE struct so_aidreq32 { - __uint32_t sar_cnt; - user32_addr_t sar_aidp; + __uint32_t sar_cnt; + user32_addr_t sar_aidp; }; struct so_aidreq64 { - __uint32_t sar_cnt; - user64_addr_t sar_aidp __attribute__((aligned(8))); + __uint32_t sar_cnt; + user64_addr_t sar_aidp __attribute__((aligned(8))); }; #endif /* BSD_KERNEL_PRIVATE */ @@ -1193,22 +1193,22 @@ struct so_aidreq64 { * Structure for SIOCGCONNIDS */ struct so_cidreq { - sae_associd_t scr_aid; /* association ID */ - __uint32_t scr_cnt; /* number of connections */ - sae_connid_t *scr_cidp; /* array of connection IDs */ + sae_associd_t scr_aid; /* association ID */ + __uint32_t scr_cnt; /* number of connections */ + sae_connid_t *scr_cidp; /* array of connection IDs */ }; #ifdef BSD_KERNEL_PRIVATE struct so_cidreq32 { - sae_associd_t scr_aid; - __uint32_t scr_cnt; - user32_addr_t scr_cidp; + sae_associd_t scr_aid; + __uint32_t scr_cnt; + user32_addr_t scr_cidp; }; struct so_cidreq64 { - sae_associd_t scr_aid; - __uint32_t scr_cnt; - user64_addr_t scr_cidp __attribute__((aligned(8))); + sae_associd_t scr_aid; + __uint32_t scr_cnt; + user64_addr_t scr_cidp __attribute__((aligned(8))); }; #endif /* BSD_KERNEL_PRIVATE */ @@ -1216,92 +1216,92 @@ struct so_cidreq64 { * Structure for SIOCGCONNINFO */ struct so_cinforeq { - sae_connid_t scir_cid; /* connection ID */ - __uint32_t scir_flags; /* see flags below */ - __uint32_t scir_ifindex; /* (last) outbound interface */ - __int32_t scir_error; /* most recent error */ - struct sockaddr *scir_src; /* source address */ - socklen_t scir_src_len; /* source address len */ - struct sockaddr *scir_dst; /* destination address */ - socklen_t scir_dst_len; /* destination address len */ - __uint32_t scir_aux_type; /* aux data type (CIAUX) */ - void *scir_aux_data; /* aux data */ - __uint32_t scir_aux_len; /* aux data len */ + sae_connid_t scir_cid; /* connection ID */ + __uint32_t scir_flags; /* see flags below */ + __uint32_t scir_ifindex; /* (last) outbound interface */ + __int32_t scir_error; /* most recent error */ + struct sockaddr *scir_src; /* source address */ + socklen_t scir_src_len; /* source address len */ + struct sockaddr *scir_dst; /* destination address */ + socklen_t scir_dst_len; /* destination address len */ + __uint32_t scir_aux_type; /* aux data type (CIAUX) */ + void *scir_aux_data; /* aux data */ + __uint32_t scir_aux_len; /* aux data len */ }; #ifdef BSD_KERNEL_PRIVATE struct so_cinforeq32 { - sae_connid_t scir_cid; - __uint32_t scir_flags; - __uint32_t scir_ifindex; - __int32_t scir_error; - user32_addr_t scir_src; - socklen_t scir_src_len; - user32_addr_t scir_dst; - socklen_t scir_dst_len; - __uint32_t scir_aux_type; - user32_addr_t scir_aux_data; - __uint32_t scir_aux_len; + sae_connid_t scir_cid; + __uint32_t scir_flags; + __uint32_t scir_ifindex; + __int32_t scir_error; + user32_addr_t scir_src; + socklen_t scir_src_len; + user32_addr_t scir_dst; + socklen_t scir_dst_len; + __uint32_t scir_aux_type; + user32_addr_t scir_aux_data; + __uint32_t scir_aux_len; }; struct so_cinforeq64 { - sae_connid_t scir_cid; - __uint32_t scir_flags; - __uint32_t scir_ifindex; - __int32_t scir_error; - user64_addr_t scir_src __attribute__((aligned(8))); - socklen_t scir_src_len; - user64_addr_t scir_dst __attribute__((aligned(8))); - socklen_t scir_dst_len; - __uint32_t scir_aux_type; - user64_addr_t scir_aux_data __attribute__((aligned(8))); - __uint32_t scir_aux_len; + sae_connid_t scir_cid; + __uint32_t 
scir_flags; + __uint32_t scir_ifindex; + __int32_t scir_error; + user64_addr_t scir_src __attribute__((aligned(8))); + socklen_t scir_src_len; + user64_addr_t scir_dst __attribute__((aligned(8))); + socklen_t scir_dst_len; + __uint32_t scir_aux_type; + user64_addr_t scir_aux_data __attribute__((aligned(8))); + __uint32_t scir_aux_len; }; #endif /* BSD_KERNEL_PRIVATE */ /* valid connection info flags */ -#define CIF_CONNECTING 0x1 /* connection was attempted */ -#define CIF_CONNECTED 0x2 /* connection is established */ -#define CIF_DISCONNECTING 0x4 /* disconnection was attempted */ -#define CIF_DISCONNECTED 0x8 /* has been disconnected */ -#define CIF_BOUND_IF 0x10 /* bound to an interface */ -#define CIF_BOUND_IP 0x20 /* bound to a src address */ -#define CIF_BOUND_PORT 0x40 /* bound to a src port */ -#define CIF_PREFERRED 0x80 /* connection is primary/preferred */ -#define CIF_MP_CAPABLE 0x100 /* supports multipath protocol */ -#define CIF_MP_READY 0x200 /* multipath protocol confirmed */ -#define CIF_MP_DEGRADED 0x400 /* has lost its multipath capability */ -#define CIF_MP_ACTIVE 0x800 /* this is the active subflow */ +#define CIF_CONNECTING 0x1 /* connection was attempted */ +#define CIF_CONNECTED 0x2 /* connection is established */ +#define CIF_DISCONNECTING 0x4 /* disconnection was attempted */ +#define CIF_DISCONNECTED 0x8 /* has been disconnected */ +#define CIF_BOUND_IF 0x10 /* bound to an interface */ +#define CIF_BOUND_IP 0x20 /* bound to a src address */ +#define CIF_BOUND_PORT 0x40 /* bound to a src port */ +#define CIF_PREFERRED 0x80 /* connection is primary/preferred */ +#define CIF_MP_CAPABLE 0x100 /* supports multipath protocol */ +#define CIF_MP_READY 0x200 /* multipath protocol confirmed */ +#define CIF_MP_DEGRADED 0x400 /* has lost its multipath capability */ +#define CIF_MP_ACTIVE 0x800 /* this is the active subflow */ /* valid connection info auxiliary data types */ -#define CIAUX_TCP 0x1 /* TCP auxiliary data (conninfo_tcp_t) */ -#define CIAUX_MPTCP 0x2 /* MPTCP auxiliary data (conninfo_mptcp_t) */ +#define CIAUX_TCP 0x1 /* TCP auxiliary data (conninfo_tcp_t) */ +#define CIAUX_MPTCP 0x2 /* MPTCP auxiliary data (conninfo_mptcp_t) */ /* * Structure for SIOC{S,G}CONNORDER */ struct so_cordreq { - sae_connid_t sco_cid; /* connection ID */ - __uint32_t sco_rank; /* rank (0 means unspecified) */ + sae_connid_t sco_cid; /* connection ID */ + __uint32_t sco_rank; /* rank (0 means unspecified) */ }; /* * Common structure for KEV_NETPOLICY_SUBCLASS */ struct netpolicy_event_data { - __uint64_t eupid; /* effective unique PID */ - pid_t epid; /* effective PID */ + __uint64_t eupid; /* effective unique PID */ + pid_t epid; /* effective PID */ #if !defined(__LP64__) - __uint32_t pad; + __uint32_t pad; #endif /* __LP64__ */ - uuid_t euuid; /* effective UUID */ + uuid_t euuid; /* effective UUID */ }; /* * NETPOLICY_IFDENIED event structure */ struct kev_netpolicy_ifdenied { - struct netpolicy_event_data ev_data; + struct netpolicy_event_data ev_data; __uint32_t ev_if_functional_type; }; @@ -1321,11 +1321,11 @@ struct kev_socket_closed { * Network Service Type to DiffServ Code Point mapping */ struct netsvctype_dscp_map { - int netsvctype; - u_int8_t dscp; /* 6 bits diffserv code point */ + int netsvctype; + u_int8_t dscp; /* 6 bits diffserv code point */ }; -#ifndef KERNEL +#ifndef KERNEL __BEGIN_DECLS extern int peeloff(int s, sae_associd_t); @@ -1396,42 +1396,42 @@ ssize_t recvmsg_x(int s, const struct msghdr_x *msgp, u_int cnt, int flags); ssize_t sendmsg_x(int s, const struct 
msghdr_x *msgp, u_int cnt, int flags); __END_DECLS #endif /* !KERNEL */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #endif /* PRIVATE */ -#ifndef KERNEL +#ifndef KERNEL __BEGIN_DECLS -int accept(int, struct sockaddr * __restrict, socklen_t * __restrict) - __DARWIN_ALIAS_C(accept); -int bind(int, const struct sockaddr *, socklen_t) __DARWIN_ALIAS(bind); -int connect(int, const struct sockaddr *, socklen_t) __DARWIN_ALIAS_C(connect); -int getpeername(int, struct sockaddr * __restrict, socklen_t * __restrict) - __DARWIN_ALIAS(getpeername); -int getsockname(int, struct sockaddr * __restrict, socklen_t * __restrict) - __DARWIN_ALIAS(getsockname); -int getsockopt(int, int, int, void * __restrict, socklen_t * __restrict); -int listen(int, int) __DARWIN_ALIAS(listen); -ssize_t recv(int, void *, size_t, int) __DARWIN_ALIAS_C(recv); -ssize_t recvfrom(int, void *, size_t, int, struct sockaddr * __restrict, - socklen_t * __restrict) __DARWIN_ALIAS_C(recvfrom); -ssize_t recvmsg(int, struct msghdr *, int) __DARWIN_ALIAS_C(recvmsg); -ssize_t send(int, const void *, size_t, int) __DARWIN_ALIAS_C(send); -ssize_t sendmsg(int, const struct msghdr *, int) __DARWIN_ALIAS_C(sendmsg); -ssize_t sendto(int, const void *, size_t, - int, const struct sockaddr *, socklen_t) __DARWIN_ALIAS_C(sendto); -int setsockopt(int, int, int, const void *, socklen_t); -int shutdown(int, int); -int sockatmark(int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); -int socket(int, int, int); -int socketpair(int, int, int, int *) __DARWIN_ALIAS(socketpair); +int accept(int, struct sockaddr * __restrict, socklen_t * __restrict) +__DARWIN_ALIAS_C(accept); +int bind(int, const struct sockaddr *, socklen_t) __DARWIN_ALIAS(bind); +int connect(int, const struct sockaddr *, socklen_t) __DARWIN_ALIAS_C(connect); +int getpeername(int, struct sockaddr * __restrict, socklen_t * __restrict) +__DARWIN_ALIAS(getpeername); +int getsockname(int, struct sockaddr * __restrict, socklen_t * __restrict) +__DARWIN_ALIAS(getsockname); +int getsockopt(int, int, int, void * __restrict, socklen_t * __restrict); +int listen(int, int) __DARWIN_ALIAS(listen); +ssize_t recv(int, void *, size_t, int) __DARWIN_ALIAS_C(recv); +ssize_t recvfrom(int, void *, size_t, int, struct sockaddr * __restrict, + socklen_t * __restrict) __DARWIN_ALIAS_C(recvfrom); +ssize_t recvmsg(int, struct msghdr *, int) __DARWIN_ALIAS_C(recvmsg); +ssize_t send(int, const void *, size_t, int) __DARWIN_ALIAS_C(send); +ssize_t sendmsg(int, const struct msghdr *, int) __DARWIN_ALIAS_C(sendmsg); +ssize_t sendto(int, const void *, size_t, + int, const struct sockaddr *, socklen_t) __DARWIN_ALIAS_C(sendto); +int setsockopt(int, int, int, const void *, socklen_t); +int shutdown(int, int); +int sockatmark(int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int socket(int, int, int); +int socketpair(int, int, int, int *) __DARWIN_ALIAS(socketpair); #if !defined(_POSIX_C_SOURCE) -int sendfile(int, int, off_t, off_t *, struct sf_hdtr *, int); -#endif /* !_POSIX_C_SOURCE */ +int sendfile(int, int, off_t, off_t *, struct sf_hdtr *, int); +#endif /* !_POSIX_C_SOURCE */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -void pfctlinput(int, struct sockaddr *); +void pfctlinput(int, struct sockaddr *); __API_AVAILABLE(macosx(10.11), ios(9.0), tvos(9.0), watchos(2.0)) int connectx(int, const sa_endpoints_t *, sae_associd_t, unsigned int, @@ -1439,7 +1439,7 @@ int connectx(int, const sa_endpoints_t *, sae_associd_t, unsigned int, 
__API_AVAILABLE(macosx(10.11), ios(9.0), tvos(9.0), watchos(2.0)) int disconnectx(int, sae_associd_t, sae_connid_t); -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ __END_DECLS #endif /* !KERNEL */ diff --git a/bsd/sys/socketvar.h b/bsd/sys/socketvar.h index caf612051..250f8724f 100644 --- a/bsd/sys/socketvar.h +++ b/bsd/sys/socketvar.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2017 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * * @@ -70,14 +70,14 @@ */ #ifndef _SYS_SOCKETVAR_H_ -#define _SYS_SOCKETVAR_H_ +#define _SYS_SOCKETVAR_H_ #include <sys/appleapiopts.h> #include <sys/cdefs.h> #include <sys/types.h> /* u_quad_t */ #ifdef KERNEL_PRIVATE -#include <sys/queue.h> /* for TAILQ macros */ -#include <sys/select.h> /* for struct selinfo */ +#include <sys/queue.h> /* for TAILQ macros */ +#include <sys/select.h> /* for struct selinfo */ #include #include #include @@ -86,7 +86,7 @@ #endif /* BSD_KERNEL_PRIVATE */ #endif /* KERNEL_PRIVATE */ -typedef u_quad_t so_gen_t; +typedef u_quad_t so_gen_t; #ifdef KERNEL_PRIVATE struct mbuf; @@ -96,33 +96,33 @@ struct sockif; struct sockutil; /* strings for sleep message: */ -extern char netio[], netcon[], netcls[]; -#define SOCKET_CACHE_ON -#define SO_CACHE_FLUSH_INTERVAL 1 /* Seconds */ -#define SO_CACHE_TIME_LIMIT (120/SO_CACHE_FLUSH_INTERVAL) /* Seconds */ -#define SO_CACHE_MAX_FREE_BATCH 50 -#define MAX_CACHED_SOCKETS 512 -#define TEMPDEBUG 0 +extern char netio[], netcon[], netcls[]; +#define SOCKET_CACHE_ON +#define SO_CACHE_FLUSH_INTERVAL 1 /* Seconds */ +#define SO_CACHE_TIME_LIMIT (120/SO_CACHE_FLUSH_INTERVAL) /* Seconds */ +#define SO_CACHE_MAX_FREE_BATCH 50 +#define MAX_CACHED_SOCKETS 512 +#define TEMPDEBUG 0 #endif /* KERNEL_PRIVATE */ #ifdef PRIVATE -#define SO_TC_STATS_MAX 4 +#define SO_TC_STATS_MAX 4 struct data_stats { - u_int64_t rxpackets; - u_int64_t rxbytes; - u_int64_t txpackets; - u_int64_t txbytes; + u_int64_t rxpackets; + u_int64_t rxbytes; + u_int64_t txpackets; + u_int64_t txbytes; }; -#define MSG_PRI_0 0 /* TCP message priority, lowest */ -#define MSG_PRI_1 1 -#define MSG_PRI_2 2 -#define MSG_PRI_3 3 /* TCP message priority, highest */ -#define MSG_PRI_MAX MSG_PRI_3 -#define MSG_PRI_MIN MSG_PRI_0 -#define MSG_PRI_COUNT 4 -#define MSG_PRI_DEFAULT MSG_PRI_1 +#define MSG_PRI_0 0 /* TCP message priority, lowest */ +#define MSG_PRI_1 1 +#define MSG_PRI_2 2 +#define MSG_PRI_3 3 /* TCP message priority, highest */ +#define MSG_PRI_MAX MSG_PRI_3 +#define MSG_PRI_MIN MSG_PRI_0 +#define MSG_PRI_COUNT 4 +#define MSG_PRI_DEFAULT MSG_PRI_1 #endif /* PRIVATE */ #ifdef KERNEL_PRIVATE @@ -131,9 +131,9 @@ struct msg_priq { struct mbuf *msgq_head; /* first mbuf in the queue */ struct mbuf *msgq_tail; /* last mbuf in the queue */ struct mbuf *msgq_lastmsg; /* last message in the queue */ - u_int32_t msgq_flags; /* flags per priority queue */ -#define MSGQ_MSG_NOTDONE 0x1 /* set when EOR of a msg is not seen */ - u_int32_t msgq_bytes; /* data bytes in this queue */ + u_int32_t msgq_flags; /* flags per priority queue */ +#define MSGQ_MSG_NOTDONE 0x1 /* set when EOR of a msg is not seen */ + u_int32_t msgq_bytes; /* data bytes in this queue */ }; struct msg_state { @@ -143,7 +143,7 @@ struct msg_state { }; /* mbuf flag used to indicate out of order data received */ -#define M_UNORDERED_DATA M_PROTO1 +#define M_UNORDERED_DATA M_PROTO1 /* * Kernel structure per socket. @@ -152,14 +152,14 @@ struct msg_state { * private data and error information. 
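 *
 * Conceptually (a sketch, not a verbatim call site), kernel code reaches
 * the owning protocol through the switch attached below; e.g. the send
 * path dispatches roughly as
 *
 *	error = (*so->so_proto->pr_usrreqs->pru_send)(so, flags, m,
 *	    addr, control, p);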
*/ struct socket { - int so_zone; /* zone we were allocated from */ - short so_type; /* generic type, see socket.h */ - u_short so_error; /* error affecting connection */ - u_int32_t so_options; /* from socket call, see socket.h */ - short so_linger; /* time to linger while closing */ - short so_state; /* internal state flags SS_*, below */ - void *so_pcb; /* protocol control block */ - struct protosw *so_proto; /* protocol handle */ + int so_zone; /* zone we were allocated from */ + short so_type; /* generic type, see socket.h */ + u_short so_error; /* error affecting connection */ + u_int32_t so_options; /* from socket call, see socket.h */ + short so_linger; /* time to linger while closing */ + short so_state; /* internal state flags SS_*, below */ + void *so_pcb; /* protocol control block */ + struct protosw *so_proto; /* protocol handle */ /* * Variables for connection queueing. * Socket where accepts occur is so_head in all subsidiary sockets. @@ -171,147 +171,147 @@ struct socket { * We allow connections to queue up based on current queue lengths * and limit on number of queued connections for this socket. */ - struct socket *so_head; /* back pointer to accept socket */ - TAILQ_HEAD(, socket) so_incomp; /* q of partially unaccepted conns */ - TAILQ_HEAD(, socket) so_comp; /* q of complete unaccepted conns */ - TAILQ_ENTRY(socket) so_list; /* list of unaccepted connections */ - short so_qlen; /* number of unaccepted connections */ - short so_incqlen; /* number of unaccepted incomplete - connections */ - short so_qlimit; /* max number queued connections */ - short so_timeo; /* connection timeout */ - pid_t so_pgid; /* pgid for signals */ - u_int32_t so_oobmark; /* chars to oob mark */ + struct socket *so_head; /* back pointer to accept socket */ + TAILQ_HEAD(, socket) so_incomp; /* q of partially unaccepted conns */ + TAILQ_HEAD(, socket) so_comp; /* q of complete unaccepted conns */ + TAILQ_ENTRY(socket) so_list; /* list of unaccepted connections */ + short so_qlen; /* number of unaccepted connections */ + short so_incqlen; /* number of unaccepted incomplete + * connections */ + short so_qlimit; /* max number queued connections */ + short so_timeo; /* connection timeout */ + pid_t so_pgid; /* pgid for signals */ + u_int32_t so_oobmark; /* chars to oob mark */ /* * Variables for socket buffering. 
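 *
 * The free space of a buffer is derived from the byte and mbuf accounting
 * fields below; as a sketch of the classic BSD sbspace() computation:
 *
 *	space = imin((int)(sb->sb_hiwat - sb->sb_cc),
 *	    (int)(sb->sb_mbmax - sb->sb_mbcnt));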
*/ struct sockbuf { - u_int32_t sb_cc; /* actual chars in buffer */ - u_int32_t sb_hiwat; /* max actual char count */ - u_int32_t sb_mbcnt; /* chars of mbufs used */ - u_int32_t sb_mbmax; /* max chars of mbufs to use */ - u_int32_t sb_ctl; /* non-data chars in buffer */ - u_int32_t sb_lowat; /* low water mark */ - struct mbuf *sb_mb; /* the mbuf chain */ - struct mbuf *sb_mbtail; /* the last mbuf in the chain */ - struct mbuf *sb_lastrecord; /* first mbuf of last record */ - struct socket *sb_so; /* socket back ptr for kexts */ - struct selinfo sb_sel; /* process selecting rd/wr */ - struct timeval sb_timeo; /* timeout for read/write */ - u_int32_t sb_flags; /* flags, see below */ - u_int32_t sb_idealsize; /* Ideal size for the sb based - on bandwidth and delay */ - void (*sb_upcall)(struct socket *, void *arg, int waitf); - void *sb_upcallarg; /* Arg for above */ - u_int32_t sb_wantlock; /* # of SB_LOCK waiters */ - u_int32_t sb_waiters; /* # of data/space waiters */ - thread_t sb_cfil_thread; /* content filter thread */ - u_int32_t sb_cfil_refs; /* # of nested calls */ - u_int32_t sb_preconn_hiwat; /* preconnect hiwat mark */ + u_int32_t sb_cc; /* actual chars in buffer */ + u_int32_t sb_hiwat; /* max actual char count */ + u_int32_t sb_mbcnt; /* chars of mbufs used */ + u_int32_t sb_mbmax; /* max chars of mbufs to use */ + u_int32_t sb_ctl; /* non-data chars in buffer */ + u_int32_t sb_lowat; /* low water mark */ + struct mbuf *sb_mb; /* the mbuf chain */ + struct mbuf *sb_mbtail; /* the last mbuf in the chain */ + struct mbuf *sb_lastrecord; /* first mbuf of last record */ + struct socket *sb_so; /* socket back ptr for kexts */ + struct selinfo sb_sel; /* process selecting rd/wr */ + struct timeval sb_timeo; /* timeout for read/write */ + u_int32_t sb_flags; /* flags, see below */ + u_int32_t sb_idealsize; /* Ideal size for the sb based + * on bandwidth and delay */ + void (*sb_upcall)(struct socket *, void *arg, int waitf); + void *sb_upcallarg; /* Arg for above */ + u_int32_t sb_wantlock; /* # of SB_LOCK waiters */ + u_int32_t sb_waiters; /* # of data/space waiters */ + thread_t sb_cfil_thread; /* content filter thread */ + u_int32_t sb_cfil_refs; /* # of nested calls */ + u_int32_t sb_preconn_hiwat; /* preconnect hiwat mark */ } so_rcv, so_snd; -#define SB_MAX (8192*1024) /* default for max chars in sockbuf */ -#define LOW_SB_MAX (2*9*1024) /* lower limit on max socket buffer - size, 2 max datagrams */ -#define SB_LOCK 0x1 /* lock on data queue */ -#define SB_NOINTR 0x2 /* operations not interruptible */ -#define SB_RECV 0x4 /* this is rcv sb */ -#define SB_SEL 0x8 /* someone is selecting */ -#define SB_ASYNC 0x10 /* ASYNC I/O, need signals */ -#define SB_UPCALL 0x20 /* someone wants an upcall */ -#define SB_KNOTE 0x40 /* kernel note attached */ -#define SB_DROP 0x80 /* does not accept any more data */ -#define SB_UNIX 0x100 /* UNIX domain socket buffer */ -#define SB_USRSIZE 0x200 /* user specified sbreserve */ -#define SB_AUTOSIZE 0x400 /* automatically size socket buffer */ -#define SB_TRIM 0x800 /* Trim the socket buffer */ -#define SB_NOCOMPRESS 0x1000 /* do not compress socket buffer */ -#define SB_SNDBYTE_CNT 0x2000 /* keep track of snd bytes per interface */ -#define SB_UPCALL_LOCK 0x4000 /* Keep socket locked when doing the upcall */ - caddr_t so_tpcb; /* Misc. 
protocol control block, used - by some kexts */ - - void (*so_event)(struct socket *, void *, u_int32_t); - void *so_eventarg; /* Arg for above */ - kauth_cred_t so_cred; /* cred of who opened the socket */ +#define SB_MAX (8192*1024) /* default for max chars in sockbuf */ +#define LOW_SB_MAX (2*9*1024) /* lower limit on max socket buffer + * size, 2 max datagrams */ +#define SB_LOCK 0x1 /* lock on data queue */ +#define SB_NOINTR 0x2 /* operations not interruptible */ +#define SB_RECV 0x4 /* this is rcv sb */ +#define SB_SEL 0x8 /* someone is selecting */ +#define SB_ASYNC 0x10 /* ASYNC I/O, need signals */ +#define SB_UPCALL 0x20 /* someone wants an upcall */ +#define SB_KNOTE 0x40 /* kernel note attached */ +#define SB_DROP 0x80 /* does not accept any more data */ +#define SB_UNIX 0x100 /* UNIX domain socket buffer */ +#define SB_USRSIZE 0x200 /* user specified sbreserve */ +#define SB_AUTOSIZE 0x400 /* automatically size socket buffer */ +#define SB_TRIM 0x800 /* Trim the socket buffer */ +#define SB_NOCOMPRESS 0x1000 /* do not compress socket buffer */ +#define SB_SNDBYTE_CNT 0x2000 /* keep track of snd bytes per interface */ +#define SB_UPCALL_LOCK 0x4000 /* Keep socket locked when doing the upcall */ + caddr_t so_tpcb; /* Misc. protocol control block, used + * by some kexts */ + + void (*so_event)(struct socket *, void *, u_int32_t); + void *so_eventarg; /* Arg for above */ + kauth_cred_t so_cred; /* cred of who opened the socket */ /* NB: generation count must not be first; easiest to make it last. */ - so_gen_t so_gencnt; /* generation count */ + so_gen_t so_gencnt; /* generation count */ TAILQ_HEAD(, eventqelt) so_evlist; - STAILQ_ENTRY(socket) so_cache_ent; /* socache entry */ - caddr_t so_saved_pcb; /* Saved pcb when cacheing */ - u_int32_t cache_timestamp; /* time socket was cached */ + STAILQ_ENTRY(socket) so_cache_ent; /* socache entry */ + caddr_t so_saved_pcb; /* Saved pcb when cacheing */ + u_int32_t cache_timestamp; /* time socket was cached */ - pid_t last_pid; /* pid of most recent accessor */ - u_int64_t last_upid; /* upid of most recent accessor */ + pid_t last_pid; /* pid of most recent accessor */ + u_int64_t last_upid; /* upid of most recent accessor */ - struct mbuf *so_temp; /* Holding area for outbound frags */ + struct mbuf *so_temp; /* Holding area for outbound frags */ /* Plug-in support - make the socket interface overridable */ - struct mbuf *so_tail; - struct socket_filter_entry *so_filt; /* NKE hook */ - u_int32_t so_flags; /* Flags */ -#define SOF_NOSIGPIPE 0x00000001 -#define SOF_NOADDRAVAIL 0x00000002 /* EADDRNOTAVAIL if src addr is gone */ -#define SOF_PCBCLEARING 0x00000004 /* pru_disconnect done; don't - call pru_detach */ -#define SOF_DEFUNCT 0x00000008 /* socket marked as inactive */ -#define SOF_CLOSEWAIT 0x00000010 /* blocked in close awaiting some events */ -#define SOF_REUSESHAREUID 0x00000040 /* Allows SO_REUSEADDR/SO_REUSEPORT - for multiple so_uid */ -#define SOF_MULTIPAGES 0x00000080 /* jumbo clusters may be used for sosend */ -#define SOF_ABORTED 0x00000100 /* soabort was already called once */ -#define SOF_OVERFLOW 0x00000200 /* socket was dropped as overflow of - listen q */ -#define SOF_NOTIFYCONFLICT 0x00000400 /* notify that a bind was done on a - port already in use */ -#define SOF_UPCALLCLOSEWAIT 0x00000800 /* block close until upcall returns */ -#define SOF_BINDRANDOMPORT 0x00001000 /* Randomized port number for bind */ -#define SOF_NPX_SETOPTSHUT 0x00002000 /* Non POSIX extension to allow - setsockopt(2) after shut down */ 
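The four accounting fields at the top of struct sockbuf come in two pairs: queued data (sb_cc) against its high-water mark (sb_hiwat), and mbuf storage (sb_mbcnt) against its cap (sb_mbmax). Free room is whichever gap closes first, which is essentially what the kernel's sbspace() returns; below is a clamped sketch over a trimmed-down struct (sockbuf_view and sbspace_sketch are illustrative names, not the kernel's).

#include <stdint.h>

/* Just the accounting fields of struct sockbuf, for illustration. */
struct sockbuf_view {
    uint32_t sb_cc;     /* bytes of data queued */
    uint32_t sb_hiwat;  /* data high-water mark */
    uint32_t sb_mbcnt;  /* mbuf storage in use */
    uint32_t sb_mbmax;  /* mbuf storage limit */
};

/*
 * Free room in a socket buffer: whichever runs out first, data
 * bytes against sb_hiwat or mbuf storage against sb_mbmax.
 */
static int64_t
sbspace_sketch(const struct sockbuf_view *sb)
{
    int64_t data = (int64_t)sb->sb_hiwat - (int64_t)sb->sb_cc;
    int64_t mb = (int64_t)sb->sb_mbmax - (int64_t)sb->sb_mbcnt;
    int64_t space = data < mb ? data : mb;
    return space < 0 ? 0 : space;
}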
-#define SOF_RECV_TRAFFIC_CLASS 0x00004000 /* Receive TC as ancillary data */ -#define SOF_NODEFUNCT 0x00008000 /* socket cannot be defunct'd */ -#define SOF_PRIVILEGED_TRAFFIC_CLASS 0x00010000 /* traffic class is privileged */ -#define SOF_SUSPENDED 0x00020000 /* i/f output queue is suspended */ -#define SOF_INCOMP_INPROGRESS 0x00040000 /* incomp socket is being processed */ -#define SOF_NOTSENT_LOWAT 0x00080000 /* A different lowat on not sent - data has been set */ -#define SOF_KNOTE 0x00100000 /* socket is on the EV_SOCK klist */ -#define SOF_USELRO 0x00200000 /* TCP must use LRO on these sockets */ -#define SOF_ENABLE_MSGS 0x00400000 /* TCP must enable message delivery */ -#define SOF_FLOW_DIVERT 0x00800000 /* Flow Divert is enabled */ -#define SOF_MP_SUBFLOW 0x01000000 /* is a multipath subflow socket */ -#define SOF_MP_SEC_SUBFLOW 0x04000000 /* Set up secondary flow */ -#define SOF_MP_TRYFAILOVER 0x08000000 /* Failing subflow */ -#define SOF_DELEGATED 0x10000000 /* on behalf of another process */ -#define SOF_CONTENT_FILTER 0x20000000 /* Content filter enabled */ - - uint32_t so_upcallusecount; /* number of upcalls in progress */ - int so_usecount; /* refcounting of socket use */; - int so_retaincnt; - u_int32_t so_filteruse; /* usecount for the socket filters */ - u_int16_t so_traffic_class; - int8_t so_netsvctype; - u_int8_t so_restrictions; - thread_t so_send_filt_thread; + struct mbuf *so_tail; + struct socket_filter_entry *so_filt; /* NKE hook */ + u_int32_t so_flags; /* Flags */ +#define SOF_NOSIGPIPE 0x00000001 +#define SOF_NOADDRAVAIL 0x00000002 /* EADDRNOTAVAIL if src addr is gone */ +#define SOF_PCBCLEARING 0x00000004 /* pru_disconnect done; don't + * call pru_detach */ +#define SOF_DEFUNCT 0x00000008 /* socket marked as inactive */ +#define SOF_CLOSEWAIT 0x00000010 /* blocked in close awaiting some events */ +#define SOF_REUSESHAREUID 0x00000040 /* Allows SO_REUSEADDR/SO_REUSEPORT + * for multiple so_uid */ +#define SOF_MULTIPAGES 0x00000080 /* jumbo clusters may be used for sosend */ +#define SOF_ABORTED 0x00000100 /* soabort was already called once */ +#define SOF_OVERFLOW 0x00000200 /* socket was dropped as overflow of + * listen q */ +#define SOF_NOTIFYCONFLICT 0x00000400 /* notify that a bind was done on a + * port already in use */ +#define SOF_UPCALLCLOSEWAIT 0x00000800 /* block close until upcall returns */ +#define SOF_BINDRANDOMPORT 0x00001000 /* Randomized port number for bind */ +#define SOF_NPX_SETOPTSHUT 0x00002000 /* Non POSIX extension to allow + * setsockopt(2) after shut down */ +#define SOF_RECV_TRAFFIC_CLASS 0x00004000 /* Receive TC as ancillary data */ +#define SOF_NODEFUNCT 0x00008000 /* socket cannot be defunct'd */ +#define SOF_PRIVILEGED_TRAFFIC_CLASS 0x00010000 /* traffic class is privileged */ +#define SOF_SUSPENDED 0x00020000 /* i/f output queue is suspended */ +#define SOF_INCOMP_INPROGRESS 0x00040000 /* incomp socket is being processed */ +#define SOF_NOTSENT_LOWAT 0x00080000 /* A different lowat on not sent + * data has been set */ +#define SOF_KNOTE 0x00100000 /* socket is on the EV_SOCK klist */ +#define SOF_USELRO 0x00200000 /* TCP must use LRO on these sockets */ +#define SOF_ENABLE_MSGS 0x00400000 /* TCP must enable message delivery */ +#define SOF_FLOW_DIVERT 0x00800000 /* Flow Divert is enabled */ +#define SOF_MP_SUBFLOW 0x01000000 /* is a multipath subflow socket */ +#define SOF_MP_SEC_SUBFLOW 0x04000000 /* Set up secondary flow */ +#define SOF_MP_TRYFAILOVER 0x08000000 /* Failing subflow */ +#define SOF_DELEGATED 0x10000000 /* on 
behalf of another process */ +#define SOF_CONTENT_FILTER 0x20000000 /* Content filter enabled */ + + uint32_t so_upcallusecount; /* number of upcalls in progress */ + int so_usecount; /* refcounting of socket use */; + int so_retaincnt; + u_int32_t so_filteruse; /* usecount for the socket filters */ + u_int16_t so_traffic_class; + int8_t so_netsvctype; + u_int8_t so_restrictions; + thread_t so_send_filt_thread; /* for debug pruposes */ -#define SO_LCKDBG_MAX 4 /* number of debug locking Link Registers recorded */ - void *lock_lr[SO_LCKDBG_MAX]; /* locking calling history */ - void *unlock_lr[SO_LCKDBG_MAX]; /* unlocking caller history */ - u_int8_t next_lock_lr; - u_int8_t next_unlock_lr; +#define SO_LCKDBG_MAX 4 /* number of debug locking Link Registers recorded */ + void *lock_lr[SO_LCKDBG_MAX]; /* locking calling history */ + void *unlock_lr[SO_LCKDBG_MAX]; /* unlocking caller history */ + u_int8_t next_lock_lr; + u_int8_t next_unlock_lr; - u_int16_t so_pktheadroom; /* headroom before packet payload */ + u_int16_t so_pktheadroom; /* headroom before packet payload */ - u_int32_t so_ifdenied_notifies; /* # of notifications generated */ + u_int32_t so_ifdenied_notifies; /* # of notifications generated */ - struct label *so_label; /* MAC label for socket */ - struct label *so_peerlabel; /* cached MAC label for socket peer */ - thread_t so_background_thread; /* thread that marked - this socket background */ + struct label *so_label; /* MAC label for socket */ + struct label *so_peerlabel; /* cached MAC label for socket peer */ + thread_t so_background_thread; /* thread that marked + * this socket background */ struct data_stats so_tc_stats[SO_TC_STATS_MAX]; - struct klist so_klist; /* klist for EV_SOCK events */ + struct klist so_klist; /* klist for EV_SOCK events */ - struct msg_state *so_msg_state; /* unordered snd/rcv state */ - struct flow_divert_pcb *so_fd_pcb; /* Flow Divert control block */ + struct msg_state *so_msg_state; /* unordered snd/rcv state */ + struct flow_divert_pcb *so_fd_pcb; /* Flow Divert control block */ #if CONTENT_FILTER struct cfil_info *so_cfil; @@ -319,235 +319,235 @@ struct socket { u_int32_t so_state_change_cnt; /* incr for each connect, disconnect */ #endif - u_int32_t so_eventmask; /* event mask */ - - pid_t e_pid; /* pid of the effective owner */ - u_int64_t e_upid; /* upid of the effective owner */ - - uuid_t last_uuid; /* uuid of most recent accessor */ - uuid_t e_uuid; /* uuid of effective owner */ - uuid_t so_vuuid; /* UUID of the Voucher originator */ - - int32_t so_policy_gencnt; /* UUID policy gencnt */ - - u_int32_t so_flags1; -#define SOF1_POST_FALLBACK_SYNC 0x00000001 /* fallback to TCP */ -#define SOF1_AWDL_PRIVILEGED 0x00000002 /* unused */ -#define SOF1_IF_2KCL 0x00000004 /* interface prefers 2 KB clusters */ -#define SOF1_DEFUNCTINPROG 0x00000008 -#define SOF1_DATA_IDEMPOTENT 0x00000010 /* idempotent data for TFO */ -#define SOF1_PRECONNECT_DATA 0x00000020 /* request for preconnect data */ -#define SOF1_EXTEND_BK_IDLE_WANTED 0x00000040 /* option set */ -#define SOF1_EXTEND_BK_IDLE_INPROG 0x00000080 /* socket */ -#define SOF1_CACHED_IN_SOCK_LAYER 0x00000100 /* bundled with inpcb and - tcpcb */ -#define SOF1_TFO_REWIND 0x00000200 /* rewind mptcp meta data */ -#define SOF1_CELLFALLBACK 0x00000400 /* Initiated by cell fallback */ -#define SOF1_QOSMARKING_ALLOWED 0x00000800 /* policy allows DSCP map */ -#define SOF1_TC_NET_SERV_TYPE 0x00001000 /* traffic class set by SO_NETWORK_SERVICE_TYPE */ -#define SOF1_TRAFFIC_MGT_SO_BACKGROUND 0x00002000 /* 
background socket */ -#define SOF1_TRAFFIC_MGT_TCP_RECVBG 0x00004000 /* Only TCP sockets, receiver throttling */ -#define SOF1_QOSMARKING_POLICY_OVERRIDE 0x00008000 /* Opt-out of QoS marking NECP policy */ -#define SOF1_DATA_AUTHENTICATED 0x00010000 /* idempotent data is authenticated */ -#define SOF1_ACCEPT_LIST_HELD 0x00020000 /* Another thread is accessing one of the accept lists */ -#define SOF1_CONTENT_FILTER_SKIP 0x00040000 /* Content filter should be skipped, socket is blessed */ -#define SOF1_HAS_NECP_CLIENT_UUID 0x00080000 /* NECP client UUID option set */ -#define SOF1_IN_KERNEL_SOCKET 0x00100000 /* Socket created in kernel via KPI */ -#define SOF1_CONNECT_COUNTED 0x00200000 /* connect() call was counted */ -#define SOF1_DNS_COUNTED 0x00400000 /* socket counted to send DNS queries */ - - u_int64_t so_extended_bk_start; + u_int32_t so_eventmask; /* event mask */ + + pid_t e_pid; /* pid of the effective owner */ + u_int64_t e_upid; /* upid of the effective owner */ + + uuid_t last_uuid; /* uuid of most recent accessor */ + uuid_t e_uuid; /* uuid of effective owner */ + uuid_t so_vuuid; /* UUID of the Voucher originator */ + + int32_t so_policy_gencnt; /* UUID policy gencnt */ + + u_int32_t so_flags1; +#define SOF1_POST_FALLBACK_SYNC 0x00000001 /* fallback to TCP */ +#define SOF1_AWDL_PRIVILEGED 0x00000002 /* unused */ +#define SOF1_IF_2KCL 0x00000004 /* interface prefers 2 KB clusters */ +#define SOF1_DEFUNCTINPROG 0x00000008 +#define SOF1_DATA_IDEMPOTENT 0x00000010 /* idempotent data for TFO */ +#define SOF1_PRECONNECT_DATA 0x00000020 /* request for preconnect data */ +#define SOF1_EXTEND_BK_IDLE_WANTED 0x00000040 /* option set */ +#define SOF1_EXTEND_BK_IDLE_INPROG 0x00000080 /* socket */ +#define SOF1_CACHED_IN_SOCK_LAYER 0x00000100 /* bundled with inpcb and + * tcpcb */ +#define SOF1_TFO_REWIND 0x00000200 /* rewind mptcp meta data */ +#define SOF1_CELLFALLBACK 0x00000400 /* Initiated by cell fallback */ +#define SOF1_QOSMARKING_ALLOWED 0x00000800 /* policy allows DSCP map */ +#define SOF1_TC_NET_SERV_TYPE 0x00001000 /* traffic class set by SO_NETWORK_SERVICE_TYPE */ +#define SOF1_TRAFFIC_MGT_SO_BACKGROUND 0x00002000 /* background socket */ +#define SOF1_TRAFFIC_MGT_TCP_RECVBG 0x00004000 /* Only TCP sockets, receiver throttling */ +#define SOF1_QOSMARKING_POLICY_OVERRIDE 0x00008000 /* Opt-out of QoS marking NECP policy */ +#define SOF1_DATA_AUTHENTICATED 0x00010000 /* idempotent data is authenticated */ +#define SOF1_ACCEPT_LIST_HELD 0x00020000 /* Another thread is accessing one of the accept lists */ +#define SOF1_CONTENT_FILTER_SKIP 0x00040000 /* Content filter should be skipped, socket is blessed */ +#define SOF1_HAS_NECP_CLIENT_UUID 0x00080000 /* NECP client UUID option set */ +#define SOF1_IN_KERNEL_SOCKET 0x00100000 /* Socket created in kernel via KPI */ +#define SOF1_CONNECT_COUNTED 0x00200000 /* connect() call was counted */ +#define SOF1_DNS_COUNTED 0x00400000 /* socket counted to send DNS queries */ + + u_int64_t so_extended_bk_start; }; /* Control message accessor in mbufs */ -#define _MIN_NXT_CMSGHDR_PTR(cmsg) \ - ((char *)(cmsg) + \ - __DARWIN_ALIGN32((__uint32_t)(cmsg)->cmsg_len) + \ +#define _MIN_NXT_CMSGHDR_PTR(cmsg) \ + ((char *)(cmsg) + \ + __DARWIN_ALIGN32((__uint32_t)(cmsg)->cmsg_len) + \ __DARWIN_ALIGN32(sizeof(struct cmsghdr))) -#define M_FIRST_CMSGHDR(m) \ - ((char *)(m) != (char *)0L && \ - (size_t)(m)->m_len >= sizeof (struct cmsghdr) && \ - (socklen_t)(m)->m_len >= \ +#define M_FIRST_CMSGHDR(m) \ + ((char *)(m) != (char *)0L && \ + (size_t)(m)->m_len >= 
sizeof (struct cmsghdr) && \ + (socklen_t)(m)->m_len >= \ __DARWIN_ALIGN32(((struct cmsghdr *)(void *)(m)->m_data)->cmsg_len) ? \ (struct cmsghdr *)(void *)(m)->m_data : (struct cmsghdr *)0L) -#define M_NXT_CMSGHDR(m, cmsg) \ - ((char *)(cmsg) == (char *)0L ? M_FIRST_CMSGHDR(m) : \ +#define M_NXT_CMSGHDR(m, cmsg) \ + ((char *)(cmsg) == (char *)0L ? M_FIRST_CMSGHDR(m) : \ _MIN_NXT_CMSGHDR_PTR(cmsg) > ((char *)(m)->m_data) + (m)->m_len || \ - _MIN_NXT_CMSGHDR_PTR(cmsg) < (char *)(m)->m_data ? \ - (struct cmsghdr *)0L /* NULL */ : \ - (struct cmsghdr *)(void *)((unsigned char *)(cmsg) + \ + _MIN_NXT_CMSGHDR_PTR(cmsg) < (char *)(m)->m_data ? \ + (struct cmsghdr *)0L /* NULL */ : \ + (struct cmsghdr *)(void *)((unsigned char *)(cmsg) + \ __DARWIN_ALIGN32((__uint32_t)(cmsg)->cmsg_len))) /* * Socket state bits. */ -#define SS_NOFDREF 0x0001 /* no file table ref any more */ -#define SS_ISCONNECTED 0x0002 /* socket connected to a peer */ -#define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */ -#define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */ -#define SS_CANTSENDMORE 0x0010 /* can't send more data to peer */ -#define SS_CANTRCVMORE 0x0020 /* can't receive more data from peer */ -#define SS_RCVATMARK 0x0040 /* at mark on input */ - -#define SS_PRIV 0x0080 /* privileged for broadcast, raw... */ -#define SS_NBIO 0x0100 /* non-blocking ops */ -#define SS_ASYNC 0x0200 /* async i/o notify */ -#define SS_ISCONFIRMING 0x0400 /* deciding to accept connection req */ -#define SS_INCOMP 0x0800 /* Unaccepted, incomplete connection */ -#define SS_COMP 0x1000 /* unaccepted, complete connection */ -#define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */ -#define SS_DRAINING 0x4000 /* close waiting for blocked system - calls to drain */ -#define SS_DEFUNCT 0x8000 /* has been fully defunct'd */ +#define SS_NOFDREF 0x0001 /* no file table ref any more */ +#define SS_ISCONNECTED 0x0002 /* socket connected to a peer */ +#define SS_ISCONNECTING 0x0004 /* in process of connecting to peer */ +#define SS_ISDISCONNECTING 0x0008 /* in process of disconnecting */ +#define SS_CANTSENDMORE 0x0010 /* can't send more data to peer */ +#define SS_CANTRCVMORE 0x0020 /* can't receive more data from peer */ +#define SS_RCVATMARK 0x0040 /* at mark on input */ + +#define SS_PRIV 0x0080 /* privileged for broadcast, raw... */ +#define SS_NBIO 0x0100 /* non-blocking ops */ +#define SS_ASYNC 0x0200 /* async i/o notify */ +#define SS_ISCONFIRMING 0x0400 /* deciding to accept connection req */ +#define SS_INCOMP 0x0800 /* Unaccepted, incomplete connection */ +#define SS_COMP 0x1000 /* unaccepted, complete connection */ +#define SS_ISDISCONNECTED 0x2000 /* socket disconnected from peer */ +#define SS_DRAINING 0x4000 /* close waiting for blocked system + * calls to drain */ +#define SS_DEFUNCT 0x8000 /* has been fully defunct'd */ #endif /* KERNEL_PRIVATE */ #if defined(__LP64__) -#define _XSOCKET_PTR(x) u_int32_t +#define _XSOCKET_PTR(x) u_int32_t #else -#define _XSOCKET_PTR(x) x +#define _XSOCKET_PTR(x) x #endif #ifdef PRIVATE /* Flags returned in data field for EVFILT_SOCK events. 
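M_FIRST_CMSGHDR and M_NXT_CMSGHDR above are the kernel's mbuf-side version of the usual control-message walk: validate that the claimed cmsg_len actually fits in the buffer before trusting it, then step forward by the 32-bit-aligned length. From userspace the same traversal is spelled with the standard CMSG_FIRSTHDR/CMSG_NXTHDR macros over a received msghdr:

#include <sys/socket.h>
#include <stdio.h>

/* Walk every control message attached to a received msghdr. */
static void
dump_cmsgs(struct msghdr *msg)
{
    for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
        cmsg != NULL;
        cmsg = CMSG_NXTHDR(msg, cmsg)) {
        printf("level %d type %d len %u\n",
            cmsg->cmsg_level, cmsg->cmsg_type,
            (unsigned)cmsg->cmsg_len);
    }
}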
*/ -#define SOCKEV_CONNECTED 0x00000001 /* connected */ -#define SOCKEV_DISCONNECTED 0x00000002 /* disconnected */ +#define SOCKEV_CONNECTED 0x00000001 /* connected */ +#define SOCKEV_DISCONNECTED 0x00000002 /* disconnected */ #endif /* PRIVATE */ #pragma pack(4) struct xsockbuf { - u_int32_t sb_cc; - u_int32_t sb_hiwat; - u_int32_t sb_mbcnt; - u_int32_t sb_mbmax; - int32_t sb_lowat; - short sb_flags; - short sb_timeo; + u_int32_t sb_cc; + u_int32_t sb_hiwat; + u_int32_t sb_mbcnt; + u_int32_t sb_mbmax; + int32_t sb_lowat; + short sb_flags; + short sb_timeo; }; /* * Externalized form of struct socket used by the sysctl(3) interface. */ -struct xsocket { - u_int32_t xso_len; /* length of this structure */ - _XSOCKET_PTR(struct socket *) xso_so; /* makes a convenient handle */ - short so_type; - short so_options; - short so_linger; - short so_state; - _XSOCKET_PTR(caddr_t) so_pcb; /* another convenient handle */ - int xso_protocol; - int xso_family; - short so_qlen; - short so_incqlen; - short so_qlimit; - short so_timeo; - u_short so_error; - pid_t so_pgid; - u_int32_t so_oobmark; - struct xsockbuf so_rcv; - struct xsockbuf so_snd; - uid_t so_uid; /* XXX */ +struct xsocket { + u_int32_t xso_len; /* length of this structure */ + _XSOCKET_PTR(struct socket *) xso_so; /* makes a convenient handle */ + short so_type; + short so_options; + short so_linger; + short so_state; + _XSOCKET_PTR(caddr_t) so_pcb; /* another convenient handle */ + int xso_protocol; + int xso_family; + short so_qlen; + short so_incqlen; + short so_qlimit; + short so_timeo; + u_short so_error; + pid_t so_pgid; + u_int32_t so_oobmark; + struct xsockbuf so_rcv; + struct xsockbuf so_snd; + uid_t so_uid; /* XXX */ }; #if !CONFIG_EMBEDDED -struct xsocket64 { - u_int32_t xso_len; /* length of this structure */ - u_int64_t xso_so; /* makes a convenient handle */ - short so_type; - short so_options; - short so_linger; - short so_state; - u_int64_t so_pcb; /* another convenient handle */ - int xso_protocol; - int xso_family; - short so_qlen; - short so_incqlen; - short so_qlimit; - short so_timeo; - u_short so_error; - pid_t so_pgid; - u_int32_t so_oobmark; - struct xsockbuf so_rcv; - struct xsockbuf so_snd; - uid_t so_uid; /* XXX */ +struct xsocket64 { + u_int32_t xso_len; /* length of this structure */ + u_int64_t xso_so; /* makes a convenient handle */ + short so_type; + short so_options; + short so_linger; + short so_state; + u_int64_t so_pcb; /* another convenient handle */ + int xso_protocol; + int xso_family; + short so_qlen; + short so_incqlen; + short so_qlimit; + short so_timeo; + u_short so_error; + pid_t so_pgid; + u_int32_t so_oobmark; + struct xsockbuf so_rcv; + struct xsockbuf so_snd; + uid_t so_uid; /* XXX */ }; #endif /* !CONFIG_EMBEDDED */ #ifdef PRIVATE -#define XSO_SOCKET 0x001 -#define XSO_RCVBUF 0x002 -#define XSO_SNDBUF 0x004 -#define XSO_STATS 0x008 -#define XSO_INPCB 0x010 -#define XSO_TCPCB 0x020 -#define XSO_KCREG 0x040 -#define XSO_KCB 0x080 -#define XSO_EVT 0x100 - -struct xsocket_n { - u_int32_t xso_len; /* length of this structure */ - u_int32_t xso_kind; /* XSO_SOCKET */ - u_int64_t xso_so; /* makes a convenient handle */ - short so_type; - u_int32_t so_options; - short so_linger; - short so_state; - u_int64_t so_pcb; /* another convenient handle */ - int xso_protocol; - int xso_family; - short so_qlen; - short so_incqlen; - short so_qlimit; - short so_timeo; - u_short so_error; - pid_t so_pgid; - u_int32_t so_oobmark; - uid_t so_uid; /* XXX */ - pid_t so_last_pid; - pid_t so_e_pid; +#define 
XSO_SOCKET 0x001 +#define XSO_RCVBUF 0x002 +#define XSO_SNDBUF 0x004 +#define XSO_STATS 0x008 +#define XSO_INPCB 0x010 +#define XSO_TCPCB 0x020 +#define XSO_KCREG 0x040 +#define XSO_KCB 0x080 +#define XSO_EVT 0x100 + +struct xsocket_n { + u_int32_t xso_len; /* length of this structure */ + u_int32_t xso_kind; /* XSO_SOCKET */ + u_int64_t xso_so; /* makes a convenient handle */ + short so_type; + u_int32_t so_options; + short so_linger; + short so_state; + u_int64_t so_pcb; /* another convenient handle */ + int xso_protocol; + int xso_family; + short so_qlen; + short so_incqlen; + short so_qlimit; + short so_timeo; + u_short so_error; + pid_t so_pgid; + u_int32_t so_oobmark; + uid_t so_uid; /* XXX */ + pid_t so_last_pid; + pid_t so_e_pid; }; struct xsockbuf_n { - u_int32_t xsb_len; /* length of this structure */ - u_int32_t xsb_kind; /* XSO_RCVBUF or XSO_SNDBUF */ - u_int32_t sb_cc; - u_int32_t sb_hiwat; - u_int32_t sb_mbcnt; - u_int32_t sb_mbmax; - int32_t sb_lowat; - short sb_flags; - short sb_timeo; + u_int32_t xsb_len; /* length of this structure */ + u_int32_t xsb_kind; /* XSO_RCVBUF or XSO_SNDBUF */ + u_int32_t sb_cc; + u_int32_t sb_hiwat; + u_int32_t sb_mbcnt; + u_int32_t sb_mbmax; + int32_t sb_lowat; + short sb_flags; + short sb_timeo; }; struct xsockstat_n { - u_int32_t xst_len; /* length of this structure */ - u_int32_t xst_kind; /* XSO_STATS */ - struct data_stats xst_tc_stats[SO_TC_STATS_MAX]; + u_int32_t xst_len; /* length of this structure */ + u_int32_t xst_kind; /* XSO_STATS */ + struct data_stats xst_tc_stats[SO_TC_STATS_MAX]; }; /* * Global socket statistics */ struct soextbkidlestat { - u_int32_t so_xbkidle_maxperproc; - u_int32_t so_xbkidle_time; - u_int32_t so_xbkidle_rcvhiwat; - int32_t so_xbkidle_notsupp; - int32_t so_xbkidle_toomany; - int32_t so_xbkidle_wantok; - int32_t so_xbkidle_active; - int32_t so_xbkidle_nocell; - int32_t so_xbkidle_notime; - int32_t so_xbkidle_forced; - int32_t so_xbkidle_resumed; - int32_t so_xbkidle_expired; - int32_t so_xbkidle_resched; - int32_t so_xbkidle_nodlgtd; - int32_t so_xbkidle_drained; + u_int32_t so_xbkidle_maxperproc; + u_int32_t so_xbkidle_time; + u_int32_t so_xbkidle_rcvhiwat; + int32_t so_xbkidle_notsupp; + int32_t so_xbkidle_toomany; + int32_t so_xbkidle_wantok; + int32_t so_xbkidle_active; + int32_t so_xbkidle_nocell; + int32_t so_xbkidle_notime; + int32_t so_xbkidle_forced; + int32_t so_xbkidle_resumed; + int32_t so_xbkidle_expired; + int32_t so_xbkidle_resched; + int32_t so_xbkidle_nodlgtd; + int32_t so_xbkidle_drained; }; #endif /* PRIVATE */ @@ -562,12 +562,12 @@ struct soextbkidlestat { */ enum sopt_dir { SOPT_GET, SOPT_SET }; struct sockopt { - enum sopt_dir sopt_dir; /* is this a get or a set? */ - int sopt_level; /* second arg of [gs]etsockopt */ - int sopt_name; /* third arg of [gs]etsockopt */ - user_addr_t sopt_val; /* fourth arg of [gs]etsockopt */ - size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */ - struct proc *sopt_p; /* calling process or null if kernel */ + enum sopt_dir sopt_dir; /* is this a get or a set? 
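Every xsocket_n/xsockbuf_n/xsockstat_n record above leads with a 32-bit length and a 32-bit XSO_* kind, so a consumer can walk a buffer of heterogeneous records without knowing each one's layout. A hedged sketch of that walk follows; xrecord_hdr and walk_xrecords are invented names, and the assumption that records are emitted back to back (in the style of the private pcblist sysctls) is mine, not stated in this header.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Common prefix every xso*_n record starts with. */
struct xrecord_hdr {
    uint32_t len;   /* total record length, including this header */
    uint32_t kind;  /* XSO_SOCKET, XSO_RCVBUF, XSO_SNDBUF, ... */
};

/* Visit each length-prefixed record in buf; stop on a bad length. */
static void
walk_xrecords(const uint8_t *buf, size_t buflen,
    void (*visit)(const struct xrecord_hdr *))
{
    size_t off = 0;
    while (off + sizeof(struct xrecord_hdr) <= buflen) {
        struct xrecord_hdr hdr;
        memcpy(&hdr, buf + off, sizeof(hdr));
        if (hdr.len < sizeof(hdr) || hdr.len > buflen - off) {
            break;  /* malformed record */
        }
        visit(&hdr);
        off += hdr.len;
    }
}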
*/ + int sopt_level; /* second arg of [gs]etsockopt */ + int sopt_name; /* third arg of [gs]etsockopt */ + user_addr_t sopt_val; /* fourth arg of [gs]etsockopt */ + size_t sopt_valsize; /* (almost) fifth arg of [gs]etsockopt */ + struct proc *sopt_p; /* calling process or null if kernel */ }; #ifdef MALLOC_DECLARE @@ -576,6 +576,9 @@ MALLOC_DECLARE(M_SONAME); #endif /* MALLOC_DECLARE */ #ifdef BSD_KERNEL_PRIVATE +struct cmsghdr; +extern boolean_t is_cmsg_valid(struct mbuf *control, struct cmsghdr *cmsg); + /* * Socket extension mechanism: control block hooks: * This is the "head" of any control block for an extenstion @@ -584,122 +587,122 @@ MALLOC_DECLARE(M_SONAME); * operation, e.g., to disable some functions. */ struct kextcb { - struct kextcb *e_next; /* Next kext control block */ - void *e_fcb; /* Real filter control block */ - struct NFDescriptor *e_nfd; /* NKE Descriptor */ + struct kextcb *e_next; /* Next kext control block */ + void *e_fcb; /* Real filter control block */ + struct NFDescriptor *e_nfd; /* NKE Descriptor */ /* Plug-in support - intercept functions */ - struct sockif *e_soif; /* Socket functions */ - struct sockutil *e_sout; /* Sockbuf utility functions */ + struct sockif *e_soif; /* Socket functions */ + struct sockutil *e_sout; /* Sockbuf utility functions */ }; -#define EXT_NULL 0x0 /* STATE: Not in use */ +#define EXT_NULL 0x0 /* STATE: Not in use */ /* Hints for socket event processing */ -#define SO_FILT_HINT_LOCKED 0x00000001 /* socket is already locked */ -#define SO_FILT_HINT_CONNRESET 0x00000002 /* Reset is received */ -#define SO_FILT_HINT_CANTRCVMORE 0x00000004 /* No more data to read */ -#define SO_FILT_HINT_CANTSENDMORE 0x00000008 /* Can't write more data */ -#define SO_FILT_HINT_TIMEOUT 0x00000010 /* timeout */ -#define SO_FILT_HINT_NOSRCADDR 0x00000020 /* No src address available */ -#define SO_FILT_HINT_IFDENIED 0x00000040 /* interface denied access */ -#define SO_FILT_HINT_SUSPEND 0x00000080 /* output queue suspended */ -#define SO_FILT_HINT_RESUME 0x00000100 /* output queue resumed */ -#define SO_FILT_HINT_KEEPALIVE 0x00000200 /* TCP Keepalive received */ -#define SO_FILT_HINT_ADAPTIVE_WTIMO 0x00000400 /* TCP adaptive write timeout */ -#define SO_FILT_HINT_ADAPTIVE_RTIMO 0x00000800 /* TCP adaptive read timeout */ -#define SO_FILT_HINT_CONNECTED 0x00001000 /* socket is connected */ -#define SO_FILT_HINT_DISCONNECTED 0x00002000 /* socket is disconnected */ -#define SO_FILT_HINT_CONNINFO_UPDATED 0x00004000 /* updated conninfo avail. 
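struct sockopt is just the five [gs]etsockopt(2) arguments captured at the kernel boundary, plus the direction. The userspace mapping is one-to-one; for example:

#include <sys/socket.h>
#include <stdio.h>

/*
 * Each argument below lands in one struct sockopt field:
 * SOL_SOCKET -> sopt_level, SO_RCVBUF -> sopt_name,
 * &rcvbuf -> sopt_val, sizeof(rcvbuf) -> sopt_valsize,
 * and the direction becomes sopt_dir (SOPT_GET here).
 */
static int
print_rcvbuf(int fd)
{
    int rcvbuf = 0;
    socklen_t len = sizeof(rcvbuf);

    if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, &len) == -1) {
        return -1;
    }
    printf("SO_RCVBUF = %d\n", rcvbuf);
    return 0;
}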
*/ -#define SO_FILT_HINT_MPFAILOVER 0x00008000 /* multipath failover */ -#define SO_FILT_HINT_MPSTATUS 0x00010000 /* multipath status */ -#define SO_FILT_HINT_MUSTRST 0x00020000 /* must send RST and close */ -#define SO_FILT_HINT_MPCANTRCVMORE 0x00040000 /* MPTCP DFIN Received */ -#define SO_FILT_HINT_NOTIFY_ACK 0x00080000 /* Notify Acknowledgement */ - -#define SO_FILT_HINT_BITS \ - "\020\1LOCKED\2CONNRESET\3CANTRCVMORE\4CANTSENDMORE\5TIMEOUT" \ - "\6NOSRCADDR\7IFDENIED\10SUSPEND\11RESUME\12KEEPALIVE\13AWTIMO" \ - "\14ARTIMO\15CONNECTED\16DISCONNECTED\17CONNINFO_UPDATED" \ +#define SO_FILT_HINT_LOCKED 0x00000001 /* socket is already locked */ +#define SO_FILT_HINT_CONNRESET 0x00000002 /* Reset is received */ +#define SO_FILT_HINT_CANTRCVMORE 0x00000004 /* No more data to read */ +#define SO_FILT_HINT_CANTSENDMORE 0x00000008 /* Can't write more data */ +#define SO_FILT_HINT_TIMEOUT 0x00000010 /* timeout */ +#define SO_FILT_HINT_NOSRCADDR 0x00000020 /* No src address available */ +#define SO_FILT_HINT_IFDENIED 0x00000040 /* interface denied access */ +#define SO_FILT_HINT_SUSPEND 0x00000080 /* output queue suspended */ +#define SO_FILT_HINT_RESUME 0x00000100 /* output queue resumed */ +#define SO_FILT_HINT_KEEPALIVE 0x00000200 /* TCP Keepalive received */ +#define SO_FILT_HINT_ADAPTIVE_WTIMO 0x00000400 /* TCP adaptive write timeout */ +#define SO_FILT_HINT_ADAPTIVE_RTIMO 0x00000800 /* TCP adaptive read timeout */ +#define SO_FILT_HINT_CONNECTED 0x00001000 /* socket is connected */ +#define SO_FILT_HINT_DISCONNECTED 0x00002000 /* socket is disconnected */ +#define SO_FILT_HINT_CONNINFO_UPDATED 0x00004000 /* updated conninfo avail. */ +#define SO_FILT_HINT_MPFAILOVER 0x00008000 /* multipath failover */ +#define SO_FILT_HINT_MPSTATUS 0x00010000 /* multipath status */ +#define SO_FILT_HINT_MUSTRST 0x00020000 /* must send RST and close */ +#define SO_FILT_HINT_MPCANTRCVMORE 0x00040000 /* MPTCP DFIN Received */ +#define SO_FILT_HINT_NOTIFY_ACK 0x00080000 /* Notify Acknowledgement */ + +#define SO_FILT_HINT_BITS \ + "\020\1LOCKED\2CONNRESET\3CANTRCVMORE\4CANTSENDMORE\5TIMEOUT" \ + "\6NOSRCADDR\7IFDENIED\10SUSPEND\11RESUME\12KEEPALIVE\13AWTIMO" \ + "\14ARTIMO\15CONNECTED\16DISCONNECTED\17CONNINFO_UPDATED" \ "\20MPFAILOVER\21MPSTATUS\22MUSTRST\23MPCANTRCVMORE\24NOTIFYACK" /* Mask for hints that have corresponding kqueue events */ -#define SO_FILT_HINT_EV \ - (SO_FILT_HINT_CONNRESET | SO_FILT_HINT_CANTRCVMORE | \ - SO_FILT_HINT_CANTSENDMORE | SO_FILT_HINT_TIMEOUT | \ - SO_FILT_HINT_NOSRCADDR | SO_FILT_HINT_IFDENIED | \ - SO_FILT_HINT_SUSPEND | SO_FILT_HINT_RESUME | \ - SO_FILT_HINT_KEEPALIVE | SO_FILT_HINT_ADAPTIVE_WTIMO | \ - SO_FILT_HINT_ADAPTIVE_RTIMO | SO_FILT_HINT_CONNECTED | \ - SO_FILT_HINT_DISCONNECTED | SO_FILT_HINT_CONNINFO_UPDATED | \ +#define SO_FILT_HINT_EV \ + (SO_FILT_HINT_CONNRESET | SO_FILT_HINT_CANTRCVMORE | \ + SO_FILT_HINT_CANTSENDMORE | SO_FILT_HINT_TIMEOUT | \ + SO_FILT_HINT_NOSRCADDR | SO_FILT_HINT_IFDENIED | \ + SO_FILT_HINT_SUSPEND | SO_FILT_HINT_RESUME | \ + SO_FILT_HINT_KEEPALIVE | SO_FILT_HINT_ADAPTIVE_WTIMO | \ + SO_FILT_HINT_ADAPTIVE_RTIMO | SO_FILT_HINT_CONNECTED | \ + SO_FILT_HINT_DISCONNECTED | SO_FILT_HINT_CONNINFO_UPDATED | \ SO_FILT_HINT_NOTIFY_ACK) #if SENDFILE struct sf_buf { - SLIST_ENTRY(sf_buf) free_list; /* list of free buffer slots */ - int refcnt; /* reference count */ - struct vm_page *m; /* currently mapped page */ - vm_offset_t kva; /* va of mapping */ + SLIST_ENTRY(sf_buf) free_list; /* list of free buffer slots */ + int refcnt; /* reference count */ + 
struct vm_page *m; /* currently mapped page */ + vm_offset_t kva; /* va of mapping */ }; #endif /* SENDFILE */ -#define SBLASTRECORDCHK(sb, s) \ +#define SBLASTRECORDCHK(sb, s) \ if (socket_debug) sblastrecordchk(sb, s); -#define SBLASTMBUFCHK(sb, s) \ +#define SBLASTMBUFCHK(sb, s) \ if (socket_debug) sblastmbufchk(sb, s); -#define SB_EMPTY_FIXUP(sb) { \ - if ((sb)->sb_mb == NULL) { \ - (sb)->sb_mbtail = NULL; \ - (sb)->sb_lastrecord = NULL; \ - } \ +#define SB_EMPTY_FIXUP(sb) { \ + if ((sb)->sb_mb == NULL) { \ + (sb)->sb_mbtail = NULL; \ + (sb)->sb_lastrecord = NULL; \ + } \ } -#define SB_MB_CHECK(sb) do { \ - if (((sb)->sb_mb != NULL && \ - (sb)->sb_cc == 0) || \ - ((sb)->sb_mb == NULL && (sb)->sb_cc > 0)) \ - panic("corrupt so_rcv: sb_mb %p sb_cc %d\n", \ - (sb)->sb_mb, (sb)->sb_cc); \ +#define SB_MB_CHECK(sb) do { \ + if (((sb)->sb_mb != NULL && \ + (sb)->sb_cc == 0) || \ + ((sb)->sb_mb == NULL && (sb)->sb_cc > 0)) \ + panic("corrupt so_rcv: sb_mb %p sb_cc %d\n", \ + (sb)->sb_mb, (sb)->sb_cc); \ } while (0) -#define SODEFUNCTLOG(fmt, ...) do { \ - if (sodefunctlog) \ - printf(fmt, __VA_ARGS__); \ +#define SODEFUNCTLOG(fmt, ...) do { \ + if (sodefunctlog) \ + printf(fmt, __VA_ARGS__); \ } while (0) -#define SOTHROTTLELOG(fmt, ...) do { \ - if (sothrottlelog) \ - printf(fmt, __VA_ARGS__); \ +#define SOTHROTTLELOG(fmt, ...) do { \ + if (sothrottlelog) \ + printf(fmt, __VA_ARGS__); \ } while (0) /* * For debugging traffic class behaviors */ -#define SOTCDB_RESERVED 0x01 -#define SOTCDB_NO_MTC 0x02 /* Do not set the mbuf traffic class */ -#define SOTCDB_NO_SENDTCPBG 0x04 /* Do not use background TCP CC algorithm for sender */ -#define SOTCDB_NO_LCLTST 0x08 /* Do not test for local destination for setting DSCP */ -#define SOTCDB_NO_DSCPTST 0x10 /* Overwritte any existing DSCP code */ -#define SOTCDB_NO_RECVTCPBG 0x20 /* Do not use throttling on receiver-side of TCP */ -#define SOTCDB_NO_PRIVILEGED 0x40 /* Do not set privileged traffic flag */ - -#define SOCK_DOM(so) ((so)->so_proto->pr_domain->dom_family) -#define SOCK_TYPE(so) ((so)->so_proto->pr_type) -#define SOCK_PROTO(so) ((so)->so_proto->pr_protocol) - -#define SOCK_CHECK_DOM(so, dom) (SOCK_DOM(so) == (dom)) -#define SOCK_CHECK_TYPE(so, type) (SOCK_TYPE(so) == (type)) -#define SOCK_CHECK_PROTO(so, proto) (SOCK_PROTO(so) == (proto)) +#define SOTCDB_RESERVED 0x01 +#define SOTCDB_NO_MTC 0x02 /* Do not set the mbuf traffic class */ +#define SOTCDB_NO_SENDTCPBG 0x04 /* Do not use background TCP CC algorithm for sender */ +#define SOTCDB_NO_LCLTST 0x08 /* Do not test for local destination for setting DSCP */ +#define SOTCDB_NO_DSCPTST 0x10 /* Overwritte any existing DSCP code */ +#define SOTCDB_NO_RECVTCPBG 0x20 /* Do not use throttling on receiver-side of TCP */ +#define SOTCDB_NO_PRIVILEGED 0x40 /* Do not set privileged traffic flag */ + +#define SOCK_DOM(so) ((so)->so_proto->pr_domain->dom_family) +#define SOCK_TYPE(so) ((so)->so_proto->pr_type) +#define SOCK_PROTO(so) ((so)->so_proto->pr_protocol) + +#define SOCK_CHECK_DOM(so, dom) (SOCK_DOM(so) == (dom)) +#define SOCK_CHECK_TYPE(so, type) (SOCK_TYPE(so) == (type)) +#define SOCK_CHECK_PROTO(so, proto) (SOCK_PROTO(so) == (proto)) /* * Socket process information */ struct so_procinfo { - pid_t spi_pid; - pid_t spi_epid; - uuid_t spi_uuid; - uuid_t spi_euuid; - int spi_delegated; + pid_t spi_pid; + pid_t spi_epid; + uuid_t spi_uuid; + uuid_t spi_euuid; + int spi_delegated; }; extern u_int32_t sb_max; @@ -725,8 +728,8 @@ extern u_int32_t net_io_policy_uuid; extern struct soextbkidlestat 
soextbkidlestat; struct net_qos_dscp_map { - u_int8_t sotc_to_dscp[SO_TC_MAX]; - u_int8_t netsvctype_to_dscp[_NET_SERVICE_TYPE_COUNT]; + u_int8_t sotc_to_dscp[SO_TC_MAX]; + u_int8_t netsvctype_to_dscp[_NET_SERVICE_TYPE_COUNT]; }; #endif /* BSD_KERNEL_PRIVATE */ @@ -736,9 +739,9 @@ struct sockaddr; struct ucred; struct uio; -#define SOCK_MSG_SA 0x01 -#define SOCK_MSG_CONTROL 0x02 -#define SOCK_MSG_DATA 0x04 +#define SOCK_MSG_SA 0x01 +#define SOCK_MSG_CONTROL 0x02 +#define SOCK_MSG_DATA 0x04 struct recv_msg_elem { struct uio *uio; @@ -785,14 +788,14 @@ extern void sorwakeup(struct socket *so); extern int sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags); extern int sosend_reinject(struct socket *so, struct sockaddr *addr, struct mbuf *top, - struct mbuf *control, uint32_t sendflags); + struct mbuf *control, uint32_t sendflags); extern int sosend_list(struct socket *so, struct uio **uio, u_int uiocnt, int flags); extern int soreceive_list(struct socket *so, struct recv_msg_elem *msgarray, u_int msgcnt, int *flags); extern void sonullevent(struct socket *so, void *arg, uint32_t hint); extern struct mbuf *sbconcat_mbufs(struct sockbuf *sb, struct sockaddr *asa, struct mbuf *m0, - struct mbuf *control); + struct mbuf *control); __END_DECLS @@ -850,10 +853,10 @@ extern void sbfree_chunk(struct sockbuf *sb, struct mbuf *m); /* * Flags to sblock(). */ -#define SBL_WAIT 0x00000001 /* Wait if not immediately available. */ -#define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */ -#define SBL_IGNDEFUNCT 0x00000004 /* Ignore defunct'd state */ -#define SBL_VALID (SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT) +#define SBL_WAIT 0x00000001 /* Wait if not immediately available. */ +#define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */ +#define SBL_IGNDEFUNCT 0x00000004 /* Ignore defunct'd state */ +#define SBL_VALID (SBL_WAIT | SBL_NOINTR | SBL_IGNDEFUNCT) extern int sblock(struct sockbuf *sb, uint32_t flags); extern void sbunlock(struct sockbuf *sb, boolean_t keeplocked); @@ -875,7 +878,7 @@ extern int sodisconnectx(struct socket *so, sae_associd_t, sae_connid_t); extern int sodisconnectxlocked(struct socket *so, sae_associd_t, sae_connid_t); extern void soevupcall(struct socket *, u_int32_t); /* flags for socreate_internal */ -#define SOCF_ASYNC 0x1 /* non-blocking socket */ +#define SOCF_ASYNC 0x1 /* non-blocking socket */ extern int socreate_internal(int dom, struct socket **aso, int type, int proto, struct proc *, uint32_t, struct proc *); extern int socreate(int dom, struct socket **aso, int type, int proto); @@ -937,17 +940,17 @@ extern int soo_kqfilter(struct fileproc *, struct knote *, struct kevent_internal_s *kev, vfs_context_t); /* Service class flags used for setting service class on a packet */ -#define PKT_SCF_IPV6 0x00000001 /* IPv6 packet */ -#define PKT_SCF_TCP_ACK 0x00000002 /* Pure TCP ACK */ -#define PKT_SCF_TCP_SYN 0x00000004 /* TCP SYN */ +#define PKT_SCF_IPV6 0x00000001 /* IPv6 packet */ +#define PKT_SCF_TCP_ACK 0x00000002 /* Pure TCP ACK */ +#define PKT_SCF_TCP_SYN 0x00000004 /* TCP SYN */ /* * Flags for connectx(2) user-protocol request routine. 
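The SO_FILT_HINT_BITS string a few lines up is a BSD "%b" bit description: the first byte is the output radix (\020 is 16), and each entry after it is a byte holding a 1-based bit number followed by that flag's name. A standalone decoder for that convention, for illustration (print_bits is an invented name; in the kernel this decoding normally happens inside printf):

#include <stdio.h>

/* Decode a "%b"-style description such as SO_FILT_HINT_BITS. */
static void
print_bits(unsigned int v, const char *desc)
{
    int base = *desc++;
    int any = 0;
    int bit;

    printf(base == 8 ? "0%o" : "0x%x", v);
    while ((bit = *desc++) != '\0') {
        if (v & (1u << (bit - 1))) {
            putchar(any ? ',' : '<');
            any = 1;
            while (*desc > 32) {
                putchar(*desc++);
            }
        } else {
            while (*desc > 32) {
                desc++;  /* skip the name of a clear bit */
            }
        }
    }
    if (any) {
        putchar('>');
    }
    putchar('\n');
}

For instance, print_bits(0x102, SO_FILT_HINT_BITS) would print 0x102<CONNRESET,RESUME>, since those hints occupy bits 2 and 9.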
*/ -#define CONNREQF_MPTCP 0x1 /* called internally by MPTCP */ -#define CONNREQF_UIO 0x2 /* there's data */ -#define CONNREQF_IDEM 0x4 /* data is idempotent */ +#define CONNREQF_MPTCP 0x1 /* called internally by MPTCP */ +#define CONNREQF_UIO 0x2 /* there's data */ +#define CONNREQF_IDEM 0x4 /* data is idempotent */ extern void set_packet_service_class(struct mbuf *, struct socket *, mbuf_svc_class_t, u_int32_t); @@ -994,7 +997,7 @@ extern void mptcp_preproc_sbdrop(struct socket *, struct mbuf *, unsigned int); extern void mptcp_postproc_sbdrop(struct mbuf *, u_int64_t, u_int32_t, u_int32_t); extern int mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, - uint64_t dsn, uint32_t rseq, uint16_t dlen); + uint64_t dsn, uint32_t rseq, uint16_t dlen); extern void netpolicy_post_msg(uint32_t, struct netpolicy_event_data *, uint32_t); diff --git a/bsd/sys/sockio.h b/bsd/sys/sockio.h index 0ef6be269..a973c4896 100644 --- a/bsd/sys/sockio.h +++ b/bsd/sys/sockio.h @@ -61,266 +61,285 @@ * @(#)sockio.h 8.1 (Berkeley) 3/28/94 */ -#ifndef _SYS_SOCKIO_H_ -#define _SYS_SOCKIO_H_ +#ifndef _SYS_SOCKIO_H_ +#define _SYS_SOCKIO_H_ #include #include /* Socket ioctl's. */ -#define SIOCSHIWAT _IOW('s', 0, int) /* set high watermark */ -#define SIOCGHIWAT _IOR('s', 1, int) /* get high watermark */ -#define SIOCSLOWAT _IOW('s', 2, int) /* set low watermark */ -#define SIOCGLOWAT _IOR('s', 3, int) /* get low watermark */ -#define SIOCATMARK _IOR('s', 7, int) /* at oob mark? */ -#define SIOCSPGRP _IOW('s', 8, int) /* set process group */ -#define SIOCGPGRP _IOR('s', 9, int) /* get process group */ +#define SIOCSHIWAT _IOW('s', 0, int) /* set high watermark */ +#define SIOCGHIWAT _IOR('s', 1, int) /* get high watermark */ +#define SIOCSLOWAT _IOW('s', 2, int) /* set low watermark */ +#define SIOCGLOWAT _IOR('s', 3, int) /* get low watermark */ +#define SIOCATMARK _IOR('s', 7, int) /* at oob mark? */ +#define SIOCSPGRP _IOW('s', 8, int) /* set process group */ +#define SIOCGPGRP _IOR('s', 9, int) /* get process group */ /* * OSIOCGIF* ioctls are deprecated; they are kept for binary compatibility. 
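The 's'-group ioctls above act on the socket itself rather than on an interface. SIOCATMARK is the classic example, reporting whether the next read starts at the out-of-band mark; a minimal use:

#include <sys/ioctl.h>
#include <sys/sockio.h>

/* Returns 1 at the OOB mark, 0 before it, -1 on error. */
static int
at_oob_mark(int fd)
{
    int atmark = 0;

    if (ioctl(fd, SIOCATMARK, &atmark) == -1) {
        return -1;
    }
    return atmark != 0;
}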
*/ -#define SIOCSIFADDR _IOW('i', 12, struct ifreq) /* set ifnet address */ +#define SIOCSIFADDR _IOW('i', 12, struct ifreq) /* set ifnet address */ #ifdef KERNEL_PRIVATE -#define OSIOCGIFADDR _IOWR('i', 13, struct ifreq) /* deprecated */ +#define OSIOCGIFADDR _IOWR('i', 13, struct ifreq) /* deprecated */ #endif /* KERNEL_PRIVATE */ -#define SIOCSIFDSTADDR _IOW('i', 14, struct ifreq) /* set p-p address */ +#define SIOCSIFDSTADDR _IOW('i', 14, struct ifreq) /* set p-p address */ #ifdef KERNEL_PRIVATE -#define OSIOCGIFDSTADDR _IOWR('i', 15, struct ifreq) /* deprecated */ +#define OSIOCGIFDSTADDR _IOWR('i', 15, struct ifreq) /* deprecated */ #endif /* KERNEL_PRIVATE */ -#define SIOCSIFFLAGS _IOW('i', 16, struct ifreq) /* set ifnet flags */ -#define SIOCGIFFLAGS _IOWR('i', 17, struct ifreq) /* get ifnet flags */ +#define SIOCSIFFLAGS _IOW('i', 16, struct ifreq) /* set ifnet flags */ +#define SIOCGIFFLAGS _IOWR('i', 17, struct ifreq) /* get ifnet flags */ #ifdef KERNEL_PRIVATE -#define OSIOCGIFBRDADDR _IOWR('i', 18, struct ifreq) /* deprecated */ +#define OSIOCGIFBRDADDR _IOWR('i', 18, struct ifreq) /* deprecated */ #endif /* KERNEL_PRIVATE */ -#define SIOCSIFBRDADDR _IOW('i', 19, struct ifreq) /* set broadcast addr */ +#define SIOCSIFBRDADDR _IOW('i', 19, struct ifreq) /* set broadcast addr */ #ifdef KERNEL_PRIVATE -#define OSIOCGIFCONF _IOWR('i', 20, struct ifconf) /* deprecated */ -#define OSIOCGIFCONF32 _IOWR('i', 20, struct ifconf32) /* deprecated */ -#define OSIOCGIFCONF64 _IOWR('i', 20, struct ifconf64) /* deprecated */ -#define OSIOCGIFNETMASK _IOWR('i', 21, struct ifreq) /* deprecated */ +#define OSIOCGIFCONF _IOWR('i', 20, struct ifconf) /* deprecated */ +#define OSIOCGIFCONF32 _IOWR('i', 20, struct ifconf32) /* deprecated */ +#define OSIOCGIFCONF64 _IOWR('i', 20, struct ifconf64) /* deprecated */ +#define OSIOCGIFNETMASK _IOWR('i', 21, struct ifreq) /* deprecated */ #endif /* KERNEL_PRIVATE */ -#define SIOCSIFNETMASK _IOW('i', 22, struct ifreq) /* set net addr mask */ -#define SIOCGIFMETRIC _IOWR('i', 23, struct ifreq) /* get IF metric */ -#define SIOCSIFMETRIC _IOW('i', 24, struct ifreq) /* set IF metric */ -#define SIOCDIFADDR _IOW('i', 25, struct ifreq) /* delete IF addr */ -#define SIOCAIFADDR _IOW('i', 26, struct ifaliasreq)/* add/chg IF alias */ - -#define SIOCGIFADDR _IOWR('i', 33, struct ifreq) /* get ifnet address */ -#define SIOCGIFDSTADDR _IOWR('i', 34, struct ifreq) /* get p-p address */ -#define SIOCGIFBRDADDR _IOWR('i', 35, struct ifreq) /* get broadcast addr */ +#define SIOCSIFNETMASK _IOW('i', 22, struct ifreq) /* set net addr mask */ +#define SIOCGIFMETRIC _IOWR('i', 23, struct ifreq) /* get IF metric */ +#define SIOCSIFMETRIC _IOW('i', 24, struct ifreq) /* set IF metric */ +#define SIOCDIFADDR _IOW('i', 25, struct ifreq) /* delete IF addr */ +#define SIOCAIFADDR _IOW('i', 26, struct ifaliasreq)/* add/chg IF alias */ + +#define SIOCGIFADDR _IOWR('i', 33, struct ifreq) /* get ifnet address */ +#define SIOCGIFDSTADDR _IOWR('i', 34, struct ifreq) /* get p-p address */ +#define SIOCGIFBRDADDR _IOWR('i', 35, struct ifreq) /* get broadcast addr */ #if !defined(KERNEL) || defined(KERNEL_PRIVATE) -#define SIOCGIFCONF _IOWR('i', 36, struct ifconf) /* get ifnet list */ +#define SIOCGIFCONF _IOWR('i', 36, struct ifconf) /* get ifnet list */ #endif /* !KERNEL || KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE -#define SIOCGIFCONF32 _IOWR('i', 36, struct ifconf32) /* get ifnet list */ -#define SIOCGIFCONF64 _IOWR('i', 36, struct ifconf64) /* get ifnet list */ +#define SIOCGIFCONF32 
_IOWR('i', 36, struct ifconf32) /* get ifnet list */ +#define SIOCGIFCONF64 _IOWR('i', 36, struct ifconf64) /* get ifnet list */ #endif /* KERNEL_PRIVATE */ -#define SIOCGIFNETMASK _IOWR('i', 37, struct ifreq) /* get net addr mask */ -#define SIOCAUTOADDR _IOWR('i', 38, struct ifreq) /* autoconf address */ -#define SIOCAUTONETMASK _IOW('i', 39, struct ifreq) /* autoconf netmask */ -#define SIOCARPIPLL _IOWR('i', 40, struct ifreq) /* arp for IPv4LL address */ - -#define SIOCADDMULTI _IOW('i', 49, struct ifreq) /* add m'cast addr */ -#define SIOCDELMULTI _IOW('i', 50, struct ifreq) /* del m'cast addr */ -#define SIOCGIFMTU _IOWR('i', 51, struct ifreq) /* get IF mtu */ -#define SIOCSIFMTU _IOW('i', 52, struct ifreq) /* set IF mtu */ -#define SIOCGIFPHYS _IOWR('i', 53, struct ifreq) /* get IF wire */ -#define SIOCSIFPHYS _IOW('i', 54, struct ifreq) /* set IF wire */ -#define SIOCSIFMEDIA _IOWR('i', 55, struct ifreq) /* set net media */ -#define SIOCGIFMEDIA _IOWR('i', 56, struct ifmediareq) /* get net media */ +#define SIOCGIFNETMASK _IOWR('i', 37, struct ifreq) /* get net addr mask */ +#define SIOCAUTOADDR _IOWR('i', 38, struct ifreq) /* autoconf address */ +#define SIOCAUTONETMASK _IOW('i', 39, struct ifreq) /* autoconf netmask */ +#define SIOCARPIPLL _IOWR('i', 40, struct ifreq) /* arp for IPv4LL address */ + +#define SIOCADDMULTI _IOW('i', 49, struct ifreq) /* add m'cast addr */ +#define SIOCDELMULTI _IOW('i', 50, struct ifreq) /* del m'cast addr */ +#define SIOCGIFMTU _IOWR('i', 51, struct ifreq) /* get IF mtu */ +#define SIOCSIFMTU _IOW('i', 52, struct ifreq) /* set IF mtu */ +#define SIOCGIFPHYS _IOWR('i', 53, struct ifreq) /* get IF wire */ +#define SIOCSIFPHYS _IOW('i', 54, struct ifreq) /* set IF wire */ +#define SIOCSIFMEDIA _IOWR('i', 55, struct ifreq) /* set net media */ + +/* + * The command SIOCGIFMEDIA does not allow a process to access the extended + * media subtype and extended subtype values are returned as IFM_OTHER. 
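Most 'i'-group ioctls exchange a struct ifreq keyed by interface name, issued on any convenient socket. A sketch fetching the MTU with SIOCGIFMTU defined above (get_if_mtu is an illustrative name; "en0" would be a typical argument):

#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <net/if.h>
#include <string.h>
#include <unistd.h>

/* Fetch the MTU of the named interface; -1 on error. */
static int
get_if_mtu(const char *name)
{
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd == -1) {
        return -1;
    }
    memset(&ifr, 0, sizeof(ifr));
    strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
    if (ioctl(fd, SIOCGIFMTU, &ifr) == -1) {
        close(fd);
        return -1;
    }
    close(fd);
    return ifr.ifr_mtu;
}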
+ */ +#define SIOCGIFMEDIA _IOWR('i', 56, struct ifmediareq) /* get compatible net media */ #ifdef KERNEL_PRIVATE -#define SIOCGIFMEDIA32 _IOWR('i', 56, struct ifmediareq32) /* get net media */ -#define SIOCGIFMEDIA64 _IOWR('i', 56, struct ifmediareq64) /* get net media (64-bit) */ +#define SIOCGIFMEDIA32 _IOWR('i', 56, struct ifmediareq32) /* get compatible net media (32-bit) */ +#define SIOCGIFMEDIA64 _IOWR('i', 56, struct ifmediareq64) /* get compatible net media (64-bit) */ #endif /* KERNEL_PRIVATE */ -#define SIOCSIFGENERIC _IOW('i', 57, struct ifreq) /* generic IF set op */ -#define SIOCGIFGENERIC _IOWR('i', 58, struct ifreq) /* generic IF get op */ + +#define SIOCSIFGENERIC _IOW('i', 57, struct ifreq) /* generic IF set op */ +#define SIOCGIFGENERIC _IOWR('i', 58, struct ifreq) /* generic IF get op */ #define SIOCRSLVMULTI _IOWR('i', 59, struct rslvmulti_req) -#define SIOCSIFLLADDR _IOW('i', 60, struct ifreq) /* set link level addr */ -#define SIOCGIFSTATUS _IOWR('i', 61, struct ifstat) /* get IF status */ -#define SIOCSIFPHYADDR _IOW('i', 62, struct ifaliasreq) /* set gif addres */ -#define SIOCGIFPSRCADDR _IOWR('i', 63, struct ifreq) /* get gif psrc addr */ -#define SIOCGIFPDSTADDR _IOWR('i', 64, struct ifreq) /* get gif pdst addr */ -#define SIOCDIFPHYADDR _IOW('i', 65, struct ifreq) /* delete gif addrs */ +#define SIOCSIFLLADDR _IOW('i', 60, struct ifreq) /* set link level addr */ +#define SIOCGIFSTATUS _IOWR('i', 61, struct ifstat) /* get IF status */ +#define SIOCSIFPHYADDR _IOW('i', 62, struct ifaliasreq) /* set gif addres */ +#define SIOCGIFPSRCADDR _IOWR('i', 63, struct ifreq) /* get gif psrc addr */ +#define SIOCGIFPDSTADDR _IOWR('i', 64, struct ifreq) /* get gif pdst addr */ +#define SIOCDIFPHYADDR _IOW('i', 65, struct ifreq) /* delete gif addrs */ + +#define SIOCGIFDEVMTU _IOWR('i', 68, struct ifreq) /* get if ifdevmtu */ +#define SIOCSIFALTMTU _IOW('i', 69, struct ifreq) /* set if alternate mtu */ +#define SIOCGIFALTMTU _IOWR('i', 72, struct ifreq) /* get if alternate mtu */ +#define SIOCSIFBOND _IOW('i', 70, struct ifreq) /* set bond if config */ +#define SIOCGIFBOND _IOWR('i', 71, struct ifreq) /* get bond if config */ -#define SIOCGIFDEVMTU _IOWR('i', 68, struct ifreq) /* get if ifdevmtu */ -#define SIOCSIFALTMTU _IOW('i', 69, struct ifreq) /* set if alternate mtu */ -#define SIOCGIFALTMTU _IOWR('i', 72, struct ifreq) /* get if alternate mtu */ -#define SIOCSIFBOND _IOW('i', 70, struct ifreq) /* set bond if config */ -#define SIOCGIFBOND _IOWR('i', 71, struct ifreq) /* get bond if config */ +/* + * The command SIOCGIFXMEDIA is meant to be used by processes only to be able + * to access the extended media subtypes with the extended IFM_TMASK. 
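SIOCGIFMEDIA takes a struct ifmediareq rather than an ifreq; a common use is a link-state probe via ifm_status. Per the comment above, extended media subtypes come back as IFM_OTHER through this command, so callers that need them use SIOCGIFXMEDIA instead. A sketch (link_active is an illustrative name):

#include <sys/ioctl.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_media.h>
#include <string.h>
#include <unistd.h>

/* Report whether an interface's link is up; -1 on error/unknown. */
static int
link_active(const char *name)
{
    struct ifmediareq ifmr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd == -1) {
        return -1;
    }
    memset(&ifmr, 0, sizeof(ifmr));
    strlcpy(ifmr.ifm_name, name, sizeof(ifmr.ifm_name));
    if (ioctl(fd, SIOCGIFMEDIA, &ifmr) == -1) {
        close(fd);
        return -1;
    }
    close(fd);
    if ((ifmr.ifm_status & IFM_AVALID) == 0) {
        return -1;  /* driver does not report link state */
    }
    return (ifmr.ifm_status & IFM_ACTIVE) != 0;
}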
+ * + * An ifnet must not implement SIOCGIFXMEDIA as it gets the extended + * media subtypes by simply compiling with + */ +#define SIOCGIFXMEDIA _IOWR('i', 72, struct ifmediareq) /* get net extended media */ +#ifdef KERNEL_PRIVATE +#define SIOCGIFXMEDIA32 _IOWR('i', 72, struct ifmediareq32) /* get net extended media */ +#define SIOCGIFXMEDIA64 _IOWR('i', 72, struct ifmediareq64) /* get net extended media (64-bit) */ +#endif /* KERNEL_PRIVATE */ #ifdef PRIVATE /* * temporary control calls to attach/detach IP to/from an ethernet interface */ -#define SIOCPROTOATTACH _IOWR('i', 80, struct ifreq) /* attach proto to interface */ -#define SIOCPROTODETACH _IOWR('i', 81, struct ifreq) /* detach proto from interface */ +#define SIOCPROTOATTACH _IOWR('i', 80, struct ifreq) /* attach proto to interface */ +#define SIOCPROTODETACH _IOWR('i', 81, struct ifreq) /* detach proto from interface */ #endif /* PRIVATE */ #define SIOCSIFCAP _IOW('i', 90, struct ifreq) /* set IF features */ #define SIOCGIFCAP _IOWR('i', 91, struct ifreq) /* get IF features */ -#define SIOCIFCREATE _IOWR('i', 120, struct ifreq) /* create clone if */ -#define SIOCIFDESTROY _IOW('i', 121, struct ifreq) /* destroy clone if */ +#define SIOCIFCREATE _IOWR('i', 120, struct ifreq) /* create clone if */ +#define SIOCIFDESTROY _IOW('i', 121, struct ifreq) /* destroy clone if */ #define SIOCIFCREATE2 _IOWR('i', 122, struct ifreq) /* create clone if with data */ #define SIOCSDRVSPEC _IOW('i', 123, struct ifdrv) /* set driver-specific - parameters */ + * parameters */ #define SIOCGDRVSPEC _IOWR('i', 123, struct ifdrv) /* get driver-specific - parameters */ + * parameters */ #ifdef KERNEL_PRIVATE #define SIOCSDRVSPEC32 _IOW('i', 123, struct ifdrv32) /* set driver-specific - parameters */ + * parameters */ #define SIOCGDRVSPEC32 _IOWR('i', 123, struct ifdrv32) /* get driver-specific - parameters */ + * parameters */ #define SIOCSDRVSPEC64 _IOW('i', 123, struct ifdrv64) /* set driver-specific - parameters */ + * parameters */ #define SIOCGDRVSPEC64 _IOWR('i', 123, struct ifdrv64) /* get driver-specific - parameters */ + * parameters */ #endif /* KERNEL_PRIVATE */ -#define SIOCSIFVLAN _IOW('i', 126, struct ifreq) /* set VLAN config */ -#define SIOCGIFVLAN _IOWR('i', 127, struct ifreq) /* get VLAN config */ -#define SIOCSETVLAN SIOCSIFVLAN -#define SIOCGETVLAN SIOCGIFVLAN +#define SIOCSIFVLAN _IOW('i', 126, struct ifreq) /* set VLAN config */ +#define SIOCGIFVLAN _IOWR('i', 127, struct ifreq) /* get VLAN config */ +#define SIOCSETVLAN SIOCSIFVLAN +#define SIOCGETVLAN SIOCGIFVLAN #ifdef KERNEL_PRIVATE -#define SIOCSIFDEVMTU SIOCSIFALTMTU /* deprecated */ +#define SIOCSIFDEVMTU SIOCSIFALTMTU /* deprecated */ #endif /* KERNEL_PRIVATE */ #if !defined(KERNEL) || defined(KERNEL_PRIVATE) -#define SIOCIFGCLONERS _IOWR('i', 129, struct if_clonereq) /* get cloners */ +#define SIOCIFGCLONERS _IOWR('i', 129, struct if_clonereq) /* get cloners */ #endif /* !KERNEL || KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE -#define SIOCIFGCLONERS32 _IOWR('i', 129, struct if_clonereq32) /* get cloners */ -#define SIOCIFGCLONERS64 _IOWR('i', 129, struct if_clonereq64) /* get cloners */ +#define SIOCIFGCLONERS32 _IOWR('i', 129, struct if_clonereq32) /* get cloners */ +#define SIOCIFGCLONERS64 _IOWR('i', 129, struct if_clonereq64) /* get cloners */ #endif /* KERNEL_PRIVATE */ -#define SIOCGIFASYNCMAP _IOWR('i', 124, struct ifreq) /* get ppp asyncmap */ -#define SIOCSIFASYNCMAP _IOW('i', 125, struct ifreq) /* set ppp asyncmap */ +#define SIOCGIFASYNCMAP _IOWR('i', 124, struct 
ifreq) /* get ppp asyncmap */ +#define SIOCSIFASYNCMAP _IOW('i', 125, struct ifreq) /* set ppp asyncmap */ #ifdef PRIVATE #define SIOCSETOT _IOW('s', 128, int) /* deprecated */ #endif /* PRIVATE */ -#define SIOCGIFMAC _IOWR('i', 130, struct ifreq) /* get IF MAC label */ -#define SIOCSIFMAC _IOW('i', 131, struct ifreq) /* set IF MAC label */ -#define SIOCSIFKPI _IOW('i', 134, struct ifreq) /* set interface kext param - root only */ -#define SIOCGIFKPI _IOWR('i', 135, struct ifreq) /* get interface kext param */ +#define SIOCGIFMAC _IOWR('i', 130, struct ifreq) /* get IF MAC label */ +#define SIOCSIFMAC _IOW('i', 131, struct ifreq) /* set IF MAC label */ +#define SIOCSIFKPI _IOW('i', 134, struct ifreq) /* set interface kext param - root only */ +#define SIOCGIFKPI _IOWR('i', 135, struct ifreq) /* get interface kext param */ -#define SIOCGIFWAKEFLAGS _IOWR('i', 136, struct ifreq) /* get interface wake property flags */ +#define SIOCGIFWAKEFLAGS _IOWR('i', 136, struct ifreq) /* get interface wake property flags */ #ifdef PRIVATE -#define SIOCGIFGETRTREFCNT _IOWR('i', 137, struct ifreq) /* get interface route refcnt */ -#define SIOCGIFLINKQUALITYMETRIC _IOWR('i', 138, struct ifreq) /* get LQM */ -#define SIOCSIFOPPORTUNISTIC _IOWR('i', 139, struct ifreq) /* deprecated; use SIOCSIFTHROTTLE */ -#define SIOCGIFOPPORTUNISTIC _IOWR('i', 140, struct ifreq) /* deprecated; use SIOCGIFTHROTTLE */ -#define SIOCSETROUTERMODE _IOWR('i', 141, struct ifreq) /* enable/disable IPv4 router mode on interface */ -#define SIOCGIFEFLAGS _IOWR('i', 142, struct ifreq) /* get extended ifnet flags */ -#define SIOCSIFDESC _IOWR('i', 143, struct if_descreq) -#define SIOCGIFDESC _IOWR('i', 144, struct if_descreq) -#define SIOCSIFLINKPARAMS _IOWR('i', 145, struct if_linkparamsreq) -#define SIOCGIFLINKPARAMS _IOWR('i', 146, struct if_linkparamsreq) -#define SIOCGIFQUEUESTATS _IOWR('i', 147, struct if_qstatsreq) -#define SIOCSIFTHROTTLE _IOWR('i', 148, struct if_throttlereq) -#define SIOCGIFTHROTTLE _IOWR('i', 149, struct if_throttlereq) - -#define SIOCGASSOCIDS _IOWR('s', 150, struct so_aidreq) /* get associds */ -#define SIOCGCONNIDS _IOWR('s', 151, struct so_cidreq) /* get connids */ -#define SIOCGCONNINFO _IOWR('s', 152, struct so_cinforeq) /* get conninfo */ +#define SIOCGIFGETRTREFCNT _IOWR('i', 137, struct ifreq) /* get interface route refcnt */ +#define SIOCGIFLINKQUALITYMETRIC _IOWR('i', 138, struct ifreq) /* get LQM */ +#define SIOCSIFOPPORTUNISTIC _IOWR('i', 139, struct ifreq) /* deprecated; use SIOCSIFTHROTTLE */ +#define SIOCGIFOPPORTUNISTIC _IOWR('i', 140, struct ifreq) /* deprecated; use SIOCGIFTHROTTLE */ +#define SIOCSETROUTERMODE _IOWR('i', 141, struct ifreq) /* enable/disable IPv4 router mode on interface */ +#define SIOCGIFEFLAGS _IOWR('i', 142, struct ifreq) /* get extended ifnet flags */ +#define SIOCSIFDESC _IOWR('i', 143, struct if_descreq) +#define SIOCGIFDESC _IOWR('i', 144, struct if_descreq) +#define SIOCSIFLINKPARAMS _IOWR('i', 145, struct if_linkparamsreq) +#define SIOCGIFLINKPARAMS _IOWR('i', 146, struct if_linkparamsreq) +#define SIOCGIFQUEUESTATS _IOWR('i', 147, struct if_qstatsreq) +#define SIOCSIFTHROTTLE _IOWR('i', 148, struct if_throttlereq) +#define SIOCGIFTHROTTLE _IOWR('i', 149, struct if_throttlereq) + +#define SIOCGASSOCIDS _IOWR('s', 150, struct so_aidreq) /* get associds */ +#define SIOCGCONNIDS _IOWR('s', 151, struct so_cidreq) /* get connids */ +#define SIOCGCONNINFO _IOWR('s', 152, struct so_cinforeq) /* get conninfo */ #ifdef BSD_KERNEL_PRIVATE -#define SIOCGASSOCIDS32 
_IOWR('s', 150, struct so_aidreq32) -#define SIOCGASSOCIDS64 _IOWR('s', 150, struct so_aidreq64) -#define SIOCGCONNIDS32 _IOWR('s', 151, struct so_cidreq32) -#define SIOCGCONNIDS64 _IOWR('s', 151, struct so_cidreq64) -#define SIOCGCONNINFO32 _IOWR('s', 152, struct so_cinforeq32) -#define SIOCGCONNINFO64 _IOWR('s', 152, struct so_cinforeq64) +#define SIOCGASSOCIDS32 _IOWR('s', 150, struct so_aidreq32) +#define SIOCGASSOCIDS64 _IOWR('s', 150, struct so_aidreq64) +#define SIOCGCONNIDS32 _IOWR('s', 151, struct so_cidreq32) +#define SIOCGCONNIDS64 _IOWR('s', 151, struct so_cidreq64) +#define SIOCGCONNINFO32 _IOWR('s', 152, struct so_cinforeq32) +#define SIOCGCONNINFO64 _IOWR('s', 152, struct so_cinforeq64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCSCONNORDER _IOWR('s', 153, struct so_cordreq) /* set conn order */ -#define SIOCGCONNORDER _IOWR('s', 154, struct so_cordreq) /* get conn order */ - -#define SIOCSIFLOG _IOWR('i', 155, struct ifreq) -#define SIOCGIFLOG _IOWR('i', 156, struct ifreq) -#define SIOCGIFDELEGATE _IOWR('i', 157, struct ifreq) -#define SIOCGIFLLADDR _IOWR('i', 158, struct ifreq) /* get link level addr */ -#define SIOCGIFTYPE _IOWR('i', 159, struct ifreq) /* get interface type */ -#define SIOCGIFEXPENSIVE _IOWR('i', 160, struct ifreq) /* get interface expensive flag */ -#define SIOCSIFEXPENSIVE _IOWR('i', 161, struct ifreq) /* mark interface expensive */ -#define SIOCGIF2KCL _IOWR('i', 162, struct ifreq) /* interface prefers 2 KB clusters */ -#define SIOCSIF2KCL _IOWR('i', 163, struct ifreq) -#define SIOCGSTARTDELAY _IOWR('i', 164, struct ifreq) - -#define SIOCAIFAGENTID _IOWR('i', 165, struct if_agentidreq) /* Add netagent id */ -#define SIOCDIFAGENTID _IOWR('i', 166, struct if_agentidreq) /* Delete netagent id */ -#define SIOCGIFAGENTIDS _IOWR('i', 167, struct if_agentidsreq) /* Get netagent ids */ -#define SIOCGIFAGENTDATA _IOWR('i', 168, struct netagent_req) /* Get netagent data */ +#define SIOCSCONNORDER _IOWR('s', 153, struct so_cordreq) /* set conn order */ +#define SIOCGCONNORDER _IOWR('s', 154, struct so_cordreq) /* get conn order */ + +#define SIOCSIFLOG _IOWR('i', 155, struct ifreq) +#define SIOCGIFLOG _IOWR('i', 156, struct ifreq) +#define SIOCGIFDELEGATE _IOWR('i', 157, struct ifreq) +#define SIOCGIFLLADDR _IOWR('i', 158, struct ifreq) /* get link level addr */ +#define SIOCGIFTYPE _IOWR('i', 159, struct ifreq) /* get interface type */ +#define SIOCGIFEXPENSIVE _IOWR('i', 160, struct ifreq) /* get interface expensive flag */ +#define SIOCSIFEXPENSIVE _IOWR('i', 161, struct ifreq) /* mark interface expensive */ +#define SIOCGIF2KCL _IOWR('i', 162, struct ifreq) /* interface prefers 2 KB clusters */ +#define SIOCSIF2KCL _IOWR('i', 163, struct ifreq) +#define SIOCGSTARTDELAY _IOWR('i', 164, struct ifreq) + +#define SIOCAIFAGENTID _IOWR('i', 165, struct if_agentidreq) /* Add netagent id */ +#define SIOCDIFAGENTID _IOWR('i', 166, struct if_agentidreq) /* Delete netagent id */ +#define SIOCGIFAGENTIDS _IOWR('i', 167, struct if_agentidsreq) /* Get netagent ids */ +#define SIOCGIFAGENTDATA _IOWR('i', 168, struct netagent_req) /* Get netagent data */ #ifdef BSD_KERNEL_PRIVATE -#define SIOCGIFAGENTIDS32 _IOWR('i', 167, struct if_agentidsreq32) -#define SIOCGIFAGENTIDS64 _IOWR('i', 167, struct if_agentidsreq64) -#define SIOCGIFAGENTDATA32 _IOWR('i', 168, struct netagent_req32) -#define SIOCGIFAGENTDATA64 _IOWR('i', 168, struct netagent_req64) +#define SIOCGIFAGENTIDS32 _IOWR('i', 167, struct if_agentidsreq32) +#define SIOCGIFAGENTIDS64 _IOWR('i', 167, struct 
if_agentidsreq64) +#define SIOCGIFAGENTDATA32 _IOWR('i', 168, struct netagent_req32) +#define SIOCGIFAGENTDATA64 _IOWR('i', 168, struct netagent_req64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCSIFINTERFACESTATE _IOWR('i', 169, struct ifreq) /* set interface state */ -#define SIOCGIFINTERFACESTATE _IOWR('i', 170, struct ifreq) /* get interface state */ -#define SIOCSIFPROBECONNECTIVITY _IOWR('i', 171, struct ifreq) /* Start/Stop probes to check connectivity */ -#define SIOCGIFPROBECONNECTIVITY _IOWR('i', 172, struct ifreq) /* check if connectivity probes are enabled */ +#define SIOCSIFINTERFACESTATE _IOWR('i', 169, struct ifreq) /* set interface state */ +#define SIOCGIFINTERFACESTATE _IOWR('i', 170, struct ifreq) /* get interface state */ +#define SIOCSIFPROBECONNECTIVITY _IOWR('i', 171, struct ifreq) /* Start/Stop probes to check connectivity */ +#define SIOCGIFPROBECONNECTIVITY _IOWR('i', 172, struct ifreq) /* check if connectivity probes are enabled */ #endif /* PRIVATE */ -#define SIOCGIFFUNCTIONALTYPE _IOWR('i', 173, struct ifreq) /* get interface functional type */ +#define SIOCGIFFUNCTIONALTYPE _IOWR('i', 173, struct ifreq) /* get interface functional type */ #ifdef PRIVATE -#define SIOCSIFNETSIGNATURE _IOWR('i', 174, struct if_nsreq) -#define SIOCGIFNETSIGNATURE _IOWR('i', 175, struct if_nsreq) +#define SIOCSIFNETSIGNATURE _IOWR('i', 174, struct if_nsreq) +#define SIOCGIFNETSIGNATURE _IOWR('i', 175, struct if_nsreq) -#define SIOCGECNMODE _IOWR('i', 176, struct ifreq) -#define SIOCSECNMODE _IOW('i', 177, struct ifreq) +#define SIOCGECNMODE _IOWR('i', 176, struct ifreq) +#define SIOCSECNMODE _IOW('i', 177, struct ifreq) -#define SIOCSIFORDER _IOWR('i', 178, struct if_order) +#define SIOCSIFORDER _IOWR('i', 178, struct if_order) -#define SIOCSQOSMARKINGMODE _IOWR('i', 180, struct ifreq) -#define SIOCSFASTLANECAPABLE SIOCSQOSMARKINGMODE -#define SIOCSQOSMARKINGENABLED _IOWR('i', 181, struct ifreq) -#define SIOCSFASTLEENABLED SIOCSQOSMARKINGENABLED -#define SIOCGQOSMARKINGMODE _IOWR('i', 182, struct ifreq) -#define SIOCGQOSMARKINGENABLED _IOWR('i', 183, struct ifreq) +#define SIOCSQOSMARKINGMODE _IOWR('i', 180, struct ifreq) +#define SIOCSFASTLANECAPABLE SIOCSQOSMARKINGMODE +#define SIOCSQOSMARKINGENABLED _IOWR('i', 181, struct ifreq) +#define SIOCSFASTLEENABLED SIOCSQOSMARKINGENABLED +#define SIOCGQOSMARKINGMODE _IOWR('i', 182, struct ifreq) +#define SIOCGQOSMARKINGENABLED _IOWR('i', 183, struct ifreq) -#define SIOCSIFTIMESTAMPENABLE _IOWR('i', 184, struct ifreq) -#define SIOCSIFTIMESTAMPDISABLE _IOWR('i', 185, struct ifreq) -#define SIOCGIFTIMESTAMPENABLED _IOWR('i', 186, struct ifreq) +#define SIOCSIFTIMESTAMPENABLE _IOWR('i', 184, struct ifreq) +#define SIOCSIFTIMESTAMPDISABLE _IOWR('i', 185, struct ifreq) +#define SIOCGIFTIMESTAMPENABLED _IOWR('i', 186, struct ifreq) -#define SIOCSIFDISABLEOUTPUT _IOWR('i', 187, struct ifreq) +#define SIOCSIFDISABLEOUTPUT _IOWR('i', 187, struct ifreq) -#define SIOCGIFAGENTLIST _IOWR('i', 190, struct netagentlist_req) /* Get netagent dump */ +#define SIOCGIFAGENTLIST _IOWR('i', 190, struct netagentlist_req) /* Get netagent dump */ #ifdef BSD_KERNEL_PRIVATE -#define SIOCGIFAGENTLIST32 _IOWR('i', 190, struct netagentlist_req32) -#define SIOCGIFAGENTLIST64 _IOWR('i', 190, struct netagentlist_req64) +#define SIOCGIFAGENTLIST32 _IOWR('i', 190, struct netagentlist_req32) +#define SIOCGIFAGENTLIST64 _IOWR('i', 190, struct netagentlist_req64) #endif /* BSD_KERNEL_PRIVATE */ -#define SIOCSIFLOWINTERNET _IOWR('i', 191, struct ifreq) -#define 
SIOCGIFLOWINTERNET _IOWR('i', 192, struct ifreq) +#define SIOCSIFLOWINTERNET _IOWR('i', 191, struct ifreq) +#define SIOCGIFLOWINTERNET _IOWR('i', 192, struct ifreq) #if INET6 -#define SIOCGIFNAT64PREFIX _IOWR('i', 193, struct if_nat64req) -#define SIOCSIFNAT64PREFIX _IOWR('i', 194, struct if_nat64req) +#define SIOCGIFNAT64PREFIX _IOWR('i', 193, struct if_nat64req) +#define SIOCSIFNAT64PREFIX _IOWR('i', 194, struct if_nat64req) #endif -#define SIOCGIFNEXUS _IOWR('i', 195, struct if_nexusreq) -#define SIOCGIFPROTOLIST _IOWR('i', 196, struct if_protolistreq) /* get list of attached protocols */ +#define SIOCGIFNEXUS _IOWR('i', 195, struct if_nexusreq) +#define SIOCGIFPROTOLIST _IOWR('i', 196, struct if_protolistreq) /* get list of attached protocols */ #ifdef BSD_KERNEL_PRIVATE -#define SIOCGIFPROTOLIST32 _IOWR('i', 196, struct if_protolistreq32) -#define SIOCGIFPROTOLIST64 _IOWR('i', 196, struct if_protolistreq64) +#define SIOCGIFPROTOLIST32 _IOWR('i', 196, struct if_protolistreq32) +#define SIOCGIFPROTOLIST64 _IOWR('i', 196, struct if_protolistreq64) #endif /* BSD_KERNEL_PRIVATE */ #endif /* PRIVATE */ #ifdef PRIVATE -#define SIOCGIFLOWPOWER _IOWR('i', 199, struct ifreq) /* Low Power Mode */ -#define SIOCSIFLOWPOWER _IOWR('i', 200, struct ifreq) /* Low Power Mode */ +#define SIOCGIFLOWPOWER _IOWR('i', 199, struct ifreq) /* Low Power Mode */ +#define SIOCSIFLOWPOWER _IOWR('i', 200, struct ifreq) /* Low Power Mode */ #if INET6 -#define SIOCGIFCLAT46ADDR _IOWR('i', 201, struct if_clat46req) +#define SIOCGIFCLAT46ADDR _IOWR('i', 201, struct if_clat46req) #endif /* INET6 */ #endif /* PRIVATE */ diff --git a/bsd/sys/spawn.h b/bsd/sys/spawn.h index bcf1d6367..790d9c47a 100644 --- a/bsd/sys/spawn.h +++ b/bsd/sys/spawn.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,41 +34,41 @@ * manifest constants, at the current time. */ -#ifndef _SYS_SPAWN_H_ -#define _SYS_SPAWN_H_ +#ifndef _SYS_SPAWN_H_ +#define _SYS_SPAWN_H_ /* * Possible bit values which may be OR'ed together and provided as the second * parameter to posix_spawnattr_setflags() or implicit returned in the value of * the second parameter to posix_spawnattr_getflags(). 
*/
-#define POSIX_SPAWN_RESETIDS 0x0001 /* [SPN] R[UG]ID not E[UG]ID */
-#define POSIX_SPAWN_SETPGROUP 0x0002 /* [SPN] set non-parent PGID */
-#define POSIX_SPAWN_SETSIGDEF 0x0004 /* [SPN] reset sigset default */
-#define POSIX_SPAWN_SETSIGMASK 0x0008 /* [SPN] set signal mask */
+#define POSIX_SPAWN_RESETIDS 0x0001 /* [SPN] R[UG]ID not E[UG]ID */
+#define POSIX_SPAWN_SETPGROUP 0x0002 /* [SPN] set non-parent PGID */
+#define POSIX_SPAWN_SETSIGDEF 0x0004 /* [SPN] reset sigset default */
+#define POSIX_SPAWN_SETSIGMASK 0x0008 /* [SPN] set signal mask */
-#if 0 /* _POSIX_PRIORITY_SCHEDULING [PS] : not supported */
-#define POSIX_SPAWN_SETSCHEDPARAM 0x0010
-#define POSIX_SPAWN_SETSCHEDULER 0x0020
-#endif /* 0 */
+#if 0 /* _POSIX_PRIORITY_SCHEDULING [PS] : not supported */
+#define POSIX_SPAWN_SETSCHEDPARAM 0x0010
+#define POSIX_SPAWN_SETSCHEDULER 0x0020
+#endif /* 0 */
-#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
+#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
/*
 * Darwin-specific flags
 */
-#define POSIX_SPAWN_SETEXEC 0x0040
-#define POSIX_SPAWN_START_SUSPENDED 0x0080
-#ifdef PRIVATE
-#define _POSIX_SPAWN_DISABLE_ASLR 0x0100
+#define POSIX_SPAWN_SETEXEC 0x0040
+#define POSIX_SPAWN_START_SUSPENDED 0x0080
+#ifdef PRIVATE
+#define _POSIX_SPAWN_DISABLE_ASLR 0x0100
#define _POSIX_SPAWN_NANO_ALLOCATOR 0x0200
/* unused 0x0400 */
/* unused 0x0800 */
/* unused 0x1000 */
-#define _POSIX_SPAWN_ALLOW_DATA_EXEC 0x2000
-#endif /* PRIVATE */
-#define POSIX_SPAWN_CLOEXEC_DEFAULT 0x4000
+#define _POSIX_SPAWN_ALLOW_DATA_EXEC 0x2000
+#endif /* PRIVATE */
+#define POSIX_SPAWN_CLOEXEC_DEFAULT 0x4000
#ifdef PRIVATE
-#define _POSIX_SPAWN_HIGH_BITS_ASLR 0x8000
+#define _POSIX_SPAWN_HIGH_BITS_ASLR 0x8000
#endif /* PRIVATE */
/*
@@ -77,11 +77,11 @@
 * POSIX_SPAWN_PCONTROL_SUSPEND indicates that the process is to be suspended on starvation.
 * POSIX_SPAWN_PCONTROL_KILL indicates that the process is to be terminated on starvation.
 */
-#define POSIX_SPAWN_PCONTROL_NONE 0x0000
-#define POSIX_SPAWN_PCONTROL_THROTTLE 0x0001
-#define POSIX_SPAWN_PCONTROL_SUSPEND 0x0002
-#define POSIX_SPAWN_PCONTROL_KILL 0x0003
+#define POSIX_SPAWN_PCONTROL_NONE 0x0000
+#define POSIX_SPAWN_PCONTROL_THROTTLE 0x0001
+#define POSIX_SPAWN_PCONTROL_SUSPEND 0x0002
+#define POSIX_SPAWN_PCONTROL_KILL 0x0003
#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
-#endif /* _SYS_SPAWN_H_ */
+#endif /* _SYS_SPAWN_H_ */
diff --git a/bsd/sys/spawn_internal.h b/bsd/sys/spawn_internal.h
index 069897d1b..64877ea3d 100644
--- a/bsd/sys/spawn_internal.h
+++ b/bsd/sys/spawn_internal.h
@@ -2,7 +2,7 @@
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- *
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
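The POSIX_SPAWN_* values above are plain bit flags meant to be OR'ed into a posix_spawnattr_t, as the header comment describes. A minimal user-space sketch of that usage, assuming a Darwin SDK; the spawned path and the SIGPIPE choice are arbitrary placeholders, not taken from this patch:

    #include <spawn.h>
    #include <signal.h>
    #include <stdio.h>

    extern char **environ;

    int main(void)
    {
        posix_spawnattr_t attr;
        sigset_t def;
        pid_t pid;
        char *argv[] = { "/bin/echo", "hello", NULL };  /* placeholder */

        posix_spawnattr_init(&attr);
        sigemptyset(&def);
        sigaddset(&def, SIGPIPE);                       /* arbitrary example */
        posix_spawnattr_setsigdefault(&attr, &def);
        /* OR'ed flag bits; POSIX_SPAWN_SETSIGDEF consumes the sigset above,
         * and POSIX_SPAWN_CLOEXEC_DEFAULT is the Darwin-specific bit. */
        posix_spawnattr_setflags(&attr,
            POSIX_SPAWN_SETSIGDEF | POSIX_SPAWN_CLOEXEC_DEFAULT);
        if (posix_spawn(&pid, argv[0], NULL, &attr, argv, environ) != 0)
            perror("posix_spawn");
        posix_spawnattr_destroy(&attr);
        return 0;
    }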
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,17 +37,17 @@ * their code because of structure size changes or data reorganization. */ -#ifndef _SYS_SPAWN_INTERNAL_H_ -#define _SYS_SPAWN_INTERNAL_H_ +#ifndef _SYS_SPAWN_INTERNAL_H_ +#define _SYS_SPAWN_INTERNAL_H_ -#include /* __offsetof(), __darwin_size_t */ +#include /* __offsetof(), __darwin_size_t */ #include -#include /* PATH_MAX */ +#include /* PATH_MAX */ #include #include #include #include -#include /* COALITION_NUM_TYPES */ +#include /* COALITION_NUM_TYPES */ #include /* @@ -56,7 +56,7 @@ * * If the size calculation overflows a size_t value, this macro returns 0. */ -#define PS_ACTION_SIZE(x,_type,_member_type) ({ \ +#define PS_ACTION_SIZE(x, _type, _member_type) ({\ size_t _ps_count = (size_t)x; \ size_t _ps_size = 0; \ /* (count * sizeof(_member_type)) + sizeof(_type) */ \ @@ -64,7 +64,7 @@ sizeof(_member_type), \ sizeof(_type), \ &_ps_size)) { \ - _ps_size = 0; \ + _ps_size = 0; \ } \ _ps_size; }) @@ -84,49 +84,49 @@ typedef enum { * but could be extended to other inheritable port types. */ typedef struct _ps_port_action { - pspa_t port_type; - exception_mask_t mask; - mach_port_name_t new_port; - exception_behavior_t behavior; - thread_state_flavor_t flavor; - int which; + pspa_t port_type; + exception_mask_t mask; + mach_port_name_t new_port; + exception_behavior_t behavior; + thread_state_flavor_t flavor; + int which; } _ps_port_action_t; /* * A collection of port actions to take on the newly spawned process. */ typedef struct _posix_spawn_port_actions { - int pspa_alloc; - int pspa_count; - _ps_port_action_t pspa_actions[]; + int pspa_alloc; + int pspa_count; + _ps_port_action_t pspa_actions[]; } *_posix_spawn_port_actions_t; /* * Returns size in bytes of a _posix_spawn_port_actions holding x elements. */ -#define PS_PORT_ACTIONS_SIZE(x) \ +#define PS_PORT_ACTIONS_SIZE(x) \ PS_ACTION_SIZE(x, struct _posix_spawn_port_actions, _ps_port_action_t) -#define NBINPREFS 4 +#define NBINPREFS 4 /* * Mapping of opaque data pointer to a MAC policy (specified by name). */ typedef struct _ps_mac_policy_extension { - char policyname[128]; + char policyname[128]; union { - uint64_t data; - void *datap; /* pointer in kernel memory */ + uint64_t data; + void *datap; /* pointer in kernel memory */ }; - uint64_t datalen; + uint64_t datalen; } _ps_mac_policy_extension_t; /* * A collection of extra data passed to MAC policies for the newly spawned process. 
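The PS_ACTION_SIZE macro above guards the classic count * sizeof(element) + sizeof(header) computation for these flexible-array structures, collapsing to 0 on size_t overflow so a caller can treat the result like an allocation failure. A stand-alone sketch of the same pattern using the C compiler's overflow builtins instead of the kernel's helpers; the struct and function names here are illustrative only:

    #include <stddef.h>

    /* Header with a C99 flexible array member, shaped like
     * _posix_spawn_port_actions above. */
    struct actions {
        int alloc;
        int count;
        long body[];    /* stand-in for the real action records */
    };

    /* Returns sizeof(header) + n elements, or 0 if the arithmetic wraps. */
    static size_t
    actions_size(size_t n)
    {
        size_t bytes;

        if (__builtin_mul_overflow(n, sizeof(long), &bytes) ||
            __builtin_add_overflow(bytes, sizeof(struct actions), &bytes)) {
            return 0;   /* overflow: caller treats this as "too big" */
        }
        return bytes;
    }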
*/ typedef struct _posix_spawn_mac_policy_extensions { - int psmx_alloc; - int psmx_count; + int psmx_alloc; + int psmx_count; _ps_mac_policy_extension_t psmx_extensions[]; } *_posix_spawn_mac_policy_extensions_t; @@ -136,7 +136,7 @@ typedef struct _posix_spawn_mac_policy_extensions { #define PS_MAC_EXTENSIONS_SIZE(x) \ PS_ACTION_SIZE(x, struct _posix_spawn_mac_policy_extensions, _ps_mac_policy_extension_t) -#define PS_MAC_EXTENSIONS_INIT_COUNT 2 +#define PS_MAC_EXTENSIONS_INIT_COUNT 2 /* * Coalition posix spawn attributes @@ -188,27 +188,27 @@ struct _posix_spawn_persona_info { */ typedef struct _posix_spawnattr { - short psa_flags; /* spawn attribute flags */ - short flags_padding; /* get the flags to be int aligned */ - sigset_t psa_sigdefault; /* signal set to default */ - sigset_t psa_sigmask; /* signal set to mask */ - pid_t psa_pgroup; /* pgroup to spawn into */ - cpu_type_t psa_binprefs[NBINPREFS]; /* cpu affinity prefs*/ - int psa_pcontrol; /* process control bits on resource starvation */ - int psa_apptype; /* app type and process spec behav */ - uint64_t psa_cpumonitor_percent; /* CPU usage monitor percentage */ - uint64_t psa_cpumonitor_interval; /* CPU usage monitor interval, in seconds */ - uint64_t psa_reserved; - - short psa_jetsam_flags; /* jetsam flags */ - short short_padding; /* Padding for alignment issues */ - int psa_priority; /* jetsam relative importance */ - int psa_memlimit_active; /* jetsam memory limit (in MB) when process is active */ - int psa_memlimit_inactive; /* jetsam memory limit (in MB) when process is inactive */ + short psa_flags; /* spawn attribute flags */ + short flags_padding; /* get the flags to be int aligned */ + sigset_t psa_sigdefault; /* signal set to default */ + sigset_t psa_sigmask; /* signal set to mask */ + pid_t psa_pgroup; /* pgroup to spawn into */ + cpu_type_t psa_binprefs[NBINPREFS]; /* cpu affinity prefs*/ + int psa_pcontrol; /* process control bits on resource starvation */ + int psa_apptype; /* app type and process spec behav */ + uint64_t psa_cpumonitor_percent; /* CPU usage monitor percentage */ + uint64_t psa_cpumonitor_interval; /* CPU usage monitor interval, in seconds */ + uint64_t psa_reserved; + + short psa_jetsam_flags; /* jetsam flags */ + short short_padding; /* Padding for alignment issues */ + int psa_priority; /* jetsam relative importance */ + int psa_memlimit_active; /* jetsam memory limit (in MB) when process is active */ + int psa_memlimit_inactive; /* jetsam memory limit (in MB) when process is inactive */ uint64_t psa_qos_clamp; /* QoS Clamp to set on the new process */ uint64_t psa_darwin_role; /* PRIO_DARWIN_ROLE to set on the new process */ - int psa_thread_limit; /* thread limit */ + int psa_thread_limit; /* thread limit */ uint64_t psa_max_addr; /* Max valid VM address */ @@ -217,7 +217,7 @@ typedef struct _posix_spawnattr { * everything above this point stays the same size on different bitnesses * see */ - _posix_spawn_port_actions_t psa_ports; /* special/exception ports */ + _posix_spawn_port_actions_t psa_ports; /* special/exception ports */ _posix_spawn_mac_policy_extensions_t psa_mac_extensions; /* MAC policy-specific extensions. 
*/ struct _posix_spawn_coalition_info *psa_coalition_info; /* coalition info */ struct _posix_spawn_persona_info *psa_persona_info; /* spawn new process into given persona */ @@ -226,22 +226,22 @@ typedef struct _posix_spawnattr { /* * Jetsam flags eg: psa_jetsam_flags */ -#define POSIX_SPAWN_JETSAM_SET 0x8000 +#define POSIX_SPAWN_JETSAM_SET 0x8000 -#define POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY 0x01 -#define POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND 0x02 /* to be deprecated */ -#define POSIX_SPAWN_JETSAM_MEMLIMIT_FATAL 0x04 /* to be deprecated */ +#define POSIX_SPAWN_JETSAM_USE_EFFECTIVE_PRIORITY 0x01 +#define POSIX_SPAWN_JETSAM_HIWATER_BACKGROUND 0x02 /* to be deprecated */ +#define POSIX_SPAWN_JETSAM_MEMLIMIT_FATAL 0x04 /* to be deprecated */ /* * Additional flags available for use with * the posix_spawnattr_setjetsam_ext() call */ -#define POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL 0x04 /* if set, limit is fatal when the process is active */ -#define POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL 0x08 /* if set, limit is fatal when the process is inactive */ +#define POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL 0x04 /* if set, limit is fatal when the process is active */ +#define POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL 0x08 /* if set, limit is fatal when the process is inactive */ /* * Deprecated posix_spawn psa_flags values - * + * * POSIX_SPAWN_OSX_TALAPP_START 0x0400 * POSIX_SPAWN_IOS_RESV1_APP_START 0x0400 * POSIX_SPAWN_IOS_APPLE_DAEMON_START 0x0800 @@ -321,12 +321,12 @@ typedef enum { * XXX: Currently overloading psfao_oflag for PSFA_DUP2 */ typedef struct _psfa_action { - psfa_t psfaa_type; /* file action type */ - int psfaa_filedes; /* fd to operate on */ + psfa_t psfaa_type; /* file action type */ + int psfaa_filedes; /* fd to operate on */ struct _psfaa_open { - int psfao_oflag; /* open flags to use */ - mode_t psfao_mode; /* mode for open */ - char psfao_path[PATH_MAX]; /* path to open */ + int psfao_oflag; /* open flags to use */ + mode_t psfao_mode; /* mode for open */ + char psfao_path[PATH_MAX]; /* path to open */ } psfaa_openargs; } _psfa_action_t; @@ -349,16 +349,16 @@ typedef struct _psfa_action { * for 32 vs. 64 bt programming SPIs. */ typedef struct _posix_spawn_file_actions { - int psfa_act_alloc; /* available actions space */ - int psfa_act_count; /* count of defined actions */ - _psfa_action_t psfa_act_acts[]; /* actions array (uses c99) */ + int psfa_act_alloc; /* available actions space */ + int psfa_act_count; /* count of defined actions */ + _psfa_action_t psfa_act_acts[]; /* actions array (uses c99) */ } *_posix_spawn_file_actions_t; /* * Calculate the size of a structure, given the number of elements that it is * capable of containing. */ -#define PSF_ACTIONS_SIZE(x) \ +#define PSF_ACTIONS_SIZE(x) \ PS_ACTION_SIZE(x, struct _posix_spawn_file_actions, _psfa_action_t) /* @@ -366,7 +366,7 @@ typedef struct _posix_spawn_file_actions { * first allocated; this should be non-zero, since we expect that one would not * have been allocated unless there was an intent to use it. */ -#define PSF_ACTIONS_INIT_COUNT 2 +#define PSF_ACTIONS_INIT_COUNT 2 /* * Structure defining the true third argument to the posix_spawn() system call @@ -376,20 +376,20 @@ typedef struct _posix_spawn_file_actions { * performance optimization. 
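The _psfa_action records above are the kernel-side encoding of what user space builds through the posix_spawn_file_actions_*() calls; each addopen or adddup2 appends one action to the psfa_act_acts[] array. A short sketch of the user-facing side, assuming a Darwin SDK; the log path is a placeholder:

    #include <spawn.h>
    #include <fcntl.h>
    #include <unistd.h>

    extern char **environ;

    /* Redirect the child's stdout to a file: one PSFA_OPEN record
     * (fd 1, path, oflag, mode) ends up in the actions array. */
    static int
    spawn_logged(pid_t *pid, char *const argv[])
    {
        posix_spawn_file_actions_t fa;
        int err;

        posix_spawn_file_actions_init(&fa);
        posix_spawn_file_actions_addopen(&fa, STDOUT_FILENO,
            "/tmp/child.log", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        err = posix_spawn(pid, argv[0], &fa, NULL, argv, environ);
        posix_spawn_file_actions_destroy(&fa);
        return err;
    }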
*/ struct _posix_spawn_args_desc { - __darwin_size_t attr_size; /* size of attributes block */ - _posix_spawnattr_t attrp; /* pointer to block */ - __darwin_size_t file_actions_size; /* size of file actions block */ + __darwin_size_t attr_size; /* size of attributes block */ + _posix_spawnattr_t attrp; /* pointer to block */ + __darwin_size_t file_actions_size; /* size of file actions block */ _posix_spawn_file_actions_t - file_actions; /* pointer to block */ - __darwin_size_t port_actions_size; /* size of port actions block */ + file_actions; /* pointer to block */ + __darwin_size_t port_actions_size; /* size of port actions block */ _posix_spawn_port_actions_t - port_actions; /* pointer to port block */ + port_actions; /* pointer to port block */ __darwin_size_t mac_extensions_size; _posix_spawn_mac_policy_extensions_t - mac_extensions; /* pointer to policy-specific - * attributes */ + mac_extensions; /* pointer to policy-specific + * attributes */ __darwin_size_t coal_info_size; - struct _posix_spawn_coalition_info *coal_info; /* pointer to coalition info */ + struct _posix_spawn_coalition_info *coal_info; /* pointer to coalition info */ __darwin_size_t persona_info_size; struct _posix_spawn_persona_info *persona_info; @@ -404,33 +404,33 @@ struct _posix_spawn_args_desc { #endif struct user32__posix_spawn_args_desc { - uint32_t attr_size; /* size of attributes block */ - uint32_t attrp; /* pointer to block */ - uint32_t file_actions_size; /* size of file actions block */ - uint32_t file_actions; /* pointer to block */ - uint32_t port_actions_size; /* size of port actions block */ - uint32_t port_actions; /* pointer to block */ - uint32_t mac_extensions_size; - uint32_t mac_extensions; - uint32_t coal_info_size; - uint32_t coal_info; - uint32_t persona_info_size; - uint32_t persona_info; + uint32_t attr_size; /* size of attributes block */ + uint32_t attrp; /* pointer to block */ + uint32_t file_actions_size; /* size of file actions block */ + uint32_t file_actions; /* pointer to block */ + uint32_t port_actions_size; /* size of port actions block */ + uint32_t port_actions; /* pointer to block */ + uint32_t mac_extensions_size; + uint32_t mac_extensions; + uint32_t coal_info_size; + uint32_t coal_info; + uint32_t persona_info_size; + uint32_t persona_info; }; struct user__posix_spawn_args_desc { - user_size_t attr_size; /* size of attributes block */ - user_addr_t attrp; /* pointer to block */ - user_size_t file_actions_size; /* size of file actions block */ - user_addr_t file_actions; /* pointer to block */ - user_size_t port_actions_size; /* size of port actions block */ - user_addr_t port_actions; /* pointer to block */ - user_size_t mac_extensions_size; /* size of MAC-specific attrs. */ - user_addr_t mac_extensions; /* pointer to block */ - user_size_t coal_info_size; - user_addr_t coal_info; - user_size_t persona_info_size; - user_addr_t persona_info; + user_size_t attr_size; /* size of attributes block */ + user_addr_t attrp; /* pointer to block */ + user_size_t file_actions_size; /* size of file actions block */ + user_addr_t file_actions; /* pointer to block */ + user_size_t port_actions_size; /* size of port actions block */ + user_addr_t port_actions; /* pointer to block */ + user_size_t mac_extensions_size; /* size of MAC-specific attrs. 
*/ + user_addr_t mac_extensions; /* pointer to block */ + user_size_t coal_info_size; + user_addr_t coal_info; + user_size_t persona_info_size; + user_addr_t persona_info; }; @@ -438,7 +438,7 @@ struct user__posix_spawn_args_desc { #pragma options align=reset #endif -#endif /* __APPLE_API_PRIVATE */ -#endif /* KERNEL */ - -#endif /* _SYS_SPAWN_INTERNAL_H_ */ +#endif /* __APPLE_API_PRIVATE */ +#endif /* KERNEL */ + +#endif /* _SYS_SPAWN_INTERNAL_H_ */ diff --git a/bsd/sys/stackshot.h b/bsd/sys/stackshot.h index dcd24eada..b5c00135c 100644 --- a/bsd/sys/stackshot.h +++ b/bsd/sys/stackshot.h @@ -35,17 +35,17 @@ typedef struct stackshot_config { /* Input options */ - int sc_pid; /* PID to trace, or -1 for the entire system */ - uint32_t sc_flags; /* Stackshot flags */ - uint64_t sc_delta_timestamp; /* Retrieve a delta stackshot of system state that has changed since this time */ + int sc_pid; /* PID to trace, or -1 for the entire system */ + uint32_t sc_flags; /* Stackshot flags */ + uint64_t sc_delta_timestamp; /* Retrieve a delta stackshot of system state that has changed since this time */ /* Stackshot results */ - uint64_t sc_buffer; /* Pointer to stackshot buffer */ - uint32_t sc_size; /* Length of the stackshot buffer */ + uint64_t sc_buffer; /* Pointer to stackshot buffer */ + uint32_t sc_size; /* Length of the stackshot buffer */ /* Internals */ - uint64_t sc_out_buffer_addr; /* Location where the kernel should copy the address of the newly mapped buffer in user space */ - uint64_t sc_out_size_addr; /* Location where the kernel should copy the size of the stackshot buffer */ + uint64_t sc_out_buffer_addr; /* Location where the kernel should copy the address of the newly mapped buffer in user space */ + uint64_t sc_out_size_addr; /* Location where the kernel should copy the size of the stackshot buffer */ } stackshot_config_t; #ifndef KERNEL diff --git a/bsd/sys/stat.h b/bsd/sys/stat.h index 1169924d5..b5f73326a 100644 --- a/bsd/sys/stat.h +++ b/bsd/sys/stat.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -68,7 +68,7 @@ #ifndef _SYS_STAT_H_ -#define _SYS_STAT_H_ +#define _SYS_STAT_H_ #include #include @@ -92,7 +92,7 @@ */ #include #include -#include /* device number */ +#include /* device number */ #include #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) @@ -116,40 +116,40 @@ * vnode_internal.h). 
*/ struct ostat { - __uint16_t st_dev; /* inode's device */ - ino_t st_ino; /* inode's number */ - mode_t st_mode; /* inode protection mode */ - nlink_t st_nlink; /* number of hard links */ - __uint16_t st_uid; /* user ID of the file's owner */ - __uint16_t st_gid; /* group ID of the file's group */ - __uint16_t st_rdev; /* device type */ - __int32_t st_size; /* file size, in bytes */ - struct timespec st_atimespec; /* time of last access */ - struct timespec st_mtimespec; /* time of last data modification */ - struct timespec st_ctimespec; /* time of last file status change */ - __int32_t st_blksize; /* optimal blocksize for I/O */ - __int32_t st_blocks; /* blocks allocated for file */ - __uint32_t st_flags; /* user defined flags for file */ - __uint32_t st_gen; /* file generation number */ + __uint16_t st_dev; /* inode's device */ + ino_t st_ino; /* inode's number */ + mode_t st_mode; /* inode protection mode */ + nlink_t st_nlink; /* number of hard links */ + __uint16_t st_uid; /* user ID of the file's owner */ + __uint16_t st_gid; /* group ID of the file's group */ + __uint16_t st_rdev; /* device type */ + __int32_t st_size; /* file size, in bytes */ + struct timespec st_atimespec; /* time of last access */ + struct timespec st_mtimespec; /* time of last data modification */ + struct timespec st_ctimespec; /* time of last file status change */ + __int32_t st_blksize; /* optimal blocksize for I/O */ + __int32_t st_blocks; /* blocks allocated for file */ + __uint32_t st_flags; /* user defined flags for file */ + __uint32_t st_gen; /* file generation number */ }; #define __DARWIN_STRUCT_STAT64_TIMES \ - struct timespec st_atimespec; /* time of last access */ \ - struct timespec st_mtimespec; /* time of last data modification */ \ - struct timespec st_ctimespec; /* time of last status change */ \ - struct timespec st_birthtimespec; /* time of file creation(birth) */ + struct timespec st_atimespec; /* time of last access */ \ + struct timespec st_mtimespec; /* time of last data modification */ \ + struct timespec st_ctimespec; /* time of last status change */ \ + struct timespec st_birthtimespec; /* time of file creation(birth) */ #else /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ #define __DARWIN_STRUCT_STAT64_TIMES \ - time_t st_atime; /* [XSI] Time of last access */ \ - long st_atimensec; /* nsec of last access */ \ - time_t st_mtime; /* [XSI] Last data modification time */ \ - long st_mtimensec; /* last data modification nsec */ \ - time_t st_ctime; /* [XSI] Time of last status change */ \ - long st_ctimensec; /* nsec of last status change */ \ - time_t st_birthtime; /* File creation time(birth) */ \ - long st_birthtimensec; /* nsec of File creation time */ + time_t st_atime; /* [XSI] Time of last access */ \ + long st_atimensec; /* nsec of last access */ \ + time_t st_mtime; /* [XSI] Last data modification time */ \ + long st_mtimensec; /* last data modification nsec */ \ + time_t st_ctime; /* [XSI] Time of last status change */ \ + long st_ctimensec; /* nsec of last status change */ \ + time_t st_birthtime; /* File creation time(birth) */ \ + long st_birthtimensec; /* nsec of File creation time */ #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ @@ -164,21 +164,21 @@ struct ostat { * number instead of 32bit ino_t and the addition of create(birth) time. 
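The st_birthtimespec slot carried by __DARWIN_STRUCT_STAT64_TIMES is what surfaces file creation time to user space when the 64-bit-inode stat layout is in effect. A small sketch, assuming the default Darwin feature level (no _POSIX_C_SOURCE) so the timespec-style field names are visible; the path is a placeholder:

    #include <sys/stat.h>
    #include <stdio.h>

    int main(void)
    {
        struct stat st;

        if (stat("/System", &st) == 0) {
            /* st_birthtimespec comes from __DARWIN_STRUCT_STAT64_TIMES */
            printf("created: %ld\n", (long)st.st_birthtimespec.tv_sec);
        }
        return 0;
    }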
*/ #define __DARWIN_STRUCT_STAT64 { \ - dev_t st_dev; /* [XSI] ID of device containing file */ \ - mode_t st_mode; /* [XSI] Mode of file (see below) */ \ - nlink_t st_nlink; /* [XSI] Number of hard links */ \ - __darwin_ino64_t st_ino; /* [XSI] File serial number */ \ - uid_t st_uid; /* [XSI] User ID of the file */ \ - gid_t st_gid; /* [XSI] Group ID of the file */ \ - dev_t st_rdev; /* [XSI] Device ID */ \ + dev_t st_dev; /* [XSI] ID of device containing file */ \ + mode_t st_mode; /* [XSI] Mode of file (see below) */ \ + nlink_t st_nlink; /* [XSI] Number of hard links */ \ + __darwin_ino64_t st_ino; /* [XSI] File serial number */ \ + uid_t st_uid; /* [XSI] User ID of the file */ \ + gid_t st_gid; /* [XSI] Group ID of the file */ \ + dev_t st_rdev; /* [XSI] Device ID */ \ __DARWIN_STRUCT_STAT64_TIMES \ - off_t st_size; /* [XSI] file size, in bytes */ \ - blkcnt_t st_blocks; /* [XSI] blocks allocated for file */ \ - blksize_t st_blksize; /* [XSI] optimal blocksize for I/O */ \ - __uint32_t st_flags; /* user defined flags for file */ \ - __uint32_t st_gen; /* file generation number */ \ - __int32_t st_lspare; /* RESERVED: DO NOT USE! */ \ - __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ \ + off_t st_size; /* [XSI] file size, in bytes */ \ + blkcnt_t st_blocks; /* [XSI] blocks allocated for file */ \ + blksize_t st_blksize; /* [XSI] optimal blocksize for I/O */ \ + __uint32_t st_flags; /* user defined flags for file */ \ + __uint32_t st_gen; /* file generation number */ \ + __int32_t st_lspare; /* RESERVED: DO NOT USE! */ \ + __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ \ } /* @@ -192,32 +192,32 @@ struct stat __DARWIN_STRUCT_STAT64; #else /* !__DARWIN_64_BIT_INO_T */ struct stat { - dev_t st_dev; /* [XSI] ID of device containing file */ - ino_t st_ino; /* [XSI] File serial number */ - mode_t st_mode; /* [XSI] Mode of file (see below) */ - nlink_t st_nlink; /* [XSI] Number of hard links */ - uid_t st_uid; /* [XSI] User ID of the file */ - gid_t st_gid; /* [XSI] Group ID of the file */ - dev_t st_rdev; /* [XSI] Device ID */ + dev_t st_dev; /* [XSI] ID of device containing file */ + ino_t st_ino; /* [XSI] File serial number */ + mode_t st_mode; /* [XSI] Mode of file (see below) */ + nlink_t st_nlink; /* [XSI] Number of hard links */ + uid_t st_uid; /* [XSI] User ID of the file */ + gid_t st_gid; /* [XSI] Group ID of the file */ + dev_t st_rdev; /* [XSI] Device ID */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) - struct timespec st_atimespec; /* time of last access */ - struct timespec st_mtimespec; /* time of last data modification */ - struct timespec st_ctimespec; /* time of last status change */ + struct timespec st_atimespec; /* time of last access */ + struct timespec st_mtimespec; /* time of last data modification */ + struct timespec st_ctimespec; /* time of last status change */ #else - time_t st_atime; /* [XSI] Time of last access */ - long st_atimensec; /* nsec of last access */ - time_t st_mtime; /* [XSI] Last data modification time */ - long st_mtimensec; /* last data modification nsec */ - time_t st_ctime; /* [XSI] Time of last status change */ - long st_ctimensec; /* nsec of last status change */ + time_t st_atime; /* [XSI] Time of last access */ + long st_atimensec; /* nsec of last access */ + time_t st_mtime; /* [XSI] Last data modification time */ + long st_mtimensec; /* last data modification nsec */ + time_t st_ctime; /* [XSI] Time of last status change */ + long st_ctimensec; /* nsec of last status change */ #endif - off_t st_size; /* [XSI] file 
size, in bytes */ - blkcnt_t st_blocks; /* [XSI] blocks allocated for file */ - blksize_t st_blksize; /* [XSI] optimal blocksize for I/O */ - __uint32_t st_flags; /* user defined flags for file */ - __uint32_t st_gen; /* file generation number */ - __int32_t st_lspare; /* RESERVED: DO NOT USE! */ - __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ + off_t st_size; /* [XSI] file size, in bytes */ + blkcnt_t st_blocks; /* [XSI] blocks allocated for file */ + blksize_t st_blksize; /* [XSI] optimal blocksize for I/O */ + __uint32_t st_flags; /* user defined flags for file */ + __uint32_t st_gen; /* file generation number */ + __int32_t st_lspare; /* RESERVED: DO NOT USE! */ + __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ }; #endif /* __DARWIN_64_BIT_INO_T */ @@ -235,38 +235,38 @@ struct stat64 __DARWIN_STRUCT_STAT64; #ifdef KERNEL #ifdef BSD_KERNEL_PRIVATE -/* LP64 version of struct stat. time_t (see timespec) is a long and must +/* LP64 version of struct stat. time_t (see timespec) is a long and must * grow when we're dealing with a 64-bit process. * WARNING - keep in sync with struct stat */ struct user64_stat { - dev_t st_dev; /* [XSI] ID of device containing file */ - ino_t st_ino; /* [XSI] File serial number */ - mode_t st_mode; /* [XSI] Mode of file (see below) */ - nlink_t st_nlink; /* [XSI] Number of hard links */ - uid_t st_uid; /* [XSI] User ID of the file */ - gid_t st_gid; /* [XSI] Group ID of the file */ - dev_t st_rdev; /* [XSI] Device ID */ + dev_t st_dev; /* [XSI] ID of device containing file */ + ino_t st_ino; /* [XSI] File serial number */ + mode_t st_mode; /* [XSI] Mode of file (see below) */ + nlink_t st_nlink; /* [XSI] Number of hard links */ + uid_t st_uid; /* [XSI] User ID of the file */ + gid_t st_gid; /* [XSI] Group ID of the file */ + dev_t st_rdev; /* [XSI] Device ID */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) - struct user64_timespec st_atimespec; /* time of last access */ - struct user64_timespec st_mtimespec; /* time of last data modification */ - struct user64_timespec st_ctimespec; /* time of last status change */ + struct user64_timespec st_atimespec; /* time of last access */ + struct user64_timespec st_mtimespec; /* time of last data modification */ + struct user64_timespec st_ctimespec; /* time of last status change */ #else - user64_time_t st_atime; /* [XSI] Time of last access */ - user64_long_t st_atimensec; /* nsec of last access */ - user64_time_t st_mtime; /* [XSI] Last data modification */ - user64_long_t st_mtimensec; /* last data modification nsec */ - user64_time_t st_ctime; /* [XSI] Time of last status change */ - user64_long_t st_ctimensec; /* nsec of last status change */ + user64_time_t st_atime; /* [XSI] Time of last access */ + user64_long_t st_atimensec; /* nsec of last access */ + user64_time_t st_mtime; /* [XSI] Last data modification */ + user64_long_t st_mtimensec; /* last data modification nsec */ + user64_time_t st_ctime; /* [XSI] Time of last status change */ + user64_long_t st_ctimensec; /* nsec of last status change */ #endif - off_t st_size; /* [XSI] File size, in bytes */ - blkcnt_t st_blocks; /* [XSI] Blocks allocated for file */ - blksize_t st_blksize; /* [XSI] Optimal blocksize for I/O */ - __uint32_t st_flags; /* user defined flags for file */ - __uint32_t st_gen; /* file generation number */ - __int32_t st_lspare; /* RESERVED: DO NOT USE! */ - __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! 
*/ + off_t st_size; /* [XSI] File size, in bytes */ + blkcnt_t st_blocks; /* [XSI] Blocks allocated for file */ + blksize_t st_blksize; /* [XSI] Optimal blocksize for I/O */ + __uint32_t st_flags; /* user defined flags for file */ + __uint32_t st_gen; /* file generation number */ + __int32_t st_lspare; /* RESERVED: DO NOT USE! */ + __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ }; /* ILP32 version of struct stat. @@ -274,32 +274,32 @@ struct user64_stat { */ struct user32_stat { - dev_t st_dev; /* [XSI] ID of device containing file */ - ino_t st_ino; /* [XSI] File serial number */ - mode_t st_mode; /* [XSI] Mode of file (see below) */ - nlink_t st_nlink; /* [XSI] Number of hard links */ - uid_t st_uid; /* [XSI] User ID of the file */ - gid_t st_gid; /* [XSI] Group ID of the file */ - dev_t st_rdev; /* [XSI] Device ID */ + dev_t st_dev; /* [XSI] ID of device containing file */ + ino_t st_ino; /* [XSI] File serial number */ + mode_t st_mode; /* [XSI] Mode of file (see below) */ + nlink_t st_nlink; /* [XSI] Number of hard links */ + uid_t st_uid; /* [XSI] User ID of the file */ + gid_t st_gid; /* [XSI] Group ID of the file */ + dev_t st_rdev; /* [XSI] Device ID */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) - struct user32_timespec st_atimespec; /* time of last access */ - struct user32_timespec st_mtimespec; /* time of last data modification */ - struct user32_timespec st_ctimespec; /* time of last status change */ + struct user32_timespec st_atimespec; /* time of last access */ + struct user32_timespec st_mtimespec; /* time of last data modification */ + struct user32_timespec st_ctimespec; /* time of last status change */ #else - user32_time_t st_atime; /* [XSI] Time of last access */ - user32_long_t st_atimensec; /* nsec of last access */ - user32_time_t st_mtime; /* [XSI] Last data modification */ - user32_long_t st_mtimensec; /* last data modification nsec */ - user32_time_t st_ctime; /* [XSI] Time of last status change */ - user32_long_t st_ctimensec; /* nsec of last status change */ + user32_time_t st_atime; /* [XSI] Time of last access */ + user32_long_t st_atimensec; /* nsec of last access */ + user32_time_t st_mtime; /* [XSI] Last data modification */ + user32_long_t st_mtimensec; /* last data modification nsec */ + user32_time_t st_ctime; /* [XSI] Time of last status change */ + user32_long_t st_ctimensec; /* nsec of last status change */ #endif - off_t st_size; /* [XSI] File size, in bytes */ - blkcnt_t st_blocks; /* [XSI] Blocks allocated for file */ - blksize_t st_blksize; /* [XSI] Optimal blocksize for I/O */ - __uint32_t st_flags; /* user defined flags for file */ - __uint32_t st_gen; /* file generation number */ - __int32_t st_lspare; /* RESERVED: DO NOT USE! */ - __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ + off_t st_size; /* [XSI] File size, in bytes */ + blkcnt_t st_blocks; /* [XSI] Blocks allocated for file */ + blksize_t st_blksize; /* [XSI] Optimal blocksize for I/O */ + __uint32_t st_flags; /* user defined flags for file */ + __uint32_t st_gen; /* file generation number */ + __int32_t st_lspare; /* RESERVED: DO NOT USE! */ + __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! 
*/ }; extern void munge_user64_stat(struct stat *sbp, struct user64_stat *usbp); @@ -307,67 +307,67 @@ extern void munge_user32_stat(struct stat *sbp, struct user32_stat *usbp); struct user64_stat64 { - dev_t st_dev; /* [XSI] ID of device containing file */ - mode_t st_mode; /* [XSI] Mode of file (see below) */ - nlink_t st_nlink; /* [XSI] Number of hard links */ - ino64_t st_ino; /* [XSI] File serial number */ - uid_t st_uid; /* [XSI] User ID of the file */ - gid_t st_gid; /* [XSI] Group ID of the file */ - dev_t st_rdev; /* [XSI] Device ID */ + dev_t st_dev; /* [XSI] ID of device containing file */ + mode_t st_mode; /* [XSI] Mode of file (see below) */ + nlink_t st_nlink; /* [XSI] Number of hard links */ + ino64_t st_ino; /* [XSI] File serial number */ + uid_t st_uid; /* [XSI] User ID of the file */ + gid_t st_gid; /* [XSI] Group ID of the file */ + dev_t st_rdev; /* [XSI] Device ID */ #ifndef _POSIX_C_SOURCE - struct user64_timespec st_atimespec; /* time of last access */ - struct user64_timespec st_mtimespec; /* time of last data modification */ - struct user64_timespec st_ctimespec; /* time of last status change */ - struct user64_timespec st_birthtimespec; /* time of file creation(birth) */ + struct user64_timespec st_atimespec; /* time of last access */ + struct user64_timespec st_mtimespec; /* time of last data modification */ + struct user64_timespec st_ctimespec; /* time of last status change */ + struct user64_timespec st_birthtimespec; /* time of file creation(birth) */ #else - user64_time_t st_atime; /* [XSI] Time of last access */ - user64_long_t st_atimensec; /* nsec of last access */ - user64_time_t st_mtime; /* [XSI] Last data modification time */ - user64_long_t st_mtimensec; /* last data modification nsec */ - user64_time_t st_ctime; /* [XSI] Time of last status change */ - user64_long_t st_ctimensec; /* nsec of last status change */ - user64_time_t st_birthtime; /* File creation time(birth) */ - user64_long_t st_birthtimensec; /* nsec of File creation time */ + user64_time_t st_atime; /* [XSI] Time of last access */ + user64_long_t st_atimensec; /* nsec of last access */ + user64_time_t st_mtime; /* [XSI] Last data modification time */ + user64_long_t st_mtimensec; /* last data modification nsec */ + user64_time_t st_ctime; /* [XSI] Time of last status change */ + user64_long_t st_ctimensec; /* nsec of last status change */ + user64_time_t st_birthtime; /* File creation time(birth) */ + user64_long_t st_birthtimensec; /* nsec of File creation time */ #endif - off_t st_size; /* [XSI] file size, in bytes */ - blkcnt_t st_blocks; /* [XSI] blocks allocated for file */ - blksize_t st_blksize; /* [XSI] optimal blocksize for I/O */ - __uint32_t st_flags; /* user defined flags for file */ - __uint32_t st_gen; /* file generation number */ - __uint32_t st_lspare; /* RESERVED: DO NOT USE! */ - __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ + off_t st_size; /* [XSI] file size, in bytes */ + blkcnt_t st_blocks; /* [XSI] blocks allocated for file */ + blksize_t st_blksize; /* [XSI] optimal blocksize for I/O */ + __uint32_t st_flags; /* user defined flags for file */ + __uint32_t st_gen; /* file generation number */ + __uint32_t st_lspare; /* RESERVED: DO NOT USE! */ + __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! 
*/ }; struct user32_stat64 { - dev_t st_dev; /* [XSI] ID of device containing file */ - mode_t st_mode; /* [XSI] Mode of file (see below) */ - nlink_t st_nlink; /* [XSI] Number of hard links */ - ino64_t st_ino; /* [XSI] File serial number */ - uid_t st_uid; /* [XSI] User ID of the file */ - gid_t st_gid; /* [XSI] Group ID of the file */ - dev_t st_rdev; /* [XSI] Device ID */ + dev_t st_dev; /* [XSI] ID of device containing file */ + mode_t st_mode; /* [XSI] Mode of file (see below) */ + nlink_t st_nlink; /* [XSI] Number of hard links */ + ino64_t st_ino; /* [XSI] File serial number */ + uid_t st_uid; /* [XSI] User ID of the file */ + gid_t st_gid; /* [XSI] Group ID of the file */ + dev_t st_rdev; /* [XSI] Device ID */ #ifndef _POSIX_C_SOURCE - struct user32_timespec st_atimespec; /* time of last access */ - struct user32_timespec st_mtimespec; /* time of last data modification */ - struct user32_timespec st_ctimespec; /* time of last status change */ - struct user32_timespec st_birthtimespec; /* time of file creation(birth) */ + struct user32_timespec st_atimespec; /* time of last access */ + struct user32_timespec st_mtimespec; /* time of last data modification */ + struct user32_timespec st_ctimespec; /* time of last status change */ + struct user32_timespec st_birthtimespec; /* time of file creation(birth) */ #else - user32_time_t st_atime; /* [XSI] Time of last access */ - user32_long_t st_atimensec; /* nsec of last access */ - user32_time_t st_mtime; /* [XSI] Last data modification time */ - user32_long_t st_mtimensec; /* last data modification nsec */ - user32_time_t st_ctime; /* [XSI] Time of last status change */ - user32_long_t st_ctimensec; /* nsec of last status change */ - user32_time_t st_birthtime; /* File creation time(birth) */ - user32_long_t st_birthtimensec; /* nsec of File creation time */ + user32_time_t st_atime; /* [XSI] Time of last access */ + user32_long_t st_atimensec; /* nsec of last access */ + user32_time_t st_mtime; /* [XSI] Last data modification time */ + user32_long_t st_mtimensec; /* last data modification nsec */ + user32_time_t st_ctime; /* [XSI] Time of last status change */ + user32_long_t st_ctimensec; /* nsec of last status change */ + user32_time_t st_birthtime; /* File creation time(birth) */ + user32_long_t st_birthtimensec; /* nsec of File creation time */ #endif - off_t st_size; /* [XSI] file size, in bytes */ - blkcnt_t st_blocks; /* [XSI] blocks allocated for file */ - blksize_t st_blksize; /* [XSI] optimal blocksize for I/O */ - __uint32_t st_flags; /* user defined flags for file */ - __uint32_t st_gen; /* file generation number */ - __uint32_t st_lspare; /* RESERVED: DO NOT USE! */ - __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ + off_t st_size; /* [XSI] file size, in bytes */ + blkcnt_t st_blocks; /* [XSI] blocks allocated for file */ + blksize_t st_blksize; /* [XSI] optimal blocksize for I/O */ + __uint32_t st_flags; /* user defined flags for file */ + __uint32_t st_gen; /* file generation number */ + __uint32_t st_lspare; /* RESERVED: DO NOT USE! */ + __int64_t st_qspare[2]; /* RESERVED: DO NOT USE! */ #if defined(__x86_64__) /* * This packing is required to ensure symmetry between userspace and kernelspace @@ -375,7 +375,7 @@ struct user32_stat64 { * supported ARM slices (arm64/armv7k/arm64_32) contain the same struct * alignment ABI so this packing isn't needed for ARM. 
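The packed/aligned(4) attribute discussed here exists because i386 user space naturally places 8-byte members on 4-byte boundaries, while the 64-bit kernel compiler would pad them to 8; the attribute forces both sides to identical offsets. A reduced stand-in (not the real struct) showing how such an ABI contract can be pinned down at compile time:

    #include <stdint.h>
    #include <stddef.h>

    /* Reduced stand-in: an 8-byte field after a 4-byte prefix. */
    struct abi_demo {
        uint32_t pre;
        uint64_t wide;
    } __attribute__((packed, aligned(4)));

    /* With packed + aligned(4), 'wide' sits at offset 4 on every target,
     * matching what a 32-bit i386 compiler produces naturally. */
    _Static_assert(offsetof(struct abi_demo, wide) == 4, "ABI drift");
    _Static_assert(sizeof(struct abi_demo) == 12, "ABI drift");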
*/ -} __attribute__((packed,aligned(4))); +} __attribute__((packed, aligned(4))); #else }; #endif @@ -407,15 +407,15 @@ extern void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp); * of st_mode from a stat structure. The macro shall evaluate to a non-zero * value if the test is true; 0 if the test is false. */ -#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK) /* block special */ -#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) /* char special */ -#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) /* directory */ -#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO) /* fifo or socket */ -#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) /* regular file */ -#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK) /* symbolic link */ -#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK) /* socket */ +#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK) /* block special */ +#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) /* char special */ +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) /* directory */ +#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO) /* fifo or socket */ +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) /* regular file */ +#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK) /* symbolic link */ +#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK) /* socket */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define S_ISWHT(m) (((m) & S_IFMT) == S_IFWHT) /* OBSOLETE: whiteout */ +#define S_ISWHT(m) (((m) & S_IFMT) == S_IFWHT) /* OBSOLETE: whiteout */ #endif /* @@ -433,9 +433,9 @@ extern void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp); * provides these macros to ensure source compatability with * implementations which do. */ -#define S_TYPEISMQ(buf) (0) /* Test for a message queue */ -#define S_TYPEISSEM(buf) (0) /* Test for a semaphore */ -#define S_TYPEISSHM(buf) (0) /* Test for a shared memory object */ +#define S_TYPEISMQ(buf) (0) /* Test for a message queue */ +#define S_TYPEISSEM(buf) (0) /* Test for a semaphore */ +#define S_TYPEISSHM(buf) (0) /* Test for a shared memory object */ /* * [TYM] The implementation may implement typed memory objects as distinct @@ -451,55 +451,55 @@ extern void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp); * provides this macro to ensure source compatability with * implementations which do. */ -#define S_TYPEISTMO(buf) (0) /* Test for a typed memory object */ +#define S_TYPEISTMO(buf) (0) /* Test for a typed memory object */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define ACCESSPERMS (S_IRWXU|S_IRWXG|S_IRWXO) /* 0777 */ - /* 7777 */ -#define ALLPERMS (S_ISUID|S_ISGID|S_ISTXT|S_IRWXU|S_IRWXG|S_IRWXO) - /* 0666 */ -#define DEFFILEMODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH) +#define ACCESSPERMS (S_IRWXU|S_IRWXG|S_IRWXO) /* 0777 */ + /* 7777 */ +#define ALLPERMS (S_ISUID|S_ISGID|S_ISTXT|S_IRWXU|S_IRWXG|S_IRWXO) +/* 0666 */ +#define DEFFILEMODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH) -#define S_BLKSIZE 512 /* block size used in the stat struct */ +#define S_BLKSIZE 512 /* block size used in the stat struct */ /* * Definitions of flags stored in file flags word. * * Super-user and owner changeable flags. */ -#define UF_SETTABLE 0x0000ffff /* mask of owner changeable flags */ -#define UF_NODUMP 0x00000001 /* do not dump file */ -#define UF_IMMUTABLE 0x00000002 /* file may not be changed */ -#define UF_APPEND 0x00000004 /* writes to file may only append */ -#define UF_OPAQUE 0x00000008 /* directory is opaque wrt. 
union */ +#define UF_SETTABLE 0x0000ffff /* mask of owner changeable flags */ +#define UF_NODUMP 0x00000001 /* do not dump file */ +#define UF_IMMUTABLE 0x00000002 /* file may not be changed */ +#define UF_APPEND 0x00000004 /* writes to file may only append */ +#define UF_OPAQUE 0x00000008 /* directory is opaque wrt. union */ /* * The following bit is reserved for FreeBSD. It is not implemented * in Mac OS X. */ /* #define UF_NOUNLINK 0x00000010 */ /* file may not be removed or renamed */ -#define UF_COMPRESSED 0x00000020 /* file is compressed (some file-systems) */ +#define UF_COMPRESSED 0x00000020 /* file is compressed (some file-systems) */ /* UF_TRACKED is used for dealing with document IDs. We no longer issue - notifications for deletes or renames for files which have UF_TRACKED set. */ -#define UF_TRACKED 0x00000040 + * notifications for deletes or renames for files which have UF_TRACKED set. */ +#define UF_TRACKED 0x00000040 -#define UF_DATAVAULT 0x00000080 /* entitlement required for reading */ - /* and writing */ +#define UF_DATAVAULT 0x00000080 /* entitlement required for reading */ + /* and writing */ /* Bits 0x0100 through 0x4000 are currently undefined. */ -#define UF_HIDDEN 0x00008000 /* hint that this item should not be */ - /* displayed in a GUI */ +#define UF_HIDDEN 0x00008000 /* hint that this item should not be */ + /* displayed in a GUI */ /* * Super-user changeable flags. */ -#define SF_SUPPORTED 0x001f0000 /* mask of superuser supported flags */ -#define SF_SETTABLE 0xffff0000 /* mask of superuser changeable flags */ -#define SF_ARCHIVED 0x00010000 /* file is archived */ -#define SF_IMMUTABLE 0x00020000 /* file may not be changed */ -#define SF_APPEND 0x00040000 /* writes to file may only append */ -#define SF_RESTRICTED 0x00080000 /* entitlement required for writing */ -#define SF_NOUNLINK 0x00100000 /* Item may not be removed, renamed or mounted on */ +#define SF_SUPPORTED 0x001f0000 /* mask of superuser supported flags */ +#define SF_SETTABLE 0xffff0000 /* mask of superuser changeable flags */ +#define SF_ARCHIVED 0x00010000 /* file is archived */ +#define SF_IMMUTABLE 0x00020000 /* file may not be changed */ +#define SF_APPEND 0x00040000 /* writes to file may only append */ +#define SF_RESTRICTED 0x00080000 /* entitlement required for writing */ +#define SF_NOUNLINK 0x00100000 /* Item may not be removed, renamed or mounted on */ /* * The following two bits are reserved for FreeBSD. They are not @@ -512,9 +512,9 @@ extern void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp); /* * Shorthand abbreviations of above. 
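These UF_*/SF_* bits live in the st_flags word of struct stat and are changed with chflags(2)/fchflags(2), which this header declares further down. A sketch that adds the GUI-hidden hint to an existing file while preserving its other owner flags; the path is a placeholder, and UF_HIDDEN is only a hint to the GUI:

    #include <sys/stat.h>
    #include <stdio.h>

    int main(void)
    {
        struct stat st;
        const char *p = "/tmp/scratch";   /* placeholder path */

        if (stat(p, &st) != 0) {
            perror("stat");
            return 1;
        }
        /* Keep the existing owner flags, add the GUI-hidden hint. */
        if (chflags(p, st.st_flags | UF_HIDDEN) != 0) {
            perror("chflags");
            return 1;
        }
        return 0;
    }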
*/ -#define OPAQUE (UF_OPAQUE) -#define APPEND (UF_APPEND | SF_APPEND) -#define IMMUTABLE (UF_IMMUTABLE | SF_IMMUTABLE) +#define OPAQUE (UF_OPAQUE) +#define APPEND (UF_APPEND | SF_APPEND) +#define IMMUTABLE (UF_IMMUTABLE | SF_IMMUTABLE) #endif #endif @@ -522,54 +522,54 @@ extern void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp); __BEGIN_DECLS /* [XSI] */ -int chmod(const char *, mode_t) __DARWIN_ALIAS(chmod); -int fchmod(int, mode_t) __DARWIN_ALIAS(fchmod); -int fstat(int, struct stat *) __DARWIN_INODE64(fstat); -int lstat(const char *, struct stat *) __DARWIN_INODE64(lstat); -int mkdir(const char *, mode_t); -int mkfifo(const char *, mode_t); -int stat(const char *, struct stat *) __DARWIN_INODE64(stat); -int mknod(const char *, mode_t, dev_t); -mode_t umask(mode_t); +int chmod(const char *, mode_t) __DARWIN_ALIAS(chmod); +int fchmod(int, mode_t) __DARWIN_ALIAS(fchmod); +int fstat(int, struct stat *) __DARWIN_INODE64(fstat); +int lstat(const char *, struct stat *) __DARWIN_INODE64(lstat); +int mkdir(const char *, mode_t); +int mkfifo(const char *, mode_t); +int stat(const char *, struct stat *) __DARWIN_INODE64(stat); +int mknod(const char *, mode_t, dev_t); +mode_t umask(mode_t); #if __DARWIN_C_LEVEL >= 200809L -int fchmodat(int, const char *, mode_t, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -int fstatat(int, const char *, struct stat *, int) __DARWIN_INODE64(fstatat) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -int mkdirat(int, const char *, mode_t) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int fchmodat(int, const char *, mode_t, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int fstatat(int, const char *, struct stat *, int) __DARWIN_INODE64(fstatat) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int mkdirat(int, const char *, mode_t) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -#define UTIME_NOW -1 -#define UTIME_OMIT -2 +#define UTIME_NOW -1 +#define UTIME_OMIT -2 -int futimens(int __fd, const struct timespec __times[2]) __API_AVAILABLE(macosx(10.13), ios(11.0), tvos(11.0), watchos(4.0)); -int utimensat(int __fd, const char *__path, const struct timespec __times[2], - int __flag) __API_AVAILABLE(macosx(10.13), ios(11.0), tvos(11.0), watchos(4.0)); +int futimens(int __fd, const struct timespec __times[2]) __API_AVAILABLE(macosx(10.13), ios(11.0), tvos(11.0), watchos(4.0)); +int utimensat(int __fd, const char *__path, const struct timespec __times[2], + int __flag) __API_AVAILABLE(macosx(10.13), ios(11.0), tvos(11.0), watchos(4.0)); #endif #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #include -int chflags(const char *, __uint32_t); -int chmodx_np(const char *, filesec_t); -int fchflags(int, __uint32_t); -int fchmodx_np(int, filesec_t); -int fstatx_np(int, struct stat *, filesec_t) __DARWIN_INODE64(fstatx_np); -int lchflags(const char *, __uint32_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); -int lchmod(const char *, mode_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); -int lstatx_np(const char *, struct stat *, filesec_t) __DARWIN_INODE64(lstatx_np); -int mkdirx_np(const char *, filesec_t); -int mkfifox_np(const char *, filesec_t); -int statx_np(const char *, struct stat *, filesec_t) __DARWIN_INODE64(statx_np); -int umaskx_np(filesec_t) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_4,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); +int chflags(const char *, __uint32_t); +int chmodx_np(const char *, filesec_t); +int fchflags(int, __uint32_t); +int fchmodx_np(int, filesec_t); +int 
fstatx_np(int, struct stat *, filesec_t) __DARWIN_INODE64(fstatx_np); +int lchflags(const char *, __uint32_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int lchmod(const char *, mode_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int lstatx_np(const char *, struct stat *, filesec_t) __DARWIN_INODE64(lstatx_np); +int mkdirx_np(const char *, filesec_t); +int mkfifox_np(const char *, filesec_t); +int statx_np(const char *, struct stat *, filesec_t) __DARWIN_INODE64(statx_np); +int umaskx_np(filesec_t) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_4, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); #if !__DARWIN_ONLY_64_BIT_INO_T /* The following deprecated routines are similar to stat and friends except provide struct stat64 instead of struct stat */ -int fstatx64_np(int, struct stat64 *, filesec_t) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); -int lstatx64_np(const char *, struct stat64 *, filesec_t) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); -int statx64_np(const char *, struct stat64 *, filesec_t) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); -int fstat64(int, struct stat64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); -int lstat64(const char *, struct stat64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); -int stat64(const char *, struct stat64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5,__MAC_10_6,__IPHONE_NA,__IPHONE_NA); +int fstatx64_np(int, struct stat64 *, filesec_t) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); +int lstatx64_np(const char *, struct stat64 *, filesec_t) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); +int statx64_np(const char *, struct stat64 *, filesec_t) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); +int fstat64(int, struct stat64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); +int lstat64(const char *, struct stat64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); +int stat64(const char *, struct stat64 *) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_5, __MAC_10_6, __IPHONE_NA, __IPHONE_NA); #endif /* !__DARWIN_ONLY_64_BIT_INO_T */ #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ diff --git a/bsd/sys/stdio.h b/bsd/sys/stdio.h index b6957c83b..441eaf6aa 100644 --- a/bsd/sys/stdio.h +++ b/bsd/sys/stdio.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
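
The bsd/sys/stat.h hunk above covers both the chflags(2) flag bits (UF_*/SF_*) and the timespec-based *at interfaces (fchmodat, fstatat, utimensat with UTIME_NOW/UTIME_OMIT). A minimal user-space sketch of how the two fit together; the /tmp path is hypothetical and error handling is kept to perror():

#include <sys/stat.h>
#include <fcntl.h>      /* AT_FDCWD */
#include <stdio.h>

int
main(void)
{
	const char *path = "/tmp/scratch";      /* hypothetical test file */
	struct stat sb;
	struct timespec times[2];

	if (stat(path, &sb) == -1) {
		perror("stat");
		return 1;
	}
	/* chflags(2) replaces the whole flag word, so OR into st_flags. */
	if (chflags(path, sb.st_flags | UF_HIDDEN) == -1) {
		perror("chflags");
		return 1;
	}
	/* Touch only the modification time; UTIME_OMIT leaves atime alone. */
	times[0].tv_nsec = UTIME_OMIT;
	times[1].tv_nsec = UTIME_NOW;
	if (utimensat(AT_FDCWD, path, times, 0) == -1) {
		perror("utimensat");
		return 1;
	}
	return 0;
}

Clearing a flag is the mirror image (st_flags & ~UF_HIDDEN), and the SF_* bits additionally require super-user credentials, as the SF_SETTABLE mask name suggests.
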
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _SYS_STDIO_H_ -#define _SYS_STDIO_H_ +#define _SYS_STDIO_H_ #include @@ -37,13 +37,13 @@ __BEGIN_DECLS -int renameat(int, const char *, int, const char *) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int renameat(int, const char *, int, const char *) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -#define RENAME_SECLUDE 0x00000001 -#define RENAME_SWAP 0x00000002 -#define RENAME_EXCL 0x00000004 +#define RENAME_SECLUDE 0x00000001 +#define RENAME_SWAP 0x00000002 +#define RENAME_EXCL 0x00000004 int renamex_np(const char *, const char *, unsigned int) __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0); int renameatx_np(int, const char *, int, const char *, unsigned int) __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0); diff --git a/bsd/sys/subr_prf.h b/bsd/sys/subr_prf.h index 966aedf5a..26016c257 100644 --- a/bsd/sys/subr_prf.h +++ b/bsd/sys/subr_prf.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -69,7 +69,7 @@ #include -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifdef __APPLE_API_PRIVATE @@ -77,16 +77,15 @@ * "flags" argument to prf(). * NB: Used in integer flags field, private to bsd/kern/subr_prf.c */ -#define TOCONS 0x00000001 /* output to console */ -#define TOTTY 0x00000002 /* output to tty */ -#define TOLOG 0x00000004 /* output to log (log lock not held) */ -#define TOSTR 0x00000008 /* output to string */ -#define TOLOGLOCKED 0x00000010 /* output to log (log lock held) */ +#define TOCONS 0x00000001 /* output to console */ +#define TOTTY 0x00000002 /* output to tty */ +#define TOLOG 0x00000004 /* output to log (log lock not held) */ +#define TOSTR 0x00000008 /* output to string */ +#define TOLOGLOCKED 0x00000010 /* output to log (log lock held) */ -extern int prf(const char *fmt, va_list ap, int flags, struct tty *ttyp) __printflike(1,0); +extern int prf(const char *fmt, va_list ap, int flags, struct tty *ttyp) __printflike(1, 0); #endif /* __APPLE_API_PRIVATE */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ #endif /* ! 
_SYS_SUBRPRF_H_ */ - diff --git a/bsd/sys/sys_domain.h b/bsd/sys/sys_domain.h index 8a12b455c..d14a5abcb 100644 --- a/bsd/sys/sys_domain.h +++ b/bsd/sys/sys_domain.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005, 2012, 2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,30 +35,30 @@ #include #ifdef KERNEL_PRIVATE -#include +#include #endif /* KERNEL_PRIVATE */ -/* Kernel Events Protocol */ -#define SYSPROTO_EVENT 1 /* kernel events protocol */ +/* Kernel Events Protocol */ +#define SYSPROTO_EVENT 1 /* kernel events protocol */ /* Kernel Control Protocol */ -#define SYSPROTO_CONTROL 2 /* kernel control protocol */ -#define AF_SYS_CONTROL 2 /* corresponding sub address type */ +#define SYSPROTO_CONTROL 2 /* kernel control protocol */ +#define AF_SYS_CONTROL 2 /* corresponding sub address type */ /* System family socket address */ struct sockaddr_sys { - u_char ss_len; /* sizeof(struct sockaddr_sys) */ - u_char ss_family; /* AF_SYSTEM */ - u_int16_t ss_sysaddr; /* protocol address in AF_SYSTEM */ - u_int32_t ss_reserved[7]; /* reserved to the protocol use */ + u_char ss_len; /* sizeof(struct sockaddr_sys) */ + u_char ss_family; /* AF_SYSTEM */ + u_int16_t ss_sysaddr; /* protocol address in AF_SYSTEM */ + u_int32_t ss_reserved[7]; /* reserved to the protocol use */ }; #ifdef PRIVATE struct xsystmgen { - u_int32_t xg_len; /* length of this structure */ - u_int32_t xg_count; /* number of PCBs at this time */ - u_int64_t xg_gen; /* generation count at this time */ - u_int64_t xg_sogen; /* current socket generation count */ + u_int32_t xg_len; /* length of this structure */ + u_int32_t xg_count; /* number of PCBs at this time */ + u_int64_t xg_gen; /* generation count at this time */ + u_int64_t xg_sogen; /* current socket generation count */ }; #endif /* PRIVATE */ @@ -76,5 +76,3 @@ __END_DECLS #endif /* KERNEL_PRIVATE */ #endif /* _SYSTEM_DOMAIN_H_ */ - - diff --git a/bsd/sys/sysctl.h b/bsd/sys/sysctl.h index 0d4414a58..aea56f700 100644 --- a/bsd/sys/sysctl.h +++ b/bsd/sys/sysctl.h @@ -71,7 +71,7 @@ */ #ifndef _SYS_SYSCTL_H_ -#define _SYS_SYSCTL_H_ +#define _SYS_SYSCTL_H_ /* * These are for the eproc structure defined below. @@ -104,7 +104,7 @@ * respective subsystem header files. */ -#define CTL_MAXNAME 12 /* largest number of components supported */ +#define CTL_MAXNAME 12 /* largest number of components supported */ /* * Each subsystem defined by sysctl defines a list of variables @@ -141,29 +141,29 @@ * see sysctl_mem_hold() for details). 
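
struct sockaddr_sys in the sys_domain.h hunk above is the raw AF_SYSTEM address; in practice most clients use the kernel-control flavor (SYSPROTO_CONTROL/AF_SYS_CONTROL), which layers struct sockaddr_ctl and a name-to-id ioctl on top of it in <sys/kern_control.h>. A user-space sketch of the usual connect sequence; the control name is hypothetical and belongs to whatever kext registered it:

#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/sys_domain.h>
#include <sys/kern_control.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int fd;

	fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Resolve the registered control name to its dynamic ctl_id. */
	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, "com.example.mykext", sizeof(info.ctl_name));
	if (ioctl(fd, CTLIOCGINFO, &info) == -1) {
		perror("CTLIOCGINFO");
		close(fd);
		return 1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0;               /* 0 lets the kernel pick a unit */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
		perror("connect");
		close(fd);
		return 1;
	}
	/* send()/recv() now exchange data with the kext's ctl callbacks. */
	close(fd);
	return 0;
}
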
*/ struct ctlname { - char *ctl_name; /* subsystem name */ - int ctl_type; /* type of name */ + char *ctl_name; /* subsystem name */ + int ctl_type; /* type of name */ }; -#define CTLTYPE 0xf /* Mask for the type */ -#define CTLTYPE_NODE 1 /* name is a node */ -#define CTLTYPE_INT 2 /* name describes an integer */ -#define CTLTYPE_STRING 3 /* name describes a string */ -#define CTLTYPE_QUAD 4 /* name describes a 64-bit number */ -#define CTLTYPE_OPAQUE 5 /* name describes a structure */ -#define CTLTYPE_STRUCT CTLTYPE_OPAQUE /* name describes a structure */ - -#define CTLFLAG_RD 0x80000000 /* Allow reads of variable */ -#define CTLFLAG_WR 0x40000000 /* Allow writes to the variable */ -#define CTLFLAG_RW (CTLFLAG_RD|CTLFLAG_WR) -#define CTLFLAG_NOLOCK 0x20000000 /* XXX Don't Lock */ -#define CTLFLAG_ANYBODY 0x10000000 /* All users can set this var */ -#define CTLFLAG_SECURE 0x08000000 /* Permit set only if securelevel<=0 */ -#define CTLFLAG_MASKED 0x04000000 /* deprecated variable, do not display */ -#define CTLFLAG_NOAUTO 0x02000000 /* do not auto-register */ -#define CTLFLAG_KERN 0x01000000 /* valid inside the kernel */ -#define CTLFLAG_LOCKED 0x00800000 /* node will handle locking itself */ -#define CTLFLAG_OID2 0x00400000 /* struct sysctl_oid has version info */ +#define CTLTYPE 0xf /* Mask for the type */ +#define CTLTYPE_NODE 1 /* name is a node */ +#define CTLTYPE_INT 2 /* name describes an integer */ +#define CTLTYPE_STRING 3 /* name describes a string */ +#define CTLTYPE_QUAD 4 /* name describes a 64-bit number */ +#define CTLTYPE_OPAQUE 5 /* name describes a structure */ +#define CTLTYPE_STRUCT CTLTYPE_OPAQUE /* name describes a structure */ + +#define CTLFLAG_RD 0x80000000 /* Allow reads of variable */ +#define CTLFLAG_WR 0x40000000 /* Allow writes to the variable */ +#define CTLFLAG_RW (CTLFLAG_RD|CTLFLAG_WR) +#define CTLFLAG_NOLOCK 0x20000000 /* XXX Don't Lock */ +#define CTLFLAG_ANYBODY 0x10000000 /* All users can set this var */ +#define CTLFLAG_SECURE 0x08000000 /* Permit set only if securelevel<=0 */ +#define CTLFLAG_MASKED 0x04000000 /* deprecated variable, do not display */ +#define CTLFLAG_NOAUTO 0x02000000 /* do not auto-register */ +#define CTLFLAG_KERN 0x01000000 /* valid inside the kernel */ +#define CTLFLAG_LOCKED 0x00800000 /* node will handle locking itself */ +#define CTLFLAG_OID2 0x00400000 /* struct sysctl_oid has version info */ /* * USE THIS instead of a hardwired number from the categories below @@ -178,7 +178,7 @@ struct ctlname { * in I/O-Kit. In this case, you have to call sysctl_register_oid() * manually - just like in a KEXT. */ -#define OID_AUTO (-1) +#define OID_AUTO (-1) #define OID_AUTO_START 100 /* conventional */ #ifdef KERNEL @@ -191,21 +191,21 @@ struct ctlname { * so that we can use the interface from the kernel or from user-space. 
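
The CTLTYPE_*/CTLFLAG_* bits and OID_AUTO above are consumed by the SYSCTL_*() constructors defined further down in this header. A kext-side sketch of a dynamically registered integer tunable; the debug.example subtree is hypothetical, and OID_AUTO oids in a kext must be registered (and later unregistered) by hand, parent node first:

#include <sys/sysctl.h>

static int example_value = 0;

SYSCTL_NODE(_debug, OID_AUTO, example, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "hypothetical example subtree");
SYSCTL_INT(_debug_example, OID_AUTO, value, CTLFLAG_RW | CTLFLAG_LOCKED,
    &example_value, 0, "an integer tunable");

static void
example_sysctl_register(void)
{
	/* The constructors only build the oid structs; kexts register them. */
	sysctl_register_oid(&sysctl__debug_example);
	sysctl_register_oid(&sysctl__debug_example_value);
}

Once registered, debug.example.value reads and writes example_value through sysctl_handle_int, subject to whichever CTLFLAG_* bits were chosen above.
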
*/ struct sysctl_req { - struct proc *p; - int lock; - user_addr_t oldptr; /* pointer to user supplied buffer */ - size_t oldlen; /* user buffer length (also returned) */ - size_t oldidx; /* total data iteratively copied out */ - int (*oldfunc)(struct sysctl_req *, const void *, size_t); - user_addr_t newptr; /* buffer containing new value */ - size_t newlen; /* length of new value */ - size_t newidx; /* total data iteratively copied in */ - int (*newfunc)(struct sysctl_req *, void *, size_t); + struct proc *p; + int lock; + user_addr_t oldptr; /* pointer to user supplied buffer */ + size_t oldlen; /* user buffer length (also returned) */ + size_t oldidx; /* total data iteratively copied out */ + int (*oldfunc)(struct sysctl_req *, const void *, size_t); + user_addr_t newptr; /* buffer containing new value */ + size_t newlen; /* length of new value */ + size_t newidx; /* total data iteratively copied in */ + int (*newfunc)(struct sysctl_req *, void *, size_t); }; SLIST_HEAD(sysctl_oid_list, sysctl_oid); -#define SYSCTL_OID_VERSION 1 /* current OID structure version */ +#define SYSCTL_OID_VERSION 1 /* current OID structure version */ /* * This describes one "oid" in the MIB tree. Potentially more nodes can @@ -245,16 +245,16 @@ SLIST_HEAD(sysctl_oid_list, sysctl_oid); struct sysctl_oid { struct sysctl_oid_list *oid_parent; SLIST_ENTRY(sysctl_oid) oid_link; - int oid_number; - int oid_kind; - void *oid_arg1; - int oid_arg2; - const char *oid_name; - int (*oid_handler) SYSCTL_HANDLER_ARGS; - const char *oid_fmt; - const char *oid_descr; /* offsetof() field / long description */ - int oid_version; - int oid_refcnt; + int oid_number; + int oid_kind; + void *oid_arg1; + int oid_arg2; + const char *oid_name; + int (*oid_handler)SYSCTL_HANDLER_ARGS; + const char *oid_fmt; + const char *oid_descr; /* offsetof() field / long description */ + int oid_version; + int oid_refcnt; }; #define SYSCTL_IN(r, p, l) (r->newfunc)(r, p, l) @@ -288,7 +288,7 @@ void sysctl_register_fixed(void) __deprecated; __END_DECLS /* Declare an oid to allow child oids to be added to it. */ -#define SYSCTL_DECL(name) \ +#define SYSCTL_DECL(name) \ extern struct sysctl_oid_list sysctl_##name##_children #ifdef XNU_KERNEL_PRIVATE @@ -306,29 +306,29 @@ __END_DECLS * nbr: ID. Almost certainly OID_AUTO ("pick one for me") for you. * name: name for this particular item (e.g. "thesysctl" for "kern.thesysctl") * kind/access: Control flags (CTLFLAG_*). Some notable options include: - * CTLFLAG_ANYBODY: non-root users allowed - * CTLFLAG_MASKED: don't show in sysctl listing in userland - * CTLFLAG_LOCKED: does own locking (no additional protection needed) - * CTLFLAG_KERN: valid inside kernel (best avoided generally) - * CTLFLAG_WR: "new" value accepted + * CTLFLAG_ANYBODY: non-root users allowed + * CTLFLAG_MASKED: don't show in sysctl listing in userland + * CTLFLAG_LOCKED: does own locking (no additional protection needed) + * CTLFLAG_KERN: valid inside kernel (best avoided generally) + * CTLFLAG_WR: "new" value accepted * a1, a2: entry-data, passed to handler (see specific macros) * Format String: Tells "sysctl" tool how to print data from this entry. - * "A" - string - * "I" - list of integers. "IU" - list of unsigned integers. space-separated. - * "-" - do not print - * "L" - longs, as ints with I + * "A" - string + * "I" - list of integers. "IU" - list of unsigned integers. space-separated. 
+ * "-" - do not print + * "L" - longs, as ints with I * "P" - pointer - * "Q" - quads - * "S","T" - clock info, see sysctl.c in system_cmds (you probably don't need this) + * "Q" - quads + * "S","T" - clock info, see sysctl.c in system_cmds (you probably don't need this) * Description: unused */ /* This constructs a "raw" MIB oid. */ #define SYSCTL_STRUCT_INIT(parent, nbr, name, kind, a1, a2, handler, fmt, descr) \ - { \ - &sysctl_##parent##_children, { 0 }, \ - nbr, (int)(kind|CTLFLAG_OID2), a1, (int)(a2), #name, handler, fmt, descr, SYSCTL_OID_VERSION, 0 \ + { \ + &sysctl_##parent##_children, { 0 }, \ + nbr, (int)(kind|CTLFLAG_OID2), a1, (int)(a2), #name, handler, fmt, descr, SYSCTL_OID_VERSION, 0 \ } #define SYSCTL_OID(parent, nbr, name, kind, a1, a2, handler, fmt, descr) \ @@ -336,65 +336,65 @@ __END_DECLS SYSCTL_LINKER_SET_ENTRY(__sysctl_set, sysctl_##parent##_##name) /* This constructs a node from which other oids can hang. */ -#define SYSCTL_NODE(parent, nbr, name, access, handler, descr) \ - struct sysctl_oid_list sysctl_##parent##_##name##_children; \ - SYSCTL_OID(parent, nbr, name, CTLTYPE_NODE|access, \ - (void*)&sysctl_##parent##_##name##_children, 0, handler, \ - "N", descr); +#define SYSCTL_NODE(parent, nbr, name, access, handler, descr) \ + struct sysctl_oid_list sysctl_##parent##_##name##_children; \ + SYSCTL_OID(parent, nbr, name, CTLTYPE_NODE|access, \ + (void*)&sysctl_##parent##_##name##_children, 0, handler, \ + "N", descr); /* Oid for a string. len can be 0 to indicate '\0' termination. */ #define SYSCTL_STRING(parent, nbr, name, access, arg, len, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_STRING|access, \ - arg, len, sysctl_handle_string, "A", descr) + arg, len, sysctl_handle_string, "A", descr) #define SYSCTL_COMPAT_INT(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ - ptr, val, sysctl_handle_int, "I", descr) + ptr, val, sysctl_handle_int, "I", descr) #define SYSCTL_COMPAT_UINT(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ - ptr, val, sysctl_handle_int, "IU", descr) + ptr, val, sysctl_handle_int, "IU", descr) /* Oid for an int. If ptr is NULL, val is returned. */ #define SYSCTL_INT(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ - ptr, val, sysctl_handle_int, "I", descr); \ + ptr, val, sysctl_handle_int, "I", descr); \ typedef char _sysctl_##parent##_##name##_size_check[(__builtin_constant_p(ptr) || sizeof(*(ptr)) == sizeof(int)) ? 0 : -1]; /* Oid for an unsigned int. If ptr is NULL, val is returned. */ #define SYSCTL_UINT(parent, nbr, name, access, ptr, val, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ - ptr, val, sysctl_handle_int, "IU", descr); \ + ptr, val, sysctl_handle_int, "IU", descr); \ typedef char _sysctl_##parent##_##name##_size_check[(__builtin_constant_p(ptr) || sizeof(*(ptr)) == sizeof(unsigned int)) ? 0 : -1]; /* Oid for a long. The pointer must be non NULL. */ #define SYSCTL_LONG(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ - ptr, 0, sysctl_handle_long, "L", descr); \ + ptr, 0, sysctl_handle_long, "L", descr); \ typedef char _sysctl_##parent##_##name##_size_check[(__builtin_constant_p(ptr) || sizeof(*(ptr)) == sizeof(long)) ? 0 : -1]; /* Oid for a unsigned long. The pointer must be non NULL. 
*/ #define SYSCTL_ULONG(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_INT|access, \ - ptr, 0, sysctl_handle_long, "LU", descr); \ + ptr, 0, sysctl_handle_long, "LU", descr); \ typedef char _sysctl_##parent##_##name##_size_check[(__builtin_constant_p(ptr) || sizeof(*(ptr)) == sizeof(unsigned long)) ? 0 : -1]; /* Oid for a quad. The pointer must be non NULL. */ #define SYSCTL_QUAD(parent, nbr, name, access, ptr, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_QUAD|access, \ - ptr, 0, sysctl_handle_quad, "Q", descr); \ + ptr, 0, sysctl_handle_quad, "Q", descr); \ typedef char _sysctl_##parent##_##name##_size_check[(__builtin_constant_p(ptr) || sizeof(*(ptr)) == sizeof(long long)) ? 0 : -1]; /* Oid for an opaque object. Specified by a pointer and a length. */ #define SYSCTL_OPAQUE(parent, nbr, name, access, ptr, len, fmt, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|access, \ - ptr, len, sysctl_handle_opaque, fmt, descr) + ptr, len, sysctl_handle_opaque, fmt, descr) /* Oid for a struct. Specified by a pointer and a type. */ #define SYSCTL_STRUCT(parent, nbr, name, access, ptr, type, descr) \ SYSCTL_OID(parent, nbr, name, CTLTYPE_OPAQUE|access, \ - ptr, sizeof(struct type), sysctl_handle_opaque, \ - "S," #type, descr) + ptr, sizeof(struct type), sysctl_handle_opaque, \ + "S," #type, descr) /* * Oid for a procedure. Specified by a pointer and an arg. @@ -403,7 +403,7 @@ __END_DECLS */ #define SYSCTL_PROC(parent, nbr, name, access, ptr, arg, handler, fmt, descr) \ SYSCTL_OID(parent, nbr, name, access, \ - ptr, arg, handler, fmt, descr) + ptr, arg, handler, fmt, descr) extern struct sysctl_oid_list sysctl__children; @@ -425,20 +425,20 @@ SYSCTL_DECL(_hw_features); #ifndef SYSCTL_SKMEM_UPDATE_FIELD -#define SYSCTL_SKMEM 0 -#define SYSCTL_SKMEM_UPDATE_FIELD(field, value) -#define SYSCTL_SKMEM_UPDATE_AT_OFFSET(offset, value) -#define SYSCTL_SKMEM_INT(parent, oid, sysctl_name, access, ptr, offset, descr) \ +#define SYSCTL_SKMEM 0 +#define SYSCTL_SKMEM_UPDATE_FIELD(field, value) +#define SYSCTL_SKMEM_UPDATE_AT_OFFSET(offset, value) +#define SYSCTL_SKMEM_INT(parent, oid, sysctl_name, access, ptr, offset, descr) \ SYSCTL_INT(parent, oid, sysctl_name, access, ptr, 0, descr) -#define SYSCTL_SKMEM_TCP_INT(oid, sysctl_name, access, variable_type, \ - variable_name, initial_value, descr) \ - variable_type variable_name = initial_value; \ - SYSCTL_SKMEM_INT(_net_inet_tcp, oid, sysctl_name, access, \ - &variable_name, 0, descr) +#define SYSCTL_SKMEM_TCP_INT(oid, sysctl_name, access, variable_type, \ + variable_name, initial_value, descr) \ + variable_type variable_name = initial_value; \ + SYSCTL_SKMEM_INT(_net_inet_tcp, oid, sysctl_name, access, \ + &variable_name, 0, descr) #else /* SYSCTL_SKMEM_UPDATE_FIELD */ -#define SYSCTL_SKMEM 1 +#define SYSCTL_SKMEM 1 #endif /* SYSCTL_SKMEM_UPDATE_FIELD */ @@ -457,16 +457,16 @@ SYSCTL_DECL(_hw_features); /* * Top-level identifiers */ -#define CTL_UNSPEC 0 /* unused */ -#define CTL_KERN 1 /* "high kernel": proc, limits */ -#define CTL_VM 2 /* virtual memory */ -#define CTL_VFS 3 /* file system, mount type is next */ -#define CTL_NET 4 /* network, see socket.h */ -#define CTL_DEBUG 5 /* debugging parameters */ -#define CTL_HW 6 /* generic cpu/io */ -#define CTL_MACHDEP 7 /* machine dependent */ -#define CTL_USER 8 /* user-level */ -#define CTL_MAXID 9 /* number of valid top-level ids */ +#define CTL_UNSPEC 0 /* unused */ +#define CTL_KERN 1 /* "high kernel": proc, limits */ +#define CTL_VM 2 /* virtual memory */ +#define CTL_VFS 3 
/* file system, mount type is next */ +#define CTL_NET 4 /* network, see socket.h */ +#define CTL_DEBUG 5 /* debugging parameters */ +#define CTL_HW 6 /* generic cpu/io */ +#define CTL_MACHDEP 7 /* machine dependent */ +#define CTL_USER 8 /* user-level */ +#define CTL_MAXID 9 /* number of valid top-level ids */ #define CTL_NAMES { \ { 0, 0 }, \ @@ -483,83 +483,83 @@ SYSCTL_DECL(_hw_features); /* * CTL_KERN identifiers */ -#define KERN_OSTYPE 1 /* string: system version */ -#define KERN_OSRELEASE 2 /* string: system release */ -#define KERN_OSREV 3 /* int: system revision */ -#define KERN_VERSION 4 /* string: compile time info */ -#define KERN_MAXVNODES 5 /* int: max vnodes */ -#define KERN_MAXPROC 6 /* int: max processes */ -#define KERN_MAXFILES 7 /* int: max open files */ -#define KERN_ARGMAX 8 /* int: max arguments to exec */ -#define KERN_SECURELVL 9 /* int: system security level */ -#define KERN_HOSTNAME 10 /* string: hostname */ -#define KERN_HOSTID 11 /* int: host identifier */ -#define KERN_CLOCKRATE 12 /* struct: struct clockrate */ -#define KERN_VNODE 13 /* struct: vnode structures */ -#define KERN_PROC 14 /* struct: process entries */ -#define KERN_FILE 15 /* struct: file entries */ -#define KERN_PROF 16 /* node: kernel profiling info */ -#define KERN_POSIX1 17 /* int: POSIX.1 version */ -#define KERN_NGROUPS 18 /* int: # of supplemental group ids */ -#define KERN_JOB_CONTROL 19 /* int: is job control available */ -#define KERN_SAVED_IDS 20 /* int: saved set-user/group-ID */ -#define KERN_BOOTTIME 21 /* struct: time kernel was booted */ -#define KERN_NISDOMAINNAME 22 /* string: YP domain name */ -#define KERN_DOMAINNAME KERN_NISDOMAINNAME -#define KERN_MAXPARTITIONS 23 /* int: number of partitions/disk */ -#define KERN_KDEBUG 24 /* int: kernel trace points */ -#define KERN_UPDATEINTERVAL 25 /* int: update process sleep time */ -#define KERN_OSRELDATE 26 /* int: OS release date */ -#define KERN_NTP_PLL 27 /* node: NTP PLL control */ -#define KERN_BOOTFILE 28 /* string: name of booted kernel */ -#define KERN_MAXFILESPERPROC 29 /* int: max open files per proc */ -#define KERN_MAXPROCPERUID 30 /* int: max processes per uid */ -#define KERN_DUMPDEV 31 /* dev_t: device to dump on */ -#define KERN_IPC 32 /* node: anything related to IPC */ -#define KERN_DUMMY 33 /* unused */ -#define KERN_PS_STRINGS 34 /* int: address of PS_STRINGS */ -#define KERN_USRSTACK32 35 /* int: address of USRSTACK */ -#define KERN_LOGSIGEXIT 36 /* int: do we log sigexit procs? */ -#define KERN_SYMFILE 37 /* string: kernel symbol filename */ -#define KERN_PROCARGS 38 - /* 39 was KERN_PCSAMPLES... now deprecated */ -#define KERN_NETBOOT 40 /* int: are we netbooted? 
1=yes,0=no */ - /* 41 was KERN_PANICINFO : panic UI information (deprecated) */ -#define KERN_SYSV 42 /* node: System V IPC information */ -#define KERN_AFFINITY 43 /* xxx */ -#define KERN_TRANSLATE 44 /* xxx */ -#define KERN_CLASSIC KERN_TRANSLATE /* XXX backwards compat */ -#define KERN_EXEC 45 /* xxx */ -#define KERN_CLASSICHANDLER KERN_EXEC /* XXX backwards compatibility */ -#define KERN_AIOMAX 46 /* int: max aio requests */ -#define KERN_AIOPROCMAX 47 /* int: max aio requests per process */ -#define KERN_AIOTHREADS 48 /* int: max aio worker threads */ +#define KERN_OSTYPE 1 /* string: system version */ +#define KERN_OSRELEASE 2 /* string: system release */ +#define KERN_OSREV 3 /* int: system revision */ +#define KERN_VERSION 4 /* string: compile time info */ +#define KERN_MAXVNODES 5 /* int: max vnodes */ +#define KERN_MAXPROC 6 /* int: max processes */ +#define KERN_MAXFILES 7 /* int: max open files */ +#define KERN_ARGMAX 8 /* int: max arguments to exec */ +#define KERN_SECURELVL 9 /* int: system security level */ +#define KERN_HOSTNAME 10 /* string: hostname */ +#define KERN_HOSTID 11 /* int: host identifier */ +#define KERN_CLOCKRATE 12 /* struct: struct clockrate */ +#define KERN_VNODE 13 /* struct: vnode structures */ +#define KERN_PROC 14 /* struct: process entries */ +#define KERN_FILE 15 /* struct: file entries */ +#define KERN_PROF 16 /* node: kernel profiling info */ +#define KERN_POSIX1 17 /* int: POSIX.1 version */ +#define KERN_NGROUPS 18 /* int: # of supplemental group ids */ +#define KERN_JOB_CONTROL 19 /* int: is job control available */ +#define KERN_SAVED_IDS 20 /* int: saved set-user/group-ID */ +#define KERN_BOOTTIME 21 /* struct: time kernel was booted */ +#define KERN_NISDOMAINNAME 22 /* string: YP domain name */ +#define KERN_DOMAINNAME KERN_NISDOMAINNAME +#define KERN_MAXPARTITIONS 23 /* int: number of partitions/disk */ +#define KERN_KDEBUG 24 /* int: kernel trace points */ +#define KERN_UPDATEINTERVAL 25 /* int: update process sleep time */ +#define KERN_OSRELDATE 26 /* int: OS release date */ +#define KERN_NTP_PLL 27 /* node: NTP PLL control */ +#define KERN_BOOTFILE 28 /* string: name of booted kernel */ +#define KERN_MAXFILESPERPROC 29 /* int: max open files per proc */ +#define KERN_MAXPROCPERUID 30 /* int: max processes per uid */ +#define KERN_DUMPDEV 31 /* dev_t: device to dump on */ +#define KERN_IPC 32 /* node: anything related to IPC */ +#define KERN_DUMMY 33 /* unused */ +#define KERN_PS_STRINGS 34 /* int: address of PS_STRINGS */ +#define KERN_USRSTACK32 35 /* int: address of USRSTACK */ +#define KERN_LOGSIGEXIT 36 /* int: do we log sigexit procs? */ +#define KERN_SYMFILE 37 /* string: kernel symbol filename */ +#define KERN_PROCARGS 38 +/* 39 was KERN_PCSAMPLES... now deprecated */ +#define KERN_NETBOOT 40 /* int: are we netbooted? 
1=yes,0=no */ +/* 41 was KERN_PANICINFO : panic UI information (deprecated) */ +#define KERN_SYSV 42 /* node: System V IPC information */ +#define KERN_AFFINITY 43 /* xxx */ +#define KERN_TRANSLATE 44 /* xxx */ +#define KERN_CLASSIC KERN_TRANSLATE /* XXX backwards compat */ +#define KERN_EXEC 45 /* xxx */ +#define KERN_CLASSICHANDLER KERN_EXEC /* XXX backwards compatibility */ +#define KERN_AIOMAX 46 /* int: max aio requests */ +#define KERN_AIOPROCMAX 47 /* int: max aio requests per process */ +#define KERN_AIOTHREADS 48 /* int: max aio worker threads */ #ifdef __APPLE_API_UNSTABLE -#define KERN_PROCARGS2 49 +#define KERN_PROCARGS2 49 #endif /* __APPLE_API_UNSTABLE */ -#define KERN_COREFILE 50 /* string: corefile format string */ -#define KERN_COREDUMP 51 /* int: whether to coredump at all */ -#define KERN_SUGID_COREDUMP 52 /* int: whether to dump SUGID cores */ -#define KERN_PROCDELAYTERM 53 /* int: set/reset current proc for delayed termination during shutdown */ -#define KERN_SHREG_PRIVATIZABLE 54 /* int: can shared regions be privatized ? */ - /* 55 was KERN_PROC_LOW_PRI_IO... now deprecated */ -#define KERN_LOW_PRI_WINDOW 56 /* int: set/reset throttle window - milliseconds */ -#define KERN_LOW_PRI_DELAY 57 /* int: set/reset throttle delay - milliseconds */ -#define KERN_POSIX 58 /* node: posix tunables */ -#define KERN_USRSTACK64 59 /* LP64 user stack query */ -#define KERN_NX_PROTECTION 60 /* int: whether no-execute protection is enabled */ -#define KERN_TFP 61 /* Task for pid settings */ -#define KERN_PROCNAME 62 /* setup process program name(2*MAXCOMLEN) */ -#define KERN_THALTSTACK 63 /* for compat with older x86 and does nothing */ -#define KERN_SPECULATIVE_READS 64 /* int: whether speculative reads are disabled */ -#define KERN_OSVERSION 65 /* for build number i.e. 9A127 */ -#define KERN_SAFEBOOT 66 /* are we booted safe? */ - /* 67 was KERN_LCTX (login context) */ -#define KERN_RAGEVNODE 68 -#define KERN_TTY 69 /* node: tty settings */ +#define KERN_COREFILE 50 /* string: corefile format string */ +#define KERN_COREDUMP 51 /* int: whether to coredump at all */ +#define KERN_SUGID_COREDUMP 52 /* int: whether to dump SUGID cores */ +#define KERN_PROCDELAYTERM 53 /* int: set/reset current proc for delayed termination during shutdown */ +#define KERN_SHREG_PRIVATIZABLE 54 /* int: can shared regions be privatized ? */ +/* 55 was KERN_PROC_LOW_PRI_IO... now deprecated */ +#define KERN_LOW_PRI_WINDOW 56 /* int: set/reset throttle window - milliseconds */ +#define KERN_LOW_PRI_DELAY 57 /* int: set/reset throttle delay - milliseconds */ +#define KERN_POSIX 58 /* node: posix tunables */ +#define KERN_USRSTACK64 59 /* LP64 user stack query */ +#define KERN_NX_PROTECTION 60 /* int: whether no-execute protection is enabled */ +#define KERN_TFP 61 /* Task for pid settings */ +#define KERN_PROCNAME 62 /* setup process program name(2*MAXCOMLEN) */ +#define KERN_THALTSTACK 63 /* for compat with older x86 and does nothing */ +#define KERN_SPECULATIVE_READS 64 /* int: whether speculative reads are disabled */ +#define KERN_OSVERSION 65 /* for build number i.e. 9A127 */ +#define KERN_SAFEBOOT 66 /* are we booted safe? 
*/ +/* 67 was KERN_LCTX (login context) */ +#define KERN_RAGEVNODE 68 +#define KERN_TTY 69 /* node: tty settings */ #define KERN_CHECKOPENEVT 70 /* spi: check the VOPENEVT flag on vnodes at open time */ -#define KERN_THREADNAME 71 /* set/get thread name */ -#define KERN_MAXID 72 /* number of valid kern ids */ +#define KERN_THREADNAME 71 /* set/get thread name */ +#define KERN_MAXID 72 /* number of valid kern ids */ /* * Don't add any more sysctls like this. Instead, use the SYSCTL_*() macros * and OID_AUTO. This will have the added benefit of not having to recompile @@ -567,32 +567,32 @@ SYSCTL_DECL(_hw_features); */ #if COUNT_SYSCALLS && defined(KERNEL) -#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000) /* keep called count for each bsd syscall */ +#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000) /* keep called count for each bsd syscall */ #endif #if defined(__LP64__) -#define KERN_USRSTACK KERN_USRSTACK64 +#define KERN_USRSTACK KERN_USRSTACK64 #else -#define KERN_USRSTACK KERN_USRSTACK32 +#define KERN_USRSTACK KERN_USRSTACK32 #endif /* KERN_RAGEVNODE types */ -#define KERN_RAGE_PROC 1 -#define KERN_RAGE_THREAD 2 -#define KERN_UNRAGE_PROC 3 -#define KERN_UNRAGE_THREAD 4 +#define KERN_RAGE_PROC 1 +#define KERN_RAGE_THREAD 2 +#define KERN_UNRAGE_PROC 3 +#define KERN_UNRAGE_THREAD 4 /* KERN_OPENEVT types */ #define KERN_OPENEVT_PROC 1 #define KERN_UNOPENEVT_PROC 2 /* KERN_TFP types */ -#define KERN_TFP_POLICY 1 +#define KERN_TFP_POLICY 1 /* KERN_TFP_POLICY values . All policies allow task port for self */ -#define KERN_TFP_POLICY_DENY 0 /* Deny Mode: None allowed except privileged */ -#define KERN_TFP_POLICY_DEFAULT 2 /* Default Mode: related ones allowed and upcall authentication */ +#define KERN_TFP_POLICY_DENY 0 /* Deny Mode: None allowed except privileged */ +#define KERN_TFP_POLICY_DEFAULT 2 /* Default Mode: related ones allowed and upcall authentication */ /* KERN_KDEBUG types */ #define KERN_KDEFLAGS 1 @@ -663,9 +663,9 @@ SYSCTL_DECL(_hw_features); { "logsigexit", CTLTYPE_INT }, \ { "symfile",CTLTYPE_STRING },\ { "procargs",CTLTYPE_STRUCT },\ - { "dummy", CTLTYPE_INT }, /* deprecated pcsamples */ \ + { "dummy", CTLTYPE_INT }, /* deprecated pcsamples */ \ { "netboot", CTLTYPE_INT }, \ - { "dummy", CTLTYPE_INT }, /* deprecated: panicinfo */ \ + { "dummy", CTLTYPE_INT }, /* deprecated: panicinfo */ \ { "sysv", CTLTYPE_NODE }, \ { "dummy", CTLTYPE_INT }, \ { "dummy", CTLTYPE_INT }, \ @@ -691,9 +691,9 @@ SYSCTL_DECL(_hw_features); { "speculative_reads_disabled", CTLTYPE_INT }, \ { "osversion", CTLTYPE_STRING }, \ { "safeboot", CTLTYPE_INT }, \ - { "dummy", CTLTYPE_INT }, /* deprecated: lctx */ \ + { "dummy", CTLTYPE_INT }, /* deprecated: lctx */ \ { "rage_vnode", CTLTYPE_INT }, \ - { "tty", CTLTYPE_NODE }, \ + { "tty", CTLTYPE_NODE }, \ { "check_openevt", CTLTYPE_INT }, \ { "thread_name", CTLTYPE_STRING } \ } @@ -708,14 +708,14 @@ SYSCTL_DECL(_hw_features); /* * KERN_PROC subtypes */ -#define KERN_PROC_ALL 0 /* everything */ -#define KERN_PROC_PID 1 /* by process id */ -#define KERN_PROC_PGRP 2 /* by process group id */ -#define KERN_PROC_SESSION 3 /* by session of pid */ -#define KERN_PROC_TTY 4 /* by controlling tty */ -#define KERN_PROC_UID 5 /* by effective uid */ -#define KERN_PROC_RUID 6 /* by real uid */ -#define KERN_PROC_LCID 7 /* by login context id */ +#define KERN_PROC_ALL 0 /* everything */ +#define KERN_PROC_PID 1 /* by process id */ +#define KERN_PROC_PGRP 2 /* by process group id */ +#define KERN_PROC_SESSION 3 /* by session of pid */ +#define KERN_PROC_TTY 4 /* by 
controlling tty */ +#define KERN_PROC_UID 5 /* by effective uid */ +#define KERN_PROC_RUID 6 /* by real uid */ +#define KERN_PROC_LCID 7 /* by login context id */ #if defined(XNU_KERNEL_PRIVATE) || !defined(KERNEL) /* @@ -723,48 +723,48 @@ SYSCTL_DECL(_hw_features); */ struct _pcred { - char pc_lock[72]; /* opaque content */ - struct ucred *pc_ucred; /* Current credentials. */ - uid_t p_ruid; /* Real user id. */ - uid_t p_svuid; /* Saved effective user id. */ - gid_t p_rgid; /* Real group id. */ - gid_t p_svgid; /* Saved effective group id. */ - int p_refcnt; /* Number of references. */ + char pc_lock[72]; /* opaque content */ + struct ucred *pc_ucred; /* Current credentials. */ + uid_t p_ruid; /* Real user id. */ + uid_t p_svuid; /* Saved effective user id. */ + gid_t p_rgid; /* Real group id. */ + gid_t p_svgid; /* Saved effective group id. */ + int p_refcnt; /* Number of references. */ }; struct _ucred { - int32_t cr_ref; /* reference count */ - uid_t cr_uid; /* effective user id */ - short cr_ngroups; /* number of groups */ - gid_t cr_groups[NGROUPS]; /* groups */ + int32_t cr_ref; /* reference count */ + uid_t cr_uid; /* effective user id */ + short cr_ngroups; /* number of groups */ + gid_t cr_groups[NGROUPS]; /* groups */ }; struct kinfo_proc { - struct extern_proc kp_proc; /* proc structure */ - struct eproc { - struct proc *e_paddr; /* address of proc */ - struct session *e_sess; /* session pointer */ - struct _pcred e_pcred; /* process credentials */ - struct _ucred e_ucred; /* current credentials */ - struct vmspace e_vm; /* address space */ - pid_t e_ppid; /* parent process id */ - pid_t e_pgid; /* process group id */ - short e_jobc; /* job control counter */ - dev_t e_tdev; /* controlling tty dev */ - pid_t e_tpgid; /* tty process group id */ - struct session *e_tsess; /* tty session pointer */ -#define WMESGLEN 7 - char e_wmesg[WMESGLEN+1]; /* wchan message */ - segsz_t e_xsize; /* text size */ - short e_xrssize; /* text rss */ - short e_xccount; /* text references */ - short e_xswrss; - int32_t e_flag; -#define EPROC_CTTY 0x01 /* controlling tty vnode active */ -#define EPROC_SLEADER 0x02 /* session leader */ -#define COMAPT_MAXLOGNAME 12 - char e_login[COMAPT_MAXLOGNAME]; /* short setlogin() name */ - int32_t e_spare[4]; + struct extern_proc kp_proc; /* proc structure */ + struct eproc { + struct proc *e_paddr; /* address of proc */ + struct session *e_sess; /* session pointer */ + struct _pcred e_pcred; /* process credentials */ + struct _ucred e_ucred; /* current credentials */ + struct vmspace e_vm; /* address space */ + pid_t e_ppid; /* parent process id */ + pid_t e_pgid; /* process group id */ + short e_jobc; /* job control counter */ + dev_t e_tdev; /* controlling tty dev */ + pid_t e_tpgid; /* tty process group id */ + struct session *e_tsess; /* tty session pointer */ +#define WMESGLEN 7 + char e_wmesg[WMESGLEN + 1]; /* wchan message */ + segsz_t e_xsize; /* text size */ + short e_xrssize; /* text rss */ + short e_xccount; /* text references */ + short e_xswrss; + int32_t e_flag; +#define EPROC_CTTY 0x01 /* controlling tty vnode active */ +#define EPROC_SLEADER 0x02 /* session leader */ +#define COMAPT_MAXLOGNAME 12 + char e_login[COMAPT_MAXLOGNAME]; /* short setlogin() name */ + int32_t e_spare[4]; } kp_eproc; }; @@ -779,22 +779,22 @@ struct kinfo_proc { */ struct user32_pcred { - char pc_lock[72]; /* opaque content */ - user32_addr_t pc_ucred; /* Current credentials. */ - uid_t p_ruid; /* Real user id. */ - uid_t p_svuid; /* Saved effective user id. 
*/ - gid_t p_rgid; /* Real group id. */ - gid_t p_svgid; /* Saved effective group id. */ - int p_refcnt; /* Number of references. */ + char pc_lock[72]; /* opaque content */ + user32_addr_t pc_ucred; /* Current credentials. */ + uid_t p_ruid; /* Real user id. */ + uid_t p_svuid; /* Saved effective user id. */ + gid_t p_rgid; /* Real group id. */ + gid_t p_svgid; /* Saved effective group id. */ + int p_refcnt; /* Number of references. */ }; struct user64_pcred { - char pc_lock[72]; /* opaque content */ - user64_addr_t pc_ucred; /* Current credentials. */ - uid_t p_ruid; /* Real user id. */ - uid_t p_svuid; /* Saved effective user id. */ - gid_t p_rgid; /* Real group id. */ - gid_t p_svgid; /* Saved effective group id. */ - int p_refcnt; /* Number of references. */ + char pc_lock[72]; /* opaque content */ + user64_addr_t pc_ucred; /* Current credentials. */ + uid_t p_ruid; /* Real user id. */ + uid_t p_svuid; /* Saved effective user id. */ + gid_t p_rgid; /* Real group id. */ + gid_t p_svgid; /* Saved effective group id. */ + int p_refcnt; /* Number of references. */ }; /* LP64 version of kinfo_proc. all pointers @@ -802,84 +802,84 @@ struct user64_pcred { * WARNING - keep in sync with kinfo_proc */ struct user32_kinfo_proc { - struct user32_extern_proc kp_proc; /* proc structure */ - struct user32_eproc { - user32_addr_t e_paddr; /* address of proc */ - user32_addr_t e_sess; /* session pointer */ - struct user32_pcred e_pcred; /* process credentials */ - struct _ucred e_ucred; /* current credentials */ - struct user32_vmspace e_vm; /* address space */ - pid_t e_ppid; /* parent process id */ - pid_t e_pgid; /* process group id */ - short e_jobc; /* job control counter */ - dev_t e_tdev; /* controlling tty dev */ - pid_t e_tpgid; /* tty process group id */ - user32_addr_t e_tsess; /* tty session pointer */ - char e_wmesg[WMESGLEN+1]; /* wchan message */ - segsz_t e_xsize; /* text size */ - short e_xrssize; /* text rss */ - short e_xccount; /* text references */ - short e_xswrss; - int32_t e_flag; - char e_login[COMAPT_MAXLOGNAME]; /* short setlogin() name */ - int32_t e_spare[4]; + struct user32_extern_proc kp_proc; /* proc structure */ + struct user32_eproc { + user32_addr_t e_paddr; /* address of proc */ + user32_addr_t e_sess; /* session pointer */ + struct user32_pcred e_pcred; /* process credentials */ + struct _ucred e_ucred; /* current credentials */ + struct user32_vmspace e_vm; /* address space */ + pid_t e_ppid; /* parent process id */ + pid_t e_pgid; /* process group id */ + short e_jobc; /* job control counter */ + dev_t e_tdev; /* controlling tty dev */ + pid_t e_tpgid; /* tty process group id */ + user32_addr_t e_tsess; /* tty session pointer */ + char e_wmesg[WMESGLEN + 1]; /* wchan message */ + segsz_t e_xsize; /* text size */ + short e_xrssize; /* text rss */ + short e_xccount; /* text references */ + short e_xswrss; + int32_t e_flag; + char e_login[COMAPT_MAXLOGNAME]; /* short setlogin() name */ + int32_t e_spare[4]; } kp_eproc; }; struct user64_kinfo_proc { - struct user64_extern_proc kp_proc; /* proc structure */ - struct user64_eproc { - user_addr_t e_paddr; /* address of proc */ - user_addr_t e_sess; /* session pointer */ - struct user64_pcred e_pcred; /* process credentials */ - struct _ucred e_ucred; /* current credentials */ - struct user_vmspace e_vm; /* address space */ - pid_t e_ppid; /* parent process id */ - pid_t e_pgid; /* process group id */ - short e_jobc; /* job control counter */ - dev_t e_tdev; /* controlling tty dev */ - pid_t e_tpgid; /* tty process 
group id */ - user64_addr_t e_tsess __attribute((aligned(8))); /* tty session pointer */ - char e_wmesg[WMESGLEN+1]; /* wchan message */ - segsz_t e_xsize; /* text size */ - short e_xrssize; /* text rss */ - short e_xccount; /* text references */ - short e_xswrss; - int32_t e_flag; - char e_login[COMAPT_MAXLOGNAME]; /* short setlogin() name */ - int32_t e_spare[4]; + struct user64_extern_proc kp_proc; /* proc structure */ + struct user64_eproc { + user_addr_t e_paddr; /* address of proc */ + user_addr_t e_sess; /* session pointer */ + struct user64_pcred e_pcred; /* process credentials */ + struct _ucred e_ucred; /* current credentials */ + struct user_vmspace e_vm; /* address space */ + pid_t e_ppid; /* parent process id */ + pid_t e_pgid; /* process group id */ + short e_jobc; /* job control counter */ + dev_t e_tdev; /* controlling tty dev */ + pid_t e_tpgid; /* tty process group id */ + user64_addr_t e_tsess __attribute((aligned(8))); /* tty session pointer */ + char e_wmesg[WMESGLEN + 1]; /* wchan message */ + segsz_t e_xsize; /* text size */ + short e_xrssize; /* text rss */ + short e_xccount; /* text references */ + short e_xswrss; + int32_t e_flag; + char e_login[COMAPT_MAXLOGNAME]; /* short setlogin() name */ + int32_t e_spare[4]; } kp_eproc; }; -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ /* * KERN_IPC identifiers */ -#define KIPC_MAXSOCKBUF 1 /* int: max size of a socket buffer */ -#define KIPC_SOCKBUF_WASTE 2 /* int: wastage factor in sockbuf */ -#define KIPC_SOMAXCONN 3 /* int: max length of connection q */ -#define KIPC_MAX_LINKHDR 4 /* int: max length of link header */ -#define KIPC_MAX_PROTOHDR 5 /* int: max length of network header */ -#define KIPC_MAX_HDR 6 /* int: max total length of headers */ -#define KIPC_MAX_DATALEN 7 /* int: max length of data? */ -#define KIPC_MBSTAT 8 /* struct: mbuf usage statistics */ -#define KIPC_NMBCLUSTERS 9 /* int: maximum mbuf clusters */ -#define KIPC_SOQLIMITCOMPAT 10 /* int: socket queue limit */ +#define KIPC_MAXSOCKBUF 1 /* int: max size of a socket buffer */ +#define KIPC_SOCKBUF_WASTE 2 /* int: wastage factor in sockbuf */ +#define KIPC_SOMAXCONN 3 /* int: max length of connection q */ +#define KIPC_MAX_LINKHDR 4 /* int: max length of link header */ +#define KIPC_MAX_PROTOHDR 5 /* int: max length of network header */ +#define KIPC_MAX_HDR 6 /* int: max total length of headers */ +#define KIPC_MAX_DATALEN 7 /* int: max length of data? */ +#define KIPC_MBSTAT 8 /* struct: mbuf usage statistics */ +#define KIPC_NMBCLUSTERS 9 /* int: maximum mbuf clusters */ +#define KIPC_SOQLIMITCOMPAT 10 /* int: socket queue limit */ /* * CTL_VM identifiers */ -#define VM_METER 1 /* struct vmmeter */ -#define VM_LOADAVG 2 /* struct loadavg */ +#define VM_METER 1 /* struct vmmeter */ +#define VM_LOADAVG 2 /* struct loadavg */ /* * Note: "3" was skipped sometime ago and should probably remain unused * to avoid any new entry from being accepted by older kernels... 
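
The kinfo_proc/eproc layouts above are exactly what a {CTL_KERN, KERN_PROC, ...} query copies out to user space. A sketch that sizes the buffer, fetches the table, and walks it; real code retries the second call, since the process list can grow between the two:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_ALL };
	struct kinfo_proc *procs;
	size_t len = 0;

	if (sysctl(mib, 3, NULL, &len, NULL, 0) == -1) {  /* size probe */
		perror("sysctl");
		return 1;
	}
	procs = malloc(len);
	if (procs == NULL || sysctl(mib, 3, procs, &len, NULL, 0) == -1) {
		perror("sysctl");
		free(procs);
		return 1;
	}
	for (size_t i = 0; i < len / sizeof(*procs); i++) {
		printf("%d\t%s\n", procs[i].kp_proc.p_pid,
		    procs[i].kp_proc.p_comm);
	}
	free(procs);
	return 0;
}
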
*/ -#define VM_MACHFACTOR 4 /* struct loadavg with mach factor*/ -#define VM_SWAPUSAGE 5 /* total swap usage */ -#define VM_MAXID 6 /* number of valid vm ids */ +#define VM_MACHFACTOR 4 /* struct loadavg with mach factor*/ +#define VM_SWAPUSAGE 5 /* total swap usage */ +#define VM_MAXID 6 /* number of valid vm ids */ -#define CTL_VM_NAMES { \ +#define CTL_VM_NAMES { \ { 0, 0 }, \ { "vmmeter", CTLTYPE_STRUCT }, \ { "loadavg", CTLTYPE_STRUCT }, \ @@ -889,68 +889,68 @@ struct user64_kinfo_proc { } struct xsw_usage { - u_int64_t xsu_total; - u_int64_t xsu_avail; - u_int64_t xsu_used; - u_int32_t xsu_pagesize; - boolean_t xsu_encrypted; + u_int64_t xsu_total; + u_int64_t xsu_avail; + u_int64_t xsu_used; + u_int32_t xsu_pagesize; + boolean_t xsu_encrypted; }; #ifdef __APPLE_API_PRIVATE /* Load average structure. Use of fixpt_t assume in scope. */ /* XXX perhaps we should protect fixpt_t, and define it here (or discard it) */ struct loadavg { - fixpt_t ldavg[3]; - long fscale; + fixpt_t ldavg[3]; + long fscale; }; extern struct loadavg averunnable; -#define LSCALE 1000 /* scaling for "fixed point" arithmetic */ +#define LSCALE 1000 /* scaling for "fixed point" arithmetic */ #ifdef BSD_KERNEL_PRIVATE struct user32_loadavg { - fixpt_t ldavg[3]; - user32_long_t fscale; + fixpt_t ldavg[3]; + user32_long_t fscale; }; struct user64_loadavg { - fixpt_t ldavg[3]; - user64_long_t fscale; + fixpt_t ldavg[3]; + user64_long_t fscale; }; -#endif /* BSD_KERNEL_PRIVATE */ +#endif /* BSD_KERNEL_PRIVATE */ #endif /* __APPLE_API_PRIVATE */ /* * CTL_HW identifiers */ -#define HW_MACHINE 1 /* string: machine class */ -#define HW_MODEL 2 /* string: specific machine model */ -#define HW_NCPU 3 /* int: number of cpus */ -#define HW_BYTEORDER 4 /* int: machine byte order */ -#define HW_PHYSMEM 5 /* int: total memory */ -#define HW_USERMEM 6 /* int: non-kernel memory */ -#define HW_PAGESIZE 7 /* int: software page size */ -#define HW_DISKNAMES 8 /* strings: disk drive names */ -#define HW_DISKSTATS 9 /* struct: diskstats[] */ -#define HW_EPOCH 10 /* int: 0 for Legacy, else NewWorld */ -#define HW_FLOATINGPT 11 /* int: has HW floating point? */ -#define HW_MACHINE_ARCH 12 /* string: machine architecture */ -#define HW_VECTORUNIT 13 /* int: has HW vector unit? 
*/ -#define HW_BUS_FREQ 14 /* int: Bus Frequency */ -#define HW_CPU_FREQ 15 /* int: CPU Frequency */ -#define HW_CACHELINE 16 /* int: Cache Line Size in Bytes */ -#define HW_L1ICACHESIZE 17 /* int: L1 I Cache Size in Bytes */ -#define HW_L1DCACHESIZE 18 /* int: L1 D Cache Size in Bytes */ -#define HW_L2SETTINGS 19 /* int: L2 Cache Settings */ -#define HW_L2CACHESIZE 20 /* int: L2 Cache Size in Bytes */ -#define HW_L3SETTINGS 21 /* int: L3 Cache Settings */ -#define HW_L3CACHESIZE 22 /* int: L3 Cache Size in Bytes */ -#define HW_TB_FREQ 23 /* int: Bus Frequency */ -#define HW_MEMSIZE 24 /* uint64_t: physical ram size */ -#define HW_AVAILCPU 25 /* int: number of available CPUs */ -#define HW_MAXID 26 /* number of valid hw ids */ +#define HW_MACHINE 1 /* string: machine class */ +#define HW_MODEL 2 /* string: specific machine model */ +#define HW_NCPU 3 /* int: number of cpus */ +#define HW_BYTEORDER 4 /* int: machine byte order */ +#define HW_PHYSMEM 5 /* int: total memory */ +#define HW_USERMEM 6 /* int: non-kernel memory */ +#define HW_PAGESIZE 7 /* int: software page size */ +#define HW_DISKNAMES 8 /* strings: disk drive names */ +#define HW_DISKSTATS 9 /* struct: diskstats[] */ +#define HW_EPOCH 10 /* int: 0 for Legacy, else NewWorld */ +#define HW_FLOATINGPT 11 /* int: has HW floating point? */ +#define HW_MACHINE_ARCH 12 /* string: machine architecture */ +#define HW_VECTORUNIT 13 /* int: has HW vector unit? */ +#define HW_BUS_FREQ 14 /* int: Bus Frequency */ +#define HW_CPU_FREQ 15 /* int: CPU Frequency */ +#define HW_CACHELINE 16 /* int: Cache Line Size in Bytes */ +#define HW_L1ICACHESIZE 17 /* int: L1 I Cache Size in Bytes */ +#define HW_L1DCACHESIZE 18 /* int: L1 D Cache Size in Bytes */ +#define HW_L2SETTINGS 19 /* int: L2 Cache Settings */ +#define HW_L2CACHESIZE 20 /* int: L2 Cache Size in Bytes */ +#define HW_L3SETTINGS 21 /* int: L3 Cache Settings */ +#define HW_L3CACHESIZE 22 /* int: L3 Cache Size in Bytes */ +#define HW_TB_FREQ 23 /* int: Bus Frequency */ +#define HW_MEMSIZE 24 /* uint64_t: physical ram size */ +#define HW_AVAILCPU 25 /* int: number of available CPUs */ +#define HW_MAXID 26 /* number of valid hw ids */ #define CTL_HW_NAMES { \ { 0, 0 }, \ @@ -1068,29 +1068,29 @@ struct user64_loadavg { /* * CTL_USER definitions */ -#define USER_CS_PATH 1 /* string: _CS_PATH */ -#define USER_BC_BASE_MAX 2 /* int: BC_BASE_MAX */ -#define USER_BC_DIM_MAX 3 /* int: BC_DIM_MAX */ -#define USER_BC_SCALE_MAX 4 /* int: BC_SCALE_MAX */ -#define USER_BC_STRING_MAX 5 /* int: BC_STRING_MAX */ -#define USER_COLL_WEIGHTS_MAX 6 /* int: COLL_WEIGHTS_MAX */ -#define USER_EXPR_NEST_MAX 7 /* int: EXPR_NEST_MAX */ -#define USER_LINE_MAX 8 /* int: LINE_MAX */ -#define USER_RE_DUP_MAX 9 /* int: RE_DUP_MAX */ -#define USER_POSIX2_VERSION 10 /* int: POSIX2_VERSION */ -#define USER_POSIX2_C_BIND 11 /* int: POSIX2_C_BIND */ -#define USER_POSIX2_C_DEV 12 /* int: POSIX2_C_DEV */ -#define USER_POSIX2_CHAR_TERM 13 /* int: POSIX2_CHAR_TERM */ -#define USER_POSIX2_FORT_DEV 14 /* int: POSIX2_FORT_DEV */ -#define USER_POSIX2_FORT_RUN 15 /* int: POSIX2_FORT_RUN */ -#define USER_POSIX2_LOCALEDEF 16 /* int: POSIX2_LOCALEDEF */ -#define USER_POSIX2_SW_DEV 17 /* int: POSIX2_SW_DEV */ -#define USER_POSIX2_UPE 18 /* int: POSIX2_UPE */ -#define USER_STREAM_MAX 19 /* int: POSIX2_STREAM_MAX */ -#define USER_TZNAME_MAX 20 /* int: POSIX2_TZNAME_MAX */ -#define USER_MAXID 21 /* number of valid user ids */ - -#define CTL_USER_NAMES { \ +#define USER_CS_PATH 1 /* string: _CS_PATH */ +#define USER_BC_BASE_MAX 2 /* int: 
BC_BASE_MAX */ +#define USER_BC_DIM_MAX 3 /* int: BC_DIM_MAX */ +#define USER_BC_SCALE_MAX 4 /* int: BC_SCALE_MAX */ +#define USER_BC_STRING_MAX 5 /* int: BC_STRING_MAX */ +#define USER_COLL_WEIGHTS_MAX 6 /* int: COLL_WEIGHTS_MAX */ +#define USER_EXPR_NEST_MAX 7 /* int: EXPR_NEST_MAX */ +#define USER_LINE_MAX 8 /* int: LINE_MAX */ +#define USER_RE_DUP_MAX 9 /* int: RE_DUP_MAX */ +#define USER_POSIX2_VERSION 10 /* int: POSIX2_VERSION */ +#define USER_POSIX2_C_BIND 11 /* int: POSIX2_C_BIND */ +#define USER_POSIX2_C_DEV 12 /* int: POSIX2_C_DEV */ +#define USER_POSIX2_CHAR_TERM 13 /* int: POSIX2_CHAR_TERM */ +#define USER_POSIX2_FORT_DEV 14 /* int: POSIX2_FORT_DEV */ +#define USER_POSIX2_FORT_RUN 15 /* int: POSIX2_FORT_RUN */ +#define USER_POSIX2_LOCALEDEF 16 /* int: POSIX2_LOCALEDEF */ +#define USER_POSIX2_SW_DEV 17 /* int: POSIX2_SW_DEV */ +#define USER_POSIX2_UPE 18 /* int: POSIX2_UPE */ +#define USER_STREAM_MAX 19 /* int: POSIX2_STREAM_MAX */ +#define USER_TZNAME_MAX 20 /* int: POSIX2_TZNAME_MAX */ +#define USER_MAXID 21 /* number of valid user ids */ + +#define CTL_USER_NAMES { \ { 0, 0 }, \ { "cs_path", CTLTYPE_STRING }, \ { "bc_base_max", CTLTYPE_INT }, \ @@ -1122,9 +1122,9 @@ struct user64_loadavg { * Second level identifier specifies which debug variable. * Third level identifier specifies which structure component. */ -#define CTL_DEBUG_NAME 0 /* string: variable name */ -#define CTL_DEBUG_VALUE 1 /* int: variable value */ -#define CTL_DEBUG_MAXID 20 +#define CTL_DEBUG_NAME 0 /* string: variable name */ +#define CTL_DEBUG_VALUE 1 /* int: variable value */ +#define CTL_DEBUG_MAXID 20 #if (CTL_MAXID != 9) || (KERN_MAXID != 72) || (VM_MAXID != 6) || (HW_MAXID != 26) || (USER_MAXID != 21) || (CTL_DEBUG_MAXID != 20) @@ -1132,20 +1132,32 @@ struct user64_loadavg { #endif -#ifdef KERNEL +#ifdef KERNEL #ifdef BSD_KERNEL_PRIVATE -extern char machine[]; -extern char osrelease[]; -extern char ostype[]; -extern char osversion[]; -extern char osbuild_config[]; +extern char machine[]; +extern char osrelease[]; +extern char ostype[]; +extern char osversion[]; +extern char osproductversion[]; +extern char osbuild_config[]; +#if defined(XNU_TARGET_OS_BRIDGE) +/* + * 15 characters at maximum so both the productversion + * and the build version can fit in the panic header + * osversion field with the formatting requirements.
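
Fixed MIB numbers like the CTL_HW and CTL_VM rows above are still reachable through plain sysctl(3); HW_MEMSIZE and VM_SWAPUSAGE (which fills the struct xsw_usage shown earlier) make a compact example:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_HW, HW_MEMSIZE };
	uint64_t memsize = 0;
	struct xsw_usage swap;
	size_t len = sizeof(memsize);

	if (sysctl(mib, 2, &memsize, &len, NULL, 0) == -1) {
		perror("HW_MEMSIZE");
		return 1;
	}
	mib[0] = CTL_VM;
	mib[1] = VM_SWAPUSAGE;
	len = sizeof(swap);
	if (sysctl(mib, 2, &swap, &len, NULL, 0) == -1) {
		perror("VM_SWAPUSAGE");
		return 1;
	}
	printf("ram %llu bytes, swap %llu of %llu bytes used\n",
	    (unsigned long long)memsize,
	    (unsigned long long)swap.xsu_used,
	    (unsigned long long)swap.xsu_total);
	return 0;
}
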
+ */ +#define MACOS_VERS_LEN 15 + +extern char macosproductversion[]; +extern char macosversion[]; +#endif struct linker_set; -void sysctl_register_set(const char *set); -void sysctl_unregister_set(const char *set); -void sysctl_mib_init(void); +void sysctl_register_set(const char *set); +void sysctl_unregister_set(const char *set); +void sysctl_mib_init(void); int sysctl_int(user_addr_t, size_t *, user_addr_t, size_t, int *); int sysctl_quad(user_addr_t, size_t *, user_addr_t, size_t, quad_t *); @@ -1153,18 +1165,18 @@ int sysctl_quad(user_addr_t, size_t *, user_addr_t, size_t, quad_t *); void sysctl_early_init(void); #endif /* BSD_KERNEL_PRIVATE */ -#else /* !KERNEL */ +#else /* !KERNEL */ __BEGIN_DECLS -int sysctl(int *, u_int, void *, size_t *, void *, size_t); -int sysctlbyname(const char *, void *, size_t *, void *, size_t); -int sysctlnametomib(const char *, int *, size_t *); +int sysctl(int *, u_int, void *, size_t *, void *, size_t); +int sysctlbyname(const char *, void *, size_t *, void *, size_t); +int sysctlnametomib(const char *, int *, size_t *); __END_DECLS -#endif /* KERNEL */ +#endif /* KERNEL */ #endif /* SYSCTL_DEF_ENABLED */ -#endif /* !_SYS_SYSCTL_H_ */ +#endif /* !_SYS_SYSCTL_H_ */ diff --git a/bsd/sys/sysent.h b/bsd/sys/sysent.h index 6d529d6f5..a309bb90b 100644 --- a/bsd/sys/sysent.h +++ b/bsd/sys/sysent.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _SYS_SYSENT_H_ -#define _SYS_SYSENT_H_ +#define _SYS_SYSENT_H_ #include #include @@ -35,49 +35,49 @@ #ifdef KERNEL_PRIVATE #ifdef __APPLE_API_PRIVATE -typedef int32_t sy_call_t(struct proc *, void *, int *); +typedef int32_t sy_call_t(struct proc *, void *, int *); #if CONFIG_REQUIRES_U32_MUNGING -typedef void sy_munge_t(void *); +typedef void sy_munge_t(void *); #elif __arm__ && (__BIGGEST_ALIGNMENT__ > 4) -typedef int sy_munge_t(const void *, void *); +typedef int sy_munge_t(const void *, void *); #endif -struct sysent { /* system call table */ - sy_call_t *sy_call; /* implementing function */ +struct sysent { /* system call table */ + sy_call_t *sy_call; /* implementing function */ #if CONFIG_REQUIRES_U32_MUNGING || (__arm__ && (__BIGGEST_ALIGNMENT__ > 4)) - sy_munge_t *sy_arg_munge32; /* system call arguments munger for 32-bit process */ + sy_munge_t *sy_arg_munge32; /* system call arguments munger for 32-bit process */ #endif - int32_t sy_return_type; /* system call return types */ - int16_t sy_narg; /* number of args */ - uint16_t sy_arg_bytes; /* Total size of arguments in bytes for - * 32-bit system calls - */ + int32_t sy_return_type; /* system call return types */ + int16_t sy_narg; /* number of args */ + uint16_t sy_arg_bytes; /* Total size of arguments in bytes for + * 32-bit system calls + */ }; #ifndef __INIT_SYSENT_C__ extern struct sysent sysent[]; -#endif /* __INIT_SYSENT_C__ */ +#endif /* __INIT_SYSENT_C__ */ extern unsigned int nsysent; -/* +/* * Valid values for sy_cancel */ -#define _SYSCALL_CANCEL_NONE 0 /* Not a cancellation point */ -#define _SYSCALL_CANCEL_PRE 1 /* Canbe cancelled on entry itself */ -#define _SYSCALL_CANCEL_POST 2 /* Can only be cancelled after syscall is run */ +#define _SYSCALL_CANCEL_NONE 0 /* Not a cancellation point */ +#define _SYSCALL_CANCEL_PRE 1 /* Can be cancelled on entry itself */ +#define _SYSCALL_CANCEL_POST 2 /* Can only be cancelled after syscall is run */ /* * Valid values for sy_return_type */ -#define _SYSCALL_RET_NONE 0 -#define _SYSCALL_RET_INT_T 1 -#define _SYSCALL_RET_UINT_T 2 -#define _SYSCALL_RET_OFF_T 3 -#define _SYSCALL_RET_ADDR_T 4 -#define _SYSCALL_RET_SIZE_T 5 -#define _SYSCALL_RET_SSIZE_T 6 -#define _SYSCALL_RET_UINT64_T 7 +#define _SYSCALL_RET_NONE 0 +#define _SYSCALL_RET_INT_T 1 +#define _SYSCALL_RET_UINT_T 2 +#define _SYSCALL_RET_OFF_T 3 +#define _SYSCALL_RET_ADDR_T 4 +#define _SYSCALL_RET_SIZE_T 5 +#define _SYSCALL_RET_SSIZE_T 6 +#define _SYSCALL_RET_UINT64_T 7 #endif /* __APPLE_API_PRIVATE */ #endif /* KERNEL_PRIVATE */ diff --git a/bsd/sys/syslimits.h b/bsd/sys/syslimits.h index 56528fa2a..28424700f 100644 --- a/bsd/sys/syslimits.h +++ b/bsd/sys/syslimits.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* $NetBSD: syslimits.h,v 1.15 1997/06/25 00:48:09 lukem Exp $ */ @@ -72,45 +72,45 @@ * Note: CHILD_MAX *must* be less than hard_maxproc, which is set at * compile time; you *cannot* set it higher than the hard limit!! */ -#define ARG_MAX (256 * 1024) /* max bytes for an exec function */ +#define ARG_MAX (256 * 1024) /* max bytes for an exec function */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define CHILD_MAX 266 /* max simultaneous processes */ -#define GID_MAX 2147483647U /* max value for a gid_t (2^31-2) */ +#define CHILD_MAX 266 /* max simultaneous processes */ +#define GID_MAX 2147483647U /* max value for a gid_t (2^31-1) */ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define LINK_MAX 32767 /* max file link count */ -#define MAX_CANON 1024 /* max bytes in term canon input line */ -#define MAX_INPUT 1024 /* max bytes in terminal input */ -#define NAME_MAX 255 /* max bytes in a file name */ -#define NGROUPS_MAX 16 /* max supplemental group id's */ +#define LINK_MAX 32767 /* max file link count */ +#define MAX_CANON 1024 /* max bytes in term canon input line */ +#define MAX_INPUT 1024 /* max bytes in terminal input */ +#define NAME_MAX 255 /* max bytes in a file name */ +#define NGROUPS_MAX 16 /* max supplemental group id's */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define UID_MAX 2147483647U /* max value for a uid_t (2^31-2) */ +#define UID_MAX 2147483647U /* max value for a uid_t (2^31-1) */ -#define OPEN_MAX 10240 /* max open files per process - todo, make a config option? */ +#define OPEN_MAX 10240 /* max open files per process - todo, make a config option?
*/ #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define PATH_MAX 1024 /* max bytes in pathname */ -#define PIPE_BUF 512 /* max bytes for atomic pipe writes */ +#define PATH_MAX 1024 /* max bytes in pathname */ +#define PIPE_BUF 512 /* max bytes for atomic pipe writes */ -#define BC_BASE_MAX 99 /* max ibase/obase values in bc(1) */ -#define BC_DIM_MAX 2048 /* max array elements in bc(1) */ -#define BC_SCALE_MAX 99 /* max scale value in bc(1) */ -#define BC_STRING_MAX 1000 /* max const string length in bc(1) */ -#define CHARCLASS_NAME_MAX 14 /* max character class name size */ -#define COLL_WEIGHTS_MAX 2 /* max weights for order keyword */ -#define EQUIV_CLASS_MAX 2 -#define EXPR_NEST_MAX 32 /* max expressions nested in expr(1) */ -#define LINE_MAX 2048 /* max bytes in an input line */ -#define RE_DUP_MAX 255 /* max RE's in interval notation */ +#define BC_BASE_MAX 99 /* max ibase/obase values in bc(1) */ +#define BC_DIM_MAX 2048 /* max array elements in bc(1) */ +#define BC_SCALE_MAX 99 /* max scale value in bc(1) */ +#define BC_STRING_MAX 1000 /* max const string length in bc(1) */ +#define CHARCLASS_NAME_MAX 14 /* max character class name size */ +#define COLL_WEIGHTS_MAX 2 /* max weights for order keyword */ +#define EQUIV_CLASS_MAX 2 +#define EXPR_NEST_MAX 32 /* max expressions nested in expr(1) */ +#define LINE_MAX 2048 /* max bytes in an input line */ +#define RE_DUP_MAX 255 /* max RE's in interval notation */ #if __DARWIN_UNIX03 -#define NZERO 20 /* default priority [XSI] */ - /* = ((PRIO_MAX - PRIO_MIN) / 2) + 1 */ - /* range: 0 - 39 [(2 * NZERO) - 1] */ - /* 0 is not actually used */ +#define NZERO 20 /* default priority [XSI] */ + /* = ((PRIO_MAX - PRIO_MIN) / 2) + 1 */ + /* range: 0 - 39 [(2 * NZERO) - 1] */ + /* 0 is not actually used */ #else /* !__DARWIN_UNIX03 */ -#define NZERO 0 /* default priority */ - /* range: -20 - 20 */ - /* (PRIO_MIN - PRIO_MAX) */ +#define NZERO 0 /* default priority */ + /* range: -20 - 20 */ + /* (PRIO_MIN - PRIO_MAX) */ #endif /* __DARWIN_UNIX03 */ #endif /* !_ANSI_SOURCE */ diff --git a/bsd/sys/syslog.h b/bsd/sys/syslog.h index 48f91cd1b..facf1c036 100644 --- a/bsd/sys/syslog.h +++ b/bsd/sys/syslog.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
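[Editor's note] Most of the syslimits.h constants above are compile-time ceilings; several of them (OPEN_MAX, CHILD_MAX) can differ from the effective runtime limits, which should be read with sysconf() or getrlimit(). A small sketch that exercises PATH_MAX and prints a few of the ceilings; plain Darwin userland code, not part of the patch:

#include <limits.h>   /* pulls in <sys/syslimits.h> on Darwin */
#include <stdlib.h>
#include <stdio.h>

int
main(int argc, char *argv[])
{
    char resolved[PATH_MAX];   /* PATH_MAX bounds any resolved pathname */

    if (argc > 1 && realpath(argv[1], resolved) != NULL)
        printf("%s -> %s\n", argv[1], resolved);
    printf("ARG_MAX=%d NAME_MAX=%d OPEN_MAX=%d\n",
        ARG_MAX, NAME_MAX, OPEN_MAX);
    return 0;
}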
All Rights Reserved */ @@ -64,7 +64,7 @@ #include #include -#define _PATH_LOG "/var/run/syslog" +#define _PATH_LOG "/var/run/syslog" /* * priorities/facilities are encoded into a single 32-bit quantity, where the @@ -75,131 +75,131 @@ * * priorities (these are ordered) */ -#define LOG_EMERG 0 /* system is unusable */ -#define LOG_ALERT 1 /* action must be taken immediately */ -#define LOG_CRIT 2 /* critical conditions */ -#define LOG_ERR 3 /* error conditions */ -#define LOG_WARNING 4 /* warning conditions */ -#define LOG_NOTICE 5 /* normal but significant condition */ -#define LOG_INFO 6 /* informational */ -#define LOG_DEBUG 7 /* debug-level messages */ +#define LOG_EMERG 0 /* system is unusable */ +#define LOG_ALERT 1 /* action must be taken immediately */ +#define LOG_CRIT 2 /* critical conditions */ +#define LOG_ERR 3 /* error conditions */ +#define LOG_WARNING 4 /* warning conditions */ +#define LOG_NOTICE 5 /* normal but significant condition */ +#define LOG_INFO 6 /* informational */ +#define LOG_DEBUG 7 /* debug-level messages */ -#define LOG_PRIMASK 0x07 /* mask to extract priority part (internal) */ +#define LOG_PRIMASK 0x07 /* mask to extract priority part (internal) */ /* extract priority */ -#define LOG_PRI(p) ((p) & LOG_PRIMASK) -#define LOG_MAKEPRI(fac, pri) ((fac) | (pri)) +#define LOG_PRI(p) ((p) & LOG_PRIMASK) +#define LOG_MAKEPRI(fac, pri) ((fac) | (pri)) #ifdef SYSLOG_NAMES -#define INTERNAL_NOPRI 0x10 /* the "no priority" priority */ +#define INTERNAL_NOPRI 0x10 /* the "no priority" priority */ /* mark "facility" */ -#define INTERNAL_MARK LOG_MAKEPRI((LOG_NFACILITIES<<3), 0) +#define INTERNAL_MARK LOG_MAKEPRI((LOG_NFACILITIES<<3), 0) typedef struct _code { - const char *c_name; - int c_val; + const char *c_name; + int c_val; } CODE; CODE prioritynames[] = { - { "alert", LOG_ALERT, }, - { "crit", LOG_CRIT, }, - { "debug", LOG_DEBUG, }, - { "emerg", LOG_EMERG, }, - { "err", LOG_ERR, }, - { "error", LOG_ERR, }, /* DEPRECATED */ - { "info", LOG_INFO, }, - { "none", INTERNAL_NOPRI, }, /* INTERNAL */ - { "notice", LOG_NOTICE, }, - { "panic", LOG_EMERG, }, /* DEPRECATED */ - { "warn", LOG_WARNING, }, /* DEPRECATED */ - { "warning", LOG_WARNING, }, - { NULL, -1, } + { "alert", LOG_ALERT, }, + { "crit", LOG_CRIT, }, + { "debug", LOG_DEBUG, }, + { "emerg", LOG_EMERG, }, + { "err", LOG_ERR, }, + { "error", LOG_ERR, }, /* DEPRECATED */ + { "info", LOG_INFO, }, + { "none", INTERNAL_NOPRI, }, /* INTERNAL */ + { "notice", LOG_NOTICE, }, + { "panic", LOG_EMERG, }, /* DEPRECATED */ + { "warn", LOG_WARNING, }, /* DEPRECATED */ + { "warning", LOG_WARNING, }, + { NULL, -1, } }; #endif /* facility codes */ -#define LOG_KERN (0<<3) /* kernel messages */ -#define LOG_USER (1<<3) /* random user-level messages */ -#define LOG_MAIL (2<<3) /* mail system */ -#define LOG_DAEMON (3<<3) /* system daemons */ -#define LOG_AUTH (4<<3) /* authorization messages */ -#define LOG_SYSLOG (5<<3) /* messages generated internally by syslogd */ -#define LOG_LPR (6<<3) /* line printer subsystem */ -#define LOG_NEWS (7<<3) /* network news subsystem */ -#define LOG_UUCP (8<<3) /* UUCP subsystem */ -#define LOG_CRON (9<<3) /* clock daemon */ -#define LOG_AUTHPRIV (10<<3) /* authorization messages (private) */ +#define LOG_KERN (0<<3) /* kernel messages */ +#define LOG_USER (1<<3) /* random user-level messages */ +#define LOG_MAIL (2<<3) /* mail system */ +#define LOG_DAEMON (3<<3) /* system daemons */ +#define LOG_AUTH (4<<3) /* authorization messages */ +#define LOG_SYSLOG (5<<3) /* messages generated internally 
by syslogd */ +#define LOG_LPR (6<<3) /* line printer subsystem */ +#define LOG_NEWS (7<<3) /* network news subsystem */ +#define LOG_UUCP (8<<3) /* UUCP subsystem */ +#define LOG_CRON (9<<3) /* clock daemon */ +#define LOG_AUTHPRIV (10<<3) /* authorization messages (private) */ /* Facility #10 clashes in DEC UNIX, where */ /* it's defined as LOG_MEGASAFE for AdvFS */ /* event logging. */ -#define LOG_FTP (11<<3) /* ftp daemon */ +#define LOG_FTP (11<<3) /* ftp daemon */ //#define LOG_NTP (12<<3) /* NTP subsystem */ //#define LOG_SECURITY (13<<3) /* security subsystems (firewalling, etc.) */ //#define LOG_CONSOLE (14<<3) /* /dev/console output */ -#define LOG_NETINFO (12<<3) /* NetInfo */ -#define LOG_REMOTEAUTH (13<<3) /* remote authentication/authorization */ -#define LOG_INSTALL (14<<3) /* installer subsystem */ -#define LOG_RAS (15<<3) /* Remote Access Service (VPN / PPP) */ +#define LOG_NETINFO (12<<3) /* NetInfo */ +#define LOG_REMOTEAUTH (13<<3) /* remote authentication/authorization */ +#define LOG_INSTALL (14<<3) /* installer subsystem */ +#define LOG_RAS (15<<3) /* Remote Access Service (VPN / PPP) */ /* other codes through 15 reserved for system use */ -#define LOG_LOCAL0 (16<<3) /* reserved for local use */ -#define LOG_LOCAL1 (17<<3) /* reserved for local use */ -#define LOG_LOCAL2 (18<<3) /* reserved for local use */ -#define LOG_LOCAL3 (19<<3) /* reserved for local use */ -#define LOG_LOCAL4 (20<<3) /* reserved for local use */ -#define LOG_LOCAL5 (21<<3) /* reserved for local use */ -#define LOG_LOCAL6 (22<<3) /* reserved for local use */ -#define LOG_LOCAL7 (23<<3) /* reserved for local use */ +#define LOG_LOCAL0 (16<<3) /* reserved for local use */ +#define LOG_LOCAL1 (17<<3) /* reserved for local use */ +#define LOG_LOCAL2 (18<<3) /* reserved for local use */ +#define LOG_LOCAL3 (19<<3) /* reserved for local use */ +#define LOG_LOCAL4 (20<<3) /* reserved for local use */ +#define LOG_LOCAL5 (21<<3) /* reserved for local use */ +#define LOG_LOCAL6 (22<<3) /* reserved for local use */ +#define LOG_LOCAL7 (23<<3) /* reserved for local use */ -#define LOG_LAUNCHD (24<<3) /* launchd - general bootstrap daemon */ +#define LOG_LAUNCHD (24<<3) /* launchd - general bootstrap daemon */ -#define LOG_NFACILITIES 25 /* current number of facilities */ -#define LOG_FACMASK 0x03f8 /* mask to extract facility part */ +#define LOG_NFACILITIES 25 /* current number of facilities */ +#define LOG_FACMASK 0x03f8 /* mask to extract facility part */ /* facility of pri */ -#define LOG_FAC(p) (((p) & LOG_FACMASK) >> 3) +#define LOG_FAC(p) (((p) & LOG_FACMASK) >> 3) #ifdef SYSLOG_NAMES CODE facilitynames[] = { - { "auth", LOG_AUTH, }, - { "authpriv", LOG_AUTHPRIV, }, - { "cron", LOG_CRON, }, - { "daemon", LOG_DAEMON, }, - { "ftp", LOG_FTP, }, - { "install", LOG_INSTALL }, - { "kern", LOG_KERN, }, - { "lpr", LOG_LPR, }, - { "mail", LOG_MAIL, }, - { "mark", INTERNAL_MARK, }, /* INTERNAL */ - { "netinfo", LOG_NETINFO, }, - { "ras", LOG_RAS }, - { "remoteauth", LOG_REMOTEAUTH }, - { "news", LOG_NEWS, }, - { "security", LOG_AUTH }, /* DEPRECATED */ - { "syslog", LOG_SYSLOG, }, - { "user", LOG_USER, }, - { "uucp", LOG_UUCP, }, - { "local0", LOG_LOCAL0, }, - { "local1", LOG_LOCAL1, }, - { "local2", LOG_LOCAL2, }, - { "local3", LOG_LOCAL3, }, - { "local4", LOG_LOCAL4, }, - { "local5", LOG_LOCAL5, }, - { "local6", LOG_LOCAL6, }, - { "local7", LOG_LOCAL7, }, - { "launchd", LOG_LAUNCHD }, - { NULL, -1, } + { "auth", LOG_AUTH, }, + { "authpriv", LOG_AUTHPRIV, }, + { "cron", LOG_CRON, }, + { "daemon", 
LOG_DAEMON, }, + { "ftp", LOG_FTP, }, + { "install", LOG_INSTALL }, + { "kern", LOG_KERN, }, + { "lpr", LOG_LPR, }, + { "mail", LOG_MAIL, }, + { "mark", INTERNAL_MARK, }, /* INTERNAL */ + { "netinfo", LOG_NETINFO, }, + { "ras", LOG_RAS }, + { "remoteauth", LOG_REMOTEAUTH }, + { "news", LOG_NEWS, }, + { "security", LOG_AUTH }, /* DEPRECATED */ + { "syslog", LOG_SYSLOG, }, + { "user", LOG_USER, }, + { "uucp", LOG_UUCP, }, + { "local0", LOG_LOCAL0, }, + { "local1", LOG_LOCAL1, }, + { "local2", LOG_LOCAL2, }, + { "local3", LOG_LOCAL3, }, + { "local4", LOG_LOCAL4, }, + { "local5", LOG_LOCAL5, }, + { "local6", LOG_LOCAL6, }, + { "local7", LOG_LOCAL7, }, + { "launchd", LOG_LAUNCHD }, + { NULL, -1, } }; #endif #ifdef KERNEL #ifdef __APPLE_API_PRIVATE -#define LOG_PRINTF -1 /* pseudo-priority to indicate use of printf */ +#define LOG_PRINTF -1 /* pseudo-priority to indicate use of printf */ #endif /* __APPLE_API_PRIVATE */ #endif /* * arguments to setlogmask. */ -#define LOG_MASK(pri) (1 << (pri)) /* mask for one priority */ -#define LOG_UPTO(pri) ((1 << ((pri)+1)) - 1) /* all priorities through pri */ +#define LOG_MASK(pri) (1 << (pri)) /* mask for one priority */ +#define LOG_UPTO(pri) ((1 << ((pri)+1)) - 1) /* all priorities through pri */ /* * Option flags for openlog. @@ -207,12 +207,12 @@ CODE facilitynames[] = { * LOG_ODELAY no longer does anything. * LOG_NDELAY is the inverse of what it used to be. */ -#define LOG_PID 0x01 /* log the pid with each message */ -#define LOG_CONS 0x02 /* log on the console if errors in sending */ -#define LOG_ODELAY 0x04 /* delay open until first syslog() (default) */ -#define LOG_NDELAY 0x08 /* don't delay open */ -#define LOG_NOWAIT 0x10 /* don't wait for console forks: DEPRECATED */ -#define LOG_PERROR 0x20 /* log to stderr as well */ +#define LOG_PID 0x01 /* log the pid with each message */ +#define LOG_CONS 0x02 /* log on the console if errors in sending */ +#define LOG_ODELAY 0x04 /* delay open until first syslog() (default) */ +#define LOG_NDELAY 0x08 /* don't delay open */ +#define LOG_NOWAIT 0x10 /* don't wait for console forks: DEPRECATED */ +#define LOG_PERROR 0x20 /* log to stderr as well */ #ifndef KERNEL @@ -226,16 +226,16 @@ CODE facilitynames[] = { #include __BEGIN_DECLS -void closelog(void); -void openlog(const char *, int, int); -int setlogmask(int); +void closelog(void); +void openlog(const char *, int, int); +int setlogmask(int); #if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __DARWIN_C_LEVEL >= __DARWIN_C_FULL -void syslog(int, const char *, ...) __DARWIN_ALIAS_STARTING(__MAC_10_13, __IPHONE_NA, __DARWIN_EXTSN(syslog)) __printflike(2, 3) __not_tail_called; +void syslog(int, const char *, ...) __DARWIN_ALIAS_STARTING(__MAC_10_13, __IPHONE_NA, __DARWIN_EXTSN(syslog)) __printflike(2, 3) __not_tail_called; #else -void syslog(int, const char *, ...) __printflike(2, 3) __not_tail_called; +void syslog(int, const char *, ...) __printflike(2, 3) __not_tail_called; #endif #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL -void vsyslog(int, const char *, __darwin_va_list) __printflike(2, 0) __not_tail_called; +void vsyslog(int, const char *, __darwin_va_list) __printflike(2, 0) __not_tail_called; #endif __END_DECLS @@ -290,7 +290,7 @@ __END_DECLS * rv_name field. When %n or %N is used rd_values are searched and the * symbolic value is printed if a match is found, if no match is found * "???" is printed. 
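[Editor's note] The macros above define the whole userland contract: a log priority is the facility bits OR'd with a severity in the low three bits (LOG_MAKEPRI, extracted again by LOG_PRI and LOG_FAC), and setlogmask(LOG_UPTO(...)) filters by severity. A short usage sketch; the ident string "example" and the chosen levels are arbitrary:

#include <syslog.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    /* Packing: facility in the high bits, priority in the low three. */
    int pri = LOG_MAKEPRI(LOG_LOCAL0, LOG_INFO);
    printf("encoded=%#x facility#=%d priority=%d\n",
        pri, LOG_FAC(pri), LOG_PRI(pri));

    /* Typical setup: tag messages, keep only NOTICE and more severe. */
    openlog("example", LOG_PID | LOG_PERROR, LOG_USER);
    setlogmask(LOG_UPTO(LOG_NOTICE));

    syslog(LOG_NOTICE, "started (uid %d)", (int)getuid());
    syslog(LOG_DEBUG, "suppressed by the mask above");

    closelog();
    return 0;
}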
- * + * * printf("%C", val); * int val; * @@ -313,21 +313,21 @@ struct reg_values { * rd_mask and rd_shift must be defined, other entries may be null */ struct reg_desc { - unsigned rd_mask; /* mask to extract field */ - int rd_shift; /* shift for extracted value, - >>, + << */ - char *rd_name; /* field name */ - char *rd_format; /* format to print field */ - struct reg_values *rd_values; /* symbolic names of values */ + unsigned rd_mask; /* mask to extract field */ + int rd_shift; /* shift for extracted value, - >>, + << */ + char *rd_name; /* field name */ + char *rd_format; /* format to print field */ + struct reg_values *rd_values; /* symbolic names of values */ }; #endif /* __APPLE_API_OBSOLETE */ #include __BEGIN_DECLS -void log(int, const char *, ...); +void log(int, const char *, ...); #ifdef XNU_KERNEL_PRIVATE -int vaddlog(const char *, va_list) __printflike(1,0); -void logtime(time_t); +int vaddlog(const char *, va_list) __printflike(1, 0); +void logtime(time_t); #endif /* XNU_KERNEL_PRIVATE */ __END_DECLS diff --git a/bsd/sys/systm.h b/bsd/sys/systm.h index bec0bc45e..a06576c40 100644 --- a/bsd/sys/systm.h +++ b/bsd/sys/systm.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -65,7 +65,7 @@ * * @(#)systm.h 8.7 (Berkeley) 3/29/95 */ - + /* * The `securelevel' variable controls the security level of the system. * It can only be decreased by process 1 (/sbin/init). @@ -78,7 +78,7 @@ * raw disks of mounted filesystems, /dev/mem, and /dev/kmem are * read-only. * 2 highly secure mode - same as (1) plus raw disks are always - * read-only whether mounted or not. This level precludes tampering + * read-only whether mounted or not. This level precludes tampering * with filesystems by unmounting them, but also inhibits running * newfs while the system is secured. 
* @@ -95,7 +95,7 @@ */ #ifndef _SYS_SYSTM_H_ -#define _SYS_SYSTM_H_ +#define _SYS_SYSTM_H_ #include #include @@ -117,15 +117,15 @@ __BEGIN_DECLS __END_DECLS #ifdef BSD_KERNEL_PRIVATE -extern char version[]; /* system version */ -extern const char copyright[]; /* system copyright */ +extern char version[]; /* system version */ +extern const char copyright[]; /* system copyright */ -extern int boothowto; /* reboot flags, from console subsystem */ -extern int show_space; -extern int minimalboot; +extern int boothowto; /* reboot flags, from console subsystem */ +extern int show_space; +extern int minimalboot; #if CONFIG_EMBEDDED -extern int darkboot; +extern int darkboot; #endif extern const int nblkdev; /* number of entries in bdevsw */ @@ -134,52 +134,52 @@ extern const int nchrdev; /* number of entries in cdevsw */ #ifdef KERNEL_PRIVATE -extern int securelevel; /* system security level */ -extern dev_t rootdev; /* root device */ -extern struct vnode *rootvp; /* vnode equivalent to above */ +extern int securelevel; /* system security level */ +extern dev_t rootdev; /* root device */ +extern struct vnode *rootvp; /* vnode equivalent to above */ #endif /* KERNEL_PRIVATE */ -#define SYSINIT(a,b,c,d,e) -#define MALLOC_DEFINE(a,b,c) +#define SYSINIT(a, b, c, d, e) +#define MALLOC_DEFINE(a, b, c) -#define getenv_int(a,b) (*b = 0) -#define KASSERT(exp,msg) +#define getenv_int(a, b) (*b = 0) +#define KASSERT(exp, msg) /* * General function declarations. */ __BEGIN_DECLS #ifdef BSD_KERNEL_PRIVATE -int einval(void); -void nullsys(void); -int errsys(void); -int seltrue(dev_t dev, int which, struct proc *p); -void ttyprintf(struct tty *, const char *, ...) __printflike(2, 3); -void realitexpire(struct proc *); -int hzto(struct timeval *tv); -void tablefull(const char *); -int kvprintf(char const *, void (*)(int, void*), void *, int, - __darwin_va_list) __printflike(1,0); -void uprintf(const char *, ...) __printflike(1,2); -int copywithin(void *saddr, void *daddr, size_t len); -int64_t fulong(user_addr_t addr); -int sulong(user_addr_t addr, int64_t longword); +int einval(void); +void nullsys(void); +int errsys(void); +int seltrue(dev_t dev, int which, struct proc *p); +void ttyprintf(struct tty *, const char *, ...) __printflike(2, 3); +void realitexpire(struct proc *); +int hzto(struct timeval *tv); +void tablefull(const char *); +int kvprintf(char const *, void (*)(int, void*), void *, int, + __darwin_va_list) __printflike(1, 0); +void uprintf(const char *, ...) 
__printflike(1, 2); +int copywithin(void *saddr, void *daddr, size_t len); +int64_t fulong(user_addr_t addr); +int sulong(user_addr_t addr, int64_t longword); uint64_t fuulong(user_addr_t addr); -int suulong(user_addr_t addr, uint64_t ulongword); -int clone_system_shared_regions(int shared_regions_active, - int chain_regions, - int base_vnode); +int suulong(user_addr_t addr, uint64_t ulongword); +int clone_system_shared_regions(int shared_regions_active, + int chain_regions, + int base_vnode); extern kern_return_t bsd_exception(int, mach_exception_data_t codes, int); -extern void bsdinit_task(void); +extern void bsdinit_task(void); extern void unix_syscall_return(int) __dead2; -void initclocks(void); -void startprofclock(struct proc *); -void stopprofclock(struct proc *); -void setstatclockrate(int hzrate); +void initclocks(void); +void startprofclock(struct proc *); +void stopprofclock(struct proc *); +void setstatclockrate(int hzrate); struct time_value; -void get_procrustime(struct time_value *tv); -void load_init_program(struct proc *p); +void get_procrustime(struct time_value *tv); +void load_init_program(struct proc *p); void __pthread_testcancel(int presyscall); void throttle_info_get_last_io_time(mount_t mp, struct timeval *tv); void update_last_io_time(mount_t mp); @@ -187,68 +187,68 @@ void throttle_info_end_io(buf_t bp); #endif /* BSD_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE -void timeout(void (*)(void *), void *arg, int ticks); -void timeout_with_leeway(void (*)(void *), void *arg, int ticks, int leeway_ticks); -void untimeout(void (*)(void *), void *arg); -int bsd_hostname(char *, int, int*); -int vslock(user_addr_t addr, user_size_t len); -int vsunlock(user_addr_t addr, user_size_t len, int dirtied); +void timeout(void (*)(void *), void *arg, int ticks); +void timeout_with_leeway(void (*)(void *), void *arg, int ticks, int leeway_ticks); +void untimeout(void (*)(void *), void *arg); +int bsd_hostname(char *, int, int*); +int vslock(user_addr_t addr, user_size_t len); +int vsunlock(user_addr_t addr, user_size_t len, int dirtied); #endif /* KERNEL_PRIVATE */ -int nullop(void); -int nulldev(void); -int enoioctl(void); -int enosys(void); -int enxio(void); -int eopnotsupp(void); -void *hashinit(int count, int type, u_long *hashmask); -void ovbcopy(const void *from, void *to, size_t len); -int fubyte(user_addr_t addr); -int fuibyte(user_addr_t addr); -int subyte(user_addr_t addr, int byte); -int suibyte(user_addr_t addr, int byte); +int nullop(void); +int nulldev(void); +int enoioctl(void); +int enosys(void); +int enxio(void); +int eopnotsupp(void); +void *hashinit(int count, int type, u_long *hashmask); +void ovbcopy(const void *from, void *to, size_t len); +int fubyte(user_addr_t addr); +int fuibyte(user_addr_t addr); +int subyte(user_addr_t addr, int byte); +int suibyte(user_addr_t addr, int byte); long fuword(user_addr_t addr); long fuiword(user_addr_t addr); int suword(user_addr_t addr, long word); int suiword(user_addr_t addr, long word); -#define fusize(_a) ((user_size_t)fulong(_a)) -#define susize(_a, _s) sulong((_a), (_s)) -#define fuptr(a) ((user_addr_t)fulong(_a) -#define suptr(_a, _p) sulong((_a), (_p)) -int useracc(user_addr_t addr, user_size_t len,int prot); +#define fusize(_a) ((user_size_t)fulong(_a)) +#define susize(_a, _s) sulong((_a), (_s)) +#define fuptr(_a) ((user_addr_t)fulong(_a)) +#define suptr(_a, _p) sulong((_a), (_p)) +int useracc(user_addr_t addr, user_size_t len, int prot); typedef void (*timeout_fcn_t)(void *); -void bsd_timeout(void (*)(void *), void
*arg, struct timespec * ts); -void bsd_untimeout(void (*)(void *), void *arg); -void set_fsblocksize(struct vnode *); +void bsd_timeout(void (*)(void *), void *arg, struct timespec * ts); +void bsd_untimeout(void (*)(void *), void *arg); +void set_fsblocksize(struct vnode *); uint64_t tvtoabstime(struct timeval *); uint64_t tstoabstime(struct timespec *); -void *throttle_info_create(void); -void throttle_info_mount_ref(mount_t mp, void * throttle_info); -void throttle_info_mount_rel(mount_t mp); -void throttle_info_release(void *throttle_info); -void throttle_info_update(void *throttle_info, int flags); +void *throttle_info_create(void); +void throttle_info_mount_ref(mount_t mp, void * throttle_info); +void throttle_info_mount_rel(mount_t mp); +void throttle_info_release(void *throttle_info); +void throttle_info_update(void *throttle_info, int flags); uint32_t throttle_lowpri_io(int sleep_amount); -void throttle_set_thread_io_policy(int policy); -int throttle_get_thread_effective_io_policy(void); +void throttle_set_thread_io_policy(int policy); +int throttle_get_thread_effective_io_policy(void); typedef struct __throttle_info_handle *throttle_info_handle_t; -int throttle_info_ref_by_mask(uint64_t throttle_mask, throttle_info_handle_t *throttle_info_handle); -void throttle_info_rel_by_mask(throttle_info_handle_t throttle_info_handle); -void throttle_info_update_by_mask(void *throttle_info_handle, int flags); -void throttle_info_disable_throttle(int devno, boolean_t isfusion); +int throttle_info_ref_by_mask(uint64_t throttle_mask, throttle_info_handle_t *throttle_info_handle); +void throttle_info_rel_by_mask(throttle_info_handle_t throttle_info_handle); +void throttle_info_update_by_mask(void *throttle_info_handle, int flags); +void throttle_info_disable_throttle(int devno, boolean_t isfusion); /* * 'throttle_info_handle' acquired via 'throttle_info_ref_by_mask' * 'policy' should be specified as either IOPOL_UTILITY or IOPOL_THROTTLE, * all other values will be treated as IOPOL_NORMAL (i.e. no throttling) */ -int throttle_info_io_will_be_throttled(void *throttle_info_handle, int policy); +int throttle_info_io_will_be_throttled(void *throttle_info_handle, int policy); #ifdef KERNEL_PRIVATE /* returned by throttle_io_will_be_throttled */ -#define THROTTLE_DISENGAGED 0 -#define THROTTLE_ENGAGED 1 -#define THROTTLE_NOW 2 +#define THROTTLE_DISENGAGED 0 +#define THROTTLE_ENGAGED 1 +#define THROTTLE_NOW 2 int throttle_io_will_be_throttled(int lowpri_window_msecs, mount_t mp); int throttle_lowpri_window(void) __attribute__((pure)); diff --git a/bsd/sys/termios.h b/bsd/sys/termios.h index ac0cacb1e..9de092c60 100644 --- a/bsd/sys/termios.h +++ b/bsd/sys/termios.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ @@ -73,74 +73,74 @@ * * Name Subscript Enabled by */ -#define VEOF 0 /* ICANON */ -#define VEOL 1 /* ICANON */ +#define VEOF 0 /* ICANON */ +#define VEOL 1 /* ICANON */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define VEOL2 2 /* ICANON together with IEXTEN */ +#define VEOL2 2 /* ICANON together with IEXTEN */ #endif -#define VERASE 3 /* ICANON */ +#define VERASE 3 /* ICANON */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define VWERASE 4 /* ICANON together with IEXTEN */ +#define VWERASE 4 /* ICANON together with IEXTEN */ #endif -#define VKILL 5 /* ICANON */ +#define VKILL 5 /* ICANON */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define VREPRINT 6 /* ICANON together with IEXTEN */ +#define VREPRINT 6 /* ICANON together with IEXTEN */ #endif /* 7 spare 1 */ -#define VINTR 8 /* ISIG */ -#define VQUIT 9 /* ISIG */ -#define VSUSP 10 /* ISIG */ +#define VINTR 8 /* ISIG */ +#define VQUIT 9 /* ISIG */ +#define VSUSP 10 /* ISIG */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define VDSUSP 11 /* ISIG together with IEXTEN */ +#define VDSUSP 11 /* ISIG together with IEXTEN */ #endif -#define VSTART 12 /* IXON, IXOFF */ -#define VSTOP 13 /* IXON, IXOFF */ +#define VSTART 12 /* IXON, IXOFF */ +#define VSTOP 13 /* IXON, IXOFF */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define VLNEXT 14 /* IEXTEN */ -#define VDISCARD 15 /* IEXTEN */ +#define VLNEXT 14 /* IEXTEN */ +#define VDISCARD 15 /* IEXTEN */ #endif -#define VMIN 16 /* !ICANON */ -#define VTIME 17 /* !ICANON */ +#define VMIN 16 /* !ICANON */ +#define VTIME 17 /* !ICANON */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define VSTATUS 18 /* ICANON together with IEXTEN */ +#define VSTATUS 18 /* ICANON together with IEXTEN */ /* 19 spare 2 */ #endif -#define NCCS 20 +#define NCCS 20 #include #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define CCEQ(val, c) ((c) == (val) ? (val) != _POSIX_VDISABLE : 0) +#define CCEQ(val, c) ((c) == (val) ? 
(val) != _POSIX_VDISABLE : 0) #endif /* * Input flags - software input processing */ -#define IGNBRK 0x00000001 /* ignore BREAK condition */ -#define BRKINT 0x00000002 /* map BREAK to SIGINTR */ -#define IGNPAR 0x00000004 /* ignore (discard) parity errors */ -#define PARMRK 0x00000008 /* mark parity and framing errors */ -#define INPCK 0x00000010 /* enable checking of parity errors */ -#define ISTRIP 0x00000020 /* strip 8th bit off chars */ -#define INLCR 0x00000040 /* map NL into CR */ -#define IGNCR 0x00000080 /* ignore CR */ -#define ICRNL 0x00000100 /* map CR to NL (ala CRMOD) */ -#define IXON 0x00000200 /* enable output flow control */ -#define IXOFF 0x00000400 /* enable input flow control */ -#define IXANY 0x00000800 /* any char will restart after stop */ +#define IGNBRK 0x00000001 /* ignore BREAK condition */ +#define BRKINT 0x00000002 /* map BREAK to SIGINTR */ +#define IGNPAR 0x00000004 /* ignore (discard) parity errors */ +#define PARMRK 0x00000008 /* mark parity and framing errors */ +#define INPCK 0x00000010 /* enable checking of parity errors */ +#define ISTRIP 0x00000020 /* strip 8th bit off chars */ +#define INLCR 0x00000040 /* map NL into CR */ +#define IGNCR 0x00000080 /* ignore CR */ +#define ICRNL 0x00000100 /* map CR to NL (ala CRMOD) */ +#define IXON 0x00000200 /* enable output flow control */ +#define IXOFF 0x00000400 /* enable input flow control */ +#define IXANY 0x00000800 /* any char will restart after stop */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define IMAXBEL 0x00002000 /* ring bell on input queue full */ -#define IUTF8 0x00004000 /* maintain state for UTF-8 VERASE */ +#define IMAXBEL 0x00002000 /* ring bell on input queue full */ +#define IUTF8 0x00004000 /* maintain state for UTF-8 VERASE */ #endif /*(_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ /* * Output flags - software output processing */ -#define OPOST 0x00000001 /* enable following output processing */ -#define ONLCR 0x00000002 /* map NL to CR-NL (ala CRMOD) */ +#define OPOST 0x00000001 /* enable following output processing */ +#define ONLCR 0x00000002 /* map NL to CR-NL (ala CRMOD) */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define OXTABS 0x00000004 /* expand tabs to spaces */ -#define ONOEOT 0x00000008 /* discard EOT's (^D) on output) */ +#define OXTABS 0x00000004 /* expand tabs to spaces */ +#define ONOEOT 0x00000008 /* discard EOT's (^D) on output) */ #endif /*(_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ /* * The following block of features is unimplemented. 
Use of these flags in @@ -148,17 +148,17 @@ * * - Begin unimplemented features */ -#define OCRNL 0x00000010 /* map CR to NL on output */ -#define ONOCR 0x00000020 /* no CR output at column 0 */ -#define ONLRET 0x00000040 /* NL performs CR function */ -#define OFILL 0x00000080 /* use fill characters for delay */ -#define NLDLY 0x00000300 /* \n delay */ -#define TABDLY 0x00000c04 /* horizontal tab delay */ -#define CRDLY 0x00003000 /* \r delay */ -#define FFDLY 0x00004000 /* form feed delay */ -#define BSDLY 0x00008000 /* \b delay */ -#define VTDLY 0x00010000 /* vertical tab delay */ -#define OFDEL 0x00020000 /* fill is DEL, else NUL */ +#define OCRNL 0x00000010 /* map CR to NL on output */ +#define ONOCR 0x00000020 /* no CR output at column 0 */ +#define ONLRET 0x00000040 /* NL performs CR function */ +#define OFILL 0x00000080 /* use fill characters for delay */ +#define NLDLY 0x00000300 /* \n delay */ +#define TABDLY 0x00000c04 /* horizontal tab delay */ +#define CRDLY 0x00003000 /* \r delay */ +#define FFDLY 0x00004000 /* form feed delay */ +#define BSDLY 0x00008000 /* \b delay */ +#define VTDLY 0x00010000 /* vertical tab delay */ +#define OFDEL 0x00020000 /* fill is DEL, else NUL */ #if !defined(_SYS_IOCTL_COMPAT_H_) || __DARWIN_UNIX03 /* * These manifest constants have the same names as those in the header @@ -170,28 +170,28 @@ * attempt to maintain these as the same values so as to avoid this being * an outright error in most compilers. */ -#define NL0 0x00000000 -#define NL1 0x00000100 +#define NL0 0x00000000 +#define NL1 0x00000100 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define NL2 0x00000200 -#define NL3 0x00000300 +#define NL2 0x00000200 +#define NL3 0x00000300 #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define TAB0 0x00000000 -#define TAB1 0x00000400 -#define TAB2 0x00000800 +#define TAB0 0x00000000 +#define TAB1 0x00000400 +#define TAB2 0x00000800 /* not in sys/ioctl_compat.h, use OXTABS value */ -#define TAB3 0x00000004 -#define CR0 0x00000000 -#define CR1 0x00001000 -#define CR2 0x00002000 -#define CR3 0x00003000 -#define FF0 0x00000000 -#define FF1 0x00004000 -#define BS0 0x00000000 -#define BS1 0x00008000 -#define VT0 0x00000000 -#define VT1 0x00010000 -#endif /* !_SYS_IOCTL_COMPAT_H_ */ +#define TAB3 0x00000004 +#define CR0 0x00000000 +#define CR1 0x00001000 +#define CR2 0x00002000 +#define CR3 0x00003000 +#define FF0 0x00000000 +#define FF1 0x00004000 +#define BS0 0x00000000 +#define BS1 0x00008000 +#define VT0 0x00000000 +#define VT1 0x00010000 +#endif /* !_SYS_IOCTL_COMPAT_H_ */ /* * + End unimplemented features */ @@ -200,27 +200,27 @@ * Control flags - hardware control of terminal */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define CIGNORE 0x00000001 /* ignore control flags */ +#define CIGNORE 0x00000001 /* ignore control flags */ #endif -#define CSIZE 0x00000300 /* character size mask */ -#define CS5 0x00000000 /* 5 bits (pseudo) */ -#define CS6 0x00000100 /* 6 bits */ -#define CS7 0x00000200 /* 7 bits */ -#define CS8 0x00000300 /* 8 bits */ -#define CSTOPB 0x00000400 /* send 2 stop bits */ -#define CREAD 0x00000800 /* enable receiver */ -#define PARENB 0x00001000 /* parity enable */ -#define PARODD 0x00002000 /* odd parity, else even */ -#define HUPCL 0x00004000 /* hang up on last close */ -#define CLOCAL 0x00008000 /* ignore modem status lines */ +#define CSIZE 0x00000300 /* character size mask */ +#define CS5 0x00000000 /* 5 bits (pseudo) */ +#define CS6 0x00000100 /* 6 bits */ +#define CS7 0x00000200 /* 7 bits */ 
+#define CS8 0x00000300 /* 8 bits */ +#define CSTOPB 0x00000400 /* send 2 stop bits */ +#define CREAD 0x00000800 /* enable receiver */ +#define PARENB 0x00001000 /* parity enable */ +#define PARODD 0x00002000 /* odd parity, else even */ +#define HUPCL 0x00004000 /* hang up on last close */ +#define CLOCAL 0x00008000 /* ignore modem status lines */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define CCTS_OFLOW 0x00010000 /* CTS flow control of output */ -#define CRTSCTS (CCTS_OFLOW | CRTS_IFLOW) -#define CRTS_IFLOW 0x00020000 /* RTS flow control of input */ -#define CDTR_IFLOW 0x00040000 /* DTR flow control of input */ -#define CDSR_OFLOW 0x00080000 /* DSR flow control of output */ -#define CCAR_OFLOW 0x00100000 /* DCD flow control of output */ -#define MDMBUF 0x00100000 /* old name for CCAR_OFLOW */ +#define CCTS_OFLOW 0x00010000 /* CTS flow control of output */ +#define CRTSCTS (CCTS_OFLOW | CRTS_IFLOW) +#define CRTS_IFLOW 0x00020000 /* RTS flow control of input */ +#define CDTR_IFLOW 0x00040000 /* DTR flow control of input */ +#define CDSR_OFLOW 0x00080000 /* DSR flow control of output */ +#define CCAR_OFLOW 0x00100000 /* DCD flow control of output */ +#define MDMBUF 0x00100000 /* old name for CCAR_OFLOW */ #endif @@ -233,50 +233,50 @@ */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define ECHOKE 0x00000001 /* visual erase for line kill */ +#define ECHOKE 0x00000001 /* visual erase for line kill */ #endif /*(_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define ECHOE 0x00000002 /* visually erase chars */ -#define ECHOK 0x00000004 /* echo NL after line kill */ -#define ECHO 0x00000008 /* enable echoing */ -#define ECHONL 0x00000010 /* echo NL even if ECHO is off */ +#define ECHOE 0x00000002 /* visually erase chars */ +#define ECHOK 0x00000004 /* echo NL after line kill */ +#define ECHO 0x00000008 /* enable echoing */ +#define ECHONL 0x00000010 /* echo NL even if ECHO is off */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define ECHOPRT 0x00000020 /* visual erase mode for hardcopy */ -#define ECHOCTL 0x00000040 /* echo control chars as ^(Char) */ +#define ECHOPRT 0x00000020 /* visual erase mode for hardcopy */ +#define ECHOCTL 0x00000040 /* echo control chars as ^(Char) */ #endif /*(_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define ISIG 0x00000080 /* enable signals INTR, QUIT, [D]SUSP */ -#define ICANON 0x00000100 /* canonicalize input lines */ +#define ISIG 0x00000080 /* enable signals INTR, QUIT, [D]SUSP */ +#define ICANON 0x00000100 /* canonicalize input lines */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define ALTWERASE 0x00000200 /* use alternate WERASE algorithm */ +#define ALTWERASE 0x00000200 /* use alternate WERASE algorithm */ #endif /*(_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define IEXTEN 0x00000400 /* enable DISCARD and LNEXT */ +#define IEXTEN 0x00000400 /* enable DISCARD and LNEXT */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #define EXTPROC 0x00000800 /* external processing */ #endif /*(_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define TOSTOP 0x00400000 /* stop background jobs from output */ +#define TOSTOP 0x00400000 /* stop background jobs from output */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define FLUSHO 0x00800000 /* output being flushed (state) */ -#define NOKERNINFO 0x02000000 /* no kernel output from VSTATUS */ -#define PENDIN 0x20000000 /* XXX retype pending input (state) */ +#define FLUSHO 0x00800000 /* output being flushed (state) */ +#define NOKERNINFO 
0x02000000 /* no kernel output from VSTATUS */ +#define PENDIN 0x20000000 /* XXX retype pending input (state) */ #endif /*(_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ -#define NOFLSH 0x80000000 /* don't flush after interrupt */ +#define NOFLSH 0x80000000 /* don't flush after interrupt */ -typedef unsigned long tcflag_t; -typedef unsigned char cc_t; -typedef unsigned long speed_t; +typedef unsigned long tcflag_t; +typedef unsigned char cc_t; +typedef unsigned long speed_t; struct termios { - tcflag_t c_iflag; /* input flags */ - tcflag_t c_oflag; /* output flags */ - tcflag_t c_cflag; /* control flags */ - tcflag_t c_lflag; /* local flags */ - cc_t c_cc[NCCS]; /* control chars */ - speed_t c_ispeed; /* input speed */ - speed_t c_ospeed; /* output speed */ + tcflag_t c_iflag; /* input flags */ + tcflag_t c_oflag; /* output flags */ + tcflag_t c_cflag; /* control flags */ + tcflag_t c_lflag; /* local flags */ + cc_t c_cc[NCCS]; /* control chars */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ }; #ifdef KERNEL -typedef __uint64_t user_tcflag_t; -typedef __uint64_t user_speed_t; +typedef __uint64_t user_tcflag_t; +typedef __uint64_t user_speed_t; /* * LP64 version of struct termios. tcflag_t and speed_t are long and must @@ -285,96 +285,96 @@ typedef __uint64_t user_speed_t; */ struct user_termios { - user_tcflag_t c_iflag; /* input flags */ - user_tcflag_t c_oflag; /* output flags */ - user_tcflag_t c_cflag; /* control flags */ - user_tcflag_t c_lflag; /* local flags */ - cc_t c_cc[NCCS]; /* control chars */ - user_speed_t c_ispeed __attribute((aligned(8))); /* input speed */ - user_speed_t c_ospeed; /* output speed */ + user_tcflag_t c_iflag; /* input flags */ + user_tcflag_t c_oflag; /* output flags */ + user_tcflag_t c_cflag; /* control flags */ + user_tcflag_t c_lflag; /* local flags */ + cc_t c_cc[NCCS]; /* control chars */ + user_speed_t c_ispeed __attribute((aligned(8))); /* input speed */ + user_speed_t c_ospeed; /* output speed */ }; /* 32 bit version */ struct termios32 { - __uint32_t c_iflag; /* input flags */ - __uint32_t c_oflag; /* output flags */ - __uint32_t c_cflag; /* control flags */ - __uint32_t c_lflag; /* local flags */ - cc_t c_cc[NCCS]; /* control chars */ - __uint32_t c_ispeed; /* input speed */ - __uint32_t c_ospeed; /* output speed */ + __uint32_t c_iflag; /* input flags */ + __uint32_t c_oflag; /* output flags */ + __uint32_t c_cflag; /* control flags */ + __uint32_t c_lflag; /* local flags */ + cc_t c_cc[NCCS]; /* control chars */ + __uint32_t c_ispeed; /* input speed */ + __uint32_t c_ospeed; /* output speed */ }; -#endif /* KERNEL */ +#endif /* KERNEL */ /* * Commands passed to tcsetattr() for setting the termios structure. */ -#define TCSANOW 0 /* make change immediate */ -#define TCSADRAIN 1 /* drain output, then change */ -#define TCSAFLUSH 2 /* drain output, flush input */ +#define TCSANOW 0 /* make change immediate */ +#define TCSADRAIN 1 /* drain output, then change */ +#define TCSAFLUSH 2 /* drain output, flush input */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define TCSASOFT 0x10 /* flag - don't alter h.w. state */ +#define TCSASOFT 0x10 /* flag - don't alter h.w. 
state */ #endif /* * Standard speeds */ -#define B0 0 -#define B50 50 -#define B75 75 -#define B110 110 -#define B134 134 -#define B150 150 -#define B200 200 -#define B300 300 -#define B600 600 -#define B1200 1200 -#define B1800 1800 -#define B2400 2400 -#define B4800 4800 -#define B9600 9600 -#define B19200 19200 -#define B38400 38400 +#define B0 0 +#define B50 50 +#define B75 75 +#define B110 110 +#define B134 134 +#define B150 150 +#define B200 200 +#define B300 300 +#define B600 600 +#define B1200 1200 +#define B1800 1800 +#define B2400 2400 +#define B4800 4800 +#define B9600 9600 +#define B19200 19200 +#define B38400 38400 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define B7200 7200 -#define B14400 14400 -#define B28800 28800 -#define B57600 57600 -#define B76800 76800 -#define B115200 115200 -#define B230400 230400 -#define EXTA 19200 -#define EXTB 38400 +#define B7200 7200 +#define B14400 14400 +#define B28800 28800 +#define B57600 57600 +#define B76800 76800 +#define B115200 115200 +#define B230400 230400 +#define EXTA 19200 +#define EXTB 38400 #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #ifndef KERNEL -#define TCIFLUSH 1 -#define TCOFLUSH 2 -#define TCIOFLUSH 3 -#define TCOOFF 1 -#define TCOON 2 -#define TCIOFF 3 -#define TCION 4 +#define TCIFLUSH 1 +#define TCOFLUSH 2 +#define TCIOFLUSH 3 +#define TCOOFF 1 +#define TCOON 2 +#define TCIOFF 3 +#define TCION 4 #include __BEGIN_DECLS -speed_t cfgetispeed(const struct termios *); -speed_t cfgetospeed(const struct termios *); -int cfsetispeed(struct termios *, speed_t); -int cfsetospeed(struct termios *, speed_t); -int tcgetattr(int, struct termios *); -int tcsetattr(int, int, const struct termios *); -int tcdrain(int) __DARWIN_ALIAS_C(tcdrain); -int tcflow(int, int); -int tcflush(int, int); -int tcsendbreak(int, int); +speed_t cfgetispeed(const struct termios *); +speed_t cfgetospeed(const struct termios *); +int cfsetispeed(struct termios *, speed_t); +int cfsetospeed(struct termios *, speed_t); +int tcgetattr(int, struct termios *); +int tcsetattr(int, int, const struct termios *); +int tcdrain(int) __DARWIN_ALIAS_C(tcdrain); +int tcflow(int, int); +int tcflush(int, int); +int tcsendbreak(int, int); #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -void cfmakeraw(struct termios *); -int cfsetspeed(struct termios *, speed_t); +void cfmakeraw(struct termios *); +int cfsetspeed(struct termios *, speed_t); #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ __END_DECLS diff --git a/bsd/sys/time.h b/bsd/sys/time.h index 97a536416..7b55c8234 100644 --- a/bsd/sys/time.h +++ b/bsd/sys/time.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
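[Editor's note] The declarations above complete the termios API: tcgetattr() and tcsetattr() move whole struct termios snapshots, cfmakeraw() clears ICANON, ECHO, ISIG, OPOST and friends in one step, and the VMIN/VTIME subscripts govern non-canonical reads. A minimal raw-mode sketch on stdin that restores the saved state before exiting; standard POSIX usage, not part of the patch:

#include <termios.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
    struct termios saved, raw;

    if (tcgetattr(STDIN_FILENO, &saved) == -1)
        return 1;

    raw = saved;
    cfmakeraw(&raw);         /* clears ICANON, ECHO, ISIG, OPOST, ... */
    raw.c_cc[VMIN] = 1;      /* read() returns after one byte ... */
    raw.c_cc[VTIME] = 0;     /* ... with no interbyte timeout */

    if (tcsetattr(STDIN_FILENO, TCSAFLUSH, &raw) == -1)
        return 1;

    char c;
    if (read(STDIN_FILENO, &c, 1) == 1)
        printf("got 0x%02x\r\n", (unsigned char)c);  /* \r\n: OPOST is off */

    tcsetattr(STDIN_FILENO, TCSAFLUSH, &saved);      /* restore */
    return 0;
}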
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -67,8 +67,8 @@ #include #include #ifdef KERNEL -#include /* user_time_t */ -#include /* uint64_t */ +#include /* user_time_t */ +#include /* uint64_t */ #else /* !KERNEL */ #include #endif /* KERNEL */ @@ -103,18 +103,18 @@ * Structure used as a parameter by getitimer(2) and setitimer(2) system * calls. */ -struct itimerval { - struct timeval it_interval; /* timer interval */ - struct timeval it_value; /* current value */ +struct itimerval { + struct timeval it_interval; /* timer interval */ + struct timeval it_value; /* current value */ }; /* * Names of the interval timers, and structure * defining a timer setting. */ -#define ITIMER_REAL 0 -#define ITIMER_VIRTUAL 1 -#define ITIMER_PROF 2 +#define ITIMER_REAL 0 +#define ITIMER_VIRTUAL 1 +#define ITIMER_PROF 2 /* * Select uses bit masks of file descriptors in longs. These macros @@ -132,51 +132,51 @@ struct itimerval { #include -#define TIMEVAL_TO_TIMESPEC(tv, ts) { \ - (ts)->tv_sec = (tv)->tv_sec; \ - (ts)->tv_nsec = (tv)->tv_usec * 1000; \ +#define TIMEVAL_TO_TIMESPEC(tv, ts) { \ + (ts)->tv_sec = (tv)->tv_sec; \ + (ts)->tv_nsec = (tv)->tv_usec * 1000; \ } -#define TIMESPEC_TO_TIMEVAL(tv, ts) { \ - (tv)->tv_sec = (ts)->tv_sec; \ - (tv)->tv_usec = (ts)->tv_nsec / 1000; \ +#define TIMESPEC_TO_TIMEVAL(tv, ts) { \ + (tv)->tv_sec = (ts)->tv_sec; \ + (tv)->tv_usec = (ts)->tv_nsec / 1000; \ } struct timezone { - int tz_minuteswest; /* minutes west of Greenwich */ - int tz_dsttime; /* type of dst correction */ + int tz_minuteswest; /* minutes west of Greenwich */ + int tz_dsttime; /* type of dst correction */ }; -#define DST_NONE 0 /* not on dst */ -#define DST_USA 1 /* USA style dst */ -#define DST_AUST 2 /* Australian style dst */ -#define DST_WET 3 /* Western European dst */ -#define DST_MET 4 /* Middle European dst */ -#define DST_EET 5 /* Eastern European dst */ -#define DST_CAN 6 /* Canada */ +#define DST_NONE 0 /* not on dst */ +#define DST_USA 1 /* USA style dst */ +#define DST_AUST 2 /* Australian style dst */ +#define DST_WET 3 /* Western European dst */ +#define DST_MET 4 /* Middle European dst */ +#define DST_EET 5 /* Eastern European dst */ +#define DST_CAN 6 /* Canada */ /* Operations on timevals. */ -#define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 -#define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) -#define timercmp(tvp, uvp, cmp) \ - (((tvp)->tv_sec == (uvp)->tv_sec) ? \ - ((tvp)->tv_usec cmp (uvp)->tv_usec) : \ +#define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0 +#define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec) +#define timercmp(tvp, uvp, cmp) \ + (((tvp)->tv_sec == (uvp)->tv_sec) ? 
\ + ((tvp)->tv_usec cmp (uvp)->tv_usec) : \ ((tvp)->tv_sec cmp (uvp)->tv_sec)) -#define timeradd(tvp, uvp, vvp) \ - do { \ - (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \ - (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \ - if ((vvp)->tv_usec >= 1000000) { \ - (vvp)->tv_sec++; \ - (vvp)->tv_usec -= 1000000; \ - } \ +#define timeradd(tvp, uvp, vvp) \ + do { \ + (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \ + (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \ + if ((vvp)->tv_usec >= 1000000) { \ + (vvp)->tv_sec++; \ + (vvp)->tv_usec -= 1000000; \ + } \ } while (0) -#define timersub(tvp, uvp, vvp) \ - do { \ - (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ - (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ - if ((vvp)->tv_usec < 0) { \ - (vvp)->tv_sec--; \ - (vvp)->tv_usec += 1000000; \ - } \ +#define timersub(tvp, uvp, vvp) \ + do { \ + (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \ + (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \ + if ((vvp)->tv_usec < 0) { \ + (vvp)->tv_sec--; \ + (vvp)->tv_usec += 1000000; \ + } \ } while (0) #define timevalcmp(l, r, cmp) timercmp(l, r, cmp) /* freebsd */ @@ -185,11 +185,11 @@ struct timezone { * Getkerninfo clock information structure */ struct clockinfo { - int hz; /* clock frequency */ - int tick; /* micro-seconds per hz tick */ - int tickadj; /* clock skew rate for adjtime() */ - int stathz; /* statistics clock frequency */ - int profhz; /* profiling clock frequency */ + int hz; /* clock frequency */ + int tick; /* micro-seconds per hz tick */ + int tickadj; /* clock skew rate for adjtime() */ + int stathz; /* statistics clock frequency */ + int profhz; /* profiling clock frequency */ }; #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ @@ -201,24 +201,24 @@ struct clockinfo { #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) __BEGIN_DECLS -void microtime(struct timeval *tv); -void microtime_with_abstime(struct timeval *tv, uint64_t *abstime); -void microuptime(struct timeval *tv); -#define getmicrotime(a) microtime(a) -#define getmicrouptime(a) microuptime(a) -void nanotime(struct timespec *ts); -void nanouptime(struct timespec *ts); -#define getnanotime(a) nanotime(a) -#define getnanouptime(a) nanouptime(a) -void timevaladd(struct timeval *t1, struct timeval *t2); -void timevalsub(struct timeval *t1, struct timeval *t2); -void timevalfix(struct timeval *t1); -#ifdef BSD_KERNEL_PRIVATE -time_t boottime_sec(void); -void boottime_timeval(struct timeval *tv); -void inittodr(time_t base); -int ratecheck(struct timeval *lasttime, const struct timeval *mininterval); -int ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps); +void microtime(struct timeval *tv); +void microtime_with_abstime(struct timeval *tv, uint64_t *abstime); +void microuptime(struct timeval *tv); +#define getmicrotime(a) microtime(a) +#define getmicrouptime(a) microuptime(a) +void nanotime(struct timespec *ts); +void nanouptime(struct timespec *ts); +#define getnanotime(a) nanotime(a) +#define getnanouptime(a) nanouptime(a) +void timevaladd(struct timeval *t1, struct timeval *t2); +void timevalsub(struct timeval *t1, struct timeval *t2); +void timevalfix(struct timeval *t1); +#ifdef BSD_KERNEL_PRIVATE +time_t boottime_sec(void); +void boottime_timeval(struct timeval *tv); +void inittodr(time_t base); +int ratecheck(struct timeval *lasttime, const struct timeval *mininterval); +int ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps); #endif /* BSD_KERNEL_PRIVATE */ __END_DECLS @@ -234,20 +234,20 @@ __END_DECLS __BEGIN_DECLS #if 
!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -int adjtime(const struct timeval *, struct timeval *); -int futimes(int, const struct timeval *); -int lutimes(const char *, const struct timeval *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); -int settimeofday(const struct timeval *, const struct timezone *); +int adjtime(const struct timeval *, struct timeval *); +int futimes(int, const struct timeval *); +int lutimes(const char *, const struct timeval *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int settimeofday(const struct timeval *, const struct timezone *); #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -int getitimer(int, struct itimerval *); -int gettimeofday(struct timeval * __restrict, void * __restrict); +int getitimer(int, struct itimerval *); +int gettimeofday(struct timeval * __restrict, void * __restrict); -#include /* select() prototype */ +#include /* select() prototype */ -int setitimer(int, const struct itimerval * __restrict, - struct itimerval * __restrict); -int utimes(const char *, const struct timeval *); +int setitimer(int, const struct itimerval * __restrict, + struct itimerval * __restrict); +int utimes(const char *, const struct timeval *); __END_DECLS diff --git a/bsd/sys/timeb.h b/bsd/sys/timeb.h index 0debabf8f..92b94a34e 100644 --- a/bsd/sys/timeb.h +++ b/bsd/sys/timeb.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -80,16 +80,16 @@ * [XSI] Structure whose address is passed as the first parameter to ftime() */ struct timeb { - time_t time; /* [XSI] Seconds since the Epoch */ - unsigned short millitm; /* [XSI] Milliseconds since the Epoch */ - short timezone; /* [XSI] Minutes west of CUT */ - short dstflag; /* [XSI] non-zero if DST in effect */ + time_t time; /* [XSI] Seconds since the Epoch */ + unsigned short millitm; /* [XSI] Milliseconds since the Epoch */ + short timezone; /* [XSI] Minutes west of CUT */ + short dstflag; /* [XSI] non-zero if DST in effect */ }; #ifndef KERNEL __BEGIN_DECLS /* [XSI] Legacy interface */ -int ftime(struct timeb *); +int ftime(struct timeb *); __END_DECLS #endif /* !KERNEL */ diff --git a/bsd/sys/times.h b/bsd/sys/times.h index eeacb398b..b63d0f15b 100644 --- a/bsd/sys/times.h +++ b/bsd/sys/times.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
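[Editor's note] The timeradd/timersub/timercmp macros above do normalized timeval arithmetic, borrowing from and carrying into tv_sec through the tv_usec field; timercmp notably takes a comparison operator token as its third argument. A small sketch pairing them with gettimeofday(); the 150 ms budget is an arbitrary example value:

#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct timeval start, now, elapsed, budget = { 0, 150000 };

    gettimeofday(&start, NULL);
    usleep(100000);                       /* stand-in for real work */
    gettimeofday(&now, NULL);

    timersub(&now, &start, &elapsed);     /* handles the tv_usec borrow */
    printf("elapsed = %ld.%06d s\n",
        (long)elapsed.tv_sec, (int)elapsed.tv_usec);

    /* The third argument is spliced in as the comparison operator. */
    if (timercmp(&elapsed, &budget, <))
        printf("under budget\n");

    timeradd(&elapsed, &budget, &budget); /* carries into tv_sec as needed */
    return 0;
}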
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -66,8 +66,8 @@ * @(#)times.h 8.4 (Berkeley) 1/21/94 */ -#ifndef _SYS_TIMES_H_ -#define _SYS_TIMES_H_ +#ifndef _SYS_TIMES_H_ +#define _SYS_TIMES_H_ #include #include @@ -80,15 +80,15 @@ * [XSI] Structure whose address is passed as the first parameter to times() */ struct tms { - clock_t tms_utime; /* [XSI] User CPU time */ - clock_t tms_stime; /* [XSI] System CPU time */ - clock_t tms_cutime; /* [XSI] Terminated children user CPU time */ - clock_t tms_cstime; /* [XSI] Terminated children System CPU time */ + clock_t tms_utime; /* [XSI] User CPU time */ + clock_t tms_stime; /* [XSI] System CPU time */ + clock_t tms_cutime; /* [XSI] Terminated children user CPU time */ + clock_t tms_cstime; /* [XSI] Terminated children System CPU time */ }; #ifndef KERNEL __BEGIN_DECLS -clock_t times(struct tms *); +clock_t times(struct tms *); __END_DECLS #endif #endif /* !_SYS_TIMES_H_ */ diff --git a/bsd/sys/timex.h b/bsd/sys/timex.h index 5e8a3bfdc..8093cc79c 100644 --- a/bsd/sys/timex.h +++ b/bsd/sys/timex.h @@ -65,7 +65,7 @@ #include -#define NTP_API 4 /* NTP API version */ +#define NTP_API 4 /* NTP API version */ /* * The following defines establish the performance envelope of the @@ -100,8 +100,8 @@ #define MOD_TIMECONST 0x0020 /* set PLL time constant */ #define MOD_PPSMAX 0x0040 /* set PPS maximum averaging time */ #define MOD_TAI 0x0080 /* set TAI offset */ -#define MOD_MICRO 0x1000 /* select microsecond resolution */ -#define MOD_NANO 0x2000 /* select nanosecond resolution */ +#define MOD_MICRO 0x1000 /* select microsecond resolution */ +#define MOD_NANO 0x2000 /* select nanosecond resolution */ #define MOD_CLKB 0x4000 /* select clock B */ #define MOD_CLKA 0x8000 /* select clock A */ @@ -145,11 +145,11 @@ * NTP user interface -- ntp_gettime - used to read kernel clock values */ struct ntptimeval { - struct timespec time; /* current time (ns) (ro) */ - long maxerror; /* maximum error (us) (ro) */ - long esterror; /* estimated error (us) (ro) */ - long tai; /* TAI offset */ - int time_state; /* time status */ + struct timespec time; /* current time (ns) (ro) */ + long maxerror; /* maximum error (us) (ro) */ + long esterror; /* estimated error (us) (ro) */ + long tai; /* TAI offset */ + int time_state; /* time status */ }; /* @@ -160,29 +160,29 @@ struct ntptimeval { * STA_NANO is zero and nanoseconds if not. 
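The struct tms above splits CPU time four ways: the caller's own user/system time plus that accumulated from reaped children. A small sketch of reading it with times(3), converting tick counts to seconds via sysconf(_SC_CLK_TCK):

#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int
main(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK); /* clock_t ticks per second */
	volatile unsigned long n = 0;

	for (unsigned long i = 0; i < 50000000UL; i++)
		n += i;                 /* burn a little user CPU */

	if (times(&t) == (clock_t)-1) {
		perror("times");
		return 1;
	}
	printf("utime=%.3fs stime=%.3fs cutime=%.3fs cstime=%.3fs\n",
	    (double)t.tms_utime / hz, (double)t.tms_stime / hz,
	    (double)t.tms_cutime / hz, (double)t.tms_cstime / hz);
	return 0;
}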
*/ struct timex { - unsigned int modes; /* clock mode bits (wo) */ - long offset; /* time offset (ns/us) (rw) */ - long freq; /* frequency offset (scaled PPM) (rw) */ - long maxerror; /* maximum error (us) (rw) */ - long esterror; /* estimated error (us) (rw) */ - int status; /* clock status bits (rw) */ - long constant; /* poll interval (log2 s) (rw) */ - long precision; /* clock precision (ns/us) (ro) */ - long tolerance; /* clock frequency tolerance (scaled - * PPM) (ro) */ + unsigned int modes; /* clock mode bits (wo) */ + long offset; /* time offset (ns/us) (rw) */ + long freq; /* frequency offset (scaled PPM) (rw) */ + long maxerror; /* maximum error (us) (rw) */ + long esterror; /* estimated error (us) (rw) */ + int status; /* clock status bits (rw) */ + long constant; /* poll interval (log2 s) (rw) */ + long precision; /* clock precision (ns/us) (ro) */ + long tolerance; /* clock frequency tolerance (scaled + * PPM) (ro) */ /* * The following read-only structure members are used by * the PPS signal discipline that is currently not supported. * They are included for compatibility. */ - long ppsfreq; /* PPS frequency (scaled PPM) (ro) */ - long jitter; /* PPS jitter (ns/us) (ro) */ - int shift; /* interval duration (s) (shift) (ro) */ - long stabil; /* PPS stability (scaled PPM) (ro) */ - long jitcnt; /* jitter limit exceeded (ro) */ - long calcnt; /* calibration intervals (ro) */ - long errcnt; /* calibration errors (ro) */ - long stbcnt; /* stability limit exceeded (ro) */ + long ppsfreq; /* PPS frequency (scaled PPM) (ro) */ + long jitter; /* PPS jitter (ns/us) (ro) */ + int shift; /* interval duration (s) (shift) (ro) */ + long stabil; /* PPS stability (scaled PPM) (ro) */ + long jitcnt; /* jitter limit exceeded (ro) */ + long calcnt; /* calibration intervals (ro) */ + long errcnt; /* calibration errors (ro) */ + long stbcnt; /* stability limit exceeded (ro) */ }; #ifdef KERNEL @@ -194,16 +194,16 @@ struct timex { #include int64_t ntp_get_freq(void); -void ntp_update_second(int64_t *adjustment, clock_sec_t secs); -void ntp_init(void); +void ntp_update_second(int64_t *adjustment, clock_sec_t secs); +void ntp_init(void); #endif #else /* !_KERNEL */ #include __BEGIN_DECLS #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -int ntp_adjtime(struct timex *); -int ntp_gettime(struct ntptimeval *); +int ntp_adjtime(struct timex *); +int ntp_gettime(struct ntptimeval *); #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ __END_DECLS #endif /* KERNEL */ diff --git a/bsd/sys/tprintf.h b/bsd/sys/tprintf.h index a9d704a4e..48c3ec946 100644 --- a/bsd/sys/tprintf.h +++ b/bsd/sys/tprintf.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
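Both NTP entry points above can be exercised read-only from userspace with no privilege: ntp_gettime() fills a struct ntptimeval, and ntp_adjtime() with modes == 0 only reports the current discipline state. A sketch, assuming the platform actually ships these calls as the NTP_API 4 declarations here advertise:

#include <stdio.h>
#include <sys/timex.h>

int
main(void)
{
	struct ntptimeval ntv;
	struct timex tx = { 0 };        /* modes == 0: query, adjust nothing */
	int state;

	state = ntp_gettime(&ntv);      /* returns TIME_OK, TIME_ERROR, ... */
	printf("time     = %ld.%09ld\n",
	    (long)ntv.time.tv_sec, (long)ntv.time.tv_nsec);
	printf("maxerror = %ld us, esterror = %ld us, tai = %ld s, state = %d\n",
	    ntv.maxerror, ntv.esterror, ntv.tai, state);

	if (ntp_adjtime(&tx) == -1) {
		perror("ntp_adjtime");
		return 1;
	}
	printf("freq     = %ld (scaled PPM: ppm * 65536)\n", tx.freq);
	printf("status   = 0x%x%s\n", tx.status,
	    (tx.status & STA_NANO) ? " (nanosecond offsets)" : "");
	return 0;
}

Setting any MOD_* bit in modes turns the call into an adjustment and requires privilege.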
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_TPRINTF_H_ -#define _SYS_TPRINTF_H_ +#define _SYS_TPRINTF_H_ #include #include @@ -72,9 +72,9 @@ typedef struct session *tpr_t; __BEGIN_DECLS -tpr_t tprintf_open(struct proc *); -void tprintf_close(tpr_t); -void tprintf(tpr_t, const char *fmt, ...) __printflike(2,3); +tpr_t tprintf_open(struct proc *); +void tprintf_close(tpr_t); +void tprintf(tpr_t, const char *fmt, ...) __printflike(2, 3); __END_DECLS #endif /* __APPLE_API_UNSTABLE */ diff --git a/bsd/sys/trace.h b/bsd/sys/trace.h index 75ec17900..e99ff30ee 100644 --- a/bsd/sys/trace.h +++ b/bsd/sys/trace.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
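The __printflike(2, 3) annotation on tprintf() lets the compiler type-check the format string (argument 2) against the variadic arguments (from position 3 on). tprintf() itself is kernel-only, but the mechanism is visible in a userspace analogue, assuming a GCC/Clang-style format attribute:

#include <stdarg.h>
#include <stdio.h>

struct session;                         /* opaque, like tpr_t's target */

/*
 * my_tprintf is a hypothetical stand-in; format(printf, 2, 3) is what
 * __printflike(2, 3) expands to under GCC/Clang.
 */
__attribute__((format(printf, 2, 3)))
static void
my_tprintf(struct session *sess, const char *fmt, ...)
{
	va_list ap;

	(void)sess;                     /* the real tprintf writes to a session's tty */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int
main(void)
{
	my_tprintf(NULL, "pid %d exited\n", 42);
	/* my_tprintf(NULL, "pid %d\n", "oops");  -Wformat would reject this */
	return 0;
}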
All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_TRACE_H_ -#define _SYS_TRACE_H_ +#define _SYS_TRACE_H_ #include @@ -70,86 +70,85 @@ /* * File system buffer tracing points; all trace */ -#define TR_BREADHIT 0 /* buffer read found in cache */ -#define TR_BREADMISS 1 /* buffer read not in cache */ -#define TR_BWRITE 2 /* buffer written */ -#define TR_BREADHITRA 3 /* buffer read-ahead found in cache */ -#define TR_BREADMISSRA 4 /* buffer read-ahead not in cache */ -#define TR_XFODMISS 5 /* exe fod read */ -#define TR_XFODHIT 6 /* exe fod read */ -#define TR_BRELSE 7 /* brelse */ -#define TR_BREALLOC 8 /* expand/contract a buffer */ +#define TR_BREADHIT 0 /* buffer read found in cache */ +#define TR_BREADMISS 1 /* buffer read not in cache */ +#define TR_BWRITE 2 /* buffer written */ +#define TR_BREADHITRA 3 /* buffer read-ahead found in cache */ +#define TR_BREADMISSRA 4 /* buffer read-ahead not in cache */ +#define TR_XFODMISS 5 /* exe fod read */ +#define TR_XFODHIT 6 /* exe fod read */ +#define TR_BRELSE 7 /* brelse */ +#define TR_BREALLOC 8 /* expand/contract a buffer */ /* * Memory allocator trace points; all trace the amount of memory involved */ -#define TR_MALL 10 /* memory allocated */ +#define TR_MALL 10 /* memory allocated */ /* * Paging trace points: all are */ -#define TR_INTRANS 20 /* page intransit block */ -#define TR_EINTRANS 21 /* page intransit wait done */ -#define TR_FRECLAIM 22 /* reclaim from free list */ -#define TR_RECLAIM 23 /* reclaim from loop */ -#define TR_XSFREC 24 /* reclaim from free list instead of drum */ -#define TR_XIFREC 25 /* reclaim from free list instead of fsys */ -#define TR_WAITMEM 26 /* wait for memory in pagein */ -#define TR_EWAITMEM 27 /* end memory wait in pagein */ -#define TR_ZFOD 28 /* zfod page fault */ -#define TR_EXFOD 29 /* exec fod page fault */ -#define TR_VRFOD 30 /* vread fod page fault */ -#define TR_CACHEFOD 31 /* fod in file system cache */ -#define TR_SWAPIN 32 /* drum page fault */ -#define TR_PGINDONE 33 /* page in done */ -#define TR_SWAPIO 34 /* swap i/o request arrives */ +#define TR_INTRANS 20 /* page intransit block */ +#define TR_EINTRANS 21 /* page intransit wait done */ +#define TR_FRECLAIM 22 /* reclaim from free list */ +#define TR_RECLAIM 23 /* reclaim from loop */ +#define TR_XSFREC 24 /* reclaim from free list instead of drum */ +#define TR_XIFREC 25 /* reclaim from free list instead of fsys */ +#define TR_WAITMEM 26 /* wait for memory in pagein */ +#define TR_EWAITMEM 27 /* end memory wait in pagein */ +#define TR_ZFOD 28 /* zfod page fault */ +#define TR_EXFOD 29 /* exec fod page fault */ +#define TR_VRFOD 30 /* vread fod page fault */ +#define TR_CACHEFOD 31 /* fod in file system cache */ +#define TR_SWAPIN 32 /* drum page fault */ +#define TR_PGINDONE 33 /* page in done */ +#define TR_SWAPIO 34 /* swap i/o request arrives */ /* * System call trace points. */ -#define TR_VADVISE 40 /* vadvise occurred with */ +#define TR_VADVISE 40 /* vadvise occurred with */ /* * Miscellaneous */ -#define TR_STAMP 45 /* user said vtrace(VTR_STAMP, value); */ +#define TR_STAMP 45 /* user said vtrace(VTR_STAMP, value); */ /* * This defines the size of the trace flags array. */ -#define TR_NFLAGS 100 /* generous */ +#define TR_NFLAGS 100 /* generous */ -#define TRCSIZ 4096 +#define TRCSIZ 4096 /* * Specifications of the vtrace() system call, which takes one argument. 
*/ -#define VTRACE 64+51 +#define VTRACE 64+51 -#define VTR_DISABLE 0 /* set a trace flag to 0 */ -#define VTR_ENABLE 1 /* set a trace flag to 1 */ -#define VTR_VALUE 2 /* return value of a trace flag */ -#define VTR_UALARM 3 /* set alarm to go off (sig 16) */ - /* in specified number of hz */ -#define VTR_STAMP 4 /* user specified stamp */ +#define VTR_DISABLE 0 /* set a trace flag to 0 */ +#define VTR_ENABLE 1 /* set a trace flag to 1 */ +#define VTR_VALUE 2 /* return value of a trace flag */ +#define VTR_UALARM 3 /* set alarm to go off (sig 16) */ + /* in specified number of hz */ +#define VTR_STAMP 4 /* user specified stamp */ #ifdef KERNEL #if TRACE -extern struct proc *traceproc; -extern int tracewhich, tracebuf[TRCSIZ]; -extern u_int tracex; -extern char traceflags[TR_NFLAGS]; -#define pack(v,b) (((v)->v_mount->mnt_vfsstat.f_fsid.val[0])<<16)|(b) -#define trace(a,b,c) { \ - if (traceflags[a]) \ - trace1(a,b,c); \ +extern struct proc *traceproc; +extern int tracewhich, tracebuf[TRCSIZ]; +extern u_int tracex; +extern char traceflags[TR_NFLAGS]; +#define pack(v, b) (((v)->v_mount->mnt_vfsstat.f_fsid.val[0])<<16)|(b) +#define trace(a, b, c) { \ + if (traceflags[a]) \ + trace1(a,b,c); \ } #else -#define trace(a,b,c) +#define trace(a, b, c) #endif #endif /* KERNEL */ #endif /* __APPLE_API_OBSOLETE */ #endif /* !_SYS_TRACE_H_ */ - diff --git a/bsd/sys/tty.h b/bsd/sys/tty.h index fb9bcbfd7..45708bf47 100644 --- a/bsd/sys/tty.h +++ b/bsd/sys/tty.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ @@ -67,12 +67,12 @@ */ #ifndef _SYS_TTY_H_ -#define _SYS_TTY_H_ +#define _SYS_TTY_H_ #include #include #include -#include /* For struct selinfo. */ +#include /* For struct selinfo. */ #ifdef KERNEL @@ -86,17 +86,17 @@ __END_DECLS * exactly the same behaviour as in true clists. * if c_cq is NULL, the ring buffer has no TTY_QUOTE functionality * (but, saves memory and cpu time) - * + * * *DON'T* play with c_cs, c_ce, c_cq, or c_cl outside tty_subr.c!!! 
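The trace() macro in sys/trace.h above shows the classic cheap-when-off pattern: a one-byte flag test guards the call to trace1(), so disabled trace points cost almost nothing at runtime. A userspace sketch of the same pattern, wrapped in do/while (0) so it behaves as a single statement:

#include <stdio.h>

#define TR_NFLAGS 100

static char traceflags[TR_NFLAGS];      /* one enable byte per trace point */

static void
trace1(int pt, int v1, int v2)
{
	printf("trace point %d: %d %d\n", pt, v1, v2);
}

#define trace(a, b, c) do { \
	if (traceflags[(a)]) \
	        trace1((a), (b), (c)); \
} while (0)

int
main(void)
{
	traceflags[7] = 1;              /* enable point 7 only */
	trace(7, 10, 20);               /* fires */
	trace(8, 30, 40);               /* compiled in, skipped at runtime */
	return 0;
}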
*/ struct clist { - int c_cc; /* count of characters in queue */ - int c_cn; /* total ring buffer length */ - u_char *c_cf; /* points to first character */ - u_char *c_cl; /* points to next open character */ - u_char *c_cs; /* start of ring buffer */ - u_char *c_ce; /* c_ce + c_len */ - u_char *c_cq; /* N bits/bytes long, see tty_subr.c */ + int c_cc; /* count of characters in queue */ + int c_cn; /* total ring buffer length */ + u_char *c_cf; /* points to first character */ + u_char *c_cl; /* points to next open character */ + u_char *c_cs; /* start of ring buffer */ + u_char *c_ce; /* c_ce + c_len */ + u_char *c_cq; /* N bits/bytes long, see tty_subr.c */ }; #ifndef TTYCLSIZE @@ -111,220 +111,220 @@ struct clist { * (low, high, timeout). */ struct tty { - lck_mtx_t t_lock; /* Per tty lock */ - - struct clist t_rawq; /* Device raw input queue. */ - long t_rawcc; /* Raw input queue statistics. */ - struct clist t_canq; /* Device canonical queue. */ - long t_cancc; /* Canonical queue statistics. */ - struct clist t_outq; /* Device output queue. */ - long t_outcc; /* Output queue statistics. */ - int t_line; /* Interface to device drivers. */ - dev_t t_dev; /* Device. */ - int t_state; /* Device and driver (TS*) state. */ - int t_flags; /* Tty flags. */ + lck_mtx_t t_lock; /* Per tty lock */ + + struct clist t_rawq; /* Device raw input queue. */ + long t_rawcc; /* Raw input queue statistics. */ + struct clist t_canq; /* Device canonical queue. */ + long t_cancc; /* Canonical queue statistics. */ + struct clist t_outq; /* Device output queue. */ + long t_outcc; /* Output queue statistics. */ + int t_line; /* Interface to device drivers. */ + dev_t t_dev; /* Device. */ + int t_state; /* Device and driver (TS*) state. */ + int t_flags; /* Tty flags. */ int t_timeout; /* Timeout for ttywait() */ - struct pgrp *t_pgrp; /* Foreground process group. */ - struct session *t_session; /* Enclosing session. */ - struct selinfo t_rsel; /* Tty read/oob select. */ - struct selinfo t_wsel; /* Tty write select. */ - struct termios t_termios; /* Termios state. */ - struct winsize t_winsize; /* Window size. */ - /* Start output. */ - void (*t_oproc)(struct tty *); - /* Stop output. */ - void (*t_stop)(struct tty *, int); - /* Set hardware state. */ - int (*t_param)(struct tty *, struct termios *); - void *t_sc; /* XXX: net/if_sl.c:sl_softc. */ - int t_column; /* Tty output column. */ - int t_rocount, t_rocol; /* Tty. */ - int t_hiwat; /* High water mark. */ - int t_lowat; /* Low water mark. */ - int t_gen; /* Generation number. */ - void *t_iokit; /* IOKit management */ - int t_refcnt; /* reference count */ + struct pgrp *t_pgrp; /* Foreground process group. */ + struct session *t_session; /* Enclosing session. */ + struct selinfo t_rsel; /* Tty read/oob select. */ + struct selinfo t_wsel; /* Tty write select. */ + struct termios t_termios; /* Termios state. */ + struct winsize t_winsize; /* Window size. */ + /* Start output. */ + void (*t_oproc)(struct tty *); + /* Stop output. */ + void (*t_stop)(struct tty *, int); + /* Set hardware state. */ + int (*t_param)(struct tty *, struct termios *); + void *t_sc; /* XXX: net/if_sl.c:sl_softc. */ + int t_column; /* Tty output column. */ + int t_rocount, t_rocol; /* Tty. */ + int t_hiwat; /* High water mark. */ + int t_lowat; /* Low water mark. */ + int t_gen; /* Generation number. 
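In the clist above, c_cf tracks the first queued character and c_cl the next free slot, with both wrapping inside the ring [c_cs, c_ce). A self-contained userspace miniature of that discipline, omitting the c_cq quoting bitmap:

#include <stdio.h>

#define RINGLEN 8

struct miniclist {
	int            c_cc;            /* characters queued */
	unsigned char  buf[RINGLEN];    /* plays the role of c_cs..c_ce */
	unsigned char *c_cf;            /* first character */
	unsigned char *c_cl;            /* next open slot */
};

static int
mini_putc(int c, struct miniclist *q)
{
	if (q->c_cc == RINGLEN)
		return -1;              /* queue full */
	*q->c_cl = (unsigned char)c;
	if (++q->c_cl == q->buf + RINGLEN)
		q->c_cl = q->buf;       /* wrap at the ring's end */
	q->c_cc++;
	return 0;
}

static int
mini_getc(struct miniclist *q)
{
	int c;

	if (q->c_cc == 0)
		return -1;              /* queue empty */
	c = *q->c_cf;
	if (++q->c_cf == q->buf + RINGLEN)
		q->c_cf = q->buf;
	q->c_cc--;
	return c;
}

int
main(void)
{
	struct miniclist q = { 0 };

	q.c_cf = q.c_cl = q.buf;
	for (const char *s = "tty"; *s != '\0'; s++)
		mini_putc(*s, &q);
	for (int c; (c = mini_getc(&q)) != -1; )
		putchar(c);             /* prints "tty" */
	putchar('\n');
	return 0;
}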
*/ + void *t_iokit; /* IOKit management */ + int t_refcnt; /* reference count */ }; #define TTY_NULL (struct tty *)0 -#define t_cc t_termios.c_cc -#define t_cflag t_termios.c_cflag -#define t_iflag t_termios.c_iflag -#define t_ispeed t_termios.c_ispeed -#define t_lflag t_termios.c_lflag -#define t_min t_termios.c_min -#define t_oflag t_termios.c_oflag -#define t_ospeed t_termios.c_ospeed -#define t_time t_termios.c_time +#define t_cc t_termios.c_cc +#define t_cflag t_termios.c_cflag +#define t_iflag t_termios.c_iflag +#define t_ispeed t_termios.c_ispeed +#define t_lflag t_termios.c_lflag +#define t_min t_termios.c_min +#define t_oflag t_termios.c_oflag +#define t_ospeed t_termios.c_ospeed +#define t_time t_termios.c_time -#define TTIPRI 25 /* Sleep priority for tty reads. */ -#define TTOPRI 26 /* Sleep priority for tty writes. */ +#define TTIPRI 25 /* Sleep priority for tty reads. */ +#define TTOPRI 26 /* Sleep priority for tty writes. */ /* * User data unfortunately has to be copied through buffers on the way to * and from clists. The buffers are on the stack so their sizes must be * fairly small. */ -#define IBUFSIZ 384 /* Should be >= max value of MIN. */ -#define OBUFSIZ 100 +#define IBUFSIZ 384 /* Should be >= max value of MIN. */ +#define OBUFSIZ 100 #ifndef TTYHOG -#define TTYHOG 1024 +#define TTYHOG 1024 #endif -#define TTMAXHIWAT roundup(2048, CBSIZE) -#define TTMINHIWAT roundup(100, CBSIZE) -#define TTMAXLOWAT 256 -#define TTMINLOWAT 32 +#define TTMAXHIWAT roundup(2048, CBSIZE) +#define TTMINHIWAT roundup(100, CBSIZE) +#define TTMAXLOWAT 256 +#define TTMINLOWAT 32 #else struct tty; struct clist; #endif /* KERNEL */ /* These flags are kept in t_state. */ -#define TS_SO_OLOWAT 0x00001 /* Wake up when output <= low water. */ -#define TS_ASYNC 0x00002 /* Tty in async I/O mode. */ -#define TS_BUSY 0x00004 /* Draining output. */ -#define TS_CARR_ON 0x00008 /* Carrier is present. */ -#define TS_FLUSH 0x00010 /* Outq has been flushed during DMA. */ -#define TS_ISOPEN 0x00020 /* Open has completed. */ -#define TS_TBLOCK 0x00040 /* Further input blocked. */ -#define TS_TIMEOUT 0x00080 /* Wait for output char processing. */ -#define TS_TTSTOP 0x00100 /* Output paused. */ +#define TS_SO_OLOWAT 0x00001 /* Wake up when output <= low water. */ +#define TS_ASYNC 0x00002 /* Tty in async I/O mode. */ +#define TS_BUSY 0x00004 /* Draining output. */ +#define TS_CARR_ON 0x00008 /* Carrier is present. */ +#define TS_FLUSH 0x00010 /* Outq has been flushed during DMA. */ +#define TS_ISOPEN 0x00020 /* Open has completed. */ +#define TS_TBLOCK 0x00040 /* Further input blocked. */ +#define TS_TIMEOUT 0x00080 /* Wait for output char processing. */ +#define TS_TTSTOP 0x00100 /* Output paused. */ #ifdef notyet -#define TS_WOPEN 0x00200 /* Open in progress. */ +#define TS_WOPEN 0x00200 /* Open in progress. */ #endif -#define TS_XCLUDE 0x00400 /* Tty requires exclusivity. */ +#define TS_XCLUDE 0x00400 /* Tty requires exclusivity. */ /* State for intra-line fancy editing work. */ -#define TS_BKSL 0x00800 /* State for lowercase \ work. */ -#define TS_CNTTB 0x01000 /* Counting tab width, ignore FLUSHO. */ -#define TS_ERASE 0x02000 /* Within a \.../ for PRTRUB. */ -#define TS_LNCH 0x04000 /* Next character is literal. */ -#define TS_TYPEN 0x08000 /* Retyping suspended input (PENDIN). */ -#define TS_LOCAL (TS_BKSL | TS_CNTTB | TS_ERASE | TS_LNCH | TS_TYPEN) +#define TS_BKSL 0x00800 /* State for lowercase \ work. */ +#define TS_CNTTB 0x01000 /* Counting tab width, ignore FLUSHO. 
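The t_iflag/t_lflag/t_ospeed shorthands above simply reach into the tty's embedded struct termios. Userspace sees the same state through tcgetattr(3); a minimal sketch run against a terminal on stdin:

#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int
main(void)
{
	struct termios t;

	if (tcgetattr(STDIN_FILENO, &t) == -1) {  /* fails if stdin is not a tty */
		perror("tcgetattr");
		return 1;
	}
	printf("c_iflag = 0x%lx\n", (unsigned long)t.c_iflag);
	printf("c_lflag = 0x%lx\n", (unsigned long)t.c_lflag);
	printf("ospeed  = %lu\n", (unsigned long)cfgetospeed(&t));
	printf("ICANON  = %s\n", (t.c_lflag & ICANON) ? "on" : "off");
	return 0;
}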
*/ +#define TS_ERASE 0x02000 /* Within a \.../ for PRTRUB. */ +#define TS_LNCH 0x04000 /* Next character is literal. */ +#define TS_TYPEN 0x08000 /* Retyping suspended input (PENDIN). */ +#define TS_LOCAL (TS_BKSL | TS_CNTTB | TS_ERASE | TS_LNCH | TS_TYPEN) /* Extras. */ -#define TS_CAN_BYPASS_L_RINT 0x010000 /* Device in "raw" mode. */ -#define TS_CONNECTED 0x020000 /* Connection open. */ -#define TS_SNOOP 0x040000 /* Device is being snooped on. */ -#define TS_SO_OCOMPLETE 0x080000 /* Wake up when output completes. */ -#define TS_ZOMBIE 0x100000 /* Connection lost. */ +#define TS_CAN_BYPASS_L_RINT 0x010000 /* Device in "raw" mode. */ +#define TS_CONNECTED 0x020000 /* Connection open. */ +#define TS_SNOOP 0x040000 /* Device is being snooped on. */ +#define TS_SO_OCOMPLETE 0x080000 /* Wake up when output completes. */ +#define TS_ZOMBIE 0x100000 /* Connection lost. */ /* Hardware flow-control-invoked bits. */ -#define TS_CAR_OFLOW 0x200000 /* For MDMBUF (XXX handle in driver). */ +#define TS_CAR_OFLOW 0x200000 /* For MDMBUF (XXX handle in driver). */ #ifdef notyet -#define TS_CTS_OFLOW 0x400000 /* For CCTS_OFLOW. */ -#define TS_DSR_OFLOW 0x800000 /* For CDSR_OFLOW. */ +#define TS_CTS_OFLOW 0x400000 /* For CCTS_OFLOW. */ +#define TS_DSR_OFLOW 0x800000 /* For CDSR_OFLOW. */ #endif -#define TS_IOCTL_NOT_OK 0x1000000 /* Workaround */ -#define TS_PGRPHUP 0x2000000 /* Don't change Foregroud process group */ +#define TS_IOCTL_NOT_OK 0x1000000 /* Workaround */ +#define TS_PGRPHUP 0x2000000 /* Don't change Foregroud process group */ /* Character type information. */ -#define ORDINARY 0 -#define CONTROL 1 -#define BACKSPACE 2 -#define NEWLINE 3 -#define TAB 4 -#define VTAB 5 -#define RETURN 6 +#define ORDINARY 0 +#define CONTROL 1 +#define BACKSPACE 2 +#define NEWLINE 3 +#define TAB 4 +#define VTAB 5 +#define RETURN 6 struct speedtab { - int sp_speed; /* Speed. */ - int sp_code; /* Code. */ + int sp_speed; /* Speed. */ + int sp_code; /* Code. */ }; /* Modem control commands (driver). */ -#define DMSET 0 -#define DMBIS 1 -#define DMBIC 2 -#define DMGET 3 +#define DMSET 0 +#define DMBIS 1 +#define DMBIC 2 +#define DMGET 3 /* Flags on a character passed to ttyinput. */ -#define TTY_CHARMASK 0x000000ff /* Character mask */ -#define TTY_QUOTE 0x00000100 /* Character quoted */ -#define TTY_ERRORMASK 0xff000000 /* Error mask */ -#define TTY_FE 0x01000000 /* Framing error */ -#define TTY_PE 0x02000000 /* Parity error */ -#define TTY_OE 0x04000000 /* Overrun error */ -#define TTY_BI 0x08000000 /* Break condition */ +#define TTY_CHARMASK 0x000000ff /* Character mask */ +#define TTY_QUOTE 0x00000100 /* Character quoted */ +#define TTY_ERRORMASK 0xff000000 /* Error mask */ +#define TTY_FE 0x01000000 /* Framing error */ +#define TTY_PE 0x02000000 /* Parity error */ +#define TTY_OE 0x04000000 /* Overrun error */ +#define TTY_BI 0x08000000 /* Break condition */ #ifdef KERNEL /* Unique sleep addresses. 
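The struct speedtab above backs the ttspeedtab() lookup declared further down: a driver maps a baud rate (sp_speed) to a hardware-specific code (sp_code). A sketch of that lookup over a sentinel-terminated table; the divisor values here are made up for illustration:

#include <stdio.h>

struct speedtab {
	int sp_speed;                   /* Speed. */
	int sp_code;                    /* Code. */
};

static int
my_ttspeedtab(int speed, const struct speedtab *table)
{
	for (; table->sp_speed != -1; table++) {
		if (table->sp_speed == speed)
			return table->sp_code;
	}
	return -1;                      /* speed not supported by this table */
}

int
main(void)
{
	/* hypothetical hardware divisor codes, -1/-1 terminated */
	static const struct speedtab toy[] = {
		{ 9600, 12 }, { 19200, 6 }, { 38400, 3 }, { -1, -1 },
	};

	printf("9600  -> %d\n", my_ttspeedtab(9600, toy));   /* 12 */
	printf("57600 -> %d\n", my_ttspeedtab(57600, toy));  /* -1 */
	return 0;
}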
*/ -#define TSA_CARR_ON(tp) ((void *)&(tp)->t_rawq) -#define TSA_HUP_OR_INPUT(tp) ((void *)&(tp)->t_rawq.c_cf) -#define TSA_OCOMPLETE(tp) ((void *)&(tp)->t_outq.c_cl) -#define TSA_OLOWAT(tp) ((void *)&(tp)->t_outq) -#define TSA_PTC_READ(tp) ((void *)&(tp)->t_outq.c_cf) -#define TSA_PTC_WRITE(tp) ((void *)&(tp)->t_rawq.c_cl) -#define TSA_PTS_READ(tp) ((void *)&(tp)->t_canq) +#define TSA_CARR_ON(tp) ((void *)&(tp)->t_rawq) +#define TSA_HUP_OR_INPUT(tp) ((void *)&(tp)->t_rawq.c_cf) +#define TSA_OCOMPLETE(tp) ((void *)&(tp)->t_outq.c_cl) +#define TSA_OLOWAT(tp) ((void *)&(tp)->t_outq) +#define TSA_PTC_READ(tp) ((void *)&(tp)->t_outq.c_cf) +#define TSA_PTC_WRITE(tp) ((void *)&(tp)->t_rawq.c_cl) +#define TSA_PTS_READ(tp) ((void *)&(tp)->t_canq) __BEGIN_DECLS -int b_to_q(const u_char *cp, int cc, struct clist *q); -void catq(struct clist *from, struct clist *to); -void clist_init(void); -int getc(struct clist *q); -void ndflush(struct clist *q, int cc); -int ndqb(struct clist *q, int flag); -u_char *firstc (struct clist *clp, int *c); -u_char *nextc(struct clist *q, u_char *cp, int *c); -int putc(int c, struct clist *q); -int q_to_b(struct clist *q, u_char *cp, int cc); -int unputc(struct clist *q); -int clalloc(struct clist *clp, int size, int quot); -void clfree(struct clist *clp); -void cinit(void); -void clrbits(u_char *cp, int off, int len); +int b_to_q(const u_char *cp, int cc, struct clist *q); +void catq(struct clist *from, struct clist *to); +void clist_init(void); +int getc(struct clist *q); +void ndflush(struct clist *q, int cc); +int ndqb(struct clist *q, int flag); +u_char *firstc(struct clist *clp, int *c); +u_char *nextc(struct clist *q, u_char *cp, int *c); +int putc(int c, struct clist *q); +int q_to_b(struct clist *q, u_char *cp, int cc); +int unputc(struct clist *q); +int clalloc(struct clist *clp, int size, int quot); +void clfree(struct clist *clp); +void cinit(void); +void clrbits(u_char *cp, int off, int len); #ifdef KERNEL_PRIVATE -void tty_init(void); +void tty_init(void); /* * The locked version of this function is used from routines which hold * the tty_lock(), such as ttcompat() in tty_compat.c */ -int ttioctl_locked(struct tty *tp, u_long com, caddr_t data, int flag, - struct proc *p); +int ttioctl_locked(struct tty *tp, u_long com, caddr_t data, int flag, + struct proc *p); -int ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, - struct proc *p); +int ttcompat(struct tty *tp, u_long com, caddr_t data, int flag, + struct proc *p); #endif /* KERNEL_PRIVATE */ void tty_lock(struct tty *tp); void tty_unlock(struct tty *tp); -void termioschars(struct termios *t); -int tputchar(int c, struct tty *tp); -int ttioctl(struct tty *tp, u_long com, caddr_t data, int flag, - struct proc *p); -int ttread(struct tty *tp, struct uio *uio, int flag); -int ttyselect(struct tty *tp, int rw, void * wql, struct proc *p); -int ttselect(dev_t dev, int rw, void * wql, struct proc *p); -void ttsetwater(struct tty *tp); -int ttspeedtab(int speed, struct speedtab *table); -int ttstart(struct tty *tp); -void ttwakeup(struct tty *tp); -int ttwrite(struct tty *tp, struct uio *uio, int flag); -void ttwwakeup(struct tty *tp); -void ttyblock(struct tty *tp); -int ttycheckoutq(struct tty *tp, int wait); -int ttyclose(struct tty *tp); /* LEGACY: avoid using */ -void ttyflush(struct tty *tp, int rw); -void ttyinfo(struct tty *tp); -void ttyinfo_locked(struct tty *tp); -int ttyinput(int c, struct tty *tp); -int ttylclose(struct tty *tp, int flag); -int ttymodem(struct tty *tp, int flag); -int 
ttyopen(dev_t device, struct tty *tp); -int ttysleep(struct tty *tp, - void *chan, int pri, const char *wmesg, int timeout); -int ttywait(struct tty *tp); +void termioschars(struct termios *t); +int tputchar(int c, struct tty *tp); +int ttioctl(struct tty *tp, u_long com, caddr_t data, int flag, + struct proc *p); +int ttread(struct tty *tp, struct uio *uio, int flag); +int ttyselect(struct tty *tp, int rw, void * wql, struct proc *p); +int ttselect(dev_t dev, int rw, void * wql, struct proc *p); +void ttsetwater(struct tty *tp); +int ttspeedtab(int speed, struct speedtab *table); +int ttstart(struct tty *tp); +void ttwakeup(struct tty *tp); +int ttwrite(struct tty *tp, struct uio *uio, int flag); +void ttwwakeup(struct tty *tp); +void ttyblock(struct tty *tp); +int ttycheckoutq(struct tty *tp, int wait); +int ttyclose(struct tty *tp); /* LEGACY: avoid using */ +void ttyflush(struct tty *tp, int rw); +void ttyinfo(struct tty *tp); +void ttyinfo_locked(struct tty *tp); +int ttyinput(int c, struct tty *tp); +int ttylclose(struct tty *tp, int flag); +int ttymodem(struct tty *tp, int flag); +int ttyopen(dev_t device, struct tty *tp); +int ttysleep(struct tty *tp, + void *chan, int pri, const char *wmesg, int timeout); +int ttywait(struct tty *tp); struct tty *ttymalloc(void); void ttyfree(struct tty *); void ttysetpgrphup(struct tty *tp); diff --git a/bsd/sys/ttychars.h b/bsd/sys/ttychars.h index e536c1a2b..5a6990b2a 100644 --- a/bsd/sys/ttychars.h +++ b/bsd/sys/ttychars.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. 
All Rights Reserved */ @@ -74,26 +74,25 @@ */ struct ttychars { - char tc_erase; /* erase last character */ - char tc_kill; /* erase entire line */ - char tc_intrc; /* interrupt */ - char tc_quitc; /* quit */ - char tc_startc; /* start output */ - char tc_stopc; /* stop output */ - char tc_eofc; /* end-of-file */ - char tc_brkc; /* input delimiter (like nl) */ - char tc_suspc; /* stop process signal */ - char tc_dsuspc; /* delayed stop process signal */ - char tc_rprntc; /* reprint line */ - char tc_flushc; /* flush output (toggles) */ - char tc_werasc; /* word erase */ - char tc_lnextc; /* literal next character */ + char tc_erase; /* erase last character */ + char tc_kill; /* erase entire line */ + char tc_intrc; /* interrupt */ + char tc_quitc; /* quit */ + char tc_startc; /* start output */ + char tc_stopc; /* stop output */ + char tc_eofc; /* end-of-file */ + char tc_brkc; /* input delimiter (like nl) */ + char tc_suspc; /* stop process signal */ + char tc_dsuspc; /* delayed stop process signal */ + char tc_rprntc; /* reprint line */ + char tc_flushc; /* flush output (toggles) */ + char tc_werasc; /* word erase */ + char tc_lnextc; /* literal next character */ }; #ifdef USE_OLD_TTY -#include /* to pick up character defaults */ +#include /* to pick up character defaults */ #endif #endif /* __APPLE_API_UNSTABLE */ #endif /* !_SYS_TTYCHARS_H_ */ - diff --git a/bsd/sys/ttycom.h b/bsd/sys/ttycom.h index 6f874a197..c8eed3145 100644 --- a/bsd/sys/ttycom.h +++ b/bsd/sys/ttycom.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ @@ -66,8 +66,8 @@ * @(#)ttycom.h 8.1 (Berkeley) 3/28/94 */ -#ifndef _SYS_TTYCOM_H_ -#define _SYS_TTYCOM_H_ +#ifndef _SYS_TTYCOM_H_ +#define _SYS_TTYCOM_H_ #include /* @@ -80,110 +80,110 @@ * in order to provide a consistent interface, but is not used by the kernel. 
*/ struct winsize { - unsigned short ws_row; /* rows, in characters */ - unsigned short ws_col; /* columns, in characters */ - unsigned short ws_xpixel; /* horizontal size, pixels */ - unsigned short ws_ypixel; /* vertical size, pixels */ + unsigned short ws_row; /* rows, in characters */ + unsigned short ws_col; /* columns, in characters */ + unsigned short ws_xpixel; /* horizontal size, pixels */ + unsigned short ws_ypixel; /* vertical size, pixels */ }; -#define TIOCMODG _IOR('t', 3, int) /* get modem control state */ -#define TIOCMODS _IOW('t', 4, int) /* set modem control state */ -#define TIOCM_LE 0001 /* line enable */ -#define TIOCM_DTR 0002 /* data terminal ready */ -#define TIOCM_RTS 0004 /* request to send */ -#define TIOCM_ST 0010 /* secondary transmit */ -#define TIOCM_SR 0020 /* secondary receive */ -#define TIOCM_CTS 0040 /* clear to send */ -#define TIOCM_CAR 0100 /* carrier detect */ -#define TIOCM_CD TIOCM_CAR -#define TIOCM_RNG 0200 /* ring */ -#define TIOCM_RI TIOCM_RNG -#define TIOCM_DSR 0400 /* data set ready */ - /* 8-10 compat */ -#define TIOCEXCL _IO('t', 13) /* set exclusive use of tty */ -#define TIOCNXCL _IO('t', 14) /* reset exclusive use of tty */ - /* 15 unused */ -#define TIOCFLUSH _IOW('t', 16, int) /* flush buffers */ - /* 17-18 compat */ -#define TIOCGETA _IOR('t', 19, struct termios) /* get termios struct */ -#define TIOCSETA _IOW('t', 20, struct termios) /* set termios struct */ -#define TIOCSETAW _IOW('t', 21, struct termios) /* drain output, set */ -#define TIOCSETAF _IOW('t', 22, struct termios) /* drn out, fls in, set */ +#define TIOCMODG _IOR('t', 3, int) /* get modem control state */ +#define TIOCMODS _IOW('t', 4, int) /* set modem control state */ +#define TIOCM_LE 0001 /* line enable */ +#define TIOCM_DTR 0002 /* data terminal ready */ +#define TIOCM_RTS 0004 /* request to send */ +#define TIOCM_ST 0010 /* secondary transmit */ +#define TIOCM_SR 0020 /* secondary receive */ +#define TIOCM_CTS 0040 /* clear to send */ +#define TIOCM_CAR 0100 /* carrier detect */ +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RNG 0200 /* ring */ +#define TIOCM_RI TIOCM_RNG +#define TIOCM_DSR 0400 /* data set ready */ + /* 8-10 compat */ +#define TIOCEXCL _IO('t', 13) /* set exclusive use of tty */ +#define TIOCNXCL _IO('t', 14) /* reset exclusive use of tty */ + /* 15 unused */ +#define TIOCFLUSH _IOW('t', 16, int) /* flush buffers */ + /* 17-18 compat */ +#define TIOCGETA _IOR('t', 19, struct termios) /* get termios struct */ +#define TIOCSETA _IOW('t', 20, struct termios) /* set termios struct */ +#define TIOCSETAW _IOW('t', 21, struct termios) /* drain output, set */ +#define TIOCSETAF _IOW('t', 22, struct termios) /* drn out, fls in, set */ #ifdef KERNEL -#define TIOCGETA_32 _IOR('t', 19, struct termios32) /* get termios struct */ -#define TIOCSETA_32 _IOW('t', 20, struct termios32) /* set termios struct */ -#define TIOCSETAW_32 _IOW('t', 21, struct termios32) /* drain output, set */ -#define TIOCSETAF_32 _IOW('t', 22, struct termios32) /* drn out, fls in, set */ -#define TIOCGETA_64 _IOR('t', 19, struct user_termios) -#define TIOCSETA_64 _IOW('t', 20, struct user_termios) -#define TIOCSETAW_64 _IOW('t', 21, struct user_termios) -#define TIOCSETAF_64 _IOW('t', 22, struct user_termios) -#endif /* KERNEL */ -#define TIOCGETD _IOR('t', 26, int) /* get line discipline */ -#define TIOCSETD _IOW('t', 27, int) /* set line discipline */ -#define TIOCIXON _IO('t', 129) /* internal input VSTART */ -#define TIOCIXOFF _IO('t', 128) /* internal input VSTOP */ - /* 127-124 
compat */ -#define TIOCSBRK _IO('t', 123) /* set break bit */ -#define TIOCCBRK _IO('t', 122) /* clear break bit */ -#define TIOCSDTR _IO('t', 121) /* set data terminal ready */ -#define TIOCCDTR _IO('t', 120) /* clear data terminal ready */ -#define TIOCGPGRP _IOR('t', 119, int) /* get pgrp of tty */ -#define TIOCSPGRP _IOW('t', 118, int) /* set pgrp of tty */ - /* 117-116 compat */ -#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ -#define TIOCSTI _IOW('t', 114, char) /* simulate terminal input */ -#define TIOCNOTTY _IO('t', 113) /* void tty association */ -#define TIOCPKT _IOW('t', 112, int) /* pty: set/clear packet mode */ -#define TIOCPKT_DATA 0x00 /* data packet */ -#define TIOCPKT_FLUSHREAD 0x01 /* flush packet */ -#define TIOCPKT_FLUSHWRITE 0x02 /* flush packet */ -#define TIOCPKT_STOP 0x04 /* stop output */ -#define TIOCPKT_START 0x08 /* start output */ -#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */ -#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */ -#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */ -#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ -#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ -#define TIOCMSET _IOW('t', 109, int) /* set all modem bits */ -#define TIOCMBIS _IOW('t', 108, int) /* bis modem bits */ -#define TIOCMBIC _IOW('t', 107, int) /* bic modem bits */ -#define TIOCMGET _IOR('t', 106, int) /* get all modem bits */ -#define TIOCREMOTE _IOW('t', 105, int) /* remote input editing */ -#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */ -#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */ -#define TIOCUCNTL _IOW('t', 102, int) /* pty: set/clr usr cntl mode */ -#define TIOCSTAT _IO('t', 101) /* simulate ^T status message */ -#define UIOCCMD(n) _IO('u', n) /* usr cntl op "n" */ -#define TIOCSCONS _IO('t', 99) /* 4.2 compatibility */ -#define TIOCCONS _IOW('t', 98, int) /* become virtual console */ -#define TIOCSCTTY _IO('t', 97) /* become controlling tty */ -#define TIOCEXT _IOW('t', 96, int) /* pty: external processing */ -#define TIOCSIG _IO('t', 95) /* pty: generate signal */ -#define TIOCDRAIN _IO('t', 94) /* wait till output drained */ -#define TIOCMSDTRWAIT _IOW('t', 91, int) /* modem: set wait on close */ -#define TIOCMGDTRWAIT _IOR('t', 90, int) /* modem: get wait on close */ -#define TIOCTIMESTAMP _IOR('t', 89, struct timeval) /* enable/get timestamp - * of last input event */ -#define TIOCDCDTIMESTAMP _IOR('t', 88, struct timeval) /* enable/get timestamp - * of last DCd rise */ +#define TIOCGETA_32 _IOR('t', 19, struct termios32) /* get termios struct */ +#define TIOCSETA_32 _IOW('t', 20, struct termios32) /* set termios struct */ +#define TIOCSETAW_32 _IOW('t', 21, struct termios32) /* drain output, set */ +#define TIOCSETAF_32 _IOW('t', 22, struct termios32) /* drn out, fls in, set */ +#define TIOCGETA_64 _IOR('t', 19, struct user_termios) +#define TIOCSETA_64 _IOW('t', 20, struct user_termios) +#define TIOCSETAW_64 _IOW('t', 21, struct user_termios) +#define TIOCSETAF_64 _IOW('t', 22, struct user_termios) +#endif /* KERNEL */ +#define TIOCGETD _IOR('t', 26, int) /* get line discipline */ +#define TIOCSETD _IOW('t', 27, int) /* set line discipline */ +#define TIOCIXON _IO('t', 129) /* internal input VSTART */ +#define TIOCIXOFF _IO('t', 128) /* internal input VSTOP */ + /* 127-124 compat */ +#define TIOCSBRK _IO('t', 123) /* set break bit */ +#define TIOCCBRK _IO('t', 122) /* clear break bit */ +#define TIOCSDTR _IO('t', 121) /* set data terminal ready */ +#define TIOCCDTR 
_IO('t', 120) /* clear data terminal ready */ +#define TIOCGPGRP _IOR('t', 119, int) /* get pgrp of tty */ +#define TIOCSPGRP _IOW('t', 118, int) /* set pgrp of tty */ + /* 117-116 compat */ +#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ +#define TIOCSTI _IOW('t', 114, char) /* simulate terminal input */ +#define TIOCNOTTY _IO('t', 113) /* void tty association */ +#define TIOCPKT _IOW('t', 112, int) /* pty: set/clear packet mode */ +#define TIOCPKT_DATA 0x00 /* data packet */ +#define TIOCPKT_FLUSHREAD 0x01 /* flush packet */ +#define TIOCPKT_FLUSHWRITE 0x02 /* flush packet */ +#define TIOCPKT_STOP 0x04 /* stop output */ +#define TIOCPKT_START 0x08 /* start output */ +#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */ +#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */ +#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */ +#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ +#define TIOCSTART _IO('t', 110) /* start output, like ^Q */ +#define TIOCMSET _IOW('t', 109, int) /* set all modem bits */ +#define TIOCMBIS _IOW('t', 108, int) /* bis modem bits */ +#define TIOCMBIC _IOW('t', 107, int) /* bic modem bits */ +#define TIOCMGET _IOR('t', 106, int) /* get all modem bits */ +#define TIOCREMOTE _IOW('t', 105, int) /* remote input editing */ +#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */ +#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */ +#define TIOCUCNTL _IOW('t', 102, int) /* pty: set/clr usr cntl mode */ +#define TIOCSTAT _IO('t', 101) /* simulate ^T status message */ +#define UIOCCMD(n) _IO('u', n) /* usr cntl op "n" */ +#define TIOCSCONS _IO('t', 99) /* 4.2 compatibility */ +#define TIOCCONS _IOW('t', 98, int) /* become virtual console */ +#define TIOCSCTTY _IO('t', 97) /* become controlling tty */ +#define TIOCEXT _IOW('t', 96, int) /* pty: external processing */ +#define TIOCSIG _IO('t', 95) /* pty: generate signal */ +#define TIOCDRAIN _IO('t', 94) /* wait till output drained */ +#define TIOCMSDTRWAIT _IOW('t', 91, int) /* modem: set wait on close */ +#define TIOCMGDTRWAIT _IOR('t', 90, int) /* modem: get wait on close */ +#define TIOCTIMESTAMP _IOR('t', 89, struct timeval) /* enable/get timestamp + * of last input event */ +#define TIOCDCDTIMESTAMP _IOR('t', 88, struct timeval) /* enable/get timestamp + * of last DCd rise */ #ifdef KERNEL -#define TIOCTIMESTAMP_32 _IOR('t', 89, struct user32_timeval) -#define TIOCDCDTIMESTAMP_32 _IOR('t', 88, struct user32_timeval) -#define TIOCTIMESTAMP_64 _IOR('t', 89, struct user64_timeval) -#define TIOCDCDTIMESTAMP_64 _IOR('t', 88, struct user64_timeval) +#define TIOCTIMESTAMP_32 _IOR('t', 89, struct user32_timeval) +#define TIOCDCDTIMESTAMP_32 _IOR('t', 88, struct user32_timeval) +#define TIOCTIMESTAMP_64 _IOR('t', 89, struct user64_timeval) +#define TIOCDCDTIMESTAMP_64 _IOR('t', 88, struct user64_timeval) #endif -#define TIOCSDRAINWAIT _IOW('t', 87, int) /* set ttywait timeout */ -#define TIOCGDRAINWAIT _IOR('t', 86, int) /* get ttywait timeout */ -#define TIOCDSIMICROCODE _IO('t', 85) /* download microcode to - * DSI Softmodem */ -#define TIOCPTYGRANT _IO('t', 84) /* grantpt(3) */ -#define TIOCPTYGNAME _IOC(IOC_OUT, 't', 83, 128) /* ptsname(3) */ -#define TIOCPTYUNLK _IO('t', 82) /* unlockpt(3) */ +#define TIOCSDRAINWAIT _IOW('t', 87, int) /* set ttywait timeout */ +#define TIOCGDRAINWAIT _IOR('t', 86, int) /* get ttywait timeout */ +#define TIOCDSIMICROCODE _IO('t', 85) /* download microcode to + * DSI Softmodem */ +#define TIOCPTYGRANT _IO('t', 84) /* grantpt(3) */ 
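Of the ioctls above, TIOCGWINSZ is the one applications reach for most often: it fills the struct winsize defined at the top of this header. A short sketch against the terminal on stdout (TIOCSWINSZ is the setter; pty masters use it to propagate resizes, which raises SIGWINCH in the foreground job):

#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
main(void)
{
	struct winsize ws;

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) == -1) {
		perror("ioctl(TIOCGWINSZ)");    /* fails if stdout is not a tty */
		return 1;
	}
	printf("%hu rows x %hu cols (%hux%hu pixels)\n",
	    ws.ws_row, ws.ws_col, ws.ws_xpixel, ws.ws_ypixel);
	return 0;
}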
+#define TIOCPTYGNAME _IOC(IOC_OUT, 't', 83, 128) /* ptsname(3) */ +#define TIOCPTYUNLK _IO('t', 82) /* unlockpt(3) */ -#define TTYDISC 0 /* termios tty line discipline */ -#define TABLDISC 3 /* tablet discipline */ -#define SLIPDISC 4 /* serial IP discipline */ -#define PPPDISC 5 /* PPP discipline */ +#define TTYDISC 0 /* termios tty line discipline */ +#define TABLDISC 3 /* tablet discipline */ +#define SLIPDISC 4 /* serial IP discipline */ +#define PPPDISC 5 /* PPP discipline */ #endif /* !_SYS_TTYCOM_H_ */ diff --git a/bsd/sys/ttydefaults.h b/bsd/sys/ttydefaults.h index 115d7dac4..eed46dded 100644 --- a/bsd/sys/ttydefaults.h +++ b/bsd/sys/ttydefaults.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ @@ -70,43 +70,43 @@ * System wide defaults for terminal state. */ #ifndef _SYS_TTYDEFAULTS_H_ -#define _SYS_TTYDEFAULTS_H_ +#define _SYS_TTYDEFAULTS_H_ /* * Defaults on "first" open. 
*/ -#define TTYDEF_IFLAG (BRKINT | ICRNL | IMAXBEL | IXON | IXANY) -#define TTYDEF_OFLAG (OPOST | ONLCR) -#define TTYDEF_LFLAG (ECHO | ICANON | ISIG | IEXTEN | ECHOE|ECHOKE|ECHOCTL) -#define TTYDEF_CFLAG (CREAD | CS8 | HUPCL) -#define TTYDEF_SPEED (B9600) +#define TTYDEF_IFLAG (BRKINT | ICRNL | IMAXBEL | IXON | IXANY) +#define TTYDEF_OFLAG (OPOST | ONLCR) +#define TTYDEF_LFLAG (ECHO | ICANON | ISIG | IEXTEN | ECHOE|ECHOKE|ECHOCTL) +#define TTYDEF_CFLAG (CREAD | CS8 | HUPCL) +#define TTYDEF_SPEED (B9600) /* * Control Character Defaults */ -#define CTRL(x) (x&037) -#define CEOF CTRL('d') -#define CEOL 0xff /* XXX avoid _POSIX_VDISABLE */ -#define CERASE 0177 -#define CINTR CTRL('c') -#define CSTATUS CTRL('t') -#define CKILL CTRL('u') -#define CMIN 1 -#define CQUIT 034 /* FS, ^\ */ -#define CSUSP CTRL('z') -#define CTIME 0 -#define CDSUSP CTRL('y') -#define CSTART CTRL('q') -#define CSTOP CTRL('s') -#define CLNEXT CTRL('v') -#define CDISCARD CTRL('o') -#define CWERASE CTRL('w') -#define CREPRINT CTRL('r') -#define CEOT CEOF +#define CTRL(x) (x&037) +#define CEOF CTRL('d') +#define CEOL 0xff /* XXX avoid _POSIX_VDISABLE */ +#define CERASE 0177 +#define CINTR CTRL('c') +#define CSTATUS CTRL('t') +#define CKILL CTRL('u') +#define CMIN 1 +#define CQUIT 034 /* FS, ^\ */ +#define CSUSP CTRL('z') +#define CTIME 0 +#define CDSUSP CTRL('y') +#define CSTART CTRL('q') +#define CSTOP CTRL('s') +#define CLNEXT CTRL('v') +#define CDISCARD CTRL('o') +#define CWERASE CTRL('w') +#define CREPRINT CTRL('r') +#define CEOT CEOF /* compat */ -#define CBRK CEOL -#define CRPRNT CREPRINT -#define CFLUSH CDISCARD +#define CBRK CEOL +#define CRPRNT CREPRINT +#define CFLUSH CDISCARD /* PROTECTED INCLUSION ENDS HERE */ #endif /* !_SYS_TTYDEFAULTS_H_ */ @@ -115,10 +115,10 @@ * #define TTYDEFCHARS to include an array of default control characters. */ #ifdef TTYDEFCHARS -static cc_t ttydefchars[NCCS] = { - CEOF, CEOL, CEOL, CERASE, CWERASE, CKILL, CREPRINT, - _POSIX_VDISABLE, CINTR, CQUIT, CSUSP, CDSUSP, CSTART, CSTOP, CLNEXT, - CDISCARD, CMIN, CTIME, CSTATUS, _POSIX_VDISABLE +static cc_t ttydefchars[NCCS] = { + CEOF, CEOL, CEOL, CERASE, CWERASE, CKILL, CREPRINT, + _POSIX_VDISABLE, CINTR, CQUIT, CSUSP, CDSUSP, CSTART, CSTOP, CLNEXT, + CDISCARD, CMIN, CTIME, CSTATUS, _POSIX_VDISABLE }; #undef TTYDEFCHARS #endif diff --git a/bsd/sys/ttydev.h b/bsd/sys/ttydev.h index 00b1fe0db..d448e1be2 100644 --- a/bsd/sys/ttydev.h +++ b/bsd/sys/ttydev.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
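CTRL(x) above masks an ASCII letter down to its control code (x & 037), which is how the CEOF/CINTR/CSUSP defaults are derived. A tiny check, reproducing the macro locally so the sketch does not depend on <sys/ttydefaults.h>:

#include <stdio.h>

#define CTRL(x) ((x) & 037)             /* as defined above */

int
main(void)
{
	printf("CEOF  = CTRL('d') = 0x%02x\n", CTRL('d'));  /* 0x04, ^D */
	printf("CINTR = CTRL('c') = 0x%02x\n", CTRL('c'));  /* 0x03, ^C */
	printf("CSUSP = CTRL('z') = 0x%02x\n", CTRL('z'));  /* 0x1a, ^Z */
	return 0;
}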
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All Rights Reserved */ @@ -64,25 +64,25 @@ /* COMPATIBILITY HEADER FILE */ #ifndef _SYS_TTYDEV_H_ -#define _SYS_TTYDEV_H_ +#define _SYS_TTYDEV_H_ #ifdef USE_OLD_TTY -#define B0 0 -#define B50 1 -#define B75 2 -#define B110 3 -#define B134 4 -#define B150 5 -#define B200 6 -#define B300 7 -#define B600 8 -#define B1200 9 -#define B1800 10 -#define B2400 11 -#define B4800 12 -#define B9600 13 -#define EXTA 14 -#define EXTB 15 +#define B0 0 +#define B50 1 +#define B75 2 +#define B110 3 +#define B134 4 +#define B150 5 +#define B200 6 +#define B300 7 +#define B600 8 +#define B1200 9 +#define B1800 10 +#define B2400 11 +#define B4800 12 +#define B9600 13 +#define EXTA 14 +#define EXTB 15 #define B57600 16 #define B115200 17 #endif /* USE_OLD_TTY */ diff --git a/bsd/sys/types.h b/bsd/sys/types.h index f11f23ff2..67b8a73c3 100644 --- a/bsd/sys/types.h +++ b/bsd/sys/types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -67,7 +67,7 @@ */ #ifndef _SYS_TYPES_H_ -#define _SYS_TYPES_H_ +#define _SYS_TYPES_H_ #include @@ -85,24 +85,24 @@ #include #include #ifndef _U_LONG -typedef unsigned long u_long; +typedef unsigned long u_long; #define _U_LONG #endif -typedef unsigned short ushort; /* Sys V compatibility */ -typedef unsigned int uint; /* Sys V compatibility */ +typedef unsigned short ushort; /* Sys V compatibility */ +typedef unsigned int uint; /* Sys V compatibility */ #endif -typedef u_int64_t u_quad_t; /* quads */ -typedef int64_t quad_t; -typedef quad_t * qaddr_t; +typedef u_int64_t u_quad_t; /* quads */ +typedef int64_t quad_t; +typedef quad_t * qaddr_t; -#include /* core address */ +#include /* core address */ -typedef int32_t daddr_t; /* disk address */ +typedef int32_t daddr_t; /* disk address */ -#include /* device number */ +#include /* device number */ -typedef u_int32_t fixpt_t; /* fixed point number */ +typedef u_int32_t fixpt_t; /* fixed point number */ #include #include @@ -112,7 +112,7 @@ typedef u_int32_t fixpt_t; /* fixed point number */ #include #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#include /* 64bit inode number */ +#include /* 64bit inode number */ #endif /* !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) */ #include @@ -122,8 +122,8 @@ typedef u_int32_t fixpt_t; /* fixed point number */ #include #include -typedef int32_t segsz_t; /* segment size */ -typedef int32_t swblk_t; /* swap offset */ +typedef int32_t segsz_t; /* segment size */ +typedef int32_t swblk_t; /* swap offset */ #include @@ -135,29 +135,32 @@ typedef int32_t swblk_t; /* swap offset */ * so for C++, we must use inline functions instead. */ -static inline __int32_t major(__uint32_t _x) +static inline __int32_t +major(__uint32_t _x) { return (__int32_t)(((__uint32_t)_x >> 24) & 0xff); } -static inline __int32_t minor(__uint32_t _x) +static inline __int32_t +minor(__uint32_t _x) { return (__int32_t)((_x) & 0xffffff); } -static inline dev_t makedev(__uint32_t _major, __uint32_t _minor) +static inline dev_t +makedev(__uint32_t _major, __uint32_t _minor) { return (dev_t)(((_major) << 24) | (_minor)); } -#else /* !__cplusplus */ +#else /* !__cplusplus */ -#define major(x) ((int32_t)(((u_int32_t)(x) >> 24) & 0xff)) -#define minor(x) ((int32_t)((x) & 0xffffff)) -#define makedev(x,y) ((dev_t)(((x) << 24) | (y))) +#define major(x) ((int32_t)(((u_int32_t)(x) >> 24) & 0xff)) +#define minor(x) ((int32_t)((x) & 0xffffff)) +#define makedev(x, y) ((dev_t)(((x) << 24) | (y))) -#endif /* !__cplusplus */ -#endif /* !_POSIX_C_SOURCE */ +#endif /* !__cplusplus */ +#endif /* !_POSIX_C_SOURCE */ #include #include @@ -180,10 +183,10 @@ static inline dev_t makedev(__uint32_t _major, __uint32_t _minor) */ #include -#define NBBY __DARWIN_NBBY /* bits in a byte */ -#define NFDBITS __DARWIN_NFDBITS /* bits per mask */ -#define howmany(x, y) __DARWIN_howmany(x, y) /* # y's == x bits? */ -typedef __int32_t fd_mask; +#define NBBY __DARWIN_NBBY /* bits in a byte */ +#define NFDBITS __DARWIN_NFDBITS /* bits per mask */ +#define howmany(x, y) __DARWIN_howmany(x, y) /* # y's == x bits? */ +typedef __int32_t fd_mask; /* * Select uses bit masks of file descriptors in longs. 
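The major()/minor()/makedev() helpers above pack a dev_t as 8 bits of major number over 24 bits of minor. A quick self-contained check of that round trip, with the macros reproduced locally (the 14/3 device pair is arbitrary):

#include <stdint.h>
#include <stdio.h>

#define my_major(x)      ((int32_t)(((uint32_t)(x) >> 24) & 0xff))
#define my_minor(x)      ((int32_t)((x) & 0xffffff))
#define my_makedev(x, y) ((int32_t)(((x) << 24) | (y)))

int
main(void)
{
	int32_t dev = my_makedev(14, 3);

	printf("dev   = 0x%08x\n", (uint32_t)dev);   /* 0x0e000003 */
	printf("major = %d\n", my_major(dev));       /* 14 */
	printf("minor = %d\n", my_minor(dev));       /* 3 */
	return 0;
}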
These macros @@ -199,7 +202,7 @@ typedef __int32_t fd_mask; #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) #include -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #if defined(__STDC__) && defined(KERNEL) @@ -208,14 +211,14 @@ typedef __int32_t fd_mask; * common structures that cross subsystem boundaries here; others are mostly * used in the same place that the structure is defined. */ -struct proc; -struct pgrp; -struct ucred; -struct rusage; -struct file; -struct buf; -struct tty; -struct uio; +struct proc; +struct pgrp; +struct ucred; +struct rusage; +struct file; +struct buf; +struct tty; +struct uio; #endif #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ diff --git a/bsd/sys/ubc.h b/bsd/sys/ubc.h index 4c209a6e7..bc91fde31 100644 --- a/bsd/sys/ubc.h +++ b/bsd/sys/ubc.h @@ -2,7 +2,7 @@ * Copyright (c) 1999-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Header file for Unified Buffer Cache. * - */ + */ -#ifndef _SYS_UBC_H_ -#define _SYS_UBC_H_ +#ifndef _SYS_UBC_H_ +#define _SYS_UBC_H_ #include #include @@ -47,17 +47,17 @@ /* defns for ubc_msync() and ubc_msync */ -#define UBC_PUSHDIRTY 0x01 /* clean any dirty pages in the specified range to the backing store */ -#define UBC_PUSHALL 0x02 /* push both dirty and precious pages to the backing store */ -#define UBC_INVALIDATE 0x04 /* invalidate pages in the specified range... may be used with UBC_PUSHDIRTY/ALL */ -#define UBC_SYNC 0x08 /* wait for I/Os generated by UBC_PUSHDIRTY to complete */ +#define UBC_PUSHDIRTY 0x01 /* clean any dirty pages in the specified range to the backing store */ +#define UBC_PUSHALL 0x02 /* push both dirty and precious pages to the backing store */ +#define UBC_INVALIDATE 0x04 /* invalidate pages in the specified range... 
may be used with UBC_PUSHDIRTY/ALL */ +#define UBC_SYNC 0x08 /* wait for I/Os generated by UBC_PUSHDIRTY to complete */ __BEGIN_DECLS -off_t ubc_blktooff(struct vnode *, daddr64_t); -daddr64_t ubc_offtoblk(struct vnode *, off_t); -off_t ubc_getsize(struct vnode *); -int ubc_setsize(struct vnode *, off_t); +off_t ubc_blktooff(struct vnode *, daddr64_t); +daddr64_t ubc_offtoblk(struct vnode *, off_t); +off_t ubc_getsize(struct vnode *); +int ubc_setsize(struct vnode *, off_t); #ifdef KERNEL_PRIVATE @@ -72,16 +72,16 @@ errno_t ubc_setsize_ex(vnode_t vp, off_t nsize, ubc_setsize_opts_t opts); kauth_cred_t ubc_getcred(struct vnode *); struct thread; -int ubc_setthreadcred(struct vnode *, struct proc *, struct thread *); +int ubc_setthreadcred(struct vnode *, struct proc *, struct thread *); errno_t ubc_msync(vnode_t, off_t, off_t, off_t *, int); -int ubc_pages_resident(vnode_t); -int ubc_page_op(vnode_t, off_t, int, ppnum_t *, int *); -int ubc_range_op(vnode_t, off_t, off_t, int, int *); +int ubc_pages_resident(vnode_t); +int ubc_page_op(vnode_t, off_t, int, ppnum_t *, int *); +int ubc_range_op(vnode_t, off_t, off_t, int, int *); #ifdef KERNEL_PRIVATE /* This API continues to exist only until is resolved */ -int ubc_setcred(struct vnode *, struct proc *) __deprecated; +int ubc_setcred(struct vnode *, struct proc *) __deprecated; /* code signing */ struct cs_blob; struct cs_blob *ubc_cs_blob_get(vnode_t, cpu_type_t, off_t); @@ -100,32 +100,32 @@ const char *cs_identity_get(proc_t); /* cluster IO routines */ void cluster_update_state(vnode_t, vm_object_offset_t, vm_object_offset_t, boolean_t); -int advisory_read(vnode_t, off_t, off_t, int); -int advisory_read_ext(vnode_t, off_t, off_t, int, int (*)(buf_t, void *), void *, int); +int advisory_read(vnode_t, off_t, off_t, int); +int advisory_read_ext(vnode_t, off_t, off_t, int, int (*)(buf_t, void *), void *, int); -int cluster_read(vnode_t, struct uio *, off_t, int); -int cluster_read_ext(vnode_t, struct uio *, off_t, int, int (*)(buf_t, void *), void *); +int cluster_read(vnode_t, struct uio *, off_t, int); +int cluster_read_ext(vnode_t, struct uio *, off_t, int, int (*)(buf_t, void *), void *); -int cluster_write(vnode_t, struct uio *, off_t, off_t, off_t, off_t, int); -int cluster_write_ext(vnode_t, struct uio *, off_t, off_t, off_t, off_t, int, int (*)(buf_t, void *), void *); +int cluster_write(vnode_t, struct uio *, off_t, off_t, off_t, off_t, int); +int cluster_write_ext(vnode_t, struct uio *, off_t, off_t, off_t, off_t, int, int (*)(buf_t, void *), void *); -int cluster_pageout(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int); -int cluster_pageout_ext(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int, int (*)(buf_t, void *), void *); +int cluster_pageout(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int); +int cluster_pageout_ext(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int, int (*)(buf_t, void *), void *); -int cluster_pagein(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int); -int cluster_pagein_ext(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int, int (*)(buf_t, void *), void *); +int cluster_pagein(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int); +int cluster_pagein_ext(vnode_t, upl_t, upl_offset_t, off_t, int, off_t, int, int (*)(buf_t, void *), void *); -int cluster_push(vnode_t, int); -int cluster_push_ext(vnode_t, int, int (*)(buf_t, void *), void *); -int cluster_push_err(vnode_t, int, int (*)(buf_t, void *), void *, int *); +int cluster_push(vnode_t, int); +int cluster_push_ext(vnode_t, int, int 
(*)(buf_t, void *), void *); +int cluster_push_err(vnode_t, int, int (*)(buf_t, void *), void *, int *); -int cluster_bp(buf_t); -int cluster_bp_ext(buf_t, int (*)(buf_t, void *), void *); +int cluster_bp(buf_t); +int cluster_bp_ext(buf_t, int (*)(buf_t, void *), void *); -void cluster_zero(upl_t, upl_offset_t, int, buf_t); +void cluster_zero(upl_t, upl_offset_t, int, buf_t); -int cluster_copy_upl_data(uio_t, upl_t, int, int *); -int cluster_copy_ubc_data(vnode_t, uio_t, int *, int); +int cluster_copy_upl_data(uio_t, upl_t, int, int *); +int cluster_copy_ubc_data(vnode_t, uio_t, int *, int); typedef struct cl_direct_read_lock cl_direct_read_lock_t; cl_direct_read_lock_t *cluster_lock_direct_read(vnode_t vp, lck_rw_type_t exclusive); @@ -133,28 +133,28 @@ void cluster_unlock_direct_read(cl_direct_read_lock_t *lck); /* UPL routines */ #ifndef XNU_KERNEL_PRIVATE -int ubc_create_upl(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int); +int ubc_create_upl(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int); #endif /* XNU_KERNEL_PRIVATE */ -int ubc_upl_map(upl_t, vm_offset_t *); -int ubc_upl_unmap(upl_t); -int ubc_upl_commit(upl_t); -int ubc_upl_commit_range(upl_t, upl_offset_t, upl_size_t, int); -int ubc_upl_abort(upl_t, int); -int ubc_upl_abort_range(upl_t, upl_offset_t, upl_size_t, int); -void ubc_upl_range_needed(upl_t, int, int); +int ubc_upl_map(upl_t, vm_offset_t *); +int ubc_upl_unmap(upl_t); +int ubc_upl_commit(upl_t); +int ubc_upl_commit_range(upl_t, upl_offset_t, upl_size_t, int); +int ubc_upl_abort(upl_t, int); +int ubc_upl_abort_range(upl_t, upl_offset_t, upl_size_t, int); +void ubc_upl_range_needed(upl_t, int, int); upl_page_info_t *ubc_upl_pageinfo(upl_t); upl_size_t ubc_upl_maxbufsize(void); -int is_file_clean(vnode_t, off_t); +int is_file_clean(vnode_t, off_t); errno_t mach_to_bsd_errno(kern_return_t mach_err); #ifdef KERNEL_PRIVATE -int ubc_create_upl_external(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int); -#ifdef XNU_KERNEL_PRIVATE -int ubc_create_upl_kernel(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int, vm_tag_t); +int ubc_create_upl_external(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int); +#ifdef XNU_KERNEL_PRIVATE +int ubc_create_upl_kernel(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int, vm_tag_t); #endif /* XNU_KERNEL_PRIVATE */ __attribute__((pure)) boolean_t ubc_is_mapped(const struct vnode *, boolean_t *writable); @@ -166,5 +166,4 @@ uint32_t cluster_max_io_size(mount_t, int); __END_DECLS -#endif /* _SYS_UBC_H_ */ - +#endif /* _SYS_UBC_H_ */ diff --git a/bsd/sys/ubc_internal.h b/bsd/sys/ubc_internal.h index be82f0f66..030feca59 100644 --- a/bsd/sys/ubc_internal.h +++ b/bsd/sys/ubc_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 1999-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
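ubc_msync() combines the UBC_* flags above by OR: UBC_PUSHDIRTY selects which pages get cleaned and UBC_SYNC makes the call wait for the resulting I/O. A hypothetical kernel-side helper, assuming a KERNEL_PRIVATE build (flush_vnode_range is illustrative, not an xnu function):

    /* Clean a vnode's dirty pages in [start, end) and wait for the I/O. */
    static errno_t
    flush_vnode_range(vnode_t vp, off_t start, off_t end)
    {
        off_t resid = 0;

        return ubc_msync(vp, start, end, &resid, UBC_PUSHDIRTY | UBC_SYNC);
    }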
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,20 +22,20 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * File: ubc.h * Author: Umesh Vaishampayan [umeshv@apple.com] * 05-Aug-1999 umeshv Created. * * Header file for Unified Buffer Cache. * - */ + */ -#ifndef _SYS_UBC_INTERNAL_H_ -#define _SYS_UBC_INTERNAL_H_ +#ifndef _SYS_UBC_INTERNAL_H_ +#define _SYS_UBC_INTERNAL_H_ #include #include @@ -52,46 +52,46 @@ #include -#define UBC_INFO_NULL ((struct ubc_info *) 0) +#define UBC_INFO_NULL ((struct ubc_info *) 0) -extern struct zone *ubc_info_zone; +extern struct zone *ubc_info_zone; -/* +/* * Maximum number of vfs clusters per vnode */ -#define MAX_CLUSTERS CONFIG_MAX_CLUSTERS +#define MAX_CLUSTERS CONFIG_MAX_CLUSTERS -#define SPARSE_PUSH_LIMIT 4 /* limit on number of concurrent sparse pushes outside of the cl_lockw */ +#define SPARSE_PUSH_LIMIT 4 /* limit on number of concurrent sparse pushes outside of the cl_lockw */ /* once we reach this limit, we'll hold the lock */ struct cl_extent { - daddr64_t b_addr; - daddr64_t e_addr; + daddr64_t b_addr; + daddr64_t e_addr; }; struct cl_wextent { - daddr64_t b_addr; - daddr64_t e_addr; - int io_flags; + daddr64_t b_addr; + daddr64_t e_addr; + int io_flags; }; struct cl_readahead { - lck_mtx_t cl_lockr; - daddr64_t cl_lastr; /* last block read by client */ - daddr64_t cl_maxra; /* last block prefetched by the read ahead */ - int cl_ralen; /* length of last prefetch */ + lck_mtx_t cl_lockr; + daddr64_t cl_lastr; /* last block read by client */ + daddr64_t cl_maxra; /* last block prefetched by the read ahead */ + int cl_ralen; /* length of last prefetch */ }; struct cl_writebehind { - lck_mtx_t cl_lockw; - void * cl_scmap; /* pointer to sparse cluster map */ - off_t cl_last_write; /* offset of the end of the last write */ - off_t cl_seq_written; /* sequentially written bytes */ - int cl_sparse_pushes; /* number of pushes outside of the cl_lockw in progress */ - int cl_sparse_wait; /* synchronous push is in progress */ - int cl_number; /* number of packed write behind clusters currently valid */ - struct cl_wextent cl_clusters[MAX_CLUSTERS]; /* packed write behind clusters */ + lck_mtx_t cl_lockw; + void * cl_scmap; /* pointer to sparse cluster map */ + off_t cl_last_write; /* offset of the end of the last write */ + off_t cl_seq_written; /* sequentially written bytes */ + int cl_sparse_pushes; /* number of pushes outside of the cl_lockw in progress */ + int cl_sparse_wait; /* synchronous push is in progress */ + int cl_number; /* number of packed write behind clusters currently valid */ + struct cl_wextent cl_clusters[MAX_CLUSTERS]; /* packed write behind clusters */ }; struct cs_hash; @@ -99,31 +99,31 @@ struct cs_hash; uint8_t cs_hash_type(struct cs_hash const *); struct cs_blob { - struct cs_blob *csb_next; - cpu_type_t csb_cpu_type; - unsigned int csb_flags; - off_t csb_base_offset; /* Offset of Mach-O binary in fat binary */ - off_t csb_start_offset; /* Blob coverage area start, from csb_base_offset */ - off_t csb_end_offset; /* Blob coverage area end, from csb_base_offset */ - vm_size_t csb_mem_size; - vm_offset_t csb_mem_offset; - vm_address_t csb_mem_kaddr; - unsigned char 
csb_cdhash[CS_CDHASH_LEN]; + struct cs_blob *csb_next; + cpu_type_t csb_cpu_type; + unsigned int csb_flags; + off_t csb_base_offset; /* Offset of Mach-O binary in fat binary */ + off_t csb_start_offset; /* Blob coverage area start, from csb_base_offset */ + off_t csb_end_offset; /* Blob coverage area end, from csb_base_offset */ + vm_size_t csb_mem_size; + vm_offset_t csb_mem_offset; + vm_address_t csb_mem_kaddr; + unsigned char csb_cdhash[CS_CDHASH_LEN]; const struct cs_hash *csb_hashtype; - vm_size_t csb_hash_pagesize; /* each hash entry represent this many bytes in the file */ - vm_size_t csb_hash_pagemask; - vm_size_t csb_hash_pageshift; - vm_size_t csb_hash_firstlevel_pagesize; /* First hash this many bytes, then hash the hashes together */ + vm_size_t csb_hash_pagesize; /* each hash entry represent this many bytes in the file */ + vm_size_t csb_hash_pagemask; + vm_size_t csb_hash_pageshift; + vm_size_t csb_hash_firstlevel_pagesize; /* First hash this many bytes, then hash the hashes together */ const CS_CodeDirectory *csb_cd; - const char *csb_teamid; - const CS_GenericBlob *csb_entitlements_blob; /* raw blob, subrange of csb_mem_kaddr */ - void * csb_entitlements; /* The entitlements as an OSDictionary */ - unsigned int csb_signer_type; + const char *csb_teamid; + const CS_GenericBlob *csb_entitlements_blob; /* raw blob, subrange of csb_mem_kaddr */ + void * csb_entitlements; /* The entitlements as an OSDictionary */ + unsigned int csb_signer_type; - unsigned int csb_reconstituted; /* signature has potentially been modified after validation */ + unsigned int csb_reconstituted; /* signature has potentially been modified after validation */ /* The following two will be replaced by the csb_signer_type. */ - unsigned int csb_platform_binary:1; - unsigned int csb_platform_path:1; + unsigned int csb_platform_binary:1; + unsigned int csb_platform_path:1; }; @@ -132,39 +132,39 @@ struct cs_blob { * a vnode to the correspondig VM objects. */ struct ubc_info { - memory_object_t ui_pager; /* pager */ - memory_object_control_t ui_control; /* VM control for the pager */ - vnode_t ui_vnode; /* vnode for this ubc_info */ - kauth_cred_t ui_ucred; /* holds credentials for NFS paging */ - off_t ui_size; /* file size for the vnode */ - uint32_t ui_flags; /* flags */ - uint32_t cs_add_gen; /* generation count when csblob was validated */ - - struct cl_readahead *cl_rahead; /* cluster read ahead context */ - struct cl_writebehind *cl_wbehind; /* cluster write behind context */ - - struct timespec cs_mtime; /* modify time of file when - first cs_blob was loaded */ - struct cs_blob *cs_blobs; /* for CODE SIGNING */ + memory_object_t ui_pager; /* pager */ + memory_object_control_t ui_control; /* VM control for the pager */ + vnode_t ui_vnode; /* vnode for this ubc_info */ + kauth_cred_t ui_ucred; /* holds credentials for NFS paging */ + off_t ui_size; /* file size for the vnode */ + uint32_t ui_flags; /* flags */ + uint32_t cs_add_gen; /* generation count when csblob was validated */ + + struct cl_readahead *cl_rahead; /* cluster read ahead context */ + struct cl_writebehind *cl_wbehind; /* cluster write behind context */ + + struct timespec cs_mtime; /* modify time of file when + * first cs_blob was loaded */ + struct cs_blob *cs_blobs; /* for CODE SIGNING */ #if CHECK_CS_VALIDATION_BITMAP - void *cs_valid_bitmap; /* right now: used only for signed files on the read-only root volume */ - uint64_t cs_valid_bitmap_size; /* Save original bitmap size in case the file size changes. 
- * In the future, we may want to reconsider changing the - * underlying bitmap to reflect the new file size changes. - */ + void *cs_valid_bitmap; /* right now: used only for signed files on the read-only root volume */ + uint64_t cs_valid_bitmap_size; /* Save original bitmap size in case the file size changes. + * In the future, we may want to reconsider changing the + * underlying bitmap to reflect the new file size changes. + */ #endif /* CHECK_CS_VALIDATION_BITMAP */ }; /* Defines for ui_flags */ -#define UI_NONE 0x00000000 /* none */ -#define UI_HASPAGER 0x00000001 /* has a pager associated */ -#define UI_INITED 0x00000002 /* newly initialized vnode */ -#define UI_HASOBJREF 0x00000004 /* hold a reference on object */ -#define UI_WASMAPPED 0x00000008 /* vnode was mapped */ -#define UI_ISMAPPED 0x00000010 /* vnode is currently mapped */ -#define UI_MAPBUSY 0x00000020 /* vnode is being mapped or unmapped */ -#define UI_MAPWAITING 0x00000040 /* someone waiting for UI_MAPBUSY */ -#define UI_MAPPEDWRITE 0x00000080 /* it's mapped with PROT_WRITE */ +#define UI_NONE 0x00000000 /* none */ +#define UI_HASPAGER 0x00000001 /* has a pager associated */ +#define UI_INITED 0x00000002 /* newly initialized vnode */ +#define UI_HASOBJREF 0x00000004 /* hold a reference on object */ +#define UI_WASMAPPED 0x00000008 /* vnode was mapped */ +#define UI_ISMAPPED 0x00000010 /* vnode is currently mapped */ +#define UI_MAPBUSY 0x00000020 /* vnode is being mapped or unmapped */ +#define UI_MAPWAITING 0x00000040 /* someone waiting for UI_MAPBUSY */ +#define UI_MAPPEDWRITE 0x00000080 /* it's mapped with PROT_WRITE */ /* * exported primitives for loadable file systems. @@ -172,31 +172,31 @@ struct ubc_info { __BEGIN_DECLS __private_extern__ void ubc_init(void); -__private_extern__ int ubc_umount(mount_t mp); -__private_extern__ void ubc_unmountall(void); +__private_extern__ int ubc_umount(mount_t mp); +__private_extern__ void ubc_unmountall(void); __private_extern__ memory_object_t ubc_getpager(vnode_t); -__private_extern__ void ubc_destroy_named(vnode_t); +__private_extern__ void ubc_destroy_named(vnode_t); /* internal only */ -__private_extern__ void cluster_release(struct ubc_info *); +__private_extern__ void cluster_release(struct ubc_info *); __private_extern__ uint32_t cluster_throttle_io_limit(vnode_t, uint32_t *); /* Flags for ubc_getobject() */ -#define UBC_FLAGS_NONE 0x0000 -#define UBC_HOLDOBJECT 0x0001 +#define UBC_FLAGS_NONE 0x0000 +#define UBC_HOLDOBJECT 0x0001 #define UBC_FOR_PAGEOUT 0x0002 memory_object_control_t ubc_getobject(vnode_t, int); -int ubc_info_init(vnode_t); -int ubc_info_init_withsize(vnode_t, off_t); -void ubc_info_deallocate(struct ubc_info *); +int ubc_info_init(vnode_t); +int ubc_info_init_withsize(vnode_t, off_t); +void ubc_info_deallocate(struct ubc_info *); -int ubc_isinuse(vnode_t, int); -int ubc_isinuse_locked(vnode_t, int, int); +int ubc_isinuse(vnode_t, int); +int ubc_isinuse_locked(vnode_t, int, int); -int ubc_getcdhash(vnode_t, off_t, unsigned char *); +int ubc_getcdhash(vnode_t, off_t, unsigned char *); #ifdef XNU_KERNEL_PRIVATE int UBCINFOEXISTS(const struct vnode *); @@ -204,19 +204,18 @@ int UBCINFOEXISTS(const struct vnode *); /* code signing */ struct cs_blob; -int ubc_cs_blob_add(vnode_t, cpu_type_t, off_t, vm_address_t *, vm_size_t, struct image_params *, int, struct cs_blob **); -int ubc_cs_sigpup_add(vnode_t, vm_address_t, vm_size_t); +int ubc_cs_blob_add(vnode_t, cpu_type_t, off_t, vm_address_t *, vm_size_t, struct image_params *, int, struct cs_blob **); +int 
ubc_cs_sigpup_add(vnode_t, vm_address_t, vm_size_t); struct cs_blob *ubc_get_cs_blobs(vnode_t); -void ubc_get_cs_mtime(vnode_t, struct timespec *); -int ubc_cs_getcdhash(vnode_t, off_t, unsigned char *); +void ubc_get_cs_mtime(vnode_t, struct timespec *); +int ubc_cs_getcdhash(vnode_t, off_t, unsigned char *); kern_return_t ubc_cs_blob_allocate(vm_offset_t *, vm_size_t *); void ubc_cs_blob_deallocate(vm_offset_t, vm_size_t); boolean_t ubc_cs_is_range_codesigned(vnode_t, mach_vm_offset_t, mach_vm_size_t); -kern_return_t ubc_cs_validation_bitmap_allocate( vnode_t ); -void ubc_cs_validation_bitmap_deallocate( vnode_t ); +kern_return_t ubc_cs_validation_bitmap_allocate( vnode_t ); +void ubc_cs_validation_bitmap_deallocate( vnode_t ); __END_DECLS -#endif /* _SYS_UBC_INTERNAL_H_ */ - +#endif /* _SYS_UBC_INTERNAL_H_ */ diff --git a/bsd/sys/ucontext.h b/bsd/sys/ucontext.h index 8240c35bd..06f867686 100644 --- a/bsd/sys/ucontext.h +++ b/bsd/sys/ucontext.h @@ -2,7 +2,7 @@ * Copyright (c) 2002-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
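ubc_get_cs_blobs() returns the head of a csb_next-linked chain holding one cs_blob per signed slice of the file. A hypothetical sketch of walking it (count_cs_blobs is illustrative; locking against concurrent blob registration is elided):

    /* Count the code-signing blobs currently registered on a vnode. */
    static int
    count_cs_blobs(vnode_t vp)
    {
        int n = 0;

        for (struct cs_blob *b = ubc_get_cs_blobs(vp); b != NULL; b = b->csb_next) {
            n++;
        }
        return n;
    }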
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -39,27 +39,27 @@ #include #ifdef KERNEL -#include /* user_addr_t, user_size_t */ +#include /* user_addr_t, user_size_t */ /* kernel representation of struct ucontext64 for 64 bit processes */ typedef struct user_ucontext64 { - int uc_onstack; - sigset_t uc_sigmask; /* signal mask */ - struct user64_sigaltstack uc_stack; /* stack */ - user_addr_t uc_link; /* ucontext pointer */ - user_size_t uc_mcsize; /* mcontext size */ - user_addr_t uc_mcontext64; /* machine context */ + int uc_onstack; + sigset_t uc_sigmask; /* signal mask */ + struct user64_sigaltstack uc_stack; /* stack */ + user_addr_t uc_link; /* ucontext pointer */ + user_size_t uc_mcsize; /* mcontext size */ + user_addr_t uc_mcontext64; /* machine context */ } user_ucontext64_t; typedef struct user_ucontext32 { - int uc_onstack; - sigset_t uc_sigmask; /* signal mask */ - struct user32_sigaltstack uc_stack; /* stack */ - user32_addr_t uc_link; /* ucontext pointer */ - user32_size_t uc_mcsize; /* mcontext size */ - user32_addr_t uc_mcontext; /* machine context */ + int uc_onstack; + sigset_t uc_sigmask; /* signal mask */ + struct user32_sigaltstack uc_stack; /* stack */ + user32_addr_t uc_link; /* ucontext pointer */ + user32_size_t uc_mcsize; /* mcontext size */ + user32_addr_t uc_mcontext; /* machine context */ } user_ucontext32_t; -#endif /* KERNEL */ +#endif /* KERNEL */ #endif /* _SYS_UCONTEXT_H_ */ diff --git a/bsd/sys/ucred.h b/bsd/sys/ucred.h index dae092dac..febbf1aea 100644 --- a/bsd/sys/ucred.h +++ b/bsd/sys/ucred.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */ @@ -68,7 +68,7 @@ */ #ifndef _SYS_UCRED_H_ -#define _SYS_UCRED_H_ +#define _SYS_UCRED_H_ #include #include @@ -87,70 +87,76 @@ struct label; * it or copies of it be exported outside. 
*/ struct ucred { - TAILQ_ENTRY(ucred) cr_link; /* never modify this without KAUTH_CRED_HASH_LOCK */ - u_long cr_ref; /* reference count */ - -struct posix_cred { + TAILQ_ENTRY(ucred) cr_link; /* never modify this without KAUTH_CRED_HASH_LOCK */ + u_long cr_ref; /* reference count */ + + struct posix_cred { + /* + * The credential hash depends on everything from this point on + * (see kauth_cred_get_hashkey) + */ + uid_t cr_uid; /* effective user id */ + uid_t cr_ruid; /* real user id */ + uid_t cr_svuid; /* saved user id */ + short cr_ngroups; /* number of groups in advisory list */ + gid_t cr_groups[NGROUPS]; /* advisory group list */ + gid_t cr_rgid; /* real group id */ + gid_t cr_svgid; /* saved group id */ + uid_t cr_gmuid; /* UID for group membership purposes */ + int cr_flags; /* flags on credential */ + } cr_posix; + struct label *cr_label; /* MAC label */ /* - * The credential hash depends on everything from this point on - * (see kauth_cred_get_hashkey) - */ - uid_t cr_uid; /* effective user id */ - uid_t cr_ruid; /* real user id */ - uid_t cr_svuid; /* saved user id */ - short cr_ngroups; /* number of groups in advisory list */ - gid_t cr_groups[NGROUPS]; /* advisory group list */ - gid_t cr_rgid; /* real group id */ - gid_t cr_svgid; /* saved group id */ - uid_t cr_gmuid; /* UID for group membership purposes */ - int cr_flags; /* flags on credential */ -} cr_posix; - struct label *cr_label; /* MAC label */ - /* * NOTE: If anything else (besides the flags) * added after the label, you must change * kauth_cred_find(). */ - struct au_session cr_audit; /* user auditing data */ + struct au_session cr_audit; /* user auditing data */ }; #ifndef _KAUTH_CRED_T -#define _KAUTH_CRED_T +#define _KAUTH_CRED_T typedef struct ucred *kauth_cred_t; typedef struct posix_cred *posix_cred_t; -#endif /* !_KAUTH_CRED_T */ +#endif /* !_KAUTH_CRED_T */ /* * Credential flags that can be set on a credential */ -#define CRF_NOMEMBERD 0x00000001 /* memberd opt out by setgroups() */ -#define CRF_MAC_ENFORCE 0x00000002 /* force entry through MAC Framework */ - /* also forces credential cache miss */ +#define CRF_NOMEMBERD 0x00000001 /* memberd opt out by setgroups() */ +#define CRF_MAC_ENFORCE 0x00000002 /* force entry through MAC Framework */ + /* also forces credential cache miss */ /* * This is the external representation of struct ucred.
*/ struct xucred { - u_int cr_version; /* structure layout version */ - uid_t cr_uid; /* effective user id */ - short cr_ngroups; /* number of advisory groups */ - gid_t cr_groups[NGROUPS]; /* advisory group list */ + u_int cr_version; /* structure layout version */ + uid_t cr_uid; /* effective user id */ + short cr_ngroups; /* number of advisory groups */ + gid_t cr_groups[NGROUPS]; /* advisory group list */ }; #define XUCRED_VERSION 0 #define cr_gid cr_groups[0] -#define NOCRED ((kauth_cred_t )0) /* no credential available */ -#define FSCRED ((kauth_cred_t )-1) /* filesystem credential */ +#define NOCRED ((kauth_cred_t )0) /* no credential available */ +#define FSCRED ((kauth_cred_t )-1) /* filesystem credential */ -#define IS_VALID_CRED(_cr) ((_cr) != NOCRED && (_cr) != FSCRED) +#define IS_VALID_CRED(_cr) ((_cr) != NOCRED && (_cr) != FSCRED) #ifdef KERNEL #ifdef __APPLE_API_OBSOLETE __BEGIN_DECLS -int crcmp(kauth_cred_t cr1, kauth_cred_t cr2); -int suser(kauth_cred_t cred, u_short *acflag); -int set_security_token(struct proc * p); -int set_security_token_task_internal(struct proc *p, void *task); -void cru2x(kauth_cred_t cr, struct xucred *xcr); +int crcmp(kauth_cred_t cr1, kauth_cred_t cr2); +int suser(kauth_cred_t cred, u_short *acflag); +int set_security_token(struct proc * p); +int set_security_token_task_internal(struct proc *p, void *task); +void cru2x(kauth_cred_t cr, struct xucred *xcr); __END_DECLS #endif /* __APPLE_API_OBSOLETE */ #endif /* KERNEL */ diff --git a/bsd/sys/uio.h b/bsd/sys/uio.h index 84f9f690b..1c848d670 100644 --- a/bsd/sys/uio.h +++ b/bsd/sys/uio.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_UIO_H_ -#define _SYS_UIO_H_ +#define _SYS_UIO_H_ #include #include @@ -103,18 +103,18 @@ enum uio_rw { UIO_READ, UIO_WRITE }; * user / kernel address space type flags. * WARNING - make sure to check when adding flags! Be sure new flags * don't overlap the definitions in uio_internal.h - * NOTES - + * NOTES - * UIO_USERSPACE is equivalent to UIO_USERSPACE32, but UIO_USERSPACE32 * is preferred. UIO_USERSPACE remains for backwards compatibility. * UIO_SYSSPACE is equivalent to UIO_SYSSPACE32, but UIO_SYSSPACE * is preferred. 
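NOCRED and FSCRED are cast sentinel values, not allocated ucreds, which is exactly what IS_VALID_CRED() screens for. A hypothetical kernel-side sketch (retain_if_valid is illustrative; kauth_cred_ref() is the usual KPI for taking a reference):

    /* Take a reference only on a real, allocated credential. */
    static kauth_cred_t
    retain_if_valid(kauth_cred_t cred)
    {
        if (IS_VALID_CRED(cred)) {
            kauth_cred_ref(cred);   /* never reference the NOCRED/FSCRED sentinels */
        }
        return cred;
    }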
*/ enum uio_seg { - UIO_USERSPACE = 0, /* kernel address is virtual, to/from user virtual */ - UIO_SYSSPACE = 2, /* kernel address is virtual, to/from system virtual */ - UIO_USERSPACE32 = 5, /* kernel address is virtual, to/from user 32-bit virtual */ - UIO_USERSPACE64 = 8, /* kernel address is virtual, to/from user 64-bit virtual */ - UIO_SYSSPACE32 = 11 /* deprecated */ + UIO_USERSPACE = 0, /* kernel address is virtual, to/from user virtual */ + UIO_SYSSPACE = 2, /* kernel address is virtual, to/from system virtual */ + UIO_USERSPACE32 = 5, /* kernel address is virtual, to/from user 32-bit virtual */ + UIO_USERSPACE64 = 8, /* kernel address is virtual, to/from user 64-bit virtual */ + UIO_SYSSPACE32 = 11 /* deprecated */ }; #define UIO_SEG_IS_USER_SPACE( a_uio_seg ) \ @@ -126,26 +126,26 @@ __BEGIN_DECLS /* * uio_create - create an uio_t. - * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t + * Space is allocated to hold up to a_iovcount number of iovecs. The uio_t * is not fully initialized until all iovecs are added using uio_addiov calls. * a_iovcount is the maximum number of iovecs you may add. */ -uio_t uio_create( int a_iovcount, /* max number of iovecs */ - off_t a_offset, /* current offset */ - int a_spacetype, /* type of address space */ - int a_iodirection ); /* read or write flag */ +uio_t uio_create( int a_iovcount, /* max number of iovecs */ + off_t a_offset, /* current offset */ + int a_spacetype, /* type of address space */ + int a_iodirection ); /* read or write flag */ /* * uio_reset - reset an uio_t. - * Reset the given uio_t to initial values. The uio_t is not fully initialized - * until all iovecs are added using uio_add_ov calls. - * The a_iovcount value passed in the uio_create is the maximum number of + * Reset the given uio_t to initial values. The uio_t is not fully initialized + * until all iovecs are added using uio_add_ov calls. + * The a_iovcount value passed in the uio_create is the maximum number of * iovecs you may add. */ void uio_reset( uio_t a_uio, - off_t a_offset, /* current offset */ - int a_spacetype, /* type of address space */ - int a_iodirection ); /* read or write flag */ + off_t a_offset, /* current offset */ + int a_spacetype, /* type of address space */ + int a_iodirection ); /* read or write flag */ /* * uio_duplicate - allocate a new uio and make a copy of the given uio_t. @@ -155,13 +155,13 @@ uio_t uio_duplicate( uio_t a_uio ); /* - * uio_free - free a uio_t allocated via uio_create. + * uio_free - free a uio_t allocated via uio_create. */ void uio_free( uio_t a_uio ); /* * uio_addiov - add an iovec to the given uio_t. You may call this up to - * the a_iovcount number that was passed to uio_create. + * the a_iovcount number that was passed to uio_create. * returns 0 if add was successful else non zero. */ int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length ); @@ -170,18 +170,18 @@ int uio_addiov( uio_t a_uio, user_addr_t a_baseaddr, user_size_t a_length ); * uio_getiov - get iovec data associated with the given uio_t. Use * a_index to iterate over each iovec (0 to (uio_iovcnt(uio_t) - 1)). * a_baseaddr_p and a_length_p may be NULL. - * returns -1 when a_index is out of range or invalid uio_t. + * returns -1 when a_index is out of range or invalid uio_t. * returns 0 when data is returned. 
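uio_create() allocates room for the iovecs up front and uio_addiov() fills them in; the resulting uio_t is what the rest of the KPI (and the VNOP layer) consumes. A hypothetical sketch of a kernel-space read; read_into_kernel_buffer and its use of VNOP_READ are assumptions for illustration:

    /* Wrap a kernel buffer in a single-iovec uio and read into it. */
    static int
    read_into_kernel_buffer(vnode_t vp, void *buf, user_size_t len, off_t off,
        vfs_context_t ctx)
    {
        uio_t auio = uio_create(1, off, UIO_SYSSPACE, UIO_READ);
        int err;

        if (auio == NULL) {
            return ENOMEM;
        }
        if ((err = uio_addiov(auio, CAST_USER_ADDR_T(buf), len)) == 0) {
            err = VNOP_READ(vp, auio, 0, ctx); /* residual is tracked in the uio */
        }
        uio_free(auio);
        return err;
    }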
*/ -int uio_getiov( uio_t a_uio, - int a_index, - user_addr_t * a_baseaddr_p, - user_size_t * a_length_p ); +int uio_getiov( uio_t a_uio, + int a_index, + user_addr_t * a_baseaddr_p, + user_size_t * a_length_p ); /* * uio_update - update the given uio_t for a_count of completed IO. * This call adjusts decrements the current iovec length and residual IO, - * and increments the current iovec base address and offset value. + * and increments the current iovec base address and offset value. */ void uio_update( uio_t a_uio, user_size_t a_count ); @@ -221,19 +221,19 @@ int uio_rw( uio_t a_uio ); void uio_setrw( uio_t a_uio, int a_value ); /* - * uio_isuserspace - return non zero value if the address space + * uio_isuserspace - return non zero value if the address space * flag is for a user address space (could be 32 or 64 bit). */ int uio_isuserspace( uio_t a_uio ); /* - * uio_curriovbase - return the base address of the current iovec associated + * uio_curriovbase - return the base address of the current iovec associated * with the given uio_t. May return 0. */ user_addr_t uio_curriovbase( uio_t a_uio ); /* - * uio_curriovlen - return the length value of the current iovec associated + * uio_curriovlen - return the length value of the current iovec associated * with the given uio_t. */ user_size_t uio_curriovlen( uio_t a_uio ); @@ -241,8 +241,8 @@ user_size_t uio_curriovlen( uio_t a_uio ); /* * Limits */ -#define UIO_MAXIOV 1024 /* max 1K of iov's */ -#define UIO_SMALLIOV 8 /* 8 on stack, else malloc */ +#define UIO_MAXIOV 1024 /* max 1K of iov's */ +#define UIO_SMALLIOV 8 /* 8 on stack, else malloc */ extern int uiomove(const char * cp, int n, struct uio *uio); extern int uiomove64(const __uint64_t cp, int n, struct uio *uio); @@ -250,11 +250,11 @@ __END_DECLS #endif /* KERNEL */ -#ifndef KERNEL +#ifndef KERNEL __BEGIN_DECLS -ssize_t readv(int, const struct iovec *, int) __DARWIN_ALIAS_C(readv); -ssize_t writev(int, const struct iovec *, int) __DARWIN_ALIAS_C(writev); +ssize_t readv(int, const struct iovec *, int) __DARWIN_ALIAS_C(readv); +ssize_t writev(int, const struct iovec *, int) __DARWIN_ALIAS_C(writev); __END_DECLS #endif /* !KERNEL */ diff --git a/bsd/sys/uio_internal.h b/bsd/sys/uio_internal.h index e1535ae82..91f00abb2 100644 --- a/bsd/sys/uio_internal.h +++ b/bsd/sys/uio_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_UIO_INTERNAL_H_ -#define _SYS_UIO_INTERNAL_H_ +#define _SYS_UIO_INTERNAL_H_ #include @@ -76,17 +76,17 @@ * WARNING - make sure to check when adding flags! Be sure new flags * don't overlap the definitions in uio.h */ -// UIO_USERSPACE 0 defined in uio.h -#define UIO_USERISPACE 1 +// UIO_USERSPACE 0 defined in uio.h +#define UIO_USERISPACE 1 // UIO_SYSSPACE 2 defined in uio.h -#define UIO_PHYS_USERSPACE 3 -#define UIO_PHYS_SYSSPACE 4 +#define UIO_PHYS_USERSPACE 3 +#define UIO_PHYS_SYSSPACE 4 // UIO_USERSPACE32 5 defined in uio.h -#define UIO_USERISPACE32 6 -#define UIO_PHYS_USERSPACE32 7 +#define UIO_USERISPACE32 6 +#define UIO_PHYS_USERSPACE32 7 // UIO_USERSPACE64 8 defined in uio.h -#define UIO_USERISPACE64 9 -#define UIO_PHYS_USERSPACE64 10 +#define UIO_USERISPACE64 9 +#define UIO_PHYS_USERSPACE64 10 // UIO_SYSSPACE32 11 defined in uio.h // UIO_PHYS_SYSSPACE32 12 reserved, never used. Use UIO_PHYS_SYSSPACE // UIO_SYSSPACE64 13 reserved, never used. Use UIO_SYSSPACE @@ -109,51 +109,51 @@ __private_extern__ void uio_pushback( uio_t a_uio, user_size_t a_count ); /* use kern_iovec for system space requests */ struct kern_iovec { - u_int64_t iov_base; /* Base address. */ - u_int64_t iov_len; /* Length. */ + u_int64_t iov_base; /* Base address. */ + u_int64_t iov_len; /* Length. */ }; /* use user_iovec for user space requests */ struct user_iovec { - user_addr_t iov_base; /* Base address. */ - user_size_t iov_len; /* Length. */ + user_addr_t iov_base; /* Base address. */ + user_size_t iov_len; /* Length. */ }; /* use user32_iovec/user64_iovec for representing * in-memory structures in 32-64 processes during copyin */ struct user32_iovec { - uint32_t iov_base; /* Base address. */ - uint32_t iov_len; /* Length. */ + uint32_t iov_base; /* Base address. */ + uint32_t iov_len; /* Length. */ }; struct user64_iovec { - uint64_t iov_base; /* Base address. */ - uint64_t iov_len; /* Length. */ + uint64_t iov_base; /* Base address. */ + uint64_t iov_len; /* Length. */ }; union iovecs { - struct kern_iovec *kiovp; - struct user_iovec *uiovp; + struct kern_iovec *kiovp; + struct user_iovec *uiovp; }; /* WARNING - use accessor calls for uio_iov and uio_resid since these */ /* fields vary depending on the originating address space. 
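user32_iovec and user64_iovec capture the caller's in-memory layout during copyin; the kernel then normalizes to user_iovec, widening the 32-bit fields. A minimal sketch of that widening step (widen_iovec32 is illustrative):

    /* Widen an iovec copied in from a 32-bit process. */
    static void
    widen_iovec32(const struct user32_iovec *in, struct user_iovec *out)
    {
        out->iov_base = CAST_USER_ADDR_T(in->iov_base); /* zero-extends the address */
        out->iov_len = (user_size_t)in->iov_len;
    }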
*/ struct uio { - union iovecs uio_iovs; /* current iovec */ - int uio_iovcnt; /* active iovecs */ - off_t uio_offset; - enum uio_seg uio_segflg; - enum uio_rw uio_rw; - user_size_t uio_resid_64; - int uio_size; /* size for use with kfree */ - int uio_max_iovs; /* max number of iovecs this uio_t can hold */ - u_int32_t uio_flags; + union iovecs uio_iovs; /* current iovec */ + int uio_iovcnt; /* active iovecs */ + off_t uio_offset; + enum uio_seg uio_segflg; + enum uio_rw uio_rw; + user_size_t uio_resid_64; + int uio_size; /* size for use with kfree */ + int uio_max_iovs; /* max number of iovecs this uio_t can hold */ + u_int32_t uio_flags; }; /* values for uio_flags */ -#define UIO_FLAGS_INITED 0x00000001 -#define UIO_FLAGS_WE_ALLOCED 0x00000002 -#define UIO_FLAGS_IS_COMPRESSED_FILE 0x00000004 +#define UIO_FLAGS_INITED 0x00000001 +#define UIO_FLAGS_WE_ALLOCED 0x00000002 +#define UIO_FLAGS_IS_COMPRESSED_FILE 0x00000004 __END_DECLS @@ -164,7 +164,7 @@ __END_DECLS */ #define UIO_SIZEOF( a_iovcount ) \ ( sizeof(struct uio) + (MAX(sizeof(struct user_iovec), sizeof(struct kern_iovec)) * (a_iovcount)) ) - + #define UIO_IS_USER_SPACE32( a_uio_t ) \ ( (a_uio_t)->uio_segflg == UIO_USERSPACE32 || (a_uio_t)->uio_segflg == UIO_PHYS_USERSPACE32 || \ (a_uio_t)->uio_segflg == UIO_USERISPACE32 ) diff --git a/bsd/sys/ulock.h b/bsd/sys/ulock.h index 5a1b5f62e..bb48d3a72 100644 --- a/bsd/sys/ulock.h +++ b/bsd/sys/ulock.h @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -58,7 +58,7 @@ ulock_owner_value_to_port_name(uint32_t uval) #ifndef KERNEL extern int __ulock_wait(uint32_t operation, void *addr, uint64_t value, - uint32_t timeout); /* timeout is specified in microseconds */ + uint32_t timeout); /* timeout is specified in microseconds */ extern int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); #endif /* !KERNEL */ @@ -66,21 +66,21 @@ extern int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); /* * operation bits [7, 0] contain the operation code */ -#define UL_COMPARE_AND_WAIT 1 -#define UL_UNFAIR_LOCK 2 +#define UL_COMPARE_AND_WAIT 1 +#define UL_UNFAIR_LOCK 2 /* obsolete names */ -#define UL_OSSPINLOCK UL_COMPARE_AND_WAIT -#define UL_HANDOFFLOCK UL_UNFAIR_LOCK +#define UL_OSSPINLOCK UL_COMPARE_AND_WAIT +#define UL_HANDOFFLOCK UL_UNFAIR_LOCK /* These operation code are only implemented in (DEVELOPMENT || DEBUG) kernels */ -#define UL_DEBUG_SIMULATE_COPYIN_FAULT 253 -#define UL_DEBUG_HASH_DUMP_ALL 254 -#define UL_DEBUG_HASH_DUMP_PID 255 +#define UL_DEBUG_SIMULATE_COPYIN_FAULT 253 +#define UL_DEBUG_HASH_DUMP_ALL 254 +#define UL_DEBUG_HASH_DUMP_PID 255 /* * operation bits [15, 8] contain the flags for __ulock_wake */ -#define ULF_WAKE_ALL 0x00000100 -#define ULF_WAKE_THREAD 0x00000200 +#define ULF_WAKE_ALL 0x00000100 +#define ULF_WAKE_THREAD 0x00000200 /* * operation bits [23, 16] contain the flags for __ulock_wait @@ -99,22 +99,22 @@ extern int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value); /* * operation bits [31, 24] contain the generic flags */ -#define ULF_NO_ERRNO 0x01000000 +#define ULF_NO_ERRNO 0x01000000 /* * masks */ -#define UL_OPCODE_MASK 0x000000FF -#define UL_FLAGS_MASK 0xFFFFFF00 -#define ULF_GENERIC_MASK 0xFFFF0000 +#define UL_OPCODE_MASK 0x000000FF +#define UL_FLAGS_MASK 0xFFFFFF00 +#define ULF_GENERIC_MASK 0xFFFF0000 -#define ULF_WAIT_MASK (ULF_NO_ERRNO | \ - ULF_WAIT_WORKQ_DATA_CONTENTION | \ - ULF_WAIT_CANCEL_POINT) +#define ULF_WAIT_MASK (ULF_NO_ERRNO | \ + ULF_WAIT_WORKQ_DATA_CONTENTION | \ + ULF_WAIT_CANCEL_POINT) -#define ULF_WAKE_MASK (ULF_WAKE_ALL | \ - ULF_WAKE_THREAD | \ - ULF_NO_ERRNO) +#define ULF_WAKE_MASK (ULF_WAKE_ALL | \ + ULF_WAKE_THREAD | \ + ULF_NO_ERRNO) #endif /* PRIVATE */ diff --git a/bsd/sys/un.h b/bsd/sys/un.h index f6c6d592a..6dd075754 100644 --- a/bsd/sys/un.h +++ b/bsd/sys/un.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
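The operation word packs an opcode into bits [7, 0] and flags above it, so UL_COMPARE_AND_WAIT composes with ULF_WAKE_ALL by OR. A userspace sketch of a one-shot gate built on these private, unstable syscalls; the prototypes are restated below as assumptions, since no public header exports them:

    #include <stdint.h>
    #include <stdatomic.h>

    extern int __ulock_wait(uint32_t operation, void *addr, uint64_t value,
        uint32_t timeout);              /* timeout in microseconds, 0 = forever */
    extern int __ulock_wake(uint32_t operation, void *addr, uint64_t wake_value);

    #define UL_COMPARE_AND_WAIT 1
    #define ULF_WAKE_ALL        0x00000100

    static _Atomic uint32_t gate = 0;

    static void
    gate_wait(void)
    {
        while (atomic_load(&gate) == 0) {
            /* Sleeps only if the word still holds the value we pass (0). */
            __ulock_wait(UL_COMPARE_AND_WAIT, (void *)&gate, 0, 0);
        }
    }

    static void
    gate_open(void)
    {
        atomic_store(&gate, 1);
        __ulock_wake(UL_COMPARE_AND_WAIT | ULF_WAKE_ALL, (void *)&gate, 0);
    }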
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -73,25 +73,25 @@ /* * [XSI] Definitions for UNIX IPC domain. */ -struct sockaddr_un { - unsigned char sun_len; /* sockaddr len including null */ - sa_family_t sun_family; /* [XSI] AF_UNIX */ - char sun_path[104]; /* [XSI] path name (gag) */ +struct sockaddr_un { + unsigned char sun_len; /* sockaddr len including null */ + sa_family_t sun_family; /* [XSI] AF_UNIX */ + char sun_path[104]; /* [XSI] path name (gag) */ }; #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* Level number of get/setsockopt for local domain sockets */ -#define SOL_LOCAL 0 +#define SOL_LOCAL 0 /* Socket options. */ -#define LOCAL_PEERCRED 0x001 /* retrieve peer credentials */ -#define LOCAL_PEERPID 0x002 /* retrieve peer pid */ -#define LOCAL_PEEREPID 0x003 /* retrieve eff. peer pid */ -#define LOCAL_PEERUUID 0x004 /* retrieve peer UUID */ -#define LOCAL_PEEREUUID 0x005 /* retrieve eff. peer UUID */ +#define LOCAL_PEERCRED 0x001 /* retrieve peer credentials */ +#define LOCAL_PEERPID 0x002 /* retrieve peer pid */ +#define LOCAL_PEEREPID 0x003 /* retrieve eff. peer pid */ +#define LOCAL_PEERUUID 0x004 /* retrieve peer UUID */ +#define LOCAL_PEEREUUID 0x005 /* retrieve eff. peer UUID */ -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #ifdef KERNEL @@ -102,14 +102,14 @@ struct mbuf; struct socket; struct sockopt; -int uipc_usrreq(struct socket *so, int req, struct mbuf *m, - struct mbuf *nam, struct mbuf *control); -int uipc_ctloutput (struct socket *so, struct sockopt *sopt); -int unp_connect2(struct socket *so, struct socket *so2); -void unp_dispose(struct mbuf *m); -int unp_externalize(struct mbuf *rights); -void unp_init(void); -extern struct pr_usrreqs uipc_usrreqs; +int uipc_usrreq(struct socket *so, int req, struct mbuf *m, + struct mbuf *nam, struct mbuf *control); +int uipc_ctloutput(struct socket *so, struct sockopt *sopt); +int unp_connect2(struct socket *so, struct socket *so2); +void unp_dispose(struct mbuf *m); +int unp_externalize(struct mbuf *rights); +void unp_init(void); +extern struct pr_usrreqs uipc_usrreqs; int unp_lock(struct socket *, int, void *); int unp_unlock(struct socket *, int, void *); lck_mtx_t* unp_getlock(struct socket *, int); @@ -121,7 +121,7 @@ __END_DECLS /* actual length of an initialized sockaddr_un */ #define SUN_LEN(su) \ (sizeof(*(su)) - sizeof((su)->sun_path) + strlen((su)->sun_path)) -#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ +#endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ #endif /* KERNEL */ diff --git a/bsd/sys/unistd.h b/bsd/sys/unistd.h index e373f2feb..d20b1cf7c 100644 --- a/bsd/sys/unistd.h +++ b/bsd/sys/unistd.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
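sun_len must hold the actual initialized length of the sockaddr, which is what SUN_LEN() computes from the embedded path; sun_path is a fixed 104-byte buffer, so the path must fit with its NUL. A minimal userspace sketch (bind_local is illustrative):

    #include <sys/socket.h>
    #include <sys/un.h>
    #include <string.h>
    #include <unistd.h>

    /* Create a unix-domain stream socket bound at path; returns fd or -1. */
    int
    bind_local(const char *path)
    {
        struct sockaddr_un sun;
        int s = socket(AF_UNIX, SOCK_STREAM, 0);

        if (s < 0) {
            return -1;
        }
        memset(&sun, 0, sizeof(sun));
        sun.sun_family = AF_UNIX;
        strlcpy(sun.sun_path, path, sizeof(sun.sun_path));
        sun.sun_len = SUN_LEN(&sun);    /* actual length, not sizeof(sun) */
        if (bind(s, (struct sockaddr *)&sun, SUN_LEN(&sun)) < 0) {
            close(s);
            return -1;
        }
        return s;
    }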
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_UNISTD_H_ -#define _SYS_UNISTD_H_ +#define _SYS_UNISTD_H_ #include @@ -72,24 +72,24 @@ * root. We use the saved IDs in seteuid/setegid, which are not currently * part of the POSIX 1003.1 specification. */ -#ifdef _NOT_AVAILABLE -#define _POSIX_SAVED_IDS /* saved set-user-ID and set-group-ID */ +#ifdef _NOT_AVAILABLE +#define _POSIX_SAVED_IDS /* saved set-user-ID and set-group-ID */ #endif -#define _POSIX_VERSION 200112L -#define _POSIX2_VERSION 200112L +#define _POSIX_VERSION 200112L +#define _POSIX2_VERSION 200112L /* execution-time symbolic constants */ - /* may disable terminal special characters */ +/* may disable terminal special characters */ #include #define _POSIX_THREAD_KEYS_MAX 128 /* access function */ -#define F_OK 0 /* test for existence of file */ -#define X_OK (1<<0) /* test for execute or search permission */ -#define W_OK (1<<1) /* test for write permission */ -#define R_OK (1<<2) /* test for read permission */ +#define F_OK 0 /* test for existence of file */ +#define X_OK (1<<0) /* test for execute or search permission */ +#define W_OK (1<<1) /* test for write permission */ +#define R_OK (1<<2) /* test for read permission */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* @@ -97,25 +97,25 @@ * Note that we depend on these matching the definitions in sys/kauth.h, * but with the bits shifted left by 8. 
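F_OK tests bare existence while X_OK/W_OK/R_OK are single bits, so access(2) callers OR them together. A minimal userspace example:

    #include <unistd.h>
    #include <stdio.h>

    int
    main(void)
    {
        if (access("/etc/hosts", R_OK | W_OK) == 0) {
            printf("readable and writable\n");
        } else {
            perror("access");
        }
        return 0;
    }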
*/ -#define _READ_OK (1<<9) /* read file data / read directory */ -#define _WRITE_OK (1<<10) /* write file data / add file to directory */ -#define _EXECUTE_OK (1<<11) /* execute file / search in directory*/ -#define _DELETE_OK (1<<12) /* delete file / delete directory */ -#define _APPEND_OK (1<<13) /* append to file / add subdirectory to directory */ -#define _RMFILE_OK (1<<14) /* - / remove file from directory */ -#define _RATTR_OK (1<<15) /* read basic attributes */ -#define _WATTR_OK (1<<16) /* write basic attributes */ -#define _REXT_OK (1<<17) /* read extended attributes */ -#define _WEXT_OK (1<<18) /* write extended attributes */ -#define _RPERM_OK (1<<19) /* read permissions */ -#define _WPERM_OK (1<<20) /* write permissions */ -#define _CHOWN_OK (1<<21) /* change ownership */ +#define _READ_OK (1<<9) /* read file data / read directory */ +#define _WRITE_OK (1<<10) /* write file data / add file to directory */ +#define _EXECUTE_OK (1<<11) /* execute file / search in directory*/ +#define _DELETE_OK (1<<12) /* delete file / delete directory */ +#define _APPEND_OK (1<<13) /* append to file / add subdirectory to directory */ +#define _RMFILE_OK (1<<14) /* - / remove file from directory */ +#define _RATTR_OK (1<<15) /* read basic attributes */ +#define _WATTR_OK (1<<16) /* write basic attributes */ +#define _REXT_OK (1<<17) /* read extended attributes */ +#define _WEXT_OK (1<<18) /* write extended attributes */ +#define _RPERM_OK (1<<19) /* read permissions */ +#define _WPERM_OK (1<<20) /* write permissions */ +#define _CHOWN_OK (1<<21) /* change ownership */ #define _ACCESS_EXTENDED_MASK (_READ_OK | _WRITE_OK | _EXECUTE_OK | \ - _DELETE_OK | _APPEND_OK | \ - _RMFILE_OK | _REXT_OK | \ - _WEXT_OK | _RATTR_OK | _WATTR_OK | _RPERM_OK | \ - _WPERM_OK | _CHOWN_OK) + _DELETE_OK | _APPEND_OK | \ + _RMFILE_OK | _REXT_OK | \ + _WEXT_OK | _RATTR_OK | _WATTR_OK | _RPERM_OK | \ + _WPERM_OK | _CHOWN_OK) #endif /* whence values for lseek(2) */ @@ -123,9 +123,9 @@ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* whence values for lseek(2); renamed by POSIX 1003.1 */ -#define L_SET SEEK_SET -#define L_INCR SEEK_CUR -#define L_XTND SEEK_END +#define L_SET SEEK_SET +#define L_INCR SEEK_CUR +#define L_XTND SEEK_END #endif #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) @@ -134,45 +134,45 @@ struct accessx_descriptor { int ad_flags; int ad_pad[2]; }; -#define ACCESSX_MAX_DESCRIPTORS 100 -#define ACCESSX_MAX_TABLESIZE (16 * 1024) +#define ACCESSX_MAX_DESCRIPTORS 100 +#define ACCESSX_MAX_TABLESIZE (16 * 1024) #endif /* configurable pathname variables */ -#define _PC_LINK_MAX 1 -#define _PC_MAX_CANON 2 -#define _PC_MAX_INPUT 3 -#define _PC_NAME_MAX 4 -#define _PC_PATH_MAX 5 -#define _PC_PIPE_BUF 6 -#define _PC_CHOWN_RESTRICTED 7 -#define _PC_NO_TRUNC 8 -#define _PC_VDISABLE 9 +#define _PC_LINK_MAX 1 +#define _PC_MAX_CANON 2 +#define _PC_MAX_INPUT 3 +#define _PC_NAME_MAX 4 +#define _PC_PATH_MAX 5 +#define _PC_PIPE_BUF 6 +#define _PC_CHOWN_RESTRICTED 7 +#define _PC_NO_TRUNC 8 +#define _PC_VDISABLE 9 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) -#define _PC_NAME_CHARS_MAX 10 -#define _PC_CASE_SENSITIVE 11 -#define _PC_CASE_PRESERVING 12 +#define _PC_NAME_CHARS_MAX 10 +#define _PC_CASE_SENSITIVE 11 +#define _PC_CASE_PRESERVING 12 #define _PC_EXTENDED_SECURITY_NP 13 #define _PC_AUTH_OPAQUE_NP 14 #endif -#define _PC_2_SYMLINKS 15 /* Symlink supported in directory */ -#define _PC_ALLOC_SIZE_MIN 16 /* Minimum storage actually allocated */ -#define _PC_ASYNC_IO 17 /* Async I/O 
[AIO] supported? */ -#define _PC_FILESIZEBITS 18 /* # of bits to represent file size */ -#define _PC_PRIO_IO 19 /* Priority I/O [PIO] supported? */ -#define _PC_REC_INCR_XFER_SIZE 20 /* Recommended increment for next two */ -#define _PC_REC_MAX_XFER_SIZE 21 /* Recommended max file transfer size */ -#define _PC_REC_MIN_XFER_SIZE 22 /* Recommended min file transfer size */ -#define _PC_REC_XFER_ALIGN 23 /* Recommended buffer alignment */ -#define _PC_SYMLINK_MAX 24 /* Max # of bytes in symlink name */ -#define _PC_SYNC_IO 25 /* Sync I/O [SIO] supported? */ -#define _PC_XATTR_SIZE_BITS 26 /* # of bits to represent maximum xattr size */ -#define _PC_MIN_HOLE_SIZE 27 /* Recommended minimum hole size for sparse files */ +#define _PC_2_SYMLINKS 15 /* Symlink supported in directory */ +#define _PC_ALLOC_SIZE_MIN 16 /* Minimum storage actually allocated */ +#define _PC_ASYNC_IO 17 /* Async I/O [AIO] supported? */ +#define _PC_FILESIZEBITS 18 /* # of bits to represent file size */ +#define _PC_PRIO_IO 19 /* Priority I/O [PIO] supported? */ +#define _PC_REC_INCR_XFER_SIZE 20 /* Recommended increment for next two */ +#define _PC_REC_MAX_XFER_SIZE 21 /* Recommended max file transfer size */ +#define _PC_REC_MIN_XFER_SIZE 22 /* Recommended min file transfer size */ +#define _PC_REC_XFER_ALIGN 23 /* Recommended buffer alignment */ +#define _PC_SYMLINK_MAX 24 /* Max # of bytes in symlink name */ +#define _PC_SYNC_IO 25 /* Sync I/O [SIO] supported? */ +#define _PC_XATTR_SIZE_BITS 26 /* # of bits to represent maximum xattr size */ +#define _PC_MIN_HOLE_SIZE 27 /* Recommended minimum hole size for sparse files */ /* configurable system strings */ -#define _CS_PATH 1 +#define _CS_PATH 1 #ifndef KERNEL #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL @@ -185,9 +185,9 @@ struct accessx_descriptor { __BEGIN_DECLS -int getattrlistbulk(int, void *, void *, size_t, uint64_t) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -int getattrlistat(int, const char *, void *, void *, size_t, unsigned long) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -int setattrlistat(int, const char *, void *, void *, size_t, uint32_t) __OSX_AVAILABLE(10.13) __IOS_AVAILABLE(11.0) __TVOS_AVAILABLE(11.0) __WATCHOS_AVAILABLE(4.0); +int getattrlistbulk(int, void *, void *, size_t, uint64_t) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int getattrlistat(int, const char *, void *, void *, size_t, unsigned long) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int setattrlistat(int, const char *, void *, void *, size_t, uint32_t) __OSX_AVAILABLE(10.13) __IOS_AVAILABLE(11.0) __TVOS_AVAILABLE(11.0) __WATCHOS_AVAILABLE(4.0); __END_DECLS @@ -205,12 +205,12 @@ __END_DECLS __BEGIN_DECLS -int faccessat(int, const char *, int, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -int fchownat(int, const char *, uid_t, gid_t, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -int linkat(int, const char *, int, const char *, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -ssize_t readlinkat(int, const char *, char *, size_t) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -int symlinkat(const char *, int, const char *) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); -int unlinkat(int, const char *, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int faccessat(int, const char *, int, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int fchownat(int, const char *, uid_t, gid_t, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int linkat(int, const char *, int, const char *, 
int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +ssize_t readlinkat(int, const char *, char *, size_t) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int symlinkat(const char *, int, const char *) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int unlinkat(int, const char *, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); __END_DECLS diff --git a/bsd/sys/unpcb.h b/bsd/sys/unpcb.h index 9c66ca69b..b79e41f55 100644 --- a/bsd/sys/unpcb.h +++ b/bsd/sys/unpcb.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -99,19 +99,19 @@ typedef u_quad_t unp_gen_t; #if defined(__LP64__) struct _unpcb_list_entry { - u_int32_t le_next; - u_int32_t le_prev; -}; -#define _UCPCB_LIST_HEAD(name, type) \ -struct name { \ - u_int32_t lh_first; \ + u_int32_t le_next; + u_int32_t le_prev; }; -#define _UNPCB_LIST_ENTRY(x) struct _unpcb_list_entry -#define _UNPCB_PTR(x) u_int32_t -#else -#define _UCPCB_LIST_HEAD(name, type) LIST_HEAD(name, type) -#define _UNPCB_LIST_ENTRY(x) LIST_ENTRY(x) -#define _UNPCB_PTR(x) x +#define _UCPCB_LIST_HEAD(name, type) \ +struct name { \ + u_int32_t lh_first; \ +}; +#define _UNPCB_LIST_ENTRY(x) struct _unpcb_list_entry +#define _UNPCB_PTR(x) u_int32_t +#else +#define _UCPCB_LIST_HEAD(name, type) LIST_HEAD(name, type) +#define _UNPCB_LIST_ENTRY(x) LIST_ENTRY(x) +#define _UNPCB_PTR(x) x #endif #ifdef PRIVATE @@ -121,22 +121,22 @@ _UCPCB_LIST_HEAD(unp_head, unpcb); LIST_HEAD(unp_head, unpcb); #define sotounpcb(so) ((struct unpcb *)((so)->so_pcb)) -struct unpcb { - LIST_ENTRY(unpcb) unp_link; /* glue on list of all PCBs */ - struct socket *unp_socket; /* pointer back to socket */ - struct vnode *unp_vnode; /* if associated with file */ - ino_t unp_ino; /* fake inode number */ - struct unpcb *unp_conn; /* control block of connected socket */ - struct unp_head unp_refs; /* referencing socket linked list */ - LIST_ENTRY(unpcb) unp_reflink; /* link in unp_refs list */ - struct sockaddr_un *unp_addr; /* bound address of socket */ - int unp_cc; /* copy of rcv.sb_cc */ - int unp_mbcnt; /* copy of rcv.sb_mbcnt */ - unp_gen_t unp_gencnt; /* generation count of this instance */ - int unp_flags; /* flags */ - struct xucred unp_peercred; /* peer credentials, if applicable */ - decl_lck_mtx_data( ,unp_mtx); /* per unpcb lock */ - int rw_thrcount; /* disconnect should wait for this count to become zero */ +struct unpcb { + LIST_ENTRY(unpcb) unp_link; /* glue on list of all PCBs 
*/ + struct socket *unp_socket; /* pointer back to socket */ + struct vnode *unp_vnode; /* if associated with file */ + ino_t unp_ino; /* fake inode number */ + struct unpcb *unp_conn; /* control block of connected socket */ + struct unp_head unp_refs; /* referencing socket linked list */ + LIST_ENTRY(unpcb) unp_reflink; /* link in unp_refs list */ + struct sockaddr_un *unp_addr; /* bound address of socket */ + int unp_cc; /* copy of rcv.sb_cc */ + int unp_mbcnt; /* copy of rcv.sb_mbcnt */ + unp_gen_t unp_gencnt; /* generation count of this instance */ + int unp_flags; /* flags */ + struct xucred unp_peercred; /* peer credentials, if applicable */ + decl_lck_mtx_data(, unp_mtx); /* per unpcb lock */ + int rw_thrcount; /* disconnect should wait for this count to become zero */ }; #endif /* KERNEL */ @@ -153,32 +153,32 @@ struct unpcb { * (there may not even be a peer). This is set in unp_listen() when * it fills in unp_peercred for later consumption by unp_connect(). */ -#define UNP_HAVEPC 0x0001 -#define UNP_HAVEPCCACHED 0x0002 -#define UNP_DONTDISCONNECT 0x0004 -#define UNP_TRACE_MDNS 0x1000 +#define UNP_HAVEPC 0x0001 +#define UNP_HAVEPCCACHED 0x0002 +#define UNP_DONTDISCONNECT 0x0004 +#define UNP_TRACE_MDNS 0x1000 #ifdef KERNEL struct unpcb_compat { #else /* KERNEL */ #define unpcb_compat unpcb -struct unpcb { +struct unpcb { #endif /* KERNEL */ - _UNPCB_LIST_ENTRY(unpcb_compat) unp_link; /* glue on list of all PCBs */ - _UNPCB_PTR(struct socket *) unp_socket; /* pointer back to socket */ - _UNPCB_PTR(struct vnode *) unp_vnode; /* if associated with file */ - u_int32_t unp_ino; /* fake inode number */ - _UNPCB_PTR(struct unpcb_compat *) unp_conn; /* control block of connected socket */ + _UNPCB_LIST_ENTRY(unpcb_compat) unp_link; /* glue on list of all PCBs */ + _UNPCB_PTR(struct socket *) unp_socket; /* pointer back to socket */ + _UNPCB_PTR(struct vnode *) unp_vnode; /* if associated with file */ + u_int32_t unp_ino; /* fake inode number */ + _UNPCB_PTR(struct unpcb_compat *) unp_conn; /* control block of connected socket */ #if defined(KERNEL) - u_int32_t unp_refs; + u_int32_t unp_refs; #else - struct unp_head unp_refs; /* referencing socket linked list */ + struct unp_head unp_refs; /* referencing socket linked list */ #endif - _UNPCB_LIST_ENTRY(unpcb_compat) unp_reflink; /* link in unp_refs list */ - _UNPCB_PTR(struct sockaddr_un *) unp_addr; /* bound address of socket */ - int unp_cc; /* copy of rcv.sb_cc */ - int unp_mbcnt; /* copy of rcv.sb_mbcnt */ - unp_gen_t unp_gencnt; /* generation count of this instance */ + _UNPCB_LIST_ENTRY(unpcb_compat) unp_reflink; /* link in unp_refs list */ + _UNPCB_PTR(struct sockaddr_un *) unp_addr; /* bound address of socket */ + int unp_cc; /* copy of rcv.sb_cc */ + int unp_mbcnt; /* copy of rcv.sb_mbcnt */ + unp_gen_t unp_gencnt; /* generation count of this instance */ }; /* Hack alert -- this structure depends on . 
*/ @@ -187,55 +187,55 @@ struct unpcb { #pragma pack(4) struct xunpcb { - u_int32_t xu_len; /* length of this structure */ - _UNPCB_PTR(struct unpcb_compat *) xu_unpp; /* to help netstat, fstat */ - struct unpcb_compat xu_unp; /* our information */ + u_int32_t xu_len; /* length of this structure */ + _UNPCB_PTR(struct unpcb_compat *) xu_unpp; /* to help netstat, fstat */ + struct unpcb_compat xu_unp; /* our information */ union { - struct sockaddr_un xuu_addr; /* our bound address */ - char xu_dummy1[256]; + struct sockaddr_un xuu_addr; /* our bound address */ + char xu_dummy1[256]; } xu_au; #define xu_addr xu_au.xuu_addr union { - struct sockaddr_un xuu_caddr; /* their bound address */ - char xu_dummy2[256]; + struct sockaddr_un xuu_caddr; /* their bound address */ + char xu_dummy2[256]; } xu_cau; #define xu_caddr xu_cau.xuu_caddr - struct xsocket xu_socket; - u_quad_t xu_alignment_hack; + struct xsocket xu_socket; + u_quad_t xu_alignment_hack; }; #if !CONFIG_EMBEDDED struct xunpcb64_list_entry { - u_int64_t le_next; - u_int64_t le_prev; + u_int64_t le_next; + u_int64_t le_prev; }; struct xunpcb64 { - u_int32_t xu_len; /* length of this structure */ - u_int64_t xu_unpp; /* to help netstat, fstat */ - struct xunpcb64_list_entry xunp_link; /* glue on list of all PCBs */ - u_int64_t xunp_socket; /* pointer back to socket */ - u_int64_t xunp_vnode; /* if associated with file */ - u_int64_t xunp_ino; /* fake inode number */ - u_int64_t xunp_conn; /* control block of connected socket */ - u_int64_t xunp_refs; /* referencing socket linked list */ - struct xunpcb64_list_entry xunp_reflink; /* link in unp_refs list */ - int xunp_cc; /* copy of rcv.sb_cc */ - int xunp_mbcnt; /* copy of rcv.sb_mbcnt */ - unp_gen_t xunp_gencnt; /* generation count of this instance */ - int xunp_flags; /* flags */ - union { - struct sockaddr_un xuu_addr; - char xu_dummy1[256]; - } xu_au; /* our bound address */ + u_int32_t xu_len; /* length of this structure */ + u_int64_t xu_unpp; /* to help netstat, fstat */ + struct xunpcb64_list_entry xunp_link; /* glue on list of all PCBs */ + u_int64_t xunp_socket; /* pointer back to socket */ + u_int64_t xunp_vnode; /* if associated with file */ + u_int64_t xunp_ino; /* fake inode number */ + u_int64_t xunp_conn; /* control block of connected socket */ + u_int64_t xunp_refs; /* referencing socket linked list */ + struct xunpcb64_list_entry xunp_reflink; /* link in unp_refs list */ + int xunp_cc; /* copy of rcv.sb_cc */ + int xunp_mbcnt; /* copy of rcv.sb_mbcnt */ + unp_gen_t xunp_gencnt; /* generation count of this instance */ + int xunp_flags; /* flags */ + union { + struct sockaddr_un xuu_addr; + char xu_dummy1[256]; + } xu_au; /* our bound address */ #define xunp_addr xu_au.xuu_addr - union { - struct sockaddr_un xuu_caddr; - char xu_dummy2[256]; - } xu_cau; /* their bound address */ + union { + struct sockaddr_un xuu_caddr; + char xu_dummy2[256]; + } xu_cau; /* their bound address */ #define xunp_caddr xu_cau.xuu_caddr - struct xsocket64 xu_socket; + struct xsocket64 xu_socket; }; #endif /* !CONFIG_EMBEDDED */ @@ -246,11 +246,11 @@ struct xunpcb64 { #endif /* PRIVATE */ -struct xunpgen { - u_int32_t xug_len; - u_int xug_count; - unp_gen_t xug_gen; - so_gen_t xug_sogen; +struct xunpgen { + u_int32_t xug_len; + u_int xug_count; + unp_gen_t xug_gen; + so_gen_t xug_sogen; }; #endif /* _SYS_UNPCB_H_ */ diff --git a/bsd/sys/user.h b/bsd/sys/user.h index 552bacac6..4c79d0d8f 100644 --- a/bsd/sys/user.h +++ b/bsd/sys/user.h @@ -61,8 +61,8 @@ * @(#)user.h 8.2 (Berkeley) 9/23/93 */ 
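/*
 * [Illustrative aside, not part of the patch] The xunpgen/xunpcb64 records
 * defined above are the wire format of the unix-domain PCB list sysctls
 * (xnu registers "net.local.stream.pcblist64" and a dgram twin in
 * uipc_usrreq.c -- the sysctl name is an assumption here). A minimal
 * netstat-style userland consumer; it assumes a build environment where
 * the PRIVATE declarations above are visible, and abbreviates errors:
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/socketvar.h>      /* so_gen_t, used by struct xunpgen */
#include <sys/unpcb.h>

static void
list_unix_stream_pcbs(void)
{
	size_t len = 0;
	char *buf;

	/* First call sizes the buffer; the list may grow between calls. */
	if (sysctlbyname("net.local.stream.pcblist64", NULL, &len, NULL, 0) < 0)
		return;
	if ((buf = malloc(len)) == NULL)
		return;
	if (sysctlbyname("net.local.stream.pcblist64", buf, &len, NULL, 0) < 0) {
		free(buf);
		return;
	}

	/* Layout: leading xunpgen, xunpcb64 records, trailing xunpgen. */
	struct xunpgen *xug = (struct xunpgen *)(void *)buf;
	for (char *p = buf + xug->xug_len; p < buf + len;) {
		struct xunpcb64 *xu = (struct xunpcb64 *)(void *)p;
		if (xu->xu_len <= sizeof(struct xunpgen))
			break;  /* reached the trailing generation record */
		printf("unpcb gen %llu flags 0x%x\n",
		    (unsigned long long)xu->xunp_gencnt, xu->xunp_flags);
		p += xu->xu_len;
	}
	free(buf);
}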
-#ifndef _SYS_USER_H_ -#define _SYS_USER_H_ +#ifndef _SYS_USER_H_ +#define _SYS_USER_H_ #include struct waitq_set; @@ -80,7 +80,7 @@ struct waitq_set; #include #include #endif -#include /* XXX */ +#include /* XXX */ #include #ifdef KERNEL @@ -96,17 +96,17 @@ struct waitq_set; * VFS context structure (part of uthread) */ struct vfs_context { - thread_t vc_thread; /* pointer to Mach thread */ - kauth_cred_t vc_ucred; /* per thread credential */ + thread_t vc_thread; /* pointer to Mach thread */ + kauth_cred_t vc_ucred; /* per thread credential */ }; #endif /* !__LP64 || XNU_KERNEL_PRIVATE */ #ifdef BSD_KERNEL_PRIVATE /* XXX Deprecated: xnu source compatability */ -#define uu_ucred uu_context.vc_ucred +#define uu_ucred uu_context.vc_ucred -struct label; /* MAC label dummy struct */ +struct label; /* MAC label dummy struct */ #define MAXTHREADNAMESIZE 64 /* @@ -117,12 +117,12 @@ struct uthread { /* syscall parameters, results and catches */ u_int64_t uu_arg[8]; /* arguments to current system call */ int uu_rval[2]; - char uu_cursig; /* p_cursig for exc. */ + char uu_cursig; /* p_cursig for exc. */ unsigned int syscall_code; /* current syscall code */ /* thread exception handling */ - int uu_exception; - mach_exception_code_t uu_code; /* ``code'' to trap */ + int uu_exception; + mach_exception_code_t uu_code; /* ``code'' to trap */ mach_exception_subcode_t uu_subcode; /* support for syscalls which use continuations */ @@ -206,53 +206,53 @@ struct uthread { /* Persistent memory allocations across system calls */ struct _select { - u_int32_t *ibits, *obits; /* bits to select on */ - uint nbytes; /* number of bytes in ibits and obits */ - } uu_select; /* saved state for select() */ + u_int32_t *ibits, *obits; /* bits to select on */ + uint nbytes; /* number of bytes in ibits and obits */ + } uu_select; /* saved state for select() */ /* internal support for continuation framework */ int (*uu_continuation)(int); int uu_pri; int uu_timo; - caddr_t uu_wchan; /* sleeping thread wait channel */ - const char *uu_wmesg; /* ... wait message */ + caddr_t uu_wchan; /* sleeping thread wait channel */ + const char *uu_wmesg; /* ... 
wait message */ struct proc *uu_proc; thread_t uu_thread; void * uu_userstate; - struct waitq_set *uu_wqset; /* waitq state cached across select calls */ - size_t uu_wqstate_sz; /* ...size of uu_wqset buffer */ + struct waitq_set *uu_wqset; /* waitq state cached across select calls */ + size_t uu_wqstate_sz; /* ...size of uu_wqset buffer */ int uu_flag; - sigset_t uu_siglist; /* signals pending for the thread */ - sigset_t uu_sigwait; /* sigwait on this thread*/ - sigset_t uu_sigmask; /* signal mask for the thread */ - sigset_t uu_oldmask; /* signal mask saved before sigpause */ - sigset_t uu_vforkmask; /* saved signal mask during vfork */ - struct vfs_context uu_context; /* thread + cred */ + sigset_t uu_siglist; /* signals pending for the thread */ + sigset_t uu_sigwait; /* sigwait on this thread*/ + sigset_t uu_sigmask; /* signal mask for the thread */ + sigset_t uu_oldmask; /* signal mask saved before sigpause */ + sigset_t uu_vforkmask; /* saved signal mask during vfork */ + struct vfs_context uu_context; /* thread + cred */ - TAILQ_ENTRY(uthread) uu_list; /* List of uthreads in proc */ + TAILQ_ENTRY(uthread) uu_list; /* List of uthreads in proc */ - struct kaudit_record *uu_ar; /* audit record */ - struct task* uu_aio_task; /* target task for async io */ + struct kaudit_record *uu_ar; /* audit record */ + struct task* uu_aio_task; /* target task for async io */ - lck_mtx_t *uu_mtx; + lck_mtx_t *uu_mtx; - lck_spin_t uu_rethrottle_lock; /* locks was_rethrottled and is_throttled */ - TAILQ_ENTRY(uthread) uu_throttlelist; /* List of uthreads currently throttled */ - void * uu_throttle_info; /* pointer to throttled I/Os info */ - int uu_on_throttlelist; - int uu_lowpri_window; + lck_spin_t uu_rethrottle_lock; /* locks was_rethrottled and is_throttled */ + TAILQ_ENTRY(uthread) uu_throttlelist; /* List of uthreads currently throttled */ + void * uu_throttle_info; /* pointer to throttled I/Os info */ + int uu_on_throttlelist; + int uu_lowpri_window; /* These boolean fields are protected by different locks */ - bool uu_was_rethrottled; - bool uu_is_throttled; - bool uu_throttle_bc; + bool uu_was_rethrottled; + bool uu_is_throttled; + bool uu_throttle_bc; - u_int32_t uu_network_marks; /* network control flow marks */ + u_int32_t uu_network_marks; /* network control flow marks */ struct kern_sigaltstack uu_sigstk; - vnode_t uu_vreclaims; - vnode_t uu_cdir; /* per thread CWD */ - int uu_dupfd; /* fd in fdesc_open/dupfdopen */ - int uu_defer_reclaims; + vnode_t uu_vreclaims; + vnode_t uu_cdir; /* per thread CWD */ + int uu_dupfd; /* fd in fdesc_open/dupfdopen */ + int uu_defer_reclaims; /* * Bound kqueue request. This field is only cleared by the current thread, @@ -272,18 +272,18 @@ struct uthread { kq_index_t uu_kqueue_override; #ifdef JOE_DEBUG - int uu_iocount; - int uu_vpindex; - void *uu_vps[32]; + int uu_iocount; + int uu_vpindex; + void *uu_vps[32]; void *uu_pcs[32][10]; #endif #if CONFIG_WORKLOOP_DEBUG #define UU_KEVENT_HISTORY_COUNT 32 #define UU_KEVENT_HISTORY_WRITE_ENTRY(uth, ...) 
({ \ - struct uthread *__uth = (uth); \ - unsigned int __index = __uth->uu_kevent_index++; \ - __uth->uu_kevent_history[__index % UU_KEVENT_HISTORY_COUNT] = \ - (struct uu_kevent_history)__VA_ARGS__; \ + struct uthread *__uth = (uth); \ + unsigned int __index = __uth->uu_kevent_index++; \ + __uth->uu_kevent_history[__index % UU_KEVENT_HISTORY_COUNT] = \ + (struct uu_kevent_history)__VA_ARGS__; \ }) struct uu_kevent_history { uint64_t uu_kqid; @@ -293,18 +293,18 @@ struct uthread { } uu_kevent_history[UU_KEVENT_HISTORY_COUNT]; unsigned int uu_kevent_index; #endif - int uu_proc_refcount; + int uu_proc_refcount; #if PROC_REF_DEBUG #define NUM_PROC_REFS_TO_TRACK 32 #define PROC_REF_STACK_DEPTH 10 - int uu_pindex; - void * uu_proc_ps[NUM_PROC_REFS_TO_TRACK]; - uintptr_t uu_proc_pcs[NUM_PROC_REFS_TO_TRACK][PROC_REF_STACK_DEPTH]; + int uu_pindex; + void * uu_proc_ps[NUM_PROC_REFS_TO_TRACK]; + uintptr_t uu_proc_pcs[NUM_PROC_REFS_TO_TRACK][PROC_REF_STACK_DEPTH]; #endif #if CONFIG_DTRACE - uint32_t t_dtrace_errno; /* Most recent errno */ - siginfo_t t_dtrace_siginfo; + uint32_t t_dtrace_errno; /* Most recent errno */ + siginfo_t t_dtrace_siginfo; uint64_t t_dtrace_resumepid; /* DTrace's pidresume() pid */ uint8_t t_dtrace_stop; /* indicates a DTrace desired stop */ uint8_t t_dtrace_sig; /* signal sent via DTrace's raise() */ @@ -330,12 +330,12 @@ struct uthread { #define t_dtrace_reg _tdu._tds._t_dtrace_reg #endif - user_addr_t t_dtrace_pc; /* DTrace saved pc from fasttrap */ - user_addr_t t_dtrace_npc; /* DTrace next pc from fasttrap */ - user_addr_t t_dtrace_scrpc; /* DTrace per-thread scratch location */ - user_addr_t t_dtrace_astpc; /* DTrace return sequence location */ + user_addr_t t_dtrace_pc; /* DTrace saved pc from fasttrap */ + user_addr_t t_dtrace_npc; /* DTrace next pc from fasttrap */ + user_addr_t t_dtrace_scrpc; /* DTrace per-thread scratch location */ + user_addr_t t_dtrace_astpc; /* DTrace return sequence location */ - struct dtrace_ptss_page_entry* t_dtrace_scratch; /* scratch space entry */ + struct dtrace_ptss_page_entry* t_dtrace_scratch; /* scratch space entry */ #if __sol64 || defined(__APPLE__) uint64_t t_dtrace_regv; /* DTrace saved reg from fasttrap */ @@ -353,30 +353,30 @@ struct uthread { typedef struct uthread * uthread_t; /* Definition of uu_flag */ -#define UT_SAS_OLDMASK 0x00000001 /* need to restore mask before pause */ -#define UT_NO_SIGMASK 0x00000002 /* exited thread; invalid sigmask */ -#define UT_NOTCANCELPT 0x00000004 /* not a cancelation point */ -#define UT_CANCEL 0x00000008 /* thread marked for cancel */ -#define UT_CANCELED 0x00000010 /* thread cancelled */ +#define UT_SAS_OLDMASK 0x00000001 /* need to restore mask before pause */ +#define UT_NO_SIGMASK 0x00000002 /* exited thread; invalid sigmask */ +#define UT_NOTCANCELPT 0x00000004 /* not a cancelation point */ +#define UT_CANCEL 0x00000008 /* thread marked for cancel */ +#define UT_CANCELED 0x00000010 /* thread cancelled */ #define UT_CANCELDISABLE 0x00000020 /* thread cancel disabled */ -#define UT_ALTSTACK 0x00000040 /* this thread has alt stack for signals */ -#define UT_THROTTLE_IO 0x00000080 /* this thread issues throttle I/O */ -#define UT_PASSIVE_IO 0x00000100 /* this thread issues passive I/O */ -#define UT_PROCEXIT 0x00000200 /* this thread completed the proc exit */ -#define UT_RAGE_VNODES 0x00000400 /* rapid age any vnodes created by this thread */ -#define UT_KERN_RAGE_VNODES 0x00000800 /* rapid age any vnodes created by this thread (kernel set) */ +#define UT_ALTSTACK 0x00000040 /* this 
thread has alt stack for signals */ +#define UT_THROTTLE_IO 0x00000080 /* this thread issues throttle I/O */ +#define UT_PASSIVE_IO 0x00000100 /* this thread issues passive I/O */ +#define UT_PROCEXIT 0x00000200 /* this thread completed the proc exit */ +#define UT_RAGE_VNODES 0x00000400 /* rapid age any vnodes created by this thread */ +#define UT_KERN_RAGE_VNODES 0x00000800 /* rapid age any vnodes created by this thread (kernel set) */ /* 0x00001000 unused, used to be UT_BACKGROUND_TRAFFIC_MGT */ -#define UT_ATIME_UPDATE 0x00002000 /* don't update atime for files accessed by this thread */ -#define UT_VFORK 0x02000000 /* thread has vfork children */ -#define UT_SETUID 0x04000000 /* thread is settugid() */ -#define UT_WASSETUID 0x08000000 /* thread was settugid() (in vfork) */ -#define UT_VFORKING 0x10000000 /* thread in vfork() syscall */ +#define UT_ATIME_UPDATE 0x00002000 /* don't update atime for files accessed by this thread */ +#define UT_VFORK 0x02000000 /* thread has vfork children */ +#define UT_SETUID 0x04000000 /* thread is settugid() */ +#define UT_WASSETUID 0x08000000 /* thread was settugid() (in vfork) */ +#define UT_VFORKING 0x10000000 /* thread in vfork() syscall */ #endif /* BSD_KERNEL_PRIVATE */ #endif /* __APPLE_API_PRIVATE */ -#endif /* KERNEL */ +#endif /* KERNEL */ /* * Per process structure containing data that isn't needed in core @@ -385,8 +385,8 @@ typedef struct uthread * uthread_t; * in all processes. */ -struct user { +struct user { /* NOT USED ANYMORE */ }; -#endif /* !_SYS_USER_H_ */ +#endif /* !_SYS_USER_H_ */ diff --git a/bsd/sys/utfconv.h b/bsd/sys/utfconv.h index c872fc0b6..7f69dd5ab 100644 --- a/bsd/sys/utfconv.h +++ b/bsd/sys/utfconv.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _SYS_UTFCONV_H_ -#define _SYS_UTFCONV_H_ +#define _SYS_UTFCONV_H_ #include -#include +#include #ifdef KERNEL #ifdef __APPLE_API_UNSTABLE @@ -38,18 +38,18 @@ /* * UTF-8 encode/decode flags */ -#define UTF_REVERSE_ENDIAN 0x0001 /* reverse UCS-2 byte order */ +#define UTF_REVERSE_ENDIAN 0x0001 /* reverse UCS-2 byte order */ #define UTF_NO_NULL_TERM 0x0002 /* do not add null termination */ -#define UTF_DECOMPOSED 0x0004 /* generate fully decomposed UCS-2 */ -#define UTF_PRECOMPOSED 0x0008 /* generate precomposed UCS-2 */ +#define UTF_DECOMPOSED 0x0004 /* generate fully decomposed UCS-2 */ +#define UTF_PRECOMPOSED 0x0008 /* generate precomposed UCS-2 */ #define UTF_ESCAPE_ILLEGAL 0x0010 /* escape illegal UTF-8 */ #define UTF_SFM_CONVERSIONS 0x0020 /* Use SFM mappings for illegal NTFS chars */ #define UTF_BIG_ENDIAN \ - ((BYTE_ORDER == BIG_ENDIAN) ? 0 : UTF_REVERSE_ENDIAN) + ((BYTE_ORDER == BIG_ENDIAN) ? 0 : UTF_REVERSE_ENDIAN) #define UTF_LITTLE_ENDIAN \ - ((BYTE_ORDER == LITTLE_ENDIAN) ? 0 : UTF_REVERSE_ENDIAN) + ((BYTE_ORDER == LITTLE_ENDIAN) ? 0 : UTF_REVERSE_ENDIAN) __BEGIN_DECLS @@ -65,7 +65,7 @@ int unicode_combinable(u_int16_t character); /* * Test for a precomposed character. - * + * * Similar to __CFUniCharIsDecomposableCharacter. */ @@ -93,7 +93,7 @@ int unicode_decomposeable(u_int16_t character); */ size_t utf8_encodelen(const u_int16_t * ucsp, size_t ucslen, u_int16_t altslash, - int flags); + int flags); /* @@ -126,7 +126,7 @@ utf8_encodelen(const u_int16_t * ucsp, size_t ucslen, u_int16_t altslash, */ int utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, - size_t * utf8len, size_t buflen, u_int16_t altslash, int flags); + size_t * utf8len, size_t buflen, u_int16_t altslash, int flags); /* @@ -161,7 +161,7 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, */ int utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, - size_t *ucslen, size_t buflen, u_int16_t altslash, int flags); + size_t *ucslen, size_t buflen, u_int16_t altslash, int flags); /* @@ -190,7 +190,7 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, */ int utf8_normalizestr(const u_int8_t* instr, size_t inlen, u_int8_t* outstr, - size_t *outlen, size_t buflen, int flags); + size_t *outlen, size_t buflen, int flags); /* diff --git a/bsd/sys/utsname.h b/bsd/sys/utsname.h index 5143e5085..b4ae9f4c4 100644 --- a/bsd/sys/utsname.h +++ b/bsd/sys/utsname.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
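/*
 * [Illustrative aside, not part of the patch] The utf8_encodestr()/
 * utf8_decodestr() routines declared in utfconv.h above are kernel-only
 * and are used by filesystems to move names between UTF-8 and UTF-16.
 * A minimal sketch of the decode direction; the byte-unit convention for
 * the in/out lengths is an assumption based on existing xnu callers:
 */
#include <sys/types.h>
#include <sys/utfconv.h>

static int
name_utf8_to_ucs2(const u_int8_t *utf8, size_t utf8len,
    u_int16_t *ucsbuf, size_t bufsz, size_t *ucsbytes)
{
	/*
	 * Precompose combining sequences (e.g. 'e' + U+0301 -> U+00E9),
	 * as an on-disk format with precomposed names would require.
	 * altslash is 0: no '/' substitution on this path. On success,
	 * *ucsbytes holds the output length in bytes (assumed convention).
	 */
	return utf8_decodestr(utf8, utf8len, ucsbuf, ucsbytes,
	    bufsz, 0, UTF_PRECOMPOSED);
}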
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright 1993,1995 NeXT Computer Inc. All Rights Reserved */ @@ -63,24 +63,24 @@ * * @(#)utsname.h 8.1 (Berkeley) 1/4/94 */ - -#ifndef _SYS_UTSNAME_H -#define _SYS_UTSNAME_H + +#ifndef _SYS_UTSNAME_H +#define _SYS_UTSNAME_H #include -#define _SYS_NAMELEN 256 +#define _SYS_NAMELEN 256 -struct utsname { - char sysname[_SYS_NAMELEN]; /* [XSI] Name of OS */ - char nodename[_SYS_NAMELEN]; /* [XSI] Name of this network node */ - char release[_SYS_NAMELEN]; /* [XSI] Release level */ - char version[_SYS_NAMELEN]; /* [XSI] Version level */ - char machine[_SYS_NAMELEN]; /* [XSI] Hardware type */ +struct utsname { + char sysname[_SYS_NAMELEN]; /* [XSI] Name of OS */ + char nodename[_SYS_NAMELEN]; /* [XSI] Name of this network node */ + char release[_SYS_NAMELEN]; /* [XSI] Release level */ + char version[_SYS_NAMELEN]; /* [XSI] Version level */ + char machine[_SYS_NAMELEN]; /* [XSI] Hardware type */ }; __BEGIN_DECLS int uname(struct utsname *); __END_DECLS -#endif /* !_SYS_UTSNAME_H */ +#endif /* !_SYS_UTSNAME_H */ diff --git a/bsd/sys/ux_exception.h b/bsd/sys/ux_exception.h index 99352e29a..16482a7de 100644 --- a/bsd/sys/ux_exception.h +++ b/bsd/sys/ux_exception.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * Copyright (c) 1988 Carnegie-Mellon University @@ -38,18 +38,18 @@ * Codes for Unix software exceptions under EXC_SOFTWARE. 
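/*
 * [Illustrative aside, not part of the patch] Typical userland use of the
 * struct utsname / uname() interface declared above:
 */
#include <stdio.h>
#include <sys/utsname.h>

int
main(void)
{
	struct utsname u;

	if (uname(&u) != 0) {
		perror("uname");
		return 1;
	}
	/* e.g. "Darwin host 18.7.0 <version banner> x86_64" */
	printf("%s %s %s %s %s\n",
	    u.sysname, u.nodename, u.release, u.version, u.machine);
	return 0;
}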
*/ -#ifndef _SYS_UX_EXCEPTION_H_ +#ifndef _SYS_UX_EXCEPTION_H_ #define _SYS_UX_EXCEPTION_H_ #include #ifdef __APPLE_API_UNSTABLE -#define EXC_UNIX_BAD_SYSCALL 0x10000 /* SIGSYS */ +#define EXC_UNIX_BAD_SYSCALL 0x10000 /* SIGSYS */ -#define EXC_UNIX_BAD_PIPE 0x10001 /* SIGPIPE */ +#define EXC_UNIX_BAD_PIPE 0x10001 /* SIGPIPE */ -#define EXC_UNIX_ABORT 0x10002 /* SIGABRT */ +#define EXC_UNIX_ABORT 0x10002 /* SIGABRT */ #endif /* __APPLE_API_UNSTABLE */ @@ -61,14 +61,13 @@ extern int machine_exception(int exception, mach_exception_code_t code, - mach_exception_subcode_t subcode); + mach_exception_subcode_t subcode); extern kern_return_t handle_ux_exception(thread_t thread, int exception, - mach_exception_code_t code, - mach_exception_subcode_t subcode); + mach_exception_code_t code, + mach_exception_subcode_t subcode); #endif /* XNU_KERNEL_PRIVATE */ -#endif /* _SYS_UX_EXCEPTION_H_ */ - +#endif /* _SYS_UX_EXCEPTION_H_ */ diff --git a/bsd/sys/vadvise.h b/bsd/sys/vadvise.h index 7148b55b7..c35b3046d 100644 --- a/bsd/sys/vadvise.h +++ b/bsd/sys/vadvise.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ -#ifndef _SYS_VADVISE_H_ +#ifndef _SYS_VADVISE_H_ #define _SYS_VADVISE_H_ #include @@ -78,11 +78,11 @@ * VA_SEQL Sequential behaviour expected. * VA_FLUSH Invalidate all page table entries. */ -#define VA_NORM 0 -#define VA_ANOM 1 -#define VA_SEQL 2 -#define VA_FLUSH 3 +#define VA_NORM 0 +#define VA_ANOM 1 +#define VA_SEQL 2 +#define VA_FLUSH 3 #endif /* __APPLE_API_OBSOLETE */ -#endif /* !_SYS_VADVISE_H_ */ +#endif /* !_SYS_VADVISE_H_ */ diff --git a/bsd/sys/vcmd.h b/bsd/sys/vcmd.h index 435299f22..b5ffa78b8 100644 --- a/bsd/sys/vcmd.h +++ b/bsd/sys/vcmd.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,14 +62,14 @@ */ #ifndef _SYS_VCMD_H_ -#define _SYS_VCMD_H_ +#define _SYS_VCMD_H_ #include -#define VPRINT 0100 -#define VPLOT 0200 -#define VPRINTPLOT 0400 +#define VPRINT 0100 +#define VPLOT 0200 +#define VPRINTPLOT 0400 -#define VGETSTATE _IOR('v', 0, int) -#define VSETSTATE _IOW('v', 1, int) +#define VGETSTATE _IOR('v', 0, int) +#define VSETSTATE _IOW('v', 1, int) #endif /* !_SYS_VCMD_H_ */ diff --git a/bsd/sys/vlimit.h b/bsd/sys/vlimit.h index 48348249c..9e8c76b6a 100644 --- a/bsd/sys/vlimit.h +++ b/bsd/sys/vlimit.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -61,22 +61,22 @@ * @(#)vlimit.h 8.1 (Berkeley) 6/2/93 */ -#ifndef _SYS_VLIMIT_H_ +#ifndef _SYS_VLIMIT_H_ #define _SYS_VLIMIT_H_ /* * Limits for u.u_limit[i], per process, inherited. 
*/ -#define LIM_NORAISE 0 /* if <> 0, can't raise limits */ -#define LIM_CPU 1 /* max secs cpu time */ -#define LIM_FSIZE 2 /* max size of file created */ -#define LIM_DATA 3 /* max growth of data space */ -#define LIM_STACK 4 /* max growth of stack */ -#define LIM_CORE 5 /* max size of ``core'' file */ -#define LIM_MAXRSS 6 /* max desired data+stack core usage */ +#define LIM_NORAISE 0 /* if <> 0, can't raise limits */ +#define LIM_CPU 1 /* max secs cpu time */ +#define LIM_FSIZE 2 /* max size of file created */ +#define LIM_DATA 3 /* max growth of data space */ +#define LIM_STACK 4 /* max growth of stack */ +#define LIM_CORE 5 /* max size of ``core'' file */ +#define LIM_MAXRSS 6 /* max desired data+stack core usage */ -#define NLIMITS 6 +#define NLIMITS 6 -#define INFINITY 0x7fffffff +#define INFINITY 0x7fffffff -#endif /* !_SYS_VLIMIT_H_ */ +#endif /* !_SYS_VLIMIT_H_ */ diff --git a/bsd/sys/vm.h b/bsd/sys/vm.h index 752ef89b5..f23c43c88 100644 --- a/bsd/sys/vm.h +++ b/bsd/sys/vm.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ /* HISTORY * 05-Jun-95 Mac Gillon (mgillon) at NeXT - * 4.4 code uses this file to import MACH API + * 4.4 code uses this file to import MACH API */ #ifndef _SYS_VM_H @@ -74,7 +74,7 @@ #ifdef BSD_KERNEL_PRIVATE /* Machine specific config stuff */ -#if defined(KERNEL) && !defined(MACH_USER_API) +#if defined(KERNEL) && !defined(MACH_USER_API) #include #include #include @@ -86,49 +86,49 @@ * Several fields are temporary (text, data stuff). 
*/ struct vmspace { - int vm_refcnt; /* number of references */ - caddr_t vm_shm; /* SYS5 shared memory private data XXX */ + int vm_refcnt; /* number of references */ + caddr_t vm_shm; /* SYS5 shared memory private data XXX */ /* we copy from vm_startcopy to the end of the structure on fork */ #define vm_startcopy vm_rssize - segsz_t vm_rssize; /* current resident set size in pages */ - segsz_t vm_swrss; /* resident set size before last swap */ - segsz_t vm_tsize; /* text size (pages) XXX */ - segsz_t vm_dsize; /* data size (pages) XXX */ - segsz_t vm_ssize; /* stack size (pages) */ - caddr_t vm_taddr; /* user virtual address of text XXX */ - caddr_t vm_daddr; /* user virtual address of data XXX */ - caddr_t vm_maxsaddr; /* user VA at max stack growth */ + segsz_t vm_rssize; /* current resident set size in pages */ + segsz_t vm_swrss; /* resident set size before last swap */ + segsz_t vm_tsize; /* text size (pages) XXX */ + segsz_t vm_dsize; /* data size (pages) XXX */ + segsz_t vm_ssize; /* stack size (pages) */ + caddr_t vm_taddr; /* user virtual address of text XXX */ + caddr_t vm_daddr; /* user virtual address of data XXX */ + caddr_t vm_maxsaddr; /* user VA at max stack growth */ }; #ifdef KERNEL -/* LP64 version of vmspace. all pointers +/* LP64 version of vmspace. all pointers * grow when we're dealing with a 64-bit process. * WARNING - keep in sync with vmspace */ struct user32_vmspace { - int vm_refcnt; /* number of references */ - uint32_t vm_shm; /* SYS5 shared memory private data XXX */ - segsz_t vm_rssize; /* current resident set size in pages */ - segsz_t vm_swrss; /* resident set size before last swap */ - segsz_t vm_tsize; /* text size (pages) XXX */ - segsz_t vm_dsize; /* data size (pages) XXX */ - segsz_t vm_ssize; /* stack size (pages) */ - uint32_t vm_taddr; /* user virtual address of text XXX */ - uint32_t vm_daddr; /* user virtual address of data XXX */ - uint32_t vm_maxsaddr; /* user VA at max stack growth */ + int vm_refcnt; /* number of references */ + uint32_t vm_shm; /* SYS5 shared memory private data XXX */ + segsz_t vm_rssize; /* current resident set size in pages */ + segsz_t vm_swrss; /* resident set size before last swap */ + segsz_t vm_tsize; /* text size (pages) XXX */ + segsz_t vm_dsize; /* data size (pages) XXX */ + segsz_t vm_ssize; /* stack size (pages) */ + uint32_t vm_taddr; /* user virtual address of text XXX */ + uint32_t vm_daddr; /* user virtual address of data XXX */ + uint32_t vm_maxsaddr; /* user VA at max stack growth */ }; struct user_vmspace { - int vm_refcnt; /* number of references */ - user_addr_t vm_shm __attribute((aligned(8))); /* SYS5 shared memory private data XXX */ - segsz_t vm_rssize; /* current resident set size in pages */ - segsz_t vm_swrss; /* resident set size before last swap */ - segsz_t vm_tsize; /* text size (pages) XXX */ - segsz_t vm_dsize; /* data size (pages) XXX */ - segsz_t vm_ssize; /* stack size (pages) */ - user_addr_t vm_taddr __attribute((aligned(8))); /* user virtual address of text XXX */ - user_addr_t vm_daddr; /* user virtual address of data XXX */ - user_addr_t vm_maxsaddr; /* user VA at max stack growth */ + int vm_refcnt; /* number of references */ + user_addr_t vm_shm __attribute((aligned(8))); /* SYS5 shared memory private data XXX */ + segsz_t vm_rssize; /* current resident set size in pages */ + segsz_t vm_swrss; /* resident set size before last swap */ + segsz_t vm_tsize; /* text size (pages) XXX */ + segsz_t vm_dsize; /* data size (pages) XXX */ + segsz_t vm_ssize; /* stack size (pages) */ + 
user_addr_t vm_taddr __attribute((aligned(8))); /* user virtual address of text XXX */ + user_addr_t vm_daddr; /* user virtual address of data XXX */ + user_addr_t vm_maxsaddr; /* user VA at max stack growth */ }; #endif /* KERNEL */ @@ -143,10 +143,10 @@ struct user_vmspace { /* just to keep kinfo_proc happy */ /* NOTE: Pointer fields are size variant for LP64 */ struct vmspace { - int32_t dummy; - caddr_t dummy2; - int32_t dummy3[5]; - caddr_t dummy4[3]; + int32_t dummy; + caddr_t dummy2; + int32_t dummy3[5]; + caddr_t dummy4[3]; }; #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/sys/vmmeter.h b/bsd/sys/vmmeter.h index f1041c842..3b99464b1 100644 --- a/bsd/sys/vmmeter.h +++ b/bsd/sys/vmmeter.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -62,7 +62,7 @@ */ #ifndef _SYS_VMMETER_H_ -#define _SYS_VMMETER_H_ +#define _SYS_VMMETER_H_ #include @@ -74,69 +74,68 @@ struct vmmeter { /* * General system activity. */ - unsigned int v_swtch; /* context switches */ - unsigned int v_trap; /* calls to trap */ - unsigned int v_syscall; /* calls to syscall() */ - unsigned int v_intr; /* device interrupts */ - unsigned int v_soft; /* software interrupts */ - unsigned int v_faults; /* total faults taken */ + unsigned int v_swtch; /* context switches */ + unsigned int v_trap; /* calls to trap */ + unsigned int v_syscall; /* calls to syscall() */ + unsigned int v_intr; /* device interrupts */ + unsigned int v_soft; /* software interrupts */ + unsigned int v_faults; /* total faults taken */ /* * Virtual memory activity. 
*/ - unsigned int v_lookups; /* object cache lookups */ - unsigned int v_hits; /* object cache hits */ - unsigned int v_vm_faults; /* number of address memory faults */ - unsigned int v_cow_faults; /* number of copy-on-writes */ - unsigned int v_swpin; /* swapins */ - unsigned int v_swpout; /* swapouts */ - unsigned int v_pswpin; /* pages swapped in */ - unsigned int v_pswpout; /* pages swapped out */ - unsigned int v_pageins; /* number of pageins */ - unsigned int v_pageouts; /* number of pageouts */ - unsigned int v_pgpgin; /* pages paged in */ - unsigned int v_pgpgout; /* pages paged out */ - unsigned int v_intrans; /* intransit blocking page faults */ - unsigned int v_reactivated; /* number of pages reactivated from free list */ - unsigned int v_rev; /* revolutions of the hand */ - unsigned int v_scan; /* scans in page out daemon */ - unsigned int v_dfree; /* pages freed by daemon */ - unsigned int v_pfree; /* pages freed by exiting processes */ - unsigned int v_zfod; /* pages zero filled on demand */ - unsigned int v_nzfod; /* number of zfod's created */ + unsigned int v_lookups; /* object cache lookups */ + unsigned int v_hits; /* object cache hits */ + unsigned int v_vm_faults; /* number of address memory faults */ + unsigned int v_cow_faults; /* number of copy-on-writes */ + unsigned int v_swpin; /* swapins */ + unsigned int v_swpout; /* swapouts */ + unsigned int v_pswpin; /* pages swapped in */ + unsigned int v_pswpout; /* pages swapped out */ + unsigned int v_pageins; /* number of pageins */ + unsigned int v_pageouts; /* number of pageouts */ + unsigned int v_pgpgin; /* pages paged in */ + unsigned int v_pgpgout; /* pages paged out */ + unsigned int v_intrans; /* intransit blocking page faults */ + unsigned int v_reactivated; /* number of pages reactivated from free list */ + unsigned int v_rev; /* revolutions of the hand */ + unsigned int v_scan; /* scans in page out daemon */ + unsigned int v_dfree; /* pages freed by daemon */ + unsigned int v_pfree; /* pages freed by exiting processes */ + unsigned int v_zfod; /* pages zero filled on demand */ + unsigned int v_nzfod; /* number of zfod's created */ /* * Distribution of page usages. 
*/ - unsigned int v_page_size; /* page size in bytes */ - unsigned int v_kernel_pages; /* number of pages in use by kernel */ - unsigned int v_free_target; /* number of pages desired free */ - unsigned int v_free_min; /* minimum number of pages desired free */ - unsigned int v_free_count; /* number of pages free */ - unsigned int v_wire_count; /* number of pages wired down */ - unsigned int v_active_count; /* number of pages active */ + unsigned int v_page_size; /* page size in bytes */ + unsigned int v_kernel_pages; /* number of pages in use by kernel */ + unsigned int v_free_target; /* number of pages desired free */ + unsigned int v_free_min; /* minimum number of pages desired free */ + unsigned int v_free_count; /* number of pages free */ + unsigned int v_wire_count; /* number of pages wired down */ + unsigned int v_active_count; /* number of pages active */ unsigned int v_inactive_target; /* number of pages desired inactive */ unsigned int v_inactive_count; /* number of pages inactive */ }; /* systemwide totals computed every five seconds */ -struct vmtotal -{ - int16_t t_rq; /* length of the run queue */ - int16_t t_dw; /* jobs in ``disk wait'' (neg priority) */ - int16_t t_pw; /* jobs in page wait */ - int16_t t_sl; /* jobs sleeping in core */ - int16_t t_sw; /* swapped out runnable/short block jobs */ - int32_t t_vm; /* total virtual memory */ - int32_t t_avm; /* active virtual memory */ - int32_t t_rm; /* total real memory in use */ - int32_t t_arm; /* active real memory */ - int32_t t_vmshr; /* shared virtual memory */ - int32_t t_avmshr; /* active shared virtual memory */ - int32_t t_rmshr; /* shared real memory */ - int32_t t_armshr; /* active shared real memory */ - int32_t t_free; /* free memory pages */ +struct vmtotal { + int16_t t_rq; /* length of the run queue */ + int16_t t_dw; /* jobs in ``disk wait'' (neg priority) */ + int16_t t_pw; /* jobs in page wait */ + int16_t t_sl; /* jobs sleeping in core */ + int16_t t_sw; /* swapped out runnable/short block jobs */ + int32_t t_vm; /* total virtual memory */ + int32_t t_avm; /* active virtual memory */ + int32_t t_rm; /* total real memory in use */ + int32_t t_arm; /* active real memory */ + int32_t t_vmshr; /* shared virtual memory */ + int32_t t_avmshr; /* active shared virtual memory */ + int32_t t_rmshr; /* shared real memory */ + int32_t t_armshr; /* active shared real memory */ + int32_t t_free; /* free memory pages */ }; #ifdef KERNEL -extern struct vmtotal total; +extern struct vmtotal total; #endif #endif /*__APPLE_API_OBSOLETE */ diff --git a/bsd/sys/vmparam.h b/bsd/sys/vmparam.h index 19f781a9a..81bfe320e 100644 --- a/bsd/sys/vmparam.h +++ b/bsd/sys/vmparam.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
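/*
 * [Illustrative aside, not part of the patch] struct vmmeter/vmtotal above
 * sit under __APPLE_API_OBSOLETE; on modern Darwin the equivalent counters
 * are usually read via Mach's host_statistics64(). Minimal userland sketch:
 */
#include <mach/mach.h>
#include <stdio.h>

int
main(void)
{
	vm_statistics64_data_t vs;
	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;

	if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
	    (host_info64_t)&vs, &count) != KERN_SUCCESS) {
		return 1;
	}
	printf("free %llu active %llu wired %llu pageins %llu pageouts %llu\n",
	    (unsigned long long)vs.free_count,
	    (unsigned long long)vs.active_count,
	    (unsigned long long)vs.wire_count,
	    (unsigned long long)vs.pageins,
	    (unsigned long long)vs.pageouts);
	return 0;
}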
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * Copyright (c) 1988 Carnegie-Mellon University @@ -36,7 +36,7 @@ * HISTORY */ -#ifndef _SYS_VMPARAM_H_ +#ifndef _SYS_VMPARAM_H_ #define _SYS_VMPARAM_H_ /* @@ -45,4 +45,4 @@ #include -#endif /* _SYS_VMPARAM_H_ */ +#endif /* _SYS_VMPARAM_H_ */ diff --git a/bsd/sys/vnioctl.h b/bsd/sys/vnioctl.h index 12e1b4741..88e33ce75 100644 --- a/bsd/sys/vnioctl.h +++ b/bsd/sys/vnioctl.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -80,30 +80,30 @@ * Ioctl definitions for file (vnode) disk pseudo-device. */ -#define _PATH_VNTAB "/etc/vntab" /* default config file */ +#define _PATH_VNTAB "/etc/vntab" /* default config file */ typedef enum { vncontrol_readwrite_io_e = 0 } vncontrol_t; struct vn_ioctl { - char * vn_file; /* pathname of file to mount */ - int vn_size; /* (returned) size of disk */ - vncontrol_t vn_control; + char * vn_file; /* pathname of file to mount */ + int vn_size; /* (returned) size of disk */ + vncontrol_t vn_control; }; #ifdef KERNEL_PRIVATE struct vn_ioctl_64 { - u_int64_t vn_file; /* pathname of file to mount */ - int vn_size; /* (returned) size of disk */ - vncontrol_t vn_control; + u_int64_t vn_file; /* pathname of file to mount */ + int vn_size; /* (returned) size of disk */ + vncontrol_t vn_control; }; struct vn_ioctl_32 { - u_int32_t vn_file; /* pathname of file to mount */ - int vn_size; /* (returned) size of disk */ - vncontrol_t vn_control; + u_int32_t vn_file; /* pathname of file to mount */ + int vn_size; /* (returned) size of disk */ + vncontrol_t vn_control; }; #endif /* KERNEL_PRIVATE */ @@ -114,30 +114,30 @@ struct vn_ioctl_32 { * an VNIOCCLR must be used to reset a configuration. An attempt to * VNIOCSET an already active unit will return EBUSY. 
*/ -#define VNIOCATTACH _IOWR('F', 0, struct vn_ioctl) /* attach file */ -#define VNIOCDETACH _IOWR('F', 1, struct vn_ioctl) /* detach disk */ -#define VNIOCGSET _IOWR('F', 2, u_int32_t ) /* set global option */ -#define VNIOCGCLEAR _IOWR('F', 3, u_int32_t ) /* reset --//-- */ -#define VNIOCUSET _IOWR('F', 4, u_int32_t ) /* set unit option */ -#define VNIOCUCLEAR _IOWR('F', 5, u_int32_t ) /* reset --//-- */ -#define VNIOCSHADOW _IOWR('F', 6, struct vn_ioctl) /* attach shadow */ +#define VNIOCATTACH _IOWR('F', 0, struct vn_ioctl) /* attach file */ +#define VNIOCDETACH _IOWR('F', 1, struct vn_ioctl) /* detach disk */ +#define VNIOCGSET _IOWR('F', 2, u_int32_t ) /* set global option */ +#define VNIOCGCLEAR _IOWR('F', 3, u_int32_t ) /* reset --//-- */ +#define VNIOCUSET _IOWR('F', 4, u_int32_t ) /* set unit option */ +#define VNIOCUCLEAR _IOWR('F', 5, u_int32_t ) /* reset --//-- */ +#define VNIOCSHADOW _IOWR('F', 6, struct vn_ioctl) /* attach shadow */ #ifdef KERNEL_PRIVATE -#define VNIOCATTACH64 _IOWR('F', 0, struct vn_ioctl_64) /* attach file - LP64 */ -#define VNIOCDETACH64 _IOWR('F', 1, struct vn_ioctl_64) /* detach disk - LP64 */ -#define VNIOCSHADOW64 _IOWR('F', 6, struct vn_ioctl_64) /* attach shadow - LP64 */ +#define VNIOCATTACH64 _IOWR('F', 0, struct vn_ioctl_64) /* attach file - LP64 */ +#define VNIOCDETACH64 _IOWR('F', 1, struct vn_ioctl_64) /* detach disk - LP64 */ +#define VNIOCSHADOW64 _IOWR('F', 6, struct vn_ioctl_64) /* attach shadow - LP64 */ #ifdef __LP64__ -#define VNIOCATTACH32 _IOWR('F', 0, struct vn_ioctl_32) /* attach file - U32 version for K64 */ -#define VNIOCDETACH32 _IOWR('F', 1, struct vn_ioctl_32) /* detach disk - U32 version for K64 */ -#define VNIOCSHADOW32 _IOWR('F', 6, struct vn_ioctl_32) /* attach shadow - U32 version for K64 */ +#define VNIOCATTACH32 _IOWR('F', 0, struct vn_ioctl_32) /* attach file - U32 version for K64 */ +#define VNIOCDETACH32 _IOWR('F', 1, struct vn_ioctl_32) /* detach disk - U32 version for K64 */ +#define VNIOCSHADOW32 _IOWR('F', 6, struct vn_ioctl_32) /* attach shadow - U32 version for K64 */ #endif #endif /* KERNEL_PRIVATE */ -#define VN_LABELS 0x1 /* Use disk(/slice) labels */ -#define VN_FOLLOW 0x2 /* Debug flow in vn driver */ -#define VN_DEBUG 0x4 /* Debug data in vn driver */ -#define VN_IO 0x8 /* Debug I/O in vn driver */ -#define VN_DONTCLUSTER 0x10 /* Don't cluster */ -#define VN_RESERVE 0x20 /* Pre-reserve swap */ +#define VN_LABELS 0x1 /* Use disk(/slice) labels */ +#define VN_FOLLOW 0x2 /* Debug flow in vn driver */ +#define VN_DEBUG 0x4 /* Debug data in vn driver */ +#define VN_IO 0x8 /* Debug I/O in vn driver */ +#define VN_DONTCLUSTER 0x10 /* Don't cluster */ +#define VN_RESERVE 0x20 /* Pre-reserve swap */ -#endif /* _SYS_VNIOCTL_H_*/ +#endif /* _SYS_VNIOCTL_H_*/ diff --git a/bsd/sys/vnode.h b/bsd/sys/vnode.h index b7aa1efe8..a21365bfe 100644 --- a/bsd/sys/vnode.h +++ b/bsd/sys/vnode.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
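/*
 * [Illustrative aside, not part of the patch] Sketch of the VNIOCATTACH
 * ioctl defined above, which backs a vn pseudo-disk with a file. The
 * "/dev/vn0" node name and the need for root privileges are assumptions
 * (the vn driver is not built into all configurations); error handling is
 * abbreviated:
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/vnioctl.h>
#include <unistd.h>

int
attach_vn(const char *path)
{
	struct vn_ioctl vio;
	int fd = open("/dev/vn0", O_RDONLY);    /* control node (assumed) */

	if (fd < 0)
		return -1;
	memset(&vio, 0, sizeof(vio));
	vio.vn_file = (char *)path;             /* file backing the disk */
	if (ioctl(fd, VNIOCATTACH, &vio) < 0) {
		close(fd);
		return -1;
	}
	/* vio.vn_size now holds the size reported back by the driver. */
	return fd;                              /* VNIOCDETACH to tear down */
}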
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -60,7 +60,7 @@ * * @(#)vnode.h 8.17 (Berkeley) 5/20/95 */ - + #ifndef _VNODE_H_ #define _VNODE_H_ @@ -81,13 +81,13 @@ /* * Vnode types. VNON means no type. */ -enum vtype { +enum vtype { /* 0 */ - VNON, + VNON, /* 1 - 5 */ - VREG, VDIR, VBLK, VCHR, VLNK, + VREG, VDIR, VBLK, VCHR, VLNK, /* 6 - 10 */ - VSOCK, VFIFO, VBAD, VSTR, VCPLX + VSOCK, VFIFO, VBAD, VSTR, VCPLX }; /* @@ -95,7 +95,7 @@ enum vtype { * These are for the benefit of external programs only (e.g., pstat) * and should NEVER be inspected by the kernel. */ -enum vtagtype { +enum vtagtype { /* 0 */ VT_NON, /* 1 reserved, overlaps with (CTL_VFS, VFS_NUMMNTOPS) */ @@ -103,11 +103,11 @@ enum vtagtype { /* 2 - 5 */ VT_NFS, VT_MFS, VT_MSDOSFS, VT_LFS, /* 6 - 10 */ - VT_LOFS, VT_FDESC, VT_PORTAL, VT_NULL, VT_UMAP, + VT_LOFS, VT_FDESC, VT_PORTAL, VT_NULL, VT_UMAP, /* 11 - 15 */ VT_KERNFS, VT_PROCFS, VT_AFS, VT_ISOFS, VT_MOCKFS, /* 16 - 20 */ - VT_HFS, VT_ZFS, VT_DEVFS, VT_WEBDAV, VT_UDF, + VT_HFS, VT_ZFS, VT_DEVFS, VT_WEBDAV, VT_UDF, /* 21 - 25 */ VT_AFP, VT_CDDA, VT_CIFS, VT_OTHER, VT_APFS }; @@ -115,25 +115,25 @@ enum vtagtype { /* * flags for VNOP_BLOCKMAP */ -#define VNODE_READ 0x01 -#define VNODE_WRITE 0x02 +#define VNODE_READ 0x01 +#define VNODE_WRITE 0x02 #define VNODE_BLOCKMAP_NO_TRACK 0x04 // APFS Fusion: Do not track this request /* flags for VNOP_ALLOCATE */ -#define PREALLOCATE 0x00000001 /* preallocate allocation blocks */ -#define ALLOCATECONTIG 0x00000002 /* allocate contigious space */ -#define ALLOCATEALL 0x00000004 /* allocate all requested space */ - /* or no space at all */ -#define FREEREMAINDER 0x00000008 /* deallocate allocated but */ - /* unfilled blocks */ -#define ALLOCATEFROMPEOF 0x00000010 /* allocate from the physical eof */ -#define ALLOCATEFROMVOL 0x00000020 /* allocate from the volume offset */ +#define PREALLOCATE 0x00000001 /* preallocate allocation blocks */ +#define ALLOCATECONTIG 0x00000002 /* allocate contigious space */ +#define ALLOCATEALL 0x00000004 /* allocate all requested space */ +/* or no space at all */ +#define FREEREMAINDER 0x00000008 /* deallocate allocated but */ +/* unfilled blocks */ +#define ALLOCATEFROMPEOF 0x00000010 /* allocate from the physical eof */ +#define ALLOCATEFROMVOL 0x00000020 /* allocate from the volume offset */ /* * Token indicating no attribute value yet assigned. some user source uses this */ -#define VNOVAL (-1) +#define VNOVAL (-1) #ifdef KERNEL @@ -141,34 +141,34 @@ enum vtagtype { /* * Flags for ioflag. 
*/ -#define IO_UNIT 0x0001 /* do I/O as atomic unit */ -#define IO_APPEND 0x0002 /* append write to end */ -#define IO_SYNC 0x0004 /* do I/O synchronously */ -#define IO_NODELOCKED 0x0008 /* underlying node already locked */ -#define IO_NDELAY 0x0010 /* FNDELAY flag set in file table */ -#define IO_NOZEROFILL 0x0020 /* F_SETSIZE fcntl uses to prevent zero filling */ +#define IO_UNIT 0x0001 /* do I/O as atomic unit */ +#define IO_APPEND 0x0002 /* append write to end */ +#define IO_SYNC 0x0004 /* do I/O synchronously */ +#define IO_NODELOCKED 0x0008 /* underlying node already locked */ +#define IO_NDELAY 0x0010 /* FNDELAY flag set in file table */ +#define IO_NOZEROFILL 0x0020 /* F_SETSIZE fcntl uses to prevent zero filling */ #ifdef XNU_KERNEL_PRIVATE -#define IO_REVOKE IO_NOZEROFILL /* revoked close for tty, will Not be used in conjunction */ +#define IO_REVOKE IO_NOZEROFILL /* revoked close for tty, will Not be used in conjunction */ #endif /* XNU_KERNEL_PRIVATE */ -#define IO_TAILZEROFILL 0x0040 /* zero fills at the tail of write */ -#define IO_HEADZEROFILL 0x0080 /* zero fills at the head of write */ -#define IO_NOZEROVALID 0x0100 /* do not zero fill if valid page */ -#define IO_NOZERODIRTY 0x0200 /* do not zero fill if page is dirty */ -#define IO_CLOSE 0x0400 /* I/O issued from close path */ -#define IO_NOCACHE 0x0800 /* same effect as VNOCACHE_DATA, but only for this 1 I/O */ -#define IO_RAOFF 0x1000 /* same effect as VRAOFF, but only for this 1 I/O */ -#define IO_DEFWRITE 0x2000 /* defer write if vfs.defwrite is set */ -#define IO_PASSIVE 0x4000 /* this I/O is marked as background I/O so it won't throttle Throttleable I/O */ +#define IO_TAILZEROFILL 0x0040 /* zero fills at the tail of write */ +#define IO_HEADZEROFILL 0x0080 /* zero fills at the head of write */ +#define IO_NOZEROVALID 0x0100 /* do not zero fill if valid page */ +#define IO_NOZERODIRTY 0x0200 /* do not zero fill if page is dirty */ +#define IO_CLOSE 0x0400 /* I/O issued from close path */ +#define IO_NOCACHE 0x0800 /* same effect as VNOCACHE_DATA, but only for this 1 I/O */ +#define IO_RAOFF 0x1000 /* same effect as VRAOFF, but only for this 1 I/O */ +#define IO_DEFWRITE 0x2000 /* defer write if vfs.defwrite is set */ +#define IO_PASSIVE 0x4000 /* this I/O is marked as background I/O so it won't throttle Throttleable I/O */ #define IO_BACKGROUND IO_PASSIVE /* used for backward compatibility. to be removed after IO_BACKGROUND is no longer - * used by DiskImages in-kernel mode */ -#define IO_NOAUTH 0x8000 /* No authorization checks. */ -#define IO_NODIRECT 0x10000 /* don't use direct synchronous writes if IO_NOCACHE is specified */ -#define IO_ENCRYPTED 0x20000 /* Retrieve encrypted blocks from the filesystem */ -#define IO_RETURN_ON_THROTTLE 0x40000 -#define IO_SINGLE_WRITER 0x80000 -#define IO_SYSCALL_DISPATCH 0x100000 /* I/O was originated from a file table syscall */ -#define IO_SWAP_DISPATCH 0x200000 /* I/O was originated from the swap layer */ -#define IO_SKIP_ENCRYPTION 0x400000 /* Skips en(de)cryption on the IO. Must be initiated from kernel */ + * used by DiskImages in-kernel mode */ +#define IO_NOAUTH 0x8000 /* No authorization checks. 
*/ +#define IO_NODIRECT 0x10000 /* don't use direct synchronous writes if IO_NOCACHE is specified */ +#define IO_ENCRYPTED 0x20000 /* Retrieve encrypted blocks from the filesystem */ +#define IO_RETURN_ON_THROTTLE 0x40000 +#define IO_SINGLE_WRITER 0x80000 +#define IO_SYSCALL_DISPATCH 0x100000 /* I/O was originated from a file table syscall */ +#define IO_SWAP_DISPATCH 0x200000 /* I/O was originated from the swap layer */ +#define IO_SKIP_ENCRYPTION 0x400000 /* Skips en(de)cryption on the IO. Must be initiated from kernel */ #define IO_EVTONLY 0x800000 /* the i/o is being done on an fd that's marked O_EVTONLY */ /* @@ -179,81 +179,81 @@ struct componentname { /* * Arguments to lookup. */ - uint32_t cn_nameiop; /* lookup operation */ - uint32_t cn_flags; /* flags (see below) */ + uint32_t cn_nameiop; /* lookup operation */ + uint32_t cn_flags; /* flags (see below) */ #ifdef BSD_KERNEL_PRIVATE - vfs_context_t cn_context; - struct nameidata *cn_ndp; /* pointer back to nameidata */ + vfs_context_t cn_context; + struct nameidata *cn_ndp; /* pointer back to nameidata */ /* XXX use of these defines are deprecated */ -#define cn_proc (cn_context->vc_proc + 0) /* non-lvalue */ -#define cn_cred (cn_context->vc_ucred + 0) /* non-lvalue */ +#define cn_proc (cn_context->vc_proc + 0) /* non-lvalue */ +#define cn_cred (cn_context->vc_ucred + 0) /* non-lvalue */ #else - void * cn_reserved1; /* use vfs_context_t */ - void * cn_reserved2; /* use vfs_context_t */ + void * cn_reserved1; /* use vfs_context_t */ + void * cn_reserved2; /* use vfs_context_t */ #endif /* * Shared between lookup and commit routines. */ - char *cn_pnbuf; /* pathname buffer */ - int cn_pnlen; /* length of allocated buffer */ - char *cn_nameptr; /* pointer to looked up name */ - int cn_namelen; /* length of looked up component */ - uint32_t cn_hash; /* hash value of looked up name */ - uint32_t cn_consume; /* chars to consume in lookup() */ + char *cn_pnbuf; /* pathname buffer */ + int cn_pnlen; /* length of allocated buffer */ + char *cn_nameptr; /* pointer to looked up name */ + int cn_namelen; /* length of looked up component */ + uint32_t cn_hash; /* hash value of looked up name */ + uint32_t cn_consume; /* chars to consume in lookup() */ }; /* * component name operations (for VNOP_LOOKUP) */ -#define LOOKUP 0 /* perform name lookup only */ -#define CREATE 1 /* setup for file creation */ -#define DELETE 2 /* setup for file deletion */ -#define RENAME 3 /* setup for file renaming */ -#define OPMASK 3 /* mask for operation */ +#define LOOKUP 0 /* perform name lookup only */ +#define CREATE 1 /* setup for file creation */ +#define DELETE 2 /* setup for file deletion */ +#define RENAME 3 /* setup for file renaming */ +#define OPMASK 3 /* mask for operation */ /* * component name operational modifier flags */ -#define FOLLOW 0x00000040 /* follow symbolic links */ +#define FOLLOW 0x00000040 /* follow symbolic links */ /* * component name parameter descriptors. */ -#define ISDOTDOT 0x00002000 /* current component name is .. */ -#define MAKEENTRY 0x00004000 /* entry is to be added to name cache */ -#define ISLASTCN 0x00008000 /* this is last component of pathname */ +#define ISDOTDOT 0x00002000 /* current component name is .. 
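With the full ioflag set now defined, here is a hedged sketch of how a filesystem's write vnop commonly reacts to the most frequently tested bits; the myfs_* helpers are hypothetical stand-ins for the filesystem's real size, write, and flush paths:

static off_t myfs_filesize(vnode_t vp);                          /* hypothetical */
static int myfs_do_write(vnode_t vp, uio_t uio, int ioflag,
    vfs_context_t ctx);                                          /* hypothetical */
static int myfs_flush(vnode_t vp, vfs_context_t ctx);            /* hypothetical */

static int
myfs_write_sketch(vnode_t vp, uio_t uio, int ioflag, vfs_context_t ctx)
{
	int error;

	if (ioflag & IO_APPEND) {
		/* Append writes always start at the current end of file. */
		uio_setoffset(uio, myfs_filesize(vp));
	}

	/*
	 * IO_NOCACHE / IO_RAOFF apply VNOCACHE_DATA / VRAOFF semantics to
	 * this one request; real code usually forwards them to the cluster
	 * layer rather than testing them individually.
	 */
	error = myfs_do_write(vp, uio, ioflag, ctx);

	if (error == 0 && (ioflag & IO_SYNC)) {
		/* Synchronous semantics: push the data before returning. */
		error = myfs_flush(vp, ctx);
	}
	return error;
}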
*/ +#define MAKEENTRY 0x00004000 /* entry is to be added to name cache */ +#define ISLASTCN 0x00008000 /* this is last component of pathname */ /* The following structure specifies a vnode for creation */ struct vnode_fsparam { - struct mount * vnfs_mp; /* mount point to which this vnode_t is part of */ - enum vtype vnfs_vtype; /* vnode type */ - const char * vnfs_str; /* File system Debug aid */ - struct vnode * vnfs_dvp; /* The parent vnode */ - void * vnfs_fsnode; /* inode */ - int (**vnfs_vops)(void *); /* vnode dispatch table */ - int vnfs_markroot; /* is this a root vnode in FS (not a system wide one) */ - int vnfs_marksystem; /* is a system vnode */ - dev_t vnfs_rdev; /* dev_t for block or char vnodes */ - off_t vnfs_filesize; /* that way no need for getattr in UBC */ + struct mount * vnfs_mp; /* mount point to which this vnode_t is part of */ + enum vtype vnfs_vtype; /* vnode type */ + const char * vnfs_str; /* File system Debug aid */ + struct vnode * vnfs_dvp; /* The parent vnode */ + void * vnfs_fsnode; /* inode */ + int(**vnfs_vops)(void *); /* vnode dispatch table */ + int vnfs_markroot; /* is this a root vnode in FS (not a system wide one) */ + int vnfs_marksystem; /* is a system vnode */ + dev_t vnfs_rdev; /* dev_t for block or char vnodes */ + off_t vnfs_filesize; /* that way no need for getattr in UBC */ struct componentname * vnfs_cnp; /* component name to add to namecache */ - uint32_t vnfs_flags; /* flags */ + uint32_t vnfs_flags; /* flags */ }; -#define VNFS_NOCACHE 0x01 /* do not add to name cache at this time */ -#define VNFS_CANTCACHE 0x02 /* never add this instance to the name cache */ -#define VNFS_ADDFSREF 0x04 /* take fs (named) reference */ +#define VNFS_NOCACHE 0x01 /* do not add to name cache at this time */ +#define VNFS_CANTCACHE 0x02 /* never add this instance to the name cache */ +#define VNFS_ADDFSREF 0x04 /* take fs (named) reference */ -#define VNCREATE_FLAVOR 0 +#define VNCREATE_FLAVOR 0 #define VCREATESIZE sizeof(struct vnode_fsparam) #ifdef KERNEL_PRIVATE /* * For use with SPI to create trigger vnodes. */ struct vnode_trigger_param; -#define VNCREATE_TRIGGER (('T' << 8) + ('V')) -#define VNCREATE_TRIGGER_SIZE sizeof(struct vnode_trigger_param) +#define VNCREATE_TRIGGER (('T' << 8) + ('V')) +#define VNCREATE_TRIGGER_SIZE sizeof(struct vnode_trigger_param) #endif /* KERNEL_PRIVATE */ @@ -265,10 +265,10 @@ struct vnode_trigger_param; */ /*! - @enum Pathname Lookup Operations - @abstract Constants defining pathname operations (passed to resolver callbacks) + * @enum Pathname Lookup Operations + * @abstract Constants defining pathname operations (passed to resolver callbacks) */ -enum path_operation { +enum path_operation { OP_LOOKUP, OP_MOUNT, OP_UNMOUNT, @@ -299,17 +299,17 @@ enum path_operation { OP_SETXATTR, OP_REMOVEXATTR, OP_LISTXATTR, - OP_MAXOP /* anything beyond previous entry is invalid */ + OP_MAXOP /* anything beyond previous entry is invalid */ }; /*! - @enum resolver status - @abstract Constants defining resolver status - @constant RESOLVER_RESOLVED the resolver has finished (typically means a successful mount) - @constant RESOLVER_NOCHANGE the resolver status didn't change - @constant RESOLVER_UNRESOLVED the resolver has finished (typically means a successful unmount) - @constant RESOLVER_ERROR the resolver encountered an error (errno passed in aux value) - @constant RESOLVER_STOP a request to destroy trigger XXX do we need this??? 
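A hedged sketch of how a VNOP_LOOKUP implementation consumes struct componentname, keying off cn_nameiop and the ISLASTCN/MAKEENTRY bits; myfs_dir_search() is a hypothetical directory-scan helper, while cache_enter() and EJUSTRETURN are existing kernel interfaces:

static int myfs_dir_search(vnode_t dvp, const char *name, int namelen,
    vnode_t *vpp, vfs_context_t ctx);                            /* hypothetical */

static int
myfs_lookup_sketch(vnode_t dvp, vnode_t *vpp, struct componentname *cnp,
    vfs_context_t ctx)
{
	uint32_t op = cnp->cn_nameiop & OPMASK;
	int found;

	found = myfs_dir_search(dvp, cnp->cn_nameptr, cnp->cn_namelen, vpp, ctx);
	if (!found) {
		/*
		 * A missing last component is expected for CREATE/RENAME;
		 * EJUSTRETURN tells VFS to carry on to VNOP_CREATE et al.
		 */
		if ((op == CREATE || op == RENAME) &&
		    (cnp->cn_flags & ISLASTCN)) {
			return EJUSTRETURN;
		}
		return ENOENT;
	}
	if (cnp->cn_flags & MAKEENTRY) {
		cache_enter(dvp, *vpp, cnp);    /* seed the VFS name cache */
	}
	return 0;
}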
+ * @enum resolver status + * @abstract Constants defining resolver status + * @constant RESOLVER_RESOLVED the resolver has finished (typically means a successful mount) + * @constant RESOLVER_NOCHANGE the resolver status didn't change + * @constant RESOLVER_UNRESOLVED the resolver has finished (typically means a successful unmount) + * @constant RESOLVER_ERROR the resolver encountered an error (errno passed in aux value) + * @constant RESOLVER_STOP a request to destroy trigger XXX do we need this??? */ enum resolver_status { RESOLVER_RESOLVED, @@ -350,89 +350,89 @@ extern int vfs_resolver_auxiliary(resolver_result_t); /*! - @typedef trigger_vnode_resolve_callback_t - @abstract function prototype for a trigger vnode resolve callback - @discussion This function is associated with a trigger vnode during a vnode create. It is - typically called when a lookup operation occurs for a trigger vnode - @param vp The trigger vnode which needs resolving - @param cnp Various data about lookup, e.g. filename and state flags - @param pop The pathname operation that initiated the lookup (see enum path_operation). - @param flags resolve flags - @param data Arbitrary data supplied by vnode trigger creator - @param ctx Context for authentication. - @return RESOLVER_RESOLVED, RESOLVER_NOCHANGE, RESOLVER_UNRESOLVED or RESOLVER_ERROR -*/ + * @typedef trigger_vnode_resolve_callback_t + * @abstract function prototype for a trigger vnode resolve callback + * @discussion This function is associated with a trigger vnode during a vnode create. It is + * typically called when a lookup operation occurs for a trigger vnode + * @param vp The trigger vnode which needs resolving + * @param cnp Various data about lookup, e.g. filename and state flags + * @param pop The pathname operation that initiated the lookup (see enum path_operation). + * @param flags resolve flags + * @param data Arbitrary data supplied by vnode trigger creator + * @param ctx Context for authentication. + * @return RESOLVER_RESOLVED, RESOLVER_NOCHANGE, RESOLVER_UNRESOLVED or RESOLVER_ERROR + */ typedef resolver_result_t (* trigger_vnode_resolve_callback_t)( - vnode_t vp, - const struct componentname * cnp, - enum path_operation pop, - int flags, - void * data, - vfs_context_t ctx); - -/*! - @typedef trigger_vnode_unresolve_callback_t - @abstract function prototype for a trigger vnode unresolve callback - @discussion This function is associated with a trigger vnode during a vnode create. It is - called to unresolve a trigger vnode (typically this means unmount). - @param vp The trigger vnode which needs unresolving - @param flags Unmount flags - @param data Arbitrary data supplied by vnode trigger creator - @param ctx Context for authentication. - @return RESOLVER_NOCHANGE, RESOLVER_UNRESOLVED or RESOLVER_ERROR -*/ + vnode_t vp, + const struct componentname * cnp, + enum path_operation pop, + int flags, + void * data, + vfs_context_t ctx); + +/*! + * @typedef trigger_vnode_unresolve_callback_t + * @abstract function prototype for a trigger vnode unresolve callback + * @discussion This function is associated with a trigger vnode during a vnode create. It is + * called to unresolve a trigger vnode (typically this means unmount). + * @param vp The trigger vnode which needs unresolving + * @param flags Unmount flags + * @param data Arbitrary data supplied by vnode trigger creator + * @param ctx Context for authentication. 
+ * @return RESOLVER_NOCHANGE, RESOLVER_UNRESOLVED or RESOLVER_ERROR + */ typedef resolver_result_t (* trigger_vnode_unresolve_callback_t)( - vnode_t vp, - int flags, - void * data, - vfs_context_t ctx); - -/*! - @typedef trigger_vnode_rearm_callback_t - @abstract function prototype for a trigger vnode rearm callback - @discussion This function is associated with a trigger vnode during a vnode create. It is - called to verify a rearm from VFS (i.e. should VFS rearm the trigger?). - @param vp The trigger vnode which needs rearming - @param flags rearm flags - @param data Arbitrary data supplied by vnode trigger creator - @param ctx Context for authentication. - @return RESOLVER_NOCHANGE or RESOLVER_ERROR -*/ + vnode_t vp, + int flags, + void * data, + vfs_context_t ctx); + +/*! + * @typedef trigger_vnode_rearm_callback_t + * @abstract function prototype for a trigger vnode rearm callback + * @discussion This function is associated with a trigger vnode during a vnode create. It is + * called to verify a rearm from VFS (i.e. should VFS rearm the trigger?). + * @param vp The trigger vnode which needs rearming + * @param flags rearm flags + * @param data Arbitrary data supplied by vnode trigger creator + * @param ctx Context for authentication. + * @return RESOLVER_NOCHANGE or RESOLVER_ERROR + */ typedef resolver_result_t (* trigger_vnode_rearm_callback_t)( - vnode_t vp, - int flags, - void * data, - vfs_context_t ctx); - -/*! - @typedef trigger_vnode_reclaim_callback_t - @abstract function prototype for a trigger vnode reclaim callback - @discussion This function is associated with a trigger vnode during a vnode create. It is - called to deallocate private callback argument data - @param vp The trigger vnode associated with the data - @param data The arbitrary data supplied by vnode trigger creator -*/ + vnode_t vp, + int flags, + void * data, + vfs_context_t ctx); + +/*! + * @typedef trigger_vnode_reclaim_callback_t + * @abstract function prototype for a trigger vnode reclaim callback + * @discussion This function is associated with a trigger vnode during a vnode create. It is + * called to deallocate private callback argument data + * @param vp The trigger vnode associated with the data + * @param data The arbitrary data supplied by vnode trigger creator + */ typedef void (* trigger_vnode_reclaim_callback_t)( - vnode_t vp, - void * data); + vnode_t vp, + void * data); /*! - @function vnode_trigger_update - @abstract Update a trigger vnode's state. - @discussion This allows a resolver to notify VFS of a state change in a trigger vnode. - @param vp The trigger vnode whose information to update. - @param result A compound resolver result value - @return EINVAL if result value is invalid or vp isn't a trigger vnode + * @function vnode_trigger_update + * @abstract Update a trigger vnode's state. + * @discussion This allows a resolver to notify VFS of a state change in a trigger vnode. + * @param vp The trigger vnode whose information to update. 
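Taken together, the callback types above are what an automounter-style filesystem supplies for a trigger vnode. A hedged sketch of a resolve callback, assuming the vfs_resolver_result(sequence, status, aux) constructor that accompanies vfs_resolver_status()/vfs_resolver_auxiliary() in this header; my_automount() and the sequence handling are illustrative:

static int my_automount(vnode_t vp, void *data, vfs_context_t ctx); /* hypothetical */

static resolver_result_t
my_resolve(vnode_t vp, const struct componentname *cnp,
    enum path_operation pop, int flags, void *data, vfs_context_t ctx)
{
#pragma unused(cnp, pop, flags)
	static uint32_t my_seq;         /* illustrative sequence counter */
	int error;

	error = my_automount(vp, data, ctx);
	if (error != 0) {
		/* errno travels in the aux value, per RESOLVER_ERROR above. */
		return vfs_resolver_result(++my_seq, RESOLVER_ERROR, error);
	}
	return vfs_resolver_result(++my_seq, RESOLVER_RESOLVED, 0);
}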
+ * @param result A compound resolver result value + * @return EINVAL if result value is invalid or vp isn't a trigger vnode */ extern int vnode_trigger_update(vnode_t vp, resolver_result_t result); struct vnode_trigger_info { - trigger_vnode_resolve_callback_t vti_resolve_func; - trigger_vnode_unresolve_callback_t vti_unresolve_func; - trigger_vnode_rearm_callback_t vti_rearm_func; - trigger_vnode_reclaim_callback_t vti_reclaim_func; - void * vti_data; /* auxiliary data (optional) */ - uint32_t vti_flags; /* optional flags (see below) */ + trigger_vnode_resolve_callback_t vti_resolve_func; + trigger_vnode_unresolve_callback_t vti_unresolve_func; + trigger_vnode_rearm_callback_t vti_rearm_func; + trigger_vnode_reclaim_callback_t vti_reclaim_func; + void * vti_data; /* auxiliary data (optional) */ + uint32_t vti_flags; /* optional flags (see below) */ }; /* @@ -449,13 +449,13 @@ struct vnode_trigger_info { * ENOMEM */ struct vnode_trigger_param { - struct vnode_fsparam vnt_params; /* same as for VNCREATE_FLAVOR */ - trigger_vnode_resolve_callback_t vnt_resolve_func; - trigger_vnode_unresolve_callback_t vnt_unresolve_func; - trigger_vnode_rearm_callback_t vnt_rearm_func; - trigger_vnode_reclaim_callback_t vnt_reclaim_func; - void * vnt_data; /* auxiliary data (optional) */ - uint32_t vnt_flags; /* optional flags (see below) */ + struct vnode_fsparam vnt_params; /* same as for VNCREATE_FLAVOR */ + trigger_vnode_resolve_callback_t vnt_resolve_func; + trigger_vnode_unresolve_callback_t vnt_unresolve_func; + trigger_vnode_rearm_callback_t vnt_rearm_func; + trigger_vnode_reclaim_callback_t vnt_reclaim_func; + void * vnt_data; /* auxiliary data (optional) */ + uint32_t vnt_flags; /* optional flags (see below) */ }; /* @@ -468,9 +468,9 @@ struct vnode_trigger_param { * A trigger vnode instance that doesn't directly trigger a mount, * instead it triggers the mounting of sub-trigger nodes. */ -#define VNT_AUTO_REARM (1 << 0) -#define VNT_NO_DIRECT_MOUNT (1 << 1) -#define VNT_VALID_MASK (VNT_AUTO_REARM | VNT_NO_DIRECT_MOUNT) +#define VNT_AUTO_REARM (1 << 0) +#define VNT_NO_DIRECT_MOUNT (1 << 1) +#define VNT_VALID_MASK (VNT_AUTO_REARM | VNT_NO_DIRECT_MOUNT) #endif /* KERNEL_PRIVATE */ @@ -484,250 +484,250 @@ struct vnode_trigger_param { * Note that this structure may be extended, but existing fields must not move. 
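A hedged sketch of creating a trigger vnode with the VNCREATE_TRIGGER flavor; vnt_params is filled exactly like an ordinary struct vnode_fsparam, my_resolve is the callback sketched above, and myfs_vnodeop_p is a hypothetical vnode operations vector:

extern int (**myfs_vnodeop_p)(void *);          /* hypothetical op vector */

static int
my_make_trigger(mount_t mp, vnode_t dvp, void *fsnode,
    struct componentname *cnp, vnode_t *vpp)
{
	struct vnode_trigger_param vtp;

	bzero(&vtp, sizeof(vtp));
	vtp.vnt_params.vnfs_mp = mp;
	vtp.vnt_params.vnfs_vtype = VDIR;       /* mount-point directory */
	vtp.vnt_params.vnfs_str = "myfs";       /* debug aid */
	vtp.vnt_params.vnfs_dvp = dvp;
	vtp.vnt_params.vnfs_fsnode = fsnode;
	vtp.vnt_params.vnfs_vops = myfs_vnodeop_p;
	vtp.vnt_params.vnfs_cnp = cnp;

	vtp.vnt_resolve_func = my_resolve;      /* sketched above */
	/* A production resolver also supplies unresolve/rearm/reclaim. */
	vtp.vnt_flags = VNT_AUTO_REARM;

	return vnode_create(VNCREATE_TRIGGER, VNCREATE_TRIGGER_SIZE, &vtp, vpp);
}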
*/ -#define VATTR_INIT(v) do {(v)->va_supported = (v)->va_active = 0ll; (v)->va_vaflags = 0; } while(0) -#define VATTR_SET_ACTIVE(v, a) ((v)->va_active |= VNODE_ATTR_ ## a) -#define VATTR_SET_SUPPORTED(v, a) ((v)->va_supported |= VNODE_ATTR_ ## a) -#define VATTR_IS_SUPPORTED(v, a) ((v)->va_supported & VNODE_ATTR_ ## a) -#define VATTR_CLEAR_ACTIVE(v, a) ((v)->va_active &= ~VNODE_ATTR_ ## a) -#define VATTR_CLEAR_SUPPORTED(v, a) ((v)->va_supported &= ~VNODE_ATTR_ ## a) -#define VATTR_CLEAR_SUPPORTED_ALL(v) ((v)->va_supported = 0) -#define VATTR_IS_ACTIVE(v, a) ((v)->va_active & VNODE_ATTR_ ## a) -#define VATTR_ALL_SUPPORTED(v) (((v)->va_active & (v)->va_supported) == (v)->va_active) -#define VATTR_INACTIVE_SUPPORTED(v) do {(v)->va_active &= ~(v)->va_supported; (v)->va_supported = 0;} while(0) -#define VATTR_SET(v, a, x) do { (v)-> a = (x); VATTR_SET_ACTIVE(v, a);} while(0) -#define VATTR_WANTED(v, a) VATTR_SET_ACTIVE(v, a) -#define VATTR_RETURN(v, a, x) do { (v)-> a = (x); VATTR_SET_SUPPORTED(v, a);} while(0) -#define VATTR_NOT_RETURNED(v, a) (VATTR_IS_ACTIVE(v, a) && !VATTR_IS_SUPPORTED(v, a)) +#define VATTR_INIT(v) do {(v)->va_supported = (v)->va_active = 0ll; (v)->va_vaflags = 0; } while(0) +#define VATTR_SET_ACTIVE(v, a) ((v)->va_active |= VNODE_ATTR_ ## a) +#define VATTR_SET_SUPPORTED(v, a) ((v)->va_supported |= VNODE_ATTR_ ## a) +#define VATTR_IS_SUPPORTED(v, a) ((v)->va_supported & VNODE_ATTR_ ## a) +#define VATTR_CLEAR_ACTIVE(v, a) ((v)->va_active &= ~VNODE_ATTR_ ## a) +#define VATTR_CLEAR_SUPPORTED(v, a) ((v)->va_supported &= ~VNODE_ATTR_ ## a) +#define VATTR_CLEAR_SUPPORTED_ALL(v) ((v)->va_supported = 0) +#define VATTR_IS_ACTIVE(v, a) ((v)->va_active & VNODE_ATTR_ ## a) +#define VATTR_ALL_SUPPORTED(v) (((v)->va_active & (v)->va_supported) == (v)->va_active) +#define VATTR_INACTIVE_SUPPORTED(v) do {(v)->va_active &= ~(v)->va_supported; (v)->va_supported = 0;} while(0) +#define VATTR_SET(v, a, x) do { (v)-> a = (x); VATTR_SET_ACTIVE(v, a);} while(0) +#define VATTR_WANTED(v, a) VATTR_SET_ACTIVE(v, a) +#define VATTR_RETURN(v, a, x) do { (v)-> a = (x); VATTR_SET_SUPPORTED(v, a);} while(0) +#define VATTR_NOT_RETURNED(v, a) (VATTR_IS_ACTIVE(v, a) && !VATTR_IS_SUPPORTED(v, a)) /* * Two macros to simplify conditional checking in kernel code. 
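On the caller side these macros pair with vnode_getattr(): mark the attributes you want, issue the call, then trust a field only if the filesystem marked it supported. A minimal sketch:

static int
myfs_query_size_and_uid(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);
	VATTR_WANTED(&va, va_uid);

	error = vnode_getattr(vp, &va, ctx);
	if (error != 0) {
		return error;
	}
	if (VATTR_IS_SUPPORTED(&va, va_data_size)) {
		printf("size %llu\n", (unsigned long long)va.va_data_size);
	}
	if (VATTR_NOT_RETURNED(&va, va_uid)) {
		/* The filesystem could not answer va_uid; fall back here. */
	}
	return 0;
}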
*/ -#define VATTR_IS(v, a, x) (VATTR_IS_SUPPORTED(v, a) && (v)-> a == (x)) -#define VATTR_IS_NOT(v, a, x) (VATTR_IS_SUPPORTED(v, a) && (v)-> a != (x)) - -#define VNODE_ATTR_va_rdev (1LL<< 0) /* 00000001 */ -#define VNODE_ATTR_va_nlink (1LL<< 1) /* 00000002 */ -#define VNODE_ATTR_va_total_size (1LL<< 2) /* 00000004 */ -#define VNODE_ATTR_va_total_alloc (1LL<< 3) /* 00000008 */ -#define VNODE_ATTR_va_data_size (1LL<< 4) /* 00000010 */ -#define VNODE_ATTR_va_data_alloc (1LL<< 5) /* 00000020 */ -#define VNODE_ATTR_va_iosize (1LL<< 6) /* 00000040 */ -#define VNODE_ATTR_va_uid (1LL<< 7) /* 00000080 */ -#define VNODE_ATTR_va_gid (1LL<< 8) /* 00000100 */ -#define VNODE_ATTR_va_mode (1LL<< 9) /* 00000200 */ -#define VNODE_ATTR_va_flags (1LL<<10) /* 00000400 */ -#define VNODE_ATTR_va_acl (1LL<<11) /* 00000800 */ -#define VNODE_ATTR_va_create_time (1LL<<12) /* 00001000 */ -#define VNODE_ATTR_va_access_time (1LL<<13) /* 00002000 */ -#define VNODE_ATTR_va_modify_time (1LL<<14) /* 00004000 */ -#define VNODE_ATTR_va_change_time (1LL<<15) /* 00008000 */ -#define VNODE_ATTR_va_backup_time (1LL<<16) /* 00010000 */ -#define VNODE_ATTR_va_fileid (1LL<<17) /* 00020000 */ -#define VNODE_ATTR_va_linkid (1LL<<18) /* 00040000 */ -#define VNODE_ATTR_va_parentid (1LL<<19) /* 00080000 */ -#define VNODE_ATTR_va_fsid (1LL<<20) /* 00100000 */ -#define VNODE_ATTR_va_filerev (1LL<<21) /* 00200000 */ -#define VNODE_ATTR_va_gen (1LL<<22) /* 00400000 */ -#define VNODE_ATTR_va_encoding (1LL<<23) /* 00800000 */ -#define VNODE_ATTR_va_type (1LL<<24) /* 01000000 */ -#define VNODE_ATTR_va_name (1LL<<25) /* 02000000 */ -#define VNODE_ATTR_va_uuuid (1LL<<26) /* 04000000 */ -#define VNODE_ATTR_va_guuid (1LL<<27) /* 08000000 */ -#define VNODE_ATTR_va_nchildren (1LL<<28) /* 10000000 */ -#define VNODE_ATTR_va_dirlinkcount (1LL<<29) /* 20000000 */ -#define VNODE_ATTR_va_addedtime (1LL<<30) /* 40000000 */ -#define VNODE_ATTR_va_dataprotect_class (1LL<<31) /* 80000000 */ -#define VNODE_ATTR_va_dataprotect_flags (1LL<<32) /* 100000000 */ -#define VNODE_ATTR_va_document_id (1LL<<33) /* 200000000 */ -#define VNODE_ATTR_va_devid (1LL<<34) /* 400000000 */ -#define VNODE_ATTR_va_objtype (1LL<<35) /* 800000000 */ -#define VNODE_ATTR_va_objtag (1LL<<36) /* 1000000000 */ -#define VNODE_ATTR_va_user_access (1LL<<37) /* 2000000000 */ -#define VNODE_ATTR_va_finderinfo (1LL<<38) /* 4000000000 */ -#define VNODE_ATTR_va_rsrc_length (1LL<<39) /* 8000000000 */ -#define VNODE_ATTR_va_rsrc_alloc (1LL<<40) /* 10000000000 */ -#define VNODE_ATTR_va_fsid64 (1LL<<41) /* 20000000000 */ -#define VNODE_ATTR_va_write_gencount (1LL<<42) /* 40000000000 */ -#define VNODE_ATTR_va_private_size (1LL<<43) /* 80000000000 */ - -#define VNODE_ATTR_BIT(n) (VNODE_ATTR_ ## n) +#define VATTR_IS(v, a, x) (VATTR_IS_SUPPORTED(v, a) && (v)-> a == (x)) +#define VATTR_IS_NOT(v, a, x) (VATTR_IS_SUPPORTED(v, a) && (v)-> a != (x)) + +#define VNODE_ATTR_va_rdev (1LL<< 0) /* 00000001 */ +#define VNODE_ATTR_va_nlink (1LL<< 1) /* 00000002 */ +#define VNODE_ATTR_va_total_size (1LL<< 2) /* 00000004 */ +#define VNODE_ATTR_va_total_alloc (1LL<< 3) /* 00000008 */ +#define VNODE_ATTR_va_data_size (1LL<< 4) /* 00000010 */ +#define VNODE_ATTR_va_data_alloc (1LL<< 5) /* 00000020 */ +#define VNODE_ATTR_va_iosize (1LL<< 6) /* 00000040 */ +#define VNODE_ATTR_va_uid (1LL<< 7) /* 00000080 */ +#define VNODE_ATTR_va_gid (1LL<< 8) /* 00000100 */ +#define VNODE_ATTR_va_mode (1LL<< 9) /* 00000200 */ +#define VNODE_ATTR_va_flags (1LL<<10) /* 00000400 */ +#define VNODE_ATTR_va_acl (1LL<<11) /* 00000800 */ 
+#define VNODE_ATTR_va_create_time (1LL<<12) /* 00001000 */ +#define VNODE_ATTR_va_access_time (1LL<<13) /* 00002000 */ +#define VNODE_ATTR_va_modify_time (1LL<<14) /* 00004000 */ +#define VNODE_ATTR_va_change_time (1LL<<15) /* 00008000 */ +#define VNODE_ATTR_va_backup_time (1LL<<16) /* 00010000 */ +#define VNODE_ATTR_va_fileid (1LL<<17) /* 00020000 */ +#define VNODE_ATTR_va_linkid (1LL<<18) /* 00040000 */ +#define VNODE_ATTR_va_parentid (1LL<<19) /* 00080000 */ +#define VNODE_ATTR_va_fsid (1LL<<20) /* 00100000 */ +#define VNODE_ATTR_va_filerev (1LL<<21) /* 00200000 */ +#define VNODE_ATTR_va_gen (1LL<<22) /* 00400000 */ +#define VNODE_ATTR_va_encoding (1LL<<23) /* 00800000 */ +#define VNODE_ATTR_va_type (1LL<<24) /* 01000000 */ +#define VNODE_ATTR_va_name (1LL<<25) /* 02000000 */ +#define VNODE_ATTR_va_uuuid (1LL<<26) /* 04000000 */ +#define VNODE_ATTR_va_guuid (1LL<<27) /* 08000000 */ +#define VNODE_ATTR_va_nchildren (1LL<<28) /* 10000000 */ +#define VNODE_ATTR_va_dirlinkcount (1LL<<29) /* 20000000 */ +#define VNODE_ATTR_va_addedtime (1LL<<30) /* 40000000 */ +#define VNODE_ATTR_va_dataprotect_class (1LL<<31) /* 80000000 */ +#define VNODE_ATTR_va_dataprotect_flags (1LL<<32) /* 100000000 */ +#define VNODE_ATTR_va_document_id (1LL<<33) /* 200000000 */ +#define VNODE_ATTR_va_devid (1LL<<34) /* 400000000 */ +#define VNODE_ATTR_va_objtype (1LL<<35) /* 800000000 */ +#define VNODE_ATTR_va_objtag (1LL<<36) /* 1000000000 */ +#define VNODE_ATTR_va_user_access (1LL<<37) /* 2000000000 */ +#define VNODE_ATTR_va_finderinfo (1LL<<38) /* 4000000000 */ +#define VNODE_ATTR_va_rsrc_length (1LL<<39) /* 8000000000 */ +#define VNODE_ATTR_va_rsrc_alloc (1LL<<40) /* 10000000000 */ +#define VNODE_ATTR_va_fsid64 (1LL<<41) /* 20000000000 */ +#define VNODE_ATTR_va_write_gencount (1LL<<42) /* 40000000000 */ +#define VNODE_ATTR_va_private_size (1LL<<43) /* 80000000000 */ + +#define VNODE_ATTR_BIT(n) (VNODE_ATTR_ ## n) /* * ALL of the attributes. 
*/ -#define VNODE_ATTR_ALL (VNODE_ATTR_BIT(va_rdev) | \ - VNODE_ATTR_BIT(va_nlink) | \ - VNODE_ATTR_BIT(va_total_size) | \ - VNODE_ATTR_BIT(va_total_alloc) | \ - VNODE_ATTR_BIT(va_data_size) | \ - VNODE_ATTR_BIT(va_data_alloc) | \ - VNODE_ATTR_BIT(va_iosize) | \ - VNODE_ATTR_BIT(va_uid) | \ - VNODE_ATTR_BIT(va_gid) | \ - VNODE_ATTR_BIT(va_mode) | \ - VNODE_ATTR_BIT(va_flags) | \ - VNODE_ATTR_BIT(va_acl) | \ - VNODE_ATTR_BIT(va_create_time) | \ - VNODE_ATTR_BIT(va_access_time) | \ - VNODE_ATTR_BIT(va_modify_time) | \ - VNODE_ATTR_BIT(va_change_time) | \ - VNODE_ATTR_BIT(va_backup_time) | \ - VNODE_ATTR_BIT(va_fileid) | \ - VNODE_ATTR_BIT(va_linkid) | \ - VNODE_ATTR_BIT(va_parentid) | \ - VNODE_ATTR_BIT(va_fsid) | \ - VNODE_ATTR_BIT(va_filerev) | \ - VNODE_ATTR_BIT(va_gen) | \ - VNODE_ATTR_BIT(va_encoding) | \ - VNODE_ATTR_BIT(va_type) | \ - VNODE_ATTR_BIT(va_name) | \ - VNODE_ATTR_BIT(va_uuuid) | \ - VNODE_ATTR_BIT(va_guuid) | \ - VNODE_ATTR_BIT(va_nchildren) | \ - VNODE_ATTR_BIT(va_dirlinkcount) | \ - VNODE_ATTR_BIT(va_addedtime) | \ - VNODE_ATTR_BIT(va_dataprotect_class) | \ - VNODE_ATTR_BIT(va_dataprotect_flags) | \ - VNODE_ATTR_BIT(va_document_id) | \ - VNODE_ATTR_BIT(va_devid) | \ - VNODE_ATTR_BIT(va_objtype) | \ - VNODE_ATTR_BIT(va_objtag) | \ - VNODE_ATTR_BIT(va_user_access) | \ - VNODE_ATTR_BIT(va_finderinfo) | \ - VNODE_ATTR_BIT(va_rsrc_length) | \ - VNODE_ATTR_BIT(va_rsrc_alloc) | \ - VNODE_ATTR_BIT(va_fsid64) | \ - VNODE_ATTR_BIT(va_write_gencount) | \ - VNODE_ATTR_BIT(va_private_size)) +#define VNODE_ATTR_ALL (VNODE_ATTR_BIT(va_rdev) | \ + VNODE_ATTR_BIT(va_nlink) | \ + VNODE_ATTR_BIT(va_total_size) | \ + VNODE_ATTR_BIT(va_total_alloc) | \ + VNODE_ATTR_BIT(va_data_size) | \ + VNODE_ATTR_BIT(va_data_alloc) | \ + VNODE_ATTR_BIT(va_iosize) | \ + VNODE_ATTR_BIT(va_uid) | \ + VNODE_ATTR_BIT(va_gid) | \ + VNODE_ATTR_BIT(va_mode) | \ + VNODE_ATTR_BIT(va_flags) | \ + VNODE_ATTR_BIT(va_acl) | \ + VNODE_ATTR_BIT(va_create_time) | \ + VNODE_ATTR_BIT(va_access_time) | \ + VNODE_ATTR_BIT(va_modify_time) | \ + VNODE_ATTR_BIT(va_change_time) | \ + VNODE_ATTR_BIT(va_backup_time) | \ + VNODE_ATTR_BIT(va_fileid) | \ + VNODE_ATTR_BIT(va_linkid) | \ + VNODE_ATTR_BIT(va_parentid) | \ + VNODE_ATTR_BIT(va_fsid) | \ + VNODE_ATTR_BIT(va_filerev) | \ + VNODE_ATTR_BIT(va_gen) | \ + VNODE_ATTR_BIT(va_encoding) | \ + VNODE_ATTR_BIT(va_type) | \ + VNODE_ATTR_BIT(va_name) | \ + VNODE_ATTR_BIT(va_uuuid) | \ + VNODE_ATTR_BIT(va_guuid) | \ + VNODE_ATTR_BIT(va_nchildren) | \ + VNODE_ATTR_BIT(va_dirlinkcount) | \ + VNODE_ATTR_BIT(va_addedtime) | \ + VNODE_ATTR_BIT(va_dataprotect_class) | \ + VNODE_ATTR_BIT(va_dataprotect_flags) | \ + VNODE_ATTR_BIT(va_document_id) | \ + VNODE_ATTR_BIT(va_devid) | \ + VNODE_ATTR_BIT(va_objtype) | \ + VNODE_ATTR_BIT(va_objtag) | \ + VNODE_ATTR_BIT(va_user_access) | \ + VNODE_ATTR_BIT(va_finderinfo) | \ + VNODE_ATTR_BIT(va_rsrc_length) | \ + VNODE_ATTR_BIT(va_rsrc_alloc) | \ + VNODE_ATTR_BIT(va_fsid64) | \ + VNODE_ATTR_BIT(va_write_gencount) | \ + VNODE_ATTR_BIT(va_private_size)) /* * Read-only attributes. 
*/ -#define VNODE_ATTR_RDONLY (VNODE_ATTR_BIT(va_rdev) | \ - VNODE_ATTR_BIT(va_nlink) | \ - VNODE_ATTR_BIT(va_total_size) | \ - VNODE_ATTR_BIT(va_total_alloc) | \ - VNODE_ATTR_BIT(va_data_alloc) | \ - VNODE_ATTR_BIT(va_iosize) | \ - VNODE_ATTR_BIT(va_fileid) | \ - VNODE_ATTR_BIT(va_linkid) | \ - VNODE_ATTR_BIT(va_parentid) | \ - VNODE_ATTR_BIT(va_fsid) | \ - VNODE_ATTR_BIT(va_filerev) | \ - VNODE_ATTR_BIT(va_gen) | \ - VNODE_ATTR_BIT(va_name) | \ - VNODE_ATTR_BIT(va_type) | \ - VNODE_ATTR_BIT(va_nchildren) | \ - VNODE_ATTR_BIT(va_dirlinkcount) | \ - VNODE_ATTR_BIT(va_devid) | \ - VNODE_ATTR_BIT(va_objtype) | \ - VNODE_ATTR_BIT(va_objtag) | \ - VNODE_ATTR_BIT(va_user_access) | \ - VNODE_ATTR_BIT(va_finderinfo) | \ - VNODE_ATTR_BIT(va_rsrc_length) | \ - VNODE_ATTR_BIT(va_rsrc_alloc) | \ - VNODE_ATTR_BIT(va_fsid64) | \ - VNODE_ATTR_BIT(va_write_gencount) | \ - VNODE_ATTR_BIT(va_private_size)) +#define VNODE_ATTR_RDONLY (VNODE_ATTR_BIT(va_rdev) | \ + VNODE_ATTR_BIT(va_nlink) | \ + VNODE_ATTR_BIT(va_total_size) | \ + VNODE_ATTR_BIT(va_total_alloc) | \ + VNODE_ATTR_BIT(va_data_alloc) | \ + VNODE_ATTR_BIT(va_iosize) | \ + VNODE_ATTR_BIT(va_fileid) | \ + VNODE_ATTR_BIT(va_linkid) | \ + VNODE_ATTR_BIT(va_parentid) | \ + VNODE_ATTR_BIT(va_fsid) | \ + VNODE_ATTR_BIT(va_filerev) | \ + VNODE_ATTR_BIT(va_gen) | \ + VNODE_ATTR_BIT(va_name) | \ + VNODE_ATTR_BIT(va_type) | \ + VNODE_ATTR_BIT(va_nchildren) | \ + VNODE_ATTR_BIT(va_dirlinkcount) | \ + VNODE_ATTR_BIT(va_devid) | \ + VNODE_ATTR_BIT(va_objtype) | \ + VNODE_ATTR_BIT(va_objtag) | \ + VNODE_ATTR_BIT(va_user_access) | \ + VNODE_ATTR_BIT(va_finderinfo) | \ + VNODE_ATTR_BIT(va_rsrc_length) | \ + VNODE_ATTR_BIT(va_rsrc_alloc) | \ + VNODE_ATTR_BIT(va_fsid64) | \ + VNODE_ATTR_BIT(va_write_gencount) | \ + VNODE_ATTR_BIT(va_private_size)) /* * Attributes that can be applied to a new file object. */ -#define VNODE_ATTR_NEWOBJ (VNODE_ATTR_BIT(va_rdev) | \ - VNODE_ATTR_BIT(va_uid) | \ - VNODE_ATTR_BIT(va_gid) | \ - VNODE_ATTR_BIT(va_mode) | \ - VNODE_ATTR_BIT(va_flags) | \ - VNODE_ATTR_BIT(va_acl) | \ - VNODE_ATTR_BIT(va_create_time) | \ - VNODE_ATTR_BIT(va_modify_time) | \ - VNODE_ATTR_BIT(va_change_time) | \ - VNODE_ATTR_BIT(va_encoding) | \ - VNODE_ATTR_BIT(va_type) | \ - VNODE_ATTR_BIT(va_uuuid) | \ - VNODE_ATTR_BIT(va_guuid) | \ - VNODE_ATTR_BIT(va_dataprotect_class) | \ - VNODE_ATTR_BIT(va_dataprotect_flags) | \ - VNODE_ATTR_BIT(va_document_id)) +#define VNODE_ATTR_NEWOBJ (VNODE_ATTR_BIT(va_rdev) | \ + VNODE_ATTR_BIT(va_uid) | \ + VNODE_ATTR_BIT(va_gid) | \ + VNODE_ATTR_BIT(va_mode) | \ + VNODE_ATTR_BIT(va_flags) | \ + VNODE_ATTR_BIT(va_acl) | \ + VNODE_ATTR_BIT(va_create_time) | \ + VNODE_ATTR_BIT(va_modify_time) | \ + VNODE_ATTR_BIT(va_change_time) | \ + VNODE_ATTR_BIT(va_encoding) | \ + VNODE_ATTR_BIT(va_type) | \ + VNODE_ATTR_BIT(va_uuuid) | \ + VNODE_ATTR_BIT(va_guuid) | \ + VNODE_ATTR_BIT(va_dataprotect_class) | \ + VNODE_ATTR_BIT(va_dataprotect_flags) | \ + VNODE_ATTR_BIT(va_document_id)) #include struct vnode_attr { /* bitfields */ - uint64_t va_supported; - uint64_t va_active; + uint64_t va_supported; + uint64_t va_active; /* * Control flags. The low 16 bits are reserved for the * ioflags being passed for truncation operations. 
*/ - int va_vaflags; - + int va_vaflags; + /* traditional stat(2) parameter fields */ - dev_t va_rdev; /* device id (device nodes only) */ - uint64_t va_nlink; /* number of references to this file */ - uint64_t va_total_size; /* size in bytes of all forks */ - uint64_t va_total_alloc; /* disk space used by all forks */ - uint64_t va_data_size; /* size in bytes of the fork managed by current vnode */ - uint64_t va_data_alloc; /* disk space used by the fork managed by current vnode */ - uint32_t va_iosize; /* optimal I/O blocksize */ + dev_t va_rdev; /* device id (device nodes only) */ + uint64_t va_nlink; /* number of references to this file */ + uint64_t va_total_size; /* size in bytes of all forks */ + uint64_t va_total_alloc; /* disk space used by all forks */ + uint64_t va_data_size; /* size in bytes of the fork managed by current vnode */ + uint64_t va_data_alloc; /* disk space used by the fork managed by current vnode */ + uint32_t va_iosize; /* optimal I/O blocksize */ /* file security information */ - uid_t va_uid; /* owner UID */ - gid_t va_gid; /* owner GID */ - mode_t va_mode; /* posix permissions */ - uint32_t va_flags; /* file flags */ - struct kauth_acl *va_acl; /* access control list */ + uid_t va_uid; /* owner UID */ + gid_t va_gid; /* owner GID */ + mode_t va_mode; /* posix permissions */ + uint32_t va_flags; /* file flags */ + struct kauth_acl *va_acl; /* access control list */ /* timestamps */ - struct timespec va_create_time; /* time of creation */ - struct timespec va_access_time; /* time of last access */ - struct timespec va_modify_time; /* time of last data modification */ - struct timespec va_change_time; /* time of last metadata change */ - struct timespec va_backup_time; /* time of last backup */ - + struct timespec va_create_time; /* time of creation */ + struct timespec va_access_time; /* time of last access */ + struct timespec va_modify_time; /* time of last data modification */ + struct timespec va_change_time; /* time of last metadata change */ + struct timespec va_backup_time; /* time of last backup */ + /* file parameters */ - uint64_t va_fileid; /* file unique ID in filesystem */ - uint64_t va_linkid; /* file link unique ID */ - uint64_t va_parentid; /* parent ID */ - uint32_t va_fsid; /* filesystem ID */ - uint64_t va_filerev; /* file revision counter */ /* XXX */ - uint32_t va_gen; /* file generation count */ /* XXX - relationship of - * these two? */ + uint64_t va_fileid; /* file unique ID in filesystem */ + uint64_t va_linkid; /* file link unique ID */ + uint64_t va_parentid; /* parent ID */ + uint32_t va_fsid; /* filesystem ID */ + uint64_t va_filerev; /* file revision counter */ /* XXX */ + uint32_t va_gen; /* file generation count */ /* XXX - relationship of + * these two? */ /* misc parameters */ - uint32_t va_encoding; /* filename encoding script */ + uint32_t va_encoding; /* filename encoding script */ + + enum vtype va_type; /* file type */ + char * va_name; /* Name for ATTR_CMN_NAME; MAXPATHLEN bytes */ + guid_t va_uuuid; /* file owner UUID */ + guid_t va_guuid; /* file group UUID */ - enum vtype va_type; /* file type */ - char * va_name; /* Name for ATTR_CMN_NAME; MAXPATHLEN bytes */ - guid_t va_uuuid; /* file owner UUID */ - guid_t va_guuid; /* file group UUID */ - /* Meaningful for directories only */ - uint64_t va_nchildren; /* Number of items in a directory */ - uint64_t va_dirlinkcount; /* Real references to dir (i.e. excluding "." and ".." 
refs) */ + uint64_t va_nchildren; /* Number of items in a directory */ + uint64_t va_dirlinkcount; /* Real references to dir (i.e. excluding "." and ".." refs) */ #ifdef BSD_KERNEL_PRIVATE struct kauth_acl *va_base_acl; #else - void * va_reserved1; + void * va_reserved1; #endif /* BSD_KERNEL_PRIVATE */ - struct timespec va_addedtime; /* timestamp when item was added to parent directory */ - + struct timespec va_addedtime; /* timestamp when item was added to parent directory */ + /* Data Protection fields */ - uint32_t va_dataprotect_class; /* class specified for this file if it didn't exist */ - uint32_t va_dataprotect_flags; /* flags from NP open(2) to the filesystem */ + uint32_t va_dataprotect_class; /* class specified for this file if it didn't exist */ + uint32_t va_dataprotect_flags; /* flags from NP open(2) to the filesystem */ /* Document revision tracking */ uint32_t va_document_id; /* Fields for Bulk args */ - uint32_t va_devid; /* devid of filesystem */ - uint32_t va_objtype; /* type of object */ - uint32_t va_objtag; /* vnode tag of filesystem */ - uint32_t va_user_access; /* access for user */ - uint8_t va_finderinfo[32]; /* Finder Info */ - uint64_t va_rsrc_length; /* Resource Fork length */ - uint64_t va_rsrc_alloc; /* Resource Fork allocation size */ - fsid_t va_fsid64; /* fsid, of the correct type */ + uint32_t va_devid; /* devid of filesystem */ + uint32_t va_objtype; /* type of object */ + uint32_t va_objtag; /* vnode tag of filesystem */ + uint32_t va_user_access; /* access for user */ + uint8_t va_finderinfo[32]; /* Finder Info */ + uint64_t va_rsrc_length; /* Resource Fork length */ + uint64_t va_rsrc_alloc; /* Resource Fork allocation size */ + fsid_t va_fsid64; /* fsid, of the correct type */ uint32_t va_write_gencount; /* counter that increments each time the file changes */ @@ -737,7 +737,7 @@ struct vnode_attr { }; #ifdef BSD_KERNEL_PRIVATE -/* +/* * Flags for va_dataprotect_flags */ #define VA_DP_RAWENCRYPTED 0x0001 @@ -748,52 +748,52 @@ struct vnode_attr { /* * Flags for va_vaflags. */ -#define VA_UTIMES_NULL 0x010000 /* utimes argument was NULL */ -#define VA_EXCLUSIVE 0x020000 /* exclusive create request */ -#define VA_NOINHERIT 0x040000 /* Don't inherit ACLs from parent */ -#define VA_NOAUTH 0x080000 -#define VA_64BITOBJIDS 0x100000 /* fileid/linkid/parentid are 64 bit */ +#define VA_UTIMES_NULL 0x010000 /* utimes argument was NULL */ +#define VA_EXCLUSIVE 0x020000 /* exclusive create request */ +#define VA_NOINHERIT 0x040000 /* Don't inherit ACLs from parent */ +#define VA_NOAUTH 0x080000 +#define VA_64BITOBJIDS 0x100000 /* fileid/linkid/parentid are 64 bit */ /* * Modes. Some values same as Ixxx entries from inode.h for now. */ -#define VSUID 0x800 /*04000*/ /* set user id on execution */ -#define VSGID 0x400 /*02000*/ /* set group id on execution */ -#define VSVTX 0x200 /*01000*/ /* save swapped text even after use */ -#define VREAD 0x100 /*00400*/ /* read, write, execute permissions */ -#define VWRITE 0x080 /*00200*/ -#define VEXEC 0x040 /*00100*/ +#define VSUID 0x800 /*04000*/ /* set user id on execution */ +#define VSGID 0x400 /*02000*/ /* set group id on execution */ +#define VSVTX 0x200 /*01000*/ /* save swapped text even after use */ +#define VREAD 0x100 /*00400*/ /* read, write, execute permissions */ +#define VWRITE 0x080 /*00200*/ +#define VEXEC 0x040 /*00100*/ /* * Convert between vnode types and inode formats (since POSIX.1 * defines mode word of stat structure in terms of inode formats). 
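On the filesystem side of the same protocol, a VNOP_GETATTR implementation answers what it can and marks each answered field with VATTR_RETURN; expensive attributes are computed only when the caller marked them active. A hedged sketch, with struct myfs_inode and myfs_copy_acl() hypothetical:

struct myfs_inode {                     /* hypothetical in-memory node */
	uint64_t i_nlink, i_size;
	uid_t i_uid;
	gid_t i_gid;
	mode_t i_mode;
};
static struct kauth_acl *myfs_copy_acl(struct myfs_inode *ip); /* hypothetical */

static int
myfs_getattr_sketch(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
{
#pragma unused(ctx)
	struct myfs_inode *ip = vnode_fsnode(vp);

	/* Cheap fields are answered unconditionally... */
	VATTR_RETURN(vap, va_nlink, ip->i_nlink);
	VATTR_RETURN(vap, va_data_size, ip->i_size);
	VATTR_RETURN(vap, va_uid, ip->i_uid);
	VATTR_RETURN(vap, va_gid, ip->i_gid);
	VATTR_RETURN(vap, va_mode, ip->i_mode & ~S_IFMT);

	/* ...costly ones only when explicitly requested. */
	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		vap->va_acl = myfs_copy_acl(ip);
		VATTR_SET_SUPPORTED(vap, va_acl);
	}
	return 0;
}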
*/ -extern enum vtype iftovt_tab[]; -extern int vttoif_tab[]; -#define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12]) -#define VTTOIF(indx) (vttoif_tab[(int)(indx)]) -#define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode)) +extern enum vtype iftovt_tab[]; +extern int vttoif_tab[]; +#define IFTOVT(mode) (iftovt_tab[((mode) & S_IFMT) >> 12]) +#define VTTOIF(indx) (vttoif_tab[(int)(indx)]) +#define MAKEIMODE(indx, mode) (int)(VTTOIF(indx) | (mode)) /* * Flags to various vnode functions. */ -#define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */ -#define FORCECLOSE 0x0002 /* vflush: force file closeure */ -#define WRITECLOSE 0x0004 /* vflush: only close writeable files */ -#define SKIPSWAP 0x0008 /* vflush: skip vnodes marked VSWAP */ -#define SKIPROOT 0x0010 /* vflush: skip root vnodes marked VROOT */ +#define SKIPSYSTEM 0x0001 /* vflush: skip vnodes marked VSYSTEM */ +#define FORCECLOSE 0x0002 /* vflush: force file closeure */ +#define WRITECLOSE 0x0004 /* vflush: only close writeable files */ +#define SKIPSWAP 0x0008 /* vflush: skip vnodes marked VSWAP */ +#define SKIPROOT 0x0010 /* vflush: skip root vnodes marked VROOT */ -#define DOCLOSE 0x0008 /* vclean: close active files */ +#define DOCLOSE 0x0008 /* vclean: close active files */ -#define V_SAVE 0x0001 /* vinvalbuf: sync file first */ -#define V_SAVEMETA 0x0002 /* vinvalbuf: leave indirect blocks */ +#define V_SAVE 0x0001 /* vinvalbuf: sync file first */ +#define V_SAVEMETA 0x0002 /* vinvalbuf: leave indirect blocks */ -#define REVOKEALL 0x0001 /* vnop_revoke: revoke all aliases */ +#define REVOKEALL 0x0001 /* vnop_revoke: revoke all aliases */ /* VNOP_REMOVE/unlink flags */ -#define VNODE_REMOVE_NODELETEBUSY 0x0001 /* Don't delete busy files (Carbon) */ -#define VNODE_REMOVE_SKIP_NAMESPACE_EVENT 0x0002 /* Do not upcall to userland handlers */ -#define VNODE_REMOVE_NO_AUDIT_PATH 0x0004 /* Do not audit the path */ +#define VNODE_REMOVE_NODELETEBUSY 0x0001 /* Don't delete busy files (Carbon) */ +#define VNODE_REMOVE_SKIP_NAMESPACE_EVENT 0x0002 /* Do not upcall to userland handlers */ +#define VNODE_REMOVE_NO_AUDIT_PATH 0x0004 /* Do not audit the path */ /* VNOP_READDIR flags: */ #define VNODE_READDIR_EXTENDED 0x0001 /* use extended directory entries */ @@ -806,13 +806,13 @@ extern int vttoif_tab[]; #define VNODE_CLONEFILE_NOOWNERCOPY 0x0001 /* Don't copy ownership information */ -#define NULLVP ((struct vnode *)NULL) +#define NULLVP ((struct vnode *)NULL) #ifndef BSD_KERNEL_PRIVATE struct vnodeop_desc; #endif -extern int desiredvnodes; /* number of vnodes desired */ +extern int desiredvnodes; /* number of vnodes desired */ /* @@ -820,18 +820,18 @@ extern int desiredvnodes; /* number of vnodes desired */ */ struct vnodeopv_entry_desc { struct vnodeop_desc *opve_op; /* which operation this is */ - int (*opve_impl)(void *); /* code implementing this operation */ + int (*opve_impl)(void *); /* code implementing this operation */ }; struct vnodeopv_desc { - /* ptr to the ptr to the vector where op should go */ - int (***opv_desc_vector_p)(void *); + /* ptr to the ptr to the vector where op should go */ + int(***opv_desc_vector_p)(void *); struct vnodeopv_entry_desc *opv_desc_ops; /* null terminated list */ }; /*! - @function vn_default_error - @abstract Default vnode operation to fill unsupported slots in vnode operation vectors. - @return ENOTSUP + * @function vn_default_error + * @abstract Default vnode operation to fill unsupported slots in vnode operation vectors. 
+ * @return ENOTSUP */ int vn_default_error(void); @@ -849,998 +849,1003 @@ struct vnop_generic_args { __BEGIN_DECLS /*! - @function vnode_create - @abstract Create and initialize a vnode. - @discussion Returns wth an iocount held on the vnode which must eventually be dropped with vnode_put(). - @param flavor Should be VNCREATE_FLAVOR. - @param size Size of the struct vnode_fsparam in "data". - @param data Pointer to a struct vnode_fsparam containing initialization information. - @param vpp Pointer to a vnode pointer, to be filled in with newly created vnode. - @return 0 for success, error code otherwise. + * @function vnode_create + * @abstract Create and initialize a vnode. + * @discussion Returns with an iocount held on the vnode which must eventually be dropped with vnode_put(). + * @param flavor Should be VNCREATE_FLAVOR. + * @param size Size of the struct vnode_fsparam in "data". + * @param data Pointer to a struct vnode_fsparam containing initialization information. + * @param vpp Pointer to a vnode pointer, to be filled in with newly created vnode. + * @return 0 for success, error code otherwise. */ -errno_t vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp); +errno_t vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp); #ifdef KERNEL_PRIVATE /*! - @function vnode_create_empty - @abstract Create an empty, uninitialized vnode. - @discussion Returns with an iocount held on the vnode which must eventually be - dropped with vnode_put(). The next operation performed on the vnode must be - vnode_initialize (or vnode_put if the vnode is not needed anymore). - This interface is provided as a mechanism to pre-flight obtaining a vnode for - certain filesystem operations which may need to get a vnode without filesystem - locks held. It is imperative that nothing be done with the vnode till the - succeeding vnode_initialize (or vnode_put as the case may be) call. - @param vpp Pointer to a vnode pointer, to be filled in with newly created vnode. - @return 0 for success, error code otherwise. - */ -errno_t vnode_create_empty(vnode_t *vpp); - -/*! - @function vnode_initialize - @abstract Initialize a vnode obtained by vnode_create_empty - @discussion Does not drop iocount held on the vnode which must eventually be - dropped with vnode_put(). In case of an error however, the vnode's iocount is - dropped and the vnode must not be referenced again by the caller. - @param flavor Should be VNCREATE_FLAVOR. - @param size Size of the struct vnode_fsparam in "data". - @param data Pointer to a struct vnode_fsparam containing initialization information. - @param vpp Pointer to a vnode pointer, to be filled in with newly created vnode. - @return 0 for success, error code otherwise. - */ -errno_t vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp); + * @function vnode_create_empty + * @abstract Create an empty, uninitialized vnode. + * @discussion Returns with an iocount held on the vnode which must eventually be + * dropped with vnode_put(). The next operation performed on the vnode must be + * vnode_initialize (or vnode_put if the vnode is not needed anymore). + * This interface is provided as a mechanism to pre-flight obtaining a vnode for + * certain filesystem operations which may need to get a vnode without filesystem + * locks held. It is imperative that nothing be done with the vnode till the + * succeeding vnode_initialize (or vnode_put as the case may be) call.
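A hedged sketch of the ordinary vnode_create() path using the VNCREATE_FLAVOR/VCREATESIZE pair and struct vnode_fsparam from earlier in this header; myfs_vnodeop_p is again a hypothetical operations vector, and the returned vnode carries an iocount the caller must eventually drop with vnode_put():

extern int (**myfs_vnodeop_p)(void *);          /* hypothetical op vector */

static int
myfs_new_vnode(mount_t mp, vnode_t dvp, void *fsnode,
    struct componentname *cnp, off_t size, vnode_t *vpp)
{
	struct vnode_fsparam vfsp;

	bzero(&vfsp, sizeof(vfsp));
	vfsp.vnfs_mp = mp;
	vfsp.vnfs_vtype = VREG;
	vfsp.vnfs_str = "myfs";           /* debug aid only */
	vfsp.vnfs_dvp = dvp;
	vfsp.vnfs_fsnode = fsnode;
	vfsp.vnfs_vops = myfs_vnodeop_p;
	vfsp.vnfs_filesize = size;        /* spares UBC a getattr */
	vfsp.vnfs_cnp = cnp;
	vfsp.vnfs_flags = VNFS_ADDFSREF;  /* node lives in our fs hash */

	/* On success *vpp holds an iocount; drop it later with vnode_put(). */
	return vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, vpp);
}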
+ * @param vpp Pointer to a vnode pointer, to be filled in with newly created vnode. + * @return 0 for success, error code otherwise. + */ +errno_t vnode_create_empty(vnode_t *vpp); + +/*! + * @function vnode_initialize + * @abstract Initialize a vnode obtained by vnode_create_empty. + * @discussion Does not drop the iocount held on the vnode, which must eventually be + * dropped with vnode_put(). In case of an error, however, the vnode's iocount is + * dropped and the vnode must not be referenced again by the caller. + * @param flavor Should be VNCREATE_FLAVOR. + * @param size Size of the struct vnode_fsparam in "data". + * @param data Pointer to a struct vnode_fsparam containing initialization information. + * @param vpp Pointer to a vnode pointer, to be filled in with newly created vnode. + * @return 0 for success, error code otherwise. + */ +errno_t vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp); #endif /* KERNEL_PRIVATE */ /*! - @function vnode_addfsref - @abstract Mark a vnode as being stored in a filesystem hash. - @discussion Should only be called once on a vnode, and never if that vnode was created with VNFS_ADDFSREF. - There should be a corresponding call to vnode_removefsref() when the vnode is reclaimed; VFS assumes that a - n unused vnode will not be marked as referenced by a filesystem. - @param vp The vnode to mark. - @return Always 0. + * @function vnode_addfsref + * @abstract Mark a vnode as being stored in a filesystem hash. + * @discussion Should only be called once on a vnode, and never if that vnode was created with VNFS_ADDFSREF. + * There should be a corresponding call to vnode_removefsref() when the vnode is reclaimed; VFS assumes that an + * unused vnode will not be marked as referenced by a filesystem. + * @param vp The vnode to mark. + * @return Always 0. */ -int vnode_addfsref(vnode_t vp); +int vnode_addfsref(vnode_t vp); /*! - @function vnode_removefsref - @abstract Mark a vnode as no longer being stored in a filesystem hash. - @discussion Should only be called once on a vnode (during a reclaim), and only after the vnode has either been created with VNFS_ADDFSREF or marked by vnode_addfsref(). - @param vp The vnode to unmark. - @return Always 0. + * @function vnode_removefsref + * @abstract Mark a vnode as no longer being stored in a filesystem hash. + * @discussion Should only be called once on a vnode (during a reclaim), and only after the vnode has either been created with VNFS_ADDFSREF or marked by vnode_addfsref(). + * @param vp The vnode to unmark. + * @return Always 0. */ -int vnode_removefsref(vnode_t vp); +int vnode_removefsref(vnode_t vp); /*! - @function vnode_hasdirtyblks - @abstract Check if a vnode has dirty data waiting to be written to disk. - @discussion Note that this routine is unsynchronized; it is only a snapshot and its result may cease to be true at the moment it is returned.. - @param vp The vnode to test. - @return Nonzero if there are dirty blocks, 0 otherwise + * @function vnode_hasdirtyblks + * @abstract Check if a vnode has dirty data waiting to be written to disk. + * @discussion Note that this routine is unsynchronized; it is only a snapshot and its result may cease to be true at the moment it is returned. + * @param vp The vnode to test. + * @return Nonzero if there are dirty blocks, 0 otherwise. */ -int vnode_hasdirtyblks(vnode_t vp); +int vnode_hasdirtyblks(vnode_t vp); /*! - @function vnode_hascleanblks - @abstract Check if a vnode has clean buffers associated with it.
- @discussion Note that this routine is unsynchronized; it is only a snapshot and its result may cease to be true at the moment it is returned.. - @param vp The vnode to test. - @return Nonzero if there are clean blocks, 0 otherwise. + * @function vnode_hascleanblks + * @abstract Check if a vnode has clean buffers associated with it. + * @discussion Note that this routine is unsynchronized; it is only a snapshot and its result may cease to be true at the moment it is returned. + * @param vp The vnode to test. + * @return Nonzero if there are clean blocks, 0 otherwise. */ -int vnode_hascleanblks(vnode_t vp); +int vnode_hascleanblks(vnode_t vp); -#define VNODE_ASYNC_THROTTLE 15 +#define VNODE_ASYNC_THROTTLE 15 /*! - @function vnode_waitforwrites - @abstract Wait for the number of pending writes on a vnode to drop below a target. - @param vp The vnode to monitor. - @param output_target Max pending write count with which to return. - @param slpflag Flags for msleep(). - @param slptimeout Frequency with which to force a check for completion; increments of 10 ms. - @param msg String to pass msleep() . - @return 0 for success, or an error value from msleep(). + * @function vnode_waitforwrites + * @abstract Wait for the number of pending writes on a vnode to drop below a target. + * @param vp The vnode to monitor. + * @param output_target Max pending write count with which to return. + * @param slpflag Flags for msleep(). + * @param slptimeout Frequency with which to force a check for completion; increments of 10 ms. + * @param msg String to pass msleep(). + * @return 0 for success, or an error value from msleep(). */ -int vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg); +int vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg); /*! - @function vnode_startwrite - @abstract Increment the count of pending writes on a vnode. - @param vp The vnode whose count to increment. + * @function vnode_startwrite + * @abstract Increment the count of pending writes on a vnode. + * @param vp The vnode whose count to increment. */ -void vnode_startwrite(vnode_t vp); +void vnode_startwrite(vnode_t vp); /*! - @function vnode_startwrite - @abstract Decrement the count of pending writes on a vnode . - @discussion Also wakes up threads waiting for the write count to drop, as in vnode_waitforwrites. - @param vp The vnode whose count to decrement. + * @function vnode_writedone + * @abstract Decrement the count of pending writes on a vnode. + * @discussion Also wakes up threads waiting for the write count to drop, as in vnode_waitforwrites. + * @param vp The vnode whose count to decrement. */ -void vnode_writedone(vnode_t vp); +void vnode_writedone(vnode_t vp); /*! - @function vnode_vtype - @abstract Return a vnode's type. - @param vp The vnode whose type to grab. - @return The vnode's type. + * @function vnode_vtype + * @abstract Return a vnode's type. + * @param vp The vnode whose type to grab. + * @return The vnode's type. */ -enum vtype vnode_vtype(vnode_t vp); +enum vtype vnode_vtype(vnode_t vp); /*! - @function vnode_vid - @abstract Return a vnode's vid (generation number), which is constant from creation until reclaim. - @param vp The vnode whose vid to grab. - @return The vnode's vid.
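The three write-count routines bracket asynchronous writes so that, for example, an fsync path can drain them. A hedged sketch of the pairing; myfs_issue_async_write() is hypothetical:

static void myfs_issue_async_write(vnode_t vp, buf_t bp);        /* hypothetical */

/* Issue side: bump the counter before the async I/O is queued... */
static void
myfs_start_async_write(vnode_t vp, buf_t bp)
{
	vnode_startwrite(vp);
	myfs_issue_async_write(vp, bp);
}

/* ...and the completion handler balances it, waking any waiters. */
static void
myfs_write_complete(vnode_t vp)
{
	vnode_writedone(vp);
}

/* Drain side: block until no writes remain pending on this vnode. */
static int
myfs_drain_writes(vnode_t vp)
{
	return vnode_waitforwrites(vp, 0, 0, 0, "myfs_drain");
}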
*/ -uint32_t vnode_vid(vnode_t vp); +uint32_t vnode_vid(vnode_t vp); /*! - @function vnode_mountedhere - @abstract Returns a pointer to a mount placed on top of a vnode, should it exist. - @param vp The vnode from whom to take the covering mount. - @return Pointer to mount covering a vnode, or NULL if none exists. + * @function vnode_mountedhere + * @abstract Returns a pointer to a mount placed on top of a vnode, should it exist. + * @param vp The vnode from whom to take the covering mount. + * @return Pointer to mount covering a vnode, or NULL if none exists. */ -mount_t vnode_mountedhere(vnode_t vp); +mount_t vnode_mountedhere(vnode_t vp); /*! - @function vnode_mount - @abstract Get the mount structure for the filesystem that a vnode belongs to. - @param vp The vnode whose mount to grab. - @return The mount, directly. + * @function vnode_mount + * @abstract Get the mount structure for the filesystem that a vnode belongs to. + * @param vp The vnode whose mount to grab. + * @return The mount, directly. */ -mount_t vnode_mount(vnode_t vp); +mount_t vnode_mount(vnode_t vp); /*! - @function vnode_specrdev - @abstract Return the device id of the device associated with a special file. - @param vp The vnode whose device id to extract--vnode must be a special file. - @return The device id. + * @function vnode_specrdev + * @abstract Return the device id of the device associated with a special file. + * @param vp The vnode whose device id to extract--vnode must be a special file. + * @return The device id. */ -dev_t vnode_specrdev(vnode_t vp); +dev_t vnode_specrdev(vnode_t vp); /*! - @function vnode_fsnode - @abstract Gets the filesystem-specific data associated with a vnode. - @param vp The vnode whose data to grab. - @return The filesystem-specific data, directly. + * @function vnode_fsnode + * @abstract Gets the filesystem-specific data associated with a vnode. + * @param vp The vnode whose data to grab. + * @return The filesystem-specific data, directly. */ -void * vnode_fsnode(vnode_t vp); +void * vnode_fsnode(vnode_t vp); /*! - @function vnode_clearfsnode - @abstract Sets a vnode's filesystem-specific data to be NULL. - @discussion This routine should only be called when a vnode is no longer in use, i.e. during a VNOP_RECLAIM. - @param vp The vnode whose data to clear out. + * @function vnode_clearfsnode + * @abstract Sets a vnode's filesystem-specific data to be NULL. + * @discussion This routine should only be called when a vnode is no longer in use, i.e. during a VNOP_RECLAIM. + * @param vp The vnode whose data to clear out. */ -void vnode_clearfsnode(vnode_t vp); +void vnode_clearfsnode(vnode_t vp); /*! - @function vnode_isvroot - @abstract Determine if a vnode is the root of its filesystem. - @param vp The vnode to test. - @return Nonzero if the vnode is the root, 0 if it is not. + * @function vnode_isvroot + * @abstract Determine if a vnode is the root of its filesystem. + * @param vp The vnode to test. + * @return Nonzero if the vnode is the root, 0 if it is not. */ -int vnode_isvroot(vnode_t vp); +int vnode_isvroot(vnode_t vp); /*! - @function vnode_issystem - @abstract Determine if a vnode is marked as a System vnode. - @param vp The vnode to test. - @return Nonzero if the vnode is a system vnode, 0 if it is not. + * @function vnode_issystem + * @abstract Determine if a vnode is marked as a System vnode. + * @param vp The vnode to test. + * @return Nonzero if the vnode is a system vnode, 0 if it is not. */ -int vnode_issystem(vnode_t vp); +int vnode_issystem(vnode_t vp); /*! 
- @function vnode_ismount - @abstract Determine if there is currently a mount occurring which will cover this vnode. - @discussion Note that this is only a snapshot; a mount may begin or end at any time. - @param vp The vnode to test. - @return Nonzero if there is a mount in progress, 0 otherwise. + * @function vnode_ismount + * @abstract Determine if there is currently a mount occurring which will cover this vnode. + * @discussion Note that this is only a snapshot; a mount may begin or end at any time. + * @param vp The vnode to test. + * @return Nonzero if there is a mount in progress, 0 otherwise. */ -int vnode_ismount(vnode_t vp); +int vnode_ismount(vnode_t vp); /*! - @function vnode_isreg - @abstract Determine if a vnode is a regular file. - @param vp The vnode to test. - @return Nonzero if the vnode is of type VREG, 0 otherwise. + * @function vnode_isreg + * @abstract Determine if a vnode is a regular file. + * @param vp The vnode to test. + * @return Nonzero if the vnode is of type VREG, 0 otherwise. */ -int vnode_isreg(vnode_t vp); +int vnode_isreg(vnode_t vp); /*! - @function vnode_isdir - @abstract Determine if a vnode is a directory. - @param vp The vnode to test. - @return Nonzero if the vnode is of type VDIR, 0 otherwise. + * @function vnode_isdir + * @abstract Determine if a vnode is a directory. + * @param vp The vnode to test. + * @return Nonzero if the vnode is of type VDIR, 0 otherwise. */ -int vnode_isdir(vnode_t vp); +int vnode_isdir(vnode_t vp); /*! - @function vnode_islnk - @abstract Determine if a vnode is a symbolic link. - @param vp The vnode to test. - @return Nonzero if the vnode is of type VLNK, 0 otherwise. + * @function vnode_islnk + * @abstract Determine if a vnode is a symbolic link. + * @param vp The vnode to test. + * @return Nonzero if the vnode is of type VLNK, 0 otherwise. */ -int vnode_islnk(vnode_t vp); +int vnode_islnk(vnode_t vp); /*! - @function vnode_isfifo - @abstract Determine if a vnode is a named pipe. - @param vp The vnode to test. - @return Nonzero if the vnode is of type VFIFO, 0 otherwise. + * @function vnode_isfifo + * @abstract Determine if a vnode is a named pipe. + * @param vp The vnode to test. + * @return Nonzero if the vnode is of type VFIFO, 0 otherwise. */ -int vnode_isfifo(vnode_t vp); +int vnode_isfifo(vnode_t vp); /*! - @function vnode_isblk - @abstract Determine if a vnode is a block device special file. - @param vp The vnode to test. - @return Nonzero if the vnode is of type VBLK, 0 otherwise. + * @function vnode_isblk + * @abstract Determine if a vnode is a block device special file. + * @param vp The vnode to test. + * @return Nonzero if the vnode is of type VBLK, 0 otherwise. */ -int vnode_isblk(vnode_t vp); +int vnode_isblk(vnode_t vp); /*! - @function vnode_ischr - @abstract Determine if a vnode is a character device special file. - @param vp The vnode to test. - @return Nonzero if the vnode is of type VCHR, 0 otherwise. + * @function vnode_ischr + * @abstract Determine if a vnode is a character device special file. + * @param vp The vnode to test. + * @return Nonzero if the vnode is of type VCHR, 0 otherwise. */ -int vnode_ischr(vnode_t vp); +int vnode_ischr(vnode_t vp); /*! - @function vnode_isswap - @abstract Determine if a vnode is being used as a swap file. - @param vp The vnode to test. - @return Nonzero if the vnode is being used as swap, 0 otherwise. + * @function vnode_isswap + * @abstract Determine if a vnode is being used as a swap file. + * @param vp The vnode to test. 
+ * @return Nonzero if the vnode is being used as swap, 0 otherwise. */ -int vnode_isswap(vnode_t vp); +int vnode_isswap(vnode_t vp); /*! - @function vnode_isnamedstream - @abstract Determine if a vnode is a named stream. - @param vp The vnode to test. - @return Nonzero if the vnode is a named stream, 0 otherwise. + * @function vnode_isnamedstream + * @abstract Determine if a vnode is a named stream. + * @param vp The vnode to test. + * @return Nonzero if the vnode is a named stream, 0 otherwise. */ -int vnode_isnamedstream(vnode_t vp); +int vnode_isnamedstream(vnode_t vp); #ifdef KERNEL_PRIVATE /*! - @function vnode_setasnamedstream - @abstract Set svp as a named stream of vp and take appropriate references. - @param vp The vnode whose namedstream has to be set. - @param svp The namedstream vnode. - @return 0 if the operation is successful, an error otherwise. + * @function vnode_setasnamedstream + * @abstract Set svp as a named stream of vp and take appropriate references. + * @param vp The vnode whose namedstream has to be set. + * @param svp The namedstream vnode. + * @return 0 if the operation is successful, an error otherwise. */ -errno_t vnode_setasnamedstream(vnode_t vp, vnode_t svp); +errno_t vnode_setasnamedstream(vnode_t vp, vnode_t svp); #endif /*! - @function vnode_ismountedon - @abstract Determine if a vnode is a block device on which a filesystem has been mounted. - @discussion A block device marked as being mounted on cannot be opened. - @param vp The vnode to test. - @return Nonzero if the vnode is a block device on which an filesystem is mounted, 0 otherwise. + * @function vnode_ismountedon + * @abstract Determine if a vnode is a block device on which a filesystem has been mounted. + * @discussion A block device marked as being mounted on cannot be opened. + * @param vp The vnode to test. + * @return Nonzero if the vnode is a block device on which a filesystem is mounted, 0 otherwise. */ -int vnode_ismountedon(vnode_t vp); +int vnode_ismountedon(vnode_t vp); /*! - @function vnode_setmountedon - @abstract Set flags indicating that a block device vnode has been mounted as a filesystem. - @discussion A block device marked as being mounted on cannot be opened. - @param vp The vnode to set flags on, a block device. + * @function vnode_setmountedon + * @abstract Set flags indicating that a block device vnode has been mounted as a filesystem. + * @discussion A block device marked as being mounted on cannot be opened. + * @param vp The vnode to set flags on, a block device. */ -void vnode_setmountedon(vnode_t vp); +void vnode_setmountedon(vnode_t vp); /*! - @function vnode_clearmountedon - @abstract Clear flags indicating that a block device vnode has been mounted as a filesystem. - @param vp The vnode to clear flags on, a block device. + * @function vnode_clearmountedon + * @abstract Clear flags indicating that a block device vnode has been mounted as a filesystem. + * @param vp The vnode to clear flags on, a block device. */ -void vnode_clearmountedon(vnode_t vp); +void vnode_clearmountedon(vnode_t vp); /*! - @function vnode_isrecycled - @abstract Check if a vnode is dead or in the process of being killed (recycled). - @discussion This is only a snapshot: a vnode may start to be recycled, or go from dead to in use, at any time. - @param vp The vnode to test. - @return Nonzero if vnode is dead or being recycled, 0 otherwise. + * @function vnode_isrecycled + * @abstract Check if a vnode is dead or in the process of being killed (recycled).
+ * @discussion This is only a snapshot: a vnode may start to be recycled, or go from dead to in use, at any time. + * @param vp The vnode to test. + * @return Nonzero if vnode is dead or being recycled, 0 otherwise. */ -int vnode_isrecycled(vnode_t vp); +int vnode_isrecycled(vnode_t vp); /*! - @function vnode_isnocache - @abstract Check if a vnode is set to not have its data cached in memory (i.e. we write-through to disk and always read from disk). - @param vp The vnode to test. - @return Nonzero if vnode is set to not have data chached, 0 otherwise. + * @function vnode_isnocache + * @abstract Check if a vnode is set to not have its data cached in memory (i.e. we write-through to disk and always read from disk). + * @param vp The vnode to test. + * @return Nonzero if vnode is set to not have data cached, 0 otherwise. */ -int vnode_isnocache(vnode_t vp); +int vnode_isnocache(vnode_t vp); /*! - @function vnode_israge - @abstract Check if a vnode is marked for rapid aging - @param vp The vnode to test. - @return Nonzero if vnode is marked for rapid aging, 0 otherwise + * @function vnode_israge + * @abstract Check if a vnode is marked for rapid aging. + * @param vp The vnode to test. + * @return Nonzero if vnode is marked for rapid aging, 0 otherwise. */ -int vnode_israge(vnode_t vp); +int vnode_israge(vnode_t vp); /*! - @function vnode_needssnapshots - @abstract Check if a vnode needs snapshots events (regardless of its ctime status) - @param vp The vnode to test. - @return Nonzero if vnode needs snapshot events, 0 otherwise + * @function vnode_needssnapshots + * @abstract Check if a vnode needs snapshot events (regardless of its ctime status). + * @param vp The vnode to test. + * @return Nonzero if vnode needs snapshot events, 0 otherwise. */ -int vnode_needssnapshots(vnode_t vp); +int vnode_needssnapshots(vnode_t vp); /*! - @function vnode_setnocache - @abstract Set a vnode to not have its data cached in memory (i.e. we write-through to disk and always read from disk). - @param vp The vnode whose flags to set. + * @function vnode_setnocache + * @abstract Set a vnode to not have its data cached in memory (i.e. we write-through to disk and always read from disk). + * @param vp The vnode whose flags to set. */ -void vnode_setnocache(vnode_t vp); +void vnode_setnocache(vnode_t vp); /*! - @function vnode_clearnocache - @abstract Clear the flag on a vnode indicating that data should not be cached in memory (i.e. we write-through to disk and always read from disk). - @param vp The vnode whose flags to clear. + * @function vnode_clearnocache + * @abstract Clear the flag on a vnode indicating that data should not be cached in memory (i.e. we write-through to disk and always read from disk). + * @param vp The vnode whose flags to clear. */ -void vnode_clearnocache(vnode_t vp); +void vnode_clearnocache(vnode_t vp); /*! - @function vnode_isnoreadahead - @abstract Check if a vnode is set to not have data speculatively read in in hopes of future cache hits. - @param vp The vnode to test. - @return Nonzero if readahead is disabled, 0 otherwise. + * @function vnode_isnoreadahead + * @abstract Check if a vnode is set to not have data speculatively read in in hopes of future cache hits. + * @param vp The vnode to test. + * @return Nonzero if readahead is disabled, 0 otherwise. */ -int vnode_isnoreadahead(vnode_t vp); +int vnode_isnoreadahead(vnode_t vp); /*! - @function vnode_setnoreadahead - @abstract Set a vnode to not have data speculatively read in in hopes of hitting in cache.
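A short sketch of how these caching hints might be consulted before setting up I/O; the policy shown for the journal vnode is purely illustrative, not something this header mandates:

    #include <sys/vnode.h>

    static void
    myfs_io_hints(vnode_t vp, int *uncached, int *readahead)
    {
            /* Honor per-vnode cache hints when choosing an I/O strategy. */
            *uncached = vnode_isnocache(vp);        /* write-through, read from disk */
            *readahead = !vnode_isnoreadahead(vp);  /* speculative read-in allowed */
    }

    static void
    myfs_mark_journal(vnode_t jvp)
    {
            /* Hypothetical policy: journal data should bypass the page cache. */
            if (!vnode_isnocache(jvp)) {
                    vnode_setnocache(jvp);
            }
    }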
- @param vp The vnode on which to prevent readahead. + * @function vnode_setnoreadahead + * @abstract Set a vnode to not have data speculatively read in in hopes of hitting in cache. + * @param vp The vnode on which to prevent readahead. */ -void vnode_setnoreadahead(vnode_t vp); +void vnode_setnoreadahead(vnode_t vp); /*! - @function vnode_clearnoreadahead - @abstract Clear the flag indicating that a vnode should not have data speculatively read in. - @param vp The vnode whose flag to clear. + * @function vnode_clearnoreadahead + * @abstract Clear the flag indicating that a vnode should not have data speculatively read in. + * @param vp The vnode whose flag to clear. */ -void vnode_clearnoreadahead(vnode_t vp); +void vnode_clearnoreadahead(vnode_t vp); /*! - @function vnode_isfastdevicecandidate - @abstract Check if a vnode is a candidate to store on the fast device of a composite disk system - @param vp The vnode which you want to test. - @return Nonzero if the vnode is marked as a fast-device candidate + * @function vnode_isfastdevicecandidate + * @abstract Check if a vnode is a candidate to store on the fast device of a composite disk system. + * @param vp The vnode which you want to test. + * @return Nonzero if the vnode is marked as a fast-device candidate. */ -int vnode_isfastdevicecandidate(vnode_t vp); +int vnode_isfastdevicecandidate(vnode_t vp); /*! - @function vnode_setfastdevicecandidate - @abstract Mark a vnode as a candidate to store on the fast device of a composite disk system - @discussion If the vnode is a directory, all its children will inherit this bit. - @param vp The vnode which you want marked. + * @function vnode_setfastdevicecandidate + * @abstract Mark a vnode as a candidate to store on the fast device of a composite disk system. + * @discussion If the vnode is a directory, all its children will inherit this bit. + * @param vp The vnode which you want marked. */ -void vnode_setfastdevicecandidate(vnode_t vp); +void vnode_setfastdevicecandidate(vnode_t vp); /*! - @function vnode_clearfastdevicecandidate - @abstract Clear the status of a vnode being a candidate to store on the fast device of a composite disk system. - @param vp The vnode whose flag to clear. + * @function vnode_clearfastdevicecandidate + * @abstract Clear the status of a vnode being a candidate to store on the fast device of a composite disk system. + * @param vp The vnode whose flag to clear. */ -void vnode_clearfastdevicecandidate(vnode_t vp); +void vnode_clearfastdevicecandidate(vnode_t vp); /*! - @function vnode_isautocandidate - @abstract Check if a vnode was automatically selected to be fast-dev candidate (see vnode_setfastdevicecandidate) - @param vp The vnode which you want to test. - @return Nonzero if the vnode was automatically marked as a fast-device candidate + * @function vnode_isautocandidate + * @abstract Check if a vnode was automatically selected to be a fast-device candidate (see vnode_setautocandidate). + * @param vp The vnode which you want to test. + * @return Nonzero if the vnode was automatically marked as a fast-device candidate. */ -int vnode_isautocandidate(vnode_t vp); +int vnode_isautocandidate(vnode_t vp); /*! - @function vnode_setfastdevicecandidate - @abstract Mark a vnode as an automatically selected candidate for storing on the fast device of a composite disk system - @discussion If the vnode is a directory, all its children will inherit this bit. - @param vp The vnode which you want marked.
+ * @function vnode_setautocandidate + * @abstract Mark a vnode as an automatically selected candidate for storing on the fast device of a composite disk system. + * @discussion If the vnode is a directory, all its children will inherit this bit. + * @param vp The vnode which you want marked. */ -void vnode_setautocandidate(vnode_t vp); +void vnode_setautocandidate(vnode_t vp); /*! - @function vnode_clearautocandidate - @abstract Clear the status of a vnode being an automatic candidate (see above) - @param vp The vnode whose flag to clear. + * @function vnode_clearautocandidate + * @abstract Clear the status of a vnode being an automatic candidate (see above). + * @param vp The vnode whose flag to clear. */ -void vnode_clearautocandidate(vnode_t vp); +void vnode_clearautocandidate(vnode_t vp); /* left only for compat reasons as User code depends on this from getattrlist, for ex */ /*! - @function vnode_settag - @abstract Set a vnode filesystem-specific "tag." - @discussion Sets a tag indicating which filesystem a vnode belongs to, e.g. VT_HFS, VT_UDF, VT_ZFS. The kernel never inspects this data, though the filesystem tags are defined in vnode.h; it is for the benefit of user programs via getattrlist. - @param vp The vnode whose tag to set. + * @function vnode_settag + * @abstract Set a vnode filesystem-specific "tag." + * @discussion Sets a tag indicating which filesystem a vnode belongs to, e.g. VT_HFS, VT_UDF, VT_ZFS. The kernel never inspects this data, though the filesystem tags are defined in vnode.h; it is for the benefit of user programs via getattrlist. + * @param vp The vnode whose tag to set. */ -void vnode_settag(vnode_t vp, int tag); +void vnode_settag(vnode_t vp, int tag); /*! - @function vnode_tag - @abstract Get the vnode filesystem-specific "tag." - @discussion Gets the tag indicating which filesystem a vnode belongs to, e.g. VT_HFS, VT_UDF, VT_ZFS. The kernel never inspects this data, though the filesystem tags are defined in vnode.h; it is for the benefit of user programs via getattrlist. - @param vp The vnode whose tag to grab. - @return The tag. + * @function vnode_tag + * @abstract Get the vnode filesystem-specific "tag." + * @discussion Gets the tag indicating which filesystem a vnode belongs to, e.g. VT_HFS, VT_UDF, VT_ZFS. The kernel never inspects this data, though the filesystem tags are defined in vnode.h; it is for the benefit of user programs via getattrlist. + * @param vp The vnode whose tag to grab. + * @return The tag. */ -int vnode_tag(vnode_t vp); +int vnode_tag(vnode_t vp); /*! - @function vnode_getattr - @abstract Get vnode attributes. - @discussion Desired attributes are set with VATTR_SET_ACTIVE and VNODE_ATTR* macros. Supported attributes are determined after call with VATTR_IS_SUPPORTED. - @param vp The vnode whose attributes to grab. - @param vap Structure containing: 1) A list of requested attributes 2) Space to indicate which attributes are supported and being returned 3) Space to return attributes. + * @function vnode_getattr + * @abstract Get vnode attributes. + * @discussion Desired attributes are set with VATTR_SET_ACTIVE and VNODE_ATTR* macros. Supported attributes are determined after call with VATTR_IS_SUPPORTED. + * @param vp The vnode whose attributes to grab. + * @param vap Structure containing: 1) A list of requested attributes 2) Space to indicate which attributes are supported and being returned 3) Space to return attributes.
+ * @param ctx Context for authentication. + * @return 0 for success or an error code. */ -int vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx); +int vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx); + +/* + * Utility function to deal with 32/64 bit fsid + */ +extern uint64_t vnode_get_va_fsid(struct vnode_attr *vap); /*! - @function vnode_setattr - @abstract Set vnode attributes. - @discussion Attributes to set are marked with VATTR_SET_ACTIVE and VNODE_ATTR* macros. Attributes successfully set are determined after call with VATTR_IS_SUPPORTED. - @param vp The vnode whose attributes to set. - @param vap Structure containing: 1) A list of attributes to set 2) Space for values for those attributes 3) Space to indicate which attributes were set. - @param ctx Context for authentication. - @return 0 for success or an error code. + * @function vnode_setattr + * @abstract Set vnode attributes. + * @discussion Attributes to set are marked with VATTR_SET_ACTIVE and VNODE_ATTR* macros. Attributes successfully set are determined after call with VATTR_IS_SUPPORTED. + * @param vp The vnode whose attributes to set. + * @param vap Structure containing: 1) A list of attributes to set 2) Space for values for those attributes 3) Space to indicate which attributes were set. + * @param ctx Context for authentication. + * @return 0 for success or an error code. */ -int vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx); +int vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx); /*! - @function vfs_rootvnode - @abstract Returns the root vnode with an iocount. - @discussion Caller must vnode_put() the root node when done. - @return Pointer to root vnode if successful; error code if there is a problem taking an iocount. + * @function vfs_rootvnode + * @abstract Returns the root vnode with an iocount. + * @discussion Caller must vnode_put() the root node when done. + * @return Pointer to root vnode if successful; error code if there is a problem taking an iocount. */ vnode_t vfs_rootvnode(void); /*! - @function vnode_uncache_credentials - @abstract Clear out cached credentials on a vnode. - @discussion When we authorize an action on a vnode, we cache the credential that was authorized and the actions it was authorized for in case a similar request follows. This function destroys that caching. - @param vp The vnode whose cache to clear. + * @function vnode_uncache_credentials + * @abstract Clear out cached credentials on a vnode. + * @discussion When we authorize an action on a vnode, we cache the credential that was authorized and the actions it was authorized for in case a similar request follows. This function destroys that caching. + * @param vp The vnode whose cache to clear. */ -void vnode_uncache_credentials(vnode_t vp); +void vnode_uncache_credentials(vnode_t vp); /*! - @function vnode_setmultipath - @abstract Mark a vnode as being reachable by multiple paths, i.e. as a hard link. - @discussion "Multipath" vnodes can be reached through more than one entry in the filesystem, and so must be handled differently for caching and event notification purposes. A filesystem should mark a vnode with multiple hardlinks this way. - @param vp The vnode to mark. + * @function vnode_setmultipath + * @abstract Mark a vnode as being reachable by multiple paths, i.e. as a hard link. 
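The request/response protocol that vnode_getattr() and vnode_setattr() implement looks roughly like this from a caller's side. A minimal sketch (the function name is hypothetical; VATTR_INIT, VATTR_WANTED, and VATTR_IS_SUPPORTED are the macros the discussion above refers to):

    #include <sys/types.h>
    #include <sys/vnode.h>

    static errno_t
    get_size_and_uid(vnode_t vp, vfs_context_t ctx, off_t *sizep, uid_t *uidp)
    {
            struct vnode_attr va;
            errno_t error;

            VATTR_INIT(&va);                    /* no attributes active yet */
            VATTR_WANTED(&va, va_data_size);    /* request specific attributes */
            VATTR_WANTED(&va, va_uid);

            error = vnode_getattr(vp, &va, ctx);
            if (error) {
                    return error;
            }
            /* Only consume attributes the filesystem actually returned. */
            if (VATTR_IS_SUPPORTED(&va, va_data_size)) {
                    *sizep = (off_t)va.va_data_size;
            }
            if (VATTR_IS_SUPPORTED(&va, va_uid)) {
                    *uidp = va.va_uid;
            }
            return 0;
    }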
+ * @discussion "Multipath" vnodes can be reached through more than one entry in the filesystem, and so must be handled differently for caching and event notification purposes. A filesystem should mark a vnode with multiple hardlinks this way. + * @param vp The vnode to mark. */ -void vnode_setmultipath(vnode_t vp); +void vnode_setmultipath(vnode_t vp); /*! - @function vnode_vfsmaxsymlen - @abstract Determine the maximum length of a symbolic link for the filesystem on which a vnode resides. - @param vp The vnode for which to get filesystem symlink size cap. - @return Max symlink length. + * @function vnode_vfsmaxsymlen + * @abstract Determine the maximum length of a symbolic link for the filesystem on which a vnode resides. + * @param vp The vnode for which to get filesystem symlink size cap. + * @return Max symlink length. */ uint32_t vnode_vfsmaxsymlen(vnode_t vp); /*! - @function vnode_vfsisrdonly - @abstract Determine if the filesystem to which a vnode belongs is mounted read-only. - @param vp The vnode for which to get filesystem writeability. - @return Nonzero if the filesystem is read-only, 0 otherwise. + * @function vnode_vfsisrdonly + * @abstract Determine if the filesystem to which a vnode belongs is mounted read-only. + * @param vp The vnode for which to get filesystem writeability. + * @return Nonzero if the filesystem is read-only, 0 otherwise. */ -int vnode_vfsisrdonly(vnode_t vp); +int vnode_vfsisrdonly(vnode_t vp); /*! - @function vnode_vfstypenum - @abstract Get the "type number" of the filesystem to which a vnode belongs. - @discussion This is an archaic construct; most filesystems are assigned a type number based on the order in which they are registered with the system. - @param vp The vnode whose filesystem to examine. - @return The type number of the fileystem to which the vnode belongs. + * @function vnode_vfstypenum + * @abstract Get the "type number" of the filesystem to which a vnode belongs. + * @discussion This is an archaic construct; most filesystems are assigned a type number based on the order in which they are registered with the system. + * @param vp The vnode whose filesystem to examine. + * @return The type number of the filesystem to which the vnode belongs. */ -int vnode_vfstypenum(vnode_t vp); +int vnode_vfstypenum(vnode_t vp); /*! - @function vnode_vfsname - @abstract Get the name of the filesystem to which a vnode belongs. - @param vp The vnode whose filesystem to examine. - @param buf Destination for vfs name: should have size MFSNAMELEN or greater. + * @function vnode_vfsname + * @abstract Get the name of the filesystem to which a vnode belongs. + * @param vp The vnode whose filesystem to examine. + * @param buf Destination for vfs name: should have size MFSNAMELEN or greater. */ -void vnode_vfsname(vnode_t vp, char *buf); +void vnode_vfsname(vnode_t vp, char *buf); /*! - @function vnode_vfs64bitready - @abstract Determine if the filesystem to which a vnode belongs is marked as ready to interact with 64-bit user processes. - @param vp The vnode whose filesystem to examine. - @return Nonzero if filesystem is marked ready for 64-bit interactions; 0 otherwise. + * @function vnode_vfs64bitready + * @abstract Determine if the filesystem to which a vnode belongs is marked as ready to interact with 64-bit user processes. + * @param vp The vnode whose filesystem to examine. + * @return Nonzero if filesystem is marked ready for 64-bit interactions; 0 otherwise.
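A sketch of the mount-level queries in use, with the buffer sized per the @param note above (the HFS-only policy is a hypothetical example, not part of the KPI):

    #include <sys/errno.h>
    #include <sys/mount.h>      /* MFSNAMELEN */
    #include <sys/systm.h>      /* strcmp */
    #include <sys/vnode.h>

    static int
    ensure_writable_hfs(vnode_t vp)
    {
            char fsname[MFSNAMELEN];

            if (vnode_vfsisrdonly(vp)) {
                    return EROFS;           /* filesystem mounted read-only */
            }
            vnode_vfsname(vp, fsname);      /* e.g. "hfs", "apfs" */
            if (strcmp(fsname, "hfs") != 0) {
                    return ENOTSUP;         /* hypothetical: caller only supports HFS */
            }
            return 0;
    }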
*/ -int vnode_vfs64bitready(vnode_t vp); +int vnode_vfs64bitready(vnode_t vp); /* These should move to private ... not documenting for now */ -int vfs_context_get_special_port(vfs_context_t, int, ipc_port_t *); -int vfs_context_set_special_port(vfs_context_t, int, ipc_port_t); +int vfs_context_get_special_port(vfs_context_t, int, ipc_port_t *); +int vfs_context_set_special_port(vfs_context_t, int, ipc_port_t); /*! - @function vfs_context_proc - @abstract Get the BSD process structure associated with a vfs_context_t. - @param ctx Context whose associated process to find. - @return Process if available, NULL otherwise. + * @function vfs_context_proc + * @abstract Get the BSD process structure associated with a vfs_context_t. + * @param ctx Context whose associated process to find. + * @return Process if available, NULL otherwise. */ -proc_t vfs_context_proc(vfs_context_t ctx); +proc_t vfs_context_proc(vfs_context_t ctx); /*! - @function vfs_context_ucred - @abstract Get the credential associated with a vfs_context_t. - @discussion Succeeds if and only if the context has a thread, the thread has a task, and the task has a BSD proc. - @param ctx Context whose associated process to find. - @returns credential if process available; NULL otherwise + * @function vfs_context_ucred + * @abstract Get the credential associated with a vfs_context_t. + * @discussion Succeeds if and only if the context has a thread, the thread has a task, and the task has a BSD proc. + * @param ctx Context whose associated process to find. + * @returns credential if process available; NULL otherwise */ -kauth_cred_t vfs_context_ucred(vfs_context_t ctx); +kauth_cred_t vfs_context_ucred(vfs_context_t ctx); /*! - @function vfs_context_pid - @abstract Get the process id of the BSD process associated with a vfs_context_t. - @param ctx Context whose associated process to find. - @return Process id. + * @function vfs_context_pid + * @abstract Get the process id of the BSD process associated with a vfs_context_t. + * @param ctx Context whose associated process to find. + * @return Process id. */ -int vfs_context_pid(vfs_context_t ctx); +int vfs_context_pid(vfs_context_t ctx); /*! - @function vfs_context_issignal - @abstract Get a bitfield of pending signals for the BSD process associated with a vfs_context_t. - @discussion The bitfield is constructed using the sigmask() macro, in the sense of bits |= sigmask(SIGSEGV). - @param ctx Context whose associated process to find. - @return Bitfield of pending signals. + * @function vfs_context_issignal + * @abstract Get a bitfield of pending signals for the BSD process associated with a vfs_context_t. + * @discussion The bitfield is constructed using the sigmask() macro, in the sense of bits |= sigmask(SIGSEGV). + * @param ctx Context whose associated process to find. + * @return Bitfield of pending signals. */ -int vfs_context_issignal(vfs_context_t ctx, sigset_t mask); +int vfs_context_issignal(vfs_context_t ctx, sigset_t mask); /*! - @function vfs_context_suser - @abstract Determine if a vfs_context_t corresponds to the superuser. - @param ctx Context to examine. - @return 0 if context belongs to superuser, EPERM otherwise. + * @function vfs_context_suser + * @abstract Determine if a vfs_context_t corresponds to the superuser. + * @param ctx Context to examine. + * @return 0 if context belongs to superuser, EPERM otherwise. */ -int vfs_context_suser(vfs_context_t ctx); +int vfs_context_suser(vfs_context_t ctx); /*! 
- @function vfs_context_is64bit - @abstract Determine if a vfs_context_t corresponds to a 64-bit user process. - @param ctx Context to examine. - @return Nonzero if context is of 64-bit process, 0 otherwise. + * @function vfs_context_is64bit + * @abstract Determine if a vfs_context_t corresponds to a 64-bit user process. + * @param ctx Context to examine. + * @return Nonzero if context is of 64-bit process, 0 otherwise. */ -int vfs_context_is64bit(vfs_context_t ctx); +int vfs_context_is64bit(vfs_context_t ctx); /*! - @function vfs_context_create - @abstract Create a new vfs_context_t with appropriate references held. - @discussion The context must be released with vfs_context_rele() when no longer in use. - @param ctx Context to copy, or NULL to use information from running thread. - @return The new context, or NULL in the event of failure. + * @function vfs_context_create + * @abstract Create a new vfs_context_t with appropriate references held. + * @discussion The context must be released with vfs_context_rele() when no longer in use. + * @param ctx Context to copy, or NULL to use information from running thread. + * @return The new context, or NULL in the event of failure. */ vfs_context_t vfs_context_create(vfs_context_t ctx); /*! - @function vfs_context_rele - @abstract Release references on components of a context and deallocate it. - @discussion A context should not be referenced after vfs_context_rele has been called. - @param ctx Context to release. - @return Always 0. + * @function vfs_context_rele + * @abstract Release references on components of a context and deallocate it. + * @discussion A context should not be referenced after vfs_context_rele has been called. + * @param ctx Context to release. + * @return Always 0. */ int vfs_context_rele(vfs_context_t ctx); /*! - @function vfs_context_current - @abstract Get the vfs_context for the current thread, or the kernel context if there is no context for current thread. - @discussion Kexts should not use this function--it is preferred to use vfs_context_create(NULL) and vfs_context_rele(), which ensure proper reference counting of underlying structures. - @return Context for current thread, or kernel context if thread context is unavailable. + * @function vfs_context_current + * @abstract Get the vfs_context for the current thread, or the kernel context if there is no context for current thread. + * @discussion Kexts should not use this function--it is preferred to use vfs_context_create(NULL) and vfs_context_rele(), which ensure proper reference counting of underlying structures. + * @return Context for current thread, or kernel context if thread context is unavailable. */ vfs_context_t vfs_context_current(void); #ifdef KERNEL_PRIVATE -int vfs_context_bind(vfs_context_t); +int vfs_context_bind(vfs_context_t); /*! - @function vfs_ctx_skipatime - @abstract Check to see if this context should skip updating a vnode's access times. - @discussion This is currently tied to the vnode rapid aging process. If the process is marked for rapid aging, - then the kernel should not update vnodes it touches for access time purposes. This will check to see if the - specified process and/or thread is marked for rapid aging when it manipulates vnodes. - @param ctx The context being investigated. - @return 1 if we should skip access time updates. - @return 0 if we should NOT skip access time updates. + * @function vfs_ctx_skipatime + * @abstract Check to see if this context should skip updating a vnode's access times. 
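The create/rele pairing described above typically brackets the other context accessors. A minimal sketch (the logging function is hypothetical):

    #include <sys/errno.h>
    #include <sys/kauth.h>
    #include <sys/systm.h>      /* printf */
    #include <sys/vnode.h>

    static int
    log_caller_identity(void)
    {
            /* Take a properly-referenced copy of the current thread's context. */
            vfs_context_t ctx = vfs_context_create(NULL);
            if (ctx == NULL) {
                    return ENOMEM;
            }
            kauth_cred_t cred = vfs_context_ucred(ctx);
            printf("pid %d uid %d suser=%d\n",
                vfs_context_pid(ctx),
                cred ? (int)kauth_cred_getuid(cred) : -1,
                vfs_context_suser(ctx) == 0);

            vfs_context_rele(ctx);          /* drop the references taken at create */
            return 0;
    }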
+ * @discussion This is currently tied to the vnode rapid aging process. If the process is marked for rapid aging, + * then the kernel should not update vnodes it touches for access time purposes. This will check to see if the + * specified process and/or thread is marked for rapid aging when it manipulates vnodes. + * @param ctx The context being investigated. + * @return 1 if we should skip access time updates. + * @return 0 if we should NOT skip access time updates. */ -int vfs_ctx_skipatime(vfs_context_t ctx); +int vfs_ctx_skipatime(vfs_context_t ctx); #endif /*! - @function vflush - @abstract Reclaim the vnodes associated with a mount. - @param mp The mount whose vnodes to kill. - @param skipvp A specific vnode to not reclaim or to let interrupt an un-forced flush - @param flags Control which - @discussion This function is used to clear out the vnodes associated with a mount as part of the unmount process. - Its parameters can determine which vnodes to skip in the process and whether in-use vnodes should be forcibly reclaimed. - Filesystems should call this function from their unmount code, because VFS code will always call it with SKIPROOT | SKIPSWAP | SKIPSYSTEM; filesystems - must take care of such vnodes themselves. - SKIPSYSTEM skip vnodes marked VSYSTEM - FORCECLOSE force file closeure - WRITECLOSE only close writeable files - SKIPSWAP skip vnodes marked VSWAP - SKIPROOT skip root vnodes marked VROOT - @return 0 for success, EBUSY if vnodes were busy and FORCECLOSE was not set. - */ -int vflush(struct mount *mp, struct vnode *skipvp, int flags); - -/*! - @function vnode_get - @abstract Increase the iocount on a vnode. - @discussion If vnode_get() succeeds, the resulting io-reference must be dropped with vnode_put(). - This function succeeds unless the vnode in question is dead or in the process of dying AND the current iocount is zero. - This means that it can block an ongoing reclaim which is blocked behind some other iocount. - - On success, vnode_get() returns with an iocount held on the vnode; this type of reference is intended to be held only for short periods of time (e.g. - across a function call) and provides a strong guarantee about the life of the vnode; vnodes with positive iocounts cannot be - recycled, and an iocount is required for any operation on a vnode. However, vnode_get() does not provide any guarantees - about the identity of the vnode it is called on; unless there is a known existing iocount on the vnode at time the call is made, - it could be recycled and put back in use before the vnode_get() succeeds, so the caller may be referencing a - completely different vnode than was intended. vnode_getwithref() and vnode_getwithvid() - provide guarantees about vnode identity. - - @return 0 for success, ENOENT if the vnode is dead and without existing io-reference. + * @function vflush + * @abstract Reclaim the vnodes associated with a mount. + * @param mp The mount whose vnodes to kill. + * @param skipvp A specific vnode to not reclaim or to let interrupt an un-forced flush. + * @param flags Control which vnodes are skipped and whether in-use vnodes are forcibly reclaimed; see the flag list below. + * @discussion This function is used to clear out the vnodes associated with a mount as part of the unmount process. + * Its parameters can determine which vnodes to skip in the process and whether in-use vnodes should be forcibly reclaimed. + * Filesystems should call this function from their unmount code, because VFS code will always call it with SKIPROOT | SKIPSWAP | SKIPSYSTEM; filesystems + * must take care of such vnodes themselves.
+ * SKIPSYSTEM skip vnodes marked VSYSTEM + * FORCECLOSE force file closure + * WRITECLOSE only close writeable files + * SKIPSWAP skip vnodes marked VSWAP + * SKIPROOT skip root vnodes marked VROOT + * @return 0 for success, EBUSY if vnodes were busy and FORCECLOSE was not set. + */ +int vflush(struct mount *mp, struct vnode *skipvp, int flags); + +/*! + * @function vnode_get + * @abstract Increase the iocount on a vnode. + * @discussion If vnode_get() succeeds, the resulting io-reference must be dropped with vnode_put(). + * This function succeeds unless the vnode in question is dead or in the process of dying AND the current iocount is zero. + * This means that it can block an ongoing reclaim which is blocked behind some other iocount. + * + * On success, vnode_get() returns with an iocount held on the vnode; this type of reference is intended to be held only for short periods of time (e.g. + * across a function call) and provides a strong guarantee about the life of the vnode; vnodes with positive iocounts cannot be + * recycled, and an iocount is required for any operation on a vnode. However, vnode_get() does not provide any guarantees + * about the identity of the vnode it is called on; unless there is a known existing iocount on the vnode at the time the call is made, + * it could be recycled and put back in use before the vnode_get() succeeds, so the caller may be referencing a + * completely different vnode than was intended. vnode_getwithref() and vnode_getwithvid() + * provide guarantees about vnode identity. + * + * @return 0 for success, ENOENT if the vnode is dead and without existing io-reference. */ -int vnode_get(vnode_t); +int vnode_get(vnode_t); /*! - @function vnode_getwithvid - @abstract Increase the iocount on a vnode, checking that the vnode is alive and has not changed vid (i.e. been recycled) - @discussion If vnode_getwithvid() succeeds, the resulting io-reference must be dropped with vnode_put(). - This function succeeds unless the vnode in question is dead, in the process of dying, or has been recycled (and given a different vnode id). - The intended usage is that a vnode is stored and its vid (vnode_vid(vp)) recorded while an iocount is held (example: a filesystem hash). + * @function vnode_getwithvid + * @abstract Increase the iocount on a vnode, checking that the vnode is alive and has not changed vid (i.e. been recycled). + * @discussion If vnode_getwithvid() succeeds, the resulting io-reference must be dropped with vnode_put(). + * This function succeeds unless the vnode in question is dead, in the process of dying, or has been recycled (and given a different vnode id). + * The intended usage is that a vnode is stored and its vid (vnode_vid(vp)) recorded while an iocount is held (example: a filesystem hash).
The + * iocount is then dropped, and time passes (perhaps locks are dropped and picked back up). Subsequently, vnode_getwithvid() is called to get an iocount, + * but we are alerted if the vnode has been recycled. + * + * On success, vnode_getwithvid() returns with an iocount held on the vnode; this type of reference is intended to be held only for short periods of time (e.g. + * across a function call) and provides a strong guarantee about the life of the vnode. vnodes with positive iocounts cannot be + * recycled. An iocount is required for any operation on a vnode. + * @return 0 for success, ENOENT if the vnode is dead, in the process of being reclaimed, or has been recycled and reused. */ -int vnode_getwithvid(vnode_t, uint32_t); +int vnode_getwithvid(vnode_t, uint32_t); #ifdef BSD_KERNEL_PRIVATE int vnode_getwithvid_drainok(vnode_t, uint32_t); #endif /* BSD_KERNEL_PRIVATE */ /*! - @function vnode_getwithref - @abstract Increase the iocount on a vnode on which a usecount (persistent reference) is held. - @discussion If vnode_getwithref() succeeds, the resulting io-reference must be dropped with vnode_put(). - vnode_getwithref() will succeed on dead vnodes; it should fail with ENOENT on vnodes which are in the process of being reclaimed. - Because it is only called with a usecount on the vnode, the caller is guaranteed that the vnode has not been - reused for a different file, though it may now be dead and have deadfs vnops (which return errors like EIO, ENXIO, ENOTDIR). - On success, vnode_getwithref() returns with an iocount held on the vnode; this type of reference is intended to be held only for short periods of time (e.g. - across a function call) and provides a strong guarantee about the life of the vnode. vnodes with positive iocounts cannot be - recycled. An iocount is required for any operation on a vnode. - @return 0 for success, ENOENT if the vnode is dead, in the process of being reclaimed, or has been recycled and reused. + * @function vnode_getwithref + * @abstract Increase the iocount on a vnode on which a usecount (persistent reference) is held. + * @discussion If vnode_getwithref() succeeds, the resulting io-reference must be dropped with vnode_put(). + * vnode_getwithref() will succeed on dead vnodes; it should fail with ENOENT on vnodes which are in the process of being reclaimed. + * Because it is only called with a usecount on the vnode, the caller is guaranteed that the vnode has not been + * reused for a different file, though it may now be dead and have deadfs vnops (which return errors like EIO, ENXIO, ENOTDIR). + * On success, vnode_getwithref() returns with an iocount held on the vnode; this type of reference is intended to be held only for short periods of time (e.g. + * across a function call) and provides a strong guarantee about the life of the vnode. vnodes with positive iocounts cannot be + * recycled. An iocount is required for any operation on a vnode. + * @return 0 for success, ENOENT if the vnode is dead, in the process of being reclaimed, or has been recycled and reused. */ -int vnode_getwithref(vnode_t vp); +int vnode_getwithref(vnode_t vp); /*! - @function vnode_put - @abstract Decrement the iocount on a vnode. - @discussion vnode_put() is called to indicate that a vnode is no longer in active use. It removes the guarantee that a - vnode will not be recycled. This routine should be used to release io references no matter how they were obtained. - @param vp The vnode whose iocount to drop. - @return Always 0. 
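Two sketches tying the routines above together: vflush() as called from a hypothetical unmount entry point, and the store-vid/re-get pattern that vnode_getwithvid() exists for (myfs_unmount, myfs_cache_entry, and myfs_cache_get are hypothetical names):

    #include <sys/mount.h>
    #include <sys/vnode.h>

    static int
    myfs_unmount(mount_t mp, int mntflags, vfs_context_t ctx)
    {
    #pragma unused(ctx)
            int flags = 0;

            if (mntflags & MNT_FORCE) {
                    flags |= FORCECLOSE;    /* reclaim even in-use vnodes */
            }
            /* SKIPSYSTEM/SKIPSWAP/SKIPROOT may be added if such vnodes
             * are torn down separately by the filesystem itself. */
            return vflush(mp, NULLVP, flags);       /* EBUSY if busy, unforced */
    }

    /* Record a vnode pointer plus its vid; revalidate later with getwithvid. */
    struct myfs_cache_entry {
            vnode_t  vp;
            uint32_t vid;       /* from vnode_vid(vp), taken under an iocount */
    };

    static int
    myfs_cache_get(struct myfs_cache_entry *ce, vnode_t *vpp)
    {
            /* Fails with ENOENT if the vnode was recycled since vid was taken. */
            int error = vnode_getwithvid(ce->vp, ce->vid);
            if (error == 0) {
                    *vpp = ce->vp;          /* caller must vnode_put() when done */
            }
            return error;
    }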
+ * @function vnode_put + * @abstract Decrement the iocount on a vnode. + * @discussion vnode_put() is called to indicate that a vnode is no longer in active use. It removes the guarantee that a + * vnode will not be recycled. This routine should be used to release io references no matter how they were obtained. + * @param vp The vnode whose iocount to drop. + * @return Always 0. */ -int vnode_put(vnode_t vp); +int vnode_put(vnode_t vp); /*! - @function vnode_ref - @abstract Increment the usecount on a vnode. - @discussion If vnode_ref() succeeds, the resulting usecount must be released with vnode_rele(). vnode_ref() is called to obtain - a persistent reference on a vnode. This type of reference does not provide the same strong guarantee that a vnode will persist - as does an iocount--it merely ensures that a vnode will not be reused to represent a different file. However, a usecount may be - held for extended periods of time, whereas an iocount is intended to be obtained and released quickly as part of performing a - vnode operation. A holder of a usecount must call vnode_getwithref()/vnode_put() in order to perform any operations on that vnode. - @param vp The vnode on which to obtain a persistent reference. - @return 0 for success; ENOENT if the vnode is dead or in the process of being recycled AND the calling thread is not the vnode owner. + * @function vnode_ref + * @abstract Increment the usecount on a vnode. + * @discussion If vnode_ref() succeeds, the resulting usecount must be released with vnode_rele(). vnode_ref() is called to obtain + * a persistent reference on a vnode. This type of reference does not provide the same strong guarantee that a vnode will persist + * as does an iocount--it merely ensures that a vnode will not be reused to represent a different file. However, a usecount may be + * held for extended periods of time, whereas an iocount is intended to be obtained and released quickly as part of performing a + * vnode operation. A holder of a usecount must call vnode_getwithref()/vnode_put() in order to perform any operations on that vnode. + * @param vp The vnode on which to obtain a persistent reference. + * @return 0 for success; ENOENT if the vnode is dead or in the process of being recycled AND the calling thread is not the vnode owner. */ -int vnode_ref(vnode_t vp); +int vnode_ref(vnode_t vp); /*! - @function vnode_rele - @abstract Decrement the usecount on a vnode. - @discussion vnode_rele() is called to relese a persistent reference on a vnode. Releasing the last usecount - opens the door for a vnode to be reused as a new file; it also triggers a VNOP_INACTIVE call to the filesystem, - though that will not happen immediately if there are outstanding iocount references. - @param vp The vnode whose usecount to drop. + * @function vnode_rele + * @abstract Decrement the usecount on a vnode. + * @discussion vnode_rele() is called to release a persistent reference on a vnode. Releasing the last usecount + * opens the door for a vnode to be reused as a new file; it also triggers a VNOP_INACTIVE call to the filesystem, + * though that will not happen immediately if there are outstanding iocount references. + * @param vp The vnode whose usecount to drop. */ -void vnode_rele(vnode_t vp); +void vnode_rele(vnode_t vp); /*! - @function vnode_isinuse - @abstract Determine if the number of persistent (usecount) references on a vnode is greater than a given count.
- @discussion vnode_isinuse() compares a vnode's usecount (corresponding to vnode_ref() calls) to its refcnt parameter - (the number of references the caller expects to be on the vnode). Note that "kusecount" references, corresponding - to parties interested only in event notifications, e.g. open(..., O_EVTONLY), are not counted towards the total; the comparison is - (usecount - kusecount > recnt). It is - also important to note that the result is only a snapshot; usecounts can change from moment to moment, and the result of vnode_isinuse - may no longer be correct the very moment that the caller receives it. - @param vp The vnode whose use-status to check. - @param refcnt The threshold for saying that a vnode is in use. + * @function vnode_isinuse + * @abstract Determine if the number of persistent (usecount) references on a vnode is greater than a given count. + * @discussion vnode_isinuse() compares a vnode's usecount (corresponding to vnode_ref() calls) to its refcnt parameter + * (the number of references the caller expects to be on the vnode). Note that "kusecount" references, corresponding + * to parties interested only in event notifications, e.g. open(..., O_EVTONLY), are not counted towards the total; the comparison is + * (usecount - kusecount > refcnt). It is + * also important to note that the result is only a snapshot; usecounts can change from moment to moment, and the result of vnode_isinuse + * may no longer be correct the very moment that the caller receives it. + * @param vp The vnode whose use-status to check. + * @param refcnt The threshold for saying that a vnode is in use. */ -int vnode_isinuse(vnode_t vp, int refcnt); +int vnode_isinuse(vnode_t vp, int refcnt); /*! - @function vnode_recycle - @abstract Cause a vnode to be reclaimed and prepared for reuse. - @discussion Like all vnode KPIs, must be called with an iocount on the target vnode. - vnode_recycle() will mark that vnode for reclaim when all existing references are dropped. - @param vp The vnode to recycle. - @return 1 if the vnode was reclaimed (i.e. there were no existing references), 0 if it was only marked for future reclaim. + * @function vnode_recycle + * @abstract Cause a vnode to be reclaimed and prepared for reuse. + * @discussion Like all vnode KPIs, must be called with an iocount on the target vnode. + * vnode_recycle() will mark that vnode for reclaim when all existing references are dropped. + * @param vp The vnode to recycle. + * @return 1 if the vnode was reclaimed (i.e. there were no existing references), 0 if it was only marked for future reclaim.
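A sketch of the long-lived usecount pattern these routines describe: hold a usecount for the lifetime of the cached reference, and re-acquire an iocount around each individual use (the myfs_root_vp global and the three helpers are hypothetical):

    #include <sys/vnode.h>

    static vnode_t myfs_root_vp;        /* hypothetical long-lived reference */

    static int
    myfs_root_stash(vnode_t vp)         /* caller holds an iocount on vp */
    {
            int error = vnode_ref(vp);  /* persistent ref, survives vnode_put */
            if (error == 0) {
                    myfs_root_vp = vp;
            }
            return error;
    }

    static int
    myfs_root_use(void)
    {
            /* Usecount holders re-acquire an iocount before touching the vnode. */
            int error = vnode_getwithref(myfs_root_vp);
            if (error) {
                    return error;
            }
            /* ... operate on the vnode ... */
            return vnode_put(myfs_root_vp);
    }

    static void
    myfs_root_drop(void)
    {
            vnode_rele(myfs_root_vp);   /* may trigger VNOP_INACTIVE */
            myfs_root_vp = NULLVP;
    }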
*/ -int vnode_recycle(vnode_t vp); +int vnode_recycle(vnode_t vp); #ifdef KERNEL_PRIVATE -#define VNODE_EVENT_DELETE 0x00000001 /* file was removed */ -#define VNODE_EVENT_WRITE 0x00000002 /* file or directory contents changed */ -#define VNODE_EVENT_EXTEND 0x00000004 /* ubc size increased */ -#define VNODE_EVENT_ATTRIB 0x00000008 /* attributes changed (suitable for permission changes if type unknown)*/ -#define VNODE_EVENT_LINK 0x00000010 /* link count changed */ -#define VNODE_EVENT_RENAME 0x00000020 /* vnode was renamed */ -#define VNODE_EVENT_PERMS 0x00000040 /* permissions changed: will cause a NOTE_ATTRIB */ -#define VNODE_EVENT_FILE_CREATED 0x00000080 /* file created in directory: will cause NOTE_WRITE */ -#define VNODE_EVENT_DIR_CREATED 0x00000100 /* directory created inside this directory: will cause NOTE_WRITE */ -#define VNODE_EVENT_FILE_REMOVED 0x00000200 /* file removed from this directory: will cause NOTE_WRITE */ -#define VNODE_EVENT_DIR_REMOVED 0x00000400 /* subdirectory from this directory: will cause NOTE_WRITE */ - -#ifdef BSD_KERNEL_PRIVATE -#define VNODE_NOTIFY_ATTRS (VNODE_ATTR_BIT(va_fsid) | \ - VNODE_ATTR_BIT(va_fileid)| \ - VNODE_ATTR_BIT(va_mode) | \ - VNODE_ATTR_BIT(va_uid) | \ - VNODE_ATTR_BIT(va_gid) | \ - VNODE_ATTR_BIT(va_dirlinkcount) | \ - VNODE_ATTR_BIT(va_nlink)) - - +#define VNODE_EVENT_DELETE 0x00000001 /* file was removed */ +#define VNODE_EVENT_WRITE 0x00000002 /* file or directory contents changed */ +#define VNODE_EVENT_EXTEND 0x00000004 /* ubc size increased */ +#define VNODE_EVENT_ATTRIB 0x00000008 /* attributes changed (suitable for permission changes if type unknown) */ +#define VNODE_EVENT_LINK 0x00000010 /* link count changed */ +#define VNODE_EVENT_RENAME 0x00000020 /* vnode was renamed */ +#define VNODE_EVENT_PERMS 0x00000040 /* permissions changed: will cause a NOTE_ATTRIB */ +#define VNODE_EVENT_FILE_CREATED 0x00000080 /* file created in directory: will cause NOTE_WRITE */ +#define VNODE_EVENT_DIR_CREATED 0x00000100 /* directory created inside this directory: will cause NOTE_WRITE */ +#define VNODE_EVENT_FILE_REMOVED 0x00000200 /* file removed from this directory: will cause NOTE_WRITE */ +#define VNODE_EVENT_DIR_REMOVED 0x00000400 /* subdirectory removed from this directory: will cause NOTE_WRITE */ + +#ifdef BSD_KERNEL_PRIVATE +#define VNODE_NOTIFY_ATTRS (VNODE_ATTR_BIT(va_fsid) | \ + VNODE_ATTR_BIT(va_fileid)| \ + VNODE_ATTR_BIT(va_mode) | \ + VNODE_ATTR_BIT(va_uid) | \ + VNODE_ATTR_BIT(va_gid) | \ + VNODE_ATTR_BIT(va_dirlinkcount) | \ + VNODE_ATTR_BIT(va_nlink)) + + #endif /* BSD_KERNEL_PRIVATE */ /*! - @function vnode_ismonitored - @abstract Check whether a file has watchers that would make it useful to query a server - for file changes. - @param vp Vnode to examine. - @discussion Will not reenter the filesystem. - @return Zero if not monitored, nonzero if monitored. - */ -int vnode_ismonitored(vnode_t vp); + * @function vnode_ismonitored + * @abstract Check whether a file has watchers that would make it useful to query a server + * for file changes. + * @param vp Vnode to examine. + * @discussion Will not reenter the filesystem. + * @return Zero if not monitored, nonzero if monitored. + */ +int vnode_ismonitored(vnode_t vp); /*! - @function vnode_isdyldsharedcache - @abstract Check whether a file is a dyld shared cache file. - @param vp Vnode to examine. - @discussion Will not reenter the filesystem. - @return nonzero if a dyld shared cache file, zero otherwise.
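A sketch of how a filesystem might combine vnode_ismonitored() with the event constants above when it learns of a remote change. The delivery call shown, vnode_notify(vp, events, vap), is an assumption about a KPI declared elsewhere in this header; the function name myfs_remote_change is hypothetical:

    #include <sys/vnode.h>

    static void
    myfs_remote_change(vnode_t vp, vfs_context_t ctx)
    {
            struct vnode_attr va;

            /* Skip the attribute work when nobody is watching. */
            if (!vnode_ismonitored(vp)) {
                    return;
            }
            VATTR_INIT(&va);
            VATTR_WANTED(&va, va_fileid);
            VATTR_WANTED(&va, va_mode);
            (void)vnode_getattr(vp, &va, ctx);
            /* Assumption: vnode_notify() is the watcher-delivery KPI. */
            (void)vnode_notify(vp, VNODE_EVENT_WRITE | VNODE_EVENT_ATTRIB, &va);
    }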
- */ -int vnode_isdyldsharedcache(vnode_t vp); + * @function vnode_isdyldsharedcache + * @abstract Check whether a file is a dyld shared cache file. + * @param vp Vnode to examine. + * @discussion Will not reenter the filesystem. + * @return nonzero if a dyld shared cache file, zero otherwise. + */ +int vnode_isdyldsharedcache(vnode_t vp); /*! - @function vn_getpath_fsenter - @abstract Attempt to get a vnode's path, willing to enter the filesystem. - @discussion Paths to vnodes are not always straightforward: a file with multiple hard-links will have multiple pathnames, - and it is sometimes impossible to determine a vnode's full path. vn_getpath_fsenter() may enter the filesystem - to try to construct a path, so filesystems should be wary of calling it. - @param vp Vnode whose path to get - @param pathbuf Buffer in which to store path. - @param len Destination for length of resulting path string. Result will include NULL-terminator in count--that is, "len" - will be strlen(pathbuf) + 1. - @return 0 for success or an error. + * @function vn_getpath_fsenter + * @abstract Attempt to get a vnode's path, willing to enter the filesystem. + * @discussion Paths to vnodes are not always straightforward: a file with multiple hard-links will have multiple pathnames, + * and it is sometimes impossible to determine a vnode's full path. vn_getpath_fsenter() may enter the filesystem + * to try to construct a path, so filesystems should be wary of calling it. + * @param vp Vnode whose path to get. + * @param pathbuf Buffer in which to store path. + * @param len Destination for length of resulting path string. Result will include NULL-terminator in count--that is, "len" + * will be strlen(pathbuf) + 1. + * @return 0 for success or an error. */ -int vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len); +int vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len); /*! - @function vn_getpath_fsenter_with_parent - @abstract Attempt to get a vnode's path by entering the file system if needed given a vnode and it's directory vnode. - @discussion Same as vn_getpath_fsenter but is given the directory vnode as well as the target vnode. Used -to get the path from the vnode while performing rename, rmdir, and unlink. This is done to avoid potential -dead lock if another thread is doing a forced unmount. - @param dvp Containing directory vnode. Must be holding an IO count. - @param vp Vnode whose path to get. Must be holding an IO count. - @param pathbuf Buffer in which to store path. - @param len Destination for length of resulting path string. Result will include NULL-terminator in count--that is, "len" - will be strlen(pathbuf) + 1. - @return 0 for success or an error. -*/ -int vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len); + * @function vn_getpath_fsenter_with_parent + * @abstract Attempt to get a vnode's path by entering the file system if needed, given a vnode and its directory vnode. + * @discussion Same as vn_getpath_fsenter but is given the directory vnode as well as the target vnode. Used + * to get the path from the vnode while performing rename, rmdir, and unlink. This is done to avoid potential + * deadlock if another thread is doing a forced unmount. + * @param dvp Containing directory vnode. Must be holding an IO count. + * @param vp Vnode whose path to get. Must be holding an IO count. + * @param pathbuf Buffer in which to store path. + * @param len Destination for length of resulting path string.
Result will include NULL-terminator in count--that is, "len" + * will be strlen(pathbuf) + 1. + * @return 0 for success or an error. + */ +int vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len); #endif /* KERNEL_PRIVATE */ -#define VNODE_UPDATE_PARENT 0x01 -#define VNODE_UPDATE_NAMEDSTREAM_PARENT VNODE_UPDATE_PARENT -#define VNODE_UPDATE_NAME 0x02 -#define VNODE_UPDATE_CACHE 0x04 -#define VNODE_UPDATE_PURGE 0x08 -/*! - @function vnode_update_identity - @abstract Update vnode data associated with the vfs cache. - @discussion The vfs namecache is central to tracking vnode-identifying data and to locating files on the system. vnode_update_identity() - is used to update vnode data associated with the cache. It can set a vnode's parent and/or name (also potentially set by vnode_create()) - or flush cache data. - @param vp The vnode whose information to update. - @param dvp Parent to set on the vnode if VNODE_UPDATE_PARENT is used. - @param name Name to set in the cache for the vnode if VNODE_UPDATE_NAME is used. The buffer passed in can be subsequently freed, as the cache - does its own name storage. String should be NULL-terminated unless length and hash value are specified. - @param name_len Length of name, if known. Passing 0 causes the cache to determine the length itself. - @param name_hashval Hash value of name, if known. Passing 0 causes the cache to hash the name itself. - @param flags VNODE_UPDATE_PARENT: set parent. VNODE_UPDATE_NAME: set name. VNODE_UPDATE_CACHE: flush cache entries for hard links - associated with this file. VNODE_UPDATE_PURGE: flush cache entries for hard links and children of this file. - */ -void vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags); - -/*! - @function vn_bwrite - @abstract System-provided implementation of "bwrite" vnop. - @discussion This routine is available for filesystems which do not want to implement their own "bwrite" vnop. It just calls - buf_bwrite() without modifying its arguments. - @param ap Standard parameters to a bwrite vnop. - @return Results of buf_bwrite directly. - */ -int vn_bwrite(struct vnop_bwrite_args *ap); - -/*! - @function vnode_authorize - @abstract Authorize a kauth-style action on a vnode. - @discussion Operations on dead vnodes are always allowed (though never do anything). - @param vp Vnode on which to authorize action. - @param dvp Parent of "vp," can be NULL. - @param action Action to authorize, e.g. KAUTH_VNODE_READ_DATA. See bsd/sys/kauth.h. - @param ctx Context for which to authorize actions. - @return EACCESS if permission is denied. 0 if operation allowed. Various errors from lower layers. - */ -int vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx); +#define VNODE_UPDATE_PARENT 0x01 +#define VNODE_UPDATE_NAMEDSTREAM_PARENT VNODE_UPDATE_PARENT +#define VNODE_UPDATE_NAME 0x02 +#define VNODE_UPDATE_CACHE 0x04 +#define VNODE_UPDATE_PURGE 0x08 +/*! + * @function vnode_update_identity + * @abstract Update vnode data associated with the vfs cache. + * @discussion The vfs namecache is central to tracking vnode-identifying data and to locating files on the system. vnode_update_identity() + * is used to update vnode data associated with the cache. It can set a vnode's parent and/or name (also potentially set by vnode_create()) + * or flush cache data. + * @param vp The vnode whose information to update. + * @param dvp Parent to set on the vnode if VNODE_UPDATE_PARENT is used. 
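A sketch of the path-retrieval convention shared by the vn_getpath* variants: a MAXPATHLEN-sized buffer and an in/out length that comes back as strlen(pathbuf) + 1 (the logging helper is hypothetical):

    #include <sys/malloc.h>
    #include <sys/param.h>      /* MAXPATHLEN */
    #include <sys/systm.h>      /* printf */
    #include <sys/vnode.h>

    static int
    log_vnode_path(vnode_t vp)
    {
            char *path;
            int   len = MAXPATHLEN;
            int   error;

            MALLOC(path, char *, MAXPATHLEN, M_TEMP, M_WAITOK);
            /* len in: buffer size; out: strlen(path) + 1 */
            error = vn_getpath_fsenter(vp, path, &len);
            if (error == 0) {
                    printf("vnode path: %s (len %d)\n", path, len);
            }
            FREE(path, M_TEMP);
            return error;
    }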
+ * @param name Name to set in the cache for the vnode if VNODE_UPDATE_NAME is used. The buffer passed in can be subsequently freed, as the cache + * does its own name storage. String should be NULL-terminated unless length and hash value are specified. + * @param name_len Length of name, if known. Passing 0 causes the cache to determine the length itself. + * @param name_hashval Hash value of name, if known. Passing 0 causes the cache to hash the name itself. + * @param flags VNODE_UPDATE_PARENT: set parent. VNODE_UPDATE_NAME: set name. VNODE_UPDATE_CACHE: flush cache entries for hard links + * associated with this file. VNODE_UPDATE_PURGE: flush cache entries for hard links and children of this file. + */ +void vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags); + +/*! + * @function vn_bwrite + * @abstract System-provided implementation of "bwrite" vnop. + * @discussion This routine is available for filesystems which do not want to implement their own "bwrite" vnop. It just calls + * buf_bwrite() without modifying its arguments. + * @param ap Standard parameters to a bwrite vnop. + * @return Results of buf_bwrite directly. + */ +int vn_bwrite(struct vnop_bwrite_args *ap); + +/*! + * @function vnode_authorize + * @abstract Authorize a kauth-style action on a vnode. + * @discussion Operations on dead vnodes are always allowed (though never do anything). + * @param vp Vnode on which to authorize action. + * @param dvp Parent of "vp," can be NULL. + * @param action Action to authorize, e.g. KAUTH_VNODE_READ_DATA. See bsd/sys/kauth.h. + * @param ctx Context for which to authorize actions. + * @return EACCES if permission is denied. 0 if operation allowed. Various errors from lower layers. + */ +int vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx); #ifdef KERNEL_PRIVATE /*! - @function vnode_attr_authorize_init - @abstract Initialize attributes for authorization of a kauth-style action on a file system object based on its attributes. - @discussion This function tells the caller what attributes may be required for a authorizing - a kauth style action. - @param vap attributes of file system object on which to authorize action. - @param dvap attributes of parent of file system object, can be NULL. - @param action Action to authorize, e.g. KAUTH_VNODE_READ_DATA. See bsd/sys/kauth.h. - @param ctx Context for which to authorize actions. - @return EINVAL if a required parameters are not passed (for eg. not passing dvap when the action is KAUTH_ACTION_DELETE), 0 otherwise. - */ -#define VNODE_ATTR_AUTHORIZE_AVAILABLE 0x01 -int vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap, kauth_action_t action, vfs_context_t ctx); - -/*! - @function vnode_attr_authorize - @abstract Authorize a kauth-style action on a file system object based on its attributes. - @discussion This function should be preceded by a call to vnode_attr_authorize_init to get what attributes are required. - @param vap attributes of file system object on which to authorize action. - @param dvap attributes of parent of file system object, can be NULL. - @param mp mountpoint to which file system object belongs, can be NULL. - @param action Action to authorize, e.g. KAUTH_VNODE_READ_DATA. See bsd/sys/kauth.h. - @param ctx Context for which to authorize actions. - @return EACCESS if permission is denied. 0 if operation allowed. Various errors from lower layers.
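A sketch of the authorization sequence the docs above describe: vnode_authattr() translates the attributes a caller wants to set into kauth actions, vnode_authorize() checks them, and only then is vnode_setattr() applied (the wrapper function is hypothetical; error paths abbreviated):

    #include <sys/kauth.h>
    #include <sys/types.h>
    #include <sys/vnode.h>

    static int
    set_mode_checked(vnode_t vp, mode_t mode, vfs_context_t ctx)
    {
            struct vnode_attr va;
            kauth_action_t    action;
            int               error;

            VATTR_INIT(&va);
            VATTR_SET(&va, va_mode, mode);          /* attribute we want to change */

            /* Translate the desired attributes into kauth actions ... */
            error = vnode_authattr(vp, &va, &action, ctx);
            if (error) {
                    return error;
            }
            /* ... and authorize them before applying. */
            if (action != 0 && (error = vnode_authorize(vp, NULL, action, ctx))) {
                    return error;
            }
            return vnode_setattr(vp, &va, ctx);
    }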
- */ -int vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp, kauth_action_t action, vfs_context_t ctx); + * @function vnode_attr_authorize_init + * @abstract Initialize attributes for authorization of a kauth-style action on a file system object based on its attributes. + * @discussion This function tells the caller what attributes may be required for authorizing + * a kauth-style action. + * @param vap attributes of file system object on which to authorize action. + * @param dvap attributes of parent of file system object, can be NULL. + * @param action Action to authorize, e.g. KAUTH_VNODE_READ_DATA. See bsd/sys/kauth.h. + * @param ctx Context for which to authorize actions. + * @return EINVAL if required parameters are not passed (e.g. not passing dvap when the action is KAUTH_ACTION_DELETE), 0 otherwise. + */ +#define VNODE_ATTR_AUTHORIZE_AVAILABLE 0x01 +int vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap, kauth_action_t action, vfs_context_t ctx); + +/*! + * @function vnode_attr_authorize + * @abstract Authorize a kauth-style action on a file system object based on its attributes. + * @discussion This function should be preceded by a call to vnode_attr_authorize_init to get what attributes are required. + * @param vap attributes of file system object on which to authorize action. + * @param dvap attributes of parent of file system object, can be NULL. + * @param mp mountpoint to which file system object belongs, can be NULL. + * @param action Action to authorize, e.g. KAUTH_VNODE_READ_DATA. See bsd/sys/kauth.h. + * @param ctx Context for which to authorize actions. + * @return EACCESS if permission is denied. 0 if operation allowed. Various errors from lower layers. + */ +int vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp, kauth_action_t action, vfs_context_t ctx); #endif /* KERNEL_PRIVATE */ /*! - @function vnode_authattr - @abstract Given a vnode_attr structure, determine what kauth-style actions must be authorized in order to set those attributes. - @discussion vnode_authorize requires kauth-style actions; if we want to set a vnode_attr structure on a vnode, we need to translate - the set of attributes to a set of kauth-style actions. This routine will return errors for certain obviously disallowed, or - incoherent, actions. - @param vp The vnode on which to authorize action. - @param vap Pointer to vnode_attr struct containing desired attributes to set and their values. - @param actionp Destination for set of actions to authorize - @param ctx Context for which to authorize actions. - @return 0 (and a result in "actionp" for success. Otherwise, an error code. + * @function vnode_authattr + * @abstract Given a vnode_attr structure, determine what kauth-style actions must be authorized in order to set those attributes. + * @discussion vnode_authorize requires kauth-style actions; if we want to set a vnode_attr structure on a vnode, we need to translate + * the set of attributes to a set of kauth-style actions. This routine will return errors for certain obviously disallowed, or + * incoherent, actions. + * @param vp The vnode on which to authorize action. + * @param vap Pointer to vnode_attr struct containing desired attributes to set and their values. + * @param actionp Destination for set of actions to authorize. + * @param ctx Context for which to authorize actions. + * @return 0 (and a result in "actionp") for success. Otherwise, an error code.
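A minimal sketch of the two-phase protocol described above (KERNEL_PRIVATE only); the surrounding function and the step that fills in the attributes are hypothetical, and only the two KPIs come from this header:

#include <sys/vnode.h>
#include <sys/kauth.h>

static int
myfs_authorize_read_by_attrs(struct vnode_attr *vap, vfs_context_t ctx)
{
    int error;

    /* Phase 1: marks the attributes that the check below will consult. */
    error = vnode_attr_authorize_init(vap, NULL, KAUTH_VNODE_READ_DATA, ctx);
    if (error != 0) {
        return error;
    }

    /* ... caller populates every attribute flagged active in vap ... */

    /* Phase 2: authorize purely from the attributes; dvap and mp may be
     * NULL here since KAUTH_VNODE_READ_DATA does not require a parent. */
    return vnode_attr_authorize(vap, NULL, NULL, KAUTH_VNODE_READ_DATA, ctx);
}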
*/ -int vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx); +int vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx); /*! - @function vnode_authattr_new - @abstract Initialize and validate file creation parameters with respect to the current context. - @discussion vnode_authattr_new() will fill in unitialized values in the vnode_attr struct with defaults, and will validate the structure - with respect to the current context for file creation. - @param dvp The directory in which creation will occur. - @param vap Pointer to vnode_attr struct containing desired attributes to set and their values. - @param noauth If 1, treat the caller as the superuser, i.e. do not check permissions. - @param ctx Context for which to authorize actions. - @return KAUTH_RESULT_ALLOW for success, an error to indicate invalid or disallowed attributes. + * @function vnode_authattr_new + * @abstract Initialize and validate file creation parameters with respect to the current context. + * @discussion vnode_authattr_new() will fill in uninitialized values in the vnode_attr struct with defaults, and will validate the structure + * with respect to the current context for file creation. + * @param dvp The directory in which creation will occur. + * @param vap Pointer to vnode_attr struct containing desired attributes to set and their values. + * @param noauth If 1, treat the caller as the superuser, i.e. do not check permissions. + * @param ctx Context for which to authorize actions. + * @return KAUTH_RESULT_ALLOW for success, an error to indicate invalid or disallowed attributes. */ -int vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx); +int vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx); /*! - @function vnode_close - @abstract Close a file as opened with vnode_open(). - @discussion vnode_close() drops the refcount (persistent reference) picked up in vnode_open() and calls down to the filesystem with VNOP_CLOSE. It should - be called with both an iocount and a refcount on the vnode and will drop both. - @param vp The vnode to close. - @param flags Flags to close: FWASWRITTEN indicates that the file was written to. - @param ctx Context against which to validate operation. - @return 0 for success or an error from the filesystem. + * @function vnode_close + * @abstract Close a file as opened with vnode_open(). + * @discussion vnode_close() drops the refcount (persistent reference) picked up in vnode_open() and calls down to the filesystem with VNOP_CLOSE. It should + * be called with both an iocount and a refcount on the vnode and will drop both. + * @param vp The vnode to close. + * @param flags Flags to close: FWASWRITTEN indicates that the file was written to. + * @param ctx Context against which to validate operation. + * @return 0 for success or an error from the filesystem. */ errno_t vnode_close(vnode_t vp, int flags, vfs_context_t ctx); /*! - @function vn_getpath - @abstract Construct the path to a vnode. - @discussion Paths to vnodes are not always straightforward: a file with multiple hard-links will have multiple pathnames, - and it is sometimes impossible to determine a vnode's full path. vn_getpath() will not enter the filesystem. - @param vp The vnode whose path to obtain. - @param pathbuf Destination for pathname; should be of size MAXPATHLEN - @param len Destination for length of resulting path string.
Result will include NULL-terminator in count--that is, "len" - will be strlen(pathbuf) + 1. - @return 0 for success or an error code. + * @function vn_getpath + * @abstract Construct the path to a vnode. + * @discussion Paths to vnodes are not always straightforward: a file with multiple hard-links will have multiple pathnames, + * and it is sometimes impossible to determine a vnode's full path. vn_getpath() will not enter the filesystem. + * @param vp The vnode whose path to obtain. + * @param pathbuf Destination for pathname; should be of size MAXPATHLEN + * @param len Destination for length of resulting path string. Result will include NULL-terminator in count--that is, "len" + * will be strlen(pathbuf) + 1. + * @return 0 for success or an error code. */ int vn_getpath(struct vnode *vp, char *pathbuf, int *len); /*! - @function vnode_notify - @abstract Send a notification up to VFS. - @param vp Vnode for which to provide notification. - @param vap Attributes for that vnode, to be passed to fsevents. - @discussion Filesystem determines which attributes to pass up using - vfs_get_notify_attributes(&vap). The most specific events possible should be passed, - e.g. VNODE_EVENT_FILE_CREATED on a directory rather than just VNODE_EVENT_WRITE, but - a less specific event can be passed up if more specific information is not available. - Will not reenter the filesystem. - @return 0 for success, else an error code. - */ -int vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap); - -/*! - @function vfs_get_notify_attributes - @abstract Determine what attributes are required to send up a notification with vnode_notify(). - @param vap Structure to initialize and activate required attributes on. - @discussion Will not reenter the filesystem. - @return 0 for success, nonzero for error (currently always succeeds). - */ -int vfs_get_notify_attributes(struct vnode_attr *vap); + * @function vnode_notify + * @abstract Send a notification up to VFS. + * @param vp Vnode for which to provide notification. + * @param vap Attributes for that vnode, to be passed to fsevents. + * @discussion Filesystem determines which attributes to pass up using + * vfs_get_notify_attributes(&vap). The most specific events possible should be passed, + * e.g. VNODE_EVENT_FILE_CREATED on a directory rather than just VNODE_EVENT_WRITE, but + * a less specific event can be passed up if more specific information is not available. + * Will not reenter the filesystem. + * @return 0 for success, else an error code. + */ +int vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap); + +/*! + * @function vfs_get_notify_attributes + * @abstract Determine what attributes are required to send up a notification with vnode_notify(). + * @param vap Structure to initialize and activate required attributes on. + * @discussion Will not reenter the filesystem. + * @return 0 for success, nonzero for error (currently always succeeds). + */ +int vfs_get_notify_attributes(struct vnode_attr *vap); /* * Flags for the vnode_lookup and vnode_open */ -#define VNODE_LOOKUP_NOFOLLOW 0x01 -#define VNODE_LOOKUP_NOCROSSMOUNT 0x02 -#define VNODE_LOOKUP_CROSSMOUNTNOWAIT 0x04 +#define VNODE_LOOKUP_NOFOLLOW 0x01 +#define VNODE_LOOKUP_NOCROSSMOUNT 0x02 +#define VNODE_LOOKUP_CROSSMOUNTNOWAIT 0x04 /*! - @function vnode_lookup - @abstract Convert a path into a vnode. 
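By way of example, a sketch of vn_getpath() usage with a MAXPATHLEN buffer; the helper name and the use of the M_TEMP allocator are illustrative only:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/vnode.h>

static int
myfs_log_vnode_path(vnode_t vp)          /* hypothetical helper */
{
    char *pathbuf;
    int len = MAXPATHLEN;                /* in: buffer size */
    int error;

    MALLOC(pathbuf, char *, MAXPATHLEN, M_TEMP, M_WAITOK);
    error = vn_getpath(vp, pathbuf, &len);
    if (error == 0) {
        /* out: len counts the NUL terminator, i.e. strlen(pathbuf) + 1 */
        printf("myfs: path %s (len %d)\n", pathbuf, len);
    }
    FREE(pathbuf, M_TEMP);
    return error;
}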
- @discussion This routine is a thin wrapper around xnu-internal lookup routines; if successful, - it returns with an iocount held on the resulting vnode which must be dropped with vnode_put(). - @param path Path to look up. - @param flags VNODE_LOOKUP_NOFOLLOW: do not follow symbolic links. VNODE_LOOKUP_NOCROSSMOUNT: do not cross mount points. - @return Results 0 for success or an error code. + * @function vnode_lookup + * @abstract Convert a path into a vnode. + * @discussion This routine is a thin wrapper around xnu-internal lookup routines; if successful, + * it returns with an iocount held on the resulting vnode which must be dropped with vnode_put(). + * @param path Path to look up. + * @param flags VNODE_LOOKUP_NOFOLLOW: do not follow symbolic links. VNODE_LOOKUP_NOCROSSMOUNT: do not cross mount points. + * @return 0 for success or an error code. */ errno_t vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx); /*! - @function vnode_open - @abstract Open a file identified by a path--roughly speaking an in-kernel open(2). - @discussion If vnode_open() succeeds, it returns with both an iocount and a usecount on the returned vnode. These must - be released eventually; the iocount should be released with vnode_put() as soon as any initial operations - on the vnode are over, whereas the usecount should be released via vnode_close(). - @param path Path to look up. - @param fmode e.g. O_NONBLOCK, O_APPEND; see bsd/sys/fcntl.h. - @param cmode Permissions with which to create file if it does not exist. - @param flags Same as vnode_lookup(). - @param vpp Destination for vnode. - @param ctx Context with which to authorize open/creation. - @return 0 for success or an error code. + * @function vnode_open + * @abstract Open a file identified by a path--roughly speaking an in-kernel open(2). + * @discussion If vnode_open() succeeds, it returns with both an iocount and a usecount on the returned vnode. These must + * be released eventually; the iocount should be released with vnode_put() as soon as any initial operations + * on the vnode are over, whereas the usecount should be released via vnode_close(). + * @param path Path to look up. + * @param fmode e.g. O_NONBLOCK, O_APPEND; see bsd/sys/fcntl.h. + * @param cmode Permissions with which to create file if it does not exist. + * @param flags Same as vnode_lookup(). + * @param vpp Destination for vnode. + * @param ctx Context with which to authorize open/creation. + * @return 0 for success or an error code. */ errno_t vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx); @@ -1849,111 +1854,111 @@ errno_t vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *v */ /*! - @function vnode_iterate - @abstract Perform an operation on (almost) all vnodes from a given mountpoint. - @param mp Mount whose vnodes to operate on. - @param flags - VNODE_RELOAD Mark inactive vnodes for recycle. - VNODE_WAIT - VNODE_WRITEABLE Only examine vnodes with writes in progress. - VNODE_WITHID No effect. - VNODE_NOLOCK_INTERNAL No effect. - VNODE_NODEAD No effect. - VNODE_NOSUSPEND No effect. - VNODE_ITERATE_ALL No effect. - VNODE_ITERATE_ACTIVE No effect. - VNODE_ITERATE_INACTIVE No effect. - - @param callout Function to call on each vnode. - @param arg Argument which will be passed to callout along with each vnode. - @return Zero for success, else an error code. Will return 0 immediately if there are no vnodes hooked into the mount.
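Tying vnode_open(), vn_rdwr() (documented further below in this header), and vnode_close() together, a hedged sketch of in-kernel file I/O; the path and the fixed-size buffer are hypothetical:

#include <sys/fcntl.h>
#include <sys/uio.h>
#include <sys/vnode.h>

static int
myfs_read_config(vfs_context_t ctx)      /* hypothetical helper */
{
    vnode_t vp = NULLVP;
    char buf[128];
    int resid = 0;
    int error;

    /* hypothetical config file path */
    error = vnode_open("/etc/myfs.conf", FREAD, 0, 0, &vp, ctx);
    if (error != 0) {
        return error;
    }

    error = vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof(buf), 0, UIO_SYSSPACE,
        0, vfs_context_ucred(ctx), &resid, vfs_context_proc(ctx));

    /* vnode_close() expects the iocount from vnode_open() to still be
     * held and drops it along with the usecount. */
    (void)vnode_close(vp, 0, ctx);
    return error;
}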
- @discussion Skips vnodes which are dead, in the process of reclaim, suspended, or of type VNON. - */ -int vnode_iterate(struct mount *mp, int flags, int (*callout)(struct vnode *, void *), void *arg); + * @function vnode_iterate + * @abstract Perform an operation on (almost) all vnodes from a given mountpoint. + * @param mp Mount whose vnodes to operate on. + * @param flags + * VNODE_RELOAD Mark inactive vnodes for recycle. + * VNODE_WAIT + * VNODE_WRITEABLE Only examine vnodes with writes in progress. + * VNODE_WITHID No effect. + * VNODE_NOLOCK_INTERNAL No effect. + * VNODE_NODEAD No effect. + * VNODE_NOSUSPEND No effect. + * VNODE_ITERATE_ALL No effect. + * VNODE_ITERATE_ACTIVE No effect. + * VNODE_ITERATE_INACTIVE No effect. + * + * @param callout Function to call on each vnode. + * @param arg Argument which will be passed to callout along with each vnode. + * @return Zero for success, else an error code. Will return 0 immediately if there are no vnodes hooked into the mount. + * @discussion Skips vnodes which are dead, in the process of reclaim, suspended, or of type VNON. + */ +int vnode_iterate(struct mount *mp, int flags, int (*callout)(struct vnode *, void *), void *arg); /* * flags passed into vnode_iterate */ -#define VNODE_RELOAD 0x01 -#define VNODE_WAIT 0x02 -#define VNODE_WRITEABLE 0x04 -#define VNODE_WITHID 0x08 -#define VNODE_NOLOCK_INTERNAL 0x10 -#define VNODE_NODEAD 0x20 -#define VNODE_NOSUSPEND 0x40 -#define VNODE_ITERATE_ALL 0x80 -#define VNODE_ITERATE_ACTIVE 0x100 -#define VNODE_ITERATE_INACTIVE 0x200 +#define VNODE_RELOAD 0x01 +#define VNODE_WAIT 0x02 +#define VNODE_WRITEABLE 0x04 +#define VNODE_WITHID 0x08 +#define VNODE_NOLOCK_INTERNAL 0x10 +#define VNODE_NODEAD 0x20 +#define VNODE_NOSUSPEND 0x40 +#define VNODE_ITERATE_ALL 0x80 +#define VNODE_ITERATE_ACTIVE 0x100 +#define VNODE_ITERATE_INACTIVE 0x200 #ifdef BSD_KERNEL_PRIVATE -#define VNODE_ALWAYS 0x400 -#define VNODE_DRAINO 0x800 +#define VNODE_ALWAYS 0x400 +#define VNODE_DRAINO 0x800 #endif /* BSD_KERNEL_PRIVATE */ /* * return values from callback */ -#define VNODE_RETURNED 0 /* done with vnode, reference can be dropped */ -#define VNODE_RETURNED_DONE 1 /* done with vnode, reference can be dropped, terminate iteration */ -#define VNODE_CLAIMED 2 /* don't drop reference */ -#define VNODE_CLAIMED_DONE 3 /* don't drop reference, terminate iteration */ +#define VNODE_RETURNED 0 /* done with vnode, reference can be dropped */ +#define VNODE_RETURNED_DONE 1 /* done with vnode, reference can be dropped, terminate iteration */ +#define VNODE_CLAIMED 2 /* don't drop reference */ +#define VNODE_CLAIMED_DONE 3 /* don't drop reference, terminate iteration */ /*! - @function vn_revoke - @abstract Invalidate all references to a vnode. - @discussion Reclaims the vnode, giving it deadfs vnops (though not halting operations which are already in progress). - Also reclaims all aliased vnodes (important for devices). People holding usecounts on the vnode, e.g. processes - with the file open, will find that all subsequent operations but closing the file fail. - @param vp The vnode to revoke. - @param flags Unused. - @param ctx Context against which to validate operation. - @return 0 always. + * @function vn_revoke + * @abstract Invalidate all references to a vnode. + * @discussion Reclaims the vnode, giving it deadfs vnops (though not halting operations which are already in progress). + * Also reclaims all aliased vnodes (important for devices). People holding usecounts on the vnode, e.g. 
processes + * with the file open, will find that all subsequent operations but closing the file fail. + * @param vp The vnode to revoke. + * @param flags Unused. + * @param ctx Context against which to validate operation. + * @return 0 always. */ -int vn_revoke(vnode_t vp, int flags, vfs_context_t ctx); +int vn_revoke(vnode_t vp, int flags, vfs_context_t ctx); /* namecache function prototypes */ /*! - @function cache_lookup - @abstract Check for a filename in a directory using the VFS name cache. - @discussion cache_lookup() will flush negative cache entries and return 0 if the operation of the cn_nameiop is CREATE or RENAME. - Often used from the filesystem during a lookup vnop. The filesystem will be called to if there is a negative cache entry for a file, - so it can make sense to initially check for negative entries (and possibly lush them). - @param dvp Directory in which lookup is occurring. - @param vpp Destination for vnode pointer. - @param cnp Various data about lookup, e.g. filename and intended operation. - @return ENOENT: the filesystem has previously added a negative entry with cache_enter() to indicate that there is no - file of the given name in "dp." -1: successfully found a cached vnode (vpp is set). 0: No data in the cache, or operation is CRETE/RENAME. + * @function cache_lookup + * @abstract Check for a filename in a directory using the VFS name cache. + * @discussion cache_lookup() will flush negative cache entries and return 0 if the operation of the cn_nameiop is CREATE or RENAME. + * Often used from the filesystem during a lookup vnop. The filesystem will be called if there is a negative cache entry for a file, + * so it can make sense to initially check for negative entries (and possibly flush them). + * @param dvp Directory in which lookup is occurring. + * @param vpp Destination for vnode pointer. + * @param cnp Various data about lookup, e.g. filename and intended operation. + * @return ENOENT: the filesystem has previously added a negative entry with cache_enter() to indicate that there is no + * file of the given name in "dvp." -1: successfully found a cached vnode (vpp is set). 0: No data in the cache, or operation is CREATE/RENAME. */ -int cache_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp); +int cache_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp); /*! - @function cache_enter - @abstract Add a (name,vnode) entry to the VFS namecache. - @discussion Generally used to add a cache entry after a successful filesystem-level lookup or to add a - negative entry after one which did not find its target. - @param dvp Directory in which file lives. - @param vp File to add to cache. A non-NULL vp is stored for rapid access; a NULL vp indicates - that there is no such file in the directory and speeds future failed lookups. - @param cnp Various data about lookup, e.g. filename and intended operation. + * @function cache_enter + * @abstract Add a (name,vnode) entry to the VFS namecache. + * @discussion Generally used to add a cache entry after a successful filesystem-level lookup or to add a + * negative entry after one which did not find its target. + * @param dvp Directory in which file lives. + * @param vp File to add to cache. A non-NULL vp is stored for rapid access; a NULL vp indicates + * that there is no such file in the directory and speeds future failed lookups. + * @param cnp Various data about lookup, e.g. filename and intended operation.
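A sketch of how a filesystem's lookup path might combine the cache_lookup()/cache_enter() pair above; myfs_dirlookup() stands in for the on-disk directory scan and is hypothetical:

#include <sys/errno.h>
#include <sys/vnode.h>

static int myfs_dirlookup(vnode_t, struct componentname *, vnode_t *); /* hypothetical */

static int
myfs_cached_lookup(vnode_t dvp, vnode_t *vpp, struct componentname *cnp)
{
    int error;

    switch (cache_lookup(dvp, vpp, cnp)) {
    case -1:
        return 0;       /* hit: *vpp returned from the cache */
    case ENOENT:
        return ENOENT;  /* negative entry planted by an earlier cache_enter() */
    default:
        break;          /* 0: miss, or entries flushed for CREATE/RENAME */
    }

    error = myfs_dirlookup(dvp, cnp, vpp);  /* consult on-disk directory */
    if (error == 0) {
        cache_enter(dvp, *vpp, cnp);        /* positive entry */
    } else if (error == ENOENT) {
        cache_enter(dvp, NULLVP, cnp);      /* negative entry speeds future misses */
    }
    return error;
}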
*/ -void cache_enter(vnode_t dvp, vnode_t vp, struct componentname *cnp); +void cache_enter(vnode_t dvp, vnode_t vp, struct componentname *cnp); /*! - @function cache_purge - @abstract Remove all data relating to a vnode from the namecache. - @discussion Will flush all hardlinks to the vnode as well as all children (should any exist). Logical - to use when cached data about a vnode becomes invalid, for instance in an unlink. - @param vp The vnode to purge. + * @function cache_purge + * @abstract Remove all data relating to a vnode from the namecache. + * @discussion Will flush all hardlinks to the vnode as well as all children (should any exist). Logical + * to use when cached data about a vnode becomes invalid, for instance in an unlink. + * @param vp The vnode to purge. */ -void cache_purge(vnode_t vp); +void cache_purge(vnode_t vp); /*! - @function cache_purge_negatives - @abstract Remove all negative cache entries which are children of a given vnode. - @discussion Appropriate to use when negative cache information for a directory could have - become invalid, e.g. after file creation. - @param vp The vnode whose negative children to purge. + * @function cache_purge_negatives + * @abstract Remove all negative cache entries which are children of a given vnode. + * @discussion Appropriate to use when negative cache information for a directory could have + * become invalid, e.g. after file creation. + * @param vp The vnode whose negative children to purge. */ -void cache_purge_negatives(vnode_t vp); +void cache_purge_negatives(vnode_t vp); /* @@ -1962,194 +1967,194 @@ void cache_purge_negatives(vnode_t vp); * There are no flags for now but maybe someday. */ /*! - @function vfs_addname - @abstract Deprecated - @discussion vnode_update_identity() and vnode_create() make vfs_addname() unnecessary for kexts. + * @function vfs_addname + * @abstract Deprecated + * @discussion vnode_update_identity() and vnode_create() make vfs_addname() unnecessary for kexts. */ const char *vfs_addname(const char *name, uint32_t len, uint32_t nc_hash, uint32_t flags); /*! - @function vfs_removename - @abstract Deprecated - @discussion vnode_update_identity() and vnode_create() make vfs_addname() unnecessary for kexts. + * @function vfs_removename + * @abstract Deprecated + * @discussion vnode_update_identity() and vnode_create() make vfs_addname() unnecessary for kexts. */ int vfs_removename(const char *name); /*! - @function vcount - @abstract Count total references to a given file, disregarding "kusecount" (event listener, as with O_EVTONLY) references. - @discussion For a regular file, just return (usecount-kusecount); for device files, return the sum over all - vnodes 'v' which reference that device of (usecount(v) - kusecount(v)). Note that this is merely a snapshot and could be - invalid by the time the caller checks the result. - @param vp The vnode whose references to count. - @return Count of references. + * @function vcount + * @abstract Count total references to a given file, disregarding "kusecount" (event listener, as with O_EVTONLY) references. + * @discussion For a regular file, just return (usecount-kusecount); for device files, return the sum over all + * vnodes 'v' which reference that device of (usecount(v) - kusecount(v)). Note that this is merely a snapshot and could be + * invalid by the time the caller checks the result. + * @param vp The vnode whose references to count. + * @return Count of references. */ -int vcount(vnode_t vp); +int vcount(vnode_t vp); /*! 
- @function vn_path_package_check - @abstract Figure out if a path corresponds to a Mac OS X package. - @discussion Determines if the extension on a path is a known OS X extension type. - @param vp Unused. - @param path Path to check. - @param pathlen Size of path buffer. - @param component Set to index of start of last path component if the path is found to be a package. Set to -1 if - the path is not a known package type. - @return 0 unless some parameter was invalid, in which case EINVAL is returned. Determine package-ness by checking - what *component is set to. + * @function vn_path_package_check + * @abstract Figure out if a path corresponds to a Mac OS X package. + * @discussion Determines if the extension on a path is a known OS X extension type. + * @param vp Unused. + * @param path Path to check. + * @param pathlen Size of path buffer. + * @param component Set to index of start of last path component if the path is found to be a package. Set to -1 if + * the path is not a known package type. + * @return 0 unless some parameter was invalid, in which case EINVAL is returned. Determine package-ness by checking + * what *component is set to. */ int vn_path_package_check(vnode_t vp, char *path, int pathlen, int *component); #ifdef KERNEL_PRIVATE /*! - @function vn_searchfs_inappropriate_name - @abstract Figure out if the component is inappropriate for a SearchFS query. - @param name component to check - @param len length of component. - @return 0 if no match, 1 if inappropriate. + * @function vn_searchfs_inappropriate_name + * @abstract Figure out if the component is inappropriate for a SearchFS query. + * @param name component to check + * @param len length of component. + * @return 0 if no match, 1 if inappropriate. */ -int vn_searchfs_inappropriate_name(const char *name, int len); -#endif +int vn_searchfs_inappropriate_name(const char *name, int len); +#endif /*! - @function vn_rdwr - @abstract Read from or write to a file. - @discussion vn_rdwr() abstracts the details of constructing a uio and picking a vnode operation to allow - simple in-kernel file I/O. - @param rw UIO_READ for a read, UIO_WRITE for a write. - @param vp The vnode on which to perform I/O. - @param base Start of buffer into which to read or from which to write data. - @param len Length of buffer. - @param offset Offset within the file at which to start I/O. - @param segflg What kind of address "base" is. See uio_seg definition in sys/uio.h. UIO_SYSSPACE for kernelspace, UIO_USERSPACE for userspace. - UIO_USERSPACE32 and UIO_USERSPACE64 are in general preferred, but vn_rdwr will make sure that has the correct address sizes. - @param ioflg Defined in vnode.h, e.g. IO_NOAUTH, IO_NOCACHE. - @param cred Credential to pass down to filesystem for authentication. - @param aresid Destination for amount of requested I/O which was not completed, as with uio_resid(). - @param p Process requesting I/O. - @return 0 for success; errors from filesystem, and EIO if did not perform all requested I/O and the "aresid" parameter is NULL. + * @function vn_rdwr + * @abstract Read from or write to a file. + * @discussion vn_rdwr() abstracts the details of constructing a uio and picking a vnode operation to allow + * simple in-kernel file I/O. + * @param rw UIO_READ for a read, UIO_WRITE for a write. + * @param vp The vnode on which to perform I/O. + * @param base Start of buffer into which to read or from which to write data. + * @param len Length of buffer. + * @param offset Offset within the file at which to start I/O. 
+ * @param segflg What kind of address "base" is. See uio_seg definition in sys/uio.h. UIO_SYSSPACE for kernelspace, UIO_USERSPACE for userspace. + * UIO_USERSPACE32 and UIO_USERSPACE64 are in general preferred, but vn_rdwr will make sure it has the correct address sizes. + * @param ioflg Defined in vnode.h, e.g. IO_NOAUTH, IO_NOCACHE. + * @param cred Credential to pass down to filesystem for authentication. + * @param aresid Destination for amount of requested I/O which was not completed, as with uio_resid(). + * @param p Process requesting I/O. + * @return 0 for success; errors from filesystem, and EIO if it did not perform all requested I/O and the "aresid" parameter is NULL. */ -int vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset, enum uio_seg segflg, int ioflg, kauth_cred_t cred, int *aresid, proc_t p); +int vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, int len, off_t offset, enum uio_seg segflg, int ioflg, kauth_cred_t cred, int *aresid, proc_t p); /*! - @function vnode_getname - @abstract Get the name of a vnode from the VFS namecache. - @discussion Not all vnodes have names, and vnode names can change (notably, hardlinks). Use this routine at your own risk. - The string is returned with a refcount incremented in the cache; callers must call vnode_putname() to release that reference. - @param vp The vnode whose name to grab. - @return The name, or NULL if unavailable. + * @function vnode_getname + * @abstract Get the name of a vnode from the VFS namecache. + * @discussion Not all vnodes have names, and vnode names can change (notably, hardlinks). Use this routine at your own risk. + * The string is returned with a refcount incremented in the cache; callers must call vnode_putname() to release that reference. + * @param vp The vnode whose name to grab. + * @return The name, or NULL if unavailable. */ -const char *vnode_getname(vnode_t vp); +const char *vnode_getname(vnode_t vp); /*! - @function vnode_putname - @abstract Release a reference on a name from the VFS cache. - @discussion Should be called on a string obtained with vnode_getname(). - @param name String to release. + * @function vnode_putname + * @abstract Release a reference on a name from the VFS cache. + * @discussion Should be called on a string obtained with vnode_getname(). + * @param name String to release. */ -void vnode_putname(const char *name); +void vnode_putname(const char *name); /*! - @function vnode_getparent - @abstract Get an iocount on the parent of a vnode. - @discussion A vnode's parent may change over time or be reclaimed, so vnode_getparent() may return different - results at different times (e.g. a multiple-hardlink file). The parent is returned with an iocount which must - subsequently be dropped with vnode_put(). - @param vp The vnode whose parent to grab. - @return Parent if available, else NULL. + * @function vnode_getparent + * @abstract Get an iocount on the parent of a vnode. + * @discussion A vnode's parent may change over time or be reclaimed, so vnode_getparent() may return different + * results at different times (e.g. a multiple-hardlink file). The parent is returned with an iocount which must + * subsequently be dropped with vnode_put(). + * @param vp The vnode whose parent to grab. + * @return Parent if available, else NULL. */ -vnode_t vnode_getparent(vnode_t vp); +vnode_t vnode_getparent(vnode_t vp); /*!
- @function vnode_setdirty - @abstract Mark the vnode as having data or metadata that needs to be written out during reclaim - @discussion The vnode should be marked as dirty anytime a file system defers flushing of data or meta-data associated with it. - @param vp the vnode to mark as dirty - @return 0 if successful else an error code. + * @function vnode_setdirty + * @abstract Mark the vnode as having data or metadata that needs to be written out during reclaim + * @discussion The vnode should be marked as dirty anytime a file system defers flushing of data or meta-data associated with it. + * @param vp the vnode to mark as dirty + * @return 0 if successful else an error code. */ -int vnode_setdirty(vnode_t vp); +int vnode_setdirty(vnode_t vp); /*! - @function vnode_cleardirty - @abstract Mark the vnode as clean i.e. all its data or metadata has been flushed - @discussion The vnode should be marked as clean whenever the file system is done flushing data or meta-data associated with it. - @param vp the vnode to clear as being dirty - @return 0 if successful else an error code. + * @function vnode_cleardirty + * @abstract Mark the vnode as clean i.e. all its data or metadata has been flushed + * @discussion The vnode should be marked as clean whenever the file system is done flushing data or meta-data associated with it. + * @param vp the vnode to clear as being dirty + * @return 0 if successful else an error code. */ -int vnode_cleardirty(vnode_t vp); +int vnode_cleardirty(vnode_t vp); /*! - @function vnode_isdirty - @abstract Determine if a vnode is marked dirty. - @discussion The vnode should be marked as clean whenever the file system is done flushing data or meta-data associated with it. - @param vp the vnode to test. - @return Non-zero if the vnode is dirty, 0 otherwise. + * @function vnode_isdirty + * @abstract Determine if a vnode is marked dirty. + * @discussion The vnode should be marked as clean whenever the file system is done flushing data or meta-data associated with it. + * @param vp the vnode to test. + * @return Non-zero if the vnode is dirty, 0 otherwise. */ -int vnode_isdirty(vnode_t vp); +int vnode_isdirty(vnode_t vp); #ifdef KERNEL_PRIVATE -/*! - @function vnode_lookup_continue_needed - @abstract Determine whether vnode needs additional processing in VFS before being opened. - @discussion If result is zero, filesystem can open this vnode. If result is nonzero, - additional processing is needed in VFS (e.g. symlink, mountpoint). Nonzero results should - be passed up to VFS. - @param vp Vnode to consider opening (found by filesystem). - @param cnp Componentname as passed to filesystem from VFS. - @result 0 to indicate that a vnode can be opened, or an error that should be passed up to VFS. +/*! + * @function vnode_lookup_continue_needed + * @abstract Determine whether vnode needs additional processing in VFS before being opened. + * @discussion If result is zero, filesystem can open this vnode. If result is nonzero, + * additional processing is needed in VFS (e.g. symlink, mountpoint). Nonzero results should + * be passed up to VFS. + * @param vp Vnode to consider opening (found by filesystem). + * @param cnp Componentname as passed to filesystem from VFS. + * @result 0 to indicate that a vnode can be opened, or an error that should be passed up to VFS. */ int vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp); /*! - @function vnode_istty - @abstract Determine if the given vnode represents a tty device. - @param vp Vnode to examine. 
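A small sketch of the dirty-state pairing documented just above; the deferred-update and writeback paths named here are hypothetical:

#include <sys/vnode.h>

/* Hypothetical: called when myfs defers a data or metadata flush. */
static void
myfs_defer_flush(vnode_t vp)
{
    (void)vnode_setdirty(vp);    /* ensure VFS flushes at reclaim if we never do */
}

/* Hypothetical: called once the deferred state has been written back. */
static void
myfs_flush_done(vnode_t vp)
{
    if (vnode_isdirty(vp)) {
        (void)vnode_cleardirty(vp);
    }
}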
- @result Non-zero to indicate that the vnode represents a tty device. Zero otherwise. + * @function vnode_istty + * @abstract Determine if the given vnode represents a tty device. + * @param vp Vnode to examine. + * @result Non-zero to indicate that the vnode represents a tty device. Zero otherwise. */ int vnode_istty(vnode_t vp); /*! - @function bdevvp - @abstract create a vnode for a given dev_t - @result non-zero to indicate failure, vnode provided in *vpp arg - */ -int bdevvp (dev_t dev, struct vnode **vpp); + * @function bdevvp + * @abstract create a vnode for a given dev_t + * @result non-zero to indicate failure, vnode provided in *vpp arg + */ +int bdevvp(dev_t dev, struct vnode **vpp); /* - @function vnode_getfromfd - @abstract get a vnode from a file descriptor - @result non-zero to indicate failure, vnode provided in *vpp arg + * @function vnode_getfromfd + * @abstract get a vnode from a file descriptor + * @result non-zero to indicate failure, vnode provided in *vpp arg */ -int vnode_getfromfd (vfs_context_t ctx, int fd, vnode_t *vpp); +int vnode_getfromfd(vfs_context_t ctx, int fd, vnode_t *vpp); #endif /* KERNEL_PRIVATE */ #ifdef BSD_KERNEL_PRIVATE /* Not in export list so can be private */ struct stat; -int vn_stat(struct vnode *vp, void * sb, kauth_filesec_t *xsec, int isstat64, - vfs_context_t ctx); -int vn_stat_noauth(struct vnode *vp, void * sb, kauth_filesec_t *xsec, int isstat64, - vfs_context_t ctx, struct ucred *file_cred); -int vaccess(mode_t file_mode, uid_t uid, gid_t gid, - mode_t acc_mode, kauth_cred_t cred); -int check_mountedon(dev_t dev, enum vtype type, int *errorp); +int vn_stat(struct vnode *vp, void * sb, kauth_filesec_t *xsec, int isstat64, + vfs_context_t ctx); +int vn_stat_noauth(struct vnode *vp, void * sb, kauth_filesec_t *xsec, int isstat64, + vfs_context_t ctx, struct ucred *file_cred); +int vaccess(mode_t file_mode, uid_t uid, gid_t gid, + mode_t acc_mode, kauth_cred_t cred); +int check_mountedon(dev_t dev, enum vtype type, int *errorp); int vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash); -void vnode_reclaim(vnode_t); -vnode_t current_rootdir(void); -vnode_t current_workingdir(void); -void *vnode_vfsfsprivate(vnode_t); +void vnode_reclaim(vnode_t); +vnode_t current_rootdir(void); +vnode_t current_workingdir(void); +void *vnode_vfsfsprivate(vnode_t); struct vfsstatfs *vnode_vfsstatfs(vnode_t); uint32_t vnode_vfsvisflags(vnode_t); uint32_t vnode_vfscmdflags(vnode_t); -int vnode_is_openevt(vnode_t); -void vnode_set_openevt(vnode_t); -void vnode_clear_openevt(vnode_t); -int vnode_isstandard(vnode_t); -int vnode_makeimode(int, int); -enum vtype vnode_iftovt(int); -int vnode_vttoif(enum vtype); -int vnode_isshadow(vnode_t); +int vnode_is_openevt(vnode_t); +void vnode_set_openevt(vnode_t); +void vnode_clear_openevt(vnode_t); +int vnode_isstandard(vnode_t); +int vnode_makeimode(int, int); +enum vtype vnode_iftovt(int); +int vnode_vttoif(enum vtype); +int vnode_isshadow(vnode_t); boolean_t vnode_on_reliable_media(vnode_t); /* * Indicate that a file has multiple hard links. VFS will always call @@ -2168,28 +2173,28 @@ vnode_t vnode_mountdevvp(vnode_t); #ifdef KERNEL_PRIVATE /*! - @function vnode_getname_printable - @abstract Get a non-null printable name of a vnode. - @Used to make sure a printable name is returned for all vnodes. If a name exists or can be artificially created, the routine creates a new entry in the VFS namecache. Otherwise, the function returns an artificially created vnode name which is safer and easier to use. 
vnode_putname_printable() should be used to release names obtained by this routine. - @param vp The vnode whose name to grab. - @return The printable name. + * @function vnode_getname_printable + * @abstract Get a non-null printable name of a vnode. + * @discussion Used to make sure a printable name is returned for all vnodes. If a name exists or can be artificially created, the routine creates a new entry in the VFS namecache. Otherwise, the function returns an artificially created vnode name which is safer and easier to use. vnode_putname_printable() should be used to release names obtained by this routine. + * @param vp The vnode whose name to grab. + * @return The printable name. */ const char *vnode_getname_printable(vnode_t vp); /*! - @function vnode_putname_printable - @abstract Release a reference on a name from the VFS cache if it was added by the matching vnode_getname_printable() call. - @param name String to release. + * @function vnode_putname_printable + * @abstract Release a reference on a name from the VFS cache if it was added by the matching vnode_getname_printable() call. + * @param name String to release. */ void vnode_putname_printable(const char *name); #endif // KERNEL_PRIVATE /*! - @function vnode_getbackingvnode - @abstract If the input vnode is a NULLFS mirrored vnode, then return the vnode it wraps. - @Used to un-mirror files, primarily for security purposes. On success, out_vp is always set to a vp with an iocount. The caller must release the iocount. - @param in_vp The vnode being asked about - @param out_vpp A pointer to the output vnode, unchanged on error - @return 0 on Success, ENOENT if in_vp doesn't mirror anything, EINVAL on parameter errors. + * @function vnode_getbackingvnode + * @abstract If the input vnode is a NULLFS mirrored vnode, then return the vnode it wraps. + * @discussion Used to un-mirror files, primarily for security purposes. On success, out_vp is always set to a vp with an iocount. The caller must release the iocount. + * @param in_vp The vnode being asked about + * @param out_vpp A pointer to the output vnode, unchanged on error + * @return 0 on success, ENOENT if in_vp doesn't mirror anything, EINVAL on parameter errors. */ int vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp); @@ -2198,31 +2203,31 @@ int vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp); */ /*! - @function vfs_setup_vattr_from_attrlist - @abstract Setup a vnode_attr structure given an attrlist structure. - @Used by a VNOP_GETATTRLISTBULK implementation to setup a vnode_attr structure from a attribute list. It also returns the fixed size of the attribute buffer required. - @warning this forces new fork attr behavior, i.e. reinterpret forkattr bits as ATTR_CMNEXT - @param alp Pointer to attribute list structure. - @param vap Pointer to vnode_attr structure. - @param obj_vtype Type of object - If VNON is passed, then the type is ignored and common, file and dir attrs are used to initialise the vattrs. If set to VDIR, only common and directory attributes are used. For all other types, only common and file attrbutes are used. - @param attr_fixed_sizep Returns the fixed length required in the attrbute buffer for the object. NULL should be passed if it is not required. - @param ctx vfs context of caller. - @return error. + * @function vfs_setup_vattr_from_attrlist + * @abstract Set up a vnode_attr structure given an attrlist structure. + * @discussion Used by a VNOP_GETATTRLISTBULK implementation to set up a vnode_attr structure from an attribute list.
It also returns the fixed size of the attribute buffer required. + * @warning This forces new fork attr behavior, i.e. reinterpret forkattr bits as ATTR_CMNEXT + * @param alp Pointer to attribute list structure. + * @param vap Pointer to vnode_attr structure. + * @param obj_vtype Type of object - If VNON is passed, then the type is ignored and common, file and dir attrs are used to initialise the vattrs. If set to VDIR, only common and directory attributes are used. For all other types, only common and file attributes are used. + * @param attr_fixed_sizep Returns the fixed length required in the attribute buffer for the object. NULL should be passed if it is not required. + * @param ctx vfs context of caller. + * @return error. */ errno_t vfs_setup_vattr_from_attrlist(struct attrlist *alp, struct vnode_attr *vap, enum vtype obj_vtype, ssize_t *attr_fixed_sizep, vfs_context_t ctx); /*! - @function vfs_attr_pack - @abstract Pack a vnode_attr structure into a buffer in the same format as getattrlist(2). - @Used by a VNOP_GETATTRLISTBULK implementation to pack data provided into a vnode_attr structure into a buffer the way getattrlist(2) does. - @param vp If available, the vnode for which the attributes are being given, NULL if vnode is not available (which will usually be the case for a VNOP_GETATTRLISTBULK implementation. - @param uio - a uio_t initialised with one iovec.. - @param alp - Pointer to an attrlist structure. - @param options - options for call (same as options for getattrlistbulk(2)). - @param vap Pointer to a filled in vnode_attr structure. Data from the vnode_attr structure will be used to copy and lay out the data in the required format for getatrlistbulk(2) by this function. - @param fndesc Currently unused - @param ctx vfs context of caller. - @return error. + * @function vfs_attr_pack + * @abstract Pack a vnode_attr structure into a buffer in the same format as getattrlist(2). + * @discussion Used by a VNOP_GETATTRLISTBULK implementation to pack data provided into a vnode_attr structure into a buffer the way getattrlist(2) does. + * @param vp If available, the vnode for which the attributes are being given, NULL if vnode is not available (which will usually be the case for a VNOP_GETATTRLISTBULK implementation). + * @param uio - a uio_t initialised with one iovec. + * @param alp - Pointer to an attrlist structure. + * @param options - options for call (same as options for getattrlistbulk(2)). + * @param vap Pointer to a filled in vnode_attr structure. Data from the vnode_attr structure will be used to copy and lay out the data in the required format for getattrlistbulk(2) by this function. + * @param fndesc Currently unused + * @param ctx vfs context of caller. + * @return error.
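Putting vfs_setup_vattr_from_attrlist() and vfs_attr_pack() together, a sketch of the per-entry work a VNOP_GETATTRLISTBULK implementation might do; the helper name and the attribute-filling step are hypothetical:

#include <sys/attr.h>
#include <sys/vnode.h>

static errno_t
myfs_pack_bulk_entry(struct attrlist *alp, uio_t uio, uint64_t options,
    vfs_context_t ctx)
{
    struct vnode_attr va;
    ssize_t fixed_size = 0;
    errno_t error;

    /* Activate the vnode_attr fields the caller's attrlist asks for. */
    error = vfs_setup_vattr_from_attrlist(alp, &va, VNON, &fixed_size, ctx);
    if (error != 0) {
        return error;
    }

    /* ... fill in every attribute marked active in va from myfs state ... */

    /* Lay the entry out in getattrlistbulk(2) format; no vnode is needed. */
    return vfs_attr_pack(NULLVP, uio, alp, options, &va, NULL, ctx);
}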
*/ errno_t vfs_attr_pack(vnode_t vp, uio_t uio, struct attrlist *alp, uint64_t options, struct vnode_attr *vap, void *fndesc, vfs_context_t ctx); @@ -2241,9 +2246,9 @@ int vnode_usecount(vnode_t vp); int vnode_iocount(vnode_t vp); void vnode_rele_ext(vnode_t, int, int); int is_package_name(const char *name, int len); -int vfs_context_issuser(vfs_context_t); +int vfs_context_issuser(vfs_context_t); int vfs_context_iskernel(vfs_context_t); -vfs_context_t vfs_context_kernel(void); /* get from 1st kernel thread */ +vfs_context_t vfs_context_kernel(void); /* get from 1st kernel thread */ vnode_t vfs_context_cwd(vfs_context_t); int vnode_isnoflush(vnode_t); void vnode_setnoflush(vnode_t); @@ -2254,7 +2259,7 @@ void vnode_clearnoflush(vnode_t); #define BUILDPATH_CHECK_MOVED 0x4 /* Return EAGAIN if the parent hierarchy is modified */ #define BUILDPATH_VOLUME_RELATIVE 0x8 /* Return path relative to the nearest mount point */ -int build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx); +int build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx); int vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx); diff --git a/bsd/sys/vnode_if.h b/bsd/sys/vnode_if.h index 75f5cdd7e..1b8cc8af3 100644 --- a/bsd/sys/vnode_if.h +++ b/bsd/sys/vnode_if.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -191,16 +191,16 @@ struct vnop_lookup_args { }; /*! - @function VNOP_LOOKUP - @abstract Call down to a filesystem to look for a directory entry by name. - @discussion VNOP_LOOKUP is the key pathway through which VFS asks a filesystem to find a file. The vnode - should be returned with an iocount to be dropped by the caller. A VNOP_LOOKUP() calldown can come without - a preceding VNOP_OPEN(). - @param dvp Directory in which to look up file. - @param vpp Destination for found vnode. - @param cnp Structure describing filename to find, reason for lookup, and various other data. - @param ctx Context against which to authenticate lookup request. - @return 0 for success or a filesystem-specific error. + * @function VNOP_LOOKUP + * @abstract Call down to a filesystem to look for a directory entry by name. + * @discussion VNOP_LOOKUP is the key pathway through which VFS asks a filesystem to find a file. The vnode + * should be returned with an iocount to be dropped by the caller. 
A VNOP_LOOKUP() calldown can come without + * a preceding VNOP_OPEN(). + * @param dvp Directory in which to look up file. + * @param vpp Destination for found vnode. + * @param cnp Structure describing filename to find, reason for lookup, and various other data. + * @param ctx Context against which to authenticate lookup request. + * @return 0 for success or a filesystem-specific error. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_LOOKUP(vnode_t, vnode_t *, struct componentname *, vfs_context_t); @@ -216,17 +216,17 @@ struct vnop_create_args { }; /*! - @function VNOP_CREATE - @abstract Call down to a filesystem to create a regular file (VREG). - @discussion If file creation succeeds, "vpp" should be returned with an iocount to be dropped by the caller. - A VNOP_CREATE() calldown can come without a preceding VNOP_OPEN(). - @param dvp Directory in which to create file. - @param vpp Destination for vnode for newly created file. - @param cnp Description of filename to create. - @param vap File creation properties, as seen in vnode_getattr(). Manipulated with VATTR_ISACTIVE, VATTR_RETURN, - VATTR_SET_SUPPORTED, and so forth. - @param ctx Context against which to authenticate file creation. - @return 0 for success or a filesystem-specific error. + * @function VNOP_CREATE + * @abstract Call down to a filesystem to create a regular file (VREG). + * @discussion If file creation succeeds, "vpp" should be returned with an iocount to be dropped by the caller. + * A VNOP_CREATE() calldown can come without a preceding VNOP_OPEN(). + * @param dvp Directory in which to create file. + * @param vpp Destination for vnode for newly created file. + * @param cnp Description of filename to create. + * @param vap File creation properties, as seen in vnode_getattr(). Manipulated with VATTR_ISACTIVE, VATTR_RETURN, + * VATTR_SET_SUPPORTED, and so forth. + * @param ctx Context against which to authenticate file creation. + * @return 0 for success or a filesystem-specific error. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_CREATE(vnode_t, vnode_t *, struct componentname *, struct vnode_attr *, vfs_context_t); @@ -241,45 +241,45 @@ struct vnop_whiteout_args { }; /*! - @function VNOP_WHITEOUT - @abstract Obsolete - no longer supported. - @discussion Whiteouts are used to support the union filesystem, whereby one filesystem is mounted "transparently" - on top of another. A whiteout in the upper layer of a union mount is a "deletion" of a file in the lower layer; - lookups will catch the whiteout and fail, setting ISWHITEOUT in the componentname structure, even if an underlying - file of the same name exists. The whiteout vnop is used for creation, deletion, and checking whether a directory - supports whiteouts (see flags). - also support the LOOKUP flag, which is used to test whether a directory supports whiteouts. - @param dvp Directory in which to create. - @param cnp Name information for whiteout. - @param flags CREATE: create a whiteout. LOOKUP: check whether a directory supports whiteouts, DELETE: remove a whiteout. - @param ctx Context against which to authenticate whiteout creation. - @return 0 for success or a filesystem-specific error. Returning 0 for LOOKUP indicates that a directory does support whiteouts. + * @function VNOP_WHITEOUT + * @abstract Obsolete - no longer supported. + * @discussion Whiteouts are used to support the union filesystem, whereby one filesystem is mounted "transparently" + * on top of another. 
A whiteout in the upper layer of a union mount is a "deletion" of a file in the lower layer; + * lookups will catch the whiteout and fail, setting ISWHITEOUT in the componentname structure, even if an underlying + * file of the same name exists. The whiteout vnop is used for creation, deletion, and checking whether a directory + * supports whiteouts (see flags); the LOOKUP flag is used to test whether a directory supports whiteouts. + * @param dvp Directory in which to create. + * @param cnp Name information for whiteout. + * @param flags CREATE: create a whiteout. LOOKUP: check whether a directory supports whiteouts. DELETE: remove a whiteout. + * @param ctx Context against which to authenticate whiteout creation. + * @return 0 for success or a filesystem-specific error. Returning 0 for LOOKUP indicates that a directory does support whiteouts. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_WHITEOUT(vnode_t, struct componentname *, int, vfs_context_t); #endif /* XNU_KERNEL_PRIVATE */ struct vnop_mknod_args { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; + struct vnodeop_desc *a_desc; + vnode_t a_dvp; + vnode_t *a_vpp; + struct componentname *a_cnp; + struct vnode_attr *a_vap; + vfs_context_t a_context; }; /*! - @function VNOP_MKNOD - @abstract Call down to a filesystem to create a special file. - @discussion The mknod vnop is used to create character and block device files, named pipe (FIFO) files, and named sockets. - The newly created file should be returned with an iocount which will be dropped by the caller. A VNOP_MKNOD() call - can come down without a preceding VNOP_OPEN(). - @param dvp Directory in which to create the special file. - @param vpp Destination for newly created vnode. - @param cnp Name information for new file. - @param vap Attributes for new file, including type. - @param ctx Context against which to authenticate node creation. - @return 0 for success or a filesystem-specific error. + * @function VNOP_MKNOD + * @abstract Call down to a filesystem to create a special file. + * @discussion The mknod vnop is used to create character and block device files, named pipe (FIFO) files, and named sockets. + * The newly created file should be returned with an iocount which will be dropped by the caller. A VNOP_MKNOD() call + * can come down without a preceding VNOP_OPEN(). + * @param dvp Directory in which to create the special file. + * @param vpp Destination for newly created vnode. + * @param cnp Name information for new file. + * @param vap Attributes for new file, including type. + * @param ctx Context against which to authenticate node creation. + * @return 0 for success or a filesystem-specific error.
*/ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_MKNOD(vnode_t, vnode_t *, struct componentname *, struct vnode_attr *, vfs_context_t); @@ -296,29 +296,29 @@ struct vnop_open_args { struct vnop_compound_open_args { struct vnodeop_desc *a_desc; - vnode_t a_dvp; /* Directory in which to open/create */ - vnode_t *a_vpp; /* Resulting vnode */ - int a_fmode; /* Open mode */ - struct componentname *a_cnp; /* Path to look up */ - struct vnode_attr *a_vap; /* Attributes with which to create, if appropriate */ - uint32_t a_flags; /* VNOP-control flags */ - uint32_t *a_status; /* Information about results */ - - vfs_context_t a_context; /* Authorization context */ - - int (*a_open_create_authorizer)( /* Authorizer for create case */ - vnode_t dvp, /* Directory in which to create */ - struct componentname *cnp, /* As passed to VNOP */ - struct vnode_attr *vap, /* As passed to VNOP */ - vfs_context_t ctx, /* Context */ - void *reserved); /* Who knows */ - - int (*a_open_existing_authorizer)( /* Authorizer for preexisting case */ - vnode_t vp, /* vp to open */ - struct componentname *cnp, /* Lookup state */ - int fmode, /* As passed to VNOP */ - vfs_context_t ctx, /* Context */ - void *reserved); /* Who knows */ + vnode_t a_dvp; /* Directory in which to open/create */ + vnode_t *a_vpp; /* Resulting vnode */ + int a_fmode; /* Open mode */ + struct componentname *a_cnp; /* Path to look up */ + struct vnode_attr *a_vap; /* Attributes with which to create, if appropriate */ + uint32_t a_flags; /* VNOP-control flags */ + uint32_t *a_status; /* Information about results */ + + vfs_context_t a_context; /* Authorization context */ + + int (*a_open_create_authorizer)( /* Authorizer for create case */ + vnode_t dvp, /* Directory in which to create */ + struct componentname *cnp, /* As passed to VNOP */ + struct vnode_attr *vap, /* As passed to VNOP */ + vfs_context_t ctx, /* Context */ + void *reserved); /* Who knows */ + + int (*a_open_existing_authorizer)( /* Authorizer for preexisting case */ + vnode_t vp, /* vp to open */ + struct componentname *cnp, /* Lookup state */ + int fmode, /* As passed to VNOP */ + vfs_context_t ctx, /* Context */ + void *reserved); /* Who knows */ void *a_reserved; }; @@ -328,15 +328,15 @@ struct vnop_compound_open_args { #endif /* KERNEL_PRIVATE */ /*! - @function VNOP_OPEN - @abstract Call down to a filesystem to open a file. - @discussion The open vnop gives a filesystem a chance to initialize a file for - operations like reading, writing, and ioctls. VFS promises to send down exactly one VNOP_CLOSE() - for each VNOP_OPEN(). - @param vp File to open. - @param mode FREAD and/or FWRITE. - @param ctx Context against which to authenticate open. - @return 0 for success or a filesystem-specific error. + * @function VNOP_OPEN + * @abstract Call down to a filesystem to open a file. + * @discussion The open vnop gives a filesystem a chance to initialize a file for + * operations like reading, writing, and ioctls. VFS promises to send down exactly one VNOP_CLOSE() + * for each VNOP_OPEN(). + * @param vp File to open. + * @param mode FREAD and/or FWRITE. + * @param ctx Context against which to authenticate open. + * @return 0 for success or a filesystem-specific error. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_OPEN(vnode_t, int, vfs_context_t); @@ -355,15 +355,15 @@ struct vnop_close_args { }; /*! - @function VNOP_CLOSE - @abstract Call down to a filesystem to close a file. - @discussion The close vnop gives a filesystem a chance to release state set up - by a VNOP_OPEN(). 
- @param vp File to close.
- @param fflag FREAD and/or FWRITE; in the case of a file opened with open(2), fflag corresponds
- to how the file was opened.
- @param ctx Context against which to authenticate close.
- @return 0 for success or a filesystem-specific error.
+ * @function VNOP_CLOSE
+ * @abstract Call down to a filesystem to close a file.
+ * @discussion The close vnop gives a filesystem a chance to release state set up
+ * by a VNOP_OPEN(). VFS promises to send down exactly one VNOP_CLOSE() for each VNOP_OPEN().
+ * @param vp File to close.
+ * @param fflag FREAD and/or FWRITE; in the case of a file opened with open(2), fflag corresponds
+ * to how the file was opened.
+ * @param ctx Context against which to authenticate close.
+ * @return 0 for success or a filesystem-specific error.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_CLOSE(vnode_t, int, vfs_context_t);
@@ -377,15 +377,15 @@ struct vnop_access_args {
};
/*!
- @function VNOP_ACCESS
- @abstract Call down to a filesystem to see if a kauth-style operation is permitted.
- @discussion VNOP_ACCESS is currently only called on filesystems which mark themselves
- as doing their authentication remotely (vfs_setauthopaque(), vfs_authopaque()). A VNOP_ACCESS()
- calldown may come without any preceding VNOP_OPEN().
- @param vp File to authorize action for.
- @param action kauth-style action to be checked for permissions, e.g. KAUTH_VNODE_DELETE.
- @param ctx Context against which to authenticate action.
- @return 0 for success or a filesystem-specific error.
+ * @function VNOP_ACCESS
+ * @abstract Call down to a filesystem to see if a kauth-style operation is permitted.
+ * @discussion VNOP_ACCESS is currently only called on filesystems which mark themselves
+ * as doing their authentication remotely (vfs_setauthopaque(), vfs_authopaque()). A VNOP_ACCESS()
+ * calldown may come without any preceding VNOP_OPEN().
+ * @param vp File to authorize action for.
+ * @param action kauth-style action to be checked for permissions, e.g. KAUTH_VNODE_DELETE.
+ * @param ctx Context against which to authenticate action.
+ * @return 0 for success or a filesystem-specific error.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_ACCESS(vnode_t, int, vfs_context_t);
@@ -399,18 +399,18 @@ struct vnop_getattr_args {
};
/*!
- @function VNOP_GETATTR
- @abstract Call down to a filesystem to get vnode attributes.
- @discussion Supported attributes ("Yes, I am returning this information") are set with VATTR_SET_SUPPORTED.
- Which attributes have been requested is checked with VATTR_IS_ACTIVE. Attributes
- are returned with VATTR_RETURN. It is through VNOP_GETATTR that routines like stat() get their information.
- A VNOP_GETATTR() calldown may come without any preceding VNOP_OPEN().
- @param vp The vnode whose attributes to get.
- @param vap Container for which attributes are requested, which attributes are supported by the filesystem, and attribute values.
- @param ctx Context against which to authenticate request for attributes.
- @return 0 for success or a filesystem-specific error. VNOP_GETATTR() can return success even if not
- all requested attributes were returned; returning an error-value should indicate that something went wrong, rather than that
- some attribute is not supported.
+ * @function VNOP_GETATTR
+ * @abstract Call down to a filesystem to get vnode attributes.
+ * @discussion Supported attributes ("Yes, I am returning this information") are set with VATTR_SET_SUPPORTED.
+ * Which attributes have been requested is checked with VATTR_IS_ACTIVE. Attributes
+ * are returned with VATTR_RETURN. It is through VNOP_GETATTR that routines like stat() get their information.
+ * A VNOP_GETATTR() calldown may come without any preceding VNOP_OPEN().
+ * @param vp The vnode whose attributes to get.
+ * @param vap Container for which attributes are requested, which attributes are supported by the filesystem, and attribute values.
+ * @param ctx Context against which to authenticate request for attributes.
+ * @return 0 for success or a filesystem-specific error. VNOP_GETATTR() can return success even if not
+ * all requested attributes were returned; returning an error-value should indicate that something went wrong, rather than that
+ * some attribute is not supported.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_GETATTR(vnode_t, struct vnode_attr *, vfs_context_t);
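As a concrete illustration of the VATTR protocol the comment above describes, here is a sketch of a handler; the myfs_node() accessor and its fields are hypothetical, not part of this patch, but VATTR_IS_ACTIVE and VATTR_RETURN are the real macros.

static int
myfs_vnop_getattr(struct vnop_getattr_args *ap)
{
	struct vnode_attr *vap = ap->a_vap;
	struct myfs_node *np = myfs_node(ap->a_vp);     /* hypothetical */

	/* Answer only what was asked for; VATTR_RETURN both stores the
	 * value and marks the attribute as supported. */
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		VATTR_RETURN(vap, va_data_size, np->size);
	}
	if (VATTR_IS_ACTIVE(vap, va_fileid)) {
		VATTR_RETURN(vap, va_fileid, np->inum);
	}
	/* Attributes we never mark supported are simply not returned;
	 * per the comment above, that is not an error. */
	return 0;
}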
@@ -424,19 +424,19 @@ struct vnop_setattr_args {
};
/*!
- @function VNOP_SETATTR
- @abstract Call down to a filesystem to set vnode attributes.
- @discussion Supported attributes ("Yes, I am setting this attribute.") are set with VATTR_SET_SUPPORTED.
- Requested attributes are checked with VATTR_IS_ACTIVE. Attribute values are accessed directly through
- structure fields. VNOP_SETATTR() is the core of the KPI function vnode_setattr(), which is used by chmod(),
- chown(), truncate(), and many others. A VNOP_SETATTR() call may come without any preceding VNOP_OPEN().
- @param vp The vnode whose attributes to set.
- @param vap Container for which attributes are to be set and their desired values, as well as for the filesystem to
- return information about which attributes were successfully set.
- @param ctx Context against which to authenticate request for attribute change.
- @return 0 for success or a filesystem-specific error. VNOP_SETATTR() can return success even if not
- all requested attributes were set; returning an error-value should indicate that something went wrong, rather than that
- some attribute is not supported.
+ * @function VNOP_SETATTR
+ * @abstract Call down to a filesystem to set vnode attributes.
+ * @discussion Supported attributes ("Yes, I am setting this attribute.") are set with VATTR_SET_SUPPORTED.
+ * Requested attributes are checked with VATTR_IS_ACTIVE. Attribute values are accessed directly through
+ * structure fields. VNOP_SETATTR() is the core of the KPI function vnode_setattr(), which is used by chmod(),
+ * chown(), truncate(), and many others. A VNOP_SETATTR() call may come without any preceding VNOP_OPEN().
+ * @param vp The vnode whose attributes to set.
+ * @param vap Container for which attributes are to be set and their desired values, as well as for the filesystem to
+ * return information about which attributes were successfully set.
+ * @param ctx Context against which to authenticate request for attribute change.
+ * @return 0 for success or a filesystem-specific error. VNOP_SETATTR() can return success even if not
+ * all requested attributes were set; returning an error-value should indicate that something went wrong, rather than that
+ * some attribute is not supported.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_SETATTR(vnode_t, struct vnode_attr *, vfs_context_t);
@@ -451,18 +451,18 @@ struct vnop_read_args {
};
/*!
- @function VNOP_READ
- @abstract Call down to a filesystem to read file data.
- @discussion VNOP_READ() is where the hard work of of the read() system call happens. The filesystem may use
- the buffer cache, the cluster layer, or an alternative method to get its data; uio routines will be used to see that data
- is copied to the correct virtual address in the correct address space and will update its uio argument
- to indicate how much data has been moved.
- @param vp The vnode to read from.
- @param uio Description of request, including file offset, amount of data requested, destination address for data,
- and whether that destination is in kernel or user space.
- @param ctx Context against which to authenticate read request.
- @return 0 for success or a filesystem-specific error. VNOP_READ() can return success even if less data was
- read than originally requested; returning an error value should indicate that something actually went wrong.
+ * @function VNOP_READ
+ * @abstract Call down to a filesystem to read file data.
+ * @discussion VNOP_READ() is where the hard work of the read() system call happens. The filesystem may use
+ * the buffer cache, the cluster layer, or an alternative method to get its data; uio routines will be used to see that data
+ * is copied to the correct virtual address in the correct address space and will update its uio argument
+ * to indicate how much data has been moved.
+ * @param vp The vnode to read from.
+ * @param uio Description of request, including file offset, amount of data requested, destination address for data,
+ * and whether that destination is in kernel or user space.
+ * @param ctx Context against which to authenticate read request.
+ * @return 0 for success or a filesystem-specific error. VNOP_READ() can return success even if less data was
+ * read than originally requested; returning an error value should indicate that something actually went wrong.
 */
extern errno_t VNOP_READ(vnode_t vp, struct uio *uio, int, vfs_context_t ctx);
@@ -475,18 +475,18 @@ struct vnop_write_args {
};
/*!
- @function VNOP_WRITE
- @abstract Call down to the filesystem to write file data.
- @discussion VNOP_WRITE() is to write() as VNOP_READ() is to read(). The filesystem may use
- the buffer cache, the cluster layer, or an alternative method to write its data; uio routines will be used to see that data
- is copied to the correct virtual address in the correct address space and will update its uio argument
- to indicate how much data has been moved.
- @param vp The vnode to write to.
- @param uio Description of request, including file offset, amount of data to write, source address for data,
- and whether that destination is in kernel or user space.
- @param ctx Context against which to authenticate write request.
- @return 0 for success or a filesystem-specific error. VNOP_WRITE() can return success even if less data was
- written than originally requested; returning an error value should indicate that something actually went wrong.
+ * @function VNOP_WRITE
+ * @abstract Call down to the filesystem to write file data.
+ * @discussion VNOP_WRITE() is to write() as VNOP_READ() is to read(). The filesystem may use
+ * the buffer cache, the cluster layer, or an alternative method to write its data; uio routines will be used to see that data
+ * is copied to the correct virtual address in the correct address space and will update its uio argument
+ * to indicate how much data has been moved.
+ * @param vp The vnode to write to.
+ * @param uio Description of request, including file offset, amount of data to write, source address for data,
+ * and whether that source is in kernel or user space.
+ * @param ctx Context against which to authenticate write request.
+ * @return 0 for success or a filesystem-specific error. VNOP_WRITE() can return success even if less data was
+ * written than originally requested; returning an error value should indicate that something actually went wrong.
 */
extern errno_t VNOP_WRITE(vnode_t vp, struct uio *uio, int ioflag, vfs_context_t ctx);
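To make the uio bookkeeping concrete, here is a rough sketch of a write handler built on the cluster layer; myfs_filesize() is hypothetical, while cluster_write(), uio_offset(), and uio_resid() are real KPI calls. This is one plausible shape under those assumptions, not a prescribed implementation.

static int
myfs_vnop_write(struct vnop_write_args *ap)
{
	struct uio *uio = ap->a_uio;
	vnode_t vp = ap->a_vp;
	off_t oldEOF = myfs_filesize(vp);               /* hypothetical */
	off_t writeEnd = uio_offset(uio) + uio_resid(uio);
	off_t newEOF = (writeEnd > oldEOF) ? writeEnd : oldEOF;

	/* cluster_write() copies from the uio (kernel or user space, as the
	 * uio records), advancing uio_offset and shrinking uio_resid to
	 * report how much data was moved, as the comment above describes. */
	return cluster_write(vp, uio, oldEOF, newEOF, 0, 0, ap->a_ioflag);
}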
@@ -500,21 +500,21 @@ struct vnop_ioctl_args {
};
/*!
- @function VNOP_IOCTL
- @abstract Call down to a filesystem or device driver to execute various control operations on or request data about a file.
- @discussion Ioctl controls are typically associated with devices, but they can in fact be passed
- down for any file; they are used to implement any of a wide range of controls and information requests.
- fcntl() calls VNOP_IOCTL for several commands, and will attempt a VNOP_IOCTL if it is passed an unknown command,
- though no copyin or copyout of arguments can occur in this case--the "arg" must be an integer value.
- Filesystems can define their own fcntls using this mechanism. How ioctl commands are structured
- is slightly complicated; see the manual page for ioctl(2).
- @param vp The vnode to execute the command on.
- @param command Identifier for action to take.
- @param data Pointer to data; this can be an integer constant (of 32 bits only) or an address to be read from or written to,
- depending on "command." If it is an address, it is valid and resides in the kernel; callers of VNOP_IOCTL() are
- responsible for copying to and from userland.
- @param ctx Context against which to authenticate ioctl request.
- @return 0 for success or a filesystem-specific error.
+ * @function VNOP_IOCTL
+ * @abstract Call down to a filesystem or device driver to execute various control operations on or request data about a file.
+ * @discussion Ioctl controls are typically associated with devices, but they can in fact be passed
+ * down for any file; they are used to implement any of a wide range of controls and information requests.
+ * fcntl() calls VNOP_IOCTL for several commands, and will attempt a VNOP_IOCTL if it is passed an unknown command,
+ * though no copyin or copyout of arguments can occur in this case--the "arg" must be an integer value.
+ * Filesystems can define their own fcntls using this mechanism. How ioctl commands are structured
+ * is slightly complicated; see the manual page for ioctl(2).
+ * @param vp The vnode to execute the command on.
+ * @param command Identifier for action to take.
+ * @param data Pointer to data; this can be an integer constant (of 32 bits only) or an address to be read from or written to,
+ * depending on "command." If it is an address, it is valid and resides in the kernel; callers of VNOP_IOCTL() are
+ * responsible for copying to and from userland.
+ * @param ctx Context against which to authenticate ioctl request.
+ * @return 0 for success or a filesystem-specific error.
 */
extern errno_t VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx);
@@ -528,21 +528,21 @@ struct vnop_select_args {
};
/*!
- @function VNOP_SELECT
- @abstract Call down to a filesystem or device to check if a file is ready for I/O and request later notification if it is not currently ready.
- @discussion In general, regular are always "ready for I/O" and their select vnops simply return "1."
- Devices, though, may or may not be read; they keep track of who is selecting on them and send notifications
- when they become ready. xnu provides structures and routines for tracking threads waiting for I/O and waking up
- those threads: see selrecord(), selthreadclear(), seltrue(), selwait(), selwakeup(), and the selinfo structure (sys/select.h).
- @param vp The vnode to check for I/O readiness.
- @param which What kind of I/O is desired: FREAD, FWRITE.
- @param fflags Flags from fileglob as seen in fcntl.h, e.g. O_NONBLOCK, O_APPEND.
- @param wql Opaque object to pass to selrecord().
- @param ctx Context to authenticate for select request.
- @return Nonzero indicates that a file is ready for I/O. 0 indicates that the file is not ready for I/O;
- there is no way to return an error. 0 should be returned if the device (or file) is not ready for I/O
- and the driver (or filesystem) is going to track the request and provide subsequent wakeups.
- the device (or filesystem) will provide a wakeup.
+ * @function VNOP_SELECT
+ * @abstract Call down to a filesystem or device to check if a file is ready for I/O and request later notification if it is not currently ready.
+ * @discussion In general, regular files are always "ready for I/O" and their select vnops simply return "1."
+ * Devices, though, may or may not be ready; they keep track of who is selecting on them and send notifications
+ * when they become ready. xnu provides structures and routines for tracking threads waiting for I/O and waking up
+ * those threads: see selrecord(), selthreadclear(), seltrue(), selwait(), selwakeup(), and the selinfo structure (sys/select.h).
+ * @param vp The vnode to check for I/O readiness.
+ * @param which What kind of I/O is desired: FREAD, FWRITE.
+ * @param fflags Flags from fileglob as seen in fcntl.h, e.g. O_NONBLOCK, O_APPEND.
+ * @param wql Opaque object to pass to selrecord().
+ * @param ctx Context to authenticate for select request.
+ * @return Nonzero indicates that a file is ready for I/O. 0 indicates that the file is not ready for I/O;
+ * there is no way to return an error. 0 should be returned if the device (or file) is not ready for I/O
+ * and the driver (or filesystem) is going to track the request and provide subsequent wakeups.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_SELECT(vnode_t, int, int, void *, vfs_context_t);
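The selrecord() protocol sketched in that comment, in handler form; the myfs_node state (including an embedded struct selinfo) and myfs_has_data() are hypothetical, while selrecord() and vfs_context_proc() are real KPI.

static int
myfs_vnop_select(struct vnop_select_args *ap)
{
	struct myfs_node *np = myfs_node(ap->a_vp);     /* hypothetical */

	if (ap->a_which == FREAD && myfs_has_data(np)) {
		return 1;       /* ready for I/O right now */
	}
	/* Not ready: record the selecting thread; the driver's ready path
	 * later calls selwakeup() on &np->si to provide the wakeup. */
	selrecord(vfs_context_proc(ap->a_context), &np->si, ap->a_wql);
	return 0;
}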
@@ -551,21 +551,21 @@ extern errno_t VNOP_SELECT(vnode_t, int, int, void *, vfs_context_t);
struct vnop_exchange_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_fvp;
-	vnode_t a_tvp;
+	vnode_t a_tvp;
	int a_options;
	vfs_context_t a_context;
};
/*!
- @function VNOP_EXCHANGE
- @abstract Call down to a filesystem to atomically exchange the data of two files.
- @discussion VNOP_EXCHANGE() is currently only called by the exchangedata() system call. It will only
- be applied to files on the same volume.
- @param fvp First vnode.
- @param tvp Second vnode.
- @param options Unused.
- @param ctx Context to authenticate for exchangedata request.
- @return 0 for success, else an error code.
+ * @function VNOP_EXCHANGE
+ * @abstract Call down to a filesystem to atomically exchange the data of two files.
+ * @discussion VNOP_EXCHANGE() is currently only called by the exchangedata() system call. It will only
+ * be applied to files on the same volume.
+ * @param fvp First vnode.
+ * @param tvp Second vnode.
+ * @param options Unused.
+ * @param ctx Context to authenticate for exchangedata request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_EXCHANGE(vnode_t, vnode_t, int, vfs_context_t);
@@ -579,15 +579,15 @@ struct vnop_revoke_args {
};
/*!
- @function VNOP_REVOKE
- @abstract Call down to a filesystem to invalidate all open file descriptors for a vnode.
- @discussion This function is typically called as part of a TTY revoke, but can also be
- used on regular files. Most filesystems simply use nop_revoke(), which calls vn_revoke(),
- as their revoke vnop implementation.
- @param vp The vnode to revoke.
- @param flags Unused.
- @param ctx Context to authenticate for revoke request.
- @return 0 for success, else an error code.
+ * @function VNOP_REVOKE
+ * @abstract Call down to a filesystem to invalidate all open file descriptors for a vnode.
+ * @discussion This function is typically called as part of a TTY revoke, but can also be
+ * used on regular files. Most filesystems simply use nop_revoke(), which calls vn_revoke(),
+ * as their revoke vnop implementation.
+ * @param vp The vnode to revoke.
+ * @param flags Unused.
+ * @param ctx Context to authenticate for revoke request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_REVOKE(vnode_t, int, vfs_context_t);
@@ -601,13 +601,13 @@ struct vnop_mmap_args {
};
/*!
- @function VNOP_MMAP
- @abstract Notify a filesystem that a file is being mmap-ed.
- @discussion VNOP_MMAP is an advisory calldown to say that the system is mmap-ing a file.
- @param vp The vnode being mmapped.
- @param flags Memory protection: PROT_READ, PROT_WRITE, PROT_EXEC.
- @param ctx Context to authenticate for mmap request.
- @return 0 for success; all errors except EPERM are ignored.
+ * @function VNOP_MMAP
+ * @abstract Notify a filesystem that a file is being mmap-ed.
+ * @discussion VNOP_MMAP is an advisory calldown to say that the system is mmap-ing a file.
+ * @param vp The vnode being mmapped.
+ * @param flags Memory protection: PROT_READ, PROT_WRITE, PROT_EXEC.
+ * @param ctx Context to authenticate for mmap request.
+ * @return 0 for success; all errors except EPERM are ignored.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_MMAP(vnode_t, int, vfs_context_t);
@@ -620,12 +620,12 @@ struct vnop_mnomap_args {
};
/*!
- @function VNOP_MNOMAP
- @abstract Inform a filesystem that a file is no longer mapped.
- @discussion In general, no action is required of a filesystem for VNOP_MNOMAP.
- @param vp The vnode which is no longer mapped.
- @param ctx Context to authenticate for mnomap request.
- @return Return value is ignored.
+ * @function VNOP_MNOMAP
+ * @abstract Inform a filesystem that a file is no longer mapped.
+ * @discussion In general, no action is required of a filesystem for VNOP_MNOMAP.
+ * @param vp The vnode which is no longer mapped.
+ * @param ctx Context to authenticate for mnomap request.
+ * @return Return value is ignored.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_MNOMAP(vnode_t, vfs_context_t);
@@ -639,13 +639,13 @@ struct vnop_fsync_args {
};
/*!
- @function VNOP_FSYNC
- @abstract Call down to a filesystem to synchronize a file with on-disk state.
- @discussion VNOP_FSYNC is called whenever we need to make sure that a file's data has been
- pushed to backing store, for example when recycling; it is also the heart of the fsync() system call.
- @param vp The vnode whose data to flush to backing store.
- @param ctx Context to authenticate for fsync request.
- @return 0 for success, else an error code.
+ * @function VNOP_FSYNC
+ * @abstract Call down to a filesystem to synchronize a file with on-disk state.
+ * @discussion VNOP_FSYNC is called whenever we need to make sure that a file's data has been
+ * pushed to backing store, for example when recycling; it is also the heart of the fsync() system call.
+ * @param vp The vnode whose data to flush to backing store.
+ * @param ctx Context to authenticate for fsync request.
+ * @return 0 for success, else an error code.
 */
extern errno_t VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx);
@@ -659,15 +659,15 @@ struct vnop_remove_args {
};
/*!
- @function VNOP_REMOVE
- @abstract Call down to a filesystem to delete a file.
- @discussion VNOP_REMOVE is called to remove a file from a filesystem's namespace, for example by unlink().
- It can operate on regular files, named pipes, special files, and in some cases on directories.
- @param dvp Directory in which to delete a file.
- @param vp The file to delete.
- @param cnp Filename information.
- @param ctx Context to authenticate for fsync request.
- @return 0 for success, else an error code.
+ * @function VNOP_REMOVE
+ * @abstract Call down to a filesystem to delete a file.
+ * @discussion VNOP_REMOVE is called to remove a file from a filesystem's namespace, for example by unlink().
+ * It can operate on regular files, named pipes, special files, and in some cases on directories.
+ * @param dvp Directory in which to delete a file.
+ * @param vp The file to delete.
+ * @param cnp Filename information.
+ * @param ctx Context to authenticate for remove request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_REMOVE(vnode_t, vnode_t, struct componentname *, int, vfs_context_t);
@@ -676,25 +676,25 @@ extern errno_t VNOP_REMOVE(vnode_t, vnode_t, struct componentname *, int, vfs_co
#ifdef KERNEL_PRIVATE
struct vnop_compound_remove_args {
	struct vnodeop_desc *a_desc;
-	vnode_t a_dvp;	/* Directory in which to lookup and remove */
-	vnode_t *a_vpp;	/* File to remove; may or may not point to NULL pointer */
-	struct componentname *a_cnp;	/* Name of file to remove */
-	struct vnode_attr *a_vap;	/* Destination for file attributes on successful delete */
-	uint32_t a_flags;	/* Control flags (unused) */
-	vfs_context_t a_context;	/* Authorization context */
-	int (*a_remove_authorizer)(	/* Authorizer callback */
-		vnode_t dvp,	/* Directory in which to delete */
-		vnode_t vp,	/* File to delete */
-		struct componentname *cnp,	/* As passed to VNOP */
-		vfs_context_t ctx,	/* As passed to VNOP */
-		void *reserved);	/* Always NULL */
-	void *a_reserved;	/* Unused */
+	vnode_t a_dvp;	/* Directory in which to lookup and remove */
+	vnode_t *a_vpp;	/* File to remove; may or may not point to NULL pointer */
+	struct componentname *a_cnp;	/* Name of file to remove */
+	struct vnode_attr *a_vap;	/* Destination for file attributes on successful delete */
+	uint32_t a_flags;	/* Control flags (unused) */
+	vfs_context_t a_context;	/* Authorization context */
+	int (*a_remove_authorizer)(	/* Authorizer callback */
+		vnode_t dvp,	/* Directory in which to delete */
+		vnode_t vp,	/* File to delete */
+		struct componentname *cnp,	/* As passed to VNOP */
+		vfs_context_t ctx,	/* As passed to VNOP */
+		void *reserved);	/* Always NULL */
+	void *a_reserved;	/* Unused */
};
#endif /* KERNEL_PRIVATE */
-#ifdef BSD_KERNEL_PRIVATE
+#ifdef BSD_KERNEL_PRIVATE
extern errno_t VNOP_COMPOUND_REMOVE(vnode_t, vnode_t*, struct nameidata *, int32_t flags, struct vnode_attr *vap, vfs_context_t);
-#endif
+#endif
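A sketch of the compound pattern under the structure above: the filesystem resolves the name itself, then consults the VFS-supplied authorizer before acting. The myfs_lookup_one() and myfs_unlink() helpers are hypothetical and not part of this patch.

static int
myfs_vnop_compound_remove(struct vnop_compound_remove_args *ap)
{
	vnode_t vp = *ap->a_vpp;        /* may already be resolved */
	int error;

	if (vp == NULL) {
		/* myfs_lookup_one() is a hypothetical internal lookup. */
		error = myfs_lookup_one(ap->a_dvp, ap->a_cnp, &vp, ap->a_context);
		if (error) {
			return error;
		}
		*ap->a_vpp = vp;
	}
	/* Authorize with the callback VFS handed us; reserved is always NULL. */
	error = ap->a_remove_authorizer(ap->a_dvp, vp, ap->a_cnp,
	    ap->a_context, NULL);
	if (error == 0) {
		error = myfs_unlink(ap->a_dvp, vp, ap->a_cnp);  /* hypothetical */
	}
	return error;
}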
struct vnop_link_args {
	struct vnodeop_desc *a_desc;
	vnode_t a_vp;
@@ -704,14 +704,14 @@ struct vnop_link_args {
};
/*!
- @function VNOP_LINK
- @abstract Call down to a filesystem to create a hardlink to a file.
- @discussion See "man 2 link".
- @param vp File to link to.
- @param dvp Directory in which to create the link.
- @param cnp Filename information for new link.
- @param ctx Context to authenticate for link request.
- @return 0 for success, else an error code.
+ * @function VNOP_LINK
+ * @abstract Call down to a filesystem to create a hardlink to a file.
+ * @discussion See "man 2 link".
+ * @param vp File to link to.
+ * @param dvp Directory in which to create the link.
+ * @param cnp Filename information for new link.
+ * @param ctx Context to authenticate for link request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_LINK(vnode_t, vnode_t, struct componentname *, vfs_context_t);
@@ -729,17 +729,17 @@ struct vnop_rename_args {
};
/*!
- @function VNOP_RENAME
- @abstract Call down to a filesystem to rename a file.
- @discussion VNOP_RENAME() will only be called with a source and target on the same volume.
- @param fdvp Directory in which source file resides.
- @param fvp File being renamed.
- @param fcnp Name information for source file.
- @param tdvp Directory file is being moved to.
- @param tvp Existing file with same name as target, should one exist.
- @param tcnp Name information for target path.
- @param ctx Context to authenticate for rename request.
- @return 0 for success, else an error code.
+ * @function VNOP_RENAME
+ * @abstract Call down to a filesystem to rename a file.
+ * @discussion VNOP_RENAME() will only be called with a source and target on the same volume.
+ * @param fdvp Directory in which source file resides.
+ * @param fvp File being renamed.
+ * @param fcnp Name information for source file.
+ * @param tdvp Directory file is being moved to.
+ * @param tvp Existing file with same name as target, should one exist.
+ * @param tcnp Name information for target path.
+ * @param ctx Context to authenticate for rename request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_RENAME(vnode_t, vnode_t, struct componentname *, vnode_t, vnode_t, struct componentname *, vfs_context_t);
@@ -749,12 +749,12 @@ typedef unsigned int vfs_rename_flags_t;
// Must match sys/stdio.h
enum {
-	VFS_RENAME_SECLUDE = 0x00000001,
-	VFS_RENAME_SWAP    = 0x00000002,
-	VFS_RENAME_EXCL    = 0x00000004,
+	VFS_RENAME_SECLUDE = 0x00000001,
+	VFS_RENAME_SWAP    = 0x00000002,
+	VFS_RENAME_EXCL    = 0x00000004,
-	VFS_RENAME_FLAGS_MASK = (VFS_RENAME_SECLUDE | VFS_RENAME_SWAP
-	    | VFS_RENAME_EXCL),
+	VFS_RENAME_FLAGS_MASK = (VFS_RENAME_SECLUDE | VFS_RENAME_SWAP
+	    | VFS_RENAME_EXCL),
};
struct vnop_renamex_args {
@@ -765,24 +765,24 @@ struct vnop_renamex_args {
	vnode_t a_tdvp;
	vnode_t a_tvp;
	struct componentname *a_tcnp;
-	struct vnode_attr *a_vap;	// Reserved for future use
+	struct vnode_attr *a_vap;	// Reserved for future use
	vfs_rename_flags_t a_flags;
	vfs_context_t a_context;
};
/*!
- @function VNOP_RENAMEX
- @abstract Call down to a filesystem to rename a file.
- @discussion VNOP_RENAMEX() will only be called with a source and target on the same volume.
- @param fdvp Directory in which source file resides.
- @param fvp File being renamed.
- @param fcnp Name information for source file.
- @param tdvp Directory file is being moved to.
- @param tvp Existing file with same name as target, should one exist.
- @param tcnp Name information for target path.
- @param flags Control certain rename semantics.
- @param ctx Context to authenticate for rename request.
- @return 0 for success, else an error code.
+ * @function VNOP_RENAMEX
+ * @abstract Call down to a filesystem to rename a file.
+ * @discussion VNOP_RENAMEX() will only be called with a source and target on the same volume.
+ * @param fdvp Directory in which source file resides.
+ * @param fvp File being renamed.
+ * @param fcnp Name information for source file.
+ * @param tdvp Directory file is being moved to.
+ * @param tvp Existing file with same name as target, should one exist.
+ * @param tcnp Name information for target path.
+ * @param flags Control certain rename semantics.
+ * @param ctx Context to authenticate for rename request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_RENAMEX(vnode_t, vnode_t, struct componentname *, vnode_t, vnode_t, struct componentname *, vfs_rename_flags_t, vfs_context_t);
@@ -792,37 +792,37 @@ extern errno_t VNOP_RENAMEX(vnode_t, vnode_t, struct componentname *, vnode_t, v
struct vnop_compound_rename_args {
	struct vnodeop_desc *a_desc;
-	vnode_t a_fdvp;	/* Directory from which to rename */
-	vnode_t *a_fvpp;	/* Vnode to rename (can point to a NULL pointer) */
-	struct componentname *a_fcnp;	/* Source name */
-	struct vnode_attr *a_fvap;
+	vnode_t a_fdvp;	/* Directory from which to rename */
+	vnode_t *a_fvpp;	/* Vnode to rename (can point to a NULL pointer) */
+	struct componentname *a_fcnp;	/* Source name */
+	struct vnode_attr *a_fvap;

-	vnode_t a_tdvp;	/* Directory to which to rename */
-	vnode_t *a_tvpp;	/* Vnode to rename over (can point to a NULL pointer) */
-	struct componentname *a_tcnp;	/* Destination name */
+	vnode_t a_tdvp;	/* Directory to which to rename */
+	vnode_t *a_tvpp;	/* Vnode to rename over (can point to a NULL pointer) */
+	struct componentname *a_tcnp;	/* Destination name */
	struct vnode_attr *a_tvap;
-	uint32_t a_flags;	/* Control flags: currently unused */
-	vfs_context_t a_context;	/* Authorization context */
-	int (*a_rename_authorizer)(	/* Authorization callback */
-		vnode_t fdvp,	/* As passed to VNOP */
-		vnode_t fvp,	/* Vnode to rename */
-		struct componentname *fcnp,	/* As passed to VNOP */
-		vnode_t tdvp,	/* As passed to VNOP */
-		vnode_t tvp,	/* Vnode to rename over (can be NULL) */
-		struct componentname *tcnp,	/* As passed to VNOP */
-		vfs_context_t ctx,	/* As passed to VNOP */
-		void *reserved);	/* Always NULL */
-	void *a_reserved;	/* Currently unused */
+	uint32_t a_flags;	/* Control flags: currently unused */
+	vfs_context_t a_context;	/* Authorization context */
+	int (*a_rename_authorizer)(	/* Authorization callback */
+		vnode_t fdvp,	/* As passed to VNOP */
+		vnode_t fvp,	/* Vnode to rename */
+		struct componentname *fcnp,	/* As passed to VNOP */
+		vnode_t tdvp,	/* As passed to VNOP */
+		vnode_t tvp,	/* Vnode to rename over (can be NULL) */
+		struct componentname *tcnp,	/* As passed to VNOP */
+		vfs_context_t ctx,	/* As passed to VNOP */
+		void *reserved);	/* Always NULL */
+	void *a_reserved;	/* Currently unused */
};
#endif /* KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
-errno_t
-VNOP_COMPOUND_RENAME(
-	struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
-	struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
-	uint32_t flags,vfs_context_t ctx);
+errno_t
+VNOP_COMPOUND_RENAME(
+	struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
+	struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
+	uint32_t flags, vfs_context_t ctx);
#endif /* XNU_KERNEL_PRIVATE */
struct vnop_mkdir_args {
@@ -835,15 +835,15 @@
};
/*!
- @function VNOP_MKDIR
- @abstract Call down to a filesystem to create a directory.
- @discussion The newly created directory should be returned with an iocount which will be dropped by the caller.
- @param dvp Directory in which to create new directory.
- @param vpp Destination for pointer to new directory's vnode.
- @param cnp Name information for new directory.
- @param vap Attributes for new directory.
- @param ctx Context to authenticate for mkdir request.
- @return 0 for success, else an error code.
+ * @function VNOP_MKDIR
+ * @abstract Call down to a filesystem to create a directory.
+ * @discussion The newly created directory should be returned with an iocount which will be dropped by the caller.
+ * @param dvp Directory in which to create new directory.
+ * @param vpp Destination for pointer to new directory's vnode.
+ * @param cnp Name information for new directory.
+ * @param vap Attributes for new directory.
+ * @param ctx Context to authenticate for mkdir request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_MKDIR(vnode_t, vnode_t *, struct componentname *, struct vnode_attr *, vfs_context_t);
@@ -853,16 +853,16 @@ extern errno_t VNOP_MKDIR(vnode_t, vnode_t *, struct componentname *, struct vno
#ifdef KERNEL_PRIVATE
struct vnop_compound_mkdir_args {
	struct vnodeop_desc *a_desc;
-	vnode_t a_dvp;	/* Directory in which to create */
-	vnode_t *a_vpp;	/* Destination for found or created vnode */
-	struct componentname *a_cnp;	/* Name of directory to create */
-	struct vnode_attr *a_vap;	/* Creation attributes */
-	uint32_t a_flags;	/* Control flags (unused) */
-	vfs_context_t a_context;	/* Authorization context */
+	vnode_t a_dvp;	/* Directory in which to create */
+	vnode_t *a_vpp;	/* Destination for found or created vnode */
+	struct componentname *a_cnp;	/* Name of directory to create */
+	struct vnode_attr *a_vap;	/* Creation attributes */
+	uint32_t a_flags;	/* Control flags (unused) */
+	vfs_context_t a_context;	/* Authorization context */
#if 0
	int (*a_mkdir_authorizer)(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved);
#endif /* 0 */
-	void *a_reserved;	/* Unused */
+	void *a_reserved;	/* Unused */
};
#endif /* KERNEL_PRIVATE */
@@ -879,13 +879,13 @@ struct vnop_rmdir_args {
};
/*!
- @function VNOP_RMDIR
- @abstract Call down to a filesystem to delete a directory.
- @param dvp Parent of directory to be removed.
- @param vp Directory to remove.
- @param cnp Name information for directory to be deleted.
- @param ctx Context to authenticate for rmdir request.
- @return 0 for success, else an error code.
+ * @function VNOP_RMDIR
+ * @abstract Call down to a filesystem to delete a directory.
+ * @param dvp Parent of directory to be removed.
+ * @param vp Directory to remove.
+ * @param cnp Name information for directory to be deleted.
+ * @param ctx Context to authenticate for rmdir request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_RMDIR(vnode_t, vnode_t, struct componentname *, vfs_context_t);
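A sketch of the shape an rmdir handler might take, with hypothetical myfs helpers that are not part of this patch; the ENOTEMPTY check reflects the usual POSIX rmdir semantics.

static int
myfs_vnop_rmdir(struct vnop_rmdir_args *ap)
{
	struct myfs_node *dp = myfs_node(ap->a_vp);     /* hypothetical */

	if (!myfs_dir_is_empty(dp)) {                   /* hypothetical */
		return ENOTEMPTY;
	}
	/* Remove the entry from the parent and mark the node unlinked;
	 * per-vnode resources are torn down later in VNOP_RECLAIM(). */
	return myfs_unlink_dir(ap->a_dvp, ap->a_vp, ap->a_cnp);
}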
@@ -894,19 +894,19 @@ extern errno_t VNOP_RMDIR(vnode_t, vnode_t, struct componentname *, vfs_context_
#ifdef KERNEL_PRIVATE
struct vnop_compound_rmdir_args {
	struct vnodeop_desc *a_desc;
-	vnode_t a_dvp;	/* Directory in which to look up and delete */
-	vnode_t *a_vpp;	/* Destination for found vnode */
-	struct componentname *a_cnp;	/* Name to delete */
-	struct vnode_attr *a_vap;	/* Location in which to store attributes if delete succeeds (can be NULL) */
-	uint32_t a_flags;	/* Control flags (currently unused) */
-	vfs_context_t a_context;	/* Context for authorization */
-	int (*a_rmdir_authorizer)(	/* Authorization callback */
-		vnode_t dvp,	/* As passed to VNOP */
-		vnode_t vp,	/* Directory to delete */
-		struct componentname *cnp,	/* As passed to VNOP */
-		vfs_context_t ctx,	/* As passed to VNOP */
-		void *reserved);	/* Always NULL */
-	void *a_reserved;	/* Unused */
+	vnode_t a_dvp;	/* Directory in which to look up and delete */
+	vnode_t *a_vpp;	/* Destination for found vnode */
+	struct componentname *a_cnp;	/* Name to delete */
+	struct vnode_attr *a_vap;	/* Location in which to store attributes if delete succeeds (can be NULL) */
+	uint32_t a_flags;	/* Control flags (currently unused) */
+	vfs_context_t a_context;	/* Context for authorization */
+	int (*a_rmdir_authorizer)(	/* Authorization callback */
+		vnode_t dvp,	/* As passed to VNOP */
+		vnode_t vp,	/* Directory to delete */
+		struct componentname *cnp,	/* As passed to VNOP */
+		vfs_context_t ctx,	/* As passed to VNOP */
+		void *reserved);	/* Always NULL */
+	void *a_reserved;	/* Unused */
};
#endif /* KERNEL_PRIVATE */
@@ -916,28 +916,28 @@ extern errno_t VNOP_COMPOUND_RMDIR(vnode_t, vnode_t*, struct nameidata *, struct
struct vnop_symlink_args {
-	struct vnodeop_desc *a_desc;
-	vnode_t a_dvp;
-	vnode_t *a_vpp;
-	struct componentname *a_cnp;
-	struct vnode_attr *a_vap;
-	char *a_target;
-	vfs_context_t a_context;
+	struct vnodeop_desc *a_desc;
+	vnode_t a_dvp;
+	vnode_t *a_vpp;
+	struct componentname *a_cnp;
+	struct vnode_attr *a_vap;
+	char *a_target;
+	vfs_context_t a_context;
};
/*!
- @function VNOP_SYMLINK
- @abstract Call down to a filesystem to create a symbolic link.
- @param If VNOP_SYMLINK() is successful, the new file should be returned with an iocount which will
- be dropped by the caller. VFS does not ensure that the target path will have a length shorter
- than the max symlink length for the filesystem.
- @param dvp Parent directory for new symlink file.
- @param vpp
- @param cnp Name information for new symlink.
- @param vap Attributes for symlink.
- @param target Path for symlink to store; for "ln -s /var/vardir linktovardir", "target" would be "/var/vardir"
- @param ctx Context to authenticate for symlink request.
- @return 0 for success, else an error code.
+ * @function VNOP_SYMLINK
+ * @abstract Call down to a filesystem to create a symbolic link.
+ * @discussion If VNOP_SYMLINK() is successful, the new file should be returned with an iocount which will
+ * be dropped by the caller. VFS does not ensure that the target path will have a length shorter
+ * than the max symlink length for the filesystem.
+ * @param dvp Parent directory for new symlink file.
+ * @param vpp Destination for vnode of newly created symbolic link.
+ * @param cnp Name information for new symlink.
+ * @param vap Attributes for symlink.
+ * @param target Path for symlink to store; for "ln -s /var/vardir linktovardir", "target" would be "/var/vardir".
+ * @param ctx Context to authenticate for symlink request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_SYMLINK(vnode_t, vnode_t *, struct componentname *, struct vnode_attr *, char *, vfs_context_t);
@@ -973,17 +973,17 @@ struct vnop_readdir_args {
};
/*!
- @function VNOP_READDIR
- @abstract Call down to a filesystem to enumerate directory entries.
- @discussion VNOP_READDIR() packs a buffer with "struct dirent" directory entry representations as described
- by the "getdirentries" manual page.
- @param vp Directory to enumerate.
- @param uio Destination information for resulting direntries.
- @param flags VNODE_READDIR_EXTENDED, VNODE_READDIR_REQSEEKOFF, VNODE_READDIR_SEEKOFF32: Apple-internal flags.
- @param eofflag Should be set to 1 if the end of the directory has been reached.
- @param numdirent Should be set to number of entries written into buffer.
- @param ctx Context to authenticate for readdir request.
- @return 0 for success, else an error code.
+ * @function VNOP_READDIR
+ * @abstract Call down to a filesystem to enumerate directory entries.
+ * @discussion VNOP_READDIR() packs a buffer with "struct dirent" directory entry representations as described
+ * by the "getdirentries" manual page.
+ * @param vp Directory to enumerate.
+ * @param uio Destination information for resulting direntries.
+ * @param flags VNODE_READDIR_EXTENDED, VNODE_READDIR_REQSEEKOFF, VNODE_READDIR_SEEKOFF32: Apple-internal flags.
+ * @param eofflag Should be set to 1 if the end of the directory has been reached.
+ * @param numdirent Should be set to number of entries written into buffer.
+ * @param ctx Context to authenticate for readdir request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_READDIR(vnode_t, struct uio *, int, int *, int *, vfs_context_t);
@@ -1003,21 +1003,21 @@ struct vnop_readdirattr_args {
};
/*!
- @function VNOP_READDIRATTR
- @abstract Call down to get file attributes for many files in a directory at once.
- @discussion VNOP_READDIRATTR() packs a buffer with file attributes, as if the results of many "getattrlist" calls.
- @param vp Directory in which to enumerate entries' attributes.
- @param alist Which attributes are wanted for each directory entry.
- @param uio Destination information for resulting attributes.
- @param maxcount Maximum count of files to get attributes for.
- @param options FSOPT_NOFOLLOW: do not follow symbolic links. FSOPT_NOINMEMUPDATE: do not use data which have been
- updated since an inode was loaded into memory.
- @param newstate The "newstate" should be set to a value which changes if the contents of a directory change
- through an addition or deletion but stays the same otherwise.
- @param eofflag Should be set to 1 if the end of the directory has been reached.
- @param actualcount Should be set to number of files whose attributes were written into buffer.
- @param ctx Context to authenticate for readdirattr request.
- @return 0 for success, else an error code.
+ * @function VNOP_READDIRATTR
+ * @abstract Call down to get file attributes for many files in a directory at once.
+ * @discussion VNOP_READDIRATTR() packs a buffer with file attributes, as if the results of many "getattrlist" calls.
+ * @param vp Directory in which to enumerate entries' attributes.
+ * @param alist Which attributes are wanted for each directory entry.
+ * @param uio Destination information for resulting attributes.
+ * @param maxcount Maximum count of files to get attributes for.
+ * @param options FSOPT_NOFOLLOW: do not follow symbolic links. FSOPT_NOINMEMUPDATE: do not use data which have been
+ * updated since an inode was loaded into memory.
+ * @param newstate The "newstate" should be set to a value which changes if the contents of a directory change
+ * through an addition or deletion but stays the same otherwise.
+ * @param eofflag Should be set to 1 if the end of the directory has been reached.
+ * @param actualcount Should be set to number of files whose attributes were written into buffer.
+ * @param ctx Context to authenticate for readdirattr request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_READDIRATTR(vnode_t, struct attrlist *, struct uio *, uint32_t, uint32_t, uint32_t *, int *, uint32_t *, vfs_context_t);
@@ -1037,22 +1037,22 @@ struct vnop_getattrlistbulk_args {
};
/*!
- @function VNOP_GETATTRLISTBULK
- @abstract Call down to get file attributes for many files in a directory at once.
- @discussion VNOP_GETATTRLISTBULK() packs a buffer with file attributes, as if the results of many "getattrlist" calls.
- @param vp Directory in which to enumerate entries' attributes.
- @param alist Which attributes are wanted for each directory entry.
- @param uio Destination information for resulting attributes.
- @param vap initialised vnode_attr structure pointer. This structure also has memory allocated (MAXPATHLEN bytes) and assigned to the va_name field for filesystems to use.
- @param private reserved for future use.
- @param options
- @param eofflag Should be set to 1 if the end of the directory has been reached.
- @param actualcount Should be set to number of files whose attributes were written into buffer.
- @param ctx Context to authenticate for getattrlistbulk request.
- @return 0 for success, else an error code.
+ * @function VNOP_GETATTRLISTBULK
+ * @abstract Call down to get file attributes for many files in a directory at once.
+ * @discussion VNOP_GETATTRLISTBULK() packs a buffer with file attributes, as if the results of many "getattrlist" calls.
+ * @param vp Directory in which to enumerate entries' attributes.
+ * @param alist Which attributes are wanted for each directory entry.
+ * @param uio Destination information for resulting attributes.
+ * @param vap Initialised vnode_attr structure pointer. This structure also has memory allocated (MAXPATHLEN bytes) and assigned to the va_name field for filesystems to use.
+ * @param private Reserved for future use.
+ * @param options
+ * @param eofflag Should be set to 1 if the end of the directory has been reached.
+ * @param actualcount Should be set to number of files whose attributes were written into buffer.
+ * @param ctx Context to authenticate for getattrlistbulk request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
-extern errno_t VNOP_GETATTRLISTBULK(vnode_t, struct attrlist *, struct vnode_attr *, uio_t, void *, uint64_t, int32_t *, int32_t *, vfs_context_t);
+extern errno_t VNOP_GETATTRLISTBULK(vnode_t, struct attrlist *, struct vnode_attr *, uio_t, void *, uint64_t, int32_t *, int32_t *, vfs_context_t);
#endif /* XNU_KERNEL_PRIVATE */
struct vnop_readlink_args {
@@ -1063,13 +1063,13 @@
};
/*!
- @function VNOP_READLINK
- @abstract Call down to a filesystem to get the pathname represented by a symbolic link.
- @discussion VNOP_READLINK() gets the path stored in a symbolic link; it is called by namei() and the readlink() system call.
- @param vp Symbolic link to read from.
- @param uio Destination information for link path.
- @param ctx Context to authenticate for readlink request.
- @return 0 for success, else an error code.
+ * @function VNOP_READLINK
+ * @abstract Call down to a filesystem to get the pathname represented by a symbolic link.
+ * @discussion VNOP_READLINK() gets the path stored in a symbolic link; it is called by namei() and the readlink() system call.
+ * @param vp Symbolic link to read from.
+ * @param uio Destination information for link path.
+ * @param ctx Context to authenticate for readlink request.
+ * @return 0 for success, else an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_READLINK(vnode_t, struct uio *, vfs_context_t);
@@ -1082,16 +1082,16 @@ struct vnop_inactive_args {
};
/*!
- @function VNOP_INACTIVE
- @abstract Notify a filesystem that the last usecount (persistent reference) on a vnode has been dropped.
- @discussion VNOP_INACTVE() gives a filesystem a chance to aggressively release resources assocated with a vnode, perhaps
- even to call vnode_recycle(), but no action is prescribed; it is acceptable for VNOP_INACTIVE to be a no-op and
- to defer all reclamation until VNOP_RECLAIM().
- VNOP_INACTVE() will not be called on a vnode if no persistent reference is ever taken; an
- important example is a stat(), which takes an iocount, reads its data, and drops that iocount.
- @param vp The vnode which is now inactive.
- @param ctx Context to authenticate for inactive message.
- @return 0 for success, else an error code, but return value is currently ignored.
+ * @function VNOP_INACTIVE
+ * @abstract Notify a filesystem that the last usecount (persistent reference) on a vnode has been dropped.
+ * @discussion VNOP_INACTIVE() gives a filesystem a chance to aggressively release resources associated with a vnode, perhaps
+ * even to call vnode_recycle(), but no action is prescribed; it is acceptable for VNOP_INACTIVE to be a no-op and
+ * to defer all reclamation until VNOP_RECLAIM().
+ * VNOP_INACTIVE() will not be called on a vnode if no persistent reference is ever taken; an
+ * important example is a stat(), which takes an iocount, reads its data, and drops that iocount.
+ * @param vp The vnode which is now inactive.
+ * @param ctx Context to authenticate for inactive message.
+ * @return 0 for success, else an error code, but return value is currently ignored.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_INACTIVE(vnode_t, vfs_context_t);
@@ -1104,18 +1104,18 @@ struct vnop_reclaim_args {
};
/*!
- @function VNOP_RECLAIM
- @abstract Release filesystem-internal resources for a vnode.
- @discussion VNOP_RECLAIM() is called as part of the process of recycling a vnode. During
- a reclaim routine, a filesystem should remove a vnode from its hash and deallocate any resources
- allocated to that vnode. VFS guarantees that when VNOP_RECLAIM() is called, there are no more
- iocount references on a vnode (though there may still be usecount references--these are invalidated
- by the reclaim) and that no more will be granted. This means in practice that there will be no
- filesystem calls on the vnode being reclaimed until the reclaim has finished and the vnode has
- been reused.
- @param vp The vnode to reclaim.
- @param ctx Context to authenticate for reclaim.
- @return 0 for success, or an error code. A nonzero return value results in a panic.
+ * @function VNOP_RECLAIM
+ * @abstract Release filesystem-internal resources for a vnode.
+ * @discussion VNOP_RECLAIM() is called as part of the process of recycling a vnode. During
+ * a reclaim routine, a filesystem should remove a vnode from its hash and deallocate any resources
+ * allocated to that vnode. VFS guarantees that when VNOP_RECLAIM() is called, there are no more
+ * iocount references on a vnode (though there may still be usecount references--these are invalidated
+ * by the reclaim) and that no more will be granted. This means in practice that there will be no
+ * filesystem calls on the vnode being reclaimed until the reclaim has finished and the vnode has
+ * been reused.
+ * @param vp The vnode to reclaim.
+ * @param ctx Context to authenticate for reclaim.
+ * @return 0 for success, or an error code. A nonzero return value results in a panic.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_RECLAIM(vnode_t, vfs_context_t);
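The guarantees above make the reclaim side fairly mechanical; a sketch with hypothetical myfs helpers (vnode_clearfsnode() is the real KPI for detaching per-filesystem state):

static int
myfs_vnop_reclaim(struct vnop_reclaim_args *ap)
{
	struct myfs_node *np = myfs_node(ap->a_vp);     /* hypothetical */

	/* No iocounts remain and none will be granted, so it is safe to
	 * unhash the node and free per-vnode state. */
	myfs_hash_remove(np);                           /* hypothetical */
	vnode_clearfsnode(ap->a_vp);
	myfs_node_free(np);                             /* hypothetical */
	return 0;       /* a nonzero return panics, per the comment above */
}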
@@ -1130,14 +1130,14 @@ struct vnop_pathconf_args {
};
/*!
- @function VNOP_PATHCONF
- @abstract Query a filesystem for path properties.
- @param vp The vnode whose filesystem to query.
- @param name Which property to request: see unistd.h. For example: _PC_CASE_SENSITIVE (is
- a filesystem case-sensitive?). Only one property can be requested at a time.
- @param retval Destination for value of property.
- @param ctx Context to authenticate for pathconf request.
- @return 0 for success, or an error code.
+ * @function VNOP_PATHCONF
+ * @abstract Query a filesystem for path properties.
+ * @param vp The vnode whose filesystem to query.
+ * @param name Which property to request: see unistd.h. For example: _PC_CASE_SENSITIVE (is
+ * a filesystem case-sensitive?). Only one property can be requested at a time.
+ * @param retval Destination for value of property.
+ * @param ctx Context to authenticate for pathconf request.
+ * @return 0 for success, or an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_PATHCONF(vnode_t, int, int32_t *, vfs_context_t);
@@ -1155,23 +1155,23 @@ struct vnop_advlock_args {
};
/*!
- @function VNOP_ADVLOCK
- @abstract Aquire or release and advisory lock on a vnode.
- @discussion Advisory locking is somewhat complicated. VNOP_ADVLOCK is overloaded for
- both flock() and POSIX advisory locking usage, though not all filesystems support both (or any). VFS
- provides an advisory locking mechanism for filesystems which can take advantage of it; vfs_setlocklocal()
- marks a filesystem as using VFS advisory locking support.
- @param vp The vnode to lock or unlock.
- @param id Identifier for lock holder: ignored by most filesystems.
- @param op Which locking operation: F_SETLK: set locking information about a region.
- F_GETLK: get locking information about the specified region. F_UNLCK: Unlock a region.
- @param fl Description of file region to lock. l_whence is as with "lseek."
- Includes a type: F_RDLCK (shared lock), F_UNLCK (unlock) , and F_WRLCK (exclusive lock).
- @param flags F_FLOCK: use flock() semantics. F_POSIX: use POSIX semantics. F_WAIT: sleep if necessary.
- F_PROV: Non-coelesced provisional lock (unused in xnu).
- @param ctx Context to authenticate for advisory locking request.
- @param timeout Timespec for timeout in case of F_SETLKWTIMEOUT.
- @return 0 for success, or an error code.
+ * @function VNOP_ADVLOCK
+ * @abstract Acquire or release an advisory lock on a vnode.
+ * @discussion Advisory locking is somewhat complicated. VNOP_ADVLOCK is overloaded for
+ * both flock() and POSIX advisory locking usage, though not all filesystems support both (or any). VFS
+ * provides an advisory locking mechanism for filesystems which can take advantage of it; vfs_setlocklocal()
+ * marks a filesystem as using VFS advisory locking support.
+ * @param vp The vnode to lock or unlock.
+ * @param id Identifier for lock holder: ignored by most filesystems.
+ * @param op Which locking operation: F_SETLK: set locking information about a region.
+ * F_GETLK: get locking information about the specified region. F_UNLCK: Unlock a region.
+ * @param fl Description of file region to lock. l_whence is as with "lseek."
+ * Includes a type: F_RDLCK (shared lock), F_UNLCK (unlock), and F_WRLCK (exclusive lock).
+ * @param flags F_FLOCK: use flock() semantics. F_POSIX: use POSIX semantics. F_WAIT: sleep if necessary.
+ * F_PROV: Non-coalesced provisional lock (unused in xnu).
+ * @param ctx Context to authenticate for advisory locking request.
+ * @param timeout Timespec for timeout in case of F_SETLKWTIMEOUT.
+ * @return 0 for success, or an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_ADVLOCK(vnode_t, caddr_t, int, struct flock *, int, vfs_context_t, struct timespec *);
@@ -1188,27 +1188,27 @@ struct vnop_allocate_args {
};
/*!
- @function VNOP_ALLOCATE
- @abstract Pre-allocate space for a file.
- @discussion VNOP_ALLOCATE() changes the amount of backing store set aside to
- a file. It can be used to either shrink or grow a file. If the file shrinks,
- its ubc size will be modified accordingly, but if it grows, then the ubc size is unchanged;
- space is set aside without being actively used by the file. VNOP_ALLOCATE() is currently only
- called as part of the F_PREALLOCATE fcntl.
- @param vp The vnode for which to preallocate space.
- @param length Desired preallocated file length.
- @param flags
- PREALLOCATE: preallocate allocation blocks.
- ALLOCATECONTIG: allocate contigious space.
- ALLOCATEALL: allocate all requested space or no space at all.
- FREEREMAINDER: deallocate allocated but unfilled blocks.
- ALLOCATEFROMPEOF: allocate from the physical eof.
- ALLOCATEFROMVOL: allocate from the volume offset.
- @param bytesallocated Additional bytes set aside for file. Set to 0 if none are allocated
- OR if the file is contracted.
- @param offset Hint for where to find free blocks.
- @param ctx Context to authenticate for allocation request.
- @return 0 for success, or an error code.
+ * @function VNOP_ALLOCATE
+ * @abstract Pre-allocate space for a file.
+ * @discussion VNOP_ALLOCATE() changes the amount of backing store set aside to
+ * a file. It can be used to either shrink or grow a file. If the file shrinks,
+ * its ubc size will be modified accordingly, but if it grows, then the ubc size is unchanged;
+ * space is set aside without being actively used by the file. VNOP_ALLOCATE() is currently only
+ * called as part of the F_PREALLOCATE fcntl.
+ * @param vp The vnode for which to preallocate space.
+ * @param length Desired preallocated file length.
+ * @param flags
+ * PREALLOCATE: preallocate allocation blocks.
+ * ALLOCATECONTIG: allocate contiguous space.
+ * ALLOCATEALL: allocate all requested space or no space at all.
+ * FREEREMAINDER: deallocate allocated but unfilled blocks.
+ * ALLOCATEFROMPEOF: allocate from the physical eof.
+ * ALLOCATEFROMVOL: allocate from the volume offset.
+ * @param bytesallocated Additional bytes set aside for file. Set to 0 if none are allocated
+ * OR if the file is contracted.
+ * @param offset Hint for where to find free blocks.
+ * @param ctx Context to authenticate for allocation request.
+ * @return 0 for success, or an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
extern errno_t VNOP_ALLOCATE(vnode_t, off_t, u_int32_t, off_t *, off_t, vfs_context_t);
@@ -1226,24 +1226,24 @@ struct vnop_pagein_args {
};
/*!
- @function VNOP_PAGEIN
- @abstract Pull file data into memory.
- @discussion VNOP_PAGEIN() is called by when a process faults on data mapped from a file or
- when madvise() demands pre-fetching. It is conceptually somewhat similar to VNOP_READ(). Filesystems
- are typically expected to call cluster_pagein() to handle the labor of mapping and committing the UPL.
- @param vp The vnode for which to page in data.
- @param pl UPL describing pages needing to be paged in.
- @param pl_offset Offset in UPL at which to start placing data.
- @param f_offset Offset in file of data needing to be paged in.
- @param size Amount of data to page in (in bytes).
- @param flags UPL-style flags: UPL_IOSYNC, UPL_NOCOMMIT, UPL_NORDAHEAD, UPL_VNODE_PAGER, UPL_MSYNC.
- Filesystems should generally leave it to the cluster layer to handle these flags. See the
- memory_object_types.h header in the kernel framework if interested.
- @param ctx Context to authenticate for pagein request.
- @return 0 for success, or an error code.
+ * @function VNOP_PAGEIN
+ * @abstract Pull file data into memory.
+ * @discussion VNOP_PAGEIN() is called when a process faults on data mapped from a file or
+ * when madvise() demands pre-fetching. It is conceptually somewhat similar to VNOP_READ(). Filesystems
+ * are typically expected to call cluster_pagein() to handle the labor of mapping and committing the UPL.
+ * @param vp The vnode for which to page in data.
+ * @param pl UPL describing pages needing to be paged in.
+ * @param pl_offset Offset in UPL at which to start placing data.
+ * @param f_offset Offset in file of data needing to be paged in.
+ * @param size Amount of data to page in (in bytes).
+ * @param flags UPL-style flags: UPL_IOSYNC, UPL_NOCOMMIT, UPL_NORDAHEAD, UPL_VNODE_PAGER, UPL_MSYNC.
+ * Filesystems should generally leave it to the cluster layer to handle these flags. See the
+ * memory_object_types.h header in the kernel framework if interested.
+ * @param ctx Context to authenticate for pagein request.
+ * @return 0 for success, or an error code.
 */
#ifdef XNU_KERNEL_PRIVATE
-extern errno_t VNOP_PAGEIN(vnode_t, upl_t, upl_offset_t, off_t, size_t, int, vfs_context_t);
+extern errno_t VNOP_PAGEIN(vnode_t, upl_t, upl_offset_t, off_t, size_t, int, vfs_context_t);
#endif /* XNU_KERNEL_PRIVATE */
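As the comment above notes, most filesystems reduce this operation to a single cluster-layer call; a sketch, where cluster_pagein() is the real KPI and myfs_filesize() is a hypothetical helper for the current EOF:

static int
myfs_vnop_pagein(struct vnop_pagein_args *ap)
{
	/* cluster_pagein() maps the UPL, issues the I/O against the vnode,
	 * and commits or aborts the pages according to a_flags. */
	return cluster_pagein(ap->a_vp, ap->a_pl, ap->a_pl_offset,
	    ap->a_f_offset, (int)ap->a_size,
	    myfs_filesize(ap->a_vp),    /* hypothetical current EOF */
	    ap->a_flags);
}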
This means that the filesystem must be - responsible for N cluster_pageout calls for N dirty ranges in the UPL. - @param pl_offset Offset in UPL from which to start paging out data. Under the new VFC_VFSVNOP_PAGEOUTV2 - semantics, this is the offset in the range specified that must be paged out if the associated page is dirty. - @param f_offset Offset in file of data needing to be paged out. Under the new VFC_VFSVNOP_PAGEOUTV2 - semantics, this represents the offset in the file where we should start looking for dirty pages. - @param size Amount of data to page out (in bytes). Under VFC_VFSVNOP_PAGEOUTV2, this represents - the size of the range to be considered. The fileystem is free to extend or shrink the specified range - to better fit its blocking model as long as the page at 'pl_offset' is included. - @param flags UPL-style flags: UPL_IOSYNC, UPL_NOCOMMIT, UPL_NORDAHEAD, UPL_VNODE_PAGER, UPL_MSYNC. - Filesystems should generally leave it to the cluster layer to handle these flags. See the - memory_object_types.h header in the kernel framework if interested. - @param ctx Context to authenticate for pageout request. - @return 0 for success, or an error code. + * @function VNOP_PAGEOUT + * @abstract Write data from a mapped file back to disk. + * @discussion VNOP_PAGEOUT() is called when data from a mapped file needs to be flushed to disk, either + * because of an msync() call or due to memory pressure. Filesystems are for the most part expected to + * just call cluster_pageout(). However, if they opt into the VFC_VFSVNOP_PAGEOUTV2 flag, then + * they will be responsible for creating their own UPLs. + * @param vp The vnode for which to page out data. + * @param pl UPL describing pages needed to be paged out. If UPL is NULL, then it means the filesystem + * has opted into VFC_VFSVNOP_PAGEOUTV2 semantics, which means that it will create and operate on its own UPLs + * as opposed to relying on the one passed down into the filesystem. This means that the filesystem must be + * responsible for N cluster_pageout calls for N dirty ranges in the UPL. + * @param pl_offset Offset in UPL from which to start paging out data. Under the new VFC_VFSVNOP_PAGEOUTV2 + * semantics, this is the offset in the range specified that must be paged out if the associated page is dirty. + * @param f_offset Offset in file of data needing to be paged out. Under the new VFC_VFSVNOP_PAGEOUTV2 + * semantics, this represents the offset in the file where we should start looking for dirty pages. + * @param size Amount of data to page out (in bytes). Under VFC_VFSVNOP_PAGEOUTV2, this represents + * the size of the range to be considered. The filesystem is free to extend or shrink the specified range + * to better fit its blocking model as long as the page at 'pl_offset' is included. + * @param flags UPL-style flags: UPL_IOSYNC, UPL_NOCOMMIT, UPL_NORDAHEAD, UPL_VNODE_PAGER, UPL_MSYNC. + * Filesystems should generally leave it to the cluster layer to handle these flags. See the + * memory_object_types.h header in the kernel framework if interested. + * @param ctx Context to authenticate for pageout request. + * @return 0 for success, or an error code. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_PAGEOUT(vnode_t, upl_t, upl_offset_t, off_t, size_t, int, vfs_context_t); #endif /* XNU_KERNEL_PRIVATE */ @@ -1304,33 +1304,33 @@ struct vnop_searchfs_args { }; /* - @function VNOP_SEARCHFS - @abstract Search a filesystem quickly for files or directories that match the passed-in search criteria.
- @discussion VNOP_SEARCHFS is a getattrlist-based system call which is implemented almost entirely inside - supported filesystems. Callers provide a set of criteria to match against, and the filesystem is responsible - for finding all files or directories that match the criteria. Once these files or directories are found, - the user-requested attributes of these files is provided as output. The set of searchable attributes is a - subset of the getattrlist attributes. For example, ATTR_CMN_UUID is not a valid searchable attribute as of - 10.6. A common usage scenario could be to request all files whose mod dates is greater than time X, less than - time Y, and provide the inode ID and filename of the matching objects as output. - @param vp The vnode representing the mountpoint of the filesystem to be searched. - @param a_searchparams1 If one-argument search criteria is requested, the search criteria would go here. However, - some search criteria, like ATTR_CMN_MODTIME, can be bounded. The user could request files modified between time X - and time Y. In this case, the lower bound goes in a_searchparams1. - @param a_searchparams2 If two-argument search criteria is requested, the upper bound goes in here. - @param a_searchattrs Contains the getattrlist-style attribute bits which are requested by the current search. - @param a_maxmatches The maximum number of matches to return in a single system call. - @param a_timelimit The suggested maximum amount of time we can spend in the kernel to service this system call. - Filesystems should use this as a guide only, and set their own internal maximum time to avoid denial of service. - @param a_returnattrs The getattrlist-style attributes to return for items in the filesystem that match the search - criteria above. - @param a_scriptcode Currently ignored. - @param a_uio The uio in which to write out the search matches. - @param a_searchstate Sometimes searches cannot be completed in a single system call. In this case, we provide - an identifier back to the user which indicates where to resume a previously-started search. This is an opaque structure - used by the filesystem to identify where to resume said search. - @param a_context The context in which to perform the filesystem search. - @return 0 on success, EAGAIN for searches which could not be completed in 1 call, and other ERRNOS as needed. + * @function VNOP_SEARCHFS + * @abstract Search a filesystem quickly for files or directories that match the passed-in search criteria. + * @discussion VNOP_SEARCHFS is a getattrlist-based system call which is implemented almost entirely inside + * supported filesystems. Callers provide a set of criteria to match against, and the filesystem is responsible + * for finding all files or directories that match the criteria. Once these files or directories are found, + * the user-requested attributes of these files are provided as output. The set of searchable attributes is a + * subset of the getattrlist attributes. For example, ATTR_CMN_UUID is not a valid searchable attribute as of + * 10.6. A common usage scenario could be to request all files whose mod dates are greater than time X, less than + * time Y, and provide the inode ID and filename of the matching objects as output. + * @param vp The vnode representing the mountpoint of the filesystem to be searched. + * @param a_searchparams1 If one-argument search criteria is requested, the search criteria would go here. However, + * some search criteria, like ATTR_CMN_MODTIME, can be bounded.
The user could request files modified between time X + * and time Y. In this case, the lower bound goes in a_searchparams1. + * @param a_searchparams2 If two-argument search criteria is requested, the upper bound goes in here. + * @param a_searchattrs Contains the getattrlist-style attribute bits which are requested by the current search. + * @param a_maxmatches The maximum number of matches to return in a single system call. + * @param a_timelimit The suggested maximum amount of time we can spend in the kernel to service this system call. + * Filesystems should use this as a guide only, and set their own internal maximum time to avoid denial of service. + * @param a_returnattrs The getattrlist-style attributes to return for items in the filesystem that match the search + * criteria above. + * @param a_scriptcode Currently ignored. + * @param a_uio The uio in which to write out the search matches. + * @param a_searchstate Sometimes searches cannot be completed in a single system call. In this case, we provide + * an identifier back to the user which indicates where to resume a previously-started search. This is an opaque structure + * used by the filesystem to identify where to resume said search. + * @param a_context The context in which to perform the filesystem search. + * @return 0 on success, EAGAIN for searches which could not be completed in 1 call, and other ERRNOS as needed. */ #ifdef XNU_KERNEL_PRIVATE @@ -1367,32 +1367,32 @@ struct vnop_clonefile_args { struct vnode_attr *a_vap; uint32_t a_flags; vfs_context_t a_context; - int (*a_dir_clone_authorizer)( /* Authorization callback */ - struct vnode_attr *vap, /* attribute to be authorized */ - kauth_action_t action, /* action for which attribute is to be authorized */ - struct vnode_attr *dvap, /* target directory attributes */ - vnode_t sdvp, /* source directory vnode pointer (optional) */ - mount_t mp, /* mount point of filesystem */ - dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */ - uint32_t flags, /* needs to have the value passed to a_flags */ - vfs_context_t ctx, /* As passed to VNOP */ - void *reserved); /* Always NULL */ - void *a_reserved; /* Currently unused */ + int (*a_dir_clone_authorizer)( /* Authorization callback */ + struct vnode_attr *vap, /* attribute to be authorized */ + kauth_action_t action, /* action for which attribute is to be authorized */ + struct vnode_attr *dvap, /* target directory attributes */ + vnode_t sdvp, /* source directory vnode pointer (optional) */ + mount_t mp, /* mount point of filesystem */ + dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */ + uint32_t flags, /* needs to have the value passed to a_flags */ + vfs_context_t ctx, /* As passed to VNOP */ + void *reserved); /* Always NULL */ + void *a_reserved; /* Currently unused */ }; /*! - @function VNOP_CLONEFILE - @abstract Call down to a filesystem to clone a filesystem object (regular file, directory or symbolic link.) - @discussion If file creation succeeds, "vpp" should be returned with an iocount to be dropped by the caller. - @param dvp Directory in which to clone object. - @param vpp Destination for vnode for newly cloned object. - @param cnp Description of name of object to clone. - @param vap File creation properties, as seen in vnode_getattr(). Manipulated with VATTR_ISACTIVE, VATTR_RETURN, - VATTR_SET_SUPPORTED, and so forth. 
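(Illustrative sketch, not part of this patch: the clonefile documentation below describes the vnode_attr handshake; the macros' actual spellings are VATTR_IS_ACTIVE and VATTR_SET_SUPPORTED, declared in sys/vnode.h, and all myfs_* names here are hypothetical.)

struct myfs_node { mode_t mn_mode; uid_t mn_uid; };      /* hypothetical */

static void
myfs_apply_clone_attrs(struct myfs_node *np, struct vnode_attr *vap)
{
	if (VATTR_IS_ACTIVE(vap, va_mode)) {        /* caller requested a mode */
		np->mn_mode = vap->va_mode;
		VATTR_SET_SUPPORTED(vap, va_mode);  /* tell VFS we honored it */
	}
	if (VATTR_IS_ACTIVE(vap, va_uid)) {
		np->mn_uid = vap->va_uid;
		VATTR_SET_SUPPORTED(vap, va_uid);
	}
}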
All attributes not set here should either be copied - from the source object - or set to values which are used for creating new filesystem objects - @param ctx Context against which to authenticate file creation. - @return 0 for success or a filesystem-specific error. + * @function VNOP_CLONEFILE + * @abstract Call down to a filesystem to clone a filesystem object (regular file, directory, or symbolic link). + * @discussion If file creation succeeds, "vpp" should be returned with an iocount to be dropped by the caller. + * @param dvp Directory in which to clone object. + * @param vpp Destination for vnode for newly cloned object. + * @param cnp Description of name of object to clone. + * @param vap File creation properties, as seen in vnode_getattr(). Manipulated with VATTR_ISACTIVE, VATTR_RETURN, + * VATTR_SET_SUPPORTED, and so forth. All attributes not set here should either be copied + * from the source object + * or set to values which are used for creating new filesystem objects. + * @param ctx Context against which to authenticate file creation. + * @return 0 for success or a filesystem-specific error. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_CLONEFILE(vnode_t, vnode_t, vnode_t *, struct componentname *, struct vnode_attr *, uint32_t, vfs_context_t); @@ -1410,15 +1410,15 @@ struct vnop_getxattr_args { }; extern struct vnodeop_desc vnop_getxattr_desc; /*! - @function VNOP_GETXATTR - @abstract Get extended file attributes. - @param vp The vnode to get extended attributes for. - @param name Which property to extract. - @param uio Destination information for attribute value. - @param size Should be set to the amount of data written. - @param options XATTR_NOSECURITY: bypass security-checking. - @param ctx Context to authenticate for getxattr request. - @return 0 for success, or an error code. + * @function VNOP_GETXATTR + * @abstract Get extended file attributes. + * @param vp The vnode to get extended attributes for. + * @param name Which property to extract. + * @param uio Destination information for attribute value. + * @param size Should be set to the amount of data written. + * @param options XATTR_NOSECURITY: bypass security-checking. + * @param ctx Context to authenticate for getxattr request. + * @return 0 for success, or an error code. */ extern errno_t VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx); @@ -1433,15 +1433,15 @@ struct vnop_setxattr_args { }; extern struct vnodeop_desc vnop_setxattr_desc; /*! - @function VNOP_SETXATTR - @abstract Set extended file attributes. - @param vp The vnode to set extended attributes for. - @param name Which property to extract. - @param uio Source information for attribute value. - @param options XATTR_NOSECURITY: bypass security-checking. XATTR_CREATE: set value, fail if exists. - XATTR_REPLACE: set value, fail if does not exist. - @param ctx Context to authenticate for setxattr request. - @return 0 for success, or an error code. + * @function VNOP_SETXATTR + * @abstract Set extended file attributes. + * @param vp The vnode to set extended attributes for. + * @param name Which property to set. + * @param uio Source information for attribute value. + * @param options XATTR_NOSECURITY: bypass security-checking. XATTR_CREATE: set value, fail if exists. + * XATTR_REPLACE: set value, fail if does not exist. + * @param ctx Context to authenticate for setxattr request. + * @return 0 for success, or an error code.
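+ *
+ * (Illustrative addition, not in the original header: a kernel caller might
+ * drive this operation with the kernel uio KPI, e.g.
+ *     uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_WRITE);
+ *     uio_addiov(uio, CAST_USER_ADDR_T(data), len);
+ *     error = VNOP_SETXATTR(vp, "com.example.attr", uio, XATTR_CREATE, ctx);
+ *     uio_free(uio);
+ * where vp, data, len, and ctx are assumed to be supplied by the caller and
+ * "com.example.attr" is a made-up attribute name.)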
*/ extern errno_t VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx); @@ -1455,13 +1455,13 @@ struct vnop_removexattr_args { extern struct vnodeop_desc vnop_removexattr_desc; /*! - @function VNOP_REMOVEXATTR - @abstract Remove extended file attributes. - @param vp The vnode from which to remove extended attributes. - @param name Which attribute to delete. - @param options XATTR_NOSECURITY: bypass security-checking. - @param ctx Context to authenticate for attribute delete request. - @return 0 for success, or an error code. + * @function VNOP_REMOVEXATTR + * @abstract Remove extended file attributes. + * @param vp The vnode from which to remove extended attributes. + * @param name Which attribute to delete. + * @param options XATTR_NOSECURITY: bypass security-checking. + * @param ctx Context to authenticate for attribute delete request. + * @return 0 for success, or an error code. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_REMOVEXATTR(vnode_t, const char *, int, vfs_context_t); @@ -1478,16 +1478,16 @@ struct vnop_listxattr_args { extern struct vnodeop_desc vnop_listxattr_desc; /*! - @function VNOP_LISTXATTR - @abstract List extended attribute keys. - @discussion Should write a sequence of unseparated, null-terminated extended-attribute - names into the space described by the provided uio. These keys can then be passed to - getxattr() (and VNOP_GETXATTR()). - @param vp The vnode for which to get extended attribute keys. - @param uio Description of target memory for attribute keys. - @param size Should be set to amount of data written to buffer. - @param options XATTR_NOSECURITY: bypass security checking. - @param ctx Context to authenticate for attribute name request. + * @function VNOP_LISTXATTR + * @abstract List extended attribute keys. + * @discussion Should write a sequence of unseparated, null-terminated extended-attribute + * names into the space described by the provided uio. These keys can then be passed to + * getxattr() (and VNOP_GETXATTR()). + * @param vp The vnode for which to get extended attribute keys. + * @param uio Description of target memory for attribute keys. + * @param size Should be set to amount of data written to buffer. + * @param options XATTR_NOSECURITY: bypass security checking. + * @param ctx Context to authenticate for attribute name request. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_LISTXATTR(vnode_t, uio_t, size_t *, int, vfs_context_t); @@ -1501,17 +1501,17 @@ struct vnop_blktooff_args { }; /*! - @function VNOP_BLKTOOFF - @abstract Call down to a filesystem to convert a logical block number to a file offset. - @discussion VNOP_BLKTOOFF() converts a logical block to a file offset in bytes. That offset - can be passed to VNOP_BLOCKMAP(), then, to get a physical block number--buf_strategy() does this. - @param vp The vnode for which to convert a logical block to an offset. - @param lblkno Logical block number to turn into offset. - @param offset Destination for file offset. - @return 0 for success, else an error code. + * @function VNOP_BLKTOOFF + * @abstract Call down to a filesystem to convert a logical block number to a file offset. + * @discussion VNOP_BLKTOOFF() converts a logical block to a file offset in bytes. That offset + * can be passed to VNOP_BLOCKMAP(), then, to get a physical block number--buf_strategy() does this. + * @param vp The vnode for which to convert a logical block to an offset. + * @param lblkno Logical block number to turn into offset. + * @param offset Destination for file offset. 
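+ *
+ * (Illustrative, not in the original header: a filesystem with a fixed
+ * 4 KiB block size could implement this as a plain shift, e.g.
+ *     *ap->a_offset = (off_t)ap->a_lblkno << 12;
+ * with the inverse shift in its VNOP_OFFTOBLK handler below.)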
+ * @return 0 for success, else an error code. */ #ifdef XNU_KERNEL_PRIVATE -extern errno_t VNOP_BLKTOOFF(vnode_t, daddr64_t, off_t *); +extern errno_t VNOP_BLKTOOFF(vnode_t, daddr64_t, off_t *); #endif /* XNU_KERNEL_PRIVATE */ struct vnop_offtoblk_args { @@ -1522,15 +1522,15 @@ struct vnop_offtoblk_args { }; /*! - @function VNOP_OFFTOBLK - @abstract Call down to a filesystem to convert a file offset to a logical block number. - @param vp The vnode for which to convert an offset to a logical block number. - @param offset File offset to convert. - @param lblkno Destination for corresponding logical block number. - @return 0 for success, else an error code. + * @function VNOP_OFFTOBLK + * @abstract Call down to a filesystem to convert a file offset to a logical block number. + * @param vp The vnode for which to convert an offset to a logical block number. + * @param offset File offset to convert. + * @param lblkno Destination for corresponding logical block number. + * @return 0 for success, else an error code. */ #ifdef XNU_KERNEL_PRIVATE -extern errno_t VNOP_OFFTOBLK(vnode_t, off_t, daddr64_t *); +extern errno_t VNOP_OFFTOBLK(vnode_t, off_t, daddr64_t *); #endif /* XNU_KERNEL_PRIVATE */ struct vnop_blockmap_args { @@ -1546,24 +1546,24 @@ struct vnop_blockmap_args { }; /*! - @function VNOP_BLOCKMAP - @abstract Call down to a filesystem to get information about the on-disk layout of a file region. - @discussion VNOP_BLOCKMAP() returns the information required to pass a request for a contiguous region - down to a device's strategy routine. - @param vp The vnode for which to get on-disk information. - @param foffset Offset (in bytes) at which region starts. - @param size Size of region. - @param bpn Destination for physical block number at which region begins on disk. - @param run Destination for number of bytes which can be found contiguously on-disk before - first discontinuity. - @param poff Currently unused. - @param flags VNODE_READ: request is for a read. VNODE_WRITE: request is for a write. - @param ctx Context to authenticate for blockmap request; currently often set to NULL. - @return 0 for success, else an error code. + * @function VNOP_BLOCKMAP + * @abstract Call down to a filesystem to get information about the on-disk layout of a file region. + * @discussion VNOP_BLOCKMAP() returns the information required to pass a request for a contiguous region + * down to a device's strategy routine. + * @param vp The vnode for which to get on-disk information. + * @param foffset Offset (in bytes) at which region starts. + * @param size Size of region. + * @param bpn Destination for physical block number at which region begins on disk. + * @param run Destination for number of bytes which can be found contiguously on-disk before + * first discontinuity. + * @param poff Currently unused. + * @param flags VNODE_READ: request is for a read. VNODE_WRITE: request is for a write. + * @param ctx Context to authenticate for blockmap request; currently often set to NULL. + * @return 0 for success, else an error code. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_BLOCKMAP(vnode_t, off_t, size_t, daddr64_t *, size_t *, void *, - int, vfs_context_t); + int, vfs_context_t); #endif /* XNU_KERNEL_PRIVATE */ struct vnop_strategy_args { @@ -1572,13 +1572,13 @@ struct vnop_strategy_args { }; /*! - @function VNOP_STRATEGY - @abstract Initiate I/O on a file (both read and write). 
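(Illustrative sketch, not part of this patch, for the VNOP_STRATEGY discussion that follows: most filesystems simply forward the buffer to buf_strategy(); myfs_devvp(), which returns the vnode of the backing device, is hypothetical.)

extern vnode_t myfs_devvp(vnode_t vp);          /* hypothetical accessor */

static int
myfs_vnop_strategy(struct vnop_strategy_args *ap)
{
	/* buf_strategy() performs any logical-to-physical translation
	 * (via VNOP_BLKTOOFF/VNOP_BLOCKMAP) and calls the underlying
	 * device's strategy routine. */
	return buf_strategy(myfs_devvp(buf_vnode(ap->a_bp)), ap);
}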
- @discussion A filesystem strategy routine takes a buffer, performs whatever manipulations are necessary for passing - the I/O request down to the device layer, and calls the appropriate device's strategy routine. Most filesystems should - just call buf_strategy() with "bp" as the argument. - @param bp Complete specificiation of requested I/O: region of data involved, whether request is for read or write, and so on. - @return 0 for success, else an error code. + * @function VNOP_STRATEGY + * @abstract Initiate I/O on a file (both read and write). + * @discussion A filesystem strategy routine takes a buffer, performs whatever manipulations are necessary for passing + * the I/O request down to the device layer, and calls the appropriate device's strategy routine. Most filesystems should + * just call buf_strategy() with "bp" as the argument. + * @param bp Complete specification of requested I/O: region of data involved, whether request is for read or write, and so on. + * @return 0 for success, else an error code. */ extern errno_t VNOP_STRATEGY(struct buf *bp); @@ -1588,13 +1588,13 @@ struct vnop_bwrite_args { }; /*! - @function VNOP_BWRITE - @abstract Write a buffer to backing store. - @discussion VNOP_BWRITE() is called by buf_bawrite() (asynchronous write) and potentially by buf_bdwrite() (delayed write) - but not by buf_bwrite(). A filesystem may choose to perform some kind of manipulation of the buffer in this routine; it - generally will end up calling VFS's default implementation, vn_bwrite() (which calls buf_bwrite() without further ado). - @param bp The buffer to write. - @return 0 for success, else an error code. + * @function VNOP_BWRITE + * @abstract Write a buffer to backing store. + * @discussion VNOP_BWRITE() is called by buf_bawrite() (asynchronous write) and potentially by buf_bdwrite() (delayed write) + * but not by buf_bwrite(). A filesystem may choose to perform some kind of manipulation of the buffer in this routine; it + * generally will end up calling VFS's default implementation, vn_bwrite() (which calls buf_bwrite() without further ado). + * @param bp The buffer to write. + * @return 0 for success, else an error code. */ extern errno_t VNOP_BWRITE(buf_t bp); @@ -1607,7 +1607,7 @@ struct vnop_kqfilt_add_args { extern struct vnodeop_desc vnop_kqfilt_add_desc; #ifdef XNU_KERNEL_PRIVATE -extern errno_t VNOP_KQFILT_ADD(vnode_t , struct knote *, vfs_context_t); +extern errno_t VNOP_KQFILT_ADD(vnode_t, struct knote *, vfs_context_t); #endif /* XNU_KERNEL_PRIVATE */ struct vnop_kqfilt_remove_args { @@ -1619,14 +1619,14 @@ struct vnop_kqfilt_remove_args { extern struct vnodeop_desc vnop_kqfilt_remove_desc; #ifdef XNU_KERNEL_PRIVATE -errno_t VNOP_KQFILT_REMOVE(vnode_t , uintptr_t , vfs_context_t); +errno_t VNOP_KQFILT_REMOVE(vnode_t, uintptr_t, vfs_context_t); #endif /* XNU_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE -#define VNODE_MONITOR_BEGIN 0x01 -#define VNODE_MONITOR_END 0x02 -#define VNODE_MONITOR_UPDATE 0x04 +#define VNODE_MONITOR_BEGIN 0x01 +#define VNODE_MONITOR_END 0x02 +#define VNODE_MONITOR_UPDATE 0x04 struct vnop_monitor_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -1640,26 +1640,26 @@ extern struct vnodeop_desc vnop_monitor_desc; #ifdef XNU_KERNEL_PRIVATE /*! - @function VNOP_MONITOR - @abstract Indicate to a filesystem that the number of watchers of a file has changed. - @param vp The vnode whose watch state has changed. - @param events Unused. Filesystems can ignore this parameter. - @param flags Type of change to the watch state.
VNODE_MONITOR_BEGIN is passed when the kernel - begins tracking a new watcher of a file. VNODE_MONITOR_END is passed when a watcher stops watching a file. - VNODE_MONITOR_UPDATE is currently unused. A filesystem is guaranteed that each VNODE_MONITOR_BEGIN - will be matched by a VNODE_MONITOR_END with the same "handle" argument. - @param handle Unique identifier for a given watcher. A VNODE_MONITOR_BEGIN for a given handle will be matched with a - VNODE_MONITOR_END for the same handle; a filesystem need not consider this parameter unless - it for some reason wants be able to match specific VNOP_MONITOR calls rather than just keeping - a count. - @param ctx The context which is starting to monitor a file or ending a watch on a file. A matching - pair of VNODE_MONITOR_BEGIN and VNODE_MONITOR_END need not have the same context. - @discussion VNOP_MONITOR() is intended to let networked filesystems know when they should bother - listening for changes to files which occur remotely, so that they can post notifications using - vnode_notify(). Local filesystems should not implement a monitor vnop. - It is called when there is a new watcher for a file or when a watcher for a file goes away. - Each BEGIN will be matched with an END with the same handle. Note that vnode_ismonitored() can - be used to see if there are currently watchers for a file. + * @function VNOP_MONITOR + * @abstract Indicate to a filesystem that the number of watchers of a file has changed. + * @param vp The vnode whose watch state has changed. + * @param events Unused. Filesystems can ignore this parameter. + * @param flags Type of change to the watch state. VNODE_MONITOR_BEGIN is passed when the kernel + * begins tracking a new watcher of a file. VNODE_MONITOR_END is passed when a watcher stops watching a file. + * VNODE_MONITOR_UPDATE is currently unused. A filesystem is guaranteed that each VNODE_MONITOR_BEGIN + * will be matched by a VNODE_MONITOR_END with the same "handle" argument. + * @param handle Unique identifier for a given watcher. A VNODE_MONITOR_BEGIN for a given handle will be matched with a + * VNODE_MONITOR_END for the same handle; a filesystem need not consider this parameter unless + * it for some reason wants to be able to match specific VNOP_MONITOR calls rather than just keeping + * a count. + * @param ctx The context which is starting to monitor a file or ending a watch on a file. A matching + * pair of VNODE_MONITOR_BEGIN and VNODE_MONITOR_END need not have the same context. + * @discussion VNOP_MONITOR() is intended to let networked filesystems know when they should bother + * listening for changes to files which occur remotely, so that they can post notifications using + * vnode_notify(). Local filesystems should not implement a monitor vnop. + * It is called when there is a new watcher for a file or when a watcher for a file goes away. + * Each BEGIN will be matched with an END with the same handle. Note that vnode_ismonitored() can + * be used to see if there are currently watchers for a file. */ errno_t VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx); #endif /* XNU_KERNEL_PRIVATE */ @@ -1674,12 +1674,12 @@ struct vnop_setlabel_args { }; extern struct vnodeop_desc vnop_setlabel_desc; /*! - @function VNOP_SETLABEL - @abstract Associate a MACF label with a file. - @param vp The vnode to label. - @param label The desired label. - @param ctx Context to authenticate for label change. - @return 0 for success, else an error code.
+ * @function VNOP_SETLABEL + * @abstract Associate a MACF label with a file. + * @param vp The vnode to label. + * @param label The desired label. + * @param ctx Context to authenticate for label change. + * @return 0 for success, else an error code. */ #ifdef XNU_KERNEL_PRIVATE errno_t VNOP_SETLABEL(vnode_t, struct label *, vfs_context_t); @@ -1689,7 +1689,7 @@ errno_t VNOP_SETLABEL(vnode_t, struct label *, vfs_context_t); #if NAMEDSTREAMS -enum nsoperation { NS_OPEN, NS_CREATE, NS_DELETE }; +enum nsoperation { NS_OPEN, NS_CREATE, NS_DELETE }; /* a_flags for vnop_getnamedstream_args: */ #define NS_GETRAWENCRYPTED 0x00000001 @@ -1705,20 +1705,20 @@ struct vnop_getnamedstream_args { }; /*! - @function VNOP_GETNAMEDSTREAM - @abstract Get a named stream associated with a file. - @discussion If this call sucecss, svpp should be returned with an iocount which the caller - will drop. VFS provides a facility for simulating named streams when interacting with filesystems - which do not support them. - @param vp The vnode for which to get a named stream. - @param svpp Destination for pointer to named stream's vnode. - @param name The name of the named stream, e.g. "com.apple.ResourceFork". - @param operation Operation to perform. In HFS and AFP, this parameter is only considered as follows: - if the resource fork has not been opened and the operation is not NS_OPEN, fail with ENOATTR. Currently - only passed as NS_OPEN by VFS. - @param flags Flags used to control getnamedstream behavior. Currently only used for raw-encrypted-requests. - @param ctx Context to authenticate for getting named stream. - @return 0 for success, else an error code. + * @function VNOP_GETNAMEDSTREAM + * @abstract Get a named stream associated with a file. + * @discussion If this call succeeds, svpp should be returned with an iocount which the caller + * will drop. VFS provides a facility for simulating named streams when interacting with filesystems + * which do not support them. + * @param vp The vnode for which to get a named stream. + * @param svpp Destination for pointer to named stream's vnode. + * @param name The name of the named stream, e.g. "com.apple.ResourceFork". + * @param operation Operation to perform. In HFS and AFP, this parameter is only considered as follows: + * if the resource fork has not been opened and the operation is not NS_OPEN, fail with ENOATTR. Currently + * only passed as NS_OPEN by VFS. + * @param flags Flags used to control getnamedstream behavior. Currently only used for raw-encrypted-requests. + * @param ctx Context to authenticate for getting named stream. + * @return 0 for success, else an error code. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_GETNAMEDSTREAM(vnode_t, vnode_t *, const char *, enum nsoperation, int flags, vfs_context_t); @@ -1734,17 +1734,17 @@ struct vnop_makenamedstream_args { }; /*! - @function VNOP_MAKENAMEDSTREAM - @abstract Create a named stream associated with a file. - @discussion If this call succeeds, svpp should be returned with an iocount which the caller will drop. - VFS provides a facility for simulating named streams when interacting with filesystems - which do not support them. - @param vp The vnode for which to get a named stream. - @param svpp Destination for pointer to named stream's vnode. - @param name The name of the named stream, e.g. "com.apple.ResourceFork". - @param flags Currently unused. - @param ctx Context to authenticate creating named stream. - @return 0 for success, else an error code.
+ * @function VNOP_MAKENAMEDSTREAM + * @abstract Create a named stream associated with a file. + * @discussion If this call succeeds, svpp should be returned with an iocount which the caller will drop. + * VFS provides a facility for simulating named streams when interacting with filesystems + * which do not support them. + * @param vp The vnode for which to get a named stream. + * @param svpp Destination for pointer to named stream's vnode. + * @param name The name of the named stream, e.g. "com.apple.ResourceFork". + * @param flags Currently unused. + * @param ctx Context to authenticate creating named stream. + * @return 0 for success, else an error code. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_MAKENAMEDSTREAM(vnode_t, vnode_t *, const char *, int flags, vfs_context_t); @@ -1760,16 +1760,16 @@ struct vnop_removenamedstream_args { }; /*! - @function VNOP_REMOVENAMEDSTREAM - @abstract Delete a named stream associated with a file. - @discussion VFS provides a facility for simulating named streams when interacting with filesystems - which do not support them. - @param vp The vnode to which the named stream belongs. - @param svp The named stream's vnode. - @param name The name of the named stream, e.g. "com.apple.ResourceFork". - @param flags Currently unused. - @param ctx Context to authenticate deleting named stream. - @return 0 for success, else an error code. + * @function VNOP_REMOVENAMEDSTREAM + * @abstract Delete a named stream associated with a file. + * @discussion VFS provides a facility for simulating named streams when interacting with filesystems + * which do not support them. + * @param vp The vnode to which the named stream belongs. + * @param svp The named stream's vnode. + * @param name The name of the named stream, e.g. "com.apple.ResourceFork". + * @param flags Currently unused. + * @param ctx Context to authenticate deleting named stream. + * @return 0 for success, else an error code. */ #ifdef XNU_KERNEL_PRIVATE extern errno_t VNOP_REMOVENAMEDSTREAM(vnode_t, vnode_t, const char *, int flags, vfs_context_t); diff --git a/bsd/sys/vnode_internal.h b/bsd/sys/vnode_internal.h index b34dbc110..bde95e48c 100644 --- a/bsd/sys/vnode_internal.h +++ b/bsd/sys/vnode_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -98,21 +98,21 @@ LIST_HEAD(buflists, buf); * VFS Internal (private) trigger vnode resolver info. 
*/ struct vnode_resolve { - lck_mtx_t vr_lock; /* protects vnode_resolve_t fields */ - trigger_vnode_resolve_callback_t vr_resolve_func; - trigger_vnode_unresolve_callback_t vr_unresolve_func; - trigger_vnode_rearm_callback_t vr_rearm_func; - trigger_vnode_reclaim_callback_t vr_reclaim_func; - void * vr_data; /* private data for resolver */ - uint32_t vr_flags; - uint32_t vr_lastseq; + lck_mtx_t vr_lock; /* protects vnode_resolve_t fields */ + trigger_vnode_resolve_callback_t vr_resolve_func; + trigger_vnode_unresolve_callback_t vr_unresolve_func; + trigger_vnode_rearm_callback_t vr_rearm_func; + trigger_vnode_reclaim_callback_t vr_reclaim_func; + void * vr_data; /* private data for resolver */ + uint32_t vr_flags; + uint32_t vr_lastseq; }; typedef struct vnode_resolve *vnode_resolve_t; /* private vr_flags */ #define VNT_RESOLVED (1UL << 31) #define VNT_VFS_UNMOUNTED (1UL << 30) -#define VNT_EXTERNAL (1UL << 29) +#define VNT_EXTERNAL (1UL << 29) #endif /* CONFIG_TRIGGERS */ @@ -125,146 +125,146 @@ typedef struct vnode_resolve *vnode_resolve_t; * the rest of the structure is protected by the vnode_lock */ struct vnode { - lck_mtx_t v_lock; /* vnode mutex */ - TAILQ_ENTRY(vnode) v_freelist; /* vnode freelist */ - TAILQ_ENTRY(vnode) v_mntvnodes; /* vnodes for mount point */ - TAILQ_HEAD(, namecache) v_ncchildren; /* name cache entries that regard us as their parent */ - LIST_HEAD(, namecache) v_nclinks; /* name cache entries that name this vnode */ - vnode_t v_defer_reclaimlist; /* in case we have to defer the reclaim to avoid recursion */ - uint32_t v_listflag; /* flags protected by the vnode_list_lock (see below) */ - uint32_t v_flag; /* vnode flags (see below) */ - uint16_t v_lflag; /* vnode local and named ref flags */ - uint8_t v_iterblkflags; /* buf iterator flags */ - uint8_t v_references; /* number of times io_count has been granted */ - int32_t v_kusecount; /* count of in-kernel refs */ - int32_t v_usecount; /* reference count of users */ - int32_t v_iocount; /* iocounters */ - void * v_owner; /* act that owns the vnode */ - uint16_t v_type; /* vnode type */ - uint16_t v_tag; /* type of underlying data */ - uint32_t v_id; /* identity of vnode contents */ + lck_mtx_t v_lock; /* vnode mutex */ + TAILQ_ENTRY(vnode) v_freelist; /* vnode freelist */ + TAILQ_ENTRY(vnode) v_mntvnodes; /* vnodes for mount point */ + TAILQ_HEAD(, namecache) v_ncchildren; /* name cache entries that regard us as their parent */ + LIST_HEAD(, namecache) v_nclinks; /* name cache entries that name this vnode */ + vnode_t v_defer_reclaimlist; /* in case we have to defer the reclaim to avoid recursion */ + uint32_t v_listflag; /* flags protected by the vnode_list_lock (see below) */ + uint32_t v_flag; /* vnode flags (see below) */ + uint16_t v_lflag; /* vnode local and named ref flags */ + uint8_t v_iterblkflags; /* buf iterator flags */ + uint8_t v_references; /* number of times io_count has been granted */ + int32_t v_kusecount; /* count of in-kernel refs */ + int32_t v_usecount; /* reference count of users */ + int32_t v_iocount; /* iocounters */ + void * v_owner; /* act that owns the vnode */ + uint16_t v_type; /* vnode type */ + uint16_t v_tag; /* type of underlying data */ + uint32_t v_id; /* identity of vnode contents */ union { - struct mount *vu_mountedhere;/* ptr to mounted vfs (VDIR) */ - struct socket *vu_socket; /* unix ipc (VSOCK) */ - struct specinfo *vu_specinfo; /* device (VCHR, VBLK) */ - struct fifoinfo *vu_fifoinfo; /* fifo (VFIFO) */ - struct ubc_info *vu_ubcinfo; /* valid for (VREG) */ + struct 
mount *vu_mountedhere;/* ptr to mounted vfs (VDIR) */ + struct socket *vu_socket; /* unix ipc (VSOCK) */ + struct specinfo *vu_specinfo; /* device (VCHR, VBLK) */ + struct fifoinfo *vu_fifoinfo; /* fifo (VFIFO) */ + struct ubc_info *vu_ubcinfo; /* valid for (VREG) */ } v_un; - struct buflists v_cleanblkhd; /* clean blocklist head */ - struct buflists v_dirtyblkhd; /* dirty blocklist head */ - struct klist v_knotes; /* knotes attached to this vnode */ - /* + struct buflists v_cleanblkhd; /* clean blocklist head */ + struct buflists v_dirtyblkhd; /* dirty blocklist head */ + struct klist v_knotes; /* knotes attached to this vnode */ + /* * the following 4 fields are protected - * by the name_cache_lock held in + * by the name_cache_lock held in * excluive mode */ - kauth_cred_t v_cred; /* last authorized credential */ - kauth_action_t v_authorized_actions; /* current authorized actions for v_cred */ - int v_cred_timestamp; /* determine if entry is stale for MNTK_AUTH_OPAQUE */ - int v_nc_generation; /* changes when nodes are removed from the name cache */ - /* + kauth_cred_t v_cred; /* last authorized credential */ + kauth_action_t v_authorized_actions; /* current authorized actions for v_cred */ + int v_cred_timestamp; /* determine if entry is stale for MNTK_AUTH_OPAQUE */ + int v_nc_generation; /* changes when nodes are removed from the name cache */ + /* * back to the vnode lock for protection */ - int32_t v_numoutput; /* num of writes in progress */ - int32_t v_writecount; /* reference count of writers */ - const char *v_name; /* name component of the vnode */ - vnode_t v_parent; /* pointer to parent vnode */ - struct lockf *v_lockf; /* advisory lock list head */ - int (**v_op)(void *); /* vnode operations vector */ - mount_t v_mount; /* ptr to vfs we are in */ - void * v_data; /* private data for fs */ + int32_t v_numoutput; /* num of writes in progress */ + int32_t v_writecount; /* reference count of writers */ + const char *v_name; /* name component of the vnode */ + vnode_t v_parent; /* pointer to parent vnode */ + struct lockf *v_lockf; /* advisory lock list head */ + int(**v_op)(void *); /* vnode operations vector */ + mount_t v_mount; /* ptr to vfs we are in */ + void * v_data; /* private data for fs */ #if CONFIG_MACF - struct label *v_label; /* MAC security label */ + struct label *v_label; /* MAC security label */ #endif #if CONFIG_TRIGGERS - vnode_resolve_t v_resolve; /* trigger vnode resolve info (VDIR only) */ + vnode_resolve_t v_resolve; /* trigger vnode resolve info (VDIR only) */ #endif /* CONFIG_TRIGGERS */ }; -#define v_mountedhere v_un.vu_mountedhere -#define v_socket v_un.vu_socket -#define v_specinfo v_un.vu_specinfo -#define v_fifoinfo v_un.vu_fifoinfo -#define v_ubcinfo v_un.vu_ubcinfo +#define v_mountedhere v_un.vu_mountedhere +#define v_socket v_un.vu_socket +#define v_specinfo v_un.vu_specinfo +#define v_fifoinfo v_un.vu_fifoinfo +#define v_ubcinfo v_un.vu_ubcinfo /* * v_iterblkflags */ -#define VBI_ITER 0x1 -#define VBI_ITERWANT 0x2 -#define VBI_CLEAN 0x4 -#define VBI_DIRTY 0x8 -#define VBI_NEWBUF 0x10 +#define VBI_ITER 0x1 +#define VBI_ITERWANT 0x2 +#define VBI_CLEAN 0x4 +#define VBI_DIRTY 0x8 +#define VBI_NEWBUF 0x10 /* * v_listflag */ -#define VLIST_RAGE 0x01 /* vnode is currently in the rapid age list */ -#define VLIST_DEAD 0x02 /* vnode is currently in the dead list */ -#define VLIST_ASYNC_WORK 0x04 /* vnode is currently on the deferred async work queue */ +#define VLIST_RAGE 0x01 /* vnode is currently in the rapid age list */ +#define VLIST_DEAD 0x02 /* 
vnode is currently in the dead list */ +#define VLIST_ASYNC_WORK 0x04 /* vnode is currently on the deferred async work queue */ /* * v_lflags */ -#define VL_SUSPENDED 0x0001 /* vnode is suspended */ -#define VL_DRAIN 0x0002 /* vnode is being drained */ -#define VL_TERMINATE 0x0004 /* vnode is in the process of being recycled */ -#define VL_TERMWANT 0x0008 /* there's a waiter for recycle finish (vnode_getiocount)*/ -#define VL_DEAD 0x0010 /* vnode is dead, cleaned of filesystem-specific info */ -#define VL_MARKTERM 0x0020 /* vnode should be recycled when no longer referenced */ -#define VL_NEEDINACTIVE 0x0080 /* delay VNOP_INACTIVE until iocount goes to 0 */ - -#define VL_LABEL 0x0100 /* vnode is marked for labeling */ -#define VL_LABELWAIT 0x0200 /* vnode is marked for labeling */ -#define VL_LABELED 0x0400 /* vnode is labeled */ -#define VL_LWARNED 0x0800 -#define VL_HASSTREAMS 0x1000 /* vnode has had at least one associated named stream vnode (may not have one any longer) */ - -#define VNAMED_UBC 0x2000 /* ubc named reference */ -#define VNAMED_MOUNT 0x4000 /* mount point named reference */ -#define VNAMED_FSHASH 0x8000 /* FS hash named reference */ +#define VL_SUSPENDED 0x0001 /* vnode is suspended */ +#define VL_DRAIN 0x0002 /* vnode is being drained */ +#define VL_TERMINATE 0x0004 /* vnode is in the process of being recycled */ +#define VL_TERMWANT 0x0008 /* there's a waiter for recycle finish (vnode_getiocount)*/ +#define VL_DEAD 0x0010 /* vnode is dead, cleaned of filesystem-specific info */ +#define VL_MARKTERM 0x0020 /* vnode should be recycled when no longer referenced */ +#define VL_NEEDINACTIVE 0x0080 /* delay VNOP_INACTIVE until iocount goes to 0 */ + +#define VL_LABEL 0x0100 /* vnode is marked for labeling */ +#define VL_LABELWAIT 0x0200 /* vnode is marked for labeling */ +#define VL_LABELED 0x0400 /* vnode is labeled */ +#define VL_LWARNED 0x0800 +#define VL_HASSTREAMS 0x1000 /* vnode has had at least one associated named stream vnode (may not have one any longer) */ + +#define VNAMED_UBC 0x2000 /* ubc named reference */ +#define VNAMED_MOUNT 0x4000 /* mount point named reference */ +#define VNAMED_FSHASH 0x8000 /* FS hash named reference */ /* * v_flags */ -#define VROOT 0x000001 /* root of its file system */ -#define VTEXT 0x000002 /* vnode is a pure text prototype */ -#define VSYSTEM 0x000004 /* vnode being used by kernel */ -#define VISTTY 0x000008 /* vnode represents a tty */ -#define VRAGE 0x000010 /* vnode is in rapid age state */ -#define VBDEVVP 0x000020 /* vnode created by bdevvp */ -#define VDEVFLUSH 0x000040 /* device vnode after vflush */ -#define VMOUNT 0x000080 /* mount operation in progress */ -#define VBWAIT 0x000100 /* waiting for output to complete */ -#define VSHARED_DYLD 0x000200 /* vnode is a dyld shared cache file */ -#define VNOCACHE_DATA 0x000400 /* don't keep data cached once it's been consumed */ -#define VSTANDARD 0x000800 /* vnode obtained from common pool */ -#define VAGE 0x001000 /* Insert vnode at head of free list */ -#define VRAOFF 0x002000 /* read ahead disabled */ -#define VNCACHEABLE 0x004000 /* vnode is allowed to be put back in name cache */ +#define VROOT 0x000001 /* root of its file system */ +#define VTEXT 0x000002 /* vnode is a pure text prototype */ +#define VSYSTEM 0x000004 /* vnode being used by kernel */ +#define VISTTY 0x000008 /* vnode represents a tty */ +#define VRAGE 0x000010 /* vnode is in rapid age state */ +#define VBDEVVP 0x000020 /* vnode created by bdevvp */ +#define VDEVFLUSH 0x000040 /* device vnode after vflush */ 
+#define VMOUNT 0x000080 /* mount operation in progress */ +#define VBWAIT 0x000100 /* waiting for output to complete */ +#define VSHARED_DYLD 0x000200 /* vnode is a dyld shared cache file */ +#define VNOCACHE_DATA 0x000400 /* don't keep data cached once it's been consumed */ +#define VSTANDARD 0x000800 /* vnode obtained from common pool */ +#define VAGE 0x001000 /* Insert vnode at head of free list */ +#define VRAOFF 0x002000 /* read ahead disabled */ +#define VNCACHEABLE 0x004000 /* vnode is allowed to be put back in name cache */ #if NAMEDSTREAMS -#define VISSHADOW 0x008000 /* vnode is a shadow file */ +#define VISSHADOW 0x008000 /* vnode is a shadow file */ #endif -#define VSWAP 0x010000 /* vnode is being used as swapfile */ -#define VTHROTTLED 0x020000 /* writes or pageouts have been throttled */ - /* wakeup tasks waiting when count falls below threshold */ -#define VNOFLUSH 0x040000 /* don't vflush() if SKIPSYSTEM */ -#define VLOCKLOCAL 0x080000 /* this vnode does adv locking in vfs */ -#define VISHARDLINK 0x100000 /* hard link needs special processing on lookup and in volfs */ -#define VISUNION 0x200000 /* union special processing */ -#define VISNAMEDSTREAM 0x400000 /* vnode is a named stream (eg HFS resource fork) */ +#define VSWAP 0x010000 /* vnode is being used as swapfile */ +#define VTHROTTLED 0x020000 /* writes or pageouts have been throttled */ +/* wakeup tasks waiting when count falls below threshold */ +#define VNOFLUSH 0x040000 /* don't vflush() if SKIPSYSTEM */ +#define VLOCKLOCAL 0x080000 /* this vnode does adv locking in vfs */ +#define VISHARDLINK 0x100000 /* hard link needs special processing on lookup and in volfs */ +#define VISUNION 0x200000 /* union special processing */ +#define VISNAMEDSTREAM 0x400000 /* vnode is a named stream (eg HFS resource fork) */ #define VOPENEVT 0x800000 /* if process is P_CHECKOPENEVT, then or in the O_EVTONLY flag on open */ #define VNEEDSSNAPSHOT 0x1000000 -#define VNOCS 0x2000000 /* is there no code signature available */ -#define VISDIRTY 0x4000000 /* vnode will need IO if reclaimed */ +#define VNOCS 0x2000000 /* is there no code signature available */ +#define VISDIRTY 0x4000000 /* vnode will need IO if reclaimed */ #define VFASTDEVCANDIDATE 0x8000000 /* vnode is a candidate to store on a fast device */ #define VAUTOCANDIDATE 0x10000000 /* vnode was automatically marked as a fast-dev candidate */ /* - 0x20000000 not used - 0x40000000 not used - 0x80000000 not used. -*/ + * 0x20000000 not used + * 0x40000000 not used + * 0x80000000 not used. + */ /* * This structure describes vnode data which is specific to a file descriptor. @@ -289,9 +289,9 @@ struct fd_vn_data { * That may not be enough for some filesytems so the current algorithm works its * way upto FV_DIRBUF_MAX_SIZ */ -#define FV_DIRBUF_DIRENTRY_SIZ (sizeof(struct direntry)) -#define FV_DIRBUF_START_SIZ FV_DIRBUF_DIRENTRY_SIZ -#define FV_DIRBUF_MAX_SIZ (4*(sizeof(struct direntry))) +#define FV_DIRBUF_DIRENTRY_SIZ (sizeof(struct direntry)) +#define FV_DIRBUF_START_SIZ FV_DIRBUF_DIRENTRY_SIZ +#define FV_DIRBUF_MAX_SIZ (4*(sizeof(struct direntry))) #define FV_LOCK(fvd) lck_mtx_lock(&(((struct fd_vn_data *)fvd)->fv_lock)) #define FV_UNLOCK(fvd) lck_mtx_unlock(&(((struct fd_vn_data *)fvd)->fv_lock)) @@ -299,11 +299,11 @@ struct fd_vn_data { /* * Global vnode data. */ -extern struct vnode *rootvnode; /* root (i.e. "/") vnode */ +extern struct vnode *rootvnode; /* root (i.e. 
"/") vnode */ #ifdef CONFIG_IMGSRC_ACCESS -#define MAX_IMAGEBOOT_NESTING 2 -extern struct vnode *imgsrc_rootvnodes[]; +#define MAX_IMAGEBOOT_NESTING 2 +extern struct vnode *imgsrc_rootvnodes[]; #endif /* CONFIG_IMGSRC_ACCESS */ @@ -314,16 +314,16 @@ extern struct vnode *imgsrc_rootvnodes[]; /* * Flags for vdesc_flags: */ -#define VDESC_MAX_VPS 16 +#define VDESC_MAX_VPS 16 /* Low order 16 flag bits are reserved for willrele flags for vp arguments. */ -#define VDESC_VP0_WILLRELE 0x00001 -#define VDESC_VP1_WILLRELE 0x00002 -#define VDESC_VP2_WILLRELE 0x00004 -#define VDESC_VP3_WILLRELE 0x00008 -#define VDESC_NOMAP_VPP 0x00100 -#define VDESC_VPP_WILLRELE 0x00200 +#define VDESC_VP0_WILLRELE 0x00001 +#define VDESC_VP1_WILLRELE 0x00002 +#define VDESC_VP2_WILLRELE 0x00004 +#define VDESC_VP3_WILLRELE 0x00008 +#define VDESC_NOMAP_VPP 0x00100 +#define VDESC_VPP_WILLRELE 0x00200 -#define VDESC_DISABLED 0x10000 /* descriptor defined but op is unused, has no op slot */ +#define VDESC_DISABLED 0x10000 /* descriptor defined but op is unused, has no op slot */ /* * VDESC_NO_OFFSET is used to identify the end of the offset list @@ -335,9 +335,9 @@ extern struct vnode *imgsrc_rootvnodes[]; * This structure describes the vnode operation taking place. */ struct vnodeop_desc { - int vdesc_offset; /* offset in vector--first for speed */ - const char *vdesc_name; /* a readable name for debugging */ - int vdesc_flags; /* VDESC_* flags */ + int vdesc_offset; /* offset in vector--first for speed */ + const char *vdesc_name; /* a readable name for debugging */ + int vdesc_flags; /* VDESC_* flags */ /* * These ops are used by bypass routines to map and locate arguments. @@ -345,18 +345,18 @@ struct vnodeop_desc { * they are useful to (for example) transport layers. * Nameidata is useful because it has a cred in it. */ - int *vdesc_vp_offsets; /* list ended by VDESC_NO_OFFSET */ - int vdesc_vpp_offset; /* return vpp location */ - int vdesc_cred_offset; /* cred location, if any */ - int vdesc_proc_offset; /* proc location, if any */ - int vdesc_componentname_offset; /* if any */ - int vdesc_context_offset; /* context location, if any */ + int *vdesc_vp_offsets; /* list ended by VDESC_NO_OFFSET */ + int vdesc_vpp_offset; /* return vpp location */ + int vdesc_cred_offset; /* cred location, if any */ + int vdesc_proc_offset; /* proc location, if any */ + int vdesc_componentname_offset; /* if any */ + int vdesc_context_offset; /* context location, if any */ /* * Finally, we've got a list of private data (about each operation) * for each transport layer. (Support to manage this list is not * yet part of BSD.) */ - caddr_t *vdesc_transports; + caddr_t *vdesc_transports; }; /* @@ -372,11 +372,11 @@ extern struct vnodeop_desc *vnodeop_descs[]; * Crays, so if you decide to port this to such a serious machine, * you might want to consult Intrisics.h's XtOffset{,Of,To}. */ -#define VOPARG_OFFSET(p_type,field) \ - ((int) (((char *) (&(((p_type)NULL)->field))) - ((char *) NULL))) -#define VOPARG_OFFSETOF(s_type,field) \ +#define VOPARG_OFFSET(p_type, field) \ + ((int) (((char *) (&(((p_type)NULL)->field))) - ((char *) NULL))) +#define VOPARG_OFFSETOF(s_type, field) \ VOPARG_OFFSET(s_type*,field) -#define VOPARG_OFFSETTO(S_TYPE,S_OFFSET,STRUCT_P) \ +#define VOPARG_OFFSETTO(S_TYPE, S_OFFSET, STRUCT_P) \ ((S_TYPE)(((char*)(STRUCT_P))+(S_OFFSET))) @@ -386,66 +386,66 @@ extern struct vnodeop_desc *vnodeop_descs[]; * vclean changes the ops vector and then wants to call ops with the old * vector. 
*/ -#define VOCALL(OPSV,OFF,AP) (( *((OPSV)[(OFF)])) (AP)) +#define VOCALL(OPSV, OFF, AP) (( *((OPSV)[(OFF)])) (AP)) /* * This call works for vnodes in the kernel. */ -#define VCALL(VP,OFF,AP) VOCALL((VP)->v_op,(OFF),(AP)) +#define VCALL(VP, OFF, AP) VOCALL((VP)->v_op,(OFF),(AP)) #define VDESC(OP) (& __CONCAT(OP,_desc)) #define VOFFSET(OP) (VDESC(OP)->vdesc_offset) struct ostat; /* bdevvp moved to vnode.h as private KPI */ -void cvtstat(struct stat *st, struct ostat *ost); -void vprint(const char *label, struct vnode *vp); +void cvtstat(struct stat *st, struct ostat *ost); +void vprint(const char *label, struct vnode *vp); __private_extern__ int set_package_extensions_table(user_addr_t data, int nentries, int maxwidth); -int vn_rdwr_64(enum uio_rw rw, struct vnode *vp, uint64_t base, - int64_t len, off_t offset, enum uio_seg segflg, - int ioflg, kauth_cred_t cred, int64_t *aresid, - struct proc *p); +int vn_rdwr_64(enum uio_rw rw, struct vnode *vp, uint64_t base, + int64_t len, off_t offset, enum uio_seg segflg, + int ioflg, kauth_cred_t cred, int64_t *aresid, + struct proc *p); #if CONFIG_MACF -int vn_setlabel (struct vnode *vp, struct label *intlabel, - vfs_context_t context); +int vn_setlabel(struct vnode *vp, struct label *intlabel, + vfs_context_t context); #endif -void fifo_printinfo(struct vnode *vp); -int vn_open(struct nameidata *ndp, int fmode, int cmode); -int vn_open_modflags(struct nameidata *ndp, int *fmode, int cmode); -int vn_open_auth(struct nameidata *ndp, int *fmode, struct vnode_attr *); -int vn_close(vnode_t, int flags, vfs_context_t ctx); +void fifo_printinfo(struct vnode *vp); +int vn_open(struct nameidata *ndp, int fmode, int cmode); +int vn_open_modflags(struct nameidata *ndp, int *fmode, int cmode); +int vn_open_auth(struct nameidata *ndp, int *fmode, struct vnode_attr *); +int vn_close(vnode_t, int flags, vfs_context_t ctx); errno_t vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx); errno_t vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap, - struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap, - uint32_t flags, vfs_context_t ctx); + struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap, + uint32_t flags, vfs_context_t ctx); -void lock_vnode_and_post(vnode_t, int); +void lock_vnode_and_post(vnode_t, int); #define post_event_if_success(_vp, _error, _event) \ do { \ - if (0 == (_error)) { \ - lock_vnode_and_post((_vp), (_event)); \ - } \ - } while (0) - + if (0 == (_error)) { \ + lock_vnode_and_post((_vp), (_event)); \ + } \ + } while (0) + /* Authorization subroutines */ -int vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved); -int vn_authorize_create(vnode_t, struct componentname *, struct vnode_attr *, vfs_context_t, void*); -int vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx); -void vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields); -int vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved); -int vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_context_t ctx, void *reserved); -int vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname 
*fcnp, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved); -int vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path, - vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved); -int vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved); +int vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved); +int vn_authorize_create(vnode_t, struct componentname *, struct vnode_attr *, vfs_context_t, void*); +int vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx); +void vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields); +int vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved); +int vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_context_t ctx, void *reserved); +int vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved); +int vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path, + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path, + vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved); +int vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved); typedef int (*vn_create_authorizer_t)(vnode_t, struct componentname *, struct vnode_attr *, vfs_context_t, void*); int vn_authorize_mkdir(vnode_t, struct componentname *, struct vnode_attr *, vfs_context_t, void*); @@ -455,19 +455,19 @@ int vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action uint32_t flags, vfs_context_t ctx, void *reserved); /* End of authorization subroutines */ -#define VN_CREATE_NOAUTH (1<<0) -#define VN_CREATE_NOINHERIT (1<<1) -#define VN_CREATE_UNION (1<<2) -#define VN_CREATE_NOLABEL (1<<3) -#define VN_CREATE_DOOPEN (1<<4) /* Open file if a batched operation is available */ +#define VN_CREATE_NOAUTH (1<<0) +#define VN_CREATE_NOINHERIT (1<<1) +#define VN_CREATE_UNION (1<<2) +#define VN_CREATE_NOLABEL (1<<3) +#define VN_CREATE_DOOPEN (1<<4) /* Open file if a batched operation is available */ errno_t vn_create(vnode_t, vnode_t *, struct nameidata *, struct vnode_attr *, uint32_t, int, uint32_t*, vfs_context_t); -int vn_mkdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx); -int vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx); +int vn_mkdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx); +int vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx); -int vn_getxattr(vnode_t, const char *, uio_t, size_t *, int, vfs_context_t); -int vn_setxattr(vnode_t, const char *, uio_t, int, vfs_context_t); -int vn_removexattr(vnode_t, const char *, int, vfs_context_t); -int vn_listxattr(vnode_t, uio_t, size_t *, int, vfs_context_t); 
+int vn_getxattr(vnode_t, const char *, uio_t, size_t *, int, vfs_context_t); +int vn_setxattr(vnode_t, const char *, uio_t, int, vfs_context_t); +int vn_removexattr(vnode_t, const char *, int, vfs_context_t); +int vn_listxattr(vnode_t, uio_t, size_t *, int, vfs_context_t); #if NAMEDSTREAMS errno_t vnode_getnamedstream(vnode_t, vnode_t *, const char *, enum nsoperation, int, vfs_context_t); @@ -475,74 +475,74 @@ errno_t vnode_makenamedstream(vnode_t, vnode_t *, const char *, int, vfs_contex errno_t vnode_removenamedstream(vnode_t, vnode_t, const char *, int, vfs_context_t); errno_t vnode_flushnamedstream(vnode_t vp, vnode_t svp, vfs_context_t context); errno_t vnode_relenamedstream(vnode_t vp, vnode_t svp); -errno_t vnode_verifynamedstream (vnode_t vp); +errno_t vnode_verifynamedstream(vnode_t vp); #endif -void nchinit(void); -int resize_namecache(int newsize); -void name_cache_lock_shared(void); -void name_cache_lock(void); -void name_cache_unlock(void); -void cache_enter_with_gen(vnode_t dvp, vnode_t vp, struct componentname *cnp, int gen); +void nchinit(void); +int resize_namecache(int newsize); +void name_cache_lock_shared(void); +void name_cache_lock(void); +void name_cache_unlock(void); +void cache_enter_with_gen(vnode_t dvp, vnode_t vp, struct componentname *cnp, int gen); const char *cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp); -extern int nc_disabled; +extern int nc_disabled; -#define vnode_lock_convert(v) lck_mtx_convert_spin(&(v)->v_lock) +#define vnode_lock_convert(v) lck_mtx_convert_spin(&(v)->v_lock) -void vnode_lock_spin(vnode_t); +void vnode_lock_spin(vnode_t); -void vnode_list_lock(void); -void vnode_list_unlock(void); +void vnode_list_lock(void); +void vnode_list_unlock(void); -#define VNODE_REF_FORCE 0x1 -int vnode_ref_ext(vnode_t, int, int); +#define VNODE_REF_FORCE 0x1 +int vnode_ref_ext(vnode_t, int, int); -void vnode_rele_internal(vnode_t, int, int, int); +void vnode_rele_internal(vnode_t, int, int, int); #ifdef BSD_KERNEL_PRIVATE -int vnode_getalways(vnode_t); -int vget_internal(vnode_t, int, int); +int vnode_getalways(vnode_t); +int vget_internal(vnode_t, int, int); errno_t vnode_getiocount(vnode_t, unsigned int, int); #endif /* BSD_KERNEL_PRIVATE */ -int vnode_get_locked(vnode_t); -int vnode_put_locked(vnode_t); +int vnode_get_locked(vnode_t); +int vnode_put_locked(vnode_t); -int vnode_issock(vnode_t); -int vnode_isaliased(vnode_t); +int vnode_issock(vnode_t); +int vnode_isaliased(vnode_t); -void unlock_fsnode(vnode_t, int *); -int lock_fsnode(vnode_t, int *); +void unlock_fsnode(vnode_t, int *); +int lock_fsnode(vnode_t, int *); -errno_t vnode_resume(vnode_t); -errno_t vnode_suspend(vnode_t); +errno_t vnode_resume(vnode_t); +errno_t vnode_suspend(vnode_t); -errno_t vnode_mtime(vnode_t, struct timespec *, vfs_context_t); +errno_t vnode_mtime(vnode_t, struct timespec *, vfs_context_t); errno_t vnode_flags(vnode_t, uint32_t *, vfs_context_t); -errno_t vnode_size(vnode_t, off_t *, vfs_context_t); -errno_t vnode_setsize(vnode_t, off_t, int ioflag, vfs_context_t); -int vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx); -int vnode_isspec(vnode_t vp); +errno_t vnode_size(vnode_t, off_t *, vfs_context_t); +errno_t vnode_setsize(vnode_t, off_t, int ioflag, vfs_context_t); +int vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx); +int vnode_isspec(vnode_t vp); #ifdef BSD_KERNEL_PRIVATE typedef uint32_t compound_vnop_id_t; -#define COMPOUND_VNOP_OPEN 0x01 -#define COMPOUND_VNOP_MKDIR 0x02 
-#define COMPOUND_VNOP_RENAME 0x04 -#define COMPOUND_VNOP_REMOVE 0x08 -#define COMPOUND_VNOP_RMDIR 0x10 - -int vnode_compound_rename_available(vnode_t vp); -int vnode_compound_rmdir_available(vnode_t vp); -int vnode_compound_mkdir_available(vnode_t vp); -int vnode_compound_remove_available(vnode_t vp); -int vnode_compound_open_available(vnode_t vp); -int vnode_compound_op_available(vnode_t, compound_vnop_id_t); +#define COMPOUND_VNOP_OPEN 0x01 +#define COMPOUND_VNOP_MKDIR 0x02 +#define COMPOUND_VNOP_RENAME 0x04 +#define COMPOUND_VNOP_REMOVE 0x08 +#define COMPOUND_VNOP_RMDIR 0x10 + +int vnode_compound_rename_available(vnode_t vp); +int vnode_compound_rmdir_available(vnode_t vp); +int vnode_compound_mkdir_available(vnode_t vp); +int vnode_compound_remove_available(vnode_t vp); +int vnode_compound_open_available(vnode_t vp); +int vnode_compound_op_available(vnode_t, compound_vnop_id_t); #endif /* BSD_KERNEL_PRIVATE */ void vn_setunionwait(vnode_t); @@ -552,23 +552,23 @@ void vn_clearunionwait(vnode_t, int); void SPECHASH_LOCK(void); void SPECHASH_UNLOCK(void); -void vnode_authorize_init(void); +void vnode_authorize_init(void); -void vfsinit(void); +void vfsinit(void); void vnode_lock(vnode_t); void vnode_unlock(vnode_t); void vn_print_state(vnode_t /* vp */, const char * /* fmt */, ...) - __printflike(2,3); +__printflike(2, 3); #if DEVELOPMENT || DEBUG -#define VNASSERT(exp, vp, msg) \ -do { \ - if (__improbable(!(exp))) { \ - vn_print_state(vp, "VNASSERT failed %s:%d\n", __FILE__, \ - __LINE__); \ - panic msg; \ - } \ +#define VNASSERT(exp, vp, msg) \ +do { \ + if (__improbable(!(exp))) { \ + vn_print_state(vp, "VNASSERT failed %s:%d\n", __FILE__, \ + __LINE__); \ + panic msg; \ + } \ } while (0) #else #define VNASSERT(exp, vp, msg) @@ -577,24 +577,24 @@ do { \ /* * XXX exported symbols; should be static */ -void vfs_op_init(void); -void vfs_opv_init(void); +void vfs_op_init(void); +void vfs_opv_init(void); #ifdef BSD_KERNEL_PRIVATE int vfs_sysctl_node SYSCTL_HANDLER_ARGS; void vnode_setneedinactive(vnode_t); -int vnode_hasnamedstreams(vnode_t); /* Does this vnode have associated named streams? */ +int vnode_hasnamedstreams(vnode_t); /* Does this vnode have associated named streams? */ void nspace_proc_exit(struct proc *p); errno_t vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, - int *numdirent, vfs_context_t ctxp); + int *numdirent, vfs_context_t ctxp); void vnode_setswapmount(vnode_t); -int64_t vnode_getswappin_avail(vnode_t); +int64_t vnode_getswappin_avail(vnode_t); -int vnode_get_snapdir(vnode_t , vnode_t *, vfs_context_t); +int vnode_get_snapdir(vnode_t, vnode_t *, vfs_context_t); #if CONFIG_TRIGGERS /* VFS Internal Vnode Trigger Interfaces (Private) */ @@ -603,7 +603,7 @@ void vnode_trigger_rearm(vnode_t, vfs_context_t); void vfs_nested_trigger_unmounts(mount_t, int, vfs_context_t); #endif /* CONFIG_TRIGGERS */ -int build_path_with_parent(vnode_t, vnode_t /* parent */, char *, int, int *, int, vfs_context_t); +int build_path_with_parent(vnode_t, vnode_t /* parent */, char *, int, int *, int, vfs_context_t); #endif /* BSD_KERNEL_PRIVATE */ diff --git a/bsd/sys/vstat.h b/bsd/sys/vstat.h index 2685b741c..05f4d86dc 100644 --- a/bsd/sys/vstat.h +++ b/bsd/sys/vstat.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
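The rehung VNASSERT above, like post_event_if_success earlier, wraps a multi-statement body in do { ... } while (0). A toy illustration of why that wrapper matters is below; TOY_ASSERT and its caller are invented names, only the idiom comes from the patch.

    /*
     * The do { ... } while (0) wrapper makes a multi-statement macro
     * behave as a single statement, so it composes safely with if/else.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define TOY_ASSERT(exp, msg)                                    \
    do {                                                            \
            if (!(exp)) {                                           \
                    fprintf(stderr, "assert failed %s:%d: %s\n",    \
                        __FILE__, __LINE__, msg);                   \
                    abort();                                        \
            }                                                       \
    } while (0)

    int
    main(void)
    {
            int v_usecount = 1;

            /* Without the do/while(0) wrapper, the dangling `else` below
             * would bind to the macro's internal `if` and change meaning. */
            if (v_usecount > 0)
                    TOY_ASSERT(v_usecount < 100, "usecount out of range");
            else
                    printf("vnode already drained\n");

            return 0;
    }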
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1998 Apple Computer, Inc. All Rights Reserved */ /*- - * @(#)vstat.h + * @(#)vstat.h */ #ifndef _SYS_VSTAT_H_ -#define _SYS_VSTAT_H_ +#define _SYS_VSTAT_H_ #include #include @@ -46,32 +46,32 @@ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) struct vstat { - fsid_t vst_volid; /* volume identifier */ - fsobj_id_t vst_nodeid; /* object's id */ - fsobj_type_t vst_vnodetype; /* vnode type (VREG, VDIR, etc.) */ - fsobj_tag_t vst_vnodetag; /* vnode tag (HFS, UFS, etc.) */ - mode_t vst_mode; /* inode protection mode */ - nlink_t vst_nlink; /* number of hard links */ - uid_t vst_uid; /* user ID of the file's owner */ - gid_t vst_gid; /* group ID of the file's group */ - dev_t vst_dev; /* inode's device */ - dev_t vst_rdev; /* device type */ + fsid_t vst_volid; /* volume identifier */ + fsobj_id_t vst_nodeid; /* object's id */ + fsobj_type_t vst_vnodetype; /* vnode type (VREG, VDIR, etc.) */ + fsobj_tag_t vst_vnodetag; /* vnode tag (HFS, UFS, etc.) 
*/ + mode_t vst_mode; /* inode protection mode */ + nlink_t vst_nlink; /* number of hard links */ + uid_t vst_uid; /* user ID of the file's owner */ + gid_t vst_gid; /* group ID of the file's group */ + dev_t vst_dev; /* inode's device */ + dev_t vst_rdev; /* device type */ #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) - struct timespec vst_atimespec; /* time of last access */ - struct timespec vst_mtimespec; /* time of last data modification */ - struct timespec vst_ctimespec; /* time of last file status change */ + struct timespec vst_atimespec; /* time of last access */ + struct timespec vst_mtimespec; /* time of last data modification */ + struct timespec vst_ctimespec; /* time of last file status change */ #else - time_t vst_atime; /* time of last access */ - long vst_atimensec; /* nsec of last access */ - time_t vst_mtime; /* time of last data modification */ - long vst_mtimensec; /* nsec of last data modification */ - time_t vst_ctime; /* time of last file status change */ - long vst_ctimensec; /* nsec of last file status change */ + time_t vst_atime; /* time of last access */ + long vst_atimensec; /* nsec of last access */ + time_t vst_mtime; /* time of last data modification */ + long vst_mtimensec; /* nsec of last data modification */ + time_t vst_ctime; /* time of last file status change */ + long vst_ctimensec; /* nsec of last file status change */ #endif - off_t vst_filesize; /* file size, in bytes */ - quad_t vst_blocks; /* bytes allocated for file */ - u_int32_t vst_blksize; /* optimal blocksize for I/O */ - u_int32_t vst_flags; /* user defined flags for file */ + off_t vst_filesize; /* file size, in bytes */ + quad_t vst_blocks; /* bytes allocated for file */ + u_int32_t vst_blksize; /* optimal blocksize for I/O */ + u_int32_t vst_flags; /* user defined flags for file */ }; #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ diff --git a/bsd/sys/wait.h b/bsd/sys/wait.h index 23506faff..498749fba 100644 --- a/bsd/sys/wait.h +++ b/bsd/sys/wait.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -60,9 +60,9 @@ * * @(#)wait.h 8.2 (Berkeley) 7/10/94 */ - + #ifndef _SYS_WAIT_H_ -#define _SYS_WAIT_H_ +#define _SYS_WAIT_H_ #include <sys/cdefs.h> #include <sys/_types.h> @@ -106,8 +106,8 @@ typedef enum { * well, or in future releases your software may not compile * without modification.
*/ -#include <sys/signal.h> /* [XSI] for siginfo_t */ -#include <sys/resource.h> /* [XSI] for struct rusage */ +#include <sys/signal.h> /* [XSI] for siginfo_t */ +#include <sys/resource.h> /* [XSI] for struct rusage */ /* * Option bits for the third argument of wait4. WNOHANG causes the @@ -118,45 +118,45 @@ typedef enum { * this option is done, it is as though they were still running... nothing * about them is returned. */ -#define WNOHANG 0x00000001 /* [XSI] no hang in wait/no child to reap */ -#define WUNTRACED 0x00000002 /* [XSI] notify on stop, untraced child */ +#define WNOHANG 0x00000001 /* [XSI] no hang in wait/no child to reap */ +#define WUNTRACED 0x00000002 /* [XSI] notify on stop, untraced child */ /* * Macros to test the exit status returned by wait * and extract the relevant values. */ #if defined(_POSIX_C_SOURCE) && !defined(_DARWIN_C_SOURCE) -#define _W_INT(i) (i) +#define _W_INT(i) (i) #else -#define _W_INT(w) (*(int *)&(w)) /* convert union wait to int */ -#define WCOREFLAG 0200 +#define _W_INT(w) (*(int *)&(w)) /* convert union wait to int */ +#define WCOREFLAG 0200 #endif /* (_POSIX_C_SOURCE && !_DARWIN_C_SOURCE) */ /* These macros are permitted, as they are in the implementation namespace */ -#define _WSTATUS(x) (_W_INT(x) & 0177) -#define _WSTOPPED 0177 /* _WSTATUS if process is stopped */ +#define _WSTATUS(x) (_W_INT(x) & 0177) +#define _WSTOPPED 0177 /* _WSTATUS if process is stopped */ /* * [XSI] The header shall define the following macros for * analysis of process status values */ #if __DARWIN_UNIX03 -#define WEXITSTATUS(x) ((_W_INT(x) >> 8) & 0x000000ff) +#define WEXITSTATUS(x) ((_W_INT(x) >> 8) & 0x000000ff) #else /* !__DARWIN_UNIX03 */ -#define WEXITSTATUS(x) (_W_INT(x) >> 8) +#define WEXITSTATUS(x) (_W_INT(x) >> 8) #endif /* !__DARWIN_UNIX03 */ /* 0x13 == SIGCONT */ -#define WSTOPSIG(x) (_W_INT(x) >> 8) +#define WSTOPSIG(x) (_W_INT(x) >> 8) #define WIFCONTINUED(x) (_WSTATUS(x) == _WSTOPPED && WSTOPSIG(x) == 0x13) -#define WIFSTOPPED(x) (_WSTATUS(x) == _WSTOPPED && WSTOPSIG(x) != 0x13) -#define WIFEXITED(x) (_WSTATUS(x) == 0) -#define WIFSIGNALED(x) (_WSTATUS(x) != _WSTOPPED && _WSTATUS(x) != 0) -#define WTERMSIG(x) (_WSTATUS(x)) +#define WIFSTOPPED(x) (_WSTATUS(x) == _WSTOPPED && WSTOPSIG(x) != 0x13) +#define WIFEXITED(x) (_WSTATUS(x) == 0) +#define WIFSIGNALED(x) (_WSTATUS(x) != _WSTOPPED && _WSTATUS(x) != 0) +#define WTERMSIG(x) (_WSTATUS(x)) #if (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -#define WCOREDUMP(x) (_W_INT(x) & WCOREFLAG) +#define WCOREDUMP(x) (_W_INT(x) & WCOREFLAG) -#define W_EXITCODE(ret, sig) ((ret) << 8 | (sig)) -#define W_STOPCODE(sig) ((sig) << 8 | _WSTOPPED) +#define W_EXITCODE(ret, sig) ((ret) << 8 | (sig)) +#define W_STOPCODE(sig) ((sig) << 8 | _WSTOPPED) #endif /* (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) */ /* @@ -165,13 +165,13 @@ typedef enum { */ /* WNOHANG already defined for wait4() */ /* WUNTRACED defined for wait4() but not for waitid() */ -#define WEXITED 0x00000004 /* [XSI] Processes which have exited */ +#define WEXITED 0x00000004 /* [XSI] Processes which have exited */ #if __DARWIN_UNIX03 /* waitid() parameter */ -#define WSTOPPED 0x00000008 /* [XSI] Any child stopped by signal */ +#define WSTOPPED 0x00000008 /* [XSI] Any child stopped by signal */ #endif -#define WCONTINUED 0x00000010 /* [XSI] Any child stopped then continued */ -#define WNOWAIT 0x00000020 /* [XSI] Leave process returned waitable */ +#define WCONTINUED 0x00000010 /* [XSI] Any child stopped then continued */ +#define WNOWAIT 0x00000020 /* [XSI] Leave process returned waitable */ #if
(!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) @@ -180,8 +180,8 @@ typedef enum { /* * Tokens for special values of the "pid" parameter to wait4. */ -#define WAIT_ANY (-1) /* any process */ -#define WAIT_MYPGRP 0 /* any process in my process group */ +#define WAIT_ANY (-1) /* any process */ +#define WAIT_MYPGRP 0 /* any process in my process group */ #include <machine/endian.h> @@ -192,22 +192,22 @@ typedef enum { * the information returned, else the first. */ union wait { - int w_status; /* used in syscall */ + int w_status; /* used in syscall */ /* * Terminated process status. */ struct { -#if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN - unsigned int w_Termsig:7, /* termination signal */ - w_Coredump:1, /* core dump indicator */ - w_Retcode:8, /* exit code if w_termsig==0 */ - w_Filler:16; /* upper bits filler */ +#if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN + unsigned int w_Termsig:7, /* termination signal */ + w_Coredump:1, /* core dump indicator */ + w_Retcode:8, /* exit code if w_termsig==0 */ + w_Filler:16; /* upper bits filler */ #endif -#if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN - unsigned int w_Filler:16, /* upper bits filler */ - w_Retcode:8, /* exit code if w_termsig==0 */ - w_Coredump:1, /* core dump indicator */ - w_Termsig:7; /* termination signal */ +#if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN + unsigned int w_Filler:16, /* upper bits filler */ + w_Retcode:8, /* exit code if w_termsig==0 */ + w_Coredump:1, /* core dump indicator */ + w_Termsig:7; /* termination signal */ #endif } w_T; /* @@ -216,23 +216,23 @@ union wait { * with the WUNTRACED option bit. */ struct { -#if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN - unsigned int w_Stopval:8, /* == W_STOPPED if stopped */ - w_Stopsig:8, /* signal that stopped us */ - w_Filler:16; /* upper bits filler */ +#if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN + unsigned int w_Stopval:8, /* == W_STOPPED if stopped */ + w_Stopsig:8, /* signal that stopped us */ + w_Filler:16; /* upper bits filler */ #endif -#if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN - unsigned int w_Filler:16, /* upper bits filler */ - w_Stopsig:8, /* signal that stopped us */ - w_Stopval:8; /* == W_STOPPED if stopped */ +#if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN + unsigned int w_Filler:16, /* upper bits filler */ + w_Stopsig:8, /* signal that stopped us */ + w_Stopval:8; /* == W_STOPPED if stopped */ #endif } w_S; }; -#define w_termsig w_T.w_Termsig -#define w_coredump w_T.w_Coredump -#define w_retcode w_T.w_Retcode -#define w_stopval w_S.w_Stopval -#define w_stopsig w_S.w_Stopsig +#define w_termsig w_T.w_Termsig +#define w_coredump w_T.w_Coredump +#define w_retcode w_T.w_Retcode +#define w_stopval w_S.w_Stopval +#define w_stopsig w_S.w_Stopsig #endif /* (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) */ @@ -241,19 +241,19 @@ union wait { * Stopped state value; cannot use waitid() parameter of the same name * in the same scope */ -#define WSTOPPED _WSTOPPED +#define WSTOPPED _WSTOPPED #endif /* !__DARWIN_UNIX03 */ #ifndef KERNEL __BEGIN_DECLS -pid_t wait(int *) __DARWIN_ALIAS_C(wait); -pid_t waitpid(pid_t, int *, int) __DARWIN_ALIAS_C(waitpid); +pid_t wait(int *) __DARWIN_ALIAS_C(wait); +pid_t waitpid(pid_t, int *, int) __DARWIN_ALIAS_C(waitpid); #ifndef _ANSI_SOURCE -int waitid(idtype_t, id_t, siginfo_t *, int) __DARWIN_ALIAS_C(waitid); +int waitid(idtype_t, id_t, siginfo_t *, int) __DARWIN_ALIAS_C(waitid); #endif /* !_ANSI_SOURCE */ #if (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) -pid_t wait3(int *, int, struct rusage *);
-pid_t wait4(pid_t, int *, int, struct rusage *); +pid_t wait3(int *, int, struct rusage *); +pid_t wait4(pid_t, int *, int, struct rusage *); #endif /* (!defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)) */ __END_DECLS #endif diff --git a/bsd/sys/work_interval.h b/bsd/sys/work_interval.h index 797f929bc..ab5d80fb2 100644 --- a/bsd/sys/work_interval.h +++ b/bsd/sys/work_interval.h @@ -161,9 +161,9 @@ int work_interval_create(work_interval_t *interval_handle, uint32_t flags); * Only the process which created the work interval may notify */ int work_interval_notify(work_interval_t interval_handle, - uint64_t start, uint64_t finish, - uint64_t deadline, uint64_t next_start, - uint32_t flags); + uint64_t start, uint64_t finish, + uint64_t deadline, uint64_t next_start, + uint32_t flags); /* * Notify, with "finish" implicitly set to the current time @@ -171,8 +171,8 @@ int work_interval_notify(work_interval_t interval_handle, * Only the process which created the work interval may notify */ int work_interval_notify_simple(work_interval_t interval_handle, - uint64_t start, uint64_t deadline, - uint64_t next_start); + uint64_t start, uint64_t deadline, + uint64_t next_start); /* * Deallocate work interval handle @@ -230,12 +230,12 @@ int work_interval_leave(void); #define WORK_INTERVAL_OPERATION_JOIN 0x00000005 /* arg is a port_name */ struct work_interval_notification { - uint64_t start; - uint64_t finish; - uint64_t deadline; - uint64_t next_start; - uint32_t notify_flags; - uint32_t create_flags; + uint64_t start; + uint64_t finish; + uint64_t deadline; + uint64_t next_start; + uint32_t notify_flags; + uint32_t create_flags; }; typedef struct work_interval_notification *work_interval_notification_t; @@ -253,4 +253,3 @@ int __work_interval_ctl(uint32_t operation, uint64_t work_interval_id, void __END_DECLS #endif /* _SYS_WORK_INTERVAL_H */ - diff --git a/bsd/sys/xattr.h b/bsd/sys/xattr.h index 1e062b615..5577f8d37 100644 --- a/bsd/sys/xattr.h +++ b/bsd/sys/xattr.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
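As context for the bsd/sys/wait.h hunks above: the status word is a packed value whose low seven bits (_WSTATUS) distinguish the exited, stopped, and signaled cases, with the exit code or stop signal in the next byte. A short userspace check of those macros follows, using only the public API, so it should build as-is with cc.

    /*
     * Minimal demo of the status-decoding macros from <sys/wait.h>.
     * The child exits with code 7; the parent decodes the packed status.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int
    main(void)
    {
            pid_t pid = fork();
            if (pid < 0) {
                    perror("fork");
                    return 1;
            }
            if (pid == 0) {
                    _exit(7);       /* child: code lands in bits 8..15 of status */
            }

            int status = 0;
            if (waitpid(pid, &status, 0) < 0) {
                    perror("waitpid");
                    return 1;
            }
            if (WIFEXITED(status)) {
                    /* _WSTATUS(status) == 0, exit code in the next byte */
                    printf("exited, code=%d\n", WEXITSTATUS(status));
            } else if (WIFSIGNALED(status)) {
                    printf("killed by signal %d%s\n", WTERMSIG(status),
                        WCOREDUMP(status) ? " (core dumped)" : "");
            }
            return 0;
    }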
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -47,12 +47,12 @@ /* option for f/getxattr() and f/listxattr() to expose the HFS Compression extended attributes */ #define XATTR_SHOWCOMPRESSION 0x0020 -#define XATTR_MAXNAMELEN 127 +#define XATTR_MAXNAMELEN 127 /* See the ATTR_CMN_FNDRINFO section of getattrlist(2) for details on FinderInfo */ -#define XATTR_FINDERINFO_NAME "com.apple.FinderInfo" +#define XATTR_FINDERINFO_NAME "com.apple.FinderInfo" -#define XATTR_RESOURCEFORK_NAME "com.apple.ResourceFork" +#define XATTR_RESOURCEFORK_NAME "com.apple.ResourceFork" #ifdef KERNEL @@ -67,16 +67,16 @@ int xattr_protected(const char *); int xattr_validatename(const char *); /* Maximum extended attribute size supported by VFS */ -#define XATTR_MAXSIZE INT32_MAX +#define XATTR_MAXSIZE INT32_MAX #ifdef PRIVATE -/* Maximum extended attribute size in an Apple Double file */ -#define AD_XATTR_MAXSIZE XATTR_MAXSIZE +/* Maximum extended attribute size in an Apple Double file */ +#define AD_XATTR_MAXSIZE XATTR_MAXSIZE -/* Number of bits used to represent the maximum size of +/* Number of bits used to represent the maximum size of * extended attribute stored in an Apple Double file. */ -#define AD_XATTR_SIZE_BITS 31 +#define AD_XATTR_SIZE_BITS 31 #endif /* PRIVATE */ __END_DECLS @@ -98,7 +98,7 @@ int removexattr(const char *path, const char *name, int options); int fremovexattr(int fd, const char *name, int options); ssize_t listxattr(const char *path, char *namebuff, size_t size, int options); - + ssize_t flistxattr(int fd, char *namebuff, size_t size, int options); __END_DECLS diff --git a/bsd/tests/bsd_tests.c b/bsd/tests/bsd_tests.c index dfb379143..a53bbdc19 100644 --- a/bsd/tests/bsd_tests.c +++ b/bsd/tests/bsd_tests.c @@ -130,17 +130,18 @@ kalloc_test() #define XNUPOST_TNAME_MAXLEN 132 struct kcdata_subtype_descriptor kc_xnupost_test_def[] = { - {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT16, 0, sizeof(uint16_t), "config"}, - {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT16, 1 * sizeof(uint16_t), sizeof(uint16_t), "test_num"}, - {KCS_SUBTYPE_FLAGS_NONE, KC_ST_INT32, 2 * sizeof(uint16_t), sizeof(int32_t), "retval"}, - {KCS_SUBTYPE_FLAGS_NONE, KC_ST_INT32, 2 * sizeof(uint16_t) + sizeof(int32_t), sizeof(int32_t), "expected_retval"}, - {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 2 * (sizeof(uint16_t) + sizeof(int32_t)), sizeof(uint64_t), "begin_time"}, - {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 2 * (sizeof(uint16_t) + sizeof(int32_t)) + sizeof(uint64_t), sizeof(uint64_t), "end_time"}, - {KCS_SUBTYPE_FLAGS_ARRAY, - KC_ST_CHAR, - 2 * (sizeof(uint16_t) + sizeof(int32_t) + sizeof(uint64_t)), - KCS_SUBTYPE_PACK_SIZE(XNUPOST_TNAME_MAXLEN * sizeof(char), sizeof(char)), - "test_name"}}; + {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT16, 0, sizeof(uint16_t), "config"}, + {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT16, 1 * sizeof(uint16_t), sizeof(uint16_t), "test_num"}, + {KCS_SUBTYPE_FLAGS_NONE, KC_ST_INT32, 2 * sizeof(uint16_t), sizeof(int32_t), "retval"}, + {KCS_SUBTYPE_FLAGS_NONE, KC_ST_INT32, 2 * sizeof(uint16_t) + sizeof(int32_t), sizeof(int32_t), "expected_retval"}, + {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 2 * (sizeof(uint16_t) + sizeof(int32_t)), sizeof(uint64_t), "begin_time"}, + {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 2 * (sizeof(uint16_t) + sizeof(int32_t)) + sizeof(uint64_t), sizeof(uint64_t), "end_time"}, + {KCS_SUBTYPE_FLAGS_ARRAY, + KC_ST_CHAR, + 2 * (sizeof(uint16_t) + sizeof(int32_t) + sizeof(uint64_t)), + KCS_SUBTYPE_PACK_SIZE(XNUPOST_TNAME_MAXLEN * sizeof(char), sizeof(char)), + "test_name"} +}; const uint32_t 
kc_xnupost_test_def_count = sizeof(kc_xnupost_test_def) / sizeof(struct kcdata_subtype_descriptor); @@ -154,39 +155,46 @@ xnupost_copyout_test(xnupost_test_t t, mach_vm_address_t outaddr) uint32_t namelen = 0; kret = copyout(&t->xt_config, outaddr, sizeof(uint16_t)); - if (kret) + if (kret) { return kret; + } outaddr += sizeof(uint16_t); kret = copyout(&t->xt_test_num, outaddr, sizeof(uint16_t)); - if (kret) + if (kret) { return kret; + } outaddr += sizeof(uint16_t); kret = copyout(&t->xt_retval, outaddr, sizeof(uint32_t)); - if (kret) + if (kret) { return kret; + } outaddr += sizeof(uint32_t); kret = copyout(&t->xt_expected_retval, outaddr, sizeof(uint32_t)); - if (kret) + if (kret) { return kret; + } outaddr += sizeof(uint32_t); kret = copyout(&t->xt_begin_time, outaddr, sizeof(uint64_t)); - if (kret) + if (kret) { return kret; + } outaddr += sizeof(uint64_t); kret = copyout(&t->xt_end_time, outaddr, sizeof(uint64_t)); - if (kret) + if (kret) { return kret; + } outaddr += sizeof(uint64_t); namelen = strnlen(t->xt_name, XNUPOST_TNAME_MAXLEN); kret = copyout(t->xt_name, outaddr, namelen); - if (kret) + if (kret) { return kret; + } outaddr += namelen; return 0; @@ -197,7 +205,7 @@ xnupost_get_estimated_testdata_size(void) { uint32_t total_tests = bsd_post_tests_count + kernel_post_tests_count; uint32_t elem_size = kc_xnupost_test_def[kc_xnupost_test_def_count - 1].kcs_elem_offset + - kcs_get_elem_size(&kc_xnupost_test_def[kc_xnupost_test_def_count - 1]); + kcs_get_elem_size(&kc_xnupost_test_def[kc_xnupost_test_def_count - 1]); uint32_t retval = 1024; /* account for type definition and mach timebase */ retval += 1024; /* kernel version and boot-args string data */ retval += (total_tests * elem_size); @@ -219,9 +227,9 @@ xnupost_export_testdata(void * outp, uint32_t size, uint32_t * lenp) #define RET_IF_OP_FAIL \ do { \ - if (kret != KERN_SUCCESS) { \ - return (kret == KERN_NO_ACCESS) ? EACCES : ((kret == KERN_RESOURCE_SHORTAGE) ? ENOMEM : EINVAL); \ - } \ + if (kret != KERN_SUCCESS) { \ + return (kret == KERN_NO_ACCESS) ? EACCES : ((kret == KERN_RESOURCE_SHORTAGE) ? 
ENOMEM : EINVAL); \ + } \ } while (0) kret = kcdata_memory_static_init(&kcd, (mach_vm_address_t)outp, KCDATA_BUFFER_BEGIN_XNUPOST_CONFIG, size, KCFLAG_USE_COPYOUT); @@ -249,13 +257,13 @@ xnupost_export_testdata(void * outp, uint32_t size, uint32_t * lenp) /* add type definition to buffer */ kret = kcdata_add_type_definition(&kcd, XNUPOST_KCTYPE_TESTCONFIG, kctype_name, &kc_xnupost_test_def[0], - kc_xnupost_test_def_count); + kc_xnupost_test_def_count); RET_IF_OP_FAIL; /* add the tests to buffer as array */ uint32_t total_tests = bsd_post_tests_count + kernel_post_tests_count; uint32_t elem_size = kc_xnupost_test_def[kc_xnupost_test_def_count - 1].kcs_elem_offset + - kcs_get_elem_size(&kc_xnupost_test_def[kc_xnupost_test_def_count - 1]); + kcs_get_elem_size(&kc_xnupost_test_def[kc_xnupost_test_def_count - 1]); kret = kcdata_get_memory_addr_for_array(&kcd, XNUPOST_KCTYPE_TESTCONFIG, elem_size, total_tests, &user_addr); RET_IF_OP_FAIL; @@ -273,8 +281,9 @@ xnupost_export_testdata(void * outp, uint32_t size, uint32_t * lenp) RET_IF_OP_FAIL; } - if (kret == KERN_SUCCESS && lenp != NULL) + if (kret == KERN_SUCCESS && lenp != NULL) { *lenp = (uint32_t)kcdata_memory_get_used_bytes(&kcd); + } RET_IF_OP_FAIL; #undef RET_IF_OP_FAIL diff --git a/bsd/tests/ctrr_test_sysctl.c b/bsd/tests/ctrr_test_sysctl.c index ca1056fcf..bea84e1ab 100644 --- a/bsd/tests/ctrr_test_sysctl.c +++ b/bsd/tests/ctrr_test_sysctl.c @@ -2,7 +2,7 @@ * Copyright (c) 2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/bsd/tests/pmap_test_sysctl.c b/bsd/tests/pmap_test_sysctl.c index d1280372f..f94028df8 100644 --- a/bsd/tests/pmap_test_sysctl.c +++ b/bsd/tests/pmap_test_sysctl.c @@ -2,7 +2,7 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
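The RET_IF_OP_FAIL macro above folds kcdata's kern_return_t results into BSD errnos. The same translation is restated below as a standalone function for clarity; the KERN_* values are written out to match <mach/kern_return.h>, and kret_to_errno is an invented helper name, not part of the patch.

    /*
     * Userspace sketch of the kern_return_t -> errno mapping that
     * RET_IF_OP_FAIL performs inline in xnupost_export_testdata.
     */
    #include <stdio.h>
    #include <errno.h>

    #define KERN_SUCCESS            0
    #define KERN_RESOURCE_SHORTAGE  6
    #define KERN_NO_ACCESS          8

    static int
    kret_to_errno(int kret)
    {
            if (kret == KERN_SUCCESS) {
                    return 0;
            }
            /* Same ternary chain as the macro: no access -> EACCES,
             * resource shortage -> ENOMEM, anything else -> EINVAL. */
            return (kret == KERN_NO_ACCESS) ? EACCES :
                ((kret == KERN_RESOURCE_SHORTAGE) ? ENOMEM : EINVAL);
    }

    int
    main(void)
    {
            printf("%d %d %d %d\n",
                kret_to_errno(KERN_SUCCESS),
                kret_to_errno(KERN_NO_ACCESS),
                kret_to_errno(KERN_RESOURCE_SHORTAGE),
                kret_to_errno(99));
            return 0;
    }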
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,14 +37,15 @@ sysctl_test_pmap_enter_disconnect(__unused struct sysctl_oid *oidp, __unused voi unsigned int num_loops; int error, changed; error = sysctl_io_number(req, 0, sizeof(num_loops), &num_loops, &changed); - if (error || !changed) + if (error || !changed) { return error; + } return test_pmap_enter_disconnect(num_loops); } SYSCTL_PROC(_kern, OID_AUTO, pmap_enter_disconnect_test, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_test_pmap_enter_disconnect, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_test_pmap_enter_disconnect, "I", ""); static int sysctl_test_pmap_iommu_disconnect(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) @@ -52,11 +53,12 @@ sysctl_test_pmap_iommu_disconnect(__unused struct sysctl_oid *oidp, __unused voi unsigned int run = 0; int error, changed; error = sysctl_io_number(req, 0, sizeof(run), &run, &changed); - if (error || !changed) + if (error || !changed) { return error; + } return test_pmap_iommu_disconnect(); } SYSCTL_PROC(_kern, OID_AUTO, pmap_iommu_disconnect_test, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - 0, 0, sysctl_test_pmap_iommu_disconnect, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, + 0, 0, sysctl_test_pmap_iommu_disconnect, "I", ""); diff --git a/bsd/uuid/uuid.h b/bsd/uuid/uuid.h index f751dc2ed..28f231f8b 100644 --- a/bsd/uuid/uuid.h +++ b/bsd/uuid/uuid.h @@ -1,6 +1,6 @@ /* * Public include file for the UUID library - * + * * Copyright (C) 1996, 1997, 1998 Theodore Ts'o. * * %Begin-Header% @@ -16,7 +16,7 @@ * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. - * + * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF @@ -40,10 +40,10 @@ #ifndef _UUID_STRING_T #define _UUID_STRING_T -typedef __darwin_uuid_string_t uuid_string_t; +typedef __darwin_uuid_string_t uuid_string_t; #endif /* _UUID_STRING_T */ -#define UUID_DEFINE(name,u0,u1,u2,u3,u4,u5,u6,u7,u8,u9,u10,u11,u12,u13,u14,u15) \ +#define UUID_DEFINE(name, u0, u1, u2, u3, u4, u5, u6, u7, u8, u9, u10, u11, u12, u13, u14, u15) \ static const uuid_t name __attribute__ ((unused)) = {u0,u1,u2,u3,u4,u5,u6,u7,u8,u9,u10,u11,u12,u13,u14,u15} UUID_DEFINE(UUID_NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); diff --git a/bsd/uxkern/ux_exception.c b/bsd/uxkern/ux_exception.c index b69437f3d..1ef01a1d4 100644 --- a/bsd/uxkern/ux_exception.c +++ b/bsd/uxkern/ux_exception.c @@ -54,46 +54,48 @@ */ static int ux_exception(int exception, - mach_exception_code_t code, - mach_exception_subcode_t subcode) + mach_exception_code_t code, + mach_exception_subcode_t subcode) { int machine_signal = 0; /* Try machine-dependent translation first. 
*/ - if ((machine_signal = machine_exception(exception, code, subcode)) != 0) + if ((machine_signal = machine_exception(exception, code, subcode)) != 0) { return machine_signal; + } - switch(exception) { - case EXC_BAD_ACCESS: - if (code == KERN_INVALID_ADDRESS) - return SIGSEGV; - else - return SIGBUS; - - case EXC_BAD_INSTRUCTION: - return SIGILL; - - case EXC_ARITHMETIC: - return SIGFPE; - - case EXC_EMULATION: - return SIGEMT; - - case EXC_SOFTWARE: - switch (code) { - case EXC_UNIX_BAD_SYSCALL: - return SIGSYS; - case EXC_UNIX_BAD_PIPE: - return SIGPIPE; - case EXC_UNIX_ABORT: - return SIGABRT; - case EXC_SOFT_SIGNAL: - return SIGKILL; - } - break; + switch (exception) { + case EXC_BAD_ACCESS: + if (code == KERN_INVALID_ADDRESS) { + return SIGSEGV; + } else { + return SIGBUS; + } - case EXC_BREAKPOINT: - return SIGTRAP; + case EXC_BAD_INSTRUCTION: + return SIGILL; + + case EXC_ARITHMETIC: + return SIGFPE; + + case EXC_EMULATION: + return SIGEMT; + + case EXC_SOFTWARE: + switch (code) { + case EXC_UNIX_BAD_SYSCALL: + return SIGSYS; + case EXC_UNIX_BAD_PIPE: + return SIGPIPE; + case EXC_UNIX_ABORT: + return SIGABRT; + case EXC_SOFT_SIGNAL: + return SIGKILL; + } + break; + + case EXC_BREAKPOINT: + return SIGTRAP; } return 0; @@ -104,16 +106,17 @@ ux_exception(int exception, */ kern_return_t handle_ux_exception(thread_t thread, - int exception, - mach_exception_code_t code, - mach_exception_subcode_t subcode) + int exception, + mach_exception_code_t code, + mach_exception_subcode_t subcode) { /* Returns +1 proc reference */ proc_t p = proc_findthread(thread); /* Can't deliver a signal without a bsd process reference */ - if (p == NULL) + if (p == NULL) { return KERN_FAILURE; + } /* Translate exception and code to signal type */ int ux_signal = ux_exception(exception, code, subcode); @@ -153,7 +156,7 @@ handle_ux_exception(thread_t thread, (ut->uu_sigwait & mask) || (ut->uu_sigmask & mask) || (ps->ps_sigact[SIGSEGV] == SIG_IGN) || - (! (ps->ps_sigonstack & mask))) { + (!(ps->ps_sigonstack & mask))) { p->p_sigignore &= ~mask; p->p_sigcatch &= ~mask; ps->ps_sigact[SIGSEGV] = SIG_DFL; @@ -175,4 +178,3 @@ handle_ux_exception(thread_t thread, return KERN_SUCCESS; } - diff --git a/bsd/vfs/doc_tombstone.c b/bsd/vfs/doc_tombstone.c index 05120a56d..17fbf4ef8 100644 --- a/bsd/vfs/doc_tombstone.c +++ b/bsd/vfs/doc_tombstone.c @@ -86,15 +86,16 @@ doc_tombstone_clear(struct doc_tombstone *ut, vnode_t *old_vpp) if (old_vpp) { *old_vpp = NULL; if (old_id && ut->t_lastop_item - && vnode_vid(ut->t_lastop_item) == ut->t_lastop_item_vid) { + && vnode_vid(ut->t_lastop_item) == ut->t_lastop_item_vid) { int res = vnode_get(ut->t_lastop_item); if (!res) { // Need to check vid again if (vnode_vid(ut->t_lastop_item) == ut->t_lastop_item_vid - && !ISSET(ut->t_lastop_item->v_lflag, VL_TERMINATE)) + && !ISSET(ut->t_lastop_item->v_lflag, VL_TERMINATE)) { *old_vpp = ut->t_lastop_item; - else + } else { vnode_put(ut->t_lastop_item); + } } } } @@ -112,15 +113,16 @@ doc_tombstone_clear(struct doc_tombstone *ut, vnode_t *old_vpp) // temp filenames to work-around questionable application // behavior from apps like Autocad that perform unusual // sequences of file system operations for a "safe save". 
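The reindented ux_exception above is a straight table from Mach exception types to BSD signals, consulted after the machine-dependent translation. Below is a standalone restatement for illustration only: the machine-dependent step and the EXC_SOFTWARE subcases are elided, and the EXC_* and KERN_* values are written out here rather than pulled from the Mach headers.

    /*
     * Userspace sketch of the exception -> signal mapping in
     * ux_exception; values mirror <mach/exception_types.h>.
     */
    #include <stdio.h>
    #include <signal.h>

    #define EXC_BAD_ACCESS       1
    #define EXC_BAD_INSTRUCTION  2
    #define EXC_ARITHMETIC       3
    #define EXC_BREAKPOINT       6
    #define KERN_INVALID_ADDRESS 1

    static int
    exception_to_signal(int exception, long code)
    {
            switch (exception) {
            case EXC_BAD_ACCESS:
                    /* bad address -> SIGSEGV, other access faults -> SIGBUS */
                    return (code == KERN_INVALID_ADDRESS) ? SIGSEGV : SIGBUS;
            case EXC_BAD_INSTRUCTION:
                    return SIGILL;
            case EXC_ARITHMETIC:
                    return SIGFPE;
            case EXC_BREAKPOINT:
                    return SIGTRAP;
            default:
                    return 0;       /* EXC_SOFTWARE/EXC_EMULATION elided here */
            }
    }

    int
    main(void)
    {
            printf("EXC_BAD_ACCESS/KERN_INVALID_ADDRESS -> %d (SIGSEGV=%d)\n",
                exception_to_signal(EXC_BAD_ACCESS, KERN_INVALID_ADDRESS), SIGSEGV);
            printf("EXC_ARITHMETIC -> %d (SIGFPE=%d)\n",
                exception_to_signal(EXC_ARITHMETIC, 0), SIGFPE);
            return 0;
    }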
-bool doc_tombstone_should_ignore_name(const char *nameptr, int len) +bool +doc_tombstone_should_ignore_name(const char *nameptr, int len) { if (len == 0) { len = strlen(nameptr); } - if ( strncmp(nameptr, "atmp", 4) == 0 - || (len > 4 && strncmp(nameptr+len-4, ".bak", 4) == 0) - || (len > 4 && strncmp(nameptr+len-4, ".tmp", 4) == 0)) { + if (strncmp(nameptr, "atmp", 4) == 0 + || (len > 4 && strncmp(nameptr + len - 4, ".bak", 4) == 0) + || (len > 4 && strncmp(nameptr + len - 4, ".tmp", 4) == 0)) { return true; } @@ -132,15 +134,16 @@ bool doc_tombstone_should_ignore_name(const char *nameptr, int len) // save a tombstone - but if there already is one and the name we're // given is an ignorable name, then we will not save a tombstone. // -bool doc_tombstone_should_save(struct doc_tombstone *ut, struct vnode *vp, - struct componentname *cnp) +bool +doc_tombstone_should_save(struct doc_tombstone *ut, struct vnode *vp, + struct componentname *cnp) { if (cnp->cn_nameptr == NULL) { return false; } if (ut->t_lastop_document_id && ut->t_lastop_item == vp - && doc_tombstone_should_ignore_name(cnp->cn_nameptr, cnp->cn_namelen)) { + && doc_tombstone_should_ignore_name(cnp->cn_nameptr, cnp->cn_namelen)) { return false; } @@ -162,8 +165,8 @@ bool doc_tombstone_should_save(struct doc_tombstone *ut, struct vnode *vp, // void doc_tombstone_save(struct vnode *dvp, struct vnode *vp, - struct componentname *cnp, uint64_t doc_id, - ino64_t file_id) + struct componentname *cnp, uint64_t doc_id, + ino64_t file_id) { struct doc_tombstone *ut; ut = doc_tombstone_get(); @@ -173,7 +176,7 @@ doc_tombstone_save(struct vnode *dvp, struct vnode *vp, ut->t_lastop_fileid = file_id; ut->t_lastop_item = vp; ut->t_lastop_item_vid = vp ? vnode_vid(vp) : 0; - ut->t_lastop_document_id = doc_id; + ut->t_lastop_document_id = doc_id; strlcpy((char *)&ut->t_lastop_filename[0], cnp->cn_nameptr, sizeof(ut->t_lastop_filename)); } diff --git a/bsd/vfs/kpi_vfs.c b/bsd/vfs/kpi_vfs.c index f09e98f74..a72dd4259 100644 --- a/bsd/vfs/kpi_vfs.c +++ b/bsd/vfs/kpi_vfs.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -139,9 +139,9 @@ #if CONFIG_APPLEDOUBLE static void xattrfile_remove(vnode_t dvp, const char *basename, - vfs_context_t ctx, int force); + vfs_context_t ctx, int force); static void xattrfile_setattr(vnode_t dvp, const char * basename, - struct vnode_attr * vap, vfs_context_t ctx); + struct vnode_attr * vap, vfs_context_t ctx); #endif /* CONFIG_APPLEDOUBLE */ static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp); @@ -150,23 +150,23 @@ static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp) * vnode_setneedinactive * * Description: Indicate that when the last iocount on this vnode goes away, - * and the usecount is also zero, we should inform the filesystem - * via VNOP_INACTIVE. + * and the usecount is also zero, we should inform the filesystem + * via VNOP_INACTIVE. * * Parameters: vnode_t vnode to mark * * Returns: Nothing * - * Notes: Notably used when we're deleting a file--we need not have a - * usecount, so VNOP_INACTIVE may not get called by anyone. We - * want it called when we drop our iocount. + * Notes: Notably used when we're deleting a file--we need not have a + * usecount, so VNOP_INACTIVE may not get called by anyone. We + * want it called when we drop our iocount. */ void vnode_setneedinactive(vnode_t vp) { - cache_purge(vp); + cache_purge(vp); - vnode_lock_spin(vp); + vnode_lock_spin(vp); vp->v_lflag |= VL_NEEDINACTIVE; vnode_unlock(vp); } @@ -179,53 +179,54 @@ vnode_setneedinactive(vnode_t vp) /* * implementations of exported VFS operations */ -int +int VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) { + return ENOTSUP; + } if (vfs_context_is64bit(ctx)) { if (vfs_64bitready(mp)) { error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx); - } - else { + } else { error = ENOTSUP; } - } - else { + } else { error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx); } - - return (error); + + return error; } -int +int VFS_START(mount_t mp, int flags, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) { + return ENOTSUP; + } error = (*mp->mnt_op->vfs_start)(mp, flags, ctx); - return (error); + return error; } -int +int VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) { + return ENOTSUP; + } error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx); - return (error); + return error; } /* @@ -245,13 +246,14 @@ VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx) * additional error codes which may be propagated from underlying * routines called by hfs_vget. 
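The doc_tombstone_should_ignore_name change above encodes the safe-save heuristic in full: an "atmp" prefix or a ".bak"/".tmp" suffix marks a temporary file. A self-contained copy of that check is below, with a small test harness; main and the sample names are invented for illustration.

    /*
     * Standalone copy of the safe-save filename heuristic from
     * doc_tombstone_should_ignore_name.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool
    should_ignore_name(const char *nameptr, int len)
    {
            if (len == 0) {
                    len = (int)strlen(nameptr);
            }
            /* "atmp" prefix, or a ".bak"/".tmp" suffix, marks a temp file */
            if (strncmp(nameptr, "atmp", 4) == 0
                || (len > 4 && strncmp(nameptr + len - 4, ".bak", 4) == 0)
                || (len > 4 && strncmp(nameptr + len - 4, ".tmp", 4) == 0)) {
                    return true;
            }
            return false;
    }

    int
    main(void)
    {
            const char *names[] = { "atmp1234", "report.bak", "draft.tmp", "notes.txt" };
            for (int i = 0; i < 4; i++) {
                    printf("%-12s -> %s\n", names[i],
                        should_ignore_name(names[i], 0) ? "ignore" : "track");
            }
            return 0;
    }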
*/ -int +int VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) { + return ENOTSUP; + } if (ctx == NULL) { ctx = vfs_context_current(); @@ -259,46 +261,49 @@ VFS_ROOT(mount_t mp, struct vnode ** vpp, vfs_context_t ctx) error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx); - return (error); + return error; } -int +int VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) { + return ENOTSUP; + } error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx); - return (error); + return error; } -int +int VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) { + return ENOTSUP; + } if (ctx == NULL) { ctx = vfs_context_current(); } error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx); - - return(error); + + return error; } -int +int VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) { + return ENOTSUP; + } if (ctx == NULL) { ctx = vfs_context_current(); @@ -306,16 +311,17 @@ VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx); - return(error); + return error; } -int +int VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) { + return ENOTSUP; + } if (ctx == NULL) { ctx = vfs_context_current(); @@ -323,16 +329,17 @@ VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx) error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx); - return(error); + return error; } -int +int VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) { + return ENOTSUP; + } if (ctx == NULL) { ctx = vfs_context_current(); @@ -340,16 +347,17 @@ VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx) error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx); - return(error); + return error; } -int -VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx) +int +VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) { + return ENOTSUP; + } if (ctx == NULL) { ctx = vfs_context_current(); @@ -357,16 +365,17 @@ VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_ error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx); - return(error); + return error; } -int +int VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx) { int error; - if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) - return(ENOTSUP); + if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) { + return ENOTSUP; + } if (ctx == NULL) { ctx = 
vfs_context_current(); @@ -374,17 +383,19 @@ VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx) error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx); - return(error); + return error; } -int VFS_IOCTL(struct mount *mp, u_long command, caddr_t data, - int flags, vfs_context_t context) +int +VFS_IOCTL(struct mount *mp, u_long command, caddr_t data, + int flags, vfs_context_t context) { - if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) + if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) { return ENOTSUP; + } return mp->mnt_op->vfs_ioctl(mp, command, data, flags, - context ?: vfs_context_current()); + context ?: vfs_context_current()); } int @@ -392,36 +403,38 @@ VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx) { int error; - if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) - return(ENOTSUP); + if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) { + return ENOTSUP; + } - if (ctx == NULL) + if (ctx == NULL) { ctx = vfs_context_current(); + } error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx); - return (error); + return error; } /* returns the cached throttle mask for the mount_t */ uint64_t vfs_throttle_mask(mount_t mp) { - return(mp->mnt_throttle_mask); + return mp->mnt_throttle_mask; } /* returns a copy of vfs type name for the mount_t */ -void +void vfs_name(mount_t mp, char *buffer) { - strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN); + strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN); } /* returns vfs type number for the mount_t */ -int +int vfs_typenum(mount_t mp) { - return(mp->mnt_vtable->vfc_typenum); + return mp->mnt_vtable->vfc_typenum; } /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers. */ @@ -432,17 +445,17 @@ vfs_mntlabel(mount_t mp) } /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */ -uint64_t +uint64_t vfs_flags(mount_t mp) { - return((uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK))); + return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK)); } /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */ -void +void vfs_setflags(mount_t mp, uint64_t flags) { - uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK)); + uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK)); mount_lock(mp); mp->mnt_flag |= lflags; @@ -450,10 +463,10 @@ vfs_setflags(mount_t mp, uint64_t flags) } /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */ -void -vfs_clearflags(mount_t mp , uint64_t flags) +void +vfs_clearflags(mount_t mp, uint64_t flags) { - uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK)); + uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK)); mount_lock(mp); mp->mnt_flag &= ~lflags; @@ -461,58 +474,59 @@ vfs_clearflags(mount_t mp , uint64_t flags) } /* Is the mount_t ronly and upgrade read/write requested? */ -int +int vfs_iswriteupgrade(mount_t mp) /* ronly && MNTK_WANTRDWR */ { - return ((mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR)); + return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR); } /* Is the mount_t mounted ronly */ -int +int vfs_isrdonly(mount_t mp) { - return (mp->mnt_flag & MNT_RDONLY); + return mp->mnt_flag & MNT_RDONLY; } /* Is the mount_t mounted for filesystem synchronous writes? */ -int +int vfs_issynchronous(mount_t mp) { - return (mp->mnt_flag & MNT_SYNCHRONOUS); + return mp->mnt_flag & MNT_SYNCHRONOUS; } /* Is the mount_t mounted read/write? 
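Every VFS_* wrapper in the kpi_vfs.c hunks above follows one pattern, which this patch merely rebraces: refuse the dead mount or a missing filesystem entry point with ENOTSUP, otherwise call through the mnt_op table. A userspace sketch of that guard-then-dispatch shape is below; all types and names are invented stand-ins for mount_t and vfsops, not the kernel KPI.

    /*
     * Guard-then-dispatch, as in VFS_SYNC: dead mounts and null
     * entry points short-circuit to ENOTSUP before any call.
     */
    #include <stdio.h>
    #include <errno.h>

    struct fake_vfsops {
            int (*vfs_sync)(void *mp, int flags);
    };

    struct fake_mount {
            struct fake_vfsops *mnt_op;
    };

    static struct fake_mount dead_mount;    /* stand-in for dead_mountp */

    static int
    FAKE_VFS_SYNC(struct fake_mount *mp, int flags)
    {
            /* the dead-mount check runs first, so mnt_op is never
             * dereferenced on the dead mount */
            if ((mp == &dead_mount) || (mp->mnt_op->vfs_sync == 0)) {
                    return ENOTSUP;
            }
            return (*mp->mnt_op->vfs_sync)(mp, flags);
    }

    static int
    my_sync(void *mp, int flags)
    {
            (void)mp;
            printf("sync flags=%d\n", flags);
            return 0;
    }

    int
    main(void)
    {
            struct fake_vfsops ops = { .vfs_sync = my_sync };
            struct fake_mount mnt = { .mnt_op = &ops };

            printf("live mount -> %d\n", FAKE_VFS_SYNC(&mnt, 1));
            printf("dead mount -> %d\n", FAKE_VFS_SYNC(&dead_mount, 1));
            return 0;
    }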
*/ -int +int vfs_isrdwr(mount_t mp) { - return ((mp->mnt_flag & MNT_RDONLY) == 0); + return (mp->mnt_flag & MNT_RDONLY) == 0; } /* Is mount_t marked for update (ie MNT_UPDATE) */ -int -vfs_isupdate(mount_t mp) +int +vfs_isupdate(mount_t mp) { - return (mp->mnt_flag & MNT_UPDATE); + return mp->mnt_flag & MNT_UPDATE; } /* Is mount_t marked for reload (ie MNT_RELOAD) */ -int +int vfs_isreload(mount_t mp) { - return ((mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD)); + return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD); } /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */ -int +int vfs_isforce(mount_t mp) { - if (mp->mnt_lflag & MNT_LFORCE) - return(1); - else - return(0); + if (mp->mnt_lflag & MNT_LFORCE) { + return 1; + } else { + return 0; + } } int @@ -528,20 +542,22 @@ vfs_isunmount(mount_t mp) int vfs_64bitready(mount_t mp) { - if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) - return(1); - else - return(0); + if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) { + return 1; + } else { + return 0; + } } int vfs_authcache_ttl(mount_t mp) { - if ( (mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) - return (mp->mnt_authcache_ttl); - else - return (CACHED_RIGHT_INFINITE_TTL); + if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) { + return mp->mnt_authcache_ttl; + } else { + return CACHED_RIGHT_INFINITE_TTL; + } } void @@ -569,19 +585,21 @@ vfs_clearauthcache_ttl(mount_t mp) int vfs_authopaque(mount_t mp) { - if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) - return(1); - else - return(0); + if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) { + return 1; + } else { + return 0; + } } -int +int vfs_authopaqueaccess(mount_t mp) { - if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) - return(1); - else - return(0); + if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) { + return 1; + } else { + return 0; + } } void @@ -592,7 +610,7 @@ vfs_setauthopaque(mount_t mp) mount_unlock(mp); } -void +void vfs_setauthopaqueaccess(mount_t mp) { mount_lock(mp); @@ -608,7 +626,7 @@ vfs_clearauthopaque(mount_t mp) mount_unlock(mp); } -void +void vfs_clearauthopaqueaccess(mount_t mp) { mount_lock(mp); @@ -651,59 +669,62 @@ vfs_clearnoswap(mount_t mp) int vfs_extendedsecurity(mount_t mp) { - return(mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY); + return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY; } /* returns the max size of short symlink in this mount_t */ -uint32_t +uint32_t vfs_maxsymlen(mount_t mp) { - return(mp->mnt_maxsymlinklen); + return mp->mnt_maxsymlinklen; } /* set max size of short symlink on mount_t */ -void +void vfs_setmaxsymlen(mount_t mp, uint32_t symlen) { mp->mnt_maxsymlinklen = symlen; } /* return a pointer to the RO vfs_statfs associated with mount_t */ -struct vfsstatfs * +struct vfsstatfs * vfs_statfs(mount_t mp) { - return(&mp->mnt_vfsstat); + return &mp->mnt_vfsstat; } int vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { - int error; + int error; - if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) - return(error); + if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) { + return error; + } /* - * If we have a filesystem create time, use it to default some others. - */ - if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) { - if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) - VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time); - } + * If we have a filesystem create time, use it to default some others. 
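The vfs_getattr logic just above defaults f_modify_time from f_create_time when the caller requested the attribute (active) but the filesystem did not supply it (supported). A toy model of that two-bitmask protocol follows; the struct and FA_* bits are simplified stand-ins invented for illustration, not the real VFSATTR KPI.

    /*
     * Toy model of the VFSATTR_IS_ACTIVE / VFSATTR_IS_SUPPORTED
     * protocol: "active" is what the caller asked for, "supported"
     * is what the filesystem filled in, and the wrapper may default
     * one attribute from another.
     */
    #include <stdio.h>
    #include <stdint.h>

    #define FA_CREATE_TIME  (1u << 0)
    #define FA_MODIFY_TIME  (1u << 1)

    struct toy_vfs_attr {
            uint32_t active;        /* requested by caller */
            uint32_t supported;     /* filled in by the filesystem */
            long     create_time;
            long     modify_time;
    };

    int
    main(void)
    {
            /* Caller wants both times; filesystem only knows create time. */
            struct toy_vfs_attr a = {
                    .active = FA_CREATE_TIME | FA_MODIFY_TIME,
                    .supported = FA_CREATE_TIME,
                    .create_time = 1000,
            };

            if (a.supported & FA_CREATE_TIME) {
                    if ((a.active & FA_MODIFY_TIME) && !(a.supported & FA_MODIFY_TIME)) {
                            /* same defaulting as VFSATTR_RETURN(vfa, f_modify_time, ...) */
                            a.modify_time = a.create_time;
                            a.supported |= FA_MODIFY_TIME;
                    }
            }
            printf("modify_time=%ld supported=0x%x\n", a.modify_time, a.supported);
            return 0;
    }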
+ */ + if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) { + if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) { + VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time); + } + } - return(0); + return 0; } int vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) { int error; - - if (vfs_isrdonly(mp)) + + if (vfs_isrdonly(mp)) { return EROFS; + } error = VFS_SETATTR(mp, vfa, ctx); - + /* * If we had alternate ways of setting vfs attributes, we'd * fall back here. @@ -716,11 +737,11 @@ vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx) void * vfs_fsprivate(mount_t mp) { - return(mp->mnt_data); + return mp->mnt_data; } /* set the private data handle in mount_t */ -void +void vfs_setfsprivate(mount_t mp, void *mntdata) { mount_lock(mp); @@ -729,26 +750,27 @@ vfs_setfsprivate(mount_t mp, void *mntdata) } /* query whether the mount point supports native EAs */ -int -vfs_nativexattrs(mount_t mp) { - return (mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS); +int +vfs_nativexattrs(mount_t mp) +{ + return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS; } - + /* * return the block size of the underlying * device associated with mount_t */ int -vfs_devblocksize(mount_t mp) { - - return(mp->mnt_devblocksize); +vfs_devblocksize(mount_t mp) +{ + return mp->mnt_devblocksize; } -/* - * Returns vnode with an iocount that must be released with vnode_put() +/* + * Returns vnode with an iocount that must be released with vnode_put() */ vnode_t -vfs_vnodecovered(mount_t mp) +vfs_vnodecovered(mount_t mp) { vnode_t vp = mp->mnt_vnodecovered; if ((vp == NULL) || (vnode_getwithref(vp) != 0)) { @@ -763,7 +785,7 @@ vfs_vnodecovered(mount_t mp) * The iocount must be released with vnode_put(). Note that this KPI is subtle * with respect to the validity of using this device vnode for anything substantial * (which is discouraged). If commands are sent to the device driver without - * taking proper steps to ensure that the device is still open, chaos may ensue. + * taking proper steps to ensure that the device is still open, chaos may ensue. * Similarly, this routine should only be called if there is some guarantee that * the mount itself is still valid. */ @@ -814,12 +836,13 @@ vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp) /* * set the IO attributes associated with mount_t */ -void +void vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp) { - if (mp == NULL) - return; - mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt; + if (mp == NULL) { + return; + } + mp->mnt_maxreadcnt = ioattrp->io_maxreadcnt; mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt; mp->mnt_segreadcnt = ioattrp->io_segreadcnt; mp->mnt_segwritecnt = ioattrp->io_segwritecnt; @@ -829,10 +852,10 @@ vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp) mp->mnt_ioflags = ioattrp->io_flags; mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available; } - + /* * Add a new filesystem into the kernel specified in passed in - * vfstable structure. It fills in the vnode + * vfstable structure. It fills in the vnode * dispatch vector that is to be passed to when vnodes are created. 
* It returns a handle which is to be used to when the FS is to be removed */ @@ -841,11 +864,11 @@ extern int vfs_opv_numops; errno_t vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle) { - struct vfstable *newvfstbl = NULL; - int i,j; - int (***opv_desc_vector_p)(void *); - int (**opv_desc_vector)(void *); - struct vnodeopv_entry_desc *opve_descp; + struct vfstable *newvfstbl = NULL; + int i, j; + int(***opv_desc_vector_p)(void *); + int(**opv_desc_vector)(void *); + struct vnodeopv_entry_desc *opve_descp; int desccount; int descsize; PFI *descptr; @@ -855,61 +878,76 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle) * ordinarily be done as part of the system startup; */ - if (vfe == (struct vfs_fsentry *)0) - return(EINVAL); + if (vfe == (struct vfs_fsentry *)0) { + return EINVAL; + } desccount = vfe->vfe_vopcnt; - if ((desccount <=0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL) - || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) - return(EINVAL); + if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL) + || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) { + return EINVAL; + } /* Non-threadsafe filesystems are not supported */ if ((vfe->vfe_flags & (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) { - return (EINVAL); + return EINVAL; } MALLOC(newvfstbl, void *, sizeof(struct vfstable), M_TEMP, - M_WAITOK); + M_WAITOK); bzero(newvfstbl, sizeof(struct vfstable)); newvfstbl->vfc_vfsops = vfe->vfe_vfsops; strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN); - if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) + if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) { newvfstbl->vfc_typenum = maxvfstypenum++; - else + } else { newvfstbl->vfc_typenum = vfe->vfe_fstypenum; - + } + newvfstbl->vfc_refcount = 0; newvfstbl->vfc_flags = 0; newvfstbl->vfc_mountroot = NULL; newvfstbl->vfc_next = NULL; newvfstbl->vfc_vfsflags = 0; - if (vfe->vfe_flags & VFS_TBL64BITREADY) + if (vfe->vfe_flags & VFS_TBL64BITREADY) { newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY; - if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) + } + if (vfe->vfe_flags & VFS_TBLVNOP_PAGEINV2) { newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2; - if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) + } + if (vfe->vfe_flags & VFS_TBLVNOP_PAGEOUTV2) { newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2; - if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) + } + if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) { newvfstbl->vfc_flags |= MNT_LOCAL; - if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) + } + if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) { newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS; - else + } else { newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS; + } - if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) + if (vfe->vfe_flags & VFS_TBLNATIVEXATTR) { newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR; - if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) + } + if (vfe->vfe_flags & VFS_TBLUNMOUNT_PREFLIGHT) { newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT; - if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED) + } + if (vfe->vfe_flags & VFS_TBLREADDIR_EXTENDED) { newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED; - if (vfe->vfe_flags & VFS_TBLNOMACLABEL) + } + if (vfe->vfe_flags & VFS_TBLNOMACLABEL) { newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL; - if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) + } + if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) { newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME; - if 
(vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) + } + if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) { newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME; - if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) + } + if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) { newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT; + } /* * Allocate and init the vectors. @@ -922,91 +960,94 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle) descsize = desccount * vfs_opv_numops * sizeof(PFI); MALLOC(descptr, PFI *, descsize, - M_TEMP, M_WAITOK); + M_TEMP, M_WAITOK); bzero(descptr, descsize); newvfstbl->vfc_descptr = descptr; newvfstbl->vfc_descsize = descsize; - + newvfstbl->vfc_sysctl = NULL; - for (i= 0; i< desccount; i++ ) { - opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p; - /* - * Fill in the caller's pointer to the start of the i'th vector. - * They'll need to supply it when calling vnode_create. - */ - opv_desc_vector = descptr + i * vfs_opv_numops; - *opv_desc_vector_p = opv_desc_vector; + for (i = 0; i < desccount; i++) { + opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p; + /* + * Fill in the caller's pointer to the start of the i'th vector. + * They'll need to supply it when calling vnode_create. + */ + opv_desc_vector = descptr + i * vfs_opv_numops; + *opv_desc_vector_p = opv_desc_vector; - for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) { - opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]); + for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) { + opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]); - /* Silently skip known-disabled operations */ - if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) { - printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n", - vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name); - continue; + /* Silently skip known-disabled operations */ + if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) { + printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n", + vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name); + continue; + } + + /* + * Sanity check: is this operation listed + * in the list of operations? We check this + * by seeing if its offset is zero. Since + * the default routine should always be listed + * first, it should be the only one with a zero + * offset. Any other operation with a zero + * offset is probably not listed in + * vfs_op_descs, and so is probably an error. + * + * A panic here means the layer programmer + * has committed the all-too common bug + * of adding a new operation to the layer's + * list of vnode operations but + * not adding the operation to the system-wide + * list of supported operations. + */ + if (opve_descp->opve_op->vdesc_offset == 0 && + opve_descp->opve_op != VDESC(vnop_default)) { + printf("vfs_fsadd: operation %s not listed in %s.\n", + opve_descp->opve_op->vdesc_name, + "vfs_op_descs"); + panic("vfs_fsadd: bad operation"); + } + /* + * Fill in this entry. + */ + opv_desc_vector[opve_descp->opve_op->vdesc_offset] = + opve_descp->opve_impl; } + /* - * Sanity check: is this operation listed - * in the list of operations? We check this - * by seeing if its offset is zero. Since - * the default routine should always be listed - * first, it should be the only one with a zero - * offset. Any other operation with a zero - * offset is probably not listed in - * vfs_op_descs, and so is probably an error. 
- * - * A panic here means the layer programmer - * has committed the all-too common bug - * of adding a new operation to the layer's - * list of vnode operations but - * not adding the operation to the system-wide - * list of supported operations. + * Finally, go back and replace unfilled routines + * with their default. (Sigh, an O(n^3) algorithm. I + * could make it better, but that'd be work, and n is small.) */ - if (opve_descp->opve_op->vdesc_offset == 0 && - opve_descp->opve_op != VDESC(vnop_default)) { - printf("vfs_fsadd: operation %s not listed in %s.\n", - opve_descp->opve_op->vdesc_name, - "vfs_op_descs"); - panic("vfs_fsadd: bad operation"); - } + opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p; + /* - * Fill in this entry. + * Force every operations vector to have a default routine. */ - opv_desc_vector[opve_descp->opve_op->vdesc_offset] = - opve_descp->opve_impl; - } - - - /* - * Finally, go back and replace unfilled routines - * with their default. (Sigh, an O(n^3) algorithm. I - * could make it better, but that'd be work, and n is small.) - */ - opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p; - - /* - * Force every operations vector to have a default routine. - */ - opv_desc_vector = *opv_desc_vector_p; - if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) - panic("vfs_fsadd: operation vector without default routine."); - for (j = 0; j < vfs_opv_numops; j++) - if (opv_desc_vector[j] == NULL) - opv_desc_vector[j] = - opv_desc_vector[VOFFSET(vnop_default)]; - + opv_desc_vector = *opv_desc_vector_p; + if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) { + panic("vfs_fsadd: operation vector without default routine."); + } + for (j = 0; j < vfs_opv_numops; j++) { + if (opv_desc_vector[j] == NULL) { + opv_desc_vector[j] = + opv_desc_vector[VOFFSET(vnop_default)]; + } + } } /* end of each vnodeopv_desc parsing */ - + *handle = vfstable_add(newvfstbl); - if (newvfstbl->vfc_typenum <= maxvfstypenum ) - maxvfstypenum = newvfstbl->vfc_typenum + 1; + if (newvfstbl->vfc_typenum <= maxvfstypenum) { + maxvfstypenum = newvfstbl->vfc_typenum + 1; + } if (newvfstbl->vfc_vfsops->vfs_init) { struct vfsconf vfsc; @@ -1024,28 +1065,28 @@ vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle) FREE(newvfstbl, M_TEMP); - return(0); + return 0; } /* * Removes the filesystem from kernel. - * The argument passed in is the handle that was given when + * The argument passed in is the handle that was given when * file system was added */ -errno_t +errno_t vfs_fsremove(vfstable_t handle) { struct vfstable * vfstbl = (struct vfstable *)handle; void *old_desc = NULL; errno_t err; - + /* Preflight check for any mounts */ mount_list_lock(); - if ( vfstbl->vfc_refcount != 0 ) { + if (vfstbl->vfc_refcount != 0) { mount_list_unlock(); return EBUSY; } - + /* * save the old descriptor; the free cannot occur unconditionally, * since vfstable_del() may fail. @@ -1062,10 +1103,11 @@ vfs_fsremove(vfstable_t handle) FREE(old_desc, M_TEMP); } - return(err); + return err; } -void vfs_setowner(mount_t mp, uid_t uid, gid_t gid) +void +vfs_setowner(mount_t mp, uid_t uid, gid_t gid) { mp->mnt_fsowner = uid; mp->mnt_fsgroup = gid; @@ -1077,30 +1119,32 @@ void vfs_setowner(mount_t mp, uid_t uid, gid_t gid) * it isn't either. Point is: be prepared to deal with strange values * being returned. 
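[Editorial aside, not part of the patch: a sketch of the vfs_fsadd()/vfs_fsremove() registration lifecycle implemented above. All examplefs_* names are hypothetical and the flag choice is only illustrative; vfe_vopcnt must be 1..8 per the check in vfs_fsadd().]

#include <sys/mount.h>

static struct vnodeopv_desc *examplefs_opvdescs[] = {
    &examplefs_vnodeop_opv_desc,         /* hypothetical vnodeopv_desc */
};

static vfstable_t examplefs_handle;

int
examplefs_register(void)
{
    struct vfs_fsentry vfe;

    bzero(&vfe, sizeof(vfe));
    vfe.vfe_vfsops = &examplefs_vfsops;  /* hypothetical struct vfsops */
    vfe.vfe_vopcnt = 1;
    vfe.vfe_opvdescs = examplefs_opvdescs;
    strlcpy(vfe.vfe_fsname, "examplefs", sizeof(vfe.vfe_fsname));
    /* non-threadsafe filesystems are rejected, so advertise thread safety */
    vfe.vfe_flags = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;

    return vfs_fsadd(&vfe, &examplefs_handle);
}

int
examplefs_unregister(void)
{
    return vfs_fsremove(examplefs_handle);   /* EBUSY while mounts remain */
}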
*/ -uint64_t vfs_idle_time(mount_t mp) +uint64_t +vfs_idle_time(mount_t mp) { - if (mp->mnt_pending_write_size) + if (mp->mnt_pending_write_size) { return 0; + } struct timeval now; microuptime(&now); - return ((now.tv_sec - - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000 - + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec); + return (now.tv_sec + - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000 + + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec; } int vfs_context_pid(vfs_context_t ctx) { - return (proc_pid(vfs_context_proc(ctx))); + return proc_pid(vfs_context_proc(ctx)); } int vfs_context_suser(vfs_context_t ctx) { - return (suser(ctx->vc_ucred, NULL)); + return suser(ctx->vc_ucred, NULL); } /* @@ -1113,9 +1157,10 @@ int vfs_context_issignal(vfs_context_t ctx, sigset_t mask) { proc_t p = vfs_context_proc(ctx); - if (p) - return(proc_pendingsignals(p, mask)); - return(0); + if (p) { + return proc_pendingsignals(p, mask); + } + return 0; } int @@ -1123,9 +1168,10 @@ vfs_context_is64bit(vfs_context_t ctx) { proc_t proc = vfs_context_proc(ctx); - if (proc) - return(proc_is64bit(proc)); - return(0); + if (proc) { + return proc_is64bit(proc); + } + return 0; } @@ -1155,35 +1201,38 @@ vfs_context_is64bit(vfs_context_t ctx) proc_t vfs_context_proc(vfs_context_t ctx) { - proc_t proc = NULL; + proc_t proc = NULL; - if (ctx != NULL && ctx->vc_thread != NULL) + if (ctx != NULL && ctx->vc_thread != NULL) { proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread); - if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) + } + if (proc != NULL && (proc->p_fd == NULL || (proc->p_lflag & P_LVFORK))) { proc = NULL; + } - return(proc == NULL ? current_proc() : proc); + return proc == NULL ? current_proc() : proc; } /* * vfs_context_get_special_port * * Description: Return the requested special port from the task associated - * with the given context. + * with the given context. * * Parameters: vfs_context_t The context to use - * int Index of special port - * ipc_port_t * Pointer to returned port + * int Index of special port + * ipc_port_t * Pointer to returned port * * Returns: kern_return_t see task_get_special_port() */ kern_return_t vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp) { - task_t task = NULL; + task_t task = NULL; - if (ctx != NULL && ctx->vc_thread != NULL) + if (ctx != NULL && ctx->vc_thread != NULL) { task = get_threadtask(ctx->vc_thread); + } return task_get_special_port(task, which, portp); } @@ -1192,21 +1241,22 @@ vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp) * vfs_context_set_special_port * * Description: Set the requested special port in the task associated - * with the given context. + * with the given context. 
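[Editorial aside, not part of the patch: vfs_idle_time() above returns 0 while writes are in flight, otherwise the elapsed microseconds since the last completed write; as its comment warns, be prepared for strange values. A sketch with an arbitrary five-second threshold and a hypothetical name:]

#include <sys/mount.h>

static int
mount_is_quiescent(mount_t mp)
{
    uint64_t idle_usecs = vfs_idle_time(mp);   /* 0 => writes pending */

    return idle_usecs > 5ULL * 1000 * 1000;
}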
* * Parameters: vfs_context_t The context to use - * int Index of special port - * ipc_port_t New special port + * int Index of special port + * ipc_port_t New special port * * Returns: kern_return_t see task_set_special_port() */ kern_return_t vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port) { - task_t task = NULL; + task_t task = NULL; - if (ctx != NULL && ctx->vc_thread != NULL) + if (ctx != NULL && ctx->vc_thread != NULL) { task = get_threadtask(ctx->vc_thread); + } return task_set_special_port(task, which, port); } @@ -1231,7 +1281,7 @@ vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port) thread_t vfs_context_thread(vfs_context_t ctx) { - return(ctx->vc_thread); + return ctx->vc_thread; } @@ -1256,7 +1306,7 @@ vfs_context_cwd(vfs_context_t ctx) { vnode_t cwd = NULLVP; - if(ctx != NULL && ctx->vc_thread != NULL) { + if (ctx != NULL && ctx->vc_thread != NULL) { uthread_t uth = get_bsdthread_info(ctx->vc_thread); proc_t proc; @@ -1266,25 +1316,26 @@ vfs_context_cwd(vfs_context_t ctx) */ if ((cwd = uth->uu_cdir) == NULLVP && (proc = (proc_t)get_bsdthreadtask_info(ctx->vc_thread)) != NULL && - proc->p_fd != NULL) + proc->p_fd != NULL) { cwd = proc->p_fd->fd_cdir; + } } - return(cwd); + return cwd; } /* * vfs_context_create * - * Description: Allocate and initialize a new context. + * Description: Allocate and initialize a new context. * - * Parameters: vfs_context_t: Context to copy, or NULL for new + * Parameters: vfs_context_t: Context to copy, or NULL for new * * Returns: Pointer to new context * - * Notes: Copy cred and thread from argument, if available; else - * initialize with current thread and new cred. Returns - * with a reference held on the credential. + * Notes: Copy cred and thread from argument, if available; else + * initialize with current thread and new cred. Returns + * with a reference held on the credential. */ vfs_context_t vfs_context_create(vfs_context_t ctx) @@ -1302,12 +1353,13 @@ vfs_context_create(vfs_context_t ctx) newcontext->vc_thread = current_thread(); safecred = kauth_cred_get(); } - if (IS_VALID_CRED(safecred)) + if (IS_VALID_CRED(safecred)) { kauth_cred_ref(safecred); + } newcontext->vc_ucred = safecred; - return(newcontext); + return newcontext; } - return(NULL); + return NULL; } @@ -1317,13 +1369,13 @@ vfs_context_current(void) vfs_context_t ctx = NULL; volatile uthread_t ut = (uthread_t)get_bsdthread_info(current_thread()); - if (ut != NULL ) { + if (ut != NULL) { if (ut->uu_context.vc_ucred != NULL) { ctx = &ut->uu_context; } } - return(ctx == NULL ? vfs_context_kernel() : ctx); + return ctx == NULL ? 
vfs_context_kernel() : ctx; } @@ -1348,12 +1400,14 @@ static struct vfs_context kerncontext; vfs_context_t vfs_context_kernel(void) { - if (kerncontext.vc_ucred == NOCRED) + if (kerncontext.vc_ucred == NOCRED) { kerncontext.vc_ucred = kernproc->p_ucred; - if (kerncontext.vc_thread == NULL) + } + if (kerncontext.vc_thread == NULL) { kerncontext.vc_thread = proc_thread(kernproc); + } - return(&kerncontext); + return &kerncontext; } @@ -1361,18 +1415,19 @@ int vfs_context_rele(vfs_context_t ctx) { if (ctx) { - if (IS_VALID_CRED(ctx->vc_ucred)) + if (IS_VALID_CRED(ctx->vc_ucred)) { kauth_cred_unref(&ctx->vc_ucred); + } kfree(ctx, sizeof(struct vfs_context)); } - return(0); + return 0; } kauth_cred_t vfs_context_ucred(vfs_context_t ctx) { - return (ctx->vc_ucred); + return ctx->vc_ucred; } /* @@ -1381,10 +1436,11 @@ vfs_context_ucred(vfs_context_t ctx) int vfs_context_issuser(vfs_context_t ctx) { - return(kauth_cred_issuser(vfs_context_ucred(ctx))); + return kauth_cred_issuser(vfs_context_ucred(ctx)); } -int vfs_context_iskernel(vfs_context_t ctx) +int +vfs_context_iskernel(vfs_context_t ctx) { return ctx == &kerncontext; } @@ -1392,15 +1448,15 @@ int vfs_context_iskernel(vfs_context_t ctx) /* * Given a context, for all fields of vfs_context_t which * are not held with a reference, set those fields to the - * values for the current execution context. Currently, this + * values for the current execution context. Currently, this * just means the vc_thread. * * Returns: 0 for success, nonzero for failure * * The intended use is: * 1. vfs_context_create() gets the caller a context - * 2. vfs_context_bind() sets the unrefcounted data - * 3. vfs_context_rele() releases the context + * 2. vfs_context_bind() sets the unrefcounted data + * 3. vfs_context_rele() releases the context * */ int @@ -1410,31 +1466,32 @@ vfs_context_bind(vfs_context_t ctx) return 0; } -int vfs_isswapmount(mount_t mnt) +int +vfs_isswapmount(mount_t mnt) { return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0; } /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */ - + /* * Convert between vnode types and inode formats (since POSIX.1 * defines mode word of stat structure in terms of inode formats). 
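[Editorial aside, not part of the patch: the intended create/bind/rele lifecycle spelled out in the vfs_context_bind() comment above, as a sketch; the function name is hypothetical.]

#include <sys/vnode.h>
#include <sys/errno.h>

static int
context_lifecycle_demo(void)
{
    vfs_context_t ctx;
    int issuser;

    ctx = vfs_context_create(NULL);  /* current thread, ref on current cred */
    if (ctx == NULL) {
        return ENOMEM;
    }
    vfs_context_bind(ctx);           /* refresh the unrefcounted vc_thread */
    issuser = vfs_context_issuser(ctx);
    vfs_context_rele(ctx);           /* drop the cred ref and free the ctx */

    return issuser ? 0 : EPERM;
}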
*/ -enum vtype +enum vtype vnode_iftovt(int mode) { - return(iftovt_tab[((mode) & S_IFMT) >> 12]); + return iftovt_tab[((mode) & S_IFMT) >> 12]; } -int +int vnode_vttoif(enum vtype indx) { - return(vttoif_tab[(int)(indx)]); + return vttoif_tab[(int)(indx)]; } -int +int vnode_makeimode(int indx, int mode) { return (int)(VTTOIF(indx) | (mode)); @@ -1446,164 +1503,168 @@ vnode_makeimode(int indx, int mode) */ /* returns system root vnode iocount; It should be released using vnode_put() */ -vnode_t +vnode_t vfs_rootvnode(void) { int error; error = vnode_get(rootvnode); - if (error) - return ((vnode_t)0); - else + if (error) { + return (vnode_t)0; + } else { return rootvnode; -} + } +} -uint32_t +uint32_t vnode_vid(vnode_t vp) { - return ((uint32_t)(vp->v_id)); -} + return (uint32_t)(vp->v_id); +} -mount_t +mount_t vnode_mount(vnode_t vp) { - return (vp->v_mount); + return vp->v_mount; } #if CONFIG_IOSCHED vnode_t vnode_mountdevvp(vnode_t vp) { - if (vp->v_mount) - return (vp->v_mount->mnt_devvp); - else - return ((vnode_t)0); + if (vp->v_mount) { + return vp->v_mount->mnt_devvp; + } else { + return (vnode_t)0; + } } #endif -mount_t +mount_t vnode_mountedhere(vnode_t vp) { mount_t mp; if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) && - (mp->mnt_vnodecovered == vp)) - return (mp); - else + (mp->mnt_vnodecovered == vp)) { + return mp; + } else { return (mount_t)NULL; + } } /* returns vnode type of vnode_t */ -enum vtype +enum vtype vnode_vtype(vnode_t vp) { - return (vp->v_type); + return vp->v_type; } /* returns FS specific node saved in vnode */ -void * +void * vnode_fsnode(vnode_t vp) { - return (vp->v_data); + return vp->v_data; } -void +void vnode_clearfsnode(vnode_t vp) { vp->v_data = NULL; } -dev_t +dev_t vnode_specrdev(vnode_t vp) { - return(vp->v_rdev); + return vp->v_rdev; } /* Accessor functions */ /* is vnode_t a root vnode */ -int +int vnode_isvroot(vnode_t vp) { - return ((vp->v_flag & VROOT)? 1 : 0); + return (vp->v_flag & VROOT)? 1 : 0; } /* is vnode_t a system vnode */ -int +int vnode_issystem(vnode_t vp) { - return ((vp->v_flag & VSYSTEM)? 1 : 0); + return (vp->v_flag & VSYSTEM)? 1 : 0; } /* is vnode_t a swap file vnode */ -int +int vnode_isswap(vnode_t vp) { - return ((vp->v_flag & VSWAP)? 1 : 0); + return (vp->v_flag & VSWAP)? 1 : 0; } /* is vnode_t a tty */ int vnode_istty(vnode_t vp) { - return ((vp->v_flag & VISTTY) ? 1 : 0); + return (vp->v_flag & VISTTY) ? 1 : 0; } /* if vnode_t mount operation in progress */ -int +int vnode_ismount(vnode_t vp) { - return ((vp->v_flag & VMOUNT)? 1 : 0); + return (vp->v_flag & VMOUNT)? 1 : 0; } /* is this vnode under recyle now */ -int +int vnode_isrecycled(vnode_t vp) { int ret; vnode_lock_spin(vp); - ret = (vp->v_lflag & (VL_TERMINATE|VL_DEAD))? 1 : 0; + ret = (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0; vnode_unlock(vp); - return(ret); + return ret; } /* vnode was created by background task requesting rapid aging - and has not since been referenced by a normal task */ + * and has not since been referenced by a normal task */ int vnode_israge(vnode_t vp) { - return ((vp->v_flag & VRAGE)? 1 : 0); + return (vp->v_flag & VRAGE)? 1 : 0; } int vnode_needssnapshots(vnode_t vp) { - return ((vp->v_flag & VNEEDSSNAPSHOT)? 1 : 0); + return (vp->v_flag & VNEEDSSNAPSHOT)? 
1 : 0; } /* Check the process/thread to see if we should skip atime updates */ int -vfs_ctx_skipatime (vfs_context_t ctx) { +vfs_ctx_skipatime(vfs_context_t ctx) +{ struct uthread *ut; proc_t proc; thread_t thr; proc = vfs_context_proc(ctx); - thr = vfs_context_thread (ctx); + thr = vfs_context_thread(ctx); /* Validate pointers in case we were invoked via a kernel context */ if (thr && proc) { - ut = get_bsdthread_info (thr); + ut = get_bsdthread_info(thr); if (proc->p_lflag & P_LRAGE_VNODES) { return 1; } if (ut) { - if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) { + if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) { return 1; } } @@ -1616,10 +1677,10 @@ vfs_ctx_skipatime (vfs_context_t ctx) { } /* is vnode_t marked to not keep data cached once it's been consumed */ -int +int vnode_isnocache(vnode_t vp) { - return ((vp->v_flag & VNOCACHE_DATA)? 1 : 0); + return (vp->v_flag & VNOCACHE_DATA)? 1 : 0; } /* @@ -1628,48 +1689,48 @@ vnode_isnocache(vnode_t vp) int vnode_isnoreadahead(vnode_t vp) { - return ((vp->v_flag & VRAOFF)? 1 : 0); + return (vp->v_flag & VRAOFF)? 1 : 0; } int vnode_is_openevt(vnode_t vp) { - return ((vp->v_flag & VOPENEVT)? 1 : 0); + return (vp->v_flag & VOPENEVT)? 1 : 0; } /* is vnode_t a standard one? */ -int +int vnode_isstandard(vnode_t vp) { - return ((vp->v_flag & VSTANDARD)? 1 : 0); + return (vp->v_flag & VSTANDARD)? 1 : 0; } /* don't vflush() if SKIPSYSTEM */ -int +int vnode_isnoflush(vnode_t vp) { - return ((vp->v_flag & VNOFLUSH)? 1 : 0); + return (vp->v_flag & VNOFLUSH)? 1 : 0; } /* is vnode_t a regular file */ -int +int vnode_isreg(vnode_t vp) { - return ((vp->v_type == VREG)? 1 : 0); + return (vp->v_type == VREG)? 1 : 0; } /* is vnode_t a directory? */ -int +int vnode_isdir(vnode_t vp) { - return ((vp->v_type == VDIR)? 1 : 0); + return (vp->v_type == VDIR)? 1 : 0; } /* is vnode_t a symbolic link ? */ -int +int vnode_islnk(vnode_t vp) { - return ((vp->v_type == VLNK)? 1 : 0); + return (vp->v_type == VLNK)? 1 : 0; } int @@ -1691,13 +1752,12 @@ vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp) goto yes; } #endif /* CONFIG_TRIGGERS */ - } if (vnode_islnk(vp)) { /* From lookup(): || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */ - if (cnp->cn_flags & FOLLOW) { + if (cnp->cn_flags & FOLLOW) { goto yes; } if (ndp->ni_flag & NAMEI_TRAILINGSLASH) { @@ -1713,102 +1773,102 @@ yes: } /* is vnode_t a fifo ? */ -int +int vnode_isfifo(vnode_t vp) { - return ((vp->v_type == VFIFO)? 1 : 0); + return (vp->v_type == VFIFO)? 1 : 0; } /* is vnode_t a block device? */ -int +int vnode_isblk(vnode_t vp) { - return ((vp->v_type == VBLK)? 1 : 0); + return (vp->v_type == VBLK)? 1 : 0; } int vnode_isspec(vnode_t vp) { - return (((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0); + return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0; } /* is vnode_t a char device? */ -int +int vnode_ischr(vnode_t vp) { - return ((vp->v_type == VCHR)? 1 : 0); + return (vp->v_type == VCHR)? 1 : 0; } /* is vnode_t a socket? */ -int +int vnode_issock(vnode_t vp) { - return ((vp->v_type == VSOCK)? 1 : 0); + return (vp->v_type == VSOCK)? 1 : 0; } /* is vnode_t a device with multiple active vnodes referring to it? */ int vnode_isaliased(vnode_t vp) -{ +{ enum vtype vt = vp->v_type; if (!((vt == VCHR) || (vt == VBLK))) { return 0; } else { - return (vp->v_specflags & SI_ALIASED); + return vp->v_specflags & SI_ALIASED; } } /* is vnode_t a named stream? 
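[Editorial aside, not part of the patch: the predicates above compose naturally; a sketch of a guard a filesystem-agnostic caller might use. The name and the policy itself are made up.]

#include <sys/vnode.h>
#include <sys/mount.h>

static int
can_truncate(vnode_t vp)
{
    /* only regular files on writable mounts qualify */
    return vnode_isreg(vp) && !vfs_isrdonly(vnode_mount(vp));
}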
*/ -int +int vnode_isnamedstream( #if NAMEDSTREAMS - vnode_t vp + vnode_t vp #else - __unused vnode_t vp + __unused vnode_t vp #endif - ) + ) { #if NAMEDSTREAMS - return ((vp->v_flag & VISNAMEDSTREAM) ? 1 : 0); + return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0; #else - return (0); + return 0; #endif } -int +int vnode_isshadow( #if NAMEDSTREAMS - vnode_t vp + vnode_t vp #else - __unused vnode_t vp + __unused vnode_t vp #endif - ) + ) { #if NAMEDSTREAMS - return ((vp->v_flag & VISSHADOW) ? 1 : 0); + return (vp->v_flag & VISSHADOW) ? 1 : 0; #else - return (0); + return 0; #endif } /* does vnode have associated named stream vnodes ? */ -int +int vnode_hasnamedstreams( #if NAMEDSTREAMS - vnode_t vp + vnode_t vp #else - __unused vnode_t vp + __unused vnode_t vp #endif - ) + ) { #if NAMEDSTREAMS - return ((vp->v_lflag & VL_HASSTREAMS) ? 1 : 0); + return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0; #else - return (0); + return 0; #endif } /* TBD: set vnode_t to not cache data after it is consumed once; used for quota */ -void +void vnode_setnocache(vnode_t vp) { vnode_lock_spin(vp); @@ -1816,7 +1876,7 @@ vnode_setnocache(vnode_t vp) vnode_unlock(vp); } -void +void vnode_clearnocache(vnode_t vp) { vnode_lock_spin(vp); @@ -1841,7 +1901,7 @@ vnode_clear_openevt(vnode_t vp) } -void +void vnode_setnoreadahead(vnode_t vp) { vnode_lock_spin(vp); @@ -1849,7 +1909,7 @@ vnode_setnoreadahead(vnode_t vp) vnode_unlock(vp); } -void +void vnode_clearnoreadahead(vnode_t vp) { vnode_lock_spin(vp); @@ -1860,7 +1920,7 @@ vnode_clearnoreadahead(vnode_t vp) int vnode_isfastdevicecandidate(vnode_t vp) { - return ((vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0); + return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0; } void @@ -1882,7 +1942,7 @@ vnode_clearfastdevicecandidate(vnode_t vp) int vnode_isautocandidate(vnode_t vp) { - return ((vp->v_flag & VAUTOCANDIDATE)? 1 : 0); + return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0; } void @@ -1905,7 +1965,7 @@ vnode_clearautocandidate(vnode_t vp) /* mark vnode_t to skip vflush() is SKIPSYSTEM */ -void +void vnode_setnoflush(vnode_t vp) { vnode_lock_spin(vp); @@ -1913,7 +1973,7 @@ vnode_setnoflush(vnode_t vp) vnode_unlock(vp); } -void +void vnode_clearnoflush(vnode_t vp) { vnode_lock_spin(vp); @@ -1923,13 +1983,13 @@ vnode_clearnoflush(vnode_t vp) /* is vnode_t a blkdevice and has a FS mounted on it */ -int +int vnode_ismountedon(vnode_t vp) { - return ((vp->v_specflags & SI_MOUNTEDON)? 1 : 0); + return (vp->v_specflags & SI_MOUNTEDON)? 
1 : 0; } -void +void vnode_setmountedon(vnode_t vp) { vnode_lock_spin(vp); @@ -1937,7 +1997,7 @@ vnode_setmountedon(vnode_t vp) vnode_unlock(vp); } -void +void vnode_clearmountedon(vnode_t vp) { vnode_lock_spin(vp); @@ -1950,20 +2010,18 @@ void vnode_settag(vnode_t vp, int tag) { vp->v_tag = tag; - } int vnode_tag(vnode_t vp) { - return(vp->v_tag); + return vp->v_tag; } -vnode_t +vnode_t vnode_parent(vnode_t vp) { - - return(vp->v_parent); + return vp->v_parent; } void @@ -1979,106 +2037,106 @@ vnode_setname(vnode_t vp, char * name) } /* return the registered FS name when adding the FS to kernel */ -void +void vnode_vfsname(vnode_t vp, char * buf) { - strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN); + strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN); } /* return the FS type number */ -int +int vnode_vfstypenum(vnode_t vp) { - return(vp->v_mount->mnt_vtable->vfc_typenum); + return vp->v_mount->mnt_vtable->vfc_typenum; } int -vnode_vfs64bitready(vnode_t vp) +vnode_vfs64bitready(vnode_t vp) { - - /* + /* * Checking for dead_mountp is a bit of a hack for SnowLeopard: */ - if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) - return(1); - else - return(0); + if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) { + return 1; + } else { + return 0; + } } /* return the visible flags on associated mount point of vnode_t */ -uint32_t +uint32_t vnode_vfsvisflags(vnode_t vp) { - return(vp->v_mount->mnt_flag & MNT_VISFLAGMASK); + return vp->v_mount->mnt_flag & MNT_VISFLAGMASK; } /* return the command modifier flags on associated mount point of vnode_t */ -uint32_t +uint32_t vnode_vfscmdflags(vnode_t vp) { - return(vp->v_mount->mnt_flag & MNT_CMDFLAGS); + return vp->v_mount->mnt_flag & MNT_CMDFLAGS; } /* return the max symlink of short links of vnode_t */ -uint32_t +uint32_t vnode_vfsmaxsymlen(vnode_t vp) { - return(vp->v_mount->mnt_maxsymlinklen); + return vp->v_mount->mnt_maxsymlinklen; } /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */ struct vfsstatfs * vnode_vfsstatfs(vnode_t vp) { - return(&vp->v_mount->mnt_vfsstat); + return &vp->v_mount->mnt_vfsstat; } /* return a handle to the FSs specific private handle associated with vnode_t's mount point */ void * vnode_vfsfsprivate(vnode_t vp) { - return(vp->v_mount->mnt_data); + return vp->v_mount->mnt_data; } /* is vnode_t in a rdonly mounted FS */ -int +int vnode_vfsisrdonly(vnode_t vp) { - return ((vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0); + return (vp->v_mount->mnt_flag & MNT_RDONLY)? 
1 : 0; } int -vnode_compound_rename_available(vnode_t vp) +vnode_compound_rename_available(vnode_t vp) { return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME); } int -vnode_compound_rmdir_available(vnode_t vp) +vnode_compound_rmdir_available(vnode_t vp) { return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR); } int -vnode_compound_mkdir_available(vnode_t vp) +vnode_compound_mkdir_available(vnode_t vp) { return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR); } int -vnode_compound_remove_available(vnode_t vp) +vnode_compound_remove_available(vnode_t vp) { return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE); } int -vnode_compound_open_available(vnode_t vp) +vnode_compound_open_available(vnode_t vp) { return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN); } int -vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid) +vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid) { - return ((vp->v_mount->mnt_compound_ops & opid) != 0); + return (vp->v_mount->mnt_compound_ops & opid) != 0; } /* @@ -2087,22 +2145,23 @@ vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid) * * XXX Published, but not used. */ -vnode_t +vnode_t current_workingdir(void) { return vfs_context_cwd(vfs_context_current()); } /* returns vnode ref to current root(chroot) directory */ -vnode_t +vnode_t current_rootdir(void) { proc_t proc = current_proc(); - struct vnode * vp ; + struct vnode * vp; - if ( (vp = proc->p_fd->fd_rdir) ) { - if ( (vnode_getwithref(vp)) ) - return (NULL); + if ((vp = proc->p_fd->fd_rdir)) { + if ((vnode_getwithref(vp))) { + return NULL; + } } return vp; } @@ -2144,12 +2203,12 @@ static int vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) { kauth_filesec_t fsec; - uio_t fsec_uio; - size_t fsec_size; - size_t xsize, rsize; - int error; - uint32_t host_fsec_magic; - uint32_t host_acl_entrycount; + uio_t fsec_uio; + size_t fsec_size; + size_t xsize, rsize; + int error; + uint32_t host_fsec_magic; + uint32_t host_acl_entrycount; fsec = NULL; fsec_uio = NULL; @@ -2158,8 +2217,9 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx); if (error != 0) { /* no EA, no filesec */ - if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) + if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) { error = 0; + } /* either way, we are done */ goto out; } @@ -2171,11 +2231,11 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) * rather than partial entries. Otherwise, we ignore it. */ if (!KAUTH_FILESEC_VALID(xsize)) { - KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize); + KAUTH_DEBUG(" ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize); error = 0; goto out; } - + /* how many entries would fit? 
*/ fsec_size = KAUTH_FILESEC_COUNT(xsize); if (fsec_size > KAUTH_ACL_MAX_ENTRIES) { @@ -2188,7 +2248,7 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) if (((fsec = kauth_filesec_alloc(fsec_size)) == NULL) || ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) || uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) { - KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL"); + KAUTH_DEBUG(" ERROR - could not allocate iov to read ACL"); error = ENOMEM; goto out; } @@ -2196,15 +2256,15 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) /* read security attribute */ rsize = xsize; if ((error = vn_getxattr(vp, - KAUTH_FILESEC_XATTR, - fsec_uio, - &rsize, - XATTR_NOSECURITY, - ctx)) != 0) { - + KAUTH_FILESEC_XATTR, + fsec_uio, + &rsize, + XATTR_NOSECURITY, + ctx)) != 0) { /* no attribute - no security data */ - if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) + if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) { error = 0; + } /* either way, we are done */ goto out; } @@ -2234,7 +2294,7 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount); goto out; } - if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) { + if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) { KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize); goto out; } @@ -2246,13 +2306,16 @@ vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx) fsec = NULL; error = 0; out: - if (fsec != NULL) + if (fsec != NULL) { kauth_filesec_free(fsec); - if (fsec_uio != NULL) + } + if (fsec_uio != NULL) { uio_free(fsec_uio); - if (error) + } + if (error) { *fsecp = NULL; - return(error); + } + return error; } /* @@ -2289,14 +2352,14 @@ out: static int vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx) { - uio_t fsec_uio; - int error; - uint32_t saved_acl_copysize; + uio_t fsec_uio; + int error; + uint32_t saved_acl_copysize; fsec_uio = NULL; - + if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) { - KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL"); + KAUTH_DEBUG(" ERROR - could not allocate iov to write ACL"); error = ENOMEM; goto out; } @@ -2313,16 +2376,17 @@ vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context error = vn_setxattr(vp, KAUTH_FILESEC_XATTR, fsec_uio, - XATTR_NOSECURITY, /* we have auth'ed already */ + XATTR_NOSECURITY, /* we have auth'ed already */ ctx); VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error); kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl); out: - if (fsec_uio != NULL) + if (fsec_uio != NULL) { uio_free(fsec_uio); - return(error); + } + return error; } @@ -2341,15 +2405,16 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) { kauth_filesec_t fsec; kauth_acl_t facl; - int error; - uid_t nuid; - gid_t ngid; + int error; + uid_t nuid; + gid_t ngid; /* * Reject attempts to fetch unknown attributes. 
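[Editorial aside, not part of the patch: callers are not expected to use vnode_get_filesec()/vnode_set_filesec() directly; they go through vnode_getattr(), which synthesizes va_acl from the KAUTH_FILESEC_XATTR EA on filesystems without native support. A sketch with a hypothetical name, assuming the requester owns a returned ACL:]

#include <sys/vnode.h>
#include <sys/kauth.h>

static int
fetch_acl(vnode_t vp, vfs_context_t ctx)
{
    struct vnode_attr va;
    int error;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_acl);

    error = vnode_getattr(vp, &va, ctx);
    if (error == 0 && VATTR_IS_SUPPORTED(&va, va_acl) && va.va_acl != NULL) {
        /* ... examine the kauth_acl_t ... */
        kauth_acl_free(va.va_acl);   /* requester owns the ACL buffer */
    }
    return error;
}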
*/ - if (vap->va_active & ~VNODE_ATTR_ALL) - return (EINVAL); + if (vap->va_active & ~VNODE_ATTR_ALL) { + return EINVAL; + } /* don't ask for extended security data if the filesystem doesn't support it */ if (!vfs_extendedsecurity(vnode_mount(vp))) { @@ -2370,7 +2435,7 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) VATTR_SET_ACTIVE(vap, va_total_size); VATTR_SET_ACTIVE(vap, va_total_alloc); } - + error = VNOP_GETATTR(vp, vap, ctx); if (error) { KAUTH_DEBUG("ERROR - returning %d", error); @@ -2386,8 +2451,9 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) if (XATTR_VNODE_SUPPORTED(vp)) { /* try to get the filesec */ - if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) + if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) { goto out; + } } /* if no filesec, no attributes */ if (fsec == NULL) { @@ -2395,7 +2461,6 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) VATTR_RETURN(vap, va_uuuid, kauth_null_guid); VATTR_RETURN(vap, va_guuid, kauth_null_guid); } else { - /* looks good, try to return what we were asked for */ VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner); VATTR_RETURN(vap, va_guuid, fsec->fsec_group); @@ -2424,12 +2489,13 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) * only expect what they asked for. */ if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) { - if (vap->va_acl != NULL) + if (vap->va_acl != NULL) { kauth_acl_free(vap->va_acl); + } VATTR_CLEAR_SUPPORTED(vap, va_acl); } -#if 0 /* enable when we have a filesystem only supporting UUIDs */ +#if 0 /* enable when we have a filesystem only supporting UUIDs */ /* * Handle the case where we need a UID/GID, but only have extended * security information. @@ -2437,17 +2503,19 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) if (VATTR_NOT_RETURNED(vap, va_uid) && VATTR_IS_SUPPORTED(vap, va_uuuid) && !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) { - if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) + if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) { VATTR_RETURN(vap, va_uid, nuid); + } } if (VATTR_NOT_RETURNED(vap, va_gid) && VATTR_IS_SUPPORTED(vap, va_guuid) && !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) { - if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) + if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) { VATTR_RETURN(vap, va_gid, ngid); + } } #endif - + /* * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here. 
*/ @@ -2456,16 +2524,18 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) nuid = vap->va_uid; } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) { nuid = vp->v_mount->mnt_fsowner; - if (nuid == KAUTH_UID_NONE) + if (nuid == KAUTH_UID_NONE) { nuid = 99; + } } else if (VATTR_IS_SUPPORTED(vap, va_uid)) { nuid = vap->va_uid; } else { /* this will always be something sensible */ nuid = vp->v_mount->mnt_fsowner; } - if ((nuid == 99) && !vfs_context_issuser(ctx)) + if ((nuid == 99) && !vfs_context_issuser(ctx)) { nuid = kauth_cred_getuid(vfs_context_ucred(ctx)); + } VATTR_RETURN(vap, va_uid, nuid); } if (VATTR_IS_ACTIVE(vap, va_gid)) { @@ -2473,68 +2543,80 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) ngid = vap->va_gid; } else if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) { ngid = vp->v_mount->mnt_fsgroup; - if (ngid == KAUTH_GID_NONE) + if (ngid == KAUTH_GID_NONE) { ngid = 99; + } } else if (VATTR_IS_SUPPORTED(vap, va_gid)) { ngid = vap->va_gid; } else { /* this will always be something sensible */ ngid = vp->v_mount->mnt_fsgroup; } - if ((ngid == 99) && !vfs_context_issuser(ctx)) + if ((ngid == 99) && !vfs_context_issuser(ctx)) { ngid = kauth_cred_getgid(vfs_context_ucred(ctx)); + } VATTR_RETURN(vap, va_gid, ngid); } /* * Synthesise some values that can be reasonably guessed. */ - if (!VATTR_IS_SUPPORTED(vap, va_iosize)) + if (!VATTR_IS_SUPPORTED(vap, va_iosize)) { VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize); - - if (!VATTR_IS_SUPPORTED(vap, va_flags)) + } + + if (!VATTR_IS_SUPPORTED(vap, va_flags)) { VATTR_RETURN(vap, va_flags, 0); + } - if (!VATTR_IS_SUPPORTED(vap, va_filerev)) + if (!VATTR_IS_SUPPORTED(vap, va_filerev)) { VATTR_RETURN(vap, va_filerev, 0); + } - if (!VATTR_IS_SUPPORTED(vap, va_gen)) + if (!VATTR_IS_SUPPORTED(vap, va_gen)) { VATTR_RETURN(vap, va_gen, 0); + } /* * Default sizes. Ordering here is important, as later defaults build on earlier ones. */ - if (!VATTR_IS_SUPPORTED(vap, va_data_size)) + if (!VATTR_IS_SUPPORTED(vap, va_data_size)) { VATTR_RETURN(vap, va_data_size, 0); + } /* do we want any of the possibly-computed values? */ if (VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size) || VATTR_IS_ACTIVE(vap, va_total_alloc)) { - /* make sure f_bsize is valid */ - if (vp->v_mount->mnt_vfsstat.f_bsize == 0) { - if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) - goto out; - } + /* make sure f_bsize is valid */ + if (vp->v_mount->mnt_vfsstat.f_bsize == 0) { + if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) { + goto out; + } + } /* default va_data_alloc from va_data_size */ - if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) + if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) { VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize)); + } /* default va_total_size from va_data_size */ - if (!VATTR_IS_SUPPORTED(vap, va_total_size)) + if (!VATTR_IS_SUPPORTED(vap, va_total_size)) { VATTR_RETURN(vap, va_total_size, vap->va_data_size); + } /* default va_total_alloc from va_total_size which is guaranteed at this point */ - if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) + if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) { VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize)); + } } /* * If we don't have a change time, pull it from the modtime. 
*/ - if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) + if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) { VATTR_RETURN(vap, va_change_time, vap->va_modify_time); + } /* * This is really only supported for the creation VNOPs, but since the field is there @@ -2549,7 +2631,19 @@ vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) out: - return(error); + return error; +} + +/* + * Choose 32 bit or 64 bit fsid + */ +uint64_t +vnode_get_va_fsid(struct vnode_attr *vap) +{ + if (VATTR_IS_SUPPORTED(vap, va_fsid64)) { + return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32); + } + return vap->va_fsid; } /* @@ -2577,18 +2671,19 @@ out: int vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) { - int error; + int error; #if CONFIG_FSE uint64_t active; - int is_perm_change = 0; - int is_stat_change = 0; + int is_perm_change = 0; + int is_stat_change = 0; #endif /* * Reject attempts to set unknown attributes. */ - if (vap->va_active & ~VNODE_ATTR_ALL) - return (EINVAL); + if (vap->va_active & ~VNODE_ATTR_ALL) { + return EINVAL; + } /* * Make sure the filesystem is mounted R/W. @@ -2620,8 +2715,8 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) } #endif /* Check for truncation */ - if(VATTR_IS_ACTIVE(vap, va_data_size)) { - switch(vp->v_type) { + if (VATTR_IS_ACTIVE(vap, va_data_size)) { + switch (vp->v_type) { case VREG: /* For regular files it's ok */ break; @@ -2632,13 +2727,14 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) default: /* For everything else we will clear the bit and let underlying FS decide on the rest */ VATTR_CLEAR_ACTIVE(vap, va_data_size); - if (vap->va_active) + if (vap->va_active) { break; + } /* If it was the only bit set, return success, to handle cases like redirect to /dev/null */ - return (0); + return 0; } } - + /* * If ownership is being ignored on this volume, we silently discard * ownership changes. @@ -2661,7 +2757,7 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) /* Never allow the setting of any unsupported superuser flags. */ if (VATTR_IS_ACTIVE(vap, va_flags)) { - vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE); + vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE); } #if CONFIG_FSE @@ -2674,13 +2770,14 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) error = VNOP_SETATTR(vp, vap, ctx); - if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) + if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) { error = vnode_setattr_fallback(vp, vap, ctx); + } #if CONFIG_FSE -#define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \ - VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \ - VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl)) +#define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \ + VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \ + VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl)) /* * Now that we've changed them, decide whether to send an @@ -2695,28 +2792,29 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) * changes. */ active &= ~(PERMISSION_BITS | - VNODE_ATTR_BIT(va_access_time) | - VNODE_ATTR_BIT(va_backup_time)); + VNODE_ATTR_BIT(va_access_time) | + VNODE_ATTR_BIT(va_backup_time)); /* Anything left to notify about? 
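[Editorial aside, not part of the patch: a sketch of the new vnode_get_va_fsid() helper added above, which prefers the 64-bit va_fsid64 when the filesystem supplied it and falls back to va_fsid otherwise; the wrapper name is hypothetical.]

#include <sys/vnode.h>

static int
fetch_fsid(vnode_t vp, vfs_context_t ctx, uint64_t *fsid)
{
    struct vnode_attr va;
    int error;

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_fsid64);

    error = vnode_getattr(vp, &va, ctx);
    if (error == 0) {
        *fsid = vnode_get_va_fsid(&va);  /* collapses both forms to 64 bits */
    }
    return error;
}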
*/ - if (active & vap->va_supported) + if (active & vap->va_supported) { is_stat_change = 1; + } } if (error == 0) { - if (is_perm_change) { - if (need_fsevent(FSE_CHOWN, vp)) { - add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); + if (is_perm_change) { + if (need_fsevent(FSE_CHOWN, vp)) { + add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); + } + } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) { + add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); } - } else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) { - add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); - } } #undef PERMISSION_BITS #endif out: - return(error); + return error; } /* @@ -2749,7 +2847,7 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) kauth_filesec_t fsec; kauth_acl_t facl; struct kauth_filesec lfsec; - int error; + int error; error = 0; @@ -2797,7 +2895,7 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) } /* find the ACL */ facl = &fsec->fsec_acl; - + /* if we're using the local filesec, we need to initialise it */ if (fsec == &lfsec) { fsec->fsec_magic = KAUTH_FILESEC_MAGIC; @@ -2830,7 +2928,7 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) } VATTR_SET_SUPPORTED(vap, va_acl); } - + /* * If the filesec data is all invalid, we can just remove * the EA completely. @@ -2840,8 +2938,9 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) { error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx); /* no attribute is ok, nothing to delete */ - if (error == ENOATTR) + if (error == ENOATTR) { error = 0; + } VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error); } else { /* write the EA */ @@ -2850,12 +2949,13 @@ vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx) } /* if we fetched a filesec, dispose of the buffer */ - if (fsec != &lfsec) + if (fsec != &lfsec) { kauth_filesec_free(fsec); + } } out: - return(error); + return error; } /* @@ -2863,25 +2963,25 @@ out: * event on a vnode. */ int -vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap) +vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap) { /* These are the same as the corresponding knotes, at least for now. Cheating a little. 
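[Editorial aside, not part of the patch: a sketch of a routine call into vnode_setattr() above; attributes the filesystem leaves unsupported are retried through vnode_setattr_fallback() automatically, and EROFS comes back on read-only mounts. The helper name is hypothetical.]

#include <sys/vnode.h>

static int
chmod_vnode(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
    struct vnode_attr va;

    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, mode);

    return vnode_setattr(vp, &va, ctx);
}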
*/ - uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME - | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB); - uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED - | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED); + uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME + | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB); + uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED + | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED); uint32_t knote_events = (events & knote_mask); /* Permissions are not explicitly part of the kqueue model */ if (events & VNODE_EVENT_PERMS) { knote_events |= NOTE_ATTRIB; - } + } /* Directory contents information just becomes NOTE_WRITE */ if ((vnode_isdir(vp)) && (events & dir_contents_mask)) { knote_events |= NOTE_WRITE; } - + if (knote_events) { lock_vnode_and_post(vp, knote_events); #if CONFIG_FSE @@ -2891,7 +2991,7 @@ vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap) #else (void)vap; #endif - } + } return 0; } @@ -2901,7 +3001,7 @@ vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap) int vnode_isdyldsharedcache(vnode_t vp) { - return ((vp->v_flag & VSHARED_DYLD) ? 1 : 0); + return (vp->v_flag & VSHARED_DYLD) ? 1 : 0; } @@ -2910,8 +3010,9 @@ vnode_isdyldsharedcache(vnode_t vp) * check whether a vnode is being monitored. */ int -vnode_ismonitored(vnode_t vp) { - return (vp->v_knotes.slh_first != NULL); +vnode_ismonitored(vnode_t vp) +{ + return vp->v_knotes.slh_first != NULL; } int @@ -2933,15 +3034,15 @@ vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp) * by the vnode_notify() call. */ int -vfs_get_notify_attributes(struct vnode_attr *vap) +vfs_get_notify_attributes(struct vnode_attr *vap) { - VATTR_INIT(vap); + VATTR_INIT(vap); vap->va_active = VNODE_NOTIFY_ATTRS; return 0; } #if CONFIG_TRIGGERS -int +int vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx) { int error; @@ -2984,10 +3085,10 @@ out: #if 0 /* - *# - *#% lookup dvp L ? ? - *#% lookup vpp - L - - */ +*# +*#% lookup dvp L ? ? +*#% lookup vpp - L - +*/ struct vnop_lookup_args { struct vnodeop_desc *a_desc; vnode_t a_dvp; @@ -3019,7 +3120,7 @@ struct vnop_lookup_args { * be returned by HFS from hfs_lookup, not including additional * error code which may be propagated from underlying routines. */ -errno_t +errno_t VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx) { int _err; @@ -3036,7 +3137,7 @@ VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t DTRACE_FSINFO(lookup, vnode_t, *vpp); } - return (_err); + return _err; } #if 0 @@ -3090,7 +3191,7 @@ VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t fla _err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a); if (want_create) { - if (_err == 0 && *vpp) { + if (_err == 0 && *vpp) { DTRACE_FSINFO(compound_open, vnode_t, *vpp); } else { DTRACE_FSINFO(compound_open, vnode_t, dvp); @@ -3105,10 +3206,10 @@ VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t fla panic("Filesystem did a create, even though none was requested?"); } - if (did_create) { + if (did_create) { #if CONFIG_APPLEDOUBLE if (!NATIVE_XATTR(dvp)) { - /* + /* * Remove stale Apple Double file (if any). 
*/ xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0); @@ -3126,8 +3227,7 @@ VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t fla } #endif /* 0 */ - return (_err); - + return _err; } #if 0 @@ -3140,7 +3240,7 @@ struct vnop_create_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; @@ -3160,7 +3260,7 @@ VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode #if CONFIG_APPLEDOUBLE if (_err == 0 && !NATIVE_XATTR(dvp)) { - /* + /* * Remove stale Apple Double file (if any). */ xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0); @@ -3169,17 +3269,17 @@ VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode post_event_if_success(dvp, _err, NOTE_WRITE); - return (_err); + return _err; } #if 0 /* - *# - *#% whiteout dvp L L L - *#% whiteout cnp - - - - *#% whiteout flag - - - - *# - */ +*# +*#% whiteout dvp L L L +*#% whiteout cnp - - - +*#% whiteout flag - - - +*# +*/ struct vnop_whiteout_args { struct vnodeop_desc *a_desc; vnode_t a_dvp; @@ -3188,59 +3288,58 @@ struct vnop_whiteout_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp, - __unused int flags, __unused vfs_context_t ctx) + __unused int flags, __unused vfs_context_t ctx) { - return (ENOTSUP); // XXX OBSOLETE + return ENOTSUP; // XXX OBSOLETE } #if 0 /* - *# - *#% mknod dvp L U U - *#% mknod vpp - X - - *# - */ +*# +*#% mknod dvp L U U +*#% mknod vpp - X - +*# +*/ struct vnop_mknod_args { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; + struct vnodeop_desc *a_desc; + vnode_t a_dvp; + vnode_t *a_vpp; + struct componentname *a_cnp; + struct vnode_attr *a_vap; + vfs_context_t a_context; }; #endif /* 0*/ errno_t VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx) { + int _err; + struct vnop_mknod_args a; - int _err; - struct vnop_mknod_args a; - - a.a_desc = &vnop_mknod_desc; - a.a_dvp = dvp; - a.a_vpp = vpp; - a.a_cnp = cnp; - a.a_vap = vap; - a.a_context = ctx; + a.a_desc = &vnop_mknod_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + a.a_vap = vap; + a.a_context = ctx; - _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a); - if (_err == 0 && *vpp) { + _err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a); + if (_err == 0 && *vpp) { DTRACE_FSINFO(mknod, vnode_t, *vpp); - } + } - post_event_if_success(dvp, _err, NOTE_WRITE); + post_event_if_success(dvp, _err, NOTE_WRITE); - return (_err); + return _err; } #if 0 /* - *# - *#% open vp L L L - *# - */ +*# +*#% open vp L L L +*# +*/ struct vnop_open_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3248,32 +3347,32 @@ struct vnop_open_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t -VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx) +errno_t +VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx) { int _err; struct vnop_open_args a; if (ctx == NULL) { ctx = vfs_context_current(); - } + } a.a_desc = &vnop_open_desc; a.a_vp = vp; a.a_mode = mode; - a.a_context = ctx; + a.a_context = ctx; _err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a); DTRACE_FSINFO(open, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% close vp U U U - *# - */ +*# +*#% close vp U U U +*# +*/ struct vnop_close_args { struct 
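[Editorial aside, not part of the patch: how a filesystem might feed vnode_notify() above, fetching only the attribute set the VFS asks for (VNODE_NOTIFY_ATTRS, via vfs_get_notify_attributes()) and only when someone is actually watching. A sketch; the function name is hypothetical.]

#include <sys/vnode.h>

static void
post_attrib_change(vnode_t vp, vfs_context_t ctx)
{
    struct vnode_attr va;

    if (!vnode_ismonitored(vp)) {
        return;                          /* nobody is listening */
    }
    vfs_get_notify_attributes(&va);      /* VATTR_INIT + VNODE_NOTIFY_ATTRS */
    if (vnode_getattr(vp, &va, ctx) == 0) {
        vnode_notify(vp, VNODE_EVENT_ATTRIB, &va);
    }
}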
vnodeop_desc *a_desc; vnode_t a_vp; @@ -3281,7 +3380,7 @@ struct vnop_close_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx) { int _err; @@ -3298,15 +3397,15 @@ VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx) _err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a); DTRACE_FSINFO(close, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% access vp L L L - *# - */ +*# +*#% access vp L L L +*# +*/ struct vnop_access_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3314,7 +3413,7 @@ struct vnop_access_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx) { int _err; @@ -3331,15 +3430,15 @@ VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx) _err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a); DTRACE_FSINFO(access, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% getattr vp = = = - *# - */ +*# +*#% getattr vp = = = +*# +*/ struct vnop_getattr_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3347,7 +3446,7 @@ struct vnop_getattr_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; @@ -3361,15 +3460,15 @@ VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) _err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a); DTRACE_FSINFO(getattr, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% setattr vp L L L - *# - */ +*# +*#% setattr vp L L L +*# +*/ struct vnop_setattr_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3377,7 +3476,7 @@ struct vnop_setattr_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) { int _err; @@ -3392,7 +3491,7 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) DTRACE_FSINFO(setattr, vnode_t, vp); #if CONFIG_APPLEDOUBLE - /* + /* * Shadow uid/gid/mod change to extended attribute file. 
*/ if (_err == 0 && !NATIVE_XATTR(vp)) { @@ -3413,17 +3512,19 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) change = 1; } if (change) { - vnode_t dvp; + vnode_t dvp; const char *vname; dvp = vnode_getparent(vp); vname = vnode_getname(vp); xattrfile_setattr(dvp, vname, &va, ctx); - if (dvp != NULLVP) - vnode_put(dvp); - if (vname != NULL) - vnode_putname(vname); + if (dvp != NULLVP) { + vnode_put(dvp); + } + if (vname != NULL) { + vnode_putname(vname); + } } } #endif /* CONFIG_APPLEDOUBLE */ @@ -3434,14 +3535,14 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) * cache */ if (_err == 0 && ( - VATTR_IS_SUPPORTED(vap, va_mode) || - VATTR_IS_SUPPORTED(vap, va_uid) || - VATTR_IS_SUPPORTED(vap, va_gid) || - VATTR_IS_SUPPORTED(vap, va_flags) || - VATTR_IS_SUPPORTED(vap, va_acl) || - VATTR_IS_SUPPORTED(vap, va_uuuid) || - VATTR_IS_SUPPORTED(vap, va_guuid))) { - vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); + VATTR_IS_SUPPORTED(vap, va_mode) || + VATTR_IS_SUPPORTED(vap, va_uid) || + VATTR_IS_SUPPORTED(vap, va_gid) || + VATTR_IS_SUPPORTED(vap, va_flags) || + VATTR_IS_SUPPORTED(vap, va_acl) || + VATTR_IS_SUPPORTED(vap, va_uuuid) || + VATTR_IS_SUPPORTED(vap, va_guuid))) { + vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); #if NAMEDSTREAMS if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) { @@ -3449,24 +3550,24 @@ VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx) if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) { vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS); vnode_put(svp); - } - } + } + } #endif /* NAMEDSTREAMS */ } post_event_if_success(vp, _err, NOTE_ATTRIB); - return (_err); + return _err; } #if 0 /* - *# - *#% read vp L L L - *# - */ +*# +*#% read vp L L L +*# +*/ struct vnop_read_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3475,7 +3576,7 @@ struct vnop_read_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) { int _err; @@ -3498,16 +3599,16 @@ VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) DTRACE_FSINFO_IO(read, vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); - return (_err); + return _err; } #if 0 /* - *# - *#% write vp L L L - *# - */ +*# +*#% write vp L L L +*# +*/ struct vnop_write_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3516,7 +3617,7 @@ struct vnop_write_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) { struct vnop_write_args a; @@ -3541,16 +3642,16 @@ VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx) post_event_if_success(vp, _err, NOTE_WRITE); - return (_err); + return _err; } #if 0 /* - *# - *#% ioctl vp U U U - *# - */ +*# +*#% ioctl vp U U U +*# +*/ struct vnop_ioctl_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3560,7 +3661,7 @@ struct vnop_ioctl_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx) { int _err; @@ -3586,7 +3687,7 @@ VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ct */ if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) { if (data != NULL && !vnode_vfs64bitready(vp)) { - return(ENOTTY); + return ENOTTY; } } @@ -3595,21 +3696,21 @@ VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, 
vfs_context_t ct a.a_command = command; a.a_data = data; a.a_fflag = fflag; - a.a_context= ctx; + a.a_context = ctx; _err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a); DTRACE_FSINFO(ioctl, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% select vp U U U - *# - */ +*# +*#% select vp U U U +*# +*/ struct vnop_select_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3619,8 +3720,8 @@ struct vnop_select_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t -VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx) +errno_t +VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx) { int _err; struct vnop_select_args a; @@ -3638,26 +3739,26 @@ VNOP_SELECT(vnode_t vp, int which , int fflags, void * wql, vfs_context_t ctx) _err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a); DTRACE_FSINFO(select, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% exchange fvp L L L - *#% exchange tvp L L L - *# - */ +*# +*#% exchange fvp L L L +*#% exchange tvp L L L +*# +*/ struct vnop_exchange_args { struct vnodeop_desc *a_desc; vnode_t a_fvp; - vnode_t a_tvp; - int a_options; + vnode_t a_tvp; + int a_options; vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx) { int _err; @@ -3676,16 +3777,16 @@ VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx) post_event_if_success(fvp, _err, NOTE_ATTRIB); post_event_if_success(tvp, _err, NOTE_ATTRIB); - return (_err); + return _err; } #if 0 /* - *# - *#% revoke vp U U U - *# - */ +*# +*#% revoke vp U U U +*# +*/ struct vnop_revoke_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3693,7 +3794,7 @@ struct vnop_revoke_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx) { struct vnop_revoke_args a; @@ -3707,16 +3808,16 @@ VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx) _err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a); DTRACE_FSINFO(revoke, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *# mmap - vp U U U - *# - */ +*# +*# mmap - vp U U U +*# +*/ struct vnop_mmap_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3724,7 +3825,7 @@ struct vnop_mmap_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx) { int _err; @@ -3738,23 +3839,23 @@ VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx) _err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a); DTRACE_FSINFO(mmap, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *# mnomap - vp U U U - *# - */ +*# +*# mnomap - vp U U U +*# +*/ struct vnop_mnomap_args { struct vnodeop_desc *a_desc; vnode_t a_vp; vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx) { int _err; @@ -3767,16 +3868,16 @@ VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx) _err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a); DTRACE_FSINFO(mnomap, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% fsync vp L L L - *# - */ +*# +*#% fsync vp L L L +*# +*/ struct vnop_fsync_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3784,7 +3885,7 @@ struct vnop_fsync_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx) { struct vnop_fsync_args a; @@ -3798,17 +3899,17 @@ VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx) _err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a); 
DTRACE_FSINFO(fsync, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% remove dvp L U U - *#% remove vp L U U - *# - */ +*# +*#% remove dvp L U U +*#% remove vp L U U +*# +*/ struct vnop_remove_args { struct vnodeop_desc *a_desc; vnode_t a_dvp; @@ -3818,7 +3919,7 @@ struct vnop_remove_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx) { int _err; @@ -3835,21 +3936,21 @@ VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_ DTRACE_FSINFO(remove, vnode_t, vp); if (_err == 0) { - vnode_setneedinactive(vp); + vnode_setneedinactive(vp); #if CONFIG_APPLEDOUBLE - if ( !(NATIVE_XATTR(dvp)) ) { - /* + if (!(NATIVE_XATTR(dvp))) { + /* * Remove any associated extended attribute file (._ AppleDouble file). */ - xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1); } #endif /* CONFIG_APPLEDOUBLE */ } post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK); post_event_if_success(dvp, _err, NOTE_WRITE); - - return (_err); + + return _err; } int @@ -3875,13 +3976,13 @@ VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t f DTRACE_FSINFO(compound_remove, vnode_t, dvp); } if (_err == 0) { - vnode_setneedinactive(*vpp); + vnode_setneedinactive(*vpp); #if CONFIG_APPLEDOUBLE - if ( !(NATIVE_XATTR(dvp)) ) { - /* + if (!(NATIVE_XATTR(dvp))) { + /* * Remove any associated extended attribute file (._ AppleDouble file). */ - xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1); + xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1); } #endif /* CONFIG_APPLEDOUBLE */ } @@ -3899,16 +4000,16 @@ VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t f //printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err); - return (_err); + return _err; } #if 0 /* - *# - *#% link vp U U U - *#% link tdvp L U U - *# - */ +*# +*#% link vp U U U +*#% link tdvp L U U +*# +*/ struct vnop_link_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -3917,7 +4018,7 @@ struct vnop_link_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx) { int _err; @@ -3928,7 +4029,7 @@ VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ct * For file systems with non-native extended attributes, * disallow linking to an existing "._" Apple Double file. 
*/ - if ( !NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) { + if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) { const char *vname; vname = vnode_getname(vp); @@ -3938,8 +4039,9 @@ VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ct _err = EPERM; } vnode_putname(vname); - if (_err) - return (_err); + if (_err) { + return _err; + } } } #endif /* CONFIG_APPLEDOUBLE */ @@ -3956,13 +4058,13 @@ VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ct post_event_if_success(vp, _err, NOTE_LINK); post_event_if_success(tdvp, _err, NOTE_WRITE); - return (_err); + return _err; } errno_t vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap, - struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap, - vfs_rename_flags_t flags, vfs_context_t ctx) + struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap, + vfs_rename_flags_t flags, vfs_context_t ctx) { int _err; struct nameidata *fromnd = NULL; @@ -3976,24 +4078,25 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s char *xtoname = NULL; #endif /* CONFIG_APPLEDOUBLE */ int batched; - uint32_t tdfflags; // Target directory file flags + uint32_t tdfflags; // Target directory file flags batched = vnode_compound_rename_available(fdvp); if (!batched) { - if (*fvpp == NULLVP) + if (*fvpp == NULLVP) { panic("Not batched, and no fvp?"); + } } #if CONFIG_APPLEDOUBLE - /* + /* * We need to preflight any potential AppleDouble file for the source file * before doing the rename operation, since we could potentially be doing * this operation on a network filesystem, and would end up duplicating * the work. Also, save the source and destination names. Skip it if the * source has a "._" prefix. */ - + if (!NATIVE_XATTR(fdvp) && !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) { size_t len; @@ -4008,7 +4111,7 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s } strlcpy(xfromname, "._", min(sizeof smallname1, len)); strncat(xfromname, fcnp->cn_nameptr, fcnp->cn_namelen); - xfromname[len-1] = '\0'; + xfromname[len - 1] = '\0'; /* Get destination attribute file name. */ len = tcnp->cn_namelen + 3; @@ -4019,17 +4122,17 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s } strlcpy(xtoname, "._", min(sizeof smallname2, len)); strncat(xtoname, tcnp->cn_nameptr, tcnp->cn_namelen); - xtoname[len-1] = '\0'; - - /* + xtoname[len - 1] = '\0'; + + /* * Look up the source attribute file, and keep a reference on it if it exists. * Note that we do the namei with the nameiop of RENAME, which is different than * in the rename syscall. It's OK if the source file does not exist, since this * is only for AppleDouble files.
*/ - MALLOC(fromnd, struct nameidata *, sizeof (struct nameidata), M_TEMP, M_WAITOK); + MALLOC(fromnd, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK); NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, - UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx); + UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx); fromnd->ni_dvp = fdvp; error = namei(fromnd); @@ -4075,8 +4178,9 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx); } } - } else + } else { _err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx); + } } /* @@ -4107,12 +4211,12 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s #endif #if CONFIG_APPLEDOUBLE - /* + /* * Rename any associated extended attribute file (._ AppleDouble file). */ if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) { int error = 0; - + /* * Get destination attribute file vnode. * Note that tdvp already has an iocount reference. Make sure to check that we @@ -4120,57 +4224,58 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s */ MALLOC(tond, struct nameidata *, sizeof(struct nameidata), M_TEMP, M_WAITOK); NDINIT(tond, RENAME, OP_RENAME, - NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, - CAST_USER_ADDR_T(xtoname), ctx); + NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE, + CAST_USER_ADDR_T(xtoname), ctx); tond->ni_dvp = tdvp; error = namei(tond); - if (error) + if (error) { goto ad_error; - + } + if (tond->ni_vp) { dst_attr_vp = tond->ni_vp; } - + if (src_attr_vp) { const char *old_name = src_attr_vp->v_name; vnode_t old_parent = src_attr_vp->v_parent; - + if (batched) { error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL, - tdvp, &dst_attr_vp, &tond->ni_cnd, NULL, - 0, ctx); + tdvp, &dst_attr_vp, &tond->ni_cnd, NULL, + 0, ctx); } else { - error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd, - tdvp, dst_attr_vp, &tond->ni_cnd, ctx); + error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd, + tdvp, dst_attr_vp, &tond->ni_cnd, ctx); } if (error == 0 && old_name == src_attr_vp->v_name && - old_parent == src_attr_vp->v_parent) { + old_parent == src_attr_vp->v_parent) { int update_flags = VNODE_UPDATE_NAME; - - if (fdvp != tdvp) + + if (fdvp != tdvp) { update_flags |= VNODE_UPDATE_PARENT; - + } + if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) { vnode_update_identity(src_attr_vp, tdvp, - tond->ni_cnd.cn_nameptr, - tond->ni_cnd.cn_namelen, - tond->ni_cnd.cn_hash, - update_flags); + tond->ni_cnd.cn_nameptr, + tond->ni_cnd.cn_namelen, + tond->ni_cnd.cn_hash, + update_flags); } } - - /* kevent notifications for moving resource files + + /* kevent notifications for moving resource files * _err is zero if we're here, so no need to notify directories, code * below will do that. 
only need to post the rename on the source and * possibly a delete on the dest */ post_event_if_success(src_attr_vp, error, NOTE_RENAME); if (dst_attr_vp) { - post_event_if_success(dst_attr_vp, error, NOTE_DELETE); + post_event_if_success(dst_attr_vp, error, NOTE_DELETE); } - } else if (dst_attr_vp) { /* * Just delete destination attribute file vnode if it exists, since @@ -4179,7 +4284,7 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s */ struct vnop_remove_args args; - + args.a_desc = &vnop_remove_desc; args.a_dvp = tdvp; args.a_vp = dst_attr_vp; @@ -4189,15 +4294,16 @@ vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, s if (error == 0) { error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args); - if (error == 0) + if (error == 0) { vnode_setneedinactive(dst_attr_vp); + } } - + /* kevent notification for deleting the destination's attribute file * if it existed. Only need to post the delete on the destination, since - * the code below will handle the directories. + * the code below will handle the directories. */ - post_event_if_success(dst_attr_vp, error, NOTE_DELETE); + post_event_if_success(dst_attr_vp, error, NOTE_DELETE); } } ad_error: @@ -4228,13 +4334,13 @@ ad_error: #if 0 /* - *# - *#% rename fdvp U U U - *#% rename fvp U U U - *#% rename tdvp L U U - *#% rename tvp X U U - *# - */ +*# +*#% rename fdvp U U U +*#% rename fvp U U U +*#% rename tdvp L U U +*#% rename tvp X U U +*# +*/ struct vnop_rename_args { struct vnodeop_desc *a_desc; vnode_t a_fdvp; @@ -4248,8 +4354,8 @@ struct vnop_rename_args { #endif /* 0*/ errno_t VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_context_t ctx) + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_context_t ctx) { int _err = 0; struct vnop_rename_args a; @@ -4267,8 +4373,9 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, _err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a); DTRACE_FSINFO(rename, vnode_t, fdvp); - if (_err) + if (_err) { return _err; + } return post_rename(fdvp, fvp, tdvp, tvp); } @@ -4276,15 +4383,16 @@ VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp) { - if (tvp && tvp != fvp) + if (tvp && tvp != fvp) { vnode_setneedinactive(tvp); + } /* Wrote at least one directory. If transplanted a dir, also changed link counts */ int events = NOTE_WRITE; if (vnode_isdir(fvp)) { /* Link count on dir changed only if we are moving a dir and... 
- * --Moved to new dir, not overwriting there - * --Kept in same dir and DID overwrite + * --Moved to new dir, not overwriting there + * --Kept in same dir and DID overwrite */ if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) { events |= NOTE_LINK; @@ -4293,12 +4401,11 @@ post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp) lock_vnode_and_post(fdvp, events); if (fdvp != tdvp) { - lock_vnode_and_post(tdvp, events); + lock_vnode_and_post(tdvp, events); } /* If you're replacing the target, post a deletion for it */ - if (tvp) - { + if (tvp) { lock_vnode_and_post(tvp, NOTE_DELETE); } @@ -4309,13 +4416,13 @@ post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp) #if 0 /* - *# - *#% renamex fdvp U U U - *#% renamex fvp U U U - *#% renamex tdvp L U U - *#% renamex tvp X U U - *# - */ +*# +*#% renamex fdvp U U U +*#% renamex fvp U U U +*#% renamex tdvp L U U +*#% renamex tvp X U U +*# +*/ struct vnop_renamex_args { struct vnodeop_desc *a_desc; vnode_t a_fdvp; @@ -4330,8 +4437,8 @@ struct vnop_renamex_args { #endif /* 0*/ errno_t VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_rename_flags_t flags, vfs_context_t ctx) + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_rename_flags_t flags, vfs_context_t ctx) { int _err = 0; struct vnop_renamex_args a; @@ -4350,18 +4457,19 @@ VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, _err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a); DTRACE_FSINFO(renamex, vnode_t, fdvp); - if (_err) + if (_err) { return _err; + } return post_rename(fdvp, fvp, tdvp, tvp); } int -VNOP_COMPOUND_RENAME( - struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap, - struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap, - uint32_t flags, vfs_context_t ctx) +VNOP_COMPOUND_RENAME( + struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap, + struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap, + uint32_t flags, vfs_context_t ctx) { int _err = 0; int events; @@ -4372,7 +4480,7 @@ VNOP_COMPOUND_RENAME( no_tvp = (*tvpp) == NULLVP; a.a_desc = &vnop_compound_rename_desc; - + a.a_fdvp = fdvp; a.a_fvpp = fvpp; a.a_fcnp = fcnp; @@ -4382,7 +4490,7 @@ VNOP_COMPOUND_RENAME( a.a_tvpp = tvpp; a.a_tcnp = tcnp; a.a_tvap = tvap; - + a.a_flags = flags; a.a_context = ctx; a.a_rename_authorizer = vn_authorize_rename; @@ -4393,8 +4501,9 @@ VNOP_COMPOUND_RENAME( DTRACE_FSINFO(compound_rename, vnode_t, fdvp); if (_err == 0) { - if (*tvpp && *tvpp != *fvpp) - vnode_setneedinactive(*tvpp); + if (*tvpp && *tvpp != *fvpp) { + vnode_setneedinactive(*tvpp); + } } /* Wrote at least one directory. If transplanted a dir, also changed link counts */ @@ -4406,8 +4515,8 @@ VNOP_COMPOUND_RENAME( events = NOTE_WRITE; if (vnode_isdir(*fvpp)) { /* Link count on dir changed only if we are moving a dir and... 
- * --Moved to new dir, not overwriting there - * --Kept in same dir and DID overwrite + * --Moved to new dir, not overwriting there + * --Kept in same dir and DID overwrite */ if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) { events |= NOTE_LINK; @@ -4416,12 +4525,11 @@ VNOP_COMPOUND_RENAME( lock_vnode_and_post(fdvp, events); if (fdvp != tdvp) { - lock_vnode_and_post(tdvp, events); + lock_vnode_and_post(tdvp, events); } /* If you're replacing the target, post a deletion for it */ - if (*tvpp) - { + if (*tvpp) { lock_vnode_and_post(*tvpp, NOTE_DELETE); } @@ -4429,7 +4537,7 @@ VNOP_COMPOUND_RENAME( } if (no_fvp) { - lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0); + lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0); } if (no_tvp && *tvpp != NULLVP) { lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0); @@ -4446,12 +4554,12 @@ VNOP_COMPOUND_RENAME( } } - return (_err); + return _err; } int vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp, - struct vnode_attr *vap, vfs_context_t ctx) + struct vnode_attr *vap, vfs_context_t ctx) { if (ndp->ni_cnd.cn_nameiop != CREATE) { panic("Non-CREATE nameiop in vn_mkdir()?"); @@ -4466,78 +4574,78 @@ vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp, #if 0 /* - *# - *#% mkdir dvp L U U - *#% mkdir vpp - L - - *# - */ +*# +*#% mkdir dvp L U U +*#% mkdir vpp - L - +*# +*/ struct vnop_mkdir_args { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - vfs_context_t a_context; + struct vnodeop_desc *a_desc; + vnode_t a_dvp; + vnode_t *a_vpp; + struct componentname *a_cnp; + struct vnode_attr *a_vap; + vfs_context_t a_context; }; #endif /* 0*/ errno_t VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, - struct vnode_attr *vap, vfs_context_t ctx) + struct vnode_attr *vap, vfs_context_t ctx) { - int _err; - struct vnop_mkdir_args a; + int _err; + struct vnop_mkdir_args a; - a.a_desc = &vnop_mkdir_desc; - a.a_dvp = dvp; - a.a_vpp = vpp; - a.a_cnp = cnp; - a.a_vap = vap; - a.a_context = ctx; + a.a_desc = &vnop_mkdir_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + a.a_vap = vap; + a.a_context = ctx; - _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a); + _err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a); if (_err == 0 && *vpp) { DTRACE_FSINFO(mkdir, vnode_t, *vpp); } #if CONFIG_APPLEDOUBLE if (_err == 0 && !NATIVE_XATTR(dvp)) { - /* + /* * Remove stale Apple Double file (if any). 
*/ xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0); } #endif /* CONFIG_APPLEDOUBLE */ - post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE); + post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE); - return (_err); + return _err; } int VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp, - struct vnode_attr *vap, vfs_context_t ctx) -{ - int _err; - struct vnop_compound_mkdir_args a; - - a.a_desc = &vnop_compound_mkdir_desc; - a.a_dvp = dvp; - a.a_vpp = vpp; - a.a_cnp = &ndp->ni_cnd; - a.a_vap = vap; - a.a_flags = 0; - a.a_context = ctx; + struct vnode_attr *vap, vfs_context_t ctx) +{ + int _err; + struct vnop_compound_mkdir_args a; + + a.a_desc = &vnop_compound_mkdir_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = &ndp->ni_cnd; + a.a_vap = vap; + a.a_flags = 0; + a.a_context = ctx; #if 0 - a.a_mkdir_authorizer = vn_authorize_mkdir; + a.a_mkdir_authorizer = vn_authorize_mkdir; #endif /* 0 */ - a.a_reserved = NULL; + a.a_reserved = NULL; - _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a); + _err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a); if (_err == 0 && *vpp) { DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp); } #if CONFIG_APPLEDOUBLE if (_err == 0 && !NATIVE_XATTR(dvp)) { - /* + /* * Remove stale Apple Double file (if any). */ xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0); @@ -4552,7 +4660,7 @@ VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp *vpp = NULLVP; } - return (_err); + return _err; } int @@ -4573,11 +4681,11 @@ vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *va #if 0 /* - *# - *#% rmdir dvp L U U - *#% rmdir vp L U U - *# - */ +*# +*#% rmdir dvp L U U +*#% rmdir vp L U U +*# +*/ struct vnop_rmdir_args { struct vnodeop_desc *a_desc; vnode_t a_dvp; @@ -4603,13 +4711,13 @@ VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_c DTRACE_FSINFO(rmdir, vnode_t, vp); if (_err == 0) { - vnode_setneedinactive(vp); + vnode_setneedinactive(vp); #if CONFIG_APPLEDOUBLE - if ( !(NATIVE_XATTR(dvp)) ) { - /* + if (!(NATIVE_XATTR(dvp))) { + /* * Remove any associated extended attribute file (._ AppleDouble file). 
*/ - xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1); + xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1); } #endif } @@ -4618,36 +4726,36 @@ VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_c post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK); post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE); - return (_err); + return _err; } int VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp, - struct vnode_attr *vap, vfs_context_t ctx) + struct vnode_attr *vap, vfs_context_t ctx) { - int _err; - struct vnop_compound_rmdir_args a; - int no_vp; + int _err; + struct vnop_compound_rmdir_args a; + int no_vp; - a.a_desc = &vnop_mkdir_desc; - a.a_dvp = dvp; - a.a_vpp = vpp; - a.a_cnp = &ndp->ni_cnd; - a.a_vap = vap; - a.a_flags = 0; - a.a_context = ctx; - a.a_rmdir_authorizer = vn_authorize_rmdir; - a.a_reserved = NULL; + a.a_desc = &vnop_mkdir_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = &ndp->ni_cnd; + a.a_vap = vap; + a.a_flags = 0; + a.a_context = ctx; + a.a_rmdir_authorizer = vn_authorize_rmdir; + a.a_reserved = NULL; - no_vp = (*vpp == NULLVP); + no_vp = (*vpp == NULLVP); - _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a); + _err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a); if (_err == 0 && *vpp) { DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp); } #if CONFIG_APPLEDOUBLE if (_err == 0 && !NATIVE_XATTR(dvp)) { - /* + /* * Remove stale Apple Double file (if any). */ xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0); @@ -4659,18 +4767,18 @@ VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp } post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE); - if (no_vp) { - lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0); + if (no_vp) { + lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0); #if 0 /* Removing orphaned ._ files requires a vp.... 
*/ - if (*vpp && _err && _err != EKEEPLOOKING) { - vnode_put(*vpp); - *vpp = NULLVP; - } + if (*vpp && _err && _err != EKEEPLOOKING) { + vnode_put(*vpp); + *vpp = NULLVP; + } #endif /* 0 */ - } + } - return (_err); + return _err; } #if CONFIG_APPLEDOUBLE @@ -4679,7 +4787,7 @@ VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp */ #define AD_STALE_SECS (180) static void -xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force) +xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force) { vnode_t xvp; struct nameidata nd; @@ -4699,15 +4807,17 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int forc len = snprintf(filename, len, "._%s", basename); } NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE, - CAST_USER_ADDR_T(filename), ctx); + CAST_USER_ADDR_T(filename), ctx); nd.ni_dvp = dvp; - if (namei(&nd) != 0) + if (namei(&nd) != 0) { goto out2; + } xvp = nd.ni_vp; nameidone(&nd); - if (xvp->v_type != VREG) + if (xvp->v_type != VREG) { goto out1; + } /* * When creating a new object and a "._" file already @@ -4720,9 +4830,9 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int forc VATTR_INIT(&va); VATTR_WANTED(&va, va_data_size); VATTR_WANTED(&va, va_modify_time); - if (VNOP_GETATTR(xvp, &va, ctx) == 0 && - VATTR_IS_SUPPORTED(&va, va_data_size) && - VATTR_IS_SUPPORTED(&va, va_modify_time) && + if (VNOP_GETATTR(xvp, &va, ctx) == 0 && + VATTR_IS_SUPPORTED(&va, va_data_size) && + VATTR_IS_SUPPORTED(&va, va_modify_time) && va.va_data_size != 0) { struct timeval tv; @@ -4735,16 +4845,17 @@ xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int forc } if (force) { int error; - + error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx); - if (error == 0) + if (error == 0) { vnode_setneedinactive(xvp); + } post_event_if_success(xvp, error, NOTE_DELETE); post_event_if_success(dvp, error, NOTE_WRITE); } -out1: +out1: vnode_put(dvp); vnode_put(xvp); out2: @@ -4758,7 +4869,7 @@ out2: */ static void xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, - vfs_context_t ctx) + vfs_context_t ctx) { vnode_t xvp; struct nameidata nd; @@ -4779,10 +4890,11 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap, len = snprintf(filename, len, "._%s", basename); } NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE, - CAST_USER_ADDR_T(filename), ctx); + CAST_USER_ADDR_T(filename), ctx); nd.ni_dvp = dvp; - if (namei(&nd) != 0) + if (namei(&nd) != 0) { goto out2; + } xvp = nd.ni_vp; nameidone(&nd); @@ -4808,42 +4920,42 @@ out2: #if 0 /* - *# - *#% symlink dvp L U U - *#% symlink vpp - U - - *# - */ +*# +*#% symlink dvp L U U +*#% symlink vpp - U - +*# +*/ struct vnop_symlink_args { - struct vnodeop_desc *a_desc; - vnode_t a_dvp; - vnode_t *a_vpp; - struct componentname *a_cnp; - struct vnode_attr *a_vap; - char *a_target; - vfs_context_t a_context; + struct vnodeop_desc *a_desc; + vnode_t a_dvp; + vnode_t *a_vpp; + struct componentname *a_cnp; + struct vnode_attr *a_vap; + char *a_target; + vfs_context_t a_context; }; #endif /* 0*/ errno_t VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, - struct vnode_attr *vap, char *target, vfs_context_t ctx) + struct vnode_attr *vap, char *target, vfs_context_t ctx) { - int _err; - struct vnop_symlink_args a; + int _err; + struct vnop_symlink_args a; - a.a_desc = &vnop_symlink_desc; - a.a_dvp = dvp; - a.a_vpp 
= vpp; - a.a_cnp = cnp; - a.a_vap = vap; - a.a_target = target; - a.a_context = ctx; + a.a_desc = &vnop_symlink_desc; + a.a_dvp = dvp; + a.a_vpp = vpp; + a.a_cnp = cnp; + a.a_vap = vap; + a.a_target = target; + a.a_context = ctx; - _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a); + _err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a); DTRACE_FSINFO(symlink, vnode_t, dvp); #if CONFIG_APPLEDOUBLE if (_err == 0 && !NATIVE_XATTR(dvp)) { - /* + /* * Remove stale Apple Double file (if any). Posts its own knotes */ xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0); @@ -4852,15 +4964,15 @@ VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, post_event_if_success(dvp, _err, NOTE_WRITE); - return (_err); + return _err; } #if 0 /* - *# - *#% readdir vp L L L - *# - */ +*# +*#% readdir vp L L L +*# +*/ struct vnop_readdir_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -4872,9 +4984,9 @@ struct vnop_readdir_args { }; #endif /* 0*/ -errno_t +errno_t VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag, - int *numdirent, vfs_context_t ctx) + int *numdirent, vfs_context_t ctx) { int _err; struct vnop_readdir_args a; @@ -4894,15 +5006,15 @@ VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag, DTRACE_FSINFO_IO(readdir, vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); - return (_err); + return _err; } #if 0 /* - *# - *#% readdirattr vp L L L - *# - */ +*# +*#% readdirattr vp L L L +*# +*/ struct vnop_readdirattr_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -4917,9 +5029,9 @@ struct vnop_readdirattr_args { }; #endif /* 0*/ -errno_t +errno_t VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount, - uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx) + uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx) { int _err; struct vnop_readdirattr_args a; @@ -4942,7 +5054,7 @@ VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint DTRACE_FSINFO_IO(readdirattr, vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); - return (_err); + return _err; } #if 0 @@ -4985,15 +5097,15 @@ VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist, DTRACE_FSINFO_IO(getattrlistbulk, vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); - return (_err); + return _err; } #if 0 /* - *# - *#% readlink vp L L L - *# - */ +*# +*#% readlink vp L L L +*# +*/ struct vnop_readlink_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5021,7 +5133,7 @@ struct vnop_readlink_args { * additional error code which may be propagated from underlying * routines. 
*/ -errno_t +errno_t VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx) { int _err; @@ -5038,22 +5150,22 @@ VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx) DTRACE_FSINFO_IO(readlink, vnode_t, vp, user_ssize_t, (resid - uio_resid(uio))); - return (_err); + return _err; } #if 0 /* - *# - *#% inactive vp L U U - *# - */ +*# +*#% inactive vp L U U +*# +*/ struct vnop_inactive_args { struct vnodeop_desc *a_desc; vnode_t a_vp; vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx) { int _err; @@ -5062,14 +5174,14 @@ VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx) a.a_desc = &vnop_inactive_desc; a.a_vp = vp; a.a_context = ctx; - + _err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a); DTRACE_FSINFO(inactive, vnode_t, vp); #if NAMEDSTREAMS - /* For file systems that do not support namedstream natively, mark - * the shadow stream file vnode to be recycled as soon as the last - * reference goes away. To avoid re-entering reclaim code, do not + /* For file systems that do not support namedstream natively, mark + * the shadow stream file vnode to be recycled as soon as the last + * reference goes away. To avoid re-entering reclaim code, do not * call recycle on terminating namedstream vnodes. */ if (vnode_isnamedstream(vp) && @@ -5080,16 +5192,16 @@ VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx) } #endif - return (_err); + return _err; } #if 0 /* - *# - *#% reclaim vp U U U - *# - */ +*# +*#% reclaim vp U U U +*# +*/ struct vnop_reclaim_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5109,7 +5221,7 @@ VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx) _err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a); DTRACE_FSINFO(reclaim, vnode_t, vp); - return (_err); + return _err; } @@ -5122,10 +5234,10 @@ VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx) */ #if 0 /* - *# - *#% pathconf vp L L L - *# - */ +*# +*#% pathconf vp L L L +*# +*/ struct vnop_pathconf_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5134,7 +5246,7 @@ struct vnop_pathconf_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx) { int _err; @@ -5149,7 +5261,7 @@ VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx) _err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a); DTRACE_FSINFO(pathconf, vnode_t, vp); - return (_err); + return _err; } /* @@ -5165,10 +5277,10 @@ VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx) */ #if 0 /* - *# - *#% advlock vp U U U - *# - */ +*# +*#% advlock vp U U U +*# +*/ struct vnop_advlock_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5179,7 +5291,7 @@ struct vnop_advlock_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout) { int _err; @@ -5209,21 +5321,22 @@ VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a); } DTRACE_FSINFO(advlock, vnode_t, vp); - if (op == F_UNLCK && flags == F_FLOCK) + if (op == F_UNLCK && flags == F_FLOCK) { post_event_if_success(vp, _err, NOTE_FUNLOCK); + } } - return (_err); + return _err; } #if 0 /* - *# - *#% allocate vp L L L - *# - */ +*# +*#% allocate vp L L L +*# +*/ struct vnop_allocate_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5235,7 +5348,7 @@ struct vnop_allocate_args { }; #endif /* 
0*/ -errno_t +errno_t VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx) { int _err; @@ -5257,15 +5370,15 @@ VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesalloc } #endif - return (_err); + return _err; } #if 0 /* - *# - *#% pagein vp = = = - *# - */ +*# +*#% pagein vp = = = +*# +*/ struct vnop_pagein_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5277,7 +5390,7 @@ struct vnop_pagein_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx) { int _err; @@ -5295,15 +5408,15 @@ VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, _err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a); DTRACE_FSINFO(pagein, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% pageout vp = = = - *# - */ +*# +*#% pageout vp = = = +*# +*/ struct vnop_pageout_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5316,7 +5429,7 @@ struct vnop_pageout_args { }; #endif /* 0*/ -errno_t +errno_t VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx) { int _err; @@ -5336,7 +5449,7 @@ VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, post_event_if_success(vp, _err, NOTE_WRITE); - return (_err); + return _err; } int @@ -5353,10 +5466,10 @@ vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struc #if 0 /* - *# - *#% searchfs vp L L L - *# - */ +*# +*#% searchfs vp L L L +*# +*/ struct vnop_searchfs_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5375,7 +5488,7 @@ struct vnop_searchfs_args { }; #endif /* 0*/ -errno_t +errno_t VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx) { int _err; @@ -5399,18 +5512,18 @@ VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct _err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a); DTRACE_FSINFO(searchfs, vnode_t, vp); - return (_err); + return _err; } #endif /* CONFIG_SEARCHFS */ #if 0 /* - *# - *#% copyfile fvp U U U - *#% copyfile tdvp L U U - *#% copyfile tvp X U U - *# - */ +*# +*#% copyfile fvp U U U +*#% copyfile tdvp L U U +*#% copyfile tvp X U U +*# +*/ struct vnop_copyfile_args { struct vnodeop_desc *a_desc; vnode_t a_fvp; @@ -5422,9 +5535,9 @@ struct vnop_copyfile_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - int mode, int flags, vfs_context_t ctx) + int mode, int flags, vfs_context_t ctx) { int _err; struct vnop_copyfile_args a; @@ -5438,7 +5551,7 @@ VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct c a.a_context = ctx; _err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a); DTRACE_FSINFO(copyfile, vnode_t, fvp); - return (_err); + return _err; } #if 0 @@ -5451,17 +5564,17 @@ struct vnop_clonefile_args { struct vnode_attr *a_vap; uint32_t a_flags; vfs_context_t a_context; - int (*a_dir_clone_authorizer)( /* Authorization callback */ - struct vnode_attr *vap, /* attribute to be authorized */ - kauth_action_t action, /* action for which 
attribute is to be authorized */ - struct vnode_attr *dvap, /* target directory attributes */ - vnode_t sdvp, /* source directory vnode pointer (optional) */ - mount_t mp, /* mount point of filesystem */ - dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */ - uint32_t flags; /* value passed in a_flags to the VNOP */ - vfs_context_t ctx, /* As passed to VNOP */ - void *reserved); /* Always NULL */ - void *a_reserved; /* Currently unused */ + int (*a_dir_clone_authorizer)( /* Authorization callback */ + struct vnode_attr *vap, /* attribute to be authorized */ + kauth_action_t action, /* action for which attribute is to be authorized */ + struct vnode_attr *dvap, /* target directory attributes */ + vnode_t sdvp, /* source directory vnode pointer (optional) */ + mount_t mp, /* mount point of filesystem */ + dir_clone_authorizer_op_t vattr_op, /* specific operation requested : setup, authorization or cleanup */ + uint32_t flags; /* value passed in a_flags to the VNOP */ + vfs_context_t ctx, /* As passed to VNOP */ + void *reserved); /* Always NULL */ + void *a_reserved; /* Currently unused */ }; #endif /* 0 */ @@ -5481,22 +5594,24 @@ VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp, a.a_flags = flags; a.a_context = ctx; - if (vnode_vtype(fvp) == VDIR) + if (vnode_vtype(fvp) == VDIR) { a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone; - else + } else { a.a_dir_clone_authorizer = NULL; + } _err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a); if (_err == 0 && *vpp) { DTRACE_FSINFO(clonefile, vnode_t, *vpp); - if (kdebug_enable) + if (kdebug_enable) { kdebug_lookup(*vpp, cnp); + } } post_event_if_success(dvp, _err, NOTE_WRITE); - return (_err); + return _err; } errno_t @@ -5516,7 +5631,7 @@ VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a); DTRACE_FSINFO(getxattr, vnode_t, vp); - return (error); + return error; } errno_t @@ -5535,12 +5650,13 @@ VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_ error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a); DTRACE_FSINFO(setxattr, vnode_t, vp); - if (error == 0) - vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); + if (error == 0) { + vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); + } post_event_if_success(vp, error, NOTE_ATTRIB); - return (error); + return error; } errno_t @@ -5559,8 +5675,8 @@ VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx) DTRACE_FSINFO(removexattr, vnode_t, vp); post_event_if_success(vp, error, NOTE_ATTRIB); - - return (error); + + return error; } errno_t @@ -5579,16 +5695,16 @@ VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t c error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a); DTRACE_FSINFO(listxattr, vnode_t, vp); - return (error); + return error; } #if 0 /* - *# - *#% blktooff vp = = = - *# - */ +*# +*#% blktooff vp = = = +*# +*/ struct vnop_blktooff_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5596,7 +5712,7 @@ struct vnop_blktooff_args { off_t *a_offset; }; #endif /* 0*/ -errno_t +errno_t VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset) { int _err; @@ -5610,15 +5726,15 @@ VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset) _err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a); DTRACE_FSINFO(blktooff, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% offtoblk vp = = = - *# - 
*/ +*# +*#% offtoblk vp = = = +*# +*/ struct vnop_offtoblk_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5626,7 +5742,7 @@ struct vnop_offtoblk_args { daddr64_t *a_lblkno; }; #endif /* 0*/ -errno_t +errno_t VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno) { int _err; @@ -5640,15 +5756,15 @@ VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno) _err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a); DTRACE_FSINFO(offtoblk, vnode_t, vp); - return (_err); + return _err; } #if 0 /* - *# - *#% blockmap vp L L L - *# - */ +*# +*#% blockmap vp L L L +*# +*/ struct vnop_blockmap_args { struct vnodeop_desc *a_desc; vnode_t a_vp; @@ -5661,7 +5777,7 @@ struct vnop_blockmap_args { vfs_context_t a_context; }; #endif /* 0*/ -errno_t +errno_t VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx) { int _err; @@ -5700,7 +5816,7 @@ VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size } } - return (_err); + return _err; } #if 0 @@ -5710,7 +5826,7 @@ struct vnop_strategy_args { }; #endif /* 0*/ -errno_t +errno_t VNOP_STRATEGY(struct buf *bp) { int _err; @@ -5720,7 +5836,7 @@ VNOP_STRATEGY(struct buf *bp) a.a_bp = bp; _err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a); DTRACE_FSINFO(strategy, vnode_t, vp); - return (_err); + return _err; } #if 0 @@ -5729,7 +5845,7 @@ struct vnop_bwrite_args { buf_t a_bp; }; #endif /* 0*/ -errno_t +errno_t VNOP_BWRITE(struct buf *bp) { int _err; @@ -5739,7 +5855,7 @@ VNOP_BWRITE(struct buf *bp) a.a_bp = bp; _err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a); DTRACE_FSINFO(bwrite, vnode_t, vp); - return (_err); + return _err; } #if 0 @@ -5763,8 +5879,8 @@ VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx) _err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a); DTRACE_FSINFO(kqfilt_add, vnode_t, vp); - - return(_err); + + return _err; } #if 0 @@ -5789,7 +5905,7 @@ VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx) _err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a); DTRACE_FSINFO(kqfilt_remove, vnode_t, vp); - return(_err); + return _err; } errno_t @@ -5808,7 +5924,7 @@ VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_cont _err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a); DTRACE_FSINFO(monitor, vnode_t, vp); - return(_err); + return _err; } #if 0 @@ -5833,7 +5949,7 @@ VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx) _err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a); DTRACE_FSINFO(setlabel, vnode_t, vp); - return(_err); + return _err; } @@ -5841,7 +5957,7 @@ VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx) /* * Get a named stream */ -errno_t +errno_t VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx) { int _err; @@ -5857,13 +5973,13 @@ VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperatio _err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a); DTRACE_FSINFO(getnamedstream, vnode_t, vp); - return (_err); + return _err; } /* * Create a named stream */ -errno_t +errno_t VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx) { int _err; @@ -5878,14 +5994,14 @@ VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs _err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a); DTRACE_FSINFO(makenamedstream, vnode_t, vp); -
return (_err); + return _err; } /* * Remove a named stream */ -errno_t +errno_t VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx) { int _err; @@ -5900,6 +6016,6 @@ VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs _err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a); DTRACE_FSINFO(removenamedstream, vnode_t, vp); - return (_err); + return _err; } #endif diff --git a/bsd/vfs/vfs_attrlist.c b/bsd/vfs/vfs_attrlist.c index cd8cbacad..5453d20c7 100644 --- a/bsd/vfs/vfs_attrlist.c +++ b/bsd/vfs/vfs_attrlist.c @@ -2,7 +2,7 @@ * Copyright (c) 1995-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -57,18 +57,18 @@ #include #endif -#define ATTR_TIME_SIZE -1 +#define ATTR_TIME_SIZE -1 /* * Structure describing the state of an in-progress attrlist operation. */ struct _attrlist_buf { - char *base; - char *fixedcursor; - char *varcursor; - ssize_t allocated; + char *base; + char *fixedcursor; + char *varcursor; + ssize_t allocated; ssize_t needed; - attribute_set_t actual; + attribute_set_t actual; attribute_set_t valid; }; @@ -80,25 +80,25 @@ struct _attrlist_buf { static void attrlist_pack_fixed(struct _attrlist_buf *ab, void *source, ssize_t count) { - /* + /* * Use ssize_t for pointer math purposes, * since a ssize_t is a signed long */ - ssize_t fit; /* * Compute the amount of remaining space in the attrlist buffer * based on how much we've used for fixed width fields vs. the - * start of the attributes. - * - * If we've still got room, then 'fit' will contain the amount of - * remaining space. - * - * Note that this math is safe because, in the event that the + * start of the attributes. + * + * If we've still got room, then 'fit' will contain the amount of + * remaining space. + * + * Note that this math is safe because, in the event that the * fixed-width cursor has moved beyond the end of the buffer, - then, the second input into lmin() below will be negative, and - we will fail the (fit > 0) check below. - */ fit = lmin(count, ab->allocated - (ab->fixedcursor - ab->base)); if (fit > 0) { /* Copy in as much as we can */ @@ -113,20 +113,20 @@ attrlist_pack_fixed(struct _attrlist_buf *ab, void *source, ssize_t count) * Attempt to pack one (or two) variable width attributes into the attrlist * buffer.
If we are trying to pack two variable width attributes, they are treated * as a single variable-width attribute from the POV of the system call caller. - * - * Recall that a variable-width attribute has two components: the fixed-width + * + * Recall that a variable-width attribute has two components: the fixed-width * attribute that tells the caller where to look, and the actual variable width data. */ static void -attrlist_pack_variable2(struct _attrlist_buf *ab, const void *source, ssize_t count, - const void *ext, ssize_t extcount) +attrlist_pack_variable2(struct _attrlist_buf *ab, const void *source, ssize_t count, + const void *ext, ssize_t extcount) { /* Use ssize_t's for pointer math ease */ struct attrreference ar; ssize_t fit; /* - * Pack the fixed-width component to the variable object. + * Pack the fixed-width component to the variable object. * Note that we may be able to pack the fixed width attref, but not * the variable (if there's no room). */ @@ -134,13 +134,13 @@ attrlist_pack_variable2(struct _attrlist_buf *ab, const void *source, ssize_t co ar.attr_length = count + extcount; attrlist_pack_fixed(ab, &ar, sizeof(ar)); - /* + /* * Use an lmin() to do a signed comparison. We use a signed comparison * to detect the 'out of memory' conditions as described above in the * fixed width check above. * * Then pack the first variable attribute as space allows. Note that we advance - * the variable cursor only if we had some available space. + * the variable cursor only if we had some available space. */ fit = lmin(count, ab->allocated - (ab->varcursor - ab->base)); if (fit > 0) { @@ -163,7 +163,7 @@ attrlist_pack_variable2(struct _attrlist_buf *ab, const void *source, ssize_t co ab->varcursor = (char *)roundup((uintptr_t)ab->varcursor, 4); } -/* +/* * Packing a single variable-width attribute is the same as calling the two, but with * an invalid 2nd attribute. */ @@ -204,7 +204,7 @@ attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count } /* - * Construct the fixed-width attribute that refers to this string. + * Construct the fixed-width attribute that refers to this string. */ ar.attr_dataoffset = ab->varcursor - ab->fixedcursor; ar.attr_length = count + 1; @@ -226,8 +226,8 @@ attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count if (space > 0) { int bytes_to_zero; - /* - * If there is space remaining, copy data in, and + /* + * If there is space remaining, copy data in, and * accommodate the trailing NUL terminator.
* * NOTE: if "space" is too small to hold the string and its NUL @@ -248,49 +248,49 @@ attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count */ bytes_to_zero = min((roundup(fit, 4) - fit), space - fit); - if (bytes_to_zero) + if (bytes_to_zero) { bzero(&(ab->varcursor[fit]), bytes_to_zero); + } } } - /* + /* * always move in increments of 4 (including the trailing NUL) */ - ab->varcursor += roundup((count+1), 4); - + ab->varcursor += roundup((count + 1), 4); } #define ATTR_PACK4(AB, V) \ do { \ - if ((AB.allocated - (AB.fixedcursor - AB.base)) >= 4) { \ - *(uint32_t *)AB.fixedcursor = V; \ - AB.fixedcursor += 4; \ - } \ + if ((AB.allocated - (AB.fixedcursor - AB.base)) >= 4) { \ + *(uint32_t *)AB.fixedcursor = V; \ + AB.fixedcursor += 4; \ + } \ } while (0) #define ATTR_PACK8(AB, V) \ do { \ - if ((AB.allocated - (AB.fixedcursor - AB.base)) >= 8) { \ - memcpy(AB.fixedcursor, &V, 8); \ - AB.fixedcursor += 8; \ - } \ + if ((AB.allocated - (AB.fixedcursor - AB.base)) >= 8) { \ + memcpy(AB.fixedcursor, &V, 8); \ + AB.fixedcursor += 8; \ + } \ } while (0) -#define ATTR_PACK(b, v) attrlist_pack_fixed(b, &v, sizeof(v)) -#define ATTR_PACK_CAST(b, t, v) \ - do { \ - t _f = (t)v; \ - ATTR_PACK(b, _f); \ +#define ATTR_PACK(b, v) attrlist_pack_fixed(b, &v, sizeof(v)) +#define ATTR_PACK_CAST(b, t, v) \ + do { \ + t _f = (t)v; \ + ATTR_PACK(b, _f); \ } while (0) -#define ATTR_PACK_TIME(b, v, is64) \ - do { \ - if (is64) { \ - struct user64_timespec us = {v.tv_sec, v.tv_nsec}; \ - ATTR_PACK(&b, us); \ - } else { \ - struct user32_timespec us = {v.tv_sec, v.tv_nsec}; \ - ATTR_PACK(&b, us); \ - } \ +#define ATTR_PACK_TIME(b, v, is64) \ + do { \ + if (is64) { \ + struct user64_timespec us = {v.tv_sec, v.tv_nsec}; \ + ATTR_PACK(&b, us); \ + } else { \ + struct user32_timespec us = {v.tv_sec, v.tv_nsec}; \ + ATTR_PACK(&b, us); \ + } \ } while(0) @@ -298,39 +298,39 @@ attrlist_pack_string(struct _attrlist_buf *ab, const char *source, ssize_t count * Table-driven setup for all valid common/volume attributes. 
*/ struct getvolattrlist_attrtab { - attrgroup_t attr; - uint64_t bits; -#define VFSATTR_BIT(b) (VFSATTR_ ## b) - ssize_t size; + attrgroup_t attr; + uint64_t bits; +#define VFSATTR_BIT(b) (VFSATTR_ ## b) + ssize_t size; }; static struct getvolattrlist_attrtab getvolattrlist_common_tab[] = { - {ATTR_CMN_NAME, 0, sizeof(struct attrreference)}, - {ATTR_CMN_DEVID, 0, sizeof(dev_t)}, - {ATTR_CMN_FSID, 0, sizeof(fsid_t)}, - {ATTR_CMN_OBJTYPE, 0, sizeof(fsobj_type_t)}, - {ATTR_CMN_OBJTAG, 0, sizeof(fsobj_tag_t)}, - {ATTR_CMN_OBJID, 0, sizeof(fsobj_id_t)}, - {ATTR_CMN_OBJPERMANENTID, 0, sizeof(fsobj_id_t)}, - {ATTR_CMN_PAROBJID, 0, sizeof(fsobj_id_t)}, - {ATTR_CMN_SCRIPT, 0, sizeof(text_encoding_t)}, - {ATTR_CMN_CRTIME, VFSATTR_BIT(f_create_time), ATTR_TIME_SIZE}, - {ATTR_CMN_MODTIME, VFSATTR_BIT(f_modify_time), ATTR_TIME_SIZE}, - {ATTR_CMN_CHGTIME, VFSATTR_BIT(f_modify_time), ATTR_TIME_SIZE}, - {ATTR_CMN_ACCTIME, VFSATTR_BIT(f_access_time), ATTR_TIME_SIZE}, - {ATTR_CMN_BKUPTIME, VFSATTR_BIT(f_backup_time), ATTR_TIME_SIZE}, - {ATTR_CMN_FNDRINFO, 0, 32}, - {ATTR_CMN_OWNERID, 0, sizeof(uid_t)}, - {ATTR_CMN_GRPID, 0, sizeof(gid_t)}, - {ATTR_CMN_ACCESSMASK, 0, sizeof(uint32_t)}, - {ATTR_CMN_FLAGS, 0, sizeof(uint32_t)}, - {ATTR_CMN_USERACCESS, 0, sizeof(uint32_t)}, - {ATTR_CMN_EXTENDED_SECURITY, 0, sizeof(struct attrreference)}, - {ATTR_CMN_UUID, 0, sizeof(guid_t)}, - {ATTR_CMN_GRPUUID, 0, sizeof(guid_t)}, - {ATTR_CMN_FILEID, 0, sizeof(uint64_t)}, - {ATTR_CMN_PARENTID, 0, sizeof(uint64_t)}, - {ATTR_CMN_RETURNED_ATTRS, 0, sizeof(attribute_set_t)}, - {ATTR_CMN_ERROR, 0, sizeof(uint32_t)}, + {ATTR_CMN_NAME, 0, sizeof(struct attrreference)}, + {ATTR_CMN_DEVID, 0, sizeof(dev_t)}, + {ATTR_CMN_FSID, 0, sizeof(fsid_t)}, + {ATTR_CMN_OBJTYPE, 0, sizeof(fsobj_type_t)}, + {ATTR_CMN_OBJTAG, 0, sizeof(fsobj_tag_t)}, + {ATTR_CMN_OBJID, 0, sizeof(fsobj_id_t)}, + {ATTR_CMN_OBJPERMANENTID, 0, sizeof(fsobj_id_t)}, + {ATTR_CMN_PAROBJID, 0, sizeof(fsobj_id_t)}, + {ATTR_CMN_SCRIPT, 0, sizeof(text_encoding_t)}, + {ATTR_CMN_CRTIME, VFSATTR_BIT(f_create_time), ATTR_TIME_SIZE}, + {ATTR_CMN_MODTIME, VFSATTR_BIT(f_modify_time), ATTR_TIME_SIZE}, + {ATTR_CMN_CHGTIME, VFSATTR_BIT(f_modify_time), ATTR_TIME_SIZE}, + {ATTR_CMN_ACCTIME, VFSATTR_BIT(f_access_time), ATTR_TIME_SIZE}, + {ATTR_CMN_BKUPTIME, VFSATTR_BIT(f_backup_time), ATTR_TIME_SIZE}, + {ATTR_CMN_FNDRINFO, 0, 32}, + {ATTR_CMN_OWNERID, 0, sizeof(uid_t)}, + {ATTR_CMN_GRPID, 0, sizeof(gid_t)}, + {ATTR_CMN_ACCESSMASK, 0, sizeof(uint32_t)}, + {ATTR_CMN_FLAGS, 0, sizeof(uint32_t)}, + {ATTR_CMN_USERACCESS, 0, sizeof(uint32_t)}, + {ATTR_CMN_EXTENDED_SECURITY, 0, sizeof(struct attrreference)}, + {ATTR_CMN_UUID, 0, sizeof(guid_t)}, + {ATTR_CMN_GRPUUID, 0, sizeof(guid_t)}, + {ATTR_CMN_FILEID, 0, sizeof(uint64_t)}, + {ATTR_CMN_PARENTID, 0, sizeof(uint64_t)}, + {ATTR_CMN_RETURNED_ATTRS, 0, sizeof(attribute_set_t)}, + {ATTR_CMN_ERROR, 0, sizeof(uint32_t)}, {0, 0, 0} }; #define ATTR_CMN_VOL_INVALID \ @@ -338,37 +338,37 @@ static struct getvolattrlist_attrtab getvolattrlist_common_tab[] = { ATTR_CMN_FILEID | ATTR_CMN_PARENTID) static struct getvolattrlist_attrtab getvolattrlist_vol_tab[] = { - {ATTR_VOL_FSTYPE, 0, sizeof(uint32_t)}, - {ATTR_VOL_SIGNATURE, VFSATTR_BIT(f_signature), sizeof(uint32_t)}, - {ATTR_VOL_SIZE, VFSATTR_BIT(f_blocks) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, - {ATTR_VOL_SPACEFREE, VFSATTR_BIT(f_bfree) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, - {ATTR_VOL_SPACEAVAIL, VFSATTR_BIT(f_bavail) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, - {ATTR_VOL_MINALLOCATION, 
VFSATTR_BIT(f_bsize), sizeof(off_t)}, - {ATTR_VOL_ALLOCATIONCLUMP, VFSATTR_BIT(f_bsize), sizeof(off_t)}, - {ATTR_VOL_IOBLOCKSIZE, VFSATTR_BIT(f_iosize), sizeof(uint32_t)}, - {ATTR_VOL_OBJCOUNT, VFSATTR_BIT(f_objcount), sizeof(uint32_t)}, - {ATTR_VOL_FILECOUNT, VFSATTR_BIT(f_filecount), sizeof(uint32_t)}, - {ATTR_VOL_DIRCOUNT, VFSATTR_BIT(f_dircount), sizeof(uint32_t)}, - {ATTR_VOL_MAXOBJCOUNT, VFSATTR_BIT(f_maxobjcount), sizeof(uint32_t)}, - {ATTR_VOL_MOUNTPOINT, 0, sizeof(struct attrreference)}, - {ATTR_VOL_NAME, VFSATTR_BIT(f_vol_name), sizeof(struct attrreference)}, - {ATTR_VOL_MOUNTFLAGS, 0, sizeof(uint32_t)}, - {ATTR_VOL_MOUNTEDDEVICE, 0, sizeof(struct attrreference)}, - {ATTR_VOL_ENCODINGSUSED, 0, sizeof(uint64_t)}, - {ATTR_VOL_CAPABILITIES, VFSATTR_BIT(f_capabilities), sizeof(vol_capabilities_attr_t)}, - {ATTR_VOL_UUID, VFSATTR_BIT(f_uuid), sizeof(uuid_t)}, - {ATTR_VOL_QUOTA_SIZE, VFSATTR_BIT(f_quota) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, - {ATTR_VOL_RESERVED_SIZE, VFSATTR_BIT(f_reserved) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, - {ATTR_VOL_ATTRIBUTES, VFSATTR_BIT(f_attributes), sizeof(vol_attributes_attr_t)}, + {ATTR_VOL_FSTYPE, 0, sizeof(uint32_t)}, + {ATTR_VOL_SIGNATURE, VFSATTR_BIT(f_signature), sizeof(uint32_t)}, + {ATTR_VOL_SIZE, VFSATTR_BIT(f_blocks) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, + {ATTR_VOL_SPACEFREE, VFSATTR_BIT(f_bfree) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, + {ATTR_VOL_SPACEAVAIL, VFSATTR_BIT(f_bavail) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, + {ATTR_VOL_MINALLOCATION, VFSATTR_BIT(f_bsize), sizeof(off_t)}, + {ATTR_VOL_ALLOCATIONCLUMP, VFSATTR_BIT(f_bsize), sizeof(off_t)}, + {ATTR_VOL_IOBLOCKSIZE, VFSATTR_BIT(f_iosize), sizeof(uint32_t)}, + {ATTR_VOL_OBJCOUNT, VFSATTR_BIT(f_objcount), sizeof(uint32_t)}, + {ATTR_VOL_FILECOUNT, VFSATTR_BIT(f_filecount), sizeof(uint32_t)}, + {ATTR_VOL_DIRCOUNT, VFSATTR_BIT(f_dircount), sizeof(uint32_t)}, + {ATTR_VOL_MAXOBJCOUNT, VFSATTR_BIT(f_maxobjcount), sizeof(uint32_t)}, + {ATTR_VOL_MOUNTPOINT, 0, sizeof(struct attrreference)}, + {ATTR_VOL_NAME, VFSATTR_BIT(f_vol_name), sizeof(struct attrreference)}, + {ATTR_VOL_MOUNTFLAGS, 0, sizeof(uint32_t)}, + {ATTR_VOL_MOUNTEDDEVICE, 0, sizeof(struct attrreference)}, + {ATTR_VOL_ENCODINGSUSED, 0, sizeof(uint64_t)}, + {ATTR_VOL_CAPABILITIES, VFSATTR_BIT(f_capabilities), sizeof(vol_capabilities_attr_t)}, + {ATTR_VOL_UUID, VFSATTR_BIT(f_uuid), sizeof(uuid_t)}, + {ATTR_VOL_QUOTA_SIZE, VFSATTR_BIT(f_quota) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, + {ATTR_VOL_RESERVED_SIZE, VFSATTR_BIT(f_reserved) | VFSATTR_BIT(f_bsize), sizeof(off_t)}, + {ATTR_VOL_ATTRIBUTES, VFSATTR_BIT(f_attributes), sizeof(vol_attributes_attr_t)}, {ATTR_VOL_INFO, 0, 0}, {0, 0, 0} }; static int getvolattrlist_parsetab(struct getvolattrlist_attrtab *tab, attrgroup_t attrs, struct vfs_attr *vsp, - ssize_t *sizep, int is_64bit, unsigned int maxiter) + ssize_t *sizep, int is_64bit, unsigned int maxiter) { - attrgroup_t recognised; + attrgroup_t recognised; recognised = 0; do { @@ -387,11 +387,12 @@ getvolattrlist_parsetab(struct getvolattrlist_attrtab *tab, attrgroup_t attrs, s } } } while (((++tab)->attr != 0) && (--maxiter > 0)); - + /* check to make sure that we recognised all of the passed-in attributes */ - if (attrs & ~recognised) - return(EINVAL); - return(0); + if (attrs & ~recognised) { + return EINVAL; + } + return 0; } /* @@ -401,31 +402,33 @@ getvolattrlist_parsetab(struct getvolattrlist_attrtab *tab, attrgroup_t attrs, s static int getvolattrlist_setupvfsattr(struct attrlist *alp, struct vfs_attr *vsp, ssize_t *sizep, 
int is_64bit) { - int error; - if (!alp) + int error; + if (!alp) { return EINVAL; + } /* * Parse the above tables. */ - *sizep = sizeof(uint32_t); /* length count */ + *sizep = sizeof(uint32_t); /* length count */ if (alp->commonattr) { if ((alp->commonattr & ATTR_CMN_VOL_INVALID) && (alp->commonattr & ATTR_CMN_RETURNED_ATTRS) == 0) { - return (EINVAL); + return EINVAL; } if ((error = getvolattrlist_parsetab(getvolattrlist_common_tab, - alp->commonattr, vsp, sizep, - is_64bit, - sizeof(getvolattrlist_common_tab)/sizeof(getvolattrlist_common_tab[0]))) != 0) { - return(error); + alp->commonattr, vsp, sizep, + is_64bit, + sizeof(getvolattrlist_common_tab) / sizeof(getvolattrlist_common_tab[0]))) != 0) { + return error; } } if (alp->volattr && - (error = getvolattrlist_parsetab(getvolattrlist_vol_tab, alp->volattr, vsp, sizep, is_64bit, sizeof(getvolattrlist_vol_tab)/sizeof(getvolattrlist_vol_tab[0]))) != 0) - return(error); + (error = getvolattrlist_parsetab(getvolattrlist_vol_tab, alp->volattr, vsp, sizep, is_64bit, sizeof(getvolattrlist_vol_tab) / sizeof(getvolattrlist_vol_tab[0]))) != 0) { + return error; + } - return(0); + return 0; } /* @@ -464,80 +467,80 @@ getvolattrlist_fixupattrs(attribute_set_t *asp, struct vfs_attr *vsp) * Table-driven setup for all valid common/dir/file/fork attributes against files. */ struct getattrlist_attrtab { - attrgroup_t attr; - uint64_t bits; -#define VATTR_BIT(b) (VNODE_ATTR_ ## b) - ssize_t size; - kauth_action_t action; + attrgroup_t attr; + uint64_t bits; +#define VATTR_BIT(b) (VNODE_ATTR_ ## b) + ssize_t size; + kauth_action_t action; }; -/* - * A zero after the ATTR_ bit indicates that we don't expect the underlying FS to report back with this +/* + * A zero after the ATTR_ bit indicates that we don't expect the underlying FS to report back with this * information, and we will synthesize it at the VFS level. 
*/ static struct getattrlist_attrtab getattrlist_common_tab[] = { - {ATTR_CMN_NAME, VATTR_BIT(va_name), sizeof(struct attrreference), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_DEVID, 0, sizeof(dev_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_FSID, 0, sizeof(fsid_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_OBJTYPE, 0, sizeof(fsobj_type_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_OBJTAG, 0, sizeof(fsobj_tag_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_OBJID, VATTR_BIT(va_fileid) | VATTR_BIT(va_linkid), sizeof(fsobj_id_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_NAME, VATTR_BIT(va_name), sizeof(struct attrreference), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_DEVID, 0, sizeof(dev_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_FSID, 0, sizeof(fsid_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_OBJTYPE, 0, sizeof(fsobj_type_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_OBJTAG, 0, sizeof(fsobj_tag_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_OBJID, VATTR_BIT(va_fileid) | VATTR_BIT(va_linkid), sizeof(fsobj_id_t), KAUTH_VNODE_READ_ATTRIBUTES}, {ATTR_CMN_OBJPERMANENTID, VATTR_BIT(va_fileid) | VATTR_BIT(va_linkid), sizeof(fsobj_id_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_PAROBJID, VATTR_BIT(va_parentid), sizeof(fsobj_id_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_SCRIPT, VATTR_BIT(va_encoding), sizeof(text_encoding_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_CRTIME, VATTR_BIT(va_create_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_MODTIME, VATTR_BIT(va_modify_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_CHGTIME, VATTR_BIT(va_change_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_ACCTIME, VATTR_BIT(va_access_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_BKUPTIME, VATTR_BIT(va_backup_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_FNDRINFO, 0, 32, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_OWNERID, VATTR_BIT(va_uid), sizeof(uid_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_GRPID, VATTR_BIT(va_gid), sizeof(gid_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_ACCESSMASK, VATTR_BIT(va_mode), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_FLAGS, VATTR_BIT(va_flags), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_GEN_COUNT, VATTR_BIT(va_write_gencount), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_DOCUMENT_ID, VATTR_BIT(va_document_id), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_USERACCESS, 0, sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_EXTENDED_SECURITY, VATTR_BIT(va_acl), sizeof(struct attrreference), KAUTH_VNODE_READ_SECURITY}, - {ATTR_CMN_UUID, VATTR_BIT(va_uuuid), sizeof(guid_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_GRPUUID, VATTR_BIT(va_guuid), sizeof(guid_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_FILEID, VATTR_BIT(va_fileid), sizeof(uint64_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_PARENTID, VATTR_BIT(va_parentid), sizeof(uint64_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_FULLPATH, 0, sizeof(struct attrreference), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_ADDEDTIME, VATTR_BIT(va_addedtime), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_RETURNED_ATTRS, 0, sizeof(attribute_set_t), 0}, - {ATTR_CMN_ERROR, 0, sizeof(uint32_t), 0}, - {ATTR_CMN_DATA_PROTECT_FLAGS, VATTR_BIT(va_dataprotect_class), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_PAROBJID, VATTR_BIT(va_parentid), sizeof(fsobj_id_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_SCRIPT, VATTR_BIT(va_encoding), sizeof(text_encoding_t), 
KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_CRTIME, VATTR_BIT(va_create_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_MODTIME, VATTR_BIT(va_modify_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_CHGTIME, VATTR_BIT(va_change_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_ACCTIME, VATTR_BIT(va_access_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_BKUPTIME, VATTR_BIT(va_backup_time), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_FNDRINFO, 0, 32, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_OWNERID, VATTR_BIT(va_uid), sizeof(uid_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_GRPID, VATTR_BIT(va_gid), sizeof(gid_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_ACCESSMASK, VATTR_BIT(va_mode), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_FLAGS, VATTR_BIT(va_flags), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_GEN_COUNT, VATTR_BIT(va_write_gencount), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_DOCUMENT_ID, VATTR_BIT(va_document_id), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_USERACCESS, 0, sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_EXTENDED_SECURITY, VATTR_BIT(va_acl), sizeof(struct attrreference), KAUTH_VNODE_READ_SECURITY}, + {ATTR_CMN_UUID, VATTR_BIT(va_uuuid), sizeof(guid_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_GRPUUID, VATTR_BIT(va_guuid), sizeof(guid_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_FILEID, VATTR_BIT(va_fileid), sizeof(uint64_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_PARENTID, VATTR_BIT(va_parentid), sizeof(uint64_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_FULLPATH, 0, sizeof(struct attrreference), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_ADDEDTIME, VATTR_BIT(va_addedtime), ATTR_TIME_SIZE, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_RETURNED_ATTRS, 0, sizeof(attribute_set_t), 0}, + {ATTR_CMN_ERROR, 0, sizeof(uint32_t), 0}, + {ATTR_CMN_DATA_PROTECT_FLAGS, VATTR_BIT(va_dataprotect_class), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, {0, 0, 0, 0} }; static struct getattrlist_attrtab getattrlist_dir_tab[] = { - {ATTR_DIR_LINKCOUNT, VATTR_BIT(va_dirlinkcount), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_DIR_ENTRYCOUNT, VATTR_BIT(va_nchildren), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_DIR_MOUNTSTATUS, 0, sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_DIR_ALLOCSIZE, VATTR_BIT(va_total_alloc) | VATTR_BIT(va_total_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_DIR_IOBLOCKSIZE, VATTR_BIT(va_iosize), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_DIR_DATALENGTH, VATTR_BIT(va_total_size) | VATTR_BIT(va_data_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_DIR_LINKCOUNT, VATTR_BIT(va_dirlinkcount), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_DIR_ENTRYCOUNT, VATTR_BIT(va_nchildren), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_DIR_MOUNTSTATUS, 0, sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_DIR_ALLOCSIZE, VATTR_BIT(va_total_alloc) | VATTR_BIT(va_total_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_DIR_IOBLOCKSIZE, VATTR_BIT(va_iosize), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_DIR_DATALENGTH, VATTR_BIT(va_total_size) | VATTR_BIT(va_data_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, {0, 0, 0, 0} }; static struct getattrlist_attrtab getattrlist_file_tab[] = { - {ATTR_FILE_LINKCOUNT, VATTR_BIT(va_nlink), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_TOTALSIZE, 
VATTR_BIT(va_total_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_ALLOCSIZE, VATTR_BIT(va_total_alloc) | VATTR_BIT(va_total_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_IOBLOCKSIZE, VATTR_BIT(va_iosize), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_DEVTYPE, VATTR_BIT(va_rdev), sizeof(dev_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_DATALENGTH, VATTR_BIT(va_total_size) | VATTR_BIT(va_data_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_DATAALLOCSIZE, VATTR_BIT(va_total_alloc)| VATTR_BIT(va_data_alloc), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_RSRCLENGTH, 0, sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_RSRCALLOCSIZE, 0, sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_LINKCOUNT, VATTR_BIT(va_nlink), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_TOTALSIZE, VATTR_BIT(va_total_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_ALLOCSIZE, VATTR_BIT(va_total_alloc) | VATTR_BIT(va_total_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_IOBLOCKSIZE, VATTR_BIT(va_iosize), sizeof(uint32_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_DEVTYPE, VATTR_BIT(va_rdev), sizeof(dev_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_DATALENGTH, VATTR_BIT(va_total_size) | VATTR_BIT(va_data_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_DATAALLOCSIZE, VATTR_BIT(va_total_alloc) | VATTR_BIT(va_data_alloc), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_RSRCLENGTH, 0, sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_RSRCALLOCSIZE, 0, sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, {0, 0, 0, 0} }; //for forkattr bits repurposed as new common attributes static struct getattrlist_attrtab getattrlist_common_extended_tab[] = { - {ATTR_CMNEXT_RELPATH, 0, sizeof(struct attrreference), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMNEXT_PRIVATESIZE, VATTR_BIT(va_private_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMNEXT_LINKID, VATTR_BIT(va_fileid) | VATTR_BIT(va_linkid), sizeof(uint64_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMNEXT_RELPATH, 0, sizeof(struct attrreference), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMNEXT_PRIVATESIZE, VATTR_BIT(va_private_size), sizeof(off_t), KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMNEXT_LINKID, VATTR_BIT(va_fileid) | VATTR_BIT(va_linkid), sizeof(uint64_t), KAUTH_VNODE_READ_ATTRIBUTES}, {0, 0, 0, 0} }; @@ -551,18 +554,18 @@ static struct getattrlist_attrtab getattrlist_common_extended_tab[] = { * accounted from the common, file and directory tables. 
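
The tables above all share one shape: each row ties a userland ATTR_* request bit to the VNODE_ATTR_* bits the filesystem must populate, the number of bytes the attribute packs to, and the kauth right needed to read it, with an all-zero row as terminator. The following is a minimal stand-alone analogue of the walk getattrlist_parsetab() performs over such rows; the row type, table values, and names are illustrative stand-ins, not the kernel's.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct demo_row {
    uint32_t attr;   /* request bit from userland */
    uint64_t bits;   /* attributes the FS must supply */
    size_t   size;   /* bytes this attribute packs to */
};

static const struct demo_row demo_tab[] = {
    { 0x0001, 1ULL << 0, sizeof(uint32_t) },
    { 0x0002, 1ULL << 1, sizeof(uint64_t) },
    { 0, 0, 0 }      /* all-zero terminator, as in the kernel tables */
};

static int
demo_parsetab(uint32_t attrs, uint64_t *active, size_t *sizep)
{
    uint32_t recognised = 0;
    const struct demo_row *row = demo_tab;

    do {
        if (row->attr & attrs) {
            recognised |= row->attr;
            *active |= row->bits;   /* tell the FS what to fill in */
            *sizep += row->size;    /* grow the fixed-size region */
        }
    } while ((++row)->attr != 0);

    /* reject requests carrying bits no table row recognised */
    return (attrs & ~recognised) ? EINVAL : 0;
}
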
*/ static struct getattrlist_attrtab getattrlistbulk_common_tab[] = { - {ATTR_CMN_DEVID, VATTR_BIT(va_devid), 0, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_FSID, VATTR_BIT(va_fsid64), 0, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_OBJTYPE, VATTR_BIT(va_objtype), 0, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_OBJTAG, VATTR_BIT(va_objtag), 0, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_USERACCESS, VATTR_BIT(va_user_access), 0, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_CMN_FNDRINFO, VATTR_BIT(va_finderinfo), 0, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_DEVID, VATTR_BIT(va_devid), 0, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_FSID, VATTR_BIT(va_fsid64), 0, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_OBJTYPE, VATTR_BIT(va_objtype), 0, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_OBJTAG, VATTR_BIT(va_objtag), 0, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_USERACCESS, VATTR_BIT(va_user_access), 0, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_CMN_FNDRINFO, VATTR_BIT(va_finderinfo), 0, KAUTH_VNODE_READ_ATTRIBUTES}, {0, 0, 0, 0} }; static struct getattrlist_attrtab getattrlistbulk_file_tab[] = { - {ATTR_FILE_RSRCLENGTH, VATTR_BIT(va_rsrc_length), 0, KAUTH_VNODE_READ_ATTRIBUTES}, - {ATTR_FILE_RSRCALLOCSIZE, VATTR_BIT(va_rsrc_alloc), 0, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_RSRCLENGTH, VATTR_BIT(va_rsrc_length), 0, KAUTH_VNODE_READ_ATTRIBUTES}, + {ATTR_FILE_RSRCALLOCSIZE, VATTR_BIT(va_rsrc_alloc), 0, KAUTH_VNODE_READ_ATTRIBUTES}, {0, 0, 0, 0} }; @@ -577,77 +580,82 @@ static struct getattrlist_attrtab getattrlistbulk_common_extended_tab[] = { * * A majority of them are the same attributes that are required for stat(2) and statfs(2). */ -#define VFS_DFLT_ATTR_VOL (ATTR_VOL_FSTYPE | ATTR_VOL_SIGNATURE | \ - ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | ATTR_VOL_QUOTA_SIZE | ATTR_VOL_RESERVED_SIZE | \ - ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | \ - ATTR_VOL_ALLOCATIONCLUMP | ATTR_VOL_IOBLOCKSIZE | \ - ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | \ - ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | \ - ATTR_VOL_ATTRIBUTES | ATTR_VOL_ENCODINGSUSED) - -#define VFS_DFLT_ATTR_CMN (ATTR_CMN_NAME | ATTR_CMN_DEVID | \ - ATTR_CMN_FSID | ATTR_CMN_OBJTYPE | \ - ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | \ - ATTR_CMN_PAROBJID | ATTR_CMN_SCRIPT | \ - ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | \ - ATTR_CMN_FNDRINFO | \ - ATTR_CMN_OWNERID | ATTR_CMN_GRPID | \ - ATTR_CMN_ACCESSMASK | ATTR_CMN_FLAGS | \ - ATTR_CMN_USERACCESS | ATTR_CMN_FILEID | \ - ATTR_CMN_PARENTID | ATTR_CMN_RETURNED_ATTRS | \ - ATTR_CMN_DOCUMENT_ID | ATTR_CMN_GEN_COUNT | \ - ATTR_CMN_DATA_PROTECT_FLAGS) - -#define VFS_DFLT_ATTR_CMN_EXT (ATTR_CMNEXT_PRIVATESIZE | ATTR_CMNEXT_LINKID) - -#define VFS_DFLT_ATTR_DIR (ATTR_DIR_LINKCOUNT | ATTR_DIR_MOUNTSTATUS) - -#define VFS_DFLT_ATTR_FILE (ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | \ - ATTR_FILE_ALLOCSIZE | ATTR_FILE_IOBLOCKSIZE | \ - ATTR_FILE_DEVTYPE | ATTR_FILE_DATALENGTH | \ - ATTR_FILE_DATAALLOCSIZE | ATTR_FILE_RSRCLENGTH | \ - ATTR_FILE_RSRCALLOCSIZE) +#define VFS_DFLT_ATTR_VOL (ATTR_VOL_FSTYPE | ATTR_VOL_SIGNATURE | \ + ATTR_VOL_SIZE | ATTR_VOL_SPACEFREE | ATTR_VOL_QUOTA_SIZE | ATTR_VOL_RESERVED_SIZE | \ + ATTR_VOL_SPACEAVAIL | ATTR_VOL_MINALLOCATION | \ + ATTR_VOL_ALLOCATIONCLUMP | ATTR_VOL_IOBLOCKSIZE | \ + ATTR_VOL_MOUNTPOINT | ATTR_VOL_MOUNTFLAGS | \ + ATTR_VOL_MOUNTEDDEVICE | ATTR_VOL_CAPABILITIES | \ + ATTR_VOL_ATTRIBUTES | ATTR_VOL_ENCODINGSUSED) + +#define VFS_DFLT_ATTR_CMN (ATTR_CMN_NAME | ATTR_CMN_DEVID | \ + ATTR_CMN_FSID | ATTR_CMN_OBJTYPE | \ + ATTR_CMN_OBJTAG | ATTR_CMN_OBJID | \ + ATTR_CMN_PAROBJID | ATTR_CMN_SCRIPT | \ + 
ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | \ + ATTR_CMN_FNDRINFO | \ + ATTR_CMN_OWNERID | ATTR_CMN_GRPID | \ + ATTR_CMN_ACCESSMASK | ATTR_CMN_FLAGS | \ + ATTR_CMN_USERACCESS | ATTR_CMN_FILEID | \ + ATTR_CMN_PARENTID | ATTR_CMN_RETURNED_ATTRS | \ + ATTR_CMN_DOCUMENT_ID | ATTR_CMN_GEN_COUNT | \ + ATTR_CMN_DATA_PROTECT_FLAGS) + +#define VFS_DFLT_ATTR_CMN_EXT (ATTR_CMNEXT_PRIVATESIZE | ATTR_CMNEXT_LINKID) + +#define VFS_DFLT_ATTR_DIR (ATTR_DIR_LINKCOUNT | ATTR_DIR_MOUNTSTATUS) + +#define VFS_DFLT_ATTR_FILE (ATTR_FILE_LINKCOUNT | ATTR_FILE_TOTALSIZE | \ + ATTR_FILE_ALLOCSIZE | ATTR_FILE_IOBLOCKSIZE | \ + ATTR_FILE_DEVTYPE | ATTR_FILE_DATALENGTH | \ + ATTR_FILE_DATAALLOCSIZE | ATTR_FILE_RSRCLENGTH | \ + ATTR_FILE_RSRCALLOCSIZE) static int getattrlist_parsetab(struct getattrlist_attrtab *tab, attrgroup_t attrs, struct vnode_attr *vap, ssize_t *sizep, kauth_action_t *actionp, int is_64bit, unsigned int maxiter) { - attrgroup_t recognised; + attrgroup_t recognised; recognised = 0; - if (!tab) + if (!tab) { return EINVAL; + } do { /* is this attribute set? */ if (tab->attr & attrs) { recognised |= tab->attr; - if (vap) + if (vap) { vap->va_active |= tab->bits; + } if (sizep) { if (tab->size == ATTR_TIME_SIZE) { if (is_64bit) { *sizep += sizeof( - struct user64_timespec); + struct user64_timespec); } else { *sizep += sizeof( - struct user32_timespec); + struct user32_timespec); } } else { *sizep += tab->size; } } - if (actionp) + if (actionp) { *actionp |= tab->action; - if (attrs == recognised) + } + if (attrs == recognised) { break; /* all done, get out */ + } } } while (((++tab)->attr != 0) && (--maxiter > 0)); - + /* check to make sure that we recognised all of the passed-in attributes */ - if (attrs & ~recognised) - return(EINVAL); - return(0); + if (attrs & ~recognised) { + return EINVAL; + } + return 0; } /* @@ -657,27 +665,31 @@ getattrlist_parsetab(struct getattrlist_attrtab *tab, attrgroup_t attrs, static int getattrlist_setupvattr(struct attrlist *alp, struct vnode_attr *vap, ssize_t *sizep, kauth_action_t *actionp, int is_64bit, int isdir, int use_fork) { - int error; + int error; /* * Parse the above tables. 
*/ - *sizep = sizeof(uint32_t); /* length count */ + *sizep = sizeof(uint32_t); /* length count */ *actionp = 0; if (alp->commonattr && - (error = getattrlist_parsetab(getattrlist_common_tab, alp->commonattr, vap, sizep, actionp, is_64bit, sizeof(getattrlist_common_tab)/sizeof(getattrlist_common_tab[0]))) != 0) - return(error); + (error = getattrlist_parsetab(getattrlist_common_tab, alp->commonattr, vap, sizep, actionp, is_64bit, sizeof(getattrlist_common_tab) / sizeof(getattrlist_common_tab[0]))) != 0) { + return error; + } if (isdir && alp->dirattr && - (error = getattrlist_parsetab(getattrlist_dir_tab, alp->dirattr, vap, sizep, actionp, is_64bit, sizeof(getattrlist_dir_tab)/sizeof(getattrlist_dir_tab[0]))) != 0) - return(error); + (error = getattrlist_parsetab(getattrlist_dir_tab, alp->dirattr, vap, sizep, actionp, is_64bit, sizeof(getattrlist_dir_tab) / sizeof(getattrlist_dir_tab[0]))) != 0) { + return error; + } if (!isdir && alp->fileattr && - (error = getattrlist_parsetab(getattrlist_file_tab, alp->fileattr, vap, sizep, actionp, is_64bit, sizeof(getattrlist_file_tab)/sizeof(getattrlist_file_tab[0]))) != 0) - return(error); + (error = getattrlist_parsetab(getattrlist_file_tab, alp->fileattr, vap, sizep, actionp, is_64bit, sizeof(getattrlist_file_tab) / sizeof(getattrlist_file_tab[0]))) != 0) { + return error; + } if (use_fork && alp->forkattr && - (error = getattrlist_parsetab(getattrlist_common_extended_tab, alp->forkattr, vap, sizep, actionp, is_64bit, sizeof(getattrlist_common_extended_tab)/sizeof(getattrlist_common_extended_tab[0]))) != 0) - return(error); + (error = getattrlist_parsetab(getattrlist_common_extended_tab, alp->forkattr, vap, sizep, actionp, is_64bit, sizeof(getattrlist_common_extended_tab) / sizeof(getattrlist_common_extended_tab[0]))) != 0) { + return error; + } - return(0); + return 0; } /* @@ -688,7 +700,7 @@ static int getattrlist_setupvattr_all(struct attrlist *alp, struct vnode_attr *vap, enum vtype obj_type, ssize_t *fixedsize, int is_64bit, int use_fork) { - int error = 0; + int error = 0; /* * Parse the above tables. @@ -698,14 +710,14 @@ getattrlist_setupvattr_all(struct attrlist *alp, struct vnode_attr *vap, } if (alp->commonattr) { error = getattrlist_parsetab(getattrlist_common_tab, - alp->commonattr, vap, fixedsize, NULL, is_64bit, - sizeof(getattrlist_common_tab)/sizeof(getattrlist_common_tab[0])); + alp->commonattr, vap, fixedsize, NULL, is_64bit, + sizeof(getattrlist_common_tab) / sizeof(getattrlist_common_tab[0])); if (!error) { /* Ignore any errrors from the bulk table */ (void)getattrlist_parsetab(getattrlistbulk_common_tab, - alp->commonattr, vap, fixedsize, NULL, is_64bit, - sizeof(getattrlistbulk_common_tab)/sizeof(getattrlistbulk_common_tab[0])); + alp->commonattr, vap, fixedsize, NULL, is_64bit, + sizeof(getattrlistbulk_common_tab) / sizeof(getattrlistbulk_common_tab[0])); /* * turn off va_fsid since we will be using only * va_fsid64 for ATTR_CMN_FSID. 
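
Every parsetab call site above derives its maxiter argument with the sizeof(tab)/sizeof(tab[0]) element-count idiom, so the walk is bounded twice: by the all-zero terminator row and by the compile-time array length. A self-contained sketch of that belt-and-braces loop, with hypothetical names:

#include <stdio.h>

#define ARRAY_COUNT(a)  (sizeof(a) / sizeof((a)[0]))

struct pair {
    int key;
    int val;
};

static int
lookup(const struct pair *tab, unsigned int maxiter, int key)
{
    /* two stop conditions, mirroring the kernel loop: the terminator
     * row, and the compile-time element count */
    do {
        if (tab->key == key) {
            return tab->val;
        }
    } while (((++tab)->key != 0) && (--maxiter > 0));
    return -1;
}

int main(void)
{
    static const struct pair tab[] = { { 1, 10 }, { 2, 20 }, { 0, 0 } };
    printf("%d\n", lookup(tab, ARRAY_COUNT(tab), 2)); /* prints 20 */
    return 0;
}
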
@@ -716,37 +728,37 @@ getattrlist_setupvattr_all(struct attrlist *alp, struct vnode_attr *vap, if (!error && (obj_type == VNON || obj_type == VDIR) && alp->dirattr) { error = getattrlist_parsetab(getattrlist_dir_tab, alp->dirattr, - vap, fixedsize, NULL, is_64bit, - sizeof(getattrlist_dir_tab)/sizeof(getattrlist_dir_tab[0])); + vap, fixedsize, NULL, is_64bit, + sizeof(getattrlist_dir_tab) / sizeof(getattrlist_dir_tab[0])); } if (!error && (obj_type != VDIR) && alp->fileattr) { error = getattrlist_parsetab(getattrlist_file_tab, - alp->fileattr, vap, fixedsize, NULL, is_64bit, - sizeof(getattrlist_file_tab)/sizeof(getattrlist_file_tab[0])); + alp->fileattr, vap, fixedsize, NULL, is_64bit, + sizeof(getattrlist_file_tab) / sizeof(getattrlist_file_tab[0])); if (!error) { /*Ignore any errors from the bulk table */ (void)getattrlist_parsetab(getattrlistbulk_file_tab, - alp->fileattr, vap, fixedsize, NULL, is_64bit, - sizeof(getattrlistbulk_file_tab)/sizeof(getattrlistbulk_file_tab[0])); + alp->fileattr, vap, fixedsize, NULL, is_64bit, + sizeof(getattrlistbulk_file_tab) / sizeof(getattrlistbulk_file_tab[0])); } } /* fork attributes are like extended common attributes if enabled*/ if (!error && use_fork && alp->forkattr) { error = getattrlist_parsetab(getattrlist_common_extended_tab, - alp->forkattr, vap, fixedsize, NULL, is_64bit, - sizeof(getattrlist_common_extended_tab)/sizeof(getattrlist_common_extended_tab[0])); + alp->forkattr, vap, fixedsize, NULL, is_64bit, + sizeof(getattrlist_common_extended_tab) / sizeof(getattrlist_common_extended_tab[0])); if (!error) { (void)getattrlist_parsetab(getattrlistbulk_common_extended_tab, - alp->forkattr, vap, fixedsize, NULL, is_64bit, - sizeof(getattrlistbulk_common_extended_tab)/sizeof(getattrlistbulk_common_extended_tab[0])); + alp->forkattr, vap, fixedsize, NULL, is_64bit, + sizeof(getattrlistbulk_common_extended_tab) / sizeof(getattrlistbulk_common_extended_tab[0])); } } - return (error); + return error; } int @@ -755,8 +767,8 @@ vfs_setup_vattr_from_attrlist(struct attrlist *alp, struct vnode_attr *vap, { // the caller passes us no options, we assume the caller wants the new fork // attr behavior, hence the hardcoded 1 - return (getattrlist_setupvattr_all(alp, vap, obj_vtype, - attrs_fixed_sizep, IS_64BIT_PROCESS(vfs_context_proc(ctx)), 1)); + return getattrlist_setupvattr_all(alp, vap, obj_vtype, + attrs_fixed_sizep, IS_64BIT_PROCESS(vfs_context_proc(ctx)), 1); } @@ -775,20 +787,20 @@ getattrlist_fixupattrs(attribute_set_t *asp, struct vnode_attr *vap, int use_for if (asp->commonattr) { tab = getattrlist_common_tab; do { - /* + /* * This if() statement is slightly confusing. We're trying to - * iterate through all of the bits listed in the array + * iterate through all of the bits listed in the array * getattr_common_tab, and see if the filesystem was expected * to support it, and whether or not we need to do anything about this. - * + * * This array is full of structs that have 4 fields (attr, bits, size, action). - * The first is used to store the ATTR_CMN_* bit that was being requested + * The first is used to store the ATTR_CMN_* bit that was being requested * from userland. The second stores the VATTR_BIT corresponding to the field * filled in vnode_attr struct. If it is 0, then we don't typically expect * the filesystem to fill in this field. The third is the size of the field, * and the fourth is the type of kauth actions needed. 
* - * So, for all of the ATTR_CMN bits listed in this array, we iterate through + * So, for all of the ATTR_CMN bits listed in this array, we iterate through * them, and check to see if it was both passed down to the filesystem via the * va_active bitfield, and whether or not we expect it to be emitted from * the filesystem. If it wasn't supported, then we un-twiddle the bit and move @@ -837,9 +849,9 @@ getattrlist_fixupattrs(attribute_set_t *asp, struct vnode_attr *vap, int use_for static int setattrlist_setfinderinfo(vnode_t vp, char *fndrinfo, struct vfs_context *ctx) { - uio_t auio; - char uio_buf[UIO_SIZEOF(1)]; - int error; + uio_t auio; + char uio_buf[UIO_SIZEOF(1)]; + int error; if ((auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, uio_buf, sizeof(uio_buf))) == NULL) { error = ENOMEM; @@ -851,10 +863,10 @@ setattrlist_setfinderinfo(vnode_t vp, char *fndrinfo, struct vfs_context *ctx) #if CONFIG_FSE if (error == 0 && need_fsevent(FSE_FINDER_INFO_CHANGED, vp)) { - add_fsevent(FSE_FINDER_INFO_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); + add_fsevent(FSE_FINDER_INFO_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); } #endif - return (error); + return error; } @@ -865,8 +877,8 @@ setattrlist_setfinderinfo(vnode_t vp, char *fndrinfo, struct vfs_context *ctx) static void getattrlist_findnamecomp(const char *mn, const char **np, ssize_t *nl) { - int counting; - const char *cp; + int counting; + const char *cp; /* * We're looking for the last sequence of non / characters, but @@ -891,27 +903,28 @@ getattrlist_findnamecomp(const char *mn, const char **np, ssize_t *nl) } } /* need to close run? */ - if (counting) + if (counting) { *nl = cp - *np; + } } static int getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, - user_addr_t attributeBuffer, size_t bufferSize, uint64_t options, - enum uio_seg segflg, int is_64bit) + user_addr_t attributeBuffer, size_t bufferSize, uint64_t options, + enum uio_seg segflg, int is_64bit) { struct vfs_attr vs; struct vnode_attr va; struct _attrlist_buf ab; - int error; - ssize_t fixedsize, varsize; - const char *cnp = NULL; /* protected by ATTR_CMN_NAME */ - ssize_t cnl = 0; /* protected by ATTR_CMN_NAME */ - int release_str = 0; - mount_t mnt; - int return_valid; - int pack_invalid; + int error; + ssize_t fixedsize, varsize; + const char *cnp = NULL; /* protected by ATTR_CMN_NAME */ + ssize_t cnl = 0; /* protected by ATTR_CMN_NAME */ + int release_str = 0; + mount_t mnt; + int return_valid; + int pack_invalid; ab.base = NULL; VATTR_INIT(&va); @@ -919,7 +932,7 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, vs.f_vol_name = NULL; mnt = vp->v_mount; - + /* Check for special packing semantics */ return_valid = (alp->commonattr & ATTR_CMN_RETURNED_ATTRS); pack_invalid = (options & FSOPT_PACK_INVAL_ATTRS); @@ -930,9 +943,9 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, goto out; } /* Keep invalid attrs from being uninitialized */ - bzero(&vs, sizeof (vs)); + bzero(&vs, sizeof(vs)); /* Generate a valid mask for post processing */ - bcopy(&alp->commonattr, &ab.valid, sizeof (attribute_set_t)); + bcopy(&alp->commonattr, &ab.valid, sizeof(attribute_set_t)); } /* @@ -982,8 +995,9 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, if (!VFSATTR_ALL_SUPPORTED(&vs)) { /* default value for volume subtype */ if (VFSATTR_IS_ACTIVE(&vs, f_fssubtype) - && !VFSATTR_IS_SUPPORTED(&vs, f_fssubtype)) + && !VFSATTR_IS_SUPPORTED(&vs, f_fssubtype)) { VFSATTR_RETURN(&vs, f_fssubtype, 0); + } /* 
* If the file system didn't supply f_signature, then @@ -991,25 +1005,27 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, * that most Carbon file systems should return. */ if (VFSATTR_IS_ACTIVE(&vs, f_signature) - && !VFSATTR_IS_SUPPORTED(&vs, f_signature)) + && !VFSATTR_IS_SUPPORTED(&vs, f_signature)) { VFSATTR_RETURN(&vs, f_signature, 0x4244); + } /* default for block size */ if (VFSATTR_IS_ACTIVE(&vs, f_bsize) - && !VFSATTR_IS_SUPPORTED(&vs, f_bsize)) + && !VFSATTR_IS_SUPPORTED(&vs, f_bsize)) { VFSATTR_RETURN(&vs, f_bsize, mnt->mnt_devblocksize); + } /* default value for volume f_attributes */ if (VFSATTR_IS_ACTIVE(&vs, f_attributes) && !VFSATTR_IS_SUPPORTED(&vs, f_attributes)) { vol_attributes_attr_t *attrp = &vs.f_attributes; - + attrp->validattr.commonattr = VFS_DFLT_ATTR_CMN; attrp->validattr.volattr = VFS_DFLT_ATTR_VOL; attrp->validattr.dirattr = VFS_DFLT_ATTR_DIR; attrp->validattr.fileattr = VFS_DFLT_ATTR_FILE; attrp->validattr.forkattr = VFS_DFLT_ATTR_CMN_EXT; - + attrp->nativeattr.commonattr = 0; attrp->nativeattr.volattr = 0; attrp->nativeattr.dirattr = 0; @@ -1026,14 +1042,13 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, vs.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] = VOL_CAP_INT_ATTRLIST; vs.f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED1] = 0; vs.f_capabilities.capabilities[VOL_CAPABILITIES_RESERVED2] = 0; - + vs.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] = 0; vs.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] = VOL_CAP_INT_ATTRLIST; vs.f_capabilities.valid[VOL_CAPABILITIES_RESERVED1] = 0; vs.f_capabilities.valid[VOL_CAPABILITIES_RESERVED2] = 0; VFSATTR_SET_SUPPORTED(&vs, f_capabilities); - } - else { + } else { /* OR in VOL_CAP_INT_ATTRLIST if f_capabilities is supported */ vs.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_ATTRLIST; vs.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_ATTRLIST; @@ -1046,7 +1061,7 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, if (pack_invalid) { /* Fix up valid mask for post processing */ getvolattrlist_fixupattrs(&ab.valid, &vs); - + /* Force packing of everything asked for */ vs.f_supported = vs.f_active; } else { @@ -1084,12 +1099,13 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, #endif if (VATTR_IS_ACTIVE(&va, va_encoding) && !VATTR_IS_SUPPORTED(&va, va_encoding)) { - if (!return_valid || pack_invalid) + if (!return_valid || pack_invalid) { /* use kTextEncodingMacUnicode */ VATTR_RETURN(&va, va_encoding, 0x7e); - else + } else { /* don't use a default */ alp->commonattr &= ~ATTR_CMN_SCRIPT; + } } } @@ -1099,7 +1115,7 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, varsize = 0; if (alp->commonattr & ATTR_CMN_NAME) { if (vp->v_mount->mnt_vfsstat.f_mntonname[1] == 0x00 && - vp->v_mount->mnt_vfsstat.f_mntonname[0] == '/') { + vp->v_mount->mnt_vfsstat.f_mntonname[0] == '/') { /* special case for boot volume. Use root name when it's * available (which is the volume name) or just the mount on * name of "/". 
we must do this for binary compatibility with @@ -1110,26 +1126,27 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, if (cnp == NULL) { /* just use "/" as name */ cnp = &vp->v_mount->mnt_vfsstat.f_mntonname[0]; - } - else { + } else { release_str = 1; } cnl = strlen(cnp); - } - else { + } else { getattrlist_findnamecomp(vp->v_mount->mnt_vfsstat.f_mntonname, &cnp, &cnl); } - if (alp->commonattr & ATTR_CMN_NAME) + if (alp->commonattr & ATTR_CMN_NAME) { varsize += roundup(cnl + 1, 4); + } } - if (alp->volattr & ATTR_VOL_MOUNTPOINT) + if (alp->volattr & ATTR_VOL_MOUNTPOINT) { varsize += roundup(strlen(mnt->mnt_vfsstat.f_mntonname) + 1, 4); + } if (alp->volattr & ATTR_VOL_NAME) { - vs.f_vol_name[MAXPATHLEN-1] = '\0'; /* Ensure nul-termination */ + vs.f_vol_name[MAXPATHLEN - 1] = '\0'; /* Ensure nul-termination */ varsize += roundup(strlen(vs.f_vol_name) + 1, 4); } - if (alp->volattr & ATTR_VOL_MOUNTEDDEVICE) + if (alp->volattr & ATTR_VOL_MOUNTEDDEVICE) { varsize += roundup(strlen(mnt->mnt_vfsstat.f_mntfromname) + 1, 4); + } /* * Allocate a target buffer for attribute results. @@ -1175,8 +1192,8 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, */ ab.fixedcursor = ab.base + sizeof(uint32_t); if (return_valid) { - ab.fixedcursor += sizeof (attribute_set_t); - bzero(&ab.actual, sizeof (ab.actual)); + ab.fixedcursor += sizeof(attribute_set_t); + bzero(&ab.actual, sizeof(ab.actual)); } ab.varcursor = ab.base + fixedsize; ab.needed = fixedsize + varsize; @@ -1199,8 +1216,9 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, ab.actual.commonattr |= ATTR_CMN_FSID; } if (alp->commonattr & ATTR_CMN_OBJTYPE) { - if (!return_valid || pack_invalid) + if (!return_valid || pack_invalid) { ATTR_PACK4(ab, 0); + } } if (alp->commonattr & ATTR_CMN_OBJTAG) { ATTR_PACK4(ab, vp->v_tag); @@ -1238,8 +1256,9 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, ab.actual.commonattr |= ATTR_CMN_MODTIME; } if (alp->commonattr & ATTR_CMN_CHGTIME) { - if (!return_valid || pack_invalid) + if (!return_valid || pack_invalid) { ATTR_PACK_TIME(ab, vs.f_modify_time, is_64bit); + } } if (alp->commonattr & ATTR_CMN_ACCTIME) { ATTR_PACK_TIME(ab, vs.f_access_time, is_64bit); @@ -1285,39 +1304,51 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, ATTR_PACK4(ab, va.va_flags); ab.actual.commonattr |= ATTR_CMN_FLAGS; } - if (alp->commonattr & ATTR_CMN_USERACCESS) { /* XXX this is expensive and also duplicate work */ - uint32_t perms = 0; + if (alp->commonattr & ATTR_CMN_USERACCESS) { /* XXX this is expensive and also duplicate work */ + uint32_t perms = 0; if (vnode_isdir(vp)) { if (vnode_authorize(vp, NULL, - KAUTH_VNODE_ACCESS | KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_DELETE_CHILD, ctx) == 0) + KAUTH_VNODE_ACCESS | KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_DELETE_CHILD, ctx) == 0) { perms |= W_OK; - if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_LIST_DIRECTORY, ctx) == 0) + } + if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_LIST_DIRECTORY, ctx) == 0) { perms |= R_OK; - if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_SEARCH, ctx) == 0) + } + if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_SEARCH, ctx) == 0) { perms |= X_OK; + } } else { - if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA, ctx) == 0) + if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA, ctx) == 0) { perms |= W_OK; - 
if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_READ_DATA, ctx) == 0) + } + if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_READ_DATA, ctx) == 0) { perms |= R_OK; - if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_EXECUTE, ctx) == 0) + } + if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_EXECUTE, ctx) == 0) { perms |= X_OK; + } } #if CONFIG_MACF - /* + /* * Rather than MAC preceding DAC, in this case we want * the smallest set of permissions granted by both MAC & DAC * checks. We won't add back any permissions. */ - if (perms & W_OK) - if (mac_vnode_check_access(ctx, vp, W_OK) != 0) + if (perms & W_OK) { + if (mac_vnode_check_access(ctx, vp, W_OK) != 0) { perms &= ~W_OK; - if (perms & R_OK) - if (mac_vnode_check_access(ctx, vp, R_OK) != 0) + } + } + if (perms & R_OK) { + if (mac_vnode_check_access(ctx, vp, R_OK) != 0) { perms &= ~R_OK; - if (perms & X_OK) - if (mac_vnode_check_access(ctx, vp, X_OK) != 0) + } + } + if (perms & X_OK) { + if (mac_vnode_check_access(ctx, vp, X_OK) != 0) { perms &= ~X_OK; + } + } #endif /* MAC */ KAUTH_DEBUG("ATTRLIST - returning user access %x", perms); ATTR_PACK4(ab, perms); @@ -1330,16 +1361,21 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, if (pack_invalid) { uint64_t fid = 0; - if (alp->commonattr & ATTR_CMN_EXTENDED_SECURITY) + if (alp->commonattr & ATTR_CMN_EXTENDED_SECURITY) { attrlist_pack_variable(&ab, NULL, 0); - if (alp->commonattr & ATTR_CMN_UUID) + } + if (alp->commonattr & ATTR_CMN_UUID) { ATTR_PACK(&ab, kauth_null_guid); - if (alp->commonattr & ATTR_CMN_GRPUUID) + } + if (alp->commonattr & ATTR_CMN_GRPUUID) { ATTR_PACK(&ab, kauth_null_guid); - if (alp->commonattr & ATTR_CMN_FILEID) + } + if (alp->commonattr & ATTR_CMN_FILEID) { ATTR_PACK8(ab, fid); - if (alp->commonattr & ATTR_CMN_PARENTID) + } + if (alp->commonattr & ATTR_CMN_PARENTID) { ATTR_PACK8(ab, fid); + } } /* volume attributes **************************************************/ @@ -1348,8 +1384,8 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, ATTR_PACK_CAST(&ab, uint32_t, vfs_typenum(mnt)); ab.actual.volattr |= ATTR_VOL_FSTYPE; } - if (alp->volattr & ATTR_VOL_SIGNATURE) { - ATTR_PACK_CAST(&ab, uint32_t, vs.f_signature); + if (alp->volattr & ATTR_VOL_SIGNATURE) { + ATTR_PACK_CAST(&ab, uint32_t, vs.f_signature); ab.actual.volattr |= ATTR_VOL_SIGNATURE; } if (alp->volattr & ATTR_VOL_SIZE) { @@ -1369,7 +1405,7 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, ab.actual.volattr |= ATTR_VOL_MINALLOCATION; } if (alp->volattr & ATTR_VOL_ALLOCATIONCLUMP) { - ATTR_PACK_CAST(&ab, off_t, vs.f_bsize); /* not strictly true */ + ATTR_PACK_CAST(&ab, off_t, vs.f_bsize); /* not strictly true */ ab.actual.volattr |= ATTR_VOL_ALLOCATIONCLUMP; } if (alp->volattr & ATTR_VOL_IOBLOCKSIZE) { @@ -1409,8 +1445,9 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, ab.actual.volattr |= ATTR_VOL_MOUNTEDDEVICE; } if (alp->volattr & ATTR_VOL_ENCODINGSUSED) { - if (!return_valid || pack_invalid) + if (!return_valid || pack_invalid) { ATTR_PACK_CAST(&ab, uint64_t, ~0LL); /* return all encodings */ + } } if (alp->volattr & ATTR_VOL_CAPABILITIES) { /* fix up volume capabilities */ @@ -1467,13 +1504,15 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, ATTR_PACK(&ab, vs.f_attributes); ab.actual.volattr |= ATTR_VOL_ATTRIBUTES; } - + /* diagnostic */ - if (!return_valid && (ab.fixedcursor - ab.base) != fixedsize) + if (!return_valid && (ab.fixedcursor - ab.base) 
!= fixedsize) { panic("packed field size mismatch; allocated %ld but packed %ld for common %08x vol %08x", fixedsize, (long) (ab.fixedcursor - ab.base), alp->commonattr, alp->volattr); - if (!return_valid && ab.varcursor != (ab.base + ab.needed)) + } + if (!return_valid && ab.varcursor != (ab.base + ab.needed)) { panic("packed variable field size mismatch; used %ld but expected %ld", (long) (ab.varcursor - ab.base), ab.needed); + } /* * In the compatible case, we report the smaller of the required and returned sizes. @@ -1492,25 +1531,28 @@ getvolattrlist(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, ab.actual.commonattr &= ab.valid.commonattr; ab.actual.volattr &= ab.valid.volattr; } - bcopy(&ab.actual, ab.base + sizeof(uint32_t), sizeof (ab.actual)); + bcopy(&ab.actual, ab.base + sizeof(uint32_t), sizeof(ab.actual)); } - if (UIO_SEG_IS_USER_SPACE(segflg)) + if (UIO_SEG_IS_USER_SPACE(segflg)) { error = copyout(ab.base, CAST_USER_ADDR_T(attributeBuffer), - ulmin(bufferSize, ab.needed)); - else + ulmin(bufferSize, ab.needed)); + } else { bcopy(ab.base, (void *)attributeBuffer, (size_t)ulmin(bufferSize, ab.needed)); + } out: - if (vs.f_vol_name != NULL) + if (vs.f_vol_name != NULL) { kfree(vs.f_vol_name, MAXPATHLEN); + } if (release_str) { vnode_putname(cnp); } - if (ab.base != NULL) + if (ab.base != NULL) { FREE(ab.base, M_TEMP); + } VFS_DEBUG(ctx, vp, "ATTRLIST - returning %d", error); - return(error); + return error; } /* @@ -1521,14 +1563,14 @@ out: * are in ad. */ static errno_t -attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, +attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, struct _attrlist_buf *abp, struct vnode_attr *vap, int proc_is64, const char *cnp, ssize_t cnl, const char *fullpathptr, ssize_t fullpathlen, int return_valid, int pack_invalid, int vtype, int is_bulk) { - uint32_t perms = 0; - int error = 0; + uint32_t perms = 0; + int error = 0; if ((alp->commonattr & ATTR_CMN_ERROR) && (!return_valid || pack_invalid)) { @@ -1596,9 +1638,9 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, */ if (vap->va_vaflags & VA_64BITOBJIDS) { if (VATTR_IS_SUPPORTED(vap, va_linkid)) { - ATTR_PACK8((*abp), vap->va_linkid); + ATTR_PACK8((*abp), vap->va_linkid); } else { - ATTR_PACK8((*abp), vap->va_fileid); + ATTR_PACK8((*abp), vap->va_fileid); } } else { fsobj_id_t f; @@ -1621,9 +1663,9 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, */ if (vap->va_vaflags & VA_64BITOBJIDS) { if (VATTR_IS_SUPPORTED(vap, va_linkid)) { - ATTR_PACK8((*abp), vap->va_linkid); + ATTR_PACK8((*abp), vap->va_linkid); } else { - ATTR_PACK8((*abp), vap->va_fileid); + ATTR_PACK8((*abp), vap->va_fileid); } } else { fsobj_id_t f; @@ -1649,7 +1691,7 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, abp->actual.commonattr |= ATTR_CMN_PAROBJID; } if (alp->commonattr & ATTR_CMN_SCRIPT) { - if (VATTR_IS_SUPPORTED(vap, va_encoding)) { + if (VATTR_IS_SUPPORTED(vap, va_encoding)) { ATTR_PACK4((*abp), vap->va_encoding); abp->actual.commonattr |= ATTR_CMN_SCRIPT; } else if (!return_valid || pack_invalid) { @@ -1677,36 +1719,42 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, abp->actual.commonattr |= ATTR_CMN_BKUPTIME; } /* - * They are requesting user access, we should obtain this before getting + * They are requesting user access, we should obtain this before getting * the finder info. For some network file systems this is a performance * improvement. 
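
As the comment just above notes, ATTR_CMN_USERACCESS is assembled one right at a time, which is why it is fetched early for network filesystems. From user space, roughly the same W_OK/R_OK/X_OK mask can be approximated with access(2); a sketch follows, where the probed path is only a placeholder:

#include <stdio.h>
#include <unistd.h>

/* build a W_OK/R_OK/X_OK mask the way ATTR_CMN_USERACCESS reports one,
 * probing each right separately as the kernel does with vnode_authorize() */
static int
user_access_mask(const char *path)
{
    int perms = 0;

    if (access(path, R_OK) == 0) {
        perms |= R_OK;
    }
    if (access(path, W_OK) == 0) {
        perms |= W_OK;
    }
    if (access(path, X_OK) == 0) {
        perms |= X_OK;
    }
    return perms;
}

int main(void)
{
    printf("perms=%#x\n", user_access_mask("/tmp"));
    return 0;
}
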
*/ - if (alp->commonattr & ATTR_CMN_USERACCESS) { /* this is expensive */ + if (alp->commonattr & ATTR_CMN_USERACCESS) { /* this is expensive */ if (vp && !is_bulk) { if (vtype == VDIR) { if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | - KAUTH_VNODE_DELETE_CHILD, ctx) == 0) + KAUTH_VNODE_DELETE_CHILD, ctx) == 0) { perms |= W_OK; + } if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | - KAUTH_VNODE_LIST_DIRECTORY, ctx) == 0) + KAUTH_VNODE_LIST_DIRECTORY, ctx) == 0) { perms |= R_OK; + } if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | - KAUTH_VNODE_SEARCH, ctx) == 0) + KAUTH_VNODE_SEARCH, ctx) == 0) { perms |= X_OK; + } } else { if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | - KAUTH_VNODE_WRITE_DATA, ctx) == 0) + KAUTH_VNODE_WRITE_DATA, ctx) == 0) { perms |= W_OK; + } - if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_READ_DATA, ctx) == 0) + if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_READ_DATA, ctx) == 0) { perms |= R_OK; - if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_EXECUTE, ctx) == 0) + } + if (vnode_authorize(vp, NULL, KAUTH_VNODE_ACCESS | KAUTH_VNODE_EXECUTE, ctx) == 0) { perms |= X_OK; + } } } else if (is_bulk && VATTR_IS_SUPPORTED(vap, va_user_access)) { @@ -1714,12 +1762,12 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, } } if (alp->commonattr & ATTR_CMN_FNDRINFO) { - size_t fisize = 32; + size_t fisize = 32; - error = 0; + error = 0; if (vp && !is_bulk) { - uio_t auio; - char uio_buf[UIO_SIZEOF(1)]; + uio_t auio; + char uio_buf[UIO_SIZEOF(1)]; if ((auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, uio_buf, sizeof(uio_buf))) == NULL) { @@ -1730,7 +1778,7 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, fisize); /* fisize may be reset to 0 after this call */ error = vn_getxattr(vp, XATTR_FINDERINFO_NAME, auio, - &fisize, XATTR_NOSECURITY, ctx); + &fisize, XATTR_NOSECURITY, ctx); uio_free(auio); /* @@ -1799,7 +1847,7 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, abp->actual.commonattr |= ATTR_CMN_DOCUMENT_ID; } else if (!return_valid || pack_invalid) { ATTR_PACK4((*abp), 0); - } + } } /* We already obtain the user access, so just fill in the buffer here */ if (alp->commonattr & ATTR_CMN_USERACCESS) { @@ -1810,15 +1858,21 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, * the smallest set of permissions granted by both MAC & * DAC checks. We won't add back any permissions. 
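
The CONFIG_MACF code that follows only ever clears bits: each right already granted by the DAC checks is re-tested with mac_vnode_check_access() and dropped on denial, so the packed mask is the intersection of the two policies. A stand-alone sketch of that narrowing pattern, with a hypothetical policy callback:

#include <stdio.h>
#include <unistd.h>   /* R_OK, W_OK, X_OK */

/* hypothetical second policy: deny writes, allow everything else */
static int
demo_policy(int right)
{
    return (right == W_OK) ? -1 : 0;
}

static int
narrow(int perms, int (*policy)(int))
{
    static const int rights[] = { R_OK, W_OK, X_OK };
    unsigned int i;

    for (i = 0; i < sizeof(rights) / sizeof(rights[0]); i++) {
        /* only ever clear bits, never set them: the result is the
         * intersection of the two policies */
        if ((perms & rights[i]) && policy(rights[i]) != 0) {
            perms &= ~rights[i];
        }
    }
    return perms;
}

int main(void)
{
    printf("%#x\n", narrow(R_OK | W_OK | X_OK, demo_policy)); /* 0x5 */
    return 0;
}
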
*/ - if (perms & W_OK) - if (mac_vnode_check_access(ctx, vp, W_OK) != 0) + if (perms & W_OK) { + if (mac_vnode_check_access(ctx, vp, W_OK) != 0) { perms &= ~W_OK; - if (perms & R_OK) - if (mac_vnode_check_access(ctx, vp, R_OK) != 0) + } + } + if (perms & R_OK) { + if (mac_vnode_check_access(ctx, vp, R_OK) != 0) { perms &= ~R_OK; - if (perms & X_OK) - if (mac_vnode_check_access(ctx, vp, X_OK) != 0) + } + } + if (perms & X_OK) { + if (mac_vnode_check_access(ctx, vp, X_OK) != 0) { perms &= ~X_OK; + } + } } #endif /* MAC */ VFS_DEBUG(ctx, vp, "ATTRLIST - granting perms %d", perms); @@ -1848,7 +1902,7 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, } } if (alp->commonattr & ATTR_CMN_UUID) { - if (VATTR_IS_SUPPORTED(vap, va_uuuid)) { + if (VATTR_IS_SUPPORTED(vap, va_uuuid)) { ATTR_PACK(abp, vap->va_uuuid); abp->actual.commonattr |= ATTR_CMN_UUID; } else if (!return_valid || pack_invalid) { @@ -1871,14 +1925,14 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, ATTR_PACK8((*abp), vap->va_parentid); abp->actual.commonattr |= ATTR_CMN_PARENTID; } - + if (alp->commonattr & ATTR_CMN_FULLPATH) { if (vp) { - attrlist_pack_string (abp, fullpathptr, fullpathlen); + attrlist_pack_string(abp, fullpathptr, fullpathlen); abp->actual.commonattr |= ATTR_CMN_FULLPATH; } } - + if (alp->commonattr & ATTR_CMN_ADDEDTIME) { if (VATTR_IS_SUPPORTED(vap, va_addedtime)) { ATTR_PACK_TIME((*abp), vap->va_addedtime, proc_is64); @@ -1898,7 +1952,7 @@ attr_pack_common(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, } } out: - return (error); + return error; } static errno_t @@ -1923,7 +1977,7 @@ attr_pack_dir(struct vnode *vp, struct attrlist *alp, struct _attrlist_buf *abp, * on vnode. In either case, the directory should * be reported as a mount point. */ - if ((vp->v_flag & VROOT) || vnode_mountedhere(vp)) { + if ((vp->v_flag & VROOT) || vnode_mountedhere(vp)) { mntstat = DIR_MNTSTATUS_MNTPOINT; } else { mntstat = 0; @@ -1993,11 +2047,11 @@ attr_pack_dir(struct vnode *vp, struct attrlist *alp, struct _attrlist_buf *abp, * attempt is made to retrieve them by calling back into the filesystem. 
*/ static errno_t -attr_pack_file(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, +attr_pack_file(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, struct _attrlist_buf *abp, struct vnode_attr *vap, int return_valid, int pack_invalid, int is_bulk) { - size_t rsize = 0; + size_t rsize = 0; uint64_t rlength = 0; uint64_t ralloc = 0; int error = 0; @@ -2009,7 +2063,6 @@ attr_pack_file(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, if (vp && !is_bulk && (alp->fileattr & (ATTR_FILE_TOTALSIZE | ATTR_FILE_ALLOCSIZE | ATTR_FILE_RSRCLENGTH | ATTR_FILE_RSRCALLOCSIZE))) { - error = vn_getxattr(vp, XATTR_RESOURCEFORK_NAME, NULL, &rsize, XATTR_NOSECURITY, ctx); if (error) { @@ -2148,7 +2201,7 @@ attr_pack_file(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, if (VATTR_IS_SUPPORTED(vap, va_data_size)) { ATTR_PACK8((*abp), vap->va_data_size); abp->actual.fileattr |= ATTR_FILE_DATALENGTH; - } else if (VATTR_IS_SUPPORTED(vap, va_total_size)){ + } else if (VATTR_IS_SUPPORTED(vap, va_total_size)) { ATTR_PACK8((*abp), vap->va_total_size); abp->actual.fileattr |= ATTR_FILE_DATALENGTH; } else if (!return_valid || pack_invalid) { @@ -2160,7 +2213,7 @@ attr_pack_file(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, if (VATTR_IS_SUPPORTED(vap, va_data_alloc)) { ATTR_PACK8((*abp), vap->va_data_alloc); abp->actual.fileattr |= ATTR_FILE_DATAALLOCSIZE; - } else if (VATTR_IS_SUPPORTED(vap, va_total_alloc)){ + } else if (VATTR_IS_SUPPORTED(vap, va_total_alloc)) { ATTR_PACK8((*abp), vap->va_total_alloc); abp->actual.fileattr |= ATTR_FILE_DATAALLOCSIZE; } else if (!return_valid || pack_invalid) { @@ -2196,7 +2249,7 @@ attr_pack_file(vfs_context_t ctx, struct vnode *vp, struct attrlist *alp, } } out: - return (error); + return error; } /* @@ -2208,8 +2261,8 @@ out: */ static errno_t attr_pack_common_extended(struct vnode *vp, struct attrlist *alp, - struct _attrlist_buf *abp, const char *relpathptr, ssize_t relpathlen, - struct vnode_attr *vap, int return_valid, int pack_invalid) + struct _attrlist_buf *abp, const char *relpathptr, ssize_t relpathlen, + struct vnode_attr *vap, int return_valid, int pack_invalid) { if (vp && (alp->forkattr & ATTR_CMNEXT_RELPATH)) { attrlist_pack_string(abp, relpathptr, relpathlen); @@ -2229,10 +2282,11 @@ attr_pack_common_extended(struct vnode *vp, struct attrlist *alp, if (alp->forkattr & ATTR_CMNEXT_LINKID) { uint64_t linkid; - if (VATTR_IS_SUPPORTED(vap, va_linkid)) + if (VATTR_IS_SUPPORTED(vap, va_linkid)) { linkid = vap->va_linkid; - else + } else { linkid = vap->va_fileid; + } ATTR_PACK8((*abp), linkid); abp->actual.forkattr |= ATTR_CMNEXT_LINKID; @@ -2255,14 +2309,14 @@ vattr_get_alt_data(vnode_t vp, struct attrlist *alp, struct vnode_attr *vap, /* forget we wanted this */ VATTR_CLEAR_ACTIVE(vap, va_linkid); } - + /* * Many filesystems don't know their parent object id. * If necessary, attempt to derive it from the vnode. 
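
attr_pack_file() above sizes the resource fork by calling vn_getxattr() with a NULL uio, which returns only the attribute's length. getxattr(2) supports the same probe from user space when handed a NULL buffer; a sketch, with a placeholder path:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(void)
{
    /* NULL buffer + zero size turns getxattr into a pure size query,
     * the user-space twin of the kernel's NULL-uio vn_getxattr() probe */
    ssize_t rsize = getxattr("/tmp/example", XATTR_RESOURCEFORK_NAME,
        NULL, 0, 0, 0);

    if (rsize < 0) {
        printf("no resource fork\n");    /* ENOATTR and friends land here */
    } else {
        printf("resource fork: %zd bytes\n", rsize);
    }
    return 0;
}
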
*/ if ((alp->commonattr & (ATTR_CMN_PAROBJID | ATTR_CMN_PARENTID)) && !VATTR_IS_SUPPORTED(vap, va_parentid) && vp && !is_bulk) { - vnode_t dvp; + vnode_t dvp; if ((dvp = vnode_getparent(vp)) != NULLVP) { struct vnode_attr lva; @@ -2317,9 +2371,9 @@ vattr_get_alt_data(vnode_t vp, struct attrlist *alp, struct vnode_attr *vap, static errno_t calc_varsize(vnode_t vp, struct attrlist *alp, struct vnode_attr *vap, - ssize_t *varsizep, char *fullpathptr, ssize_t *fullpathlenp, - char *relpathptr, ssize_t *relpathlenp, const char **vnamep, - const char **cnpp, ssize_t *cnlp) + ssize_t *varsizep, char *fullpathptr, ssize_t *fullpathlenp, + char *relpathptr, ssize_t *relpathlenp, const char **vnamep, + const char **cnpp, ssize_t *cnlp) { int error = 0; @@ -2327,14 +2381,14 @@ calc_varsize(vnode_t vp, struct attrlist *alp, struct vnode_attr *vap, /* We may need to fix up the name attribute if requested */ if (alp->commonattr & ATTR_CMN_NAME) { if (VATTR_IS_SUPPORTED(vap, va_name)) { - vap->va_name[MAXPATHLEN-1] = '\0'; /* Ensure nul-termination */ + vap->va_name[MAXPATHLEN - 1] = '\0'; /* Ensure nul-termination */ *cnpp = vap->va_name; *cnlp = strlen(*cnpp); } else if (vp) { /* Filesystem did not support getting the name */ if (vnode_isvroot(vp)) { if (vp->v_mount->mnt_vfsstat.f_mntonname[1] == 0x00 && - vp->v_mount->mnt_vfsstat.f_mntonname[0] == '/') { + vp->v_mount->mnt_vfsstat.f_mntonname[0] == '/') { /* special case for boot volume. Use root name when it's * available (which is the volume name) or just the mount on * name of "/". we must do this for binary compatibility with @@ -2347,12 +2401,10 @@ calc_varsize(vnode_t vp, struct attrlist *alp, struct vnode_attr *vap, *cnpp = &vp->v_mount->mnt_vfsstat.f_mntonname[0]; } *cnlp = strlen(*cnpp); - } - else { + } else { getattrlist_findnamecomp(vp->v_mount->mnt_vfsstat.f_mntonname, cnpp, cnlp); } - } - else { + } else { *cnpp = *vnamep = vnode_getname(vp); *cnlp = 0; if (*cnpp != NULL) { @@ -2365,7 +2417,7 @@ calc_varsize(vnode_t vp, struct attrlist *alp, struct vnode_attr *vap, *varsizep += roundup(*cnlp + 1, 4); } - /* + /* * Compute the full path to this vnode, if necessary. This attribute is almost certainly * not supported by any filesystem, so build the path to this vnode at this time. */ @@ -2380,7 +2432,7 @@ calc_varsize(vnode_t vp, struct attrlist *alp, struct vnode_attr *vap, goto out; } *fullpathlenp = 0; - if (fullpathptr){ + if (fullpathptr) { *fullpathlenp = strlen(fullpathptr); } *varsizep += roundup(((*fullpathlenp) + 1), 4); @@ -2412,23 +2464,21 @@ calc_varsize(vnode_t vp, struct attrlist *alp, struct vnode_attr *vap, * user-space this is OK. 
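
When va_parentid comes back unsupported, the hunk above falls back to asking the parent vnode for its own file id. A trimmed sketch of that fallback using the same vnode KPIs the patch touches; this is kernel-only code, so it builds only in an xnu or kext context, and error handling is elided:

#include <sys/vnode.h>

/* derive a missing parent id the way vattr_get_alt_data() does above */
static uint64_t
parentid_fallback(vnode_t vp, vfs_context_t ctx)
{
    vnode_t dvp;
    uint64_t parentid = 0;

    if ((dvp = vnode_getparent(vp)) != NULLVP) {
        struct vnode_attr lva;

        VATTR_INIT(&lva);
        VATTR_WANTED(&lva, va_fileid);
        if (vnode_getattr(dvp, &lva, ctx) == 0 &&
            VATTR_IS_SUPPORTED(&lva, va_fileid)) {
            /* the parent's own file id is this vnode's parent id */
            parentid = lva.va_fileid;
        }
        vnode_put(dvp);    /* drop the iocount vnode_getparent took */
    }
    return parentid;
}
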
*/ if ((alp->commonattr & ATTR_CMN_EXTENDED_SECURITY) && - VATTR_IS_SUPPORTED(vap, va_acl) && - (vap->va_acl != NULL)) { - - /* + VATTR_IS_SUPPORTED(vap, va_acl) && + (vap->va_acl != NULL)) { + /* * Since we have a kauth_acl_t (not a kauth_filesec_t), we have to check against * KAUTH_FILESEC_NOACL ourselves - */ + */ if (vap->va_acl->acl_entrycount == KAUTH_FILESEC_NOACL) { *varsizep += roundup((KAUTH_FILESEC_SIZE(0)), 4); - } - else { - *varsizep += roundup ((KAUTH_FILESEC_SIZE(vap->va_acl->acl_entrycount)), 4); + } else { + *varsizep += roundup((KAUTH_FILESEC_SIZE(vap->va_acl->acl_entrycount)), 4); } } out: - return (error); + return error; } static errno_t @@ -2439,12 +2489,12 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, struct _attrlist_buf ab; ssize_t buf_size; size_t copy_size; - ssize_t varsize; + ssize_t varsize; const char *vname = NULL; const char *cnp; ssize_t cnl; char *fullpathptr; - ssize_t fullpathlen; + ssize_t fullpathlen; char *relpathptr; ssize_t relpathlen; int error; @@ -2466,8 +2516,9 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, alloc_local_buf = 0; buf_size = (ssize_t)uio_resid(auio); - if ((buf_size <= 0) || (uio_iovcnt(auio) > 1)) - return (EINVAL); + if ((buf_size <= 0) || (uio_iovcnt(auio) > 1)) { + return EINVAL; + } copy_size = 0; /* Check for special packing semantics */ @@ -2476,7 +2527,7 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, if (pack_invalid) { /* Generate a valid mask for post processing */ - bcopy(&(alp->commonattr), &ab.valid, sizeof (attribute_set_t)); + bcopy(&(alp->commonattr), &ab.valid, sizeof(attribute_set_t)); } /* did we ask for something the filesystem doesn't support? */ @@ -2489,20 +2540,21 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, if (return_valid && pack_invalid) { /* Fix up valid mask for post processing */ getattrlist_fixupattrs(&ab.valid, vap, use_fork); - + /* Force packing of everything asked for */ vap->va_supported = vap->va_active; } else if (return_valid) { /* Adjust the requested attributes */ getattrlist_fixupattrs( - (attribute_set_t *)&(alp->commonattr), vap, use_fork); + (attribute_set_t *)&(alp->commonattr), vap, use_fork); } else { error = EINVAL; } } - if (error) + if (error) { goto out; + } } //if a path is requested, allocate a temporary buffer to build it @@ -2510,7 +2562,7 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, fullpathptr = (char*) kalloc(MAXPATHLEN); if (fullpathptr == NULL) { error = ENOMEM; - VFS_DEBUG(ctx,vp, "ATTRLIST - ERROR: cannot allocate fullpath buffer"); + VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: cannot allocate fullpath buffer"); goto out; } bzero(fullpathptr, MAXPATHLEN); @@ -2521,7 +2573,7 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, relpathptr = (char*) kalloc(MAXPATHLEN); if (relpathptr == NULL) { error = ENOMEM; - VFS_DEBUG(ctx,vp, "ATTRLIST - ERROR: cannot allocate relpath buffer"); + VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: cannot allocate relpath buffer"); goto out; } bzero(relpathptr, MAXPATHLEN); @@ -2531,9 +2583,10 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, * Compute variable-space requirements. */ error = calc_varsize(vp, alp, vap, &varsize, fullpathptr, &fullpathlen, - relpathptr, &relpathlen, &vname, &cnp, &cnl); - if (error) + relpathptr, &relpathlen, &vname, &cnp, &cnl); + if (error) { goto out; + } /* * Allocate a target buffer for attribute results. 
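
Throughout calc_varsize(), every variable-length item, whether name, full path, relative path, or ACL blob, is rounded up to a 4-byte boundary before being added to the variable-region total, which keeps each attrreference-addressed blob aligned. A tiny stand-alone illustration of that accounting:

#include <stdio.h>
#include <string.h>

/* next multiple of y that is >= x, same shape as the kernel's roundup() */
#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    const char *items[] = { "name", "/full/path", "rel/path" };
    size_t varsize = 0;
    size_t i;

    for (i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
        /* +1 for the NUL, then pad to a 4-byte boundary */
        varsize += ROUNDUP(strlen(items[i]) + 1, 4);
    }
    printf("variable region: %zu bytes\n", varsize); /* 8 + 12 + 12 = 32 */
    return 0;
}
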
@@ -2544,7 +2597,7 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, */ ab.allocated = fixedsize + varsize; /* Cast 'allocated' to an unsigned to verify allocation size */ - if ( ((size_t)ab.allocated) > ATTR_MAX_BUFFER) { + if (((size_t)ab.allocated) > ATTR_MAX_BUFFER) { error = ENOMEM; VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: buffer size too large (%d limit %d)", ab.allocated, ATTR_MAX_BUFFER); goto out; @@ -2562,8 +2615,9 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, newlen = (ab.allocated + 7) & ~0x07; /* Align only if enough space for alignment */ - if (newlen <= (uint32_t)buf_size) + if (newlen <= (uint32_t)buf_size) { ab.allocated = newlen; + } } } @@ -2573,7 +2627,7 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, */ if (uio_isuserspace(auio) || (buf_size < ab.allocated)) { MALLOC(ab.base, char *, ab.allocated, M_TEMP, - M_ZERO | M_WAITOK); + M_ZERO | M_WAITOK); alloc_local_buf = 1; } else { /* @@ -2641,8 +2695,8 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, */ ab.fixedcursor = ab.base + sizeof(uint32_t); if (return_valid) { - ab.fixedcursor += sizeof (attribute_set_t); - bzero(&ab.actual, sizeof (ab.actual)); + ab.fixedcursor += sizeof(attribute_set_t); + bzero(&ab.actual, sizeof(ab.actual)); } ab.varcursor = ab.base + fixedsize; ab.needed = ab.allocated; @@ -2668,15 +2722,18 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, vap, return_valid, pack_invalid); } - if (error) + if (error) { goto out; - + } + /* diagnostic */ - if (!return_valid && (ab.fixedcursor - ab.base) != fixedsize) + if (!return_valid && (ab.fixedcursor - ab.base) != fixedsize) { panic("packed field size mismatch; allocated %ld but packed %ld for common %08x vol %08x", fixedsize, (long) (ab.fixedcursor - ab.base), alp->commonattr, alp->volattr); - if (!return_valid && ab.varcursor != (ab.base + ab.needed)) + } + if (!return_valid && ab.varcursor != (ab.base + ab.needed)) { panic("packed variable field size mismatch; used %ld but expected %ld", (long) (ab.varcursor - ab.base), ab.needed); + } /* * In the compatible case, we report the smaller of the required and returned sizes. 
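
As the closing comment of this hunk notes, the compatible case reports the smaller of the required and returned sizes, so a short buffer looks complete to the caller; passing FSOPT_REPORT_FULLSIZE makes the leading length word carry the full required size instead, which is what allows a grow-and-retry loop. A user-space sketch of that loop, where the path and first-guess size are placeholders:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/attr.h>
#include <unistd.h>

int main(void)
{
    struct attrlist al;
    size_t cap = 32;            /* deliberately small first guess */
    char *buf = malloc(cap);

    memset(&al, 0, sizeof(al));
    al.bitmapcount = ATTR_BIT_MAP_COUNT;
    al.commonattr  = ATTR_CMN_NAME;

    while (buf != NULL) {
        uint32_t needed;

        if (getattrlist("/tmp", &al, buf, cap, FSOPT_REPORT_FULLSIZE) != 0) {
            perror("getattrlist");
            break;
        }
        /* the first 4 bytes carry the size the call actually needed */
        memcpy(&needed, buf, sizeof(needed));
        if (needed <= cap) {
            printf("complete result, %u bytes\n", needed);
            break;              /* everything fit */
        }
        /* grow to what the kernel asked for and retry */
        char *nbuf = realloc(buf, needed);
        if (nbuf == NULL) {
            break;
        }
        buf = nbuf;
        cap = needed;
    }
    free(buf);
    return 0;
}
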
@@ -2695,7 +2752,7 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, ab.actual.dirattr &= ab.valid.dirattr; ab.actual.fileattr &= ab.valid.fileattr; } - bcopy(&ab.actual, ab.base + sizeof(uint32_t), sizeof (ab.actual)); + bcopy(&ab.actual, ab.base + sizeof(uint32_t), sizeof(ab.actual)); } copy_size = imin(buf_size, ab.allocated); @@ -2723,15 +2780,19 @@ vfs_attr_pack_internal(vnode_t vp, uio_t auio, struct attrlist *alp, } out: - if (vname) + if (vname) { vnode_putname(vname); - if (fullpathptr) + } + if (fullpathptr) { kfree(fullpathptr, MAXPATHLEN); - if (relpathptr) + } + if (relpathptr) { kfree(relpathptr, MAXPATHLEN); - if (ab.base != NULL && alloc_local_buf) + } + if (ab.base != NULL && alloc_local_buf) { FREE(ab.base, M_TEMP); - return (error); + } + return error; } errno_t @@ -2744,10 +2805,11 @@ vfs_attr_pack(vnode_t vp, uio_t uio, struct attrlist *alp, uint64_t options, struct attrlist orig_al; enum vtype v_type; - if (vp) + if (vp) { v_type = vnode_vtype(vp); - else + } else { v_type = vap->va_objtype; + } orig_al = *alp; orig_active = vap->va_active; @@ -2762,15 +2824,15 @@ vfs_attr_pack(vnode_t vp, uio_t uio, struct attrlist *alp, uint64_t options, goto out; } - error = vfs_attr_pack_internal(vp, uio, alp, - options|FSOPT_REPORT_FULLSIZE, vap, NULL, ctx, 1, v_type, + error = vfs_attr_pack_internal(vp, uio, alp, + options | FSOPT_REPORT_FULLSIZE, vap, NULL, ctx, 1, v_type, fixedsize); VATTR_CLEAR_SUPPORTED_ALL(vap); vap->va_active = orig_active; *alp = orig_al; out: - return (error); + return error; } /* @@ -2780,7 +2842,7 @@ out: * name obtained from some authoritative source (eg. readdir vnop); where * filesystems' getattr vnops do not support ATTR_CMN_NAME, the alt_name will be * used as the ATTR_CMN_NAME attribute returned in vnode_attr.va_name. 
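/*
 * [Editor's note] Hedged illustration of the alt_name contract described
 * above: a caller that already holds an authoritative name (for example
 * from a readdir pass, as readdirattr() does later in this patch) passes
 * it in so the packed ATTR_CMN_NAME stays consistent even when the
 * filesystem's getattr does not fill in va_name. "entry_name", "kern_buf"
 * and "kern_buf_siz" are hypothetical locals.
 */
error = getattrlist_internal(ctx, vp, &al,
    CAST_USER_ADDR_T(kern_buf), kern_buf_siz,
    options | FSOPT_REPORT_FULLSIZE, UIO_SYSSPACE,
    entry_name /* authoritative name */, NOCRED);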
- * + * */ static int getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, @@ -2788,32 +2850,34 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, enum uio_seg segflg, char* authoritative_name, struct ucred *file_cred) { struct vnode_attr va; - kauth_action_t action; - ssize_t fixedsize; - char *va_name; - int proc_is64; - int error; - int return_valid; - int pack_invalid; - int vtype = 0; - uio_t auio; - char uio_buf[ UIO_SIZEOF(1)]; + kauth_action_t action; + ssize_t fixedsize; + char *va_name; + int proc_is64; + int error; + int return_valid; + int pack_invalid; + int vtype = 0; + uio_t auio; + char uio_buf[UIO_SIZEOF(1)]; // must be true for fork attributes to be used as new common attributes const int use_fork = (options & FSOPT_ATTR_CMN_EXTENDED) != 0; - if (bufferSize < sizeof(uint32_t)) - return (ERANGE); + if (bufferSize < sizeof(uint32_t)) { + return ERANGE; + } proc_is64 = proc_is64bit(vfs_context_proc(ctx)); if (segflg == UIO_USERSPACE) { - if (proc_is64) + if (proc_is64) { segflg = UIO_USERSPACE64; - else + } else { segflg = UIO_USERSPACE32; + } } auio = uio_createwithbuffer(1, 0, segflg, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, attributeBuffer, bufferSize); VATTR_INIT(&va); @@ -2830,8 +2894,9 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, #if CONFIG_MACF error = mac_vnode_check_getattrlist(ctx, vp, alp); - if (error) + if (error) { goto out; + } #endif /* MAC */ /* @@ -2848,7 +2913,7 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, } /* handle volume attribute request */ error = getvolattrlist(ctx, vp, alp, attributeBuffer, - bufferSize, options, segflg, proc_is64); + bufferSize, options, segflg, proc_is64); goto out; } @@ -2885,7 +2950,7 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, goto out; } /* Keep invalid attrs from being uninitialized */ - bzero(&va, sizeof (va)); + bzero(&va, sizeof(va)); } /* Pick up the vnode type. If the FS is bad and changes vnode types on us, we @@ -2964,7 +3029,7 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, (void)file_cred; #endif - /* + /* * It we ask for the name, i.e., vname is non null and * we have an authoritative name, then reset va_name is * active and if needed set va_name is supported. @@ -2973,7 +3038,7 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, * to us. We try to deal with that here as well. 
*/ va.va_active = va_active; - if (authoritative_name && va_name) { + if (authoritative_name && va_name) { VATTR_SET_ACTIVE(&va, va_name); if (!(VATTR_IS_SUPPORTED(&va, va_name))) { VATTR_SET_SUPPORTED(&va, va_name); @@ -2981,18 +3046,20 @@ getattrlist_internal(vfs_context_t ctx, vnode_t vp, struct attrlist *alp, } va.va_name = va_name; } - + error = vfs_attr_pack_internal(vp, auio, alp, options, &va, NULL, ctx, 0, vtype, fixedsize); out: - if (va_name) + if (va_name) { FREE_ZONE(va_name, MAXPATHLEN, M_NAMEI); - if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) + } + if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) { kauth_acl_free(va.va_acl); + } VFS_DEBUG(ctx, vp, "ATTRLIST - returning %d", error); - return(error); + return error; } int @@ -3009,32 +3076,38 @@ fgetattrlist(proc_t p, struct fgetattrlist_args *uap, __unused int32_t *retval) fp = NULL; error = 0; - if ((error = file_vnode(uap->fd, &vp)) != 0) - return (error); + if ((error = file_vnode(uap->fd, &vp)) != 0) { + return error; + } if ((error = fp_lookup(p, uap->fd, &fp, 0)) != 0 || - (error = vnode_getwithref(vp)) != 0) + (error = vnode_getwithref(vp)) != 0) { + vp = NULL; goto out; + } /* * Fetch the attribute request. */ error = copyin(uap->alist, &al, sizeof(al)); - if (error) + if (error) { goto out; + } /* Default to using the vnode's name. */ error = getattrlist_internal(ctx, vp, &al, uap->attributeBuffer, - uap->bufferSize, uap->options, - (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : \ - UIO_USERSPACE32), NULL, - fp->f_fglob->fg_cred); + uap->bufferSize, uap->options, + (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : \ + UIO_USERSPACE32), NULL, + fp->f_fglob->fg_cred); out: - if (fp) + if (fp) { fp_drop(p, uap->fd, fp, 0); - if (vp) + } + if (vp) { vnode_put(vp); + } file_drop(uap->fd); return error; @@ -3054,8 +3127,9 @@ getattrlistat_internal(vfs_context_t ctx, user_addr_t path, /* * Look up the file. */ - if (!(options & FSOPT_NOFOLLOW)) + if (!(options & FSOPT_NOFOLLOW)) { nameiflags |= FOLLOW; + } nameiflags |= AUDITVNPATH1; NDINIT(&nd, LOOKUP, OP_GETATTR, nameiflags, pathsegflg, @@ -3063,18 +3137,19 @@ getattrlistat_internal(vfs_context_t ctx, user_addr_t path, error = nameiat(&nd, fd); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; error = getattrlist_internal(ctx, vp, alp, attributeBuffer, bufferSize, options, segflg, NULL, NOCRED); - + /* Retain the namei reference until the getattrlist completes. */ nameidone(&nd); vnode_put(vp); - return (error); + return error; } int @@ -3090,13 +3165,14 @@ getattrlist(proc_t p, struct getattrlist_args *uap, __unused int32_t *retval) * Fetch the attribute request. */ error = copyin(uap->alist, &al, sizeof(al)); - if (error) + if (error) { return error; + } - return (getattrlistat_internal(vfs_context_current(), - CAST_USER_ADDR_T(uap->path), &al, - CAST_USER_ADDR_T(uap->attributeBuffer), uap->bufferSize, - (uint64_t)uap->options, segflg, segflg, AT_FDCWD)); + return getattrlistat_internal(vfs_context_current(), + CAST_USER_ADDR_T(uap->path), &al, + CAST_USER_ADDR_T(uap->attributeBuffer), uap->bufferSize, + (uint64_t)uap->options, segflg, segflg, AT_FDCWD); } int @@ -3112,13 +3188,14 @@ getattrlistat(proc_t p, struct getattrlistat_args *uap, __unused int32_t *retval * Fetch the attribute request. 
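/*
 * [Editor's note] For orientation, a hedged userspace sketch of the call
 * these wrappers service. Every getattrlist(2) reply begins with a length
 * word; requesting ATTR_CMN_OBJTYPE appends an fsobj_type_t. The struct
 * name and packing follow the man-page convention and are illustrative
 * only.
 */
#include <sys/attr.h>
#include <unistd.h>

struct objtype_reply {
	uint32_t     len;    /* bytes used, always the first field */
	fsobj_type_t type;   /* VREG, VDIR, VLNK, ... */
} __attribute__((aligned(4), packed));

static int
lookup_objtype(const char *path)
{
	struct attrlist al = {
		.bitmapcount = ATTR_BIT_MAP_COUNT,
		.commonattr  = ATTR_CMN_OBJTYPE,
	};
	struct objtype_reply r;

	if (getattrlist(path, &al, &r, sizeof(r), 0) != 0) {
		return -1;      /* errno set by the kernel paths above */
	}
	return (int)r.type;
}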
*/ error = copyin(uap->alist, &al, sizeof(al)); - if (error) + if (error) { return error; + } - return (getattrlistat_internal(vfs_context_current(), - CAST_USER_ADDR_T(uap->path), &al, - CAST_USER_ADDR_T(uap->attributeBuffer), uap->bufferSize, - (uint64_t)uap->options, segflg, segflg, uap->fd)); + return getattrlistat_internal(vfs_context_current(), + CAST_USER_ADDR_T(uap->path), &al, + CAST_USER_ADDR_T(uap->attributeBuffer), uap->bufferSize, + (uint64_t)uap->options, segflg, segflg, uap->fd); } /* @@ -3204,10 +3281,10 @@ retry_alloc: /* Save offsets */ fvd->fv_soff = fvd->fv_eoff; fvd->fv_eoff = uio_offset(rdir_uio); - /* Save eofflag state but don't return EOF for this time.*/ + /* Save eofflag state but don't return EOF for this time.*/ fvd->fv_eofflag = eofflag; eofflag = 0; - /* Reset buffer parameters */ + /* Reset buffer parameters */ fvd->fv_bufsiz = rdirbufused; fvd->fv_bufdone = 0; bzero(fvd->fv_buf + rdirbufused, rdirbufsiz - rdirbufused); @@ -3258,7 +3335,7 @@ retry_alloc: *eofflagp = eofflag; - return (error); + return error; } /* @@ -3282,16 +3359,16 @@ get_direntry(vfs_context_t ctx, vnode_t dvp, struct fd_vn_data *fvd, if (!fvd->fv_bufsiz) { error = refill_fd_direntries(ctx, dvp, fvd, &eofflag); if (error) { - return (error); + return error; } if (eofflag) { *eofflagp = eofflag; - return (error); + return error; } } *dpp = (struct direntry *)(fvd->fv_buf + fvd->fv_bufdone); - return (error); + return error; } /* @@ -3426,8 +3503,8 @@ get_error_attributes(vnode_t vp, struct attrlist *alp, uint64_t options, * Initialise the value for ATTR_CMN_RETURNED_ATTRS and leave space * Leave space for filling in its value here at the end. */ - bzero(&ab.actual, sizeof (ab.actual)); - ab.fixedcursor += sizeof (attribute_set_t); + bzero(&ab.actual, sizeof(ab.actual)); + ab.fixedcursor += sizeof(attribute_set_t); ab.allocated = ab.needed; @@ -3451,7 +3528,7 @@ get_error_attributes(vnode_t vp, struct attrlist *alp, uint64_t options, * ATTR_CMN_NAME. */ ab.actual.commonattr |= ATTR_CMN_NAME | ATTR_CMN_RETURNED_ATTRS; - bcopy(&ab.actual, ab.base + sizeof(uint32_t), sizeof (ab.actual)); + bcopy(&ab.actual, ab.base + sizeof(uint32_t), sizeof(ab.actual)); out: return; } @@ -3482,7 +3559,7 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, *eofflagp = 0; if (uio_iovcnt(auio) > 1) { - return (EINVAL); + return EINVAL; } /* @@ -3494,7 +3571,7 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, kern_attr_buf_siz = ATTR_MAX_BUFFER; } else if (kern_attr_buf_siz == 0) { /* Nothing to do */ - return (error); + return error; } MALLOC(kern_attr_buf, caddr_t, kern_attr_buf_siz, M_TEMP, M_WAITOK); @@ -3548,17 +3625,17 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, /* * We have an iocount on the directory already. - * + * * Note that we supply NOCROSSMOUNT to the namei call as we attempt to acquire * a vnode for this particular entry. This is because the native call will * (likely) attempt to emit attributes based on its own metadata in order to avoid * creating vnodes where posssible. If the native call is not going to walk * up the vnode mounted-on chain in order to find the top-most mount point, then we - * should not either in this emulated readdir+getattrlist() approach. We + * should not either in this emulated readdir+getattrlist() approach. We * will be responsible for setting DIR_MNTSTATUS_MNTPOINT on that directory that - * contains a mount point. + * contains a mount point. 
*/ - NDINIT(&nd, LOOKUP, OP_GETATTR, (AUDITVNPATH1 | USEDVP | NOCROSSMOUNT), + NDINIT(&nd, LOOKUP, OP_GETATTR, (AUDITVNPATH1 | USEDVP | NOCROSSMOUNT), UIO_SYSSPACE, CAST_USER_ADDR_T(name_buffer), ctx); nd.ni_dvp = dvp; @@ -3581,7 +3658,7 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, error = getattrlist_internal(ctx, vp, &al, CAST_USER_ADDR_T(kern_attr_buf), kern_attr_buf_siz, - options | FSOPT_REPORT_FULLSIZE, UIO_SYSSPACE, + options | FSOPT_REPORT_FULLSIZE, UIO_SYSSPACE, CAST_DOWN_EXPLICIT(char *, name_buffer), NOCRED); @@ -3684,11 +3761,11 @@ readdirattr(vnode_t dvp, struct fd_vn_data *fvd, uio_t auio, */ uio_setoffset(auio, fvd->fv_eoff); - return (error); + return error; } /* - *int getattrlistbulk(int dirfd, struct attrlist *alist, void *attributeBuffer, + * int getattrlistbulk(int dirfd, struct attrlist *alist, void *attributeBuffer, * size_t bufferSize, uint64_t options) * * Gets directory entries alongwith their attributes in the same way @@ -3700,7 +3777,7 @@ int getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) { struct attrlist al; - vnode_t dvp; + vnode_t dvp = NULLVP; struct fileproc *fp; struct fd_vn_data *fvdata; vfs_context_t ctx; @@ -3708,7 +3785,7 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) enum uio_seg segflg; int count; uio_t auio = NULL; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; kauth_action_t action; int eofflag; uint64_t options; @@ -3717,8 +3794,9 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) *retval = 0; error = fp_getfvp(p, uap->dirfd, &fp, &dvp); - if (error) - return (error); + if (error) { + return error; + } count = 0; fvdata = NULL; @@ -3729,9 +3807,10 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) if ((fp->f_fglob->fg_flag & FREAD) == 0) { /* - AUDIT_ARG(vnpath_withref, dvp, ARG_VNODE1); - */ + * AUDIT_ARG(vnpath_withref, dvp, ARG_VNODE1); + */ error = EBADF; + dvp = NULLVP; goto out; } @@ -3750,8 +3829,9 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) /* switch directory to snapshot directory */ error = vnode_get_snapdir(dvp, &snapdvp, ctx); - if (error) + if (error) { goto out; + } vnode_put(dvp); dvp = snapdvp; } @@ -3763,13 +3843,14 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) #if CONFIG_MACF error = mac_file_check_change_offset(vfs_context_ucred(ctx), - fp->f_fglob); - if (error) + fp->f_fglob); + if (error) { goto out; + } #endif /* * XXX : Audit Support - *AUDIT_ARG(vnpath, dvp, ARG_VNODE1); + * AUDIT_ARG(vnpath, dvp, ARG_VNODE1); */ options = uap->options | FSOPT_ATTR_CMN_EXTENDED; @@ -3798,9 +3879,10 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) * they need SEARCH as well. 
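/*
 * [Editor's note] Hedged sketch of the userspace side of
 * getattrlistbulk(2): the return value is the number of entries packed
 * (0 at end of directory, -1 on error), and each packed entry leads with
 * its own length word, so the walk steps by that length. The attrlist
 * passed in must request ATTR_CMN_RETURNED_ATTRS; attribute decoding is
 * elided.
 */
#include <stdint.h>
#include <string.h>
#include <sys/attr.h>
#include <unistd.h>

static int
walk_dir(int dirfd, struct attrlist *al)
{
	char buf[8192];         /* hypothetical reply buffer */
	int n;

	while ((n = getattrlistbulk(dirfd, al, buf, sizeof(buf), 0)) > 0) {
		char *entry = buf;
		for (int i = 0; i < n; i++) {
			uint32_t entlen;
			memcpy(&entlen, entry, sizeof(entlen)); /* leading length word */
			/* ... decode this entry's attributes here ... */
			entry += entlen;
		}
	}
	return n;               /* 0: directory exhausted, -1: errno set */
}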
*/ action = KAUTH_VNODE_LIST_DIRECTORY; - if ((al.commonattr & ~ATTR_CMN_NAME) || al.fileattr || al.dirattr) + if ((al.commonattr & ~ATTR_CMN_NAME) || al.fileattr || al.dirattr) { action |= KAUTH_VNODE_SEARCH; - + } + error = vnode_authorize(dvp, NULL, action, ctx); if (error) { goto out; @@ -3821,8 +3903,9 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) */ if (!fp->f_fglob->fg_offset) { fvdata->fv_offset = 0; - if (fvdata->fv_buf) + if (fvdata->fv_buf) { FREE(fvdata->fv_buf, M_FD_DIRBUF); + } fvdata->fv_buf = NULL; fvdata->fv_bufsiz = 0; fvdata->fv_bufdone = 0; @@ -3847,7 +3930,7 @@ getattrlistbulk(proc_t p, struct getattrlistbulk_args *uap, int32_t *retval) (ATTR_CMN_UUID | ATTR_CMN_GRPUUID | ATTR_CMN_EXTENDED_SECURITY)) || !(al.commonattr & ATTR_CMN_OBJTYPE)) { error = ENOTSUP; - } else { + } else { struct vnode_attr va; char *va_name; @@ -3926,36 +4009,37 @@ out: file_drop(uap->dirfd); - return (error); + return error; } static int attrlist_unpack_fixed(char **cursor, char *end, void *buf, ssize_t size) { /* make sure we have enough source data */ - if ((*cursor) + size > end) - return(EINVAL); + if ((*cursor) + size > end) { + return EINVAL; + } bcopy(*cursor, buf, size); *cursor += size; - return(0); + return 0; } -#define ATTR_UNPACK(v) do {if ((error = attrlist_unpack_fixed(&cursor, bufend, &v, sizeof(v))) != 0) goto out;} while(0); -#define ATTR_UNPACK_CAST(t, v) do { t _f; ATTR_UNPACK(_f); v = _f;} while(0) -#define ATTR_UNPACK_TIME(v, is64) \ - do { \ - if (is64) { \ - struct user64_timespec us; \ - ATTR_UNPACK(us); \ - v.tv_sec = us.tv_sec; \ - v.tv_nsec = us.tv_nsec; \ - } else { \ - struct user32_timespec us; \ - ATTR_UNPACK(us); \ - v.tv_sec = us.tv_sec; \ - v.tv_nsec = us.tv_nsec; \ - } \ +#define ATTR_UNPACK(v) do {if ((error = attrlist_unpack_fixed(&cursor, bufend, &v, sizeof(v))) != 0) goto out;} while(0); +#define ATTR_UNPACK_CAST(t, v) do { t _f; ATTR_UNPACK(_f); v = _f;} while(0) +#define ATTR_UNPACK_TIME(v, is64) \ + do { \ + if (is64) { \ + struct user64_timespec us; \ + ATTR_UNPACK(us); \ + v.tv_sec = us.tv_sec; \ + v.tv_nsec = us.tv_nsec; \ + } else { \ + struct user32_timespec us; \ + ATTR_UNPACK(us); \ + v.tv_sec = us.tv_sec; \ + v.tv_nsec = us.tv_nsec; \ + } \ } while(0) @@ -3968,10 +4052,10 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con struct attrlist al; struct vnode_attr va; struct attrreference ar; - kauth_action_t action; - char *user_buf, *cursor, *bufend, *fndrinfo, *cp, *volname; - int proc_is64, error; - uint32_t nace; + kauth_action_t action; + char *user_buf, *cursor, *bufend, *fndrinfo, *cp, *volname; + int proc_is64, error; + uint32_t nace; kauth_filesec_t rfsec; user_buf = NULL; @@ -3980,12 +4064,13 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con error = 0; proc_is64 = proc_is64bit(p); VATTR_INIT(&va); - + /* * Fetch the attribute set and validate. */ - if ((error = copyin(uap->alist, (caddr_t) &al, sizeof (al)))) + if ((error = copyin(uap->alist, (caddr_t) &al, sizeof(al)))) { goto out; + } if (al.bitmapcount != ATTR_BIT_MAP_COUNT) { error = EINVAL; goto out; @@ -4035,14 +4120,14 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con * returns a NULL pointer, which would cause setattrlist to return ENOMEM. 
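/*
 * [Editor's note] The ATTR_UNPACK* macros above all bottom out in the
 * bounds-checked copy of attrlist_unpack_fixed(). A monomorphic
 * restatement for readability, assuming the surrounding kernel headers
 * (helper name illustrative):
 */
static int
unpack_u32(char **cursor, char *bufend, uint32_t *out)
{
	if (*cursor + sizeof(*out) > bufend) {
		return EINVAL;  /* caller's buffer ran out of source data */
	}
	bcopy(*cursor, out, sizeof(*out));
	*cursor += sizeof(*out);
	return 0;
}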
*/ if (al.commonattr == 0 && - (al.volattr & ~ATTR_VOL_INFO) == 0 && - al.dirattr == 0 && - al.fileattr == 0 && - al.forkattr == 0) { + (al.volattr & ~ATTR_VOL_INFO) == 0 && + al.dirattr == 0 && + al.fileattr == 0 && + al.forkattr == 0) { error = 0; goto out; } - + /* * Make the naive assumption that the caller has supplied a reasonable buffer * size. We could be more careful by pulling in the fixed-size region, checking @@ -4071,8 +4156,9 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con #if CONFIG_MACF error = mac_vnode_check_setattrlist(ctx, vp, &al); - if (error) + if (error) { goto out; + } #endif /* MAC */ /* @@ -4132,12 +4218,12 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con ATTR_UNPACK(va.va_flags); VATTR_SET_ACTIVE(&va, va_flags); #if CONFIG_MACF - if ((error = mac_vnode_check_setflags(ctx, vp, va.va_flags)) != 0) + if ((error = mac_vnode_check_setflags(ctx, vp, va.va_flags)) != 0) { goto out; + } #endif } if (al.commonattr & ATTR_CMN_EXTENDED_SECURITY) { - /* * We are (for now) passed a kauth_filesec_t, but all we want from * it is the ACL. @@ -4152,18 +4238,19 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con cp += ar.attr_dataoffset; rfsec = (kauth_filesec_t)cp; - if (((((char *)rfsec) + KAUTH_FILESEC_SIZE(0)) > bufend) || /* no space for acl */ + if (((((char *)rfsec) + KAUTH_FILESEC_SIZE(0)) > bufend) || /* no space for acl */ (rfsec->fsec_magic != KAUTH_FILESEC_MAGIC) || /* bad magic */ (KAUTH_FILESEC_COPYSIZE(rfsec) != ar.attr_length) || /* size does not match */ - ((cp + KAUTH_FILESEC_COPYSIZE(rfsec)) > bufend)) { /* ACEs overrun buffer */ + ((cp + KAUTH_FILESEC_COPYSIZE(rfsec)) > bufend)) { /* ACEs overrun buffer */ error = EINVAL; VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: bad ACL supplied", ar.attr_length); goto out; } nace = rfsec->fsec_entrycount; - if (nace == KAUTH_FILESEC_NOACL) + if (nace == KAUTH_FILESEC_NOACL) { nace = 0; - if (nace > KAUTH_ACL_MAX_ENTRIES) { /* ACL size invalid */ + } + if (nace > KAUTH_ACL_MAX_ENTRIES) { /* ACL size invalid */ error = EINVAL; VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: bad ACL supplied"); goto out; @@ -4173,8 +4260,7 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con /* deleting ACL */ VATTR_SET(&va, va_acl, NULL); } else { - - if (nace > KAUTH_ACL_MAX_ENTRIES) { /* ACL size invalid */ + if (nace > KAUTH_ACL_MAX_ENTRIES) { /* ACL size invalid */ error = EINVAL; VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: supplied ACL is too large"); goto out; @@ -4204,11 +4290,11 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con if (al.volattr & ATTR_VOL_INFO) { if (al.volattr & ATTR_VOL_NAME) { volname = cursor; - ATTR_UNPACK(ar); + ATTR_UNPACK(ar); /* attr_length cannot be 0! 
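/*
 * [Editor's note] The four ACL sanity checks above, restated as a single
 * predicate. Parameter names mirror the locals in the hunk; the helper
 * itself is illustrative and not part of the patch.
 */
static int
filesec_is_sane(kauth_filesec_t rfsec, char *cp, char *bufend, uint32_t attr_length)
{
	return ((char *)rfsec) + KAUTH_FILESEC_SIZE(0) <= bufend && /* header fits */
	       rfsec->fsec_magic == KAUTH_FILESEC_MAGIC &&          /* magic intact */
	       KAUTH_FILESEC_COPYSIZE(rfsec) == attr_length &&      /* declared == supplied */
	       cp + KAUTH_FILESEC_COPYSIZE(rfsec) <= bufend;        /* ACEs stay in bounds */
}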
*/ if ((ar.attr_dataoffset < 0) || (ar.attr_length == 0) || - (ar.attr_length > uap->bufferSize) || - (uap->bufferSize - ar.attr_length < (unsigned)ar.attr_dataoffset)) { + (ar.attr_length > uap->bufferSize) || + (uap->bufferSize - ar.attr_length < (unsigned)ar.attr_dataoffset)) { VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: bad offset supplied (2) ", ar.attr_dataoffset); error = EINVAL; goto out; @@ -4289,8 +4375,9 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con #if CONFIG_MACF mac_vnode_notify_setattrlist(ctx, vp, &al); - if (VATTR_IS_ACTIVE(&va, va_flags)) + if (VATTR_IS_ACTIVE(&va, va_flags)) { mac_vnode_notify_setflags(ctx, vp, va.va_flags); + } #endif /* @@ -4301,8 +4388,9 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con if (vp->v_tag == VT_HFS) { #define HFS_SET_BOOT_INFO (FCNTL_FS_SPECIFIC_BASE + 0x00005) error = VNOP_IOCTL(vp, HFS_SET_BOOT_INFO, (caddr_t)fndrinfo, 0, ctx); - if (error != 0) + if (error != 0) { goto out; + } } else { /* XXX should never get here */ } @@ -4311,29 +4399,29 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con } } - /* + /* * Set the volume name, if we have one */ - if (volname != NULL) - { + if (volname != NULL) { struct vfs_attr vs; - + VFSATTR_INIT(&vs); - - vs.f_vol_name = volname; /* References the setattrlist buffer directly */ + + vs.f_vol_name = volname; /* References the setattrlist buffer directly */ VFSATTR_WANTED(&vs, f_vol_name); - + #if CONFIG_MACF error = mac_mount_check_setattr(ctx, vp->v_mount, &vs); - if (error != 0) + if (error != 0) { goto out; + } #endif if ((error = vfs_setattr(vp->v_mount, &vs, ctx)) != 0) { VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: setting volume name failed"); goto out; } - + if (!VFSATTR_ALL_SUPPORTED(&vs)) { error = EINVAL; VFS_DEBUG(ctx, vp, "ATTRLIST - ERROR: could not set volume name"); @@ -4342,12 +4430,13 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con } /* all done and successful */ - + out: - if (user_buf != NULL) + if (user_buf != NULL) { FREE(user_buf, M_TEMP); + } VFS_DEBUG(ctx, vp, "ATTRLIST - set returning %d", error); - return(error); + return error; } int @@ -4355,8 +4444,8 @@ setattrlist(proc_t p, struct setattrlist_args *uap, __unused int32_t *retval) { struct vfs_context *ctx; struct nameidata nd; - vnode_t vp = NULL; - u_long nameiflags; + vnode_t vp = NULL; + u_long nameiflags; int error = 0; ctx = vfs_context_current(); @@ -4365,18 +4454,21 @@ setattrlist(proc_t p, struct setattrlist_args *uap, __unused int32_t *retval) * Look up the file. */ nameiflags = AUDITVNPATH1; - if ((uap->options & FSOPT_NOFOLLOW) == 0) + if ((uap->options & FSOPT_NOFOLLOW) == 0) { nameiflags |= FOLLOW; + } NDINIT(&nd, LOOKUP, OP_SETATTR, nameiflags, UIO_USERSPACE, uap->path, ctx); - if ((error = namei(&nd)) != 0) + if ((error = namei(&nd)) != 0) { goto out; + } vp = nd.ni_vp; nameidone(&nd); error = setattrlist_internal(vp, uap, p, ctx); out: - if (vp != NULL) + if (vp != NULL) { vnode_put(vp); + } return error; } @@ -4397,11 +4489,13 @@ setattrlistat(proc_t p, struct setattrlistat_args *uap, __unused int32_t *retval * Look up the file. 
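/*
 * [Editor's note] Minimal sketch of the volume-rename path above: the new
 * name is handed to the filesystem through a struct vfs_attr, and the
 * request only counts as successful if the filesystem consumed every
 * wanted field. "mp" and "newname" are hypothetical.
 */
struct vfs_attr vs;

VFSATTR_INIT(&vs);
vs.f_vol_name = newname;        /* borrowed pointer, not copied */
VFSATTR_WANTED(&vs, f_vol_name);
error = vfs_setattr(mp, &vs, ctx);
if (error == 0 && !VFSATTR_ALL_SUPPORTED(&vs)) {
	error = EINVAL;         /* filesystem ignored the rename */
}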
*/ nameiflags = AUDITVNPATH1; - if (!(uap->options & FSOPT_NOFOLLOW)) + if (!(uap->options & FSOPT_NOFOLLOW)) { nameiflags |= FOLLOW; + } NDINIT(&nd, LOOKUP, OP_SETATTR, nameiflags, UIO_USERSPACE, uap->path, ctx); - if ((error = nameiat(&nd, uap->fd)) != 0) + if ((error = nameiat(&nd, uap->fd)) != 0) { goto out; + } vp = nd.ni_vp; nameidone(&nd); @@ -4413,27 +4507,29 @@ setattrlistat(proc_t p, struct setattrlistat_args *uap, __unused int32_t *retval error = setattrlist_internal(vp, &ap, p, ctx); out: - if (vp) + if (vp) { vnode_put(vp); - return (error); + } + return error; } int fsetattrlist(proc_t p, struct fsetattrlist_args *uap, __unused int32_t *retval) { struct vfs_context *ctx; - vnode_t vp = NULL; - int error; + vnode_t vp = NULL; + int error; struct setattrlist_args ap; ctx = vfs_context_current(); - if ((error = file_vnode(uap->fd, &vp)) != 0) - return (error); + if ((error = file_vnode(uap->fd, &vp)) != 0) { + return error; + } if ((error = vnode_getwithref(vp)) != 0) { file_drop(uap->fd); - return(error); + return error; } ap.path = 0; @@ -4444,9 +4540,9 @@ fsetattrlist(proc_t p, struct fsetattrlist_args *uap, __unused int32_t *retval) error = setattrlist_internal(vp, &ap, p, ctx); file_drop(uap->fd); - if (vp != NULL) + if (vp != NULL) { vnode_put(vp); + } return error; } - diff --git a/bsd/vfs/vfs_bio.c b/bsd/vfs/vfs_bio.c index d26613ce8..8ba4e78d2 100644 --- a/bsd/vfs/vfs_bio.c +++ b/bsd/vfs/vfs_bio.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -94,12 +94,12 @@ #include #include -#include /* fslog_io_error() */ -#include /* dk_error_description_t */ +#include /* fslog_io_error() */ +#include /* dk_error_description_t */ #include #include -#include /* thread_block() */ +#include /* thread_block() */ #include #include @@ -112,22 +112,22 @@ #include -int bcleanbuf(buf_t bp, boolean_t discard); -static int brecover_data(buf_t bp); +int bcleanbuf(buf_t bp, boolean_t discard); +static int brecover_data(buf_t bp); static boolean_t incore(vnode_t vp, daddr64_t blkno); /* timeout is in msecs */ -static buf_t getnewbuf(int slpflag, int slptimeo, int *queue); -static void bremfree_locked(buf_t bp); -static void buf_reassign(buf_t bp, vnode_t newvp); -static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo); -static int buf_iterprepare(vnode_t vp, struct buflists *, int flags); -static void buf_itercomplete(vnode_t vp, struct buflists *, int flags); +static buf_t getnewbuf(int slpflag, int slptimeo, int *queue); +static void bremfree_locked(buf_t bp); +static void buf_reassign(buf_t bp, vnode_t newvp); +static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo); +static int buf_iterprepare(vnode_t vp, struct buflists *, int flags); +static void buf_itercomplete(vnode_t vp, struct buflists *, int flags); static boolean_t buffer_cache_gc(int); -static buf_t buf_brelse_shadow(buf_t bp); -static void buf_free_meta_store(buf_t bp); +static buf_t buf_brelse_shadow(buf_t bp); +static void buf_free_meta_store(buf_t bp); -static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy, - uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv); +static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy, + uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv); int bdwrite_internal(buf_t, int); @@ -135,23 +135,23 @@ int bdwrite_internal(buf_t, int); extern void disk_conditioner_delay(buf_t, int, int, uint64_t); /* zone allocated buffer headers */ -static void bufzoneinit(void); -static void bcleanbuf_thread_init(void); -static void bcleanbuf_thread(void); +static void bufzoneinit(void); +static void bcleanbuf_thread_init(void); +static void bcleanbuf_thread(void); -static zone_t buf_hdr_zone; -static int buf_hdr_count; +static zone_t buf_hdr_zone; +static int buf_hdr_count; /* * Definitions for the buffer hash lists. */ -#define BUFHASH(dvp, lbn) \ +#define BUFHASH(dvp, lbn) \ (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash]) -LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash; -u_long bufhash; +LIST_HEAD(bufhashhdr, buf) * bufhashtbl, invalhash; +u_long bufhash; -static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp); +static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp); /* Definitions for the buffer stats. 
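/*
 * [Editor's note] BUFHASH above indexes a power-of-two hash table;
 * "bufhash" holds the mask (table size - 1), so vnode identity and
 * logical block number together pick the chain. Function form of the
 * same computation (name illustrative):
 */
static inline struct bufhashhdr *
buf_hash_bucket(vnode_t dvp, daddr64_t lbn)
{
	return &bufhashtbl[((long)dvp / sizeof(*dvp) + (int)lbn) & bufhash];
}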
*/ struct bufstats bufstats; @@ -168,12 +168,12 @@ static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES]; static int needbuffer; static int need_iobuffer; -static lck_grp_t *buf_mtx_grp; -static lck_attr_t *buf_mtx_attr; +static lck_grp_t *buf_mtx_grp; +static lck_attr_t *buf_mtx_attr; static lck_grp_attr_t *buf_mtx_grp_attr; -static lck_mtx_t *iobuffer_mtxp; -static lck_mtx_t *buf_mtxp; -static lck_mtx_t *buf_gc_callout; +static lck_mtx_t *iobuffer_mtxp; +static lck_mtx_t *buf_mtxp; +static lck_mtx_t *buf_gc_callout; static int buf_busycount; @@ -188,42 +188,42 @@ fs_buffer_cache_gc_callout_t fs_callouts[FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE] = static __inline__ int buf_timestamp(void) { - struct timeval t; + struct timeval t; microuptime(&t); - return (t.tv_sec); + return t.tv_sec; } /* * Insq/Remq for the buffer free lists. */ -#define binsheadfree(bp, dp, whichq) do { \ - TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ - } while (0) +#define binsheadfree(bp, dp, whichq) do { \ + TAILQ_INSERT_HEAD(dp, bp, b_freelist); \ + } while (0) -#define binstailfree(bp, dp, whichq) do { \ - TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ - } while (0) +#define binstailfree(bp, dp, whichq) do { \ + TAILQ_INSERT_TAIL(dp, bp, b_freelist); \ + } while (0) -#define BHASHENTCHECK(bp) \ - if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \ - panic("%p: b_hash.le_prev is not deadbeef", (bp)); +#define BHASHENTCHECK(bp) \ + if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \ + panic("%p: b_hash.le_prev is not deadbeef", (bp)); -#define BLISTNONE(bp) \ - (bp)->b_hash.le_next = (struct buf *)0; \ +#define BLISTNONE(bp) \ + (bp)->b_hash.le_next = (struct buf *)0; \ (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef; /* * Insq/Remq for the vnode usage lists. */ -#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs) -#define bufremvn(bp) { \ - LIST_REMOVE(bp, b_vnbufs); \ - (bp)->b_vnbufs.le_next = NOLIST; \ +#define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs) +#define bufremvn(bp) { \ + LIST_REMOVE(bp, b_vnbufs); \ + (bp)->b_vnbufs.le_next = NOLIST; \ } /* - * Time in seconds before a buffer on a list is - * considered as a stale buffer + * Time in seconds before a buffer on a list is + * considered as a stale buffer */ #define LRU_IS_STALE 120 /* default value for the LRU */ #define AGE_IS_STALE 60 /* default value for the AGE */ @@ -233,50 +233,56 @@ int lru_is_stale = LRU_IS_STALE; int age_is_stale = AGE_IS_STALE; int meta_is_stale = META_IS_STALE; -#define MAXLAUNDRY 10 +#define MAXLAUNDRY 10 /* LIST_INSERT_HEAD() with assertions */ static __inline__ void blistenterhead(struct bufhashhdr * head, buf_t bp) { - if ((bp->b_hash.le_next = (head)->lh_first) != NULL) + if ((bp->b_hash.le_next = (head)->lh_first) != NULL) { (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next; + } (head)->lh_first = bp; bp->b_hash.le_prev = &(head)->lh_first; - if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) + if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) { panic("blistenterhead: le_prev is deadbeef"); + } } -static __inline__ void +static __inline__ void binshash(buf_t bp, struct bufhashhdr *dp) { #if DIAGNOSTIC - buf_t nbp; + buf_t nbp; #endif /* DIAGNOSTIC */ BHASHENTCHECK(bp); #if DIAGNOSTIC nbp = dp->lh_first; - for(; nbp != NULL; nbp = nbp->b_hash.le_next) { - if(nbp == bp) + for (; nbp != NULL; nbp = nbp->b_hash.le_next) { + if (nbp == bp) { panic("buf already in hashlist"); + } } #endif /* DIAGNOSTIC */ blistenterhead(dp, bp); } -static __inline__ void -bremhash(buf_t bp) +static __inline__ void 
+bremhash(buf_t bp) { - if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) + if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) { panic("bremhash le_prev is deadbeef"); - if (bp->b_hash.le_next == bp) + } + if (bp->b_hash.le_next == bp) { panic("bremhash: next points to self"); + } - if (bp->b_hash.le_next != NULL) + if (bp->b_hash.le_next != NULL) { bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev; + } *bp->b_hash.le_prev = (bp)->b_hash.le_next; } @@ -305,42 +311,44 @@ buf_release_credentials(buf_t bp) int -buf_valid(buf_t bp) { - - if ( (bp->b_flags & (B_DONE | B_DELWRI)) ) - return 1; +buf_valid(buf_t bp) +{ + if ((bp->b_flags & (B_DONE | B_DELWRI))) { + return 1; + } return 0; } int -buf_fromcache(buf_t bp) { - - if ( (bp->b_flags & B_CACHE) ) - return 1; +buf_fromcache(buf_t bp) +{ + if ((bp->b_flags & B_CACHE)) { + return 1; + } return 0; } void -buf_markinvalid(buf_t bp) { - - SET(bp->b_flags, B_INVAL); +buf_markinvalid(buf_t bp) +{ + SET(bp->b_flags, B_INVAL); } void -buf_markdelayed(buf_t bp) { - +buf_markdelayed(buf_t bp) +{ if (!ISSET(bp->b_flags, B_DELWRI)) { SET(bp->b_flags, B_DELWRI); OSAddAtomicLong(1, &nbdwrite); buf_reassign(bp, bp->b_vp); } - SET(bp->b_flags, B_DONE); + SET(bp->b_flags, B_DONE); } void -buf_markclean(buf_t bp) { - +buf_markclean(buf_t bp) +{ if (ISSET(bp->b_flags, B_DELWRI)) { CLR(bp->b_flags, B_DELWRI); @@ -350,349 +358,391 @@ buf_markclean(buf_t bp) { } void -buf_markeintr(buf_t bp) { - - SET(bp->b_flags, B_EINTR); +buf_markeintr(buf_t bp) +{ + SET(bp->b_flags, B_EINTR); } void -buf_markaged(buf_t bp) { - - SET(bp->b_flags, B_AGE); +buf_markaged(buf_t bp) +{ + SET(bp->b_flags, B_AGE); } int -buf_fua(buf_t bp) { - - if ((bp->b_flags & B_FUA) == B_FUA) - return 1; +buf_fua(buf_t bp) +{ + if ((bp->b_flags & B_FUA) == B_FUA) { + return 1; + } return 0; } -void -buf_markfua(buf_t bp) { - - SET(bp->b_flags, B_FUA); +void +buf_markfua(buf_t bp) +{ + SET(bp->b_flags, B_FUA); } #if CONFIG_PROTECT -cpx_t bufattr_cpx(bufattr_t bap) +cpx_t +bufattr_cpx(bufattr_t bap) { return bap->ba_cpx; } -void bufattr_setcpx(bufattr_t bap, cpx_t cpx) +void +bufattr_setcpx(bufattr_t bap, cpx_t cpx) { bap->ba_cpx = cpx; } void -buf_setcpoff (buf_t bp, uint64_t foffset) { +buf_setcpoff(buf_t bp, uint64_t foffset) +{ bp->b_attr.ba_cp_file_off = foffset; } uint64_t -bufattr_cpoff(bufattr_t bap) { +bufattr_cpoff(bufattr_t bap) +{ return bap->ba_cp_file_off; } void -bufattr_setcpoff(bufattr_t bap, uint64_t foffset) { +bufattr_setcpoff(bufattr_t bap, uint64_t foffset) +{ bap->ba_cp_file_off = foffset; } #else // !CONTECT_PROTECT uint64_t -bufattr_cpoff(bufattr_t bap __unused) { +bufattr_cpoff(bufattr_t bap __unused) +{ return 0; } void -bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset) { +bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset) +{ return; } -struct cpx *bufattr_cpx(__unused bufattr_t bap) +struct cpx * +bufattr_cpx(__unused bufattr_t bap) { return NULL; } -void bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx) +void +bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx) { } #endif /* !CONFIG_PROTECT */ bufattr_t -bufattr_alloc() { +bufattr_alloc() +{ bufattr_t bap; MALLOC(bap, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK); - if (bap == NULL) + if (bap == NULL) { return NULL; + } bzero(bap, sizeof(struct bufattr)); return bap; } void -bufattr_free(bufattr_t bap) { - if (bap) +bufattr_free(bufattr_t bap) +{ + if (bap) { FREE(bap, M_TEMP); + } } bufattr_t -bufattr_dup(bufattr_t bap) { 
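/*
 * [Editor's note] Throughout the hash-list code above, 0xdeadbeef in
 * b_hash.le_prev is the "not on any hash chain" sentinel: BLISTNONE()
 * plants it when a buffer leaves the hash, BHASHENTCHECK() panics if an
 * insert candidate lacks it, and blistenterhead()/bremhash() panic when
 * it shows up where a live chain pointer belongs. Illustrative predicate:
 */
static inline int
buf_on_hash_chain(buf_t bp)
{
	return bp->b_hash.le_prev != (struct buf **)0xdeadbeef;
}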
+bufattr_dup(bufattr_t bap) +{ bufattr_t new_bufattr; MALLOC(new_bufattr, bufattr_t, sizeof(struct bufattr), M_TEMP, M_WAITOK); - if (new_bufattr == NULL) + if (new_bufattr == NULL) { return NULL; + } /* Copy the provided one into the new copy */ - memcpy (new_bufattr, bap, sizeof(struct bufattr)); + memcpy(new_bufattr, bap, sizeof(struct bufattr)); return new_bufattr; } int -bufattr_rawencrypted(bufattr_t bap) { - if ( (bap->ba_flags & BA_RAW_ENCRYPTED_IO) ) +bufattr_rawencrypted(bufattr_t bap) +{ + if ((bap->ba_flags & BA_RAW_ENCRYPTED_IO)) { return 1; + } return 0; } int -bufattr_throttled(bufattr_t bap) { - return (GET_BUFATTR_IO_TIER(bap)); +bufattr_throttled(bufattr_t bap) +{ + return GET_BUFATTR_IO_TIER(bap); } int -bufattr_passive(bufattr_t bap) { - if ( (bap->ba_flags & BA_PASSIVE) ) +bufattr_passive(bufattr_t bap) +{ + if ((bap->ba_flags & BA_PASSIVE)) { return 1; + } return 0; } int -bufattr_nocache(bufattr_t bap) { - if ( (bap->ba_flags & BA_NOCACHE) ) +bufattr_nocache(bufattr_t bap) +{ + if ((bap->ba_flags & BA_NOCACHE)) { return 1; + } return 0; } int -bufattr_meta(bufattr_t bap) { - if ( (bap->ba_flags & BA_META) ) +bufattr_meta(bufattr_t bap) +{ + if ((bap->ba_flags & BA_META)) { return 1; + } return 0; } void -bufattr_markmeta(bufattr_t bap) { - SET(bap->ba_flags, BA_META); +bufattr_markmeta(bufattr_t bap) +{ + SET(bap->ba_flags, BA_META); } int #if !CONFIG_EMBEDDED -bufattr_delayidlesleep(bufattr_t bap) +bufattr_delayidlesleep(bufattr_t bap) #else /* !CONFIG_EMBEDDED */ -bufattr_delayidlesleep(__unused bufattr_t bap) +bufattr_delayidlesleep(__unused bufattr_t bap) #endif /* !CONFIG_EMBEDDED */ { #if !CONFIG_EMBEDDED - if ( (bap->ba_flags & BA_DELAYIDLESLEEP) ) + if ((bap->ba_flags & BA_DELAYIDLESLEEP)) { return 1; + } #endif /* !CONFIG_EMBEDDED */ return 0; } bufattr_t -buf_attr(buf_t bp) { +buf_attr(buf_t bp) +{ return &bp->b_attr; } -void -buf_markstatic(buf_t bp __unused) { +void +buf_markstatic(buf_t bp __unused) +{ SET(bp->b_flags, B_STATICCONTENT); } int -buf_static(buf_t bp) { - if ( (bp->b_flags & B_STATICCONTENT) ) - return 1; - return 0; +buf_static(buf_t bp) +{ + if ((bp->b_flags & B_STATICCONTENT)) { + return 1; + } + return 0; } -void -bufattr_markgreedymode(bufattr_t bap) { +void +bufattr_markgreedymode(bufattr_t bap) +{ SET(bap->ba_flags, BA_GREEDY_MODE); } int -bufattr_greedymode(bufattr_t bap) { - if ( (bap->ba_flags & BA_GREEDY_MODE) ) - return 1; - return 0; +bufattr_greedymode(bufattr_t bap) +{ + if ((bap->ba_flags & BA_GREEDY_MODE)) { + return 1; + } + return 0; } -void -bufattr_markisochronous(bufattr_t bap) { +void +bufattr_markisochronous(bufattr_t bap) +{ SET(bap->ba_flags, BA_ISOCHRONOUS); } int -bufattr_isochronous(bufattr_t bap) { - if ( (bap->ba_flags & BA_ISOCHRONOUS) ) - return 1; - return 0; +bufattr_isochronous(bufattr_t bap) +{ + if ((bap->ba_flags & BA_ISOCHRONOUS)) { + return 1; + } + return 0; } -void -bufattr_markquickcomplete(bufattr_t bap) { +void +bufattr_markquickcomplete(bufattr_t bap) +{ SET(bap->ba_flags, BA_QUICK_COMPLETE); } int -bufattr_quickcomplete(bufattr_t bap) { - if ( (bap->ba_flags & BA_QUICK_COMPLETE) ) - return 1; - return 0; +bufattr_quickcomplete(bufattr_t bap) +{ + if ((bap->ba_flags & BA_QUICK_COMPLETE)) { + return 1; + } + return 0; } errno_t -buf_error(buf_t bp) { - - return (bp->b_error); +buf_error(buf_t bp) +{ + return bp->b_error; } void -buf_seterror(buf_t bp, errno_t error) { - - if ((bp->b_error = error)) - SET(bp->b_flags, B_ERROR); - else - CLR(bp->b_flags, B_ERROR); +buf_seterror(buf_t bp, errno_t 
error) +{ + if ((bp->b_error = error)) { + SET(bp->b_flags, B_ERROR); + } else { + CLR(bp->b_flags, B_ERROR); + } } void -buf_setflags(buf_t bp, int32_t flags) { - - SET(bp->b_flags, (flags & BUF_X_WRFLAGS)); +buf_setflags(buf_t bp, int32_t flags) +{ + SET(bp->b_flags, (flags & BUF_X_WRFLAGS)); } void -buf_clearflags(buf_t bp, int32_t flags) { - - CLR(bp->b_flags, (flags & BUF_X_WRFLAGS)); +buf_clearflags(buf_t bp, int32_t flags) +{ + CLR(bp->b_flags, (flags & BUF_X_WRFLAGS)); } int32_t -buf_flags(buf_t bp) { - - return ((bp->b_flags & BUF_X_RDFLAGS)); +buf_flags(buf_t bp) +{ + return bp->b_flags & BUF_X_RDFLAGS; } void -buf_reset(buf_t bp, int32_t io_flags) { - - CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA)); +buf_reset(buf_t bp, int32_t io_flags) +{ + CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA)); SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE))); bp->b_error = 0; } uint32_t -buf_count(buf_t bp) { - - return (bp->b_bcount); +buf_count(buf_t bp) +{ + return bp->b_bcount; } void -buf_setcount(buf_t bp, uint32_t bcount) { - - bp->b_bcount = bcount; +buf_setcount(buf_t bp, uint32_t bcount) +{ + bp->b_bcount = bcount; } uint32_t -buf_size(buf_t bp) { - - return (bp->b_bufsize); +buf_size(buf_t bp) +{ + return bp->b_bufsize; } void -buf_setsize(buf_t bp, uint32_t bufsize) { - - bp->b_bufsize = bufsize; +buf_setsize(buf_t bp, uint32_t bufsize) +{ + bp->b_bufsize = bufsize; } uint32_t -buf_resid(buf_t bp) { - - return (bp->b_resid); +buf_resid(buf_t bp) +{ + return bp->b_resid; } void -buf_setresid(buf_t bp, uint32_t resid) { - - bp->b_resid = resid; +buf_setresid(buf_t bp, uint32_t resid) +{ + bp->b_resid = resid; } uint32_t -buf_dirtyoff(buf_t bp) { - - return (bp->b_dirtyoff); +buf_dirtyoff(buf_t bp) +{ + return bp->b_dirtyoff; } uint32_t -buf_dirtyend(buf_t bp) { - - return (bp->b_dirtyend); +buf_dirtyend(buf_t bp) +{ + return bp->b_dirtyend; } void -buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) { - - bp->b_dirtyoff = dirtyoff; +buf_setdirtyoff(buf_t bp, uint32_t dirtyoff) +{ + bp->b_dirtyoff = dirtyoff; } void -buf_setdirtyend(buf_t bp, uint32_t dirtyend) { - - bp->b_dirtyend = dirtyend; +buf_setdirtyend(buf_t bp, uint32_t dirtyend) +{ + bp->b_dirtyend = dirtyend; } uintptr_t -buf_dataptr(buf_t bp) { - - return (bp->b_datap); +buf_dataptr(buf_t bp) +{ + return bp->b_datap; } void -buf_setdataptr(buf_t bp, uintptr_t data) { - - bp->b_datap = data; +buf_setdataptr(buf_t bp, uintptr_t data) +{ + bp->b_datap = data; } vnode_t -buf_vnode(buf_t bp) { - - return (bp->b_vp); +buf_vnode(buf_t bp) +{ + return bp->b_vp; } void -buf_setvnode(buf_t bp, vnode_t vp) { - - bp->b_vp = vp; +buf_setvnode(buf_t bp, vnode_t vp) +{ + bp->b_vp = vp; } void * buf_callback(buf_t bp) { - if ( !(bp->b_flags & B_CALL) ) - return ((void *) NULL); + if (!(bp->b_flags & B_CALL)) { + return (void *) NULL; + } - return ((void *)bp->b_iodone); + return (void *)bp->b_iodone; } @@ -701,77 +751,84 @@ buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction) { assert(!ISSET(bp->b_flags, B_FILTER) && ISSET(bp->b_lflags, BL_BUSY)); - if (callback) - bp->b_flags |= (B_CALL | B_ASYNC); - else - bp->b_flags &= ~B_CALL; + if (callback) { + bp->b_flags |= (B_CALL | B_ASYNC); + } else { + bp->b_flags &= ~B_CALL; + } bp->b_transaction = transaction; bp->b_iodone = callback; - return (0); + return 0; } errno_t buf_setupl(buf_t bp, upl_t upl, uint32_t offset) { + if (!(bp->b_lflags & BL_IOBUF)) { + 
return EINVAL; + } - if ( !(bp->b_lflags & BL_IOBUF) ) - return (EINVAL); - - if (upl) - bp->b_flags |= B_CLUSTER; - else - bp->b_flags &= ~B_CLUSTER; + if (upl) { + bp->b_flags |= B_CLUSTER; + } else { + bp->b_flags &= ~B_CLUSTER; + } bp->b_upl = upl; bp->b_uploffset = offset; - return (0); + return 0; } buf_t buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg) { - buf_t io_bp; + buf_t io_bp; - if (io_offset < 0 || io_size < 0) - return (NULL); + if (io_offset < 0 || io_size < 0) { + return NULL; + } - if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) - return (NULL); + if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) { + return NULL; + } if (bp->b_flags & B_CLUSTER) { - if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) - return (NULL); + if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) { + return NULL; + } - if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount)) - return (NULL); + if (((bp->b_uploffset + io_offset + io_size) & PAGE_MASK) && ((io_offset + io_size) < bp->b_bcount)) { + return NULL; + } } io_bp = alloc_io_buf(bp->b_vp, 0); io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA); if (iodone) { - io_bp->b_transaction = arg; + io_bp->b_transaction = arg; io_bp->b_iodone = iodone; io_bp->b_flags |= B_CALL; } if (bp->b_flags & B_CLUSTER) { - io_bp->b_upl = bp->b_upl; + io_bp->b_upl = bp->b_upl; io_bp->b_uploffset = bp->b_uploffset + io_offset; } else { - io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset); + io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset); } io_bp->b_bcount = io_size; - return (io_bp); + return io_bp; } int buf_shadow(buf_t bp) { - if (bp->b_lflags & BL_SHADOW) + if (bp->b_lflags & BL_SHADOW) { return 1; + } return 0; } @@ -779,31 +836,31 @@ buf_shadow(buf_t bp) buf_t buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) { - return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1)); + return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1); } buf_t buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg) { - return (buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0)); + return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0); } static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv) { - buf_t io_bp; + buf_t io_bp; KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0); - if ( !(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) { - + if (!(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) { KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0); - return (NULL); + return NULL; } #ifdef BUF_MAKE_PRIVATE - if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) + if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) { panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref); + } #endif io_bp = alloc_io_buf(bp->b_vp, priv); @@ -812,7 +869,7 @@ buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_st io_bp->b_lblkno = bp->b_lblkno; if (iodone) { - io_bp->b_transaction = arg; + io_bp->b_transaction = arg; 
io_bp->b_iodone = iodone; io_bp->b_flags |= B_CALL; } @@ -841,10 +898,11 @@ buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_st bp->b_shadow_ref++; #ifdef BUF_MAKE_PRIVATE - if (external_storage) + if (external_storage) { io_bp->b_lflags |= BL_EXTERNAL; - else + } else { bp->b_data_ref++; + } #endif lck_mtx_unlock(buf_mtxp); } else { @@ -868,7 +926,7 @@ buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_st } KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0); - return (io_bp); + return io_bp; } @@ -876,16 +934,15 @@ buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_st errno_t buf_make_private(buf_t bp) { - buf_t ds_bp; - buf_t t_bp; + buf_t ds_bp; + buf_t t_bp; struct buf my_buf; KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0); if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) { - KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); - return (EINVAL); + return EINVAL; } my_buf.b_flags = B_META; my_buf.b_datap = (uintptr_t)NULL; @@ -896,16 +953,19 @@ buf_make_private(buf_t bp) lck_mtx_lock_spin(buf_mtxp); for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { - if ( !ISSET(bp->b_lflags, BL_EXTERNAL)) + if (!ISSET(bp->b_lflags, BL_EXTERNAL)) { break; + } } ds_bp = t_bp; - if (ds_bp == NULL && bp->b_data_ref) + if (ds_bp == NULL && bp->b_data_ref) { panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL"); + } - if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) + if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) { panic("buf_make_private: ref_count == 0 && ds_bp != NULL"); + } if (ds_bp == NULL) { lck_mtx_unlock(buf_mtxp); @@ -913,11 +973,12 @@ buf_make_private(buf_t bp) buf_free_meta_store(&my_buf); KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0); - return (EINVAL); + return EINVAL; } for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) { - if ( !ISSET(t_bp->b_lflags, BL_EXTERNAL)) + if (!ISSET(t_bp->b_lflags, BL_EXTERNAL)) { t_bp->b_data_store = ds_bp; + } } ds_bp->b_data_ref = bp->b_data_ref; @@ -927,66 +988,70 @@ buf_make_private(buf_t bp) lck_mtx_unlock(buf_mtxp); KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0); - return (0); + return 0; } #endif void buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction, - void (**old_iodone)(buf_t, void *), void **old_transaction) + void(**old_iodone)(buf_t, void *), void **old_transaction) { assert(ISSET(bp->b_lflags, BL_BUSY)); - if (old_iodone) + if (old_iodone) { *old_iodone = bp->b_iodone; - if (old_transaction) + } + if (old_transaction) { *old_transaction = bp->b_transaction; + } bp->b_transaction = transaction; bp->b_iodone = filter; - if (filter) - bp->b_flags |= B_FILTER; - else - bp->b_flags &= ~B_FILTER; + if (filter) { + bp->b_flags |= B_FILTER; + } else { + bp->b_flags &= ~B_FILTER; + } } daddr64_t -buf_blkno(buf_t bp) { - - return (bp->b_blkno); +buf_blkno(buf_t bp) +{ + return bp->b_blkno; } daddr64_t -buf_lblkno(buf_t bp) { - - return (bp->b_lblkno); +buf_lblkno(buf_t bp) +{ + return bp->b_lblkno; } void -buf_setblkno(buf_t bp, daddr64_t blkno) { - - bp->b_blkno = blkno; +buf_setblkno(buf_t bp, daddr64_t blkno) +{ + bp->b_blkno = blkno; } void -buf_setlblkno(buf_t bp, daddr64_t lblkno) { - - bp->b_lblkno = lblkno; +buf_setlblkno(buf_t bp, daddr64_t lblkno) +{ + bp->b_lblkno = lblkno; } dev_t -buf_device(buf_t bp) { - - return (bp->b_dev); 
+buf_device(buf_t bp) +{ + return bp->b_dev; } errno_t -buf_setdevice(buf_t bp, vnode_t vp) { - - if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) - return EINVAL; +buf_setdevice(buf_t bp, vnode_t vp) +{ + if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) { + return EINVAL; + } bp->b_dev = vp->v_rdev; return 0; @@ -994,117 +1059,119 @@ buf_setdevice(buf_t bp, vnode_t vp) { void * -buf_drvdata(buf_t bp) { - - return (bp->b_drvdata); +buf_drvdata(buf_t bp) +{ + return bp->b_drvdata; } void -buf_setdrvdata(buf_t bp, void *drvdata) { - - bp->b_drvdata = drvdata; +buf_setdrvdata(buf_t bp, void *drvdata) +{ + bp->b_drvdata = drvdata; } void * -buf_fsprivate(buf_t bp) { - - return (bp->b_fsprivate); +buf_fsprivate(buf_t bp) +{ + return bp->b_fsprivate; } void -buf_setfsprivate(buf_t bp, void *fsprivate) { - - bp->b_fsprivate = fsprivate; +buf_setfsprivate(buf_t bp, void *fsprivate) +{ + bp->b_fsprivate = fsprivate; } kauth_cred_t -buf_rcred(buf_t bp) { - - return (bp->b_rcred); +buf_rcred(buf_t bp) +{ + return bp->b_rcred; } kauth_cred_t -buf_wcred(buf_t bp) { - - return (bp->b_wcred); +buf_wcred(buf_t bp) +{ + return bp->b_wcred; } void * -buf_upl(buf_t bp) { - - return (bp->b_upl); +buf_upl(buf_t bp) +{ + return bp->b_upl; } uint32_t -buf_uploffset(buf_t bp) { - - return ((uint32_t)(bp->b_uploffset)); +buf_uploffset(buf_t bp) +{ + return (uint32_t)(bp->b_uploffset); } proc_t -buf_proc(buf_t bp) { - - return (bp->b_proc); +buf_proc(buf_t bp) +{ + return bp->b_proc; } errno_t buf_map(buf_t bp, caddr_t *io_addr) { - buf_t real_bp; - vm_offset_t vaddr; - kern_return_t kret; + buf_t real_bp; + vm_offset_t vaddr; + kern_return_t kret; - if ( !(bp->b_flags & B_CLUSTER)) { - *io_addr = (caddr_t)bp->b_datap; - return (0); + if (!(bp->b_flags & B_CLUSTER)) { + *io_addr = (caddr_t)bp->b_datap; + return 0; } real_bp = (buf_t)(bp->b_real_bp); if (real_bp && real_bp->b_datap) { - /* + /* * b_real_bp is only valid if B_CLUSTER is SET * if it's non-zero, than someone did a cluster_bp call * if the backing physical pages were already mapped * in before the call to cluster_bp (non-zero b_datap), * than we just use that mapping */ - *io_addr = (caddr_t)real_bp->b_datap; - return (0); + *io_addr = (caddr_t)real_bp->b_datap; + return 0; } kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */ if (kret != KERN_SUCCESS) { - *io_addr = NULL; + *io_addr = NULL; - return(ENOMEM); + return ENOMEM; } - vaddr += bp->b_uploffset; + vaddr += bp->b_uploffset; *io_addr = (caddr_t)vaddr; - return (0); + return 0; } errno_t buf_unmap(buf_t bp) { - buf_t real_bp; - kern_return_t kret; + buf_t real_bp; + kern_return_t kret; - if ( !(bp->b_flags & B_CLUSTER)) - return (0); + if (!(bp->b_flags & B_CLUSTER)) { + return 0; + } /* * see buf_map for the explanation */ real_bp = (buf_t)(bp->b_real_bp); - if (real_bp && real_bp->b_datap) - return (0); + if (real_bp && real_bp->b_datap) { + return 0; + } if ((bp->b_lflags & BL_IOBUF) && ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) { - /* + /* * ignore pageins... the 'right' thing will * happen due to the way we handle speculative * clusters... 
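/*
 * [Editor's note] Hedged usage sketch of the mapping pair above: for a
 * B_CLUSTER buffer, buf_map() maps the UPL (or reuses b_real_bp's
 * pre-existing mapping) and buf_unmap() must balance it; for ordinary
 * buffers the data pointer comes back directly and unmap is a no-op.
 */
caddr_t addr;
errno_t err;

err = buf_map(bp, &addr);
if (err == 0) {
	/* ... touch addr[0 .. buf_count(bp) - 1] ... */
	err = buf_unmap(bp);
}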
@@ -1114,22 +1181,24 @@ buf_unmap(buf_t bp) * will clear the reference bit that got * turned on when we touched the mapping */ - bp->b_flags |= B_AGE; + bp->b_flags |= B_AGE; } kret = ubc_upl_unmap(bp->b_upl); - if (kret != KERN_SUCCESS) - return (EINVAL); - return (0); + if (kret != KERN_SUCCESS) { + return EINVAL; + } + return 0; } void -buf_clear(buf_t bp) { - caddr_t baddr; - - if (buf_map(bp, &baddr) == 0) { - bzero(baddr, bp->b_bcount); +buf_clear(buf_t bp) +{ + caddr_t baddr; + + if (buf_map(bp, &baddr) == 0) { + bzero(baddr, bp->b_bcount); buf_unmap(bp); } bp->b_resid = 0; @@ -1142,14 +1211,14 @@ buf_clear(buf_t bp) { static int buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes) { - vnode_t vp = buf_vnode(bp); - buf_t io_bp; /* For reading or writing a single block */ - int io_direction; - int io_resid; - size_t io_contig_bytes; - daddr64_t io_blkno; - int error = 0; - int bmap_flags; + vnode_t vp = buf_vnode(bp); + buf_t io_bp; /* For reading or writing a single block */ + int io_direction; + int io_resid; + size_t io_contig_bytes; + daddr64_t io_blkno; + int error = 0; + int bmap_flags; /* * save our starting point... the bp was already mapped @@ -1162,7 +1231,7 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b * i.e. this can never be a 'permanent' mapping */ bp->b_blkno = bp->b_lblkno; - + /* * Get an io buffer to do the deblocking */ @@ -1170,23 +1239,24 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b io_bp->b_lblkno = bp->b_lblkno; io_bp->b_datap = bp->b_datap; - io_resid = bp->b_bcount; - io_direction = bp->b_flags & B_READ; + io_resid = bp->b_bcount; + io_direction = bp->b_flags & B_READ; io_contig_bytes = contig_bytes; - - if (bp->b_flags & B_READ) - bmap_flags = VNODE_READ; - else - bmap_flags = VNODE_WRITE; + + if (bp->b_flags & B_READ) { + bmap_flags = VNODE_READ; + } else { + bmap_flags = VNODE_WRITE; + } for (;;) { - if (io_blkno == -1) - /* + if (io_blkno == -1) { + /* * this is unexepected, but we'll allow for it */ - bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes); - else { - io_bp->b_bcount = io_contig_bytes; + bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes); + } else { + io_bp->b_bcount = io_contig_bytes; io_bp->b_bufsize = io_contig_bytes; io_bp->b_resid = io_contig_bytes; io_bp->b_blkno = io_blkno; @@ -1197,33 +1267,39 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b * Call the device to do the I/O and wait for it. 
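/*
 * [Editor's note] The deblocking loop below leans on VNOP_BLOCKMAP(),
 * which translates a file offset into a device block number plus the
 * count of contiguous bytes reachable from it. Hedged shape of one
 * translation ("resid" is a hypothetical remaining-byte count):
 */
daddr64_t blkno;
size_t contig;

error = VNOP_BLOCKMAP(vp, f_offset, resid, &blkno, &contig, NULL,
    VNODE_READ, NULL);
/* blkno == -1 denotes a hole: the loop zero-fills instead of doing I/O */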
Make sure the appropriate party is charged for write */ - if (!ISSET(bp->b_flags, B_READ)) - OSAddAtomic(1, &devvp->v_numoutput); + if (!ISSET(bp->b_flags, B_READ)) { + OSAddAtomic(1, &devvp->v_numoutput); + } - if ((error = VNOP_STRATEGY(io_bp))) - break; - if ((error = (int)buf_biowait(io_bp))) - break; + if ((error = VNOP_STRATEGY(io_bp))) { + break; + } + if ((error = (int)buf_biowait(io_bp))) { + break; + } if (io_bp->b_resid) { - io_resid -= (io_contig_bytes - io_bp->b_resid); + io_resid -= (io_contig_bytes - io_bp->b_resid); break; } } - if ((io_resid -= io_contig_bytes) == 0) - break; + if ((io_resid -= io_contig_bytes) == 0) { + break; + } f_offset += io_contig_bytes; io_bp->b_datap += io_contig_bytes; /* * Map the current position to a physical block number */ - if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) - break; + if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) { + break; + } } buf_free(io_bp); - - if (error) - buf_seterror(bp, error); + + if (error) { + buf_seterror(bp, error); + } bp->b_resid = io_resid; /* * This I/O is now complete @@ -1242,20 +1318,21 @@ buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_b errno_t buf_strategy(vnode_t devvp, void *ap) { - buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp; - vnode_t vp = bp->b_vp; - int bmap_flags; - errno_t error; + buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp; + vnode_t vp = bp->b_vp; + int bmap_flags; + errno_t error; #if CONFIG_DTRACE - int dtrace_io_start_flag = 0; /* We only want to trip the io:::start - * probe once, with the true physical - * block in place (b_blkno) - */ + int dtrace_io_start_flag = 0; /* We only want to trip the io:::start + * probe once, with the true physical + * block in place (b_blkno) + */ -#endif +#endif - if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) - panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n"); + if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) { + panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK\n"); + } /* * associate the physical device with * with this buf_t even if we don't @@ -1263,104 +1340,105 @@ buf_strategy(vnode_t devvp, void *ap) */ bp->b_dev = devvp->v_rdev; - if (bp->b_flags & B_READ) - bmap_flags = VNODE_READ; - else - bmap_flags = VNODE_WRITE; - - if ( !(bp->b_flags & B_CLUSTER)) { + if (bp->b_flags & B_READ) { + bmap_flags = VNODE_READ; + } else { + bmap_flags = VNODE_WRITE; + } - if ( (bp->b_upl) ) { - /* + if (!(bp->b_flags & B_CLUSTER)) { + if ((bp->b_upl)) { + /* * we have a UPL associated with this bp * go through cluster_bp which knows how * to deal with filesystem block sizes * that aren't equal to the page size */ DTRACE_IO1(start, buf_t, bp); - return (cluster_bp(bp)); + return cluster_bp(bp); } if (bp->b_blkno == bp->b_lblkno) { - off_t f_offset; - size_t contig_bytes; - + off_t f_offset; + size_t contig_bytes; + if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) { DTRACE_IO1(start, buf_t, bp); - buf_seterror(bp, error); + buf_seterror(bp, error); buf_biodone(bp); - return (error); + return error; } - if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) { + if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) { DTRACE_IO1(start, buf_t, bp); - buf_seterror(bp, error); + buf_seterror(bp, error); buf_biodone(bp); - return (error); + return 
error; } DTRACE_IO1(start, buf_t, bp); #if CONFIG_DTRACE dtrace_io_start_flag = 1; -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ if ((bp->b_blkno == -1) || (contig_bytes == 0)) { /* Set block number to force biodone later */ bp->b_blkno = -1; - buf_clear(bp); - } - else if ((long)contig_bytes < bp->b_bcount) { - return (buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes)); + buf_clear(bp); + } else if ((long)contig_bytes < bp->b_bcount) { + return buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes); } } - + #if CONFIG_DTRACE if (dtrace_io_start_flag == 0) { DTRACE_IO1(start, buf_t, bp); dtrace_io_start_flag = 1; } #endif /* CONFIG_DTRACE */ - + if (bp->b_blkno == -1) { - buf_biodone(bp); - return (0); + buf_biodone(bp); + return 0; } } #if CONFIG_DTRACE - if (dtrace_io_start_flag == 0) + if (dtrace_io_start_flag == 0) { DTRACE_IO1(start, buf_t, bp); + } #endif /* CONFIG_DTRACE */ - + #if CONFIG_PROTECT /* Capture f_offset in the bufattr*/ cpx_t cpx = bufattr_cpx(buf_attr(bp)); if (cpx) { /* No need to go here for older EAs */ - if(cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) { + if (cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) { off_t f_offset; - if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) + if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) { return error; + } - /* + /* * Attach the file offset to this buffer. The * bufattr attributes will be passed down the stack - * until they reach the storage driver (whether + * until they reach the storage driver (whether * IOFlashStorage, ASP, or IONVMe). The driver * will retain the offset in a local variable when it - * issues its I/Os to the NAND controller. - * - * Note that LwVM may end up splitting this I/O + * issues its I/Os to the NAND controller. + * + * Note that LwVM may end up splitting this I/O * into sub-I/Os if it crosses a chunk boundary. In this * case, LwVM will update this field when it dispatches * each I/O to IOFlashStorage. But from our perspective * we have only issued a single I/O. * - * In the case of APFS we do not bounce through another + * In the case of APFS we do not bounce through another * intermediate layer (such as CoreStorage). APFS will * issue the I/Os directly to the block device / IOMedia - * via buf_strategy on the specfs node. + * via buf_strategy on the specfs node. */ buf_setcpoff(bp, f_offset); CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0); @@ -1378,7 +1456,7 @@ buf_strategy(vnode_t devvp, void *ap) */ error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap); DTRACE_FSINFO(strategy, vnode_t, vp); - return (error); + return error; } @@ -1386,18 +1464,18 @@ buf_strategy(vnode_t devvp, void *ap) buf_t buf_alloc(vnode_t vp) { - return(alloc_io_buf(vp, is_vm_privileged())); + return alloc_io_buf(vp, is_vm_privileged()); } void -buf_free(buf_t bp) { - - free_io_buf(bp); +buf_free(buf_t bp) +{ + free_io_buf(bp); } /* - * iterate buffers for the specified vp. + * iterate buffers for the specified vp. 
* if BUF_SCAN_DIRTY is set, do the dirty list * if BUF_SCAN_CLEAN is set, do the clean list * if neither flag is set, default to BUF_SCAN_DIRTY @@ -1412,26 +1490,29 @@ struct buf_iterate_info_t { void buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) { - buf_t bp; - int retval; - struct buflists local_iterblkhd; - int lock_flags = BAC_NOWAIT | BAC_REMOVE; - int notify_busy = flags & BUF_NOTIFY_BUSY; + buf_t bp; + int retval; + struct buflists local_iterblkhd; + int lock_flags = BAC_NOWAIT | BAC_REMOVE; + int notify_busy = flags & BUF_NOTIFY_BUSY; struct buf_iterate_info_t list[2]; - int num_lists, i; + int num_lists, i; - if (flags & BUF_SKIP_LOCKED) - lock_flags |= BAC_SKIP_LOCKED; - if (flags & BUF_SKIP_NONLOCKED) - lock_flags |= BAC_SKIP_NONLOCKED; + if (flags & BUF_SKIP_LOCKED) { + lock_flags |= BAC_SKIP_LOCKED; + } + if (flags & BUF_SKIP_NONLOCKED) { + lock_flags |= BAC_SKIP_NONLOCKED; + } - if ( !(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) - flags |= BUF_SCAN_DIRTY; + if (!(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) { + flags |= BUF_SCAN_DIRTY; + } num_lists = 0; if (flags & BUF_SCAN_DIRTY) { - list[num_lists].flag = VBI_DIRTY; + list[num_lists].flag = VBI_DIRTY; list[num_lists].listhead = &vp->v_dirtyblkhd; num_lists++; } @@ -1443,8 +1524,8 @@ buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) for (i = 0; i < num_lists; i++) { lck_mtx_lock(buf_mtxp); - - if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) { + + if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) { lck_mtx_unlock(buf_mtxp); continue; } @@ -1467,14 +1548,16 @@ buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) switch (retval) { case BUF_RETURNED: - if (bp) + if (bp) { buf_brelse(bp); + } break; case BUF_CLAIMED: break; case BUF_RETURNED_DONE: - if (bp) + if (bp) { buf_brelse(bp); + } lck_mtx_lock(buf_mtxp); goto out; case BUF_CLAIMED_DONE: @@ -1483,7 +1566,7 @@ buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) } lck_mtx_lock(buf_mtxp); } /* while list has more nodes */ - out: +out: buf_itercomplete(vp, &local_iterblkhd, list[i].flag); lck_mtx_unlock(buf_mtxp); } /* for each list */ @@ -1496,39 +1579,41 @@ buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg) int buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) { - buf_t bp; - int aflags; - int error = 0; - int must_rescan = 1; - struct buflists local_iterblkhd; + buf_t bp; + int aflags; + int error = 0; + int must_rescan = 1; + struct buflists local_iterblkhd; - if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) - return (0); + if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) { + return 0; + } lck_mtx_lock(buf_mtxp); for (;;) { - if (must_rescan == 0) - /* + if (must_rescan == 0) { + /* * the lists may not be empty, but all that's left at this * point are metadata or B_LOCKED buffers which are being * skipped... we know this because we made it through both * the clean and dirty lists without dropping buf_mtxp... 
* each time we drop buf_mtxp we bump "must_rescan" */ - break; - if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) - break; + break; + } + if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) { + break; + } must_rescan = 0; /* * iterate the clean list */ if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) { - goto try_dirty_list; + goto try_dirty_list; } while (!LIST_EMPTY(&local_iterblkhd)) { - bp = LIST_FIRST(&local_iterblkhd); LIST_REMOVE(bp, b_vnbufs); @@ -1537,30 +1622,33 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) /* * some filesystems distinguish meta data blocks with a negative logical block # */ - if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) + if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) { continue; + } aflags = BAC_REMOVE; - if ( !(flags & BUF_INVALIDATE_LOCKED) ) + if (!(flags & BUF_INVALIDATE_LOCKED)) { aflags |= BAC_SKIP_LOCKED; + } - if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) { - if (error == EDEADLK) - /* - * this buffer was marked B_LOCKED... + if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) { + if (error == EDEADLK) { + /* + * this buffer was marked B_LOCKED... * we didn't drop buf_mtxp, so we * we don't need to rescan */ - continue; - if (error == EAGAIN) { - /* + continue; + } + if (error == EAGAIN) { + /* * found a busy buffer... we blocked and * dropped buf_mtxp, so we're going to * need to rescan after this pass is completed */ - must_rescan++; - continue; + must_rescan++; + continue; } /* * got some kind of 'real' error out of the msleep @@ -1569,12 +1657,13 @@ buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo) buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN); lck_mtx_unlock(buf_mtxp); - return (error); + return error; } lck_mtx_unlock(buf_mtxp); - if (bp->b_flags & B_LOCKED) + if (bp->b_flags & B_LOCKED) { KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0); + } CLR(bp->b_flags, B_LOCKED); SET(bp->b_flags, B_INVAL); @@ -1608,30 +1697,33 @@ try_dirty_list: /* * some filesystems distinguish meta data blocks with a negative logical block # */ - if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) + if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) { continue; + } aflags = BAC_REMOVE; - if ( !(flags & BUF_INVALIDATE_LOCKED) ) + if (!(flags & BUF_INVALIDATE_LOCKED)) { aflags |= BAC_SKIP_LOCKED; + } - if ( (error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo)) ) { - if (error == EDEADLK) - /* - * this buffer was marked B_LOCKED... + if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) { + if (error == EDEADLK) { + /* + * this buffer was marked B_LOCKED... * we didn't drop buf_mtxp, so we * we don't need to rescan */ - continue; - if (error == EAGAIN) { - /* + continue; + } + if (error == EAGAIN) { + /* * found a busy buffer... 
we blocked and * dropped buf_mtxp, so we're going to * need to rescan after this pass is completed */ - must_rescan++; - continue; + must_rescan++; + continue; } /* * got some kind of 'real' error out of the msleep @@ -1640,20 +1732,22 @@ try_dirty_list: buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); lck_mtx_unlock(buf_mtxp); - return (error); + return error; } lck_mtx_unlock(buf_mtxp); - if (bp->b_flags & B_LOCKED) + if (bp->b_flags & B_LOCKED) { KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0); + } CLR(bp->b_flags, B_LOCKED); SET(bp->b_flags, B_INVAL); - if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) + if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) { (void) VNOP_BWRITE(bp); - else + } else { buf_brelse(bp); + } lck_mtx_lock(buf_mtxp); /* @@ -1668,35 +1762,38 @@ try_dirty_list: } lck_mtx_unlock(buf_mtxp); - return (0); + return 0; } void -buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) { - +buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg) +{ (void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg); return; } int -buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg) { - buf_t bp; - int writes_issued = 0; - errno_t error; - int busy = 0; - struct buflists local_iterblkhd; - int lock_flags = BAC_NOWAIT | BAC_REMOVE; +buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg) +{ + buf_t bp; + int writes_issued = 0; + errno_t error; + int busy = 0; + struct buflists local_iterblkhd; + int lock_flags = BAC_NOWAIT | BAC_REMOVE; int any_locked = 0; - if (flags & BUF_SKIP_LOCKED) - lock_flags |= BAC_SKIP_LOCKED; - if (flags & BUF_SKIP_NONLOCKED) - lock_flags |= BAC_SKIP_NONLOCKED; + if (flags & BUF_SKIP_LOCKED) { + lock_flags |= BAC_SKIP_LOCKED; + } + if (flags & BUF_SKIP_NONLOCKED) { + lock_flags |= BAC_SKIP_NONLOCKED; + } loop: lck_mtx_lock(buf_mtxp); - if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) { - while (!LIST_EMPTY(&local_iterblkhd)) { + if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) { + while (!LIST_EMPTY(&local_iterblkhd)) { bp = LIST_FIRST(&local_iterblkhd); LIST_REMOVE(bp, b_vnbufs); LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs); @@ -1705,13 +1802,13 @@ loop: busy++; } if (error) { - /* + /* * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED, * we may want to do somethign differently if a locked or unlocked * buffer was encountered (depending on the arg specified). * In this case, we know that one of those two was set, and the - * buf acquisition failed above. - * + * buf acquisition failed above. + * * If it failed with EDEADLK, then save state which can be emitted * later on to the caller. Most callers should not care. */ @@ -1728,10 +1825,11 @@ loop: * Wait for I/O associated with indirect blocks to complete, * since there is no way to quickly wait for them below. */ - if ((bp->b_vp == vp) || (wait == 0)) - (void) buf_bawrite(bp); - else - (void) VNOP_BWRITE(bp); + if ((bp->b_vp == vp) || (wait == 0)) { + (void) buf_bawrite(bp); + } else { + (void) VNOP_BWRITE(bp); + } writes_issued++; lck_mtx_lock(buf_mtxp); @@ -1739,25 +1837,25 @@ loop: buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY); } lck_mtx_unlock(buf_mtxp); - + if (wait) { - (void)vnode_waitforwrites(vp, 0, 0, 0, msg); + (void)vnode_waitforwrites(vp, 0, 0, 0, msg); if (vp->v_dirtyblkhd.lh_first && busy) { - /* + /* * we had one or more BUSY buffers on * the dirtyblock list... 
most likely * these are due to delayed writes that * were moved to the bclean queue but * have not yet been 'written'. - * if we issued some writes on the + * if we issued some writes on the * previous pass, we try again immediately * if we didn't, we'll sleep for some time * to allow the state to change... */ - if (writes_issued == 0) { - (void)tsleep((caddr_t)&vp->v_numoutput, - PRIBIO + 1, "vnode_flushdirtyblks", hz/20); + if (writes_issued == 0) { + (void)tsleep((caddr_t)&vp->v_numoutput, + PRIBIO + 1, "vnode_flushdirtyblks", hz / 20); } writes_issued = 0; busy = 0; @@ -1779,26 +1877,27 @@ buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags) { struct buflists * listheadp; - if (flags & VBI_DIRTY) + if (flags & VBI_DIRTY) { listheadp = &vp->v_dirtyblkhd; - else + } else { listheadp = &vp->v_cleanblkhd; - - while (vp->v_iterblkflags & VBI_ITER) { - vp->v_iterblkflags |= VBI_ITERWANT; - msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL); + } + + while (vp->v_iterblkflags & VBI_ITER) { + vp->v_iterblkflags |= VBI_ITERWANT; + msleep(&vp->v_iterblkflags, buf_mtxp, 0, "buf_iterprepare", NULL); } if (LIST_EMPTY(listheadp)) { - LIST_INIT(iterheadp); - return(EINVAL); + LIST_INIT(iterheadp); + return EINVAL; } vp->v_iterblkflags |= VBI_ITER; iterheadp->lh_first = listheadp->lh_first; - listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first; + listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first; LIST_INIT(listheadp); - return(0); + return 0; } /* @@ -1811,10 +1910,11 @@ buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags) struct buflists * listheadp; buf_t bp; - if (flags & VBI_DIRTY) + if (flags & VBI_DIRTY) { listheadp = &vp->v_dirtyblkhd; - else + } else { listheadp = &vp->v_cleanblkhd; + } while (!LIST_EMPTY(iterheadp)) { bp = LIST_FIRST(iterheadp); @@ -1823,7 +1923,7 @@ buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags) } vp->v_iterblkflags &= ~VBI_ITER; - if (vp->v_iterblkflags & VBI_ITERWANT) { + if (vp->v_iterblkflags & VBI_ITERWANT) { vp->v_iterblkflags &= ~VBI_ITERWANT; wakeup(&vp->v_iterblkflags); } @@ -1839,8 +1939,9 @@ bremfree_locked(buf_t bp) whichq = bp->b_whichq; if (whichq == -1) { - if (bp->b_shadow_ref == 0) + if (bp->b_shadow_ref == 0) { panic("bremfree_locked: %p not on freelist", bp); + } /* * there are clones pointing to 'bp'... * therefore, it was not put on a freelist @@ -1856,18 +1957,20 @@ bremfree_locked(buf_t bp) * NB: This makes an assumption about how tailq's are implemented. */ if (bp->b_freelist.tqe_next == NULL) { - dp = &bufqueues[whichq]; + dp = &bufqueues[whichq]; - if (dp->tqh_last != &bp->b_freelist.tqe_next) + if (dp->tqh_last != &bp->b_freelist.tqe_next) { panic("bremfree: lost tail"); + } } TAILQ_REMOVE(dp, bp, b_freelist); - if (whichq == BQ_LAUNDRY) - blaundrycnt--; + if (whichq == BQ_LAUNDRY) { + blaundrycnt--; + } bp->b_whichq = -1; - bp->b_timestamp = 0; + bp->b_timestamp = 0; bp->b_shadow = 0; } @@ -1878,14 +1981,15 @@ bremfree_locked(buf_t bp) static void bgetvp_locked(vnode_t vp, buf_t bp) { - - if (bp->b_vp != vp) + if (bp->b_vp != vp) { panic("bgetvp_locked: not free"); + } - if (vp->v_type == VBLK || vp->v_type == VCHR) + if (vp->v_type == VBLK || vp->v_type == VCHR) { bp->b_dev = vp->v_rdev; - else + } else { bp->b_dev = NODEV; + } /* * Insert onto list for new vnode. */ @@ -1902,8 +2006,9 @@ brelvp_locked(buf_t bp) /* * Delete from old vnode list, if on one. 
*/ - if (bp->b_vnbufs.le_next != NOLIST) + if (bp->b_vnbufs.le_next != NOLIST) { bufremvn(bp); + } bp->b_vp = (vnode_t)NULL; } @@ -1927,16 +2032,18 @@ buf_reassign(buf_t bp, vnode_t newvp) /* * Delete from old vnode list, if on one. */ - if (bp->b_vnbufs.le_next != NOLIST) + if (bp->b_vnbufs.le_next != NOLIST) { bufremvn(bp); + } /* * If dirty, put on list of dirty buffers; * otherwise insert onto list of clean buffers. */ - if (ISSET(bp->b_flags, B_DELWRI)) + if (ISSET(bp->b_flags, B_DELWRI)) { listheadp = &newvp->v_dirtyblkhd; - else + } else { listheadp = &newvp->v_cleanblkhd; + } bufinsvn(bp, listheadp); lck_mtx_unlock(buf_mtxp); @@ -1961,14 +2068,15 @@ bufhdrinit(buf_t bp) __private_extern__ void bufinit(void) { - buf_t bp; + buf_t bp; struct bqueues *dp; - int i; + int i; nbuf_headers = 0; /* Initialize the buffer queues ('freelists') and the hash table */ - for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) + for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) { TAILQ_INIT(dp); + } bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash); buf_busycount = 0; @@ -2003,7 +2111,7 @@ bufinit(void) */ buf_mtx_grp_attr = lck_grp_attr_alloc_init(); buf_mtx_grp = lck_grp_alloc_init("buffer cache", buf_mtx_grp_attr); - + /* * allocate the lock attribute */ @@ -2012,18 +2120,21 @@ bufinit(void) /* * allocate and initialize mutex's for the buffer and iobuffer pools */ - buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); - iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); + buf_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); + iobuffer_mtxp = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); buf_gc_callout = lck_mtx_alloc_init(buf_mtx_grp, buf_mtx_attr); - if (iobuffer_mtxp == NULL) - panic("couldn't create iobuffer mutex"); + if (iobuffer_mtxp == NULL) { + panic("couldn't create iobuffer mutex"); + } - if (buf_mtxp == NULL) - panic("couldn't create buf mutex"); + if (buf_mtxp == NULL) { + panic("couldn't create buf mutex"); + } - if (buf_gc_callout == NULL) + if (buf_gc_callout == NULL) { panic("couldn't create buf_gc_callout mutex"); + } /* * allocate and initialize cluster specific global locks... 
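The bufinit() hunks above only reflow the lock setup; the sequence they preserve is the usual xnu locking KPI pattern: allocate a group attribute, then the lock group, then a per-lock attribute, and finally the mutexes themselves, panicking if allocation fails. A minimal sketch of that same pattern follows; the `example_*` names are illustrative and not from the patch, while the `lck_*` calls are the ones visible in the hunk above.

#include <kern/locks.h>
#include <kern/debug.h>   /* panic(); include is approximate */

static lck_grp_attr_t *example_grp_attr;
static lck_grp_t      *example_grp;
static lck_attr_t     *example_attr;
static lck_mtx_t      *example_mtx;

void
example_lock_init(void)
{
	/* a group attribute, then the group that will own the lock */
	example_grp_attr = lck_grp_attr_alloc_init();
	example_grp = lck_grp_alloc_init("example subsystem", example_grp_attr);

	/* a per-lock attribute, then the mutex itself */
	example_attr = lck_attr_alloc_init();
	example_mtx = lck_mtx_alloc_init(example_grp, example_attr);

	if (example_mtx == NULL) {
		panic("couldn't create example mutex");
	}
}

Keeping every lock in a named group, as bufinit() does with "buffer cache", is what lets lock-usage tooling attribute contention to a subsystem rather than to an anonymous mutex.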
@@ -2031,7 +2142,7 @@ bufinit(void) cluster_init(); printf("using %d buffer headers and %d cluster IO buffer headers\n", - nbuf_headers, niobuf_headers); + nbuf_headers, niobuf_headers); /* Set up zones used by the buffer cache */ bufzoneinit(); @@ -2043,7 +2154,6 @@ bufinit(void) if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) { panic("Couldn't register buffer cache callout for vm pressure!\n"); } - } /* @@ -2062,8 +2172,8 @@ struct meta_zone_entry { struct meta_zone_entry meta_zones[] = { {NULL, (MINMETA * 1), 128 * (MINMETA * 1), "buf.512" }, - {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" }, - {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" }, + {NULL, (MINMETA * 2), 64 * (MINMETA * 2), "buf.1024" }, + {NULL, (MINMETA * 4), 16 * (MINMETA * 4), "buf.2048" }, {NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" }, {NULL, (MINMETA * 16), 512 * (MINMETA * 16), "buf.8192" }, {NULL, (MINMETA * 32), 512 * (MINMETA * 32), "buf.16384" }, @@ -2079,11 +2189,11 @@ bufzoneinit(void) int i; for (i = 0; meta_zones[i].mz_size != 0; i++) { - meta_zones[i].mz_zone = - zinit(meta_zones[i].mz_size, - meta_zones[i].mz_max, - PAGE_SIZE, - meta_zones[i].mz_name); + meta_zones[i].mz_zone = + zinit(meta_zones[i].mz_size, + meta_zones[i].mz_max, + PAGE_SIZE, + meta_zones[i].mz_name); zone_change(meta_zones[i].mz_zone, Z_CALLERACCT, FALSE); } buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers"); @@ -2095,15 +2205,17 @@ getbufzone(size_t size) { int i; - if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) + if ((size % 512) || (size < MINMETA) || (size > MAXMETA)) { panic("getbufzone: incorect size = %lu", size); + } for (i = 0; meta_zones[i].mz_size != 0; i++) { - if (meta_zones[i].mz_size >= size) + if (meta_zones[i].mz_size >= size) { break; + } } - return (meta_zones[i].mz_zone); + return meta_zones[i].mz_zone; } @@ -2111,7 +2223,7 @@ getbufzone(size_t size) static struct buf * bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype) { - buf_t bp; + buf_t bp; bp = buf_getblk(vp, blkno, size, 0, 0, queuetype); @@ -2137,18 +2249,18 @@ bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, trace(TR_BREADMISS, pack(vp, size), blkno); /* Pay for the read. */ - if (p && p->p_stats) { - OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */ + if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */ } if (async) { - /* + /* * since we asked for an ASYNC I/O * the biodone will do the brelse * we don't want to pass back a bp * that we don't 'own' */ - bp = NULL; + bp = NULL; } } else if (async) { buf_brelse(bp); @@ -2157,19 +2269,19 @@ bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, trace(TR_BREADHIT, pack(vp, size), blkno); - return (bp); + return bp; } /* - * Perform the reads for buf_breadn() and buf_meta_breadn(). - * Trivial modification to the breada algorithm presented in Bach (p.55). + * Perform the reads for buf_breadn() and buf_meta_breadn(). + * Trivial modification to the breada algorithm presented in Bach (p.55). 
*/ static errno_t -do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, - int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype) +do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, + int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype) { - buf_t bp; - int i; + buf_t bp; + int i; bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype); @@ -2178,15 +2290,16 @@ do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int */ for (i = 0; i < nrablks; i++) { /* If it's in the cache, just go on to next one. */ - if (incore(vp, rablks[i])) + if (incore(vp, rablks[i])) { continue; + } /* Get a buffer for the read-ahead block */ (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype); } /* Otherwise, we had to start a read for it; wait until it's valid. */ - return (buf_biowait(bp)); + return buf_biowait(bp); } @@ -2197,13 +2310,13 @@ do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int errno_t buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) { - buf_t bp; + buf_t bp; /* Get buffer for block. */ bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ); /* Wait for the read to complete, and return result. */ - return (buf_biowait(bp)); + return buf_biowait(bp); } /* @@ -2213,13 +2326,13 @@ buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) errno_t buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp) { - buf_t bp; + buf_t bp; /* Get buffer for block. */ bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META); /* Wait for the read to complete, and return result. */ - return (buf_biowait(bp)); + return buf_biowait(bp); } /* @@ -2228,7 +2341,7 @@ buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t * errno_t buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp) { - return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ)); + return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ); } /* @@ -2238,7 +2351,7 @@ buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasize errno_t buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp) { - return (do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META)); + return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META); } /* @@ -2247,22 +2360,24 @@ buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *r errno_t buf_bwrite(buf_t bp) { - int sync, wasdelayed; - errno_t rv; - proc_t p = current_proc(); - vnode_t vp = bp->b_vp; + int sync, wasdelayed; + errno_t rv; + proc_t p = current_proc(); + vnode_t vp = bp->b_vp; if (bp->b_datap == 0) { - if (brecover_data(bp) == 0) - return (0); + if (brecover_data(bp) == 0) { + return 0; + } } /* Remember buffer type, to switch on it later. */ sync = !ISSET(bp->b_flags, B_ASYNC); wasdelayed = ISSET(bp->b_flags, B_DELWRI); CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI)); - if (wasdelayed) + if (wasdelayed) { OSAddAtomicLong(-1, &nbdwrite); + } if (!sync) { /* @@ -2271,19 +2386,18 @@ buf_bwrite(buf_t bp) * to do this now, because if we don't, the vnode may not * be properly notified that its I/O has completed. 
*/ - if (wasdelayed) + if (wasdelayed) { buf_reassign(bp, vp); - else - if (p && p->p_stats) { - OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ - } + } else if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + } } trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno); /* Initiate disk write. Make sure the appropriate party is charged. */ - OSAddAtomic(1, &vp->v_numoutput); - + OSAddAtomic(1, &vp->v_numoutput); + VNOP_STRATEGY(bp); if (sync) { @@ -2297,26 +2411,25 @@ buf_bwrite(buf_t bp) * make sure it's on the correct vnode queue. (async operatings * were payed for above.) */ - if (wasdelayed) + if (wasdelayed) { buf_reassign(bp, vp); - else - if (p && p->p_stats) { - OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ - } + } else if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + } /* Release the buffer. */ buf_brelse(bp); - return (rv); + return rv; } else { - return (0); + return 0; } } int vn_bwrite(struct vnop_bwrite_args *ap) { - return (buf_bwrite(ap->a_bp)); + return buf_bwrite(ap->a_bp); } /* @@ -2333,7 +2446,7 @@ vn_bwrite(struct vnop_bwrite_args *ap) * Described in Leffler, et al. (pp. 208-213). * * Note: With the ability to allocate additional buffer - * headers, we can get in to the situation where "too" many + * headers, we can get in to the situation where "too" many * buf_bdwrite()s can create situation where the kernel can create * buffers faster than the disks can service. Doing a buf_bawrite() in * cases where we have "too many" outstanding buf_bdwrite()s avoids that. @@ -2341,8 +2454,8 @@ vn_bwrite(struct vnop_bwrite_args *ap) int bdwrite_internal(buf_t bp, int return_error) { - proc_t p = current_proc(); - vnode_t vp = bp->b_vp; + proc_t p = current_proc(); + vnode_t vp = bp->b_vp; /* * If the block hasn't been seen before: @@ -2352,8 +2465,8 @@ bdwrite_internal(buf_t bp, int return_error) */ if (!ISSET(bp->b_flags, B_DELWRI)) { SET(bp->b_flags, B_DELWRI); - if (p && p->p_stats) { - OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ + if (p && p->p_stats) { + OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */ } OSAddAtomicLong(1, &nbdwrite); buf_reassign(bp, vp); @@ -2362,7 +2475,7 @@ bdwrite_internal(buf_t bp, int return_error) /* * if we're not LOCKED, but the total number of delayed writes * has climbed above 75% of the total buffers in the system - * return an error if the caller has indicated that it can + * return an error if the caller has indicated that it can * handle one in this case, otherwise schedule the I/O now * this is done to prevent us from allocating tons of extra * buffers when dealing with virtual disks (i.e. DiskImages), @@ -2373,36 +2486,37 @@ bdwrite_internal(buf_t bp, int return_error) * buffer is part of a transaction and can't go to disk until * the LOCKED bit is cleared. */ - if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers/4)*3)) { - if (return_error) - return (EAGAIN); + if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers / 4) * 3)) { + if (return_error) { + return EAGAIN; + } /* * If the vnode has "too many" write operations in progress * wait for them to finish the IO */ (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite"); - return (buf_bawrite(bp)); + return buf_bawrite(bp); } - + /* Otherwise, the "write" is done, so mark and release the buffer. 
*/ SET(bp->b_flags, B_DONE); buf_brelse(bp); - return (0); + return 0; } errno_t buf_bdwrite(buf_t bp) { - return (bdwrite_internal(bp, 0)); + return bdwrite_internal(bp, 0); } - + /* * Asynchronous block write; just an asynchronous buf_bwrite(). * * Note: With the abilitty to allocate additional buffer - * headers, we can get in to the situation where "too" many + * headers, we can get in to the situation where "too" many * buf_bawrite()s can create situation where the kernel can create * buffers faster than the disks can service. * We limit the number of "in flight" writes a vnode can have to @@ -2411,31 +2525,32 @@ buf_bdwrite(buf_t bp) static int bawrite_internal(buf_t bp, int throttle) { - vnode_t vp = bp->b_vp; + vnode_t vp = bp->b_vp; if (vp) { - if (throttle) - /* + if (throttle) { + /* * If the vnode has "too many" write operations in progress * wait for them to finish the IO */ - (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite"); - else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) - /* - * return to the caller and + (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite"); + } else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) { + /* + * return to the caller and * let him decide what to do */ - return (EWOULDBLOCK); + return EWOULDBLOCK; + } } SET(bp->b_flags, B_ASYNC); - return (VNOP_BWRITE(bp)); + return VNOP_BWRITE(bp); } errno_t buf_bawrite(buf_t bp) { - return (bawrite_internal(bp, 1)); + return bawrite_internal(bp, 1); } @@ -2448,9 +2563,10 @@ buf_free_meta_store(buf_t bp) zone_t z; z = getbufzone(bp->b_bufsize); - zfree(z, (void *)bp->b_datap); - } else - kmem_free(kernel_map, bp->b_datap, bp->b_bufsize); + zfree(z, bp->b_datap); + } else { + kmem_free(kernel_map, bp->b_datap, bp->b_bufsize); + } bp->b_datap = (uintptr_t)NULL; bp->b_bufsize = 0; @@ -2461,12 +2577,12 @@ buf_free_meta_store(buf_t bp) static buf_t buf_brelse_shadow(buf_t bp) { - buf_t bp_head; - buf_t bp_temp; - buf_t bp_return = NULL; + buf_t bp_head; + buf_t bp_temp; + buf_t bp_return = NULL; #ifdef BUF_MAKE_PRIVATE - buf_t bp_data; - int data_ref = 0; + buf_t bp_data; + int data_ref = 0; #endif int need_wakeup = 0; @@ -2474,14 +2590,15 @@ buf_brelse_shadow(buf_t bp) __IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig); - if (bp_head->b_whichq != -1) + if (bp_head->b_whichq != -1) { panic("buf_brelse_shadow: bp_head on freelist %d\n", bp_head->b_whichq); + } #ifdef BUF_MAKE_PRIVATE if (bp_data = bp->b_data_store) { bp_data->b_data_ref--; /* - * snapshot the ref count so that we can check it + * snapshot the ref count so that we can check it * outside of the lock... 
we only want the guy going * from 1 -> 0 to try and release the storage */ @@ -2492,10 +2609,13 @@ buf_brelse_shadow(buf_t bp) bp_head->b_shadow_ref--; - for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow); + for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow) { + ; + } - if (bp_temp == NULL) + if (bp_temp == NULL) { panic("buf_brelse_shadow: bp not on list %p", bp_head); + } bp_temp->b_shadow = bp_temp->b_shadow->b_shadow; @@ -2506,22 +2626,25 @@ buf_brelse_shadow(buf_t bp) * so transfer it to the first shadow buf left in the chain */ if (bp == bp_data && data_ref) { - if ((bp_data = bp_head->b_shadow) == NULL) + if ((bp_data = bp_head->b_shadow) == NULL) { panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp); + } - for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) + for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) { bp_temp->b_data_store = bp_data; + } bp_data->b_data_ref = data_ref; } #endif - if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) - panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp); - if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) - panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp); + if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) { + panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp); + } + if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) { + panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp); + } if (bp_head->b_shadow_ref == 0) { if (!ISSET(bp_head->b_lflags, BL_BUSY)) { - CLR(bp_head->b_flags, B_AGE); bp_head->b_timestamp = buf_timestamp(); @@ -2544,18 +2667,20 @@ buf_brelse_shadow(buf_t bp) } lck_mtx_unlock(buf_mtxp); - if (need_wakeup) + if (need_wakeup) { wakeup(bp_head); + } -#ifdef BUF_MAKE_PRIVATE - if (bp == bp_data && data_ref == 0) +#ifdef BUF_MAKE_PRIVATE + if (bp == bp_data && data_ref == 0) { buf_free_meta_store(bp); + } bp->b_data_store = NULL; #endif KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0); - return (bp_return); + return bp_return; } @@ -2567,14 +2692,15 @@ void buf_brelse(buf_t bp) { struct bqueues *bufq; - long whichq; - upl_t upl; + long whichq; + upl_t upl; int need_wakeup = 0; int need_bp_wakeup = 0; - if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) - panic("buf_brelse: bad buffer = %p\n", bp); + if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) { + panic("buf_brelse: bad buffer = %p\n", bp); + } #ifdef JOE_DEBUG (void) OSBacktrace(&bp->b_stackbrelse[0], 6); @@ -2583,13 +2709,14 @@ buf_brelse(buf_t bp) bp->b_tag = 0; #endif if (bp->b_lflags & BL_IOBUF) { - buf_t shadow_master_bp = NULL; + buf_t shadow_master_bp = NULL; - if (ISSET(bp->b_lflags, BL_SHADOW)) + if (ISSET(bp->b_lflags, BL_SHADOW)) { shadow_master_bp = buf_brelse_shadow(bp); - else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) - buf_free_meta_store(bp); - free_io_buf(bp); + } else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) { + buf_free_meta_store(bp); + } + free_io_buf(bp); if (shadow_master_bp) { bp = shadow_master_bp; @@ -2599,8 +2726,8 @@ buf_brelse(buf_t bp) } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START, - bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap, - bp->b_flags, 0); + bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap, + bp->b_flags, 0); trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); @@ -2612,11 +2739,11 @@ buf_brelse(buf_t bp) * the HFS journal code depends on this */ if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, 
B_INVAL)) { - if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */ - void (*iodone_func)(struct buf *, void *) = bp->b_iodone; - void *arg = bp->b_transaction; + if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */ + void (*iodone_func)(struct buf *, void *) = bp->b_iodone; + void *arg = bp->b_transaction; - CLR(bp->b_flags, B_FILTER); /* but note callout done */ + CLR(bp->b_flags, B_FILTER); /* but note callout done */ bp->b_iodone = NULL; bp->b_transaction = NULL; @@ -2631,89 +2758,97 @@ buf_brelse(buf_t bp) */ upl = bp->b_upl; - if ( !ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { + if (!ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { kern_return_t kret; int upl_flags; if (upl == NULL) { - if ( !ISSET(bp->b_flags, B_INVAL)) { + if (!ISSET(bp->b_flags, B_INVAL)) { kret = ubc_create_upl_kernel(bp->b_vp, - ubc_blktooff(bp->b_vp, bp->b_lblkno), - bp->b_bufsize, - &upl, - NULL, - UPL_PRECIOUS, - VM_KERN_MEMORY_FILE); - - if (kret != KERN_SUCCESS) - panic("brelse: Failed to create UPL"); + ubc_blktooff(bp->b_vp, bp->b_lblkno), + bp->b_bufsize, + &upl, + NULL, + UPL_PRECIOUS, + VM_KERN_MEMORY_FILE); + + if (kret != KERN_SUCCESS) { + panic("brelse: Failed to create UPL"); + } #if UPL_DEBUG upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5); #endif /* UPL_DEBUG */ } } else { if (bp->b_datap) { - kret = ubc_upl_unmap(upl); + kret = ubc_upl_unmap(upl); - if (kret != KERN_SUCCESS) - panic("ubc_upl_unmap failed"); + if (kret != KERN_SUCCESS) { + panic("ubc_upl_unmap failed"); + } bp->b_datap = (uintptr_t)NULL; } } if (upl) { if (bp->b_flags & (B_ERROR | B_INVAL)) { - if (bp->b_flags & (B_READ | B_INVAL)) - upl_flags = UPL_ABORT_DUMP_PAGES; - else - upl_flags = 0; + if (bp->b_flags & (B_READ | B_INVAL)) { + upl_flags = UPL_ABORT_DUMP_PAGES; + } else { + upl_flags = 0; + } ubc_upl_abort(upl, upl_flags); } else { - if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) - upl_flags = UPL_COMMIT_SET_DIRTY ; - else - upl_flags = UPL_COMMIT_CLEAR_DIRTY ; + if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) { + upl_flags = UPL_COMMIT_SET_DIRTY; + } else { + upl_flags = UPL_COMMIT_CLEAR_DIRTY; + } ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags | - UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); + UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); } bp->b_upl = NULL; } } else { - if ( (upl) ) + if ((upl)) { panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp); - } + } + } /* * If it's locked, don't report an error; try again later. */ - if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR)) + if (ISSET(bp->b_flags, (B_LOCKED | B_ERROR)) == (B_LOCKED | B_ERROR)) { CLR(bp->b_flags, B_ERROR); + } /* * If it's not cacheable, or an error, mark it invalid. 
*/ - if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR))) + if (ISSET(bp->b_flags, (B_NOCACHE | B_ERROR))) { SET(bp->b_flags, B_INVAL); - - if ((bp->b_bufsize <= 0) || - ISSET(bp->b_flags, B_INVAL) || - (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) { + } - boolean_t delayed_buf_free_meta_store = FALSE; + if ((bp->b_bufsize <= 0) || + ISSET(bp->b_flags, B_INVAL) || + (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) { + boolean_t delayed_buf_free_meta_store = FALSE; /* * If it's invalid or empty, dissociate it from its vnode, * release its storage if B_META, and * clean it up a bit and put it on the EMPTY queue */ - if (ISSET(bp->b_flags, B_DELWRI)) + if (ISSET(bp->b_flags, B_DELWRI)) { OSAddAtomicLong(-1, &nbdwrite); + } if (ISSET(bp->b_flags, B_META)) { - if (bp->b_shadow_ref) + if (bp->b_shadow_ref) { delayed_buf_free_meta_store = TRUE; - else + } else { buf_free_meta_store(bp); + } } /* * nuke any credentials we were holding @@ -2724,13 +2859,12 @@ buf_brelse(buf_t bp) if (bp->b_shadow_ref) { SET(bp->b_lflags, BL_WAITSHADOW); - + lck_mtx_unlock(buf_mtxp); - + return; } if (delayed_buf_free_meta_store == TRUE) { - lck_mtx_unlock(buf_mtxp); finish_shadow_master: buf_free_meta_store(bp); @@ -2739,8 +2873,9 @@ finish_shadow_master: } CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); - if (bp->b_vp) + if (bp->b_vp) { brelvp_locked(bp); + } bremhash(bp); BLISTNONE(bp); @@ -2749,25 +2884,25 @@ finish_shadow_master: bp->b_whichq = BQ_EMPTY; binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY); } else { - /* * It has valid data. Put it on the end of the appropriate * queue, so that it'll stick around for as long as possible. */ - if (ISSET(bp->b_flags, B_LOCKED)) - whichq = BQ_LOCKED; /* locked in core */ - else if (ISSET(bp->b_flags, B_META)) - whichq = BQ_META; /* meta-data */ - else if (ISSET(bp->b_flags, B_AGE)) - whichq = BQ_AGE; /* stale but valid data */ - else - whichq = BQ_LRU; /* valid data */ + if (ISSET(bp->b_flags, B_LOCKED)) { + whichq = BQ_LOCKED; /* locked in core */ + } else if (ISSET(bp->b_flags, B_META)) { + whichq = BQ_META; /* meta-data */ + } else if (ISSET(bp->b_flags, B_AGE)) { + whichq = BQ_AGE; /* stale but valid data */ + } else { + whichq = BQ_LRU; /* valid data */ + } bufq = &bufqueues[whichq]; bp->b_timestamp = buf_timestamp(); lck_mtx_lock_spin(buf_mtxp); - + /* * the buf_brelse_shadow routine doesn't take 'ownership' * of the parent buf_t... it updates state that is protected by @@ -2785,14 +2920,14 @@ finish_shadow_master: /* * there are still cloned buf_t's pointing * at this guy... need to keep it off the - * freelists until a buf_brelse is done on + * freelists until a buf_brelse is done on * the last clone */ CLR(bp->b_flags, (B_ASYNC | B_NOCACHE)); } } if (needbuffer) { - /* + /* * needbuffer is a global * we're currently using buf_mtxp to protect it * delay doing the actual wakeup until after @@ -2802,7 +2937,7 @@ finish_shadow_master: need_wakeup = 1; } if (ISSET(bp->b_lflags, BL_WANTED)) { - /* + /* * delay the actual wakeup until after we * clear BL_BUSY and we've dropped buf_mtxp */ @@ -2817,19 +2952,19 @@ finish_shadow_master: lck_mtx_unlock(buf_mtxp); if (need_wakeup) { - /* + /* * Wake up any processes waiting for any buffer to become free. */ - wakeup(&needbuffer); + wakeup(&needbuffer); } if (need_bp_wakeup) { - /* + /* * Wake up any proceeses waiting for _this_ buffer to become free. 
*/ - wakeup(bp); + wakeup(bp); } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END, - bp, bp->b_datap, bp->b_flags, 0, 0); + bp, bp->b_datap, bp->b_flags, 0, 0); } /* @@ -2842,20 +2977,21 @@ finish_shadow_master: static boolean_t incore(vnode_t vp, daddr64_t blkno) { - boolean_t retval; - struct bufhashhdr *dp; + boolean_t retval; + struct bufhashhdr *dp; dp = BUFHASH(vp, blkno); lck_mtx_lock_spin(buf_mtxp); - if (incore_locked(vp, blkno, dp)) - retval = TRUE; - else - retval = FALSE; + if (incore_locked(vp, blkno, dp)) { + retval = TRUE; + } else { + retval = FALSE; + } lck_mtx_unlock(buf_mtxp); - return (retval); + return retval; } @@ -2868,10 +3004,10 @@ incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp) for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) { if (bp->b_lblkno == blkno && bp->b_vp == vp && !ISSET(bp->b_flags, B_INVAL)) { - return (bp); + return bp; } } - return (NULL); + return NULL; } @@ -2879,26 +3015,28 @@ void buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno) { buf_t bp; - struct bufhashhdr *dp; + struct bufhashhdr *dp; dp = BUFHASH(vp, blkno); lck_mtx_lock_spin(buf_mtxp); for (;;) { - if ((bp = incore_locked(vp, blkno, dp)) == NULL) + if ((bp = incore_locked(vp, blkno, dp)) == NULL) { break; + } - if (bp->b_shadow_ref == 0) + if (bp->b_shadow_ref == 0) { break; + } SET(bp->b_lflags, BL_WANTED_REF); - (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO+1), "buf_wait_for_shadow", NULL); + (void) msleep(bp, buf_mtxp, PSPIN | (PRIBIO + 1), "buf_wait_for_shadow", NULL); } lck_mtx_unlock(buf_mtxp); } - + /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */ /* * Get a block of requested size that is associated with @@ -2919,10 +3057,10 @@ buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int int ret_only_valid; struct timespec ts; int upl_flags; - struct bufhashhdr *dp; + struct bufhashhdr *dp; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START, - (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0); + (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0); ret_only_valid = operation & BLK_ONLYVALID; operation &= ~BLK_ONLYVALID; @@ -2947,13 +3085,13 @@ start: /* * don't retake the mutex after being awakened... 
- * the time out is in msecs + * the time out is in msecs */ - ts.tv_sec = (slptimeo/1000); + ts.tv_sec = (slptimeo / 1000); ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE, - (uintptr_t)blkno, size, operation, 0, 0); + (uintptr_t)blkno, size, operation, 0, 0); err = msleep(bp, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts); @@ -2961,19 +3099,20 @@ start: * Callers who call with PCATCH or timeout are * willing to deal with the NULL pointer */ - if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) - return (NULL); + if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) { + return NULL; + } goto start; - /*NOTREACHED*/ + /*NOTREACHED*/ default: - /* + /* * unknown operation requested */ panic("getblk: paging or unknown operation for incore busy buffer - %x\n", operation); /*NOTREACHED*/ break; - } + } } else { int clear_bdone; @@ -2986,14 +3125,15 @@ start: bremfree_locked(bp); bufstats.bufs_incore++; - + lck_mtx_unlock(buf_mtxp); #ifdef JOE_DEBUG bp->b_owner = current_thread(); bp->b_tag = 1; #endif - if ( (bp->b_upl) ) - panic("buffer has UPL, but not marked BUSY: %p", bp); + if ((bp->b_upl)) { + panic("buffer has UPL, but not marked BUSY: %p", bp); + } clear_bdone = FALSE; if (!ret_only_valid) { @@ -3027,8 +3167,9 @@ start: clear_bdone = TRUE; } - if (bp->b_bufsize != size) + if (bp->b_bufsize != size) { allocbuf(bp, size); + } } upl_flags = 0; @@ -3042,38 +3183,42 @@ start: upl_flags |= UPL_WILL_MODIFY; case BLK_READ: upl_flags |= UPL_PRECIOUS; - if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { + if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) { kret = ubc_create_upl_kernel(vp, - ubc_blktooff(vp, bp->b_lblkno), - bp->b_bufsize, - &upl, - &pl, - upl_flags, - VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) - panic("Failed to create UPL"); + ubc_blktooff(vp, bp->b_lblkno), + bp->b_bufsize, + &upl, + &pl, + upl_flags, + VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { + panic("Failed to create UPL"); + } bp->b_upl = upl; if (upl_valid_page(pl, 0)) { - if (upl_dirty_page(pl, 0)) - SET(bp->b_flags, B_WASDIRTY); - else - CLR(bp->b_flags, B_WASDIRTY); - } else - CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI)); + if (upl_dirty_page(pl, 0)) { + SET(bp->b_flags, B_WASDIRTY); + } else { + CLR(bp->b_flags, B_WASDIRTY); + } + } else { + CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI)); + } kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap)); - if (kret != KERN_SUCCESS) - panic("getblk: ubc_upl_map() failed with (%d)", kret); + if (kret != KERN_SUCCESS) { + panic("getblk: ubc_upl_map() failed with (%d)", kret); + } } break; case BLK_META: /* * VM is not involved in IO for the meta data - * buffer already has valid data + * buffer already has valid data */ break; @@ -3083,21 +3228,24 @@ start: break; } - if (clear_bdone) + if (clear_bdone) { CLR(bp->b_flags, B_DONE); + } } } else { /* not incore() */ int queue = BQ_EMPTY; /* Start with no preference */ - + if (ret_only_valid) { lck_mtx_unlock(buf_mtxp); - return (NULL); + return NULL; } - if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) + if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) { operation = BLK_META; + } - if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) + if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) { goto start; + } /* * getnewbuf may block for a number of different reasons... 
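Several call sites in these hunks (buf_getblk above, getnewbuf and buf_acquire_locked below) share one idiom: build a struct timespec from a timeout count, then msleep() on a wait channel with buf_mtxp held, passing PDROP so the mutex is released on the way out. A minimal sketch of that idiom under assumed names: `example_chan`, `example_mtx`, and the millisecond `timeout_msecs` parameter are illustrative, and the includes are approximate.

#include <sys/param.h>   /* PRIBIO, PDROP, PCATCH */
#include <sys/systm.h>   /* msleep() */
#include <sys/time.h>    /* struct timespec */
#include <kern/locks.h>  /* lck_mtx_t */

static int
example_wait(void *example_chan, lck_mtx_t *example_mtx, int timeout_msecs)
{
	struct timespec ts;

	/* split the millisecond count into seconds and nanoseconds */
	ts.tv_sec = timeout_msecs / 1000;
	ts.tv_nsec = (timeout_msecs % 1000) * NSEC_PER_USEC * 1000;

	/*
	 * sleep until woken or timed out; PDROP drops example_mtx on
	 * return, so the caller must re-take the lock before touching
	 * any state it protects (buf_getblk simply restarts instead)
	 */
	return msleep(example_chan, example_mtx, PDROP | (PRIBIO + 1),
	    "example_wait", &ts);
}

As in the patch, a caller willing to be interrupted would OR PCATCH into the priority argument and treat a resulting EINTR/EWOULDBLOCK return as a signal to give up, which is exactly the `(slpflag & PCATCH)` check buf_getblk performs above.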
@@ -3124,16 +3272,17 @@ start: * mark the buffer as B_META if indicated * so that when buffer is released it will goto META queue */ - if (operation == BLK_META) - SET(bp->b_flags, B_META); + if (operation == BLK_META) { + SET(bp->b_flags, B_META); + } bp->b_blkno = bp->b_lblkno = blkno; bp->b_vp = vp; /* - * Insert in the hash so that incore() can find it + * Insert in the hash so that incore() can find it */ - binshash(bp, BUFHASH(vp, blkno)); + binshash(bp, BUFHASH(vp, blkno)); bgetvp_locked(vp, bp); @@ -3153,7 +3302,7 @@ start: * in bufstats are protected with either * buf_mtxp or iobuffer_mtxp */ - OSAddAtomicLong(1, &bufstats.bufs_miss); + OSAddAtomicLong(1, &bufstats.bufs_miss); break; case BLK_WRITE: @@ -3164,98 +3313,102 @@ start: */ upl_flags |= UPL_WILL_MODIFY; case BLK_READ: - { off_t f_offset; - size_t contig_bytes; - int bmap_flags; + { off_t f_offset; + size_t contig_bytes; + int bmap_flags; #if DEVELOPMENT || DEBUG /* * Apple implemented file systems use UBC excludively; they should * not call in here." */ - const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs", - "exfat", "msdos", "webdav", NULL}; - - for (int i = 0; excldfs[i] != NULL; i++) { - if (vp->v_mount && - !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename, - excldfs[i])) { - panic("%s %s calls buf_getblk", - excldfs[i], - operation == BLK_READ ? "BLK_READ" : "BLK_WRITE"); - } - } + const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs", + "exfat", "msdos", "webdav", NULL}; + + for (int i = 0; excldfs[i] != NULL; i++) { + if (vp->v_mount && + !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename, + excldfs[i])) { + panic("%s %s calls buf_getblk", + excldfs[i], + operation == BLK_READ ? "BLK_READ" : "BLK_WRITE"); + } + } #endif - if ( (bp->b_upl) ) - panic("bp already has UPL: %p",bp); + if ((bp->b_upl)) { + panic("bp already has UPL: %p", bp); + } - f_offset = ubc_blktooff(vp, blkno); + f_offset = ubc_blktooff(vp, blkno); - upl_flags |= UPL_PRECIOUS; - kret = ubc_create_upl_kernel(vp, - f_offset, - bp->b_bufsize, - &upl, - &pl, - upl_flags, - VM_KERN_MEMORY_FILE); + upl_flags |= UPL_PRECIOUS; + kret = ubc_create_upl_kernel(vp, + f_offset, + bp->b_bufsize, + &upl, + &pl, + upl_flags, + VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) - panic("Failed to create UPL"); + if (kret != KERN_SUCCESS) { + panic("Failed to create UPL"); + } #if UPL_DEBUG - upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4); + upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4); #endif /* UPL_DEBUG */ - bp->b_upl = upl; - - if (upl_valid_page(pl, 0)) { - - if (operation == BLK_READ) - bmap_flags = VNODE_READ; - else - bmap_flags = VNODE_WRITE; - - SET(bp->b_flags, B_CACHE | B_DONE); - - OSAddAtomicLong(1, &bufstats.bufs_vmhits); - - bp->b_validoff = 0; - bp->b_dirtyoff = 0; - - if (upl_dirty_page(pl, 0)) { - /* page is dirty */ - SET(bp->b_flags, B_WASDIRTY); - - bp->b_validend = bp->b_bcount; - bp->b_dirtyend = bp->b_bcount; - } else { - /* page is clean */ - bp->b_validend = bp->b_bcount; - bp->b_dirtyend = 0; - } - /* - * try to recreate the physical block number associated with - * this buffer... 
- */ - if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) - panic("getblk: VNOP_BLOCKMAP failed"); - /* - * if the extent represented by this buffer - * is not completely physically contiguous on - * disk, than we can't cache the physical mapping - * in the buffer header - */ - if ((long)contig_bytes < bp->b_bcount) - bp->b_blkno = bp->b_lblkno; - } else { - OSAddAtomicLong(1, &bufstats.bufs_miss); - } - kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); - - if (kret != KERN_SUCCESS) - panic("getblk: ubc_upl_map() failed with (%d)", kret); - break; - } // end BLK_READ + bp->b_upl = upl; + + if (upl_valid_page(pl, 0)) { + if (operation == BLK_READ) { + bmap_flags = VNODE_READ; + } else { + bmap_flags = VNODE_WRITE; + } + + SET(bp->b_flags, B_CACHE | B_DONE); + + OSAddAtomicLong(1, &bufstats.bufs_vmhits); + + bp->b_validoff = 0; + bp->b_dirtyoff = 0; + + if (upl_dirty_page(pl, 0)) { + /* page is dirty */ + SET(bp->b_flags, B_WASDIRTY); + + bp->b_validend = bp->b_bcount; + bp->b_dirtyend = bp->b_bcount; + } else { + /* page is clean */ + bp->b_validend = bp->b_bcount; + bp->b_dirtyend = 0; + } + /* + * try to recreate the physical block number associated with + * this buffer... + */ + if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) { + panic("getblk: VNOP_BLOCKMAP failed"); + } + /* + * if the extent represented by this buffer + * is not completely physically contiguous on + * disk, than we can't cache the physical mapping + * in the buffer header + */ + if ((long)contig_bytes < bp->b_bcount) { + bp->b_blkno = bp->b_lblkno; + } + } else { + OSAddAtomicLong(1, &bufstats.bufs_miss); + } + kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); + + if (kret != KERN_SUCCESS) { + panic("getblk: ubc_upl_map() failed with (%d)", kret); + } + break;} // end BLK_READ default: panic("getblk: paging or unknown operation - %x", operation); /*NOTREACHED*/ @@ -3264,12 +3417,12 @@ start: } //end buf_t !incore KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END, - bp, bp->b_datap, bp->b_flags, 3, 0); + bp, bp->b_datap, bp->b_flags, 3, 0); #ifdef JOE_DEBUG (void) OSBacktrace(&bp->b_stackgetblk[0], 6); #endif - return (bp); + return bp; } /* @@ -3278,7 +3431,7 @@ start: buf_t buf_geteblk(int size) { - buf_t bp = NULL; + buf_t bp = NULL; int queue = BQ_EMPTY; do { @@ -3287,7 +3440,7 @@ buf_geteblk(int size) bp = getnewbuf(0, 0, &queue); } while (bp == NULL); - SET(bp->b_flags, (B_META|B_INVAL)); + SET(bp->b_flags, (B_META | B_INVAL)); #if DIAGNOSTIC assert(queue == BQ_EMPTY); @@ -3301,7 +3454,7 @@ buf_geteblk(int size) allocbuf(bp, size); - return (bp); + return bp; } uint32_t @@ -3327,14 +3480,15 @@ buf_clear_redundancy_flags(buf_t bp, uint32_t flags) static void * recycle_buf_from_pool(int nsize) { - buf_t bp; - void *ptr = NULL; + buf_t bp; + void *ptr = NULL; lck_mtx_lock_spin(buf_mtxp); TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) { - if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize) + if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != nsize) { continue; + } ptr = (void *)bp->b_datap; bp->b_bufsize = 0; @@ -3343,7 +3497,7 @@ recycle_buf_from_pool(int nsize) } lck_mtx_unlock(buf_mtxp); - return (ptr); + return ptr; } @@ -3371,33 +3525,34 @@ grab_memory_for_meta_buf(int nsize) ptr = zalloc_nopagewait(z); - if (was_vmpriv == TRUE) + if (was_vmpriv == TRUE) { set_vm_privilege(TRUE); + } if (ptr == NULL) { - zalloc_nopagewait_failed++; ptr = recycle_buf_from_pool(nsize); if (ptr == 
NULL) { - recycle_buf_failed++; - if (was_vmpriv == FALSE) + if (was_vmpriv == FALSE) { set_vm_privilege(TRUE); + } ptr = zalloc(z); - if (was_vmpriv == FALSE) + if (was_vmpriv == FALSE) { set_vm_privilege(FALSE); + } } } - return (ptr); + return ptr; } /* - * With UBC, there is no need to expand / shrink the file data + * With UBC, there is no need to expand / shrink the file data * buffer. The VM uses the same pages, hence no waste. * All the file data buffers can have one size. * In fact expand / shrink would be an expensive operation. @@ -3415,10 +3570,12 @@ allocbuf(buf_t bp, int size) desired_size = roundup(size, CLBYTES); - if (desired_size < PAGE_SIZE) + if (desired_size < PAGE_SIZE) { desired_size = PAGE_SIZE; - if (desired_size > MAXBSIZE) + } + if (desired_size > MAXBSIZE) { panic("allocbuf: buffer larger than MAXBSIZE requested"); + } if (ISSET(bp->b_flags, B_META)) { int nsize = roundup(size, MINMETA); @@ -3427,35 +3584,34 @@ allocbuf(buf_t bp, int size) vm_offset_t elem = (vm_offset_t)bp->b_datap; if (ISSET(bp->b_flags, B_ZALLOC)) { - if (bp->b_bufsize < nsize) { - zone_t zprev; + if (bp->b_bufsize < nsize) { + zone_t zprev; - /* reallocate to a bigger size */ + /* reallocate to a bigger size */ - zprev = getbufzone(bp->b_bufsize); + zprev = getbufzone(bp->b_bufsize); if (nsize <= MAXMETA) { - desired_size = nsize; + desired_size = nsize; /* b_datap not really a ptr */ *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize); } else { - bp->b_datap = (uintptr_t)NULL; - kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); + bp->b_datap = (uintptr_t)NULL; + kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); CLR(bp->b_flags, B_ZALLOC); } bcopy((void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); - zfree(zprev, (void *)elem); + zfree(zprev, elem); } else { - desired_size = bp->b_bufsize; + desired_size = bp->b_bufsize; } - } else { if ((vm_size_t)bp->b_bufsize < desired_size) { /* reallocate to a bigger size */ - bp->b_datap = (uintptr_t)NULL; + bp->b_datap = (uintptr_t)NULL; kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); bcopy((const void *)elem, (caddr_t)bp->b_datap, bp->b_bufsize); - kmem_free(kernel_map, elem, bp->b_bufsize); + kmem_free(kernel_map, elem, bp->b_bufsize); } else { desired_size = bp->b_bufsize; } @@ -3468,17 +3624,19 @@ allocbuf(buf_t bp, int size) /* b_datap not really a ptr */ *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize); SET(bp->b_flags, B_ZALLOC); - } else + } else { kmem_alloc_kobject(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size, VM_KERN_MEMORY_FILE); + } } - if (bp->b_datap == 0) - panic("allocbuf: NULL b_datap"); + if (bp->b_datap == 0) { + panic("allocbuf: NULL b_datap"); + } } bp->b_bufsize = desired_size; bp->b_bcount = size; - return (0); + return 0; } /* @@ -3506,12 +3664,12 @@ allocbuf(buf_t bp, int size) static buf_t getnewbuf(int slpflag, int slptimeo, int * queue) { - buf_t bp; - buf_t lru_bp; - buf_t age_bp; - buf_t meta_bp; - int age_time, lru_time, bp_time, meta_time; - int req = *queue; /* save it for restarts */ + buf_t bp; + buf_t lru_bp; + buf_t age_bp; + buf_t meta_bp; + int age_time, lru_time, bp_time, meta_time; + int req = *queue; /* save it for restarts */ struct timespec ts; start: @@ -3519,19 +3677,21 @@ start: * invalid request gets empty queue */ if ((*queue >= BQUEUES) || (*queue < 0) - || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) + || (*queue == BQ_LAUNDRY) || 
(*queue == BQ_LOCKED)) { *queue = BQ_EMPTY; + } - if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) - goto found; + if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) { + goto found; + } /* * need to grow number of bufs, add another one rather than recycling */ if (nbuf_headers < max_nbuf_headers) { /* - * Increment count now as lock + * Increment count now as lock * is dropped for allocation. * That avoids over commits */ @@ -3540,8 +3700,9 @@ start: } /* Try for the requested queue first */ bp = bufqueues[*queue].tqh_first; - if (bp) - goto found; + if (bp) { + goto found; + } /* Unable to use requested queue */ age_bp = bufqueues[BQ_AGE].tqh_first; @@ -3560,7 +3721,7 @@ start: } /* * We have seen is this is hard to trigger. - * This is an overcommit of nbufs but needed + * This is an overcommit of nbufs but needed * in some scenarios with diskiamges */ @@ -3569,7 +3730,7 @@ add_newbufs: /* Create a new temporary buffer header */ bp = (struct buf *)zalloc(buf_hdr_zone); - + if (bp) { bufhdrinit(bp); bp->b_whichq = BQ_EMPTY; @@ -3594,12 +3755,12 @@ add_newbufs: /* wait for a free buffer of any kind */ needbuffer = 1; /* hz value is 100 */ - ts.tv_sec = (slptimeo/1000); + ts.tv_sec = (slptimeo / 1000); /* the hz value is 100; which leads to 10ms */ ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10; - msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO+1), "getnewbuf", &ts); - return (NULL); + msleep(&needbuffer, buf_mtxp, slpflag | PDROP | (PRIBIO + 1), "getnewbuf", &ts); + return NULL; } /* Buffer available either on AGE or LRU or META */ @@ -3614,7 +3775,7 @@ add_newbufs: bp = age_bp; *queue = BQ_AGE; } else { /* buffer available on both AGE and LRU */ - int t = buf_timestamp(); + int t = buf_timestamp(); age_time = t - age_bp->b_timestamp; lru_time = t - lru_bp->b_timestamp; @@ -3639,8 +3800,8 @@ add_newbufs: if (!bp) { /* Neither on AGE nor on LRU */ bp = meta_bp; *queue = BQ_META; - } else if (meta_bp) { - int t = buf_timestamp(); + } else if (meta_bp) { + int t = buf_timestamp(); bp_time = t - bp->b_timestamp; meta_time = t - meta_bp->b_timestamp; @@ -3648,19 +3809,20 @@ add_newbufs: if (!(bp_time < 0) && !(meta_time < 0)) { /* time not set backwards */ int bp_is_stale; - bp_is_stale = (*queue == BQ_LRU) ? - lru_is_stale : age_is_stale; + bp_is_stale = (*queue == BQ_LRU) ? + lru_is_stale : age_is_stale; - if ((meta_time >= meta_is_stale) && - (bp_time < bp_is_stale)) { + if ((meta_time >= meta_is_stale) && + (bp_time < bp_is_stale)) { bp = meta_bp; *queue = BQ_META; } } } found: - if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) - panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags); + if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) { + panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)\n", bp, bp->b_flags); + } /* Clean it */ if (bcleanbuf(bp, FALSE)) { @@ -3670,16 +3832,16 @@ found: *queue = req; goto start; } - return (bp); + return bp; } -/* +/* * Clean a buffer. * Returns 0 if buffer is ready to use, - * Returns 1 if issued a buf_bawrite() to indicate + * Returns 1 if issued a buf_bawrite() to indicate * that the buffer is not ready. 
-/* +/* * Clean a buffer. * Returns 0 if buffer is ready to use, - * Returns 1 if issued a buf_bawrite() to indicate + * Returns 1 if issued a buf_bawrite() to indicate * that the buffer is not ready. - * + * buf_mtxp is held upon entry * returns with buf_mtxp locked */ @@ -3714,7 +3876,7 @@ bcleanbuf(buf_t bp, boolean_t discard) lck_mtx_lock_spin(buf_mtxp); - return (1); + return 1; } #ifdef JOE_DEBUG bp->b_owner = current_thread(); @@ -3725,26 +3887,28 @@ bcleanbuf(buf_t bp, boolean_t discard) */ SET(bp->b_lflags, BL_BUSY); buf_busycount++; - + bremhash(bp); /* * disassociate us from our vnode, if we had one... */ - if (bp->b_vp) + if (bp->b_vp) { brelvp_locked(bp); + } lck_mtx_unlock(buf_mtxp); BLISTNONE(bp); - if (ISSET(bp->b_flags, B_META)) + if (ISSET(bp->b_flags, B_META)) { buf_free_meta_store(bp); + } trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); buf_release_credentials(bp); - + /* If discarding, just move to the empty queue */ if (discard) { lck_mtx_lock_spin(buf_mtxp); @@ -3785,7 +3949,7 @@ bcleanbuf(buf_t bp, boolean_t discard) lck_mtx_lock_spin(buf_mtxp); } - return (0); + return 0; } @@ -3793,30 +3957,30 @@ bcleanbuf(buf_t bp, boolean_t discard) errno_t buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags) { - buf_t bp; - errno_t error; + buf_t bp; + errno_t error; struct bufhashhdr *dp; dp = BUFHASH(vp, lblkno); -relook: +relook: lck_mtx_lock_spin(buf_mtxp); if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) { - lck_mtx_unlock(buf_mtxp); - return (0); + lck_mtx_unlock(buf_mtxp); + return 0; } if (ISSET(bp->b_lflags, BL_BUSY)) { - if ( !ISSET(flags, BUF_WAIT)) { - lck_mtx_unlock(buf_mtxp); - return (EBUSY); + if (!ISSET(flags, BUF_WAIT)) { + lck_mtx_unlock(buf_mtxp); + return EBUSY; } - SET(bp->b_lflags, BL_WANTED); + SET(bp->b_lflags, BL_WANTED); error = msleep((caddr_t)bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL); if (error) { - return (error); + return error; } goto relook; } @@ -3831,19 +3995,19 @@ relook: lck_mtx_unlock(buf_mtxp); buf_brelse(bp); - return (0); + return 0; } void buf_drop(buf_t bp) { - int need_wakeup = 0; + int need_wakeup = 0; lck_mtx_lock_spin(buf_mtxp); if (ISSET(bp->b_lflags, BL_WANTED)) { - /* + /* * delay the actual wakeup until after we * clear BL_BUSY and we've dropped buf_mtxp */ @@ -3862,25 +4026,26 @@ buf_drop(buf_t bp) lck_mtx_unlock(buf_mtxp); if (need_wakeup) { - /* + /* * Wake up any processes waiting for _this_ buffer to become free. 
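buf_drop() above uses a deferred-wakeup idiom: the BL_WANTED bit is consumed while buf_mtxp is held, but wakeup() itself runs only after the mutex is dropped, so awakened threads do not immediately collide with the lock. A condensed sketch of the pattern, with generic names standing in for the buffer-specific state:

	/*
	 * Sketch only: release a resource and wake waiters without
	 * holding the mutex across the wakeup. Names are illustrative.
	 */
	static void
	release_and_wake(lck_mtx_t *mtx, int *wanted_flag, void *chan)
	{
		int need_wakeup = 0;

		lck_mtx_lock_spin(mtx);
		if (*wanted_flag) {
			*wanted_flag = 0;
			need_wakeup = 1;   /* note it, but don't wake yet */
		}
		/* ... clear the busy state here, still under the mutex ... */
		lck_mtx_unlock(mtx);

		if (need_wakeup) {
			wakeup(chan);      /* no locks held: waiters can run at once */
		}
	}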
*/ - wakeup(bp); + wakeup(bp); } } errno_t -buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) { - errno_t error; +buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo) +{ + errno_t error; - lck_mtx_lock_spin(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); error = buf_acquire_locked(bp, flags, slpflag, slptimeo); - lck_mtx_unlock(buf_mtxp); + lck_mtx_unlock(buf_mtxp); - return (error); + return error; } @@ -3891,33 +4056,38 @@ buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) struct timespec ts; if (ISSET(bp->b_flags, B_LOCKED)) { - if ((flags & BAC_SKIP_LOCKED)) - return (EDEADLK); + if ((flags & BAC_SKIP_LOCKED)) { + return EDEADLK; + } } else { - if ((flags & BAC_SKIP_NONLOCKED)) - return (EDEADLK); + if ((flags & BAC_SKIP_NONLOCKED)) { + return EDEADLK; + } } - if (ISSET(bp->b_lflags, BL_BUSY)) { - /* + if (ISSET(bp->b_lflags, BL_BUSY)) { + /* * since the lck_mtx_lock may block, the buffer * may become BUSY, so we need to * recheck for a NOWAIT request */ - if (flags & BAC_NOWAIT) - return (EBUSY); - SET(bp->b_lflags, BL_WANTED); + if (flags & BAC_NOWAIT) { + return EBUSY; + } + SET(bp->b_lflags, BL_WANTED); /* the hz value is 100; which leads to 10ms */ - ts.tv_sec = (slptimeo/100); + ts.tv_sec = (slptimeo / 100); ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000; error = msleep((caddr_t)bp, buf_mtxp, slpflag | (PRIBIO + 1), "buf_acquire", &ts); - if (error) - return (error); - return (EAGAIN); + if (error) { + return error; + } + return EAGAIN; + } + if (flags & BAC_REMOVE) { + bremfree_locked(bp); } - if (flags & BAC_REMOVE) - bremfree_locked(bp); SET(bp->b_lflags, BL_BUSY); buf_busycount++; @@ -3925,7 +4095,7 @@ buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo) bp->b_owner = current_thread(); bp->b_tag = 5; #endif - return (0); + return 0; } @@ -3937,24 +4107,25 @@ errno_t buf_biowait(buf_t bp) { while (!ISSET(bp->b_flags, B_DONE)) { - lck_mtx_lock_spin(buf_mtxp); if (!ISSET(bp->b_flags, B_DONE)) { DTRACE_IO1(wait__start, buf_t, bp); - (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO+1), "buf_biowait", NULL); + (void) msleep(bp, buf_mtxp, PDROP | (PRIBIO + 1), "buf_biowait", NULL); DTRACE_IO1(wait__done, buf_t, bp); - } else + } else { lck_mtx_unlock(buf_mtxp); + } } /* check for interruption of I/O (e.g. via NFS), then errors. */ if (ISSET(bp->b_flags, B_EINTR)) { CLR(bp->b_flags, B_EINTR); - return (EINTR); - } else if (ISSET(bp->b_flags, B_ERROR)) - return (bp->b_error ? bp->b_error : EIO); - else - return (0); + return EINTR; + } else if (ISSET(bp->b_flags, B_ERROR)) { + return bp->b_error ? 
bp->b_error : EIO; + } else { + return 0; + } } @@ -3982,12 +4153,13 @@ buf_biodone(buf_t bp) struct bufattr *bap; struct timeval real_elapsed; uint64_t real_elapsed_usec = 0; - + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START, - bp, bp->b_datap, bp->b_flags, 0, 0); + bp, bp->b_datap, bp->b_flags, 0, 0); - if (ISSET(bp->b_flags, B_DONE)) + if (ISSET(bp->b_flags, B_DONE)) { panic("biodone already"); + } bap = &bp->b_attr; @@ -3996,7 +4168,7 @@ buf_biodone(buf_t bp) } else { mp = NULL; } - + if (ISSET(bp->b_flags, B_ERROR)) { if (mp && (MNT_ROOTFS & mp->mnt_flag)) { dk_error_description_t desc; @@ -4020,35 +4192,41 @@ buf_biodone(buf_t bp) int code = DKIO_DONE; int io_tier = GET_BUFATTR_IO_TIER(bap); - if (bp->b_flags & B_READ) - code |= DKIO_READ; - if (bp->b_flags & B_ASYNC) - code |= DKIO_ASYNC; + if (bp->b_flags & B_READ) { + code |= DKIO_READ; + } + if (bp->b_flags & B_ASYNC) { + code |= DKIO_ASYNC; + } - if (bp->b_flags & B_META) - code |= DKIO_META; - else if (bp->b_flags & B_PAGEIO) - code |= DKIO_PAGING; + if (bp->b_flags & B_META) { + code |= DKIO_META; + } else if (bp->b_flags & B_PAGEIO) { + code |= DKIO_PAGING; + } - if (io_tier != 0) + if (io_tier != 0) { code |= DKIO_THROTTLE; + } code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK); - if (bp->b_flags & B_PASSIVE) + if (bp->b_flags & B_PASSIVE) { code |= DKIO_PASSIVE; + } - if (bap->ba_flags & BA_NOCACHE) + if (bap->ba_flags & BA_NOCACHE) { code |= DKIO_NOCACHE; + } if (bap->ba_flags & BA_IO_TIER_UPGRADE) { code |= DKIO_TIER_UPGRADE; } KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code), - buf_kernel_addrperm_addr(bp), - (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, - bp->b_error); + buf_kernel_addrperm_addr(bp), + (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, + bp->b_error); } microuptime(&real_elapsed); @@ -4069,29 +4247,31 @@ buf_biodone(buf_t bp) DTRACE_IO1(done, buf_t, bp); - if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) - /* + if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) { + /* * wake up any writer's blocked * on throttle or waiting for I/O * to drain */ vnode_writedone(bp->b_vp); + } - if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */ - void (*iodone_func)(struct buf *, void *) = bp->b_iodone; - void *arg = bp->b_transaction; + if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */ + void (*iodone_func)(struct buf *, void *) = bp->b_iodone; + void *arg = bp->b_transaction; int callout = ISSET(bp->b_flags, B_CALL); - if (iodone_func == NULL) - panic("biodone: bp @ %p has NULL b_iodone!\n", bp); + if (iodone_func == NULL) { + panic("biodone: bp @ %p has NULL b_iodone!\n", bp); + } - CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */ + CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */ bp->b_iodone = NULL; bp->b_transaction = NULL; - if (callout) - SET(bp->b_flags, B_DONE); /* note that it's done */ - + if (callout) { + SET(bp->b_flags, B_DONE); /* note that it's done */ + } (*iodone_func)(bp, arg); if (callout) { @@ -4109,12 +4289,12 @@ buf_biodone(buf_t bp) * by the HFS journaling code */ } - if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */ - SET(bp->b_flags, B_DONE); /* note that it's done */ + if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */ + SET(bp->b_flags, B_DONE); /* note that it's done */ buf_brelse(bp); - } else { /* or just wakeup the buffer */ - /* + } else { /* or just wakeup the buffer */ + /* * by taking the mutex, we 
serialize * the buf owner calling buf_biowait so that we'll * only see him in one of 2 states... @@ -4128,18 +4308,18 @@ buf_biodone(buf_t bp) * they do get to run, their going to re-set * BL_WANTED and go back to sleep */ - lck_mtx_lock_spin(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); CLR(bp->b_lflags, BL_WANTED); - SET(bp->b_flags, B_DONE); /* note that it's done */ + SET(bp->b_flags, B_DONE); /* note that it's done */ - lck_mtx_unlock(buf_mtxp); + lck_mtx_unlock(buf_mtxp); wakeup(bp); } biodone_done: KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END, - (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0); + (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0); } /* @@ -4148,10 +4328,11 @@ biodone_done: vm_offset_t buf_kernel_addrperm_addr(void * addr) { - if ((vm_offset_t)addr == 0) + if ((vm_offset_t)addr == 0) { return 0; - else - return ((vm_offset_t)addr + buf_kernel_addrperm); + } else { + return (vm_offset_t)addr + buf_kernel_addrperm; + } } /* @@ -4160,17 +4341,18 @@ buf_kernel_addrperm_addr(void * addr) int count_lock_queue(void) { - buf_t bp; - int n = 0; + buf_t bp; + int n = 0; lck_mtx_lock_spin(buf_mtxp); for (bp = bufqueues[BQ_LOCKED].tqh_first; bp; - bp = bp->b_freelist.tqe_next) + bp = bp->b_freelist.tqe_next) { n++; + } lck_mtx_unlock(buf_mtxp); - return (n); + return n; } /* @@ -4195,33 +4377,36 @@ vfs_bufstats() int i, j, count; struct buf *bp; struct bqueues *dp; - int counts[MAXBSIZE/CLBYTES+1]; + int counts[MAXBSIZE / CLBYTES + 1]; static char *bname[BQUEUES] = - { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" }; + { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" }; for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) { count = 0; - for (j = 0; j <= MAXBSIZE/CLBYTES; j++) + for (j = 0; j <= MAXBSIZE / CLBYTES; j++) { counts[j] = 0; + } lck_mtx_lock(buf_mtxp); for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) { - counts[bp->b_bufsize/CLBYTES]++; + counts[bp->b_bufsize / CLBYTES]++; count++; } lck_mtx_unlock(buf_mtxp); printf("%s: total-%d", bname[i], count); - for (j = 0; j <= MAXBSIZE/CLBYTES; j++) - if (counts[j] != 0) + for (j = 0; j <= MAXBSIZE / CLBYTES; j++) { + if (counts[j] != 0) { printf(", %d-%d", j * CLBYTES, counts[j]); + } + } printf("\n"); } } #endif /* DIAGNOSTIC */ -#define NRESERVEDIOBUFS 128 +#define NRESERVEDIOBUFS 128 #define MNT_VIRTUALDEV_MAX_IOBUFS 16 #define VIRTUALDEV_MAX_IOBUFS ((40*niobuf_headers)/100) @@ -4229,7 +4414,7 @@ vfs_bufstats() buf_t alloc_io_buf(vnode_t vp, int priv) { - buf_t bp; + buf_t bp; mount_t mp = NULL; int alloc_for_virtualdev = FALSE; @@ -4253,24 +4438,25 @@ alloc_io_buf(vnode_t vp, int priv) need_iobuffer = 1; (void)msleep(&need_iobuffer, iobuffer_mtxp, - PSPIN | (PRIBIO+1), (const char *)"alloc_io_buf (1)", + PSPIN | (PRIBIO + 1), (const char *)"alloc_io_buf (1)", NULL); } } - while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) || - (bp = iobufqueue.tqh_first) == NULL) { + while (((niobuf_headers - NRESERVEDIOBUFS < bufstats.bufs_iobufinuse) && !priv) || + (bp = iobufqueue.tqh_first) == NULL) { bufstats.bufs_iobufsleeps++; need_iobuffer = 1; - (void)msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO+1), + (void)msleep(&need_iobuffer, iobuffer_mtxp, PSPIN | (PRIBIO + 1), (const char *)"alloc_io_buf (2)", NULL); } TAILQ_REMOVE(&iobufqueue, bp, b_freelist); bufstats.bufs_iobufinuse++; - if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) + if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) { bufstats.bufs_iobufmax = 
bufstats.bufs_iobufinuse; + } if (alloc_for_virtualdev) { mp->mnt_iobufinuse++; @@ -4282,17 +4468,18 @@ alloc_io_buf(vnode_t vp, int priv) /* * initialize various fields * we don't need to hold the mutex since the buffer - * is now private... the vp should have a reference + * is now private... the vp should have a reference * on it and is not protected by this mutex in any event */ - bp->b_timestamp = 0; + bp->b_timestamp = 0; bp->b_proc = NULL; bp->b_datap = 0; bp->b_flags = 0; bp->b_lflags = BL_BUSY | BL_IOBUF; - if (alloc_for_virtualdev) + if (alloc_for_virtualdev) { bp->b_lflags |= BL_IOBUF_VDEV; + } bp->b_redundancy_flags = 0; bp->b_blkno = bp->b_lblkno = 0; #ifdef JOE_DEBUG @@ -4309,12 +4496,13 @@ alloc_io_buf(vnode_t vp, int priv) bp->b_vp = vp; bzero(&bp->b_attr, sizeof(struct bufattr)); - if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) + if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) { bp->b_dev = vp->v_rdev; - else + } else { bp->b_dev = NODEV; + } - return (bp); + return bp; } @@ -4328,8 +4516,9 @@ free_io_buf(buf_t bp) /* Was this iobuf for a diskimage ? */ if (bp->b_lflags & BL_IOBUF_VDEV) { free_for_virtualdev = TRUE; - if (bp->b_vp) + if (bp->b_vp) { mp = bp->b_vp->v_mount; + } } /* @@ -4339,14 +4528,14 @@ free_io_buf(buf_t bp) bp->b_flags = B_INVAL; /* Zero out the bufattr and its flags before relinquishing this iobuf */ - bzero (&bp->b_attr, sizeof(struct bufattr)); - + bzero(&bp->b_attr, sizeof(struct bufattr)); + lck_mtx_lock_spin(iobuffer_mtxp); binsheadfree(bp, &iobufqueue, -1); if (need_iobuffer) { - /* + /* * Wake up any processes waiting because they need an io buffer * * do the wakeup after we drop the mutex... it's possible that the @@ -4358,34 +4547,37 @@ free_io_buf(buf_t bp) need_iobuffer = 0; need_wakeup = 1; } - if (bufstats.bufs_iobufinuse <= 0) + if (bufstats.bufs_iobufinuse <= 0) { panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp); + } bufstats.bufs_iobufinuse--; if (free_for_virtualdev) { bufstats.bufs_iobufinuse_vdev--; - if (mp && mp != dead_mountp) + if (mp && mp != dead_mountp) { mp->mnt_iobufinuse--; + } } lck_mtx_unlock(iobuffer_mtxp); - if (need_wakeup) - wakeup(&need_iobuffer); + if (need_wakeup) { + wakeup(&need_iobuffer); + } } void buf_list_lock(void) { - lck_mtx_lock_spin(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); } void buf_list_unlock(void) { - lck_mtx_unlock(buf_mtxp); + lck_mtx_unlock(buf_mtxp); } /* @@ -4398,7 +4590,7 @@ buf_list_unlock(void) static void bcleanbuf_thread_init(void) { - thread_t thread = THREAD_NULL; + thread_t thread = THREAD_NULL; /* create worker thread */ kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread); @@ -4416,12 +4608,12 @@ bcleanbuf_thread(void) int loopcnt = 0; for (;;) { - lck_mtx_lock_spin(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); - while ( (bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) { - (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO|PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread); + while ((bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) { + (void)msleep0(&bufqueues[BQ_LAUNDRY], buf_mtxp, PRIBIO | PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread); } - + /* * Remove from the queue */ @@ -4445,10 +4637,10 @@ bcleanbuf_thread(void) error = bawrite_internal(bp, 0); if (error) { - bp->b_whichq = BQ_LAUNDRY; + bp->b_whichq = BQ_LAUNDRY; bp->b_timestamp = buf_timestamp(); - lck_mtx_lock_spin(buf_mtxp); + lck_mtx_lock_spin(buf_mtxp); binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY); blaundrycnt++; @@ -4462,7 +4654,7 @@ 
bcleanbuf_thread(void) #endif lck_mtx_unlock(buf_mtxp); - + if (loopcnt > MAXLAUNDRY) { /* * bawrite_internal() can return errors if we're throttled. If we've @@ -4484,19 +4676,20 @@ bcleanbuf_thread(void) static int brecover_data(buf_t bp) { - int upl_offset; - upl_t upl; + int upl_offset; + upl_t upl; upl_page_info_t *pl; kern_return_t kret; - vnode_t vp = bp->b_vp; + vnode_t vp = bp->b_vp; int upl_flags; - if ( !UBCINFOEXISTS(vp) || bp->b_bufsize == 0) - goto dump_buffer; + if (!UBCINFOEXISTS(vp) || bp->b_bufsize == 0) { + goto dump_buffer; + } upl_flags = UPL_PRECIOUS; - if (! (buf_flags(bp) & B_READ)) { + if (!(buf_flags(bp) & B_READ)) { /* * "write" operation: let the UPL subsystem know * that we intend to modify the buffer cache pages we're @@ -4504,38 +4697,39 @@ brecover_data(buf_t bp) */ upl_flags |= UPL_WILL_MODIFY; } - + kret = ubc_create_upl_kernel(vp, - ubc_blktooff(vp, bp->b_lblkno), - bp->b_bufsize, - &upl, - &pl, - upl_flags, - VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) - panic("Failed to create UPL"); + ubc_blktooff(vp, bp->b_lblkno), + bp->b_bufsize, + &upl, + &pl, + upl_flags, + VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { + panic("Failed to create UPL"); + } for (upl_offset = 0; upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) { - - if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) { - ubc_upl_abort(upl, 0); + if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) { + ubc_upl_abort(upl, 0); goto dump_buffer; } } bp->b_upl = upl; - + kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap)); - if (kret != KERN_SUCCESS) - panic("getblk: ubc_upl_map() failed with (%d)", kret); - return (1); + if (kret != KERN_SUCCESS) { + panic("getblk: ubc_upl_map() failed with (%d)", kret); + } + return 1; dump_buffer: bp->b_bufsize = 0; SET(bp->b_flags, B_INVAL); buf_brelse(bp); - return(0); + return 0; } int @@ -4574,7 +4768,7 @@ static void fs_buffer_cache_gc_dispatch_callouts(int all) { lck_mtx_lock(buf_gc_callout); - for(int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) { + for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) { if (fs_callouts[i].callout != NULL) { fs_callouts[i].callout(all, fs_callouts[i].context); } @@ -4582,7 +4776,7 @@ fs_buffer_cache_gc_dispatch_callouts(int all) lck_mtx_unlock(buf_gc_callout); } -static boolean_t +static boolean_t buffer_cache_gc(int all) { buf_t bp; @@ -4593,9 +4787,10 @@ buffer_cache_gc(int all) struct bqueues privq; int thresh_hold = BUF_STALE_THRESHHOLD; - if (all) + if (all) { thresh_hold = 0; - /* + } + /* * We only care about metadata (incore storage comes from zalloc()). * Unless "all" is set (used to evict meta data buffers in preparation * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers @@ -4611,11 +4806,10 @@ buffer_cache_gc(int all) TAILQ_INIT(&privq); need_wakeup = FALSE; - while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) && - (now > bp->b_timestamp) && - (now - bp->b_timestamp > thresh_hold) && - (found < BUF_MAX_GC_BATCH_SIZE)) { - + while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) && + (now > bp->b_timestamp) && + (now - bp->b_timestamp > thresh_hold) && + (found < BUF_MAX_GC_BATCH_SIZE)) { /* Remove from free list */ bremfree_locked(bp); found++; @@ -4635,14 +4829,14 @@ buffer_cache_gc(int all) continue; } - /* - * Mark busy and put on private list. We could technically get + /* + * Mark busy and put on private list. We could technically get * away without setting BL_BUSY here. 
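buffer_cache_gc() drains victims in batches: candidates are moved to a private queue while the list lock is held, torn down per-buffer without the lock, and then spliced onto the empty queue in one operation. A self-contained sketch of that shape, with illustrative types (the real code also rechecks each buffer's eligibility and sets BL_BUSY before committing it):

	/*
	 * Sketch only: batched eviction with a private queue and one
	 * O(1) bulk splice at the end.
	 */
	#include <sys/queue.h>

	struct gcbuf {
		TAILQ_ENTRY(gcbuf) b_link;
	};
	TAILQ_HEAD(gcq, gcbuf);

	static void
	gc_one_batch(struct gcq *metaq, struct gcq *emptyq, lck_mtx_t *mtx, int batch)
	{
		struct gcq privq;
		struct gcbuf *bp;
		int found = 0;

		TAILQ_INIT(&privq);

		lck_mtx_lock(mtx);
		while (found < batch && (bp = TAILQ_FIRST(metaq)) != NULL) {
			TAILQ_REMOVE(metaq, bp, b_link);
			TAILQ_INSERT_TAIL(&privq, bp, b_link);
			found++;
		}
		lck_mtx_unlock(mtx);

		/* expensive per-buffer teardown runs without the list lock */
		TAILQ_FOREACH(bp, &privq, b_link) {
			/* free meta store, drop credentials, etc. */
		}

		lck_mtx_lock(mtx);
		TAILQ_CONCAT(emptyq, &privq, b_link);   /* one bulk move */
		lck_mtx_unlock(mtx);
	}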
*/ SET(bp->b_lflags, BL_BUSY); buf_busycount++; - /* + /* * Remove from hash and dissociate from vp. */ bremhash(bp); @@ -4671,7 +4865,7 @@ buffer_cache_gc(int all) /* Take note if we've definitely freed at least a page to a zone */ if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) { did_large_zfree = TRUE; - } + } trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno); @@ -4682,8 +4876,8 @@ buffer_cache_gc(int all) buf_release_credentials(bp); /* Prepare for moving to empty queue */ - CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED - | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); + CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED + | B_AGE | B_ASYNC | B_NOCACHE | B_FUA)); bp->b_whichq = BQ_EMPTY; BLISTNONE(bp); } @@ -4706,7 +4900,6 @@ buffer_cache_gc(int all) /* And do a big bulk move to the empty queue */ TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist); - } while (all && (found == BUF_MAX_GC_BATCH_SIZE)); lck_mtx_unlock(buf_mtxp); @@ -4728,79 +4921,78 @@ buffer_cache_gc(int all) static int bp_cmp(void *a, void *b) { - buf_t *bp_a = *(buf_t **)a, - *bp_b = *(buf_t **)b; - daddr64_t res; + buf_t *bp_a = *(buf_t **)a, + *bp_b = *(buf_t **)b; + daddr64_t res; - // don't have to worry about negative block - // numbers so this is ok to do. - // - res = (bp_a->b_blkno - bp_b->b_blkno); + // don't have to worry about negative block + // numbers so this is ok to do. + // + res = (bp_a->b_blkno - bp_b->b_blkno); - return (int)res; + return (int)res; } int bflushq(int whichq, mount_t mp) { - buf_t bp, next; - int i, buf_count; - int total_writes = 0; + buf_t bp, next; + int i, buf_count; + int total_writes = 0; static buf_t flush_table[NFLUSH]; if (whichq < 0 || whichq >= BQUEUES) { - return (0); + return 0; } - restart: +restart: lck_mtx_lock(buf_mtxp); bp = TAILQ_FIRST(&bufqueues[whichq]); for (buf_count = 0; bp; bp = next) { - next = bp->b_freelist.tqe_next; - - if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) { - continue; - } + next = bp->b_freelist.tqe_next; - if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) { + if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) { + continue; + } - bremfree_locked(bp); + if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) { + bremfree_locked(bp); #ifdef JOE_DEBUG - bp->b_owner = current_thread(); - bp->b_tag = 7; + bp->b_owner = current_thread(); + bp->b_tag = 7; #endif - SET(bp->b_lflags, BL_BUSY); - buf_busycount++; + SET(bp->b_lflags, BL_BUSY); + buf_busycount++; - flush_table[buf_count] = bp; - buf_count++; - total_writes++; + flush_table[buf_count] = bp; + buf_count++; + total_writes++; - if (buf_count >= NFLUSH) { - lck_mtx_unlock(buf_mtxp); + if (buf_count >= NFLUSH) { + lck_mtx_unlock(buf_mtxp); - qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); + qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); - for (i = 0; i < buf_count; i++) { - buf_bawrite(flush_table[i]); - } - goto restart; + for (i = 0; i < buf_count; i++) { + buf_bawrite(flush_table[i]); + } + goto restart; + } } - } } lck_mtx_unlock(buf_mtxp); if (buf_count > 0) { - qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); + qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp); - for (i = 0; i < buf_count; i++) { - buf_bawrite(flush_table[i]); - } + for (i = 0; i < buf_count; i++) { + buf_bawrite(flush_table[i]); + } } - return (total_writes); + return total_writes; } #endif diff --git a/bsd/vfs/vfs_cache.c b/bsd/vfs/vfs_cache.c index 56c69754c..9f3aaa548 100644 --- 
a/bsd/vfs/vfs_cache.c +++ b/bsd/vfs/vfs_cache.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -109,35 +109,35 @@ * Structures associated with name cacheing. */ -LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ -u_long nchashmask; -u_long nchash; /* size of hash table - 1 */ -long numcache; /* number of cache entries allocated */ -int desiredNodes; -int desiredNegNodes; -int ncs_negtotal; -int nc_disabled = 0; -TAILQ_HEAD(, namecache) nchead; /* chain of all name cache entries */ -TAILQ_HEAD(, namecache) neghead; /* chain of only negative cache entries */ +LIST_HEAD(nchashhead, namecache) * nchashtbl; /* Hash Table */ +u_long nchashmask; +u_long nchash; /* size of hash table - 1 */ +long numcache; /* number of cache entries allocated */ +int desiredNodes; +int desiredNegNodes; +int ncs_negtotal; +int nc_disabled = 0; +TAILQ_HEAD(, namecache) nchead; /* chain of all name cache entries */ +TAILQ_HEAD(, namecache) neghead; /* chain of only negative cache entries */ #if COLLECT_STATS -struct nchstats nchstats; /* cache effectiveness statistics */ +struct nchstats nchstats; /* cache effectiveness statistics */ -#define NCHSTAT(v) { \ - nchstats.v++; \ +#define NCHSTAT(v) { \ + nchstats.v++; \ } -#define NAME_CACHE_LOCK() name_cache_lock() -#define NAME_CACHE_UNLOCK() name_cache_unlock() -#define NAME_CACHE_LOCK_SHARED() name_cache_lock() +#define NAME_CACHE_LOCK() name_cache_lock() +#define NAME_CACHE_UNLOCK() name_cache_unlock() +#define NAME_CACHE_LOCK_SHARED() name_cache_lock() #else #define NCHSTAT(v) -#define NAME_CACHE_LOCK() name_cache_lock() -#define NAME_CACHE_UNLOCK() name_cache_unlock() -#define NAME_CACHE_LOCK_SHARED() name_cache_lock_shared() +#define NAME_CACHE_LOCK() name_cache_lock() +#define NAME_CACHE_UNLOCK() name_cache_unlock() +#define NAME_CACHE_LOCK_SHARED() name_cache_lock_shared() #endif @@ -170,7 +170,7 @@ static void cache_enter_locked(vnode_t dvp, vnode_t vp, struct componentname *cn * Internal dump function used for debugging */ void dump_string_table(void); -#endif /* DUMP_STRING_TABLE */ +#endif /* DUMP_STRING_TABLE */ static void init_crc32(void); static unsigned int crc32tab[256]; @@ -202,15 +202,15 @@ static unsigned int crc32tab[256]; * * defer = cache_check_vnode_issubdir(vp, dvp, is_subdir, next_vp); * if (!defer) { - * if (*is_subdir) - * vp is subdirectory; - * else - * vp is not a 
subdirectory; + * if (*is_subdir) + * vp is subdirectory; + * else + * vp is not a subdirectory; * } else { - * if (*next_vp) - * check this vnode's parent from the filesystem - * else - * error (likely because of forced unmount). + * if (*next_vp) + * check this vnode's parent from the filesystem + * else + * error (likely because of forced unmount). * } * */ @@ -265,7 +265,7 @@ cache_check_vnode_issubdir(vnode_t vp, vnode_t dvp, boolean_t *is_subdir, tvp = tvp->v_parent; } - return (defer); + return defer; } /* maximum times retry from potentially transient errors in vnode_issubdir */ @@ -290,7 +290,7 @@ vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx) int error = 0; char dotdotbuf[] = ".."; int error_retry_count = 0; /* retry count for potentially transient - errors */ + * errors */ *is_subdir = FALSE; tvp = start_vp = vp; @@ -320,8 +320,9 @@ vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx) defer = cache_check_vnode_issubdir(tvp, dvp, &is_subdir_locked, &tvp); - if (defer && tvp) + if (defer && tvp) { vid = vnode_vid(tvp); + } NAME_CACHE_UNLOCK(); @@ -368,24 +369,27 @@ vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx) cn.cn_namelen = 2; pvp = NULLVP; - if ((error = VNOP_LOOKUP(tvp, &pvp, &cn, ctx))) + if ((error = VNOP_LOOKUP(tvp, &pvp, &cn, ctx))) { break; + } if (!(tvp->v_flag & VISHARDLINK) && tvp->v_parent != pvp) { (void)vnode_update_identity(tvp, pvp, NULL, 0, 0, VNODE_UPDATE_PARENT); } - if (vp_with_iocount) + if (vp_with_iocount) { vnode_put(vp_with_iocount); + } vp_with_iocount = tvp = pvp; } - if (vp_with_iocount) + if (vp_with_iocount) { vnode_put(vp_with_iocount); + } - return (error); + return error; } /* @@ -395,20 +399,20 @@ vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx) * byte and thus the length is one greater than what strlen would * return. This is important and lots of code elsewhere in the kernel * assumes this behavior. - * - * This function can call vnop in file system if the parent vnode - * does not exist or when called for hardlinks via volfs path. + * + * This function can call vnop in file system if the parent vnode + * does not exist or when called for hardlinks via volfs path. * If BUILDPATH_NO_FS_ENTER is set in flags, it only uses values present * in the name cache and does not enter the file system. * - * If BUILDPATH_CHECK_MOVED is set in flags, we return EAGAIN when - * we encounter ENOENT during path reconstruction. ENOENT means that - * one of the parents moved while we were building the path. The + * If BUILDPATH_CHECK_MOVED is set in flags, we return EAGAIN when + * we encounter ENOENT during path reconstruction. ENOENT means that + * one of the parents moved while we were building the path. The * caller can special handle this case by calling build_path again. * - * If BUILDPATH_VOLUME_RELATIVE is set in flags, we return path - * that is relative to the nearest mount point, i.e. do not - * cross over mount points during building the path. + * If BUILDPATH_VOLUME_RELATIVE is set in flags, we return path + * that is relative to the nearest mount point, i.e. do not + * cross over mount points during building the path. 
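The BUILDPATH_CHECK_MOVED contract described above leaves retrying to the caller: a concurrent rename surfaces as EAGAIN rather than a silently wrong path. A sketch of a typical caller, assuming a small retry bound of the caller's own choosing:

	/*
	 * Sketch only: rebuild the path when a parent moved mid-walk.
	 * The retry bound is an assumption, not mandated by build_path().
	 */
	static int
	get_vnode_path(vnode_t vp, char *buf, int buflen, int *outlen, vfs_context_t ctx)
	{
		int tries = 3;
		int err;

		do {
			err = build_path(vp, buf, buflen, outlen,
			    BUILDPATH_CHECK_MOVED, ctx);
		} while (err == EAGAIN && --tries > 0);

		return err;
	}

Without BUILDPATH_CHECK_MOVED the same race would simply come back as ENOENT, which a caller cannot distinguish from a genuinely unlinked file.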
* * passed in vp must have a valid io_count reference * @@ -425,34 +429,37 @@ vnode_issubdir(vnode_t vp, vnode_t dvp, int *is_subdir, vfs_context_t ctx) int build_path_with_parent(vnode_t first_vp, vnode_t parent_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx) { - vnode_t vp, tvp; + vnode_t vp, tvp; vnode_t vp_with_iocount; - vnode_t proc_root_dir_vp; + vnode_t proc_root_dir_vp; char *end; const char *str; int len; int ret = 0; int fixhardlink; - if (first_vp == NULLVP) - return (EINVAL); - - if (buflen <= 1) - return (ENOSPC); + if (first_vp == NULLVP) { + return EINVAL; + } + + if (buflen <= 1) { + return ENOSPC; + } /* * Grab the process fd so we can evaluate fd_rdir. */ - if (vfs_context_proc(ctx)->p_fd) + if (vfs_context_proc(ctx)->p_fd) { proc_root_dir_vp = vfs_context_proc(ctx)->p_fd->fd_rdir; - else + } else { proc_root_dir_vp = NULL; + } vp_with_iocount = NULLVP; again: vp = first_vp; - end = &buff[buflen-1]; + end = &buff[buflen - 1]; *end = '\0'; /* @@ -467,7 +474,7 @@ again: * after we drop the NAME_CACHE_LOCK via vnode_getwithvid... * deadlocks may result if you call vnode_get while holding * the NAME_CACHE_LOCK... we lazily release the reference - * we pick up the next time we encounter a need to drop + * we pick up the next time we encounter a need to drop * the NAME_CACHE_LOCK or before we return from this routine */ NAME_CACHE_LOCK_SHARED(); @@ -480,19 +487,19 @@ again: ret = EINVAL; goto out_unlock; } - if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) { + if ((vp->v_mount->mnt_flag & MNT_ROOTFS) || (vp == proc_root_dir_vp)) { /* * It's the root of the root file system, so it's * just "/". */ - *--end = '/'; + *--end = '/'; goto out_unlock; } else { - /* - * This the root of the volume and the caller does not - * want to cross mount points. Therefore just return - * '/' as the relative path. + /* + * This the root of the volume and the caller does not + * want to cross mount points. Therefore just return + * '/' as the relative path. */ if (flags & BUILDPATH_VOLUME_RELATIVE) { *--end = '/'; @@ -512,17 +519,18 @@ again: * name and parent (below). */ fixhardlink = (vp->v_flag & VISHARDLINK) && - (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) && - !(flags & BUILDPATH_NO_FS_ENTER); + (vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID) && + !(flags & BUILDPATH_NO_FS_ENTER); if (!fixhardlink) { str = vp->v_name; if (str == NULL || *str == '\0') { - if (vp->v_parent != NULL) + if (vp->v_parent != NULL) { ret = EINVAL; - else + } else { ret = ENOENT; + } goto out_unlock; } len = strlen(str); @@ -537,9 +545,10 @@ again: * Copy the name backwards. */ str += len; - - for (; len > 0; len--) - *--end = *--str; + + for (; len > 0; len--) { + *--end = *--str; + } /* * Add a path separator. */ @@ -550,8 +559,7 @@ again: * Walk up the parent chain. */ if (((vp->v_parent != NULLVP) && !fixhardlink) || - (flags & BUILDPATH_NO_FS_ENTER)) { - + (flags & BUILDPATH_NO_FS_ENTER)) { /* * In this if () block we are not allowed to enter the filesystem * to conclusively get the most accurate parent identifier. @@ -603,8 +611,9 @@ again: vnode_put(vp_with_iocount); vp_with_iocount = NULLVP; } - if (vnode_getwithvid(vp, vid)) + if (vnode_getwithvid(vp, vid)) { goto again; + } vp_with_iocount = vp; } VATTR_INIT(&va); @@ -661,14 +670,17 @@ bad_news: /* * Ask the file system for the parent vnode. 
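Because the buffer is filled from the end, each component is prepended with a simple backwards copy plus a separator, as the code above does inline. A standalone sketch of that step (the helper name and overflow convention are illustrative):

	/*
	 * Sketch only: prepend one path component in front of "end",
	 * which points at the start of the partially built string.
	 * Returns the new start, or NULL if the buffer is full.
	 */
	static char *
	prepend_component(char *end, char *start, const char *name, size_t len)
	{
		if ((size_t)(end - start) < len + 1) {
			return NULL;            /* no room for name + '/' */
		}
		const char *s = name + len;
		while (len-- > 0) {
			*--end = *--s;          /* copy the name backwards */
		}
		*--end = '/';                   /* path separator */
		return end;
	}

A caller would start with end = &buff[buflen - 1] and *end = '\0', exactly as build_path() does, so the finished string only needs one final slide to the front of the buffer.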
*/ - if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx))) + if ((ret = VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx))) { goto out; + } - if (!fixhardlink && (vp->v_parent != dvp)) + if (!fixhardlink && (vp->v_parent != dvp)) { vnode_update_identity(vp, dvp, NULL, 0, 0, VNODE_UPDATE_PARENT); + } - if (vp_with_iocount) + if (vp_with_iocount) { vnode_put(vp_with_iocount); + } vp = dvp; vp_with_iocount = vp; @@ -680,8 +692,9 @@ bad_news: * so skip up to avoid getting a duplicate copy of the * file name in the path. */ - if (vp && !vnode_isdir(vp) && vp->v_parent) + if (vp && !vnode_isdir(vp) && vp->v_parent) { vp = vp->v_parent; + } } if (vp && (flags & BUILDPATH_CHECKACCESS)) { @@ -694,13 +707,14 @@ bad_news: vnode_put(vp_with_iocount); vp_with_iocount = NULLVP; } - if (vnode_getwithvid(vp, vid)) + if (vnode_getwithvid(vp, vid)) { goto again; + } vp_with_iocount = vp; } - if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx))) - goto out; /* no peeking */ - + if ((ret = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx))) { + goto out; /* no peeking */ + } NAME_CACHE_LOCK_SHARED(); } @@ -713,12 +727,12 @@ bad_news: tvp = vp; while (tvp) { - if (tvp == proc_root_dir_vp) - goto out_unlock; /* encountered the root */ - - if (!(tvp->v_flag & VROOT) || !tvp->v_mount) - break; /* not the root of a mounted FS */ - + if (tvp == proc_root_dir_vp) { + goto out_unlock; /* encountered the root */ + } + if (!(tvp->v_flag & VROOT) || !tvp->v_mount) { + break; /* not the root of a mounted FS */ + } if (flags & BUILDPATH_VOLUME_RELATIVE) { /* Do not cross over mount points */ tvp = NULL; @@ -726,15 +740,17 @@ bad_news: tvp = tvp->v_mount->mnt_vnodecovered; } } - if (tvp == NULLVP) + if (tvp == NULLVP) { goto out_unlock; + } vp = tvp; } out_unlock: NAME_CACHE_UNLOCK(); out: - if (vp_with_iocount) + if (vp_with_iocount) { vnode_put(vp_with_iocount); + } /* * Slide the name down to the beginning of the buffer. */ @@ -744,22 +760,22 @@ out: * length includes the trailing zero byte */ *outlen = &buff[buflen] - end; - - /* One of the parents was moved during path reconstruction. - * The caller is interested in knowing whether any of the + + /* One of the parents was moved during path reconstruction. + * The caller is interested in knowing whether any of the * parents moved via BUILDPATH_CHECK_MOVED, so return EAGAIN. */ if ((ret == ENOENT) && (flags & BUILDPATH_CHECK_MOVED)) { ret = EAGAIN; } - return (ret); + return ret; } int build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs_context_t ctx) { - return (build_path_with_parent(first_vp, NULL, buff, buflen, outlen, flags, ctx)); + return build_path_with_parent(first_vp, NULL, buff, buflen, outlen, flags, ctx); } /* @@ -770,8 +786,8 @@ build_path(vnode_t first_vp, char *buff, int buflen, int *outlen, int flags, vfs vnode_t vnode_getparent(vnode_t vp) { - vnode_t pvp = NULLVP; - int pvid; + vnode_t pvp = NULLVP; + int pvid; NAME_CACHE_LOCK_SHARED(); /* @@ -781,30 +797,33 @@ vnode_getparent(vnode_t vp) * parent of 'vp' at the time we took the name_cache lock... 
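vnode_getparent() relies on the vid-validation idiom used throughout the name cache: snapshot the pointer and its v_id under the shared lock, then let vnode_getwithvid() reject the reference if the vnode was recycled after the lock was dropped. Distilled into a sketch (this restates the function above rather than adding behavior):

	/*
	 * Sketch only: take a safe iocount on a possibly-racing parent.
	 */
	static vnode_t
	safe_parent_ref(vnode_t vp)
	{
		vnode_t pvp;
		int pvid = 0;

		NAME_CACHE_LOCK_SHARED();
		pvp = vp->v_parent;
		if (pvp != NULLVP) {
			pvid = pvp->v_id;       /* generation snapshot */
		}
		NAME_CACHE_UNLOCK();

		if (pvp != NULLVP && vnode_getwithvid(pvp, pvid) != 0) {
			pvp = NULLVP;           /* recycled: treat as no parent */
		}
		return pvp;
	}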
* once we drop the lock, vp could get re-parented */ - if ( (pvp = vp->v_parent) != NULLVP ) { - pvid = pvp->v_id; + if ((pvp = vp->v_parent) != NULLVP) { + pvid = pvp->v_id; NAME_CACHE_UNLOCK(); - if (vnode_getwithvid(pvp, pvid) != 0) - pvp = NULL; - } else - NAME_CACHE_UNLOCK(); - return (pvp); + if (vnode_getwithvid(pvp, pvid) != 0) { + pvp = NULL; + } + } else { + NAME_CACHE_UNLOCK(); + } + return pvp; } const char * vnode_getname(vnode_t vp) { - const char *name = NULL; - + const char *name = NULL; + NAME_CACHE_LOCK_SHARED(); - - if (vp->v_name) - name = vfs_addname(vp->v_name, strlen(vp->v_name), 0, 0); + + if (vp->v_name) { + name = vfs_addname(vp->v_name, strlen(vp->v_name), 0, 0); + } NAME_CACHE_UNLOCK(); - return (name); + return name; } void @@ -819,45 +838,47 @@ const char * vnode_getname_printable(vnode_t vp) { const char *name = vnode_getname(vp); - if (name != NULL) + if (name != NULL) { return name; - + } + switch (vp->v_type) { - case VCHR: - case VBLK: - { - /* - * Create an artificial dev name from - * major and minor device number - */ - char dev_name[64]; - (void) snprintf(dev_name, sizeof(dev_name), - "%c(%u, %u)", VCHR == vp->v_type ? 'c':'b', - major(vp->v_rdev), minor(vp->v_rdev)); - /* - * Add the newly created dev name to the name - * cache to allow easier cleanup. Also, - * vfs_addname allocates memory for the new name - * and returns it. - */ - NAME_CACHE_LOCK_SHARED(); - name = vfs_addname(dev_name, strlen(dev_name), 0, 0); - NAME_CACHE_UNLOCK(); - return name; - } - default: - return unknown_vnodename; + case VCHR: + case VBLK: + { + /* + * Create an artificial dev name from + * major and minor device number + */ + char dev_name[64]; + (void) snprintf(dev_name, sizeof(dev_name), + "%c(%u, %u)", VCHR == vp->v_type ? 'c':'b', + major(vp->v_rdev), minor(vp->v_rdev)); + /* + * Add the newly created dev name to the name + * cache to allow easier cleanup. Also, + * vfs_addname allocates memory for the new name + * and returns it. 
+ */ + NAME_CACHE_LOCK_SHARED(); + name = vfs_addname(dev_name, strlen(dev_name), 0, 0); + NAME_CACHE_UNLOCK(); + return name; + } + default: + return unknown_vnodename; } } -void +void vnode_putname_printable(const char *name) { - if (name == unknown_vnodename) + if (name == unknown_vnodename) { return; + } vnode_putname(name); } - + /* * if VNODE_UPDATE_PARENT, and we can take @@ -875,8 +896,8 @@ vnode_putname_printable(const char *name) void vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, uint32_t name_hashval, int flags) { - struct namecache *ncp; - vnode_t old_parentvp = NULLVP; + struct namecache *ncp; + vnode_t old_parentvp = NULLVP; int isstream = (vp->v_flag & VISNAMEDSTREAM); int kusecountbumped = 0; kauth_cred_t tcred = NULL; @@ -884,7 +905,7 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u const char *tname = NULL; if (flags & VNODE_UPDATE_PARENT) { - if (dvp && vnode_ref(dvp) != 0) { + if (dvp && vnode_ref(dvp) != 0) { dvp = NULLVP; } /* Don't count a stream's parent ref during unmounts */ @@ -895,32 +916,35 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u vnode_unlock(dvp); } } else { - dvp = NULLVP; + dvp = NULLVP; } - if ( (flags & VNODE_UPDATE_NAME) ) { + if ((flags & VNODE_UPDATE_NAME)) { if (name != vp->v_name) { if (name && *name) { - if (name_len == 0) + if (name_len == 0) { name_len = strlen(name); - tname = vfs_addname(name, name_len, name_hashval, 0); + } + tname = vfs_addname(name, name_len, name_hashval, 0); } - } else + } else { flags &= ~VNODE_UPDATE_NAME; + } } - if ( (flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME)) ) { - + if ((flags & (VNODE_UPDATE_PURGE | VNODE_UPDATE_PARENT | VNODE_UPDATE_CACHE | VNODE_UPDATE_NAME))) { NAME_CACHE_LOCK(); - if ( (flags & VNODE_UPDATE_PURGE) ) { - - if (vp->v_parent) + if ((flags & VNODE_UPDATE_PURGE)) { + if (vp->v_parent) { vp->v_parent->v_nc_generation++; + } - while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) + while ((ncp = LIST_FIRST(&vp->v_nclinks))) { cache_delete(ncp, 1); + } - while ( (ncp = TAILQ_FIRST(&vp->v_ncchildren)) ) + while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) { cache_delete(ncp, 1); + } /* * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held @@ -930,7 +954,7 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u vp->v_authorized_actions = 0; vp->v_cred_timestamp = 0; } - if ( (flags & VNODE_UPDATE_NAME) ) { + if ((flags & VNODE_UPDATE_NAME)) { vname = vp->v_name; vp->v_name = tname; } @@ -940,42 +964,48 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u vp->v_parent = dvp; dvp = NULLVP; - if (old_parentvp) + if (old_parentvp) { flags |= VNODE_UPDATE_CACHE; + } } } if (flags & VNODE_UPDATE_CACHE) { - while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) + while ((ncp = LIST_FIRST(&vp->v_nclinks))) { cache_delete(ncp, 1); + } } NAME_CACHE_UNLOCK(); - - if (vname != NULL) + + if (vname != NULL) { vfs_removename(vname); + } - if (IS_VALID_CRED(tcred)) + if (IS_VALID_CRED(tcred)) { kauth_cred_unref(&tcred); + } } if (dvp != NULLVP) { /* Back-out the ref we took if we lost a race for vp->v_parent. 
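The tcred dance above is this file's standard way of dropping a credential that was cached under the name cache lock: detach it while locked, and call the potentially blocking kauth_cred_unref() only after unlocking. Isolated as a simplified sketch:

	/*
	 * Sketch only: defer the cred release until the lock is dropped.
	 */
	static void
	drop_cached_cred(vnode_t vp)
	{
		kauth_cred_t tcred = NOCRED;

		NAME_CACHE_LOCK();
		if (IS_VALID_CRED(vp->v_cred)) {
			tcred = vp->v_cred;     /* detach under the lock */
			vp->v_cred = NOCRED;
			vp->v_authorized_actions = 0;
		}
		NAME_CACHE_UNLOCK();

		if (IS_VALID_CRED(tcred)) {
			kauth_cred_unref(&tcred);   /* may block; lock not held */
		}
	}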
*/ if (kusecountbumped) { vnode_lock_spin(dvp); - if (dvp->v_kusecount > 0) - --dvp->v_kusecount; + if (dvp->v_kusecount > 0) { + --dvp->v_kusecount; + } vnode_unlock(dvp); } - vnode_rele(dvp); + vnode_rele(dvp); } if (old_parentvp) { - struct uthread *ut; + struct uthread *ut; if (isstream) { - vnode_lock_spin(old_parentvp); - if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0)) + vnode_lock_spin(old_parentvp); + if ((old_parentvp->v_type != VDIR) && (old_parentvp->v_kusecount > 0)) { --old_parentvp->v_kusecount; + } vnode_unlock(old_parentvp); } - ut = get_bsdthread_info(current_thread()); + ut = get_bsdthread_info(current_thread()); /* * indicated to vnode_rele that it shouldn't do a @@ -987,9 +1017,8 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u ut->uu_defer_reclaims = 1; ut->uu_vreclaims = NULLVP; - while ( (vp = old_parentvp) != NULLVP ) { - - vnode_lock_spin(vp); + while ((vp = old_parentvp) != NULLVP) { + vnode_lock_spin(vp); vnode_rele_internal(vp, 0, 0, 1); /* @@ -999,7 +1028,7 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u * out the v_parent field... we'll drop the reference * that was held on the next iteration of this loop... * this short circuits a potential deep recursion if we - * have a long chain of parents in this state... + * have a long chain of parents in this state... * we'll sit in this loop until we run into * a parent in this chain that is not in this state * @@ -1010,9 +1039,9 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u * this vnode on the list to be reaped by us, than * it has left this vnode with an iocount == 1 */ - if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) && - ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) { - /* + if ((vp->v_iocount == 1) && (vp->v_usecount == 0) && + ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) { + /* * vnode_rele wanted to do a vnode_reclaim on this vnode * it should be sitting on the head of the uu_vreclaims chain * pull the parent pointer now so that when we do the @@ -1020,29 +1049,29 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u * list, we won't recurse back through here * * need to do a convert here in case vnode_rele_internal - * returns with the lock held in the spin mode... it + * returns with the lock held in the spin mode... it * can drop and retake the lock under certain circumstances */ - vnode_lock_convert(vp); + vnode_lock_convert(vp); - NAME_CACHE_LOCK(); + NAME_CACHE_LOCK(); old_parentvp = vp->v_parent; vp->v_parent = NULLVP; NAME_CACHE_UNLOCK(); } else { - /* + /* * we're done... we ran into a vnode that isn't * being terminated */ - old_parentvp = NULLVP; + old_parentvp = NULLVP; } vnode_unlock(vp); } ut->uu_defer_reclaims = 0; - while ( (vp = ut->uu_vreclaims) != NULLVP) { - ut->uu_vreclaims = vp->v_defer_reclaimlist; - + while ((vp = ut->uu_vreclaims) != NULLVP) { + ut->uu_vreclaims = vp->v_defer_reclaimlist; + /* * vnode_put will drive the vnode_reclaim if * we are still the only reference on this vnode @@ -1062,7 +1091,8 @@ vnode_update_identity(vnode_t vp, vnode_t dvp, const char *name, int name_len, u * so that HFS can post-process the lookup. Also, volfs will call * VNOP_GETATTR2 to determine the parent, instead of using v_parent. 
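The uu_defer_reclaims machinery above is a recursion-flattening device: instead of letting each release reclaim its parent recursively, terminating vnodes are parked on a per-thread list and drained iteratively. The shape of the trick, with toy types and assumed predicates standing in for the VL_MARKTERM test and the vnode_put()-driven teardown:

	/*
	 * Sketch only: bounded-stack release of an arbitrarily long
	 * parent chain. node_is_terminating() and node_teardown() are
	 * assumed stand-ins, not XNU APIs.
	 */
	struct node {
		struct node *parent;
		struct node *defer_next;
	};

	static void
	release_parent_chain(struct node *np, struct node **deferred)
	{
		/* pass 1: walk up, deferring teardown instead of recursing */
		while (np != NULL) {
			struct node *next = NULL;

			if (node_is_terminating(np)) {
				next = np->parent;
				np->parent = NULL;      /* teardown can't recurse */
				np->defer_next = *deferred;
				*deferred = np;
			}
			np = next;                      /* stop at a live parent */
		}
		/* pass 2: drain iteratively, stack depth stays constant */
		while ((np = *deferred) != NULL) {
			*deferred = np->defer_next;
			node_teardown(np);
		}
	}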
*/ -void vnode_setmultipath(vnode_t vp) +void +vnode_setmultipath(vnode_t vp) { vnode_lock_spin(vp); @@ -1087,9 +1117,10 @@ void vnode_setmultipath(vnode_t vp) /* * backwards compatibility */ -void vnode_uncache_credentials(vnode_t vp) +void +vnode_uncache_credentials(vnode_t vp) { - vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); + vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS); } @@ -1104,9 +1135,10 @@ void vnode_uncache_credentials(vnode_t vp) * to hold it for the minimum amount of time possible */ -void vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action) +void +vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action) { - kauth_cred_t tcred = NOCRED; + kauth_cred_t tcred = NOCRED; NAME_CACHE_LOCK(); @@ -1114,68 +1146,74 @@ void vnode_uncache_authorized_action(vnode_t vp, kauth_action_t action) if (action == KAUTH_INVALIDATE_CACHED_RIGHTS && IS_VALID_CRED(vp->v_cred)) { - /* + /* * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held */ - tcred = vp->v_cred; + tcred = vp->v_cred; vp->v_cred = NOCRED; } NAME_CACHE_UNLOCK(); - if (tcred != NOCRED) + if (tcred != NOCRED) { kauth_cred_unref(&tcred); + } } -extern int bootarg_vnode_cache_defeat; /* default = 0, from bsd_init.c */ +extern int bootarg_vnode_cache_defeat; /* default = 0, from bsd_init.c */ boolean_t vnode_cache_is_authorized(vnode_t vp, vfs_context_t ctx, kauth_action_t action) { - kauth_cred_t ucred; - boolean_t retval = FALSE; + kauth_cred_t ucred; + boolean_t retval = FALSE; /* Boot argument to defeat rights caching */ - if (bootarg_vnode_cache_defeat) + if (bootarg_vnode_cache_defeat) { return FALSE; + } - if ( (vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) { - /* + if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) { + /* * a TTL is enabled on the rights cache... handle it here * a TTL of 0 indicates that no rights should be cached */ - if (vp->v_mount->mnt_authcache_ttl) { - if ( !(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL) ) { - /* + if (vp->v_mount->mnt_authcache_ttl) { + if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) { + /* * For filesystems marked only MNTK_AUTH_OPAQUE (generally network ones), * we will only allow a SEARCH right on a directory to be cached... 
* that cached right always has a default TTL associated with it */ - if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR) - vp = NULLVP; + if (action != KAUTH_VNODE_SEARCH || vp->v_type != VDIR) { + vp = NULLVP; + } } if (vp != NULLVP && vnode_cache_is_stale(vp) == TRUE) { - vnode_uncache_authorized_action(vp, vp->v_authorized_actions); + vnode_uncache_authorized_action(vp, vp->v_authorized_actions); vp = NULLVP; } - } else - vp = NULLVP; + } else { + vp = NULLVP; + } } if (vp != NULLVP) { - ucred = vfs_context_ucred(ctx); + ucred = vfs_context_ucred(ctx); NAME_CACHE_LOCK_SHARED(); - if (vp->v_cred == ucred && (vp->v_authorized_actions & action) == action) - retval = TRUE; - + if (vp->v_cred == ucred && (vp->v_authorized_actions & action) == action) { + retval = TRUE; + } + NAME_CACHE_UNLOCK(); } return retval; } -void vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action) +void +vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t action) { kauth_cred_t tcred = NOCRED; kauth_cred_t ucred; @@ -1184,25 +1222,28 @@ void vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t ucred = vfs_context_ucred(ctx); - if (!IS_VALID_CRED(ucred) || action == 0) - return; + if (!IS_VALID_CRED(ucred) || action == 0) { + return; + } - if ( (vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) { - /* + if ((vp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) { + /* * a TTL is enabled on the rights cache... handle it here * a TTL of 0 indicates that no rights should be cached */ - if (vp->v_mount->mnt_authcache_ttl == 0) - return; + if (vp->v_mount->mnt_authcache_ttl == 0) { + return; + } - if ( !(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL) ) { - /* + if (!(vp->v_mount->mnt_kern_flag & MNTK_AUTH_CACHE_TTL)) { + /* * only cache SEARCH action for filesystems marked * MNTK_AUTH_OPAQUE on VDIRs... * the lookup_path code will time these out */ - if ( (action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR ) - return; + if ((action & ~KAUTH_VNODE_SEARCH) || vp->v_type != VDIR) { + return; + } } ttl_active = TRUE; @@ -1211,8 +1252,8 @@ void vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t NAME_CACHE_LOCK(); if (vp->v_cred != ucred) { - kauth_cred_ref(ucred); - /* + kauth_cred_ref(ucred); + /* * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held */ tcred = vp->v_cred; @@ -1220,36 +1261,39 @@ void vnode_cache_authorized_action(vnode_t vp, vfs_context_t ctx, kauth_action_t vp->v_authorized_actions = 0; } if (ttl_active == TRUE && vp->v_authorized_actions == 0) { - /* + /* * only reset the timestamnp on the * first authorization cached after the previous * timer has expired or we're switching creds... 
- * 'vnode_cache_is_authorized' will clear the + * 'vnode_cache_is_authorized' will clear the * authorized actions if the TTL is active and * it has expired */ - vp->v_cred_timestamp = tv.tv_sec; + vp->v_cred_timestamp = tv.tv_sec; } vp->v_authorized_actions |= action; NAME_CACHE_UNLOCK(); - if (IS_VALID_CRED(tcred)) + if (IS_VALID_CRED(tcred)) { kauth_cred_unref(&tcred); + } } -boolean_t vnode_cache_is_stale(vnode_t vp) +boolean_t +vnode_cache_is_stale(vnode_t vp) { - struct timeval tv; - boolean_t retval; + struct timeval tv; + boolean_t retval; microuptime(&tv); - if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl) - retval = TRUE; - else - retval = FALSE; + if ((tv.tv_sec - vp->v_cred_timestamp) > vp->v_mount->mnt_authcache_ttl) { + retval = TRUE; + } else { + retval = FALSE; + } return retval; } @@ -1259,27 +1303,27 @@ boolean_t vnode_cache_is_stale(vnode_t vp) /* * Returns: 0 Success * ERECYCLE vnode was recycled from underneath us. Force lookup to be re-driven from namei. - * This errno value should not be seen by anyone outside of the kernel. + * This errno value should not be seen by anyone outside of the kernel. */ -int -cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, - vfs_context_t ctx, int *dp_authorized, vnode_t last_dp) +int +cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, + vfs_context_t ctx, int *dp_authorized, vnode_t last_dp) { - char *cp; /* pointer into pathname argument */ - int vid; - int vvid = 0; /* protected by vp != NULLVP */ - vnode_t vp = NULLVP; - vnode_t tdp = NULLVP; - kauth_cred_t ucred; - boolean_t ttl_enabled = FALSE; - struct timeval tv; - mount_t mp; - unsigned int hash; - int error = 0; - boolean_t dotdotchecked = FALSE; + char *cp; /* pointer into pathname argument */ + int vid; + int vvid = 0; /* protected by vp != NULLVP */ + vnode_t vp = NULLVP; + vnode_t tdp = NULLVP; + kauth_cred_t ucred; + boolean_t ttl_enabled = FALSE; + struct timeval tv; + mount_t mp; + unsigned int hash; + int error = 0; + boolean_t dotdotchecked = FALSE; #if CONFIG_TRIGGERS - vnode_t trigger_vp; + vnode_t trigger_vp; #endif /* CONFIG_TRIGGERS */ ucred = vfs_context_ucred(ctx); @@ -1287,7 +1331,7 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, NAME_CACHE_LOCK_SHARED(); - if ( dp->v_mount && (dp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL)) ) { + if (dp->v_mount && (dp->v_mount->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) { ttl_enabled = TRUE; microuptime(&tv); } @@ -1299,7 +1343,7 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, * The last component of the filename is left accessible via * cnp->cn_nameptr for callers that need the name. */ - hash = 0; + hash = 0; cp = cnp->cn_nameptr; while (*cp && (*cp != '/')) { @@ -1310,8 +1354,9 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, * a 0... however, 0 for us means that we * haven't computed a hash, so use 1 instead */ - if (hash == 0) - hash = 1; + if (hash == 0) { + hash = 1; + } cnp->cn_hash = hash; cnp->cn_namelen = cp - cnp->cn_nameptr; @@ -1326,11 +1371,11 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, * and non-existing files that won't be directories specially later. 
*/ while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) { - cp++; + cp++; ndp->ni_pathlen--; if (*cp == '\0') { - ndp->ni_flag |= NAMEI_TRAILINGSLASH; + ndp->ni_flag |= NAMEI_TRAILINGSLASH; *ndp->ni_next = '\0'; } } @@ -1338,11 +1383,13 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, cnp->cn_flags &= ~(MAKEENTRY | ISLASTCN | ISDOTDOT); - if (*cp == '\0') - cnp->cn_flags |= ISLASTCN; + if (*cp == '\0') { + cnp->cn_flags |= ISLASTCN; + } - if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') - cnp->cn_flags |= ISDOTDOT; + if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.' && cnp->cn_nameptr[0] == '.') { + cnp->cn_flags |= ISDOTDOT; + } *dp_authorized = 0; #if NAMEDRSRCFORK @@ -1354,7 +1401,7 @@ cache_lookup_path(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, if ((ndp->ni_pathlen == sizeof(_PATH_RSRCFORKSPEC)) && (cp[1] == '.' && cp[2] == '.') && bcmp(cp, _PATH_RSRCFORKSPEC, sizeof(_PATH_RSRCFORKSPEC)) == 0) { - /* Skip volfs file systems that don't support native streams. */ + /* Skip volfs file systems that don't support native streams. */ if ((dp->v_mount != NULL) && (dp->v_mount->mnt_flag & MNT_DOVOLFS) && (dp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) { @@ -1388,7 +1435,7 @@ skiprsrcfork: if (ttl_enabled && (dp->v_mount->mnt_authcache_ttl == 0 || ((tv.tv_sec - dp->v_cred_timestamp) > dp->v_mount->mnt_authcache_ttl))) { - break; + break; } /* @@ -1406,8 +1453,8 @@ skiprsrcfork: */ if ((dp->v_cred != ucred || !(dp->v_authorized_actions & KAUTH_VNODE_SEARCH)) && !(dp->v_authorized_actions & KAUTH_VNODE_SEARCHBYANYONE) && - (ttl_enabled || !vfs_context_issuser(ctx))) { - break; + (ttl_enabled || !vfs_context_issuser(ctx))) { + break; } /* @@ -1418,13 +1465,16 @@ skiprsrcfork: */ *dp_authorized = 1; - if ( (cnp->cn_flags & (ISLASTCN | ISDOTDOT)) ) { - if (cnp->cn_nameiop != LOOKUP) + if ((cnp->cn_flags & (ISLASTCN | ISDOTDOT))) { + if (cnp->cn_nameiop != LOOKUP) { break; - if (cnp->cn_flags & LOCKPARENT) + } + if (cnp->cn_flags & LOCKPARENT) { break; - if (cnp->cn_flags & NOCACHE) + } + if (cnp->cn_flags & NOCACHE) { break; + } if (cnp->cn_flags & ISDOTDOT) { /* * Force directory hardlinks to go to @@ -1439,10 +1489,11 @@ skiprsrcfork: * don't have one. Otherwise, we'll * use it below. */ - if ((dp->v_flag & VROOT) || + if ((dp->v_flag & VROOT) || dp == ndp->ni_rootdir || - dp->v_parent == NULLVP) + dp->v_parent == NULLVP) { break; + } } } @@ -1458,9 +1509,9 @@ skiprsrcfork: * "." and ".." aren't supposed to be cached, so check * for them before checking the cache. 
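"." and ".." short-circuit before any hash-chain work: "." is the directory itself, and ".." comes from the cached v_parent. A reduced sketch of that dispatch (the chroot-escape and hardlink checks of the real code are omitted here):

	/*
	 * Sketch only: resolve dot entries without touching the cache.
	 */
	static vnode_t
	resolve_dots(vnode_t dp, struct componentname *cnp)
	{
		if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') {
			return dp;              /* "." is the directory itself */
		}
		if (cnp->cn_flags & ISDOTDOT) {
			return dp->v_parent;    /* may be NULLVP; caller falls back */
		}
		return NULLVP;                  /* a real name: consult the hash chains */
	}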
*/ - if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') + if (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') { vp = dp; - else if ( (cnp->cn_flags & ISDOTDOT) ) { + } else if ((cnp->cn_flags & ISDOTDOT)) { /* * If this is a chrooted process, we need to check if * the process is trying to break out of its chrooted @@ -1507,10 +1558,11 @@ skiprsrcfork: vp = dp->v_parent; } } else { - if ( (vp = cache_lookup_locked(dp, cnp)) == NULLVP) + if ((vp = cache_lookup_locked(dp, cnp)) == NULLVP) { break; + } - if ( (vp->v_flag & VISHARDLINK) ) { + if ((vp->v_flag & VISHARDLINK)) { /* * The file system wants a VNOP_LOOKUP on this vnode */ @@ -1518,31 +1570,35 @@ skiprsrcfork: break; } } - if ( (cnp->cn_flags & ISLASTCN) ) - break; + if ((cnp->cn_flags & ISLASTCN)) { + break; + } if (vp->v_type != VDIR) { - if (vp->v_type != VLNK) - vp = NULL; - break; + if (vp->v_type != VLNK) { + vp = NULL; + } + break; } - if ( (mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) { + if ((mp = vp->v_mountedhere) && ((cnp->cn_flags & NOCROSSMOUNT) == 0)) { vnode_t tmp_vp = mp->mnt_realrootvp; if (tmp_vp == NULLVP || mp->mnt_generation != mount_generation || - mp->mnt_realrootvp_vid != tmp_vp->v_id) + mp->mnt_realrootvp_vid != tmp_vp->v_id) { break; + } vp = tmp_vp; } #if CONFIG_TRIGGERS /* * After traversing all mountpoints stacked here, if we have a - * trigger in hand, resolve it. Note that we don't need to + * trigger in hand, resolve it. Note that we don't need to * leave the fast path if the mount has already happened. */ - if (vp->v_resolve) + if (vp->v_resolve) { break; + } #endif /* CONFIG_TRIGGERS */ @@ -1552,20 +1608,21 @@ skiprsrcfork: cnp->cn_nameptr = ndp->ni_next + 1; ndp->ni_pathlen--; while (*cnp->cn_nameptr == '/') { - cnp->cn_nameptr++; + cnp->cn_nameptr++; ndp->ni_pathlen--; } } - if (vp != NULLVP) - vvid = vp->v_id; + if (vp != NULLVP) { + vvid = vp->v_id; + } vid = dp->v_id; - + NAME_CACHE_UNLOCK(); if ((vp != NULLVP) && (vp->v_type != VLNK) && ((cnp->cn_flags & (ISLASTCN | LOCKPARENT | WANTPARENT | SAVESTART)) == ISLASTCN)) { - /* - * if we've got a child and it's the last component, and + /* + * if we've got a child and it's the last component, and * the lookup doesn't need to return the parent then we * can skip grabbing an iocount on the parent, since all * we're going to do with it is a vnode_put just before @@ -1573,8 +1630,8 @@ skiprsrcfork: * we need the parent in case the link happens to be * a relative pathname. */ - tdp = dp; - dp = NULLVP; + tdp = dp; + dp = NULLVP; } else { need_dp: /* @@ -1583,8 +1640,7 @@ need_dp: * in as a result of the last iteration of VNOP_LOOKUP, * it should already hold an io ref. No need to increase ref. */ - if (last_dp != dp){ - + if (last_dp != dp) { if (dp == ndp->ni_usedvp) { /* * if this vnode matches the one passed in via USEDVP @@ -1603,7 +1659,7 @@ need_dp: * changed identity or is being * TERMINATED... in either case * punt this lookup. - * + * * don't necessarily return ENOENT, though, because * we really want to go back to disk and make sure it's * there or not if someone else is changing this @@ -1624,19 +1680,19 @@ need_dp: } } if (vp != NULLVP) { - if ( (vnode_getwithvid_drainok(vp, vvid)) ) { - vp = NULLVP; + if ((vnode_getwithvid_drainok(vp, vvid))) { + vp = NULLVP; - /* + /* * can't get reference on the vp we'd like * to return... if we didn't grab a reference * on the directory (due to fast path bypass), * then we need to do it now... 
we can't return - * with both ni_dvp and ni_vp NULL, and no + * with both ni_dvp and ni_vp NULL, and no * error condition */ if (dp == NULLVP) { - dp = tdp; + dp = tdp; goto need_dp; } } @@ -1650,29 +1706,31 @@ need_dp: if ((error == 0) && (trigger_vp != NULLVP) && vnode_isdir(trigger_vp)) { error = vnode_trigger_resolve(trigger_vp, ndp, ctx); if (error) { - if (vp) + if (vp) { vnode_put(vp); - if (dp) + } + if (dp) { vnode_put(dp); + } goto errorout; } - } + } #endif /* CONFIG_TRIGGERS */ errorout: - /* + /* * If we came into cache_lookup_path after an iteration of the lookup loop that * resulted in a call to VNOP_LOOKUP, then VNOP_LOOKUP returned a vnode with a io ref - * on it. It is now the job of cache_lookup_path to drop the ref on this vnode + * on it. It is now the job of cache_lookup_path to drop the ref on this vnode * when it is no longer needed. If we get to this point, and last_dp is not NULL * and it is ALSO not the dvp we want to return to caller of this function, it MUST be - * the case that we got to a subsequent path component and this previous vnode is + * the case that we got to a subsequent path component and this previous vnode is * no longer needed. We can then drop the io ref on it. */ - if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)){ + if ((last_dp != NULLVP) && (last_dp != ndp->ni_dvp)) { vnode_put(last_dp); } - + //initialized to 0, should be the same if no error cases occurred. return error; } @@ -1685,16 +1743,17 @@ cache_lookup_locked(vnode_t dvp, struct componentname *cnp) struct nchashhead *ncpp; long namelen = cnp->cn_namelen; unsigned int hashval = cnp->cn_hash; - + if (nc_disabled) { return NULL; } ncpp = NCHHASH(dvp, cnp->cn_hash); LIST_FOREACH(ncp, ncpp, nc_hash) { - if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { - if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) - break; + if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { + if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) { + break; + } } } if (ncp == 0) { @@ -1702,11 +1761,11 @@ cache_lookup_locked(vnode_t dvp, struct componentname *cnp) * We failed to find an entry */ NCHSTAT(ncs_miss); - return (NULL); + return NULL; } NCHSTAT(ncs_goodhits); - return (ncp->nc_vp); + return ncp->nc_vp; } @@ -1718,32 +1777,33 @@ unsigned int hash_string(const char *cp, int len); unsigned int hash_string(const char *cp, int len) { - unsigned hash = 0; - - if (len) { - while (len--) { - hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; - } - } else { - while (*cp != '\0') { - hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; - } - } - /* - * the crc generator can legitimately generate - * a 0... however, 0 for us means that we - * haven't computed a hash, so use 1 instead - */ - if (hash == 0) - hash = 1; - return hash; + unsigned hash = 0; + + if (len) { + while (len--) { + hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; + } + } else { + while (*cp != '\0') { + hash = crc32tab[((hash >> 24) ^ (unsigned char)*cp++)] ^ hash << 8; + } + } + /* + * the crc generator can legitimately generate + * a 0... 
however, 0 for us means that we + * haven't computed a hash, so use 1 instead + */ + if (hash == 0) { + hash = 1; + } + return hash; } /* - * Lookup an entry in the cache + * Lookup an entry in the cache * - * We don't do this if the segment name is long, simply so the cache + * We don't do this if the segment name is long, simply so the cache * can avoid holding long names (which would either waste space, or * add greatly to the complexity). * @@ -1762,12 +1822,13 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) struct nchashhead *ncpp; long namelen = cnp->cn_namelen; unsigned int hashval; - boolean_t have_exclusive = FALSE; + boolean_t have_exclusive = FALSE; uint32_t vid; - vnode_t vp; + vnode_t vp; - if (cnp->cn_hash == 0) + if (cnp->cn_hash == 0) { cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + } hashval = cnp->cn_hash; if (nc_disabled) { @@ -1779,35 +1840,36 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) relook: ncpp = NCHHASH(dvp, cnp->cn_hash); LIST_FOREACH(ncp, ncpp, nc_hash) { - if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { - if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) - break; + if ((ncp->nc_dvp == dvp) && (ncp->nc_hashval == hashval)) { + if (strncmp(ncp->nc_name, cnp->cn_nameptr, namelen) == 0 && ncp->nc_name[namelen] == 0) { + break; + } } } /* We failed to find an entry */ if (ncp == 0) { NCHSTAT(ncs_miss); NAME_CACHE_UNLOCK(); - return (0); + return 0; } /* We don't want to have an entry, so dump it */ if ((cnp->cn_flags & MAKEENTRY) == 0) { - if (have_exclusive == TRUE) { - NCHSTAT(ncs_badhits); + if (have_exclusive == TRUE) { + NCHSTAT(ncs_badhits); cache_delete(ncp, 1); NAME_CACHE_UNLOCK(); - return (0); + return 0; } NAME_CACHE_UNLOCK(); NAME_CACHE_LOCK(); have_exclusive = TRUE; goto relook; - } + } vp = ncp->nc_vp; /* We found a "positive" match, return the vnode */ - if (vp) { + if (vp) { NCHSTAT(ncs_goodhits); vid = vp->v_id; @@ -1815,23 +1877,23 @@ relook: if (vnode_getwithvid(vp, vid)) { #if COLLECT_STATS - NAME_CACHE_LOCK(); + NAME_CACHE_LOCK(); NCHSTAT(ncs_badvid); NAME_CACHE_UNLOCK(); #endif - return (0); + return 0; } *vpp = vp; - return (-1); + return -1; } /* We found a negative match, and want to create it, so purge */ if (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) { - if (have_exclusive == TRUE) { - NCHSTAT(ncs_badhits); + if (have_exclusive == TRUE) { + NCHSTAT(ncs_badhits); cache_delete(ncp, 1); NAME_CACHE_UNLOCK(); - return (0); + return 0; } NAME_CACHE_UNLOCK(); NAME_CACHE_LOCK(); @@ -1845,7 +1907,7 @@ relook: NCHSTAT(ncs_neghits); NAME_CACHE_UNLOCK(); - return (ENOENT); + return ENOENT; } const char * @@ -1853,8 +1915,9 @@ cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp) { const char *strname; - if (cnp->cn_hash == 0) - cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + if (cnp->cn_hash == 0) { + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + } /* * grab 2 references on the string entered @@ -1869,7 +1932,7 @@ cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp) NAME_CACHE_UNLOCK(); - return (strname); + return strname; } @@ -1884,14 +1947,15 @@ cache_enter_create(vnode_t dvp, vnode_t vp, struct componentname *cnp) void cache_enter_with_gen(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, int gen) { - - if (cnp->cn_hash == 0) - cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + if (cnp->cn_hash == 0) { 
+ cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + } NAME_CACHE_LOCK(); - if (dvp->v_nc_generation == gen) - (void)cache_enter_locked(dvp, vp, cnp, NULL); + if (dvp->v_nc_generation == gen) { + (void)cache_enter_locked(dvp, vp, cnp, NULL); + } NAME_CACHE_UNLOCK(); } @@ -1905,8 +1969,9 @@ cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) { const char *strname; - if (cnp->cn_hash == 0) - cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + if (cnp->cn_hash == 0) { + cnp->cn_hash = hash_string(cnp->cn_nameptr, cnp->cn_namelen); + } /* * grab 1 reference on the string entered @@ -1925,22 +1990,24 @@ cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) static void cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, const char *strname) { - struct namecache *ncp, *negp; + struct namecache *ncp, *negp; struct nchashhead *ncpp; - if (nc_disabled) + if (nc_disabled) { return; + } /* * if the entry is for -ve caching vp is null */ if ((vp != NULLVP) && (LIST_FIRST(&vp->v_nclinks))) { - /* + /* * someone beat us to the punch.. * this vnode is already in the cache */ - if (strname != NULL) + if (strname != NULL) { vfs_removename(strname); + } return; } /* @@ -1950,7 +2017,7 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn */ if (numcache < desiredNodes && ((ncp = nchead.tqh_first) == NULL || - ncp->nc_hash.le_prev != 0)) { + ncp->nc_hash.le_prev != 0)) { /* * Allocate one more entry */ @@ -1960,14 +2027,14 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn /* * reuse an old entry */ - ncp = TAILQ_FIRST(&nchead); + ncp = TAILQ_FIRST(&nchead); TAILQ_REMOVE(&nchead, ncp, nc_entry); if (ncp->nc_hash.le_prev != 0) { - /* - * still in use... we need to - * delete it before re-using it - */ + /* + * still in use... we need to + * delete it before re-using it + */ NCHSTAT(ncs_stolen); cache_delete(ncp, 0); } @@ -1981,10 +2048,11 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn ncp->nc_dvp = dvp; ncp->nc_hashval = cnp->cn_hash; - if (strname == NULL) + if (strname == NULL) { ncp->nc_name = add_name_internal(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, FALSE, 0); - else + } else { ncp->nc_name = strname; + } // // If the bytes of the name associated with the vnode differ, @@ -1993,12 +2061,12 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn // case-insensitive file system where the case of the looked up // name differs from what is on disk. For more details, see: // FSEvents doesn't always decompose diacritical unicode chars in the paths of the changed directories - // + // const char *vn_name = vp ? vp->v_name : NULL; unsigned int len = vn_name ? 
strlen(vn_name) : 0; if (vn_name && ncp && ncp->nc_name && strncmp(ncp->nc_name, vn_name, len) != 0) { unsigned int hash = hash_string(vn_name, len); - + vfs_removename(ncp->nc_name); ncp->nc_name = add_name_internal(vn_name, len, hash, FALSE, 0); ncp->nc_hashval = hash; @@ -2015,9 +2083,11 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn { struct namecache *p; - for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next) - if (p == ncp) + for (p = ncpp->lh_first; p != 0; p = p->nc_hash.le_next) { + if (p == ncp) { panic("cache_enter: duplicate"); + } + } } #endif /* @@ -2026,27 +2096,27 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn LIST_INSERT_HEAD(ncpp, ncp, nc_hash); if (vp) { - /* - * add to the list of name cache entries - * that point at vp - */ + /* + * add to the list of name cache entries + * that point at vp + */ LIST_INSERT_HEAD(&vp->v_nclinks, ncp, nc_un.nc_link); } else { - /* + /* * this is a negative cache entry (vp == NULL) * stick it on the negative cache list. */ - TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry); - + TAILQ_INSERT_TAIL(&neghead, ncp, nc_un.nc_negentry); + ncs_negtotal++; if (ncs_negtotal > desiredNegNodes) { - /* - * if we've reached our desired limit - * of negative cache entries, delete - * the oldest - */ - negp = TAILQ_FIRST(&neghead); + /* + * if we've reached our desired limit + * of negative cache entries, delete + * the oldest + */ + negp = TAILQ_FIRST(&neghead); cache_delete(negp, 1); } } @@ -2054,37 +2124,40 @@ cache_enter_locked(struct vnode *dvp, struct vnode *vp, struct componentname *cn * add us to the list of name cache entries that * are children of dvp */ - if (vp) + if (vp) { TAILQ_INSERT_TAIL(&dvp->v_ncchildren, ncp, nc_child); - else + } else { TAILQ_INSERT_HEAD(&dvp->v_ncchildren, ncp, nc_child); + } } /* * Initialize CRC-32 remainder table. 
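What follows precomputes the 256-entry remainder table for the non-reflected CRC-32 polynomial 0x04C11DB7; hash_string() above consumes it one byte per step and remaps a final value of 0 to 1, because the cache reserves 0 to mean "hash not yet computed". The same construction restated as a self-contained program (only main() is added for demonstration):

#include <stdio.h>

static unsigned int crc32tab[256];

/* precompute the MSB-first CRC-32 remainder for every octet value */
static void
init_crc32(void)
{
	const unsigned int poly = 0x04c11db7;

	for (unsigned int i = 0; i < 256; i++) {
		unsigned int rem = i << 24;

		for (int j = 0; j < 8; j++) {
			rem = (rem & 0x80000000) ? (rem << 1) ^ poly : rem << 1;
		}
		crc32tab[i] = rem;
	}
}

/* table-driven update, one byte per step; 0 is reserved for "no hash" */
static unsigned int
hash_string(const char *cp)
{
	unsigned int hash = 0;

	while (*cp != '\0') {
		hash = crc32tab[(hash >> 24) ^ (unsigned char)*cp++] ^ (hash << 8);
	}
	return hash == 0 ? 1 : hash;
}

int
main(void)
{
	init_crc32();
	printf("%#x\n", hash_string("vfs.name_cache"));
	return 0;
}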
*/ -static void init_crc32(void) +static void +init_crc32(void) { - /* + /* * the CRC-32 generator polynomial is: * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^10 * + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1 */ - unsigned int crc32_polynomial = 0x04c11db7; - unsigned int i,j; + unsigned int crc32_polynomial = 0x04c11db7; + unsigned int i, j; /* * pre-calculate the CRC-32 remainder for each possible octet encoding */ - for (i = 0; i < 256; i++) { - unsigned int crc_rem = i << 24; + for (i = 0; i < 256; i++) { + unsigned int crc_rem = i << 24; - for (j = 0; j < 8; j++) { - if (crc_rem & 0x80000000) - crc_rem = (crc_rem << 1) ^ crc32_polynomial; - else - crc_rem = (crc_rem << 1); + for (j = 0; j < 8; j++) { + if (crc_rem & 0x80000000) { + crc_rem = (crc_rem << 1) ^ crc32_polynomial; + } else { + crc_rem = (crc_rem << 1); + } } crc32tab[i] = crc_rem; } @@ -2097,7 +2170,7 @@ static void init_crc32(void) void nchinit(void) { - int i; + int i; desiredNegNodes = (desiredvnodes / 10); desiredNodes = desiredvnodes + desiredNegNodes; @@ -2107,17 +2180,17 @@ nchinit(void) init_crc32(); - nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 *desiredNodes)), M_CACHE, &nchash); + nchashtbl = hashinit(MAX(CONFIG_NC_HASH, (2 * desiredNodes)), M_CACHE, &nchash); nchashmask = nchash; nchash++; init_string_table(); - + /* Allocate name cache lock group attribute and group */ - namecache_lck_grp_attr= lck_grp_attr_alloc_init(); + namecache_lck_grp_attr = lck_grp_attr_alloc_init(); + + namecache_lck_grp = lck_grp_alloc_init("Name Cache", namecache_lck_grp_attr); - namecache_lck_grp = lck_grp_alloc_init("Name Cache", namecache_lck_grp_attr); - /* Allocate name cache lock attribute */ namecache_lck_attr = lck_attr_alloc_init(); @@ -2126,18 +2199,19 @@ nchinit(void) /* Allocate string cache lock group attribute and group */ - strcache_lck_grp_attr= lck_grp_attr_alloc_init(); + strcache_lck_grp_attr = lck_grp_attr_alloc_init(); + + strcache_lck_grp = lck_grp_alloc_init("String Cache", strcache_lck_grp_attr); - strcache_lck_grp = lck_grp_alloc_init("String Cache", strcache_lck_grp_attr); - /* Allocate string cache lock attribute */ strcache_lck_attr = lck_attr_alloc_init(); /* Allocate string cache lock */ strtable_rw_lock = lck_rw_alloc_init(strcache_lck_grp, strcache_lck_attr); - for (i = 0; i < NUM_STRCACHE_LOCKS; i++) + for (i = 0; i < NUM_STRCACHE_LOCKS; i++) { lck_mtx_init(&strcache_mtx_locks[i], strcache_lck_grp, strcache_lck_attr); + } } void @@ -2162,87 +2236,88 @@ name_cache_unlock(void) int resize_namecache(int newsize) { - struct nchashhead *new_table; - struct nchashhead *old_table; - struct nchashhead *old_head, *head; - struct namecache *entry, *next; - uint32_t i, hashval; - int dNodes, dNegNodes, nelements; - u_long new_size, old_size; - - if (newsize < 0) - return EINVAL; - - dNegNodes = (newsize / 10); - dNodes = newsize + dNegNodes; - // we don't support shrinking yet - if (dNodes <= desiredNodes) { - return 0; - } - - if (os_mul_overflow(dNodes, 2, &nelements)) { - return EINVAL; - } - - new_table = hashinit(nelements, M_CACHE, &nchashmask); - new_size = nchashmask + 1; - - if (new_table == NULL) { - return ENOMEM; - } - - NAME_CACHE_LOCK(); - // do the switch! 
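The switch that follows is deliberately simple: publish the new bucket array and mask while the name-cache lock is held, then walk every chain of the old array and re-insert each entry under a freshly computed hash, capturing the successor pointer before the entry moves. A reduced userspace sketch of that walk over singly linked chains — struct entry and hash_name() are illustrative stand-ins, not xnu types:

#include <stddef.h>

struct entry {
	struct entry *next;
	const char   *name;
	unsigned int  hashval;
};

/* stand-in for hash_string(); any non-zero string hash works here */
static unsigned int
hash_name(const char *s)
{
	unsigned int h = 2166136261u;           /* FNV-1a */

	while (*s) {
		h = (h ^ (unsigned char)*s++) * 16777619u;
	}
	return h ? h : 1;
}

/*
 * Move every entry from old_tbl (old_size buckets) into new_tbl
 * (new_size buckets, assumed zeroed).  'next' is saved before the
 * entry is pushed onto its new chain, just as resize_namecache saves
 * nc_hash.le_next before LIST_INSERT_HEAD.
 */
static void
rehash_all(struct entry **old_tbl, size_t old_size,
    struct entry **new_tbl, size_t new_size)
{
	for (size_t i = 0; i < old_size; i++) {
		for (struct entry *e = old_tbl[i], *next; e != NULL; e = next) {
			size_t b;

			next = e->next;
			e->hashval = hash_name(e->name);
			b = e->hashval % new_size;
			e->next = new_tbl[b];
			new_tbl[b] = e;
		}
	}
}

Readers are excluded for the whole swap, so no per-entry synchronization is needed; the cost is one long critical section.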
- old_table = nchashtbl; - nchashtbl = new_table; - old_size = nchash; - nchash = new_size; - - // walk the old table and insert all the entries into - // the new table - // - for(i=0; i < old_size; i++) { - old_head = &old_table[i]; - for (entry=old_head->lh_first; entry != NULL; entry=next) { - // - // XXXdbg - Beware: this assumes that hash_string() does - // the same thing as what happens in - // lookup() over in vfs_lookup.c - hashval = hash_string(entry->nc_name, 0); - entry->nc_hashval = hashval; - head = NCHHASH(entry->nc_dvp, hashval); - - next = entry->nc_hash.le_next; - LIST_INSERT_HEAD(head, entry, nc_hash); - } - } - desiredNodes = dNodes; - desiredNegNodes = dNegNodes; - - NAME_CACHE_UNLOCK(); - FREE(old_table, M_CACHE); - - return 0; + struct nchashhead *new_table; + struct nchashhead *old_table; + struct nchashhead *old_head, *head; + struct namecache *entry, *next; + uint32_t i, hashval; + int dNodes, dNegNodes, nelements; + u_long new_size, old_size; + + if (newsize < 0) { + return EINVAL; + } + + dNegNodes = (newsize / 10); + dNodes = newsize + dNegNodes; + // we don't support shrinking yet + if (dNodes <= desiredNodes) { + return 0; + } + + if (os_mul_overflow(dNodes, 2, &nelements)) { + return EINVAL; + } + + new_table = hashinit(nelements, M_CACHE, &nchashmask); + new_size = nchashmask + 1; + + if (new_table == NULL) { + return ENOMEM; + } + + NAME_CACHE_LOCK(); + // do the switch! + old_table = nchashtbl; + nchashtbl = new_table; + old_size = nchash; + nchash = new_size; + + // walk the old table and insert all the entries into + // the new table + // + for (i = 0; i < old_size; i++) { + old_head = &old_table[i]; + for (entry = old_head->lh_first; entry != NULL; entry = next) { + // + // XXXdbg - Beware: this assumes that hash_string() does + // the same thing as what happens in + // lookup() over in vfs_lookup.c + hashval = hash_string(entry->nc_name, 0); + entry->nc_hashval = hashval; + head = NCHHASH(entry->nc_dvp, hashval); + + next = entry->nc_hash.le_next; + LIST_INSERT_HEAD(head, entry, nc_hash); + } + } + desiredNodes = dNodes; + desiredNegNodes = dNegNodes; + + NAME_CACHE_UNLOCK(); + FREE(old_table, M_CACHE); + + return 0; } static void cache_delete(struct namecache *ncp, int free_entry) { - NCHSTAT(ncs_deletes); + NCHSTAT(ncs_deletes); - if (ncp->nc_vp) { - LIST_REMOVE(ncp, nc_un.nc_link); + if (ncp->nc_vp) { + LIST_REMOVE(ncp, nc_un.nc_link); } else { - TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry); - ncs_negtotal--; + TAILQ_REMOVE(&neghead, ncp, nc_un.nc_negentry); + ncs_negtotal--; } - TAILQ_REMOVE(&(ncp->nc_dvp->v_ncchildren), ncp, nc_child); + TAILQ_REMOVE(&(ncp->nc_dvp->v_ncchildren), ncp, nc_child); LIST_REMOVE(ncp, nc_hash); /* * this field is used to indicate * that the entry is in use and - * must be deleted before it can + * must be deleted before it can * be reused... 
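Clearing nc_hash.le_prev below is doing double duty: LIST_REMOVE() leaves the back pointer dangling, and the allocation path shown earlier (cache_enter_locked) uses "le_prev != NULL" as its test for whether an aged-out entry is still hashed and must be deleted before it can be stolen. A compilable illustration with <sys/queue.h> — the two-field struct namecache here is a reduction, not the real one:

#include <stdio.h>
#include <sys/queue.h>

struct namecache {
	LIST_ENTRY(namecache) nc_hash;  /* hash-chain linkage */
	const char *nc_name;
};

LIST_HEAD(nchashhead, namecache);

/* an entry is on some hash chain iff its back pointer is non-NULL */
static int
nc_is_hashed(const struct namecache *ncp)
{
	return ncp->nc_hash.le_prev != NULL;
}

int
main(void)
{
	struct nchashhead head = LIST_HEAD_INITIALIZER(head);
	struct namecache a = { .nc_name = "a" };        /* le_prev starts NULL */

	printf("before insert: %d\n", nc_is_hashed(&a));        /* 0 */
	LIST_INSERT_HEAD(&head, &a, nc_hash);
	printf("after insert:  %d\n", nc_is_hashed(&a));        /* 1 */
	LIST_REMOVE(&a, nc_hash);
	a.nc_hash.le_prev = NULL;       /* the explicit clear, as in cache_delete */
	printf("after delete:  %d\n", nc_is_hashed(&a));        /* 0 */
	return 0;
}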
*/ ncp->nc_hash.le_prev = NULL; @@ -2250,7 +2325,7 @@ cache_delete(struct namecache *ncp, int free_entry) vfs_removename(ncp->nc_name); ncp->nc_name = NULL; if (free_entry) { - TAILQ_REMOVE(&nchead, ncp, nc_entry); + TAILQ_REMOVE(&nchead, ncp, nc_entry); FREE_ZONE(ncp, sizeof(*ncp), M_CACHE); numcache--; } @@ -2258,31 +2333,35 @@ cache_delete(struct namecache *ncp, int free_entry) /* - * purge the entry associated with the + * purge the entry associated with the * specified vnode from the name cache */ void cache_purge(vnode_t vp) { - struct namecache *ncp; + struct namecache *ncp; kauth_cred_t tcred = NULL; - if ((LIST_FIRST(&vp->v_nclinks) == NULL) && - (TAILQ_FIRST(&vp->v_ncchildren) == NULL) && - (vp->v_cred == NOCRED) && - (vp->v_parent == NULLVP)) - return; + if ((LIST_FIRST(&vp->v_nclinks) == NULL) && + (TAILQ_FIRST(&vp->v_ncchildren) == NULL) && + (vp->v_cred == NOCRED) && + (vp->v_parent == NULLVP)) { + return; + } NAME_CACHE_LOCK(); - if (vp->v_parent) - vp->v_parent->v_nc_generation++; + if (vp->v_parent) { + vp->v_parent->v_nc_generation++; + } - while ( (ncp = LIST_FIRST(&vp->v_nclinks)) ) - cache_delete(ncp, 1); + while ((ncp = LIST_FIRST(&vp->v_nclinks))) { + cache_delete(ncp, 1); + } - while ( (ncp = TAILQ_FIRST(&vp->v_ncchildren)) ) - cache_delete(ncp, 1); + while ((ncp = TAILQ_FIRST(&vp->v_ncchildren))) { + cache_delete(ncp, 1); + } /* * Use a temp variable to avoid kauth_cred_unref() while NAME_CACHE_LOCK is held @@ -2293,8 +2372,9 @@ cache_purge(vnode_t vp) NAME_CACHE_UNLOCK(); - if (IS_VALID_CRED(tcred)) - kauth_cred_unref(&tcred); + if (IS_VALID_CRED(tcred)) { + kauth_cred_unref(&tcred); + } } /* @@ -2312,8 +2392,9 @@ cache_purge_negatives(vnode_t vp) NAME_CACHE_LOCK(); TAILQ_FOREACH_SAFE(ncp, &vp->v_ncchildren, nc_child, next_ncp) { - if (ncp->nc_vp) + if (ncp->nc_vp) { break; + } cache_delete(ncp, 1); } @@ -2336,7 +2417,7 @@ cache_purgevfs(struct mount *mp) NAME_CACHE_LOCK(); /* Scan hash tables for applicable entries */ for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) { -restart: +restart: for (ncp = ncpp->lh_first; ncp != 0; ncp = ncp->nc_hash.le_next) { if (ncp->nc_dvp->v_mount == mp) { cache_delete(ncp, 0); @@ -2352,15 +2433,15 @@ restart: // // String ref routines // -static LIST_HEAD(stringhead, string_t) *string_ref_table; +static LIST_HEAD(stringhead, string_t) * string_ref_table; static u_long string_table_mask; -static uint32_t filled_buckets=0; +static uint32_t filled_buckets = 0; typedef struct string_t { - LIST_ENTRY(string_t) hash_chain; - const char *str; - uint32_t refcount; + LIST_ENTRY(string_t) hash_chain; + const char *str; + uint32_t refcount; } string_t; @@ -2400,7 +2481,7 @@ resize_string_ref_table(void) string_ref_table = new_table; old_mask = string_table_mask; string_table_mask = new_mask; - filled_buckets = 0; + filled_buckets = 0; // walk the old table and insert all the entries into // the new table @@ -2433,7 +2514,7 @@ init_string_table(void) const char * vfs_addname(const char *name, uint32_t len, u_int hashval, u_int flags) { - return (add_name_internal(name, len, hashval, FALSE, flags)); + return add_name_internal(name, len, hashval, FALSE, flags); } @@ -2443,19 +2524,20 @@ add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_ struct stringhead *head; string_t *entry; uint32_t chain_len = 0; - uint32_t hash_index; - uint32_t lock_index; + uint32_t hash_index; + uint32_t lock_index; char *ptr; - - if (len > MAXPATHLEN) + + if (len > MAXPATHLEN) { len = MAXPATHLEN; + } /* * if the length 
already accounts for the null-byte, then * subtract one so later on we don't index past the end * of the string. */ - if (len > 0 && name[len-1] == '\0') { + if (len > 0 && name[len - 1] == '\0') { len--; } if (hashval == 0) { @@ -2509,9 +2591,10 @@ add_name_internal(const char *name, uint32_t len, u_int hashval, boolean_t need_ entry->refcount = 1; LIST_INSERT_HEAD(head, entry, hash_chain); } - if (need_extra_ref == TRUE) + if (need_extra_ref == TRUE) { entry->refcount++; - + } + lck_mtx_unlock(&strcache_mtx_locks[lock_index]); lck_rw_done(strtable_rw_lock); @@ -2525,9 +2608,9 @@ vfs_removename(const char *nameref) struct stringhead *head; string_t *entry; uint32_t hashval; - uint32_t hash_index; - uint32_t lock_index; - int retval = ENOENT; + uint32_t hash_index; + uint32_t lock_index; + int retval = ENOENT; hashval = hash_string(nameref, 0); @@ -2569,8 +2652,9 @@ vfs_removename(const char *nameref) lck_mtx_unlock(&strcache_mtx_locks[lock_index]); lck_rw_done(strtable_rw_lock); - if (entry != NULL) + if (entry != NULL) { FREE(entry, M_TEMP); + } return retval; } @@ -2580,18 +2664,18 @@ vfs_removename(const char *nameref) void dump_string_table(void) { - struct stringhead *head; - string_t *entry; - u_long i; - - lck_rw_lock_shared(strtable_rw_lock); - - for (i = 0; i <= string_table_mask; i++) { - head = &string_ref_table[i]; - for (entry=head->lh_first; entry != NULL; entry=entry->hash_chain.le_next) { - printf("%6d - %s\n", entry->refcount, entry->str); - } - } - lck_rw_done(strtable_rw_lock); + struct stringhead *head; + string_t *entry; + u_long i; + + lck_rw_lock_shared(strtable_rw_lock); + + for (i = 0; i <= string_table_mask; i++) { + head = &string_ref_table[i]; + for (entry = head->lh_first; entry != NULL; entry = entry->hash_chain.le_next) { + printf("%6d - %s\n", entry->refcount, entry->str); + } + } + lck_rw_done(strtable_rw_lock); } -#endif /* DUMP_STRING_TABLE */ +#endif /* DUMP_STRING_TABLE */ diff --git a/bsd/vfs/vfs_cluster.c b/bsd/vfs/vfs_cluster.c index cb023ccf9..56d369787 100644 --- a/bsd/vfs/vfs_cluster.c +++ b/bsd/vfs/vfs_cluster.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -92,7 +92,7 @@ #include #include -#include +#include #include @@ -106,72 +106,72 @@ #endif -#define CL_READ 0x01 -#define CL_WRITE 0x02 -#define CL_ASYNC 0x04 -#define CL_COMMIT 0x08 -#define CL_PAGEOUT 0x10 -#define CL_AGE 0x20 -#define CL_NOZERO 0x40 -#define CL_PAGEIN 0x80 -#define CL_DEV_MEMORY 0x100 -#define CL_PRESERVE 0x200 -#define CL_THROTTLE 0x400 -#define CL_KEEPCACHED 0x800 -#define CL_DIRECT_IO 0x1000 -#define CL_PASSIVE 0x2000 -#define CL_IOSTREAMING 0x4000 -#define CL_CLOSE 0x8000 -#define CL_ENCRYPTED 0x10000 -#define CL_RAW_ENCRYPTED 0x20000 -#define CL_NOCACHE 0x40000 - -#define MAX_VECTOR_UPL_ELEMENTS 8 -#define MAX_VECTOR_UPL_SIZE (2 * MAX_UPL_SIZE_BYTES) - -#define CLUSTER_IO_WAITING ((buf_t)1) +#define CL_READ 0x01 +#define CL_WRITE 0x02 +#define CL_ASYNC 0x04 +#define CL_COMMIT 0x08 +#define CL_PAGEOUT 0x10 +#define CL_AGE 0x20 +#define CL_NOZERO 0x40 +#define CL_PAGEIN 0x80 +#define CL_DEV_MEMORY 0x100 +#define CL_PRESERVE 0x200 +#define CL_THROTTLE 0x400 +#define CL_KEEPCACHED 0x800 +#define CL_DIRECT_IO 0x1000 +#define CL_PASSIVE 0x2000 +#define CL_IOSTREAMING 0x4000 +#define CL_CLOSE 0x8000 +#define CL_ENCRYPTED 0x10000 +#define CL_RAW_ENCRYPTED 0x20000 +#define CL_NOCACHE 0x40000 + +#define MAX_VECTOR_UPL_ELEMENTS 8 +#define MAX_VECTOR_UPL_SIZE (2 * MAX_UPL_SIZE_BYTES) + +#define CLUSTER_IO_WAITING ((buf_t)1) extern upl_t vector_upl_create(vm_offset_t); extern boolean_t vector_upl_is_valid(upl_t); -extern boolean_t vector_upl_set_subupl(upl_t,upl_t, u_int32_t); +extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t); extern void vector_upl_set_pagelist(upl_t); extern void vector_upl_set_iostate(upl_t, upl_t, vm_offset_t, u_int32_t); struct clios { lck_mtx_t io_mtxp; - u_int io_completed; /* amount of io that has currently completed */ - u_int io_issued; /* amount of io that was successfully issued */ - int io_error; /* error code of first error encountered */ - int io_wanted; /* someone is sleeping waiting for a change in state */ + u_int io_completed; /* amount of io that has currently completed */ + u_int io_issued; /* amount of io that was successfully issued */ + int io_error; /* error code of first error encountered */ + int io_wanted; /* someone is sleeping waiting for a change in state */ }; struct cl_direct_read_lock { - LIST_ENTRY(cl_direct_read_lock) chain; - int32_t ref_count; - vnode_t vp; - lck_rw_t rw_lock; + LIST_ENTRY(cl_direct_read_lock) chain; + int32_t ref_count; + vnode_t vp; + lck_rw_t rw_lock; }; #define CL_DIRECT_READ_LOCK_BUCKETS 61 static LIST_HEAD(cl_direct_read_locks, cl_direct_read_lock) - cl_direct_read_locks[CL_DIRECT_READ_LOCK_BUCKETS]; +cl_direct_read_locks[CL_DIRECT_READ_LOCK_BUCKETS]; static lck_spin_t cl_direct_read_spin_lock; -static lck_grp_t *cl_mtx_grp; -static lck_attr_t *cl_mtx_attr; +static lck_grp_t *cl_mtx_grp; +static lck_attr_t *cl_mtx_attr; static lck_grp_attr_t *cl_mtx_grp_attr; -static lck_mtx_t *cl_transaction_mtxp; +static lck_mtx_t *cl_transaction_mtxp; -#define IO_UNKNOWN 0 -#define IO_DIRECT 1 -#define IO_CONTIG 2 -#define IO_COPY 3 +#define IO_UNKNOWN 0 +#define IO_DIRECT 1 +#define IO_CONTIG 2 +#define IO_COPY 3 -#define PUSH_DELAY 0x01 -#define PUSH_ALL 0x02 -#define PUSH_SYNC 0x04 +#define PUSH_DELAY 0x01 +#define PUSH_ALL 0x02 +#define PUSH_SYNC 0x04 static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset); @@ -181,7 +181,7 @@ static void cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, in static int cluster_io_type(struct uio *uio, int 
*io_type, u_int32_t *io_length, u_int32_t min_length); static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size, - int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg); + int flags, buf_t real_bp, struct clios *iostate, int (*)(buf_t, void *), void *callback_arg); static int cluster_iodone(buf_t bp, void *callback_arg); static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp); static int cluster_is_throttled(vnode_t vp); @@ -193,43 +193,44 @@ static void cluster_syncup(vnode_t vp, off_t newEOF, int (*)(buf_t, void *), voi static void cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_reference); static int cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty, int take_reference); -static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, - int (*)(buf_t, void *), void *callback_arg); +static int cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t filesize, int flags, + int (*)(buf_t, void *), void *callback_arg); static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length, - int flags, int (*)(buf_t, void *), void *callback_arg); + int flags, int (*)(buf_t, void *), void *callback_arg); static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length, - int (*)(buf_t, void *), void *callback_arg, int flags); + int (*)(buf_t, void *), void *callback_arg, int flags); static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, - off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg); + off_t headOff, off_t tailOff, int flags, int (*)(buf_t, void *), void *callback_arg); static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, - int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg); + int *write_type, u_int32_t *write_length, int flags, int (*)(buf_t, void *), void *callback_arg); static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, - int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag); + int *write_type, u_int32_t *write_length, int (*)(buf_t, void *), void *callback_arg, int bflag); static void cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, boolean_t *first_pass, - off_t write_off, int write_cnt, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); + off_t write_off, int write_cnt, off_t newEOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*)(buf_t, void *), void *callback_arg); -static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag); -static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, - int (*callback)(buf_t, void *), void *callback_arg, int bflag); +static int cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, int (*callback)(buf_t, void *), void *callback_arg, int bflag); +static void 
cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *ra, + int (*callback)(buf_t, void *), void *callback_arg, int bflag); -static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_ioitiated); +static int cluster_push_now(vnode_t vp, struct cl_extent *, off_t EOF, int flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_ioitiated); -static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *), - void *callback_arg, int *err, boolean_t vm_initiated); +static int cluster_try_push(struct cl_writebehind *, vnode_t vp, off_t EOF, int push_flag, int flags, int (*)(buf_t, void *), + void *callback_arg, int *err, boolean_t vm_initiated); -static int sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); -static int sparse_cluster_push(struct cl_writebehind *, void **cmapp, vnode_t vp, off_t EOF, int push_flag, - int io_flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); -static int sparse_cluster_add(struct cl_writebehind *, void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, - int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); +static int sparse_cluster_switch(struct cl_writebehind *, vnode_t vp, off_t EOF, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); +static int sparse_cluster_push(struct cl_writebehind *, void **cmapp, vnode_t vp, off_t EOF, int push_flag, + int io_flags, int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); +static int sparse_cluster_add(struct cl_writebehind *, void **cmapp, vnode_t vp, struct cl_extent *, off_t EOF, + int (*)(buf_t, void *), void *callback_arg, boolean_t vm_initiated); static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp); static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp); static kern_return_t vfs_drt_control(void **cmapp, int op_type); +static kern_return_t vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag); /* @@ -260,46 +261,46 @@ int (*bootcache_contains_block)(dev_t device, u_int64_t blkno) = NULL; * limit the internal I/O size so that we * can represent it in a 32 bit int */ -#define MAX_IO_REQUEST_SIZE (1024 * 1024 * 512) -#define MAX_IO_CONTIG_SIZE MAX_UPL_SIZE_BYTES -#define MAX_VECTS 16 +#define MAX_IO_REQUEST_SIZE (1024 * 1024 * 512) +#define MAX_IO_CONTIG_SIZE MAX_UPL_SIZE_BYTES +#define MAX_VECTS 16 /* * The MIN_DIRECT_WRITE_SIZE governs how much I/O should be issued before we consider - * allowing the caller to bypass the buffer cache. For small I/Os (less than 16k), - * we have not historically allowed the write to bypass the UBC. + * allowing the caller to bypass the buffer cache. For small I/Os (less than 16k), + * we have not historically allowed the write to bypass the UBC. 
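MIN_DIRECT_WRITE_SIZE, defined just after this comment as 16 KiB, is that cutoff. As a sketch of the resulting path choice — the alignment test here is reduced to page alignment for illustration, while the real eligibility logic in the cluster write path also weighs uio segment layout and device block size, which this omits:

/* sketch: pick the copy (UBC) path or the direct path for a write */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE_BYTES        4096u
#define MIN_DIRECT_WRITE_SIZE  16384u   /* same value as the constant below */

enum io_path { IO_COPY_PATH, IO_DIRECT_PATH };

static enum io_path
choose_write_path(uint64_t f_offset, uint64_t user_addr, uint32_t len)
{
	if (len < MIN_DIRECT_WRITE_SIZE) {
		return IO_COPY_PATH;    /* small writes never bypass the UBC */
	}
	if ((f_offset | user_addr | len) & (PAGE_SIZE_BYTES - 1)) {
		return IO_COPY_PATH;    /* misaligned: copy through the cache */
	}
	return IO_DIRECT_PATH;
}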
*/ -#define MIN_DIRECT_WRITE_SIZE (16384) +#define MIN_DIRECT_WRITE_SIZE (16384) -#define WRITE_THROTTLE 6 -#define WRITE_THROTTLE_SSD 2 -#define WRITE_BEHIND 1 -#define WRITE_BEHIND_SSD 1 +#define WRITE_THROTTLE 6 +#define WRITE_THROTTLE_SSD 2 +#define WRITE_BEHIND 1 +#define WRITE_BEHIND_SSD 1 #if CONFIG_EMBEDDED -#define PREFETCH 1 -#define PREFETCH_SSD 1 -uint32_t speculative_prefetch_max = (2048 * 1024); /* maximum bytes in a specluative read-ahead */ -uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a specluative read-ahead */ +#define PREFETCH 1 +#define PREFETCH_SSD 1 +uint32_t speculative_prefetch_max = (2048 * 1024); /* maximum bytes in a specluative read-ahead */ +uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a specluative read-ahead */ #else -#define PREFETCH 3 -#define PREFETCH_SSD 2 -uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3); /* maximum bytes in a specluative read-ahead */ -uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a specluative read-ahead on SSDs*/ +#define PREFETCH 3 +#define PREFETCH_SSD 2 +uint32_t speculative_prefetch_max = (MAX_UPL_SIZE_BYTES * 3); /* maximum bytes in a specluative read-ahead */ +uint32_t speculative_prefetch_max_iosize = (512 * 1024); /* maximum I/O size to use in a specluative read-ahead on SSDs*/ #endif -#define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base)) -#define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE)) -#define MAX_PREFETCH(vp, size, is_ssd) (size * IO_SCALE(vp, ((is_ssd) ? PREFETCH_SSD : PREFETCH))) +#define IO_SCALE(vp, base) (vp->v_mount->mnt_ioscale * (base)) +#define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE)) +#define MAX_PREFETCH(vp, size, is_ssd) (size * IO_SCALE(vp, ((is_ssd) ? 
PREFETCH_SSD : PREFETCH))) -int speculative_reads_disabled = 0; +int speculative_reads_disabled = 0; /* * throttle the number of async writes that * can be outstanding on a single vnode - * before we issue a synchronous write + * before we issue a synchronous write */ -#define THROTTLE_MAXCNT 0 +#define THROTTLE_MAXCNT 0 uint32_t throttle_max_iosize = (128 * 1024); @@ -309,13 +310,14 @@ SYSCTL_INT(_debug, OID_AUTO, lowpri_throttle_max_iosize, CTLFLAG_RW | CTLFLAG_LO void -cluster_init(void) { - /* +cluster_init(void) +{ + /* * allocate lock group attribute and group */ - cl_mtx_grp_attr = lck_grp_attr_alloc_init(); + cl_mtx_grp_attr = lck_grp_attr_alloc_init(); cl_mtx_grp = lck_grp_alloc_init("cluster I/O", cl_mtx_grp_attr); - + /* * allocate the lock attribute */ @@ -323,25 +325,26 @@ cluster_init(void) { cl_transaction_mtxp = lck_mtx_alloc_init(cl_mtx_grp, cl_mtx_attr); - if (cl_transaction_mtxp == NULL) - panic("cluster_init: failed to allocate cl_transaction_mtxp"); + if (cl_transaction_mtxp == NULL) { + panic("cluster_init: failed to allocate cl_transaction_mtxp"); + } lck_spin_init(&cl_direct_read_spin_lock, cl_mtx_grp, cl_mtx_attr); - for (int i = 0; i < CL_DIRECT_READ_LOCK_BUCKETS; ++i) + for (int i = 0; i < CL_DIRECT_READ_LOCK_BUCKETS; ++i) { LIST_INIT(&cl_direct_read_locks[i]); + } } uint32_t cluster_max_io_size(mount_t mp, int type) { - uint32_t max_io_size; - uint32_t segcnt; - uint32_t maxcnt; - - switch(type) { + uint32_t max_io_size; + uint32_t segcnt; + uint32_t maxcnt; + switch (type) { case CL_READ: segcnt = mp->mnt_segreadcnt; maxcnt = mp->mnt_maxreadcnt; @@ -356,34 +359,34 @@ cluster_max_io_size(mount_t mp, int type) break; } if (segcnt > (MAX_UPL_SIZE_BYTES >> PAGE_SHIFT)) { - /* - * don't allow a size beyond the max UPL size we can create - */ - segcnt = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT; - } - max_io_size = min((segcnt * PAGE_SIZE), maxcnt); - - if (max_io_size < MAX_UPL_TRANSFER_BYTES) { - /* - * don't allow a size smaller than the old fixed limit - */ - max_io_size = MAX_UPL_TRANSFER_BYTES; - } else { - /* - * make sure the size specified is a multiple of PAGE_SIZE - */ - max_io_size &= ~PAGE_MASK; - } - return (max_io_size); + /* + * don't allow a size beyond the max UPL size we can create + */ + segcnt = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT; + } + max_io_size = min((segcnt * PAGE_SIZE), maxcnt); + + if (max_io_size < MAX_UPL_TRANSFER_BYTES) { + /* + * don't allow a size smaller than the old fixed limit + */ + max_io_size = MAX_UPL_TRANSFER_BYTES; + } else { + /* + * make sure the size specified is a multiple of PAGE_SIZE + */ + max_io_size &= ~PAGE_MASK; + } + return max_io_size; } -#define CLW_ALLOCATE 0x01 -#define CLW_RETURNLOCKED 0x02 -#define CLW_IONOCACHE 0x04 -#define CLW_IOPASSIVE 0x08 +#define CLW_ALLOCATE 0x01 +#define CLW_RETURNLOCKED 0x02 +#define CLW_IONOCACHE 0x04 +#define CLW_IOPASSIVE 0x08 /* * if the read ahead context doesn't yet exist, @@ -392,7 +395,7 @@ cluster_max_io_size(mount_t mp, int type) * during the actual assignment... first one * to grab the lock wins... the other callers * will release the now unnecessary storage - * + * * once the context is present, try to grab (but don't block on) * the lock associated with it... 
if someone * else currently owns it, than the read @@ -405,33 +408,34 @@ cluster_max_io_size(mount_t mp, int type) static struct cl_readahead * cluster_get_rap(vnode_t vp) { - struct ubc_info *ubc; - struct cl_readahead *rap; + struct ubc_info *ubc; + struct cl_readahead *rap; ubc = vp->v_ubcinfo; - if ((rap = ubc->cl_rahead) == NULL) { - MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK); + if ((rap = ubc->cl_rahead) == NULL) { + MALLOC_ZONE(rap, struct cl_readahead *, sizeof *rap, M_CLRDAHEAD, M_WAITOK); bzero(rap, sizeof *rap); rap->cl_lastr = -1; lck_mtx_init(&rap->cl_lockr, cl_mtx_grp, cl_mtx_attr); vnode_lock(vp); - - if (ubc->cl_rahead == NULL) - ubc->cl_rahead = rap; - else { - lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp); - FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD); + + if (ubc->cl_rahead == NULL) { + ubc->cl_rahead = rap; + } else { + lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp); + FREE_ZONE(rap, sizeof *rap, M_CLRDAHEAD); rap = ubc->cl_rahead; } vnode_unlock(vp); } - if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE) - return(rap); - - return ((struct cl_readahead *)NULL); + if (lck_mtx_try_lock(&rap->cl_lockr) == TRUE) { + return rap; + } + + return (struct cl_readahead *)NULL; } @@ -442,7 +446,7 @@ cluster_get_rap(vnode_t vp) * during the actual assignment... first one * to grab the lock wins... the other callers * will release the now unnecessary storage - * + * * if CLW_RETURNLOCKED is set, grab (blocking if necessary) * the lock associated with the write behind context before * returning @@ -451,36 +455,37 @@ cluster_get_rap(vnode_t vp) static struct cl_writebehind * cluster_get_wbp(vnode_t vp, int flags) { - struct ubc_info *ubc; + struct ubc_info *ubc; struct cl_writebehind *wbp; ubc = vp->v_ubcinfo; - if ((wbp = ubc->cl_wbehind) == NULL) { + if ((wbp = ubc->cl_wbehind) == NULL) { + if (!(flags & CLW_ALLOCATE)) { + return (struct cl_writebehind *)NULL; + } - if ( !(flags & CLW_ALLOCATE)) - return ((struct cl_writebehind *)NULL); - - MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK); + MALLOC_ZONE(wbp, struct cl_writebehind *, sizeof *wbp, M_CLWRBEHIND, M_WAITOK); bzero(wbp, sizeof *wbp); lck_mtx_init(&wbp->cl_lockw, cl_mtx_grp, cl_mtx_attr); vnode_lock(vp); - - if (ubc->cl_wbehind == NULL) - ubc->cl_wbehind = wbp; - else { - lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp); - FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND); + + if (ubc->cl_wbehind == NULL) { + ubc->cl_wbehind = wbp; + } else { + lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp); + FREE_ZONE(wbp, sizeof *wbp, M_CLWRBEHIND); wbp = ubc->cl_wbehind; } vnode_unlock(vp); } - if (flags & CLW_RETURNLOCKED) - lck_mtx_lock(&wbp->cl_lockw); + if (flags & CLW_RETURNLOCKED) { + lck_mtx_lock(&wbp->cl_lockw); + } - return (wbp); + return wbp; } @@ -490,9 +495,8 @@ cluster_syncup(vnode_t vp, off_t newEOF, int (*callback)(buf_t, void *), void *c struct cl_writebehind *wbp; if ((wbp = cluster_get_wbp(vp, 0)) != NULL) { - - if (wbp->cl_number) { - lck_mtx_lock(&wbp->cl_lockw); + if (wbp->cl_number) { + lck_mtx_lock(&wbp->cl_lockw); cluster_try_push(wbp, vp, newEOF, PUSH_ALL | flags, 0, callback, callback_arg, NULL, FALSE); @@ -506,60 +510,64 @@ static int cluster_io_present_in_BC(vnode_t vp, off_t f_offset) { daddr64_t blkno; - size_t io_size; + size_t io_size; int (*bootcache_check_fn)(dev_t device, u_int64_t blkno) = bootcache_contains_block; - + if (bootcache_check_fn && vp->v_mount && vp->v_mount->mnt_devvp) { - if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, 
NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL)) - return(0); + if (VNOP_BLOCKMAP(vp, f_offset, PAGE_SIZE, &blkno, &io_size, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL)) { + return 0; + } - if (io_size == 0) - return (0); + if (io_size == 0) { + return 0; + } - if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno)) - return(1); + if (bootcache_check_fn(vp->v_mount->mnt_devvp->v_rdev, blkno)) { + return 1; + } } - return(0); + return 0; } -static int +static int cluster_is_throttled(vnode_t vp) { - return (throttle_io_will_be_throttled(-1, vp->v_mount)); + return throttle_io_will_be_throttled(-1, vp->v_mount); } static void cluster_iostate_wait(struct clios *iostate, u_int target, const char *wait_name) { - lck_mtx_lock(&iostate->io_mtxp); while ((iostate->io_issued - iostate->io_completed) > target) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START, - iostate->io_issued, iostate->io_completed, target, 0, 0); + iostate->io_issued, iostate->io_completed, target, 0, 0); iostate->io_wanted = 1; msleep((caddr_t)&iostate->io_wanted, &iostate->io_mtxp, PRIBIO + 1, wait_name, NULL); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END, - iostate->io_issued, iostate->io_completed, target, 0, 0); - } + iostate->io_issued, iostate->io_completed, target, 0, 0); + } lck_mtx_unlock(&iostate->io_mtxp); } -static void cluster_handle_associated_upl(struct clios *iostate, upl_t upl, - upl_offset_t upl_offset, upl_size_t size) +static void +cluster_handle_associated_upl(struct clios *iostate, upl_t upl, + upl_offset_t upl_offset, upl_size_t size) { - if (!size) + if (!size) { return; + } upl_t associated_upl = upl_associated_upl(upl); - if (!associated_upl) + if (!associated_upl) { return; + } #if 0 printf("1: %d %d\n", upl_offset, upl_offset + size); @@ -616,14 +624,15 @@ static void cluster_handle_associated_upl(struct clios *iostate, upl_t upl, * the minus one. */ assert(upl_offset); - if (upl_offset) + if (upl_offset) { upl_offset = trunc_page_32(upl_offset - 1); + } lck_mtx_lock_spin(&iostate->io_mtxp); // Look at the first page... if (upl_offset - && !upl_page_get_mark(assoc_pl, upl_offset >> PAGE_SHIFT)) { + && !upl_page_get_mark(assoc_pl, upl_offset >> PAGE_SHIFT)) { /* * The first page isn't marked so let another transaction * completion handle it. @@ -639,9 +648,9 @@ static void cluster_handle_associated_upl(struct clios *iostate, upl_t upl, * means there's another transaction that is sharing the last * page. */ - if (upl_end > assoc_upl_size) + if (upl_end > assoc_upl_size) { upl_end = assoc_upl_size; - else { + } else { upl_end = trunc_page_32(upl_end); const int last_pg = (upl_end >> PAGE_SHIFT) - 1; @@ -661,8 +670,9 @@ static void cluster_handle_associated_upl(struct clios *iostate, upl_t upl, printf("2: %d %d\n", upl_offset, upl_end); #endif - if (upl_end <= upl_offset) + if (upl_end <= upl_offset) { return; + } size = upl_end - upl_offset; } else { @@ -677,7 +687,7 @@ static void cluster_handle_associated_upl(struct clios *iostate, upl_t upl, * direct/uncached write, we want to dump the pages too. 
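The trimming logic above shrinks the associated-UPL range to the whole pages this completion exclusively owns: the first and last pages may be shared with a neighbouring transaction, and the per-page mark decides which side commits them. The boundary arithmetic in isolation — trunc_page here mirrors trunc_page_32, and the mark hand-off and end-of-UPL clamp are deliberately left out:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ        4096u
#define trunc_page(x)  ((x) & ~(PAGE_SZ - 1))

/*
 * Shrink [offset, offset + size) to page boundaries.  A non-zero
 * start is pulled back to the boundary below (offset - 1), matching
 * "upl_offset = trunc_page_32(upl_offset - 1)" above; the end is
 * truncated so a trailing partial page is left to its other owner.
 */
static void
trim_to_owned_pages(uint32_t offset, uint32_t size,
    uint32_t *out_off, uint32_t *out_size)
{
	uint32_t end = offset + size;

	if (offset != 0) {
		offset = trunc_page(offset - 1);
	}
	end = trunc_page(end);

	*out_off = offset;
	*out_size = (end > offset) ? end - offset : 0;
}

int
main(void)
{
	uint32_t off, sz;

	trim_to_owned_pages(6144, 10240, &off, &sz);    /* bytes 6K..16K */
	printf("commit [%u, %u)\n", off, off + sz);     /* [4096, 16384) */
	return 0;
}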
*/ kern_return_t kr = upl_abort_range(associated_upl, upl_offset, size, - UPL_ABORT_DUMP_PAGES, &empty); + UPL_ABORT_DUMP_PAGES, &empty); assert(!kr); @@ -690,69 +700,71 @@ static void cluster_handle_associated_upl(struct clios *iostate, upl_t upl, static int cluster_ioerror(upl_t upl, int upl_offset, int abort_size, int error, int io_flags, vnode_t vp) { - int upl_abort_code = 0; + int upl_abort_code = 0; int page_in = 0; int page_out = 0; - if ((io_flags & (B_PHYS | B_CACHE)) == (B_PHYS | B_CACHE)) - /* + if ((io_flags & (B_PHYS | B_CACHE)) == (B_PHYS | B_CACHE)) { + /* * direct write of any flavor, or a direct read that wasn't aligned */ - ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY); - else { - if (io_flags & B_PAGEIO) { - if (io_flags & B_READ) - page_in = 1; - else - page_out = 1; - } - if (io_flags & B_CACHE) - /* + ubc_upl_commit_range(upl, upl_offset, abort_size, UPL_COMMIT_FREE_ON_EMPTY); + } else { + if (io_flags & B_PAGEIO) { + if (io_flags & B_READ) { + page_in = 1; + } else { + page_out = 1; + } + } + if (io_flags & B_CACHE) { + /* * leave pages in the cache unchanged on error */ - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; - else if (((io_flags & B_READ) == 0) && ((error != ENXIO) || vnode_isswap(vp))) - /* + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; + } else if (((io_flags & B_READ) == 0) && ((error != ENXIO) || vnode_isswap(vp))) { + /* * transient error on pageout/write path... leave pages unchanged */ - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; - else if (page_in) - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR; - else - upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES; + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY; + } else if (page_in) { + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR; + } else { + upl_abort_code = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES; + } ubc_upl_abort_range(upl, upl_offset, abort_size, upl_abort_code); } - return (upl_abort_code); + return upl_abort_code; } static int cluster_iodone(buf_t bp, void *callback_arg) { - int b_flags; - int error; - int total_size; - int total_resid; - int upl_offset; - int zero_offset; - int pg_offset = 0; - int commit_size = 0; - int upl_flags = 0; - int transaction_size = 0; - upl_t upl; - buf_t cbp; - buf_t cbp_head; - buf_t cbp_next; - buf_t real_bp; - vnode_t vp; - struct clios *iostate; - boolean_t transaction_complete = FALSE; + int b_flags; + int error; + int total_size; + int total_resid; + int upl_offset; + int zero_offset; + int pg_offset = 0; + int commit_size = 0; + int upl_flags = 0; + int transaction_size = 0; + upl_t upl; + buf_t cbp; + buf_t cbp_head; + buf_t cbp_next; + buf_t real_bp; + vnode_t vp; + struct clios *iostate; + boolean_t transaction_complete = FALSE; __IGNORE_WCASTALIGN(cbp_head = (buf_t)(bp->b_trans_head)); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_START, - cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0); + cbp_head, bp->b_lblkno, bp->b_bcount, bp->b_flags, 0); if (cbp_head->b_trans_next || !(cbp_head->b_flags & B_EOT)) { lck_mtx_lock_spin(cl_transaction_mtxp); @@ -764,10 +776,9 @@ cluster_iodone(buf_t bp, void *callback_arg) * all I/O requests that are part of this transaction * have to complete before we can process it */ - if ( !(cbp->b_flags & B_TDONE)) { - + if (!(cbp->b_flags & B_TDONE)) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, - cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0); + cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0); lck_mtx_unlock(cl_transaction_mtxp); @@ -776,7 
+787,7 @@ cluster_iodone(buf_t bp, void *callback_arg) if (cbp->b_trans_next == CLUSTER_IO_WAITING) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, - cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0); + cbp_head, cbp, cbp->b_bcount, cbp->b_flags, 0); lck_mtx_unlock(cl_transaction_mtxp); wakeup(cbp); @@ -784,14 +795,15 @@ cluster_iodone(buf_t bp, void *callback_arg) return 0; } - if (cbp->b_flags & B_EOT) + if (cbp->b_flags & B_EOT) { transaction_complete = TRUE; + } } lck_mtx_unlock(cl_transaction_mtxp); if (transaction_complete == FALSE) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, - cbp_head, 0, 0, 0, 0); + cbp_head, 0, 0, 0, 0); return 0; } } @@ -800,110 +812,119 @@ cluster_iodone(buf_t bp, void *callback_arg) total_resid = 0; cbp = cbp_head; - vp = cbp->b_vp; + vp = cbp->b_vp; upl_offset = cbp->b_uploffset; upl = cbp->b_upl; b_flags = cbp->b_flags; real_bp = cbp->b_real_bp; - zero_offset= cbp->b_validend; + zero_offset = cbp->b_validend; iostate = (struct clios *)cbp->b_iostate; - if (real_bp) - real_bp->b_dev = cbp->b_dev; + if (real_bp) { + real_bp->b_dev = cbp->b_dev; + } while (cbp) { - if ((cbp->b_flags & B_ERROR) && error == 0) - error = cbp->b_error; + if ((cbp->b_flags & B_ERROR) && error == 0) { + error = cbp->b_error; + } total_resid += cbp->b_resid; total_size += cbp->b_bcount; cbp_next = cbp->b_trans_next; - if (cbp_next == NULL) - /* + if (cbp_next == NULL) { + /* * compute the overall size of the transaction * in case we created one that has 'holes' in it * 'total_size' represents the amount of I/O we * did, not the span of the transaction w/r to the UPL */ transaction_size = cbp->b_uploffset + cbp->b_bcount - upl_offset; + } - if (cbp != cbp_head) - free_io_buf(cbp); + if (cbp != cbp_head) { + free_io_buf(cbp); + } cbp = cbp_next; } if (ISSET(b_flags, B_COMMIT_UPL)) { cluster_handle_associated_upl(iostate, - cbp_head->b_upl, - upl_offset, - transaction_size); + cbp_head->b_upl, + upl_offset, + transaction_size); } - if (error == 0 && total_resid) + if (error == 0 && total_resid) { error = EIO; + } if (error == 0) { - int (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone); + int (*cliodone_func)(buf_t, void *) = (int (*)(buf_t, void *))(cbp_head->b_cliodone); if (cliodone_func != NULL) { - cbp_head->b_bcount = transaction_size; + cbp_head->b_bcount = transaction_size; - error = (*cliodone_func)(cbp_head, callback_arg); + error = (*cliodone_func)(cbp_head, callback_arg); } } - if (zero_offset) - cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp); + if (zero_offset) { + cluster_zero(upl, zero_offset, PAGE_SIZE - (zero_offset & PAGE_MASK), real_bp); + } - free_io_buf(cbp_head); + free_io_buf(cbp_head); if (iostate) { - int need_wakeup = 0; + int need_wakeup = 0; - /* + /* * someone has issued multiple I/Os asynchrounsly * and is waiting for them to complete (streaming) */ lck_mtx_lock_spin(&iostate->io_mtxp); - if (error && iostate->io_error == 0) - iostate->io_error = error; + if (error && iostate->io_error == 0) { + iostate->io_error = error; + } iostate->io_completed += total_size; if (iostate->io_wanted) { - /* - * someone is waiting for the state of + /* + * someone is waiting for the state of * this io stream to change */ - iostate->io_wanted = 0; + iostate->io_wanted = 0; need_wakeup = 1; } lck_mtx_unlock(&iostate->io_mtxp); - if (need_wakeup) - wakeup((caddr_t)&iostate->io_wanted); + if (need_wakeup) { + wakeup((caddr_t)&iostate->io_wanted); + } } if (b_flags & B_COMMIT_UPL) { - pg_offset = 
upl_offset & PAGE_MASK; commit_size = (pg_offset + transaction_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; if (error) { - upl_set_iodone_error(upl, error); + upl_set_iodone_error(upl, error); upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, commit_size, error, b_flags, vp); } else { upl_flags = UPL_COMMIT_FREE_ON_EMPTY; - if ((b_flags & B_PHYS) && (b_flags & B_READ)) - upl_flags |= UPL_COMMIT_SET_DIRTY; + if ((b_flags & B_PHYS) && (b_flags & B_READ)) { + upl_flags |= UPL_COMMIT_SET_DIRTY; + } - if (b_flags & B_AGE) - upl_flags |= UPL_COMMIT_INACTIVATE; + if (b_flags & B_AGE) { + upl_flags |= UPL_COMMIT_INACTIVATE; + } ubc_upl_commit_range(upl, upl_offset - pg_offset, commit_size, upl_flags); } @@ -918,9 +939,9 @@ cluster_iodone(buf_t bp, void *callback_arg) buf_biodone(real_bp); } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 20)) | DBG_FUNC_END, - upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0); + upl, upl_offset - pg_offset, commit_size, (error << 24) | upl_flags, 0); - return (error); + return error; } @@ -931,32 +952,31 @@ cluster_throttle_io_limit(vnode_t vp, uint32_t *limit) *limit = THROTTLE_MAX_IOSIZE; return 1; } - return 0; + return 0; } void cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_START, - upl_offset, size, bp, 0, 0); + upl_offset, size, bp, 0, 0); if (bp == NULL || bp->b_datap == 0) { - upl_page_info_t *pl; - addr64_t zero_addr; + upl_page_info_t *pl; + addr64_t zero_addr; - pl = ubc_upl_pageinfo(upl); + pl = ubc_upl_pageinfo(upl); if (upl_device_page(pl) == TRUE) { - zero_addr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + upl_offset; + zero_addr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + upl_offset; bzero_phys_nc(zero_addr, size); } else { - while (size) { - int page_offset; - int page_index; - int zero_cnt; + while (size) { + int page_offset; + int page_index; + int zero_cnt; page_index = upl_offset / PAGE_SIZE; page_offset = upl_offset & PAGE_MASK; @@ -970,25 +990,26 @@ cluster_zero(upl_t upl, upl_offset_t upl_offset, int size, buf_t bp) upl_offset += zero_cnt; } } - } else + } else { bzero((caddr_t)((vm_offset_t)bp->b_datap + upl_offset), size); + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 23)) | DBG_FUNC_END, - upl_offset, size, 0, 0, 0); + upl_offset, size, 0, 0, 0); } static void cluster_EOT(buf_t cbp_head, buf_t cbp_tail, int zero_offset) { - cbp_head->b_validend = zero_offset; - cbp_tail->b_flags |= B_EOT; + cbp_head->b_validend = zero_offset; + cbp_tail->b_flags |= B_EOT; } static void cluster_wait_IO(buf_t cbp_head, int async) { - buf_t cbp; + buf_t cbp; if (async) { /* @@ -1006,8 +1027,9 @@ cluster_wait_IO(buf_t cbp_head, int async) lck_mtx_lock_spin(cl_transaction_mtxp); for (cbp = cbp_head; cbp; last = cbp, cbp = cbp->b_trans_next) { - if (!ISSET(cbp->b_flags, B_TDONE)) + if (!ISSET(cbp->b_flags, B_TDONE)) { done = false; + } } if (!done) { @@ -1015,7 +1037,7 @@ cluster_wait_IO(buf_t cbp_head, int async) DTRACE_IO1(wait__start, buf_t, last); do { - msleep(last, cl_transaction_mtxp, PSPIN | (PRIBIO+1), "cluster_wait_IO", NULL); + msleep(last, cl_transaction_mtxp, PSPIN | (PRIBIO + 1), "cluster_wait_IO", NULL); /* * We should only have been woken up if all the @@ -1036,16 +1058,17 @@ cluster_wait_IO(buf_t cbp_head, int async) lck_mtx_unlock(cl_transaction_mtxp); } else { // !async - for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) + for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) { buf_biowait(cbp); + } } } static void cluster_complete_transaction(buf_t 
*cbp_head, void *callback_arg, int *retval, int flags, int needwait) { - buf_t cbp; - int error; + buf_t cbp; + int error; boolean_t isswapout = FALSE; /* @@ -1053,29 +1076,33 @@ cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, i * only be called if we've issued a complete chain in synchronous mode * or, we've already done a cluster_wait_IO on an incomplete chain */ - if (needwait) { - for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) - buf_biowait(cbp); + if (needwait) { + for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) { + buf_biowait(cbp); + } } /* * we've already waited on all of the I/Os in this transaction, * so mark all of the buf_t's in this transaction as B_TDONE * so that cluster_iodone sees the transaction as completed */ - for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) + for (cbp = *cbp_head; cbp; cbp = cbp->b_trans_next) { cbp->b_flags |= B_TDONE; + } cbp = *cbp_head; - if ((flags & (CL_ASYNC | CL_PAGEOUT)) == CL_PAGEOUT && vnode_isswap(cbp->b_vp)) + if ((flags & (CL_ASYNC | CL_PAGEOUT)) == CL_PAGEOUT && vnode_isswap(cbp->b_vp)) { isswapout = TRUE; + } error = cluster_iodone(cbp, callback_arg); - if ( !(flags & CL_ASYNC) && error && *retval == 0) { - if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO)) + if (!(flags & CL_ASYNC) && error && *retval == 0) { + if (((flags & (CL_PAGEOUT | CL_KEEPCACHED)) != CL_PAGEOUT) || (error != ENXIO)) { *retval = error; - else if (isswapout == TRUE) + } else if (isswapout == TRUE) { *retval = error; + } } *cbp_head = (buf_t)NULL; } @@ -1083,38 +1110,39 @@ cluster_complete_transaction(buf_t *cbp_head, void *callback_arg, int *retval, i static int cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int non_rounded_size, - int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg) + int flags, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg) { - buf_t cbp; - u_int size; - u_int io_size; - int io_flags; - int bmap_flags; - int error = 0; - int retval = 0; - buf_t cbp_head = NULL; - buf_t cbp_tail = NULL; - int trans_count = 0; - int max_trans_count; - u_int pg_count; - int pg_offset; - u_int max_iosize; - u_int max_vectors; - int priv; - int zero_offset = 0; - int async_throttle = 0; - mount_t mp; + buf_t cbp; + u_int size; + u_int io_size; + int io_flags; + int bmap_flags; + int error = 0; + int retval = 0; + buf_t cbp_head = NULL; + buf_t cbp_tail = NULL; + int trans_count = 0; + int max_trans_count; + u_int pg_count; + int pg_offset; + u_int max_iosize; + u_int max_vectors; + int priv; + int zero_offset = 0; + int async_throttle = 0; + mount_t mp; vm_offset_t upl_end_offset; boolean_t need_EOT = FALSE; /* * we currently don't support buffers larger than a page */ - if (real_bp && non_rounded_size > PAGE_SIZE) + if (real_bp && non_rounded_size > PAGE_SIZE) { panic("%s(): Called with real buffer of size %d bytes which " - "is greater than the maximum allowed size of " - "%d bytes (the system PAGE_SIZE).\n", - __FUNCTION__, non_rounded_size, PAGE_SIZE); + "is greater than the maximum allowed size of " + "%d bytes (the system PAGE_SIZE).\n", + __FUNCTION__, non_rounded_size, PAGE_SIZE); + } mp = vp->v_mount; @@ -1125,7 +1153,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * out a page */ if (mp->mnt_devblocksize > 1 && !(flags & (CL_DEV_MEMORY | CL_DIRECT_IO))) { - /* + /* * round the requested size up so that this I/O ends on a * page 
boundary in case this is a 'write'... if the filesystem * has blocks allocated to back the page beyond the EOF, we want to @@ -1137,16 +1165,16 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * only write/read from the disk up to the end of this allocation * via the extent info returned from the VNOP_BLOCKMAP call. */ - pg_offset = upl_offset & PAGE_MASK; + pg_offset = upl_offset & PAGE_MASK; size = (((non_rounded_size + pg_offset) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - pg_offset; } else { - /* + /* * anyone advertising a blocksize of 1 byte probably * can't deal with us rounding up the request size * AFP is one such filesystem/device */ - size = non_rounded_size; + size = non_rounded_size; } upl_end_offset = upl_offset + size; @@ -1157,17 +1185,18 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * buffers. */ max_trans_count = 8; - if (flags & CL_DEV_MEMORY) + if (flags & CL_DEV_MEMORY) { max_trans_count = 16; + } if (flags & CL_READ) { - io_flags = B_READ; + io_flags = B_READ; bmap_flags = VNODE_READ; max_iosize = mp->mnt_maxreadcnt; max_vectors = mp->mnt_segreadcnt; } else { - io_flags = B_WRITE; + io_flags = B_WRITE; bmap_flags = VNODE_WRITE; max_iosize = mp->mnt_maxwritecnt; @@ -1184,19 +1213,21 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no /* * Ensure the maximum iosize is sensible. */ - if (!max_iosize) + if (!max_iosize) { max_iosize = PAGE_SIZE; + } if (flags & CL_THROTTLE) { - if ( !(flags & CL_PAGEOUT) && cluster_is_throttled(vp)) { - if (max_iosize > THROTTLE_MAX_IOSIZE) - max_iosize = THROTTLE_MAX_IOSIZE; + if (!(flags & CL_PAGEOUT) && cluster_is_throttled(vp)) { + if (max_iosize > THROTTLE_MAX_IOSIZE) { + max_iosize = THROTTLE_MAX_IOSIZE; + } async_throttle = THROTTLE_MAXCNT; } else { - if ( (flags & CL_DEV_MEMORY) ) - async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE); - else { - u_int max_cluster; + if ((flags & CL_DEV_MEMORY)) { + async_throttle = IO_SCALE(vp, VNODE_ASYNC_THROTTLE); + } else { + u_int max_cluster; u_int max_cluster_size; u_int scale; @@ -1207,55 +1238,68 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no } else { max_cluster_size = MAX_CLUSTER_SIZE(vp); - if (disk_conditioner_mount_is_ssd(vp->v_mount)) + if (disk_conditioner_mount_is_ssd(vp->v_mount)) { scale = WRITE_THROTTLE_SSD; - else + } else { scale = WRITE_THROTTLE; + } + } + if (max_iosize > max_cluster_size) { + max_cluster = max_cluster_size; + } else { + max_cluster = max_iosize; } - if (max_iosize > max_cluster_size) - max_cluster = max_cluster_size; - else - max_cluster = max_iosize; - - if (size < max_cluster) - max_cluster = size; - - if (flags & CL_CLOSE) + + if (size < max_cluster) { + max_cluster = size; + } + + if (flags & CL_CLOSE) { scale += MAX_CLUSTERS; - - async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), ((scale * max_cluster_size) / max_cluster) - 1); + } + + async_throttle = min(IO_SCALE(vp, VNODE_ASYNC_THROTTLE), ((scale * max_cluster_size) / max_cluster) - 1); } } } - if (flags & CL_AGE) - io_flags |= B_AGE; - if (flags & (CL_PAGEIN | CL_PAGEOUT)) + if (flags & CL_AGE) { + io_flags |= B_AGE; + } + if (flags & (CL_PAGEIN | CL_PAGEOUT)) { io_flags |= B_PAGEIO; - if (flags & (CL_IOSTREAMING)) + } + if (flags & (CL_IOSTREAMING)) { io_flags |= B_IOSTREAMING; - if (flags & CL_COMMIT) - io_flags |= B_COMMIT_UPL; - if (flags & CL_DIRECT_IO) - io_flags |= B_PHYS; - if (flags & (CL_PRESERVE | CL_KEEPCACHED)) + } + if (flags & CL_COMMIT) { + 
io_flags |= B_COMMIT_UPL; + } + if (flags & CL_DIRECT_IO) { + io_flags |= B_PHYS; + } + if (flags & (CL_PRESERVE | CL_KEEPCACHED)) { io_flags |= B_CACHE; - if (flags & CL_PASSIVE) - io_flags |= B_PASSIVE; - if (flags & CL_ENCRYPTED) - io_flags |= B_ENCRYPTED_IO; + } + if (flags & CL_PASSIVE) { + io_flags |= B_PASSIVE; + } + if (flags & CL_ENCRYPTED) { + io_flags |= B_ENCRYPTED_IO; + } - if (vp->v_flag & VSYSTEM) - io_flags |= B_META; + if (vp->v_flag & VSYSTEM) { + io_flags |= B_META; + } if ((flags & CL_READ) && ((upl_offset + non_rounded_size) & PAGE_MASK) && (!(flags & CL_NOZERO))) { - /* + /* * then we are going to end up * with a page that we can't complete (the file size wasn't a multiple * of PAGE_SIZE and we're trying to read to the end of the file * so we'll go ahead and zero out the portion of the page we can't * read in from the file */ - zero_offset = upl_offset + non_rounded_size; + zero_offset = upl_offset + non_rounded_size; } else if (!ISSET(flags, CL_READ) && ISSET(flags, CL_DIRECT_IO)) { assert(ISSET(flags, CL_COMMIT)); @@ -1268,7 +1312,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * write is in progress. */ ubc_create_upl_kernel(vp, f_offset, non_rounded_size, &cached_upl, - NULL, UPL_SET_LITE, VM_KERN_MEMORY_FILE); + NULL, UPL_SET_LITE, VM_KERN_MEMORY_FILE); /* * Attach this UPL to the other UPL so that we can find it @@ -1290,62 +1334,67 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no while (size) { daddr64_t blkno; daddr64_t lblkno; - u_int io_size_wanted; - size_t io_size_tmp; + u_int io_size_wanted; + size_t io_size_tmp; - if (size > max_iosize) - io_size = max_iosize; - else - io_size = size; + if (size > max_iosize) { + io_size = max_iosize; + } else { + io_size = size; + } io_size_wanted = io_size; io_size_tmp = (size_t)io_size; - - if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL))) + + if ((error = VNOP_BLOCKMAP(vp, f_offset, io_size, &blkno, &io_size_tmp, NULL, bmap_flags, NULL))) { break; + } - if (io_size_tmp > io_size_wanted) - io_size = io_size_wanted; - else - io_size = (u_int)io_size_tmp; + if (io_size_tmp > io_size_wanted) { + io_size = io_size_wanted; + } else { + io_size = (u_int)io_size_tmp; + } - if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) - real_bp->b_blkno = blkno; + if (real_bp && (real_bp->b_blkno == real_bp->b_lblkno)) { + real_bp->b_blkno = blkno; + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 24)) | DBG_FUNC_NONE, - (int)f_offset, (int)(blkno>>32), (int)blkno, io_size, 0); + (int)f_offset, (int)(blkno >> 32), (int)blkno, io_size, 0); if (io_size == 0) { - /* + /* * vnop_blockmap didn't return an error... however, it did * return an extent size of 0 which means we can't * make forward progress on this I/O... 
a hole in the * file would be returned as a blkno of -1 with a non-zero io_size * a real extent is returned with a blkno != -1 and a non-zero io_size */ - error = EINVAL; + error = EINVAL; break; } - if ( !(flags & CL_READ) && blkno == -1) { - off_t e_offset; - int pageout_flags; + if (!(flags & CL_READ) && blkno == -1) { + off_t e_offset; + int pageout_flags; - if (upl_get_internal_vectorupl(upl)) + if (upl_get_internal_vectorupl(upl)) { panic("Vector UPLs should not take this code-path\n"); - /* + } + /* * we're writing into a 'hole' */ if (flags & CL_PAGEOUT) { - /* - * if we got here via cluster_pageout + /* + * if we got here via cluster_pageout * then just error the request and return * the 'hole' should already have been covered */ - error = EINVAL; + error = EINVAL; break; } /* - * we can get here if the cluster code happens to + * we can get here if the cluster code happens to * pick up a page that was dirtied via mmap vs * a 'write' and the page targets a 'hole'... * i.e. the writes to the cluster were sparse @@ -1372,10 +1421,12 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no */ pageout_flags = UPL_MSYNC | UPL_VNODE_PAGER | UPL_NESTED_PAGEOUT; - if ( !(flags & CL_ASYNC)) - pageout_flags |= UPL_IOSYNC; - if ( !(flags & CL_COMMIT)) - pageout_flags |= UPL_NOCOMMIT; + if (!(flags & CL_ASYNC)) { + pageout_flags |= UPL_IOSYNC; + } + if (!(flags & CL_COMMIT)) { + pageout_flags |= UPL_NOCOMMIT; + } if (cbp_head) { buf_t prev_cbp; @@ -1389,10 +1440,11 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no cluster_wait_IO(cbp_head, (flags & CL_ASYNC)); bytes_in_last_page = cbp_head->b_uploffset & PAGE_MASK; - for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) + for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next) { bytes_in_last_page += cbp->b_bcount; + } bytes_in_last_page &= PAGE_MASK; - + while (bytes_in_last_page) { /* * we've got a transaction that @@ -1403,8 +1455,9 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * * find the last bp in the list and act on it */ - for (prev_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next) + for (prev_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next) { prev_cbp = cbp; + } if (bytes_in_last_page >= cbp->b_bcount) { /* @@ -1444,13 +1497,13 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no } } if (cbp_head) { - /* * there was more to the current transaction * than just the page we are pushing out via vnode_pageout... * mark it as finished and complete it... 
we've already * waited for the I/Os to complete above in the call to cluster_wait_IO */ - cluster_EOT(cbp_head, cbp_tail, 0); + cluster_EOT(cbp_head, cbp_tail, 0); cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 0); @@ -1458,7 +1511,7 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no } } if (vnode_pageout(vp, upl, trunc_page(upl_offset), trunc_page_64(f_offset), PAGE_SIZE, pageout_flags, NULL) != PAGER_SUCCESS) { - error = EINVAL; + error = EINVAL; } e_offset = round_page_64(f_offset + 1); io_size = e_offset - f_offset; @@ -1466,10 +1519,11 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no f_offset += io_size; upl_offset += io_size; - if (size >= io_size) - size -= io_size; - else - size = 0; + if (size >= io_size) { + size -= io_size; + } else { + size = 0; + } /* * keep track of how much of the original request * that we've actually completed... non_rounded_size @@ -1479,18 +1533,19 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no non_rounded_size -= io_size; if (non_rounded_size <= 0) { - /* * we've transferred all of the data in the original * request, but we were unable to complete the tail * of the last page because the file didn't have * an allocation to back that portion... this is ok. */ - size = 0; + size = 0; } if (error) { - if (size == 0) + if (size == 0) { flags &= ~CL_COMMIT; - break; + } + break; } continue; } @@ -1507,74 +1562,78 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * treat physical requests as one 'giant' page */ pg_count = 1; - } else - pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE; + } else { + pg_count = (io_size + pg_offset + (PAGE_SIZE - 1)) / PAGE_SIZE; + } if ((flags & CL_READ) && blkno == -1) { vm_offset_t commit_offset; - int bytes_to_zero; + int bytes_to_zero; int complete_transaction_now = 0; - /* * if we're reading and blkno == -1, then we've got a * 'hole' in the file that we need to deal with by zeroing * out the affected area in the upl */ if (io_size >= (u_int)non_rounded_size) { - /* * if this upl contains the EOF and it is not a multiple of PAGE_SIZE * then 'zero_offset' will be non-zero * if the 'hole' returned by vnop_blockmap extends all the way to the eof * (indicated by the io_size finishing off the I/O request for this UPL) * then we're not going to issue an I/O for the * last page in this upl... 
we need to zero both the hole and the tail - * of the page beyond the EOF, since the delayed zero-fill won't kick in + * of the page beyond the EOF, since the delayed zero-fill won't kick in */ bytes_to_zero = non_rounded_size; - if (!(flags & CL_NOZERO)) + if (!(flags & CL_NOZERO)) { bytes_to_zero = (((upl_offset + io_size) + (PAGE_SIZE - 1)) & ~PAGE_MASK) - upl_offset; + } zero_offset = 0; - } else - bytes_to_zero = io_size; + } else { + bytes_to_zero = io_size; + } pg_count = 0; cluster_zero(upl, upl_offset, bytes_to_zero, real_bp); - + if (cbp_head) { - int pg_resid; + int pg_resid; - /* + /* * if there is a current I/O chain pending * then the first page of the group we just zero'd * will be handled by the I/O completion if the zero * fill started in the middle of the page */ - commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK; + commit_offset = (upl_offset + (PAGE_SIZE - 1)) & ~PAGE_MASK; pg_resid = commit_offset - upl_offset; - + if (bytes_to_zero >= pg_resid) { - /* - * the last page of the current I/O + /* + * the last page of the current I/O * has been completed... - * compute the number of fully zero'd + * compute the number of fully zero'd * pages that are beyond it * plus the last page if it's partial * and we have no more I/O to issue... * otherwise a partial page is left * to begin the next I/O */ - if ((int)io_size >= non_rounded_size) - pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE; - else - pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE; - + if ((int)io_size >= non_rounded_size) { + pg_count = (bytes_to_zero - pg_resid + (PAGE_SIZE - 1)) / PAGE_SIZE; + } else { + pg_count = (bytes_to_zero - pg_resid) / PAGE_SIZE; + } + complete_transaction_now = 1; } } else { - /* + /* * no pending I/O to deal with * so, commit all of the fully zero'd pages * plus the last page if it's partial @@ -1582,10 +1641,11 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * otherwise a partial page is left * to begin the next I/O */ - if ((int)io_size >= non_rounded_size) - pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE; - else - pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE; + if ((int)io_size >= non_rounded_size) { + pg_count = (pg_offset + bytes_to_zero + (PAGE_SIZE - 1)) / PAGE_SIZE; + } else { + pg_count = (pg_offset + bytes_to_zero) / PAGE_SIZE; + } commit_offset = upl_offset & ~PAGE_MASK; } @@ -1593,9 +1653,9 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no // Associated UPL is currently only used in the direct write path assert(!upl_associated_upl(upl)); - if ( (flags & CL_COMMIT) && pg_count) { - ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE, - UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY); + if ((flags & CL_COMMIT) && pg_count) { + ubc_upl_commit_range(upl, commit_offset, pg_count * PAGE_SIZE, + UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY); } upl_offset += io_size; f_offset += io_size; @@ -1610,16 +1670,16 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no non_rounded_size -= io_size; if (non_rounded_size <= 0) { - /* * we've transferred all of the data in the original * request, but we were unable to complete the tail * of the last page because the file didn't have * an allocation to back that portion... this is ok. 
*/ - size = 0; + size = 0; } - if (cbp_head && (complete_transaction_now || size == 0)) { - cluster_wait_IO(cbp_head, (flags & CL_ASYNC)); + if (cbp_head && (complete_transaction_now || size == 0)) { + cluster_wait_IO(cbp_head, (flags & CL_ASYNC)); cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0); @@ -1630,12 +1690,12 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no continue; } if (pg_count > max_vectors) { - if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) { - io_size = PAGE_SIZE - pg_offset; + if (((pg_count - max_vectors) * PAGE_SIZE) > io_size) { + io_size = PAGE_SIZE - pg_offset; pg_count = 1; } else { - io_size -= (pg_count - max_vectors) * PAGE_SIZE; - pg_count = max_vectors; + io_size -= (pg_count - max_vectors) * PAGE_SIZE; + pg_count = max_vectors; } } /* @@ -1644,11 +1704,11 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no * that the actual i/o is initiated after this buffer is * created and added to the i/o chain. * - * I/O directed to physically contiguous memory + * I/O directed to physically contiguous memory * doesn't have a requirement to make sure we 'fill' a page */ - if ( !(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count && - ((upl_offset + io_size) & PAGE_MASK)) { + if (!(flags & CL_DEV_MEMORY) && trans_count >= max_trans_count && + ((upl_offset + io_size) & PAGE_MASK)) { vm_offset_t aligned_ofs; aligned_ofs = (upl_offset + io_size) & ~PAGE_MASK; @@ -1668,81 +1728,87 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no } } - if ( !(mp->mnt_kern_flag & MNTK_VIRTUALDEV)) - /* + if (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV)) { + /* * if we're not targeting a virtual device i.e. a disk image * it's safe to dip into the reserve pool since real devices * can complete this I/O request without requiring additional * bufs from the alloc_io_buf pool */ priv = 1; - else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT)) - /* + } else if ((flags & CL_ASYNC) && !(flags & CL_PAGEOUT)) { + /* * Throttle the speculative IO */ priv = 0; - else + } else { priv = 1; + } cbp = alloc_io_buf(vp, priv); if (flags & CL_PAGEOUT) { - u_int i; + u_int i; /* * since blocks are in offsets of 0x1000, scale * iteration to (PAGE_SIZE * pg_count) of blks. */ - for (i = 0; i < (PAGE_SIZE * pg_count)/0x1000; i++) { - if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY) + for (i = 0; i < (PAGE_SIZE * pg_count) / 0x1000; i++) { + if (buf_invalblkno(vp, lblkno + i, 0) == EBUSY) { panic("BUSY bp found in cluster_io"); + } } } if (flags & CL_ASYNC) { - if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg)) - panic("buf_setcallback failed\n"); + if (buf_setcallback(cbp, (void *)cluster_iodone, callback_arg)) { + panic("buf_setcallback failed\n"); + } } cbp->b_cliodone = (void *)callback; cbp->b_flags |= io_flags; - if (flags & CL_NOCACHE) + if (flags & CL_NOCACHE) { cbp->b_attr.ba_flags |= BA_NOCACHE; + } cbp->b_lblkno = lblkno; cbp->b_blkno = blkno; cbp->b_bcount = io_size; - if (buf_setupl(cbp, upl, upl_offset)) - panic("buf_setupl failed\n"); + if (buf_setupl(cbp, upl, upl_offset)) { + panic("buf_setupl failed\n"); + } #if CONFIG_IOSCHED upl_set_blkno(upl, upl_offset, io_size, blkno); #endif cbp->b_trans_next = (buf_t)NULL; - if ((cbp->b_iostate = (void *)iostate)) - /* + if ((cbp->b_iostate = (void *)iostate)) { + /* * caller wants to track the state of this * io... 
bump the amount issued against this stream */ - iostate->io_issued += io_size; + iostate->io_issued += io_size; + } if (flags & CL_READ) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 26)) | DBG_FUNC_NONE, - (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0); - } - else { + (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0); + } else { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 27)) | DBG_FUNC_NONE, - (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0); + (int)cbp->b_lblkno, (int)cbp->b_blkno, upl_offset, io_size, 0); } if (cbp_head) { - cbp_tail->b_trans_next = cbp; + cbp_tail->b_trans_next = cbp; cbp_tail = cbp; } else { - cbp_head = cbp; + cbp_head = cbp; cbp_tail = cbp; - if ( (cbp_head->b_real_bp = real_bp) ) + if ((cbp_head->b_real_bp = real_bp)) { real_bp = (buf_t)NULL; + } } *(buf_t *)(&cbp->b_trans_head) = cbp_head; @@ -1760,65 +1826,69 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no non_rounded_size -= io_size; if (non_rounded_size <= 0) { - /* + /* * we've transferred all of the data in the original * request, but we were unable to complete the tail * of the last page because the file didn't have * an allocation to back that portion... this is ok. */ - size = 0; + size = 0; } if (size == 0) { - /* + /* * we have no more I/O to issue, so go * finish the final transaction */ - need_EOT = TRUE; - } else if ( ((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) && - ((flags & CL_ASYNC) || trans_count > max_trans_count) ) { - /* + need_EOT = TRUE; + } else if (((flags & CL_DEV_MEMORY) || (upl_offset & PAGE_MASK) == 0) && + ((flags & CL_ASYNC) || trans_count > max_trans_count)) { + /* * I/O directed to physically contiguous memory... * which doesn't have a requirement to make sure we 'fill' a page - * or... + * or... * the current I/O we've prepared fully * completes the last page in this request * and ... - * it's either an ASYNC request or + * it's either an ASYNC request or * we've already accumulated more than 8 I/O's into * this transaction so mark it as complete so that * it can finish asynchronously or via the cluster_complete_transaction * below if the request is synchronous */ - need_EOT = TRUE; + need_EOT = TRUE; + } + if (need_EOT == TRUE) { + cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0); + } + + if (flags & CL_THROTTLE) { + (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io"); } - if (need_EOT == TRUE) - cluster_EOT(cbp_head, cbp_tail, size == 0 ? zero_offset : 0); - if (flags & CL_THROTTLE) - (void)vnode_waitforwrites(vp, async_throttle, 0, 0, "cluster_io"); + if (!(io_flags & B_READ)) { + vnode_startwrite(vp); + } - if ( !(io_flags & B_READ)) - vnode_startwrite(vp); - if (flags & CL_RAW_ENCRYPTED) { - /* + /* * User requested raw encrypted bytes. 
* Twiddle the bit in the ba_flags for the buffer */ cbp->b_attr.ba_flags |= BA_RAW_ENCRYPTED_IO; } - + (void) VNOP_STRATEGY(cbp); if (need_EOT == TRUE) { - if ( !(flags & CL_ASYNC)) - cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1); + if (!(flags & CL_ASYNC)) { + cluster_complete_transaction(&cbp_head, callback_arg, &retval, flags, 1); + } need_EOT = FALSE; trans_count = 0; cbp_head = NULL; } - } + } if (error) { int abort_size; @@ -1840,13 +1910,13 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no if (ISSET(flags, CL_COMMIT)) { cluster_handle_associated_upl(iostate, upl, upl_offset, - upl_end_offset - upl_offset); + upl_end_offset - upl_offset); } // Free all the IO buffers in this transaction for (cbp = cbp_head; cbp;) { - buf_t cbp_next; - + buf_t cbp_next; + size += cbp->b_bcount; io_size += cbp->b_bcount; @@ -1856,51 +1926,55 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no } if (iostate) { - int need_wakeup = 0; + int need_wakeup = 0; - /* + /* * update the error condition for this stream * since we never really issued the io * just go ahead and adjust it back */ - lck_mtx_lock_spin(&iostate->io_mtxp); + lck_mtx_lock_spin(&iostate->io_mtxp); - if (iostate->io_error == 0) - iostate->io_error = error; + if (iostate->io_error == 0) { + iostate->io_error = error; + } iostate->io_issued -= io_size; if (iostate->io_wanted) { - /* + /* * someone is waiting for the state of * this io stream to change */ - iostate->io_wanted = 0; + iostate->io_wanted = 0; need_wakeup = 1; } - lck_mtx_unlock(&iostate->io_mtxp); + lck_mtx_unlock(&iostate->io_mtxp); - if (need_wakeup) - wakeup((caddr_t)&iostate->io_wanted); + if (need_wakeup) { + wakeup((caddr_t)&iostate->io_wanted); + } } if (flags & CL_COMMIT) { - int upl_flags; + int upl_flags; pg_offset = upl_offset & PAGE_MASK; abort_size = (upl_end_offset - upl_offset + PAGE_MASK) & ~PAGE_MASK; upl_flags = cluster_ioerror(upl, upl_offset - pg_offset, abort_size, error, io_flags, vp); - + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 28)) | DBG_FUNC_NONE, - upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0); + upl, upl_offset - pg_offset, abort_size, (error << 24) | upl_flags, 0); + } + if (retval == 0) { + retval = error; } - if (retval == 0) - retval = error; - } else if (cbp_head) - panic("%s(): cbp_head is not NULL.\n", __FUNCTION__); + } else if (cbp_head) { + panic("%s(): cbp_head is not NULL.\n", __FUNCTION__); + } if (real_bp) { - /* + /* * can get here if we either encountered an error * or we completely zero-filled the request and * no I/O was issued @@ -1913,26 +1987,26 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 22)) | DBG_FUNC_END, (int)f_offset, size, upl_offset, retval, 0); - return (retval); + return retval; } -#define reset_vector_run_state() \ - issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0; +#define reset_vector_run_state() \ + issueVectorUPL = vector_upl_offset = vector_upl_index = vector_upl_iosize = vector_upl_size = 0; static int vector_cluster_io(vnode_t vp, upl_t vector_upl, vm_offset_t vector_upl_offset, off_t v_upl_uio_offset, int vector_upl_iosize, - int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg) + int io_flag, buf_t real_bp, struct clios *iostate, int (*callback)(buf_t, void *), void *callback_arg) { vector_upl_set_pagelist(vector_upl); - 
if(io_flag & CL_READ) { - if(vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK)==0)) - io_flag &= ~CL_PRESERVE; /*don't zero fill*/ - else - io_flag |= CL_PRESERVE; /*zero fill*/ - } - return (cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg)); - + if (io_flag & CL_READ) { + if (vector_upl_offset == 0 && ((vector_upl_iosize & PAGE_MASK) == 0)) { + io_flag &= ~CL_PRESERVE; /*don't zero fill*/ + } else { + io_flag |= CL_PRESERVE; /*zero fill*/ + } + } + return cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, real_bp, iostate, callback, callback_arg); } static int @@ -1941,152 +2015,160 @@ cluster_read_prefetch(vnode_t vp, off_t f_offset, u_int size, off_t filesize, in int pages_in_prefetch; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_START, - (int)f_offset, size, (int)filesize, 0, 0); + (int)f_offset, size, (int)filesize, 0, 0); if (f_offset >= filesize) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END, - (int)f_offset, 0, 0, 0, 0); - return(0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END, + (int)f_offset, 0, 0, 0, 0); + return 0; + } + if ((off_t)size > (filesize - f_offset)) { + size = filesize - f_offset; } - if ((off_t)size > (filesize - f_offset)) - size = filesize - f_offset; pages_in_prefetch = (size + (PAGE_SIZE - 1)) / PAGE_SIZE; advisory_read_ext(vp, filesize, f_offset, size, callback, callback_arg, bflag); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 49)) | DBG_FUNC_END, - (int)f_offset + size, pages_in_prefetch, 0, 1, 0); + (int)f_offset + size, pages_in_prefetch, 0, 1, 0); - return (pages_in_prefetch); + return pages_in_prefetch; } static void cluster_read_ahead(vnode_t vp, struct cl_extent *extent, off_t filesize, struct cl_readahead *rap, int (*callback)(buf_t, void *), void *callback_arg, - int bflag) + int bflag) { - daddr64_t r_addr; - off_t f_offset; - int size_of_prefetch; - u_int max_prefetch; + daddr64_t r_addr; + off_t f_offset; + int size_of_prefetch; + u_int max_prefetch; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START, - (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0); + (int)extent->b_addr, (int)extent->e_addr, (int)rap->cl_lastr, 0, 0); if (extent->b_addr == rap->cl_lastr && extent->b_addr == extent->e_addr) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, - rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0); + rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 0, 0); return; } if (rap->cl_lastr == -1 || (extent->b_addr != rap->cl_lastr && extent->b_addr != (rap->cl_lastr + 1))) { - rap->cl_ralen = 0; + rap->cl_ralen = 0; rap->cl_maxra = 0; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, - rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0); + rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 1, 0); return; } max_prefetch = MAX_PREFETCH(vp, cluster_max_io_size(vp->v_mount, CL_READ), disk_conditioner_mount_is_ssd(vp->v_mount)); - if (max_prefetch > speculative_prefetch_max) + if (max_prefetch > speculative_prefetch_max) { max_prefetch = speculative_prefetch_max; + } if (max_prefetch <= PAGE_SIZE) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, - rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 6, 0); + rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 6, 0); return; } if (extent->e_addr < rap->cl_maxra && rap->cl_ralen >= 4) { - if ((rap->cl_maxra - extent->e_addr) > (rap->cl_ralen / 4)) { - - 
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, - rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0); + if ((rap->cl_maxra - extent->e_addr) > (rap->cl_ralen / 4)) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, + rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0); return; } } r_addr = max(extent->e_addr, rap->cl_maxra) + 1; f_offset = (off_t)(r_addr * PAGE_SIZE_64); - size_of_prefetch = 0; + size_of_prefetch = 0; ubc_range_op(vp, f_offset, f_offset + PAGE_SIZE_64, UPL_ROP_PRESENT, &size_of_prefetch); if (size_of_prefetch) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, - rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, + rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 3, 0); return; } if (f_offset < filesize) { - daddr64_t read_size; + daddr64_t read_size; - rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1; + rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1; read_size = (extent->e_addr + 1) - extent->b_addr; if (read_size > rap->cl_ralen) { - if (read_size > max_prefetch / PAGE_SIZE) - rap->cl_ralen = max_prefetch / PAGE_SIZE; - else - rap->cl_ralen = read_size; + if (read_size > max_prefetch / PAGE_SIZE) { + rap->cl_ralen = max_prefetch / PAGE_SIZE; + } else { + rap->cl_ralen = read_size; + } } size_of_prefetch = cluster_read_prefetch(vp, f_offset, rap->cl_ralen * PAGE_SIZE, filesize, callback, callback_arg, bflag); - if (size_of_prefetch) - rap->cl_maxra = (r_addr + size_of_prefetch) - 1; + if (size_of_prefetch) { + rap->cl_maxra = (r_addr + size_of_prefetch) - 1; + } } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END, - rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0); + rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 4, 0); } int cluster_pageout(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset, - int size, off_t filesize, int flags) + int size, off_t filesize, int flags) { - return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL); - + return cluster_pageout_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL); } int cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset, - int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg) + int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg) { int io_size; int rounded_size; - off_t max_size; + off_t max_size; int local_flags; local_flags = CL_PAGEOUT | CL_THROTTLE; - if ((flags & UPL_IOSYNC) == 0) + if ((flags & UPL_IOSYNC) == 0) { local_flags |= CL_ASYNC; - if ((flags & UPL_NOCOMMIT) == 0) + } + if ((flags & UPL_NOCOMMIT) == 0) { local_flags |= CL_COMMIT; - if ((flags & UPL_KEEPCACHED)) - local_flags |= CL_KEEPCACHED; - if (flags & UPL_PAGING_ENCRYPTED) + } + if ((flags & UPL_KEEPCACHED)) { + local_flags |= CL_KEEPCACHED; + } + if (flags & UPL_PAGING_ENCRYPTED) { local_flags |= CL_ENCRYPTED; + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 52)) | DBG_FUNC_NONE, - (int)f_offset, size, (int)filesize, local_flags, 0); + (int)f_offset, size, (int)filesize, local_flags, 0); /* * If they didn't specify any I/O, then we are done... 
* we can't issue an abort because we don't know how * big the upl really is */ - if (size <= 0) - return (EINVAL); + if (size <= 0) { + return EINVAL; + } - if (vp->v_mount->mnt_flag & MNT_RDONLY) { - if (local_flags & CL_COMMIT) - ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); - return (EROFS); + if (vp->v_mount->mnt_flag & MNT_RDONLY) { + if (local_flags & CL_COMMIT) { + ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); + } + return EROFS; } /* * can't page-in from a negative offset @@ -2095,63 +2177,71 @@ cluster_pageout_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offs * or the size requested isn't a multiple of PAGE_SIZE */ if (f_offset < 0 || f_offset >= filesize || - (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) { - if (local_flags & CL_COMMIT) + (f_offset & PAGE_MASK_64) || (size & PAGE_MASK)) { + if (local_flags & CL_COMMIT) { ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); - return (EINVAL); + } + return EINVAL; } max_size = filesize - f_offset; - if (size < max_size) - io_size = size; - else - io_size = max_size; + if (size < max_size) { + io_size = size; + } else { + io_size = max_size; + } rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; if (size > rounded_size) { - if (local_flags & CL_COMMIT) + if (local_flags & CL_COMMIT) { ubc_upl_abort_range(upl, upl_offset + rounded_size, size - rounded_size, - UPL_ABORT_FREE_ON_EMPTY); + UPL_ABORT_FREE_ON_EMPTY); + } } - return (cluster_io(vp, upl, upl_offset, f_offset, io_size, - local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg)); + return cluster_io(vp, upl, upl_offset, f_offset, io_size, + local_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); } int cluster_pagein(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset, - int size, off_t filesize, int flags) + int size, off_t filesize, int flags) { - return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL); + return cluster_pagein_ext(vp, upl, upl_offset, f_offset, size, filesize, flags, NULL, NULL); } int cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offset, - int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg) + int size, off_t filesize, int flags, int (*callback)(buf_t, void *), void *callback_arg) { u_int io_size; int rounded_size; - off_t max_size; + off_t max_size; int retval; int local_flags = 0; - if (upl == NULL || size < 0) - panic("cluster_pagein: NULL upl passed in"); + if (upl == NULL || size < 0) { + panic("cluster_pagein: NULL upl passed in"); + } - if ((flags & UPL_IOSYNC) == 0) - local_flags |= CL_ASYNC; - if ((flags & UPL_NOCOMMIT) == 0) + if ((flags & UPL_IOSYNC) == 0) { + local_flags |= CL_ASYNC; + } + if ((flags & UPL_NOCOMMIT) == 0) { local_flags |= CL_COMMIT; - if (flags & UPL_IOSTREAMING) + } + if (flags & UPL_IOSTREAMING) { local_flags |= CL_IOSTREAMING; - if (flags & UPL_PAGING_ENCRYPTED) + } + if (flags & UPL_PAGING_ENCRYPTED) { local_flags |= CL_ENCRYPTED; + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 56)) | DBG_FUNC_NONE, - (int)f_offset, size, (int)filesize, local_flags, 0); + (int)f_offset, size, (int)filesize, local_flags, 0); /* * can't page-in from a negative offset @@ -2160,57 +2250,62 @@ cluster_pagein_ext(vnode_t vp, upl_t upl, upl_offset_t upl_offset, off_t f_offse * or the size requested isn't a multiple of PAGE_SIZE */ if (f_offset < 0 || f_offset >= filesize || - (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || 
(upl_offset & PAGE_MASK)) { - if (local_flags & CL_COMMIT) - ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); - return (EINVAL); + (f_offset & PAGE_MASK_64) || (size & PAGE_MASK) || (upl_offset & PAGE_MASK)) { + if (local_flags & CL_COMMIT) { + ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + } + return EINVAL; } max_size = filesize - f_offset; - if (size < max_size) - io_size = size; - else - io_size = max_size; + if (size < max_size) { + io_size = size; + } else { + io_size = max_size; + } rounded_size = (io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; - if (size > rounded_size && (local_flags & CL_COMMIT)) + if (size > rounded_size && (local_flags & CL_COMMIT)) { ubc_upl_abort_range(upl, upl_offset + rounded_size, - size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); - + size - rounded_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + } + retval = cluster_io(vp, upl, upl_offset, f_offset, io_size, - local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); + local_flags | CL_READ | CL_PAGEIN, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); - return (retval); + return retval; } int cluster_bp(buf_t bp) { - return cluster_bp_ext(bp, NULL, NULL); + return cluster_bp_ext(bp, NULL, NULL); } int cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg) { - off_t f_offset; + off_t f_offset; int flags; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 19)) | DBG_FUNC_START, - bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0); + bp, (int)bp->b_lblkno, bp->b_bcount, bp->b_flags, 0); - if (bp->b_flags & B_READ) - flags = CL_ASYNC | CL_READ; - else - flags = CL_ASYNC; - if (bp->b_flags & B_PASSIVE) + if (bp->b_flags & B_READ) { + flags = CL_ASYNC | CL_READ; + } else { + flags = CL_ASYNC; + } + if (bp->b_flags & B_PASSIVE) { flags |= CL_PASSIVE; + } f_offset = ubc_blktooff(bp->b_vp, bp->b_lblkno); - return (cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg)); + return cluster_io(bp->b_vp, bp->b_upl, 0, f_offset, bp->b_bcount, flags, bp, (struct clios *)NULL, callback, callback_arg); } @@ -2218,70 +2313,71 @@ cluster_bp_ext(buf_t bp, int (*callback)(buf_t, void *), void *callback_arg) int cluster_write(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, int xflags) { - return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL); + return cluster_write_ext(vp, uio, oldEOF, newEOF, headOff, tailOff, xflags, NULL, NULL); } int cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t headOff, off_t tailOff, - int xflags, int (*callback)(buf_t, void *), void *callback_arg) + int xflags, int (*callback)(buf_t, void *), void *callback_arg) { - user_ssize_t cur_resid; - int retval = 0; - int flags; - int zflags; + user_ssize_t cur_resid; + int retval = 0; + int flags; + int zflags; int bflag; - int write_type = IO_COPY; - u_int32_t write_length; + int write_type = IO_COPY; + u_int32_t write_length; flags = xflags; - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { bflag = CL_PASSIVE; - else + } else { bflag = 0; + } - if (vp->v_flag & VNOCACHE_DATA){ - flags |= IO_NOCACHE; + if (vp->v_flag & VNOCACHE_DATA) { + flags |= IO_NOCACHE; bflag |= CL_NOCACHE; } - if (uio == NULL) { - /* + if (uio == NULL) { + /* * no user data... 
* this call is being made to zero-fill some range in the file */ - retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg); - - return(retval); - } - /* - * do a write through the cache if one of the following is true.... - * NOCACHE is not true or NODIRECT is true - * the uio request doesn't target USERSPACE - * otherwise, find out if we want the direct or contig variant for - * the first vector in the uio request - */ - if ( ((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ) - retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE); - - if ( (flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT) - /* + retval = cluster_write_copy(vp, NULL, (u_int32_t)0, oldEOF, newEOF, headOff, tailOff, flags, callback, callback_arg); + + return retval; + } + /* + * do a write through the cache if one of the following is true.... + * NOCACHE is not true or NODIRECT is true + * the uio request doesn't target USERSPACE + * otherwise, find out if we want the direct or contig variant for + * the first vector in the uio request + */ + if (((flags & (IO_NOCACHE | IO_NODIRECT)) == IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) { + retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE); + } + + if ((flags & (IO_TAILZEROFILL | IO_HEADZEROFILL)) && write_type == IO_DIRECT) { + /* * must go through the cached variant in this case */ - write_type = IO_COPY; + write_type = IO_COPY; + } while ((cur_resid = uio_resid(uio)) && uio->uio_offset < newEOF && retval == 0) { - - switch (write_type) { - + switch (write_type) { case IO_COPY: - /* + /* * make sure the uio_resid isn't too big... * internally, we want to handle all of the I/O in * chunk sizes that fit in a 32 bit int */ - if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) { - /* + if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) { + /* * we're going to have to call cluster_write_copy * more than once... 
* @@ -2289,46 +2385,47 @@ cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t * have the IO_TAILZEROFILL flag set and only the * first call should have IO_HEADZEROFILL */ - zflags = flags & ~IO_TAILZEROFILL; + zflags = flags & ~IO_TAILZEROFILL; flags &= ~IO_HEADZEROFILL; write_length = MAX_IO_REQUEST_SIZE; } else { - /* + /* * last call to cluster_write_copy */ - zflags = flags; - + zflags = flags; + write_length = (u_int32_t)cur_resid; } retval = cluster_write_copy(vp, uio, write_length, oldEOF, newEOF, headOff, tailOff, zflags, callback, callback_arg); break; case IO_CONTIG: - zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL); + zflags = flags & ~(IO_TAILZEROFILL | IO_HEADZEROFILL); if (flags & IO_HEADZEROFILL) { - /* + /* * only do this once per request */ - flags &= ~IO_HEADZEROFILL; + flags &= ~IO_HEADZEROFILL; retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, uio->uio_offset, - headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg); - if (retval) - break; + headOff, (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg); + if (retval) { + break; + } } retval = cluster_write_contig(vp, uio, newEOF, &write_type, &write_length, callback, callback_arg, bflag); if (retval == 0 && (flags & IO_TAILZEROFILL) && uio_resid(uio) == 0) { - /* + /* * we're done with the data from the user specified buffer(s) * and we've been requested to zero fill at the tail * treat this as an IO_HEADZEROFILL which doesn't require a uio * by rearranging the args and passing in IO_HEADZEROFILL */ - retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset, - (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg); + retval = cluster_write_copy(vp, (struct uio *)0, (u_int32_t)0, (off_t)0, tailOff, uio->uio_offset, + (off_t)0, zflags | IO_HEADZEROFILL | IO_SYNC, callback, callback_arg); } break; @@ -2340,7 +2437,7 @@ cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t break; case IO_UNKNOWN: - retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE); + retval = cluster_io_type(uio, &write_type, &write_length, MIN_DIRECT_WRITE_SIZE); break; } /* @@ -2350,53 +2447,54 @@ cluster_write_ext(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, off_t * don't zero-fill the head of a page if we've successfully written * data to that area... 'cluster_write_copy' will zero-fill the head of a * page that is beyond the oldEOF if the write is unaligned... we only - * want that to happen for the very first page of the cluster_write, + * want that to happen for the very first page of the cluster_write, * NOT the first page of each vector making up a multi-vector write. 
*/ - if (uio->uio_offset > oldEOF) + if (uio->uio_offset > oldEOF) { oldEOF = uio->uio_offset; + } } - return (retval); + return retval; } static int cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, int *write_type, u_int32_t *write_length, - int flags, int (*callback)(buf_t, void *), void *callback_arg) + int flags, int (*callback)(buf_t, void *), void *callback_arg) { upl_t upl; upl_page_info_t *pl; vm_offset_t upl_offset; - vm_offset_t vector_upl_offset = 0; - u_int32_t io_req_size; - u_int32_t offset_in_file; - u_int32_t offset_in_iovbase; + vm_offset_t vector_upl_offset = 0; + u_int32_t io_req_size; + u_int32_t offset_in_file; + u_int32_t offset_in_iovbase; u_int32_t io_size; int io_flag = 0; - upl_size_t upl_size, vector_upl_size = 0; - vm_size_t upl_needed_size; - mach_msg_type_number_t pages_in_pl; + upl_size_t upl_size, vector_upl_size = 0; + vm_size_t upl_needed_size; + mach_msg_type_number_t pages_in_pl; upl_control_flags_t upl_flags; kern_return_t kret; - mach_msg_type_number_t i; + mach_msg_type_number_t i; int force_data_sync; int retval = 0; - int first_IO = 1; + int first_IO = 1; struct clios iostate; - user_addr_t iov_base; - u_int32_t mem_alignment_mask; - u_int32_t devblocksize; - u_int32_t max_io_size; - u_int32_t max_upl_size; + user_addr_t iov_base; + u_int32_t mem_alignment_mask; + u_int32_t devblocksize; + u_int32_t max_io_size; + u_int32_t max_upl_size; u_int32_t max_vector_size; - u_int32_t bytes_outstanding_limit; - boolean_t io_throttled = FALSE; + u_int32_t bytes_outstanding_limit; + boolean_t io_throttled = FALSE; - u_int32_t vector_upl_iosize = 0; - int issueVectorUPL = 0,useVectorUPL = (uio->uio_iovcnt > 1); - off_t v_upl_uio_offset = 0; - int vector_upl_index=0; - upl_t vector_upl = NULL; + u_int32_t vector_upl_iosize = 0; + int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1); + off_t v_upl_uio_offset = 0; + int vector_upl_index = 0; + upl_t vector_upl = NULL; /* @@ -2404,20 +2502,23 @@ cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, in * -- the resid will not exceed iov_len */ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_START, - (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0); + (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0); max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE); io_flag = CL_ASYNC | CL_PRESERVE | CL_COMMIT | CL_THROTTLE | CL_DIRECT_IO; - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { io_flag |= CL_PASSIVE; - - if (flags & IO_NOCACHE) - io_flag |= CL_NOCACHE; - - if (flags & IO_SKIP_ENCRYPTION) + } + + if (flags & IO_NOCACHE) { + io_flag |= CL_NOCACHE; + } + + if (flags & IO_SKIP_ENCRYPTION) { io_flag |= CL_ENCRYPTED; + } iostate.io_completed = 0; iostate.io_issued = 0; @@ -2430,17 +2531,17 @@ cluster_write_direct(vnode_t vp, struct uio *uio, off_t oldEOF, off_t newEOF, in devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize; if (devblocksize == 1) { - /* - * the AFP client advertises a devblocksize of 1 - * however, its BLOCKMAP routine maps to physical - * blocks that are PAGE_SIZE in size... - * therefore we can't ask for I/Os that aren't page aligned - * or aren't multiples of PAGE_SIZE in size - * by setting devblocksize to PAGE_SIZE, we re-instate - * the old behavior we had before the mem_alignment_mask - * changes went in... - */ - devblocksize = PAGE_SIZE; + /* + * the AFP client advertises a devblocksize of 1 + * however, its BLOCKMAP routine maps to physical + * blocks that are PAGE_SIZE in size... 
+ * therefore we can't ask for I/Os that aren't page aligned + * or aren't multiples of PAGE_SIZE in size + * by setting devblocksize to PAGE_SIZE, we re-instate + * the old behavior we had before the mem_alignment_mask + * changes went in... + */ + devblocksize = PAGE_SIZE; } next_dwrite: @@ -2451,32 +2552,32 @@ next_dwrite: offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask; if (offset_in_file || offset_in_iovbase) { - /* + /* * one of the 2 important offsets is misaligned * so fire an I/O through the cache for this entire vector */ - goto wait_for_dwrites; + goto wait_for_dwrites; } if (iov_base & (devblocksize - 1)) { - /* + /* * the offset in memory must be on a device block boundary * so that we can guarantee that we can generate an * I/O that ends on a page boundary in cluster_io */ - goto wait_for_dwrites; - } + goto wait_for_dwrites; + } task_update_logical_writes(current_task(), (io_req_size & ~PAGE_MASK), TASK_WRITE_IMMEDIATE, vp); while (io_req_size >= PAGE_SIZE && uio->uio_offset < newEOF && retval == 0) { - int throttle_type; + int throttle_type; - if ( (throttle_type = cluster_is_throttled(vp)) ) { + if ((throttle_type = cluster_is_throttled(vp))) { /* * we're in the throttle window, at the very least * we want to limit the size of the I/O we're about * to issue */ - if ( (flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) { + if ((flags & IO_RETURN_ON_THROTTLE) && throttle_type == THROTTLE_NOW) { /* * we're in the throttle window and at least 1 I/O * has already been issued by a throttleable thread @@ -2496,59 +2597,60 @@ next_dwrite: max_io_size = max_upl_size; } - if (first_IO) { - cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0); + if (first_IO) { + cluster_syncup(vp, newEOF, callback, callback_arg, callback ? PUSH_SYNC : 0); first_IO = 0; } - io_size = io_req_size & ~PAGE_MASK; + io_size = io_req_size & ~PAGE_MASK; iov_base = uio_curriovbase(uio); - if (io_size > max_io_size) - io_size = max_io_size; + if (io_size > max_io_size) { + io_size = max_io_size; + } - if(useVectorUPL && (iov_base & PAGE_MASK)) { + if (useVectorUPL && (iov_base & PAGE_MASK)) { /* * We have an iov_base that's not page-aligned. - * Issue all I/O's that have been collected within + * Issue all I/O's that have been collected within * this Vectored UPL. */ - if(vector_upl_index) { + if (vector_upl_index) { retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); reset_vector_run_state(); } - - /* - * After this point, if we are using the Vector UPL path and the base is - * not page-aligned then the UPL with that base will be the first in the vector UPL. - */ + + /* + * After this point, if we are using the Vector UPL path and the base is + * not page-aligned then the UPL with that base will be the first in the vector UPL. + */ } upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK); - upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK; + upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_START, - (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0); + (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0); vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? 
current_map() : kernel_map; for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) { - pages_in_pl = 0; + pages_in_pl = 0; upl_size = upl_needed_size; upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; + UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; kret = vm_map_get_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), - &upl_size, - &upl, - NULL, - &pages_in_pl, - &upl_flags, - VM_KERN_MEMORY_FILE, - force_data_sync); + (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + &upl_size, + &upl, + NULL, + &pages_in_pl, + &upl_flags, + VM_KERN_MEMORY_FILE, + force_data_sync); if (kret != KERN_SUCCESS) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END, - 0, 0, 0, kret, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END, + 0, 0, 0, kret, 0); /* * failed to get pagelist * @@ -2562,11 +2664,13 @@ next_dwrite: pages_in_pl = upl_size / PAGE_SIZE; for (i = 0; i < pages_in_pl; i++) { - if (!upl_valid_page(pl, i)) - break; + if (!upl_valid_page(pl, i)) { + break; + } + } + if (i == pages_in_pl) { + break; } - if (i == pages_in_pl) - break; /* * didn't get all the pages back that we @@ -2575,8 +2679,8 @@ next_dwrite: ubc_upl_abort(upl, 0); } if (force_data_sync >= 3) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END, - i, pages_in_pl, upl_size, kret, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END, + i, pages_in_pl, upl_size, kret, 0); /* * for some reason, we couldn't acquire a hold on all * the pages needed in the user's address space @@ -2592,16 +2696,17 @@ next_dwrite: * Consider the possibility that upl_size wasn't satisfied. */ if (upl_size < upl_needed_size) { - if (upl_size && upl_offset == 0) - io_size = upl_size; - else - io_size = 0; + if (upl_size && upl_offset == 0) { + io_size = upl_size; + } else { + io_size = 0; + } } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 76)) | DBG_FUNC_END, - (int)upl_offset, upl_size, (int)iov_base, io_size, 0); + (int)upl_offset, upl_size, (int)iov_base, io_size, 0); if (io_size == 0) { - ubc_upl_abort(upl, 0); + ubc_upl_abort(upl, 0); /* * we may have already spun some portion of this request * off as async requests... 
we need to wait for the I/O @@ -2609,11 +2714,12 @@ next_dwrite: */ goto wait_for_dwrites; } - - if(useVectorUPL) { + + if (useVectorUPL) { vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK); - if(end_off) + if (end_off) { issueVectorUPL = 1; + } /* * After this point, if we are using a vector UPL, then * either all the UPL elements end on a page boundary OR @@ -2628,51 +2734,51 @@ next_dwrite: * if there are already too many outstanding writes * wait until some complete before issuing the next */ - if (vp->v_mount->mnt_minsaturationbytecount) + if (vp->v_mount->mnt_minsaturationbytecount) { bytes_outstanding_limit = vp->v_mount->mnt_minsaturationbytecount; - else + } else { bytes_outstanding_limit = max_upl_size * IO_SCALE(vp, 2); + } cluster_iostate_wait(&iostate, bytes_outstanding_limit, "cluster_write_direct"); if (iostate.io_error) { - /* + /* * one of the earlier writes we issued ran into a hard error * don't issue any more writes, cleanup the UPL * that was just created but not used, then * go wait for all writes that are part of this stream * to complete before returning the error to the caller */ - ubc_upl_abort(upl, 0); + ubc_upl_abort(upl, 0); - goto wait_for_dwrites; - } + goto wait_for_dwrites; + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_START, - (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0); + (int)upl_offset, (int)uio->uio_offset, io_size, io_flag, 0); - if(!useVectorUPL) + if (!useVectorUPL) { retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, - io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); - - else { - if(!vector_upl_index) { + io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); + } else { + if (!vector_upl_index) { vector_upl = vector_upl_create(upl_offset); v_upl_uio_offset = uio->uio_offset; vector_upl_offset = upl_offset; } - vector_upl_set_subupl(vector_upl,upl,upl_size); + vector_upl_set_subupl(vector_upl, upl, upl_size); vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size); vector_upl_index++; vector_upl_iosize += io_size; vector_upl_size += upl_size; - if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= max_vector_size) { + if (issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= max_vector_size) { retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); reset_vector_run_state(); } - } + } /* * update the uio structure to @@ -2686,37 +2792,35 @@ next_dwrite: * don't zero-fill the head of a page if we've successfully written * data to that area... 'cluster_write_copy' will zero-fill the head of a * page that is beyond the oldEOF if the write is unaligned... we only - * want that to happen for the very first page of the cluster_write, + * want that to happen for the very first page of the cluster_write, * NOT the first page of each vector making up a multi-vector write. 
*/ - if (uio->uio_offset > oldEOF) + if (uio->uio_offset > oldEOF) { oldEOF = uio->uio_offset; + } io_req_size -= io_size; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 77)) | DBG_FUNC_END, - (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0); - + (int)upl_offset, (int)uio->uio_offset, io_req_size, retval, 0); } /* end while */ - if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) { - - retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE); + if (retval == 0 && iostate.io_error == 0 && io_req_size == 0) { + retval = cluster_io_type(uio, write_type, write_length, MIN_DIRECT_WRITE_SIZE); if (retval == 0 && *write_type == IO_DIRECT) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE, + (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0); - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_NONE, - (int)uio->uio_offset, *write_length, (int)newEOF, 0, 0); - - goto next_dwrite; + goto next_dwrite; } - } + } wait_for_dwrites: if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) { retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); - reset_vector_run_state(); + reset_vector_run_state(); } /* * make sure all async writes issued as part of this stream @@ -2724,60 +2828,63 @@ wait_for_dwrites: */ cluster_iostate_wait(&iostate, 0, "cluster_write_direct"); - if (iostate.io_error) - retval = iostate.io_error; + if (iostate.io_error) { + retval = iostate.io_error; + } lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); - if (io_throttled == TRUE && retval == 0) + if (io_throttled == TRUE && retval == 0) { retval = EAGAIN; + } if (io_req_size && retval == 0) { - /* + /* * we couldn't handle the tail of this request in DIRECT mode * so fire it through the copy path * * note that flags will never have IO_HEADZEROFILL or IO_TAILZEROFILL set * so we can just pass 0 in for the headOff and tailOff */ - if (uio->uio_offset > oldEOF) + if (uio->uio_offset > oldEOF) { oldEOF = uio->uio_offset; + } - retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg); + retval = cluster_write_copy(vp, uio, io_req_size, oldEOF, newEOF, (off_t)0, (off_t)0, flags, callback, callback_arg); *write_type = IO_UNKNOWN; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 75)) | DBG_FUNC_END, - (int)uio->uio_offset, io_req_size, retval, 4, 0); + (int)uio->uio_offset, io_req_size, retval, 4, 0); - return (retval); + return retval; } static int cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, u_int32_t *write_length, - int (*callback)(buf_t, void *), void *callback_arg, int bflag) + int (*callback)(buf_t, void *), void *callback_arg, int bflag) { upl_page_info_t *pl; - addr64_t src_paddr = 0; - upl_t upl[MAX_VECTS]; + addr64_t src_paddr = 0; + upl_t upl[MAX_VECTS]; vm_offset_t upl_offset; u_int32_t tail_size = 0; - u_int32_t io_size; - u_int32_t xsize; - upl_size_t upl_size; - vm_size_t upl_needed_size; - mach_msg_type_number_t pages_in_pl; + u_int32_t io_size; + u_int32_t xsize; + upl_size_t upl_size; + vm_size_t upl_needed_size; + mach_msg_type_number_t pages_in_pl; upl_control_flags_t upl_flags; kern_return_t kret; - struct clios iostate; + struct clios iostate; int error = 0; - int cur_upl = 0; - int num_upl = 0; - int n; - user_addr_t iov_base; - u_int32_t devblocksize; - u_int32_t mem_alignment_mask; + int cur_upl = 0; + int num_upl = 0; + int n; + user_addr_t iov_base; + u_int32_t 
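/*
 * [Illustrative sketch, not part of the patch] Everything in this routine
 * funnels through the struct clios accounting that wait_for_dwrites
 * drains above: io_issued grows as async cluster_io() calls are fired,
 * io_completed grows in the completion path, and cluster_iostate_wait()
 * sleeps until the outstanding delta drops to a limit (0 == full drain).
 * Modeled here with pthreads in place of lck_mtx/msleep; the field names
 * follow the patch, the locking primitives are userland substitutes.
 */
#include <pthread.h>

struct clios_model {
    pthread_mutex_t mtx;
    pthread_cond_t  cv;
    unsigned long   io_issued;      /* bytes handed to async I/O */
    unsigned long   io_completed;   /* bytes finished by completions */
    int             io_error;      /* first hard error seen, if any */
};

static void
iostate_wait(struct clios_model *s, unsigned long limit)
{
    pthread_mutex_lock(&s->mtx);
    while (s->io_issued - s->io_completed > limit) {
        pthread_cond_wait(&s->cv, &s->mtx);
    }
    pthread_mutex_unlock(&s->mtx);
}

static void
iostate_complete(struct clios_model *s, unsigned long bytes, int error)
{
    pthread_mutex_lock(&s->mtx);
    s->io_completed += bytes;
    if (error && s->io_error == 0) {
        s->io_error = error;        /* remember the first failure */
    }
    pthread_cond_broadcast(&s->cv);
    pthread_mutex_unlock(&s->mtx);
}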
devblocksize; + u_int32_t mem_alignment_mask; /* * When we enter this routine, we know @@ -2789,10 +2896,10 @@ cluster_write_contig(vnode_t vp, struct uio *uio, off_t newEOF, int *write_type, devblocksize = (u_int32_t)vp->v_mount->mnt_devblocksize; mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask; - iostate.io_completed = 0; - iostate.io_issued = 0; - iostate.io_error = 0; - iostate.io_wanted = 0; + iostate.io_completed = 0; + iostate.io_issued = 0; + iostate.io_error = 0; + iostate.io_wanted = 0; lck_mtx_init(&iostate.io_mtxp, cl_mtx_grp, cl_mtx_attr); @@ -2806,19 +2913,19 @@ next_cwrite: pages_in_pl = 0; upl_size = upl_needed_size; - upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; + upl_flags = UPL_FILE_IO | UPL_COPYOUT_FROM | UPL_NO_SYNC | + UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map; kret = vm_map_get_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), - &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0); + (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0); if (kret != KERN_SUCCESS) { - /* + /* * failed to get pagelist */ - error = EINVAL; + error = EINVAL; goto wait_for_cwrites; } num_upl++; @@ -2838,17 +2945,19 @@ next_cwrite: src_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset; while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) { - u_int32_t head_size; + u_int32_t head_size; head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1)); - if (head_size > io_size) - head_size = io_size; + if (head_size > io_size) { + head_size = io_size; + } error = cluster_align_phys_io(vp, uio, src_paddr, head_size, 0, callback, callback_arg); - if (error) - goto wait_for_cwrites; + if (error) { + goto wait_for_cwrites; + } upl_offset += head_size; src_paddr += head_size; @@ -2857,14 +2966,14 @@ next_cwrite: iov_base += head_size; } if ((u_int32_t)iov_base & mem_alignment_mask) { - /* + /* * request doesn't set up on a memory boundary * the underlying DMA engine can handle... * return an error instead of going through * the slow copy path since the intent of this * path is direct I/O from device memory */ - error = EINVAL; + error = EINVAL; goto wait_for_cwrites; } @@ -2872,11 +2981,11 @@ next_cwrite: io_size -= tail_size; while (io_size && error == 0) { - - if (io_size > MAX_IO_CONTIG_SIZE) - xsize = MAX_IO_CONTIG_SIZE; - else - xsize = io_size; + if (io_size > MAX_IO_CONTIG_SIZE) { + xsize = MAX_IO_CONTIG_SIZE; + } else { + xsize = io_size; + } /* * request asynchronously so that we can overlap * the preparation of the next I/O... we'll do @@ -2887,67 +2996,70 @@ next_cwrite: */ cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_write_contig"); - if (iostate.io_error) { - /* - * one of the earlier writes we issued ran into a hard error - * don't issue any more writes... - * go wait for all writes that are part of this stream - * to complete before returning the error to the caller - */ - goto wait_for_cwrites; + if (iostate.io_error) { + /* + * one of the earlier writes we issued ran into a hard error + * don't issue any more writes... 
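/*
 * [Illustrative sketch, not part of the patch] The head-trimming loop in
 * cluster_write_contig above peels bytes off the front of the transfer
 * until the file offset reaches a device-block boundary, handing each
 * sliver to cluster_align_phys_io(); a buffer that still violates
 * mem_alignment_mask afterwards gets EINVAL rather than a silent
 * fallback, since this path exists for direct I/O from device memory.
 * The per-iteration arithmetic (devblocksize a power of two, as the
 * kernel's masking assumes):
 */
#include <stdint.h>

static uint32_t
head_bytes_to_align(uint64_t file_offset, uint32_t io_size,
    uint32_t devblocksize)
{
    uint32_t misalign = (uint32_t)(file_offset & (devblocksize - 1));
    uint32_t head;

    if (misalign == 0) {
        return 0;                       /* already block-aligned */
    }
    head = devblocksize - misalign;     /* bytes to the next boundary */
    return head > io_size ? io_size : head;
}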
+ * go wait for all writes that are part of this stream + * to complete before returning the error to the caller + */ + goto wait_for_cwrites; } - /* + /* * issue an asynchronous write to cluster_io */ - error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, - xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg); + error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, + xsize, CL_DEV_MEMORY | CL_ASYNC | bflag, (buf_t)NULL, (struct clios *)&iostate, callback, callback_arg); if (error == 0) { - /* + /* * The cluster_io write completed successfully, * update the uio structure */ - uio_update(uio, (user_size_t)xsize); + uio_update(uio, (user_size_t)xsize); upl_offset += xsize; src_paddr += xsize; io_size -= xsize; } } - if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) { - - error = cluster_io_type(uio, write_type, write_length, 0); + if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) { + error = cluster_io_type(uio, write_type, write_length, 0); if (error == 0 && *write_type == IO_CONTIG) { - cur_upl++; - goto next_cwrite; + cur_upl++; + goto next_cwrite; } - } else - *write_type = IO_UNKNOWN; + } else { + *write_type = IO_UNKNOWN; + } wait_for_cwrites: /* - * make sure all async writes that are part of this stream - * have completed before we proceed - */ + * make sure all async writes that are part of this stream + * have completed before we proceed + */ cluster_iostate_wait(&iostate, 0, "cluster_write_contig"); - if (iostate.io_error) - error = iostate.io_error; + if (iostate.io_error) { + error = iostate.io_error; + } lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); - if (error == 0 && tail_size) - error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg); + if (error == 0 && tail_size) { + error = cluster_align_phys_io(vp, uio, src_paddr, tail_size, 0, callback, callback_arg); + } - for (n = 0; n < num_upl; n++) - /* + for (n = 0; n < num_upl; n++) { + /* * just release our hold on each physically contiguous * region without changing any state */ - ubc_upl_abort(upl[n], 0); + ubc_upl_abort(upl[n], 0); + } - return (error); + return error; } @@ -2967,9 +3079,8 @@ cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off int zero_pg_index; boolean_t need_cluster_zero = TRUE; - if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) { - - bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64)); + if ((flags & (IO_NOZEROVALID | IO_NOZERODIRTY))) { + bytes_to_zero = min(bytes_to_zero, PAGE_SIZE - (int)(zero_off & PAGE_MASK_64)); zero_pg_index = (int)((zero_off - upl_f_offset) / PAGE_SIZE_64); if (upl_valid_page(pl, zero_pg_index)) { @@ -2978,12 +3089,13 @@ cluster_zero_range(upl_t upl, upl_page_info_t *pl, int flags, int io_offset, off * we'll leave these in the UPL for cluster_write_copy to deal with */ need_cluster_zero = FALSE; - } + } } - if (need_cluster_zero == TRUE) + if (need_cluster_zero == TRUE) { cluster_zero(upl, io_offset, bytes_to_zero, NULL); + } - return (bytes_to_zero); + return bytes_to_zero; } @@ -3001,19 +3113,19 @@ cluster_update_state(vnode_t vp, vm_object_offset_t s_offset, vm_object_offset_t cl.e_addr = (daddr64_t)(e_offset / PAGE_SIZE_64); cluster_update_state_internal(vp, &cl, 0, TRUE, &first_pass, s_offset, (int)(e_offset - s_offset), - vp->v_un.vu_ubcinfo->ui_size, NULL, NULL, vm_initiated); + vp->v_un.vu_ubcinfo->ui_size, NULL, NULL, vm_initiated); } static void 
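/*
 * [Illustrative sketch, not part of the patch] cluster_zero_range(),
 * reflowed just above, has one real decision in it: under IO_NOZEROVALID
 * or IO_NOZERODIRTY the zeroing is clamped to the current page, and a
 * page the UPL already holds valid is left alone for cluster_write_copy
 * to deal with. page_is_valid() stands in for upl_valid_page(); 4096
 * stands in for PAGE_SIZE.
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SZ_MODEL 4096LL

extern bool page_is_valid(int pg);      /* stand-in for upl_valid_page() */

static bool                             /* true => caller should zero */
zero_range_model(bool no_zero_flags, int64_t zero_off, int64_t upl_f_offset,
    int64_t *bytes_to_zero)
{
    if (no_zero_flags) {
        int64_t in_page = PAGE_SZ_MODEL - (zero_off & (PAGE_SZ_MODEL - 1));
        int zero_pg = (int)((zero_off - upl_f_offset) / PAGE_SZ_MODEL);

        if (*bytes_to_zero > in_page) {
            *bytes_to_zero = in_page;   /* clamp to the current page */
        }
        if (page_is_valid(zero_pg)) {
            return false;               /* valid page: skip the zeroing */
        }
    }
    return true;
}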
cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boolean_t defer_writes, - boolean_t *first_pass, off_t write_off, int write_cnt, off_t newEOF, - int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) + boolean_t *first_pass, off_t write_off, int write_cnt, off_t newEOF, + int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) { struct cl_writebehind *wbp; - int cl_index; - int ret_cluster_try_push; - u_int max_cluster_pgcount; + int cl_index; + int ret_cluster_try_push; + u_int max_cluster_pgcount; max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE; @@ -3025,13 +3137,12 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole wbp = cluster_get_wbp(vp, CLW_ALLOCATE | CLW_RETURNLOCKED); if (wbp->cl_scmap) { - - if ( !(flags & IO_NOCACHE)) { - /* + if (!(flags & IO_NOCACHE)) { + /* * we've fallen into the sparse * cluster method of delaying dirty pages */ - sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated); + sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated); lck_mtx_unlock(&wbp->cl_lockw); return; @@ -3056,20 +3167,22 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole goto start_new_cluster; } if (*first_pass == TRUE) { - if (write_off == wbp->cl_last_write) + if (write_off == wbp->cl_last_write) { wbp->cl_seq_written += write_cnt; - else + } else { wbp->cl_seq_written = write_cnt; + } wbp->cl_last_write = write_off + write_cnt; *first_pass = FALSE; } - if (wbp->cl_number == 0) + if (wbp->cl_number == 0) { /* * no clusters currently present */ goto start_new_cluster; + } for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) { /* @@ -3088,11 +3201,12 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole * we have a write that fits entirely * within the existing cluster limits */ - if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) + if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr) { /* * update our idea of where the cluster ends */ wbp->cl_clusters[cl_index].e_addr = cl->e_addr; + } break; } if (cl->b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) { @@ -3115,7 +3229,7 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole * beyond the limit of the existing cluster or we have a leftover * tail after a partial absorbtion * - * in either case, we'll check the remaining clusters before + * in either case, we'll check the remaining clusters before * starting a new one */ } else { @@ -3126,7 +3240,7 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole /* * we can just merge the new request into * this cluster and leave it in the cache - * since the resulting cluster is still + * since the resulting cluster is still * less than the maximum allowable size */ wbp->cl_clusters[cl_index].b_addr = cl->b_addr; @@ -3150,7 +3264,7 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole * the cluster we're currently considering... 
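/*
 * [Illustrative sketch, not part of the patch] The merge scan reformatted
 * above tries to absorb the new dirty extent into one of the wbp's
 * existing clusters. The simplest of its several cases -- the write
 * starts inside a cluster and the result stays within the growth limit
 * of max_cluster_pgcount pages -- reduces to this; the partial-absorption
 * and prepend cases in the hunk are omitted here.
 */
#include <stdbool.h>
#include <stdint.h>

struct cl_model {
    int64_t b_addr;     /* first page-sized block of the cluster */
    int64_t e_addr;     /* one past the last block */
};

static bool
try_absorb(struct cl_model *cl, int64_t b, int64_t e, unsigned max_pg)
{
    if (b < cl->b_addr || b > cl->e_addr) {
        return false;   /* write doesn't start inside this cluster */
    }
    if (e > cl->b_addr + (int64_t)max_pg) {
        return false;   /* absorbing it would exceed the size limit */
    }
    if (e > cl->e_addr) {
        cl->e_addr = e; /* stretch our idea of where the cluster ends */
    }
    return true;
}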
in fact, we'll * stretch the cluster out to it's full limit and see if we * get an intersection with the current write - * + * */ if (cl->e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) { /* @@ -3164,41 +3278,46 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole } /* * if we get here, there was no way to merge - * any portion of this write with this cluster - * or we could only merge part of it which + * any portion of this write with this cluster + * or we could only merge part of it which * will leave a tail... * we'll check the remaining clusters before starting a new one */ } } - if (cl_index < wbp->cl_number) + if (cl_index < wbp->cl_number) { /* * we found an existing cluster(s) that we * could entirely merge this I/O into */ goto delay_io; + } if (defer_writes == FALSE && wbp->cl_number == MAX_CLUSTERS && wbp->cl_seq_written >= (MAX_CLUSTERS * (max_cluster_pgcount * PAGE_SIZE))) { - uint32_t n; + uint32_t n; if (vp->v_mount->mnt_minsaturationbytecount) { n = vp->v_mount->mnt_minsaturationbytecount / MAX_CLUSTER_SIZE(vp); - - if (n > MAX_CLUSTERS) + + if (n > MAX_CLUSTERS) { n = MAX_CLUSTERS; - } else + } + } else { n = 0; + } if (n == 0) { - if (disk_conditioner_mount_is_ssd(vp->v_mount)) + if (disk_conditioner_mount_is_ssd(vp->v_mount)) { n = WRITE_BEHIND_SSD; - else + } else { n = WRITE_BEHIND; + } + } + while (n--) { + cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg, NULL, vm_initiated); } - while (n--) - cluster_try_push(wbp, vp, newEOF, 0, 0, callback, callback_arg, NULL, vm_initiated); } if (wbp->cl_number < MAX_CLUSTERS) { /* @@ -3210,7 +3329,7 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole } /* * no exisitng cluster to merge with and no - * room to start a new one... we'll try + * room to start a new one... we'll try * pushing one of the existing ones... if none of * them are able to be pushed, we'll switch * to the sparse cluster mechanism @@ -3224,8 +3343,7 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole * if writes are not deferred, call cluster push immediately */ if (defer_writes == FALSE) { - - ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg, NULL, vm_initiated); + ret_cluster_try_push = cluster_try_push(wbp, vp, newEOF, (flags & IO_NOCACHE) ? 0 : PUSH_DELAY, 0, callback, callback_arg, NULL, vm_initiated); } /* * execute following regardless of writes being deferred or not @@ -3236,9 +3354,9 @@ cluster_update_state_internal(vnode_t vp, struct cl_extent *cl, int flags, boole * so let's switch to the more expansive but expensive * sparse mechanism.... 
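/*
 * [Illustrative sketch, not part of the patch] When every cluster slot is
 * full and cl_seq_written shows the stream has stayed sequential, the
 * code above sizes a write-behind burst: mnt_minsaturationbytecount
 * divided into cluster-sized pieces when the mount provides it, otherwise
 * a per-media default. The constants below are stand-ins for
 * MAX_CLUSTERS, WRITE_BEHIND and WRITE_BEHIND_SSD, whose actual values
 * live in the headers.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_CLUSTERS_MODEL     8
#define WRITE_BEHIND_MODEL     1
#define WRITE_BEHIND_SSD_MODEL 1

static unsigned
clusters_to_push(uint32_t min_saturation_bytes, uint32_t cluster_bytes,
    bool is_ssd)
{
    unsigned n = 0;

    if (min_saturation_bytes) {
        n = min_saturation_bytes / cluster_bytes;
        if (n > MAX_CLUSTERS_MODEL) {
            n = MAX_CLUSTERS_MODEL;
        }
    }
    if (n == 0) {
        n = is_ssd ? WRITE_BEHIND_SSD_MODEL : WRITE_BEHIND_MODEL;
    }
    return n;   /* caller pushes this many clusters via cluster_try_push */
}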
*/ - sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg, vm_initiated); + sparse_cluster_switch(wbp, vp, newEOF, callback, callback_arg, vm_initiated); sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, cl, newEOF, callback, callback_arg, vm_initiated); - + lck_mtx_unlock(&wbp->cl_lockw); return; } @@ -3248,11 +3366,13 @@ start_new_cluster: wbp->cl_clusters[wbp->cl_number].io_flags = 0; - if (flags & IO_NOCACHE) + if (flags & IO_NOCACHE) { wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IONOCACHE; + } - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { wbp->cl_clusters[wbp->cl_number].io_flags |= CLW_IOPASSIVE; + } wbp->cl_number++; delay_io: @@ -3263,15 +3383,15 @@ delay_io: static int cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t oldEOF, off_t newEOF, off_t headOff, - off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg) + off_t tailOff, int flags, int (*callback)(buf_t, void *), void *callback_arg) { upl_page_info_t *pl; upl_t upl; vm_offset_t upl_offset = 0; - vm_size_t upl_size; - off_t upl_f_offset; + vm_size_t upl_size; + off_t upl_f_offset; int pages_in_upl; - int start_offset; + int start_offset; int xfer_resid; int io_size; int io_offset; @@ -3285,33 +3405,36 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old off_t zero_off; long long zero_cnt1; off_t zero_off1; - off_t write_off = 0; - int write_cnt = 0; - boolean_t first_pass = FALSE; + off_t write_off = 0; + int write_cnt = 0; + boolean_t first_pass = FALSE; struct cl_extent cl; int bflag; - u_int max_io_size; + u_int max_io_size; if (uio) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START, - (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START, + (int)uio->uio_offset, io_req_size, (int)oldEOF, (int)newEOF, 0); - io_resid = io_req_size; + io_resid = io_req_size; } else { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START, - 0, 0, (int)oldEOF, (int)newEOF, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START, + 0, 0, (int)oldEOF, (int)newEOF, 0); - io_resid = 0; + io_resid = 0; } - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { bflag = CL_PASSIVE; - else + } else { bflag = 0; - if (flags & IO_NOCACHE) + } + if (flags & IO_NOCACHE) { bflag |= CL_NOCACHE; - - if (flags & IO_SKIP_ENCRYPTION) + } + + if (flags & IO_SKIP_ENCRYPTION) { bflag |= CL_ENCRYPTED; + } zero_cnt = 0; zero_cnt1 = 0; @@ -3321,20 +3444,20 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE); if (flags & IO_HEADZEROFILL) { - /* + /* * some filesystems (HFS is one) don't support unallocated holes within a file... * so we zero fill the intervening space between the old EOF and the offset * where the next chunk of real data begins.... ftruncate will also use this * routine to zero fill to the new EOF when growing a file... 
in this case, the * uio structure will not be provided */ - if (uio) { - if (headOff < uio->uio_offset) { - zero_cnt = uio->uio_offset - headOff; + if (uio) { + if (headOff < uio->uio_offset) { + zero_cnt = uio->uio_offset - headOff; zero_off = headOff; } - } else if (headOff < newEOF) { - zero_cnt = newEOF - headOff; + } else if (headOff < newEOF) { + zero_cnt = newEOF - headOff; zero_off = headOff; } } else { @@ -3349,15 +3472,16 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old } } if (flags & IO_TAILZEROFILL) { - if (uio) { - zero_off1 = uio->uio_offset + io_req_size; + if (uio) { + zero_off1 = uio->uio_offset + io_req_size; - if (zero_off1 < tailOff) - zero_cnt1 = tailOff - zero_off1; - } + if (zero_off1 < tailOff) { + zero_cnt1 = tailOff - zero_off1; + } + } } else { if (uio && newEOF > oldEOF) { - zero_off1 = uio->uio_offset + io_req_size; + zero_off1 = uio->uio_offset + io_req_size; if (zero_off1 == newEOF && (zero_off1 & PAGE_MASK_64)) { zero_cnt1 = PAGE_SIZE_64 - (zero_off1 & PAGE_MASK_64); @@ -3367,9 +3491,9 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old } } if (zero_cnt == 0 && uio == (struct uio *) 0) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, - retval, 0, 0, 0, 0); - return (0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, + retval, 0, 0, 0, 0); + return 0; } if (uio) { write_off = uio->uio_offset; @@ -3382,40 +3506,43 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old first_pass = TRUE; } while ((total_size = (io_resid + zero_cnt + zero_cnt1)) && retval == 0) { - /* + /* * for this iteration of the loop, figure out where our starting point is */ - if (zero_cnt) { - start_offset = (int)(zero_off & PAGE_MASK_64); + if (zero_cnt) { + start_offset = (int)(zero_off & PAGE_MASK_64); upl_f_offset = zero_off - start_offset; } else if (io_resid) { - start_offset = (int)(uio->uio_offset & PAGE_MASK_64); + start_offset = (int)(uio->uio_offset & PAGE_MASK_64); upl_f_offset = uio->uio_offset - start_offset; } else { - start_offset = (int)(zero_off1 & PAGE_MASK_64); + start_offset = (int)(zero_off1 & PAGE_MASK_64); upl_f_offset = zero_off1 - start_offset; } - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE, - (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE, + (int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0); - if (total_size > max_io_size) - total_size = max_io_size; + if (total_size > max_io_size) { + total_size = max_io_size; + } cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64); - + if (uio && ((flags & (IO_SYNC | IO_HEADZEROFILL | IO_TAILZEROFILL)) == 0)) { - /* + /* * assumption... 
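/*
 * [Illustrative sketch, not part of the patch] The IO_HEADZEROFILL
 * arithmetic above computes the span of zeroes between headOff and the
 * point where real data begins: the uio offset when a write supplies
 * data, or the new EOF for the uio-less, ftruncate-style call. A pure
 * function with the same logic (data_off < 0 encoding "no uio" is an
 * illustrative convention, not the kernel's):
 */
#include <stdint.h>

static void
head_zero_span(int64_t head_off, int64_t data_off, int64_t new_eof,
    int64_t *zero_off, int64_t *zero_cnt)
{
    *zero_off = 0;
    *zero_cnt = 0;

    if (data_off >= 0) {                /* a uio was supplied */
        if (head_off < data_off) {
            *zero_off = head_off;
            *zero_cnt = data_off - head_off;
        }
    } else if (head_off < new_eof) {    /* zero through to the new EOF */
        *zero_off = head_off;
        *zero_cnt = new_eof - head_off;
    }
}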
total_size <= io_resid * because IO_HEADZEROFILL and IO_TAILZEROFILL not set */ - if ((start_offset + total_size) > max_io_size) - total_size = max_io_size - start_offset; - xfer_resid = total_size; + if ((start_offset + total_size) > max_io_size) { + total_size = max_io_size - start_offset; + } + xfer_resid = total_size; - retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1); + retval = cluster_copy_ubc_data_internal(vp, uio, &xfer_resid, 1, 1); - if (retval) - break; + if (retval) { + break; + } io_resid -= (total_size - xfer_resid); total_size = xfer_resid; @@ -3423,8 +3550,8 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old upl_f_offset = uio->uio_offset - start_offset; if (total_size == 0) { - if (start_offset) { - /* + if (start_offset) { + /* * the write did not finish on a page boundary * which will leave upl_f_offset pointing to the * beginning of the last page written instead of @@ -3432,11 +3559,11 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * so that the cluster code records the last page * written as dirty */ - upl_f_offset += PAGE_SIZE_64; + upl_f_offset += PAGE_SIZE_64; } - upl_size = 0; - - goto check_cluster; + upl_size = 0; + + goto check_cluster; } } /* @@ -3444,22 +3571,24 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * the requested write... limit each call to cluster_io * to the maximum UPL size... cluster_io will clip if * this exceeds the maximum io_size for the device, - * make sure to account for + * make sure to account for * a starting offset that's not page aligned */ upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; - if (upl_size > max_io_size) - upl_size = max_io_size; + if (upl_size > max_io_size) { + upl_size = max_io_size; + } pages_in_upl = upl_size / PAGE_SIZE; io_size = upl_size - start_offset; - - if ((long long)io_size > total_size) - io_size = total_size; + + if ((long long)io_size > total_size) { + io_size = total_size; + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, io_size, total_size, 0, 0); - + /* * Gather the pages from the buffer cache. @@ -3467,17 +3596,18 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * that we intend to modify these pages. */ kret = ubc_create_upl_kernel(vp, - upl_f_offset, - upl_size, - &upl, - &pl, - UPL_SET_LITE | (( uio!=NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 0 : UPL_WILL_MODIFY), - VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) + upl_f_offset, + upl_size, + &upl, + &pl, + UPL_SET_LITE | ((uio != NULL && (uio->uio_flags & UIO_FLAGS_IS_COMPRESSED_FILE)) ? 
0 : UPL_WILL_MODIFY), + VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { panic("cluster_write_copy: failed to get pagelist"); + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, - upl, (int)upl_f_offset, start_offset, 0, 0); + upl, (int)upl_f_offset, start_offset, 0, 0); if (start_offset && upl_f_offset < oldEOF && !upl_valid_page(pl, 0)) { int read_size; @@ -3489,11 +3619,12 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old */ read_size = PAGE_SIZE; - if ((upl_f_offset + read_size) > oldEOF) - read_size = oldEOF - upl_f_offset; + if ((upl_f_offset + read_size) > oldEOF) { + read_size = oldEOF - upl_f_offset; + } - retval = cluster_io(vp, upl, 0, upl_f_offset, read_size, - CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); + retval = cluster_io(vp, upl, 0, upl_f_offset, read_size, + CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); if (retval) { /* * we had an error during the read which causes us to abort @@ -3501,35 +3632,37 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * to release the rest of the pages in the upl without modifying * there state and mark the failed page in error */ - ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); - if (upl_size > PAGE_SIZE) - ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + if (upl_size > PAGE_SIZE) { + ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE, - upl, 0, 0, retval, 0); + upl, 0, 0, retval, 0); break; } } if ((start_offset == 0 || upl_size > PAGE_SIZE) && ((start_offset + io_size) & PAGE_MASK)) { - /* + /* * the last offset we're writing to in this upl does not end on a page * boundary... 
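/*
 * [Illustrative sketch, not part of the patch] The block above is the
 * read-modify-write guard: when the write starts mid-page (start_offset
 * != 0), the UPL begins inside existing data (upl_f_offset < oldEOF) and
 * the cached page isn't valid, one page is pre-read so the untouched
 * head bytes survive, with the read clipped at the old EOF. The size
 * computation, with 4096 standing in for PAGE_SIZE:
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE_MODEL 4096

static int64_t
first_page_preread_size(int start_offset, int64_t upl_f_offset,
    int64_t old_eof, bool first_page_valid)
{
    int64_t read_size;

    if (start_offset == 0 || upl_f_offset >= old_eof || first_page_valid) {
        return 0;                       /* no pre-read required */
    }
    read_size = PAGE_SIZE_MODEL;
    if (upl_f_offset + read_size > old_eof) {
        read_size = old_eof - upl_f_offset;     /* clip at the old EOF */
    }
    return read_size;                   /* issue CL_READ for this many */
}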
if it's not beyond the old EOF, then we'll also need to * pre-read this page in if it isn't already valid */ - upl_offset = upl_size - PAGE_SIZE; + upl_offset = upl_size - PAGE_SIZE; - if ((upl_f_offset + start_offset + io_size) < oldEOF && + if ((upl_f_offset + start_offset + io_size) < oldEOF && !upl_valid_page(pl, upl_offset / PAGE_SIZE)) { - int read_size; + int read_size; read_size = PAGE_SIZE; - if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF) - read_size = oldEOF - (upl_f_offset + upl_offset); + if ((off_t)(upl_f_offset + upl_offset + read_size) > oldEOF) { + read_size = oldEOF - (upl_f_offset + upl_offset); + } - retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size, - CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); + retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, read_size, + CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); if (retval) { /* * we had an error during the read which causes us to abort @@ -3537,13 +3670,14 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * need to release the rest of the pages in the upl without * modifying there state and mark the failed page in error */ - ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES|UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, upl_offset, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); - if (upl_size > PAGE_SIZE) - ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + if (upl_size > PAGE_SIZE) { + ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE, - upl, 0, 0, retval, 0); + upl, 0, 0, retval, 0); break; } } @@ -3552,11 +3686,11 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old io_offset = start_offset; while (zero_cnt && xfer_resid) { - - if (zero_cnt < (long long)xfer_resid) - bytes_to_zero = zero_cnt; - else - bytes_to_zero = xfer_resid; + if (zero_cnt < (long long)xfer_resid) { + bytes_to_zero = zero_cnt; + } else { + bytes_to_zero = xfer_resid; + } bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off, upl_f_offset, bytes_to_zero); @@ -3566,7 +3700,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old io_offset += bytes_to_zero; } if (xfer_resid && io_resid) { - u_int32_t io_requested; + u_int32_t io_requested; bytes_to_move = min(io_resid, xfer_resid); io_requested = bytes_to_move; @@ -3577,19 +3711,19 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 45)) | DBG_FUNC_NONE, - upl, 0, 0, retval, 0); + upl, 0, 0, retval, 0); } else { - io_resid -= bytes_to_move; + io_resid -= bytes_to_move; xfer_resid -= bytes_to_move; io_offset += bytes_to_move; } } while (xfer_resid && zero_cnt1 && retval == 0) { - - if (zero_cnt1 < (long long)xfer_resid) - bytes_to_zero = zero_cnt1; - else - bytes_to_zero = xfer_resid; + if (zero_cnt1 < (long long)xfer_resid) { + bytes_to_zero = zero_cnt1; + } else { + bytes_to_zero = xfer_resid; + } bytes_to_zero = cluster_zero_range(upl, pl, flags, io_offset, zero_off1, upl_f_offset, bytes_to_zero); @@ -3600,7 +3734,7 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old } if (retval == 0) { int do_zeroing = 1; - + io_size += start_offset; /* Force more restrictive zeroing behavior only on APFS 
*/ @@ -3609,19 +3743,18 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old } if (do_zeroing && (upl_f_offset + io_size) >= newEOF && (u_int)io_size < upl_size) { - /* * if we're extending the file with this write * we'll zero fill the rest of the page so that * if the file gets extended again in such a way as to leave a * hole starting at this EOF, we'll have zero's in the correct spot */ - cluster_zero(upl, io_size, upl_size - io_size, NULL); + cluster_zero(upl, io_size, upl_size - io_size, NULL); } /* * release the upl now if we hold one since... * 1) pages in it may be present in the sparse cluster map - * and may span 2 separate buckets there... if they do and + * and may span 2 separate buckets there... if they do and * we happen to have to flush a bucket to make room and it intersects * this upl, a deadlock may result on page BUSY * 2) we're delaying the I/O... from this point forward we're just updating @@ -3637,10 +3770,10 @@ cluster_write_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t old * we hold since the flushing context is holding the cluster lock. */ ubc_upl_commit_range(upl, 0, upl_size, - UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); + UPL_COMMIT_SET_DIRTY | UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY); check_cluster: /* - * calculate the last logical block number + * calculate the last logical block number * that this delayed I/O encompassed */ cl.e_addr = (daddr64_t)((upl_f_offset + (off_t)upl_size) / PAGE_SIZE_64); @@ -3658,21 +3791,22 @@ check_cluster: * cluster_push_now to wait until all the I/Os have completed... cluster_push_now is also * responsible for generating the correct sized I/O(s) */ - retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg, FALSE); + retval = cluster_push_now(vp, &cl, newEOF, flags, callback, callback_arg, FALSE); } else { boolean_t defer_writes = FALSE; - if (vfs_flags(vp->v_mount) & MNT_DEFWRITE) + if (vfs_flags(vp->v_mount) & MNT_DEFWRITE) { defer_writes = TRUE; + } cluster_update_state_internal(vp, &cl, flags, defer_writes, &first_pass, - write_off, write_cnt, newEOF, callback, callback_arg, FALSE); + write_off, write_cnt, newEOF, callback, callback_arg, FALSE); } } } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_END, retval, 0, io_resid, 0, 0); - return (retval); + return retval; } @@ -3680,29 +3814,32 @@ check_cluster: int cluster_read(vnode_t vp, struct uio *uio, off_t filesize, int xflags) { - return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL); + return cluster_read_ext(vp, uio, filesize, xflags, NULL, NULL); } int cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (*callback)(buf_t, void *), void *callback_arg) { - int retval = 0; - int flags; - user_ssize_t cur_resid; - u_int32_t io_size; - u_int32_t read_length = 0; - int read_type = IO_COPY; + int retval = 0; + int flags; + user_ssize_t cur_resid; + u_int32_t io_size; + u_int32_t read_length = 0; + int read_type = IO_COPY; flags = xflags; - if (vp->v_flag & VNOCACHE_DATA) - flags |= IO_NOCACHE; - if ((vp->v_flag & VRAOFF) || speculative_reads_disabled) - flags |= IO_RAOFF; + if (vp->v_flag & VNOCACHE_DATA) { + flags |= IO_NOCACHE; + } + if ((vp->v_flag & VRAOFF) || speculative_reads_disabled) { + flags |= IO_RAOFF; + } - if (flags & IO_SKIP_ENCRYPTION) + if (flags & IO_SKIP_ENCRYPTION) { flags |= IO_ENCRYPTED; + } /* * do a read through the cache if one of the following is true.... 
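/*
 * [Illustrative sketch, not part of the patch] Once the copy has been
 * committed with UPL_COMMIT_SET_DIRTY, check_cluster above records what
 * was dirtied in page-sized logical-block units before deciding between
 * an immediate cluster_push_now() and the deferred path. The conversion
 * is just this arithmetic (4096 standing in for PAGE_SIZE_64):
 */
#include <stdint.h>

#define PAGE_SZ 4096LL

struct extent_model {
    int64_t b_addr;     /* first page the UPL covered */
    int64_t e_addr;     /* one past the last page it covered */
};

static struct extent_model
dirty_extent(int64_t upl_f_offset, int64_t upl_size)
{
    struct extent_model cl;

    cl.b_addr = upl_f_offset / PAGE_SZ;
    cl.e_addr = (upl_f_offset + upl_size) / PAGE_SZ;
    return cl;
}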
@@ -3715,43 +3852,41 @@ cluster_read_ext(vnode_t vp, struct uio *uio, off_t filesize, int xflags, int (* * otherwise, find out if we want the direct or contig variant for * the first vector in the uio request */ - if ( ((flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) || (flags & IO_ENCRYPTED) ) { - + if (((flags & IO_NOCACHE) && UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) || (flags & IO_ENCRYPTED)) { retval = cluster_io_type(uio, &read_type, &read_length, 0); } while ((cur_resid = uio_resid(uio)) && uio->uio_offset < filesize && retval == 0) { - switch (read_type) { - case IO_COPY: - /* + /* * make sure the uio_resid isn't too big... * internally, we want to handle all of the I/O in * chunk sizes that fit in a 32 bit int */ - if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) - io_size = MAX_IO_REQUEST_SIZE; - else - io_size = (u_int32_t)cur_resid; + if (cur_resid > (user_ssize_t)(MAX_IO_REQUEST_SIZE)) { + io_size = MAX_IO_REQUEST_SIZE; + } else { + io_size = (u_int32_t)cur_resid; + } retval = cluster_read_copy(vp, uio, io_size, filesize, flags, callback, callback_arg); break; case IO_DIRECT: - retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg); + retval = cluster_read_direct(vp, uio, filesize, &read_type, &read_length, flags, callback, callback_arg); break; case IO_CONTIG: - retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags); + retval = cluster_read_contig(vp, uio, filesize, &read_type, &read_length, callback, callback_arg, flags); break; - + case IO_UNKNOWN: - retval = cluster_io_type(uio, &read_type, &read_length, 0); + retval = cluster_io_type(uio, &read_type, &read_length, 0); break; } } - return (retval); + return retval; } @@ -3763,8 +3898,9 @@ cluster_read_upl_release(upl_t upl, int start_pg, int last_pg, int take_referenc int abort_flags = UPL_ABORT_FREE_ON_EMPTY; if ((range = last_pg - start_pg)) { - if (take_reference) + if (take_reference) { abort_flags |= UPL_ABORT_REFERENCE; + } ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, range * PAGE_SIZE, abort_flags); } @@ -3777,11 +3913,11 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file upl_page_info_t *pl; upl_t upl; vm_offset_t upl_offset; - u_int32_t upl_size; - off_t upl_f_offset; - int start_offset; - int start_pg; - int last_pg; + u_int32_t upl_size; + off_t upl_f_offset; + int start_offset; + int start_pg; + int last_pg; int uio_last = 0; int pages_in_upl; off_t max_size; @@ -3798,36 +3934,40 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file u_int32_t max_prefetch; u_int rd_ahead_enabled = 1; u_int prefetch_enabled = 1; - struct cl_readahead * rap; - struct clios iostate; - struct cl_extent extent; + struct cl_readahead * rap; + struct clios iostate; + struct cl_extent extent; int bflag; - int take_reference = 1; - int policy = IOPOL_DEFAULT; - boolean_t iolock_inited = FALSE; + int take_reference = 1; + int policy = IOPOL_DEFAULT; + boolean_t iolock_inited = FALSE; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START, - (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0); - + (int)uio->uio_offset, io_req_size, (int)filesize, flags, 0); + if (flags & IO_ENCRYPTED) { - panic ("encrypted blocks will hit UBC!"); + panic("encrypted blocks will hit UBC!"); } - + policy = throttle_get_io_policy(NULL); - if (policy == THROTTLE_LEVEL_TIER3 || policy == THROTTLE_LEVEL_TIER2 || (flags & IO_NOCACHE)) + if (policy == THROTTLE_LEVEL_TIER3 || 
policy == THROTTLE_LEVEL_TIER2 || (flags & IO_NOCACHE)) { take_reference = 0; + } - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { bflag = CL_PASSIVE; - else + } else { bflag = 0; + } - if (flags & IO_NOCACHE) + if (flags & IO_NOCACHE) { bflag |= CL_NOCACHE; + } - if (flags & IO_SKIP_ENCRYPTION) + if (flags & IO_SKIP_ENCRYPTION) { bflag |= CL_ENCRYPTED; + } max_io_size = cluster_max_io_size(vp->v_mount, CL_READ); max_prefetch = MAX_PREFETCH(vp, max_io_size, disk_conditioner_mount_is_ssd(vp->v_mount)); @@ -3835,61 +3975,63 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file last_request_offset = uio->uio_offset + io_req_size; - if (last_request_offset > filesize) - last_request_offset = filesize; + if (last_request_offset > filesize) { + last_request_offset = filesize; + } - if ((flags & (IO_RAOFF|IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) { - rd_ahead_enabled = 0; + if ((flags & (IO_RAOFF | IO_NOCACHE)) || ((last_request_offset & ~PAGE_MASK_64) == (uio->uio_offset & ~PAGE_MASK_64))) { + rd_ahead_enabled = 0; rap = NULL; } else { - if (cluster_is_throttled(vp)) { + if (cluster_is_throttled(vp)) { /* * we're in the throttle window, at the very least * we want to limit the size of the I/O we're about * to issue */ - rd_ahead_enabled = 0; + rd_ahead_enabled = 0; prefetch_enabled = 0; max_rd_size = THROTTLE_MAX_IOSIZE; } - if ((rap = cluster_get_rap(vp)) == NULL) - rd_ahead_enabled = 0; - else { + if ((rap = cluster_get_rap(vp)) == NULL) { + rd_ahead_enabled = 0; + } else { extent.b_addr = uio->uio_offset / PAGE_SIZE_64; extent.e_addr = (last_request_offset - 1) / PAGE_SIZE_64; } } if (rap != NULL && rap->cl_ralen && (rap->cl_lastr == extent.b_addr || (rap->cl_lastr + 1) == extent.b_addr)) { - /* + /* * determine if we already have a read-ahead in the pipe courtesy of the * last read systemcall that was issued... 
* if so, pick up it's extent to determine where we should start - * with respect to any read-ahead that might be necessary to + * with respect to any read-ahead that might be necessary to * garner all the data needed to complete this read systemcall */ - last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64; + last_ioread_offset = (rap->cl_maxra * PAGE_SIZE_64) + PAGE_SIZE_64; - if (last_ioread_offset < uio->uio_offset) - last_ioread_offset = (off_t)0; - else if (last_ioread_offset > last_request_offset) - last_ioread_offset = last_request_offset; - } else - last_ioread_offset = (off_t)0; + if (last_ioread_offset < uio->uio_offset) { + last_ioread_offset = (off_t)0; + } else if (last_ioread_offset > last_request_offset) { + last_ioread_offset = last_request_offset; + } + } else { + last_ioread_offset = (off_t)0; + } while (io_req_size && uio->uio_offset < filesize && retval == 0) { - max_size = filesize - uio->uio_offset; - if ((off_t)(io_req_size) < max_size) - io_size = io_req_size; - else - io_size = max_size; + if ((off_t)(io_req_size) < max_size) { + io_size = io_req_size; + } else { + io_size = max_size; + } if (!(flags & IO_NOCACHE)) { - - while (io_size) { - u_int32_t io_resid; + while (io_size) { + u_int32_t io_resid; u_int32_t io_requested; /* @@ -3900,69 +4042,75 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file * to continue to miss in the cache and it's to our advantage to try and prefetch */ if (last_request_offset && last_ioread_offset && (size_of_prefetch = (last_request_offset - last_ioread_offset))) { - if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) { - /* + if ((last_ioread_offset - uio->uio_offset) <= max_rd_size && prefetch_enabled) { + /* * we've already issued I/O for this request and * there's still work to do and * our prefetch stream is running dry, so issue a * pre-fetch I/O... 
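/*
 * [Illustrative sketch, not part of the patch] The pickup logic above
 * treats the request as a sequential continuation when its first page
 * equals, or immediately follows, the last page of the previous read
 * (rap->cl_lastr), and resumes issuing I/O just past the deepest
 * read-ahead page (rap->cl_maxra), clamped into the request. Condensed,
 * with 4096 standing in for PAGE_SIZE_64 and the rap->cl_ralen != 0
 * precondition folded into the caller:
 */
#include <stdint.h>

#define PAGE_SZ 4096LL

static int64_t
resume_ioread_offset(int64_t lastr, int64_t maxra, int64_t first_page,
    int64_t uio_offset, int64_t last_request_offset)
{
    int64_t off;

    if (lastr != first_page && lastr + 1 != first_page) {
        return 0;       /* not a continuation: no credit for old I/O */
    }
    off = (maxra * PAGE_SZ) + PAGE_SZ;  /* byte past deepest read-ahead */

    if (off < uio_offset) {
        return 0;       /* read-ahead is stale; start from scratch */
    }
    if (off > last_request_offset) {
        off = last_request_offset;
    }
    return off;
}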
the I/O latency will overlap * with the copying of the data */ - if (size_of_prefetch > max_rd_size) - size_of_prefetch = max_rd_size; + if (size_of_prefetch > max_rd_size) { + size_of_prefetch = max_rd_size; + } - size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag); + size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag); last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE); - - if (last_ioread_offset > last_request_offset) - last_ioread_offset = last_request_offset; + + if (last_ioread_offset > last_request_offset) { + last_ioread_offset = last_request_offset; + } } } /* - * limit the size of the copy we're about to do so that - * we can notice that our I/O pipe is running dry and + * limit the size of the copy we're about to do so that + * we can notice that our I/O pipe is running dry and * get the next I/O issued before it does go dry */ - if (last_ioread_offset && io_size > (max_io_size / 4)) - io_resid = (max_io_size / 4); - else - io_resid = io_size; + if (last_ioread_offset && io_size > (max_io_size / 4)) { + io_resid = (max_io_size / 4); + } else { + io_resid = io_size; + } io_requested = io_resid; - retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference); + retval = cluster_copy_ubc_data_internal(vp, uio, (int *)&io_resid, 0, take_reference); xsize = io_requested - io_resid; io_size -= xsize; io_req_size -= xsize; - if (retval || io_resid) - /* + if (retval || io_resid) { + /* * if we run into a real error or * a page that is not in the cache * we need to leave streaming mode */ - break; - + break; + } + if (rd_ahead_enabled && (io_size == 0 || last_ioread_offset == last_request_offset)) { - /* + /* * we're already finished the I/O for this read request * let's see if we should do a read-ahead */ - cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag); + cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag); } } - if (retval) - break; + if (retval) { + break; + } if (io_size == 0) { if (rap != NULL) { - if (extent.e_addr < rap->cl_lastr) - rap->cl_maxra = 0; + if (extent.e_addr < rap->cl_lastr) { + rap->cl_maxra = 0; + } rap->cl_lastr = extent.e_addr; } - break; + break; } /* * recompute max_size since cluster_copy_ubc_data_internal @@ -3976,9 +4124,9 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file iostate.io_error = 0; iostate.io_wanted = 0; - if ( (flags & IO_RETURN_ON_THROTTLE) ) { + if ((flags & IO_RETURN_ON_THROTTLE)) { if (cluster_is_throttled(vp) == THROTTLE_NOW) { - if ( !cluster_io_present_in_BC(vp, uio->uio_offset)) { + if (!cluster_io_present_in_BC(vp, uio->uio_offset)) { /* * we're in the throttle window and at least 1 I/O * has already been issued by a throttleable thread @@ -3999,46 +4147,50 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file * the requested read... limit each call to cluster_io * to the maximum UPL size... 
cluster_io will clip if * this exceeds the maximum io_size for the device, - * make sure to account for + * make sure to account for * a starting offset that's not page aligned */ start_offset = (int)(uio->uio_offset & PAGE_MASK_64); upl_f_offset = uio->uio_offset - (off_t)start_offset; - if (io_size > max_rd_size) - io_size = max_rd_size; + if (io_size > max_rd_size) { + io_size = max_rd_size; + } upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; if (flags & IO_NOCACHE) { - if (upl_size > max_io_size) - upl_size = max_io_size; + if (upl_size > max_io_size) { + upl_size = max_io_size; + } } else { - if (upl_size > max_io_size / 4) { - upl_size = max_io_size / 4; + if (upl_size > max_io_size / 4) { + upl_size = max_io_size / 4; upl_size &= ~PAGE_MASK; - - if (upl_size == 0) + + if (upl_size == 0) { upl_size = PAGE_SIZE; + } } } pages_in_upl = upl_size / PAGE_SIZE; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_START, - upl, (int)upl_f_offset, upl_size, start_offset, 0); + upl, (int)upl_f_offset, upl_size, start_offset, 0); kret = ubc_create_upl_kernel(vp, - upl_f_offset, - upl_size, - &upl, - &pl, - UPL_FILE_IO | UPL_SET_LITE, - VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) + upl_f_offset, + upl_size, + &upl, + &pl, + UPL_FILE_IO | UPL_SET_LITE, + VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { panic("cluster_read_copy: failed to get pagelist"); + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 33)) | DBG_FUNC_END, - upl, (int)upl_f_offset, upl_size, start_offset, 0); + upl, (int)upl_f_offset, upl_size, start_offset, 0); /* * scan from the beginning of the upl looking for the first @@ -4047,23 +4199,25 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file * of the pages are valid, we won't call through to 'cluster_io' */ for (start_pg = 0; start_pg < pages_in_upl; start_pg++) { - if (!upl_valid_page(pl, start_pg)) + if (!upl_valid_page(pl, start_pg)) { break; + } } /* * scan from the starting invalid page looking for a valid - * page before the end of the upl is reached, if we + * page before the end of the upl is reached, if we * find one, then it will be the last page of the request to * 'cluster_io' */ for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) { - if (upl_valid_page(pl, last_pg)) + if (upl_valid_page(pl, last_pg)) { break; + } } - if (start_pg < last_pg) { - /* + if (start_pg < last_pg) { + /* * we found a range of 'invalid' pages that must be filled * if the last page in this range is the last page of the file * we may have to clip the size of it to keep from reading past @@ -4077,30 +4231,31 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file upl_offset = start_pg * PAGE_SIZE; io_size = (last_pg - start_pg) * PAGE_SIZE; - if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) - io_size = filesize - (upl_f_offset + upl_offset); + if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) { + io_size = filesize - (upl_f_offset + upl_offset); + } /* * issue an asynchronous read to cluster_io */ error = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, - io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg); + io_size, CL_READ | CL_ASYNC | bflag, (buf_t)NULL, &iostate, callback, callback_arg); if (rap) { - if (extent.e_addr < rap->cl_maxra) { - /* - * we've just issued a read for a block that should have been - * in the cache courtesy of the read-ahead engine... 
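/*
 * [Illustrative sketch, not part of the patch] The two scans above bound
 * the one I/O that cluster_read_copy will issue: pages before start_pg
 * are already valid in the cache, and [start_pg, last_pg) is the invalid
 * run that actually has to be read. The same scans, over a plain bitmap
 * in place of upl_valid_page():
 */
#include <stdbool.h>

static void
find_invalid_run(const bool *valid, int pages_in_upl,
    int *start_pg, int *last_pg)
{
    int pg;

    for (pg = 0; pg < pages_in_upl; pg++) {
        if (!valid[pg]) {
            break;      /* first page that needs I/O */
        }
    }
    *start_pg = pg;

    for (; pg < pages_in_upl; pg++) {
        if (valid[pg]) {
            break;      /* the invalid run ends here */
        }
    }
    *last_pg = pg;
}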
something - * has gone wrong with the pipeline, so reset the read-ahead - * logic which will cause us to restart from scratch - */ - rap->cl_maxra = 0; - } - } + if (extent.e_addr < rap->cl_maxra) { + /* + * we've just issued a read for a block that should have been + * in the cache courtesy of the read-ahead engine... something + * has gone wrong with the pipeline, so reset the read-ahead + * logic which will cause us to restart from scratch + */ + rap->cl_maxra = 0; + } + } } if (error == 0) { - /* + /* * if the read completed successfully, or there was no I/O request * issued, than copy the data into user land via 'cluster_upl_copy_data' * we'll first add on any 'valid' @@ -4108,19 +4263,20 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file */ u_int val_size; - for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) { - if (!upl_valid_page(pl, uio_last)) - break; + for (uio_last = last_pg; uio_last < pages_in_upl; uio_last++) { + if (!upl_valid_page(pl, uio_last)) { + break; + } } if (uio_last < pages_in_upl) { - /* + /* * there were some invalid pages beyond the valid pages * that we didn't issue an I/O for, just release them * unchanged now, so that any prefetch/readahed can * include them */ - ubc_upl_abort_range(upl, uio_last * PAGE_SIZE, - (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, uio_last * PAGE_SIZE, + (pages_in_upl - uio_last) * PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); } /* @@ -4129,109 +4285,116 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file * set up for another I/O. */ val_size = (uio_last * PAGE_SIZE) - start_offset; - - if (val_size > max_size) - val_size = max_size; - if (val_size > io_req_size) - val_size = io_req_size; + if (val_size > max_size) { + val_size = max_size; + } + + if (val_size > io_req_size) { + val_size = io_req_size; + } - if ((uio->uio_offset + val_size) > last_ioread_offset) - last_ioread_offset = uio->uio_offset + val_size; + if ((uio->uio_offset + val_size) > last_ioread_offset) { + last_ioread_offset = uio->uio_offset + val_size; + } if ((size_of_prefetch = (last_request_offset - last_ioread_offset)) && prefetch_enabled) { - - if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) { - /* + if ((last_ioread_offset - (uio->uio_offset + val_size)) <= upl_size) { + /* * if there's still I/O left to do for this request, and... * we're not in hard throttle mode, and... * we're close to using up the previous prefetch, then issue a * new pre-fetch I/O... the I/O latency will overlap * with the copying of the data */ - if (size_of_prefetch > max_rd_size) - size_of_prefetch = max_rd_size; + if (size_of_prefetch > max_rd_size) { + size_of_prefetch = max_rd_size; + } size_of_prefetch = cluster_read_prefetch(vp, last_ioread_offset, size_of_prefetch, filesize, callback, callback_arg, bflag); last_ioread_offset += (off_t)(size_of_prefetch * PAGE_SIZE); - - if (last_ioread_offset > last_request_offset) - last_ioread_offset = last_request_offset; - } + if (last_ioread_offset > last_request_offset) { + last_ioread_offset = last_request_offset; + } + } } else if ((uio->uio_offset + val_size) == last_request_offset) { - /* + /* * this transfer will finish this request, so... 
- * let's try to read ahead if we're in + * let's try to read ahead if we're in * a sequential access pattern and we haven't * explicitly disabled it */ - if (rd_ahead_enabled) + if (rd_ahead_enabled) { cluster_read_ahead(vp, &extent, filesize, rap, callback, callback_arg, bflag); - + } + if (rap != NULL) { - if (extent.e_addr < rap->cl_lastr) - rap->cl_maxra = 0; + if (extent.e_addr < rap->cl_lastr) { + rap->cl_maxra = 0; + } rap->cl_lastr = extent.e_addr; } } - if (iolock_inited == TRUE) + if (iolock_inited == TRUE) { cluster_iostate_wait(&iostate, 0, "cluster_read_copy"); + } + + if (iostate.io_error) { + error = iostate.io_error; + } else { + u_int32_t io_requested; - if (iostate.io_error) - error = iostate.io_error; - else { - u_int32_t io_requested; + io_requested = val_size; - io_requested = val_size; + retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested); - retval = cluster_copy_upl_data(uio, upl, start_offset, (int *)&io_requested); - io_req_size -= (val_size - io_requested); } } else { - if (iolock_inited == TRUE) + if (iolock_inited == TRUE) { cluster_iostate_wait(&iostate, 0, "cluster_read_copy"); + } } if (start_pg < last_pg) { - /* + /* * compute the range of pages that we actually issued an I/O for * and either commit them as valid if the I/O succeeded - * or abort them if the I/O failed or we're not supposed to + * or abort them if the I/O failed or we're not supposed to * keep them in the cache */ - io_size = (last_pg - start_pg) * PAGE_SIZE; + io_size = (last_pg - start_pg) * PAGE_SIZE; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, upl, start_pg * PAGE_SIZE, io_size, error, 0); - if (error || (flags & IO_NOCACHE)) - ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size, - UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); - else { - int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY; + if (error || (flags & IO_NOCACHE)) { + ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, io_size, + UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); + } else { + int commit_flags = UPL_COMMIT_CLEAR_DIRTY | UPL_COMMIT_FREE_ON_EMPTY; - if (take_reference) + if (take_reference) { commit_flags |= UPL_COMMIT_INACTIVATE; - else + } else { commit_flags |= UPL_COMMIT_SPECULATE; + } - ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags); + ubc_upl_commit_range(upl, start_pg * PAGE_SIZE, io_size, commit_flags); } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, start_pg * PAGE_SIZE, io_size, error, 0); } if ((last_pg - start_pg) < pages_in_upl) { - /* + /* * the set of pages that we issued an I/O for did not encompass * the entire upl... 
so just release these without modifying * their state */ - if (error) + if (error) { ubc_upl_abort_range(upl, 0, upl_size, UPL_ABORT_FREE_ON_EMPTY); - else { - + } else { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_START, - upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0); + upl, -1, pages_in_upl - (last_pg - start_pg), 0, 0); /* * handle any valid pages at the beginning of @@ -4248,27 +4411,29 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 35)) | DBG_FUNC_END, upl, -1, -1, 0, 0); } } - if (retval == 0) - retval = error; + if (retval == 0) { + retval = error; + } if (io_req_size) { - if (cluster_is_throttled(vp)) { + if (cluster_is_throttled(vp)) { /* * we're in the throttle window, at the very least * we want to limit the size of the I/O we're about * to issue */ - rd_ahead_enabled = 0; + rd_ahead_enabled = 0; prefetch_enabled = 0; max_rd_size = THROTTLE_MAX_IOSIZE; } else { - if (max_rd_size == THROTTLE_MAX_IOSIZE) { - /* + if (max_rd_size == THROTTLE_MAX_IOSIZE) { + /* * coming out of throttled state */ if (policy != THROTTLE_LEVEL_TIER3 && policy != THROTTLE_LEVEL_TIER2) { - if (rap != NULL) + if (rap != NULL) { rd_ahead_enabled = 1; + } prefetch_enabled = 1; } max_rd_size = max_prefetch; @@ -4291,16 +4456,16 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); } if (rap != NULL) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, - (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, + (int)uio->uio_offset, io_req_size, rap->cl_lastr, retval, 0); - lck_mtx_unlock(&rap->cl_lockr); + lck_mtx_unlock(&rap->cl_lockr); } else { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, - (int)uio->uio_offset, io_req_size, 0, retval, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_END, + (int)uio->uio_offset, io_req_size, 0, retval, 0); } - return (retval); + return retval; } /* @@ -4308,11 +4473,12 @@ cluster_read_copy(vnode_t vp, struct uio *uio, u_int32_t io_req_size, off_t file * so we keep a hash of them here. There should never be very many of * these around at any point in time. 
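/*
 * [Illustrative sketch, not part of the patch] The next hunk reflows
 * cluster_lock_direct_read(), whose bucket choice is worth isolating:
 * the vnode pointer is divided by the object size, so consecutively
 * allocated vnodes land in different buckets, then reduced modulo the
 * bucket count. Modeled with an opaque struct standing in for the vnode
 * and an illustrative bucket count in place of
 * CL_DIRECT_READ_LOCK_BUCKETS:
 */
#include <stdint.h>

#define LOCK_BUCKETS_MODEL 61

struct vnode_model { char opaque[248]; };   /* size plays sizeof(*vp) */

static unsigned
direct_read_bucket(const struct vnode_model *vp)
{
    return (unsigned)(((uintptr_t)vp / sizeof(*vp)) % LOCK_BUCKETS_MODEL);
}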
*/ -cl_direct_read_lock_t *cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type) +cl_direct_read_lock_t * +cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type) { struct cl_direct_read_locks *head - = &cl_direct_read_locks[(uintptr_t)vp / sizeof(*vp) - % CL_DIRECT_READ_LOCK_BUCKETS]; + = &cl_direct_read_locks[(uintptr_t)vp / sizeof(*vp) + % CL_DIRECT_READ_LOCK_BUCKETS]; struct cl_direct_read_lock *lck, *new_lck = NULL; @@ -4345,7 +4511,7 @@ cl_direct_read_lock_t *cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type) // Allocate a new lock MALLOC(new_lck, cl_direct_read_lock_t *, sizeof(*new_lck), - M_TEMP, M_WAITOK); + M_TEMP, M_WAITOK); lck_rw_init(&new_lck->rw_lock, cl_mtx_grp, cl_mtx_attr); new_lck->vp = vp; new_lck->ref_count = 1; @@ -4354,7 +4520,8 @@ cl_direct_read_lock_t *cluster_lock_direct_read(vnode_t vp, lck_rw_type_t type) } } -void cluster_unlock_direct_read(cl_direct_read_lock_t *lck) +void +cluster_unlock_direct_read(cl_direct_read_lock_t *lck) { lck_rw_done(&lck->rw_lock); @@ -4372,52 +4539,52 @@ void cluster_unlock_direct_read(cl_direct_read_lock_t *lck) static int cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length, - int flags, int (*callback)(buf_t, void *), void *callback_arg) + int flags, int (*callback)(buf_t, void *), void *callback_arg) { upl_t upl; upl_page_info_t *pl; - off_t max_io_size; + off_t max_io_size; vm_offset_t upl_offset, vector_upl_offset = 0; - upl_size_t upl_size, vector_upl_size = 0; - vm_size_t upl_needed_size; - unsigned int pages_in_pl; + upl_size_t upl_size, vector_upl_size = 0; + vm_size_t upl_needed_size; + unsigned int pages_in_pl; upl_control_flags_t upl_flags; kern_return_t kret; unsigned int i; int force_data_sync; int retval = 0; - int no_zero_fill = 0; + int no_zero_fill = 0; int io_flag = 0; - int misaligned = 0; + int misaligned = 0; struct clios iostate; - user_addr_t iov_base; - u_int32_t io_req_size; - u_int32_t offset_in_file; - u_int32_t offset_in_iovbase; - u_int32_t io_size; - u_int32_t io_min; - u_int32_t xsize; - u_int32_t devblocksize; - u_int32_t mem_alignment_mask; - u_int32_t max_upl_size; + user_addr_t iov_base; + u_int32_t io_req_size; + u_int32_t offset_in_file; + u_int32_t offset_in_iovbase; + u_int32_t io_size; + u_int32_t io_min; + u_int32_t xsize; + u_int32_t devblocksize; + u_int32_t mem_alignment_mask; + u_int32_t max_upl_size; u_int32_t max_rd_size; u_int32_t max_rd_ahead; u_int32_t max_vector_size; - boolean_t io_throttled = FALSE; + boolean_t io_throttled = FALSE; - u_int32_t vector_upl_iosize = 0; - int issueVectorUPL = 0,useVectorUPL = (uio->uio_iovcnt > 1); - off_t v_upl_uio_offset = 0; - int vector_upl_index=0; - upl_t vector_upl = NULL; + u_int32_t vector_upl_iosize = 0; + int issueVectorUPL = 0, useVectorUPL = (uio->uio_iovcnt > 1); + off_t v_upl_uio_offset = 0; + int vector_upl_index = 0; + upl_t vector_upl = NULL; cl_direct_read_lock_t *lock = NULL; - user_addr_t orig_iov_base = 0; - user_addr_t last_iov_base = 0; - user_addr_t next_iov_base = 0; + user_addr_t orig_iov_base = 0; + user_addr_t last_iov_base = 0; + user_addr_t next_iov_base = 0; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START, - (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0); + (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0); max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ); @@ -4426,8 +4593,9 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, io_flag = CL_COMMIT | CL_READ | CL_ASYNC | 
CL_NOZERO | CL_DIRECT_IO; - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { io_flag |= CL_PASSIVE; + } if (flags & IO_ENCRYPTED) { io_flag |= CL_RAW_ENCRYPTED; @@ -4437,8 +4605,9 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, io_flag |= CL_NOCACHE; } - if (flags & IO_SKIP_ENCRYPTION) + if (flags & IO_SKIP_ENCRYPTION) { io_flag |= CL_ENCRYPTED; + } iostate.io_completed = 0; iostate.io_issued = 0; @@ -4451,20 +4620,20 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, mem_alignment_mask = (u_int32_t)vp->v_mount->mnt_alignmentmask; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE, - (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0); + (int)devblocksize, (int)mem_alignment_mask, 0, 0, 0); if (devblocksize == 1) { - /* - * the AFP client advertises a devblocksize of 1 - * however, its BLOCKMAP routine maps to physical - * blocks that are PAGE_SIZE in size... - * therefore we can't ask for I/Os that aren't page aligned - * or aren't multiples of PAGE_SIZE in size - * by setting devblocksize to PAGE_SIZE, we re-instate - * the old behavior we had before the mem_alignment_mask - * changes went in... - */ - devblocksize = PAGE_SIZE; + /* + * the AFP client advertises a devblocksize of 1 + * however, its BLOCKMAP routine maps to physical + * blocks that are PAGE_SIZE in size... + * therefore we can't ask for I/Os that aren't page aligned + * or aren't multiples of PAGE_SIZE in size + * by setting devblocksize to PAGE_SIZE, we re-instate + * the old behavior we had before the mem_alignment_mask + * changes went in... + */ + devblocksize = PAGE_SIZE; } orig_iov_base = uio_curriovbase(uio); @@ -4478,37 +4647,39 @@ next_dread: offset_in_iovbase = (u_int32_t)iov_base & mem_alignment_mask; if (offset_in_file || offset_in_iovbase) { - /* + /* * one of the 2 important offsets is misaligned * so fire an I/O through the cache for this entire vector */ misaligned = 1; } if (iov_base & (devblocksize - 1)) { - /* + /* * the offset in memory must be on a device block boundary * so that we can guarantee that we can generate an * I/O that ends on a page boundary in cluster_io */ misaligned = 1; - } + } max_io_size = filesize - uio->uio_offset; - /* - * The user must request IO in aligned chunks. If the - * offset into the file is bad, or the userland pointer + /* + * The user must request IO in aligned chunks. If the + * offset into the file is bad, or the userland pointer * is non-aligned, then we cannot service the encrypted IO request. */ if (flags & IO_ENCRYPTED) { - if (misaligned || (io_req_size & (devblocksize - 1))) + if (misaligned || (io_req_size & (devblocksize - 1))) { retval = EINVAL; + } max_io_size = roundup(max_io_size, devblocksize); } - if ((off_t)io_req_size > max_io_size) - io_req_size = max_io_size; + if ((off_t)io_req_size > max_io_size) { + io_req_size = max_io_size; + } /* * When we get to this point, we know... 
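The two misalignment tests above fold into one gate: if the file offset or the user pointer is misaligned, or the pointer is off a device block boundary, the whole vector is routed through the cached copy path instead of direct I/O. A portable restatement (4 KB page assumed; devblocksize must be a power of two, as the masks require):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_MASK_4K (4096u - 1)   /* illustrative page size */

    static bool
    direct_read_is_misaligned(uint64_t file_off, uint64_t iov_base,
        uint32_t devblocksize, uint32_t mem_alignment_mask)
    {
        if ((uint32_t)file_off & PAGE_MASK_4K) {         /* offset_in_file */
            return true;
        }
        if ((uint32_t)iov_base & mem_alignment_mask) {   /* offset_in_iovbase */
            return true;
        }
        /* user buffer must start on a device block boundary */
        return (iov_base & (devblocksize - 1)) != 0;
    }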
@@ -4516,19 +4687,19 @@ next_dread: */ while (io_req_size && retval == 0) { - u_int32_t io_start; + u_int32_t io_start; - if (cluster_is_throttled(vp)) { + if (cluster_is_throttled(vp)) { /* * we're in the throttle window, at the very least * we want to limit the size of the I/O we're about * to issue */ - max_rd_size = THROTTLE_MAX_IOSIZE; + max_rd_size = THROTTLE_MAX_IOSIZE; max_rd_ahead = THROTTLE_MAX_IOSIZE - 1; max_vector_size = THROTTLE_MAX_IOSIZE; } else { - max_rd_size = max_upl_size; + max_rd_size = max_upl_size; max_rd_ahead = max_rd_size * IO_SCALE(vp, 2); max_vector_size = MAX_VECTOR_UPL_SIZE; } @@ -4539,7 +4710,7 @@ next_dread: * and move them to user space. But only do this * check if we are not retrieving encrypted data directly * from the filesystem; those blocks should never - * be in the UBC. + * be in the UBC. * * cluster_copy_ubc_data returns the resid * in io_size @@ -4555,43 +4726,44 @@ next_dread: io_req_size -= xsize; - if(useVectorUPL && (xsize || (iov_base & PAGE_MASK))) { + if (useVectorUPL && (xsize || (iov_base & PAGE_MASK))) { /* * We found something in the cache or we have an iov_base that's not * page-aligned. - * + * * Issue all I/O's that have been collected within this Vectored UPL. */ - if(vector_upl_index) { + if (vector_upl_index) { retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); reset_vector_run_state(); } - - if(xsize) + + if (xsize) { useVectorUPL = 0; + } - /* - * After this point, if we are using the Vector UPL path and the base is - * not page-aligned then the UPL with that base will be the first in the vector UPL. - */ + /* + * After this point, if we are using the Vector UPL path and the base is + * not page-aligned then the UPL with that base will be the first in the vector UPL. + */ } /* * check to see if we are finished with this request. * * If we satisfied this IO already, then io_req_size will be 0. - * Otherwise, see if the IO was mis-aligned and needs to go through + * Otherwise, see if the IO was mis-aligned and needs to go through * the UBC to deal with the 'tail'. * */ if (io_req_size == 0 || (misaligned)) { - /* + /* * see if there's another uio vector to * process that's of type IO_DIRECT * * break out of while loop to get there */ - break; + break; } /* * assume the request ends on a device block boundary @@ -4607,7 +4779,7 @@ next_dread: * multiple, we avoid asking the drive for the same physical * blocks twice.. once for the partial page at the end of the * request and a 2nd time for the page we read into the cache - * (which overlaps the end of the direct read) in order to + * (which overlaps the end of the direct read) in order to * get at the overhang bytes */ if (io_size & (devblocksize - 1)) { @@ -4620,14 +4792,14 @@ next_dread: io_min = PAGE_SIZE; } if (retval || io_size < io_min) { - /* + /* * either an error or we only have the tail left to * complete via the copy path... * we may have already spun some portion of this request * off as async requests... we need to wait for the I/O * to complete before returning */ - goto wait_for_dreads; + goto wait_for_dreads; } /* @@ -4635,9 +4807,9 @@ next_dread: * or asking for encrypted blocks. 
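The block-boundary rationale above reduces to: let the direct engine take only the device-block-aligned body of the request and leave the overhang for the buffered copy path, so the drive is never asked for the same physical block twice. A deliberately simplified sketch; the kernel's io_min handling adds more cases than this:

    #include <stdint.h>

    static uint32_t
    direct_body_size(uint32_t io_req, uint32_t devblocksize, uint32_t *overhang)
    {
        uint32_t body = io_req & ~(devblocksize - 1);

        *overhang = io_req - body;   /* later satisfied via cluster_read_copy */
        return body;
    }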
*/ if ((flags & IO_ENCRYPTED) == 0) { - - if ((xsize = io_size) > max_rd_size) + if ((xsize = io_size) > max_rd_size) { xsize = max_rd_size; + } io_size = 0; @@ -4665,9 +4837,9 @@ next_dread: continue; } } - if ( (flags & IO_RETURN_ON_THROTTLE) ) { + if ((flags & IO_RETURN_ON_THROTTLE)) { if (cluster_is_throttled(vp) == THROTTLE_NOW) { - if ( !cluster_io_present_in_BC(vp, uio->uio_offset)) { + if (!cluster_io_present_in_BC(vp, uio->uio_offset)) { /* * we're in the throttle window and at least 1 I/O * has already been issued by a throttleable thread @@ -4682,39 +4854,43 @@ next_dread: } } } - if (io_size > max_rd_size) + if (io_size > max_rd_size) { io_size = max_rd_size; + } iov_base = uio_curriovbase(uio); upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK); - upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK; + upl_needed_size = (upl_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START, - (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0); + (int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0); - if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) - no_zero_fill = 1; - else - no_zero_fill = 0; + if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) { + no_zero_fill = 1; + } else { + no_zero_fill = 0; + } vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map; for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) { - pages_in_pl = 0; + pages_in_pl = 0; upl_size = upl_needed_size; upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE; - if (no_zero_fill) - upl_flags |= UPL_NOZEROFILL; - if (force_data_sync) - upl_flags |= UPL_FORCE_DATA_SYNC; + if (no_zero_fill) { + upl_flags |= UPL_NOZEROFILL; + } + if (force_data_sync) { + upl_flags |= UPL_FORCE_DATA_SYNC; + } kret = vm_map_create_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), - &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE); + (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + &upl_size, &upl, NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE); if (kret != KERN_SUCCESS) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, - (int)upl_offset, upl_size, io_size, kret, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, + (int)upl_offset, upl_size, io_size, kret, 0); /* * failed to get pagelist * @@ -4728,40 +4904,44 @@ next_dread: pl = UPL_GET_INTERNAL_PAGE_LIST(upl); for (i = 0; i < pages_in_pl; i++) { - if (!upl_page_present(pl, i)) - break; + if (!upl_page_present(pl, i)) { + break; + } + } + if (i == pages_in_pl) { + break; } - if (i == pages_in_pl) - break; ubc_upl_abort(upl, 0); } if (force_data_sync >= 3) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, - (int)upl_offset, upl_size, io_size, kret, 0); - + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, + (int)upl_offset, upl_size, io_size, kret, 0); + goto wait_for_dreads; } /* * Consider the possibility that upl_size wasn't satisfied. 
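The upl_needed_size arithmetic above, standalone (4 KB page assumed): the UPL starts at the page containing iov_base, so the in-page offset is added before the total is rounded up to whole pages.

    #include <stdint.h>

    #define PG_SIZE 4096u
    #define PG_MASK (PG_SIZE - 1)

    static uint32_t
    upl_bytes_needed(uint64_t iov_base, uint32_t io_size)
    {
        uint32_t upl_offset = (uint32_t)(iov_base & PG_MASK);

        /* round (offset within first page + transfer) up to a page multiple */
        return (upl_offset + io_size + PG_MASK) & ~PG_MASK;
    }

For example, a 32-byte read starting 16 bytes before a page boundary yields 0x2000: the transfer wires two pages even though it moves only 32 bytes.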
*/ if (upl_size < upl_needed_size) { - if (upl_size && upl_offset == 0) - io_size = upl_size; - else - io_size = 0; + if (upl_size && upl_offset == 0) { + io_size = upl_size; + } else { + io_size = 0; + } } if (io_size == 0) { ubc_upl_abort(upl, 0); goto wait_for_dreads; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END, - (int)upl_offset, upl_size, io_size, kret, 0); + (int)upl_offset, upl_size, io_size, kret, 0); - if(useVectorUPL) { + if (useVectorUPL) { vm_offset_t end_off = ((iov_base + io_size) & PAGE_MASK); - if(end_off) + if (end_off) { issueVectorUPL = 1; + } /* * After this point, if we are using a vector UPL, then * either all the UPL elements end on a page boundary OR @@ -4779,7 +4959,7 @@ next_dread: cluster_iostate_wait(&iostate, max_rd_ahead, "cluster_read_direct"); if (iostate.io_error) { - /* + /* * one of the earlier reads we issued ran into a hard error * don't issue any more reads, cleanup the UPL * that was just created but not used, then @@ -4788,36 +4968,35 @@ next_dread: */ ubc_upl_abort(upl, 0); - goto wait_for_dreads; - } + goto wait_for_dreads; + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_START, - upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0); - - if(!useVectorUPL) { - if (no_zero_fill) - io_flag &= ~CL_PRESERVE; - else - io_flag |= CL_PRESERVE; - - retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); + upl, (int)upl_offset, (int)uio->uio_offset, io_size, 0); - } else { + if (!useVectorUPL) { + if (no_zero_fill) { + io_flag &= ~CL_PRESERVE; + } else { + io_flag |= CL_PRESERVE; + } - if(!vector_upl_index) { + retval = cluster_io(vp, upl, upl_offset, uio->uio_offset, io_size, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); + } else { + if (!vector_upl_index) { vector_upl = vector_upl_create(upl_offset); v_upl_uio_offset = uio->uio_offset; vector_upl_offset = upl_offset; } - vector_upl_set_subupl(vector_upl,upl, upl_size); + vector_upl_set_subupl(vector_upl, upl, upl_size); vector_upl_set_iostate(vector_upl, upl, vector_upl_size, upl_size); vector_upl_index++; vector_upl_size += upl_size; vector_upl_iosize += io_size; - - if(issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= max_vector_size) { - retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); - reset_vector_run_state(); + + if (issueVectorUPL || vector_upl_index == MAX_VECTOR_UPL_ELEMENTS || vector_upl_size >= max_vector_size) { + retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); + reset_vector_run_state(); } } last_iov_base = iov_base + io_size; @@ -4833,26 +5012,22 @@ next_dread: */ if ((flags & IO_ENCRYPTED) && (max_io_size < io_size)) { uio_update(uio, (user_size_t)max_io_size); - } - else { + } else { uio_update(uio, (user_size_t)io_size); } io_req_size -= io_size; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 73)) | DBG_FUNC_END, - upl, (int)uio->uio_offset, io_req_size, retval, 0); - + upl, (int)uio->uio_offset, io_req_size, retval, 0); } /* end while */ if (retval == 0 && iostate.io_error == 0 && io_req_size == 0 && uio->uio_offset < filesize) { + retval = cluster_io_type(uio, read_type, read_length, 0); - retval = cluster_io_type(uio, read_type, read_length, 0); - if (retval == 0 && *read_type == IO_DIRECT) { - - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE, - 
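The vector path above accumulates sub-UPLs and issues them as one vector_cluster_io when any of three conditions trips: a sub-UPL ends off a page boundary, the element table fills, or the batched bytes reach the (possibly throttled) vector cap. The predicate in isolation, with the element limit assumed for the sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_VECTOR_UPL_ELEMENTS_ 8   /* assumed limit */

    static bool
    vector_should_issue(bool unaligned_tail, int nelems,
        uint32_t batched_bytes, uint32_t max_vector_size)
    {
        return unaligned_tail
            || nelems == MAX_VECTOR_UPL_ELEMENTS_
            || batched_bytes >= max_vector_size;
    }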
(int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_NONE, + (int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0); goto next_dread; } @@ -4860,14 +5035,15 @@ next_dread: wait_for_dreads: - if(retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) { - retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); + if (retval == 0 && iostate.io_error == 0 && useVectorUPL && vector_upl_index) { + retval = vector_cluster_io(vp, vector_upl, vector_upl_offset, v_upl_uio_offset, vector_upl_iosize, io_flag, (buf_t)NULL, &iostate, callback, callback_arg); reset_vector_run_state(); } // We don't need to wait for the I/O to complete - if (lock) + if (lock) { cluster_unlock_direct_read(lock); + } /* * make sure all async reads that are part of this stream @@ -4875,13 +5051,15 @@ wait_for_dreads: */ cluster_iostate_wait(&iostate, 0, "cluster_read_direct"); - if (iostate.io_error) - retval = iostate.io_error; + if (iostate.io_error) { + retval = iostate.io_error; + } lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); - if (io_throttled == TRUE && retval == 0) + if (io_throttled == TRUE && retval == 0) { retval = EAGAIN; + } for (next_iov_base = orig_iov_base; next_iov_base < last_iov_base; next_iov_base += PAGE_SIZE) { /* @@ -4893,7 +5071,7 @@ wait_for_dreads: } if (io_req_size && retval == 0) { - /* + /* * we couldn't handle the tail of this request in DIRECT mode * so fire it through the copy path */ @@ -4911,47 +5089,49 @@ wait_for_dreads: *read_type = IO_UNKNOWN; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_END, - (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0); + (int)uio->uio_offset, (int)uio_resid(uio), io_req_size, retval, 0); - return (retval); + return retval; } static int cluster_read_contig(vnode_t vp, struct uio *uio, off_t filesize, int *read_type, u_int32_t *read_length, - int (*callback)(buf_t, void *), void *callback_arg, int flags) + int (*callback)(buf_t, void *), void *callback_arg, int flags) { upl_page_info_t *pl; upl_t upl[MAX_VECTS]; vm_offset_t upl_offset; - addr64_t dst_paddr = 0; - user_addr_t iov_base; + addr64_t dst_paddr = 0; + user_addr_t iov_base; off_t max_size; - upl_size_t upl_size; - vm_size_t upl_needed_size; - mach_msg_type_number_t pages_in_pl; + upl_size_t upl_size; + vm_size_t upl_needed_size; + mach_msg_type_number_t pages_in_pl; upl_control_flags_t upl_flags; kern_return_t kret; struct clios iostate; - int error= 0; - int cur_upl = 0; - int num_upl = 0; - int n; - u_int32_t xsize; - u_int32_t io_size; - u_int32_t devblocksize; - u_int32_t mem_alignment_mask; - u_int32_t tail_size = 0; + int error = 0; + int cur_upl = 0; + int num_upl = 0; + int n; + u_int32_t xsize; + u_int32_t io_size; + u_int32_t devblocksize; + u_int32_t mem_alignment_mask; + u_int32_t tail_size = 0; int bflag; - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { bflag = CL_PASSIVE; - else + } else { bflag = 0; - - if (flags & IO_NOCACHE) + } + + if (flags & IO_NOCACHE) { bflag |= CL_NOCACHE; - + } + /* * When we enter this routine, we know * -- the read_length will not exceed the current iov_len @@ -4974,8 +5154,9 @@ next_cread: max_size = filesize - uio->uio_offset; - if (io_size > max_size) - io_size = max_size; + if (io_size > max_size) { + io_size = max_size; + } iov_base = uio_curriovbase(uio); @@ -4988,30 +5169,30 @@ next_cread: KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | 
DBG_FUNC_START, - (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0); + (int)upl_offset, (int)upl_size, (int)iov_base, io_size, 0); vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? current_map() : kernel_map; kret = vm_map_get_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), - &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0); + (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + &upl_size, &upl[cur_upl], NULL, &pages_in_pl, &upl_flags, VM_KERN_MEMORY_FILE, 0); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 92)) | DBG_FUNC_END, - (int)upl_offset, upl_size, io_size, kret, 0); + (int)upl_offset, upl_size, io_size, kret, 0); if (kret != KERN_SUCCESS) { - /* + /* * failed to get pagelist */ - error = EINVAL; + error = EINVAL; goto wait_for_creads; } num_upl++; if (upl_size < upl_needed_size) { - /* + /* * The upl_size wasn't satisfied. */ - error = EINVAL; + error = EINVAL; goto wait_for_creads; } pl = ubc_upl_pageinfo(upl[cur_upl]); @@ -5019,17 +5200,19 @@ next_cread: dst_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)upl_offset; while (((uio->uio_offset & (devblocksize - 1)) || io_size < devblocksize) && io_size) { - u_int32_t head_size; + u_int32_t head_size; head_size = devblocksize - (u_int32_t)(uio->uio_offset & (devblocksize - 1)); - if (head_size > io_size) - head_size = io_size; + if (head_size > io_size) { + head_size = io_size; + } error = cluster_align_phys_io(vp, uio, dst_paddr, head_size, CL_READ, callback, callback_arg); - if (error) + if (error) { goto wait_for_creads; + } upl_offset += head_size; dst_paddr += head_size; @@ -5038,14 +5221,14 @@ next_cread: iov_base += head_size; } if ((u_int32_t)iov_base & mem_alignment_mask) { - /* + /* * request doesn't set up on a memory boundary * the underlying DMA engine can handle... * return an error instead of going through * the slow copy path since the intent of this * path is direct I/O to device memory */ - error = EINVAL; + error = EINVAL; goto wait_for_creads; } @@ -5054,11 +5237,11 @@ next_cread: io_size -= tail_size; while (io_size && error == 0) { - - if (io_size > MAX_IO_CONTIG_SIZE) - xsize = MAX_IO_CONTIG_SIZE; - else - xsize = io_size; + if (io_size > MAX_IO_CONTIG_SIZE) { + xsize = MAX_IO_CONTIG_SIZE; + } else { + xsize = io_size; + } /* * request asynchronously so that we can overlap * the preparation of the next I/O... we'll do @@ -5070,23 +5253,23 @@ next_cread: cluster_iostate_wait(&iostate, MAX_IO_CONTIG_SIZE * IO_SCALE(vp, 2), "cluster_read_contig"); if (iostate.io_error) { - /* + /* * one of the earlier reads we issued ran into a hard error * don't issue any more reads... 
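The head-alignment loop in cluster_read_contig above peels the request forward to the next device block boundary one fragment at a time, pushing each fragment through cluster_align_phys_io. Its fragment-size computation, standalone:

    #include <stdint.h>

    static uint32_t
    head_fragment(uint64_t uio_offset, uint32_t io_size, uint32_t devblocksize)
    {
        /* distance from the current offset up to the next block boundary */
        uint32_t head = devblocksize -
            (uint32_t)(uio_offset & (devblocksize - 1));

        return head > io_size ? io_size : head;   /* clip to what remains */
    }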
* go wait for any other reads to complete before * returning the error to the caller */ - goto wait_for_creads; + goto wait_for_creads; } - error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize, - CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag, - (buf_t)NULL, &iostate, callback, callback_arg); - /* + error = cluster_io(vp, upl[cur_upl], upl_offset, uio->uio_offset, xsize, + CL_READ | CL_NOZERO | CL_DEV_MEMORY | CL_ASYNC | bflag, + (buf_t)NULL, &iostate, callback, callback_arg); + /* * The cluster_io read was issued successfully, * update the uio structure */ if (error == 0) { - uio_update(uio, (user_size_t)xsize); + uio_update(uio, (user_size_t)xsize); dst_paddr += xsize; upl_offset += xsize; @@ -5094,15 +5277,15 @@ next_cread: } } if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS && uio->uio_offset < filesize) { + error = cluster_io_type(uio, read_type, read_length, 0); - error = cluster_io_type(uio, read_type, read_length, 0); - if (error == 0 && *read_type == IO_CONTIG) { - cur_upl++; + cur_upl++; goto next_cread; } - } else - *read_type = IO_UNKNOWN; + } else { + *read_type = IO_UNKNOWN; + } wait_for_creads: /* @@ -5111,88 +5294,94 @@ wait_for_creads: */ cluster_iostate_wait(&iostate, 0, "cluster_read_contig"); - if (iostate.io_error) - error = iostate.io_error; + if (iostate.io_error) { + error = iostate.io_error; + } lck_mtx_destroy(&iostate.io_mtxp, cl_mtx_grp); - if (error == 0 && tail_size) - error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg); + if (error == 0 && tail_size) { + error = cluster_align_phys_io(vp, uio, dst_paddr, tail_size, CL_READ, callback, callback_arg); + } - for (n = 0; n < num_upl; n++) - /* + for (n = 0; n < num_upl; n++) { + /* * just release our hold on each physically contiguous * region without changing any state */ - ubc_upl_abort(upl[n], 0); - - return (error); + ubc_upl_abort(upl[n], 0); + } + + return error; } static int cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t min_length) { - user_size_t iov_len; - user_addr_t iov_base = 0; + user_size_t iov_len; + user_addr_t iov_base = 0; upl_t upl; upl_size_t upl_size; upl_control_flags_t upl_flags; - int retval = 0; + int retval = 0; - /* + /* * skip over any emtpy vectors */ - uio_update(uio, (user_size_t)0); + uio_update(uio, (user_size_t)0); iov_len = uio_curriovlen(uio); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_START, uio, (int)iov_len, 0, 0, 0); if (iov_len) { - iov_base = uio_curriovbase(uio); - /* + iov_base = uio_curriovbase(uio); + /* * make sure the size of the vector isn't too big... * internally, we want to handle all of the I/O in * chunk sizes that fit in a 32 bit int */ - if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE) - upl_size = MAX_IO_REQUEST_SIZE; - else - upl_size = (u_int32_t)iov_len; + if (iov_len > (user_size_t)MAX_IO_REQUEST_SIZE) { + upl_size = MAX_IO_REQUEST_SIZE; + } else { + upl_size = (u_int32_t)iov_len; + } upl_flags = UPL_QUERY_OBJECT_TYPE; vm_map_t map = UIO_SEG_IS_USER_SPACE(uio->uio_segflg) ? 
current_map() : kernel_map; if ((vm_map_get_upl(map, - (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), - &upl_size, &upl, NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0)) != KERN_SUCCESS) { - /* + (vm_map_offset_t)(iov_base & ~((user_addr_t)PAGE_MASK)), + &upl_size, &upl, NULL, NULL, &upl_flags, VM_KERN_MEMORY_FILE, 0)) != KERN_SUCCESS) { + /* * the user app must have passed in an invalid address */ - retval = EFAULT; + retval = EFAULT; + } + if (upl_size == 0) { + retval = EFAULT; } - if (upl_size == 0) - retval = EFAULT; *io_length = upl_size; - if (upl_flags & UPL_PHYS_CONTIG) - *io_type = IO_CONTIG; - else if (iov_len >= min_length) - *io_type = IO_DIRECT; - else - *io_type = IO_COPY; + if (upl_flags & UPL_PHYS_CONTIG) { + *io_type = IO_CONTIG; + } else if (iov_len >= min_length) { + *io_type = IO_DIRECT; + } else { + *io_type = IO_COPY; + } } else { - /* + /* * nothing left to do for this uio */ - *io_length = 0; + *io_length = 0; *io_type = IO_UNKNOWN; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 94)) | DBG_FUNC_END, iov_base, *io_type, *io_length, retval, 0); - return (retval); + return retval; } @@ -5203,7 +5392,7 @@ cluster_io_type(struct uio *uio, int *io_type, u_int32_t *io_length, u_int32_t m int advisory_read(vnode_t vp, off_t filesize, off_t f_offset, int resid) { - return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE); + return advisory_read_ext(vp, filesize, f_offset, resid, NULL, NULL, CL_PASSIVE); } int @@ -5212,11 +5401,11 @@ advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*c upl_page_info_t *pl; upl_t upl; vm_offset_t upl_offset; - int upl_size; - off_t upl_f_offset; - int start_offset; - int start_pg; - int last_pg; + int upl_size; + off_t upl_f_offset; + int start_offset; + int start_pg; + int last_pg; int pages_in_upl; off_t max_size; int io_size; @@ -5224,29 +5413,33 @@ advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*c int retval = 0; int issued_io; int skip_range; - uint32_t max_io_size; + uint32_t max_io_size; - if ( !UBCINFOEXISTS(vp)) - return(EINVAL); + if (!UBCINFOEXISTS(vp)) { + return EINVAL; + } - if (resid < 0) - return(EINVAL); + if (resid < 0) { + return EINVAL; + } max_io_size = cluster_max_io_size(vp->v_mount, CL_READ); #if CONFIG_EMBEDDED - if (max_io_size > speculative_prefetch_max_iosize) + if (max_io_size > speculative_prefetch_max_iosize) { max_io_size = speculative_prefetch_max_iosize; + } #else if (disk_conditioner_mount_is_ssd(vp->v_mount)) { - if (max_io_size > speculative_prefetch_max_iosize) + if (max_io_size > speculative_prefetch_max_iosize) { max_io_size = speculative_prefetch_max_iosize; + } } #endif KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START, - (int)f_offset, resid, (int)filesize, 0, 0); + (int)f_offset, resid, (int)filesize, 0, 0); while (resid && f_offset < filesize && retval == 0) { /* @@ -5254,21 +5447,23 @@ advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*c * the requested read... limit each call to cluster_io * to the maximum UPL size... 
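cluster_io_type, completed just above, classifies each uio vector before a read engine is chosen: physically contiguous buffers take the contig engine, vectors at least min_length long go direct, and short ones fall back to the cached copy path. The decision table as a pure function:

    #include <stdint.h>

    enum io_path { IO_PATH_UNKNOWN, IO_PATH_COPY, IO_PATH_CONTIG, IO_PATH_DIRECT };

    static enum io_path
    classify_iov(int phys_contig, uint64_t iov_len, uint32_t min_length)
    {
        if (iov_len == 0) {
            return IO_PATH_UNKNOWN;   /* nothing left in this uio */
        }
        if (phys_contig) {
            return IO_PATH_CONTIG;
        }
        return iov_len >= min_length ? IO_PATH_DIRECT : IO_PATH_COPY;
    }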
cluster_io will clip if * this exceeds the maximum io_size for the device, - * make sure to account for + * make sure to account for * a starting offset that's not page aligned */ start_offset = (int)(f_offset & PAGE_MASK_64); upl_f_offset = f_offset - (off_t)start_offset; max_size = filesize - f_offset; - if (resid < max_size) - io_size = resid; - else - io_size = max_size; + if (resid < max_size) { + io_size = resid; + } else { + io_size = max_size; + } upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK; - if ((uint32_t)upl_size > max_io_size) - upl_size = max_io_size; + if ((uint32_t)upl_size > max_io_size) { + upl_size = max_io_size; + } skip_range = 0; /* @@ -5278,16 +5473,17 @@ advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*c ubc_range_op(vp, upl_f_offset, upl_f_offset + upl_size, UPL_ROP_PRESENT, &skip_range); if (skip_range) { - /* + /* * skip over pages already present in the cache */ - io_size = skip_range - start_offset; + io_size = skip_range - start_offset; - f_offset += io_size; + f_offset += io_size; resid -= io_size; - if (skip_range == upl_size) - continue; + if (skip_range == upl_size) { + continue; + } /* * have to issue some real I/O * at this point, we know it's starting on a page boundary @@ -5300,102 +5496,109 @@ advisory_read_ext(vnode_t vp, off_t filesize, off_t f_offset, int resid, int (*c pages_in_upl = upl_size / PAGE_SIZE; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_START, - upl, (int)upl_f_offset, upl_size, start_offset, 0); + upl, (int)upl_f_offset, upl_size, start_offset, 0); kret = ubc_create_upl_kernel(vp, - upl_f_offset, - upl_size, - &upl, - &pl, - UPL_RET_ONLY_ABSENT | UPL_SET_LITE, - VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) - return(retval); + upl_f_offset, + upl_size, + &upl, + &pl, + UPL_RET_ONLY_ABSENT | UPL_SET_LITE, + VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { + return retval; + } issued_io = 0; /* - * before we start marching forward, we must make sure we end on + * before we start marching forward, we must make sure we end on * a present page, otherwise we will be working with a freed * upl */ for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) { - if (upl_page_present(pl, last_pg)) - break; + if (upl_page_present(pl, last_pg)) { + break; + } } pages_in_upl = last_pg + 1; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 61)) | DBG_FUNC_END, - upl, (int)upl_f_offset, upl_size, start_offset, 0); + upl, (int)upl_f_offset, upl_size, start_offset, 0); - for (last_pg = 0; last_pg < pages_in_upl; ) { - /* + for (last_pg = 0; last_pg < pages_in_upl;) { + /* * scan from the beginning of the upl looking for the first * page that is present.... this will become the first page in * the request we're going to make to 'cluster_io'... 
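The scan that straddles this point walks a UPL created with UPL_RET_ONLY_ABSENT, so a page present in the UPL is one the cache did not hold, and each maximal run of present pages becomes one asynchronous cluster_io. A model over a plain presence bitmap; returns false when no run remains:

    #include <stdbool.h>

    static bool
    next_present_run(const bool *present, int npages, int from,
        int *start, int *end)
    {
        int i = from;

        while (i < npages && !present[i]) {
            i++;                      /* skip pages the cache already had */
        }
        if (i >= npages) {
            return false;
        }
        *start = i;
        while (i < npages && present[i]) {
            i++;                      /* extend through the run needing I/O */
        }
        *end = i;
        return true;
    }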
if all * of the pages are absent, we won't call through to 'cluster_io' */ - for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) { - if (upl_page_present(pl, start_pg)) - break; + for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) { + if (upl_page_present(pl, start_pg)) { + break; + } } /* * scan from the starting present page looking for an absent - * page before the end of the upl is reached, if we + * page before the end of the upl is reached, if we * find one, then it will terminate the range of pages being * presented to 'cluster_io' */ for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) { - if (!upl_page_present(pl, last_pg)) - break; + if (!upl_page_present(pl, last_pg)) { + break; + } } - if (last_pg > start_pg) { - /* + if (last_pg > start_pg) { + /* * we found a range of pages that must be filled * if the last page in this range is the last page of the file * we may have to clip the size of it to keep from reading past * the end of the last physical block associated with the file */ - upl_offset = start_pg * PAGE_SIZE; + upl_offset = start_pg * PAGE_SIZE; io_size = (last_pg - start_pg) * PAGE_SIZE; - if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) - io_size = filesize - (upl_f_offset + upl_offset); + if ((off_t)(upl_f_offset + upl_offset + io_size) > filesize) { + io_size = filesize - (upl_f_offset + upl_offset); + } /* * issue an asynchronous read to cluster_io */ retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, - CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); + CL_ASYNC | CL_READ | CL_COMMIT | CL_AGE | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); issued_io = 1; } } - if (issued_io == 0) - ubc_upl_abort(upl, 0); + if (issued_io == 0) { + ubc_upl_abort(upl, 0); + } io_size = upl_size - start_offset; - - if (io_size > resid) - io_size = resid; + + if (io_size > resid) { + io_size = resid; + } f_offset += io_size; resid -= io_size; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_END, - (int)f_offset, resid, retval, 0, 0); + (int)f_offset, resid, retval, 0, 0); - return(retval); + return retval; } int cluster_push(vnode_t vp, int flags) { - return cluster_push_ext(vp, flags, NULL, NULL); + return cluster_push_ext(vp, flags, NULL, NULL); } @@ -5409,34 +5612,35 @@ cluster_push_ext(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *ca int cluster_push_err(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *callback_arg, int *err) { - int retval; - int my_sparse_wait = 0; - struct cl_writebehind *wbp; - int local_err = 0; + int retval; + int my_sparse_wait = 0; + struct cl_writebehind *wbp; + int local_err = 0; - if (err) + if (err) { *err = 0; + } - if ( !UBCINFOEXISTS(vp)) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -1, 0); - return (0); + if (!UBCINFOEXISTS(vp)) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -1, 0); + return 0; } /* return if deferred write is set */ if (((unsigned int)vfs_flags(vp->v_mount) & MNT_DEFWRITE) && (flags & IO_DEFWRITE)) { - return (0); + return 0; } if ((wbp = cluster_get_wbp(vp, CLW_RETURNLOCKED)) == NULL) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -2, 0); - return (0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -2, 0); + return 0; } if (!ISSET(flags, IO_SYNC) && wbp->cl_number == 0 && wbp->cl_scmap == 
NULL) { - lck_mtx_unlock(&wbp->cl_lockw); + lck_mtx_unlock(&wbp->cl_lockw); - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -3, 0); - return(0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_NONE, kdebug_vnode(vp), flags, 0, -3, 0); + return 0; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_START, - wbp->cl_scmap, wbp->cl_number, flags, 0, 0); + wbp->cl_scmap, wbp->cl_number, flags, 0, 0); /* * if we have an fsync in progress, we don't want to allow any additional @@ -5471,10 +5675,9 @@ cluster_push_err(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *ca } } if (wbp->cl_scmap) { - void *scmap; + void *scmap; if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) { - scmap = wbp->cl_scmap; wbp->cl_scmap = NULL; @@ -5495,27 +5698,31 @@ cluster_push_err(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *ca wbp->cl_scmap = scmap; } - - if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0) + + if (wbp->cl_sparse_wait && wbp->cl_sparse_pushes == 0) { wakeup((caddr_t)&wbp->cl_sparse_pushes); + } } else { - retval = sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE); + retval = sparse_cluster_push(wbp, &(wbp->cl_scmap), vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, FALSE); } local_err = retval; - if (err) + if (err) { *err = retval; + } retval = 1; } else { retval = cluster_try_push(wbp, vp, ubc_getsize(vp), PUSH_ALL, flags, callback, callback_arg, &local_err, FALSE); - if (err) + if (err) { *err = local_err; + } } lck_mtx_unlock(&wbp->cl_lockw); - if (flags & IO_SYNC) - (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push"); + if (flags & IO_SYNC) { + (void)vnode_waitforwrites(vp, 0, 0, 0, "cluster_push"); + } if (my_sparse_wait) { /* @@ -5531,37 +5738,37 @@ cluster_push_err(vnode_t vp, int flags, int (*callback)(buf_t, void *), void *ca lck_mtx_unlock(&wbp->cl_lockw); } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 53)) | DBG_FUNC_END, - wbp->cl_scmap, wbp->cl_number, retval, local_err, 0); + wbp->cl_scmap, wbp->cl_number, retval, local_err, 0); - return (retval); + return retval; } __private_extern__ void cluster_release(struct ubc_info *ubc) { - struct cl_writebehind *wbp; + struct cl_writebehind *wbp; struct cl_readahead *rap; if ((wbp = ubc->cl_wbehind)) { + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0); - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, wbp->cl_scmap, 0, 0, 0); - - if (wbp->cl_scmap) - vfs_drt_control(&(wbp->cl_scmap), 0); + if (wbp->cl_scmap) { + vfs_drt_control(&(wbp->cl_scmap), 0); + } } else { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 81)) | DBG_FUNC_START, ubc, 0, 0, 0, 0); } rap = ubc->cl_rahead; if (wbp != NULL) { - lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp); - FREE_ZONE((void *)wbp, sizeof *wbp, M_CLWRBEHIND); + lck_mtx_destroy(&wbp->cl_lockw, cl_mtx_grp); + FREE_ZONE(wbp, sizeof *wbp, M_CLWRBEHIND); } if ((rap = ubc->cl_rahead)) { - lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp); - FREE_ZONE((void *)rap, sizeof *rap, M_CLRDAHEAD); + lck_mtx_destroy(&rap->cl_lockr, cl_mtx_grp); + FREE_ZONE(rap, sizeof *rap, M_CLRDAHEAD); } ubc->cl_rahead = NULL; ubc->cl_wbehind = NULL; @@ -5573,10 +5780,10 @@ cluster_release(struct ubc_info *ubc) static int cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_flag, int io_flags, int (*callback)(buf_t, void *), void *callback_arg, int *err, boolean_t 
vm_initiated) { - int cl_index; + int cl_index; int cl_index1; int min_index; - int cl_len; + int cl_len; int cl_pushed = 0; struct cl_wextent l_clusters[MAX_CLUSTERS]; u_int max_cluster_pgcount; @@ -5587,45 +5794,49 @@ cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_fla * the write behind context exists and has * already been locked... */ - if (wbp->cl_number == 0) - /* + if (wbp->cl_number == 0) { + /* * no clusters to push * return number of empty slots */ - return (MAX_CLUSTERS); - + return MAX_CLUSTERS; + } + /* * make a local 'sorted' copy of the clusters * and clear wbp->cl_number so that new clusters can * be developed */ for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) { - for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) { - if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr) - continue; - if (min_index == -1) - min_index = cl_index1; - else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr) - min_index = cl_index1; - } - if (min_index == -1) - break; - - l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr; + for (min_index = -1, cl_index1 = 0; cl_index1 < wbp->cl_number; cl_index1++) { + if (wbp->cl_clusters[cl_index1].b_addr == wbp->cl_clusters[cl_index1].e_addr) { + continue; + } + if (min_index == -1) { + min_index = cl_index1; + } else if (wbp->cl_clusters[cl_index1].b_addr < wbp->cl_clusters[min_index].b_addr) { + min_index = cl_index1; + } + } + if (min_index == -1) { + break; + } + + l_clusters[cl_index].b_addr = wbp->cl_clusters[min_index].b_addr; l_clusters[cl_index].e_addr = wbp->cl_clusters[min_index].e_addr; l_clusters[cl_index].io_flags = wbp->cl_clusters[min_index].io_flags; - wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr; + wbp->cl_clusters[min_index].b_addr = wbp->cl_clusters[min_index].e_addr; } wbp->cl_number = 0; cl_len = cl_index; /* skip switching to the sparse cluster mechanism if on diskimage */ - if ( ((push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS ) && - !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) ) { + if (((push_flag & PUSH_DELAY) && cl_len == MAX_CLUSTERS) && + !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV)) { int i; - + /* * determine if we appear to be writing the file sequentially * if not, by returning without having pushed any clusters @@ -5640,39 +5851,45 @@ cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_fla * so we can just make a simple pass through, up to, but not including the last one... * note that e_addr is not inclusive, so it will be equal to the b_addr of the next cluster if they * are sequential - * + * * we let the last one be partial as long as it was adjacent to the previous one... * we need to do this to deal with multi-threaded servers that might write an I/O or 2 out * of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world... 
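The loop that opens the next hunk implements the sequential-writer test described above: the delayed clusters count as sequential only if every cluster but the last is full sized and abuts the next one exactly (e_addr is exclusive, as the comment notes). In isolation:

    #include <stdbool.h>
    #include <stdint.h>

    struct extent_ { uint64_t b_addr, e_addr; };  /* page numbers, e_addr exclusive */

    static bool
    clusters_look_sequential(const struct extent_ *cl, int n,
        uint64_t max_pgcount)
    {
        for (int i = 0; i < n - 1; i++) {
            if (cl[i].e_addr - cl[i].b_addr != max_pgcount) {
                return false;   /* a short cluster breaks the pattern */
            }
            if (cl[i].e_addr != cl[i + 1].b_addr) {
                return false;   /* a gap breaks the pattern */
            }
        }
        return true;
    }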
*/ for (i = 0; i < MAX_CLUSTERS - 1; i++) { - if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount) - goto dont_try; - if (l_clusters[i].e_addr != l_clusters[i+1].b_addr) - goto dont_try; + if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount) { + goto dont_try; + } + if (l_clusters[i].e_addr != l_clusters[i + 1].b_addr) { + goto dont_try; + } } } - if (vm_initiated == TRUE) + if (vm_initiated == TRUE) { lck_mtx_unlock(&wbp->cl_lockw); + } for (cl_index = 0; cl_index < cl_len; cl_index++) { - int flags; - struct cl_extent cl; + int flags; + struct cl_extent cl; int retval; - flags = io_flags & (IO_PASSIVE|IO_CLOSE); + flags = io_flags & (IO_PASSIVE | IO_CLOSE); - /* + /* * try to push each cluster in turn... */ - if (l_clusters[cl_index].io_flags & CLW_IONOCACHE) - flags |= IO_NOCACHE; + if (l_clusters[cl_index].io_flags & CLW_IONOCACHE) { + flags |= IO_NOCACHE; + } - if (l_clusters[cl_index].io_flags & CLW_IOPASSIVE) - flags |= IO_PASSIVE; + if (l_clusters[cl_index].io_flags & CLW_IOPASSIVE) { + flags |= IO_PASSIVE; + } - if (push_flag & PUSH_SYNC) - flags |= IO_SYNC; + if (push_flag & PUSH_SYNC) { + flags |= IO_SYNC; + } cl.b_addr = l_clusters[cl_index].b_addr; cl.e_addr = l_clusters[cl_index].e_addr; @@ -5688,23 +5905,26 @@ cluster_try_push(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int push_fla error = retval; } - if ( !(push_flag & PUSH_ALL) ) - break; + if (!(push_flag & PUSH_ALL)) { + break; + } } - if (vm_initiated == TRUE) + if (vm_initiated == TRUE) { lck_mtx_lock(&wbp->cl_lockw); + } - if (err) + if (err) { *err = error; + } dont_try: if (cl_len > cl_pushed) { - /* - * we didn't push all of the clusters, so - * lets try to merge them back in to the vnode - */ - if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) { - /* + /* + * we didn't push all of the clusters, so + * lets try to merge them back in to the vnode + */ + if ((MAX_CLUSTERS - wbp->cl_number) < (cl_len - cl_pushed)) { + /* * we picked up some new clusters while we were trying to * push the old ones... this can happen because I've dropped * the vnode lock... the sum of the @@ -5713,12 +5933,13 @@ dont_try: * * collect the active public clusters... 
*/ - sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated); + sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated); - for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) { - if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) - continue; - wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr; + for (cl_index = 0, cl_index1 = 0; cl_index < cl_len; cl_index++) { + if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) { + continue; + } + wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr; wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr; wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags; @@ -5729,23 +5950,23 @@ dont_try: */ wbp->cl_number = cl_index1; - /* - * and collect the original clusters that were moved into the + /* + * and collect the original clusters that were moved into the * local storage for sorting purposes */ - sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated); - + sparse_cluster_switch(wbp, vp, EOF, callback, callback_arg, vm_initiated); } else { - /* + /* * we've got room to merge the leftovers back in * just append them starting at the next 'hole' * represented by wbp->cl_number */ - for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) { - if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) - continue; + for (cl_index = 0, cl_index1 = wbp->cl_number; cl_index < cl_len; cl_index++) { + if (l_clusters[cl_index].b_addr == l_clusters[cl_index].e_addr) { + continue; + } - wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr; + wbp->cl_clusters[cl_index1].b_addr = l_clusters[cl_index].b_addr; wbp->cl_clusters[cl_index1].e_addr = l_clusters[cl_index].e_addr; wbp->cl_clusters[cl_index1].io_flags = l_clusters[cl_index].io_flags; @@ -5757,21 +5978,21 @@ dont_try: wbp->cl_number = cl_index1; } } - return (MAX_CLUSTERS - wbp->cl_number); + return MAX_CLUSTERS - wbp->cl_number; } static int cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, - int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) + int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) { upl_page_info_t *pl; upl_t upl; vm_offset_t upl_offset; int upl_size; - off_t upl_f_offset; - int pages_in_upl; + off_t upl_f_offset; + int pages_in_upl; int start_pg; int last_pg; int io_size; @@ -5783,79 +6004,83 @@ cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, int retval; kern_return_t kret; - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { bflag = CL_PASSIVE; - else + } else { bflag = 0; + } - if (flags & IO_SKIP_ENCRYPTION) + if (flags & IO_SKIP_ENCRYPTION) { bflag |= CL_ENCRYPTED; + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_START, - (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0); + (int)cl->b_addr, (int)cl->e_addr, (int)EOF, flags, 0); if ((pages_in_upl = (int)(cl->e_addr - cl->b_addr)) == 0) { - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 0, 0, 0, 0); - return (0); + return 0; } upl_size = pages_in_upl * PAGE_SIZE; upl_f_offset = (off_t)(cl->b_addr * PAGE_SIZE_64); if (upl_f_offset + upl_size >= EOF) { - - if (upl_f_offset >= EOF) { - /* - * must have truncated the file and missed + if (upl_f_offset >= EOF) { + /* + * must have truncated the file and missed * clearing a dangling cluster (i.e. 
it's completely * beyond the new EOF */ - KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0); + KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 1, 0, 0, 0); - return(0); + return 0; } size = EOF - upl_f_offset; upl_size = (size + (PAGE_SIZE - 1)) & ~PAGE_MASK; pages_in_upl = upl_size / PAGE_SIZE; - } else - size = upl_size; + } else { + size = upl_size; + } if (vm_initiated) { - vnode_pageout(vp, NULL, (upl_offset_t)0, upl_f_offset, (upl_size_t)upl_size, - UPL_MSYNC | UPL_VNODE_PAGER | UPL_KEEPCACHED, &error); + vnode_pageout(vp, NULL, (upl_offset_t)0, upl_f_offset, (upl_size_t)upl_size, + UPL_MSYNC | UPL_VNODE_PAGER | UPL_KEEPCACHED, &error); - return (error); + return error; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_START, upl_size, size, 0, 0, 0); /* * by asking for UPL_COPYOUT_FROM and UPL_RET_ONLY_DIRTY, we get the following desirable behavior - * + * * - only pages that are currently dirty are returned... these are the ones we need to clean * - the hardware dirty bit is cleared when the page is gathered into the UPL... the software dirty bit is set * - if we have to abort the I/O for some reason, the software dirty bit is left set since we didn't clean the page - * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if + * - when we commit the page, the software dirty bit is cleared... the hardware dirty bit is untouched so that if * someone dirties this page while the I/O is in progress, we don't lose track of the new state * * when the I/O completes, we no longer ask for an explicit clear of the DIRTY state (either soft or hard) */ - if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE)) - upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED; - else - upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE; + if ((vp->v_flag & VNOCACHE_DATA) || (flags & IO_NOCACHE)) { + upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE | UPL_WILL_BE_DUMPED; + } else { + upl_flags = UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY | UPL_SET_LITE; + } kret = ubc_create_upl_kernel(vp, - upl_f_offset, - upl_size, - &upl, - &pl, - upl_flags, - VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) - panic("cluster_push: failed to get pagelist"); + upl_f_offset, + upl_size, + &upl, + &pl, + upl_flags, + VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { + panic("cluster_push: failed to get pagelist"); + } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 41)) | DBG_FUNC_END, upl, upl_f_offset, 0, 0, 0); @@ -5868,52 +6093,58 @@ cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, * employed by commit_range and abort_range. 
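The backwards scan that follows trims trailing absent pages first, for exactly the reason given above: once they are dropped, the later commit and abort ranges can never run past the last present page into a UPL they have already freed. Modeled over a presence bitmap:

    #include <stdbool.h>

    static int
    trim_to_last_present(const bool *present, int pages_in_upl)
    {
        int last_pg;

        for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) {
            if (present[last_pg]) {
                break;
            }
        }
        return last_pg + 1;   /* new page count; 0 means nothing to push */
    }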
*/ for (last_pg = pages_in_upl - 1; last_pg >= 0; last_pg--) { - if (upl_page_present(pl, last_pg)) - break; + if (upl_page_present(pl, last_pg)) { + break; + } } pages_in_upl = last_pg + 1; if (pages_in_upl == 0) { - ubc_upl_abort(upl, 0); + ubc_upl_abort(upl, 0); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 2, 0, 0, 0); - return(0); - } + return 0; + } - for (last_pg = 0; last_pg < pages_in_upl; ) { - /* + for (last_pg = 0; last_pg < pages_in_upl;) { + /* * find the next dirty page in the UPL - * this will become the first page in the + * this will become the first page in the * next I/O to generate */ for (start_pg = last_pg; start_pg < pages_in_upl; start_pg++) { - if (upl_dirty_page(pl, start_pg)) + if (upl_dirty_page(pl, start_pg)) { break; - if (upl_page_present(pl, start_pg)) - /* + } + if (upl_page_present(pl, start_pg)) { + /* * RET_ONLY_DIRTY will return non-dirty 'precious' pages * just release these unchanged since we're not going * to steal them or change their state */ - ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + } } - if (start_pg >= pages_in_upl) - /* + if (start_pg >= pages_in_upl) { + /* * done... no more dirty pages to push */ - break; - if (start_pg > last_pg) - /* + break; + } + if (start_pg > last_pg) { + /* * skipped over some non-dirty pages */ size -= ((start_pg - last_pg) * PAGE_SIZE); + } /* * find a range of dirty pages to write */ for (last_pg = start_pg; last_pg < pages_in_upl; last_pg++) { - if (!upl_dirty_page(pl, last_pg)) + if (!upl_dirty_page(pl, last_pg)) { break; + } } upl_offset = start_pg * PAGE_SIZE; @@ -5921,26 +6152,30 @@ cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, io_flags = CL_THROTTLE | CL_COMMIT | CL_AGE | bflag; - if ( !(flags & IO_SYNC)) - io_flags |= CL_ASYNC; + if (!(flags & IO_SYNC)) { + io_flags |= CL_ASYNC; + } - if (flags & IO_CLOSE) - io_flags |= CL_CLOSE; + if (flags & IO_CLOSE) { + io_flags |= CL_CLOSE; + } - if (flags & IO_NOCACHE) + if (flags & IO_NOCACHE) { io_flags |= CL_NOCACHE; + } retval = cluster_io(vp, upl, upl_offset, upl_f_offset + upl_offset, io_size, - io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); + io_flags, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); - if (error == 0 && retval) - error = retval; + if (error == 0 && retval) { + error = retval; + } size -= io_size; } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 51)) | DBG_FUNC_END, 1, 3, error, 0, 0); - return(error); + return error; } @@ -5950,22 +6185,21 @@ cluster_push_now(vnode_t vp, struct cl_extent *cl, off_t EOF, int flags, static int sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) { - int cl_index; - int error; + int cl_index; + int error; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 78)) | DBG_FUNC_START, kdebug_vnode(vp), wbp->cl_scmap, wbp->cl_number, 0, 0); for (cl_index = 0; cl_index < wbp->cl_number; cl_index++) { - int flags; + int flags; struct cl_extent cl; - for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) { - - if (ubc_page_op(vp, (off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) { - if (flags & UPL_POP_DIRTY) { - cl.e_addr = cl.b_addr + 1; + for (cl.b_addr = wbp->cl_clusters[cl_index].b_addr; cl.b_addr < wbp->cl_clusters[cl_index].e_addr; cl.b_addr++) { + if (ubc_page_op(vp, 
(off_t)(cl.b_addr * PAGE_SIZE_64), 0, NULL, &flags) == KERN_SUCCESS) { + if (flags & UPL_POP_DIRTY) { + cl.e_addr = cl.b_addr + 1; - error = sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg, vm_initiated); + error = sparse_cluster_add(wbp, &(wbp->cl_scmap), vp, &cl, EOF, callback, callback_arg, vm_initiated); if (error) { break; @@ -5989,42 +6223,47 @@ sparse_cluster_switch(struct cl_writebehind *wbp, vnode_t vp, off_t EOF, int (*c */ static int sparse_cluster_push(struct cl_writebehind *wbp, void **scmap, vnode_t vp, off_t EOF, int push_flag, - int io_flags, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) + int io_flags, int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) { - struct cl_extent cl; - off_t offset; - u_int length; + struct cl_extent cl; + off_t offset; + u_int length; void *l_scmap; int error = 0; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_START, kdebug_vnode(vp), (*scmap), 0, push_flag, 0); - if (push_flag & PUSH_ALL) - vfs_drt_control(scmap, 1); + if (push_flag & PUSH_ALL) { + vfs_drt_control(scmap, 1); + } l_scmap = *scmap; for (;;) { int retval; - if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS) + if (vfs_drt_get_cluster(scmap, &offset, &length) != KERN_SUCCESS) { break; + } - if (vm_initiated == TRUE) - lck_mtx_unlock(&wbp->cl_lockw); + if (vm_initiated == TRUE) { + lck_mtx_unlock(&wbp->cl_lockw); + } cl.b_addr = (daddr64_t)(offset / PAGE_SIZE_64); cl.e_addr = (daddr64_t)((offset + length) / PAGE_SIZE_64); retval = cluster_push_now(vp, &cl, EOF, io_flags, callback, callback_arg, vm_initiated); - if (error == 0 && retval) + if (error == 0 && retval) { error = retval; + } if (vm_initiated == TRUE) { - lck_mtx_lock(&wbp->cl_lockw); + lck_mtx_lock(&wbp->cl_lockw); - if (*scmap != l_scmap) - break; + if (*scmap != l_scmap) { + break; + } } if (error) { @@ -6035,8 +6274,8 @@ sparse_cluster_push(struct cl_writebehind *wbp, void **scmap, vnode_t vp, off_t break; } - if ( !(push_flag & PUSH_ALL)) { - break; + if (!(push_flag & PUSH_ALL)) { + break; } } KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 79)) | DBG_FUNC_END, kdebug_vnode(vp), (*scmap), error, 0, 0); @@ -6050,12 +6289,13 @@ sparse_cluster_push(struct cl_writebehind *wbp, void **scmap, vnode_t vp, off_t */ static int sparse_cluster_add(struct cl_writebehind *wbp, void **scmap, vnode_t vp, struct cl_extent *cl, off_t EOF, - int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) + int (*callback)(buf_t, void *), void *callback_arg, boolean_t vm_initiated) { - u_int new_dirty; - u_int length; - off_t offset; - int error; + u_int new_dirty; + u_int length; + off_t offset; + int error; + int push_flag = 0; /* Is this a valid value? 
*/ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 80)) | DBG_FUNC_START, (*scmap), 0, cl->b_addr, (int)cl->e_addr, 0); @@ -6063,12 +6303,17 @@ sparse_cluster_add(struct cl_writebehind *wbp, void **scmap, vnode_t vp, struct length = ((u_int)(cl->e_addr - cl->b_addr)) * PAGE_SIZE; while (vfs_drt_mark_pages(scmap, offset, length, &new_dirty) != KERN_SUCCESS) { - /* + /* * no room left in the map * only a partial update was done * push out some pages and try again */ - error = sparse_cluster_push(wbp, scmap, vp, EOF, 0, 0, callback, callback_arg, vm_initiated); + + if (vfs_get_scmap_push_behavior_internal(scmap, &push_flag)) { + push_flag = 0; + } + + error = sparse_cluster_push(wbp, scmap, vp, EOF, push_flag, 0, callback, callback_arg, vm_initiated); if (error) { break; @@ -6086,27 +6331,29 @@ sparse_cluster_add(struct cl_writebehind *wbp, void **scmap, vnode_t vp, struct static int cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t xsize, int flags, int (*callback)(buf_t, void *), void *callback_arg) { - upl_page_info_t *pl; - upl_t upl; - addr64_t ubc_paddr; - kern_return_t kret; - int error = 0; - int did_read = 0; - int abort_flags; - int upl_flags; + upl_page_info_t *pl; + upl_t upl; + addr64_t ubc_paddr; + kern_return_t kret; + int error = 0; + int did_read = 0; + int abort_flags; + int upl_flags; int bflag; - if (flags & IO_PASSIVE) + if (flags & IO_PASSIVE) { bflag = CL_PASSIVE; - else + } else { bflag = 0; + } - if (flags & IO_NOCACHE) + if (flags & IO_NOCACHE) { bflag |= CL_NOCACHE; + } upl_flags = UPL_SET_LITE; - if ( !(flags & CL_READ) ) { + if (!(flags & CL_READ)) { /* * "write" operation: let the UPL subsystem know * that we intend to modify the buffer cache pages @@ -6114,38 +6361,39 @@ cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t */ upl_flags |= UPL_WILL_MODIFY; } else { - /* + /* * indicate that there is no need to pull the * mapping for this page... we're only going * to read from it, not modify it. */ upl_flags |= UPL_FILE_IO; } - kret = ubc_create_upl_kernel(vp, - uio->uio_offset & ~PAGE_MASK_64, - PAGE_SIZE, - &upl, - &pl, - upl_flags, - VM_KERN_MEMORY_FILE); - - if (kret != KERN_SUCCESS) - return(EINVAL); - - if (!upl_valid_page(pl, 0)) { - /* - * issue a synchronous read to cluster_io - */ - error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, - CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); - if (error) { - ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); - - return(error); - } + kret = ubc_create_upl_kernel(vp, + uio->uio_offset & ~PAGE_MASK_64, + PAGE_SIZE, + &upl, + &pl, + upl_flags, + VM_KERN_MEMORY_FILE); + + if (kret != KERN_SUCCESS) { + return EINVAL; + } + + if (!upl_valid_page(pl, 0)) { + /* + * issue a synchronous read to cluster_io + */ + error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, + CL_READ | bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); + if (error) { + ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY); + + return error; + } did_read = 1; - } - ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)(uio->uio_offset & PAGE_MASK_64); + } + ubc_paddr = ((addr64_t)upl_phys_page(pl, 0) << PAGE_SHIFT) + (addr64_t)(uio->uio_offset & PAGE_MASK_64); /* * NOTE: There is no prototype for the following in BSD. 
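sparse_cluster_add, just above, is a retry loop with backpressure: vfs_drt_mark_pages fails with a partial update when the map is full, a push makes room, and the mark is retried; the push_flag probe is the behavior this patch adds. The loop's shape with the DRT calls abstracted to callbacks:

    #include <stdbool.h>
    #include <stdint.h>

    static int
    mark_with_backpressure(void *scmap, uint64_t offset, uint32_t length,
        bool (*mark_pages)(void *, uint64_t, uint32_t),
        int (*push_some)(void *, int), int push_flag)
    {
        /* a false return models vfs_drt_mark_pages() != KERN_SUCCESS */
        while (!mark_pages(scmap, offset, length)) {
            int error = push_some(scmap, push_flag);  /* make room, retry */
            if (error) {
                return error;
            }
        }
        return 0;
    }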
It, and the definitions @@ -6153,73 +6401,73 @@ cluster_align_phys_io(vnode_t vp, struct uio *uio, addr64_t usr_paddr, u_int32_t * osfmk/ppc/mappings.h. They are not included here because there appears to be no * way to do so without exporting them to kexts as well. */ - if (flags & CL_READ) + if (flags & CL_READ) { // copypv(ubc_paddr, usr_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsnk); /* Copy physical to physical and flush the destination */ - copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */ - else + copypv(ubc_paddr, usr_paddr, xsize, 2 | 1 | 4); /* Copy physical to physical and flush the destination */ + } else { // copypv(usr_paddr, ubc_paddr, xsize, cppvPsrc | cppvPsnk | cppvFsrc); /* Copy physical to physical and flush the source */ - copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */ - - if ( !(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) { - /* + copypv(usr_paddr, ubc_paddr, xsize, 2 | 1 | 8); /* Copy physical to physical and flush the source */ + } + if (!(flags & CL_READ) || (upl_valid_page(pl, 0) && upl_dirty_page(pl, 0))) { + /* * issue a synchronous write to cluster_io */ error = cluster_io(vp, upl, 0, uio->uio_offset & ~PAGE_MASK_64, PAGE_SIZE, - bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); + bflag, (buf_t)NULL, (struct clios *)NULL, callback, callback_arg); + } + if (error == 0) { + uio_update(uio, (user_size_t)xsize); } - if (error == 0) - uio_update(uio, (user_size_t)xsize); - if (did_read) - abort_flags = UPL_ABORT_FREE_ON_EMPTY; - else - abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES; + if (did_read) { + abort_flags = UPL_ABORT_FREE_ON_EMPTY; + } else { + abort_flags = UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_DUMP_PAGES; + } ubc_upl_abort_range(upl, 0, PAGE_SIZE, abort_flags); - - return (error); + + return error; } int cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid) { - int pg_offset; + int pg_offset; int pg_index; - int csize; + int csize; int segflg; int retval = 0; - int xsize; + int xsize; upl_page_info_t *pl; - int dirty_count; + int dirty_count; xsize = *io_resid; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START, - (int)uio->uio_offset, upl_offset, xsize, 0, 0); + (int)uio->uio_offset, upl_offset, xsize, 0, 0); segflg = uio->uio_segflg; - switch(segflg) { - - case UIO_USERSPACE32: - case UIO_USERISPACE32: + switch (segflg) { + case UIO_USERSPACE32: + case UIO_USERISPACE32: uio->uio_segflg = UIO_PHYS_USERSPACE32; break; - case UIO_USERSPACE: - case UIO_USERISPACE: + case UIO_USERSPACE: + case UIO_USERISPACE: uio->uio_segflg = UIO_PHYS_USERSPACE; break; - case UIO_USERSPACE64: - case UIO_USERISPACE64: + case UIO_USERSPACE64: + case UIO_USERISPACE64: uio->uio_segflg = UIO_PHYS_USERSPACE64; break; - case UIO_SYSSPACE: + case UIO_SYSSPACE: uio->uio_segflg = UIO_PHYS_SYSSPACE; break; - } pl = ubc_upl_pageinfo(upl); @@ -6229,11 +6477,12 @@ cluster_copy_upl_data(struct uio *uio, upl_t upl, int upl_offset, int *io_resid) dirty_count = 0; while (xsize && retval == 0) { - addr64_t paddr; + addr64_t paddr; paddr = ((addr64_t)upl_phys_page(pl, pg_index) << PAGE_SHIFT) + pg_offset; - if ((uio->uio_rw == UIO_WRITE) && (upl_dirty_page(pl, pg_index) == FALSE)) + if ((uio->uio_rw == UIO_WRITE) && (upl_dirty_page(pl, pg_index) == FALSE)) { dirty_count++; + } retval = uiomove64(paddr, csize, uio); @@ -6248,17 +6497,16 @@ cluster_copy_upl_data(struct uio *uio, upl_t upl, int 
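The copypv() calls in the next hunk pass literal masks and keep the symbolic names only in comments. The values those comments imply, spelled out for reference; the exact constants are recalled from old osfmk headers and should be treated as an assumption:

    /* Flag values implied by the hunk's own comments; not authoritative. */
    #define CPPV_PSNK_ 1   /* cppvPsnk: sink ("to") address is physical */
    #define CPPV_PSRC_ 2   /* cppvPsrc: source ("from") address is physical */
    #define CPPV_FSNK_ 4   /* cppvFsnk: flush the sink after the copy */
    #define CPPV_FSRC_ 8   /* cppvFsrc: flush the source after the copy */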
upl_offset, int *io_resid) task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, upl_lookup_vnode(upl)); KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, - (int)uio->uio_offset, xsize, retval, segflg, 0); - - return (retval); + (int)uio->uio_offset, xsize, retval, segflg, 0); + + return retval; } int cluster_copy_ubc_data(vnode_t vp, struct uio *uio, int *io_resid, int mark_dirty) { - - return (cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1)); + return cluster_copy_ubc_data_internal(vp, uio, io_resid, mark_dirty, 1); } @@ -6270,51 +6518,50 @@ cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int m int xsize; int start_offset; int retval = 0; - memory_object_control_t control; + memory_object_control_t control; io_size = *io_resid; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_START, - (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0); + (int)uio->uio_offset, io_size, mark_dirty, take_reference, 0); control = ubc_getobject(vp, UBC_FLAGS_NONE); if (control == MEMORY_OBJECT_CONTROL_NULL) { KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, - (int)uio->uio_offset, io_size, retval, 3, 0); + (int)uio->uio_offset, io_size, retval, 3, 0); - return(0); + return 0; } segflg = uio->uio_segflg; - switch(segflg) { - - case UIO_USERSPACE32: - case UIO_USERISPACE32: + switch (segflg) { + case UIO_USERSPACE32: + case UIO_USERISPACE32: uio->uio_segflg = UIO_PHYS_USERSPACE32; break; - case UIO_USERSPACE64: - case UIO_USERISPACE64: + case UIO_USERSPACE64: + case UIO_USERISPACE64: uio->uio_segflg = UIO_PHYS_USERSPACE64; break; - case UIO_USERSPACE: - case UIO_USERISPACE: + case UIO_USERSPACE: + case UIO_USERISPACE: uio->uio_segflg = UIO_PHYS_USERSPACE; break; - case UIO_SYSSPACE: + case UIO_SYSSPACE: uio->uio_segflg = UIO_PHYS_SYSSPACE; break; } - if ( (io_size = *io_resid) ) { - start_offset = (int)(uio->uio_offset & PAGE_MASK_64); + if ((io_size = *io_resid)) { + start_offset = (int)(uio->uio_offset & PAGE_MASK_64); xsize = uio_resid(uio); retval = memory_object_control_uiomove(control, uio->uio_offset - start_offset, uio, - start_offset, io_size, mark_dirty, take_reference); + start_offset, io_size, mark_dirty, take_reference); xsize -= uio_resid(uio); io_size -= xsize; } @@ -6322,30 +6569,31 @@ cluster_copy_ubc_data_internal(vnode_t vp, struct uio *uio, int *io_resid, int m *io_resid = io_size; KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 34)) | DBG_FUNC_END, - (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0); + (int)uio->uio_offset, io_size, retval, 0x80000000 | segflg, 0); - return(retval); + return retval; } int is_file_clean(vnode_t vp, off_t filesize) { - off_t f_offset; + off_t f_offset; int flags; int total_dirty = 0; for (f_offset = 0; f_offset < filesize; f_offset += PAGE_SIZE_64) { - if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) { - if (flags & UPL_POP_DIRTY) { - total_dirty++; + if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) { + if (flags & UPL_POP_DIRTY) { + total_dirty++; } } } - if (total_dirty) - return(EINVAL); + if (total_dirty) { + return EINVAL; + } - return (0); + return 0; } @@ -6368,16 +6616,16 @@ is_file_clean(vnode_t vp, off_t filesize) * single hashtable entry. Each hashtable entry is aligned to this * size within the file. */ -#define DRT_BITVECTOR_PAGES ((1024 * 256) / PAGE_SIZE) +#define DRT_BITVECTOR_PAGES ((1024 * 256) / PAGE_SIZE) /* * File offset handling. 
* - * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES; + * DRT_ADDRESS_MASK is dependent on DRT_BITVECTOR_PAGES; * the correct formula is (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1)) */ -#define DRT_ADDRESS_MASK (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1)) -#define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK) +#define DRT_ADDRESS_MASK (~((DRT_BITVECTOR_PAGES * PAGE_SIZE) - 1)) +#define DRT_ALIGN_ADDRESS(addr) ((addr) & DRT_ADDRESS_MASK) /* * Hashtable address field handling. @@ -6389,29 +6637,29 @@ is_file_clean(vnode_t vp, off_t filesize) * 0-DRT_BITVECTOR_PAGES inclusive, as well as have one value * to indicate that the bucket is actually unoccupied. */ -#define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK) -#define DRT_HASH_SET_ADDRESS(scm, i, a) \ - do { \ - (scm)->scm_hashtable[(i)].dhe_control = \ - ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \ +#define DRT_HASH_GET_ADDRESS(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_ADDRESS_MASK) +#define DRT_HASH_SET_ADDRESS(scm, i, a) \ + do { \ + (scm)->scm_hashtable[(i)].dhe_control = \ + ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_ADDRESS_MASK) | DRT_ALIGN_ADDRESS(a); \ } while (0) -#define DRT_HASH_COUNT_MASK 0x1ff -#define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK) -#define DRT_HASH_SET_COUNT(scm, i, c) \ - do { \ - (scm)->scm_hashtable[(i)].dhe_control = \ - ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \ +#define DRT_HASH_COUNT_MASK 0x1ff +#define DRT_HASH_GET_COUNT(scm, i) ((scm)->scm_hashtable[(i)].dhe_control & DRT_HASH_COUNT_MASK) +#define DRT_HASH_SET_COUNT(scm, i, c) \ + do { \ + (scm)->scm_hashtable[(i)].dhe_control = \ + ((scm)->scm_hashtable[(i)].dhe_control & ~DRT_HASH_COUNT_MASK) | ((c) & DRT_HASH_COUNT_MASK); \ } while (0) #define DRT_HASH_CLEAR(scm, i) \ - do { \ - (scm)->scm_hashtable[(i)].dhe_control = 0; \ + do { \ + (scm)->scm_hashtable[(i)].dhe_control = 0; \ } while (0) -#define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK) -#define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK) -#define DRT_HASH_COPY(oscm, oi, scm, i) \ - do { \ - (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \ - DRT_BITVECTOR_COPY(oscm, oi, scm, i); \ +#define DRT_HASH_VACATE(scm, i) DRT_HASH_SET_COUNT((scm), (i), DRT_HASH_COUNT_MASK) +#define DRT_HASH_VACANT(scm, i) (DRT_HASH_GET_COUNT((scm), (i)) == DRT_HASH_COUNT_MASK) +#define DRT_HASH_COPY(oscm, oi, scm, i) \ + do { \ + (scm)->scm_hashtable[(i)].dhe_control = (oscm)->scm_hashtable[(oi)].dhe_control; \ + DRT_BITVECTOR_COPY(oscm, oi, scm, i); \ } while(0); @@ -6428,10 +6676,12 @@ is_file_clean(vnode_t vp, off_t filesize) * * The small hashtable allocation is 4096 bytes, so the modulus is 251. * The large hashtable allocation is 32768 bytes, so the modulus is 2039. + * The xlarge hashtable allocation is 131072 bytes, so the modulus is 8179. */ -#define DRT_HASH_SMALL_MODULUS 251 -#define DRT_HASH_LARGE_MODULUS 2039 +#define DRT_HASH_SMALL_MODULUS 251 +#define DRT_HASH_LARGE_MODULUS 2039 +#define DRT_HASH_XLARGE_MODULUS 8179 /* * Physical memory required before the large hash modulus is permitted. @@ -6439,10 +6689,12 @@ is_file_clean(vnode_t vp, off_t filesize) * On small memory systems, the large hash modulus can lead to phsyical * memory starvation, so we avoid using it there. 
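Each hashtable entry multiplexes two fields into the single 64-bit dhe_control word: the 256 KiB-aligned file offset lives in the bits selected by DRT_ADDRESS_MASK, and the dirty-page count lives in the low nine bits (DRT_HASH_COUNT_MASK). Because a bucket can never hold more than DRT_BITVECTOR_PAGES dirty pages, the all-ones count value is free to serve as the "vacant" sentinel. A minimal standalone sketch of that packing, assuming 4 KiB pages (the constant names below are illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define BV_PAGES    64ULL                 /* DRT_BITVECTOR_PAGES: 256 KiB / 4 KiB */
#define PAGE_SZ     4096ULL
#define ADDR_MASK   (~((BV_PAGES * PAGE_SZ) - 1))  /* high bits: aligned offset */
#define CNT_MASK    0x1ffULL              /* low 9 bits: dirty-page count */
#define VACANT      CNT_MASK              /* all-ones count marks an empty bucket */

int main(void)
{
	uint64_t control = 0;
	uint64_t offset = 0x123456789abcULL;

	/* SET_ADDRESS: preserve the count bits, replace the aligned-offset bits */
	control = (control & ~ADDR_MASK) | (offset & ADDR_MASK);

	/* SET_COUNT: preserve the offset bits, replace the low nine bits */
	control = (control & ~CNT_MASK) | (37 & CNT_MASK);

	printf("offset 0x%llx, count %llu\n",
	    (unsigned long long)(control & ADDR_MASK),
	    (unsigned long long)(control & CNT_MASK));

	/* VACATE/VACANT: safe because a real count can never exceed 64 */
	control = (control & ~CNT_MASK) | VACANT;
	printf("vacant: %d\n", (control & CNT_MASK) == VACANT);
	return 0;
}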
*/ -#define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */ +#define DRT_HASH_LARGE_MEMORY_REQUIRED (1024LL * 1024LL * 1024LL) /* 1GiB */ +#define DRT_HASH_XLARGE_MEMORY_REQUIRED (8 * 1024LL * 1024LL * 1024LL) /* 8GiB */ -#define DRT_SMALL_ALLOCATION 4096 /* 80 bytes spare */ -#define DRT_LARGE_ALLOCATION 32768 /* 144 bytes spare */ +#define DRT_SMALL_ALLOCATION 4096 /* 80 bytes spare */ +#define DRT_LARGE_ALLOCATION 32768 /* 144 bytes spare */ +#define DRT_XLARGE_ALLOCATION 131072 /* 208 bytes spare */ #else /* @@ -6457,10 +6709,12 @@ is_file_clean(vnode_t vp, off_t filesize) * * The small hashtable allocation is 16384 bytes, so the modulus is 1019. * The large hashtable allocation is 131072 bytes, so the modulus is 8179. + * The xlarge hashtable allocation is 524288 bytes, so the modulus is 32749. */ -#define DRT_HASH_SMALL_MODULUS 1019 -#define DRT_HASH_LARGE_MODULUS 8179 +#define DRT_HASH_SMALL_MODULUS 1019 +#define DRT_HASH_LARGE_MODULUS 8179 +#define DRT_HASH_XLARGE_MODULUS 32749 /* * Physical memory required before the large hash modulus is permitted. @@ -6468,10 +6722,12 @@ is_file_clean(vnode_t vp, off_t filesize) * On small memory systems, the large hash modulus can lead to phsyical * memory starvation, so we avoid using it there. */ -#define DRT_HASH_LARGE_MEMORY_REQUIRED (4 * 1024LL * 1024LL * 1024LL) /* 4GiB */ +#define DRT_HASH_LARGE_MEMORY_REQUIRED (4 * 1024LL * 1024LL * 1024LL) /* 4GiB */ +#define DRT_HASH_XLARGE_MEMORY_REQUIRED (32 * 1024LL * 1024LL * 1024LL) /* 32GiB */ -#define DRT_SMALL_ALLOCATION 16384 /* 80 bytes spare */ -#define DRT_LARGE_ALLOCATION 131072 /* 208 bytes spare */ +#define DRT_SMALL_ALLOCATION 16384 /* 80 bytes spare */ +#define DRT_LARGE_ALLOCATION 131072 /* 208 bytes spare */ +#define DRT_XLARGE_ALLOCATION 524288 /* 304 bytes spare */ #endif @@ -6481,16 +6737,16 @@ is_file_clean(vnode_t vp, off_t filesize) * Hashtable entry. */ struct vfs_drt_hashentry { - u_int64_t dhe_control; + u_int64_t dhe_control; /* -* dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32]; -* DRT_BITVECTOR_PAGES is defined as ((1024 * 256) / PAGE_SIZE) -* Since PAGE_SIZE is only known at boot time, -* -define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k) -* -declare dhe_bitvector array for largest possible length -*/ + * dhe_bitvector was declared as dhe_bitvector[DRT_BITVECTOR_PAGES / 32]; + * DRT_BITVECTOR_PAGES is defined as ((1024 * 256) / PAGE_SIZE) + * Since PAGE_SIZE is only known at boot time, + * -define MAX_DRT_BITVECTOR_PAGES for smallest supported page size (4k) + * -declare dhe_bitvector array for largest possible length + */ #define MAX_DRT_BITVECTOR_PAGES (1024 * 256)/( 4 * 1024) - u_int32_t dhe_bitvector[MAX_DRT_BITVECTOR_PAGES/32]; + u_int32_t dhe_bitvector[MAX_DRT_BITVECTOR_PAGES / 32]; }; /* @@ -6499,21 +6755,21 @@ struct vfs_drt_hashentry { * Bitvector fields are 32 bits long. 
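The "bytes spare" comments follow directly from the entry layout: each vfs_drt_hashentry is 16 bytes (an 8-byte control word plus a 64-bit bitvector stored as two u_int32_t words), and each modulus is the largest prime whose entries fit in the corresponding power-of-two allocation, leaving the slack to absorb the map header. A quick check of that arithmetic for the 16 KiB / 128 KiB / 512 KiB configuration:

#include <stdio.h>

int main(void)
{
	/* sizeof(struct vfs_drt_hashentry): 8-byte dhe_control plus
	 * MAX_DRT_BITVECTOR_PAGES/32 == 2 u_int32_t words of bitvector */
	const int entry = 8 + (64 / 32) * 4;   /* 16 bytes, no padding */
	const struct { const char *name; int alloc, modulus; } cfg[] = {
		{ "small",  16384,  1019  },   /* comment says  80 bytes spare */
		{ "large",  131072, 8179  },   /* comment says 208 bytes spare */
		{ "xlarge", 524288, 32749 },   /* comment says 304 bytes spare */
	};
	for (int i = 0; i < 3; i++) {
		printf("%-6s alloc %6d, modulus %5d -> %3d bytes spare\n",
		    cfg[i].name, cfg[i].alloc, cfg[i].modulus,
		    cfg[i].alloc - cfg[i].modulus * entry);
	}
	return 0;
}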
*/ -#define DRT_HASH_SET_BIT(scm, i, bit) \ +#define DRT_HASH_SET_BIT(scm, i, bit) \ (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] |= (1 << ((bit) % 32)) -#define DRT_HASH_CLEAR_BIT(scm, i, bit) \ +#define DRT_HASH_CLEAR_BIT(scm, i, bit) \ (scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] &= ~(1 << ((bit) % 32)) - -#define DRT_HASH_TEST_BIT(scm, i, bit) \ + +#define DRT_HASH_TEST_BIT(scm, i, bit) \ ((scm)->scm_hashtable[(i)].dhe_bitvector[(bit) / 32] & (1 << ((bit) % 32))) - -#define DRT_BITVECTOR_CLEAR(scm, i) \ + +#define DRT_BITVECTOR_CLEAR(scm, i) \ bzero(&(scm)->scm_hashtable[(i)].dhe_bitvector[0], (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t)) -#define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \ - bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \ - &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \ +#define DRT_BITVECTOR_COPY(oscm, oi, scm, i) \ + bcopy(&(oscm)->scm_hashtable[(oi)].dhe_bitvector[0], \ + &(scm)->scm_hashtable[(i)].dhe_bitvector[0], \ (MAX_DRT_BITVECTOR_PAGES / 32) * sizeof(u_int32_t)) /* @@ -6526,53 +6782,53 @@ struct vfs_drt_hashentry { */ struct vfs_drt_clustermap { - u_int32_t scm_magic; /* sanity/detection */ -#define DRT_SCM_MAGIC 0x12020003 - u_int32_t scm_modulus; /* current ring size */ - u_int32_t scm_buckets; /* number of occupied buckets */ - u_int32_t scm_lastclean; /* last entry we cleaned */ - u_int32_t scm_iskips; /* number of slot skips */ + u_int32_t scm_magic; /* sanity/detection */ +#define DRT_SCM_MAGIC 0x12020003 + u_int32_t scm_modulus; /* current ring size */ + u_int32_t scm_buckets; /* number of occupied buckets */ + u_int32_t scm_lastclean; /* last entry we cleaned */ + u_int32_t scm_iskips; /* number of slot skips */ struct vfs_drt_hashentry scm_hashtable[0]; }; -#define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus) -#define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus) +#define DRT_HASH(scm, addr) ((addr) % (scm)->scm_modulus) +#define DRT_HASH_NEXT(scm, addr) (((addr) + 1) % (scm)->scm_modulus) /* * Debugging codes and arguments. 
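These accessors implement a conventional packed bitvector: bit b of a bucket lives in 32-bit word b / 32 at position b % 32. The same layout is what vfs_drt_get_cluster scans further down when it looks for the first set bit and then measures the run of consecutive set bits. A self-contained sketch of both operations:

#include <stdio.h>
#include <stdint.h>

#define BV_PAGES 64
static uint32_t bv[BV_PAGES / 32];

static void set_bit(int b)  { bv[b / 32] |= (1u << (b % 32)); }
static int  test_bit(int b) { return (bv[b / 32] >> (b % 32)) & 1; }

int main(void)
{
	int fs = -1, ls = 0, i;

	set_bit(10);
	set_bit(11);
	set_bit(12);
	set_bit(40);

	/* find the first set bit (fs), then count the run that follows (ls),
	 * mirroring the fs/ls scan in vfs_drt_get_cluster */
	for (i = 0; i < BV_PAGES; i++) {
		if (test_bit(i)) {
			fs = i;
			break;
		}
	}
	for (ls = 0; fs != -1 && i < BV_PAGES; i++, ls++) {
		if (!test_bit(i)) {
			break;
		}
	}
	printf("first dirty page %d, run length %d\n", fs, ls);   /* 10, 3 */
	return 0;
}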
*/ -#define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */ -#define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */ -#define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */ -#define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */ -#define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length, - * dirty */ - /* 0, setcount */ - /* 1 (clean, no map) */ - /* 2 (map alloc fail) */ - /* 3, resid (partial) */ -#define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87)) -#define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets, - * lastclean, iskips */ - - -static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp); -static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap); -static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap, - u_int64_t offset, int *indexp); -static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, - u_int64_t offset, - int *indexp, - int recursed); -static kern_return_t vfs_drt_do_mark_pages( - void **cmapp, - u_int64_t offset, - u_int length, - u_int *setcountp, - int dirty); -static void vfs_drt_trace( +#define DRT_DEBUG_EMPTYFREE (FSDBG_CODE(DBG_FSRW, 82)) /* nil */ +#define DRT_DEBUG_RETCLUSTER (FSDBG_CODE(DBG_FSRW, 83)) /* offset, length */ +#define DRT_DEBUG_ALLOC (FSDBG_CODE(DBG_FSRW, 84)) /* copycount */ +#define DRT_DEBUG_INSERT (FSDBG_CODE(DBG_FSRW, 85)) /* offset, iskip */ +#define DRT_DEBUG_MARK (FSDBG_CODE(DBG_FSRW, 86)) /* offset, length, + * dirty */ + /* 0, setcount */ + /* 1 (clean, no map) */ + /* 2 (map alloc fail) */ + /* 3, resid (partial) */ +#define DRT_DEBUG_6 (FSDBG_CODE(DBG_FSRW, 87)) +#define DRT_DEBUG_SCMDATA (FSDBG_CODE(DBG_FSRW, 88)) /* modulus, buckets, + * lastclean, iskips */ + + +static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp); +static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap); +static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap, + u_int64_t offset, int *indexp); +static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, + u_int64_t offset, + int *indexp, + int recursed); +static kern_return_t vfs_drt_do_mark_pages( + void **cmapp, + u_int64_t offset, + u_int length, + u_int *setcountp, + int dirty); +static void vfs_drt_trace( struct vfs_drt_clustermap *cmap, int code, int arg1, @@ -6592,55 +6848,72 @@ static void vfs_drt_trace( static kern_return_t vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp) { - struct vfs_drt_clustermap *cmap, *ocmap; - kern_return_t kret; - u_int64_t offset; - u_int32_t i; - int nsize, active_buckets, index, copycount; + struct vfs_drt_clustermap *cmap = NULL, *ocmap = NULL; + kern_return_t kret = KERN_SUCCESS; + u_int64_t offset = 0; + u_int32_t i = 0; + int modulus_size = 0, map_size = 0, active_buckets = 0, index = 0, copycount = 0; ocmap = NULL; - if (cmapp != NULL) + if (cmapp != NULL) { ocmap = *cmapp; - + } + /* * Decide on the size of the new map. */ if (ocmap == NULL) { - nsize = DRT_HASH_SMALL_MODULUS; + modulus_size = DRT_HASH_SMALL_MODULUS; + map_size = DRT_SMALL_ALLOCATION; } else { /* count the number of active buckets in the old map */ active_buckets = 0; for (i = 0; i < ocmap->scm_modulus; i++) { if (!DRT_HASH_VACANT(ocmap, i) && - (DRT_HASH_GET_COUNT(ocmap, i) != 0)) + (DRT_HASH_GET_COUNT(ocmap, i) != 0)) { active_buckets++; + } } /* * If we're currently using the small allocation, check to * see whether we should grow to the large one. 
*/ if (ocmap->scm_modulus == DRT_HASH_SMALL_MODULUS) { - /* + /* * If the ring is nearly full and we are allowed to * use the large modulus, upgrade. */ if ((active_buckets > (DRT_HASH_SMALL_MODULUS - 5)) && (max_mem >= DRT_HASH_LARGE_MEMORY_REQUIRED)) { - nsize = DRT_HASH_LARGE_MODULUS; + modulus_size = DRT_HASH_LARGE_MODULUS; + map_size = DRT_LARGE_ALLOCATION; + } else { + modulus_size = DRT_HASH_SMALL_MODULUS; + map_size = DRT_SMALL_ALLOCATION; + } + } else if (ocmap->scm_modulus == DRT_HASH_LARGE_MODULUS) { + if ((active_buckets > (DRT_HASH_LARGE_MODULUS - 5)) && + (max_mem >= DRT_HASH_XLARGE_MEMORY_REQUIRED)) { + modulus_size = DRT_HASH_XLARGE_MODULUS; + map_size = DRT_XLARGE_ALLOCATION; } else { - nsize = DRT_HASH_SMALL_MODULUS; + modulus_size = DRT_HASH_LARGE_MODULUS; + map_size = DRT_LARGE_ALLOCATION; } } else { - /* already using the large modulus */ - nsize = DRT_HASH_LARGE_MODULUS; + /* already using the xlarge modulus */ + modulus_size = DRT_HASH_XLARGE_MODULUS; + map_size = DRT_XLARGE_ALLOCATION; + /* * If the ring is completely full, there's * nothing useful for us to do. Behave as * though we had compacted into the new * array and return. */ - if (active_buckets >= DRT_HASH_LARGE_MODULUS) - return(KERN_SUCCESS); + if (active_buckets >= DRT_HASH_XLARGE_MODULUS) { + return KERN_SUCCESS; + } } } @@ -6648,17 +6921,17 @@ vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp) * Allocate and initialise the new map. */ - kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap, - (nsize == DRT_HASH_SMALL_MODULUS) ? DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION, VM_KERN_MEMORY_FILE); - if (kret != KERN_SUCCESS) - return(kret); + kret = kmem_alloc(kernel_map, (vm_offset_t *)&cmap, map_size, VM_KERN_MEMORY_FILE); + if (kret != KERN_SUCCESS) { + return kret; + } cmap->scm_magic = DRT_SCM_MAGIC; - cmap->scm_modulus = nsize; + cmap->scm_modulus = modulus_size; cmap->scm_buckets = 0; cmap->scm_lastclean = 0; cmap->scm_iskips = 0; for (i = 0; i < cmap->scm_modulus; i++) { - DRT_HASH_CLEAR(cmap, i); + DRT_HASH_CLEAR(cmap, i); DRT_HASH_VACATE(cmap, i); DRT_BITVECTOR_CLEAR(cmap, i); } @@ -6671,8 +6944,9 @@ vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp) for (i = 0; i < ocmap->scm_modulus; i++) { /* skip empty buckets */ if (DRT_HASH_VACANT(ocmap, i) || - (DRT_HASH_GET_COUNT(ocmap, i) == 0)) + (DRT_HASH_GET_COUNT(ocmap, i) == 0)) { continue; + } /* get new index */ offset = DRT_HASH_GET_ADDRESS(ocmap, i); kret = vfs_drt_get_index(&cmap, offset, &index, 1); @@ -6689,9 +6963,9 @@ vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp) /* log what we've done */ vfs_drt_trace(cmap, DRT_DEBUG_ALLOC, copycount, 0, 0, 0); - + /* - * It's important to ensure that *cmapp always points to + * It's important to ensure that *cmapp always points to * a valid map, so we must overwrite it before freeing * the old map. */ @@ -6699,14 +6973,14 @@ vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp) if (ocmap != NULL) { /* emit stats into trace buffer */ vfs_drt_trace(ocmap, DRT_DEBUG_SCMDATA, - ocmap->scm_modulus, - ocmap->scm_buckets, - ocmap->scm_lastclean, - ocmap->scm_iskips); + ocmap->scm_modulus, + ocmap->scm_buckets, + ocmap->scm_lastclean, + ocmap->scm_iskips); vfs_drt_free_map(ocmap); } - return(KERN_SUCCESS); + return KERN_SUCCESS; } @@ -6716,9 +6990,20 @@ vfs_drt_alloc_map(struct vfs_drt_clustermap **cmapp) static kern_return_t vfs_drt_free_map(struct vfs_drt_clustermap *cmap) { - kmem_free(kernel_map, (vm_offset_t)cmap, - (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) ? 
DRT_SMALL_ALLOCATION : DRT_LARGE_ALLOCATION); - return(KERN_SUCCESS); + vm_size_t map_size = 0; + + if (cmap->scm_modulus == DRT_HASH_SMALL_MODULUS) { + map_size = DRT_SMALL_ALLOCATION; + } else if (cmap->scm_modulus == DRT_HASH_LARGE_MODULUS) { + map_size = DRT_LARGE_ALLOCATION; + } else if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) { + map_size = DRT_XLARGE_ALLOCATION; + } else { + panic("vfs_drt_free_map: Invalid modulus %d\n", cmap->scm_modulus); + } + + kmem_free(kernel_map, (vm_offset_t)cmap, map_size); + return KERN_SUCCESS; } @@ -6728,27 +7013,27 @@ vfs_drt_free_map(struct vfs_drt_clustermap *cmap) static kern_return_t vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *indexp) { - int index; - u_int32_t i; + int index; + u_int32_t i; offset = DRT_ALIGN_ADDRESS(offset); index = DRT_HASH(cmap, offset); /* traverse the hashtable */ for (i = 0; i < cmap->scm_modulus; i++) { - /* * If the slot is vacant, we can stop. */ - if (DRT_HASH_VACANT(cmap, index)) + if (DRT_HASH_VACANT(cmap, index)) { break; + } /* * If the address matches our offset, we have success. */ if (DRT_HASH_GET_ADDRESS(cmap, index) == offset) { *indexp = index; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -6759,7 +7044,7 @@ vfs_drt_search_index(struct vfs_drt_clustermap *cmap, u_int64_t offset, int *ind /* * It's not there. */ - return(KERN_FAILURE); + return KERN_FAILURE; } /* @@ -6772,16 +7057,17 @@ static kern_return_t vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *indexp, int recursed) { struct vfs_drt_clustermap *cmap; - kern_return_t kret; - u_int32_t index; - u_int32_t i; + kern_return_t kret; + u_int32_t index; + u_int32_t i; cmap = *cmapp; /* look for an existing entry */ kret = vfs_drt_search_index(cmap, offset, indexp); - if (kret == KERN_SUCCESS) - return(kret); + if (kret == KERN_SUCCESS) { + return kret; + } /* need to allocate an entry */ offset = DRT_ALIGN_ADDRESS(offset); @@ -6790,16 +7076,17 @@ vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *inde /* scan from the index forwards looking for a vacant slot */ for (i = 0; i < cmap->scm_modulus; i++) { /* slot vacant? */ - if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap,index) == 0) { + if (DRT_HASH_VACANT(cmap, index) || DRT_HASH_GET_COUNT(cmap, index) == 0) { cmap->scm_buckets++; - if (index < cmap->scm_lastclean) + if (index < cmap->scm_lastclean) { cmap->scm_lastclean = index; + } DRT_HASH_SET_ADDRESS(cmap, index, offset); DRT_HASH_SET_COUNT(cmap, index, 0); DRT_BITVECTOR_CLEAR(cmap, index); *indexp = index; vfs_drt_trace(cmap, DRT_DEBUG_INSERT, (int)offset, i, 0, 0); - return(KERN_SUCCESS); + return KERN_SUCCESS; } cmap->scm_iskips += i; index = DRT_HASH_NEXT(cmap, index); @@ -6809,14 +7096,15 @@ vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *inde * We haven't found a vacant slot, so the map is full. If we're not * already recursed, try reallocating/compacting it. 
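Both the lookup just above and the insertion path that follows use open addressing with linear probing: the 256 KiB-aligned offset is hashed by reduction modulo the ring size, and the probe walks forward with wraparound until it finds the offset, hits a vacant bucket (a definitive miss), or has visited every slot (the full-map case that triggers reallocation). A simplified model of the probe loop, using 0 as a toy vacancy sentinel in place of the kernel's count-field encoding:

#include <stdio.h>
#include <stdint.h>

#define MODULUS 7                       /* stand-in for scm_modulus */
static uint64_t table[MODULUS];         /* 0 == vacant in this toy  */

/* Return the slot holding `off`, or -1 after hitting a vacant slot
 * or probing every bucket (the map-is-full case). */
static int probe(uint64_t off)
{
	int index = (int)(off % MODULUS);   /* DRT_HASH */
	for (int i = 0; i < MODULUS; i++) {
		if (table[index] == 0) {        /* vacant: off cannot be further on */
			return -1;
		}
		if (table[index] == off) {
			return index;
		}
		index = (index + 1) % MODULUS;  /* DRT_HASH_NEXT */
	}
	return -1;
}

int main(void)
{
	table[3] = 10;                      /* 10 % 7 == 3 */
	table[4] = 17;                      /* 17 % 7 == 3: collided, probed to 4 */
	printf("10 -> %d, 17 -> %d, 24 -> %d\n", probe(10), probe(17), probe(24));
	return 0;
}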
*/ - if (recursed) - return(KERN_FAILURE); + if (recursed) { + return KERN_FAILURE; + } kret = vfs_drt_alloc_map(cmapp); if (kret == KERN_SUCCESS) { /* now try to insert again */ kret = vfs_drt_get_index(cmapp, offset, indexp, 1); } - return(kret); + return kret; } /* @@ -6826,35 +7114,36 @@ vfs_drt_get_index(struct vfs_drt_clustermap **cmapp, u_int64_t offset, int *inde */ static kern_return_t vfs_drt_do_mark_pages( - void **private, - u_int64_t offset, - u_int length, - u_int *setcountp, - int dirty) + void **private, + u_int64_t offset, + u_int length, + u_int *setcountp, + int dirty) { struct vfs_drt_clustermap *cmap, **cmapp; - kern_return_t kret; - int i, index, pgoff, pgcount, setcount, ecount; + kern_return_t kret; + int i, index, pgoff, pgcount, setcount, ecount; cmapp = (struct vfs_drt_clustermap **)private; cmap = *cmapp; vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_START, (int)offset, (int)length, dirty, 0); - if (setcountp != NULL) - *setcountp = 0; - + if (setcountp != NULL) { + *setcountp = 0; + } + /* allocate a cluster map if we don't already have one */ if (cmap == NULL) { /* no cluster map, nothing to clean */ if (!dirty) { vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 1, 0, 0, 0); - return(KERN_SUCCESS); + return KERN_SUCCESS; } kret = vfs_drt_alloc_map(cmapp); if (kret != KERN_SUCCESS) { vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 2, 0, 0, 0); - return(kret); + return kret; } } setcount = 0; @@ -6870,14 +7159,15 @@ vfs_drt_do_mark_pages( * that hasn't been dirtied. */ kret = vfs_drt_get_index(cmapp, offset, &index, 0); - cmap = *cmapp; /* may have changed! */ + cmap = *cmapp; /* may have changed! */ /* this may be a partial-success return */ if (kret != KERN_SUCCESS) { - if (setcountp != NULL) - *setcountp = setcount; + if (setcountp != NULL) { + *setcountp = setcount; + } vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 3, (int)length, 0, 0); - return(kret); + return kret; } /* @@ -6894,17 +7184,19 @@ vfs_drt_do_mark_pages( for (i = 0; i < pgcount; i++) { if (dirty) { if (!DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) { - if (ecount >= DRT_BITVECTOR_PAGES) - panic("ecount >= DRT_BITVECTOR_PAGES, cmap = %p, index = %d, bit = %d", cmap, index, pgoff+i); + if (ecount >= DRT_BITVECTOR_PAGES) { + panic("ecount >= DRT_BITVECTOR_PAGES, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i); + } DRT_HASH_SET_BIT(cmap, index, pgoff + i); ecount++; setcount++; } } else { if (DRT_HASH_TEST_BIT(cmap, index, pgoff + i)) { - if (ecount <= 0) - panic("ecount <= 0, cmap = %p, index = %d, bit = %d", cmap, index, pgoff+i); - assert(ecount > 0); + if (ecount <= 0) { + panic("ecount <= 0, cmap = %p, index = %d, bit = %d", cmap, index, pgoff + i); + } + assert(ecount > 0); DRT_HASH_CLEAR_BIT(cmap, index, pgoff + i); ecount--; setcount++; @@ -6916,12 +7208,13 @@ vfs_drt_do_mark_pages( offset += pgcount * PAGE_SIZE; length -= pgcount * PAGE_SIZE; } - if (setcountp != NULL) + if (setcountp != NULL) { *setcountp = setcount; + } vfs_drt_trace(cmap, DRT_DEBUG_MARK | DBG_FUNC_END, 0, setcount, 0, 0); - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -6952,14 +7245,14 @@ static kern_return_t vfs_drt_mark_pages(void **cmapp, off_t offset, u_int length, u_int *setcountp) { /* XXX size unused, drop from interface */ - return(vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1)); + return vfs_drt_do_mark_pages(cmapp, offset, length, setcountp, 1); } #if 0 static kern_return_t vfs_drt_unmark_pages(void **cmapp, off_t offset, u_int length) { - 
return(vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0)); + return vfs_drt_do_mark_pages(cmapp, offset, length, NULL, 0); } #endif @@ -6987,42 +7280,45 @@ static kern_return_t vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp) { struct vfs_drt_clustermap *cmap; - u_int64_t offset; - u_int length; - u_int32_t j; - int index, i, fs, ls; + u_int64_t offset; + u_int length; + u_int32_t j; + int index, i, fs, ls; /* sanity */ - if ((cmapp == NULL) || (*cmapp == NULL)) - return(KERN_FAILURE); + if ((cmapp == NULL) || (*cmapp == NULL)) { + return KERN_FAILURE; + } cmap = *cmapp; /* walk the hashtable */ for (offset = 0, j = 0; j < cmap->scm_modulus; offset += (DRT_BITVECTOR_PAGES * PAGE_SIZE), j++) { - index = DRT_HASH(cmap, offset); + index = DRT_HASH(cmap, offset); - if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0)) + if (DRT_HASH_VACANT(cmap, index) || (DRT_HASH_GET_COUNT(cmap, index) == 0)) { continue; + } /* scan the bitfield for a string of bits */ fs = -1; for (i = 0; i < DRT_BITVECTOR_PAGES; i++) { - if (DRT_HASH_TEST_BIT(cmap, index, i)) { - fs = i; + if (DRT_HASH_TEST_BIT(cmap, index, i)) { + fs = i; break; } } if (fs == -1) { - /* didn't find any bits set */ - panic("vfs_drt: entry summary count > 0 but no bits set in map, cmap = %p, index = %d, count = %lld", - cmap, index, DRT_HASH_GET_COUNT(cmap, index)); + /* didn't find any bits set */ + panic("vfs_drt: entry summary count > 0 but no bits set in map, cmap = %p, index = %d, count = %lld", + cmap, index, DRT_HASH_GET_COUNT(cmap, index)); } for (ls = 0; i < DRT_BITVECTOR_PAGES; i++, ls++) { - if (!DRT_HASH_TEST_BIT(cmap, index, i)) - break; + if (!DRT_HASH_TEST_BIT(cmap, index, i)) { + break; + } } - + /* compute offset and length, mark pages clean */ offset = DRT_HASH_GET_ADDRESS(cmap, index) + (PAGE_SIZE * fs); length = ls * PAGE_SIZE; @@ -7034,7 +7330,7 @@ vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp) *lengthp = length; vfs_drt_trace(cmap, DRT_DEBUG_RETCLUSTER, (int)offset, (int)length, 0, 0); - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* * We didn't find anything... 
hashtable is empty @@ -7042,15 +7338,15 @@ vfs_drt_get_cluster(void **cmapp, off_t *offsetp, u_int *lengthp) * then free it */ vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA, - cmap->scm_modulus, - cmap->scm_buckets, - cmap->scm_lastclean, - cmap->scm_iskips); - + cmap->scm_modulus, + cmap->scm_buckets, + cmap->scm_lastclean, + cmap->scm_iskips); + vfs_drt_free_map(cmap); *cmapp = NULL; - return(KERN_FAILURE); + return KERN_FAILURE; } @@ -7060,28 +7356,29 @@ vfs_drt_control(void **cmapp, int op_type) struct vfs_drt_clustermap *cmap; /* sanity */ - if ((cmapp == NULL) || (*cmapp == NULL)) - return(KERN_FAILURE); + if ((cmapp == NULL) || (*cmapp == NULL)) { + return KERN_FAILURE; + } cmap = *cmapp; switch (op_type) { case 0: /* emit stats into trace buffer */ vfs_drt_trace(cmap, DRT_DEBUG_SCMDATA, - cmap->scm_modulus, - cmap->scm_buckets, - cmap->scm_lastclean, - cmap->scm_iskips); + cmap->scm_modulus, + cmap->scm_buckets, + cmap->scm_lastclean, + cmap->scm_iskips); vfs_drt_free_map(cmap); *cmapp = NULL; - break; + break; case 1: - cmap->scm_lastclean = 0; - break; + cmap->scm_lastclean = 0; + break; } - return(KERN_SUCCESS); + return KERN_SUCCESS; } @@ -7098,12 +7395,12 @@ vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, int code, int arg1, int } #else static void -vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code, - __unused int arg1, __unused int arg2, __unused int arg3, - __unused int arg4) +vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code, + __unused int arg1, __unused int arg2, __unused int arg3, + __unused int arg4) { } -#endif +#endif #if 0 /* @@ -7113,19 +7410,50 @@ vfs_drt_trace(__unused struct vfs_drt_clustermap *cmap, __unused int code, static void vfs_drt_sanity(struct vfs_drt_clustermap *cmap) { - int index, i; + int index, i; int bits_on; - + for (index = 0; index < cmap->scm_modulus; index++) { - if (DRT_HASH_VACANT(cmap, index)) - continue; + if (DRT_HASH_VACANT(cmap, index)) { + continue; + } for (bits_on = 0, i = 0; i < DRT_BITVECTOR_PAGES; i++) { - if (DRT_HASH_TEST_BIT(cmap, index, i)) - bits_on++; + if (DRT_HASH_TEST_BIT(cmap, index, i)) { + bits_on++; + } } - if (bits_on != DRT_HASH_GET_COUNT(cmap, index)) - panic("bits_on = %d, index = %d\n", bits_on, index); - } + if (bits_on != DRT_HASH_GET_COUNT(cmap, index)) { + panic("bits_on = %d, index = %d\n", bits_on, index); + } + } } #endif + +/* + * Internal interface only. + */ +static kern_return_t +vfs_get_scmap_push_behavior_internal(void **cmapp, int *push_flag) +{ + struct vfs_drt_clustermap *cmap; + + /* sanity */ + if ((cmapp == NULL) || (*cmapp == NULL) || (push_flag == NULL)) { + return KERN_FAILURE; + } + cmap = *cmapp; + + if (cmap->scm_modulus == DRT_HASH_XLARGE_MODULUS) { + /* + * If we have a full xlarge sparse cluster, + * we push it out all at once so the cluster + * map can be available to absorb more I/Os. + * This is done on large memory configs so + * the small I/Os don't interfere with the + * pro workloads. + */ + *push_flag = PUSH_ALL; + } + return KERN_SUCCESS; +} diff --git a/bsd/vfs/vfs_conf.c b/bsd/vfs/vfs_conf.c index af7ba7c5b..d6595dc96 100644 --- a/bsd/vfs/vfs_conf.c +++ b/bsd/vfs/vfs_conf.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -79,7 +79,7 @@ struct mount *rootfs; struct vnode *rootvnode; #ifdef CONFIG_IMGSRC_ACCESS -struct vnode *imgsrc_rootvnodes[MAX_IMAGEBOOT_NESTING]; /* [0] -> source volume, [1] -> first disk image */ +struct vnode *imgsrc_rootvnodes[MAX_IMAGEBOOT_NESTING]; /* [0] -> source volume, [1] -> first disk image */ #endif /* CONFIG_IMGSRC_ACCESS */ int (*mountroot)(void) = NULL; @@ -87,19 +87,19 @@ int (*mountroot)(void) = NULL; /* * Set up the initial array of known filesystem types. */ -extern struct vfsops mfs_vfsops; -extern int mfs_mountroot(mount_t, vnode_t, vfs_context_t); /* dead */ -extern struct vfsops nfs_vfsops; -extern int nfs_mountroot(void); -extern struct vfsops afs_vfsops; -extern struct vfsops null_vfsops; -extern struct vfsops devfs_vfsops; -extern struct vfsops routefs_vfsops; +extern struct vfsops mfs_vfsops; +extern int mfs_mountroot(mount_t, vnode_t, vfs_context_t); /* dead */ +extern struct vfsops nfs_vfsops; +extern int nfs_mountroot(void); +extern struct vfsops afs_vfsops; +extern struct vfsops null_vfsops; +extern struct vfsops devfs_vfsops; +extern struct vfsops routefs_vfsops; extern struct vfsops nullfs_vfsops; #if MOCKFS -extern struct vfsops mockfs_vfsops; -extern int mockfs_mountroot(mount_t, vnode_t, vfs_context_t); +extern struct vfsops mockfs_vfsops; +extern int mockfs_mountroot(mount_t, vnode_t, vfs_context_t); #endif /* MOCKFS */ /* @@ -158,7 +158,7 @@ static struct vfstable vfstbllist[] = { /* * vfs_init will set maxvfstypenum to the highest defined type number. 
*/ -const int maxvfsslots = sizeof(vfstbllist) / sizeof (struct vfstable); +const int maxvfsslots = sizeof(vfstbllist) / sizeof(struct vfstable); int numused_vfsslots = 0; int numregistered_fses = 0; int maxvfstypenum = VT_NON + 1; diff --git a/bsd/vfs/vfs_cprotect.c b/bsd/vfs/vfs_cprotect.c index 26af78a75..edb00a315 100644 --- a/bsd/vfs/vfs_cprotect.c +++ b/bsd/vfs/vfs_cprotect.c @@ -37,7 +37,7 @@ #include #include -#define PTR_ADD(type, base, offset) (type)((uintptr_t)(base) + (offset)) +#define PTR_ADD(type, base, offset) (type)((uintptr_t)(base) + (offset)) // -- struct cpx -- @@ -50,33 +50,34 @@ // cpx_flags typedef uint32_t cpx_flags_t; enum { - CPX_SEP_WRAPPEDKEY = 0x01, - CPX_IV_AES_CTX_INITIALIZED = 0x02, - CPX_USE_OFFSET_FOR_IV = 0x04, + CPX_SEP_WRAPPEDKEY = 0x01, + CPX_IV_AES_CTX_INITIALIZED = 0x02, + CPX_USE_OFFSET_FOR_IV = 0x04, // Using AES IV context generated from key - CPX_IV_AES_CTX_VFS = 0x08, + CPX_IV_AES_CTX_VFS = 0x08, CPX_SYNTHETIC_OFFSET_FOR_IV = 0x10, - CPX_COMPOSITEKEY = 0x20, - + CPX_COMPOSITEKEY = 0x20, + //write page protection - CPX_WRITE_PROTECTABLE = 0x40 + CPX_WRITE_PROTECTABLE = 0x40 }; struct cpx { #if DEBUG - uint32_t cpx_magic1; + uint32_t cpx_magic1; #endif - aes_encrypt_ctx cpx_iv_aes_ctx; // Context used for generating the IV - cpx_flags_t cpx_flags; - uint16_t cpx_max_key_len; - uint16_t cpx_key_len; - uint8_t cpx_cached_key[]; + aes_encrypt_ctx cpx_iv_aes_ctx; // Context used for generating the IV + cpx_flags_t cpx_flags; + uint16_t cpx_max_key_len; + uint16_t cpx_key_len; + uint8_t cpx_cached_key[]; }; // -- cpx_t accessors -- -size_t cpx_size(size_t key_size) +size_t +cpx_size(size_t key_size) { size_t size = sizeof(struct cpx) + key_size; @@ -87,43 +88,43 @@ size_t cpx_size(size_t key_size) return size; } -size_t cpx_sizex(const struct cpx *cpx) +size_t +cpx_sizex(const struct cpx *cpx) { return cpx_size(cpx->cpx_max_key_len); } -cpx_t cpx_alloc(size_t key_len) +cpx_t +cpx_alloc(size_t key_len) { cpx_t cpx = NULL; - + #if CONFIG_KEYPAGE_WP - /* + /* * Macs only use 1 key per volume, so force it into its own page. * This way, we can write-protect as needed. */ - size_t cpsize = cpx_size (key_len); + size_t cpsize = cpx_size(key_len); if (cpsize < PAGE_SIZE) { - /* - * Don't use MALLOC to allocate the page-sized structure. Instead, + /* + * Don't use MALLOC to allocate the page-sized structure. Instead, * use kmem_alloc to bypass KASAN since we are supplying our own - * unilateral write protection on this page. Note that kmem_alloc + * unilateral write protection on this page. Note that kmem_alloc * can block. */ - if (kmem_alloc (kernel_map, (vm_offset_t *)&cpx, PAGE_SIZE, VM_KERN_MEMORY_FILE)) { + if (kmem_alloc(kernel_map, (vm_offset_t *)&cpx, PAGE_SIZE, VM_KERN_MEMORY_FILE)) { /* - * returning NULL at this point (due to failed allocation) would just + * returning NULL at this point (due to failed allocation) would just * result in a panic. fall back to attempting a normal MALLOC, and don't * let the cpx get marked PROTECTABLE. */ MALLOC(cpx, cpx_t, cpx_size(key_len), M_TEMP, M_WAITOK); - } - else { + } else { //mark the page as protectable, since kmem_alloc succeeded. cpx->cpx_flags |= CPX_WRITE_PROTECTABLE; } - } - else { - panic ("cpx_size too large ! (%lu)", cpsize); + } else { + panic("cpx_size too large ! 
(%lu)", cpsize); } #else /* If key page write protection disabled, just switch to kernel MALLOC */ @@ -135,13 +136,14 @@ cpx_t cpx_alloc(size_t key_len) } /* this is really a void function */ -void cpx_writeprotect (cpx_t cpx) +void +cpx_writeprotect(cpx_t cpx) { #if CONFIG_KEYPAGE_WP void *cpxstart = (void*)cpx; void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE); if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) { - vm_map_protect (kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_READ), FALSE); + vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_READ), FALSE); } #else (void) cpx; @@ -150,24 +152,24 @@ void cpx_writeprotect (cpx_t cpx) } #if DEBUG -static const uint32_t cpx_magic1 = 0x7b787063; // cpx{ -static const uint32_t cpx_magic2 = 0x7870637d; // }cpx +static const uint32_t cpx_magic1 = 0x7b787063; // cpx{ +static const uint32_t cpx_magic2 = 0x7870637d; // }cpx #endif -void cpx_free(cpx_t cpx) +void +cpx_free(cpx_t cpx) { - #if DEBUG assert(cpx->cpx_magic1 == cpx_magic1); assert(*PTR_ADD(uint32_t *, cpx, cpx_sizex(cpx) - 4) == cpx_magic2); #endif - + #if CONFIG_KEYPAGE_WP /* unprotect the page before bzeroing */ void *cpxstart = (void*)cpx; - void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE); + void *cpxend = (void*)((uint8_t*)cpx + PAGE_SIZE); if (cpx->cpx_flags & CPX_WRITE_PROTECTABLE) { - vm_map_protect (kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_DEFAULT), FALSE); + vm_map_protect(kernel_map, (vm_map_offset_t)cpxstart, (vm_map_offset_t)cpxend, (VM_PROT_DEFAULT), FALSE); //now zero the memory after un-protecting it bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len); @@ -176,15 +178,15 @@ void cpx_free(cpx_t cpx) kmem_free(kernel_map, (vm_offset_t)cpx, PAGE_SIZE); return; } -#else +#else bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len); FREE(cpx, M_TEMP); return; #endif - } -void cpx_init(cpx_t cpx, size_t key_len) +void +cpx_init(cpx_t cpx, size_t key_len) { #if DEBUG cpx->cpx_magic1 = cpx_magic1; @@ -195,69 +197,84 @@ void cpx_init(cpx_t cpx, size_t key_len) cpx->cpx_max_key_len = key_len; } -bool cpx_is_sep_wrapped_key(const struct cpx *cpx) +bool +cpx_is_sep_wrapped_key(const struct cpx *cpx) { return ISSET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY); } -void cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v) +void +cpx_set_is_sep_wrapped_key(struct cpx *cpx, bool v) { - if (v) - SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY); - else - CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY); + if (v) { + SET(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY); + } else { + CLR(cpx->cpx_flags, CPX_SEP_WRAPPEDKEY); + } } -bool cpx_is_composite_key(const struct cpx *cpx) +bool +cpx_is_composite_key(const struct cpx *cpx) { - return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY); + return ISSET(cpx->cpx_flags, CPX_COMPOSITEKEY); } -void cpx_set_is_composite_key(struct cpx *cpx, bool v) +void +cpx_set_is_composite_key(struct cpx *cpx, bool v) { - if (v) - SET(cpx->cpx_flags, CPX_COMPOSITEKEY); - else - CLR(cpx->cpx_flags, CPX_COMPOSITEKEY); + if (v) { + SET(cpx->cpx_flags, CPX_COMPOSITEKEY); + } else { + CLR(cpx->cpx_flags, CPX_COMPOSITEKEY); + } } -bool cpx_use_offset_for_iv(const struct cpx *cpx) +bool +cpx_use_offset_for_iv(const struct cpx *cpx) { return ISSET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV); } -void cpx_set_use_offset_for_iv(struct cpx *cpx, bool v) +void +cpx_set_use_offset_for_iv(struct cpx *cpx, bool v) { - if (v) - SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV); - else - CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV); + if (v) { + 
SET(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV); + } else { + CLR(cpx->cpx_flags, CPX_USE_OFFSET_FOR_IV); + } } -bool cpx_synthetic_offset_for_iv(const struct cpx *cpx) +bool +cpx_synthetic_offset_for_iv(const struct cpx *cpx) { return ISSET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV); } -void cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v) +void +cpx_set_synthetic_offset_for_iv(struct cpx *cpx, bool v) { - if (v) - SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV); - else - CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV); + if (v) { + SET(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV); + } else { + CLR(cpx->cpx_flags, CPX_SYNTHETIC_OFFSET_FOR_IV); + } } -uint16_t cpx_max_key_len(const struct cpx *cpx) +uint16_t +cpx_max_key_len(const struct cpx *cpx) { return cpx->cpx_max_key_len; } -uint16_t cpx_key_len(const struct cpx *cpx) +uint16_t +cpx_key_len(const struct cpx *cpx) { return cpx->cpx_key_len; } -void cpx_set_key_len(struct cpx *cpx, uint16_t key_len) +void +cpx_set_key_len(struct cpx *cpx, uint16_t key_len) { cpx->cpx_key_len = key_len; @@ -272,30 +289,35 @@ void cpx_set_key_len(struct cpx *cpx, uint16_t key_len) } } -bool cpx_has_key(const struct cpx *cpx) +bool +cpx_has_key(const struct cpx *cpx) { return cpx->cpx_key_len > 0; } #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" -void *cpx_key(const struct cpx *cpx) +void * +cpx_key(const struct cpx *cpx) { return (void *)cpx->cpx_cached_key; } #pragma clang diagnostic pop -void cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key) +void +cpx_set_aes_iv_key(struct cpx *cpx, void *iv_key) { aes_encrypt_key128(iv_key, &cpx->cpx_iv_aes_ctx); SET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED | CPX_USE_OFFSET_FOR_IV); CLR(cpx->cpx_flags, CPX_IV_AES_CTX_VFS); } -aes_encrypt_ctx *cpx_iv_aes_ctx(struct cpx *cpx) +aes_encrypt_ctx * +cpx_iv_aes_ctx(struct cpx *cpx) { - if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) - return &cpx->cpx_iv_aes_ctx; + if (ISSET(cpx->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) { + return &cpx->cpx_iv_aes_ctx; + } SHA1_CTX sha1ctxt; uint8_t digest[SHA_DIGEST_LENGTH]; /* Kiv */ @@ -316,7 +338,8 @@ aes_encrypt_ctx *cpx_iv_aes_ctx(struct cpx *cpx) return &cpx->cpx_iv_aes_ctx; } -void cpx_flush(cpx_t cpx) +void +cpx_flush(cpx_t cpx) { bzero(cpx->cpx_cached_key, cpx->cpx_max_key_len); bzero(&cpx->cpx_iv_aes_ctx, sizeof(cpx->cpx_iv_aes_ctx)); @@ -324,25 +347,28 @@ void cpx_flush(cpx_t cpx) cpx->cpx_key_len = 0; } -bool cpx_can_copy(const struct cpx *src, const struct cpx *dst) +bool +cpx_can_copy(const struct cpx *src, const struct cpx *dst) { return src->cpx_key_len <= dst->cpx_max_key_len; } -void cpx_copy(const struct cpx *src, cpx_t dst) +void +cpx_copy(const struct cpx *src, cpx_t dst) { uint16_t key_len = cpx_key_len(src); cpx_set_key_len(dst, key_len); memcpy(cpx_key(dst), cpx_key(src), key_len); dst->cpx_flags = src->cpx_flags; - if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) - dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx; + if (ISSET(dst->cpx_flags, CPX_IV_AES_CTX_INITIALIZED)) { + dst->cpx_iv_aes_ctx = src->cpx_iv_aes_ctx; + } } typedef struct { cp_lock_state_t state; - int valid_uuid; - uuid_t volume_uuid; + int valid_uuid; + uuid_t volume_uuid; } cp_lock_vfs_callback_arg; static int @@ -350,19 +376,22 @@ cp_lock_vfs_callback(mount_t mp, void *arg) { cp_lock_vfs_callback_arg *callback_arg = (cp_lock_vfs_callback_arg *)arg; - if (callback_arg->valid_uuid) { + if (callback_arg->valid_uuid) { struct vfs_attr va; VFSATTR_INIT(&va); VFSATTR_WANTED(&va, f_uuid); - if (vfs_getattr(mp, 
&va, vfs_context_current())) + if (vfs_getattr(mp, &va, vfs_context_current())) { return 0; + } - if (!VFSATTR_IS_SUPPORTED(&va, f_uuid)) + if (!VFSATTR_IS_SUPPORTED(&va, f_uuid)) { return 0; + } - if(memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t))) + if (memcmp(va.f_uuid, callback_arg->volume_uuid, sizeof(uuid_t))) { return 0; + } } VFS_IOCTL(mp, FIODEVICELOCKED, (void *)(uintptr_t)callback_arg->state, 0, vfs_context_kernel()); @@ -375,14 +404,14 @@ cp_key_store_action(cp_key_store_action_t action) cp_lock_vfs_callback_arg callback_arg; switch (action) { - case CP_ACTION_LOCKED: - case CP_ACTION_UNLOCKED: - callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE); - memset(callback_arg.volume_uuid, 0, sizeof(uuid_t)); - callback_arg.valid_uuid = 0; - return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg); - default: - return -1; + case CP_ACTION_LOCKED: + case CP_ACTION_UNLOCKED: + callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE); + memset(callback_arg.volume_uuid, 0, sizeof(uuid_t)); + callback_arg.valid_uuid = 0; + return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg); + default: + return -1; } } @@ -392,14 +421,14 @@ cp_key_store_action_for_volume(uuid_t volume_uuid, cp_key_store_action_t action) cp_lock_vfs_callback_arg callback_arg; switch (action) { - case CP_ACTION_LOCKED: - case CP_ACTION_UNLOCKED: - callback_arg.state = (action == CP_ACTION_LOCKED ? CP_LOCKED_STATE : CP_UNLOCKED_STATE); - memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t)); - callback_arg.valid_uuid = 1; - return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg); - default: - return -1; + case CP_ACTION_LOCKED: + case CP_ACTION_UNLOCKED: + callback_arg.state = (action == CP_ACTION_LOCKED ? 
CP_LOCKED_STATE : CP_UNLOCKED_STATE); + memcpy(callback_arg.volume_uuid, volume_uuid, sizeof(uuid_t)); + callback_arg.valid_uuid = 1; + return vfs_iterate(0, cp_lock_vfs_callback, (void *)&callback_arg); + default: + return -1; } } @@ -413,12 +442,11 @@ cp_is_valid_class(int isdir, int32_t protectionclass) */ if (isdir) { /* Directories are not allowed to have F, but they can have "NONE" */ - return ((protectionclass >= PROTECTION_CLASS_DIR_NONE) && - (protectionclass <= PROTECTION_CLASS_D)); - } - else { - return ((protectionclass >= PROTECTION_CLASS_A) && - (protectionclass <= PROTECTION_CLASS_F)); + return (protectionclass >= PROTECTION_CLASS_DIR_NONE) && + (protectionclass <= PROTECTION_CLASS_D); + } else { + return (protectionclass >= PROTECTION_CLASS_A) && + (protectionclass <= PROTECTION_CLASS_F); } } @@ -438,12 +466,14 @@ parse_os_version(const char *vers) ++p; } - if (!a) + if (!a) { return 0; + } int b = *p++; - if (!b) + if (!b) { return 0; + } int c = 0; while (*p >= '0' && *p <= '9') { @@ -451,8 +481,9 @@ parse_os_version(const char *vers) ++p; } - if (!c) + if (!c) { return 0; + } return (a & 0xff) << 24 | b << 16 | (c & 0xffff); } @@ -462,11 +493,13 @@ cp_os_version(void) { static cp_key_os_version_t cp_os_version; - if (cp_os_version) + if (cp_os_version) { return cp_os_version; + } - if (!osversion[0]) + if (!osversion[0]) { return 0; + } cp_os_version = parse_os_version(osversion); if (!cp_os_version) { diff --git a/bsd/vfs/vfs_disk_conditioner.c b/bsd/vfs/vfs_disk_conditioner.c index 79872204b..9bfbeab55 100644 --- a/bsd/vfs/vfs_disk_conditioner.c +++ b/bsd/vfs/vfs_disk_conditioner.c @@ -53,12 +53,12 @@ #define DISK_IDLE_SEC (10 * 60) struct saved_mount_fields { - uint32_t mnt_maxreadcnt; /* Max. byte count for read */ - uint32_t mnt_maxwritecnt; /* Max. byte count for write */ - uint32_t mnt_segreadcnt; /* Max. segment count for read */ - uint32_t mnt_segwritecnt; /* Max. segment count for write */ - uint32_t mnt_ioqueue_depth; /* the maxiumum number of commands a device can accept */ - uint32_t mnt_ioscale; /* scale the various throttles/limits imposed on the amount of I/O in flight */ + uint32_t mnt_maxreadcnt; /* Max. byte count for read */ + uint32_t mnt_maxwritecnt; /* Max. byte count for write */ + uint32_t mnt_segreadcnt; /* Max. segment count for read */ + uint32_t mnt_segwritecnt; /* Max. segment count for write */ + uint32_t mnt_ioqueue_depth; /* the maxiumum number of commands a device can accept */ + uint32_t mnt_ioscale; /* scale the various throttles/limits imposed on the amount of I/O in flight */ }; struct _disk_conditioner_info_t { @@ -190,7 +190,8 @@ disk_conditioner_get_info(mount_t mp, disk_conditioner_info *uinfo) } static inline void -disk_conditioner_restore_mount_fields(mount_t mp, struct saved_mount_fields *mnt_fields) { +disk_conditioner_restore_mount_fields(mount_t mp, struct saved_mount_fields *mnt_fields) +{ mp->mnt_maxreadcnt = mnt_fields->mnt_maxreadcnt; mp->mnt_maxwritecnt = mnt_fields->mnt_maxwritecnt; mp->mnt_segreadcnt = mnt_fields->mnt_segreadcnt; diff --git a/bsd/vfs/vfs_fsevents.c b/bsd/vfs/vfs_fsevents.c index 5b8eac30e..1a6fa3844 100644 --- a/bsd/vfs/vfs_fsevents.c +++ b/bsd/vfs/vfs_fsevents.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -65,23 +65,23 @@ #include typedef struct kfs_event { - LIST_ENTRY(kfs_event) kevent_list; - int16_t type; // type code of this event - u_int16_t flags, // per-event flags - len; // the length of the path in "str" - int32_t refcount; // number of clients referencing this - pid_t pid; // pid of the process that did the op - - uint64_t abstime; // when this event happened (mach_absolute_time()) - ino64_t ino; - dev_t dev; - int32_t mode; - uid_t uid; - gid_t gid; - - const char *str; - - struct kfs_event *dest; // if this is a two-file op + LIST_ENTRY(kfs_event) kevent_list; + int16_t type; // type code of this event + u_int16_t flags, // per-event flags + len; // the length of the path in "str" + int32_t refcount; // number of clients referencing this + pid_t pid; // pid of the process that did the op + + uint64_t abstime; // when this event happened (mach_absolute_time()) + ino64_t ino; + dev_t dev; + int32_t mode; + uid_t uid; + gid_t gid; + + const char *str; + + struct kfs_event *dest; // if this is a two-file op } kfs_event; // flags for the flags field @@ -98,23 +98,23 @@ int num_pending_rename = 0; struct fsevent_handle; typedef struct fs_event_watcher { - int8_t *event_list; // the events we're interested in - int32_t num_events; - dev_t *devices_not_to_watch; // report events from devices not in this list - uint32_t num_devices; - int32_t flags; - kfs_event **event_queue; - int32_t eventq_size; // number of event pointers in queue - int32_t num_readers; - int32_t rd; // read index into the event_queue - int32_t wr; // write index into the event_queue - int32_t blockers; - int32_t my_id; - uint32_t num_dropped; - uint64_t max_event_id; - struct fsevent_handle *fseh; - pid_t pid; - char proc_name[(2 * MAXCOMLEN) + 1]; + int8_t *event_list; // the events we're interested in + int32_t num_events; + dev_t *devices_not_to_watch;// report events from devices not in this list + uint32_t num_devices; + int32_t flags; + kfs_event **event_queue; + int32_t eventq_size; // number of event pointers in queue + int32_t num_readers; + int32_t rd; // read index into the event_queue + int32_t wr; // write index into the event_queue + int32_t blockers; + int32_t my_id; + uint32_t num_dropped; + uint64_t max_event_id; + struct fsevent_handle *fseh; + pid_t pid; + char proc_name[(2 * MAXCOMLEN) + 1]; } fs_event_watcher; // fs_event_watcher flags @@ -166,99 +166,98 @@ static lck_mtx_t event_writer_lock; /* Explicitly declare qsort so compiler doesn't complain */ __private_extern__ void qsort( - void * array, - size_t nmembers, - size_t member_size, - int (*)(const void *, const void *)); + void * array, + size_t nmembers, + size_t member_size, + int 
(*)(const void *, const void *)); static int -is_ignored_directory(const char *path) { - - if (!path) { - return 0; - } +is_ignored_directory(const char *path) +{ + if (!path) { + return 0; + } #define IS_TLD(x) strnstr(__DECONST(char *, path), x, MAXPATHLEN) - if (IS_TLD("/.Spotlight-V100/") || - IS_TLD("/.MobileBackups/") || - IS_TLD("/Backups.backupdb/")) { - return 1; - } + if (IS_TLD("/.Spotlight-V100/") || + IS_TLD("/.MobileBackups/") || + IS_TLD("/Backups.backupdb/")) { + return 1; + } #undef IS_TLD - - return 0; + + return 0; } static void fsevents_internal_init(void) { - int i; - - if (fs_event_init++ != 0) { - return; - } + int i; - for(i=0; i < FSE_MAX_EVENTS; i++) { - fs_event_type_watchers[i] = 0; - } + if (fs_event_init++ != 0) { + return; + } - memset(watcher_table, 0, sizeof(watcher_table)); + for (i = 0; i < FSE_MAX_EVENTS; i++) { + fs_event_type_watchers[i] = 0; + } - fsevent_lock_attr = lck_attr_alloc_init(); - fsevent_group_attr = lck_grp_attr_alloc_init(); - fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr); - fsevent_rw_group = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr); + memset(watcher_table, 0, sizeof(watcher_table)); - lck_mtx_init(&watch_table_lock, fsevent_mutex_group, fsevent_lock_attr); - lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr); - lck_mtx_init(&event_writer_lock, fsevent_mutex_group, fsevent_lock_attr); + fsevent_lock_attr = lck_attr_alloc_init(); + fsevent_group_attr = lck_grp_attr_alloc_init(); + fsevent_mutex_group = lck_grp_alloc_init("fsevent-mutex", fsevent_group_attr); + fsevent_rw_group = lck_grp_alloc_init("fsevent-rw", fsevent_group_attr); - lck_rw_init(&event_handling_lock, fsevent_rw_group, fsevent_lock_attr); + lck_mtx_init(&watch_table_lock, fsevent_mutex_group, fsevent_lock_attr); + lck_mtx_init(&event_buf_lock, fsevent_mutex_group, fsevent_lock_attr); + lck_mtx_init(&event_writer_lock, fsevent_mutex_group, fsevent_lock_attr); - PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events)); + lck_rw_init(&event_handling_lock, fsevent_rw_group, fsevent_lock_attr); - event_zone = zinit(sizeof(kfs_event), - max_kfs_events * sizeof(kfs_event), - max_kfs_events * sizeof(kfs_event), - "fs-event-buf"); - if (event_zone == NULL) { - printf("fsevents: failed to initialize the event zone.\n"); - } + PE_get_default("kern.maxkfsevents", &max_kfs_events, sizeof(max_kfs_events)); - // mark the zone as exhaustible so that it will not - // ever grow beyond what we initially filled it with - zone_change(event_zone, Z_EXHAUST, TRUE); - zone_change(event_zone, Z_COLLECT, FALSE); - zone_change(event_zone, Z_CALLERACCT, FALSE); + event_zone = zinit(sizeof(kfs_event), + max_kfs_events * sizeof(kfs_event), + max_kfs_events * sizeof(kfs_event), + "fs-event-buf"); + if (event_zone == NULL) { + printf("fsevents: failed to initialize the event zone.\n"); + } - if (zfill(event_zone, max_kfs_events) < max_kfs_events) { - printf("fsevents: failed to pre-fill the event zone.\n"); - } - + // mark the zone as exhaustible so that it will not + // ever grow beyond what we initially filled it with + zone_change(event_zone, Z_EXHAUST, TRUE); + zone_change(event_zone, Z_COLLECT, FALSE); + zone_change(event_zone, Z_CALLERACCT, FALSE); + + if (zfill(event_zone, max_kfs_events) < max_kfs_events) { + printf("fsevents: failed to pre-fill the event zone.\n"); + } } static void lock_watch_table(void) { - lck_mtx_lock(&watch_table_lock); + lck_mtx_lock(&watch_table_lock); } static void 
unlock_watch_table(void) { - lck_mtx_unlock(&watch_table_lock); + lck_mtx_unlock(&watch_table_lock); } static void lock_fs_event_list(void) { - lck_mtx_lock(&event_buf_lock); + lck_mtx_lock(&event_buf_lock); } static void unlock_fs_event_list(void) { - lck_mtx_unlock(&event_buf_lock); + lck_mtx_unlock(&event_buf_lock); } // forward prototype @@ -267,40 +266,41 @@ static void release_event_ref(kfs_event *kfse); static int watcher_cares_about_dev(fs_event_watcher *watcher, dev_t dev) { - unsigned int i; - - // if devices_not_to_watch is NULL then we care about all - // events from all devices - if (watcher->devices_not_to_watch == NULL) { - return 1; - } + unsigned int i; - for(i=0; i < watcher->num_devices; i++) { - if (dev == watcher->devices_not_to_watch[i]) { - // found a match! that means we do not - // want events from this device. - return 0; + // if devices_not_to_watch is NULL then we care about all + // events from all devices + if (watcher->devices_not_to_watch == NULL) { + return 1; } - } - // if we're here it's not in the devices_not_to_watch[] - // list so that means we do care about it - return 1; + for (i = 0; i < watcher->num_devices; i++) { + if (dev == watcher->devices_not_to_watch[i]) { + // found a match! that means we do not + // want events from this device. + return 0; + } + } + + // if we're here it's not in the devices_not_to_watch[] + // list so that means we do care about it + return 1; } int need_fsevent(int type, vnode_t vp) { - if (type >= 0 && type < FSE_MAX_EVENTS && fs_event_type_watchers[type] == 0) - return (0); + if (type >= 0 && type < FSE_MAX_EVENTS && fs_event_type_watchers[type] == 0) { + return 0; + } - // events in /dev aren't really interesting... - if (vp->v_tag == VT_DEVFS) { - return (0); - } + // events in /dev aren't really interesting... + if (vp->v_tag == VT_DEVFS) { + return 0; + } - return 1; + return 1; } @@ -309,10 +309,10 @@ need_fsevent(int type, vnode_t vp) // Ways that an event can be reused: // -// "combined" events mean that there were two events for -// the same vnode or path and we're combining both events +// "combined" events mean that there were two events for +// the same vnode or path and we're combining both events // into a single event. The primary event gets a bit that -// marks it as having been combined. The secondary event +// marks it as having been combined. The secondary event // is essentially dropped and the kfse structure reused. // // "collapsed" means that multiple events below a given @@ -320,9 +320,9 @@ need_fsevent(int type, vnode_t vp) // case, the directory that we collapse into and all of // its children must be re-scanned. // -// "recycled" means that we're completely blowing away -// the event since there are other events that have info -// about the same vnode or path (and one of those other +// "recycled" means that we're completely blowing away +// the event since there are other events that have info +// about the same vnode or path (and one of those other // events will be marked as combined or collapsed as // appropriate). // @@ -345,684 +345,680 @@ static struct timeval last_print; // tests like a Finder copy where multiple stat-changed events can // get coalesced. 
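The duplicate suppression described here, implemented in add_fsevent below, amounts to a one-entry cache of the previous event: its type, its vnode or path identity, and its timestamp. A new event is silently dropped when all three match within a one-second window. A compact model of that test (the structure and names are illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define COALESCE_NSECS 1000000000ULL    /* the 1s window add_fsevent uses */

struct last_event {
	int      type;
	uint64_t when_ns;
	char     path[256];
};

/* Return 1 when the new event duplicates the cached one (drop it),
 * else remember it and return 0. */
static int coalesce(struct last_event *last, int type,
    const char *path, uint64_t now_ns)
{
	if (type == last->type &&
	    now_ns - last->when_ns < COALESCE_NSECS &&
	    strcmp(last->path, path) == 0) {
		return 1;
	}
	last->type = type;
	last->when_ns = now_ns;
	snprintf(last->path, sizeof(last->path), "%s", path);
	return 0;
}

int main(void)
{
	struct last_event last = { -1, 0, "" };

	printf("%d", coalesce(&last, 17, "/tmp/f", 1000));           /* 0: cached  */
	printf("%d", coalesce(&last, 17, "/tmp/f", 500000000));      /* 1: dropped */
	printf("%d\n", coalesce(&last, 17, "/tmp/f", 3000000000));   /* 0: expired */
	return 0;
}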
// -static int last_event_type=-1; -static void *last_ptr=NULL; +static int last_event_type = -1; +static void *last_ptr = NULL; static char last_str[MAXPATHLEN]; -static int last_nlen=0; -static int last_vid=-1; -static uint64_t last_coalesced_time=0; -static void *last_event_ptr=NULL; +static int last_nlen = 0; +static int last_vid = -1; +static uint64_t last_coalesced_time = 0; +static void *last_event_ptr = NULL; int last_coalesced = 0; static mach_timebase_info_data_t sTimebaseInfo = { 0, 0 }; int -add_fsevent(int type, vfs_context_t ctx, ...) +add_fsevent(int type, vfs_context_t ctx, ...) { - struct proc *p = vfs_context_proc(ctx); - int i, arg_type, ret; - kfs_event *kfse, *kfse_dest=NULL, *cur; - fs_event_watcher *watcher; - va_list ap; - int error = 0, did_alloc=0; - dev_t dev = 0; - uint64_t now, elapsed; - char *pathbuff=NULL; - int pathbuff_len; + struct proc *p = vfs_context_proc(ctx); + int i, arg_type, ret; + kfs_event *kfse, *kfse_dest = NULL, *cur; + fs_event_watcher *watcher; + va_list ap; + int error = 0, did_alloc = 0; + dev_t dev = 0; + uint64_t now, elapsed; + char *pathbuff = NULL; + int pathbuff_len; - va_start(ap, ctx); + va_start(ap, ctx); - // ignore bogus event types.. - if (type < 0 || type >= FSE_MAX_EVENTS) { - return EINVAL; - } + // ignore bogus event types.. + if (type < 0 || type >= FSE_MAX_EVENTS) { + return EINVAL; + } - // if no one cares about this type of event, bail out - if (fs_event_type_watchers[type] == 0) { - va_end(ap); + // if no one cares about this type of event, bail out + if (fs_event_type_watchers[type] == 0) { + va_end(ap); - return 0; - } - - now = mach_absolute_time(); - - // find a free event and snag it for our use - // NOTE: do not do anything that would block until - // the lock is dropped. - lock_fs_event_list(); - - // - // check if this event is identical to the previous one... - // (as long as it's not an event type that can never be the - // same as a previous event) - // - if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED && type != FSE_CLONE) { - void *ptr=NULL; - int vid=0, was_str=0, nlen=0; - - for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t)) { - switch(arg_type) { - case FSE_ARG_VNODE: { - ptr = va_arg(ap, void *); - vid = vnode_vid((struct vnode *)ptr); - last_str[0] = '\0'; - break; - } - case FSE_ARG_STRING: { - nlen = va_arg(ap, int32_t); - ptr = va_arg(ap, void *); - was_str = 1; - break; - } - } - if (ptr != NULL) { - break; - } - } - - if ( sTimebaseInfo.denom == 0 ) { - (void) clock_timebase_info(&sTimebaseInfo); - } - - elapsed = (now - last_coalesced_time); - if (sTimebaseInfo.denom != sTimebaseInfo.numer) { - if (sTimebaseInfo.denom == 1) { - elapsed *= sTimebaseInfo.numer; - } else { - // this could overflow... the worst that will happen is that we'll - // send (or not send) an extra event so I'm not going to worry about - // doing the math right like dtrace_abs_to_nano() does. 
- elapsed = (elapsed * sTimebaseInfo.numer) / (uint64_t)sTimebaseInfo.denom; - } - } - - if (type == last_event_type - && (elapsed < 1000000000) - && - ((vid && vid == last_vid && last_ptr == ptr) - || - (last_str[0] && last_nlen == nlen && ptr && strcmp(last_str, ptr) == 0)) - ) { - - last_coalesced++; - unlock_fs_event_list(); - va_end(ap); - - return 0; - } else { - last_ptr = ptr; - if (was_str) { - strlcpy(last_str, ptr, sizeof(last_str)); - } - last_nlen = nlen; - last_vid = vid; - last_event_type = type; - last_coalesced_time = now; + return 0; } - } - va_start(ap, ctx); + now = mach_absolute_time(); - kfse = zalloc_noblock(event_zone); - if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE)) { - kfse_dest = zalloc_noblock(event_zone); - if (kfse_dest == NULL) { - did_alloc = 1; - zfree(event_zone, kfse); - kfse = NULL; - } - } + // find a free event and snag it for our use + // NOTE: do not do anything that would block until + // the lock is dropped. + lock_fs_event_list(); + // + // check if this event is identical to the previous one... + // (as long as it's not an event type that can never be the + // same as a previous event) + // + if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN && type != FSE_DOCID_CHANGED && type != FSE_DOCID_CREATED && type != FSE_CLONE) { + void *ptr = NULL; + int vid = 0, was_str = 0, nlen = 0; + + for (arg_type = va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type = va_arg(ap, int32_t)) { + switch (arg_type) { + case FSE_ARG_VNODE: { + ptr = va_arg(ap, void *); + vid = vnode_vid((struct vnode *)ptr); + last_str[0] = '\0'; + break; + } + case FSE_ARG_STRING: { + nlen = va_arg(ap, int32_t); + ptr = va_arg(ap, void *); + was_str = 1; + break; + } + } + if (ptr != NULL) { + break; + } + } - if (kfse == NULL) { // yikes! no free events - unlock_fs_event_list(); - lock_watch_table(); + if (sTimebaseInfo.denom == 0) { + (void) clock_timebase_info(&sTimebaseInfo); + } - for(i=0; i < MAX_WATCHERS; i++) { - watcher = watcher_table[i]; - if (watcher == NULL) { - continue; + elapsed = (now - last_coalesced_time); + if (sTimebaseInfo.denom != sTimebaseInfo.numer) { + if (sTimebaseInfo.denom == 1) { + elapsed *= sTimebaseInfo.numer; + } else { + // this could overflow... the worst that will happen is that we'll + // send (or not send) an extra event so I'm not going to worry about + // doing the math right like dtrace_abs_to_nano() does. + elapsed = (elapsed * sTimebaseInfo.numer) / (uint64_t)sTimebaseInfo.denom; + } } - watcher->flags |= WATCHER_DROPPED_EVENTS; - fsevents_wakeup(watcher); - } - unlock_watch_table(); - - { - struct timeval current_tv; - - num_dropped++; - - // only print a message at most once every 5 seconds - microuptime(&current_tv); - if ((current_tv.tv_sec - last_print.tv_sec) > 10) { - int ii; - void *junkptr=zalloc_noblock(event_zone), *listhead=kfse_list_head.lh_first; - - printf("add_fsevent: event queue is full! 
dropping events (num dropped events: %d; num events outstanding: %d).\n", num_dropped, num_events_outstanding); - printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename); - printf("add_fsevent: zalloc sez: %p\n", junkptr); - printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]); - lock_watch_table(); - for(ii=0; ii < MAX_WATCHERS; ii++) { - if (watcher_table[ii] == NULL) { - continue; + if (type == last_event_type + && (elapsed < 1000000000) + && + ((vid && vid == last_vid && last_ptr == ptr) + || + (last_str[0] && last_nlen == nlen && ptr && strcmp(last_str, ptr) == 0)) + ) { + last_coalesced++; + unlock_fs_event_list(); + va_end(ap); + + return 0; + } else { + last_ptr = ptr; + if (was_str) { + strlcpy(last_str, ptr, sizeof(last_str)); } - - printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n", - watcher_table[ii]->proc_name, - watcher_table[ii], - watcher_table[ii]->rd, watcher_table[ii]->wr, - watcher_table[ii]->eventq_size, watcher_table[ii]->flags); - } - unlock_watch_table(); - - last_print = current_tv; - if (junkptr) { - zfree(event_zone, junkptr); - } - } - } - - if (pathbuff) { - release_pathbuff(pathbuff); - pathbuff = NULL; - } - return ENOSPC; - } - - memset(kfse, 0, sizeof(kfs_event)); - kfse->refcount = 1; - OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags); - - last_event_ptr = kfse; - kfse->type = type; - kfse->abstime = now; - kfse->pid = p->p_pid; - if (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE) { - memset(kfse_dest, 0, sizeof(kfs_event)); - kfse_dest->refcount = 1; - OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags); - kfse_dest->type = type; - kfse_dest->pid = p->p_pid; - kfse_dest->abstime = now; - - kfse->dest = kfse_dest; - } - - num_events_outstanding++; - if (kfse->type == FSE_RENAME) { - num_pending_rename++; - } - LIST_INSERT_HEAD(&kfse_list_head, kfse, kevent_list); - - if (kfse->refcount < 1) { - panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount); - } - - unlock_fs_event_list(); // at this point it's safe to unlock - - // - // now process the arguments passed in and copy them into - // the kfse - // - - cur = kfse; - - if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) { - uint64_t val; - - // - // These events are special and not like the other events. They only - // have a dev_t, src inode #, dest inode #, and a doc-id. We use the - // fields that we can in the kfse but have to overlay the dest inode - // number and the doc-id on the other fields. 
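As the deleted comment above notes, FSE_DOCID_CREATED and FSE_DOCID_CHANGED events carry only a dev_t, two inode numbers, and a 64-bit document id, so add_fsevent() overlays the extra values onto kfse fields sized for other data (the dest inode over the str pointer, the doc id over uid/gid). A hypothetical reduction of that overlay idiom, with made-up names:

#include <stdint.h>
#include <string.h>

/* kfse_like and store_docid are illustrative; the real structure is
 * kfs_event, whose uid/gid fields host the 64-bit document id. */
struct kfse_like {
	uint32_t uid;
	uint32_t gid;	/* assumed adjacent to uid with no padding */
};

static void
store_docid(struct kfse_like *e, uint64_t docid)
{
	/* memcpy avoids the alignment and strict-aliasing hazards of a
	 * direct cast such as *(uint64_t *)&e->uid = docid */
	memcpy(&e->uid, &docid, sizeof(uint64_t));
}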
- // - - // First the dev_t - arg_type = va_arg(ap, int32_t); - if (arg_type == FSE_ARG_DEV) { - cur->dev = (dev_t)(va_arg(ap, dev_t)); - } else { - cur->dev = (dev_t)0xbadc0de1; - } - - // next the source inode # - arg_type = va_arg(ap, int32_t); - if (arg_type == FSE_ARG_INO) { - cur->ino = (ino64_t)(va_arg(ap, ino64_t)); - } else { - cur->ino = 0xbadc0de2; - } - - // now the dest inode # - arg_type = va_arg(ap, int32_t); - if (arg_type == FSE_ARG_INO) { - val = (ino64_t)(va_arg(ap, ino64_t)); - } else { - val = 0xbadc0de2; - } - // overlay the dest inode number on the str/dest pointer fields - memcpy(&cur->str, &val, sizeof(ino64_t)); - - - // and last the document-id - arg_type = va_arg(ap, int32_t); - if (arg_type == FSE_ARG_INT32) { - val = (uint64_t)va_arg(ap, uint32_t); - } else if (arg_type == FSE_ARG_INT64) { - val = (uint64_t)va_arg(ap, uint64_t); - } else { - val = 0xbadc0de3; - } - - // the docid is 64-bit and overlays the uid/gid fields - memcpy(&cur->uid, &val, sizeof(uint64_t)); - - goto done_with_args; - } - - if (type == FSE_UNMOUNT_PENDING) { - - // Just a dev_t - arg_type = va_arg(ap, int32_t); - if (arg_type == FSE_ARG_DEV) { - cur->dev = (dev_t)(va_arg(ap, dev_t)); - } else { - cur->dev = (dev_t)0xbadc0de1; - } - - goto done_with_args; - } - - for(arg_type=va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type=va_arg(ap, int32_t)) - - switch(arg_type) { - case FSE_ARG_VNODE: { - // this expands out into multiple arguments to the client - struct vnode *vp; - struct vnode_attr va; - - if (kfse->str != NULL) { - cur = kfse_dest; - } - - vp = va_arg(ap, struct vnode *); - if (vp == NULL) { - panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n", - cur->type); - } - - VATTR_INIT(&va); - VATTR_WANTED(&va, va_fsid); - VATTR_WANTED(&va, va_fileid); - VATTR_WANTED(&va, va_mode); - VATTR_WANTED(&va, va_uid); - VATTR_WANTED(&va, va_gid); - VATTR_WANTED(&va, va_nlink); - if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) { - // printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret); - cur->str = NULL; - error = EINVAL; - goto clean_up; - } - - cur->dev = dev = (dev_t)va.va_fsid; - cur->ino = (ino64_t)va.va_fileid; - cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode; - cur->uid = va.va_uid; - cur->gid = va.va_gid; - if (vp->v_flag & VISHARDLINK) { - cur->mode |= FSE_MODE_HLINK; - if ((vp->v_type == VDIR && va.va_dirlinkcount == 0) || (vp->v_type == VREG && va.va_nlink == 0)) { - cur->mode |= FSE_MODE_LAST_HLINK; + last_nlen = nlen; + last_vid = vid; + last_event_type = type; + last_coalesced_time = now; + } + } + va_start(ap, ctx); + + + kfse = zalloc_noblock(event_zone); + if (kfse && (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE)) { + kfse_dest = zalloc_noblock(event_zone); + if (kfse_dest == NULL) { + did_alloc = 1; + zfree(event_zone, kfse); + kfse = NULL; + } + } + + + if (kfse == NULL) { // yikes! no free events + unlock_fs_event_list(); + lock_watch_table(); + + for (i = 0; i < MAX_WATCHERS; i++) { + watcher = watcher_table[i]; + if (watcher == NULL) { + continue; } + + watcher->flags |= WATCHER_DROPPED_EVENTS; + fsevents_wakeup(watcher); } + unlock_watch_table(); - // if we haven't gotten the path yet, get it. 
- if (pathbuff == NULL) { - pathbuff = get_pathbuff(); - pathbuff_len = MAXPATHLEN; - - pathbuff[0] = '\0'; - if ((ret = vn_getpath(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') { - - cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS; - - do { - if (vp->v_parent != NULL) { - vp = vp->v_parent; - } else if (vp->v_mount) { - strlcpy(pathbuff, vp->v_mount->mnt_vfsstat.f_mntonname, MAXPATHLEN); - break; - } else { - vp = NULL; + { + struct timeval current_tv; + + num_dropped++; + + // only print a message at most once every 5 seconds + microuptime(&current_tv); + if ((current_tv.tv_sec - last_print.tv_sec) > 10) { + int ii; + void *junkptr = zalloc_noblock(event_zone), *listhead = kfse_list_head.lh_first; + + printf("add_fsevent: event queue is full! dropping events (num dropped events: %d; num events outstanding: %d).\n", num_dropped, num_events_outstanding); + printf("add_fsevent: kfse_list head %p ; num_pending_rename %d\n", listhead, num_pending_rename); + printf("add_fsevent: zalloc sez: %p\n", junkptr); + printf("add_fsevent: event_zone info: %d 0x%x\n", ((int *)event_zone)[0], ((int *)event_zone)[1]); + lock_watch_table(); + for (ii = 0; ii < MAX_WATCHERS; ii++) { + if (watcher_table[ii] == NULL) { + continue; + } + + printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n", + watcher_table[ii]->proc_name, + watcher_table[ii], + watcher_table[ii]->rd, watcher_table[ii]->wr, + watcher_table[ii]->eventq_size, watcher_table[ii]->flags); } + unlock_watch_table(); - if (vp == NULL) { - break; + last_print = current_tv; + if (junkptr) { + zfree(event_zone, junkptr); } - - pathbuff_len = MAXPATHLEN; - ret = vn_getpath(vp, pathbuff, &pathbuff_len); - } while (ret == ENOSPC); - - if (ret != 0 || vp == NULL) { - error = ENOENT; - goto clean_up; - } - } } - // store the path by adding it to the global string table - cur->len = pathbuff_len; - cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0); - if (cur->str == NULL || cur->str[0] == '\0') { - panic("add_fsevent: was not able to add path %s to event %p.\n", pathbuff, cur); + if (pathbuff) { + release_pathbuff(pathbuff); + pathbuff = NULL; } - - release_pathbuff(pathbuff); - pathbuff = NULL; + return ENOSPC; + } - break; - } - - case FSE_ARG_FINFO: { - fse_info *fse; - - fse = va_arg(ap, fse_info *); - - cur->dev = dev = (dev_t)fse->dev; - cur->ino = (ino64_t)fse->ino; - cur->mode = (int32_t)fse->mode; - cur->uid = (uid_t)fse->uid; - cur->gid = (uid_t)fse->gid; - // if it's a hard-link and this is the last link, flag it - if ((fse->mode & FSE_MODE_HLINK) && fse->nlink == 0) { - cur->mode |= FSE_MODE_LAST_HLINK; - } - if (cur->mode & FSE_TRUNCATED_PATH) { - cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS; - cur->mode &= ~FSE_TRUNCATED_PATH; + memset(kfse, 0, sizeof(kfs_event)); + kfse->refcount = 1; + OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags); + + last_event_ptr = kfse; + kfse->type = type; + kfse->abstime = now; + kfse->pid = p->p_pid; + if (type == FSE_RENAME || type == FSE_EXCHANGE || type == FSE_CLONE) { + memset(kfse_dest, 0, sizeof(kfs_event)); + kfse_dest->refcount = 1; + OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags); + kfse_dest->type = type; + kfse_dest->pid = p->p_pid; + kfse_dest->abstime = now; + + kfse->dest = kfse_dest; + } + + num_events_outstanding++; + if (kfse->type == FSE_RENAME) { + num_pending_rename++; + } + LIST_INSERT_HEAD(&kfse_list_head, kfse, kevent_list); + + if (kfse->refcount < 1) { + panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount); + } + 
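When zalloc_noblock() returns NULL above, every watcher is flagged WATCHER_DROPPED_EVENTS and the diagnostics are throttled with a microuptime() timestamp so a burst of drops does not flood the log (note the source comment says five seconds while the comparison uses ten). A kernel-style sketch of that throttle, with a hypothetical helper name:

#include <sys/time.h>

/* should_log is illustrative; the hunk open-codes the same check
 * against the file-scope last_print timestamp. */
static struct timeval last_print;

static int
should_log(time_t interval_secs)
{
	struct timeval now;

	microuptime(&now);	/* monotonic uptime, immune to clock changes */
	if (now.tv_sec - last_print.tv_sec > interval_secs) {
		last_print = now;
		return 1;
	}
	return 0;
}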
+ unlock_fs_event_list(); // at this point it's safe to unlock + + // + // now process the arguments passed in and copy them into + // the kfse + // + + cur = kfse; + + if (type == FSE_DOCID_CREATED || type == FSE_DOCID_CHANGED) { + uint64_t val; + + // + // These events are special and not like the other events. They only + // have a dev_t, src inode #, dest inode #, and a doc-id. We use the + // fields that we can in the kfse but have to overlay the dest inode + // number and the doc-id on the other fields. + // + + // First the dev_t + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_DEV) { + cur->dev = (dev_t)(va_arg(ap, dev_t)); + } else { + cur->dev = (dev_t)0xbadc0de1; } - break; - } - case FSE_ARG_STRING: - if (kfse->str != NULL) { - cur = kfse_dest; + // next the source inode # + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_INO) { + cur->ino = (ino64_t)(va_arg(ap, ino64_t)); + } else { + cur->ino = 0xbadc0de2; } - cur->len = (int16_t)(va_arg(ap, int32_t) & 0x7fff); - if (cur->len >= 1) { - cur->str = vfs_addname(va_arg(ap, char *), cur->len, 0, 0); + // now the dest inode # + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_INO) { + val = (ino64_t)(va_arg(ap, ino64_t)); } else { - printf("add_fsevent: funny looking string length: %d\n", (int)cur->len); - cur->len = 2; - cur->str = vfs_addname("/", cur->len, 0, 0); + val = 0xbadc0de2; } - if (cur->str[0] == 0) { - printf("add_fsevent: bogus looking string (len %d)\n", cur->len); + // overlay the dest inode number on the str/dest pointer fields + memcpy(&cur->str, &val, sizeof(ino64_t)); + + + // and last the document-id + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_INT32) { + val = (uint64_t)va_arg(ap, uint32_t); + } else if (arg_type == FSE_ARG_INT64) { + val = (uint64_t)va_arg(ap, uint64_t); + } else { + val = 0xbadc0de3; } - break; - case FSE_ARG_INT32: { - uint32_t ival = (uint32_t)va_arg(ap, int32_t); - kfse->uid = (ino64_t)ival; - break; - } - - default: - printf("add_fsevent: unknown type %d\n", arg_type); - // just skip one 32-bit word and hope we sync up... 
- (void)va_arg(ap, int32_t); + // the docid is 64-bit and overlays the uid/gid fields + memcpy(&cur->uid, &val, sizeof(uint64_t)); + + goto done_with_args; } -done_with_args: - va_end(ap); - - OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags); - if (kfse_dest) { - OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags); - } - - // - // now we have to go and let everyone know that - // is interested in this type of event - // - lock_watch_table(); - - for(i=0; i < MAX_WATCHERS; i++) { - watcher = watcher_table[i]; - if (watcher == NULL) { - continue; + if (type == FSE_UNMOUNT_PENDING) { + // Just a dev_t + arg_type = va_arg(ap, int32_t); + if (arg_type == FSE_ARG_DEV) { + cur->dev = (dev_t)(va_arg(ap, dev_t)); + } else { + cur->dev = (dev_t)0xbadc0de1; + } + + goto done_with_args; + } + + for (arg_type = va_arg(ap, int32_t); arg_type != FSE_ARG_DONE; arg_type = va_arg(ap, int32_t)) { + switch (arg_type) { + case FSE_ARG_VNODE: { + // this expands out into multiple arguments to the client + struct vnode *vp; + struct vnode_attr va; + + if (kfse->str != NULL) { + cur = kfse_dest; + } + + vp = va_arg(ap, struct vnode *); + if (vp == NULL) { + panic("add_fsevent: you can't pass me a NULL vnode ptr (type %d)!\n", + cur->type); + } + + VATTR_INIT(&va); + VATTR_WANTED(&va, va_fsid); + VATTR_WANTED(&va, va_fileid); + VATTR_WANTED(&va, va_mode); + VATTR_WANTED(&va, va_uid); + VATTR_WANTED(&va, va_gid); + VATTR_WANTED(&va, va_nlink); + if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) { + // printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret); + cur->str = NULL; + error = EINVAL; + goto clean_up; + } + + cur->dev = dev = (dev_t)va.va_fsid; + cur->ino = (ino64_t)va.va_fileid; + cur->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | va.va_mode; + cur->uid = va.va_uid; + cur->gid = va.va_gid; + if (vp->v_flag & VISHARDLINK) { + cur->mode |= FSE_MODE_HLINK; + if ((vp->v_type == VDIR && va.va_dirlinkcount == 0) || (vp->v_type == VREG && va.va_nlink == 0)) { + cur->mode |= FSE_MODE_LAST_HLINK; + } + } + + // if we haven't gotten the path yet, get it. 
+ if (pathbuff == NULL) { + pathbuff = get_pathbuff(); + pathbuff_len = MAXPATHLEN; + + pathbuff[0] = '\0'; + if ((ret = vn_getpath(vp, pathbuff, &pathbuff_len)) != 0 || pathbuff[0] == '\0') { + cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS; + + do { + if (vp->v_parent != NULL) { + vp = vp->v_parent; + } else if (vp->v_mount) { + strlcpy(pathbuff, vp->v_mount->mnt_vfsstat.f_mntonname, MAXPATHLEN); + break; + } else { + vp = NULL; + } + + if (vp == NULL) { + break; + } + + pathbuff_len = MAXPATHLEN; + ret = vn_getpath(vp, pathbuff, &pathbuff_len); + } while (ret == ENOSPC); + + if (ret != 0 || vp == NULL) { + error = ENOENT; + goto clean_up; + } + } + } + + // store the path by adding it to the global string table + cur->len = pathbuff_len; + cur->str = vfs_addname(pathbuff, pathbuff_len, 0, 0); + if (cur->str == NULL || cur->str[0] == '\0') { + panic("add_fsevent: was not able to add path %s to event %p.\n", pathbuff, cur); + } + + release_pathbuff(pathbuff); + pathbuff = NULL; + + break; + } + + case FSE_ARG_FINFO: { + fse_info *fse; + + fse = va_arg(ap, fse_info *); + + cur->dev = dev = (dev_t)fse->dev; + cur->ino = (ino64_t)fse->ino; + cur->mode = (int32_t)fse->mode; + cur->uid = (uid_t)fse->uid; + cur->gid = (uid_t)fse->gid; + // if it's a hard-link and this is the last link, flag it + if ((fse->mode & FSE_MODE_HLINK) && fse->nlink == 0) { + cur->mode |= FSE_MODE_LAST_HLINK; + } + if (cur->mode & FSE_TRUNCATED_PATH) { + cur->flags |= KFSE_CONTAINS_DROPPED_EVENTS; + cur->mode &= ~FSE_TRUNCATED_PATH; + } + break; + } + + case FSE_ARG_STRING: + if (kfse->str != NULL) { + cur = kfse_dest; + } + + cur->len = (int16_t)(va_arg(ap, int32_t) & 0x7fff); + if (cur->len >= 1) { + cur->str = vfs_addname(va_arg(ap, char *), cur->len, 0, 0); + } else { + printf("add_fsevent: funny looking string length: %d\n", (int)cur->len); + cur->len = 2; + cur->str = vfs_addname("/", cur->len, 0, 0); + } + if (cur->str[0] == 0) { + printf("add_fsevent: bogus looking string (len %d)\n", cur->len); + } + break; + + case FSE_ARG_INT32: { + uint32_t ival = (uint32_t)va_arg(ap, int32_t); + kfse->uid = (ino64_t)ival; + break; + } + + default: + printf("add_fsevent: unknown type %d\n", arg_type); + // just skip one 32-bit word and hope we sync up... 
+ (void)va_arg(ap, int32_t); + } } - - if ( type < watcher->num_events - && watcher->event_list[type] == FSE_REPORT - && watcher_cares_about_dev(watcher, dev)) { - - if (watcher_add_event(watcher, kfse) != 0) { - watcher->num_dropped++; - continue; - } + +done_with_args: + va_end(ap); + + OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse->flags); + if (kfse_dest) { + OSBitAndAtomic16(~KFSE_BEING_CREATED, &kfse_dest->flags); } - // if (kfse->refcount < 1) { - // panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount); - // } - } + // + // now we have to go and let everyone know that + // is interested in this type of event + // + lock_watch_table(); + + for (i = 0; i < MAX_WATCHERS; i++) { + watcher = watcher_table[i]; + if (watcher == NULL) { + continue; + } - unlock_watch_table(); + if (type < watcher->num_events + && watcher->event_list[type] == FSE_REPORT + && watcher_cares_about_dev(watcher, dev)) { + if (watcher_add_event(watcher, kfse) != 0) { + watcher->num_dropped++; + continue; + } + } - clean_up: + // if (kfse->refcount < 1) { + // panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount); + // } + } - if (pathbuff) { - release_pathbuff(pathbuff); - pathbuff = NULL; - } + unlock_watch_table(); - release_event_ref(kfse); +clean_up: - return error; + if (pathbuff) { + release_pathbuff(pathbuff); + pathbuff = NULL; + } + + release_event_ref(kfse); + + return error; } static void release_event_ref(kfs_event *kfse) { - int old_refcount; - kfs_event copy, dest_copy; - - - old_refcount = OSAddAtomic(-1, &kfse->refcount); - if (old_refcount > 1) { - return; - } - - lock_fs_event_list(); - if (last_event_ptr == kfse) { - last_event_ptr = NULL; - last_event_type = -1; - last_coalesced_time = 0; - } - - if (kfse->refcount < 0) { - panic("release_event_ref: bogus kfse refcount %d\n", kfse->refcount); - } - - if (kfse->refcount > 0 || kfse->type == FSE_INVALID) { - // This is very subtle. Either of these conditions can - // be true if an event got recycled while we were waiting - // on the fs_event_list lock or the event got recycled, - // delivered, _and_ free'd by someone else while we were - // waiting on the fs event list lock. In either case - // we need to just unlock the list and return without - // doing anything because if the refcount is > 0 then - // someone else will take care of free'ing it and when - // the kfse->type is invalid then someone else already - // has handled free'ing the event (while we were blocked - // on the event list lock). + int old_refcount; + kfs_event copy, dest_copy; + + + old_refcount = OSAddAtomic(-1, &kfse->refcount); + if (old_refcount > 1) { + return; + } + + lock_fs_event_list(); + if (last_event_ptr == kfse) { + last_event_ptr = NULL; + last_event_type = -1; + last_coalesced_time = 0; + } + + if (kfse->refcount < 0) { + panic("release_event_ref: bogus kfse refcount %d\n", kfse->refcount); + } + + if (kfse->refcount > 0 || kfse->type == FSE_INVALID) { + // This is very subtle. Either of these conditions can + // be true if an event got recycled while we were waiting + // on the fs_event_list lock or the event got recycled, + // delivered, _and_ free'd by someone else while we were + // waiting on the fs event list lock. 
In either case + // we need to just unlock the list and return without + // doing anything because if the refcount is > 0 then + // someone else will take care of free'ing it and when + // the kfse->type is invalid then someone else already + // has handled free'ing the event (while we were blocked + // on the event list lock). + // + unlock_fs_event_list(); + return; + } + // - unlock_fs_event_list(); - return; - } - - // - // make a copy of this so we can free things without - // holding the fs_event_buf lock - // - copy = *kfse; - if (kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) { - dest_copy = *kfse->dest; - } else { - dest_copy.str = NULL; - dest_copy.len = 0; - dest_copy.type = FSE_INVALID; - } - - kfse->pid = kfse->type; // save this off for debugging... - kfse->uid = (uid_t)(long)kfse->str; // save this off for debugging... - kfse->gid = (gid_t)(long)current_thread(); - - kfse->str = (char *)0xdeadbeef; // XXXdbg - catch any cheaters... - - if (dest_copy.type != FSE_INVALID) { - kfse->dest->str = (char *)0xbadc0de; // XXXdbg - catch any cheaters... - kfse->dest->type = FSE_INVALID; - - if (kfse->dest->kevent_list.le_prev != NULL) { - num_events_outstanding--; - LIST_REMOVE(kfse->dest, kevent_list); - memset(&kfse->dest->kevent_list, 0xa5, sizeof(kfse->dest->kevent_list)); - } - - zfree(event_zone, kfse->dest); - } - - // mark this fsevent as invalid - { - int otype; - - otype = kfse->type; - kfse->type = FSE_INVALID; - - if (kfse->kevent_list.le_prev != NULL) { - num_events_outstanding--; - if (otype == FSE_RENAME) { - num_pending_rename--; - } - LIST_REMOVE(kfse, kevent_list); - memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list)); - } - } - - zfree(event_zone, kfse); - - unlock_fs_event_list(); - - // if we have a pointer in the union - if (copy.str && copy.type != FSE_DOCID_CREATED && copy.type != FSE_DOCID_CHANGED) { - if (copy.len == 0) { // and it's not a string - panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__); - // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0); - } else { // else it's a string - vfs_removename(copy.str); - } - } - - if (dest_copy.type != FSE_INVALID && dest_copy.str) { - if (dest_copy.len == 0) { - panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__); - // vnode_rele_ext(dest_copy.fref.vp, O_EVTONLY, 0); + // make a copy of this so we can free things without + // holding the fs_event_buf lock + // + copy = *kfse; + if (kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && kfse->dest && OSAddAtomic(-1, &kfse->dest->refcount) == 1) { + dest_copy = *kfse->dest; } else { - vfs_removename(dest_copy.str); + dest_copy.str = NULL; + dest_copy.len = 0; + dest_copy.type = FSE_INVALID; + } + + kfse->pid = kfse->type; // save this off for debugging... + kfse->uid = (uid_t)(long)kfse->str; // save this off for debugging... + kfse->gid = (gid_t)(long)current_thread(); + + kfse->str = (char *)0xdeadbeef; // XXXdbg - catch any cheaters... + + if (dest_copy.type != FSE_INVALID) { + kfse->dest->str = (char *)0xbadc0de; // XXXdbg - catch any cheaters... 
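The 0xdeadbeef and 0xbadc0de stores above are deliberate poison values: once release_event_ref() decides an event (or its rename/clone destination) is going away, any stale pointer that is still dereferenced will fault at a recognizable address instead of silently reading recycled zone memory. The idiom in isolation, with hypothetical names:

#include <stdint.h>

/* event_like and invalidate_event are illustrative only. */
#define POISON_STR ((char *)0xdeadbeef)

struct event_like {
	char *str;
	int type;
};

static void
invalidate_event(struct event_like *e)
{
	e->str = POISON_STR;	/* later use crashes at a telltale address */
	e->type = -1;		/* stand-in for FSE_INVALID */
}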
+ kfse->dest->type = FSE_INVALID; + + if (kfse->dest->kevent_list.le_prev != NULL) { + num_events_outstanding--; + LIST_REMOVE(kfse->dest, kevent_list); + memset(&kfse->dest->kevent_list, 0xa5, sizeof(kfse->dest->kevent_list)); + } + + zfree(event_zone, kfse->dest); + } + + // mark this fsevent as invalid + { + int otype; + + otype = kfse->type; + kfse->type = FSE_INVALID; + + if (kfse->kevent_list.le_prev != NULL) { + num_events_outstanding--; + if (otype == FSE_RENAME) { + num_pending_rename--; + } + LIST_REMOVE(kfse, kevent_list); + memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list)); + } + } + + zfree(event_zone, kfse); + + unlock_fs_event_list(); + + // if we have a pointer in the union + if (copy.str && copy.type != FSE_DOCID_CREATED && copy.type != FSE_DOCID_CHANGED) { + if (copy.len == 0) { // and it's not a string + panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__); + // vnode_rele_ext(copy.fref.vp, O_EVTONLY, 0); + } else { // else it's a string + vfs_removename(copy.str); + } + } + + if (dest_copy.type != FSE_INVALID && dest_copy.str) { + if (dest_copy.len == 0) { + panic("%s:%d: no more fref.vp!\n", __FILE__, __LINE__); + // vnode_rele_ext(dest_copy.fref.vp, O_EVTONLY, 0); + } else { + vfs_removename(dest_copy.str); + } } - } } static int add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh) { - int i; - fs_event_watcher *watcher; - - if (eventq_size <= 0 || eventq_size > 100*max_kfs_events) { - eventq_size = max_kfs_events; - } - - // Note: the event_queue follows the fs_event_watcher struct - // in memory so we only have to do one allocation - MALLOC(watcher, - fs_event_watcher *, - sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *), - M_TEMP, M_WAITOK); - if (watcher == NULL) { - return ENOMEM; - } - - watcher->event_list = event_list; - watcher->num_events = num_events; - watcher->devices_not_to_watch = NULL; - watcher->num_devices = 0; - watcher->flags = 0; - watcher->event_queue = (kfs_event **)&watcher[1]; - watcher->eventq_size = eventq_size; - watcher->rd = 0; - watcher->wr = 0; - watcher->blockers = 0; - watcher->num_readers = 0; - watcher->max_event_id = 0; - watcher->fseh = fseh; - watcher->pid = proc_selfpid(); - proc_selfname(watcher->proc_name, sizeof(watcher->proc_name)); - - watcher->num_dropped = 0; // XXXdbg - debugging - - if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) || - !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) || - !strncmp(watcher->proc_name, "revisiond", sizeof(watcher->proc_name)) || - !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) { - watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE; - } else { - printf("fsevents: watcher %s (pid: %d) - Using /dev/fsevents directly is unsupported. 
Migrate to FSEventsFramework\n", - watcher->proc_name, watcher->pid); - } - - lock_watch_table(); - - // find a slot for the new watcher - for(i=0; i < MAX_WATCHERS; i++) { - if (watcher_table[i] == NULL) { - watcher->my_id = i; - watcher_table[i] = watcher; - break; - } - } - - if (i >= MAX_WATCHERS) { - printf("fsevents: too many watchers!\n"); - unlock_watch_table(); - FREE(watcher, M_TEMP); - return ENOSPC; - } + int i; + fs_event_watcher *watcher; + + if (eventq_size <= 0 || eventq_size > 100 * max_kfs_events) { + eventq_size = max_kfs_events; + } + + // Note: the event_queue follows the fs_event_watcher struct + // in memory so we only have to do one allocation + MALLOC(watcher, + fs_event_watcher *, + sizeof(fs_event_watcher) + eventq_size * sizeof(kfs_event *), + M_TEMP, M_WAITOK); + if (watcher == NULL) { + return ENOMEM; + } - // now update the global list of who's interested in - // events of a particular type... - for(i=0; i < num_events; i++) { - if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) { - fs_event_type_watchers[i]++; + watcher->event_list = event_list; + watcher->num_events = num_events; + watcher->devices_not_to_watch = NULL; + watcher->num_devices = 0; + watcher->flags = 0; + watcher->event_queue = (kfs_event **)&watcher[1]; + watcher->eventq_size = eventq_size; + watcher->rd = 0; + watcher->wr = 0; + watcher->blockers = 0; + watcher->num_readers = 0; + watcher->max_event_id = 0; + watcher->fseh = fseh; + watcher->pid = proc_selfpid(); + proc_selfname(watcher->proc_name, sizeof(watcher->proc_name)); + + watcher->num_dropped = 0; // XXXdbg - debugging + + if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) || + !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) || + !strncmp(watcher->proc_name, "revisiond", sizeof(watcher->proc_name)) || + !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) { + watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE; + } else { + printf("fsevents: watcher %s (pid: %d) - Using /dev/fsevents directly is unsupported. Migrate to FSEventsFramework\n", + watcher->proc_name, watcher->pid); } - } - unlock_watch_table(); + lock_watch_table(); + + // find a slot for the new watcher + for (i = 0; i < MAX_WATCHERS; i++) { + if (watcher_table[i] == NULL) { + watcher->my_id = i; + watcher_table[i] = watcher; + break; + } + } - *watcher_out = watcher; + if (i >= MAX_WATCHERS) { + printf("fsevents: too many watchers!\n"); + unlock_watch_table(); + FREE(watcher, M_TEMP); + return ENOSPC; + } + + // now update the global list of who's interested in + // events of a particular type... 
+ for (i = 0; i < num_events; i++) { + if (event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) { + fs_event_type_watchers[i]++; + } + } - return 0; + unlock_watch_table(); + + *watcher_out = watcher; + + return 0; } @@ -1030,77 +1026,77 @@ add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_even static void remove_watcher(fs_event_watcher *target) { - int i, j, counter=0; - fs_event_watcher *watcher; - kfs_event *kfse; - - lock_watch_table(); - - for(j=0; j < MAX_WATCHERS; j++) { - watcher = watcher_table[j]; - if (watcher != target) { - continue; - } + int i, j, counter = 0; + fs_event_watcher *watcher; + kfs_event *kfse; - watcher_table[j] = NULL; + lock_watch_table(); - for(i=0; i < watcher->num_events; i++) { - if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) { - fs_event_type_watchers[i]--; - } - } + for (j = 0; j < MAX_WATCHERS; j++) { + watcher = watcher_table[j]; + if (watcher != target) { + continue; + } - if (watcher->flags & WATCHER_CLOSING) { - unlock_watch_table(); - return; - } + watcher_table[j] = NULL; - // printf("fsevents: removing watcher %p (rd %d wr %d num_readers %d flags 0x%x)\n", watcher, watcher->rd, watcher->wr, watcher->num_readers, watcher->flags); - watcher->flags |= WATCHER_CLOSING; - OSAddAtomic(1, &watcher->num_readers); - - unlock_watch_table(); - - while (watcher->num_readers > 1 && counter++ < 5000) { - lock_watch_table(); - fsevents_wakeup(watcher); // in case they're asleep - unlock_watch_table(); - - tsleep(watcher, PRIBIO, "fsevents-close", 1); - } - if (counter++ >= 5000) { - // printf("fsevents: close: still have readers! (%d)\n", watcher->num_readers); - panic("fsevents: close: still have readers! (%d)\n", watcher->num_readers); - } - - // drain the event_queue - - lck_rw_lock_exclusive(&event_handling_lock); - while(watcher->rd != watcher->wr) { - kfse = watcher->event_queue[watcher->rd]; - watcher->event_queue[watcher->rd] = NULL; - watcher->rd = (watcher->rd+1) % watcher->eventq_size; - OSSynchronizeIO(); - if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) { - release_event_ref(kfse); - } - } - lck_rw_unlock_exclusive(&event_handling_lock); - - if (watcher->event_list) { - FREE(watcher->event_list, M_TEMP); - watcher->event_list = NULL; - } - if (watcher->devices_not_to_watch) { - FREE(watcher->devices_not_to_watch, M_TEMP); - watcher->devices_not_to_watch = NULL; - } - FREE(watcher, M_TEMP); + for (i = 0; i < watcher->num_events; i++) { + if (watcher->event_list[i] != FSE_IGNORE && i < FSE_MAX_EVENTS) { + fs_event_type_watchers[i]--; + } + } - return; - } + if (watcher->flags & WATCHER_CLOSING) { + unlock_watch_table(); + return; + } - unlock_watch_table(); + // printf("fsevents: removing watcher %p (rd %d wr %d num_readers %d flags 0x%x)\n", watcher, watcher->rd, watcher->wr, watcher->num_readers, watcher->flags); + watcher->flags |= WATCHER_CLOSING; + OSAddAtomic(1, &watcher->num_readers); + + unlock_watch_table(); + + while (watcher->num_readers > 1 && counter++ < 5000) { + lock_watch_table(); + fsevents_wakeup(watcher); // in case they're asleep + unlock_watch_table(); + + tsleep(watcher, PRIBIO, "fsevents-close", 1); + } + if (counter++ >= 5000) { + // printf("fsevents: close: still have readers! (%d)\n", watcher->num_readers); + panic("fsevents: close: still have readers! 
(%d)\n", watcher->num_readers); + } + + // drain the event_queue + + lck_rw_lock_exclusive(&event_handling_lock); + while (watcher->rd != watcher->wr) { + kfse = watcher->event_queue[watcher->rd]; + watcher->event_queue[watcher->rd] = NULL; + watcher->rd = (watcher->rd + 1) % watcher->eventq_size; + OSSynchronizeIO(); + if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) { + release_event_ref(kfse); + } + } + lck_rw_unlock_exclusive(&event_handling_lock); + + if (watcher->event_list) { + FREE(watcher->event_list, M_TEMP); + watcher->event_list = NULL; + } + if (watcher->devices_not_to_watch) { + FREE(watcher->devices_not_to_watch, M_TEMP); + watcher->devices_not_to_watch = NULL; + } + FREE(watcher, M_TEMP); + + return; + } + + unlock_watch_table(); } @@ -1112,19 +1108,19 @@ static int timer_set = 0; static void delayed_event_delivery(__unused void *param0, __unused void *param1) { - int i; - - lock_watch_table(); + int i; - for(i=0; i < MAX_WATCHERS; i++) { - if (watcher_table[i] != NULL && watcher_table[i]->rd != watcher_table[i]->wr) { - fsevents_wakeup(watcher_table[i]); + lock_watch_table(); + + for (i = 0; i < MAX_WATCHERS; i++) { + if (watcher_table[i] != NULL && watcher_table[i]->rd != watcher_table[i]->wr) { + fsevents_wakeup(watcher_table[i]); + } } - } - timer_set = 0; + timer_set = 0; - unlock_watch_table(); + unlock_watch_table(); } @@ -1134,16 +1130,16 @@ delayed_event_delivery(__unused void *param0, __unused void *param1) static void schedule_event_wakeup(void) { - uint64_t deadline; - - if (event_delivery_timer == NULL) { - event_delivery_timer = thread_call_allocate((thread_call_func_t)delayed_event_delivery, NULL); - } - - clock_interval_to_deadline(EVENT_DELAY_IN_MS, 1000 * 1000, &deadline); - - thread_call_enter_delayed(event_delivery_timer, deadline); - timer_set = 1; + uint64_t deadline; + + if (event_delivery_timer == NULL) { + event_delivery_timer = thread_call_allocate((thread_call_func_t)delayed_event_delivery, NULL); + } + + clock_interval_to_deadline(EVENT_DELAY_IN_MS, 1000 * 1000, &deadline); + + thread_call_enter_delayed(event_delivery_timer, deadline); + timer_set = 1; } @@ -1157,129 +1153,129 @@ schedule_event_wakeup(void) static int watcher_add_event(fs_event_watcher *watcher, kfs_event *kfse) { - if (kfse->abstime > watcher->max_event_id) { - watcher->max_event_id = kfse->abstime; - } - - if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) { - watcher->flags |= WATCHER_DROPPED_EVENTS; - fsevents_wakeup(watcher); - return ENOSPC; - } - - OSAddAtomic(1, &kfse->refcount); - watcher->event_queue[watcher->wr] = kfse; - OSSynchronizeIO(); - watcher->wr = (watcher->wr + 1) % watcher->eventq_size; - - // - // wake up the watcher if there are more than MAX_NUM_PENDING events. - // otherwise schedule a timer (if one isn't already set) which will - // send any pending events if no more are received in the next - // EVENT_DELAY_IN_MS milli-seconds. 
- // - int32_t num_pending = 0; - if (watcher->rd < watcher->wr) { - num_pending = watcher->wr - watcher->rd; - } - - if (watcher->rd > watcher->wr) { - num_pending = watcher->wr + watcher->eventq_size - watcher->rd; - } - - if (num_pending > (watcher->eventq_size*3/4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) { - /* Non-Apple Service is falling behind, start dropping events for this process */ - lck_rw_lock_exclusive(&event_handling_lock); - while (watcher->rd != watcher->wr) { - kfse = watcher->event_queue[watcher->rd]; - watcher->event_queue[watcher->rd] = NULL; - watcher->rd = (watcher->rd+1) % watcher->eventq_size; + if (kfse->abstime > watcher->max_event_id) { + watcher->max_event_id = kfse->abstime; + } + + if (((watcher->wr + 1) % watcher->eventq_size) == watcher->rd) { + watcher->flags |= WATCHER_DROPPED_EVENTS; + fsevents_wakeup(watcher); + return ENOSPC; + } + + OSAddAtomic(1, &kfse->refcount); + watcher->event_queue[watcher->wr] = kfse; OSSynchronizeIO(); - if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) { - release_event_ref(kfse); + watcher->wr = (watcher->wr + 1) % watcher->eventq_size; + + // + // wake up the watcher if there are more than MAX_NUM_PENDING events. + // otherwise schedule a timer (if one isn't already set) which will + // send any pending events if no more are received in the next + // EVENT_DELAY_IN_MS milli-seconds. + // + int32_t num_pending = 0; + if (watcher->rd < watcher->wr) { + num_pending = watcher->wr - watcher->rd; + } + + if (watcher->rd > watcher->wr) { + num_pending = watcher->wr + watcher->eventq_size - watcher->rd; } - } - watcher->flags |= WATCHER_DROPPED_EVENTS; - lck_rw_unlock_exclusive(&event_handling_lock); - printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n", - watcher->proc_name, watcher->pid, watcher->rd, watcher->wr, - watcher->eventq_size, watcher->flags); + if (num_pending > (watcher->eventq_size * 3 / 4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) { + /* Non-Apple Service is falling behind, start dropping events for this process */ + lck_rw_lock_exclusive(&event_handling_lock); + while (watcher->rd != watcher->wr) { + kfse = watcher->event_queue[watcher->rd]; + watcher->event_queue[watcher->rd] = NULL; + watcher->rd = (watcher->rd + 1) % watcher->eventq_size; + OSSynchronizeIO(); + if (kfse != NULL && kfse->type != FSE_INVALID && kfse->refcount >= 1) { + release_event_ref(kfse); + } + } + watcher->flags |= WATCHER_DROPPED_EVENTS; + lck_rw_unlock_exclusive(&event_handling_lock); - fsevents_wakeup(watcher); - } else if (num_pending > MAX_NUM_PENDING) { - fsevents_wakeup(watcher); - } else if (timer_set == 0) { - schedule_event_wakeup(); - } + printf("fsevents: watcher falling behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n", + watcher->proc_name, watcher->pid, watcher->rd, watcher->wr, + watcher->eventq_size, watcher->flags); - return 0; + fsevents_wakeup(watcher); + } else if (num_pending > MAX_NUM_PENDING) { + fsevents_wakeup(watcher); + } else if (timer_set == 0) { + schedule_event_wakeup(); + } + + return 0; } static int fill_buff(uint16_t type, int32_t size, const void *data, - char *buff, int32_t *_buff_idx, int32_t buff_sz, - struct uio *uio) + char *buff, int32_t *_buff_idx, int32_t buff_sz, + struct uio *uio) { - int32_t amt, error = 0, buff_idx = *_buff_idx; - uint16_t tmp; - - // - // the +1 on the size is to guarantee that the main data - // copy loop will always copy at least 1 byte - // - if ((buff_sz - buff_idx) <= 
(int)(2*sizeof(uint16_t) + 1)) { - if (buff_idx > uio_resid(uio)) { - error = ENOSPC; - goto get_out; - } - - error = uiomove(buff, buff_idx, uio); - if (error) { - goto get_out; - } - buff_idx = 0; - } - - // copy out the header (type & size) - memcpy(&buff[buff_idx], &type, sizeof(uint16_t)); - buff_idx += sizeof(uint16_t); - - tmp = size & 0xffff; - memcpy(&buff[buff_idx], &tmp, sizeof(uint16_t)); - buff_idx += sizeof(uint16_t); - - // now copy the body of the data, flushing along the way - // if the buffer fills up. - // - while(size > 0) { - amt = (size < (buff_sz - buff_idx)) ? size : (buff_sz - buff_idx); - memcpy(&buff[buff_idx], data, amt); - - size -= amt; - buff_idx += amt; - data = (const char *)data + amt; - if (size > (buff_sz - buff_idx)) { - if (buff_idx > uio_resid(uio)) { - error = ENOSPC; - goto get_out; - } - error = uiomove(buff, buff_idx, uio); - if (error) { - goto get_out; - } - buff_idx = 0; + int32_t amt, error = 0, buff_idx = *_buff_idx; + uint16_t tmp; + + // + // the +1 on the size is to guarantee that the main data + // copy loop will always copy at least 1 byte + // + if ((buff_sz - buff_idx) <= (int)(2 * sizeof(uint16_t) + 1)) { + if (buff_idx > uio_resid(uio)) { + error = ENOSPC; + goto get_out; + } + + error = uiomove(buff, buff_idx, uio); + if (error) { + goto get_out; + } + buff_idx = 0; } - if (amt == 0) { // just in case... - break; + // copy out the header (type & size) + memcpy(&buff[buff_idx], &type, sizeof(uint16_t)); + buff_idx += sizeof(uint16_t); + + tmp = size & 0xffff; + memcpy(&buff[buff_idx], &tmp, sizeof(uint16_t)); + buff_idx += sizeof(uint16_t); + + // now copy the body of the data, flushing along the way + // if the buffer fills up. + // + while (size > 0) { + amt = (size < (buff_sz - buff_idx)) ? size : (buff_sz - buff_idx); + memcpy(&buff[buff_idx], data, amt); + + size -= amt; + buff_idx += amt; + data = (const char *)data + amt; + if (size > (buff_sz - buff_idx)) { + if (buff_idx > uio_resid(uio)) { + error = ENOSPC; + goto get_out; + } + error = uiomove(buff, buff_idx, uio); + if (error) { + goto get_out; + } + buff_idx = 0; + } + + if (amt == 0) { // just in case... + break; + } } - } - get_out: - *_buff_idx = buff_idx; - - return error; +get_out: + *_buff_idx = buff_idx; + + return error; } @@ -1288,195 +1284,193 @@ static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio static int copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) { - int error; - uint16_t tmp16; - int32_t type; - kfs_event *cur; - char evbuff[512]; - int evbuff_idx = 0; - - if (kfse->type == FSE_INVALID) { - panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)\n", kfse, kfse->refcount, kfse->str); - } - - if (kfse->flags & KFSE_BEING_CREATED) { - return 0; - } + int error; + uint16_t tmp16; + int32_t type; + kfs_event *cur; + char evbuff[512]; + int evbuff_idx = 0; + + if (kfse->type == FSE_INVALID) { + panic("fsevents: copy_out_kfse: asked to copy out an invalid event (kfse %p, refcount %d fref ptr %p)\n", kfse, kfse->refcount, kfse->str); + } + + if (kfse->flags & KFSE_BEING_CREATED) { + return 0; + } + + if (((kfse->type == FSE_RENAME) || (kfse->type == FSE_CLONE)) && kfse->dest == NULL) { + // + // This can happen if an event gets recycled but we had a + // pointer to it in our event queue. 
The event is the + // destination of a rename or clone which we'll process separately + // (that is, another kfse points to this one so it's ok + // to skip this guy because we'll process it when we process + // the other one) + error = 0; + goto get_out; + } - if (((kfse->type == FSE_RENAME) || (kfse->type == FSE_CLONE)) && kfse->dest == NULL) { - // - // This can happen if an event gets recycled but we had a - // pointer to it in our event queue. The event is the - // destination of a rename or clone which we'll process separately - // (that is, another kfse points to this one so it's ok - // to skip this guy because we'll process it when we process - // the other one) - error = 0; - goto get_out; - } + if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) { + type = (kfse->type & 0xfff); - if (watcher->flags & WATCHER_WANTS_EXTENDED_INFO) { + if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) { + type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT); + } else if (kfse->flags & KFSE_COMBINED_EVENTS) { + type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT); + } + } else { + type = (int32_t)kfse->type; + } - type = (kfse->type & 0xfff); + // copy out the type of the event + memcpy(evbuff, &type, sizeof(int32_t)); + evbuff_idx += sizeof(int32_t); - if (kfse->flags & KFSE_CONTAINS_DROPPED_EVENTS) { - type |= (FSE_CONTAINS_DROPPED_EVENTS << FSE_FLAG_SHIFT); - } else if (kfse->flags & KFSE_COMBINED_EVENTS) { - type |= (FSE_COMBINED_EVENTS << FSE_FLAG_SHIFT); - } + // copy out the pid of the person that generated the event + memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t)); + evbuff_idx += sizeof(pid_t); - } else { - type = (int32_t)kfse->type; - } + cur = kfse; - // copy out the type of the event - memcpy(evbuff, &type, sizeof(int32_t)); - evbuff_idx += sizeof(int32_t); +copy_again: - // copy out the pid of the person that generated the event - memcpy(&evbuff[evbuff_idx], &kfse->pid, sizeof(pid_t)); - evbuff_idx += sizeof(pid_t); + if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) { + dev_t dev = cur->dev; + ino64_t ino = cur->ino; + uint64_t ival; - cur = kfse; + error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } - copy_again: + error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } - if (kfse->type == FSE_DOCID_CHANGED || kfse->type == FSE_DOCID_CREATED) { - dev_t dev = cur->dev; - ino64_t ino = cur->ino; - uint64_t ival; + memcpy(&ino, &cur->str, sizeof(ino64_t)); + error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } - error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; + memcpy(&ival, &cur->uid, sizeof(uint64_t)); // the docid gets stuffed into the ino field + error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + goto done; } - error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; + if (kfse->type == FSE_UNMOUNT_PENDING) { + dev_t dev = cur->dev; + + error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + goto done; } - memcpy(&ino, &cur->str, sizeof(ino64_t)); - error = fill_buff(FSE_ARG_INO, 
sizeof(ino64_t), &ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (cur->str == NULL || cur->str[0] == '\0') { + printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str); + error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio); + } else { + error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio); + } if (error != 0) { - goto get_out; + goto get_out; } - memcpy(&ival, &cur->uid, sizeof(uint64_t)); // the docid gets stuffed into the ino field - error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &ival, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; + if (cur->dev == 0 && cur->ino == 0) { + // this happens when a rename event happens and the + // destination of the rename did not previously exist. + // it thus has no other file info so skip copying out + // the stuff below since it isn't initialized + goto done; } - goto done; - } - if (kfse->type == FSE_UNMOUNT_PENDING) { - dev_t dev = cur->dev; + if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) { + int32_t finfo_size; + + finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t); + error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + } else { + error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } + + error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } - error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; - } - - goto done; - } - - if (cur->str == NULL || cur->str[0] == '\0') { - printf("copy_out_kfse:2: empty/short path (%s)\n", cur->str); - error = fill_buff(FSE_ARG_STRING, 2, "/", evbuff, &evbuff_idx, sizeof(evbuff), uio); - } else { - error = fill_buff(FSE_ARG_STRING, cur->len, cur->str, evbuff, &evbuff_idx, sizeof(evbuff), uio); - } - if (error != 0) { - goto get_out; - } - - if (cur->dev == 0 && cur->ino == 0) { - // this happens when a rename event happens and the - // destination of the rename did not previously exist. 
- // it thus has no other file info so skip copying out - // the stuff below since it isn't initialized - goto done; - } - - - if (watcher->flags & WATCHER_WANTS_COMPACT_EVENTS) { - int32_t finfo_size; - - finfo_size = sizeof(dev_t) + sizeof(ino64_t) + sizeof(int32_t) + sizeof(uid_t) + sizeof(gid_t); - error = fill_buff(FSE_ARG_FINFO, finfo_size, &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; - } - } else { - error = fill_buff(FSE_ARG_DEV, sizeof(dev_t), &cur->dev, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; + error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio); + if (error != 0) { + goto get_out; + } } - error = fill_buff(FSE_ARG_INO, sizeof(ino64_t), &cur->ino, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; - } - error = fill_buff(FSE_ARG_MODE, sizeof(int32_t), &cur->mode, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; + if (cur->dest) { + cur = cur->dest; + goto copy_again; } - error = fill_buff(FSE_ARG_UID, sizeof(uid_t), &cur->uid, evbuff, &evbuff_idx, sizeof(evbuff), uio); +done: + // very last thing: the time stamp + error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio); if (error != 0) { - goto get_out; + goto get_out; } - error = fill_buff(FSE_ARG_GID, sizeof(gid_t), &cur->gid, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; + // check if the FSE_ARG_DONE will fit + if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) { + if (evbuff_idx > uio_resid(uio)) { + error = ENOSPC; + goto get_out; + } + error = uiomove(evbuff, evbuff_idx, uio); + if (error) { + goto get_out; + } + evbuff_idx = 0; } - } + tmp16 = FSE_ARG_DONE; + memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t)); + evbuff_idx += sizeof(uint16_t); - if (cur->dest) { - cur = cur->dest; - goto copy_again; - } - - done: - // very last thing: the time stamp - error = fill_buff(FSE_ARG_INT64, sizeof(uint64_t), &cur->abstime, evbuff, &evbuff_idx, sizeof(evbuff), uio); - if (error != 0) { - goto get_out; - } - - // check if the FSE_ARG_DONE will fit - if (sizeof(uint16_t) > sizeof(evbuff) - evbuff_idx) { + // flush any remaining data in the buffer (and hopefully + // in most cases this is the only uiomove we'll do) if (evbuff_idx > uio_resid(uio)) { - error = ENOSPC; - goto get_out; - } - error = uiomove(evbuff, evbuff_idx, uio); - if (error) { - goto get_out; + error = ENOSPC; + } else { + error = uiomove(evbuff, evbuff_idx, uio); } - evbuff_idx = 0; - } - - tmp16 = FSE_ARG_DONE; - memcpy(&evbuff[evbuff_idx], &tmp16, sizeof(uint16_t)); - evbuff_idx += sizeof(uint16_t); - // flush any remaining data in the buffer (and hopefully - // in most cases this is the only uiomove we'll do) - if (evbuff_idx > uio_resid(uio)) { - error = ENOSPC; - } else { - error = uiomove(evbuff, evbuff_idx, uio); - } +get_out: - get_out: - - return error; + return error; } @@ -1484,138 +1478,136 @@ copy_out_kfse(fs_event_watcher *watcher, kfs_event *kfse, struct uio *uio) static int fmod_watch(fs_event_watcher *watcher, struct uio *uio) { - int error=0; - user_ssize_t last_full_event_resid; - kfs_event *kfse; - uint16_t tmp16; - int skipped; - - last_full_event_resid = uio_resid(uio); + int error = 0; + user_ssize_t last_full_event_resid; + kfs_event *kfse; + uint16_t tmp16; + int skipped; - // need at least 2048 bytes of space (maxpathlen + 1 event buf) - if (uio_resid(uio) < 2048 || 
watcher == NULL) { - return EINVAL; - } + last_full_event_resid = uio_resid(uio); - if (watcher->flags & WATCHER_CLOSING) { - return 0; - } - - if (OSAddAtomic(1, &watcher->num_readers) != 0) { - // don't allow multiple threads to read from the fd at the same time - OSAddAtomic(-1, &watcher->num_readers); - return EAGAIN; - } + // need at least 2048 bytes of space (maxpathlen + 1 event buf) + if (uio_resid(uio) < 2048 || watcher == NULL) { + return EINVAL; + } - restart_watch: - if (watcher->rd == watcher->wr) { if (watcher->flags & WATCHER_CLOSING) { - OSAddAtomic(-1, &watcher->num_readers); - return 0; + return 0; } - OSAddAtomic(1, &watcher->blockers); - - // there's nothing to do, go to sleep - error = tsleep((caddr_t)watcher, PUSER|PCATCH, "fsevents_empty", 0); - - OSAddAtomic(-1, &watcher->blockers); - if (error != 0 || (watcher->flags & WATCHER_CLOSING)) { - OSAddAtomic(-1, &watcher->num_readers); - return error; + if (OSAddAtomic(1, &watcher->num_readers) != 0) { + // don't allow multiple threads to read from the fd at the same time + OSAddAtomic(-1, &watcher->num_readers); + return EAGAIN; } - } - // if we dropped events, return that as an event first - if (watcher->flags & WATCHER_DROPPED_EVENTS) { - int32_t val = FSE_EVENTS_DROPPED; +restart_watch: + if (watcher->rd == watcher->wr) { + if (watcher->flags & WATCHER_CLOSING) { + OSAddAtomic(-1, &watcher->num_readers); + return 0; + } + OSAddAtomic(1, &watcher->blockers); - error = uiomove((caddr_t)&val, sizeof(int32_t), uio); - if (error == 0) { - val = 0; // a fake pid - error = uiomove((caddr_t)&val, sizeof(int32_t), uio); - - tmp16 = FSE_ARG_DONE; // makes it a consistent msg - error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio); + // there's nothing to do, go to sleep + error = tsleep((caddr_t)watcher, PUSER | PCATCH, "fsevents_empty", 0); - last_full_event_resid = uio_resid(uio); - } + OSAddAtomic(-1, &watcher->blockers); - if (error) { - OSAddAtomic(-1, &watcher->num_readers); - return error; + if (error != 0 || (watcher->flags & WATCHER_CLOSING)) { + OSAddAtomic(-1, &watcher->num_readers); + return error; + } } - - watcher->flags &= ~WATCHER_DROPPED_EVENTS; - } - skipped = 0; + // if we dropped events, return that as an event first + if (watcher->flags & WATCHER_DROPPED_EVENTS) { + int32_t val = FSE_EVENTS_DROPPED; - lck_rw_lock_shared(&event_handling_lock); - while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) { - if (watcher->flags & WATCHER_CLOSING) { - break; - } - - // - // check if the event is something of interest to us - // (since it may have been recycled/reused and changed - // its type or which device it is for) - // - kfse = watcher->event_queue[watcher->rd]; - if (!kfse || kfse->type == FSE_INVALID || kfse->type >= watcher->num_events || kfse->refcount < 1) { - break; + error = uiomove((caddr_t)&val, sizeof(int32_t), uio); + if (error == 0) { + val = 0; // a fake pid + error = uiomove((caddr_t)&val, sizeof(int32_t), uio); + + tmp16 = FSE_ARG_DONE; // makes it a consistent msg + error = uiomove((caddr_t)&tmp16, sizeof(int16_t), uio); + + last_full_event_resid = uio_resid(uio); + } + + if (error) { + OSAddAtomic(-1, &watcher->num_readers); + return error; + } + + watcher->flags &= ~WATCHER_DROPPED_EVENTS; } - if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) { + skipped = 0; + + lck_rw_lock_shared(&event_handling_lock); + while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) { + if (watcher->flags & WATCHER_CLOSING) { + break; + } - if (!(watcher->flags 
& WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) { - // If this is not an Apple System Service, skip specified directories - // radar://12034844 - error = 0; - skipped = 1; - } else { + // + // check if the event is something of interest to us + // (since it may have been recycled/reused and changed + // its type or which device it is for) + // + kfse = watcher->event_queue[watcher->rd]; + if (!kfse || kfse->type == FSE_INVALID || kfse->type >= watcher->num_events || kfse->refcount < 1) { + break; + } - skipped = 0; - if (last_event_ptr == kfse) { - last_event_ptr = NULL; - last_event_type = -1; - last_coalesced_time = 0; - } - error = copy_out_kfse(watcher, kfse, uio); - if (error != 0) { - // if an event won't fit or encountered an error while - // we were copying it out, then backup to the last full - // event and just bail out. if the error was ENOENT - // then we can continue regular processing, otherwise - // we should unlock things and return. - uio_setresid(uio, last_full_event_resid); - if (error != ENOENT) { - lck_rw_unlock_shared(&event_handling_lock); - error = 0; - goto get_out; - } - } - - last_full_event_resid = uio_resid(uio); - } - } - - watcher->event_queue[watcher->rd] = NULL; - watcher->rd = (watcher->rd + 1) % watcher->eventq_size; - OSSynchronizeIO(); - release_event_ref(kfse); - } - lck_rw_unlock_shared(&event_handling_lock); + if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) { + if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && kfse->type != FSE_DOCID_CREATED && kfse->type != FSE_DOCID_CHANGED && is_ignored_directory(kfse->str)) { + // If this is not an Apple System Service, skip specified directories + // radar://12034844 + error = 0; + skipped = 1; + } else { + skipped = 0; + if (last_event_ptr == kfse) { + last_event_ptr = NULL; + last_event_type = -1; + last_coalesced_time = 0; + } + error = copy_out_kfse(watcher, kfse, uio); + if (error != 0) { + // if an event won't fit or encountered an error while + // we were copying it out, then backup to the last full + // event and just bail out. if the error was ENOENT + // then we can continue regular processing, otherwise + // we should unlock things and return. 
+ uio_setresid(uio, last_full_event_resid); + if (error != ENOENT) { + lck_rw_unlock_shared(&event_handling_lock); + error = 0; + goto get_out; + } + } + + last_full_event_resid = uio_resid(uio); + } + } + + watcher->event_queue[watcher->rd] = NULL; + watcher->rd = (watcher->rd + 1) % watcher->eventq_size; + OSSynchronizeIO(); + release_event_ref(kfse); + } + lck_rw_unlock_shared(&event_handling_lock); - if (skipped && error == 0) { - goto restart_watch; - } + if (skipped && error == 0) { + goto restart_watch; + } - get_out: - OSAddAtomic(-1, &watcher->num_readers); +get_out: + OSAddAtomic(-1, &watcher->num_readers); - return error; + return error; } @@ -1627,61 +1619,63 @@ void fsevent_unmount(__unused struct mount *mp, __unused vfs_context_t ctx) { #if CONFIG_EMBEDDED - dev_t dev = mp->mnt_vfsstat.f_fsid.val[0]; - int error, waitcount = 0; - struct timespec ts = {1, 0}; - - // wait for any other pending unmounts to complete - lock_watch_table(); - while (fsevent_unmount_dev != 0) { - error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_wait", &ts); - if (error == EWOULDBLOCK) - error = 0; - if (!error && (++waitcount >= 10)) { - error = EWOULDBLOCK; - printf("timeout waiting to signal unmount pending for dev %d (fsevent_unmount_dev %d)\n", dev, fsevent_unmount_dev); - } - if (error) { - // there's a problem, bail out - unlock_watch_table(); - return; - } - } - if (fs_event_type_watchers[FSE_UNMOUNT_PENDING] == 0) { - // nobody watching for unmount pending events - unlock_watch_table(); - return; - } - // this is now the current unmount pending - fsevent_unmount_dev = dev; - fsevent_unmount_ack_count = fs_event_type_watchers[FSE_UNMOUNT_PENDING]; - unlock_watch_table(); - - // send an event to notify the watcher they need to get off the mount - error = add_fsevent(FSE_UNMOUNT_PENDING, ctx, FSE_ARG_DEV, dev, FSE_ARG_DONE); - - // wait for acknowledgment(s) (give up if it takes too long) - lock_watch_table(); - waitcount = 0; - while (fsevent_unmount_dev == dev) { - error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_pending", &ts); - if (error == EWOULDBLOCK) - error = 0; - if (!error && (++waitcount >= 10)) { - error = EWOULDBLOCK; - printf("unmount pending ack timeout for dev %d\n", dev); - } - if (error) { - // there's a problem, bail out - if (fsevent_unmount_dev == dev) { - fsevent_unmount_dev = 0; - fsevent_unmount_ack_count = 0; - } - wakeup((caddr_t)&fsevent_unmount_dev); - break; - } - } - unlock_watch_table(); + dev_t dev = mp->mnt_vfsstat.f_fsid.val[0]; + int error, waitcount = 0; + struct timespec ts = {1, 0}; + + // wait for any other pending unmounts to complete + lock_watch_table(); + while (fsevent_unmount_dev != 0) { + error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_wait", &ts); + if (error == EWOULDBLOCK) { + error = 0; + } + if (!error && (++waitcount >= 10)) { + error = EWOULDBLOCK; + printf("timeout waiting to signal unmount pending for dev %d (fsevent_unmount_dev %d)\n", dev, fsevent_unmount_dev); + } + if (error) { + // there's a problem, bail out + unlock_watch_table(); + return; + } + } + if (fs_event_type_watchers[FSE_UNMOUNT_PENDING] == 0) { + // nobody watching for unmount pending events + unlock_watch_table(); + return; + } + // this is now the current unmount pending + fsevent_unmount_dev = dev; + fsevent_unmount_ack_count = fs_event_type_watchers[FSE_UNMOUNT_PENDING]; + unlock_watch_table(); + + // send an event to notify the watcher they need 
to get off the mount + error = add_fsevent(FSE_UNMOUNT_PENDING, ctx, FSE_ARG_DEV, dev, FSE_ARG_DONE); + + // wait for acknowledgment(s) (give up if it takes too long) + lock_watch_table(); + waitcount = 0; + while (fsevent_unmount_dev == dev) { + error = msleep((caddr_t)&fsevent_unmount_dev, &watch_table_lock, PRIBIO, "fsevent_unmount_pending", &ts); + if (error == EWOULDBLOCK) { + error = 0; + } + if (!error && (++waitcount >= 10)) { + error = EWOULDBLOCK; + printf("unmount pending ack timeout for dev %d\n", dev); + } + if (error) { + // there's a problem, bail out + if (fsevent_unmount_dev == dev) { + fsevent_unmount_dev = 0; + fsevent_unmount_ack_count = 0; + } + wakeup((caddr_t)&fsevent_unmount_dev); + break; + } + } + unlock_watch_table(); #endif } @@ -1692,75 +1686,75 @@ fsevent_unmount(__unused struct mount *mp, __unused vfs_context_t ctx) static int fsevents_installed = 0; typedef struct fsevent_handle { - UInt32 flags; - SInt32 active; - fs_event_watcher *watcher; - struct klist knotes; - struct selinfo si; + UInt32 flags; + SInt32 active; + fs_event_watcher *watcher; + struct klist knotes; + struct selinfo si; } fsevent_handle; #define FSEH_CLOSING 0x0001 static int fseventsf_read(struct fileproc *fp, struct uio *uio, - __unused int flags, __unused vfs_context_t ctx) + __unused int flags, __unused vfs_context_t ctx) { - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; - int error; + fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + int error; - error = fmod_watch(fseh->watcher, uio); + error = fmod_watch(fseh->watcher, uio); - return error; + return error; } static int fseventsf_write(__unused struct fileproc *fp, __unused struct uio *uio, - __unused int flags, __unused vfs_context_t ctx) + __unused int flags, __unused vfs_context_t ctx) { - return EIO; + return EIO; } #pragma pack(push, 4) typedef struct fsevent_dev_filter_args32 { - uint32_t num_devices; - user32_addr_t devices; + uint32_t num_devices; + user32_addr_t devices; } fsevent_dev_filter_args32; typedef struct fsevent_dev_filter_args64 { - uint32_t num_devices; - user64_addr_t devices; + uint32_t num_devices; + user64_addr_t devices; } fsevent_dev_filter_args64; #pragma pack(pop) -#define FSEVENTS_DEVICE_FILTER_32 _IOW('s', 100, fsevent_dev_filter_args32) -#define FSEVENTS_DEVICE_FILTER_64 _IOW('s', 100, fsevent_dev_filter_args64) +#define FSEVENTS_DEVICE_FILTER_32 _IOW('s', 100, fsevent_dev_filter_args32) +#define FSEVENTS_DEVICE_FILTER_64 _IOW('s', 100, fsevent_dev_filter_args64) static int fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx) { - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; - int ret = 0; - fsevent_dev_filter_args64 *devfilt_args, _devfilt_args; + fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + int ret = 0; + fsevent_dev_filter_args64 *devfilt_args, _devfilt_args; - OSAddAtomic(1, &fseh->active); - if (fseh->flags & FSEH_CLOSING) { - OSAddAtomic(-1, &fseh->active); - return 0; - } + OSAddAtomic(1, &fseh->active); + if (fseh->flags & FSEH_CLOSING) { + OSAddAtomic(-1, &fseh->active); + return 0; + } - switch (cmd) { + switch (cmd) { case FIONBIO: case FIOASYNC: - break; + break; case FSEVENTS_WANT_COMPACT_EVENTS: { - fseh->watcher->flags |= WATCHER_WANTS_COMPACT_EVENTS; - break; + fseh->watcher->flags |= WATCHER_WANTS_COMPACT_EVENTS; + break; } case FSEVENTS_WANT_EXTENDED_INFO: { - fseh->watcher->flags |= WATCHER_WANTS_EXTENDED_INFO; - break; + fseh->watcher->flags |= 
WATCHER_WANTS_EXTENDED_INFO; + break; } case FSEVENTS_GET_CURRENT_ID: { @@ -1770,127 +1764,127 @@ fseventsf_ioctl(struct fileproc *fp, u_long cmd, caddr_t data, vfs_context_t ctx } case FSEVENTS_DEVICE_FILTER_32: { - if (proc_is64bit(vfs_context_proc(ctx))) { - ret = EINVAL; - break; - } - fsevent_dev_filter_args32 *devfilt_args32 = (fsevent_dev_filter_args32 *)data; + if (proc_is64bit(vfs_context_proc(ctx))) { + ret = EINVAL; + break; + } + fsevent_dev_filter_args32 *devfilt_args32 = (fsevent_dev_filter_args32 *)data; - devfilt_args = &_devfilt_args; - memset(devfilt_args, 0, sizeof(fsevent_dev_filter_args64)); - devfilt_args->num_devices = devfilt_args32->num_devices; - devfilt_args->devices = CAST_USER_ADDR_T(devfilt_args32->devices); - goto handle_dev_filter; + devfilt_args = &_devfilt_args; + memset(devfilt_args, 0, sizeof(fsevent_dev_filter_args64)); + devfilt_args->num_devices = devfilt_args32->num_devices; + devfilt_args->devices = CAST_USER_ADDR_T(devfilt_args32->devices); + goto handle_dev_filter; } case FSEVENTS_DEVICE_FILTER_64: - if (!proc_is64bit(vfs_context_proc(ctx))) { - ret = EINVAL; - break; - } - devfilt_args = (fsevent_dev_filter_args64 *)data; + if (!proc_is64bit(vfs_context_proc(ctx))) { + ret = EINVAL; + break; + } + devfilt_args = (fsevent_dev_filter_args64 *)data; - handle_dev_filter: - { - int new_num_devices; - dev_t *devices_not_to_watch, *tmp=NULL; - - if (devfilt_args->num_devices > 256) { - ret = EINVAL; - break; - } - - new_num_devices = devfilt_args->num_devices; - if (new_num_devices == 0) { - lock_watch_table(); +handle_dev_filter: + { + int new_num_devices; + dev_t *devices_not_to_watch, *tmp = NULL; - tmp = fseh->watcher->devices_not_to_watch; - fseh->watcher->devices_not_to_watch = NULL; - fseh->watcher->num_devices = new_num_devices; + if (devfilt_args->num_devices > 256) { + ret = EINVAL; + break; + } - unlock_watch_table(); - if (tmp) { - FREE(tmp, M_TEMP); - } - break; - } + new_num_devices = devfilt_args->num_devices; + if (new_num_devices == 0) { + lock_watch_table(); - MALLOC(devices_not_to_watch, dev_t *, - new_num_devices * sizeof(dev_t), - M_TEMP, M_WAITOK); - if (devices_not_to_watch == NULL) { - ret = ENOMEM; - break; - } - - ret = copyin(devfilt_args->devices, - (void *)devices_not_to_watch, - new_num_devices * sizeof(dev_t)); - if (ret) { - FREE(devices_not_to_watch, M_TEMP); - break; - } + tmp = fseh->watcher->devices_not_to_watch; + fseh->watcher->devices_not_to_watch = NULL; + fseh->watcher->num_devices = new_num_devices; + + unlock_watch_table(); + if (tmp) { + FREE(tmp, M_TEMP); + } + break; + } + + MALLOC(devices_not_to_watch, dev_t *, + new_num_devices * sizeof(dev_t), + M_TEMP, M_WAITOK); + if (devices_not_to_watch == NULL) { + ret = ENOMEM; + break; + } + + ret = copyin(devfilt_args->devices, + (void *)devices_not_to_watch, + new_num_devices * sizeof(dev_t)); + if (ret) { + FREE(devices_not_to_watch, M_TEMP); + break; + } + + lock_watch_table(); + fseh->watcher->num_devices = new_num_devices; + tmp = fseh->watcher->devices_not_to_watch; + fseh->watcher->devices_not_to_watch = devices_not_to_watch; + unlock_watch_table(); - lock_watch_table(); - fseh->watcher->num_devices = new_num_devices; - tmp = fseh->watcher->devices_not_to_watch; - fseh->watcher->devices_not_to_watch = devices_not_to_watch; - unlock_watch_table(); + if (tmp) { + FREE(tmp, M_TEMP); + } - if (tmp) { - FREE(tmp, M_TEMP); - } - - break; - } + break; + } case FSEVENTS_UNMOUNT_PENDING_ACK: { - lock_watch_table(); - dev_t dev = *(dev_t *)data; - if 
(fsevent_unmount_dev == dev) { - if (--fsevent_unmount_ack_count <= 0) { - fsevent_unmount_dev = 0; - wakeup((caddr_t)&fsevent_unmount_dev); + lock_watch_table(); + dev_t dev = *(dev_t *)data; + if (fsevent_unmount_dev == dev) { + if (--fsevent_unmount_ack_count <= 0) { + fsevent_unmount_dev = 0; + wakeup((caddr_t)&fsevent_unmount_dev); + } + } else { + printf("unexpected unmount pending ack %d (%d)\n", dev, fsevent_unmount_dev); + ret = EINVAL; } - } else { - printf("unexpected unmount pending ack %d (%d)\n", dev, fsevent_unmount_dev); - ret = EINVAL; - } - unlock_watch_table(); - break; + unlock_watch_table(); + break; } default: - ret = EINVAL; - break; - } + ret = EINVAL; + break; + } - OSAddAtomic(-1, &fseh->active); - return (ret); + OSAddAtomic(-1, &fseh->active); + return ret; } static int fseventsf_select(struct fileproc *fp, int which, __unused void *wql, vfs_context_t ctx) { - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; - int ready = 0; + fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + int ready = 0; - if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) { - return 0; - } + if ((which != FREAD) || (fseh->watcher->flags & WATCHER_CLOSING)) { + return 0; + } - // if there's nothing in the queue, we're not ready - if (fseh->watcher->rd != fseh->watcher->wr) { - ready = 1; - } + // if there's nothing in the queue, we're not ready + if (fseh->watcher->rd != fseh->watcher->wr) { + ready = 1; + } - if (!ready) { - selrecord(vfs_context_proc(ctx), &fseh->si, wql); - } + if (!ready) { + selrecord(vfs_context_proc(ctx), &fseh->si, wql); + } - return ready; + return ready; } @@ -1898,29 +1892,29 @@ fseventsf_select(struct fileproc *fp, int which, __unused void *wql, vfs_context static int fseventsf_stat(__unused struct fileproc *fp, __unused struct stat *sb, __unused vfs_context_t ctx) { - return ENOTSUP; + return ENOTSUP; } #endif static int fseventsf_close(struct fileglob *fg, __unused vfs_context_t ctx) { - fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data; - fs_event_watcher *watcher; + fsevent_handle *fseh = (struct fsevent_handle *)fg->fg_data; + fs_event_watcher *watcher; - OSBitOrAtomic(FSEH_CLOSING, &fseh->flags); - while (OSAddAtomic(0, &fseh->active) > 0) { - tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1); - } + OSBitOrAtomic(FSEH_CLOSING, &fseh->flags); + while (OSAddAtomic(0, &fseh->active) > 0) { + tsleep((caddr_t)fseh->watcher, PRIBIO, "fsevents-close", 1); + } - watcher = fseh->watcher; - fg->fg_data = NULL; - fseh->watcher = NULL; + watcher = fseh->watcher; + fg->fg_data = NULL; + fseh->watcher = NULL; - remove_watcher(watcher); - FREE(fseh, M_TEMP); + remove_watcher(watcher); + FREE(fseh, M_TEMP); - return 0; + return 0; } static void @@ -1931,18 +1925,18 @@ filt_fsevent_detach(struct knote *kn) lock_watch_table(); KNOTE_DETACH(&fseh->knotes, kn); - + unlock_watch_table(); } -/* +/* * Determine whether this knote should be active - * - * This is kind of subtle. - * --First, notice if the vnode has been revoked: in so, override hint - * --EVFILT_READ knotes are checked no matter what the hint is - * --Other knotes activate based on hint. - * --If hint is revoke, set special flags and activate + * + * This is kind of subtle. + * --First, notice if the vnode has been revoked: in so, override hint + * --EVFILT_READ knotes are checked no matter what the hint is + * --Other knotes activate based on hint. 
+ * --If hint is revoke, set special flags and activate */ static int filt_fsevent(struct knote *kn, long hint) @@ -1959,35 +1953,35 @@ filt_fsevent(struct knote *kn, long hint) rd = fseh->watcher->rd; wr = fseh->watcher->wr; if (rd <= wr) { - amt = wr - rd; + amt = wr - rd; } else { - amt = fseh->watcher->eventq_size - (rd - wr); + amt = fseh->watcher->eventq_size - (rd - wr); } - switch(kn->kn_filter) { - case EVFILT_READ: - kn->kn_data = amt; + switch (kn->kn_filter) { + case EVFILT_READ: + kn->kn_data = amt; - if (kn->kn_data != 0) { - activate = 1; - } - break; - case EVFILT_VNODE: - /* Check events this note matches against the hint */ - if (kn->kn_sfflags & hint) { - kn->kn_fflags |= hint; /* Set which event occurred */ - } - if (kn->kn_fflags != 0) { - activate = 1; - } - break; - default: { - // nothing to do... - break; + if (kn->kn_data != 0) { + activate = 1; + } + break; + case EVFILT_VNODE: + /* Check events this note matches against the hint */ + if (kn->kn_sfflags & hint) { + kn->kn_fflags |= hint; /* Set which event occurred */ + } + if (kn->kn_fflags != 0) { + activate = 1; } + break; + default: { + // nothing to do... + break; + } } - return (activate); + return activate; } @@ -2049,175 +2043,174 @@ SECURITY_READ_ONLY_EARLY(struct filterops) fsevent_filtops = { static int fseventsf_kqfilter(__unused struct fileproc *fp, __unused struct knote *kn, - __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) + __unused struct kevent_internal_s *kev, __unused vfs_context_t ctx) { - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; - int res; + fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + int res; - kn->kn_hook = (void*)fseh; - kn->kn_hookid = 1; + kn->kn_hook = (void*)fseh; + kn->kn_hookid = 1; kn->kn_filtid = EVFILTID_FSEVENT; - lock_watch_table(); + lock_watch_table(); - KNOTE_ATTACH(&fseh->knotes, kn); + KNOTE_ATTACH(&fseh->knotes, kn); - /* check to see if it is fired already */ - res = filt_fsevent(kn, 0); + /* check to see if it is fired already */ + res = filt_fsevent(kn, 0); - unlock_watch_table(); + unlock_watch_table(); - return res; + return res; } static int fseventsf_drain(struct fileproc *fp, __unused vfs_context_t ctx) { - int counter = 0; - fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; - - // if there are people still waiting, sleep for 10ms to - // let them clean up and get out of there. however we - // also don't want to get stuck forever so if they don't - // exit after 5 seconds we're tearing things down anyway. - while(fseh->watcher->blockers && counter++ < 500) { - // issue wakeup in case anyone is blocked waiting for an event - // do this each time we wakeup in case the blocker missed - // the wakeup due to the unprotected test of WATCHER_CLOSING - // and decision to tsleep in fmod_watch... this bit of - // latency is a decent tradeoff against not having to - // take and drop a lock in fmod_watch - lock_watch_table(); - fsevents_wakeup(fseh->watcher); - unlock_watch_table(); + int counter = 0; + fsevent_handle *fseh = (struct fsevent_handle *)fp->f_fglob->fg_data; + + // if there are people still waiting, sleep for 10ms to + // let them clean up and get out of there. however we + // also don't want to get stuck forever so if they don't + // exit after 5 seconds we're tearing things down anyway. 
+ while (fseh->watcher->blockers && counter++ < 500) { + // issue wakeup in case anyone is blocked waiting for an event + // do this each time we wakeup in case the blocker missed + // the wakeup due to the unprotected test of WATCHER_CLOSING + // and decision to tsleep in fmod_watch... this bit of + // latency is a decent tradeoff against not having to + // take and drop a lock in fmod_watch + lock_watch_table(); + fsevents_wakeup(fseh->watcher); + unlock_watch_table(); - tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1); - } + tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1); + } - return 0; + return 0; } static int fseventsopen(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p) { - if (!kauth_cred_issuser(kauth_cred_get())) { - return EPERM; - } - - return 0; + if (!kauth_cred_issuser(kauth_cred_get())) { + return EPERM; + } + + return 0; } static int fseventsclose(__unused dev_t dev, __unused int flag, __unused int mode, __unused struct proc *p) { - return 0; + return 0; } static int fseventsread(__unused dev_t dev, __unused struct uio *uio, __unused int ioflag) { - return EIO; + return EIO; } static int parse_buffer_and_add_events(const char *buffer, int bufsize, vfs_context_t ctx, long *remainder) { - const fse_info *finfo, *dest_finfo; - const char *path, *ptr, *dest_path, *event_start=buffer; - int path_len, type, dest_path_len, err = 0; + const fse_info *finfo, *dest_finfo; + const char *path, *ptr, *dest_path, *event_start = buffer; + int path_len, type, dest_path_len, err = 0; - ptr = buffer; - while ((ptr+sizeof(int)+sizeof(fse_info)+1) < buffer+bufsize) { - type = *(const int *)ptr; - if (type < 0 || type >= FSE_MAX_EVENTS) { - err = EINVAL; - break; - } - - ptr += sizeof(int); - - finfo = (const fse_info *)ptr; - ptr += sizeof(fse_info); + ptr = buffer; + while ((ptr + sizeof(int) + sizeof(fse_info) + 1) < buffer + bufsize) { + type = *(const int *)ptr; + if (type < 0 || type >= FSE_MAX_EVENTS) { + err = EINVAL; + break; + } - path = ptr; - while(ptr < buffer+bufsize && *ptr != '\0') { - ptr++; - } + ptr += sizeof(int); - if (ptr >= buffer+bufsize) { - break; - } + finfo = (const fse_info *)ptr; + ptr += sizeof(fse_info); - ptr++; // advance over the trailing '\0' + path = ptr; + while (ptr < buffer + bufsize && *ptr != '\0') { + ptr++; + } - path_len = ptr - path; + if (ptr >= buffer + bufsize) { + break; + } - if (type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CLONE) { - event_start = ptr; // record where the next event starts + ptr++; // advance over the trailing '\0' - err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE); - if (err) { - break; - } - continue; - } + path_len = ptr - path; - // - // if we're here we have to slurp up the destination finfo - // and path so that we can pass them to the add_fsevent() - // call. basically it's a copy of the above code. - // - dest_finfo = (const fse_info *)ptr; - ptr += sizeof(fse_info); + if (type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CLONE) { + event_start = ptr; // record where the next event starts - dest_path = ptr; - while(ptr < buffer+bufsize && *ptr != '\0') { - ptr++; - } + err = add_fsevent(type, ctx, FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, FSE_ARG_DONE); + if (err) { + break; + } + continue; + } - if (ptr >= buffer+bufsize) { - break; - } + // + // if we're here we have to slurp up the destination finfo + // and path so that we can pass them to the add_fsevent() + // call. 
basically it's a copy of the above code. + // + dest_finfo = (const fse_info *)ptr; + ptr += sizeof(fse_info); + + dest_path = ptr; + while (ptr < buffer + bufsize && *ptr != '\0') { + ptr++; + } - ptr++; // advance over the trailing '\0' - event_start = ptr; // record where the next event starts + if (ptr >= buffer + bufsize) { + break; + } - dest_path_len = ptr - dest_path; - // - // If the destination inode number is non-zero, generate a rename - // with both source and destination FSE_ARG_FINFO. Otherwise generate - // a rename with only one FSE_ARG_FINFO. If you need to inject an - // exchange with an inode of zero, just make that inode (and its path) - // come in as the first one, not the second. - // - if (dest_finfo->ino) { - err = add_fsevent(type, ctx, - FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, - FSE_ARG_STRING, dest_path_len, dest_path, FSE_ARG_FINFO, dest_finfo, - FSE_ARG_DONE); - } else { - err = add_fsevent(type, ctx, - FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, - FSE_ARG_STRING, dest_path_len, dest_path, - FSE_ARG_DONE); - } + ptr++; // advance over the trailing '\0' + event_start = ptr; // record where the next event starts + + dest_path_len = ptr - dest_path; + // + // If the destination inode number is non-zero, generate a rename + // with both source and destination FSE_ARG_FINFO. Otherwise generate + // a rename with only one FSE_ARG_FINFO. If you need to inject an + // exchange with an inode of zero, just make that inode (and its path) + // come in as the first one, not the second. + // + if (dest_finfo->ino) { + err = add_fsevent(type, ctx, + FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, + FSE_ARG_STRING, dest_path_len, dest_path, FSE_ARG_FINFO, dest_finfo, + FSE_ARG_DONE); + } else { + err = add_fsevent(type, ctx, + FSE_ARG_STRING, path_len, path, FSE_ARG_FINFO, finfo, + FSE_ARG_STRING, dest_path_len, dest_path, + FSE_ARG_DONE); + } - if (err) { - break; + if (err) { + break; + } } - } - - // if the last event wasn't complete, set the remainder - // to be the last event start boundary. - // - *remainder = (long)((buffer+bufsize) - event_start); + // if the last event wasn't complete, set the remainder + // to be the last event start boundary. + // + *remainder = (long)((buffer + bufsize) - event_start); - return err; + return err; } @@ -2229,214 +2222,214 @@ parse_buffer_and_add_events(const char *buffer, int bufsize, vfs_context_t ctx, // smaller is not a good idea. // #define WRITE_BUFFER_SIZE 4096 -char *write_buffer=NULL; +char *write_buffer = NULL; static int fseventswrite(__unused dev_t dev, struct uio *uio, __unused int ioflag) { - int error=0, count; - vfs_context_t ctx = vfs_context_current(); - long offset=0, remainder; - - lck_mtx_lock(&event_writer_lock); - - if (write_buffer == NULL) { - if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE, VM_KERN_MEMORY_FILE)) { - lck_mtx_unlock(&event_writer_lock); - return ENOMEM; - } - } - - // - // this loop copies in and processes the events written. - // it takes care to copy in reasonable size chunks and - // process them. if there is an event that spans a chunk - // boundary we're careful to copy those bytes down to the - // beginning of the buffer and read the next chunk in just - // after it. 
- // - while(uio_resid(uio)) { - if (uio_resid(uio) > (WRITE_BUFFER_SIZE-offset)) { - count = WRITE_BUFFER_SIZE - offset; - } else { - count = uio_resid(uio); - } + int error = 0, count; + vfs_context_t ctx = vfs_context_current(); + long offset = 0, remainder; - error = uiomove(write_buffer+offset, count, uio); - if (error) { - break; - } + lck_mtx_lock(&event_writer_lock); - // printf("fsevents: write: copied in %d bytes (offset: %ld)\n", count, offset); - error = parse_buffer_and_add_events(write_buffer, offset+count, ctx, &remainder); - if (error) { - break; - } + if (write_buffer == NULL) { + if (kmem_alloc(kernel_map, (vm_offset_t *)&write_buffer, WRITE_BUFFER_SIZE, VM_KERN_MEMORY_FILE)) { + lck_mtx_unlock(&event_writer_lock); + return ENOMEM; + } + } // - // if there's any remainder, copy it down to the beginning - // of the buffer so that it will get processed the next time - // through the loop. note that the remainder always starts - // at an event boundary. + // this loop copies in and processes the events written. + // it takes care to copy in reasonable size chunks and + // process them. if there is an event that spans a chunk + // boundary we're careful to copy those bytes down to the + // beginning of the buffer and read the next chunk in just + // after it. // - if (remainder != 0) { - // printf("fsevents: write: an event spanned a %d byte boundary. remainder: %ld\n", - // WRITE_BUFFER_SIZE, remainder); - memmove(write_buffer, (write_buffer+count+offset) - remainder, remainder); - offset = remainder; - } else { - offset = 0; + while (uio_resid(uio)) { + if (uio_resid(uio) > (WRITE_BUFFER_SIZE - offset)) { + count = WRITE_BUFFER_SIZE - offset; + } else { + count = uio_resid(uio); + } + + error = uiomove(write_buffer + offset, count, uio); + if (error) { + break; + } + + // printf("fsevents: write: copied in %d bytes (offset: %ld)\n", count, offset); + error = parse_buffer_and_add_events(write_buffer, offset + count, ctx, &remainder); + if (error) { + break; + } + + // + // if there's any remainder, copy it down to the beginning + // of the buffer so that it will get processed the next time + // through the loop. note that the remainder always starts + // at an event boundary. + // + if (remainder != 0) { + // printf("fsevents: write: an event spanned a %d byte boundary. 
remainder: %ld\n", + // WRITE_BUFFER_SIZE, remainder); + memmove(write_buffer, (write_buffer + count + offset) - remainder, remainder); + offset = remainder; + } else { + offset = 0; + } } - } - lck_mtx_unlock(&event_writer_lock); + lck_mtx_unlock(&event_writer_lock); - return error; + return error; } static const struct fileops fsevents_fops = { - .fo_type = DTYPE_FSEVENTS, - .fo_read = fseventsf_read, - .fo_write = fseventsf_write, - .fo_ioctl = fseventsf_ioctl, - .fo_select = fseventsf_select, - .fo_close = fseventsf_close, - .fo_kqfilter = fseventsf_kqfilter, - .fo_drain = fseventsf_drain, + .fo_type = DTYPE_FSEVENTS, + .fo_read = fseventsf_read, + .fo_write = fseventsf_write, + .fo_ioctl = fseventsf_ioctl, + .fo_select = fseventsf_select, + .fo_close = fseventsf_close, + .fo_kqfilter = fseventsf_kqfilter, + .fo_drain = fseventsf_drain, }; typedef struct fsevent_clone_args32 { - user32_addr_t event_list; - int32_t num_events; - int32_t event_queue_depth; - user32_addr_t fd; + user32_addr_t event_list; + int32_t num_events; + int32_t event_queue_depth; + user32_addr_t fd; } fsevent_clone_args32; typedef struct fsevent_clone_args64 { - user64_addr_t event_list; - int32_t num_events; - int32_t event_queue_depth; - user64_addr_t fd; + user64_addr_t event_list; + int32_t num_events; + int32_t event_queue_depth; + user64_addr_t fd; } fsevent_clone_args64; -#define FSEVENTS_CLONE_32 _IOW('s', 1, fsevent_clone_args32) -#define FSEVENTS_CLONE_64 _IOW('s', 1, fsevent_clone_args64) +#define FSEVENTS_CLONE_32 _IOW('s', 1, fsevent_clone_args32) +#define FSEVENTS_CLONE_64 _IOW('s', 1, fsevent_clone_args64) static int fseventsioctl(__unused dev_t dev, u_long cmd, caddr_t data, __unused int flag, struct proc *p) { - struct fileproc *f; - int fd, error; - fsevent_handle *fseh = NULL; - fsevent_clone_args64 *fse_clone_args, _fse_clone; - int8_t *event_list; - int is64bit = proc_is64bit(p); - - switch (cmd) { + struct fileproc *f; + int fd, error; + fsevent_handle *fseh = NULL; + fsevent_clone_args64 *fse_clone_args, _fse_clone; + int8_t *event_list; + int is64bit = proc_is64bit(p); + + switch (cmd) { case FSEVENTS_CLONE_32: { - if (is64bit) { - return EINVAL; - } - fsevent_clone_args32 *args32 = (fsevent_clone_args32 *)data; + if (is64bit) { + return EINVAL; + } + fsevent_clone_args32 *args32 = (fsevent_clone_args32 *)data; - fse_clone_args = &_fse_clone; - memset(fse_clone_args, 0, sizeof(fsevent_clone_args64)); + fse_clone_args = &_fse_clone; + memset(fse_clone_args, 0, sizeof(fsevent_clone_args64)); - fse_clone_args->event_list = CAST_USER_ADDR_T(args32->event_list); - fse_clone_args->num_events = args32->num_events; - fse_clone_args->event_queue_depth = args32->event_queue_depth; - fse_clone_args->fd = CAST_USER_ADDR_T(args32->fd); - goto handle_clone; + fse_clone_args->event_list = CAST_USER_ADDR_T(args32->event_list); + fse_clone_args->num_events = args32->num_events; + fse_clone_args->event_queue_depth = args32->event_queue_depth; + fse_clone_args->fd = CAST_USER_ADDR_T(args32->fd); + goto handle_clone; } case FSEVENTS_CLONE_64: - if (!is64bit) { - return EINVAL; - } - fse_clone_args = (fsevent_clone_args64 *)data; + if (!is64bit) { + return EINVAL; + } + fse_clone_args = (fsevent_clone_args64 *)data; - handle_clone: - if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) { - return EINVAL; - } +handle_clone: + if (fse_clone_args->num_events < 0 || fse_clone_args->num_events > 4096) { + return EINVAL; + } - MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle), - M_TEMP, 
M_WAITOK); - if (fseh == NULL) { - return ENOMEM; - } - memset(fseh, 0, sizeof(fsevent_handle)); - - klist_init(&fseh->knotes); - - MALLOC(event_list, int8_t *, - fse_clone_args->num_events * sizeof(int8_t), - M_TEMP, M_WAITOK); - if (event_list == NULL) { - FREE(fseh, M_TEMP); - return ENOMEM; - } - - error = copyin(fse_clone_args->event_list, - (void *)event_list, - fse_clone_args->num_events * sizeof(int8_t)); - if (error) { - FREE(event_list, M_TEMP); - FREE(fseh, M_TEMP); - return error; - } - - error = add_watcher(event_list, - fse_clone_args->num_events, - fse_clone_args->event_queue_depth, - &fseh->watcher, - fseh); - if (error) { - FREE(event_list, M_TEMP); - FREE(fseh, M_TEMP); - return error; - } - - fseh->watcher->fseh = fseh; - - error = falloc(p, &f, &fd, vfs_context_current()); - if (error) { - remove_watcher(fseh->watcher); - FREE(event_list, M_TEMP); - FREE(fseh, M_TEMP); - return (error); - } - proc_fdlock(p); - f->f_fglob->fg_flag = FREAD | FWRITE; - f->f_fglob->fg_ops = &fsevents_fops; - f->f_fglob->fg_data = (caddr_t) fseh; - proc_fdunlock(p); - error = copyout((void *)&fd, fse_clone_args->fd, sizeof(int32_t)); - if (error != 0) { - fp_free(p, fd, f); - } else { + MALLOC(fseh, fsevent_handle *, sizeof(fsevent_handle), + M_TEMP, M_WAITOK); + if (fseh == NULL) { + return ENOMEM; + } + memset(fseh, 0, sizeof(fsevent_handle)); + + klist_init(&fseh->knotes); + + MALLOC(event_list, int8_t *, + fse_clone_args->num_events * sizeof(int8_t), + M_TEMP, M_WAITOK); + if (event_list == NULL) { + FREE(fseh, M_TEMP); + return ENOMEM; + } + + error = copyin(fse_clone_args->event_list, + (void *)event_list, + fse_clone_args->num_events * sizeof(int8_t)); + if (error) { + FREE(event_list, M_TEMP); + FREE(fseh, M_TEMP); + return error; + } + + error = add_watcher(event_list, + fse_clone_args->num_events, + fse_clone_args->event_queue_depth, + &fseh->watcher, + fseh); + if (error) { + FREE(event_list, M_TEMP); + FREE(fseh, M_TEMP); + return error; + } + + fseh->watcher->fseh = fseh; + + error = falloc(p, &f, &fd, vfs_context_current()); + if (error) { + remove_watcher(fseh->watcher); + FREE(event_list, M_TEMP); + FREE(fseh, M_TEMP); + return error; + } proc_fdlock(p); - procfdtbl_releasefd(p, fd, NULL); - fp_drop(p, fd, f, 1); + f->f_fglob->fg_flag = FREAD | FWRITE; + f->f_fglob->fg_ops = &fsevents_fops; + f->f_fglob->fg_data = (caddr_t) fseh; proc_fdunlock(p); - } - break; + error = copyout((void *)&fd, fse_clone_args->fd, sizeof(int32_t)); + if (error != 0) { + fp_free(p, fd, f); + } else { + proc_fdlock(p); + procfdtbl_releasefd(p, fd, NULL); + fp_drop(p, fd, f, 1); + proc_fdunlock(p); + } + break; default: - error = EINVAL; - break; - } + error = EINVAL; + break; + } - return error; + return error; } static void fsevents_wakeup(fs_event_watcher *watcher) { - selwakeup(&watcher->fseh->si); - KNOTE(&watcher->fseh->knotes, NOTE_WRITE|NOTE_NONE); - wakeup((caddr_t)watcher); + selwakeup(&watcher->fseh->si); + KNOTE(&watcher->fseh->knotes, NOTE_WRITE | NOTE_NONE); + wakeup((caddr_t)watcher); } @@ -2446,20 +2439,20 @@ fsevents_wakeup(fs_event_watcher *watcher) */ static struct cdevsw fsevents_cdevsw = { - fseventsopen, /* open */ - fseventsclose, /* close */ - fseventsread, /* read */ - fseventswrite, /* write */ - fseventsioctl, /* ioctl */ - (stop_fcn_t *)&nulldev, /* stop */ - (reset_fcn_t *)&nulldev, /* reset */ - NULL, /* tty's */ - eno_select, /* select */ - eno_mmap, /* mmap */ - eno_strat, /* strategy */ - eno_getc, /* getc */ - eno_putc, /* putc */ - 0 /* type */ + fseventsopen, /* 
open */ + fseventsclose, /* close */ + fseventsread, /* read */ + fseventswrite, /* write */ + fseventsioctl, /* ioctl */ + (stop_fcn_t *)&nulldev, /* stop */ + (reset_fcn_t *)&nulldev, /* reset */ + NULL, /* tty's */ + eno_select, /* select */ + eno_mmap, /* mmap */ + eno_strat, /* strategy */ + eno_getc, /* getc */ + eno_putc, /* putc */ + 0 /* type */ }; @@ -2471,142 +2464,141 @@ static struct cdevsw fsevents_cdevsw = void fsevents_init(void) { - int ret; + int ret; - if (fsevents_installed) { - return; - } + if (fsevents_installed) { + return; + } - fsevents_installed = 1; + fsevents_installed = 1; - ret = cdevsw_add(-1, &fsevents_cdevsw); - if (ret < 0) { - fsevents_installed = 0; - return; - } + ret = cdevsw_add(-1, &fsevents_cdevsw); + if (ret < 0) { + fsevents_installed = 0; + return; + } - devfs_make_node(makedev (ret, 0), DEVFS_CHAR, - UID_ROOT, GID_WHEEL, 0644, "fsevents", 0); + devfs_make_node(makedev(ret, 0), DEVFS_CHAR, + UID_ROOT, GID_WHEEL, 0644, "fsevents", 0); - fsevents_internal_init(); + fsevents_internal_init(); } char * get_pathbuff(void) { - char *path; + char *path; - MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - return path; + MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); + return path; } void release_pathbuff(char *path) { - - if (path == NULL) { - return; - } - FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + if (path == NULL) { + return; + } + FREE_ZONE(path, MAXPATHLEN, M_NAMEI); } int get_fse_info(struct vnode *vp, fse_info *fse, __unused vfs_context_t ctx) { - struct vnode_attr va; - - VATTR_INIT(&va); - VATTR_WANTED(&va, va_fsid); - VATTR_WANTED(&va, va_fileid); - VATTR_WANTED(&va, va_mode); - VATTR_WANTED(&va, va_uid); - VATTR_WANTED(&va, va_gid); - if (vp->v_flag & VISHARDLINK) { - if (vp->v_type == VDIR) { - VATTR_WANTED(&va, va_dirlinkcount); - } else { - VATTR_WANTED(&va, va_nlink); + struct vnode_attr va; + + VATTR_INIT(&va); + VATTR_WANTED(&va, va_fsid); + VATTR_WANTED(&va, va_fileid); + VATTR_WANTED(&va, va_mode); + VATTR_WANTED(&va, va_uid); + VATTR_WANTED(&va, va_gid); + if (vp->v_flag & VISHARDLINK) { + if (vp->v_type == VDIR) { + VATTR_WANTED(&va, va_dirlinkcount); + } else { + VATTR_WANTED(&va, va_nlink); + } + } + + if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) { + memset(fse, 0, sizeof(fse_info)); + return -1; } - } - - if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) { - memset(fse, 0, sizeof(fse_info)); - return -1; - } - return vnode_get_fse_info_from_vap(vp, fse, &va); + return vnode_get_fse_info_from_vap(vp, fse, &va); } int -vnode_get_fse_info_from_vap(vnode_t vp, fse_info *fse, struct vnode_attr *vap) +vnode_get_fse_info_from_vap(vnode_t vp, fse_info *fse, struct vnode_attr *vap) { - fse->ino = (ino64_t)vap->va_fileid; - fse->dev = (dev_t)vap->va_fsid; - fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | vap->va_mode; - fse->uid = (uid_t)vap->va_uid; - fse->gid = (gid_t)vap->va_gid; - if (vp->v_flag & VISHARDLINK) { - fse->mode |= FSE_MODE_HLINK; - if (vp->v_type == VDIR) { - fse->nlink = (uint64_t)vap->va_dirlinkcount; - } else { - fse->nlink = (uint64_t)vap->va_nlink; + fse->ino = (ino64_t)vap->va_fileid; + fse->dev = (dev_t)vap->va_fsid; + fse->mode = (int32_t)vnode_vttoif(vnode_vtype(vp)) | vap->va_mode; + fse->uid = (uid_t)vap->va_uid; + fse->gid = (gid_t)vap->va_gid; + if (vp->v_flag & VISHARDLINK) { + fse->mode |= FSE_MODE_HLINK; + if (vp->v_type == VDIR) { + fse->nlink = (uint64_t)vap->va_dirlinkcount; + } else { + fse->nlink = (uint64_t)vap->va_nlink; + } } - } - return 0; + return 0; } void 
create_fsevent_from_kevent(vnode_t vp, uint32_t kevents, struct vnode_attr *vap) { - int fsevent_type=FSE_CONTENT_MODIFIED, len; // the default is the most pessimistic - char pathbuf[MAXPATHLEN]; - fse_info fse; - - - if (kevents & VNODE_EVENT_DELETE) { - fsevent_type = FSE_DELETE; - } else if (kevents & (VNODE_EVENT_EXTEND|VNODE_EVENT_WRITE)) { - fsevent_type = FSE_CONTENT_MODIFIED; - } else if (kevents & VNODE_EVENT_LINK) { - fsevent_type = FSE_CREATE_FILE; - } else if (kevents & VNODE_EVENT_RENAME) { - fsevent_type = FSE_CREATE_FILE; // XXXdbg - should use FSE_RENAME but we don't have the destination info; - } else if (kevents & (VNODE_EVENT_FILE_CREATED|VNODE_EVENT_FILE_REMOVED|VNODE_EVENT_DIR_CREATED|VNODE_EVENT_DIR_REMOVED)) { - fsevent_type = FSE_STAT_CHANGED; // XXXdbg - because vp is a dir and the thing created/removed lived inside it - } else { // a catch all for VNODE_EVENT_PERMS, VNODE_EVENT_ATTRIB and anything else - fsevent_type = FSE_STAT_CHANGED; - } - - // printf("convert_kevent: kevents 0x%x fsevent type 0x%x (for %s)\n", kevents, fsevent_type, vp->v_name ? vp->v_name : "(no-name)"); - - fse.dev = vap->va_fsid; - fse.ino = vap->va_fileid; - fse.mode = vnode_vttoif(vnode_vtype(vp)) | (uint32_t)vap->va_mode; - if (vp->v_flag & VISHARDLINK) { - fse.mode |= FSE_MODE_HLINK; + int fsevent_type = FSE_CONTENT_MODIFIED, len; // the default is the most pessimistic + char pathbuf[MAXPATHLEN]; + fse_info fse; + + + if (kevents & VNODE_EVENT_DELETE) { + fsevent_type = FSE_DELETE; + } else if (kevents & (VNODE_EVENT_EXTEND | VNODE_EVENT_WRITE)) { + fsevent_type = FSE_CONTENT_MODIFIED; + } else if (kevents & VNODE_EVENT_LINK) { + fsevent_type = FSE_CREATE_FILE; + } else if (kevents & VNODE_EVENT_RENAME) { + fsevent_type = FSE_CREATE_FILE; // XXXdbg - should use FSE_RENAME but we don't have the destination info; + } else if (kevents & (VNODE_EVENT_FILE_CREATED | VNODE_EVENT_FILE_REMOVED | VNODE_EVENT_DIR_CREATED | VNODE_EVENT_DIR_REMOVED)) { + fsevent_type = FSE_STAT_CHANGED; // XXXdbg - because vp is a dir and the thing created/removed lived inside it + } else { // a catch all for VNODE_EVENT_PERMS, VNODE_EVENT_ATTRIB and anything else + fsevent_type = FSE_STAT_CHANGED; + } + + // printf("convert_kevent: kevents 0x%x fsevent type 0x%x (for %s)\n", kevents, fsevent_type, vp->v_name ? vp->v_name : "(no-name)"); + + fse.dev = vap->va_fsid; + fse.ino = vap->va_fileid; + fse.mode = vnode_vttoif(vnode_vtype(vp)) | (uint32_t)vap->va_mode; + if (vp->v_flag & VISHARDLINK) { + fse.mode |= FSE_MODE_HLINK; + if (vp->v_type == VDIR) { + fse.nlink = vap->va_dirlinkcount; + } else { + fse.nlink = vap->va_nlink; + } + } + if (vp->v_type == VDIR) { - fse.nlink = vap->va_dirlinkcount; - } else { - fse.nlink = vap->va_nlink; + fse.mode |= FSE_REMOTE_DIR_EVENT; } - } - if (vp->v_type == VDIR) { - fse.mode |= FSE_REMOTE_DIR_EVENT; - } - - fse.uid = vap->va_uid; - fse.gid = vap->va_gid; + fse.uid = vap->va_uid; + fse.gid = vap->va_gid; - len = sizeof(pathbuf); - if (vn_getpath(vp, pathbuf, &len) == 0) { - add_fsevent(fsevent_type, vfs_context_current(), FSE_ARG_STRING, len, pathbuf, FSE_ARG_FINFO, &fse, FSE_ARG_DONE); - } - return; + len = sizeof(pathbuf); + if (vn_getpath(vp, pathbuf, &len) == 0) { + add_fsevent(fsevent_type, vfs_context_current(), FSE_ARG_STRING, len, pathbuf, FSE_ARG_FINFO, &fse, FSE_ARG_DONE); + } + return; } #else /* CONFIG_FSE */ @@ -2640,7 +2632,8 @@ add_fsevent(__unused int type, __unused vfs_context_t ctx, ...) 
return 0; } -int need_fsevent(__unused int type, __unused vnode_t vp) +int +need_fsevent(__unused int type, __unused vnode_t vp) { return 0; } diff --git a/bsd/vfs/vfs_fslog.c b/bsd/vfs/vfs_fslog.c index 87db6067f..37ac39c09 100644 --- a/bsd/vfs/vfs_fslog.c +++ b/bsd/vfs/vfs_fslog.c @@ -2,7 +2,7 @@ * Copyright (c) 2006-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -50,8 +50,8 @@ /* Log information about external modification of a process, * using MessageTracer formatting. Assumes that both the caller * and target are appropriately locked. - * Currently prints following information - - * 1. Caller process name (truncated to 16 characters) + * Currently prints following information - + * 1. Caller process name (truncated to 16 characters) * 2. Caller process Mach-O UUID * 3. Target process name (truncated to 16 characters) * 4. Target process Mach-O UUID @@ -60,15 +60,14 @@ void fslog_extmod_msgtracer(proc_t caller, proc_t target) { if ((caller != PROC_NULL) && (target != PROC_NULL)) { - /* * Print into buffer large enough for "ThisIsAnApplicat(BC223DD7-B314-42E0-B6B0-C5D2E6638337)", * including space for escaping, and NUL byte included in sizeof(uuid_string_t). 
*/ uuid_string_t uuidstr; - char c_name[2*MAXCOMLEN + 2 /* () */ + sizeof(uuid_string_t)]; - char t_name[2*MAXCOMLEN + 2 /* () */ + sizeof(uuid_string_t)]; + char c_name[2 * MAXCOMLEN + 2 /* () */ + sizeof(uuid_string_t)]; + char t_name[2 * MAXCOMLEN + 2 /* () */ + sizeof(uuid_string_t)]; strlcpy(c_name, caller->p_comm, sizeof(c_name)); uuid_unparse_upper(caller->p_uuid, uuidstr); @@ -89,20 +88,20 @@ fslog_extmod_msgtracer(proc_t caller, proc_t target) } #if DEBUG printf("EXTMOD: %s(%d) -> %s(%d)\n", - c_name, - proc_pid(caller), - t_name, - proc_pid(target)); + c_name, + proc_pid(caller), + t_name, + proc_pid(target)); #endif kern_asl_msg(LOG_DEBUG, "messagetracer", - 5, - "com.apple.message.domain", "com.apple.kernel.external_modification", /* 0 */ - "com.apple.message.signature", c_name, /* 1 */ - "com.apple.message.signature2", t_name, /* 2 */ - "com.apple.message.result", "noop", /* 3 */ - "com.apple.message.summarize", "YES", /* 4 */ - NULL); + 5, + "com.apple.message.domain", "com.apple.kernel.external_modification", /* 0 */ + "com.apple.message.signature", c_name, /* 1 */ + "com.apple.message.signature2", t_name, /* 2 */ + "com.apple.message.result", "noop", /* 3 */ + "com.apple.message.summarize", "YES", /* 4 */ + NULL); } } @@ -133,8 +132,8 @@ static bool match_fpx_event(const struct fpx_event *fe, const uuid_t uuid, const uint32_t code, const uint32_t xcpt) { - return (code == fe->fe_code && xcpt == fe->fe_xcpt && - 0 == memcmp(uuid, fe->fe_uuid, sizeof (uuid_t))); + return code == fe->fe_code && xcpt == fe->fe_xcpt && + 0 == memcmp(uuid, fe->fe_uuid, sizeof(uuid_t)); } #if FPX_EVENT_DBG @@ -146,12 +145,12 @@ print_fpx_event(const char *pfx, const struct fpx_event *fe) printf("%s: code 0x%x xcpt 0x%x uuid '%s'\n", pfx, fe->fe_code, fe->fe_xcpt, uustr); } -#define DPRINTF_FPX_EVENT(pfx, fe) print_fpx_event(pfx, fe) +#define DPRINTF_FPX_EVENT(pfx, fe) print_fpx_event(pfx, fe) #else -#define DPRINTF_FPX_EVENT(pfx, fe) /* nothing */ +#define DPRINTF_FPX_EVENT(pfx, fe) /* nothing */ #endif -#define MAX_DISTINCT_FPX_EVENTS 101 /* (approx one page of heap) */ +#define MAX_DISTINCT_FPX_EVENTS 101 /* (approx one page of heap) */ /* * Filter to detect "new" tuples. 
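The hunk below reformats novel_fpx_event(), the filter those macros support: each (uuid, code, xcpt) tuple sits on a TAILQ, a repeat sighting is relinked at the head, and once MAX_DISTINCT_FPX_EVENTS entries exist the least-recently-seen tail entry is recycled rather than growing the list. What follows is a minimal userland sketch of that move-to-front idea, not the kernel code: malloc and a singly linked list stand in for kalloc/TAILQ/lck_mtx, the UUID key is dropped for brevity, and all names are invented.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_EVENTS 4    /* tiny cap, in the spirit of MAX_DISTINCT_FPX_EVENTS */

struct seen_event {
	struct seen_event *next;
	unsigned int code, xcpt;
};

static struct seen_event *head;
static int nevents;

/* Return true only the first time a (code, xcpt) tuple shows up.
 * A hit is relinked at the head; when the list is full, the
 * least-recently-seen tail entry is recycled for the new tuple. */
static bool
novel_event(unsigned int code, unsigned int xcpt)
{
	struct seen_event **pp, *e;

	for (pp = &head; (e = *pp) != NULL; pp = &e->next) {
		if (e->code == code && e->xcpt == xcpt) {
			*pp = e->next;          /* unlink... */
			e->next = head;         /* ...and move to front */
			head = e;
			return false;
		}
		if (nevents >= MAX_EVENTS && e->next == NULL) {
			*pp = NULL;             /* unlink the tail for reuse */
			goto fill;
		}
	}
	if ((e = malloc(sizeof(*e))) == NULL) {
		return false;                   /* degrade quietly on OOM */
	}
	nevents++;
fill:
	e->code = code;
	e->xcpt = xcpt;
	e->next = head;
	head = e;
	return true;
}

int
main(void)
{
	printf("%d\n", novel_event(5, 0x1));    /* 1: first sighting */
	printf("%d\n", novel_event(5, 0x1));    /* 0: duplicate, moved to head */
	printf("%d\n", novel_event(8, 0x2));    /* 1: new tuple */
	return 0;
}

The design point survives the simplification: repeat offenders stay near the head, so the common lookup stays short, while the fixed cap bounds the whole structure to roughly a page of heap, as the kernel comment notes.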
@@ -167,7 +166,7 @@ static bool novel_fpx_event(const uuid_t uuid, uint32_t code, uint32_t xcpt) { static TAILQ_HEAD(fpx_event_head, fpx_event) fehead = - TAILQ_HEAD_INITIALIZER(fehead); + TAILQ_HEAD_INITIALIZER(fehead); struct fpx_event *fe; lck_mtx_lock(&fpxlock); @@ -178,7 +177,7 @@ novel_fpx_event(const uuid_t uuid, uint32_t code, uint32_t xcpt) /* seen before and element already at head */ lck_mtx_unlock(&fpxlock); DPRINTF_FPX_EVENT("seen, head", fe); - return (false); + return false; } unsigned int count = 0; @@ -190,7 +189,7 @@ novel_fpx_event(const uuid_t uuid, uint32_t code, uint32_t xcpt) TAILQ_INSERT_HEAD(&fehead, fe, fe_link); lck_mtx_unlock(&fpxlock); DPRINTF_FPX_EVENT("seen, moved to head", fe); - return (false); + return false; } count++; } @@ -204,9 +203,9 @@ novel_fpx_event(const uuid_t uuid, uint32_t code, uint32_t xcpt) DPRINTF_FPX_EVENT("reusing", fe); } else { /* add a new element to the list */ - fe = kalloc(sizeof (*fe)); + fe = kalloc(sizeof(*fe)); } - memcpy(fe->fe_uuid, uuid, sizeof (uuid_t)); + memcpy(fe->fe_uuid, uuid, sizeof(uuid_t)); fe->fe_code = code; fe->fe_xcpt = xcpt; TAILQ_INSERT_HEAD(&fehead, fe, fe_link); @@ -214,62 +213,67 @@ novel_fpx_event(const uuid_t uuid, uint32_t code, uint32_t xcpt) DPRINTF_FPX_EVENT("novel", fe); - return (true); + return true; } void fpxlog( - int code, /* Mach exception code: e.g. 5 or 8 */ - uint32_t stat, /* Full FP status register bits */ - uint32_t ctrl, /* Full FP control register bits */ - uint32_t xcpt) /* Exception bits from FP status */ + int code, /* Mach exception code: e.g. 5 or 8 */ + uint32_t stat, /* Full FP status register bits */ + uint32_t ctrl, /* Full FP control register bits */ + uint32_t xcpt) /* Exception bits from FP status */ { proc_t p = current_proc(); - if (PROC_NULL == p) + if (PROC_NULL == p) { return; + } uuid_t uuid; - proc_getexecutableuuid(p, uuid, sizeof (uuid)); + proc_getexecutableuuid(p, uuid, sizeof(uuid)); /* * Check to see if an exception with this * has been seen before. If "novel" then log a message. 
*/ - if (!novel_fpx_event(uuid, code, xcpt)) + if (!novel_fpx_event(uuid, code, xcpt)) { return; + } const size_t nmlen = 2 * MAXCOMLEN + 1; char nm[nmlen] = {}; proc_selfname(nm, nmlen); - if (escape_str(nm, strlen(nm) + 1, nmlen)) - snprintf(nm, nmlen, "(a.out)"); + if (escape_str(nm, strlen(nm) + 1, nmlen)) { + snprintf(nm, nmlen, "(a.out)"); + } const size_t slen = 8 + 1 + 8 + 1; char xcptstr[slen], csrstr[slen]; snprintf(xcptstr, slen, "%x.%x", code, xcpt); - if (ctrl == stat) + if (ctrl == stat) { snprintf(csrstr, slen, "%x", ctrl); - else + } else { snprintf(csrstr, slen, "%x.%x", ctrl, stat); + } #if DEVELOPMENT || DEBUG printf("%s[%d]: com.apple.kernel.fpx: %s, %s\n", - nm, proc_pid(p), xcptstr, csrstr); + nm, proc_pid(p), xcptstr, csrstr); #endif kern_asl_msg(LOG_DEBUG, "messagetracer", 5, - /* 0 */ "com.apple.message.domain", "com.apple.kernel.fpx", - /* 1 */ "com.apple.message.signature", nm, - /* 2 */ "com.apple.message.signature2", xcptstr, - /* 3 */ "com.apple.message.value", csrstr, - /* 4 */ "com.apple.message.summarize", "YES", - NULL); + /* 0 */ "com.apple.message.domain", "com.apple.kernel.fpx", + /* 1 */ "com.apple.message.signature", nm, + /* 2 */ "com.apple.message.signature2", xcptstr, + /* 3 */ "com.apple.message.value", csrstr, + /* 4 */ "com.apple.message.summarize", "YES", + NULL); } #else void fpxlog_init(void) -{} +{ +} #endif /* __x86_64__ */ diff --git a/bsd/vfs/vfs_init.c b/bsd/vfs/vfs_init.c index bd44f3631..cc70d2c24 100644 --- a/bsd/vfs/vfs_init.c +++ b/bsd/vfs/vfs_init.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */ @@ -105,9 +105,9 @@ __private_extern__ void vntblinit(void); extern struct vnodeopv_desc *vfs_opv_descs[]; - /* a list of lists of vnodeops defns */ +/* a list of lists of vnodeops defns */ extern struct vnodeop_desc *vfs_op_descs[]; - /* and the operations they perform */ +/* and the operations they perform */ /* * This code doesn't work if the defn is **vnodop_defns with cc. * The problem is because of the compiler sometimes putting in an @@ -116,7 +116,7 @@ extern struct vnodeop_desc *vfs_op_descs[]; */ int vfs_opv_numops; -typedef int (*PFIvp)(void *); +typedef int (*PFIvp)(void *); /* * A miscellaneous routine. 
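The next hunks reindent vfs_opv_init(), which builds each filesystem's operations vector: every vnodeopv_entry_desc drops its implementation into the slot named by its operation's vdesc_offset, and a final pass backfills any slot still NULL with the vnop_default entry, so an unimplemented op lands on the default routine instead of a NULL pointer. A toy sketch of that fill-then-backfill pattern; the types, the table contents, and NUMOPS are invented for illustration, and calloc()/abort() stand in for the kernel's MALLOC()/panic():

#include <stdio.h>
#include <stdlib.h>

typedef int (*opfn_t)(void *);

struct op_desc { int offset; };                         /* one per operation */
struct op_entry { struct op_desc *op; opfn_t impl; };   /* one per table row */

#define NUMOPS 3
static struct op_desc op_default = { 0 };       /* plays the vnop_default role */
static struct op_desc op_lookup  = { 1 };
static struct op_desc op_read    = { 2 };

static int
default_impl(void *a)
{
	(void)a;
	return 45;      /* ENOTSUP on Darwin, like vn_default_error() */
}

static int
lookup_impl(void *a)
{
	(void)a;
	return 0;
}

/* Fill a vector from a NULL-terminated entry table, then backfill
 * empty slots with the default routine, as vfs_opv_init() does. */
static opfn_t *
build_vector(const struct op_entry *entries)
{
	opfn_t *vec = calloc(NUMOPS, sizeof(*vec));

	if (vec == NULL) {
		abort();
	}
	for (; entries->op != NULL; entries++) {
		vec[entries->op->offset] = entries->impl;
	}
	if (vec[op_default.offset] == NULL) {
		abort();        /* vfs_opv_init() panics here */
	}
	for (int k = 0; k < NUMOPS; k++) {
		if (vec[k] == NULL) {
			vec[k] = vec[op_default.offset];
		}
	}
	return vec;
}

int
main(void)
{
	struct op_entry fs_ops[] = {
		{ &op_default, default_impl },
		{ &op_lookup, lookup_impl },
		{ NULL, NULL },
	};
	opfn_t *vec = build_vector(fs_ops);

	/* lookup is implemented; read falls through to the default */
	printf("lookup=%d read=%d\n", vec[op_lookup.offset](NULL),
	    vec[op_read.offset](NULL));         /* lookup=0 read=45 */
	free(vec);
	return 0;
}

With the default copied into every empty slot, callers can dispatch through the vector unconditionally, which is exactly why vfs_opv_init() panics when no default entry was supplied.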
@@ -125,8 +124,7 @@ typedef int (*PFIvp)(void *); int vn_default_error(void) { - - return (ENOTSUP); + return ENOTSUP; } /* @@ -149,14 +148,14 @@ void vfs_opv_init(void) { int i, j, k; - int (***opv_desc_vector_p)(void *); - int (**opv_desc_vector)(void *); + int(***opv_desc_vector_p)(void *); + int(**opv_desc_vector)(void *); struct vnodeopv_entry_desc *opve_descp; /* * Allocate the dynamic vectors and fill them in. */ - for (i=0; vfs_opv_descs[i]; i++) { + for (i = 0; vfs_opv_descs[i]; i++) { opv_desc_vector_p = vfs_opv_descs[i]->opv_desc_vector_p; /* * Allocate and init the vector, if it needs it. @@ -164,19 +163,19 @@ vfs_opv_init(void) */ if (*opv_desc_vector_p == NULL) { MALLOC(*opv_desc_vector_p, PFIvp*, - vfs_opv_numops*sizeof(PFIvp), M_TEMP, M_WAITOK); - bzero (*opv_desc_vector_p, vfs_opv_numops*sizeof(PFIvp)); + vfs_opv_numops * sizeof(PFIvp), M_TEMP, M_WAITOK); + bzero(*opv_desc_vector_p, vfs_opv_numops * sizeof(PFIvp)); DODEBUG(printf("vector at %x allocated\n", opv_desc_vector_p)); } opv_desc_vector = *opv_desc_vector_p; - for (j=0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) { + for (j = 0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) { opve_descp = &(vfs_opv_descs[i]->opv_desc_ops[j]); /* Silently skip known-disabled operations */ if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) { printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n", - vfs_opv_descs[i], opve_descp->opve_op->vdesc_name); + vfs_opv_descs[i], opve_descp->opve_op->vdesc_name); continue; } @@ -198,18 +197,18 @@ vfs_opv_init(void) * list of supported operations. */ if (opve_descp->opve_op->vdesc_offset == 0 && - opve_descp->opve_op != - VDESC(vnop_default)) { + opve_descp->opve_op != + VDESC(vnop_default)) { printf("operation %s not listed in %s.\n", opve_descp->opve_op->vdesc_name, "vfs_op_descs"); - panic ("vfs_opv_init: bad operation"); + panic("vfs_opv_init: bad operation"); } /* * Fill in this entry. */ opv_desc_vector[opve_descp->opve_op->vdesc_offset] = - opve_descp->opve_impl; + opve_descp->opve_impl; } } /* @@ -222,13 +221,15 @@ vfs_opv_init(void) /* * Force every operations vector to have a default routine. */ - if (opv_desc_vector[VOFFSET(vnop_default)]==NULL) { + if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) { panic("vfs_opv_init: operation vector without default routine."); } - for (k = 0; k<vfs_opv_numops; k++) - if (opv_desc_vector[k] == NULL) - opv_desc_vector[k] = - opv_desc_vector[VOFFSET(vnop_default)]; + for (k = 0; k < vfs_opv_numops; k++) { + if (opv_desc_vector[k] == NULL) { + opv_desc_vector[k] = + opv_desc_vector[VOFFSET(vnop_default)]; + } + } } } @@ -248,8 +249,9 @@ vfs_op_init(void) /* * Set all vnode vectors to a well known value. */ - for (i=0; vfs_opv_descs[i]; i++) + for (i = 0; vfs_opv_descs[i]; i++) { *(vfs_opv_descs[i]->opv_desc_vector_p) = NULL; + } /* * Figure out how many ops there are by counting the table, * and assign each its offset. 
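Before those vectors can be sized, vfs_op_init(), reformatted in the hunks just above and below, numbers the operations: it zeroes every filesystem's vector pointer, then walks the NULL-terminated vfs_op_descs table handing out consecutive vdesc_offset values, leaving vfs_opv_numops as the vector length used by vfs_opv_init(). A compact sketch of that counting pass, with an invented table:

#include <stdio.h>

struct op_desc { const char *name; int offset; };

/* A NULL-terminated descriptor table, shaped like vfs_op_descs. */
static struct op_desc op_default = { "default", 0 };
static struct op_desc op_lookup  = { "lookup", 0 };
static struct op_desc op_read    = { "read", 0 };
static struct op_desc *op_descs[] = { &op_default, &op_lookup, &op_read, NULL };

int
main(void)
{
	int numops = 0;

	/* Walk the table once, assigning each op the next slot index. */
	for (int i = 0; op_descs[i]; i++) {
		op_descs[i]->offset = numops;
		numops++;
	}
	printf("numops=%d, read at offset %d\n", numops, op_read.offset);  /* 3, 2 */
	return 0;
}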
@@ -258,7 +260,7 @@ vfs_op_init(void) vfs_op_descs[i]->vdesc_offset = vfs_opv_numops; vfs_opv_numops++; } - DODEBUG(printf ("vfs_opv_numops=%d\n", vfs_opv_numops)); + DODEBUG(printf("vfs_opv_numops=%d\n", vfs_opv_numops)); } /* @@ -328,12 +330,12 @@ vfsinit(void) struct vfstable *vfsp; int i, maxtypenum; struct mount * mp; - + /* Allocate vnode list lock group attribute and group */ vnode_list_lck_grp_attr = lck_grp_attr_alloc_init(); - vnode_list_lck_grp = lck_grp_alloc_init("vnode list", vnode_list_lck_grp_attr); - + vnode_list_lck_grp = lck_grp_alloc_init("vnode list", vnode_list_lck_grp_attr); + /* Allocate vnode list lock attribute */ vnode_list_lck_attr = lck_attr_alloc_init(); @@ -347,9 +349,9 @@ vfsinit(void) pkg_extensions_lck = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr); /* allocate vnode lock group attribute and group */ - vnode_lck_grp_attr= lck_grp_attr_alloc_init(); + vnode_lck_grp_attr = lck_grp_attr_alloc_init(); - vnode_lck_grp = lck_grp_alloc_init("vnode", vnode_lck_grp_attr); + vnode_lck_grp = lck_grp_alloc_init("vnode", vnode_lck_grp_attr); /* Allocate vnode lock attribute */ vnode_lck_attr = lck_attr_alloc_init(); @@ -365,20 +367,20 @@ vfsinit(void) fd_vn_lck_attr = lck_attr_alloc_init(); /* Allocate fs config lock group attribute and group */ - fsconf_lck_grp_attr= lck_grp_attr_alloc_init(); + fsconf_lck_grp_attr = lck_grp_attr_alloc_init(); + + fsconf_lck_grp = lck_grp_alloc_init("fs conf", fsconf_lck_grp_attr); - fsconf_lck_grp = lck_grp_alloc_init("fs conf", fsconf_lck_grp_attr); - /* Allocate fs config lock attribute */ fsconf_lck_attr = lck_attr_alloc_init(); /* Allocate mount point related lock structures */ /* Allocate mount list lock group attribute and group */ - mnt_list_lck_grp_attr= lck_grp_attr_alloc_init(); + mnt_list_lck_grp_attr = lck_grp_attr_alloc_init(); + + mnt_list_lck_grp = lck_grp_alloc_init("mount list", mnt_list_lck_grp_attr); - mnt_list_lck_grp = lck_grp_alloc_init("mount list", mnt_list_lck_grp_attr); - /* Allocate mount list lock attribute */ mnt_list_lck_attr = lck_attr_alloc_init(); @@ -387,18 +389,18 @@ vfsinit(void) /* allocate mount lock group attribute and group */ - mnt_lck_grp_attr= lck_grp_attr_alloc_init(); + mnt_lck_grp_attr = lck_grp_attr_alloc_init(); - mnt_lck_grp = lck_grp_alloc_init("mount", mnt_lck_grp_attr); + mnt_lck_grp = lck_grp_alloc_init("mount", mnt_lck_grp_attr); /* Allocate mount lock attribute */ mnt_lck_attr = lck_attr_alloc_init(); /* Allocate sync lock */ - sync_mtx_lck_grp_attr = lck_grp_attr_alloc_init(); - sync_mtx_lck_grp = lck_grp_alloc_init("sync thread", sync_mtx_lck_grp_attr); - sync_mtx_lck_attr = lck_attr_alloc_init(); - sync_mtx_lck = lck_mtx_alloc_init(sync_mtx_lck_grp, sync_mtx_lck_attr); + sync_mtx_lck_grp_attr = lck_grp_attr_alloc_init(); + sync_mtx_lck_grp = lck_grp_alloc_init("sync thread", sync_mtx_lck_grp_attr); + sync_mtx_lck_attr = lck_attr_alloc_init(); + sync_mtx_lck = lck_mtx_alloc_init(sync_mtx_lck_grp, sync_mtx_lck_attr); /* * Initialize the vnode table @@ -427,12 +429,16 @@ vfsinit(void) maxtypenum = VT_NON; for (vfsp = vfsconf, i = 0; i < maxvfsslots; i++, vfsp++) { struct vfsconf vfsc; - if (vfsp->vfc_vfsops == (struct vfsops *)0) + if (vfsp->vfc_vfsops == (struct vfsops *)0) { break; - if (i) vfsconf[i-1].vfc_next = vfsp; - if (maxtypenum <= vfsp->vfc_typenum) + } + if (i) { + vfsconf[i - 1].vfc_next = vfsp; + } + if (maxtypenum <= vfsp->vfc_typenum) { maxtypenum = vfsp->vfc_typenum + 1; - + } + bzero(&vfsc, sizeof(struct vfsconf)); vfsc.vfc_reserved1 = 0; 
bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name)); @@ -445,16 +451,16 @@ vfsinit(void) if (vfsp->vfc_vfsops->vfs_sysctl) { struct sysctl_oid *oidp = NULL; struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, vfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", ""); - + MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK); *oidp = oid; - + /* Memory for VFS oid held by vfsentry forever */ vfsp->vfc_sysctl = oidp; oidp->oid_name = vfsp->vfc_name; sysctl_register_oid(vfsp->vfc_sysctl); } - + (*vfsp->vfc_vfsops->vfs_init)(&vfsc); numused_vfsslots++; @@ -474,12 +480,12 @@ vfsinit(void) #if QUOTA dqinit(); #endif - - /* + + /* * create a mount point for dead vnodes */ MALLOC_ZONE(mp, struct mount *, sizeof(struct mount), - M_MOUNT, M_WAITOK); + M_MOUNT, M_WAITOK); bzero((char *)mp, sizeof(struct mount)); /* Initialize the default IO constraints */ mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS; @@ -493,7 +499,7 @@ vfsinit(void) mp->mnt_ioflags = 0; mp->mnt_realrootvp = NULLVP; mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL; - + TAILQ_INIT(&mp->mnt_vnodelist); TAILQ_INIT(&mp->mnt_workerqueue); TAILQ_INIT(&mp->mnt_newvnodes); @@ -515,7 +521,7 @@ vfsinit(void) void vnode_list_lock(void) { - lck_spin_lock(vnode_list_spin_lock); + lck_spin_lock_grp(vnode_list_spin_lock, vnode_list_lck_grp); } void @@ -581,14 +587,14 @@ vfstable_add(struct vfstable *nvfsp) struct vfstable *slotp, *allocated = NULL; struct sysctl_oid *oidp = NULL; - + if (nvfsp->vfc_vfsops->vfs_sysctl) { struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, nvfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", ""); - + MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK); *oidp = oid; } - + /* * Find the next empty slot; we recognize an empty slot by a * NULL-valued ->vfc_vfsops, so if we delete a VFS, we must @@ -597,15 +603,16 @@ vfstable_add(struct vfstable *nvfsp) findslot: mount_list_lock(); for (slot = 0; slot < maxvfsslots; slot++) { - if (vfsconf[slot].vfc_vfsops == NULL) + if (vfsconf[slot].vfc_vfsops == NULL) { break; + } } if (slot == maxvfsslots) { if (allocated == NULL) { mount_list_unlock(); /* out of static slots; allocate one instead */ MALLOC(allocated, struct vfstable *, sizeof(struct vfstable), - M_TEMP, M_WAITOK); + M_TEMP, M_WAITOK); goto findslot; } else { slotp = allocated; @@ -643,13 +650,13 @@ findslot: } mount_list_unlock(); - + if (allocated && allocated != slotp) { /* did allocation, but ended up using static slot */ FREE(allocated, M_TEMP); } - return(slotp); + return slotp; } /* @@ -680,14 +687,15 @@ vfstable_del(struct vfstable * vtbl) * will contain the address of the pointer to the entry to * be removed. */ - for( vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) { - if (*vcpp == vtbl) + for (vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) { + if (*vcpp == vtbl) { break; + } } - if (*vcpp == NULL) - return(ESRCH); /* vtbl not on vfsconf list */ - + if (*vcpp == NULL) { + return ESRCH; /* vtbl not on vfsconf list */ + } if ((*vcpp)->vfc_sysctl) { sysctl_unregister_oid((*vcpp)->vfc_sysctl); (*vcpp)->vfc_sysctl->oid_name = NULL; @@ -704,12 +712,12 @@ vfstable_del(struct vfstable * vtbl) * seeing if the pointer to the object to be deleted places * the object in the address space containing the table (or not). 
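
vfstable_add()'s findslot loop above, and the address-range test that follows in vfstable_del(), form one pattern: prefer a fixed-size static table, fall back to a heap entry when it fills, never allocate while holding the list lock (allocation can sleep), and re-scan from scratch after reacquiring it. A user-space sketch of both halves, assuming a pthread mutex in place of mount_list_lock():

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    #define NSLOTS 8
    struct entry { void *payload; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry table[NSLOTS];     /* static slots; payload==NULL means free */

    struct entry *
    slot_add(void *payload)
    {
        struct entry *allocated = NULL;
    findslot:
        pthread_mutex_lock(&list_lock);
        for (int i = 0; i < NSLOTS; i++) {
            if (table[i].payload == NULL) {
                table[i].payload = payload;
                pthread_mutex_unlock(&list_lock);
                free(allocated);           /* raced: a static slot freed up */
                return &table[i];
            }
        }
        if (allocated == NULL) {
            /* out of static slots; drop the lock before a blocking
             * allocation, then re-scan, exactly like the findslot: retry */
            pthread_mutex_unlock(&list_lock);
            if ((allocated = malloc(sizeof(*allocated))) == NULL) {
                return NULL;
            }
            goto findslot;
        }
        allocated->payload = payload;
        pthread_mutex_unlock(&list_lock);
        return allocated;
    }

    void
    slot_del(struct entry *e)
    {
        /* vfstable_del()'s trick: membership in the static table is decided
         * purely by pointer range; only overflow entries are freed. */
        if (e >= table && e < table + NSLOTS) {
            memset(e, 0, sizeof(*e));      /* mark the static slot empty for reuse */
        } else {
            free(e);
        }
    }
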
*/ - if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) { /* Y */ + if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) { /* Y */ /* Mark as empty for vfscon_add() */ bzero(vcdelp, sizeof(struct vfstable)); numregistered_fses--; numused_vfsslots--; - } else { /* N */ + } else { /* N */ /* * This entry was dynamically allocated; we must free it; * we would prefer to have just linked the caller's @@ -726,7 +734,7 @@ vfstable_del(struct vfstable * vtbl) lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED); #endif /* DEBUG */ - return(0); + return 0; } void @@ -740,4 +748,3 @@ SPECHASH_UNLOCK(void) { lck_mtx_unlock(spechash_mtx_lock); } - diff --git a/bsd/vfs/vfs_lookup.c b/bsd/vfs/vfs_lookup.c index ccee2e1c5..2764ecc73 100644 --- a/bsd/vfs/vfs_lookup.c +++ b/bsd/vfs/vfs_lookup.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -85,7 +85,7 @@ #include #include #include -#include /* For _PC_NAME_MAX */ +#include /* For _PC_NAME_MAX */ #include #include #include @@ -110,17 +110,17 @@ static int vfs_getrealpath(const char * path, char * realpath, size_t bufsize, v #define MAX_VOLFS_RESTARTS 5 #endif -static int lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, int vbusyflags, vfs_context_t ctx); -static int lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx); -static int lookup_authorize_search(vnode_t dp, struct componentname *cnp, int dp_authorized_in_cache, vfs_context_t ctx); -static void lookup_consider_update_cache(vnode_t dvp, vnode_t vp, struct componentname *cnp, int nc_generation); -static int lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int rdonly, - int vbusyflags, int *keep_going, int nc_generation, - int wantparent, int atroot, vfs_context_t ctx); -static int lookup_handle_emptyname(struct nameidata *ndp, struct componentname *cnp, int wantparent); +static int lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, int vbusyflags, vfs_context_t ctx); +static int lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx); +static int lookup_authorize_search(vnode_t dp, struct componentname *cnp, int dp_authorized_in_cache, vfs_context_t ctx); +static void lookup_consider_update_cache(vnode_t dvp, vnode_t vp, struct componentname *cnp, int nc_generation); +static int lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int rdonly, + int vbusyflags, int *keep_going, int nc_generation, + int wantparent, int atroot, vfs_context_t ctx); +static int lookup_handle_emptyname(struct nameidata *ndp, struct componentname *cnp, int wantparent); #if NAMEDRSRCFORK -static int lookup_handle_rsrc_fork(vnode_t dp, struct nameidata *ndp, struct componentname *cnp, int wantparent, vfs_context_t ctx); +static int lookup_handle_rsrc_fork(vnode_t dp, struct nameidata *ndp, struct componentname *cnp, int wantparent, vfs_context_t ctx); #endif @@ -163,10 +163,10 @@ static int lookup_handle_rsrc_fork(vnode_t dp, struct nameidata *ndp, struct co int namei(struct nameidata *ndp) { - struct filedesc *fdp; /* pointer to file descriptor state */ - struct vnode *dp; /* the directory we are searching */ + struct filedesc *fdp; /* pointer to file descriptor state */ + struct vnode *dp; /* the directory we are searching */ struct vnode *usedvp = ndp->ni_dvp; /* store pointer to vp in case we must loop due to - heavy vnode pressure */ + * heavy vnode pressure */ u_long cnpflags = ndp->ni_cnd.cn_flags; /* store in case we have to restore after loop */ int error; struct componentname *cnp = &ndp->ni_cnd; @@ -185,12 +185,15 @@ namei(struct nameidata *ndp) fdp = p->p_fd; #if DIAGNOSTIC - if (!vfs_context_ucred(ctx) || !p) - panic ("namei: bad cred/proc"); - if (cnp->cn_nameiop & (~OPMASK)) - panic ("namei: nameiop contaminated with flags"); - if (cnp->cn_flags & OPMASK) - panic ("namei: flags contaminated with nameiops"); + if (!vfs_context_ucred(ctx) || !p) { + panic("namei: bad cred/proc"); + } + if (cnp->cn_nameiop & (~OPMASK)) { + panic("namei: nameiop contaminated with flags"); + } + if (cnp->cn_flags & OPMASK) { + panic("namei: flags contaminated with nameiops"); + } #endif /* @@ -207,10 +210,11 @@ namei(struct nameidata *ndp) ndp->ni_flag &= ~(NAMEI_CONTLOOKUP); - error = lookup_handle_found_vnode(ndp, &ndp->ni_cnd, rdonly, 
vbusyflags, - &keep_going, ndp->ni_ncgeneration, wantparent, 0, ctx); - if (error) + error = lookup_handle_found_vnode(ndp, &ndp->ni_cnd, rdonly, vbusyflags, + &keep_going, ndp->ni_ncgeneration, wantparent, 0, ctx); + if (error) { goto out_drop; + } if (keep_going) { if ((cnp->cn_flags & ISSYMLINK) == 0) { panic("We need to keep going on a continued lookup, but for vp type %d (tag %d)\n", ndp->ni_vp->v_type, ndp->ni_vp->v_tag); @@ -219,7 +223,6 @@ namei(struct nameidata *ndp) } return 0; - } vnode_recycled: @@ -234,19 +237,19 @@ vnode_recycled: } #if LP64_DEBUG if ((UIO_SEG_IS_USER_SPACE(ndp->ni_segflg) == 0) - && (ndp->ni_segflg != UIO_SYSSPACE) - && (ndp->ni_segflg != UIO_SYSSPACE32)) { - panic("%s :%d - invalid ni_segflg\n", __FILE__, __LINE__); + && (ndp->ni_segflg != UIO_SYSSPACE) + && (ndp->ni_segflg != UIO_SYSSPACE32)) { + panic("%s :%d - invalid ni_segflg\n", __FILE__, __LINE__); } #endif /* LP64_DEBUG */ retry_copy: if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) { error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf, - cnp->cn_pnlen, &bytes_copied); + cnp->cn_pnlen, &bytes_copied); } else { error = copystr(CAST_DOWN(void *, ndp->ni_dirp), cnp->cn_pnbuf, - cnp->cn_pnlen, &bytes_copied); + cnp->cn_pnlen, &bytes_copied); } if (error == ENAMETOOLONG && !(cnp->cn_flags & HASBUF)) { MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); @@ -258,11 +261,12 @@ retry_copy: cnp->cn_flags |= HASBUF; cnp->cn_pnlen = MAXPATHLEN; bytes_copied = 0; - + goto retry_copy; } - if (error) - goto error_out; + if (error) { + goto error_out; + } ndp->ni_pathlen = bytes_copied; bytes_copied = 0; @@ -273,11 +277,12 @@ retry_copy: * component. Note: the FS may still consult the cache, * but can apply rules to validate the results. */ - if (proc_is_forcing_hfs_case_sensitivity(p)) + if (proc_is_forcing_hfs_case_sensitivity(p)) { cnp->cn_flags |= CN_SKIPNAMECACHE; + } #if CONFIG_VOLFS - /* + /* * Check for legacy volfs style pathnames. * * For compatibility reasons we currently allow these paths, @@ -289,21 +294,21 @@ retry_copy: cnp->cn_pnbuf[2] == 'v' && cnp->cn_pnbuf[3] == 'o' && cnp->cn_pnbuf[4] == 'l' && - cnp->cn_pnbuf[5] == '/' ) { + cnp->cn_pnbuf[5] == '/') { char * realpath; int realpath_err; /* Attempt to resolve a legacy volfs style pathname. */ MALLOC_ZONE(realpath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK); if (realpath) { - /* + /* * We only error out on the ENAMETOOLONG cases where we know that * vfs_getrealpath translation succeeded but the path could not fit into * MAXPATHLEN characters. 
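
The retry_copy loop above is a two-tier buffer strategy: namei() first copies the user path into the small buffer embedded in the componentname, and only on ENAMETOOLONG upgrades to a full MAXPATHLEN allocation and retries once. A user-space sketch of the same shape, with copy_name() standing in for copyinstr():

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    #define BIGSZ 1024                     /* stands in for MAXPATHLEN */

    /* Stand-in for copyinstr(): fails if the destination is too small. */
    static int
    copy_name(const char *src, char *dst, size_t len)
    {
        size_t need = strlen(src) + 1;
        if (need > len) {
            return ENAMETOOLONG;
        }
        memcpy(dst, src, need);
        return 0;
    }

    /* On success *out is either the caller's small buffer or a heap upgrade;
     * the caller frees *out only when it differs from 'small'. */
    int
    get_path(const char *user_path, char *small, size_t smallsz, char **out)
    {
        char *buf = small;
        size_t len = smallsz;
        int error;

    retry_copy:
        error = copy_name(user_path, buf, len);
        if (error == ENAMETOOLONG && buf == small) {
            if ((buf = malloc(BIGSZ)) == NULL) {
                return ENOMEM;
            }
            len = BIGSZ;
            goto retry_copy;               /* one retry with the big buffer */
        }
        if (error) {
            if (buf != small) {
                free(buf);
            }
            return error;
        }
        *out = buf;
        return 0;
    }
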
In other failure cases, we may be dealing with a path * that legitimately looks like /.vol/1234/567 and is not meant to be translated */ - if ((realpath_err= vfs_getrealpath(&cnp->cn_pnbuf[6], realpath, MAXPATHLEN, ctx))) { + if ((realpath_err = vfs_getrealpath(&cnp->cn_pnbuf[6], realpath, MAXPATHLEN, ctx))) { FREE_ZONE(realpath, MAXPATHLEN, M_NAMEI); - if (realpath_err == ENOSPC || realpath_err == ENAMETOOLONG){ + if (realpath_err == ENOSPC || realpath_err == ENAMETOOLONG) { error = ENAMETOOLONG; goto error_out; } @@ -322,10 +327,12 @@ retry_copy: #if CONFIG_AUDIT /* If we are auditing the kernel pathname, save the user pathname */ - if (cnp->cn_flags & AUDITVNPATH1) - AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH1); - if (cnp->cn_flags & AUDITVNPATH2) - AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH2); + if (cnp->cn_flags & AUDITVNPATH1) { + AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH1); + } + if (cnp->cn_flags & AUDITVNPATH2) { + AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH2); + } #endif /* CONFIG_AUDIT */ /* @@ -341,27 +348,29 @@ retry_copy: * determine the starting point for the translation. */ if ((ndp->ni_rootdir = fdp->fd_rdir) == NULLVP) { - if ( !(fdp->fd_flags & FD_CHROOT)) - ndp->ni_rootdir = rootvnode; + if (!(fdp->fd_flags & FD_CHROOT)) { + ndp->ni_rootdir = rootvnode; + } } cnp->cn_nameptr = cnp->cn_pnbuf; ndp->ni_usedvp = NULLVP; if (*(cnp->cn_nameptr) == '/') { - while (*(cnp->cn_nameptr) == '/') { - cnp->cn_nameptr++; + while (*(cnp->cn_nameptr) == '/') { + cnp->cn_nameptr++; ndp->ni_pathlen--; } dp = ndp->ni_rootdir; } else if (cnp->cn_flags & USEDVP) { - dp = ndp->ni_dvp; + dp = ndp->ni_dvp; ndp->ni_usedvp = dp; - } else - dp = vfs_context_cwd(ctx); + } else { + dp = vfs_context_cwd(ctx); + } if (dp == NULLVP || (dp->v_lflag & VL_DEAD)) { - error = ENOENT; + error = ENOENT; goto error_out; } ndp->ni_dvp = NULLVP; @@ -385,7 +394,7 @@ retry_copy: ndp->ni_startdir = dp; - if ( (error = lookup(ndp)) ) { + if ((error = lookup(ndp))) { goto error_out; } @@ -393,7 +402,7 @@ retry_copy: * Check for symbolic link */ if ((cnp->cn_flags & ISSYMLINK) == 0) { - return (0); + return 0; } continue_symlink: @@ -410,12 +419,14 @@ continue_symlink: * up in the lookup routine */ out_drop: - if (ndp->ni_dvp) - vnode_put(ndp->ni_dvp); - if (ndp->ni_vp) - vnode_put(ndp->ni_vp); - error_out: - if ( (cnp->cn_flags & HASBUF) ) { + if (ndp->ni_dvp) { + vnode_put(ndp->ni_dvp); + } + if (ndp->ni_vp) { + vnode_put(ndp->ni_vp); + } +error_out: + if ((cnp->cn_flags & HASBUF)) { cnp->cn_flags &= ~HASBUF; FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI); } @@ -425,25 +436,25 @@ out_drop: #if CONFIG_VOLFS /* - * Deal with volfs fallout. + * Deal with volfs fallout. * * At this point, if we were originally given a volfs path that * looks like /.vol/123/456, then we would have had to convert it into * a full path. Assuming that part worked properly, we will now attempt - * to conduct a lookup of the item in the namespace. Under normal - * circumstances, if a user looked up /tmp/foo and it was not there, it - * would be permissible to return ENOENT. + * to conduct a lookup of the item in the namespace. Under normal + * circumstances, if a user looked up /tmp/foo and it was not there, it + * would be permissible to return ENOENT. * * However, we may not want to do that here. Specifically, the volfs path * uniquely identifies a certain item in the namespace regardless of where it * lives. 
If the item has moved in between the time we constructed the * path and now, when we're trying to do a lookup/authorization on the full - * path, we may have gotten an ENOENT. + * path, we may have gotten an ENOENT. + * + * At this point we can no longer tell if the path no longer exists + * or if the item in question no longer exists. It could have been renamed + * away, in which case the /.vol identifier is still valid. * - * At this point we can no longer tell if the path no longer exists - * or if the item in question no longer exists. It could have been renamed - * away, in which case the /.vol identifier is still valid. - * * Do this dance a maximum of MAX_VOLFS_RESTARTS times. */ if ((error == ENOENT) && (ndp->ni_cnd.cn_flags & CN_VOLFSPATH)) { @@ -454,19 +465,19 @@ out_drop: } #endif - if (error == ERECYCLE){ - /* vnode was recycled underneath us. re-drive lookup to start at - the beginning again, since recycling invalidated last lookup*/ + if (error == ERECYCLE) { + /* vnode was recycled underneath us. re-drive lookup to start at + * the beginning again, since recycling invalidated last lookup*/ ndp->ni_cnd.cn_flags = cnpflags; ndp->ni_dvp = usedvp; goto vnode_recycled; } - return (error); + return error; } -int +int namei_compound_available(vnode_t dp, struct nameidata *ndp) { if ((ndp->ni_flag & NAMEI_COMPOUNDOPEN) != 0) { @@ -487,20 +498,22 @@ lookup_authorize_search(vnode_t dp, struct componentname *cnp, int dp_authorized if (!dp_authorized_in_cache) { error = vnode_authorize(dp, NULL, KAUTH_VNODE_SEARCH, ctx); - if (error) + if (error) { return error; + } } #if CONFIG_MACF error = mac_vnode_check_lookup(ctx, dp, cnp); - if (error) + if (error) { return error; + } #endif /* CONFIG_MACF */ return 0; } -static void -lookup_consider_update_cache(vnode_t dvp, vnode_t vp, struct componentname *cnp, int nc_generation) +static void +lookup_consider_update_cache(vnode_t dvp, vnode_t vp, struct componentname *cnp, int nc_generation) { int isdot_or_dotdot; isdot_or_dotdot = (cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.') || (cnp->cn_flags & ISDOTDOT); @@ -509,16 +522,19 @@ lookup_consider_update_cache(vnode_t dvp, vnode_t vp, struct componentname *cnp, int update_flags = 0; if (isdot_or_dotdot == 0) { - if (vp->v_name == NULL) + if (vp->v_name == NULL) { update_flags |= VNODE_UPDATE_NAME; - if (dvp != NULLVP && vp->v_parent == NULLVP) + } + if (dvp != NULLVP && vp->v_parent == NULLVP) { update_flags |= VNODE_UPDATE_PARENT; + } - if (update_flags) + if (update_flags) { vnode_update_identity(vp, dvp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, update_flags); + } } } - if ( (cnp->cn_flags & MAKEENTRY) && (vp->v_flag & VNCACHEABLE) && LIST_FIRST(&vp->v_nclinks) == NULL) { + if ((cnp->cn_flags & MAKEENTRY) && (vp->v_flag & VNCACHEABLE) && LIST_FIRST(&vp->v_nclinks) == NULL) { /* * missing from name cache, but should * be in it... this can happen if volfs @@ -532,16 +548,16 @@ lookup_consider_update_cache(vnode_t dvp, vnode_t vp, struct componentname *cnp, * rechecked behind the name cache lock, but if it * already fails to match, no need to go any further */ - if (dvp != NULLVP && (nc_generation == dvp->v_nc_generation) && (!isdot_or_dotdot)) + if (dvp != NULLVP && (nc_generation == dvp->v_nc_generation) && (!isdot_or_dotdot)) { cache_enter_with_gen(dvp, vp, cnp, nc_generation); + } } - } #if NAMEDRSRCFORK /* * Can change ni_dvp and ni_vp. On success, returns with iocounts on stream vnode (always) and - * data fork if requested. 
On failure, returns with iocount data fork (always) and its parent directory + * data fork if requested. On failure, returns with iocount data fork (always) and its parent directory * (if one was provided). */ static int @@ -557,39 +573,40 @@ lookup_handle_rsrc_fork(vnode_t dp, struct nameidata *ndp, struct componentname goto out; } switch (cnp->cn_nameiop) { - case DELETE: - if (cnp->cn_flags & CN_ALLOWRSRCFORK) { - nsop = NS_DELETE; - } else { - error = EPERM; - goto out; - } - break; - case CREATE: - if (cnp->cn_flags & CN_ALLOWRSRCFORK) { - nsop = NS_CREATE; - } else { - error = EPERM; - goto out; - } - break; - case LOOKUP: - /* Make sure our lookup of "/..namedfork/rsrc" is allowed. */ - if (cnp->cn_flags & CN_ALLOWRSRCFORK) { - nsop = NS_OPEN; - } else { - error = EPERM; - goto out; - } - break; - default: + case DELETE: + if (cnp->cn_flags & CN_ALLOWRSRCFORK) { + nsop = NS_DELETE; + } else { + error = EPERM; + goto out; + } + break; + case CREATE: + if (cnp->cn_flags & CN_ALLOWRSRCFORK) { + nsop = NS_CREATE; + } else { + error = EPERM; + goto out; + } + break; + case LOOKUP: + /* Make sure our lookup of "/..namedfork/rsrc" is allowed. */ + if (cnp->cn_flags & CN_ALLOWRSRCFORK) { + nsop = NS_OPEN; + } else { error = EPERM; goto out; + } + break; + default: + error = EPERM; + goto out; } nsflags = 0; - if (cnp->cn_flags & CN_RAW_ENCRYPTED) + if (cnp->cn_flags & CN_RAW_ENCRYPTED) { nsflags |= NS_GETRAWENCRYPTED; + } /* Ask the file system for the resource fork. */ error = vnode_getnamedstream(dp, &svp, XATTR_RESOURCEFORK_NAME, nsop, nsflags, ctx); @@ -597,7 +614,7 @@ lookup_handle_rsrc_fork(vnode_t dp, struct nameidata *ndp, struct componentname /* During a create, it OK for stream vnode to be missing. */ if (error == ENOATTR || error == ENOENT) { error = (nsop == NS_CREATE) ? 0 : ENOENT; - } + } if (error) { goto out; } @@ -626,22 +643,22 @@ out: /* * iocounts in: - * --One on ni_vp. One on ni_dvp if there is more path, or we didn't come through the - * cache, or we came through the cache and the caller doesn't want the parent. + * --One on ni_vp. One on ni_dvp if there is more path, or we didn't come through the + * cache, or we came through the cache and the caller doesn't want the parent. * * iocounts out: * --Leaves us in the correct state for the next step, whatever that might be. * --If we find a symlink, returns with iocounts on both ni_vp and ni_dvp. * --If we are to look up another component, then we have an iocount on ni_vp and - * nothing else. + * nothing else. * --If we are done, returns an iocount on ni_vp, and possibly on ni_dvp depending on nameidata flags. * --In the event of an error, may return with ni_dvp NULL'ed out (in which case, iocount * was dropped). 
*/ -static int -lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int rdonly, - int vbusyflags, int *keep_going, int nc_generation, - int wantparent, int atroot, vfs_context_t ctx) +static int +lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int rdonly, + int vbusyflags, int *keep_going, int nc_generation, + int wantparent, int atroot, vfs_context_t ctx) { vnode_t dp; int error; @@ -685,8 +702,9 @@ lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int #if CONFIG_MACF if (vfs_flags(vnode_mount(dp)) & MNT_MULTILABEL) { error = vnode_label(vnode_mount(dp), NULL, dp, NULL, 0, ctx); - if (error) + if (error) { goto out; + } } #endif @@ -697,7 +715,7 @@ lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int ((cnp->cn_flags & FOLLOW) || (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/')) { cnp->cn_flags |= ISSYMLINK; *keep_going = 1; - return (0); + return 0; } /* @@ -709,15 +727,15 @@ lookup_handle_found_vnode(struct nameidata *ndp, struct componentname *cnp, int goto out; } ndp->ni_flag &= ~(NAMEI_TRAILINGSLASH); - } + } #if NAMEDSTREAMS - /* + /* * Deny namei/lookup requests to resolve paths that point to shadow files. * Access to shadow files must be conducted by explicit calls to VNOP_LOOKUP * directly, and not use lookup/namei */ - if (vnode_isshadow (dp)) { + if (vnode_isshadow(dp)) { error = ENOENT; goto out; } @@ -728,7 +746,7 @@ nextname: * Not a symbolic link. If more pathname, * continue at next component, else return. * - * Definitely have a dvp if there's another slash + * Definitely have a dvp if there's another slash */ if (*ndp->ni_next == '/') { cnp->cn_nameptr = ndp->ni_next + 1; @@ -749,7 +767,7 @@ nextname: *keep_going = 1; return 0; } - + /* * Disallow directory write attempts on read-only file systems. */ @@ -758,17 +776,17 @@ nextname: error = EROFS; goto out; } - + /* If SAVESTART is set, we should have a dvp */ if (cnp->cn_flags & SAVESTART) { - /* + /* * note that we already hold a reference * on both dp and ni_dvp, but for some reason * can't get another one... in this case we * need to do vnode_put on dp in 'bad2' */ - if ( (vnode_get(ndp->ni_dvp)) ) { - error = ENOENT; + if ((vnode_get(ndp->ni_dvp))) { + error = ENOENT; goto out; } ndp->ni_startdir = ndp->ni_dvp; @@ -778,10 +796,11 @@ nextname: ndp->ni_dvp = NULLVP; } - if (cnp->cn_flags & AUDITVNPATH1) + if (cnp->cn_flags & AUDITVNPATH1) { AUDIT_ARG(vnpath, dp, ARG_VNODE1); - else if (cnp->cn_flags & AUDITVNPATH2) + } else if (cnp->cn_flags & AUDITVNPATH2) { AUDIT_ARG(vnpath, dp, ARG_VNODE2); + } #if NAMEDRSRCFORK /* @@ -789,32 +808,34 @@ nextname: */ if ((cnp->cn_flags & CN_WANTSRSRCFORK) && (dp != NULLVP)) { error = lookup_handle_rsrc_fork(dp, ndp, cnp, wantparent, ctx); - if (error != 0) + if (error != 0) { goto out; + } dp = ndp->ni_vp; } #endif - if (kdebug_enable) - kdebug_lookup(ndp->ni_vp, cnp); + if (kdebug_enable) { + kdebug_lookup(ndp->ni_vp, cnp); + } return 0; emptyname: error = lookup_handle_emptyname(ndp, cnp, wantparent); - if (error != 0) + if (error != 0) { goto out; + } return 0; out: return error; - } /* * Comes in iocount on ni_vp. May overwrite ni_dvp, but doesn't interpret incoming value. 
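
The ISSYMLINK decision above compresses into one predicate: a symlink is expanded only if the caller asked for FOLLOW, the path carries a trailing slash, or more components remain. Restated as a stand-alone function, with booleans replacing the flag words:

    #include <stdbool.h>

    bool
    should_follow_symlink(bool is_symlink, bool follow, bool trailing_slash,
        char next_char)
    {
        /* mirrors: (dp->v_type == VLNK) && ((cn_flags & FOLLOW) ||
         *          (ni_flag & NAMEI_TRAILINGSLASH) || *ni_next == '/') */
        return is_symlink && (follow || trailing_slash || next_char == '/');
    }
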
*/ -static int +static int lookup_handle_emptyname(struct nameidata *ndp, struct componentname *cnp, int wantparent) { vnode_t dp; @@ -835,14 +856,14 @@ lookup_handle_emptyname(struct nameidata *ndp, struct componentname *cnp, int wa goto out; } if (wantparent) { - /* + /* * note that we already hold a reference * on dp, but for some reason can't * get another one... in this case we * need to do vnode_put on dp in 'bad' */ - if ( (vnode_get(dp)) ) { - error = ENOENT; + if ((vnode_get(dp))) { + error = ENOENT; goto out; } ndp->ni_dvp = dp; @@ -852,12 +873,14 @@ lookup_handle_emptyname(struct nameidata *ndp, struct componentname *cnp, int wa ndp->ni_next = cnp->cn_nameptr; ndp->ni_vp = dp; - if (cnp->cn_flags & AUDITVNPATH1) + if (cnp->cn_flags & AUDITVNPATH1) { AUDIT_ARG(vnpath, dp, ARG_VNODE1); - else if (cnp->cn_flags & AUDITVNPATH2) + } else if (cnp->cn_flags & AUDITVNPATH2) { AUDIT_ARG(vnpath, dp, ARG_VNODE2); - if (cnp->cn_flags & SAVESTART) + } + if (cnp->cn_flags & SAVESTART) { panic("lookup: SAVESTART"); + } return 0; out: @@ -884,7 +907,7 @@ out: * the target is returned locked, otherwise it is returned unlocked. * When creating or renaming and LOCKPARENT is specified, the target may not * be ".". When deleting and LOCKPARENT is specified, the target may be ".". - * + * * Overall outline of lookup: * * dirloop: @@ -922,12 +945,12 @@ out: int lookup(struct nameidata *ndp) { - char *cp; /* pointer into pathname argument */ - vnode_t tdp; /* saved dp */ - vnode_t dp; /* the directory we are searching */ - int docache = 1; /* == 0 do not cache last component */ - int wantparent; /* 1 => wantparent or lockparent flag */ - int rdonly; /* lookup read-only flag bit */ + char *cp; /* pointer into pathname argument */ + vnode_t tdp; /* saved dp */ + vnode_t dp; /* the directory we are searching */ + int docache = 1; /* == 0 do not cache last component */ + int wantparent; /* 1 => wantparent or lockparent flag */ + int rdonly; /* lookup read-only flag bit */ int dp_authorized = 0; int error = 0; struct componentname *cnp = &ndp->ni_cnd; @@ -942,7 +965,7 @@ lookup(struct nameidata *ndp) * Setup: break out flag bits into variables. */ if (cnp->cn_flags & NOCACHE) { - docache = 0; + docache = 0; } wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT); rdonly = cnp->cn_flags & RDONLY; @@ -952,14 +975,15 @@ lookup(struct nameidata *ndp) dp = ndp->ni_startdir; ndp->ni_startdir = NULLVP; - if ((cnp->cn_flags & CN_NBMOUNTLOOK) != 0) - vbusyflags = LK_NOWAIT; + if ((cnp->cn_flags & CN_NBMOUNTLOOK) != 0) { + vbusyflags = LK_NOWAIT; + } cp = cnp->cn_nameptr; if (*cp == '\0') { - if ( (vnode_getwithref(dp)) ) { + if ((vnode_getwithref(dp))) { dp = NULLVP; - error = ENOENT; + error = ENOENT; goto bad; } ndp->ni_vp = dp; @@ -970,26 +994,28 @@ lookup(struct nameidata *ndp) return 0; } -dirloop: +dirloop: atroot = 0; ndp->ni_vp = NULLVP; - if ( (error = cache_lookup_path(ndp, cnp, dp, ctx, &dp_authorized, last_dp)) ) { + if ((error = cache_lookup_path(ndp, cnp, dp, ctx, &dp_authorized, last_dp))) { dp = NULLVP; goto bad; } if ((cnp->cn_flags & ISLASTCN)) { - if (docache) - cnp->cn_flags |= MAKEENTRY; - } else - cnp->cn_flags |= MAKEENTRY; + if (docache) { + cnp->cn_flags |= MAKEENTRY; + } + } else { + cnp->cn_flags |= MAKEENTRY; + } dp = ndp->ni_dvp; if (ndp->ni_vp != NULLVP) { - /* + /* * cache_lookup_path returned a non-NULL ni_vp then, - * we're guaranteed that the dp is a VDIR, it's + * we're guaranteed that the dp is a VDIR, it's * been authorized, and vp is not ".." 
* * make sure we don't try to enter the name back into @@ -997,10 +1023,11 @@ dirloop: * check since we won't have serialized behind whatever * activity is occurring in the FS that caused the purge */ - if (dp != NULLVP) - nc_generation = dp->v_nc_generation - 1; + if (dp != NULLVP) { + nc_generation = dp->v_nc_generation - 1; + } - goto returned_from_lookup_path; + goto returned_from_lookup_path; } /* @@ -1013,14 +1040,14 @@ dirloop: * vnode which was mounted on so we take the * .. in the other file system. */ - if ( (cnp->cn_flags & ISDOTDOT) ) { + if ((cnp->cn_flags & ISDOTDOT)) { /* * if this is a chroot'ed process, check if the current * directory is still a subdirectory of the process's * root directory. */ if (ndp->ni_rootdir && (ndp->ni_rootdir != rootvnode) && - dp != ndp->ni_rootdir) { + dp != ndp->ni_rootdir) { int sdir_error; int is_subdir = FALSE; @@ -1049,8 +1076,8 @@ dirloop: } for (;;) { - if (dp == ndp->ni_rootdir || dp == rootvnode) { - ndp->ni_dvp = dp; + if (dp == ndp->ni_rootdir || dp == rootvnode) { + ndp->ni_dvp = dp; ndp->ni_vp = dp; /* * we're pinned at the root @@ -1060,7 +1087,7 @@ dirloop: * if we fail to get the new reference, we'll * drop our original down in 'bad' */ - if ( (vnode_get(dp)) ) { + if ((vnode_get(dp))) { error = ENOENT; goto bad; } @@ -1068,10 +1095,11 @@ dirloop: goto returned_from_lookup_path; } if ((dp->v_flag & VROOT) == 0 || - (cnp->cn_flags & NOCROSSMOUNT)) - break; - if (dp->v_mount == NULL) { /* forced umount */ - error = EBADF; + (cnp->cn_flags & NOCROSSMOUNT)) { + break; + } + if (dp->v_mount == NULL) { /* forced umount */ + error = EBADF; goto bad; } tdp = dp; @@ -1079,8 +1107,8 @@ dirloop: vnode_put(tdp); - if ( (vnode_getwithref(dp)) ) { - dp = NULLVP; + if ((vnode_getwithref(dp))) { + dp = NULLVP; error = ENOENT; goto bad; } @@ -1096,10 +1124,10 @@ unionlookup: ndp->ni_vp = NULLVP; if (dp->v_type != VDIR) { - error = ENOTDIR; - goto lookup_error; + error = ENOTDIR; + goto lookup_error; } - if ( (cnp->cn_flags & DONOTAUTH) != DONOTAUTH ) { + if ((cnp->cn_flags & DONOTAUTH) != DONOTAUTH) { error = lookup_authorize_search(dp, cnp, dp_authorized, ctx); if (error) { goto lookup_error; @@ -1111,8 +1139,8 @@ unionlookup: * will be doing a batched operation. Return an iocount on dvp. */ #if NAMEDRSRCFORK - if ((cnp->cn_flags & ISLASTCN) && namei_compound_available(dp, ndp) && !(cnp->cn_flags & CN_WANTSRSRCFORK)) { -#else + if ((cnp->cn_flags & ISLASTCN) && namei_compound_available(dp, ndp) && !(cnp->cn_flags & CN_WANTSRSRCFORK)) { +#else if ((cnp->cn_flags & ISLASTCN) && namei_compound_available(dp, ndp)) { #endif /* NAMEDRSRCFORK */ ndp->ni_flag |= NAMEI_UNFINISHED; @@ -1120,10 +1148,10 @@ unionlookup: return 0; } - nc_generation = dp->v_nc_generation; + nc_generation = dp->v_nc_generation; /* - * Note: + * Note: * Filesystems that support hardlinks may want to call vnode_update_identity * if the lookup operation below will modify the in-core vnode to belong to a new point * in the namespace. VFS cannot infer whether or not the look up operation makes the vnode @@ -1134,20 +1162,20 @@ unionlookup: * * However, even with this in place, multiple lookups may occur in between this lookup * and the subsequent vnop, so, at best, we could only guarantee that you would get a - * valid path back, and not necessarily the one that you wanted. + * valid path back, and not necessarily the one that you wanted. 
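
The ".." handling above also has to escape mounted file systems: while the current vnode is the root of a mount (VROOT) and crossing is allowed, lookup hops to the vnode the file system is mounted on, so ".." resolves in the covering namespace; a NULL v_mount signals a forced unmount and becomes EBADF. A type-sketch of that walk (structures are illustrative, not xnu's):

    #include <stddef.h>

    struct mount_s;
    struct vnode_s {
        int             is_mount_root;     /* stands in for v_flag & VROOT */
        struct mount_s *mount;             /* stands in for v_mount */
    };
    struct mount_s {
        struct vnode_s *covered;           /* stands in for mnt_vnodecovered */
    };

    /* Returns the vnode ".." should be resolved against, or NULL for the
     * forced-unmount case the kernel maps to EBADF. */
    struct vnode_s *
    dotdot_cross_up(struct vnode_s *dp, int nocrossmount)
    {
        while (dp->is_mount_root && !nocrossmount) {
            if (dp->mount == NULL) {       /* forced umount tore this down */
                return NULL;
            }
            dp = dp->mount->covered;       /* hop into the covering fs */
        }
        return dp;
    }
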
* - * Example: + * Example: * /tmp/a == /foo/b - * - * If you are now looking up /foo/b and the vnode for this link represents /tmp/a, - * vnode_update_identity will fix the parentage so that you can get /foo/b back - * through the v_parent chain (preventing you from getting /tmp/b back). It would + * + * If you are now looking up /foo/b and the vnode for this link represents /tmp/a, + * vnode_update_identity will fix the parentage so that you can get /foo/b back + * through the v_parent chain (preventing you from getting /tmp/b back). It would * not fix whether or not you should or should not get /tmp/a vs. /foo/b. */ error = VNOP_LOOKUP(dp, &ndp->ni_vp, cnp, ctx); - if ( error ) { + if (error) { lookup_error: if ((error == ENOENT) && (dp->v_mount != NULL) && @@ -1156,7 +1184,7 @@ lookup_error: error = lookup_traverse_union(tdp, &dp, ctx); vnode_put(tdp); if (error) { - dp = NULLVP; + dp = NULLVP; goto bad; } @@ -1165,24 +1193,26 @@ lookup_error: goto unionlookup; } - if (error != EJUSTRETURN) + if (error != EJUSTRETURN) { goto bad; + } - if (ndp->ni_vp != NULLVP) + if (ndp->ni_vp != NULLVP) { panic("leaf should be empty"); + } #if NAMEDRSRCFORK - /* + /* * At this point, error should be EJUSTRETURN. - * - * If CN_WANTSRSRCFORK is set, that implies that the + * + * If CN_WANTSRSRCFORK is set, that implies that the * underlying filesystem could not find the "parent" of the - * resource fork (the data fork), and we are doing a lookup + * resource fork (the data fork), and we are doing a lookup * for a CREATE event. * * However, this should be converted to an error, as the * failure to find this parent should disallow further - * progress to try and acquire a resource fork vnode. + * progress to try and acquire a resource fork vnode. */ if (cnp->cn_flags & CN_WANTSRSRCFORK) { error = ENOENT; @@ -1191,32 +1221,35 @@ lookup_error: #endif error = lookup_validate_creation_path(ndp); - if (error) + if (error) { goto bad; + } /* * We return with ni_vp NULL to indicate that the entry * doesn't currently exist, leaving a pointer to the * referenced directory vnode in ndp->ni_dvp. */ if (cnp->cn_flags & SAVESTART) { - if ( (vnode_get(ndp->ni_dvp)) ) { + if ((vnode_get(ndp->ni_dvp))) { error = ENOENT; goto bad; } ndp->ni_startdir = ndp->ni_dvp; } - if (!wantparent) - vnode_put(ndp->ni_dvp); + if (!wantparent) { + vnode_put(ndp->ni_dvp); + } - if (kdebug_enable) - kdebug_lookup(ndp->ni_dvp, cnp); - return (0); + if (kdebug_enable) { + kdebug_lookup(ndp->ni_dvp, cnp); + } + return 0; } returned_from_lookup_path: /* We'll always have an iocount on ni_vp when this finishes. */ error = lookup_handle_found_vnode(ndp, cnp, rdonly, vbusyflags, &keep_going, nc_generation, wantparent, atroot, ctx); if (error != 0) { - goto bad2; + goto bad2; } if (keep_going) { @@ -1224,12 +1257,12 @@ returned_from_lookup_path: /* namei() will handle symlinks */ if ((dp->v_type == VLNK) && - ((cnp->cn_flags & FOLLOW) || (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/')) { - return 0; + ((cnp->cn_flags & FOLLOW) || (ndp->ni_flag & NAMEI_TRAILINGSLASH) || *ndp->ni_next == '/')) { + return 0; } /* - * Otherwise, there's more path to process. + * Otherwise, there's more path to process. * cache_lookup_path is now responsible for dropping io ref on dp * when it is called again in the dirloop. This ensures we hold * a ref on dp until we complete the next round of lookup. 
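
The nc_generation bookkeeping in the surrounding hunks is a generation-counter validation: sample the directory's v_nc_generation before calling into the file system, and only insert the result into the name cache if the generation still matches, meaning no purge raced the unlocked lookup. The code above also deliberately stores generation minus one when it cannot re-serialize, which unconditionally disqualifies the later insert. The idiom in miniature:

    #include <stdbool.h>

    struct dir_cache {
        unsigned nc_generation;            /* bumped on every cache purge */
    };

    /* Call with the generation sampled before the slow, unlocked lookup. */
    bool
    cache_enter_if_unchanged(struct dir_cache *dc, unsigned sampled_gen)
    {
        if (dc->nc_generation != sampled_gen) {
            return false;                  /* purged behind us: skip the insert */
        }
        /* ... insert the name under the cache lock here ... */
        return true;
    }

    /* The "generation - 1" trick: guarantee the later comparison fails. */
    unsigned
    poisoned_generation(const struct dir_cache *dc)
    {
        return dc->nc_generation - 1;
    }
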
@@ -1239,26 +1272,30 @@ returned_from_lookup_path: goto dirloop; } - return (0); + return 0; bad2: - if (ndp->ni_dvp) + if (ndp->ni_dvp) { vnode_put(ndp->ni_dvp); + } vnode_put(ndp->ni_vp); ndp->ni_vp = NULLVP; - if (kdebug_enable) - kdebug_lookup(dp, cnp); - return (error); + if (kdebug_enable) { + kdebug_lookup(dp, cnp); + } + return error; bad: - if (dp) - vnode_put(dp); + if (dp) { + vnode_put(dp); + } ndp->ni_vp = NULLVP; - if (kdebug_enable) - kdebug_lookup(dp, cnp); - return (error); + if (kdebug_enable) { + kdebug_lookup(dp, cnp); + } + return error; } /* @@ -1279,8 +1316,9 @@ lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx) if (vp && vp->v_flag & VROOT) { *new_dvp = vp->v_mount->mnt_vnodecovered; - if (vnode_getwithref(*new_dvp)) + if (vnode_getwithref(*new_dvp)) { return ENOENT; + } return 0; } @@ -1306,17 +1344,19 @@ lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx) goto done; } len = strlen(name); - if ((len + 1) > (pp - path)) { // Enough space for this name ? + if ((len + 1) > (pp - path)) { // Enough space for this name ? error = ENAMETOOLONG; vnode_putname(name); goto done; } - for (np = name + len; len > 0; len--) // Copy name backwards + for (np = name + len; len > 0; len--) { // Copy name backwards *--pp = *--np; + } vnode_putname(name); vp = vp->v_parent; - if (vp == NULLVP || vp->v_flag & VROOT) + if (vp == NULLVP || vp->v_flag & VROOT) { break; + } *--pp = '/'; } @@ -1324,16 +1364,18 @@ lookup_traverse_union(vnode_t dvp, vnode_t *new_dvp, vfs_context_t ctx) NDINIT(&nd, LOOKUP, OP_LOOKUP, USEDVP, UIO_SYSSPACE, CAST_USER_ADDR_T(pp), ctx); nd.ni_dvp = dvp->v_mount->mnt_vnodecovered; error = namei(&nd); - if (error == 0) + if (error == 0) { *new_dvp = nd.ni_vp; + } nameidone(&nd); done: - if (path) + if (path) { kfree(path, MAXPATHLEN); + } return error; } -int +int lookup_validate_creation_path(struct nameidata *ndp) { struct componentname *cnp = &ndp->ni_cnd; @@ -1348,7 +1390,7 @@ lookup_validate_creation_path(struct nameidata *ndp) if ((cnp->cn_flags & ISLASTCN) && (ndp->ni_flag & NAMEI_TRAILINGSLASH) && !(cnp->cn_flags & WILLBEDIR)) { return ENOENT; } - + return 0; } @@ -1356,23 +1398,24 @@ lookup_validate_creation_path(struct nameidata *ndp) * Modifies only ni_vp. Always returns with ni_vp still valid (iocount held). 
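
lookup_traverse_union() above rebuilds a relative path by walking v_parent upward and copying each component tail-first into the end of a MAXPATHLEN buffer ("Copy name backwards"), then hands the result back to namei(). First, the reverse build as a stand-alone function:

    #include <string.h>

    /*
     * Build "comps[0]/comps[1]/.../comps[n-1]" by writing the leaf first at
     * the end of the buffer and prepending, the way lookup_traverse_union()
     * walks v_parent. Returns the start of the string inside buf, or NULL on
     * overflow (the kernel's ENAMETOOLONG case).
     */
    char *
    build_path_reverse(const char *comps[], int n, char *buf, size_t size)
    {
        char *pp = buf + size - 1;

        *pp = '\0';
        for (int i = n - 1; i >= 0; i--) {
            size_t len = strlen(comps[i]);
            if (len + 1 > (size_t)(pp - buf)) {
                return NULL;               /* no room for name plus separator */
            }
            const char *np = comps[i] + len;
            while (len-- > 0) {
                *--pp = *--np;             /* copy name backwards */
            }
            if (i > 0) {
                *--pp = '/';
            }
        }
        return pp;
    }

The buffer then re-enters lookup through the NDINIT/namei/nameidone convention visible in the hunk above; a hedged sketch of that call shape (xnu-internal KPI whose macro arity varies across releases, not buildable outside the kernel):

    #include <sys/namei.h>
    #include <sys/vnode.h>

    int
    lookup_relative(vnode_t startdir, const char *pp, vfs_context_t ctx,
        vnode_t *vpp)
    {
        struct nameidata nd;
        int error;

        NDINIT(&nd, LOOKUP, OP_LOOKUP, USEDVP, UIO_SYSSPACE,
            CAST_USER_ADDR_T(pp), ctx);
        nd.ni_dvp = startdir;              /* USEDVP: start here, not at cwd */
        error = namei(&nd);
        if (error) {
            return error;
        }
        *vpp = nd.ni_vp;                   /* iocount held; caller vnode_put()s */
        nameidone(&nd);
        return 0;
    }
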
*/ static int -lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, - int vbusyflags, vfs_context_t ctx) +lookup_traverse_mountpoints(struct nameidata *ndp, struct componentname *cnp, vnode_t dp, + int vbusyflags, vfs_context_t ctx) { mount_t mp; vnode_t tdp; int error = 0; uint32_t depth = 0; - vnode_t mounted_on_dp; + vnode_t mounted_on_dp; int current_mount_generation = 0; #if CONFIG_TRIGGERS vnode_t triggered_dp = NULLVP; int retry_cnt = 0; #define MAX_TRIGGER_RETRIES 1 #endif - - if (dp->v_type != VDIR || cnp->cn_flags & NOCROSSMOUNT) + + if (dp->v_type != VDIR || cnp->cn_flags & NOCROSSMOUNT) { return 0; + } mounted_on_dp = dp; #if CONFIG_TRIGGERS @@ -1392,7 +1435,7 @@ restart: if (ISSET(mp->mnt_lflag, MNT_LFORCE)) { mount_dropcrossref(mp, dp, 0); - break; // don't traverse into a forced unmount + break; // don't traverse into a forced unmount } @@ -1438,22 +1481,24 @@ restart: */ if (dp->v_resolve && retry_cnt < MAX_TRIGGER_RETRIES) { error = vnode_trigger_resolve(dp, ndp, ctx); - if (error) + if (error) { goto out; - if (dp == triggered_dp) + } + if (dp == triggered_dp) { retry_cnt += 1; - else + } else { retry_cnt = 0; + } triggered_dp = dp; goto restart; } #endif /* CONFIG_TRIGGERS */ if (depth) { - mp = mounted_on_dp->v_mountedhere; + mp = mounted_on_dp->v_mountedhere; if (mp) { - mount_lock_spin(mp); + mount_lock_spin(mp); mp->mnt_realrootvp_vid = dp->v_id; mp->mnt_realrootvp = dp; mp->mnt_generation = current_mount_generation; @@ -1471,11 +1516,11 @@ out: * Takes ni_vp and ni_dvp non-NULL. Returns with *new_dp set to the location * at which to start a lookup with a resolved path, and all other iocounts dropped. */ -static int +static int lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) { int error; - char *cp; /* pointer into pathname argument */ + char *cp; /* pointer into pathname argument */ uio_t auio; union { union { @@ -1483,7 +1528,7 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) struct kern_iovec s_kiovec; } u_iovec; struct uio s_uio; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; } u_uio_buf; /* union only for aligning uio_buf correctly */ int need_newpathbuf; u_int linklen; @@ -1495,13 +1540,15 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) return ELOOP; } #if CONFIG_MACF - if ((error = mac_vnode_check_readlink(ctx, ndp->ni_vp)) != 0) + if ((error = mac_vnode_check_readlink(ctx, ndp->ni_vp)) != 0) { return error; + } #endif /* MAC */ - if (ndp->ni_pathlen > 1 || !(cnp->cn_flags & HASBUF)) + if (ndp->ni_pathlen > 1 || !(cnp->cn_flags & HASBUF)) { need_newpathbuf = 1; - else + } else { need_newpathbuf = 0; + } if (need_newpathbuf) { MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); @@ -1518,20 +1565,22 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) error = VNOP_READLINK(ndp->ni_vp, auio, ctx); if (error) { - if (need_newpathbuf) + if (need_newpathbuf) { FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); + } return error; } - /* + /* * Safe to set unsigned with a [larger] signed type here * because 0 <= uio_resid <= MAXPATHLEN and MAXPATHLEN * is only 1024. 
*/ linklen = MAXPATHLEN - (u_int)uio_resid(auio); if (linklen + ndp->ni_pathlen > MAXPATHLEN) { - if (need_newpathbuf) + if (need_newpathbuf) { FREE_ZONE(cp, MAXPATHLEN, M_NAMEI); + } return ENAMETOOLONG; } @@ -1543,12 +1592,14 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) cnp->cn_pnbuf = cp; cnp->cn_pnlen = MAXPATHLEN; - if ( (cnp->cn_flags & HASBUF) ) + if ((cnp->cn_flags & HASBUF)) { FREE_ZONE(tmppn, len, M_NAMEI); - else + } else { cnp->cn_flags |= HASBUF; - } else + } + } else { cnp->cn_pnbuf[linklen] = '\0'; + } ndp->ni_pathlen += linklen; cnp->cn_nameptr = cnp->cn_pnbuf; @@ -1563,7 +1614,7 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) * get rid of references returned via 'lookup' */ vnode_put(ndp->ni_vp); - vnode_put(ndp->ni_dvp); /* ALWAYS have a dvp for a symlink */ + vnode_put(ndp->ni_dvp); /* ALWAYS have a dvp for a symlink */ ndp->ni_vp = NULLVP; ndp->ni_dvp = NULLVP; @@ -1593,27 +1644,28 @@ lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx) int relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) { - struct vnode *dp = NULL; /* the directory we are searching */ - int wantparent; /* 1 => wantparent or lockparent flag */ - int rdonly; /* lookup read-only flag bit */ + struct vnode *dp = NULL; /* the directory we are searching */ + int wantparent; /* 1 => wantparent or lockparent flag */ + int rdonly; /* lookup read-only flag bit */ int error = 0; #ifdef NAMEI_DIAGNOSTIC - int i, newhash; /* DEBUG: check name hash */ - char *cp; /* DEBUG: check name ptr/len */ + int i, newhash; /* DEBUG: check name hash */ + char *cp; /* DEBUG: check name ptr/len */ #endif vfs_context_t ctx = cnp->cn_context;; /* * Setup: break out flag bits into variables. */ - wantparent = cnp->cn_flags & (LOCKPARENT|WANTPARENT); + wantparent = cnp->cn_flags & (LOCKPARENT | WANTPARENT); rdonly = cnp->cn_flags & RDONLY; cnp->cn_flags &= ~ISSYMLINK; - if (cnp->cn_flags & NOCACHE) - cnp->cn_flags &= ~MAKEENTRY; - else - cnp->cn_flags |= MAKEENTRY; + if (cnp->cn_flags & NOCACHE) { + cnp->cn_flags &= ~MAKEENTRY; + } else { + cnp->cn_flags |= MAKEENTRY; + } dp = dvp; @@ -1631,25 +1683,28 @@ relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) error = ENOTDIR; goto bad; } - if ( (vnode_get(dp)) ) { - error = ENOENT; + if ((vnode_get(dp))) { + error = ENOENT; goto bad; } *vpp = dp; - if (cnp->cn_flags & SAVESTART) + if (cnp->cn_flags & SAVESTART) { panic("lookup: SAVESTART"); - return (0); + } + return 0; } /* * We now have a segment name to search for, and a directory to search. */ - if ( (error = VNOP_LOOKUP(dp, vpp, cnp, ctx)) ) { - if (error != EJUSTRETURN) + if ((error = VNOP_LOOKUP(dp, vpp, cnp, ctx))) { + if (error != EJUSTRETURN) { goto bad; + } #if DIAGNOSTIC - if (*vpp != NULL) + if (*vpp != NULL) { panic("leaf should be empty"); + } #endif /* * If creating and at end of pathname, then can consider @@ -1664,7 +1719,7 @@ relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) * doesn't currently exist, leaving a pointer to the * (possibly locked) directory inode in ndp->ni_dvp. 
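
lookup_handle_symlink() above reads the link text and splices the unconsumed remainder of the original path directly behind it in a single MAXPATHLEN-bounded buffer, which is why linklen + ni_pathlen is range-checked before the copy. A user-space analogue using readlink(2) for one level of expansion (illustrative, not the kernel code path):

    #include <errno.h>
    #include <limits.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * Expand one symlink level: out receives "<link text><rest>", mirroring
     * the kernel's rule that link contents plus the remaining path must fit.
     */
    int
    expand_symlink_once(const char *linkpath, const char *rest,
        char *out, size_t outsz)
    {
        char buf[PATH_MAX];
        ssize_t linklen = readlink(linkpath, buf, sizeof(buf));

        if (linklen < 0) {
            return errno;
        }
        size_t restlen = strlen(rest);
        if ((size_t)linklen + restlen + 1 > outsz) {
            return ENAMETOOLONG;           /* same overflow rule as the kernel */
        }
        memcpy(out, buf, (size_t)linklen);          /* link contents first... */
        memcpy(out + linklen, rest, restlen + 1);   /* ...then what was left */
        return 0;
    }
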
*/ - return (0); + return 0; } dp = *vpp; @@ -1672,8 +1727,9 @@ relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) /* * Check for symbolic link */ - if (dp->v_type == VLNK && (cnp->cn_flags & FOLLOW)) - panic ("relookup: symlink found.\n"); + if (dp->v_type == VLNK && (cnp->cn_flags & FOLLOW)) { + panic("relookup: symlink found.\n"); + } #endif /* @@ -1685,15 +1741,15 @@ relookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp) goto bad2; } /* ASSERT(dvp == ndp->ni_startdir) */ - - return (0); + + return 0; bad2: vnode_put(dp); -bad: +bad: *vpp = NULL; - return (error); + return error; } /* @@ -1761,39 +1817,41 @@ kdebug_vfs_lookup(long *dbg_parms, int dbg_namelen, void *dp, uint32_t flags) code = VFS_LOOKUP_DONE | DBG_FUNC_START; } - if (dbg_namelen <= (int)(3 * sizeof(long))) + if (dbg_namelen <= (int)(3 * sizeof(long))) { code |= DBG_FUNC_END; + } if (noprocfilt) { KDBG_RELEASE_NOPROCFILT(code, kdebug_vnode(dp), dbg_parms[0], - dbg_parms[1], dbg_parms[2]); + dbg_parms[1], dbg_parms[2]); } else { KDBG_RELEASE(code, kdebug_vnode(dp), dbg_parms[0], dbg_parms[1], - dbg_parms[2]); + dbg_parms[2]); } code &= ~DBG_FUNC_START; - for (i=3, dbg_namelen -= (3 * sizeof(long)); dbg_namelen > 0; i+=4, dbg_namelen -= (4 * sizeof(long))) { - if (dbg_namelen <= (int)(4 * sizeof(long))) + for (i = 3, dbg_namelen -= (3 * sizeof(long)); dbg_namelen > 0; i += 4, dbg_namelen -= (4 * sizeof(long))) { + if (dbg_namelen <= (int)(4 * sizeof(long))) { code |= DBG_FUNC_END; + } if (noprocfilt) { KDBG_RELEASE_NOPROCFILT(code, dbg_parms[i], dbg_parms[i + 1], - dbg_parms[i + 2], dbg_parms[i + 3]); + dbg_parms[i + 2], dbg_parms[i + 3]); } else { KDBG_RELEASE(code, dbg_parms[i], dbg_parms[i + 1], dbg_parms[i + 2], - dbg_parms[i + 3]); + dbg_parms[i + 3]); } } } void kdebug_lookup_gen_events(long *dbg_parms, int dbg_namelen, void *dp, - boolean_t lookup) + boolean_t lookup) { kdebug_vfs_lookup(dbg_parms, dbg_namelen, dp, - lookup ? KDBG_VFS_LOOKUP_FLAG_LOOKUP : 0); + lookup ? KDBG_VFS_LOOKUP_FLAG_LOOKUP : 0); } void @@ -1807,28 +1865,29 @@ kdebug_lookup(vnode_t dp, struct componentname *cnp) dbg_namelen = (cnp->cn_nameptr - cnp->cn_pnbuf) + cnp->cn_namelen; dbg_nameptr = cnp->cn_nameptr + cnp->cn_namelen; - if (dbg_namelen > (int)sizeof(dbg_parms)) + if (dbg_namelen > (int)sizeof(dbg_parms)) { dbg_namelen = sizeof(dbg_parms); + } dbg_nameptr -= dbg_namelen; - + /* Copy the (possibly truncated) path itself */ memcpy(dbg_parms, dbg_nameptr, dbg_namelen); - + /* Pad with '\0' or '>' */ if (dbg_namelen < (int)sizeof(dbg_parms)) { memset((char *)dbg_parms + dbg_namelen, - *(cnp->cn_nameptr + cnp->cn_namelen) ? '>' : 0, - sizeof(dbg_parms) - dbg_namelen); + *(cnp->cn_nameptr + cnp->cn_namelen) ? '>' : 0, + sizeof(dbg_parms) - dbg_namelen); } kdebug_vfs_lookup(dbg_parms, dbg_namelen, (void *)dp, - KDBG_VFS_LOOKUP_FLAG_LOOKUP); + KDBG_VFS_LOOKUP_FLAG_LOOKUP); } #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_IST) */ void kdebug_vfs_lookup(long *dbg_parms __unused, int dbg_namelen __unused, - void *dp __unused, __unused uint32_t flags) + void *dp __unused, __unused uint32_t flags) { } @@ -1843,7 +1902,7 @@ vfs_getbyid(fsid_t *fsid, ino64_t ino, vnode_t *vpp, vfs_context_t ctx) { mount_t mp; int error; - + mp = mount_lookupby_volfsid(fsid->val[0], 1); if (mp == NULL) { return EINVAL; @@ -1891,7 +1950,7 @@ vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_ /* Get file system id and move str to next component. 
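
kdebug_lookup() above squeezes the tail of the path into an array of longs for the trace buffer, padding short paths with NULs and marking a path cut mid-name with '>'. A simplified packer showing the keep-the-tail-and-pad mechanics (NUL padding only; the '>' case needs the extra "more name follows" input the kernel has):

    #include <string.h>

    #define NPARMS 12                      /* illustrative; xnu sizes this to the trace format */

    /* Keep at most NPARMS*sizeof(long) tail bytes of 'path', NUL-padding the
     * remainder of parms; returns the byte count kept (dbg_namelen above). */
    int
    pack_path_for_trace(const char *path, long parms[NPARMS])
    {
        int max = (int)(NPARMS * sizeof(long));
        int keep = (int)strlen(path);

        if (keep > max) {
            keep = max;                    /* keep the tail of the path */
        }
        memcpy(parms, path + strlen(path) - (size_t)keep, (size_t)keep);
        if (keep < max) {
            memset((char *)parms + keep, '\0', (size_t)(max - keep));
        }
        return keep;
    }
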
*/ id = strtoul(path, &str, 10); if (id == 0 || str[0] != '/') { - return (EINVAL); + return EINVAL; } while (*str == '/') { str++; @@ -1900,7 +1959,7 @@ vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_ mp = mount_lookupby_volfsid(id, 1); if (mp == NULL) { - return (EINVAL); /* unexpected failure */ + return EINVAL; /* unexpected failure */ } /* Check for an alias to a file system root. */ if (ch == '@' && str[1] == '\0') { @@ -1908,7 +1967,7 @@ vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_ str++; } else { /* Get file id and move str to next component. */ - ino = strtouq(str, &str, 10); + ino = strtouq(str, &str, 10); } /* Get the target vnode. */ @@ -1929,12 +1988,12 @@ vfs_getrealpath(const char * path, char * realpath, size_t bufsize, vfs_context_ if (error == 0 && *str != '\0') { int attempt = strlcat(realpath, str, MAXPATHLEN); - if (attempt > MAXPATHLEN){ + if (attempt > MAXPATHLEN) { error = ENAMETOOLONG; } } out: - return (error); + return error; } #endif @@ -1945,7 +2004,7 @@ lookup_compound_vnop_post_hook(int error, vnode_t dvp, vnode_t vp, struct nameid panic("NULL vp with error == 0.\n"); } - /* + /* * We don't want to do any of this if we didn't use the compound vnop * to perform the lookup... i.e. if we're allowing and using the legacy pattern, * where we did a full lookup. @@ -1954,7 +2013,7 @@ lookup_compound_vnop_post_hook(int error, vnode_t dvp, vnode_t vp, struct nameid return; } - /* + /* * If we're going to continue the lookup, we'll handle * all lookup-related updates at that time. */ @@ -1967,29 +2026,30 @@ lookup_compound_vnop_post_hook(int error, vnode_t dvp, vnode_t vp, struct nameid * neither would happen in the non-compound-vnop case. */ if ((vp != NULLVP) && !did_create) { - /* - * If MAKEENTRY isn't set, and we've done a successful compound VNOP, + /* + * If MAKEENTRY isn't set, and we've done a successful compound VNOP, * then we certainly don't want to update cache or identity. */ if ((error != 0) || (ndp->ni_cnd.cn_flags & MAKEENTRY)) { lookup_consider_update_cache(dvp, vp, &ndp->ni_cnd, ndp->ni_ncgeneration); } - if (ndp->ni_cnd.cn_flags & AUDITVNPATH1) + if (ndp->ni_cnd.cn_flags & AUDITVNPATH1) { AUDIT_ARG(vnpath, vp, ARG_VNODE1); - else if (ndp->ni_cnd.cn_flags & AUDITVNPATH2) + } else if (ndp->ni_cnd.cn_flags & AUDITVNPATH2) { AUDIT_ARG(vnpath, vp, ARG_VNODE2); + } } - /* - * If you created (whether you opened or not), cut a lookup tracepoint + /* + * If you created (whether you opened or not), cut a lookup tracepoint * for the parent dir (as would happen without a compound vnop). Note: we may need * a vnode despite failure in this case! * * If you did not create: - * Found child (succeeded or not): cut a tracepoint for the child. - * Did not find child: cut a tracepoint with the parent. + * Found child (succeeded or not): cut a tracepoint for the child. + * Did not find child: cut a tracepoint with the parent. */ if (kdebug_enable) { - kdebug_lookup(vp ? vp : dvp, &ndp->ni_cnd); + kdebug_lookup(vp ? vp : dvp, &ndp->ni_cnd); } } diff --git a/bsd/vfs/vfs_quota.c b/bsd/vfs/vfs_quota.c index 2546e809a..63c4afcb5 100644 --- a/bsd/vfs/vfs_quota.c +++ b/bsd/vfs/vfs_quota.c @@ -2,7 +2,7 @@ * Copyright (c) 2002-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -79,15 +79,15 @@ /* vars for quota file lock */ -lck_grp_t * qf_lck_grp; -lck_grp_attr_t * qf_lck_grp_attr; -lck_attr_t * qf_lck_attr; +lck_grp_t * qf_lck_grp; +lck_grp_attr_t * qf_lck_grp_attr; +lck_attr_t * qf_lck_attr; /* vars for quota list lock */ -lck_grp_t * quota_list_lck_grp; -lck_grp_attr_t * quota_list_lck_grp_attr; -lck_attr_t * quota_list_lck_attr; -lck_mtx_t * quota_list_mtx_lock; +lck_grp_t * quota_list_lck_grp; +lck_grp_attr_t * quota_list_lck_grp_attr; +lck_attr_t * quota_list_lck_attr; +lck_mtx_t * quota_list_mtx_lock; /* Routines to lock and unlock the quota global data */ static int dq_list_lock(void); @@ -104,10 +104,10 @@ static u_int32_t quotamagic[MAXQUOTAS] = INITQMAGICS; */ #define DQHASH(dqvp, id) \ (&dqhashtbl[((((intptr_t)(dqvp)) >> 8) + id) & dqhash]) -LIST_HEAD(dqhash, dquot) *dqhashtbl; +LIST_HEAD(dqhash, dquot) * dqhashtbl; u_long dqhash; -#define DQUOTINC 5 /* minimum free dquots desired */ +#define DQUOTINC 5 /* minimum free dquots desired */ long numdquot, desireddquot = DQUOTINC; /* @@ -120,7 +120,7 @@ TAILQ_HEAD(dqfreelist, dquot) dqfreelist; TAILQ_HEAD(dqdirtylist, dquot) dqdirtylist; -static int dqlookup(struct quotafile *, u_int32_t, struct dqblk *, u_int32_t *); +static int dqlookup(struct quotafile *, u_int32_t, struct dqblk *, u_int32_t *); static int dqsync_locked(struct dquot *dq); static void qf_lock(struct quotafile *); @@ -138,9 +138,9 @@ dqinit(void) /* * Allocate quota list lock group attribute and group */ - quota_list_lck_grp_attr= lck_grp_attr_alloc_init(); - quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr); - + quota_list_lck_grp_attr = lck_grp_attr_alloc_init(); + quota_list_lck_grp = lck_grp_alloc_init("quota list", quota_list_lck_grp_attr); + /* * Allocate qouta list lock attribute */ @@ -155,7 +155,7 @@ dqinit(void) /* * allocate quota file lock group attribute and group */ - qf_lck_grp_attr= lck_grp_attr_alloc_init(); + qf_lck_grp_attr = lck_grp_attr_alloc_init(); qf_lck_grp = lck_grp_alloc_init("quota file", qf_lck_grp_attr); /* @@ -170,7 +170,7 @@ dqinit(void) int dqisinitialized(void) { - return (dqhashtbl != NULL); + return dqhashtbl != NULL; } /* @@ -180,8 +180,9 @@ void dqhashinit(void) { dq_list_lock(); - if (dqisinitialized()) + if (dqisinitialized()) { goto out; + } TAILQ_INIT(&dqfreelist); TAILQ_INIT(&dqdirtylist); @@ -201,12 +202,14 @@ dq_list_lock(void) } static int -dq_list_lock_changed(int oldval) { - return (dq_list_lock_cnt != oldval); +dq_list_lock_changed(int oldval) +{ + return dq_list_lock_cnt != oldval; } static int -dq_list_lock_val(void) { +dq_list_lock_val(void) +{ return dq_list_lock_cnt; } @@ -223,9 +226,9 @@ dq_list_unlock(void) 
void dq_lock_internal(struct dquot *dq) { - while (dq->dq_lflags & DQ_LLOCK) { - dq->dq_lflags |= DQ_LWANT; - msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", NULL); + while (dq->dq_lflags & DQ_LLOCK) { + dq->dq_lflags |= DQ_LWANT; + msleep(&dq->dq_lflags, quota_list_mtx_lock, PVFS, "dq_lock_internal", NULL); } dq->dq_lflags |= DQ_LLOCK; } @@ -236,17 +239,18 @@ dq_lock_internal(struct dquot *dq) void dq_unlock_internal(struct dquot *dq) { - int wanted = dq->dq_lflags & DQ_LWANT; + int wanted = dq->dq_lflags & DQ_LWANT; dq->dq_lflags &= ~(DQ_LLOCK | DQ_LWANT); - if (wanted) - wakeup(&dq->dq_lflags); + if (wanted) { + wakeup(&dq->dq_lflags); + } } void -dqlock(struct dquot *dq) { - +dqlock(struct dquot *dq) +{ lck_mtx_lock(quota_list_mtx_lock); dq_lock_internal(dq); @@ -255,8 +259,8 @@ dqlock(struct dquot *dq) { } void -dqunlock(struct dquot *dq) { - +dqunlock(struct dquot *dq) +{ lck_mtx_lock(quota_list_mtx_lock); dq_unlock_internal(dq); @@ -269,66 +273,65 @@ dqunlock(struct dquot *dq) { int qf_get(struct quotafile *qfp, int type) { - int error = 0; + int error = 0; - dq_list_lock(); - - switch (type) { + dq_list_lock(); + switch (type) { case QTF_OPENING: - while ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) ) { - if ( (qfp->qf_qflags & QTF_OPENING) ) { - error = EBUSY; + while ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING))) { + if ((qfp->qf_qflags & QTF_OPENING)) { + error = EBUSY; break; } - if ( (qfp->qf_qflags & QTF_CLOSING) ) { - qfp->qf_qflags |= QTF_WANTED; + if ((qfp->qf_qflags & QTF_CLOSING)) { + qfp->qf_qflags |= QTF_WANTED; msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL); } } - if (qfp->qf_vp != NULLVP) - error = EBUSY; - if (error == 0) - qfp->qf_qflags |= QTF_OPENING; + if (qfp->qf_vp != NULLVP) { + error = EBUSY; + } + if (error == 0) { + qfp->qf_qflags |= QTF_OPENING; + } break; case QTF_CLOSING: - if ( (qfp->qf_qflags & QTF_CLOSING) ) { - error = EBUSY; + if ((qfp->qf_qflags & QTF_CLOSING)) { + error = EBUSY; break; } qfp->qf_qflags |= QTF_CLOSING; - while ( (qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt ) { - qfp->qf_qflags |= QTF_WANTED; + while ((qfp->qf_qflags & QTF_OPENING) || qfp->qf_refcnt) { + qfp->qf_qflags |= QTF_WANTED; msleep(&qfp->qf_qflags, quota_list_mtx_lock, PVFS, "qf_get", NULL); } if (qfp->qf_vp == NULLVP) { - qfp->qf_qflags &= ~QTF_CLOSING; + qfp->qf_qflags &= ~QTF_CLOSING; error = EBUSY; } break; } dq_list_unlock(); - return (error); + return error; } void qf_put(struct quotafile *qfp, int type) { + dq_list_lock(); - dq_list_lock(); - - switch (type) { - + switch (type) { case QTF_OPENING: case QTF_CLOSING: - qfp->qf_qflags &= ~type; + qfp->qf_qflags &= ~type; break; } - if ( (qfp->qf_qflags & QTF_WANTED) ) { - qfp->qf_qflags &= ~QTF_WANTED; + if ((qfp->qf_qflags & QTF_WANTED)) { + qfp->qf_qflags &= ~QTF_WANTED; wakeup(&qfp->qf_qflags); } dq_list_unlock(); @@ -365,18 +368,19 @@ qf_unlock(struct quotafile *qfp) static int qf_ref(struct quotafile *qfp) { - int error = 0; + int error = 0; - if ( (qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP) ) - error = EINVAL; - else - qfp->qf_refcnt++; + if ((qfp->qf_qflags & (QTF_OPENING | QTF_CLOSING)) || (qfp->qf_vp == NULLVP)) { + error = EINVAL; + } else { + qfp->qf_refcnt++; + } - return (error); + return error; } /* - * drop our reference and wakeup any waiters if + * drop our reference and wakeup any waiters if * we were the last one holding a ref * * quota list lock must be held on entry @@ -386,9 +390,9 @@ qf_rele(struct quotafile *qfp) { 
qfp->qf_refcnt--; - if ( (qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) { - qfp->qf_qflags &= ~QTF_WANTED; - wakeup(&qfp->qf_qflags); + if ((qfp->qf_qflags & QTF_WANTED) && qfp->qf_refcnt == 0) { + qfp->qf_qflags &= ~QTF_WANTED; + wakeup(&qfp->qf_qflags); } } @@ -396,7 +400,7 @@ qf_rele(struct quotafile *qfp) void dqfileinit(struct quotafile *qfp) { - qfp->qf_vp = NULLVP; + qfp->qf_vp = NULLVP; qfp->qf_qflags = 0; lck_mtx_init(&qfp->qf_lock, qf_lck_grp, qf_lck_attr); @@ -416,23 +420,24 @@ dqfileopen(struct quotafile *qfp, int type) off_t file_size; uio_t auio; int error = 0; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; context.vc_thread = current_thread(); context.vc_ucred = qfp->qf_cred; - + /* Obtain the file size */ - if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0) - goto out; + if ((error = vnode_size(qfp->qf_vp, &file_size, &context)) != 0) { + goto out; + } /* Read the file header */ - auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); - uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header)); + auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, + &uio_buf[0], sizeof(uio_buf)); + uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header)); error = VNOP_READ(qfp->qf_vp, auio, 0, &context); - if (error) - goto out; - else if (uio_resid(auio)) { + if (error) { + goto out; + } else if (uio_resid(auio)) { error = EINVAL; goto out; } @@ -445,21 +450,23 @@ dqfileopen(struct quotafile *qfp, int type) goto out; } /* Set up the time limits for this quota. */ - if (header.dqh_btime != 0) + if (header.dqh_btime != 0) { qfp->qf_btime = OSSwapBigToHostInt32(header.dqh_btime); - else + } else { qfp->qf_btime = MAX_DQ_TIME; - if (header.dqh_itime != 0) + } + if (header.dqh_itime != 0) { qfp->qf_itime = OSSwapBigToHostInt32(header.dqh_itime); - else + } else { qfp->qf_itime = MAX_IQ_TIME; + } /* Calculate the hash table constants. 
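dqfileopen() above reads the quota-file header with a single-iovec uio and then converts each field from its big-endian on-disk form with OSSwapBigToHostInt32(), falling back to MAX_DQ_TIME/MAX_IQ_TIME when the stored grace times are zero. A user-space sketch of the read-then-swap step, using ntohl() as a portable stand-in for the swap; the struct layout below is an illustration, not the real dqfilehdr:

#include <arpa/inet.h>   /* ntohl(): network order == big-endian */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative on-disk header; not the kernel's dqfilehdr layout. */
struct qhdr_disk {
    uint32_t magic;
    uint32_t version;
    uint32_t maxentries;
    uint32_t btime;
};

/* Convert the big-endian on-disk form to host order after reading. */
static void
qhdr_swap_to_host(struct qhdr_disk *h)
{
    h->magic      = ntohl(h->magic);
    h->version    = ntohl(h->version);
    h->maxentries = ntohl(h->maxentries);
    h->btime      = ntohl(h->btime);
}

int
main(void)
{
    struct qhdr_disk h;
    /* pretend these bytes came from a VNOP_READ of the quota file */
    unsigned char raw[16] = { 0, 0, 0, 1,  0, 0, 0, 2,
                              0, 0, 4, 0,  0, 0, 0, 60 };

    memcpy(&h, raw, sizeof(h));
    qhdr_swap_to_host(&h);
    printf("maxentries=%u btime=%u\n", h.maxentries, h.btime);
    return 0;
}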
*/ qfp->qf_maxentries = OSSwapBigToHostInt32(header.dqh_maxentries); qfp->qf_entrycnt = OSSwapBigToHostInt32(header.dqh_entrycnt); qfp->qf_shift = dqhashshift(qfp->qf_maxentries); out: - return (error); + return error; } /* @@ -471,19 +478,19 @@ dqfileclose(struct quotafile *qfp, __unused int type) struct dqfilehdr header; struct vfs_context context; uio_t auio; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; - auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); - uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header)); + auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, + &uio_buf[0], sizeof(uio_buf)); + uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header)); context.vc_thread = current_thread(); context.vc_ucred = qfp->qf_cred; - + if (VNOP_READ(qfp->qf_vp, auio, 0, &context) == 0) { header.dqh_entrycnt = OSSwapHostToBigInt32(qfp->qf_entrycnt); uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE); - uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof (header)); + uio_addiov(auio, CAST_USER_ADDR_T(&header), sizeof(header)); (void) VNOP_WRITE(qfp->qf_vp, auio, 0, &context); } } @@ -506,27 +513,27 @@ dqget(u_int32_t id, struct quotafile *qfp, int type, struct dquot **dqp) if (!dqisinitialized()) { *dqp = NODQUOT; - return (EINVAL); + return EINVAL; } - if ( id == 0 || qfp->qf_vp == NULLVP ) { + if (id == 0 || qfp->qf_vp == NULLVP) { *dqp = NODQUOT; - return (EINVAL); + return EINVAL; } dq_list_lock(); - if ( (qf_ref(qfp)) ) { - dq_list_unlock(); + if ((qf_ref(qfp))) { + dq_list_unlock(); *dqp = NODQUOT; - return (EINVAL); + return EINVAL; } - if ( (dqvp = qfp->qf_vp) == NULLVP ) { - qf_rele(qfp); + if ((dqvp = qfp->qf_vp) == NULLVP) { + qf_rele(qfp); dq_list_unlock(); *dqp = NODQUOT; - return (EINVAL); + return EINVAL; } dqh = DQHASH(dqvp, id); @@ -538,8 +545,9 @@ relookup: */ for (dq = dqh->lh_first; dq; dq = dq->dq_hash.le_next) { if (dq->dq_id != id || - dq->dq_qfile->qf_vp != dqvp) + dq->dq_qfile->qf_vp != dqvp) { continue; + } dq_lock_internal(dq); if (dq_list_lock_changed(listlockval)) { @@ -553,7 +561,7 @@ relookup: */ if (dq->dq_id != id || dq->dq_qfile == NULL || dq->dq_qfile->qf_vp != dqvp) { - dq_unlock_internal(dq); + dq_unlock_internal(dq); goto relookup; } /* @@ -561,63 +569,65 @@ relookup: * the structure off the free list. */ if (dq->dq_cnt++ == 0) { - if (dq->dq_flags & DQ_MOD) + if (dq->dq_flags & DQ_MOD) { TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist); - else + } else { TAILQ_REMOVE(&dqfreelist, dq, dq_freelist); + } } dq_unlock_internal(dq); if (fdq != NULL) { - /* + /* * we grabbed this from the free list in the first pass * but we found the dq we were looking for in * the cache the 2nd time through * so stick it back on the free list and return the cached entry */ - TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist); + TAILQ_INSERT_HEAD(&dqfreelist, fdq, dq_freelist); } qf_rele(qfp); - dq_list_unlock(); - + dq_list_unlock(); + if (ndq != NULL) { - /* + /* * we allocated this in the first pass * but we found the dq we were looking for in * the cache the 2nd time through so free it */ - _FREE(ndq, M_DQUOT); + _FREE(ndq, M_DQUOT); } *dqp = dq; - return (0); + return 0; } /* * Not in cache, allocate a new one. 
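The relookup label in dqget() above exists because dq_lock_internal() can drop the quota list lock to msleep(); by the time the caller wakes, the dquot may have been recycled under it, so the identity (id and quota-file vnode) is re-checked and the hash search restarted. A pthreads sketch of the same discipline, with hypothetical types and names standing in for the kernel's:

#include <pthread.h>
#include <stdio.h>

/*
 * lock_entry() may drop the list mutex while sleeping
 * (pthread_cond_wait releases it), so after it returns the caller
 * re-checks identity and restarts the search if the entry changed.
 */
struct entry {
    pthread_cond_t cv;
    int locked;
    int id;                 /* identity; may change while we slept */
};

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;

/* called with list_mtx held; may drop it while waiting */
static void
lock_entry(struct entry *e)
{
    while (e->locked)
        pthread_cond_wait(&e->cv, &list_mtx);
    e->locked = 1;
}

static struct entry *
get_entry(struct entry *table, int n, int want_id)
{
    pthread_mutex_lock(&list_mtx);
relookup:
    for (int i = 0; i < n; i++) {
        struct entry *e = &table[i];

        if (e->id != want_id)
            continue;
        lock_entry(e);
        if (e->id != want_id) {      /* recycled while we slept */
            e->locked = 0;
            pthread_cond_signal(&e->cv);
            goto relookup;
        }
        pthread_mutex_unlock(&list_mtx);
        return e;                    /* returned locked */
    }
    pthread_mutex_unlock(&list_mtx);
    return NULL;
}

int
main(void)
{
    struct entry table[1] = {{ PTHREAD_COND_INITIALIZER, 0, 42 }};
    struct entry *e = get_entry(table, 1, 42);

    printf("found id: %d\n", e ? e->id : -1);
    return 0;
}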
*/ if (TAILQ_EMPTY(&dqfreelist) && - numdquot < MAXQUOTAS * desiredvnodes) + numdquot < MAXQUOTAS * desiredvnodes) { desireddquot += DQUOTINC; + } if (fdq != NULL) { - /* + /* * we captured this from the free list * in the first pass through, so go * ahead and use it */ - dq = fdq; + dq = fdq; fdq = NULL; } else if (numdquot < desireddquot) { - if (ndq == NULL) { - /* + if (ndq == NULL) { + /* * drop the quota list lock since MALLOC may block */ - dq_list_unlock(); + dq_list_unlock(); ndq = (struct dquot *)_MALLOC(sizeof *dq, M_DQUOT, M_WAITOK); bzero((char *)ndq, sizeof *dq); - listlockval = dq_list_lock(); + listlockval = dq_list_lock(); /* * need to look for the entry again in the cache * since we dropped the quota list lock and @@ -625,44 +635,44 @@ relookup: */ goto relookup; } else { - /* + /* * we allocated this in the first pass through * and we're still under out target, so go * ahead and use it */ - dq = ndq; + dq = ndq; ndq = NULL; numdquot++; } } else { - if (TAILQ_EMPTY(&dqfreelist)) { - qf_rele(qfp); - dq_list_unlock(); + if (TAILQ_EMPTY(&dqfreelist)) { + qf_rele(qfp); + dq_list_unlock(); if (ndq) { - /* + /* * we allocated this in the first pass through * but we're now at the limit of our cache size * so free it */ - _FREE(ndq, M_DQUOT); + _FREE(ndq, M_DQUOT); } tablefull("dquot"); *dqp = NODQUOT; - return (EUSERS); + return EUSERS; } dq = TAILQ_FIRST(&dqfreelist); dq_lock_internal(dq); if (dq_list_lock_changed(listlockval) || dq->dq_cnt || (dq->dq_flags & DQ_MOD)) { - /* + /* * we lost the race while we weren't holding * the quota list lock... dq_lock_internal * will drop it to msleep... this dq has been * reclaimed... go find another */ - dq_unlock_internal(dq); + dq_unlock_internal(dq); /* * need to look for the entry again in the cache @@ -674,7 +684,7 @@ relookup: TAILQ_REMOVE(&dqfreelist, dq, dq_freelist); if (dq->dq_qfile != NULL) { - LIST_REMOVE(dq, dq_hash); + LIST_REMOVE(dq, dq_hash); dq->dq_qfile = NULL; dq->dq_id = 0; } @@ -682,7 +692,7 @@ relookup: /* * because we may have dropped the quota list lock - * in the call to dq_lock_internal, we need to + * in the call to dq_lock_internal, we need to * relookup in the hash in case someone else * caused a dq with this identity to be created... * if we don't find it, we'll use this one @@ -723,12 +733,12 @@ relookup: dq_list_unlock(); if (ndq) { - /* + /* * we allocated this in the first pass through * but we didn't need it, so free it after * we've droped the quota list lock */ - _FREE(ndq, M_DQUOT); + _FREE(ndq, M_DQUOT); } error = dqlookup(qfp, id, &dq->dq_dqb, &dq->dq_index); @@ -738,7 +748,7 @@ relookup: * quota structure and reflect problem to caller. */ if (error) { - dq_list_lock(); + dq_list_lock(); dq->dq_id = 0; dq->dq_qfile = NULL; @@ -746,28 +756,31 @@ relookup: dq_unlock_internal(dq); qf_rele(qfp); - dq_list_unlock(); + dq_list_unlock(); dqrele(dq); *dqp = NODQUOT; - return (error); + return error; } /* * Check for no limit to enforce. * Initialize time values if necessary. 
*/ if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 && - dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) + dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0) { dq->dq_flags |= DQ_FAKE; + } if (dq->dq_id != 0) { struct timeval tv; microtime(&tv); - if (dq->dq_btime == 0) + if (dq->dq_btime == 0) { dq->dq_btime = tv.tv_sec + qfp->qf_btime; - if (dq->dq_itime == 0) + } + if (dq->dq_itime == 0) { dq->dq_itime = tv.tv_sec + qfp->qf_itime; + } } dq_list_lock(); dq_unlock_internal(dq); @@ -775,7 +788,7 @@ relookup: dq_list_unlock(); *dqp = dq; - return (0); + return 0; } /* @@ -792,7 +805,7 @@ dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index int i, skip, last; u_int32_t mask; int error = 0; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; qf_lock(qfp); @@ -806,12 +819,12 @@ dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index i = dqhash1(id, qfp->qf_shift, mask); skip = dqhash2(id, mask); - for (last = (i + (qfp->qf_maxentries-1) * skip) & mask; - i != last; - i = (i + skip) & mask) { - auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); - uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk)); + for (last = (i + (qfp->qf_maxentries - 1) * skip) & mask; + i != last; + i = (i + skip) & mask) { + auio = uio_createwithbuffer(1, dqoffset(i), UIO_SYSSPACE, UIO_READ, + &uio_buf[0], sizeof(uio_buf)); + uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk)); error = VNOP_READ(dqvp, auio, 0, &context); if (error) { printf("dqlookup: error %d looking up id %u at index %d\n", error, id, i); @@ -833,12 +846,14 @@ dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index * Write back to reserve entry for this id */ uio_reset(auio, dqoffset(i), UIO_SYSSPACE, UIO_WRITE); - uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof (struct dqblk)); + uio_addiov(auio, CAST_USER_ADDR_T(dqb), sizeof(struct dqblk)); error = VNOP_WRITE(dqvp, auio, 0, &context); - if (uio_resid(auio) && error == 0) + if (uio_resid(auio) && error == 0) { error = EIO; - if (error == 0) + } + if (error == 0) { ++qfp->qf_entrycnt; + } dqb->dqb_id = id; break; } @@ -860,7 +875,7 @@ dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index *index = i; /* remember index so we don't have to recompute it later */ - return (error); + return error; } @@ -870,9 +885,9 @@ dqlookup(struct quotafile *qfp, u_int32_t id, struct dqblk *dqb, uint32_t *index void dqrele(struct dquot *dq) { - - if (dq == NODQUOT) + if (dq == NODQUOT) { return; + } dqlock(dq); if (dq->dq_cnt > 1) { @@ -881,13 +896,14 @@ dqrele(struct dquot *dq) dqunlock(dq); return; } - if (dq->dq_flags & DQ_MOD) + if (dq->dq_flags & DQ_MOD) { (void) dqsync_locked(dq); + } dq->dq_cnt--; dq_list_lock(); TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist); - dq_unlock_internal(dq); + dq_unlock_internal(dq); dq_list_unlock(); } @@ -897,22 +913,23 @@ dqrele(struct dquot *dq) void dqreclaim(struct dquot *dq) { - - if (dq == NODQUOT) + if (dq == NODQUOT) { return; + } dq_list_lock(); dq_lock_internal(dq); if (--dq->dq_cnt > 0) { - dq_unlock_internal(dq); + dq_unlock_internal(dq); dq_list_unlock(); return; } - if (dq->dq_flags & DQ_MOD) + if (dq->dq_flags & DQ_MOD) { TAILQ_INSERT_TAIL(&dqdirtylist, dq, dq_freelist); - else + } else { TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist); + } dq_unlock_internal(dq); dq_list_unlock(); @@ -925,35 +942,37 @@ void dqsync_orphans(struct quotafile *qfp) { struct dquot *dq; - + 
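dqlookup() above probes the on-disk hash file with double hashing: dqhash1() picks the starting slot, dqhash2() a per-id stride, and the loop gives up after qf_maxentries - 1 probes. For a power-of-two table the stride must be odd, since an odd stride is coprime with the table size and therefore visits every slot before repeating. A small sketch with illustrative hash functions (the kernel's dqhash1()/dqhash2() live in the quota header; the constants below are assumptions):

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 64                 /* power of two */
#define MASK (TABLE_SIZE - 1)

static unsigned h1(uint32_t id) { return (id * 2654435761U) & MASK; }
static unsigned h2(uint32_t id) { return ((id >> 6) | 1) & MASK; } /* odd */

int
main(void)
{
    uint32_t id = 501;
    unsigned i = h1(id), skip = h2(id);
    unsigned last = (i + (TABLE_SIZE - 1) * skip) & MASK;
    unsigned visited = 0;

    /* same loop shape as dqlookup(): at most TABLE_SIZE - 1 probes */
    for (; i != last; i = (i + skip) & MASK)
        visited++;
    printf("probed %u of %u slots\n", visited, (unsigned)TABLE_SIZE);
    return 0;
}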
dq_list_lock(); - loop: +loop: TAILQ_FOREACH(dq, &dqdirtylist, dq_freelist) { - if (dq->dq_qfile != qfp) - continue; + if (dq->dq_qfile != qfp) { + continue; + } dq_lock_internal(dq); if (dq->dq_qfile != qfp) { - /* + /* * the identity of this dq changed while * the quota_list_lock was dropped * dq_lock_internal can drop it to msleep */ - dq_unlock_internal(dq); + dq_unlock_internal(dq); goto loop; } if ((dq->dq_flags & DQ_MOD) == 0) { - /* + /* * someone cleaned and removed this from * the dq from the dirty list while the * quota_list_lock was dropped */ - dq_unlock_internal(dq); + dq_unlock_internal(dq); goto loop; } - if (dq->dq_cnt != 0) + if (dq->dq_cnt != 0) { panic("dqsync_orphans: dquot in use"); + } TAILQ_REMOVE(&dqdirtylist, dq, dq_freelist); @@ -965,7 +984,7 @@ dqsync_orphans(struct quotafile *qfp) * to pick up another one since we hold dqlock */ (void) dqsync_locked(dq); - + dq_list_lock(); TAILQ_INSERT_TAIL(&dqfreelist, dq, dq_freelist); @@ -979,17 +998,18 @@ dqsync_orphans(struct quotafile *qfp) int dqsync(struct dquot *dq) { - int error = 0; + int error = 0; if (dq != NODQUOT) { - dqlock(dq); + dqlock(dq); - if ( (dq->dq_flags & DQ_MOD) ) - error = dqsync_locked(dq); + if ((dq->dq_flags & DQ_MOD)) { + error = dqsync_locked(dq); + } dqunlock(dq); } - return (error); + return error; } @@ -1004,22 +1024,24 @@ dqsync_locked(struct dquot *dq) struct dqblk dqb, *dqblkp; uio_t auio; int error; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; if (dq->dq_id == 0) { - dq->dq_flags &= ~DQ_MOD; - return (0); + dq->dq_flags &= ~DQ_MOD; + return 0; } - if (dq->dq_qfile == NULL) + if (dq->dq_qfile == NULL) { panic("dqsync: NULL dq_qfile"); - if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP) + } + if ((dqvp = dq->dq_qfile->qf_vp) == NULLVP) { panic("dqsync: NULL qf_vp"); + } - auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE, - UIO_WRITE, &uio_buf[0], sizeof(uio_buf)); - uio_addiov(auio, CAST_USER_ADDR_T(&dqb), sizeof (struct dqblk)); + auio = uio_createwithbuffer(1, dqoffset(dq->dq_index), UIO_SYSSPACE, + UIO_WRITE, &uio_buf[0], sizeof(uio_buf)); + uio_addiov(auio, CAST_USER_ADDR_T(&dqb), sizeof(struct dqblk)); - context.vc_thread = current_thread(); /* XXX */ + context.vc_thread = current_thread(); /* XXX */ context.vc_ucred = dq->dq_qfile->qf_cred; dqblkp = &dq->dq_dqb; @@ -1038,11 +1060,12 @@ dqsync_locked(struct dquot *dq) dqb.dqb_spare[3] = 0; error = VNOP_WRITE(dqvp, auio, 0, &context); - if (uio_resid(auio) && error == 0) + if (uio_resid(auio) && error == 0) { error = EIO; + } dq->dq_flags &= ~DQ_MOD; - return (error); + return error; } /* @@ -1054,8 +1077,9 @@ dqflush(struct vnode *vp) struct dquot *dq, *nextdq; struct dqhash *dqh; - if (!dqisinitialized()) + if (!dqisinitialized()) { return; + } /* * Move all dquot's that used to refer to this quota @@ -1067,10 +1091,12 @@ dqflush(struct vnode *vp) for (dqh = &dqhashtbl[dqhash]; dqh >= dqhashtbl; dqh--) { for (dq = dqh->lh_first; dq; dq = nextdq) { nextdq = dq->dq_hash.le_next; - if (dq->dq_qfile->qf_vp != vp) + if (dq->dq_qfile->qf_vp != vp) { continue; - if (dq->dq_cnt) + } + if (dq->dq_cnt) { panic("dqflush: stray dquot"); + } LIST_REMOVE(dq, dq_hash); dq->dq_qfile = NULL; } @@ -1080,10 +1106,10 @@ dqflush(struct vnode *vp) /* * LP64 support for munging dqblk structure. - * XXX conversion of user_time_t to time_t loses precision; not an issue for + * XXX conversion of user_time_t to time_t loses precision; not an issue for * XXX us now, since we are only ever setting 32 bits worth of time into it. 
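The XXX note above (its comment closes just below, where munge_dqblk() begins) flags the user_time_t-to-time_t narrowing: assigning a 64-bit time into a 32-bit field silently truncates, which is benign only while no more than 32 bits of time are ever stored, as the comment says. A two-line demonstration with stand-in integer types:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    int64_t user_time = 0x100000000LL + 1234;   /* needs > 32 bits */
    int32_t disk_time = (int32_t)user_time;     /* silently truncates */

    printf("user_time=%lld disk_time=%d\n",
        (long long)user_time, disk_time);       /* prints 1234 */
    return 0;
}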
*/ -__private_extern__ void +__private_extern__ void munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64) { if (to64) { @@ -1092,13 +1118,11 @@ munge_dqblk(struct dqblk *dqblkp, struct user_dqblk *user_dqblkp, boolean_t to64 user_dqblkp->dqb_id = dqblkp->dqb_id; user_dqblkp->dqb_itime = dqblkp->dqb_itime; user_dqblkp->dqb_btime = dqblkp->dqb_btime; - } - else { - + } else { /* munge user (64 bit) dqblk into kernel (32 bit) dqblk */ bcopy((caddr_t)user_dqblkp, (caddr_t)dqblkp, offsetof(struct dqblk, dqb_btime)); dqblkp->dqb_id = user_dqblkp->dqb_id; - dqblkp->dqb_itime = user_dqblkp->dqb_itime; /* XXX - lose precision */ - dqblkp->dqb_btime = user_dqblkp->dqb_btime; /* XXX - lose precision */ + dqblkp->dqb_itime = user_dqblkp->dqb_itime; /* XXX - lose precision */ + dqblkp->dqb_btime = user_dqblkp->dqb_btime; /* XXX - lose precision */ } } diff --git a/bsd/vfs/vfs_subr.c b/bsd/vfs/vfs_subr.c index e1d18c7c3..e32310c8e 100644 --- a/bsd/vfs/vfs_subr.c +++ b/bsd/vfs/vfs_subr.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1995 NeXT Computer, Inc. 
All Rights Reserved */ @@ -123,9 +123,9 @@ #include #include -#include <kern/kalloc.h> /* kalloc()/kfree() */ -#include <kern/clock.h> /* delay_for_interval() */ -#include <libkern/OSAtomic.h> /* OSAddAtomic() */ +#include <kern/kalloc.h> /* kalloc()/kfree() */ +#include <kern/clock.h> /* delay_for_interval() */ +#include <libkern/OSAtomic.h> /* OSAddAtomic() */ #if !CONFIG_EMBEDDED #include #endif @@ -134,7 +134,7 @@ #include #endif -#include <vm/vnode_pager.h> /* vnode_pager_vrele() */ +#include <vm/vnode_pager.h> /* vnode_pager_vrele() */ #if CONFIG_MACF #include @@ -157,7 +157,7 @@ enum vtype iftovt_tab[16] = { VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON, VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD, }; -int vttoif_tab[9] = { +int vttoif_tab[9] = { 0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK, S_IFSOCK, S_IFIFO, S_IFMT, }; @@ -171,7 +171,7 @@ extern void memory_object_mark_unused( memory_object_control_t control, boolean_t rage); -extern void memory_object_mark_io_tracking( +extern void memory_object_mark_io_tracking( memory_object_control_t control); /* XXX next prototype should be from */ @@ -181,10 +181,10 @@ extern int paniclog_append_noflush(const char *format, ...); /* XXX next prototype should be from <libsa/stdlib.h> but conflicts libkern */ __private_extern__ void qsort( - void * array, - size_t nmembers, - size_t member_size, - int (*)(const void *, const void *)); + void * array, + size_t nmembers, + size_t member_size, + int (*)(const void *, const void *)); __private_extern__ void vntblinit(void); __private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t, @@ -203,7 +203,7 @@ static void vgone(vnode_t, int flags); static void vclean(vnode_t vp, int flag); static void vnode_reclaim_internal(vnode_t, int, int, int); -static void vnode_dropiocount (vnode_t); +static void vnode_dropiocount(vnode_t); static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev); static int vnode_reload(vnode_t); @@ -221,10 +221,10 @@ static int vnode_iterate_reloadq(mount_t); static void vnode_iterate_clear(mount_t); static mount_t vfs_getvfs_locked(fsid_t *); static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, - struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx); + struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx); static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx); -errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *); +errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *); #ifdef JOE_DEBUG static void record_vp(vnode_t vp, int count); @@ -241,81 +241,81 @@ static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, static void vnode_resolver_detach(vnode_t); #endif -TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */ -TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */ +TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */ +TAILQ_HEAD(deadlst, vnode) vnode_dead_list; /* vnode dead list */ TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list; -TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */ +TAILQ_HEAD(ragelst, vnode) vnode_rage_list; /* vnode rapid age list */ struct timeval rage_tv; -int rage_limit = 0; -int ragevnodes = 0; +int rage_limit = 0; +int ragevnodes = 0; -#define RAGE_LIMIT_MIN 100 -#define RAGE_TIME_LIMIT 5 +#define RAGE_LIMIT_MIN 100 +#define RAGE_TIME_LIMIT 5 -struct mntlist mountlist; /* mounted filesystem list */ +struct mntlist mountlist; /* mounted filesystem list */ static int nummounts = 0; #if
DIAGNOSTIC -#define VLISTCHECK(fun, vp, list) \ +#define VLISTCHECK(fun, vp, list) \ if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \ - panic("%s: %s vnode not on %slist", (fun), (list), (list)); + panic("%s: %s vnode not on %slist", (fun), (list), (list)); #else #define VLISTCHECK(fun, vp, list) #endif /* DIAGNOSTIC */ -#define VLISTNONE(vp) \ - do { \ - (vp)->v_freelist.tqe_next = (struct vnode *)0; \ - (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \ +#define VLISTNONE(vp) \ + do { \ + (vp)->v_freelist.tqe_next = (struct vnode *)0; \ + (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb; \ } while(0) -#define VONLIST(vp) \ +#define VONLIST(vp) \ ((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb) /* remove a vnode from free vnode list */ -#define VREMFREE(fun, vp) \ - do { \ - VLISTCHECK((fun), (vp), "free"); \ - TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \ - VLISTNONE((vp)); \ - freevnodes--; \ +#define VREMFREE(fun, vp) \ + do { \ + VLISTCHECK((fun), (vp), "free"); \ + TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist); \ + VLISTNONE((vp)); \ + freevnodes--; \ } while(0) /* remove a vnode from dead vnode list */ -#define VREMDEAD(fun, vp) \ - do { \ - VLISTCHECK((fun), (vp), "dead"); \ - TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \ - VLISTNONE((vp)); \ - vp->v_listflag &= ~VLIST_DEAD; \ - deadvnodes--; \ +#define VREMDEAD(fun, vp) \ + do { \ + VLISTCHECK((fun), (vp), "dead"); \ + TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist); \ + VLISTNONE((vp)); \ + vp->v_listflag &= ~VLIST_DEAD; \ + deadvnodes--; \ } while(0) /* remove a vnode from async work vnode list */ -#define VREMASYNC_WORK(fun, vp) \ - do { \ - VLISTCHECK((fun), (vp), "async_work"); \ - TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \ - VLISTNONE((vp)); \ - vp->v_listflag &= ~VLIST_ASYNC_WORK; \ - async_work_vnodes--; \ +#define VREMASYNC_WORK(fun, vp) \ + do { \ + VLISTCHECK((fun), (vp), "async_work"); \ + TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \ + VLISTNONE((vp)); \ + vp->v_listflag &= ~VLIST_ASYNC_WORK; \ + async_work_vnodes--; \ } while(0) /* remove a vnode from rage vnode list */ -#define VREMRAGE(fun, vp) \ - do { \ - if ( !(vp->v_listflag & VLIST_RAGE)) \ - panic("VREMRAGE: vp not on rage list"); \ - VLISTCHECK((fun), (vp), "rage"); \ - TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \ - VLISTNONE((vp)); \ - vp->v_listflag &= ~VLIST_RAGE; \ - ragevnodes--; \ +#define VREMRAGE(fun, vp) \ + do { \ + if ( !(vp->v_listflag & VLIST_RAGE)) \ + panic("VREMRAGE: vp not on rage list"); \ + VLISTCHECK((fun), (vp), "rage"); \ + TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist); \ + VLISTNONE((vp)); \ + vp->v_listflag &= ~VLIST_RAGE; \ + ragevnodes--; \ } while(0) static void async_work_continue(void); @@ -326,7 +326,7 @@ static void async_work_continue(void); __private_extern__ void vntblinit(void) { - thread_t thread = THREAD_NULL; + thread_t thread = THREAD_NULL; TAILQ_INIT(&vnode_free_list); TAILQ_INIT(&vnode_rage_list); @@ -337,9 +337,10 @@ vntblinit(void) microuptime(&rage_tv); rage_limit = desiredvnodes / 100; - if (rage_limit < RAGE_LIMIT_MIN) - rage_limit = RAGE_LIMIT_MIN; - + if (rage_limit < RAGE_LIMIT_MIN) { + rage_limit = RAGE_LIMIT_MIN; + } + /* * create worker threads */ @@ -349,26 +350,27 @@ vntblinit(void) /* the timeout is in 10 msecs */ int -vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg) { - int error = 0; +vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char 
*msg) +{ + int error = 0; struct timespec ts; KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0); if (vp->v_numoutput > output_target) { + slpflag |= PDROP; - slpflag |= PDROP; - - vnode_lock_spin(vp); + vnode_lock_spin(vp); while ((vp->v_numoutput > output_target) && error == 0) { - if (output_target) - vp->v_flag |= VTHROTTLED; - else - vp->v_flag |= VBWAIT; + if (output_target) { + vp->v_flag |= VTHROTTLED; + } else { + vp->v_flag |= VBWAIT; + } - ts.tv_sec = (slptimeout/100); - ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000 ; + ts.tv_sec = (slptimeout / 100); + ts.tv_nsec = (slptimeout % 1000) * 10 * NSEC_PER_USEC * 1000; error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts); vnode_lock_spin(vp); @@ -382,9 +384,9 @@ vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, void -vnode_startwrite(vnode_t vp) { - - OSAddAtomic(1, &vp->v_numoutput); +vnode_startwrite(vnode_t vp) +{ + OSAddAtomic(1, &vp->v_numoutput); } @@ -394,12 +396,13 @@ vnode_writedone(vnode_t vp) if (vp) { int need_wakeup = 0; - OSAddAtomic(-1, &vp->v_numoutput); + OSAddAtomic(-1, &vp->v_numoutput); vnode_lock_spin(vp); - if (vp->v_numoutput < 0) + if (vp->v_numoutput < 0) { panic("vnode_writedone: numoutput < 0"); + } if ((vp->v_flag & VTHROTTLED)) { vp->v_flag &= ~VTHROTTLED; @@ -410,9 +413,10 @@ vnode_writedone(vnode_t vp) need_wakeup = 1; } vnode_unlock(vp); - - if (need_wakeup) + + if (need_wakeup) { wakeup((caddr_t)&vp->v_numoutput); + } } } @@ -421,27 +425,30 @@ vnode_writedone(vnode_t vp) int vnode_hasdirtyblks(vnode_t vp) { - struct cl_writebehind *wbp; + struct cl_writebehind *wbp; /* * Not taking the buf_mtxp as there is little * point doing it. Even if the lock is taken the - * state can change right after that. If their + * state can change right after that. If their * needs to be a synchronization, it must be driven * by the caller - */ - if (vp->v_dirtyblkhd.lh_first) - return (1); - - if (!UBCINFOEXISTS(vp)) - return (0); + */ + if (vp->v_dirtyblkhd.lh_first) { + return 1; + } + + if (!UBCINFOEXISTS(vp)) { + return 0; + } wbp = vp->v_ubcinfo->cl_wbehind; - if (wbp && (wbp->cl_number || wbp->cl_scmap)) - return (1); + if (wbp && (wbp->cl_number || wbp->cl_scmap)) { + return 1; + } - return (0); + return 0; } int @@ -450,13 +457,14 @@ vnode_hascleanblks(vnode_t vp) /* * Not taking the buf_mtxp as there is little * point doing it. Even if the lock is taken the - * state can change right after that. If their + * state can change right after that. 
If their * needs to be a synchronization, it must be driven * by the caller - */ - if (vp->v_cleanblkhd.lh_first) - return (1); - return (0); + */ + if (vp->v_cleanblkhd.lh_first) { + return 1; + } + return 0; } void @@ -471,34 +479,39 @@ vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags) vnode_t vp; TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) { - if (vp->v_type == VDIR) + if (vp->v_type == VDIR) { continue; - if (vp == skipvp) + } + if (vp == skipvp) { continue; - if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) + } + if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) { continue; - if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) + } + if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) { continue; - if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) + } + if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) { continue; + } /* Look for busy vnode */ if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) { return 1; - } else if (vp->v_iocount > 0) { - /* Busy if iocount is > 0 for more than 3 seconds */ + /* Busy if iocount is > 0 for more than 3 seconds */ tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz); - if (vp->v_iocount > 0) + if (vp->v_iocount > 0) { return 1; + } continue; } } - + return 0; } -/* +/* * This routine prepares iteration by moving all the vnodes to worker queue * called with mount lock held */ @@ -509,8 +522,8 @@ vnode_iterate_prepare(mount_t mp) if (TAILQ_EMPTY(&mp->mnt_vnodelist)) { /* nothing to do */ - return (0); - } + return 0; + } vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first); @@ -518,16 +531,17 @@ vnode_iterate_prepare(mount_t mp) mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last; TAILQ_INIT(&mp->mnt_vnodelist); - if (mp->mnt_newvnodes.tqh_first != NULL) + if (mp->mnt_newvnodes.tqh_first != NULL) { panic("vnode_iterate_prepare: newvnode when entering vnode"); + } TAILQ_INIT(&mp->mnt_newvnodes); - return (1); + return 1; } /* called with mount lock held */ -int +int vnode_iterate_reloadq(mount_t mp) { int moved = 0; @@ -536,12 +550,13 @@ vnode_iterate_reloadq(mount_t mp) if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) { struct vnode * mvp; mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst); - + /* Joining the workerque entities to mount vnode list */ - if (mvp) + if (mvp) { mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first; - else + } else { mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first; + } mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last; mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last; TAILQ_INIT(&mp->mnt_workerqueue); @@ -551,19 +566,20 @@ vnode_iterate_reloadq(mount_t mp) if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) { struct vnode * nlvp; nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst); - + mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first; nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first; - if(mp->mnt_vnodelist.tqh_first) + if (mp->mnt_vnodelist.tqh_first) { mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next; - else + } else { mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last; + } mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first; TAILQ_INIT(&mp->mnt_newvnodes); moved = 1; } - return(moved); + return moved; } @@ -583,34 +599,35 @@ struct vnode_iterate_panic_hook { struct vnode *vp; }; -static void 
vnode_iterate_panic_hook(panic_hook_t *hook_) +static void +vnode_iterate_panic_hook(panic_hook_t *hook_) { struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_; panic_phys_range_t range; uint64_t phys; - + if (panic_phys_range_before(hook->mp, &phys, &range)) { paniclog_append_noflush("mp = %p, phys = %p, prev (%p: %p-%p)\n", - hook->mp, phys, range.type, range.phys_start, - range.phys_start + range.len); + hook->mp, phys, range.type, range.phys_start, + range.phys_start + range.len); } else { paniclog_append_noflush("mp = %p, phys = %p, prev (!)\n", hook->mp, phys); } if (panic_phys_range_before(hook->vp, &phys, &range)) { paniclog_append_noflush("vp = %p, phys = %p, prev (%p: %p-%p)\n", - hook->vp, phys, range.type, range.phys_start, - range.phys_start + range.len); + hook->vp, phys, range.type, range.phys_start, + range.phys_start + range.len); } else { paniclog_append_noflush("vp = %p, phys = %p, prev (!)\n", hook->vp, phys); } - panic_dump_mem((void *)(((vm_offset_t)hook->mp -4096) & ~4095), 12288); + panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288); } #endif //CONFIG_EMBEDDED int vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), - void *arg) + void *arg) { struct vnode *vp; int vid, retval; @@ -634,11 +651,11 @@ vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), /* If it returns 0 then there is nothing to do */ retval = vnode_iterate_prepare(mp); - if (retval == 0) { + if (retval == 0) { vnode_iterate_clear(mp); mount_unlock(mp); mount_iterate_unlock(mp); - return(ret); + return ret; } #if !CONFIG_EMBEDDED @@ -661,43 +678,43 @@ vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *), } mount_unlock(mp); - if ( vget_internal(vp, vid, (flags | VNODE_NODEAD| VNODE_WITHID | VNODE_NOSUSPEND))) { + if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) { mount_lock(mp); - continue; + continue; } if (flags & VNODE_RELOAD) { - /* + /* * we're reloading the filesystem * cast out any inactive vnodes... 
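vnode_iterate_prepare() and vnode_iterate_reloadq(), shown earlier, implement snapshot iteration: the mount's vnode list is spliced onto a worker queue, the loop consumes the worker queue, and vnodes created meanwhile accumulate on mnt_newvnodes so a single pass terminates. A user-space sketch of the same two-queue shape using the <sys/queue.h> macros; the kernel splices the list heads in O(1), while this sketch moves nodes one at a time:

#include <stdio.h>
#include <sys/queue.h>

struct node {
    int id;
    TAILQ_ENTRY(node) entries;
};
TAILQ_HEAD(nodelist, node);

static void
iterate(struct nodelist *live, struct nodelist *work)
{
    struct node *np;

    /* prepare: capture the current population on the worker queue */
    while ((np = TAILQ_FIRST(live)) != NULL) {
        TAILQ_REMOVE(live, np, entries);
        TAILQ_INSERT_TAIL(work, np, entries);
    }
    /* consume: nodes added to "live" meanwhile are not visited */
    while ((np = TAILQ_FIRST(work)) != NULL) {
        TAILQ_REMOVE(work, np, entries);
        TAILQ_INSERT_TAIL(live, np, entries);
        printf("visited %d\n", np->id);
    }
}

int
main(void)
{
    struct nodelist live = TAILQ_HEAD_INITIALIZER(live);
    struct nodelist work = TAILQ_HEAD_INITIALIZER(work);
    struct node a = { .id = 1 }, b = { .id = 2 };

    TAILQ_INSERT_TAIL(&live, &a, entries);
    TAILQ_INSERT_TAIL(&live, &b, entries);
    iterate(&live, &work);
    return 0;
}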
*/ - if (vnode_reload(vp)) { - /* vnode will be recycled on the refcount drop */ - vnode_put(vp); + if (vnode_reload(vp)) { + /* vnode will be recycled on the refcount drop */ + vnode_put(vp); mount_lock(mp); - continue; + continue; } } retval = callout(vp, arg); switch (retval) { - case VNODE_RETURNED: - case VNODE_RETURNED_DONE: - vnode_put(vp); - if (retval == VNODE_RETURNED_DONE) { + case VNODE_RETURNED: + case VNODE_RETURNED_DONE: + vnode_put(vp); + if (retval == VNODE_RETURNED_DONE) { mount_lock(mp); ret = 0; goto out; - } - break; + } + break; - case VNODE_CLAIMED_DONE: - mount_lock(mp); - ret = 0; - goto out; - case VNODE_CLAIMED: - default: - break; + case VNODE_CLAIMED_DONE: + mount_lock(mp); + ret = 0; + goto out; + case VNODE_CLAIMED: + default: + break; } mount_lock(mp); } @@ -710,7 +727,7 @@ out: vnode_iterate_clear(mp); mount_unlock(mp); mount_iterate_unlock(mp); - return (ret); + return ret; } void @@ -759,29 +776,34 @@ mount_unlock(mount_t mp) void mount_ref(mount_t mp, int locked) { - if ( !locked) - mount_lock_spin(mp); - + if (!locked) { + mount_lock_spin(mp); + } + mp->mnt_count++; - if ( !locked) - mount_unlock(mp); + if (!locked) { + mount_unlock(mp); + } } void mount_drop(mount_t mp, int locked) { - if ( !locked) - mount_lock_spin(mp); - + if (!locked) { + mount_lock_spin(mp); + } + mp->mnt_count--; - if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) - wakeup(&mp->mnt_lflag); + if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) { + wakeup(&mp->mnt_lflag); + } - if ( !locked) - mount_unlock(mp); + if (!locked) { + mount_unlock(mp); + } } @@ -790,16 +812,18 @@ mount_iterref(mount_t mp, int locked) { int retval = 0; - if (!locked) + if (!locked) { mount_list_lock(); + } if (mp->mnt_iterref < 0) { retval = 1; } else { mp->mnt_iterref++; } - if (!locked) + if (!locked) { mount_list_unlock(); - return(retval); + } + return retval; } int @@ -807,15 +831,18 @@ mount_isdrained(mount_t mp, int locked) { int retval; - if (!locked) + if (!locked) { mount_list_lock(); - if (mp->mnt_iterref < 0) + } + if (mp->mnt_iterref < 0) { retval = 1; - else - retval = 0; - if (!locked) + } else { + retval = 0; + } + if (!locked) { mount_list_unlock(); - return(retval); + } + return retval; } void @@ -831,8 +858,9 @@ void mount_iterdrain(mount_t mp) { mount_list_lock(); - while (mp->mnt_iterref) + while (mp->mnt_iterref) { msleep((caddr_t)&mp->mnt_iterref, mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL); + } /* mount iterations drained */ mp->mnt_iterref = -1; mount_list_unlock(); @@ -841,36 +869,41 @@ void mount_iterreset(mount_t mp) { mount_list_lock(); - if (mp->mnt_iterref == -1) + if (mp->mnt_iterref == -1) { mp->mnt_iterref = 0; + } mount_list_unlock(); } /* always called with mount lock held */ -int +int mount_refdrain(mount_t mp) { - if (mp->mnt_lflag & MNT_LDRAIN) + if (mp->mnt_lflag & MNT_LDRAIN) { panic("already in drain"); + } mp->mnt_lflag |= MNT_LDRAIN; - while (mp->mnt_count) + while (mp->mnt_count) { msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL); + } - if (mp->mnt_vnodelist.tqh_first != NULL) - panic("mount_refdrain: dangling vnode"); + if (mp->mnt_vnodelist.tqh_first != NULL) { + panic("mount_refdrain: dangling vnode"); + } mp->mnt_lflag &= ~MNT_LDRAIN; - return(0); + return 0; } /* Tags the mount point as not supportine extended readdir for NFS exports */ -void -mount_set_noreaddirext(mount_t mp) { - mount_lock (mp); +void +mount_set_noreaddirext(mount_t mp) +{ + mount_lock(mp); mp->mnt_kern_flag |= MNTK_DENY_READDIREXT; - 
mount_unlock (mp); + mount_unlock(mp); } /* @@ -880,17 +913,17 @@ mount_set_noreaddirext(mount_t mp) { int vfs_busy(mount_t mp, int flags) { - restart: - if (mp->mnt_lflag & MNT_LDEAD) - return (ENOENT); + if (mp->mnt_lflag & MNT_LDEAD) { + return ENOENT; + } mount_lock(mp); if (mp->mnt_lflag & MNT_LUNMOUNT) { if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) { - mount_unlock(mp); - return (ENOENT); + mount_unlock(mp); + return ENOENT; } /* @@ -901,14 +934,14 @@ restart: */ mp->mnt_lflag |= MNT_LWAIT; msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL); - return (ENOENT); + return ENOENT; } mount_unlock(mp); lck_rw_lock_shared(&mp->mnt_rwlock); - /* + /* * Until we are granted the rwlock, it's possible for the mount point to * change state, so re-evaluate before granting the vfs_busy. */ @@ -916,7 +949,7 @@ restart: lck_rw_done(&mp->mnt_rwlock); goto restart; } - return (0); + return 0; } /* @@ -931,8 +964,8 @@ vfs_unbusy(mount_t mp) static void -vfs_rootmountfailed(mount_t mp) { - +vfs_rootmountfailed(mount_t mp) +{ mount_list_lock(); mp->mnt_vtable->vfc_refcount--; mount_list_unlock(); @@ -957,7 +990,7 @@ vfs_rootmountfailed(mount_t mp) { static mount_t vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname) { - mount_t mp; + mount_t mp; mp = _MALLOC_ZONE(sizeof(struct mount), M_MOUNT, M_WAITOK); bzero((char *)mp, sizeof(struct mount)); @@ -1004,27 +1037,31 @@ vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname) mac_mount_label_init(mp); mac_mount_label_associate(vfs_context_kernel(), mp); #endif - return (mp); + return mp; } errno_t vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp) { - struct vfstable *vfsp; + struct vfstable *vfsp; - for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) - if (!strncmp(vfsp->vfc_name, fstypename, - sizeof(vfsp->vfc_name))) - break; - if (vfsp == NULL) - return (ENODEV); + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { + if (!strncmp(vfsp->vfc_name, fstypename, + sizeof(vfsp->vfc_name))) { + break; + } + } + if (vfsp == NULL) { + return ENODEV; + } *mpp = vfs_rootmountalloc_internal(vfsp, devname); - if (*mpp) - return (0); + if (*mpp) { + return 0; + } - return (ENOMEM); + return ENOMEM; } #define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0)) @@ -1045,10 +1082,10 @@ vfs_mountroot(void) #endif struct vfstable *vfsp; vfs_context_t ctx = vfs_context_kernel(); - struct vfs_attr vfsattr; - int error; + struct vfs_attr vfsattr; + int error; mount_t mp; - vnode_t bdevvp_rootvp; + vnode_t bdevvp_rootvp; KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START); if (mountroot != NULL) { @@ -1058,36 +1095,37 @@ vfs_mountroot(void) error = (*mountroot)(); KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0); - return (error); + return error; } if ((error = bdevvp(rootdev, &rootvp))) { printf("vfs_mountroot: can't setup bdevvp\n"); KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1); - return (error); + return error; } /* - * 4951998 - code we call in vfc_mountroot may replace rootvp + * 4951998 - code we call in vfc_mountroot may replace rootvp * so keep a local copy for some house keeping. 
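vfs_busy() above checks MNT_LUNMOUNT under the mount mutex, takes the rwlock shared, and then re-evaluates the unmount state, because the mount can change state between dropping the mutex and being granted the rwlock. The same acquire-then-revalidate shape with pthreads standing in for the kernel locks; fields and return values are illustrative:

#include <pthread.h>
#include <stdio.h>

struct mountpt {
    pthread_mutex_t mlock;
    pthread_rwlock_t rwlock;
    int unmounting;
};

static struct mountpt mp = {
    PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_RWLOCK_INITIALIZER,
    0
};

static int
busy(struct mountpt *m)
{
restart:
    pthread_mutex_lock(&m->mlock);
    if (m->unmounting) {
        pthread_mutex_unlock(&m->mlock);
        return -1;                     /* the kernel returns ENOENT */
    }
    pthread_mutex_unlock(&m->mlock);

    pthread_rwlock_rdlock(&m->rwlock);

    /* state may change before the rwlock is granted: re-check */
    if (m->unmounting) {
        pthread_rwlock_unlock(&m->rwlock);
        goto restart;
    }
    return 0;                          /* now holding the shared lock */
}

int
main(void)
{
    printf("busy() -> %d\n", busy(&mp));
    return 0;
}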
*/ bdevvp_rootvp = rootvp; for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { if (vfsp->vfc_mountroot == NULL - && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) { + && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) { continue; } mp = vfs_rootmountalloc_internal(vfsp, "root_device"); mp->mnt_devvp = rootvp; - if (vfsp->vfc_mountroot) + if (vfsp->vfc_mountroot) { error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx); - else + } else { error = VFS_MOUNT(mp, rootvp, 0, ctx); + } if (!error) { - if ( bdevvp_rootvp != rootvp ) { + if (bdevvp_rootvp != rootvp) { /* * rootvp changed... * bump the iocount and fix up mnt_devvp for the @@ -1098,8 +1136,8 @@ vfs_mountroot(void) vnode_getwithref(rootvp); mp->mnt_devvp = rootvp; - vnode_rele(bdevvp_rootvp); - vnode_put(bdevvp_rootvp); + vnode_rele(bdevvp_rootvp); + vnode_put(bdevvp_rootvp); } mp->mnt_devvp->v_specflags |= SI_MOUNTEDON; @@ -1132,9 +1170,13 @@ vfs_mountroot(void) #if !CONFIG_EMBEDDED uint32_t speed; - if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) speed = 128; - else if (disk_conditioner_mount_is_ssd(mp)) speed = 7*256; - else speed = 256; + if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) { + speed = 128; + } else if (disk_conditioner_mount_is_ssd(mp)) { + speed = 7 * 256; + } else { + speed = 256; + } vc_progress_setdiskspeed(speed); #endif /* @@ -1144,7 +1186,7 @@ vfs_mountroot(void) VFSATTR_INIT(&vfsattr); VFSATTR_WANTED(&vfsattr, f_capabilities); - if (vfs_getattr(mp, &vfsattr, ctx) == 0 && + if (vfs_getattr(mp, &vfsattr, ctx) == 0 && VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) { if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) && (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) { @@ -1162,7 +1204,7 @@ vfs_mountroot(void) } if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) && - (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) { + (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) { mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS; } } @@ -1178,7 +1220,7 @@ vfs_mountroot(void) #if CONFIG_MACF if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) { KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2); - return (0); + return 0; } error = VFS_ROOT(mp, &vp, ctx); @@ -1202,18 +1244,19 @@ vfs_mountroot(void) } #endif KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3); - return (0); + return 0; } #if CONFIG_MACF fail: #endif vfs_rootmountfailed(mp); - if (error != EINVAL) + if (error != EINVAL) { printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error); + } } KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? 
error : ENODEV, 4); - return (ENODEV); + return ENODEV; } /* @@ -1223,13 +1266,13 @@ fail: struct mount * vfs_getvfs(fsid_t *fsid) { - return (mount_list_lookupby_fsid(fsid, 0, 0)); + return mount_list_lookupby_fsid(fsid, 0, 0); } static struct mount * vfs_getvfs_locked(fsid_t *fsid) { - return(mount_list_lookupby_fsid(fsid, 1, 0)); + return mount_list_lookupby_fsid(fsid, 1, 0); } struct mount * @@ -1241,16 +1284,17 @@ vfs_getvfs_by_mntonname(char *path) mount_list_lock(); TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (!strncmp(mp->mnt_vfsstat.f_mntonname, path, - sizeof(mp->mnt_vfsstat.f_mntonname))) { + sizeof(mp->mnt_vfsstat.f_mntonname))) { retmp = mp; - if (mount_iterref(retmp, 1)) + if (mount_iterref(retmp, 1)) { retmp = NULL; + } goto out; } } out: mount_list_unlock(); - return (retmp); + return retmp; } /* generation number for creation of new fsids */ @@ -1261,7 +1305,6 @@ u_short mntid_gen = 0; void vfs_getnewfsid(struct mount *mp) { - fsid_t tfsid; int mtype; @@ -1269,14 +1312,16 @@ vfs_getnewfsid(struct mount *mp) /* generate a new fsid */ mtype = mp->mnt_vtable->vfc_typenum; - if (++mntid_gen == 0) + if (++mntid_gen == 0) { mntid_gen++; + } tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen); tfsid.val[1] = mtype; while (vfs_getvfs_locked(&tfsid)) { - if (++mntid_gen == 0) + if (++mntid_gen == 0) { mntid_gen++; + } tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen); } @@ -1288,7 +1333,7 @@ vfs_getnewfsid(struct mount *mp) /* * Routines having to do with the management of the vnode table. */ -extern int (**dead_vnodeop_p)(void *); +extern int(**dead_vnodeop_p)(void *); long numvnodes, freevnodes, deadvnodes, async_work_vnodes; @@ -1307,9 +1352,10 @@ insmntque(vnode_t vp, mount_t mp) /* * Delete from old mount point vnode list, if on one. */ - if ( (lmp = vp->v_mount) != NULL && lmp != dead_mountp) { - if ((vp->v_lflag & VNAMED_MOUNT) == 0) + if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) { + if ((vp->v_lflag & VNAMED_MOUNT) == 0) { panic("insmntque: vp not in mount vnode list"); + } vp->v_lflag &= ~VNAMED_MOUNT; mount_lock_spin(lmp); @@ -1317,16 +1363,17 @@ insmntque(vnode_t vp, mount_t mp) mount_drop(lmp, 1); if (vp->v_mntvnodes.tqe_next == NULL) { - if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) + if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) { TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes); - else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) + } else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) { TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes); - else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) + } else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) { TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes); - } else { + } + } else { vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev; *vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next; - } + } vp->v_mntvnodes.tqe_next = NULL; vp->v_mntvnodes.tqe_prev = NULL; mount_unlock(lmp); @@ -1338,14 +1385,17 @@ insmntque(vnode_t vp, mount_t mp) */ if ((vp->v_mount = mp) != NULL) { mount_lock_spin(mp); - if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) + if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) { panic("vp already in mount list"); - if (mp->mnt_lflag & MNT_LITER) + } + if (mp->mnt_lflag & MNT_LITER) { TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes); - else + } else { TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes); - if (vp->v_lflag & VNAMED_MOUNT) + } + if (vp->v_lflag & VNAMED_MOUNT) { 
panic("insmntque: vp already in mount vnode list"); + } vp->v_lflag |= VNAMED_MOUNT; mount_ref(mp, 1); mount_unlock(mp); @@ -1361,14 +1411,14 @@ insmntque(vnode_t vp, mount_t mp) int bdevvp(dev_t dev, vnode_t *vpp) { - vnode_t nvp; - int error; + vnode_t nvp; + int error; struct vnode_fsparam vfsp; struct vfs_context context; if (dev == NODEV) { *vpp = NULLVP; - return (ENODEV); + return ENODEV; } context.vc_thread = current_thread(); @@ -1389,41 +1439,41 @@ bdevvp(dev_t dev, vnode_t *vpp) vfsp.vnfs_marksystem = 0; vfsp.vnfs_markroot = 0; - if ( (error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp)) ) { + if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) { *vpp = NULLVP; - return (error); + return error; } vnode_lock_spin(nvp); nvp->v_flag |= VBDEVVP; - nvp->v_tag = VT_NON; /* set this to VT_NON so during aliasing it can be replaced */ + nvp->v_tag = VT_NON; /* set this to VT_NON so during aliasing it can be replaced */ vnode_unlock(nvp); - if ( (error = vnode_ref(nvp)) ) { + if ((error = vnode_ref(nvp))) { panic("bdevvp failed: vnode_ref"); - return (error); + return error; } - if ( (error = VNOP_FSYNC(nvp, MNT_WAIT, &context)) ) { + if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) { panic("bdevvp failed: fsync"); - return (error); + return error; } - if ( (error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0)) ) { + if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) { panic("bdevvp failed: invalidateblks"); - return (error); + return error; } #if CONFIG_MACF - /* + /* * XXXMAC: We can't put a MAC check here, the system will * panic without this vnode. */ -#endif /* MAC */ +#endif /* MAC */ - if ( (error = VNOP_OPEN(nvp, FREAD, &context)) ) { + if ((error = VNOP_OPEN(nvp, FREAD, &context))) { panic("bdevvp failed: open"); - return (error); + return error; } *vpp = nvp; - return (0); + return 0; } /* @@ -1448,7 +1498,7 @@ loop: for (vp = *vpp; vp; vp = vp->v_specnext) { if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) { - vid = vp->v_id; + vid = vp->v_id; break; } } @@ -1456,8 +1506,8 @@ loop: if (vp) { found_alias: - if (vnode_getwithvid(vp,vid)) { - goto loop; + if (vnode_getwithvid(vp, vid)) { + goto loop; } /* * Termination state is checked in vnode_getwithvid @@ -1468,17 +1518,16 @@ found_alias: * Alias, but not in use, so flush it out. 
*/ if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) { - vnode_reclaim_internal(vp, 1, 1, 0); + vnode_reclaim_internal(vp, 1, 1, 0); vnode_put_locked(vp); vnode_unlock(vp); goto loop; } - } if (vp == NULL || vp->v_tag != VT_NON) { if (sin == NULL) { MALLOC_ZONE(sin, struct specinfo *, sizeof(struct specinfo), - M_SPECINFO, M_WAITOK); + M_SPECINFO, M_WAITOK); } nvp->v_specinfo = sin; @@ -1491,7 +1540,7 @@ found_alias: nvp->v_specinfo->si_throttleable = 0; SPECHASH_LOCK(); - + /* We dropped the lock, someone could have added */ if (vp == NULLVP) { for (vp = *vpp; vp; vp = vp->v_specnext) { @@ -1501,7 +1550,7 @@ found_alias: goto found_alias; } } - } + } nvp->v_hashchain = vpp; nvp->v_specnext = *vpp; @@ -1517,19 +1566,20 @@ found_alias: SPECHASH_UNLOCK(); } - return (NULLVP); + return NULLVP; } if (sin) { FREE_ZONE(sin, sizeof(struct specinfo), M_SPECINFO); } - if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) - return(vp); + if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) { + return vp; + } panic("checkalias with VT_NON vp that shouldn't: %p", vp); - return (vp); + return vp; } @@ -1550,17 +1600,18 @@ vget_internal(vnode_t vp, int vid, int vflags) vnode_lock_spin(vp); - if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) - /* - * vnode to be returned only if it has writers opened + if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) { + /* + * vnode to be returned only if it has writers opened */ - error = EINVAL; - else - error = vnode_getiocount(vp, vid, vflags); + error = EINVAL; + } else { + error = vnode_getiocount(vp, vid, vflags); + } vnode_unlock(vp); - return (error); + return error; } /* @@ -1570,8 +1621,7 @@ vget_internal(vnode_t vp, int vid, int vflags) int vnode_ref(vnode_t vp) { - - return (vnode_ref_ext(vp, 0, 0)); + return vnode_ref_ext(vp, 0, 0); } /* @@ -1581,7 +1631,7 @@ vnode_ref(vnode_t vp) int vnode_ref_ext(vnode_t vp, int fmode, int flags) { - int error = 0; + int error = 0; vnode_lock_spin(vp); @@ -1590,8 +1640,9 @@ vnode_ref_ext(vnode_t vp, int fmode, int flags) * taken an iocount, we can toughen this assert up and insist that the * iocount is non-zero... a non-zero usecount doesn't insure correctness */ - if (vp->v_iocount <= 0 && vp->v_usecount <= 0) + if (vp->v_iocount <= 0 && vp->v_usecount <= 0) { panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount); + } /* * if you are the owner of drain/termination, can acquire usecount @@ -1607,21 +1658,23 @@ vnode_ref_ext(vnode_t vp, int fmode, int flags) vp->v_usecount++; if (fmode & FWRITE) { - if (++vp->v_writecount <= 0) - panic("vnode_ref_ext: v_writecount"); + if (++vp->v_writecount <= 0) { + panic("vnode_ref_ext: v_writecount"); + } } if (fmode & O_EVTONLY) { - if (++vp->v_kusecount <= 0) - panic("vnode_ref_ext: v_kusecount"); + if (++vp->v_kusecount <= 0) { + panic("vnode_ref_ext: v_kusecount"); + } } if (vp->v_flag & VRAGE) { - struct uthread *ut; + struct uthread *ut; + + ut = get_bsdthread_info(current_thread()); - ut = get_bsdthread_info(current_thread()); - - if ( !(current_proc()->p_lflag & P_LRAGE_VNODES) && - !(ut->uu_flag & UT_RAGE_VNODES)) { - /* + if (!(current_proc()->p_lflag & P_LRAGE_VNODES) && + !(ut->uu_flag & UT_RAGE_VNODES)) { + /* * a 'normal' process accessed this vnode * so make sure its no longer marked * for rapid aging... 
also, make sure @@ -1635,7 +1688,6 @@ vnode_ref_ext(vnode_t vp, int fmode, int flags) } } if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) { - if (vp->v_ubcinfo) { vnode_lock_convert(vp); memory_object_mark_used(vp->v_ubcinfo->ui_control); @@ -1644,16 +1696,17 @@ vnode_ref_ext(vnode_t vp, int fmode, int flags) out: vnode_unlock(vp); - return (error); + return error; } boolean_t vnode_on_reliable_media(vnode_t vp) { - if ( !(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) && (vp->v_mount->mnt_flag & MNT_LOCAL) ) - return (TRUE); - return (FALSE); + if (!(vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) && (vp->v_mount->mnt_flag & MNT_LOCAL)) { + return TRUE; + } + return FALSE; } static void @@ -1661,8 +1714,9 @@ vnode_async_list_add(vnode_t vp) { vnode_list_lock(); - if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE|VL_DEAD))) + if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) { panic("vnode_async_list_add: %p is in wrong state", vp); + } TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist); vp->v_listflag |= VLIST_ASYNC_WORK; @@ -1672,7 +1726,6 @@ vnode_async_list_add(vnode_t vp) vnode_list_unlock(); wakeup(&vnode_async_work_list); - } @@ -1692,10 +1745,11 @@ vnode_list_add(vnode_t vp) again: /* - * if it is already on a list or non zero references return + * if it is already on a list or non zero references return */ - if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) + if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) { return; + } /* * In vclean, we might have deferred ditching locked buffers @@ -1703,8 +1757,8 @@ again: * usecount). We can ditch them now. */ if (ISSET(vp->v_lflag, VL_DEAD) - && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) { - ++vp->v_iocount; // Probably not necessary, but harmless + && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) { + ++vp->v_iocount; // Probably not necessary, but harmless #ifdef JOE_DEBUG record_vp(vp, 1); #endif @@ -1721,10 +1775,11 @@ again: /* * add the new guy to the appropriate end of the RAGE list */ - if ((vp->v_flag & VAGE)) - TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist); - else - TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist); + if ((vp->v_flag & VAGE)) { + TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist); + } else { + TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist); + } vp->v_listflag |= VLIST_RAGE; ragevnodes++; @@ -1739,12 +1794,12 @@ again: */ microuptime(&rage_tv); } else { - /* + /* * if VL_DEAD, insert it at head of the dead list * else insert at tail of LRU list or at head if VAGE is set */ - if ( (vp->v_lflag & VL_DEAD)) { - TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist); + if ((vp->v_lflag & VL_DEAD)) { + TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist); vp->v_listflag |= VLIST_DEAD; deadvnodes++; @@ -1752,20 +1807,20 @@ again: dead_vnode_wanted--; need_dead_wakeup = TRUE; } - - } else if ( (vp->v_flag & VAGE) ) { - TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); + } else if ((vp->v_flag & VAGE)) { + TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); vp->v_flag &= ~VAGE; freevnodes++; } else { - TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); + TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); freevnodes++; } } vnode_list_unlock(); - if (need_dead_wakeup == TRUE) + if (need_dead_wakeup == TRUE) { wakeup_one((caddr_t)&dead_vnode_wanted); + } } @@ -1782,14 +1837,15 @@ vnode_list_remove_locked(vnode_t vp) * the 
v_listflag field is * protected by the vnode_list_lock */ - if (vp->v_listflag & VLIST_RAGE) - VREMRAGE("vnode_list_remove", vp); - else if (vp->v_listflag & VLIST_DEAD) - VREMDEAD("vnode_list_remove", vp); - else if (vp->v_listflag & VLIST_ASYNC_WORK) - VREMASYNC_WORK("vnode_list_remove", vp); - else - VREMFREE("vnode_list_remove", vp); + if (vp->v_listflag & VLIST_RAGE) { + VREMRAGE("vnode_list_remove", vp); + } else if (vp->v_listflag & VLIST_DEAD) { + VREMDEAD("vnode_list_remove", vp); + } else if (vp->v_listflag & VLIST_ASYNC_WORK) { + VREMASYNC_WORK("vnode_list_remove", vp); + } else { + VREMFREE("vnode_list_remove", vp); + } } } @@ -1804,14 +1860,14 @@ vnode_list_remove(vnode_t vp) #if DIAGNOSTIC lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED); #endif - /* + /* * we want to avoid taking the list lock * in the case where we're not on the free * list... this will be true for most * directories and any currently in use files * * we're guaranteed that we can't go from - * the not-on-list state to the on-list + * the not-on-list state to the on-list * state since we hold the vnode lock... * all calls to vnode_list_add are done * under the vnode lock... so we can @@ -1819,12 +1875,12 @@ vnode_list_remove(vnode_t vp) * without taking the list lock */ if (VONLIST(vp)) { - vnode_list_lock(); + vnode_list_lock(); /* * however, we're not guaranteed that * we won't go from the on-list state * to the not-on-list state until we - * hold the vnode_list_lock... this + * hold the vnode_list_lock... this * is due to "new_vnode" removing vnodes * from the free list uder the list_lock * w/o the vnode lock... so we need to @@ -1841,40 +1897,45 @@ vnode_list_remove(vnode_t vp) void vnode_rele(vnode_t vp) { - vnode_rele_internal(vp, 0, 0, 0); + vnode_rele_internal(vp, 0, 0, 0); } void vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter) { - vnode_rele_internal(vp, fmode, dont_reenter, 0); + vnode_rele_internal(vp, fmode, dont_reenter, 0); } void vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked) { - - if ( !locked) - vnode_lock_spin(vp); + if (!locked) { + vnode_lock_spin(vp); + } #if DIAGNOSTIC - else + else { lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED); + } #endif - if (--vp->v_usecount < 0) - panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag); + if (--vp->v_usecount < 0) { + panic("vnode_rele_ext: vp %p usecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag); + } if (fmode & FWRITE) { - if (--vp->v_writecount < 0) - panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag); + if (--vp->v_writecount < 0) { + panic("vnode_rele_ext: vp %p writecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag); + } } if (fmode & O_EVTONLY) { - if (--vp->v_kusecount < 0) - panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag); + if (--vp->v_kusecount < 0) { + panic("vnode_rele_ext: vp %p kusecount -ve : %d. v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag); + } + } + if (vp->v_kusecount > vp->v_usecount) { + panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). 
v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag); } - if (vp->v_kusecount > vp->v_usecount) - panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d). v_tag = %d, v_type = %d, v_flag = %x.",vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag); if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) { /* @@ -1898,9 +1959,9 @@ vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked) * the latter case, we'll mark the vnode aged */ if (dont_reenter) { - if ( !(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM)) ) { + if (!(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM))) { vp->v_lflag |= VL_NEEDINACTIVE; - + if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) { vnode_async_list_add(vp); goto done; @@ -1922,7 +1983,7 @@ vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked) #ifdef JOE_DEBUG record_vp(vp, 1); #endif - vp->v_lflag &= ~VL_NEEDINACTIVE; + vp->v_lflag &= ~VL_NEEDINACTIVE; vnode_unlock(vp); VNOP_INACTIVE(vp, vfs_context_current()); @@ -1937,32 +1998,32 @@ vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked) * this point... if the reference counts are up, we'll pick * up the MARKTERM state when they get subsequently dropped */ - if ( (vp->v_iocount == 1) && (vp->v_usecount == 0) && - ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) { - struct uthread *ut; + if ((vp->v_iocount == 1) && (vp->v_usecount == 0) && + ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) { + struct uthread *ut; + + ut = get_bsdthread_info(current_thread()); - ut = get_bsdthread_info(current_thread()); - if (ut->uu_defer_reclaims) { - vp->v_defer_reclaimlist = ut->uu_vreclaims; + vp->v_defer_reclaimlist = ut->uu_vreclaims; ut->uu_vreclaims = vp; goto done; } vnode_lock_convert(vp); - vnode_reclaim_internal(vp, 1, 1, 0); + vnode_reclaim_internal(vp, 1, 1, 0); } vnode_dropiocount(vp); vnode_list_add(vp); done: if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) { - if (vp->v_ubcinfo) { vnode_lock_convert(vp); memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE); } } - if ( !locked) - vnode_unlock(vp); + if (!locked) { + vnode_unlock(vp); + } return; } @@ -1975,7 +2036,7 @@ done: * that are found. */ #if DIAGNOSTIC -int busyprt = 0; /* print out busy vnodes */ +int busyprt = 0; /* print out busy vnodes */ #endif int @@ -2001,33 +2062,32 @@ vflush(struct mount *mp, struct vnode *skipvp, int flags) * tries unmounting every so often to see whether * it is still busy or not. 
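The reclaim path above can defer work: when the current uthread has uu_defer_reclaims set, the vnode is pushed onto a per-thread singly linked list (uu_vreclaims) instead of being reclaimed inline. A sketch of that deferral, with field names loosely mirroring the kernel's (all types here are illustrative):

struct vnode_like {
	struct vnode_like *defer_next;      /* analogous to v_defer_reclaimlist */
};

struct uthread_like {
	int                defer_reclaims;  /* analogous to uu_defer_reclaims */
	struct vnode_like *reclaim_list;    /* analogous to uu_vreclaims */
};

/*
 * Instead of reclaiming inline, push the node onto the current
 * thread's singly linked list and let the thread drain it later,
 * at a point where re-entering the filesystem is safe.
 */
static int
maybe_defer_reclaim(struct uthread_like *ut, struct vnode_like *vp)
{
	if (ut->defer_reclaims) {
		vp->defer_next   = ut->reclaim_list;
		ut->reclaim_list = vp;
		return 1;       /* deferred; skip inline reclaim */
	}
	return 0;           /* caller reclaims inline */
}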
*/ - if (((flags & FORCECLOSE)==0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) { + if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) { if (vnode_umount_preflight(mp, skipvp, flags)) { vnode_iterate_clear(mp); mount_unlock(mp); mount_iterate_unlock(mp); - return(EBUSY); + return EBUSY; } } loop: /* If it returns 0 then there is nothing to do */ retval = vnode_iterate_prepare(mp); - if (retval == 0) { + if (retval == 0) { vnode_iterate_clear(mp); mount_unlock(mp); mount_iterate_unlock(mp); - return(retval); + return retval; } /* iterate over all the vnodes */ while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) { - vp = TAILQ_FIRST(&mp->mnt_workerqueue); TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes); TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes); - if ( (vp->v_mount != mp) || (vp == skipvp)) { + if ((vp->v_mount != mp) || (vp == skipvp)) { continue; } vid = vp->v_id; @@ -2042,15 +2102,15 @@ loop: } if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) { - vnode_unlock(vp); - mount_lock(mp); - continue; + vnode_unlock(vp); + mount_lock(mp); + continue; } /* * If requested, skip over vnodes marked VSYSTEM. * Skip over all vnodes marked VNOFLUSH. - */ + */ if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) { vnode_unlock(vp); @@ -2089,9 +2149,8 @@ loop: */ if (((vp->v_usecount == 0) || ((vp->v_usecount - vp->v_kusecount) == 0))) { - vnode_lock_convert(vp); - vp->v_iocount++; /* so that drain waits for * other iocounts */ + vp->v_iocount++; /* so that drain waits for * other iocounts */ #ifdef JOE_DEBUG record_vp(vp, 1); #endif @@ -2113,7 +2172,7 @@ loop: vnode_lock_convert(vp); if (vp->v_type != VBLK && vp->v_type != VCHR) { - vp->v_iocount++; /* so that drain waits * for other iocounts */ + vp->v_iocount++; /* so that drain waits * for other iocounts */ #ifdef JOE_DEBUG record_vp(vp, 1); #endif @@ -2133,8 +2192,9 @@ loop: continue; } #if DIAGNOSTIC - if (busyprt) + if (busyprt) { vprint("vflush: busy vnode", vp); + } #endif vnode_unlock(vp); mount_lock(mp); @@ -2142,7 +2202,7 @@ loop: } /* At this point the worker queue is completed */ - if (busy && ((flags & FORCECLOSE)==0) && reclaimed) { + if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) { busy = 0; reclaimed = 0; (void)vnode_iterate_reloadq(mp); @@ -2151,17 +2211,19 @@ loop: } /* if new vnodes were created in between retry the reclaim */ - if ( vnode_iterate_reloadq(mp) != 0) { - if (!(busy && ((flags & FORCECLOSE)==0))) + if (vnode_iterate_reloadq(mp) != 0) { + if (!(busy && ((flags & FORCECLOSE) == 0))) { goto loop; + } } vnode_iterate_clear(mp); mount_unlock(mp); mount_iterate_unlock(mp); - if (busy && ((flags & FORCECLOSE)==0)) - return (EBUSY); - return (0); + if (busy && ((flags & FORCECLOSE) == 0)) { + return EBUSY; + } + return 0; } long num_recycledvnodes = 0; @@ -2213,22 +2275,25 @@ vclean(vnode_t vp, int flags) OSAddAtomicLong(1, &num_recycledvnodes); - if (flags & DOCLOSE) + if (flags & DOCLOSE) { clflags |= IO_NDELAY; - if (flags & REVOKEALL) + } + if (flags & REVOKEALL) { clflags |= IO_REVOKE; - - if (active && (flags & DOCLOSE)) + } + + if (active && (flags & DOCLOSE)) { VNOP_CLOSE(vp, clflags, ctx); + } /* * Clean out any buffers associated with the vnode. 
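vflush() above snapshots v_id before dropping the mount lock and revalidates it after vnode_getwithvid() and vnode_lock(): a changed id, or a VL_DEAD flag, means the vnode was recycled in the window and must be skipped. The generation-counter idiom in isolation, as a hedged user-space sketch:

#include <stdint.h>
#include <pthread.h>

struct gobj {
	pthread_mutex_t lock;
	uint32_t        id;     /* bumped on reclaim, like v_id */
	int             dead;
};

/*
 * The object's identity may change while no lock is held, so the
 * caller snapshots the id first and verifies it after relocking;
 * a mismatch means the object was recycled and must be skipped.
 * On success the lock is returned held.
 */
static int
lock_if_same_identity(struct gobj *op, uint32_t snapshot_id)
{
	pthread_mutex_lock(&op->lock);
	if (op->id != snapshot_id || op->dead) {
		pthread_mutex_unlock(&op->lock);
		return 0;       /* lost the race; caller moves on */
	}
	return 1;           /* still the same object, lock held */
}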
*/ if (flags & DOCLOSE) { #if NFSCLIENT - if (vp->v_tag == VT_NFS) + if (vp->v_tag == VT_NFS) { nfs_vinvalbuf(vp, V_SAVE, ctx, 0); - else + } else #endif { VNOP_FSYNC(vp, MNT_WAIT, ctx); @@ -2242,25 +2307,27 @@ vclean(vnode_t vp, int flags) */ buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0); } - if (UBCINFOEXISTS(vp)) - /* + if (UBCINFOEXISTS(vp)) { + /* * Clean the pages in VM. */ - (void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC); + (void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC); + } } - if (active || need_inactive) + if (active || need_inactive) { VNOP_INACTIVE(vp, ctx); + } #if NAMEDSTREAMS if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) { vnode_t pvp = vp->v_parent; - + /* Delete the shadow stream file before we reclaim its vnode */ if (vnode_isshadow(vp)) { vnode_relenamedstream(pvp, vp); } - - /* + + /* * No more streams associated with the parent. We * have a ref on it, so its identity is stable. * If the parent is on an opaque volume, then we need to know @@ -2287,16 +2354,18 @@ vclean(vnode_t vp, int flags) /* * cleanup trigger info from vnode (if any) */ - if (vp->v_resolve) + if (vp->v_resolve) { vnode_resolver_detach(vp); + } #endif /* * Reclaim the vnode. */ - if (VNOP_RECLAIM(vp, ctx)) + if (VNOP_RECLAIM(vp, ctx)) { panic("vclean: cannot reclaim"); - + } + // make sure the name & parent ptrs get cleaned out! vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE); @@ -2319,12 +2388,12 @@ vclean(vnode_t vp, int flags) vp->v_flag &= ~VISDIRTY; if (already_terminating == 0) { - vp->v_lflag &= ~VL_TERMINATE; + vp->v_lflag &= ~VL_TERMINATE; /* * Done with purge, notify sleepers of the grim news. 
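The vclean() ordering above is deliberate: close writers first, then flush or invalidate buffers and UBC pages, then run VNOP_INACTIVE, and only reclaim per-filesystem state last. A deliberately simplified sketch of that ordering with hypothetical callbacks (these are stand-ins, not the real VNOP_* signatures or semantics):

struct fsops {
	void (*close)(void *node);      /* like VNOP_CLOSE */
	void (*flush)(void *node);      /* like VNOP_FSYNC + buf_invalidateblks */
	void (*inactive)(void *node);   /* like VNOP_INACTIVE */
	void (*reclaim)(void *node);    /* like VNOP_RECLAIM */
};

static void
teardown(const struct fsops *ops, void *node, int was_active)
{
	if (was_active) {
		ops->close(node);
	}
	ops->flush(node);               /* nothing dirty may survive this */
	if (was_active) {
		ops->inactive(node);
	}
	ops->reclaim(node);             /* per-FS state is gone after this */
}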
*/ if (vp->v_lflag & VL_TERMWANT) { - vp->v_lflag &= ~VL_TERMWANT; + vp->v_lflag &= ~VL_TERMWANT; wakeup(&vp->v_lflag); } } @@ -2345,8 +2414,9 @@ vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context) int vid; #if DIAGNOSTIC - if ((flags & REVOKEALL) == 0) + if ((flags & REVOKEALL) == 0) { panic("vnop_revoke"); + } #endif if (vnode_isaliased(vp)) { @@ -2354,8 +2424,9 @@ vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context) * If a vgone (or vclean) is already in progress, * return an immediate error */ - if (vp->v_lflag & VL_TERMINATE) - return(ENOENT); + if (vp->v_lflag & VL_TERMINATE) { + return ENOENT; + } /* * Ensure that vp will not be vgone'd while we @@ -2365,12 +2436,13 @@ vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context) while ((vp->v_specflags & SI_ALIASED)) { for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { if (vq->v_rdev != vp->v_rdev || - vq->v_type != vp->v_type || vp == vq) + vq->v_type != vp->v_type || vp == vq) { continue; + } vid = vq->v_id; SPECHASH_UNLOCK(); - if (vnode_getwithvid(vq,vid)){ - SPECHASH_LOCK(); + if (vnode_getwithvid(vq, vid)) { + SPECHASH_LOCK(); break; } vnode_lock(vq); @@ -2388,12 +2460,12 @@ vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context) vnode_lock(vp); if (vp->v_lflag & VL_TERMINATE) { vnode_unlock(vp); - return (ENOENT); + return ENOENT; } vnode_reclaim_internal(vp, 1, 0, REVOKEALL); vnode_unlock(vp); - return (0); + return 0; } /* @@ -2408,14 +2480,14 @@ vnode_recycle(struct vnode *vp) if (vp->v_iocount || vp->v_usecount) { vp->v_lflag |= VL_MARKTERM; vnode_unlock(vp); - return(0); - } + return 0; + } vnode_lock_convert(vp); vnode_reclaim_internal(vp, 1, 0, 0); vnode_unlock(vp); - return (1); + return 1; } static int @@ -2425,16 +2497,17 @@ vnode_reload(vnode_t vp) if ((vp->v_iocount > 1) || vp->v_usecount) { vnode_unlock(vp); - return(0); - } - if (vp->v_iocount <= 0) + return 0; + } + if (vp->v_iocount <= 0) { panic("vnode_reload with no iocount %d", vp->v_iocount); + } /* mark for release when iocount is dopped */ vp->v_lflag |= VL_MARKTERM; vnode_unlock(vp); - return (1); + return 1; } @@ -2456,41 +2529,47 @@ vgone(vnode_t vp, int flags) * if it is on one. 
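The VL_TERMWANT/wakeup() pair above is a classic flag-plus-sleep handshake: waiters sleep while VL_TERMINATE is set, and the cleaner clears the flag and wakes them when the purge finishes. The same shape with POSIX primitives, a condition variable taking the place of the kernel's wakeup channel:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
static bool terminating;            /* like VL_TERMINATE */

/* Waiter side: sleep until the purge completes (VL_TERMWANT analogue). */
static void
wait_for_termination(void)
{
	pthread_mutex_lock(&m);
	while (terminating) {
		pthread_cond_wait(&c, &m);
	}
	pthread_mutex_unlock(&m);
}

/* Cleaner side: clear the flag and wake sleepers, as vclean() does. */
static void
finish_termination(void)
{
	pthread_mutex_lock(&m);
	terminating = false;
	pthread_cond_broadcast(&c);
	pthread_mutex_unlock(&m);
}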
*/ if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) { - SPECHASH_LOCK(); - if (*vp->v_hashchain == vp) { - *vp->v_hashchain = vp->v_specnext; - } else { - for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { - if (vq->v_specnext != vp) - continue; - vq->v_specnext = vp->v_specnext; - break; + SPECHASH_LOCK(); + if (*vp->v_hashchain == vp) { + *vp->v_hashchain = vp->v_specnext; + } else { + for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { + if (vq->v_specnext != vp) { + continue; } - if (vq == NULL) + vq->v_specnext = vp->v_specnext; + break; + } + if (vq == NULL) { panic("missing bdev"); } - if (vp->v_specflags & SI_ALIASED) { - vx = NULL; - for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { - if (vq->v_rdev != vp->v_rdev || - vq->v_type != vp->v_type) - continue; - if (vx) - break; - vx = vq; + } + if (vp->v_specflags & SI_ALIASED) { + vx = NULL; + for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { + if (vq->v_rdev != vp->v_rdev || + vq->v_type != vp->v_type) { + continue; } - if (vx == NULL) - panic("missing alias"); - if (vq == NULL) - vx->v_specflags &= ~SI_ALIASED; - vp->v_specflags &= ~SI_ALIASED; + if (vx) { + break; + } + vx = vq; } - SPECHASH_UNLOCK(); - { + if (vx == NULL) { + panic("missing alias"); + } + if (vq == NULL) { + vx->v_specflags &= ~SI_ALIASED; + } + vp->v_specflags &= ~SI_ALIASED; + } + SPECHASH_UNLOCK(); + { struct specinfo *tmp = vp->v_specinfo; vp->v_specinfo = NULL; - FREE_ZONE((void *)tmp, sizeof(struct specinfo), M_SPECINFO); - } + FREE_ZONE(tmp, sizeof(struct specinfo), M_SPECINFO); + } } } @@ -2500,31 +2579,35 @@ vgone(vnode_t vp, int flags) int check_mountedon(dev_t dev, enum vtype type, int *errorp) { - vnode_t vp; + vnode_t vp; int rc = 0; int vid; loop: SPECHASH_LOCK(); for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) { - if (dev != vp->v_rdev || type != vp->v_type) + if (dev != vp->v_rdev || type != vp->v_type) { continue; + } vid = vp->v_id; SPECHASH_UNLOCK(); - if (vnode_getwithvid(vp,vid)) + if (vnode_getwithvid(vp, vid)) { goto loop; + } vnode_lock_spin(vp); if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) { vnode_unlock(vp); - if ((*errorp = vfs_mountedon(vp)) != 0) + if ((*errorp = vfs_mountedon(vp)) != 0) { rc = 1; - } else + } + } else { vnode_unlock(vp); + } vnode_put(vp); - return(rc); + return rc; } SPECHASH_UNLOCK(); - return (0); + return 0; } /* @@ -2538,12 +2621,13 @@ vcount(vnode_t vp) int vid; if (!vnode_isspec(vp)) { - return (vp->v_usecount - vp->v_kusecount); + return vp->v_usecount - vp->v_kusecount; } loop: - if (!vnode_isaliased(vp)) - return (vp->v_specinfo->si_opencount); + if (!vnode_isaliased(vp)) { + return vp->v_specinfo->si_opencount; + } count = 0; SPECHASH_LOCK(); @@ -2565,7 +2649,7 @@ loop: vnode_lock(vq); if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) { - if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) { + if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) { /* * Alias, but not in use, so flush it out. */ @@ -2594,41 +2678,49 @@ loop: vq = vnext; } - return (count); + return count; } -int prtactive = 0; /* 1 => print out reclaim of active vnodes */ +int prtactive = 0; /* 1 => print out reclaim of active vnodes */ /* * Print out a description of a vnode. 
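vgone() above unlinks a special vnode from its singly linked spec hash chain, treating a missing entry ("missing bdev") as fatal. The unlink itself, reduced to a self-contained helper (struct snode is illustrative):

#include <stddef.h>

struct snode {
	struct snode *next;     /* like v_specnext */
};

/*
 * Singly linked chain removal: handle the head specially, otherwise
 * walk until the predecessor is found. A missing entry means the
 * chain is corrupted, which the kernel treats as a panic.
 */
static int
chain_remove(struct snode **head, struct snode *victim)
{
	struct snode *q;

	if (*head == victim) {
		*head = victim->next;
		return 0;
	}
	for (q = *head; q != NULL; q = q->next) {
		if (q->next == victim) {
			q->next = victim->next;
			return 0;
		}
	}
	return -1;              /* the "missing bdev" case */
}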
*/ static const char *typename[] = - { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" }; +{ "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" }; void vprint(const char *label, struct vnode *vp) { char sbuf[64]; - if (label != NULL) + if (label != NULL) { printf("%s: ", label); + } printf("type %s, usecount %d, writecount %d", - typename[vp->v_type], vp->v_usecount, vp->v_writecount); + typename[vp->v_type], vp->v_usecount, vp->v_writecount); sbuf[0] = '\0'; - if (vp->v_flag & VROOT) + if (vp->v_flag & VROOT) { strlcat(sbuf, "|VROOT", sizeof(sbuf)); - if (vp->v_flag & VTEXT) + } + if (vp->v_flag & VTEXT) { strlcat(sbuf, "|VTEXT", sizeof(sbuf)); - if (vp->v_flag & VSYSTEM) + } + if (vp->v_flag & VSYSTEM) { strlcat(sbuf, "|VSYSTEM", sizeof(sbuf)); - if (vp->v_flag & VNOFLUSH) + } + if (vp->v_flag & VNOFLUSH) { strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf)); - if (vp->v_flag & VBWAIT) + } + if (vp->v_flag & VBWAIT) { strlcat(sbuf, "|VBWAIT", sizeof(sbuf)); - if (vnode_isaliased(vp)) + } + if (vnode_isaliased(vp)) { strlcat(sbuf, "|VALIASED", sizeof(sbuf)); - if (sbuf[0] != '\0') + } + if (sbuf[0] != '\0') { printf(" flags (%s)", &sbuf[1]); + } } @@ -2687,14 +2779,14 @@ vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash) } -static char *extension_table=NULL; +static char *extension_table = NULL; static int nexts; static int max_ext_width; static int extension_cmp(const void *a, const void *b) { - return (strlen((const char *)a) - strlen((const char *)b)); + return strlen((const char *)a) - strlen((const char *)b); } @@ -2715,142 +2807,145 @@ extern lck_mtx_t *pkg_extensions_lck; __private_extern__ int set_package_extensions_table(user_addr_t data, int nentries, int maxwidth) { - char *new_exts, *old_exts; - int error; - - if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) { - return EINVAL; - } - - - // allocate one byte extra so we can guarantee null termination - MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK); - if (new_exts == NULL) { - return ENOMEM; - } - - error = copyin(data, new_exts, nentries * maxwidth); - if (error) { - FREE(new_exts, M_TEMP); - return error; - } + char *new_exts, *old_exts; + int error; + + if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) { + return EINVAL; + } - new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block - qsort(new_exts, nentries, maxwidth, extension_cmp); + // allocate one byte extra so we can guarantee null termination + MALLOC(new_exts, char *, (nentries * maxwidth) + 1, M_TEMP, M_WAITOK); + if (new_exts == NULL) { + return ENOMEM; + } - lck_mtx_lock(pkg_extensions_lck); + error = copyin(data, new_exts, nentries * maxwidth); + if (error) { + FREE(new_exts, M_TEMP); + return error; + } - old_exts = extension_table; - extension_table = new_exts; - nexts = nentries; - max_ext_width = maxwidth; + new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block - lck_mtx_unlock(pkg_extensions_lck); + qsort(new_exts, nentries, maxwidth, extension_cmp); - if (old_exts) { - FREE(old_exts, M_TEMP); - } + lck_mtx_lock(pkg_extensions_lck); - return 0; + old_exts = extension_table; + extension_table = new_exts; + nexts = nentries; + max_ext_width = maxwidth; + + lck_mtx_unlock(pkg_extensions_lck); + + if (old_exts) { + FREE(old_exts, M_TEMP); + } + + return 0; } -int is_package_name(const char *name, int len) +int +is_package_name(const char *name, int len) { - int i, extlen; - const char 
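vprint() builds its flag description by appending "|NAME" for each set bit and printing from &sbuf[1] to drop the leading separator. The same trick in miniature; the flag values here are placeholders rather than the real v_flag bits, and strlcat() is assumed available (it is on BSD/macOS, other platforms need a shim):

#include <stdio.h>
#include <string.h>

static void
print_flags(unsigned flags)
{
	char sbuf[64];

	sbuf[0] = '\0';
	if (flags & 0x1) {
		strlcat(sbuf, "|VROOT", sizeof(sbuf));
	}
	if (flags & 0x2) {
		strlcat(sbuf, "|VTEXT", sizeof(sbuf));
	}
	if (flags & 0x4) {
		strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
	}
	if (sbuf[0] != '\0') {
		printf(" flags (%s)\n", &sbuf[1]);  /* skip the leading '|' */
	}
}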
*ptr, *name_ext; - - if (len <= 3) { - return 0; - } + int i, extlen; + const char *ptr, *name_ext; - name_ext = NULL; - for(ptr=name; *ptr != '\0'; ptr++) { - if (*ptr == '.') { - name_ext = ptr; + if (len <= 3) { + return 0; } - } - // if there is no "." extension, it can't match - if (name_ext == NULL) { - return 0; - } + name_ext = NULL; + for (ptr = name; *ptr != '\0'; ptr++) { + if (*ptr == '.') { + name_ext = ptr; + } + } - // advance over the "." - name_ext++; + // if there is no "." extension, it can't match + if (name_ext == NULL) { + return 0; + } - lck_mtx_lock(pkg_extensions_lck); + // advance over the "." + name_ext++; - // now iterate over all the extensions to see if any match - ptr = &extension_table[0]; - for(i=0; i < nexts; i++, ptr+=max_ext_width) { - extlen = strlen(ptr); - if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') { - // aha, a match! - lck_mtx_unlock(pkg_extensions_lck); - return 1; + lck_mtx_lock(pkg_extensions_lck); + + // now iterate over all the extensions to see if any match + ptr = &extension_table[0]; + for (i = 0; i < nexts; i++, ptr += max_ext_width) { + extlen = strlen(ptr); + if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') { + // aha, a match! + lck_mtx_unlock(pkg_extensions_lck); + return 1; + } } - } - lck_mtx_unlock(pkg_extensions_lck); + lck_mtx_unlock(pkg_extensions_lck); - // if we get here, no extension matched - return 0; + // if we get here, no extension matched + return 0; } int vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component) { - char *ptr, *end; - int comp=0; - - *component = -1; - if (*path != '/') { - return EINVAL; - } + char *ptr, *end; + int comp = 0; - end = path + 1; - while(end < path + pathlen && *end != '\0') { - while(end < path + pathlen && *end == '/' && *end != '\0') { - end++; + *component = -1; + if (*path != '/') { + return EINVAL; } - ptr = end; + end = path + 1; + while (end < path + pathlen && *end != '\0') { + while (end < path + pathlen && *end == '/' && *end != '\0') { + end++; + } - while(end < path + pathlen && *end != '/' && *end != '\0') { - end++; - } + ptr = end; - if (end > path + pathlen) { - // hmm, string wasn't null terminated - return EINVAL; - } + while (end < path + pathlen && *end != '/' && *end != '\0') { + end++; + } - *end = '\0'; - if (is_package_name(ptr, end - ptr)) { - *component = comp; - break; - } + if (end > path + pathlen) { + // hmm, string wasn't null terminated + return EINVAL; + } + + *end = '\0'; + if (is_package_name(ptr, end - ptr)) { + *component = comp; + break; + } - end++; - comp++; - } + end++; + comp++; + } - return 0; + return 0; } -/* +/* * Determine if a name is inappropriate for a searchfs query. * This list consists of /System currently. 
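set_package_extensions_table() above follows a build-aside, publish, free-late shape: the new table is copied in, NUL-terminated, and sorted entirely outside the lock; the pointer swap is the only work done in the critical section; and the old table is freed after the lock drops. A user-space sketch of that shape (names and the pthread lock are stand-ins for the kernel's MALLOC/copyin and pkg_extensions_lck):

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static char  *table;            /* like extension_table */
static int    table_entries;

static int
replace_table(const char *src, int nentries, int width,
    int (*cmp)(const void *, const void *))
{
	char *new_tab, *old_tab;

	if (nentries <= 0 || width <= 0) {
		return -1;
	}
	new_tab = malloc((size_t)nentries * width + 1);
	if (new_tab == NULL) {
		return -1;
	}
	memcpy(new_tab, src, (size_t)nentries * width);
	new_tab[nentries * width] = '\0';   /* guarantee termination */
	qsort(new_tab, nentries, width, cmp);

	pthread_mutex_lock(&table_lock);    /* short critical section */
	old_tab       = table;
	table         = new_tab;
	table_entries = nentries;
	pthread_mutex_unlock(&table_lock);

	free(old_tab);                      /* safe: no longer reachable */
	return 0;
}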
*/ -int vn_searchfs_inappropriate_name(const char *name, int len) { +int +vn_searchfs_inappropriate_name(const char *name, int len) +{ const char *bad_names[] = { "System" }; int bad_len[] = { 6 }; int i; - for(i=0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) { + for (i = 0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) { if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) { return 1; } @@ -2875,20 +2970,20 @@ extern unsigned int vfs_nummntops; * name[1]: VFS_NUMMNTOPS */ SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - &vfs_nummntops, 0, ""); + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + &vfs_nummntops, 0, ""); int vfs_sysctl(int *name __unused, u_int namelen __unused, - user_addr_t oldp __unused, size_t *oldlenp __unused, - user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused); + user_addr_t oldp __unused, size_t *oldlenp __unused, + user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused); int vfs_sysctl(int *name __unused, u_int namelen __unused, - user_addr_t oldp __unused, size_t *oldlenp __unused, - user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused) + user_addr_t oldp __unused, size_t *oldlenp __unused, + user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused) { - return (EINVAL); + return EINVAL; } @@ -2920,34 +3015,34 @@ vfs_sysctl(int *name __unused, u_int namelen __unused, static int is_bad_sysctl_name(struct vfstable *vfsp, int selector_name) { - switch(selector_name) { - case VFS_CTL_QUERY: - case VFS_CTL_TIMEO: - case VFS_CTL_NOLOCKS: - case VFS_CTL_NSTATUS: - case VFS_CTL_SADDR: - case VFS_CTL_DISC: - case VFS_CTL_SERVERINFO: - return 1; + switch (selector_name) { + case VFS_CTL_QUERY: + case VFS_CTL_TIMEO: + case VFS_CTL_NOLOCKS: + case VFS_CTL_NSTATUS: + case VFS_CTL_SADDR: + case VFS_CTL_DISC: + case VFS_CTL_SERVERINFO: + return 1; - default: - break; + default: + break; } // the more complicated check for some of SMB's special values if (strcmp(vfsp->vfc_name, "smbfs") == 0) { - switch(selector_name) { - case SMBFS_SYSCTL_REMOUNT: - case SMBFS_SYSCTL_REMOUNT_INFO: - case SMBFS_SYSCTL_GET_SERVER_SHARE: - return 1; + switch (selector_name) { + case SMBFS_SYSCTL_REMOUNT: + case SMBFS_SYSCTL_REMOUNT_INFO: + case SMBFS_SYSCTL_GET_SERVER_SHARE: + return 1; } } else if (strcmp(vfsp->vfc_name, "afpfs") == 0) { - switch(selector_name) { - case AFPFS_VFS_CTL_GETID: - case AFPFS_VFS_CTL_NETCHANGE: - case AFPFS_VFS_CTL_VOLCHANGE: - return 1; + switch (selector_name) { + case AFPFS_VFS_CTL_GETID: + case AFPFS_VFS_CTL_NETCHANGE: + case AFPFS_VFS_CTL_VOLCHANGE: + return 1; } } @@ -2964,25 +3059,26 @@ int vfs_sysctl_node SYSCTL_HANDLER_ARGS struct vfstable *vfsp; int error; int fstypenum; - + fstypenum = oidp->oid_number; name = arg1; namelen = arg2; /* all sysctl names at this level should have at least one name slot for the FS */ - if (namelen < 1) - return (EISDIR); /* overloaded */ - + if (namelen < 1) { + return EISDIR; /* overloaded */ + } mount_list_lock(); - for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { if (vfsp->vfc_typenum == fstypenum) { vfsp->vfc_refcount++; break; } + } mount_list_unlock(); - + if (vfsp == NULL) { - return (ENOTSUP); + return ENOTSUP; } if (is_bad_sysctl_name(vfsp, name[0])) { @@ -3016,8 +3112,9 @@ vfs_mountedon(struct vnode *vp) if (vp->v_specflags & SI_ALIASED) { for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) { if (vq->v_rdev != 
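vn_path_package_check() walks the path one component at a time: skip runs of '/', remember the component start, scan to the next '/' or NUL, and hand the component to the matcher. The walk on its own, as a hedged sketch with a callback in place of is_package_name():

#include <stddef.h>

static void
walk_components(const char *path, size_t pathlen,
    void (*cb)(const char *comp, size_t len, int index))
{
	const char *end = path, *limit = path + pathlen;
	const char *comp;
	int idx = 0;

	while (end < limit && *end != '\0') {
		while (end < limit && *end == '/') {    /* skip '/' runs */
			end++;
		}
		comp = end;                             /* component start */
		while (end < limit && *end != '/' && *end != '\0') {
			end++;
		}
		if (end > comp) {
			cb(comp, (size_t)(end - comp), idx++);
		}
	}
}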
vp->v_rdev || - vq->v_type != vp->v_type) + vq->v_type != vp->v_type) { continue; + } if (vq->v_specflags & SI_MOUNTEDON) { error = EBUSY; break; @@ -3026,12 +3123,12 @@ vfs_mountedon(struct vnode *vp) } out: SPECHASH_UNLOCK(); - return (error); + return error; } struct unmount_info { - int u_errs; // Total failed unmounts - int u_busy; // EBUSY failed unmounts + int u_errs; // Total failed unmounts + int u_busy; // EBUSY failed unmounts }; static int @@ -3042,23 +3139,26 @@ unmount_callback(mount_t mp, void *arg) struct unmount_info *uip = arg; mount_ref(mp, 0); - mount_iterdrop(mp); // avoid vfs_iterate deadlock in dounmount() + mount_iterdrop(mp); // avoid vfs_iterate deadlock in dounmount() MALLOC_ZONE(mntname, void *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (mntname) + if (mntname) { strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN); + } error = dounmount(mp, MNT_FORCE, 1, vfs_context_current()); if (error) { uip->u_errs++; printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error); - if (error == EBUSY) + if (error == EBUSY) { uip->u_busy++; + } } - if (mntname) + if (mntname) { FREE_ZONE(mntname, MAXPATHLEN, M_NAMEI); + } - return (VFS_RETURNED); + return VFS_RETURNED; } /* @@ -3076,22 +3176,24 @@ retry: ui.u_errs = ui.u_busy = 0; vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui); mounts = mount_getvfscnt(); - if (mounts == 0) + if (mounts == 0) { return; + } - if (ui.u_busy > 0) { // Busy mounts - wait & retry + if (ui.u_busy > 0) { // Busy mounts - wait & retry tsleep(&nummounts, PVFS, "busy mount", sec * hz); sec *= 2; - if (sec <= 32) + if (sec <= 32) { goto retry; + } printf("Unmounting timed out\n"); - } else if (ui.u_errs < mounts) { + } else if (ui.u_errs < mounts) { // If the vfs_iterate missed mounts in progress - wait a bit tsleep(&nummounts, PVFS, "missed mount", 2 * hz); } } -/* +/* * This routine is called from vnode_pager_deallocate out of the VM * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named * on a vnode that has a UBCINFO @@ -3099,7 +3201,7 @@ retry: __private_extern__ void vnode_pager_vrele(vnode_t vp) { - struct ubc_info *uip; + struct ubc_info *uip; vnode_lock_spin(vp); @@ -3143,16 +3245,16 @@ extern int iosched_enabled; errno_t vfs_init_io_attributes(vnode_t devvp, mount_t mp) { - int error; - off_t readblockcnt = 0; - off_t writeblockcnt = 0; - off_t readmaxcnt = 0; - off_t writemaxcnt = 0; - off_t readsegcnt = 0; - off_t writesegcnt = 0; - off_t readsegsize = 0; - off_t writesegsize = 0; - off_t alignment = 0; + int error; + off_t readblockcnt = 0; + off_t writeblockcnt = 0; + off_t readmaxcnt = 0; + off_t writemaxcnt = 0; + off_t readsegcnt = 0; + off_t writesegcnt = 0; + off_t readsegsize = 0; + off_t writesegsize = 0; + off_t alignment = 0; u_int32_t minsaturationbytecount = 0; u_int32_t ioqueue_depth = 0; u_int32_t blksize; @@ -3172,8 +3274,9 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) */ mp->mnt_devbsdunit = num_trailing_0(mp->mnt_throttle_mask); - if (devvp == rootvp) + if (devvp == rootvp) { rootunit = mp->mnt_devbsdunit; + } if (mp->mnt_devbsdunit == rootunit) { /* @@ -3192,8 +3295,9 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, - (caddr_t)&blksize, 0, ctx))) - return (error); + (caddr_t)&blksize, 0, ctx))) { + return error; + } mp->mnt_devblocksize = blksize; @@ -3208,154 +3312,182 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES; if (VNOP_IOCTL(devvp, 
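vfs_sysctl_node() above pins the vfstable entry by bumping vfc_refcount while the mount list lock is held, so the entry cannot go away while the (potentially slow) handler runs without the lock. The lookup-and-pin shape in isolation (struct fsentry and the mutex are illustrative):

#include <stddef.h>
#include <pthread.h>

struct fsentry {
	int             typenum;
	int             refcount;
	struct fsentry *next;
};

static pthread_mutex_t fslist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fsentry *fslist;

/*
 * The reference is taken while the list lock is held, so the entry
 * remains valid after the lock drops; the caller must drop the
 * reference when the handler returns.
 */
static struct fsentry *
fs_find_and_ref(int typenum)
{
	struct fsentry *p;

	pthread_mutex_lock(&fslist_lock);
	for (p = fslist; p != NULL; p = p->next) {
		if (p->typenum == typenum) {
			p->refcount++;
			break;
		}
	}
	pthread_mutex_unlock(&fslist_lock);
	return p;       /* NULL if not found */
}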
DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) { - if (isvirtual) - mp->mnt_kern_flag |= MNTK_VIRTUALDEV; + if (isvirtual) { + mp->mnt_kern_flag |= MNTK_VIRTUALDEV; + } } if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) { - if (isssd) - mp->mnt_kern_flag |= MNTK_SSD; + if (isssd) { + mp->mnt_kern_flag |= MNTK_SSD; + } } if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES, - (caddr_t)&features, 0, ctx))) - return (error); + (caddr_t)&features, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD, - (caddr_t)&readblockcnt, 0, ctx))) - return (error); + (caddr_t)&readblockcnt, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE, - (caddr_t)&writeblockcnt, 0, ctx))) - return (error); + (caddr_t)&writeblockcnt, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD, - (caddr_t)&readmaxcnt, 0, ctx))) - return (error); + (caddr_t)&readmaxcnt, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE, - (caddr_t)&writemaxcnt, 0, ctx))) - return (error); + (caddr_t)&writemaxcnt, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD, - (caddr_t)&readsegcnt, 0, ctx))) - return (error); + (caddr_t)&readsegcnt, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE, - (caddr_t)&writesegcnt, 0, ctx))) - return (error); + (caddr_t)&writesegcnt, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD, - (caddr_t)&readsegsize, 0, ctx))) - return (error); + (caddr_t)&readsegsize, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, - (caddr_t)&writesegsize, 0, ctx))) - return (error); + (caddr_t)&writesegsize, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT, - (caddr_t)&alignment, 0, ctx))) - return (error); + (caddr_t)&alignment, 0, ctx))) { + return error; + } if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE, - (caddr_t)&ioqueue_depth, 0, ctx))) - return (error); + (caddr_t)&ioqueue_depth, 0, ctx))) { + return error; + } - if (readmaxcnt) + if (readmaxcnt) { mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX : readmaxcnt; + } if (readblockcnt) { temp = readblockcnt * blksize; temp = (temp > UINT32_MAX) ? UINT32_MAX : temp; - if (temp < mp->mnt_maxreadcnt) + if (temp < mp->mnt_maxreadcnt) { mp->mnt_maxreadcnt = (u_int32_t)temp; + } } - if (writemaxcnt) + if (writemaxcnt) { mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : writemaxcnt; + } if (writeblockcnt) { temp = writeblockcnt * blksize; temp = (temp > UINT32_MAX) ? UINT32_MAX : temp; - if (temp < mp->mnt_maxwritecnt) + if (temp < mp->mnt_maxwritecnt) { mp->mnt_maxwritecnt = (u_int32_t)temp; + } } if (readsegcnt) { - temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt; + temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt; } else { temp = mp->mnt_maxreadcnt / PAGE_SIZE; - if (temp > UINT16_MAX) + if (temp > UINT16_MAX) { temp = UINT16_MAX; + } } mp->mnt_segreadcnt = (u_int16_t)temp; if (writesegcnt) { - temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt; + temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt; } else { temp = mp->mnt_maxwritecnt / PAGE_SIZE; - if (temp > UINT16_MAX) + if (temp > UINT16_MAX) { temp = UINT16_MAX; + } } mp->mnt_segwritecnt = (u_int16_t)temp; - if (readsegsize) - temp = (readsegsize > UINT32_MAX) ? 
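vfs_unmountall() retries busy unmounts with a doubling sleep, capped at 32 seconds before it logs a timeout. The backoff skeleton, with sleep() in place of tsleep() and a hypothetical try_unmount_all() standing in for the vfs_iterate() pass:

#include <unistd.h>

/* Hypothetical stand-in: returns the number of busy mounts left. */
extern int try_unmount_all(void);

static void
unmount_all_with_backoff(void)
{
	unsigned sec = 1;

	while (try_unmount_all() > 0) {
		sleep(sec);                 /* wait for busy mounts to settle */
		sec *= 2;                   /* 1, 2, 4, ... */
		if (sec > 32) {
			/* "Unmounting timed out" in the kernel version */
			break;
		}
	}
}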
UINT32_MAX : readsegsize; - else - temp = mp->mnt_maxreadcnt; + if (readsegsize) { + temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize; + } else { + temp = mp->mnt_maxreadcnt; + } mp->mnt_maxsegreadsize = (u_int32_t)temp; - if (writesegsize) - temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize; - else - temp = mp->mnt_maxwritecnt; + if (writesegsize) { + temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize; + } else { + temp = mp->mnt_maxwritecnt; + } mp->mnt_maxsegwritesize = (u_int32_t)temp; - if (alignment) - temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1; - else - temp = 0; + if (alignment) { + temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1; + } else { + temp = 0; + } mp->mnt_alignmentmask = temp; - if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) + if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) { temp = ioqueue_depth; - else + } else { temp = MNT_DEFAULT_IOQUEUE_DEPTH; + } mp->mnt_ioqueue_depth = temp; mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth); - if (mp->mnt_ioscale > 1) + if (mp->mnt_ioscale > 1) { printf("ioqueue_depth = %d, ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale); + } + + if (features & DK_FEATURE_FORCE_UNIT_ACCESS) { + mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED; + } - if (features & DK_FEATURE_FORCE_UNIT_ACCESS) - mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED; - if (VNOP_IOCTL(devvp, DKIOCGETIOMINSATURATIONBYTECOUNT, (caddr_t)&minsaturationbytecount, 0, ctx) == 0) { mp->mnt_minsaturationbytecount = minsaturationbytecount; } else { mp->mnt_minsaturationbytecount = 0; } - if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) + if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) { cs_present = TRUE; + } if (features & DK_FEATURE_UNMAP) { mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED; - if (cs_present == TRUE) + if (cs_present == TRUE) { mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED; + } } if (cs_present == TRUE) { /* * for now we'll use the following test as a proxy for * the underlying drive being FUSION in nature */ - if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) + if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) { mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE; + } } else { /* Check for APFS Fusion */ dk_apfs_flavour_t flavour; @@ -3366,12 +3498,12 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp) } #if CONFIG_IOSCHED - if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) { - mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED; + if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) { + mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED; throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0); } #endif /* CONFIG_IOSCHED */ - return (error); + return error; } static struct klist fs_klist; @@ -3381,7 +3513,6 @@ lck_mtx_t *fs_klist_lock; void vfs_event_init(void) { - klist_init(&fs_klist); fs_klist_lck_grp = lck_grp_alloc_init("fs_klist", NULL); fs_klist_lock = lck_mtx_alloc_init(fs_klist_lck_grp, NULL); @@ -3394,10 +3525,11 @@ vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data) struct mount *mp = vfs_getvfs(fsid); if (mp) { mount_lock_spin(mp); - if (data) - mp->mnt_kern_flag &= ~MNT_LNOTRESP; // Now responding - else - mp->mnt_kern_flag |= MNT_LNOTRESP; // Not responding + if (data) { + mp->mnt_kern_flag &= ~MNT_LNOTRESP; // Now responding + } else { + mp->mnt_kern_flag |= MNT_LNOTRESP; // Not responding + } mount_unlock(mp); } } @@ -3413,7 +3545,7 @@ 
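The block above repeatedly saturates 64-bit device-reported limits into the narrower mount fields (u_int32_t and u_int16_t counters) rather than letting them truncate. The clamp as two small helpers (a sketch; the kernel open-codes the comparisons):

#include <stdint.h>

static uint32_t
clamp_u32(int64_t v)
{
	if (v < 0) {
		return 0;           /* device values are expected non-negative */
	}
	return (v > UINT32_MAX) ? UINT32_MAX : (uint32_t)v;
}

static uint16_t
clamp_u16(int64_t v)
{
	if (v < 0) {
		return 0;
	}
	return (v > UINT16_MAX) ? UINT16_MAX : (uint16_t)v;
}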
vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data) static int sysctl_vfs_getvfscnt(void) { - return(mount_getvfscnt()); + return mount_getvfscnt(); } @@ -3425,8 +3557,7 @@ mount_getvfscnt(void) mount_list_lock(); ret = nummounts; mount_list_unlock(); - return (ret); - + return ret; } @@ -3435,7 +3566,7 @@ static int mount_fillfsids(fsid_t *fsidlst, int count) { struct mount *mp; - int actual=0; + int actual = 0; actual = 0; mount_list_lock(); @@ -3446,8 +3577,7 @@ mount_fillfsids(fsid_t *fsidlst, int count) } } mount_list_unlock(); - return (actual); - + return actual; } /* @@ -3466,24 +3596,26 @@ sysctl_vfs_getvfslist(fsid_t *fsidlst, int count, int *actual) mount_list_lock(); TAILQ_FOREACH(mp, &mountlist, mnt_list) { (*actual)++; - if (*actual <= count) + if (*actual <= count) { fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid; + } } mount_list_unlock(); - return (*actual <= count ? 0 : ENOMEM); + return *actual <= count ? 0 : ENOMEM; } static int sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1, - __unused int arg2, struct sysctl_req *req) + __unused int arg2, struct sysctl_req *req) { int actual, error; size_t space; fsid_t *fsidlst; /* This is a readonly node. */ - if (req->newptr != USER_ADDR_NULL) - return (EPERM); + if (req->newptr != USER_ADDR_NULL) { + return EPERM; + } /* they are querying us so just return the space required. */ if (req->oldptr == USER_ADDR_NULL) { @@ -3499,12 +3631,13 @@ again: req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t); /* they didn't give us enough space. */ - if (space < req->oldlen) - return (ENOMEM); + if (space < req->oldlen) { + return ENOMEM; + } MALLOC(fsidlst, fsid_t *, req->oldlen, M_TEMP, M_WAITOK | M_ZERO); if (fsidlst == NULL) { - return (ENOMEM); + return ENOMEM; } error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t), @@ -3522,7 +3655,7 @@ again: error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t)); } FREE(fsidlst, M_TEMP); - return (error); + return error; } /* @@ -3530,15 +3663,15 @@ again: */ static int sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, - struct sysctl_req *req) + struct sysctl_req *req) { union union_vfsidctl vc; struct mount *mp; struct vfsstatfs *sp; int *name, flags, namelen; - int error=0, gotref=0; + int error = 0, gotref = 0; vfs_context_t ctx = vfs_context_current(); - proc_t p = req->p; /* XXX req->p != current_proc()? */ + proc_t p = req->p; /* XXX req->p != current_proc()? */ boolean_t is_64_bit; name = arg1; @@ -3546,8 +3679,9 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, is_64_bit = proc_is64bit(p); error = SYSCTL_IN(req, &vc, is_64_bit? 
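sysctl_vfs_vfslist() below implements the usual two-pass sysctl protocol: a NULL old pointer is a size probe, a short buffer is ENOMEM, and since mounts can appear between the probe and the fill the code loops (the again: label). The protocol reduced to its shape, with hypothetical get_count()/fill() stand-ins:

#include <stddef.h>
#include <errno.h>

extern size_t get_count(void);
extern size_t fill(int *out, size_t max_items);     /* items written */

static int
get_list(int *out, size_t *inout_len)
{
	size_t need = get_count() * sizeof(int);

	if (out == NULL) {          /* size probe */
		*inout_len = need;
		return 0;
	}
	if (*inout_len < need) {
		return ENOMEM;          /* caller grows the buffer and retries */
	}
	*inout_len = fill(out, *inout_len / sizeof(int)) * sizeof(int);
	return 0;
}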
sizeof(vc.vc64):sizeof(vc.vc32)); - if (error) + if (error) { goto out; + } if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */ error = EINVAL; goto out; @@ -3570,17 +3704,15 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, if (vfs_64bitready(mp)) { error = mp->mnt_op->vfs_sysctl(name, namelen, CAST_USER_ADDR_T(req), - NULL, USER_ADDR_NULL, 0, + NULL, USER_ADDR_NULL, 0, ctx); - } - else { + } else { error = ENOTSUP; } - } - else { + } else { error = mp->mnt_op->vfs_sysctl(name, namelen, CAST_USER_ADDR_T(req), - NULL, USER_ADDR_NULL, 0, + NULL, USER_ADDR_NULL, 0, ctx); } if (error != ENOTSUP) { @@ -3593,14 +3725,14 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, if (is_64_bit) { req->newptr = vc.vc64.vc_ptr; req->newlen = (size_t)vc.vc64.vc_len; - } - else { + } else { req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr); req->newlen = vc.vc32.vc_len; } error = SYSCTL_IN(req, &flags, sizeof(flags)); - if (error) + if (error) { break; + } mount_ref(mp, 0); mount_iterdrop(mp); @@ -3613,18 +3745,19 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, if (is_64_bit) { req->newptr = vc.vc64.vc_ptr; req->newlen = (size_t)vc.vc64.vc_len; - } - else { + } else { req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr); req->newlen = vc.vc32.vc_len; } error = SYSCTL_IN(req, &flags, sizeof(flags)); - if (error) + if (error) { break; + } sp = &mp->mnt_vfsstat; if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) && - (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) + (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) { goto out; + } if (is_64_bit) { struct user64_statfs sfs; bzero(&sfs, sizeof(sfs)); @@ -3649,10 +3782,9 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, } strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN); strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN); - + error = SYSCTL_OUT(req, &sfs, sizeof(sfs)); - } - else { + } else { struct user32_statfs sfs; bzero(&sfs, sizeof(sfs)); sfs.f_flags = mp->mnt_flag & MNT_VISFLAGMASK; @@ -3664,7 +3796,7 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, * to reflect the filesystem size as best we can. */ if (sp->f_blocks > INT_MAX) { - int shift; + int shift; /* * Work out how far we have to shift the block count down to make it fit. @@ -3676,12 +3808,14 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, * being smaller than f_bsize. */ for (shift = 0; shift < 32; shift++) { - if ((sp->f_blocks >> shift) <= INT_MAX) + if ((sp->f_blocks >> shift) <= INT_MAX) { break; - if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX) + } + if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX) { break; + } } -#define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s))) +#define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? 
INT_MAX : ((x) >> (s))) sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift); sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift); sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift); @@ -3710,7 +3844,7 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, } strlcpy(sfs.f_mntonname, sp->f_mntonname, MNAMELEN); strlcpy(sfs.f_mntfromname, sp->f_mntfromname, MNAMELEN); - + error = SYSCTL_OUT(req, &sfs, sizeof(sfs)); } break; @@ -3719,16 +3853,17 @@ sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2, goto out; } out: - if(gotref != 0) + if (gotref != 0) { mount_iterdrop(mp); - return (error); + } + return error; } -static int filt_fsattach(struct knote *kn, struct kevent_internal_s *kev); -static void filt_fsdetach(struct knote *kn); -static int filt_fsevent(struct knote *kn, long hint); -static int filt_fstouch(struct knote *kn, struct kevent_internal_s *kev); -static int filt_fsprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); +static int filt_fsattach(struct knote *kn, struct kevent_internal_s *kev); +static void filt_fsdetach(struct knote *kn); +static int filt_fsevent(struct knote *kn, long hint); +static int filt_fstouch(struct knote *kn, struct kevent_internal_s *kev); +static int filt_fsprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); SECURITY_READ_ONLY_EARLY(struct filterops) fs_filtops = { .f_attach = filt_fsattach, .f_detach = filt_fsdetach, @@ -3744,11 +3879,11 @@ filt_fsattach(struct knote *kn, __unused struct kevent_internal_s *kev) KNOTE_ATTACH(&fs_klist, kn); lck_mtx_unlock(fs_klist_lock); - /* - * filter only sees future events, + /* + * filter only sees future events, * so it can't be fired already. */ - return (0); + return 0; } static void @@ -3771,7 +3906,7 @@ filt_fsevent(struct knote *kn, long hint) kn->kn_fflags |= hint; } - return (kn->kn_fflags != 0); + return kn->kn_fflags != 0; } static int @@ -3816,27 +3951,30 @@ filt_fsprocess(struct knote *kn, struct filt_process_s *data, struct kevent_inte } lck_mtx_unlock(fs_klist_lock); return res; -} +} static int sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp, - __unused void *arg1, __unused int arg2, struct sysctl_req *req) + __unused void *arg1, __unused int arg2, struct sysctl_req *req) { int out, error; pid_t pid; proc_t p; /* We need a pid. */ - if (req->newptr == USER_ADDR_NULL) - return (EINVAL); + if (req->newptr == USER_ADDR_NULL) { + return EINVAL; + } error = SYSCTL_IN(req, &pid, sizeof(pid)); - if (error) - return (error); + if (error) { + return error; + } p = proc_find(pid < 0 ? -pid : pid); - if (p == NULL) - return (ESRCH); + if (p == NULL) { + return ESRCH; + } /* * Fetching the value is ok, but we only fetch if the old @@ -3846,22 +3984,23 @@ sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp, out = !((p->p_flag & P_NOREMOTEHANG) == 0); proc_rele(p); error = SYSCTL_OUT(req, &out, sizeof(out)); - return (error); + return error; } /* cansignal offers us enough security. 
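__SHIFT_OR_CLIP above, together with the preceding loop, scales an oversized block count into a signed 32-bit statfs field: shift the count down while the correspondingly doubled f_bsize stays representable, then clip whatever still does not fit to INT_MAX. The same computation as a function (a sketch, not the kernel macro):

#include <stdint.h>
#include <limits.h>

static void
scale_statfs(uint64_t blocks, long bsize,
    int32_t *out_blocks, long *out_bsize)
{
	int shift;

	for (shift = 0; shift < 32; shift++) {
		if ((blocks >> shift) <= INT_MAX) {
			break;              /* small enough to report */
		}
		if (((long long)bsize << (shift + 1)) > INT_MAX) {
			break;              /* scaled bsize would overflow */
		}
	}
	*out_blocks = ((blocks >> shift) > INT_MAX)
	    ? INT_MAX : (int32_t)(blocks >> shift);
	*out_bsize = bsize << shift;
}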
*/ if (p != req->p && proc_suser(req->p) != 0) { proc_rele(p); - return (EPERM); + return EPERM; } - if (pid < 0) + if (pid < 0) { OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag); - else + } else { OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag); + } proc_rele(p); - return (0); + return 0; } static int @@ -3870,27 +4009,29 @@ sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS int *name, namelen; struct vfstable *vfsp; struct vfsconf vfsc = {}; - + (void)oidp; name = arg1; namelen = arg2; - + if (namelen < 1) { - return (EISDIR); + return EISDIR; } else if (namelen > 1) { - return (ENOTDIR); + return ENOTDIR; } - + mount_list_lock(); - for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) - if (vfsp->vfc_typenum == name[0]) + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { + if (vfsp->vfc_typenum == name[0]) { break; - + } + } + if (vfsp == NULL) { mount_list_unlock(); - return (ENOTSUP); + return ENOTSUP; } - + vfsc.vfc_reserved1 = 0; bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name)); vfsc.vfc_typenum = vfsp->vfc_typenum; @@ -3898,9 +4039,9 @@ sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS vfsc.vfc_flags = vfsp->vfc_flags; vfsc.vfc_reserved2 = 0; vfsc.vfc_reserved3 = 0; - + mount_list_unlock(); - return (SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf))); + return SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf)); } /* the vfs.generic. branch. */ @@ -3915,12 +4056,12 @@ SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED, SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY, NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang"); SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum, - CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, - &maxvfstypenum, 0, ""); + CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, + &maxvfstypenum, 0, ""); SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, ""); SYSCTL_NODE(_vfs_generic, VFS_CONF, conf, - CTLFLAG_RD | CTLFLAG_LOCKED, - sysctl_vfs_generic_conf, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, + sysctl_vfs_generic_conf, ""); /* Indicate that the root file system unmounted cleanly */ static int vfs_root_unmounted_cleanly = 0; @@ -3988,12 +4129,12 @@ process_vp(vnode_t vp, int want_vp, int *deferred) vnode_lock_spin(vp); - /* + /* * We could wait for the vnode_lock after removing the vp from the freelist * and the vid is bumped only at the very end of reclaim. So it is possible * that we are looking at a vnode that is being terminated. If so skip it. - */ - if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || + */ + if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) { /* * we lost the race between dropping the list lock @@ -4001,11 +4142,11 @@ process_vp(vnode_t vp, int want_vp, int *deferred) * used this vnode and it is now in a new state */ vnode_unlock(vp); - - return (NULLVP); + + return NULLVP; } - if ( (vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE ) { - /* + if ((vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE) { + /* * we did a vnode_rele_ext that asked for * us not to reenter the filesystem during * the release even though VL_NEEDINACTIVE was @@ -4014,55 +4155,60 @@ process_vp(vnode_t vp, int want_vp, int *deferred) * * pick up an iocount so that we can call * vnode_put and drive the VNOP_INACTIVE... 
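sysctl_vfs_noremotehang() encodes set-versus-clear in the sign of the pid: a negative pid clears P_NOREMOTEHANG with OSBitAndAtomic, a positive one sets it with OSBitOrAtomic. The same convention with C11 atomics (the bit value here is a placeholder, not the real P_NOREMOTEHANG):

#include <stdatomic.h>

static _Atomic unsigned proc_flags;

#define NOREMOTEHANG_BIT 0x1u       /* illustrative bit position */

static void
set_noremotehang(int pid)
{
	if (pid < 0) {
		atomic_fetch_and(&proc_flags, ~NOREMOTEHANG_BIT);   /* clear */
	} else {
		atomic_fetch_or(&proc_flags, NOREMOTEHANG_BIT);     /* set */
	}
}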
- * vnode_put will either leave us off + * vnode_put will either leave us off * the freelist if a new ref comes in, * or put us back on the end of the freelist * or recycle us if we were marked for termination... * so we'll just go grab a new candidate */ - vp->v_iocount++; + vp->v_iocount++; #ifdef JOE_DEBUG record_vp(vp, 1); #endif vnode_put_locked(vp); vnode_unlock(vp); - return (NULLVP); + return NULLVP; } /* * Checks for anyone racing us for recycle - */ + */ if (vp->v_type != VBAD) { if (want_vp && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) { vnode_async_list_add(vp); vnode_unlock(vp); - + *deferred = 1; - return (NULLVP); + return NULLVP; } - if (vp->v_lflag & VL_DEAD) + if (vp->v_lflag & VL_DEAD) { panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp); + } vnode_lock_convert(vp); (void)vnode_reclaim_internal(vp, 1, want_vp, 0); if (want_vp) { - if ((VONLIST(vp))) + if ((VONLIST(vp))) { panic("new_vnode(%p): vp on list", vp); + } if (vp->v_usecount || vp->v_iocount || vp->v_kusecount || - (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) + (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) { panic("new_vnode(%p): free vnode still referenced", vp); - if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) + } + if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) { panic("new_vnode(%p): vnode seems to be on mount list", vp); - if ( !LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) + } + if (!LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) { panic("new_vnode(%p): vnode still hooked into the name cache", vp); + } } else { vnode_unlock(vp); vp = NULLVP; } } - return (vp); + return vp; } __attribute__((noreturn)) @@ -4070,20 +4216,19 @@ static void async_work_continue(void) { struct async_work_lst *q; - int deferred; - vnode_t vp; + int deferred; + vnode_t vp; q = &vnode_async_work_list; for (;;) { - vnode_list_lock(); - if ( TAILQ_EMPTY(q) ) { + if (TAILQ_EMPTY(q)) { assert_wait(q, (THREAD_UNINT)); - + vnode_list_unlock(); - + thread_block((thread_continue_t)async_work_continue); continue; @@ -4094,8 +4239,9 @@ async_work_continue(void) vp = process_vp(vp, 0, &deferred); - if (vp != NULLVP) + if (vp != NULLVP) { panic("found VBAD vp (%p) on async queue", vp); + } } } @@ -4103,13 +4249,13 @@ async_work_continue(void) static int new_vnode(vnode_t *vpp) { - vnode_t vp; - uint32_t retries = 0, max_retries = 100; /* retry incase of tablefull */ + vnode_t vp; + uint32_t retries = 0, max_retries = 100; /* retry incase of tablefull */ int force_alloc = 0, walk_count = 0; boolean_t need_reliable_vp = FALSE; int deferred; - struct timeval initial_tv; - struct timeval current_tv; + struct timeval initial_tv; + struct timeval current_tv; proc_t curproc = current_proc(); initial_tv.tv_sec = 0; @@ -4118,13 +4264,14 @@ retry: vnode_list_lock(); - if (need_reliable_vp == TRUE) + if (need_reliable_vp == TRUE) { async_work_timed_out++; + } if ((numvnodes - deadvnodes) < desiredvnodes || force_alloc) { struct timespec ts; - if ( !TAILQ_EMPTY(&vnode_dead_list)) { + if (!TAILQ_EMPTY(&vnode_dead_list)) { /* * Can always reuse a dead one */ @@ -4140,7 +4287,7 @@ retry: MALLOC_ZONE(vp, struct vnode *, sizeof(*vp), M_VNODE, M_WAITOK); bzero((char *)vp, sizeof(*vp)); - VLISTNONE(vp); /* avoid double queue removal */ + VLISTNONE(vp); /* avoid double queue removal */ lck_mtx_init(&vp->v_lock, vnode_lck_grp, vnode_lck_attr); TAILQ_INIT(&vp->v_ncchildren); @@ -4151,8 +4298,9 @@ retry: vp->v_flag = 
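async_work_continue() above is a dedicated worker: it sleeps (assert_wait()/thread_block()) while the async work list is empty and otherwise pops the head entry for processing. The same loop with POSIX primitives; process_item() is a hypothetical stand-in for process_vp():

#include <stddef.h>
#include <pthread.h>
#include <sys/queue.h>

struct work { TAILQ_ENTRY(work) link; };
TAILQ_HEAD(workq, work);

static struct workq q = TAILQ_HEAD_INITIALIZER(q);
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cv   = PTHREAD_COND_INITIALIZER;

extern void process_item(struct work *w);

static void *
worker(void *arg)
{
	struct work *w;

	(void)arg;
	for (;;) {
		pthread_mutex_lock(&q_lock);
		while (TAILQ_EMPTY(&q)) {
			pthread_cond_wait(&q_cv, &q_lock);  /* sleep when idle */
		}
		w = TAILQ_FIRST(&q);
		TAILQ_REMOVE(&q, w, link);
		pthread_mutex_unlock(&q_lock);
		process_item(w);                /* work done outside the lock */
	}
	return NULL;                        /* not reached */
}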
VSTANDARD; #if CONFIG_MACF - if (mac_vnode_label_init_needed(vp)) + if (mac_vnode_label_init_needed(vp)) { mac_vnode_label_init(vp); + } #endif /* MAC */ vp->v_iocount = 1; @@ -4162,33 +4310,34 @@ retry: #define MAX_WALK_COUNT 1000 - if ( !TAILQ_EMPTY(&vnode_rage_list) && - (ragevnodes >= rage_limit || - (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) { - + if (!TAILQ_EMPTY(&vnode_rage_list) && + (ragevnodes >= rage_limit || + (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) { TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) { - if ( !(vp->v_listflag & VLIST_RAGE)) + if (!(vp->v_listflag & VLIST_RAGE)) { panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp); + } // if we're a dependency-capable process, skip vnodes that can // cause recycling deadlocks. (i.e. this process is diskimages // helper and the vnode is in a disk image). Querying the // mnt_kern_flag for the mount's virtual device status // is safer than checking the mnt_dependent_process, which - // may not be updated if there are multiple devnode layers + // may not be updated if there are multiple devnode layers // in between the disk image and the final consumer. - if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || + if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) { /* * if need_reliable_vp == TRUE, then we've already sent one or more * non-reliable vnodes to the async thread for processing and timed * out waiting for a dead vnode to show up. Use the MAX_WALK_COUNT - * mechanism to first scan for a reliable vnode before forcing + * mechanism to first scan for a reliable vnode before forcing * a new vnode to be created */ - if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) + if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) { break; + } } // don't iterate more than MAX_WALK_COUNT vnodes to @@ -4202,31 +4351,31 @@ retry: } if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) { - /* + /* * Pick the first vp for possible reuse */ walk_count = 0; TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) { - // if we're a dependency-capable process, skip vnodes that can // cause recycling deadlocks. (i.e. this process is diskimages // helper and the vnode is in a disk image). Querying the // mnt_kern_flag for the mount's virtual device status // is safer than checking the mnt_dependent_process, which - // may not be updated if there are multiple devnode layers + // may not be updated if there are multiple devnode layers // in between the disk image and the final consumer. - if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || + if ((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL || (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) { /* * if need_reliable_vp == TRUE, then we've already sent one or more * non-reliable vnodes to the async thread for processing and timed * out waiting for a dead vnode to show up. 
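The rage-list walk above caps its search at MAX_WALK_COUNT so that hunting for a vnode on reliable media cannot degenerate into scanning the whole list. The bounded-scan idea in isolation (illustrative types):

#include <stddef.h>
#include <stdbool.h>
#include <sys/queue.h>

struct cand { TAILQ_ENTRY(cand) link; bool reliable; };
TAILQ_HEAD(candq, cand);

/*
 * Prefer a "reliable" candidate, but stop after max_walk entries so a
 * long run of unsuitable nodes can't turn allocation into an O(n) walk.
 */
static struct cand *
pick_candidate(struct candq *list, bool need_reliable, int max_walk)
{
	struct cand *cp;
	int walked = 0;

	TAILQ_FOREACH(cp, list, link) {
		if (!need_reliable || cp->reliable) {
			return cp;
		}
		if (++walked > max_walk) {
			return NULL;        /* give up; caller falls back */
		}
	}
	return NULL;
}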
Use the MAX_WALK_COUNT - * mechanism to first scan for a reliable vnode before forcing + * mechanism to first scan for a reliable vnode before forcing * a new vnode to be created */ - if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) + if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) { break; + } } // don't iterate more than MAX_WALK_COUNT vnodes to @@ -4266,17 +4415,18 @@ retry: delay_for_interval(1, 1000 * 1000); goto retry; } - + vnode_list_unlock(); tablefull("vnode"); log(LOG_EMERG, "%d desired, %d numvnodes, " - "%d free, %d dead, %d async, %d rage\n", - desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes); + "%d free, %d dead, %d async, %d rage\n", + desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes); #if CONFIG_JETSAM #if DEVELOPMENT || DEBUG - if (bootarg_no_vnode_jetsam) + if (bootarg_no_vnode_jetsam) { panic("vnode table is full\n"); + } #endif /* DEVELOPMENT || DEBUG */ /* @@ -4292,27 +4442,28 @@ retry: panic("vnode table is full\n"); } - /* - * Now that we've killed someone, wait a bit and continue looking + /* + * Now that we've killed someone, wait a bit and continue looking * (with fewer retries before trying another kill). */ delay_for_interval(3, 1000 * 1000); - retries = 0; + retries = 0; max_retries = 10; goto retry; #endif *vpp = NULL; - return (ENFILE); + return ENFILE; } steal_this_vp: if ((vp = process_vp(vp, 1, &deferred)) == NULLVP) { if (deferred) { - int elapsed_msecs; + int elapsed_msecs; struct timeval elapsed_tv; - if (initial_tv.tv_sec == 0) + if (initial_tv.tv_sec == 0) { microuptime(&initial_tv); + } vnode_list_lock(); @@ -4334,13 +4485,13 @@ steal_this_vp: thread_block(THREAD_CONTINUE_NULL); microuptime(&elapsed_tv); - + timevalsub(&elapsed_tv, &initial_tv); elapsed_msecs = elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000; if (elapsed_msecs >= 100) { /* - * we've waited long enough... 100ms is + * we've waited long enough... 100ms is * somewhat arbitrary for this case, but the * normal worst case latency used for UI * interaction is 100ms, so I've chosen to @@ -4365,13 +4516,13 @@ steal_this_vp: * We should never see VL_LABELWAIT or VL_LABEL here. * as those operations hold a reference. 
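After a deferred allocation, new_vnode() measures how long it has been waiting (microuptime()/timevalsub()) and gives up on the async path once 100ms has elapsed, the figure the comment cites as the normal worst-case UI latency. The elapsed-milliseconds check in user-space form, with gettimeofday() in place of microuptime():

#include <sys/time.h>

static int
waited_long_enough(const struct timeval *start)
{
	struct timeval now;
	long ms;

	gettimeofday(&now, NULL);
	ms = (now.tv_sec - start->tv_sec) * 1000
	    + (now.tv_usec - start->tv_usec) / 1000;
	return ms >= 100;       /* same 100ms bound as the kernel code */
}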
*/ - assert ((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT); - assert ((vp->v_lflag & VL_LABEL) != VL_LABEL); + assert((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT); + assert((vp->v_lflag & VL_LABEL) != VL_LABEL); if (vp->v_lflag & VL_LABELED) { - vnode_lock_convert(vp); + vnode_lock_convert(vp); mac_vnode_label_recycle(vp); } else if (mac_vnode_label_init_needed(vp)) { - vnode_lock_convert(vp); + vnode_lock_convert(vp); mac_vnode_label_init(vp); } @@ -4380,7 +4531,7 @@ steal_this_vp: vp->v_iocount = 1; vp->v_lflag = 0; vp->v_writecount = 0; - vp->v_references = 0; + vp->v_references = 0; vp->v_iterblkflags = 0; vp->v_flag = VSTANDARD; /* vbad vnodes can point to dead_mountp */ @@ -4392,7 +4543,7 @@ steal_this_vp: done: *vpp = vp; - return (0); + return 0; } void @@ -4418,13 +4569,13 @@ vnode_unlock(vnode_t vp) int vnode_get(struct vnode *vp) { - int retval; + int retval; - vnode_lock_spin(vp); + vnode_lock_spin(vp); retval = vnode_get_locked(vp); vnode_unlock(vp); - return(retval); + return retval; } int @@ -4434,13 +4585,17 @@ vnode_get_locked(struct vnode *vp) lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED); #endif if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) { - return(ENOENT); + return ENOENT; } - vp->v_iocount++; + + if (os_add_overflow(vp->v_iocount, 1, &vp->v_iocount)) { + panic("v_iocount overflow"); + } + #ifdef JOE_DEBUG record_vp(vp, 1); #endif - return (0); + return 0; } /* @@ -4452,7 +4607,7 @@ vnode_get_locked(struct vnode *vp) int vnode_getwithvid(vnode_t vp, uint32_t vid) { - return(vget_internal(vp, vid, ( VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO ))); + return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO)); } /* @@ -4463,32 +4618,32 @@ vnode_getwithvid(vnode_t vp, uint32_t vid) int vnode_getwithvid_drainok(vnode_t vp, uint32_t vid) { - return(vget_internal(vp, vid, ( VNODE_NODEAD | VNODE_WITHID ))); + return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID)); } int vnode_getwithref(vnode_t vp) { - return(vget_internal(vp, 0, 0)); + return vget_internal(vp, 0, 0); } __private_extern__ int vnode_getalways(vnode_t vp) { - return(vget_internal(vp, 0, VNODE_ALWAYS)); + return vget_internal(vp, 0, VNODE_ALWAYS); } int vnode_put(vnode_t vp) { - int retval; + int retval; vnode_lock_spin(vp); retval = vnode_put_locked(vp); vnode_unlock(vp); - return(retval); + return retval; } static inline void @@ -4505,23 +4660,23 @@ vn_set_dead(vnode_t vp) int vnode_put_locked(vnode_t vp) { - vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */ + vfs_context_t ctx = vfs_context_current(); /* hoist outside loop */ #if DIAGNOSTIC lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED); #endif retry: - if (vp->v_iocount < 1) + if (vp->v_iocount < 1) { panic("vnode_put(%p): iocount < 1", vp); + } - if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) { + if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) { vnode_dropiocount(vp); - return(0); + return 0; } if ((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE) { - - vp->v_lflag &= ~VL_NEEDINACTIVE; - vnode_unlock(vp); + vp->v_lflag &= ~VL_NEEDINACTIVE; + vnode_unlock(vp); VNOP_INACTIVE(vp, ctx); @@ -4537,71 +4692,74 @@ retry: */ goto retry; } - vp->v_lflag &= ~VL_NEEDINACTIVE; + vp->v_lflag &= ~VL_NEEDINACTIVE; if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) { - vnode_lock_convert(vp); - vnode_reclaim_internal(vp, 1, 1, 0); + vnode_lock_convert(vp); + vnode_reclaim_internal(vp, 1, 1, 0); } vnode_dropiocount(vp); vnode_list_add(vp); - return(0); + return 
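The hunk above hardens vnode_get_locked(): the bare v_iocount++ becomes an os_add_overflow() that panics on wraparound. os_add_overflow() is a thin wrapper over the compiler builtin (GCC/Clang), so the same check looks like this outside the kernel, with abort() standing in for panic():

#include <stdio.h>
#include <stdlib.h>

static void
take_ref(int *iocount)
{
	if (__builtin_add_overflow(*iocount, 1, iocount)) {
		fprintf(stderr, "iocount overflow\n");  /* panic() in the kernel */
		abort();
	}
}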
0; } /* is vnode_t in use by others? */ -int +int vnode_isinuse(vnode_t vp, int refcnt) { - return(vnode_isinuse_locked(vp, refcnt, 0)); + return vnode_isinuse_locked(vp, refcnt, 0); } -int vnode_usecount(vnode_t vp) +int +vnode_usecount(vnode_t vp) { return vp->v_usecount; } -int vnode_iocount(vnode_t vp) +int +vnode_iocount(vnode_t vp) { return vp->v_iocount; } -static int +static int vnode_isinuse_locked(vnode_t vp, int refcnt, int locked) { int retval = 0; - if (!locked) + if (!locked) { vnode_lock_spin(vp); - if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) { + } + if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) { retval = 1; goto out; } - if (vp->v_type == VREG) { + if (vp->v_type == VREG) { retval = ubc_isinuse_locked(vp, refcnt, 1); } - + out: - if (!locked) + if (!locked) { vnode_unlock(vp); - return(retval); + } + return retval; } /* resume vnode_t */ -errno_t +errno_t vnode_resume(vnode_t vp) { if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) { - vnode_lock_spin(vp); - vp->v_lflag &= ~VL_SUSPENDED; + vp->v_lflag &= ~VL_SUSPENDED; vp->v_owner = NULL; vnode_unlock(vp); wakeup(&vp->v_iocount); } - return(0); + return 0; } /* suspend vnode_t @@ -4614,13 +4772,13 @@ errno_t vnode_suspend(vnode_t vp) { if (vp->v_lflag & VL_SUSPENDED) { - return(EBUSY); + return EBUSY; } vnode_lock_spin(vp); - /* - * xxx is this sufficient to check if a vnode_drain is + /* + * xxx is this sufficient to check if a vnode_drain is * progress? */ @@ -4630,9 +4788,9 @@ vnode_suspend(vnode_t vp) } vnode_unlock(vp); - return(0); + return 0; } - + /* * Release any blocked locking requests on the vnode. * Used for forced-unmounts. @@ -4642,28 +4800,29 @@ vnode_suspend(vnode_t vp) static void vnode_abort_advlocks(vnode_t vp) { - if (vp->v_flag & VLOCKLOCAL) + if (vp->v_flag & VLOCKLOCAL) { lf_abort_advlocks(vp); + } } - -static errno_t + +static errno_t vnode_drain(vnode_t vp) { - if (vp->v_lflag & VL_DRAIN) { panic("vnode_drain: recursive drain"); - return(ENOENT); + return ENOENT; } vp->v_lflag |= VL_DRAIN; vp->v_owner = current_thread(); - while (vp->v_iocount > 1) + while (vp->v_iocount > 1) { msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL); + } vp->v_lflag &= ~VL_DRAIN; - return(0); + return 0; } @@ -4678,7 +4837,7 @@ vnode_drain(vnode_t vp) * However, if the vnode is marked DIRTY, we want to pull it out much earlier */ #define UNAGE_THRESHHOLD 25 -#define UNAGE_DIRTYTHRESHHOLD 6 +#define UNAGE_DIRTYTHRESHHOLD 6 errno_t vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) @@ -4695,20 +4854,20 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) /* * if it is a dead vnode with deadfs */ - if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) { - return(ENOENT); + if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) { + return ENOENT; } /* * will return VL_DEAD ones */ - if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0 ) { + if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0) { break; } /* * if suspended vnodes are to be failed */ if (nosusp && (vp->v_lflag & VL_SUSPENDED)) { - return(ENOENT); + return ENOENT; } /* * if you are the owner of drain/suspend/termination , can acquire iocount @@ -4716,11 +4875,12 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) */ if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) && (vp->v_owner == current_thread())) { - break; + break; } - - if (always != 0) + 
+ if (always != 0) { break; + } /* * If this vnode is getting drained, there are some cases where @@ -4735,8 +4895,9 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) * resources that could prevent other iocounts from * being released. */ - if (beatdrain) + if (beatdrain) { break; + } /* * Don't block if the vnode's mount point is unmounting as * we may be the thread the unmount is itself waiting on @@ -4746,8 +4907,9 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) * those. ENODEV is intended to inform callers that the call * failed because an unmount is in progress. */ - if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) - return (ENODEV); + if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) { + return ENODEV; + } if (vnode_istty(vp)) { sleepflg = PCATCH; @@ -4761,40 +4923,44 @@ vnode_getiocount(vnode_t vp, unsigned int vid, int vflags) vp->v_lflag |= VL_TERMWANT; - error = msleep(&vp->v_lflag, &vp->v_lock, - (PVFS | sleepflg), "vnode getiocount", NULL); - if (error) - return (error); - } else + error = msleep(&vp->v_lflag, &vp->v_lock, + (PVFS | sleepflg), "vnode getiocount", NULL); + if (error) { + return error; + } + } else { msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL); + } } if (withvid && vid != vp->v_id) { - return(ENOENT); + return ENOENT; } if (++vp->v_references >= UNAGE_THRESHHOLD || (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD)) { - vp->v_references = 0; + vp->v_references = 0; vnode_list_remove(vp); } vp->v_iocount++; #ifdef JOE_DEBUG record_vp(vp, 1); #endif - return(0); + return 0; } static void -vnode_dropiocount (vnode_t vp) +vnode_dropiocount(vnode_t vp) { - if (vp->v_iocount < 1) + if (vp->v_iocount < 1) { panic("vnode_dropiocount(%p): v_iocount < 1", vp); + } vp->v_iocount--; #ifdef JOE_DEBUG record_vp(vp, -1); #endif - if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) + if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) { wakeup(&vp->v_iocount); + } } @@ -4810,8 +4976,9 @@ vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) { int isfifo = 0; - if (!locked) + if (!locked) { vnode_lock(vp); + } if (vp->v_lflag & VL_TERMINATE) { panic("vnode reclaim in progress"); @@ -4824,9 +4991,9 @@ vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) isfifo = (vp->v_type == VFIFO); - if (vp->v_type != VBAD) - vgone(vp, flags); /* clean and reclaim the vnode */ - + if (vp->v_type != VBAD) { + vgone(vp, flags); /* clean and reclaim the vnode */ + } /* * give the vnode a new identity so that vnode_getwithvid will fail * on any stale cache accesses... 
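[Editorial note] The vid check in vnode_getiocount() above (withvid && vid != vp->v_id) is the failure path that vnode_reclaim_internal() arms by giving a recycled vnode a new identity. A minimal sketch of how a client of this KPI uses that, assuming a hypothetical cached pointer/identity pair (my_cached_vp and my_cached_vid are illustrative names, not from this patch; synchronization around the cache is omitted for brevity):

#include <sys/vnode.h>

/*
 * Sketch: revalidating a cached, unreferenced vnode pointer.
 * vnode_vid(), vnode_getwithvid() and vnode_put() are the KPIs involved;
 * the cached fields are hypothetical.
 */
static vnode_t  my_cached_vp;
static uint32_t my_cached_vid;

static void
remember_vnode(vnode_t vp)
{
	my_cached_vp = vp;
	my_cached_vid = vnode_vid(vp);  /* capture v_id while it is valid */
}

static int
use_cached_vnode(void)
{
	vnode_t vp = my_cached_vp;
	int error;

	/*
	 * Fails with ENOENT if the vnode was recycled in the meantime:
	 * vnode_reclaim_internal() changed v_id, so the cached vid no
	 * longer matches and no iocount is granted.
	 */
	error = vnode_getwithvid(vp, my_cached_vid);
	if (error) {
		return error;   /* stale: redo the lookup */
	}
	/* ... vp is safe to use here, pinned by the iocount ... */
	vnode_put(vp);          /* drop the iocount taken above */
	return 0;
}
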
@@ -4859,16 +5026,21 @@ vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) } vp->v_type = VBAD; - if (vp->v_data) + if (vp->v_data) { panic("vnode_reclaim_internal: cleaned vnode isn't"); - if (vp->v_numoutput) + } + if (vp->v_numoutput) { panic("vnode_reclaim_internal: clean vnode has pending I/O's"); - if (UBCINFOEXISTS(vp)) + } + if (UBCINFOEXISTS(vp)) { panic("vnode_reclaim_internal: ubcinfo not cleaned"); - if (vp->v_parent) - panic("vnode_reclaim_internal: vparent not removed"); - if (vp->v_name) - panic("vnode_reclaim_internal: vname not removed"); + } + if (vp->v_parent) { + panic("vnode_reclaim_internal: vparent not removed"); + } + if (vp->v_name) { + panic("vnode_reclaim_internal: vname not removed"); + } vp->v_socket = NULL; @@ -4885,14 +5057,15 @@ vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags) wakeup(&vp->v_lflag); } if (!reuse) { - /* + /* * make sure we get on the * dead list if appropriate */ - vnode_list_add(vp); + vnode_list_add(vp); + } + if (!locked) { + vnode_unlock(vp); } - if (!locked) - vnode_unlock(vp); } static int @@ -4905,7 +5078,7 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vnode_t vp; vnode_t nvp; vnode_t dvp; - struct uthread *ut; + struct uthread *ut; struct componentname *cnp; struct vnode_fsparam *param = (struct vnode_fsparam *)data; #if CONFIG_TRIGGERS @@ -4949,14 +5122,14 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, } if (!existing_vnode) { - if ((error = new_vnode(&vp)) ) { - return (error); + if ((error = new_vnode(&vp))) { + return error; } if (!init_vnode) { /* Make it so that it can be released by a vnode_put) */ vn_set_dead(vp); *vpp = vp; - return (0); + return 0; } } else { /* @@ -4981,10 +5154,12 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vp->v_type = param->vnfs_vtype; vp->v_data = param->vnfs_fsnode; - if (param->vnfs_markroot) + if (param->vnfs_markroot) { vp->v_flag |= VROOT; - if (param->vnfs_marksystem) + } + if (param->vnfs_marksystem) { vp->v_flag |= VSYSTEM; + } if (vp->v_type == VREG) { error = ubc_info_init_withsize(vp, param->vnfs_filesize); if (error) { @@ -4994,10 +5169,11 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vn_set_dead(vp); vnode_put(vp); - return(error); + return error; } - if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) + if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) { memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control); + } } #ifdef JOE_DEBUG record_vp(vp, 1); @@ -5008,9 +5184,9 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, * For trigger vnodes, attach trigger info to vnode */ if ((vp->v_type == VDIR) && (tinfo != NULL)) { - /* + /* * Note: has a side effect of incrementing trigger count on the - * mount if successful, which we would need to undo on a + * mount if successful, which we would need to undo on a * subsequent failure. 
*/ #ifdef JOE_DEBUG @@ -5024,15 +5200,14 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, record_vp(vp, 1); #endif vnode_put(vp); - return (error); + return error; } } #endif if (vp->v_type == VCHR || vp->v_type == VBLK) { + vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */ - vp->v_tag = VT_DEVFS; /* callers will reset if needed (bdevvp) */ - - if ( (nvp = checkalias(vp, param->vnfs_rdev)) ) { + if ((nvp = checkalias(vp, param->vnfs_rdev))) { /* * if checkalias returns a vnode, it will be locked * @@ -5042,7 +5217,7 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vp->v_op = spec_vnodeop_p; vp->v_type = VBAD; vp->v_lflag = VL_DEAD; - vp->v_data = NULL; + vp->v_data = NULL; vp->v_tag = VT_NON; vnode_put(vp); @@ -5066,8 +5241,9 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, if (VCHR == vp->v_type) { u_int maj = major(vp->v_rdev); - if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) + if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) { vp->v_flag |= VISTTY; + } } } @@ -5075,8 +5251,8 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, struct fifoinfo *fip; MALLOC(fip, struct fifoinfo *, - sizeof(*fip), M_TEMP, M_WAITOK); - bzero(fip, sizeof(struct fifoinfo )); + sizeof(*fip), M_TEMP, M_WAITOK); + bzero(fip, sizeof(struct fifoinfo)); vp->v_fifoinfo = fip; } /* The file systems must pass the address of the location where @@ -5091,11 +5267,13 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vp->v_lflag |= VNAMED_FSHASH; } if (param->vnfs_mp) { - if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) - vp->v_flag |= VLOCKLOCAL; + if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) { + vp->v_flag |= VLOCKLOCAL; + } if (insert) { - if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) + if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) { panic("insmntque: vp on the free list\n"); + } /* * enter in mount vnode list @@ -5115,11 +5293,13 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, * the name entered into the string cache */ vp->v_name = cache_enter_create(dvp, vp, cnp); - } else + } else { vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0); + } - if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED) + if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED) { vp->v_flag |= VISUNION; + } } if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) { /* @@ -5164,7 +5344,7 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, */ if (vnode_vtype(vp) == VREG && vnode_mount(vp) != NULL && - (! 
(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) { + (!(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) { /* not from root filesystem: eligible for secluded pages */ memory_object_mark_eligible_for_secluded( ubc_getobject(vp, UBC_FLAGS_NONE), @@ -5175,8 +5355,8 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, /* * secluded_for_filecache == 2: * + all read-only files OK, except: - * + dyld_shared_cache_arm64* - * + Camera + * + dyld_shared_cache_arm64* + * + Camera * + mediaserverd */ if (vnode_vtype(vp) == VREG) { @@ -5190,13 +5370,13 @@ vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, } #endif /* CONFIG_SECLUDED_MEMORY */ - return (0); + return 0; error_out: if (existing_vnode) { vnode_put(vp); } - return (error); + return error; } /* USAGE: @@ -5208,15 +5388,15 @@ int vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) { *vpp = NULLVP; - return (vnode_create_internal(flavor, size, data, vpp, 1)); + return vnode_create_internal(flavor, size, data, vpp, 1); } int vnode_create_empty(vnode_t *vpp) { *vpp = NULLVP; - return (vnode_create_internal(VNCREATE_FLAVOR, VCREATESIZE, NULL, - vpp, 0)); + return vnode_create_internal(VNCREATE_FLAVOR, VCREATESIZE, NULL, + vpp, 0); } int @@ -5242,42 +5422,43 @@ vnode_initialize(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp) ("vnode_initialize : v_data not NULL")); vnode_unlock(*vpp); #endif - return (vnode_create_internal(flavor, size, data, vpp, 1)); + return vnode_create_internal(flavor, size, data, vpp, 1); } int vnode_addfsref(vnode_t vp) { vnode_lock_spin(vp); - if (vp->v_lflag & VNAMED_FSHASH) + if (vp->v_lflag & VNAMED_FSHASH) { panic("add_fsref: vp already has named reference"); - if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) - panic("addfsref: vp on the free list\n"); + } + if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) { + panic("addfsref: vp on the free list\n"); + } vp->v_lflag |= VNAMED_FSHASH; vnode_unlock(vp); - return(0); - + return 0; } int vnode_removefsref(vnode_t vp) { vnode_lock_spin(vp); - if ((vp->v_lflag & VNAMED_FSHASH) == 0) + if ((vp->v_lflag & VNAMED_FSHASH) == 0) { panic("remove_fsref: no named reference"); + } vp->v_lflag &= ~VNAMED_FSHASH; vnode_unlock(vp); - return(0); - + return 0; } int vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg) { - mount_t mp; + mount_t mp; int ret = 0; fsid_t * fsid_list; - int count, actualcount, i; + int count, actualcount, i; void * allocmem; int indx_start, indx_stop, indx_incr; int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF); @@ -5298,25 +5479,24 @@ vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg) indx_start = actualcount - 1; indx_stop = -1; indx_incr = -1; - } else /* Head first by default */ { + } else { /* Head first by default */ indx_start = 0; indx_stop = actualcount; indx_incr = 1; } - for (i=indx_start; i != indx_stop; i += indx_incr) { - + for (i = indx_start; i != indx_stop; i += indx_incr) { /* obtain the mount point with iteration reference */ mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1); - if(mp == (struct mount *)0) + if (mp == (struct mount *)0) { continue; + } mount_lock(mp); if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) { mount_unlock(mp); mount_iterdrop(mp); continue; - } mount_unlock(mp); @@ -5327,8 +5507,9 @@ vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg) * Drop the iterref here if the callback didn't do it. * Note: If cb_dropref is set the mp may no longer exist. 
*/ - if (!cb_dropref) + if (!cb_dropref) { mount_iterdrop(mp); + } switch (ret) { case VFS_RETURNED: @@ -5351,7 +5532,7 @@ vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg) out: kfree(allocmem, (count * sizeof(fsid_t))); - return (ret); + return ret; } /* @@ -5363,8 +5544,8 @@ out: int vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype) { - struct vfs_attr va; - int error; + struct vfs_attr va; + int error; /* * Request the attributes we want to propagate into @@ -5383,13 +5564,14 @@ vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype) if ((error = vfs_getattr(mp, &va, ctx)) != 0) { KAUTH_DEBUG("STAT - filesystem returned error %d", error); - return(error); + return error; } #if CONFIG_MACF if (eventtype == VFS_USER_EVENT) { error = mac_mount_check_getattr(ctx, mp, &va); - if (error != 0) - return (error); + if (error != 0) { + return error; + } } #endif /* @@ -5414,31 +5596,38 @@ vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype) /* 4822056 - protect against malformed server mount */ mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512); } else { - mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */ + mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */ } if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) { mp->mnt_vfsstat.f_iosize = va.f_iosize; } else { - mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */ + mp->mnt_vfsstat.f_iosize = 1024 * 1024; /* 1MB sensible I/O size */ } - if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) + if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) { mp->mnt_vfsstat.f_blocks = va.f_blocks; - if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) + } + if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) { mp->mnt_vfsstat.f_bfree = va.f_bfree; - if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) + } + if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) { mp->mnt_vfsstat.f_bavail = va.f_bavail; - if (VFSATTR_IS_SUPPORTED(&va, f_bused)) + } + if (VFSATTR_IS_SUPPORTED(&va, f_bused)) { mp->mnt_vfsstat.f_bused = va.f_bused; - if (VFSATTR_IS_SUPPORTED(&va, f_files)) + } + if (VFSATTR_IS_SUPPORTED(&va, f_files)) { mp->mnt_vfsstat.f_files = va.f_files; - if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) + } + if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) { mp->mnt_vfsstat.f_ffree = va.f_ffree; + } /* this is unlikely to change, but has to be queried for */ - if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) + if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) { mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype; + } - return(0); + return 0; } int @@ -5450,7 +5639,7 @@ mount_list_add(mount_t mp) if (system_inshutdown != 0) { res = -1; } else { - TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); + TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); nummounts++; res = 0; } @@ -5483,7 +5672,7 @@ mount_lookupby_volfsid(int volfs_id, int withref) (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) { cur_mount = mp; if (withref) { - if (mount_iterref(cur_mount, 1)) { + if (mount_iterref(cur_mount, 1)) { cur_mount = (mount_t)0; mount_list_unlock(); goto out; @@ -5501,31 +5690,34 @@ mount_lookupby_volfsid(int volfs_id, int withref) mount_iterdrop(mp); } out: - return(cur_mount); + return cur_mount; } -mount_t +mount_t mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref) { mount_t retmp = (mount_t)0; mount_t mp; - if (!locked) + if (!locked) { mount_list_lock(); - TAILQ_FOREACH(mp, &mountlist, mnt_list) - if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] && - mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) { - retmp 
= mp; - if (withref) { - if (mount_iterref(retmp, 1)) - retmp = (mount_t)0; + } + TAILQ_FOREACH(mp, &mountlist, mnt_list) + if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] && + mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) { + retmp = mp; + if (withref) { + if (mount_iterref(retmp, 1)) { + retmp = (mount_t)0; } - goto out; } + goto out; + } out: - if (!locked) + if (!locked) { mount_list_unlock(); - return (retmp); + } + return retmp; } errno_t @@ -5539,27 +5731,31 @@ vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx) return EINVAL; } - if (flags & VNODE_LOOKUP_NOFOLLOW) + if (flags & VNODE_LOOKUP_NOFOLLOW) { ndflags = NOFOLLOW; - else + } else { ndflags = FOLLOW; + } - if (flags & VNODE_LOOKUP_NOCROSSMOUNT) + if (flags & VNODE_LOOKUP_NOCROSSMOUNT) { ndflags |= NOCROSSMOUNT; + } - if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) + if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) { ndflags |= CN_NBMOUNTLOOK; + } /* XXX AUDITVNPATH1 needed ? */ NDINIT(&nd, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE, - CAST_USER_ADDR_T(path), ctx); + CAST_USER_ADDR_T(path), ctx); - if ((error = namei(&nd))) - return (error); + if ((error = namei(&nd))) { + return error; + } *vpp = nd.ni_vp; nameidone(&nd); - - return (0); + + return 0; } errno_t @@ -5570,34 +5766,39 @@ vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_ u_int32_t ndflags = 0; int lflags = flags; - if (ctx == NULL) { /* XXX technically an error */ + if (ctx == NULL) { /* XXX technically an error */ ctx = vfs_context_current(); } - if (fmode & O_NOFOLLOW) + if (fmode & O_NOFOLLOW) { lflags |= VNODE_LOOKUP_NOFOLLOW; + } - if (lflags & VNODE_LOOKUP_NOFOLLOW) + if (lflags & VNODE_LOOKUP_NOFOLLOW) { ndflags = NOFOLLOW; - else + } else { ndflags = FOLLOW; + } - if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) + if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) { ndflags |= NOCROSSMOUNT; - - if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) + } + + if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) { ndflags |= CN_NBMOUNTLOOK; + } /* XXX AUDITVNPATH1 needed ? 
*/ NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, - CAST_USER_ADDR_T(path), ctx); + CAST_USER_ADDR_T(path), ctx); - if ((error = vn_open(&nd, fmode, cmode))) + if ((error = vn_open(&nd, fmode, cmode))) { *vpp = NULL; - else + } else { *vpp = nd.ni_vp; - - return (error); + } + + return error; } errno_t @@ -5608,37 +5809,39 @@ vnode_close(vnode_t vp, int flags, vfs_context_t ctx) if (ctx == NULL) { ctx = vfs_context_current(); } - + error = vn_close(vp, flags, ctx); vnode_put(vp); - return (error); + return error; } errno_t vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx) { - struct vnode_attr va; - int error; + struct vnode_attr va; + int error; VATTR_INIT(&va); VATTR_WANTED(&va, va_modify_time); error = vnode_getattr(vp, &va, ctx); - if (!error) + if (!error) { *mtime = va.va_modify_time; + } return error; } errno_t vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx) { - struct vnode_attr va; - int error; + struct vnode_attr va; + int error; VATTR_INIT(&va); VATTR_WANTED(&va, va_flags); error = vnode_getattr(vp, &va, ctx); - if (!error) + if (!error) { *flags = va.va_flags; + } return error; } @@ -5649,26 +5852,27 @@ vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx) errno_t vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx) { - struct vnode_attr va; - int error; + struct vnode_attr va; + int error; VATTR_INIT(&va); VATTR_WANTED(&va, va_data_size); error = vnode_getattr(vp, &va, ctx); - if (!error) + if (!error) { *sizep = va.va_data_size; - return(error); + } + return error; } errno_t vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx) { - struct vnode_attr va; + struct vnode_attr va; VATTR_INIT(&va); VATTR_SET(&va, va_data_size, size); va.va_vaflags = ioflag & 0xffff; - return(vnode_setattr(vp, &va, ctx)); + return vnode_setattr(vp, &va, ctx); } int @@ -5689,7 +5893,7 @@ vnode_cleardirty(vnode_t vp) return 0; } -int +int vnode_isdirty(vnode_t vp) { int dirty; @@ -5730,7 +5934,7 @@ vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_att * flags VN_* flags controlling ACL inheritance * and whether or not authorization is to * be required for the operation. - * + * * Returns: 0 Success * !0 errno value * @@ -5739,10 +5943,10 @@ vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_att * *cnp May be modified by the underlying VFS. * *vap May be modified by the underlying VFS. * modified by either ACL inheritance or - * - * + * + * * be modified, even if the operation is - * + * * * Notes: The kauth_filesec_t in 'vap', if any, is in host byte order. * @@ -5757,7 +5961,7 @@ vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_att errno_t vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx) { - errno_t error, old_error; + errno_t error, old_error; vnode_t vp = (vnode_t)0; boolean_t batched; struct componentname *cnp; @@ -5769,10 +5973,12 @@ vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *v KAUTH_DEBUG("%p CREATE - '%s'", dvp, cnp->cn_nameptr); - if (flags & VN_CREATE_NOINHERIT) + if (flags & VN_CREATE_NOINHERIT) { vap->va_vaflags |= VA_NOINHERIT; - if (flags & VN_CREATE_NOAUTH) + } + if (flags & VN_CREATE_NOAUTH) { vap->va_vaflags |= VA_NOAUTH; + } /* * Handle ACL inheritance, initialize vap. */ @@ -5792,7 +5998,7 @@ vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *v /* * Create the requested node. 
*/ - switch(vap->va_type) { + switch (vap->va_type) { case VREG: error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx); break; @@ -5819,8 +6025,9 @@ vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *v #if CONFIG_MACF if (!(flags & VN_CREATE_NOLABEL)) { error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx); - if (error) + if (error) { goto error; + } } #endif @@ -5836,7 +6043,6 @@ vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *v error: #endif if ((error != 0) && (vp != (vnode_t)0)) { - /* If we've done a compound open, close */ if (batched && (old_error == 0) && (vap->va_type == VREG)) { VNOP_CLOSE(vp, fmode, ctx); @@ -5854,34 +6060,35 @@ error: * For creation VNOPs, this is the equivalent of * lookup_handle_found_vnode. */ - if (kdebug_enable && *vpp) + if (kdebug_enable && *vpp) { kdebug_lookup(*vpp, cnp); + } out: vn_attribute_cleanup(vap, defaulted); - return(error); + return error; } -static kauth_scope_t vnode_scope; -static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action, +static kauth_scope_t vnode_scope; +static int vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, vnode_t vp, vnode_t dvp, int *errorp); typedef struct _vnode_authorize_context { - vnode_t vp; + vnode_t vp; struct vnode_attr *vap; - vnode_t dvp; + vnode_t dvp; struct vnode_attr *dvap; - vfs_context_t ctx; - int flags; - int flags_valid; -#define _VAC_IS_OWNER (1<<0) -#define _VAC_IN_GROUP (1<<1) -#define _VAC_IS_DIR_OWNER (1<<2) -#define _VAC_IN_DIR_GROUP (1<<3) -#define _VAC_NO_VNODE_POINTERS (1<<4) + vfs_context_t ctx; + int flags; + int flags_valid; +#define _VAC_IS_OWNER (1<<0) +#define _VAC_IN_GROUP (1<<1) +#define _VAC_IS_DIR_OWNER (1<<2) +#define _VAC_IN_DIR_GROUP (1<<3) +#define _VAC_NO_VNODE_POINTERS (1<<4) } *vauth_ctx; void @@ -5890,9 +6097,9 @@ vnode_authorize_init(void) vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL); } -#define VATTR_PREPARE_DEFAULTED_UID 0x1 -#define VATTR_PREPARE_DEFAULTED_GID 0x2 -#define VATTR_PREPARE_DEFAULTED_MODE 0x4 +#define VATTR_PREPARE_DEFAULTED_UID 0x1 +#define VATTR_PREPARE_DEFAULTED_GID 0x2 +#define VATTR_PREPARE_DEFAULTED_MODE 0x4 int vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx) @@ -5911,12 +6118,12 @@ vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fi vap->va_acl = NULL; if ((error = kauth_acl_inherit(dvp, - oacl, - &nacl, - vap->va_type == VDIR, - ctx)) != 0) { + oacl, + &nacl, + vap->va_type == VDIR, + ctx)) != 0) { KAUTH_DEBUG("%p CREATE - error %d processing inheritance", dvp, error); - return(error); + return error; } /* @@ -5930,11 +6137,11 @@ vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fi VATTR_SET(vap, va_acl, nacl); } } - + error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx); if (error) { vn_attribute_cleanup(vap, *defaulted_fieldsp); - } + } return error; } @@ -5953,7 +6160,7 @@ vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields) nacl = vap->va_acl; oacl = vap->va_base_acl; - if (oacl) { + if (oacl) { VATTR_SET(vap, va_acl, oacl); vap->va_base_acl = NULL; } else { @@ -5987,21 +6194,23 @@ vn_authorize_unlink(vnode_t 
dvp, vnode_t vp, struct componentname *cnp, vfs_cont int error = 0; /* - * Normally, unlinking of directories is not supported. + * Normally, unlinking of directories is not supported. * However, some file systems may have limited support. */ if ((vp->v_type == VDIR) && - !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) { - return (EPERM); /* POSIX */ + !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) { + return EPERM; /* POSIX */ } /* authorize the delete operation */ #if CONFIG_MACF - if (!error) + if (!error) { error = mac_vnode_check_unlink(ctx, dvp, vp, cnp); + } #endif /* MAC */ - if (!error) + if (!error) { error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx); + } return error; } @@ -6023,44 +6232,45 @@ vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs /* XXX may do duplicate work here, but ignore that for now (idempotent) */ if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) { error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx); - if (error) - return (error); + if (error) { + return error; + } } #endif - if ( (fmode & O_DIRECTORY) && vp->v_type != VDIR ) { - return (ENOTDIR); + if ((fmode & O_DIRECTORY) && vp->v_type != VDIR) { + return ENOTDIR; } if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) { - return (EOPNOTSUPP); /* Operation not supported on socket */ + return EOPNOTSUPP; /* Operation not supported on socket */ } if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) { - return (ELOOP); /* O_NOFOLLOW was specified and the target is a symbolic link */ + return ELOOP; /* O_NOFOLLOW was specified and the target is a symbolic link */ } /* disallow write operations on directories */ if (vnode_isdir(vp) && (fmode & (FWRITE | O_TRUNC))) { - return (EISDIR); + return EISDIR; } if ((cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH)) { if (vp->v_type != VDIR) { - return (ENOTDIR); + return ENOTDIR; } } #if CONFIG_MACF - /* If a file being opened is a shadow file containing - * namedstream data, ignore the macf checks because it - * is a kernel internal file and access should always + /* If a file being opened is a shadow file containing + * namedstream data, ignore the macf checks because it + * is a kernel internal file and access should always * be allowed. */ if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) { error = mac_vnode_check_open(ctx, vp, fmode); if (error) { - return (error); + return error; } } #endif @@ -6092,7 +6302,7 @@ vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs * is really a shadow file. If it was created successfully * then it should be authorized. 
*/ - if (vnode_isshadow(vp) && vnode_isnamedstream (vp)) { + if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) { error = vnode_verifynamedstream(vp); } } @@ -6120,40 +6330,41 @@ vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *v /* Only validate path for creation if we didn't do a complete lookup */ if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) { error = lookup_validate_creation_path(cnp->cn_ndp); - if (error) - return (error); + if (error) { + return error; + } } #if CONFIG_MACF error = mac_vnode_check_create(ctx, dvp, cnp, vap); - if (error) - return (error); + if (error) { + return error; + } #endif /* CONFIG_MACF */ - return (vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)); + return vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx); } int -vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_context_t ctx, void *reserved) +vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_context_t ctx, void *reserved) { return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved); } int -vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, - vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved) +vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, + vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved) { - return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved); } int -vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path, - struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path, - vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved) +vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path, + struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path, + vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved) { int error = 0; int moving = 0; @@ -6171,27 +6382,30 @@ vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct */ if (fvp->v_type == VDIR && ((fdvp == fvp) || - (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') || - ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT)) ) { + (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') || + ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT))) { error = EINVAL; goto out; } if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) { error = lookup_validate_creation_path(tcnp->cn_ndp); - if (error) + if (error) { goto out; + } } /***** *****/ #if CONFIG_MACF error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp); - if (error) + if (error) { goto out; + } if (swap) { error = mac_vnode_check_rename(ctx, tdvp, tvp, tcnp, fdvp, fvp, fcnp); - if (error) + if (error) { goto out; + } } #endif /***** *****/ @@ -6276,44 +6490,53 @@ vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct * permit changing ".." 
*/ if (fdvp != tdvp) { - if (vnode_isdir(fvp)) + if (vnode_isdir(fvp)) { f = KAUTH_VNODE_ADD_SUBDIRECTORY; - if (vnode_isdir(tvp)) + } + if (vnode_isdir(tvp)) { t = KAUTH_VNODE_ADD_SUBDIRECTORY; + } } - if (to_path != NULL) + if (to_path != NULL) { kauth_authorize_fileop(vfs_context_ucred(ctx), - KAUTH_FILEOP_WILL_RENAME, - (uintptr_t)fvp, - (uintptr_t)to_path); + KAUTH_FILEOP_WILL_RENAME, + (uintptr_t)fvp, + (uintptr_t)to_path); + } error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx); - if (error) + if (error) { goto out; - if (from_path != NULL) + } + if (from_path != NULL) { kauth_authorize_fileop(vfs_context_ucred(ctx), - KAUTH_FILEOP_WILL_RENAME, - (uintptr_t)tvp, - (uintptr_t)from_path); + KAUTH_FILEOP_WILL_RENAME, + (uintptr_t)tvp, + (uintptr_t)from_path); + } error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx); - if (error) + if (error) { goto out; + } f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE; t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE; - if (fdvp == tdvp) + if (fdvp == tdvp) { error = vnode_authorize(fdvp, NULL, f | t, ctx); - else { + } else { error = vnode_authorize(fdvp, NULL, t, ctx); - if (error) + if (error) { goto out; + } error = vnode_authorize(tdvp, NULL, f, ctx); } - if (error) + if (error) { goto out; + } } else { error = 0; if ((tvp != NULL) && vnode_isdir(tvp)) { - if (tvp != fdvp) + if (tvp != fdvp) { moving = 1; + } } else if (tdvp != fdvp) { moving = 1; } @@ -6325,35 +6548,39 @@ vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct * If fvp is a directory, and we are changing it's parent, * then we also need rights to rewrite its ".." entry as well. */ - if (to_path != NULL) + if (to_path != NULL) { kauth_authorize_fileop(vfs_context_ucred(ctx), - KAUTH_FILEOP_WILL_RENAME, - (uintptr_t)fvp, - (uintptr_t)to_path); + KAUTH_FILEOP_WILL_RENAME, + (uintptr_t)fvp, + (uintptr_t)to_path); + } if (vnode_isdir(fvp)) { - if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) + if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) { goto out; + } } else { - if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) + if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) { goto out; + } } if (moving) { /* moving into tdvp or tvp, must have rights to add */ if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp, - NULL, - vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, - ctx)) != 0) { + NULL, + vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, + ctx)) != 0) { goto out; } } else { /* node staying in same directory, must be allowed to add new name */ if ((error = vnode_authorize(fdvp, NULL, - vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) + vnode_isdir(fvp) ? 
KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) { goto out; + } } /* overwriting tvp */ if ((tvp != NULL) && !vnode_isdir(tvp) && - ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) { + ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) { goto out; } } @@ -6374,7 +6601,7 @@ vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *va int error; if (reserved != NULL) { - panic("reserved not NULL in vn_authorize_mkdir()"); + panic("reserved not NULL in vn_authorize_mkdir()"); } /* XXX A hack for now, to make shadow files work */ @@ -6384,21 +6611,24 @@ vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *va if (vnode_compound_mkdir_available(dvp)) { error = lookup_validate_creation_path(cnp->cn_ndp); - if (error) + if (error) { goto out; + } } #if CONFIG_MACF error = mac_vnode_check_create(ctx, dvp, cnp, vap); - if (error) + if (error) { goto out; + } #endif - /* authorize addition of a directory to the parent */ - if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) - goto out; - + /* authorize addition of a directory to the parent */ + if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) { + goto out; + } + out: return error; } @@ -6420,20 +6650,21 @@ vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_conte * rmdir only deals with directories */ return ENOTDIR; - } - + } + if (dvp == vp) { /* * No rmdir "." please. */ return EINVAL; - } - + } + #if CONFIG_MACF error = mac_vnode_check_unlink(ctx, dvp, - vp, cnp); - if (error) + vp, cnp); + if (error) { return error; + } #endif return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx); @@ -6479,21 +6710,23 @@ vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action, * gid of the new object. */ VATTR_WANTED(vap, va_acl); - if (dvap) + if (dvap) { VATTR_WANTED(dvap, va_gid); + } } else if (dvap && (flags & VNODE_CLONEFILE_NOOWNERCOPY)) { VATTR_WANTED(dvap, va_gid); } - return (0); + return 0; } else if (vattr_op == OP_VATTR_CLEANUP) { - return (0); /* Nothing to do for now */ + return 0; /* Nothing to do for now */ } /* dvap isn't used for authorization */ error = vnode_attr_authorize(vap, NULL, mp, action, ctx); - if (error) - return (error); + if (error) { + return error; + } /* * vn_attribute_prepare should be able to accept attributes as well as @@ -6534,14 +6767,15 @@ vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action, if (VATTR_IS_ACTIVE(vap, va_flags)) { VATTR_SET(vap, va_flags, ((vap->va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)))); /* Turn off from source */ - if (VATTR_IS_ACTIVE(dvap, va_flags)) + if (VATTR_IS_ACTIVE(dvap, va_flags)) { VATTR_SET(vap, va_flags, vap->va_flags | (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED))); + } } else if (VATTR_IS_ACTIVE(dvap, va_flags)) { VATTR_SET(vap, va_flags, (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED))); } - return (0); + return 0; } @@ -6566,24 +6800,27 @@ vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action, int vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx) { - int error, result; + int error, result; /* * We can't authorize against a dead vnode; allow all operations through so that * the correct error can be returned. 
*/ - if (vp->v_type == VBAD) - return(0); - + if (vp->v_type == VBAD) { + return 0; + } + error = 0; result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action, - (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error); - if (result == EPERM) /* traditional behaviour */ + (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error); + if (result == EPERM) { /* traditional behaviour */ result = EACCES; + } /* did the lower layers give a better error return? */ - if ((result != 0) && (error != 0)) - return(error); - return(result); + if ((result != 0) && (error != 0)) { + return error; + } + return result; } /* @@ -6599,14 +6836,15 @@ vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ct static int vnode_immutable(struct vnode_attr *vap, int append, int ignore) { - int mask; + int mask; /* start with all bits precluding the operation */ mask = IMMUTABLE | APPEND; /* if appending only, remove the append-only bits */ - if (append) + if (append) { mask &= ~APPEND; + } /* ignore only set when authorizing flags changes */ if (ignore) { @@ -6619,9 +6857,10 @@ vnode_immutable(struct vnode_attr *vap, int append, int ignore) } } KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore); - if ((vap->va_flags & mask) != 0) - return(EPERM); - return(0); + if ((vap->va_flags & mask) != 0) { + return EPERM; + } + return 0; } static int @@ -6639,8 +6878,8 @@ vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred) result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0; } /* we could test the owner UUID here if we had a policy for it */ - - return(result); + + return result; } /* @@ -6662,8 +6901,8 @@ vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred) static int vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow) { - int error; - int result; + int error; + int result; error = 0; result = 0; @@ -6690,8 +6929,9 @@ vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int i * XXX all currently known cases, however, this wil result * XXX in correct behaviour. */ - if (error == ENOENT) + if (error == ENOENT) { error = idontknow; + } } /* * XXX We could test the group UUID here if we had a policy for it, @@ -6706,9 +6946,10 @@ vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int i * XXX caching DNS server). 
*/ - if (!error) + if (!error) { *ismember = result; - return(error); + } + return error; } static int @@ -6729,7 +6970,7 @@ vauth_file_owner(vauth_ctx vcp) vcp->flags &= ~_VAC_IS_OWNER; } } - return(result); + return result; } @@ -6756,7 +6997,7 @@ vauth_file_owner(vauth_ctx vcp) static int vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow) { - int error; + int error; /* Check for a cached answer first, to avoid the check if possible */ if (vcp->flags_valid & _VAC_IN_GROUP) { @@ -6775,9 +7016,8 @@ vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow) vcp->flags &= ~_VAC_IN_GROUP; } } - } - return(error); + return error; } static int @@ -6798,7 +7038,7 @@ vauth_dir_owner(vauth_ctx vcp) vcp->flags &= ~_VAC_IS_DIR_OWNER; } } - return(result); + return result; } /* @@ -6824,7 +7064,7 @@ vauth_dir_owner(vauth_ctx vcp) static int vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow) { - int error; + int error; /* Check for a cached answer first, to avoid the check if possible */ if (vcp->flags_valid & _VAC_IN_DIR_GROUP) { @@ -6844,7 +7084,7 @@ vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow) } } } - return(error); + return error; } /* @@ -6858,7 +7098,7 @@ vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir) int needed, error, owner_ok, group_ok, world_ok, ismember; #ifdef KAUTH_DEBUG_ENABLE const char *where = "uninitialized"; -# define _SETWHERE(c) where = c; +# define _SETWHERE(c) where = c; #else # define _SETWHERE(c) #endif @@ -6869,9 +7109,9 @@ vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir) } else { vap = vcp->vap; } - + error = 0; - + /* * We want to do as little work here as possible. So first we check * which sets of permissions grant us the access we need, and avoid checking @@ -6880,32 +7120,41 @@ vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir) /* owner permissions */ needed = 0; - if (action & VREAD) + if (action & VREAD) { needed |= S_IRUSR; - if (action & VWRITE) + } + if (action & VWRITE) { needed |= S_IWUSR; - if (action & VEXEC) + } + if (action & VEXEC) { needed |= S_IXUSR; + } owner_ok = (needed & vap->va_mode) == needed; /* group permissions */ needed = 0; - if (action & VREAD) + if (action & VREAD) { needed |= S_IRGRP; - if (action & VWRITE) + } + if (action & VWRITE) { needed |= S_IWGRP; - if (action & VEXEC) + } + if (action & VEXEC) { needed |= S_IXGRP; + } group_ok = (needed & vap->va_mode) == needed; /* world permissions */ needed = 0; - if (action & VREAD) + if (action & VREAD) { needed |= S_IROTH; - if (action & VWRITE) + } + if (action & VWRITE) { needed |= S_IWOTH; - if (action & VEXEC) + } + if (action & VEXEC) { needed |= S_IXOTH; + } world_ok = (needed & vap->va_mode) == needed; /* If granted/denied by all three, we're done */ @@ -6923,8 +7172,9 @@ vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir) if ((on_dir && vauth_dir_owner(vcp)) || (!on_dir && vauth_file_owner(vcp))) { _SETWHERE("user"); - if (!owner_ok) + if (!owner_ok) { error = EACCES; + } goto out; } @@ -6940,7 +7190,7 @@ vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir) } /* Check group membership (most expensive) */ - ismember = 0; /* Default to allow, if the target has no group owner */ + ismember = 0; /* Default to allow, if the target has no group owner */ /* * In the case we can't get an answer about the user from the call to @@ -6955,21 +7205,24 @@ vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir) error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? 
EACCES : 0)); } if (error) { - if (!group_ok) + if (!group_ok) { ismember = 1; + } error = 0; } if (ismember) { _SETWHERE("group"); - if (!group_ok) + if (!group_ok) { error = EACCES; + } goto out; } /* Not owner, not in group, use world result */ _SETWHERE("world"); - if (!world_ok) + if (!world_ok) { error = EACCES; + } /* FALLTHROUGH */ @@ -6992,7 +7245,7 @@ out: kauth_cred_getuid(vcp->ctx->vc_ucred), on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid, on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid); - return(error); + return error; } /* @@ -7030,11 +7283,11 @@ out: static int vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) { - struct vnode_attr *vap = vcp->vap; - struct vnode_attr *dvap = vcp->dvap; - kauth_cred_t cred = vcp->ctx->vc_ucred; - struct kauth_acl_eval eval; - int error, ismember; + struct vnode_attr *vap = vcp->vap; + struct vnode_attr *dvap = vcp->dvap; + kauth_cred_t cred = vcp->ctx->vc_ucred; + struct kauth_acl_eval eval; + int error, ismember; /* Check the ACL on the node first */ if (VATTR_IS_NOT(vap, va_acl, NULL)) { @@ -7042,20 +7295,23 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) eval.ae_acl = &vap->va_acl->acl_ace[0]; eval.ae_count = vap->va_acl->acl_entrycount; eval.ae_options = 0; - if (vauth_file_owner(vcp)) + if (vauth_file_owner(vcp)) { eval.ae_options |= KAUTH_AEVAL_IS_OWNER; + } /* * We use ENOENT as a marker to indicate we could not get * information in order to delay evaluation until after we * have the ACL evaluation answer. Previously, we would * always deny the operation at this point. */ - if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) - return (error); - if (error == ENOENT) + if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) { + return error; + } + if (error == ENOENT) { eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN; - else if (ismember) + } else if (ismember) { eval.ae_options |= KAUTH_AEVAL_IN_GROUP; + } eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS; eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS; eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS; @@ -7063,16 +7319,16 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) if ((error = kauth_acl_evaluate(cred, &eval)) != 0) { KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error); - return (error); + return error; } - switch(eval.ae_result) { + switch (eval.ae_result) { case KAUTH_RESULT_DENY: KAUTH_DEBUG("%p DENIED - denied by ACL", vcp->vp); - return (EACCES); + return EACCES; case KAUTH_RESULT_ALLOW: KAUTH_DEBUG("%p ALLOWED - granted by ACL", vcp->vp); - return (0); + return 0; case KAUTH_RESULT_DEFER: default: /* Defer to directory */ @@ -7093,7 +7349,7 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) */ if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) { KAUTH_DEBUG("%p ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp); - return (0); + return 0; } /* check the ACL on the directory */ @@ -7102,20 +7358,23 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) eval.ae_acl = &dvap->va_acl->acl_ace[0]; eval.ae_count = dvap->va_acl->acl_entrycount; eval.ae_options = 0; - if (vauth_dir_owner(vcp)) + if (vauth_dir_owner(vcp)) { eval.ae_options |= KAUTH_AEVAL_IS_OWNER; + } /* * We use ENOENT as a marker to indicate we could not get * information in order to delay evaluation until after we * have the ACL evaluation answer. 
Previously, we would * always deny the operation at this point. */ - if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) - return(error); - if (error == ENOENT) + if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) { + return error; + } + if (error == ENOENT) { eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN; - else if (ismember) + } else if (ismember) { eval.ae_options |= KAUTH_AEVAL_IN_GROUP; + } eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS; eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS; eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS; @@ -7129,19 +7388,19 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) if (error != 0) { KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error); - return (error); + return error; } - switch(eval.ae_result) { + switch (eval.ae_result) { case KAUTH_RESULT_DENY: KAUTH_DEBUG("%p DENIED - denied by directory ACL", vcp->vp); - return (EACCES); + return EACCES; case KAUTH_RESULT_ALLOW: KAUTH_DEBUG("%p ALLOWED - granted by directory ACL", vcp->vp); if (!cached_delete_child && vcp->dvp) { vnode_cache_authorized_action(vcp->dvp, vcp->ctx, KAUTH_VNODE_DELETE_CHILD); } - return (0); + return 0; case KAUTH_RESULT_DEFER: default: /* Deferred by directory ACL */ @@ -7157,7 +7416,7 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) if (!cached_delete_child) { if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) { KAUTH_DEBUG("%p DENIED - denied by posix permisssions", vcp->vp); - return (EACCES); + return EACCES; } /* * Cache the authorized action on the vnode if allowed by the @@ -7174,13 +7433,13 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) { KAUTH_DEBUG("%p DENIED - sticky bit rules (user %d file %d dir %d)", vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid); - return (EACCES); + return EACCES; } /* not denied, must be OK */ - return (0); + return 0; } - + /* * Authorize an operation based on the node's attributes. @@ -7188,19 +7447,20 @@ vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child) static int vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny) { - struct vnode_attr *vap = vcp->vap; - kauth_cred_t cred = vcp->ctx->vc_ucred; - struct kauth_acl_eval eval; - int error, ismember; - mode_t posix_action; + struct vnode_attr *vap = vcp->vap; + kauth_cred_t cred = vcp->ctx->vc_ucred; + struct kauth_acl_eval eval; + int error, ismember; + mode_t posix_action; /* * If we are the file owner, we automatically have some rights. * * Do we need to expand this to support group ownership? */ - if (vauth_file_owner(vcp)) + if (vauth_file_owner(vcp)) { acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY); + } /* * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can @@ -7211,12 +7471,13 @@ vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_r * the owner. 
*/ if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) && - (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) + (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) { acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY; - + } + if (acl_rights == 0) { KAUTH_DEBUG("%p ALLOWED - implicit or no rights required", vcp->vp); - return(0); + return 0; } /* if we have an ACL, evaluate it */ @@ -7225,37 +7486,40 @@ vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_r eval.ae_acl = &vap->va_acl->acl_ace[0]; eval.ae_count = vap->va_acl->acl_entrycount; eval.ae_options = 0; - if (vauth_file_owner(vcp)) + if (vauth_file_owner(vcp)) { eval.ae_options |= KAUTH_AEVAL_IS_OWNER; + } /* * We use ENOENT as a marker to indicate we could not get * information in order to delay evaluation until after we * have the ACL evaluation answer. Previously, we would * always deny the operation at this point. */ - if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) - return(error); - if (error == ENOENT) + if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) { + return error; + } + if (error == ENOENT) { eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN; - else if (ismember) + } else if (ismember) { eval.ae_options |= KAUTH_AEVAL_IN_GROUP; + } eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS; eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS; eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS; eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS; - + if ((error = kauth_acl_evaluate(cred, &eval)) != 0) { KAUTH_DEBUG("%p ERROR during ACL processing - %d", vcp->vp, error); - return(error); + return error; } - - switch(eval.ae_result) { + + switch (eval.ae_result) { case KAUTH_RESULT_DENY: KAUTH_DEBUG("%p DENIED - by ACL", vcp->vp); - return(EACCES); /* deny, deny, counter-allege */ + return EACCES; /* deny, deny, counter-allege */ case KAUTH_RESULT_ALLOW: KAUTH_DEBUG("%p ALLOWED - all rights granted by ACL", vcp->vp); - return(0); + return 0; case KAUTH_RESULT_DEFER: default: /* Effectively the same as !delete_child_denied */ @@ -7279,14 +7543,15 @@ vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_r /* * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied. */ - if (vauth_file_owner(vcp)) + if (vauth_file_owner(vcp)) { eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES; - + } + if (eval.ae_residual == 0) { KAUTH_DEBUG("%p ALLOWED - rights already authorized", vcp->vp); - return(0); - } - + return 0; + } + /* * Bail if we have residual rights that can't be granted by posix permissions, * or aren't presumed granted at this point. 
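[Editorial note] Both vnode_authorize_delete() and vnode_authorize_simple() repeat the same three-way ACL evaluation seen in this hunk: an explicit deny is final, an explicit allow of every requested right is final, and anything else defers to the POSIX mode bits. A condensed sketch of that pattern, using only the kauth names that appear above (the helper itself and its defer out-parameter are illustrative, not part of this patch):

#include <sys/kauth.h>
#include <sys/errno.h>

/*
 * Sketch of the deny/allow/defer ACL step.  Returns EACCES on an
 * explicit deny, 0 on a full allow, and sets *deferp when the caller
 * should fall back to the POSIX permission check.  is_owner/ismember
 * correspond to what vauth_file_owner()/vauth_file_ingroup() compute;
 * group_unknown models the ENOENT "could not resolve group membership"
 * marker used above.
 */
static int
acl_step_sketch(kauth_cred_t cred, kauth_acl_t acl,
    kauth_ace_rights_t rights, int is_owner, int ismember,
    int group_unknown, int *deferp)
{
	struct kauth_acl_eval eval;
	int error;

	*deferp = 0;
	eval.ae_requested_rights = rights;
	eval.ae_acl = &acl->acl_ace[0];
	eval.ae_count = acl->acl_entrycount;
	eval.ae_options = 0;
	if (is_owner) {
		eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
	}
	if (group_unknown) {
		eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
	} else if (ismember) {
		eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
	}
	eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
	eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
	eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
	eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;

	if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
		return error;           /* evaluation itself failed */
	}
	switch (eval.ae_result) {
	case KAUTH_RESULT_DENY:
		return EACCES;          /* explicit deny is final */
	case KAUTH_RESULT_ALLOW:
		return 0;               /* every requested right granted */
	case KAUTH_RESULT_DEFER:
	default:
		*deferp = 1;            /* fall through to POSIX mode bits */
		return 0;
	}
}
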
@@ -7295,16 +7560,17 @@ vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_r */ if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) { KAUTH_DEBUG("%p DENIED - CHANGE_OWNER not permitted", vcp->vp); - return(EACCES); + return EACCES; } if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) { KAUTH_DEBUG("%p DENIED - WRITE_SECURITY not permitted", vcp->vp); - return(EACCES); + return EACCES; } #if DIAGNOSTIC - if (eval.ae_residual & KAUTH_VNODE_DELETE) + if (eval.ae_residual & KAUTH_VNODE_DELETE) { panic("vnode_authorize: can't be checking delete permission here"); + } #endif /* @@ -7313,22 +7579,25 @@ vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_r */ posix_action = 0; if (eval.ae_residual & (KAUTH_VNODE_READ_DATA | - KAUTH_VNODE_LIST_DIRECTORY | - KAUTH_VNODE_READ_EXTATTRIBUTES)) + KAUTH_VNODE_LIST_DIRECTORY | + KAUTH_VNODE_READ_EXTATTRIBUTES)) { posix_action |= VREAD; + } if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA | - KAUTH_VNODE_ADD_FILE | - KAUTH_VNODE_ADD_SUBDIRECTORY | - KAUTH_VNODE_DELETE_CHILD | - KAUTH_VNODE_WRITE_ATTRIBUTES | - KAUTH_VNODE_WRITE_EXTATTRIBUTES)) + KAUTH_VNODE_ADD_FILE | + KAUTH_VNODE_ADD_SUBDIRECTORY | + KAUTH_VNODE_DELETE_CHILD | + KAUTH_VNODE_WRITE_ATTRIBUTES | + KAUTH_VNODE_WRITE_EXTATTRIBUTES)) { posix_action |= VWRITE; + } if (eval.ae_residual & (KAUTH_VNODE_EXECUTE | - KAUTH_VNODE_SEARCH)) + KAUTH_VNODE_SEARCH)) { posix_action |= VEXEC; - + } + if (posix_action != 0) { - return(vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */)); + return vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */); } else { KAUTH_DEBUG("%p ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping", vcp->vp, @@ -7365,7 +7634,7 @@ vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_r /* * Lack of required Posix permissions implies no reason to deny access. */ - return(0); + return 0; } /* @@ -7382,7 +7651,7 @@ vnode_authorize_checkimmutable(mount_t mp, struct vnode_attr *vap, int rights, i * * Sockets, fifos and devices require special handling. */ - switch(vap->va_type) { + switch (vap->va_type) { case VSOCK: case VFIFO: case VBLK: @@ -7399,10 +7668,8 @@ vnode_authorize_checkimmutable(mount_t mp, struct vnode_attr *vap, int rights, i error = 0; if (rights & KAUTH_VNODE_WRITE_RIGHTS) { - /* check per-filesystem options if possible */ if (mp != NULL) { - /* check for no-EA filesystems */ if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) && (vfs_flags(mp) & MNT_NOUSERXATTR)) { @@ -7412,17 +7679,19 @@ vnode_authorize_checkimmutable(mount_t mp, struct vnode_attr *vap, int rights, i } } - /* - * check for file immutability. first, check if the requested rights are + /* + * check for file immutability. first, check if the requested rights are * allowable for a UF_APPEND file. 
*/ append = 0; if (vap->va_type == VDIR) { - if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) + if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) { append = 1; + } } else { - if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) + if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES)) == rights) { append = 1; + } } if ((error = vnode_immutable(vap, append, ignore)) != 0) { KAUTH_DEBUG("%p DENIED - file is immutable", vp); @@ -7430,7 +7699,7 @@ vnode_authorize_checkimmutable(mount_t mp, struct vnode_attr *vap, int rights, i } } out: - return(error); + return error; } /* @@ -7451,18 +7720,18 @@ out: static int vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx) { - int error; + int error; /* * If the vp is a device node, socket or FIFO it actually represents a local * endpoint, so we need to handle it locally. */ - switch(vp->v_type) { + switch (vp->v_type) { case VBLK: case VCHR: case VSOCK: case VFIFO: - return(0); + return 0; default: break; } @@ -7471,8 +7740,9 @@ vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_cont * In the advisory request case, if the filesystem doesn't think it's reliable * we will attempt to formulate a result ourselves based on VNOP_GETATTR data. */ - if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) - return(0); + if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) { + return 0; + } /* * Let the filesystem have a say in the matter. It's OK for it to not implemnent @@ -7481,9 +7751,9 @@ vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_cont if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) { *resultp = error; KAUTH_DEBUG("%p DENIED - opaque filesystem VNOP_ACCESS denied access", vp); - return(1); + return 1; } - + /* * Typically opaque filesystems do authorisation in-line, but exec is a special case. In * order to be reasonably sure that exec will be permitted, we try a bit harder here. 
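The append test at the top of this hunk is a subset check: on a UF_APPEND object the request is treated as "append" for vnode_immutable() only if every requested right lies inside the append-compatible mask, written as (rights & mask) == rights. A tiny illustration of the idiom (bit values are made up):

```c
/* Sketch of the subset test used above: a right set is append-compatible
 * iff every requested bit falls inside the allowed mask. Bit values are
 * illustrative; this models the file case, while directories use an
 * ADD_FILE | ADD_SUBDIRECTORY | WRITE_EXTATTRIBUTES mask instead. */
#include <assert.h>

#define APPEND_DATA   0x1
#define WRITE_XATTR   0x2
#define WRITE_DATA    0x4

static int is_append_compatible(unsigned rights)
{
    unsigned mask = APPEND_DATA | WRITE_XATTR;
    return (rights & mask) == rights;
}

int main(void)
{
    assert(is_append_compatible(APPEND_DATA));                /* passes */
    assert(!is_append_compatible(APPEND_DATA | WRITE_DATA));  /* rejected */
    return 0;
}
```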
@@ -7493,7 +7763,7 @@ vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_cont if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) { *resultp = error; KAUTH_DEBUG("%p DENIED - EXECUTE denied because file could not be opened readonly", vp); - return(1); + return 1; } VNOP_CLOSE(vp, FREAD, ctx); } @@ -7504,7 +7774,7 @@ vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_cont */ *resultp = 0; KAUTH_DEBUG("%p ALLOWED - bypassing access check for non-local filesystem", vp); - return(1); + return 1; } @@ -7532,12 +7802,12 @@ vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata, kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3) { - vfs_context_t ctx; - vnode_t cvp = NULLVP; - vnode_t vp, dvp; - int result = KAUTH_RESULT_DENY; - int parent_iocount = 0; - int parent_action; /* In case we need to use namedstream's data fork for cached rights*/ + vfs_context_t ctx; + vnode_t cvp = NULLVP; + vnode_t vp, dvp; + int result = KAUTH_RESULT_DENY; + int parent_iocount = 0; + int parent_action; /* In case we need to use namedstream's data fork for cached rights*/ ctx = (vfs_context_t)arg0; vp = (vnode_t)arg1; @@ -7545,7 +7815,7 @@ vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata, /* * if there are 2 vnodes passed in, we don't know at - * this point which rights to look at based on the + * this point which rights to look at based on the * combined action being passed in... defer until later... * otherwise check the kauth 'rights' cache hung * off of the vnode we're interested in... if we've already @@ -7555,25 +7825,26 @@ vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata, * succeeds, we'll add the right(s) to the cache. * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache */ - if (dvp && vp) - goto defer; + if (dvp && vp) { + goto defer; + } if (dvp) { - cvp = dvp; + cvp = dvp; } else { - /* + /* * For named streams on local-authorization volumes, rights are cached on the parent; * authorization is determined by looking at the parent's properties anyway, so storing - * on the parent means that we don't recompute for the named stream and that if + * on the parent means that we don't recompute for the named stream and that if * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the * stream to flush its cache separately. If we miss in the cache, then we authorize - * as if there were no cached rights (passing the named stream vnode and desired rights to + * as if there were no cached rights (passing the named stream vnode and desired rights to * vnode_authorize_callback_int()). * - * On an opaquely authorized volume, we don't know the relationship between the + * On an opaquely authorized volume, we don't know the relationship between the * data fork's properties and the rights granted on a stream. Thus, named stream vnodes * on such a volume are authorized directly (rather than using the parent) and have their * own caches. When a named stream vnode is created, we mark the parent as having a named - * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we + * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we * find the stream and flush its cache. 
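The comment above settles where cached rights live: on the parent for named streams of locally-authorized volumes, and on the stream's own vnode for opaquely-authorized ones. The code that follows is then a standard authorize-and-memoize pattern: consult the per-vnode cache, fall back to the full evaluation, and cache ALLOW results only. A stand-alone sketch over a toy vnode type (the helpers mirror vnode_cache_is_authorized()/vnode_cache_authorized_action() in shape only):

```c
/* Authorize-and-memoize sketch. The struct and helpers are hypothetical
 * stand-ins; the control flow matches the callback in the hunk below. */
#include <stdio.h>

enum { DENY = 0, ALLOW = 1 };

struct vn { unsigned cached_rights; };   /* toy vnode with a rights cache */

static int cache_is_authorized(struct vn *vp, unsigned action)
{
    return (vp->cached_rights & action) == action;
}

static void cache_authorized_action(struct vn *vp, unsigned action)
{
    vp->cached_rights |= action;
}

static int authorize_full(struct vn *vp, unsigned action)
{
    (void)vp; (void)action;
    return ALLOW;                        /* stand-in for the full evaluation */
}

static int authorize(struct vn *vp, unsigned action)
{
    if (cache_is_authorized(vp, action))
        return ALLOW;                    /* hit: skip the whole walk */
    int result = authorize_full(vp, action);
    if (result == ALLOW)
        cache_authorized_action(vp, action); /* only successes are cached */
    return result;
}

int main(void)
{
    struct vn v = { 0 };
    authorize(&v, 0x3);                  /* miss: evaluate and cache */
    printf("cached now: %d\n", cache_is_authorized(&v, 0x3)); /* 1 */
    return 0;
}
```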
*/ if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) { @@ -7595,22 +7866,21 @@ vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata, parent_action &= ~KAUTH_VNODE_WRITE_DATA; parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES; } - } else { cvp = vp; } } if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) { - result = KAUTH_RESULT_ALLOW; + result = KAUTH_RESULT_ALLOW; goto out; } defer: - result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3); + result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3); if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) { KAUTH_DEBUG("%p - caching action = %x", cvp, action); - vnode_cache_authorized_action(cvp, ctx, action); + vnode_cache_authorized_action(cvp, ctx, action); } out: @@ -7635,15 +7905,17 @@ vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp, * file rights. */ if ((result = vnode_authorize_checkimmutable(mp, vcp->vap, rights, - noimmutable)) != 0) + noimmutable)) != 0) { goto out; + } if ((rights & KAUTH_VNODE_DELETE) && !parent_authorized_for_delete_child) { result = vnode_authorize_checkimmutable(mp, vcp->dvap, KAUTH_VNODE_DELETE_CHILD, 0); - if (result) + if (result) { goto out; + } } /* @@ -7651,8 +7923,9 @@ vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp, * check. */ rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE); - if (rights == 0) + if (rights == 0) { goto out; + } /* * If we're not the superuser, authorize based on file properties; @@ -7662,13 +7935,15 @@ vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp, if (!is_suser) { /* process delete rights */ if ((rights & KAUTH_VNODE_DELETE) && - ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) - goto out; + ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) { + goto out; + } /* process remaining rights */ if ((rights & ~KAUTH_VNODE_DELETE) && - (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) + (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) { goto out; + } } else { /* * Execute is only granted to root if one of the x bits is set. 
This check only @@ -7689,7 +7964,7 @@ vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp, KAUTH_DEBUG("%p ALLOWED - caller is superuser", vp); } out: - return (result); + return result; } static int @@ -7697,16 +7972,16 @@ vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, vnode_t vp, vnode_t dvp, int *errorp) { struct _vnode_authorize_context auth_context; - vauth_ctx vcp; - kauth_cred_t cred; - kauth_ace_rights_t rights; - struct vnode_attr va, dva; - int result; - int noimmutable; - boolean_t parent_authorized_for_delete_child = FALSE; - boolean_t found_deny = FALSE; - boolean_t parent_ref= FALSE; - boolean_t is_suser = FALSE; + vauth_ctx vcp; + kauth_cred_t cred; + kauth_ace_rights_t rights; + struct vnode_attr va, dva; + int result; + int noimmutable; + boolean_t parent_authorized_for_delete_child = FALSE; + boolean_t found_deny = FALSE; + boolean_t parent_ref = FALSE; + boolean_t is_suser = FALSE; vcp = &auth_context; vcp->ctx = ctx; @@ -7726,27 +8001,28 @@ vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, vcp->flags = vcp->flags_valid = 0; #if DIAGNOSTIC - if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) + if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) { panic("vnode_authorize: bad arguments (context %p vp %p cred %p)", ctx, vp, cred); + } #endif KAUTH_DEBUG("%p AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)", vp, vfs_context_proc(ctx)->p_comm, - (action & KAUTH_VNODE_ACCESS) ? "access" : "auth", - (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "", - (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "", - (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "", - (action & KAUTH_VNODE_DELETE) ? " DELETE" : "", - (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "", - (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "", - (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "", - (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "", - (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "", - (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "", - (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "", - (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "", - (action & KAUTH_VNODE_CHANGE_OWNER) ? " CHANGE_OWNER" : "", - (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "", + (action & KAUTH_VNODE_ACCESS) ? "access" : "auth", + (action & KAUTH_VNODE_READ_DATA) ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "", + (action & KAUTH_VNODE_WRITE_DATA) ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "", + (action & KAUTH_VNODE_EXECUTE) ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "", + (action & KAUTH_VNODE_DELETE) ? " DELETE" : "", + (action & KAUTH_VNODE_APPEND_DATA) ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "", + (action & KAUTH_VNODE_DELETE_CHILD) ? " DELETE_CHILD" : "", + (action & KAUTH_VNODE_READ_ATTRIBUTES) ? " READ_ATTRIBUTES" : "", + (action & KAUTH_VNODE_WRITE_ATTRIBUTES) ? " WRITE_ATTRIBUTES" : "", + (action & KAUTH_VNODE_READ_EXTATTRIBUTES) ? " READ_EXTATTRIBUTES" : "", + (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES) ? " WRITE_EXTATTRIBUTES" : "", + (action & KAUTH_VNODE_READ_SECURITY) ? " READ_SECURITY" : "", + (action & KAUTH_VNODE_WRITE_SECURITY) ? " WRITE_SECURITY" : "", + (action & KAUTH_VNODE_CHANGE_OWNER) ? 
" CHANGE_OWNER" : "", + (action & KAUTH_VNODE_NOIMMUTABLE) ? " (noimmutable)" : "", vnode_isdir(vp) ? "directory" : "file", vp->v_name ? vp->v_name : "", action, vp, dvp); @@ -7756,11 +8032,12 @@ vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, */ noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0; rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE); - + if (rights & KAUTH_VNODE_DELETE) { #if DIAGNOSTIC - if (dvp == NULL) + if (dvp == NULL) { panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory"); + } #endif /* * check to see if we've already authorized the parent @@ -7768,21 +8045,22 @@ vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, * can skip a whole bunch of work... we will still have to * authorize that this specific child can be removed */ - if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) - parent_authorized_for_delete_child = TRUE; + if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) { + parent_authorized_for_delete_child = TRUE; + } } else { vcp->dvp = NULLVP; vcp->dvap = NULL; } - + /* * Check for read-only filesystems. */ if ((rights & KAUTH_VNODE_WRITE_RIGHTS) && (vp->v_mount->mnt_flag & MNT_RDONLY) && - ((vp->v_type == VREG) || (vp->v_type == VDIR) || - (vp->v_type == VLNK) || (vp->v_type == VCPLX) || - (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) { + ((vp->v_type == VREG) || (vp->v_type == VDIR) || + (vp->v_type == VLNK) || (vp->v_type == VCPLX) || + (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) { result = EROFS; goto out; } @@ -7801,8 +8079,9 @@ vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, * check based on VNOP_GETATTR data. Otherwise it returns 1 and sets * an appropriate result, at which point we can return immediately. */ - if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) + if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) { goto out; + } /* * If the vnode is a namedstream (extended attribute) data vnode (eg. @@ -7833,8 +8112,9 @@ vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, * if we're not asking for execute permissions or modifications, * then we're done, this action is authorized. 
*/ - if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) + if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) { goto success; + } is_suser = TRUE; } @@ -7879,23 +8159,26 @@ vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx, result = vnode_attr_authorize_internal(vcp, vp->v_mount, rights, is_suser, &found_deny, noimmutable, parent_authorized_for_delete_child); out: - if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) + if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) { kauth_acl_free(va.va_acl); - if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) + } + if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) { kauth_acl_free(dva.va_acl); + } if (result) { - if (parent_ref) + if (parent_ref) { vnode_put(vp); + } *errorp = result; KAUTH_DEBUG("%p DENIED - auth denied", vp); - return(KAUTH_RESULT_DENY); + return KAUTH_RESULT_DENY; } if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) { - /* + /* * if we were successfully granted the right to search this directory * and there were NO ACL DENYs for search and the posix permissions also don't - * deny execute, we can synthesize a global right that allows anyone to + * deny execute, we can synthesize a global right that allows anyone to * traverse this directory during a pathname lookup without having to * match the credential associated with this cache of rights. * @@ -7903,22 +8186,23 @@ out: * only if we actually check ACLs which we don't for root. As * a workaround, the lookup fast path checks for root. */ - if (!VATTR_IS_SUPPORTED(&va, va_mode) || + if (!VATTR_IS_SUPPORTED(&va, va_mode) || ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == - (S_IXUSR | S_IXGRP | S_IXOTH))) { - vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE); + (S_IXUSR | S_IXGRP | S_IXOTH))) { + vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE); } } success: - if (parent_ref) + if (parent_ref) { vnode_put(vp); + } /* * Note that this implies that we will allow requests for no rights, as well as * for rights that we do not recognise. There should be none of these. */ KAUTH_DEBUG("%p ALLOWED - auth granted", vp); - return(KAUTH_RESULT_ALLOW); + return KAUTH_RESULT_ALLOW; } int @@ -7937,7 +8221,7 @@ vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap, VATTR_WANTED(dvap, va_flags); } } else if (action & KAUTH_VNODE_DELETE) { - return (EINVAL); + return EINVAL; } if (!vfs_context_issuser(ctx)) { @@ -7951,7 +8235,7 @@ vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap, } } - return (0); + return 0; } int @@ -8003,8 +8287,9 @@ vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp * if we're not asking for execute permissions or modifications, * then we're done, this action is authorized. 
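The out: path of the previous hunk synthesizes a KAUTH_VNODE_SEARCHBYANYONE right: a directory whose search was granted with no ACL DENY encountered, and whose POSIX mode grants execute to user, group, and other (or whose filesystem cannot report a mode at all), is marked globally traversable so later path lookups can skip credential matching. A sketch of the condition:

```c
/* Sketch of the "searchable by anyone" synthesis above. Only the shape of
 * the condition is modeled; the caching call itself is kernel-internal. */
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>   /* S_IXUSR, S_IXGRP, S_IXOTH */

static bool searchable_by_anyone(bool search_granted, bool found_deny,
                                 bool mode_supported, mode_t mode)
{
    if (!search_granted || found_deny)
        return false;
    /* An unsupported mode is treated permissively, as in the hunk: the
     * ACL walk already found no DENY. */
    return !mode_supported ||
           ((mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
            (S_IXUSR | S_IXGRP | S_IXOTH));
}

int main(void)
{
    printf("%d\n", searchable_by_anyone(true, false, true, 0755)); /* 1 */
    printf("%d\n", searchable_by_anyone(true, false, true, 0750)); /* 0 */
    return 0;
}
```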
*/ - if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) + if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) { goto out; + } is_suser = TRUE; } else { if (!VATTR_IS_SUPPORTED(vap, va_uid) || @@ -8017,14 +8302,15 @@ vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser, &found_deny, noimmutable, FALSE); - if (result == EPERM) + if (result == EPERM) { result = EACCES; + } out: - return (result); + return result; } -int +int vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx) { return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx); @@ -8037,12 +8323,12 @@ vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_ static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx) { - int error; - int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode; - uint32_t inherit_flags; - kauth_cred_t cred; - guid_t changer; - mount_t dmp; + int error; + int has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode; + uint32_t inherit_flags; + kauth_cred_t cred; + guid_t changer; + mount_t dmp; struct vnode_attr dva; error = 0; @@ -8063,7 +8349,7 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin error = EINVAL; goto out; } - + /* * Default some fields. */ @@ -8091,8 +8377,9 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin VATTR_INIT(&dva); VATTR_WANTED(&dva, va_gid); VATTR_WANTED(&dva, va_flags); - if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) + if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) { goto out; + } /* * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit grouo is set, that @@ -8113,8 +8400,9 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin } } - if (!VATTR_IS_ACTIVE(vap, va_flags)) + if (!VATTR_IS_ACTIVE(vap, va_flags)) { VATTR_SET(vap, va_flags, 0); + } /* Determine if SF_RESTRICTED should be inherited from the parent * directory. */ @@ -8133,7 +8421,7 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin nanotime(&vap->va_create_time); VATTR_SET_ACTIVE(vap, va_create_time); } - + /* * Check for attempts to set nonsensical fields. */ @@ -8148,8 +8436,9 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin * Quickly check for the applicability of any enforcement here. * Tests below maintain the integrity of the local security model. */ - if (vfs_authopaque(dvp->v_mount)) - goto out; + if (vfs_authopaque(dvp->v_mount)) { + goto out; + } /* * We need to know if the caller is the superuser, or if the work is @@ -8245,7 +8534,7 @@ vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uin } } } -out: +out: if (inherit_flags) { /* Apply SF_RESTRICTED to the file if its parent directory was * restricted. 
This is done at the end so that root is not @@ -8263,7 +8552,7 @@ out: *defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID; } } - return(error); + return error; } /* @@ -8280,14 +8569,14 @@ int vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx) { struct vnode_attr ova; - kauth_action_t required_action; - int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid; - guid_t changer; - gid_t group; - uid_t owner; - mode_t newmode; - kauth_cred_t cred; - uint32_t fdelta; + kauth_action_t required_action; + int error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid; + guid_t changer; + gid_t group; + uid_t owner; + mode_t newmode; + kauth_cred_t cred; + uint32_t fdelta; VATTR_INIT(&ova); required_action = 0; @@ -8296,9 +8585,10 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ /* * Quickly check for enforcement applicability. */ - if (vfs_authopaque(vp->v_mount)) + if (vfs_authopaque(vp->v_mount)) { goto out; - + } + /* * Check for attempts to set nonsensical fields. */ @@ -8313,7 +8603,7 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ */ cred = vfs_context_ucred(ctx); has_priv_suser = kauth_cred_issuser(cred); - + /* * If any of the following are changing, we need information from the old file: * va_uid @@ -8345,14 +8635,13 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ VATTR_IS_ACTIVE(vap, va_access_time) || VATTR_IS_ACTIVE(vap, va_backup_time) || VATTR_IS_ACTIVE(vap, va_addedtime)) { - VATTR_WANTED(&ova, va_uid); -#if 0 /* enable this when we support UUIDs as official owners */ +#if 0 /* enable this when we support UUIDs as official owners */ VATTR_WANTED(&ova, va_uuuid); #endif KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID"); } - + /* * If flags are being changed, we need the old flags. */ @@ -8395,8 +8684,8 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ */ if (VATTR_IS_ACTIVE(vap, va_data_size)) { /* if we can't get the size, or it's different, we need write access */ - KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA"); - required_action |= KAUTH_VNODE_WRITE_DATA; + KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA"); + required_action |= KAUTH_VNODE_WRITE_DATA; } /* @@ -8459,7 +8748,7 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ * existing group information in the case we're not setting it right now. */ if (vap->va_mode & S_ISGID) { - required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */ + required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */ if (!has_priv_suser) { if (VATTR_IS_ACTIVE(vap, va_gid)) { group = vap->va_gid; @@ -8490,7 +8779,7 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ * Can't set the setuid bit unless you're root or the file's owner. */ if (vap->va_mode & S_ISUID) { - required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */ + required_action |= KAUTH_VNODE_CHECKIMMUTABLE; /* always required */ if (!has_priv_suser) { if (VATTR_IS_ACTIVE(vap, va_uid)) { owner = vap->va_uid; @@ -8512,7 +8801,7 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ } } } - + /* * Validate/mask flags changes. 
This checks that only the flags in * the UF_SETTABLE mask are being set, and preserves the flags in @@ -8521,7 +8810,7 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ * Since flags changes may be made in conjunction with other changes, * we will ask the auth code to ignore immutability in the case that * the SF_* flags are not set and we are only manipulating the file flags. - * + * */ if (VATTR_IS_ACTIVE(vap, va_flags)) { /* compute changing flags bits */ @@ -8585,16 +8874,16 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ */ if (VATTR_IS_ACTIVE(vap, va_uid)) { if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) { - if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) { - KAUTH_DEBUG(" DENIED - non-superuser cannot change ownershipt to a third party"); - error = EPERM; - goto out; + if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) { + KAUTH_DEBUG(" DENIED - non-superuser cannot change ownershipt to a third party"); + error = EPERM; + goto out; + } + chowner = 1; } - chowner = 1; - } clear_suid = 1; } - + /* * gid changing * Note that if the filesystem didn't give us a GID, we expect that it doesn't @@ -8603,20 +8892,20 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ */ if (VATTR_IS_ACTIVE(vap, va_gid)) { if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) { - if (!has_priv_suser) { - if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) { - KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid); - goto out; - } - if (!ismember) { - KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group", - ova.va_gid, vap->va_gid); - error = EPERM; - goto out; + if (!has_priv_suser) { + if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) { + KAUTH_DEBUG(" ERROR - got %d checking for membership in %d", error, vap->va_gid); + goto out; + } + if (!ismember) { + KAUTH_DEBUG(" DENIED - group change from %d to %d but not a member of target group", + ova.va_gid, vap->va_gid); + error = EPERM; + goto out; + } } + chgroup = 1; } - chgroup = 1; - } clear_sgid = 1; } @@ -8626,9 +8915,10 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ if (VATTR_IS_ACTIVE(vap, va_uuuid)) { /* if the owner UUID is not actually changing ... */ if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) { - if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) + if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) { goto no_uuuid_change; - + } + /* * If the current owner UUID is a null GUID, check * it against the UUID corresponding to the owner UID. @@ -8638,11 +8928,12 @@ vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_ guid_t uid_guid; if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 && - kauth_guid_equal(&vap->va_uuuid, &uid_guid)) - goto no_uuuid_change; + kauth_guid_equal(&vap->va_uuuid, &uid_guid)) { + goto no_uuuid_change; + } } } - + /* * The owner UUID cannot be set by a non-superuser to anything other than * their own or a null GUID (to "unset" the owner UUID). @@ -8673,8 +8964,9 @@ no_uuuid_change: if (VATTR_IS_ACTIVE(vap, va_guuid)) { /* if the group UUID is not actually changing ... 
*/ if (VATTR_IS_SUPPORTED(&ova, va_guuid)) { - if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) + if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) { goto no_guuid_change; + } /* * If the current group UUID is a null UUID, check @@ -8685,8 +8977,9 @@ no_uuuid_change: guid_t gid_guid; if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 && - kauth_guid_equal(&vap->va_guuid, &gid_guid)) - goto no_guuid_change; + kauth_guid_equal(&vap->va_guuid, &gid_guid)) { + goto no_guuid_change; + } } } @@ -8699,9 +8992,9 @@ no_uuuid_change: * system. */ if (!has_priv_suser) { - if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) + if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) { ismember = 1; - else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) { + } else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) { KAUTH_DEBUG(" ERROR - got %d trying to check group membership", error); goto out; } @@ -8731,9 +9024,8 @@ no_guuid_change: KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY"); required_action |= KAUTH_VNODE_WRITE_SECURITY; } - } - + /* * clear set-uid and set-gid bits. POSIX only requires this for * non-privileged processes but we do it even for root. @@ -8755,7 +9047,7 @@ no_guuid_change: if (newmode & (S_ISUID | S_ISGID)) { if (!VATTR_IS_ACTIVE(vap, va_mode) || !has_priv_suser) { KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o", - newmode, newmode & ~(S_ISUID | S_ISGID)); + newmode, newmode & ~(S_ISUID | S_ISGID)); newmode &= ~(S_ISUID | S_ISGID); } VATTR_SET(vap, va_mode, newmode); @@ -8766,10 +9058,8 @@ no_guuid_change: * Authorise changes in the ACL. */ if (VATTR_IS_ACTIVE(vap, va_acl)) { - /* no existing ACL */ if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) { - /* adding an ACL */ if (vap->va_acl != NULL) { required_action |= KAUTH_VNODE_WRITE_SECURITY; @@ -8790,7 +9080,7 @@ no_guuid_change: } else if (vap->va_acl->acl_entrycount > 0) { /* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */ if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0], - sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) { + sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) { required_action |= KAUTH_VNODE_WRITE_SECURITY; KAUTH_DEBUG("CHMOD - changing ACL entries"); } @@ -8801,15 +9091,18 @@ no_guuid_change: /* * Other attributes that require authorisation. 
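Just above, the chown path strips the setuid/setgid bits from the new mode unless a privileged caller supplied an explicit mode; POSIX only requires the clearing for unprivileged callers, but the kernel applies it to root too when no mode accompanies the ownership change. In miniature:

```c
/* Sketch of the setugid masking above: clear S_ISUID/S_ISGID on chown
 * unless the caller both set a mode explicitly and is privileged. The
 * boolean parameters stand in for VATTR_IS_ACTIVE(vap, va_mode) and
 * has_priv_suser. */
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>   /* S_ISUID, S_ISGID */

static mode_t chown_mask_mode(mode_t newmode, bool mode_explicit, bool suser)
{
    if ((newmode & (S_ISUID | S_ISGID)) && (!mode_explicit || !suser))
        newmode &= ~(S_ISUID | S_ISGID);
    return newmode;
}

int main(void)
{
    printf("%04o\n", chown_mask_mode(04755, false, false)); /* 0755 */
    printf("%04o\n", chown_mask_mode(04755, true,  true));  /* 4755 */
    return 0;
}
```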
*/ - if (VATTR_IS_ACTIVE(vap, va_encoding)) + if (VATTR_IS_ACTIVE(vap, va_encoding)) { required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES; - + } + out: - if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) + if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) { kauth_acl_free(ova.va_acl); - if (error == 0) + } + if (error == 0) { *actionp = required_action; - return(error); + } + return error; } static int @@ -8819,7 +9112,7 @@ setlocklocal_callback(struct vnode *vp, __unused void *cargs) vp->v_flag |= VLOCKLOCAL; vnode_unlock(vp); - return (VNODE_RETURNED); + return VNODE_RETURNED; } void @@ -8856,14 +9149,15 @@ vnode_setswapmount(vnode_t vp) int64_t vnode_getswappin_avail(vnode_t vp) { - int64_t max_swappin_avail = 0; + int64_t max_swappin_avail = 0; mount_lock(vp->v_mount); - if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) + if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) { max_swappin_avail = vp->v_mount->mnt_max_swappin_available; + } mount_unlock(vp->v_mount); - return (max_swappin_avail); + return max_swappin_avail; } @@ -8880,46 +9174,49 @@ void vn_checkunionwait(vnode_t vp) { vnode_lock_spin(vp); - while ((vp->v_flag & VISUNION) == VISUNION) + while ((vp->v_flag & VISUNION) == VISUNION) { msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0); + } vnode_unlock(vp); } void vn_clearunionwait(vnode_t vp, int locked) { - if (!locked) + if (!locked) { vnode_lock_spin(vp); - if((vp->v_flag & VISUNION) == VISUNION) { + } + if ((vp->v_flag & VISUNION) == VISUNION) { vp->v_flag &= ~VISUNION; wakeup((caddr_t)&vp->v_flag); } - if (!locked) + if (!locked) { vnode_unlock(vp); + } } -/* +/* * Removes orphaned apple double files during a rmdir * Works by: * 1. vnode_suspend(). - * 2. Call VNOP_READDIR() till the end of directory is reached. - * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY. + * 2. Call VNOP_READDIR() till the end of directory is reached. + * 3. Check if the directory entries returned are regular files with name starting with "._". If not, return ENOTEMPTY. * 4. Continue (2) and (3) till end of directory is reached. * 5. If all the entries in the directory were files with "._" name, delete all the files. * 6. vnode_resume() * 7. If deletion of all files succeeded, call VNOP_RMDIR() again. 
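The seven-step comment above describes a two-pass scan: verify that every directory entry is an orphaned "._" AppleDouble file (aborting with ENOTEMPTY on anything else, including "._._" files), then delete them and retry the rmdir. A user-space analogue using plain POSIX dirent APIs in place of VNOP_READDIR()/unlink1(); this sketches the shape of the algorithm, not the kernel's buffering or its HFS/NFS eofflag workarounds:

```c
/* User-space sketch of the two-pass orphan cleanup described above. */
#include <dirent.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>

static bool is_dot_or_dotdot(const char *n)
{
    return n[0] == '.' && (n[1] == '\0' || (n[1] == '.' && n[2] == '\0'));
}

int rmdir_orphaned_appledouble(const char *path)
{
    DIR *d = opendir(path);
    if (d == NULL)
        return errno;

    struct dirent *dp;
    /* Pass 1: everything must look like "._name", and not "._._name". */
    while ((dp = readdir(d)) != NULL) {
        const char *n = dp->d_name;
        if (is_dot_or_dotdot(n))
            continue;
        if (strncmp(n, "._", 2) != 0 || strncmp(n + 2, "._", 2) == 0) {
            closedir(d);
            return ENOTEMPTY;            /* something real lives here */
        }
    }

    /* Pass 2: delete the orphans, then retry the rmdir itself. */
    rewinddir(d);
    while ((dp = readdir(d)) != NULL) {
        if (!is_dot_or_dotdot(dp->d_name))
            unlinkat(dirfd(d), dp->d_name, 0);
    }
    closedir(d);
    return rmdir(path) == 0 ? 0 : errno;
}
```

The kernel version additionally suspends the vnode across the scan (steps 1 and 6 in the comment) so the directory cannot change underneath it.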
*/ -errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * restart_flag) +errno_t +rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_flag) { - #define UIO_BUFF_SIZE 2048 uio_t auio = NULL; int eofflag, siz = UIO_BUFF_SIZE, nentries = 0; int open_flag = 0, full_erase_flag = 0; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; char *rbuf = NULL; - void *dir_pos; + void *dir_pos; void *dir_end; struct dirent *dp; errno_t error; @@ -8929,31 +9226,35 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * /* * restart_flag is set so that the calling rmdir sleeps and resets */ - if (error == EBUSY) + if (error == EBUSY) { *restart_flag = 1; - if (error != 0) - return (error); + } + if (error != 0) { + return error; + } /* * set up UIO */ MALLOC(rbuf, caddr_t, siz, M_TEMP, M_WAITOK); - if (rbuf) + if (rbuf) { auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); + } if (!rbuf || !auio) { error = ENOMEM; goto outsc; } - uio_setoffset(auio,0); + uio_setoffset(auio, 0); eofflag = 0; - if ((error = VNOP_OPEN(vp, FREAD, ctx))) - goto outsc; - else + if ((error = VNOP_OPEN(vp, FREAD, ctx))) { + goto outsc; + } else { open_flag = 1; + } /* * First pass checks if all files are appleDouble files. @@ -8964,11 +9265,13 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ); uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE); - if((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) + if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) { goto outsc; + } - if (uio_resid(auio) != 0) + if (uio_resid(auio) != 0) { siz -= uio_resid(auio); + } /* * Iterate through directory @@ -8977,23 +9280,24 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * dir_end = (void*) (rbuf + siz); dp = (struct dirent*) (dir_pos); - if (dir_pos == dir_end) + if (dir_pos == dir_end) { eofflag = 1; + } while (dir_pos < dir_end) { /* * Check for . and .. as well as directories */ - if (dp->d_ino != 0 && - !((dp->d_namlen == 1 && dp->d_name[0] == '.') || - (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) { + if (dp->d_ino != 0 && + !((dp->d_namlen == 1 && dp->d_name[0] == '.') || + (dp->d_namlen == 2 && dp->d_name[0] == '.' 
&& dp->d_name[1] == '.'))) { /* * Check for irregular files and ._ files * If there is a ._._ file abort the op */ - if ( dp->d_namlen < 2 || - strncmp(dp->d_name,"._",2) || - (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._",2))) { + if (dp->d_namlen < 2 || + strncmp(dp->d_name, "._", 2) || + (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) { error = ENOTEMPTY; goto outsc; } @@ -9001,23 +9305,23 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen); dp = (struct dirent*)dir_pos; } - + /* - * workaround for HFS/NFS setting eofflag before end of file + * workaround for HFS/NFS setting eofflag before end of file */ - if (vp->v_tag == VT_HFS && nentries > 2) - eofflag=0; + if (vp->v_tag == VT_HFS && nentries > 2) { + eofflag = 0; + } if (vp->v_tag == VT_NFS) { if (eofflag && !full_erase_flag) { full_erase_flag = 1; eofflag = 0; uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ); - } - else if (!eofflag && full_erase_flag) + } else if (!eofflag && full_erase_flag) { full_erase_flag = 0; + } } - } while (!eofflag); /* * If we've made it here all the files in the dir are ._ files. @@ -9036,11 +9340,13 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx); - if (error != 0) + if (error != 0) { goto outsc; + } - if (uio_resid(auio) != 0) + if (uio_resid(auio) != 0) { siz -= uio_resid(auio); + } /* * Iterate through directory @@ -9048,72 +9354,72 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * dir_pos = (void*) rbuf; dir_end = (void*) (rbuf + siz); dp = (struct dirent*) dir_pos; - - if (dir_pos == dir_end) + + if (dir_pos == dir_end) { eofflag = 1; - + } + while (dir_pos < dir_end) { /* * Check for . and .. as well as directories */ - if (dp->d_ino != 0 && - !((dp->d_namlen == 1 && dp->d_name[0] == '.') || - (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.')) - ) { - + if (dp->d_ino != 0 && + !((dp->d_namlen == 1 && dp->d_name[0] == '.') || + (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.')) + ) { error = unlink1(ctx, vp, CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE, VNODE_REMOVE_SKIP_NAMESPACE_EVENT | VNODE_REMOVE_NO_AUDIT_PATH); - if (error && error != ENOENT) { + if (error && error != ENOENT) { goto outsc; } - } dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen); dp = (struct dirent*)dir_pos; } - + /* - * workaround for HFS/NFS setting eofflag before end of file + * workaround for HFS/NFS setting eofflag before end of file */ - if (vp->v_tag == VT_HFS && nentries > 2) - eofflag=0; + if (vp->v_tag == VT_HFS && nentries > 2) { + eofflag = 0; + } if (vp->v_tag == VT_NFS) { if (eofflag && !full_erase_flag) { full_erase_flag = 1; eofflag = 0; uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ); - } - else if (!eofflag && full_erase_flag) + } else if (!eofflag && full_erase_flag) { full_erase_flag = 0; + } } - } while (!eofflag); error = 0; outsc: - if (open_flag) + if (open_flag) { VNOP_CLOSE(vp, FREAD, ctx); + } - if (auio) + if (auio) { uio_free(auio); + } FREE(rbuf, M_TEMP); vnode_resume(vp); - return(error); - + return error; } -void -lock_vnode_and_post(vnode_t vp, int kevent_num) +void +lock_vnode_and_post(vnode_t vp, int kevent_num) { /* Only take the lock if there's something there! */ if (vp->v_knotes.slh_first != NULL) { @@ -9128,7 +9434,8 @@ void panic_print_vnodes(void); /* define PANIC_PRINTS_VNODES only if investigation is required. 
*/ #ifdef PANIC_PRINTS_VNODES -static const char *__vtype(uint16_t vtype) +static const char * +__vtype(uint16_t vtype) { switch (vtype) { case VREG: @@ -9160,23 +9467,26 @@ static const char *__vtype(uint16_t vtype) * build a path from the bottom up * NOTE: called from the panic path - no alloc'ing of memory and no locks! */ -static char *__vpath(vnode_t vp, char *str, int len, int depth) +static char * +__vpath(vnode_t vp, char *str, int len, int depth) { int vnm_len; const char *src; char *dst; - if (len <= 0) + if (len <= 0) { return str; + } /* str + len is the start of the string we created */ - if (!vp->v_name) + if (!vp->v_name) { return str + len; + } /* follow mount vnodes to get the full path */ if ((vp->v_flag & VROOT)) { if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) { return __vpath(vp->v_mount->mnt_vnodecovered, - str, len, depth+1); + str, len, depth + 1); } return str + len; } @@ -9203,7 +9513,7 @@ static char *__vpath(vnode_t vp, char *str, int len, int depth) if (vp->v_parent && len > 1) { /* follow parents up the chain */ len--; - *(dst-1) = '/'; + *(dst - 1) = '/'; return __vpath(vp->v_parent, str, len, depth + 1); } @@ -9211,7 +9521,8 @@ static char *__vpath(vnode_t vp, char *str, int len, int depth) } #define SANE_VNODE_PRINT_LIMIT 5000 -void panic_print_vnodes(void) +void +panic_print_vnodes(void) { mount_t mnt; vnode_t vp; @@ -9221,42 +9532,42 @@ void panic_print_vnodes(void) char vname[257]; paniclog_append_noflush("\n***** VNODES *****\n" - "TYPE UREF ICNT PATH\n"); + "TYPE UREF ICNT PATH\n"); /* NULL-terminate the path name */ - vname[sizeof(vname)-1] = '\0'; + vname[sizeof(vname) - 1] = '\0'; /* * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist */ TAILQ_FOREACH(mnt, &mountlist, mnt_list) { - if (!ml_validate_nofault((vm_offset_t)mnt, sizeof(mount_t))) { paniclog_append_noflush("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n", - &mountlist, mnt); + &mountlist, mnt); break; } TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) { - if (!ml_validate_nofault((vm_offset_t)vp, sizeof(vnode_t))) { paniclog_append_noflush("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n", - &mnt->mnt_vnodelist, vp); + &mnt->mnt_vnodelist, vp); break; } - - if (++nvnodes > SANE_VNODE_PRINT_LIMIT) + + if (++nvnodes > SANE_VNODE_PRINT_LIMIT) { return; + } type = __vtype(vp->v_type); - nm = __vpath(vp, vname, sizeof(vname)-1, 0); + nm = __vpath(vp, vname, sizeof(vname) - 1, 0); paniclog_append_noflush("%s %0d %0d %s\n", - type, vp->v_usecount, vp->v_iocount, nm); + type, vp->v_usecount, vp->v_iocount, nm); } } } #else /* !PANIC_PRINTS_VNODES */ -void panic_print_vnodes(void) +void +panic_print_vnodes(void) { return; } @@ -9264,18 +9575,22 @@ void panic_print_vnodes(void) #ifdef JOE_DEBUG -static void record_vp(vnode_t vp, int count) { - struct uthread *ut; +static void +record_vp(vnode_t vp, int count) +{ + struct uthread *ut; #if CONFIG_TRIGGERS - if (vp->v_resolve) + if (vp->v_resolve) { return; + } #endif - if ((vp->v_flag & VSYSTEM)) - return; + if ((vp->v_flag & VSYSTEM)) { + return; + } ut = get_bsdthread_info(current_thread()); - ut->uu_iocount += count; + ut->uu_iocount += count; if (count == 1) { if (ut->uu_vpindex < 32) { @@ -9310,16 +9625,16 @@ vfs_resolver_result(uint32_t seq, enum resolver_status stat, int aux) * |<--- 32 --->|<--- 28 --->|<- 4 ->| * sequence auxiliary status */ - return (((uint64_t)seq) << 32) | - (((uint64_t)(aux & 0x0fffffff)) << 4) | - (uint64_t)(stat & 
0x0000000F); + return (((uint64_t)seq) << 32) | + (((uint64_t)(aux & 0x0fffffff)) << 4) | + (uint64_t)(stat & 0x0000000F); } enum resolver_status vfs_resolver_status(resolver_result_t result) { /* lower 4 bits is status */ - return (result & 0x0000000F); + return result & 0x0000000F; } uint32_t @@ -9348,31 +9663,32 @@ vnode_trigger_update(vnode_t vp, resolver_result_t result) enum resolver_status stat; if (vp->v_resolve == NULL) { - return (EINVAL); + return EINVAL; } stat = vfs_resolver_status(result); seq = vfs_resolver_sequence(result); if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) { - return (EINVAL); + return EINVAL; } rp = vp->v_resolve; lck_mtx_lock(&rp->vr_lock); if (seq > rp->vr_lastseq) { - if (stat == RESOLVER_RESOLVED) + if (stat == RESOLVER_RESOLVED) { rp->vr_flags |= VNT_RESOLVED; - else + } else { rp->vr_flags &= ~VNT_RESOLVED; + } rp->vr_lastseq = seq; } lck_mtx_unlock(&rp->vr_lock); - return (0); + return 0; } static int @@ -9388,7 +9704,7 @@ vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref) vp->v_resolve = rp; } vnode_unlock(vp); - + if (ref) { error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE); if (error != 0) { @@ -9414,12 +9730,14 @@ vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, #if 1 /* minimum pointer test (debugging) */ - if (tinfo->vnt_data) + if (tinfo->vnt_data) { byte = *((char *)tinfo->vnt_data); + } #endif MALLOC(rp, vnode_resolve_t, sizeof(*rp), M_TEMP, M_WAITOK); - if (rp == NULL) - return (ENOMEM); + if (rp == NULL) { + return ENOMEM; + } lck_mtx_init(&rp->vr_lock, trigger_vnode_lck_grp, trigger_vnode_lck_attr); @@ -9443,7 +9761,7 @@ vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, OSAddAtomic(1, &mp->mnt_numtriggers); } - return (result); + return result; out: FREE(rp, M_TEMP); @@ -9462,7 +9780,6 @@ vnode_resolver_release(vnode_resolve_t rp) lck_mtx_destroy(&rp->vr_lock, trigger_vnode_lck_grp); FREE(rp, M_TEMP); - } /* Called after the vnode has been drained */ @@ -9470,7 +9787,7 @@ static void vnode_resolver_detach(vnode_t vp) { vnode_resolve_t rp; - mount_t mp; + mount_t mp; mp = vnode_mount(vp); @@ -9481,12 +9798,12 @@ vnode_resolver_detach(vnode_t vp) if ((rp->vr_flags & VNT_EXTERNAL) != 0) { vnode_rele_ext(vp, O_EVTONLY, 1); - } + } vnode_resolver_release(rp); - + /* Keep count of active trigger vnodes per mount */ - OSAddAtomic(-1, &mp->mnt_numtriggers); + OSAddAtomic(-1, &mp->mnt_numtriggers); } __private_extern__ @@ -9529,8 +9846,9 @@ vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx) lck_mtx_lock(&rp->vr_lock); if (seq > rp->vr_lastseq) { - if (status == RESOLVER_UNRESOLVED) + if (status == RESOLVER_UNRESOLVED) { rp->vr_flags &= ~VNT_RESOLVED; + } rp->vr_lastseq = seq; } lck_mtx_unlock(&rp->vr_lock); @@ -9550,7 +9868,7 @@ vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx) if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_resolve_func == NULL) || (vp->v_mountedhere != NULL)) { - return (0); + return 0; } rp = vp->v_resolve; @@ -9559,15 +9877,16 @@ vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx) /* Check if this vnode is already resolved */ if (rp->vr_flags & VNT_RESOLVED) { lck_mtx_unlock(&rp->vr_lock); - return (0); + return 0; } lck_mtx_unlock(&rp->vr_lock); #if CONFIG_MACF int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd); - if (rv != 0) + if (rv != 0) { return rv; + } #endif /* @@ -9577,7 +9896,7 @@ vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t 
ctx) * there can also be other legitimate lookups in parallel * * XXX - should we call this on a separate thread with a timeout? - * + * * XXX - should we use ISLASTCN to pick the op value??? Perhaps only leafs should * get the richer set and non-leafs should get generic OP_LOOKUP? TBD */ @@ -9589,14 +9908,15 @@ vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx) lck_mtx_lock(&rp->vr_lock); if (seq > rp->vr_lastseq) { - if (status == RESOLVER_RESOLVED) + if (status == RESOLVER_RESOLVED) { rp->vr_flags |= VNT_RESOLVED; + } rp->vr_lastseq = seq; } lck_mtx_unlock(&rp->vr_lock); /* On resolver errors, propagate the error back up */ - return (status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0); + return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0; } static int @@ -9608,7 +9928,7 @@ vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx) uint32_t seq; if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) { - return (0); + return 0; } rp = vp->v_resolve; @@ -9618,7 +9938,7 @@ vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx) if ((rp->vr_flags & VNT_RESOLVED) == 0) { printf("vnode_trigger_unresolve: not currently resolved\n"); lck_mtx_unlock(&rp->vr_lock); - return (0); + return 0; } rp->vr_flags |= VNT_VFS_UNMOUNTED; @@ -9639,15 +9959,16 @@ vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx) lck_mtx_lock(&rp->vr_lock); if (seq > rp->vr_lastseq) { - if (status == RESOLVER_UNRESOLVED) + if (status == RESOLVER_UNRESOLVED) { rp->vr_flags &= ~VNT_RESOLVED; + } rp->vr_lastseq = seq; } rp->vr_flags &= ~VNT_VFS_UNMOUNTED; lck_mtx_unlock(&rp->vr_lock); /* On resolver errors, propagate the error back up */ - return (status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0); + return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0; } static int @@ -9664,12 +9985,14 @@ triggerisdescendant(mount_t mp, mount_t rmp) vnode_t vp; /* did we encounter "/" ? 
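vfs_resolver_result(), reformatted just above, packs three fields into one 64-bit resolver_result_t: a 32-bit sequence number, 28 bits of auxiliary data (an errno when the status is RESOLVER_ERROR), and a 4-bit status in the low nibble, which vfs_resolver_status() and vfs_resolver_sequence() unpack. A self-contained sketch of the layout (the enum values and names are stand-ins, not the kernel's):

```c
/* Sketch of the resolver_result_t bit layout shown above. */
#include <assert.h>
#include <stdint.h>

enum rstatus { RESOLVED = 0, UNRESOLVED = 1, RERROR = 2 }; /* stand-ins */

static uint64_t pack(uint32_t seq, enum rstatus stat, int aux)
{
    /* |<--- 32 --->|<--- 28 --->|<- 4 ->|
     *   sequence     auxiliary    status   */
    return (((uint64_t)seq) << 32) |
           (((uint64_t)(aux & 0x0fffffff)) << 4) |
           (uint64_t)(stat & 0x0000000F);
}

static enum rstatus status_of(uint64_t r)
{
    return (enum rstatus)(r & 0x0000000F);
}

static int aux_of(uint64_t r)      { return (int)((r >> 4) & 0x0fffffff); }
static uint32_t seq_of(uint64_t r) { return (uint32_t)(r >> 32); }

int main(void)
{
    uint64_t r = pack(7, RERROR, 13);   /* e.g. an errno in the aux field */
    assert(seq_of(r) == 7 && status_of(r) == RERROR && aux_of(r) == 13);
    return 0;
}
```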
*/ - if (mp->mnt_flag & MNT_ROOTFS) + if (mp->mnt_flag & MNT_ROOTFS) { break; + } vp = mp->mnt_vnodecovered; - if (vp == NULLVP) + if (vp == NULLVP) { break; + } mp = vp->v_mount; if (mp == rmp) { @@ -9680,16 +10003,16 @@ triggerisdescendant(mount_t mp, mount_t rmp) name_cache_unlock(); - return (match); + return match; } struct trigger_unmount_info { - vfs_context_t ctx; - mount_t top_mp; - vnode_t trigger_vp; - mount_t trigger_mp; - uint32_t trigger_vid; - int flags; + vfs_context_t ctx; + mount_t top_mp; + vnode_t trigger_vp; + mount_t trigger_mp; + uint32_t trigger_vid; + int flags; }; static int @@ -9701,12 +10024,13 @@ trigger_unmount_callback(mount_t mp, void * arg) /* * When we encounter the top level mount we're done */ - if (mp == infop->top_mp) - return (VFS_RETURNED_DONE); + if (mp == infop->top_mp) { + return VFS_RETURNED_DONE; + } if ((mp->mnt_vnodecovered == NULL) || (vnode_getwithref(mp->mnt_vnodecovered) != 0)) { - return (VFS_RETURNED); + return VFS_RETURNED; } if ((mp->mnt_vnodecovered->v_mountedhere == mp) && @@ -9719,8 +10043,9 @@ trigger_unmount_callback(mount_t mp, void * arg) /* * When we encounter a mounted trigger, check if its under the top level mount */ - if ( !mountedtrigger || !triggerisdescendant(mp, infop->top_mp) ) - return (VFS_RETURNED); + if (!mountedtrigger || !triggerisdescendant(mp, infop->top_mp)) { + return VFS_RETURNED; + } /* * Process any pending nested mount (now that its not referenced) @@ -9731,17 +10056,17 @@ trigger_unmount_callback(mount_t mp, void * arg) int error; infop->trigger_vp = NULLVP; - + if (mp == vp->v_mountedhere) { vnode_put(vp); printf("trigger_unmount_callback: unexpected match '%s'\n", - mp->mnt_vfsstat.f_mntonname); - return (VFS_RETURNED); + mp->mnt_vfsstat.f_mntonname); + return VFS_RETURNED; } if (infop->trigger_mp != vp->v_mountedhere) { vnode_put(vp); printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n", - infop->trigger_mp, vp->v_mountedhere); + infop->trigger_mp, vp->v_mountedhere); goto savenext; } @@ -9749,9 +10074,9 @@ trigger_unmount_callback(mount_t mp, void * arg) vnode_put(vp); if (error) { printf("unresolving: '%s', err %d\n", - vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname : - "???", error); - return (VFS_RETURNED_DONE); /* stop iteration on errors */ + vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname : + "???", error); + return VFS_RETURNED_DONE; /* stop iteration on errors */ } } savenext: @@ -9769,7 +10094,7 @@ savenext: vnode_put(infop->trigger_vp); } - return (VFS_RETURNED); + return VFS_RETURNED; } /* @@ -9799,8 +10124,9 @@ vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx) recursive = TRUE; } vnode_put(mp->mnt_vnodecovered); - if (recursive) + if (recursive) { return; + } } /* @@ -9836,16 +10162,16 @@ vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, int res; vnode_t rvp, vp; struct vnode_trigger_param vtp; - - /* - * Must be called for trigger callback, wherein rwlock is held + + /* + * Must be called for trigger callback, wherein rwlock is held */ lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD); TRIG_LOG("Adding trigger at %s\n", relpath); TRIG_LOG("Trying VFS_ROOT\n"); - /* + /* * We do a lookup starting at the root of the mountpoint, unwilling * to cross into other mountpoints. 
*/ @@ -9857,21 +10183,21 @@ vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, TRIG_LOG("Trying namei\n"); NDINIT(&nd, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE, - CAST_USER_ADDR_T(relpath), ctx); + CAST_USER_ADDR_T(relpath), ctx); nd.ni_dvp = rvp; res = namei(&nd); if (res != 0) { vnode_put(rvp); goto out; } - + vp = nd.ni_vp; nameidone(&nd); vnode_put(rvp); TRIG_LOG("Trying vnode_resolver_create()\n"); - /* + /* * Set up blob. vnode_create() takes a larger structure * with creation info, and we needed something different * for this case. One needs to win, or we need to munge both; @@ -9895,20 +10221,22 @@ out: #endif /* CONFIG_TRIGGERS */ -vm_offset_t kdebug_vnode(vnode_t vp) +vm_offset_t +kdebug_vnode(vnode_t vp) { return VM_KERNEL_ADDRPERM(vp); } static int flush_cache_on_write = 0; -SYSCTL_INT (_kern, OID_AUTO, flush_cache_on_write, - CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0, - "always flush the drive cache on writes to uncached files"); +SYSCTL_INT(_kern, OID_AUTO, flush_cache_on_write, + CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0, + "always flush the drive cache on writes to uncached files"); -int vnode_should_flush_after_write(vnode_t vp, int ioflag) +int +vnode_should_flush_after_write(vnode_t vp, int ioflag) { - return (flush_cache_on_write - && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp))); + return flush_cache_on_write + && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp)); } /* @@ -9918,22 +10246,24 @@ int vnode_should_flush_after_write(vnode_t vp, int ioflag) struct vnode_trace_paths_context { uint64_t count; - long path[MAXPATHLEN / sizeof (long) + 1]; /* + 1 in case sizeof (long) does not divide MAXPATHLEN */ + long path[MAXPATHLEN / sizeof(long) + 1]; /* + 1 in case sizeof (long) does not divide MAXPATHLEN */ }; -static int vnode_trace_path_callback(struct vnode *vp, void *arg) { +static int +vnode_trace_path_callback(struct vnode *vp, void *arg) +{ int len, rv; struct vnode_trace_paths_context *ctx; ctx = arg; - len = sizeof (ctx->path); + len = sizeof(ctx->path); rv = vn_getpath(vp, (char *)ctx->path, &len); /* vn_getpath() NUL-terminates, and len includes the NUL */ if (!rv) { kdebug_vfs_lookup(ctx->path, len, vp, - KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT); + KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT); if (++(ctx->count) == 1000) { thread_yield_to_preemption(); @@ -9944,9 +10274,12 @@ static int vnode_trace_path_callback(struct vnode *vp, void *arg) { return VNODE_RETURNED; } -static int vfs_trace_paths_callback(mount_t mp, void *arg) { - if (mp->mnt_flag & MNT_LOCAL) +static int +vfs_trace_paths_callback(mount_t mp, void *arg) +{ + if (mp->mnt_flag & MNT_LOCAL) { vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg); + } return VFS_RETURNED; } @@ -9959,13 +10292,15 @@ static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS { (void)arg2; (void)req; - if (!kauth_cred_issuser(kauth_cred_get())) + if (!kauth_cred_issuser(kauth_cred_get())) { return EPERM; + } - if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) + if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) { return EINVAL; + } - bzero(&ctx, sizeof (struct vnode_trace_paths_context)); + bzero(&ctx, sizeof(struct vnode_trace_paths_context)); vfs_iterate(0, vfs_trace_paths_callback, &ctx); diff --git a/bsd/vfs/vfs_support.c b/bsd/vfs/vfs_support.c index ed146fa8f..ca3609541 100644 --- a/bsd/vfs/vfs_support.c +++ b/bsd/vfs/vfs_support.c @@ -2,7 +2,7 @@ * Copyright (c) 
2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -50,283 +50,285 @@ #include #include -#include /* ubc_upl_abort_range() */ +#include /* ubc_upl_abort_range() */ struct vnop_create_args /* { - struct vnode *a_dvp; - struct vnode **a_vpp; - struct componentname *a_cnp; - struct vnode_vattr *a_vap; - vfs_context_t a_context; -} */; + * struct vnode *a_dvp; + * struct vnode **a_vpp; + * struct componentname *a_cnp; + * struct vnode_vattr *a_vap; + * vfs_context_t a_context; + * } */; int nop_create(__unused struct vnop_create_args *ap) { #if DIAGNOSTIC - if ((ap->a_cnp->cn_flags & HASBUF) == 0) + if ((ap->a_cnp->cn_flags & HASBUF) == 0) { panic("nop_create: no name"); + } #endif - return (0); + return 0; } int err_create(struct vnop_create_args *ap) { (void)nop_create(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_whiteout_args /* { - struct vnode *a_dvp; - struct componentname *a_cnp; - int a_flags; - vfs_context_t a_context; -} */; + * struct vnode *a_dvp; + * struct componentname *a_cnp; + * int a_flags; + * vfs_context_t a_context; + * } */; int nop_whiteout(__unused struct vnop_whiteout_args *ap) { - return (0); + return 0; } int err_whiteout(__unused struct vnop_whiteout_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_mknod_args /* { - struct vnode *a_dvp; - struct vnode **a_vpp; - struct componentname *a_cnp; - struct vnode_vattr *a_vap; - vfs_context_t a_context; -} */; + * struct vnode *a_dvp; + * struct vnode **a_vpp; + * struct componentname *a_cnp; + * struct vnode_vattr *a_vap; + * vfs_context_t a_context; + * } */; int nop_mknod(__unused struct vnop_mknod_args *ap) { #if DIAGNOSTIC - if ((ap->a_cnp->cn_flags & HASBUF) == 0) + if ((ap->a_cnp->cn_flags & HASBUF) == 0) { panic("nop_mknod: no name"); + } #endif - return (0); + return 0; } int err_mknod(struct vnop_mknod_args *ap) { (void)nop_mknod(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_open_args /* { - struct vnode *a_vp; - int a_mode; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * int a_mode; + * vfs_context_t a_context; + * } */; int nop_open(__unused struct vnop_open_args *ap) { - return (0); + return 0; } int err_open(__unused struct vnop_open_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_close_args /* { - struct vnode *a_vp; - int a_fflag; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * int a_fflag; + * vfs_context_t a_context; + * } */; int nop_close(__unused struct 
vnop_close_args *ap) { - return (0); + return 0; } int err_close(__unused struct vnop_close_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_access_args /* { - struct vnode *a_vp; - int a_mode; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * int a_mode; + * vfs_context_t a_context; + * } */; int nop_access(__unused struct vnop_access_args *ap) { - return (0); + return 0; } int err_access(__unused struct vnop_access_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_getattr_args /* { - struct vnode *a_vp; - struct vnode_vattr *a_vap; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * struct vnode_vattr *a_vap; + * vfs_context_t a_context; + * } */; int nop_getattr(__unused struct vnop_getattr_args *ap) { - return (0); + return 0; } int err_getattr(__unused struct vnop_getattr_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_setattr_args /* { - struct vnode *a_vp; - struct vnode_vattr *a_vap; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * struct vnode_vattr *a_vap; + * vfs_context_t a_context; + * } */; int nop_setattr(__unused struct vnop_setattr_args *ap) { - return (0); + return 0; } int err_setattr(__unused struct vnop_setattr_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_read_args /* { - struct vnode *a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */; int nop_read(__unused struct vnop_read_args *ap) { - return (0); + return 0; } int err_read(__unused struct vnop_read_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_write_args /* { - struct vnode *a_vp; - struct uio *a_uio; - int a_ioflag; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * struct uio *a_uio; + * int a_ioflag; + * vfs_context_t a_context; + * } */; int nop_write(__unused struct vnop_write_args *ap) { - return (0); + return 0; } int err_write(__unused struct vnop_write_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_ioctl_args /* { - struct vnode *a_vp; - u_long a_command; - caddr_t a_data; - int a_fflag; - kauth_cred_t a_cred; - struct proc *a_p; -} */; + * struct vnode *a_vp; + * u_long a_command; + * caddr_t a_data; + * int a_fflag; + * kauth_cred_t a_cred; + * struct proc *a_p; + * } */; int nop_ioctl(__unused struct vnop_ioctl_args *ap) { - return (0); + return 0; } int err_ioctl(__unused struct vnop_ioctl_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_select_args /* { - struct vnode *a_vp; - int a_which; - int a_fflags; - kauth_cred_t a_cred; - void *a_wql; - struct proc *a_p; -} */; + * struct vnode *a_vp; + * int a_which; + * int a_fflags; + * kauth_cred_t a_cred; + * void *a_wql; + * struct proc *a_p; + * } */; int nop_select(__unused struct vnop_select_args *ap) { - return (0); + return 0; } int err_select(__unused struct vnop_select_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_exchange_args /* { - struct vnode *a_fvp; - struct vnode *a_tvp; - int a_options; - vfs_context_t a_context; -} */; + * struct vnode *a_fvp; + * struct vnode *a_tvp; + * int a_options; + * vfs_context_t a_context; + * } */; int nop_exchange(__unused struct vnop_exchange_args *ap) { - return (0); + return 0; } int err_exchange(__unused struct vnop_exchange_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_revoke_args /* { - struct vnode *a_vp; - int a_flags; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * int 
a_flags; + * vfs_context_t a_context; + * } */; int nop_revoke(struct vnop_revoke_args *ap) @@ -338,489 +340,494 @@ int err_revoke(struct vnop_revoke_args *ap) { (void)nop_revoke(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_mmap_args /* { - struct vnode *a_vp; - int a_fflags; - kauth_cred_t a_cred; - struct proc *a_p; -} */; + * struct vnode *a_vp; + * int a_fflags; + * kauth_cred_t a_cred; + * struct proc *a_p; + * } */; int nop_mmap(__unused struct vnop_mmap_args *ap) { - return (0); + return 0; } int err_mmap(__unused struct vnop_mmap_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_fsync_args /* { - struct vnode *a_vp; - int a_waitfor; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * int a_waitfor; + * vfs_context_t a_context; + * } */; int nop_fsync(__unused struct vnop_fsync_args *ap) { - return (0); + return 0; } int err_fsync(__unused struct vnop_fsync_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_remove_args /* { - struct vnode *a_dvp; - struct vnode *a_vp; - struct componentname *a_cnp; - int a_flags; - vfs_context_t a_context; -} */; + * struct vnode *a_dvp; + * struct vnode *a_vp; + * struct componentname *a_cnp; + * int a_flags; + * vfs_context_t a_context; + * } */; int nop_remove(__unused struct vnop_remove_args *ap) { - return (0); + return 0; } int err_remove(struct vnop_remove_args *ap) { (void)nop_remove(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_link_args /* { - struct vnode *a_vp; - struct vnode *a_tdvp; - struct componentname *a_cnp; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * struct vnode *a_tdvp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */; int nop_link(__unused struct vnop_link_args *ap) { - return (0); + return 0; } int err_link(struct vnop_link_args *ap) { (void)nop_link(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_rename_args /* { - struct vnode *a_fdvp; - struct vnode *a_fvp; - struct componentname *a_fcnp; - struct vnode *a_tdvp; - struct vnode *a_tvp; - struct componentname *a_tcnp; - vfs_context_t a_context; -} */; + * struct vnode *a_fdvp; + * struct vnode *a_fvp; + * struct componentname *a_fcnp; + * struct vnode *a_tdvp; + * struct vnode *a_tvp; + * struct componentname *a_tcnp; + * vfs_context_t a_context; + * } */; int nop_rename(__unused struct vnop_rename_args *ap) { - return (0); + return 0; } int err_rename(struct vnop_rename_args *ap) { (void)nop_rename(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_mkdir_args /* { - struct vnode *a_dvp; - struct vnode **a_vpp; - struct componentname *a_cnp; - struct vnode_vattr *a_vap; - vfs_context_t a_context; -} */; + * struct vnode *a_dvp; + * struct vnode **a_vpp; + * struct componentname *a_cnp; + * struct vnode_vattr *a_vap; + * vfs_context_t a_context; + * } */; int nop_mkdir(__unused struct vnop_mkdir_args *ap) { - return (0); + return 0; } int err_mkdir(__unused struct vnop_mkdir_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_rmdir_args /* { - struct vnode *a_dvp; - struct vnode *a_vp; - struct componentname *a_cnp; - vfs_context_t a_context; -} */; + * struct vnode *a_dvp; + * struct vnode *a_vp; + * struct componentname *a_cnp; + * vfs_context_t a_context; + * } */; int nop_rmdir(__unused struct vnop_rmdir_args *ap) { - return (0); + return 0; } int err_rmdir(struct vnop_rmdir_args *ap) { (void)nop_rmdir(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_symlink_args /* { - struct vnode *a_dvp; - struct vnode **a_vpp; - struct componentname 
*a_cnp; - struct vnode_vattr *a_vap; - char *a_target; - vfs_context_t a_context; -} */; + * struct vnode *a_dvp; + * struct vnode **a_vpp; + * struct componentname *a_cnp; + * struct vnode_vattr *a_vap; + * char *a_target; + * vfs_context_t a_context; + * } */; int nop_symlink(__unused struct vnop_symlink_args *ap) { #if DIAGNOSTIC - if ((ap->a_cnp->cn_flags & HASBUF) == 0) + if ((ap->a_cnp->cn_flags & HASBUF) == 0) { panic("nop_symlink: no name"); + } #endif - return (0); + return 0; } int err_symlink(struct vnop_symlink_args *ap) { (void)nop_symlink(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_readdir_args /* { - vnode_t a_vp; - struct uio *a_uio; - int a_flags; - int *a_eofflag; - int *a_numdirent; - vfs_context_t a_context; -} */; + * vnode_t a_vp; + * struct uio *a_uio; + * int a_flags; + * int *a_eofflag; + * int *a_numdirent; + * vfs_context_t a_context; + * } */; int nop_readdir(__unused struct vnop_readdir_args *ap) { - return (0); + return 0; } int err_readdir(__unused struct vnop_readdir_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_readdirattr_args /* { - struct vnodeop_desc *a_desc; - vnode_t a_vp; - struct attrlist *a_alist; - struct uio *a_uio; - u_long a_maxcount; - u_long a_options; - u_long *a_newstate; - int *a_eofflag; - u_long *a_actualcount; - vfs_context_t a_context; -} */ ; + * struct vnodeop_desc *a_desc; + * vnode_t a_vp; + * struct attrlist *a_alist; + * struct uio *a_uio; + * u_long a_maxcount; + * u_long a_options; + * u_long *a_newstate; + * int *a_eofflag; + * u_long *a_actualcount; + * vfs_context_t a_context; + * } */; int nop_readdirattr(struct vnop_readdirattr_args *ap) { - *(ap->a_actualcount) = 0; - *(ap->a_eofflag) = 0; - return (0); + *(ap->a_actualcount) = 0; + *(ap->a_eofflag) = 0; + return 0; } int err_readdirattr(struct vnop_readdirattr_args *ap) { - (void)nop_readdirattr(ap); - return (ENOTSUP); + (void)nop_readdirattr(ap); + return ENOTSUP; } struct vnop_readlink_args /* { - struct vnode *vp; - struct uio *uio; - vfs_context_t a_context; -} */; + * struct vnode *vp; + * struct uio *uio; + * vfs_context_t a_context; + * } */; int nop_readlink(__unused struct vnop_readlink_args *ap) { - return (0); + return 0; } int err_readlink(__unused struct vnop_readlink_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_inactive_args /* { - struct vnode *a_vp; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * vfs_context_t a_context; + * } */; int nop_inactive(__unused struct vnop_inactive_args *ap) { - return (0); + return 0; } int err_inactive(struct vnop_inactive_args *ap) { (void)nop_inactive(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_reclaim_args /* { - struct vnode *a_vp; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * vfs_context_t a_context; + * } */; int nop_reclaim(__unused struct vnop_reclaim_args *ap) { - return (0); + return 0; } int err_reclaim(__unused struct vnop_reclaim_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_strategy_args /* { - struct buf *a_bp; -} */; + * struct buf *a_bp; + * } */; int nop_strategy(__unused struct vnop_strategy_args *ap) { - return (0); + return 0; } int err_strategy(__unused struct vnop_strategy_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_pathconf_args /* { - struct vnode *a_vp; - int a_name; - int32_t *a_retval; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * int a_name; + * int32_t *a_retval; + * vfs_context_t a_context; + * } */; int nop_pathconf(__unused struct 
vnop_pathconf_args *ap) { - return (0); + return 0; } int err_pathconf(__unused struct vnop_pathconf_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_advlock_args /* { - struct vnode *a_vp; - caddr_t a_id; - int a_op; - struct flock *a_fl; - int a_flags; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * caddr_t a_id; + * int a_op; + * struct flock *a_fl; + * int a_flags; + * vfs_context_t a_context; + * } */; int nop_advlock(__unused struct vnop_advlock_args *ap) { - return (0); + return 0; } int err_advlock(__unused struct vnop_advlock_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_allocate_args /* { - struct vnode *a_vp; - off_t a_length; - u_int32_t a_flags; - off_t *a_bytesallocated; - off_t a_offset; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * off_t a_length; + * u_int32_t a_flags; + * off_t *a_bytesallocated; + * off_t a_offset; + * vfs_context_t a_context; + * } */; int nop_allocate(struct vnop_allocate_args *ap) { *(ap->a_bytesallocated) = 0; - return (0); + return 0; } int err_allocate(struct vnop_allocate_args *ap) { (void)nop_allocate(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_bwrite_args /* { - struct buf *a_bp; -} */; + * struct buf *a_bp; + * } */; int nop_bwrite(struct vnop_bwrite_args *ap) { - return ((int)buf_bwrite(ap->a_bp)); + return (int)buf_bwrite(ap->a_bp); } int err_bwrite(__unused struct vnop_bwrite_args *ap) { - return (ENOTSUP); + return ENOTSUP; } struct vnop_pagein_args /* { - struct vnode *a_vp, - upl_t a_pl, - vm_offset_t a_pl_offset, - off_t a_foffset, - size_t a_size, - int a_flags - vfs_context_t a_context; -} */; + * struct vnode *a_vp, + * upl_t a_pl, + * vm_offset_t a_pl_offset, + * off_t a_foffset, + * size_t a_size, + * int a_flags + * vfs_context_t a_context; + * } */; int nop_pagein(struct vnop_pagein_args *ap) { - if ( !(ap->a_flags & UPL_NOCOMMIT)) - ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); - return (EINVAL); + if (!(ap->a_flags & UPL_NOCOMMIT)) { + ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + } + return EINVAL; } int err_pagein(struct vnop_pagein_args *ap) { - if ( !(ap->a_flags & UPL_NOCOMMIT)) - ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); - return (ENOTSUP); + if (!(ap->a_flags & UPL_NOCOMMIT)) { + ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + } + return ENOTSUP; } struct vnop_pageout_args /* { - struct vnode *a_vp, - upl_t a_pl, - vm_offset_t a_pl_offset, - off_t a_foffset, - size_t a_size, - int a_flags - vfs_context_t a_context; -} */; + * struct vnode *a_vp, + * upl_t a_pl, + * vm_offset_t a_pl_offset, + * off_t a_foffset, + * size_t a_size, + * int a_flags + * vfs_context_t a_context; + * } */; int nop_pageout(struct vnop_pageout_args *ap) { - if ( !(ap->a_flags & UPL_NOCOMMIT)) - ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); - return (EINVAL); + if (!(ap->a_flags & UPL_NOCOMMIT)) { + ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + } + return EINVAL; } int err_pageout(struct vnop_pageout_args *ap) { - if ( !(ap->a_flags & UPL_NOCOMMIT)) - ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); - return (ENOTSUP); + if (!(ap->a_flags & UPL_NOCOMMIT)) { + 
ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, ap->a_size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + } + return ENOTSUP; } struct vnop_searchfs /* { - struct vnode *a_vp; - void *a_searchparams1; - void *a_searchparams2; - struct attrlist *a_searchattrs; - u_long a_maxmatches; - struct timeval *a_timelimit; - struct attrlist *a_returnattrs; - u_long *a_nummatches; - u_long a_scriptcode; - u_long a_options; - struct uio *a_uio; - struct searchstate *a_searchstate; - vfs_context_t a_context; -} */; + * struct vnode *a_vp; + * void *a_searchparams1; + * void *a_searchparams2; + * struct attrlist *a_searchattrs; + * u_long a_maxmatches; + * struct timeval *a_timelimit; + * struct attrlist *a_returnattrs; + * u_long *a_nummatches; + * u_long a_scriptcode; + * u_long a_options; + * struct uio *a_uio; + * struct searchstate *a_searchstate; + * vfs_context_t a_context; + * } */; int nop_searchfs(struct vnop_searchfs_args *ap) { *(ap->a_nummatches) = 0; - return (0); + return 0; } int err_searchfs(struct vnop_searchfs_args *ap) { (void)nop_searchfs(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_copyfile_args /*{ - struct vnodeop_desc *a_desc; - struct vnode *a_fvp; - struct vnode *a_tdvp; - struct vnode *a_tvp; - struct componentname *a_tcnp; - int a_flags; -}*/; + * struct vnodeop_desc *a_desc; + * struct vnode *a_fvp; + * struct vnode *a_tdvp; + * struct vnode *a_tvp; + * struct componentname *a_tcnp; + * int a_flags; + * }*/; int nop_copyfile(__unused struct vnop_copyfile_args *ap) { - return (0); + return 0; } @@ -828,67 +835,68 @@ int err_copyfile(struct vnop_copyfile_args *ap) { (void)nop_copyfile(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_blktooff_args /* { - struct vnode *a_vp; - daddr64_t a_lblkno; - off_t *a_offset; -} */; + * struct vnode *a_vp; + * daddr64_t a_lblkno; + * off_t *a_offset; + * } */; int nop_blktooff(struct vnop_blktooff_args *ap) { - *ap->a_offset = (off_t)-1; /* failure */ - return (0); + *ap->a_offset = (off_t)-1; /* failure */ + return 0; } int err_blktooff(struct vnop_blktooff_args *ap) { (void)nop_blktooff(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_offtoblk_args /* { - struct vnode *a_vp; - off_t a_offset; - daddr64_t *a_lblkno; -} */; + * struct vnode *a_vp; + * off_t a_offset; + * daddr64_t *a_lblkno; + * } */; int nop_offtoblk(struct vnop_offtoblk_args *ap) { - *ap->a_lblkno = (daddr64_t)-1; /* failure */ - return (0); + *ap->a_lblkno = (daddr64_t)-1; /* failure */ + return 0; } int err_offtoblk(struct vnop_offtoblk_args *ap) { (void)nop_offtoblk(ap); - return (ENOTSUP); + return ENOTSUP; } struct vnop_blockmap_args /* { - struct vnode *a_vp; - off_t a_foffset; - size_t a_size; - daddr64_t *a_bpn; - size_t *a_run; - void *a_poff; - int a_flags; -} */; + * struct vnode *a_vp; + * off_t a_foffset; + * size_t a_size; + * daddr64_t *a_bpn; + * size_t *a_run; + * void *a_poff; + * int a_flags; + * } */; -int nop_blockmap(__unused struct vnop_blockmap_args *ap) +int +nop_blockmap(__unused struct vnop_blockmap_args *ap) { - return (0); + return 0; } -int err_blockmap(__unused struct vnop_blockmap_args *ap) +int +err_blockmap(__unused struct vnop_blockmap_args *ap) { - return (ENOTSUP); + return ENOTSUP; } - diff --git a/bsd/vfs/vfs_support.h b/bsd/vfs/vfs_support.h index ae94ff643..e47a46723 100644 --- a/bsd/vfs/vfs_support.h +++ b/bsd/vfs/vfs_support.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. 
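
/*
 * The vfs_support.c stubs above come in pairs: a "nop" variant that succeeds
 * without doing anything and an "err" variant that returns ENOTSUP.  A
 * minimal user-space sketch of the same default-dispatch idea (illustrative
 * only; "fs_ops" and the slot names are hypothetical, not the kernel's
 * vnodeop descriptor tables):
 */
#include <errno.h>
#include <stdio.h>

struct op_args { int dummy; };

static int nop_op(struct op_args *ap) { (void)ap; return 0; }       /* accept silently */
static int err_op(struct op_args *ap) { (void)ap; return ENOTSUP; } /* refuse as unsupported */

struct fs_ops {
	int (*op_read)(struct op_args *);
	int (*op_ioctl)(struct op_args *);
};

/* A filesystem fills in what it implements; everything else gets a stub. */
static const struct fs_ops ops = { .op_read = nop_op, .op_ioctl = err_op };

int main(void)
{
	struct op_args a = { 0 };
	printf("read -> %d, ioctl -> %d\n", ops.op_read(&a), ops.op_ioctl(&a));
	return 0;
}
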
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,11 +37,11 @@ * * HISTORY * 18-Aug-1998 Umesh Vaishampayan (umeshv@apple.com) - * Created. + * Created. */ -#ifndef _VFS_VFS_SUPPORT_H_ -#define _VFS_VFS_SUPPORT_H_ +#ifndef _VFS_VFS_SUPPORT_H_ +#define _VFS_VFS_SUPPORT_H_ #include #include @@ -178,4 +178,4 @@ extern int nop_blockmap(struct vnop_blockmap_args *ap); extern int err_blockmap(struct vnop_blockmap_args *ap); __END_DECLS -#endif /* _VFS_VFS_SUPPORT_H_ */ +#endif /* _VFS_VFS_SUPPORT_H_ */ diff --git a/bsd/vfs/vfs_syscalls.c b/bsd/vfs/vfs_syscalls.c index 4e24c8489..c9dc444b9 100644 --- a/bsd/vfs/vfs_syscalls.c +++ b/bsd/vfs/vfs_syscalls.c @@ -139,7 +139,7 @@ #define RELEASE_PATH(x) \ release_pathbuff(x); #else -#define GET_PATH(x) \ +#define GET_PATH(x) \ MALLOC_ZONE((x), char *, MAXPATHLEN, M_NAMEI, M_WAITOK); #define RELEASE_PATH(x) \ FREE_ZONE((x), MAXPATHLEN, M_NAMEI); @@ -175,15 +175,15 @@ static int getutimes(user_addr_t usrtvp, struct timespec *tsp); static int setutimes(vfs_context_t ctx, vnode_t vp, const struct timespec *ts, int nullflag); static int sync_callback(mount_t, void *); static int munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, - user_addr_t bufp, int *sizep, boolean_t is_64_bit, - boolean_t partial_copy); + user_addr_t bufp, int *sizep, boolean_t is_64_bit, + boolean_t partial_copy); static int statfs64_common(struct mount *mp, struct vfsstatfs *sfsp, - user_addr_t bufp); + user_addr_t bufp); static int fsync_common(proc_t p, struct fsync_args *uap, int flags); static int mount_common(char *fstypename, vnode_t pvp, vnode_t vp, - struct componentname *cnp, user_addr_t fsmountargs, - int flags, uint32_t internal_flags, char *labelstr, boolean_t kernelmount, - vfs_context_t ctx); + struct componentname *cnp, user_addr_t fsmountargs, + int flags, uint32_t internal_flags, char *labelstr, boolean_t kernelmount, + vfs_context_t ctx); void vfs_notify_mount(vnode_t pdvp); int prepare_coveredvp(vnode_t vp, vfs_context_t ctx, struct componentname *cnp, const char *fsname, boolean_t skip_auth); @@ -242,7 +242,7 @@ extern lck_attr_t *fd_vn_lck_attr; uint32_t mount_generation = 0; /* counts number of mount and unmount operations */ -unsigned int vfs_nummntops=0; +unsigned int vfs_nummntops = 0; extern const struct fileops vnops; #if CONFIG_APPLEDOUBLE @@ -257,32 +257,33 @@ extern errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *); /* * Private in-kernel mounting spi (NFS only, not exported) */ - __private_extern__ 
+__private_extern__ boolean_t vfs_iskernelmount(mount_t mp) { - return ((mp->mnt_kern_flag & MNTK_KERNEL_MOUNT) ? TRUE : FALSE); + return (mp->mnt_kern_flag & MNTK_KERNEL_MOUNT) ? TRUE : FALSE; } - __private_extern__ +__private_extern__ int kernel_mount(char *fstype, vnode_t pvp, vnode_t vp, const char *path, - void *data, __unused size_t datalen, int syscall_flags, __unused uint32_t kern_flags, vfs_context_t ctx) + void *data, __unused size_t datalen, int syscall_flags, __unused uint32_t kern_flags, vfs_context_t ctx) { struct nameidata nd; boolean_t did_namei; int error; NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW | AUDITVNPATH1 | WANTPARENT, - UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); + UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); /* * Get the vnode to be covered if it's not supplied */ if (vp == NULLVP) { error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; pvp = nd.ni_dvp; did_namei = TRUE; @@ -295,7 +296,7 @@ kernel_mount(char *fstype, vnode_t pvp, vnode_t vp, const char *path, } error = mount_common(fstype, pvp, vp, &nd.ni_cnd, CAST_USER_ADDR_T(data), - syscall_flags, kern_flags, NULL, TRUE, ctx); + syscall_flags, kern_flags, NULL, TRUE, ctx); if (did_namei) { vnode_put(vp); @@ -303,7 +304,7 @@ kernel_mount(char *fstype, vnode_t pvp, vnode_t vp, const char *path, nameidone(&nd); } - return (error); + return error; } #endif /* NFSCLIENT || DEVFS */ @@ -321,66 +322,66 @@ mount(proc_t p, struct mount_args *uap, __unused int32_t *retval) muap.flags = uap->flags; muap.data = uap->data; muap.mac_p = USER_ADDR_NULL; - return (__mac_mount(p, &muap, retval)); + return __mac_mount(p, &muap, retval); } int fmount(__unused proc_t p, struct fmount_args *uap, __unused int32_t *retval) { - struct componentname cn; - vfs_context_t ctx = vfs_context_current(); - size_t dummy = 0; - int error; - int flags = uap->flags; - char fstypename[MFSNAMELEN]; - char *labelstr = NULL; /* regular mount call always sets it to NULL for __mac_mount() */ - vnode_t pvp; - vnode_t vp; + struct componentname cn; + vfs_context_t ctx = vfs_context_current(); + size_t dummy = 0; + int error; + int flags = uap->flags; + char fstypename[MFSNAMELEN]; + char *labelstr = NULL; /* regular mount call always sets it to NULL for __mac_mount() */ + vnode_t pvp; + vnode_t vp; AUDIT_ARG(fd, uap->fd); AUDIT_ARG(fflags, flags); /* fstypename will get audited by mount_common */ /* Sanity check the flags */ - if (flags & (MNT_IMGSRC_BY_INDEX|MNT_ROOTFS)) { - return (ENOTSUP); + if (flags & (MNT_IMGSRC_BY_INDEX | MNT_ROOTFS)) { + return ENOTSUP; } if (flags & MNT_UNION) { - return (EPERM); + return EPERM; } error = copyinstr(uap->type, fstypename, MFSNAMELEN, &dummy); if (error) { - return (error); + return error; } if ((error = file_vnode(uap->fd, &vp)) != 0) { - return (error); + return error; } if ((error = vnode_getwithref(vp)) != 0) { file_drop(uap->fd); - return (error); + return error; } pvp = vnode_getparent(vp); if (pvp == NULL) { vnode_put(vp); file_drop(uap->fd); - return (EINVAL); + return EINVAL; } memset(&cn, 0, sizeof(struct componentname)); MALLOC(cn.cn_pnbuf, char *, MAXPATHLEN, M_TEMP, M_WAITOK); cn.cn_pnlen = MAXPATHLEN; - if((error = vn_getpath(vp, cn.cn_pnbuf, &cn.cn_pnlen)) != 0) { + if ((error = vn_getpath(vp, cn.cn_pnbuf, &cn.cn_pnlen)) != 0) { FREE(cn.cn_pnbuf, M_TEMP); vnode_put(pvp); vnode_put(vp); file_drop(uap->fd); - return (error); + return error; } error = mount_common(fstypename, pvp, vp, &cn, uap->data, flags, 0, labelstr, FALSE, ctx); @@ -390,7 +391,7 @@ 
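
/*
 * fmount() above resolves the covered vnode from a file descriptor and
 * funnels into mount_common(), exactly like the path-based mount().  A
 * hedged user-space sketch of the classic entry point, taking the
 * MNT_UPDATE branch of mount_common() to downgrade a mount to read-only
 * (the volume path is hypothetical, and passing NULL data is only valid
 * for filesystems that need no mount arguments):
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("apfs", "/Volumes/Example", MNT_UPDATE | MNT_RDONLY, NULL) != 0) {
		fprintf(stderr, "mount: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}
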
fmount(__unused proc_t p, struct fmount_args *uap, __unused int32_t *retval) vnode_put(vp); file_drop(uap->fd); - return (error); + return error; } void @@ -425,12 +426,12 @@ int __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int32_t *retval) { vnode_t pvp = NULL; - vnode_t vp = NULL; + vnode_t vp = NULL; int need_nameidone = 0; vfs_context_t ctx = vfs_context_current(); char fstypename[MFSNAMELEN]; struct nameidata nd; - size_t dummy=0; + size_t dummy = 0; char *labelstr = NULL; int flags = uap->flags; int error; @@ -443,14 +444,15 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 * Get the fs type name from user space */ error = copyinstr(uap->type, fstypename, MFSNAMELEN, &dummy); - if (error) - return (error); + if (error) { + return error; + } /* * Get the vnode to be covered */ NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW | AUDITVNPATH1 | WANTPARENT, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); if (error) { goto out; @@ -463,7 +465,7 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 /* Mounting image source cannot be batched with other operations */ if (flags == MNT_IMGSRC_BY_INDEX) { error = relocate_imageboot_source(pvp, vp, &nd.ni_cnd, fstypename, - ctx, is_64bit, uap->data, (flags == MNT_IMGSRC_BY_INDEX)); + ctx, is_64bit, uap->data, (flags == MNT_IMGSRC_BY_INDEX)); goto out; } #endif /* CONFIG_IMGSRC_ACCESS */ @@ -487,8 +489,9 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 mac.m_buflen = mac32.m_buflen; mac.m_string = mac32.m_string; } - if (error) + if (error) { goto out; + } if ((mac.m_buflen > MAC_MAX_LABEL_BUF_LEN) || (mac.m_buflen < 2)) { error = EINVAL; @@ -514,11 +517,10 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 #endif if ((vp->v_flag & VROOT) && - (vp->v_mount->mnt_flag & MNT_ROOTFS)) { + (vp->v_mount->mnt_flag & MNT_ROOTFS)) { if (!(flags & MNT_UNION)) { flags |= MNT_UPDATE; - } - else { + } else { /* * For a union mount on '/', treat it as fresh * mount instead of update. @@ -544,20 +546,21 @@ __mac_mount(struct proc *p, register struct __mac_mount_args *uap, __unused int3 * bitmap optimization. */ #if CHECK_CS_VALIDATION_BITMAP - if ((flags & MNT_RDONLY) == 0 ) { + if ((flags & MNT_RDONLY) == 0) { root_fs_upgrade_try = TRUE; } #endif } error = mount_common(fstypename, pvp, vp, &nd.ni_cnd, uap->data, flags, 0, - labelstr, FALSE, ctx); + labelstr, FALSE, ctx); out: #if CONFIG_MACF - if (labelstr) + if (labelstr) { FREE(labelstr, M_MACTEMP); + } #endif /* CONFIG_MACF */ if (vp) { @@ -570,12 +573,12 @@ out: nameidone(&nd); } - return (error); + return error; } /* * common mount implementation (final stage of mounting) - + * * Arguments: * fstypename file system type (ie it's vfs name) * pvp parent of covered vnode @@ -589,8 +592,8 @@ out: */ static int mount_common(char *fstypename, vnode_t pvp, vnode_t vp, - struct componentname *cnp, user_addr_t fsmountargs, int flags, uint32_t internal_flags, - char *labelstr, boolean_t kernelmount, vfs_context_t ctx) + struct componentname *cnp, user_addr_t fsmountargs, int flags, uint32_t internal_flags, + char *labelstr, boolean_t kernelmount, vfs_context_t ctx) { #if !CONFIG_MACF #pragma unused(labelstr) @@ -647,7 +650,7 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, * allowed to turn it off. 
*/ if ((mp->mnt_flag & MNT_CPROTECT) && - ((flags & MNT_CPROTECT) == 0)) { + ((flags & MNT_CPROTECT) == 0)) { error = EINVAL; goto out1; } @@ -655,7 +658,7 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, #ifdef CONFIG_IMGSRC_ACCESS /* Can't downgrade the backer of the root FS */ if ((mp->mnt_kern_flag & MNTK_BACKS_ROOT) && - (!vfs_isrdonly(mp)) && (flags & MNT_RDONLY)) { + (!vfs_isrdonly(mp)) && (flags & MNT_RDONLY)) { error = ENOTSUP; goto out1; } @@ -681,8 +684,9 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, */ if ((!kernelmount) && suser(vfs_context_ucred(ctx), NULL)) { flags |= MNT_NOSUID | MNT_NODEV; - if (mp->mnt_flag & MNT_NOEXEC) + if (mp->mnt_flag & MNT_NOEXEC) { flags |= MNT_NOEXEC; + } } flag = mp->mnt_flag; @@ -700,19 +704,21 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, */ if ((!kernelmount) && suser(vfs_context_ucred(ctx), NULL)) { flags |= MNT_NOSUID | MNT_NODEV; - if (vp->v_mount->mnt_flag & MNT_NOEXEC) + if (vp->v_mount->mnt_flag & MNT_NOEXEC) { flags |= MNT_NOEXEC; + } } /* XXXAUDIT: Should we capture the type on the error path as well? */ AUDIT_ARG(text, fstypename); mount_list_lock(); - for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) + for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) { if (!strncmp(vfsp->vfc_name, fstypename, MFSNAMELEN)) { vfsp->vfc_refcount++; vfsp_ref = TRUE; break; } + } mount_list_unlock(); if (vfsp == NULL) { error = ENODEV; @@ -736,7 +742,7 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, * Allocate and initialize the filesystem (mount_t) */ MALLOC_ZONE(mp, struct mount *, (u_int32_t)sizeof(struct mount), - M_MOUNT, M_WAITOK); + M_MOUNT, M_WAITOK); bzero((char *)mp, (u_int32_t)sizeof(struct mount)); mntalloc = 1; @@ -774,10 +780,12 @@ mount_common(char *fstypename, vnode_t pvp, vnode_t vp, vfs_setowner(mp, KAUTH_UID_NONE, KAUTH_GID_NONE); #if NFSCLIENT || DEVFS || ROUTEFS - if (kernelmount) + if (kernelmount) { mp->mnt_kern_flag |= MNTK_KERNEL_MOUNT; - if ((internal_flags & KERNEL_MOUNT_PERMIT_UNMOUNT) != 0) + } + if ((internal_flags & KERNEL_MOUNT_PERMIT_UNMOUNT) != 0) { mp->mnt_kern_flag |= MNTK_PERMIT_UNMOUNT; + } #endif /* NFSCLIENT || DEVFS */ update: @@ -785,9 +793,9 @@ update: /* * Set the mount level flags. */ - if (flags & MNT_RDONLY) + if (flags & MNT_RDONLY) { mp->mnt_flag |= MNT_RDONLY; - else if (mp->mnt_flag & MNT_RDONLY) { + } else if (mp->mnt_flag & MNT_RDONLY) { // disallow read/write upgrades of file systems that // had the TYPENAME_OVERRIDE feature set. 
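
/*
 * The flag handling just below first clears every user-settable MNT_* bit
 * in mnt_flag and then ORs back only what the caller requested, so
 * privileged bits survive an update.  The clear-then-set idiom in isolation
 * (the mask value is a stand-in, not the kernel's MNT_* mask):
 */
#include <stdio.h>

#define SETTABLE_MASK 0x00ffu	/* hypothetical user-settable bits */

int main(void)
{
	unsigned cur = 0x1234u;	/* existing flags; 0x1200 is privileged */
	unsigned req = 0x00abu;	/* flags requested by the caller       */

	cur &= ~SETTABLE_MASK;		/* drop the old settable bits */
	cur |= (req & SETTABLE_MASK);	/* install the requested ones */
	printf("0x%04x\n", cur);	/* prints 0x12ab              */
	return 0;
}
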
if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) { @@ -797,10 +805,10 @@ update: mp->mnt_kern_flag |= MNTK_WANTRDWR; } mp->mnt_flag &= ~(MNT_NOSUID | MNT_NOEXEC | MNT_NODEV | - MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | - MNT_UNKNOWNPERMISSIONS | MNT_DONTBROWSE | - MNT_AUTOMOUNTED | MNT_DEFWRITE | MNT_NOATIME | - MNT_QUARANTINE | MNT_CPROTECT); + MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | + MNT_UNKNOWNPERMISSIONS | MNT_DONTBROWSE | + MNT_AUTOMOUNTED | MNT_DEFWRITE | MNT_NOATIME | + MNT_QUARANTINE | MNT_CPROTECT); #if SECURE_KERNEL #if !CONFIG_MNT_SUID @@ -814,10 +822,10 @@ update: #endif mp->mnt_flag |= flags & (MNT_NOSUID | MNT_NOEXEC | MNT_NODEV | - MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | - MNT_UNKNOWNPERMISSIONS | MNT_DONTBROWSE | - MNT_AUTOMOUNTED | MNT_DEFWRITE | MNT_NOATIME | - MNT_QUARANTINE | MNT_CPROTECT); + MNT_SYNCHRONOUS | MNT_UNION | MNT_ASYNC | + MNT_UNKNOWNPERMISSIONS | MNT_DONTBROWSE | + MNT_AUTOMOUNTED | MNT_DEFWRITE | MNT_NOATIME | + MNT_QUARANTINE | MNT_CPROTECT); #if CONFIG_MACF if (flags & MNT_MULTILABEL) { @@ -834,13 +842,15 @@ update: if (vfsp->vfc_vfsflags & VFC_VFSLOCALARGS && !(internal_flags & KERNEL_MOUNT_SNAPSHOT)) { if (vfs_context_is64bit(ctx)) { - if ( (error = copyin(fsmountargs, (caddr_t)&devpath, sizeof(devpath))) ) + if ((error = copyin(fsmountargs, (caddr_t)&devpath, sizeof(devpath)))) { goto out1; + } fsmountargs += sizeof(devpath); } else { user32_addr_t tmp; - if ( (error = copyin(fsmountargs, (caddr_t)&tmp, sizeof(tmp))) ) + if ((error = copyin(fsmountargs, (caddr_t)&tmp, sizeof(tmp)))) { goto out1; + } /* munge into LP64 addr */ devpath = CAST_USER_ADDR_T(tmp); fsmountargs += sizeof(tmp); @@ -851,8 +861,9 @@ update: struct nameidata nd; NDINIT(&nd, LOOKUP, OP_MOUNT, FOLLOW, UIO_USERSPACE, devpath, ctx); - if ( (error = namei(&nd)) ) + if ((error = namei(&nd))) { goto out1; + } strlcpy(mp->mnt_vfsstat.f_mntfromname, nd.ni_cnd.cn_pnbuf, MAXPATHLEN); devvp = nd.ni_vp; @@ -868,59 +879,65 @@ update: goto out2; } /* - * If mount by non-root, then verify that user has necessary - * permissions on the device. - */ + * If mount by non-root, then verify that user has necessary + * permissions on the device. + */ if (suser(vfs_context_ucred(ctx), NULL) != 0) { mode_t accessmode = KAUTH_VNODE_READ_DATA; - if ((mp->mnt_flag & MNT_RDONLY) == 0) + if ((mp->mnt_flag & MNT_RDONLY) == 0) { accessmode |= KAUTH_VNODE_WRITE_DATA; - if ((error = vnode_authorize(devvp, NULL, accessmode, ctx)) != 0) + } + if ((error = vnode_authorize(devvp, NULL, accessmode, ctx)) != 0) { goto out2; + } } } /* On first mount, preflight and open device */ if (devpath && ((flags & MNT_UPDATE) == 0)) { - if ( (error = vnode_ref(devvp)) ) + if ((error = vnode_ref(devvp))) { goto out2; + } /* - * Disallow multiple mounts of the same device. - * Disallow mounting of a device that is currently in use - * (except for root, which might share swap device for miniroot). - * Flush out any old buffers remaining from a previous use. - */ - if ( (error = vfs_mountedon(devvp)) ) + * Disallow multiple mounts of the same device. + * Disallow mounting of a device that is currently in use + * (except for root, which might share swap device for miniroot). + * Flush out any old buffers remaining from a previous use. 
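
/*
 * The copyin logic above reads the device-path pointer at two widths: a
 * 64-bit caller supplies a full pointer, while a 32-bit caller's pointer is
 * widened ("munged") into the kernel's 64-bit user_addr_t before use.  The
 * widening step in isolation (the typedefs are stand-ins for the kernel's):
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t user_addr_t;	/* kernel view of a user pointer (LP64) */
typedef uint32_t user32_addr_t;	/* pointer as stored by a 32-bit caller */

int main(void)
{
	user32_addr_t tmp = 0xbfff1234u;	/* value copied in from user space */
	user_addr_t devpath = (user_addr_t)tmp;	/* munge into LP64 addr */

	printf("0x%llx\n", (unsigned long long)devpath);
	return 0;
}
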
+ */ + if ((error = vfs_mountedon(devvp))) { goto out3; + } if (vcount(devvp) > 1 && !(vfs_flags(mp) & MNT_ROOTFS)) { error = EBUSY; goto out3; } - if ( (error = VNOP_FSYNC(devvp, MNT_WAIT, ctx)) ) { + if ((error = VNOP_FSYNC(devvp, MNT_WAIT, ctx))) { error = ENOTBLK; goto out3; } - if ( (error = buf_invalidateblks(devvp, BUF_WRITE_DATA, 0, 0)) ) + if ((error = buf_invalidateblks(devvp, BUF_WRITE_DATA, 0, 0))) { goto out3; + } ronly = (mp->mnt_flag & MNT_RDONLY) != 0; #if CONFIG_MACF error = mac_vnode_check_open(ctx, devvp, - ronly ? FREAD : FREAD|FWRITE); - if (error) + ronly ? FREAD : FREAD | FWRITE); + if (error) { goto out3; + } #endif /* MAC */ - if ( (error = VNOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, ctx)) ) + if ((error = VNOP_OPEN(devvp, ronly ? FREAD : FREAD | FWRITE, ctx))) { goto out3; + } mp->mnt_devvp = devvp; device_vnode = devvp; - } else if ((mp->mnt_flag & MNT_RDONLY) && - (mp->mnt_kern_flag & MNTK_WANTRDWR) && - (device_vnode = mp->mnt_devvp)) { + (mp->mnt_kern_flag & MNTK_WANTRDWR) && + (device_vnode = mp->mnt_devvp)) { dev_t dev; int maj; /* @@ -931,8 +948,8 @@ update: if (suser(vfs_context_ucred(ctx), NULL) && (error = vnode_authorize(device_vnode, NULL, - KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA, - ctx)) != 0) { + KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA, + ctx)) != 0) { vnode_put(device_vnode); goto out2; } @@ -941,8 +958,9 @@ update: dev = (dev_t)device_vnode->v_rdev; maj = major(dev); - if ((u_int)maj >= (u_int)nblkdev) + if ((u_int)maj >= (u_int)nblkdev) { panic("Volume mounted on a device with invalid major number."); + } error = bdevsw[maj].d_open(dev, FREAD | FWRITE, S_IFBLK, p); vnode_put(device_vnode); @@ -960,8 +978,9 @@ update: if (labelstr) { if ((flags & MNT_UPDATE) != 0) { error = mac_mount_check_label_update(ctx, mp); - if (error != 0) + if (error != 0) { goto out3; + } } } #endif @@ -976,18 +995,21 @@ update: } if (flags & MNT_UPDATE) { - if (mp->mnt_kern_flag & MNTK_WANTRDWR) + if (mp->mnt_kern_flag & MNTK_WANTRDWR) { mp->mnt_flag &= ~MNT_RDONLY; - mp->mnt_flag &=~ + } + mp->mnt_flag &= ~ (MNT_UPDATE | MNT_RELOAD | MNT_FORCE); - mp->mnt_kern_flag &=~ MNTK_WANTRDWR; - if (error) + mp->mnt_kern_flag &= ~MNTK_WANTRDWR; + if (error) { mp->mnt_flag = flag; /* restore flag value */ + } vfs_event_signal(NULL, VQ_UPDATE, (intptr_t)NULL); lck_rw_done(&mp->mnt_rwlock); is_rwlock_locked = FALSE; - if (!error) + if (!error) { enablequotas(mp, ctx); + } goto exit; } @@ -995,7 +1017,7 @@ update: * Put the new filesystem on the mount list after root. 
*/ if (error == 0) { - struct vfs_attr vfsattr; + struct vfs_attr vfsattr; #if CONFIG_MACF if (vfs_flags(mp) & MNT_MULTILABEL) { error = VFS_ROOT(mp, &rvp, ctx); @@ -1004,15 +1026,16 @@ update: goto out3; } error = vnode_label(mp, NULL, rvp, NULL, 0, ctx); - /* + /* * drop reference provided by VFS_ROOT */ vnode_put(rvp); - if (error) + if (error) { goto out3; + } } -#endif /* MAC */ +#endif /* MAC */ vnode_lock_spin(vp); CLR(vp->v_flag, VMOUNT); @@ -1039,7 +1062,7 @@ update: have_usecount = TRUE; error = checkdirs(vp, ctx); - if (error != 0) { + if (error != 0) { /* Unmount the filesystem as cdir/rdirs cannot be updated */ goto out4; } @@ -1087,7 +1110,7 @@ update: } if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) && - (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) { + (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) { mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS; } } @@ -1116,12 +1139,11 @@ update: /* Now that mount is setup, notify the listeners */ vfs_notify_mount(pvp); IOBSDMountChange(mp, kIOMountChangeMount); - } else { /* If we fail a fresh mount, there should be no vnodes left hooked into the mountpoint. */ if (mp->mnt_vnodelist.tqh_first != NULL) { panic("mount_common(): mount of %s filesystem failed with %d, but vnode list is not empty.", - mp->mnt_vtable->vfc_name, error); + mp->mnt_vtable->vfc_name, error); } vnode_lock_spin(vp); @@ -1131,9 +1153,9 @@ update: mp->mnt_vtable->vfc_refcount--; mount_list_unlock(); - if (device_vnode ) { + if (device_vnode) { vnode_rele(device_vnode); - VNOP_CLOSE(device_vnode, ronly ? FREAD : FREAD|FWRITE, ctx); + VNOP_CLOSE(device_vnode, ronly ? FREAD : FREAD | FWRITE, ctx); } lck_rw_done(&mp->mnt_rwlock); is_rwlock_locked = FALSE; @@ -1148,16 +1170,17 @@ update: #if CONFIG_MACF mac_mount_label_destroy(mp); #endif - FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); + FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); } exit: /* * drop I/O count on the device vp if there was one */ - if (devpath && devvp) - vnode_put(devvp); + if (devpath && devvp) { + vnode_put(devvp); + } - return(error); + return error; /* Error condition exits */ out4: @@ -1174,8 +1197,8 @@ out4: if (device_vnode != NULLVP) { vnode_rele(device_vnode); - VNOP_CLOSE(device_vnode, mp->mnt_flag & MNT_RDONLY ? FREAD : FREAD|FWRITE, - ctx); + VNOP_CLOSE(device_vnode, mp->mnt_flag & MNT_RDONLY ? 
FREAD : FREAD | FWRITE, + ctx); did_rele = TRUE; } @@ -1190,11 +1213,13 @@ out4: vnode_rele(vp); } out3: - if (devpath && ((flags & MNT_UPDATE) == 0) && (!did_rele)) + if (devpath && ((flags & MNT_UPDATE) == 0) && (!did_rele)) { vnode_rele(devvp); + } out2: - if (devpath && devvp) - vnode_put(devvp); + if (devpath && devvp) { + vnode_put(devvp); + } out1: /* Release mnt_rwlock only when it was taken */ if (is_rwlock_locked == TRUE) { @@ -1202,14 +1227,14 @@ out1: } if (mntalloc) { - if (mp->mnt_crossref) + if (mp->mnt_crossref) { mount_dropcrossref(mp, vp, 0); - else { + } else { mount_lock_destroy(mp); #if CONFIG_MACF mac_mount_label_destroy(mp); #endif - FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); + FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); } } if (vfsp_ref) { @@ -1218,7 +1243,7 @@ out1: mount_list_unlock(); } - return(error); + return error; } /* @@ -1242,18 +1267,20 @@ prepare_coveredvp(vnode_t vp, vfs_context_t ctx, struct componentname *cnp, cons VATTR_INIT(&va); VATTR_WANTED(&va, va_uid); if ((error = vnode_getattr(vp, &va, ctx)) || - (va.va_uid != kauth_cred_getuid(vfs_context_ucred(ctx)) && - (!vfs_context_issuser(ctx)))) { + (va.va_uid != kauth_cred_getuid(vfs_context_ucred(ctx)) && + (!vfs_context_issuser(ctx)))) { error = EPERM; goto out; } } - if ( (error = VNOP_FSYNC(vp, MNT_WAIT, ctx)) ) + if ((error = VNOP_FSYNC(vp, MNT_WAIT, ctx))) { goto out; + } - if ( (error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0)) ) + if ((error = buf_invalidateblks(vp, BUF_WRITE_DATA, 0, 0))) { goto out; + } if (vp->v_type != VDIR) { error = ENOTDIR; @@ -1268,8 +1295,9 @@ prepare_coveredvp(vnode_t vp, vfs_context_t ctx, struct componentname *cnp, cons #if CONFIG_MACF error = mac_mount_check_mount(ctx, vp, cnp, fsname); - if (error != 0) + if (error != 0) { goto out; + } #endif vnode_lock_spin(vp); @@ -1297,7 +1325,7 @@ authorize_devpath_and_update_mntfromname(mount_t mp, user_addr_t devpath, vnode_ int error; NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW, UIO_USERSPACE, devpath, ctx); - if ( (error = namei(&nd)) ) { + if ((error = namei(&nd))) { IMGSRC_DEBUG("namei() failed with %d\n", error); return error; } @@ -1337,8 +1365,9 @@ authorize_devpath_and_update_mntfromname(mount_t mp, user_addr_t devpath, vnode_ */ if (!vfs_context_issuser(ctx)) { accessmode = KAUTH_VNODE_READ_DATA; - if ((mp->mnt_flag & MNT_RDONLY) == 0) + if ((mp->mnt_flag & MNT_RDONLY) == 0) { accessmode |= KAUTH_VNODE_WRITE_DATA; + } if ((error = vnode_authorize(vp, NULL, accessmode, ctx)) != 0) { IMGSRC_DEBUG("Access denied.\n"); goto out1; @@ -1392,7 +1421,7 @@ place_mount_and_checkdirs(mount_t mp, vnode_t vp, vfs_context_t ctx) } error = checkdirs(vp, ctx); - if (error != 0) { + if (error != 0) { /* Unmount the filesystem as cdir/rdirs cannot be updated */ vnode_rele(vp); goto out; @@ -1435,7 +1464,7 @@ mount_begin_update(mount_t mp, vfs_context_t ctx, int flags) * is currently mounted read-only. */ if ((flags & MNT_RELOAD) && - ((mp->mnt_flag & MNT_RDONLY) == 0)) { + ((mp->mnt_flag & MNT_RDONLY) == 0)) { error = ENOTSUP; goto out; } @@ -1445,7 +1474,7 @@ mount_begin_update(mount_t mp, vfs_context_t ctx, int flags) * permitted to update it. 
*/ if (mp->mnt_vfsstat.f_owner != kauth_cred_getuid(vfs_context_ucred(ctx)) && - (!vfs_context_issuser(ctx))) { + (!vfs_context_issuser(ctx))) { error = EPERM; goto out; } @@ -1490,8 +1519,8 @@ get_imgsrc_rootvnode(uint32_t height, vnode_t *rvpp) static int relocate_imageboot_source(vnode_t pvp, vnode_t vp, struct componentname *cnp, - const char *fsname, vfs_context_t ctx, - boolean_t is64bit, user_addr_t fsmountargs, boolean_t by_index) + const char *fsname, vfs_context_t ctx, + boolean_t is64bit, user_addr_t fsmountargs, boolean_t by_index) { int error; mount_t mp; @@ -1548,12 +1577,14 @@ relocate_imageboot_source(vnode_t pvp, vnode_t vp, struct componentname *cnp, * For binary compatibility--assumes one level of nesting. */ if (is64bit) { - if ( (error = copyin(fsmountargs, (caddr_t)&devpath, sizeof(devpath))) ) + if ((error = copyin(fsmountargs, (caddr_t)&devpath, sizeof(devpath)))) { return error; + } } else { user32_addr_t tmp; - if ( (error = copyin(fsmountargs, (caddr_t)&tmp, sizeof(tmp))) ) + if ((error = copyin(fsmountargs, (caddr_t)&tmp, sizeof(tmp)))) { return error; + } /* munge into LP64 addr */ devpath = CAST_USER_ADDR_T(tmp); @@ -1589,7 +1620,7 @@ relocate_imageboot_source(vnode_t pvp, vnode_t vp, struct componentname *cnp, IMGSRC_DEBUG("Starting updated.\n"); /* Get exclusive rwlock on mount, authorize update on mp */ - error = mount_begin_update(mp , ctx, 0); + error = mount_begin_update(mp, ctx, 0); if (error != 0) { IMGSRC_DEBUG("Starting updated failed with %d\n", error); goto out0; @@ -1719,22 +1750,23 @@ enablequotas(struct mount *mp, vfs_context_t ctx) const char *qfextension[] = INITQFNAMES; /* XXX Shoulkd be an MNTK_ flag, instead of strncmp()'s */ - if (strncmp(mp->mnt_vfsstat.f_fstypename, "hfs", sizeof("hfs")) != 0 ) { + if (strncmp(mp->mnt_vfsstat.f_fstypename, "hfs", sizeof("hfs")) != 0) { return; } /* * Enable filesystem disk quotas if necessary. 
* We ignore errors as this should not interfere with final mount */ - for (type=0; type < MAXQUOTAS; type++) { + for (type = 0; type < MAXQUOTAS; type++) { snprintf(qfpath, sizeof(qfpath), "%s/%s.%s", mp->mnt_vfsstat.f_mntonname, qfopsname, qfextension[type]); NDINIT(&qnd, LOOKUP, OP_MOUNT, FOLLOW, UIO_SYSSPACE, - CAST_USER_ADDR_T(qfpath), ctx); - if (namei(&qnd) != 0) - continue; /* option file to trigger quotas is not present */ + CAST_USER_ADDR_T(qfpath), ctx); + if (namei(&qnd) != 0) { + continue; /* option file to trigger quotas is not present */ + } vnode_put(qnd.ni_vp); nameidone(&qnd); - snprintf(qfpath, sizeof(qfpath), "%s/%s.%s", mp->mnt_vfsstat.f_mntonname, qfname, qfextension[type]); + snprintf(qfpath, sizeof(qfpath), "%s/%s.%s", mp->mnt_vfsstat.f_mntonname, qfname, qfextension[type]); (void) VFS_QUOTACTL(mp, QCMD(Q_QUOTAON, type), 0, qfpath, ctx); } @@ -1745,7 +1777,7 @@ enablequotas(struct mount *mp, vfs_context_t ctx) static int checkdirs_callback(proc_t p, void * arg) { - struct cdirargs * cdrp = (struct cdirargs * )arg; + struct cdirargs * cdrp = (struct cdirargs *)arg; vnode_t olddp = cdrp->olddp; vnode_t newdp = cdrp->newdp; struct filedesc *fdp; @@ -1765,7 +1797,7 @@ checkdirs_callback(proc_t p, void * arg) fdp = p->p_fd; if (fdp == (struct filedesc *)0) { proc_fdunlock(p); - return(PROC_RETURNED); + return PROC_RETURNED; } fdp_cvp = fdp->fd_cdir; fdp_rvp = fdp->fd_rdir; @@ -1791,7 +1823,7 @@ checkdirs_callback(proc_t p, void * arg) fdp->fd_rdir = fdp_rvp; proc_fdunlock(p); } - return(PROC_RETURNED); + return PROC_RETURNED; } @@ -1809,15 +1841,16 @@ checkdirs(vnode_t olddp, vfs_context_t ctx) int err; struct cdirargs cdr; - if (olddp->v_usecount == 1) - return(0); + if (olddp->v_usecount == 1) { + return 0; + } err = VFS_ROOT(olddp->v_mountedhere, &newdp, ctx); if (err != 0) { #if DIAGNOSTIC panic("mount: lost mount: error %d", err); #endif - return(err); + return err; } cdr.olddp = olddp; @@ -1833,7 +1866,7 @@ checkdirs(vnode_t olddp, vfs_context_t ctx) } vnode_put(newdp); - return(0); + return 0; } /* @@ -1853,10 +1886,11 @@ unmount(__unused proc_t p, struct unmount_args *uap, __unused int32_t *retval) vfs_context_t ctx = vfs_context_current(); NDINIT(&nd, LOOKUP, OP_UNMOUNT, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; mp = vp->v_mount; nameidone(&nd); @@ -1865,7 +1899,7 @@ unmount(__unused proc_t p, struct unmount_args *uap, __unused int32_t *retval) error = mac_mount_check_umount(ctx, mp); if (error != 0) { vnode_put(vp); - return (error); + return error; } #endif /* @@ -1873,12 +1907,12 @@ unmount(__unused proc_t p, struct unmount_args *uap, __unused int32_t *retval) */ if ((vp->v_flag & VROOT) == 0) { vnode_put(vp); - return (EINVAL); + return EINVAL; } mount_ref(mp, 0); vnode_put(vp); /* safedounmount consumes the mount ref */ - return (safedounmount(mp, uap->flags, ctx)); + return safedounmount(mp, uap->flags, ctx); } int @@ -1888,12 +1922,12 @@ vfs_unmountbyfsid(fsid_t *fsid, int flags, vfs_context_t ctx) mp = mount_list_lookupby_fsid(fsid, 0, 1); if (mp == (mount_t)0) { - return(ENOENT); + return ENOENT; } mount_ref(mp, 0); mount_iterdrop(mp); /* safedounmount consumes the mount ref */ - return(safedounmount(mp, flags, ctx)); + return safedounmount(mp, flags, ctx); } @@ -1912,7 +1946,7 @@ safedounmount(struct mount *mp, int flags, vfs_context_t ctx) * is set and not a forced unmount then return EBUSY. 
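
/*
 * unmount() above resolves the path, insists on a root vnode (VROOT), and
 * hands a mount reference to safedounmount().  The user-space call, with a
 * hypothetical volume path (MNT_FORCE exercises the forcedunmount handling
 * in dounmount()):
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>

int main(void)
{
	if (unmount("/Volumes/Example", MNT_FORCE) != 0) {
		fprintf(stderr, "unmount: %s\n", strerror(errno));
		return 1;
	}
	return 0;
}
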
*/ if ((mp->mnt_kern_flag & MNT_LNOTRESP) && - (flags & MNT_NOBLOCK) && ((flags & MNT_FORCE) == 0)) { + (flags & MNT_NOBLOCK) && ((flags & MNT_FORCE) == 0)) { error = EBUSY; goto out; } @@ -1927,8 +1961,9 @@ safedounmount(struct mount *mp, int flags, vfs_context_t ctx) * permitted to unmount this filesystem. */ if ((mp->mnt_vfsstat.f_owner != kauth_cred_getuid(kauth_cred_get())) && - (error = suser(kauth_cred_get(), &p->p_acflag))) + (error = suser(kauth_cred_get(), &p->p_acflag))) { goto out; + } } /* * Don't allow unmounting the root file system. @@ -1945,11 +1980,11 @@ safedounmount(struct mount *mp, int flags, vfs_context_t ctx) } #endif /* CONFIG_IMGSRC_ACCESS */ - return (dounmount(mp, flags, 1, ctx)); + return dounmount(mp, flags, 1, ctx); out: mount_drop(mp, 0); - return(error); + return error; } /* @@ -1983,10 +2018,11 @@ dounmount(struct mount *mp, int flags, int withref, vfs_context_t ctx) * Even a forced unmount cannot override. */ if (mp->mnt_lflag & MNT_LUNMOUNT) { - if (withref != 0) + if (withref != 0) { mount_drop(mp, 1); + } mount_unlock(mp); - return (EBUSY); + return EBUSY; } if (flags & MNT_FORCE) { @@ -1995,13 +2031,14 @@ dounmount(struct mount *mp, int flags, int withref, vfs_context_t ctx) } #if CONFIG_TRIGGERS - if (flags & MNT_NOBLOCK && p != kernproc) + if (flags & MNT_NOBLOCK && p != kernproc) { pflags_save = OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag); + } #endif mp->mnt_kern_flag |= MNTK_UNMOUNT; mp->mnt_lflag |= MNT_LUNMOUNT; - mp->mnt_flag &=~ MNT_ASYNC; + mp->mnt_flag &= ~MNT_ASYNC; /* * anyone currently in the fast path that * trips over the cached rootvp will be @@ -2034,11 +2071,12 @@ dounmount(struct mount *mp, int flags, int withref, vfs_context_t ctx) lck_rw_lock_exclusive(&mp->mnt_rwlock); - if (withref != 0) + if (withref != 0) { mount_drop(mp, 0); + } error = 0; if (forcedunmount == 0) { - ubc_umount(mp); /* release cached vnodes */ + ubc_umount(mp); /* release cached vnodes */ if ((mp->mnt_flag & MNT_RDONLY) == 0) { error = VFS_SYNC(mp, MNT_WAIT, ctx); if (error) { @@ -2060,8 +2098,9 @@ dounmount(struct mount *mp, int flags, int withref, vfs_context_t ctx) vfs_nested_trigger_unmounts(mp, flags, ctx); did_vflush = 1; #endif - if (forcedunmount) + if (forcedunmount) { lflags |= FORCECLOSE; + } error = vflush(mp, NULLVP, SKIPSWAP | SKIPSYSTEM | SKIPROOT | lflags); if ((forcedunmount == 0) && error) { mount_lock(mp); @@ -2085,16 +2124,17 @@ dounmount(struct mount *mp, int flags, int withref, vfs_context_t ctx) } /* increment the operations count */ - if (!error) + if (!error) { OSAddAtomic(1, &vfs_nummntops); + } - if ( mp->mnt_devvp && mp->mnt_vtable->vfc_vfsflags & VFC_VFSLOCALARGS) { + if (mp->mnt_devvp && mp->mnt_vtable->vfc_vfsflags & VFC_VFSLOCALARGS) { /* hold an io reference and drop the usecount before close */ devvp = mp->mnt_devvp; vnode_getalways(devvp); vnode_rele(devvp); - VNOP_CLOSE(devvp, mp->mnt_flag & MNT_RDONLY ? FREAD : FREAD|FWRITE, - ctx); + VNOP_CLOSE(devvp, mp->mnt_flag & MNT_RDONLY ? 
FREAD : FREAD | FWRITE, + ctx); vnode_clearmountedon(devvp); vnode_put(devvp); } @@ -2126,13 +2166,13 @@ dounmount(struct mount *mp, int flags, int withref, vfs_context_t ctx) mp->mnt_vtable->vfc_refcount--; mount_list_unlock(); - cache_purgevfs(mp); /* remove cache entries for this file sys */ + cache_purgevfs(mp); /* remove cache entries for this file sys */ vfs_event_signal(NULL, VQ_UNMOUNT, (intptr_t)NULL); mount_lock(mp); mp->mnt_lflag |= MNT_LDEAD; if (mp->mnt_lflag & MNT_LWAIT) { - /* + /* * do the wakeup here * in case we block in mount_refdrain * which will drop the mount lock @@ -2151,9 +2191,10 @@ out: #if CONFIG_TRIGGERS if (flags & MNT_NOBLOCK && p != kernproc) { - // Restore P_NOREMOTEHANG bit to its previous value - if ((pflags_save & P_NOREMOTEHANG) == 0) + // Restore P_NOREMOTEHANG bit to its previous value + if ((pflags_save & P_NOREMOTEHANG) == 0) { OSBitAndAtomic(~((uint32_t) P_NOREMOTEHANG), &p->p_flag); + } } /* @@ -2177,8 +2218,9 @@ out: lck_rw_done(&mp->mnt_rwlock); - if (needwakeup) + if (needwakeup) { wakeup((caddr_t)mp); + } if (!error) { if ((coveredvp != NULLVP)) { @@ -2220,15 +2262,16 @@ out: vnode_put(pvp); } } else if (mp->mnt_flag & MNT_ROOTFS) { - mount_lock_destroy(mp); + mount_lock_destroy(mp); #if CONFIG_MACF - mac_mount_label_destroy(mp); + mac_mount_label_destroy(mp); #endif - FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); - } else + FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); + } else { panic("dounmount: no coveredvp"); + } } - return (error); + return error; } /* @@ -2237,7 +2280,7 @@ out: void dounmount_submounts(struct mount *mp, int flags, vfs_context_t ctx) { - mount_t smp; + mount_t smp; fsid_t *fsids, fsid; int fsids_sz; int count = 0, i, m = 0; @@ -2247,14 +2290,14 @@ dounmount_submounts(struct mount *mp, int flags, vfs_context_t ctx) // Get an array to hold the submounts fsids. TAILQ_FOREACH(smp, &mountlist, mnt_list) - count++; + count++; fsids_sz = count * sizeof(fsid_t); MALLOC(fsids, fsid_t *, fsids_sz, M_TEMP, M_NOWAIT); if (fsids == NULL) { mount_list_unlock(); goto out; } - fsids[0] = mp->mnt_vfsstat.f_fsid; // Prime the pump + fsids[0] = mp->mnt_vfsstat.f_fsid; // Prime the pump /* * Fill the array with submount fsids. 
@@ -2265,9 +2308,10 @@ dounmount_submounts(struct mount *mp, int flags, vfs_context_t ctx) */ for (smp = TAILQ_NEXT(mp, mnt_list); smp; smp = TAILQ_NEXT(smp, mnt_list)) { vp = smp->mnt_vnodecovered; - if (vp == NULL) + if (vp == NULL) { continue; - fsid = vnode_mount(vp)->mnt_vfsstat.f_fsid; // Underlying fsid + } + fsid = vnode_mount(vp)->mnt_vfsstat.f_fsid; // Underlying fsid for (i = 0; i <= m; i++) { if (fsids[i].val[0] == fsid.val[0] && fsids[i].val[1] == fsid.val[1]) { @@ -2288,8 +2332,9 @@ dounmount_submounts(struct mount *mp, int flags, vfs_context_t ctx) } } out: - if (fsids) + if (fsids) { FREE(fsids, M_TEMP); + } } void @@ -2298,24 +2343,26 @@ mount_dropcrossref(mount_t mp, vnode_t dp, int need_put) vnode_lock(dp); mp->mnt_crossref--; - if (mp->mnt_crossref < 0) + if (mp->mnt_crossref < 0) { panic("mount cross refs -ve"); + } if ((mp != dp->v_mountedhere) && (mp->mnt_crossref == 0)) { - - if (need_put) + if (need_put) { vnode_put_locked(dp); + } vnode_unlock(dp); mount_lock_destroy(mp); #if CONFIG_MACF mac_mount_label_destroy(mp); #endif - FREE_ZONE((caddr_t)mp, sizeof (struct mount), M_MOUNT); + FREE_ZONE(mp, sizeof(struct mount), M_MOUNT); return; } - if (need_put) + if (need_put) { vnode_put_locked(dp); + } vnode_unlock(dp); } @@ -2327,7 +2374,7 @@ mount_dropcrossref(mount_t mp, vnode_t dp, int need_put) int syncprt = 0; #endif -int print_vmpage_stat=0; +int print_vmpage_stat = 0; static int sync_callback(mount_t mp, __unused void *arg) @@ -2337,11 +2384,12 @@ sync_callback(mount_t mp, __unused void *arg) mp->mnt_flag &= ~MNT_ASYNC; VFS_SYNC(mp, arg ? MNT_WAIT : MNT_NOWAIT, vfs_context_kernel()); - if (asyncflag) + if (asyncflag) { mp->mnt_flag |= MNT_ASYNC; + } } - return (VFS_RETURNED); + return VFS_RETURNED; } /* ARGSUSED */ @@ -2355,8 +2403,9 @@ sync(__unused proc_t p, __unused struct sync_args *uap, __unused int32_t *retval } #if DIAGNOSTIC - if (syncprt) + if (syncprt) { vfs_bufstats(); + } #endif /* DIAGNOSTIC */ return 0; } @@ -2372,18 +2421,19 @@ sync_internal_callback(mount_t mp, void *arg) { if (arg) { int is_reliable = !(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && - (mp->mnt_flag & MNT_LOCAL); + (mp->mnt_flag & MNT_LOCAL); sync_type_t sync_type = *((sync_type_t *)arg); - if ((sync_type == SYNC_ONLY_RELIABLE_MEDIA) && !is_reliable) - return (VFS_RETURNED); - else if ((sync_type = SYNC_ONLY_UNRELIABLE_MEDIA) && is_reliable) - return (VFS_RETURNED); + if ((sync_type == SYNC_ONLY_RELIABLE_MEDIA) && !is_reliable) { + return VFS_RETURNED; + } else if ((sync_type = SYNC_ONLY_UNRELIABLE_MEDIA) && is_reliable) { + return VFS_RETURNED; + } } (void)sync_callback(mp, NULL); - return (VFS_RETURNED); + return VFS_RETURNED; } int sync_thread_state = 0; @@ -2424,8 +2474,9 @@ sync_thread(__unused void *arg, __unused wait_result_t wr) } #if DIAGNOSTIC - if (syncprt) + if (syncprt) { vfs_bufstats(); + } #endif /* DIAGNOSTIC */ } @@ -2454,7 +2505,7 @@ sync_internal(void) sync_thread_state &= ~SYNC_THREAD_RUNNING; lck_mtx_unlock(sync_mtx_lck); printf("sync_thread failed\n"); - return (0); + return 0; } thread_created = TRUE; } @@ -2471,10 +2522,11 @@ sync_internal(void) } } - if (thread_created) + if (thread_created) { thread_deallocate(thd); + } - return (0); + return 0; } /* end of sync_internal call */ /* @@ -2495,10 +2547,11 @@ quotactl(proc_t p, struct quotactl_args *uap, __unused int32_t *retval) AUDIT_ARG(uid, uap->uid); AUDIT_ARG(cmd, uap->cmd); NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, - uap->path, ctx); + uap->path, ctx); error = namei(&nd); - if 
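
/*
 * sync() above walks every mount via vfs_iterate() and issues VFS_SYNC with
 * MNT_NOWAIT, so the syscall only schedules the flush.  The user-space
 * counterpart is correspondingly fire-and-forget:
 */
#include <unistd.h>

int main(void)
{
	sync();	/* request a flush of all dirty filesystem data; returns immediately */
	return 0;
}
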
(error) - return (error); + if (error) { + return error; + } mp = nd.ni_vp->v_mount; vnode_put(nd.ni_vp); nameidone(&nd); @@ -2522,14 +2575,13 @@ quotactl(proc_t p, struct quotactl_args *uap, __unused int32_t *retval) /* uap->arg is a pointer to a dqblk structure. */ datap = (caddr_t) &my_dqblk; if (proc_is64bit(p)) { - struct user_dqblk my_dqblk64; - error = copyin(uap->arg, (caddr_t)&my_dqblk64, sizeof (my_dqblk64)); + struct user_dqblk my_dqblk64; + error = copyin(uap->arg, (caddr_t)&my_dqblk64, sizeof(my_dqblk64)); if (error == 0) { munge_dqblk(&my_dqblk, &my_dqblk64, FALSE); } - } - else { - error = copyin(uap->arg, (caddr_t)&my_dqblk, sizeof (my_dqblk)); + } else { + error = copyin(uap->arg, (caddr_t)&my_dqblk, sizeof(my_dqblk)); } break; case Q_QUOTASTAT: @@ -2547,21 +2599,21 @@ quotactl(proc_t p, struct quotactl_args *uap, __unused int32_t *retval) switch (quota_cmd) { case Q_QUOTAON: - if (datap != NULL) + if (datap != NULL) { kfree(datap, MAXPATHLEN); + } break; case Q_GETQUOTA: /* uap->arg is a pointer to a dqblk structure we need to copy out to */ if (error == 0) { if (proc_is64bit(p)) { - struct user_dqblk my_dqblk64; + struct user_dqblk my_dqblk64; memset(&my_dqblk64, 0, sizeof(my_dqblk64)); munge_dqblk(&my_dqblk, &my_dqblk64, TRUE); - error = copyout((caddr_t)&my_dqblk64, uap->arg, sizeof (my_dqblk64)); - } - else { - error = copyout(datap, uap->arg, sizeof (struct dqblk)); + error = copyout((caddr_t)&my_dqblk64, uap->arg, sizeof(my_dqblk64)); + } else { + error = copyout(datap, uap->arg, sizeof(struct dqblk)); } } break; @@ -2575,13 +2627,13 @@ quotactl(proc_t p, struct quotactl_args *uap, __unused int32_t *retval) break; } /* switch */ - return (error); + return error; } #else int quotactl(__unused proc_t p, __unused struct quotactl_args *uap, __unused int32_t *retval) { - return (EOPNOTSUPP); + return EOPNOTSUPP; } #endif /* QUOTA */ @@ -2605,10 +2657,11 @@ statfs(__unused proc_t p, struct statfs_args *uap, __unused int32_t *retval) vnode_t vp; NDINIT(&nd, LOOKUP, OP_STATFS, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); - if (error != 0) - return (error); + if (error != 0) { + return error; + } vp = nd.ni_vp; mp = vp->v_mount; sp = &mp->mnt_vfsstat; @@ -2616,19 +2669,20 @@ statfs(__unused proc_t p, struct statfs_args *uap, __unused int32_t *retval) #if CONFIG_MACF error = mac_mount_check_stat(ctx, mp); - if (error != 0) - return (error); + if (error != 0) { + return error; + } #endif error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT); if (error != 0) { vnode_put(vp); - return (error); + return error; } error = munge_statfs(mp, sp, uap->buf, NULL, IS_64BIT_PROCESS(p), TRUE); vnode_put(vp); - return (error); + return error; } /* @@ -2645,13 +2699,14 @@ fstatfs(__unused proc_t p, struct fstatfs_args *uap, __unused int32_t *retval) AUDIT_ARG(fd, uap->fd); - if ( (error = file_vnode(uap->fd, &vp)) ) - return (error); + if ((error = file_vnode(uap->fd, &vp))) { + return error; + } error = vnode_getwithref(vp); if (error) { file_drop(uap->fd); - return (error); + return error; } AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1); @@ -2664,8 +2719,9 @@ fstatfs(__unused proc_t p, struct fstatfs_args *uap, __unused int32_t *retval) #if CONFIG_MACF error = mac_mount_check_stat(vfs_context_current(), mp); - if (error != 0) + if (error != 0) { goto out; + } #endif sp = &mp->mnt_vfsstat; @@ -2679,7 +2735,7 @@ out: file_drop(uap->fd); vnode_put(vp); - return (error); + return error; } /* @@ -2715,7 +2771,7 @@ 
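
/*
 * quotactl() above special-cases each command: Q_QUOTAON copies in a path,
 * while Q_GETQUOTA/Q_SETQUOTA munge struct dqblk between the 32- and 64-bit
 * layouts.  A hedged user-space sketch querying the caller's own user quota
 * (quotas are disabled on most volumes, so an error return is the normal
 * outcome):
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/quota.h>

int main(void)
{
	struct dqblk dq;

	if (quotactl("/", QCMD(Q_GETQUOTA, USRQUOTA), (int)getuid(),
	    (caddr_t)&dq) != 0) {
		fprintf(stderr, "quotactl: %s\n", strerror(errno));
		return 1;
	}
	printf("bytes in use: %llu\n", (unsigned long long)dq.dqb_curbytes);
	return 0;
}
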
statfs64_common(struct mount *mp, struct vfsstatfs *sfsp, user_addr_t bufp) error = copyout((caddr_t)&sfs, bufp, sizeof(sfs)); - return(error); + return error; } /* @@ -2732,10 +2788,11 @@ statfs64(__unused struct proc *p, struct statfs64_args *uap, __unused int32_t *r vnode_t vp; NDINIT(&nd, LOOKUP, OP_STATFS, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctxp); + UIO_USERSPACE, uap->path, ctxp); error = namei(&nd); - if (error != 0) - return (error); + if (error != 0) { + return error; + } vp = nd.ni_vp; mp = vp->v_mount; sp = &mp->mnt_vfsstat; @@ -2743,20 +2800,21 @@ statfs64(__unused struct proc *p, struct statfs64_args *uap, __unused int32_t *r #if CONFIG_MACF error = mac_mount_check_stat(ctxp, mp); - if (error != 0) - return (error); + if (error != 0) { + return error; + } #endif error = vfs_update_vfsstat(mp, ctxp, VFS_USER_EVENT); if (error != 0) { vnode_put(vp); - return (error); + return error; } error = statfs64_common(mp, sp, uap->buf); vnode_put(vp); - return (error); + return error; } /* @@ -2772,13 +2830,14 @@ fstatfs64(__unused struct proc *p, struct fstatfs64_args *uap, __unused int32_t AUDIT_ARG(fd, uap->fd); - if ( (error = file_vnode(uap->fd, &vp)) ) - return (error); + if ((error = file_vnode(uap->fd, &vp))) { + return error; + } error = vnode_getwithref(vp); if (error) { file_drop(uap->fd); - return (error); + return error; } AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1); @@ -2791,8 +2850,9 @@ fstatfs64(__unused struct proc *p, struct fstatfs64_args *uap, __unused int32_t #if CONFIG_MACF error = mac_mount_check_stat(vfs_context_current(), mp); - if (error != 0) + if (error != 0) { goto out; + } #endif sp = &mp->mnt_vfsstat; @@ -2806,23 +2866,22 @@ out: file_drop(uap->fd); vnode_put(vp); - return (error); + return error; } struct getfsstat_struct { - user_addr_t sfsp; - user_addr_t *mp; - int count; - int maxcount; - int flags; - int error; + user_addr_t sfsp; + user_addr_t *mp; + int count; + int maxcount; + int flags; + int error; }; static int getfsstat_callback(mount_t mp, void * arg) { - struct getfsstat_struct *fstp = (struct getfsstat_struct *)arg; struct vfsstatfs *sp; int error, my_size; @@ -2833,7 +2892,7 @@ getfsstat_callback(mount_t mp, void * arg) error = mac_mount_check_stat(ctx, mp); if (error != 0) { fstp->error = error; - return(VFS_RETURNED_DONE); + return VFS_RETURNED_DONE; } #endif sp = &mp->mnt_vfsstat; @@ -2842,10 +2901,10 @@ getfsstat_callback(mount_t mp, void * arg) * fsstat cache. MNT_WAIT/MNT_DWAIT overrides MNT_NOWAIT. 
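
/*
 * statfs() and statfs64() above both fill a struct vfsstatfs from the mount
 * and copy a munged version out to the caller.  The user-space view of the
 * same data ("/" is just an example path):
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/param.h>
#include <sys/mount.h>

int main(void)
{
	struct statfs sfs;

	if (statfs("/", &sfs) != 0) {
		fprintf(stderr, "statfs: %s\n", strerror(errno));
		return 1;
	}
	printf("%s on %s (%s): %llu of %llu blocks free\n",
	    sfs.f_mntfromname, sfs.f_mntonname, sfs.f_fstypename,
	    (unsigned long long)sfs.f_bavail, (unsigned long long)sfs.f_blocks);
	return 0;
}
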
*/ if (((fstp->flags & MNT_NOWAIT) == 0 || (fstp->flags & (MNT_WAIT | MNT_DWAIT))) && - (error = vfs_update_vfsstat(mp, ctx, - VFS_USER_EVENT))) { + (error = vfs_update_vfsstat(mp, ctx, + VFS_USER_EVENT))) { KAUTH_DEBUG("vfs_update_vfsstat returned %d", error); - return(VFS_RETURNED); + return VFS_RETURNED; } /* @@ -2854,7 +2913,7 @@ getfsstat_callback(mount_t mp, void * arg) error = munge_statfs(mp, sp, fstp->sfsp, &my_size, IS_64BIT_PROCESS(vfs_context_proc(ctx)), FALSE); if (error) { fstp->error = error; - return(VFS_RETURNED_DONE); + return VFS_RETURNED_DONE; } fstp->sfsp += my_size; @@ -2863,14 +2922,14 @@ getfsstat_callback(mount_t mp, void * arg) error = mac_mount_label_get(mp, *fstp->mp); if (error) { fstp->error = error; - return(VFS_RETURNED_DONE); + return VFS_RETURNED_DONE; } #endif fstp->mp++; } } fstp->count++; - return(VFS_RETURNED); + return VFS_RETURNED; } /* @@ -2887,7 +2946,7 @@ getfsstat(__unused proc_t p, struct getfsstat_args *uap, int *retval) muap.macsize = 0; muap.flags = uap->flags; - return (__mac_getfsstat(p, &muap, retval)); + return __mac_getfsstat(p, &muap, retval); } /* @@ -2921,8 +2980,7 @@ __mac_getfsstat(__unused proc_t p, struct __mac_getfsstat_args *uap, int *retval if (IS_64BIT_PROCESS(p)) { maxcount = bufsize / sizeof(struct user64_statfs); - } - else { + } else { maxcount = bufsize / sizeof(struct user32_statfs); } sfsp = uap->buf; @@ -2937,33 +2995,35 @@ __mac_getfsstat(__unused proc_t p, struct __mac_getfsstat_args *uap, int *retval unsigned int i; count = (macsize / (IS_64BIT_PROCESS(p) ? 8 : 4)); - if (count != maxcount) - return (EINVAL); + if (count != maxcount) { + return EINVAL; + } /* Copy in the array */ MALLOC(mp0, u_int32_t *, macsize, M_MACTEMP, M_WAITOK); if (mp0 == NULL) { - return (ENOMEM); + return ENOMEM; } error = copyin(uap->mac, mp0, macsize); if (error) { FREE(mp0, M_MACTEMP); - return (error); + return error; } /* Normalize to an array of user_addr_t */ MALLOC(mp, user_addr_t *, count * sizeof(user_addr_t), M_MACTEMP, M_WAITOK); if (mp == NULL) { FREE(mp0, M_MACTEMP); - return (ENOMEM); + return ENOMEM; } for (i = 0; i < count; i++) { - if (IS_64BIT_PROCESS(p)) + if (IS_64BIT_PROCESS(p)) { mp[i] = ((user_addr_t *)mp0)[i]; - else + } else { mp[i] = (user_addr_t)mp0[i]; + } } FREE(mp0, M_MACTEMP); } @@ -2980,19 +3040,21 @@ __mac_getfsstat(__unused proc_t p, struct __mac_getfsstat_args *uap, int *retval vfs_iterate(0, getfsstat_callback, &fst); - if (mp) + if (mp) { FREE(mp, M_MACTEMP); + } - if (fst.error ) { + if (fst.error) { KAUTH_DEBUG("ERROR - %s gets %d", p->p_comm, fst.error); - return(fst.error); + return fst.error; } - if (fst.sfsp && fst.count > fst.maxcount) + if (fst.sfsp && fst.count > fst.maxcount) { *retval = fst.maxcount; - else + } else { *retval = fst.count; - return (0); + } + return 0; } static int @@ -3007,7 +3069,7 @@ getfsstat64_callback(mount_t mp, void * arg) error = mac_mount_check_stat(vfs_context_current(), mp); if (error != 0) { fstp->error = error; - return(VFS_RETURNED_DONE); + return VFS_RETURNED_DONE; } #endif sp = &mp->mnt_vfsstat; @@ -3020,21 +3082,21 @@ getfsstat64_callback(mount_t mp, void * arg) * namespace. 
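/*
 * Sketch (not part of the patch): the MNT_NOWAIT test above decides
 * whether cached vfsstat data is good enough or vfs_update_vfsstat()
 * must ask the filesystem to refresh it. The same flag is visible from
 * userspace on macOS/BSD; a NULL buffer just counts mounted filesystems:
 */
#include <stdio.h>
#include <sys/param.h>
#include <sys/mount.h>

int
main(void)
{
	/* MNT_NOWAIT: accept possibly-stale cached statistics. */
	int n = getfsstat(NULL, 0, MNT_NOWAIT);

	if (n < 0) {
		perror("getfsstat");
		return 1;
	}
	printf("%d filesystems mounted\n", n);
	return 0;
}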
*/ if (((fstp->flags & MNT_NOWAIT) == 0 || - (fstp->flags & (MNT_WAIT | MNT_DWAIT))) && + (fstp->flags & (MNT_WAIT | MNT_DWAIT))) && (error = vfs_update_vfsstat(mp, vfs_context_current(), VFS_USER_EVENT))) { KAUTH_DEBUG("vfs_update_vfsstat returned %d", error); - return(VFS_RETURNED); + return VFS_RETURNED; } error = statfs64_common(mp, sp, fstp->sfsp); if (error) { fstp->error = error; - return(VFS_RETURNED_DONE); + return VFS_RETURNED_DONE; } fstp->sfsp += sizeof(struct statfs64); } fstp->count++; - return(VFS_RETURNED); + return VFS_RETURNED; } /* @@ -3060,17 +3122,18 @@ getfsstat64(__unused proc_t p, struct getfsstat64_args *uap, int *retval) vfs_iterate(0, getfsstat64_callback, &fst); - if (fst.error ) { + if (fst.error) { KAUTH_DEBUG("ERROR - %s gets %d", p->p_comm, fst.error); - return(fst.error); + return fst.error; } - if (fst.sfsp && fst.count > fst.maxcount) + if (fst.sfsp && fst.count > fst.maxcount) { *retval = fst.maxcount; - else + } else { *retval = fst.count; + } - return (0); + return 0; } /* @@ -3097,18 +3160,19 @@ vnode_getfromfd(vfs_context_t ctx, int fd, vnode_t *vpp) *vpp = NULLVP; error = fp_getfvp(p, fd, &fp, &vp); - if (error) - return (error); + if (error) { + return error; + } error = vnode_getwithref(vp); if (error) { (void)fp_drop(p, fd, fp, 0); - return (error); + return error; } (void)fp_drop(p, fd, fp, 0); *vpp = vp; - return (error); + return error; } /* @@ -3130,8 +3194,9 @@ nameiat(struct nameidata *ndp, int dirfd) if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) { error = copyin(ndp->ni_dirp, &c, sizeof(char)); - if (error) - return (error); + if (error) { + return error; + } } else { c = *((char *)(ndp->ni_dirp)); } @@ -3141,12 +3206,13 @@ nameiat(struct nameidata *ndp, int dirfd) error = vnode_getfromfd(ndp->ni_cnd.cn_context, dirfd, &dvp_at); - if (error) - return (error); + if (error) { + return error; + } if (vnode_vtype(dvp_at) != VDIR) { vnode_put(dvp_at); - return (ENOTDIR); + return ENOTDIR; } ndp->ni_dvp = dvp_at; @@ -3154,11 +3220,11 @@ nameiat(struct nameidata *ndp, int dirfd) error = namei(ndp); ndp->ni_cnd.cn_flags &= ~USEDVP; vnode_put(dvp_at); - return (error); + return error; } } - return (namei(ndp)); + return namei(ndp); } /* @@ -3191,17 +3257,18 @@ common_fchdir(proc_t p, struct fchdir_args *uap, int per_thread) uth->uu_cdir = NULLVP; if (tvp != NULLVP) { vnode_rele(tvp); - return (0); + return 0; } } - return (EBADF); + return EBADF; } - if ( (error = file_vnode(uap->fd, &vp)) ) - return(error); - if ( (error = vnode_getwithref(vp)) ) { - file_drop(uap->fd); - return(error); + if ((error = file_vnode(uap->fd, &vp))) { + return error; + } + if ((error = vnode_getwithref(vp))) { + file_drop(uap->fd); + return error; } AUDIT_ARG(vnpath, vp, ARG_VNODE1); @@ -3213,12 +3280,14 @@ common_fchdir(proc_t p, struct fchdir_args *uap, int per_thread) #if CONFIG_MACF error = mac_vnode_check_chdir(ctx, vp); - if (error) + if (error) { goto out; + } #endif error = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx); - if (error) + if (error) { goto out; + } while (!error && (mp = vp->v_mountedhere) != NULL) { if (vfs_busy(mp, LK_NOWAIT)) { @@ -3227,15 +3296,18 @@ common_fchdir(proc_t p, struct fchdir_args *uap, int per_thread) } error = VFS_ROOT(mp, &tdp, ctx); vfs_unbusy(mp); - if (error) + if (error) { break; + } vnode_put(vp); vp = tdp; } - if (error) + if (error) { + goto out; + } + if ((error = vnode_ref(vp))) { goto out; - if ( (error = vnode_ref(vp)) ) - goto out; + } vnode_put(vp); if (per_thread) { @@ -3247,7 +3319,7 @@ common_fchdir(proc_t p, 
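/*
 * Sketch (not part of the patch): nameiat() above implements the *at()
 * rule -- peek at the first byte of the path; if it is not '/', take an
 * iocount on the directory vnode behind dirfd and start the lookup there
 * (USEDVP). The userspace-visible behavior:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int dirfd = open("/tmp", O_RDONLY | O_DIRECTORY);

	if (dirfd < 0) {
		perror("open /tmp");
		return 1;
	}
	/* Relative path: resolved under dirfd, i.e. /tmp/demo.txt. */
	int fd = openat(dirfd, "demo.txt", O_CREAT | O_WRONLY, 0644);
	if (fd >= 0) {
		close(fd);
		unlinkat(dirfd, "demo.txt", 0);
	}
	/* Absolute path: dirfd is ignored, exactly as in nameiat(). */
	fd = openat(dirfd, "/etc/hosts", O_RDONLY);
	if (fd >= 0) {
		close(fd);
	}
	close(dirfd);
	return 0;
}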
struct fchdir_args *uap, int per_thread) OSBitOrAtomic(P_THCWD, &p->p_flag); } else { vnode_rele(vp); - return (ENOENT); + return ENOENT; } } else { proc_fdlock(p); @@ -3256,16 +3328,17 @@ common_fchdir(proc_t p, struct fchdir_args *uap, int per_thread) proc_fdunlock(p); } - if (tvp) - vnode_rele(tvp); + if (tvp) { + vnode_rele(tvp); + } file_drop(uap->fd); - return (0); + return 0; out: vnode_put(vp); file_drop(uap->fd); - return(error); + return error; } int @@ -3299,13 +3372,14 @@ common_chdir(proc_t p, struct chdir_args *uap, int per_thread) vfs_context_t ctx = vfs_context_current(); NDINIT(&nd, LOOKUP, OP_CHDIR, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = change_dir(&nd, ctx); - if (error) - return (error); - if ( (error = vnode_ref(nd.ni_vp)) ) { - vnode_put(nd.ni_vp); - return (error); + if (error) { + return error; + } + if ((error = vnode_ref(nd.ni_vp))) { + vnode_put(nd.ni_vp); + return error; } /* * drop the iocount we picked up in change_dir @@ -3321,7 +3395,7 @@ common_chdir(proc_t p, struct chdir_args *uap, int per_thread) OSBitOrAtomic(P_THCWD, &p->p_flag); } else { vnode_rele(nd.ni_vp); - return (ENOENT); + return ENOENT; } } else { proc_fdlock(p); @@ -3330,10 +3404,11 @@ common_chdir(proc_t p, struct chdir_args *uap, int per_thread) proc_fdunlock(p); } - if (tvp) - vnode_rele(tvp); + if (tvp) { + vnode_rele(tvp); + } - return (0); + return 0; } @@ -3343,15 +3418,15 @@ common_chdir(proc_t p, struct chdir_args *uap, int per_thread) * Change current working directory (".") for the entire process * * Parameters: p Process requesting the call - * uap User argument descriptor (see below) - * retval (ignored) + * uap User argument descriptor (see below) + * retval (ignored) * * Indirect parameters: uap->path Directory path * * Returns: 0 Success - * common_chdir: ENOTDIR - * common_chdir: ENOENT No such file or directory - * common_chdir: ??? + * common_chdir: ENOTDIR + * common_chdir: ENOENT No such file or directory + * common_chdir: ??? * */ int @@ -3366,13 +3441,13 @@ chdir(proc_t p, struct chdir_args *uap, __unused int32_t *retval) * Change current working directory (".") for a single thread * * Parameters: p Process requesting the call - * uap User argument descriptor (see below) - * retval (ignored) + * uap User argument descriptor (see below) + * retval (ignored) * * Indirect parameters: uap->path Directory path * * Returns: 0 Success - * common_chdir: ENOTDIR + * common_chdir: ENOTDIR * common_chdir: ENOENT No such file or directory * common_chdir: ??? 
* @@ -3397,27 +3472,29 @@ chroot(proc_t p, struct chroot_args *uap, __unused int32_t *retval) vnode_t tvp; vfs_context_t ctx = vfs_context_current(); - if ((error = suser(kauth_cred_get(), &p->p_acflag))) - return (error); + if ((error = suser(kauth_cred_get(), &p->p_acflag))) { + return error; + } NDINIT(&nd, LOOKUP, OP_CHROOT, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = change_dir(&nd, ctx); - if (error) - return (error); + if (error) { + return error; + } #if CONFIG_MACF error = mac_vnode_check_chroot(ctx, nd.ni_vp, &nd.ni_cnd); if (error) { vnode_put(nd.ni_vp); - return (error); + return error; } #endif - if ( (error = vnode_ref(nd.ni_vp)) ) { - vnode_put(nd.ni_vp); - return (error); + if ((error = vnode_ref(nd.ni_vp))) { + vnode_put(nd.ni_vp); + return error; } vnode_put(nd.ni_vp); @@ -3427,10 +3504,11 @@ chroot(proc_t p, struct chroot_args *uap, __unused int32_t *retval) fdp->fd_flags |= FD_CHROOT; proc_fdunlock(p); - if (tvp != NULL) + if (tvp != NULL) { vnode_rele(tvp); + } - return (0); + return 0; } /* @@ -3447,31 +3525,32 @@ change_dir(struct nameidata *ndp, vfs_context_t ctx) vnode_t vp; int error; - if ((error = namei(ndp))) - return (error); + if ((error = namei(ndp))) { + return error; + } nameidone(ndp); vp = ndp->ni_vp; if (vp->v_type != VDIR) { vnode_put(vp); - return (ENOTDIR); + return ENOTDIR; } #if CONFIG_MACF error = mac_vnode_check_chdir(ctx, vp); if (error) { vnode_put(vp); - return (error); + return error; } #endif error = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, ctx); if (error) { vnode_put(vp); - return (error); + return error; } - return (error); + return error; } /* @@ -3484,7 +3563,7 @@ fg_vn_data_alloc(void) /* Allocate per fd vnode data */ MALLOC(fvdata, struct fd_vn_data *, (sizeof(struct fd_vn_data)), - M_FD_VN_DATA, M_WAITOK | M_ZERO); + M_FD_VN_DATA, M_WAITOK | M_ZERO); lck_mtx_init(&fvdata->fv_lock, fd_vn_lck_grp, fd_vn_lck_attr); return fvdata; } @@ -3497,8 +3576,9 @@ fg_vn_data_free(void *fgvndata) { struct fd_vn_data *fvdata = (struct fd_vn_data *)fgvndata; - if (fvdata->fv_buf) + if (fvdata->fv_buf) { FREE(fvdata->fv_buf, M_FD_DIRBUF); + } lck_mtx_destroy(&fvdata->fv_lock, fd_vn_lck_grp); FREE(fvdata, M_FD_VN_DATA); } @@ -3536,8 +3616,9 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, oflags = uflags; - if ((oflags & O_ACCMODE) == O_ACCMODE) - return(EINVAL); + if ((oflags & O_ACCMODE) == O_ACCMODE) { + return EINVAL; + } flags = FFLAGS(uflags); CLR(flags, FENCRYPTED); @@ -3548,22 +3629,23 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, if ((error = falloc_withalloc(p, &fp, &indx, ctx, fp_zalloc, cra)) != 0) { - return (error); + return error; } uu->uu_dupfd = -indx - 1; if ((error = vn_open_auth(ndp, &flags, vap))) { - if ((error == ENODEV || error == ENXIO) && (uu->uu_dupfd >= 0)){ /* XXX from fdopen */ + if ((error == ENODEV || error == ENXIO) && (uu->uu_dupfd >= 0)) { /* XXX from fdopen */ if ((error = dupfdopen(p->p_fd, indx, uu->uu_dupfd, flags, error)) == 0) { fp_drop(p, indx, NULL, 0); - *retval = indx; - return (0); + *retval = indx; + return 0; } } - if (error == ERESTART) - error = EINTR; + if (error == ERESTART) { + error = EINTR; + } fp_free(p, indx, fp); - return (error); + return error; } uu->uu_dupfd = 0; vp = ndp->ni_vp; @@ -3576,21 +3658,25 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, lf.l_whence = SEEK_SET; lf.l_start = 0; lf.l_len = 0; - if (flags & O_EXLOCK) + if (flags & O_EXLOCK) { lf.l_type = F_WRLCK; - else + } else { 
lf.l_type = F_RDLCK; + } type = F_FLOCK; - if ((flags & FNONBLOCK) == 0) + if ((flags & FNONBLOCK) == 0) { type |= F_WAIT; + } #if CONFIG_MACF error = mac_file_check_lock(vfs_context_ucred(ctx), fp->f_fglob, F_SETLK, &lf); - if (error) + if (error) { goto bad; + } #endif - if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, ctx, NULL))) + if ((error = VNOP_ADVLOCK(vp, (caddr_t)fp->f_fglob, F_SETLK, &lf, type, ctx, NULL))) { goto bad; + } fp->f_fglob->fg_flag |= FHASLOCK; } @@ -3609,8 +3695,9 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, #endif /* DEVELOPMENT || DEBUG */ /* try to truncate by setting the size attribute */ - if ((flags & O_TRUNC) && ((error = vnode_setsize(vp, (off_t)0, 0, ctx)) != 0)) + if ((flags & O_TRUNC) && ((error = vnode_setsize(vp, (off_t)0, 0, ctx)) != 0)) { goto bad; + } /* * For directories we hold some additional information in the fd. @@ -3636,10 +3723,12 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, } proc_fdlock(p); - if (flags & O_CLOEXEC) + if (flags & O_CLOEXEC) { *fdflags(p, indx) |= UF_EXCLOSE; - if (flags & O_CLOFORK) + } + if (flags & O_CLOFORK) { *fdflags(p, indx) |= UF_FORKCLOSE; + } procfdtbl_releasefd(p, indx, NULL); #if CONFIG_SECLUDED_MEMORY @@ -3655,29 +3744,29 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, } else if (fp->f_fglob->fg_flag & FWRITE) { /* writable -> no longer eligible for secluded pages */ memory_object_mark_eligible_for_secluded(moc, - FALSE); + FALSE); } else if (secluded_for_filecache == 1) { char pathname[32] = { 0, }; size_t copied; /* XXX FBDP: better way to detect /Applications/ ? */ if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) { copyinstr(ndp->ni_dirp, - pathname, - sizeof (pathname), - &copied); + pathname, + sizeof(pathname), + &copied); } else { copystr(CAST_DOWN(void *, ndp->ni_dirp), - pathname, - sizeof (pathname), - &copied); + pathname, + sizeof(pathname), + &copied); } - pathname[sizeof (pathname) - 1] = '\0'; + pathname[sizeof(pathname) - 1] = '\0'; if (strncmp(pathname, - "/Applications/", - strlen("/Applications/")) == 0 && + "/Applications/", + strlen("/Applications/")) == 0 && strncmp(pathname, - "/Applications/Camera.app/", - strlen("/Applications/Camera.app/")) != 0) { + "/Applications/Camera.app/", + strlen("/Applications/Camera.app/")) != 0) { /* * not writable * AND from "/Applications/" @@ -3685,7 +3774,7 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, * ==> eligible for secluded */ memory_object_mark_eligible_for_secluded(moc, - TRUE); + TRUE); } } else if (secluded_for_filecache == 2) { #if __arm64__ @@ -3696,33 +3785,33 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, /* not implemented... 
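/*
 * Sketch (not part of the patch): the O_EXLOCK/O_SHLOCK handling above
 * takes a whole-file advisory lock at open time via VNOP_ADVLOCK with
 * F_FLOCK semantics; FNONBLOCK decides whether F_WAIT is set. From
 * userspace on macOS/BSD the same thing looks like this:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int
main(void)
{
#ifdef O_EXLOCK
	/* O_NONBLOCK here applies to the lock: fail instead of waiting. */
	int fd = open("/tmp/lockdemo",
	    O_CREAT | O_RDWR | O_EXLOCK | O_NONBLOCK, 0644);
#else
	/* Portable two-step equivalent where O_EXLOCK is unavailable. */
	int fd = open("/tmp/lockdemo", O_CREAT | O_RDWR, 0644);
	if (fd >= 0 && flock(fd, LOCK_EX | LOCK_NB) != 0) {
		perror("flock");
		close(fd);
		return 1;
	}
#endif
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ... exclusive access to /tmp/lockdemo here ... */
	close(fd);      /* closing the last reference drops the lock */
	return 0;
}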
*/ #endif if (!strncmp(vp->v_name, - DYLD_SHARED_CACHE_NAME, - strlen(DYLD_SHARED_CACHE_NAME)) || + DYLD_SHARED_CACHE_NAME, + strlen(DYLD_SHARED_CACHE_NAME)) || !strncmp(vp->v_name, - "dyld", - strlen(vp->v_name)) || + "dyld", + strlen(vp->v_name)) || !strncmp(vp->v_name, - "launchd", - strlen(vp->v_name)) || + "launchd", + strlen(vp->v_name)) || !strncmp(vp->v_name, - "Camera", - strlen(vp->v_name)) || + "Camera", + strlen(vp->v_name)) || !strncmp(vp->v_name, - "mediaserverd", - strlen(vp->v_name)) || + "mediaserverd", + strlen(vp->v_name)) || !strncmp(vp->v_name, - "SpringBoard", - strlen(vp->v_name)) || + "SpringBoard", + strlen(vp->v_name)) || !strncmp(vp->v_name, - "backboardd", - strlen(vp->v_name))) { + "backboardd", + strlen(vp->v_name))) { /* * This file matters when launching Camera: * do not store its contents in the secluded * pool that will be drained on Camera launch. */ memory_object_mark_eligible_for_secluded(moc, - FALSE); + FALSE); } } } @@ -3733,27 +3822,27 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags, *retval = indx; - return (0); + return 0; bad: context = *vfs_context_current(); context.vc_ucred = fp->f_fglob->fg_cred; - if ((fp->f_fglob->fg_flag & FHASLOCK) && + if ((fp->f_fglob->fg_flag & FHASLOCK) && (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_VNODE)) { lf.l_whence = SEEK_SET; - lf.l_start = 0; - lf.l_len = 0; - lf.l_type = F_UNLCK; + lf.l_start = 0; + lf.l_len = 0; + lf.l_type = F_UNLCK; - (void)VNOP_ADVLOCK( - vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx, NULL); + (void)VNOP_ADVLOCK( + vp, (caddr_t)fp->f_fglob, F_UNLCK, &lf, F_FLOCK, ctx, NULL); } vn_close(vp, fp->f_fglob->fg_flag, &context); vnode_put(vp); fp_free(p, indx, fp); - return (error); + return error; } /* @@ -3774,8 +3863,9 @@ open1at(vfs_context_t ctx, struct nameidata *ndp, int uflags, if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) { error = copyin(ndp->ni_dirp, &c, sizeof(char)); - if (error) - return (error); + if (error) { + return error; + } } else { c = *((char *)(ndp->ni_dirp)); } @@ -3785,12 +3875,13 @@ open1at(vfs_context_t ctx, struct nameidata *ndp, int uflags, error = vnode_getfromfd(ndp->ni_cnd.cn_context, dirfd, &dvp_at); - if (error) - return (error); + if (error) { + return error; + } if (vnode_vtype(dvp_at) != VDIR) { vnode_put(dvp_at); - return (ENOTDIR); + return ENOTDIR; } ndp->ni_dvp = dvp_at; @@ -3798,11 +3889,11 @@ open1at(vfs_context_t ctx, struct nameidata *ndp, int uflags, error = open1(ctx, ndp, uflags, vap, fp_zalloc, cra, retval); vnode_put(dvp_at); - return (error); + return error; } } - return (open1(ctx, ndp, uflags, vap, fp_zalloc, cra, retval)); + return open1(ctx, ndp, uflags, vap, fp_zalloc, cra, retval); } /* @@ -3842,26 +3933,31 @@ open_extended(proc_t p, struct open_extended_args *uap, int32_t *retval) xsecdst = NULL; if ((uap->xsecurity != USER_ADDR_NULL) && - ((ciferror = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0)) + ((ciferror = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0)) { return ciferror; + } VATTR_INIT(&va); - cmode = ((uap->mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; + cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; VATTR_SET(&va, va_mode, cmode); - if (uap->uid != KAUTH_UID_NONE) + if (uap->uid != KAUTH_UID_NONE) { VATTR_SET(&va, va_uid, uap->uid); - if (uap->gid != KAUTH_GID_NONE) + } + if (uap->gid != KAUTH_GID_NONE) { VATTR_SET(&va, va_gid, uap->gid); - if (xsecdst != NULL) + } + if (xsecdst != NULL) { VATTR_SET(&va, va_acl, &xsecdst->fsec_acl); + } NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, 
UIO_USERSPACE, - uap->path, vfs_context_current()); + uap->path, vfs_context_current()); ciferror = open1(vfs_context_current(), &nd, uap->flags, &va, - fileproc_alloc_init, NULL, retval); - if (xsecdst != NULL) + fileproc_alloc_init, NULL, retval); + if (xsecdst != NULL) { kauth_filesec_free(xsecdst); + } return ciferror; } @@ -3871,7 +3967,9 @@ open_extended(proc_t p, struct open_extended_args *uap, int32_t *retval) * * int open_dprotected_np(user_addr_t path, int flags, int class, int dpflags, int mode) */ -int open_dprotected_np (__unused proc_t p, struct open_dprotected_np_args *uap, int32_t *retval) { +int +open_dprotected_np(__unused proc_t p, struct open_dprotected_np_args *uap, int32_t *retval) +{ int flags = uap->flags; int class = uap->class; int dpflags = uap->dpflags; @@ -3888,11 +3986,11 @@ int open_dprotected_np (__unused proc_t p, struct open_dprotected_np_args *uap, VATTR_INIT(&va); /* Mask off all but regular access permissions */ - cmode = ((uap->mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; + cmode = ((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; VATTR_SET(&va, va_mode, cmode & ACCESSPERMS); NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, - uap->path, vfs_context_current()); + uap->path, vfs_context_current()); /* * Initialize the extra fields in vnode_attr to pass down our @@ -3901,31 +3999,31 @@ int open_dprotected_np (__unused proc_t p, struct open_dprotected_np_args *uap, * 2. set a flag to mark it as requiring open-raw-encrypted semantics. */ if (flags & O_CREAT) { - /* lower level kernel code validates that the class is valid before applying it. */ - if (class != PROTECTION_CLASS_DEFAULT) { - /* - * PROTECTION_CLASS_DEFAULT implies that we make the class for this - * file behave the same as open (2) - */ - VATTR_SET(&va, va_dataprotect_class, class); - } - } - - if (dpflags & (O_DP_GETRAWENCRYPTED|O_DP_GETRAWUNENCRYPTED)) { - if ( flags & (O_RDWR | O_WRONLY)) { + /* lower level kernel code validates that the class is valid before applying it. 
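/*
 * Sketch (not part of the patch): the create-mode expression repeated
 * above -- ((mode & ~fd_cmask) & ALLPERMS) & ~S_ISTXT -- strips the
 * umask bits, clamps to permission bits, and drops the sticky bit
 * (S_ISTXT is the BSD spelling of S_ISVTX). The same arithmetic:
 */
#include <stdio.h>
#include <sys/stat.h>

#ifndef ALLPERMS
#define ALLPERMS (S_ISUID | S_ISGID | S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO)
#endif

int
main(void)
{
	mode_t requested = 0777;
	mode_t cmask = umask(0);        /* read the current umask... */

	umask(cmask);                   /* ...and put it back */
	mode_t cmode = ((requested & ~cmask) & ALLPERMS) & ~S_ISVTX;
	printf("umask %04o -> create mode %04o\n",
	    (unsigned)cmask, (unsigned)cmode);
	return 0;
}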
*/ + if (class != PROTECTION_CLASS_DEFAULT) { + /* + * PROTECTION_CLASS_DEFAULT implies that we make the class for this + * file behave the same as open (2) + */ + VATTR_SET(&va, va_dataprotect_class, class); + } + } + + if (dpflags & (O_DP_GETRAWENCRYPTED | O_DP_GETRAWUNENCRYPTED)) { + if (flags & (O_RDWR | O_WRONLY)) { /* Not allowed to write raw encrypted bytes */ return EINVAL; } if (uap->dpflags & O_DP_GETRAWENCRYPTED) { - VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED); + VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED); } if (uap->dpflags & O_DP_GETRAWUNENCRYPTED) { - VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED); + VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWUNENCRYPTED); } } error = open1(vfs_context_current(), &nd, uap->flags, &va, - fileproc_alloc_init, NULL, retval); + fileproc_alloc_init, NULL, retval); return error; } @@ -3941,44 +4039,44 @@ openat_internal(vfs_context_t ctx, user_addr_t path, int flags, int mode, VATTR_INIT(&va); /* Mask off all but regular access permissions */ - cmode = ((mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; + cmode = ((mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT; VATTR_SET(&va, va_mode, cmode & ACCESSPERMS); NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW | AUDITVNPATH1, segflg, path, ctx); - return (open1at(ctx, &nd, flags, &va, fileproc_alloc_init, NULL, - retval, fd)); + return open1at(ctx, &nd, flags, &va, fileproc_alloc_init, NULL, + retval, fd); } int open(proc_t p, struct open_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(open_nocancel(p, (struct open_nocancel_args *)uap, retval)); + return open_nocancel(p, (struct open_nocancel_args *)uap, retval); } int open_nocancel(__unused proc_t p, struct open_nocancel_args *uap, int32_t *retval) { - return (openat_internal(vfs_context_current(), uap->path, uap->flags, - uap->mode, AT_FDCWD, UIO_USERSPACE, retval)); + return openat_internal(vfs_context_current(), uap->path, uap->flags, + uap->mode, AT_FDCWD, UIO_USERSPACE, retval); } int openat_nocancel(__unused proc_t p, struct openat_nocancel_args *uap, - int32_t *retval) + int32_t *retval) { - return (openat_internal(vfs_context_current(), uap->path, uap->flags, - uap->mode, uap->fd, UIO_USERSPACE, retval)); + return openat_internal(vfs_context_current(), uap->path, uap->flags, + uap->mode, uap->fd, UIO_USERSPACE, retval); } int openat(proc_t p, struct openat_args *uap, int32_t *retval) { __pthread_testcancel(1); - return(openat_nocancel(p, (struct openat_nocancel_args *)uap, retval)); + return openat_nocancel(p, (struct openat_nocancel_args *)uap, retval); } /* @@ -4016,16 +4114,16 @@ openbyid_np(__unused proc_t p, struct openbyid_np_args *uap, int *retval) vfs_context_t ctx = vfs_context_current(); if ((error = priv_check_cred(vfs_context_ucred(ctx), PRIV_VFS_OPEN_BY_ID, 0))) { - return (error); + return error; } if ((error = copyin(uap->fsid, (caddr_t)&fsid, sizeof(fsid)))) { - return (error); + return error; } /*uap->obj is an fsobj_id_t defined as struct {uint32_t, uint32_t} */ if ((error = copyin(uap->objid, (caddr_t)&objid, sizeof(uint64_t)))) { - return (error); + return error; } AUDIT_ARG(value32, fsid.val[0]); @@ -4035,7 +4133,7 @@ openbyid_np(__unused proc_t p, struct openbyid_np_args *uap, int *retval) do { MALLOC(buf, char *, buflen + 1, M_TEMP, M_WAITOK); if (buf == NULL) { - return (ENOMEM); + return ENOMEM; } error = fsgetpath_internal( @@ -4075,26 +4173,29 @@ mknod(proc_t p, struct mknod_args *uap, __unused int32_t *retval) vfs_context_t ctx = vfs_context_current(); int error; struct 
nameidata nd; - vnode_t vp, dvp; + vnode_t vp, dvp; - VATTR_INIT(&va); - VATTR_SET(&va, va_mode, (uap->mode & ALLPERMS) & ~p->p_fd->fd_cmask); - VATTR_SET(&va, va_rdev, uap->dev); + VATTR_INIT(&va); + VATTR_SET(&va, va_mode, (uap->mode & ALLPERMS) & ~p->p_fd->fd_cmask); + VATTR_SET(&va, va_rdev, uap->dev); /* If it's a mknod() of a FIFO, call mkfifo1() instead */ - if ((uap->mode & S_IFMT) == S_IFIFO) - return(mkfifo1(ctx, uap->path, &va)); + if ((uap->mode & S_IFMT) == S_IFIFO) { + return mkfifo1(ctx, uap->path, &va); + } AUDIT_ARG(mode, uap->mode); AUDIT_ARG(value32, uap->dev); - if ((error = suser(vfs_context_ucred(ctx), &p->p_acflag))) - return (error); + if ((error = suser(vfs_context_ucred(ctx), &p->p_acflag))) { + return error; + } NDINIT(&nd, CREATE, OP_MKNOD, LOCKPARENT | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } dvp = nd.ni_dvp; vp = nd.ni_vp; @@ -4118,27 +4219,33 @@ mknod(proc_t p, struct mknod_args *uap, __unused int32_t *retval) #if CONFIG_MACF error = mac_vnode_check_create(ctx, nd.ni_dvp, &nd.ni_cnd, &va); - if (error) + if (error) { goto out; + } #endif - if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0) - goto out; + if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0) { + goto out; + } - if ((error = vn_create(dvp, &vp, &nd, &va, 0, 0, NULL, ctx)) != 0) + if ((error = vn_create(dvp, &vp, &nd, &va, 0, 0, NULL, ctx)) != 0) { goto out; + } if (vp) { - int update_flags = 0; + int update_flags = 0; - // Make sure the name & parent pointers are hooked up - if (vp->v_name == NULL) + // Make sure the name & parent pointers are hooked up + if (vp->v_name == NULL) { update_flags |= VNODE_UPDATE_NAME; - if (vp->v_parent == NULLVP) - update_flags |= VNODE_UPDATE_PARENT; + } + if (vp->v_parent == NULLVP) { + update_flags |= VNODE_UPDATE_PARENT; + } - if (update_flags) - vnode_update_identity(vp, dvp, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, update_flags); + if (update_flags) { + vnode_update_identity(vp, dvp, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, update_flags); + } #if CONFIG_FSE add_fsevent(FSE_CREATE_FILE, ctx, @@ -4154,11 +4261,12 @@ out: */ nameidone(&nd); - if (vp) - vnode_put(vp); + if (vp) { + vnode_put(vp); + } vnode_put(dvp); - return (error); + return error; } /* @@ -4173,29 +4281,31 @@ out: static int mkfifo1(vfs_context_t ctx, user_addr_t upath, struct vnode_attr *vap) { - vnode_t vp, dvp; + vnode_t vp, dvp; int error; struct nameidata nd; NDINIT(&nd, CREATE, OP_MKFIFO, LOCKPARENT | AUDITVNPATH1, - UIO_USERSPACE, upath, ctx); + UIO_USERSPACE, upath, ctx); error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } dvp = nd.ni_dvp; vp = nd.ni_vp; - /* check that this is a new file and authorize addition */ - if (vp != NULL) { - error = EEXIST; - goto out; - } - VATTR_SET(vap, va_type, VFIFO); + /* check that this is a new file and authorize addition */ + if (vp != NULL) { + error = EEXIST; + goto out; + } + VATTR_SET(vap, va_type, VFIFO); - if ((error = vn_authorize_create(dvp, &nd.ni_cnd, vap, ctx, NULL)) != 0) + if ((error = vn_authorize_create(dvp, &nd.ni_cnd, vap, ctx, NULL)) != 0) { goto out; + } - error = vn_create(dvp, &vp, &nd, vap, 0, 0, NULL, ctx); + error = vn_create(dvp, &vp, &nd, vap, 0, 0, NULL, ctx); out: /* * nameidone has to happen before we vnode_put(dvp) @@ -4203,8 +4313,9 @@ out: */ nameidone(&nd); - if (vp) - 
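/*
 * Sketch (not part of the patch): mknod() above redirects S_IFIFO
 * requests to mkfifo1() before the suser() check, so creating a FIFO
 * needs no privilege and these two calls end up in the same place:
 */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(void)
{
	/* Routed through the mkfifo path by the S_IFMT test in mknod(). */
	if (mknod("/tmp/fifo_via_mknod", S_IFIFO | 0644, 0) != 0) {
		perror("mknod");
	}
	if (mkfifo("/tmp/fifo_via_mkfifo", 0644) != 0) {
		perror("mkfifo");
	}
	unlink("/tmp/fifo_via_mknod");
	unlink("/tmp/fifo_via_mkfifo");
	return 0;
}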
vnode_put(vp); + if (vp) { + vnode_put(vp); + } vnode_put(dvp); return error; @@ -4243,23 +4354,28 @@ mkfifo_extended(proc_t p, struct mkfifo_extended_args *uap, __unused int32_t *re xsecdst = KAUTH_FILESEC_NONE; if (uap->xsecurity != USER_ADDR_NULL) { - if ((ciferror = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0) + if ((ciferror = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0) { return ciferror; + } } VATTR_INIT(&va); - VATTR_SET(&va, va_mode, (uap->mode & ALLPERMS) & ~p->p_fd->fd_cmask); - if (uap->uid != KAUTH_UID_NONE) + VATTR_SET(&va, va_mode, (uap->mode & ALLPERMS) & ~p->p_fd->fd_cmask); + if (uap->uid != KAUTH_UID_NONE) { VATTR_SET(&va, va_uid, uap->uid); - if (uap->gid != KAUTH_GID_NONE) + } + if (uap->gid != KAUTH_GID_NONE) { VATTR_SET(&va, va_gid, uap->gid); - if (xsecdst != KAUTH_FILESEC_NONE) + } + if (xsecdst != KAUTH_FILESEC_NONE) { VATTR_SET(&va, va_acl, &xsecdst->fsec_acl); + } ciferror = mkfifo1(vfs_context_current(), uap->path, &va); - if (xsecdst != KAUTH_FILESEC_NONE) + if (xsecdst != KAUTH_FILESEC_NONE) { kauth_filesec_free(xsecdst); + } return ciferror; } @@ -4269,10 +4385,10 @@ mkfifo(proc_t p, struct mkfifo_args *uap, __unused int32_t *retval) { struct vnode_attr va; - VATTR_INIT(&va); - VATTR_SET(&va, va_mode, (uap->mode & ALLPERMS) & ~p->p_fd->fd_cmask); + VATTR_INIT(&va); + VATTR_SET(&va, va_mode, (uap->mode & ALLPERMS) & ~p->p_fd->fd_cmask); - return(mkfifo1(vfs_context_current(), uap->path, &va)); + return mkfifo1(vfs_context_current(), uap->path, &va); } @@ -4282,10 +4398,12 @@ my_strrchr(char *p, int ch) char *save; for (save = NULL;; ++p) { - if (*p == ch) + if (*p == ch) { save = p; - if (!*p) - return(save); + } + if (!*p) { + return save; + } } /* NOTREACHED */ } @@ -4301,8 +4419,8 @@ safe_getpath(struct vnode *dvp, char *leafname, char *path, int _len, int *trunc ret = vn_getpath(dvp, path, &len); if (ret == 0 && len < (MAXPATHLEN - 1)) { if (leafname) { - path[len-1] = '/'; - len += strlcpy(&path[len], leafname, MAXPATHLEN-len) + 1; + path[len - 1] = '/'; + len += strlcpy(&path[len], leafname, MAXPATHLEN - len) + 1; if (len > MAXPATHLEN) { char *ptr; @@ -4318,11 +4436,11 @@ safe_getpath(struct vnode *dvp, char *leafname, char *path, int _len, int *trunc } else if (ret == 0) { *truncated_path = 1; } else if (ret != 0) { - struct vnode *mydvp=dvp; + struct vnode *mydvp = dvp; if (ret != ENOSPC) { printf("safe_getpath: failed to get the path for vp %p (%s) : err %d\n", - dvp, dvp->v_name ? dvp->v_name : "no-name", ret); + dvp, dvp->v_name ? 
dvp->v_name : "no-name", ret); } *truncated_path = 1; @@ -4368,7 +4486,7 @@ static int linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, user_addr_t link, int flag, enum uio_seg segflg) { - vnode_t vp, dvp, lvp; + vnode_t vp, dvp, lvp; struct nameidata nd; int follow; int error; @@ -4377,7 +4495,7 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, #endif int need_event, has_listeners, need_kpath2; char *target_path = NULL; - int truncated=0; + int truncated = 0; vp = dvp = lvp = NULLVP; @@ -4387,8 +4505,9 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, segflg, path, ctx); error = nameiat(&nd, fd1); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; nameidone(&nd); @@ -4426,39 +4545,44 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, nd.ni_cnd.cn_flags = LOCKPARENT | AUDITVNPATH2 | CN_NBMOUNTLOOK; nd.ni_dirp = link; error = nameiat(&nd, fd2); - if (error != 0) + if (error != 0) { goto out; + } dvp = nd.ni_dvp; lvp = nd.ni_vp; #if CONFIG_MACF - if ((error = mac_vnode_check_link(ctx, dvp, vp, &nd.ni_cnd)) != 0) + if ((error = mac_vnode_check_link(ctx, dvp, vp, &nd.ni_cnd)) != 0) { goto out2; + } #endif - /* or to anything that kauth doesn't want us to (eg. immutable items) */ - if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_LINKTARGET, ctx)) != 0) - goto out2; + /* or to anything that kauth doesn't want us to (eg. immutable items) */ + if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_LINKTARGET, ctx)) != 0) { + goto out2; + } /* target node must not exist */ if (lvp != NULLVP) { error = EEXIST; goto out2; } - /* cannot link across mountpoints */ - if (vnode_mount(vp) != vnode_mount(dvp)) { - error = EXDEV; - goto out2; - } + /* cannot link across mountpoints */ + if (vnode_mount(vp) != vnode_mount(dvp)) { + error = EXDEV; + goto out2; + } - /* authorize creation of the target note */ - if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0) - goto out2; + /* authorize creation of the target note */ + if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0) { + goto out2; + } /* and finally make the link */ error = VNOP_LINK(vp, dvp, &nd.ni_cnd, ctx); - if (error) + if (error) { goto out2; + } #if CONFIG_MACF (void)mac_vnode_notify_link(ctx, vp, dvp, &nd.ni_cnd); @@ -4494,7 +4618,7 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, AUDIT_ARG(kpath, target_path, ARG_KPATH2); if (has_listeners) { - /* build the path to file we are linking to */ + /* build the path to file we are linking to */ GET_PATH(link_to_path); if (link_to_path == NULL) { error = ENOMEM; @@ -4508,8 +4632,8 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, * Ignore result of kauth_authorize_fileop call. 
*/ kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_LINK, - (uintptr_t)link_to_path, - (uintptr_t)target_path); + (uintptr_t)link_to_path, + (uintptr_t)target_path); } if (link_to_path != NULL) { RELEASE_PATH(link_to_path); @@ -4517,22 +4641,22 @@ linkat_internal(vfs_context_t ctx, int fd1, user_addr_t path, int fd2, } #if CONFIG_FSE if (need_event) { - /* construct fsevent */ - if (get_fse_info(vp, &finfo, ctx) == 0) { + /* construct fsevent */ + if (get_fse_info(vp, &finfo, ctx) == 0) { if (truncated) { finfo.mode |= FSE_TRUNCATED_PATH; } - // build the path to the destination of the link - add_fsevent(FSE_CREATE_FILE, ctx, - FSE_ARG_STRING, len, target_path, - FSE_ARG_FINFO, &finfo, - FSE_ARG_DONE); + // build the path to the destination of the link + add_fsevent(FSE_CREATE_FILE, ctx, + FSE_ARG_STRING, len, target_path, + FSE_ARG_FINFO, &finfo, + FSE_ARG_DONE); } if (vp->v_parent) { - add_fsevent(FSE_STAT_CHANGED, ctx, - FSE_ARG_VNODE, vp->v_parent, - FSE_ARG_DONE); + add_fsevent(FSE_STAT_CHANGED, ctx, + FSE_ARG_VNODE, vp->v_parent, + FSE_ARG_DONE); } } #endif @@ -4547,29 +4671,32 @@ out2: RELEASE_PATH(target_path); } out: - if (lvp) + if (lvp) { vnode_put(lvp); - if (dvp) + } + if (dvp) { vnode_put(dvp); + } vnode_put(vp); - return (error); + return error; } int link(__unused proc_t p, struct link_args *uap, __unused int32_t *retval) { - return (linkat_internal(vfs_context_current(), AT_FDCWD, uap->path, - AT_FDCWD, uap->link, AT_SYMLINK_FOLLOW, UIO_USERSPACE)); + return linkat_internal(vfs_context_current(), AT_FDCWD, uap->path, + AT_FDCWD, uap->link, AT_SYMLINK_FOLLOW, UIO_USERSPACE); } int linkat(__unused proc_t p, struct linkat_args *uap, __unused int32_t *retval) { - if (uap->flag & ~AT_SYMLINK_FOLLOW) - return (EINVAL); + if (uap->flag & ~AT_SYMLINK_FOLLOW) { + return EINVAL; + } - return (linkat_internal(vfs_context_current(), uap->fd1, uap->path, - uap->fd2, uap->link, uap->flag, UIO_USERSPACE)); + return linkat_internal(vfs_context_current(), uap->fd1, uap->path, + uap->fd2, uap->link, uap->flag, UIO_USERSPACE); } /* @@ -4586,8 +4713,8 @@ symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd, char *path; int error; struct nameidata nd; - vnode_t vp, dvp; - size_t dummy=0; + vnode_t vp, dvp; + size_t dummy = 0; proc_t p; error = 0; @@ -4597,16 +4724,18 @@ symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd, } else { path = (char *)path_data; } - if (error) + if (error) { goto out; - AUDIT_ARG(text, path); /* This is the link string */ + } + AUDIT_ARG(text, path); /* This is the link string */ NDINIT(&nd, CREATE, OP_SYMLINK, LOCKPARENT | AUDITVNPATH1, - segflg, link, ctx); + segflg, link, ctx); error = nameiat(&nd, fd); - if (error) + if (error) { goto out; + } dvp = nd.ni_dvp; vp = nd.ni_vp; @@ -4617,37 +4746,42 @@ symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd, #if CONFIG_MACF error = mac_vnode_check_create(ctx, - dvp, &nd.ni_cnd, &va); + dvp, &nd.ni_cnd, &va); #endif if (error != 0) { - goto skipit; + goto skipit; } if (vp != NULL) { - error = EEXIST; - goto skipit; + error = EEXIST; + goto skipit; } /* authorize */ - if (error == 0) + if (error == 0) { error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx); + } /* get default ownership, etc. 
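/*
 * Sketch (not part of the patch): symlinkat_internal() above copies the
 * link *target* in as an uninterpreted string (stored verbatim by
 * VNOP_SYMLINK) and only runs the lookup on the link *name*. Hence a
 * dangling target is perfectly legal:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int dirfd = open("/tmp", O_RDONLY | O_DIRECTORY);

	if (dirfd < 0) {
		perror("open /tmp");
		return 1;
	}
	/* The target need not exist; it is recorded as plain text. */
	if (symlinkat("/no/such/target", dirfd, "dangling.link") != 0) {
		perror("symlinkat");
	}
	unlinkat(dirfd, "dangling.link", 0);
	close(dirfd);
	return 0;
}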
*/ - if (error == 0) + if (error == 0) { error = vnode_authattr_new(dvp, &va, 0, ctx); - if (error == 0) + } + if (error == 0) { error = VNOP_SYMLINK(dvp, &vp, &nd.ni_cnd, &va, path, ctx); + } #if CONFIG_MACF - if (error == 0 && vp) + if (error == 0 && vp) { error = vnode_label(vnode_mount(vp), dvp, vp, &nd.ni_cnd, VNODE_LABEL_CREATE, ctx); + } #endif /* do fallback attribute handling */ - if (error == 0 && vp) + if (error == 0 && vp) { error = vnode_setattr_fallback(vp, &va, ctx); + } if (error == 0) { - int update_flags = 0; + int update_flags = 0; /*check if a new vnode was created, else try to get one*/ if (vp == NULL) { @@ -4659,8 +4793,9 @@ symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd, error = nameiat(&nd, fd); vp = nd.ni_vp; - if (vp == NULL) + if (vp == NULL) { goto skipit; + } } #if 0 /* XXX - kauth_todo - is KAUTH_FILEOP_SYMLINK needed? */ @@ -4670,7 +4805,7 @@ symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd, if (kauth_authorize_fileop_has_listeners() && namei(&nd) == 0) { char *new_link_path = NULL; - int len; + int len; /* build the path to the new link file */ new_link_path = get_pathbuff(); @@ -4678,28 +4813,32 @@ symlinkat_internal(vfs_context_t ctx, user_addr_t path_data, int fd, vn_getpath(dvp, new_link_path, &len); if ((len + 1 + nd.ni_cnd.cn_namelen + 1) < MAXPATHLEN) { new_link_path[len - 1] = '/'; - strlcpy(&new_link_path[len], nd.ni_cnd.cn_nameptr, MAXPATHLEN-len); + strlcpy(&new_link_path[len], nd.ni_cnd.cn_nameptr, MAXPATHLEN - len); } kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_SYMLINK, - (uintptr_t)path, (uintptr_t)new_link_path); - if (new_link_path != NULL) + (uintptr_t)path, (uintptr_t)new_link_path); + if (new_link_path != NULL) { release_pathbuff(new_link_path); + } } #endif // Make sure the name & parent pointers are hooked up - if (vp->v_name == NULL) + if (vp->v_name == NULL) { update_flags |= VNODE_UPDATE_NAME; - if (vp->v_parent == NULLVP) + } + if (vp->v_parent == NULLVP) { update_flags |= VNODE_UPDATE_PARENT; + } - if (update_flags) + if (update_flags) { vnode_update_identity(vp, dvp, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, update_flags); + } #if CONFIG_FSE add_fsevent(FSE_CREATE_FILE, ctx, - FSE_ARG_VNODE, vp, - FSE_ARG_DONE); + FSE_ARG_VNODE, vp, + FSE_ARG_DONE); #endif } @@ -4710,29 +4849,31 @@ skipit: */ nameidone(&nd); - if (vp) - vnode_put(vp); + if (vp) { + vnode_put(vp); + } vnode_put(dvp); out: - if (path && (path != (char *)path_data)) + if (path && (path != (char *)path_data)) { FREE_ZONE(path, MAXPATHLEN, M_NAMEI); + } - return (error); + return error; } int symlink(__unused proc_t p, struct symlink_args *uap, __unused int32_t *retval) { - return (symlinkat_internal(vfs_context_current(), uap->path, AT_FDCWD, - uap->link, UIO_USERSPACE)); + return symlinkat_internal(vfs_context_current(), uap->path, AT_FDCWD, + uap->link, UIO_USERSPACE); } int symlinkat(__unused proc_t p, struct symlinkat_args *uap, __unused int32_t *retval) { - return (symlinkat_internal(vfs_context_current(), uap->path1, uap->fd, - uap->path2, UIO_USERSPACE)); + return symlinkat_internal(vfs_context_current(), uap->path1, uap->fd, + uap->path2, UIO_USERSPACE); } /* @@ -4742,7 +4883,7 @@ symlinkat(__unused proc_t p, struct symlinkat_args *uap, int undelete(__unused proc_t p, __unused struct undelete_args *uap, __unused int32_t *retval) { - return (ENOTSUP); + return ENOTSUP; } /* @@ -4754,11 +4895,11 @@ unlinkat_internal(vfs_context_t ctx, int fd, vnode_t start_dvp, user_addr_t path_arg, enum 
uio_seg segflg, int unlink_flags) { struct nameidata nd; - vnode_t vp, dvp; + vnode_t vp, dvp; int error; struct componentname *cnp; char *path = NULL; - int len=0; + int len = 0; #if CONFIG_FSE fse_info finfo; struct vnode_attr va; @@ -4774,11 +4915,13 @@ unlinkat_internal(vfs_context_t ctx, int fd, vnode_t start_dvp, int cn_flags; cn_flags = LOCKPARENT; - if (!(unlink_flags & VNODE_REMOVE_NO_AUDIT_PATH)) + if (!(unlink_flags & VNODE_REMOVE_NO_AUDIT_PATH)) { cn_flags |= AUDITVNPATH1; + } /* If a starting dvp is passed, it trumps any fd passed. */ - if (start_dvp) + if (start_dvp) { cn_flags |= USEDVP; + } #if NAMEDRSRCFORK /* unlink or delete is allowed on rsrc forks and named streams */ @@ -4801,8 +4944,9 @@ retry: continue_lookup: error = nameiat(&nd, fd); - if (error) - return (error); + if (error) { + return error; + } dvp = nd.ni_dvp; vp = nd.ni_vp; @@ -4828,10 +4972,10 @@ continue_lookup: } #if DEVELOPMENT || DEBUG - /* - * XXX VSWAP: Check for entitlements or special flag here - * so we can restrict access appropriately. - */ + /* + * XXX VSWAP: Check for entitlements or special flag here + * so we can restrict access appropriately. + */ #else /* DEVELOPMENT || DEBUG */ if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) { @@ -4892,9 +5036,9 @@ continue_lookup: } #if NAMEDRSRCFORK - if (nd.ni_cnd.cn_flags & CN_WANTSRSRCFORK) + if (nd.ni_cnd.cn_flags & CN_WANTSRSRCFORK) { error = vnode_removenamedstream(dvp, vp, XATTR_RESOURCEFORK_NAME, 0, ctx); - else + } else #endif { error = vn_remove(dvp, &nd.ni_vp, &nd, flags, vap, ctx); @@ -4935,20 +5079,20 @@ continue_lookup: if (!error) { if (has_listeners) { kauth_authorize_fileop(vfs_context_ucred(ctx), - KAUTH_FILEOP_DELETE, - (uintptr_t)vp, - (uintptr_t)path); + KAUTH_FILEOP_DELETE, + (uintptr_t)vp, + (uintptr_t)path); } if (vp->v_flag & VISHARDLINK) { - // - // if a hardlink gets deleted we want to blow away the - // v_parent link because the path that got us to this - // instance of the link is no longer valid. this will - // force the next call to get the path to ask the file - // system instead of just following the v_parent link. - // - vnode_update_identity(vp, NULL, NULL, 0, 0, VNODE_UPDATE_PARENT); + // + // if a hardlink gets deleted we want to blow away the + // v_parent link because the path that got us to this + // instance of the link is no longer valid. this will + // force the next call to get the path to ask the file + // system instead of just following the v_parent link. + // + vnode_update_identity(vp, NULL, NULL, 0, 0, VNODE_UPDATE_PARENT); } #if CONFIG_FSE @@ -4962,26 +5106,27 @@ continue_lookup: finfo.mode |= FSE_TRUNCATED_PATH; } add_fsevent(FSE_DELETE, ctx, - FSE_ARG_STRING, len, path, - FSE_ARG_FINFO, &finfo, - FSE_ARG_DONE); + FSE_ARG_STRING, len, path, + FSE_ARG_FINFO, &finfo, + FSE_ARG_DONE); } #endif } out: - if (path != NULL) + if (path != NULL) { RELEASE_PATH(path); + } #if NAMEDRSRCFORK /* recycle the deleted rsrc fork vnode to force a reclaim, which * will cause its shadow file to go away if necessary. 
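/*
 * Sketch (not part of the patch): the VISHARDLINK branch above drops
 * v_parent on delete because removing one name must not invalidate the
 * file reachable through its other names. The name/file distinction:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/tmp/hl_a", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	(void)write(fd, "data\n", 5);
	close(fd);

	link("/tmp/hl_a", "/tmp/hl_b");         /* second name, same file */
	unlink("/tmp/hl_a");                    /* removes a name only */

	fd = open("/tmp/hl_b", O_RDONLY);       /* data still reachable */
	if (fd >= 0) {
		printf("file survives via its remaining link\n");
		close(fd);
	}
	unlink("/tmp/hl_b");
	return 0;
}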
*/ - if (vp && (vnode_isnamedstream(vp)) && - (vp->v_parent != NULLVP) && - vnode_isshadow(vp)) { - vnode_recycle(vp); - } + if (vp && (vnode_isnamedstream(vp)) && + (vp->v_parent != NULLVP) && + vnode_isshadow(vp)) { + vnode_recycle(vp); + } #endif /* * nameidone has to happen before we vnode_put(dvp) @@ -4997,15 +5142,15 @@ out: goto retry; } - return (error); + return error; } int unlink1(vfs_context_t ctx, vnode_t start_dvp, user_addr_t path_arg, enum uio_seg segflg, int unlink_flags) { - return (unlinkat_internal(ctx, AT_FDCWD, start_dvp, path_arg, segflg, - unlink_flags)); + return unlinkat_internal(ctx, AT_FDCWD, start_dvp, path_arg, segflg, + unlink_flags); } /* @@ -5014,8 +5159,8 @@ unlink1(vfs_context_t ctx, vnode_t start_dvp, user_addr_t path_arg, int delete(__unused proc_t p, struct delete_args *uap, __unused int32_t *retval) { - return (unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP, - uap->path, UIO_USERSPACE, VNODE_REMOVE_NODELETEBUSY)); + return unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP, + uap->path, UIO_USERSPACE, VNODE_REMOVE_NODELETEBUSY); } /* @@ -5024,22 +5169,24 @@ delete(__unused proc_t p, struct delete_args *uap, __unused int32_t *retval) int unlink(__unused proc_t p, struct unlink_args *uap, __unused int32_t *retval) { - return (unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP, - uap->path, UIO_USERSPACE, 0)); + return unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP, + uap->path, UIO_USERSPACE, 0); } int unlinkat(__unused proc_t p, struct unlinkat_args *uap, __unused int32_t *retval) { - if (uap->flag & ~AT_REMOVEDIR) - return (EINVAL); + if (uap->flag & ~AT_REMOVEDIR) { + return EINVAL; + } - if (uap->flag & AT_REMOVEDIR) - return (rmdirat_internal(vfs_context_current(), uap->fd, - uap->path, UIO_USERSPACE)); - else - return (unlinkat_internal(vfs_context_current(), uap->fd, - NULLVP, uap->path, UIO_USERSPACE, 0)); + if (uap->flag & AT_REMOVEDIR) { + return rmdirat_internal(vfs_context_current(), uap->fd, + uap->path, UIO_USERSPACE); + } else { + return unlinkat_internal(vfs_context_current(), uap->fd, + NULLVP, uap->path, UIO_USERSPACE, 0); + } } /* @@ -5054,33 +5201,35 @@ lseek(proc_t p, struct lseek_args *uap, off_t *retval) off_t offset = uap->offset, file_size; int error; - if ( (error = fp_getfvp(p,uap->fd, &fp, &vp)) ) { - if (error == ENOTSUP) - return (ESPIPE); - return (error); + if ((error = fp_getfvp(p, uap->fd, &fp, &vp))) { + if (error == ENOTSUP) { + return ESPIPE; + } + return error; } if (vnode_isfifo(vp)) { file_drop(uap->fd); - return(ESPIPE); + return ESPIPE; } ctx = vfs_context_current(); #if CONFIG_MACF - if (uap->whence == L_INCR && uap->offset == 0) + if (uap->whence == L_INCR && uap->offset == 0) { error = mac_file_check_get_offset(vfs_context_ucred(ctx), fp->f_fglob); - else + } else { error = mac_file_check_change_offset(vfs_context_ucred(ctx), fp->f_fglob); + } if (error) { file_drop(uap->fd); - return (error); + return error; } #endif - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } switch (uap->whence) { @@ -5088,17 +5237,18 @@ lseek(proc_t p, struct lseek_args *uap, off_t *retval) offset += fp->f_fglob->fg_offset; break; case L_XTND: - if ((error = vnode_size(vp, &file_size, ctx)) != 0) + if ((error = vnode_size(vp, &file_size, ctx)) != 0) { break; + } offset += file_size; break; case L_SET: break; case SEEK_HOLE: - error = VNOP_IOCTL(vp, FSIOC_FIOSEEKHOLE, (caddr_t)&offset, 0, ctx); + error = 
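/*
 * Sketch (not part of the patch): unlinkat() above dispatches on
 * AT_REMOVEDIR -- rmdirat_internal() for directories, otherwise
 * unlinkat_internal() -- and rejects any other flag with EINVAL:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(void)
{
	mkdir("/tmp/unlinkat_demo", 0755);

	/* Without AT_REMOVEDIR a directory is refused (EPERM/EISDIR). */
	if (unlinkat(AT_FDCWD, "/tmp/unlinkat_demo", 0) != 0) {
		perror("unlinkat(dir, 0)");
	}
	/* AT_REMOVEDIR routes to the rmdir path instead. */
	if (unlinkat(AT_FDCWD, "/tmp/unlinkat_demo", AT_REMOVEDIR) == 0) {
		printf("directory removed via AT_REMOVEDIR\n");
	}
	return 0;
}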
VNOP_IOCTL(vp, FSIOC_FIOSEEKHOLE, (caddr_t)&offset, 0, ctx); break; case SEEK_DATA: - error = VNOP_IOCTL(vp, FSIOC_FIOSEEKDATA, (caddr_t)&offset, 0, ctx); + error = VNOP_IOCTL(vp, FSIOC_FIOSEEKDATA, (caddr_t)&offset, 0, ctx); break; default: error = EINVAL; @@ -5131,7 +5281,7 @@ lseek(proc_t p, struct lseek_args *uap, off_t *retval) post_event_if_success(vp, error, NOTE_NONE); (void)vnode_put(vp); file_drop(uap->fd); - return (error); + return error; } @@ -5144,18 +5294,19 @@ lseek(proc_t p, struct lseek_args *uap, off_t *retval) static int access1(vnode_t vp, vnode_t dvp, int uflags, vfs_context_t ctx) { - kauth_action_t action; + kauth_action_t action; int error; - /* - * If just the regular access bits, convert them to something + /* + * If just the regular access bits, convert them to something * that vnode_authorize will understand. - */ - if (!(uflags & _ACCESS_EXTENDED_MASK)) { - action = 0; - if (uflags & R_OK) - action |= KAUTH_VNODE_READ_DATA; /* aka KAUTH_VNODE_LIST_DIRECTORY */ - if (uflags & W_OK) { + */ + if (!(uflags & _ACCESS_EXTENDED_MASK)) { + action = 0; + if (uflags & R_OK) { + action |= KAUTH_VNODE_READ_DATA; /* aka KAUTH_VNODE_LIST_DIRECTORY */ + } + if (uflags & W_OK) { if (vnode_isdir(vp)) { action |= KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY; @@ -5164,32 +5315,33 @@ access1(vnode_t vp, vnode_t dvp, int uflags, vfs_context_t ctx) action |= KAUTH_VNODE_WRITE_DATA; } } - if (uflags & X_OK) { + if (uflags & X_OK) { if (vnode_isdir(vp)) { action |= KAUTH_VNODE_SEARCH; } else { action |= KAUTH_VNODE_EXECUTE; } } - } else { + } else { /* take advantage of definition of uflags */ action = uflags >> 8; } #if CONFIG_MACF error = mac_vnode_check_access(ctx, vp, uflags); - if (error) - return (error); + if (error) { + return error; + } #endif /* MAC */ - /* action == 0 means only check for existence */ - if (action != 0) { - error = vnode_authorize(vp, dvp, action | KAUTH_VNODE_ACCESS, ctx); + /* action == 0 means only check for existence */ + if (action != 0) { + error = vnode_authorize(vp, dvp, action | KAUTH_VNODE_ACCESS, ctx); } else { error = 0; } - return(error); + return error; } @@ -5198,9 +5350,9 @@ access1(vnode_t vp, vnode_t dvp, int uflags, vfs_context_t ctx) * access_extended: Check access permissions in bulk. * * Description: uap->entries Pointer to an array of accessx - * descriptor structs, plus one or - * more NULL terminated strings (see - * "Notes" section below). + * descriptor structs, plus one or + * more NULL terminated strings (see + * "Notes" section below). * uap->size Size of the area pointed to by * uap->entries. * uap->results Pointer to the results array. @@ -5253,7 +5405,7 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in unsigned int desc_max, desc_actual, i, j; struct vfs_context context; struct nameidata nd; - int niopts; + int niopts; vnode_t vp = NULL; vnode_t dvp = NULL; #define ACCESSX_MAX_DESCR_ON_STACK 10 @@ -5271,22 +5423,25 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in * descriptor and a one yte NUL terminated string. * o The allocation of local storage must not fail. 
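/*
 * Sketch (not part of the patch): SEEK_HOLE/SEEK_DATA above are
 * forwarded to the filesystem as FSIOC_FIOSEEKHOLE/FSIOC_FIOSEEKDATA.
 * From userspace, where supported:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s file\n", argv[0]);
		return 1;
	}
	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	off_t data = lseek(fd, 0, SEEK_DATA);   /* ENXIO if none at/after 0 */
	off_t hole = lseek(fd, 0, SEEK_HOLE);   /* EOF counts as a hole */
	printf("first data at %lld, first hole at %lld\n",
	    (long long)data, (long long)hole);
#else
	fprintf(stderr, "SEEK_HOLE/SEEK_DATA not available here\n");
#endif
	close(fd);
	return 0;
}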
*/ - if (uap->size > ACCESSX_MAX_TABLESIZE) - return(ENOMEM); - if (uap->size < (sizeof(struct accessx_descriptor) + 2)) - return(EINVAL); - if (uap->size <= sizeof (stack_input)) { + if (uap->size > ACCESSX_MAX_TABLESIZE) { + return ENOMEM; + } + if (uap->size < (sizeof(struct accessx_descriptor) + 2)) { + return EINVAL; + } + if (uap->size <= sizeof(stack_input)) { input = stack_input; } else { - MALLOC(input, struct accessx_descriptor *, uap->size, M_TEMP, M_WAITOK); - if (input == NULL) { - error = ENOMEM; - goto out; - } + MALLOC(input, struct accessx_descriptor *, uap->size, M_TEMP, M_WAITOK); + if (input == NULL) { + error = ENOMEM; + goto out; + } } error = copyin(uap->entries, input, uap->size); - if (error) + if (error) { goto out; + } AUDIT_ARG(opaque, input, uap->size); @@ -5299,9 +5454,9 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in /* * Access is defined as checking against the process' real identity, - * even if operations are checking the effective identity. This + * even if operations are checking the effective identity. This * requires that we use a local vfs context. - */ + */ context.vc_ucred = kauth_cred_copy_real(kauth_cred_get()); context.vc_thread = current_thread(); @@ -5359,8 +5514,9 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in * the string table following the last descriptor to be out * of order relative to the descriptor list. */ - if (j < desc_actual) + if (j < desc_actual) { desc_actual = j; + } } /* @@ -5408,25 +5564,29 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in * rights to delete the object itself. */ wantdelete = input[i].ad_flags & _DELETE_OK; - for (j = i + 1; (j < desc_actual) && (input[j].ad_name_offset == 0); j++) - if (input[j].ad_flags & _DELETE_OK) + for (j = i + 1; (j < desc_actual) && (input[j].ad_name_offset == 0); j++) { + if (input[j].ad_flags & _DELETE_OK) { wantdelete = 1; + } + } niopts = FOLLOW | AUDITVNPATH1; /* need parent for vnode_authorize for deletion test */ - if (wantdelete) + if (wantdelete) { niopts |= WANTPARENT; + } /* do the lookup */ NDINIT(&nd, LOOKUP, OP_ACCESS, niopts, UIO_SYSSPACE, - CAST_USER_ADDR_T(((const char *)input) + input[i].ad_name_offset), - &context); + CAST_USER_ADDR_T(((const char *)input) + input[i].ad_name_offset), + &context); error = namei(&nd); if (!error) { vp = nd.ni_vp; - if (wantdelete) + if (wantdelete) { dvp = nd.ni_dvp; + } } nameidone(&nd); } @@ -5434,7 +5594,7 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in /* * Handle lookup errors. 
*/ - switch(error) { + switch (error) { case ENOENT: case EACCES: case EPERM: @@ -5458,17 +5618,22 @@ access_extended(__unused proc_t p, struct access_extended_args *uap, __unused in error = copyout(result, uap->results, desc_actual * sizeof(errno_t)); out: - if (input && input != stack_input) + if (input && input != stack_input) { FREE(input, M_TEMP); - if (result) + } + if (result) { FREE(result, M_TEMP); - if (vp) + } + if (vp) { vnode_put(vp); - if (dvp) + } + if (dvp) { vnode_put(dvp); - if (IS_VALID_CRED(context.vc_ucred)) - kauth_cred_unref(&context.vc_ucred); - return(error); + } + if (IS_VALID_CRED(context.vc_ucred)) { + kauth_cred_unref(&context.vc_ucred); + } + return error; } @@ -5489,40 +5654,44 @@ faccessat_internal(vfs_context_t ctx, int fd, user_addr_t path, int amode, { int error; struct nameidata nd; - int niopts; + int niopts; struct vfs_context context; #if NAMEDRSRCFORK int is_namedstream = 0; #endif - /* + /* * Unless the AT_EACCESS option is used, Access is defined as checking * against the process' real identity, even if operations are checking * the effective identity. So we need to tweak the credential - * in the context for that case. - */ - if (!(flag & AT_EACCESS)) + * in the context for that case. + */ + if (!(flag & AT_EACCESS)) { context.vc_ucred = kauth_cred_copy_real(kauth_cred_get()); - else + } else { context.vc_ucred = ctx->vc_ucred; + } context.vc_thread = ctx->vc_thread; niopts = FOLLOW | AUDITVNPATH1; - /* need parent for vnode_authorize for deletion test */ - if (amode & _DELETE_OK) - niopts |= WANTPARENT; - NDINIT(&nd, LOOKUP, OP_ACCESS, niopts, segflg, - path, &context); + /* need parent for vnode_authorize for deletion test */ + if (amode & _DELETE_OK) { + niopts |= WANTPARENT; + } + NDINIT(&nd, LOOKUP, OP_ACCESS, niopts, segflg, + path, &context); #if NAMEDRSRCFORK /* access(F_OK) calls are allowed for resource forks. 
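/*
 * Sketch (not part of the patch): faccessat_internal() above swaps in
 * kauth_cred_copy_real() unless AT_EACCESS is set, i.e. access checks
 * run against the real identity by default and against the effective
 * identity only on request. Only set-id programs see the difference:
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/etc/hosts";
	int real_w = faccessat(AT_FDCWD, path, W_OK, 0);
	int eff_w  = faccessat(AT_FDCWD, path, W_OK, AT_EACCESS);

	printf("W_OK with real creds: %s, with effective creds: %s\n",
	    real_w == 0 ? "yes" : "no", eff_w == 0 ? "yes" : "no");
	return 0;
}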
*/ - if (amode == F_OK) + if (amode == F_OK) { nd.ni_cnd.cn_flags |= CN_ALLOWRSRCFORK; + } #endif - error = nameiat(&nd, fd); - if (error) - goto out; + error = nameiat(&nd, fd); + if (error) { + goto out; + } #if NAMEDRSRCFORK /* Grab reference on the shadow stream file vnode to @@ -5545,33 +5714,36 @@ faccessat_internal(vfs_context_t ctx, int fd, user_addr_t path, int amode, } #endif - vnode_put(nd.ni_vp); - if (amode & _DELETE_OK) - vnode_put(nd.ni_dvp); - nameidone(&nd); + vnode_put(nd.ni_vp); + if (amode & _DELETE_OK) { + vnode_put(nd.ni_dvp); + } + nameidone(&nd); out: - if (!(flag & AT_EACCESS)) + if (!(flag & AT_EACCESS)) { kauth_cred_unref(&context.vc_ucred); - return (error); + } + return error; } int access(__unused proc_t p, struct access_args *uap, __unused int32_t *retval) { - return (faccessat_internal(vfs_context_current(), AT_FDCWD, - uap->path, uap->flags, 0, UIO_USERSPACE)); + return faccessat_internal(vfs_context_current(), AT_FDCWD, + uap->path, uap->flags, 0, UIO_USERSPACE); } int faccessat(__unused proc_t p, struct faccessat_args *uap, - __unused int32_t *retval) + __unused int32_t *retval) { - if (uap->flag & ~AT_EACCESS) - return (EINVAL); + if (uap->flag & ~AT_EACCESS) { + return EINVAL; + } - return (faccessat_internal(vfs_context_current(), uap->fd, - uap->path, uap->amode, uap->flag, UIO_USERSPACE)); + return faccessat_internal(vfs_context_current(), uap->fd, + uap->path, uap->amode, uap->flag, UIO_USERSPACE); } /* @@ -5614,8 +5786,9 @@ fstatat_internal(vfs_context_t ctx, user_addr_t path, user_addr_t ub, nd.ni_cnd.cn_flags |= CN_ALLOWRSRCFORK; #endif error = nameiat(&nd, fd); - if (error) - return (error); + if (error) { + return error; + } fsec = KAUTH_FILESEC_NONE; statptr = (void *)&source; @@ -5643,8 +5816,9 @@ fstatat_internal(vfs_context_t ctx, user_addr_t path, user_addr_t ub, vnode_put(nd.ni_vp); nameidone(&nd); - if (error) - return (error); + if (error) { + return error; + } /* Zap spare fields */ if (isstat64 != 0) { source.sb64.st_lspare = 0; @@ -5686,12 +5860,12 @@ fstatat_internal(vfs_context_t ctx, user_addr_t path, user_addr_t ub, source.sb.st_nlink = 1; } } - if ((error = copyout(sbp, ub, my_size)) != 0) + if ((error = copyout(sbp, ub, my_size)) != 0) { goto out; + } /* caller wants extended security information? */ if (xsecurity != USER_ADDR_NULL) { - /* did we get any? 
*/ if (fsec == KAUTH_FILESEC_NONE) { if (susize(xsecurity_size, 0) != 0) { @@ -5709,14 +5883,16 @@ fstatat_internal(vfs_context_t ctx, user_addr_t path, user_addr_t ub, } /* if the caller supplied enough room, copy out to it */ - if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec)) + if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec)) { error = copyout(fsec, xsecurity, KAUTH_FILESEC_COPYSIZE(fsec)); + } } } out: - if (fsec != KAUTH_FILESEC_NONE) + if (fsec != KAUTH_FILESEC_NONE) { kauth_filesec_free(fsec); - return (error); + } + return error; } /* @@ -5739,9 +5915,9 @@ int stat_extended(__unused proc_t p, struct stat_extended_args *uap, __unused int32_t *retval) { - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - uap->xsecurity, uap->xsecurity_size, 0, UIO_USERSPACE, AT_FDCWD, - 0)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + uap->xsecurity, uap->xsecurity_size, 0, UIO_USERSPACE, AT_FDCWD, + 0); } /* @@ -5751,15 +5927,15 @@ stat_extended(__unused proc_t p, struct stat_extended_args *uap, int stat(__unused proc_t p, struct stat_args *uap, __unused int32_t *retval) { - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - 0, 0, 0, UIO_USERSPACE, AT_FDCWD, 0)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + 0, 0, 0, UIO_USERSPACE, AT_FDCWD, 0); } int stat64(__unused proc_t p, struct stat64_args *uap, __unused int32_t *retval) { - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - 0, 0, 1, UIO_USERSPACE, AT_FDCWD, 0)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + 0, 0, 1, UIO_USERSPACE, AT_FDCWD, 0); } /* @@ -5781,9 +5957,9 @@ stat64(__unused proc_t p, struct stat64_args *uap, __unused int32_t *retval) int stat64_extended(__unused proc_t p, struct stat64_extended_args *uap, __unused int32_t *retval) { - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - uap->xsecurity, uap->xsecurity_size, 1, UIO_USERSPACE, AT_FDCWD, - 0)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + uap->xsecurity, uap->xsecurity_size, 1, UIO_USERSPACE, AT_FDCWD, + 0); } /* @@ -5805,9 +5981,9 @@ stat64_extended(__unused proc_t p, struct stat64_extended_args *uap, __unused in int lstat_extended(__unused proc_t p, struct lstat_extended_args *uap, __unused int32_t *retval) { - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - uap->xsecurity, uap->xsecurity_size, 0, UIO_USERSPACE, AT_FDCWD, - AT_SYMLINK_NOFOLLOW)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + uap->xsecurity, uap->xsecurity_size, 0, UIO_USERSPACE, AT_FDCWD, + AT_SYMLINK_NOFOLLOW); } /* @@ -5816,15 +5992,15 @@ lstat_extended(__unused proc_t p, struct lstat_extended_args *uap, __unused int3 int lstat(__unused proc_t p, struct lstat_args *uap, __unused int32_t *retval) { - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - 0, 0, 0, UIO_USERSPACE, AT_FDCWD, AT_SYMLINK_NOFOLLOW)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + 0, 0, 0, UIO_USERSPACE, AT_FDCWD, AT_SYMLINK_NOFOLLOW); } int lstat64(__unused proc_t p, struct lstat64_args *uap, __unused int32_t *retval) { - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - 0, 0, 1, UIO_USERSPACE, AT_FDCWD, AT_SYMLINK_NOFOLLOW)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + 0, 0, 1, UIO_USERSPACE, AT_FDCWD, AT_SYMLINK_NOFOLLOW); } /* @@ -5847,30 +6023,32 @@ lstat64(__unused proc_t p, struct lstat64_args *uap, 
__unused int32_t *retval) int lstat64_extended(__unused proc_t p, struct lstat64_extended_args *uap, __unused int32_t *retval) { - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - uap->xsecurity, uap->xsecurity_size, 1, UIO_USERSPACE, AT_FDCWD, - AT_SYMLINK_NOFOLLOW)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + uap->xsecurity, uap->xsecurity_size, 1, UIO_USERSPACE, AT_FDCWD, + AT_SYMLINK_NOFOLLOW); } int fstatat(__unused proc_t p, struct fstatat_args *uap, __unused int32_t *retval) { - if (uap->flag & ~AT_SYMLINK_NOFOLLOW) - return (EINVAL); + if (uap->flag & ~AT_SYMLINK_NOFOLLOW) { + return EINVAL; + } - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - 0, 0, 0, UIO_USERSPACE, uap->fd, uap->flag)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + 0, 0, 0, UIO_USERSPACE, uap->fd, uap->flag); } int fstatat64(__unused proc_t p, struct fstatat64_args *uap, __unused int32_t *retval) { - if (uap->flag & ~AT_SYMLINK_NOFOLLOW) - return (EINVAL); + if (uap->flag & ~AT_SYMLINK_NOFOLLOW) { + return EINVAL; + } - return (fstatat_internal(vfs_context_current(), uap->path, uap->ub, - 0, 0, 1, UIO_USERSPACE, uap->fd, uap->flag)); + return fstatat_internal(vfs_context_current(), uap->path, uap->ub, + 0, 0, 1, UIO_USERSPACE, uap->fd, uap->flag); } /* @@ -5897,16 +6075,17 @@ pathconf(__unused proc_t p, struct pathconf_args *uap, int32_t *retval) vfs_context_t ctx = vfs_context_current(); NDINIT(&nd, LOOKUP, OP_PATHCONF, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } error = vn_pathconf(nd.ni_vp, uap->name, retval, ctx); vnode_put(nd.ni_vp); nameidone(&nd); - return (error); + return error; } /* @@ -5922,20 +6101,21 @@ readlinkat_internal(vfs_context_t ctx, int fd, user_addr_t path, uio_t auio; int error; struct nameidata nd; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; NDINIT(&nd, LOOKUP, OP_READLINK, NOFOLLOW | AUDITVNPATH1, seg, path, ctx); error = nameiat(&nd, fd); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; nameidone(&nd); auio = uio_createwithbuffer(1, 0, bufseg, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, buf, bufsize); if (vp->v_type != VLNK) { error = EINVAL; @@ -5943,16 +6123,18 @@ readlinkat_internal(vfs_context_t ctx, int fd, user_addr_t path, #if CONFIG_MACF error = mac_vnode_check_readlink(ctx, vp); #endif - if (error == 0) + if (error == 0) { error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, - ctx); - if (error == 0) + ctx); + } + if (error == 0) { error = VNOP_READLINK(vp, auio, ctx); + } } vnode_put(vp); *retval = bufsize - (int)uio_resid(auio); - return (error); + return error; } int @@ -5961,9 +6143,9 @@ readlink(proc_t p, struct readlink_args *uap, int32_t *retval) enum uio_seg procseg; procseg = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; - return (readlinkat_internal(vfs_context_current(), AT_FDCWD, - CAST_USER_ADDR_T(uap->path), procseg, CAST_USER_ADDR_T(uap->buf), - uap->count, procseg, retval)); + return readlinkat_internal(vfs_context_current(), AT_FDCWD, + CAST_USER_ADDR_T(uap->path), procseg, CAST_USER_ADDR_T(uap->buf), + uap->count, procseg, retval); } int @@ -5972,8 +6154,8 @@ readlinkat(proc_t p, struct readlinkat_args *uap, int32_t *retval) enum uio_seg procseg; procseg = IS_64BIT_PROCESS(p) ? 
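Every member of the stat family above is a thin shim over fstatat_internal(); all that varies is the 64-bit layout flag, the xsecurity pointers, and AT_SYMLINK_NOFOLLOW. The same equivalence is visible from userspace, where fstatat(2) with AT_SYMLINK_NOFOLLOW behaves like lstat(2). A short sketch, with /tmp/link standing in for any symlink:

#include <fcntl.h>      /* AT_FDCWD, AT_SYMLINK_NOFOLLOW */
#include <stdio.h>
#include <sys/stat.h>

int
main(void)
{
    struct stat st;

    /* Same result as lstat("/tmp/link", &st): the link itself is
     * examined, not whatever it points at. */
    if (fstatat(AT_FDCWD, "/tmp/link", &st, AT_SYMLINK_NOFOLLOW) == 0)
        printf("is symlink: %d\n", (int)S_ISLNK(st.st_mode));
    return 0;
}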
UIO_USERSPACE64 : UIO_USERSPACE32; - return (readlinkat_internal(vfs_context_current(), uap->fd, uap->path, - procseg, uap->buf, uap->bufsize, procseg, retval)); + return readlinkat_internal(vfs_context_current(), uap->fd, uap->path, + procseg, uap->buf, uap->bufsize, procseg, retval); } /* @@ -5985,7 +6167,7 @@ static int chflags1(vnode_t vp, int flags, vfs_context_t ctx) { struct vnode_attr va; - kauth_action_t action; + kauth_action_t action; int error; VATTR_INIT(&va); @@ -5993,25 +6175,29 @@ chflags1(vnode_t vp, int flags, vfs_context_t ctx) #if CONFIG_MACF error = mac_vnode_check_setflags(ctx, vp, flags); - if (error) + if (error) { goto out; + } #endif /* request authorisation, disregard immutability */ - if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) + if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) { goto out; + } /* * Request that the auth layer disregard those file flags it's allowed to when * authorizing this operation; we need to do this in order to be able to * clear immutable flags. */ - if (action && ((error = vnode_authorize(vp, NULL, action | KAUTH_VNODE_NOIMMUTABLE, ctx)) != 0)) + if (action && ((error = vnode_authorize(vp, NULL, action | KAUTH_VNODE_NOIMMUTABLE, ctx)) != 0)) { goto out; + } error = vnode_setattr(vp, &va, ctx); #if CONFIG_MACF - if (error == 0) + if (error == 0) { mac_vnode_notify_setflags(ctx, vp, flags); + } #endif if ((error == 0) && !VATTR_IS_SUPPORTED(&va, va_flags)) { @@ -6019,7 +6205,7 @@ chflags1(vnode_t vp, int flags, vfs_context_t ctx) } out: vnode_put(vp); - return(error); + return error; } /* @@ -6036,17 +6222,18 @@ chflags(__unused proc_t p, struct chflags_args *uap, __unused int32_t *retval) AUDIT_ARG(fflags, uap->flags); NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; nameidone(&nd); /* we don't vnode_put() here because chflags1 does internally */ error = chflags1(vp, uap->flags, ctx); - return(error); + return error; } /* @@ -6061,12 +6248,13 @@ fchflags(__unused proc_t p, struct fchflags_args *uap, __unused int32_t *retval) AUDIT_ARG(fd, uap->fd); AUDIT_ARG(fflags, uap->flags); - if ( (error = file_vnode(uap->fd, &vp)) ) - return (error); + if ((error = file_vnode(uap->fd, &vp))) { + return error; + } if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } AUDIT_ARG(vnpath, vp, ARG_VNODE1); @@ -6075,7 +6263,7 @@ fchflags(__unused proc_t p, struct fchflags_args *uap, __unused int32_t *retval) error = chflags1(vp, uap->flags, vfs_context_current()); file_drop(uap->fd); - return (error); + return error; } /* @@ -6102,52 +6290,60 @@ chmod_vnode(vfs_context_t ctx, vnode_t vp, struct vnode_attr *vap) #if NAMEDSTREAMS /* chmod calls are not allowed for resource forks. */ if (vp->v_flag & VISNAMEDSTREAM) { - return (EPERM); + return EPERM; } #endif #if CONFIG_MACF if (VATTR_IS_ACTIVE(vap, va_mode) && - (error = mac_vnode_check_setmode(ctx, vp, (mode_t)vap->va_mode)) != 0) - return (error); + (error = mac_vnode_check_setmode(ctx, vp, (mode_t)vap->va_mode)) != 0) { + return error; + } if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) { if ((error = mac_vnode_check_setowner(ctx, vp, VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : -1, - VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : -1))) - return (error); + VATTR_IS_ACTIVE(vap, va_gid) ? 
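chflags1() above requests authorization with KAUTH_VNODE_NOIMMUTABLE precisely so that clearing an immutable flag can be authorized at all: without it, the immutability being removed would veto its own removal. From userspace that round trip looks like the following sketch; /tmp/demo is only an illustrative path:

#include <stdio.h>
#include <sys/stat.h>   /* UF_IMMUTABLE */
#include <unistd.h>     /* chflags */

int
main(void)
{
    /* Set, then clear, the user-immutable flag.  The clear succeeds
     * because the kernel deliberately disregards immutability when
     * authorizing the change. */
    if (chflags("/tmp/demo", UF_IMMUTABLE) != 0)
        perror("chflags set");
    if (chflags("/tmp/demo", 0) != 0)
        perror("chflags clear");
    return 0;
}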
vap->va_gid : -1))) { + return error; + } } if (VATTR_IS_ACTIVE(vap, va_acl) && - (error = mac_vnode_check_setacl(ctx, vp, vap->va_acl))) - return (error); + (error = mac_vnode_check_setacl(ctx, vp, vap->va_acl))) { + return error; + } #endif - /* make sure that the caller is allowed to set this security information */ + /* make sure that the caller is allowed to set this security information */ if (((error = vnode_authattr(vp, vap, &action, ctx)) != 0) || ((error = vnode_authorize(vp, NULL, action, ctx)) != 0)) { - if (error == EACCES) + if (error == EACCES) { error = EPERM; - return(error); + } + return error; + } + + if ((error = vnode_setattr(vp, vap, ctx)) != 0) { + return error; } - if ((error = vnode_setattr(vp, vap, ctx)) != 0) - return (error); - #if CONFIG_MACF - if (VATTR_IS_ACTIVE(vap, va_mode)) + if (VATTR_IS_ACTIVE(vap, va_mode)) { mac_vnode_notify_setmode(ctx, vp, (mode_t)vap->va_mode); + } - if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) + if (VATTR_IS_ACTIVE(vap, va_uid) || VATTR_IS_ACTIVE(vap, va_gid)) { mac_vnode_notify_setowner(ctx, vp, - VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : -1, - VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : -1); + VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : -1, + VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : -1); + } - if (VATTR_IS_ACTIVE(vap, va_acl)) + if (VATTR_IS_ACTIVE(vap, va_acl)) { mac_vnode_notify_setacl(ctx, vp, vap->va_acl); + } #endif - return (error); + return error; } @@ -6168,12 +6364,13 @@ chmodat(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap, follow = (flag & AT_SYMLINK_NOFOLLOW) ? NOFOLLOW : FOLLOW; NDINIT(&nd, LOOKUP, OP_SETATTR, follow | AUDITVNPATH1, segflg, path, ctx); - if ((error = nameiat(&nd, fd))) - return (error); + if ((error = nameiat(&nd, fd))) { + return error; + } error = chmod_vnode(ctx, nd.ni_vp, vap); vnode_put(nd.ni_vp); nameidone(&nd); - return(error); + return error; } /* @@ -6208,25 +6405,29 @@ chmod_extended(__unused proc_t p, struct chmod_extended_args *uap, __unused int3 AUDIT_ARG(owner, uap->uid, uap->gid); VATTR_INIT(&va); - if (uap->mode != -1) + if (uap->mode != -1) { VATTR_SET(&va, va_mode, uap->mode & ALLPERMS); - if (uap->uid != KAUTH_UID_NONE) + } + if (uap->uid != KAUTH_UID_NONE) { VATTR_SET(&va, va_uid, uap->uid); - if (uap->gid != KAUTH_GID_NONE) + } + if (uap->gid != KAUTH_GID_NONE) { VATTR_SET(&va, va_gid, uap->gid); + } xsecdst = NULL; - switch(uap->xsecurity) { - /* explicit remove request */ - case CAST_USER_ADDR_T((void *)1): /* _FILESEC_REMOVE_ACL */ + switch (uap->xsecurity) { + /* explicit remove request */ + case CAST_USER_ADDR_T((void *)1): /* _FILESEC_REMOVE_ACL */ VATTR_SET(&va, va_acl, NULL); break; - /* not being set */ + /* not being set */ case USER_ADDR_NULL: break; default: - if ((error = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0) - return(error); + if ((error = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0) { + return error; + } VATTR_SET(&va, va_acl, &xsecdst->fsec_acl); KAUTH_DEBUG("CHMOD - setting ACL with %d entries", va.va_acl->acl_entrycount); } @@ -6234,9 +6435,10 @@ chmod_extended(__unused proc_t p, struct chmod_extended_args *uap, __unused int3 error = chmodat(vfs_context_current(), uap->path, &va, AT_FDCWD, 0, UIO_USERSPACE); - if (xsecdst != NULL) + if (xsecdst != NULL) { kauth_filesec_free(xsecdst); - return(error); + } + return error; } /* @@ -6252,24 +6454,25 @@ fchmodat_internal(vfs_context_t ctx, user_addr_t path, int mode, int fd, VATTR_INIT(&va); VATTR_SET(&va, va_mode, mode & ALLPERMS); - return 
(chmodat(ctx, path, &va, fd, flag, segflg)); + return chmodat(ctx, path, &va, fd, flag, segflg); } int chmod(__unused proc_t p, struct chmod_args *uap, __unused int32_t *retval) { - return (fchmodat_internal(vfs_context_current(), uap->path, uap->mode, - AT_FDCWD, 0, UIO_USERSPACE)); + return fchmodat_internal(vfs_context_current(), uap->path, uap->mode, + AT_FDCWD, 0, UIO_USERSPACE); } int fchmodat(__unused proc_t p, struct fchmodat_args *uap, __unused int32_t *retval) { - if (uap->flag & ~AT_SYMLINK_NOFOLLOW) - return (EINVAL); + if (uap->flag & ~AT_SYMLINK_NOFOLLOW) { + return EINVAL; + } - return (fchmodat_internal(vfs_context_current(), uap->path, uap->mode, - uap->fd, uap->flag, UIO_USERSPACE)); + return fchmodat_internal(vfs_context_current(), uap->path, uap->mode, + uap->fd, uap->flag, UIO_USERSPACE); } /* @@ -6283,11 +6486,12 @@ fchmod1(__unused proc_t p, int fd, struct vnode_attr *vap) AUDIT_ARG(fd, fd); - if ((error = file_vnode(fd, &vp)) != 0) - return (error); + if ((error = file_vnode(fd, &vp)) != 0) { + return error; + } if ((error = vnode_getwithref(vp)) != 0) { file_drop(fd); - return(error); + return error; } AUDIT_ARG(vnpath, vp, ARG_VNODE1); @@ -6295,7 +6499,7 @@ fchmod1(__unused proc_t p, int fd, struct vnode_attr *vap) (void)vnode_put(vp); file_drop(fd); - return (error); + return error; } /* @@ -6326,42 +6530,47 @@ fchmod_extended(proc_t p, struct fchmod_extended_args *uap, __unused int32_t *re AUDIT_ARG(owner, uap->uid, uap->gid); VATTR_INIT(&va); - if (uap->mode != -1) + if (uap->mode != -1) { VATTR_SET(&va, va_mode, uap->mode & ALLPERMS); - if (uap->uid != KAUTH_UID_NONE) + } + if (uap->uid != KAUTH_UID_NONE) { VATTR_SET(&va, va_uid, uap->uid); - if (uap->gid != KAUTH_GID_NONE) + } + if (uap->gid != KAUTH_GID_NONE) { VATTR_SET(&va, va_gid, uap->gid); + } xsecdst = NULL; - switch(uap->xsecurity) { + switch (uap->xsecurity) { case USER_ADDR_NULL: VATTR_SET(&va, va_acl, NULL); break; - case CAST_USER_ADDR_T((void *)1): /* _FILESEC_REMOVE_ACL */ + case CAST_USER_ADDR_T((void *)1): /* _FILESEC_REMOVE_ACL */ VATTR_SET(&va, va_acl, NULL); break; - /* not being set */ + /* not being set */ case CAST_USER_ADDR_T(-1): break; default: - if ((error = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0) - return(error); + if ((error = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0) { + return error; + } VATTR_SET(&va, va_acl, &xsecdst->fsec_acl); } error = fchmod1(p, uap->fd, &va); - switch(uap->xsecurity) { + switch (uap->xsecurity) { case USER_ADDR_NULL: case CAST_USER_ADDR_T(-1): break; default: - if (xsecdst != NULL) + if (xsecdst != NULL) { kauth_filesec_free(xsecdst); + } } - return(error); + return error; } int @@ -6372,7 +6581,7 @@ fchmod(proc_t p, struct fchmod_args *uap, __unused int32_t *retval) VATTR_INIT(&va); VATTR_SET(&va, va_mode, uap->mode & ALLPERMS); - return(fchmod1(p, uap->fd, &va)); + return fchmod1(p, uap->fd, &va); } @@ -6382,7 +6591,7 @@ fchmod(proc_t p, struct fchmod_args *uap, __unused int32_t *retval) /* ARGSUSED */ static int fchownat_internal(vfs_context_t ctx, int fd, user_addr_t path, uid_t uid, - gid_t gid, int flag, enum uio_seg segflg) + gid_t gid, int flag, enum uio_seg segflg) { vnode_t vp; struct vnode_attr va; @@ -6397,34 +6606,41 @@ fchownat_internal(vfs_context_t ctx, int fd, user_addr_t path, uid_t uid, NDINIT(&nd, LOOKUP, OP_SETATTR, follow | AUDITVNPATH1, segflg, path, ctx); error = nameiat(&nd, fd); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; nameidone(&nd); VATTR_INIT(&va); - if (uid != 
(uid_t)VNOVAL) + if (uid != (uid_t)VNOVAL) { VATTR_SET(&va, va_uid, uid); - if (gid != (gid_t)VNOVAL) + } + if (gid != (gid_t)VNOVAL) { VATTR_SET(&va, va_gid, gid); + } #if CONFIG_MACF error = mac_vnode_check_setowner(ctx, vp, uid, gid); - if (error) + if (error) { goto out; + } #endif /* preflight and authorize attribute changes */ - if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) + if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) { goto out; - if (action && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0)) + } + if (action && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0)) { goto out; + } error = vnode_setattr(vp, &va, ctx); #if CONFIG_MACF - if (error == 0) + if (error == 0) { mac_vnode_notify_setowner(ctx, vp, uid, gid); + } #endif out: @@ -6432,35 +6648,37 @@ out: * EACCES is only allowed from namei(); permissions failure should * return EPERM, so we need to translate the error code. */ - if (error == EACCES) + if (error == EACCES) { error = EPERM; + } vnode_put(vp); - return (error); + return error; } int chown(__unused proc_t p, struct chown_args *uap, __unused int32_t *retval) { - return (fchownat_internal(vfs_context_current(), AT_FDCWD, uap->path, - uap->uid, uap->gid, 0, UIO_USERSPACE)); + return fchownat_internal(vfs_context_current(), AT_FDCWD, uap->path, + uap->uid, uap->gid, 0, UIO_USERSPACE); } int lchown(__unused proc_t p, struct lchown_args *uap, __unused int32_t *retval) { - return (fchownat_internal(vfs_context_current(), AT_FDCWD, uap->path, - uap->owner, uap->group, AT_SYMLINK_NOFOLLOW, UIO_USERSPACE)); + return fchownat_internal(vfs_context_current(), AT_FDCWD, uap->path, + uap->owner, uap->group, AT_SYMLINK_NOFOLLOW, UIO_USERSPACE); } int fchownat(__unused proc_t p, struct fchownat_args *uap, __unused int32_t *retval) { - if (uap->flag & ~AT_SYMLINK_NOFOLLOW) - return (EINVAL); + if (uap->flag & ~AT_SYMLINK_NOFOLLOW) { + return EINVAL; + } - return (fchownat_internal(vfs_context_current(), uap->fd, uap->path, - uap->uid, uap->gid, uap->flag, UIO_USERSPACE)); + return fchownat_internal(vfs_context_current(), uap->fd, uap->path, + uap->uid, uap->gid, uap->flag, UIO_USERSPACE); } /* @@ -6479,20 +6697,23 @@ fchown(__unused proc_t p, struct fchown_args *uap, __unused int32_t *retval) AUDIT_ARG(owner, uap->uid, uap->gid); AUDIT_ARG(fd, uap->fd); - if ( (error = file_vnode(uap->fd, &vp)) ) - return (error); + if ((error = file_vnode(uap->fd, &vp))) { + return error; + } - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } AUDIT_ARG(vnpath, vp, ARG_VNODE1); VATTR_INIT(&va); - if (uap->uid != VNOVAL) + if (uap->uid != VNOVAL) { VATTR_SET(&va, va_uid, uap->uid); - if (uap->gid != VNOVAL) + } + if (uap->gid != VNOVAL) { VATTR_SET(&va, va_gid, uap->gid); + } #if NAMEDSTREAMS /* chown calls are not allowed for resource forks. 
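The VNOVAL comparisons in fchownat_internal() above implement the POSIX convention that an owner or group of -1 means "leave this field unchanged": only identifiers that differ from -1 become active vnode attributes. A group-only change from userspace, where group 20 ("staff" on macOS) is purely illustrative:

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    /* A uid of -1 maps to VNOVAL in the kernel: ownership is left
     * untouched and only the group is changed. */
    if (chown("/tmp/demo", (uid_t)-1, (gid_t)20) != 0)
        perror("chown");
    return 0;
}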
*/ @@ -6504,29 +6725,33 @@ fchown(__unused proc_t p, struct fchown_args *uap, __unused int32_t *retval) #if CONFIG_MACF error = mac_vnode_check_setowner(ctx, vp, uap->uid, uap->gid); - if (error) + if (error) { goto out; + } #endif - /* preflight and authorize attribute changes */ - if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) + /* preflight and authorize attribute changes */ + if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) { goto out; + } if (action && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0)) { - if (error == EACCES) + if (error == EACCES) { error = EPERM; + } goto out; } error = vnode_setattr(vp, &va, ctx); #if CONFIG_MACF - if (error == 0) + if (error == 0) { mac_vnode_notify_setowner(ctx, vp, uap->uid, uap->gid); + } #endif out: (void)vnode_put(vp); file_drop(uap->fd); - return (error); + return error; } static int @@ -6544,15 +6769,17 @@ getutimes(user_addr_t usrtvp, struct timespec *tsp) if (IS_64BIT_PROCESS(current_proc())) { struct user64_timeval tv[2]; error = copyin(usrtvp, (void *)tv, sizeof(tv)); - if (error) - return (error); + if (error) { + return error; + } TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); TIMEVAL_TO_TIMESPEC(&tv[1], &tsp[1]); } else { struct user32_timeval tv[2]; error = copyin(usrtvp, (void *)tv, sizeof(tv)); - if (error) - return (error); + if (error) { + return error; + } TIMEVAL_TO_TIMESPEC(&tv[0], &tsp[0]); TIMEVAL_TO_TIMESPEC(&tv[1], &tsp[1]); } @@ -6562,7 +6789,7 @@ getutimes(user_addr_t usrtvp, struct timespec *tsp) static int setutimes(vfs_context_t ctx, vnode_t vp, const struct timespec *ts, - int nullflag) + int nullflag) { int error; struct vnode_attr va; @@ -6573,8 +6800,9 @@ setutimes(vfs_context_t ctx, vnode_t vp, const struct timespec *ts, VATTR_INIT(&va); VATTR_SET(&va, va_access_time, ts[0]); VATTR_SET(&va, va_modify_time, ts[1]); - if (nullflag) + if (nullflag) { va.va_vaflags |= VA_UTIMES_NULL; + } #if NAMEDSTREAMS /* utimes calls are not allowed for resource forks. */ @@ -6586,26 +6814,30 @@ setutimes(vfs_context_t ctx, vnode_t vp, const struct timespec *ts, #if CONFIG_MACF error = mac_vnode_check_setutimes(ctx, vp, ts[0], ts[1]); - if (error) + if (error) { goto out; + } #endif if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) { - if (!nullflag && error == EACCES) + if (!nullflag && error == EACCES) { error = EPERM; + } goto out; } /* since we may not need to auth anything, check here */ if ((action != 0) && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0)) { - if (!nullflag && error == EACCES) + if (!nullflag && error == EACCES) { error = EPERM; + } goto out; } error = vnode_setattr(vp, &va, ctx); #if CONFIG_MACF - if (error == 0) + if (error == 0) { mac_vnode_notify_setutimes(ctx, vp, ts[0], ts[1]); + } #endif out: @@ -6630,10 +6862,11 @@ utimes(__unused proc_t p, struct utimes_args *uap, __unused int32_t *retval) * name lookup first because auditing wants the path. */ NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } nameidone(&nd); /* @@ -6641,14 +6874,15 @@ utimes(__unused proc_t p, struct utimes_args *uap, __unused int32_t *retval) * the current time instead. 
*/ usrtvp = uap->tptr; - if ((error = getutimes(usrtvp, ts)) != 0) + if ((error = getutimes(usrtvp, ts)) != 0) { goto out; + } error = setutimes(ctx, nd.ni_vp, ts, usrtvp == USER_ADDR_NULL); out: vnode_put(nd.ni_vp); - return (error); + return error; } /* @@ -6665,19 +6899,21 @@ futimes(__unused proc_t p, struct futimes_args *uap, __unused int32_t *retval) AUDIT_ARG(fd, uap->fd); usrtvp = uap->tptr; - if ((error = getutimes(usrtvp, ts)) != 0) - return (error); - if ((error = file_vnode(uap->fd, &vp)) != 0) - return (error); - if((error = vnode_getwithref(vp))) { + if ((error = getutimes(usrtvp, ts)) != 0) { + return error; + } + if ((error = file_vnode(uap->fd, &vp)) != 0) { + return error; + } + if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } error = setutimes(vfs_context_current(), vp, ts, usrtvp == 0); vnode_put(vp); file_drop(uap->fd); - return(error); + return error; } /* @@ -6694,12 +6930,14 @@ truncate(__unused proc_t p, struct truncate_args *uap, __unused int32_t *retval) struct nameidata nd; kauth_action_t action; - if (uap->length < 0) - return(EINVAL); + if (uap->length < 0) { + return EINVAL; + } NDINIT(&nd, LOOKUP, OP_TRUNCATE, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); - if ((error = namei(&nd))) - return (error); + UIO_USERSPACE, uap->path, ctx); + if ((error = namei(&nd))) { + return error; + } vp = nd.ni_vp; nameidone(&nd); @@ -6709,24 +6947,28 @@ truncate(__unused proc_t p, struct truncate_args *uap, __unused int32_t *retval) #if CONFIG_MACF error = mac_vnode_check_truncate(ctx, NOCRED, vp); - if (error) + if (error) { goto out; + } #endif - if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) + if ((error = vnode_authattr(vp, &va, &action, ctx)) != 0) { goto out; - if ((action != 0) && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0)) + } + if ((action != 0) && ((error = vnode_authorize(vp, NULL, action, ctx)) != 0)) { goto out; + } error = vnode_setattr(vp, &va, ctx); #if CONFIG_MACF - if (error == 0) + if (error == 0) { mac_vnode_notify_truncate(ctx, NOCRED, vp); + } #endif out: vnode_put(vp); - return (error); + return error; } /* @@ -6740,15 +6982,16 @@ ftruncate(proc_t p, struct ftruncate_args *uap, int32_t *retval) struct vnode_attr va; vnode_t vp; struct fileproc *fp; - int error ; + int error; int fd = uap->fd; AUDIT_ARG(fd, uap->fd); - if (uap->length < 0) - return(EINVAL); + if (uap->length < 0) { + return EINVAL; + } - if ( (error = fp_lookup(p,fd,&fp,0)) ) { - return(error); + if ((error = fp_lookup(p, fd, &fp, 0))) { + return error; } switch (FILEGLOB_DTYPE(fp->f_fglob)) { @@ -6789,14 +7032,15 @@ ftruncate(proc_t p, struct ftruncate_args *uap, int32_t *retval) error = vnode_setattr(vp, &va, ctx); #if CONFIG_MACF - if (error == 0) + if (error == 0) { mac_vnode_notify_truncate(ctx, fp->f_fglob->fg_cred, vp); + } #endif (void)vnode_put(vp); out: file_drop(fd); - return (error); + return error; } @@ -6808,7 +7052,7 @@ int fsync(proc_t p, struct fsync_args *uap, __unused int32_t *retval) { __pthread_testcancel(1); - return(fsync_common(p, uap, MNT_WAIT)); + return fsync_common(p, uap, MNT_WAIT); } @@ -6822,7 +7066,7 @@ fsync(proc_t p, struct fsync_args *uap, __unused int32_t *retval) int fsync_nocancel(proc_t p, struct fsync_nocancel_args *uap, __unused int32_t *retval) { - return(fsync_common(p, (struct fsync_args *)uap, MNT_WAIT)); + return fsync_common(p, (struct fsync_args *)uap, MNT_WAIT); } @@ -6834,7 +7078,7 @@ int fdatasync(proc_t p, struct fdatasync_args *uap, __unused int32_t 
*retval) { __pthread_testcancel(1); - return(fsync_common(p, (struct fsync_args *)uap, MNT_DWAIT)); + return fsync_common(p, (struct fsync_args *)uap, MNT_DWAIT); } @@ -6873,11 +7117,12 @@ fsync_common(proc_t p, struct fsync_args *uap, int flags) AUDIT_ARG(fd, uap->fd); - if ( (error = fp_getfvp(p, uap->fd, &fp, &vp)) ) - return (error); - if ( (error = vnode_getwithref(vp)) ) { + if ((error = fp_getfvp(p, uap->fd, &fp, &vp))) { + return error; + } + if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } AUDIT_ARG(vnpath, vp, ARG_VNODE1); @@ -6897,7 +7142,7 @@ fsync_common(proc_t p, struct fsync_args *uap, int flags) (void)vnode_put(vp); file_drop(uap->fd); - return (error); + return error; } /* @@ -6923,18 +7168,19 @@ copyfile(__unused proc_t p, struct copyfile_args *uap, __unused int32_t *retval) /* Check that the flags are valid. */ if (uap->flags & ~CPF_MASK) { - return(EINVAL); + return EINVAL; } NDINIT(&fromnd, LOOKUP, OP_COPYFILE, AUDITVNPATH1, - UIO_USERSPACE, uap->from, ctx); - if ((error = namei(&fromnd))) - return (error); + UIO_USERSPACE, uap->from, ctx); + if ((error = namei(&fromnd))) { + return error; + } fvp = fromnd.ni_vp; NDINIT(&tond, CREATE, OP_LINK, - LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | AUDITVNPATH2 | CN_NBMOUNTLOOK, - UIO_USERSPACE, uap->to, ctx); + LOCKPARENT | LOCKLEAF | NOCACHE | SAVESTART | AUDITVNPATH2 | CN_NBMOUNTLOOK, + UIO_USERSPACE, uap->to, ctx); if ((error = namei(&tond))) { goto out1; } @@ -6967,8 +7213,9 @@ copyfile(__unused proc_t p, struct copyfile_args *uap, __unused int32_t *retval) * path. We choose to ignore this failure. */ error = vn_authorize_unlink(tdvp, tvp, &tond.ni_cnd, ctx, NULL); - if (error && error != ENOENT) + if (error && error != ENOENT) { goto out; + } error = 0; } @@ -6977,26 +7224,31 @@ copyfile(__unused proc_t p, struct copyfile_args *uap, __unused int32_t *retval) VATTR_SET(&va, va_type, fvp->v_type); /* Mask off all but regular access permissions */ VATTR_SET(&va, va_mode, - ((((uap->mode &~ fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT) & ACCESSPERMS)); + ((((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT) & ACCESSPERMS)); error = mac_vnode_check_create(ctx, tdvp, &tond.ni_cnd, &va); - if (error) + if (error) { goto out; + } #endif /* CONFIG_MACF */ - if ((error = vnode_authorize(tdvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0) + if ((error = vnode_authorize(tdvp, NULL, KAUTH_VNODE_ADD_FILE, ctx)) != 0) { goto out; + } - if (fvp == tdvp) + if (fvp == tdvp) { error = EINVAL; + } /* * If source is the same as the destination (that is the * same inode number) then there is nothing to do. 
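fsync(), fsync_nocancel() and fdatasync() above all funnel into fsync_common(); the only difference is the wait flag, MNT_WAIT for a full flush versus MNT_DWAIT for fdatasync's data-only flush. A userspace sketch of calling each, with /tmp/demo as a throwaway path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    int fd = open("/tmp/demo", O_WRONLY | O_CREAT, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    (void)write(fd, "x", 1);

    /* fdatasync (MNT_DWAIT) may skip metadata not needed to read the
     * data back; fsync (MNT_WAIT) flushes everything. */
    if (fdatasync(fd) != 0)
        perror("fdatasync");
    if (fsync(fd) != 0)
        perror("fsync");
    close(fd);
    return 0;
}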
* (fixed to have POSIX semantics - CSM 3/2/98) */ - if (fvp == tvp) + if (fvp == tvp) { error = -1; - if (!error) - error = VNOP_COPYFILE(fvp, tdvp, tvp, &tond.ni_cnd, uap->mode, uap->flags, ctx); + } + if (!error) { + error = VNOP_COPYFILE(fvp, tdvp, tvp, &tond.ni_cnd, uap->mode, uap->flags, ctx); + } out: sdvp = tond.ni_startdir; /* @@ -7005,8 +7257,9 @@ out: */ nameidone(&tond); - if (tvp) + if (tvp) { vnode_put(tvp); + } vnode_put(tdvp); vnode_put(sdvp); out1: @@ -7014,9 +7267,10 @@ out1: nameidone(&fromnd); - if (error == -1) - return (0); - return (error); + if (error == -1) { + return 0; + } + return error; } #define CLONE_SNAPSHOT_FALLBACKS_ENABLED 1 @@ -7046,19 +7300,19 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, v_type = vnode_vtype(fvp); switch (v_type) { case VLNK: - /* FALLTHRU */ + /* FALLTHRU */ case VREG: action = KAUTH_VNODE_ADD_FILE; break; case VDIR: if (vnode_isvroot(fvp) || vnode_ismount(fvp) || fvp->v_mountedhere) { - return (EINVAL); + return EINVAL; } action = KAUTH_VNODE_ADD_SUBDIRECTORY; break; default: - return (EINVAL); + return EINVAL; } AUDIT_ARG(fd2, dst_dirfd); @@ -7067,8 +7321,9 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, follow = (flags & CLONE_NOFOLLOW) ? NOFOLLOW : FOLLOW; NDINIT(&tond, CREATE, OP_LINK, follow | WANTPARENT | AUDITVNPATH2, UIO_USERSPACE, dst, ctx); - if ((error = nameiat(&tond, dst_dirfd))) - return (error); + if ((error = nameiat(&tond, dst_dirfd))) { + return error; + } cnp = &tond.ni_cnd; tdvp = tond.ni_dvp; tvp = tond.ni_vp; @@ -7087,17 +7342,21 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, } #if CONFIG_MACF - if ((error = mac_vnode_check_clone(ctx, tdvp, fvp, cnp))) + if ((error = mac_vnode_check_clone(ctx, tdvp, fvp, cnp))) { goto out; + } #endif - if ((error = vnode_authorize(tdvp, NULL, action, ctx))) + if ((error = vnode_authorize(tdvp, NULL, action, ctx))) { goto out; + } action = KAUTH_VNODE_GENERIC_READ_BITS; - if (data_read_authorised) + if (data_read_authorised) { action &= ~KAUTH_VNODE_READ_DATA; - if ((error = vnode_authorize(fvp, NULL, action, ctx))) + } + if ((error = vnode_authorize(fvp, NULL, action, ctx))) { goto out; + } /* * certain attributes may need to be changed from the source, we ask for @@ -7110,8 +7369,9 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, VATTR_WANTED(&va, va_flags); VATTR_WANTED(&va, va_acl); - if ((error = vnode_getattr(fvp, &va, ctx)) != 0) + if ((error = vnode_getattr(fvp, &va, ctx)) != 0) { goto out; + } VATTR_INIT(&nva); VATTR_SET(&nva, va_type, v_type); @@ -7125,8 +7385,9 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, error = vnode_authattr_new(tdvp, &nva, 0, ctx); } else { error = vn_attribute_prepare(tdvp, &nva, &defaulted, ctx); - if (error) + if (error) { goto out; + } attr_cleanup = TRUE; } @@ -7139,16 +7400,19 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, * from source as well. 
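The error = -1 assignment in copyfile() above is a sentinel rather than an errno: copying a file onto itself is defined to succeed as a no-op, so -1 suppresses the VNOP_COPYFILE call and is translated back to 0 on the way out. A stripped-down sketch of the pattern, where perform_copy() is a hypothetical stand-in for the real work:

static int perform_copy(void);      /* hypothetical worker */

static int
do_copy(int same_file)
{
    int error = 0;

    if (same_file)
        error = -1;             /* sentinel: nothing to do */
    if (!error)
        error = perform_copy();

    if (error == -1)
        return 0;               /* POSIX: self-copy is a no-op */
    return error;
}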
*/ if (!(flags & CLONE_NOOWNERCOPY) && vfs_context_issuser(ctx)) { - if (VATTR_IS_SUPPORTED(&va, va_uid)) + if (VATTR_IS_SUPPORTED(&va, va_uid)) { VATTR_SET(&nva, va_uid, va.va_uid); - if (VATTR_IS_SUPPORTED(&va, va_gid)) + } + if (VATTR_IS_SUPPORTED(&va, va_gid)) { VATTR_SET(&nva, va_gid, va.va_gid); + } } else { vnop_flags |= VNODE_CLONEFILE_NOOWNERCOPY; } - if (VATTR_IS_SUPPORTED(&va, va_mode)) + if (VATTR_IS_SUPPORTED(&va, va_mode)) { VATTR_SET(&nva, va_mode, va.va_mode); + } if (VATTR_IS_SUPPORTED(&va, va_flags)) { VATTR_SET(&nva, va_flags, ((va.va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)) | /* Turn off from source */ @@ -7158,7 +7422,7 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, error = VNOP_CLONEFILE(fvp, tdvp, &tvp, cnp, &nva, vnop_flags, ctx); if (!error && tvp) { - int update_flags = 0; + int update_flags = 0; #if CONFIG_FSE int fsevent; #endif /* CONFIG_FSE */ @@ -7171,14 +7435,17 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, * If some of the requested attributes weren't handled by the * VNOP, use our fallback code. */ - if (!VATTR_ALL_SUPPORTED(&va)) + if (!VATTR_ALL_SUPPORTED(&va)) { (void)vnode_setattr_fallback(tvp, &nva, ctx); + } // Make sure the name & parent pointers are hooked up - if (tvp->v_name == NULL) + if (tvp->v_name == NULL) { update_flags |= VNODE_UPDATE_NAME; - if (tvp->v_parent == NULLVP) + } + if (tvp->v_parent == NULLVP) { update_flags |= VNODE_UPDATE_PARENT; + } if (update_flags) { (void)vnode_update_identity(tvp, tdvp, cnp->cn_nameptr, @@ -7188,7 +7455,7 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, #if CONFIG_FSE switch (vnode_vtype(tvp)) { case VLNK: - /* FALLTHRU */ + /* FALLTHRU */ case VREG: fsevent = FSE_CREATE_FILE; break; @@ -7218,15 +7485,18 @@ clonefile_internal(vnode_t fvp, boolean_t data_read_authorised, int dst_dirfd, } out: - if (attr_cleanup) + if (attr_cleanup) { vn_attribute_cleanup(&nva, defaulted); - if (free_src_acl && va.va_acl) + } + if (free_src_acl && va.va_acl) { kauth_acl_free(va.va_acl); + } nameidone(&tond); - if (tvp) + if (tvp) { vnode_put(tvp); + } vnode_put(tdvp); - return (error); + return error; } /* @@ -7244,16 +7514,18 @@ clonefileat(__unused proc_t p, struct clonefileat_args *uap, vfs_context_t ctx = vfs_context_current(); /* Check that the flags are valid. */ - if (uap->flags & ~(CLONE_NOFOLLOW | CLONE_NOOWNERCOPY)) - return (EINVAL); + if (uap->flags & ~(CLONE_NOFOLLOW | CLONE_NOOWNERCOPY)) { + return EINVAL; + } AUDIT_ARG(fd, uap->src_dirfd); follow = (uap->flags & CLONE_NOFOLLOW) ? NOFOLLOW : FOLLOW; NDINIT(&fromnd, LOOKUP, OP_COPYFILE, follow | AUDITVNPATH1, UIO_USERSPACE, uap->src, ctx); - if ((error = nameiat(&fromnd, uap->src_dirfd))) - return (error); + if ((error = nameiat(&fromnd, uap->src_dirfd))) { + return error; + } fvp = fromnd.ni_vp; nameidone(&fromnd); @@ -7262,7 +7534,7 @@ clonefileat(__unused proc_t p, struct clonefileat_args *uap, uap->flags, ctx); vnode_put(fvp); - return (error); + return error; } int @@ -7275,13 +7547,15 @@ fclonefileat(__unused proc_t p, struct fclonefileat_args *uap, vfs_context_t ctx = vfs_context_current(); /* Check that the flags are valid. 
*/ - if (uap->flags & ~(CLONE_NOFOLLOW | CLONE_NOOWNERCOPY)) - return (EINVAL); + if (uap->flags & ~(CLONE_NOFOLLOW | CLONE_NOOWNERCOPY)) { + return EINVAL; + } AUDIT_ARG(fd, uap->src_fd); error = fp_getfvp(p, uap->src_fd, &fp, &fvp); - if (error) - return (error); + if (error) { + return error; + } if ((fp->f_fglob->fg_flag & FREAD) == 0) { AUDIT_ARG(vnpath_withref, fvp, ARG_VNODE1); @@ -7289,8 +7563,9 @@ fclonefileat(__unused proc_t p, struct fclonefileat_args *uap, goto out; } - if ((error = vnode_getwithref(fvp))) + if ((error = vnode_getwithref(fvp))) { goto out; + } AUDIT_ARG(vnpath, fvp, ARG_VNODE1); @@ -7300,7 +7575,7 @@ fclonefileat(__unused proc_t p, struct fclonefileat_args *uap, vnode_put(fvp); out: file_drop(uap->src_fd); - return (error); + return error; } /* @@ -7312,11 +7587,13 @@ static int renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from, int tofd, user_addr_t to, int segflg, vfs_rename_flags_t flags) { - if (flags & ~VFS_RENAME_FLAGS_MASK) + if (flags & ~VFS_RENAME_FLAGS_MASK) { return EINVAL; + } - if (ISSET(flags, VFS_RENAME_SWAP) && ISSET(flags, VFS_RENAME_EXCL)) + if (ISSET(flags, VFS_RENAME_SWAP) && ISSET(flags, VFS_RENAME_EXCL)) { return EINVAL; + } vnode_t tvp, tdvp; vnode_t fvp, fdvp; @@ -7330,14 +7607,14 @@ renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from, int has_listeners; const char *oname = NULL; char *from_name = NULL, *to_name = NULL; - int from_len=0, to_len=0; + int from_len = 0, to_len = 0; int holding_mntlock; mount_t locked_mp = NULL; vnode_t oparent = NULLVP; #if CONFIG_FSE fse_info from_finfo, to_finfo; #endif - int from_truncated=0, to_truncated; + int from_truncated = 0, to_truncated; int batched = 0; struct vnode_attr *fvap, *tvap; int continuing = 0; @@ -7369,22 +7646,25 @@ retry: continue_lookup: if ((fromnd->ni_flag & NAMEI_CONTLOOKUP) != 0 || !continuing) { - if ( (error = nameiat(fromnd, fromfd)) ) + if ((error = nameiat(fromnd, fromfd))) { goto out1; + } fdvp = fromnd->ni_dvp; fvp = fromnd->ni_vp; - if (fvp && fvp->v_type == VDIR) + if (fvp && fvp->v_type == VDIR) { tond->ni_cnd.cn_flags |= WILLBEDIR; + } } if ((tond->ni_flag & NAMEI_CONTLOOKUP) != 0 || !continuing) { - if ( (error = nameiat(tond, tofd)) ) { + if ((error = nameiat(tond, tofd))) { /* * Translate error code for rename("dir1", "dir2/."). */ - if (error == EISDIR && fvp->v_type == VDIR) + if (error == EISDIR && fvp->v_type == VDIR) { error = EINVAL; + } goto out1; } tdvp = tond->ni_dvp; @@ -7436,7 +7716,7 @@ continue_lookup: } if (tvp) { - get_fse_info(tvp, &to_finfo, ctx); + get_fse_info(tvp, &to_finfo, ctx); } else if (batched) { error = vfs_get_notify_attributes(&__rename_data->tv_attr); if (error) { @@ -7491,7 +7771,7 @@ continue_lookup: * For success, either fvp must be on the same mount as tdvp, or fvp must sit atop a vnode on the same mount as tdvp. * Suppose fdvp and tdvp are not on the same mount. * If fvp is on the same mount as tdvp, then fvp is not on the same mount as fdvp, so fvp is the root of its filesystem. If fvp is the root, - * then you can't move it to within another dir on the same mountpoint. + * then you can't move it to within another dir on the same mountpoint. * If fvp sits atop a vnode on the same mount as fdvp, then that vnode must be part of the same mount as fdvp, which is a contradiction. * * If this check passes, then we are safe to pass these vnodes to the same FS. @@ -7522,13 +7802,13 @@ continue_lookup: } } - /* - * If the source and destination are the same (i.e. 
they're - * links to the same vnode) and the target file system is - * case sensitive, then there is nothing to do. + /* + * If the source and destination are the same (i.e. they're + * links to the same vnode) and the target file system is + * case sensitive, then there is nothing to do. * * XXX Come back to this. - */ + */ if (fvp == tvp) { int pathconf_val; @@ -7560,17 +7840,17 @@ continue_lookup: */ if ((fvp->v_flag & VROOT) && (fvp->v_type == VDIR) && - (tvp == NULL) && - (fvp->v_mountedhere == NULL) && - (fdvp == tdvp) && - ((fvp->v_mount->mnt_flag & (MNT_UNION | MNT_ROOTFS)) == 0) && + (tvp == NULL) && + (fvp->v_mountedhere == NULL) && + (fdvp == tdvp) && + ((fvp->v_mount->mnt_flag & (MNT_UNION | MNT_ROOTFS)) == 0) && (fvp->v_mount->mnt_vnodecovered != NULLVP)) { vnode_t coveredvp; /* switch fvp to the covered vnode */ coveredvp = fvp->v_mount->mnt_vnodecovered; - if ( (vnode_getwithref(coveredvp)) ) { - error = ENOENT; + if ((vnode_getwithref(coveredvp))) { + error = ENOENT; goto out1; } vnode_put(fvp); @@ -7609,24 +7889,24 @@ continue_lookup: */ if (fvp == tvp && fdvp == tdvp) { if (fromnd->ni_cnd.cn_namelen == tond->ni_cnd.cn_namelen && - !bcmp(fromnd->ni_cnd.cn_nameptr, tond->ni_cnd.cn_nameptr, - fromnd->ni_cnd.cn_namelen)) { + !bcmp(fromnd->ni_cnd.cn_nameptr, tond->ni_cnd.cn_nameptr, + fromnd->ni_cnd.cn_namelen)) { goto out1; } } if (holding_mntlock && fvp->v_mount != locked_mp) { - /* + /* * we're holding a reference and lock * on locked_mp, but it no longer matches * what we want to do... so drop our hold */ mount_unlock_renames(locked_mp); mount_drop(locked_mp, 0); - holding_mntlock = 0; + holding_mntlock = 0; } if (tdvp != fdvp && fvp->v_type == VDIR) { - /* + /* * serialize renames that re-shape * the tree... if holding_mntlock is * set, then we're ready to go... @@ -7637,8 +7917,8 @@ continue_lookup: * then finally start the lookup * process over with the lock held */ - if (!holding_mntlock) { - /* + if (!holding_mntlock) { + /* * need to grab a reference on * the mount point before we * drop all the iocounts... once @@ -7654,8 +7934,9 @@ continue_lookup: */ nameidone(tond); - if (tvp) - vnode_put(tvp); + if (tvp) { + vnode_put(tvp); + } vnode_put(tdvp); /* @@ -7673,7 +7954,7 @@ continue_lookup: goto retry; } } else { - /* + /* * when we dropped the iocounts to take * the lock, we allowed the identity of * the various vnodes to change... if they did, @@ -7683,10 +7964,10 @@ continue_lookup: * so we're free to drop the lock at this point * and continue on */ - if (holding_mntlock) { + if (holding_mntlock) { mount_unlock_renames(locked_mp); mount_drop(locked_mp, 0); - holding_mntlock = 0; + holding_mntlock = 0; } } @@ -7696,8 +7977,8 @@ continue_lookup: skipped_lookup: error = vn_rename(fdvp, &fvp, &fromnd->ni_cnd, fvap, - tdvp, &tvp, &tond->ni_cnd, tvap, - flags, ctx); + tdvp, &tvp, &tond->ni_cnd, tvap, + flags, ctx); if (holding_mntlock) { /* @@ -7753,12 +8034,12 @@ skipped_lookup: * Ignore result of kauth_authorize_fileop call. 
*/ kauth_authorize_fileop(vfs_context_ucred(ctx), - KAUTH_FILEOP_RENAME, - (uintptr_t)from_name, (uintptr_t)to_name); + KAUTH_FILEOP_RENAME, + (uintptr_t)from_name, (uintptr_t)to_name); if (flags & VFS_RENAME_SWAP) { kauth_authorize_fileop(vfs_context_ucred(ctx), - KAUTH_FILEOP_RENAME, - (uintptr_t)to_name, (uintptr_t)from_name); + KAUTH_FILEOP_RENAME, + (uintptr_t)to_name, (uintptr_t)from_name); } #if CONFIG_FSE @@ -7777,11 +8058,11 @@ skipped_lookup: if (tvp) { add_fsevent(FSE_RENAME, ctx, - FSE_ARG_STRING, from_len, from_name, - FSE_ARG_FINFO, &from_finfo, - FSE_ARG_STRING, to_len, to_name, - FSE_ARG_FINFO, &to_finfo, - FSE_ARG_DONE); + FSE_ARG_STRING, from_len, from_name, + FSE_ARG_FINFO, &from_finfo, + FSE_ARG_STRING, to_len, to_name, + FSE_ARG_FINFO, &to_finfo, + FSE_ARG_DONE); if (flags & VFS_RENAME_SWAP) { /* * Strictly speaking, swap is the equivalent of @@ -7790,18 +8071,18 @@ skipped_lookup: * two. */ add_fsevent(FSE_RENAME, ctx, - FSE_ARG_STRING, to_len, to_name, - FSE_ARG_FINFO, &to_finfo, - FSE_ARG_STRING, from_len, from_name, - FSE_ARG_FINFO, &from_finfo, - FSE_ARG_DONE); - } - } else { - add_fsevent(FSE_RENAME, ctx, + FSE_ARG_STRING, to_len, to_name, + FSE_ARG_FINFO, &to_finfo, FSE_ARG_STRING, from_len, from_name, FSE_ARG_FINFO, &from_finfo, - FSE_ARG_STRING, to_len, to_name, FSE_ARG_DONE); + } + } else { + add_fsevent(FSE_RENAME, ctx, + FSE_ARG_STRING, from_len, from_name, + FSE_ARG_FINFO, &from_finfo, + FSE_ARG_STRING, to_len, to_name, + FSE_ARG_DONE); } } #endif /* CONFIG_FSE */ @@ -7810,7 +8091,7 @@ skipped_lookup: * update filesystem's mount point data */ if (mntrename) { - char *cp, *pathend, *mpname; + char *cp, *pathend, *mpname; char * tobuf; struct mount *mp; int maxlen; @@ -7819,26 +8100,29 @@ skipped_lookup: mp = fvp->v_mountedhere; if (vfs_busy(mp, LK_NOWAIT)) { - error = EBUSY; + error = EBUSY; goto out1; } MALLOC_ZONE(tobuf, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (UIO_SEG_IS_USER_SPACE(segflg)) + if (UIO_SEG_IS_USER_SPACE(segflg)) { error = copyinstr(to, tobuf, MAXPATHLEN, &len); - else + } else { error = copystr((void *)to, tobuf, MAXPATHLEN, &len); + } if (!error) { - /* find current mount point prefix */ - pathend = &mp->mnt_vfsstat.f_mntonname[0]; + /* find current mount point prefix */ + pathend = &mp->mnt_vfsstat.f_mntonname[0]; for (cp = pathend; *cp != '\0'; ++cp) { - if (*cp == '/') - pathend = cp + 1; + if (*cp == '/') { + pathend = cp + 1; + } } /* find last component of target name */ for (mpname = cp = tobuf; *cp != '\0'; ++cp) { - if (*cp == '/') - mpname = cp + 1; + if (*cp == '/') { + mpname = cp + 1; + } } /* append name to prefix */ maxlen = MAXPATHLEN - (pathend - mp->mnt_vfsstat.f_mntonname); @@ -7858,14 +8142,15 @@ skipped_lookup: * XXX oparent and oname may not be set in the compound vnop case */ if (batched || (oname == fvp->v_name && oparent == fvp->v_parent)) { - int update_flags; + int update_flags; - update_flags = VNODE_UPDATE_NAME; + update_flags = VNODE_UPDATE_NAME; - if (fdvp != tdvp) - update_flags |= VNODE_UPDATE_PARENT; + if (fdvp != tdvp) { + update_flags |= VNODE_UPDATE_PARENT; + } - vnode_update_identity(fvp, tdvp, tond->ni_cnd.cn_nameptr, tond->ni_cnd.cn_namelen, tond->ni_cnd.cn_hash, update_flags); + vnode_update_identity(fvp, tdvp, tond->ni_cnd.cn_nameptr, tond->ni_cnd.cn_namelen, tond->ni_cnd.cn_hash, update_flags); } out1: if (to_name != NULL) { @@ -7877,7 +8162,7 @@ out1: from_name = NULL; } if (holding_mntlock) { - mount_unlock_renames(locked_mp); + mount_unlock_renames(locked_mp); mount_drop(locked_mp, 0); 
holding_mntlock = 0; } @@ -7888,9 +8173,10 @@ out1: */ nameidone(tond); - if (tvp) - vnode_put(tvp); - vnode_put(tdvp); + if (tvp) { + vnode_put(tvp); + } + vnode_put(tdvp); } if (fdvp) { /* @@ -7899,9 +8185,10 @@ out1: */ nameidone(fromnd); - if (fvp) - vnode_put(fvp); - vnode_put(fdvp); + if (fvp) { + vnode_put(fvp); + } + vnode_put(fdvp); } /* @@ -7914,17 +8201,18 @@ out1: } FREE(__rename_data, M_TEMP); - return (error); + return error; } int rename(__unused proc_t p, struct rename_args *uap, __unused int32_t *retval) { - return (renameat_internal(vfs_context_current(), AT_FDCWD, uap->from, - AT_FDCWD, uap->to, UIO_USERSPACE, 0)); + return renameat_internal(vfs_context_current(), AT_FDCWD, uap->from, + AT_FDCWD, uap->to, UIO_USERSPACE, 0); } -int renameatx_np(__unused proc_t p, struct renameatx_np_args *uap, __unused int32_t *retval) +int +renameatx_np(__unused proc_t p, struct renameatx_np_args *uap, __unused int32_t *retval) { return renameat_internal( vfs_context_current(), @@ -7936,8 +8224,8 @@ int renameatx_np(__unused proc_t p, struct renameatx_np_args *uap, __unused int3 int renameat(__unused proc_t p, struct renameat_args *uap, __unused int32_t *retval) { - return (renameat_internal(vfs_context_current(), uap->fromfd, uap->from, - uap->tofd, uap->to, UIO_USERSPACE, 0)); + return renameat_internal(vfs_context_current(), uap->fromfd, uap->from, + uap->tofd, uap->to, UIO_USERSPACE, 0); } /* @@ -7954,7 +8242,7 @@ static int mkdir1at(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap, int fd, enum uio_seg segflg) { - vnode_t vp, dvp; + vnode_t vp, dvp; int error; int update_flags = 0; int batched; @@ -7962,14 +8250,15 @@ mkdir1at(vfs_context_t ctx, user_addr_t path, struct vnode_attr *vap, int fd, AUDIT_ARG(mode, vap->va_mode); NDINIT(&nd, CREATE, OP_MKDIR, LOCKPARENT | AUDITVNPATH1, segflg, - path, ctx); + path, ctx); nd.ni_cnd.cn_flags |= WILLBEDIR; nd.ni_flag = NAMEI_COMPOUNDMKDIR; continue_lookup: error = nameiat(&nd, fd); - if (error) - return (error); + if (error) { + return error; + } dvp = nd.ni_dvp; vp = nd.ni_vp; @@ -8002,7 +8291,7 @@ continue_lookup: * rather than EACCESS if the target exists. 
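renameatx_np() above is the Darwin-specific entry point for the flags validated at the top of renameat_internal(), where VFS_RENAME_SWAP and VFS_RENAME_EXCL are rejected in combination; the userspace RENAME_SWAP and RENAME_EXCL constants mirror them. A short sketch of an atomic exchange, with illustrative paths:

#include <fcntl.h>  /* AT_FDCWD */
#include <stdio.h>  /* renameatx_np, RENAME_SWAP, RENAME_EXCL */

int
main(void)
{
    /* Atomically exchange the two names; combining RENAME_SWAP with
     * RENAME_EXCL would earn EINVAL, as enforced above. */
    if (renameatx_np(AT_FDCWD, "config.new", AT_FDCWD, "config",
        RENAME_SWAP) != 0)
        perror("renameatx_np");
    return 0;
}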
*/ NDINIT(&nd, LOOKUP, OP_MKDIR, AUDITVNPATH1, segflg, - path, ctx); + path, ctx); error2 = nameiat(&nd, fd); if (error2) { goto out; @@ -8029,13 +8318,16 @@ continue_lookup: } // Make sure the name & parent pointers are hooked up - if (vp->v_name == NULL) - update_flags |= VNODE_UPDATE_NAME; - if (vp->v_parent == NULLVP) - update_flags |= VNODE_UPDATE_PARENT; + if (vp->v_name == NULL) { + update_flags |= VNODE_UPDATE_NAME; + } + if (vp->v_parent == NULLVP) { + update_flags |= VNODE_UPDATE_PARENT; + } - if (update_flags) - vnode_update_identity(vp, dvp, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, update_flags); + if (update_flags) { + vnode_update_identity(vp, dvp, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen, nd.ni_cnd.cn_hash, update_flags); + } #if CONFIG_FSE add_fsevent(FSE_CREATE_DIR, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE); @@ -8048,12 +8340,14 @@ out: */ nameidone(&nd); - if (vp) + if (vp) { vnode_put(vp); - if (dvp) + } + if (dvp) { vnode_put(dvp); + } - return (error); + return error; } /* @@ -8082,18 +8376,21 @@ mkdir_extended(proc_t p, struct mkdir_extended_args *uap, __unused int32_t *retv xsecdst = NULL; if ((uap->xsecurity != USER_ADDR_NULL) && - ((ciferror = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0)) + ((ciferror = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0)) { return ciferror; + } VATTR_INIT(&va); VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask); - if (xsecdst != NULL) + if (xsecdst != NULL) { VATTR_SET(&va, va_acl, &xsecdst->fsec_acl); + } ciferror = mkdir1at(vfs_context_current(), uap->path, &va, AT_FDCWD, UIO_USERSPACE); - if (xsecdst != NULL) + if (xsecdst != NULL) { kauth_filesec_free(xsecdst); + } return ciferror; } @@ -8105,8 +8402,8 @@ mkdir(proc_t p, struct mkdir_args *uap, __unused int32_t *retval) VATTR_INIT(&va); VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask); - return (mkdir1at(vfs_context_current(), uap->path, &va, AT_FDCWD, - UIO_USERSPACE)); + return mkdir1at(vfs_context_current(), uap->path, &va, AT_FDCWD, + UIO_USERSPACE); } int @@ -8117,8 +8414,8 @@ mkdirat(proc_t p, struct mkdirat_args *uap, __unused int32_t *retval) VATTR_INIT(&va); VATTR_SET(&va, va_mode, (uap->mode & ACCESSPERMS) & ~p->p_fd->fd_cmask); - return(mkdir1at(vfs_context_current(), uap->path, &va, uap->fd, - UIO_USERSPACE)); + return mkdir1at(vfs_context_current(), uap->path, &va, uap->fd, + UIO_USERSPACE); } static int @@ -8129,7 +8426,7 @@ rmdirat_internal(vfs_context_t ctx, int fd, user_addr_t dirpath, int error; struct nameidata nd; char *path = NULL; - int len=0; + int len = 0; int has_listeners = 0; int need_event = 0; int truncated = 0; @@ -8156,8 +8453,9 @@ continue_lookup: vap = NULL; error = nameiat(&nd, fd); - if (error) - return (error); + if (error) { + return error; + } dvp = nd.ni_dvp; vp = nd.ni_vp; @@ -8175,9 +8473,9 @@ continue_lookup: #if DEVELOPMENT || DEBUG /* - * XXX VSWAP: Check for entitlements or special flag here - * so we can restrict access appropriately. - */ + * XXX VSWAP: Check for entitlements or special flag here + * so we can restrict access appropriately. 
+ */ #else /* DEVELOPMENT || DEBUG */ if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) { @@ -8292,8 +8590,9 @@ continue_lookup: /* * Assuming everything went well, we will try the RMDIR again */ - if (!error) + if (!error) { error = vn_rmdir(dvp, &vp, &nd, vap, ctx); + } } #endif /* CONFIG_APPLEDOUBLE */ /* @@ -8303,9 +8602,9 @@ continue_lookup: if (!error) { if (has_listeners) { kauth_authorize_fileop(vfs_context_ucred(ctx), - KAUTH_FILEOP_DELETE, - (uintptr_t)vp, - (uintptr_t)path); + KAUTH_FILEOP_DELETE, + (uintptr_t)vp, + (uintptr_t)path); } if (vp->v_flag & VISHARDLINK) { @@ -8320,9 +8619,9 @@ continue_lookup: vnode_get_fse_info_from_vap(vp, &finfo, vap); } add_fsevent(FSE_DELETE, ctx, - FSE_ARG_STRING, len, path, - FSE_ARG_FINFO, &finfo, - FSE_ARG_DONE); + FSE_ARG_STRING, len, path, + FSE_ARG_FINFO, &finfo, + FSE_ARG_DONE); } #endif } @@ -8339,19 +8638,18 @@ out: nameidone(&nd); vnode_put(dvp); - if (vp) + if (vp) { vnode_put(vp); + } if (restart_flag == 0) { wakeup_one((caddr_t)vp); - return (error); + return error; } tsleep(vp, PVFS, "rm AD", 1); - } while (restart_flag != 0); - return (error); - + return error; } /* @@ -8361,8 +8659,8 @@ out: int rmdir(__unused proc_t p, struct rmdir_args *uap, __unused int32_t *retval) { - return (rmdirat_internal(vfs_context_current(), AT_FDCWD, - CAST_USER_ADDR_T(uap->path), UIO_USERSPACE)); + return rmdirat_internal(vfs_context_current(), AT_FDCWD, + CAST_USER_ADDR_T(uap->path), UIO_USERSPACE); } /* Get direntry length padded to 8 byte alignment */ @@ -8379,11 +8677,11 @@ rmdir(__unused proc_t p, struct rmdir_args *uap, __unused int32_t *retval) errno_t vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, - int *numdirent, vfs_context_t ctxp) + int *numdirent, vfs_context_t ctxp) { /* Check if fs natively supports VNODE_READDIR_EXTENDED */ if ((vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSREADDIR_EXTENDED) && - ((vp->v_mount->mnt_kern_flag & MNTK_DENY_READDIREXT) == 0)) { + ((vp->v_mount->mnt_kern_flag & MNTK_DENY_READDIREXT) == 0)) { return VNOP_READDIR(vp, uio, flags, eofflag, numdirent, ctxp); } else { size_t bufsize; @@ -8405,7 +8703,7 @@ vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, * bytes (8-byte aligned) and a struct dirent size of 12 bytes * (4-byte aligned). So having a buffer that is 3/8 the size * will prevent us from reading more than we can pack. - * + * * Since this buffer is wired memory, we will limit the * buffer size to a maximum of 32K. We would really like to * use 32K in the MIN(), but we use magic number 87371 to @@ -8427,18 +8725,18 @@ vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, bytesread = bufsize - uio_resid(auio); MALLOC(entry64, struct direntry *, sizeof(struct direntry), - M_TEMP, M_WAITOK); + M_TEMP, M_WAITOK); /* * Convert all the entries and copy them out to user's buffer. */ while (error == 0 && (char *)dep < ((char *)bufptr + bytesread)) { - size_t enbufsize = DIRENT64_LEN(dep->d_namlen); + size_t enbufsize = DIRENT64_LEN(dep->d_namlen); if (DIRENT_END(dep) > ((char *)bufptr + bytesread) || DIRENT_LEN(dep->d_namlen) > dep->d_reclen) { printf("%s: %s: Bad dirent recived from directory %s\n", __func__, - vp->v_mount->mnt_vfsstat.f_mntonname, - vp->v_name ? vp->v_name : ""); + vp->v_mount->mnt_vfsstat.f_mntonname, + vp->v_name ? 
vp->v_name : ""); error = EIO; break; } @@ -8466,31 +8764,31 @@ vnode_readdir64(struct vnode *vp, struct uio *uio, int flags, int *eofflag, uio_free(auio); FREE(bufptr, M_TEMP); FREE(entry64, M_TEMP); - return (error); + return error; } } -#define GETDIRENTRIES_MAXBUFSIZE (128 * 1024 * 1024U) +#define GETDIRENTRIES_MAXBUFSIZE (128 * 1024 * 1024U) /* * Read a block of directory entries in a file system independent format. */ static int getdirentries_common(int fd, user_addr_t bufp, user_size_t bufsize, ssize_t *bytesread, - off_t *offset, int flags) + off_t *offset, int flags) { vnode_t vp; - struct vfs_context context = *vfs_context_current(); /* local copy */ + struct vfs_context context = *vfs_context_current(); /* local copy */ struct fileproc *fp; uio_t auio; int spacetype = proc_is64bit(vfs_context_proc(&context)) ? UIO_USERSPACE64 : UIO_USERSPACE32; off_t loff; int error, eofflag, numdirent; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; error = fp_getfvp(vfs_context_proc(&context), fd, &fp, &vp); if (error) { - return (error); + return error; } if ((fp->f_fglob->fg_flag & FREAD) == 0) { AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1); @@ -8498,15 +8796,17 @@ getdirentries_common(int fd, user_addr_t bufp, user_size_t bufsize, ssize_t *byt goto out; } - if (bufsize > GETDIRENTRIES_MAXBUFSIZE) + if (bufsize > GETDIRENTRIES_MAXBUFSIZE) { bufsize = GETDIRENTRIES_MAXBUFSIZE; + } #if CONFIG_MACF error = mac_file_check_change_offset(vfs_context_ucred(&context), fp->f_fglob); - if (error) + if (error) { goto out; + } #endif - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { goto out; } AUDIT_ARG(vnpath, vp, ARG_VNODE1); @@ -8542,11 +8842,12 @@ unionread: goto out; } - if ((user_ssize_t)bufsize == uio_resid(auio)){ + if ((user_ssize_t)bufsize == uio_resid(auio)) { if (union_dircheckp) { error = union_dircheckp(&vp, fp, &context); - if (error == -1) + if (error == -1) { goto unionread; + } if (error) { (void)vnode_put(vp); goto out; @@ -8575,7 +8876,7 @@ unionread: *bytesread = bufsize - uio_resid(auio); out: file_drop(fd); - return (error); + return error; } @@ -8599,7 +8900,7 @@ getdirentries(__unused struct proc *p, struct getdirentries_args *uap, int32_t * } *retval = bytesread; } - return (error); + return error; } int @@ -8616,7 +8917,7 @@ getdirentries64(__unused struct proc *p, struct getdirentries64_args *uap, user_ *retval = bytesread; error = copyout((caddr_t)&offset, uap->position, sizeof(off_t)); } - return (error); + return error; } @@ -8624,7 +8925,7 @@ getdirentries64(__unused struct proc *p, struct getdirentries64_args *uap, user_ * Set the mode mask for creation of filesystem nodes. 
* XXX implement xsecurity */ -#define UMASK_NOXSECURITY (void *)1 /* leave existing xsecurity alone */ +#define UMASK_NOXSECURITY (void *)1 /* leave existing xsecurity alone */ static int umask1(proc_t p, int newmask, __unused kauth_filesec_t fsec, int32_t *retval) { @@ -8636,7 +8937,7 @@ umask1(proc_t p, int newmask, __unused kauth_filesec_t fsec, int32_t *retval) *retval = fdp->fd_cmask; fdp->fd_cmask = newmask & ALLPERMS; proc_fdunlock(p); - return (0); + return 0; } /* @@ -8661,23 +8962,25 @@ umask_extended(proc_t p, struct umask_extended_args *uap, int32_t *retval) xsecdst = KAUTH_FILESEC_NONE; if (uap->xsecurity != USER_ADDR_NULL) { - if ((ciferror = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0) + if ((ciferror = kauth_copyinfilesec(uap->xsecurity, &xsecdst)) != 0) { return ciferror; + } } else { xsecdst = KAUTH_FILESEC_NONE; } ciferror = umask1(p, uap->newmask, xsecdst, retval); - if (xsecdst != KAUTH_FILESEC_NONE) + if (xsecdst != KAUTH_FILESEC_NONE) { kauth_filesec_free(xsecdst); + } return ciferror; } int umask(proc_t p, struct umask_args *uap, int32_t *retval) { - return(umask1(p, uap->newmask, UMASK_NOXSECURITY, retval)); + return umask1(p, uap->newmask, UMASK_NOXSECURITY, retval); } /* @@ -8695,10 +8998,11 @@ revoke(proc_t p, struct revoke_args *uap, __unused int32_t *retval) struct nameidata nd; NDINIT(&nd, LOOKUP, OP_REVOKE, FOLLOW | AUDITVNPATH1, UIO_USERSPACE, - uap->path, ctx); + uap->path, ctx); error = namei(&nd); - if (error) - return (error); + if (error) { + return error; + } vp = nd.ni_vp; nameidone(&nd); @@ -8715,22 +9019,26 @@ revoke(proc_t p, struct revoke_args *uap, __unused int32_t *retval) #if CONFIG_MACF error = mac_vnode_check_revoke(ctx, vp); - if (error) + if (error) { goto out; + } #endif VATTR_INIT(&va); VATTR_WANTED(&va, va_uid); - if ((error = vnode_getattr(vp, &va, ctx))) + if ((error = vnode_getattr(vp, &va, ctx))) { goto out; + } if (kauth_cred_getuid(vfs_context_ucred(ctx)) != va.va_uid && - (error = suser(vfs_context_ucred(ctx), &p->p_acflag))) + (error = suser(vfs_context_ucred(ctx), &p->p_acflag))) { goto out; - if (vp->v_usecount > 0 || (vnode_isaliased(vp))) + } + if (vp->v_usecount > 0 || (vnode_isaliased(vp))) { VNOP_REVOKE(vp, REVOKEALL, ctx); + } out: vnode_put(vp); - return (error); + return error; } @@ -8747,7 +9055,7 @@ out: */ /* ARGSUSED */ int -getdirentriesattr (proc_t p, struct getdirentriesattr_args *uap, int32_t *retval) +getdirentriesattr(proc_t p, struct getdirentriesattr_args *uap, int32_t *retval) { vnode_t vp; struct fileproc *fp; @@ -8760,21 +9068,21 @@ getdirentriesattr (proc_t p, struct getdirentriesattr_args *uap, int32_t *retval struct attrlist attributelist; vfs_context_t ctx = vfs_context_current(); int fd = uap->fd; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; kauth_action_t action; AUDIT_ARG(fd, fd); /* Get the attributes into kernel space */ if ((error = copyin(uap->alist, (caddr_t)&attributelist, sizeof(attributelist)))) { - return(error); + return error; } if ((error = copyin(uap->count, (caddr_t)&count, sizeof(count)))) { - return(error); + return error; } savecount = count; - if ( (error = fp_getfvp(p, fd, &fp, &vp)) ) { - return (error); + if ((error = fp_getfvp(p, fd, &fp, &vp))) { + return error; } if ((fp->f_fglob->fg_flag & FREAD) == 0) { AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1); @@ -8786,13 +9094,15 @@ getdirentriesattr (proc_t p, struct getdirentriesattr_args *uap, int32_t *retval #if CONFIG_MACF error = mac_file_check_change_offset(vfs_context_ucred(ctx), fp->f_fglob); - if 
(error) + if (error) { goto out; + } #endif - if ( (error = vnode_getwithref(vp)) ) + if ((error = vnode_getwithref(vp))) { goto out; + } AUDIT_ARG(vnpath, vp, ARG_VNODE1); @@ -8823,16 +9133,16 @@ unionread: */ action = KAUTH_VNODE_LIST_DIRECTORY; if ((attributelist.commonattr & ~ATTR_CMN_NAME) || - attributelist.fileattr || attributelist.dirattr) + attributelist.fileattr || attributelist.dirattr) { action |= KAUTH_VNODE_SEARCH; + } if ((error = vnode_authorize(vp, NULL, action, ctx)) == 0) { - /* Believe it or not, uap->options only has 32-bits of valid * info, so truncate before extending again */ error = VNOP_READDIRATTR(vp, &attributelist, auio, count, - (u_long)(uint32_t)uap->options, &newstate, &eofflag, &count, ctx); + (u_long)(uint32_t)uap->options, &newstate, &eofflag, &count, ctx); } if (error) { @@ -8850,7 +9160,7 @@ unionread: if (eofflag && vp->v_mount->mnt_flag & MNT_UNION) { if (uio_resid(auio) < (user_ssize_t) uap->buffersize) { // Got some entries eofflag = 0; - } else { // Empty buffer + } else { // Empty buffer struct vnode *tvp = vp; if (lookup_traverse_union(tvp, &vp, ctx) == 0) { vnode_ref_ext(vp, fp->f_fglob->fg_flag & O_EVTONLY, 0); @@ -8867,34 +9177,36 @@ unionread: (void)vnode_put(vp); - if (error) + if (error) { goto out; + } fp->f_fglob->fg_offset = uio_offset(auio); /* should be multiple of dirent, not variable */ - if ((error = copyout((caddr_t) &count, uap->count, sizeof(count)))) + if ((error = copyout((caddr_t) &count, uap->count, sizeof(count)))) { goto out; - if ((error = copyout((caddr_t) &newstate, uap->newstate, sizeof(newstate)))) + } + if ((error = copyout((caddr_t) &newstate, uap->newstate, sizeof(newstate)))) { goto out; - if ((error = copyout((caddr_t) &loff, uap->basep, sizeof(loff)))) + } + if ((error = copyout((caddr_t) &loff, uap->basep, sizeof(loff)))) { goto out; + } *retval = eofflag; /* similar to getdirentries */ error = 0; out: file_drop(fd); - return (error); /* return error earlier, an retval of 0 or 1 now */ - + return error; /* return error earlier, an retval of 0 or 1 now */ } /* end of getdirentriesattr system call */ /* -* Exchange data between two files -*/ + * Exchange data between two files + */ /* ARGSUSED */ int -exchangedata (__unused proc_t p, struct exchangedata_args *uap, __unused int32_t *retval) +exchangedata(__unused proc_t p, struct exchangedata_args *uap, __unused int32_t *retval) { - struct nameidata fnd, snd; vfs_context_t ctx = vfs_context_current(); vnode_t fvp; @@ -8903,27 +9215,30 @@ exchangedata (__unused proc_t p, struct exchangedata_args *uap, __unused int32_t u_int32_t nameiflags; char *fpath = NULL; char *spath = NULL; - int flen=0, slen=0; - int from_truncated=0, to_truncated=0; + int flen = 0, slen = 0; + int from_truncated = 0, to_truncated = 0; #if CONFIG_FSE fse_info f_finfo, s_finfo; #endif nameiflags = 0; - if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + if ((uap->options & FSOPT_NOFOLLOW) == 0) { + nameiflags |= FOLLOW; + } NDINIT(&fnd, LOOKUP, OP_EXCHANGEDATA, nameiflags | AUDITVNPATH1, - UIO_USERSPACE, uap->path1, ctx); + UIO_USERSPACE, uap->path1, ctx); error = namei(&fnd); - if (error) + if (error) { goto out2; + } nameidone(&fnd); fvp = fnd.ni_vp; NDINIT(&snd, LOOKUP, OP_EXCHANGEDATA, CN_NBMOUNTLOOK | nameiflags | AUDITVNPATH2, - UIO_USERSPACE, uap->path2, ctx); + UIO_USERSPACE, uap->path2, ctx); error = namei(&snd); if (error) { @@ -8945,12 +9260,12 @@ exchangedata (__unused proc_t p, struct exchangedata_args *uap, __unused int32_t * if the files are on different volumes, 
return an error */ if (svp->v_mount != fvp->v_mount) { - error = EXDEV; + error = EXDEV; goto out; } /* If they're not files, return an error */ - if ( (vnode_isreg(fvp) == 0) || (vnode_isreg(svp) == 0)) { + if ((vnode_isreg(fvp) == 0) || (vnode_isreg(svp) == 0)) { error = EINVAL; goto out; } @@ -8958,18 +9273,20 @@ exchangedata (__unused proc_t p, struct exchangedata_args *uap, __unused int32_t #if CONFIG_MACF error = mac_vnode_check_exchangedata(ctx, fvp, svp); - if (error) + if (error) { goto out; + } #endif if (((error = vnode_authorize(fvp, NULL, KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA, ctx)) != 0) || - ((error = vnode_authorize(svp, NULL, KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA, ctx)) != 0)) + ((error = vnode_authorize(svp, NULL, KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA, ctx)) != 0)) { goto out; + } if ( #if CONFIG_FSE - need_fsevent(FSE_EXCHANGE, fvp) || + need_fsevent(FSE_EXCHANGE, fvp) || #endif - kauth_authorize_fileop_has_listeners()) { + kauth_authorize_fileop_has_listeners()) { GET_PATH(fpath); GET_PATH(spath); if (fpath == NULL || spath == NULL) { @@ -8993,51 +9310,53 @@ exchangedata (__unused proc_t p, struct exchangedata_args *uap, __unused int32_t error = VNOP_EXCHANGE(fvp, svp, 0, ctx); if (error == 0) { - const char *tmpname; + const char *tmpname; - if (fpath != NULL && spath != NULL) { - /* call out to allow 3rd party notification of exchangedata. - * Ignore result of kauth_authorize_fileop call. - */ - kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_EXCHANGE, - (uintptr_t)fpath, (uintptr_t)spath); - } - name_cache_lock(); + if (fpath != NULL && spath != NULL) { + /* call out to allow 3rd party notification of exchangedata. + * Ignore result of kauth_authorize_fileop call. + */ + kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_EXCHANGE, + (uintptr_t)fpath, (uintptr_t)spath); + } + name_cache_lock(); - tmpname = fvp->v_name; - fvp->v_name = svp->v_name; - svp->v_name = tmpname; + tmpname = fvp->v_name; + fvp->v_name = svp->v_name; + svp->v_name = tmpname; - if (fvp->v_parent != svp->v_parent) { - vnode_t tmp; + if (fvp->v_parent != svp->v_parent) { + vnode_t tmp; - tmp = fvp->v_parent; - fvp->v_parent = svp->v_parent; - svp->v_parent = tmp; - } - name_cache_unlock(); + tmp = fvp->v_parent; + fvp->v_parent = svp->v_parent; + svp->v_parent = tmp; + } + name_cache_unlock(); #if CONFIG_FSE - if (fpath != NULL && spath != NULL) { - add_fsevent(FSE_EXCHANGE, ctx, - FSE_ARG_STRING, flen, fpath, - FSE_ARG_FINFO, &f_finfo, - FSE_ARG_STRING, slen, spath, - FSE_ARG_FINFO, &s_finfo, - FSE_ARG_DONE); - } + if (fpath != NULL && spath != NULL) { + add_fsevent(FSE_EXCHANGE, ctx, + FSE_ARG_STRING, flen, fpath, + FSE_ARG_FINFO, &f_finfo, + FSE_ARG_STRING, slen, spath, + FSE_ARG_FINFO, &s_finfo, + FSE_ARG_DONE); + } #endif } out: - if (fpath != NULL) - RELEASE_PATH(fpath); - if (spath != NULL) - RELEASE_PATH(spath); + if (fpath != NULL) { + RELEASE_PATH(fpath); + } + if (spath != NULL) { + RELEASE_PATH(spath); + } vnode_put(svp); vnode_put(fvp); out2: - return (error); + return error; } /* @@ -9049,8 +9368,8 @@ uint32_t freespace_mb(vnode_t vp) { vfs_update_vfsstat(vp->v_mount, vfs_context_current(), VFS_USER_EVENT); - return (((uint64_t)vp->v_mount->mnt_vfsstat.f_bavail * - vp->v_mount->mnt_vfsstat.f_bsize) >> 20); + return ((uint64_t)vp->v_mount->mnt_vfsstat.f_bavail * + vp->v_mount->mnt_vfsstat.f_bsize) >> 20; } #if CONFIG_SEARCHFS @@ -9061,69 +9380,70 @@ int searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) { vnode_t 
vp, tvp; - int i, error=0; + int i, error = 0; int fserror = 0; struct nameidata nd; struct user64_fssearchblock searchblock; struct searchstate *state; struct attrlist *returnattrs; struct timeval timelimit; - void *searchparams1,*searchparams2; + void *searchparams1, *searchparams2; uio_t auio = NULL; int spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; uint32_t nummatches; int mallocsize; uint32_t nameiflags; vfs_context_t ctx = vfs_context_current(); - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; /* Start by copying in fsearchblock parameter list */ - if (IS_64BIT_PROCESS(p)) { - error = copyin(uap->searchblock, (caddr_t) &searchblock, sizeof(searchblock)); - timelimit.tv_sec = searchblock.timelimit.tv_sec; - timelimit.tv_usec = searchblock.timelimit.tv_usec; - } - else { - struct user32_fssearchblock tmp_searchblock; - - error = copyin(uap->searchblock, (caddr_t) &tmp_searchblock, sizeof(tmp_searchblock)); - // munge into 64-bit version - searchblock.returnattrs = CAST_USER_ADDR_T(tmp_searchblock.returnattrs); - searchblock.returnbuffer = CAST_USER_ADDR_T(tmp_searchblock.returnbuffer); - searchblock.returnbuffersize = tmp_searchblock.returnbuffersize; - searchblock.maxmatches = tmp_searchblock.maxmatches; + if (IS_64BIT_PROCESS(p)) { + error = copyin(uap->searchblock, (caddr_t) &searchblock, sizeof(searchblock)); + timelimit.tv_sec = searchblock.timelimit.tv_sec; + timelimit.tv_usec = searchblock.timelimit.tv_usec; + } else { + struct user32_fssearchblock tmp_searchblock; + + error = copyin(uap->searchblock, (caddr_t) &tmp_searchblock, sizeof(tmp_searchblock)); + // munge into 64-bit version + searchblock.returnattrs = CAST_USER_ADDR_T(tmp_searchblock.returnattrs); + searchblock.returnbuffer = CAST_USER_ADDR_T(tmp_searchblock.returnbuffer); + searchblock.returnbuffersize = tmp_searchblock.returnbuffersize; + searchblock.maxmatches = tmp_searchblock.maxmatches; /* * These casts are safe. We will promote the tv_sec into a 64 bit long if necessary * from a 32 bit long, and tv_usec is already a signed 32 bit int. */ - timelimit.tv_sec = (__darwin_time_t) tmp_searchblock.timelimit.tv_sec; - timelimit.tv_usec = (__darwin_useconds_t) tmp_searchblock.timelimit.tv_usec; - searchblock.searchparams1 = CAST_USER_ADDR_T(tmp_searchblock.searchparams1); - searchblock.sizeofsearchparams1 = tmp_searchblock.sizeofsearchparams1; - searchblock.searchparams2 = CAST_USER_ADDR_T(tmp_searchblock.searchparams2); - searchblock.sizeofsearchparams2 = tmp_searchblock.sizeofsearchparams2; - searchblock.searchattrs = tmp_searchblock.searchattrs; - } - if (error) - return(error); + timelimit.tv_sec = (__darwin_time_t) tmp_searchblock.timelimit.tv_sec; + timelimit.tv_usec = (__darwin_useconds_t) tmp_searchblock.timelimit.tv_usec; + searchblock.searchparams1 = CAST_USER_ADDR_T(tmp_searchblock.searchparams1); + searchblock.sizeofsearchparams1 = tmp_searchblock.sizeofsearchparams1; + searchblock.searchparams2 = CAST_USER_ADDR_T(tmp_searchblock.searchparams2); + searchblock.sizeofsearchparams2 = tmp_searchblock.sizeofsearchparams2; + searchblock.searchattrs = tmp_searchblock.searchattrs; + } + if (error) { + return error; + } /* Do a sanity check on sizeofsearchparams1 and sizeofsearchparams2. 
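 * Both sizes arrive from user space and feed straight into the
 * mallocsize sum below, so an unchecked value could push that (int)
 * arithmetic past anything a single kernel allocation should be asked
 * for; SEARCHFS_MAX_SEARCHPARMS bounds each buffer independently before
 * anything is allocated or copied in.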
*/ if (searchblock.sizeofsearchparams1 > SEARCHFS_MAX_SEARCHPARMS || - searchblock.sizeofsearchparams2 > SEARCHFS_MAX_SEARCHPARMS) - return(EINVAL); + searchblock.sizeofsearchparams2 > SEARCHFS_MAX_SEARCHPARMS) { + return EINVAL; + } /* Now malloc a big bunch of space to hold the search parameters, the attrlists and the search state. */ /* It all has to do into local memory and it's not that big so we might as well put it all together. */ /* Searchparams1 shall be first so we might as well use that to hold the base address of the allocated*/ - /* block. */ + /* block. */ /* */ /* NOTE: we allocate an extra 8 bytes to account for the difference in size of the searchstate */ /* due to the changes in rdar://problem/12438273. That way if a 3rd party file system */ /* assumes the size is still 556 bytes it will continue to work */ mallocsize = searchblock.sizeofsearchparams1 + searchblock.sizeofsearchparams2 + - sizeof(struct attrlist) + sizeof(struct searchstate) + (2*sizeof(uint32_t)); + sizeof(struct attrlist) + sizeof(struct searchstate) + (2 * sizeof(uint32_t)); MALLOC(searchparams1, void *, mallocsize, M_TEMP, M_WAITOK); @@ -9131,31 +9451,36 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) searchparams2 = (void *) (((caddr_t) searchparams1) + searchblock.sizeofsearchparams1); returnattrs = (struct attrlist *) (((caddr_t) searchparams2) + searchblock.sizeofsearchparams2); - state = (struct searchstate *) (((caddr_t) returnattrs) + sizeof (struct attrlist)); + state = (struct searchstate *) (((caddr_t) returnattrs) + sizeof(struct attrlist)); /* Now copy in the stuff given our local variables. */ - if ((error = copyin(searchblock.searchparams1, searchparams1, searchblock.sizeofsearchparams1))) + if ((error = copyin(searchblock.searchparams1, searchparams1, searchblock.sizeofsearchparams1))) { goto freeandexit; + } - if ((error = copyin(searchblock.searchparams2, searchparams2, searchblock.sizeofsearchparams2))) + if ((error = copyin(searchblock.searchparams2, searchparams2, searchblock.sizeofsearchparams2))) { goto freeandexit; + } - if ((error = copyin(searchblock.returnattrs, (caddr_t) returnattrs, sizeof(struct attrlist)))) + if ((error = copyin(searchblock.returnattrs, (caddr_t) returnattrs, sizeof(struct attrlist)))) { goto freeandexit; + } - if ((error = copyin(uap->state, (caddr_t) state, sizeof(struct searchstate)))) + if ((error = copyin(uap->state, (caddr_t) state, sizeof(struct searchstate)))) { goto freeandexit; + } /* * When searching a union mount, need to set the * start flag at the first call on each layer to * reset state for the new volume. 
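 *
 * Layering sketch: ss_union_layer counts how many covered mounts have
 * already been searched. When one layer drains, the code further down
 * stores SRCHFS_START in ss_union_flags, bumps ss_union_layer, and
 * hands back EAGAIN so the caller re-enters; the loop over
 * mnt_vnodecovered then descends to the matching volume before the
 * next VNOP_SEARCHFS call.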
*/ - if (uap->options & SRCHFS_START) + if (uap->options & SRCHFS_START) { state->ss_union_layer = 0; - else + } else { uap->options |= state->ss_union_flags; + } state->ss_union_flags = 0; /* @@ -9174,12 +9499,12 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) /* validate searchparams1 */ param_length = searchblock.sizeofsearchparams1; /* skip the word that specifies length of the buffer */ - start_length= (u_int32_t*) searchparams1; - start_length= start_length+1; - string_ref= (attrreference_t*) start_length; + start_length = (u_int32_t*) searchparams1; + start_length = start_length + 1; + string_ref = (attrreference_t*) start_length; /* ensure no negative offsets or too big offsets */ - if (string_ref->attr_dataoffset < 0 ) { + if (string_ref->attr_dataoffset < 0) { error = EINVAL; goto freeandexit; } @@ -9206,16 +9531,19 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) /* set up the uio structure which will contain the users return buffer */ auio = uio_createwithbuffer(1, 0, spacetype, UIO_READ, &uio_buf[0], sizeof(uio_buf)); - uio_addiov(auio, searchblock.returnbuffer, searchblock.returnbuffersize); + uio_addiov(auio, searchblock.returnbuffer, searchblock.returnbuffersize); nameiflags = 0; - if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + if ((uap->options & FSOPT_NOFOLLOW) == 0) { + nameiflags |= FOLLOW; + } NDINIT(&nd, LOOKUP, OP_SEARCHFS, nameiflags | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); - if (error) + if (error) { goto freeandexit; + } vp = nd.ni_vp; nameidone(&nd); @@ -9224,8 +9552,9 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) */ error = VFS_ROOT(vnode_mount(vp), &tvp, ctx); vnode_put(vp); - if (error) + if (error) { goto freeandexit; + } vp = tvp; /* @@ -9235,8 +9564,9 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) * is always zero. */ for (i = 0; i < (int) state->ss_union_layer; i++) { - if ((vp->v_mount->mnt_flag & MNT_UNION) == 0) + if ((vp->v_mount->mnt_flag & MNT_UNION) == 0) { break; + } tvp = vp; vp = vp->v_mount->mnt_vnodecovered; if (vp == NULL) { @@ -9246,8 +9576,9 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) } error = vnode_getwithref(vp); vnode_put(tvp); - if (error) + if (error) { goto freeandexit; + } } #if CONFIG_MACF @@ -9263,10 +9594,10 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) * If searchblock.maxmatches == 0, then skip the search. This has happened * before and sometimes the underlying code doesnt deal with it well. */ - if (searchblock.maxmatches == 0) { + if (searchblock.maxmatches == 0) { nummatches = 0; goto saveandexit; - } + } /* * Allright, we have everything we need, so lets make that call. 
@@ -9277,18 +9608,18 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) */ fserror = VNOP_SEARCHFS(vp, - searchparams1, - searchparams2, - &searchblock.searchattrs, - (u_long)searchblock.maxmatches, - &timelimit, - returnattrs, - &nummatches, - (u_long)uap->scriptcode, - (u_long)uap->options, - auio, - (struct searchstate *) &state->ss_fsstate, - ctx); + searchparams1, + searchparams2, + &searchblock.searchattrs, + (u_long)searchblock.maxmatches, + &timelimit, + returnattrs, + &nummatches, + (u_long)uap->scriptcode, + (u_long)uap->options, + auio, + (struct searchstate *) &state->ss_fsstate, + ctx); /* * If it's a union mount we need to be called again @@ -9296,7 +9627,7 @@ searchfs(proc_t p, struct searchfs_args *uap, __unused int32_t *retval) */ if ((vp->v_mount->mnt_flag & MNT_UNION) && fserror == 0) { state->ss_union_flags = SRCHFS_START; - state->ss_union_layer++; // search next layer down + state->ss_union_layer++; // search next layer down fserror = EAGAIN; } @@ -9305,23 +9636,23 @@ saveandexit: vnode_put(vp); /* Now copy out the stuff that needs copying out. That means the number of matches, the - search state. Everything was already put into he return buffer by the vop call. */ + * search state. Everything was already put into he return buffer by the vop call. */ - if ((error = copyout((caddr_t) state, uap->state, sizeof(struct searchstate))) != 0) + if ((error = copyout((caddr_t) state, uap->state, sizeof(struct searchstate))) != 0) { goto freeandexit; + } - if ((error = suulong(uap->nummatches, (uint64_t)nummatches)) != 0) + if ((error = suulong(uap->nummatches, (uint64_t)nummatches)) != 0) { goto freeandexit; + } error = fserror; freeandexit: - FREE(searchparams1,M_TEMP); - - return(error); - + FREE(searchparams1, M_TEMP); + return error; } /* end of searchfs system call */ #else /* CONFIG_SEARCHFS */ @@ -9329,7 +9660,7 @@ freeandexit: int searchfs(__unused proc_t p, __unused struct searchfs_args *uap, __unused int32_t *retval) { - return (ENOTSUP); + return ENOTSUP; } #endif /* CONFIG_SEARCHFS */ @@ -9342,8 +9673,8 @@ lck_grp_t * nspace_mutex_group; lck_mtx_t nspace_handler_lock; lck_mtx_t nspace_handler_exclusion_lock; -time_t snapshot_timestamp=0; -int nspace_allow_virtual_devs=0; +time_t snapshot_timestamp = 0; +int nspace_allow_virtual_devs = 0; void nspace_handler_init(void); @@ -9359,8 +9690,8 @@ typedef struct nspace_item_info { #define MAX_NSPACE_ITEMS 128 nspace_item_info nspace_items[MAX_NSPACE_ITEMS]; -uint32_t nspace_item_idx=0; // also used as the sleep/wakeup rendezvous address -uint32_t nspace_token_id=0; +uint32_t nspace_item_idx = 0; // also used as the sleep/wakeup rendezvous address +uint32_t nspace_token_id = 0; uint32_t nspace_handler_timeout = 15; // seconds #define NSPACE_ITEM_NEW 0x0001 @@ -9400,68 +9731,74 @@ static nspace_type_t nspace_type_for_op(uint64_t op); static int nspace_is_special_process(struct proc *proc); static int vn_open_with_vp(vnode_t vp, int fmode, vfs_context_t ctx); static int wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type); -static int validate_namespace_args (int is64bit, int size); +static int validate_namespace_args(int is64bit, int size); static int process_namespace_fsctl(nspace_type_t nspace_type, int is64bit, u_int size, caddr_t data); -static inline int nspace_flags_matches_handler(uint32_t event_flags, nspace_type_t nspace_type) +static inline int +nspace_flags_matches_handler(uint32_t event_flags, nspace_type_t nspace_type) { - switch(nspace_type) { - case 
NSPACE_HANDLER_NSPACE: - return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_NSPACE_EVENT; - case NSPACE_HANDLER_SNAPSHOT: - return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_SNAPSHOT_EVENT; - default: - printf("nspace_flags_matches_handler: invalid type %u\n", (int)nspace_type); - return 0; + switch (nspace_type) { + case NSPACE_HANDLER_NSPACE: + return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_NSPACE_EVENT; + case NSPACE_HANDLER_SNAPSHOT: + return (event_flags & NSPACE_ITEM_ALL_EVENT_TYPES) == NSPACE_ITEM_SNAPSHOT_EVENT; + default: + printf("nspace_flags_matches_handler: invalid type %u\n", (int)nspace_type); + return 0; } } -static inline int nspace_item_flags_for_type(nspace_type_t nspace_type) +static inline int +nspace_item_flags_for_type(nspace_type_t nspace_type) { - switch(nspace_type) { - case NSPACE_HANDLER_NSPACE: - return NSPACE_ITEM_NSPACE_EVENT; - case NSPACE_HANDLER_SNAPSHOT: - return NSPACE_ITEM_SNAPSHOT_EVENT; - default: - printf("nspace_item_flags_for_type: invalid type %u\n", (int)nspace_type); - return 0; + switch (nspace_type) { + case NSPACE_HANDLER_NSPACE: + return NSPACE_ITEM_NSPACE_EVENT; + case NSPACE_HANDLER_SNAPSHOT: + return NSPACE_ITEM_SNAPSHOT_EVENT; + default: + printf("nspace_item_flags_for_type: invalid type %u\n", (int)nspace_type); + return 0; } } -static inline int nspace_open_flags_for_type(nspace_type_t nspace_type) +static inline int +nspace_open_flags_for_type(nspace_type_t nspace_type) { - switch(nspace_type) { - case NSPACE_HANDLER_NSPACE: - return FREAD | FWRITE | O_EVTONLY; - case NSPACE_HANDLER_SNAPSHOT: - return FREAD | O_EVTONLY; - default: - printf("nspace_open_flags_for_type: invalid type %u\n", (int)nspace_type); - return 0; + switch (nspace_type) { + case NSPACE_HANDLER_NSPACE: + return FREAD | FWRITE | O_EVTONLY; + case NSPACE_HANDLER_SNAPSHOT: + return FREAD | O_EVTONLY; + default: + printf("nspace_open_flags_for_type: invalid type %u\n", (int)nspace_type); + return 0; } } -static inline nspace_type_t nspace_type_for_op(uint64_t op) +static inline nspace_type_t +nspace_type_for_op(uint64_t op) { - switch(op & NAMESPACE_HANDLER_EVENT_TYPE_MASK) { - case NAMESPACE_HANDLER_NSPACE_EVENT: - return NSPACE_HANDLER_NSPACE; - case NAMESPACE_HANDLER_SNAPSHOT_EVENT: - return NSPACE_HANDLER_SNAPSHOT; - default: - printf("nspace_type_for_op: invalid op mask %llx\n", op & NAMESPACE_HANDLER_EVENT_TYPE_MASK); - return NSPACE_HANDLER_NSPACE; + switch (op & NAMESPACE_HANDLER_EVENT_TYPE_MASK) { + case NAMESPACE_HANDLER_NSPACE_EVENT: + return NSPACE_HANDLER_NSPACE; + case NAMESPACE_HANDLER_SNAPSHOT_EVENT: + return NSPACE_HANDLER_SNAPSHOT; + default: + printf("nspace_type_for_op: invalid op mask %llx\n", op & NAMESPACE_HANDLER_EVENT_TYPE_MASK); + return NSPACE_HANDLER_NSPACE; } } -static inline int nspace_is_special_process(struct proc *proc) +static inline int +nspace_is_special_process(struct proc *proc) { int i; for (i = 0; i < NSPACE_HANDLER_COUNT; i++) { - if (proc == nspace_handlers[i].handler_proc) + if (proc == nspace_handlers[i].handler_proc) { return 1; + } } return 0; } @@ -9503,11 +9840,9 @@ nspace_proc_exit(struct proc *p) // // unblock anyone that's waiting for the handler that died // - for(i=0; i < MAX_NSPACE_ITEMS; i++) { + for (i = 0; i < MAX_NSPACE_ITEMS; i++) { if (nspace_items[i].flags & (NSPACE_ITEM_NEW | NSPACE_ITEM_PROCESSING)) { - - if ( nspace_items[i].flags & event_mask ) { - + if (nspace_items[i].flags & event_mask) { if (nspace_items[i].vp && (nspace_items[i].vp->v_flag & 
VNEEDSSNAPSHOT)) { vnode_lock_spin(nspace_items[i].vp); nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT; @@ -9557,11 +9892,10 @@ resolve_nspace_item_ext(struct vnode *vp, uint64_t op, void *arg) // be overridden (for use by the Mobile TimeMachine // testing infrastructure which uses disk images) // - if ( (op & NAMESPACE_HANDLER_SNAPSHOT_EVENT) + if ((op & NAMESPACE_HANDLER_SNAPSHOT_EVENT) && (vp->v_mount != NULL) && (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) && !nspace_allow_virtual_devs) { - return 0; } @@ -9577,14 +9911,14 @@ resolve_nspace_item_ext(struct vnode *vp, uint64_t op, void *arg) lck_mtx_lock(&nspace_handler_lock); retry: - for(i=0; i < MAX_NSPACE_ITEMS; i++) { + for (i = 0; i < MAX_NSPACE_ITEMS; i++) { if (vp == nspace_items[i].vp && op == nspace_items[i].op) { break; } } if (i >= MAX_NSPACE_ITEMS) { - for(i=0; i < MAX_NSPACE_ITEMS; i++) { + for (i = 0; i < MAX_NSPACE_ITEMS; i++) { if (nspace_items[i].flags == 0) { break; } @@ -9597,7 +9931,7 @@ retry: ts.tv_sec = nspace_handler_timeout; ts.tv_nsec = 0; - error = msleep((caddr_t)&nspace_token_id, &nspace_handler_lock, PVFS|PCATCH, "nspace-no-space", &ts); + error = msleep((caddr_t)&nspace_token_id, &nspace_handler_lock, PVFS | PCATCH, "nspace-no-space", &ts); if (error == 0) { // an entry got free'd up, go see if we can get a slot goto retry; @@ -9638,10 +9972,10 @@ retry: // slot in the nspace_items table (or we timeout). // keep_waiting = 1; - while(keep_waiting) { + while (keep_waiting) { ts.tv_sec = nspace_handler_timeout; ts.tv_nsec = 0; - error = msleep((caddr_t)&(nspace_items[i].vp), &nspace_handler_lock, PVFS|PCATCH, "namespace-done", &ts); + error = msleep((caddr_t)&(nspace_items[i].vp), &nspace_handler_lock, PVFS | PCATCH, "namespace-done", &ts); if (nspace_items[i].flags & NSPACE_ITEM_DONE) { error = 0; @@ -9657,7 +9991,7 @@ retry: } else if (error == 0) { // hmmm, why did we get woken up? 
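// (msleep() returning 0 only means a wakeup was issued on this channel;
// it carries no payload, so the flags word tested above is the sole
// source of truth and an unexplained wakeup is logged rather than
// treated as completion.)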
printf("woken up for token %d but it's not done, cancelled or timedout and error == 0.\n", - nspace_items[i].token); + nspace_items[i].token); } if (--nspace_items[i].refcount == 0) { @@ -9675,7 +10009,8 @@ retry: return error; } -int nspace_snapshot_event(vnode_t vp, time_t ctime, uint64_t op_type, void *arg) +int +nspace_snapshot_event(vnode_t vp, time_t ctime, uint64_t op_type, void *arg) { int snapshot_error = 0; @@ -9714,7 +10049,7 @@ get_nspace_item_status(struct vnode *vp, int32_t *status) int i; lck_mtx_lock(&nspace_handler_lock); - for(i=0; i < MAX_NSPACE_ITEMS; i++) { + for (i = 0; i < MAX_NSPACE_ITEMS; i++) { if (nspace_items[i].vp == vp) { break; } @@ -9772,8 +10107,9 @@ vn_open_with_vp(vnode_t vp, int fmode, vfs_context_t ctx) #if CONFIG_MACF error = mac_vnode_check_open(ctx, vp, fmode); - if (error) + if (error) { return error; + } #endif /* compute action to be authorized */ @@ -9795,8 +10131,9 @@ vn_open_with_vp(vnode_t vp, int fmode, vfs_context_t ctx) } } - if ((error = vnode_authorize(vp, NULL, action, ctx)) != 0) + if ((error = vnode_authorize(vp, NULL, action, ctx)) != 0) { return error; + } // @@ -9812,10 +10149,10 @@ vn_open_with_vp(vnode_t vp, int fmode, vfs_context_t ctx) fmode |= O_EVTONLY; } - if ( (error = VNOP_OPEN(vp, fmode, ctx)) ) { + if ((error = VNOP_OPEN(vp, fmode, ctx))) { return error; } - if ( (error = vnode_ref_ext(vp, fmode, 0)) ) { + if ((error = vnode_ref_ext(vp, fmode, 0))) { VNOP_CLOSE(vp, fmode, ctx); return error; } @@ -9827,7 +10164,7 @@ vn_open_with_vp(vnode_t vp, int fmode, vfs_context_t ctx) mac_vnode_notify_open(ctx, vp, fmode); #endif kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN, - (uintptr_t)vp, 0); + (uintptr_t)vp, 0); return 0; @@ -9858,7 +10195,7 @@ wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type) * process. */ curtask = current_task(); - bsd_set_dependency_capable (curtask); + bsd_set_dependency_capable(curtask); lck_mtx_lock(&nspace_handler_lock); if (nspace_handlers[nspace_type].handler_proc == NULL) { @@ -9867,12 +10204,11 @@ wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type) } if (nspace_type == NSPACE_HANDLER_SNAPSHOT && - (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) { + (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) { error = EINVAL; } while (error == 0) { - /* Try to find matching namespace item */ for (i = 0; i < MAX_NSPACE_ITEMS; i++) { if (nspace_items[i].flags & NSPACE_ITEM_NEW) { @@ -9884,7 +10220,7 @@ wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type) if (i >= MAX_NSPACE_ITEMS) { /* Nothing is there yet. 
Wait for wake up and retry */ - error = msleep((caddr_t)&nspace_item_idx, &nspace_handler_lock, PVFS|PCATCH, "namespace-items", 0); + error = msleep((caddr_t)&nspace_item_idx, &nspace_handler_lock, PVFS | PCATCH, "namespace-items", 0); if ((nspace_type == NSPACE_HANDLER_SNAPSHOT) && (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) { /* Prevent infinite loop if snapshot handler exited */ error = EINVAL; @@ -9914,15 +10250,21 @@ wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type) */ fmode = nspace_open_flags_for_type(nspace_type); error = vnode_getwithvid(nspace_items[i].vp, nspace_items[i].vid); - if (error) goto cleanup; + if (error) { + goto cleanup; + } vn_get_succsessful = true; error = vn_open_with_vp(nspace_items[i].vp, fmode, ctx); - if (error) goto cleanup; + if (error) { + goto cleanup; + } vn_open_successful = true; error = falloc(p, &fp, &indx, ctx); - if (error) goto cleanup; + if (error) { + goto cleanup; + } fp_alloc_successful = true; fp->f_fglob->fg_flag = fmode; @@ -9939,11 +10281,17 @@ wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type) * token, flags, and the FD pointer */ error = copyout(&nspace_items[i].token, nhd->token, sizeof(uint32_t)); - if (error) goto cleanup; + if (error) { + goto cleanup; + } error = copyout(&nspace_items[i].op, nhd->flags, sizeof(uint64_t)); - if (error) goto cleanup; + if (error) { + goto cleanup; + } error = copyout(&indx, nhd->fdptr, sizeof(uint32_t)); - if (error) goto cleanup; + if (error) { + goto cleanup; + } /* * Handle optional fields: @@ -9964,31 +10312,43 @@ wait_for_namespace_event(namespace_handler_data *nhd, nspace_type_t nspace_type) u_length = 0; } error = copyout(&u_offset, nhd->infoptr, sizeof(uint64_t)); - if (error) goto cleanup; + if (error) { + goto cleanup; + } error = copyout(&u_length, nhd->infoptr + sizeof(uint64_t), sizeof(uint64_t)); - if (error) goto cleanup; + if (error) { + goto cleanup; + } } if (nhd->objid) { VATTR_INIT(&va); VATTR_WANTED(&va, va_linkid); error = vnode_getattr(nspace_items[i].vp, &va, ctx); - if (error) goto cleanup; + if (error) { + goto cleanup; + } uint64_t linkid = 0; - if (VATTR_IS_SUPPORTED (&va, va_linkid)) { + if (VATTR_IS_SUPPORTED(&va, va_linkid)) { linkid = (uint64_t)va.va_linkid; } error = copyout(&linkid, nhd->objid, sizeof(uint64_t)); } cleanup: if (error) { - if (fp_alloc_successful) fp_free(p, indx, fp); - if (vn_open_successful) vn_close(nspace_items[i].vp, fmode, ctx); + if (fp_alloc_successful) { + fp_free(p, indx, fp); + } + if (vn_open_successful) { + vn_close(nspace_items[i].vp, fmode, ctx); + } unblock = 1; } - if (vn_get_succsessful) vnode_put(nspace_items[i].vp); + if (vn_get_succsessful) { + vnode_put(nspace_items[i].vp); + } break; } @@ -10010,7 +10370,7 @@ cleanup: if (nspace_type == NSPACE_HANDLER_SNAPSHOT) { // just go through every snapshot event and unblock it immediately. 
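// (The 0 / ~0 values are the same "no snapshot handler registered"
// sentinels tested in the wait loop above; once the handler is gone,
// queued snapshot events would block their originators forever, so each
// one is cleared and woken here instead.)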
if (error && (snapshot_timestamp == 0 || snapshot_timestamp == ~0)) { - for(i = 0; i < MAX_NSPACE_ITEMS; i++) { + for (i = 0; i < MAX_NSPACE_ITEMS; i++) { if (nspace_items[i].flags & NSPACE_ITEM_NEW) { if (nspace_flags_matches_handler(nspace_items[i].flags, nspace_type)) { nspace_items[i].vp = NULL; @@ -10034,8 +10394,9 @@ cleanup: return error; } -static inline int validate_namespace_args (int is64bit, int size) { - +static inline int +validate_namespace_args(int is64bit, int size) +{ if (is64bit) { /* Must be one of these */ if (size == sizeof(user64_namespace_handler_info)) { @@ -10048,8 +10409,7 @@ static inline int validate_namespace_args (int is64bit, int size) { goto sizeok; } return EINVAL; - } - else { + } else { /* 32 bit -- must be one of these */ if (size == sizeof(user32_namespace_handler_info)) { goto sizeok; @@ -10066,21 +10426,21 @@ static inline int validate_namespace_args (int is64bit, int size) { sizeok: return 0; - } -static int process_namespace_fsctl(nspace_type_t nspace_type, int is64bit, u_int size, caddr_t data) +static int +process_namespace_fsctl(nspace_type_t nspace_type, int is64bit, u_int size, caddr_t data) { int error = 0; namespace_handler_data nhd; - bzero (&nhd, sizeof(namespace_handler_data)); + bzero(&nhd, sizeof(namespace_handler_data)); if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { return error; } - error = validate_namespace_args (is64bit, size); + error = validate_namespace_args(is64bit, size); if (error) { return error; } @@ -10103,8 +10463,7 @@ static int process_namespace_fsctl(nspace_type_t nspace_type, int is64bit, u_int } /* Otherwise the fields were pre-zeroed when we did the bzero above. */ } - } - else { + } else { /* 32 bit userland structures */ nhd.token = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->token); nhd.flags = CAST_USER_ADDR_T(((user32_namespace_handler_info *)data)->flags); @@ -10127,47 +10486,46 @@ static int process_namespace_fsctl(nspace_type_t nspace_type, int is64bit, u_int static unsigned long fsctl_bogus_command_compat(unsigned long cmd) { - switch (cmd) { case IOCBASECMD(FSIOC_SYNC_VOLUME): - return (FSIOC_SYNC_VOLUME); + return FSIOC_SYNC_VOLUME; case IOCBASECMD(FSIOC_ROUTEFS_SETROUTEID): - return (FSIOC_ROUTEFS_SETROUTEID); + return FSIOC_ROUTEFS_SETROUTEID; case IOCBASECMD(FSIOC_SET_PACKAGE_EXTS): - return (FSIOC_SET_PACKAGE_EXTS); + return FSIOC_SET_PACKAGE_EXTS; case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_GET): - return (FSIOC_NAMESPACE_HANDLER_GET); + return FSIOC_NAMESPACE_HANDLER_GET; case IOCBASECMD(FSIOC_OLD_SNAPSHOT_HANDLER_GET): - return (FSIOC_OLD_SNAPSHOT_HANDLER_GET); + return FSIOC_OLD_SNAPSHOT_HANDLER_GET; case IOCBASECMD(FSIOC_SNAPSHOT_HANDLER_GET_EXT): - return (FSIOC_SNAPSHOT_HANDLER_GET_EXT); + return FSIOC_SNAPSHOT_HANDLER_GET_EXT; case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_UPDATE): - return (FSIOC_NAMESPACE_HANDLER_UPDATE); + return FSIOC_NAMESPACE_HANDLER_UPDATE; case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_UNBLOCK): - return (FSIOC_NAMESPACE_HANDLER_UNBLOCK); + return FSIOC_NAMESPACE_HANDLER_UNBLOCK; case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_CANCEL): - return (FSIOC_NAMESPACE_HANDLER_CANCEL); + return FSIOC_NAMESPACE_HANDLER_CANCEL; case IOCBASECMD(FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME): - return (FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME); + return FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME; case IOCBASECMD(FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS): - return (FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS); + return FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS; case 
IOCBASECMD(FSIOC_SET_FSTYPENAME_OVERRIDE): - return (FSIOC_SET_FSTYPENAME_OVERRIDE); + return FSIOC_SET_FSTYPENAME_OVERRIDE; case IOCBASECMD(DISK_CONDITIONER_IOC_GET): - return (DISK_CONDITIONER_IOC_GET); + return DISK_CONDITIONER_IOC_GET; case IOCBASECMD(DISK_CONDITIONER_IOC_SET): - return (DISK_CONDITIONER_IOC_SET); + return DISK_CONDITIONER_IOC_SET; case IOCBASECMD(FSIOC_FIOSEEKHOLE): - return (FSIOC_FIOSEEKHOLE); + return FSIOC_FIOSEEKHOLE; case IOCBASECMD(FSIOC_FIOSEEKDATA): - return (FSIOC_FIOSEEKDATA); + return FSIOC_FIOSEEKDATA; case IOCBASECMD(SPOTLIGHT_IOC_GET_MOUNT_TIME): - return (SPOTLIGHT_IOC_GET_MOUNT_TIME); + return SPOTLIGHT_IOC_GET_MOUNT_TIME; case IOCBASECMD(SPOTLIGHT_IOC_GET_LAST_MTIME): - return (SPOTLIGHT_IOC_GET_LAST_MTIME); + return SPOTLIGHT_IOC_GET_LAST_MTIME; } - return (cmd); + return cmd; } /* @@ -10177,7 +10535,7 @@ fsctl_bogus_command_compat(unsigned long cmd) static int fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long options, vfs_context_t ctx) { - int error=0; + int error = 0; boolean_t is64bit; u_int size; #define STK_PARAMS 128 @@ -10188,14 +10546,18 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long cmd = fsctl_bogus_command_compat(cmd); size = IOCPARM_LEN(cmd); - if (size > IOCPARM_MAX) return (EINVAL); + if (size > IOCPARM_MAX) { + return EINVAL; + } is64bit = proc_is64bit(p); memp = NULL; - if (size > sizeof (stkbuf)) { - if ((memp = (caddr_t)kalloc(size)) == 0) return ENOMEM; + if (size > sizeof(stkbuf)) { + if ((memp = (caddr_t)kalloc(size)) == 0) { + return ENOMEM; + } data = memp; } else { data = &stkbuf[0]; @@ -10206,15 +10568,14 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long error = copyin(udata, data, size); if (error) { if (memp) { - kfree (memp, size); + kfree(memp, size); } return error; } } else { if (is64bit) { *(user_addr_t *)data = udata; - } - else { + } else { *(uint32_t *)data = (uint32_t)udata; } }; @@ -10227,371 +10588,376 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long } else if (cmd & IOC_VOID) { if (is64bit) { *(user_addr_t *)data = udata; - } - else { + } else { *(uint32_t *)data = (uint32_t)udata; } } /* Check to see if it's a generic command */ switch (cmd) { + case FSIOC_SYNC_VOLUME: { + mount_t mp = vp->v_mount; + int arg = *(uint32_t*)data; - case FSIOC_SYNC_VOLUME: { - mount_t mp = vp->v_mount; - int arg = *(uint32_t*)data; + /* record vid of vp so we can drop it below. */ + uint32_t vvid = vp->v_id; - /* record vid of vp so we can drop it below. */ - uint32_t vvid = vp->v_id; + /* + * Then grab mount_iterref so that we can release the vnode. + * Without this, a thread may call vnode_iterate_prepare then + * get into a deadlock because we've never released the root vp + */ + error = mount_iterref(mp, 0); + if (error) { + break; + } + vnode_put(vp); - /* - * Then grab mount_iterref so that we can release the vnode. - * Without this, a thread may call vnode_iterate_prepare then - * get into a deadlock because we've never released the root vp - */ - error = mount_iterref (mp, 0); - if (error) { - break; - } - vnode_put(vp); + /* issue the sync for this volume */ + (void)sync_callback(mp, (arg & FSCTL_SYNC_WAIT) ? &arg : NULL); - /* issue the sync for this volume */ - (void)sync_callback(mp, (arg & FSCTL_SYNC_WAIT) ? 
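/*
 * (Ordering sketch for this case: the root vp's vid was captured, the
 * vnode reference dropped, and mount_iterref() taken so the sync cannot
 * deadlock against vnode_iterate_prepare(); going by the flag's name,
 * passing &arg only under FSCTL_SYNC_WAIT is what asks sync_callback()
 * for a waiting sync. vnode_getwithvid() afterwards revalidates the
 * root vp by vid before the optional F_FULLFSYNC, since the vnode may
 * have been recycled while unreferenced.)
 */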
&arg : NULL); + /* + * Then release the mount_iterref once we're done syncing; it's not + * needed for the VNOP_IOCTL below + */ + mount_iterdrop(mp); - /* - * Then release the mount_iterref once we're done syncing; it's not - * needed for the VNOP_IOCTL below - */ - mount_iterdrop(mp); - - if (arg & FSCTL_SYNC_FULLSYNC) { - /* re-obtain vnode iocount on the root vp, if possible */ - error = vnode_getwithvid (vp, vvid); - if (error == 0) { - error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx); - vnode_put (vp); - } + if (arg & FSCTL_SYNC_FULLSYNC) { + /* re-obtain vnode iocount on the root vp, if possible */ + error = vnode_getwithvid(vp, vvid); + if (error == 0) { + error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx); + vnode_put(vp); } - /* mark the argument VP as having been released */ - *arg_vp = NULL; } - break; + /* mark the argument VP as having been released */ + *arg_vp = NULL; + } + break; - case FSIOC_ROUTEFS_SETROUTEID: { + case FSIOC_ROUTEFS_SETROUTEID: { #if ROUTEFS - char routepath[MAXPATHLEN]; - size_t len = 0; + char routepath[MAXPATHLEN]; + size_t len = 0; - if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { - break; - } - bzero(routepath, MAXPATHLEN); - error = copyinstr(udata, &routepath[0], MAXPATHLEN, &len); - if (error) { - break; - } - error = routefs_kernel_mount(routepath); - if (error) { - break; - } + if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { + break; + } + bzero(routepath, MAXPATHLEN); + error = copyinstr(udata, &routepath[0], MAXPATHLEN, &len); + if (error) { + break; + } + error = routefs_kernel_mount(routepath); + if (error) { + break; + } #endif + } + break; + + case FSIOC_SET_PACKAGE_EXTS: { + user_addr_t ext_strings; + uint32_t num_entries; + uint32_t max_width; + + if ((error = priv_check_cred(kauth_cred_get(), PRIV_PACKAGE_EXTENSIONS, 0))) { + break; } - break; - case FSIOC_SET_PACKAGE_EXTS: { - user_addr_t ext_strings; - uint32_t num_entries; - uint32_t max_width; + if ((is64bit && size != sizeof(user64_package_ext_info)) + || (is64bit == 0 && size != sizeof(user32_package_ext_info))) { + // either you're 64-bit and passed a 64-bit struct or + // you're 32-bit and passed a 32-bit struct. otherwise + // it's not ok. + error = EINVAL; + break; + } - if ((error = priv_check_cred(kauth_cred_get(), PRIV_PACKAGE_EXTENSIONS, 0))) - break; + if (is64bit) { + ext_strings = ((user64_package_ext_info *)data)->strings; + num_entries = ((user64_package_ext_info *)data)->num_entries; + max_width = ((user64_package_ext_info *)data)->max_width; + } else { + ext_strings = CAST_USER_ADDR_T(((user32_package_ext_info *)data)->strings); + num_entries = ((user32_package_ext_info *)data)->num_entries; + max_width = ((user32_package_ext_info *)data)->max_width; + } + error = set_package_extensions_table(ext_strings, num_entries, max_width); + } + break; - if ( (is64bit && size != sizeof(user64_package_ext_info)) - || (is64bit == 0 && size != sizeof(user32_package_ext_info))) { + /* namespace handlers */ + case FSIOC_NAMESPACE_HANDLER_GET: { + error = process_namespace_fsctl(NSPACE_HANDLER_NSPACE, is64bit, size, data); + } + break; - // either you're 64-bit and passed a 64-bit struct or - // you're 32-bit and passed a 32-bit struct. otherwise - // it's not ok. 
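// (The strict sizeof() match is the usual copyin ABI pattern: pointer
// width differs between 32- and 64-bit callers, so the fixed-layout
// user64_/user32_package_ext_info shadows are selected by the caller's
// declared size before either layout is trusted.)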
- error = EINVAL; - break; - } + /* Snapshot handlers */ + case FSIOC_OLD_SNAPSHOT_HANDLER_GET: { + error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data); + } + break; - if (is64bit) { - ext_strings = ((user64_package_ext_info *)data)->strings; - num_entries = ((user64_package_ext_info *)data)->num_entries; - max_width = ((user64_package_ext_info *)data)->max_width; - } else { - ext_strings = CAST_USER_ADDR_T(((user32_package_ext_info *)data)->strings); - num_entries = ((user32_package_ext_info *)data)->num_entries; - max_width = ((user32_package_ext_info *)data)->max_width; - } - error = set_package_extensions_table(ext_strings, num_entries, max_width); - } - break; + case FSIOC_SNAPSHOT_HANDLER_GET_EXT: { + error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data); + } + break; - /* namespace handlers */ - case FSIOC_NAMESPACE_HANDLER_GET: { - error = process_namespace_fsctl(NSPACE_HANDLER_NSPACE, is64bit, size, data); - } - break; + case FSIOC_NAMESPACE_HANDLER_UPDATE: { + uint32_t token, val; + int i; - /* Snapshot handlers */ - case FSIOC_OLD_SNAPSHOT_HANDLER_GET: { - error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data); + if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) { + break; } - break; - case FSIOC_SNAPSHOT_HANDLER_GET_EXT: { - error = process_namespace_fsctl(NSPACE_HANDLER_SNAPSHOT, is64bit, size, data); + if (!nspace_is_special_process(p)) { + error = EINVAL; + break; } - break; - case FSIOC_NAMESPACE_HANDLER_UPDATE: { - uint32_t token, val; - int i; + token = ((uint32_t *)data)[0]; + val = ((uint32_t *)data)[1]; - if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) { - break; - } + lck_mtx_lock(&nspace_handler_lock); - if (!nspace_is_special_process(p)) { - error = EINVAL; - break; + for (i = 0; i < MAX_NSPACE_ITEMS; i++) { + if (nspace_items[i].token == token) { + break; /* exit for loop, not case stmt */ } + } - token = ((uint32_t *)data)[0]; - val = ((uint32_t *)data)[1]; + if (i >= MAX_NSPACE_ITEMS) { + error = ENOENT; + } else { + // + // if this bit is set, when resolve_nspace_item() times out + // it will loop and go back to sleep. + // + nspace_items[i].flags |= NSPACE_ITEM_RESET_TIMER; + } - lck_mtx_lock(&nspace_handler_lock); + lck_mtx_unlock(&nspace_handler_lock); - for(i=0; i < MAX_NSPACE_ITEMS; i++) { - if (nspace_items[i].token == token) { - break; /* exit for loop, not case stmt */ - } - } + if (error) { + printf("nspace-handler-update: did not find token %u\n", token); + } + } + break; - if (i >= MAX_NSPACE_ITEMS) { - error = ENOENT; - } else { - // - // if this bit is set, when resolve_nspace_item() times out - // it will loop and go back to sleep. 
- // - nspace_items[i].flags |= NSPACE_ITEM_RESET_TIMER; - } + case FSIOC_NAMESPACE_HANDLER_UNBLOCK: { + uint32_t token, val; + int i; - lck_mtx_unlock(&nspace_handler_lock); + if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) { + break; + } - if (error) { - printf("nspace-handler-update: did not find token %u\n", token); - } + if (!nspace_is_special_process(p)) { + error = EINVAL; + break; } - break; - case FSIOC_NAMESPACE_HANDLER_UNBLOCK: { - uint32_t token, val; - int i; + token = ((uint32_t *)data)[0]; + val = ((uint32_t *)data)[1]; - if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) { - break; - } + lck_mtx_lock(&nspace_handler_lock); - if (!nspace_is_special_process(p)) { - error = EINVAL; - break; + for (i = 0; i < MAX_NSPACE_ITEMS; i++) { + if (nspace_items[i].token == token) { + break; /* exit for loop, not case statement */ } + } - token = ((uint32_t *)data)[0]; - val = ((uint32_t *)data)[1]; - - lck_mtx_lock(&nspace_handler_lock); - - for(i=0; i < MAX_NSPACE_ITEMS; i++) { - if (nspace_items[i].token == token) { - break; /* exit for loop, not case statement */ - } + if (i >= MAX_NSPACE_ITEMS) { + printf("nspace-handler-unblock: did not find token %u\n", token); + error = ENOENT; + } else { + if (val == 0 && nspace_items[i].vp) { + vnode_lock_spin(nspace_items[i].vp); + nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT; + vnode_unlock(nspace_items[i].vp); } - if (i >= MAX_NSPACE_ITEMS) { - printf("nspace-handler-unblock: did not find token %u\n", token); - error = ENOENT; - } else { - if (val == 0 && nspace_items[i].vp) { - vnode_lock_spin(nspace_items[i].vp); - nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT; - vnode_unlock(nspace_items[i].vp); - } - - nspace_items[i].vp = NULL; - nspace_items[i].arg = NULL; - nspace_items[i].op = 0; - nspace_items[i].vid = 0; - nspace_items[i].flags = NSPACE_ITEM_DONE; - nspace_items[i].token = 0; - - wakeup((caddr_t)&(nspace_items[i].vp)); - } + nspace_items[i].vp = NULL; + nspace_items[i].arg = NULL; + nspace_items[i].op = 0; + nspace_items[i].vid = 0; + nspace_items[i].flags = NSPACE_ITEM_DONE; + nspace_items[i].token = 0; - lck_mtx_unlock(&nspace_handler_lock); + wakeup((caddr_t)&(nspace_items[i].vp)); } - break; - case FSIOC_NAMESPACE_HANDLER_CANCEL: { - uint32_t token, val; - int i; + lck_mtx_unlock(&nspace_handler_lock); + } + break; - if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) { - break; - } + case FSIOC_NAMESPACE_HANDLER_CANCEL: { + uint32_t token, val; + int i; - if (!nspace_is_special_process(p)) { - error = EINVAL; - break; - } - - token = ((uint32_t *)data)[0]; - val = ((uint32_t *)data)[1]; - - lck_mtx_lock(&nspace_handler_lock); + if ((error = suser(kauth_cred_get(), &(p->p_acflag)))) { + break; + } - for(i=0; i < MAX_NSPACE_ITEMS; i++) { - if (nspace_items[i].token == token) { - break; /* exit for loop, not case stmt */ - } - } + if (!nspace_is_special_process(p)) { + error = EINVAL; + break; + } - if (i >= MAX_NSPACE_ITEMS) { - printf("nspace-handler-cancel: did not find token %u\n", token); - error = ENOENT; - } else { - if (nspace_items[i].vp) { - vnode_lock_spin(nspace_items[i].vp); - nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT; - vnode_unlock(nspace_items[i].vp); - } + token = ((uint32_t *)data)[0]; + val = ((uint32_t *)data)[1]; - nspace_items[i].vp = NULL; - nspace_items[i].arg = NULL; - nspace_items[i].vid = 0; - nspace_items[i].token = val; - nspace_items[i].flags &= ~NSPACE_ITEM_PROCESSING; - nspace_items[i].flags |= NSPACE_ITEM_CANCELLED; + lck_mtx_lock(&nspace_handler_lock); - 
wakeup((caddr_t)&(nspace_items[i].vp)); + for (i = 0; i < MAX_NSPACE_ITEMS; i++) { + if (nspace_items[i].token == token) { + break; /* exit for loop, not case stmt */ } - - lck_mtx_unlock(&nspace_handler_lock); } - break; - case FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME: { - if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { - break; + if (i >= MAX_NSPACE_ITEMS) { + printf("nspace-handler-cancel: did not find token %u\n", token); + error = ENOENT; + } else { + if (nspace_items[i].vp) { + vnode_lock_spin(nspace_items[i].vp); + nspace_items[i].vp->v_flag &= ~VNEEDSSNAPSHOT; + vnode_unlock(nspace_items[i].vp); } - // we explicitly do not do the namespace_handler_proc check here + nspace_items[i].vp = NULL; + nspace_items[i].arg = NULL; + nspace_items[i].vid = 0; + nspace_items[i].token = val; + nspace_items[i].flags &= ~NSPACE_ITEM_PROCESSING; + nspace_items[i].flags |= NSPACE_ITEM_CANCELLED; - lck_mtx_lock(&nspace_handler_lock); - snapshot_timestamp = ((uint32_t *)data)[0]; - wakeup(&nspace_item_idx); - lck_mtx_unlock(&nspace_handler_lock); - printf("nspace-handler-set-snapshot-time: %d\n", (int)snapshot_timestamp); + wakeup((caddr_t)&(nspace_items[i].vp)); + } + lck_mtx_unlock(&nspace_handler_lock); + } + break; + + case FSIOC_NAMESPACE_HANDLER_SET_SNAPSHOT_TIME: { + if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { + break; } - break; - case FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS: - { - if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { - break; - } + // we explicitly do not do the namespace_handler_proc check here - lck_mtx_lock(&nspace_handler_lock); - nspace_allow_virtual_devs = ((uint32_t *)data)[0]; - lck_mtx_unlock(&nspace_handler_lock); - printf("nspace-snapshot-handler will%s allow events on disk-images\n", - nspace_allow_virtual_devs ? "" : " NOT"); - error = 0; + lck_mtx_lock(&nspace_handler_lock); + snapshot_timestamp = ((uint32_t *)data)[0]; + wakeup(&nspace_item_idx); + lck_mtx_unlock(&nspace_handler_lock); + printf("nspace-handler-set-snapshot-time: %d\n", (int)snapshot_timestamp); + } + break; + case FSIOC_NAMESPACE_ALLOW_DMG_SNAPSHOT_EVENTS: + { + if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { + break; } - break; - case FSIOC_SET_FSTYPENAME_OVERRIDE: - { - if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { - break; - } - if (vp->v_mount) { - mount_lock(vp->v_mount); - if (data[0] != 0) { - strlcpy(&vp->v_mount->fstypename_override[0], data, MFSTYPENAMELEN); - vp->v_mount->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE; - if (vfs_isrdonly(vp->v_mount) && strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) { - vp->v_mount->mnt_kern_flag |= MNTK_EXTENDED_SECURITY; - vp->v_mount->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE; - } - } else { - if (strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) { - vp->v_mount->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY; - } - vp->v_mount->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE; - vp->v_mount->fstypename_override[0] = '\0'; + lck_mtx_lock(&nspace_handler_lock); + nspace_allow_virtual_devs = ((uint32_t *)data)[0]; + lck_mtx_unlock(&nspace_handler_lock); + printf("nspace-snapshot-handler will%s allow events on disk-images\n", + nspace_allow_virtual_devs ? 
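/*
 * (This pairs with the MNTK_VIRTUALDEV test in resolve_nspace_item_ext():
 * snapshot events on disk-image-backed mounts are dropped unless
 * nspace_allow_virtual_devs has been set through this fsctl, which, per
 * the comment at that check, the Mobile TimeMachine test infrastructure
 * does because it runs against disk images.)
 */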
"" : " NOT"); + error = 0; + } + break; + + case FSIOC_SET_FSTYPENAME_OVERRIDE: + { + if ((error = suser(kauth_cred_get(), &(current_proc()->p_acflag)))) { + break; + } + if (vp->v_mount) { + mount_lock(vp->v_mount); + if (data[0] != 0) { + strlcpy(&vp->v_mount->fstypename_override[0], data, MFSTYPENAMELEN); + vp->v_mount->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE; + if (vfs_isrdonly(vp->v_mount) && strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) { + vp->v_mount->mnt_kern_flag |= MNTK_EXTENDED_SECURITY; + vp->v_mount->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE; } - mount_unlock(vp->v_mount); + } else { + if (strcmp(vp->v_mount->fstypename_override, "mtmfs") == 0) { + vp->v_mount->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY; + } + vp->v_mount->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE; + vp->v_mount->fstypename_override[0] = '\0'; } + mount_unlock(vp->v_mount); } - break; + } + break; - case DISK_CONDITIONER_IOC_GET: { - error = disk_conditioner_get_info(vp->v_mount, (disk_conditioner_info *)data); - } - break; + case DISK_CONDITIONER_IOC_GET: { + error = disk_conditioner_get_info(vp->v_mount, (disk_conditioner_info *)data); + } + break; - case DISK_CONDITIONER_IOC_SET: { - error = disk_conditioner_set_info(vp->v_mount, (disk_conditioner_info *)data); - } - break; + case DISK_CONDITIONER_IOC_SET: { + error = disk_conditioner_set_info(vp->v_mount, (disk_conditioner_info *)data); + } + break; - default: { - /* other, known commands shouldn't be passed down here */ - switch (cmd) { - case F_PUNCHHOLE: - case F_TRIM_ACTIVE_FILE: - case F_RDADVISE: - case F_TRANSCODEKEY: - case F_GETPROTECTIONLEVEL: - case F_GETDEFAULTPROTLEVEL: - case F_MAKECOMPRESSED: - case F_SET_GREEDY_MODE: - case F_SETSTATICCONTENT: - case F_SETIOTYPE: - case F_SETBACKINGSTORE: - case F_GETPATH_MTMINFO: - case APFSIOC_REVERT_TO_SNAPSHOT: - case FSIOC_FIOSEEKHOLE: - case FSIOC_FIOSEEKDATA: - case HFS_GET_BOOT_INFO: - case HFS_SET_BOOT_INFO: - case FIOPINSWAP: - case F_CHKCLEAN: - case F_FULLFSYNC: - case F_BARRIERFSYNC: - case F_FREEZE_FS: - case F_THAW_FS: - error = EINVAL; - goto outdrop; - } - /* Invoke the filesystem-specific code */ - error = VNOP_IOCTL(vp, cmd, data, options, ctx); + case FSIOC_FD_ONLY_OPEN_ONCE: { + if (vnode_usecount(vp) > 1) { + error = EBUSY; + } else { + error = 0; } - + } + break; + + default: { + /* other, known commands shouldn't be passed down here */ + switch (cmd) { + case F_PUNCHHOLE: + case F_TRIM_ACTIVE_FILE: + case F_RDADVISE: + case F_TRANSCODEKEY: + case F_GETPROTECTIONLEVEL: + case F_GETDEFAULTPROTLEVEL: + case F_MAKECOMPRESSED: + case F_SET_GREEDY_MODE: + case F_SETSTATICCONTENT: + case F_SETIOTYPE: + case F_SETBACKINGSTORE: + case F_GETPATH_MTMINFO: + case APFSIOC_REVERT_TO_SNAPSHOT: + case FSIOC_FIOSEEKHOLE: + case FSIOC_FIOSEEKDATA: + case HFS_GET_BOOT_INFO: + case HFS_SET_BOOT_INFO: + case FIOPINSWAP: + case F_CHKCLEAN: + case F_FULLFSYNC: + case F_BARRIERFSYNC: + case F_FREEZE_FS: + case F_THAW_FS: + error = EINVAL; + goto outdrop; + } + /* Invoke the filesystem-specific code */ + error = VNOP_IOCTL(vp, cmd, data, options, ctx); + } } /* end switch stmt */ /* * if no errors, copy any data to user. Size was * already set and checked above. 
*/ - if (error == 0 && (cmd & IOC_OUT) && size) + if (error == 0 && (cmd & IOC_OUT) && size) { error = copyout(data, udata, size); + } outdrop: if (memp) { @@ -10603,7 +10969,7 @@ outdrop: /* ARGSUSED */ int -fsctl (proc_t p, struct fsctl_args *uap, __unused int32_t *retval) +fsctl(proc_t p, struct fsctl_args *uap, __unused int32_t *retval) { int error; struct nameidata nd; @@ -10615,10 +10981,23 @@ fsctl (proc_t p, struct fsctl_args *uap, __unused int32_t *retval) AUDIT_ARG(value32, uap->options); /* Get the vnode for the file we are getting info on: */ nameiflags = 0; - if ((uap->options & FSOPT_NOFOLLOW) == 0) nameiflags |= FOLLOW; + // + // if we come through fsctl() then the file is by definition not open. + // therefore for the FSIOC_FD_ONLY_OPEN_ONCE selector we return an error + // lest the caller mistakenly thinks the only open is their own (but in + // reality it's someone elses). + // + if (uap->cmd == FSIOC_FD_ONLY_OPEN_ONCE) { + return EINVAL; + } + if ((uap->options & FSOPT_NOFOLLOW) == 0) { + nameiflags |= FOLLOW; + } NDINIT(&nd, LOOKUP, OP_FSCTL, nameiflags | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); - if ((error = namei(&nd))) goto done; + UIO_USERSPACE, uap->path, ctx); + if ((error = namei(&nd))) { + goto done; + } vp = nd.ni_vp; nameidone(&nd); @@ -10632,13 +11011,14 @@ fsctl (proc_t p, struct fsctl_args *uap, __unused int32_t *retval) error = fsctl_internal(p, &vp, uap->cmd, (user_addr_t)uap->data, uap->options, ctx); done: - if (vp) + if (vp) { vnode_put(vp); + } return error; } /* ARGSUSED */ int -ffsctl (proc_t p, struct ffsctl_args *uap, __unused int32_t *retval) +ffsctl(proc_t p, struct ffsctl_args *uap, __unused int32_t *retval) { int error; vnode_t vp = NULL; @@ -10650,8 +11030,9 @@ ffsctl (proc_t p, struct ffsctl_args *uap, __unused int32_t *retval) AUDIT_ARG(value32, uap->options); /* Get the vnode for the file we are getting info on: */ - if ((error = file_vnode(uap->fd, &vp))) + if ((error = file_vnode(uap->fd, &vp))) { return error; + } fd = uap->fd; if ((error = vnode_getwithref(vp))) { file_drop(fd); @@ -10687,7 +11068,7 @@ getxattr(proc_t p, struct getxattr_args *uap, user_ssize_t *retval) { vnode_t vp; struct nameidata nd; - char attrname[XATTR_MAXNAMELEN+1]; + char attrname[XATTR_MAXNAMELEN + 1]; vfs_context_t ctx = vfs_context_current(); uio_t auio = NULL; int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; @@ -10695,15 +11076,16 @@ getxattr(proc_t p, struct getxattr_args *uap, user_ssize_t *retval) size_t namelen; u_int32_t nameiflags; int error; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; - if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT)) - return (EINVAL); + if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT)) { + return EINVAL; + } nameiflags = (uap->options & XATTR_NOFOLLOW) ? 
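/*
 * (XATTR_NOFOLLOW inverts the usual lookup default: with it set, namei()
 * stops at a symlink and the attribute is read from the link itself, in
 * the spirit of the "l"-prefixed BSD calls; otherwise FOLLOW resolves
 * the target as for most path-based syscalls.)
 */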
0 : FOLLOW; NDINIT(&nd, LOOKUP, OP_GETXATTR, nameiflags, spacetype, uap->path, ctx); if ((error = namei(&nd))) { - return (error); + return error; } vp = nd.ni_vp; nameidone(&nd); @@ -10739,15 +11121,17 @@ getxattr(proc_t p, struct getxattr_args *uap, user_ssize_t *retval) * U64 running on K64 will yield -1 (64 bits wide) * U32/U64 running on K32 will yield -1 (32 bits wide) */ - if (uap->size == 0xffffffff || uap->size == (size_t)-1) + if (uap->size == 0xffffffff || uap->size == (size_t)-1) { goto no_uio; + } if (uap->value) { - if (uap->size > (size_t)XATTR_MAXSIZE) + if (uap->size > (size_t)XATTR_MAXSIZE) { uap->size = XATTR_MAXSIZE; + } auio = uio_createwithbuffer(1, uap->position, spacetype, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, uap->value, uap->size); } no_uio: @@ -10761,7 +11145,7 @@ out: *retval = (user_ssize_t)attrsize; } - return (error); + return error; } /* @@ -10771,23 +11155,24 @@ int fgetxattr(proc_t p, struct fgetxattr_args *uap, user_ssize_t *retval) { vnode_t vp; - char attrname[XATTR_MAXNAMELEN+1]; + char attrname[XATTR_MAXNAMELEN + 1]; uio_t auio = NULL; int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; size_t attrsize = 0; size_t namelen; int error; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; - if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) - return (EINVAL); + if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) { + return EINVAL; + } - if ( (error = file_vnode(uap->fd, &vp)) ) { - return (error); + if ((error = file_vnode(uap->fd, &vp))) { + return error; } - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); if (error != 0) { @@ -10799,7 +11184,7 @@ fgetxattr(proc_t p, struct fgetxattr_args *uap, user_ssize_t *retval) } if (uap->value && uap->size > 0) { auio = uio_createwithbuffer(1, uap->position, spacetype, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, uap->value, uap->size); } @@ -10813,7 +11198,7 @@ out: } else { *retval = (user_ssize_t)attrsize; } - return (error); + return error; } /* @@ -10824,43 +11209,45 @@ setxattr(proc_t p, struct setxattr_args *uap, int *retval) { vnode_t vp; struct nameidata nd; - char attrname[XATTR_MAXNAMELEN+1]; + char attrname[XATTR_MAXNAMELEN + 1]; vfs_context_t ctx = vfs_context_current(); uio_t auio = NULL; int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; size_t namelen; u_int32_t nameiflags; int error; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; - if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT)) - return (EINVAL); + if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT)) { + return EINVAL; + } error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); if (error != 0) { if (error == EPERM) { /* if the string won't fit in attrname, copyinstr emits EPERM */ - return (ENAMETOOLONG); + return ENAMETOOLONG; } /* Otherwise return the default error from copyinstr to detect ERANGE, etc */ return error; } - if (xattr_protected(attrname)) - return(EPERM); + if (xattr_protected(attrname)) { + return EPERM; + } if (uap->size != 0 && uap->value == 0) { - return (EINVAL); + return EINVAL; } nameiflags = (uap->options & XATTR_NOFOLLOW) ? 
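/*
 * Typical userspace pattern against the getxattr() path above, using
 * the public <sys/xattr.h> API: probe with no buffer to learn the
 * attribute's size, then fetch it. Note the kernel treats a size of
 * 0xffffffff / (size_t)-1 as "no output buffer" so that 32- and 64-bit
 * callers passing -1 behave identically.
 */
#include <stdlib.h>
#include <sys/xattr.h>

static void *
read_xattr(const char *path, const char *name, ssize_t *lenp)
{
	ssize_t len = getxattr(path, name, NULL, 0, 0, 0);
	void *buf;

	if (len < 0 || (buf = malloc((size_t)len)) == NULL) {
		return NULL;
	}
	*lenp = getxattr(path, name, buf, (size_t)len, 0, 0);
	if (*lenp < 0) {
		free(buf);
		return NULL;
	}
	return buf;
}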
0 : FOLLOW; NDINIT(&nd, LOOKUP, OP_SETXATTR, nameiflags, spacetype, uap->path, ctx); if ((error = namei(&nd))) { - return (error); + return error; } vp = nd.ni_vp; nameidone(&nd); auio = uio_createwithbuffer(1, uap->position, spacetype, UIO_WRITE, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, uap->value, uap->size); error = vn_setxattr(vp, attrname, auio, uap->options, ctx); @@ -10873,7 +11260,7 @@ setxattr(proc_t p, struct setxattr_args *uap, int *retval) #endif vnode_put(vp); *retval = 0; - return (error); + return error; } /* @@ -10883,42 +11270,44 @@ int fsetxattr(proc_t p, struct fsetxattr_args *uap, int *retval) { vnode_t vp; - char attrname[XATTR_MAXNAMELEN+1]; + char attrname[XATTR_MAXNAMELEN + 1]; uio_t auio = NULL; int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; size_t namelen; int error; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; #if CONFIG_FSE vfs_context_t ctx = vfs_context_current(); #endif - if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) - return (EINVAL); + if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) { + return EINVAL; + } error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); if (error != 0) { if (error == EPERM) { /* if the string won't fit in attrname, copyinstr emits EPERM */ - return (ENAMETOOLONG); + return ENAMETOOLONG; } /* Otherwise return the default error from copyinstr to detect ERANGE, etc */ return error; } - if (xattr_protected(attrname)) - return(EPERM); + if (xattr_protected(attrname)) { + return EPERM; + } if (uap->size != 0 && uap->value == 0) { - return (EINVAL); + return EINVAL; } - if ( (error = file_vnode(uap->fd, &vp)) ) { - return (error); + if ((error = file_vnode(uap->fd, &vp))) { + return error; } - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } auio = uio_createwithbuffer(1, uap->position, spacetype, UIO_WRITE, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, uap->value, uap->size); error = vn_setxattr(vp, attrname, auio, uap->options, vfs_context_current()); @@ -10932,7 +11321,7 @@ fsetxattr(proc_t p, struct fsetxattr_args *uap, int *retval) vnode_put(vp); file_drop(uap->fd); *retval = 0; - return (error); + return error; } /* @@ -10944,26 +11333,28 @@ removexattr(proc_t p, struct removexattr_args *uap, int *retval) { vnode_t vp; struct nameidata nd; - char attrname[XATTR_MAXNAMELEN+1]; + char attrname[XATTR_MAXNAMELEN + 1]; int spacetype = IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; vfs_context_t ctx = vfs_context_current(); size_t namelen; u_int32_t nameiflags; int error; - if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT)) - return (EINVAL); + if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT)) { + return EINVAL; + } error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); if (error != 0) { - return (error); + return error; + } + if (xattr_protected(attrname)) { + return EPERM; } - if (xattr_protected(attrname)) - return(EPERM); nameiflags = (uap->options & XATTR_NOFOLLOW) ? 
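/*
 * Userspace sketch of the setxattr() path above (public <sys/xattr.h>
 * API). Two behaviors visible in the kernel code: an attribute name
 * longer than XATTR_MAXNAMELEN comes back as ENAMETOOLONG (the EPERM
 * from copyinstr is remapped), and names rejected by xattr_protected()
 * fail with EPERM. "user.example" is a hypothetical attribute name.
 */
#include <string.h>
#include <sys/xattr.h>

static int
tag_file(const char *path)
{
	static const char value[] = "example";

	return setxattr(path, "user.example", value, strlen(value), 0, 0);
}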
0 : FOLLOW; NDINIT(&nd, LOOKUP, OP_REMOVEXATTR, nameiflags, spacetype, uap->path, ctx); if ((error = namei(&nd))) { - return (error); + return error; } vp = nd.ni_vp; nameidone(&nd); @@ -10978,7 +11369,7 @@ removexattr(proc_t p, struct removexattr_args *uap, int *retval) #endif vnode_put(vp); *retval = 0; - return (error); + return error; } /* @@ -10989,28 +11380,30 @@ int fremovexattr(__unused proc_t p, struct fremovexattr_args *uap, int *retval) { vnode_t vp; - char attrname[XATTR_MAXNAMELEN+1]; + char attrname[XATTR_MAXNAMELEN + 1]; size_t namelen; int error; #if CONFIG_FSE vfs_context_t ctx = vfs_context_current(); #endif - if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) - return (EINVAL); + if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) { + return EINVAL; + } error = copyinstr(uap->attrname, attrname, sizeof(attrname), &namelen); if (error != 0) { - return (error); + return error; } - if (xattr_protected(attrname)) - return(EPERM); - if ( (error = file_vnode(uap->fd, &vp)) ) { - return (error); + if (xattr_protected(attrname)) { + return EPERM; } - if ( (error = vnode_getwithref(vp)) ) { + if ((error = file_vnode(uap->fd, &vp))) { + return error; + } + if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } error = vn_removexattr(vp, attrname, uap->options, vfs_context_current()); @@ -11024,7 +11417,7 @@ fremovexattr(__unused proc_t p, struct fremovexattr_args *uap, int *retval) vnode_put(vp); file_drop(uap->fd); *retval = 0; - return (error); + return error; } /* @@ -11042,21 +11435,22 @@ listxattr(proc_t p, struct listxattr_args *uap, user_ssize_t *retval) size_t attrsize = 0; u_int32_t nameiflags; int error; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; - if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT)) - return (EINVAL); + if (uap->options & (XATTR_NOSECURITY | XATTR_NODEFAULT)) { + return EINVAL; + } nameiflags = (uap->options & XATTR_NOFOLLOW) ? 0 : FOLLOW; NDINIT(&nd, LOOKUP, OP_LISTXATTR, nameiflags, spacetype, uap->path, ctx); if ((error = namei(&nd))) { - return (error); + return error; } vp = nd.ni_vp; nameidone(&nd); if (uap->namebuf != 0 && uap->bufsize > 0) { auio = uio_createwithbuffer(1, 0, spacetype, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, uap->namebuf, uap->bufsize); } @@ -11068,7 +11462,7 @@ listxattr(proc_t p, struct listxattr_args *uap, user_ssize_t *retval) } else { *retval = (user_ssize_t)attrsize; } - return (error); + return error; } /* @@ -11083,21 +11477,22 @@ flistxattr(proc_t p, struct flistxattr_args *uap, user_ssize_t *retval) int spacetype = proc_is64bit(p) ? 
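/*
 * Sketch of consuming listxattr() results through the public
 * <sys/xattr.h> wrapper: the buffer that vn_listxattr() fills above is
 * a packed sequence of NUL-terminated attribute names.
 */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

static void
print_xattrs(const char *path)
{
	char names[4096];
	ssize_t len = listxattr(path, names, sizeof(names), 0);

	for (ssize_t off = 0; off < len;
	    off += (ssize_t)strlen(names + off) + 1) {
		printf("%s\n", names + off);
	}
}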
UIO_USERSPACE64 : UIO_USERSPACE32; size_t attrsize = 0; int error; - char uio_buf[ UIO_SIZEOF(1) ]; + char uio_buf[UIO_SIZEOF(1)]; - if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) - return (EINVAL); + if (uap->options & (XATTR_NOFOLLOW | XATTR_NOSECURITY | XATTR_NODEFAULT)) { + return EINVAL; + } - if ( (error = file_vnode(uap->fd, &vp)) ) { - return (error); + if ((error = file_vnode(uap->fd, &vp))) { + return error; } - if ( (error = vnode_getwithref(vp)) ) { + if ((error = vnode_getwithref(vp))) { file_drop(uap->fd); - return(error); + return error; } if (uap->namebuf != 0 && uap->bufsize > 0) { auio = uio_createwithbuffer(1, 0, spacetype, - UIO_READ, &uio_buf[0], sizeof(uio_buf)); + UIO_READ, &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, uap->namebuf, uap->bufsize); } @@ -11110,10 +11505,11 @@ flistxattr(proc_t p, struct flistxattr_args *uap, user_ssize_t *retval) } else { *retval = (user_ssize_t)attrsize; } - return (error); + return error; } -static int fsgetpath_internal( +static int +fsgetpath_internal( vfs_context_t ctx, int volfs_id, uint64_t objid, vm_size_t bufsize, caddr_t buf, int *pathlen) { @@ -11126,11 +11522,11 @@ static int fsgetpath_internal( unsigned int retries = 0x10; if (bufsize > PAGE_SIZE) { - return (EINVAL); + return EINVAL; } if (buf == NULL) { - return (ENOMEM); + return ENOMEM; } retry: @@ -11155,8 +11551,9 @@ unionget: struct mount *tmp = mp; mp = vnode_mount(tmp->mnt_vnodecovered); vfs_unbusy(tmp); - if (vfs_busy(mp, LK_NOWAIT) == 0) + if (vfs_busy(mp, LK_NOWAIT) == 0) { goto unionget; + } } else { vfs_unbusy(mp); } @@ -11183,8 +11580,9 @@ unionget: /* there was a race building the path, try a few more times */ if (error == EAGAIN) { --retries; - if (retries > 0) + if (retries > 0) { goto retry; + } error = ENOENT; } @@ -11199,7 +11597,7 @@ unionget: dbg_namelen = (int)sizeof(dbg_parms); - if (length < dbg_namelen) { + if (length < dbg_namelen) { memcpy((char *)dbg_parms, buf, length); memset((char *)dbg_parms + length, 0, dbg_namelen - length); @@ -11209,13 +11607,13 @@ unionget: } kdebug_vfs_lookup(dbg_parms, dbg_namelen, (void *)vp, - KDBG_VFS_LOOKUP_FLAG_LOOKUP); + KDBG_VFS_LOOKUP_FLAG_LOOKUP); } *pathlen = (user_ssize_t)length; /* may be superseded by error */ out: - return (error); + return error; } /* @@ -11231,18 +11629,18 @@ fsgetpath(__unused proc_t p, struct fsgetpath_args *uap, user_ssize_t *retval) int error; if ((error = copyin(uap->fsid, (caddr_t)&fsid, sizeof(fsid)))) { - return (error); + return error; } AUDIT_ARG(value32, fsid.val[0]); AUDIT_ARG(value64, uap->objid); /* Restrict output buffer size for now. 
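/*
 * Userspace sketch of the fsgetpath() plumbing above. Assumption: the
 * public wrapper from <sys/fsgetpath.h> (macOS 10.13 and later); the
 * fsid comes from statfs() and the object id from stat(). Note that
 * fsgetpath_internal() caps bufsize at PAGE_SIZE and retries a bounded
 * number of times (16) when there was a race building the path (EAGAIN).
 */
#include <stdint.h>
#include <sys/fsgetpath.h>
#include <sys/mount.h>
#include <sys/stat.h>

static ssize_t
path_for_object(const char *anypath, char *buf, size_t bufsize)
{
	struct statfs sfs;
	struct stat sb;

	if (statfs(anypath, &sfs) != 0 || stat(anypath, &sb) != 0) {
		return -1;
	}
	return fsgetpath(buf, bufsize, &sfs.f_fsid, (uint64_t)sb.st_ino);
}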
*/ if (uap->bufsize > PAGE_SIZE) { - return (EINVAL); + return EINVAL; } MALLOC(realpath, char *, uap->bufsize, M_TEMP, M_WAITOK | M_ZERO); if (realpath == NULL) { - return (ENOMEM); + return ENOMEM; } error = fsgetpath_internal( @@ -11260,7 +11658,7 @@ out: if (realpath) { FREE(realpath, M_TEMP); } - return (error); + return error; } /* @@ -11275,8 +11673,8 @@ munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, user_addr_t bufp, int *sizep, boolean_t is_64_bit, boolean_t partial_copy) { - int error; - int my_size, copy_size; + int error; + int my_size, copy_size; if (is_64_bit) { struct user64_statfs sfs; @@ -11306,8 +11704,7 @@ munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, copy_size -= (sizeof(sfs.f_reserved3) + sizeof(sfs.f_reserved4)); } error = copyout((caddr_t)&sfs, bufp, copy_size); - } - else { + } else { struct user32_statfs sfs; my_size = copy_size = sizeof(sfs); @@ -11323,19 +11720,19 @@ munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, * to reflect the filesystem size as best we can. */ if ((sfsp->f_blocks > INT_MAX) - /* Hack for 4061702 . I think the real fix is for Carbon to - * look for some volume capability and not depend on hidden - * semantics agreed between a FS and carbon. - * f_blocks, f_bfree, and f_bavail set to -1 is the trigger - * for Carbon to set bNoVolumeSizes volume attribute. - * Without this the webdavfs files cannot be copied onto - * disk as they look huge. This change should not affect - * XSAN as they should not setting these to -1.. - */ - && (sfsp->f_blocks != 0xffffffffffffffffULL) - && (sfsp->f_bfree != 0xffffffffffffffffULL) - && (sfsp->f_bavail != 0xffffffffffffffffULL)) { - int shift; + /* Hack for 4061702. I think the real fix is for Carbon to + * look for some volume capability and not depend on hidden + * semantics agreed between a FS and Carbon. + * f_blocks, f_bfree, and f_bavail set to -1 is the trigger + * for Carbon to set the bNoVolumeSizes volume attribute. + * Without this the webdavfs files cannot be copied onto + * disk as they look huge. This change should not affect + * XSAN as it should not be setting these to -1. + */ + && (sfsp->f_blocks != 0xffffffffffffffffULL) + && (sfsp->f_bfree != 0xffffffffffffffffULL) + && (sfsp->f_bavail != 0xffffffffffffffffULL)) { + int shift; /* * Work out how far we have to shift the block count down to make it fit. @@ -11347,12 +11744,14 @@ munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, * being smaller than f_bsize. */ for (shift = 0; shift < 32; shift++) { - if ((sfsp->f_blocks >> shift) <= INT_MAX) + if ((sfsp->f_blocks >> shift) <= INT_MAX) { break; - if ((sfsp->f_bsize << (shift + 1)) > INT_MAX) + } + if ((sfsp->f_bsize << (shift + 1)) > INT_MAX) { break; + } } -#define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s))) +#define __SHIFT_OR_CLIP(x, s) ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s))) sfs.f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sfsp->f_blocks, shift); sfs.f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sfsp->f_bfree, shift); sfs.f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sfsp->f_bavail, shift); @@ -11388,13 +11787,14 @@ munge_statfs(struct mount *mp, struct vfsstatfs *sfsp, if (sizep != NULL) { *sizep = my_size; } - return(error); + return error; } /* * copy stat structure into user_stat structure.
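/*
 * Standalone sketch (not from the patch) of the downscaling trick in
 * munge_statfs() above: find the smallest shift that lets the 64-bit
 * block count fit a 32-bit field while growing f_bsize by the same
 * power of two, so bsize times blocks still approximates the true
 * volume size; anything that still overflows is clipped to INT_MAX,
 * exactly as __SHIFT_OR_CLIP() does.
 */
#include <limits.h>
#include <stdint.h>

static void
scale_blocks_to_32bit(uint64_t blocks, uint32_t bsize,
    int32_t *blocks32, int32_t *bsize32)
{
	int shift;

	for (shift = 0; shift < 32; shift++) {
		if ((blocks >> shift) <= INT_MAX) {
			break;  /* count now fits */
		}
		if (((uint64_t)bsize << (shift + 1)) > INT_MAX) {
			break;  /* block size can't grow any further */
		}
	}
	*blocks32 = (blocks >> shift) > INT_MAX ?
	    INT_MAX : (int32_t)(blocks >> shift);
	*bsize32 = (int32_t)(bsize << shift);
}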
*/ -void munge_user64_stat(struct stat *sbp, struct user64_stat *usbp) +void +munge_user64_stat(struct stat *sbp, struct user64_stat *usbp) { bzero(usbp, sizeof(*usbp)); @@ -11430,7 +11830,8 @@ void munge_user64_stat(struct stat *sbp, struct user64_stat *usbp) usbp->st_qspare[1] = sbp->st_qspare[1]; } -void munge_user32_stat(struct stat *sbp, struct user32_stat *usbp) +void +munge_user32_stat(struct stat *sbp, struct user32_stat *usbp) { bzero(usbp, sizeof(*usbp)); @@ -11469,7 +11870,8 @@ void munge_user32_stat(struct stat *sbp, struct user32_stat *usbp) /* * copy stat64 structure into user_stat64 structure. */ -void munge_user64_stat64(struct stat64 *sbp, struct user64_stat64 *usbp) +void +munge_user64_stat64(struct stat64 *sbp, struct user64_stat64 *usbp) { bzero(usbp, sizeof(*usbp)); @@ -11509,7 +11911,8 @@ void munge_user64_stat64(struct stat64 *sbp, struct user64_stat64 *usbp) usbp->st_qspare[1] = sbp->st_qspare[1]; } -void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp) +void +munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp) { bzero(usbp, sizeof(*usbp)); @@ -11552,14 +11955,16 @@ void munge_user32_stat64(struct stat64 *sbp, struct user32_stat64 *usbp) /* * Purge buffer cache for simulating cold starts */ -static int vnode_purge_callback(struct vnode *vp, __unused void *cargs) +static int +vnode_purge_callback(struct vnode *vp, __unused void *cargs) { ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL /* off_t *resid_off */, UBC_PUSHALL | UBC_INVALIDATE); return VNODE_RETURNED; } -static int vfs_purge_callback(mount_t mp, __unused void * arg) +static int +vfs_purge_callback(mount_t mp, __unused void * arg) { vnode_iterate(mp, VNODE_WAIT | VNODE_ITERATE_ALL, vnode_purge_callback, NULL); @@ -11569,10 +11974,11 @@ static int vfs_purge_callback(mount_t mp, __unused void * arg) int vfs_purge(__unused struct proc *p, __unused struct vfs_purge_args *uap, __unused int32_t *retval) { - if (!kauth_cred_issuser(kauth_cred_get())) + if (!kauth_cred_issuser(kauth_cred_get())) { return EPERM; + } - vfs_iterate(0/* flags */, vfs_purge_callback, NULL); + vfs_iterate(0 /* flags */, vfs_purge_callback, NULL); return 0; } @@ -11585,7 +11991,7 @@ vfs_purge(__unused struct proc *p, __unused struct vfs_purge_args *uap, __unused int vnode_get_snapdir(vnode_t rvp, vnode_t *sdvpp, vfs_context_t ctx) { - return (VFS_VGET_SNAPDIR(vnode_mount(rvp), sdvpp, ctx)); + return VFS_VGET_SNAPDIR(vnode_mount(rvp), sdvpp, ctx); } /* @@ -11617,8 +12023,9 @@ vnode_get_snapshot(int dirfd, vnode_t *rvpp, vnode_t *sdvpp, *rvpp = NULLVP; error = vnode_getfromfd(ctx, dirfd, rvpp); - if (error) - return (error); + if (error) { + return error; + } if (!vnode_isvroot(*rvpp)) { error = EINVAL; @@ -11639,13 +12046,15 @@ vnode_get_snapshot(int dirfd, vnode_t *rvpp, vnode_t *sdvpp, } error = vnode_get_snapdir(*rvpp, sdvpp, ctx); - if (error) + if (error) { goto out; + } MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); error = copyinstr(name, name_buf, MAXPATHLEN, &name_len); - if (error) + if (error) { goto out1; + } /* * Some sanity checks- name can't be empty, "." or ".." or have slashes. 
@@ -11656,7 +12065,9 @@ vnode_get_snapshot(int dirfd, vnode_t *rvpp, vnode_t *sdvpp, error = EINVAL; goto out1; } - for (i = 0; i < (int)name_len && name_buf[i] != '/'; i++); + for (i = 0; i < (int)name_len && name_buf[i] != '/'; i++) { + ; + } if (i < (int)name_len) { error = EINVAL; goto out1; @@ -11670,8 +12081,9 @@ vnode_get_snapshot(int dirfd, vnode_t *rvpp, vnode_t *sdvpp, error = mac_mount_check_snapshot_delete(ctx, vnode_mount(*rvpp), name_buf); } - if (error) + if (error) { goto out1; + } #endif /* Check if the snapshot already exists ... */ @@ -11693,7 +12105,7 @@ out: *rvpp = NULLVP; } } - return (error); + return error; } /* @@ -11721,8 +12133,9 @@ snapshot_create(int dirfd, user_addr_t name, __unused uint32_t flags, error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, CREATE, OP_LINK, ctx); - if (error) - return (error); + if (error) { + return error; + } if (namend.ni_vp) { vnode_put(namend.ni_vp); @@ -11737,14 +12150,15 @@ snapshot_create(int dirfd, user_addr_t name, __unused uint32_t flags, error = vn_create(snapdvp, &vp, &namend, &va, VN_CREATE_NOAUTH | VN_CREATE_NOINHERIT, 0, NULL, ctx); - if (!error && vp) + if (!error && vp) { vnode_put(vp); + } } nameidone(&namend); vnode_put(snapdvp); vnode_put(rvp); - return (error); + return error; } /* @@ -11763,8 +12177,9 @@ snapshot_delete(int dirfd, user_addr_t name, __unused uint32_t flags, error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, DELETE, OP_UNLINK, ctx); - if (error) + if (error) { goto out; + } error = VNOP_REMOVE(snapdvp, namend.ni_vp, &namend.ni_cnd, VNODE_REMOVE_SKIP_NAMESPACE_EVENT, ctx); @@ -11774,7 +12189,7 @@ snapshot_delete(int dirfd, user_addr_t name, __unused uint32_t flags, vnode_put(snapdvp); vnode_put(rvp); out: - return (error); + return error; } /* @@ -11784,86 +12199,86 @@ out: */ static int snapshot_revert(int dirfd, user_addr_t name, __unused uint32_t flags, - vfs_context_t ctx) -{ - int error; - vnode_t rvp; - mount_t mp; - struct fs_snapshot_revert_args revert_data; - struct componentname cnp; - caddr_t name_buf; - size_t name_len; - - error = vnode_getfromfd(ctx, dirfd, &rvp); - if (error) { - return (error); - } - mp = vnode_mount(rvp); - - MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); - error = copyinstr(name, name_buf, MAXPATHLEN, &name_len); - if (error) { - FREE(name_buf, M_TEMP); - vnode_put(rvp); - return (error); - } + vfs_context_t ctx) +{ + int error; + vnode_t rvp; + mount_t mp; + struct fs_snapshot_revert_args revert_data; + struct componentname cnp; + caddr_t name_buf; + size_t name_len; + + error = vnode_getfromfd(ctx, dirfd, &rvp); + if (error) { + return error; + } + mp = vnode_mount(rvp); + + MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); + error = copyinstr(name, name_buf, MAXPATHLEN, &name_len); + if (error) { + FREE(name_buf, M_TEMP); + vnode_put(rvp); + return error; + } #if CONFIG_MACF - error = mac_mount_check_snapshot_revert(ctx, mp, name_buf); - if (error) { - FREE(name_buf, M_TEMP); - vnode_put(rvp); - return (error); - } -#endif - - /* - * Grab mount_iterref so that we can release the vnode, - * since VFSIOC_REVERT_SNAPSHOT could conceivably cause a sync. 
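/*
 * Userspace sketch of driving the snapshot_create()/snapshot_delete()
 * paths above. Assumptions: the <sys/snapshot.h> wrappers shipped since
 * macOS 10.13, which funnel into the fs_snapshot() syscall, and an
 * entitlement (reportedly com.apple.developer.vfs.snapshot) to pass the
 * PRIV_VFS_SNAPSHOT check. Names must be non-empty, not "." or "..",
 * and slash-free, per the checks in vnode_get_snapshot() above.
 */
#include <fcntl.h>
#include <sys/snapshot.h>
#include <unistd.h>

static int
make_and_drop_snapshot(const char *volroot, const char *name)
{
	int dirfd = open(volroot, O_RDONLY);
	int error = -1;

	if (dirfd < 0) {
		return -1;
	}
	if (fs_snapshot_create(dirfd, name, 0) == 0) {
		error = fs_snapshot_delete(dirfd, name, 0);
	}
	close(dirfd);
	return error;
}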
- */ - error = mount_iterref (mp, 0); - vnode_put(rvp); - if (error) { - FREE(name_buf, M_TEMP); - return (error); - } - - memset(&cnp, 0, sizeof(cnp)); - cnp.cn_pnbuf = (char *)name_buf; - cnp.cn_nameiop = LOOKUP; - cnp.cn_flags = ISLASTCN | HASBUF; - cnp.cn_pnlen = MAXPATHLEN; - cnp.cn_nameptr = cnp.cn_pnbuf; - cnp.cn_namelen = (int)name_len; - revert_data.sr_cnp = &cnp; - - error = VFS_IOCTL(mp, VFSIOC_REVERT_SNAPSHOT, (caddr_t)&revert_data, 0, ctx); - mount_iterdrop(mp); - FREE(name_buf, M_TEMP); - - if (error) { - /* If there was any error, try again using VNOP_IOCTL */ - - vnode_t snapdvp; - struct nameidata namend; - - error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, LOOKUP, - OP_LOOKUP, ctx); - if (error) { - return (error); - } - - - error = VNOP_IOCTL(namend.ni_vp, APFSIOC_REVERT_TO_SNAPSHOT, (caddr_t) NULL, - 0, ctx); - - vnode_put(namend.ni_vp); - nameidone(&namend); - vnode_put(snapdvp); - vnode_put(rvp); - } - - return (error); + error = mac_mount_check_snapshot_revert(ctx, mp, name_buf); + if (error) { + FREE(name_buf, M_TEMP); + vnode_put(rvp); + return error; + } +#endif + + /* + * Grab mount_iterref so that we can release the vnode, + * since VFSIOC_REVERT_SNAPSHOT could conceivably cause a sync. + */ + error = mount_iterref(mp, 0); + vnode_put(rvp); + if (error) { + FREE(name_buf, M_TEMP); + return error; + } + + memset(&cnp, 0, sizeof(cnp)); + cnp.cn_pnbuf = (char *)name_buf; + cnp.cn_nameiop = LOOKUP; + cnp.cn_flags = ISLASTCN | HASBUF; + cnp.cn_pnlen = MAXPATHLEN; + cnp.cn_nameptr = cnp.cn_pnbuf; + cnp.cn_namelen = (int)name_len; + revert_data.sr_cnp = &cnp; + + error = VFS_IOCTL(mp, VFSIOC_REVERT_SNAPSHOT, (caddr_t)&revert_data, 0, ctx); + mount_iterdrop(mp); + FREE(name_buf, M_TEMP); + + if (error) { + /* If there was any error, try again using VNOP_IOCTL */ + + vnode_t snapdvp; + struct nameidata namend; + + error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, &namend, LOOKUP, + OP_LOOKUP, ctx); + if (error) { + return error; + } + + + error = VNOP_IOCTL(namend.ni_vp, APFSIOC_REVERT_TO_SNAPSHOT, (caddr_t) NULL, + 0, ctx); + + vnode_put(namend.ni_vp); + nameidone(&namend); + vnode_put(snapdvp); + vnode_put(rvp); + } + + return error; } /* @@ -11896,14 +12311,16 @@ snapshot_rename(int dirfd, user_addr_t old, user_addr_t new, error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, old, fromnd, DELETE, OP_UNLINK, ctx); - if (error) + if (error) { goto out; + } fvp = fromnd->ni_vp; MALLOC(newname_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); error = copyinstr(new, newname_buf, MAXPATHLEN, &name_len); - if (error) + if (error) { goto out1; + } /* * Some sanity checks- new name can't be empty, "." or ".." 
or have @@ -11918,7 +12335,9 @@ snapshot_rename(int dirfd, user_addr_t old, user_addr_t new, error = EINVAL; goto out1; } - for (i = 0; i < (int)name_len && newname_buf[i] != '/'; i++); + for (i = 0; i < (int)name_len && newname_buf[i] != '/'; i++) { + ; + } if (i < (int)name_len) { error = EINVAL; goto out1; @@ -11927,8 +12346,9 @@ snapshot_rename(int dirfd, user_addr_t old, user_addr_t new, #if CONFIG_MACF error = mac_mount_check_snapshot_create(ctx, vnode_mount(rvp), newname_buf); - if (error) + if (error) { goto out1; + } #endif NDINIT(tond, RENAME, OP_RENAME, USEDVP | NOCACHE | AUDITVNPATH2, @@ -11961,7 +12381,7 @@ out1: nameidone(fromnd); out: FREE(__rename_data, M_TEMP); - return (error); + return error; } /* @@ -11990,8 +12410,9 @@ snapshot_mount(int dirfd, user_addr_t name, user_addr_t directory, error = vnode_get_snapshot(dirfd, &rvp, &snapdvp, name, snapndp, LOOKUP, OP_LOOKUP, ctx); - if (error) + if (error) { goto out; + } snapvp = snapndp->ni_vp; if (!vnode_mount(rvp) || (vnode_mount(rvp) == dead_mountp)) { @@ -12003,8 +12424,9 @@ snapshot_mount(int dirfd, user_addr_t name, user_addr_t directory, NDINIT(dirndp, LOOKUP, OP_MOUNT, FOLLOW | AUDITVNPATH1 | WANTPARENT, UIO_USERSPACE, directory, ctx); error = namei(dirndp); - if (error) + if (error) { goto out1; + } vp = dirndp->ni_vp; pvp = dirndp->ni_dvp; @@ -12018,8 +12440,8 @@ snapshot_mount(int dirfd, user_addr_t name, user_addr_t directory, smnt_data.sm_mp = mp; smnt_data.sm_cnp = &snapndp->ni_cnd; error = mount_common(mp->mnt_vfsstat.f_fstypename, pvp, vp, - &dirndp->ni_cnd, CAST_USER_ADDR_T(&smnt_data), flags & MNT_DONTBROWSE, - KERNEL_MOUNT_SNAPSHOT, NULL, FALSE, ctx); + &dirndp->ni_cnd, CAST_USER_ADDR_T(&smnt_data), flags & MNT_DONTBROWSE, + KERNEL_MOUNT_SNAPSHOT, NULL, FALSE, ctx); } vnode_put(vp); @@ -12032,7 +12454,7 @@ out1: nameidone(snapndp); out: FREE(__snapshot_mount_data, M_TEMP); - return (error); + return error; } /* @@ -12042,58 +12464,58 @@ out: */ static int snapshot_root(int dirfd, user_addr_t name, __unused uint32_t flags, - vfs_context_t ctx) -{ - int error; - vnode_t rvp; - mount_t mp; - struct fs_snapshot_root_args root_data; - struct componentname cnp; - caddr_t name_buf; - size_t name_len; - - error = vnode_getfromfd(ctx, dirfd, &rvp); - if (error) { - return (error); - } - mp = vnode_mount(rvp); - - MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); - error = copyinstr(name, name_buf, MAXPATHLEN, &name_len); - if (error) { - FREE(name_buf, M_TEMP); - vnode_put(rvp); - return (error); - } - - // XXX MAC checks ? - - /* - * Grab mount_iterref so that we can release the vnode, - * since VFSIOC_ROOT_SNAPSHOT could conceivably cause a sync. 
- */ - error = mount_iterref (mp, 0); - vnode_put(rvp); - if (error) { - FREE(name_buf, M_TEMP); - return (error); - } - - memset(&cnp, 0, sizeof(cnp)); - cnp.cn_pnbuf = (char *)name_buf; - cnp.cn_nameiop = LOOKUP; - cnp.cn_flags = ISLASTCN | HASBUF; - cnp.cn_pnlen = MAXPATHLEN; - cnp.cn_nameptr = cnp.cn_pnbuf; - cnp.cn_namelen = (int)name_len; - root_data.sr_cnp = &cnp; - - error = VFS_IOCTL(mp, VFSIOC_ROOT_SNAPSHOT, (caddr_t)&root_data, 0, ctx); - - mount_iterdrop(mp); - FREE(name_buf, M_TEMP); - - return (error); + vfs_context_t ctx) +{ + int error; + vnode_t rvp; + mount_t mp; + struct fs_snapshot_root_args root_data; + struct componentname cnp; + caddr_t name_buf; + size_t name_len; + + error = vnode_getfromfd(ctx, dirfd, &rvp); + if (error) { + return error; + } + mp = vnode_mount(rvp); + + MALLOC(name_buf, caddr_t, MAXPATHLEN, M_TEMP, M_WAITOK); + error = copyinstr(name, name_buf, MAXPATHLEN, &name_len); + if (error) { + FREE(name_buf, M_TEMP); + vnode_put(rvp); + return error; + } + + // XXX MAC checks ? + + /* + * Grab mount_iterref so that we can release the vnode, + * since VFSIOC_ROOT_SNAPSHOT could conceivably cause a sync. + */ + error = mount_iterref(mp, 0); + vnode_put(rvp); + if (error) { + FREE(name_buf, M_TEMP); + return error; + } + + memset(&cnp, 0, sizeof(cnp)); + cnp.cn_pnbuf = (char *)name_buf; + cnp.cn_nameiop = LOOKUP; + cnp.cn_flags = ISLASTCN | HASBUF; + cnp.cn_pnlen = MAXPATHLEN; + cnp.cn_nameptr = cnp.cn_pnbuf; + cnp.cn_namelen = (int)name_len; + root_data.sr_cnp = &cnp; + + error = VFS_IOCTL(mp, VFSIOC_ROOT_SNAPSHOT, (caddr_t)&root_data, 0, ctx); + + mount_iterdrop(mp); + FREE(name_buf, M_TEMP); + + return error; } /* @@ -12110,8 +12532,53 @@ fs_snapshot(__unused proc_t p, struct fs_snapshot_args *uap, AUDIT_ARG(value32, uap->op); error = priv_check_cred(vfs_context_ucred(ctx), PRIV_VFS_SNAPSHOT, 0); - if (error) - return (error); + if (error) { + return error; + } + + /* + * Enforce user authorization for snapshot modification operations + */ + if ((uap->op != SNAPSHOT_OP_MOUNT) && + (uap->op != SNAPSHOT_OP_ROOT)) { + vnode_t dvp = NULLVP; + vnode_t devvp = NULLVP; + mount_t mp; + + error = vnode_getfromfd(ctx, uap->dirfd, &dvp); + if (error) { + return error; + } + mp = vnode_mount(dvp); + devvp = mp->mnt_devvp; + + /* get an iocount on devvp */ + if (devvp == NULLVP) { + error = vnode_lookup(mp->mnt_vfsstat.f_mntfromname, 0, &devvp, ctx); + /* for mounts which arent block devices */ + if (error == ENOENT) { + error = ENXIO; + } + } else { + error = vnode_getwithref(devvp); + } + + if (error) { + vnode_put(dvp); + return error; + } + + if ((vfs_context_issuser(ctx) == 0) && + (vnode_authorize(devvp, NULL, KAUTH_VNODE_WRITE_DATA, ctx) != 0)) { + error = EPERM; + } + vnode_put(dvp); + vnode_put(devvp); + + if (error) { + return error; + } + } switch (uap->op) { case SNAPSHOT_OP_CREATE: @@ -12128,9 +12595,9 @@ fs_snapshot(__unused proc_t p, struct fs_snapshot_args *uap, error = snapshot_mount(uap->dirfd, uap->name1, uap->name2, uap->data, uap->flags, ctx); break; - case SNAPSHOT_OP_REVERT: - error = snapshot_revert(uap->dirfd, uap->name1, uap->flags, ctx); - break; + case SNAPSHOT_OP_REVERT: + error = snapshot_revert(uap->dirfd, uap->name1, uap->flags, ctx); + break; #if CONFIG_MNT_ROOTSNAP case SNAPSHOT_OP_ROOT: error = snapshot_root(uap->dirfd, uap->name1, uap->flags, ctx); @@ -12140,5 +12607,5 @@ fs_snapshot(__unused proc_t p, struct fs_snapshot_args *uap, error = ENOSYS; } - return (error); + return error; } diff --git a/bsd/vfs/vfs_utfconv.c 
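/*
 * Condensed sketch (hypothetical helper, not in the patch) of the new
 * authorization rule fs_snapshot() applies above for every op except
 * SNAPSHOT_OP_MOUNT and SNAPSHOT_OP_ROOT: a non-superuser caller may
 * modify snapshots only if it could also write the volume's backing
 * device vnode.
 */
static int
snapshot_modify_allowed(vfs_context_t ctx, vnode_t devvp)
{
	if (vfs_context_issuser(ctx)) {
		return 1;
	}
	return vnode_authorize(devvp, NULL, KAUTH_VNODE_WRITE_DATA, ctx) == 0;
}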
b/bsd/vfs/vfs_utfconv.c index 1f014aacf..48f21532a 100644 --- a/bsd/vfs/vfs_utfconv.c +++ b/bsd/vfs/vfs_utfconv.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - - /* - Includes Unicode 3.2 decomposition code derived from Core Foundation + +/* + * Includes Unicode 3.2 decomposition code derived from Core Foundation */ #include @@ -48,7 +48,7 @@ * UTF-8 is the Unicode Transformation Format that serializes a Unicode * character as a sequence of one to four bytes. Only the shortest form * required to represent the significant Unicode bits is legal. - * + * * UTF-8 Multibyte Codes * * Bytes Bits Unicode Min Unicode Max UTF-8 Byte Sequence (binary) @@ -64,17 +64,17 @@ #define UNICODE_TO_UTF8_LEN(c) \ ((c) < 0x0080 ? 1 : ((c) < 0x0800 ? 2 : (((c) & 0xf800) == 0xd800 ? 2 : 3))) -#define UCS_ALT_NULL 0x2400 +#define UCS_ALT_NULL 0x2400 /* Surrogate Pair Constants */ -#define SP_HALF_SHIFT 10 -#define SP_HALF_BASE 0x0010000u -#define SP_HALF_MASK 0x3FFu +#define SP_HALF_SHIFT 10 +#define SP_HALF_BASE 0x0010000u +#define SP_HALF_MASK 0x3FFu -#define SP_HIGH_FIRST 0xD800u -#define SP_HIGH_LAST 0xDBFFu -#define SP_LOW_FIRST 0xDC00u -#define SP_LOW_LAST 0xDFFFu +#define SP_HIGH_FIRST 0xD800u +#define SP_HIGH_LAST 0xDBFFu +#define SP_LOW_FIRST 0xDC00u +#define SP_LOW_LAST 0xDFFFu #include "vfs_utfconvdata.h" @@ -92,18 +92,19 @@ unicode_combinable(u_int16_t character) const u_int8_t *bitmap = __CFUniCharCombiningBitmap; u_int8_t value; - if (character < 0x0300) - return (0); + if (character < 0x0300) { + return 0; + } value = bitmap[(character >> 8) & 0xFF]; if (value == 0xFF) { - return (1); + return 1; } else if (value) { bitmap = bitmap + ((value - 1) * 32) + 256; - return (bitmap[(character & 0xFF) / 8] & (1 << (character % 8)) ? 1 : 0); + return bitmap[(character & 0xFF) / 8] & (1 << (character % 8)) ? 1 : 0; } - return (0); + return 0; } /* @@ -112,22 +113,24 @@ unicode_combinable(u_int16_t character) * Similar to __CFUniCharIsDecomposableCharacter. 
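/*
 * Illustrative sketch (not from the patch) of the one- to three-byte
 * BMP encodings in the table above, mirroring what utf8_encodestr()
 * emits below; surrogate halves and the four-byte form are handled
 * separately via the surrogate-pair arithmetic.
 */
#include <stdint.h>

static int
utf8_encode_bmp(uint16_t ch, uint8_t out[3])
{
	if (ch < 0x0080) {              /* 0xxxxxxx */
		out[0] = (uint8_t)ch;
		return 1;
	} else if (ch < 0x0800) {       /* 110xxxxx 10xxxxxx */
		out[0] = (uint8_t)(0xc0 | (ch >> 6));
		out[1] = (uint8_t)(0x80 | (ch & 0x3f));
		return 2;
	} else {                        /* 1110xxxx 10xxxxxx 10xxxxxx */
		out[0] = (uint8_t)(0xe0 | (ch >> 12));
		out[1] = (uint8_t)(0x80 | ((ch >> 6) & 0x3f));
		out[2] = (uint8_t)(0x80 | (ch & 0x3f));
		return 3;
	}
}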
*/ int -unicode_decomposeable(u_int16_t character) { +unicode_decomposeable(u_int16_t character) +{ const u_int8_t *bitmap = __CFUniCharDecomposableBitmap; u_int8_t value; - - if (character < 0x00C0) - return (0); + + if (character < 0x00C0) { + return 0; + } value = bitmap[(character >> 8) & 0xFF]; if (value == 0xFF) { - return (1); + return 1; } else if (value) { bitmap = bitmap + ((value - 1) * 32) + 256; - return (bitmap[(character & 0xFF) / 8] & (1 << (character % 8)) ? 1 : 0); + return bitmap[(character & 0xFF) / 8] & (1 << (character % 8)) ? 1 : 0; } - return (0); + return 0; } @@ -137,7 +140,8 @@ unicode_decomposeable(u_int16_t character) { * Similar to CFUniCharGetCombiningPropertyForCharacter. */ static inline u_int8_t -get_combining_class(u_int16_t character) { +get_combining_class(u_int16_t character) +{ const u_int8_t *bitmap = __CFUniCharCombiningPropertyBitmap; u_int8_t value = bitmap[(character >> 8)]; @@ -146,7 +150,7 @@ get_combining_class(u_int16_t character) { bitmap = bitmap + (value * 256); return bitmap[character % 256]; } - return (0); + return 0; } @@ -162,13 +166,13 @@ static u_int16_t sfm_to_ucs(u_int16_t ucs_ch); char utf_extrabytes[32] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 2, 2, 3, -1 + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 2, 2, 3, -1 }; const char hexdigits[16] = { - '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' + '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; /* @@ -233,7 +237,7 @@ utf8_encodelen(const u_int16_t * ucsp, size_t ucslen, u_int16_t altslash, int fl len += UNICODE_TO_UTF8_LEN(ucs_ch); } - return (len); + return len; } @@ -264,7 +268,7 @@ utf8_encodelen(const u_int16_t * ucsp, size_t ucslen, u_int16_t altslash, int fl */ int utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, - size_t * utf8len, size_t buflen, u_int16_t altslash, int flags) + size_t * utf8len, size_t buflen, u_int16_t altslash, int flags) { u_int8_t * bufstart; u_int8_t * bufend; @@ -281,8 +285,9 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, bufstart = utf8p; bufend = bufstart + buflen; - if (nullterm) + if (nullterm) { --bufend; + } charcnt = ucslen / 2; while (charcnt-- > 0) { @@ -302,9 +307,9 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, /* Slash and NULL are not permitted */ if (ucs_ch == '/') { - if (altslash) + if (altslash) { ucs_ch = altslash; - else { + } else { ucs_ch = '_'; result = EINVAL; } @@ -316,9 +321,8 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, if (utf8p >= bufend) { result = ENAMETOOLONG; break; - } + } *utf8p++ = ucs_ch; - } else if (ucs_ch < 0x800) { if ((utf8p + 1) >= bufend) { result = ENAMETOOLONG; @@ -326,7 +330,6 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, } *utf8p++ = 0xc0 | (ucs_ch >> 6); *utf8p++ = 0x80 | (0x3f & ucs_ch); - } else { /* These chars never valid Unicode. */ if (ucs_ch == 0xFFFE || ucs_ch == 0xFFFF) { @@ -336,20 +339,20 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, /* Combine valid surrogate pairs */ if (ucs_ch >= SP_HIGH_FIRST && ucs_ch <= SP_HIGH_LAST - && charcnt > 0) { + && charcnt > 0) { u_int16_t ch2; u_int32_t pair; ch2 = swapbytes ? 
OSSwapInt16(*ucsp) : *ucsp; if (ch2 >= SP_LOW_FIRST && ch2 <= SP_LOW_LAST) { pair = ((ucs_ch - SP_HIGH_FIRST) << SP_HALF_SHIFT) - + (ch2 - SP_LOW_FIRST) + SP_HALF_BASE; + + (ch2 - SP_LOW_FIRST) + SP_HALF_BASE; if ((utf8p + 3) >= bufend) { result = ENAMETOOLONG; break; } --charcnt; - ++ucsp; + ++ucsp; *utf8p++ = 0xf0 | (pair >> 18); *utf8p++ = 0x80 | (0x3f & (pair >> 12)); *utf8p++ = 0x80 | (0x3f & (pair >> 6)); @@ -362,7 +365,7 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, if (utf8p >= bufend) { result = ENAMETOOLONG; break; - } + } *utf8p++ = ucs_ch; continue; } @@ -374,29 +377,31 @@ utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p, *utf8p++ = 0xe0 | (ucs_ch >> 12); *utf8p++ = 0x80 | (0x3f & (ucs_ch >> 6)); *utf8p++ = 0x80 | (0x3f & ucs_ch); - } + } } - + *utf8len = utf8p - bufstart; - if (nullterm) + if (nullterm) { *utf8p++ = '\0'; + } - return (result); + return result; } // Pushes a character taking account of combining character sequences -static void push(uint16_t ucs_ch, int *combcharcnt, uint16_t **ucsp) +static void +push(uint16_t ucs_ch, int *combcharcnt, uint16_t **ucsp) { /* * Make multiple combining character sequences canonical */ if (unicode_combinable(ucs_ch)) { - ++*combcharcnt; /* start tracking a run */ + ++*combcharcnt; /* start tracking a run */ } else if (*combcharcnt) { if (*combcharcnt > 1) { prioritysort(*ucsp - *combcharcnt, *combcharcnt); } - *combcharcnt = 0; /* start over */ + *combcharcnt = 0; /* start over */ } *(*ucsp)++ = ucs_ch; @@ -432,7 +437,7 @@ static void push(uint16_t ucs_ch, int *combcharcnt, uint16_t **ucsp) */ int utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, - size_t *ucslen, size_t buflen, u_int16_t altslash, int flags) + size_t *ucslen, size_t buflen, u_int16_t altslash, int flags) { u_int16_t* bufstart; u_int16_t* bufend; @@ -453,8 +458,9 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, bufend = (u_int16_t *)((u_int8_t *)ucsp + buflen); while (utf8len-- > 0 && (byte = *utf8p++) != '\0') { - if (ucsp >= bufend) + if (ucsp >= bufend) { goto toolong; + } /* check for ascii */ if (byte < 0x80) { @@ -472,63 +478,75 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, case 1: ch = byte; ch <<= 6; /* 1st byte */ byte = *utf8p++; /* 2nd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto escape2; + } ch += byte; ch -= 0x00003080UL; - if (ch < 0x0080) + if (ch < 0x0080) { goto escape2; + } ucs_ch = ch; - break; + break; case 2: ch = byte; ch <<= 6; /* 1st byte */ byte = *utf8p++; /* 2nd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto escape2; + } ch += byte; ch <<= 6; byte = *utf8p++; /* 3rd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto escape3; + } ch += byte; ch -= 0x000E2080UL; - if (ch < 0x0800) + if (ch < 0x0800) { goto escape3; + } if (ch >= 0xD800) { - if (ch <= 0xDFFF) + if (ch <= 0xDFFF) { goto escape3; - if (ch == 0xFFFE || ch == 0xFFFF) + } + if (ch == 0xFFFE || ch == 0xFFFF) { goto escape3; + } } ucs_ch = ch; break; case 3: ch = byte; ch <<= 6; /* 1st byte */ byte = *utf8p++; /* 2nd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto escape2; + } ch += byte; ch <<= 6; byte = *utf8p++; /* 3rd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto escape3; + } ch += byte; ch <<= 6; byte = *utf8p++; /* 4th byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto escape4; - ch += byte; + } + ch += byte; ch -= 0x03C82080UL + SP_HALF_BASE; ucs_ch = (ch >> 
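/*
 * Sketch (not from the patch) of the surrogate-pair arithmetic used in
 * both directions above: a high/low UTF-16 pair maps to a supplementary
 * code point, which UTF-8 carries in four bytes. E.g. U+1F600 is
 * <0xD83D, 0xDE00> in UTF-16 and F0 9F 98 80 in UTF-8.
 */
#include <stdint.h>

static uint32_t
surrogates_to_scalar(uint16_t hi, uint16_t lo)
{
	/* hi in [0xD800, 0xDBFF], lo in [0xDC00, 0xDFFF] */
	return (((uint32_t)hi - 0xD800u) << 10)
	    + ((uint32_t)lo - 0xDC00u) + 0x10000u;
}

static void
utf8_encode_supplementary(uint32_t pair, uint8_t out[4])
{
	out[0] = (uint8_t)(0xf0 | (pair >> 18));
	out[1] = (uint8_t)(0x80 | ((pair >> 12) & 0x3f));
	out[2] = (uint8_t)(0x80 | ((pair >> 6) & 0x3f));
	out[3] = (uint8_t)(0x80 | (pair & 0x3f));
}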
SP_HALF_SHIFT) + SP_HIGH_FIRST; - if (ucs_ch < SP_HIGH_FIRST || ucs_ch > SP_HIGH_LAST) + if (ucs_ch < SP_HIGH_FIRST || ucs_ch > SP_HIGH_LAST) { goto escape4; + } push(ucs_ch, &combcharcnt, &ucsp); - if (ucsp >= bufend) + if (ucsp >= bufend) { goto toolong; + } ucs_ch = (ch & SP_HALF_MASK) + SP_LOW_FIRST; if (ucs_ch < SP_LOW_FIRST || ucs_ch > SP_LOW_LAST) { --ucsp; goto escape4; } *ucsp++ = ucs_ch; - continue; + continue; default: result = EINVAL; goto exit; @@ -541,8 +559,9 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, count = unicode_decompose(ucs_ch, sequence); for (i = 0; i < count; ++i) { - if (ucsp >= bufend) + if (ucsp >= bufend) { goto toolong; + } push(sequence[i], &combcharcnt, &ucsp); } @@ -561,16 +580,18 @@ utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp, } } } - if (ucs_ch == UCS_ALT_NULL) + if (ucs_ch == UCS_ALT_NULL) { ucs_ch = '\0'; + } } - if (ucs_ch == altslash) + if (ucs_ch == altslash) { ucs_ch = '/'; + } push(ucs_ch, &combcharcnt, &ucsp); continue; - /* + /* * Escape illegal UTF-8 into something legal. */ escape4: @@ -586,19 +607,21 @@ escape: result = EINVAL; goto exit; } - if (extrabytes > 0) + if (extrabytes > 0) { utf8len += extrabytes; + } byte = *(utf8p - 1); - if ((ucsp + 2) >= bufend) + if ((ucsp + 2) >= bufend) { goto toolong; + } /* Make a previous combining sequence canonical. */ if (combcharcnt > 1) { prioritysort(ucsp - combcharcnt, combcharcnt); } combcharcnt = 0; - + ucs_ch = '%'; *ucsp++ = ucs_ch; ucs_ch = hexdigits[byte >> 4]; @@ -624,7 +647,7 @@ escape: exit: *ucslen = (u_int8_t*)ucsp - (u_int8_t*)bufstart; - return (result); + return result; toolong: result = ENAMETOOLONG; @@ -644,76 +667,88 @@ utf8_validatestr(const u_int8_t* utf8p, size_t utf8len) size_t extrabytes; while (utf8len-- > 0 && (byte = *utf8p++) != '\0') { - if (byte < 0x80) + if (byte < 0x80) { continue; /* plain ascii */ - + } extrabytes = utf_extrabytes[byte >> 3]; - if (utf8len < extrabytes) + if (utf8len < extrabytes) { goto invalid; + } utf8len -= extrabytes; switch (extrabytes) { case 1: ch = byte; ch <<= 6; /* 1st byte */ byte = *utf8p++; /* 2nd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto invalid; + } ch += byte; ch -= 0x00003080UL; - if (ch < 0x0080) + if (ch < 0x0080) { goto invalid; + } break; case 2: ch = byte; ch <<= 6; /* 1st byte */ byte = *utf8p++; /* 2nd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto invalid; + } ch += byte; ch <<= 6; byte = *utf8p++; /* 3rd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto invalid; + } ch += byte; ch -= 0x000E2080UL; - if (ch < 0x0800) + if (ch < 0x0800) { goto invalid; + } if (ch >= 0xD800) { - if (ch <= 0xDFFF) + if (ch <= 0xDFFF) { goto invalid; - if (ch == 0xFFFE || ch == 0xFFFF) + } + if (ch == 0xFFFE || ch == 0xFFFF) { goto invalid; + } } break; case 3: ch = byte; ch <<= 6; /* 1st byte */ byte = *utf8p++; /* 2nd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto invalid; + } ch += byte; ch <<= 6; byte = *utf8p++; /* 3rd byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto invalid; + } ch += byte; ch <<= 6; byte = *utf8p++; /* 4th byte */ - if ((byte >> 6) != 2) + if ((byte >> 6) != 2) { goto invalid; + } ch += byte; ch -= 0x03C82080UL + SP_HALF_BASE; ucs_ch = (ch >> SP_HALF_SHIFT) + SP_HIGH_FIRST; - if (ucs_ch < SP_HIGH_FIRST || ucs_ch > SP_HIGH_LAST) + if (ucs_ch < SP_HIGH_FIRST || ucs_ch > SP_HIGH_LAST) { goto invalid; + } ucs_ch = (ch & SP_HALF_MASK) + SP_LOW_FIRST; - if (ucs_ch < SP_LOW_FIRST || ucs_ch > 
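/*
 * Sketch (not from the patch) of the escape fallback above: when
 * escaping is enabled (the UTF_ESCAPE_ILLEGAL flag), a byte that is not
 * valid UTF-8 is emitted as '%' plus two hex digits from hexdigits[]
 * rather than failing the whole conversion. E.g. a stray 0xFF byte
 * becomes the three UTF-16 characters '%', 'F', 'F'.
 */
#include <stdint.h>

static const char hex[16] = "0123456789ABCDEF";

static int
escape_byte(uint8_t byte, uint16_t *out)
{
	out[0] = '%';
	out[1] = (uint16_t)hex[byte >> 4];
	out[2] = (uint16_t)hex[byte & 0x0f];
	return 3;       /* output characters produced */
}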
SP_LOW_LAST) + if (ucs_ch < SP_LOW_FIRST || ucs_ch > SP_LOW_LAST) { goto invalid; + } break; default: goto invalid; } - } - return (0); + return 0; invalid: - return (EINVAL); + return EINVAL; } /* @@ -724,7 +759,7 @@ invalid: * pointed to by outstr. The size of the output in bytes (not including * a NULL termination byte) is returned in outlen. In-place conversions * are not supported (i.e. instr != outstr).] - + * * FLAGS * UTF_DECOMPOSED: output string will be fully decomposed (NFD) * @@ -741,7 +776,7 @@ invalid: */ int utf8_normalizestr(const u_int8_t* instr, size_t inlen, u_int8_t* outstr, - size_t *outlen, size_t buflen, int flags) + size_t *outlen, size_t buflen, int flags) { u_int16_t unicodebuf[32]; u_int16_t* unistr = NULL; @@ -755,12 +790,12 @@ utf8_normalizestr(const u_int8_t* instr, size_t inlen, u_int8_t* outstr, int result = 0; if (flags & ~(UTF_DECOMPOSED | UTF_PRECOMPOSED | UTF_NO_NULL_TERM | UTF_ESCAPE_ILLEGAL)) { - return (EINVAL); + return EINVAL; } decompose = (flags & UTF_DECOMPOSED); precompose = (flags & UTF_PRECOMPOSED); if ((decompose && precompose) || (!decompose && !precompose)) { - return (EINVAL); + return EINVAL; } outbufstart = outstr; outbufend = outbufstart + buflen; @@ -781,15 +816,16 @@ utf8_normalizestr(const u_int8_t* instr, size_t inlen, u_int8_t* outstr, exit: *outlen = outstr - outbufstart; if (((flags & UTF_NO_NULL_TERM) == 0)) { - if (outstr < outbufend) + if (outstr < outbufend) { *outstr++ = '\0'; - else + } else { result = ENAMETOOLONG; + } } - return (result); + return result; - /* + /* * Non-ASCII uses the existing utf8_encodestr/utf8_decodestr * functions to perform the normalization. Since this will * presumably be used to normalize filenames in the back-end @@ -812,18 +848,19 @@ nonASCII: */ unicode_bytes = precompose ? (inbuflen * 2) : (inbuflen * 3); - if (unicode_bytes <= sizeof(unicodebuf)) + if (unicode_bytes <= sizeof(unicodebuf)) { unistr = &unicodebuf[0]; - else + } else { MALLOC(unistr, uint16_t *, unicode_bytes, M_TEMP, M_WAITOK); + } /* Normalize the string. */ result = utf8_decodestr(inbufstart, inbuflen, unistr, &unicode_bytes, - unicode_bytes, 0, flags & ~UTF_NO_NULL_TERM); + unicode_bytes, 0, flags & ~UTF_NO_NULL_TERM); if (result == 0) { /* Put results back into UTF-8. 
*/ result = utf8_encodestr(unistr, unicode_bytes, outbufstart, - &uft8_bytes, buflen, 0, UTF_NO_NULL_TERM); + &uft8_bytes, buflen, 0, UTF_NO_NULL_TERM); outstr = outbufstart + uft8_bytes; } if (unistr && unistr != &unicodebuf[0]) { @@ -833,9 +870,9 @@ nonASCII: } - /* - * Unicode 3.2 decomposition code (derived from Core Foundation) - */ +/* + * Unicode 3.2 decomposition code (derived from Core Foundation) + */ typedef struct { u_int32_t _key; @@ -844,26 +881,31 @@ typedef struct { static inline u_int32_t getmappedvalue32(const unicode_mappings32 *theTable, u_int32_t numElem, - u_int16_t character) + u_int16_t character) { const unicode_mappings32 *p, *q, *divider; - if ((character < theTable[0]._key) || (character > theTable[numElem-1]._key)) - return (0); + if ((character < theTable[0]._key) || (character > theTable[numElem - 1]._key)) { + return 0; + } p = theTable; - q = p + (numElem-1); + q = p + (numElem - 1); while (p <= q) { - divider = p + ((q - p) >> 1); /* divide by 2 */ - if (character < divider->_key) { q = divider - 1; } - else if (character > divider->_key) { p = divider + 1; } - else { return (divider->_value); } + divider = p + ((q - p) >> 1); /* divide by 2 */ + if (character < divider->_key) { + q = divider - 1; + } else if (character > divider->_key) { + p = divider + 1; + } else { + return divider->_value; + } } - return (0); + return 0; } -#define RECURSIVE_DECOMPOSITION (1 << 15) -#define EXTRACT_COUNT(value) (((value) >> 12) & 0x0007) +#define RECURSIVE_DECOMPOSITION (1 << 15) +#define EXTRACT_COUNT(value) (((value) >> 12) & 0x0007) typedef struct { u_int16_t _key; @@ -872,25 +914,27 @@ typedef struct { static inline u_int16_t getmappedvalue16(const unicode_mappings16 *theTable, u_int32_t numElem, - u_int16_t character) + u_int16_t character) { const unicode_mappings16 *p, *q, *divider; - if ((character < theTable[0]._key) || (character > theTable[numElem-1]._key)) - return (0); + if ((character < theTable[0]._key) || (character > theTable[numElem - 1]._key)) { + return 0; + } p = theTable; - q = p + (numElem-1); + q = p + (numElem - 1); while (p <= q) { - divider = p + ((q - p) >> 1); /* divide by 2 */ - if (character < divider->_key) + divider = p + ((q - p) >> 1); /* divide by 2 */ + if (character < divider->_key) { q = divider - 1; - else if (character > divider->_key) + } else if (character > divider->_key) { p = divider + 1; - else - return (divider->_value); + } else { + return divider->_value; + } } - return (0); + return 0; } @@ -914,23 +958,25 @@ unicode_recursive_decompose(u_int16_t character, u_int16_t *convertedChars) usedLength = 0; if (value & RECURSIVE_DECOMPOSITION) { - usedLength = unicode_recursive_decompose((u_int16_t)*bmpMappings, convertedChars); - - --length; /* Decrement for the first char */ - if (!usedLength) - return 0; - ++bmpMappings; - convertedChars += usedLength; + usedLength = unicode_recursive_decompose((u_int16_t)*bmpMappings, convertedChars); + + --length; /* Decrement for the first char */ + if (!usedLength) { + return 0; + } + ++bmpMappings; + convertedChars += usedLength; } - + usedLength += length; - - while (length--) + + while (length--) { *(convertedChars++) = *(bmpMappings++); - - return (usedLength); + } + + return usedLength; } - + #define HANGUL_SBASE 0xAC00 #define HANGUL_LBASE 0x1100 #define HANGUL_VBASE 0x1161 @@ -963,14 +1009,15 @@ unicode_decompose(u_int16_t character, u_int16_t *convertedChars) length = (character % HANGUL_TCOUNT ? 
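/*
 * Standalone sketch (not from the patch) of the algorithmic Hangul
 * decomposition unicode_decompose() performs above, using the standard
 * Unicode constants: SBASE 0xAC00, LBASE 0x1100, VBASE 0x1161, TBASE
 * 0x11A7, NCOUNT 588, TCOUNT 28. E.g. U+D55C decomposes to
 * <U+1112, U+1161, U+11AB>.
 */
#include <stdint.h>

static int
hangul_decompose(uint16_t s, uint16_t out[3])
{
	uint32_t sindex = (uint32_t)s - 0xAC00u;

	if (s < 0xAC00u || sindex >= 11172u) {
		return 0;       /* not a precomposed Hangul syllable */
	}
	out[0] = (uint16_t)(0x1100u + sindex / 588u);          /* leading consonant */
	out[1] = (uint16_t)(0x1161u + (sindex % 588u) / 28u);  /* vowel */
	if (sindex % 28u != 0) {
		out[2] = (uint16_t)(0x11A7u + sindex % 28u);   /* trailing consonant */
		return 3;
	}
	return 2;
}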
3 : 2); *(convertedChars++) = - character / HANGUL_NCOUNT + HANGUL_LBASE; + character / HANGUL_NCOUNT + HANGUL_LBASE; *(convertedChars++) = - (character % HANGUL_NCOUNT) / HANGUL_TCOUNT + HANGUL_VBASE; - if (length > 2) + (character % HANGUL_NCOUNT) / HANGUL_TCOUNT + HANGUL_VBASE; + if (length > 2) { *convertedChars = (character % HANGUL_TCOUNT) + HANGUL_TBASE; - return (length); + } + return length; } else { - return (unicode_recursive_decompose(character, convertedChars)); + return unicode_recursive_decompose(character, convertedChars); } } @@ -995,18 +1042,19 @@ unicode_combine(u_int16_t base, u_int16_t combining) /* 2 char Hangul sequences */ if ((combining < (HANGUL_VBASE + HANGUL_VCOUNT)) && (base >= HANGUL_LBASE && base < (HANGUL_LBASE + HANGUL_LCOUNT))) { - return (HANGUL_SBASE + - ((base - HANGUL_LBASE)*(HANGUL_VCOUNT*HANGUL_TCOUNT)) + - ((combining - HANGUL_VBASE)*HANGUL_TCOUNT)); + return HANGUL_SBASE + + ((base - HANGUL_LBASE) * (HANGUL_VCOUNT * HANGUL_TCOUNT)) + + ((combining - HANGUL_VBASE) * HANGUL_TCOUNT); } - + /* 3 char Hangul sequences */ if ((combining > HANGUL_TBASE) && (base >= HANGUL_SBASE && base < (HANGUL_SBASE + HANGUL_SCOUNT))) { - if ((base - HANGUL_SBASE) % HANGUL_TCOUNT) - return (0); - else - return (base + (combining - HANGUL_TBASE)); + if ((base - HANGUL_SBASE) % HANGUL_TCOUNT) { + return 0; + } else { + return base + (combining - HANGUL_TBASE); + } } } @@ -1020,7 +1068,7 @@ unicode_combine(u_int16_t base, u_int16_t combining) ((const u_int32_t *)__CFUniCharBMPPrecompDestinationTable + (value & 0xFFFF)), (value >> 16), base); } - return (value); + return value; } @@ -1053,7 +1101,7 @@ prioritysort(u_int16_t* characters, int count) *ch1 = *ch2; *ch2 = tmp; changes = 1; - + /* * Make sure that p2 contains the combining class for the * character now stored at *ch2. This isn't required for @@ -1095,17 +1143,17 @@ prioritysort(u_int16_t* characters, int count) */ #define MAX_SFM2MAC 0x29 -#define SFMCODE_PREFIX_MASK 0xf000 +#define SFMCODE_PREFIX_MASK 0xf000 /* * In the Mac OS 9 days the colon was illegal in a file name. For that reason * SFM had no conversion for the colon. There is a conversion for the * slash. In Mac OS X the slash is illegal in a file name. So for us the colon * is a slash and a slash is a colon. So we can just replace the slash with the - * colon in our tables and everything will just work. + * colon in our tables and everything will just work. 
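/*
 * Sketch (not from the patch) of the canonical-ordering step that
 * prioritysort() implements above: a stable bubble sort that reorders
 * adjacent combining marks into ascending combining-class order, while
 * starters (class 0) never move and so act as barriers between runs.
 * E.g. grave accent (class 230) followed by cedilla (class 202) swaps
 * to cedilla first. get_class stands in for get_combining_class().
 */
#include <stdint.h>

static void
canonical_order(uint16_t *chars, int count,
    uint8_t (*get_class)(uint16_t))
{
	int changed;

	do {
		changed = 0;
		for (int i = 0; i + 1 < count; i++) {
			uint8_t c1 = get_class(chars[i]);
			uint8_t c2 = get_class(chars[i + 1]);

			/* swap out-of-order marks; never move a starter */
			if (c1 > c2 && c2 != 0) {
				uint16_t tmp = chars[i];
				chars[i] = chars[i + 1];
				chars[i + 1] = tmp;
				changed = 1;
			}
		}
	} while (changed);
}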
*/ static u_int8_t -sfm2mac[] = { + sfm2mac[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 00 - 07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 08 - 0F */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 10 - 17 */ @@ -1113,11 +1161,11 @@ sfm2mac[] = { 0x22, 0x2a, 0x3a, 0x3c, 0x3e, 0x3f, 0x5c, 0x7c, /* 20 - 27 */ 0x20, 0x2e /* 28 - 29 */ }; -#define SFM2MAC_LEN ((sizeof(sfm2mac))/sizeof(sfm2mac[0])) +#define SFM2MAC_LEN ((sizeof(sfm2mac))/sizeof(sfm2mac[0])) static u_int8_t -mac2sfm[] = { - 0x20, 0x21, 0x20, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20 - 27 */ + mac2sfm[] = { + 0x20, 0x21, 0x20, 0x23, 0x24, 0x25, 0x26, 0x27, /* 20 - 27 */ 0x28, 0x29, 0x21, 0x2b, 0x2c, 0x2d, 0x2e, 0x22, /* 28 - 2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 30 - 37 */ 0x38, 0x39, 0x22, 0x3b, 0x23, 0x3d, 0x24, 0x25, /* 38 - 3f */ @@ -1130,7 +1178,7 @@ mac2sfm[] = { 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 70 - 77 */ 0x78, 0x79, 0x7a, 0x7b, 0x27, 0x7d, 0x7e, 0x7f /* 78 - 7f */ }; -#define MAC2SFM_LEN ((sizeof(mac2sfm))/sizeof(mac2sfm[0])) +#define MAC2SFM_LEN ((sizeof(mac2sfm))/sizeof(mac2sfm[0])) /* @@ -1143,23 +1191,25 @@ ucs_to_sfm(u_int16_t ucs_ch, int lastchar) { /* The last character of filename cannot be a space or period. */ if (lastchar) { - if (ucs_ch == 0x20) - return (0xf028); - else if (ucs_ch == 0x2e) - return (0xf029); + if (ucs_ch == 0x20) { + return 0xf028; + } else if (ucs_ch == 0x2e) { + return 0xf029; + } } /* 0x01 - 0x1f is simple transformation. */ if (ucs_ch <= 0x1f) { - return (ucs_ch | 0xf000); - } else /* 0x20 - 0x7f */ { + return ucs_ch | 0xf000; + } else { /* 0x20 - 0x7f */ u_int16_t lsb; assert((ucs_ch - 0x0020) < MAC2SFM_LEN); lsb = mac2sfm[ucs_ch - 0x0020]; - if (lsb != ucs_ch) - return(0xf000 | lsb); + if (lsb != ucs_ch) { + return 0xf000 | lsb; + } } - return (ucs_ch); + return ucs_ch; } /* @@ -1168,12 +1218,10 @@ ucs_to_sfm(u_int16_t ucs_ch, int lastchar) static u_int16_t sfm_to_ucs(u_int16_t ucs_ch) { - if (((ucs_ch & 0xffC0) == SFMCODE_PREFIX_MASK) && + if (((ucs_ch & 0xffC0) == SFMCODE_PREFIX_MASK) && ((ucs_ch & 0x003f) <= MAX_SFM2MAC)) { assert((ucs_ch & 0x003f) < SFM2MAC_LEN); ucs_ch = sfm2mac[ucs_ch & 0x003f]; } - return (ucs_ch); + return ucs_ch; } - - diff --git a/bsd/vfs/vfs_utfconvdata.h b/bsd/vfs/vfs_utfconvdata.h index 589bd650e..268c26849 100644 --- a/bsd/vfs/vfs_utfconvdata.h +++ b/bsd/vfs/vfs_utfconvdata.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,271 +22,271 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
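/*
 * Hypothetical round-trip check (not in the patch) for the SFM tables
 * above: each NTFS-illegal character that ucs_to_sfm() relocates into
 * the 0xF001-0xF029 private-use range must come back unchanged through
 * sfm_to_ucs(). For example '*' maps to 0xF021 via mac2sfm[] and back
 * to 0x2A via sfm2mac[].
 */
static int
sfm_tables_roundtrip(void)
{
	/* NTFS-illegal name characters; ':' and '/' trade places per
	 * the comment above the tables */
	static const u_int16_t illegal[] = {
		'"', '*', ':', '<', '>', '?', '\\', '|'
	};
	unsigned int i;

	for (i = 0; i < sizeof(illegal) / sizeof(illegal[0]); i++) {
		u_int16_t sfm = ucs_to_sfm(illegal[i], 0 /* not last char */);

		if ((sfm & SFMCODE_PREFIX_MASK) != SFMCODE_PREFIX_MASK ||
		    sfm_to_ucs(sfm) != illegal[i]) {
			return 0;
		}
	}
	return 1;
}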
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - -/* - Derived from Core Foundation headers: - CFUniCharDecompData.h - CFUniCharPrecompData.h - CFUniCharNonBaseData.h -*/ +/* + * Derived from Core Foundation headers: + * + * CFUniCharDecompData.h + * CFUniCharPrecompData.h + * CFUniCharNonBaseData.h + */ static const u_int16_t -__CFUniCharDecompositionTable[] = { - 0x00C0, 0x2000, 0x00C1, 0x2002, 0x00C2, 0x2004, 0x00C3, 0x2006, - 0x00C4, 0x2008, 0x00C5, 0x200A, 0x00C7, 0x200C, 0x00C8, 0x200E, - 0x00C9, 0x2010, 0x00CA, 0x2012, 0x00CB, 0x2014, 0x00CC, 0x2016, - 0x00CD, 0x2018, 0x00CE, 0x201A, 0x00CF, 0x201C, 0x00D1, 0x201E, - 0x00D2, 0x2020, 0x00D3, 0x2022, 0x00D4, 0x2024, 0x00D5, 0x2026, - 0x00D6, 0x2028, 0x00D9, 0x202A, 0x00DA, 0x202C, 0x00DB, 0x202E, - 0x00DC, 0x2030, 0x00DD, 0x2032, 0x00E0, 0x2034, 0x00E1, 0x2036, - 0x00E2, 0x2038, 0x00E3, 0x203A, 0x00E4, 0x203C, 0x00E5, 0x203E, - 0x00E7, 0x2040, 0x00E8, 0x2042, 0x00E9, 0x2044, 0x00EA, 0x2046, - 0x00EB, 0x2048, 0x00EC, 0x204A, 0x00ED, 0x204C, 0x00EE, 0x204E, - 0x00EF, 0x2050, 0x00F1, 0x2052, 0x00F2, 0x2054, 0x00F3, 0x2056, - 0x00F4, 0x2058, 0x00F5, 0x205A, 0x00F6, 0x205C, 0x00F9, 0x205E, - 0x00FA, 0x2060, 0x00FB, 0x2062, 0x00FC, 0x2064, 0x00FD, 0x2066, - 0x00FF, 0x2068, 0x0100, 0x206A, 0x0101, 0x206C, 0x0102, 0x206E, - 0x0103, 0x2070, 0x0104, 0x2072, 0x0105, 0x2074, 0x0106, 0x2076, - 0x0107, 0x2078, 0x0108, 0x207A, 0x0109, 0x207C, 0x010A, 0x207E, - 0x010B, 0x2080, 0x010C, 0x2082, 0x010D, 0x2084, 0x010E, 0x2086, - 0x010F, 0x2088, 0x0112, 0x208A, 0x0113, 0x208C, 0x0114, 0x208E, - 0x0115, 0x2090, 0x0116, 0x2092, 0x0117, 0x2094, 0x0118, 0x2096, - 0x0119, 0x2098, 0x011A, 0x209A, 0x011B, 0x209C, 0x011C, 0x209E, - 0x011D, 0x20A0, 0x011E, 0x20A2, 0x011F, 0x20A4, 0x0120, 0x20A6, - 0x0121, 0x20A8, 0x0122, 0x20AA, 0x0123, 0x20AC, 0x0124, 0x20AE, - 0x0125, 0x20B0, 0x0128, 0x20B2, 0x0129, 0x20B4, 0x012A, 0x20B6, - 0x012B, 0x20B8, 0x012C, 0x20BA, 0x012D, 0x20BC, 0x012E, 0x20BE, - 0x012F, 0x20C0, 0x0130, 0x20C2, 0x0134, 0x20C4, 0x0135, 0x20C6, - 0x0136, 0x20C8, 0x0137, 0x20CA, 0x0139, 0x20CC, 0x013A, 0x20CE, - 0x013B, 0x20D0, 0x013C, 0x20D2, 0x013D, 0x20D4, 0x013E, 0x20D6, - 0x0143, 0x20D8, 0x0144, 0x20DA, 0x0145, 0x20DC, 0x0146, 0x20DE, - 0x0147, 0x20E0, 0x0148, 0x20E2, 0x014C, 0x20E4, 0x014D, 0x20E6, - 0x014E, 0x20E8, 0x014F, 0x20EA, 0x0150, 0x20EC, 0x0151, 0x20EE, - 0x0154, 0x20F0, 0x0155, 0x20F2, 0x0156, 0x20F4, 0x0157, 0x20F6, - 0x0158, 0x20F8, 0x0159, 0x20FA, 0x015A, 0x20FC, 0x015B, 0x20FE, - 0x015C, 0x2100, 0x015D, 0x2102, 0x015E, 0x2104, 0x015F, 0x2106, - 0x0160, 0x2108, 0x0161, 0x210A, 0x0162, 0x210C, 0x0163, 0x210E, - 0x0164, 0x2110, 0x0165, 0x2112, 0x0168, 0x2114, 0x0169, 0x2116, - 0x016A, 0x2118, 0x016B, 0x211A, 0x016C, 0x211C, 0x016D, 0x211E, - 0x016E, 0x2120, 0x016F, 0x2122, 0x0170, 0x2124, 0x0171, 0x2126, - 0x0172, 0x2128, 0x0173, 0x212A, 0x0174, 0x212C, 0x0175, 0x212E, - 0x0176, 0x2130, 0x0177, 0x2132, 0x0178, 0x2134, 0x0179, 0x2136, - 0x017A, 0x2138, 0x017B, 0x213A, 0x017C, 0x213C, 0x017D, 0x213E, - 0x017E, 0x2140, 0x01A0, 0x2142, 0x01A1, 0x2144, 0x01AF, 0x2146, - 0x01B0, 0x2148, 0x01CD, 0x214A, 0x01CE, 0x214C, 0x01CF, 0x214E, - 0x01D0, 0x2150, 0x01D1, 0x2152, 0x01D2, 0x2154, 0x01D3, 0x2156, - 0x01D4, 0x2158, 0x01D5, 0xA15A, 0x01D6, 0xA15C, 0x01D7, 0xA15E, - 0x01D8, 0xA160, 0x01D9, 0xA162, 0x01DA, 0xA164, 0x01DB, 0xA166, - 0x01DC, 0xA168, 0x01DE, 0xA16A, 0x01DF, 0xA16C, 0x01E0, 0xA16E, - 0x01E1, 0xA170, 0x01E2, 0x2172, 0x01E3, 0x2174, 0x01E6, 0x2176, - 0x01E7, 0x2178, 0x01E8, 0x217A, 0x01E9, 0x217C, 0x01EA, 0x217E, - 0x01EB, 
0x2180, 0x01EC, 0xA182, 0x01ED, 0xA184, 0x01EE, 0x2186, - 0x01EF, 0x2188, 0x01F0, 0x218A, 0x01F4, 0x218C, 0x01F5, 0x218E, - 0x01F8, 0x2190, 0x01F9, 0x2192, 0x01FA, 0xA194, 0x01FB, 0xA196, - 0x01FC, 0x2198, 0x01FD, 0x219A, 0x01FE, 0x219C, 0x01FF, 0x219E, - 0x0200, 0x21A0, 0x0201, 0x21A2, 0x0202, 0x21A4, 0x0203, 0x21A6, - 0x0204, 0x21A8, 0x0205, 0x21AA, 0x0206, 0x21AC, 0x0207, 0x21AE, - 0x0208, 0x21B0, 0x0209, 0x21B2, 0x020A, 0x21B4, 0x020B, 0x21B6, - 0x020C, 0x21B8, 0x020D, 0x21BA, 0x020E, 0x21BC, 0x020F, 0x21BE, - 0x0210, 0x21C0, 0x0211, 0x21C2, 0x0212, 0x21C4, 0x0213, 0x21C6, - 0x0214, 0x21C8, 0x0215, 0x21CA, 0x0216, 0x21CC, 0x0217, 0x21CE, - 0x0218, 0x21D0, 0x0219, 0x21D2, 0x021A, 0x21D4, 0x021B, 0x21D6, - 0x021E, 0x21D8, 0x021F, 0x21DA, 0x0226, 0x21DC, 0x0227, 0x21DE, - 0x0228, 0x21E0, 0x0229, 0x21E2, 0x022A, 0xA1E4, 0x022B, 0xA1E6, - 0x022C, 0xA1E8, 0x022D, 0xA1EA, 0x022E, 0x21EC, 0x022F, 0x21EE, - 0x0230, 0xA1F0, 0x0231, 0xA1F2, 0x0232, 0x21F4, 0x0233, 0x21F6, - 0x0340, 0x1300, 0x0341, 0x1301, 0x0343, 0x1313, 0x0344, 0x21F8, - 0x0374, 0x12B9, 0x037E, 0x103B, 0x0385, 0x21FA, 0x0386, 0x21FC, - 0x0387, 0x10B7, 0x0388, 0x21FE, 0x0389, 0x2200, 0x038A, 0x2202, - 0x038C, 0x2204, 0x038E, 0x2206, 0x038F, 0x2208, 0x0390, 0xA20A, - 0x03AA, 0x220C, 0x03AB, 0x220E, 0x03AC, 0x2210, 0x03AD, 0x2212, - 0x03AE, 0x2214, 0x03AF, 0x2216, 0x03B0, 0xA218, 0x03CA, 0x221A, - 0x03CB, 0x221C, 0x03CC, 0x221E, 0x03CD, 0x2220, 0x03CE, 0x2222, - 0x03D3, 0x2224, 0x03D4, 0x2226, 0x0400, 0x2228, 0x0401, 0x222A, - 0x0403, 0x222C, 0x0407, 0x222E, 0x040C, 0x2230, 0x040D, 0x2232, - 0x040E, 0x2234, 0x0419, 0x2236, 0x0439, 0x2238, 0x0450, 0x223A, - 0x0451, 0x223C, 0x0453, 0x223E, 0x0457, 0x2240, 0x045C, 0x2242, - 0x045D, 0x2244, 0x045E, 0x2246, 0x0476, 0x2248, 0x0477, 0x224A, - 0x04C1, 0x224C, 0x04C2, 0x224E, 0x04D0, 0x2250, 0x04D1, 0x2252, - 0x04D2, 0x2254, 0x04D3, 0x2256, 0x04D6, 0x2258, 0x04D7, 0x225A, - 0x04DA, 0x225C, 0x04DB, 0x225E, 0x04DC, 0x2260, 0x04DD, 0x2262, - 0x04DE, 0x2264, 0x04DF, 0x2266, 0x04E2, 0x2268, 0x04E3, 0x226A, - 0x04E4, 0x226C, 0x04E5, 0x226E, 0x04E6, 0x2270, 0x04E7, 0x2272, - 0x04EA, 0x2274, 0x04EB, 0x2276, 0x04EC, 0x2278, 0x04ED, 0x227A, - 0x04EE, 0x227C, 0x04EF, 0x227E, 0x04F0, 0x2280, 0x04F1, 0x2282, - 0x04F2, 0x2284, 0x04F3, 0x2286, 0x04F4, 0x2288, 0x04F5, 0x228A, - 0x04F8, 0x228C, 0x04F9, 0x228E, 0x0622, 0x2290, 0x0623, 0x2292, - 0x0624, 0x2294, 0x0625, 0x2296, 0x0626, 0x2298, 0x06C0, 0x229A, - 0x06C2, 0x229C, 0x06D3, 0x229E, 0x0929, 0x22A0, 0x0931, 0x22A2, - 0x0934, 0x22A4, 0x0958, 0x22A6, 0x0959, 0x22A8, 0x095A, 0x22AA, - 0x095B, 0x22AC, 0x095C, 0x22AE, 0x095D, 0x22B0, 0x095E, 0x22B2, - 0x095F, 0x22B4, 0x09CB, 0x22B6, 0x09CC, 0x22B8, 0x09DC, 0x22BA, - 0x09DD, 0x22BC, 0x09DF, 0x22BE, 0x0A33, 0x22C0, 0x0A36, 0x22C2, - 0x0A59, 0x22C4, 0x0A5A, 0x22C6, 0x0A5B, 0x22C8, 0x0A5E, 0x22CA, - 0x0B48, 0x22CC, 0x0B4B, 0x22CE, 0x0B4C, 0x22D0, 0x0B5C, 0x22D2, - 0x0B5D, 0x22D4, 0x0B94, 0x22D6, 0x0BCA, 0x22D8, 0x0BCB, 0x22DA, - 0x0BCC, 0x22DC, 0x0C48, 0x22DE, 0x0CC0, 0x22E0, 0x0CC7, 0x22E2, - 0x0CC8, 0x22E4, 0x0CCA, 0x22E6, 0x0CCB, 0xA2E8, 0x0D4A, 0x22EA, - 0x0D4B, 0x22EC, 0x0D4C, 0x22EE, 0x0DDA, 0x22F0, 0x0DDC, 0x22F2, - 0x0DDD, 0xA2F4, 0x0DDE, 0x22F6, 0x0F43, 0x22F8, 0x0F4D, 0x22FA, - 0x0F52, 0x22FC, 0x0F57, 0x22FE, 0x0F5C, 0x2300, 0x0F69, 0x2302, - 0x0F73, 0x2304, 0x0F75, 0x2306, 0x0F76, 0x2308, 0x0F78, 0x230A, - 0x0F81, 0x230C, 0x0F93, 0x230E, 0x0F9D, 0x2310, 0x0FA2, 0x2312, - 0x0FA7, 0x2314, 0x0FAC, 0x2316, 0x0FB9, 0x2318, 0x1026, 0x231A, - 0x1E00, 0x231C, 0x1E01, 0x231E, 0x1E02, 0x2320, 0x1E03, 0x2322, 
- 0x1E04, 0x2324, 0x1E05, 0x2326, 0x1E06, 0x2328, 0x1E07, 0x232A, - 0x1E08, 0xA32C, 0x1E09, 0xA32E, 0x1E0A, 0x2330, 0x1E0B, 0x2332, - 0x1E0C, 0x2334, 0x1E0D, 0x2336, 0x1E0E, 0x2338, 0x1E0F, 0x233A, - 0x1E10, 0x233C, 0x1E11, 0x233E, 0x1E12, 0x2340, 0x1E13, 0x2342, - 0x1E14, 0xA344, 0x1E15, 0xA346, 0x1E16, 0xA348, 0x1E17, 0xA34A, - 0x1E18, 0x234C, 0x1E19, 0x234E, 0x1E1A, 0x2350, 0x1E1B, 0x2352, - 0x1E1C, 0xA354, 0x1E1D, 0xA356, 0x1E1E, 0x2358, 0x1E1F, 0x235A, - 0x1E20, 0x235C, 0x1E21, 0x235E, 0x1E22, 0x2360, 0x1E23, 0x2362, - 0x1E24, 0x2364, 0x1E25, 0x2366, 0x1E26, 0x2368, 0x1E27, 0x236A, - 0x1E28, 0x236C, 0x1E29, 0x236E, 0x1E2A, 0x2370, 0x1E2B, 0x2372, - 0x1E2C, 0x2374, 0x1E2D, 0x2376, 0x1E2E, 0xA378, 0x1E2F, 0xA37A, - 0x1E30, 0x237C, 0x1E31, 0x237E, 0x1E32, 0x2380, 0x1E33, 0x2382, - 0x1E34, 0x2384, 0x1E35, 0x2386, 0x1E36, 0x2388, 0x1E37, 0x238A, - 0x1E38, 0xA38C, 0x1E39, 0xA38E, 0x1E3A, 0x2390, 0x1E3B, 0x2392, - 0x1E3C, 0x2394, 0x1E3D, 0x2396, 0x1E3E, 0x2398, 0x1E3F, 0x239A, - 0x1E40, 0x239C, 0x1E41, 0x239E, 0x1E42, 0x23A0, 0x1E43, 0x23A2, - 0x1E44, 0x23A4, 0x1E45, 0x23A6, 0x1E46, 0x23A8, 0x1E47, 0x23AA, - 0x1E48, 0x23AC, 0x1E49, 0x23AE, 0x1E4A, 0x23B0, 0x1E4B, 0x23B2, - 0x1E4C, 0xA3B4, 0x1E4D, 0xA3B6, 0x1E4E, 0xA3B8, 0x1E4F, 0xA3BA, - 0x1E50, 0xA3BC, 0x1E51, 0xA3BE, 0x1E52, 0xA3C0, 0x1E53, 0xA3C2, - 0x1E54, 0x23C4, 0x1E55, 0x23C6, 0x1E56, 0x23C8, 0x1E57, 0x23CA, - 0x1E58, 0x23CC, 0x1E59, 0x23CE, 0x1E5A, 0x23D0, 0x1E5B, 0x23D2, - 0x1E5C, 0xA3D4, 0x1E5D, 0xA3D6, 0x1E5E, 0x23D8, 0x1E5F, 0x23DA, - 0x1E60, 0x23DC, 0x1E61, 0x23DE, 0x1E62, 0x23E0, 0x1E63, 0x23E2, - 0x1E64, 0xA3E4, 0x1E65, 0xA3E6, 0x1E66, 0xA3E8, 0x1E67, 0xA3EA, - 0x1E68, 0xA3EC, 0x1E69, 0xA3EE, 0x1E6A, 0x23F0, 0x1E6B, 0x23F2, - 0x1E6C, 0x23F4, 0x1E6D, 0x23F6, 0x1E6E, 0x23F8, 0x1E6F, 0x23FA, - 0x1E70, 0x23FC, 0x1E71, 0x23FE, 0x1E72, 0x2400, 0x1E73, 0x2402, - 0x1E74, 0x2404, 0x1E75, 0x2406, 0x1E76, 0x2408, 0x1E77, 0x240A, - 0x1E78, 0xA40C, 0x1E79, 0xA40E, 0x1E7A, 0xA410, 0x1E7B, 0xA412, - 0x1E7C, 0x2414, 0x1E7D, 0x2416, 0x1E7E, 0x2418, 0x1E7F, 0x241A, - 0x1E80, 0x241C, 0x1E81, 0x241E, 0x1E82, 0x2420, 0x1E83, 0x2422, - 0x1E84, 0x2424, 0x1E85, 0x2426, 0x1E86, 0x2428, 0x1E87, 0x242A, - 0x1E88, 0x242C, 0x1E89, 0x242E, 0x1E8A, 0x2430, 0x1E8B, 0x2432, - 0x1E8C, 0x2434, 0x1E8D, 0x2436, 0x1E8E, 0x2438, 0x1E8F, 0x243A, - 0x1E90, 0x243C, 0x1E91, 0x243E, 0x1E92, 0x2440, 0x1E93, 0x2442, - 0x1E94, 0x2444, 0x1E95, 0x2446, 0x1E96, 0x2448, 0x1E97, 0x244A, - 0x1E98, 0x244C, 0x1E99, 0x244E, 0x1E9B, 0x2450, 0x1EA0, 0x2452, - 0x1EA1, 0x2454, 0x1EA2, 0x2456, 0x1EA3, 0x2458, 0x1EA4, 0xA45A, - 0x1EA5, 0xA45C, 0x1EA6, 0xA45E, 0x1EA7, 0xA460, 0x1EA8, 0xA462, - 0x1EA9, 0xA464, 0x1EAA, 0xA466, 0x1EAB, 0xA468, 0x1EAC, 0xA46A, - 0x1EAD, 0xA46C, 0x1EAE, 0xA46E, 0x1EAF, 0xA470, 0x1EB0, 0xA472, - 0x1EB1, 0xA474, 0x1EB2, 0xA476, 0x1EB3, 0xA478, 0x1EB4, 0xA47A, - 0x1EB5, 0xA47C, 0x1EB6, 0xA47E, 0x1EB7, 0xA480, 0x1EB8, 0x2482, - 0x1EB9, 0x2484, 0x1EBA, 0x2486, 0x1EBB, 0x2488, 0x1EBC, 0x248A, - 0x1EBD, 0x248C, 0x1EBE, 0xA48E, 0x1EBF, 0xA490, 0x1EC0, 0xA492, - 0x1EC1, 0xA494, 0x1EC2, 0xA496, 0x1EC3, 0xA498, 0x1EC4, 0xA49A, - 0x1EC5, 0xA49C, 0x1EC6, 0xA49E, 0x1EC7, 0xA4A0, 0x1EC8, 0x24A2, - 0x1EC9, 0x24A4, 0x1ECA, 0x24A6, 0x1ECB, 0x24A8, 0x1ECC, 0x24AA, - 0x1ECD, 0x24AC, 0x1ECE, 0x24AE, 0x1ECF, 0x24B0, 0x1ED0, 0xA4B2, - 0x1ED1, 0xA4B4, 0x1ED2, 0xA4B6, 0x1ED3, 0xA4B8, 0x1ED4, 0xA4BA, - 0x1ED5, 0xA4BC, 0x1ED6, 0xA4BE, 0x1ED7, 0xA4C0, 0x1ED8, 0xA4C2, - 0x1ED9, 0xA4C4, 0x1EDA, 0xA4C6, 0x1EDB, 0xA4C8, 0x1EDC, 0xA4CA, - 0x1EDD, 0xA4CC, 0x1EDE, 0xA4CE, 0x1EDF, 0xA4D0, 
0x1EE0, 0xA4D2, - 0x1EE1, 0xA4D4, 0x1EE2, 0xA4D6, 0x1EE3, 0xA4D8, 0x1EE4, 0x24DA, - 0x1EE5, 0x24DC, 0x1EE6, 0x24DE, 0x1EE7, 0x24E0, 0x1EE8, 0xA4E2, - 0x1EE9, 0xA4E4, 0x1EEA, 0xA4E6, 0x1EEB, 0xA4E8, 0x1EEC, 0xA4EA, - 0x1EED, 0xA4EC, 0x1EEE, 0xA4EE, 0x1EEF, 0xA4F0, 0x1EF0, 0xA4F2, - 0x1EF1, 0xA4F4, 0x1EF2, 0x24F6, 0x1EF3, 0x24F8, 0x1EF4, 0x24FA, - 0x1EF5, 0x24FC, 0x1EF6, 0x24FE, 0x1EF7, 0x2500, 0x1EF8, 0x2502, - 0x1EF9, 0x2504, 0x1F00, 0x2506, 0x1F01, 0x2508, 0x1F02, 0xA50A, - 0x1F03, 0xA50C, 0x1F04, 0xA50E, 0x1F05, 0xA510, 0x1F06, 0xA512, - 0x1F07, 0xA514, 0x1F08, 0x2516, 0x1F09, 0x2518, 0x1F0A, 0xA51A, - 0x1F0B, 0xA51C, 0x1F0C, 0xA51E, 0x1F0D, 0xA520, 0x1F0E, 0xA522, - 0x1F0F, 0xA524, 0x1F10, 0x2526, 0x1F11, 0x2528, 0x1F12, 0xA52A, - 0x1F13, 0xA52C, 0x1F14, 0xA52E, 0x1F15, 0xA530, 0x1F18, 0x2532, - 0x1F19, 0x2534, 0x1F1A, 0xA536, 0x1F1B, 0xA538, 0x1F1C, 0xA53A, - 0x1F1D, 0xA53C, 0x1F20, 0x253E, 0x1F21, 0x2540, 0x1F22, 0xA542, - 0x1F23, 0xA544, 0x1F24, 0xA546, 0x1F25, 0xA548, 0x1F26, 0xA54A, - 0x1F27, 0xA54C, 0x1F28, 0x254E, 0x1F29, 0x2550, 0x1F2A, 0xA552, - 0x1F2B, 0xA554, 0x1F2C, 0xA556, 0x1F2D, 0xA558, 0x1F2E, 0xA55A, - 0x1F2F, 0xA55C, 0x1F30, 0x255E, 0x1F31, 0x2560, 0x1F32, 0xA562, - 0x1F33, 0xA564, 0x1F34, 0xA566, 0x1F35, 0xA568, 0x1F36, 0xA56A, - 0x1F37, 0xA56C, 0x1F38, 0x256E, 0x1F39, 0x2570, 0x1F3A, 0xA572, - 0x1F3B, 0xA574, 0x1F3C, 0xA576, 0x1F3D, 0xA578, 0x1F3E, 0xA57A, - 0x1F3F, 0xA57C, 0x1F40, 0x257E, 0x1F41, 0x2580, 0x1F42, 0xA582, - 0x1F43, 0xA584, 0x1F44, 0xA586, 0x1F45, 0xA588, 0x1F48, 0x258A, - 0x1F49, 0x258C, 0x1F4A, 0xA58E, 0x1F4B, 0xA590, 0x1F4C, 0xA592, - 0x1F4D, 0xA594, 0x1F50, 0x2596, 0x1F51, 0x2598, 0x1F52, 0xA59A, - 0x1F53, 0xA59C, 0x1F54, 0xA59E, 0x1F55, 0xA5A0, 0x1F56, 0xA5A2, - 0x1F57, 0xA5A4, 0x1F59, 0x25A6, 0x1F5B, 0xA5A8, 0x1F5D, 0xA5AA, - 0x1F5F, 0xA5AC, 0x1F60, 0x25AE, 0x1F61, 0x25B0, 0x1F62, 0xA5B2, - 0x1F63, 0xA5B4, 0x1F64, 0xA5B6, 0x1F65, 0xA5B8, 0x1F66, 0xA5BA, - 0x1F67, 0xA5BC, 0x1F68, 0x25BE, 0x1F69, 0x25C0, 0x1F6A, 0xA5C2, - 0x1F6B, 0xA5C4, 0x1F6C, 0xA5C6, 0x1F6D, 0xA5C8, 0x1F6E, 0xA5CA, - 0x1F6F, 0xA5CC, 0x1F70, 0x25CE, 0x1F71, 0x93AC, 0x1F72, 0x25D0, - 0x1F73, 0x93AD, 0x1F74, 0x25D2, 0x1F75, 0x93AE, 0x1F76, 0x25D4, - 0x1F77, 0x93AF, 0x1F78, 0x25D6, 0x1F79, 0x93CC, 0x1F7A, 0x25D8, - 0x1F7B, 0x93CD, 0x1F7C, 0x25DA, 0x1F7D, 0x93CE, 0x1F80, 0xA5DC, - 0x1F81, 0xA5DE, 0x1F82, 0xA5E0, 0x1F83, 0xA5E2, 0x1F84, 0xA5E4, - 0x1F85, 0xA5E6, 0x1F86, 0xA5E8, 0x1F87, 0xA5EA, 0x1F88, 0xA5EC, - 0x1F89, 0xA5EE, 0x1F8A, 0xA5F0, 0x1F8B, 0xA5F2, 0x1F8C, 0xA5F4, - 0x1F8D, 0xA5F6, 0x1F8E, 0xA5F8, 0x1F8F, 0xA5FA, 0x1F90, 0xA5FC, - 0x1F91, 0xA5FE, 0x1F92, 0xA600, 0x1F93, 0xA602, 0x1F94, 0xA604, - 0x1F95, 0xA606, 0x1F96, 0xA608, 0x1F97, 0xA60A, 0x1F98, 0xA60C, - 0x1F99, 0xA60E, 0x1F9A, 0xA610, 0x1F9B, 0xA612, 0x1F9C, 0xA614, - 0x1F9D, 0xA616, 0x1F9E, 0xA618, 0x1F9F, 0xA61A, 0x1FA0, 0xA61C, - 0x1FA1, 0xA61E, 0x1FA2, 0xA620, 0x1FA3, 0xA622, 0x1FA4, 0xA624, - 0x1FA5, 0xA626, 0x1FA6, 0xA628, 0x1FA7, 0xA62A, 0x1FA8, 0xA62C, - 0x1FA9, 0xA62E, 0x1FAA, 0xA630, 0x1FAB, 0xA632, 0x1FAC, 0xA634, - 0x1FAD, 0xA636, 0x1FAE, 0xA638, 0x1FAF, 0xA63A, 0x1FB0, 0x263C, - 0x1FB1, 0x263E, 0x1FB2, 0xA640, 0x1FB3, 0x2642, 0x1FB4, 0xA644, - 0x1FB6, 0x2646, 0x1FB7, 0xA648, 0x1FB8, 0x264A, 0x1FB9, 0x264C, - 0x1FBA, 0x264E, 0x1FBB, 0x9386, 0x1FBC, 0x2650, 0x1FBE, 0x13B9, - 0x1FC1, 0x2652, 0x1FC2, 0xA654, 0x1FC3, 0x2656, 0x1FC4, 0xA658, - 0x1FC6, 0x265A, 0x1FC7, 0xA65C, 0x1FC8, 0x265E, 0x1FC9, 0x9388, - 0x1FCA, 0x2660, 0x1FCB, 0x9389, 0x1FCC, 0x2662, 0x1FCD, 0x2664, - 0x1FCE, 0x2666, 0x1FCF, 0x2668, 
0x1FD0, 0x266A, 0x1FD1, 0x266C, - 0x1FD2, 0xA66E, 0x1FD3, 0x9390, 0x1FD6, 0x2670, 0x1FD7, 0xA672, - 0x1FD8, 0x2674, 0x1FD9, 0x2676, 0x1FDA, 0x2678, 0x1FDB, 0x938A, - 0x1FDD, 0x267A, 0x1FDE, 0x267C, 0x1FDF, 0x267E, 0x1FE0, 0x2680, - 0x1FE1, 0x2682, 0x1FE2, 0xA684, 0x1FE3, 0x93B0, 0x1FE4, 0x2686, - 0x1FE5, 0x2688, 0x1FE6, 0x268A, 0x1FE7, 0xA68C, 0x1FE8, 0x268E, - 0x1FE9, 0x2690, 0x1FEA, 0x2692, 0x1FEB, 0x938E, 0x1FEC, 0x2694, - 0x1FED, 0x2696, 0x1FEE, 0x9385, 0x1FEF, 0x1060, 0x1FF2, 0xA698, - 0x1FF3, 0x269A, 0x1FF4, 0xA69C, 0x1FF6, 0x269E, 0x1FF7, 0xA6A0, - 0x1FF8, 0x26A2, 0x1FF9, 0x938C, 0x1FFA, 0x26A4, 0x1FFB, 0x938F, - 0x1FFC, 0x26A6, 0x1FFD, 0x10B4, 0x304C, 0x26A8, 0x304E, 0x26AA, - 0x3050, 0x26AC, 0x3052, 0x26AE, 0x3054, 0x26B0, 0x3056, 0x26B2, - 0x3058, 0x26B4, 0x305A, 0x26B6, 0x305C, 0x26B8, 0x305E, 0x26BA, - 0x3060, 0x26BC, 0x3062, 0x26BE, 0x3065, 0x26C0, 0x3067, 0x26C2, - 0x3069, 0x26C4, 0x3070, 0x26C6, 0x3071, 0x26C8, 0x3073, 0x26CA, - 0x3074, 0x26CC, 0x3076, 0x26CE, 0x3077, 0x26D0, 0x3079, 0x26D2, - 0x307A, 0x26D4, 0x307C, 0x26D6, 0x307D, 0x26D8, 0x3094, 0x26DA, - 0x309E, 0x26DC, 0x30AC, 0x26DE, 0x30AE, 0x26E0, 0x30B0, 0x26E2, - 0x30B2, 0x26E4, 0x30B4, 0x26E6, 0x30B6, 0x26E8, 0x30B8, 0x26EA, - 0x30BA, 0x26EC, 0x30BC, 0x26EE, 0x30BE, 0x26F0, 0x30C0, 0x26F2, - 0x30C2, 0x26F4, 0x30C5, 0x26F6, 0x30C7, 0x26F8, 0x30C9, 0x26FA, - 0x30D0, 0x26FC, 0x30D1, 0x26FE, 0x30D3, 0x2700, 0x30D4, 0x2702, - 0x30D6, 0x2704, 0x30D7, 0x2706, 0x30D9, 0x2708, 0x30DA, 0x270A, - 0x30DC, 0x270C, 0x30DD, 0x270E, 0x30F4, 0x2710, 0x30F7, 0x2712, - 0x30F8, 0x2714, 0x30F9, 0x2716, 0x30FA, 0x2718, 0x30FE, 0x271A, - 0xFB1D, 0x271C, 0xFB1F, 0x271E, 0xFB2A, 0x2720, 0xFB2B, 0x2722, - 0xFB2C, 0xA724, 0xFB2D, 0xA726, 0xFB2E, 0x2728, 0xFB2F, 0x272A, - 0xFB30, 0x272C, 0xFB31, 0x272E, 0xFB32, 0x2730, 0xFB33, 0x2732, - 0xFB34, 0x2734, 0xFB35, 0x2736, 0xFB36, 0x2738, 0xFB38, 0x273A, - 0xFB39, 0x273C, 0xFB3A, 0x273E, 0xFB3B, 0x2740, 0xFB3C, 0x2742, - 0xFB3E, 0x2744, 0xFB40, 0x2746, 0xFB41, 0x2748, 0xFB43, 0x274A, - 0xFB44, 0x274C, 0xFB46, 0x274E, 0xFB47, 0x2750, 0xFB48, 0x2752, - 0xFB49, 0x2754, 0xFB4A, 0x2756, 0xFB4B, 0x2758, 0xFB4C, 0x275A, - 0xFB4D, 0x275C, 0xFB4E, 0x275E + __CFUniCharDecompositionTable[] = { + 0x00C0, 0x2000, 0x00C1, 0x2002, 0x00C2, 0x2004, 0x00C3, 0x2006, + 0x00C4, 0x2008, 0x00C5, 0x200A, 0x00C7, 0x200C, 0x00C8, 0x200E, + 0x00C9, 0x2010, 0x00CA, 0x2012, 0x00CB, 0x2014, 0x00CC, 0x2016, + 0x00CD, 0x2018, 0x00CE, 0x201A, 0x00CF, 0x201C, 0x00D1, 0x201E, + 0x00D2, 0x2020, 0x00D3, 0x2022, 0x00D4, 0x2024, 0x00D5, 0x2026, + 0x00D6, 0x2028, 0x00D9, 0x202A, 0x00DA, 0x202C, 0x00DB, 0x202E, + 0x00DC, 0x2030, 0x00DD, 0x2032, 0x00E0, 0x2034, 0x00E1, 0x2036, + 0x00E2, 0x2038, 0x00E3, 0x203A, 0x00E4, 0x203C, 0x00E5, 0x203E, + 0x00E7, 0x2040, 0x00E8, 0x2042, 0x00E9, 0x2044, 0x00EA, 0x2046, + 0x00EB, 0x2048, 0x00EC, 0x204A, 0x00ED, 0x204C, 0x00EE, 0x204E, + 0x00EF, 0x2050, 0x00F1, 0x2052, 0x00F2, 0x2054, 0x00F3, 0x2056, + 0x00F4, 0x2058, 0x00F5, 0x205A, 0x00F6, 0x205C, 0x00F9, 0x205E, + 0x00FA, 0x2060, 0x00FB, 0x2062, 0x00FC, 0x2064, 0x00FD, 0x2066, + 0x00FF, 0x2068, 0x0100, 0x206A, 0x0101, 0x206C, 0x0102, 0x206E, + 0x0103, 0x2070, 0x0104, 0x2072, 0x0105, 0x2074, 0x0106, 0x2076, + 0x0107, 0x2078, 0x0108, 0x207A, 0x0109, 0x207C, 0x010A, 0x207E, + 0x010B, 0x2080, 0x010C, 0x2082, 0x010D, 0x2084, 0x010E, 0x2086, + 0x010F, 0x2088, 0x0112, 0x208A, 0x0113, 0x208C, 0x0114, 0x208E, + 0x0115, 0x2090, 0x0116, 0x2092, 0x0117, 0x2094, 0x0118, 0x2096, + 0x0119, 0x2098, 0x011A, 0x209A, 0x011B, 0x209C, 0x011C, 0x209E, + 0x011D, 0x20A0, 
0x011E, 0x20A2, 0x011F, 0x20A4, 0x0120, 0x20A6, + 0x0121, 0x20A8, 0x0122, 0x20AA, 0x0123, 0x20AC, 0x0124, 0x20AE, + 0x0125, 0x20B0, 0x0128, 0x20B2, 0x0129, 0x20B4, 0x012A, 0x20B6, + 0x012B, 0x20B8, 0x012C, 0x20BA, 0x012D, 0x20BC, 0x012E, 0x20BE, + 0x012F, 0x20C0, 0x0130, 0x20C2, 0x0134, 0x20C4, 0x0135, 0x20C6, + 0x0136, 0x20C8, 0x0137, 0x20CA, 0x0139, 0x20CC, 0x013A, 0x20CE, + 0x013B, 0x20D0, 0x013C, 0x20D2, 0x013D, 0x20D4, 0x013E, 0x20D6, + 0x0143, 0x20D8, 0x0144, 0x20DA, 0x0145, 0x20DC, 0x0146, 0x20DE, + 0x0147, 0x20E0, 0x0148, 0x20E2, 0x014C, 0x20E4, 0x014D, 0x20E6, + 0x014E, 0x20E8, 0x014F, 0x20EA, 0x0150, 0x20EC, 0x0151, 0x20EE, + 0x0154, 0x20F0, 0x0155, 0x20F2, 0x0156, 0x20F4, 0x0157, 0x20F6, + 0x0158, 0x20F8, 0x0159, 0x20FA, 0x015A, 0x20FC, 0x015B, 0x20FE, + 0x015C, 0x2100, 0x015D, 0x2102, 0x015E, 0x2104, 0x015F, 0x2106, + 0x0160, 0x2108, 0x0161, 0x210A, 0x0162, 0x210C, 0x0163, 0x210E, + 0x0164, 0x2110, 0x0165, 0x2112, 0x0168, 0x2114, 0x0169, 0x2116, + 0x016A, 0x2118, 0x016B, 0x211A, 0x016C, 0x211C, 0x016D, 0x211E, + 0x016E, 0x2120, 0x016F, 0x2122, 0x0170, 0x2124, 0x0171, 0x2126, + 0x0172, 0x2128, 0x0173, 0x212A, 0x0174, 0x212C, 0x0175, 0x212E, + 0x0176, 0x2130, 0x0177, 0x2132, 0x0178, 0x2134, 0x0179, 0x2136, + 0x017A, 0x2138, 0x017B, 0x213A, 0x017C, 0x213C, 0x017D, 0x213E, + 0x017E, 0x2140, 0x01A0, 0x2142, 0x01A1, 0x2144, 0x01AF, 0x2146, + 0x01B0, 0x2148, 0x01CD, 0x214A, 0x01CE, 0x214C, 0x01CF, 0x214E, + 0x01D0, 0x2150, 0x01D1, 0x2152, 0x01D2, 0x2154, 0x01D3, 0x2156, + 0x01D4, 0x2158, 0x01D5, 0xA15A, 0x01D6, 0xA15C, 0x01D7, 0xA15E, + 0x01D8, 0xA160, 0x01D9, 0xA162, 0x01DA, 0xA164, 0x01DB, 0xA166, + 0x01DC, 0xA168, 0x01DE, 0xA16A, 0x01DF, 0xA16C, 0x01E0, 0xA16E, + 0x01E1, 0xA170, 0x01E2, 0x2172, 0x01E3, 0x2174, 0x01E6, 0x2176, + 0x01E7, 0x2178, 0x01E8, 0x217A, 0x01E9, 0x217C, 0x01EA, 0x217E, + 0x01EB, 0x2180, 0x01EC, 0xA182, 0x01ED, 0xA184, 0x01EE, 0x2186, + 0x01EF, 0x2188, 0x01F0, 0x218A, 0x01F4, 0x218C, 0x01F5, 0x218E, + 0x01F8, 0x2190, 0x01F9, 0x2192, 0x01FA, 0xA194, 0x01FB, 0xA196, + 0x01FC, 0x2198, 0x01FD, 0x219A, 0x01FE, 0x219C, 0x01FF, 0x219E, + 0x0200, 0x21A0, 0x0201, 0x21A2, 0x0202, 0x21A4, 0x0203, 0x21A6, + 0x0204, 0x21A8, 0x0205, 0x21AA, 0x0206, 0x21AC, 0x0207, 0x21AE, + 0x0208, 0x21B0, 0x0209, 0x21B2, 0x020A, 0x21B4, 0x020B, 0x21B6, + 0x020C, 0x21B8, 0x020D, 0x21BA, 0x020E, 0x21BC, 0x020F, 0x21BE, + 0x0210, 0x21C0, 0x0211, 0x21C2, 0x0212, 0x21C4, 0x0213, 0x21C6, + 0x0214, 0x21C8, 0x0215, 0x21CA, 0x0216, 0x21CC, 0x0217, 0x21CE, + 0x0218, 0x21D0, 0x0219, 0x21D2, 0x021A, 0x21D4, 0x021B, 0x21D6, + 0x021E, 0x21D8, 0x021F, 0x21DA, 0x0226, 0x21DC, 0x0227, 0x21DE, + 0x0228, 0x21E0, 0x0229, 0x21E2, 0x022A, 0xA1E4, 0x022B, 0xA1E6, + 0x022C, 0xA1E8, 0x022D, 0xA1EA, 0x022E, 0x21EC, 0x022F, 0x21EE, + 0x0230, 0xA1F0, 0x0231, 0xA1F2, 0x0232, 0x21F4, 0x0233, 0x21F6, + 0x0340, 0x1300, 0x0341, 0x1301, 0x0343, 0x1313, 0x0344, 0x21F8, + 0x0374, 0x12B9, 0x037E, 0x103B, 0x0385, 0x21FA, 0x0386, 0x21FC, + 0x0387, 0x10B7, 0x0388, 0x21FE, 0x0389, 0x2200, 0x038A, 0x2202, + 0x038C, 0x2204, 0x038E, 0x2206, 0x038F, 0x2208, 0x0390, 0xA20A, + 0x03AA, 0x220C, 0x03AB, 0x220E, 0x03AC, 0x2210, 0x03AD, 0x2212, + 0x03AE, 0x2214, 0x03AF, 0x2216, 0x03B0, 0xA218, 0x03CA, 0x221A, + 0x03CB, 0x221C, 0x03CC, 0x221E, 0x03CD, 0x2220, 0x03CE, 0x2222, + 0x03D3, 0x2224, 0x03D4, 0x2226, 0x0400, 0x2228, 0x0401, 0x222A, + 0x0403, 0x222C, 0x0407, 0x222E, 0x040C, 0x2230, 0x040D, 0x2232, + 0x040E, 0x2234, 0x0419, 0x2236, 0x0439, 0x2238, 0x0450, 0x223A, + 0x0451, 0x223C, 0x0453, 0x223E, 0x0457, 0x2240, 0x045C, 0x2242, + 
0x045D, 0x2244, 0x045E, 0x2246, 0x0476, 0x2248, 0x0477, 0x224A, + 0x04C1, 0x224C, 0x04C2, 0x224E, 0x04D0, 0x2250, 0x04D1, 0x2252, + 0x04D2, 0x2254, 0x04D3, 0x2256, 0x04D6, 0x2258, 0x04D7, 0x225A, + 0x04DA, 0x225C, 0x04DB, 0x225E, 0x04DC, 0x2260, 0x04DD, 0x2262, + 0x04DE, 0x2264, 0x04DF, 0x2266, 0x04E2, 0x2268, 0x04E3, 0x226A, + 0x04E4, 0x226C, 0x04E5, 0x226E, 0x04E6, 0x2270, 0x04E7, 0x2272, + 0x04EA, 0x2274, 0x04EB, 0x2276, 0x04EC, 0x2278, 0x04ED, 0x227A, + 0x04EE, 0x227C, 0x04EF, 0x227E, 0x04F0, 0x2280, 0x04F1, 0x2282, + 0x04F2, 0x2284, 0x04F3, 0x2286, 0x04F4, 0x2288, 0x04F5, 0x228A, + 0x04F8, 0x228C, 0x04F9, 0x228E, 0x0622, 0x2290, 0x0623, 0x2292, + 0x0624, 0x2294, 0x0625, 0x2296, 0x0626, 0x2298, 0x06C0, 0x229A, + 0x06C2, 0x229C, 0x06D3, 0x229E, 0x0929, 0x22A0, 0x0931, 0x22A2, + 0x0934, 0x22A4, 0x0958, 0x22A6, 0x0959, 0x22A8, 0x095A, 0x22AA, + 0x095B, 0x22AC, 0x095C, 0x22AE, 0x095D, 0x22B0, 0x095E, 0x22B2, + 0x095F, 0x22B4, 0x09CB, 0x22B6, 0x09CC, 0x22B8, 0x09DC, 0x22BA, + 0x09DD, 0x22BC, 0x09DF, 0x22BE, 0x0A33, 0x22C0, 0x0A36, 0x22C2, + 0x0A59, 0x22C4, 0x0A5A, 0x22C6, 0x0A5B, 0x22C8, 0x0A5E, 0x22CA, + 0x0B48, 0x22CC, 0x0B4B, 0x22CE, 0x0B4C, 0x22D0, 0x0B5C, 0x22D2, + 0x0B5D, 0x22D4, 0x0B94, 0x22D6, 0x0BCA, 0x22D8, 0x0BCB, 0x22DA, + 0x0BCC, 0x22DC, 0x0C48, 0x22DE, 0x0CC0, 0x22E0, 0x0CC7, 0x22E2, + 0x0CC8, 0x22E4, 0x0CCA, 0x22E6, 0x0CCB, 0xA2E8, 0x0D4A, 0x22EA, + 0x0D4B, 0x22EC, 0x0D4C, 0x22EE, 0x0DDA, 0x22F0, 0x0DDC, 0x22F2, + 0x0DDD, 0xA2F4, 0x0DDE, 0x22F6, 0x0F43, 0x22F8, 0x0F4D, 0x22FA, + 0x0F52, 0x22FC, 0x0F57, 0x22FE, 0x0F5C, 0x2300, 0x0F69, 0x2302, + 0x0F73, 0x2304, 0x0F75, 0x2306, 0x0F76, 0x2308, 0x0F78, 0x230A, + 0x0F81, 0x230C, 0x0F93, 0x230E, 0x0F9D, 0x2310, 0x0FA2, 0x2312, + 0x0FA7, 0x2314, 0x0FAC, 0x2316, 0x0FB9, 0x2318, 0x1026, 0x231A, + 0x1E00, 0x231C, 0x1E01, 0x231E, 0x1E02, 0x2320, 0x1E03, 0x2322, + 0x1E04, 0x2324, 0x1E05, 0x2326, 0x1E06, 0x2328, 0x1E07, 0x232A, + 0x1E08, 0xA32C, 0x1E09, 0xA32E, 0x1E0A, 0x2330, 0x1E0B, 0x2332, + 0x1E0C, 0x2334, 0x1E0D, 0x2336, 0x1E0E, 0x2338, 0x1E0F, 0x233A, + 0x1E10, 0x233C, 0x1E11, 0x233E, 0x1E12, 0x2340, 0x1E13, 0x2342, + 0x1E14, 0xA344, 0x1E15, 0xA346, 0x1E16, 0xA348, 0x1E17, 0xA34A, + 0x1E18, 0x234C, 0x1E19, 0x234E, 0x1E1A, 0x2350, 0x1E1B, 0x2352, + 0x1E1C, 0xA354, 0x1E1D, 0xA356, 0x1E1E, 0x2358, 0x1E1F, 0x235A, + 0x1E20, 0x235C, 0x1E21, 0x235E, 0x1E22, 0x2360, 0x1E23, 0x2362, + 0x1E24, 0x2364, 0x1E25, 0x2366, 0x1E26, 0x2368, 0x1E27, 0x236A, + 0x1E28, 0x236C, 0x1E29, 0x236E, 0x1E2A, 0x2370, 0x1E2B, 0x2372, + 0x1E2C, 0x2374, 0x1E2D, 0x2376, 0x1E2E, 0xA378, 0x1E2F, 0xA37A, + 0x1E30, 0x237C, 0x1E31, 0x237E, 0x1E32, 0x2380, 0x1E33, 0x2382, + 0x1E34, 0x2384, 0x1E35, 0x2386, 0x1E36, 0x2388, 0x1E37, 0x238A, + 0x1E38, 0xA38C, 0x1E39, 0xA38E, 0x1E3A, 0x2390, 0x1E3B, 0x2392, + 0x1E3C, 0x2394, 0x1E3D, 0x2396, 0x1E3E, 0x2398, 0x1E3F, 0x239A, + 0x1E40, 0x239C, 0x1E41, 0x239E, 0x1E42, 0x23A0, 0x1E43, 0x23A2, + 0x1E44, 0x23A4, 0x1E45, 0x23A6, 0x1E46, 0x23A8, 0x1E47, 0x23AA, + 0x1E48, 0x23AC, 0x1E49, 0x23AE, 0x1E4A, 0x23B0, 0x1E4B, 0x23B2, + 0x1E4C, 0xA3B4, 0x1E4D, 0xA3B6, 0x1E4E, 0xA3B8, 0x1E4F, 0xA3BA, + 0x1E50, 0xA3BC, 0x1E51, 0xA3BE, 0x1E52, 0xA3C0, 0x1E53, 0xA3C2, + 0x1E54, 0x23C4, 0x1E55, 0x23C6, 0x1E56, 0x23C8, 0x1E57, 0x23CA, + 0x1E58, 0x23CC, 0x1E59, 0x23CE, 0x1E5A, 0x23D0, 0x1E5B, 0x23D2, + 0x1E5C, 0xA3D4, 0x1E5D, 0xA3D6, 0x1E5E, 0x23D8, 0x1E5F, 0x23DA, + 0x1E60, 0x23DC, 0x1E61, 0x23DE, 0x1E62, 0x23E0, 0x1E63, 0x23E2, + 0x1E64, 0xA3E4, 0x1E65, 0xA3E6, 0x1E66, 0xA3E8, 0x1E67, 0xA3EA, + 0x1E68, 0xA3EC, 0x1E69, 0xA3EE, 0x1E6A, 0x23F0, 0x1E6B, 
0x23F2, + 0x1E6C, 0x23F4, 0x1E6D, 0x23F6, 0x1E6E, 0x23F8, 0x1E6F, 0x23FA, + 0x1E70, 0x23FC, 0x1E71, 0x23FE, 0x1E72, 0x2400, 0x1E73, 0x2402, + 0x1E74, 0x2404, 0x1E75, 0x2406, 0x1E76, 0x2408, 0x1E77, 0x240A, + 0x1E78, 0xA40C, 0x1E79, 0xA40E, 0x1E7A, 0xA410, 0x1E7B, 0xA412, + 0x1E7C, 0x2414, 0x1E7D, 0x2416, 0x1E7E, 0x2418, 0x1E7F, 0x241A, + 0x1E80, 0x241C, 0x1E81, 0x241E, 0x1E82, 0x2420, 0x1E83, 0x2422, + 0x1E84, 0x2424, 0x1E85, 0x2426, 0x1E86, 0x2428, 0x1E87, 0x242A, + 0x1E88, 0x242C, 0x1E89, 0x242E, 0x1E8A, 0x2430, 0x1E8B, 0x2432, + 0x1E8C, 0x2434, 0x1E8D, 0x2436, 0x1E8E, 0x2438, 0x1E8F, 0x243A, + 0x1E90, 0x243C, 0x1E91, 0x243E, 0x1E92, 0x2440, 0x1E93, 0x2442, + 0x1E94, 0x2444, 0x1E95, 0x2446, 0x1E96, 0x2448, 0x1E97, 0x244A, + 0x1E98, 0x244C, 0x1E99, 0x244E, 0x1E9B, 0x2450, 0x1EA0, 0x2452, + 0x1EA1, 0x2454, 0x1EA2, 0x2456, 0x1EA3, 0x2458, 0x1EA4, 0xA45A, + 0x1EA5, 0xA45C, 0x1EA6, 0xA45E, 0x1EA7, 0xA460, 0x1EA8, 0xA462, + 0x1EA9, 0xA464, 0x1EAA, 0xA466, 0x1EAB, 0xA468, 0x1EAC, 0xA46A, + 0x1EAD, 0xA46C, 0x1EAE, 0xA46E, 0x1EAF, 0xA470, 0x1EB0, 0xA472, + 0x1EB1, 0xA474, 0x1EB2, 0xA476, 0x1EB3, 0xA478, 0x1EB4, 0xA47A, + 0x1EB5, 0xA47C, 0x1EB6, 0xA47E, 0x1EB7, 0xA480, 0x1EB8, 0x2482, + 0x1EB9, 0x2484, 0x1EBA, 0x2486, 0x1EBB, 0x2488, 0x1EBC, 0x248A, + 0x1EBD, 0x248C, 0x1EBE, 0xA48E, 0x1EBF, 0xA490, 0x1EC0, 0xA492, + 0x1EC1, 0xA494, 0x1EC2, 0xA496, 0x1EC3, 0xA498, 0x1EC4, 0xA49A, + 0x1EC5, 0xA49C, 0x1EC6, 0xA49E, 0x1EC7, 0xA4A0, 0x1EC8, 0x24A2, + 0x1EC9, 0x24A4, 0x1ECA, 0x24A6, 0x1ECB, 0x24A8, 0x1ECC, 0x24AA, + 0x1ECD, 0x24AC, 0x1ECE, 0x24AE, 0x1ECF, 0x24B0, 0x1ED0, 0xA4B2, + 0x1ED1, 0xA4B4, 0x1ED2, 0xA4B6, 0x1ED3, 0xA4B8, 0x1ED4, 0xA4BA, + 0x1ED5, 0xA4BC, 0x1ED6, 0xA4BE, 0x1ED7, 0xA4C0, 0x1ED8, 0xA4C2, + 0x1ED9, 0xA4C4, 0x1EDA, 0xA4C6, 0x1EDB, 0xA4C8, 0x1EDC, 0xA4CA, + 0x1EDD, 0xA4CC, 0x1EDE, 0xA4CE, 0x1EDF, 0xA4D0, 0x1EE0, 0xA4D2, + 0x1EE1, 0xA4D4, 0x1EE2, 0xA4D6, 0x1EE3, 0xA4D8, 0x1EE4, 0x24DA, + 0x1EE5, 0x24DC, 0x1EE6, 0x24DE, 0x1EE7, 0x24E0, 0x1EE8, 0xA4E2, + 0x1EE9, 0xA4E4, 0x1EEA, 0xA4E6, 0x1EEB, 0xA4E8, 0x1EEC, 0xA4EA, + 0x1EED, 0xA4EC, 0x1EEE, 0xA4EE, 0x1EEF, 0xA4F0, 0x1EF0, 0xA4F2, + 0x1EF1, 0xA4F4, 0x1EF2, 0x24F6, 0x1EF3, 0x24F8, 0x1EF4, 0x24FA, + 0x1EF5, 0x24FC, 0x1EF6, 0x24FE, 0x1EF7, 0x2500, 0x1EF8, 0x2502, + 0x1EF9, 0x2504, 0x1F00, 0x2506, 0x1F01, 0x2508, 0x1F02, 0xA50A, + 0x1F03, 0xA50C, 0x1F04, 0xA50E, 0x1F05, 0xA510, 0x1F06, 0xA512, + 0x1F07, 0xA514, 0x1F08, 0x2516, 0x1F09, 0x2518, 0x1F0A, 0xA51A, + 0x1F0B, 0xA51C, 0x1F0C, 0xA51E, 0x1F0D, 0xA520, 0x1F0E, 0xA522, + 0x1F0F, 0xA524, 0x1F10, 0x2526, 0x1F11, 0x2528, 0x1F12, 0xA52A, + 0x1F13, 0xA52C, 0x1F14, 0xA52E, 0x1F15, 0xA530, 0x1F18, 0x2532, + 0x1F19, 0x2534, 0x1F1A, 0xA536, 0x1F1B, 0xA538, 0x1F1C, 0xA53A, + 0x1F1D, 0xA53C, 0x1F20, 0x253E, 0x1F21, 0x2540, 0x1F22, 0xA542, + 0x1F23, 0xA544, 0x1F24, 0xA546, 0x1F25, 0xA548, 0x1F26, 0xA54A, + 0x1F27, 0xA54C, 0x1F28, 0x254E, 0x1F29, 0x2550, 0x1F2A, 0xA552, + 0x1F2B, 0xA554, 0x1F2C, 0xA556, 0x1F2D, 0xA558, 0x1F2E, 0xA55A, + 0x1F2F, 0xA55C, 0x1F30, 0x255E, 0x1F31, 0x2560, 0x1F32, 0xA562, + 0x1F33, 0xA564, 0x1F34, 0xA566, 0x1F35, 0xA568, 0x1F36, 0xA56A, + 0x1F37, 0xA56C, 0x1F38, 0x256E, 0x1F39, 0x2570, 0x1F3A, 0xA572, + 0x1F3B, 0xA574, 0x1F3C, 0xA576, 0x1F3D, 0xA578, 0x1F3E, 0xA57A, + 0x1F3F, 0xA57C, 0x1F40, 0x257E, 0x1F41, 0x2580, 0x1F42, 0xA582, + 0x1F43, 0xA584, 0x1F44, 0xA586, 0x1F45, 0xA588, 0x1F48, 0x258A, + 0x1F49, 0x258C, 0x1F4A, 0xA58E, 0x1F4B, 0xA590, 0x1F4C, 0xA592, + 0x1F4D, 0xA594, 0x1F50, 0x2596, 0x1F51, 0x2598, 0x1F52, 0xA59A, + 0x1F53, 0xA59C, 0x1F54, 0xA59E, 0x1F55, 
0xA5A0, 0x1F56, 0xA5A2, + 0x1F57, 0xA5A4, 0x1F59, 0x25A6, 0x1F5B, 0xA5A8, 0x1F5D, 0xA5AA, + 0x1F5F, 0xA5AC, 0x1F60, 0x25AE, 0x1F61, 0x25B0, 0x1F62, 0xA5B2, + 0x1F63, 0xA5B4, 0x1F64, 0xA5B6, 0x1F65, 0xA5B8, 0x1F66, 0xA5BA, + 0x1F67, 0xA5BC, 0x1F68, 0x25BE, 0x1F69, 0x25C0, 0x1F6A, 0xA5C2, + 0x1F6B, 0xA5C4, 0x1F6C, 0xA5C6, 0x1F6D, 0xA5C8, 0x1F6E, 0xA5CA, + 0x1F6F, 0xA5CC, 0x1F70, 0x25CE, 0x1F71, 0x93AC, 0x1F72, 0x25D0, + 0x1F73, 0x93AD, 0x1F74, 0x25D2, 0x1F75, 0x93AE, 0x1F76, 0x25D4, + 0x1F77, 0x93AF, 0x1F78, 0x25D6, 0x1F79, 0x93CC, 0x1F7A, 0x25D8, + 0x1F7B, 0x93CD, 0x1F7C, 0x25DA, 0x1F7D, 0x93CE, 0x1F80, 0xA5DC, + 0x1F81, 0xA5DE, 0x1F82, 0xA5E0, 0x1F83, 0xA5E2, 0x1F84, 0xA5E4, + 0x1F85, 0xA5E6, 0x1F86, 0xA5E8, 0x1F87, 0xA5EA, 0x1F88, 0xA5EC, + 0x1F89, 0xA5EE, 0x1F8A, 0xA5F0, 0x1F8B, 0xA5F2, 0x1F8C, 0xA5F4, + 0x1F8D, 0xA5F6, 0x1F8E, 0xA5F8, 0x1F8F, 0xA5FA, 0x1F90, 0xA5FC, + 0x1F91, 0xA5FE, 0x1F92, 0xA600, 0x1F93, 0xA602, 0x1F94, 0xA604, + 0x1F95, 0xA606, 0x1F96, 0xA608, 0x1F97, 0xA60A, 0x1F98, 0xA60C, + 0x1F99, 0xA60E, 0x1F9A, 0xA610, 0x1F9B, 0xA612, 0x1F9C, 0xA614, + 0x1F9D, 0xA616, 0x1F9E, 0xA618, 0x1F9F, 0xA61A, 0x1FA0, 0xA61C, + 0x1FA1, 0xA61E, 0x1FA2, 0xA620, 0x1FA3, 0xA622, 0x1FA4, 0xA624, + 0x1FA5, 0xA626, 0x1FA6, 0xA628, 0x1FA7, 0xA62A, 0x1FA8, 0xA62C, + 0x1FA9, 0xA62E, 0x1FAA, 0xA630, 0x1FAB, 0xA632, 0x1FAC, 0xA634, + 0x1FAD, 0xA636, 0x1FAE, 0xA638, 0x1FAF, 0xA63A, 0x1FB0, 0x263C, + 0x1FB1, 0x263E, 0x1FB2, 0xA640, 0x1FB3, 0x2642, 0x1FB4, 0xA644, + 0x1FB6, 0x2646, 0x1FB7, 0xA648, 0x1FB8, 0x264A, 0x1FB9, 0x264C, + 0x1FBA, 0x264E, 0x1FBB, 0x9386, 0x1FBC, 0x2650, 0x1FBE, 0x13B9, + 0x1FC1, 0x2652, 0x1FC2, 0xA654, 0x1FC3, 0x2656, 0x1FC4, 0xA658, + 0x1FC6, 0x265A, 0x1FC7, 0xA65C, 0x1FC8, 0x265E, 0x1FC9, 0x9388, + 0x1FCA, 0x2660, 0x1FCB, 0x9389, 0x1FCC, 0x2662, 0x1FCD, 0x2664, + 0x1FCE, 0x2666, 0x1FCF, 0x2668, 0x1FD0, 0x266A, 0x1FD1, 0x266C, + 0x1FD2, 0xA66E, 0x1FD3, 0x9390, 0x1FD6, 0x2670, 0x1FD7, 0xA672, + 0x1FD8, 0x2674, 0x1FD9, 0x2676, 0x1FDA, 0x2678, 0x1FDB, 0x938A, + 0x1FDD, 0x267A, 0x1FDE, 0x267C, 0x1FDF, 0x267E, 0x1FE0, 0x2680, + 0x1FE1, 0x2682, 0x1FE2, 0xA684, 0x1FE3, 0x93B0, 0x1FE4, 0x2686, + 0x1FE5, 0x2688, 0x1FE6, 0x268A, 0x1FE7, 0xA68C, 0x1FE8, 0x268E, + 0x1FE9, 0x2690, 0x1FEA, 0x2692, 0x1FEB, 0x938E, 0x1FEC, 0x2694, + 0x1FED, 0x2696, 0x1FEE, 0x9385, 0x1FEF, 0x1060, 0x1FF2, 0xA698, + 0x1FF3, 0x269A, 0x1FF4, 0xA69C, 0x1FF6, 0x269E, 0x1FF7, 0xA6A0, + 0x1FF8, 0x26A2, 0x1FF9, 0x938C, 0x1FFA, 0x26A4, 0x1FFB, 0x938F, + 0x1FFC, 0x26A6, 0x1FFD, 0x10B4, 0x304C, 0x26A8, 0x304E, 0x26AA, + 0x3050, 0x26AC, 0x3052, 0x26AE, 0x3054, 0x26B0, 0x3056, 0x26B2, + 0x3058, 0x26B4, 0x305A, 0x26B6, 0x305C, 0x26B8, 0x305E, 0x26BA, + 0x3060, 0x26BC, 0x3062, 0x26BE, 0x3065, 0x26C0, 0x3067, 0x26C2, + 0x3069, 0x26C4, 0x3070, 0x26C6, 0x3071, 0x26C8, 0x3073, 0x26CA, + 0x3074, 0x26CC, 0x3076, 0x26CE, 0x3077, 0x26D0, 0x3079, 0x26D2, + 0x307A, 0x26D4, 0x307C, 0x26D6, 0x307D, 0x26D8, 0x3094, 0x26DA, + 0x309E, 0x26DC, 0x30AC, 0x26DE, 0x30AE, 0x26E0, 0x30B0, 0x26E2, + 0x30B2, 0x26E4, 0x30B4, 0x26E6, 0x30B6, 0x26E8, 0x30B8, 0x26EA, + 0x30BA, 0x26EC, 0x30BC, 0x26EE, 0x30BE, 0x26F0, 0x30C0, 0x26F2, + 0x30C2, 0x26F4, 0x30C5, 0x26F6, 0x30C7, 0x26F8, 0x30C9, 0x26FA, + 0x30D0, 0x26FC, 0x30D1, 0x26FE, 0x30D3, 0x2700, 0x30D4, 0x2702, + 0x30D6, 0x2704, 0x30D7, 0x2706, 0x30D9, 0x2708, 0x30DA, 0x270A, + 0x30DC, 0x270C, 0x30DD, 0x270E, 0x30F4, 0x2710, 0x30F7, 0x2712, + 0x30F8, 0x2714, 0x30F9, 0x2716, 0x30FA, 0x2718, 0x30FE, 0x271A, + 0xFB1D, 0x271C, 0xFB1F, 0x271E, 0xFB2A, 0x2720, 0xFB2B, 0x2722, + 0xFB2C, 0xA724, 0xFB2D, 
0xA726, 0xFB2E, 0x2728, 0xFB2F, 0x272A, + 0xFB30, 0x272C, 0xFB31, 0x272E, 0xFB32, 0x2730, 0xFB33, 0x2732, + 0xFB34, 0x2734, 0xFB35, 0x2736, 0xFB36, 0x2738, 0xFB38, 0x273A, + 0xFB39, 0x273C, 0xFB3A, 0x273E, 0xFB3B, 0x2740, 0xFB3C, 0x2742, + 0xFB3E, 0x2744, 0xFB40, 0x2746, 0xFB41, 0x2748, 0xFB43, 0x274A, + 0xFB44, 0x274C, 0xFB46, 0x274E, 0xFB47, 0x2750, 0xFB48, 0x2752, + 0xFB49, 0x2754, 0xFB4A, 0x2756, 0xFB4B, 0x2758, 0xFB4C, 0x275A, + 0xFB4D, 0x275C, 0xFB4E, 0x275E }; static const u_int32_t __UniCharDecompositionTableLength = - (sizeof(__CFUniCharDecompositionTable) / (sizeof(u_int16_t) * 2)); + (sizeof(__CFUniCharDecompositionTable) / (sizeof(u_int16_t) * 2)); static const u_int16_t -__CFUniCharMultipleDecompositionTable[] = { + __CFUniCharMultipleDecompositionTable[] = { 0x0041, 0x0300, 0x0041, 0x0301, 0x0041, 0x0302, 0x0041, 0x0303, 0x0041, 0x0308, 0x0041, 0x030A, 0x0043, 0x0327, 0x0045, 0x0300, 0x0045, 0x0301, 0x0045, 0x0302, 0x0045, 0x0308, 0x0049, 0x0300, @@ -526,7 +526,7 @@ __CFUniCharMultipleDecompositionTable[] = { }; static const u_int8_t -__CFUniCharDecomposableBitmap[] = { + __CFUniCharDecomposableBitmap[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x06, 0x00, 0x00, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x00, 0x0C, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -635,7 +635,7 @@ __CFUniCharDecomposableBitmap[] = { }; static const u_int32_t -__CFUniCharPrecompSourceTable[] = { + __CFUniCharPrecompSourceTable[] = { 0x00000300, 0x00540000, 0x00000301, 0x00750054, 0x00000302, 0x002000C9, 0x00000303, 0x001C00E9, 0x00000304, 0x002C0105, 0x00000306, 0x00200131, @@ -666,11 +666,11 @@ __CFUniCharPrecompSourceTable[] = { }; static const u_int32_t __CFUniCharPrecompositionTableLength = - (sizeof(__CFUniCharPrecompSourceTable) / (sizeof(u_int32_t) * 2)); + (sizeof(__CFUniCharPrecompSourceTable) / (sizeof(u_int32_t) * 2)); static const u_int16_t -__CFUniCharBMPPrecompDestinationTable[] = { + __CFUniCharBMPPrecompDestinationTable[] = { 0x0041, 0x00C0, 0x0045, 0x00C8, 0x0049, 0x00CC, 0x004E, 0x01F8, 0x004F, 0x00D2, 0x0055, 0x00D9, 0x0057, 0x1E80, 0x0059, 0x1EF2, 0x0061, 0x00E0, 0x0065, 0x00E8, 0x0069, 0x00EC, 0x006E, 0x01F9, @@ -904,7 +904,7 @@ __CFUniCharBMPPrecompDestinationTable[] = { }; static const u_int8_t -__CFUniCharCombiningBitmap[] = { + __CFUniCharCombiningBitmap[] = { 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, @@ -1021,7 +1021,7 @@ __CFUniCharCombiningBitmap[] = { }; static const u_int8_t -__CFUniCharCombiningPropertyBitmap[] = { + __CFUniCharCombiningPropertyBitmap[] = { 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x00, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, @@ -1695,5 +1695,3 @@ __CFUniCharCombiningPropertyBitmap[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; - - diff --git a/bsd/vfs/vfs_vnops.c b/bsd/vfs/vfs_vnops.c index 6b03aa5a4..e6ffc2c72 100644 --- a/bsd/vfs/vfs_vnops.c +++ b/bsd/vfs/vfs_vnops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
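The table above is a flat array of (source character, mapping) u_int16_t pairs, and __UniCharDecompositionTableLength divides the array's byte size by the size of one pair. A small sketch of that arithmetic plus a lookup over such pairs; the binary search is an illustrative assumption, since the hunk shows only the data and the length formula, though the keys are visibly sorted:

#include <stdint.h>
#include <stdio.h>

/* A tiny stand-in for __CFUniCharDecompositionTable: (char, mapping)
 * pairs flattened into one array, sorted by the first element. */
static const uint16_t demo_table[] = {
	0x00C0, 0x2000, 0x00C1, 0x2002, 0x00C2, 0x2004, 0x00C3, 0x2006,
};

/* Same formula as __UniCharDecompositionTableLength above:
 * total bytes divided by the bytes in one pair. */
static const uint32_t demo_len =
    sizeof(demo_table) / (sizeof(uint16_t) * 2);

/* Hypothetical lookup (not shown in the hunk): binary search on the
 * first element of each pair; 0 means "not found" in this sketch. */
static uint16_t
demo_lookup(uint16_t key)
{
	uint32_t lo = 0, hi = demo_len;

	while (lo < hi) {
		uint32_t mid = (lo + hi) / 2;
		uint16_t k = demo_table[mid * 2];

		if (k == key) {
			return demo_table[mid * 2 + 1];
		} else if (k < key) {
			lo = mid + 1;
		} else {
			hi = mid;
		}
	}
	return 0;
}

int
main(void)
{
	printf("pairs: %u\n", (unsigned)demo_len);                   /* 4 */
	printf("0x00C2 -> 0x%04x\n", (unsigned)demo_lookup(0x00C2)); /* 0x2004 */
	return 0;
}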
You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
@@ -90,7 +90,7 @@
 #define ubc_setcred ubc_setcred_deprecated
 #include
 #undef ubc_setcred
-int ubc_setcred(struct vnode *, struct proc *);
+int     ubc_setcred(struct vnode *, struct proc *);
 #include
 #include
 #include
@@ -116,22 +116,22 @@ int ubc_setcred(struct vnode *, struct proc *);
 static int vn_closefile(struct fileglob *fp, vfs_context_t ctx);
 static int vn_ioctl(struct fileproc *fp, u_long com, caddr_t data,
-		vfs_context_t ctx);
+    vfs_context_t ctx);
 static int vn_read(struct fileproc *fp, struct uio *uio, int flags,
-		vfs_context_t ctx);
+    vfs_context_t ctx);
 static int vn_write(struct fileproc *fp, struct uio *uio, int flags,
-		vfs_context_t ctx);
+    vfs_context_t ctx);
 static int vn_select( struct fileproc *fp, int which, void * wql,
-		vfs_context_t ctx);
+    vfs_context_t ctx);
 static int vn_kqfilt_add(struct fileproc *fp, struct knote *kn,
-		struct kevent_internal_s *kev, vfs_context_t ctx);
+    struct kevent_internal_s *kev, vfs_context_t ctx);
 static void filt_vndetach(struct knote *kn);
 static int filt_vnode(struct knote *kn, long hint);
 static int filt_vnode_common(struct knote *kn, vnode_t vp, long hint);
 static int vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx);
 #if 0
 static int vn_kqfilt_remove(struct vnode *vp, uintptr_t ident,
-		vfs_context_t ctx);
+    vfs_context_t ctx);
 #endif
 
 const struct fileops vnops = {
@@ -166,7 +166,7 @@ SECURITY_READ_ONLY_EARLY(struct filterops) vnode_filtops = {
 int
 vn_open(struct nameidata *ndp, int fmode, int cmode)
 {
-	return(vn_open_modflags(ndp, &fmode, cmode));
+	return vn_open_modflags(ndp, &fmode, cmode);
 }
 
 int
@@ -176,8 +176,8 @@ vn_open_modflags(struct nameidata *ndp, int *fmodep, int cmode)
 	VATTR_INIT(&va);
 	VATTR_SET(&va, va_mode, cmode);
-	
-	return(vn_open_auth(ndp, fmodep, &va));
+
+	return vn_open_auth(ndp, fmodep, &va);
 }
 
 static int
@@ -189,25 +189,24 @@ vn_open_auth_finish(vnode_t vp, int fmode, vfs_context_t ctx)
 		goto bad;
 	}
 
-	/* Call out to allow 3rd party notification of open. 
+	/* Call out to allow 3rd party notification of open.
 	 * Ignore result of kauth_authorize_fileop call.
 	 */
 #if CONFIG_MACF
 	mac_vnode_notify_open(ctx, vp, fmode);
 #endif
-	kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN,
-		(uintptr_t)vp, 0);
+	kauth_authorize_fileop(vfs_context_ucred(ctx), KAUTH_FILEOP_OPEN,
+	    (uintptr_t)vp, 0);
 
 	return 0;
 
 bad:
 	return error;
-
 }
 
 /*
  * May do nameidone() to allow safely adding an FSEvent.  Cue off of ni_dvp to
- * determine whether that has happened. 
+ * determine whether that has happened.
*/ static int vn_open_auth_do_create(struct nameidata *ndp, struct vnode_attr *vap, int fmode, boolean_t *did_create, boolean_t *did_open, vfs_context_t ctx) @@ -222,72 +221,79 @@ vn_open_auth_do_create(struct nameidata *ndp, struct vnode_attr *vap, int fmode, *did_open = FALSE; VATTR_SET(vap, va_type, VREG); - if (fmode & O_EXCL) + if (fmode & O_EXCL) { vap->va_vaflags |= VA_EXCLUSIVE; + } #if NAMEDRSRCFORK if (ndp->ni_cnd.cn_flags & CN_WANTSRSRCFORK) { - if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0) + if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0) { goto out; - if ((error = vnode_makenamedstream(dvp, &ndp->ni_vp, XATTR_RESOURCEFORK_NAME, 0, ctx)) != 0) + } + if ((error = vnode_makenamedstream(dvp, &ndp->ni_vp, XATTR_RESOURCEFORK_NAME, 0, ctx)) != 0) { goto out; + } *did_create = TRUE; } else { #endif - if (!batched) { - if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0) - goto out; + if (!batched) { + if ((error = vn_authorize_create(dvp, &ndp->ni_cnd, vap, ctx, NULL)) != 0) { + goto out; } + } - error = vn_create(dvp, &ndp->ni_vp, ndp, vap, VN_CREATE_DOOPEN, fmode, &status, ctx); - if (error != 0) { - if (batched) { - *did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? TRUE : FALSE; - } else { - *did_create = FALSE; - } - - if (error == EKEEPLOOKING) { - if (*did_create) { - panic("EKEEPLOOKING, but we did a create?"); - } - if (!batched) { - panic("EKEEPLOOKING from filesystem that doesn't support compound vnops?"); - } - if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) { - panic("EKEEPLOOKING, but continue flag not set?"); - } + error = vn_create(dvp, &ndp->ni_vp, ndp, vap, VN_CREATE_DOOPEN, fmode, &status, ctx); + if (error != 0) { + if (batched) { + *did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? TRUE : FALSE; + } else { + *did_create = FALSE; + } - /* - * Do NOT drop the dvp: we need everything to continue the lookup. - */ - return error; + if (error == EKEEPLOOKING) { + if (*did_create) { + panic("EKEEPLOOKING, but we did a create?"); } - } else { - if (batched) { - *did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? 1 : 0; - *did_open = TRUE; - } else { - *did_create = TRUE; + if (!batched) { + panic("EKEEPLOOKING from filesystem that doesn't support compound vnops?"); + } + if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) { + panic("EKEEPLOOKING, but continue flag not set?"); } + + /* + * Do NOT drop the dvp: we need everything to continue the lookup. + */ + return error; + } + } else { + if (batched) { + *did_create = (status & COMPOUND_OPEN_STATUS_DID_CREATE) ? 
1 : 0; + *did_open = TRUE; + } else { + *did_create = TRUE; } -#if NAMEDRSRCFORK } +#if NAMEDRSRCFORK +} #endif vp = ndp->ni_vp; if (*did_create) { - int update_flags = 0; + int update_flags = 0; // Make sure the name & parent pointers are hooked up - if (vp->v_name == NULL) + if (vp->v_name == NULL) { update_flags |= VNODE_UPDATE_NAME; - if (vp->v_parent == NULLVP) + } + if (vp->v_parent == NULLVP) { update_flags |= VNODE_UPDATE_PARENT; + } - if (update_flags) + if (update_flags) { vnode_update_identity(vp, dvp, ndp->ni_cnd.cn_nameptr, ndp->ni_cnd.cn_namelen, ndp->ni_cnd.cn_hash, update_flags); + } vnode_put(dvp); ndp->ni_dvp = NULLVP; @@ -295,8 +301,8 @@ vn_open_auth_do_create(struct nameidata *ndp, struct vnode_attr *vap, int fmode, #if CONFIG_FSE if (need_fsevent(FSE_CREATE_FILE, vp)) { add_fsevent(FSE_CREATE_FILE, ctx, - FSE_ARG_VNODE, vp, - FSE_ARG_DONE); + FSE_ARG_VNODE, vp, + FSE_ARG_DONE); } #endif } @@ -313,7 +319,7 @@ out: * This is the number of times we'll loop in vn_open_auth without explicitly * yielding the CPU when we determine we have to retry. */ -#define RETRY_NO_YIELD_COUNT 5 +#define RETRY_NO_YIELD_COUNT 5 /* * Open a file with authorization, updating the contents of the structures @@ -387,8 +393,8 @@ again: origcnflags = ndp->ni_cnd.cn_flags; // If raw encrypted mode is requested, handle that here - if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags) - && ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) { + if (VATTR_IS_ACTIVE(vap, va_dataprotect_flags) + && ISSET(vap->va_dataprotect_flags, VA_DP_RAWENCRYPTED)) { fmode |= FENCRYPTED; } @@ -396,8 +402,8 @@ again: * O_CREAT */ if (fmode & O_CREAT) { - if ( (fmode & O_DIRECTORY) ) { - error = EINVAL; + if ((fmode & O_DIRECTORY)) { + error = EINVAL; goto out; } ndp->ni_cnd.cn_nameiop = CREATE; @@ -412,12 +418,14 @@ again: /* open calls are allowed for resource forks. */ ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK; #endif - if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0 && (origcnflags & FOLLOW) != 0) + if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0 && (origcnflags & FOLLOW) != 0) { ndp->ni_cnd.cn_flags |= FOLLOW; + } continue_create_lookup: - if ( (error = namei(ndp)) ) + if ((error = namei(ndp))) { goto out; + } dvp = ndp->ni_dvp; vp = ndp->ni_vp; @@ -442,7 +450,7 @@ continue_create_lookup: dvp = ndp->ni_dvp; vp = ndp->ni_vp; - /* + /* * Detected a node that the filesystem couldn't handle. Don't call * nameidone() yet, because we need that path buffer. */ @@ -462,22 +470,23 @@ continue_create_lookup: /* * Check for a create race. */ - if ((error == EEXIST) && !(fmode & O_EXCL)){ - if (vp) + if ((error == EEXIST) && !(fmode & O_EXCL)) { + if (vp) { vnode_put(vp); + } goto again; } goto bad; } need_vnop_open = !did_open; - } - else { - if (fmode & O_EXCL) + } else { + if (fmode & O_EXCL) { error = EEXIST; + } - /* - * We have a vnode. Use compound open if available + /* + * We have a vnode. Use compound open if available * or else fall through to "traditional" path. Note: can't * do a compound open for root, because the parent belongs * to a different FS. @@ -493,7 +502,7 @@ continue_create_lookup: panic("EKEEPLOOKING, but continue flag not set?"); } goto continue_create_lookup; - } + } } nameidone(ndp); vnode_put(dvp); @@ -507,8 +516,7 @@ continue_create_lookup: /* Fall through */ } - } - else { + } else { /* * Not O_CREAT */ @@ -520,8 +528,9 @@ continue_create_lookup: /* open calls are allowed for resource forks. 
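The create-race handling above retries the whole lookup ("goto again") when a racing creator makes the create fail with EEXIST, unless the caller demanded exclusivity, in which case EEXIST is the answer. User space sees exactly these semantics through open(2); a small illustration with a hypothetical path:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *path = "/tmp/vn_open_demo"; /* hypothetical path */

	/* O_CREAT|O_EXCL: creation must happen in this call, so a
	 * racing creator surfaces as EEXIST rather than a silent open. */
	int fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0644);
	if (fd < 0 && errno == EEXIST) {
		/* Without O_EXCL, the kernel path above just retries the
		 * lookup and opens whatever file won the race. */
		fd = open(path, O_CREAT | O_WRONLY, 0644);
	}
	if (fd < 0) {
		perror("open");
		return 1;
	}
	close(fd);
	unlink(path);
	return 0;
}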
*/ ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK; #endif - if (fmode & FENCRYPTED) + if (fmode & FENCRYPTED) { ndp->ni_cnd.cn_flags |= CN_RAW_ENCRYPTED | CN_SKIPNAMECACHE; + } ndp->ni_flag = NAMEI_COMPOUNDOPEN; /* preserve NOFOLLOW from vnode_open() */ @@ -531,8 +540,9 @@ continue_create_lookup: /* Do a lookup, possibly going directly to filesystem for compound operation */ do { - if ( (error = namei(ndp)) ) + if ((error = namei(ndp))) { goto out; + } vp = ndp->ni_vp; dvp = ndp->ni_dvp; @@ -560,7 +570,7 @@ continue_create_lookup: } } - /* + /* * By this point, nameidone() is called, dvp iocount is dropped, * and dvp pointer is cleared. */ @@ -582,7 +592,7 @@ continue_create_lookup: #endif /* DEVELOPMENT || DEBUG */ /* - * Expect to use this code for filesystems without compound VNOPs, for the root + * Expect to use this code for filesystems without compound VNOPs, for the root * of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(), * and for shadow files, which do not live on the same filesystems as their "parents." */ @@ -598,8 +608,8 @@ continue_create_lookup: } } - if (VATTR_IS_ACTIVE (vap, va_dataprotect_flags) - && ISSET(vap->va_dataprotect_flags, VA_DP_RAWUNENCRYPTED)) { + if (VATTR_IS_ACTIVE(vap, va_dataprotect_flags) + && ISSET(vap->va_dataprotect_flags, VA_DP_RAWUNENCRYPTED)) { /* Don't allow unencrypted io request from user space unless entitled */ boolean_t entitled = FALSE; #if !SECURE_KERNEL @@ -641,11 +651,12 @@ continue_create_lookup: } /* Compound VNOP open is responsible for doing the truncate */ - if (batched || did_create) + if (batched || did_create) { fmode &= ~O_TRUNC; + } *fmodep = fmode; - return (0); + return 0; bad: /* Opened either explicitly or by a batched create */ @@ -658,9 +669,9 @@ bad: #if NAMEDRSRCFORK /* Aggressively recycle shadow files if we error'd out during open() */ if ((vnode_isnamedstream(vp)) && - (vp->v_parent != NULLVP) && - (vnode_isshadow(vp))) { - vnode_recycle(vp); + (vp->v_parent != NULLVP) && + (vnode_isshadow(vp))) { + vnode_recycle(vp); } #endif vnode_put(vp); @@ -691,14 +702,14 @@ bad: if (nretries > RETRY_NO_YIELD_COUNT) { /* Every hz/100 secs is 10 msecs ... */ tsleep(&nretries, PVFS, "vn_open_auth_retry", - MIN((nretries * (hz/100)), hz)); + MIN((nretries * (hz / 100)), hz)); } goto again; } } out: - return (error); + return error; } #if vn_access_DEPRECATED @@ -714,19 +725,22 @@ out: int vn_access(vnode_t vp, int mode, vfs_context_t context) { - kauth_action_t action; - - action = 0; - if (mode & VREAD) - action |= KAUTH_VNODE_READ_DATA; - if (mode & VWRITE) + kauth_action_t action; + + action = 0; + if (mode & VREAD) { + action |= KAUTH_VNODE_READ_DATA; + } + if (mode & VWRITE) { action |= KAUTH_VNODE_WRITE_DATA; - if (mode & VEXEC) - action |= KAUTH_VNODE_EXECUTE; - - return(vnode_authorize(vp, NULL, action, context)); + } + if (mode & VEXEC) { + action |= KAUTH_VNODE_EXECUTE; + } + + return vnode_authorize(vp, NULL, action, context); } -#endif /* vn_access_DEPRECATED */ +#endif /* vn_access_DEPRECATED */ /* * Vnode close call @@ -739,7 +753,7 @@ vn_close(struct vnode *vp, int flags, vfs_context_t ctx) #if NAMEDRSRCFORK /* Sync data from resource fork shadow file if needed. 
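Once vn_open_auth has retried more than RETRY_NO_YIELD_COUNT (5) times, the tsleep() call above backs off by roughly 10 ms per retry, capped at one second. A sketch of just that arithmetic, assuming the conventional hz = 100 tick rate for illustration:

#include <stdio.h>

#define RETRY_NO_YIELD_COUNT 5 /* same constant as the hunk above */

/* MIN((nretries * (hz / 100)), hz): about 10 ms worth of ticks per
 * retry, but never more than one second's worth. */
static int
backoff_ticks(int nretries, int hz)
{
	int t = nretries * (hz / 100);

	return t < hz ? t : hz;
}

int
main(void)
{
	int hz = 100; /* assumed tick rate, for illustration only */

	for (int n = RETRY_NO_YIELD_COUNT + 1; n <= 8; n++) {
		printf("retry %d: sleep %d tick(s)\n", n, backoff_ticks(n, hz));
	}
	return 0;
}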
*/ - if ((vp->v_flag & VISNAMEDSTREAM) && + if ((vp->v_flag & VISNAMEDSTREAM) && (vp->v_parent != NULLVP) && vnode_isshadow(vp)) { if (flags & FWASWRITTEN) { @@ -747,10 +761,11 @@ vn_close(struct vnode *vp, int flags, vfs_context_t ctx) } } #endif - + /* work around for foxhound */ - if (vnode_isspec(vp)) + if (vnode_isspec(vp)) { (void)vnode_rele_ext(vp, flags, 0); + } /* * On HFS, we flush when the last writer closes. We do this @@ -761,40 +776,42 @@ vn_close(struct vnode *vp, int flags, vfs_context_t ctx) * Note that it's OK to access v_writecount without the lock * in this context. */ - if (vp->v_tag == VT_HFS && (flags & FWRITE) && vp->v_writecount == 1) + if (vp->v_tag == VT_HFS && (flags & FWRITE) && vp->v_writecount == 1) { VNOP_FSYNC(vp, MNT_NOWAIT, ctx); + } error = VNOP_CLOSE(vp, flags, ctx); #if CONFIG_FSE if (flags & FWASWRITTEN) { - if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) { - add_fsevent(FSE_CONTENT_MODIFIED, ctx, - FSE_ARG_VNODE, vp, - FSE_ARG_DONE); + if (need_fsevent(FSE_CONTENT_MODIFIED, vp)) { + add_fsevent(FSE_CONTENT_MODIFIED, ctx, + FSE_ARG_VNODE, vp, + FSE_ARG_DONE); } } #endif - if (!vnode_isspec(vp)) + if (!vnode_isspec(vp)) { (void)vnode_rele_ext(vp, flags, 0); - + } + if (flusherror) { error = flusherror; } - return (error); + return error; } static int vn_read_swapfile( - struct vnode *vp, - uio_t uio) + struct vnode *vp, + uio_t uio) { - int error; - off_t swap_count, this_count; - off_t file_end, read_end; - off_t prev_resid; - char *my_swap_page; + int error; + off_t swap_count, this_count; + off_t file_end, read_end; + off_t prev_resid; + char *my_swap_page; /* * Reading from a swap file will get you zeroes. @@ -817,10 +834,10 @@ vn_read_swapfile( while (swap_count > 0) { if (my_swap_page == NULL) { MALLOC(my_swap_page, char *, PAGE_SIZE, - M_TEMP, M_WAITOK); + M_TEMP, M_WAITOK); memset(my_swap_page, '\0', PAGE_SIZE); /* add an end-of-line to keep line counters happy */ - my_swap_page[PAGE_SIZE-1] = '\n'; + my_swap_page[PAGE_SIZE - 1] = '\n'; } this_count = swap_count; if (this_count > PAGE_SIZE) { @@ -829,8 +846,8 @@ vn_read_swapfile( prev_resid = uio_resid(uio); error = uiomove((caddr_t) my_swap_page, - this_count, - uio); + this_count, + uio); if (error) { break; } @@ -861,17 +878,17 @@ vn_rdwr( { int64_t resid; int result; - + result = vn_rdwr_64(rw, - vp, - (uint64_t)(uintptr_t)base, - (int64_t)len, - offset, - segflg, - ioflg, - cred, - &resid, - p); + vp, + (uint64_t)(uintptr_t)base, + (int64_t)len, + offset, + segflg, + ioflg, + cred, + &resid, + p); /* "resid" should be bounded above by "len," which is an int */ if (aresid != NULL) { @@ -898,33 +915,33 @@ vn_rdwr_64( uio_t auio; int spacetype; struct vfs_context context; - int error=0; - char uio_buf[ UIO_SIZEOF(1) ]; + int error = 0; + char uio_buf[UIO_SIZEOF(1)]; context.vc_thread = current_thread(); context.vc_ucred = cred; if (UIO_SEG_IS_USER_SPACE(segflg)) { spacetype = proc_is64bit(p) ? UIO_USERSPACE64 : UIO_USERSPACE32; - } - else { + } else { spacetype = UIO_SYSSPACE; } - auio = uio_createwithbuffer(1, offset, spacetype, rw, - &uio_buf[0], sizeof(uio_buf)); + auio = uio_createwithbuffer(1, offset, spacetype, rw, + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, base, len); #if CONFIG_MACF /* XXXMAC - * IO_NOAUTH should be re-examined. - * Likely that mediation should be performed in caller. + * IO_NOAUTH should be re-examined. + * Likely that mediation should be performed in caller. 
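vn_read_swapfile() above satisfies reads of a swap file from a zeroed scratch page whose last byte is a newline, so callers see zeroes (plus sane line counts) instead of swapped-out data. A user-space sketch of the same fill pattern, with 4096 standing in for the kernel's PAGE_SIZE:

#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 4096 /* stand-in for the kernel's PAGE_SIZE */

/* Fill a buffer the way vn_read_swapfile() fills its scratch page:
 * all zeroes, with a newline as the last byte of each page. */
static void
swap_read_fill(char *dst, size_t len)
{
	memset(dst, '\0', len);
	for (size_t end = DEMO_PAGE_SIZE - 1; end < len; end += DEMO_PAGE_SIZE) {
		dst[end] = '\n';
	}
}

int
main(void)
{
	static char buf[2 * DEMO_PAGE_SIZE];
	int nl = 0;

	swap_read_fill(buf, sizeof(buf));
	for (size_t i = 0; i < sizeof(buf); i++) {
		nl += (buf[i] == '\n');
	}
	printf("newlines in %zu bytes: %d\n", sizeof(buf), nl); /* 2 */
	return 0;
}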
*/ if ((ioflg & IO_NOAUTH) == 0) { - /* passed cred is fp->f_cred */ - if (rw == UIO_READ) + /* passed cred is fp->f_cred */ + if (rw == UIO_READ) { error = mac_vnode_check_read(&context, cred, vp); - else + } else { error = mac_vnode_check_write(&context, cred, vp); + } } #endif @@ -936,12 +953,11 @@ vn_rdwr_64( error = VNOP_READ(vp, auio, ioflg, &context); } } else { - #if DEVELOPMENT || DEBUG /* - * XXX VSWAP: Check for entitlements or special flag here - * so we can restrict access appropriately. - */ + * XXX VSWAP: Check for entitlements or special flag here + * so we can restrict access appropriately. + */ error = VNOP_WRITE(vp, auio, ioflg, &context); #else /* DEVELOPMENT || DEBUG */ @@ -954,12 +970,12 @@ vn_rdwr_64( } } - if (aresid) + if (aresid) { *aresid = uio_resid(auio); - else - if (uio_resid(auio) && error == 0) - error = EIO; - return (error); + } else if (uio_resid(auio) && error == 0) { + error = EIO; + } + return error; } static inline void @@ -1004,25 +1020,27 @@ vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) int offset_locked = 0; vp = (struct vnode *)fp->f_fglob->fg_data; - if ( (error = vnode_getwithref(vp)) ) { - return(error); + if ((error = vnode_getwithref(vp))) { + return error; } #if CONFIG_MACF error = mac_vnode_check_read(ctx, vfs_context_ucred(ctx), vp); if (error) { (void)vnode_put(vp); - return (error); + return error; } #endif /* This signals to VNOP handlers that this read came from a file table read */ ioflag = IO_SYSCALL_DISPATCH; - if (fp->f_fglob->fg_flag & FNONBLOCK) + if (fp->f_fglob->fg_flag & FNONBLOCK) { ioflag |= IO_NDELAY; - if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) - ioflag |= IO_NOCACHE; + } + if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) { + ioflag |= IO_NOCACHE; + } if (fp->f_fglob->fg_flag & FENCRYPTED) { ioflag |= IO_ENCRYPTED; } @@ -1032,8 +1050,9 @@ vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) if (fp->f_fglob->fg_flag & O_EVTONLY) { ioflag |= IO_EVTONLY; } - if (fp->f_fglob->fg_flag & FNORDAHEAD) - ioflag |= IO_RAOFF; + if (fp->f_fglob->fg_flag & FNORDAHEAD) { + ioflag |= IO_RAOFF; + } if ((flags & FOF_OFFSET) == 0) { if ((vnode_vtype(vp) == VREG) && !vnode_isswap(vp)) { @@ -1045,7 +1064,6 @@ vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) count = uio_resid(uio); if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) { - /* special case for swap files */ error = vn_read_swapfile(vp, uio); } else { @@ -1061,7 +1079,7 @@ vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) } (void)vnode_put(vp); - return (error); + return error; } @@ -1075,15 +1093,15 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) int error, ioflag; off_t count; int clippedsize = 0; - int partialwrite=0; + int partialwrite = 0; int residcount, oldcount; int offset_locked = 0; proc_t p = vfs_context_proc(ctx); count = 0; vp = (struct vnode *)fp->f_fglob->fg_data; - if ( (error = vnode_getwithref(vp)) ) { - return(error); + if ((error = vnode_getwithref(vp))) { + return error; } #if DEVELOPMENT || DEBUG @@ -1096,7 +1114,7 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) if (vnode_isswap(vp)) { (void)vnode_put(vp); error = EPERM; - return (error); + return error; } #endif /* DEVELOPMENT || DEBUG */ @@ -1105,7 +1123,7 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp); if 
(error) { (void)vnode_put(vp); - return (error); + return error; } #endif @@ -1115,18 +1133,24 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) */ ioflag = (IO_UNIT | IO_SYSCALL_DISPATCH); - if (vp->v_type == VREG && (fp->f_fglob->fg_flag & O_APPEND)) + if (vp->v_type == VREG && (fp->f_fglob->fg_flag & O_APPEND)) { ioflag |= IO_APPEND; - if (fp->f_fglob->fg_flag & FNONBLOCK) + } + if (fp->f_fglob->fg_flag & FNONBLOCK) { ioflag |= IO_NDELAY; - if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) - ioflag |= IO_NOCACHE; - if (fp->f_fglob->fg_flag & FNODIRECT) + } + if ((fp->f_fglob->fg_flag & FNOCACHE) || vnode_isnocache(vp)) { + ioflag |= IO_NOCACHE; + } + if (fp->f_fglob->fg_flag & FNODIRECT) { ioflag |= IO_NODIRECT; - if (fp->f_fglob->fg_flag & FSINGLE_WRITER) + } + if (fp->f_fglob->fg_flag & FSINGLE_WRITER) { ioflag |= IO_SINGLE_WRITER; - if (fp->f_fglob->fg_flag & O_EVTONLY) + } + if (fp->f_fglob->fg_flag & O_EVTONLY) { ioflag |= IO_EVTONLY; + } /* * Treat synchronous mounts and O_FSYNC on the fd as equivalent. @@ -1135,8 +1159,8 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) * XXX the non-essential metadata without some additional VFS work; * XXX the intent at this point is to plumb the interface for it. */ - if ((fp->f_fglob->fg_flag & (O_FSYNC|O_DSYNC)) || - (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) { + if ((fp->f_fglob->fg_flag & (O_FSYNC | O_DSYNC)) || + (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS))) { ioflag |= IO_SYNC; } @@ -1149,10 +1173,10 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) count = uio_resid(uio); } if (((flags & FOF_OFFSET) == 0) && - vfs_context_proc(ctx) && (vp->v_type == VREG) && - (((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) || - ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)))) { - /* + vfs_context_proc(ctx) && (vp->v_type == VREG) && + (((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) || + ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)))) { + /* * If the requested residual would cause us to go past the * administrative limit, then we need to adjust the residual * down to cause fewer bytes than requested to be written. If @@ -1160,7 +1184,7 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) * then we fail the write with EFBIG. 
*/ residcount = uio_resid(uio); - if ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) { + if ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur) { clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur; } else if ((rlim_t)uio_resid(uio) > (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset)) { clippedsize = (p->p_rlimit[RLIMIT_FSIZE].rlim_cur - uio->uio_offset); @@ -1171,24 +1195,24 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) goto error_out; } partialwrite = 1; - uio_setresid(uio, residcount-clippedsize); + uio_setresid(uio, residcount - clippedsize); } if ((flags & FOF_OFFSET) != 0) { /* for pwrite, append should be ignored */ ioflag &= ~IO_APPEND; if (p && (vp->v_type == VREG) && - ((rlim_t)uio->uio_offset >= p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) { - psignal(p, SIGXFSZ); - error = EFBIG; - goto error_out; - } + ((rlim_t)uio->uio_offset >= p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) { + psignal(p, SIGXFSZ); + error = EFBIG; + goto error_out; + } if (p && (vp->v_type == VREG) && - ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) { + ((rlim_t)(uio->uio_offset + uio_resid(uio)) > p->p_rlimit[RLIMIT_FSIZE].rlim_cur)) { //Debugger("vn_bwrite:overstepping the bounds"); residcount = uio_resid(uio); clippedsize = (uio->uio_offset + uio_resid(uio)) - p->p_rlimit[RLIMIT_FSIZE].rlim_cur; partialwrite = 1; - uio_setresid(uio, residcount-clippedsize); + uio_setresid(uio, residcount - clippedsize); } } @@ -1200,10 +1224,11 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) } if ((flags & FOF_OFFSET) == 0) { - if (ioflag & IO_APPEND) + if (ioflag & IO_APPEND) { fp->f_fglob->fg_offset = uio->uio_offset; - else + } else { fp->f_fglob->fg_offset += count - uio_resid(uio); + } if (offset_locked) { vn_offset_unlock(fp->f_fglob); offset_locked = 0; @@ -1214,7 +1239,7 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) * Set the credentials on successful writes */ if ((error == 0) && (vp->v_tag == VT_NFS) && (UBCINFOEXISTS(vp))) { - /* + /* * When called from aio subsystem, we only have the proc from * which to get the credential, at this point, so use that * instead. 
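The RLIMIT_FSIZE handling above clips the write's residual so the file never grows past the administrative limit, and fails with EFBIG (plus SIGXFSZ) only when not even one byte fits. A sketch of the clipping arithmetic, with plain integers standing in for the uio and rlim types:

#include <stdint.h>
#include <stdio.h>

/* Mirror the clipping in vn_write(): given the offset, the requested
 * residual and the RLIMIT_FSIZE cap, return how many bytes may be
 * written, or -1 for the EFBIG case (nothing at all fits). */
static int64_t
clip_resid(int64_t offset, int64_t resid, int64_t rlim_cur)
{
	if (offset + resid > rlim_cur) {
		int64_t clipped = (offset + resid) - rlim_cur;

		if (clipped >= resid) {
			return -1; /* kernel: psignal(SIGXFSZ) + EFBIG */
		}
		return resid - clipped; /* the residcount - clippedsize case */
	}
	return resid; /* under the limit: write everything */
}

int
main(void)
{
	/* 100-byte limit, 50 bytes at offset 80: only 20 bytes fit. */
	printf("%lld\n", (long long)clip_resid(80, 50, 100));  /* 20 */
	/* Offset already at the limit: the EFBIG case. */
	printf("%lld\n", (long long)clip_resid(100, 50, 100)); /* -1 */
	return 0;
}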
This means aio functions are incompatible with @@ -1229,14 +1254,14 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx) } } (void)vnode_put(vp); - return (error); + return error; error_out: if (offset_locked) { vn_offset_unlock(fp->f_fglob); } (void)vnode_put(vp); - return (error); + return error; } /* @@ -1249,19 +1274,20 @@ error_out: */ int vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat64, - vfs_context_t ctx, struct ucred *file_cred) + vfs_context_t ctx, struct ucred *file_cred) { struct vnode_attr va; int error; u_short mode; kauth_filesec_t fsec; - struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */ + struct stat *sb = (struct stat *)0; /* warning avoidance ; protected by isstat64 */ struct stat64 * sb64 = (struct stat64 *)0; /* warning avoidance ; protected by isstat64 */ - if (isstat64 != 0) + if (isstat64 != 0) { sb64 = (struct stat64 *)sbptr; - else + } else { sb = (struct stat *)sbptr; + } memset(&va, 0, sizeof(va)); VATTR_INIT(&va); VATTR_WANTED(&va, va_fsid); @@ -1288,8 +1314,9 @@ vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat6 VATTR_WANTED(&va, va_acl); } error = vnode_getattr(vp, &va, ctx); - if (error) + if (error) { goto out; + } #if CONFIG_MACF /* * Give MAC polices a chance to reject or filter the attributes @@ -1300,8 +1327,9 @@ vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat6 * to change the values of attributes retrieved. */ error = mac_vnode_check_getattr(ctx, file_cred, vp, &va); - if (error) + if (error) { goto out; + } #endif /* * Copy from vattr table @@ -1309,7 +1337,6 @@ vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat6 if (isstat64 != 0) { sb64->st_dev = va.va_fsid; sb64->st_ino = (ino64_t)va.va_fileid; - } else { sb->st_dev = va.va_fsid; sb->st_ino = (ino_t)va.va_fileid; @@ -1340,7 +1367,8 @@ vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat6 default: error = EBADF; goto out; - }; + } + ; if (isstat64 != 0) { sb64->st_mode = mode; sb64->st_nlink = VATTR_IS_SUPPORTED(&va, va_nlink) ? va.va_nlink > UINT16_MAX ? 
UINT16_MAX : (u_int16_t)va.va_nlink : 1; @@ -1381,7 +1409,6 @@ vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat6 !VATTR_IS_SUPPORTED(&va, va_guuid)) { *xsec = KAUTH_FILESEC_NONE; } else { - if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) { fsec = kauth_filesec_alloc(va.va_acl->acl_entrycount); } else { @@ -1410,25 +1437,28 @@ vn_stat_noauth(struct vnode *vp, void *sbptr, kauth_filesec_t *xsec, int isstat6 *xsec = fsec; } } - + /* Do not give the generation number out to unpriviledged users */ if (va.va_gen && !vfs_context_issuser(ctx)) { - if (isstat64 != 0) - sb64->st_gen = 0; - else - sb->st_gen = 0; + if (isstat64 != 0) { + sb64->st_gen = 0; + } else { + sb->st_gen = 0; + } } else { - if (isstat64 != 0) - sb64->st_gen = va.va_gen; - else + if (isstat64 != 0) { + sb64->st_gen = va.va_gen; + } else { sb->st_gen = va.va_gen; + } } error = 0; out: - if (VATTR_IS_SUPPORTED(&va, va_acl) && va.va_acl != NULL) + if (VATTR_IS_SUPPORTED(&va, va_acl) && va.va_acl != NULL) { kauth_acl_free(va.va_acl); - return (error); + } + return error; } int @@ -1438,16 +1468,18 @@ vn_stat(struct vnode *vp, void *sb, kauth_filesec_t *xsec, int isstat64, vfs_con #if CONFIG_MACF error = mac_vnode_check_stat(ctx, NOCRED, vp); - if (error) - return (error); + if (error) { + return error; + } #endif /* authorize */ - if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, ctx)) != 0) - return(error); + if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_ATTRIBUTES | KAUTH_VNODE_READ_SECURITY, ctx)) != 0) { + return error; + } /* actual stat */ - return(vn_stat_noauth(vp, sb, xsec, isstat64, ctx, NOCRED)); + return vn_stat_noauth(vp, sb, xsec, isstat64, ctx, NOCRED); } @@ -1462,30 +1494,32 @@ vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) int error; struct vnode *ttyvp; struct session * sessp; - - if ( (error = vnode_getwithref(vp)) ) { - return(error); + + if ((error = vnode_getwithref(vp))) { + return error; } #if CONFIG_MACF error = mac_vnode_check_ioctl(ctx, vp, com); - if (error) + if (error) { goto out; + } #endif switch (vp->v_type) { case VREG: case VDIR: if (com == FIONREAD) { - if ((error = vnode_size(vp, &file_size, ctx)) != 0) + if ((error = vnode_size(vp, &file_size, ctx)) != 0) { goto out; + } *(int *)data = file_size - fp->f_fglob->fg_offset; goto out; } - if (com == FIONBIO || com == FIOASYNC) { /* XXX */ + if (com == FIONBIO || com == FIOASYNC) { /* XXX */ goto out; } - /* fall into ... */ + /* fall into ... 
*/ default: error = ENOTTY; @@ -1508,7 +1542,6 @@ vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) goto out; } *(int *)data = bdevsw[major(vp->v_rdev)].d_type; - } else if (vp->v_type == VCHR) { if (major(vp->v_rdev) >= nchrdev) { error = ENXIO; @@ -1536,7 +1569,7 @@ vn_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx) } out: (void)vnode_put(vp); - return(error); + return error; } /* @@ -1549,7 +1582,7 @@ vn_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx) struct vnode * vp = (struct vnode *)fp->f_fglob->fg_data; struct vfs_context context; - if ( (error = vnode_getwithref(vp)) == 0 ) { + if ((error = vnode_getwithref(vp)) == 0) { context.vc_thread = current_thread(); context.vc_ucred = fp->f_fglob->fg_cred; @@ -1562,12 +1595,11 @@ vn_select(struct fileproc *fp, int which, void *wql, __unused vfs_context_t ctx) error = mac_vnode_check_select(ctx, vp, which); if (error == 0) #endif - error = VNOP_SELECT(vp, which, fp->f_fglob->fg_flag, wql, ctx); + error = VNOP_SELECT(vp, which, fp->f_fglob->fg_flag, wql, ctx); (void)vnode_put(vp); } - return(error); - + return error; } /* @@ -1579,9 +1611,9 @@ vn_closefile(struct fileglob *fg, vfs_context_t ctx) struct vnode *vp = fg->fg_data; int error; - if ( (error = vnode_getwithref(vp)) == 0 ) { + if ((error = vnode_getwithref(vp)) == 0) { if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE && - ((fg->fg_flag & FHASLOCK) != 0 || + ((fg->fg_flag & FHASLOCK) != 0 || (fg->fg_lflags & FG_HAS_OFDLOCK) != 0)) { struct flock lf = { .l_whence = SEEK_SET, @@ -1590,18 +1622,20 @@ vn_closefile(struct fileglob *fg, vfs_context_t ctx) .l_type = F_UNLCK }; - if ((fg->fg_flag & FHASLOCK) != 0) + if ((fg->fg_flag & FHASLOCK) != 0) { (void) VNOP_ADVLOCK(vp, (caddr_t)fg, F_UNLCK, &lf, F_FLOCK, ctx, NULL); + } - if ((fg->fg_lflags & FG_HAS_OFDLOCK) != 0) + if ((fg->fg_lflags & FG_HAS_OFDLOCK) != 0) { (void) VNOP_ADVLOCK(vp, (caddr_t)fg, F_UNLCK, &lf, F_OFD_LOCK, ctx, NULL); + } } - error = vn_close(vp, fg->fg_flag, ctx); + error = vn_close(vp, fg->fg_flag, ctx); (void) vnode_put(vp); } - return (error); + return error; } /* @@ -1611,10 +1645,10 @@ vn_closefile(struct fileglob *fg, vfs_context_t ctx) int vn_pathconf(vnode_t vp, int name, int32_t *retval, vfs_context_t ctx) { - int error = 0; + int error = 0; struct vfs_attr vfa; - switch(name) { + switch (name) { case _PC_EXTENDED_SECURITY_NP: *retval = vfs_extendedsecurity(vnode_mount(vp)) ? 1 : 0; break; @@ -1622,57 +1656,57 @@ vn_pathconf(vnode_t vp, int name, int32_t *retval, vfs_context_t ctx) *retval = vfs_authopaque(vnode_mount(vp)); break; case _PC_2_SYMLINKS: - *retval = 1; /* XXX NOTSUP on MSDOS, etc. */ + *retval = 1; /* XXX NOTSUP on MSDOS, etc. 
*/ break; case _PC_ALLOC_SIZE_MIN: - *retval = 1; /* XXX lie: 1 byte */ + *retval = 1; /* XXX lie: 1 byte */ break; - case _PC_ASYNC_IO: /* unistd.h: _POSIX_ASYNCHRONUS_IO */ - *retval = 1; /* [AIO] option is supported */ + case _PC_ASYNC_IO: /* unistd.h: _POSIX_ASYNCHRONUS_IO */ + *retval = 1; /* [AIO] option is supported */ break; - case _PC_PRIO_IO: /* unistd.h: _POSIX_PRIORITIZED_IO */ - *retval = 0; /* [PIO] option is not supported */ + case _PC_PRIO_IO: /* unistd.h: _POSIX_PRIORITIZED_IO */ + *retval = 0; /* [PIO] option is not supported */ break; case _PC_REC_INCR_XFER_SIZE: - *retval = 4096; /* XXX go from MIN to MAX 4K at a time */ + *retval = 4096; /* XXX go from MIN to MAX 4K at a time */ break; case _PC_REC_MIN_XFER_SIZE: - *retval = 4096; /* XXX recommend 4K minimum reads/writes */ + *retval = 4096; /* XXX recommend 4K minimum reads/writes */ break; case _PC_REC_MAX_XFER_SIZE: *retval = 65536; /* XXX recommend 64K maximum reads/writes */ break; case _PC_REC_XFER_ALIGN: - *retval = 4096; /* XXX recommend page aligned buffers */ + *retval = 4096; /* XXX recommend page aligned buffers */ break; case _PC_SYMLINK_MAX: - *retval = 255; /* Minimum acceptable POSIX value */ + *retval = 255; /* Minimum acceptable POSIX value */ break; - case _PC_SYNC_IO: /* unistd.h: _POSIX_SYNCHRONIZED_IO */ - *retval = 0; /* [SIO] option is not supported */ + case _PC_SYNC_IO: /* unistd.h: _POSIX_SYNCHRONIZED_IO */ + *retval = 0; /* [SIO] option is not supported */ break; case _PC_XATTR_SIZE_BITS: - /* The number of bits used to store maximum extended - * attribute size in bytes. For example, if the maximum - * attribute size supported by a file system is 128K, the - * value returned will be 18. However a value 18 can mean - * that the maximum attribute size can be anywhere from - * (256KB - 1) to 128KB. As a special case, the resource - * fork can have much larger size, and some file system - * specific extended attributes can have smaller and preset + /* The number of bits used to store maximum extended + * attribute size in bytes. For example, if the maximum + * attribute size supported by a file system is 128K, the + * value returned will be 18. However a value 18 can mean + * that the maximum attribute size can be anywhere from + * (256KB - 1) to 128KB. As a special case, the resource + * fork can have much larger size, and some file system + * specific extended attributes can have smaller and preset * size; for example, Finder Info is always 32 bytes. */ memset(&vfa, 0, sizeof(vfa)); VFSATTR_INIT(&vfa); VFSATTR_WANTED(&vfa, f_capabilities); if (vfs_getattr(vnode_mount(vp), &vfa, ctx) == 0 && - (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) && - (vfa.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) && + (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) && + (vfa.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) && (vfa.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) { /* Supports native extended attributes */ error = VNOP_PATHCONF(vp, name, retval, ctx); } else { - /* Number of bits used to represent the maximum size of + /* Number of bits used to represent the maximum size of * extended attribute stored in an Apple Double file. 
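To make the bit-width convention described above concrete: a returned value of 18 only brackets the true limit between 128KB and (256KB - 1). A short userspace probe, assuming a macOS host whose <unistd.h> defines _PC_XATTR_SIZE_BITS:

#include <unistd.h>
#include <stdio.h>

int
main(int argc, char *argv[])
{
    /* A result of 18 means the volume's maximum xattr size needs 18
     * bits to express, i.e. anywhere from 128KB to (256KB - 1). */
    long bits = pathconf(argc > 1 ? argv[1] : "/tmp", _PC_XATTR_SIZE_BITS);

    if (bits == -1) {
        perror("pathconf");
        return 1;
    }
    printf("max xattr size fits in %ld bits\n", bits);
    return 0;
}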
*/ *retval = AD_XATTR_SIZE_BITS; @@ -1683,12 +1717,12 @@ vn_pathconf(vnode_t vp, int name, int32_t *retval, vfs_context_t ctx) break; } - return (error); + return error; } static int vn_kqfilt_add(struct fileproc *fp, struct knote *kn, - struct kevent_internal_s *kev, vfs_context_t ctx) + struct kevent_internal_s *kev, vfs_context_t ctx) { struct vnode *vp; int error = 0; @@ -1701,34 +1735,32 @@ vn_kqfilt_add(struct fileproc *fp, struct knote *kn, */ if ((error = vget_internal(vp, 0, VNODE_NODEAD)) == 0) { switch (kn->kn_filter) { - case EVFILT_READ: - case EVFILT_WRITE: - if (vnode_isfifo(vp)) { - /* We'll only watch FIFOs that use our fifofs */ - if (!(vp->v_fifoinfo && vp->v_fifoinfo->fi_readsock)) { - error = ENOTSUP; - } - - } else if (!vnode_isreg(vp)) { - if (vnode_ischr(vp)) { - result = spec_kqfilter(vp, kn, kev); - if ((kn->kn_flags & EV_ERROR) == 0) { - /* claimed by a special device */ - vnode_put(vp); - return result; - } + case EVFILT_READ: + case EVFILT_WRITE: + if (vnode_isfifo(vp)) { + /* We'll only watch FIFOs that use our fifofs */ + if (!(vp->v_fifoinfo && vp->v_fifoinfo->fi_readsock)) { + error = ENOTSUP; + } + } else if (!vnode_isreg(vp)) { + if (vnode_ischr(vp)) { + result = spec_kqfilter(vp, kn, kev); + if ((kn->kn_flags & EV_ERROR) == 0) { + /* claimed by a special device */ + vnode_put(vp); + return result; } - error = EINVAL; } - break; - case EVFILT_VNODE: - break; - default: error = EINVAL; + } + break; + case EVFILT_VNODE: + break; + default: + error = EINVAL; } if (error == 0) { - #if CONFIG_MACF error = mac_vnode_check_kqfilter(ctx, fp->f_fglob->fg_cred, kn, vp); if (error) { @@ -1750,13 +1782,13 @@ vn_kqfilt_add(struct fileproc *fp, struct knote *kn, * Ask the filesystem to provide remove notifications, * but ignore failure */ - VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx); + VNOP_MONITOR(vp, 0, VNODE_MONITOR_BEGIN, (void*) kn, ctx); } vnode_put(vp); } - out: +out: if (error) { kn->kn_flags = EV_ERROR; kn->kn_data = error; @@ -1769,17 +1801,18 @@ static void filt_vndetach(struct knote *kn) { vfs_context_t ctx = vfs_context_current(); - struct vnode *vp; + struct vnode *vp; vp = (struct vnode *)kn->kn_hook; - if (vnode_getwithvid(vp, kn->kn_hookid)) + if (vnode_getwithvid(vp, kn->kn_hookid)) { return; + } vnode_lock(vp); KNOTE_DETACH(&vp->v_knotes, kn); vnode_unlock(vp); - - /* - * Tell a (generally networked) filesystem that we're no longer watching + + /* + * Tell a (generally networked) filesystem that we're no longer watching * If the FS wants to track contexts, it should still be using the one from * the VNODE_MONITOR_BEGIN. */ @@ -1804,7 +1837,7 @@ vnode_readable_data_count(vnode_t vp, off_t current_offset, int ispoll) int err = fifo_charcount(vp, &cnt); if (err == 0) { return (int64_t)cnt; - } else + } else #endif { return 0; @@ -1822,7 +1855,7 @@ vnode_readable_data_count(vnode_t vp, off_t current_offset, int ispoll) return INT64_MIN; } else { return (int64_t)amount; - } + } } else { panic("Should never have an EVFILT_READ except for reg or fifo."); return 0; @@ -1830,13 +1863,13 @@ vnode_readable_data_count(vnode_t vp, off_t current_offset, int ispoll) } /* - * Used for EVFILT_WRITE. + * Used for EVFILT_WRITE. * * For regular vnodes, we can always write (1). For named pipes, * see how much space there is in the buffer. Nothing else is covered. 
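These byte counts, together with the hint matching in filt_vnode_common() below, are what a kqueue(2) client ultimately observes. A small consumer-side sketch using the standard kqueue API (the watched path is hypothetical):

#include <sys/event.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    int kq = kqueue();
    int fd = open("/tmp/watched", O_EVTONLY);  /* hypothetical file */
    struct kevent change, event;

    /* Request the NOTE_* hints that filt_vnode_common() matches
     * against kn_sfflags; a revoked vnode activates regardless. */
    EV_SET(&change, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
        NOTE_WRITE | NOTE_DELETE | NOTE_ATTRIB | NOTE_REVOKE, 0, NULL);

    if (kevent(kq, &change, 1, &event, 1, NULL) == 1) {
        printf("vnode event, fflags = 0x%x\n", (unsigned)event.fflags);
    }
    close(fd);
    close(kq);
    return 0;
}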
*/ static intptr_t -vnode_writable_space_count(vnode_t vp) +vnode_writable_space_count(vnode_t vp) { if (vnode_isfifo(vp)) { #if FIFO @@ -1844,7 +1877,7 @@ vnode_writable_space_count(vnode_t vp) int err = fifo_freespace(vp, &spc); if (err == 0) { return (intptr_t)spc; - } else + } else #endif { return (intptr_t)0; @@ -1857,14 +1890,14 @@ vnode_writable_space_count(vnode_t vp) } } -/* +/* * Determine whether this knote should be active - * - * This is kind of subtle. - * --First, notice if the vnode has been revoked: in so, override hint - * --EVFILT_READ knotes are checked no matter what the hint is - * --Other knotes activate based on hint. - * --If hint is revoke, set special flags and activate + * + * This is kind of subtle. + * --First, notice if the vnode has been revoked: in so, override hint + * --EVFILT_READ knotes are checked no matter what the hint is + * --Other knotes activate based on hint. + * --If hint is revoke, set special flags and activate */ static int filt_vnode_common(struct knote *kn, vnode_t vp, long hint) @@ -1882,35 +1915,35 @@ filt_vnode_common(struct knote *kn, vnode_t vp, long hint) kn->kn_fflags |= NOTE_REVOKE; } } else { - switch(kn->kn_filter) { - case EVFILT_READ: - kn->kn_data = vnode_readable_data_count(vp, kn->kn_fp->f_fglob->fg_offset, (kn->kn_flags & EV_POLL)); + switch (kn->kn_filter) { + case EVFILT_READ: + kn->kn_data = vnode_readable_data_count(vp, kn->kn_fp->f_fglob->fg_offset, (kn->kn_flags & EV_POLL)); - if (kn->kn_data != 0) { - activate = 1; - } - break; - case EVFILT_WRITE: - kn->kn_data = vnode_writable_space_count(vp); + if (kn->kn_data != 0) { + activate = 1; + } + break; + case EVFILT_WRITE: + kn->kn_data = vnode_writable_space_count(vp); - if (kn->kn_data != 0) { - activate = 1; - } - break; - case EVFILT_VNODE: - /* Check events this note matches against the hint */ - if (kn->kn_sfflags & hint) { - kn->kn_fflags |= hint; /* Set which event occurred */ - } - if (kn->kn_fflags != 0) { - activate = 1; - } - break; - default: - panic("Invalid knote filter on a vnode!\n"); + if (kn->kn_data != 0) { + activate = 1; + } + break; + case EVFILT_VNODE: + /* Check events this note matches against the hint */ + if (kn->kn_sfflags & hint) { + kn->kn_fflags |= hint; /* Set which event occurred */ + } + if (kn->kn_fflags != 0) { + activate = 1; + } + break; + default: + panic("Invalid knote filter on a vnode!\n"); } } - return (activate); + return activate; } static int @@ -1939,8 +1972,9 @@ filt_vntouch(struct knote *kn, struct kevent_internal_s *kev) activate = filt_vnode_common(kn, vp, hint); - if (hint == 0) + if (hint == 0) { vnode_put_locked(vp); + } vnode_unlock(vp); return activate; @@ -1958,7 +1992,7 @@ filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_inte if (vnode_getiocount(vp, kn->kn_hookid, VNODE_NODEAD | VNODE_WITHID) != 0) { /* Is recycled */ hint = NOTE_REVOKE; - } + } activate = filt_vnode_common(kn, vp, hint); if (activate) { *kev = kn->kn_kevent; @@ -1969,10 +2003,10 @@ filt_vnprocess(struct knote *kn, struct filt_process_s *data, struct kevent_inte } /* Definitely need to unlock, may need to put */ - if (hint == 0) + if (hint == 0) { vnode_put_locked(vp); + } vnode_unlock(vp); return activate; } - diff --git a/bsd/vfs/vfs_xattr.c b/bsd/vfs/vfs_xattr.c index f01d117b5..dacd51db9 100644 --- a/bsd/vfs/vfs_xattr.c +++ b/bsd/vfs/vfs_xattr.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -65,25 +65,25 @@ static int shadow_sequence; * We use %p to prevent loss of precision for pointers on varying architectures. */ -#define SHADOW_NAME_FMT ".vfs_rsrc_stream_%p%08x%p" -#define SHADOW_DIR_FMT ".vfs_rsrc_streams_%p%x" +#define SHADOW_NAME_FMT ".vfs_rsrc_stream_%p%08x%p" +#define SHADOW_DIR_FMT ".vfs_rsrc_streams_%p%x" #define SHADOW_DIR_CONTAINER "/var/run" #define MAKE_SHADOW_NAME(VP, NAME) \ snprintf((NAME), sizeof((NAME)), (SHADOW_NAME_FMT), \ - ((void*)(VM_KERNEL_ADDRPERM(VP))), \ - (VP)->v_id, \ - ((void*)(VM_KERNEL_ADDRPERM((VP)->v_data)))) + ((void*)(VM_KERNEL_ADDRPERM(VP))), \ + (VP)->v_id, \ + ((void*)(VM_KERNEL_ADDRPERM((VP)->v_data)))) /* The full path to the shadow directory */ -#define MAKE_SHADOW_DIRNAME(VP, NAME) \ +#define MAKE_SHADOW_DIRNAME(VP, NAME) \ snprintf((NAME), sizeof((NAME)), (SHADOW_DIR_CONTAINER "/" SHADOW_DIR_FMT), \ - ((void*)(VM_KERNEL_ADDRPERM(VP))), shadow_sequence) + ((void*)(VM_KERNEL_ADDRPERM(VP))), shadow_sequence) /* The shadow directory as a 'leaf' entry */ -#define MAKE_SHADOW_DIR_LEAF(VP, NAME) \ +#define MAKE_SHADOW_DIR_LEAF(VP, NAME) \ snprintf((NAME), sizeof((NAME)), (SHADOW_DIR_FMT), \ - ((void*)(VM_KERNEL_ADDRPERM(VP))), shadow_sequence) + ((void*)(VM_KERNEL_ADDRPERM(VP))), shadow_sequence) static int default_getnamedstream(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation op, vfs_context_t context); @@ -115,12 +115,12 @@ static int default_removexattr(vnode_t vp, const char *name, int options, */ int vn_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, - int options, vfs_context_t context) + int options, vfs_context_t context) { int error; if (!XATTR_VNODE_SUPPORTED(vp)) { - return (EPERM); + return EPERM; } #if NAMEDSTREAMS /* getxattr calls are not allowed for streams. */ @@ -137,8 +137,9 @@ vn_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, if (!(options & XATTR_NOSECURITY)) { #if CONFIG_MACF error = mac_vnode_check_getextattr(context, vp, name, uio); - if (error) + if (error) { goto out; + } #endif /* MAC */ if ((error = xattr_validatename(name))) { goto out; @@ -147,7 +148,7 @@ vn_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, goto out; } /* The offset can only be non-zero for resource forks. 
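The userspace face of this rule is the position argument of macOS getxattr(2): it must be zero for every attribute except the resource fork. A quick illustration (file and attribute names are hypothetical):

#include <sys/xattr.h>
#include <stdio.h>

int
main(void)
{
    char buf[512];

    /* A non-zero position is accepted only for com.apple.ResourceFork... */
    ssize_t n = getxattr("somefile", "com.apple.ResourceFork",
        buf, sizeof(buf), 1024 /* position */, 0);

    /* ...for any other name it fails with EINVAL, per the check above. */
    ssize_t bad = getxattr("somefile", "com.example.note",
        buf, sizeof(buf), 1024, 0);

    printf("rsrc: %zd, other: %zd\n", n, bad);
    return 0;
}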
*/ - if (uio != NULL && uio_offset(uio) != 0 && + if (uio != NULL && uio_offset(uio) != 0 && bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { error = EINVAL; goto out; @@ -155,7 +156,7 @@ vn_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, } /* The offset can only be non-zero for resource forks. */ - if (uio != NULL && uio_offset(uio) != 0 && + if (uio != NULL && uio_offset(uio) != 0 && bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { error = EINVAL; goto out; @@ -169,7 +170,7 @@ vn_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, error = default_getxattr(vp, name, uio, size, options, context); } out: - return (error); + return error; } /* @@ -181,7 +182,7 @@ vn_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t int error; if (!XATTR_VNODE_SUPPORTED(vp)) { - return (EPERM); + return EPERM; } #if NAMEDSTREAMS /* setxattr calls are not allowed for streams. */ @@ -190,25 +191,27 @@ vn_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t goto out; } #endif - if ((options & (XATTR_REPLACE|XATTR_CREATE)) == (XATTR_REPLACE|XATTR_CREATE)) { - return (EINVAL); + if ((options & (XATTR_REPLACE | XATTR_CREATE)) == (XATTR_REPLACE | XATTR_CREATE)) { + return EINVAL; } if ((error = xattr_validatename(name))) { - return (error); + return error; } - if (!(options & XATTR_NOSECURITY)) { + if (!(options & XATTR_NOSECURITY)) { #if CONFIG_MACF error = mac_vnode_check_setextattr(context, vp, name, uio); - if (error) + if (error) { goto out; + } #endif /* MAC */ error = vnode_authorize(vp, NULL, KAUTH_VNODE_WRITE_EXTATTRIBUTES, context); - if (error) + if (error) { goto out; + } } /* The offset can only be non-zero for resource forks. */ - if (uio_offset(uio) != 0 && - bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0 ) { + if (uio_offset(uio) != 0 && + bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { error = EINVAL; goto out; } @@ -226,7 +229,7 @@ vn_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t */ if (error == EJUSTRETURN) { int native = 0, dufile = 0; - size_t sz; /* not used */ + size_t sz; /* not used */ native = VNOP_GETXATTR(vp, name, NULL, &sz, 0, context) ? 0 : 1; dufile = default_getxattr(vp, name, NULL, &sz, 0, context) ? 0 : 1; @@ -256,12 +259,13 @@ vn_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t #if CONFIG_MACF if ((error == 0) && !(options & XATTR_NOSECURITY)) { mac_vnode_notify_setextattr(context, vp, name, uio); - if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) + if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) { mac_vnode_label_update_extattr(vnode_mount(vp), vp, name); + } } #endif out: - return (error); + return error; } /* @@ -273,7 +277,7 @@ vn_removexattr(vnode_t vp, const char * name, int options, vfs_context_t context int error; if (!XATTR_VNODE_SUPPORTED(vp)) { - return (EPERM); + return EPERM; } #if NAMEDSTREAMS /* removexattr calls are not allowed for streams. 
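For reference, the syscall that lands in vn_removexattr() is macOS removexattr(2); a minimal call looks like this (names hypothetical):

#include <sys/xattr.h>
#include <stdio.h>

int
main(void)
{
    /* 0 options; XATTR_NOFOLLOW would operate on a symlink itself. */
    if (removexattr("somefile", "com.example.note", 0) != 0) {
        perror("removexattr");
        return 1;
    }
    return 0;
}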
*/ @@ -283,17 +287,19 @@ vn_removexattr(vnode_t vp, const char * name, int options, vfs_context_t context } #endif if ((error = xattr_validatename(name))) { - return (error); + return error; } if (!(options & XATTR_NOSECURITY)) { #if CONFIG_MACF error = mac_vnode_check_deleteextattr(context, vp, name); - if (error) + if (error) { goto out; + } #endif /* MAC */ error = vnode_authorize(vp, NULL, KAUTH_VNODE_WRITE_EXTATTRIBUTES, context); - if (error) + if (error) { goto out; + } } error = VNOP_REMOVEXATTR(vp, name, options, context); if (error == ENOTSUP && !(options & XATTR_NODEFAULT)) { @@ -310,19 +316,21 @@ vn_removexattr(vnode_t vp, const char * name, int options, vfs_context_t context * default_removexattr should not be considered an error. */ error = default_removexattr(vp, name, options, context); - if (error == ENOATTR) + if (error == ENOATTR) { error = 0; + } #endif /* DUAL_EAS */ } #if CONFIG_MACF if ((error == 0) && !(options & XATTR_NOSECURITY)) { mac_vnode_notify_deleteextattr(context, vp, name); - if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) + if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) { mac_vnode_label_update_extattr(vnode_mount(vp), vp, name); + } } #endif out: - return (error); + return error; } /* @@ -334,25 +342,27 @@ vn_listxattr(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t con int error; if (!XATTR_VNODE_SUPPORTED(vp)) { - return (EPERM); + return EPERM; } #if NAMEDSTREAMS /* listxattr calls are not allowed for streams. */ if (vp->v_flag & VISNAMEDSTREAM) { - return (EPERM); + return EPERM; } #endif if (!(options & XATTR_NOSECURITY)) { #if CONFIG_MACF error = mac_vnode_check_listextattr(context, vp); - if (error) + if (error) { goto out; + } #endif /* MAC */ error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_EXTATTRIBUTES, context); - if (error) + if (error) { goto out; + } } error = VNOP_LISTXATTR(vp, uio, size, options, context); @@ -360,12 +370,12 @@ vn_listxattr(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t con /* * A filesystem may keep some but not all EAs natively, in which case * the native EA names will have been uiomove-d out (or *size updated) - * and the default_listxattr here will finish the job. + * and the default_listxattr here will finish the job. 
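The merged listing (native names plus any AppleDouble names appended by default_listxattr) reaches userspace as one packed run of NUL-terminated strings. Walking it looks like this (path hypothetical):

#include <sys/xattr.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    char names[4096];
    ssize_t len = listxattr("somefile", names, sizeof(names), 0);

    /* Each name ends at its NUL; step from name to name. */
    for (ssize_t off = 0; off < len; off += strlen(&names[off]) + 1) {
        printf("%s\n", &names[off]);
    }
    return len < 0 ? 1 : 0;
}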
*/ error = default_listxattr(vp, uio, size, options, context); } out: - return (error); + return error; } int @@ -374,16 +384,18 @@ xattr_validatename(const char *name) int namelen; if (name == NULL || name[0] == '\0') { - return (EINVAL); + return EINVAL; } namelen = strlen(name); - if (name[namelen] != '\0') - return (ENAMETOOLONG); - - if (utf8_validatestr((const unsigned char *)name, namelen) != 0) - return (EINVAL); - - return (0); + if (name[namelen] != '\0') { + return ENAMETOOLONG; + } + + if (utf8_validatestr((const unsigned char *)name, namelen) != 0) { + return EINVAL; + } + + return 0; } @@ -393,7 +405,7 @@ xattr_validatename(const char *name) int xattr_protected(const char *attrname) { - return(!strncmp(attrname, "com.apple.system.", 17)); + return !strncmp(attrname, "com.apple.system.", 17); } @@ -432,11 +444,12 @@ vnode_setasnamedstream_internal(vnode_t vp, vnode_t svp) errno_t vnode_setasnamedstream(vnode_t vp, vnode_t svp) { - if ((vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) - return (EINVAL); + if ((vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) { + return EINVAL; + } vnode_setasnamedstream_internal(vp, svp); - return (0); + return 0; } #if NAMEDSTREAMS @@ -452,53 +465,56 @@ vnode_getnamedstream(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperati if (vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) { error = VNOP_GETNAMEDSTREAM(vp, svpp, name, op, flags, context); } else { - if (flags) + if (flags) { error = ENOTSUP; - else + } else { error = default_getnamedstream(vp, svpp, name, op, context); + } } if (error == 0) { vnode_setasnamedstream_internal(vp, *svpp); } - return (error); + return error; } /* * Make a named stream for vnode vp. */ -errno_t +errno_t vnode_makenamedstream(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t context) { int error; - if (vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) + if (vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) { error = VNOP_MAKENAMEDSTREAM(vp, svpp, name, flags, context); - else + } else { error = default_makenamedstream(vp, svpp, name, context); + } if (error == 0) { vnode_setasnamedstream_internal(vp, *svpp); } - return (error); + return error; } /* * Remove a named stream from vnode vp. */ -errno_t +errno_t vnode_removenamedstream(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t context) { int error; - if (vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) + if (vp->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) { error = VNOP_REMOVENAMEDSTREAM(vp, svp, name, flags, context); - else + } else { error = default_removenamedstream(vp, name, context); + } - return (error); + return error; } #define NS_IOBUFSIZE (128 * 1024) @@ -506,23 +522,24 @@ vnode_removenamedstream(vnode_t vp, vnode_t svp, const char *name, int flags, vf /* * Release a named stream shadow file. * - * Note: This function is called from two places where we do not need - * to check if the vnode has any references held before deleting the - * shadow file. Once from vclean() when the vnode is being reclaimed - * and we do not hold any references on the vnode. Second time from - * default_getnamedstream() when we get an error during shadow stream - * file initialization so that other processes who are waiting for the - * shadow stream file initialization by the creator will get opportunity + * Note: This function is called from two places where we do not need + * to check if the vnode has any references held before deleting the + * shadow file. 
Once from vclean() when the vnode is being reclaimed + * and we do not hold any references on the vnode. Second time from + * default_getnamedstream() when we get an error during shadow stream + * file initialization so that other processes who are waiting for the + * shadow stream file initialization by the creator will get opportunity * to create and initialize the file again. */ errno_t -vnode_relenamedstream(vnode_t vp, vnode_t svp) { +vnode_relenamedstream(vnode_t vp, vnode_t svp) +{ vnode_t dvp; struct componentname cn; char tmpname[80]; errno_t err; - - /* + + /* * We need to use the kernel context here. If we used the supplied * VFS context we have no clue whether or not it originated from userland * where it could be subject to a chroot jail. We need to ensure that all @@ -545,8 +562,8 @@ vnode_relenamedstream(vnode_t vp, vnode_t svp) { cn.cn_nameptr = cn.cn_pnbuf; cn.cn_namelen = strlen(tmpname); - /* - * Obtain the vnode for the shadow files directory. Make sure to + /* + * Obtain the vnode for the shadow files directory. Make sure to * use the kernel ctx as described above. */ err = get_shadow_dir(&dvp); @@ -557,16 +574,16 @@ vnode_relenamedstream(vnode_t vp, vnode_t svp) { (void) VNOP_REMOVE(dvp, svp, &cn, 0, kernelctx); vnode_put(dvp); - return (0); + return 0; } /* * Flush a named stream shadow file. - * + * * 'vp' represents the AppleDouble file. * 'svp' represents the shadow file. */ -errno_t +errno_t vnode_flushnamedstream(vnode_t vp, vnode_t svp, vfs_context_t context) { struct vnode_attr va; @@ -577,8 +594,8 @@ vnode_flushnamedstream(vnode_t vp, vnode_t svp, vfs_context_t context) size_t iosize; size_t datasize; int error; - /* - * The kernel context must be used for all I/O to the shadow file + /* + * The kernel context must be used for all I/O to the shadow file * and its namespace operations */ vfs_context_t kernelctx = vfs_context_kernel(); @@ -587,19 +604,19 @@ vnode_flushnamedstream(vnode_t vp, vnode_t svp, vfs_context_t context) VATTR_INIT(&va); VATTR_WANTED(&va, va_data_size); - if (VNOP_GETATTR(svp, &va, context) != 0 || - !VATTR_IS_SUPPORTED(&va, va_data_size)) { - return (0); + if (VNOP_GETATTR(svp, &va, context) != 0 || + !VATTR_IS_SUPPORTED(&va, va_data_size)) { + return 0; } datasize = va.va_data_size; if (datasize == 0) { (void) default_removexattr(vp, XATTR_RESOURCEFORK_NAME, 0, context); - return (0); + return 0; } iosize = bufsize = MIN(datasize, NS_IOBUFSIZE); if (kmem_alloc(kernel_map, (vm_offset_t *)&bufptr, bufsize, VM_KERN_MEMORY_FILE)) { - return (ENOMEM); + return ENOMEM; } auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ); offset = 0; @@ -646,22 +663,24 @@ out: if (auio) { uio_free(auio); } - return (error); + return error; } -/* +/* * Verify that the vnode 'vp' is a vnode that lives in the shadow * directory. We can't just query the parent pointer directly since * the shadowfile is hooked up to the actual file it's a stream for. */ -errno_t vnode_verifynamedstream(vnode_t vp) { +errno_t +vnode_verifynamedstream(vnode_t vp) +{ int error; struct vnode *shadow_dvp = NULL; struct vnode *shadowfile = NULL; struct componentname cn; - - /* + + /* * We need to use the kernel context here. If we used the supplied * VFS context we have no clue whether or not it originated from userland * where it could be subject to a chroot jail. 
We need to ensure that all @@ -670,7 +689,7 @@ errno_t vnode_verifynamedstream(vnode_t vp) { */ vfs_context_t kernelctx = vfs_context_kernel(); char tmpname[80]; - + /* Get the shadow directory vnode */ error = get_shadow_dir(&shadow_dvp); @@ -679,7 +698,7 @@ errno_t vnode_verifynamedstream(vnode_t vp) { } /* Re-generate the shadow name in the buffer */ - MAKE_SHADOW_NAME (vp, tmpname); + MAKE_SHADOW_NAME(vp, tmpname); /* Look up item in shadow dir */ bzero(&cn, sizeof(cn)); @@ -691,26 +710,25 @@ errno_t vnode_verifynamedstream(vnode_t vp) { cn.cn_nameptr = cn.cn_pnbuf; cn.cn_namelen = strlen(tmpname); - if (VNOP_LOOKUP (shadow_dvp, &shadowfile, &cn, kernelctx) == 0) { + if (VNOP_LOOKUP(shadow_dvp, &shadowfile, &cn, kernelctx) == 0) { /* is the pointer the same? */ if (shadowfile == vp) { - error = 0; - } - else { + error = 0; + } else { error = EPERM; } /* drop the iocount acquired */ - vnode_put (shadowfile); - } + vnode_put(shadowfile); + } /* Drop iocount on shadow dir */ - vnode_put (shadow_dvp); + vnode_put(shadow_dvp); return error; -} +} -/* - * Access or create the shadow file as needed. - * +/* + * Access or create the shadow file as needed. + * * 'makestream' with non-zero value means that we need to guarantee we were the * creator of the shadow file. * @@ -721,7 +739,7 @@ errno_t vnode_verifynamedstream(vnode_t vp) { */ static int getshadowfile(vnode_t vp, vnode_t *svpp, int makestream, size_t *rsrcsize, - int *creator, vfs_context_t context) + int *creator, vfs_context_t context) { vnode_t dvp = NULLVP; vnode_t svp = NULLVP; @@ -753,10 +771,10 @@ retry_create: VATTR_WANTED(&va, va_mode); VATTR_WANTED(&va, va_create_time); VATTR_WANTED(&va, va_modify_time); - if (VNOP_GETATTR(vp, &va, context) != 0 || - !VATTR_IS_SUPPORTED(&va, va_uid) || - !VATTR_IS_SUPPORTED(&va, va_gid) || - !VATTR_IS_SUPPORTED(&va, va_mode)) { + if (VNOP_GETATTR(vp, &va, context) != 0 || + !VATTR_IS_SUPPORTED(&va, va_uid) || + !VATTR_IS_SUPPORTED(&va, va_gid) || + !VATTR_IS_SUPPORTED(&va, va_mode)) { va.va_uid = KAUTH_UID_NONE; va.va_gid = KAUTH_GID_NONE; va.va_mode = S_IRUSR | S_IWUSR; @@ -777,18 +795,18 @@ retry_create: /* Double check existence by asking for size. */ VATTR_INIT(&va); VATTR_WANTED(&va, va_data_size); - if (VNOP_GETATTR(svp, &va, context) == 0 && + if (VNOP_GETATTR(svp, &va, context) == 0 && VATTR_IS_SUPPORTED(&va, va_data_size)) { goto out; /* OK to use. */ } } - - /* - * Otherwise make sure the resource fork data exists. + + /* + * Otherwise make sure the resource fork data exists. * Use the supplied context for accessing the AD file. */ error = vn_getxattr(vp, XATTR_RESOURCEFORK_NAME, NULL, &datasize, - XATTR_NOSECURITY, context); + XATTR_NOSECURITY, context); /* * To maintain binary compatibility with legacy Carbon * emulated resource fork support, if the resource fork @@ -797,14 +815,14 @@ retry_create: */ if ((error == ENOATTR) && (vn_getxattr(vp, XATTR_FINDERINFO_NAME, NULL, &datasize, - XATTR_NOSECURITY, context) == 0)) { + XATTR_NOSECURITY, context) == 0)) { datasize = 0; error = 0; } else { if (error) { goto out; } - + /* If the resource fork exists, its size is expected to be non-zero. 
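For illustration only, the leaf name placed in the componentname comes from the SHADOW_NAME_FMT defined earlier; with made-up stand-ins for the permuted pointers it renders like so:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    char name[80];
    void *vp_perm   = (void *)(uintptr_t)0x7f80aa55aa55; /* VM_KERNEL_ADDRPERM(vp) stand-in */
    uint32_t vid    = 0x1234;                            /* vp->v_id stand-in */
    void *data_perm = (void *)(uintptr_t)0x7f80bb66bb66; /* v_data stand-in */

    snprintf(name, sizeof(name), ".vfs_rsrc_stream_%p%08x%p",
        vp_perm, vid, data_perm);
    printf("%s\n", name);
    return 0;
}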
*/ if (datasize == 0) { error = ENOATTR; @@ -817,32 +835,30 @@ retry_create: if (error == 0) { vnode_recycle(svp); *creator = 1; - } - else if ((error == EEXIST) && !makestream) { + } else if ((error == EEXIST) && !makestream) { error = VNOP_LOOKUP(dvp, &svp, &cn, kernelctx); - } - else if ((error == ENOENT) && !makestream) { + } else if ((error == ENOENT) && !makestream) { /* * We could have raced with a rmdir on the shadow directory * post-lookup. Retry from the beginning, 1x only, to - * try and see if we need to re-create the shadow directory + * try and see if we need to re-create the shadow directory * in get_shadow_dir. */ if (retries == 0) { retries++; if (dvp) { - vnode_put (dvp); + vnode_put(dvp); dvp = NULLVP; } if (svp) { - vnode_put (svp); + vnode_put(svp); svp = NULLVP; } goto retry_create; } /* Otherwise, just error out normally below */ } - + out: if (dvp) { vnode_put(dvp); @@ -858,7 +874,7 @@ out: if (rsrcsize) { *rsrcsize = datasize; } - return (error); + return error; } @@ -881,26 +897,26 @@ default_getnamedstream(vnode_t vp, vnode_t *svpp, const char *name, enum nsopera */ if (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { *svpp = NULLVP; - return (ENOATTR); + return ENOATTR; } retry: /* * Obtain a shadow file for the resource fork I/O. - * + * * Need to pass along the supplied context so that getshadowfile * can access the AD file as needed, using it. */ error = getshadowfile(vp, &svp, 0, &datasize, &creator, context); if (error) { *svpp = NULLVP; - return (error); + return error; } /* * The creator of the shadow file provides its file data, - * all other threads should wait until its ready. In order to + * all other threads should wait until its ready. In order to * prevent a deadlock during error codepaths, we need to check if the - * vnode is being created, or if it has failed out. Regardless of success or + * vnode is being created, or if it has failed out. Regardless of success or * failure, we set the VISSHADOW bit on the vnode, so we check that * if the vnode's flags don't have VISNAMEDSTREAM set. If it doesn't, * then we can infer the creator isn't done yet. If it's there, but @@ -916,16 +932,15 @@ retry: } else { /* It's not ready, wait for it (sleep using v_parent as channel) */ if ((svp->v_flag & VISSHADOW)) { - /* + /* * No VISNAMEDSTREAM, but we did see VISSHADOW, indicating that the other * thread is done with this vnode. 
Just unlock the vnode and try again */ vnode_unlock(svp); - } - else { + } else { /* Otherwise, sleep if the shadow file is not created yet */ msleep((caddr_t)&svp->v_parent, &svp->v_lock, PINOD | PDROP, - "getnamedstream", NULL); + "getnamedstream", NULL); } vnode_put(svp); svp = NULLVP; @@ -938,7 +953,7 @@ retry: */ if (op == NS_OPEN && datasize != 0) { size_t offset; - size_t iosize; + size_t iosize; iosize = bufsize = MIN(datasize, NS_IOBUFSIZE); if (kmem_alloc(kernel_map, (vm_offset_t *)&bufptr, bufsize, VM_KERN_MEMORY_FILE)) { @@ -955,7 +970,7 @@ retry: goto out; } while (offset < datasize) { - size_t tmpsize; + size_t tmpsize; iosize = MIN(datasize - offset, iosize); @@ -963,11 +978,11 @@ retry: uio_addiov(auio, (uintptr_t)bufptr, iosize); /* use supplied ctx for AD file */ error = vn_getxattr(vp, XATTR_RESOURCEFORK_NAME, auio, &tmpsize, - XATTR_NOSECURITY, context); + XATTR_NOSECURITY, context); if (error) { break; } - + uio_reset(auio, offset, UIO_SYSSPACE, UIO_WRITE); uio_addiov(auio, (uintptr_t)bufptr, iosize); /* kernel context for writing shadowfile */ @@ -991,15 +1006,15 @@ out: wakeup((caddr_t)&svp->v_parent); vnode_unlock(svp); } else { - /* On post create errors, get rid of the shadow file. This - * way if there is another process waiting for initialization - * of the shadowfile by the current process will wake up and + /* On post create errors, get rid of the shadow file. This + * way if there is another process waiting for initialization + * of the shadowfile by the current process will wake up and * retry by creating and initializing the shadow file again. * Also add the VISSHADOW bit here to indicate we're done operating * on this vnode. */ (void)vnode_relenamedstream(vp, svp); - vnode_lock (svp); + vnode_lock(svp); svp->v_flag |= VISSHADOW; wakeup((caddr_t)&svp->v_parent); vnode_unlock(svp); @@ -1020,7 +1035,7 @@ out: } } *svpp = svp; - return (error); + return error; } static int @@ -1034,7 +1049,7 @@ default_makenamedstream(vnode_t vp, vnode_t *svpp, const char *name, vfs_context */ if (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { *svpp = NULLVP; - return (ENOATTR); + return ENOATTR; } /* Supply the context to getshadowfile so it can manipulate the AD file */ @@ -1052,20 +1067,19 @@ default_makenamedstream(vnode_t vp, vnode_t *svpp, const char *name, vfs_context /* Wakeup any waiters on the v_parent channel */ wakeup((caddr_t)&svp->v_parent); vnode_unlock(svp); - } - return (error); + return error; } -static int +static int default_removenamedstream(vnode_t vp, const char *name, vfs_context_t context) { /* * Only the "com.apple.ResourceFork" stream is supported here. */ if (bcmp(name, XATTR_RESOURCEFORK_NAME, sizeof(XATTR_RESOURCEFORK_NAME)) != 0) { - return (ENOATTR); + return ENOATTR; } /* * XXX - what about other opened instances? @@ -1074,7 +1088,8 @@ default_removenamedstream(vnode_t vp, const char *name, vfs_context_t context) } static int -get_shadow_dir(vnode_t *sdvpp) { +get_shadow_dir(vnode_t *sdvpp) +{ vnode_t dvp = NULLVP; vnode_t sdvp = NULLVP; struct componentname cn; @@ -1086,28 +1101,28 @@ get_shadow_dir(vnode_t *sdvpp) { bzero(tmpname, sizeof(tmpname)); MAKE_SHADOW_DIRNAME(rootvnode, tmpname); - /* - * Look up the shadow directory to ensure that it still exists. + /* + * Look up the shadow directory to ensure that it still exists. * By looking it up, we get an iocounted dvp to use, and avoid some coherency issues * in caching it when multiple threads may be trying to manipulate the pointers. 
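Further down, get_shadow_dir() refuses any directory that is not root-owned, writable by nobody, on the same fsid as /var/run, and free of hard links and ACLs. A userspace analog of that validation, sketched with lstat(2) (it omits the link-count and ACL checks, which have no direct stat equivalent):

#include <sys/stat.h>
#include <stdbool.h>

static bool
shadow_dir_ok(const char *path, dev_t expected_dev)
{
    struct stat st;

    if (lstat(path, &st) != 0 || !S_ISDIR(st.st_mode)) {
        return false;
    }
    /* Root-owned, no write bits for anyone, on the expected device. */
    return st.st_uid == 0 &&
        (st.st_mode & (S_IWUSR | S_IRWXG | S_IRWXO)) == 0 &&
        st.st_dev == expected_dev;
}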
- * + * * Make sure to use the kernel context. We want a singular view of * the shadow dir regardless of chrooted processes. */ error = vnode_lookup(tmpname, 0, &sdvp, kernelctx); if (error == 0) { /* - * If we get here, then we have successfully looked up the shadow dir, + * If we get here, then we have successfully looked up the shadow dir, * and it has an iocount from the lookup. Return the vp in the output argument. */ *sdvpp = sdvp; - return (0); + return 0; } /* In the failure case, no iocount is acquired */ sdvp = NULLVP; - bzero (tmpname, sizeof(tmpname)); + bzero(tmpname, sizeof(tmpname)); - /* + /* * Obtain the vnode for "/var/run" directory using the kernel * context. * @@ -1118,9 +1133,9 @@ get_shadow_dir(vnode_t *sdvpp) { goto out; } - /* + /* * Create the shadow stream directory. - * 'dvp' below suggests the parent directory so + * 'dvp' below suggests the parent directory so * we only need to provide the leaf entry name */ MAKE_SHADOW_DIR_LEAF(rootvnode, tmpname); @@ -1145,7 +1160,7 @@ get_shadow_dir(vnode_t *sdvpp) { va.va_vaflags = VA_EXCLUSIVE; error = VNOP_MKDIR(dvp, &sdvp, &cn, &va, kernelctx); - + /* * There can be only one winner for an exclusive create. */ @@ -1160,7 +1175,7 @@ get_shadow_dir(vnode_t *sdvpp) { /* Obtain the fsid for /var/run directory */ VATTR_INIT(&va); VATTR_WANTED(&va, va_fsid); - if (VNOP_GETATTR(dvp, &va, kernelctx) != 0 || + if (VNOP_GETATTR(dvp, &va, kernelctx) != 0 || !VATTR_IS_SUPPORTED(&va, va_fsid)) { goto baddir; } @@ -1177,16 +1192,16 @@ get_shadow_dir(vnode_t *sdvpp) { va.va_dirlinkcount = 1; va.va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE; - if (VNOP_GETATTR(sdvp, &va, kernelctx) != 0 || - !VATTR_IS_SUPPORTED(&va, va_uid) || - !VATTR_IS_SUPPORTED(&va, va_gid) || - !VATTR_IS_SUPPORTED(&va, va_mode) || + if (VNOP_GETATTR(sdvp, &va, kernelctx) != 0 || + !VATTR_IS_SUPPORTED(&va, va_uid) || + !VATTR_IS_SUPPORTED(&va, va_gid) || + !VATTR_IS_SUPPORTED(&va, va_mode) || !VATTR_IS_SUPPORTED(&va, va_fsid)) { goto baddir; } /* - * Make sure its what we want: - * - owned by root + * Make sure its what we want: + * - owned by root * - not writable by anyone * - on same file system as /var/run * - not a hard-linked directory @@ -1196,7 +1211,7 @@ get_shadow_dir(vnode_t *sdvpp) { (va.va_mode & (S_IWUSR | S_IRWXG | S_IRWXO)) || (va.va_fsid != tmp_fsid) || (va.va_dirlinkcount != 1) || - (va.va_acl != (kauth_acl_t) KAUTH_FILESEC_NONE)) { + (va.va_acl != (kauth_acl_t) KAUTH_FILESEC_NONE)) { goto baddir; } } @@ -1213,7 +1228,7 @@ out: } } *sdvpp = sdvp; - return (error); + return error; baddir: /* This is not the dir we're looking for, move along */ @@ -1226,57 +1241,56 @@ baddir: #if CONFIG_APPLEDOUBLE /* - * Default Implementation (Non-native EA) + * Default Implementation (Non-native EA) */ /* - Typical "._" AppleDouble Header File layout: - ------------------------------------------------------------ - MAGIC 0x00051607 - VERSION 0x00020000 - FILLER 0 - COUNT 2 - .-- AD ENTRY[0] Finder Info Entry (must be first) - .--+-- AD ENTRY[1] Resource Fork Entry (must be last) - | '-> FINDER INFO - | ///////////// Fixed Size Data (32 bytes) - | EXT ATTR HDR - | ///////////// - | ATTR ENTRY[0] --. - | ATTR ENTRY[1] --+--. - | ATTR ENTRY[2] --+--+--. - | ... | | | - | ATTR ENTRY[N] --+--+--+--. - | ATTR DATA 0 <-' | | | - | //////////// | | | - | ATTR DATA 1 <----' | | - | ///////////// | | - | ATTR DATA 2 <-------' | - | ///////////// | - | ... 
| - | ATTR DATA N <----------' - | ///////////// - | Attribute Free Space - | - '----> RESOURCE FORK - ///////////// Variable Sized Data - ///////////// - ///////////// - ///////////// - ///////////// - ///////////// - ... - ///////////// - - ------------------------------------------------------------ - - NOTE: The EXT ATTR HDR, ATTR ENTRY's and ATTR DATA's are - stored as part of the Finder Info. The length in the Finder - Info AppleDouble entry includes the length of the extended - attribute header, attribute entries, and attribute data. -*/ - + * Typical "._" AppleDouble Header File layout: + * ------------------------------------------------------------ + * MAGIC 0x00051607 + * VERSION 0x00020000 + * FILLER 0 + * COUNT 2 + * .-- AD ENTRY[0] Finder Info Entry (must be first) + * .--+-- AD ENTRY[1] Resource Fork Entry (must be last) + * | '-> FINDER INFO + * | ///////////// Fixed Size Data (32 bytes) + * | EXT ATTR HDR + * | ///////////// + * | ATTR ENTRY[0] --. + * | ATTR ENTRY[1] --+--. + * | ATTR ENTRY[2] --+--+--. + * | ... | | | + * | ATTR ENTRY[N] --+--+--+--. + * | ATTR DATA 0 <-' | | | + * | //////////// | | | + * | ATTR DATA 1 <----' | | + * | ///////////// | | + * | ATTR DATA 2 <-------' | + * | ///////////// | + * | ... | + * | ATTR DATA N <----------' + * | ///////////// + * | Attribute Free Space + * | + * '----> RESOURCE FORK + * ///////////// Variable Sized Data + * ///////////// + * ///////////// + * ///////////// + * ///////////// + * ///////////// + * ... + * ///////////// + * + * ------------------------------------------------------------ + * + * NOTE: The EXT ATTR HDR, ATTR ENTRY's and ATTR DATA's are + * stored as part of the Finder Info. The length in the Finder + * Info AppleDouble entry includes the length of the extended + * attribute header, attribute entries, and attribute data. + */ /* * On Disk Data Structures @@ -1296,7 +1310,7 @@ baddir: */ #define AD_DATA 1 /* Data fork */ #define AD_RESOURCE 2 /* Resource fork */ -#define AD_REALNAME 3 /* FileÕs name on home file system */ +#define AD_REALNAME 3 /* File's name on home file system */ #define AD_COMMENT 4 /* Standard Mac comment */ #define AD_ICONBW 5 /* Mac black & white icon */ #define AD_ICONCOLOR 6 /* Mac color icon */ @@ -1308,7 +1322,7 @@ baddir: #define AD_MSDOSINFO 12 /* MS-DOS file info, attributes, etc */ #define AD_AFPNAME 13 /* Short name on AFP server */ #define AD_AFPINFO 14 /* AFP file info, attrib., etc */ -#define AD_AFPDIRID 15 /* AFP directory ID */ +#define AD_AFPDIRID 15 /* AFP directory ID */ #define AD_ATTRIBUTES AD_FINDERINFO @@ -1338,20 +1352,20 @@ baddir: */ -#define FINDERINFOSIZE 32 +#define FINDERINFOSIZE 32 typedef struct apple_double_entry { - u_int32_t type; /* entry type: see list, 0 invalid */ + u_int32_t type; /* entry type: see list, 0 invalid */ u_int32_t offset; /* entry data offset from the beginning of the file. */ - u_int32_t length; /* entry data length in bytes. */ + u_int32_t length; /* entry data length in bytes. 
*/ } __attribute__((aligned(2), packed)) apple_double_entry_t; typedef struct apple_double_header { u_int32_t magic; /* == ADH_MAGIC */ - u_int32_t version; /* format version: 2 = 0x00020000 */ + u_int32_t version; /* format version: 2 = 0x00020000 */ u_int32_t filler[4]; - u_int16_t numEntries; /* number of entries which follow */ + u_int16_t numEntries; /* number of entries which follow */ apple_double_entry_t entries[2]; /* 'finfo' & 'rsrc' always exist */ u_int8_t finfo[FINDERINFOSIZE]; /* Must start with Finder Info (32 bytes) */ u_int8_t pad[2]; /* get better alignment inside attr_header */ @@ -1374,7 +1388,7 @@ typedef struct attr_header { apple_double_header_t appledouble; u_int32_t magic; /* == ATTR_HDR_MAGIC */ u_int32_t debug_tag; /* for debugging == file id of owning file */ - u_int32_t total_size; /* file offset of end of attribute header + entries + data */ + u_int32_t total_size; /* file offset of end of attribute header + entries + data */ u_int32_t data_start; /* file offset to attribute data area */ u_int32_t data_length; /* length of attribute data area */ u_int32_t reserved[3]; @@ -1431,7 +1445,7 @@ typedef struct attr_info { #define ATTR_ALIGN 3L /* Use four-byte alignment */ #define ATTR_ENTRY_LENGTH(namelen) \ - ((sizeof(attr_entry_t) - 1 + (namelen) + ATTR_ALIGN) & (~ATTR_ALIGN)) + ((sizeof(attr_entry_t) - 1 + (namelen) + ATTR_ALIGN) & (~ATTR_ALIGN)) #define ATTR_NEXT(ae) \ (attr_entry_t *)((u_int8_t *)(ae) + ATTR_ENTRY_LENGTH((ae)->namelen)) @@ -1472,8 +1486,8 @@ static int unlock_xattrfile(vnode_t xvp, vfs_context_t context); #if BYTE_ORDER == LITTLE_ENDIAN - static void swap_adhdr(apple_double_header_t *adh); - static void swap_attrhdr(attr_header_t *ah, attr_info_t* info); +static void swap_adhdr(apple_double_header_t *adh); +static void swap_attrhdr(attr_header_t *ah, attr_info_t* info); #else #define swap_adhdr(x) @@ -1498,26 +1512,28 @@ static int shift_data_up(vnode_t xvp, off_t start, size_t len, off_t delta, vfs * NOTE: Does not attempt to validate the extended attributes header that * may be embedded in the Finder Info entry. */ -static int check_and_swap_apple_double_header(attr_info_t *ainfop) +static int +check_and_swap_apple_double_header(attr_info_t *ainfop) { int i, j; u_int32_t header_end; u_int32_t entry_end; size_t rawsize; apple_double_header_t *header; - + rawsize = ainfop->rawsize; header = (apple_double_header_t *) ainfop->rawdata; - + /* Is the file big enough to contain an AppleDouble header? */ - if (rawsize < offsetof(apple_double_header_t, entries)) + if (rawsize < offsetof(apple_double_header_t, entries)) { return ENOATTR; - + } + /* Swap the AppleDouble header fields to native order */ header->magic = SWAP32(header->magic); header->version = SWAP32(header->version); header->numEntries = SWAP16(header->numEntries); - + /* Sanity check the AppleDouble header fields */ if (header->magic != ADH_MAGIC || header->version != ADH_VERSION || @@ -1525,36 +1541,36 @@ static int check_and_swap_apple_double_header(attr_info_t *ainfop) header->numEntries > 15) { return ENOATTR; } - + /* Calculate where the entries[] array ends */ header_end = offsetof(apple_double_header_t, entries) + - header->numEntries * sizeof(apple_double_entry_t); - + header->numEntries * sizeof(apple_double_entry_t); + /* Is the file big enough to contain the AppleDouble entries? 
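The validation that follows is bounds checks plus the standard half-open interval overlap test applied pairwise. Pulled out on its own (hypothetical helper, not xnu code):

#include <stdint.h>
#include <stdbool.h>

/* Ranges [a, a+alen) and [b, b+blen) overlap iff each one starts
 * before the other ends. check_and_swap_apple_double_header() applies
 * this to every earlier entry j < i; with numEntries capped below 16,
 * the O(N^2) scan is cheap. */
static bool
entries_overlap(uint32_t a, uint32_t alen, uint32_t b, uint32_t blen)
{
    return a + alen > b && b + blen > a;
}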
*/ if (rawsize < header_end) { - return ENOATTR; + return ENOATTR; } - + /* Swap and sanity check each AppleDouble entry */ - for (i=0; i<header->numEntries; i++) { + for (i = 0; i < header->numEntries; i++) { /* Swap the per-entry fields to native order */ header->entries[i].type = SWAP32(header->entries[i].type); header->entries[i].offset = SWAP32(header->entries[i].offset); header->entries[i].length = SWAP32(header->entries[i].length); - + entry_end = header->entries[i].offset + header->entries[i].length; - + /* * Does the entry's content start within the header itself, * did the addition overflow, or does the entry's content * extend past the end of the file? */ if (header->entries[i].offset < header_end || - entry_end < header->entries[i].offset || + entry_end < header->entries[i].offset || entry_end > ainfop->filesize) { return ENOATTR; } - + /* * Does the current entry's content overlap with a previous * entry's content? @@ -1564,15 +1580,15 @@ static int check_and_swap_apple_double_header(attr_info_t *ainfop) * But we have already ensured N < 16, and N is almost always 2. * So there's no point in using a more complex algorithm. */ - - for (j=0; j<i; j++) { + for (j = 0; j < i; j++) { if (entry_end > header->entries[j].offset && header->entries[j].offset + header->entries[j].length > header->entries[i].offset) { return ENOATTR; } } } - + return 0; } @@ -1583,7 +1599,7 @@ static int check_and_swap_apple_double_header(attr_info_t *ainfop) */ static int default_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, - __unused int options, vfs_context_t context) + __unused int options, vfs_context_t context) { vnode_t xvp = NULL; attr_info_t ainfo; @@ -1611,16 +1627,15 @@ default_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, } if ((error = open_xattrfile(vp, fileflags, &xvp, context))) { - return (error); + return error; } if ((error = get_xattrinfo(xvp, 0, &ainfo, context))) { close_xattrfile(xvp, fileflags, context); - return (error); + return error; } /* Get the Finder Info.
*/ if (strcmp(name, XATTR_FINDERINFO_NAME) == 0) { - if (ainfo.finderinfo == NULL || ainfo.emptyfinderinfo) { error = ENOATTR; } else if (uio == NULL) { @@ -1648,12 +1663,13 @@ default_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, } else { uio_setoffset(uio, uio_offset(uio) + ainfo.rsrcfork->offset); error = VNOP_READ(xvp, uio, 0, context); - if (error == 0) + if (error == 0) { uio_setoffset(uio, uio_offset(uio) - ainfo.rsrcfork->offset); + } } goto out; } - + if (ainfo.attrhdr == NULL || ainfo.attr_entry == NULL) { error = ENOATTR; goto out; @@ -1693,11 +1709,11 @@ default_getxattr(vnode_t vp, const char *name, uio_t uio, size_t *size, } entry = ATTR_NEXT(entry); } -out: +out: rel_xattrinfo(&ainfo); close_xattrfile(xvp, fileflags, context); - return (error); + return error; } /* @@ -1722,7 +1738,7 @@ default_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_conte int fileflags; int error; char finfo[FINDERINFOSIZE]; - + datalen = uio_resid(uio); namelen = strlen(name) + 1; entrylen = ATTR_ENTRY_LENGTH(namelen); @@ -1751,17 +1767,19 @@ default_setxattr(vnode_t vp, const char *name, uio_t uio, int options, vfs_conte return EINVAL; } error = uiomove(finfo, datalen, uio); - if (error) + if (error) { return error; - if ((options & (XATTR_CREATE|XATTR_REPLACE)) == 0 && + } + if ((options & (XATTR_CREATE | XATTR_REPLACE)) == 0 && bcmp(finfo, emptyfinfo, FINDERINFOSIZE) == 0) { error = default_removexattr(vp, name, 0, context); - if (error == ENOATTR) + if (error == ENOATTR) { error = 0; + } return error; } } - + start: /* * Open the file locked since setting an attribute @@ -1769,11 +1787,11 @@ start: */ fileflags = FREAD | FWRITE | O_EXLOCK; if ((error = open_xattrfile(vp, O_CREAT | fileflags, &xvp, context))) { - return (error); + return error; } if ((error = get_xattrinfo(xvp, ATTR_SETTING, &ainfo, context))) { close_xattrfile(xvp, fileflags, context); - return (error); + return error; } /* Set the Finder Info. */ @@ -1807,8 +1825,9 @@ start: rel_xattrinfo(&ainfo); close_xattrfile(xvp, fileflags, context); error = default_removexattr(vp, name, 0, context); - if (error == ENOATTR) + if (error == ENOATTR) { error = 0; + } return error; } if (ainfo.finderinfo) { @@ -1841,9 +1860,8 @@ start: /* attr exists, and create specified ? */ error = EEXIST; goto out; - } - } - else { + } + } else { /* Zero length AD rsrc fork */ if (options & XATTR_REPLACE) { /* attr doesn't exist (0-length), but replace specified ? */ @@ -1851,8 +1869,7 @@ start: goto out; } } - } - else { + } else { /* We can't do much if we somehow didn't get an AD rsrc pointer */ error = ENOATTR; goto out; @@ -1861,8 +1878,9 @@ start: endoffset = uio_resid(uio) + uio_offset(uio); /* new size */ uio_setoffset(uio, uio_offset(uio) + ainfo.rsrcfork->offset); error = VNOP_WRITE(xvp, uio, 0, context); - if (error) + if (error) { goto out; + } uio_setoffset(uio, uio_offset(uio) - ainfo.rsrcfork->offset); if (endoffset > ainfo.rsrcfork->length) { ainfo.rsrcfork->length = endoffset; @@ -1874,7 +1892,7 @@ start: } if (datalen > ATTR_MAX_SIZE) { - return (E2BIG); /* EINVAL instead ? */ + return E2BIG; /* EINVAL instead ? */ } if (ainfo.attrhdr == NULL) { @@ -1885,11 +1903,12 @@ start: entry = ainfo.attr_entry; /* Check if data area crosses the maximum header size. 
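The split decision below chooses between writing the value in-line with the header and issuing separate file I/O for it. Restated compactly (the header ceiling is an assumed stand-in for ATTR_MAX_HDR_SIZE, whose value this diff does not show):

#include <stdint.h>

#define ASSUMED_MAX_HDR_SIZE (64 * 1024) /* stand-in for ATTR_MAX_HDR_SIZE */

/* Nonzero: entries plus in-line data would overflow the header area,
 * so the value bytes must be written with separate I/O ("splitdata"). */
static int
needs_split_io(uint32_t data_start, uint32_t data_length,
    uint32_t entrylen, uint32_t datalen)
{
    return (data_start + data_length + entrylen + datalen)
           > ASSUMED_MAX_HDR_SIZE;
}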
*/ - if ((header->data_start + header->data_length + entrylen + datalen) > ATTR_MAX_HDR_SIZE) + if ((header->data_start + header->data_length + entrylen + datalen) > ATTR_MAX_HDR_SIZE) { splitdata = 1; /* do data I/O separately */ - else + } else { splitdata = 0; - + } + /* * See if attribute already exists. */ @@ -1917,8 +1936,9 @@ start: } else { attrdata = (u_int8_t *)header + entry->offset; error = uiomove((caddr_t)attrdata, datalen, uio); - if (error) + if (error) { goto out; + } ainfo.iosize = ainfo.attrhdr->data_start + ainfo.attrhdr->data_length; error = write_xattrinfo(&ainfo); if (error) { @@ -1935,13 +1955,12 @@ start: close_xattrfile(xvp, fileflags, context); error = default_removexattr(vp, name, options, context); if (error) { - return (error); + return error; } /* Clear XATTR_REPLACE option since we just removed the attribute. */ options &= ~XATTR_REPLACE; goto start; /* start over */ } - } if (options & XATTR_REPLACE) { @@ -1961,7 +1980,7 @@ start: size_t growsize; growsize = roundup((datalen + entrylen) - datafreespace, ATTR_BUF_SIZE); - + /* Clip roundup size when we can still fit in ATTR_MAX_HDR_SIZE. */ if (!splitdata && (header->total_size + growsize) > ATTR_MAX_HDR_SIZE) { growsize = ATTR_MAX_HDR_SIZE - header->total_size; @@ -1972,8 +1991,9 @@ start: if (error) { printf("setxattr: VNOP_TRUNCATE error %d\n", error); } - if (error) + if (error) { goto out; + } /* * Move the resource fork out of the way. @@ -1981,9 +2001,9 @@ start: if (ainfo.rsrcfork) { if (ainfo.rsrcfork->length != 0) { shift_data_down(xvp, - ainfo.rsrcfork->offset, - ainfo.rsrcfork->length, - growsize, context); + ainfo.rsrcfork->offset, + ainfo.rsrcfork->length, + growsize, context); } ainfo.rsrcfork->offset += growsize; } @@ -1994,13 +2014,13 @@ start: /* Make space for a new entry. */ if (splitdata) { shift_data_down(xvp, - header->data_start, - header->data_length, - entrylen, context); + header->data_start, + header->data_length, + entrylen, context); } else { bcopy((u_int8_t *)header + header->data_start, - (u_int8_t *)header + header->data_start + entrylen, - header->data_length); + (u_int8_t *)header + header->data_start + entrylen, + header->data_length); } header->data_start += entrylen; @@ -2009,7 +2029,7 @@ start: for (entry = ainfo.attr_entry; entry != lastentry && ATTR_VALID(entry, ainfo); entry = ATTR_NEXT(entry)) { entry->offset += entrylen; } - + /* * If the attribute data area is entirely within * the header buffer, then just update the buffer, @@ -2029,7 +2049,7 @@ start: } } else { attrdata = (u_int8_t *)header + header->data_start + header->data_length; - + error = uiomove((caddr_t)attrdata, datalen, uio); if (error) { printf("setxattr: uiomove error %d\n", error); @@ -2052,7 +2072,7 @@ start: /* Only write the entries, since the data was written separately. */ ainfo.iosize = ainfo.attrhdr->data_start; } else { - /* The entry and data are both in the header; write them together. */ + /* The entry and data are both in the header; write them together. 
*/ ainfo.iosize = ainfo.attrhdr->data_start + ainfo.attrhdr->data_length; } error = write_xattrinfo(&ainfo); @@ -2060,7 +2080,7 @@ start: printf("setxattr: write_xattrinfo error %d\n", error); } -out: +out: rel_xattrinfo(&ainfo); close_xattrfile(xvp, fileflags, context); @@ -2077,10 +2097,10 @@ out: (void) vnode_setattr(vp, &va, context); } } - + post_event_if_success(vp, error, NOTE_ATTRIB); - return (error); + return error; } @@ -2122,18 +2142,21 @@ default_removexattr(vnode_t vp, const char *name, __unused int options, vfs_cont } if ((error = open_xattrfile(vp, fileflags, &xvp, context))) { - return (error); + return error; } if ((error = get_xattrinfo(xvp, 0, &ainfo, context))) { close_xattrfile(xvp, fileflags, context); - return (error); + return error; } - if (ainfo.attrhdr) + if (ainfo.attrhdr) { attrcount += ainfo.attrhdr->num_attrs; - if (ainfo.rsrcfork) + } + if (ainfo.rsrcfork) { ++attrcount; - if (ainfo.finderinfo && !ainfo.emptyfinderinfo) + } + if (ainfo.finderinfo && !ainfo.emptyfinderinfo) { ++attrcount; + } /* Clear the Finder Info. */ if (strncmp(name, XATTR_FINDERINFO_NAME, sizeof(XATTR_FINDERINFO_NAME)) == 0) { @@ -2142,8 +2165,9 @@ default_removexattr(vnode_t vp, const char *name, __unused int options, vfs_cont goto out; } /* On removal of last attribute the ._ file is removed. */ - if (--attrcount == 0) + if (--attrcount == 0) { goto out; + } attrdata = (u_int8_t *)ainfo.filehdr + ainfo.finderinfo->offset; bzero((caddr_t)attrdata, FINDERINFOSIZE); ainfo.iosize = sizeof(attr_header_t); @@ -2162,8 +2186,9 @@ default_removexattr(vnode_t vp, const char *name, __unused int options, vfs_cont goto out; } /* On removal of last attribute the ._ file is removed. */ - if (--attrcount == 0) + if (--attrcount == 0) { goto out; + } /* * XXX * If the resource fork isn't the last AppleDouble @@ -2196,8 +2221,9 @@ default_removexattr(vnode_t vp, const char *name, __unused int options, vfs_cont for (i = 0; i < header->num_attrs && ATTR_VALID(entry, ainfo); i++) { if (strncmp((const char *)entry->name, name, namelen) == 0) { found = 1; - if ((i+1) == header->num_attrs) + if ((i + 1) == header->num_attrs) { lastone = 1; + } break; } entry = ATTR_NEXT(entry); @@ -2207,51 +2233,51 @@ default_removexattr(vnode_t vp, const char *name, __unused int options, vfs_cont goto out; } /* On removal of last attribute the ._ file is removed. */ - if (--attrcount == 0) + if (--attrcount == 0) { goto out; + } datalen = entry->length; dataoff = entry->offset; entrylen = ATTR_ENTRY_LENGTH(namelen); - if ((header->data_start + header->data_length) > ATTR_MAX_HDR_SIZE) + if ((header->data_start + header->data_length) > ATTR_MAX_HDR_SIZE) { splitdata = 1; - else + } else { splitdata = 0; + } /* Remove the attribute entry. */ if (!lastone) { bcopy((u_int8_t *)entry + entrylen, (u_int8_t *)entry, - ((size_t)header + header->data_start) - ((size_t)entry + entrylen)); + ((size_t)header + header->data_start) - ((size_t)entry + entrylen)); } /* Adjust the attribute data. */ if (splitdata) { shift_data_up(xvp, - header->data_start, - dataoff - header->data_start, - entrylen, - context); + header->data_start, + dataoff - header->data_start, + entrylen, + context); if (!lastone) { shift_data_up(xvp, - dataoff + datalen, - (header->data_start + header->data_length) - (dataoff + datalen), - datalen + entrylen, - context); + dataoff + datalen, + (header->data_start + header->data_length) - (dataoff + datalen), + datalen + entrylen, + context); } /* XXX write zeros to freed space ? 
*/ ainfo.iosize = ainfo.attrhdr->data_start - entrylen; } else { - - bcopy((u_int8_t *)header + header->data_start, - (u_int8_t *)header + header->data_start - entrylen, - dataoff - header->data_start); + (u_int8_t *)header + header->data_start - entrylen, + dataoff - header->data_start); if (!lastone) { bcopy((u_int8_t *)header + dataoff + datalen, - (u_int8_t *)header + dataoff - entrylen, - (header->data_start + header->data_length) - (dataoff + datalen)); + (u_int8_t *)header + dataoff - entrylen, + (header->data_start + header->data_length) - (dataoff + datalen)); } - bzero (((u_int8_t *)header + header->data_start + header->data_length) - (datalen + entrylen), (datalen + entrylen)); + bzero(((u_int8_t *)header + header->data_start + header->data_length) - (datalen + entrylen), (datalen + entrylen)); ainfo.iosize = ainfo.attrhdr->data_start + ainfo.attrhdr->data_length; } @@ -2264,8 +2290,9 @@ default_removexattr(vnode_t vp, const char *name, __unused int options, vfs_cont entry = ainfo.attr_entry; for (i = 0; i < header->num_attrs && ATTR_VALID(entry, ainfo); i++) { entry->offset -= entrylen; - if (entry >= oldslot) + if (entry >= oldslot) { entry->offset -= datalen; + } entry = ATTR_NEXT(entry); } error = write_xattrinfo(&ainfo); @@ -2277,8 +2304,9 @@ out: /* When there are no more attributes remove the ._ file. */ if (attrcount == 0) { - if (fileflags & O_EXLOCK) + if (fileflags & O_EXLOCK) { (void) unlock_xattrfile(xvp, context); + } VNOP_CLOSE(xvp, fileflags, context); vnode_rele(xvp); error = remove_xattrfile(xvp, context); @@ -2302,8 +2330,7 @@ out: post_event_if_success(vp, error, NOTE_ATTRIB); - return (error); - + return error; } @@ -2326,15 +2353,17 @@ default_listxattr(vnode_t vp, uio_t uio, size_t *size, __unused int options, vfs */ if ((error = open_xattrfile(vp, FREAD, &xvp, context))) { - if (error == ENOATTR) + if (error == ENOATTR) { error = 0; - return (error); + } + return error; } if ((error = get_xattrinfo(xvp, 0, &ainfo, context))) { - if (error == ENOATTR) + if (error == ENOATTR) { error = 0; + } close_xattrfile(xvp, FREAD, context); - return (error); + return error; } /* Check for Finder Info. 
*/ @@ -2346,7 +2375,7 @@ default_listxattr(vnode_t vp, uio_t uio, size_t *size, __unused int options, vfs goto out; } else { error = uiomove(XATTR_FINDERINFO_NAME, - sizeof(XATTR_FINDERINFO_NAME), uio); + sizeof(XATTR_FINDERINFO_NAME), uio); if (error) { error = ERANGE; goto out; @@ -2363,7 +2392,7 @@ default_listxattr(vnode_t vp, uio_t uio, size_t *size, __unused int options, vfs goto out; } else { error = uiomove(XATTR_RESOURCEFORK_NAME, - sizeof(XATTR_RESOURCEFORK_NAME), uio); + sizeof(XATTR_RESOURCEFORK_NAME), uio); if (error) { error = ERANGE; goto out; @@ -2377,8 +2406,8 @@ default_listxattr(vnode_t vp, uio_t uio, size_t *size, __unused int options, vfs for (i = 0, entry = ainfo.attr_entry; i < count && ATTR_VALID(entry, ainfo); i++) { if (xattr_protected((const char *)entry->name) || ((entry->namelen < XATTR_MAXNAMELEN) && - (entry->name[entry->namelen] == '\0') && - (xattr_validatename((const char *)entry->name) != 0))) { + (entry->name[entry->namelen] == '\0') && + (xattr_validatename((const char *)entry->name) != 0))) { entry = ATTR_NEXT(entry); continue; } @@ -2393,18 +2422,19 @@ default_listxattr(vnode_t vp, uio_t uio, size_t *size, __unused int options, vfs } error = uiomove((caddr_t) entry->name, entry->namelen, uio); if (error) { - if (error != EFAULT) + if (error != EFAULT) { error = ERANGE; + } break; - } + } entry = ATTR_NEXT(entry); } } -out: +out: rel_xattrinfo(&ainfo); close_xattrfile(xvp, FREAD, context); - return (error); + return error; } static int @@ -2431,11 +2461,11 @@ open_xattrfile(vnode_t vp, int fileflags, vnode_t *xvpp, vfs_context_t context) dvp = vp; /* the "._." file resides in the root dir */ goto lookup; } - if ( (dvp = vnode_getparent(vp)) == NULLVP) { + if ((dvp = vnode_getparent(vp)) == NULLVP) { error = ENOATTR; goto out; } - if ( (basename = vnode_getname(vp)) == NULL) { + if ((basename = vnode_getname(vp)) == NULL) { error = ENOATTR; goto out; } @@ -2463,8 +2493,8 @@ open_xattrfile(vnode_t vp, int fileflags, vnode_t *xvpp, vfs_context_t context) */ lookup: NDINIT(&nd, LOOKUP, OP_OPEN, LOCKLEAF | NOFOLLOW | USEDVP | DONOTAUTH, - UIO_SYSSPACE, CAST_USER_ADDR_T(filename), context); - nd.ni_dvp = dvp; + UIO_SYSSPACE, CAST_USER_ADDR_T(filename), context); + nd.ni_dvp = dvp; if (fileflags & O_CREAT) { nd.ni_cnd.cn_nameiop = CREATE; @@ -2474,16 +2504,16 @@ lookup: if (dvp != vp) { nd.ni_cnd.cn_flags |= LOCKPARENT; } - if ( (error = namei(&nd))) { - nd.ni_dvp = NULLVP; + if ((error = namei(&nd))) { + nd.ni_dvp = NULLVP; error = ENOATTR; goto out; } - if ( (xvp = nd.ni_vp) == NULLVP) { + if ((xvp = nd.ni_vp) == NULLVP) { uid_t uid; gid_t gid; mode_t umode; - + /* * Pick up uid/gid/mode from target file. 
*/ @@ -2491,49 +2521,53 @@ lookup: VATTR_WANTED(&va, va_uid); VATTR_WANTED(&va, va_gid); VATTR_WANTED(&va, va_mode); - if (VNOP_GETATTR(vp, &va, context) == 0 && - VATTR_IS_SUPPORTED(&va, va_uid) && - VATTR_IS_SUPPORTED(&va, va_gid) && + if (VNOP_GETATTR(vp, &va, context) == 0 && + VATTR_IS_SUPPORTED(&va, va_uid) && + VATTR_IS_SUPPORTED(&va, va_gid) && VATTR_IS_SUPPORTED(&va, va_mode)) { uid = va.va_uid; gid = va.va_gid; - umode = va.va_mode & (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH); - } else /* fallback values */ { + umode = va.va_mode & (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH); + } else { /* fallback values */ uid = KAUTH_UID_NONE; gid = KAUTH_GID_NONE; - umode = S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH; + umode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; } VATTR_INIT(&va); VATTR_SET(&va, va_type, VREG); VATTR_SET(&va, va_mode, umode); - if (uid != KAUTH_UID_NONE) + if (uid != KAUTH_UID_NONE) { VATTR_SET(&va, va_uid, uid); - if (gid != KAUTH_GID_NONE) + } + if (gid != KAUTH_GID_NONE) { VATTR_SET(&va, va_gid, gid); + } error = vn_create(dvp, &nd.ni_vp, &nd, &va, - VN_CREATE_NOAUTH | VN_CREATE_NOINHERIT | VN_CREATE_NOLABEL, - 0, NULL, - context); - if (error) + VN_CREATE_NOAUTH | VN_CREATE_NOINHERIT | VN_CREATE_NOLABEL, + 0, NULL, + context); + if (error) { error = ENOATTR; - else + } else { xvp = nd.ni_vp; + } } nameidone(&nd); if (dvp != vp) { vnode_put(dvp); /* drop iocount from LOCKPARENT request above */ } - if (error) - goto out; + if (error) { + goto out; + } } else { if ((error = namei(&nd))) { nd.ni_dvp = NULLVP; error = ENOATTR; goto out; } - xvp = nd.ni_vp; + xvp = nd.ni_vp; nameidone(&nd); } nd.ni_dvp = NULLVP; @@ -2557,8 +2591,8 @@ lookup: goto out; } } - - if ( (error = VNOP_OPEN(xvp, fileflags & ~(O_EXLOCK | O_SHLOCK), context))) { + + if ((error = VNOP_OPEN(xvp, fileflags & ~(O_EXLOCK | O_SHLOCK), context))) { error = ENOATTR; goto out; } @@ -2575,11 +2609,11 @@ lookup: VATTR_WANTED(&va, va_data_size); VATTR_WANTED(&va, va_fileid); VATTR_WANTED(&va, va_nlink); - if ( (error = vnode_getattr(xvp, &va, context)) != 0) { + if ((error = vnode_getattr(xvp, &va, context)) != 0) { error = EPERM; goto out; } - + /* If the file is empty then add a default header. */ if (va.va_data_size == 0) { /* Don't adopt hard-linked "._" files. */ @@ -2587,18 +2621,20 @@ lookup: error = EPERM; goto out; } - if ( (error = create_xattrfile(xvp, (u_int32_t)va.va_fileid, context))) + if ((error = create_xattrfile(xvp, (u_int32_t)va.va_fileid, context))) { goto out; + } } } - /* Apply file locking if requested. */ + /* Apply file locking if requested. */ if (fileflags & (O_EXLOCK | O_SHLOCK)) { short locktype; locktype = (fileflags & O_EXLOCK) ? 
F_WRLCK : F_RDLCK; error = lock_xattrfile(xvp, locktype, context); - if (error) + if (error) { error = ENOATTR; + } } out: if (error) { @@ -2609,7 +2645,7 @@ out: if (fileflags & O_CREAT) { /* Delete the xattr file if we encountered any errors */ - (void) remove_xattrfile (xvp, context); + (void) remove_xattrfile(xvp, context); } if (referenced) { @@ -2634,7 +2670,7 @@ out: } *xvpp = xvp; /* return a referenced vnode */ - return (error); + return error; } static void @@ -2643,8 +2679,9 @@ close_xattrfile(vnode_t xvp, int fileflags, vfs_context_t context) // if (fileflags & FWRITE) // (void) VNOP_FSYNC(xvp, MNT_WAIT, context); - if (fileflags & (O_EXLOCK | O_SHLOCK)) + if (fileflags & (O_EXLOCK | O_SHLOCK)) { (void) unlock_xattrfile(xvp, context); + } (void) VNOP_CLOSE(xvp, fileflags, context); (void) vnode_rele(xvp); @@ -2661,22 +2698,23 @@ remove_xattrfile(vnode_t xvp, vfs_context_t context) int error = 0; MALLOC_ZONE(path, char *, MAXPATHLEN, M_NAMEI, M_WAITOK); - if (path == NULL) + if (path == NULL) { return ENOMEM; + } pathlen = MAXPATHLEN; error = vn_getpath(xvp, path, &pathlen); if (error) { FREE_ZONE(path, MAXPATHLEN, M_NAMEI); - return (error); + return error; } NDINIT(&nd, DELETE, OP_UNLINK, LOCKPARENT | NOFOLLOW | DONOTAUTH, - UIO_SYSSPACE, CAST_USER_ADDR_T(path), context); + UIO_SYSSPACE, CAST_USER_ADDR_T(path), context); error = namei(&nd); FREE_ZONE(path, MAXPATHLEN, M_NAMEI); if (error) { - return (error); + return error; } dvp = nd.ni_dvp; xvp = nd.ni_vp; @@ -2686,7 +2724,7 @@ remove_xattrfile(vnode_t xvp, vfs_context_t context) vnode_put(dvp); vnode_put(xvp); - return (error); + return error; } /* @@ -2731,10 +2769,11 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte ainfop->filesize = va.va_data_size; /* When setting attributes, allow room for the header to grow. */ - if (setting) + if (setting) { iosize = ATTR_MAX_HDR_SIZE; - else + } else { iosize = MIN(ATTR_MAX_HDR_SIZE, ainfop->filesize); + } if (iosize == 0) { error = ENOATTR; @@ -2742,7 +2781,7 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte } ainfop->iosize = iosize; MALLOC(buffer, void *, iosize, M_TEMP, M_WAITOK); - if (buffer == NULL){ + if (buffer == NULL) { error = ENOMEM; goto bail; } @@ -2757,13 +2796,14 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte } ainfop->rawsize = iosize - uio_resid(auio); ainfop->rawdata = (u_int8_t *)buffer; - + filehdr = (apple_double_header_t *)buffer; error = check_and_swap_apple_double_header(ainfop); - if (error) + if (error) { goto bail; - + } + ainfop->filehdr = filehdr; /* valid AppleDouble header */ /* rel_xattrinfo is responsible for freeing the header buffer */ @@ -2775,7 +2815,7 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte filehdr->entries[i].length >= FINDERINFOSIZE) { /* We found the Finder Info entry. */ ainfop->finderinfo = &filehdr->entries[i]; - + /* * Is the Finder Info "empty" (all zeroes)? If so, * we'll pretend like the Finder Info extended attribute @@ -2798,9 +2838,10 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte * we need to remember the resource fork entry so it can be * updated once the new content has been written. */ - if (filehdr->entries[i].length == 0 && !setting) + if (filehdr->entries[i].length == 0 && !setting) { continue; - + } + /* * Check to see if any "empty" resource fork is ours (i.e. is ignorable). 
* @@ -2833,7 +2874,7 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte continue; } } - + /* * See if this file looks like it is laid out correctly to contain * extended attributes. If so, then do the following: @@ -2859,36 +2900,36 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte if (setting && ainfop->finderinfo->length == FINDERINFOSIZE) { size_t delta; size_t writesize; - + delta = ATTR_BUF_SIZE - (filehdr->entries[0].offset + FINDERINFOSIZE); if (ainfop->rsrcfork && filehdr->entries[1].length) { /* Make some room before existing resource fork. */ shift_data_down(xvp, - filehdr->entries[1].offset, - filehdr->entries[1].length, - delta, context); + filehdr->entries[1].offset, + filehdr->entries[1].length, + delta, context); writesize = sizeof(attr_header_t); } else { /* Create a new, empty resource fork. */ rsrcfork_header_t *rsrcforkhdr; - + vnode_setsize(xvp, filehdr->entries[1].offset + delta, 0, context); - + /* Steal some space for an empty RF header. */ delta -= sizeof(rsrcfork_header_t); - + bzero(&attrhdr->appledouble.pad[0], delta); rsrcforkhdr = (rsrcfork_header_t *)((char *)filehdr + filehdr->entries[1].offset + delta); - + /* Fill in Empty Resource Fork Header. */ init_empty_resource_fork(rsrcforkhdr); - + filehdr->entries[1].length = sizeof(rsrcfork_header_t); writesize = ATTR_BUF_SIZE; } filehdr->entries[0].length += delta; filehdr->entries[1].offset += delta; - + /* Fill in Attribute Header. */ attrhdr->magic = ATTR_HDR_MAGIC; attrhdr->debug_tag = (u_int32_t)va.va_fileid; @@ -2900,15 +2941,15 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte attrhdr->reserved[2] = 0; attrhdr->flags = 0; attrhdr->num_attrs = 0; - + /* Push out new header */ uio_reset(auio, 0, UIO_SYSSPACE, UIO_WRITE); uio_addiov(auio, (uintptr_t)filehdr, writesize); - - swap_adhdr(filehdr); /* to big endian */ - swap_attrhdr(attrhdr, ainfop); /* to big endian */ + + swap_adhdr(filehdr); /* to big endian */ + swap_attrhdr(attrhdr, ainfop); /* to big endian */ error = VNOP_WRITE(xvp, auio, 0, context); - swap_adhdr(filehdr); /* back to native */ + swap_adhdr(filehdr); /* back to native */ /* The attribute header gets swapped below. */ } } @@ -2927,8 +2968,8 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte * header was found. */ if (ainfop->finderinfo && - ainfop->finderinfo == &filehdr->entries[0] && - ainfop->finderinfo->length >= (sizeof(attr_header_t) - sizeof(apple_double_header_t))) { + ainfop->finderinfo == &filehdr->entries[0] && + ainfop->finderinfo->length >= (sizeof(attr_header_t) - sizeof(apple_double_header_t))) { attr_header_t *attrhdr = (attr_header_t*)filehdr; if ((error = check_and_swap_attrhdr(attrhdr, ainfop)) == 0) { @@ -2940,11 +2981,13 @@ get_xattrinfo(vnode_t xvp, int setting, attr_info_t *ainfop, vfs_context_t conte error = 0; bail: - if (auio != NULL) + if (auio != NULL) { uio_free(auio); - if (buffer != NULL) + } + if (buffer != NULL) { FREE(buffer, M_TEMP); - return (error); + } + return error; } @@ -2968,22 +3011,22 @@ create_xattrfile(vnode_t xvp, u_int32_t fileid, vfs_context_t context) rsrcforkhdr = (rsrcfork_header_t *) ((char *)buffer + ATTR_BUF_SIZE - rsrcforksize); /* Fill in Apple Double Header. 
*/ - xah->appledouble.magic = SWAP32 (ADH_MAGIC); - xah->appledouble.version = SWAP32 (ADH_VERSION); - xah->appledouble.numEntries = SWAP16 (2); - xah->appledouble.entries[0].type = SWAP32 (AD_FINDERINFO); - xah->appledouble.entries[0].offset = SWAP32 (offsetof(apple_double_header_t, finfo)); - xah->appledouble.entries[0].length = SWAP32 (ATTR_BUF_SIZE - offsetof(apple_double_header_t, finfo) - rsrcforksize); - xah->appledouble.entries[1].type = SWAP32 (AD_RESOURCE); - xah->appledouble.entries[1].offset = SWAP32 (ATTR_BUF_SIZE - rsrcforksize); - xah->appledouble.entries[1].length = SWAP32 (rsrcforksize); + xah->appledouble.magic = SWAP32(ADH_MAGIC); + xah->appledouble.version = SWAP32(ADH_VERSION); + xah->appledouble.numEntries = SWAP16(2); + xah->appledouble.entries[0].type = SWAP32(AD_FINDERINFO); + xah->appledouble.entries[0].offset = SWAP32(offsetof(apple_double_header_t, finfo)); + xah->appledouble.entries[0].length = SWAP32(ATTR_BUF_SIZE - offsetof(apple_double_header_t, finfo) - rsrcforksize); + xah->appledouble.entries[1].type = SWAP32(AD_RESOURCE); + xah->appledouble.entries[1].offset = SWAP32(ATTR_BUF_SIZE - rsrcforksize); + xah->appledouble.entries[1].length = SWAP32(rsrcforksize); bcopy(ADH_MACOSX, xah->appledouble.filler, sizeof(xah->appledouble.filler)); /* Fill in Attribute Header. */ - xah->magic = SWAP32 (ATTR_HDR_MAGIC); - xah->debug_tag = SWAP32 (fileid); - xah->total_size = SWAP32 (ATTR_BUF_SIZE - rsrcforksize); - xah->data_start = SWAP32 (sizeof(attr_header_t)); + xah->magic = SWAP32(ATTR_HDR_MAGIC); + xah->debug_tag = SWAP32(fileid); + xah->total_size = SWAP32(ATTR_BUF_SIZE - rsrcforksize); + xah->data_start = SWAP32(sizeof(attr_header_t)); /* Fill in Empty Resource Fork Header. */ init_empty_resource_fork(rsrcforkhdr); @@ -2999,22 +3042,22 @@ create_xattrfile(vnode_t xvp, u_int32_t fileid, vfs_context_t context) uio_free(auio); FREE(buffer, M_TEMP); - return (error); + return error; } static void init_empty_resource_fork(rsrcfork_header_t * rsrcforkhdr) { bzero(rsrcforkhdr, sizeof(rsrcfork_header_t)); - rsrcforkhdr->fh_DataOffset = SWAP32 (RF_FIRST_RESOURCE); - rsrcforkhdr->fh_MapOffset = SWAP32 (RF_FIRST_RESOURCE); - rsrcforkhdr->fh_MapLength = SWAP32 (RF_NULL_MAP_LENGTH); - rsrcforkhdr->mh_DataOffset = SWAP32 (RF_FIRST_RESOURCE); - rsrcforkhdr->mh_MapOffset = SWAP32 (RF_FIRST_RESOURCE); - rsrcforkhdr->mh_MapLength = SWAP32 (RF_NULL_MAP_LENGTH); - rsrcforkhdr->mh_Types = SWAP16 (RF_NULL_MAP_LENGTH - 2 ); - rsrcforkhdr->mh_Names = SWAP16 (RF_NULL_MAP_LENGTH); - rsrcforkhdr->typeCount = SWAP16 (-1); + rsrcforkhdr->fh_DataOffset = SWAP32(RF_FIRST_RESOURCE); + rsrcforkhdr->fh_MapOffset = SWAP32(RF_FIRST_RESOURCE); + rsrcforkhdr->fh_MapLength = SWAP32(RF_NULL_MAP_LENGTH); + rsrcforkhdr->mh_DataOffset = SWAP32(RF_FIRST_RESOURCE); + rsrcforkhdr->mh_MapOffset = SWAP32(RF_FIRST_RESOURCE); + rsrcforkhdr->mh_MapLength = SWAP32(RF_NULL_MAP_LENGTH); + rsrcforkhdr->mh_Types = SWAP16(RF_NULL_MAP_LENGTH - 2 ); + rsrcforkhdr->mh_Names = SWAP16(RF_NULL_MAP_LENGTH); + rsrcforkhdr->typeCount = SWAP16(-1); bcopy(RF_EMPTY_TAG, rsrcforkhdr->systemData, sizeof(RF_EMPTY_TAG)); } @@ -3045,14 +3088,14 @@ write_xattrinfo(attr_info_t *ainfop) if (ainfop->attrhdr != NULL) { swap_attrhdr(ainfop->attrhdr, ainfop); } - uio_free(auio); + uio_free(auio); - return (error); + return error; } #if BYTE_ORDER == LITTLE_ENDIAN /* - * Endian swap apple double header + * Endian swap apple double header */ static void swap_adhdr(apple_double_header_t *adh) @@ -3062,19 +3105,19 @@ 
swap_adhdr(apple_double_header_t *adh) count = (adh->magic == ADH_MAGIC) ? adh->numEntries : SWAP16(adh->numEntries); - adh->magic = SWAP32 (adh->magic); - adh->version = SWAP32 (adh->version); - adh->numEntries = SWAP16 (adh->numEntries); + adh->magic = SWAP32(adh->magic); + adh->version = SWAP32(adh->version); + adh->numEntries = SWAP16(adh->numEntries); for (i = 0; i < count; i++) { - adh->entries[i].type = SWAP32 (adh->entries[i].type); - adh->entries[i].offset = SWAP32 (adh->entries[i].offset); - adh->entries[i].length = SWAP32 (adh->entries[i].length); + adh->entries[i].type = SWAP32(adh->entries[i].type); + adh->entries[i].offset = SWAP32(adh->entries[i].offset); + adh->entries[i].length = SWAP32(adh->entries[i].length); } } /* - * Endian swap extended attributes header + * Endian swap extended attributes header */ static void swap_attrhdr(attr_header_t *ah, attr_info_t* info) @@ -3085,19 +3128,19 @@ swap_attrhdr(attr_header_t *ah, attr_info_t* info) count = (ah->magic == ATTR_HDR_MAGIC) ? ah->num_attrs : SWAP16(ah->num_attrs); - ah->magic = SWAP32 (ah->magic); - ah->debug_tag = SWAP32 (ah->debug_tag); - ah->total_size = SWAP32 (ah->total_size); - ah->data_start = SWAP32 (ah->data_start); - ah->data_length = SWAP32 (ah->data_length); - ah->flags = SWAP16 (ah->flags); - ah->num_attrs = SWAP16 (ah->num_attrs); + ah->magic = SWAP32(ah->magic); + ah->debug_tag = SWAP32(ah->debug_tag); + ah->total_size = SWAP32(ah->total_size); + ah->data_start = SWAP32(ah->data_start); + ah->data_length = SWAP32(ah->data_length); + ah->flags = SWAP16(ah->flags); + ah->num_attrs = SWAP16(ah->num_attrs); ae = (attr_entry_t *)(&ah[1]); for (i = 0; i < count && ATTR_VALID(ae, *info); i++, ae = ATTR_NEXT(ae)) { - ae->offset = SWAP32 (ae->offset); - ae->length = SWAP32 (ae->length); - ae->flags = SWAP16 (ae->flags); + ae->offset = SWAP32(ae->offset); + ae->length = SWAP32(ae->length); + ae->flags = SWAP16(ae->flags); } } #endif @@ -3119,20 +3162,22 @@ check_and_swap_attrhdr(attr_header_t *ah, attr_info_t *ainfop) int count; int i; - if (ah == NULL) + if (ah == NULL) { return EINVAL; + } - if (SWAP32(ah->magic) != ATTR_HDR_MAGIC) + if (SWAP32(ah->magic) != ATTR_HDR_MAGIC) { return EINVAL; - + } + /* Swap the basic header fields */ - ah->magic = SWAP32(ah->magic); - ah->debug_tag = SWAP32 (ah->debug_tag); - ah->total_size = SWAP32 (ah->total_size); - ah->data_start = SWAP32 (ah->data_start); - ah->data_length = SWAP32 (ah->data_length); - ah->flags = SWAP16 (ah->flags); - ah->num_attrs = SWAP16 (ah->num_attrs); + ah->magic = SWAP32(ah->magic); + ah->debug_tag = SWAP32(ah->debug_tag); + ah->total_size = SWAP32(ah->total_size); + ah->data_start = SWAP32(ah->data_start); + ah->data_length = SWAP32(ah->data_length); + ah->flags = SWAP16(ah->flags); + ah->num_attrs = SWAP16(ah->num_attrs); /* * Make sure the total_size fits within the Finder Info area, and the @@ -3144,37 +3189,40 @@ check_and_swap_attrhdr(attr_header_t *ah, attr_info_t *ainfop) end > ah->total_size) { return EINVAL; } - + /* * Make sure each of the attr_entry_t's fits within total_size. */ buf_end = ainfop->rawdata + ah->total_size; count = ah->num_attrs; ae = (attr_entry_t *)(&ah[1]); - - for (i=0; i<count; i++) { + for (i = 0; i < count; i++) { /* Make sure the fixed-size part of this attr_entry_t fits. */ - if ((u_int8_t *) &ae[1] > buf_end) + if ((u_int8_t *) &ae[1] > buf_end) { return EINVAL; - + } + /* Make sure the variable-length name fits (+1 is for NUL terminator) */ /* TODO: Make sure namelen matches strnlen(name,namelen+1)?
*/ - if (&ae->name[ae->namelen+1] > buf_end) + if (&ae->name[ae->namelen + 1] > buf_end) { return EINVAL; - + } + /* Swap the attribute entry fields */ - ae->offset = SWAP32(ae->offset); - ae->length = SWAP32(ae->length); - ae->flags = SWAP16(ae->flags); - + ae->offset = SWAP32(ae->offset); + ae->length = SWAP32(ae->length); + ae->flags = SWAP16(ae->flags); + /* Make sure the attribute content fits. */ end = ae->offset + ae->length; - if (end < ae->offset || end > ah->total_size) + if (end < ae->offset || end > ah->total_size) { return EINVAL; - + } + ae = ATTR_NEXT(ae); } - + /* * TODO: Make sure the contents of attributes don't overlap the header * and don't overlap each other. The hard part is that we don't know @@ -3206,11 +3254,11 @@ shift_data_down(vnode_t xvp, off_t start, size_t len, off_t delta, vfs_context_t off_t pos; kauth_cred_t ucred = vfs_context_ucred(context); proc_t p = vfs_context_proc(context); - + if (delta == 0 || len == 0) { return 0; } - + chunk = 4096; if (len < chunk) { chunk = len; @@ -3221,24 +3269,24 @@ shift_data_down(vnode_t xvp, off_t start, size_t len, off_t delta, vfs_context_t return ENOMEM; } - for(pos=start+len-chunk; pos >= start; pos-=chunk) { - ret = vn_rdwr(UIO_READ, xvp, buff, chunk, pos, UIO_SYSSPACE, IO_NODELOCKED|IO_NOAUTH, ucred, &iolen, p); + for (pos = start + len - chunk; pos >= start; pos -= chunk) { + ret = vn_rdwr(UIO_READ, xvp, buff, chunk, pos, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); if (iolen != 0) { printf("xattr:shift_data: error reading data @ %lld (read %d of %lu) (%d)\n", - pos, ret, chunk, ret); + pos, ret, chunk, ret); break; } - - ret = vn_rdwr(UIO_WRITE, xvp, buff, chunk, pos + delta, UIO_SYSSPACE, IO_NODELOCKED|IO_NOAUTH, ucred, &iolen, p); + + ret = vn_rdwr(UIO_WRITE, xvp, buff, chunk, pos + delta, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); if (iolen != 0) { printf("xattr:shift_data: error writing data @ %lld (wrote %d of %lu) (%d)\n", - pos+delta, ret, chunk, ret); + pos + delta, ret, chunk, ret); break; } - + if ((pos - (off_t)chunk) < start) { chunk = pos - start; - + if (chunk == 0) { // we're all done break; } @@ -3260,11 +3308,11 @@ shift_data_up(vnode_t xvp, off_t start, size_t len, off_t delta, vfs_context_t c off_t end; kauth_cred_t ucred = vfs_context_ucred(context); proc_t p = vfs_context_proc(context); - + if (delta == 0 || len == 0) { return 0; } - + chunk = 4096; if (len < chunk) { chunk = len; @@ -3276,24 +3324,24 @@ shift_data_up(vnode_t xvp, off_t start, size_t len, off_t delta, vfs_context_t c return ENOMEM; } - for(pos = start; pos < end; pos += chunk) { - ret = vn_rdwr(UIO_READ, xvp, buff, chunk, pos, UIO_SYSSPACE, IO_NODELOCKED|IO_NOAUTH, ucred, &iolen, p); + for (pos = start; pos < end; pos += chunk) { + ret = vn_rdwr(UIO_READ, xvp, buff, chunk, pos, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); if (iolen != 0) { printf("xattr:shift_data: error reading data @ %lld (read %d of %lu) (%d)\n", - pos, ret, chunk, ret); + pos, ret, chunk, ret); break; } - - ret = vn_rdwr(UIO_WRITE, xvp, buff, chunk, pos - delta, UIO_SYSSPACE, IO_NODELOCKED|IO_NOAUTH, ucred, &iolen, p); + + ret = vn_rdwr(UIO_WRITE, xvp, buff, chunk, pos - delta, UIO_SYSSPACE, IO_NODELOCKED | IO_NOAUTH, ucred, &iolen, p); if (iolen != 0) { printf("xattr:shift_data: error writing data @ %lld (wrote %d of %lu) (%d)\n", - pos+delta, ret, chunk, ret); + pos + delta, ret, chunk, ret); break; } - + if ((pos + (off_t)chunk) > end) { chunk = end - pos; - + if (chunk == 0) { // we're all done break; } 
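The check_and_swap_attrhdr() hunks above are mostly brace and whitespace changes, but the function they touch is the security-sensitive one in this file: it byte-swaps and validates an attribute header read from an untrusted "._" file, bounds-checking each attr_entry_t before any of its fields are trusted (the fixed-size part first, then the variable-length name plus its NUL, then the offset/length pair with a wraparound check). A minimal user-space sketch of that validation walk, assuming simplified stand-in types in place of the kernel's attr_entry_t, ATTR_NEXT(), and attr_info_t:

#include <stddef.h>
#include <stdint.h>

/*
 * Simplified stand-in for the on-disk attr_entry_t; offsets here are
 * taken relative to the start of the validated buffer.
 */
struct xentry {
	uint32_t offset;   /* where this attribute's data starts */
	uint32_t length;   /* how many bytes of data it claims */
	uint8_t  namelen;  /* name length, including the NUL */
	uint8_t  name[1];  /* variable-length, NUL-terminated */
};

/* Keep successive entries 4-byte aligned, in the spirit of ATTR_NEXT(). */
#define XENTRY_SIZE(nl) ((offsetof(struct xentry, name) + (nl) + 3) & ~(size_t)3)

static int
validate_entries(const uint8_t *buf, size_t total_size, unsigned count)
{
	const uint8_t *end = buf + total_size;
	const struct xentry *e = (const struct xentry *)buf;

	for (unsigned i = 0; i < count; i++) {
		/* Fixed-size part must fit before namelen is read. */
		if ((const uint8_t *)e + offsetof(struct xentry, name) > end) {
			return -1;
		}
		/* Variable-length name (plus NUL) must fit too. */
		if (e->name + e->namelen + 1 > end) {
			return -1;
		}
		/* Payload must fit; sum in 64 bits so it cannot wrap. */
		if ((uint64_t)e->offset + e->length > total_size) {
			return -1;
		}
		e = (const struct xentry *)((const uint8_t *)e + XENTRY_SIZE(e->namelen));
	}
	return 0;
}

The ordering is the point: namelen is only read once the fixed-size part is known to fit, and offset/length are only trusted once the sum is computed in a width that cannot wrap, which is what the kernel's "end < ae->offset" test guards against.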
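The shift_data_down() and shift_data_up() hunks above are likewise pure reformatting, but the helpers repay a close read: they slide a region of the attribute file by delta bytes through a 4 KB bounce buffer (a vn_rdwr() read followed by a write at the shifted offset), and the two directions walk the region in opposite orders so that no chunk's source bytes are overwritten before they have been read. An in-memory illustration of the same chunk ordering, using memmove() in place of the kernel's read/write pairs (illustrative only, not the kernel code):

#include <stddef.h>
#include <string.h>

#define CHUNK 4096  /* same bounce-buffer size the kernel helpers use */

/* Move len bytes at start toward higher offsets by delta (grow). */
static void
shift_down(unsigned char *base, size_t start, size_t len, size_t delta)
{
	size_t remaining = len;

	/* Walk backward from the tail so unread source bytes survive. */
	while (remaining > 0) {
		size_t chunk = remaining < CHUNK ? remaining : CHUNK;
		size_t pos = start + remaining - chunk;

		memmove(base + pos + delta, base + pos, chunk);
		remaining -= chunk;
	}
}

/* Move len bytes at start toward lower offsets by delta (shrink). */
static void
shift_up(unsigned char *base, size_t start, size_t len, size_t delta)
{
	size_t done = 0;

	/* Walk forward from the head; the destination is always below. */
	while (done < len) {
		size_t chunk = (len - done) < CHUNK ? (len - done) : CHUNK;

		memmove(base + start + done - delta, base + start + done, chunk);
		done += chunk;
	}
}

shift_data_down() makes room toward higher file offsets, so it starts at the tail; shift_data_up() reclaims space toward lower offsets, so it starts at the head. Within a single chunk the kernel's separate bounce buffer makes source/destination overlap harmless, which is what memmove() stands in for here.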
@@ -3315,11 +3363,11 @@ lock_xattrfile(vnode_t xvp, short locktype, vfs_context_t context) lf.l_len = 0; lf.l_type = locktype; /* F_WRLCK or F_RDLCK */ /* Note: id is just a kernel address that's not a proc */ - error = VNOP_ADVLOCK(xvp, (caddr_t)xvp, F_SETLK, &lf, F_FLOCK|F_WAIT, context, NULL); - return (error == ENOTSUP ? 0 : error); + error = VNOP_ADVLOCK(xvp, (caddr_t)xvp, F_SETLK, &lf, F_FLOCK | F_WAIT, context, NULL); + return error == ENOTSUP ? 0 : error; } - int +int unlock_xattrfile(vnode_t xvp, vfs_context_t context) { struct flock lf; @@ -3331,7 +3379,7 @@ unlock_xattrfile(vnode_t xvp, vfs_context_t context) lf.l_type = F_UNLCK; /* Note: id is just a kernel address that's not a proc */ error = VNOP_ADVLOCK(xvp, (caddr_t)xvp, F_UNLCK, &lf, F_FLOCK, context, NULL); - return (error == ENOTSUP ? 0 : error); + return error == ENOTSUP ? 0 : error; } #else /* CONFIG_APPLEDOUBLE */ @@ -3342,14 +3390,14 @@ default_getxattr(__unused vnode_t vp, __unused const char *name, __unused uio_t uio, __unused size_t *size, __unused int options, __unused vfs_context_t context) { - return (ENOTSUP); + return ENOTSUP; } static int default_setxattr(__unused vnode_t vp, __unused const char *name, __unused uio_t uio, __unused int options, __unused vfs_context_t context) { - return (ENOTSUP); + return ENOTSUP; } static int @@ -3357,14 +3405,14 @@ default_listxattr(__unused vnode_t vp, __unused uio_t uio, __unused size_t *size, __unused int options, __unused vfs_context_t context) { - return (ENOTSUP); + return ENOTSUP; } static int default_removexattr(__unused vnode_t vp, __unused const char *name, - __unused int options, __unused vfs_context_t context) + __unused int options, __unused vfs_context_t context) { - return (ENOTSUP); + return ENOTSUP; } #endif /* CONFIG_APPLEDOUBLE */ diff --git a/bsd/vfs/vnode_if.c b/bsd/vfs/vnode_if.c index f0c1fa80d..ffd01323c 100644 --- a/bsd/vfs/vnode_if.c +++ b/bsd/vfs/vnode_if.c @@ -1,9 +1,8 @@ - /* * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -90,7 +89,7 @@ struct vnodeop_desc vnop_default_desc = { int vnop_lookup_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_lookup_args,a_dvp), + VOPARG_OFFSETOF(struct vnop_lookup_args, a_dvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_lookup_desc = { @@ -115,7 +114,7 @@ struct vnodeop_desc vnop_compound_open_desc = { 0, "vnop_compound_open", 0 | VDESC_VP0_WILLRELE, - vnop_compound_open_vp_offsets, + vnop_compound_open_vp_offsets, VOPARG_OFFSETOF(struct vnop_compound_open_args, a_vpp), VDESC_NO_OFFSET, VDESC_NO_OFFSET, @@ -125,7 +124,7 @@ struct vnodeop_desc vnop_compound_open_desc = { }; int vnop_create_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_create_args,a_dvp), + VOPARG_OFFSETOF(struct vnop_create_args, a_dvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_create_desc = { @@ -142,7 +141,7 @@ struct vnodeop_desc vnop_create_desc = { }; int vnop_whiteout_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_whiteout_args,a_dvp), + VOPARG_OFFSETOF(struct vnop_whiteout_args, a_dvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_whiteout_desc = { @@ -159,24 +158,24 @@ struct vnodeop_desc vnop_whiteout_desc = { }; int vnop_mknod_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_mknod_args,a_dvp), - VDESC_NO_OFFSET + VOPARG_OFFSETOF(struct vnop_mknod_args, a_dvp), + VDESC_NO_OFFSET }; struct vnodeop_desc vnop_mknod_desc = { - 0, - "vnop_mknod", - 0 | VDESC_VP0_WILLRELE | VDESC_VPP_WILLRELE, - vnop_mknod_vp_offsets, - VOPARG_OFFSETOF(struct vnop_mknod_args, a_vpp), - VDESC_NO_OFFSET, - VDESC_NO_OFFSET, - VOPARG_OFFSETOF(struct vnop_mknod_args, a_cnp), - VOPARG_OFFSETOF(struct vnop_mknod_args, a_context), - NULL + 0, + "vnop_mknod", + 0 | VDESC_VP0_WILLRELE | VDESC_VPP_WILLRELE, + vnop_mknod_vp_offsets, + VOPARG_OFFSETOF(struct vnop_mknod_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vnop_mknod_args, a_cnp), + VOPARG_OFFSETOF(struct vnop_mknod_args, a_context), + NULL }; int vnop_open_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_open_args,a_vp), + VOPARG_OFFSETOF(struct vnop_open_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_open_desc = { @@ -193,7 +192,7 @@ struct vnodeop_desc vnop_open_desc = { }; int vnop_close_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_close_args,a_vp), + VOPARG_OFFSETOF(struct vnop_close_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_close_desc = { @@ -210,7 +209,7 @@ struct vnodeop_desc vnop_close_desc = { }; int vnop_access_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_access_args,a_vp), + VOPARG_OFFSETOF(struct vnop_access_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_access_desc = { @@ -227,7 +226,7 @@ struct vnodeop_desc vnop_access_desc = { }; int vnop_getattr_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_getattr_args,a_vp), + VOPARG_OFFSETOF(struct vnop_getattr_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_getattr_desc = { @@ -244,7 +243,7 @@ struct vnodeop_desc vnop_getattr_desc = { }; int vnop_setattr_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_setattr_args,a_vp), + VOPARG_OFFSETOF(struct vnop_setattr_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_setattr_desc = { @@ -261,7 +260,7 @@ struct vnodeop_desc vnop_setattr_desc = { }; int vnop_read_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_read_args,a_vp), + VOPARG_OFFSETOF(struct vnop_read_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_read_desc = { @@ -278,7 +277,7 @@ struct vnodeop_desc vnop_read_desc = { }; int vnop_write_vp_offsets[] = { - 
VOPARG_OFFSETOF(struct vnop_write_args,a_vp), + VOPARG_OFFSETOF(struct vnop_write_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_write_desc = { @@ -295,7 +294,7 @@ struct vnodeop_desc vnop_write_desc = { }; int vnop_ioctl_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_ioctl_args,a_vp), + VOPARG_OFFSETOF(struct vnop_ioctl_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_ioctl_desc = { @@ -312,7 +311,7 @@ struct vnodeop_desc vnop_ioctl_desc = { }; int vnop_select_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_select_args,a_vp), + VOPARG_OFFSETOF(struct vnop_select_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_select_desc = { @@ -329,8 +328,8 @@ struct vnodeop_desc vnop_select_desc = { }; int vnop_exchange_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_exchange_args,a_fvp), - VOPARG_OFFSETOF(struct vnop_exchange_args,a_tvp), + VOPARG_OFFSETOF(struct vnop_exchange_args, a_fvp), + VOPARG_OFFSETOF(struct vnop_exchange_args, a_tvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_exchange_desc = { @@ -347,7 +346,7 @@ struct vnodeop_desc vnop_exchange_desc = { }; int vnop_kqfilt_add_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_kqfilt_add_args,a_vp), + VOPARG_OFFSETOF(struct vnop_kqfilt_add_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_kqfilt_add_desc = { @@ -364,7 +363,7 @@ struct vnodeop_desc vnop_kqfilt_add_desc = { }; int vnop_kqfilt_remove_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_kqfilt_remove_args,a_vp), + VOPARG_OFFSETOF(struct vnop_kqfilt_remove_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_kqfilt_remove_desc = { @@ -381,7 +380,7 @@ struct vnodeop_desc vnop_kqfilt_remove_desc = { }; int vnop_monitor_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_monitor_args,a_vp), + VOPARG_OFFSETOF(struct vnop_monitor_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_monitor_desc = { @@ -398,7 +397,7 @@ struct vnodeop_desc vnop_monitor_desc = { }; int vnop_setlabel_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_setlabel_args,a_vp), + VOPARG_OFFSETOF(struct vnop_setlabel_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_setlabel_desc = { @@ -415,7 +414,7 @@ struct vnodeop_desc vnop_setlabel_desc = { }; int vnop_revoke_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_revoke_args,a_vp), + VOPARG_OFFSETOF(struct vnop_revoke_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_revoke_desc = { @@ -433,7 +432,7 @@ struct vnodeop_desc vnop_revoke_desc = { int vnop_mmap_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_mmap_args,a_vp), + VOPARG_OFFSETOF(struct vnop_mmap_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_mmap_desc = { @@ -451,7 +450,7 @@ struct vnodeop_desc vnop_mmap_desc = { int vnop_mnomap_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_mnomap_args,a_vp), + VOPARG_OFFSETOF(struct vnop_mnomap_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_mnomap_desc = { @@ -469,7 +468,7 @@ struct vnodeop_desc vnop_mnomap_desc = { int vnop_fsync_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_fsync_args,a_vp), + VOPARG_OFFSETOF(struct vnop_fsync_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_fsync_desc = { @@ -486,8 +485,8 @@ struct vnodeop_desc vnop_fsync_desc = { }; int vnop_remove_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_remove_args,a_dvp), - VOPARG_OFFSETOF(struct vnop_remove_args,a_vp), + VOPARG_OFFSETOF(struct vnop_remove_args, a_dvp), + VOPARG_OFFSETOF(struct vnop_remove_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_remove_desc = { @@ -504,7 +503,7 @@ struct vnodeop_desc vnop_remove_desc = { }; int 
vnop_remove_extended_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_remove_args,a_dvp), + VOPARG_OFFSETOF(struct vnop_remove_args, a_dvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_compound_remove_desc = { @@ -521,8 +520,8 @@ struct vnodeop_desc vnop_compound_remove_desc = { }; int vnop_link_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_link_args,a_vp), - VOPARG_OFFSETOF(struct vnop_link_args,a_tdvp), + VOPARG_OFFSETOF(struct vnop_link_args, a_vp), + VOPARG_OFFSETOF(struct vnop_link_args, a_tdvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_link_desc = { @@ -539,10 +538,10 @@ struct vnodeop_desc vnop_link_desc = { }; int vnop_rename_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_rename_args,a_fdvp), - VOPARG_OFFSETOF(struct vnop_rename_args,a_fvp), - VOPARG_OFFSETOF(struct vnop_rename_args,a_tdvp), - VOPARG_OFFSETOF(struct vnop_rename_args,a_tvp), + VOPARG_OFFSETOF(struct vnop_rename_args, a_fdvp), + VOPARG_OFFSETOF(struct vnop_rename_args, a_fvp), + VOPARG_OFFSETOF(struct vnop_rename_args, a_tdvp), + VOPARG_OFFSETOF(struct vnop_rename_args, a_tvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_rename_desc = { @@ -559,10 +558,10 @@ struct vnodeop_desc vnop_rename_desc = { }; int vnop_renamex_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_renamex_args,a_fdvp), - VOPARG_OFFSETOF(struct vnop_renamex_args,a_fvp), - VOPARG_OFFSETOF(struct vnop_renamex_args,a_tdvp), - VOPARG_OFFSETOF(struct vnop_renamex_args,a_tvp), + VOPARG_OFFSETOF(struct vnop_renamex_args, a_fdvp), + VOPARG_OFFSETOF(struct vnop_renamex_args, a_fvp), + VOPARG_OFFSETOF(struct vnop_renamex_args, a_tdvp), + VOPARG_OFFSETOF(struct vnop_renamex_args, a_tvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_renamex_desc = { @@ -579,10 +578,10 @@ struct vnodeop_desc vnop_renamex_desc = { }; int vnop_compound_rename_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_compound_rename_args,a_fdvp), - VOPARG_OFFSETOF(struct vnop_compound_rename_args,a_fvpp), - VOPARG_OFFSETOF(struct vnop_compound_rename_args,a_tdvp), - VOPARG_OFFSETOF(struct vnop_compound_rename_args,a_tvpp), + VOPARG_OFFSETOF(struct vnop_compound_rename_args, a_fdvp), + VOPARG_OFFSETOF(struct vnop_compound_rename_args, a_fvpp), + VOPARG_OFFSETOF(struct vnop_compound_rename_args, a_tdvp), + VOPARG_OFFSETOF(struct vnop_compound_rename_args, a_tvpp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_compound_rename_desc = { @@ -599,43 +598,43 @@ struct vnodeop_desc vnop_compound_rename_desc = { }; int vnop_mkdir_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_mkdir_args,a_dvp), - VDESC_NO_OFFSET + VOPARG_OFFSETOF(struct vnop_mkdir_args, a_dvp), + VDESC_NO_OFFSET }; struct vnodeop_desc vnop_mkdir_desc = { - 0, - "vnop_mkdir", - 0 | VDESC_VP0_WILLRELE, - vnop_mkdir_vp_offsets, - VOPARG_OFFSETOF(struct vnop_mkdir_args, a_vpp), - VDESC_NO_OFFSET, - VDESC_NO_OFFSET, - VOPARG_OFFSETOF(struct vnop_mkdir_args, a_cnp), - VOPARG_OFFSETOF(struct vnop_mkdir_args, a_context), - NULL + 0, + "vnop_mkdir", + 0 | VDESC_VP0_WILLRELE, + vnop_mkdir_vp_offsets, + VOPARG_OFFSETOF(struct vnop_mkdir_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vnop_mkdir_args, a_cnp), + VOPARG_OFFSETOF(struct vnop_mkdir_args, a_context), + NULL }; int vnop_compound_mkdir_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_compound_mkdir_args,a_dvp), - VDESC_NO_OFFSET + VOPARG_OFFSETOF(struct vnop_compound_mkdir_args, a_dvp), + VDESC_NO_OFFSET }; struct vnodeop_desc vnop_compound_mkdir_desc = { - 0, - "vnop_compound_mkdir", - 0 | VDESC_VP0_WILLRELE, - vnop_compound_mkdir_vp_offsets, - 
VOPARG_OFFSETOF(struct vnop_compound_mkdir_args, a_vpp), - VDESC_NO_OFFSET, - VDESC_NO_OFFSET, - VOPARG_OFFSETOF(struct vnop_compound_mkdir_args, a_cnp), - VOPARG_OFFSETOF(struct vnop_compound_mkdir_args, a_context), - NULL + 0, + "vnop_compound_mkdir", + 0 | VDESC_VP0_WILLRELE, + vnop_compound_mkdir_vp_offsets, + VOPARG_OFFSETOF(struct vnop_compound_mkdir_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vnop_compound_mkdir_args, a_cnp), + VOPARG_OFFSETOF(struct vnop_compound_mkdir_args, a_context), + NULL }; int vnop_rmdir_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_rmdir_args,a_dvp), - VOPARG_OFFSETOF(struct vnop_rmdir_args,a_vp), + VOPARG_OFFSETOF(struct vnop_rmdir_args, a_dvp), + VOPARG_OFFSETOF(struct vnop_rmdir_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_rmdir_desc = { @@ -652,7 +651,7 @@ struct vnodeop_desc vnop_rmdir_desc = { }; int vnop_compound_rmdir_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_compound_rmdir_args,a_dvp), + VOPARG_OFFSETOF(struct vnop_compound_rmdir_args, a_dvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_compound_rmdir_desc = { @@ -669,24 +668,24 @@ struct vnodeop_desc vnop_compound_rmdir_desc = { }; int vnop_symlink_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_symlink_args,a_dvp), - VDESC_NO_OFFSET + VOPARG_OFFSETOF(struct vnop_symlink_args, a_dvp), + VDESC_NO_OFFSET }; struct vnodeop_desc vnop_symlink_desc = { - 0, - "vnop_symlink", - 0 | VDESC_VP0_WILLRELE | VDESC_VPP_WILLRELE, - vnop_symlink_vp_offsets, - VOPARG_OFFSETOF(struct vnop_symlink_args, a_vpp), - VDESC_NO_OFFSET, - VDESC_NO_OFFSET, - VOPARG_OFFSETOF(struct vnop_symlink_args, a_cnp), - VOPARG_OFFSETOF(struct vnop_symlink_args, a_context), - NULL + 0, + "vnop_symlink", + 0 | VDESC_VP0_WILLRELE | VDESC_VPP_WILLRELE, + vnop_symlink_vp_offsets, + VOPARG_OFFSETOF(struct vnop_symlink_args, a_vpp), + VDESC_NO_OFFSET, + VDESC_NO_OFFSET, + VOPARG_OFFSETOF(struct vnop_symlink_args, a_cnp), + VOPARG_OFFSETOF(struct vnop_symlink_args, a_context), + NULL }; int vnop_readdir_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_readdir_args,a_vp), + VOPARG_OFFSETOF(struct vnop_readdir_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_readdir_desc = { @@ -703,7 +702,7 @@ struct vnodeop_desc vnop_readdir_desc = { }; int vnop_readdirattr_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_readdirattr_args,a_vp), + VOPARG_OFFSETOF(struct vnop_readdirattr_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_readdirattr_desc = { @@ -720,7 +719,7 @@ struct vnodeop_desc vnop_readdirattr_desc = { }; int vnop_getattrlistbulk_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_getattrlistbulk_args,a_vp), + VOPARG_OFFSETOF(struct vnop_getattrlistbulk_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_getattrlistbulk_desc = { @@ -737,7 +736,7 @@ struct vnodeop_desc vnop_getattrlistbulk_desc = { }; int vnop_readlink_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_readlink_args,a_vp), + VOPARG_OFFSETOF(struct vnop_readlink_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_readlink_desc = { @@ -754,7 +753,7 @@ struct vnodeop_desc vnop_readlink_desc = { }; int vnop_inactive_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_inactive_args,a_vp), + VOPARG_OFFSETOF(struct vnop_inactive_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_inactive_desc = { @@ -771,7 +770,7 @@ struct vnodeop_desc vnop_inactive_desc = { }; int vnop_reclaim_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_reclaim_args,a_vp), + VOPARG_OFFSETOF(struct vnop_reclaim_args, a_vp), VDESC_NO_OFFSET }; struct 
vnodeop_desc vnop_reclaim_desc = { @@ -788,7 +787,7 @@ struct vnodeop_desc vnop_reclaim_desc = { }; int vnop_pathconf_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_pathconf_args,a_vp), + VOPARG_OFFSETOF(struct vnop_pathconf_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_pathconf_desc = { @@ -805,7 +804,7 @@ struct vnodeop_desc vnop_pathconf_desc = { }; int vnop_advlock_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_advlock_args,a_vp), + VOPARG_OFFSETOF(struct vnop_advlock_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_advlock_desc = { @@ -822,7 +821,7 @@ struct vnodeop_desc vnop_advlock_desc = { }; int vnop_allocate_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_allocate_args,a_vp), + VOPARG_OFFSETOF(struct vnop_allocate_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_allocate_desc = { @@ -839,7 +838,7 @@ struct vnodeop_desc vnop_allocate_desc = { }; int vnop_pagein_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_pagein_args,a_vp), + VOPARG_OFFSETOF(struct vnop_pagein_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_pagein_desc = { @@ -856,7 +855,7 @@ struct vnodeop_desc vnop_pagein_desc = { }; int vnop_pageout_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_pageout_args,a_vp), + VOPARG_OFFSETOF(struct vnop_pageout_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_pageout_desc = { @@ -873,7 +872,7 @@ struct vnodeop_desc vnop_pageout_desc = { }; int vnop_searchfs_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_searchfs_args,a_vp), + VOPARG_OFFSETOF(struct vnop_searchfs_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_searchfs_desc = { @@ -890,9 +889,9 @@ struct vnodeop_desc vnop_searchfs_desc = { }; int vnop_copyfile_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_copyfile_args,a_fvp), - VOPARG_OFFSETOF(struct vnop_copyfile_args,a_tdvp), - VOPARG_OFFSETOF(struct vnop_copyfile_args,a_tvp), + VOPARG_OFFSETOF(struct vnop_copyfile_args, a_fvp), + VOPARG_OFFSETOF(struct vnop_copyfile_args, a_tdvp), + VOPARG_OFFSETOF(struct vnop_copyfile_args, a_tvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_copyfile_desc = { @@ -909,8 +908,8 @@ struct vnodeop_desc vnop_copyfile_desc = { }; int vnop_clonefile_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_clonefile_args,a_fvp), - VOPARG_OFFSETOF(struct vnop_clonefile_args,a_dvp), + VOPARG_OFFSETOF(struct vnop_clonefile_args, a_fvp), + VOPARG_OFFSETOF(struct vnop_clonefile_args, a_dvp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_clonefile_desc = { @@ -927,7 +926,7 @@ struct vnodeop_desc vnop_clonefile_desc = { }; int vop_getxattr_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_getxattr_args,a_vp), + VOPARG_OFFSETOF(struct vnop_getxattr_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_getxattr_desc = { @@ -944,7 +943,7 @@ struct vnodeop_desc vnop_getxattr_desc = { }; int vop_setxattr_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_setxattr_args,a_vp), + VOPARG_OFFSETOF(struct vnop_setxattr_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_setxattr_desc = { @@ -961,7 +960,7 @@ struct vnodeop_desc vnop_setxattr_desc = { }; int vop_removexattr_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_removexattr_args,a_vp), + VOPARG_OFFSETOF(struct vnop_removexattr_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_removexattr_desc = { @@ -978,7 +977,7 @@ struct vnodeop_desc vnop_removexattr_desc = { }; int vop_listxattr_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_listxattr_args,a_vp), + VOPARG_OFFSETOF(struct vnop_listxattr_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_listxattr_desc = { @@ 
-995,7 +994,7 @@ struct vnodeop_desc vnop_listxattr_desc = { }; int vnop_blktooff_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_blktooff_args,a_vp), + VOPARG_OFFSETOF(struct vnop_blktooff_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_blktooff_desc = { @@ -1012,7 +1011,7 @@ struct vnodeop_desc vnop_blktooff_desc = { }; int vnop_offtoblk_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_offtoblk_args,a_vp), + VOPARG_OFFSETOF(struct vnop_offtoblk_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_offtoblk_desc = { @@ -1029,7 +1028,7 @@ struct vnodeop_desc vnop_offtoblk_desc = { }; int vnop_blockmap_vp_offsets[] = { - VOPARG_OFFSETOF(struct vnop_blockmap_args,a_vp), + VOPARG_OFFSETOF(struct vnop_blockmap_args, a_vp), VDESC_NO_OFFSET }; struct vnodeop_desc vnop_blockmap_desc = { @@ -1183,9 +1182,9 @@ struct vnodeop_desc vnop_bwrite_desc = { /* End of special cases. */ struct vnodeop_desc *vfs_op_descs[] = { - &vnop_default_desc, /* MUST BE FIRST */ - &vnop_strategy_desc, /* XXX: SPECIAL CASE */ - &vnop_bwrite_desc, /* XXX: SPECIAL CASE */ + &vnop_default_desc, /* MUST BE FIRST */ + &vnop_strategy_desc, /* XXX: SPECIAL CASE */ + &vnop_bwrite_desc, /* XXX: SPECIAL CASE */ &vnop_lookup_desc, &vnop_create_desc, @@ -1256,4 +1255,3 @@ struct vnodeop_desc *vfs_op_descs[] = { &vnop_removenamedstream_desc, NULL }; - diff --git a/bsd/vm/dp_backing_file.c b/bsd/vm/dp_backing_file.c index 5548f11fd..d932aa153 100644 --- a/bsd/vm/dp_backing_file.c +++ b/bsd/vm/dp_backing_file.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -81,7 +81,7 @@ * Routine: macx_backing_store_recovery * Function: * Syscall interface to set a tasks privilege - * level so that it is not subject to + * level so that it is not subject to * macx_backing_store_suspend */ int @@ -94,7 +94,7 @@ macx_backing_store_recovery( /* * Routine: macx_backing_store_suspend * Function: - * Syscall interface to stop new demand for + * Syscall interface to stop new demand for * backing store when backing store is low */ @@ -112,11 +112,11 @@ extern boolean_t compressor_store_stop_compaction; * Routine: macx_backing_store_compaction * Function: * Turn compaction of swap space on or off. This is - * used during shutdown/restart so that the kernel - * doesn't waste time compacting swap files that are - * about to be deleted anyway. 
Compaction is always - * on by default when the system comes up and is turned - * off when a shutdown/restart is requested. It is + * used during shutdown/restart so that the kernel + * doesn't waste time compacting swap files that are + * about to be deleted anyway. Compaction is always + * on by default when the system comes up and is turned + * off when a shutdown/restart is requested. It is * re-enabled if the shutdown/restart is aborted for any reason. * * This routine assumes macx_lock has been locked by macx_triggers -> @@ -128,14 +128,14 @@ macx_backing_store_compaction(int flags) { int error; - if ((error = suser(kauth_cred_get(), 0))) + if ((error = suser(kauth_cred_get(), 0))) { return error; + } if (flags & SWAP_COMPACT_DISABLE) { compressor_store_stop_compaction = TRUE; kprintf("compressor_store_stop_compaction = TRUE\n"); - } else if (flags & SWAP_COMPACT_ENABLE) { compressor_store_stop_compaction = FALSE; @@ -155,10 +155,11 @@ int macx_triggers( struct macx_triggers_args *args) { - int flags = args->flags; + int flags = args->flags; - if (flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE)) - return (macx_backing_store_compaction(flags)); + if (flags & (SWAP_COMPACT_DISABLE | SWAP_COMPACT_ENABLE)) { + return macx_backing_store_compaction(flags); + } return ENOTSUP; } @@ -195,20 +196,17 @@ extern boolean_t vm_swap_up; int macx_swapinfo( - memory_object_size_t *total_p, - memory_object_size_t *avail_p, - vm_size_t *pagesize_p, - boolean_t *encrypted_p) + memory_object_size_t *total_p, + memory_object_size_t *avail_p, + vm_size_t *pagesize_p, + boolean_t *encrypted_p) { if (VM_CONFIG_SWAP_IS_PRESENT) { - *total_p = vm_swap_get_total_space(); *avail_p = vm_swap_get_free_space(); *pagesize_p = (vm_size_t)PAGE_SIZE_64; *encrypted_p = TRUE; - } else { - *total_p = 0; *avail_p = 0; *pagesize_p = 0; diff --git a/bsd/vm/vm_compressor_backing_file.c b/bsd/vm/vm_compressor_backing_file.c index e54b68356..b908529dc 100644 --- a/bsd/vm/vm_compressor_backing_file.c +++ b/bsd/vm/vm_compressor_backing_file.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -58,13 +58,13 @@ void vm_swapfile_open(const char *path, vnode_t *vp) { int error = 0; - vfs_context_t ctx = vfs_context_kernel(); + vfs_context_t ctx = vfs_context_kernel(); if ((error = vnode_open(path, (O_CREAT | O_TRUNC | FREAD | FWRITE), S_IRUSR | S_IWUSR, 0, vp, ctx))) { printf("Failed to open swap file %d\n", error); *vp = NULL; return; - } + } /* * If MNT_IOFLAGS_NOSWAP is set, opening the swap file should fail. @@ -84,13 +84,13 @@ vm_swapfile_open(const char *path, vnode_t *vp) uint64_t vm_swapfile_get_blksize(vnode_t vp) { - return ((uint64_t)vfs_devblocksize(vnode_mount(vp))); + return (uint64_t)vfs_devblocksize(vnode_mount(vp)); } uint64_t vm_swapfile_get_transfer_size(vnode_t vp) { - return((uint64_t)vp->v_mount->mnt_vfsstat.f_iosize); + return (uint64_t)vp->v_mount->mnt_vfsstat.f_iosize; } int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int); @@ -103,23 +103,24 @@ vm_swapfile_close(uint64_t path_addr, vnode_t vp) vnode_getwithref(vp); vnode_close(vp, 0, context); - + error = unlink1(context, NULLVP, CAST_USER_ADDR_T(path_addr), UIO_SYSSPACE, 0); #if DEVELOPMENT || DEBUG - if (error) + if (error) { printf("%s : unlink of %s failed with error %d", __FUNCTION__, (char *)path_addr, error); + } #endif } int vm_swapfile_preallocate(vnode_t vp, uint64_t *size, boolean_t *pin) { - int error = 0; - uint64_t file_size = 0; - vfs_context_t ctx = NULL; + int error = 0; + uint64_t file_size = 0; + vfs_context_t ctx = NULL; #if CONFIG_FREEZE struct vnode_attr va; #endif /* CONFIG_FREEZE */ @@ -138,9 +139,9 @@ vm_swapfile_preallocate(vnode_t vp, uint64_t *size, boolean_t *pin) if (error) { printf("vnode_size (new file) for swap file failed: %d\n", error); goto done; - } + } assert(file_size == *size); - + if (pin != NULL && *pin != FALSE) { error = VNOP_IOCTL(vp, FIOPINSWAP, NULL, 0, ctx); @@ -179,11 +180,11 @@ vm_record_file_write(vnode_t vp, uint64_t offset, char *buf, int size) vfs_context_t ctx; ctx = vfs_context_kernel(); - + error = vn_rdwr(UIO_WRITE, vp, (caddr_t)buf, size, offset, - UIO_SYSSPACE, IO_NODELOCKED, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); + UIO_SYSSPACE, IO_NODELOCKED, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); - return (error); + return error; } @@ -194,17 +195,18 @@ vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flag int error = 0; uint64_t io_size = npages * PAGE_SIZE_64; #if 1 - kern_return_t kr = KERN_SUCCESS; - upl_t upl = NULL; - unsigned int count = 0; + kern_return_t kr = KERN_SUCCESS; + upl_t upl = NULL; + unsigned int count = 0; upl_control_flags_t upl_create_flags = 0; - int upl_control_flags = 0; - upl_size_t upl_size = 0; + int upl_control_flags = 0; + upl_size_t upl_size = 0; upl_create_flags = UPL_SET_INTERNAL | UPL_SET_LITE; - if (upl_iodone == NULL) - upl_control_flags = UPL_IOSYNC; + if (upl_iodone == NULL) { + upl_control_flags = UPL_IOSYNC; + } #if ENCRYPTED_SWAP upl_control_flags |= UPL_PAGING_ENCRYPTED; @@ -213,16 +215,16 @@ vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flag if ((flags & SWAP_READ) == FALSE) { upl_create_flags |= UPL_COPYOUT_FROM; } - + upl_size = io_size; kr = vm_map_create_upl( kernel_map, - start, - &upl_size, - &upl, - NULL, - &count, - &upl_create_flags, - VM_KERN_MEMORY_OSFMK); + start, + &upl_size, + &upl, + NULL, + &count, + &upl_create_flags, + VM_KERN_MEMORY_OSFMK); if (kr != KERN_SUCCESS || (upl_size != io_size)) { panic("vm_map_create_upl failed with %d\n", 
kr); @@ -230,12 +232,12 @@ vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flag if (flags & SWAP_READ) { vnode_pagein(vp, - upl, - 0, - offset, - io_size, - upl_control_flags | UPL_IGNORE_VALID_PAGE_CHECK, - &error); + upl, + 0, + offset, + io_size, + upl_control_flags | UPL_IGNORE_VALID_PAGE_CHECK, + &error); if (error) { #if DEBUG printf("vm_swapfile_io: vnode_pagein failed with %d (vp: %p, offset: 0x%llx, size:%llu)\n", error, vp, offset, io_size); @@ -243,17 +245,16 @@ vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flag printf("vm_swapfile_io: vnode_pagein failed with %d.\n", error); #endif /* DEBUG */ } - } else { - upl_set_iodone(upl, upl_iodone); + upl_set_iodone(upl, upl_iodone); vnode_pageout(vp, - upl, - 0, - offset, - io_size, - upl_control_flags, - &error); + upl, + 0, + offset, + io_size, + upl_control_flags, + &error); if (error) { #if DEBUG printf("vm_swapfile_io: vnode_pageout failed with %d (vp: %p, offset: 0x%llx, size:%llu)\n", error, vp, offset, io_size); @@ -267,9 +268,9 @@ vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flag #else /* 1 */ vfs_context_t ctx; ctx = vfs_context_kernel(); - + error = vn_rdwr((flags & SWAP_READ) ? UIO_READ : UIO_WRITE, vp, (caddr_t)start, io_size, offset, - UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED | IO_UNIT | IO_NOCACHE | IO_SWAP_DISPATCH, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); + UIO_SYSSPACE, IO_SYNC | IO_NODELOCKED | IO_UNIT | IO_NOCACHE | IO_SWAP_DISPATCH, vfs_context_ucred(ctx), (int *) 0, vfs_context_proc(ctx)); if (error) { printf("vn_rdwr: Swap I/O failed with %d\n", error); @@ -279,28 +280,31 @@ vm_swapfile_io(vnode_t vp, uint64_t offset, uint64_t start, int npages, int flag } -#define MAX_BATCH_TO_TRIM 256 +#define MAX_BATCH_TO_TRIM 256 -#define ROUTE_ONLY 0x10 /* if corestorage is present, tell it to just pass */ +#define ROUTE_ONLY 0x10 /* if corestorage is present, tell it to just pass */ /* the DKIOUNMAP command through w/o acting on it */ /* this is used by the compressed swap system to reclaim empty space */ -u_int32_t vnode_trim_list (vnode_t vp, struct trim_list *tl, boolean_t route_only) +u_int32_t +vnode_trim_list(vnode_t vp, struct trim_list *tl, boolean_t route_only) { - int error = 0; - int trim_index = 0; - u_int32_t blocksize = 0; - struct vnode *devvp; - dk_extent_t *extents; - dk_unmap_t unmap; - _dk_cs_unmap_t cs_unmap; - - if ( !(vp->v_mount->mnt_ioflags & MNT_IOFLAGS_UNMAP_SUPPORTED)) - return (ENOTSUP); + int error = 0; + int trim_index = 0; + u_int32_t blocksize = 0; + struct vnode *devvp; + dk_extent_t *extents; + dk_unmap_t unmap; + _dk_cs_unmap_t cs_unmap; + + if (!(vp->v_mount->mnt_ioflags & MNT_IOFLAGS_UNMAP_SUPPORTED)) { + return ENOTSUP; + } - if (tl == NULL) - return (0); + if (tl == NULL) { + return 0; + } /* * Get the underlying device vnode and physical block size @@ -311,28 +315,29 @@ u_int32_t vnode_trim_list (vnode_t vp, struct trim_list *tl, boolean_t route_onl extents = kalloc(sizeof(dk_extent_t) * MAX_BATCH_TO_TRIM); if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_CSUNMAP_SUPPORTED) { - memset (&cs_unmap, 0, sizeof(_dk_cs_unmap_t)); + memset(&cs_unmap, 0, sizeof(_dk_cs_unmap_t)); cs_unmap.extents = extents; - if (route_only == TRUE) + if (route_only == TRUE) { cs_unmap.options = ROUTE_ONLY; + } } else { - memset (&unmap, 0, sizeof(dk_unmap_t)); + memset(&unmap, 0, sizeof(dk_unmap_t)); unmap.extents = extents; } while (tl) { - daddr64_t io_blockno; /* Block number corresponding to the start of the 
extent */ - size_t io_bytecount; /* Number of bytes in current extent for the specified range */ - size_t trimmed; - size_t remaining_length; - off_t current_offset; + daddr64_t io_blockno; /* Block number corresponding to the start of the extent */ + size_t io_bytecount; /* Number of bytes in current extent for the specified range */ + size_t trimmed; + size_t remaining_length; + off_t current_offset; current_offset = tl->tl_offset; remaining_length = tl->tl_length; trimmed = 0; - - /* + + /* * We may not get the entire range from tl_offset -> tl_offset+tl_length in a single * extent from the blockmap call. Keep looping/going until we are sure we've hit * the whole range or if we encounter an error. @@ -340,24 +345,23 @@ u_int32_t vnode_trim_list (vnode_t vp, struct trim_list *tl, boolean_t route_onl while (trimmed < tl->tl_length) { /* * VNOP_BLOCKMAP will tell us the logical to physical block number mapping for the - * specified offset. It returns blocks in contiguous chunks, so if the logical range is + * specified offset. It returns blocks in contiguous chunks, so if the logical range is * broken into multiple extents, it must be called multiple times, increasing the offset * in each call to ensure that the entire range is covered. */ - error = VNOP_BLOCKMAP (vp, current_offset, remaining_length, - &io_blockno, &io_bytecount, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL); + error = VNOP_BLOCKMAP(vp, current_offset, remaining_length, + &io_blockno, &io_bytecount, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL); if (error) { goto trim_exit; } if (io_blockno != -1) { - extents[trim_index].offset = (uint64_t) io_blockno * (u_int64_t) blocksize; + extents[trim_index].offset = (uint64_t) io_blockno * (u_int64_t) blocksize; extents[trim_index].length = io_bytecount; trim_index++; } if (trim_index == MAX_BATCH_TO_TRIM) { - if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_CSUNMAP_SUPPORTED) { cs_unmap.extentsCount = trim_index; error = VNOP_IOCTL(devvp, _DKIOCCSUNMAP, (caddr_t)&cs_unmap, 0, vfs_context_kernel()); @@ -395,9 +399,9 @@ trim_exit: int vm_swap_vol_get_budget(vnode_t vp, uint64_t *freeze_daily_budget) { - vnode_t devvp = NULL; - vfs_context_t ctx = vfs_context_kernel(); - errno_t err = 0; + vnode_t devvp = NULL; + vfs_context_t ctx = vfs_context_kernel(); + errno_t err = 0; devvp = vp->v_mount->mnt_devvp; diff --git a/bsd/vm/vm_pager.h b/bsd/vm/vm_pager.h index 1e189858b..7d18009df 100644 --- a/bsd/vm/vm_pager.h +++ b/bsd/vm/vm_pager.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies @@ -41,31 +41,31 @@ * Pager routine interface definition */ -#ifndef _VM_PAGER_ -#define _VM_PAGER_ +#ifndef _VM_PAGER_ +#define _VM_PAGER_ #include -struct pager_struct { - boolean_t is_device; +struct pager_struct { + boolean_t is_device; }; -typedef struct pager_struct *vm_pager_t; -#define vm_pager_null ((vm_pager_t) 0) +typedef struct pager_struct *vm_pager_t; +#define vm_pager_null ((vm_pager_t) 0) -#define PAGER_SUCCESS 0 /* page read or written */ -#define PAGER_ABSENT 1 /* pager does not have page */ -#define PAGER_ERROR 2 /* pager unable to read or write page */ +#define PAGER_SUCCESS 0 /* page read or written */ +#define PAGER_ABSENT 1 /* pager does not have page */ +#define PAGER_ERROR 2 /* pager unable to read or write page */ #if 0 -#ifdef KERNEL -typedef int pager_return_t; +#ifdef KERNEL +typedef int pager_return_t; -extern vm_pager_t vm_pager_allocate(void); -extern void vm_pager_deallocate(void); -extern pager_return_t vm_pager_get(void); -extern pager_return_t vm_pager_put(void); -extern boolean_t vm_pager_has_page(void); -#endif /* KERNEL */ +extern vm_pager_t vm_pager_allocate(void); +extern void vm_pager_deallocate(void); +extern pager_return_t vm_pager_get(void); +extern pager_return_t vm_pager_put(void); +extern boolean_t vm_pager_has_page(void); +#endif /* KERNEL */ #endif -#endif /* _VM_PAGER_ */ +#endif /* _VM_PAGER_ */ diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c index d3109c564..596835593 100644 --- a/bsd/vm/vm_unix.c +++ b/bsd/vm/vm_unix.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. 
The CMU software License Agreement specifies @@ -124,25 +124,27 @@ static int sysctl_kmem_alloc_contig SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2) - vm_offset_t kaddr; - kern_return_t kr; - int error = 0; - int size = 0; + vm_offset_t kaddr; + kern_return_t kr; + int error = 0; + int size = 0; error = sysctl_handle_int(oidp, &size, 0, req); - if (error || !req->newptr) - return (error); + if (error || !req->newptr) { + return error; + } kr = kmem_alloc_contig(kernel_map, &kaddr, (vm_size_t)size, 0, 0, 0, 0, VM_KERN_MEMORY_IOKIT); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { kmem_free(kernel_map, kaddr, size); + } return error; } -SYSCTL_PROC(_vm, OID_AUTO, kmem_alloc_contig, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, - 0, 0, &sysctl_kmem_alloc_contig, "I", ""); +SYSCTL_PROC(_vm, OID_AUTO, kmem_alloc_contig, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, + 0, 0, &sysctl_kmem_alloc_contig, "I", ""); extern int vm_region_footprint; SYSCTL_INT(_vm, OID_AUTO, region_footprint, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, &vm_region_footprint, 0, ""); @@ -150,11 +152,11 @@ static int sysctl_vm_self_region_footprint SYSCTL_HANDLER_ARGS { #pragma unused(arg1, arg2, oidp) - int error = 0; - int value; + int error = 0; + int value; value = task_self_region_footprint(); - error = SYSCTL_OUT(req, &value, sizeof (int)); + error = SYSCTL_OUT(req, &value, sizeof(int)); if (error) { return error; } @@ -163,14 +165,14 @@ sysctl_vm_self_region_footprint SYSCTL_HANDLER_ARGS return 0; } - error = SYSCTL_IN(req, &value, sizeof (int)); + error = SYSCTL_IN(req, &value, sizeof(int)); if (error) { - return (error); + return error; } task_self_region_footprint_set(value); return 0; } -SYSCTL_PROC(_vm, OID_AUTO, self_region_footprint, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED, 0, 0, &sysctl_vm_self_region_footprint, "I", ""); +SYSCTL_PROC(_vm, OID_AUTO, self_region_footprint, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0, &sysctl_vm_self_region_footprint, "I", ""); #endif /* DEVELOPMENT || DEBUG */ @@ -254,12 +256,13 @@ vm_shadow_max SYSCTL_HANDLER_ARGS #pragma unused(arg1, arg2, oidp) int value = 0; - if (vm_shadow_max_enabled) + if (vm_shadow_max_enabled) { value = proc_shadow_max(); + } return SYSCTL_OUT(req, &value, sizeof(value)); } -SYSCTL_PROC(_vm, OID_AUTO, vm_shadow_max, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED, +SYSCTL_PROC(_vm, OID_AUTO, vm_shadow_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, &vm_shadow_max, "I", ""); SYSCTL_INT(_vm, OID_AUTO, vm_shadow_max_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_shadow_max_enabled, 0, ""); @@ -296,8 +299,8 @@ static const char *prot_values[] = { void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot) { - printf("Data/Stack execution not permitted: %s[pid %d] at virtual address 0x%qx, protections were %s\n", - current_proc()->p_comm, current_proc()->p_pid, vaddr, prot_values[prot & VM_PROT_ALL]); + printf("Data/Stack execution not permitted: %s[pid %d] at virtual address 0x%qx, protections were %s\n", + current_proc()->p_comm, current_proc()->p_pid, vaddr, prot_values[prot & VM_PROT_ALL]); } /* @@ -310,7 +313,7 @@ log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot) int shared_region_unnest_logging = 1; SYSCTL_INT(_vm, OID_AUTO, shared_region_unnest_logging, CTLFLAG_RW | CTLFLAG_LOCKED, - &shared_region_unnest_logging, 0, ""); + &shared_region_unnest_logging, 0, ""); int vm_shared_region_unnest_log_interval = 10; int 
shared_region_unnest_log_count_threshold = 5; @@ -345,21 +348,22 @@ SYSCTL_PROC(_vm, OID_AUTO, enforce_shared_cache_dir, CTLTYPE_INT | CTLFLAG_RW | /* These log rate throttling state variables aren't thread safe, but * are sufficient unto the task. */ -static int64_t last_unnest_log_time = 0; +static int64_t last_unnest_log_time = 0; static int shared_region_unnest_log_count = 0; void log_unnest_badness( - vm_map_t m, + vm_map_t m, vm_map_offset_t s, vm_map_offset_t e, - boolean_t is_nested_map, - vm_map_offset_t lowest_unnestable_addr) + boolean_t is_nested_map, + vm_map_offset_t lowest_unnestable_addr) { - struct timeval tv; + struct timeval tv; - if (shared_region_unnest_logging == 0) + if (shared_region_unnest_logging == 0) { return; + } if (shared_region_unnest_logging <= 2 && is_nested_map && @@ -375,8 +379,9 @@ log_unnest_badness( if ((tv.tv_sec - last_unnest_log_time) < vm_shared_region_unnest_log_interval) { if (shared_region_unnest_log_count++ > - shared_region_unnest_log_count_threshold) + shared_region_unnest_log_count_threshold) { return; + } } else { last_unnest_log_time = tv.tv_sec; shared_region_unnest_log_count = 0; @@ -384,58 +389,58 @@ log_unnest_badness( } DTRACE_VM4(log_unnest_badness, - vm_map_t, m, - vm_map_offset_t, s, - vm_map_offset_t, e, - vm_map_offset_t, lowest_unnestable_addr); + vm_map_t, m, + vm_map_offset_t, s, + vm_map_offset_t, e, + vm_map_offset_t, lowest_unnestable_addr); printf("%s[%d] triggered unnest of range 0x%qx->0x%qx of DYLD shared region in VM map %p. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", current_proc()->p_comm, current_proc()->p_pid, (uint64_t)s, (uint64_t)e, (void *) VM_KERNEL_ADDRPERM(m)); } int useracc( - user_addr_t addr, - user_size_t len, - int prot) + user_addr_t addr, + user_size_t len, + int prot) { - vm_map_t map; + vm_map_t map; map = current_map(); - return (vm_map_check_protection( - map, - vm_map_trunc_page(addr, - vm_map_page_mask(map)), - vm_map_round_page(addr+len, - vm_map_page_mask(map)), - prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE)); + return vm_map_check_protection( + map, + vm_map_trunc_page(addr, + vm_map_page_mask(map)), + vm_map_round_page(addr + len, + vm_map_page_mask(map)), + prot == B_READ ? 
VM_PROT_READ : VM_PROT_WRITE); } int vslock( - user_addr_t addr, - user_size_t len) + user_addr_t addr, + user_size_t len) { - kern_return_t kret; - vm_map_t map; + kern_return_t kret; + vm_map_t map; map = current_map(); kret = vm_map_wire_kernel(map, - vm_map_trunc_page(addr, - vm_map_page_mask(map)), - vm_map_round_page(addr+len, - vm_map_page_mask(map)), - VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_BSD, - FALSE); + vm_map_trunc_page(addr, + vm_map_page_mask(map)), + vm_map_round_page(addr + len, + vm_map_page_mask(map)), + VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_BSD, + FALSE); switch (kret) { case KERN_SUCCESS: - return (0); + return 0; case KERN_INVALID_ADDRESS: case KERN_NO_SPACE: - return (ENOMEM); + return ENOMEM; case KERN_PROTECTION_FAILURE: - return (EACCES); + return EACCES; default: - return (EINVAL); + return EINVAL; } } @@ -446,13 +451,13 @@ vsunlock( __unused int dirtied) { #if FIXME /* [ */ - pmap_t pmap; - vm_page_t pg; - vm_map_offset_t vaddr; - ppnum_t paddr; + pmap_t pmap; + vm_page_t pg; + vm_map_offset_t vaddr; + ppnum_t paddr; #endif /* FIXME ] */ - kern_return_t kret; - vm_map_t map; + kern_return_t kret; + vm_map_t map; map = current_map(); @@ -460,33 +465,33 @@ vsunlock( if (dirtied) { pmap = get_task_pmap(current_task()); for (vaddr = vm_map_trunc_page(addr, PAGE_MASK); - vaddr < vm_map_round_page(addr+len, PAGE_MASK); - vaddr += PAGE_SIZE) { + vaddr < vm_map_round_page(addr + len, PAGE_MASK); + vaddr += PAGE_SIZE) { paddr = pmap_extract(pmap, vaddr); pg = PHYS_TO_VM_PAGE(paddr); vm_page_set_modified(pg); } } #endif /* FIXME ] */ -#ifdef lint +#ifdef lint dirtied++; -#endif /* lint */ +#endif /* lint */ kret = vm_map_unwire(map, - vm_map_trunc_page(addr, - vm_map_page_mask(map)), - vm_map_round_page(addr+len, - vm_map_page_mask(map)), - FALSE); + vm_map_trunc_page(addr, + vm_map_page_mask(map)), + vm_map_round_page(addr + len, + vm_map_page_mask(map)), + FALSE); switch (kret) { case KERN_SUCCESS: - return (0); + return 0; case KERN_INVALID_ADDRESS: case KERN_NO_SPACE: - return (ENOMEM); + return ENOMEM; case KERN_PROTECTION_FAILURE: - return (EACCES); + return EACCES; default: - return (EINVAL); + return EINVAL; } } @@ -496,9 +501,9 @@ subyte( int byte) { char character; - + character = (char)byte; - return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1); + return copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1; } int @@ -507,27 +512,31 @@ suibyte( int byte) { char character; - + character = (char)byte; - return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1); + return copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1; } -int fubyte(user_addr_t addr) +int +fubyte(user_addr_t addr) { unsigned char byte; - if (copyin(addr, (void *) &byte, sizeof(char))) - return(-1); - return(byte); + if (copyin(addr, (void *) &byte, sizeof(char))) { + return -1; + } + return byte; } -int fuibyte(user_addr_t addr) +int +fuibyte(user_addr_t addr) { unsigned char byte; - if (copyin(addr, (void *) &(byte), sizeof(char))) - return(-1); - return(byte); + if (copyin(addr, (void *) &(byte), sizeof(char))) { + return -1; + } + return byte; } int @@ -535,16 +544,18 @@ suword( user_addr_t addr, long word) { - return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1); + return copyout((void *) &word, addr, sizeof(int)) == 0 ? 
0 : -1; } -long fuword(user_addr_t addr) +long +fuword(user_addr_t addr) { long word = 0; - if (copyin(addr, (void *) &word, sizeof(int))) - return(-1); - return(word); + if (copyin(addr, (void *) &word, sizeof(int))) { + return -1; + } + return word; } /* suiword and fuiword are the same as suword and fuword, respectively */ @@ -554,16 +565,18 @@ suiword( user_addr_t addr, long word) { - return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1); + return copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1; } -long fuiword(user_addr_t addr) +long +fuiword(user_addr_t addr) { long word = 0; - if (copyin(addr, (void *) &word, sizeof(int))) - return(-1); - return(word); + if (copyin(addr, (void *) &word, sizeof(int))) { + return -1; + } + return word; } /* @@ -573,11 +586,10 @@ long fuiword(user_addr_t addr) int sulong(user_addr_t addr, int64_t word) { - if (IS_64BIT_PROCESS(current_proc())) { - return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1); + return copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1; } else { - return(suiword(addr, (long)word)); + return suiword(addr, (long)word); } } @@ -587,22 +599,22 @@ fulong(user_addr_t addr) { int64_t longword; if (IS_64BIT_PROCESS(current_proc())) { - if (copyin(addr, (void *)&longword, sizeof(longword)) != 0) - return(-1); - return(longword); + if (copyin(addr, (void *)&longword, sizeof(longword)) != 0) { + return -1; + } + return longword; } else { - return((int64_t)fuiword(addr)); + return (int64_t)fuiword(addr); } } int suulong(user_addr_t addr, uint64_t uword) { - if (IS_64BIT_PROCESS(current_proc())) { - return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1); + return copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1; } else { - return(suiword(addr, (uint32_t)uword)); + return suiword(addr, (uint32_t)uword); } } @@ -612,33 +624,34 @@ fuulong(user_addr_t addr) { uint64_t ulongword; if (IS_64BIT_PROCESS(current_proc())) { - if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0) - return(-1ULL); - return(ulongword); + if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0) { + return -1ULL; + } + return ulongword; } else { - return((uint64_t)fuiword(addr)); + return (uint64_t)fuiword(addr); } } int swapon(__unused proc_t procp, __unused struct swapon_args *uap, __unused int *retval) { - return(ENOTSUP); + return ENOTSUP; } /* * pid_for_task * - * Find the BSD process ID for the Mach task associated with the given Mach port + * Find the BSD process ID for the Mach task associated with the given Mach port * name * * Parameters: args User argument descriptor (see below) * * Indirect parameters: args->t Mach port name - * args->pid Process ID (returned value; see below) + * args->pid Process ID (returned value; see below) * * Returns: KERN_SUCCESS Success - * KERN_FAILURE Not success + * KERN_FAILURE Not success * * Implicit returns: args->pid Process ID * @@ -647,12 +660,12 @@ kern_return_t pid_for_task( struct pid_for_task_args *args) { - mach_port_name_t t = args->t; - user_addr_t pid_addr = args->pid; + mach_port_name_t t = args->t; + user_addr_t pid_addr = args->pid; proc_t p; - task_t t1; - int pid = -1; - kern_return_t err = KERN_SUCCESS; + task_t t1; + int pid = -1; + kern_return_t err = KERN_SUCCESS; AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK); AUDIT_ARG(mach_port1, t); @@ -670,7 +683,7 @@ pid_for_task( } else if (is_corpsetask(t1)) { pid = task_pid(t1); err = KERN_SUCCESS; - }else { + } else { err = KERN_FAILURE; } } @@ -679,10 +692,10 @@ pftout: AUDIT_ARG(pid, pid); (void)
copyout((char *) &pid, pid_addr, sizeof(int)); AUDIT_MACH_SYSCALL_EXIT(err); - return(err); + return err; } -/* +/* * * tfp_policy = KERN_TFP_POLICY_DENY; Deny Mode: None allowed except for self * tfp_policy = KERN_TFP_POLICY_DEFAULT; default mode: all posix checks and upcall via task port for authentication @@ -694,7 +707,7 @@ static int tfp_policy = KERN_TFP_POLICY_DEFAULT; * Routine: task_for_pid_posix_check * Purpose: * Verify that the current process should be allowed to - * get the target process's task port. This is only + * get the target process's task port. This is only * permitted if: * - The current process is root * OR all of the following are true: @@ -712,7 +725,7 @@ task_for_pid_posix_check(proc_t target) { kauth_cred_t targetcred, mycred; uid_t myuid; - int allowed; + int allowed; /* No task_for_pid on bad targets */ if (target->p_stat == SZOMB) { @@ -723,18 +736,20 @@ task_for_pid_posix_check(proc_t target) myuid = kauth_cred_getuid(mycred); /* If we're running as root, the check passes */ - if (kauth_cred_issuser(mycred)) + if (kauth_cred_issuser(mycred)) { return TRUE; + } /* We're allowed to get our own task port */ - if (target == current_proc()) + if (target == current_proc()) { return TRUE; + } - /* + /* * Under DENY, only root can get another proc's task port, * so no more checks are needed. */ - if (tfp_policy == KERN_TFP_POLICY_DENY) { + if (tfp_policy == KERN_TFP_POLICY_DENY) { return FALSE; } @@ -742,16 +757,16 @@ task_for_pid_posix_check(proc_t target) allowed = TRUE; /* Do target's ruid, euid, and saved uid match my euid? */ - if ((kauth_cred_getuid(targetcred) != myuid) || - (kauth_cred_getruid(targetcred) != myuid) || - (kauth_cred_getsvuid(targetcred) != myuid)) { + if ((kauth_cred_getuid(targetcred) != myuid) || + (kauth_cred_getruid(targetcred) != myuid) || + (kauth_cred_getsvuid(targetcred) != myuid)) { allowed = FALSE; goto out; } /* Are target's groups a subset of my groups? */ if (kauth_cred_gid_subset(targetcred, mycred, &allowed) || - allowed == 0) { + allowed == 0) { allowed = FALSE; goto out; } @@ -761,7 +776,7 @@ task_for_pid_posix_check(proc_t target) allowed = FALSE; goto out; } - + out: kauth_cred_unref(&targetcred); return allowed; @@ -774,7 +789,8 @@ out: * we made. Function declared non inline to be visible in * stackshots and spindumps as well as debugging. 
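 *
 *		The noinline attribute is load-bearing here: if the compiler
 *		inlined the wrapper, the telltale frame would vanish from
 *		backtraces. The same pattern fits any long blocking upcall;
 *		a minimal sketch, using hypothetical names that are not part
 *		of xnu:
 *
 *			__attribute__((noinline)) static int
 *			__WAITING_ON_EXAMPLE_DAEMON__(int request)
 *			{
 *				// Blocks here; the named frame stays visible
 *				// in stackshots and spindumps while the
 *				// thread waits on the daemon's reply.
 *				return example_blocking_upcall(request);
 *			}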
*/ -__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__( +__attribute__((noinline)) int +__KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__( mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid) { return check_task_access(task_access_port, calling_pid, calling_gid, target_pid); @@ -797,14 +813,14 @@ kern_return_t task_for_pid( struct task_for_pid_args *args) { - mach_port_name_t target_tport = args->target_tport; - int pid = args->pid; - user_addr_t task_addr = args->t; - proc_t p = PROC_NULL; - task_t t1 = TASK_NULL; - task_t task = TASK_NULL; - mach_port_name_t tret = MACH_PORT_NULL; - ipc_port_t tfpport = MACH_PORT_NULL; + mach_port_name_t target_tport = args->target_tport; + int pid = args->pid; + user_addr_t task_addr = args->t; + proc_t p = PROC_NULL; + task_t t1 = TASK_NULL; + task_t task = TASK_NULL; + mach_port_name_t tret = MACH_PORT_NULL; + ipc_port_t tfpport = MACH_PORT_NULL; void * sright; int error = 0; @@ -814,17 +830,17 @@ task_for_pid( /* Always check if pid == 0 */ if (pid == 0) { - (void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t)); + (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t)); AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE); - return(KERN_FAILURE); + return KERN_FAILURE; } t1 = port_name_to_task(target_tport); if (t1 == TASK_NULL) { (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t)); AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE); - return(KERN_FAILURE); - } + return KERN_FAILURE; + } p = proc_find(pid); @@ -861,10 +877,9 @@ task_for_pid( /* If we aren't root and target's task access port is set... */ if (!kauth_cred_issuser(kauth_cred_get()) && - p != current_proc() && - (task_get_task_access_port(task, &tfpport) == 0) && - (tfpport != IPC_PORT_NULL)) { - + p != current_proc() && + (task_get_task_access_port(task, &tfpport) == 0) && + (tfpport != IPC_PORT_NULL)) { if (tfpport == IPC_PORT_DEAD) { error = KERN_PROTECTION_FAILURE; goto tfpout; @@ -882,10 +897,11 @@ task_for_pid( error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid); if (error != MACH_MSG_SUCCESS) { - if (error == MACH_RCV_INTERRUPTED) + if (error == MACH_RCV_INTERRUPTED) { error = KERN_ABORTED; - else + } else { error = KERN_FAILURE; + } goto tfpout; } } @@ -906,8 +922,8 @@ task_for_pid( /* task ref consumed by convert_task_to_port */ task = TASK_NULL; tret = ipc_port_copyout_send( - sright, - get_task_ipcspace(current_task())); + sright, + get_task_ipcspace(current_task())); error = KERN_SUCCESS; @@ -922,10 +938,11 @@ tfpout: if (task != TASK_NULL) { task_deallocate(task); } - if (p != PROC_NULL) + if (p != PROC_NULL) { proc_rele(p); + } AUDIT_MACH_SYSCALL_EXIT(error); - return(error); + return error; } /* @@ -944,12 +961,12 @@ kern_return_t task_name_for_pid( struct task_name_for_pid_args *args) { - mach_port_name_t target_tport = args->target_tport; - int pid = args->pid; - user_addr_t task_addr = args->t; - proc_t p = PROC_NULL; - task_t t1; - mach_port_name_t tret; + mach_port_name_t target_tport = args->target_tport; + int pid = args->pid; + user_addr_t task_addr = args->t; + proc_t p = PROC_NULL; + task_t t1; + mach_port_name_t tret; void * sright; int error = 0, refheld = 0; kauth_cred_t target_cred; @@ -962,8 +979,8 @@ task_name_for_pid( if (t1 == TASK_NULL) { (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t)); AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE); - return(KERN_FAILURE); - } + return KERN_FAILURE; + } p = proc_find(pid); if (p != 
PROC_NULL) { @@ -973,24 +990,24 @@ task_name_for_pid( if ((p->p_stat != SZOMB) && ((current_proc() == p) - || kauth_cred_issuser(kauth_cred_get()) - || ((kauth_cred_getuid(target_cred) == kauth_cred_getuid(kauth_cred_get())) && - ((kauth_cred_getruid(target_cred) == kauth_getruid()))))) { - + || kauth_cred_issuser(kauth_cred_get()) + || ((kauth_cred_getuid(target_cred) == kauth_cred_getuid(kauth_cred_get())) && + ((kauth_cred_getruid(target_cred) == kauth_getruid()))))) { if (p->task != TASK_NULL) { task_reference(p->task); #if CONFIG_MACF - error = mac_proc_check_get_task_name(kauth_cred_get(), p); + error = mac_proc_check_get_task_name(kauth_cred_get(), p); if (error) { task_deallocate(p->task); goto noperm; } #endif sright = (void *)convert_task_name_to_port(p->task); - tret = ipc_port_copyout_send(sright, - get_task_ipcspace(current_task())); - } else + tret = ipc_port_copyout_send(sright, + get_task_ipcspace(current_task())); + } else { tret = MACH_PORT_NULL; + } AUDIT_ARG(mach_port2, tret); (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t)); @@ -1003,26 +1020,28 @@ task_name_for_pid( #if CONFIG_MACF noperm: #endif - task_deallocate(t1); + task_deallocate(t1); tret = MACH_PORT_NULL; (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t)); error = KERN_FAILURE; tnfpout: - if (refheld != 0) + if (refheld != 0) { kauth_cred_unref(&target_cred); - if (p != PROC_NULL) + } + if (p != PROC_NULL) { proc_rele(p); + } AUDIT_MACH_SYSCALL_EXIT(error); - return(error); + return error; } kern_return_t pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret) { - task_t target = NULL; - proc_t targetproc = PROC_NULL; - int pid = args->pid; - int error = 0; + task_t target = NULL; + proc_t targetproc = PROC_NULL; + int pid = args->pid; + int error = 0; #if CONFIG_MACF error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SUSPEND); @@ -1055,10 +1074,9 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret) /* If we aren't root and target's task access port is set... 
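 * the suspend request is routed through that port so the user space
 * policy daemon (taskgated) can veto it. In the upcall below,
 * MACH_RCV_INTERRUPTED becomes EINTR so the caller can retry, any
 * other upcall failure becomes EPERM, and a dead access port is
 * reported as EACCES.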
*/ if (!kauth_cred_issuser(kauth_cred_get()) && - targetproc != current_proc() && - (task_get_task_access_port(target, &tfpport) == 0) && - (tfpport != IPC_PORT_NULL)) { - + targetproc != current_proc() && + (task_get_task_access_port(target, &tfpport) == 0) && + (tfpport != IPC_PORT_NULL)) { if (tfpport == IPC_PORT_DEAD) { error = EACCES; goto out; @@ -1068,10 +1086,11 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret) error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid); if (error != MACH_MSG_SUCCESS) { - if (error == MACH_RCV_INTERRUPTED) + if (error == MACH_RCV_INTERRUPTED) { error = EINTR; - else + } else { error = EPERM; + } goto out; } } @@ -1096,8 +1115,9 @@ pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret) task_deallocate(target); out: - if (targetproc != PROC_NULL) + if (targetproc != PROC_NULL) { proc_rele(targetproc); + } *ret = error; return error; } @@ -1105,10 +1125,10 @@ out: kern_return_t pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret) { - task_t target = NULL; - proc_t targetproc = PROC_NULL; - int pid = args->pid; - int error = 0; + task_t target = NULL; + proc_t targetproc = PROC_NULL; + int pid = args->pid; + int error = 0; #if CONFIG_MACF error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_RESUME); @@ -1141,10 +1161,9 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret) /* If we aren't root and target's task access port is set... */ if (!kauth_cred_issuser(kauth_cred_get()) && - targetproc != current_proc() && - (task_get_task_access_port(target, &tfpport) == 0) && - (tfpport != IPC_PORT_NULL)) { - + targetproc != current_proc() && + (task_get_task_access_port(target, &tfpport) == 0) && + (tfpport != IPC_PORT_NULL)) { if (tfpport == IPC_PORT_DEAD) { error = EACCES; goto out; @@ -1154,10 +1173,11 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret) error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid); if (error != MACH_MSG_SUCCESS) { - if (error == MACH_RCV_INTERRUPTED) + if (error == MACH_RCV_INTERRUPTED) { error = EINTR; - else + } else { error = EPERM; + } goto out; } } @@ -1184,17 +1204,19 @@ pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret) if (error == KERN_MEMORY_ERROR) { psignal(targetproc, SIGKILL); error = EIO; - } else + } else { error = EPERM; + } } } - + task_deallocate(target); out: - if (targetproc != PROC_NULL) + if (targetproc != PROC_NULL) { proc_rele(targetproc); - + } + *ret = error; return error; } @@ -1208,9 +1230,9 @@ out: kern_return_t pid_hibernate(struct proc *p __unused, struct pid_hibernate_args *args, int *ret) { - int error = 0; - proc_t targetproc = PROC_NULL; - int pid = args->pid; + int error = 0; + proc_t targetproc = PROC_NULL; + int pid = args->pid; #ifndef CONFIG_FREEZE #pragma unused(pid) @@ -1254,8 +1276,9 @@ out: #endif /* CONFIG_FREEZE */ - if (targetproc != PROC_NULL) + if (targetproc != PROC_NULL) { proc_rele(targetproc); + } *ret = error; return error; } @@ -1265,7 +1288,7 @@ out: int networking_memstatus_callout(proc_t p, uint32_t status) { - struct filedesc *fdp; + struct filedesc *fdp; int i; /* @@ -1279,7 +1302,7 @@ networking_memstatus_callout(proc_t p, uint32_t status) proc_fdlock(p); fdp = p->p_fd; for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp; + struct fileproc *fp; fp = fdp->fd_ofiles[i]; if (fp == NULL || (fdp->fd_ofileflags[i] & 
UF_RESERVED) != 0) { @@ -1298,7 +1321,7 @@ networking_memstatus_callout(proc_t p, uint32_t status) } proc_fdunlock(p); - return (1); + return 1; } @@ -1308,13 +1331,13 @@ networking_defunct_callout(proc_t p, void *arg) struct pid_shutdown_sockets_args *args = arg; int pid = args->pid; int level = args->level; - struct filedesc *fdp; + struct filedesc *fdp; int i; proc_fdlock(p); fdp = p->p_fd; for (i = 0; i < fdp->fd_nfiles; i++) { - struct fileproc *fp = fdp->fd_ofiles[i]; + struct fileproc *fp = fdp->fd_ofiles[i]; struct fileglob *fg; if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) { @@ -1325,7 +1348,7 @@ networking_defunct_callout(proc_t p, void *arg) switch (FILEGLOB_DTYPE(fg)) { case DTYPE_SOCKET: { struct socket *so = (struct socket *)fg->fg_data; - if (p->p_pid == pid || so->last_pid == pid || + if (p->p_pid == pid || so->last_pid == pid || ((so->so_flags & SOF_DELEGATED) && so->e_pid == pid)) { /* Call networking stack with socket and level */ (void) socket_defunct(p, so, level); @@ -1348,16 +1371,16 @@ networking_defunct_callout(proc_t p, void *arg) proc_fdunlock(p); - return (PROC_RETURNED); + return PROC_RETURNED; } int pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args *args, int *ret) { - int error = 0; - proc_t targetproc = PROC_NULL; - int pid = args->pid; - int level = args->level; + int error = 0; + proc_t targetproc = PROC_NULL; + int pid = args->pid; + int level = args->level; if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC && level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL) { @@ -1388,8 +1411,9 @@ pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args * networking_defunct_callout, args, NULL, NULL); out: - if (targetproc != PROC_NULL) + if (targetproc != PROC_NULL) { proc_rele(targetproc); + } *ret = error; return error; } @@ -1400,27 +1424,29 @@ static int sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1, __unused int arg2, struct sysctl_req *req) { - int error = 0; + int error = 0; int new_value; - error = SYSCTL_OUT(req, arg1, sizeof(int)); - if (error || req->newptr == USER_ADDR_NULL) - return(error); + error = SYSCTL_OUT(req, arg1, sizeof(int)); + if (error || req->newptr == USER_ADDR_NULL) { + return error; + } - if (!kauth_cred_issuser(kauth_cred_get())) - return(EPERM); + if (!kauth_cred_issuser(kauth_cred_get())) { + return EPERM; + } if ((error = SYSCTL_IN(req, &new_value, sizeof(int)))) { goto out; } - if ((new_value == KERN_TFP_POLICY_DENY) - || (new_value == KERN_TFP_POLICY_DEFAULT)) - tfp_policy = new_value; - else - error = EINVAL; + if ((new_value == KERN_TFP_POLICY_DENY) + || (new_value == KERN_TFP_POLICY_DEFAULT)) { + tfp_policy = new_value; + } else { + error = EINVAL; + } out: - return(error); - + return error; } #if defined(SECURE_KERNEL) @@ -1433,14 +1459,14 @@ SYSCTL_INT(_kern, OID_AUTO, secure_kernel, CTLFLAG_RD | CTLFLAG_LOCKED, &kern_se SYSCTL_NODE(_kern, KERN_TFP, tfp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "tfp"); SYSCTL_PROC(_kern_tfp, KERN_TFP_POLICY, policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, - &tfp_policy, sizeof(uint32_t), &sysctl_settfp_policy ,"I","policy"); + &tfp_policy, sizeof(uint32_t), &sysctl_settfp_policy, "I", "policy"); SYSCTL_INT(_vm, OID_AUTO, shared_region_trace_level, CTLFLAG_RW | CTLFLAG_LOCKED, - &shared_region_trace_level, 0, ""); + &shared_region_trace_level, 0, ""); SYSCTL_INT(_vm, OID_AUTO, shared_region_version, CTLFLAG_RD | CTLFLAG_LOCKED, - &shared_region_version, 0, ""); + &shared_region_version, 0, ""); SYSCTL_INT(_vm, OID_AUTO, 
shared_region_persistence, CTLFLAG_RW | CTLFLAG_LOCKED, - &shared_region_persistence, 0, ""); + &shared_region_persistence, 0, ""); /* * shared_region_check_np: @@ -1470,43 +1496,43 @@ SYSCTL_INT(_vm, OID_AUTO, shared_region_persistence, CTLFLAG_RW | CTLFLAG_LOCKED */ int shared_region_check_np( - __unused struct proc *p, - struct shared_region_check_np_args *uap, - __unused int *retvalp) + __unused struct proc *p, + struct shared_region_check_np_args *uap, + __unused int *retvalp) { - vm_shared_region_t shared_region; - mach_vm_offset_t start_address = 0; - int error; - kern_return_t kr; + vm_shared_region_t shared_region; + mach_vm_offset_t start_address = 0; + int error; + kern_return_t kr; SHARED_REGION_TRACE_DEBUG( ("shared_region: %p [%d(%s)] -> check_np(0x%llx)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (uint64_t)uap->start_address)); + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (uint64_t)uap->start_address)); /* retrieve the current tasks's shared region */ shared_region = vm_shared_region_get(current_task()); if (shared_region != NULL) { /* retrieve address of its first mapping... */ kr = vm_shared_region_start_address(shared_region, - &start_address); + &start_address); if (kr != KERN_SUCCESS) { error = ENOMEM; } else { /* ... and give it to the caller */ error = copyout(&start_address, - (user_addr_t) uap->start_address, - sizeof (start_address)); + (user_addr_t) uap->start_address, + sizeof(start_address)); if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] " - "check_np(0x%llx) " - "copyout(0x%llx) error %d\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (uint64_t)uap->start_address, (uint64_t)start_address, - error)); + "check_np(0x%llx) " + "copyout(0x%llx) error %d\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (uint64_t)uap->start_address, (uint64_t)start_address, + error)); } } vm_shared_region_deallocate(shared_region); @@ -1517,9 +1543,9 @@ shared_region_check_np( SHARED_REGION_TRACE_DEBUG( ("shared_region: %p [%d(%s)] check_np(0x%llx) <- 0x%llx %d\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (uint64_t)uap->start_address, (uint64_t)start_address, error)); + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (uint64_t)uap->start_address, (uint64_t)start_address, error)); return error; } @@ -1527,26 +1553,26 @@ shared_region_check_np( int shared_region_copyin_mappings( - struct proc *p, - user_addr_t user_mappings, - unsigned int mappings_count, - struct shared_file_mapping_np *mappings) + struct proc *p, + user_addr_t user_mappings, + unsigned int mappings_count, + struct shared_file_mapping_np *mappings) { - int error = 0; - vm_size_t mappings_size = 0; + int error = 0; + vm_size_t mappings_size = 0; /* get the list of mappings the caller wants us to establish */ - mappings_size = (vm_size_t) (mappings_count * sizeof (mappings[0])); + mappings_size = (vm_size_t) (mappings_count * sizeof(mappings[0])); error = copyin(user_mappings, - mappings, - mappings_size); + mappings, + mappings_size); if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(): " - "copyin(0x%llx, %d) failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (uint64_t)user_mappings, mappings_count, error)); + "copyin(0x%llx, %d) failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (uint64_t)user_mappings, 
mappings_count, error)); } return error; } @@ -1562,32 +1588,32 @@ shared_region_copyin_mappings( */ int _shared_region_map_and_slide( - struct proc *p, - int fd, - uint32_t mappings_count, - struct shared_file_mapping_np *mappings, - uint32_t slide, - user_addr_t slide_start, - user_addr_t slide_size) + struct proc *p, + int fd, + uint32_t mappings_count, + struct shared_file_mapping_np *mappings, + uint32_t slide, + user_addr_t slide_start, + user_addr_t slide_size) { - int error; - kern_return_t kr; - struct fileproc *fp; - struct vnode *vp, *root_vp, *scdir_vp; - struct vnode_attr va; - off_t fs; - memory_object_size_t file_size; + int error; + kern_return_t kr; + struct fileproc *fp; + struct vnode *vp, *root_vp, *scdir_vp; + struct vnode_attr va; + off_t fs; + memory_object_size_t file_size; #if CONFIG_MACF - vm_prot_t maxprot = VM_PROT_ALL; + vm_prot_t maxprot = VM_PROT_ALL; #endif - memory_object_control_t file_control; - struct vm_shared_region *shared_region; - uint32_t i; + memory_object_control_t file_control; + struct vm_shared_region *shared_region; + uint32_t i; SHARED_REGION_TRACE_DEBUG( ("shared_region: %p [%d(%s)] -> map\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm)); + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm)); shared_region = NULL; fp = NULL; @@ -1599,9 +1625,9 @@ _shared_region_map_and_slide( if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map: " - "fd=%d lookup failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, fd, error)); + "fd=%d lookup failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, fd, error)); goto done; } @@ -1609,21 +1635,21 @@ _shared_region_map_and_slide( if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map: " - "fd=%d not a vnode (type=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - fd, FILEGLOB_DTYPE(fp->f_fglob))); + "fd=%d not a vnode (type=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + fd, FILEGLOB_DTYPE(fp->f_fglob))); error = EINVAL; goto done; } /* we need at least read permission on the file */ - if (! 
(fp->f_fglob->fg_flag & FREAD)) { + if (!(fp->f_fglob->fg_flag & FREAD)) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map: " - "fd=%d not readable\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, fd)); + "fd=%d not readable\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, fd)); error = EPERM; goto done; } @@ -1633,9 +1659,9 @@ _shared_region_map_and_slide( if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map: " - "fd=%d getwithref failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, fd, error)); + "fd=%d getwithref failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, fd, error)); goto done; } vp = (struct vnode *) fp->f_fglob->fg_data; @@ -1644,20 +1670,20 @@ _shared_region_map_and_slide( if (vp->v_type != VREG) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "not a file (type=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), - vp->v_name, vp->v_type)); + "not a file (type=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), + vp->v_name, vp->v_type)); error = EINVAL; goto done; } #if CONFIG_MACF /* pass in 0 for the offset argument because AMFI does not need the offset - of the shared cache */ + * of the shared cache */ error = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()), - fp->f_fglob, VM_PROT_ALL, MAP_FILE, 0, &maxprot); + fp->f_fglob, VM_PROT_ALL, MAP_FILE, 0, &maxprot); if (error) { goto done; } @@ -1678,10 +1704,10 @@ _shared_region_map_and_slide( if (vp->v_mount != root_vp->v_mount) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "not on process's root volume\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); + "not on process's root volume\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); error = EPERM; goto done; } @@ -1693,21 +1719,21 @@ _shared_region_map_and_slide( if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "vnode_getattr(%p) failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, - (void *)VM_KERNEL_ADDRPERM(vp), error)); + "vnode_getattr(%p) failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, + (void *)VM_KERNEL_ADDRPERM(vp), error)); goto done; } if (va.va_uid != 0) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "owned by uid=%d instead of 0\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), - vp->v_name, va.va_uid)); + "owned by uid=%d instead of 0\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), + vp->v_name, va.va_uid)); error = EPERM; goto done; } @@ -1718,11 +1744,11 @@ _shared_region_map_and_slide( if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "vnode_lookup(%s) failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, - scdir_path, error)); + "vnode_lookup(%s) failed (error=%d)\n", + (void 
*)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, + scdir_path, error)); goto done; } @@ -1730,11 +1756,11 @@ _shared_region_map_and_slide( if (vnode_parent(vp) != scdir_vp) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "shared cache file not in %s\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), - vp->v_name, scdir_path)); + "shared cache file not in %s\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), + vp->v_name, scdir_path)); error = EPERM; goto done; } @@ -1745,11 +1771,11 @@ _shared_region_map_and_slide( if (error) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "vnode_size(%p) failed (error=%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, - (void *)VM_KERNEL_ADDRPERM(vp), error)); + "vnode_size(%p) failed (error=%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, + (void *)VM_KERNEL_ADDRPERM(vp), error)); goto done; } file_size = fs; @@ -1759,10 +1785,10 @@ _shared_region_map_and_slide( if (file_control == MEMORY_OBJECT_CONTROL_NULL) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "no memory object\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); + "no memory object\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); error = EINVAL; goto done; } @@ -1770,32 +1796,34 @@ _shared_region_map_and_slide( /* check that the mappings are properly covered by code signatures */ if (!cs_system_enforcement()) { /* code signing is not enforced: no need to check */ - } else for (i = 0; i < mappings_count; i++) { - if (mappings[i].sfm_init_prot & VM_PROT_ZF) { - /* zero-filled mapping: not backed by the file */ - continue; - } - if (ubc_cs_is_range_codesigned(vp, - mappings[i].sfm_file_offset, - mappings[i].sfm_size)) { - /* this mapping is fully covered by code signatures */ - continue; + } else { + for (i = 0; i < mappings_count; i++) { + if (mappings[i].sfm_init_prot & VM_PROT_ZF) { + /* zero-filled mapping: not backed by the file */ + continue; + } + if (ubc_cs_is_range_codesigned(vp, + mappings[i].sfm_file_offset, + mappings[i].sfm_size)) { + /* this mapping is fully covered by code signatures */ + continue; + } + SHARED_REGION_TRACE_ERROR( + ("shared_region: %p [%d(%s)] map(%p:'%s'): " + "mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] " + "is not code-signed\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, + i, mappings_count, + mappings[i].sfm_address, + mappings[i].sfm_size, + mappings[i].sfm_file_offset, + mappings[i].sfm_max_prot, + mappings[i].sfm_init_prot)); + error = EINVAL; + goto done; } - SHARED_REGION_TRACE_ERROR( - ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] " - "is not code-signed\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, - i, mappings_count, - mappings[i].sfm_address, - mappings[i].sfm_size, - mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot)); - error = EINVAL; - goto done; } /* get the 
process's shared region (setup in vm_map_exec()) */ @@ -1803,31 +1831,31 @@ _shared_region_map_and_slide( if (shared_region == NULL) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "no shared region\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); + "no shared region\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name)); error = EINVAL; goto done; } /* map the file into that shared region's submap */ kr = vm_shared_region_map_file(shared_region, - mappings_count, - mappings, - file_control, - file_size, - (void *) p->p_fd->fd_rdir, - slide, - slide_start, - slide_size); + mappings_count, + mappings, + file_control, + file_size, + (void *) p->p_fd->fd_rdir, + slide, + slide_start, + slide_size); if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " - "vm_shared_region_map_file() failed kr=0x%x\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, kr)); + "vm_shared_region_map_file() failed kr=0x%x\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, kr)); switch (kr) { case KERN_INVALID_ADDRESS: error = EFAULT; @@ -1856,7 +1884,7 @@ _shared_region_map_and_slide( vnode_unlock(vp); /* update the vnode's access time */ - if (! (vnode_vfsvisflags(vp) & MNT_NOATIME)) { + if (!(vnode_vfsvisflags(vp) & MNT_NOATIME)) { VATTR_INIT(&va); nanotime(&va.va_access_time); VATTR_SET_ACTIVE(&va, va_access_time); @@ -1893,36 +1921,36 @@ done: SHARED_REGION_TRACE_DEBUG( ("shared_region: %p [%d(%s)] <- map\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm)); + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm)); return error; } int shared_region_map_and_slide_np( - struct proc *p, - struct shared_region_map_and_slide_np_args *uap, - __unused int *retvalp) + struct proc *p, + struct shared_region_map_and_slide_np_args *uap, + __unused int *retvalp) { - struct shared_file_mapping_np *mappings; - unsigned int mappings_count = uap->count; - kern_return_t kr = KERN_SUCCESS; - uint32_t slide = uap->slide; - -#define SFM_MAX_STACK 8 - struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK]; + struct shared_file_mapping_np *mappings; + unsigned int mappings_count = uap->count; + kern_return_t kr = KERN_SUCCESS; + uint32_t slide = uap->slide; + +#define SFM_MAX_STACK 8 + struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK]; /* Is the process chrooted?? */ if (p->p_fd->fd_rdir != NULL) { kr = EINVAL; goto done; } - + if ((kr = vm_shared_region_sliding_valid(slide)) != KERN_SUCCESS) { if (kr == KERN_INVALID_ARGUMENT) { /* - * This will happen if we request sliding again + * This will happen if we request sliding again * with the same slide value that was used earlier * for the very first sliding. */ @@ -1934,32 +1962,32 @@ shared_region_map_and_slide_np( if (mappings_count == 0) { SHARED_REGION_TRACE_INFO( ("shared_region: %p [%d(%s)] map(): " - "no mappings\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm)); - kr = 0; /* no mappings: we're done ! */ + "no mappings\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm)); + kr = 0; /* no mappings: we're done ! 
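 * (a zero-length request is trivially satisfied; non-empty requests
 * below are copied into the on-stack stack_mappings array, so one
 * call covers at most SFM_MAX_STACK mappings and anything larger is
 * rejected outright rather than heap-allocated)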
*/ goto done; } else if (mappings_count <= SFM_MAX_STACK) { mappings = &stack_mappings[0]; } else { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(): " - "too many mappings (%d)\n", - (void *)VM_KERNEL_ADDRPERM(current_thread()), - p->p_pid, p->p_comm, - mappings_count)); + "too many mappings (%d)\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()), + p->p_pid, p->p_comm, + mappings_count)); kr = KERN_FAILURE; goto done; } - if ( (kr = shared_region_copyin_mappings(p, uap->mappings, uap->count, mappings))) { + if ((kr = shared_region_copyin_mappings(p, uap->mappings, uap->count, mappings))) { goto done; } kr = _shared_region_map_and_slide(p, uap->fd, mappings_count, mappings, - slide, - uap->slide_start, uap->slide_size); + slide, + uap->slide_start, uap->slide_size); if (kr != KERN_SUCCESS) { return kr; } @@ -1970,18 +1998,18 @@ done: /* sysctl overflow room */ -SYSCTL_INT (_vm, OID_AUTO, pagesize, CTLFLAG_RD | CTLFLAG_LOCKED, - (int *) &page_size, 0, "vm page size"); +SYSCTL_INT(_vm, OID_AUTO, pagesize, CTLFLAG_RD | CTLFLAG_LOCKED, + (int *) &page_size, 0, "vm page size"); /* vm_page_free_target is provided as a makeshift solution for applications that want to - allocate buffer space, possibly purgeable memory, but not cause inactive pages to be - reclaimed. It allows the app to calculate how much memory is free outside the free target. */ -extern unsigned int vm_page_free_target; -SYSCTL_INT(_vm, OID_AUTO, vm_page_free_target, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_free_target, 0, "Pageout daemon free target"); + * allocate buffer space, possibly purgeable memory, but not cause inactive pages to be + * reclaimed. It allows the app to calculate how much memory is free outside the free target. */ +extern unsigned int vm_page_free_target; +SYSCTL_INT(_vm, OID_AUTO, vm_page_free_target, CTLFLAG_RD | CTLFLAG_LOCKED, + &vm_page_free_target, 0, "Pageout daemon free target"); SYSCTL_INT(_vm, OID_AUTO, memory_pressure, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_state.vm_memory_pressure, 0, "Memory pressure indicator"); + &vm_pageout_state.vm_memory_pressure, 0, "Memory pressure indicator"); static int vm_ctl_page_free_wanted SYSCTL_HANDLER_ARGS @@ -1990,19 +2018,19 @@ vm_ctl_page_free_wanted SYSCTL_HANDLER_ARGS unsigned int page_free_wanted; page_free_wanted = mach_vm_ctl_page_free_wanted(); - return SYSCTL_OUT(req, &page_free_wanted, sizeof (page_free_wanted)); + return SYSCTL_OUT(req, &page_free_wanted, sizeof(page_free_wanted)); } SYSCTL_PROC(_vm, OID_AUTO, page_free_wanted, - CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, vm_ctl_page_free_wanted, "I", ""); + CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED, + 0, 0, vm_ctl_page_free_wanted, "I", ""); -extern unsigned int vm_page_purgeable_count; +extern unsigned int vm_page_purgeable_count; SYSCTL_INT(_vm, OID_AUTO, page_purgeable_count, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_purgeable_count, 0, "Purgeable page count"); + &vm_page_purgeable_count, 0, "Purgeable page count"); -extern unsigned int vm_page_purgeable_wired_count; +extern unsigned int vm_page_purgeable_wired_count; SYSCTL_INT(_vm, OID_AUTO, page_purgeable_wired_count, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_purgeable_wired_count, 0, "Wired purgeable page count"); + &vm_page_purgeable_wired_count, 0, "Wired purgeable page count"); #if DEVELOPMENT || DEBUG extern uint64_t get_pages_grabbed_count(void); @@ -2016,67 +2044,67 @@ pages_grabbed SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_vm, OID_AUTO, pages_grabbed, CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED, - 0, 0, &pages_grabbed, 
"QU", "Total pages grabbed"); + 0, 0, &pages_grabbed, "QU", "Total pages grabbed"); SYSCTL_ULONG(_vm, OID_AUTO, pages_freed, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_vminfo.vm_page_pages_freed, "Total pages freed"); + &vm_pageout_vminfo.vm_page_pages_freed, "Total pages freed"); SYSCTL_INT(_vm, OID_AUTO, pageout_purged_objects, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_debug.vm_pageout_purged_objects, 0, "System purged object count"); + &vm_pageout_debug.vm_pageout_purged_objects, 0, "System purged object count"); SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_debug.vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)"); + &vm_pageout_debug.vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)"); SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_debug.vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)"); + &vm_pageout_debug.vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)"); SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_debug.vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated"); + &vm_pageout_debug.vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated"); SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_debug.vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated"); + &vm_pageout_debug.vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated"); SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_debug.vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated */ + &vm_pageout_debug.vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated */ SYSCTL_ULONG(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_vminfo.vm_pageout_freed_cleaned, "Cleaned pages freed"); + &vm_pageout_vminfo.vm_pageout_freed_cleaned, "Cleaned pages freed"); SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_debug.vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated"); + &vm_pageout_debug.vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated"); SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_pageout_debug.vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */ + &vm_pageout_debug.vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */ #endif extern int madvise_free_debug; SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED, - &madvise_free_debug, 0, "zero-fill on madvise(MADV_FREE*)"); + &madvise_free_debug, 0, "zero-fill on madvise(MADV_FREE*)"); SYSCTL_INT(_vm, OID_AUTO, page_reusable_count, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.reusable_count, 0, "Reusable page count"); + &vm_page_stats_reusable.reusable_count, 0, "Reusable page count"); SYSCTL_QUAD(_vm, OID_AUTO, reusable_success, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.reusable_pages_success, ""); + &vm_page_stats_reusable.reusable_pages_success, ""); SYSCTL_QUAD(_vm, OID_AUTO, reusable_failure, CTLFLAG_RD | CTLFLAG_LOCKED, - 
&vm_page_stats_reusable.reusable_pages_failure, ""); + &vm_page_stats_reusable.reusable_pages_failure, ""); SYSCTL_QUAD(_vm, OID_AUTO, reusable_pages_shared, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.reusable_pages_shared, ""); + &vm_page_stats_reusable.reusable_pages_shared, ""); SYSCTL_QUAD(_vm, OID_AUTO, all_reusable_calls, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.all_reusable_calls, ""); + &vm_page_stats_reusable.all_reusable_calls, ""); SYSCTL_QUAD(_vm, OID_AUTO, partial_reusable_calls, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.partial_reusable_calls, ""); + &vm_page_stats_reusable.partial_reusable_calls, ""); SYSCTL_QUAD(_vm, OID_AUTO, reuse_success, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.reuse_pages_success, ""); + &vm_page_stats_reusable.reuse_pages_success, ""); SYSCTL_QUAD(_vm, OID_AUTO, reuse_failure, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.reuse_pages_failure, ""); + &vm_page_stats_reusable.reuse_pages_failure, ""); SYSCTL_QUAD(_vm, OID_AUTO, all_reuse_calls, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.all_reuse_calls, ""); + &vm_page_stats_reusable.all_reuse_calls, ""); SYSCTL_QUAD(_vm, OID_AUTO, partial_reuse_calls, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.partial_reuse_calls, ""); + &vm_page_stats_reusable.partial_reuse_calls, ""); SYSCTL_QUAD(_vm, OID_AUTO, can_reuse_success, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.can_reuse_success, ""); + &vm_page_stats_reusable.can_reuse_success, ""); SYSCTL_QUAD(_vm, OID_AUTO, can_reuse_failure, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.can_reuse_failure, ""); + &vm_page_stats_reusable.can_reuse_failure, ""); SYSCTL_QUAD(_vm, OID_AUTO, reusable_reclaimed, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.reusable_reclaimed, ""); + &vm_page_stats_reusable.reusable_reclaimed, ""); SYSCTL_QUAD(_vm, OID_AUTO, reusable_nonwritable, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.reusable_nonwritable, ""); + &vm_page_stats_reusable.reusable_nonwritable, ""); SYSCTL_QUAD(_vm, OID_AUTO, reusable_shared, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.reusable_shared, ""); + &vm_page_stats_reusable.reusable_shared, ""); SYSCTL_QUAD(_vm, OID_AUTO, free_shared, CTLFLAG_RD | CTLFLAG_LOCKED, - &vm_page_stats_reusable.free_shared, ""); + &vm_page_stats_reusable.free_shared, ""); extern unsigned int vm_page_free_count, vm_page_speculative_count; @@ -2165,21 +2193,22 @@ SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit_success, CTLFLAG_RD | CT void vm_pageout_io_throttle(void); -void vm_pageout_io_throttle(void) { +void +vm_pageout_io_throttle(void) +{ struct uthread *uthread = get_bsdthread_info(current_thread()); - - /* - * thread is marked as a low priority I/O type - * and the I/O we issued while in this cleaning operation - * collided with normal I/O operations... we'll - * delay in order to mitigate the impact of this - * task on the normal operation of the system - */ + + /* + * thread is marked as a low priority I/O type + * and the I/O we issued while in this cleaning operation + * collided with normal I/O operations... 
we'll + * delay in order to mitigate the impact of this + * task on the normal operation of the system + */ if (uthread->uu_lowpri_window) { throttle_lowpri_io(1); } - } int @@ -2188,9 +2217,9 @@ vm_pressure_monitor( struct vm_pressure_monitor_args *uap, int *retval) { - kern_return_t kr; - uint32_t pages_reclaimed; - uint32_t pages_wanted; + kern_return_t kr; + uint32_t pages_reclaimed; + uint32_t pages_wanted; kr = mach_vm_pressure_monitor( (boolean_t) uap->wait_for_pressure, @@ -2209,8 +2238,8 @@ vm_pressure_monitor( if (uap->pages_reclaimed) { if (copyout((void *)&pages_reclaimed, - uap->pages_reclaimed, - sizeof (pages_reclaimed)) != 0) { + uap->pages_reclaimed, + sizeof(pages_reclaimed)) != 0) { return EFAULT; } } @@ -2221,19 +2250,19 @@ vm_pressure_monitor( int kas_info(struct proc *p, - struct kas_info_args *uap, - int *retval __unused) + struct kas_info_args *uap, + int *retval __unused) { #ifdef SECURE_KERNEL (void)p; (void)uap; return ENOTSUP; #else /* !SECURE_KERNEL */ - int selector = uap->selector; - user_addr_t valuep = uap->value; - user_addr_t sizep = uap->size; + int selector = uap->selector; + user_addr_t valuep = uap->value; + user_addr_t sizep = uap->size; user_size_t size; - int error; + int error; if (!kauth_cred_issuser(kauth_cred_get())) { return EPERM; @@ -2260,33 +2289,33 @@ kas_info(struct proc *p, } switch (selector) { - case KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR: - { - uint64_t slide = vm_kernel_slide; + case KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR: + { + uint64_t slide = vm_kernel_slide; - if (sizeof(slide) != size) { - return EINVAL; - } - - if (IS_64BIT_PROCESS(p)) { - user64_size_t size64 = (user64_size_t)size; - error = copyout(&size64, sizep, sizeof(size64)); - } else { - user32_size_t size32 = (user32_size_t)size; - error = copyout(&size32, sizep, sizeof(size32)); - } - if (error) { - return error; - } - - error = copyout(&slide, valuep, sizeof(slide)); - if (error) { - return error; - } - } - break; - default: + if (sizeof(slide) != size) { return EINVAL; + } + + if (IS_64BIT_PROCESS(p)) { + user64_size_t size64 = (user64_size_t)size; + error = copyout(&size64, sizep, sizeof(size64)); + } else { + user32_size_t size32 = (user32_size_t)size; + error = copyout(&size32, sizep, sizeof(size32)); + } + if (error) { + return error; + } + + error = copyout(&slide, valuep, sizeof(slide)); + if (error) { + return error; + } + } + break; + default: + return EINVAL; } return 0; @@ -2298,7 +2327,9 @@ kas_info(struct proc *p, #pragma clang diagnostic ignored "-Wcast-qual" #pragma clang diagnostic ignored "-Wunused-function" -static void asserts() { +static void +asserts() +{ static_assert(sizeof(vm_min_kernel_address) == sizeof(unsigned long)); static_assert(sizeof(vm_max_kernel_address) == sizeof(unsigned long)); } @@ -2340,16 +2371,16 @@ sysctl_vm_footprint_suspend SYSCTL_HANDLER_ARGS /* ... 
but let resumes proceed */ } DTRACE_VM2(footprint_suspend, - vm_map_t, current_map(), - int, new_value); + vm_map_t, current_map(), + int, new_value); pmap_footprint_suspend(current_map(), new_value); return 0; } SYSCTL_PROC(_vm, OID_AUTO, footprint_suspend, - CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_ANYBODY|CTLFLAG_LOCKED|CTLFLAG_MASKED, - 0, 0, &sysctl_vm_footprint_suspend, "I", ""); + CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | CTLFLAG_MASKED, + 0, 0, &sysctl_vm_footprint_suspend, "I", ""); #endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */ extern uint64_t vm_map_corpse_footprint_count; @@ -2358,23 +2389,23 @@ extern uint64_t vm_map_corpse_footprint_size_max; extern uint64_t vm_map_corpse_footprint_full; extern uint64_t vm_map_corpse_footprint_no_buf; SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_count, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_count, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_count, ""); SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_size_avg, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_avg, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_avg, ""); SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_size_max, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_max, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_size_max, ""); SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_full, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_full, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_full, ""); SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_no_buf, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_no_buf, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_no_buf, ""); #if PMAP_CS extern uint64_t vm_cs_defer_to_pmap_cs; extern uint64_t vm_cs_defer_to_pmap_cs_not; SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs, ""); SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs_not, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs_not, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs_not, ""); #endif /* PMAP_CS */ extern uint64_t shared_region_pager_copied; @@ -2382,13 +2413,13 @@ extern uint64_t shared_region_pager_slid; extern uint64_t shared_region_pager_slid_error; extern uint64_t shared_region_pager_reclaimed; SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_copied, - CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_copied, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_copied, ""); SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid, - CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid, ""); SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_slid_error, - CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid_error, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_slid_error, ""); SYSCTL_QUAD(_vm, OID_AUTO, shared_region_pager_reclaimed, - CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_reclaimed, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &shared_region_pager_reclaimed, ""); #if MACH_ASSERT extern int pmap_ledgers_panic_leeway; diff --git a/bsd/vm/vnode_pager.c b/bsd/vm/vnode_pager.c index fdbccff33..436268db2 100644 --- a/bsd/vm/vnode_pager.c +++ b/bsd/vm/vnode_pager.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies @@ -49,10 +49,10 @@ #include #include #include -#include /* needs internal due to fhandle_t */ +#include /* needs internal due to fhandle_t */ #include #include -#include /* For DKIOC calls */ +#include /* For DKIOC calls */ #include #include @@ -88,8 +88,9 @@ vnode_pager_throttle() ut = get_bsdthread_info(current_thread()); - if (ut->uu_lowpri_window) + if (ut->uu_lowpri_window) { throttle_lowpri_io(1); + } } boolean_t @@ -104,23 +105,24 @@ vnode_pager_issue_reprioritize_io(struct vnode *devvp, uint64_t blkno, uint32_t { u_int32_t blocksize = 0; dk_extent_t extent; - dk_set_tier_t set_tier; + dk_set_tier_t set_tier; int error = 0; error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blocksize, 0, vfs_context_kernel()); - if (error) + if (error) { return; + } memset(&extent, 0, sizeof(dk_extent_t)); memset(&set_tier, 0, sizeof(dk_set_tier_t)); - + extent.offset = blkno * (u_int64_t) blocksize; extent.length = len; - set_tier.extents = &extent; + set_tier.extents = &extent; set_tier.extentsCount = 1; set_tier.tier = priority; - + error = VNOP_IOCTL(devvp, DKIOCSETTIER, (caddr_t)&set_tier, 0, vfs_context_kernel()); return; } @@ -128,25 +130,26 @@ vnode_pager_issue_reprioritize_io(struct vnode *devvp, uint64_t blkno, uint32_t void vnode_pager_was_dirtied( - struct vnode *vp, - vm_object_offset_t s_offset, - vm_object_offset_t e_offset) + struct vnode *vp, + vm_object_offset_t s_offset, + vm_object_offset_t e_offset) { - cluster_update_state(vp, s_offset, e_offset, TRUE); + cluster_update_state(vp, s_offset, e_offset, TRUE); } uint32_t vnode_pager_isinuse(struct vnode *vp) { - if (vp->v_usecount > vp->v_kusecount) - return (1); - return (0); + if (vp->v_usecount > vp->v_kusecount) { + return 1; + } + return 0; } uint32_t vnode_pager_return_throttle_io_limit(struct vnode *vp, uint32_t *limit) { - return(cluster_throttle_io_limit(vp, limit)); + return cluster_throttle_io_limit(vp, limit); } vm_object_offset_t @@ -159,19 +162,19 @@ extern int safe_getpath(struct vnode *dvp, char *leafname, char *path, int _len, kern_return_t vnode_pager_get_name( - struct vnode *vp, - char *pathname, - vm_size_t pathname_len, - char *filename, - vm_size_t filename_len, - boolean_t *truncated_path_p) + struct vnode *vp, + char *pathname, + vm_size_t pathname_len, + char *filename, + vm_size_t 
filename_len, + boolean_t *truncated_path_p) { *truncated_path_p = FALSE; if (pathname != NULL) { /* get the path name */ safe_getpath(vp, NULL, - pathname, (int) pathname_len, - truncated_path_p); + pathname, (int) pathname_len, + truncated_path_p); } if ((pathname == NULL || *truncated_path_p) && filename != NULL) { @@ -187,9 +190,9 @@ vnode_pager_get_name( kern_return_t vnode_pager_get_mtime( - struct vnode *vp, - struct timespec *current_mtime, - struct timespec *cs_mtime) + struct vnode *vp, + struct timespec *current_mtime, + struct timespec *cs_mtime) { vnode_mtime(vp, current_mtime, vfs_context_current()); if (cs_mtime != NULL) { @@ -200,14 +203,14 @@ vnode_pager_get_mtime( kern_return_t vnode_pager_get_cs_blobs( - struct vnode *vp, - void **blobs) + struct vnode *vp, + void **blobs) { *blobs = ubc_get_cs_blobs(vp); return KERN_SUCCESS; } -/* +/* * vnode_trim: * Used to call the DKIOCUNMAP ioctl on the underlying disk device for the specified vnode. * Trims the region at offset bytes into the file, for length bytes. @@ -217,15 +220,16 @@ vnode_pager_get_cs_blobs( * This function is non-idempotent in error cases; We cannot un-discard the blocks if only some of them * are successfully discarded. */ -u_int32_t vnode_trim ( - struct vnode *vp, - off_t offset, - size_t length) +u_int32_t +vnode_trim( + struct vnode *vp, + off_t offset, + size_t length) { - daddr64_t io_blockno; /* Block number corresponding to the start of the extent */ - size_t io_bytecount; /* Number of bytes in current extent for the specified range */ + daddr64_t io_blockno; /* Block number corresponding to the start of the extent */ + size_t io_bytecount; /* Number of bytes in current extent for the specified range */ size_t trimmed = 0; - off_t current_offset = offset; + off_t current_offset = offset; size_t remaining_length = length; int error = 0; u_int32_t blocksize = 0; @@ -243,7 +247,7 @@ u_int32_t vnode_trim ( goto trim_exit; } - /* + /* * We may not get the entire range from offset -> offset+length in a single * extent from the blockmap call. Keep looping/going until we are sure we've hit * the whole range or if we encounter an error. @@ -251,22 +255,22 @@ u_int32_t vnode_trim ( while (trimmed < length) { /* * VNOP_BLOCKMAP will tell us the logical to physical block number mapping for the - * specified offset. It returns blocks in contiguous chunks, so if the logical range is + * specified offset. It returns blocks in contiguous chunks, so if the logical range is * broken into multiple extents, it must be called multiple times, increasing the offset * in each call to ensure that the entire range is covered. */ - error = VNOP_BLOCKMAP (vp, current_offset, remaining_length, - &io_blockno, &io_bytecount, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL); + error = VNOP_BLOCKMAP(vp, current_offset, remaining_length, + &io_blockno, &io_bytecount, NULL, VNODE_READ | VNODE_BLOCKMAP_NO_TRACK, NULL); if (error) { goto trim_exit; } - /* + /* * We have a contiguous run. Prepare & issue the ioctl for the device. * the DKIOCUNMAP ioctl takes offset in bytes from the start of the device. 
*/ - memset (&extent, 0, sizeof(dk_extent_t)); - memset (&unmap, 0, sizeof(dk_unmap_t)); + memset(&extent, 0, sizeof(dk_extent_t)); + memset(&unmap, 0, sizeof(dk_unmap_t)); extent.offset = (uint64_t) io_blockno * (u_int64_t) blocksize; extent.length = io_bytecount; unmap.extents = &extent; @@ -283,33 +287,32 @@ u_int32_t vnode_trim ( trim_exit: return error; - } pager_return_t vnode_pageout(struct vnode *vp, - upl_t upl, - upl_offset_t upl_offset, - vm_object_offset_t f_offset, - upl_size_t size, - int flags, - int *errorp) + upl_t upl, + upl_offset_t upl_offset, + vm_object_offset_t f_offset, + upl_size_t size, + int flags, + int *errorp) { - int result = PAGER_SUCCESS; - int error = 0; - int error_ret = 0; + int result = PAGER_SUCCESS; + int error = 0; + int error_ret = 0; daddr64_t blkno; int isize; int pg_index; int base_index; upl_offset_t offset; upl_page_info_t *pl; - vfs_context_t ctx = vfs_context_current(); /* pager context */ + vfs_context_t ctx = vfs_context_current(); /* pager context */ isize = (int)size; if (isize <= 0) { - result = PAGER_ERROR; + result = PAGER_ERROR; error_ret = EINVAL; goto out; } @@ -318,32 +321,34 @@ vnode_pageout(struct vnode *vp, result = PAGER_ERROR; error_ret = EINVAL; - if (upl && !(flags & UPL_NOCOMMIT)) - ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); + if (upl && !(flags & UPL_NOCOMMIT)) { + ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); + } goto out; } - if ( !(flags & UPL_VNODE_PAGER)) { + if (!(flags & UPL_VNODE_PAGER)) { /* * This is a pageout from the default pager, * just go ahead and call vnop_pageout since * it has already sorted out the dirty ranges */ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, - size, 1, 0, 0, 0); + (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, + size, 1, 0, 0, 0); - if ( (error_ret = VNOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset, - (size_t)size, flags, ctx)) ) + if ((error_ret = VNOP_PAGEOUT(vp, upl, upl_offset, (off_t)f_offset, + (size_t)size, flags, ctx))) { result = PAGER_ERROR; + } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, - size, 1, 0, 0, 0); + (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, + size, 1, 0, 0, 0); goto out; } if (upl == NULL) { - int request_flags; + int request_flags; if (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_PAGEOUTV2) { /* @@ -353,52 +358,56 @@ vnode_pageout(struct vnode *vp, * via 'f_offset' and 'size' into a UPL... this allows the filesystem to first * take any locks it needs, before effectively locking the pages into a UPL... 
*/ - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, - size, (int)f_offset, 0, 0, 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, + size, (int)f_offset, 0, 0, 0); - if ( (error_ret = VNOP_PAGEOUT(vp, NULL, upl_offset, (off_t)f_offset, - size, flags, ctx)) ) { + if ((error_ret = VNOP_PAGEOUT(vp, NULL, upl_offset, (off_t)f_offset, + size, flags, ctx))) { result = PAGER_ERROR; } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, - size, 0, 0, 0, 0); + (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, + size, 0, 0, 0, 0); goto out; } - if (flags & UPL_MSYNC) + if (flags & UPL_MSYNC) { request_flags = UPL_UBC_MSYNC | UPL_RET_ONLY_DIRTY; - else + } else { request_flags = UPL_UBC_PAGEOUT | UPL_RET_ONLY_DIRTY; - - if (ubc_create_upl_kernel(vp, f_offset, size, &upl, &pl, request_flags, VM_KERN_MEMORY_FILE) != KERN_SUCCESS) { + } + + if (ubc_create_upl_kernel(vp, f_offset, size, &upl, &pl, request_flags, VM_KERN_MEMORY_FILE) != KERN_SUCCESS) { result = PAGER_ERROR; error_ret = EINVAL; goto out; } upl_offset = 0; - } else + } else { pl = ubc_upl_pageinfo(upl); + } /* * Ignore any non-present pages at the end of the - * UPL so that we aren't looking at a upl that + * UPL so that we aren't looking at a upl that * may already have been freed by the preceeding * aborts/completions. */ base_index = upl_offset / PAGE_SIZE; for (pg_index = (upl_offset + isize) / PAGE_SIZE; pg_index > base_index;) { - if (upl_page_present(pl, --pg_index)) - break; + if (upl_page_present(pl, --pg_index)) { + break; + } if (pg_index == base_index) { - /* + /* * no pages were returned, so release * our hold on the upl and leave */ - if ( !(flags & UPL_NOCOMMIT)) - ubc_upl_abort_range(upl, upl_offset, isize, UPL_ABORT_FREE_ON_EMPTY); + if (!(flags & UPL_NOCOMMIT)) { + ubc_upl_abort_range(upl, upl_offset, isize, UPL_ABORT_FREE_ON_EMPTY); + } goto out; } @@ -414,7 +423,7 @@ vnode_pageout(struct vnode *vp, */ if (ubc_getsize(vp) == 0) { - /* + /* * if the file has been effectively deleted, then * we need to go through the UPL and invalidate any * buffer headers we might have that reference any @@ -422,24 +431,25 @@ vnode_pageout(struct vnode *vp, */ for (offset = upl_offset; isize; isize -= PAGE_SIZE, offset += PAGE_SIZE) { #if NFSCLIENT - if (vp->v_tag == VT_NFS) + if (vp->v_tag == VT_NFS) { /* check with nfs if page is OK to drop */ error = nfs_buf_page_inval(vp, (off_t)f_offset); - else + } else #endif { - blkno = ubc_offtoblk(vp, (off_t)f_offset); - error = buf_invalblkno(vp, blkno, 0); + blkno = ubc_offtoblk(vp, (off_t)f_offset); + error = buf_invalblkno(vp, blkno, 0); } if (error) { - if ( !(flags & UPL_NOCOMMIT)) - ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); - if (error_ret == 0) - error_ret = error; + if (!(flags & UPL_NOCOMMIT)) { + ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + } + if (error_ret == 0) { + error_ret = error; + } result = PAGER_ERROR; - - } else if ( !(flags & UPL_NOCOMMIT)) { - ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY); + } else if (!(flags & UPL_NOCOMMIT)) { + ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY); } f_offset += PAGE_SIZE; } @@ -453,20 +463,20 @@ vnode_pageout(struct vnode *vp, int xsize; int num_of_pages; - if ( !upl_page_present(pl, pg_index)) { - /* + if (!upl_page_present(pl, pg_index)) { + /* * we asked for RET_ONLY_DIRTY, so it's possible * to get back empty slots 
in the UPL * just skip over them */ - f_offset += PAGE_SIZE; + f_offset += PAGE_SIZE; offset += PAGE_SIZE; isize -= PAGE_SIZE; pg_index++; continue; } - if ( !upl_dirty_page(pl, pg_index)) { + if (!upl_dirty_page(pl, pg_index)) { /* * if the page is not dirty and reached here it is * marked precious or it is due to invalidation in @@ -478,26 +488,27 @@ vnode_pageout(struct vnode *vp, * a lock inversion which causes deadlock. */ #if NFSCLIENT - if (vp->v_tag == VT_NFS) + if (vp->v_tag == VT_NFS) { /* check with nfs if page is OK to drop */ error = nfs_buf_page_inval(vp, (off_t)f_offset); - else + } else #endif { - blkno = ubc_offtoblk(vp, (off_t)f_offset); - error = buf_invalblkno(vp, blkno, 0); + blkno = ubc_offtoblk(vp, (off_t)f_offset); + error = buf_invalblkno(vp, blkno, 0); } if (error) { - if ( !(flags & UPL_NOCOMMIT)) - ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); - if (error_ret == 0) - error_ret = error; + if (!(flags & UPL_NOCOMMIT)) { + ubc_upl_abort_range(upl, offset, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY); + } + if (error_ret == 0) { + error_ret = error; + } result = PAGER_ERROR; - - } else if ( !(flags & UPL_NOCOMMIT)) { - ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY); + } else if (!(flags & UPL_NOCOMMIT)) { + ubc_upl_commit_range(upl, offset, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY); } - f_offset += PAGE_SIZE; + f_offset += PAGE_SIZE; offset += PAGE_SIZE; isize -= PAGE_SIZE; pg_index++; @@ -508,81 +519,87 @@ vnode_pageout(struct vnode *vp, xsize = isize - PAGE_SIZE; while (xsize) { - if ( !upl_dirty_page(pl, pg_index + num_of_pages)) + if (!upl_dirty_page(pl, pg_index + num_of_pages)) { break; + } num_of_pages++; xsize -= PAGE_SIZE; } xsize = num_of_pages * PAGE_SIZE; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, - xsize, (int)f_offset, 0, 0, 0); + (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_START, + xsize, (int)f_offset, 0, 0, 0); - if ( (error = VNOP_PAGEOUT(vp, upl, offset, (off_t)f_offset, - xsize, flags, ctx)) ) { - if (error_ret == 0) - error_ret = error; + if ((error = VNOP_PAGEOUT(vp, upl, offset, (off_t)f_offset, + xsize, flags, ctx))) { + if (error_ret == 0) { + error_ret = error; + } result = PAGER_ERROR; } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, - xsize, 0, 0, 0, 0); + (MACHDBG_CODE(DBG_MACH_VM, 1)) | DBG_FUNC_END, + xsize, 0, 0, 0, 0); - f_offset += xsize; + f_offset += xsize; offset += xsize; isize -= xsize; pg_index += num_of_pages; } out: - if (errorp) + if (errorp) { *errorp = error_ret; + } - return (result); + return result; } pager_return_t vnode_pagein( - struct vnode *vp, - upl_t upl, - upl_offset_t upl_offset, - vm_object_offset_t f_offset, - upl_size_t size, - int flags, - int *errorp) + struct vnode *vp, + upl_t upl, + upl_offset_t upl_offset, + vm_object_offset_t f_offset, + upl_size_t size, + int flags, + int *errorp) { - upl_page_info_t *pl; - int result = PAGER_SUCCESS; - int error = 0; - int pages_in_upl; - int start_pg; - int last_pg; + upl_page_info_t *pl; + int result = PAGER_SUCCESS; + int error = 0; + int pages_in_upl; + int start_pg; + int last_pg; int first_pg; - int xsize; - int must_commit = 1; - int ignore_valid_page_check = 0; + int xsize; + int must_commit = 1; + int ignore_valid_page_check = 0; - if (flags & UPL_NOCOMMIT) - must_commit = 0; + if (flags & UPL_NOCOMMIT) { + must_commit = 0; + } - if (flags & UPL_IGNORE_VALID_PAGE_CHECK) + if (flags & UPL_IGNORE_VALID_PAGE_CHECK) { 
ignore_valid_page_check = 1; + } if (UBCINFOEXISTS(vp) == 0) { result = PAGER_ERROR; error = PAGER_ERROR; - if (upl && must_commit) + if (upl && must_commit) { ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + } goto out; } if (upl == (upl_t)NULL) { flags &= ~UPL_NOCOMMIT; - if (size > MAX_UPL_SIZE_BYTES) { - result = PAGER_ERROR; + if (size > MAX_UPL_SIZE_BYTES) { + result = PAGER_ERROR; error = PAGER_ERROR; goto out; } @@ -595,21 +612,21 @@ vnode_pagein( * take any locks it needs, before effectively locking the pages into a UPL... * so we pass a NULL into the filesystem instead of a UPL pointer... the 'upl_offset' * is used to identify the "must have" page in the extent... the filesystem is free - * to clip the extent to better fit the underlying FS blocksize if it desires as + * to clip the extent to better fit the underlying FS blocksize if it desires as * long as it continues to include the "must have" page... 'f_offset' + 'upl_offset' * identifies that page */ - if ( (error = VNOP_PAGEIN(vp, NULL, upl_offset, (off_t)f_offset, - size, flags, vfs_context_current())) ) { + if ((error = VNOP_PAGEIN(vp, NULL, upl_offset, (off_t)f_offset, + size, flags, vfs_context_current()))) { result = PAGER_ERROR; error = PAGER_ERROR; } goto out; } - ubc_create_upl_kernel(vp, f_offset, size, &upl, &pl, UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT, VM_KERN_MEMORY_FILE); + ubc_create_upl_kernel(vp, f_offset, size, &upl, &pl, UPL_UBC_PAGEIN | UPL_RET_ONLY_ABSENT, VM_KERN_MEMORY_FILE); if (upl == (upl_t)NULL) { - result = PAGER_ABSENT; + result = PAGER_ABSENT; error = PAGER_ABSENT; goto out; } @@ -617,7 +634,7 @@ vnode_pagein( upl_offset = 0; first_pg = 0; - + /* * if we get here, we've created the upl and * are responsible for commiting/aborting it @@ -625,26 +642,28 @@ vnode_pagein( */ must_commit = 1; } else { - pl = ubc_upl_pageinfo(upl); + pl = ubc_upl_pageinfo(upl); first_pg = upl_offset / PAGE_SIZE; } pages_in_upl = size / PAGE_SIZE; DTRACE_VM2(pgpgin, int, pages_in_upl, (uint64_t *), NULL); /* - * before we start marching forward, we must make sure we end on + * before we start marching forward, we must make sure we end on * a present page, otherwise we will be working with a freed - * upl + * upl */ for (last_pg = pages_in_upl - 1; last_pg >= first_pg; last_pg--) { - if (upl_page_present(pl, last_pg)) + if (upl_page_present(pl, last_pg)) { break; + } if (last_pg == first_pg) { - /* + /* * empty UPL, no pages are present */ - if (must_commit) - ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); + if (must_commit) { + ubc_upl_abort_range(upl, upl_offset, size, UPL_ABORT_FREE_ON_EMPTY); + } goto out; } } @@ -652,54 +671,58 @@ vnode_pagein( last_pg = first_pg; while (last_pg < pages_in_upl) { - /* + /* * skip over missing pages... */ - for ( ; last_pg < pages_in_upl; last_pg++) { - if (upl_page_present(pl, last_pg)) - break; + for (; last_pg < pages_in_upl; last_pg++) { + if (upl_page_present(pl, last_pg)) { + break; + } } if (ignore_valid_page_check == 1) { start_pg = last_pg; } else { - /* + /* * skip over 'valid' pages... 
we don't want to issue I/O for these */ - for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) { - if (!upl_valid_page(pl, last_pg)) - break; + for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) { + if (!upl_valid_page(pl, last_pg)) { + break; + } } } if (last_pg > start_pg) { - /* + /* * we've found a range of valid pages * if we've got COMMIT responsibility * commit this range of pages back to the * cache unchanged */ - xsize = (last_pg - start_pg) * PAGE_SIZE; + xsize = (last_pg - start_pg) * PAGE_SIZE; - if (must_commit) - ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY); + if (must_commit) { + ubc_upl_abort_range(upl, start_pg * PAGE_SIZE, xsize, UPL_ABORT_FREE_ON_EMPTY); + } } - if (last_pg == pages_in_upl) - /* + if (last_pg == pages_in_upl) { + /* * we're done... all pages that were present - * have either had I/O issued on them or + * have either had I/O issued on them or * were aborted unchanged... */ - break; + break; + } if (!upl_page_present(pl, last_pg)) { - /* - * we found a range of valid pages + /* + * we found a range of valid pages * terminated by a missing page... * bump index to the next page and continue on */ - last_pg++; - continue; + last_pg++; + continue; } /* * scan from the found invalid page looking for a valid @@ -708,51 +731,51 @@ vnode_pagein( * 'cluster_io' */ for (start_pg = last_pg; last_pg < pages_in_upl; last_pg++) { - if (( !ignore_valid_page_check && upl_valid_page(pl, last_pg)) || !upl_page_present(pl, last_pg)) - break; + if ((!ignore_valid_page_check && upl_valid_page(pl, last_pg)) || !upl_page_present(pl, last_pg)) { + break; + } } if (last_pg > start_pg) { - int xoff; - xsize = (last_pg - start_pg) * PAGE_SIZE; + int xoff; + xsize = (last_pg - start_pg) * PAGE_SIZE; xoff = start_pg * PAGE_SIZE; - if ( (error = VNOP_PAGEIN(vp, upl, (upl_offset_t) xoff, - (off_t)f_offset + xoff, - xsize, flags, vfs_context_current())) ) { - /* + if ((error = VNOP_PAGEIN(vp, upl, (upl_offset_t) xoff, + (off_t)f_offset + xoff, + xsize, flags, vfs_context_current()))) { + /* * Usually this UPL will be aborted/committed by the lower cluster layer. * * a) In the case of decmpfs, however, we may return an error (EAGAIN) to avoid - * a deadlock with another thread already inflating the file. + * a deadlock with another thread already inflating the file. * * b) In the case of content protection, EPERM is a valid error and we should respect it. * * In those cases, we must take care of our UPL at this layer itself. */ if (must_commit) { - if(error == EAGAIN) { - ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART); + if (error == EAGAIN) { + ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_RESTART); } - if(error == EPERM) { - ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); + if (error == EPERM) { + ubc_upl_abort_range(upl, (upl_offset_t) xoff, xsize, UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR); } } result = PAGER_ERROR; error = PAGER_ERROR; - } } - } + } out: - if (errorp) + if (errorp) { *errorp = result; + } - return (error); + return error; } void * upl_get_internal_page_list(upl_t upl) { - return(UPL_GET_INTERNAL_PAGE_LIST(upl)); - + return UPL_GET_INTERNAL_PAGE_LIST(upl); } diff --git a/bsd/vm/vnode_pager.h b/bsd/vm/vnode_pager.h index 17adbf1e7..920212c5c 100644 --- a/bsd/vm/vnode_pager.h +++ b/bsd/vm/vnode_pager.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,24 +22,24 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ -#ifndef _VNODE_PAGER_ -#define _VNODE_PAGER_ 1 +#ifndef _VNODE_PAGER_ +#define _VNODE_PAGER_ 1 #include #include #include -#ifdef KERNEL +#ifdef KERNEL #include #include #include @@ -68,30 +68,30 @@ * Pager file structure. One per swap file. */ typedef struct pager_file { - queue_chain_t pf_chain; /* link to other paging files */ - struct vnode *pf_vp; /* vnode of paging file */ - u_int pf_count; /* Number of vstruct using this file */ - u_char *pf_bmap; /* Map of used blocks */ - long pf_npgs; /* Size of file in pages */ - long pf_pfree; /* Number of unused pages */ - long pf_lowat; /* Low water page */ - long pf_hipage; /* Highest page allocated */ - long pf_hint; /* Lowest page unallocated */ - char *pf_name; /* Filename of this file */ - boolean_t pf_prefer; - int pf_index; /* index into the pager_file array */ - void * pf_lock; /* Lock for alloc and dealloc */ + queue_chain_t pf_chain; /* link to other paging files */ + struct vnode *pf_vp; /* vnode of paging file */ + u_int pf_count; /* Number of vstruct using this file */ + u_char *pf_bmap; /* Map of used blocks */ + long pf_npgs; /* Size of file in pages */ + long pf_pfree; /* Number of unused pages */ + long pf_lowat; /* Low water page */ + long pf_hipage; /* Highest page allocated */ + long pf_hint; /* Lowest page unallocated */ + char *pf_name; /* Filename of this file */ + boolean_t pf_prefer; + int pf_index; /* index into the pager_file array */ + void * pf_lock; /* Lock for alloc and dealloc */ } *pager_file_t; -#define PAGER_FILE_NULL (pager_file_t) 0 +#define PAGER_FILE_NULL (pager_file_t) 0 -#define MAXPAGERFILES 16 +#define MAXPAGERFILES 16 #define MAX_BACKING_STORE 100 struct bs_map { - struct vnode *vp; - void *bs; + struct vnode *vp; + void *bs; }; extern struct bs_map bs_port_table[]; @@ -100,33 +100,33 @@ extern struct bs_map bs_port_table[]; /* * Pager file data structures. 
*/ -#define INDEX_NULL 0 +#define INDEX_NULL 0 typedef struct { - unsigned int index:8; /* paging file this block is in */ - unsigned int offset:24; /* page number where block resides */ + unsigned int index:8; /* paging file this block is in */ + unsigned int offset:24; /* page number where block resides */ } pf_entry; typedef enum { - IS_INODE, /* Local disk */ - IS_RNODE /* NFS */ - } vpager_fstype; + IS_INODE, /* Local disk */ + IS_RNODE /* NFS */ +} vpager_fstype; /* * Basic vnode pager structure. One per object, backing-store pair. */ typedef struct vstruct { - boolean_t is_device; /* Must be first - see vm_pager.h */ - pager_file_t vs_pf; /* Pager file this uses */ - pf_entry **vs_pmap; /* Map of pages into paging file */ + boolean_t is_device; /* Must be first - see vm_pager.h */ + pager_file_t vs_pf; /* Pager file this uses */ + pf_entry **vs_pmap; /* Map of pages into paging file */ unsigned int - /* boolean_t */ vs_swapfile:1; /* vnode is a swapfile */ - short vs_count; /* use count */ - int vs_size; /* size of this chunk in pages*/ - struct vnode *vs_vp; /* vnode to page to */ + /* boolean_t */ vs_swapfile:1; /* vnode is a swapfile */ + short vs_count; /* use count */ + int vs_size; /* size of this chunk in pages*/ + struct vnode *vs_vp; /* vnode to page to */ } *vnode_pager_t; -#define VNODE_PAGER_NULL ((vnode_pager_t) 0) +#define VNODE_PAGER_NULL ((vnode_pager_t) 0) -#endif /* KERNEL */ +#endif /* KERNEL */ -#endif /* _VNODE_PAGER_ */ +#endif /* _VNODE_PAGER_ */ diff --git a/config/Libkern.exports b/config/Libkern.exports index d0c0a5554..e5f047928 100644 --- a/config/Libkern.exports +++ b/config/Libkern.exports @@ -189,21 +189,6 @@ __ZN12OSSerializerC2EPK11OSMetaClass __ZN12OSSerializerC2Ev __ZN12OSSerializerD0Ev __ZN12OSSerializerD2Ev -__ZN12OSSymbolPool12insertSymbolEP8OSSymbol -__ZN12OSSymbolPool12removeSymbolEP8OSSymbol -__ZN12OSSymbolPool13initHashStateEv -__ZN12OSSymbolPool13nextHashStateEP17OSSymbolPoolState -__ZN12OSSymbolPool18reconstructSymbolsEv -__ZN12OSSymbolPool4initEv -__ZN12OSSymbolPool4log2Ej -__ZN12OSSymbolPool6exp2mlEj -__ZN12OSSymbolPoolC1EPKS_ -__ZN12OSSymbolPoolC2EPKS_ -__ZN12OSSymbolPoolD0Ev -__ZN12OSSymbolPoolD1Ev -__ZN12OSSymbolPoolD2Ev -__ZN12OSSymbolPooldlEPvm -__ZN12OSSymbolPoolnwEm __ZN15OSMetaClassBase12safeMetaCastEPKS_PK11OSMetaClass __ZN15OSMetaClassBase13checkTypeInstEPKS_S1_ __ZN15OSMetaClassBaseC2Ev @@ -374,6 +359,8 @@ __ZN8OSSymbol15initWithCStringEPKc __ZN8OSSymbol17withCStringNoCopyEPKc __ZN8OSSymbol18checkForPageUnloadEPvS0_ __ZN8OSSymbol21initWithCStringNoCopyEPKc +__ZN8OSSymbol23existingSymbolForStringEPK8OSString +__ZN8OSSymbol24existingSymbolForCStringEPKc __ZN8OSSymbol4freeEv __ZN8OSSymbol9MetaClassC1Ev __ZN8OSSymbol9MetaClassC2Ev @@ -459,7 +446,6 @@ __ZNK12OSOrderedSet9isEqualToEPKS_ __ZNK12OSSerializer12getMetaClassEv __ZNK12OSSerializer9MetaClass5allocEv __ZNK12OSSerializer9serializeEP11OSSerialize -__ZNK12OSSymbolPool10findSymbolEPKc __ZNK15OSMetaClassBase8metaCastEPK11OSMetaClass __ZNK15OSMetaClassBase8metaCastEPK8OSString __ZNK15OSMetaClassBase8metaCastEPK8OSSymbol @@ -564,7 +550,6 @@ __ZTV12OSCollection __ZTV12OSDictionary __ZTV12OSOrderedSet __ZTV12OSSerializer -__ZTV12OSSymbolPool __ZTV15OSMetaClassBase __ZTV15OSMetaClassMeta __ZTV20OSCollectionIterator @@ -695,7 +680,9 @@ _lck_spin_destroy _lck_spin_free _lck_spin_init _lck_spin_lock +_lck_spin_lock_grp _lck_spin_try_lock +_lck_spin_try_lock_grp _lck_spin_unlock _memcmp _memcpy diff --git a/config/MASTER b/config/MASTER index b3c36794a..a4b109d11 100644 --- 
a/config/MASTER +++ b/config/MASTER @@ -306,11 +306,6 @@ options CONFIG_IPC_TABLE_ENTRIES_STEPS=256 # 300714 entries # options CONFIG_NO_KPRINTF_STRINGS # -# -# use finer-grained lock groups for the proc subsystem -# -options CONFIG_FINE_LOCK_GROUPS # - # # configurable kernel - general switch to say we are building for an # embedded device @@ -471,6 +466,7 @@ pseudo-device random 1 init random_init pseudo-device dtrace 1 init dtrace_init # pseudo-device helper 1 init helper_init # pseudo-device lockstat 1 init lockstat_init # +pseudo-device lockprof 1 init lockprof_init # pseudo-device sdt 1 init sdt_init # pseudo-device systrace 1 init systrace_init # pseudo-device fbt 1 init fbt_init # @@ -615,6 +611,8 @@ options NO_KDEBUG # no kernel tracing # # options CONFIG_DTRACE # # +options LOCK_STATS # # + # kernel performance tracing options KPERF # options KPC # @@ -666,9 +664,10 @@ options CONFIG_TASK_MAX=1024 # options CONFIG_TASK_MAX=768 # options CONFIG_TASK_MAX=512 # -options CONFIG_ZONE_MAP_MIN=12582912 # -options CONFIG_ZONE_MAP_MIN=6291456 # -options CONFIG_ZONE_MAP_MIN=1048576 # +# +# Minimum zone map size: 115 MB +# +options CONFIG_ZONE_MAP_MIN=120586240 # # Sizes must be a power of two for the zhash to # be able to just mask off bits instead of mod @@ -754,6 +753,11 @@ options CONFIG_REQUIRES_U32_MUNGING # incoming U32 argument structures must be # options COPYOUT_SHIM # Shim for copyout memory analysis via kext # +# +# Enable hardware correlation of mach absolute time +# across intel/arm boundary +options CONFIG_MACH_BRIDGE_SEND_TIME # # +options CONFIG_MACH_BRIDGE_RECV_TIME # # # # Telemetry for 32-bit process launch diff --git a/config/MASTER.arm64 b/config/MASTER.arm64 index a90486db3..eadc388d6 100644 --- a/config/MASTER.arm64 +++ b/config/MASTER.arm64 @@ -46,8 +46,8 @@ # LIBKERN_DEBUG = [ LIBKERN_BASE iotracking ] # PERF_DBG_BASE = [ mach_kdp config_serial_kdp MONOTONIC_BASE kperf kpc ] # PERF_DBG_RELEASE=[ PERF_DBG_BASE ist_kdebug ] -# PERF_DBG_DEV = [ PERF_DBG_BASE config_dtrace zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] -# PERF_DBG_DEBUG = [ PERF_DBG_BASE config_dtrace zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] +# PERF_DBG_DEV = [ PERF_DBG_BASE config_dtrace lock_stats zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] +# PERF_DBG_DEBUG = [ PERF_DBG_BASE config_dtrace lock_stats zleaks kdp_interactive_debugging alternate_debugger interrupt_masked_debug ] # MACH_BASE = [ mach slidable config_ecc_logging vc_progress_white mdebug ipc_debug importance_inheritance config_atm config_coalitions config_iosched config_library_validation config_sysdiagnose config_telemetry config_mach_bridge_recv_time config_quiesce_counter ] # MACH_RELEASE = [ MACH_BASE config_skip_precise_user_kernel_time debugger_for_zone_info ] # MACH_DEV = [ MACH_BASE task_zone_info config_io_accounting importance_trace config_ledger_interval_max ] diff --git a/config/MASTER.x86_64 b/config/MASTER.x86_64 index 7f128cc61..66e7f98de 100644 --- a/config/MASTER.x86_64 +++ b/config/MASTER.x86_64 @@ -44,7 +44,10 @@ # LIBKERN_RELEASE =[ LIBKERN_BASE ] # LIBKERN_DEV = [ LIBKERN_BASE iotracking ] # LIBKERN_DEBUG = [ LIBKERN_BASE iotracking ] -# PERF_DBG = [ config_dtrace mach_kdp config_serial_kdp kdp_interactive_debugging kperf kpc zleaks config_gzalloc MONOTONIC_BASE ] +# PERF_DBG_BASE = [ config_dtrace mach_kdp config_serial_kdp kdp_interactive_debugging kperf kpc zleaks config_gzalloc MONOTONIC_BASE ] +# PERF_DBG_RELEASE=[ 
PERF_DBG_BASE ] +# PERF_DBG_DEV =[ PERF_DBG_BASE lock_stats ] +# PERF_DBG_DEBUG = [ PERF_DBG_BASE lock_stats ] # MACH_BASE = [ mach config_kext_basement mdebug ipc_debug config_mca config_vmx config_mtrr config_lapic config_telemetry importance_inheritance config_atm config_coalitions hypervisor config_iosched config_sysdiagnose config_mach_bridge_send_time copyout_shim ] # MACH_RELEASE = [ MACH_BASE ] # MACH_DEV = [ MACH_BASE task_zone_info importance_trace config_ledger_interval_max ] @@ -53,11 +56,11 @@ # SCHED_RELEASE = [ SCHED_BASE ] # SCHED_DEV = [ SCHED_BASE ] # SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ] -# VM = [ vm_pressure_events memorystatus dynamic_codesigning config_code_decryption encrypted_swap phantom_cache config_background_queue] +# VM = [ vm_pressure_events memorystatus dynamic_codesigning config_code_decryption encrypted_swap config_background_queue] # SECURITY = [ config_macf config_audit config_csr ] -# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING_RELEASE PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG MACH_RELEASE SCHED_RELEASE VM SECURITY ] -# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF VPN IOKIT_DEV LIBKERN_DEV PERF_DBG MACH_DEV SCHED_DEV VM SECURITY ] -# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS SKYWALK_DEBUG NETWORKING_DEBUG PF VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG MACH_DEBUG SCHED_DEBUG VM SECURITY ] +# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING_RELEASE PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM SECURITY ] +# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM SECURITY ] +# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS SKYWALK_DEBUG NETWORKING_DEBUG PF VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM SECURITY ] # KASAN = [ DEVELOPMENT ] # ###################################################################### diff --git a/config/MasterVersion b/config/MasterVersion index 68608495c..c9bd63daf 100644 --- a/config/MasterVersion +++ b/config/MasterVersion @@ -1,4 +1,4 @@ -18.2.0 +18.7.0 # The first line of this file contains the master version number for the kernel. # All other instances of the kernel version in xnu are derived from this file. 
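[Editor's aside — illustrative, not part of the patch.] The MASTER hunks above wire up the new LOCK_STATS option and the lockprof DTrace pseudo-device, and the config/Libkern.exports hunk farther up exports the matching grouped spin-lock entry points _lck_spin_lock_grp and _lck_spin_try_lock_grp. A minimal sketch of how a kext might adopt them so that contention statistics are billed to its own lock group; every identifier containing "example" is hypothetical, and the _grp signatures are assumed to match their declarations in osfmk/kern/locks.h in this release:

#include <kern/locks.h>

static lck_grp_t *example_grp;
static lck_spin_t *example_lock;

static void
example_init(void)
{
	/* One lock group per subsystem; LOCK_STATS accounting is per group. */
	example_grp = lck_grp_alloc_init("com.example.driver", LCK_GRP_ATTR_NULL);
	example_lock = lck_spin_alloc_init(example_grp, LCK_ATTR_NULL);
}

static void
example_work(void)
{
	/* The _grp variants take the group explicitly rather than reading it
	 * out of the lock, so contention can be attributed to example_grp. */
	lck_spin_lock_grp(example_lock, example_grp);
	/* ... critical section ... */
	lck_spin_unlock(example_lock);

	if (lck_spin_try_lock_grp(example_lock, example_grp)) {
		/* acquired without spinning */
		lck_spin_unlock(example_lock);
	}
}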
diff --git a/config/Private.arm.exports b/config/Private.arm.exports index 36db1f3f7..683c7e68f 100644 --- a/config/Private.arm.exports +++ b/config/Private.arm.exports @@ -21,3 +21,4 @@ _ml_static_mfree _sched_perfcontrol_register_callbacks _sched_perfcontrol_update_recommended_cores _PE_panic_debugging_enabled +_register_additional_panic_data_buffer diff --git a/config/Private.arm64.exports b/config/Private.arm64.exports index a9e2160a2..4b43941bc 100644 --- a/config/Private.arm64.exports +++ b/config/Private.arm64.exports @@ -30,6 +30,7 @@ _sched_perfcontrol_register_callbacks _sched_perfcontrol_update_recommended_cores _sched_perfcontrol_thread_group_recommend _sched_perfcontrol_update_callback_deadline +_thread_group_join_io_storage _ml_static_ptovirt _ml_static_mfree _ex_cb_register @@ -39,4 +40,7 @@ _pgtrace_stop _pgtrace_active _pgtrace_add_probe _pgtrace_clear_probe +_mach_bridge_recv_timestamps +_mach_bridge_init_timestamp _PE_panic_debugging_enabled +_register_additional_panic_data_buffer diff --git a/config/Private.exports b/config/Private.exports index 3e655ff10..9cf4a78f3 100644 --- a/config/Private.exports +++ b/config/Private.exports @@ -304,6 +304,11 @@ _ml_io_read16 _ml_io_read32 _ml_io_read64 _ml_io_read8 +_ml_io_write +_ml_io_write16 +_ml_io_write32 +_ml_io_write64 +_ml_io_write8 _mnl_instantiate _mnl_register _mnl_msg_alloc @@ -611,6 +616,7 @@ _zone_change _fs_buffer_cache_gc_register _fs_buffer_cache_gc_unregister _cp_key_store_action_for_volume +_mach_bridge_remote_time _Block_size __Block_extended_layout diff --git a/config/Private.x86_64.exports b/config/Private.x86_64.exports index 0ad58ec1a..92da71aa1 100644 --- a/config/Private.x86_64.exports +++ b/config/Private.x86_64.exports @@ -1,6 +1,7 @@ _IOGetBootKeyStoreData _IOGetAPFSKeyStoreData _IOSetAPFSKeyStoreData +__Z33IOSKCopyKextIdentifierWithAddressm __ZN14IOPMrootDomain20claimSystemWakeEventEP9IOServicejPKcP8OSObject __ZN14IOPMrootDomain20restartWithStackshotEv __ZN22IOInterruptEventSource7warmCPUEy @@ -23,6 +24,14 @@ _hv_set* _lapic_end_of_interrupt _lapic_get_cmci_vector _lapic_unmask_perfcnt_interrupt +_ml_port_io_read +_ml_port_io_read16 +_ml_port_io_read32 +_ml_port_io_read8 +_ml_port_io_write +_ml_port_io_write16 +_ml_port_io_write32 +_ml_port_io_write8 _mp_broadcast _mp_cpus_call _mp_cpus_call1 diff --git a/config/version.c b/config/version.c index 894ed9468..568b8becf 100644 --- a/config/version.c +++ b/config/version.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/iokit/.clang-format b/iokit/.clang-format deleted file mode 100644 index cd99c24e5..000000000 --- a/iokit/.clang-format +++ /dev/null @@ -1,30 +0,0 @@ -# See top level .clang-format for explanation of options -AlignEscapedNewlinesLeft: true -AlignTrailingComments: true -AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: true -AllowShortCaseLabelsOnASingleLine: true -AllowShortFunctionsOnASingleLine: None -AllowShortIfStatementsOnASingleLine: false -AllowShortLoopsOnASingleLine: false -AlwaysBreakAfterDefinitionReturnType: false -AlwaysBreakBeforeMultilineStrings: true -BinPackArguments: true -BinPackParameters: false -BreakBeforeBinaryOperators: None -BreakBeforeBraces: Allman -ColumnLimit: 132 -IndentCaseLabels: false -IndentWidth: 4 -IndentWrappedFunctionNames: false -KeepEmptyLinesAtTheStartOfBlocks: false -PointerAlignment: Middle -SpaceAfterCStyleCast: false -SpaceBeforeAssignmentOperators: true -SpaceBeforeParens: ControlStatements -SpaceInEmptyParentheses: false -SpacesInCStyleCastParentheses: false -SpacesInParentheses: false -SpacesInSquareBrackets: false -TabWidth: 4 -UseTab: Never diff --git a/iokit/Examples/drvGenericInterruptController/GenericInterruptController.cpp b/iokit/Examples/drvGenericInterruptController/GenericInterruptController.cpp index ff4950a87..6b347efdd 100644 --- a/iokit/Examples/drvGenericInterruptController/GenericInterruptController.cpp +++ b/iokit/Examples/drvGenericInterruptController/GenericInterruptController.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -41,166 +41,183 @@ #define super IOInterruptController IODefineMetaClassAndStructors(GenericInterruptController, - IOInterruptController); + IOInterruptController); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool GenericInterruptController::start(IOService *provider) +bool +GenericInterruptController::start(IOService *provider) { - IOInterruptAction handler; - IOSymbol *interruptControllerName; - - // If needed call the parents start. - if (!super::start(provider)) - return false; - - // Map the device's memory and initalize its state. - - // For now you must allocate storage for the vectors. 
- // This will probably changed to something like: initVectors(numVectors).
- // In the mean time something like this works well.
+ IOInterruptAction handler;
+ IOSymbol *interruptControllerName;
+
+ // If needed call the parent's start.
+ if (!super::start(provider)) {
+ return false;
+ }
+
+ // Map the device's memory and initialize its state.
+
+ // For now you must allocate storage for the vectors.
+ // This will probably be changed to something like: initVectors(numVectors).
+ // In the meantime something like this works well.
#if 0
- // Allocate the memory for the vectors.
- vectors = (IOInterruptVector *)IOMalloc(numVectors *
- sizeof(IOInterruptVector));
- if (vectors == NULL) return false;
- bzero(vectors, numVectors * sizeof(IOInterruptVector));
-
- // Allocate locks for the vectors.
- for (cnt = 0; cnt < numVectors; cnt++) {
- vectors[cnt].interruptLock = IOLockAlloc();
- if (vectors[cnt].interruptLock == NULL) {
- for (cnt = 0; cnt < numVectors; cnt++) {
- if (vectors[cnt].interruptLock != NULL)
- IOLockFree(vectors[cnt].interruptLock);
- }
- }
- }
+ // Allocate the memory for the vectors.
+ vectors = (IOInterruptVector *)IOMalloc(numVectors *
+ sizeof(IOInterruptVector));
+ if (vectors == NULL) {
+ return false;
+ }
+ bzero(vectors, numVectors * sizeof(IOInterruptVector));
+
+ // Allocate locks for the vectors.
+ for (cnt = 0; cnt < numVectors; cnt++) {
+ vectors[cnt].interruptLock = IOLockAlloc();
+ if (vectors[cnt].interruptLock == NULL) {
+ for (cnt = 0; cnt < numVectors; cnt++) {
+ if (vectors[cnt].interruptLock != NULL) {
+ IOLockFree(vectors[cnt].interruptLock);
+ }
+ }
+ }
+ }
#endif
-
- // If you know that this interrupt controller is the primary
- // interrupt controller, use this to set it nub properties properly.
- // This may be done by the nub's creator.
- getPlatform()->setCPUInterruptProperties(provider);
-
- // register the interrupt handler so it can receive interrupts.
- handler = getInterruptHandlerAddress();
- provider->registerInterrupt(0, this, handler, 0);
-
- // Just like any interrupt source, you must enable it to receive interrupts.
- provider->enableInterrupt(0);
-
- // Set interruptControllerName to the proper symbol.
- //interruptControllerName = xxx;
-
- // Register this interrupt controller so clients can find it.
- getPlatform()->registerInterruptController(interruptControllerName, this);
-
- // All done, so return true.
- return true;
+
+ // If you know that this interrupt controller is the primary
+ // interrupt controller, use this to set its nub properties properly.
+ // This may be done by the nub's creator.
+ getPlatform()->setCPUInterruptProperties(provider);
+
+ // register the interrupt handler so it can receive interrupts.
+ handler = getInterruptHandlerAddress();
+ provider->registerInterrupt(0, this, handler, 0);
+
+ // Just like any interrupt source, you must enable it to receive interrupts.
+ provider->enableInterrupt(0);
+
+ // Set interruptControllerName to the proper symbol.
+ //interruptControllerName = xxx;
+
+ // Register this interrupt controller so clients can find it.
+ getPlatform()->registerInterruptController(interruptControllerName, this);
+
+ // All done, so return true.
+ return true;
}

-IOReturn GenericInterruptController::getInterruptType(IOService *nub,
- int source,
- int *interruptType)
+IOReturn
+GenericInterruptController::getInterruptType(IOService *nub,
+ int source,
+ int *interruptType)
{
- if (interruptType == 0) return kIOReturnBadArgument;
-
- // Given the nub and source, set interruptType to level or edge.
-
-	return kIOReturnSuccess;
+	if (interruptType == 0) {
+		return kIOReturnBadArgument;
+	}
+
+	// Given the nub and source, set interruptType to level or edge.
+
+	return kIOReturnSuccess;
}

// Sadly this just has to be replicated in every interrupt controller.
-IOInterruptAction GenericInterruptController::getInterruptHandlerAddress(void)
+IOInterruptAction
+GenericInterruptController::getInterruptHandlerAddress(void)
{
-	return (IOInterruptAction)handleInterrupt;
+	return (IOInterruptAction)handleInterrupt;
}

// Handle all current interrupts.
-IOReturn GenericInterruptController::handleInterrupt(void * refCon,
-    IOService * nub,
-    int source)
+IOReturn
+GenericInterruptController::handleInterrupt(void * refCon,
+    IOService * nub,
+    int source)
{
-	IOInterruptVector *vector;
-	int vectorNumber;
-
-	while (1) {
-		// Get vectorNumber from hardware some how and clear the event.
-
-		// Break if there are no more vectors to handle.
-		if (vectorNumber == 0/*kNoVector*/) break;
-
-		// Get the vector's date from the controller's array.
-		vector = &vectors[vectorNumber];
-
-		// Set the vector as active. This store must compleat before
-		// moving on to prevent the disableInterrupt fuction from
-		// geting out of sync.
-		vector->interruptActive = 1;
-		//sync();
-		//isync();
-
-		// If the vector is not disabled soft, handle it.
-		if (!vector->interruptDisabledSoft) {
-			// Prevent speculative exacution as needed on your processor.
-			//isync();
-
-			// Call the handler if it exists.
-			if (vector->interruptRegistered) {
-				vector->handler(vector->target, vector->refCon,
-				    vector->nub, vector->source);
-			}
-		} else {
-			// Hard disable the vector if is was only soft disabled.
-			vector->interruptDisabledHard = 1;
-			disableVectorHard(vectorNumber, vector);
-		}
-
-		// Done with this vector so, set it back to inactive.
-		vector->interruptActive = 0;
-	}
-
-	return kIOReturnSuccess;
+	IOInterruptVector *vector;
+	int vectorNumber;
+
+	while (1) {
+		// Get vectorNumber from hardware somehow and clear the event.
+
+		// Break if there are no more vectors to handle.
+		if (vectorNumber == 0 /*kNoVector*/) {
+			break;
+		}
+
+		// Get the vector's data from the controller's array.
+		vector = &vectors[vectorNumber];
+
+		// Set the vector as active. This store must complete before
+		// moving on to prevent the disableInterrupt function from
+		// getting out of sync.
+		vector->interruptActive = 1;
+		//sync();
+		//isync();
+
+		// If the vector is not disabled soft, handle it.
+		if (!vector->interruptDisabledSoft) {
+			// Prevent speculative execution as needed on your processor.
+			//isync();
+
+			// Call the handler if it exists.
+			if (vector->interruptRegistered) {
+				vector->handler(vector->target, vector->refCon,
+				    vector->nub, vector->source);
+			}
+		} else {
+			// Hard disable the vector if it was only soft disabled.
+			vector->interruptDisabledHard = 1;
+			disableVectorHard(vectorNumber, vector);
+		}
+
+		// Done with this vector, so set it back to inactive.
+		vector->interruptActive = 0;
+	}
+
+	return kIOReturnSuccess;
}

-bool GenericInterruptController::vectorCanBeShared(long vectorNumber,
-    IOInterruptVector *vector)
+bool
+GenericInterruptController::vectorCanBeShared(long vectorNumber,
+    IOInterruptVector *vector)
{
-	// Given the vector number and the vector data, return if it can be shared.
-	return true;
+	// Given the vector number and the vector data, return if it can be shared.
+ return true; } -void GenericInterruptController::initVector(long vectorNumber, - IOInterruptVector *vector) +void +GenericInterruptController::initVector(long vectorNumber, + IOInterruptVector *vector) { - // Given the vector number and the vector data, - // get the hardware ready for the vector to generate interrupts. - // Make sure the vector is left disabled. + // Given the vector number and the vector data, + // get the hardware ready for the vector to generate interrupts. + // Make sure the vector is left disabled. } -void GenericInterruptController::disableVectorHard(long vectorNumber, - IOInterruptVector *vector) +void +GenericInterruptController::disableVectorHard(long vectorNumber, + IOInterruptVector *vector) { - // Given the vector number and the vector data, - // disable the vector at the hardware. + // Given the vector number and the vector data, + // disable the vector at the hardware. } -void GenericInterruptController::enableVector(long vectorNumber, - IOInterruptVector *vector) +void +GenericInterruptController::enableVector(long vectorNumber, + IOInterruptVector *vector) { - // Given the vector number and the vector data, - // enable the vector at the hardware. + // Given the vector number and the vector data, + // enable the vector at the hardware. } -void GenericInterruptController::causeVector(long vectorNumber, - IOInterruptVector *vector) +void +GenericInterruptController::causeVector(long vectorNumber, + IOInterruptVector *vector) { - // Given the vector number and the vector data, - // Set the vector pending and cause an interrupt at the parent controller. - - // cause the interrupt at the parent controller. Source is usually zero, - // but it could be different for your controller. - getPlatform()->causeInterrupt(0); + // Given the vector number and the vector data, + // Set the vector pending and cause an interrupt at the parent controller. + + // cause the interrupt at the parent controller. Source is usually zero, + // but it could be different for your controller. + getPlatform()->causeInterrupt(0); } diff --git a/iokit/Examples/drvGenericInterruptController/GenericInterruptController.h b/iokit/Examples/drvGenericInterruptController/GenericInterruptController.h index 52ded55ec..55151c37a 100644 --- a/iokit/Examples/drvGenericInterruptController/GenericInterruptController.h +++ b/iokit/Examples/drvGenericInterruptController/GenericInterruptController.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
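The handleInterrupt() loop above tests vectorNumber before the skeleton ever
assigns it, so a real controller must supply the hardware read where the "get
vectorNumber from hardware" comment sits. A minimal sketch of that step,
assuming a hypothetical controller whose pending-vector register is memory
mapped; regBase, kVectorAckOffset, and readAndAckVector() are illustrative
names, not part of the example class:

	// Hypothetical hardware access: reading the acknowledge register
	// returns the highest-priority pending vector and clears the event.
	// A result of 0 plays the role of the kNoVector sentinel the loop
	// above tests for.
	enum { kVectorAckOffset = 0x10 / sizeof(uint32_t) };

	static inline int
	readAndAckVector(volatile uint32_t *regBase)
	{
		return (int)regBase[kVectorAckOffset];
	}

Inside the while (1) loop, "vectorNumber = readAndAckVector(regBase);" would
then replace the empty step before the kNoVector test.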
- *
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
@@ -40,45 +40,45 @@
class GenericInterruptController : public IOInterruptController
{
-	IODeclareDefaultStructors(GenericInterruptController);
-
+	IODeclareDefaultStructors(GenericInterruptController);
+
public:
-	// There should be a method to start or init the controller.
-	// Its nature is up to you.
-	virtual bool start(IOService *provider);
-
-	// Returns the type of a vector: level or edge. This will probably get
-	// replaced but a default method and a new method getVectorType.
-	virtual IOReturn getInterruptType(IOService *nub, int source,
-	    int *interruptType);
-
-	// Returns a function pointer for the interrupt handler.
-	// Sadly, egcs prevents this from being done by the base class.
-	virtual IOInterruptAction getInterruptHandlerAddress(void);
-
-	// The actual interrupt handler.
-	virtual IOReturn handleInterrupt(void *refCon,
-	    IOService *nub, int source);
-
-
-	// Should return true if this vector can be shared.
-	// The base class return false, so this method only need to be implemented
-	// if the controller needs to support shared interrupts.
-	// No other work is required to support shared interrupts.
-	virtual bool vectorCanBeShared(long vectorNumber, IOInterruptVector *vector);
-
-	// Do any hardware initalization for this vector. Leave the vector
-	// hard disabled.
-	virtual void initVector(long vectorNumber, IOInterruptVector *vector);
-
-	// Disable this vector at the hardware.
-	virtual void disableVectorHard(long vectorNumber, IOInterruptVector *vector);
-
-	// Enable this vector at the hardware.
-	virtual void enableVector(long vectorNumber, IOInterruptVector *vector);
-
-	// Cause an interrupt on this vector.
-	virtual void causeVector(long vectorNumber, IOInterruptVector *vector);
+// There should be a method to start or init the controller.
+// Its nature is up to you.
	virtual bool start(IOService *provider);
+
+// Returns the type of a vector: level or edge. This will probably get
+// replaced by a default method and a new method getVectorType.
	virtual IOReturn getInterruptType(IOService *nub, int source,
+	    int *interruptType);
+
+// Returns a function pointer for the interrupt handler.
+// Sadly, egcs prevents this from being done by the base class.
	virtual IOInterruptAction getInterruptHandlerAddress(void);
+
+// The actual interrupt handler.
	virtual IOReturn handleInterrupt(void *refCon,
+	    IOService *nub, int source);
+
+
+// Should return true if this vector can be shared.
+// The base class returns false, so this method only needs to be implemented
+// if the controller needs to support shared interrupts.
+// No other work is required to support shared interrupts.
	virtual bool vectorCanBeShared(long vectorNumber, IOInterruptVector *vector);
+
+// Do any hardware initialization for this vector. Leave the vector
+// hard disabled.
	virtual void initVector(long vectorNumber, IOInterruptVector *vector);
+
+// Disable this vector at the hardware.
	virtual void disableVectorHard(long vectorNumber, IOInterruptVector *vector);
+
+// Enable this vector at the hardware.
	virtual void enableVector(long vectorNumber, IOInterruptVector *vector);
+
+// Cause an interrupt on this vector.
	virtual void causeVector(long vectorNumber, IOInterruptVector *vector);
};

#endif /* !
_IOKIT_GENERICINTERRUPTCONTROLLER_H */ diff --git a/iokit/Families/IONVRAM/IONVRAMController.cpp b/iokit/Families/IONVRAM/IONVRAMController.cpp index 4d8829507..338e6dcc6 100644 --- a/iokit/Families/IONVRAM/IONVRAMController.cpp +++ b/iokit/Families/IONVRAM/IONVRAMController.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,7 +38,8 @@ OSDefineAbstractStructors(IONVRAMController, IOService); // init // // **************************************************************************** -void IONVRAMController::registerService(IOOptionBits options) +void +IONVRAMController::registerService(IOOptionBits options) { super::registerService(options); @@ -50,6 +51,7 @@ void IONVRAMController::registerService(IOOptionBits options) // // // **************************************************************************** -void IONVRAMController::sync(void) +void +IONVRAMController::sync(void) { } diff --git a/iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp b/iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp index f0a274d9f..5fd5f6a8c 100644 --- a/iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp +++ b/iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
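IONVRAMController above is abstract (OSDefineAbstractStructors) and its sync()
is an empty default, so a platform driver supplies the real NVRAM access. A
minimal sketch of a concrete subclass; MyNVRAMController is a hypothetical
name, and the read/write overrides assume the pure-virtual IOByteCount/UInt8*
signatures this class carries in xnu's IONVRAMController.h (if the header
differs, only the sync() override applies):

	class MyNVRAMController : public IONVRAMController
	{
		OSDeclareDefaultStructors(MyNVRAMController);

	public:
		// Flush any cached contents out to the NVRAM part.
		virtual void sync(void) APPLE_KEXT_OVERRIDE
		{
			// write dirty bytes to the hardware here
		}

		// Accessors assumed to be pure virtual in the base class.
		virtual IOReturn read(IOByteCount offset, UInt8 *buffer,
		    IOByteCount length) APPLE_KEXT_OVERRIDE;
		virtual IOReturn write(IOByteCount offset, UInt8 *buffer,
		    IOByteCount length) APPLE_KEXT_OVERRIDE;
	};

	OSDefineMetaClassAndStructors(MyNVRAMController, IONVRAMController);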
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,10 +33,10 @@ static IOReturn IOWatchDogTimerSleepHandler(void *target, void *refCon, - UInt32 messageType, - IOService *provider, - void *messageArgument, - vm_size_t argSize); + UInt32 messageType, + IOService *provider, + void *messageArgument, + vm_size_t argSize); #define kWatchDogEnabledProperty "IOWatchDogEnabled" @@ -46,73 +46,87 @@ static IOReturn IOWatchDogTimerSleepHandler(void *target, void *refCon, OSDefineMetaClassAndAbstractStructors(IOWatchDogTimer, IOService); -OSMetaClassDefineReservedUnused(IOWatchDogTimer, 0); -OSMetaClassDefineReservedUnused(IOWatchDogTimer, 1); -OSMetaClassDefineReservedUnused(IOWatchDogTimer, 2); -OSMetaClassDefineReservedUnused(IOWatchDogTimer, 3); +OSMetaClassDefineReservedUnused(IOWatchDogTimer, 0); +OSMetaClassDefineReservedUnused(IOWatchDogTimer, 1); +OSMetaClassDefineReservedUnused(IOWatchDogTimer, 2); +OSMetaClassDefineReservedUnused(IOWatchDogTimer, 3); -bool IOWatchDogTimer::start(IOService *provider) +bool +IOWatchDogTimer::start(IOService *provider) { - if (!super::start(provider)) return false; - - notifier = registerSleepWakeInterest(IOWatchDogTimerSleepHandler, this); - if (notifier == 0) return false; - - setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); - setWatchDogTimer(0); - - registerService(); - - return true; + if (!super::start(provider)) { + return false; + } + + notifier = registerSleepWakeInterest(IOWatchDogTimerSleepHandler, this); + if (notifier == 0) { + return false; + } + + setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); + setWatchDogTimer(0); + + registerService(); + + return true; } -void IOWatchDogTimer::stop(IOService *provider) +void +IOWatchDogTimer::stop(IOService *provider) { - setWatchDogTimer(0); - notifier->remove(); + setWatchDogTimer(0); + notifier->remove(); } -IOReturn IOWatchDogTimer::setProperties(OSObject *properties) +IOReturn +IOWatchDogTimer::setProperties(OSObject *properties) { - OSNumber *theNumber; - UInt32 theValue; - IOReturn result; - - result = IOUserClient::clientHasPrivilege(current_task(), - kIOClientPrivilegeAdministrator); - if (result != kIOReturnSuccess) return kIOReturnNotPrivileged; - - theNumber = OSDynamicCast(OSNumber, properties); - if (theNumber == 0) return kIOReturnBadArgument; - - theValue = theNumber->unsigned32BitValue(); - if (theValue == 0) { - setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); - } else { - setProperty(kWatchDogEnabledProperty, kOSBooleanTrue); - } - - setWatchDogTimer(theValue); - - return kIOReturnSuccess; + OSNumber *theNumber; + UInt32 theValue; + IOReturn result; + + result = IOUserClient::clientHasPrivilege(current_task(), + kIOClientPrivilegeAdministrator); + if (result != kIOReturnSuccess) { + return kIOReturnNotPrivileged; + } + + theNumber = OSDynamicCast(OSNumber, properties); + if (theNumber == 0) { + return kIOReturnBadArgument; + } + + theValue = theNumber->unsigned32BitValue(); + if (theValue == 0) { + setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); + } else { + setProperty(kWatchDogEnabledProperty, kOSBooleanTrue); + } + + setWatchDogTimer(theValue); + + return kIOReturnSuccess; } -static IOReturn IOWatchDogTimerSleepHandler(void *target, void */*refCon*/, - UInt32 messageType, - IOService */*provider*/, - void *messageArgument, - vm_size_t /*argSize*/) +static IOReturn +IOWatchDogTimerSleepHandler(void *target, void */*refCon*/, + UInt32 messageType, + IOService */*provider*/, + void *messageArgument, + vm_size_t /*argSize*/) { - IOWatchDogTimer 
*watchDogTimer = (IOWatchDogTimer *)target; - sleepWakeNote *swNote = (sleepWakeNote *)messageArgument; - - if (messageType != kIOMessageSystemWillSleep) return kIOReturnUnsupported; - - watchDogTimer->setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); - watchDogTimer->setWatchDogTimer(0); - - swNote->returnValue = 0; - acknowledgeSleepWakeNotification(swNote->powerRef); - - return kIOReturnSuccess; + IOWatchDogTimer *watchDogTimer = (IOWatchDogTimer *)target; + sleepWakeNote *swNote = (sleepWakeNote *)messageArgument; + + if (messageType != kIOMessageSystemWillSleep) { + return kIOReturnUnsupported; + } + + watchDogTimer->setProperty(kWatchDogEnabledProperty, kOSBooleanFalse); + watchDogTimer->setWatchDogTimer(0); + + swNote->returnValue = 0; + acknowledgeSleepWakeNotification(swNote->powerRef); + + return kIOReturnSuccess; } diff --git a/iokit/IOKit/AppleKeyStoreInterface.h b/iokit/IOKit/AppleKeyStoreInterface.h index b5cb5775b..a329a42e2 100644 --- a/iokit/IOKit/AppleKeyStoreInterface.h +++ b/iokit/IOKit/AppleKeyStoreInterface.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,20 +33,18 @@ // from AppleKeyStore & CoreStorage // aka MAX_KEY_SIZE -#define AKS_MAX_KEY_SIZE 128 +#define AKS_MAX_KEY_SIZE 128 // aka rawKey -struct aks_raw_key_t -{ - uint32_t keybytecount; - uint8_t keybytes[AKS_MAX_KEY_SIZE]; +struct aks_raw_key_t { + uint32_t keybytecount; + uint8_t keybytes[AKS_MAX_KEY_SIZE]; }; // aka volumeKey -struct aks_volume_key_t -{ - uint32_t algorithm; - aks_raw_key_t key; +struct aks_volume_key_t { + uint32_t algorithm; + aks_raw_key_t key; }; // aka AKS_GETKEY diff --git a/iokit/IOKit/IOBSD.h b/iokit/IOKit/IOBSD.h index b72a4e8f5..5135d166a 100644 --- a/iokit/IOKit/IOBSD.h +++ b/iokit/IOKit/IOBSD.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
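Because IOWatchDogTimer::setProperties() above casts the incoming object
straight to OSNumber, the value sent from user space must be a bare CFNumber
rather than a property dictionary, and the caller must pass the administrator
privilege check. A sketch of the corresponding user-side call; the function
name is invented, and the units of the value are whatever the concrete
timer's setWatchDogTimer() expects:

	#include <IOKit/IOKitLib.h>
	#include <CoreFoundation/CoreFoundation.h>

	static kern_return_t
	setWatchDogValue(uint32_t value)        // 0 disables the watchdog
	{
		io_service_t service = IOServiceGetMatchingService(
			kIOMasterPortDefault,
			IOServiceMatching("IOWatchDogTimer"));
		if (service == IO_OBJECT_NULL) {
			return KERN_FAILURE;
		}

		// A bare CFNumber arrives in the kernel as an OSNumber,
		// which is exactly what setProperties() expects.
		CFNumberRef number = CFNumberCreate(kCFAllocatorDefault,
		    kCFNumberSInt32Type, &value);
		kern_return_t kr = IORegistryEntrySetCFProperties(service,
		    number);

		CFRelease(number);
		IOObjectRelease(service);
		return kr;
	}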
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IOBSD_H @@ -52,12 +52,11 @@ extern "C" { struct IOPolledFileIOVars; struct mount; -enum -{ - kIOMountChangeMount = 0x00000101, - kIOMountChangeUnmount = 0x00000102, - kIOMountChangeWillResize = 0x00000201, - kIOMountChangeDidResize = 0x00000202, +enum{ + kIOMountChangeMount = 0x00000101, + kIOMountChangeUnmount = 0x00000102, + kIOMountChangeWillResize = 0x00000201, + kIOMountChangeDidResize = 0x00000202, }; extern void IOBSDMountChange(struct mount * mp, uint32_t op); extern boolean_t IOTaskHasEntitlement(task_t task, const char * entitlement); diff --git a/iokit/IOKit/IOBufferMemoryDescriptor.h b/iokit/IOKit/IOBufferMemoryDescriptor.h index 4b218498b..e025e01a3 100644 --- a/iokit/IOKit/IOBufferMemoryDescriptor.h +++ b/iokit/IOKit/IOBufferMemoryDescriptor.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
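The kIOMountChange op codes above group into two families, 0x01xx for the
mount/unmount lifecycle and 0x02xx for the will/did halves of a resize. A
sketch of how a kernel-side consumer fed by IOBSDMountChange() might dispatch
on them (the handler name is illustrative, not an xnu symbol):

	static void
	exampleMountChangeHandler(struct mount *mp, uint32_t op)
	{
		switch (op) {
		case kIOMountChangeMount:       // volume came online
		case kIOMountChangeUnmount:     // volume went away
			// update per-volume bookkeeping here
			break;
		case kIOMountChangeWillResize:  // resize about to begin
		case kIOMountChangeDidResize:   // resize completed
			// revalidate cached size information here
			break;
		}
	}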
- *
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _IOBUFFERMEMORYDESCRIPTOR_H
@@ -31,249 +31,249 @@
#include
enum {
-	kIOMemoryPhysicallyContiguous = 0x00000010,
-	kIOMemoryPageable = 0x00000020,
-	kIOMemoryPurgeable = 0x00000040,
-	kIOMemoryHostPhysicallyContiguous = 0x00000080,
-	kIOMemorySharingTypeMask = 0x000f0000,
-	kIOMemoryUnshared = 0x00000000,
-	kIOMemoryKernelUserShared = 0x00010000,
-	// shared IOMemoryDescriptor options for IOBufferMemoryDescriptor:
-	kIOBufferDescriptorMemoryFlags = kIOMemoryDirectionMask
+	kIOMemoryPhysicallyContiguous = 0x00000010,
+	kIOMemoryPageable = 0x00000020,
+	kIOMemoryPurgeable = 0x00000040,
+	kIOMemoryHostPhysicallyContiguous = 0x00000080,
+	kIOMemorySharingTypeMask = 0x000f0000,
+	kIOMemoryUnshared = 0x00000000,
+	kIOMemoryKernelUserShared = 0x00010000,
+	// shared IOMemoryDescriptor options for IOBufferMemoryDescriptor:
+	kIOBufferDescriptorMemoryFlags = kIOMemoryDirectionMask
#ifdef XNU_KERNEL_PRIVATE
-	| kIOMemoryAutoPrepare
+	| kIOMemoryAutoPrepare
#endif
-	| kIOMemoryThreadSafe
-	| kIOMemoryClearEncrypt
-	| kIOMemoryMapperNone
-	| kIOMemoryUseReserve
+	| kIOMemoryThreadSafe
+	| kIOMemoryClearEncrypt
+	| kIOMemoryMapperNone
+	| kIOMemoryUseReserve
};
-#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_ 1
-#define _IOBUFFERMEMORYDESCRIPTOR_HOSTPHYSICALLYCONTIGUOUS_ 1
+#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_ 1
+#define _IOBUFFERMEMORYDESCRIPTOR_HOSTPHYSICALLYCONTIGUOUS_ 1
/*!
-   @class IOBufferMemoryDescriptor
-   @abstract Provides a simple memory descriptor that allocates its own buffer memory.
-*/
+ * @class IOBufferMemoryDescriptor
+ * @abstract Provides a simple memory descriptor that allocates its own buffer memory.
+ */
class IOBufferMemoryDescriptor : public IOGeneralMemoryDescriptor
{
-	OSDeclareDefaultStructors(IOBufferMemoryDescriptor);
+	OSDeclareDefaultStructors(IOBufferMemoryDescriptor);
private:
/*! @struct ExpansionData
-   @discussion This structure will be used to expand the capablilties of this class in the future.
-   */
-	struct ExpansionData {
-		IOMemoryMap * map;
-	};
+ * @discussion This structure will be used to expand the capabilities of this class in the future.
+ */
+	struct ExpansionData {
+		IOMemoryMap * map;
+	};
/*! @var reserved
-   Reserved for future use. (Internal use only) */
-	APPLE_KEXT_WSHADOW_PUSH;
-	ExpansionData * reserved;
+ * Reserved for future use.
(Internal use only) */ + APPLE_KEXT_WSHADOW_PUSH; + ExpansionData * reserved; protected: - void * _buffer; - vm_size_t _capacity; - vm_offset_t _alignment; - IOOptionBits _options; + void * _buffer; + vm_size_t _capacity; + vm_offset_t _alignment; + IOOptionBits _options; private: - uintptr_t _internalReserved; - unsigned _internalFlags; - APPLE_KEXT_WSHADOW_POP; + uintptr_t _internalReserved; + unsigned _internalFlags; + APPLE_KEXT_WSHADOW_POP; private: #ifndef __LP64__ - virtual bool initWithOptions( - IOOptionBits options, - vm_size_t capacity, - vm_offset_t alignment, - task_t inTask) APPLE_KEXT_DEPRECATED; /* use withOptions() instead */ + virtual bool initWithOptions( + IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment, + task_t inTask) APPLE_KEXT_DEPRECATED; /* use withOptions() instead */ #endif /* !__LP64__ */ public: - virtual bool initWithPhysicalMask( - task_t inTask, - IOOptionBits options, - mach_vm_size_t capacity, - mach_vm_address_t alignment, - mach_vm_address_t physicalMask); + virtual bool initWithPhysicalMask( + task_t inTask, + IOOptionBits options, + mach_vm_size_t capacity, + mach_vm_address_t alignment, + mach_vm_address_t physicalMask); #ifdef __LP64__ - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 0); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 1); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 0); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 1); #else /* !__LP64__ */ - OSMetaClassDeclareReservedUsed(IOBufferMemoryDescriptor, 0); - OSMetaClassDeclareReservedUsed(IOBufferMemoryDescriptor, 1); + OSMetaClassDeclareReservedUsed(IOBufferMemoryDescriptor, 0); + OSMetaClassDeclareReservedUsed(IOBufferMemoryDescriptor, 1); #endif /* !__LP64__ */ - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 2); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 3); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 4); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 5); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 6); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 7); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 8); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 9); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 10); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 11); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 12); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 13); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 14); - OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 15); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 2); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 3); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 4); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 5); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 6); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 7); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 8); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 9); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 10); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 11); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 12); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 13); + OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 14); + 
OSMetaClassDeclareReservedUnused(IOBufferMemoryDescriptor, 15); protected: - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; public: - /* - * withOptions: - * - * Returns a new IOBufferMemoryDescriptor with a buffer large enough to - * hold capacity bytes. The descriptor's length is initially set to the - * capacity. - */ +/* + * withOptions: + * + * Returns a new IOBufferMemoryDescriptor with a buffer large enough to + * hold capacity bytes. The descriptor's length is initially set to the + * capacity. + */ #ifndef __LP64__ - virtual bool initWithOptions( IOOptionBits options, - vm_size_t capacity, - vm_offset_t alignment) APPLE_KEXT_DEPRECATED; /* use withOptions() instead */ + virtual bool initWithOptions( IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment) APPLE_KEXT_DEPRECATED; /* use withOptions() instead */ #endif /* !__LP64__ */ - static IOBufferMemoryDescriptor * withOptions( IOOptionBits options, - vm_size_t capacity, - vm_offset_t alignment = 1); + static IOBufferMemoryDescriptor * withOptions( IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment = 1); /*! @function inTaskWithOptions - @abstract Creates a memory buffer with memory descriptor for that buffer. - @discussion Added in Mac OS X 10.2, this method allocates a memory buffer with a given size and alignment in the task's address space specified, and returns a memory descriptor instance representing the memory. It is recommended that memory allocated for I/O or sharing via mapping be created via IOBufferMemoryDescriptor. Options passed with the request specify the kind of memory to be allocated - pageablity and sharing are specified with option bits. This function may block and so should not be called from interrupt level or while a simple lock is held. - @param inTask The task the buffer will be allocated in. - @param options Options for the allocation:
- kIODirectionOut, kIODirectionIn - set the direction of the I/O transfer.
- kIOMemoryPhysicallyContiguous - pass to request memory be physically contiguous. This option is heavily discouraged. The request may fail if memory is fragmented, may cause large amounts of paging activity, and may take a very long time to execute.
- kIOMemoryPageable - pass to request memory be non-wired - the default for kernel allocated memory is wired.
- kIOMemoryPurgeable - pass to request memory that may later have its purgeable state set with IOMemoryDescriptor::setPurgeable. Only supported for kIOMemoryPageable allocations.
- kIOMemoryKernelUserShared - pass to request memory that will be mapped into both the kernel and client applications.
- kIOMapInhibitCache - allocate memory with inhibited cache setting.
- kIOMapWriteThruCache - allocate memory with writethru cache setting.
- kIOMapCopybackCache - allocate memory with copyback cache setting.
-   kIOMapWriteCombineCache - allocate memory with writecombined cache setting.
-   @param capacity The number of bytes to allocate.
-   @param alignment The minimum required alignment of the buffer in bytes - 1 is the default for no required alignment. For example, pass 256 to get memory allocated at an address with bits 0-7 zero.
-   @result Returns an instance of class IOBufferMemoryDescriptor to be released by the caller, which will free the memory desriptor and associated buffer. */
+ * @abstract Creates a memory buffer with memory descriptor for that buffer.
+ * @discussion Added in Mac OS X 10.2, this method allocates a memory buffer with a given size and alignment in the task's address space specified, and returns a memory descriptor instance representing the memory. It is recommended that memory allocated for I/O or sharing via mapping be created via IOBufferMemoryDescriptor. Options passed with the request specify the kind of memory to be allocated - pageability and sharing are specified with option bits. This function may block and so should not be called from interrupt level or while a simple lock is held.
+ * @param inTask The task the buffer will be allocated in.
+ * @param options Options for the allocation:
+ * kIODirectionOut, kIODirectionIn - set the direction of the I/O transfer.
+ * kIOMemoryPhysicallyContiguous - pass to request memory be physically contiguous. This option is heavily discouraged. The request may fail if memory is fragmented, may cause large amounts of paging activity, and may take a very long time to execute.
+ * kIOMemoryPageable - pass to request memory be non-wired - the default for kernel allocated memory is wired.
+ * kIOMemoryPurgeable - pass to request memory that may later have its purgeable state set with IOMemoryDescriptor::setPurgeable. Only supported for kIOMemoryPageable allocations.
+ * kIOMemoryKernelUserShared - pass to request memory that will be mapped into both the kernel and client applications.
+ * kIOMapInhibitCache - allocate memory with inhibited cache setting.
+ * kIOMapWriteThruCache - allocate memory with writethru cache setting.
+ * kIOMapCopybackCache - allocate memory with copyback cache setting.
+ * kIOMapWriteCombineCache - allocate memory with writecombined cache setting.
+ * @param capacity The number of bytes to allocate.
+ * @param alignment The minimum required alignment of the buffer in bytes - 1 is the default for no required alignment. For example, pass 256 to get memory allocated at an address with bits 0-7 zero.
+ * @result Returns an instance of class IOBufferMemoryDescriptor to be released by the caller, which will free the memory descriptor and associated buffer. */
-	static IOBufferMemoryDescriptor * inTaskWithOptions(
-	    task_t inTask,
-	    IOOptionBits options,
-	    vm_size_t capacity,
-	    vm_offset_t alignment = 1);
+	static IOBufferMemoryDescriptor * inTaskWithOptions(
+	    task_t inTask,
+	    IOOptionBits options,
+	    vm_size_t capacity,
+	    vm_offset_t alignment = 1);
/*! @function inTaskWithPhysicalMask
-   @abstract Creates a memory buffer with memory descriptor for that buffer.
-   @discussion Added in Mac OS X 10.5, this method allocates a memory buffer with a given size and alignment in the task's address space specified, and returns a memory descriptor instance representing the memory. It is recommended that memory allocated for I/O or sharing via mapping be created via IOBufferMemoryDescriptor. Options passed with the request specify the kind of memory to be allocated - pageablity and sharing are specified with option bits. This function may block and so should not be called from interrupt level or while a simple lock is held.
-   @param inTask The task the buffer will be mapped in. Pass NULL to create memory unmapped in any task (eg. for use as a DMA buffer).
-   @param options Options for the allocation:
- kIODirectionOut, kIODirectionIn - set the direction of the I/O transfer.
- kIOMemoryPhysicallyContiguous - pass to request memory be physically contiguous. This option is heavily discouraged. The request may fail if memory is fragmented, may cause large amounts of paging activity, and may take a very long time to execute.
- kIOMemoryKernelUserShared - pass to request memory that will be mapped into both the kernel and client applications.
- kIOMapInhibitCache - allocate memory with inhibited cache setting.
- kIOMapWriteThruCache - allocate memory with writethru cache setting.
- kIOMapCopybackCache - allocate memory with copyback cache setting.
-   kIOMapWriteCombineCache - allocate memory with writecombined cache setting.
-   @param capacity The number of bytes to allocate.
-   @param physicalMask The buffer will be allocated with pages such that physical addresses will only have bits set present in physicalMask. For example, pass 0x00000000FFFFFFFFULL for a buffer to be accessed by hardware that has 32 address bits.
-   @result Returns an instance of class IOBufferMemoryDescriptor to be released by the caller, which will free the memory desriptor and associated buffer. */
+ * @abstract Creates a memory buffer with memory descriptor for that buffer.
+ * @discussion Added in Mac OS X 10.5, this method allocates a memory buffer with a given size and alignment in the task's address space specified, and returns a memory descriptor instance representing the memory. It is recommended that memory allocated for I/O or sharing via mapping be created via IOBufferMemoryDescriptor. Options passed with the request specify the kind of memory to be allocated - pageability and sharing are specified with option bits. This function may block and so should not be called from interrupt level or while a simple lock is held.
+ * @param inTask The task the buffer will be mapped in. Pass NULL to create memory unmapped in any task (e.g. for use as a DMA buffer).
+ * @param options Options for the allocation:
+ * kIODirectionOut, kIODirectionIn - set the direction of the I/O transfer.
+ * kIOMemoryPhysicallyContiguous - pass to request memory be physically contiguous. This option is heavily discouraged. The request may fail if memory is fragmented, may cause large amounts of paging activity, and may take a very long time to execute.
+ * kIOMemoryKernelUserShared - pass to request memory that will be mapped into both the kernel and client applications.
+ * kIOMapInhibitCache - allocate memory with inhibited cache setting.
+ * kIOMapWriteThruCache - allocate memory with writethru cache setting.
+ * kIOMapCopybackCache - allocate memory with copyback cache setting.
+ * kIOMapWriteCombineCache - allocate memory with writecombined cache setting.
+ * @param capacity The number of bytes to allocate.
+ * @param physicalMask The buffer will be allocated with pages such that physical addresses will only have bits set present in physicalMask. For example, pass 0x00000000FFFFFFFFULL for a buffer to be accessed by hardware that has 32 address bits.
+ * @result Returns an instance of class IOBufferMemoryDescriptor to be released by the caller, which will free the memory descriptor and associated buffer. */
-	static IOBufferMemoryDescriptor * inTaskWithPhysicalMask(
-	    task_t inTask,
-	    IOOptionBits options,
-	    mach_vm_size_t capacity,
-	    mach_vm_address_t physicalMask);
+	static IOBufferMemoryDescriptor * inTaskWithPhysicalMask(
+	    task_t inTask,
+	    IOOptionBits options,
+	    mach_vm_size_t capacity,
+	    mach_vm_address_t physicalMask);
-	/*
-	 * withCapacity:
-	 *
-	 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
-	 * hold capacity bytes. The descriptor's length is initially set to the
-	 * capacity.
-	 */
-	static IOBufferMemoryDescriptor * withCapacity(
-	    vm_size_t capacity,
-	    IODirection withDirection,
-	    bool withContiguousMemory = false);
+/*
+ * withCapacity:
+ *
+ * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
+ * hold capacity bytes. The descriptor's length is initially set to the
+ * capacity.
+ */
+	static IOBufferMemoryDescriptor * withCapacity(
+	    vm_size_t capacity,
+	    IODirection withDirection,
+	    bool withContiguousMemory = false);
#ifndef __LP64__
-	virtual bool initWithBytes(const void * bytes,
-	    vm_size_t withLength,
-	    IODirection withDirection,
-	    bool withContiguousMemory = false) APPLE_KEXT_DEPRECATED; /* use withBytes() instead */
+	virtual bool initWithBytes(const void * bytes,
+	    vm_size_t withLength,
+	    IODirection withDirection,
+	    bool withContiguousMemory = false) APPLE_KEXT_DEPRECATED; /* use withBytes() instead */
#endif /* !__LP64__ */
-	/*
-	 * withBytes:
-	 *
-	 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
-	 * The descriptor's length and capacity are set to the input buffer's size.
-	 */
-	static IOBufferMemoryDescriptor * withBytes(
-	    const void * bytes,
-	    vm_size_t withLength,
-	    IODirection withDirection,
-	    bool withContiguousMemory = false);
+/*
+ * withBytes:
+ *
+ * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
+ * The descriptor's length and capacity are set to the input buffer's size.
+ */
+	static IOBufferMemoryDescriptor * withBytes(
+	    const void * bytes,
+	    vm_size_t withLength,
+	    IODirection withDirection,
+	    bool withContiguousMemory = false);
-	/*
-	 * setLength:
-	 *
-	 * Change the buffer length of the memory descriptor. When a new buffer
-	 * is created, the initial length of the buffer is set to be the same as
-	 * the capacity. The length can be adjusted via setLength for a shorter
-	 * transfer (there is no need to create more buffer descriptors when you
-	 * can reuse an existing one, even for different transfer sizes). Note
-	 * that the specified length must not exceed the capacity of the buffer.
-	 */
-	virtual void setLength(vm_size_t length);
+/*
+ * setLength:
+ *
+ * Change the buffer length of the memory descriptor. When a new buffer
+ * is created, the initial length of the buffer is set to be the same as
+ * the capacity. The length can be adjusted via setLength for a shorter
+ * transfer (there is no need to create more buffer descriptors when you
+ * can reuse an existing one, even for different transfer sizes).
Note + * that the specified length must not exceed the capacity of the buffer. + */ + virtual void setLength(vm_size_t length); - /* - * setDirection: - * - * Change the direction of the transfer. This method allows one to redirect - * the descriptor's transfer direction. This eliminates the need to destroy - * and create new buffers when different transfer directions are needed. - */ - virtual void setDirection(IODirection direction); +/* + * setDirection: + * + * Change the direction of the transfer. This method allows one to redirect + * the descriptor's transfer direction. This eliminates the need to destroy + * and create new buffers when different transfer directions are needed. + */ + virtual void setDirection(IODirection direction); - /* - * getCapacity: - * - * Get the buffer capacity - */ - virtual vm_size_t getCapacity() const; +/* + * getCapacity: + * + * Get the buffer capacity + */ + virtual vm_size_t getCapacity() const; - /* - * getBytesNoCopy: - * - * Return the virtual address of the beginning of the buffer - */ - virtual void *getBytesNoCopy(); +/* + * getBytesNoCopy: + * + * Return the virtual address of the beginning of the buffer + */ + virtual void *getBytesNoCopy(); - /* - * getBytesNoCopy: - * - * Return the virtual address of an offset from the beginning of the buffer - */ - virtual void *getBytesNoCopy(vm_size_t start, vm_size_t withLength); +/* + * getBytesNoCopy: + * + * Return the virtual address of an offset from the beginning of the buffer + */ + virtual void *getBytesNoCopy(vm_size_t start, vm_size_t withLength); - /* - * appendBytes: - * - * Add some data to the end of the buffer. This method automatically - * maintains the memory descriptor buffer length. Note that appendBytes - * will not copy past the end of the memory descriptor's current capacity. - */ - virtual bool appendBytes(const void *bytes, vm_size_t withLength); +/* + * appendBytes: + * + * Add some data to the end of the buffer. This method automatically + * maintains the memory descriptor buffer length. Note that appendBytes + * will not copy past the end of the memory descriptor's current capacity. + */ + virtual bool appendBytes(const void *bytes, vm_size_t withLength); #ifndef __LP64__ - virtual void * getVirtualSegment(IOByteCount offset, - IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; /* use getBytesNoCopy() instead */ + virtual void * getVirtualSegment(IOByteCount offset, + IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; /* use getBytesNoCopy() instead */ #endif /* !__LP64__ */ }; diff --git a/iokit/IOKit/IOCPU.h b/iokit/IOKit/IOCPU.h index 7b1e219c5..b1e6ca63b 100644 --- a/iokit/IOKit/IOCPU.h +++ b/iokit/IOKit/IOCPU.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
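Taken together, the IOBufferMemoryDescriptor documentation above implies the
usual allocate/use/release pattern. A sketch under those documented contracts;
the sizes, direction flags, and the 32-bit DMA mask are arbitrary example
values (the mask itself is the one the @param physicalMask text suggests):

	// 4 KB kernel buffer, output direction, 256-byte alignment.
	IOBufferMemoryDescriptor *buf =
	    IOBufferMemoryDescriptor::inTaskWithOptions(
		kernel_task, kIODirectionOut, 4096, 256);
	if (buf != NULL) {
		void *p = buf->getBytesNoCopy(); // valid for the buffer's lifetime
		// ... fill p, then shorten the transfer without reallocating:
		buf->setLength(128);
		buf->release();                  // frees descriptor and buffer
	}

	// Buffer restricted to 32-bit physical addresses, e.g. for a DMA
	// engine without 64-bit addressing.
	IOBufferMemoryDescriptor *dmaBuf =
	    IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
		kernel_task, kIODirectionInOut, 65536, 0x00000000FFFFFFFFULL);
	if (dmaBuf != NULL) {
		dmaBuf->release();
	}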
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -44,65 +44,65 @@ extern "C" { #include enum { - kIOCPUStateUnregistered = 0, - kIOCPUStateUninitalized, - kIOCPUStateStopped, - kIOCPUStateRunning, - kIOCPUStateCount + kIOCPUStateUnregistered = 0, + kIOCPUStateUninitalized, + kIOCPUStateStopped, + kIOCPUStateRunning, + kIOCPUStateCount }; class IOCPU : public IOService { - OSDeclareAbstractStructors(IOCPU); - + OSDeclareAbstractStructors(IOCPU); + private: - OSArray *_cpuGroup; - UInt32 _cpuNumber; - UInt32 _cpuState; - + OSArray *_cpuGroup; + UInt32 _cpuNumber; + UInt32 _cpuState; + protected: - IOService *cpuNub; - processor_t machProcessor; - ipi_handler_t ipi_handler; + IOService *cpuNub; + processor_t machProcessor; + ipi_handler_t ipi_handler; + + struct ExpansionData { }; + ExpansionData *iocpu_reserved; - struct ExpansionData { }; - ExpansionData *iocpu_reserved; + virtual void setCPUNumber(UInt32 cpuNumber); + virtual void setCPUState(UInt32 cpuState); - virtual void setCPUNumber(UInt32 cpuNumber); - virtual void setCPUState(UInt32 cpuState); - public: - virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; - virtual OSObject *getProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; - virtual bool setProperty(const OSSymbol *aKey, OSObject *anObject) APPLE_KEXT_OVERRIDE; - virtual bool serializeProperties(OSSerialize *serialize) const APPLE_KEXT_OVERRIDE; - virtual IOReturn setProperties(OSObject *properties) APPLE_KEXT_OVERRIDE; - virtual void initCPU(bool boot) = 0; - virtual void quiesceCPU(void) = 0; - virtual kern_return_t startCPU(vm_offset_t start_paddr, - vm_offset_t arg_paddr) = 0; - virtual void haltCPU(void) = 0; - virtual void signalCPU(IOCPU *target); - virtual void signalCPUDeferred(IOCPU * target); - virtual void signalCPUCancel(IOCPU * target); - virtual void enableCPUTimeBase(bool enable); - - virtual UInt32 getCPUNumber(void); - virtual UInt32 getCPUState(void); - virtual OSArray *getCPUGroup(void); - virtual UInt32 getCPUGroupSize(void); - virtual processor_t getMachProcessor(void); - - virtual const OSSymbol *getCPUName(void) = 0; - - OSMetaClassDeclareReservedUnused(IOCPU, 0); - OSMetaClassDeclareReservedUnused(IOCPU, 1); - OSMetaClassDeclareReservedUnused(IOCPU, 2); - OSMetaClassDeclareReservedUnused(IOCPU, 3); - OSMetaClassDeclareReservedUnused(IOCPU, 4); - OSMetaClassDeclareReservedUnused(IOCPU, 5); - OSMetaClassDeclareReservedUnused(IOCPU, 6); - OSMetaClassDeclareReservedUnused(IOCPU, 7); + virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; + virtual OSObject *getProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; + virtual bool setProperty(const OSSymbol *aKey, OSObject *anObject) APPLE_KEXT_OVERRIDE; + virtual bool serializeProperties(OSSerialize *serialize) const APPLE_KEXT_OVERRIDE; + virtual IOReturn setProperties(OSObject *properties) APPLE_KEXT_OVERRIDE; + virtual void initCPU(bool boot) = 0; + virtual void quiesceCPU(void) = 0; + virtual kern_return_t startCPU(vm_offset_t start_paddr, + vm_offset_t arg_paddr) = 0; + virtual void haltCPU(void) = 0; + virtual void signalCPU(IOCPU *target); + virtual void 
signalCPUDeferred(IOCPU * target); + virtual void signalCPUCancel(IOCPU * target); + virtual void enableCPUTimeBase(bool enable); + + virtual UInt32 getCPUNumber(void); + virtual UInt32 getCPUState(void); + virtual OSArray *getCPUGroup(void); + virtual UInt32 getCPUGroupSize(void); + virtual processor_t getMachProcessor(void); + + virtual const OSSymbol *getCPUName(void) = 0; + + OSMetaClassDeclareReservedUnused(IOCPU, 0); + OSMetaClassDeclareReservedUnused(IOCPU, 1); + OSMetaClassDeclareReservedUnused(IOCPU, 2); + OSMetaClassDeclareReservedUnused(IOCPU, 3); + OSMetaClassDeclareReservedUnused(IOCPU, 4); + OSMetaClassDeclareReservedUnused(IOCPU, 5); + OSMetaClassDeclareReservedUnused(IOCPU, 6); + OSMetaClassDeclareReservedUnused(IOCPU, 7); }; void IOCPUSleepKernel(void); @@ -114,46 +114,46 @@ extern "C" kern_return_t IOCPURunPlatformPanicSyncAction(void *addr, uint32_t of class IOCPUInterruptController : public IOInterruptController { - OSDeclareDefaultStructors(IOCPUInterruptController); - + OSDeclareDefaultStructors(IOCPUInterruptController); + private: - int enabledCPUs; - -protected: - int numCPUs; - int numSources; + int enabledCPUs; + +protected: + int numCPUs; + int numSources; - struct ExpansionData { }; - ExpansionData *iocpuic_reserved; + struct ExpansionData { }; + ExpansionData *iocpuic_reserved; public: - virtual IOReturn initCPUInterruptController(int sources); - virtual void registerCPUInterruptController(void); - virtual void setCPUInterruptProperties(IOService *service); - virtual void enableCPUInterrupt(IOCPU *cpu); - - virtual IOReturn registerInterrupt(IOService *nub, int source, - void *target, - IOInterruptHandler handler, - void *refCon) APPLE_KEXT_OVERRIDE; - - virtual IOReturn getInterruptType(IOService *nub, int source, - int *interruptType) APPLE_KEXT_OVERRIDE; - - virtual IOReturn enableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; - virtual IOReturn disableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; - virtual IOReturn causeInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; - - virtual IOReturn handleInterrupt(void *refCon, IOService *nub, - int source) APPLE_KEXT_OVERRIDE; - - virtual IOReturn initCPUInterruptController(int sources, int cpus); - - OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 1); - OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 2); - OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 3); - OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 4); - OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 5); + virtual IOReturn initCPUInterruptController(int sources); + virtual void registerCPUInterruptController(void); + virtual void setCPUInterruptProperties(IOService *service); + virtual void enableCPUInterrupt(IOCPU *cpu); + + virtual IOReturn registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon) APPLE_KEXT_OVERRIDE; + + virtual IOReturn getInterruptType(IOService *nub, int source, + int *interruptType) APPLE_KEXT_OVERRIDE; + + virtual IOReturn enableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + virtual IOReturn disableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + virtual IOReturn causeInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, + int source) APPLE_KEXT_OVERRIDE; + + virtual IOReturn initCPUInterruptController(int sources, int cpus); + + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 1); + 
OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 2); + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 3); + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 4); + OSMetaClassDeclareReservedUnused(IOCPUInterruptController, 5); }; #endif /* ! _IOKIT_CPU_H */ diff --git a/iokit/IOKit/IOCatalogue.h b/iokit/IOKit/IOCatalogue.h index 0f1a8bdb9..a9f7e3b44 100644 --- a/iokit/IOKit/IOCatalogue.h +++ b/iokit/IOKit/IOCatalogue.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Inc. All rights reserved. + * Copyright (c) 1998 Apple Inc. All rights reserved. * * HISTORY * @@ -48,176 +48,176 @@ class IOService; /*! - @class IOCatalogue - @abstract In-kernel database for IOKit driver personalities. - @discussion The IOCatalogue is a database which contains all IOKit driver personalities. IOService uses this resource when matching devices to their associated drivers. -*/ + * @class IOCatalogue + * @abstract In-kernel database for IOKit driver personalities. + * @discussion The IOCatalogue is a database which contains all IOKit driver personalities. IOService uses this resource when matching devices to their associated drivers. + */ class IOCatalogue : public OSObject { - OSDeclareDefaultStructors(IOCatalogue) - + OSDeclareDefaultStructors(IOCatalogue) + private: - IORWLock * lock; - SInt32 generation; - OSDictionary * personalities; - OSArray * arrayForPersonality(OSDictionary * dict); - void addPersonality(OSDictionary * dict); + IORWLock * lock; + SInt32 generation; + OSDictionary * personalities; + OSArray * arrayForPersonality(OSDictionary * dict); + void addPersonality(OSDictionary * dict); public: - /*! - @function initialize - @abstract Creates and initializes the database object and poputates it with in-kernel driver personalities. - */ - static void initialize( void ); - - /*! - @function init - @abstract Initializes the database object. - @param initArray The initial array of driver personalities to populate the database. - */ - bool init( OSArray * initArray ); - - /*! - @function free - @abstract Cleans up the database and deallocates memory allocated at initialization. This is never called in normal operation of the system. - */ - void free( void ) APPLE_KEXT_OVERRIDE; - - /*! - @function findDrivers - @abstract This is the primary entry point for IOService. 
- @param service The service - @param generationCount Returns a reference to the generation count of the database. The generation count increases only when personalities are added to the database *and* IOService matching has been initiated. - @result Returns an ordered set of driver personalities ranked on probe-scores. The ordered set must be released by the receiver. - */ - OSOrderedSet * findDrivers( IOService * service, SInt32 * generationCount ); - - /*! - @function findDrivers - @abstract A more general purpose interface which allows one to retreive driver personalities based the intersection of the 'matching' dictionary and the personality's own property list. - @param matching A dictionary containing only keys and values which are to be used for matching. For example, a matching dictionary containing 'IOProviderClass'='IOPCIDevice' will return all personalities with an IOProviderClass key and a value of IOPCIDevice. - @param generationCount Returns a reference to the current generation of the database. The generation count increases only when personalities are added to the database *and* IOService matching has been initiated. - @result Returns an ordered set of driver personalities ranked on probe-scores. The ordered set must be released by the receiver. - */ - OSOrderedSet * findDrivers( OSDictionary * matching, SInt32 * generationCount ); - - /*! - @function addDrivers - @abstract Adds an array of driver personalities to the database. - @param array Array of driver personalities to be added to the database. - @param doNubMatching Start matching process after personalities have been added. - @result Returns true if driver personality was added to the database successfully. Failure is due to a memory allocation failure. - */ - bool addDrivers( OSArray * array, bool doNubMatching = true ); - - /*! - @function removeDrivers - @abstract Remove driver personalities from the database based on matching information provided. - @param matching A dictionary whose keys and values are used for matching personalities in the database. For example, a matching dictionary containing a 'IOProviderClass' key with the value 'IOPCIDevice' will remove all personalities which have the key 'IOProviderClass' equal to 'IOPCIDevice'. - @param doNubMatching Start matching process after personalities have been removed. Matching criteria is based on IOProviderClass of those personalities which were removed. This is to allow drivers which haven't been matched to match against NUB's which were blocked by the previous personalities. - @result Returns true if personality was removed successfully. Failure is due to a memory allocation failure. - */ - bool removeDrivers( OSDictionary * matching, bool doNubMatching = true ); - - /*! - @function getGenerationCount - @abstract Get the current generation count of the database. - */ - SInt32 getGenerationCount( void ) const; - - /*! - @function isModuleLoaded - @abstract Reports if a kernel module has been loaded. - @param moduleName Name of the module. - @result Returns true if the associated kernel module has been loaded into the kernel. - */ - bool isModuleLoaded( OSString * moduleName ) const; - - /*! - @function isModuleLoaded - @abstract Reports if a kernel module has been loaded. - @param moduleName Name of the module. - @result Returns true if the associated kernel module has been loaded into the kernel. - */ - bool isModuleLoaded( const char * moduleName ) const; - - /*! 
- @function isModuleLoaded - @abstract Reports if a kernel module has been loaded for a particular personality. - @param driver A driver personality's property list. - @result Returns true if the associated kernel module has been loaded into the kernel for a particular driver personality on which it depends. - */ - bool isModuleLoaded( OSDictionary * driver ) const; - - /*! - @function moduleHasLoaded - @abstract Callback function called after a IOKit dependent kernel module is loaded. - @param name Name of the kernel module. - */ - void moduleHasLoaded( OSString * name ); - - /*! - @function moduleHasLoaded - @abstract Callback function called after a IOKit dependent kernel module is loaded. - @param name Name of the kernel module. - */ - void moduleHasLoaded( const char * name ); - - /*! - @function terminateDrivers - @abstract Terminates all instances of a driver which match the contents of the matching dictionary. Does not unload module. - @param matching A dictionary whose keys and values are used for matching personalities in the database. For example, a matching dictionary containing a 'IOProviderClass' key with the value 'IOPCIDevice' will cause termination for all instances whose personalities have the key 'IOProviderClass' equal to 'IOPCIDevice'. - */ - IOReturn terminateDrivers( OSDictionary * matching ); - - /*! - @function terminateDriversForModule - @abstract Terminates all instances of a driver which depends on a particular module and unloads the module. - @param moduleName Name of the module which is used to determine which driver instances to terminate and unload. - @param unload Flag to cause the actual unloading of the module. - */ - IOReturn terminateDriversForModule( OSString * moduleName, bool unload = true); - - /*! - @function terminateDriversForModule - @abstract Terminates all instances of a driver which depends on a particular module and unloads the module. - @param moduleName Name of the module which is used to determine which driver instances to terminate and unload. - @param unload Flag to cause the actual unloading of the module. - */ - IOReturn terminateDriversForModule( const char * moduleName, bool unload = true); - - /*! - @function startMatching - @abstract Starts an IOService matching thread where matching keys and values are provided by the matching dictionary. - @param matching A dictionary whose keys and values are used for matching personalities in the database. For example, a matching dictionary containing a 'IOProviderClass' key with the value 'IOPCIDevice' will start matching for all personalities which have the key 'IOProviderClass' equal to 'IOPCIDevice'. - */ - bool startMatching( OSDictionary * matching ); - - /*! - @function reset - @abstract Return the Catalogue to its initial state. - @discussion - Should only be used by kextd just before it sends all kext personalities down during a rescan. - */ - void reset(void); - - /*! - @function resetAndAddDrivers - @abstract Replace personalities in IOCatalog with those provided. - @discussion - Resets the catalogue with a new set of drivers, preserving matching originals to keep wired memory usage down. - */ - bool resetAndAddDrivers(OSArray * drivers, bool doNubMatching = true); - - /*! - @function serialize - @abstract Serializes the catalog for transport to the user. - @param s The serializer object. - @result Returns false if unable to serialize database, most likely due to memory shortage. 
-     */
-    virtual bool serialize(OSSerialize * s) const APPLE_KEXT_OVERRIDE;
-
-    bool serializeData(IOOptionBits kind, OSSerialize * s) const;
+/*!
+ * @function initialize
+ * @abstract Creates and initializes the database object and populates it with in-kernel driver personalities.
+ */
+    static void initialize( void );
+
+/*!
+ * @function init
+ * @abstract Initializes the database object.
+ * @param initArray The initial array of driver personalities to populate the database.
+ */
+    bool init( OSArray * initArray );
+
+/*!
+ * @function free
+ * @abstract Cleans up the database and deallocates memory allocated at initialization. This is never called in normal operation of the system.
+ */
+    void free( void ) APPLE_KEXT_OVERRIDE;
+
+/*!
+ * @function findDrivers
+ * @abstract This is the primary entry point for IOService.
+ * @param service The service to be matched against the database's personalities.
+ * @param generationCount Returns a reference to the generation count of the database. The generation count increases only when personalities are added to the database *and* IOService matching has been initiated.
+ * @result Returns an ordered set of driver personalities ranked on probe-scores. The ordered set must be released by the receiver.
+ */
+    OSOrderedSet * findDrivers( IOService * service, SInt32 * generationCount );
+
+/*!
+ * @function findDrivers
+ * @abstract A more general purpose interface which allows one to retrieve driver personalities based on the intersection of the 'matching' dictionary and the personality's own property list.
+ * @param matching A dictionary containing only keys and values which are to be used for matching. For example, a matching dictionary containing 'IOProviderClass'='IOPCIDevice' will return all personalities with an IOProviderClass key and a value of IOPCIDevice.
+ * @param generationCount Returns a reference to the current generation of the database. The generation count increases only when personalities are added to the database *and* IOService matching has been initiated.
+ * @result Returns an ordered set of driver personalities ranked on probe-scores. The ordered set must be released by the receiver.
+ */
+    OSOrderedSet * findDrivers( OSDictionary * matching, SInt32 * generationCount );
+
+/*!
+ * @function addDrivers
+ * @abstract Adds an array of driver personalities to the database.
+ * @param array Array of driver personalities to be added to the database.
+ * @param doNubMatching Start matching process after personalities have been added.
+ * @result Returns true if driver personality was added to the database successfully. Failure is due to a memory allocation failure.
+ */
+    bool addDrivers( OSArray * array, bool doNubMatching = true );
+
+/*!
+ * @function removeDrivers
+ * @abstract Remove driver personalities from the database based on matching information provided.
+ * @param matching A dictionary whose keys and values are used for matching personalities in the database. For example, a matching dictionary containing a 'IOProviderClass' key with the value 'IOPCIDevice' will remove all personalities which have the key 'IOProviderClass' equal to 'IOPCIDevice'.
+ * @param doNubMatching Start matching process after personalities have been removed. Matching criteria are based on the IOProviderClass of those personalities which were removed. This is to allow drivers which haven't been matched to match against NUBs which were blocked by the previous personalities.
+ * @result Returns true if personality was removed successfully. Failure is due to a memory allocation failure.
+ */
+    bool removeDrivers( OSDictionary * matching, bool doNubMatching = true );
+
+/*!
+ * @function getGenerationCount
+ * @abstract Get the current generation count of the database.
+ */
+    SInt32 getGenerationCount( void ) const;
+
+/*!
+ * @function isModuleLoaded
+ * @abstract Reports if a kernel module has been loaded.
+ * @param moduleName Name of the module.
+ * @result Returns true if the associated kernel module has been loaded into the kernel.
+ */
+    bool isModuleLoaded( OSString * moduleName ) const;
+
+/*!
+ * @function isModuleLoaded
+ * @abstract Reports if a kernel module has been loaded.
+ * @param moduleName Name of the module.
+ * @result Returns true if the associated kernel module has been loaded into the kernel.
+ */
+    bool isModuleLoaded( const char * moduleName ) const;
+
+/*!
+ * @function isModuleLoaded
+ * @abstract Reports if a kernel module has been loaded for a particular personality.
+ * @param driver A driver personality's property list.
+ * @result Returns true if the associated kernel module has been loaded into the kernel for a particular driver personality on which it depends.
+ */
+    bool isModuleLoaded( OSDictionary * driver ) const;
+
+/*!
+ * @function moduleHasLoaded
+ * @abstract Callback function called after an IOKit-dependent kernel module is loaded.
+ * @param name Name of the kernel module.
+ */
+    void moduleHasLoaded( OSString * name );
+
+/*!
+ * @function moduleHasLoaded
+ * @abstract Callback function called after an IOKit-dependent kernel module is loaded.
+ * @param name Name of the kernel module.
+ */
+    void moduleHasLoaded( const char * name );
+
+/*!
+ * @function terminateDrivers
+ * @abstract Terminates all instances of a driver which match the contents of the matching dictionary. Does not unload the module.
+ * @param matching A dictionary whose keys and values are used for matching personalities in the database. For example, a matching dictionary containing a 'IOProviderClass' key with the value 'IOPCIDevice' will cause termination for all instances whose personalities have the key 'IOProviderClass' equal to 'IOPCIDevice'.
+ */
+    IOReturn terminateDrivers( OSDictionary * matching );
+
+/*!
+ * @function terminateDriversForModule
+ * @abstract Terminates all instances of a driver which depend on a particular module and unloads the module.
+ * @param moduleName Name of the module which is used to determine which driver instances to terminate and unload.
+ * @param unload Flag to cause the actual unloading of the module.
+ */
+    IOReturn terminateDriversForModule( OSString * moduleName, bool unload = true);
+
+/*!
+ * @function terminateDriversForModule
+ * @abstract Terminates all instances of a driver which depend on a particular module and unloads the module.
+ * @param moduleName Name of the module which is used to determine which driver instances to terminate and unload.
+ * @param unload Flag to cause the actual unloading of the module.
+ */
+    IOReturn terminateDriversForModule( const char * moduleName, bool unload = true);
+
+/*!
+ * @function startMatching
+ * @abstract Starts an IOService matching thread where matching keys and values are provided by the matching dictionary.
+ * @param matching A dictionary whose keys and values are used for matching personalities in the database. For example, a matching dictionary containing a 'IOProviderClass' key with the value 'IOPCIDevice' will start matching for all personalities which have the key 'IOProviderClass' equal to 'IOPCIDevice'.
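The matching-dictionary convention documented above is easy to exercise directly. A minimal sketch, assuming the usual `gIOCatalogue` global declared by this header and reusing the `IOProviderClass`/`IOPCIDevice` example from the comments (error handling trimmed):

```cpp
#include <IOKit/IOCatalogue.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSOrderedSet.h>
#include <libkern/c++/OSString.h>

static void
list_pci_personalities(void)
{
    OSDictionary * matching = OSDictionary::withCapacity(1);
    OSString *     provider = OSString::withCString("IOPCIDevice");

    if (matching && provider) {
        matching->setObject("IOProviderClass", provider);

        SInt32 generation = 0;
        // Personalities come back ranked by probe score; the receiver
        // must release the returned set, per the @result text above.
        OSOrderedSet * drivers = gIOCatalogue->findDrivers(matching, &generation);
        if (drivers) {
            // ... walk the personalities ...
            drivers->release();
        }
    }
    if (provider) {
        provider->release();
    }
    if (matching) {
        matching->release();
    }
}
```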
+ */
+    bool startMatching( OSDictionary * matching );
+
+/*!
+ * @function reset
+ * @abstract Return the Catalogue to its initial state.
+ * @discussion
+ * Should only be used by kextd just before it sends all kext personalities down during a rescan.
+ */
+    void reset(void);
+
+/*!
+ * @function resetAndAddDrivers
+ * @abstract Replace personalities in IOCatalog with those provided.
+ * @discussion
+ * Resets the catalogue with a new set of drivers, preserving matching originals to keep wired memory usage down.
+ */
+    bool resetAndAddDrivers(OSArray * drivers, bool doNubMatching = true);
+
+/*!
+ * @function serialize
+ * @abstract Serializes the catalog for transport to the user.
+ * @param s The serializer object.
+ * @result Returns false if unable to serialize database, most likely due to memory shortage.
+ */
+    virtual bool serialize(OSSerialize * s) const APPLE_KEXT_OVERRIDE;
+
+    bool serializeData(IOOptionBits kind, OSSerialize * s) const;

 /* This stuff is no longer used at all we keep it around for i386
  * binary compatibility only. Symbols are no longer exported.
@@ -225,15 +225,15 @@ public:

 private:

-    /*!
-        @function unloadModule
-        @abstract Unloads the reqested module if no driver instances are currently depending on it.
-        @param moduleName An OSString containing the name of the module to unload.
-     */
-    IOReturn unloadModule( OSString * moduleName ) const;
+/*!
+ * @function unloadModule
+ * @abstract Unloads the requested module if no driver instances are currently depending on it.
+ * @param moduleName An OSString containing the name of the module to unload.
+ */
+    IOReturn unloadModule( OSString * moduleName ) const;

-    IOReturn _removeDrivers(OSDictionary * matching);
-    IOReturn _terminateDrivers(OSDictionary * matching);
+    IOReturn _removeDrivers(OSDictionary * matching);
+    IOReturn _terminateDrivers(OSDictionary * matching);
 };

 extern const OSSymbol * gIOClassKey;

diff --git a/iokit/IOKit/IOCommand.h b/iokit/IOKit/IOCommand.h
index fc77b627f..9c3e6c06b 100644
--- a/iokit/IOKit/IOCommand.h
+++ b/iokit/IOKit/IOCommand.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */

@@ -65,19 +65,19 @@
  * from a device driver to a controller. All controller commands (e.g. IOATACommand)
  * should inherit from this class.
  */
- 
+
 class IOCommand : public OSObject
 {
-    OSDeclareDefaultStructors(IOCommand)
-    
+    OSDeclareDefaultStructors(IOCommand)
+
 public:
-    virtual bool init(void) APPLE_KEXT_OVERRIDE;
-    
+    virtual bool init(void) APPLE_KEXT_OVERRIDE;
+
 /*! @var fCommandChain
-    This variable is used by the current 'owner' to queue the command. During the life cycle of a command it moves through a series of queues. This is the queue pointer for it. Only valid while 'ownership' is clear. For instance a IOCommandPool uses this pointer to maintain its list of free commands. May be manipulated using the kern/queue.h macros */
-    queue_chain_t fCommandChain; /* used to queue commands */
+ * This variable is used by the current 'owner' to queue the command. During the life cycle of a command it moves through a series of queues. This is the queue pointer for it. Only valid while 'ownership' is clear. For instance an IOCommandPool uses this pointer to maintain its list of free commands. May be manipulated using the kern/queue.h macros. */
+    queue_chain_t fCommandChain; /* used to queue commands */
 };

 #endif /* defined(KERNEL) && defined(__cplusplus) */

-#endif /* _IOKIT_IO_COMMAND_H_ */
+#endif /* _IOKIT_IO_COMMAND_H_ */

diff --git a/iokit/IOKit/IOCommandGate.h b/iokit/IOKit/IOCommandGate.h
index 431f179d4..2fa36e137 100644
--- a/iokit/IOKit/IOCommandGate.h
+++ b/iokit/IOKit/IOCommandGate.h
@@ -2,7 +2,7 @@
  * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,13 +22,13 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*[
-    1999-8-10	Godfrey van der Linden(gvdl)
-	Created.
-]*/
+ * 1999-8-10	Godfrey van der Linden(gvdl)
+ *   Created.
+ * ]*/
 /*! @language embedded-c++ */

 #ifndef _IOKIT_IOCOMMANDGATE_H
@@ -37,218 +37,218 @@

 #include <IOKit/IOEventSource.h>

 /*!
-    @class IOCommandGate : public IOEventSource
-    @abstract Single-threaded work loop client request mechanism.
-    @discussion An IOCommandGate instance is an extremely lightweight mechanism
-that executes an action on the driver's work loop. Although the code does not
-technically execute on the work loop itself, a single-threaded work loop semantic
-is maintained for this event source using the work loop gate. The command gate
-tests for a potential self dead lock by checking if the runCommand request is
-made from the work loop's thread, it doesn't check for a mutual dead lock though
-where a pair of work loop's dead lock each other.
-<br><br>
-	The IOCommandGate is a lighter weight version of the IOCommandQueue and
-should be used in preference. Generally use a command queue whenever you need a
-client to submit a request to a work loop. A typical command gate action would
-check if the hardware is active, if so it will add the request to a pending
-queue internal to the device or the device's family. Otherwise if the hardware
-is inactive then this request can be acted upon immediately.
-<br><br>
-	CAUTION: The runAction, runCommand, and attemptCommand functions cannot be called from an interrupt context.
-
-*/
+ * @class IOCommandGate : public IOEventSource
+ * @abstract Single-threaded work loop client request mechanism.
+ * @discussion An IOCommandGate instance is an extremely lightweight mechanism
+ * that executes an action on the driver's work loop. Although the code does not
+ * technically execute on the work loop itself, a single-threaded work loop semantic
+ * is maintained for this event source using the work loop gate. The command gate
+ * tests for a potential self deadlock by checking whether the runCommand request is
+ * made from the work loop's thread; it doesn't check for a mutual deadlock, though,
+ * where a pair of work loops deadlock each other.
+ * <br><br>
+ * The IOCommandGate is a lighter weight version of the IOCommandQueue and
+ * should be used in preference. Generally use a command queue whenever you need a
+ * client to submit a request to a work loop. A typical command gate action would
+ * check if the hardware is active; if so, it will add the request to a pending
+ * queue internal to the device or the device's family. Otherwise, if the hardware
+ * is inactive, this request can be acted upon immediately.
+ * <br><br>
+ * CAUTION: The runAction, runCommand, and attemptCommand functions cannot be called from an interrupt context.
+ *
+ */
 class IOCommandGate : public IOEventSource
 {
-    OSDeclareDefaultStructors(IOCommandGate)
+    OSDeclareDefaultStructors(IOCommandGate)

 public:
 /*!
-    @typedef Action
-    @discussion Type and arguments of callout C function that is used when
-a runCommand is executed by a client. Cast to this type when you want a C++
-member function to be used. Note the arg1 - arg3 parameters are straight pass
-through from the runCommand to the action callout.
-    @param owner
-	Target of the function, can be used as a refcon. The owner is set
-during initialisation of the IOCommandGate instance. Note if a C++ function
-was specified this parameter is implicitly the first paramter in the target
-member function's parameter list.
-    @param arg0 Argument to action from run operation.
-    @param arg1 Argument to action from run operation.
-    @param arg2 Argument to action from run operation.
-    @param arg3 Argument to action from run operation.
-*/
-    typedef IOReturn (*Action)(OSObject *owner,
-                               void *arg0, void *arg1,
-                               void *arg2, void *arg3);
+ * @typedef Action
+ * @discussion Type and arguments of callout C function that is used when
+ * a runCommand is executed by a client. Cast to this type when you want a C++
+ * member function to be used. Note the arg1 - arg3 parameters are straight pass
+ * through from the runCommand to the action callout.
+ * @param owner
+ * Target of the function, can be used as a refcon. The owner is set
+ * during initialisation of the IOCommandGate instance. Note if a C++ function
+ * was specified this parameter is implicitly the first parameter in the target
+ * member function's parameter list.
+ * @param arg0 Argument to action from run operation.
+ * @param arg1 Argument to action from run operation.
+ * @param arg2 Argument to action from run operation.
+ * @param arg3 Argument to action from run operation.
+ */
+    typedef IOReturn (*Action)(OSObject *owner,
+        void *arg0, void *arg1,
+        void *arg2, void *arg3);

 protected:
 /*! @struct ExpansionData
-    @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future.
-    */
-    struct ExpansionData { };
+ * @discussion This structure will be used to expand the capabilities of the IOWorkLoop in the future.
+ */
+    struct ExpansionData { };

 /*! @var reserved
-    Reserved for future use. (Internal use only) */
-    APPLE_KEXT_WSHADOW_PUSH;
-    ExpansionData *reserved;
-    APPLE_KEXT_WSHADOW_POP;
+ * Reserved for future use. (Internal use only) */
+    APPLE_KEXT_WSHADOW_PUSH;
+    ExpansionData *reserved;
+    APPLE_KEXT_WSHADOW_POP;

 public:
 /*! @function commandGate
-    @abstract Factory method to create and initialise an IOCommandGate, See $link init.
-    @result Returns a pointer to the new command gate if sucessful, 0 otherwise. */
-    static IOCommandGate *commandGate(OSObject *owner, Action action = 0);
+ * @abstract Factory method to create and initialise an IOCommandGate. See $link init.
+ * @result Returns a pointer to the new command gate if successful, 0 otherwise. */
+    static IOCommandGate *commandGate(OSObject *owner, Action action = 0);

 /*! @function init
-    @abstract Class initialiser.
-    @discussion Initialiser for IOCommandGate operates only on newly 'newed'
-objects. Shouldn't be used to re-init an existing instance.
-    @param owner Owner of this, newly created, instance of the IOCommandGate. This argument will be used as the first parameter in the action callout.
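The Action typedef just shown is usually satisfied with OSMemberFunctionCast, which performs exactly the member-function cast the comment warns about. A hypothetical sketch (the driver class and its fGate/fState members are invented for illustration):

```cpp
#include <IOKit/IOCommandGate.h>
#include <IOKit/IOService.h>
#include <IOKit/IOWorkLoop.h>

// Hypothetical driver; OSDeclareDefaultStructors and the rest of the
// IOService boilerplate are omitted for brevity.
class com_example_GatedDriver : public IOService {
    IOCommandGate * fGate;
    uint32_t        fState;

    // Runs with the work loop gate held, serialized against all other
    // event sources on the loop.
    IOReturn gatedSetState(void * arg0, void *, void *, void *)
    {
        fState = (uint32_t)(uintptr_t)arg0;
        return kIOReturnSuccess;
    }

public:
    bool start(IOService * provider) APPLE_KEXT_OVERRIDE
    {
        if (!IOService::start(provider)) {
            return false;
        }
        fGate = IOCommandGate::commandGate(this);
        return fGate && getWorkLoop()->addEventSource(fGate) == kIOReturnSuccess;
    }

    IOReturn setState(uint32_t state)
    {
        // OSMemberFunctionCast produces the IOCommandGate::Action the
        // header requires, avoiding the raw-cast compiler warning.
        return fGate->runAction(
            OSMemberFunctionCast(IOCommandGate::Action, this,
                &com_example_GatedDriver::gatedSetState),
            (void *)(uintptr_t)state);
    }
};
```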
-    @param action
-	Pointer to a C function that is called whenever a client of the
-IOCommandGate calls runCommand. NB Can be a C++ member function but caller
-must cast the member function to $link IOCommandGate::Action and they will get a
-compiler warning. Defaults to zero, see $link IOEventSource::setAction.
-    @result True if inherited classes initialise successfully. */
-    virtual bool init(OSObject *owner, Action action = 0);
-
-    // Superclass overrides
-    virtual void free() APPLE_KEXT_OVERRIDE;
-    virtual void setWorkLoop(IOWorkLoop *inWorkLoop) APPLE_KEXT_OVERRIDE;
+ * @abstract Class initialiser.
+ * @discussion Initialiser for IOCommandGate operates only on newly 'newed'
+ * objects. Shouldn't be used to re-init an existing instance.
+ * @param owner Owner of this, newly created, instance of the IOCommandGate. This argument will be used as the first parameter in the action callout.
+ * @param action
+ * Pointer to a C function that is called whenever a client of the
+ * IOCommandGate calls runCommand. NB Can be a C++ member function but the caller
+ * must cast the member function to $link IOCommandGate::Action and they will get a
+ * compiler warning. Defaults to zero, see $link IOEventSource::setAction.
+ * @result True if inherited classes initialise successfully. */
+    virtual bool init(OSObject *owner, Action action = 0);
+
+// Superclass overrides
+    virtual void free() APPLE_KEXT_OVERRIDE;
+    virtual void setWorkLoop(IOWorkLoop *inWorkLoop) APPLE_KEXT_OVERRIDE;

 /*! @function runCommand
-    @abstract Single thread a command with the target work loop.
-    @discussion Client function that causes the current action to be called in
-a single threaded manner. Beware the work loop's gate is recursive and command
-gates can cause direct or indirect re-entrancy. When the executing on a
-client's thread runCommand will sleep until the work loop's gate opens for
-execution of client actions, the action is single threaded against all other
-work loop event sources. If the command is disabled the attempt to run a command will be stalled until enable is called.
-    @param arg0 Parameter for action of command gate, defaults to 0.
-    @param arg1 Parameter for action of command gate, defaults to 0.
-    @param arg2 Parameter for action of command gate, defaults to 0.
-    @param arg3 Parameter for action of command gate, defaults to 0.
-    @result kIOReturnSuccess if successful. kIOReturnAborted if a disabled command gate is free()ed before being reenabled, kIOReturnNoResources if no action available.
-*/
-    virtual IOReturn runCommand(void *arg0 = 0, void *arg1 = 0,
-                                void *arg2 = 0, void *arg3 = 0);
+ * @abstract Single thread a command with the target work loop.
+ * @discussion Client function that causes the current action to be called in
+ * a single threaded manner. Beware that the work loop's gate is recursive and command
+ * gates can cause direct or indirect re-entrancy. When executing on a
+ * client's thread, runCommand will sleep until the work loop's gate opens for
+ * execution of client actions; the action is single threaded against all other
+ * work loop event sources. If the command is disabled, the attempt to run a command will be stalled until enable is called.
+ * @param arg0 Parameter for action of command gate, defaults to 0.
+ * @param arg1 Parameter for action of command gate, defaults to 0.
+ * @param arg2 Parameter for action of command gate, defaults to 0.
+ * @param arg3 Parameter for action of command gate, defaults to 0.
+ * @result kIOReturnSuccess if successful. kIOReturnAborted if a disabled command gate is free()ed before being reenabled, kIOReturnNoResources if no action available.
+ */
+    virtual IOReturn runCommand(void *arg0 = 0, void *arg1 = 0,
+        void *arg2 = 0, void *arg3 = 0);

 /*! @function runAction
-    @abstract Single thread a call to an action with the target work loop.
-    @discussion Client function that causes the given action to be called in
-a single threaded manner. Beware the work loop's gate is recursive and command
-gates can cause direct or indirect re-entrancy. When the executing on a
-client's thread runAction will sleep until the work loop's gate opens for
-execution of client actions, the action is single threaded against all other
-work loop event sources. If the command is disabled the attempt to run a command will be stalled until enable is called.
-    @param action Pointer to function to be executed in the context of the work loop.
-    @param arg0 Parameter for action parameter, defaults to 0.
-    @param arg1 Parameter for action parameter, defaults to 0.
-    @param arg2 Parameter for action parameter, defaults to 0.
-    @param arg3 Parameter for action parameter, defaults to 0.
-    @result The return value of action if it was called, kIOReturnBadArgument if action is not defined, kIOReturnAborted if a disabled command gate is free()ed before being reenabled.
-*/
-    virtual IOReturn runAction(Action action,
-                               void *arg0 = 0, void *arg1 = 0,
-                               void *arg2 = 0, void *arg3 = 0);
+ * @abstract Single thread a call to an action with the target work loop.
+ * @discussion Client function that causes the given action to be called in
+ * a single threaded manner. Beware that the work loop's gate is recursive and command
+ * gates can cause direct or indirect re-entrancy. When executing on a
+ * client's thread, runAction will sleep until the work loop's gate opens for
+ * execution of client actions; the action is single threaded against all other
+ * work loop event sources. If the command is disabled, the attempt to run a command will be stalled until enable is called.
+ * @param action Pointer to function to be executed in the context of the work loop.
+ * @param arg0 Parameter for action parameter, defaults to 0.
+ * @param arg1 Parameter for action parameter, defaults to 0.
+ * @param arg2 Parameter for action parameter, defaults to 0.
+ * @param arg3 Parameter for action parameter, defaults to 0.
+ * @result The return value of action if it was called, kIOReturnBadArgument if action is not defined, kIOReturnAborted if a disabled command gate is free()ed before being reenabled.
+ */
+    virtual IOReturn runAction(Action action,
+        void *arg0 = 0, void *arg1 = 0,
+        void *arg2 = 0, void *arg3 = 0);

 #ifdef __BLOCKS__
 /*! @function runActionBlock
-    @abstract Single thread a call to an action with the target work loop.
-    @discussion Client function that causes the given action to be called in
-a single threaded manner. Beware the work loop's gate is recursive and command
-gates can cause direct or indirect re-entrancy. When the executing on a
-client's thread runAction will sleep until the work loop's gate opens for
-execution of client actions, the action is single threaded against all other
-work loop event sources. If the command is disabled the attempt to run a command will be stalled until enable is called.
-    @param action Block to be executed in the context of the work loop.
-    @result The return value of action if it was called, kIOReturnBadArgument if action is not defined, kIOReturnAborted if a disabled command gate is free()ed before being reenabled.
-*/
-    IOReturn runActionBlock(ActionBlock action);
+ * @abstract Single thread a call to an action with the target work loop.
+ * @discussion Client function that causes the given action to be called in
+ * a single threaded manner. Beware that the work loop's gate is recursive and command
+ * gates can cause direct or indirect re-entrancy. When executing on a
+ * client's thread, runAction will sleep until the work loop's gate opens for
+ * execution of client actions; the action is single threaded against all other
+ * work loop event sources. If the command is disabled, the attempt to run a command will be stalled until enable is called.
+ * @param action Block to be executed in the context of the work loop.
+ * @result The return value of action if it was called, kIOReturnBadArgument if action is not defined, kIOReturnAborted if a disabled command gate is free()ed before being reenabled.
+ */
+    IOReturn runActionBlock(ActionBlock action);
 #endif /* __BLOCKS__ */

 /*! @function attemptCommand
-    @abstract Single thread a command with the target work loop.
-    @discussion Client function that causes the current action to be called in
-a single threaded manner. When the executing on a client's thread attemptCommand will fail if the work loop's gate is closed.
-    @param arg0 Parameter for action of command gate, defaults to 0.
-    @param arg1 Parameter for action of command gate, defaults to 0.
-    @param arg2 Parameter for action of command gate, defaults to 0.
-    @param arg3 Parameter for action of command gate, defaults to 0.
-    @result kIOReturnSuccess if successful. kIOReturnNotPermitted if this event source is currently disabled, kIOReturnNoResources if no action available, kIOReturnCannotLock if lock attempt fails.
-*/
-    virtual IOReturn attemptCommand(void *arg0 = 0, void *arg1 = 0,
-                                    void *arg2 = 0, void *arg3 = 0);
+ * @abstract Single thread a command with the target work loop.
+ * @discussion Client function that causes the current action to be called in
+ * a single threaded manner. When executing on a client's thread, attemptCommand will fail if the work loop's gate is closed.
+ * @param arg0 Parameter for action of command gate, defaults to 0.
+ * @param arg1 Parameter for action of command gate, defaults to 0.
+ * @param arg2 Parameter for action of command gate, defaults to 0.
+ * @param arg3 Parameter for action of command gate, defaults to 0.
+ * @result kIOReturnSuccess if successful. kIOReturnNotPermitted if this event source is currently disabled, kIOReturnNoResources if no action available, kIOReturnCannotLock if lock attempt fails.
+ */
+    virtual IOReturn attemptCommand(void *arg0 = 0, void *arg1 = 0,
+        void *arg2 = 0, void *arg3 = 0);

 /*! @function attemptAction
-    @abstract Single thread a call to an action with the target work loop.
-    @discussion Client function that causes the given action to be called in
-a single threaded manner. Beware the work loop's gate is recursive and command
-gates can cause direct or indirect re-entrancy. When the executing on a
-client's thread attemptCommand will fail if the work loop's gate is closed.
-    @param action Pointer to function to be executed in context of the work loop.
-    @param arg0 Parameter for action parameter, defaults to 0.
-    @param arg1 Parameter for action parameter, defaults to 0.
-    @param arg2 Parameter for action parameter, defaults to 0.
-    @param arg3 Parameter for action parameter, defaults to 0.
-    @result kIOReturnSuccess if successful. kIOReturnBadArgument if action is not defined, kIOReturnNotPermitted if this event source is currently disabled, kIOReturnCannotLock if lock attempt fails.
-
-*/
-    virtual IOReturn attemptAction(Action action,
-                                   void *arg0 = 0, void *arg1 = 0,
-                                   void *arg2 = 0, void *arg3 = 0);
-
-/*! @function commandSleep
-    @abstract Put a thread that is currently holding the command gate to sleep.
-    @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs then the commandGate is closed before the function returns. If the thread does not hold the gate, panic.
-    @param event Pointer to an address.
-    @param interruptible THREAD_UNINT, THREAD_INTERRUPTIBLE or THREAD_ABORTSAFE. THREAD_UNINT specifies that the sleep cannot be interrupted by a signal. THREAD_INTERRUPTIBLE specifies that the sleep may be interrupted by a "kill -9" signal. THREAD_ABORTSAFE (the default value) specifies that the sleep may be interrupted by any user signal.
-    @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted, THREAD_RESTART - restart operation entirely. */
-    virtual IOReturn commandSleep(void *event,
-                                  UInt32 interruptible = THREAD_ABORTSAFE);
+ * @abstract Single thread a call to an action with the target work loop.
+ * @discussion Client function that causes the given action to be called in
+ * a single threaded manner. Beware that the work loop's gate is recursive and command
+ * gates can cause direct or indirect re-entrancy. When executing on a
+ * client's thread, attemptCommand will fail if the work loop's gate is closed.
+ * @param action Pointer to function to be executed in context of the work loop.
+ * @param arg0 Parameter for action parameter, defaults to 0.
+ * @param arg1 Parameter for action parameter, defaults to 0.
+ * @param arg2 Parameter for action parameter, defaults to 0.
+ * @param arg3 Parameter for action parameter, defaults to 0.
+ * @result kIOReturnSuccess if successful. kIOReturnBadArgument if action is not defined, kIOReturnNotPermitted if this event source is currently disabled, kIOReturnCannotLock if lock attempt fails.
+ *
+ */
+    virtual IOReturn attemptAction(Action action,
+        void *arg0 = 0, void *arg1 = 0,
+        void *arg2 = 0, void *arg3 = 0);
+
+/*! @function commandSleep
+ * @abstract Put a thread that is currently holding the command gate to sleep.
+ * @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs then the commandGate is closed before the function returns. If the thread does not hold the gate, panic.
+ * @param event Pointer to an address.
+ * @param interruptible THREAD_UNINT, THREAD_INTERRUPTIBLE or THREAD_ABORTSAFE. THREAD_UNINT specifies that the sleep cannot be interrupted by a signal. THREAD_INTERRUPTIBLE specifies that the sleep may be interrupted by a "kill -9" signal. THREAD_ABORTSAFE (the default value) specifies that the sleep may be interrupted by any user signal.
+ * @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted, THREAD_RESTART - restart operation entirely. */
+    virtual IOReturn commandSleep(void *event,
+        UInt32 interruptible = THREAD_ABORTSAFE);

 /*! @function commandWakeup
-    @abstract Wakeup one or more threads that are asleep on an event.
-    @param event Pointer to an address.
-    @param oneThread true to only wake up at most one thread, false otherwise. */
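The commandSleep()/commandWakeup() pairing documented here follows the classic condition-wait shape. A small sketch under the assumption of a hypothetical busy flag; both functions must be called while the gate is held (for example from inside a runAction callout):

```cpp
#include <IOKit/IOCommandGate.h>

// Called with the gate held; waits until *busy clears.
static IOReturn
gatedWaitForIdle(IOCommandGate * gate, bool * busy)
{
    while (*busy) {
        // commandSleep() drops the gate while asleep and reacquires it
        // before returning, so *busy is only ever examined under the gate.
        IOReturn rc = gate->commandSleep(busy, THREAD_UNINT);
        if (rc != THREAD_AWAKENED) {
            return kIOReturnAborted;
        }
    }
    return kIOReturnSuccess;
}

// Completion path, also gated (e.g. from an interrupt event source action).
static void
gatedCompletion(IOCommandGate * gate, bool * busy)
{
    *busy = false;
    gate->commandWakeup(busy, /* oneThread */ false);
}
```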
-    virtual void commandWakeup(void *event, bool oneThread = false);
+ * @abstract Wake up one or more threads that are asleep on an event.
+ * @param event Pointer to an address.
+ * @param oneThread true to only wake up at most one thread, false otherwise. */
+    virtual void commandWakeup(void *event, bool oneThread = false);

 /*! @function disable
-    @abstract Disable the command gate
-    @discussion When a command gate is disabled all future calls to runAction and runCommand will stall until the gate is enable()d later. This can be used to block client threads when a system sleep is requested. The IOWorkLoop thread itself will never stall, even when making runAction/runCommand calls. This call must be made from a gated context, to clear potential race conditions. */
-    virtual void disable() APPLE_KEXT_OVERRIDE;
+ * @abstract Disable the command gate.
+ * @discussion When a command gate is disabled all future calls to runAction and runCommand will stall until the gate is enable()d later. This can be used to block client threads when a system sleep is requested. The IOWorkLoop thread itself will never stall, even when making runAction/runCommand calls. This call must be made from a gated context, to clear potential race conditions. */
+    virtual void disable() APPLE_KEXT_OVERRIDE;

 /*! @function enable
-    @abstract Enable command gate, this will unblock any blocked Commands and Actions.
-    @discussion Enable the command gate. The attemptAction/attemptCommand calls will now be enabled and can succeeed. Stalled runCommand/runAction calls will be woken up. */
-    virtual void enable() APPLE_KEXT_OVERRIDE;
-
-/*! @function commandSleep
-    @abstract Put a thread that is currently holding the command gate to sleep.
-    @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs or timeout occurs then the commandGate is closed before the function returns. If the thread does not hold the gate, panic.
-    @param event Pointer to an address.
-    @param deadline Clock deadline to timeout the sleep.
-    @param interruptible THREAD_UNINT, THREAD_INTERRUPTIBLE or THREAD_ABORTSAFE. THREAD_UNINT specifies that the sleep cannot be interrupted by a signal. THREAD_INTERRUPTIBLE specifies that the sleep may be interrupted by a "kill -9" signal. THREAD_ABORTSAFE specifies that the sleep may be interrupted by any user signal.
-    @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted, THREAD_RESTART - restart operation entirely. */
-    virtual IOReturn commandSleep(void *event,
-                                  AbsoluteTime deadline,
-                                  UInt32 interruptible);
+ * @abstract Enable the command gate; this will unblock any blocked Commands and Actions.
+ * @discussion Enable the command gate. The attemptAction/attemptCommand calls will now be enabled and can succeed. Stalled runCommand/runAction calls will be woken up. */
+    virtual void enable() APPLE_KEXT_OVERRIDE;
+
+/*! @function commandSleep
+ * @abstract Put a thread that is currently holding the command gate to sleep.
+ * @discussion Put a thread to sleep waiting for an event but release the gate first. If the event occurs or timeout occurs then the commandGate is closed before the function returns. If the thread does not hold the gate, panic.
+ * @param event Pointer to an address.
+ * @param deadline Clock deadline to timeout the sleep.
+ * @param interruptible THREAD_UNINT, THREAD_INTERRUPTIBLE or THREAD_ABORTSAFE. THREAD_UNINT specifies that the sleep cannot be interrupted by a signal. THREAD_INTERRUPTIBLE specifies that the sleep may be interrupted by a "kill -9" signal. THREAD_ABORTSAFE specifies that the sleep may be interrupted by any user signal.
+ * @result THREAD_AWAKENED - normal wakeup, THREAD_TIMED_OUT - timeout expired, THREAD_INTERRUPTED - interrupted, THREAD_RESTART - restart operation entirely. */
+    virtual IOReturn commandSleep(void *event,
+        AbsoluteTime deadline,
+        UInt32 interruptible);

 private:
 #if __LP64__
-    OSMetaClassDeclareReservedUnused(IOCommandGate, 0);
+    OSMetaClassDeclareReservedUnused(IOCommandGate, 0);
 #else
-    OSMetaClassDeclareReservedUsed(IOCommandGate, 0);
+    OSMetaClassDeclareReservedUsed(IOCommandGate, 0);
 #endif
-    OSMetaClassDeclareReservedUnused(IOCommandGate, 1);
-    OSMetaClassDeclareReservedUnused(IOCommandGate, 2);
-    OSMetaClassDeclareReservedUnused(IOCommandGate, 3);
-    OSMetaClassDeclareReservedUnused(IOCommandGate, 4);
-    OSMetaClassDeclareReservedUnused(IOCommandGate, 5);
-    OSMetaClassDeclareReservedUnused(IOCommandGate, 6);
-    OSMetaClassDeclareReservedUnused(IOCommandGate, 7);
+    OSMetaClassDeclareReservedUnused(IOCommandGate, 1);
+    OSMetaClassDeclareReservedUnused(IOCommandGate, 2);
+    OSMetaClassDeclareReservedUnused(IOCommandGate, 3);
+    OSMetaClassDeclareReservedUnused(IOCommandGate, 4);
+    OSMetaClassDeclareReservedUnused(IOCommandGate, 5);
+    OSMetaClassDeclareReservedUnused(IOCommandGate, 6);
+    OSMetaClassDeclareReservedUnused(IOCommandGate, 7);
 };

 #endif /* !_IOKIT_IOCOMMANDGATE_H */

diff --git a/iokit/IOKit/IOCommandPool.h b/iokit/IOKit/IOCommandPool.h
index c21455c5c..356c04ace 100644
--- a/iokit/IOKit/IOCommandPool.h
+++ b/iokit/IOKit/IOCommandPool.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */

@@ -74,157 +74,156 @@
 class IOCommandPool : public OSObject
 {
-
-    OSDeclareDefaultStructors(IOCommandPool)
-
-
+    OSDeclareDefaultStructors(IOCommandPool)
+
+
 protected:
-    queue_head_t fQueueHead; /* head of the queue of elements available */
-    UInt32 fSleepers; /* Count of threads sleeping on this pool */
-    IOCommandGate *fSerializer; /* command gate used for serializing pool access */
+    queue_head_t fQueueHead;    /* head of the queue of elements available */
+    UInt32 fSleepers;           /* Count of threads sleeping on this pool */
+    IOCommandGate *fSerializer; /* command gate used for serializing pool access */

 /*! @struct ExpansionData
-    @discussion This structure will be used to expand the capablilties of the IOEventSource in the future.
-    */
-    struct ExpansionData { };
-
-    /*! @var reserved
-        Reserved for future use. (Internal use only) */
-    ExpansionData *reserved;
-
-    /*!
-     * @const kIOCommandPoolDefaultSize
-     * @abstract The default size of any command pool.
-     * @discussion
-     * kIOCommandPoolDefaultSize is the default size of any command pool.
-     * The default size was determined to be the smallest size for which
-     * a pool makes sense.
-     */
-
-    static const UInt32 kIOCommandPoolDefaultSize = 2;
-
-    /*
-     * Free all of this object's outstanding resources.
-     */
-
-    virtual void free(void) APPLE_KEXT_OVERRIDE;
-
-
+ * @discussion This structure will be used to expand the capabilities of the IOEventSource in the future.
+ */
+    struct ExpansionData { };
+
+/*! @var reserved
+ * Reserved for future use. (Internal use only) */
+    ExpansionData *reserved;
+
+/*!
+ * @const kIOCommandPoolDefaultSize
+ * @abstract The default size of any command pool.
+ * @discussion
+ * kIOCommandPoolDefaultSize is the default size of any command pool.
+ * The default size was determined to be the smallest size for which
+ * a pool makes sense.
+ */
+
+    static const UInt32 kIOCommandPoolDefaultSize = 2;
+
+/*
+ * Free all of this object's outstanding resources.
+ */
+
+    virtual void free(void) APPLE_KEXT_OVERRIDE;
+
+
 public:
-    /*!
-     * @function initWithWorkLoop
-     * @abstract Primary initializer for an IOCommandPool object.
-     * @discussion Primary initializer for an IOCommandPool.
-     * Should probably use IOCommandPool::withWorkLoop() as it is easier to use.
-     * @param workLoop
-     * The workloop that this command pool should synchronize with.
-     * @result Returns true if command pool was successfully initialized.
-     */
-    virtual bool initWithWorkLoop(IOWorkLoop *workLoop);
-
-    /*!
-     * @function withWorkLoop
-     * @abstract Primary factory method for the IOCommandPool class
-     * @discussion
-     * The withWorkLoop method is what is known as a factory method. It creates
-     * a new instance of an IOCommandPool and returns a pointer to that object.
-     * @param inWorkLoop
-     * The workloop that this command pool should synchronize with.
-     * @result
-     * Returns a pointer to an instance of IOCommandPool if successful,
-     * otherwise NULL.
-     */
-
-    static IOCommandPool *withWorkLoop(IOWorkLoop *inWorkLoop);
-
-    /*!
-     * @function init
-     * @abstract Should never be used, obsolete. See initWithWorkLoop.
-     */
-    virtual bool init(IOService *inOwner,
-                      IOWorkLoop *inWorkLoop,
-                      UInt32 inSize = kIOCommandPoolDefaultSize);
-
-    /*!
-     * @function withWorkLoop
-     * @abstract Should never be used, obsolete. See IOCommandPool::withWorkLoop.
-     */
-    static IOCommandPool *commandPool(IOService *inOwner,
-                                      IOWorkLoop *inWorkLoop,
-                                      UInt32 inSize = kIOCommandPoolDefaultSize);
-
-
-    /*!
-     * @function getCommand
-     * @discussion The getCommand method is used to get a pointer to an object of type IOCommand from the pool.
-     * @param blockForCommand
-     * If the caller would like to have its thread slept until a command is
-     * available, it should pass true, else false.
-     * @result
-     * If the caller passes true in blockForCommand, getCommand guarantees that
-     * the result will be a pointer to an IOCommand object from the pool. If
-     * the caller passes false, s/he is responsible for checking whether a non-NULL
-     * pointer was returned.
-     */
-
-    virtual IOCommand *getCommand(bool blockForCommand = true);
-
-    /*!
-     * @function returnCommand
-     * @discussion
-     * The returnCommand method is used to place an object of type IOCommand
-     * into the pool, whether it be the first time, or the 1000th time.
- * @param command - * The command to place in the pool. - */ - - virtual void returnCommand(IOCommand *command); - +/*! + * @function initWithWorkLoop + * @abstract Primary initializer for an IOCommandPool object. + * @discussion Primary initializer for an IOCommandPool. + * Should probably use IOCommandPool::withWorkLoop() as it is easier to use. + * @param workLoop + * The workloop that this command pool should synchronize with. + * @result Returns true if command pool was successfully initialized. + */ + virtual bool initWithWorkLoop(IOWorkLoop *workLoop); + +/*! + * @function withWorkLoop + * @abstract Primary factory method for the IOCommandPool class + * @discussion + * The withWorkLoop method is what is known as a factory method. It creates + * a new instance of an IOCommandPool and returns a pointer to that object. + * @param inWorkLoop + * The workloop that this command pool should synchronize with. + * @result + * Returns a pointer to an instance of IOCommandPool if successful, + * otherwise NULL. + */ + + static IOCommandPool *withWorkLoop(IOWorkLoop *inWorkLoop); + +/*! + * @function init + * @abstract Should never be used, obsolete. See initWithWorkLoop. + */ + virtual bool init(IOService *inOwner, + IOWorkLoop *inWorkLoop, + UInt32 inSize = kIOCommandPoolDefaultSize); + +/*! + * @function withWorkLoop + * @abstract Should never be used, obsolete. See IOCommandPool::withWorkLoop. + */ + static IOCommandPool *commandPool(IOService *inOwner, + IOWorkLoop *inWorkLoop, + UInt32 inSize = kIOCommandPoolDefaultSize); + + +/*! + * @function getCommand + * @discussion The getCommand method is used to get a pointer to an object of type IOCommand from the pool. + * @param blockForCommand + * If the caller would like to have its thread slept until a command is + * available, it should pass true, else false. + * @result + * If the caller passes true in blockForCommand, getCommand guarantees that + * the result will be a pointer to an IOCommand object from the pool. If + * the caller passes false, s/he is responsible for checking whether a non-NULL + * pointer was returned. + */ + + virtual IOCommand *getCommand(bool blockForCommand = true); + +/*! + * @function returnCommand + * @discussion + * The returnCommand method is used to place an object of type IOCommand + * into the pool, whether it be the first time, or the 1000th time. + * @param command + * The command to place in the pool. + */ + + virtual void returnCommand(IOCommand *command); + protected: - - /*! - * @function gatedGetCommand - * @discussion - * The gatedGetCommand method is used to serialize the extraction of a - * command from the pool behind a command gate, runAction-ed by getCommand. - * @param command - * A pointer to a pointer to an IOCommand object where the returned - * command will be stored. - * @param blockForCommand - * A bool that indicates whether to block the request until a command - * becomes available. - * @result - * Returns kIOReturnNoResources if no command is available and the client - * doesn't wish to block until one does become available. - * kIOReturnSuccess if the vCommand argument is valid. - */ - virtual IOReturn gatedGetCommand(IOCommand **command, bool blockForCommand); - - /*! - * @function gatedReturnCommand - * @discussion - * The gatedReturnCommand method is used to serialize the return of a - * command to the pool behind a command gate, runAction-ed by returnCommand. - * @param command - * A pointer to the IOCommand object to be returned to the pool. 
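The getCommand()/returnCommand() life cycle documented in this class works out to a simple recycle loop. A sketch, assuming a hypothetical MyCommand subclass of IOCommand and a work loop supplied by the owning driver:

```cpp
#include <IOKit/IOCommandPool.h>

static void
setUpAndUsePool(IOWorkLoop * workLoop)
{
    IOCommandPool * pool = IOCommandPool::withWorkLoop(workLoop);
    if (!pool) {
        return;
    }

    // Seed the pool: once returned, the pool's internal queue holds the command.
    for (unsigned i = 0; i < 4; i++) {
        MyCommand * seed = OSTypeAlloc(MyCommand);   // hypothetical subclass
        if (seed && seed->init()) {
            pool->returnCommand(seed);
        } else if (seed) {
            seed->release();
        }
    }

    IOCommand * cmd = pool->getCommand(true);   // blocks until one is free
    // ... use the command ...
    pool->returnCommand(cmd);                   // recycle it for the next caller
}
```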
- * @result - * Always returns kIOReturnSuccess if the vCommand argument is valid. - */ - virtual IOReturn gatedReturnCommand(IOCommand *command); + +/*! + * @function gatedGetCommand + * @discussion + * The gatedGetCommand method is used to serialize the extraction of a + * command from the pool behind a command gate, runAction-ed by getCommand. + * @param command + * A pointer to a pointer to an IOCommand object where the returned + * command will be stored. + * @param blockForCommand + * A bool that indicates whether to block the request until a command + * becomes available. + * @result + * Returns kIOReturnNoResources if no command is available and the client + * doesn't wish to block until one does become available. + * kIOReturnSuccess if the vCommand argument is valid. + */ + virtual IOReturn gatedGetCommand(IOCommand **command, bool blockForCommand); + +/*! + * @function gatedReturnCommand + * @discussion + * The gatedReturnCommand method is used to serialize the return of a + * command to the pool behind a command gate, runAction-ed by returnCommand. + * @param command + * A pointer to the IOCommand object to be returned to the pool. + * @result + * Always returns kIOReturnSuccess if the vCommand argument is valid. + */ + virtual IOReturn gatedReturnCommand(IOCommand *command); private: - OSMetaClassDeclareReservedUnused(IOCommandPool, 0); - OSMetaClassDeclareReservedUnused(IOCommandPool, 1); - OSMetaClassDeclareReservedUnused(IOCommandPool, 2); - OSMetaClassDeclareReservedUnused(IOCommandPool, 3); - OSMetaClassDeclareReservedUnused(IOCommandPool, 4); - OSMetaClassDeclareReservedUnused(IOCommandPool, 5); - OSMetaClassDeclareReservedUnused(IOCommandPool, 6); - OSMetaClassDeclareReservedUnused(IOCommandPool, 7); + OSMetaClassDeclareReservedUnused(IOCommandPool, 0); + OSMetaClassDeclareReservedUnused(IOCommandPool, 1); + OSMetaClassDeclareReservedUnused(IOCommandPool, 2); + OSMetaClassDeclareReservedUnused(IOCommandPool, 3); + OSMetaClassDeclareReservedUnused(IOCommandPool, 4); + OSMetaClassDeclareReservedUnused(IOCommandPool, 5); + OSMetaClassDeclareReservedUnused(IOCommandPool, 6); + OSMetaClassDeclareReservedUnused(IOCommandPool, 7); }; -#endif /* defined(KERNEL) && defined(__cplusplus) */ +#endif /* defined(KERNEL) && defined(__cplusplus) */ -#endif /* _IOKIT_IO_COMMAND_POOL_H_ */ +#endif /* _IOKIT_IO_COMMAND_POOL_H_ */ diff --git a/iokit/IOKit/IOCommandQueue.h b/iokit/IOKit/IOCommandQueue.h index 124369d41..2193062b2 100644 --- a/iokit/IOKit/IOCommandQueue.h +++ b/iokit/IOKit/IOCommandQueue.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,20 +22,20 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* -Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - -HISTORY - 1998-7-13 Godfrey van der Linden(gvdl) - Created. - 1998-10-30 Godfrey van der Linden(gvdl) - Converted to C++ - 1999-9-22 Godfrey van der Linden(gvdl) - Deprecated -]*/ + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 1998-7-13 Godfrey van der Linden(gvdl) + * Created. + * 1998-10-30 Godfrey van der Linden(gvdl) + * Converted to C++ + * 1999-9-22 Godfrey van der Linden(gvdl) + * Deprecated + * ]*/ #ifndef _IOKIT_IOCOMMANDQUEUE_H #define _IOKIT_IOCOMMANDQUEUE_H @@ -44,48 +44,48 @@ HISTORY class IOCommandQueue; typedef void (*IOCommandQueueAction) - (OSObject *, void *field0, void *field1, void *field2, void *field3); +(OSObject *, void *field0, void *field1, void *field2, void *field3); class IOCommandQueue : public IOEventSource { - OSDeclareDefaultStructors(IOCommandQueue) + OSDeclareDefaultStructors(IOCommandQueue) protected: - static const int kIOCQDefaultSize = 128; + static const int kIOCQDefaultSize = 128; - void *queue; - IOLock *producerLock; - semaphore_port_t producerSema; - int producerIndex, consumerIndex; - int size; + void *queue; + IOLock *producerLock; + semaphore_port_t producerSema; + int producerIndex, consumerIndex; + int size; - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; - virtual bool checkForWork() APPLE_KEXT_OVERRIDE; + virtual bool checkForWork() APPLE_KEXT_OVERRIDE; public: - static IOCommandQueue *commandQueue(OSObject *inOwner, - IOCommandQueueAction inAction = 0, - int inSize = kIOCQDefaultSize) + static IOCommandQueue *commandQueue(OSObject *inOwner, + IOCommandQueueAction inAction = 0, + int inSize = kIOCQDefaultSize) APPLE_KEXT_DEPRECATED; - virtual bool init(OSObject *inOwner, - IOCommandQueueAction inAction = 0, - int inSize = kIOCQDefaultSize) + virtual bool init(OSObject *inOwner, + IOCommandQueueAction inAction = 0, + int inSize = kIOCQDefaultSize) APPLE_KEXT_DEPRECATED; - virtual kern_return_t enqueueCommand(bool gotoSleep = true, - void *field0 = 0, void *field1 = 0, - void *field2 = 0, void *field3 = 0) + virtual kern_return_t enqueueCommand(bool gotoSleep = true, + void *field0 = 0, void *field1 = 0, + void *field2 = 0, void *field3 = 0) APPLE_KEXT_DEPRECATED; - // WARNING: This function can only be safely called from the appropriate - // work loop context. You should check IOWorkLoop::onThread is true. - // - // For each entry in the commandQueue call the target/action. - // Lockout all new entries to the queue while iterating. - // If the input fields are zero then the queue's owner/action will be used. - virtual int performAndFlush(OSObject *target = 0, - IOCommandQueueAction inAction = 0) +// WARNING: This function can only be safely called from the appropriate +// work loop context. You should check IOWorkLoop::onThread is true. +// +// For each entry in the commandQueue call the target/action. +// Lockout all new entries to the queue while iterating. +// If the input fields are zero then the queue's owner/action will be used. 
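Every entry point in this class is tagged APPLE_KEXT_DEPRECATED, but the intended flow is still worth one sketch before the final declaration below (the owner, action, and request values are hypothetical; new code should use an IOCommandGate instead, as IOCommandGate.h recommends):

```cpp
// Hypothetical action invoked on the work loop for each dequeued entry.
static void
queueAction(OSObject * owner, void * field0, void *, void *, void *)
{
    // ... service the request described by field0 ...
}

// Inside a driver's start() routine (hypothetical driver context):
IOCommandQueue * queue = IOCommandQueue::commandQueue(this, &queueAction);
if (queue && getWorkLoop()->addEventSource(queue) == kIOReturnSuccess) {
    void * request = NULL;              // stand-in for a real request token
    // gotoSleep = true: block the client thread if the queue is full.
    queue->enqueueCommand(true, request);
}
```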
+ virtual int performAndFlush(OSObject *target = 0, + IOCommandQueueAction inAction = 0) APPLE_KEXT_DEPRECATED; }; diff --git a/iokit/IOKit/IOConditionLock.h b/iokit/IOKit/IOConditionLock.h index a3a044069..408a78515 100644 --- a/iokit/IOKit/IOConditionLock.h +++ b/iokit/IOKit/IOConditionLock.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1999 Apple Computer, Inc. All rights reserved. - * Copyright (c) 1994-1996 NeXT Software, Inc. All rights reserved. + * Copyright (c) 1994-1996 NeXT Software, Inc. All rights reserved. */ #ifndef _IOKIT_IOCONDITIONLOCK_H @@ -40,32 +40,32 @@ class IOConditionLock : public OSObject { - OSDeclareDefaultStructors(IOConditionLock) + OSDeclareDefaultStructors(IOConditionLock) private: - IOLock * cond_interlock; // condition var Simple lock - volatile int condition; + IOLock * cond_interlock; // condition var Simple lock + volatile int condition; - IOLock * sleep_interlock; // sleep lock Simple lock - unsigned char interruptible; - volatile bool want_lock; - volatile bool waiting; + IOLock * sleep_interlock; // sleep lock Simple lock + unsigned char interruptible; + volatile bool want_lock; + volatile bool waiting; public: - static IOConditionLock *withCondition(int condition, bool inIntr = true); - virtual bool initWithCondition(int condition, bool inIntr = true); - virtual void free() APPLE_KEXT_OVERRIDE; + static IOConditionLock *withCondition(int condition, bool inIntr = true); + virtual bool initWithCondition(int condition, bool inIntr = true); + virtual void free() APPLE_KEXT_OVERRIDE; - virtual bool tryLock(); // acquire lock, no waiting - virtual int lock(); // acquire lock (enter critical section) - virtual void unlock(); // release lock (leave critical section) + virtual bool tryLock(); // acquire lock, no waiting + virtual int lock(); // acquire lock (enter critical section) + virtual void unlock(); // release lock (leave critical section) - virtual bool getInterruptible() const; - virtual int getCondition() const; - virtual int setCondition(int condition); + virtual bool getInterruptible() const; + virtual int getCondition() const; + virtual int setCondition(int condition); - virtual int lockWhen(int condition); // acquire lock when condition - virtual void unlockWith(int condition); // set condition & release lock + virtual int lockWhen(int condition);// acquire lock when condition + virtual void unlockWith(int condition); // set 
condition & release lock }; #endif /* _IOKIT_IOCONDITIONLOCK_H */ diff --git a/iokit/IOKit/IODMACommand.h b/iokit/IOKit/IODMACommand.h index 89e301e47..5628986e2 100644 --- a/iokit/IOKit/IODMACommand.h +++ b/iokit/IOKit/IODMACommand.h @@ -2,7 +2,7 @@ * Copyright (c) 2005-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IODMACOMMAND_H @@ -33,609 +33,616 @@ class IOMapper; class IOBufferMemoryDescriptor; -enum -{ - kIODMAMapOptionMapped = 0x00000000, - kIODMAMapOptionBypassed = 0x00000001, - kIODMAMapOptionNonCoherent = 0x00000002, - kIODMAMapOptionUnmapped = 0x00000003, - kIODMAMapOptionTypeMask = 0x0000000f, - - kIODMAMapOptionNoCacheStore = 0x00000010, // Memory in descriptor - kIODMAMapOptionOnChip = 0x00000020, // Indicates DMA is on South Bridge - kIODMAMapOptionIterateOnly = 0x00000040 // DMACommand will be used as a cursor only +enum{ + kIODMAMapOptionMapped = 0x00000000, + kIODMAMapOptionBypassed = 0x00000001, + kIODMAMapOptionNonCoherent = 0x00000002, + kIODMAMapOptionUnmapped = 0x00000003, + kIODMAMapOptionTypeMask = 0x0000000f, + + kIODMAMapOptionNoCacheStore = 0x00000010, // Memory in descriptor + kIODMAMapOptionOnChip = 0x00000020,// Indicates DMA is on South Bridge + kIODMAMapOptionIterateOnly = 0x00000040// DMACommand will be used as a cursor only }; /**************************** class IODMACommand ***************************/ /*! - @class IODMACommand - @abstract A mechanism to convert memory references to I/O bus addresses. - @discussion The IODMACommand is supersedes the IOMemoryCursor and greatly enhances the functionality and power of it. The command can be specified to output 64 bit physical addresses and also allows driver writers bypass mapping hardware or get addresses suitable for non-snooped DMA. -

- The command is designed to be very easily subclassable. Most driver writers need to associate some DMA operations with their memory descriptor and usually use a C structure for that purpose. This structure is often kept in a linked list. This IODMACommand has built it linkage and can be derived and 'public:' variables added, giving the developer a structure that can associate a memory descriptor with a particular dma command but will also allow the developer to generate that command and keep the state necessary for tracking it. -

- It is envisaged that a pool of IODMACommands will be created at driver initialisation and each command will be kept in an IOCommandPool while not in use. However if developers wishes to maintain their own free lists that is certainly possible. See the and for sample code on manipulating the command's doubly linked list entries. -

- The IODMACommand can be used in a 'weak-linked' manner. To do this you must avoid using any static member functions. Use the, much slower but safe, weakWithSpecification function. On success a dma command instance will be returned. This instance can then be used to clone as many commands as is needed. Remember deriving from this class can not be done weakly, that is no weak subclassing!
-*/
+ * @class IODMACommand
+ * @abstract A mechanism to convert memory references to I/O bus addresses.
+ * @discussion The IODMACommand supersedes the IOMemoryCursor and greatly enhances its functionality and power. The command can be specified to output 64 bit physical addresses and also allows driver writers to bypass mapping hardware or get addresses suitable for non-snooped DMA.
+ *

+ * The command is designed to be very easily subclassable. Most driver writers need to associate some DMA operations with their memory descriptor and usually use a C structure for that purpose. This structure is often kept in a linked list. This IODMACommand has built-in linkage and can be derived and 'public:' variables added, giving the developer a structure that can associate a memory descriptor with a particular DMA command but will also allow the developer to generate that command and keep the state necessary for tracking it.
+ *

+ * It is envisaged that a pool of IODMACommands will be created at driver initialisation and each command will be kept in an IOCommandPool while not in use. However if a developer wishes to maintain their own free lists, that is certainly possible. See the and for sample code on manipulating the command's doubly linked list entries.
+ *

+ * The IODMACommand can be used in a 'weak-linked' manner. To do this you must avoid using any static member functions. Use the, much slower but safe, weakWithSpecification function. On success a DMA command instance will be returned. This instance can then be used to clone as many commands as is needed. Remember deriving from this class cannot be done weakly, that is no weak subclassing!
+ */
class IODMACommand : public IOCommand
{
- OSDeclareDefaultStructors(IODMACommand);
+ OSDeclareDefaultStructors(IODMACommand);

-friend class IODMAEventSource;
+ friend class IODMAEventSource;

public:
/*!
- @typedef Segment32
- @discussion A 32 bit I/O bus address/length pair
-*/
- struct Segment32 {
- UInt32 fIOVMAddr, fLength;
- };
+ * @typedef Segment32
+ * @discussion A 32 bit I/O bus address/length pair
+ */
+ struct Segment32 {
+ UInt32 fIOVMAddr, fLength;
+ };

/*!
- @typedef Segment64
- @discussion A 64 bit I/O bus address/length pair
-*/
- struct Segment64 {
- UInt64 fIOVMAddr, fLength;
- };
+ * @typedef Segment64
+ * @discussion A 64 bit I/O bus address/length pair
+ */
+ struct Segment64 {
+ UInt64 fIOVMAddr, fLength;
+ };

/*! @enum MappingOptions
- @abstract Mapping types to indicate the desired mapper type for translating memory descriptors into I/O DMA Bus addresses.
- @constant kNonCoherent Used by drivers for non-coherent transfers, implies unmapped memmory
- @constant kMapped Allow a driver to define addressing size
- @constant kBypassed Allow drivers to bypass any mapper
- @constant kMaxMappingOptions Internal use only
-*/
- enum MappingOptions {
- kMapped = kIODMAMapOptionMapped,
- kBypassed = kIODMAMapOptionBypassed,
- kNonCoherent = kIODMAMapOptionNonCoherent,
- kUnmapped = kIODMAMapOptionUnmapped,
- kTypeMask = kIODMAMapOptionTypeMask,
-
- kNoCacheStore = kIODMAMapOptionNoCacheStore, // Memory in descriptor
- kOnChip = kIODMAMapOptionOnChip, // Indicates DMA is on South Bridge
- kIterateOnly = kIODMAMapOptionIterateOnly // DMACommand will be used as a cursor only
- };
-
- struct SegmentOptions {
- uint8_t fStructSize;
- uint8_t fNumAddressBits;
- uint64_t fMaxSegmentSize;
- uint64_t fMaxTransferSize;
- uint32_t fAlignment;
- uint32_t fAlignmentLength;
- uint32_t fAlignmentInternalSegments;
- };
+ * @abstract Mapping types to indicate the desired mapper type for translating memory descriptors into I/O DMA Bus addresses.
+ * @constant kNonCoherent Used by drivers for non-coherent transfers, implies unmapped memory
+ * @constant kMapped Allow a driver to define addressing size
+ * @constant kBypassed Allow drivers to bypass any mapper
+ * @constant kMaxMappingOptions Internal use only
+ */
+ enum MappingOptions {
+ kMapped = kIODMAMapOptionMapped,
+ kBypassed = kIODMAMapOptionBypassed,
+ kNonCoherent = kIODMAMapOptionNonCoherent,
+ kUnmapped = kIODMAMapOptionUnmapped,
+ kTypeMask = kIODMAMapOptionTypeMask,
+
+ kNoCacheStore = kIODMAMapOptionNoCacheStore, // Memory in descriptor
+ kOnChip = kIODMAMapOptionOnChip, // Indicates DMA is on South Bridge
+ kIterateOnly = kIODMAMapOptionIterateOnly // DMACommand will be used as a cursor only
+ };
+
+ struct SegmentOptions {
+ uint8_t fStructSize;
+ uint8_t fNumAddressBits;
+ uint64_t fMaxSegmentSize;
+ uint64_t fMaxTransferSize;
+ uint32_t fAlignment;
+ uint32_t fAlignmentLength;
+ uint32_t fAlignmentInternalSegments;
+ };

/*! @enum SynchronizeOptions
- @abstract Options for the synchronize method.
- @constant kForceDoubleBuffer Copy the entire prepared range to a new page aligned buffer.
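The class discussion above (per-command driver state, pooling in an IOCommandPool) is easier to see in code. The following is a minimal sketch, not part of this header: MyDMACommand, buildCommandPool, the pool depth and the constraint values are all hypothetical driver-side choices.

    #include <IOKit/IOCommandPool.h>
    #include <IOKit/IODMACommand.h>
    #include <IOKit/IOWorkLoop.h>

    // Hypothetical subclass: driver-private state rides along with each command.
    // The matching OSDefineMetaClassAndStructors(MyDMACommand, IODMACommand)
    // belongs in the driver's .cpp file.
    class MyDMACommand : public IODMACommand
    {
        OSDeclareDefaultStructors(MyDMACommand)
    public:
        uint32_t fSlot;   // e.g. which hardware descriptor slot this command drives
    };

    // At driver start: create a pool of identically specified commands.
    static IOCommandPool *
    buildCommandPool(IOWorkLoop *workLoop)
    {
        IOCommandPool *pool = IOCommandPool::withWorkLoop(workLoop);
        if (!pool) {
            return NULL;
        }
        for (uint32_t slot = 0; slot < 16 /* assumed pool depth */; slot++) {
            MyDMACommand *cmd = OSTypeAlloc(MyDMACommand);
            if (cmd && cmd->initWithSpecification(kIODMACommandOutputHost64,
                32,         /* assumed: hardware drives 32 address bits */
                65536)) {   /* assumed: 64K maximum bytes per segment   */
                cmd->fSlot = slot;
                pool->returnCommand(cmd);   // parked until an I/O needs it
            } else if (cmd) {
                cmd->release();
            }
        }
        return pool;
    }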
-*/
- enum SynchronizeOptions {
- kForceDoubleBuffer = 0x01000000
- };
+ * @abstract Options for the synchronize method.
+ * @constant kForceDoubleBuffer Copy the entire prepared range to a new page aligned buffer.
+ */
+ enum SynchronizeOptions {
+ kForceDoubleBuffer = 0x01000000
+ };

/*!
- @typedef SegmentFunction
- @discussion Pointer to a C function that translates a 64 segment and outputs a single desired segment to the array at the requested index. There are a group of pre-implemented SegmentFunctions that may be usefull to the developer below.
- @param segment The 64Bit I/O bus address and length.
- @param segments Base of the output vector of DMA address length pairs.
- @param segmentIndex Index to output 'segment' in the 'segments' array.
- @result Returns true if segment encoding succeeded. false may be returned if the current segment does not fit in an output segment, i.e. a 38bit address wont fit into a 32 encoding.
-*/
- typedef bool (*SegmentFunction)(IODMACommand *target,
- Segment64 segment,
- void *segments,
- UInt32 segmentIndex);
-
- // -------------- Preimplemented output functions ----------------
+ * @typedef SegmentFunction
+ * @discussion Pointer to a C function that translates a 64-bit segment and outputs a single desired segment to the array at the requested index. There are a group of pre-implemented SegmentFunctions that may be useful to the developer below.
+ * @param segment The 64-bit I/O bus address and length.
+ * @param segments Base of the output vector of DMA address length pairs.
+ * @param segmentIndex Index to output 'segment' in the 'segments' array.
+ * @result Returns true if segment encoding succeeded. false may be returned if the current segment does not fit in an output segment, i.e. a 38-bit address won't fit into a 32-bit encoding.
+ */
+ typedef bool (*SegmentFunction)(IODMACommand *target,
+ Segment64 segment,
+ void *segments,
+ UInt32 segmentIndex);
+
+// -------------- Preimplemented output functions ----------------

/*! @function OutputHost32
- @abstract Output host natural Segment32 output segment function.
-*/
- static bool OutputHost32(IODMACommand *target,
+ * @abstract Output host natural Segment32 output segment function.
+ */
+ static bool OutputHost32(IODMACommand *target,
 Segment64 seg, void *segs, UInt32 ind);
/*! @defined kIODMACommandOutputHost32
- @abstract Output host natural Segment32 output segment function.
+ * @abstract Output host natural Segment32 output segment function.
 */
#define kIODMACommandOutputHost32 (IODMACommand::OutputHost32)

/*! @function OutputBig32
- @abstract Output big-endian Segment32 output segment function.
-*/
- static bool OutputBig32(IODMACommand *target,
+ * @abstract Output big-endian Segment32 output segment function.
+ */
+ static bool OutputBig32(IODMACommand *target,
 Segment64 seg, void *segs, UInt32 ind);
/*! @defined kIODMACommandOutputBig32
- @abstract Output big-endian Segment32 output segment function.
+ * @abstract Output big-endian Segment32 output segment function.
 */
#define kIODMACommandOutputBig32 (IODMACommand::OutputBig32)

/*! @function OutputLittle32
- @abstract Output little-endian Segment32 output segment function.
-*/
- static bool OutputLittle32(IODMACommand *target,
+ * @abstract Output little-endian Segment32 output segment function.
+ */
+ static bool OutputLittle32(IODMACommand *target,
 Segment64 seg, void *segs, UInt32 ind);
/*! @defined kIODMACommandOutputLittle32
- @abstract Output little-endian Segment32 output segment function.
-*/ + * @abstract Output little-endian Segment32 output segment function. + */ #define kIODMACommandOutputLittle32 (IODMACommand::OutputLittle32) /*! @function OutputHost64 - @abstract Output host natural Segment64 output segment function. -*/ - static bool OutputHost64(IODMACommand *target, + * @abstract Output host natural Segment64 output segment function. + */ + static bool OutputHost64(IODMACommand *target, Segment64 seg, void *segs, UInt32 ind); /*! @defined kIODMACommandOutputHost64 - @abstract Output host natural Segment64 output segment function. -*/ + * @abstract Output host natural Segment64 output segment function. + */ #define kIODMACommandOutputHost64 (IODMACommand::OutputHost64) /*! @function OutputBig64 - @abstract Output big-endian Segment64 output segment function. -*/ - static bool OutputBig64(IODMACommand *target, + * @abstract Output big-endian Segment64 output segment function. + */ + static bool OutputBig64(IODMACommand *target, Segment64 seg, void *segs, UInt32 ind); /*! @defined kIODMACommandOutputBig64 - @abstract Output big-endian Segment64 output segment function. -*/ + * @abstract Output big-endian Segment64 output segment function. + */ #define kIODMACommandOutputBig64 (IODMACommand::OutputBig64) /*! @function OutputLittle64 - @abstract Output little-endian Segment64 output segment function. -*/ - static bool OutputLittle64(IODMACommand *target, + * @abstract Output little-endian Segment64 output segment function. + */ + static bool OutputLittle64(IODMACommand *target, Segment64 seg, void *segs, UInt32 ind); /*! @defined kIODMACommandOutputLittle64 - @abstract Output little-endian Segment64 output segment function. -*/ + * @abstract Output little-endian Segment64 output segment function. + */ #define kIODMACommandOutputLittle64 (IODMACommand::OutputLittle64) /*! @function withSpecification - @abstract Creates and initializes an IODMACommand in one operation. - @discussion Factory function to create and initialize an IODMACommand in one operation. - @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided. - @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware. A 0 implies no-restriction other than that implied by the output segment function. - @param maxSegmentSize Maximum allowable size for one segment. If 0 is passed the maximum segment size is unlimited. - @param mappingOptions is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance if your hardware only supports 32 bits but must run on machines with > 4G of RAM some mapping will be required. Number of bits will be specified in numAddressBits, see below.This parameter can take 3 values:- kNonCoherent - used for non-coherent hardware transfers, Mapped - Validate that all I/O bus generated addresses are within the number of addressing bits specified, Bypassed indicates that bypassed addressing is required, this is used when the hardware transferes are into coherent memory but no mapping is required. See also prepare() for failure cases. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. - @param alignment Alignment restriction, in bytes, on I/O bus addresses. Defaults to single byte alignment. 
- @param mapper For mapping types kMapped & kBypassed mapper is used to define the hardware that will perform the mapping, defaults to the system mapper.
- @param refCon Reference Constant
- @result Returns a new IODMACommand if successfully created and initialized, 0 otherwise.
-*/
- static IODMACommand *
+ * @abstract Creates and initializes an IODMACommand in one operation.
+ * @discussion Factory function to create and initialize an IODMACommand in one operation.
+ * @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided.
+ * @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware. A 0 implies no-restriction other than that implied by the output segment function.
+ * @param maxSegmentSize Maximum allowable size for one segment. If 0 is passed the maximum segment size is unlimited.
+ * @param mappingOptions is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance if your hardware only supports 32 bits but must run on machines with > 4G of RAM some mapping will be required. Number of bits will be specified in numAddressBits, see below. This parameter can take 3 values: kNonCoherent - used for non-coherent hardware transfers, Mapped - Validate that all I/O bus generated addresses are within the number of addressing bits specified, Bypassed indicates that bypassed addressing is required, this is used when the hardware transfers are into coherent memory but no mapping is required. See also prepare() for failure cases.
+ * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum.
+ * @param alignment Alignment restriction, in bytes, on I/O bus addresses. Defaults to single byte alignment.
+ * @param mapper For mapping types kMapped & kBypassed mapper is used to define the hardware that will perform the mapping, defaults to the system mapper.
+ * @param refCon Reference Constant
+ * @result Returns a new IODMACommand if successfully created and initialized, 0 otherwise.
+ */
+ static IODMACommand *
withSpecification(SegmentFunction outSegFunc,
- UInt8 numAddressBits,
- UInt64 maxSegmentSize,
- MappingOptions mappingOptions = kMapped,
- UInt64 maxTransferSize = 0,
- UInt32 alignment = 1,
- IOMapper *mapper = 0,
- void *refCon = 0);
+ UInt8 numAddressBits,
+ UInt64 maxSegmentSize,
+ MappingOptions mappingOptions = kMapped,
+ UInt64 maxTransferSize = 0,
+ UInt32 alignment = 1,
+ IOMapper *mapper = 0,
+ void *refCon = 0);

/*! @function weakWithSpecification
- @abstract Creates and initialises an IODMACommand in one operation if this version of the operating system supports it.
- @discussion Factory function to create and initialise an IODMACommand in one operation. The function allows a developer to 'weak' link with IODMACommand. This function will return kIOReturnUnsupported if the IODMACommand is unavailable. This function is actually fairly slow so it will be better to call it once then clone the successfully create command using cloneCommand (q.v.).
- @param newCommand Output reference variable of the newly created IODMACommand.
- @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided.
- @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware.
A 0 implies no-restriction other than that implied by the output segment function.
- @param maxSegmentSize Maximum allowable size for one segment. Zero is treated as an unlimited segment size.
- @param mapType is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance if your hardware only supports 32 bits but must run on machines with > 4G of RAM some mapping will be required. Number of bits will be specified in numAddressBits, see below. This parameter can take 3 values:- kNonCoherent - used for non-coherent hardware transfers, Mapped - Validate that all I/O bus generated addresses are within the number of addressing bits specified, Bypassed indicates that bypassed addressing is required, this is used when the hardware transfers are into coherent memory but no mapping is required. See also prepare() for failure cases.
- @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum.
- @param alignment Alignment restriction, in bytes, on I/O bus addresses. Defaults to single byte alignment.
- @param mapper For mapping types kMapped & kBypassed mapper is used to define the hardware that will perform the mapping, defaults to the system mapper.
- @param refCon Reference Constant
- @result kIOReturnSuccess if everything is OK, otherwise kIOReturnBadArgument if newCommand is NULL, kIOReturnUnsupported if the kernel doesn't export IODMACommand or IOReturnError if the new command fails to init, q.v. initWithSpecification.
-*/
- // Note that the function has the attribute always_inline.
- // The point of this function is to make a call into the kernel
- // without generating an undefined symbol. If the client could call
- // the code as a function then the goal of no undefined symbols
- // would be lost thus defeating the purpose.
- static inline IOReturn weakWithSpecification
- (IODMACommand **newCommand,
- SegmentFunction outSegFunc,
- UInt8 numAddressBits,
- UInt64 maxSegmentSize,
- MappingOptions mapType = kMapped,
- UInt64 maxTransferSize = 0,
- UInt32 alignment = 1,
- IOMapper *mapper = 0,
- void *refCon = 0) __attribute__((always_inline));
-
- static IODMACommand *
+ * @abstract Creates and initialises an IODMACommand in one operation if this version of the operating system supports it.
+ * @discussion Factory function to create and initialise an IODMACommand in one operation. The function allows a developer to 'weak' link with IODMACommand. This function will return kIOReturnUnsupported if the IODMACommand is unavailable. This function is actually fairly slow so it will be better to call it once and then clone the successfully created command using cloneCommand (q.v.).
+ * @param newCommand Output reference variable of the newly created IODMACommand.
+ * @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided.
+ * @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware. A 0 implies no-restriction other than that implied by the output segment function.
+ * @param maxSegmentSize Maximum allowable size for one segment. Zero is treated as an unlimited segment size.
+ * @param mapType is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance if your hardware only supports 32 bits but must run on machines with > 4G of RAM some mapping will be required. Number of bits will be specified in numAddressBits, see below. This parameter can take 3 values: kNonCoherent - used for non-coherent hardware transfers, Mapped - Validate that all I/O bus generated addresses are within the number of addressing bits specified, Bypassed indicates that bypassed addressing is required, this is used when the hardware transfers are into coherent memory but no mapping is required. See also prepare() for failure cases.
+ * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum.
+ * @param alignment Alignment restriction, in bytes, on I/O bus addresses. Defaults to single byte alignment.
+ * @param mapper For mapping types kMapped & kBypassed mapper is used to define the hardware that will perform the mapping, defaults to the system mapper.
+ * @param refCon Reference Constant
+ * @result kIOReturnSuccess if everything is OK, otherwise kIOReturnBadArgument if newCommand is NULL, kIOReturnUnsupported if the kernel doesn't export IODMACommand, or kIOReturnError if the new command fails to init, q.v. initWithSpecification.
+ */
+// Note that the function has the attribute always_inline.
+// The point of this function is to make a call into the kernel
+// without generating an undefined symbol. If the client could call
+// the code as a function then the goal of no undefined symbols
+// would be lost thus defeating the purpose.
+ static inline IOReturn weakWithSpecification
+ (IODMACommand **newCommand,
+ SegmentFunction outSegFunc,
+ UInt8 numAddressBits,
+ UInt64 maxSegmentSize,
+ MappingOptions mapType = kMapped,
+ UInt64 maxTransferSize = 0,
+ UInt32 alignment = 1,
+ IOMapper *mapper = 0,
+ void *refCon = 0) __attribute__((always_inline));
+
+ static IODMACommand *
withSpecification(SegmentFunction outSegFunc,
- const SegmentOptions * segmentOptions,
- uint32_t mappingOptions,
- IOMapper * mapper,
- void * refCon);
+ const SegmentOptions * segmentOptions,
+ uint32_t mappingOptions,
+ IOMapper * mapper,
+ void * refCon);

/*! @function withRefCon
- @abstract Creates and initializes an unspecified IODMACommand.
- @discussion Factory function to create and initialize an unspecified IODMACommand. prepareWithSpecification() must be used to prepare the IODMACommand before use.
- @param refCon Reference Constant
- @result Returns a new IODMACommand if successfully created and initialized, 0 otherwise.
-*/
- static IODMACommand * withRefCon(void * refCon);
+ * @abstract Creates and initializes an unspecified IODMACommand.
+ * @discussion Factory function to create and initialize an unspecified IODMACommand. prepareWithSpecification() must be used to prepare the IODMACommand before use.
+ * @param refCon Reference Constant
+ * @result Returns a new IODMACommand if successfully created and initialized, 0 otherwise.
+ */
+ static IODMACommand * withRefCon(void * refCon);

/*!
- @function cloneCommand
- @abstract Creates a new command based on the specification of the current one.
- @discussion Factory function to create and initialise an IODMACommand in one operation. The current command's specification will be duplicated in the new object, but however none of its state will be duplicated. This means that it is safe to clone a command even if it is currently active and running, however you must be certain that the command to be duplicated does have a valid reference for the duration.
- @result Returns a new IODMACommand if successfully created and initialised, 0 otherwise.
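The weak-link-then-clone pattern described in the weakWithSpecification discussion, as a short sketch; the constraint values and makePrototypeCommand are illustrative assumptions:

    #include <IOKit/IODMACommand.h>

    // Build one prototype through the weak-linkable entry point, then stamp out
    // clones for concurrent I/Os. cloneCommand() duplicates the specification
    // only, never the prepared state, so the prototype can stay idle.
    static IODMACommand *
    makePrototypeCommand(void)
    {
        IODMACommand *prototype = NULL;
        IOReturn ret = IODMACommand::weakWithSpecification(
            &prototype,
            kIODMACommandOutputHost64,
            32,                         // numAddressBits (assumed)
            65536,                      // maxSegmentSize (assumed)
            IODMACommand::kMapped);
        return (ret == kIOReturnSuccess) ? prototype : NULL;
    }

    // Per-I/O use: IODMACommand *cmd = prototype->cloneCommand();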
-*/
- virtual IODMACommand *cloneCommand(void *refCon = 0);
+ * @function cloneCommand
+ * @abstract Creates a new command based on the specification of the current one.
+ * @discussion Factory function to create and initialise an IODMACommand in one operation. The current command's specification will be duplicated in the new object, but none of its state will be duplicated. This means that it is safe to clone a command even if it is currently active and running, however you must be certain that the command to be duplicated does have a valid reference for the duration.
+ * @result Returns a new IODMACommand if successfully created and initialised, 0 otherwise.
+ */
+ virtual IODMACommand *cloneCommand(void *refCon = 0);

/*! @function initWithSpecification
- @abstract Primary initializer for the IODMACommand class.
- @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided.
- @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware. A 0 implies no-restriction other than that implied by the output segment function.
- @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0 which means any size.
- @param mappingOptions is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance if your hardware only supports 32 bits but must run on machines with > 4G of RAM some mapping will be required. Number of bits will be specified in numAddressBits, see below.This parameter can take 3 values:- kNonCoherent - used for non-coherent hardware transfers, Mapped - Validate that all I/O bus generated addresses are within the number of addressing bits specified, Bypassed indicates that bypassed addressing is required, this is used when the hardware transferes are into coherent memory but no mapping is required. See also prepare() for failure cases.
- @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum.
- @param alignment Alignment restriction, in bytes, on I/O bus addresses. Defaults to single byte alignment.
- @param mapper For mapping types kMapped & kBypassed mapper is used to define the hardware that will perform the mapping, defaults to the system mapper.
- @param refCon Reference Constant
- @result Can fail if the mapping type is not recognised, if one of the 3 mandatory parameters are set to 0, if a 32 bit output function is selected when more than 32 bits of address is required or, if kBypassed is requested on a machine that doesn't support bypassing. Returns true otherwise.
-*/
- virtual bool initWithSpecification( SegmentFunction outSegFunc,
- UInt8 numAddressBits,
- UInt64 maxSegmentSize,
- MappingOptions mappingOptions = kMapped,
- UInt64 maxTransferSize = 0,
- UInt32 alignment = 1,
- IOMapper *mapper = 0,
- void *refCon = 0);
+ * @abstract Primary initializer for the IODMACommand class.
+ * @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided.
+ * @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware. A 0 implies no-restriction other than that implied by the output segment function.
+ * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0 which means any size.
+ * @param mappingOptions is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance if your hardware only supports 32 bits but must run on machines with > 4G of RAM some mapping will be required. Number of bits will be specified in numAddressBits, see below. This parameter can take 3 values: kNonCoherent - used for non-coherent hardware transfers, Mapped - Validate that all I/O bus generated addresses are within the number of addressing bits specified, Bypassed indicates that bypassed addressing is required, this is used when the hardware transfers are into coherent memory but no mapping is required. See also prepare() for failure cases.
+ * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum.
+ * @param alignment Alignment restriction, in bytes, on I/O bus addresses. Defaults to single byte alignment.
+ * @param mapper For mapping types kMapped & kBypassed mapper is used to define the hardware that will perform the mapping, defaults to the system mapper.
+ * @param refCon Reference Constant
+ * @result Can fail if the mapping type is not recognised, if one of the 3 mandatory parameters is set to 0, if a 32 bit output function is selected when more than 32 bits of address is required, or if kBypassed is requested on a machine that doesn't support bypassing. Returns true otherwise.
+ */
+ virtual bool initWithSpecification( SegmentFunction outSegFunc,
+ UInt8 numAddressBits,
+ UInt64 maxSegmentSize,
+ MappingOptions mappingOptions = kMapped,
+ UInt64 maxTransferSize = 0,
+ UInt32 alignment = 1,
+ IOMapper *mapper = 0,
+ void *refCon = 0);

/*! @function setMemoryDescriptor
- @abstract Sets and resets the DMACommand's current memory descriptor
- @discussion The DMA command will configure itself based on the information that it finds in the memory descriptor. It looks for things like the direction of the memory descriptor and whether the current memory descriptor is already mapped into some IOMMU. As a programmer convenience it can also prepare the DMA command immediately. See prepare(). Note the IODMACommand is designed to used multiple times with a succession of memory descriptors, making the pooling of commands possible. It is an error though to attempt to reset a currently prepared() DMA command. Warning: This routine may block so never try to autoprepare an IODMACommand while in a gated context, i.e. one of the WorkLoops action call outs.
- @param mem A pointer to the current I/Os memory descriptor.
- @param autoPrepare An optional boolean variable that will call the prepare() function automatically after the memory descriptor is processed. Defaults to true.
- @result Returns kIOReturnSuccess, kIOReturnBusy if currently prepared, kIOReturnNoSpace if the length(mem) >= Maximum Transfer Size or the error codes returned by prepare() (qv).
-*/
- virtual IOReturn setMemoryDescriptor(const IOMemoryDescriptor *mem,
- bool autoPrepare = true);
+ * @abstract Sets and resets the DMACommand's current memory descriptor
+ * @discussion The DMA command will configure itself based on the information that it finds in the memory descriptor. It looks for things like the direction of the memory descriptor and whether the current memory descriptor is already mapped into some IOMMU. As a programmer convenience it can also prepare the DMA command immediately. See prepare().
Note the IODMACommand is designed to be used multiple times with a succession of memory descriptors, making the pooling of commands possible. It is an error though to attempt to reset a currently prepared() DMA command. Warning: This routine may block so never try to autoprepare an IODMACommand while in a gated context, i.e. one of the WorkLoops action call outs.
+ * @param mem A pointer to the current I/Os memory descriptor.
+ * @param autoPrepare An optional boolean variable that will call the prepare() function automatically after the memory descriptor is processed. Defaults to true.
+ * @result Returns kIOReturnSuccess, kIOReturnBusy if currently prepared, kIOReturnNoSpace if the length(mem) >= Maximum Transfer Size or the error codes returned by prepare() (qv).
+ */
+ virtual IOReturn setMemoryDescriptor(const IOMemoryDescriptor *mem,
+ bool autoPrepare = true);

/*! @function clearMemoryDescriptor
- @abstract Clears the DMACommand's current memory descriptor
- @discussion completes and invalidates the cache if the DMA command is currently active, copies all data from bounce buffers if necessary and releases all resources acquired during setMemoryDescriptor.
- @param autoComplete An optional boolean variable that will call the complete() function automatically before the memory descriptor is processed. Defaults to true.
-*/
- virtual IOReturn clearMemoryDescriptor(bool autoComplete = true);
+ * @abstract Clears the DMACommand's current memory descriptor
+ * @discussion Completes and invalidates the cache if the DMA command is currently active, copies all data from bounce buffers if necessary and releases all resources acquired during setMemoryDescriptor.
+ * @param autoComplete An optional boolean variable that will call the complete() function automatically before the memory descriptor is processed. Defaults to true.
+ */
+ virtual IOReturn clearMemoryDescriptor(bool autoComplete = true);

/*! @function getMemoryDescriptor
- @abstract Get the current memory descriptor
-*/
- virtual const IOMemoryDescriptor *getMemoryDescriptor() const;
+ * @abstract Get the current memory descriptor
+ */
+ virtual const IOMemoryDescriptor *getMemoryDescriptor() const;

/*! @function getIOMemoryDescriptor
- @abstract Get the memory descriptor to be used for DMA
-*/
- IOMemoryDescriptor * getIOMemoryDescriptor() const;
+ * @abstract Get the memory descriptor to be used for DMA
+ */
+ IOMemoryDescriptor * getIOMemoryDescriptor() const;

/*! @function prepare
- @abstract Prepare the memory for an I/O transfer.
- @discussion Allocate the mapping resources neccessary for this transfer, specifying a sub range of the IOMemoryDescriptor that will be the target of the I/O. The complete() method frees these resources. Data may be copied to buffers for kIODirectionOut memory descriptors, depending on hardware mapping resource availabilty or alignment restrictions. It should be noted that the this function may block and should only be called on the clients context, i.e never call this routine while gated; also the call itself is not thread safe though this should be an issue as each IODMACommand is independant.
- @param offset defines the starting offset in the memory descriptor the DMA command will operate on. genIOVMSegments will produce its results based on the offset and length passed to the prepare method.
- @param length defines the ending position in the memory descriptor the DMA command will operate on. genIOVMSegments will produce its results based on the offset and length passed to the prepare method.
- @param flushCache Flush the caches for the memory descriptor and make certain that the memory cycles are complete. Defaults to true for kNonCoherent and is ignored by the other types.
- @param synchronize Copy any buffered data back from the target IOMemoryDescriptor. Defaults to true, if synchronize() is being used to explicitly copy data, passing false may avoid an unneeded copy.
- @result An IOReturn code. */
+ * @abstract Prepare the memory for an I/O transfer.
+ * @discussion Allocate the mapping resources necessary for this transfer, specifying a sub range of the IOMemoryDescriptor that will be the target of the I/O. The complete() method frees these resources. Data may be copied to buffers for kIODirectionOut memory descriptors, depending on hardware mapping resource availability or alignment restrictions. It should be noted that this function may block and should only be called on the client's context, i.e. never call this routine while gated; also the call itself is not thread safe though this should not be an issue as each IODMACommand is independent.
+ * @param offset defines the starting offset in the memory descriptor the DMA command will operate on. genIOVMSegments will produce its results based on the offset and length passed to the prepare method.
+ * @param length defines the ending position in the memory descriptor the DMA command will operate on. genIOVMSegments will produce its results based on the offset and length passed to the prepare method.
+ * @param flushCache Flush the caches for the memory descriptor and make certain that the memory cycles are complete. Defaults to true for kNonCoherent and is ignored by the other types.
+ * @param synchronize Copy any buffered data back from the target IOMemoryDescriptor. Defaults to true, if synchronize() is being used to explicitly copy data, passing false may avoid an unneeded copy.
+ * @result An IOReturn code. */

- virtual IOReturn prepare(UInt64 offset = 0, UInt64 length = 0, bool flushCache = true, bool synchronize = true);
+ virtual IOReturn prepare(UInt64 offset = 0, UInt64 length = 0, bool flushCache = true, bool synchronize = true);

/*! @function complete
- @abstract Complete processing of DMA mappings after an I/O transfer is finished.
- @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer
- @param invalidateCache Invalidate the caches for the memory descriptor. Defaults to true for kNonCoherent and is ignored by the other types.
- @param synchronize Copy any buffered data back to the target IOMemoryDescriptor. Defaults to true, if synchronize() is being used to explicitly copy data, passing false may avoid an unneeded copy.
- @result kIOReturnNotReady if not prepared, kIOReturnSuccess otherwise. */
+ * @abstract Complete processing of DMA mappings after an I/O transfer is finished.
+ * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer.
+ * @param invalidateCache Invalidate the caches for the memory descriptor. Defaults to true for kNonCoherent and is ignored by the other types.
+ * @param synchronize Copy any buffered data back to the target IOMemoryDescriptor. Defaults to true, if synchronize() is being used to explicitly copy data, passing false may avoid an unneeded copy.
+ * @result kIOReturnNotReady if not prepared, kIOReturnSuccess otherwise.
*/
- virtual IOReturn complete(bool invalidateCache = true, bool synchronize = true);
+ virtual IOReturn complete(bool invalidateCache = true, bool synchronize = true);

/*! @function synchronize
- @abstract Bring IOMemoryDescriptor and IODMACommand buffers into sync.
- @discussion This method should not be called unless a prepare was previously issued. If needed a caller may synchronize any IODMACommand buffers with the original IOMemoryDescriptor buffers.
- @param options Specifies the direction of the copy:
- kIODirectionOut copy IOMemoryDesciptor memory to any IODMACommand buffers. By default this action takes place automatically at prepare().
- kIODirectionIn copy any IODMACommand buffers back to the IOMemoryDescriptor. By default this action takes place automatically at complete().
- kForceDoubleBuffer copy the entire prepared range to a new page aligned buffer.
- @result kIOReturnNotReady if not prepared, kIOReturnBadArgument if invalid options are passed, kIOReturnSuccess otherwise. */
+ * @abstract Bring IOMemoryDescriptor and IODMACommand buffers into sync.
+ * @discussion This method should not be called unless a prepare was previously issued. If needed a caller may synchronize any IODMACommand buffers with the original IOMemoryDescriptor buffers.
+ * @param options Specifies the direction of the copy:
+ * kIODirectionOut copy IOMemoryDescriptor memory to any IODMACommand buffers. By default this action takes place automatically at prepare().
+ * kIODirectionIn copy any IODMACommand buffers back to the IOMemoryDescriptor. By default this action takes place automatically at complete().
+ * kForceDoubleBuffer copy the entire prepared range to a new page aligned buffer.
+ * @result kIOReturnNotReady if not prepared, kIOReturnBadArgument if invalid options are passed, kIOReturnSuccess otherwise. */

- virtual IOReturn synchronize(IOOptionBits options);
+ virtual IOReturn synchronize(IOOptionBits options);

/*! @function genIOVMSegments
- @abstract Generates a physical scatter/gather for the current DMA command
- @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. The constraints that are set during initialisation will be respected. This function maintains the state across multiple calls for efficiency. However the state is discarded if the new offset is not the expected one.
- @param offset input/output parameter, defines the starting and ending offset in the memory descriptor, relative to any offset passed to the prepare() method.
- @param segments Void pointer to base of output physical scatter/gather list. Always passed directly onto the SegmentFunction.
- @param numSegments Input/output parameter Number of segments that can fit in the segment array and returns number of segments generated.
- @result kIOReturnSuccess on success, kIOReturnOverrun if the memory descriptor is exhausted, kIOReturnMessageTooLarge if the output segment function's address bits has insufficient resolution for a segment, kIOReturnNotReady if the DMA command has not be prepared, kIOReturnBadArgument if the DMA command doesn't have a memory descriptor yet or some of the parameters are NULL and kIOReturnNotReady if the DMA command is not prepared.
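Taken together, the setMemoryDescriptor()/prepare()/complete() contract documented above gives each I/O the following shape. A sketch only; the hardware-programming step is elided and runOneTransfer is a hypothetical driver routine:

    #include <IOKit/IODMACommand.h>
    #include <IOKit/IOMemoryDescriptor.h>

    // One transfer through an already-specified command. prepare() and
    // complete() must pair, and neither should be issued from a gated
    // (work-loop action) context because prepare() may block.
    static IOReturn
    runOneTransfer(IODMACommand *cmd, IOMemoryDescriptor *md)
    {
        IOReturn ret = cmd->setMemoryDescriptor(md, false);  // defer prepare()
        if (ret != kIOReturnSuccess) {
            return ret;
        }
        ret = cmd->prepare();                 // allocate mapping resources
        if (ret == kIOReturnSuccess) {
            // ... generate segments, program the hardware, wait for the I/O ...
            cmd->complete();                  // copy back / invalidate, free mappings
        }
        cmd->clearMemoryDescriptor();         // the command is now reusable
        return ret;
    }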
-*/
- virtual IOReturn genIOVMSegments(UInt64 *offset,
- void *segments,
- UInt32 *numSegments);
+ * @abstract Generates a physical scatter/gather for the current DMA command
+ * @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. The constraints that are set during initialisation will be respected. This function maintains the state across multiple calls for efficiency. However the state is discarded if the new offset is not the expected one.
+ * @param offset input/output parameter, defines the starting and ending offset in the memory descriptor, relative to any offset passed to the prepare() method.
+ * @param segments Void pointer to base of output physical scatter/gather list. Always passed directly onto the SegmentFunction.
+ * @param numSegments Input/output parameter Number of segments that can fit in the segment array and returns number of segments generated.
+ * @result kIOReturnSuccess on success, kIOReturnOverrun if the memory descriptor is exhausted, kIOReturnMessageTooLarge if the output segment function's address bits have insufficient resolution for a segment, kIOReturnNotReady if the DMA command has not been prepared, kIOReturnBadArgument if the DMA command doesn't have a memory descriptor yet or some of the parameters are NULL.
+ */
+ virtual IOReturn genIOVMSegments(UInt64 *offset,
+ void *segments,
+ UInt32 *numSegments);

private:
- virtual UInt64 transfer( IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length );
+ virtual UInt64 transfer( IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length );

public:

/*! @function writeBytes
- @abstract Copy data to the IODMACommand's buffer from the specified buffer.
- @discussion This method copies data to the IODMACommand's memory at the given offset, from the caller's buffer. The IODMACommand must be prepared, and the offset is relative to the prepared offset.
- @param offset A byte offset into the IODMACommand's memory, relative to the prepared offset.
- @param bytes The caller supplied buffer to copy the data from.
- @param length The length of the data to copy.
- @result The number of bytes copied, zero will be returned if the specified offset is beyond the prepared length of the IODMACommand. */
+ * @abstract Copy data to the IODMACommand's buffer from the specified buffer.
+ * @discussion This method copies data to the IODMACommand's memory at the given offset, from the caller's buffer. The IODMACommand must be prepared, and the offset is relative to the prepared offset.
+ * @param offset A byte offset into the IODMACommand's memory, relative to the prepared offset.
+ * @param bytes The caller supplied buffer to copy the data from.
+ * @param length The length of the data to copy.
+ * @result The number of bytes copied, zero will be returned if the specified offset is beyond the prepared length of the IODMACommand. */

- UInt64 writeBytes(UInt64 offset, const void *bytes, UInt64 length);
+ UInt64 writeBytes(UInt64 offset, const void *bytes, UInt64 length);

/*! @function readBytes
- @abstract Copy data from the IODMACommand's buffer to the specified buffer.
- @discussion This method copies data from the IODMACommand's memory at the given offset, to the caller's buffer. The IODMACommand must be prepared, and the offset is relative to the prepared offset.
- @param offset A byte offset into the IODMACommand's memory, relative to the prepared offset.
- @param bytes The caller supplied buffer to copy the data to. - @param length The length of the data to copy. - @result The number of bytes copied, zero will be returned if the specified offset is beyond the prepared length of the IODMACommand. */ + * @abstract Copy data from the IODMACommand's buffer to the specified buffer. + * @discussion This method copies data from the IODMACommand's memory at the given offset, to the caller's buffer. The IODMACommand must be prepared, and the offset is relative to the prepared offset. + * @param offset A byte offset into the IODMACommand's memory, relative to the prepared offset. + * @param bytes The caller supplied buffer to copy the data to. + * @param length The length of the data to copy. + * @result The number of bytes copied, zero will be returned if the specified offset is beyond the prepared length of the IODMACommand. */ - UInt64 readBytes(UInt64 offset, void *bytes, UInt64 length); + UInt64 readBytes(UInt64 offset, void *bytes, UInt64 length); /*! @function gen32IOVMSegments - @abstract Helper function for a type checked call to genIOVMSegments(qv), for use with an IODMACommand set up with the output function kIODMACommandOutputHost32, kIODMACommandOutputBig32, or kIODMACommandOutputLittle32. If the output function of the IODMACommand is not a 32 bit function, results will be incorrect. -*/ - inline IOReturn gen32IOVMSegments(UInt64 *offset, - Segment32 *segments, - UInt32 *numSegments) - { return genIOVMSegments(offset, segments, numSegments); } + * @abstract Helper function for a type checked call to genIOVMSegments(qv), for use with an IODMACommand set up with the output function kIODMACommandOutputHost32, kIODMACommandOutputBig32, or kIODMACommandOutputLittle32. If the output function of the IODMACommand is not a 32 bit function, results will be incorrect. + */ + inline IOReturn + gen32IOVMSegments(UInt64 *offset, + Segment32 *segments, + UInt32 *numSegments) + { + return genIOVMSegments(offset, segments, numSegments); + } /*! @function gen64IOVMSegments - @abstract Helper function for a type checked call to genIOVMSegments(qv), for use with an IODMACommand set up with the output function kIODMACommandOutputHost64, kIODMACommandOutputBig64, or kIODMACommandOutputLittle64. If the output function of the IODMACommand is not a 64 bit function, results will be incorrect. -*/ - inline IOReturn gen64IOVMSegments(UInt64 *offset, - Segment64 *segments, - UInt32 *numSegments) - { return genIOVMSegments(offset, segments, numSegments); } - - IOReturn - genIOVMSegments(SegmentFunction segmentFunction, - UInt64 *offsetP, - void *segmentsP, - UInt32 *numSegmentsP); - - virtual void free() APPLE_KEXT_OVERRIDE; + * @abstract Helper function for a type checked call to genIOVMSegments(qv), for use with an IODMACommand set up with the output function kIODMACommandOutputHost64, kIODMACommandOutputBig64, or kIODMACommandOutputLittle64. If the output function of the IODMACommand is not a 64 bit function, results will be incorrect. 
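A driver-side loop matching the genIOVMSegments()/gen64IOVMSegments() contract above; programOneDescriptor and the batch size of 32 are assumptions for illustration:

    #include <IOKit/IODMACommand.h>

    extern void programOneDescriptor(UInt64 busAddr, UInt64 len); // hypothetical hook

    // Walk a prepared command's range in batches of Segment64 entries. 'length'
    // is the same length that was passed to prepare(); 'offset' is an in/out
    // cursor relative to the prepared offset.
    static IOReturn
    emitSegments(IODMACommand *cmd, UInt64 length)
    {
        UInt64 offset = 0;
        while (offset < length) {
            IODMACommand::Segment64 segs[32];
            UInt32 numSegs = 32;
            IOReturn ret = cmd->gen64IOVMSegments(&offset, segs, &numSegs);
            if (ret != kIOReturnSuccess) {
                return ret;
            }
            if (numSegs == 0) {
                break;                        // descriptor exhausted
            }
            for (UInt32 i = 0; i < numSegs; i++) {
                programOneDescriptor(segs[i].fIOVMAddr, segs[i].fLength);
            }
        }
        return kIOReturnSuccess;
    }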
+ */ + inline IOReturn + gen64IOVMSegments(UInt64 *offset, + Segment64 *segments, + UInt32 *numSegments) + { + return genIOVMSegments(offset, segments, numSegments); + } + + IOReturn + genIOVMSegments(SegmentFunction segmentFunction, + UInt64 *offsetP, + void *segmentsP, + UInt32 *numSegmentsP); + + virtual void free() APPLE_KEXT_OVERRIDE; private: - IOReturn setSpecification(SegmentFunction outSegFunc, - const SegmentOptions * segmentOptions, - uint32_t mappingOptions, - IOMapper * mapper); - - typedef IOReturn (*InternalSegmentFunction)( - void *reference, - IODMACommand *target, - Segment64 segment, - void *segments, - UInt32 segmentIndex); - - IOReturn genIOVMSegments(uint32_t op, - InternalSegmentFunction outSegFunc, - void *reference, - UInt64 *offsetP, - void *segmentsP, - UInt32 *numSegmentsP); - - static IOReturn clientOutputSegment( - void *reference, IODMACommand *target, - Segment64 segment, void *vSegList, UInt32 outSegIndex); - - static IOReturn segmentOp( - void *reference, - IODMACommand *target, - Segment64 segment, - void *segments, - UInt32 segmentIndex); - IOReturn walkAll(UInt8 op); + IOReturn setSpecification(SegmentFunction outSegFunc, + const SegmentOptions * segmentOptions, + uint32_t mappingOptions, + IOMapper * mapper); + + typedef IOReturn (*InternalSegmentFunction)( + void *reference, + IODMACommand *target, + Segment64 segment, + void *segments, + UInt32 segmentIndex); + + IOReturn genIOVMSegments(uint32_t op, + InternalSegmentFunction outSegFunc, + void *reference, + UInt64 *offsetP, + void *segmentsP, + UInt32 *numSegmentsP); + + static IOReturn clientOutputSegment( + void *reference, IODMACommand *target, + Segment64 segment, void *vSegList, UInt32 outSegIndex); + + static IOReturn segmentOp( + void *reference, + IODMACommand *target, + Segment64 segment, + void *segments, + UInt32 segmentIndex); + IOReturn walkAll(UInt8 op); public: /*! @function prepareWithSpecification - @abstract Prepare the memory for an I/O transfer with a new specification. - @discussion Allocate the mapping resources neccessary for this transfer, specifying a sub range of the IOMemoryDescriptor that will be the target of the I/O. The complete() method frees these resources. Data may be copied to buffers for kIODirectionOut memory descriptors, depending on hardware mapping resource availabilty or alignment restrictions. It should be noted that the this function may block and should only be called on the clients context, i.e never call this routine while gated; also the call itself is not thread safe though this should be an issue as each IODMACommand is independant. - @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided. - @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware. A 0 implies no-restriction other than that implied by the output segment function. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0 which means any size. - @param mappingOptions is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance if your hardware only supports 32 bits but must run on machines with > 4G of RAM some mapping will be required. 
public:

/*! @function prepareWithSpecification
- @abstract Prepare the memory for an I/O transfer with a new specification.
- @discussion Allocate the mapping resources neccessary for this transfer, specifying a sub range of the IOMemoryDescriptor that will be the target of the I/O. The complete() method frees these resources. Data may be copied to buffers for kIODirectionOut memory descriptors, depending on hardware mapping resource availabilty or alignment restrictions. It should be noted that the this function may block and should only be called on the clients context, i.e never call this routine while gated; also the call itself is not thread safe though this should be an issue as each IODMACommand is independant.
- @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided.
- @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware. A 0 implies no-restriction other than that implied by the output segment function.
- @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0 which means any size.
- @param mappingOptions is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance if your hardware only supports 32 bits but must run on machines with > 4G of RAM some mapping will be required. Number of bits will be specified in numAddressBits, see below.This parameter can take 3 values:- kNonCoherent - used for non-coherent hardware transfers, Mapped - Validate that all I/O bus generated addresses are within the number of addressing bits specified, Bypassed indicates that bypassed addressing is required, this is used when the hardware transferes are into coherent memory but no mapping is required. See also prepare() for failure cases.
- @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum.
- @param alignment Alignment restriction, in bytes, on I/O bus addresses. Defaults to single byte alignment.
- @param mapper For mapping types kMapped & kBypassed mapper is used to define the hardware that will perform the mapping, defaults to the system mapper.
- @param offset defines the starting offset in the memory descriptor the DMA command will operate on. genIOVMSegments will produce its results based on the offset and length passed to the prepare method.
- @param length defines the ending position in the memory descriptor the DMA command will operate on. genIOVMSegments will produce its results based on the offset and length passed to the prepare method.
- @param flushCache Flush the caches for the memory descriptor and make certain that the memory cycles are complete. Defaults to true for kNonCoherent and is ignored by the other types.
- @param synchronize Copy any buffered data back from the target IOMemoryDescriptor. Defaults to true, if synchronize() is being used to explicitly copy data, passing false may avoid an unneeded copy.
- @result An IOReturn code. Can fail if the mapping type is not recognised, if one of the 3 mandatory parameters are set to 0, if a 32 bit output function is selected when more than 32 bits of address is required or, if kBypassed is requested on a machine that doesn't support bypassing.
-*/
-
-    virtual IOReturn prepareWithSpecification(SegmentFunction outSegFunc,
-            UInt8 numAddressBits,
-            UInt64 maxSegmentSize,
-            MappingOptions mappingOptions = kMapped,
-            UInt64 maxTransferSize = 0,
-            UInt32 alignment = 1,
-            IOMapper *mapper = 0,
-            UInt64 offset = 0,
-            UInt64 length = 0,
-            bool flushCache = true,
-            bool synchronize = true);
-
-    static IOReturn transferSegment(void *reference,
-            IODMACommand *target,
-            Segment64 segment,
-            void *segments,
-            UInt32 segmentIndex);
+ * @abstract Prepare the memory for an I/O transfer with a new specification.
+ * @discussion Allocate the mapping resources necessary for this transfer, specifying a sub range of the IOMemoryDescriptor that will be the target of the I/O. The complete() method frees these resources. Data may be copied to buffers for kIODirectionOut memory descriptors, depending on hardware mapping resource availability or alignment restrictions. Note that this function may block and should only be called on the client's context, i.e. never call this routine while gated; also, the call itself is not thread safe, though this should not be an issue as each IODMACommand is independent.
+ * @param outSegFunc SegmentFunction to call to output one physical segment. A set of nine commonly required segment functions are provided.
+ * @param numAddressBits Number of bits that the hardware uses on its internal address bus. Typically 32 but may be more on modern hardware. A 0 implies no restriction other than that implied by the output segment function.
+ * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0 which means any size.
+ * @param mappingOptions is the type of mapping that is required to translate an IOMemoryDescriptor into the desired number of bits. For instance, if your hardware only supports 32 bits but must run on machines with > 4G of RAM, some mapping will be required. The number of bits will be specified in numAddressBits, see below. This parameter can take 3 values: kNonCoherent - used for non-coherent hardware transfers; kMapped - validate that all I/O bus generated addresses are within the number of addressing bits specified; kBypassed - bypassed addressing is required, used when the hardware transfers are into coherent memory but no mapping is required. See also prepare() for failure cases.
+ * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum.
+ * @param alignment Alignment restriction, in bytes, on I/O bus addresses. Defaults to single byte alignment.
+ * @param mapper For mapping types kMapped & kBypassed, mapper is used to define the hardware that will perform the mapping; defaults to the system mapper.
+ * @param offset defines the starting offset in the memory descriptor the DMA command will operate on. genIOVMSegments will produce its results based on the offset and length passed to the prepare method.
+ * @param length defines the ending position in the memory descriptor the DMA command will operate on. genIOVMSegments will produce its results based on the offset and length passed to the prepare method.
+ * @param flushCache Flush the caches for the memory descriptor and make certain that the memory cycles are complete. Defaults to true for kNonCoherent and is ignored by the other types.
+ * @param synchronize Copy any buffered data back from the target IOMemoryDescriptor. Defaults to true; if synchronize() is being used to explicitly copy data, passing false may avoid an unneeded copy.
+ * @result An IOReturn code. Can fail if the mapping type is not recognised, if one of the 3 mandatory parameters is set to 0, if a 32 bit output function is selected when more than 32 bits of address are required, or if kBypassed is requested on a machine that doesn't support bypassing.
+ */
+
+	virtual IOReturn prepareWithSpecification(SegmentFunction outSegFunc,
+	    UInt8 numAddressBits,
+	    UInt64 maxSegmentSize,
+	    MappingOptions mappingOptions = kMapped,
+	    UInt64 maxTransferSize = 0,
+	    UInt32 alignment = 1,
+	    IOMapper *mapper = 0,
+	    UInt64 offset = 0,
+	    UInt64 length = 0,
+	    bool flushCache = true,
+	    bool synchronize = true);
+
+	static IOReturn transferSegment(void *reference,
+	    IODMACommand *target,
+	    Segment64 segment,
+	    void *segments,
+	    UInt32 segmentIndex);

/*! @function getPreparedOffsetAndLength
- @abstract Returns the offset and length into the target IOMemoryDescriptor of a prepared IODDMACommand.
- @discussion If successfully prepared, returns the offset and length into the IOMemoryDescriptor. Will fail for an unprepared IODMACommand.
- @param offset returns the starting offset in the memory descriptor the DMA command was prepared with. Pass NULL for don't care.
- @param length returns the length in the memory descriptor the DMA command was prepared with. Pass NULL for don't care.
- @result An IOReturn code. kIOReturnNotReady if the IODMACommand is not prepared. */
+ * @abstract Returns the offset and length into the target IOMemoryDescriptor of a prepared IODMACommand.
+ * @discussion If successfully prepared, returns the offset and length into the IOMemoryDescriptor. Will fail for an unprepared IODMACommand.
+ * @param offset returns the starting offset in the memory descriptor the DMA command was prepared with. Pass NULL for don't care.
+ * @param length returns the length in the memory descriptor the DMA command was prepared with. Pass NULL for don't care.
+ * @result An IOReturn code. kIOReturnNotReady if the IODMACommand is not prepared. */

-    virtual IOReturn getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length);
+	virtual IOReturn getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length);

-    UInt8 getNumAddressBits(void);
-    UInt32 getAlignment(void);
-    uint32_t getAlignmentLength(void);
-    uint32_t getAlignmentInternalSegments(void);
+	UInt8 getNumAddressBits(void);
+	UInt32 getAlignment(void);
+	uint32_t getAlignmentLength(void);
+	uint32_t getAlignmentInternalSegments(void);

/*! @function initWithRefCon
- @abstract Secondary initializer for the IODMACommand class.
- @param refCon Reference Constant
- @result Can fail if super init fails. Returns true otherwise.
-*/
-
-    virtual
-    bool initWithRefCon(void * refCon = 0);
-
-    virtual
-    bool initWithSpecification(SegmentFunction outSegFunc,
-            const SegmentOptions * segmentOptions,
-            uint32_t mappingOptions,
-            IOMapper * mapper,
-            void * refCon);
-
-    virtual
-    IOReturn prepareWithSpecification(SegmentFunction outSegFunc,
-            const SegmentOptions * segmentOptions,
-            uint32_t mappingOptions,
-            IOMapper * mapper,
-            uint64_t offset,
-            uint64_t length,
-            bool flushCache = true,
-            bool synchronize = true);
-
-    virtual
-    IOBufferMemoryDescriptor * createCopyBuffer(IODirection direction, UInt64 length);
-
+ * @abstract Secondary initializer for the IODMACommand class.
+ * @param refCon Reference Constant
+ * @result Can fail if super init fails. Returns true otherwise.
+ */
+
+	virtual
+	bool initWithRefCon(void * refCon = 0);
+
+	virtual
+	bool initWithSpecification(SegmentFunction outSegFunc,
+	    const SegmentOptions * segmentOptions,
+	    uint32_t mappingOptions,
+	    IOMapper * mapper,
+	    void * refCon);
+
+	virtual
+	IOReturn prepareWithSpecification(SegmentFunction outSegFunc,
+	    const SegmentOptions * segmentOptions,
+	    uint32_t mappingOptions,
+	    IOMapper * mapper,
+	    uint64_t offset,
+	    uint64_t length,
+	    bool flushCache = true,
+	    bool synchronize = true);
+
+	virtual
+	IOBufferMemoryDescriptor * createCopyBuffer(IODirection direction, UInt64 length);
+
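Taken together, the prepareWithSpecification() overloads and the gen32/gen64 helpers above define the usual prepare, generate, complete cycle. A minimal sketch under stated assumptions: the driver already holds an IOMemoryDescriptor md for the transfer, and it uses the withSpecification() and setMemoryDescriptor() methods declared earlier in this header; all other names are illustrative.

    // Sketch: 64-bit DMA against 'md'.
    IODMACommand *cmd = IODMACommand::withSpecification(
        kIODMACommandOutputHost64,      // outSegFunc
        64,                             // numAddressBits
        0);                             // maxSegmentSize: no limit
    if (cmd && cmd->setMemoryDescriptor(md) == kIOReturnSuccess) {
        UInt64 offset = 0;
        while (offset < md->getLength()) {
            IODMACommand::Segment64 segs[8];
            UInt32 numSegs = 8;
            if (cmd->gen64IOVMSegments(&offset, segs, &numSegs) != kIOReturnSuccess
                || numSegs == 0) {
                break;
            }
            // ... program segs[0 .. numSegs - 1] into the hardware ...
        }
        cmd->clearMemoryDescriptor();   // completes and unmaps
    }
    if (cmd) {
        cmd->release();
    }

Note the contract stated in the discussion above: the offset cursor is advanced by gen64IOVMSegments() itself, and the whole cycle must run on the client's context, never gated.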
private:
-    OSMetaClassDeclareReservedUsed(IODMACommand,  0);
-    OSMetaClassDeclareReservedUsed(IODMACommand,  1);
-    OSMetaClassDeclareReservedUsed(IODMACommand,  2);
-    OSMetaClassDeclareReservedUsed(IODMACommand,  3);
-    OSMetaClassDeclareReservedUsed(IODMACommand,  4);
-    OSMetaClassDeclareReservedUsed(IODMACommand,  5);
-    OSMetaClassDeclareReservedUsed(IODMACommand,  6);
-    OSMetaClassDeclareReservedUnused(IODMACommand,  7);
-    OSMetaClassDeclareReservedUnused(IODMACommand,  8);
-    OSMetaClassDeclareReservedUnused(IODMACommand,  9);
-    OSMetaClassDeclareReservedUnused(IODMACommand, 10);
-    OSMetaClassDeclareReservedUnused(IODMACommand, 11);
-    OSMetaClassDeclareReservedUnused(IODMACommand, 12);
-    OSMetaClassDeclareReservedUnused(IODMACommand, 13);
-    OSMetaClassDeclareReservedUnused(IODMACommand, 14);
-    OSMetaClassDeclareReservedUnused(IODMACommand, 15);
+	OSMetaClassDeclareReservedUsed(IODMACommand, 0);
+	OSMetaClassDeclareReservedUsed(IODMACommand, 1);
+	OSMetaClassDeclareReservedUsed(IODMACommand, 2);
+	OSMetaClassDeclareReservedUsed(IODMACommand, 3);
+	OSMetaClassDeclareReservedUsed(IODMACommand, 4);
+	OSMetaClassDeclareReservedUsed(IODMACommand, 5);
+	OSMetaClassDeclareReservedUsed(IODMACommand, 6);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 7);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 8);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 9);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 10);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 11);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 12);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 13);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 14);
+	OSMetaClassDeclareReservedUnused(IODMACommand, 15);

public:
/*! @var fRefCon Reference Constant, client defined, publicly available */
-    void *fRefCon;
+	void *fRefCon;

protected:
/*! @var fMaxSegmentSize Maximum size of one segment in a scatter/gather list */
-    UInt64 fMaxSegmentSize;
+	UInt64 fMaxSegmentSize;

/*! @var fMaxTransferSize
- Maximum size of a transfer that this memory cursor is allowed to generate */
-    UInt64 fMaxTransferSize;
+ * Maximum size of a transfer that this memory cursor is allowed to generate */
+	UInt64 fMaxTransferSize;

-    UInt32 fAlignMaskLength;
-    UInt32 fAlignMaskInternalSegments;
+	UInt32 fAlignMaskLength;
+	UInt32 fAlignMaskInternalSegments;

/*! @var fMapper
- Client defined mapper. */
-    IOMapper *fMapper;
+ * Client defined mapper. */
+	IOMapper *fMapper;

/*! @var fMemory
- memory descriptor for current I/O. */
-    const IOMemoryDescriptor *fMemory;
+ * memory descriptor for current I/O. */
+	const IOMemoryDescriptor *fMemory;

/*! @var fOutSeg The SegmentFunction to call to output one physical segment */
-    SegmentFunction fOutSeg;
+	SegmentFunction fOutSeg;

/*! @var fAlignMask
- Alignment restriction mask. */
-    UInt32 fAlignMask;
+ * Alignment restriction mask. */
+	UInt32 fAlignMask;

/*! @var fNumAddressBits
- Number of bits that the hardware can address */
-    UInt32 fNumAddressBits;
+ * Number of bits that the hardware can address */
+	UInt32 fNumAddressBits;

/*! @var fNumSegments
- Number of contiguous segments required for the current memory descriptor and desired mapping */
-    UInt32 fNumSegments;
+ * Number of contiguous segments required for the current memory descriptor and desired mapping */
+	UInt32 fNumSegments;

/*! @var fMappingOptions
- What type of I/O virtual address mapping is required for this command */
-    uint32_t fMappingOptions;
+ * What type of I/O virtual address mapping is required for this command */
+	uint32_t fMappingOptions;

/*! @var fActive
- fActive indicates that this DMA command is currently prepared and ready to go */
-    UInt32 fActive;
+ * fActive indicates that this DMA command is currently prepared and ready to go */
+	UInt32 fActive;

/*! @var reserved
- Reserved for future use. (Internal use only) */
-    struct IODMACommandInternal * reserved;
+ * Reserved for future use.
(Internal use only) */ + struct IODMACommandInternal * reserved; }; -IOReturn IODMACommand:: +IOReturn +IODMACommand:: weakWithSpecification(IODMACommand **newCommand, - SegmentFunction outSegFunc, - UInt8 numAddressBits, - UInt64 maxSegmentSize, - MappingOptions mapType, - UInt64 maxTransferSize, - UInt32 alignment, - IOMapper *mapper, - void *refCon) + SegmentFunction outSegFunc, + UInt8 numAddressBits, + UInt64 maxSegmentSize, + MappingOptions mapType, + UInt64 maxTransferSize, + UInt32 alignment, + IOMapper *mapper, + void *refCon) { - if (!newCommand) - return kIOReturnBadArgument; - - IODMACommand *self = (IODMACommand *) - OSMetaClass::allocClassWithName("IODMACommand"); - if (!self) - return kIOReturnUnsupported; - - IOReturn ret; - bool inited = self-> - initWithSpecification(outSegFunc, - numAddressBits, maxSegmentSize, mapType, - maxTransferSize, alignment, mapper, refCon); - if (inited) - ret = kIOReturnSuccess; - else { - self->release(); - self = 0; - ret = kIOReturnError; - } - - *newCommand = self; - return ret; + if (!newCommand) { + return kIOReturnBadArgument; + } + + IODMACommand *self = (IODMACommand *) + OSMetaClass::allocClassWithName("IODMACommand"); + if (!self) { + return kIOReturnUnsupported; + } + + IOReturn ret; + bool inited = self-> + initWithSpecification(outSegFunc, + numAddressBits, maxSegmentSize, mapType, + maxTransferSize, alignment, mapper, refCon); + if (inited) { + ret = kIOReturnSuccess; + } else { + self->release(); + self = 0; + ret = kIOReturnError; + } + + *newCommand = self; + return ret; }; #endif /* !_IODMACOMMAND_H */ - diff --git a/iokit/IOKit/IODMAController.h b/iokit/IOKit/IODMAController.h index e5b8f106d..2d8682cc2 100644 --- a/iokit/IOKit/IODMAController.h +++ b/iokit/IOKit/IODMAController.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,35 +37,35 @@ class IODMAEventSource; class IODMAController : public IOService { - OSDeclareAbstractStructors(IODMAController); - - friend class IODMAEventSource; - - private: - IOService *_provider; - const OSSymbol *_dmaControllerName; - - protected: - virtual void registerDMAController(IOOptionBits options = 0); - virtual IOReturn initDMAChannel(IOService *provider, IODMAEventSource *dmaES, UInt32 *dmaIndex, UInt32 reqIndex) = 0; - virtual IOReturn startDMACommand(UInt32 dmaIndex, IODMACommand *dmaCommand, IODirection direction, - IOByteCount byteCount = 0, IOByteCount byteOffset = 0) = 0; - virtual IOReturn stopDMACommand(UInt32 dmaIndex, bool flush = false, uint64_t timeout = UINT64_MAX) = 0; - virtual void completeDMACommand(IODMAEventSource *dmaES, IODMACommand *dmaCommand); - virtual void notifyDMACommand(IODMAEventSource *dmaES, IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp); - virtual IOReturn queryDMACommand(UInt32 dmaIndex, IODMACommand **dmaCommand, IOByteCount *transferCount, bool waitForIdle = false) = 0; - virtual IOByteCount getFIFODepth(UInt32 dmaIndex, IODirection direction) = 0; - virtual IOReturn setFIFODepth(UInt32 dmaIndex, IOByteCount depth) = 0; - virtual IOByteCount validFIFODepth(UInt32 dmaIndex, IOByteCount depth, IODirection direction) = 0; - virtual IOReturn setFrameSize(UInt32 dmaIndex, UInt8 byteCount) = 0; - virtual IOReturn setDMAConfig(UInt32 dmaIndex, IOService *provider, UInt32 reqIndex) = 0; - virtual bool validDMAConfig(UInt32 dmaIndex, IOService *provider, UInt32 reqIndex) = 0; - - public: - static const OSSymbol *createControllerName(UInt32 phandle); - static IODMAController *getController(IOService *provider, UInt32 dmaIndex); - - virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; + OSDeclareAbstractStructors(IODMAController); + + friend class IODMAEventSource; + +private: + IOService *_provider; + const OSSymbol *_dmaControllerName; + +protected: + virtual void registerDMAController(IOOptionBits options = 0); + virtual IOReturn initDMAChannel(IOService *provider, IODMAEventSource *dmaES, UInt32 *dmaIndex, UInt32 reqIndex) = 0; + virtual IOReturn startDMACommand(UInt32 dmaIndex, IODMACommand *dmaCommand, IODirection direction, + IOByteCount byteCount = 0, IOByteCount byteOffset = 0) = 0; + virtual IOReturn stopDMACommand(UInt32 dmaIndex, bool flush = false, uint64_t timeout = UINT64_MAX) = 0; + virtual void completeDMACommand(IODMAEventSource *dmaES, IODMACommand *dmaCommand); + virtual void notifyDMACommand(IODMAEventSource *dmaES, IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp); + virtual IOReturn queryDMACommand(UInt32 dmaIndex, IODMACommand **dmaCommand, IOByteCount *transferCount, bool waitForIdle = false) = 0; + virtual IOByteCount getFIFODepth(UInt32 dmaIndex, IODirection direction) = 0; + virtual IOReturn setFIFODepth(UInt32 dmaIndex, IOByteCount depth) = 0; + virtual IOByteCount validFIFODepth(UInt32 dmaIndex, IOByteCount depth, IODirection direction) = 0; + virtual IOReturn setFrameSize(UInt32 dmaIndex, UInt8 byteCount) = 0; + virtual IOReturn setDMAConfig(UInt32 dmaIndex, IOService *provider, UInt32 reqIndex) = 0; + virtual bool validDMAConfig(UInt32 dmaIndex, IOService *provider, UInt32 reqIndex) = 0; + +public: + static const OSSymbol *createControllerName(UInt32 phandle); + static IODMAController *getController(IOService *provider, UInt32 dmaIndex); + + virtual bool 
start(IOService *provider) APPLE_KEXT_OVERRIDE; }; diff --git a/iokit/IOKit/IODMAEventSource.h b/iokit/IOKit/IODMAEventSource.h index 9aa34a93f..88ffeed97 100644 --- a/iokit/IOKit/IODMAEventSource.h +++ b/iokit/IOKit/IODMAEventSource.h @@ -2,7 +2,7 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,56 +38,56 @@ class IODMAController; class IODMAEventSource : public IOEventSource { - OSDeclareDefaultStructors(IODMAEventSource); - - friend class IODMAController; - - public: - typedef void (*Action)(OSObject *owner, IODMAEventSource *dmaES, IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp); + OSDeclareDefaultStructors(IODMAEventSource); + + friend class IODMAController; + +public: + typedef void (*Action)(OSObject *owner, IODMAEventSource *dmaES, IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp); #define IODMAEventAction IODMAEventSource::Action - - protected: - virtual void completeDMACommand(IODMACommand *dmaCommand); - virtual void notifyDMACommand(IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp); - - public: - static IODMAEventSource *dmaEventSource(OSObject *owner, - IOService *provider, - Action completion = 0, - Action notification = 0, - UInt32 dmaIndex = 0); - - virtual IOReturn startDMACommand(IODMACommand *dmaCommand, IODirection direction, IOByteCount byteCount = 0, IOByteCount byteOffset = 0); - virtual IOReturn stopDMACommand(bool flush = false, uint64_t timeout = UINT64_MAX); - - virtual IOReturn queryDMACommand(IODMACommand **dmaCommand, IOByteCount *transferCount, bool waitForIdle = false); - - virtual IOByteCount getFIFODepth(IODirection direction = kIODirectionNone); - virtual IOReturn setFIFODepth(IOByteCount depth); - virtual IOByteCount validFIFODepth(IOByteCount depth, IODirection direction); - - virtual IOReturn setFrameSize(UInt8 byteCount); - - virtual IOReturn setDMAConfig(UInt32 dmaIndex); - virtual bool validDMAConfig(UInt32 dmaIndex); - - private: - IOService *dmaProvider; - IODMAController *dmaController; - UInt32 dmaIndex; - queue_head_t dmaCommandsCompleted; - IOSimpleLock *dmaCommandsCompletedLock; - Action dmaCompletionAction; - Action dmaNotificationAction; - bool dmaSynchBusy; - - virtual bool init(OSObject *owner, - IOService *provider, - Action completion = 0, - Action notification = 0, - UInt32 dmaIndex = 0); - virtual bool 
checkForWork(void) APPLE_KEXT_OVERRIDE; - virtual void free(void) APPLE_KEXT_OVERRIDE; + +protected: + virtual void completeDMACommand(IODMACommand *dmaCommand); + virtual void notifyDMACommand(IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp); + +public: + static IODMAEventSource *dmaEventSource(OSObject *owner, + IOService *provider, + Action completion = 0, + Action notification = 0, + UInt32 dmaIndex = 0); + + virtual IOReturn startDMACommand(IODMACommand *dmaCommand, IODirection direction, IOByteCount byteCount = 0, IOByteCount byteOffset = 0); + virtual IOReturn stopDMACommand(bool flush = false, uint64_t timeout = UINT64_MAX); + + virtual IOReturn queryDMACommand(IODMACommand **dmaCommand, IOByteCount *transferCount, bool waitForIdle = false); + + virtual IOByteCount getFIFODepth(IODirection direction = kIODirectionNone); + virtual IOReturn setFIFODepth(IOByteCount depth); + virtual IOByteCount validFIFODepth(IOByteCount depth, IODirection direction); + + virtual IOReturn setFrameSize(UInt8 byteCount); + + virtual IOReturn setDMAConfig(UInt32 dmaIndex); + virtual bool validDMAConfig(UInt32 dmaIndex); + +private: + IOService *dmaProvider; + IODMAController *dmaController; + UInt32 dmaIndex; + queue_head_t dmaCommandsCompleted; + IOSimpleLock *dmaCommandsCompletedLock; + Action dmaCompletionAction; + Action dmaNotificationAction; + bool dmaSynchBusy; + + virtual bool init(OSObject *owner, + IOService *provider, + Action completion = 0, + Action notification = 0, + UInt32 dmaIndex = 0); + virtual bool checkForWork(void) APPLE_KEXT_OVERRIDE; + virtual void free(void) APPLE_KEXT_OVERRIDE; }; #endif /* _IOKIT_IODMAEVENTSOURCE_H */ diff --git a/iokit/IOKit/IODataQueue.h b/iokit/IOKit/IODataQueue.h index 8af46e5b3..c7de3c5fe 100644 --- a/iokit/IOKit/IODataQueue.h +++ b/iokit/IOKit/IODataQueue.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -43,7 +43,7 @@ typedef struct _IODataQueueMemory IODataQueueMemory; class IOMemoryDescriptor; struct _notifyMsg { - mach_msg_header_t h; + mach_msg_header_t h; }; /*! @@ -59,92 +59,92 @@ struct _notifyMsg { * *
In order to make the data queue memory available to a user process, the method getMemoryDescriptor() must be used to get an IOMemoryDescriptor instance that can be mapped into a user process. Typically, the clientMemoryForType() method on an IOUserClient instance will be used to request the IOMemoryDescriptor and then return it to be mapped into the user process.
 */
-#ifndef DISABLE_DATAQUEUE_WARNING
-class __attribute__((deprecated)) IODataQueue : public OSObject
-#else
+#ifndef DISABLE_DATAQUEUE_WARNING
+class __attribute__((deprecated)) IODataQueue: public OSObject
+#else
 class IODataQueue : public OSObject
 #endif
 {
-    OSDeclareDefaultStructors(IODataQueue)
+	OSDeclareDefaultStructors(IODataQueue)

 protected:
-    IODataQueueMemory * dataQueue;
+	IODataQueueMemory * dataQueue;

-    void * notifyMsg;
+	void * notifyMsg;

-    virtual void free() APPLE_KEXT_OVERRIDE;
+	virtual void free() APPLE_KEXT_OVERRIDE;

-    /*!
-     * @function sendDataAvailableNotification
-     * @abstract Sends a dataAvailableNotification message to the specified mach port.
-     * @discussion This method sends a message to the mach port passed to setNotificationPort(). It is used to indicate that data is available in the queue.
-     */
-    virtual void sendDataAvailableNotification();
+/*!
+ * @function sendDataAvailableNotification
+ * @abstract Sends a dataAvailableNotification message to the specified mach port.
+ * @discussion This method sends a message to the mach port passed to setNotificationPort(). It is used to indicate that data is available in the queue.
+ */
+	virtual void sendDataAvailableNotification();

 public:
-    /*!
-     * @function withCapacity
-     * @abstract Static method that creates a new IODataQueue instance with the capacity specified in the size parameter.
-     * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>. The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>.<br> This method allocates a new IODataQueue instance and then calls initWithCapacity() with the given size parameter. If the initWithCapacity() fails, the new instance is released and zero is returned.
-     * @param size The size of the data queue memory region.
-     * @result Returns the newly allocated IODataQueue instance. Zero is returned on failure.
-     */
-    static IODataQueue *withCapacity(UInt32 size);
-
-    /*!
-     * @function withEntries
-     * @abstract Static method that creates a new IODataQueue instance with the specified number of entries of the given size.
-     * @discussion This method will create a new IODataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size of number of entries that can be added to the queue.<br> This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters. If the initWithEntries() fails, the new instance is released and zero is returned.
-     * @param numEntries Number of entries to allocate space for.
-     * @param entrySize Size of each entry.
-     * @result Reeturns the newly allocated IODataQueue instance. Zero is returned on failure.
-     */
-    static IODataQueue *withEntries(UInt32 numEntries, UInt32 entrySize);
-
-    /*!
-     * @function initWithCapacity
-     * @abstract Initializes an IODataQueue instance with the capacity specified in the size parameter.
-     * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>. The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>.
-     * @param size The size of the data queue memory region.
-     * @result Returns true on success and false on failure.
-     */
-    virtual Boolean initWithCapacity(UInt32 size);
-
-    /*!
-     * @function initWithEntries
-     * @abstract Initializes an IODataQueue instance with the specified number of entries of the given size.
-     * @discussion This method will initialize an IODataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size of number of entries that can be added to the queue.<br> This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters.
-     * @param numEntries Number of entries to allocate space for.
-     * @param entrySize Size of each entry.
-     * @result Reeturns true on success and false on failure.
-     */
-    virtual Boolean initWithEntries(UInt32 numEntries, UInt32 entrySize);
-
-    /*!
-     * @function enqueue
-     * @abstract Enqueues a new entry on the queue.
-     * @discussion This method adds a new data entry of dataSize to the queue. It sets the size parameter of the entry pointed to by the tail value and copies the memory pointed to by the data parameter in place in the queue. Once that is done, it moves the tail to the next available location. When attempting to add a new entry towards the end of the queue and there isn't enough space at the end, it wraps back to the beginning.<br> If the queue is empty when a new entry is added, sendDataAvailableNotification() is called to send a message to the user process that data is now available.
-     * @param data Pointer to the data to be added to the queue.
-     * @param dataSize Size of the data pointed to by data.
-     * @result Returns true on success and false on failure. Typically failure means that the queue is full.
-     */
-    virtual Boolean enqueue(void *data, UInt32 dataSize);
-
-    /*!
-     * @function setNotificationPort
-     * @abstract Creates a simple mach message targeting the mach port specified in port.
-     * @discussion This message is sent when data is added to an empty queue. It is to notify a user process that new data has become available.
-     * @param port The mach port to target with the notification message.
-     */
-    virtual void setNotificationPort(mach_port_t port);
-
-    /*!
-     * @function getMemoryDescriptor
-     * @abstract Returns a memory descriptor covering the IODataQueueMemory region.
-     * @discussion The IOMemoryDescriptor instance returned by this method is intended to be mapped into a user process. This is the memory region that the IODataQueueClient code operates on.
-     * @result Returns a newly allocated IOMemoryDescriptor for the IODataQueueMemory region. Returns zero on failure.
-     */
-    virtual IOMemoryDescriptor *getMemoryDescriptor();
+/*!
+ * @function withCapacity
+ * @abstract Static method that creates a new IODataQueue instance with the capacity specified in the size parameter.
+ * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>. The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>.<br> This method allocates a new IODataQueue instance and then calls initWithCapacity() with the given size parameter. If the initWithCapacity() fails, the new instance is released and zero is returned.
+ * @param size The size of the data queue memory region.
+ * @result Returns the newly allocated IODataQueue instance. Zero is returned on failure.
+ */
+	static IODataQueue *withCapacity(UInt32 size);
+
+/*!
+ * @function withEntries
+ * @abstract Static method that creates a new IODataQueue instance with the specified number of entries of the given size.
+ * @discussion This method will create a new IODataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size or number of entries that can be added to the queue.<br> This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters. If the initWithEntries() fails, the new instance is released and zero is returned.
+ * @param numEntries Number of entries to allocate space for.
+ * @param entrySize Size of each entry.
+ * @result Returns the newly allocated IODataQueue instance. Zero is returned on failure.
+ */
+	static IODataQueue *withEntries(UInt32 numEntries, UInt32 entrySize);
+
+/*!
+ * @function initWithCapacity
+ * @abstract Initializes an IODataQueue instance with the capacity specified in the size parameter.
+ * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>. The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in <IOKit/IODataQueueShared.h>.
+ * @param size The size of the data queue memory region.
+ * @result Returns true on success and false on failure.
+ */
+	virtual Boolean initWithCapacity(UInt32 size);
+
+/*!
+ * @function initWithEntries
+ * @abstract Initializes an IODataQueue instance with the specified number of entries of the given size.
+ * @discussion This method will initialize an IODataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size or number of entries that can be added to the queue.<br> This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters.
+ * @param numEntries Number of entries to allocate space for.
+ * @param entrySize Size of each entry.
+ * @result Returns true on success and false on failure.
+ */
+	virtual Boolean initWithEntries(UInt32 numEntries, UInt32 entrySize);
+
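As a concrete illustration of the sizing and notification API documented above, a kext might create and publish a queue as follows. A minimal sketch; notifyPort is a hypothetical mach_port_t received from the owning user client:

    // Sketch: room for 64 entries of 32 bytes each. withEntries()
    // adds the per-entry IODataQueueEntry header overhead itself.
    IODataQueue *queue = IODataQueue::withEntries(64, 32);
    if (queue) {
        queue->setNotificationPort(notifyPort);   // wakes the user process
        IOMemoryDescriptor *md = queue->getMemoryDescriptor();
        // 'md' is typically returned from IOUserClient::clientMemoryForType()
        // so the user process can map the IODataQueueMemory region.
    }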
+/*!
+ * @function enqueue
+ * @abstract Enqueues a new entry on the queue.
+ * @discussion This method adds a new data entry of dataSize to the queue. It sets the size parameter of the entry pointed to by the tail value and copies the memory pointed to by the data parameter in place in the queue. Once that is done, it moves the tail to the next available location. When attempting to add a new entry towards the end of the queue and there isn't enough space at the end, it wraps back to the beginning.<br> If the queue is empty when a new entry is added, sendDataAvailableNotification() is called to send a message to the user process that data is now available.
+ * @param data Pointer to the data to be added to the queue.
+ * @param dataSize Size of the data pointed to by data.
+ * @result Returns true on success and false on failure. Typically failure means that the queue is full.
+ */
+	virtual Boolean enqueue(void *data, UInt32 dataSize);
+
+/*!
+ * @function setNotificationPort
+ * @abstract Creates a simple mach message targeting the mach port specified in port.
+ * @discussion This message is sent when data is added to an empty queue. It is to notify a user process that new data has become available.
+ * @param port The mach port to target with the notification message.
+ */
+	virtual void setNotificationPort(mach_port_t port);
+
+/*!
+ * @function getMemoryDescriptor
+ * @abstract Returns a memory descriptor covering the IODataQueueMemory region.
+ * @discussion The IOMemoryDescriptor instance returned by this method is intended to be mapped into a user process. This is the memory region that the IODataQueueClient code operates on.
+ * @result Returns a newly allocated IOMemoryDescriptor for the IODataQueueMemory region. Returns zero on failure.
+ */
+	virtual IOMemoryDescriptor *getMemoryDescriptor();
 };

 #endif /* _IOKIT_IODATAQUEUE_H */
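The producing side of the same queue, per the enqueue() contract above. A minimal sketch; MyEvent is a hypothetical record type and queue is the instance from the previous sketch:

    // Sketch: post one fixed-size record. enqueue() copies the bytes
    // into the shared region and fails, rather than overwriting
    // unread data, when the queue is full.
    struct MyEvent {
        uint32_t code;
        uint32_t value;
    };
    MyEvent ev = { 7, 42 };
    if (!queue->enqueue(&ev, sizeof(ev))) {
        // Queue full: the consumer is not draining fast enough.
    }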
diff --git a/iokit/IOKit/IODataQueueShared.h b/iokit/IOKit/IODataQueueShared.h
index dc4532486..0e8c7b407 100644
--- a/iokit/IOKit/IODataQueueShared.h
+++ b/iokit/IOKit/IODataQueueShared.h
@@ -2,7 +2,7 @@
  * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
@@ -36,13 +36,13 @@
 /*!
  * @typedef IODataQueueEntry
  * @abstract Represents an entry within the data queue
- * @discussion This is a variable sized struct. The data field simply represents the start of the data region. The size of the data region is stored in the size field. The whole size of the specific entry is the size of a UInt32 plus the size of the data region.
+ * @discussion This is a variable-sized struct. The data field simply represents the start of the data region. The size of the data region is stored in the size field. The whole size of the specific entry is the size of a UInt32 plus the size of the data region.
  * @field size The size of the following data region.
  * @field data Represents the beginning of the data region. The address of the data field is a pointer to the start of the data region.
  */
-typedef struct _IODataQueueEntry{
-    UInt32  size;
-    UInt8   data[4];
+typedef struct _IODataQueueEntry {
+	UInt32 size;
+	UInt8 data[4];
 } IODataQueueEntry;

 /*!
@@ -55,10 +55,10 @@ typedef struct _IODataQueueEntry{
  * @field queue Represents the beginning of the queue memory region. The size of the region pointed to by queue is stored in the queueSize field.
  */
 typedef struct _IODataQueueMemory {
-    UInt32            queueSize;
-    volatile UInt32   head;
-    volatile UInt32   tail;
-    IODataQueueEntry  queue[1];
+	UInt32 queueSize;
+	volatile UInt32 head;
+	volatile UInt32 tail;
+	IODataQueueEntry queue[1];
 } IODataQueueMemory;

 /*!
@@ -69,8 +69,8 @@ typedef struct _IODataQueueMemory {
  * @field msgh Mach message header containing the notification mach port associated with this queue.
  */
 typedef struct _IODataQueueAppendix {
-    UInt32            version;
-    mach_msg_header_t msgh;
+	UInt32 version;
+	mach_msg_header_t msgh;
 } IODataQueueAppendix;

 /*!
@@ -89,4 +89,3 @@ typedef struct _IODataQueueAppendix {
 #define DATA_QUEUE_MEMORY_APPENDIX_SIZE (sizeof(IODataQueueAppendix))

 #endif /* _IOKIT_IODATAQUEUESHARED_H */
-
diff --git a/iokit/IOKit/IODeviceMemory.h b/iokit/IOKit/IODeviceMemory.h
index bcc31ab3d..49d0324b8 100644
--- a/iokit/IOKit/IODeviceMemory.h
+++ b/iokit/IOKit/IODeviceMemory.h
@@ -2,7 +2,7 @@
  * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
  *
  * HISTORY
  *
@@ -38,61 +38,61 @@
 #include <IOKit/IOMemoryDescriptor.h>

 /*! @class IODeviceMemory
- @abstract An IOMemoryDescriptor used for device physical memory ranges.
- @discussion The IODeviceMemory class is a simple subclass of IOMemoryDescriptor that uses its methods to describe a single range of physical memory on a device. IODeviceMemory objects are usually looked up with IOService or IOPCIDevice accessors, and are created by memory-mapped bus families. IODeviceMemory implements only some factory methods in addition to the methods of IOMemoryDescriptor.
-*/
+ * @abstract An IOMemoryDescriptor used for device physical memory ranges.
+ * @discussion The IODeviceMemory class is a simple subclass of IOMemoryDescriptor that uses its methods to describe a single range of physical memory on a device. IODeviceMemory objects are usually looked up with IOService or IOPCIDevice accessors, and are created by memory-mapped bus families.
IODeviceMemory implements only some factory methods in addition to the methods of IOMemoryDescriptor. + */ class IODeviceMemory : public IOMemoryDescriptor { - OSDeclareDefaultStructors(IODeviceMemory) + OSDeclareDefaultStructors(IODeviceMemory) public: /*! @struct InitElement - @field start First physical address in the range. - @field length Length of the range. - @field tag 32-bit value not interpreted by IODeviceMemory or IOMemoryDescriptor, for use by the bus family. */ + * @field start First physical address in the range. + * @field length Length of the range. + * @field tag 32-bit value not interpreted by IODeviceMemory or IOMemoryDescriptor, for use by the bus family. */ - struct InitElement { - IOPhysicalAddress start; - IOPhysicalLength length; - IOOptionBits tag; - }; + struct InitElement { + IOPhysicalAddress start; + IOPhysicalLength length; + IOOptionBits tag; + }; /*! @function arrayFromList - @abstract Constructs an OSArray of IODeviceMemory instances, each describing one physical range, and a tag value. - @discussion This method creates IODeviceMemory instances for each physical range passed in an IODeviceMemory::InitElement array. Each element consists of a physical address, length and tag value for the IODeviceMemory. The instances are returned as a created OSArray. - @param list An array of IODeviceMemory::InitElement structures. - @param count The number of elements in the list. - @result Returns a created OSArray of IODeviceMemory objects, to be released by the caller, or zero on failure. */ + * @abstract Constructs an OSArray of IODeviceMemory instances, each describing one physical range, and a tag value. + * @discussion This method creates IODeviceMemory instances for each physical range passed in an IODeviceMemory::InitElement array. Each element consists of a physical address, length and tag value for the IODeviceMemory. The instances are returned as a created OSArray. + * @param list An array of IODeviceMemory::InitElement structures. + * @param count The number of elements in the list. + * @result Returns a created OSArray of IODeviceMemory objects, to be released by the caller, or zero on failure. */ - static OSArray * arrayFromList( - InitElement list[], - IOItemCount count ); + static OSArray * arrayFromList( + InitElement list[], + IOItemCount count ); /*! @function withRange - @abstract Constructs an IODeviceMemory instance, describing one physical range. - @discussion This method creates an IODeviceMemory instance for one physical range passed as a physical address and length. It just calls IOMemoryDescriptor::withPhysicalAddress. - @param start The physical address of the first byte in the memory. - @param length The length of memory. - @result Returns the created IODeviceMemory on success, to be released by the caller, or zero on failure. */ + * @abstract Constructs an IODeviceMemory instance, describing one physical range. + * @discussion This method creates an IODeviceMemory instance for one physical range passed as a physical address and length. It just calls IOMemoryDescriptor::withPhysicalAddress. + * @param start The physical address of the first byte in the memory. + * @param length The length of memory. + * @result Returns the created IODeviceMemory on success, to be released by the caller, or zero on failure. */ - static IODeviceMemory * withRange( - IOPhysicalAddress start, - IOPhysicalLength length ); + static IODeviceMemory * withRange( + IOPhysicalAddress start, + IOPhysicalLength length ); /*! 
@function withSubRange
- @abstract Constructs an IODeviceMemory instance, describing a subset of an existing IODeviceMemory range.
- @discussion This method creates an IODeviceMemory instance for a subset of an existing IODeviceMemory range, passed as a physical address offset and length. It just calls IOMemoryDescriptor::withSubRange.
- @param of The parent IODeviceMemory of which a subrange is to be used for the new descriptor, which will be retained by the subrange IODeviceMemory.
- @param offset A byte offset into the parent's memory.
- @param length The length of the subrange.
- @result Returns the created IODeviceMemory on success, to be released by the caller, or zero on failure. */
+ * @abstract Constructs an IODeviceMemory instance, describing a subset of an existing IODeviceMemory range.
+ * @discussion This method creates an IODeviceMemory instance for a subset of an existing IODeviceMemory range, passed as a physical address offset and length. It just calls IOMemoryDescriptor::withSubRange.
+ * @param of The parent IODeviceMemory, a subrange of which is to be used for the new descriptor; the parent is retained by the subrange IODeviceMemory.
+ * @param offset A byte offset into the parent's memory.
+ * @param length The length of the subrange.
+ * @result Returns the created IODeviceMemory on success, to be released by the caller, or zero on failure. */

-    static IODeviceMemory * withSubRange(
-        IODeviceMemory *    of,
-        IOPhysicalAddress   offset,
-        IOPhysicalLength    length );
+	static IODeviceMemory * withSubRange(
+		IODeviceMemory * of,
+		IOPhysicalAddress offset,
+		IOPhysicalLength length );
 };

 #endif /* ! _IOKIT_IODEVICEMEMORY_H */
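A short sketch of the IODeviceMemory factory methods documented in the hunks above; the base address and sizes are illustrative placeholders for values a bus family would actually discover:

    // Sketch: a 4 KB register block at a made-up physical address,
    // plus a 256-byte window into it via withSubRange().
    IODeviceMemory *regs = IODeviceMemory::withRange(0x80000000, 0x1000);
    if (regs) {
        IODeviceMemory *window =
            IODeviceMemory::withSubRange(regs, 0x100, 0x100);
        if (window) {
            // ... map 'window' and access the registers ...
            window->release();      // both objects are caller-released
        }
        regs->release();
    }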
diff --git a/iokit/IOKit/IODeviceTreeSupport.h b/iokit/IOKit/IODeviceTreeSupport.h
index b10c5553c..24c79221a 100644
--- a/iokit/IOKit/IODeviceTreeSupport.h
+++ b/iokit/IOKit/IODeviceTreeSupport.h
@@ -2,7 +2,7 @@
  * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
  *
  * HISTORY
  *
@@ -41,85 +41,87 @@
 class IODeviceMemory;
 class IOService;

-extern const IORegistryPlane * gIODTPlane;
+extern const IORegistryPlane * gIODTPlane;

-extern const OSSymbol * gIODTPHandleKey;
+extern const OSSymbol * gIODTPHandleKey;

-extern const OSSymbol * gIODTCompatibleKey;
-extern const OSSymbol * gIODTTypeKey;
-extern const OSSymbol * gIODTModelKey;
-extern const OSSymbol * gIODTTargetTypeKey;
+extern const OSSymbol * gIODTCompatibleKey;
+extern const OSSymbol * gIODTTypeKey;
+extern const OSSymbol * gIODTModelKey;
+extern const OSSymbol * gIODTTargetTypeKey;

-extern const OSSymbol * gIODTAAPLInterruptsKey;
-extern const OSSymbol * gIODTDefaultInterruptController;
-extern const OSSymbol * gIODTNWInterruptMappingKey;
+extern const OSSymbol * gIODTAAPLInterruptsKey;
+extern const OSSymbol * gIODTDefaultInterruptController;
+extern const OSSymbol * gIODTNWInterruptMappingKey;

-IORegistryEntry * IODeviceTreeAlloc( void * dtTop );
+LIBKERN_RETURNS_NOT_RETAINED IORegistryEntry * IODeviceTreeAlloc( void * dtTop );

 bool IODTMatchNubWithKeys( IORegistryEntry * nub,
-                           const char * keys );
+    const char * keys );

 bool IODTCompareNubName( const IORegistryEntry * regEntry,
-             OSString * name, OSString ** matchingName );
+    OSString * name, OSString ** matchingName );

 enum {
-    kIODTRecursive = 0x00000001,
-    kIODTExclusive = 0x00000002
+	kIODTRecursive = 0x00000001,
+	kIODTExclusive = 0x00000002
 };

 OSCollectionIterator * IODTFindMatchingEntries( IORegistryEntry * from,
-            IOOptionBits options, const char * keys );
+    IOOptionBits options, const char * keys );

 #if !defined(__arm64__)
 typedef SInt32 (*IODTCompareAddressCellFunc)
-    (UInt32 cellCount, UInt32 left[], UInt32 right[]);
+(UInt32 cellCount, UInt32 left[], UInt32 right[]);
 #else
 typedef SInt64 (*IODTCompareAddressCellFunc)
-    (UInt32 cellCount, UInt32 left[], UInt32 right[]);
+(UInt32 cellCount, UInt32 left[], UInt32 right[]);
 #endif

 typedef void (*IODTNVLocationFunc)
-    (IORegistryEntry * entry,
-     UInt8 * busNum, UInt8 * deviceNum, UInt8 * functionNum );
+(IORegistryEntry * entry,
+    UInt8 * busNum, UInt8 * deviceNum, UInt8 * functionNum );

-void IODTSetResolving( IORegistryEntry * regEntry,
-        IODTCompareAddressCellFunc compareFunc,
-        IODTNVLocationFunc locationFunc );
+void IODTSetResolving( IORegistryEntry * regEntry,
+    IODTCompareAddressCellFunc compareFunc,
+    IODTNVLocationFunc locationFunc );

 void IODTGetCellCounts( IORegistryEntry * regEntry,
-            UInt32 * sizeCount, UInt32 * addressCount);
+    UInt32 * sizeCount, UInt32 * addressCount);

 bool IODTResolveAddressCell( IORegistryEntry * regEntry,
-                             UInt32 cellsIn[],
-                             IOPhysicalAddress * phys, IOPhysicalLength * len );
+    UInt32 cellsIn[],
+    IOPhysicalAddress * phys, IOPhysicalLength * len );

-OSArray * IODTResolveAddressing( IORegistryEntry * regEntry,
-            const char * addressPropertyName,
-            IODeviceMemory * parent );
+LIBKERN_RETURNS_NOT_RETAINED OSArray *
+IODTResolveAddressing( IORegistryEntry * regEntry,
+    const char * addressPropertyName,
+    IODeviceMemory * parent );

 struct IONVRAMDescriptor {
-    unsigned int format:4;
-    unsigned int marker:1;
-    unsigned int bridgeCount:3;
-    unsigned int busNum:2;
-    unsigned int bridgeDevices:6 * 5;
-    unsigned int functionNum:3;
-    unsigned int deviceNum:5;
+	unsigned int format:4;
+	unsigned int marker:1;
+	unsigned int bridgeCount:3;
+	unsigned int busNum:2;
+	unsigned int bridgeDevices:6 * 5;
+	unsigned int functionNum:3;
+	unsigned int deviceNum:5;
 } __attribute__((aligned(2), packed));

 IOReturn IODTMakeNVDescriptor( IORegistryEntry * regEntry,
-            IONVRAMDescriptor * hdr );
+    IONVRAMDescriptor * hdr );

-OSData * IODTFindSlotName( IORegistryEntry * regEntry, UInt32 deviceNumber );
+LIBKERN_RETURNS_NOT_RETAINED OSData *
+IODTFindSlotName( IORegistryEntry * regEntry, UInt32 deviceNumber );

 const OSSymbol * IODTInterruptControllerName(
-                IORegistryEntry * regEntry );
+	IORegistryEntry * regEntry );

 bool IODTMapInterrupts( IORegistryEntry * regEntry );

 enum {
-    kIODTInterruptShared = 0x00000001
+	kIODTInterruptShared = 0x00000001
 };
 IOReturn IODTGetInterruptOptions( IORegistryEntry * regEntry, int source, IOOptionBits * options );

@@ -134,4 +136,3 @@ IOReturn IONDRVLibrariesInitialize( IOService * provider );
 #endif

 #endif /* _IOKIT_IODEVICETREE_H */
-
- An event source can represent any event that should cause the work-loop of a -device to wake up and perform work. Two examples of event sources are the -IOInterruptEventSource which delivers interrupt notifications and IOCommandGate -which delivers command requests. -

- A kernel module can always use the work-loop model for serialising access to -anything at all. The IOEventSource is used for communicating events to the -work-loop, and the chain of event sources should be used to walk the possible -event sources and demultipex them. Note a particular instance of an event -source may only be a member of 1 linked list chain. If you need to move it -between chains than make sure it is removed from the original chain before -attempting to move it. -

- The IOEventSource makes no attempt to maintain the consistency of its internal data across multi-threading. It is assumed that the user of these basic tools will protect the data that these objects represent in some sort of device wide instance lock. For example the IOWorkLoop maintains the event chain by using an IOCommandGate and thus single threading access to its state. -

- All subclasses of IOEventSource that wish to perform work on the work-loop thread are expected to implement the checkForWork() member function. As of Mac OS X, 10.7 (Darwin 11), checkForWork is no longer pure virtual, and should not be overridden if there is no work to be done. - -

- checkForWork() is the key method in this class. It is called by some work-loop when convienient and is expected to evaluate its internal state and determine if an event has occurred since the last call. In the case of an event having occurred then the instance defined target(owner)/action will be called. The action is stored as an ordinary C function pointer but the first parameter is always the owner. This means that a C++ member function can be used as an action function though this depends on the ABI. -

- Although the eventChainNext variable contains a reference to the next event source in the chain this reference is not retained. The list 'owner' i.e. the client that creates the event, not the work-loop, is expected to retain the source. -*/ + * @class IOEventSource : public OSObject + * @abstract Abstract class for all work-loop event sources. + * @discussion The IOEventSource declares the abstract super class that all + * event sources must inherit from if an IOWorkLoop is to receive events from them. + *

+ * An event source can represent any event that should cause the work-loop of a + * device to wake up and perform work. Two examples of event sources are the + * IOInterruptEventSource which delivers interrupt notifications and IOCommandGate + * which delivers command requests. + *
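By way of illustration, a minimal sketch of attaching an interrupt event source to a driver's work loop so the loop can deliver events; MyDriver, fInterruptSource and handleInterrupt are hypothetical names for this example, not part of this header:

    bool MyDriver::startEventDelivery(IOService *provider)
    {
        IOWorkLoop *wl = getWorkLoop();     // the driver's work loop
        if (!wl) {
            return false;
        }
        // Create the event source; `this` becomes the owner passed as the
        // first parameter of every action callout.
        fInterruptSource = IOInterruptEventSource::interruptEventSource(
            this,
            OSMemberFunctionCast(IOInterruptEventSource::Action, this,
                &MyDriver::handleInterrupt),
            provider,   // service that supplies the interrupt
            0);         // interrupt index within the provider
        if (!fInterruptSource ||
            wl->addEventSource(fInterruptSource) != kIOReturnSuccess) {
            return false;
        }
        return true;
    }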

+ * A kernel module can always use the work-loop model for serialising access to + * anything at all. The IOEventSource is used for communicating events to the + * work-loop, and the chain of event sources should be used to walk the possible + * event sources and demultiplex them. Note that a particular instance of an event + * source may only be a member of one linked list chain. If you need to move it + * between chains then make sure it is removed from the original chain before + * attempting to move it. + *
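As a concrete sketch of that rule (fSource, fOldLoop and fNewLoop are assumed driver members, not names from this header), detach the source from the old loop before attaching it to the new one:

    fOldLoop->removeEventSource(fSource);   // unlink from the original chain first
    fNewLoop->addEventSource(fSource);      // only now is it safe to relink

Both calls are ordinary IOWorkLoop methods; the point is simply that the second must never happen before the first.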

+ * The IOEventSource makes no attempt to maintain the consistency of its internal data across multiple threads. It is assumed that the user of these basic tools will protect the data that these objects represent with some sort of device-wide instance lock. For example, the IOWorkLoop maintains the event chain by using an IOCommandGate, thereby single-threading access to its state. + *
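To make that locking model concrete, here is a sketch that funnels a state update through a command gate; MyDriver, fCommandGate and updateStateGated are assumptions for the example, with fCommandGate presumed to have been created via IOCommandGate::commandGate(this) and added to the work loop:

    IOReturn MyDriver::updateState(uint32_t newState)
    {
        // runAction() closes the gate, so updateStateGated() runs
        // single-threaded with respect to everything else on the work loop.
        return fCommandGate->runAction(
            OSMemberFunctionCast(IOCommandGate::Action, this,
                &MyDriver::updateStateGated),
            (void *)(uintptr_t)newState);
    }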

+ * All subclasses of IOEventSource that wish to perform work on the work-loop thread are expected to implement the checkForWork() member function. As of Mac OS X 10.7 (Darwin 11), checkForWork is no longer pure virtual and should not be overridden if there is no work to be done. + * + *

+ * checkForWork() is the key method in this class. It is called by some work-loop when convenient and is expected to evaluate its internal state and determine whether an event has occurred since the last call. If an event has occurred, the instance-defined target(owner)/action will be called. The action is stored as an ordinary C function pointer, but the first parameter is always the owner. This means that a C++ member function can be used as an action function, though this depends on the ABI. + *
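A minimal sketch of such a subclass follows; MyEventSource and its fEventPending flag are hypothetical, but enabled, action and owner are the protected members documented below:

    bool MyEventSource::checkForWork()
    {
        if (!enabled || !fEventPending) {
            return false;               // nothing to do this pass
        }
        fEventPending = false;          // consume the event
        if (action) {
            (*action)(owner);           // the owner is always the first parameter
        }
        return false;                   // no outstanding events remain
    }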

+ * Although the eventChainNext variable contains a reference to the next event source in the chain this reference is not retained. The list 'owner' i.e. the client that creates the event, not the work-loop, is expected to retain the source. + */ class IOEventSource : public OSObject { - OSDeclareAbstractStructors(IOEventSource) - friend class IOWorkLoop; + OSDeclareAbstractStructors(IOEventSource) + friend class IOWorkLoop; #if IOKITSTATS - friend class IOStatistics; + friend class IOStatistics; #endif public: /*! - @typedef Action - @discussion Placeholder type for C++ function overloading discrimination. -As the all event sources require an action and it has to be stored somewhere -and be of some type, this is that type. - @param owner - Target of the function, can be used as a refcon. The owner is set -during initialisation. Note if a C++ function was specified this parameter -is implicitly the first paramter in the target member function's parameter list. -*/ - typedef void (*Action)(OSObject *owner, ...); + * @typedef Action + * @discussion Placeholder type for C++ function overloading discrimination. + * As the all event sources require an action and it has to be stored somewhere + * and be of some type, this is that type. + * @param owner + * Target of the function, can be used as a refcon. The owner is set + * during initialisation. Note if a C++ function was specified this parameter + * is implicitly the first paramter in the target member function's parameter list. + */ + typedef void (*Action)(OSObject *owner, ...); /*! @defined IOEventSourceAction - @discussion Backward compatibilty define for the old non-class scoped type definition. See $link IOEventSource::Action */ + * @discussion Backward compatibilty define for the old non-class scoped type definition. See $link IOEventSource::Action */ #define IOEventSourceAction IOEventSource::Action #ifdef __BLOCKS__ - typedef IOReturn (^ActionBlock)(); + typedef IOReturn (^ActionBlock)(); #endif /* __BLOCKS__ */ protected: /*! @var eventChainNext - The next event source in the event chain. nil at end of chain. */ - IOEventSource *eventChainNext; + * The next event source in the event chain. nil at end of chain. */ + IOEventSource *eventChainNext; /*! @var owner The owner object called when an event has been delivered. */ - OSObject *owner; + OSObject *owner; /*! @var action - The action method called when an event has been delivered */ + * The action method called when an event has been delivered */ #if XNU_KERNEL_PRIVATE - union { Action action; ActionBlock actionBlock; }; + union { Action action; ActionBlock actionBlock; }; #else /* XNU_KERNEL_PRIVATE */ - Action action; + Action action; #endif /* !XNU_KERNEL_PRIVATE */ /*! @var enabled - Is this event source enabled to deliver requests to the work-loop. */ - bool enabled; + * Is this event source enabled to deliver requests to the work-loop. */ + bool enabled; #if XNU_KERNEL_PRIVATE - enum - { - kPassive = 0x0001, - kActive = 0x0002, - kActionBlock = 0x0004, - kSubClass0 = 0x0008, - }; - uint8_t eventSourceReserved1[1]; - uint16_t flags; + enum{ + kPassive = 0x0001, + kActive = 0x0002, + kActionBlock = 0x0004, + kSubClass0 = 0x0008, + }; + uint8_t eventSourceReserved1[1]; + uint16_t flags; #if __LP64__ - uint8_t eventSourceReserved2[4]; + uint8_t eventSourceReserved2[4]; #endif /* __LP64__ */ #endif /* XNU_KERNEL_PRIVATE */ /*! @var workLoop What is the work-loop for this event source. */ - IOWorkLoop *workLoop; + IOWorkLoop *workLoop; /*! 
@var refcon What ever the client wants to do, see $link setRefcon. */ - void *refcon; + void *refcon; /*! @struct ExpansionData - @discussion This structure will be used to expand the capablilties of the IOEventSource in the future. - */ - struct ExpansionData { + * @discussion This structure will be used to expand the capablilties of the IOEventSource in the future. + */ + struct ExpansionData { #if IOKITSTATS - struct IOEventSourceCounter *counter; + struct IOEventSourceCounter *counter; #else - void *iokitstatsReserved; + void *iokitstatsReserved; #endif }; /*! @var reserved - Reserved for future use. (Internal use only) */ - ExpansionData *reserved; + * Reserved for future use. (Internal use only) */ + ExpansionData *reserved; /*! @function init - @abstract Primary initialiser for the IOEventSource class. - @param owner - Owner of this instance of an event source. Used as the first parameter -of the action callout. Owner must be an OSObject. - @param action - Pointer to C call out function. Action is a pointer to a C function -that gets called when this event source has outstanding work. It will usually -be called by the checkForWork member function. The first parameter of the -action call out will always be the owner, this allows C++ member functions to -be used as actions. Defaults to 0. - @result true if the inherited classes and this instance initialise -successfully. -*/ - virtual bool init(OSObject *owner, IOEventSource::Action action = 0); - - virtual void free( void ) APPLE_KEXT_OVERRIDE; + * @abstract Primary initialiser for the IOEventSource class. + * @param owner + * Owner of this instance of an event source. Used as the first parameter + * of the action callout. Owner must be an OSObject. + * @param action + * Pointer to C call out function. Action is a pointer to a C function + * that gets called when this event source has outstanding work. It will usually + * be called by the checkForWork member function. The first parameter of the + * action call out will always be the owner, this allows C++ member functions to + * be used as actions. Defaults to 0. + * @result true if the inherited classes and this instance initialise + * successfully. + */ + virtual bool init(OSObject *owner, IOEventSource::Action action = 0); + + virtual void free( void ) APPLE_KEXT_OVERRIDE; /*! @function checkForWork - @abstract Virtual member function used by IOWorkLoop for work -scheduling. - @discussion This function will be called to request a subclass to check -its internal state for any work to do and then to call out the owner/action. -If this event source never performs any work (e.g. IOCommandGate), this -method should not be overridden. NOTE: This method is no longer declared pure -virtual. A default implementation is provided in IOEventSource. - @result Return true if this function needs to be called again before all its outstanding events have been processed. - */ - virtual bool checkForWork(); + * @abstract Virtual member function used by IOWorkLoop for work + * scheduling. + * @discussion This function will be called to request a subclass to check + * its internal state for any work to do and then to call out the owner/action. + * If this event source never performs any work (e.g. IOCommandGate), this + * method should not be overridden. NOTE: This method is no longer declared pure + * virtual. A default implementation is provided in IOEventSource. + * @result Return true if this function needs to be called again before all its outstanding events have been processed. 
+ */ + virtual bool checkForWork(); /*! @function setWorkLoop - @abstract Set'ter for $link workLoop variable. - @param workLoop - Target work-loop of this event source instance. A subclass of -IOWorkLoop that at least reacts to signalWorkAvailable() and onThread functions. -*/ - virtual void setWorkLoop(IOWorkLoop *workLoop); + * @abstract Set'ter for $link workLoop variable. + * @param workLoop + * Target work-loop of this event source instance. A subclass of + * IOWorkLoop that at least reacts to signalWorkAvailable() and onThread functions. + */ + virtual void setWorkLoop(IOWorkLoop *workLoop); /*! @function setNext - @abstract Set'ter for $link eventChainNext variable. - @param next - Pointer to another IOEventSource instance. -*/ - virtual void setNext(IOEventSource *next); + * @abstract Set'ter for $link eventChainNext variable. + * @param next + * Pointer to another IOEventSource instance. + */ + virtual void setNext(IOEventSource *next); /*! @function getNext - @abstract Get'ter for $link eventChainNext variable. - @result value of eventChainNext. -*/ - virtual IOEventSource *getNext() const; + * @abstract Get'ter for $link eventChainNext variable. + * @result value of eventChainNext. + */ + virtual IOEventSource *getNext() const; protected: - // Methods to access the IOWorkLoop exported fields - void signalWorkAvailable(); - void openGate(); - void closeGate(); - bool tryCloseGate(); - int sleepGate(void *event, UInt32 type); +// Methods to access the IOWorkLoop exported fields + void signalWorkAvailable(); + void openGate(); + void closeGate(); + bool tryCloseGate(); + int sleepGate(void *event, UInt32 type); int sleepGate(void *event, AbsoluteTime deadline, UInt32 type); - void wakeupGate(void *event, bool oneThread); + void wakeupGate(void *event, bool oneThread); public: /*! @function setAction - @abstract Set'ter for $link action variable. - @param action Pointer to a C function of type IOEventSource::Action. */ - virtual void setAction(IOEventSource::Action action); + * @abstract Set'ter for $link action variable. + * @param action Pointer to a C function of type IOEventSource::Action. */ + virtual void setAction(IOEventSource::Action action); /*! @function getAction - @abstract Get'ter for $link action variable. - @result value of action. */ - virtual IOEventSource::Action getAction() const; + * @abstract Get'ter for $link action variable. + * @result value of action. */ + virtual IOEventSource::Action getAction() const; #ifdef __BLOCKS__ /*! @function setActionBlock - @abstract Setter for action ivar. The current block is released, & the new block is retained. - @param block Block pointer of type IOEventSource::ActionBlock. */ - void setActionBlock(ActionBlock block); + * @abstract Setter for action ivar. The current block is released, & the new block is retained. + * @param block Block pointer of type IOEventSource::ActionBlock. */ + void setActionBlock(ActionBlock block); /*! @function getActionBlock - @abstract Getter for action ivar. - @result Block pointer of type IOEventSource::ActionBlock, if set, or NULL. */ - ActionBlock getActionBlock(ActionBlock) const; + * @abstract Getter for action ivar. + * @result Block pointer of type IOEventSource::ActionBlock, if set, or NULL. */ + ActionBlock getActionBlock(ActionBlock) const; #endif /* __BLOCKS__ */ /*! @function setRefcon - @abstract Setter for refcon ivar. This function will assert if a block action has been set. - @param refcon Refcon. */ - void setRefcon(void *refcon); + * @abstract Setter for refcon ivar. 
This function will assert if a block action has been set. + * @param refcon Refcon. */ + void setRefcon(void *refcon); /*! @function getRefcon - @abstract Getter for refcon ivar. - @result The refcon. This function will assert if a block action has been set. */ - void * getRefcon() const; + * @abstract Getter for refcon ivar. + * @result The refcon. This function will assert if a block action has been set. */ + void * getRefcon() const; /*! @function enable - @abstract Enable event source. - @discussion A subclass implementation is expected to respect the enabled -state when checkForWork is called. Calling this function will cause the -work-loop to be signalled so that a checkForWork is performed. */ - virtual void enable(); + * @abstract Enable event source. + * @discussion A subclass implementation is expected to respect the enabled + * state when checkForWork is called. Calling this function will cause the + * work-loop to be signalled so that a checkForWork is performed. */ + virtual void enable(); /*! @function disable - @abstract Disable event source. - @discussion A subclass implementation is expected to respect the enabled -state when checkForWork is called. */ - virtual void disable(); + * @abstract Disable event source. + * @discussion A subclass implementation is expected to respect the enabled + * state when checkForWork is called. */ + virtual void disable(); /*! @function isEnabled - @abstract Get'ter for $link enable variable. - @result true if enabled. */ - virtual bool isEnabled() const; + * @abstract Get'ter for $link enable variable. + * @result true if enabled. */ + virtual bool isEnabled() const; /*! @function getWorkLoop - @abstract Get'ter for $link workLoop variable. - @result value of workLoop. */ - virtual IOWorkLoop *getWorkLoop() const; + * @abstract Get'ter for $link workLoop variable. + * @result value of workLoop. */ + virtual IOWorkLoop *getWorkLoop() const; /*! @function onThread - @abstract Convenience function for workLoop->onThread. - @result true if called on the work-loop thread. -*/ - virtual bool onThread() const; + * @abstract Convenience function for workLoop->onThread. + * @result true if called on the work-loop thread. + */ + virtual bool onThread() const; private: - OSMetaClassDeclareReservedUnused(IOEventSource, 0); - OSMetaClassDeclareReservedUnused(IOEventSource, 1); - OSMetaClassDeclareReservedUnused(IOEventSource, 2); - OSMetaClassDeclareReservedUnused(IOEventSource, 3); - OSMetaClassDeclareReservedUnused(IOEventSource, 4); - OSMetaClassDeclareReservedUnused(IOEventSource, 5); - OSMetaClassDeclareReservedUnused(IOEventSource, 6); - OSMetaClassDeclareReservedUnused(IOEventSource, 7); + OSMetaClassDeclareReservedUnused(IOEventSource, 0); + OSMetaClassDeclareReservedUnused(IOEventSource, 1); + OSMetaClassDeclareReservedUnused(IOEventSource, 2); + OSMetaClassDeclareReservedUnused(IOEventSource, 3); + OSMetaClassDeclareReservedUnused(IOEventSource, 4); + OSMetaClassDeclareReservedUnused(IOEventSource, 5); + OSMetaClassDeclareReservedUnused(IOEventSource, 6); + OSMetaClassDeclareReservedUnused(IOEventSource, 7); }; #endif /* !_IOKIT_IOEVENTSOURCE_H */ diff --git a/iokit/IOKit/IOFilterInterruptEventSource.h b/iokit/IOKit/IOFilterInterruptEventSource.h index 1a5470b45..db887e746 100644 --- a/iokit/IOKit/IOFilterInterruptEventSource.h +++ b/iokit/IOKit/IOFilterInterruptEventSource.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* -Copyright (c) 1999 Apple Computer, Inc. All rights reserved. - -HISTORY - 1999-4-15 Godfrey van der Linden(gvdl) - Created. -*/ + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 1999-4-15 Godfrey van der Linden(gvdl) + * Created. + */ #ifndef _IOKIT_IOFILTERINTERRUPTEVENTSOURCE_H #define _IOKIT_IOFILTERINTERRUPTEVENTSOURCE_H @@ -40,160 +40,159 @@ HISTORY class IOService; /*! @class IOFilterInterruptEventSource : public IOInterruptEventSource - @abstract Filtering varient of the $link IOInterruptEventSource. - @discussion An interrupt event source that calls the client to determine if a interrupt event needs to be scheduled on the work loop. A filter interrupt event source call's the client in the primary interrupt context, the client can then interrogate its hardware and determine if the interrupt needs to be processed yet. -

- As the routine is called in the primary interrupt context great care must be taken in the writing of this routine. In general none of the generic IOKit environment is safe to call in this context. We intend this routine to be used by hardware that can interrogate its registers without destroying state. Primarily this variant of event sources will be used by drivers that share interrupts. The filter routine will determine if the interrupt is a real interrupt or a ghost and thus optimise the work thread context switch away. -

-If you are implementing 'SoftDMA' (or pseudo-DMA), you may not want the I/O Kit to automatically start your interrupt handler routine on your work loop when your filter routine returns true. In this case, you may choose to have your filter routine schedule the work on the work loop itself and then return false. If you do this, the interrupt will not be disabled in hardware and you could receive additional primary interrupts before your work loop–level service routine completes. Because this scheme has implications for synchronization between your filter routine and your interrupt service routine, you should avoid doing this unless your driver requires SoftDMA. -

-CAUTION: Called in primary interrupt context, if you need to disable interrupt to guard you registers against an unexpected call then it is better to use a straight IOInterruptEventSource and its secondary interrupt delivery mechanism. -*/ + * @abstract Filtering variant of the $link IOInterruptEventSource. + * @discussion An interrupt event source that calls the client to determine whether an interrupt event needs to be scheduled on the work loop. A filter interrupt event source calls the client in the primary interrupt context; the client can then interrogate its hardware and determine whether the interrupt needs to be processed yet. + *

+ * As the routine is called in the primary interrupt context, great care must be taken in writing it. In general, none of the generic IOKit environment is safe to call in this context. We intend this routine to be used by hardware that can interrogate its registers without destroying state. Primarily this variant of event sources will be used by drivers that share interrupts. The filter routine determines whether the interrupt is a real interrupt or a ghost, and thus optimises the work-thread context switch away. + *
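A typical filter, sketched with a hypothetical status register and driver class (myInterruptFilter, MyDriver, readStatusRegister and kMyInterruptPendingBit are illustrative only; the Filter typedef is declared below):

    static bool
    myInterruptFilter(OSObject *owner, IOFilterInterruptEventSource *sender)
    {
        MyDriver *me = (MyDriver *) owner;
        // Primary interrupt context: only inspect hardware state that can be
        // read without destroying it, and claim the (possibly shared)
        // interrupt only if it is really ours.
        return (me->readStatusRegister() & kMyInterruptPendingBit) != 0;
    }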

+ * If you are implementing 'SoftDMA' (or pseudo-DMA), you may not want the I/O Kit to automatically start your interrupt handler routine on your work loop when your filter routine returns true. In this case, you may choose to have your filter routine schedule the work on the work loop itself and then return false. If you do this, the interrupt will not be disabled in hardware and you could receive additional primary interrupts before your work loop–level service routine completes. Because this scheme has implications for synchronization between your filter routine and your interrupt service routine, you should avoid doing this unless your driver requires SoftDMA. + *
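A SoftDMA-style filter might look like the following sketch; the fifoHasData, fifoReadIntoRing and ringHasWork helpers are hypothetical, while signalInterrupt() is the member declared later in this header:

    static bool
    mySoftDMAFilter(OSObject *owner, IOFilterInterruptEventSource *sender)
    {
        MyDriver *me = (MyDriver *) owner;
        while (me->fifoHasData()) {
            me->fifoReadIntoRing();     // pseudo-DMA copy; this must also
                                        // clear the interrupting condition
        }
        if (me->ringHasWork()) {
            sender->signalInterrupt();  // schedule the secondary handler ourselves
        }
        return false;                   // keep the interrupt enabled in hardware
    }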

+ * CAUTION: Called in primary interrupt context, if you need to disable interrupt to guard you registers against an unexpected call then it is better to use a straight IOInterruptEventSource and its secondary interrupt delivery mechanism. + */ class IOFilterInterruptEventSource : public IOInterruptEventSource { - OSDeclareDefaultStructors(IOFilterInterruptEventSource) + OSDeclareDefaultStructors(IOFilterInterruptEventSource) public: /*! - @typedef Filter - @discussion C Function pointer to a routine to call when an interrupt occurs. - @param owner Pointer to the owning/client instance. - @param sender Where is the interrupt comming from. - @result false if this interrupt can be ignored. */ - typedef bool (*Filter)(OSObject *owner, IOFilterInterruptEventSource *sender); + * @typedef Filter + * @discussion C Function pointer to a routine to call when an interrupt occurs. + * @param owner Pointer to the owning/client instance. + * @param sender Where is the interrupt comming from. + * @result false if this interrupt can be ignored. */ + typedef bool (*Filter)(OSObject *owner, IOFilterInterruptEventSource *sender); /*! @defined IOFilterInterruptAction - @discussion Backward compatibilty define for the old non-class scoped type definition. See $link IOFilterInterruptSource::Filter */ + * @discussion Backward compatibilty define for the old non-class scoped type definition. See $link IOFilterInterruptSource::Filter */ #define IOFilterInterruptAction IOFilterInterruptEventSource::Filter #ifdef __BLOCKS__ - typedef bool (^FilterBlock)(IOFilterInterruptEventSource *sender); + typedef bool (^FilterBlock)(IOFilterInterruptEventSource *sender); #endif /* __BLOCKS__ */ private: - // Hide the superclass initializers - virtual bool init(OSObject *inOwner, - IOInterruptEventSource::Action inAction = 0, - IOService *inProvider = 0, - int inIntIndex = 0) APPLE_KEXT_OVERRIDE; +// Hide the superclass initializers + virtual bool init(OSObject *inOwner, + IOInterruptEventSource::Action inAction = 0, + IOService *inProvider = 0, + int inIntIndex = 0) APPLE_KEXT_OVERRIDE; - static IOInterruptEventSource * + static IOInterruptEventSource * interruptEventSource(OSObject *inOwner, - IOInterruptEventSource::Action inAction = 0, - IOService *inProvider = 0, - int inIntIndex = 0); + IOInterruptEventSource::Action inAction = 0, + IOService *inProvider = 0, + int inIntIndex = 0); protected: /*! @var filterAction Filter callout */ #if XNU_KERNEL_PRIVATE - union { Filter filterAction; FilterBlock filterActionBlock; }; + union { Filter filterAction; FilterBlock filterActionBlock; }; #else /* XNU_KERNEL_PRIVATE */ - Filter filterAction; + Filter filterAction; #endif /* !XNU_KERNEL_PRIVATE */ /*! @struct ExpansionData - @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future. - */ - struct ExpansionData { }; + * @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future. + */ + struct ExpansionData { }; /*! @var reserved - Reserved for future use. (Internal use only) */ - APPLE_KEXT_WSHADOW_PUSH; - ExpansionData *reserved; - APPLE_KEXT_WSHADOW_POP; + * Reserved for future use. (Internal use only) */ + APPLE_KEXT_WSHADOW_PUSH; + ExpansionData *reserved; + APPLE_KEXT_WSHADOW_POP; public: /*! @function filterInterruptEventSource - @abstract Factor method to create and initialise an IOFilterInterruptEventSource. See $link init. - @param owner Owner/client of this event source. - @param action 'C' Function to call when something happens. 
- @param filter 'C' Function to call when interrupt occurs. - @param provider Service that provides interrupts. - @param intIndex Defaults to 0. - @result a new event source if succesful, 0 otherwise. */ - static IOFilterInterruptEventSource * + * @abstract Factor method to create and initialise an IOFilterInterruptEventSource. See $link init. + * @param owner Owner/client of this event source. + * @param action 'C' Function to call when something happens. + * @param filter 'C' Function to call when interrupt occurs. + * @param provider Service that provides interrupts. + * @param intIndex Defaults to 0. + * @result a new event source if succesful, 0 otherwise. */ + static IOFilterInterruptEventSource * filterInterruptEventSource(OSObject *owner, - IOInterruptEventSource::Action action, - Filter filter, - IOService *provider, - int intIndex = 0); + IOInterruptEventSource::Action action, + Filter filter, + IOService *provider, + int intIndex = 0); #ifdef __BLOCKS__ /*! @function filterInterruptEventSource - @abstract Factor method to create and initialise an IOFilterInterruptEventSource. See $link init. - @param owner Owner/client of this event source. - @param provider Service that provides interrupts. - @param intIndex The index of the interrupt within the provider's interrupt sources. - @param action Block for the callout routine of this event source. - @param filter Block to invoke when HW interrupt occurs. - @result a new event source if succesful, 0 otherwise. */ - static IOFilterInterruptEventSource * + * @abstract Factor method to create and initialise an IOFilterInterruptEventSource. See $link init. + * @param owner Owner/client of this event source. + * @param provider Service that provides interrupts. + * @param intIndex The index of the interrupt within the provider's interrupt sources. + * @param action Block for the callout routine of this event source. + * @param filter Block to invoke when HW interrupt occurs. + * @result a new event source if succesful, 0 otherwise. */ + static IOFilterInterruptEventSource * filterInterruptEventSource(OSObject *owner, - IOService *provider, - int intIndex, - IOInterruptEventSource::ActionBlock action, - FilterBlock filter); + IOService *provider, + int intIndex, + IOInterruptEventSource::ActionBlock action, + FilterBlock filter); #endif /* __BLOCKS__ */ #if XNU_KERNEL_PRIVATE - enum - { - kFilterBlock = kSubClass0, - }; + enum{ + kFilterBlock = kSubClass0, + }; #endif /*! @function init - @abstract Primary initialiser for the IOFilterInterruptEventSource class. - @param owner Owner/client of this event source. - @param action 'C' Function to call when something happens. - @param filter 'C' Function to call in primary interrupt context. - @param provider Service that provides interrupts. - @param intIndex Interrupt source within provider. Defaults to 0. - @result true if the inherited classes and this instance initialise -successfully. */ - virtual bool init(OSObject *owner, - IOInterruptEventSource::Action action, - Filter filter, - IOService *provider, - int intIndex = 0); - - virtual void free( void ) APPLE_KEXT_OVERRIDE; + * @abstract Primary initialiser for the IOFilterInterruptEventSource class. + * @param owner Owner/client of this event source. + * @param action 'C' Function to call when something happens. + * @param filter 'C' Function to call in primary interrupt context. + * @param provider Service that provides interrupts. + * @param intIndex Interrupt source within provider. Defaults to 0. 
+ * @result true if the inherited classes and this instance initialise + * successfully. */ + virtual bool init(OSObject *owner, + IOInterruptEventSource::Action action, + Filter filter, + IOService *provider, + int intIndex = 0); + + virtual void free( void ) APPLE_KEXT_OVERRIDE; /*! @function signalInterrupt - @abstract Cause the work loop to schedule the action. - @discussion Cause the work loop to schedule the interrupt action even if the filter routine returns 'false'. Note well the interrupting condition MUST be cleared from the hardware otherwise an infinite process interrupt loop will occur. Use this function when SoftDMA is desired. See $link IOFilterInterruptSource::Filter */ - virtual void signalInterrupt(); + * @abstract Cause the work loop to schedule the action. + * @discussion Cause the work loop to schedule the interrupt action even if the filter routine returns 'false'. Note well the interrupting condition MUST be cleared from the hardware otherwise an infinite process interrupt loop will occur. Use this function when SoftDMA is desired. See $link IOFilterInterruptSource::Filter */ + virtual void signalInterrupt(); /*! @function getFilterAction - @abstract Get'ter for filterAction variable. - @result value of filterAction. */ - virtual Filter getFilterAction() const; + * @abstract Get'ter for filterAction variable. + * @result value of filterAction. */ + virtual Filter getFilterAction() const; #ifdef __BLOCKS__ /*! @function getFilterActionBlock - @abstract Get'ter for filterAction variable. - @result value of filterAction. */ - FilterBlock getFilterActionBlock() const; + * @abstract Get'ter for filterAction variable. + * @result value of filterAction. */ + FilterBlock getFilterActionBlock() const; #endif /* __BLOCKS__ */ /*! @function normalInterruptOccurred - @abstract Override $link IOInterruptEventSource::normalInterruptOccured to make a filter callout. */ - virtual void normalInterruptOccurred(void *self, IOService *prov, int ind) APPLE_KEXT_OVERRIDE; + * @abstract Override $link IOInterruptEventSource::normalInterruptOccured to make a filter callout. */ + virtual void normalInterruptOccurred(void *self, IOService *prov, int ind) APPLE_KEXT_OVERRIDE; /*! @function disableInterruptOccurred - @abstract Override $link IOInterruptEventSource::disableInterruptOccurred to make a filter callout. */ - virtual void disableInterruptOccurred(void *self, IOService *prov, int ind) APPLE_KEXT_OVERRIDE; + * @abstract Override $link IOInterruptEventSource::disableInterruptOccurred to make a filter callout. 
*/ + virtual void disableInterruptOccurred(void *self, IOService *prov, int ind) APPLE_KEXT_OVERRIDE; private: - OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 0); - OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 1); - OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 2); - OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 3); - OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 4); - OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 5); - OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 6); - OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 7); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 0); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 1); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 2); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 3); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 4); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 5); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 6); + OSMetaClassDeclareReservedUnused(IOFilterInterruptEventSource, 7); }; #endif /* !_IOKIT_IOFILTERINTERRUPTEVENTSOURCE_H */ diff --git a/iokit/IOKit/IOHibernatePrivate.h b/iokit/IOKit/IOHibernatePrivate.h index f3195d0f1..df8be032a 100644 --- a/iokit/IOKit/IOHibernatePrivate.h +++ b/iokit/IOKit/IOHibernatePrivate.h @@ -37,271 +37,256 @@ extern "C" { #include #include -extern int kdb_printf(const char *format, ...) __printflike(1,2); +extern int kdb_printf(const char *format, ...) __printflike(1, 2); #endif #ifndef __IOKIT_IOHIBERNATEPRIVATE_H #define __IOKIT_IOHIBERNATEPRIVATE_H -struct IOPolledFileExtent -{ - uint64_t start; - uint64_t length; +struct IOPolledFileExtent { + uint64_t start; + uint64_t length; }; typedef struct IOPolledFileExtent IOPolledFileExtent; -struct IOHibernateImageHeader -{ - uint64_t imageSize; - uint64_t image1Size; +struct IOHibernateImageHeader { + uint64_t imageSize; + uint64_t image1Size; - uint32_t restore1CodePhysPage; - uint32_t reserved1; - uint64_t restore1CodeVirt; - uint32_t restore1PageCount; - uint32_t restore1CodeOffset; - uint32_t restore1StackOffset; + uint32_t restore1CodePhysPage; + uint32_t reserved1; + uint64_t restore1CodeVirt; + uint32_t restore1PageCount; + uint32_t restore1CodeOffset; + uint32_t restore1StackOffset; - uint32_t pageCount; - uint32_t bitmapSize; + uint32_t pageCount; + uint32_t bitmapSize; - uint32_t restore1Sum; - uint32_t image1Sum; - uint32_t image2Sum; + uint32_t restore1Sum; + uint32_t image1Sum; + uint32_t image2Sum; - uint32_t actualRestore1Sum; - uint32_t actualImage1Sum; - uint32_t actualImage2Sum; + uint32_t actualRestore1Sum; + uint32_t actualImage1Sum; + uint32_t actualImage2Sum; - uint32_t actualUncompressedPages; - uint32_t conflictCount; - uint32_t nextFree; + uint32_t actualUncompressedPages; + uint32_t conflictCount; + uint32_t nextFree; - uint32_t signature; - uint32_t processorFlags; + uint32_t signature; + uint32_t processorFlags; - uint32_t runtimePages; - uint32_t runtimePageCount; - uint64_t runtimeVirtualPages __attribute__ ((packed)); + uint32_t runtimePages; + uint32_t runtimePageCount; + uint64_t runtimeVirtualPages __attribute__ ((packed)); - uint32_t performanceDataStart; - uint32_t performanceDataSize; + uint32_t performanceDataStart; + uint32_t performanceDataSize; - uint64_t encryptStart __attribute__ ((packed)); - uint64_t machineSignature __attribute__ ((packed)); + 
uint64_t encryptStart __attribute__ ((packed)); + uint64_t machineSignature __attribute__ ((packed)); - uint32_t previewSize; - uint32_t previewPageListSize; + uint32_t previewSize; + uint32_t previewPageListSize; - uint32_t diag[4]; + uint32_t diag[4]; - uint32_t handoffPages; - uint32_t handoffPageCount; + uint32_t handoffPages; + uint32_t handoffPageCount; - uint32_t systemTableOffset; + uint32_t systemTableOffset; - uint32_t debugFlags; - uint32_t options; - uint32_t sleepTime; - uint32_t compression; + uint32_t debugFlags; + uint32_t options; + uint32_t sleepTime; + uint32_t compression; - uint8_t bridgeBootSessionUUID[16]; + uint8_t bridgeBootSessionUUID[16]; - uint32_t reserved[54]; // make sizeof == 512 - uint32_t booterTime0; - uint32_t booterTime1; - uint32_t booterTime2; + uint32_t reserved[54]; // make sizeof == 512 + uint32_t booterTime0; + uint32_t booterTime1; + uint32_t booterTime2; - uint32_t booterStart; - uint32_t smcStart; - uint32_t connectDisplayTime; - uint32_t splashTime; - uint32_t booterTime; - uint32_t trampolineTime; + uint32_t booterStart; + uint32_t smcStart; + uint32_t connectDisplayTime; + uint32_t splashTime; + uint32_t booterTime; + uint32_t trampolineTime; - uint64_t encryptEnd __attribute__ ((packed)); - uint64_t deviceBase __attribute__ ((packed)); - uint32_t deviceBlockSize; + uint64_t encryptEnd __attribute__ ((packed)); + uint64_t deviceBase __attribute__ ((packed)); + uint32_t deviceBlockSize; - uint32_t fileExtentMapSize; - IOPolledFileExtent fileExtentMap[2]; + uint32_t fileExtentMapSize; + IOPolledFileExtent fileExtentMap[2]; }; typedef struct IOHibernateImageHeader IOHibernateImageHeader; -enum -{ - kIOHibernateDebugRestoreLogs = 0x00000001 +enum{ + kIOHibernateDebugRestoreLogs = 0x00000001 }; // options & IOHibernateOptions property -enum -{ - kIOHibernateOptionSSD = 0x00000001, - kIOHibernateOptionColor = 0x00000002, - kIOHibernateOptionProgress = 0x00000004, - kIOHibernateOptionDarkWake = 0x00000008, - kIOHibernateOptionHWEncrypt = 0x00000010, +enum{ + kIOHibernateOptionSSD = 0x00000001, + kIOHibernateOptionColor = 0x00000002, + kIOHibernateOptionProgress = 0x00000004, + kIOHibernateOptionDarkWake = 0x00000008, + kIOHibernateOptionHWEncrypt = 0x00000010, }; -struct hibernate_bitmap_t -{ - uint32_t first_page; - uint32_t last_page; - uint32_t bitmapwords; - uint32_t bitmap[0]; +struct hibernate_bitmap_t { + uint32_t first_page; + uint32_t last_page; + uint32_t bitmapwords; + uint32_t bitmap[0]; }; typedef struct hibernate_bitmap_t hibernate_bitmap_t; -struct hibernate_page_list_t -{ - uint32_t list_size; - uint32_t page_count; - uint32_t bank_count; - hibernate_bitmap_t bank_bitmap[0]; +struct hibernate_page_list_t { + uint32_t list_size; + uint32_t page_count; + uint32_t bank_count; + hibernate_bitmap_t bank_bitmap[0]; }; typedef struct hibernate_page_list_t hibernate_page_list_t; #if defined(_AES_H) -struct hibernate_cryptwakevars_t -{ - uint8_t aes_iv[AES_BLOCK_SIZE]; +struct hibernate_cryptwakevars_t { + uint8_t aes_iv[AES_BLOCK_SIZE]; }; typedef struct hibernate_cryptwakevars_t hibernate_cryptwakevars_t; -struct hibernate_cryptvars_t -{ - uint8_t aes_iv[AES_BLOCK_SIZE]; - aes_ctx ctx; +struct hibernate_cryptvars_t { + uint8_t aes_iv[AES_BLOCK_SIZE]; + aes_ctx ctx; }; typedef struct hibernate_cryptvars_t hibernate_cryptvars_t; #endif /* defined(_AES_H) */ -enum -{ - kIOHibernateHandoffType = 0x686f0000, - kIOHibernateHandoffTypeEnd = kIOHibernateHandoffType + 0, - kIOHibernateHandoffTypeGraphicsInfo = kIOHibernateHandoffType + 1, - 
kIOHibernateHandoffTypeCryptVars = kIOHibernateHandoffType + 2, - kIOHibernateHandoffTypeMemoryMap = kIOHibernateHandoffType + 3, - kIOHibernateHandoffTypeDeviceTree = kIOHibernateHandoffType + 4, - kIOHibernateHandoffTypeDeviceProperties = kIOHibernateHandoffType + 5, - kIOHibernateHandoffTypeKeyStore = kIOHibernateHandoffType + 6, - kIOHibernateHandoffTypeVolumeCryptKey = kIOHibernateHandoffType + 7, +enum{ + kIOHibernateHandoffType = 0x686f0000, + kIOHibernateHandoffTypeEnd = kIOHibernateHandoffType + 0, + kIOHibernateHandoffTypeGraphicsInfo = kIOHibernateHandoffType + 1, + kIOHibernateHandoffTypeCryptVars = kIOHibernateHandoffType + 2, + kIOHibernateHandoffTypeMemoryMap = kIOHibernateHandoffType + 3, + kIOHibernateHandoffTypeDeviceTree = kIOHibernateHandoffType + 4, + kIOHibernateHandoffTypeDeviceProperties = kIOHibernateHandoffType + 5, + kIOHibernateHandoffTypeKeyStore = kIOHibernateHandoffType + 6, + kIOHibernateHandoffTypeVolumeCryptKey = kIOHibernateHandoffType + 7, }; -struct IOHibernateHandoff -{ - uint32_t type; - uint32_t bytecount; - uint8_t data[]; +struct IOHibernateHandoff { + uint32_t type; + uint32_t bytecount; + uint8_t data[]; }; typedef struct IOHibernateHandoff IOHibernateHandoff; -enum -{ - kIOHibernateProgressCount = 19, - kIOHibernateProgressWidth = 7, - kIOHibernateProgressHeight = 16, - kIOHibernateProgressSpacing = 3, - kIOHibernateProgressOriginY = 81, +enum{ + kIOHibernateProgressCount = 19, + kIOHibernateProgressWidth = 7, + kIOHibernateProgressHeight = 16, + kIOHibernateProgressSpacing = 3, + kIOHibernateProgressOriginY = 81, - kIOHibernateProgressSaveUnderSize = 2*5+14*2, + kIOHibernateProgressSaveUnderSize = 2 * 5 + 14 * 2, - kIOHibernateProgressLightGray = 230, - kIOHibernateProgressMidGray = 174, - kIOHibernateProgressDarkGray = 92 + kIOHibernateProgressLightGray = 230, + kIOHibernateProgressMidGray = 174, + kIOHibernateProgressDarkGray = 92 }; -enum -{ - kIOHibernatePostWriteSleep = 0, - kIOHibernatePostWriteWake = 1, - kIOHibernatePostWriteHalt = 2, - kIOHibernatePostWriteRestart = 3 +enum{ + kIOHibernatePostWriteSleep = 0, + kIOHibernatePostWriteWake = 1, + kIOHibernatePostWriteHalt = 2, + kIOHibernatePostWriteRestart = 3 }; -struct hibernate_graphics_t -{ - uint64_t physicalAddress; // Base address of video memory - int32_t gfxStatus; // EFI config restore status - uint32_t rowBytes; // Number of bytes per pixel row - uint32_t width; // Width - uint32_t height; // Height - uint32_t depth; // Pixel Depth +struct hibernate_graphics_t { + uint64_t physicalAddress; // Base address of video memory + int32_t gfxStatus; // EFI config restore status + uint32_t rowBytes; // Number of bytes per pixel row + uint32_t width; // Width + uint32_t height; // Height + uint32_t depth; // Pixel Depth - uint8_t progressSaveUnder[kIOHibernateProgressCount][kIOHibernateProgressSaveUnderSize]; + uint8_t progressSaveUnder[kIOHibernateProgressCount][kIOHibernateProgressSaveUnderSize]; }; typedef struct hibernate_graphics_t hibernate_graphics_t; -#define DECLARE_IOHIBERNATEPROGRESSALPHA \ -static const uint8_t gIOHibernateProgressAlpha \ -[kIOHibernateProgressHeight][kIOHibernateProgressWidth] = \ -{ \ - { 0x00,0x63,0xd8,0xf0,0xd8,0x63,0x00 }, \ - { 0x51,0xff,0xff,0xff,0xff,0xff,0x51 }, \ - { 0xae,0xff,0xff,0xff,0xff,0xff,0xae }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 
0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ - { 0xae,0xff,0xff,0xff,0xff,0xff,0xae }, \ - { 0x54,0xff,0xff,0xff,0xff,0xff,0x54 }, \ - { 0x00,0x66,0xdb,0xf3,0xdb,0x66,0x00 } \ +#define DECLARE_IOHIBERNATEPROGRESSALPHA \ +static const uint8_t gIOHibernateProgressAlpha \ +[kIOHibernateProgressHeight][kIOHibernateProgressWidth] = \ +{ \ + { 0x00,0x63,0xd8,0xf0,0xd8,0x63,0x00 }, \ + { 0x51,0xff,0xff,0xff,0xff,0xff,0x51 }, \ + { 0xae,0xff,0xff,0xff,0xff,0xff,0xae }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xc3,0xff,0xff,0xff,0xff,0xff,0xc3 }, \ + { 0xae,0xff,0xff,0xff,0xff,0xff,0xae }, \ + { 0x54,0xff,0xff,0xff,0xff,0xff,0x54 }, \ + { 0x00,0x66,0xdb,0xf3,0xdb,0x66,0x00 } \ }; -struct hibernate_preview_t -{ - uint32_t imageCount; // Number of images - uint32_t width; // Width - uint32_t height; // Height - uint32_t depth; // Pixel Depth - uint32_t lockTime; // Lock time - uint32_t reservedG[8]; // reserved - uint32_t reservedK[8]; // reserved +struct hibernate_preview_t { + uint32_t imageCount; // Number of images + uint32_t width; // Width + uint32_t height; // Height + uint32_t depth; // Pixel Depth + uint32_t lockTime; // Lock time + uint32_t reservedG[8];// reserved + uint32_t reservedK[8];// reserved }; typedef struct hibernate_preview_t hibernate_preview_t; -struct hibernate_statistics_t -{ - uint64_t image1Size; - uint64_t imageSize; - uint32_t image1Pages; - uint32_t imagePages; - uint32_t booterStart; - uint32_t smcStart; - uint32_t booterDuration; - uint32_t booterConnectDisplayDuration; - uint32_t booterSplashDuration; - uint32_t booterDuration0; - uint32_t booterDuration1; - uint32_t booterDuration2; - uint32_t trampolineDuration; - uint32_t kernelImageReadDuration; - - uint32_t graphicsReadyTime; - uint32_t wakeNotificationTime; - uint32_t lockScreenReadyTime; - uint32_t hidReadyTime; - - uint32_t wakeCapability; - uint32_t resvA[15]; +struct hibernate_statistics_t { + uint64_t image1Size; + uint64_t imageSize; + uint32_t image1Pages; + uint32_t imagePages; + uint32_t booterStart; + uint32_t smcStart; + uint32_t booterDuration; + uint32_t booterConnectDisplayDuration; + uint32_t booterSplashDuration; + uint32_t booterDuration0; + uint32_t booterDuration1; + uint32_t booterDuration2; + uint32_t trampolineDuration; + uint32_t kernelImageReadDuration; + + uint32_t graphicsReadyTime; + uint32_t wakeNotificationTime; + uint32_t lockScreenReadyTime; + uint32_t hidReadyTime; + + uint32_t wakeCapability; + uint32_t resvA[15]; }; typedef struct hibernate_statistics_t hibernate_statistics_t; -#define kIOSysctlHibernateStatistics "kern.hibernatestatistics" -#define kIOSysctlHibernateGraphicsReady "kern.hibernategraphicsready" -#define kIOSysctlHibernateWakeNotify "kern.hibernatewakenotification" -#define kIOSysctlHibernateScreenReady "kern.hibernatelockscreenready" -#define kIOSysctlHibernateHIDReady "kern.hibernatehidready" +#define kIOSysctlHibernateStatistics "kern.hibernatestatistics" +#define kIOSysctlHibernateGraphicsReady "kern.hibernategraphicsready" +#define 
kIOSysctlHibernateWakeNotify "kern.hibernatewakenotification" +#define kIOSysctlHibernateScreenReady "kern.hibernatelockscreenready" +#define kIOSysctlHibernateHIDReady "kern.hibernatehidready" #ifdef KERNEL @@ -330,21 +315,21 @@ hibernate_page_list_allocate(boolean_t log); kern_return_t hibernate_alloc_page_lists( - hibernate_page_list_t ** page_list_ret, - hibernate_page_list_t ** page_list_wired_ret, - hibernate_page_list_t ** page_list_pal_ret); + hibernate_page_list_t ** page_list_ret, + hibernate_page_list_t ** page_list_wired_ret, + hibernate_page_list_t ** page_list_pal_ret); kern_return_t hibernate_setup(IOHibernateImageHeader * header, - boolean_t vmflush, - hibernate_page_list_t * page_list, - hibernate_page_list_t * page_list_wired, - hibernate_page_list_t * page_list_pal); + boolean_t vmflush, + hibernate_page_list_t * page_list, + hibernate_page_list_t * page_list_wired, + hibernate_page_list_t * page_list_pal); kern_return_t hibernate_teardown(hibernate_page_list_t * page_list, - hibernate_page_list_t * page_list_wired, - hibernate_page_list_t * page_list_pal); + hibernate_page_list_t * page_list_wired, + hibernate_page_list_t * page_list_pal); kern_return_t hibernate_pin_swap(boolean_t begin); @@ -374,25 +359,25 @@ hibernate_vm_locks_are_safe(void); // mark pages not to be saved, based on VM system accounting void hibernate_page_list_setall(hibernate_page_list_t * page_list, - hibernate_page_list_t * page_list_wired, - hibernate_page_list_t * page_list_pal, - boolean_t preflight, - boolean_t discard_all, - uint32_t * pagesOut); + hibernate_page_list_t * page_list_wired, + hibernate_page_list_t * page_list_pal, + boolean_t preflight, + boolean_t discard_all, + uint32_t * pagesOut); // mark pages to be saved, or pages not to be saved but available // for scratch usage during restore void hibernate_page_list_setall_machine(hibernate_page_list_t * page_list, - hibernate_page_list_t * page_list_wired, - boolean_t preflight, - uint32_t * pagesOut); + hibernate_page_list_t * page_list_wired, + boolean_t preflight, + uint32_t * pagesOut); // mark pages not to be saved and not for scratch usage during restore void hibernate_page_list_set_volatile( hibernate_page_list_t * page_list, - hibernate_page_list_t * page_list_wired, - uint32_t * pagesOut); + hibernate_page_list_t * page_list_wired, + uint32_t * pagesOut); void hibernate_page_list_discard(hibernate_page_list_t * page_list); @@ -402,7 +387,7 @@ hibernate_should_abort(void); void hibernate_set_page_state(hibernate_page_list_t * page_list, hibernate_page_list_t * page_list_wired, - vm_offset_t ppnum, vm_offset_t count, uint32_t kind); + vm_offset_t ppnum, vm_offset_t count, uint32_t kind); void hibernate_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page); @@ -431,13 +416,13 @@ long hibernate_kernel_entrypoint(uint32_t p1, uint32_t p2, uint32_t p3, uint32_t p4); void hibernate_newruntime_map(void * map, vm_size_t map_size, - uint32_t system_table_offset); + uint32_t system_table_offset); extern uint32_t gIOHibernateState; extern uint32_t gIOHibernateMode; extern uint32_t gIOHibernateDebugFlags; -extern uint32_t gIOHibernateFreeTime; // max time to spend freeing pages (ms) +extern uint32_t gIOHibernateFreeTime; // max time to spend freeing pages (ms) extern boolean_t gIOHibernateStandbyDisabled; extern uint8_t gIOHibernateRestoreStack[]; extern uint8_t gIOHibernateRestoreStackEnd[]; @@ -446,107 +431,101 @@ extern IOHibernateImageHeader * gIOHibernateCurrentHeader; #define HIBLOGFROMPANIC(fmt, args...) 
\ { if (kernel_debugger_entry_count) { kdb_printf(fmt, ## args); } } -#define HIBLOG(fmt, args...) \ +#define HIBLOG(fmt, args...) \ { if (kernel_debugger_entry_count) { kdb_printf(fmt, ## args); } else { kprintf(fmt, ## args); printf(fmt, ## args); } } -#define HIBPRINT(fmt, args...) \ +#define HIBPRINT(fmt, args...) \ { if (kernel_debugger_entry_count) { kdb_printf(fmt, ## args); } else { kprintf(fmt, ## args); } } #endif /* KERNEL */ // gIOHibernateState, kIOHibernateStateKey -enum -{ - kIOHibernateStateInactive = 0, - kIOHibernateStateHibernating = 1, /* writing image */ - kIOHibernateStateWakingFromHibernate = 2 /* booted and restored image */ +enum{ + kIOHibernateStateInactive = 0, + kIOHibernateStateHibernating = 1,/* writing image */ + kIOHibernateStateWakingFromHibernate = 2 /* booted and restored image */ }; // gIOHibernateMode, kIOHibernateModeKey -enum -{ - kIOHibernateModeOn = 0x00000001, - kIOHibernateModeSleep = 0x00000002, - kIOHibernateModeEncrypt = 0x00000004, - kIOHibernateModeDiscardCleanInactive = 0x00000008, - kIOHibernateModeDiscardCleanActive = 0x00000010, - kIOHibernateModeSwitch = 0x00000020, - kIOHibernateModeRestart = 0x00000040, - kIOHibernateModeSSDInvert = 0x00000080, - kIOHibernateModeFileResize = 0x00000100, +enum{ + kIOHibernateModeOn = 0x00000001, + kIOHibernateModeSleep = 0x00000002, + kIOHibernateModeEncrypt = 0x00000004, + kIOHibernateModeDiscardCleanInactive = 0x00000008, + kIOHibernateModeDiscardCleanActive = 0x00000010, + kIOHibernateModeSwitch = 0x00000020, + kIOHibernateModeRestart = 0x00000040, + kIOHibernateModeSSDInvert = 0x00000080, + kIOHibernateModeFileResize = 0x00000100, }; // IOHibernateImageHeader.signature -enum -{ - kIOHibernateHeaderSignature = 0x73696d65, - kIOHibernateHeaderInvalidSignature = 0x7a7a7a7a, - kIOHibernateHeaderOpenSignature = 0xf1e0be9d, - kIOHibernateHeaderDebugDataSignature = 0xfcddfcdd +enum{ + kIOHibernateHeaderSignature = 0x73696d65, + kIOHibernateHeaderInvalidSignature = 0x7a7a7a7a, + kIOHibernateHeaderOpenSignature = 0xf1e0be9d, + kIOHibernateHeaderDebugDataSignature = 0xfcddfcdd }; // kind for hibernate_set_page_state() -enum -{ - kIOHibernatePageStateFree = 0, - kIOHibernatePageStateWiredSave = 1, - kIOHibernatePageStateUnwiredSave = 2 +enum{ + kIOHibernatePageStateFree = 0, + kIOHibernatePageStateWiredSave = 1, + kIOHibernatePageStateUnwiredSave = 2 }; -#define kIOHibernateModeKey "Hibernate Mode" -#define kIOHibernateFileKey "Hibernate File" -#define kIOHibernateFileMinSizeKey "Hibernate File Min" -#define kIOHibernateFileMaxSizeKey "Hibernate File Max" -#define kIOHibernateFreeRatioKey "Hibernate Free Ratio" -#define kIOHibernateFreeTimeKey "Hibernate Free Time" +#define kIOHibernateModeKey "Hibernate Mode" +#define kIOHibernateFileKey "Hibernate File" +#define kIOHibernateFileMinSizeKey "Hibernate File Min" +#define kIOHibernateFileMaxSizeKey "Hibernate File Max" +#define kIOHibernateFreeRatioKey "Hibernate Free Ratio" +#define kIOHibernateFreeTimeKey "Hibernate Free Time" -#define kIOHibernateStateKey "IOHibernateState" -#define kIOHibernateFeatureKey "Hibernation" -#define kIOHibernatePreviewBufferKey "IOPreviewBuffer" +#define kIOHibernateStateKey "IOHibernateState" +#define kIOHibernateFeatureKey "Hibernation" +#define kIOHibernatePreviewBufferKey "IOPreviewBuffer" #ifndef kIOHibernatePreviewActiveKey -#define kIOHibernatePreviewActiveKey "IOHibernatePreviewActive" +#define kIOHibernatePreviewActiveKey "IOHibernatePreviewActive" // values for kIOHibernatePreviewActiveKey enum { - 
kIOHibernatePreviewActive = 0x00000001, - kIOHibernatePreviewUpdates = 0x00000002 + kIOHibernatePreviewActive = 0x00000001, + kIOHibernatePreviewUpdates = 0x00000002 }; #endif #define kIOHibernateOptionsKey "IOHibernateOptions" #define kIOHibernateGfxStatusKey "IOHibernateGfxStatus" enum { - kIOHibernateGfxStatusUnknown = ((int32_t) 0xFFFFFFFF) + kIOHibernateGfxStatusUnknown = ((int32_t) 0xFFFFFFFF) }; -#define kIOHibernateBootImageKey "boot-image" -#define kIOHibernateBootImageKeyKey "boot-image-key" -#define kIOHibernateBootSignatureKey "boot-signature" +#define kIOHibernateBootImageKey "boot-image" +#define kIOHibernateBootImageKeyKey "boot-image-key" +#define kIOHibernateBootSignatureKey "boot-signature" -#define kIOHibernateMemorySignatureKey "memory-signature" +#define kIOHibernateMemorySignatureKey "memory-signature" #define kIOHibernateMemorySignatureEnvKey "mem-sig" -#define kIOHibernateMachineSignatureKey "machine-signature" +#define kIOHibernateMachineSignatureKey "machine-signature" -#define kIOHibernateRTCVariablesKey "IOHibernateRTCVariables" -#define kIOHibernateSMCVariablesKey "IOHibernateSMCVariables" +#define kIOHibernateRTCVariablesKey "IOHibernateRTCVariables" +#define kIOHibernateSMCVariablesKey "IOHibernateSMCVariables" -#define kIOHibernateBootSwitchVarsKey "boot-switch-vars" +#define kIOHibernateBootSwitchVarsKey "boot-switch-vars" -#define kIOHibernateBootNoteKey "boot-note" +#define kIOHibernateBootNoteKey "boot-note" #define kIOHibernateUseKernelInterpreter 0x80000000 -enum -{ +enum{ kIOPreviewImageIndexDesktop = 0, kIOPreviewImageIndexLockScreen = 1, kIOPreviewImageCount = 2 }; -enum -{ +enum{ kIOScreenLockNoLock = 1, kIOScreenLockUnlocked = 2, kIOScreenLockLocked = 3, diff --git a/iokit/IOKit/IOInterleavedMemoryDescriptor.h b/iokit/IOKit/IOInterleavedMemoryDescriptor.h index e1c122aef..5221ab19f 100644 --- a/iokit/IOKit/IOInterleavedMemoryDescriptor.h +++ b/iokit/IOKit/IOInterleavedMemoryDescriptor.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,91 +32,91 @@ #include /*! @class IOInterleavedMemoryDescriptor : public IOMemoryDescriptor - @abstract The IOInterleavedMemoryDescriptor object describes a memory area made up of portions of several other IOMemoryDescriptors. - @discussion The IOInterleavedMemoryDescriptor object represents interleaved ranges of memory, specified as an ordered list of portions of individual IOMemoryDescriptors. 
The portions are chained end-to-end to make up a single contiguous buffer. */ + * @abstract The IOInterleavedMemoryDescriptor object describes a memory area made up of portions of several other IOMemoryDescriptors. + * @discussion The IOInterleavedMemoryDescriptor object represents interleaved ranges of memory, specified as an ordered list of portions of individual IOMemoryDescriptors. The portions are chained end-to-end to make up a single contiguous buffer. */ class IOInterleavedMemoryDescriptor : public IOMemoryDescriptor { - OSDeclareDefaultStructors(IOInterleavedMemoryDescriptor); + OSDeclareDefaultStructors(IOInterleavedMemoryDescriptor); protected: - IOByteCount _descriptorCapacity; - UInt32 _descriptorCount; - IOMemoryDescriptor ** _descriptors; - IOByteCount * _descriptorOffsets; - IOByteCount * _descriptorLengths; - bool _descriptorPrepared; + IOByteCount _descriptorCapacity; + UInt32 _descriptorCount; + IOMemoryDescriptor ** _descriptors; + IOByteCount * _descriptorOffsets; + IOByteCount * _descriptorLengths; + bool _descriptorPrepared; - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; public: /*! @function withCapacity - @abstract Create an IOInterleavedMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. - @discussion This method creates and initializes an IOInterleavedMemoryDescriptor for memory consisting of portions of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. - @param capacity The maximum number of IOMemoryDescriptors that may be subsequently added to this IOInterleavedMemoryDescriptor. - @param direction An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - @result The created IOInterleavedMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + * @abstract Create an IOInterleavedMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. + * @discussion This method creates and initializes an IOInterleavedMemoryDescriptor for memory consisting of portions of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. + * @param capacity The maximum number of IOMemoryDescriptors that may be subsequently added to this IOInterleavedMemoryDescriptor. + * @param direction An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * @result The created IOInterleavedMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOInterleavedMemoryDescriptor * withCapacity( IOByteCount capacity, - IODirection direction); + static IOInterleavedMemoryDescriptor * withCapacity( IOByteCount capacity, + IODirection direction); /*! @function initWithCapacity - @abstract Initialize an IOInterleavedMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. - @discussion This method initializes an IOInterleavedMemoryDescriptor for memory consisting of portions of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. - @param capacity The maximum number of IOMemoryDescriptors that may be subsequently added to this IOInterleavedMemoryDescriptor. 
- @param direction An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - @result The created IOInterleavedMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + * @abstract Initialize an IOInterleavedMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. + * @discussion This method initializes an IOInterleavedMemoryDescriptor for memory consisting of portions of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. + * @param capacity The maximum number of IOMemoryDescriptors that may be subsequently added to this IOInterleavedMemoryDescriptor. + * @param direction An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * @result The created IOInterleavedMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - virtual bool initWithCapacity( IOByteCount capacity, - IODirection direction ); + virtual bool initWithCapacity( IOByteCount capacity, + IODirection direction ); /*! @function clearMemoryDescriptors - @abstract Clear all of the IOMemoryDescriptors currently contained in and reset the IOInterleavedMemoryDescriptor. - @discussion Clears each IOMemoryDescriptor by completing (if needed) and releasing. The IOInterleavedMemoryDescriptor is then reset and may accept new descriptors up to the capacity specified when it was created. - @param direction An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. */ + * @abstract Clear all of the IOMemoryDescriptors currently contained in and reset the IOInterleavedMemoryDescriptor. + * @discussion Clears each IOMemoryDescriptor by completing (if needed) and releasing. The IOInterleavedMemoryDescriptor is then reset and may accept new descriptors up to the capacity specified when it was created. + * @param direction An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. */ - virtual void clearMemoryDescriptors( IODirection direction = kIODirectionNone ); + virtual void clearMemoryDescriptors( IODirection direction = kIODirectionNone ); /*! @function setMemoryDescriptor - @abstract Add a portion of an IOMemoryDescriptor to the IOInterleavedMemoryDescriptor. - @discussion This method adds the portion of an IOMemoryDescriptor described by the offset and length parameters to the end of the IOInterleavedMemoryDescriptor. A single IOMemoryDescriptor may be added as many times as there is room for it. The offset and length must describe a portion entirely within the IOMemoryDescriptor. - @param descriptor An IOMemoryDescriptor to be added to the IOInterleavedMemoryDescriptor. Its direction must be compatible with that of the IOInterleavedMemoryDescriptor. - @param offset The offset into the IOMemoryDescriptor of the portion that will be added to the virtualized buffer. - @param length The length of the portion of the IOMemoryDescriptor to be added to the virtualized buffer. - @result Returns true the portion was successfully added. */ + * @abstract Add a portion of an IOMemoryDescriptor to the IOInterleavedMemoryDescriptor. 
+ * @discussion This method adds the portion of an IOMemoryDescriptor described by the offset and length parameters to the end of the IOInterleavedMemoryDescriptor. A single IOMemoryDescriptor may be added as many times as there is room for it. The offset and length must describe a portion entirely within the IOMemoryDescriptor. + * @param descriptor An IOMemoryDescriptor to be added to the IOInterleavedMemoryDescriptor. Its direction must be compatible with that of the IOInterleavedMemoryDescriptor. + * @param offset The offset into the IOMemoryDescriptor of the portion that will be added to the virtualized buffer. + * @param length The length of the portion of the IOMemoryDescriptor to be added to the virtualized buffer. + * @result Returns true if the portion was successfully added. */ - virtual bool setMemoryDescriptor( IOMemoryDescriptor * descriptor, - IOByteCount offset, - IOByteCount length ); + virtual bool setMemoryDescriptor( IOMemoryDescriptor * descriptor, + IOByteCount offset, + IOByteCount length ); /*! @function getPhysicalSegment - @abstract Break a memory descriptor into its physically contiguous segments. - @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset. - @param offset A byte offset into the memory whose physical address to return. - @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset. - @result A physical address, or zero if the offset is beyond the length of the memory. */ + * @abstract Break a memory descriptor into its physically contiguous segments. + * @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset. + * @param offset A byte offset into the memory whose physical address to return. + * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset. + * @result A physical address, or zero if the offset is beyond the length of the memory. */ - virtual addr64_t getPhysicalSegment( IOByteCount offset, - IOByteCount * length, - IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE; + virtual addr64_t getPhysicalSegment( IOByteCount offset, + IOByteCount * length, + IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE; /*! @function prepare - @abstract Prepare the memory for an I/O transfer. - @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. This method need not called for non-pageable memory. - @param forDirection The direction of the I/O to be performed, or kIODirectionNone for the direction specified by the memory descriptor. - @result An IOReturn code. */ + * @abstract Prepare the memory for an I/O transfer. + * @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. This method need not be called for non-pageable memory. + * @param forDirection The direction of the I/O to be performed, or kIODirectionNone for the direction specified by the memory descriptor. + * @result An IOReturn code.
*/ - virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; /*! @function complete - @abstract Complete processing of the memory after an I/O transfer finishes. - @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. - @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. - @result An IOReturn code. */ + * @abstract Complete processing of the memory after an I/O transfer finishes. + * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. + * @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + * @result An IOReturn code. */ - virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; + virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; }; #endif /* !_IOINTERLEAVEDMEMORYDESCRIPTOR_H */ diff --git a/iokit/IOKit/IOInterruptAccounting.h b/iokit/IOKit/IOInterruptAccounting.h index d2715d0b0..3231f8c22 100644 --- a/iokit/IOKit/IOInterruptAccounting.h +++ b/iokit/IOKit/IOInterruptAccounting.h @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -133,17 +133,17 @@ * path). 
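Taken together, the IOInterleavedMemoryDescriptor methods above imply the following usage pattern. A hedged sketch: descA, descB, and the offsets/lengths are hypothetical stand-ins for already-created IOMemoryDescriptors:

    IOInterleavedMemoryDescriptor * imd =
        IOInterleavedMemoryDescriptor::withCapacity(2, kIODirectionOut);
    if (imd) {
        imd->setMemoryDescriptor(descA, 0, lenA);      // first portion of the virtual buffer
        imd->setMemoryDescriptor(descB, offB, lenB);   // chained end-to-end after descA's portion
        if (imd->prepare() == kIOReturnSuccess) {
            IOByteCount segLen;
            addr64_t phys = imd->getPhysicalSegment(0, &segLen); // first physically contiguous run
            // ... program the transfer with (phys, segLen) ...
            imd->complete();
        }
        imd->clearMemoryDescriptors();  // completes (if needed) and releases the contained descriptors
        imd->release();
    }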
*/ enum { - kInterruptAccountingFirstLevelCountIndex = 0, /* Number of times we invoked the top level handler */ - kInterruptAccountingSecondLevelCountIndex, /* Number of times we invoked the workloop action */ - kInterruptAccountingFirstLevelTimeIndex, /* Time spent in the top level handler, if one was installed */ - kInterruptAccountingSecondLevelCPUTimeIndex, /* CPU time spent in the workloop action */ - kInterruptAccountingSecondLevelSystemTimeIndex, /* System time spent in the workloop action */ - kInterruptAccountingNoThreadWakeupsIndex, /* Number of first level (filter) invocations that did not wake up a thread */ - kInterruptAccountingTotalThreadWakeupsIndex, /* Number of actual thread wakeups caused by this interrupt */ - kInterruptAccountingPackageWakeupsIndex, /* Number of times this interrupt woke up the package */ - kInterruptAccountingCPUWakeupsIndex, /* Number of times this interrupt woke up a CPU */ - kInterruptAccountingIdleExitsIndex, /* Number of times this interrupt forced a CPU out of the idle loop */ - kInterruptAccountingInvalidStatisticIndex /* Sentinel value for checking for a nonsensical index */ + kInterruptAccountingFirstLevelCountIndex = 0, /* Number of times we invoked the top level handler */ + kInterruptAccountingSecondLevelCountIndex, /* Number of times we invoked the workloop action */ + kInterruptAccountingFirstLevelTimeIndex, /* Time spent in the top level handler, if one was installed */ + kInterruptAccountingSecondLevelCPUTimeIndex, /* CPU time spent in the workloop action */ + kInterruptAccountingSecondLevelSystemTimeIndex, /* System time spent in the workloop action */ + kInterruptAccountingNoThreadWakeupsIndex, /* Number of first level (filter) invocations that did not wake up a thread */ + kInterruptAccountingTotalThreadWakeupsIndex, /* Number of actual thread wakeups caused by this interrupt */ + kInterruptAccountingPackageWakeupsIndex, /* Number of times this interrupt woke up the package */ + kInterruptAccountingCPUWakeupsIndex, /* Number of times this interrupt woke up a CPU */ + kInterruptAccountingIdleExitsIndex, /* Number of times this interrupt forced a CPU out of the idle loop */ + kInterruptAccountingInvalidStatisticIndex /* Sentinel value for checking for a nonsensical index */ }; /* @@ -154,4 +154,3 @@ enum { #define kInterruptAccountingGroupName "Interrupt Statistics (by index)" #endif /* __IOKIT_IOINTERRUPTACCOUNTING_PRIVATE_H */ - diff --git a/iokit/IOKit/IOInterruptAccountingPrivate.h b/iokit/IOKit/IOInterruptAccountingPrivate.h index 5f37136e1..b1dd7e369 100644 --- a/iokit/IOKit/IOInterruptAccountingPrivate.h +++ b/iokit/IOKit/IOInterruptAccountingPrivate.h @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -106,16 +106,16 @@ extern uint32_t gInterruptAccountingStatisticBitmask; #define kInterruptAccountingChannelNameIdleExits (" Idle exits caused by this interrupt") static const char * const kInterruptAccountingStatisticNameArray[IA_NUM_INTERRUPT_ACCOUNTING_STATISTICS] = { - [kInterruptAccountingFirstLevelCountIndex] = kInterruptAccountingChannelNameFirstLevelCount, - [kInterruptAccountingSecondLevelCountIndex] = kInterruptAccountingChannelNameSecondLevelCount, - [kInterruptAccountingFirstLevelTimeIndex] = kInterruptAccountingChannelNameFirstLevelTime, - [kInterruptAccountingSecondLevelCPUTimeIndex] = kInterruptAccountingChannelNameSecondLevelCPUTime, - [kInterruptAccountingSecondLevelSystemTimeIndex] = kInterruptAccountingChannelNameSecondLevelSystemTime, - [kInterruptAccountingNoThreadWakeupsIndex] = kInterruptAccountingChannelNameNoThreadWakeups, - [kInterruptAccountingTotalThreadWakeupsIndex] = kInterruptAccountingChannelNameTotalThreadWakeups, - [kInterruptAccountingPackageWakeupsIndex] = kInterruptAccountingChannelNamePackageWakeups, - [kInterruptAccountingCPUWakeupsIndex] = kInterruptAccountingChannelNameCPUWakeups, - [kInterruptAccountingIdleExitsIndex] = kInterruptAccountingChannelNameIdleExits, + [kInterruptAccountingFirstLevelCountIndex] = kInterruptAccountingChannelNameFirstLevelCount, + [kInterruptAccountingSecondLevelCountIndex] = kInterruptAccountingChannelNameSecondLevelCount, + [kInterruptAccountingFirstLevelTimeIndex] = kInterruptAccountingChannelNameFirstLevelTime, + [kInterruptAccountingSecondLevelCPUTimeIndex] = kInterruptAccountingChannelNameSecondLevelCPUTime, + [kInterruptAccountingSecondLevelSystemTimeIndex] = kInterruptAccountingChannelNameSecondLevelSystemTime, + [kInterruptAccountingNoThreadWakeupsIndex] = kInterruptAccountingChannelNameNoThreadWakeups, + [kInterruptAccountingTotalThreadWakeupsIndex] = kInterruptAccountingChannelNameTotalThreadWakeups, + [kInterruptAccountingPackageWakeupsIndex] = kInterruptAccountingChannelNamePackageWakeups, + [kInterruptAccountingCPUWakeupsIndex] = kInterruptAccountingChannelNameCPUWakeups, + [kInterruptAccountingIdleExitsIndex] = kInterruptAccountingChannelNameIdleExits, }; /* @@ -146,26 +146,26 @@ static const char * const kInterruptAccountingStatisticNameArray[IA_NUM_INTERRUP * TODO: Should this be an OSObject? Or properly pull in its methods as member functions? */ struct IOInterruptAccountingData { - OSObject * owner; /* The owner of the statistics; currently always an IOIES or a subclass of it */ - queue_chain_t chain; - /* - * We have no guarantee that the owner will not temporarily mutate its index value (i.e, in setWorkLoop - * for IOIES). To ensure we can properly recalculate our own identity (and our channel IDs for the - * reporter), stash the index we set up the reporter with here. - * - * Note that we should never remap the interrupt (point it to a different specifier). The mutation of - * the index value is usually to negate it; I am uncertain of the reason for this at the moment. 
The - * practical impact being that we should never need to update the stashed index value; it should stay - * valid for the lifetime of the owner. - */ - int interruptIndex; - - /* - * As long as we are based on the simple reporter, all our channels will be 64 bits. Align the data - * to allow for safe atomic updates (we don't want to cross a cache line on any platform, but for some - * it would cause a panic). - */ - volatile uint64_t interruptStatistics[IA_NUM_INTERRUPT_ACCOUNTING_STATISTICS] __attribute__((aligned(8))); + OSObject * owner; /* The owner of the statistics; currently always an IOIES or a subclass of it */ + queue_chain_t chain; + /* + * We have no guarantee that the owner will not temporarily mutate its index value (i.e, in setWorkLoop + * for IOIES). To ensure we can properly recalculate our own identity (and our channel IDs for the + * reporter), stash the index we set up the reporter with here. + * + * Note that we should never remap the interrupt (point it to a different specifier). The mutation of + * the index value is usually to negate it; I am uncertain of the reason for this at the moment. The + * practical impact being that we should never need to update the stashed index value; it should stay + * valid for the lifetime of the owner. + */ + int interruptIndex; + + /* + * As long as we are based on the simple reporter, all our channels will be 64 bits. Align the data + * to allow for safe atomic updates (we don't want to cross a cache line on any platform, but for some + * it would cause a panic). + */ + volatile uint64_t interruptStatistics[IA_NUM_INTERRUPT_ACCOUNTING_STATISTICS] __attribute__((aligned(8))); }; /* @@ -198,4 +198,3 @@ void interruptAccountingDataUpdateChannels(IOInterruptAccountingData * data, IOS void interruptAccountingDataInheritChannels(IOInterruptAccountingData * data, IOSimpleReporter * reporter); #endif /* __IOKIT_IOINTERRUPTACCOUNTING_PRIVATE_H */ - diff --git a/iokit/IOKit/IOInterruptController.h b/iokit/IOKit/IOInterruptController.h index eca74ce9c..53bdcf529 100644 --- a/iokit/IOKit/IOInterruptController.h +++ b/iokit/IOKit/IOInterruptController.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
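Given the struct above, a statistic update is just an indexed store into the aligned 64-bit channel array; a sketch, with data standing in for an existing IOInterruptAccountingData instance:

    data->interruptStatistics[kInterruptAccountingFirstLevelCountIndex]++;   // one more top-level handler invocation
    uint64_t wakeups = data->interruptStatistics[kInterruptAccountingTotalThreadWakeupsIndex];

The 8-byte alignment noted in the comment is what keeps these single-word updates from straddling a cache line.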
* * DRI: Josh de Cesare - * + * */ @@ -44,17 +44,17 @@ class IOSharedInterruptController; struct IOInterruptVector { - volatile char interruptActive; - volatile char interruptDisabledSoft; - volatile char interruptDisabledHard; - volatile char interruptRegistered; - IOLock * interruptLock; - IOService * nub; - int source; - void * target; - IOInterruptHandler handler; - void * refCon; - IOSharedInterruptController *sharedController; + volatile char interruptActive; + volatile char interruptDisabledSoft; + volatile char interruptDisabledHard; + volatile char interruptRegistered; + IOLock * interruptLock; + IOService * nub; + int source; + void * target; + IOInterruptHandler handler; + void * refCon; + IOSharedInterruptController *sharedController; }; typedef struct IOInterruptVector IOInterruptVector; @@ -67,98 +67,98 @@ typedef long IOInterruptVectorNumber; class IOInterruptController : public IOService { - OSDeclareAbstractStructors(IOInterruptController); + OSDeclareAbstractStructors(IOInterruptController); protected: - IOInterruptVector *vectors; - IOSimpleLock *controllerLock; + IOInterruptVector *vectors; + IOSimpleLock *controllerLock; - struct ExpansionData { }; - ExpansionData *ioic_reserved; + struct ExpansionData { }; + ExpansionData *ioic_reserved; public: - virtual IOReturn registerInterrupt(IOService *nub, int source, - void *target, - IOInterruptHandler handler, - void *refCon); - virtual IOReturn unregisterInterrupt(IOService *nub, int source); - - virtual IOReturn getInterruptType(IOService *nub, int source, - int *interruptType); - - virtual IOReturn enableInterrupt(IOService *nub, int source); - virtual IOReturn disableInterrupt(IOService *nub, int source); - virtual IOReturn causeInterrupt(IOService *nub, int source); - - virtual IOInterruptAction getInterruptHandlerAddress(void); - virtual IOReturn handleInterrupt(void *refCon, IOService *nub, - int source); - - // Methods to be overridden for simplifed interrupt controller subclasses. 
- - virtual bool vectorCanBeShared(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); - virtual void initVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); - virtual int getVectorType(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); - virtual void disableVectorHard(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); - virtual void enableVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); - virtual void causeVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); - - OSMetaClassDeclareReservedUnused(IOInterruptController, 0); - OSMetaClassDeclareReservedUnused(IOInterruptController, 1); - OSMetaClassDeclareReservedUnused(IOInterruptController, 2); - OSMetaClassDeclareReservedUnused(IOInterruptController, 3); - OSMetaClassDeclareReservedUnused(IOInterruptController, 4); - OSMetaClassDeclareReservedUnused(IOInterruptController, 5); + virtual IOReturn registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon); + virtual IOReturn unregisterInterrupt(IOService *nub, int source); + + virtual IOReturn getInterruptType(IOService *nub, int source, + int *interruptType); + + virtual IOReturn enableInterrupt(IOService *nub, int source); + virtual IOReturn disableInterrupt(IOService *nub, int source); + virtual IOReturn causeInterrupt(IOService *nub, int source); + + virtual IOInterruptAction getInterruptHandlerAddress(void); + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, + int source); + +// Methods to be overridden for simplified interrupt controller subclasses. + + virtual bool vectorCanBeShared(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + virtual void initVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + virtual int getVectorType(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + virtual void disableVectorHard(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + virtual void enableVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + virtual void causeVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + + OSMetaClassDeclareReservedUnused(IOInterruptController, 0); + OSMetaClassDeclareReservedUnused(IOInterruptController, 1); + OSMetaClassDeclareReservedUnused(IOInterruptController, 2); + OSMetaClassDeclareReservedUnused(IOInterruptController, 3); + OSMetaClassDeclareReservedUnused(IOInterruptController, 4); + OSMetaClassDeclareReservedUnused(IOInterruptController, 5); public: - // Generic methods (not to be overriden). +// Generic methods (not to be overridden).
- void timeStampSpuriousInterrupt(void); - void timeStampInterruptHandlerStart(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); - void timeStampInterruptHandlerEnd(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + void timeStampSpuriousInterrupt(void); + void timeStampInterruptHandlerStart(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + void timeStampInterruptHandlerEnd(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); private: - void timeStampInterruptHandlerInternal(bool isStart, IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); + void timeStampInterruptHandlerInternal(bool isStart, IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector); }; class IOSharedInterruptController : public IOInterruptController { - OSDeclareDefaultStructors(IOSharedInterruptController); - + OSDeclareDefaultStructors(IOSharedInterruptController); + private: - IOService *provider; - int numVectors; - int vectorsRegistered; - int vectorsEnabled; - volatile int controllerDisabled; - bool sourceIsLevel; + IOService *provider; + int numVectors; + int vectorsRegistered; + int vectorsEnabled; + volatile int controllerDisabled; + bool sourceIsLevel; - struct ExpansionData { }; - ExpansionData *iosic_reserved __unused; + struct ExpansionData { }; + ExpansionData *iosic_reserved __unused; public: - virtual IOReturn initInterruptController(IOInterruptController *parentController, OSData *parentSource); - - virtual IOReturn registerInterrupt(IOService *nub, int source, - void *target, - IOInterruptHandler handler, - void *refCon) APPLE_KEXT_OVERRIDE; - virtual IOReturn unregisterInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; - - virtual IOReturn getInterruptType(IOService *nub, int source, - int *interruptType) APPLE_KEXT_OVERRIDE; - - virtual IOReturn enableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; - virtual IOReturn disableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; - - virtual IOInterruptAction getInterruptHandlerAddress(void) APPLE_KEXT_OVERRIDE; - virtual IOReturn handleInterrupt(void *refCon, IOService *nub, int source) APPLE_KEXT_OVERRIDE; - - OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 0); - OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 1); - OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 2); - OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 3); + virtual IOReturn initInterruptController(IOInterruptController *parentController, OSData *parentSource); + + virtual IOReturn registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon) APPLE_KEXT_OVERRIDE; + virtual IOReturn unregisterInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + + virtual IOReturn getInterruptType(IOService *nub, int source, + int *interruptType) APPLE_KEXT_OVERRIDE; + + virtual IOReturn enableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + virtual IOReturn disableInterrupt(IOService *nub, int source) APPLE_KEXT_OVERRIDE; + + virtual IOInterruptAction getInterruptHandlerAddress(void) APPLE_KEXT_OVERRIDE; + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, int source) APPLE_KEXT_OVERRIDE; + + OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 0); + OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 1); + OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 2); + OSMetaClassDeclareReservedUnused(IOSharedInterruptController, 3); }; diff 
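A concrete controller usually only overrides the simplified-subclass hooks declared above; a hedged sketch with a hypothetical class name and the hardware accesses elided:

    class MyInterruptController : public IOInterruptController
    {
        OSDeclareDefaultStructors(MyInterruptController);
    public:
        virtual void initVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) APPLE_KEXT_OVERRIDE
        {
            // per-vector hardware setup would go here
        }
        virtual void enableVector(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) APPLE_KEXT_OVERRIDE
        {
            // unmask the source in hardware
        }
        virtual void disableVectorHard(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) APPLE_KEXT_OVERRIDE
        {
            // mask the source in hardware
        }
        virtual bool vectorCanBeShared(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) APPLE_KEXT_OVERRIDE
        {
            return false; // conservatively refuse sharing; level-triggered designs often return true
        }
    };

registerInterrupt(), handleInterrupt(), and the other generic methods then operate on the vectors table managed by the base class.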
--git a/iokit/IOKit/IOInterruptEventSource.h b/iokit/IOKit/IOInterruptEventSource.h index 1d63d5c3a..40e5bc1dc 100644 --- a/iokit/IOKit/IOInterruptEventSource.h +++ b/iokit/IOKit/IOInterruptEventSource.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,18 +22,18 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* -Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - -HISTORY - 1998-7-13 Godfrey van der Linden(gvdl) - Created. - 1998-10-30 Godfrey van der Linden(gvdl) - Converted to C++ -*/ + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 1998-7-13 Godfrey van der Linden(gvdl) + * Created. + * 1998-10-30 Godfrey van der Linden(gvdl) + * Converted to C++ + */ #ifndef _IOKIT_IOINTERRUPTEVENTSOURCE_H #define _IOKIT_IOINTERRUPTEVENTSOURCE_H @@ -45,205 +45,205 @@ class IOService; struct IOInterruptAccountingData; /*! @class IOInterruptEventSource : public IOEventSource - @abstract Event source for interrupt delivery to work-loop based drivers. - @discussion The IOInterruptEventSource is a generic object that delivers calls interrupt routines in it's client in a guaranteed single-threaded manner. IOInterruptEventSource is part of the IOKit $link IOWorkLoop infrastructure where the semantic that one and only one action method is executing within a work-loops event chain. -

-When the action method is called in the client member function will receive 2 arguments, (IOEventSource *) sender and (int) count, See $link IOInterruptEventSource::Action. Where sender will be reference to the interrupt that occurred and the count will be computed by the difference between the $link producerCount and $link consumerCount. This number may not be reliable as no attempt is made to adjust for around the world type problems but is provided for general information and statistic gathering. -

-In general a client will use the factory member function to create and initialise the event source and then add it to their work-loop. It is the work loop's responsiblity to maintain the new event source in it's event chain. See $link IOWorkLoop. -

-An interrupt event source attaches itself to the given provider's interrupt source at initialisation time. At this time it determines if it is connected to a level or edge triggered interrupt. If the interrupt is an level triggered interrupt the event source automatically disables the interrupt source at primary interrupt time and after it call's the client it automatically reenables the interrupt. This action is fairly expensive but it is 100% safe and defaults sensibly so that the driver writer does not have to implement type dependant interrupt routines. So to repeat, the driver writer does not have to be concerned by the actual underlying interrupt mechanism as the event source hides the complexity. -

-Saying this if the hardware is a multi-device card, for instance a 4 port NIC, where all of the devices are sharing one level triggered interrupt AND it is possible to determine each port's interrupt state non-destructively then the $link IOFilterInterruptEventSource would be a better choice. -

-Warning: All IOInterruptEventSources are created in the disabled state. If you want to actually schedule interrupt delivery do not forget to enable the source. -*/ + * @abstract Event source for interrupt delivery to work-loop based drivers. + * @discussion The IOInterruptEventSource is a generic object that delivers calls to interrupt routines in its client in a guaranteed single-threaded manner. IOInterruptEventSource is part of the IOKit $link IOWorkLoop infrastructure, where the semantic is that one and only one action method is executing within a work-loop's event chain. + *

+ * When the action method is called, the client member function will receive 2 arguments: (IOEventSource *) sender and (int) count; see $link IOInterruptEventSource::Action. Sender will be a reference to the interrupt event source that fired, and count will be computed as the difference between the $link producerCount and $link consumerCount. This number may not be reliable, as no attempt is made to adjust for wrap-around problems, but it is provided for general information and statistics gathering. + *

+ * In general, a client will use the factory member function to create and initialise the event source and then add it to its work-loop. It is the work loop's responsibility to maintain the new event source in its event chain. See $link IOWorkLoop. + *

+ * An interrupt event source attaches itself to the given provider's interrupt source at initialisation time. At this time it determines whether it is connected to a level- or edge-triggered interrupt. If the interrupt is a level-triggered interrupt, the event source automatically disables the interrupt source at primary interrupt time and, after it calls the client, automatically re-enables the interrupt. This action is fairly expensive, but it is 100% safe and defaults sensibly, so the driver writer does not have to implement type-dependent interrupt routines. So to repeat, the driver writer does not have to be concerned with the actual underlying interrupt mechanism, as the event source hides the complexity. + *

+ * That said, if the hardware is a multi-device card, for instance a 4-port NIC, where all of the devices share one level-triggered interrupt AND it is possible to determine each port's interrupt state non-destructively, then the $link IOFilterInterruptEventSource would be a better choice. + *

+ * Warning: All IOInterruptEventSources are created in the disabled state. If you want to actually schedule interrupt delivery, do not forget to enable the source. + */ class IOInterruptEventSource : public IOEventSource { - OSDeclareDefaultStructors(IOInterruptEventSource) + OSDeclareDefaultStructors(IOInterruptEventSource) public: /*! @typedef Action - @discussion 'C' pointer prototype of functions that are called in a single threaded context when an interrupt occurs. - @param owner Pointer to client instance. - @param sender Pointer to generation interrupt event source. - @param count Number of interrupts seen before delivery. */ - typedef void (*Action)(OSObject *owner, IOInterruptEventSource *sender, int count); + * @discussion 'C' pointer prototype of functions that are called in a single threaded context when an interrupt occurs. + * @param owner Pointer to client instance. + * @param sender Pointer to generation interrupt event source. + * @param count Number of interrupts seen before delivery. */ + typedef void (*Action)(OSObject *owner, IOInterruptEventSource *sender, int count); #ifdef __BLOCKS__ - typedef void (^ActionBlock)(IOInterruptEventSource *sender, int count); + typedef void (^ActionBlock)(IOInterruptEventSource *sender, int count); #endif /* __BLOCKS__ */ /*! @defined IOInterruptEventAction - @discussion Backward compatibilty define for the old non-class scoped type definition. See $link IOInterruptEventSource::Action */ + * @discussion Backward compatibility define for the old non-class scoped type definition. See $link IOInterruptEventSource::Action */ #define IOInterruptEventAction IOInterruptEventSource::Action protected: /*! @var provider IOService that provides interrupts for delivery. */ - IOService *provider; + IOService *provider; /*! @var intIndex */ - int intIndex; + int intIndex; /*! @var producerCount - Current count of produced interrupts that have been received. */ - volatile unsigned int producerCount; + * Current count of produced interrupts that have been received. */ + volatile unsigned int producerCount; /*! @var consumerCount - Current count of produced interrupts that the owner has been informed of. */ - unsigned int consumerCount; + * Current count of produced interrupts that the owner has been informed of. */ + unsigned int consumerCount; /*! @var autoDisable Do we need to automatically disable the interrupt source when we take an interrupt, i.e. we are level triggered. */ - bool autoDisable; + bool autoDisable; /*! @var explicitDisable Has the user expicitly disabled this event source, if so then do not overide their request when returning from the callout */ - bool explicitDisable; + bool explicitDisable; /*! @struct ExpansionData - @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future. - */ - struct ExpansionData { - IOInterruptAccountingData * statistics; - }; + * @discussion This structure will be used to expand the capabilities of the IOWorkLoop in the future. + */ + struct ExpansionData { + IOInterruptAccountingData * statistics; + }; /*! @var reserved - Reserved for future use. (Internal use only) */ - APPLE_KEXT_WSHADOW_PUSH; - ExpansionData *reserved; - APPLE_KEXT_WSHADOW_POP; + * Reserved for future use. (Internal use only) */ + APPLE_KEXT_WSHADOW_PUSH; + ExpansionData *reserved; + APPLE_KEXT_WSHADOW_POP; /*! @function free - @abstract Sub-class implementation of free method, disconnects from the interrupt source.
*/ - virtual void free() APPLE_KEXT_OVERRIDE; + * @abstract Sub-class implementation of free method, disconnects from the interrupt source. */ + virtual void free() APPLE_KEXT_OVERRIDE; /*! @function checkForWork - @abstract Pure Virtual member function used by IOWorkLoop for issueing a client calls. - @discussion This function called when the work-loop is ready to check for any work to do and then to call out the owner/action. - @result Return true if this function needs to be called again before all its outstanding events have been processed. */ - virtual bool checkForWork() APPLE_KEXT_OVERRIDE; + * @abstract Pure Virtual member function used by IOWorkLoop for issuing client calls. + * @discussion This function is called when the work-loop is ready to check for any work to do and then to call out the owner/action. + * @result Return true if this function needs to be called again before all its outstanding events have been processed. */ + virtual bool checkForWork() APPLE_KEXT_OVERRIDE; /*! @function setWorkLoop - @abstract Sub-class implementation of setWorkLoop method. */ - virtual void setWorkLoop(IOWorkLoop *inWorkLoop) APPLE_KEXT_OVERRIDE; + * @abstract Sub-class implementation of setWorkLoop method. */ + virtual void setWorkLoop(IOWorkLoop *inWorkLoop) APPLE_KEXT_OVERRIDE; public: /*! @function interruptEventSource - @abstract Factory function for IOInterruptEventSources creation and initialisation. - @param owner Owning client of the new event source. - @param action 'C' Function to call when something happens. - @param provider IOService that represents the interrupt source. Defaults to 0. When no provider is defined the event source assumes that the client will in some manner call the interruptOccured method explicitly. This will start the ball rolling for safe delivery of asynchronous event's into the driver. - @param intIndex The index of the interrupt within the provider's interrupt sources. Defaults to 0, i.e. the first interrupt in the provider. - @result A new interrupt event source if successfully created and initialised, 0 otherwise. */ - static IOInterruptEventSource * + * @abstract Factory function for IOInterruptEventSources creation and initialisation. + * @param owner Owning client of the new event source. + * @param action 'C' Function to call when something happens. + * @param provider IOService that represents the interrupt source. Defaults to 0. When no provider is defined the event source assumes that the client will in some manner call the interruptOccurred method explicitly. This will start the ball rolling for safe delivery of asynchronous events into the driver. + * @param intIndex The index of the interrupt within the provider's interrupt sources. Defaults to 0, i.e. the first interrupt in the provider. + * @result A new interrupt event source if successfully created and initialised, 0 otherwise. */ + static IOInterruptEventSource * interruptEventSource(OSObject *owner, - Action action, - IOService *provider = 0, - int intIndex = 0); + Action action, + IOService *provider = 0, + int intIndex = 0); #ifdef __BLOCKS__ /*! @function interruptEventSource - @abstract Factory function for IOInterruptEventSources creation and initialisation. - @param owner Owning client of the new event source. - @param provider IOService that represents the interrupt source. When no provider is defined the event source assumes that the client will in some manner call the interruptOccured method explicitly.
This will start the ball rolling for safe delivery of asynchronous event's into the driver. - @param intIndex The index of the interrupt within the provider's interrupt sources. - @param action Block for the callout routine of this event source.. - @result A new interrupt event source if successfully created and initialised, 0 otherwise. */ - static IOInterruptEventSource * + * @abstract Factory function for IOInterruptEventSources creation and initialisation. + * @param owner Owning client of the new event source. + * @param provider IOService that represents the interrupt source. When no provider is defined the event source assumes that the client will in some manner call the interruptOccurred method explicitly. This will start the ball rolling for safe delivery of asynchronous events into the driver. + * @param intIndex The index of the interrupt within the provider's interrupt sources. + * @param action Block for the callout routine of this event source. + * @result A new interrupt event source if successfully created and initialised, 0 otherwise. */ + static IOInterruptEventSource * interruptEventSource(OSObject *owner, - IOService *provider, - int intIndex, - ActionBlock action); + IOService *provider, + int intIndex, + ActionBlock action); #endif /* __BLOCKS__ */ #if XNU_KERNEL_PRIVATE - static void actionToBlock(OSObject *owner, IOInterruptEventSource *sender, int count); + static void actionToBlock(OSObject *owner, IOInterruptEventSource *sender, int count); #endif /* XNU_KERNEL_PRIVATE */ /*! @function init - @abstract Primary initialiser for the IOInterruptEventSource class. - @param owner Owning client of the new event source. - @param action 'C' Function to call when something happens. - @param provider IOService that represents the interrupt source. Defaults to 0. When no provider is defined the event source assumes that the client will in some manner call the interruptOccured method explicitly. This will start the ball rolling for safe delivery of asynchronous event's into the driver. - @param intIndex The index of the interrupt within the provider's interrupt sources. Defaults to 0, i.e. the first interrupt in the provider. - @result true if the inherited classes and this instance initialise -successfully. */ - virtual bool init(OSObject *owner, - Action action, - IOService *provider = 0, - int intIndex = 0); + * @abstract Primary initialiser for the IOInterruptEventSource class. + * @param owner Owning client of the new event source. + * @param action 'C' Function to call when something happens. + * @param provider IOService that represents the interrupt source. Defaults to 0. When no provider is defined the event source assumes that the client will in some manner call the interruptOccurred method explicitly. This will start the ball rolling for safe delivery of asynchronous events into the driver. + * @param intIndex The index of the interrupt within the provider's interrupt sources. Defaults to 0, i.e. the first interrupt in the provider. + * @result true if the inherited classes and this instance initialise + * successfully. */ + virtual bool init(OSObject *owner, + Action action, + IOService *provider = 0, + int intIndex = 0); /*! @function enable - @abstract Enable event source. - @discussion A subclass implementation is expected to respect the enabled -state when checkForWork is called. Calling this function will cause the -work-loop to be signalled so that a checkForWork is performed.
*/ - virtual void enable() APPLE_KEXT_OVERRIDE; + * @abstract Enable event source. + * @discussion A subclass implementation is expected to respect the enabled + * state when checkForWork is called. Calling this function will cause the + * work-loop to be signalled so that a checkForWork is performed. */ + virtual void enable() APPLE_KEXT_OVERRIDE; /*! @function disable - @abstract Disable event source. - @discussion A subclass implementation is expected to respect the enabled -state when checkForWork is called. */ - virtual void disable() APPLE_KEXT_OVERRIDE; + * @abstract Disable event source. + * @discussion A subclass implementation is expected to respect the enabled + * state when checkForWork is called. */ + virtual void disable() APPLE_KEXT_OVERRIDE; /*! @function getProvider - @abstract Get'ter for $link provider variable. - @result value of provider. */ - virtual const IOService *getProvider() const; + * @abstract Getter for $link provider variable. + * @result value of provider. */ + virtual const IOService *getProvider() const; /*! @function getIntIndex - @abstract Get'ter for $link intIndex interrupt index variable. - @result value of intIndex. */ - virtual int getIntIndex() const; + * @abstract Getter for $link intIndex interrupt index variable. + * @result value of intIndex. */ + virtual int getIntIndex() const; /*! @function getAutoDisable - @abstract Get'ter for $link autoDisable variable. - @result value of autoDisable. */ - virtual bool getAutoDisable() const; + * @abstract Getter for $link autoDisable variable. + * @result value of autoDisable. */ + virtual bool getAutoDisable() const; /*! @function interruptOccurred - @abstract Functions that get called by the interrupt controller. See $link IOService::registerInterrupt - @param nub Where did the interrupt originate from - @param ind What is this interrupts index within 'nub'. */ - virtual void interruptOccurred(void *, IOService *nub, int ind); + * @abstract Functions that get called by the interrupt controller. See $link IOService::registerInterrupt + * @param nub Where did the interrupt originate from + * @param ind What is this interrupt's index within 'nub'. */ + virtual void interruptOccurred(void *, IOService *nub, int ind); /*! @function normalInterruptOccurred - @abstract Functions that get called by the interrupt controller.See $link IOService::registerInterrupt - @param nub Where did the interrupt originate from - @param ind What is this interrupts index within 'nub'. */ - virtual void normalInterruptOccurred(void *, IOService *nub, int ind); + * @abstract Functions that get called by the interrupt controller. See $link IOService::registerInterrupt + * @param nub Where did the interrupt originate from + * @param ind What is this interrupt's index within 'nub'. */ + virtual void normalInterruptOccurred(void *, IOService *nub, int ind); /*! @function disableInterruptOccurred - @abstract Functions that get called by the interrupt controller.See $link IOService::registerInterrupt - @param nub Where did the interrupt originate from - @param ind What is this interrupts index within 'nub'. */ - virtual void disableInterruptOccurred(void *, IOService *nub, int ind); - + * @abstract Functions that get called by the interrupt controller. See $link IOService::registerInterrupt + * @param nub Where did the interrupt originate from + * @param ind What is this interrupt's index within 'nub'. */ + virtual void disableInterruptOccurred(void *, IOService *nub, int ind); + /*!
@function warmCPU - @abstract Tries to reduce latency for an interrupt which will be received near a specified time. - @discussion Warms up a CPU in advance of an interrupt so that the interrupt may be serviced with predictable latency. - The warm-up is not periodic; callers should call warmCPU once in advance of each interrupt. It is recommended that - requests be issues in serial (i.e. each after the target for the previous call has elapsed), as there is a systemwide - cap on the number of outstanding requests. This routine may be disruptive to the system if used with very small intervals - between requests; it should be used only in cases where interrupt latency is absolutely critical, and tens or hundreds of - milliseconds between targets is the expected time scale. NOTE: it is not safe to call this method with interrupts disabled. - @param abstime Time at which interrupt is expected. */ - IOReturn warmCPU(uint64_t abstime); + * @abstract Tries to reduce latency for an interrupt which will be received near a specified time. + * @discussion Warms up a CPU in advance of an interrupt so that the interrupt may be serviced with predictable latency. + * The warm-up is not periodic; callers should call warmCPU once in advance of each interrupt. It is recommended that + * requests be issued in serial (i.e. each after the target for the previous call has elapsed), as there is a systemwide + * cap on the number of outstanding requests. This routine may be disruptive to the system if used with very small intervals + * between requests; it should be used only in cases where interrupt latency is absolutely critical, and tens or hundreds of + * milliseconds between targets is the expected time scale. NOTE: it is not safe to call this method with interrupts disabled. + * @param abstime Time at which interrupt is expected. */ + IOReturn warmCPU(uint64_t abstime); private: - IOReturn registerInterruptHandler(IOService *inProvider, int inIntIndex); - void unregisterInterruptHandler(IOService *inProvider, int inIntIndex); + IOReturn registerInterruptHandler(IOService *inProvider, int inIntIndex); + void unregisterInterruptHandler(IOService *inProvider, int inIntIndex); private: - OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 0); - OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 1); - OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 2); - OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 3); - OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 4); - OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 5); - OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 6); - OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 7); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 0); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 1); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 2); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 3); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 4); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 5); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 6); + OSMetaClassDeclareReservedUnused(IOInterruptEventSource, 7); }; #endif /* !_IOKIT_IOINTERRUPTEVENTSOURCE_H */ diff --git a/iokit/IOKit/IOInterrupts.h b/iokit/IOKit/IOInterrupts.h index d58ea9f07..c2679ec83 100644 --- a/iokit/IOKit/IOInterrupts.h +++ b/iokit/IOKit/IOInterrupts.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
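Pulling the declarations above together, a typical client wires the event source up in start(); the driver class, member names, and interrupt index below are illustrative assumptions:

    // Member handler; OSMemberFunctionCast routes it through the Action typedef.
    void MyDriver::interruptHandler(IOInterruptEventSource *sender, int count)
    {
        // runs single-threaded on the work loop; count interrupts may be coalesced
    }

    bool MyDriver::start(IOService *provider)
    {
        if (!IOService::start(provider)) {
            return false;
        }
        fIntSource = IOInterruptEventSource::interruptEventSource(
            this,
            OSMemberFunctionCast(IOInterruptEventSource::Action, this, &MyDriver::interruptHandler),
            provider, 0 /* first interrupt in the provider */);
        if (!fIntSource || getWorkLoop()->addEventSource(fIntSource) != kIOReturnSuccess) {
            return false;
        }
        fIntSource->enable(); // event sources are created disabled (see the Warning above)
        return true;
    }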
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * DRI: Josh de Cesare * @@ -45,15 +45,15 @@ class OSData; class IOInterruptController; struct IOInterruptSource { - IOInterruptController *interruptController; - OSData *vectorData; + IOInterruptController *interruptController; + OSData *vectorData; }; typedef struct IOInterruptSource IOInterruptSource; #ifdef XNU_KERNEL_PRIVATE struct IOInterruptSourcePrivate { - void * vectorBlock; + void * vectorBlock; }; typedef struct IOInterruptSourcePrivate IOInterruptSourcePrivate; @@ -63,6 +63,6 @@ typedef struct IOInterruptSourcePrivate IOInterruptSourcePrivate; #endif /* __cplusplus */ typedef void (*IOInterruptHandler)(void *target, void *refCon, - void *nub, int source); + void *nub, int source); #endif /* ! _IOKIT_IOINTERRUPTS_H */ diff --git a/iokit/IOKit/IOKernelReportStructs.h b/iokit/IOKit/IOKernelReportStructs.h index b15a62527..162ac5565 100644 --- a/iokit/IOKit/IOKernelReportStructs.h +++ b/iokit/IOKit/IOKernelReportStructs.h @@ -60,27 +60,27 @@ extern "C" { #define kIOReportChannelNameIdx 2 // optional /* Histogram Segment Configuration - Currently supports 2 types of scaling to compute bucket upper bounds, - linear or exponential. - scale_flag = 0 -> linear scale - 1 -> exponential scale - upper_bound[n] = (scale_flag) ? pow(base,(n+1)) : base * (n+1); -*/ + * Currently supports 2 types of scaling to compute bucket upper bounds, + * linear or exponential. + * scale_flag = 0 -> linear scale + * 1 -> exponential scale + * upper_bound[n] = (scale_flag) ? pow(base,(n+1)) : base * (n+1); + */ #define kIOHistogramScaleLinear 0 #define kIOHistogramScaleExponential 1 typedef struct { - uint32_t base_bucket_width; // segment[0].bucket[0] = [0, base_width] - uint32_t scale_flag; // bit 0 only in current use (see #defs) - uint32_t segment_idx; // for multiple segments histograms - uint32_t segment_bucket_count; // number of buckets in this segment + uint32_t base_bucket_width;// segment[0].bucket[0] = [0, base_width] + uint32_t scale_flag; // bit 0 only in current use (see #defs) + uint32_t segment_idx; // for multiple segments histograms + uint32_t segment_bucket_count;// number of buckets in this segment } __attribute((packed)) IOHistogramSegmentConfig; // "normalized distribution"(FIXME?) internal format (unused?) 
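The scale comment above pins down the bucket bounds; a sketch of the computation in integer math, where base is read from base_bucket_width and the helper name is invented:

    static uint64_t histogramUpperBound(const IOHistogramSegmentConfig *cfg, uint32_t n)
    {
        uint64_t base = cfg->base_bucket_width;
        if (cfg->scale_flag == kIOHistogramScaleExponential) {
            uint64_t bound = base;              // base^1
            for (uint32_t i = 0; i < n; i++) {
                bound *= base;                  // builds up to base^(n+1)
            }
            return bound;                       // pow(base, n+1)
        }
        return base * (uint64_t)(n + 1);        // linear: base * (n+1)
    }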
typedef struct { - uint64_t samples; - uint64_t mean; - uint64_t variance; - uint64_t reserved; + uint64_t samples; + uint64_t mean; + uint64_t variance; + uint64_t reserved; } __attribute((packed)) IONormDistReportValues; #ifdef __cplusplus diff --git a/iokit/IOKit/IOKernelReporters.h b/iokit/IOKit/IOKernelReporters.h index bbde3d817..2e6ac1643 100644 --- a/iokit/IOKit/IOKernelReporters.h +++ b/iokit/IOKit/IOKernelReporters.h @@ -1,8 +1,8 @@ /* * Copyright (c) 2012-2016 Apple Inc. All Rights Reserved. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -50,69 +50,69 @@ typedef OSDictionary IOReportLegendEntry; /******************************* - TOC: this file contains - 1. Introduction - 2a. IOReporter class declaration (public & non-public members) - 2b. static IOReporter methods unrelated to the class - 3. IOReporter subclass declarations (public & non-public members) - 4. IOReportLegend class declaration +* TOC: this file contains +* 1. Introduction +* 2a. IOReporter class declaration (public & non-public members) +* 2b. static IOReporter methods unrelated to the class +* 3. IOReporter subclass declarations (public & non-public members) +* 4. IOReportLegend class declaration *******************************/ /*! - 1. Introduction - - IOReporting is a mechanism for I/O Kit drivers to gather statistics - (or other information) and make it available to various "observers," - which are generally in user space. Requests for information come - through two new IOService methods: ::configureReport(...) and - ::updateReport(...). While not required (see IOReportTypes.h), drivers - will generally use IOReporter subclass instances to track the requested - information and respond to IOReporting requests. Drivers can use these - classes to track information, either all the time or between "enable" - and "disable" calls to IOService::configureReport(). - - Available information is organized into "channels." A channel is - uniquely identified by both driver (registry) ID and a 64-bit channel - ID. One way drivers can advertise their channels is by publishing - "legends" in the I/O Kit registry. In addition to collecting - information and responding to queries, IOReporter objects can produce - legend entries describing their channels. The IOReportLegend class - helps manage legend entries from multiple reporter objects as well - as with grouping channels logically for observers. 
- - An important basic constraint of the current implementation is that - all channels reported by a particular reporter instance must share all - traits except channel ID and name. Specifically, the channel type - (including report class, categories, & size) and units. Additionally, - IOHistogramReporter currently only supports one channel at a time. - - Currently, ::{configure/update}Report() can be called any time between - when a driver calls registerService() and when free() is called on - your driver. 12960947 tracks improvements / recommendations for - correctly handling these calls during termination. - - * Locking - IOReporting only imposes concurrent access constraints when multiple - threads are accessing the same object. Three levels of constraint apply - depending on a method's purpose: - 1. Allocation/Teardown - same-instance concurrency UNSAFE, MAY BLOCK - 2. Configuration - same-instance concurrency SAFE, MAY BLOCK - 3. Update - same-instance concurrency SAFE, WILL NOT BLOCK - - Configuration requires memory management which can block and must - be invoked with interrupts ENABLED (for example, NOT in the interrupt - context NOR with a spin lock -- like IOSimpleLock -- held). - - Updates can be performed with interrupts disabled, but clients should - take into account that IOReporters' non-blocking currenency is achieved - with IOSimpleLockLockDisable/UnlockEnableInterrupts(): that is, by - disabling interrupts and taking a spin lock. While IOReporting will - never hold a lock beyond a call into it, some time may be spent within - the call spin-waiting for the lock. Clients holding their own - spin locks should carefully consider the impact of IOReporting's - (small) additional latency before calling it while holding a spin lock. - - The documentation for each method indicates any concurrency guarantees. + * 1. Introduction + * + * IOReporting is a mechanism for I/O Kit drivers to gather statistics + * (or other information) and make it available to various "observers," + * which are generally in user space. Requests for information come + * through two new IOService methods: ::configureReport(...) and + * ::updateReport(...). While not required (see IOReportTypes.h), drivers + * will generally use IOReporter subclass instances to track the requested + * information and respond to IOReporting requests. Drivers can use these + * classes to track information, either all the time or between "enable" + * and "disable" calls to IOService::configureReport(). + * + * Available information is organized into "channels." A channel is + * uniquely identified by both driver (registry) ID and a 64-bit channel + * ID. One way drivers can advertise their channels is by publishing + * "legends" in the I/O Kit registry. In addition to collecting + * information and responding to queries, IOReporter objects can produce + * legend entries describing their channels. The IOReportLegend class + * helps manage legend entries from multiple reporter objects as well + * as with grouping channels logically for observers. + * + * An important basic constraint of the current implementation is that + * all channels reported by a particular reporter instance must share all + * traits except channel ID and name. Specifically, the channel type + * (including report class, categories, & size) and units. Additionally, + * IOHistogramReporter currently only supports one channel at a time. 
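The delegation pattern the introduction describes might look roughly like the following in a driver. This is a minimal sketch: `MyDriver`, its `_reporter` member, and the error handling are hypothetical, and it assumes the IOService hook signatures line up with the reporter methods declared below.

IOReturn
MyDriver::configureReport(IOReportChannelList *channels,
                          IOReportConfigureAction action,
                          void *result, void *destination)
{
    // Let the reporter respond for the channels it tracks; 'result'
    // accumulates across reporters and the superclass.
    IOReturn res = _reporter->configureReport(channels, action,
                                              result, destination);
    if (res != kIOReturnSuccess) {
        return res;
    }
    // 'super' is whatever the .cpp's OSDefineMetaClassAndStructors names.
    return super::configureReport(channels, action, result, destination);
}

IOReturn
MyDriver::updateReport(IOReportChannelList *channels,
                       IOReportConfigureAction action,
                       void *result, void *destination)
{
    IOReturn res = _reporter->updateReport(channels, action,
                                           result, destination);
    if (res != kIOReturnSuccess) {
        return res;
    }
    return super::updateReport(channels, action, result, destination);
}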
+ * + * Currently, ::{configure/update}Report() can be called any time between + * when a driver calls registerService() and when free() is called on + * your driver. 12960947 tracks improvements / recommendations for + * correctly handling these calls during termination. + * + * Locking + * IOReporting only imposes concurrent access constraints when multiple + * threads are accessing the same object. Three levels of constraint apply + * depending on a method's purpose: + * 1. Allocation/Teardown - same-instance concurrency UNSAFE, MAY BLOCK + * 2. Configuration - same-instance concurrency SAFE, MAY BLOCK + * 3. Update - same-instance concurrency SAFE, WILL NOT BLOCK + * + * Configuration requires memory management which can block and must + * be invoked with interrupts ENABLED (for example, NOT in the interrupt + * context NOR with a spin lock -- like IOSimpleLock -- held). + * + * Updates can be performed with interrupts disabled, but clients should + * take into account that IOReporters' non-blocking concurrency is achieved + * with IOSimpleLockLockDisable/UnlockEnableInterrupts(): that is, by + * disabling interrupts and taking a spin lock. While IOReporting will + * never hold a lock beyond a call into it, some time may be spent within + * the call spin-waiting for the lock. Clients holding their own + * spin locks should carefully consider the impact of IOReporting's + * (small) additional latency before calling it while holding a spin lock. + * + * The documentation for each method indicates any concurrency guarantees. */ @@ -122,163 +122,163 @@ typedef OSDictionary IOReportLegendEntry; class IOReporter : public OSObject { - OSDeclareDefaultStructors(IOReporter); + OSDeclareDefaultStructors(IOReporter); protected: /*! @function IOReporter::init - @abstract base init() method, called by subclass initWith() methods - - @param reportingService - IOService associated with all channels - @param channelType - type info for all channels (element_idx = 0) - @param unit - description applied for all channels - @result true on success, false otherwise - - @discussion - init() establishes the parameters of all channels for this reporter - instance. Any channels added via addChannel() will be of this type - and have this unit. - - IOReporter clients should use the static ::with() methods - below to obtain fully-initialized reporter instances. ::free() - expects ::init() to have completed successfully. On failure, any - allocations are cleaned up. - - Locking: same-instance concurrency UNSAFE -*/ - virtual bool init(IOService *reportingService, - IOReportChannelType channelType, - IOReportUnit unit); + * @abstract base init() method, called by subclass initWith() methods + * + * @param reportingService - IOService associated with all channels + * @param channelType - type info for all channels (element_idx = 0) + * @param unit - description applied for all channels + * @result true on success, false otherwise + * + * @discussion + * init() establishes the parameters of all channels for this reporter + * instance. Any channels added via addChannel() will be of this type + * and have this unit. + * + * IOReporter clients should use the static ::with() methods + * below to obtain fully-initialized reporter instances. ::free() + * expects ::init() to have completed successfully. On failure, any + * allocations are cleaned up. + * + * Locking: same-instance concurrency UNSAFE + */ + virtual bool init(IOService *reportingService, + IOReportChannelType channelType, + IOReportUnit unit); public: /*!
@function IOReporter::addChannel - @abstract add an additional, similar channel to the reporter - - @param channelID - identifier for the channel to be added - @param channelName - an optional human-readble name for the channel - @result appropriate IOReturn code - - @discussion - The reporter will allocate memory to track a new channel with the - provided ID and name (if any). Its other traits (type, etc) will - be those provided when the reporter was initialized. If no channel - name is provided and the channelID consists solely of ASCII bytes, - those bytes (ignoring any NUL bytes) will be used as the - human-readable channel name in user space. The IOREPORT_MAKEID() - macro in IOReportTypes.h can be used to create ASCII channel IDs. - - Locking: same-instance concurrency SAFE, MAY BLOCK -*/ - IOReturn addChannel(uint64_t channelID, const char *channelName = NULL); - + * @abstract add an additional, similar channel to the reporter + * + * @param channelID - identifier for the channel to be added + * @param channelName - an optional human-readable name for the channel + * @result appropriate IOReturn code + * + * @discussion + * The reporter will allocate memory to track a new channel with the + * provided ID and name (if any). Its other traits (type, etc) will + * be those provided when the reporter was initialized. If no channel + * name is provided and the channelID consists solely of ASCII bytes, + * those bytes (ignoring any NUL bytes) will be used as the + * human-readable channel name in user space. The IOREPORT_MAKEID() + * macro in IOReportTypes.h can be used to create ASCII channel IDs. + * + * Locking: same-instance concurrency SAFE, MAY BLOCK + */ + IOReturn addChannel(uint64_t channelID, const char *channelName = NULL); + /*! @function IOReporter::createLegend - @abstract create a legend entry represending this reporter's channels - @result An IOReportLegendEntry object or NULL on failure. - @discussion - All channels added to the reporter will be represented - in the resulting legend entry. - - Legends must be published togethar as an array under the - kIOReportLegendKey in the I/O Kit registry. The IOReportLegend - class can be used to properly combine legend entries from multiple - reporters as well as to put channels into groups of interest to - observers. When published, individual legend entries share - characteristics such as group and sub-group. Multiple IOReporter - instances are required to produce independent legend entries which - can then be published with different characteristics. - - Drivers wishing to publish legends should do so as part of their - ::start() routine. As superclasses *may* have installed legend - entries, any existing existing legend should be retrieved and - IOReportLegend used to merge it with the new entries. - - Recommendations for best practices are forthcoming. - - Instead of calling createLegend on your reporter object and then - appending it manually to IOReportLegend, one may prefer to call - IOReportLegend::appendReporterLegend which creates and appends a - reporter's IOReportLegendEntry in a single call. - - Locking: same-instance concurrency SAFE, MAY BLOCK -*/ - IOReportLegendEntry* createLegend(void); - + * @abstract create a legend entry representing this reporter's channels + * @result An IOReportLegendEntry object or NULL on failure. + * @discussion + * All channels added to the reporter will be represented + * in the resulting legend entry.
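For illustration, a reporter created in ::start() might add channels as sketched below; the channel IDs and the `_reporter` member are hypothetical.

// Hypothetical channel IDs built from ASCII so user space sees a
// readable name even though no explicit name is passed for 'reads'.
uint64_t readsID  = IOREPORT_MAKEID('r', 'e', 'a', 'd', 's', ' ', ' ', ' ');
uint64_t writesID = IOREPORT_MAKEID('w', 'r', 'i', 't', 'e', 's', ' ', ' ');

if (_reporter->addChannel(readsID) != kIOReturnSuccess ||
    _reporter->addChannel(writesID, "write ops") != kIOReturnSuccess) {
    // Sketch: tear down the reporter on failure.
    OSSafeReleaseNULL(_reporter);
}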
+ * + * Legends must be published together as an array under the + * kIOReportLegendKey in the I/O Kit registry. The IOReportLegend + * class can be used to properly combine legend entries from multiple + * reporters as well as to put channels into groups of interest to + * observers. When published, individual legend entries share + * characteristics such as group and sub-group. Multiple IOReporter + * instances are required to produce independent legend entries which + * can then be published with different characteristics. + * + * Drivers wishing to publish legends should do so as part of their + * ::start() routine. As superclasses *may* have installed legend + * entries, any existing legend should be retrieved and + * IOReportLegend used to merge it with the new entries. + * + * Recommendations for best practices are forthcoming. + * + * Instead of calling createLegend on your reporter object and then + * appending it manually to IOReportLegend, one may prefer to call + * IOReportLegend::appendReporterLegend which creates and appends a + * reporter's IOReportLegendEntry in a single call. + * + * Locking: same-instance concurrency SAFE, MAY BLOCK + */ + IOReportLegendEntry* createLegend(void); + /*! @function IOReporter::configureReport - @abstract track IOService::configureReport(), provide sizing info - - @param channelList - channels to configure - @param action - enable/disable/size, etc (see IOReportTypes.h) - @param result - *incremented* for kIOReportGetDimensions - @param destination - action-specific default destination - @result appropriate IOReturn code - - @discussion - Any time a reporting driver's ::configureReport method is invoked, - this method should be invoked on each IOReporter that is being - used by that driver to report channels in channelList. - - Any channels in channelList which are not tracked by this reporter - are ignored. ::configureReport(kIOReportGetDimensions) expects - the full size of all channels, including any reported by - superclasses. It is valid to call this routine on multiple - reporter objects in succession and they will increment 'result' - to provide the correct total. - - In the initial release, this routine is only required to calculate - the response to kIOReportGetDimensions, but in the future it will - will enable functionality like "triggered polling" via - kIOReportNotifyHubOnChange. Internally, it is already keeping - track of the number of times each channel has been enabled and - disabled. 13073064 tracks adding a method to see whether any - channels are currently being observed. - - The static IOReporter::configureAllReports() will call this method - on multiple reporters grouped in an OSSet. - - Locking: same-instance concurrency SAFE, MAY BLOCK -*/ - IOReturn configureReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination); - + * @abstract track IOService::configureReport(), provide sizing info + * + * @param channelList - channels to configure + * @param action - enable/disable/size, etc (see IOReportTypes.h) + * @param result - *incremented* for kIOReportGetDimensions + * @param destination - action-specific default destination + * @result appropriate IOReturn code + * + * @discussion + * Any time a reporting driver's ::configureReport method is invoked, + * this method should be invoked on each IOReporter that is being + * used by that driver to report channels in channelList.
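A sketch of the legend-publication flow just described, assuming IOReportLegend's with()/addLegendEntry()/getLegend() interfaces declared later in this header; the group names are hypothetical, and the recommended merge with a superclass's existing legend is omitted for brevity.

IOReportLegendEntry *entry = _reporter->createLegend();
if (entry) {
    // Start a fresh legend; a careful driver would first copy and
    // merge any legend a superclass already published.
    IOReportLegend *legend = IOReportLegend::with(NULL);
    if (legend) {
        legend->addLegendEntry(entry, "MyGroup", "MySubGroup");
        setProperty(kIOReportLegendKey, legend->getLegend());
        legend->release();
    }
    entry->release();
}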
+ * + * Any channels in channelList which are not tracked by this reporter + * are ignored. ::configureReport(kIOReportGetDimensions) expects + * the full size of all channels, including any reported by + * superclasses. It is valid to call this routine on multiple + * reporter objects in succession and they will increment 'result' + * to provide the correct total. + * + * In the initial release, this routine is only required to calculate + * the response to kIOReportGetDimensions, but in the future it + * will enable functionality like "triggered polling" via + * kIOReportNotifyHubOnChange. Internally, it is already keeping + * track of the number of times each channel has been enabled and + * disabled. 13073064 tracks adding a method to see whether any + * channels are currently being observed. + * + * The static IOReporter::configureAllReports() will call this method + * on multiple reporters grouped in an OSSet. + * + * Locking: same-instance concurrency SAFE, MAY BLOCK + */ + IOReturn configureReport(IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination); + /*! @function IOReporter::updateReport - @abstract Produce standard reply to IOService::updateReport() - - @param channelList - channels to update - @param action - copy/trace data (see IOReportTypes.h) - @param result - action-specific return value (e.g. size of data) - @param destination - destination for this update (action-specific) - @result appropriate IOReturn code - - @discussion - This method searches channelList for channels tracked by this - reporter, writes the corresponding data into 'destination', and - updates 'result'. It should be possible to pass a given set of - IOService::updateReport() arguments to any and all reporters as - well as to super::updateReport() and get the right result. - - The static IOReporter::updateAllReports() will call this method - on an OSSet of reporters. - - Locking: same-instance concurrency SAFE, WILL NOT BLOCK -*/ - IOReturn updateReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination); /*! @function IOReporter::free - @abstract Releases the object and all its resources. - - @discussion - ::free() [called on last ->release()] assumes that init() [called - by static ::with() methods] has completed successfully. + * @abstract Releases the object and all its resources.
+ * + * @discussion + * ::free() [called on last ->release()] assumes that init() [called + * by static ::with() methods] has completed successfully. + * + * Locking: same-instance concurrency UNSAFE + */ + virtual void free(void) APPLE_KEXT_OVERRIDE; - Locking: same-instance concurrency UNSAFE -*/ - virtual void free(void) APPLE_KEXT_OVERRIDE; - /*********************************/ /*** 2b. Useful Static Methods ***/ @@ -290,485 +290,484 @@ public: */ /*! @function IOReporter::configureAllReports - @abstract call configureReport() on multiple IOReporter objects - - @param reporters - OSSet of IOReporter objects - @param channelList - full list of channels to configure - @param action - enable/disable/size, etc - @param result - action-specific returned value - @param destination - action-specific default destination - @result success if all objects successfully complete - IOReporter::configureReport() - - @discussion - The OSSet must only contain IOReporter instances. The presence - of non-IOReporter instances will cause this function to return - kIOReturnBadArgument. If any reporter returns an error, the - function will immediately return that error. - - Per the IOReporter::configureReport() documentation, each - reporter will search channelList for channels it is reporting - and provide a partial response. -*/ - static IOReturn configureAllReports(OSSet *reporters, - IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination); + * @abstract call configureReport() on multiple IOReporter objects + * + * @param reporters - OSSet of IOReporter objects + * @param channelList - full list of channels to configure + * @param action - enable/disable/size, etc + * @param result - action-specific returned value + * @param destination - action-specific default destination + * @result success if all objects successfully complete + * IOReporter::configureReport() + * + * @discussion + * The OSSet must only contain IOReporter instances. The presence + * of non-IOReporter instances will cause this function to return + * kIOReturnBadArgument. If any reporter returns an error, the + * function will immediately return that error. + * + * Per the IOReporter::configureReport() documentation, each + * reporter will search channelList for channels it is reporting + * and provide a partial response. + */ + static IOReturn configureAllReports(OSSet *reporters, + IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination); // FIXME: just put the function (inline-ish) here? - + /*! @function IOReporter::updateAllReports - @abstract call updateReport() on multiple IOReporter objects - - @param reporters - OSSet of IOReporter objects - @param channelList - full list of channels to update - @param action - type/style of update - @param result - returned details about what was updated - @param destination - destination for this update (action-specific) - @result IOReturn code - @discussion - The OSSet must only contain IOReporter instances. The presence - of non-IOReporter instances will cause this function to return - kIOReturnBadArgument. If any reporter returns an error, the - function will immediately return that error. - - Per the IOReporter::configureReport() documentation, each - reporter will search channelList for channels it is reporting - and provide a partial response. 
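Where a driver keeps several reporters in an OSSet (a hypothetical `_reporters` below), the two static helpers just documented let the IOService hooks fan out with one call each; a minimal sketch:

// Sketch: inside the same two IOService hooks shown earlier, a driver
// holding several reporters in an OSSet (_reporters, hypothetical)
// can delegate to all of them at once.
IOReturn res = IOReporter::configureAllReports(_reporters, channelList,
                                               action, result, destination);
// ... and in updateReport():
res = IOReporter::updateAllReports(_reporters, channelList,
                                   action, result, destination);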
-*/ - static IOReturn updateAllReports(OSSet *reporters, - IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination); + * @abstract call updateReport() on multiple IOReporter objects + * + * @param reporters - OSSet of IOReporter objects + * @param channelList - full list of channels to update + * @param action - type/style of update + * @param result - returned details about what was updated + * @param destination - destination for this update (action-specific) + * @result IOReturn code + * @discussion + * The OSSet must only contain IOReporter instances. The presence + * of non-IOReporter instances will cause this function to return + * kIOReturnBadArgument. If any reporter returns an error, the + * function will immediately return that error. + * + * Per the IOReporter::configureReport() documentation, each + * reporter will search channelList for channels it is reporting + * and provide a partial response. + */ + static IOReturn updateAllReports(OSSet *reporters, + IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination); // FIXME: just put the function (inline-ish) here? - /* Protected (subclass-only) Methods - - General subclassing is not encouraged as we intend to improve - internal interfaces. If you need something that might require - a subclass, please file a bug against IOReporting/X and we will - help you. - - One important concept for sub-classes (not clients) is that report - data is stored in IOReportElement structures (see IOReportTypes.h). - */ +/* Protected (subclass-only) Methods + * + * General subclassing is not encouraged as we intend to improve + * internal interfaces. If you need something that might require + * a subclass, please file a bug against IOReporting/X and we will + * help you. + * + * One important concept for sub-classes (not clients) is that report + * data is stored in IOReportElement structures (see IOReportTypes.h). + */ protected: /*! @function IOReporter::lockReporterConfig - @function IOReporter::unlockReporterConfig - @abstract prevent concurrent reconfiguration of a reporter - - @discussion - lockReporterConfig() takes a mutex-based lock intended to prevent - concurrent access to the reporter's configuration. It is not - intended to prevent updates to the reporter's data. As long as - all other requirements are met, it is safe to simultaneously hold - both the configuration and data locks on a single reporter. - - lockReporterConfig() is used by routines such as addChannel(). - See also lockReporter() and ::handle*Swap*() below. -*/ - void lockReporterConfig(void); - void unlockReporterConfig(void); - + * @function IOReporter::unlockReporterConfig + * @abstract prevent concurrent reconfiguration of a reporter + * + * @discussion + * lockReporterConfig() takes a mutex-based lock intended to prevent + * concurrent access to the reporter's configuration. It is not + * intended to prevent updates to the reporter's data. As long as + * all other requirements are met, it is safe to simultaneously hold + * both the configuration and data locks on a single reporter. + * + * lockReporterConfig() is used by routines such as addChannel(). + * See also lockReporter() and ::handle*Swap*() below. + */ + void lockReporterConfig(void); + void unlockReporterConfig(void); + /*! 
@function IOReporter::lockReporter - @function IOReporter::unlockReporter - @abstract prevent concurrent access to a reporter's data - - @discussion - This method grabs a lock intended to control access the reporter's - reporting data. Sub-classes maninupating internal report values - must make sure the reporter is locked (usually by the most generic - public interface) before calling getElementValues(), - copyElementValues(), or setElementValues(). - - Subclasses should ensure that this lock is taken exactly once - before directly accessing reporter data. For example, - [virtual] IOFooReporter::handleSetFoo(.) { - // assert(lock_held) - getElementValues(1..) - getElementValues(3..) - getElementValues(5..) - [calculate] - setElementValues(6..) - } - IOFooReporter::setFoo(.) { // not virtual - lockReporter() - handleSetFoo(.) - unlockReporter() - } - - IOReporter::handle*() use lockReporter() similarly. For example, - the lock is taken by IOReporter::updateReport() and is already - held by the time any ::updateChannelValues() methods are called. - - Subclasses cannot call this routine if the lock is already held. - That's why IOReporting generally only calls it from non-virtual - public methods. In particular, this method should not be called - it from ::handle*() methods which exist to allow override after - the lock is taken. - - Because lockReporter() uses a spin lock, it is SAFE to use in the - interrupt context. For the same reason, however, it is UNSAFE - to perform any blocking blocking operations (including memory - allocations) while holding this lock. -*/ - void lockReporter(void); - void unlockReporter(void); + * @function IOReporter::unlockReporter + * @abstract prevent concurrent access to a reporter's data + * + * @discussion + * This method grabs a lock intended to control access to the reporter's + * reporting data. Sub-classes manipulating internal report values + * must make sure the reporter is locked (usually by the most generic + * public interface) before calling getElementValues(), + * copyElementValues(), or setElementValues(). + * + * Subclasses should ensure that this lock is taken exactly once + * before directly accessing reporter data. For example, + * [virtual] IOFooReporter::handleSetFoo(.) { + * // assert(lock_held) + * getElementValues(1..) + * getElementValues(3..) + * getElementValues(5..) + * [calculate] + * setElementValues(6..) + * } + * IOFooReporter::setFoo(.) { // not virtual + * lockReporter() + * handleSetFoo(.) + * unlockReporter() + * } + * + * IOReporter::handle*() use lockReporter() similarly. For example, + * the lock is taken by IOReporter::updateReport() and is already + * held by the time any ::updateChannelValues() methods are called. + * + * Subclasses cannot call this routine if the lock is already held. + * That's why IOReporting generally only calls it from non-virtual + * public methods. In particular, this method should not be called + * from ::handle*() methods which exist to allow override after + * the lock is taken. + * + * Because lockReporter() uses a spin lock, it is SAFE to use in the + * interrupt context. For the same reason, however, it is UNSAFE + * to perform any blocking operations (including memory + * allocations) while holding this lock. + */ + void lockReporter(void); + void unlockReporter(void); /*! - @discussion - The ::handle*Swap* functions allow subclasses to safely reconfigure - their internal state.
A non-virtual function handles locking - and invokes the functions in order: - - lockReporterConfig() // protecting instance vars but not content - - prepare / allocate buffers of the new size - - if error, bail (unlocking, of course) - - - lockReporter() // protecting data / blocking updates - - swap: preserve continuing data / install new buffers - - unlockReporter() - - - deallocate now-unused buffers - - unlockReporterConfig() -*/ + * @discussion + * The ::handle*Swap* functions allow subclasses to safely reconfigure + * their internal state. A non-virtual function handles locking + * and invokes the functions in order: + * - lockReporterConfig() // protecting instance vars but not content + * - prepare / allocate buffers of the new size + * - if error, bail (unlocking, of course) + * + * - lockReporter() // protecting data / blocking updates + * - swap: preserve continuing data / install new buffers + * - unlockReporter() + * + * - deallocate now-unused buffers + * - unlockReporterConfig() + */ /*! @function IOReporter::handleSwapPrepare - @abstract allocate memory in preparation for an instance variable swap - - @param newNChannels target number of channels - @result IOReturn code - - @discussion - ::handleSwapPrepare() is responsible for allocating appropriately- - sized buffers (based on the new number of channels) and storing - them in _swap* instance variables. If returning and error, it - must deallocate any buffers and set to NULL any _swap* variables. - - Locking: The caller must ensure that the *config* lock is HELD but - that the reporter (data) lock is *NOT HELD*. -*/ - virtual IOReturn handleSwapPrepare(int newNChannels); + * @abstract allocate memory in preparation for an instance variable swap + * + * @param newNChannels target number of channels + * @result IOReturn code + * + * @discussion + * ::handleSwapPrepare() is responsible for allocating appropriately- + * sized buffers (based on the new number of channels) and storing + * them in _swap* instance variables. If returning an error, it + * must deallocate any buffers and set to NULL any _swap* variables. + * + * Locking: The caller must ensure that the *config* lock is HELD but + * that the reporter (data) lock is *NOT HELD*. + */ + virtual IOReturn handleSwapPrepare(int newNChannels); /*! @function IOReporter::handleAddChannelSwap - @abstract update primary instance variables with new buffers
- - Pseudo-code incorporating these suggestions: - res = ; swapComplete = false; - if () goto finish - tmpBuf = _primaryBuf; _primaryBuf = _swapBuf; _swapBuf = _primaryBuf; - ... - swapComplete = true; - res = super::handle*Swap() - ... - finish: - if (res && swapComplete) // unswap - - Locking: The caller must ensure that BOTH the configuration and - reporter (data) locks are HELD. -*/ - virtual IOReturn handleAddChannelSwap(uint64_t channel_id, - const OSSymbol *symChannelName); + * @abstract update primary instance variables with new buffers + * + * @param channel_id ID of channel being added + * @param symChannelName optional channel name, in an allocated object + * @result IOReturn code + * + * @discussion + * handleAddChannelSwap() replaces the primary instance variables + * with buffers allocated in handleSwapPrepare(). It copies the + * existing data into the appropriate portion of the new buffers. + * Because it is specific to adding one channel, it assumes that the + * target number of channels is one greater than the current value + * of _nChannels. + * + * IOReporter::handleAddChannelSwap() increments _nElements and + * _nChannels. To ensure that these variables describe the current + * buffers throughout ::handle*Swap(), subclasses overriding this + * method should call super::handleAddChannelSwap() after swapping + * their own instance variables. + * + * If returning an error, all implementations should leave their + * instance variables as they found them (*unswapped*). That ensures + * handleSwapCleanup() cleans up the unused buffers regardless of + * whether the swap was complete. + * + * Pseudo-code incorporating these suggestions: + * res = ; swapComplete = false; + * if () goto finish + * tmpBuf = _primaryBuf; _primaryBuf = _swapBuf; _swapBuf = tmpBuf; + * ... + * swapComplete = true; + * res = super::handle*Swap() + * ... + * finish: + * if (res && swapComplete) // unswap + * + * Locking: The caller must ensure that BOTH the configuration and + * reporter (data) locks are HELD. + */ + virtual IOReturn handleAddChannelSwap(uint64_t channel_id, + const OSSymbol *symChannelName); /*! @function IOReporter::handleSwapCleanup - @abstract release and forget unused buffers - - @param swapNChannels channel-relative size of the _swap buffers - - @discussion - ::handleSwapCleanup() is responsible for deallocating the buffers - no longer used after a swap. It must always be called if - SwapPrepare() completes successfully. Because bufers may be - swapped in and out of existance, the _swap* variables may be - NULL and should be set to NULL when complete. - - Locking: The caller must ensure that the *config* lock is HELD but - that the reporter (data) lock is *NOT HELD*. -*/ - virtual void handleSwapCleanup(int swapNChannels); + * @abstract release and forget unused buffers + * + * @param swapNChannels channel-relative size of the _swap buffers + * + * @discussion + * ::handleSwapCleanup() is responsible for deallocating the buffers + * no longer used after a swap. It must always be called if + * SwapPrepare() completes successfully. Because buffers may be + * swapped in and out of existence, the _swap* variables may be + * NULL and should be set to NULL when complete. + * + * Locking: The caller must ensure that the *config* lock is HELD but + * that the reporter (data) lock is *NOT HELD*. + */ + virtual void handleSwapCleanup(int swapNChannels); /*!
@function IOReporter::handleConfigureReport - @abstract override vector for IOReporter::configureReport() - [parameters and result should exactly match] - - @discussion - The public base class method takes the reporter lock, calls this - function, and then drops the lock. Subclasses should not call - this function directly. -*/ - virtual IOReturn handleConfigureReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination); + * @abstract override vector for IOReporter::configureReport() + * [parameters and result should exactly match] + * + * @discussion + * The public base class method takes the reporter lock, calls this + * function, and then drops the lock. Subclasses should not call + * this function directly. + */ + virtual IOReturn handleConfigureReport(IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination); /*! @function IOReporter::handleUpdateReport - @abstract override vector for IOReporter::updateReport() - [parameters and result should exactly match] - - @discussion - The public base class method takes the reporter lock, calls this - function, and then drops the lock. Subclasses should not call - this function directly. - - This function may be overriden but the common case should be to - simply update reporter's specific values by overriding - IOReporter::updateChannelValues(). -*/ - virtual IOReturn handleUpdateReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination); + * @abstract override vector for IOReporter::updateReport() + * [parameters and result should exactly match] + * + * @discussion + * The public base class method takes the reporter lock, calls this + * function, and then drops the lock. Subclasses should not call + * this function directly. + * + * This function may be overridden but the common case should be to + * simply update the reporter's specific values by overriding + * IOReporter::updateChannelValues(). + */ + virtual IOReturn handleUpdateReport(IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination); /* @function IOReporter::handleCreateLegend - @abstract override vector for IOReporter::createLegend() - [parameters and result should exactly match] - - @discussion - The public base class method takes the reporter lock, calls this - function, and then drops the lock. Subclasses should not call - this function directly. -*/ - virtual IOReportLegendEntry* handleCreateLegend(void); + * @abstract override vector for IOReporter::createLegend() + * [parameters and result should exactly match] + * + * @discussion + * The public base class method takes the reporter lock, calls this + * function, and then drops the lock. Subclasses should not call + * this function directly. + */ + virtual IOReportLegendEntry* handleCreateLegend(void); /*! @function IOReporter::updateChannelValues - @abstract update channel values for IOReporter::updateReport() - - @param channel_index - logical (internal) index of the channel - @result appropriate IOReturn code - - @discussion - Internal reporter method to allow a subclass to update channel - data when updateReport() is called. This routine handles the - common case of a subclass needing to refresh state in response - to IOReporter::updateReport(). It saves the complexity of - parsing the full parameters to IOReporter::updateReport(). - - The IOReporter base class implementation does not do anything - except return success.
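A sketch of the updateChannelValues() pattern just described, for a hypothetical `MyReporter` with dimension-1 channels (so a channel's only element shares the channel's index); the values struct is treated as opaque and simply re-recorded with a fresh timestamp:

IOReturn
MyReporter::updateChannelValues(int channel_index)
{
    // The reporter (data) lock is already held here; only
    // non-blocking work is permitted.
    IOReportElementValues vals;
    IOReturn res = copyElementValues(channel_index, &vals);
    if (res == kIOReturnSuccess) {
        // Re-record the snapshot; setElementValues() stamps the
        // current mach_absolute_time when no time is passed.
        res = setElementValues(channel_index, &vals);
    }
    return res;
}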
- - Locking: IOReporter::updateReport() takes the reporter lock, - determines the indices involved, calls this function, and - then proceeds to provide values to the caller. If subclasses - need to call this routine directly, they must ensure that - the reporter (data) lock is held: see - IOReporter::lockReporter(). -*/ - virtual IOReturn updateChannelValues(int channel_index); - - -/*! @function IOReporter::updateReportChannel - @abstract Internal method to extract channel data to a destination - - @param channel_index - offset into internal elements array - @param nElements - incremented by the number of IOReportElements added - @param destination - pointer to the destination buffer - @result IOReturn code - - @discussion - updateReportChannel() is used to extract a single channel's - data to the updateReport() destination. - - Locking: Caller must ensure that the reporter (data) lock is held. -*/ - IOReturn updateReportChannel(int channel_index, - int *nElements, - IOBufferMemoryDescriptor *destination); - - -/*! @function IOReporter::setElementValues - @abstract Atomically update a specific member of _elements[]. + * @abstract update channel values for IOReporter::updateReport() + * + * @param channel_index - logical (internal) index of the channel + * @result appropriate IOReturn code + * + * @discussion + * Internal reporter method to allow a subclass to update channel + * data when updateReport() is called. This routine handles the + * common case of a subclass needing to refresh state in response + * to IOReporter::updateReport(). It saves the complexity of + * parsing the full parameters to IOReporter::updateReport(). + * + * The IOReporter base class implementation does not do anything + * except return success. + * + * Locking: IOReporter::updateReport() takes the reporter lock, + * determines the indices involved, calls this function, and + * then proceeds to provide values to the caller. If subclasses + * need to call this routine directly, they must ensure that + * the reporter (data) lock is held: see + * IOReporter::lockReporter(). + */ + virtual IOReturn updateChannelValues(int channel_index); - @param element_index - index of the _element in internal array - @param values - IORepoterElementValues to replace those at _elements[idx] - @param record_time - optional mach_absolute_time to be used for metadata - @result IOReturn code - @discussion - element_index can be obtained from getFirstElementIndex(). If - record_time is not provided, IOReporter::setElementValues() will - fetch the current mach_absolute_time. If the current time is - already known, it is more efficient to pass it along. +/*! @function IOReporter::updateReportChannel + * @abstract Internal method to extract channel data to a destination + * + * @param channel_index - offset into internal elements array + * @param nElements - incremented by the number of IOReportElements added + * @param destination - pointer to the destination buffer + * @result IOReturn code + * + * @discussion + * updateReportChannel() is used to extract a single channel's + * data to the updateReport() destination. + * + * Locking: Caller must ensure that the reporter (data) lock is held. + */ + IOReturn updateReportChannel(int channel_index, + int *nElements, + IOBufferMemoryDescriptor *destination); + - Locking: Caller must ensure that the reporter (data) lock is held. -*/ - virtual IOReturn setElementValues(int element_index, - IOReportElementValues *values, - uint64_t record_time = 0); +/*! 
@function IOReporter::setElementValues + * @abstract Atomically update a specific member of _elements[]. + * + * @param element_index - index of the _element in internal array + * @param values - IOReportElementValues to replace those at _elements[idx] + * @param record_time - optional mach_absolute_time to be used for metadata + * @result IOReturn code + * + * @discussion + * element_index can be obtained from getFirstElementIndex(). If + * record_time is not provided, IOReporter::setElementValues() will + * fetch the current mach_absolute_time. If the current time is + * already known, it is more efficient to pass it along. + * + * Locking: Caller must ensure that the reporter (data) lock is held. + */ + virtual IOReturn setElementValues(int element_index, + IOReportElementValues *values, + uint64_t record_time = 0); /*! @function IOReporter::getElementValues - @abstract Internal method to directly access the values of an element - - @param element_index - index of the _element in internal array - @result A pointer to the element values requested or NULL on failure - - @discussion Locking: Caller must ensure that the reporter (data) lock is held. - The returned pointer is only valid until unlockReporter() is called. -*/ - virtual const IOReportElementValues* getElementValues(int element_index); - + * @abstract Internal method to directly access the values of an element + * + * @param element_index - index of the _element in internal array + * @result A pointer to the element values requested or NULL on failure + * + * @discussion Locking: Caller must ensure that the reporter (data) lock is held. + * The returned pointer is only valid until unlockReporter() is called. + */ + virtual const IOReportElementValues* getElementValues(int element_index); + /*! @function IOReporter::getFirstElementIndex - @abstract Returns the first element index for a channel - - @param channel_id - ID of the channel - @param element_index - pointer to the returned element_index - @result appropriate IOReturn code - - @discussion - For efficiently and thread-safely reading _elements - - Locking: Caller must ensure that the reporter (data) lock is held. -*/ - virtual IOReturn getFirstElementIndex(uint64_t channel_id, - int *element_index); - + * @abstract Returns the first element index for a channel + * + * @param channel_id - ID of the channel + * @param element_index - pointer to the returned element_index + * @result appropriate IOReturn code + * + * @discussion + * For efficient, thread-safe reading of _elements + * + * Locking: Caller must ensure that the reporter (data) lock is held. + */ + virtual IOReturn getFirstElementIndex(uint64_t channel_id, + int *element_index); + /*! @function IOReporter::getChannelIndex - @abstract Returns the index of a channel from internal data structures - - @param channel_id - ID of the channel - @param channel_index - pointer to the returned element_index - @result appropriate IOReturn code - - @discussion - For efficiently and thread-safely reading channels - - Locking: Caller must ensure that the reporter (data) lock is held.
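Following the locking discipline above, a public, non-virtual method on a hypothetical subclass might bracket these accessors (plus copyElementValues(), declared just below) like this sketch; `kMyChannelID` is a hypothetical channel ID:

void
MyReporter::touchChannel(void)
{
    lockReporter();                       // take the data lock exactly once
    int elt = 0;
    if (getFirstElementIndex(kMyChannelID, &elt) == kIOReturnSuccess) {
        IOReportElementValues vals;
        if (copyElementValues(elt, &vals) == kIOReturnSuccess) {
            setElementValues(elt, &vals); // re-record with the current time
        }
    }
    unlockReporter();
}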
-*/ - virtual IOReturn getChannelIndex(uint64_t channel_id, - int *channel_index); - + * @abstract Returns the index of a channel from internal data structures + * + * @param channel_id - ID of the channel + * @param channel_index - pointer to the returned channel_index + * @result appropriate IOReturn code + * + * @discussion + * For efficient, thread-safe reading of channels + * + * Locking: Caller must ensure that the reporter (data) lock is held. + */ + virtual IOReturn getChannelIndex(uint64_t channel_id, + int *channel_index); + /*! @function IOReporter::getChannelIndices - @abstract Returns the index of a channel and its corresponding - first element index from internal data structure - - @param channel_id - ID of the channel - @param channel_index - pointer to the returned channel_index - @param element_index - pointer to the returned element_index - @result appropriate IOReturn code - - @discussion - For efficiently and thread-safely reading channel elements. - It is commonly useful to get access to both channel and element - indices togther. This convenience method allows sub-classes to - get both indices simultaneously. - - Locking: Caller must ensure that the reporter (data) lock is held. -*/ - virtual IOReturn getChannelIndices(uint64_t channel_id, - int *channel_index, - int *element_index); + * @abstract Returns the index of a channel and its corresponding + * first element index from internal data structures + * + * @param channel_id - ID of the channel + * @param channel_index - pointer to the returned channel_index + * @param element_index - pointer to the returned element_index + * @result appropriate IOReturn code + * + * @discussion + * For efficient, thread-safe reading of channel elements. + * It is commonly useful to get access to both channel and element + * indices together. This convenience method allows sub-classes to + * get both indices simultaneously. + * + * Locking: Caller must ensure that the reporter (data) lock is held. + */ + virtual IOReturn getChannelIndices(uint64_t channel_id, + int *channel_index, + int *element_index); /*! @function IOReporter::copyElementValues - @abstract Copies the values of an internal element to *elementValues - - @param element_index - Index of the element to return values from - @param elementValues - For returning the content of element values - @result Returns the content of an element - - @discussion - For efficiently and thread-safely reading _elements. - May need to find the index of the element first. + * @abstract Copies the values of an internal element to *elementValues + * + * @param element_index - Index of the element to return values from + * @param elementValues - For returning the content of element values + * @result Appropriate IOReturn code + * + * @discussion + * For efficient, thread-safe reading of _elements. + * May need to find the index of the element first. + * + * Locking: Caller must ensure that the reporter (data) lock is held. + */ + virtual IOReturn copyElementValues(int element_index, + IOReportElementValues *elementValues); - Locking: Caller must ensure that the reporter (data) lock is held. -*/ - virtual IOReturn copyElementValues(int element_index, - IOReportElementValues *elementValues); - // private methods private: /*!
@function IOReporter::copyChannelIDs - @abstract return an an OSArray of the reporter's - channel IDs - - @result An OSArray of the repoter's channel ID's as OSNumbers - - @discussion - This method is an internal helper function used to prepare a - legend entry. It encapsulates the channel IDs in OSNumbers and - aggregates them in an OSArray used when building the IOReportLegend - - Locking: Caller must ensure that the reporter (data) lock is held. -*/ - OSArray* copyChannelIDs(void); + * @abstract return an OSArray of the reporter's + * channel IDs + * + * @result An OSArray of the reporter's channel IDs as OSNumbers + * + * @discussion + * This method is an internal helper function used to prepare a + * legend entry. It encapsulates the channel IDs in OSNumbers and + * aggregates them in an OSArray used when building the IOReportLegend. + * + * Locking: Caller must ensure that the reporter (data) lock is held. + */ + OSArray* copyChannelIDs(void); /*! @function IOReporter::legendWith - @abstract Internal method to help create legend entries - - @param channelIDs - OSArray of OSNumber(uint64_t) channels IDs. - @param channelNames - parrallel OSArray of OSSymbol(rich names) - @param channelType - the type of all channels in this legend - @param unit - The unit for the quantity recorded by this reporter object - - @result An IOReportLegendEntry object or NULL on failure - - @discussion - This static method is the main legend creation function. It is called by - IOReporter sub-classes and is responsible for building an - IOReportLegendEntry corresponding to this reporter object. - This legend entry may be extended by the sub-class of IOReporter if - required. - - Locking: SAFE to call concurrently (no static globals), MAY BLOCK -*/ - static IOReportLegendEntry* legendWith(OSArray *channelIDs, - OSArray *channelNames, - IOReportChannelType channelType, - IOReportUnit unit); + * @abstract Internal method to help create legend entries + * + * @param channelIDs - OSArray of OSNumber(uint64_t) channel IDs. + * @param channelNames - parallel OSArray of OSSymbol(rich names) + * @param channelType - the type of all channels in this legend + * @param unit - The unit for the quantity recorded by this reporter object + * + * @result An IOReportLegendEntry object or NULL on failure + * + * @discussion + * This static method is the main legend creation function. It is called by + * IOReporter sub-classes and is responsible for building an + * IOReportLegendEntry corresponding to this reporter object. + * This legend entry may be extended by the sub-class of IOReporter if + * required. + * + * Locking: SAFE to call concurrently (no static globals), MAY BLOCK + */ + static IOReportLegendEntry* legendWith(OSArray *channelIDs, + OSArray *channelNames, + IOReportChannelType channelType, + IOReportUnit unit); // protected instance variables (want to get rid of these) protected: - IOReportChannelType _channelType; - uint64_t _driver_id; // driver reporting data - - // IOHistogramReporter accesses these; need to re-do its instantiation - IOReportElement *_elements; - int *_enableCounts; // refcount kIOReportEnable/Disable - uint16_t _channelDimension; // Max channel size - int _nElements; - int _nChannels; // Total Channels in this reporter - OSArray *_channelNames; - - // MUST be protected because check is a macro!
- bool _reporterIsLocked; - bool _reporterConfigIsLocked; - - // Required for swapping inside addChannel - IOReportElement *_swapElements; - int *_swapEnableCounts; + IOReportChannelType _channelType; + uint64_t _driver_id; // driver reporting data + +// IOHistogramReporter accesses these; need to re-do its instantiation + IOReportElement *_elements; + int *_enableCounts; // refcount kIOReportEnable/Disable + uint16_t _channelDimension;// Max channel size + int _nElements; + int _nChannels; // Total Channels in this reporter + OSArray *_channelNames; + +// MUST be protected because check is a macro! + bool _reporterIsLocked; + bool _reporterConfigIsLocked; + +// Required for swapping inside addChannel + IOReportElement *_swapElements; + int *_swapEnableCounts; // private instance variables private: - IOReportUnit _unit; + IOReportUnit _unit; - int _enabled; // 'enabled' if _enabled > 0 + int _enabled;// 'enabled' if _enabled > 0 - IOLock *_configLock; - IOInterruptState _interruptState; - IOSimpleLock *_reporterLock; - + IOLock *_configLock; + IOInterruptState _interruptState; + IOSimpleLock *_reporterLock; }; @@ -777,812 +776,812 @@ private: /************************************/ /*! - @class IOSimpleReporter - @abstract Report simple integers - @discussion - Each IOSimpleReporter can have an arbitrary number of channels, - each publishing a single integer value at any given time. -*/ + * @class IOSimpleReporter + * @abstract Report simple integers + * @discussion + * Each IOSimpleReporter can have an arbitrary number of channels, + * each publishing a single integer value at any given time. + */ class IOSimpleReporter : public IOReporter { - OSDeclareDefaultStructors(IOSimpleReporter); - + OSDeclareDefaultStructors(IOSimpleReporter); + public: /*! @function IOSimpleReporter::with - @abstract create an initialized simple reporter - - @param reportingService - IOService associated with all channels - @param categories - The category in which the report should be classified - @param unit - The unit for the quantity recorded by the reporter object - @result On success, an instance of IOSimpleReporter, else NULL - - @discussion - Creates an instance of IOSimpleReporter object - - Locking: SAFE to call concurrently (no static globals), MAY BLOCK. -*/ - static IOSimpleReporter* with(IOService *reportingService, - IOReportCategories categories, - IOReportUnit unit); - -/*! @function IOSimpleReporter::setValue - @abstract Thread safely set a channel's value - - @param channel_id - ID of the channel for which the value needs to be set - @param value - New channel value - @result Appropriate IOReturn code + * @abstract create an initialized simple reporter + * + * @param reportingService - IOService associated with all channels + * @param categories - The category in which the report should be classified + * @param unit - The unit for the quantity recorded by the reporter object + * @result On success, an instance of IOSimpleReporter, else NULL + * + * @discussion + * Creates an instance of IOSimpleReporter object + * + * Locking: SAFE to call concurrently (no static globals), MAY BLOCK. + */ + static IOSimpleReporter* with(IOService *reportingService, + IOReportCategories categories, + IOReportUnit unit); - @discussion - Updates the value of a channel to the provided value. +/*! 
@function IOSimpleReporter::setValue + * @abstract Thread safely set a channel's value + * + * @param channel_id - ID of the channel for which the value needs to be set + * @param value - New channel value + * @result Appropriate IOReturn code + * + * @discussion + * Updates the value of a channel to the provided value. + * + * Locking: same-instance concurrency SAFE, WILL NOT BLOCK + */ + IOReturn setValue(uint64_t channel_id, + int64_t value); - Locking: same-instance concurrency SAFE, WILL NOT BLOCK -*/ - IOReturn setValue(uint64_t channel_id, - int64_t value); - /*! @function IOSimpleReporter::incrementValue - @abstract Thread safely increment a channel's value by a given amount - - @param channel_id - ID of the channel for which the value needs to be incremented - @param increment - Amount to be added to the current channel value - @result Appropriate IOReturn code - @discussion - Increments the value of the channel ID by the provided amount. - - Locking: same-instance concurrency SAFE, WILL NOT BLOCK -*/ - IOReturn incrementValue(uint64_t channel_id, - int64_t increment); - -/*! @function IOSimpleReporter::getValue - @abstract Thread safely access a channel value + * @abstract Thread safely increment a channel's value by a given amount + * + * @param channel_id - ID of the channel for which the value needs to be incremented + * @param increment - Amount to be added to the current channel value + * @result Appropriate IOReturn code + * @discussion + * Increments the value of the channel ID by the provided amount. + * + * Locking: same-instance concurrency SAFE, WILL NOT BLOCK + */ + IOReturn incrementValue(uint64_t channel_id, + int64_t increment); - @param channel_id - ID of the channel to get a value from - @result Returns the current value stored in the channel - @discussion - Accessor method to a channel's current stored value +/*! @function IOSimpleReporter::getValue + * @abstract Thread safely access a channel value + * + * @param channel_id - ID of the channel to get a value from + * @result Returns the current value stored in the channel + * @discussion + * Accessor method to a channel's current stored value + * + * Locking: same-instance concurrency SAFE, WILL NOT BLOCK + */ + int64_t getValue(uint64_t channel_id); - Locking: same-instance concurrency SAFE, WILL NOT BLOCK -*/ - int64_t getValue(uint64_t channel_id); - protected: - -/*! @function IOSimpleReporter::initWith - @abstract instance method implementation called by IOSimpleReporter::with - @discussion - See description of parameters above +/*! @function IOSimpleReporter::initWith + * @abstract instance method implementation called by IOSimpleReporter::with + * + * @discussion + * See description of parameters above + * + * Locking: same-instance concurrency UNSAFE + */ + virtual bool initWith(IOService *reportingService, + IOReportCategories categories, + IOReportUnit unit); - Locking: same-instance concurrency UNSAFE -*/ - virtual bool initWith(IOService *reportingService, - IOReportCategories categories, - IOReportUnit unit); - private: - }; -/*! - @class IOStateReporter - @abstract Report state machine data - @discussion - Each IOStateReporter can report information for an arbitrary number - of similar state machines. All must have the same number of states. -*/ +/*! + * @class IOStateReporter + * @abstract Report state machine data + * @discussion + * Each IOStateReporter can report information for an arbitrary number + * of similar state machines. All must have the same number of states. 
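+ *
+ * A minimal usage sketch (illustrative only; the category, the channel
+ * ID and name, and the state numbering are hypothetical, and "this"
+ * is the driver's IOService):
+ *
+ *   // two states per channel: 0 = idle, 1 = busy
+ *   IOStateReporter *rep = IOStateReporter::with(this,
+ *       kIOReportCategoryPower, 2);
+ *   if (rep) {
+ *       rep->addChannel(0x4242, "engine0");  // hypothetical channel
+ *       rep->setChannelState(0x4242, 1);     // channel is now busy
+ *   }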
+ */
class IOStateReporter : public IOReporter
{
- OSDeclareDefaultStructors(IOStateReporter);
-
+ OSDeclareDefaultStructors(IOStateReporter);
+
public:
-
+
/*! @function IOStateReporter::with
- @abstract State reporter static creation method
-
- @param reportingService - The I/O Kit service for this reporter's channels
- @param categories - The categories for this reporter's channels
- @param nstates - Maximum number of states for this reporter's channels
- @param unit - optional parameter if using override/increment...()
- @result on success, an IOStateReporter instance, else NULL
-
- @discussion
- Creates an instance of IOStateReporter. The default time scale
- is the current system's notion of mach_absolute_time(). Using a
- non-default time scale requires the use of
- override/incrementChannelState() instead of setState().
- setState() always updates using mach_absolute_time().
-
- Locking: SAFE to call concurrently (no static globals), MAY BLOCK
-*/
- static IOStateReporter* with(IOService *reportingService,
- IOReportCategories categories,
- int nstates,
- IOReportUnit unit = kIOReportUnitHWTicks);
-
+ * @abstract State reporter static creation method
+ *
+ * @param reportingService - The I/O Kit service for this reporter's channels
+ * @param categories - The categories for this reporter's channels
+ * @param nstates - Maximum number of states for this reporter's channels
+ * @param unit - optional parameter if using override/increment...()
+ * @result on success, an IOStateReporter instance, else NULL
+ *
+ * @discussion
+ * Creates an instance of IOStateReporter. The default time scale
+ * is the current system's notion of mach_absolute_time(). Using a
+ * non-default time scale requires the use of
+ * override/incrementChannelState() instead of setState().
+ * setState() always updates using mach_absolute_time().
+ *
+ * Locking: SAFE to call concurrently (no static globals), MAY BLOCK
+ */
+ static IOStateReporter* with(IOService *reportingService,
+ IOReportCategories categories,
+ int nstates,
+ IOReportUnit unit = kIOReportUnitHWTicks);
+
/*! @function IOStateReporter::setStateID
- @abstract Assign a non-default ID to a state
-
- @param channel_id - ID of channel containing the state in question
- @param state_index - index of state to give an ID: [0..(nstates-1)]
- @param state_id - 64-bit state ID, for ASCII, use IOREPORT_MAKEID
-
- @result Appropriate IOReturn code
-
- @discussion
- By default, IOStateReporter identifies its channel states by
- numbering them from 0 to <nstates - 1>. If setStateID is not
- called to customize the state IDs, the numbered states will be
- kept throughout the life of the object and it is safe to reference
- those states by their indices. Otherwise, after setStateID() has
- been called, the ordering of states is no longer guaranteed and
- the client must reference states by their assigned state ID.
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- IOReturn setStateID(uint64_t channel_id,
- int state_index,
- uint64_t state_id);
+ * @abstract Assign a non-default ID to a state
+ *
+ * @param channel_id - ID of channel containing the state in question
+ * @param state_index - index of state to give an ID: [0..(nstates-1)]
+ * @param state_id - 64-bit state ID, for ASCII, use IOREPORT_MAKEID
+ *
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * By default, IOStateReporter identifies its channel states by
+ * numbering them from 0 to <nstates - 1>. If setStateID is not
+ * called to customize the state IDs, the numbered states will be
+ * kept throughout the life of the object and it is safe to reference
+ * those states by their indices. Otherwise, after setStateID() has
+ * been called, the ordering of states is no longer guaranteed and
+ * the client must reference states by their assigned state ID.
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ IOReturn setStateID(uint64_t channel_id,
+ int state_index,
+ uint64_t state_id);

/*! @function IOStateReporter::setChannelState
- @abstract Updates the current state of a channel to a new state
-
- @param channel_id - ID of the channel which is updated to a new state
- @param new_state_id - ID of the target state for this channel
- @param last_intransition - deprecated: time of most recent entry
- @param prev_state_residency - deprecated: time spent in previous state
- @result Appropriate IOReturn code
-
- @discussion
- setChannelState() updates the amount of time spent in the previous
- state (if any) and increments the number of transitions into the
- new state. It also sets the target state's last transition time to
- the current time and enables internal time-keeping for the channel.
- In this mode, calls like getStateResidencyTime() and updateReport()
- automatically update a channel's time in state.
-
- new_state_id identifies the target state as initialized
- (0..<nstates-1>) or as configured by setStateID().
-
- Drivers wishing to compute and report their own time in state
- should use incrementChannelState() or overrideChannelState(). It
- is not currently possible for a driver to synchronize with the
- automatic time-keeping enabled by setChannelState(). The
- 4-argument version of setChannelState() is thus impossible to
- use correctly. In the future, there may be a setChannelState()
- which accepts a last_intransition parameter and uses it to
- automatically calculate time in state (ERs -> IOReporting / X).
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- IOReturn setChannelState(uint64_t channel_id,
- uint64_t new_state_id,
- uint64_t last_intransition,
- uint64_t prev_state_residency) __deprecated;
+ * @abstract Updates the current state of a channel to a new state
+ *
+ * @param channel_id - ID of the channel which is updated to a new state
+ * @param new_state_id - ID of the target state for this channel
+ * @param last_intransition - deprecated: time of most recent entry
+ * @param prev_state_residency - deprecated: time spent in previous state
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * setChannelState() updates the amount of time spent in the previous
+ * state (if any) and increments the number of transitions into the
+ * new state. It also sets the target state's last transition time to
+ * the current time and enables internal time-keeping for the channel.
+ * In this mode, calls like getStateResidencyTime() and updateReport()
+ * automatically update a channel's time in state.
+ *
+ * new_state_id identifies the target state as initialized
+ * (0..<nstates-1>) or as configured by setStateID().
+ *
+ * Drivers wishing to compute and report their own time in state
+ * should use incrementChannelState() or overrideChannelState(). It
+ * is not currently possible for a driver to synchronize with the
+ * automatic time-keeping enabled by setChannelState(). The
+ * 4-argument version of setChannelState() is thus impossible to
+ * use correctly. In the future, there may be a setChannelState()
+ * which accepts a last_intransition parameter and uses it to
+ * automatically calculate time in state (ERs -> IOReporting / X).
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ IOReturn setChannelState(uint64_t channel_id,
+ uint64_t new_state_id,
+ uint64_t last_intransition,
+ uint64_t prev_state_residency) __deprecated;

/*! @function IOStateReporter::setChannelState
- @abstract Updates the current state of a channel to a new state
-
- @param channel_id - ID of the channel which is updated to a new state
- @param new_state_id - ID of the target state for this channel
- @result Appropriate IOReturn code
-
- @discussion
- setChannelState() updates the amount of time spent in the previous
- state (if any) and increments the number of transitions into the
- new state. It also sets the target state's last transition time to
- the current time and enables internal time-keeping for the channel.
- In this mode, calls like getStateResidencyTime() and updateReport()
- automatically update a channel's time in state.
-
- new_state_id identifies the target state as initialized
- (0..<nstates-1>) or as configured by setStateID().
-
- Drivers wishing to compute and report their own time in state
- should use incrementChannelState() or overrideChannelState(). It
- is not currently possible for a driver to synchronize with the
- automatic time-keeping enabled by setChannelState(). The
- 4-argument version of setChannelState() is thus impossible to
- use correctly. In the future, there may be a setChannelState()
- which accepts a last_intransition parameter and uses it to
- automatically calculate time in state (ERs -> IOReporting / X).
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- IOReturn setChannelState(uint64_t channel_id,
- uint64_t new_state_id);
+ * @abstract Updates the current state of a channel to a new state
+ *
+ * @param channel_id - ID of the channel which is updated to a new state
+ * @param new_state_id - ID of the target state for this channel
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * setChannelState() updates the amount of time spent in the previous
+ * state (if any) and increments the number of transitions into the
+ * new state. It also sets the target state's last transition time to
+ * the current time and enables internal time-keeping for the channel.
+ * In this mode, calls like getStateResidencyTime() and updateReport()
+ * automatically update a channel's time in state.
+ *
+ * new_state_id identifies the target state as initialized
+ * (0..<nstates-1>) or as configured by setStateID().
+ *
+ * Drivers wishing to compute and report their own time in state
+ * should use incrementChannelState() or overrideChannelState(). It
+ * is not currently possible for a driver to synchronize with the
+ * automatic time-keeping enabled by setChannelState(). The
+ * 4-argument version of setChannelState() is thus impossible to
+ * use correctly. In the future, there may be a setChannelState()
+ * which accepts a last_intransition parameter and uses it to
+ * automatically calculate time in state (ERs -> IOReporting / X).
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ IOReturn setChannelState(uint64_t channel_id,
+ uint64_t new_state_id);

/*! @function IOStateReporter::setState
- @abstract Updates state for single channel reporters
-
- @param new_state_id - New state for the channel
- @result Appropriate IOReturn code.
- - @discussion - setState() is a convenience method for single-channel state - reporter instances. An error will be returned if the reporter - in question has more than one channel. - - See further discussion at setChannelState(). + * @abstract Updates state for single channel reporters + * + * @param new_state_id - New state for the channel + * @result Appropriate IOReturn code. + * + * @discussion + * setState() is a convenience method for single-channel state + * reporter instances. An error will be returned if the reporter + * in question has more than one channel. + * + * See further discussion at setChannelState(). + * + * Locking: same-instance concurrency SAFE, WILL NOT BLOCK + */ + IOReturn setState(uint64_t new_state_id); - Locking: same-instance concurrency SAFE, WILL NOT BLOCK -*/ - IOReturn setState(uint64_t new_state_id); - /*! @function IOStateReporter::setState - @abstract Updates state for single channel reporters - - @param new_state_id - New state for the channel - @param last_intransition - deprecated: time of most recent entry - @param prev_state_residency - deprecated: spent in previous state - @result Appropriate IOReturn code. - - @discussion - setState() is a convenience method for single-channel state - reporter instances. An error will be returned if the reporter - in question has more than one channel. - - See further discussion at setChannelState(). - - Locking: same-instance concurrency SAFE, WILL NOT BLOCK -*/ - IOReturn setState(uint64_t new_state_id, - uint64_t last_intransition, - uint64_t prev_state_residency) __deprecated; + * @abstract Updates state for single channel reporters + * + * @param new_state_id - New state for the channel + * @param last_intransition - deprecated: time of most recent entry + * @param prev_state_residency - deprecated: spent in previous state + * @result Appropriate IOReturn code. + * + * @discussion + * setState() is a convenience method for single-channel state + * reporter instances. An error will be returned if the reporter + * in question has more than one channel. + * + * See further discussion at setChannelState(). + * + * Locking: same-instance concurrency SAFE, WILL NOT BLOCK + */ + IOReturn setState(uint64_t new_state_id, + uint64_t last_intransition, + uint64_t prev_state_residency) __deprecated; /*! @function IOStateReporter::overrideChannelState - @abstract Overrides state data for a channel with passed arguments - - @param channel_id - ID of the channel which state is to be updated - @param state_id - state id for the channel - @param time_in_state - time used as new total time in state - @param intransitions - total number of transitions into state - @param last_intransition - mach_absolute_time of most recent entry (opt) - @result Appropriate IOReturn code - - @discussion - overrideChannelState() sets a particular state's time in state - and transition count to the values provided. The optional - last_intransition records the last time the channel transitioned - into the given state. Passing 0 for time_in_state and - intransitions will force the current values to 0. Passing 0 - for last_intransition for all states will disable the notion - of a channel's "current state." - - The most recent last_intransition (amongst all states in a channel) - logically determines the current state. If last_intransition is - not provided for any state, the channel will not report a current - For consistent results, it is important to either never specify - last_intransition or to always specify it. 
-
- There is currently a bug in determining current state (13423273).
- The IOReportMacros.h macros only update the state's metadata
- timestamp and libIOReport only looks at the metadata timestamps
- to determine the current state. Until that bug is fixed, whichever
- state is updated most recently will be considered the "current"
- state by libIOReport.
-
- ::setState()'s automatic "time in state" updates are not supported
- when using overrideChannelState(). Clients must not use
- overrideChannelState() on any channel that has ::setState() called
- on it. Unlike with ::setState(), clients using
- overrideChannelState() are responsible for ensuring that data is
- up to date for updateReport() calls. The correct way to do this
- is for a driver's ::updateReport() method to push the most up to
- date values into the reporters before calling
- super::updateReport().
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- IOReturn overrideChannelState(uint64_t channel_id,
- uint64_t state_id,
- uint64_t time_in_state,
- uint64_t intransitions,
- uint64_t last_intransition = 0);
+ * @abstract Overrides state data for a channel with passed arguments
+ *
+ * @param channel_id - ID of the channel which state is to be updated
+ * @param state_id - state id for the channel
+ * @param time_in_state - time used as new total time in state
+ * @param intransitions - total number of transitions into state
+ * @param last_intransition - mach_absolute_time of most recent entry (opt)
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * overrideChannelState() sets a particular state's time in state
+ * and transition count to the values provided. The optional
+ * last_intransition records the last time the channel transitioned
+ * into the given state. Passing 0 for time_in_state and
+ * intransitions will force the current values to 0. Passing 0
+ * for last_intransition for all states will disable the notion
+ * of a channel's "current state."
+ *
+ * The most recent last_intransition (amongst all states in a channel)
+ * logically determines the current state. If last_intransition is
+ * not provided for any state, the channel will not report a current
+ * state. For consistent results, it is important to either never specify
+ * last_intransition or to always specify it.
+ *
+ * There is currently a bug in determining current state (13423273).
+ * The IOReportMacros.h macros only update the state's metadata
+ * timestamp and libIOReport only looks at the metadata timestamps
+ * to determine the current state. Until that bug is fixed, whichever
+ * state is updated most recently will be considered the "current"
+ * state by libIOReport.
+ *
+ * ::setState()'s automatic "time in state" updates are not supported
+ * when using overrideChannelState(). Clients must not use
+ * overrideChannelState() on any channel that has ::setState() called
+ * on it. Unlike with ::setState(), clients using
+ * overrideChannelState() are responsible for ensuring that data is
+ * up to date for updateReport() calls. The correct way to do this
+ * is for a driver's ::updateReport() method to push the most up to
+ * date values into the reporters before calling
+ * super::updateReport().
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ IOReturn overrideChannelState(uint64_t channel_id,
+ uint64_t state_id,
+ uint64_t time_in_state,
+ uint64_t intransitions,
+ uint64_t last_intransition = 0);

/*! @function IOStateReporter::incrementChannelState
- @abstract Updates state data for a channel with passed arguments
-
- @param channel_id - ID of the channel which state is to be updated
- @param state_id - state id for the channel
- @param time_in_state - time to be accumulated for time in state
- @param intransitions - number of transitions into state to be added
- @param last_intransition - mach_absolute_time of most recent entry (opt)
- @result Appropriate IOReturn code
-
- @discussion
- incrementChannelState() adds time_in_state and intransitions
- to the current values stored for a particular state. If provided,
- last_intransition overwrites the time the state was most recently
- entered. Passing 0 for time_in_state and intransitions will have
- no effect. Passing 0 for last_intransition for all states will
- disable the notion of a channel's "current state."
-
- The most recent last_intransition (amongst all states in a channel)
- logically determines the current state. If last_intransition is
- not provided for any state, the channel will not report a current
- For consistent results, it is important to either never specify
- last_intransition or to always specify it.
-
- There is currently a bug in determining current state (13423273).
- The IOReportMacros.h macros only update the state's metadata
- timestamp and libIOReport only looks at the metadata timestamps
- to determine the current state. Until that bug is fixed, whichever
- state is updated most recently will be considered the "current"
- state by libIOReport.
-
- ::setState()'s automatic "time in state" updates are not supported
- when using incrementChannelState(). Clients must not use
- incrementChannelState() on any channel that has ::setState()
- called on it. Unlike with ::setState(), clients using
- incrementChannelState() are responsible for ensuring that data
- is up to date for updateReport() calls. The correct way to do
- this is for a driver's ::updateReport() method to push the most
- up to date values into the reporters before calling
- super::updateReport().
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- IOReturn incrementChannelState(uint64_t channel_id,
- uint64_t state_id,
- uint64_t time_in_state,
- uint64_t intransitions,
- uint64_t last_intransition = 0);
-
+ * @abstract Updates state data for a channel with passed arguments
+ *
+ * @param channel_id - ID of the channel which state is to be updated
+ * @param state_id - state id for the channel
+ * @param time_in_state - time to be accumulated for time in state
+ * @param intransitions - number of transitions into state to be added
+ * @param last_intransition - mach_absolute_time of most recent entry (opt)
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * incrementChannelState() adds time_in_state and intransitions
+ * to the current values stored for a particular state. If provided,
+ * last_intransition overwrites the time the state was most recently
+ * entered. Passing 0 for time_in_state and intransitions will have
+ * no effect. Passing 0 for last_intransition for all states will
+ * disable the notion of a channel's "current state."
+ *
+ * The most recent last_intransition (amongst all states in a channel)
+ * logically determines the current state. If last_intransition is
+ * not provided for any state, the channel will not report a current
+ * state. For consistent results, it is important to either never specify
+ * last_intransition or to always specify it.
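+ *
+ * For example, a driver keeping its own residency accounting might
+ * feed each delta in as follows (sketch; rep is the driver's
+ * IOStateReporter and the IDs and delta values are hypothetical):
+ *
+ *   // accumulate dt ticks and one transition into kStateActive
+ *   rep->incrementChannelState(kMyChannelID, kStateActive, dt, 1, now);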
+ *
+ * There is currently a bug in determining current state (13423273).
+ * The IOReportMacros.h macros only update the state's metadata
+ * timestamp and libIOReport only looks at the metadata timestamps
+ * to determine the current state. Until that bug is fixed, whichever
+ * state is updated most recently will be considered the "current"
+ * state by libIOReport.
+ *
+ * ::setState()'s automatic "time in state" updates are not supported
+ * when using incrementChannelState(). Clients must not use
+ * incrementChannelState() on any channel that has ::setState()
+ * called on it. Unlike with ::setState(), clients using
+ * incrementChannelState() are responsible for ensuring that data
+ * is up to date for updateReport() calls. The correct way to do
+ * this is for a driver's ::updateReport() method to push the most
+ * up to date values into the reporters before calling
+ * super::updateReport().
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ IOReturn incrementChannelState(uint64_t channel_id,
+ uint64_t state_id,
+ uint64_t time_in_state,
+ uint64_t intransitions,
+ uint64_t last_intransition = 0);
+

/*! @function IOStateReporter::setStateByIndices
- @abstract update a channel state without validating channel_id
-
- @param channel_index - 0..<nChannels-1>, available from getChannelIndex()
- @param new_state_index - New state (by index) for the channel
- @result Appropriate IOReturn code
-
- @discussion
- Similar to setState(), setStateByIndices() sets a channel's state
- without searching for the channel or state IDs. It will perform
- bounds checking, but relies on the caller to properly indicate
- the indices of the channel and state. Clients can rely on channels
- being added to IOStateReporter in order: the first channel will
- have index 0, the second index 1, etc. Like ::setState(),
- "time in state" calculations are handled automatically.
-
- setStateByIndices() is faster than than setChannelState(), but
- it should only be used where the latter's performance overhead
- might be a problem. For example, many channels in a single
- reporter and high-frequency state changes.
-
- Drivers wishing to compute and report their own time in state
- should use incrementChannelState() or overrideChannelState(). It
- is not currently possible for a driver to synchronize with the
- automatic time-keeping enabled by setStateByIndices(). The
- 4-argument version of setChannelState() is thus impossible to
- use correctly. In the future, there may be a setChannelState()
- which accepts a last_intransition parameter and uses it to
- automatically calculate time in state (ERs -> IOReporting / X).
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- IOReturn setStateByIndices(int channel_index,
- int new_state_index);
-
+ * @abstract update a channel state without validating channel_id
+ *
+ * @param channel_index - 0..<nChannels-1>, available from getChannelIndex()
+ * @param new_state_index - New state (by index) for the channel
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * Similar to setState(), setStateByIndices() sets a channel's state
+ * without searching for the channel or state IDs. It will perform
+ * bounds checking, but relies on the caller to properly indicate
+ * the indices of the channel and state. Clients can rely on channels
+ * being added to IOStateReporter in order: the first channel will
+ * have index 0, the second index 1, etc. Like ::setState(),
+ * "time in state" calculations are handled automatically.
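+ *
+ * For example (sketch; assumes the first channel added to rep is the
+ * one being updated and that state index 1 is valid):
+ *
+ *   rep->setStateByIndices(0, 1);  // channel index 0 -> state index 1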
+ *
+ * setStateByIndices() is faster than setChannelState(), but
+ * it should only be used where the latter's performance overhead
+ * might be a problem. For example, many channels in a single
+ * reporter and high-frequency state changes.
+ *
+ * Drivers wishing to compute and report their own time in state
+ * should use incrementChannelState() or overrideChannelState(). It
+ * is not currently possible for a driver to synchronize with the
+ * automatic time-keeping enabled by setStateByIndices(). The
+ * 4-argument version of setChannelState() is thus impossible to
+ * use correctly. In the future, there may be a setChannelState()
+ * which accepts a last_intransition parameter and uses it to
+ * automatically calculate time in state (ERs -> IOReporting / X).
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ IOReturn setStateByIndices(int channel_index,
+ int new_state_index);
+

/*! @function IOStateReporter::setStateByIndices
- @abstract update a channel state without validating channel_id
-
- @param channel_index - 0..<nChannels-1>, available from getChannelIndex()
- @param new_state_index - New state (by index) for the channel
- @param last_intransition - deprecated: time of most recent entry
- @param prev_state_residency - deprecated: time spent in previous state
- @result Appropriate IOReturn code
-
- @discussion
- Similar to setState(), setStateByIndices() sets a channel's state
- without searching for the channel or state IDs. It will perform
- bounds checking, but relies on the caller to properly indicate
- the indices of the channel and state. Clients can rely on channels
- being added to IOStateReporter in order: the first channel will
- have index 0, the second index 1, etc. Like ::setState(),
- "time in state" calculations are handled automatically.
-
- setStateByIndices() is faster than than setChannelState(), but
- it should only be used where the latter's performance overhead
- might be a problem. For example, many channels in a single
- reporter and high-frequency state changes.
-
- Drivers wishing to compute and report their own time in state
- should use incrementChannelState() or overrideChannelState(). It
- is not currently possible for a driver to synchronize with the
- automatic time-keeping enabled by setStateByIndices(). The
- 4-argument version of setChannelState() is thus impossible to
- use correctly. In the future, there may be a setChannelState()
- which accepts a last_intransition parameter and uses it to
- automatically calculate time in state (ERs -> IOReporting / X).
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- IOReturn setStateByIndices(int channel_index,
- int new_state_index,
- uint64_t last_intransition,
- uint64_t prev_state_residency) __deprecated;
-
+ * @abstract update a channel state without validating channel_id
+ *
+ * @param channel_index - 0..<nChannels-1>, available from getChannelIndex()
+ * @param new_state_index - New state (by index) for the channel
+ * @param last_intransition - deprecated: time of most recent entry
+ * @param prev_state_residency - deprecated: time spent in previous state
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * Similar to setState(), setStateByIndices() sets a channel's state
+ * without searching for the channel or state IDs. It will perform
+ * bounds checking, but relies on the caller to properly indicate
+ * the indices of the channel and state. Clients can rely on channels
+ * being added to IOStateReporter in order: the first channel will
+ * have index 0, the second index 1, etc. Like ::setState(),
+ * "time in state" calculations are handled automatically.
+ *
+ * setStateByIndices() is faster than setChannelState(), but
+ * it should only be used where the latter's performance overhead
+ * might be a problem. For example, many channels in a single
+ * reporter and high-frequency state changes.
+ *
+ * Drivers wishing to compute and report their own time in state
+ * should use incrementChannelState() or overrideChannelState(). It
+ * is not currently possible for a driver to synchronize with the
+ * automatic time-keeping enabled by setStateByIndices(). The
+ * 4-argument version of setChannelState() is thus impossible to
+ * use correctly. In the future, there may be a setChannelState()
+ * which accepts a last_intransition parameter and uses it to
+ * automatically calculate time in state (ERs -> IOReporting / X).
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ IOReturn setStateByIndices(int channel_index,
+ int new_state_index,
+ uint64_t last_intransition,
+ uint64_t prev_state_residency) __deprecated;
+

/*! @function IOStateReporter::getStateInTransitions
- @abstract Accessor method for count of transitions into state
-
- @param channel_id - ID of the channel
- @param state_id - State of the channel
- @result Count of transitions into the requested state.
-
- @discussion
- Some clients may need to consume internally the data aggregated by the
- reporter object. This method allows a client to retrieve the count of
- transitions into the requested state for the channel_id.
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- uint64_t getStateInTransitions(uint64_t channel_id,
- uint64_t state_id);
+ * @abstract Accessor method for count of transitions into state
+ *
+ * @param channel_id - ID of the channel
+ * @param state_id - State of the channel
+ * @result Count of transitions into the requested state.
+ *
+ * @discussion
+ * Some clients may need to consume internally the data aggregated by the
+ * reporter object. This method allows a client to retrieve the count of
+ * transitions into the requested state for the channel_id.
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ uint64_t getStateInTransitions(uint64_t channel_id,
+ uint64_t state_id);

/*! @function IOStateReporter::getStateResidencyTime
- @abstract Accessor method for time spent in a given state
-
- @param channel_id - ID of the channel
- @param state_id - State of the channel
- @result Absolute time spent in specified state
-
- @discussion
- Some clients may need to consume internally the data aggregated
- by the by the reporter object. This method allows a client to
- retrieve the absolute time a particular channel recorded as spent
- in a specified state.
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- uint64_t getStateResidencyTime(uint64_t channel_id,
- uint64_t state_id);
-
+ * @abstract Accessor method for time spent in a given state
+ *
+ * @param channel_id - ID of the channel
+ * @param state_id - State of the channel
+ * @result Absolute time spent in specified state
+ *
+ * @discussion
+ * Some clients may need to consume internally the data aggregated
+ * by the reporter object. This method allows a client to
+ * retrieve the absolute time a particular channel recorded as spent
+ * in a specified state.
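+ *
+ * For example, to convert a channel's accumulated residency from
+ * ticks to nanoseconds (sketch; the channel and state IDs are
+ * hypothetical):
+ *
+ *   uint64_t ticks = rep->getStateResidencyTime(kMyChannelID, kStateIdle);
+ *   uint64_t ns;
+ *   absolutetime_to_nanoseconds(ticks, &ns);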
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ uint64_t getStateResidencyTime(uint64_t channel_id,
+ uint64_t state_id);
+

/*! @function IOStateReporter::getStateLastTransitionTime
- @abstract Accessor method for last time a transition occured
-
- @param channel_id - ID of the channel
- @param state_id - State of the channel
- @result Absolute time for when the last transition occured
-
- @discussion
- Some clients may need to consume internally the data aggregated
- by the by the reporter object. This method allows a client to
- retrieve the absolute time stamp for when the last transition into
- a specific state was recorded.
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- uint64_t getStateLastTransitionTime(uint64_t channel_id, uint64_t state_id);
-
+ * @abstract Accessor method for last time a transition occurred
+ *
+ * @param channel_id - ID of the channel
+ * @param state_id - State of the channel
+ * @result Absolute time for when the last transition occurred
+ *
+ * @discussion
+ * Some clients may need to consume internally the data aggregated
+ * by the reporter object. This method allows a client to
+ * retrieve the absolute time stamp for when the last transition into
+ * a specific state was recorded.
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ uint64_t getStateLastTransitionTime(uint64_t channel_id, uint64_t state_id);
+

/*! @function [DEPRECATED] IOStateReporter::getStateLastChannelUpdateTime
- @abstract Deprecated accessor for last time a channel was auto-updated
-
- @param channel_id - ID of the channel
- @result Absolute time for last time the channel was updated
-
- @discussion
- If a channel has had ::setState() called on it, calls such as
- getStateResidencyTime() or updateReport() will update time in the
- current state and update an internal "last channel update time."
- Because clients have no way to interlock with those methods, there
- is no sensible way to use this method and it will be removed in
- a future release.
-
- Locking: same-instance concurrency SAFE, WILL NOT BLOCK
-*/
- uint64_t getStateLastChannelUpdateTime(uint64_t channel_id) __deprecated;
-
+ * @abstract Deprecated accessor for last time a channel was auto-updated
+ *
+ * @param channel_id - ID of the channel
+ * @result Absolute time for last time the channel was updated
+ *
+ * @discussion
+ * If a channel has had ::setState() called on it, calls such as
+ * getStateResidencyTime() or updateReport() will update time in the
+ * current state and update an internal "last channel update time."
+ * Because clients have no way to interlock with those methods, there
+ * is no sensible way to use this method and it will be removed in
+ * a future release.
+ *
+ * Locking: same-instance concurrency SAFE, WILL NOT BLOCK
+ */
+ uint64_t getStateLastChannelUpdateTime(uint64_t channel_id) __deprecated;
+

/*! @function IOStateReporter::free
- @abstract Releases the object and all its resources.
-
- @discussion
- ::free() assumes that init() has completed. Clients should use
- the static ::with() methods to obtain fully-initialized reporter
- instances.
-
- Locking: same-instance concurrency UNSAFE
-*/
- virtual void free(void) APPLE_KEXT_OVERRIDE;
-
+ * @abstract Releases the object and all its resources.
+ *
+ * @discussion
+ * ::free() assumes that init() has completed. Clients should use
+ * the static ::with() methods to obtain fully-initialized reporter
+ * instances.
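+ *
+ * In practice, a driver usually just drops its reference rather than
+ * calling free() directly (sketch; _myReporter is a hypothetical
+ * member holding the reporter):
+ *
+ *   OSSafeReleaseNULL(_myReporter);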
+ *
+ * Locking: same-instance concurrency UNSAFE
+ */
+ virtual void free(void) APPLE_KEXT_OVERRIDE;
+
protected:

/*! @function IOStateReporter::initWith
- @abstract Instance method implementation called by ::with
-
- @discussion
- See description of parameters above
-*/
- virtual bool initWith(IOService *reportingService,
- IOReportCategories categories,
- int16_t nstates, IOReportUnit unit);
-
-
+ * @abstract Instance method implementation called by ::with
+ *
+ * @discussion
+ * See description of parameters above
+ */
+ virtual bool initWith(IOService *reportingService,
+ IOReportCategories categories,
+ int16_t nstates, IOReportUnit unit);
+
+
/*! @function IOStateReporter::handleSwapPrepare
- @abstract _swap* =
- [see IOReporter::handle*Swap* for more info]
-*/
- virtual IOReturn handleSwapPrepare(int newNChannels) APPLE_KEXT_OVERRIDE;
/*!
- @function IOStateReporter::handleAddChannelSwap
- @abstract swap in IOStateReporter's variables
-*/
- virtual IOReturn handleAddChannelSwap(uint64_t channel_id,
- const OSSymbol *symChannelName) APPLE_KEXT_OVERRIDE;
+ * @abstract _swap* =
+ * [see IOReporter::handle*Swap* for more info]
+ */
+ virtual IOReturn handleSwapPrepare(int newNChannels) APPLE_KEXT_OVERRIDE;
/*!
+ * @function IOStateReporter::handleAddChannelSwap
+ * @abstract swap in IOStateReporter's variables
+ */
+ virtual IOReturn handleAddChannelSwap(uint64_t channel_id,
+ const OSSymbol *symChannelName) APPLE_KEXT_OVERRIDE;
/*!
- @function IOStateReporter::handleSwapCleanup
- @abstract clean up unused buffers in _swap*
-*/
- virtual void handleSwapCleanup(int swapNChannels) APPLE_KEXT_OVERRIDE;
-
-/*! @function IOStateReporter::updateChannelValues
- @abstract Update accounting of time spent in current state
-
- @param channel_index - internal index of the channel
- @result appropriate IOReturn code
-
- @discussion
- Internal State reporter method to account for the time spent in
- the current state when updateReport() is called on the reporter's
- channels.
+ * @function IOStateReporter::handleSwapCleanup
+ * @abstract clean up unused buffers in _swap*
+ */
+ virtual void handleSwapCleanup(int swapNChannels) APPLE_KEXT_OVERRIDE;

- Locking: Caller must ensure that the reporter (data) lock is held.
-*/
- virtual IOReturn updateChannelValues(int channel_index) APPLE_KEXT_OVERRIDE;

+/*! @function IOStateReporter::updateChannelValues
+ * @abstract Update accounting of time spent in current state
+ *
+ * @param channel_index - internal index of the channel
+ * @result appropriate IOReturn code
+ *
+ * @discussion
+ * Internal State reporter method to account for the time spent in
+ * the current state when updateReport() is called on the reporter's
+ * channels.
+ *
+ * Locking: Caller must ensure that the reporter (data) lock is held.
+ */
+ virtual IOReturn updateChannelValues(int channel_index) APPLE_KEXT_OVERRIDE;

/*! @function IOStateReporter::setStateByIndices
- @abstract update a channel state without validating channel_id
-
- @param channel_index - 0..<nChannels-1>, available from getChannelIndex()
- @param new_state_index - New state for the channel
- @param last_intransition - to remove: time of most recent entry
- @param prev_state_residency - to remove: time spent in previous state
- @result Appropriate IOReturn code
-
- @discussion
- Locked version of IOReporter::setStateByIndices(). This method may be
- overriden by sub-classes.
-
- Locking: Caller must ensure that the reporter (data) lock is held.
-*/
- virtual IOReturn handleSetStateByIndices(int channel_index,
- int new_state_index,
- uint64_t last_intransition,
- uint64_t prev_state_residency);
-
+ * @abstract update a channel state without validating channel_id
+ *
+ * @param channel_index - 0..<nChannels-1>, available from getChannelIndex()
+ * @param new_state_index - New state for the channel
+ * @param last_intransition - to remove: time of most recent entry
+ * @param prev_state_residency - to remove: time spent in previous state
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * Locked version of IOStateReporter::setStateByIndices(). This method may be
+ * overridden by sub-classes.
+ *
+ * Locking: Caller must ensure that the reporter (data) lock is held.
+ */
+ virtual IOReturn handleSetStateByIndices(int channel_index,
+ int new_state_index,
+ uint64_t last_intransition,
+ uint64_t prev_state_residency);
+

/*! @function IOStateReporter::setStateID
- @abstract Assign a non-default ID to a state
-
- @param channel_id - ID of channel containing the state in question
- @param state_index - index of state to give an ID: [0..(nstates-1)]
- @param state_id - 64-bit state ID, for ASCII, use IOREPORT_MAKEID
-
- @result Appropriate IOReturn code
-
- @discussion
- Locked version of IOReporter::setStateID(). This method may be
- overriden by sub-classes
-
- Locking: Caller must ensure that the reporter (data) lock is held.
-*/
- virtual IOReturn handleSetStateID(uint64_t channel_id,
- int state_index,
- uint64_t state_id);
-
+ * @abstract Assign a non-default ID to a state
+ *
+ * @param channel_id - ID of channel containing the state in question
+ * @param state_index - index of state to give an ID: [0..(nstates-1)]
+ * @param state_id - 64-bit state ID, for ASCII, use IOREPORT_MAKEID
+ *
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * Locked version of IOStateReporter::setStateID(). This method may be
+ * overridden by sub-classes
+ *
+ * Locking: Caller must ensure that the reporter (data) lock is held.
+ */
+ virtual IOReturn handleSetStateID(uint64_t channel_id,
+ int state_index,
+ uint64_t state_id);
+

/*! @function IOStateReporter::handleOverrideChannelStateByIndices
- @abstract Overrides state data for a channel with passed arguments
-
- @param channel_index - index of the channel which state is to be updated
- @param state_index - index of the state id for the channel
- @param time_in_state - time used as new total time in state
- @param intransitions - total number of transitions into state
- @param last_intransition - mach_absolute_time of most recent entry (opt)
- @result Appropriate IOReturn code
-
- @discussion
- Locked version of IOReporter::overrideChannelState(). This method
- may be overriden by sub-classes.
-
- Locking: Caller must ensure that the reporter (data) lock is held.
-*/
- virtual IOReturn handleOverrideChannelStateByIndices(int channel_index,
- int state_index,
- uint64_t time_in_state,
- uint64_t intransitions,
- uint64_t last_intransition = 0);
+ * @abstract Overrides state data for a channel with passed arguments
+ *
+ * @param channel_index - index of the channel which state is to be updated
+ * @param state_index - index of the state id for the channel
+ * @param time_in_state - time used as new total time in state
+ * @param intransitions - total number of transitions into state
+ * @param last_intransition - mach_absolute_time of most recent entry (opt)
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * Locked version of IOStateReporter::overrideChannelState(). This method
+ * may be overridden by sub-classes.
+ *
+ * Locking: Caller must ensure that the reporter (data) lock is held.
+ */
+ virtual IOReturn handleOverrideChannelStateByIndices(int channel_index,
+ int state_index,
+ uint64_t time_in_state,
+ uint64_t intransitions,
+ uint64_t last_intransition = 0);

/*! @function IOStateReporter::handleIncrementChannelStateByIndices
- @abstract Updates state data for a channel with passed arguments
-
- @param channel_index - index of the channel which state is to be updated
- @param state_index - index of the state id for the channel
- @param time_in_state - time used as new total time in state
- @param intransitions - total number of transitions into state
- @param last_intransition - mach_absolute_time of most recent entry (opt)
- @result Appropriate IOReturn code
-
- @discussion
- Locked version of IOReporter::incrementChannelState(). This method
- may be overriden by sub-classes.
-
- Locking: Caller must ensure that the reporter (data) lock is held.
-*/
- virtual IOReturn handleIncrementChannelStateByIndices(int channel_index,
- int state_index,
- uint64_t time_in_state,
- uint64_t intransitions,
- uint64_t last_intransition = 0);
+ * @abstract Updates state data for a channel with passed arguments
+ *
+ * @param channel_index - index of the channel which state is to be updated
+ * @param state_index - index of the state id for the channel
+ * @param time_in_state - time used as new total time in state
+ * @param intransitions - total number of transitions into state
+ * @param last_intransition - mach_absolute_time of most recent entry (opt)
+ * @result Appropriate IOReturn code
+ *
+ * @discussion
+ * Locked version of IOStateReporter::incrementChannelState(). This method
+ * may be overridden by sub-classes.
+ *
+ * Locking: Caller must ensure that the reporter (data) lock is held.
+ */
+ virtual IOReturn handleIncrementChannelStateByIndices(int channel_index,
+ int state_index,
+ uint64_t time_in_state,
+ uint64_t intransitions,
+ uint64_t last_intransition = 0);

private:
- int *_currentStates; // current states (per chonnel)
- uint64_t *_lastUpdateTimes; // most recent auto-update
-
- // Required for swapping inside addChannel
- int *_swapCurrentStates;
- uint64_t *_swapLastUpdateTimes;
-
-enum valueSelector {
- kInTransitions,
- kResidencyTime,
- kLastTransitionTime
-};
- uint64_t _getStateValue(uint64_t channel_id,
- uint64_t state_id,
- enum valueSelector value);
-
- IOReturn _getStateIndices(uint64_t channel_id,
- uint64_t state_id,
- int *channel_index,
- int *state_index);
-
+ int *_currentStates; // current states (per channel)
+ uint64_t *_lastUpdateTimes; // most recent auto-update
+
+// Required for swapping inside addChannel
+ int *_swapCurrentStates;
+ uint64_t *_swapLastUpdateTimes;
+
+ enum valueSelector {
+ kInTransitions,
+ kResidencyTime,
+ kLastTransitionTime
+ };
+ uint64_t _getStateValue(uint64_t channel_id,
+ uint64_t state_id,
+ enum valueSelector value);
+
+ IOReturn _getStateIndices(uint64_t channel_id,
+ uint64_t state_id,
+ int *channel_index,
+ int *state_index);
};

/*!
- @class IOHistogramReporter
- @abstract Report histograms of values
- @discussion
- Each IOHistogramReporter can report one histogram representing
- how a given value has changed over time.
-*/
+ * @class IOHistogramReporter
+ * @abstract Report histograms of values
+ * @discussion
+ * Each IOHistogramReporter can report one histogram representing
+ * how a given value has changed over time.
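+ *
+ * A minimal creation sketch (illustrative only; the channel ID and
+ * name are hypothetical and config must point to a filled-in
+ * IOHistogramSegmentConfig describing the bucket layout):
+ *
+ *   IOHistogramReporter *rep = IOHistogramReporter::with(this,
+ *       kIOReportCategoryPerformance, 0x4843, "latency",
+ *       kIOReportUnitNone, 1, &config);
+ *   if (rep) {
+ *       rep->tallyValue(42);  // bumps the bucket containing 42
+ *   }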
+ */
class IOHistogramReporter : public IOReporter
{
- OSDeclareDefaultStructors(IOHistogramReporter);
-
+ OSDeclareDefaultStructors(IOHistogramReporter);
+
public:

/*! @function IOHistogramReporter::with
- @abstract Initializes the IOHistogramReporter instance variables and data structures
-
- @param reportingService - The I/O Kit service for this reporter's channels
- @param categories - The categories in which the report should be classified
- @param channelID - uint64_t channel identifier
- @param channelName - rich channel name as char*
- @param unit - The unit for the quantity recorded by the reporter object
- @param nSegments - Number of segments to be extracted from the config data structure
- @param config - Histograms require the caller to pass a configuration by segments
- @result an instance of the IOSimpleReporter object or NULL on error
-
- @discussion
- Creates an instance of histogram reporter object.
-
-FIXME: need more explanation of the config
-
- IOHistogramReporter currently only supports a single channel.
-
-
- */
- static IOHistogramReporter* with(IOService *reportingService,
- IOReportCategories categories,
- uint64_t channelID,
- const char *channelName,
- IOReportUnit unit,
- int nSegments,
- IOHistogramSegmentConfig *config);
+ * @abstract Initializes the IOHistogramReporter instance variables and data structures
+ *
+ * @param reportingService - The I/O Kit service for this reporter's channels
+ * @param categories - The categories in which the report should be classified
+ * @param channelID - uint64_t channel identifier
+ * @param channelName - rich channel name as char*
+ * @param unit - The unit for the quantity recorded by the reporter object
+ * @param nSegments - Number of segments to be extracted from the config data structure
+ * @param config - Histograms require the caller to pass a configuration by segments
+ * @result an instance of the IOHistogramReporter object or NULL on error
+ *
+ * @discussion
+ * Creates an instance of a histogram reporter object.
+ *
+ * FIXME: need more explanation of the config
+ *
+ * IOHistogramReporter currently only supports a single channel.
+ *
+ *
+ */
+ static IOHistogramReporter* with(IOService *reportingService,
+ IOReportCategories categories,
+ uint64_t channelID,
+ const char *channelName,
+ IOReportUnit unit,
+ int nSegments,
+ IOHistogramSegmentConfig *config);

/*! @function IOHistogramReporter::addChannel
- @abstract Override IOReporter::addChannel(*) to return an error
-
- @result kIOReturnUnsupported - doesn't support adding channels
-*/
- IOReturn addChannel(__unused uint64_t channelID, __unused const char *channelName = NULL) {
- return kIOReturnUnsupported;
- }
+ * @abstract Override IOReporter::addChannel(*) to return an error
+ *
+ * @result kIOReturnUnsupported - doesn't support adding channels
+ */
+ IOReturn
+ addChannel(__unused uint64_t channelID, __unused const char *channelName = NULL)
+ {
+ return kIOReturnUnsupported;
+ }

/*! @function IOHistogramReporter::overrideBucketValues
- @abstract Override values of a bucket at specified index
-
- @param index - index of bucket to override
- @param bucket_hits - new bucket hits count
- @param bucket_min - new bucket minimum value
- @param bucket_max - new bucket maximum value
- @param bucket_sum - new bucket sum
- @result Appropriate IOReturn code
-
- @discussion
- Replaces data in the bucket at the specified index with the data pointed
- to by bucket. No sanity check is performed on the data.
If the index - is out of bounds, kIOReturnBadArgument is returned. - - Locking: same-instance concurrency SAFE, WILL NOT BLOCK -*/ - - IOReturn overrideBucketValues(unsigned int index, - uint64_t bucket_hits, - int64_t bucket_min, - int64_t bucket_max, - int64_t bucket_sum); - -/*! @function IOHistogramReporter::tallyValue - @abstract Add a new value to the histogram + * @abstract Override values of a bucket at specified index + * + * @param index - index of bucket to override + * @param bucket_hits - new bucket hits count + * @param bucket_min - new bucket minimum value + * @param bucket_max - new bucket maximum value + * @param bucket_sum - new bucket sum + * @result Appropriate IOReturn code + * + * @discussion + * Replaces data in the bucket at the specified index with the data pointed + * to by bucket. No sanity check is performed on the data. If the index + * is out of bounds, kIOReturnBadArgument is returned. + * + * Locking: same-instance concurrency SAFE, WILL NOT BLOCK + */ - @param value - new value to add to the histogram - @result the index of the affected bucket, or -1 on error - - @discussion - The histogram reporter determines in which bucket the value - falls and increments it. The lowest and highest buckets - extend to negative and positive infinity, respectively. + IOReturn overrideBucketValues(unsigned int index, + uint64_t bucket_hits, + int64_t bucket_min, + int64_t bucket_max, + int64_t bucket_sum); - Locking: same-instance concurrency SAFE, WILL NOT BLOCK -*/ - int tallyValue(int64_t value); +/*! @function IOHistogramReporter::tallyValue + * @abstract Add a new value to the histogram + * + * @param value - new value to add to the histogram + * @result the index of the affected bucket, or -1 on error + * + * @discussion + * The histogram reporter determines in which bucket the value + * falls and increments it. The lowest and highest buckets + * extend to negative and positive infinity, respectively. + * + * Locking: same-instance concurrency SAFE, WILL NOT BLOCK + */ + int tallyValue(int64_t value); /*! @function IOHistogramReporter::free - @abstract Releases the object and all its resources. - - @discussion - ::free() assumes that init() has completed. Clients should use - the static ::with() methods to obtain fully-initialized reporter - instances. - - Locking: same-instance concurrency UNSAFE -*/ - virtual void free(void) APPLE_KEXT_OVERRIDE; + * @abstract Releases the object and all its resources. + * + * @discussion + * ::free() assumes that init() has completed. Clients should use + * the static ::with() methods to obtain fully-initialized reporter + * instances. + * + * Locking: same-instance concurrency UNSAFE + */ + virtual void free(void) APPLE_KEXT_OVERRIDE; protected: /*! @function IOHistogramReporter::initWith - @abstract instance method implementation called by ::with - - @discussion - See description of parameters above -*/ - virtual bool initWith(IOService *reportingService, - IOReportCategories categories, - uint64_t channelID, - const OSSymbol *channelName, - IOReportUnit unit, - int nSegments, - IOHistogramSegmentConfig *config); - + * @abstract instance method implementation called by ::with + * + * @discussion + * See description of parameters above + */ + virtual bool initWith(IOService *reportingService, + IOReportCategories categories, + uint64_t channelID, + const OSSymbol *channelName, + IOReportUnit unit, + int nSegments, + IOHistogramSegmentConfig *config); + /*! 
@function IOHistogramReporter::handleCreateLegend - @abstract Builds an IOReporting legend entry representing the channels of this reporter. - - @result An IOReportLegendEntry or NULL on failure - - @discussion - The returned legend entry may be appended to kIOReportLegendKey - to be published by the caller in the IORegistry. See the - IOReportLegend class for more details. - - Locking: same-instance concurrency SAFE, MAY BLOCK -*/ - IOReportLegendEntry* handleCreateLegend(void) APPLE_KEXT_OVERRIDE; - - + * @abstract Builds an IOReporting legend entry representing the channels of this reporter. + * + * @result An IOReportLegendEntry or NULL on failure + * + * @discussion + * The returned legend entry may be appended to kIOReportLegendKey + * to be published by the caller in the IORegistry. See the + * IOReportLegend class for more details. + * + * Locking: same-instance concurrency SAFE, MAY BLOCK + */ + IOReportLegendEntry* handleCreateLegend(void) APPLE_KEXT_OVERRIDE; + + private: - - int _segmentCount; - int64_t *_bucketBounds; - int _bucketCount; - IOHistogramSegmentConfig *_histogramSegmentsConfig; + + int _segmentCount; + int64_t *_bucketBounds; + int _bucketCount; + IOHistogramSegmentConfig *_histogramSegmentsConfig; }; @@ -1591,196 +1590,196 @@ private: /***********************************/ /*! - @class IOReportLegend - @abstract combine legend entries into a complete legend - @discussion - IOReportLegend adds metadata to legend entries and combines them - into a single OSArray that can be published under the - kIOReportLegendKey property in the I/O Kit registry. -*/ + * @class IOReportLegend + * @abstract combine legend entries into a complete legend + * @discussion + * IOReportLegend adds metadata to legend entries and combines them + * into a single OSArray that can be published under the + * kIOReportLegendKey property in the I/O Kit registry. + */ class IOReportLegend : public OSObject { - OSDeclareDefaultStructors(IOReportLegend); - + OSDeclareDefaultStructors(IOReportLegend); + public: /*! @function IOReportLegend::with - @abstract Create an instance of IOReportLegend - - @param legend - OSArray of the legend possibly already present in registry - @result an instance of IOReportLegend, or NULL on failure - - @discussion - An IOReporting legend (an OSArray of legend entries) may be already - present in the IORegistry. Thus the recommended way to publish - new entries is to append to any existing array as follows: - 1. call getProperty(kIOReportLegendKey) to get an existing legend. - - 2a. If it exists - - OSDynamicCast to OSArray - - and pass it to ::with() - IOReportLegend *legendMaker = IOReportLegend::with(legend); - The provided array is retained by IOReportLegend. - - 2b. If no legend already exists in the registry, pass NULL - IOReportLegend *legend = IOReportLegend::with(NULL); - This latter invocation will cause IOReportLegend to create a new - array internally (also holding one reference). - - At the cost of some registry churn, the static - IOReportLegend::addReporterLegend() will handle the above, removing - the need for any direct use of the IOReportLegend class. -*/ - static IOReportLegend* with(OSArray *legend); - + * @abstract Create an instance of IOReportLegend + * + * @param legend - OSArray of the legend possibly already present in registry + * @result an instance of IOReportLegend, or NULL on failure + * + * @discussion + * An IOReporting legend (an OSArray of legend entries) may be already + * present in the IORegistry. 
Thus the recommended way to publish + * new entries is to append to any existing array as follows: + * 1. call getProperty(kIOReportLegendKey) to get an existing legend. + * + * 2a. If it exists + * - OSDynamicCast to OSArray + * - and pass it to ::with() + * IOReportLegend *legendMaker = IOReportLegend::with(legend); + * The provided array is retained by IOReportLegend. + * + * 2b. If no legend already exists in the registry, pass NULL + * IOReportLegend *legend = IOReportLegend::with(NULL); + * This latter invocation will cause IOReportLegend to create a new + * array internally (also holding one reference). + * + * At the cost of some registry churn, the static + * IOReportLegend::addReporterLegend() will handle the above, removing + * the need for any direct use of the IOReportLegend class. + */ + static IOReportLegend* with(OSArray *legend); + /*! @function IOReportLegend::addLegendEntry - @abstract Add a new legend entry - - @param legendEntry - entry to be added to the internal legend array - @param groupName - primary group name for this entry - @param subGroupName - secondary group name for this entry - @result appropriate IOReturn code - - @discussion - The entry will be retained as an element of the internal array. - Legend entries are available from reporter objects. Entries - represent some number of channels with similar properties (such - as group and sub-group). Multiple legend entries with the same - group names will be aggregated in user space. - - Drivers that instantiate their reporter objects in response to - IOService::configureReport(kIOReportDisable) will need to create - temporary reporter objects for the purpose of creating their - legend entries. User-space legends are tracked by 12836893. -*/ - IOReturn addLegendEntry(IOReportLegendEntry *legendEntry, - const char *groupName, - const char *subGroupName); - + * @abstract Add a new legend entry + * + * @param legendEntry - entry to be added to the internal legend array + * @param groupName - primary group name for this entry + * @param subGroupName - secondary group name for this entry + * @result appropriate IOReturn code + * + * @discussion + * The entry will be retained as an element of the internal array. + * Legend entries are available from reporter objects. Entries + * represent some number of channels with similar properties (such + * as group and sub-group). Multiple legend entries with the same + * group names will be aggregated in user space. + * + * Drivers that instantiate their reporter objects in response to + * IOService::configureReport(kIOReportDisable) will need to create + * temporary reporter objects for the purpose of creating their + * legend entries. User-space legends are tracked by 12836893. + */ + IOReturn addLegendEntry(IOReportLegendEntry *legendEntry, + const char *groupName, + const char *subGroupName); + /*! @function IOReportLegend::addReporterLegend - @abstract Add a legend entry from a reporter object - - @param reporter - IOReporter to use to extract and append the legend - @param groupName - primary group name for this entry - @param subGroupName - secondary group name for this entry - @result appropriate IOReturn code - - @discussion - An IOReportLegendEntry will be created internally to this method from - the IOReporter object passed in argument. The entry will be released - internally after being appended to the IOReportLegend object. - Legend entries are available from reporter objects. 
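Putting the recipe above into code — a minimal sketch only, assuming a hypothetical IOService subclass method with a previously created `reporter`, with error handling elided (getLegend() is documented below):

    OSArray *existing = OSDynamicCast(OSArray, getProperty(kIOReportLegendKey));
    IOReportLegend *legendMaker = IOReportLegend::with(existing); // NULL is fine; a fresh array is created
    if (legendMaker) {
        legendMaker->addReporterLegend(reporter, "AcmeDriver", "Stats"); // illustrative group names
        setProperty(kIOReportLegendKey, legendMaker->getLegend());       // publish in the I/O Kit registry
        legendMaker->release();
    }
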
Entries - represent some number of channels with similar properties (such - as group and sub-group). Multiple legend entries with the same - group names will be aggregated in user space. - - Drivers that instantiate their reporter objects in response to - IOService::configureReport(kIOReportDisable) will need to create - temporary reporter objects for the purpose of creating their - legend entries. User-space legends are tracked by 12836893. - - Locking: same-reportingService and same-IORLegend concurrency UNSAFE -*/ - IOReturn addReporterLegend(IOReporter *reporter, - const char *groupName, - const char *subGroupName); - + * @abstract Add a legend entry from a reporter object + * + * @param reporter - IOReporter to use to extract and append the legend + * @param groupName - primary group name for this entry + * @param subGroupName - secondary group name for this entry + * @result appropriate IOReturn code + * + * @discussion + * An IOReportLegendEntry will be created internally to this method from + * the IOReporter object passed in argument. The entry will be released + * internally after being appended to the IOReportLegend object. + * Legend entries are available from reporter objects. Entries + * represent some number of channels with similar properties (such + * as group and sub-group). Multiple legend entries with the same + * group names will be aggregated in user space. + * + * Drivers that instantiate their reporter objects in response to + * IOService::configureReport(kIOReportDisable) will need to create + * temporary reporter objects for the purpose of creating their + * legend entries. User-space legends are tracked by 12836893. + * + * Locking: same-reportingService and same-IORLegend concurrency UNSAFE + */ + IOReturn addReporterLegend(IOReporter *reporter, + const char *groupName, + const char *subGroupName); + /*! @function IOReportLegend::addReporterLegend - @abstract Add a legend entry from a reporter object - - @param reportingService - IOService data provider into the reporter object - @param reporter - IOReporter to use to extract and append the legend - @param groupName - primary group name for this entry - @param subGroupName - secondary group name for this entry - @result appropriate IOReturn code - - @discussion - An IOReportLegendEntry will be created internally to this method from - the IOReporter object passed in argument. The entry will be released - internally after being appended to the IOReportLegend object. - Legend entries are available from reporter objects. Entries - represent some number of channels with similar properties (such - as group and sub-group). Multiple legend entries with the same - group names will be aggregated in user space. - - Drivers that instantiate their reporter objects in response to - IOService::configureReport(kIOReportDisable) will need to create - temporary reporter objects for the purpose of creating their - legend entries. User-space legends are tracked by 12836893. - - The static version of addReporterLegend adds the reporter's legend - directly to reportingService's kIOReportLegendKey. It is not - possible to safely update kIOReportLegendKey from multiple threads. 
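For comparison, the static addReporterLegend() just described collapses that whole recipe into a single call — again only a sketch with hypothetical names, and the caveats above (registry churn, no concurrent updates of kIOReportLegendKey) still apply:

    // Extracts the reporter's legend entry, merges it into any legend already
    // published under reportingService's kIOReportLegendKey, and republishes it.
    IOReportLegend::addReporterLegend(this, reporter, "AcmeDriver", "Stats");
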
- - Locking: same-reportingService and same-IORLegend concurrency UNSAFE -*/ - static IOReturn addReporterLegend(IOService *reportingService, - IOReporter *reporter, - const char *groupName, - const char *subGroupName); + * @abstract Add a legend entry from a reporter object + * + * @param reportingService - IOService data provider into the reporter object + * @param reporter - IOReporter to use to extract and append the legend + * @param groupName - primary group name for this entry + * @param subGroupName - secondary group name for this entry + * @result appropriate IOReturn code + * + * @discussion + * An IOReportLegendEntry will be created internally to this method from + * the IOReporter object passed in argument. The entry will be released + * internally after being appended to the IOReportLegend object. + * Legend entries are available from reporter objects. Entries + * represent some number of channels with similar properties (such + * as group and sub-group). Multiple legend entries with the same + * group names will be aggregated in user space. + * + * Drivers that instantiate their reporter objects in response to + * IOService::configureReport(kIOReportDisable) will need to create + * temporary reporter objects for the purpose of creating their + * legend entries. User-space legends are tracked by 12836893. + * + * The static version of addReporterLegend adds the reporter's legend + * directly to reportingService's kIOReportLegendKey. It is not + * possible to safely update kIOReportLegendKey from multiple threads. + * + * Locking: same-reportingService and same-IORLegend concurrency UNSAFE + */ + static IOReturn addReporterLegend(IOService *reportingService, + IOReporter *reporter, + const char *groupName, + const char *subGroupName); /*! @function IOReportLegend::getLegend - @abstract Accessor method to get the legend array - - @result Returns the OSObject holding the legend to be published by the driver - @discussion - This array will include all legend entries added to the object. -*/ - OSArray* getLegend(void); + * @abstract Accessor method to get the legend array + * + * @result Returns the OSObject holding the legend to be published by the driver + * @discussion + * This array will include all legend entries added to the object. + */ + OSArray* getLegend(void); /*! @function IOReportLegend::free - @abstract Frees the IOReportLegend object + * @abstract Frees the IOReportLegend object + * + * @discussion + * ::free() cleans up the reporter and anything it allocated. + * + * ::free() releases the internal array (which was either passed + * to ::with() or created as a result of ::with(NULL)). Assuming + * the caller extracted the array with getLegend() and published it + * in the I/O Kit registry, its ownership will now be with the + * registry. + */ + void free(void) APPLE_KEXT_OVERRIDE; - @discussion - ::free() cleans up the reporter and anything it allocated. - ::free() releases the internal array (which was either passed - to ::with() or created as a result of ::with(NULL)). Assuming - the caller extracted the array with getLegend() and published it - in the I/O Kit registry, its ownership will now be with the - registry. -*/ - void free(void) APPLE_KEXT_OVERRIDE; - - protected: - + private: - - OSArray *_reportLegend; - - IOReturn initWith(OSArray *legend); - -/*! 
@function IOReportLegend::organizeLegend - @abstract Sets up the legend entry, organizing it with group and sub-group names - @param groupName - Primary group name - @param subGroupName - Secondary group name - @result IOReturn code -*/ - IOReturn organizeLegend(IOReportLegendEntry *legendEntry, - const OSSymbol *groupName, - const OSSymbol *subGroupName); + OSArray *_reportLegend; + + IOReturn initWith(OSArray *legend); + +/*! @function IOReportLegend::organizeLegend + * @abstract Sets up the legend entry, organizing it with group and sub-group names + * + * @param groupName - Primary group name + * @param subGroupName - Secondary group name + * @result IOReturn code + */ + IOReturn organizeLegend(IOReportLegendEntry *legendEntry, + const OSSymbol *groupName, + const OSSymbol *subGroupName); // FUTURE POSSIBILITY (NOT IMPLEMENTED!) /*! @function IOReportLegend::createReporters - @abstract Creates as many IOReporter objects as the legend contains - - @param legend - OSArray legend object containing the description of all reporters - the driver is able to address - @param reporter - OSSet of reporter objects created by this call - @result IOReturn code kIOReturnSuccess if successful - - @discussion - NOT SUPPORTED at the time of writing - Convenience method to create all the driver's reporter objects from a legend. - Can be used when a legend is made public through the IORegistry but IOReporter - objects have not yet been created to save memory, waiting for observers. - Upon a call to configureReport via the IOService method, a driver could - create all reporter objects on the fly using this function. -*/ - // For Future IOReporterManager... - // static IOReturn createReporters(requestedChannels, legend); + * @abstract Creates as many IOReporter objects as the legend contains + * + * @param legend - OSArray legend object containing the description of all reporters + * the driver is able to address + * @param reporter - OSSet of reporter objects created by this call + * @result IOReturn code kIOReturnSuccess if successful + * + * @discussion + * NOT SUPPORTED at the time of writing + * Convenience method to create all the driver's reporter objects from a legend. + * Can be used when a legend is made public through the IORegistry but IOReporter + * objects have not yet been created to save memory, waiting for observers. + * Upon a call to configureReport via the IOService method, a driver could + * create all reporter objects on the fly using this function. + */ +// For Future IOReporterManager... +// static IOReturn createReporters(requestedChannels, legend); }; #endif /* ! _IOKERNEL_REPORTERS_H_ */ diff --git a/iokit/IOKit/IOKitDebug.h b/iokit/IOKit/IOKitDebug.h index a6c64b8eb..6f114a347 100644 --- a/iokit/IOKit/IOKitDebug.h +++ b/iokit/IOKit/IOKitDebug.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,92 +40,92 @@ class IOKitDiagnostics : public OSObject { - OSDeclareDefaultStructors(IOKitDiagnostics) + OSDeclareDefaultStructors(IOKitDiagnostics) public: - static OSObject * diagnostics( void ); - virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; + static OSObject * diagnostics( void ); + virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; private: - static void updateOffset( OSDictionary * dict, - UInt64 value, const char * name ); + static void updateOffset( OSDictionary * dict, + UInt64 value, const char * name ); }; #endif /* __cplusplus */ enum { - // loggage - kIOLogAttach = 0x00000001ULL, - kIOLogProbe = 0x00000002ULL, - kIOLogStart = 0x00000004ULL, - kIOLogRegister = 0x00000008ULL, - kIOLogMatch = 0x00000010ULL, - kIOLogConfig = 0x00000020ULL, - kIOLogYield = 0x00000040ULL, - kIOLogPower = 0x00000080ULL, - kIOLogMapping = 0x00000100ULL, - kIOLogCatalogue = 0x00000200ULL, + // loggage + kIOLogAttach = 0x00000001ULL, + kIOLogProbe = 0x00000002ULL, + kIOLogStart = 0x00000004ULL, + kIOLogRegister = 0x00000008ULL, + kIOLogMatch = 0x00000010ULL, + kIOLogConfig = 0x00000020ULL, + kIOLogYield = 0x00000040ULL, + kIOLogPower = 0x00000080ULL, + kIOLogMapping = 0x00000100ULL, + kIOLogCatalogue = 0x00000200ULL, kIOLogTracePower = 0x00000400ULL, // Obsolete: Use iotrace=0x00000400ULL to enable now - kIOLogDebugPower = 0x00000800ULL, - kIOLogServiceTree = 0x00001000ULL, - kIOLogDTree = 0x00002000ULL, - kIOLogMemory = 0x00004000ULL, - kIOLogKextMemory = 0x00008000ULL, - kOSLogRegistryMods = 0x00010000ULL, // Log attempts to modify registry collections - kIOLogPMRootDomain = 0x00020000ULL, - kOSRegistryModsMode = 0x00040000ULL, // Change default registry modification handling - panic vs. log + kIOLogDebugPower = 0x00000800ULL, + kIOLogServiceTree = 0x00001000ULL, + kIOLogDTree = 0x00002000ULL, + kIOLogMemory = 0x00004000ULL, + kIOLogKextMemory = 0x00008000ULL, + kOSLogRegistryMods = 0x00010000ULL,// Log attempts to modify registry collections + kIOLogPMRootDomain = 0x00020000ULL, + kOSRegistryModsMode = 0x00040000ULL,// Change default registry modification handling - panic vs. 
log // kIOTraceIOService = 0x00080000ULL, // Obsolete: Use iotrace=0x00080000ULL to enable now - kIOLogHibernate = 0x00100000ULL, - kIOStatistics = 0x04000000ULL, - kIOSleepWakeWdogOff = 0x40000000ULL, - kIOKextSpinDump = 0x80000000ULL, + kIOLogHibernate = 0x00100000ULL, + kIOStatistics = 0x04000000ULL, + kIOSleepWakeWdogOff = 0x40000000ULL, + kIOKextSpinDump = 0x80000000ULL, - // debug aids - change behaviour - kIONoFreeObjects = 0x00100000ULL, + // debug aids - change behaviour + kIONoFreeObjects = 0x00100000ULL, // kIOLogSynchronous = 0x00200000ULL, // IOLog completes synchronously -- obsolete - kIOTracking = 0x00400000ULL, - kIOWaitQuietPanics = 0x00800000ULL, - kIOWaitQuietBeforeRoot = 0x01000000ULL, - kIOTrackingBoot = 0x02000000ULL, + kIOTracking = 0x00400000ULL, + kIOWaitQuietPanics = 0x00800000ULL, + kIOWaitQuietBeforeRoot = 0x01000000ULL, + kIOTrackingBoot = 0x02000000ULL, - _kIODebugTopFlag = 0x8000000000000000ULL // force enum to be 64 bits + _kIODebugTopFlag = 0x8000000000000000ULL// force enum to be 64 bits }; enum { kIOKitDebugUserOptions = 0 - | kIOLogAttach - | kIOLogProbe - | kIOLogStart - | kIOLogRegister - | kIOLogMatch - | kIOLogConfig - | kIOLogYield - | kIOLogPower - | kIOLogMapping - | kIOLogCatalogue - | kIOLogTracePower - | kIOLogDebugPower - | kOSLogRegistryMods - | kIOLogPMRootDomain - | kOSRegistryModsMode - | kIOLogHibernate - | kIOSleepWakeWdogOff - | kIOKextSpinDump - | kIOWaitQuietPanics + | kIOLogAttach + | kIOLogProbe + | kIOLogStart + | kIOLogRegister + | kIOLogMatch + | kIOLogConfig + | kIOLogYield + | kIOLogPower + | kIOLogMapping + | kIOLogCatalogue + | kIOLogTracePower + | kIOLogDebugPower + | kOSLogRegistryMods + | kIOLogPMRootDomain + | kOSRegistryModsMode + | kIOLogHibernate + | kIOSleepWakeWdogOff + | kIOKextSpinDump + | kIOWaitQuietPanics }; enum { - kIOTraceInterrupts = 0x00000001ULL, // Trace primary interrupts - kIOTraceWorkLoops = 0x00000002ULL, // Trace workloop activity - kIOTraceEventSources = 0x00000004ULL, // Trace non-passive event sources - kIOTraceIntEventSource = 0x00000008ULL, // Trace IOIES and IOFIES sources - kIOTraceCommandGates = 0x00000010ULL, // Trace command gate activity - kIOTraceTimers = 0x00000020ULL, // Trace timer event source activity - - kIOTracePowerMgmt = 0x00000400ULL, // Trace power management changes - - kIOTraceIOService = 0x00080000ULL, // registerService/termination - - kIOTraceCompatBootArgs = kIOTraceIOService | kIOTracePowerMgmt + kIOTraceInterrupts = 0x00000001ULL, // Trace primary interrupts + kIOTraceWorkLoops = 0x00000002ULL, // Trace workloop activity + kIOTraceEventSources = 0x00000004ULL, // Trace non-passive event sources + kIOTraceIntEventSource = 0x00000008ULL, // Trace IOIES and IOFIES sources + kIOTraceCommandGates = 0x00000010ULL, // Trace command gate activity + kIOTraceTimers = 0x00000020ULL, // Trace timer event source activity + + kIOTracePowerMgmt = 0x00000400ULL, // Trace power management changes + + kIOTraceIOService = 0x00080000ULL, // registerService/termination + + kIOTraceCompatBootArgs = kIOTraceIOService | kIOTracePowerMgmt }; extern SInt64 gIOKitDebug; @@ -141,11 +141,11 @@ class IORegistryPlane; extern void IOPrintPlane( #ifdef __cplusplus - const IORegistryPlane * plane + const IORegistryPlane * plane #else - const struct IORegistryPlane * plane + const struct IORegistryPlane * plane #endif - ); + ); #ifndef _OSCPPDEBUG_H extern void OSPrintMemory( void ); #endif @@ -155,89 +155,81 @@ extern void OSPrintMemory( void ); #define kIOKitDiagnosticsClientClassName 
"IOKitDiagnosticsClient" -enum -{ - kIOKitDiagnosticsClientType = 0x99000002 +enum{ + kIOKitDiagnosticsClientType = 0x99000002 }; -struct IOKitDiagnosticsParameters -{ - size_t size; - uint64_t value; - uint32_t options; - uint32_t tag; - uint32_t zsize; - uint32_t reserved[8]; +struct IOKitDiagnosticsParameters { + size_t size; + uint64_t value; + uint32_t options; + uint32_t tag; + uint32_t zsize; + uint32_t reserved[8]; }; typedef struct IOKitDiagnosticsParameters IOKitDiagnosticsParameters; -enum -{ - kIOTrackingCallSiteBTs = 16, +enum{ + kIOTrackingCallSiteBTs = 16, }; -struct IOTrackingCallSiteInfo -{ - uint32_t count; - pid_t addressPID; - mach_vm_address_t address; - mach_vm_size_t size[2]; - pid_t btPID; - mach_vm_address_t bt[2][kIOTrackingCallSiteBTs]; +struct IOTrackingCallSiteInfo { + uint32_t count; + pid_t addressPID; + mach_vm_address_t address; + mach_vm_size_t size[2]; + pid_t btPID; + mach_vm_address_t bt[2][kIOTrackingCallSiteBTs]; }; -#define kIOMallocTrackingName "IOMalloc" -#define kIOWireTrackingName "IOWire" -#define kIOMapTrackingName "IOMap" +#define kIOMallocTrackingName "IOMalloc" +#define kIOWireTrackingName "IOWire" +#define kIOMapTrackingName "IOMap" #if XNU_KERNEL_PRIVATE && IOTRACKING struct IOTrackingQueue; struct IOTrackingCallSite; -struct IOTracking -{ - queue_chain_t link; - IOTrackingCallSite * site; +struct IOTracking { + queue_chain_t link; + IOTrackingCallSite * site; #if !defined(__LP64__) - uint32_t flags; + uint32_t flags; #endif }; -struct IOTrackingAddress -{ - IOTracking tracking; - uintptr_t address; - size_t size; +struct IOTrackingAddress { + IOTracking tracking; + uintptr_t address; + size_t size; #if defined(__LP64__) - uint32_t flags; + uint32_t flags; #endif }; -struct IOTrackingUser -{ - queue_chain_t link; - pid_t btPID; - uint8_t user32; - uint8_t userCount; - uintptr_t bt[kIOTrackingCallSiteBTs]; - uintptr_t btUser[kIOTrackingCallSiteBTs]; +struct IOTrackingUser { + queue_chain_t link; + pid_t btPID; + uint8_t user32; + uint8_t userCount; + uintptr_t bt[kIOTrackingCallSiteBTs]; + uintptr_t btUser[kIOTrackingCallSiteBTs]; }; -enum -{ - kIOTrackingQueueTypeDefaultOn = 0x00000001, - kIOTrackingQueueTypeAlloc = 0x00000002, - kIOTrackingQueueTypeMap = 0x00000004, - kIOTrackingQueueTypeUser = 0x00000008, +enum{ + kIOTrackingQueueTypeDefaultOn = 0x00000001, + kIOTrackingQueueTypeAlloc = 0x00000002, + kIOTrackingQueueTypeMap = 0x00000004, + kIOTrackingQueueTypeUser = 0x00000008, }; void IOTrackingInit(void); IOTrackingQueue * IOTrackingQueueAlloc(const char * name, uintptr_t btEntry, - size_t allocSize, size_t minCaptureSize, - uint32_t type, uint32_t numSiteQs); + size_t allocSize, size_t minCaptureSize, + uint32_t type, uint32_t numSiteQs); void IOTrackingQueueFree(IOTrackingQueue * head); void IOTrackingAdd(IOTrackingQueue * head, IOTracking * mem, size_t size, bool address, vm_tag_t tag); void IOTrackingRemove(IOTrackingQueue * head, IOTracking * mem, size_t size); @@ -249,8 +241,8 @@ void IOTrackingFree(IOTrackingQueue * head, uintptr_t address, size void IOTrackingReset(IOTrackingQueue * head); void IOTrackingAccumSize(IOTrackingQueue * head, IOTracking * mem, size_t size); kern_return_t IOTrackingDebug(uint32_t selector, uint32_t options, - const char * names, size_t namesLen, - size_t size, OSObject ** result); + const char * names, size_t namesLen, + size_t size, OSObject ** result); extern IOTrackingQueue * gIOMallocTracking; extern IOTrackingQueue * gIOWireTracking; @@ -258,21 +250,19 @@ extern IOTrackingQueue * gIOMapTracking; 
#endif /* XNU_KERNEL_PRIVATE && IOTRACKING */ -enum -{ - kIOTrackingExcludeNames = 0x00000001, +enum{ + kIOTrackingExcludeNames = 0x00000001, }; -enum -{ - kIOTrackingGetTracking = 0x00000001, - kIOTrackingGetMappings = 0x00000002, - kIOTrackingResetTracking = 0x00000003, - kIOTrackingStartCapture = 0x00000004, - kIOTrackingStopCapture = 0x00000005, - kIOTrackingSetMinCaptureSize = 0x00000006, - kIOTrackingLeaks = 0x00000007, - kIOTrackingInvalid = 0xFFFFFFFE, +enum{ + kIOTrackingGetTracking = 0x00000001, + kIOTrackingGetMappings = 0x00000002, + kIOTrackingResetTracking = 0x00000003, + kIOTrackingStartCapture = 0x00000004, + kIOTrackingStopCapture = 0x00000005, + kIOTrackingSetMinCaptureSize = 0x00000006, + kIOTrackingLeaks = 0x00000007, + kIOTrackingInvalid = 0xFFFFFFFE, }; diff --git a/iokit/IOKit/IOKitDiagnosticsUserClient.h b/iokit/IOKit/IOKitDiagnosticsUserClient.h index 86370398b..12976053b 100644 --- a/iokit/IOKit/IOKitDiagnosticsUserClient.h +++ b/iokit/IOKit/IOKitDiagnosticsUserClient.h @@ -1,17 +1,15 @@ - #include #include class IOKitDiagnosticsClient : public IOUserClient { - OSDeclareDefaultStructors(IOKitDiagnosticsClient) + OSDeclareDefaultStructors(IOKitDiagnosticsClient) public: - static IOUserClient * withTask(task_t owningTask); - virtual IOReturn clientClose(void) APPLE_KEXT_OVERRIDE; - virtual IOReturn setProperties(OSObject * properties) APPLE_KEXT_OVERRIDE; - virtual IOReturn externalMethod(uint32_t selector, IOExternalMethodArguments * args, - IOExternalMethodDispatch * dispatch, OSObject * target, void * reference) APPLE_KEXT_OVERRIDE; + static IOUserClient * withTask(task_t owningTask); + virtual IOReturn clientClose(void) APPLE_KEXT_OVERRIDE; + virtual IOReturn setProperties(OSObject * properties) APPLE_KEXT_OVERRIDE; + virtual IOReturn externalMethod(uint32_t selector, IOExternalMethodArguments * args, + IOExternalMethodDispatch * dispatch, OSObject * target, void * reference) APPLE_KEXT_OVERRIDE; }; - diff --git a/iokit/IOKit/IOKitKeys.h b/iokit/IOKit/IOKitKeys.h index 1d5bf5afa..84e5a0afd 100644 --- a/iokit/IOKit/IOKitKeys.h +++ b/iokit/IOKit/IOKitKeys.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * - * Common symbol definitions for IOKit. + * Common symbol definitions for IOKit. 
* * HISTORY * @@ -39,89 +39,89 @@ #define _IOKIT_IOKITKEYS_H // properties found in the registry root -#define kIOKitBuildVersionKey "IOKitBuildVersion" -#define kIOKitDiagnosticsKey "IOKitDiagnostics" - // a dictionary keyed by plane name -#define kIORegistryPlanesKey "IORegistryPlanes" -#define kIOCatalogueKey "IOCatalogue" +#define kIOKitBuildVersionKey "IOKitBuildVersion" +#define kIOKitDiagnosticsKey "IOKitDiagnostics" +// a dictionary keyed by plane name +#define kIORegistryPlanesKey "IORegistryPlanes" +#define kIOCatalogueKey "IOCatalogue" // registry plane names -#define kIOServicePlane "IOService" -#define kIOPowerPlane "IOPower" -#define kIODeviceTreePlane "IODeviceTree" -#define kIOAudioPlane "IOAudio" -#define kIOFireWirePlane "IOFireWire" -#define kIOUSBPlane "IOUSB" +#define kIOServicePlane "IOService" +#define kIOPowerPlane "IOPower" +#define kIODeviceTreePlane "IODeviceTree" +#define kIOAudioPlane "IOAudio" +#define kIOFireWirePlane "IOFireWire" +#define kIOUSBPlane "IOUSB" // registry ID number -#define kIORegistryEntryIDKey "IORegistryEntryID" +#define kIORegistryEntryIDKey "IORegistryEntryID" // property name to get array of property names #define kIORegistryEntryPropertyKeysKey "IORegistryEntryPropertyKeys" // IOService class name -#define kIOServiceClass "IOService" +#define kIOServiceClass "IOService" // IOResources class name -#define kIOResourcesClass "IOResources" +#define kIOResourcesClass "IOResources" // IOService driver probing property names -#define kIOClassKey "IOClass" -#define kIOProbeScoreKey "IOProbeScore" -#define kIOKitDebugKey "IOKitDebug" +#define kIOClassKey "IOClass" +#define kIOProbeScoreKey "IOProbeScore" +#define kIOKitDebugKey "IOKitDebug" // IOService matching property names -#define kIOProviderClassKey "IOProviderClass" -#define kIONameMatchKey "IONameMatch" -#define kIOPropertyMatchKey "IOPropertyMatch" -#define kIOPropertyExistsMatchKey "IOPropertyExistsMatch" -#define kIOPathMatchKey "IOPathMatch" -#define kIOLocationMatchKey "IOLocationMatch" -#define kIOParentMatchKey "IOParentMatch" -#define kIOResourceMatchKey "IOResourceMatch" -#define kIOResourceMatchedKey "IOResourceMatched" -#define kIOMatchedServiceCountKey "IOMatchedServiceCountMatch" - -#define kIONameMatchedKey "IONameMatched" - -#define kIOMatchCategoryKey "IOMatchCategory" -#define kIODefaultMatchCategoryKey "IODefaultMatchCategory" +#define kIOProviderClassKey "IOProviderClass" +#define kIONameMatchKey "IONameMatch" +#define kIOPropertyMatchKey "IOPropertyMatch" +#define kIOPropertyExistsMatchKey "IOPropertyExistsMatch" +#define kIOPathMatchKey "IOPathMatch" +#define kIOLocationMatchKey "IOLocationMatch" +#define kIOParentMatchKey "IOParentMatch" +#define kIOResourceMatchKey "IOResourceMatch" +#define kIOResourceMatchedKey "IOResourceMatched" +#define kIOMatchedServiceCountKey "IOMatchedServiceCountMatch" + +#define kIONameMatchedKey "IONameMatched" + +#define kIOMatchCategoryKey "IOMatchCategory" +#define kIODefaultMatchCategoryKey "IODefaultMatchCategory" // IOService default user client class, for loadable user clients -#define kIOUserClientClassKey "IOUserClientClass" +#define kIOUserClientClassKey "IOUserClientClass" // key to find IOMappers -#define kIOMapperIDKey "IOMapperID" +#define kIOMapperIDKey "IOMapperID" -#define kIOUserClientCrossEndianKey "IOUserClientCrossEndian" -#define kIOUserClientCrossEndianCompatibleKey "IOUserClientCrossEndianCompatible" -#define kIOUserClientSharedInstanceKey "IOUserClientSharedInstance" +#define kIOUserClientCrossEndianKey 
"IOUserClientCrossEndian" +#define kIOUserClientCrossEndianCompatibleKey "IOUserClientCrossEndianCompatible" +#define kIOUserClientSharedInstanceKey "IOUserClientSharedInstance" // diagnostic string describing the creating task -#define kIOUserClientCreatorKey "IOUserClientCreator" +#define kIOUserClientCreatorKey "IOUserClientCreator" // IOService notification types -#define kIOPublishNotification "IOServicePublish" -#define kIOFirstPublishNotification "IOServiceFirstPublish" -#define kIOMatchedNotification "IOServiceMatched" -#define kIOFirstMatchNotification "IOServiceFirstMatch" -#define kIOTerminatedNotification "IOServiceTerminate" -#define kIOWillTerminateNotification "IOServiceWillTerminate" +#define kIOPublishNotification "IOServicePublish" +#define kIOFirstPublishNotification "IOServiceFirstPublish" +#define kIOMatchedNotification "IOServiceMatched" +#define kIOFirstMatchNotification "IOServiceFirstMatch" +#define kIOTerminatedNotification "IOServiceTerminate" +#define kIOWillTerminateNotification "IOServiceWillTerminate" // IOService interest notification types -#define kIOGeneralInterest "IOGeneralInterest" -#define kIOBusyInterest "IOBusyInterest" -#define kIOAppPowerStateInterest "IOAppPowerStateInterest" -#define kIOPriorityPowerStateInterest "IOPriorityPowerStateInterest" +#define kIOGeneralInterest "IOGeneralInterest" +#define kIOBusyInterest "IOBusyInterest" +#define kIOAppPowerStateInterest "IOAppPowerStateInterest" +#define kIOPriorityPowerStateInterest "IOPriorityPowerStateInterest" #define kIOPlatformDeviceMessageKey "IOPlatformDeviceMessage" // IOService interest notification types -#define kIOCFPlugInTypesKey "IOCFPlugInTypes" +#define kIOCFPlugInTypesKey "IOCFPlugInTypes" // properties found in services that implement command pooling -#define kIOCommandPoolSizeKey "IOCommandPoolSize" // (OSNumber) +#define kIOCommandPoolSizeKey "IOCommandPoolSize" // (OSNumber) // properties found in services that implement priority -#define kIOMaximumPriorityCountKey "IOMaximumPriorityCount" // (OSNumber) +#define kIOMaximumPriorityCountKey "IOMaximumPriorityCount" // (OSNumber) // properties found in services that have transfer constraints #define kIOMaximumBlockCountReadKey "IOMaximumBlockCountRead" // (OSNumber) @@ -139,7 +139,7 @@ // properties found in services that wish to describe an icon // -// IOIcon = +// IOIcon = // { // CFBundleIdentifier = "com.example.driver.example"; // IOBundleResourceFile = "example.icns"; @@ -154,23 +154,23 @@ #define kIODeviceIconKey "IODeviceIcon" // (OSDictionary) // property of root that describes the machine's serial number as a string -#define kIOPlatformSerialNumberKey "IOPlatformSerialNumber" // (OSString) +#define kIOPlatformSerialNumberKey "IOPlatformSerialNumber" // (OSString) // property of root that describes the machine's UUID as a string -#define kIOPlatformUUIDKey "IOPlatformUUID" // (OSString) +#define kIOPlatformUUIDKey "IOPlatformUUID" // (OSString) // IODTNVRAM property keys -#define kIONVRAMDeletePropertyKey "IONVRAM-DELETE-PROPERTY" -#define kIONVRAMSyncNowPropertyKey "IONVRAM-SYNCNOW-PROPERTY" -#define kIONVRAMActivateCSRConfigPropertyKey "IONVRAM-ARMCSR-PROPERTY" -#define kIODTNVRAMPanicInfoKey "aapl,panic-info" +#define kIONVRAMDeletePropertyKey "IONVRAM-DELETE-PROPERTY" +#define kIONVRAMSyncNowPropertyKey "IONVRAM-SYNCNOW-PROPERTY" +#define kIONVRAMActivateCSRConfigPropertyKey "IONVRAM-ARMCSR-PROPERTY" +#define kIODTNVRAMPanicInfoKey "aapl,panic-info" // keys for complex boot information -#define kIOBootDeviceKey 
"IOBootDevice" // dict | array of dicts -#define kIOBootDevicePathKey "IOBootDevicePath" // arch-neutral OSString -#define kIOBootDeviceSizeKey "IOBootDeviceSize" // OSNumber of bytes +#define kIOBootDeviceKey "IOBootDevice" // dict | array of dicts +#define kIOBootDevicePathKey "IOBootDevicePath" // arch-neutral OSString +#define kIOBootDeviceSizeKey "IOBootDeviceSize" // OSNumber of bytes // keys for OS Version information -#define kOSBuildVersionKey "OS Build Version" +#define kOSBuildVersionKey "OS Build Version" #endif /* ! _IOKIT_IOKITKEYS_H */ diff --git a/iokit/IOKit/IOKitKeysPrivate.h b/iokit/IOKit/IOKitKeysPrivate.h index 5ecf86f08..4751aba6e 100644 --- a/iokit/IOKit/IOKitKeysPrivate.h +++ b/iokit/IOKit/IOKitKeysPrivate.h @@ -57,7 +57,7 @@ #define kIOBridgeBootSessionUUIDKey "bridge-boot-session-uuid" /* value is OSData */ // interest type -#define kIOConsoleSecurityInterest "IOConsoleSecurityInterest" +#define kIOConsoleSecurityInterest "IOConsoleSecurityInterest" // private keys for clientHasPrivilege @@ -69,13 +69,13 @@ // Embedded still throttles NVRAM commits via kIONVRAMSyncNowPropertyKey, but // some clients still need a stricter NVRAM commit contract. Please use this with // care. -#define kIONVRAMForceSyncNowPropertyKey "IONVRAM-FORCESYNCNOW-PROPERTY" +#define kIONVRAMForceSyncNowPropertyKey "IONVRAM-FORCESYNCNOW-PROPERTY" // clientHasPrivilege security token for kIOClientPrivilegeSecureConsoleProcess typedef struct _IOUCProcessToken { - void * token; - UInt32 pid; + void * token; + UInt32 pid; } IOUCProcessToken; #define kIOKernelHasSafeSleep 1 @@ -98,18 +98,18 @@ typedef struct _IOUCProcessToken { #endif /* defined(__i386__) || defined(__x86_64__) */ enum { - // these flags are valid for the prepare() method only - kIODirectionPrepareNoZeroFill = 0x00000010, + // these flags are valid for the prepare() method only + kIODirectionPrepareNoZeroFill = 0x00000010, }; enum { - kIOServiceTerminateNeedWillTerminate = 0x00000100, + kIOServiceTerminateNeedWillTerminate = 0x00000100, }; #define kIOClassNameOverrideKey "IOClassNameOverride" enum { - kIOClassNameOverrideNone = 0x00000001, + kIOClassNameOverrideNone = 0x00000001, }; #define kIOServiceLegacyMatchingRegistryIDKey "IOServiceLegacyMatchingRegistryID" diff --git a/iokit/IOKit/IOKitServer.h b/iokit/IOKit/IOKitServer.h index 927ad8cb2..099970fbc 100644 --- a/iokit/IOKit/IOKitServer.h +++ b/iokit/IOKit/IOKitServer.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. * * HISTORY * @@ -54,63 +54,63 @@ extern "C" { // IOCatalogueSendData /*! - @enum IOCatalogueSendData user-client flags. - @constant kIOCatalogAddDrivers Signals a call to the addDrivers function in IOCatalogue. - @constant kIOCatalogAddDriversNoMatch Signals a call to the addDrivers function in IOCatalogue but does not start a matching thread. - @constant kIOCatalogRemoveDrivers Signals a call to the removeDrivers function in IOCatalogue. - @constant kIOCatalogRemoveDriversNoMatch Signals a call to the removedrivers function in IOCatalogue but does not start a matching thread. - @constant kIOCatalogStartMatching Signals the IOCatalogue to start an IOService matching thread. - @constant kIOCatalogRemoveKernelLinker Deprecated; does nothing. - @constant kIOCatalogKextdActive Signals the kernel that kextd is running. - @constant kIOCatalogKextdFinishedLaunching Signals the IOCatalogue that kextd has finished sending it information at startup. - @constant kIOCatalogResetDrivers Resets the IOCatalogue with a new set of personalities. - @constant kIOCatalogResetDriversNoMatch Resets the IOCatalogue with a new set of personalities but does not start a matching thread. -*/ + * @enum IOCatalogueSendData user-client flags. + * @constant kIOCatalogAddDrivers Signals a call to the addDrivers function in IOCatalogue. + * @constant kIOCatalogAddDriversNoMatch Signals a call to the addDrivers function in IOCatalogue but does not start a matching thread. + * @constant kIOCatalogRemoveDrivers Signals a call to the removeDrivers function in IOCatalogue. + * @constant kIOCatalogRemoveDriversNoMatch Signals a call to the removeDrivers function in IOCatalogue but does not start a matching thread. + * @constant kIOCatalogStartMatching Signals the IOCatalogue to start an IOService matching thread. + * @constant kIOCatalogRemoveKernelLinker Deprecated; does nothing. + * @constant kIOCatalogKextdActive Signals the kernel that kextd is running. + * @constant kIOCatalogKextdFinishedLaunching Signals the IOCatalogue that kextd has finished sending it information at startup. + * @constant kIOCatalogResetDrivers Resets the IOCatalogue with a new set of personalities. + * @constant kIOCatalogResetDriversNoMatch Resets the IOCatalogue with a new set of personalities but does not start a matching thread. + */ enum { - kIOCatalogAddDrivers = 1, - kIOCatalogAddDriversNoMatch, - kIOCatalogRemoveDrivers, - kIOCatalogRemoveDriversNoMatch, - kIOCatalogStartMatching, - kIOCatalogRemoveKernelLinker, - kIOCatalogKextdActive, - kIOCatalogKextdFinishedLaunching, - kIOCatalogResetDrivers, - kIOCatalogResetDriversNoMatch + kIOCatalogAddDrivers = 1, + kIOCatalogAddDriversNoMatch, + kIOCatalogRemoveDrivers, + kIOCatalogRemoveDriversNoMatch, + kIOCatalogStartMatching, + kIOCatalogRemoveKernelLinker, + kIOCatalogKextdActive, + kIOCatalogKextdFinishedLaunching, + kIOCatalogResetDrivers, + kIOCatalogResetDriversNoMatch }; // IOCatalogueGetData /*! - @enum IOCatalogueGetData user-client flags - @constant kIOCatalogGetContents Returns a snapshot of the database to the caller. 
+ */ enum { - kIOCatalogGetContents = 1, - kIOCatalogGetModuleDemandList = 2, - kIOCatalogGetCacheMissList = 3, - kIOCatalogGetROMMkextList = 4 + kIOCatalogGetContents = 1, + kIOCatalogGetModuleDemandList = 2, + kIOCatalogGetCacheMissList = 3, + kIOCatalogGetROMMkextList = 4 }; // IOCatalogueReset /*! - @enum IOCatalogueReset user-client flag - @constant kIOCatalogResetDefault Removes all entries from IOCatalogue except those used for booting the system. -*/ + * @enum IOCatalogueReset user-client flag + * @constant kIOCatalogResetDefault Removes all entries from IOCatalogue except those used for booting the system. + */ enum { - kIOCatalogResetDefault = 1 + kIOCatalogResetDefault = 1 }; // IOCatalogueTerminate /*! - @enum IOCatalogueTerminate user-client flags. - @constant kIOCatalogModuleUnload Terminates all services which depend on a particular module and unloads the module. - @constant kIOCatalogModuleTerminate Terminates all services which depend on a particular module but does not unload the module. - @constant kIOCatalogServiceTerminate Terminates a particular service by name. -*/ + * @enum IOCatalogueTerminate user-client flags. + * @constant kIOCatalogModuleUnload Terminates all services which depend on a particular module and unloads the module. + * @constant kIOCatalogModuleTerminate Terminates all services which depend on a particular module but does not unload the module. + * @constant kIOCatalogServiceTerminate Terminates a particular service by name. + */ enum { - kIOCatalogModuleUnload = 1, - kIOCatalogModuleTerminate, - kIOCatalogServiceTerminate + kIOCatalogModuleUnload = 1, + kIOCatalogModuleTerminate, + kIOCatalogServiceTerminate }; @@ -129,30 +129,30 @@ extern "C" { extern void iokit_add_reference( io_object_t obj, ipc_kobject_type_t type ); extern ipc_port_t iokit_port_for_object( io_object_t obj, - ipc_kobject_type_t type ); + ipc_kobject_type_t type ); extern kern_return_t iokit_client_died( io_object_t obj, - ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount ); + ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount ); extern kern_return_t iokit_client_memory_for_type( - io_object_t connect, - unsigned int type, - unsigned int * flags, - vm_address_t * address, - vm_size_t * size ); + io_object_t connect, + unsigned int type, + unsigned int * flags, + vm_address_t * address, + vm_size_t * size ); /* * Functions in osfmk:iokit_rpc.c */ extern ipc_port_t iokit_alloc_object_port( io_object_t obj, - ipc_kobject_type_t type ); + ipc_kobject_type_t type ); extern kern_return_t iokit_destroy_object_port( ipc_port_t port ); extern mach_port_name_t iokit_make_send_right( task_t task, - io_object_t obj, ipc_kobject_type_t type ); + io_object_t obj, ipc_kobject_type_t type ); extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta ); @@ -176,12 +176,12 @@ extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, extern ppnum_t IOGetLastPageNumber(void); extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa, - mach_vm_size_t length, unsigned int mapFlags); + mach_vm_size_t length, unsigned int mapFlags); extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length); extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va, - mach_vm_size_t length, unsigned int options); + mach_vm_size_t length, unsigned int options); extern unsigned int IODefaultCacheBits(addr64_t pa); @@ -192,4 +192,3 
@@ extern unsigned int IODefaultCacheBits(addr64_t pa); #endif /* MACH_KERNEL_PRIVATE */ #endif /* ! _IOKIT_IOKITSERVER_H */ - diff --git a/iokit/IOKit/IOLib.h b/iokit/IOKit/IOLib.h index ce3689190..989dc1de1 100644 --- a/iokit/IOKit/IOLib.h +++ b/iokit/IOKit/IOLib.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. * * HISTORY * @@ -61,8 +61,8 @@ __BEGIN_DECLS * min/max macros. */ -#define min(a,b) ((a) < (b) ? (a) : (b)) -#define max(a,b) ((a) > (b) ? (a) : (b)) +#define min(a, b) ((a) < (b) ? (a) : (b)) +#define max(a, b) ((a) > (b) ? (a) : (b)) /* * These are opaque to the user. @@ -75,86 +75,86 @@ typedef void (*IOThreadFunc)(void *argument); */ /*! @function IOMalloc - @abstract Allocates general purpose, wired memory in the kernel map. - @discussion This is a general purpose utility to allocate memory in the kernel. There are no alignment guarantees given on the returned memory, and alignment may vary depending on the kernel configuration. This function may block and so should not be called from interrupt level or while a simple lock is held. - @param size Size of the memory requested. - @result Pointer to the allocated memory, or zero on failure. */ + * @abstract Allocates general purpose, wired memory in the kernel map. + * @discussion This is a general purpose utility to allocate memory in the kernel. There are no alignment guarantees given on the returned memory, and alignment may vary depending on the kernel configuration. This function may block and so should not be called from interrupt level or while a simple lock is held. + * @param size Size of the memory requested. + * @result Pointer to the allocated memory, or zero on failure. */ void * IOMalloc(vm_size_t size) __attribute__((alloc_size(1))); /*! @function IOFree - @abstract Frees memory allocated with IOMalloc. - @discussion This function frees memory allocated with IOMalloc, it may block and so should not be called from interrupt level or while a simple lock is held. - @param address Pointer to the allocated memory. Must be identical to result - @of a prior IOMalloc. - @param size Size of the memory allocated. Must be identical to size of - @the corresponding IOMalloc */ + * @abstract Frees memory allocated with IOMalloc. 
+ * @discussion This function frees memory allocated with IOMalloc, it may block and so should not be called from interrupt level or while a simple lock is held. + * @param address Pointer to the allocated memory. Must be identical to the result + * of a prior IOMalloc. + * @param size Size of the memory allocated. Must be identical to the size of + * the corresponding IOMalloc */ void IOFree(void * address, vm_size_t size); /*! @function IOMallocAligned - @abstract Allocates wired memory in the kernel map, with an alignment restriction. - @discussion This is a utility to allocate memory in the kernel, with an alignment restriction which is specified as a byte count. This function may block and so should not be called from interrupt level or while a simple lock is held. - @param size Size of the memory requested. - @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bit 0-7 zero. - @result Pointer to the allocated memory, or zero on failure. */ + * @abstract Allocates wired memory in the kernel map, with an alignment restriction. + * @discussion This is a utility to allocate memory in the kernel, with an alignment restriction which is specified as a byte count. This function may block and so should not be called from interrupt level or while a simple lock is held. + * @param size Size of the memory requested. + * @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bit 0-7 zero. + * @result Pointer to the allocated memory, or zero on failure. */ void * IOMallocAligned(vm_size_t size, vm_offset_t alignment) __attribute__((alloc_size(1))); /*! @function IOFreeAligned - @abstract Frees memory allocated with IOMallocAligned. - @discussion This function frees memory allocated with IOMallocAligned, it may block and so should not be called from interrupt level or while a simple lock is held. - @param address Pointer to the allocated memory. - @param size Size of the memory allocated. */ + * @abstract Frees memory allocated with IOMallocAligned. + * @discussion This function frees memory allocated with IOMallocAligned, it may block and so should not be called from interrupt level or while a simple lock is held. + * @param address Pointer to the allocated memory. + * @param size Size of the memory allocated. */ void IOFreeAligned(void * address, vm_size_t size); /*! @function IOMallocContiguous - @abstract Deprecated - use IOBufferMemoryDescriptor. Allocates wired memory in the kernel map, with an alignment restriction and physically contiguous. - @discussion This is a utility to allocate memory in the kernel, with an alignment restriction which is specified as a byte count, and will allocate only physically contiguous memory. The request may fail if memory is fragmented, and may cause large amounts of paging activity. This function may block and so should not be called from interrupt level or while a simple lock is held. - @param size Size of the memory requested. - @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bits 0-7 zero. - @param physicalAddress IOMallocContiguous returns the physical address of the allocated memory here, if physicalAddress is a non-zero pointer. The physicalAddress argument is deprecated and should be passed as NULL. 
To obtain the physical address for a memory buffer, use the IODMACommand class in conjunction with the IOMemoryDescriptor or IOBufferMemoryDescriptor classes. - @result Virtual address of the allocated memory, or zero on failure. */ + * @abstract Deprecated - use IOBufferMemoryDescriptor. Allocates wired memory in the kernel map, with an alignment restriction and physically contiguous. + * @discussion This is a utility to allocate memory in the kernel, with an alignment restriction which is specified as a byte count, and will allocate only physically contiguous memory. The request may fail if memory is fragmented, and may cause large amounts of paging activity. This function may block and so should not be called from interrupt level or while a simple lock is held. + * @param size Size of the memory requested. + * @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bits 0-7 zero. + * @param physicalAddress IOMallocContiguous returns the physical address of the allocated memory here, if physicalAddress is a non-zero pointer. The physicalAddress argument is deprecated and should be passed as NULL. To obtain the physical address for a memory buffer, use the IODMACommand class in conjunction with the IOMemoryDescriptor or IOBufferMemoryDescriptor classes. + * @result Virtual address of the allocated memory, or zero on failure. */ void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, - IOPhysicalAddress * physicalAddress) __attribute__((deprecated)) __attribute__((alloc_size(1))); + IOPhysicalAddress * physicalAddress) __attribute__((deprecated)) __attribute__((alloc_size(1))); /*! @function IOFreeContiguous - @abstract Deprecated - use IOBufferMemoryDescriptor. Frees memory allocated with IOMallocContiguous. - @discussion This function frees memory allocated with IOMallocContiguous, it may block and so should not be called from interrupt level or while a simple lock is held. - @param address Virtual address of the allocated memory. - @param size Size of the memory allocated. */ + * @abstract Deprecated - use IOBufferMemoryDescriptor. Frees memory allocated with IOMallocContiguous. + * @discussion This function frees memory allocated with IOMallocContiguous, it may block and so should not be called from interrupt level or while a simple lock is held. + * @param address Virtual address of the allocated memory. + * @param size Size of the memory allocated. */ void IOFreeContiguous(void * address, vm_size_t size) __attribute__((deprecated)); /*! @function IOMallocPageable - @abstract Allocates pageable memory in the kernel map. - @discussion This is a utility to allocate pageable memory in the kernel. This function may block and so should not be called from interrupt level or while a simple lock is held. - @param size Size of the memory requested. - @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bits 0-7 zero. - @result Pointer to the allocated memory, or zero on failure. */ + * @abstract Allocates pageable memory in the kernel map. + * @discussion This is a utility to allocate pageable memory in the kernel. This function may block and so should not be called from interrupt level or while a simple lock is held. + * @param size Size of the memory requested. + * @param alignment Byte count of the alignment for the memory. For example, pass 256 to get memory allocated at an address with bits 0-7 zero. 
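A quick sketch of the contract shared by these allocator pairs (illustrative size; IOMalloc/IOFree shown, but the aligned, contiguous, and pageable pairs behave the same way — the free call must receive exactly the size that was allocated, and all of them may block):

    vm_size_t size = 1024;          // illustrative
    void *buf = IOMalloc(size);     // may block; never call at interrupt level
    if (buf != NULL) {
        // ... use buf ...
        IOFree(buf, size);          // size must match the IOMalloc exactly
    }
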
+ * @result Pointer to the allocated memory, or zero on failure. */ void * IOMallocPageable(vm_size_t size, vm_size_t alignment) __attribute__((alloc_size(1))); /*! @function IOFreePageable - @abstract Frees memory allocated with IOMallocPageable. - @discussion This function frees memory allocated with IOMallocPageable, it may block and so should not be called from interrupt level or while a simple lock is held. - @param address Virtual address of the allocated memory. - @param size Size of the memory allocated. */ + * @abstract Frees memory allocated with IOMallocPageable. + * @discussion This function frees memory allocated with IOMallocPageable, it may block and so should not be called from interrupt level or while a simple lock is held. + * @param address Virtual address of the allocated memory. + * @param size Size of the memory allocated. */ void IOFreePageable(void * address, vm_size_t size); /* * Typed memory allocation macros. Both may block. */ -#define IONew(type,number) \ +#define IONew(type, number) \ ( ((number) != 0 && ((vm_size_t) ((sizeof(type) * (number) / (number))) != sizeof(type)) /* overflow check 20847256 */ \ ? 0 \ : ((type*)IOMalloc(sizeof(type) * (number)))) ) -#define IODelete(ptr,type,number) IOFree( (ptr) , sizeof(type) * (number) ) +#define IODelete(ptr, type, number) IOFree( (ptr) , sizeof(type) * (number) ) ///////////////////////////////////////////////////////////////////////////// // @@ -165,181 +165,181 @@ void IOFreePageable(void * address, vm_size_t size); ///////////////////////////////////////////////////////////////////////////// /*! @function IOMappedRead8 - @abstract Read one byte from the desired "Physical" IOSpace address. - @discussion Read one byte from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. - @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. - @result Data contained at that location */ + * @abstract Read one byte from the desired "Physical" IOSpace address. + * @discussion Read one byte from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. + * @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + * @result Data contained at that location */ UInt8 IOMappedRead8(IOPhysicalAddress address); /*! @function IOMappedRead16 - @abstract Read two bytes from the desired "Physical" IOSpace address. - @discussion Read two bytes from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. - @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. - @result Data contained at that location */ + * @abstract Read two bytes from the desired "Physical" IOSpace address. + * @discussion Read two bytes from the desired "Physical" IOSpace address. 
This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. + * @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + * @result Data contained at that location */ UInt16 IOMappedRead16(IOPhysicalAddress address); /*! @function IOMappedRead32 - @abstract Read four bytes from the desired "Physical" IOSpace address. - @discussion Read four bytes from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. - @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. - @result Data contained at that location */ + * @abstract Read four bytes from the desired "Physical" IOSpace address. + * @discussion Read four bytes from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. + * @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + * @result Data contained at that location */ UInt32 IOMappedRead32(IOPhysicalAddress address); /*! @function IOMappedRead64 - @abstract Read eight bytes from the desired "Physical" IOSpace address. - @discussion Read eight bytes from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. - @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. - @result Data contained at that location */ + * @abstract Read eight bytes from the desired "Physical" IOSpace address. + * @discussion Read eight bytes from the desired "Physical" IOSpace address. This function allows the developer to read an address returned from any memory descriptor's getPhysicalSegment routine. It can then be used by segmenting a physical page slightly to tag the physical page with its kernel space virtual address. + * @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + * @result Data contained at that location */ UInt64 IOMappedRead64(IOPhysicalAddress address); /*! @function IOMappedWrite8 - @abstract Write one byte to the desired "Physical" IOSpace address. - @discussion Write one byte to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. - @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. - @param value Data to be writen to the desired location */ + * @abstract Write one byte to the desired "Physical" IOSpace address. + * @discussion Write one byte to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. + * @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. 
+ * @param value Data to be written to the desired location */ void IOMappedWrite8(IOPhysicalAddress address, UInt8 value); /*! @function IOMappedWrite16 - @abstract Write two bytes to the desired "Physical" IOSpace address. - @discussion Write two bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. - @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. - @param value Data to be writen to the desired location */ + * @abstract Write two bytes to the desired "Physical" IOSpace address. + * @discussion Write two bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. + * @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + * @param value Data to be written to the desired location */ void IOMappedWrite16(IOPhysicalAddress address, UInt16 value); /*! @function IOMappedWrite32 - @abstract Write four bytes to the desired "Physical" IOSpace address. - @discussion Write four bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. - @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. - @param value Data to be writen to the desired location */ + * @abstract Write four bytes to the desired "Physical" IOSpace address. + * @discussion Write four bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. + * @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + * @param value Data to be written to the desired location */ void IOMappedWrite32(IOPhysicalAddress address, UInt32 value); /*! @function IOMappedWrite64 - @abstract Write eight bytes to the desired "Physical" IOSpace address. - @discussion Write eight bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. - @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. - @param value Data to be writen to the desired location */ + * @abstract Write eight bytes to the desired "Physical" IOSpace address. + * @discussion Write eight bytes to the desired "Physical" IOSpace address. This function allows the developer to write to an address returned from any memory descriptor's getPhysicalSegment routine. + * @param address The desired address, as returned by IOMemoryDescriptor::getPhysicalSegment. + * @param value Data to be written to the desired location */ void IOMappedWrite64(IOPhysicalAddress address, UInt64 value); /* This function is deprecated. Cache settings may be set for allocated memory with the IOBufferMemoryDescriptor api. */ IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address, - IOByteCount length, IOOptionBits cacheMode ) __attribute__((deprecated)); + IOByteCount length, IOOptionBits cacheMode ) __attribute__((deprecated)); /*! @function IOFlushProcessorCache - @abstract Flushes the processor cache for mapped memory. - @discussion This function flushes the processor cache of an already mapped memory range.
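A hedged sketch of the mapped read/write helpers above, assuming a physical segment address previously obtained from IOMemoryDescriptor::getPhysicalSegment (the register offsets and the enable bit are hypothetical):

#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

void pokeDevice(IOMemoryDescriptor * md)
{
    IOByteCount length = 0;
    addr64_t    phys   = md->getPhysicalSegment(0, &length, 0);
    if (phys != 0 && length >= 8) {
        // Read a hypothetical status register, then set a hypothetical enable bit.
        UInt32 status = IOMappedRead32((IOPhysicalAddress) phys);
        IOMappedWrite32((IOPhysicalAddress) phys + 4, status | 0x1);
    }
}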
Note in most cases it is preferable to use IOMemoryDescriptor::prepare and complete to manage cache coherency since they are aware of the architecture's requirements. Flushing the processor cache is not required for coherency in most situations. - @param task Task the memory is mapped into. - @param address Virtual address of the memory. - @param length Length of the range to set. - @result An IOReturn code. */ + * @abstract Flushes the processor cache for mapped memory. + * @discussion This function flushes the processor cache of an already mapped memory range. Note in most cases it is preferable to use IOMemoryDescriptor::prepare and complete to manage cache coherency since they are aware of the architecture's requirements. Flushing the processor cache is not required for coherency in most situations. + * @param task Task the memory is mapped into. + * @param address Virtual address of the memory. + * @param length Length of the range to set. + * @result An IOReturn code. */ IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address, - IOByteCount length ); + IOByteCount length ); /*! @function IOThreadSelf - @abstract Returns the osfmk identifier for the currently running thread. - @discussion This function returns the current thread (a pointer to the currently active osfmk thread_shuttle). */ + * @abstract Returns the osfmk identifier for the currently running thread. + * @discussion This function returns the current thread (a pointer to the currently active osfmk thread_shuttle). */ #define IOThreadSelf() (current_thread()) /*! @function IOCreateThread - @abstract Deprecated function - use kernel_thread_start(). Create a kernel thread. - @discussion This function creates a kernel thread, and passes the caller supplied argument to the new thread. Warning: the value returned by this function is not 100% reliable. There is a race condition where it is possible that the new thread has already terminated before this call returns. Under that circumstance the IOThread returned will be invalid. In general there is little that can be done with this value except compare it against 0. The thread itself can call IOThreadSelf() 100% reliably and that is the prefered mechanism to manipulate the IOThreads state. - @param function A C-function pointer where the thread will begin execution. - @param argument Caller specified data to be passed to the new thread. - @result An IOThread identifier for the new thread, equivalent to an osfmk thread_t. */ + * @abstract Deprecated function - use kernel_thread_start(). Create a kernel thread. + * @discussion This function creates a kernel thread, and passes the caller supplied argument to the new thread. Warning: the value returned by this function is not 100% reliable. There is a race condition where it is possible that the new thread has already terminated before this call returns. Under that circumstance the IOThread returned will be invalid. In general there is little that can be done with this value except compare it against 0. The thread itself can call IOThreadSelf() 100% reliably and that is the preferred mechanism to manipulate the IOThread's state. + * @param function A C-function pointer where the thread will begin execution. + * @param argument Caller specified data to be passed to the new thread. + * @result An IOThread identifier for the new thread, equivalent to an osfmk thread_t. */ IOThread IOCreateThread(IOThreadFunc function, void *argument) __attribute__((deprecated)); /*!
@function IOExitThread - @abstract Deprecated function - use thread_terminate(). Terminate execution of current thread. - @discussion This function destroys the currently running thread, and does not return. */ + * @abstract Deprecated function - use thread_terminate(). Terminate execution of current thread. + * @discussion This function destroys the currently running thread, and does not return. */ void IOExitThread(void) __attribute__((deprecated)); /*! @function IOSleep - @abstract Sleep the calling thread for a number of milliseconds. - @discussion This function blocks the calling thread for at least the number of specified milliseconds, giving time to other processes. - @param milliseconds The integer number of milliseconds to wait. */ + * @abstract Sleep the calling thread for a number of milliseconds. + * @discussion This function blocks the calling thread for at least the number of specified milliseconds, giving time to other processes. + * @param milliseconds The integer number of milliseconds to wait. */ void IOSleep(unsigned milliseconds); /*! @function IOSleepWithLeeway - @abstract Sleep the calling thread for a number of milliseconds, with a specified leeway the kernel may use for timer coalescing. - @discussion This function blocks the calling thread for at least the number of specified milliseconds, giving time to other processes. The kernel may also coalesce any timers involved in the delay, using the leeway given as a guideline. - @param intervalMilliseconds The integer number of milliseconds to wait. - @param leewayMilliseconds The integer number of milliseconds to use as a timer coalescing guideline. */ + * @abstract Sleep the calling thread for a number of milliseconds, with a specified leeway the kernel may use for timer coalescing. + * @discussion This function blocks the calling thread for at least the number of specified milliseconds, giving time to other processes. The kernel may also coalesce any timers involved in the delay, using the leeway given as a guideline. + * @param intervalMilliseconds The integer number of milliseconds to wait. + * @param leewayMilliseconds The integer number of milliseconds to use as a timer coalescing guideline. */ void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds); /*! @function IODelay - @abstract Spin delay for a number of microseconds. - @discussion This function spins to delay for at least the number of specified microseconds. Since the CPU is busy spinning no time is made available to other processes; this method of delay should be used only for short periods. Also, the AbsoluteTime based APIs of kern/clock.h provide finer grained and lower cost delays. - @param microseconds The integer number of microseconds to spin wait. */ + * @abstract Spin delay for a number of microseconds. + * @discussion This function spins to delay for at least the number of specified microseconds. Since the CPU is busy spinning no time is made available to other processes; this method of delay should be used only for short periods. Also, the AbsoluteTime based APIs of kern/clock.h provide finer grained and lower cost delays. + * @param microseconds The integer number of microseconds to spin wait. */ void IODelay(unsigned microseconds); /*! @function IOPause - @abstract Spin delay for a number of nanoseconds. - @discussion This function spins to delay for at least the number of specified nanoseconds. 
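Because IOCreateThread and IOExitThread are deprecated, here is a minimal sketch of the recommended kernel_thread_start pattern, with IOSleep used for a coarse delay (the worker function and its argument are hypothetical):

#include <IOKit/IOLib.h>
#include <kern/thread.h>

static void worker(void * arg, wait_result_t wr)
{
    IOSleep(100);                        // block this thread for ~100 ms
    IOLog("worker done: %p\n", arg);
    thread_terminate(current_thread());  // replaces IOExitThread(); never returns
}

kern_return_t startWorker(void * ctx)
{
    thread_t thread;
    kern_return_t kr = kernel_thread_start(&worker, ctx, &thread);
    if (kr == KERN_SUCCESS) {
        thread_deallocate(thread);       // drop the reference kernel_thread_start returned
    }
    return kr;
}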
Since the CPU is busy spinning no time is made available to other processes; this method of delay should be used only for short periods. - @param nanoseconds The integer number of nanoseconds to spin wait. */ + * @abstract Spin delay for a number of nanoseconds. + * @discussion This function spins to delay for at least the number of specified nanoseconds. Since the CPU is busy spinning no time is made available to other processes; this method of delay should be used only for short periods. + * @param nanoseconds The integer number of nanoseconds to spin wait. */ void IOPause(unsigned nanoseconds); /*! @function IOLog - @abstract Log a message to console in text mode, and /var/log/system.log. - @discussion This function allows a driver to log diagnostic information to the screen during verbose boots, and to a log file found at /var/log/system.log. IOLog should not be called from interrupt context. - @param format A printf() style format string (see printf(3) documentation). - */ + * @abstract Log a message to console in text mode, and /var/log/system.log. + * @discussion This function allows a driver to log diagnostic information to the screen during verbose boots, and to a log file found at /var/log/system.log. IOLog should not be called from interrupt context. + * @param format A printf() style format string (see printf(3) documentation). + */ void IOLog(const char *format, ...) __attribute__((format(printf, 1, 2))); /*! @function IOLogv - @abstract Log a message to console in text mode, and /var/log/system.log. - @discussion This function allows a driver to log diagnostic information to the screen during verbose boots, and to a log file found at /var/log/system.log. IOLogv should not be called from interrupt context. - @param format A printf() style format string (see printf(3) documentation). - @param ap stdarg(3) style variable arguments. */ + * @abstract Log a message to console in text mode, and /var/log/system.log. + * @discussion This function allows a driver to log diagnostic information to the screen during verbose boots, and to a log file found at /var/log/system.log. IOLogv should not be called from interrupt context. + * @param format A printf() style format string (see printf(3) documentation). + * @param ap stdarg(3) style variable arguments. */ void IOLogv(const char *format, va_list ap) __attribute__((format(printf, 1, 0))); #ifndef _FN_KPRINTF -#define _FN_KPRINTF +#define _FN_KPRINTF void kprintf(const char *format, ...); #endif #ifndef _FN_KPRINTF_DECLARED -#define _FN_KPRINTF_DECLARED +#define _FN_KPRINTF_DECLARED #endif /* * Convert an integer constant (typically a #define or enum) to a string * via an array of IONamedValue. */ -const char *IOFindNameForValue(int value, - const IONamedValue *namedValueArray); +const char *IOFindNameForValue(int value, + const IONamedValue *namedValueArray); /* * Convert a string to an int via an array of IONamedValue. Returns * kIOReturnSuccess if string found, else returns kIOReturnBadArgument. */ -IOReturn IOFindValueForName(const char *string, - const IONamedValue *regValueArray, - int *value); /* RETURNED */ +IOReturn IOFindValueForName(const char *string, + const IONamedValue *regValueArray, + int *value); /* RETURNED */ /*! @function Debugger - @abstract Enter the kernel debugger. - @discussion This function freezes the kernel and enters the builtin debugger. It may not be possible to exit the debugger without a second machine. - @param reason A C-string to describe why the debugger is being entered.
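A small sketch of the IONamedValue lookup helpers declared above (the table contents are hypothetical):

#include <IOKit/IOLib.h>
#include <IOKit/IOTypes.h>

static const IONamedValue powerStateNames[] = {
    { 0, "off"  },
    { 1, "idle" },
    { 2, "on"   },
    { 0, NULL   }   // NULL name terminates the array
};

void logPowerState(int state)
{
    // Value-to-name lookup.
    IOLog("power state: %s\n", IOFindNameForValue(state, powerStateNames));

    // Name-to-value lookup; returns kIOReturnBadArgument if not found.
    int value = 0;
    if (IOFindValueForName("idle", powerStateNames, &value) == kIOReturnSuccess) {
        IOLog("\"idle\" maps to %d\n", value);
    }
}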
*/ - + * @abstract Enter the kernel debugger. + * @discussion This function freezes the kernel and enters the builtin debugger. It may not be possible to exit the debugger without a second machine. + * @param reason A C-string to describe why the debugger is being entered. */ + void Debugger(const char * reason); #if __LP64__ #define IOPanic(reason) panic("%s", reason) @@ -375,25 +375,27 @@ unsigned int IOAlignmentToSize(IOAlignment align); * Multiply and divide routines for IOFixed datatype. */ -static inline IOFixed IOFixedMultiply(IOFixed a, IOFixed b) +static inline IOFixed +IOFixedMultiply(IOFixed a, IOFixed b) { - return (IOFixed)((((SInt64) a) * ((SInt64) b)) >> 16); + return (IOFixed)((((SInt64) a) * ((SInt64) b)) >> 16); } -static inline IOFixed IOFixedDivide(IOFixed a, IOFixed b) +static inline IOFixed +IOFixedDivide(IOFixed a, IOFixed b) { - return (IOFixed)((((SInt64) a) << 16) / ((SInt64) b)); + return (IOFixed)((((SInt64) a) << 16) / ((SInt64) b)); } /* * IORound and IOTrunc convenience functions, in the spirit * of vm's round_page() and trunc_page(). */ -#define IORound(value,multiple) \ - ((((value) + (multiple) - 1) / (multiple)) * (multiple)) +#define IORound(value, multiple) \ + ((((value) + (multiple) - 1) / (multiple)) * (multiple)) -#define IOTrunc(value,multiple) \ - (((value) / (multiple)) * (multiple)); +#define IOTrunc(value, multiple) \ + (((value) / (multiple)) * (multiple)); #if defined(__APPLE_API_OBSOLETE) @@ -401,7 +403,7 @@ static inline IOFixed IOFixedDivide(IOFixed a, IOFixed b) /* The following API is deprecated */ /* The API exported by kern/clock.h - should be used for high resolution timing. */ + * should be used for high resolution timing. */ void IOGetTime( mach_timespec_t * clock_time) __attribute__((deprecated)); diff --git a/iokit/IOKit/IOLocks.h b/iokit/IOKit/IOLocks.h index 75d327b7f..2c89a177a 100644 --- a/iokit/IOKit/IOLocks.h +++ b/iokit/IOKit/IOLocks.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -52,101 +52,101 @@ extern "C" { #include /*! @var IOLockGroup - Global lock group used by all IOKit locks. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. -*/ -extern lck_grp_t *IOLockGroup; + * Global lock group used by all IOKit locks. 
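A worked sketch of the 16.16 fixed-point helpers and the rounding macros above; the values are illustrative:

#include <IOKit/IOLib.h>
#include <IOKit/IOTypes.h>

void fixedMathExample(void)
{
    IOFixed a = 3 << 16;                  // 3.0 in 16.16 fixed point
    IOFixed b = 1 << 15;                  // 0.5
    IOFixed p = IOFixedMultiply(a, b);    // 1.5  (0x00018000)
    IOFixed q = IOFixedDivide(a, b);      // 6.0  (0x00060000)

    vm_size_t r = IORound(1000, 256);     // 1024: next multiple of 256
    vm_size_t t = IOTrunc(1000, 256);     // 768:  previous multiple of 256
    // Note: IOTrunc's definition ends in a semicolon, so it cannot be used
    // inside a larger expression; use it as a full statement as shown here.
    IOLog("p=%x q=%x r=%lu t=%lu\n", p, q,
        (unsigned long) r, (unsigned long) t);
}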
To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. + */ +extern lck_grp_t *IOLockGroup; #if defined(XNU_KERNEL_PRIVATE) -#define IOLOCKS_INLINE 1 +#define IOLOCKS_INLINE 1 #endif /* * Mutex lock operations */ -#ifdef IOLOCKS_INLINE -typedef lck_mtx_t IOLock; +#ifdef IOLOCKS_INLINE +typedef lck_mtx_t IOLock; #else -typedef struct _IOLock IOLock; -#endif /* IOLOCKS_INLINE */ +typedef struct _IOLock IOLock; +#endif /* IOLOCKS_INLINE */ /*! @function IOLockAlloc - @abstract Allocates and initializes a mutex. - @discussion Allocates a mutex in general purpose memory, and initializes it. Mutexes are general purpose blocking mutual exclusion locks, supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held. IOLocks use the global IOKit lock group, IOLockGroup. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. - @result Pointer to the allocated lock, or zero on failure. */ + * @abstract Allocates and initializes a mutex. + * @discussion Allocates a mutex in general purpose memory, and initializes it. Mutexes are general purpose blocking mutual exclusion locks, supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held. IOLocks use the global IOKit lock group, IOLockGroup. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. + * @result Pointer to the allocated lock, or zero on failure. */ IOLock * IOLockAlloc( void ); /*! @function IOLockFree - @abstract Frees a mutex. - @discussion Frees a lock allocated with IOLockAlloc. Mutex should be unlocked with no waiters. - @param lock Pointer to the allocated lock. */ + * @abstract Frees a mutex. + * @discussion Frees a lock allocated with IOLockAlloc. Mutex should be unlocked with no waiters. + * @param lock Pointer to the allocated lock. */ -void IOLockFree( IOLock * lock); +void IOLockFree( IOLock * lock); /*! @function IOLockGetMachLock - @abstract Accessor to a Mach mutex. - @discussion Accessor to the Mach mutex. - @param lock Pointer to the allocated lock. */ + * @abstract Accessor to a Mach mutex. + * @discussion Accessor to the Mach mutex. + * @param lock Pointer to the allocated lock. */ lck_mtx_t * IOLockGetMachLock( IOLock * lock); /*! @function IOLockLock - @abstract Lock a mutex. - @discussion Lock the mutex. If the lock is held by any thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the mutex recursively from one thread will result in deadlock. - @param lock Pointer to the allocated lock. */ + * @abstract Lock a mutex. + * @discussion Lock the mutex. If the lock is held by any thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the mutex recursively from one thread will result in deadlock. + * @param lock Pointer to the allocated lock. */ -#ifdef IOLOCKS_INLINE -#define IOLockLock(l) lck_mtx_lock(l) +#ifdef IOLOCKS_INLINE +#define IOLockLock(l) lck_mtx_lock(l) #else -void IOLockLock( IOLock * lock); -#endif /* !IOLOCKS_INLINE */ +void IOLockLock( IOLock * lock); +#endif /* !IOLOCKS_INLINE */ /*! @function IOLockTryLock - @abstract Attempt to lock a mutex. 
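A minimal mutex sketch using the IOLock routines above (error handling abbreviated; the names are hypothetical):

#include <IOKit/IOLocks.h>

static IOLock * gStateLock;
static int      gState;

bool initState(void)
{
    gStateLock = IOLockAlloc();     // may block; uses the global IOLockGroup
    return gStateLock != NULL;
}

void setState(int next)
{
    IOLockLock(gStateLock);         // may block; not for interrupt context
    gState = next;
    IOLockUnlock(gStateLock);
}

void freeState(void)
{
    IOLockFree(gStateLock);         // lock must be unlocked with no waiters
    gStateLock = NULL;
}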
- @discussion Lock the mutex if it is currently unlocked, and return true. If the lock is held by any thread, return false. - @param lock Pointer to the allocated lock. - @result True if the mutex was unlocked and is now locked by the caller, otherwise false. */ + * @abstract Attempt to lock a mutex. + * @discussion Lock the mutex if it is currently unlocked, and return true. If the lock is held by any thread, return false. + * @param lock Pointer to the allocated lock. + * @result True if the mutex was unlocked and is now locked by the caller, otherwise false. */ -#ifdef IOLOCKS_INLINE -#define IOLockTryLock(l) lck_mtx_try_lock(l) +#ifdef IOLOCKS_INLINE +#define IOLockTryLock(l) lck_mtx_try_lock(l) #else boolean_t IOLockTryLock( IOLock * lock); -#endif /* !IOLOCKS_INLINE */ +#endif /* !IOLOCKS_INLINE */ /*! @function IOLockUnlock - @abstract Unlock a mutex. -@discussion Unlock the mutex and wake any blocked waiters. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held. - @param lock Pointer to the allocated lock. */ + * @abstract Unlock a mutex. + * @discussion Unlock the mutex and wake any blocked waiters. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held. + * @param lock Pointer to the allocated lock. */ -#ifdef IOLOCKS_INLINE -#define IOLockUnlock(l) lck_mtx_unlock(l) +#ifdef IOLOCKS_INLINE +#define IOLockUnlock(l) lck_mtx_unlock(l) #else -void IOLockUnlock( IOLock * lock); -#endif /* !IOLOCKS_INLINE */ +void IOLockUnlock( IOLock * lock); +#endif /* !IOLOCKS_INLINE */ /*! @function IOLockSleep - @abstract Sleep with mutex unlock and relock -@discussion Prepare to sleep,unlock the mutex, and re-acquire it on wakeup. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held. - @param lock Pointer to the locked lock. - @param event The event to sleep on. Must be non-NULL. - @param interType How can the sleep be interrupted. - @result The wait-result value indicating how the thread was awakened.*/ -int IOLockSleep( IOLock * lock, void *event, UInt32 interType) __DARWIN14_ALIAS(IOLockSleep); + * @abstract Sleep with mutex unlock and relock + * @discussion Prepare to sleep, unlock the mutex, and re-acquire it on wakeup. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held. + * @param lock Pointer to the locked lock. + * @param event The event to sleep on. Must be non-NULL. + * @param interType How the sleep can be interrupted. + * @result The wait-result value indicating how the thread was awakened.*/ +int IOLockSleep( IOLock * lock, void *event, UInt32 interType) __DARWIN14_ALIAS(IOLockSleep); -int IOLockSleepDeadline( IOLock * lock, void *event, - AbsoluteTime deadline, UInt32 interType) __DARWIN14_ALIAS(IOLockSleepDeadline); +int IOLockSleepDeadline( IOLock * lock, void *event, + AbsoluteTime deadline, UInt32 interType) __DARWIN14_ALIAS(IOLockSleepDeadline); -void IOLockWakeup(IOLock * lock, void *event, bool oneThread) __DARWIN14_ALIAS(IOLockWakeup); +void IOLockWakeup(IOLock * lock, void *event, bool oneThread) __DARWIN14_ALIAS(IOLockWakeup); #ifdef XNU_KERNEL_PRIVATE /*!
@enum IOLockAssertState * @abstract Used with IOLockAssert to assert the state of a lock. */ typedef enum { - kIOLockAssertOwned = LCK_ASSERT_OWNED, - kIOLockAssertNotOwned = LCK_ASSERT_NOTOWNED + kIOLockAssertOwned = LCK_ASSERT_OWNED, + kIOLockAssertNotOwned = LCK_ASSERT_NOTOWNED } IOLockAssertState; #ifdef IOLOCKS_INLINE @@ -158,7 +158,7 @@ typedef enum { * Panics the kernel if the lock is not owned if called with kIOLockAssertOwned, * and vice-versa. */ -void IOLockAssert(IOLock * lock, IOLockAssertState type); +void IOLockAssert(IOLock * lock, IOLockAssertState type); #endif /* !IOLOCKS_INLINE */ #endif /* !XNU_KERNEL_PRIVATE */ @@ -167,16 +167,28 @@ void IOLockAssert(IOLock * lock, IOLockAssertState type); /* The following API is deprecated */ typedef enum { - kIOLockStateUnlocked = 0, - kIOLockStateLocked = 1 + kIOLockStateUnlocked = 0, + kIOLockStateLocked = 1 } IOLockState; -void IOLockInitWithState( IOLock * lock, IOLockState state); -#define IOLockInit( l ) IOLockInitWithState( l, kIOLockStateUnlocked); +void IOLockInitWithState( IOLock * lock, IOLockState state); +#define IOLockInit( l ) IOLockInitWithState( l, kIOLockStateUnlocked); -static __inline__ void IOTakeLock( IOLock * lock) { IOLockLock(lock); } -static __inline__ boolean_t IOTryLock( IOLock * lock) { return(IOLockTryLock(lock)); } -static __inline__ void IOUnlock( IOLock * lock) { IOLockUnlock(lock); } +static __inline__ void +IOTakeLock( IOLock * lock) +{ + IOLockLock(lock); +} +static __inline__ boolean_t +IOTryLock( IOLock * lock) +{ + return IOLockTryLock(lock); +} +static __inline__ void +IOUnlock( IOLock * lock) +{ + IOLockUnlock(lock); +} #endif /* __APPLE_API_OBSOLETE */ @@ -187,136 +199,136 @@ static __inline__ void IOUnlock( IOLock * lock) { IOLockUnlock(lock); } typedef struct _IORecursiveLock IORecursiveLock; /*! @function IORecursiveLockAlloc - @abstract Allocates and initializes an recursive lock. - @discussion Allocates a recursive lock in general purpose memory, and initializes it. Recursive locks function identically to mutexes but allow one thread to lock more than once, with balanced unlocks. IORecursiveLocks use the global IOKit lock group, IOLockGroup. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. - @result Pointer to the allocated lock, or zero on failure. */ + * @abstract Allocates and initializes a recursive lock. + * @discussion Allocates a recursive lock in general purpose memory, and initializes it. Recursive locks function identically to mutexes but allow one thread to lock more than once, with balanced unlocks. IORecursiveLocks use the global IOKit lock group, IOLockGroup. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. + * @result Pointer to the allocated lock, or zero on failure. */ IORecursiveLock * IORecursiveLockAlloc( void ); /*! @function IORecursiveLockFree - @abstract Frees a recursive lock. - @discussion Frees a lock allocated with IORecursiveLockAlloc. Lock should be unlocked with no waiters. - @param lock Pointer to the allocated lock. */ + * @abstract Frees a recursive lock. + * @discussion Frees a lock allocated with IORecursiveLockAlloc. Lock should be unlocked with no waiters. + * @param lock Pointer to the allocated lock. */ -void IORecursiveLockFree( IORecursiveLock * lock); +void IORecursiveLockFree( IORecursiveLock * lock); /*!
@function IORecursiveLockGetMachLock - @abstract Accessor to a Mach mutex. - @discussion Accessor to the Mach mutex. - @param lock Pointer to the allocated lock. */ + * @abstract Accessor to a Mach mutex. + * @discussion Accessor to the Mach mutex. + * @param lock Pointer to the allocated lock. */ lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock); /*! @function IORecursiveLockLock - @abstract Lock a recursive lock. - @discussion Lock the recursive lock. If the lock is held by another thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. The lock may be taken recursively by the same thread, with a balanced number of calls to IORecursiveLockUnlock. - @param lock Pointer to the allocated lock. */ + * @abstract Lock a recursive lock. + * @discussion Lock the recursive lock. If the lock is held by another thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. The lock may be taken recursively by the same thread, with a balanced number of calls to IORecursiveLockUnlock. + * @param lock Pointer to the allocated lock. */ -void IORecursiveLockLock( IORecursiveLock * lock); +void IORecursiveLockLock( IORecursiveLock * lock); /*! @function IORecursiveLockTryLock - @abstract Attempt to lock a recursive lock. - @discussion Lock the lock if it is currently unlocked, or held by the calling thread, and return true. If the lock is held by another thread, return false. Successful calls to IORecursiveLockTryLock should be balanced with calls to IORecursiveLockUnlock. - @param lock Pointer to the allocated lock. - @result True if the lock is now locked by the caller, otherwise false. */ + * @abstract Attempt to lock a recursive lock. + * @discussion Lock the lock if it is currently unlocked, or held by the calling thread, and return true. If the lock is held by another thread, return false. Successful calls to IORecursiveLockTryLock should be balanced with calls to IORecursiveLockUnlock. + * @param lock Pointer to the allocated lock. + * @result True if the lock is now locked by the caller, otherwise false. */ -boolean_t IORecursiveLockTryLock( IORecursiveLock * lock); +boolean_t IORecursiveLockTryLock( IORecursiveLock * lock); /*! @function IORecursiveLockUnlock - @abstract Unlock a recursive lock. -@discussion Undo one call to IORecursiveLockLock, if the lock is now unlocked wake any blocked waiters. Results are undefined if the caller does not balance calls to IORecursiveLockLock with IORecursiveLockUnlock. This function may block and so should not be called from interrupt level or while a spin lock is held. - @param lock Pointer to the allocated lock. */ + * @abstract Unlock a recursive lock. + * @discussion Undo one call to IORecursiveLockLock; if the lock is now unlocked, wake any blocked waiters. Results are undefined if the caller does not balance calls to IORecursiveLockLock with IORecursiveLockUnlock. This function may block and so should not be called from interrupt level or while a spin lock is held. + * @param lock Pointer to the allocated lock. */ -void IORecursiveLockUnlock( IORecursiveLock * lock); +void IORecursiveLockUnlock( IORecursiveLock * lock); /*! @function IORecursiveLockHaveLock - @abstract Check if a recursive lock is held by the calling thread. - @discussion If the lock is held by the calling thread, return true, otherwise the lock is unlocked, or held by another thread and false is returned.
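A sketch of balanced recursive locking with the routines above; a helper that is callable both with and without the lock held is the typical motivation:

#include <IOKit/IOLocks.h>

static IORecursiveLock * gRecLock;   // assume allocated with IORecursiveLockAlloc()

static void helperLocked(void)
{
    // Safe whether or not the caller already holds gRecLock.
    IORecursiveLockLock(gRecLock);
    // ... touch shared state ...
    IORecursiveLockUnlock(gRecLock);
}

void outer(void)
{
    IORecursiveLockLock(gRecLock);
    helperLocked();                           // recursive acquisition is allowed
    if (IORecursiveLockHaveLock(gRecLock)) {
        // still held by this thread here
    }
    IORecursiveLockUnlock(gRecLock);          // unlocks must balance the locks
}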
- @param lock Pointer to the allocated lock. - @result True if the calling thread holds the lock otherwise false. */ + * @abstract Check if a recursive lock is held by the calling thread. + * @discussion If the lock is held by the calling thread, return true, otherwise the lock is unlocked, or held by another thread and false is returned. + * @param lock Pointer to the allocated lock. + * @result True if the calling thread holds the lock otherwise false. */ -boolean_t IORecursiveLockHaveLock( const IORecursiveLock * lock); +boolean_t IORecursiveLockHaveLock( const IORecursiveLock * lock); -extern int IORecursiveLockSleep( IORecursiveLock *_lock, - void *event, UInt32 interType); -extern int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event, - AbsoluteTime deadline, UInt32 interType); -extern void IORecursiveLockWakeup( IORecursiveLock *_lock, - void *event, bool oneThread); +extern int IORecursiveLockSleep( IORecursiveLock *_lock, + void *event, UInt32 interType); +extern int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event, + AbsoluteTime deadline, UInt32 interType); +extern void IORecursiveLockWakeup( IORecursiveLock *_lock, + void *event, bool oneThread); /* * Complex (read/write) lock operations */ -#ifdef IOLOCKS_INLINE -typedef lck_rw_t IORWLock; +#ifdef IOLOCKS_INLINE +typedef lck_rw_t IORWLock; #else -typedef struct _IORWLock IORWLock; -#endif /* IOLOCKS_INLINE */ +typedef struct _IORWLock IORWLock; +#endif /* IOLOCKS_INLINE */ /*! @function IORWLockAlloc - @abstract Allocates and initializes a read/write lock. - @discussion Allocates and initializes a read/write lock in general purpose memory. Read/write locks provide for multiple readers, one exclusive writer, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held. IORWLocks use the global IOKit lock group, IOLockGroup. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. - @result Pointer to the allocated lock, or zero on failure. */ + * @abstract Allocates and initializes a read/write lock. + * @discussion Allocates and initializes a read/write lock in general purpose memory. Read/write locks provide for multiple readers, one exclusive writer, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held. IORWLocks use the global IOKit lock group, IOLockGroup. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. + * @result Pointer to the allocated lock, or zero on failure. */ IORWLock * IORWLockAlloc( void ); /*! @function IORWLockFree - @abstract Frees a read/write lock. - @discussion Frees a lock allocated with IORWLockAlloc. Lock should be unlocked with no waiters. - @param lock Pointer to the allocated lock. */ + * @abstract Frees a read/write lock. + * @discussion Frees a lock allocated with IORWLockAlloc. Lock should be unlocked with no waiters. + * @param lock Pointer to the allocated lock. */ -void IORWLockFree( IORWLock * lock); +void IORWLockFree( IORWLock * lock); /*! @function IORWLockGetMachLock - @abstract Accessor to a Mach read/write lock. - @discussion Accessor to the Mach read/write lock. - @param lock Pointer to the allocated lock. */ + * @abstract Accessor to a Mach read/write lock. + * @discussion Accessor to the Mach read/write lock. 
+ * @param lock Pointer to the allocated lock. */ lck_rw_t * IORWLockGetMachLock( IORWLock * lock); /*! @function IORWLockRead - @abstract Lock a read/write lock for read. -@discussion Lock the lock for read, allowing multiple readers when there are no writers. If the lock is held for write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock. - @param lock Pointer to the allocated lock. */ + * @abstract Lock a read/write lock for read. + * @discussion Lock the lock for read, allowing multiple readers when there are no writers. If the lock is held for write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock. + * @param lock Pointer to the allocated lock. */ -#ifdef IOLOCKS_INLINE -#define IORWLockRead(l) lck_rw_lock_shared(l) +#ifdef IOLOCKS_INLINE +#define IORWLockRead(l) lck_rw_lock_shared(l) #else -void IORWLockRead(IORWLock * lock); -#endif /* !IOLOCKS_INLINE */ +void IORWLockRead(IORWLock * lock); +#endif /* !IOLOCKS_INLINE */ /*! @function IORWLockWrite - @abstract Lock a read/write lock for write. - @discussion Lock the lock for write, allowing one writer exlusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock. - @param lock Pointer to the allocated lock. */ + * @abstract Lock a read/write lock for write. + * @discussion Lock the lock for write, allowing one writer exclusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock. + * @param lock Pointer to the allocated lock. */ -#ifdef IOLOCKS_INLINE -#define IORWLockWrite(l) lck_rw_lock_exclusive(l) +#ifdef IOLOCKS_INLINE +#define IORWLockWrite(l) lck_rw_lock_exclusive(l) #else -void IORWLockWrite( IORWLock * lock); -#endif /* !IOLOCKS_INLINE */ +void IORWLockWrite( IORWLock * lock); +#endif /* !IOLOCKS_INLINE */ /*! @function IORWLockUnlock - @abstract Unlock a read/write lock. - @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a spin lock is held. - @param lock Pointer to the allocated lock. */ + * @abstract Unlock a read/write lock. + * @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a spin lock is held. + * @param lock Pointer to the allocated lock. */ -#ifdef IOLOCKS_INLINE -#define IORWLockUnlock(l) lck_rw_done(l) +#ifdef IOLOCKS_INLINE +#define IORWLockUnlock(l) lck_rw_done(l) #else -void IORWLockUnlock( IORWLock * lock); -#endif /* !IOLOCKS_INLINE */ +void IORWLockUnlock( IORWLock * lock); +#endif /* !IOLOCKS_INLINE */ #ifdef XNU_KERNEL_PRIVATE /*! @enum IORWLockAssertState * @abstract Used with IORWLockAssert to assert the state of a lock.
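A reader/writer sketch with the IORWLock routines above (the table and its size are hypothetical):

#include <IOKit/IOLocks.h>

static IORWLock * gTableLock;   // assume allocated with IORWLockAlloc()
static int        gTable[16];

int readEntry(unsigned i)
{
    IORWLockRead(gTableLock);       // shared: concurrent readers allowed
    int v = (i < 16) ? gTable[i] : 0;
    IORWLockUnlock(gTableLock);
    return v;
}

void writeEntry(unsigned i, int v)
{
    IORWLockWrite(gTableLock);      // exclusive: blocks readers and writers
    if (i < 16) {
        gTable[i] = v;
    }
    IORWLockUnlock(gTableLock);     // one IORWLockUnlock per acquisition
}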
*/ typedef enum { - kIORWLockAssertRead = LCK_RW_ASSERT_SHARED, - kIORWLockAssertWrite = LCK_RW_ASSERT_EXCLUSIVE, - kIORWLockAssertHeld = LCK_RW_ASSERT_HELD, - kIORWLockAssertNotHeld = LCK_RW_ASSERT_NOTHELD + kIORWLockAssertRead = LCK_RW_ASSERT_SHARED, + kIORWLockAssertWrite = LCK_RW_ASSERT_EXCLUSIVE, + kIORWLockAssertHeld = LCK_RW_ASSERT_HELD, + kIORWLockAssertNotHeld = LCK_RW_ASSERT_NOTHELD } IORWLockAssertState; #ifdef IOLOCKS_INLINE @@ -329,7 +341,7 @@ typedef enum { * If the specified lock is not in the state specified by the type argument, * then the kernel will panic. */ -void IORWLockAssert(IORWLock * lock, IORWLockAssertState type); +void IORWLockAssert(IORWLock * lock, IORWLockAssertState type); #endif /* !IOLOCKS_INLINE */ #endif /* !XNU_KERNEL_PRIVATE */ @@ -337,9 +349,21 @@ void IORWLockAssert(IORWLock * lock, IORWLockAssertState type); /* The following API is deprecated */ -static __inline__ void IOReadLock( IORWLock * lock) { IORWLockRead(lock); } -static __inline__ void IOWriteLock( IORWLock * lock) { IORWLockWrite(lock); } -static __inline__ void IORWUnlock( IORWLock * lock) { IORWLockUnlock(lock); } +static __inline__ void +IOReadLock( IORWLock * lock) +{ + IORWLockRead(lock); +} +static __inline__ void +IOWriteLock( IORWLock * lock) +{ + IORWLockWrite(lock); +} +static __inline__ void +IORWUnlock( IORWLock * lock) +{ + IORWLockUnlock(lock); +} #endif /* __APPLE_API_OBSOLETE */ @@ -348,82 +372,82 @@ static __inline__ void IORWUnlock( IORWLock * lock) { IORWLockUnlock(lock); } * Simple locks. Cannot block while holding a simple lock. */ -#ifdef IOLOCKS_INLINE -typedef lck_spin_t IOSimpleLock; +#ifdef IOLOCKS_INLINE +typedef lck_spin_t IOSimpleLock; #else -typedef struct _IOSimpleLock IOSimpleLock; -#endif /* IOLOCKS_INLINE */ +typedef struct _IOSimpleLock IOSimpleLock; +#endif /* IOLOCKS_INLINE */ /*! @function IOSimpleLockAlloc - @abstract Allocates and initializes a spin lock. - @discussion Allocates and initializes a spin lock in general purpose memory. Spin locks provide non-blocking mutual exclusion for synchronization between thread context and interrupt context, or for multiprocessor synchronization, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held. IOSimpleLocks use the global IOKit lock group, IOLockGroup. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. - @result Pointer to the allocated lock, or zero on failure. */ + * @abstract Allocates and initializes a spin lock. + * @discussion Allocates and initializes a spin lock in general purpose memory. Spin locks provide non-blocking mutual exclusion for synchronization between thread context and interrupt context, or for multiprocessor synchronization, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held. IOSimpleLocks use the global IOKit lock group, IOLockGroup. To simplify kext debugging and lock-heat analysis, consider using lck_* locks with a per-driver lock group, as defined in kern/locks.h. + * @result Pointer to the allocated lock, or zero on failure. */ IOSimpleLock * IOSimpleLockAlloc( void ); /*! @function IOSimpleLockFree - @abstract Frees a spin lock. - @discussion Frees a lock allocated with IOSimpleLockAlloc. - @param lock Pointer to the lock. */ + * @abstract Frees a spin lock. + * @discussion Frees a lock allocated with IOSimpleLockAlloc. 
+ * @param lock Pointer to the lock. */ void IOSimpleLockFree( IOSimpleLock * lock ); /*! @function IOSimpleLockGetMachLock - @abstract Accessor to a Mach spin lock. - @discussion Accessor to the Mach spin lock. - @param lock Pointer to the allocated lock. */ + * @abstract Accessor to a Mach spin lock. + * @discussion Accessor to the Mach spin lock. + * @param lock Pointer to the allocated lock. */ lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock); /*! @function IOSimpleLockInit - @abstract Initialize a spin lock. - @discussion Initialize an embedded spin lock, to the unlocked state. - @param lock Pointer to the lock. */ + * @abstract Initialize a spin lock. + * @discussion Initialize an embedded spin lock, to the unlocked state. + * @param lock Pointer to the lock. */ void IOSimpleLockInit( IOSimpleLock * lock ); /*! @function IOSimpleLockLock - @abstract Lock a spin lock. -@discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Spin locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock. - @param lock Pointer to the lock. */ + * @abstract Lock a spin lock. + * @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Spin locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock. + * @param lock Pointer to the lock. */ -#ifdef IOLOCKS_INLINE -#define IOSimpleLockLock(l) lck_spin_lock(l) +#ifdef IOLOCKS_INLINE +#define IOSimpleLockLock(l) lck_spin_lock(l) #else void IOSimpleLockLock( IOSimpleLock * lock ); -#endif /* !IOLOCKS_INLINE */ +#endif /* !IOLOCKS_INLINE */ /*! @function IOSimpleLockTryLock - @abstract Attempt to lock a spin lock. -@discussion Lock the spin lock if it is currently unlocked, and return true. If the lock is held, return false. Successful calls to IOSimpleLockTryLock should be balanced with calls to IOSimpleLockUnlock. - @param lock Pointer to the lock. - @result True if the lock was unlocked and is now locked by the caller, otherwise false. */ + * @abstract Attempt to lock a spin lock. + * @discussion Lock the spin lock if it is currently unlocked, and return true. If the lock is held, return false. Successful calls to IOSimpleLockTryLock should be balanced with calls to IOSimpleLockUnlock. + * @param lock Pointer to the lock. + * @result True if the lock was unlocked and is now locked by the caller, otherwise false. */ -#ifdef IOLOCKS_INLINE -#define IOSimpleLockTryLock(l) lck_spin_try_lock(l) +#ifdef IOLOCKS_INLINE +#define IOSimpleLockTryLock(l) lck_spin_try_lock(l) #else boolean_t IOSimpleLockTryLock( IOSimpleLock * lock ); -#endif /* !IOLOCKS_INLINE */ +#endif /* !IOLOCKS_INLINE */ /*! @function IOSimpleLockUnlock - @abstract Unlock a spin lock. - @discussion Unlock the lock, and restore preemption. Results are undefined if the caller has not locked the lock. - @param lock Pointer to the lock. */ + * @abstract Unlock a spin lock. + * @discussion Unlock the lock, and restore preemption. 
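A sketch of an embedded spin lock initialized in place with IOSimpleLockInit, assuming a context where IOSimpleLock is a complete type (inside xnu, where IOLOCKS_INLINE makes it a concrete lck_spin_t); kexts seeing only the opaque type would use IOSimpleLockAlloc instead. The containing structure is hypothetical:

#include <IOKit/IOLocks.h>

struct RingBuffer {
    IOSimpleLock lock;      // embedded, initialized in place rather than allocated
    unsigned     head;
    unsigned     tail;
};

void ringInit(struct RingBuffer * rb)
{
    IOSimpleLockInit(&rb->lock);    // starts out unlocked
    rb->head = rb->tail = 0;
}

void ringPush(struct RingBuffer * rb)
{
    IOSimpleLockLock(&rb->lock);    // spins; hold only for very short periods
    rb->head++;
    IOSimpleLockUnlock(&rb->lock);
}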
Results are undefined if the caller has not locked the lock. + * @param lock Pointer to the lock. */ -#ifdef IOLOCKS_INLINE -#define IOSimpleLockUnlock(l) lck_spin_unlock(l) +#ifdef IOLOCKS_INLINE +#define IOSimpleLockUnlock(l) lck_spin_unlock(l) #else void IOSimpleLockUnlock( IOSimpleLock * lock ); -#endif /* !IOLOCKS_INLINE */ +#endif /* !IOLOCKS_INLINE */ #ifdef XNU_KERNEL_PRIVATE /*! @enum IOSimpleLockAssertState * @abstract Used with IOSimpleLockAssert to assert the state of a lock. */ typedef enum { - kIOSimpleLockAssertOwned = LCK_ASSERT_OWNED, - kIOSimpleLockAssertNotOwned = LCK_ASSERT_NOTOWNED + kIOSimpleLockAssertOwned = LCK_ASSERT_OWNED, + kIOSimpleLockAssertNotOwned = LCK_ASSERT_NOTOWNED } IOSimpleLockAssertState; #ifdef IOLOCKS_INLINE @@ -435,7 +459,7 @@ typedef enum { * Panics the kernel if the lock is not owned if called with * kIOSimpleLockAssertOwned and vice-versa. */ -void IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type); +void IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type); #endif /* !IOLOCKS_INLINE */ #endif /* !XNU_KERNEL_PRIVATE */ @@ -446,30 +470,32 @@ typedef long int IOInterruptState; #endif /*! @function IOSimpleLockLockDisableInterrupt - @abstract Lock a spin lock. - @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock. - @param lock Pointer to the lock. */ + * @abstract Lock a spin lock. + * @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock. + * @param lock Pointer to the lock. */ static __inline__ -IOInterruptState IOSimpleLockLockDisableInterrupt( IOSimpleLock * lock ) +IOInterruptState +IOSimpleLockLockDisableInterrupt( IOSimpleLock * lock ) { - IOInterruptState state = ml_set_interrupts_enabled( false ); - IOSimpleLockLock( lock ); - return( state ); + IOInterruptState state = ml_set_interrupts_enabled( false ); + IOSimpleLockLock( lock ); + return state; } /*! @function IOSimpleLockUnlockEnableInterrupt - @abstract Unlock a spin lock, and restore interrupt state. - @discussion Unlock the lock, and restore preemption and interrupts to the state as they were when the lock was taken. Results are undefined if the caller has not locked the lock. - @param lock Pointer to the lock. - @param state The interrupt state returned by IOSimpleLockLockDisableInterrupt() */ + * @abstract Unlock a spin lock, and restore interrupt state. + * @discussion Unlock the lock, and restore preemption and interrupts to the state as they were when the lock was taken. Results are undefined if the caller has not locked the lock. + * @param lock Pointer to the lock. 
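A sketch of the interrupt-disabling pair defined here, for state shared between thread context and a primary interrupt handler (the lock and counter are hypothetical):

#include <IOKit/IOLocks.h>

static IOSimpleLock * gEventLock;   // assume allocated with IOSimpleLockAlloc()
static unsigned       gPendingEvents;

void queueEventFromThread(void)
{
    // Disable interrupts, then take the spin lock; both are restored on the way out.
    IOInterruptState is = IOSimpleLockLockDisableInterrupt(gEventLock);
    gPendingEvents++;
    IOSimpleLockUnlockEnableInterrupt(gEventLock, is);
}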
+ * @param state The interrupt state returned by IOSimpleLockLockDisableInterrupt() */ static __inline__ -void IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock, - IOInterruptState state ) +void +IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock, + IOInterruptState state ) { - IOSimpleLockUnlock( lock ); - ml_set_interrupts_enabled( state ); + IOSimpleLockUnlock( lock ); + ml_set_interrupts_enabled( state ); } #ifdef __cplusplus @@ -477,4 +503,3 @@ void IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock, #endif #endif /* !__IOKIT_IOLOCKS_H */ - diff --git a/iokit/IOKit/IOLocksPrivate.h b/iokit/IOKit/IOLocksPrivate.h index 16eed6a4d..b21fa4da3 100644 --- a/iokit/IOKit/IOLocksPrivate.h +++ b/iokit/IOKit/IOLocksPrivate.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -49,7 +49,7 @@ extern "C" { #endif IORecursiveLock * -IORecursiveLockAllocWithLockGroup ( lck_grp_t * lockGroup ); +IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup ); #ifdef __cplusplus @@ -57,4 +57,3 @@ IORecursiveLockAllocWithLockGroup ( lck_grp_t * lockGroup ); #endif #endif /* !__IOKIT_IOLOCKS_PRIVATE_H */ - diff --git a/iokit/IOKit/IOMapper.h b/iokit/IOKit/IOMapper.h index 4fe1ccf43..df5214478 100644 --- a/iokit/IOKit/IOMapper.h +++ b/iokit/IOKit/IOMapper.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -54,95 +54,100 @@ extern const OSSymbol * gIOMapperIDKey; class IOMapper : public IOService { - OSDeclareAbstractStructors(IOMapper); + OSDeclareAbstractStructors(IOMapper); - // Give the platform expert access to setMapperRequired(); - friend class IOPlatformExpert; - friend class IOMemoryDescriptor; - friend class IOGeneralMemoryDescriptor; +// Give the platform expert access to setMapperRequired(); + friend class IOPlatformExpert; + friend class IOMemoryDescriptor; + friend class IOGeneralMemoryDescriptor; private: - enum SystemMapperState { - kNoMapper = 0, - kUnknown = 1, - kHasMapper = 2, // Any other value is pointer to a live mapper - kWaitMask = 3, - }; + enum SystemMapperState { + kNoMapper = 0, + kUnknown = 1, + kHasMapper = 2, // Any other value is pointer to a live mapper + kWaitMask = 3, + }; protected: #ifdef XNU_KERNEL_PRIVATE - uint64_t __reservedA[6]; - kern_allocation_name_t fAllocName; - uint32_t __reservedB; - uint32_t fPageSize; + uint64_t __reservedA[6]; + kern_allocation_name_t fAllocName; + uint32_t __reservedB; + uint32_t fPageSize; #else - uint64_t __reserved[8]; + uint64_t __reserved[8]; #endif - bool fIsSystem; + bool fIsSystem; - static void setMapperRequired(bool hasMapper); - static void waitForSystemMapper(); + static void setMapperRequired(bool hasMapper); + static void waitForSystemMapper(); - virtual bool initHardware(IOService *provider) = 0; + virtual bool initHardware(IOService *provider) = 0; public: - virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; - virtual void free() APPLE_KEXT_OVERRIDE; + virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; - // To get access to the system mapper IOMapper::gSystem - static IOMapper *gSystem; +// To get access to the system mapper IOMapper::gSystem + static IOMapper *gSystem; - static void checkForSystemMapper() - { if ((uintptr_t) gSystem & kWaitMask) waitForSystemMapper(); } + static void + checkForSystemMapper() + { + if ((uintptr_t) gSystem & kWaitMask) { + waitForSystemMapper(); + } + } - static IOMapper * copyMapperForDevice(IOService * device); - static IOMapper * copyMapperForDeviceWithIndex(IOService * device, unsigned int index); + static IOMapper * copyMapperForDevice(IOService * device); + static IOMapper * copyMapperForDeviceWithIndex(IOService * device, unsigned int index); - // { subclasses +// { subclasses - virtual uint64_t getPageSize(void) const = 0; + virtual uint64_t getPageSize(void) const = 0; - virtual IOReturn iovmMapMemory(IOMemoryDescriptor * memory, - uint64_t descriptorOffset, - uint64_t length, - uint32_t mapOptions, - const IODMAMapSpecification * mapSpecification, - IODMACommand * dmaCommand, - const IODMAMapPageList * pageList, - uint64_t * mapAddress, - uint64_t * mapLength) = 0; + virtual IOReturn iovmMapMemory(IOMemoryDescriptor * memory, + uint64_t descriptorOffset, + uint64_t length, + uint32_t mapOptions, + const IODMAMapSpecification * mapSpecification, + IODMACommand * dmaCommand, + const IODMAMapPageList * pageList, + uint64_t * mapAddress, + uint64_t * mapLength) = 0; - virtual IOReturn iovmUnmapMemory(IOMemoryDescriptor * memory, - IODMACommand * dmaCommand, - uint64_t mapAddress, - uint64_t mapLength) = 0; + virtual IOReturn iovmUnmapMemory(IOMemoryDescriptor * memory, + IODMACommand * dmaCommand, + uint64_t mapAddress, + uint64_t mapLength) = 0; - virtual IOReturn iovmInsert(uint32_t options, - uint64_t mapAddress, - uint64_t offset, - uint64_t 
physicalAddress, - uint64_t length) = 0; + virtual IOReturn iovmInsert(uint32_t options, + uint64_t mapAddress, + uint64_t offset, + uint64_t physicalAddress, + uint64_t length) = 0; - virtual uint64_t mapToPhysicalAddress(uint64_t mappedAddress) = 0; + virtual uint64_t mapToPhysicalAddress(uint64_t mappedAddress) = 0; - // } +// } private: - OSMetaClassDeclareReservedUnused(IOMapper, 0); - OSMetaClassDeclareReservedUnused(IOMapper, 1); - OSMetaClassDeclareReservedUnused(IOMapper, 2); - OSMetaClassDeclareReservedUnused(IOMapper, 3); - OSMetaClassDeclareReservedUnused(IOMapper, 4); - OSMetaClassDeclareReservedUnused(IOMapper, 5); - OSMetaClassDeclareReservedUnused(IOMapper, 6); - OSMetaClassDeclareReservedUnused(IOMapper, 7); - OSMetaClassDeclareReservedUnused(IOMapper, 8); - OSMetaClassDeclareReservedUnused(IOMapper, 9); - OSMetaClassDeclareReservedUnused(IOMapper, 10); - OSMetaClassDeclareReservedUnused(IOMapper, 11); - OSMetaClassDeclareReservedUnused(IOMapper, 12); - OSMetaClassDeclareReservedUnused(IOMapper, 13); - OSMetaClassDeclareReservedUnused(IOMapper, 14); - OSMetaClassDeclareReservedUnused(IOMapper, 15); + OSMetaClassDeclareReservedUnused(IOMapper, 0); + OSMetaClassDeclareReservedUnused(IOMapper, 1); + OSMetaClassDeclareReservedUnused(IOMapper, 2); + OSMetaClassDeclareReservedUnused(IOMapper, 3); + OSMetaClassDeclareReservedUnused(IOMapper, 4); + OSMetaClassDeclareReservedUnused(IOMapper, 5); + OSMetaClassDeclareReservedUnused(IOMapper, 6); + OSMetaClassDeclareReservedUnused(IOMapper, 7); + OSMetaClassDeclareReservedUnused(IOMapper, 8); + OSMetaClassDeclareReservedUnused(IOMapper, 9); + OSMetaClassDeclareReservedUnused(IOMapper, 10); + OSMetaClassDeclareReservedUnused(IOMapper, 11); + OSMetaClassDeclareReservedUnused(IOMapper, 12); + OSMetaClassDeclareReservedUnused(IOMapper, 13); + OSMetaClassDeclareReservedUnused(IOMapper, 14); + OSMetaClassDeclareReservedUnused(IOMapper, 15); }; #endif /* __cplusplus */ diff --git a/iokit/IOKit/IOMemoryCursor.h b/iokit/IOKit/IOMemoryCursor.h index 048cdf584..99f9dc814 100644 --- a/iokit/IOKit/IOMemoryCursor.h +++ b/iokit/IOKit/IOMemoryCursor.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IOMEMORYCURSOR_H @@ -36,347 +36,348 @@ class IOMemoryDescriptor; /**************************** class IOMemoryCursor ***************************/ /*! - @class IOMemoryCursor - @abstract A mechanism to convert memory references to physical addresses. 
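As a rough illustration of the IOMapper interface reindented above, a driver might look up the DMA mapper for its device and query its page size; the helper below is hypothetical and error handling is elided:

    #include <IOKit/IOLib.h>
    #include <IOKit/IOMapper.h>

    static void
    logMapperPageSize(IOService * device)   // hypothetical helper
    {
        // Returns a referenced mapper for this device, or NULL when DMA addresses are untranslated.
        IOMapper * mapper = IOMapper::copyMapperForDevice(device);
        if (mapper != NULL) {
            // getPageSize() is pure virtual here; the concrete subclass supplies it.
            IOLog("mapper page size: %llu\n", (unsigned long long) mapper->getPageSize());
            mapper->release();   // balance the reference taken by copyMapperForDevice()
        }
    }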
- @discussion The IOMemoryCursor declares the super class that all -specific memory cursors must inherit from, but a memory cursor can be created without a specific format subclass by just providing a segment function to the initializers. This class does the difficult stuff of dividing a memory descriptor into a physical scatter/gather list appropriate for the target hardware. -

- A driver is expected to create a memory cursor and configure it to the limitations of its DMA hardware; for instance the memory cursor used by the FireWire SBP-2 protocol has a maximum physical segment size of 2^16 - 1 but the actual transfer size is unlimited. Thus it would create a cursor with a maxSegmentSize of 65535 and a maxTransfer size of UINT_MAX. It would also provide a SegmentFunction that can output a pagelist entry. -

-Below is the simplest example of a SegmentFunction:
-void IONaturalMemoryCursor::outputSegment(PhysicalSegment segment,
- void * outSegments,
- UInt32 outSegmentIndex)
-{
- ((PhysicalSegment *) outSegments)[outSegmentIndex] = segment;
-} - -*/ + * @class IOMemoryCursor + * @abstract A mechanism to convert memory references to physical addresses. + * @discussion The IOMemoryCursor declares the super class that all + * specific memory cursors must inherit from, but a memory cursor can be created without a specific format subclass by just providing a segment function to the initializers. This class does the difficult stuff of dividing a memory descriptor into a physical scatter/gather list appropriate for the target hardware. + *

+ * A driver is expected to create a memory cursor and configure it to the limitations of its DMA hardware; for instance, the memory cursor used by the FireWire SBP-2 protocol has a maximum physical segment size of 2^16 - 1 but the actual transfer size is unlimited. Thus it would create a cursor with a maxSegmentSize of 65535 and a maxTransferSize of UINT_MAX. It would also provide a SegmentFunction that can output a pagelist entry. + *

+ * Below is the simplest example of a SegmentFunction:
+ * void IONaturalMemoryCursor::outputSegment(PhysicalSegment segment,
+ * void * outSegments,
+ * UInt32 outSegmentIndex)
+ * {
+ * ((PhysicalSegment *) outSegments)[outSegmentIndex] = segment;
+ * } + * + */ class IOMemoryCursor : public OSObject { - OSDeclareDefaultStructors(IOMemoryCursor) + OSDeclareDefaultStructors(IOMemoryCursor) public: /*! - @typedef PhysicalSegment - @discussion A physical address/length pair. -*/ - struct PhysicalSegment - { - IOPhysicalAddress location; - IOPhysicalLength length; - }; + * @typedef PhysicalSegment + * @discussion A physical address/length pair. + */ + struct PhysicalSegment { + IOPhysicalAddress location; + IOPhysicalLength length; + }; /*! @defined IOPhysicalSegment - @discussion Backward compatibility define for the old non-class scoped type definition. See IOMemoryCursor::PhysicalSegment -*/ + * @discussion Backward compatibility define for the old non-class scoped type definition. See IOMemoryCursor::PhysicalSegment + */ #define IOPhysicalSegment IOMemoryCursor::PhysicalSegment /*! - @typedef SegmentFunction - @discussion Pointer to a C function that outputs a single physical segment to an element in the array as defined by the segments and segmentIndex parameters. - @param segment The physical address and length that is next to be output. - @param segments Base of the output vector of DMA address length pairs. - @param segmentIndex Index to output 'segment' in the 'segments' array. -*/ - typedef void (*SegmentFunction)(PhysicalSegment segment, - void * segments, - UInt32 segmentIndex); + * @typedef SegmentFunction + * @discussion Pointer to a C function that outputs a single physical segment to an element in the array as defined by the segments and segmentIndex parameters. + * @param segment The physical address and length that is next to be output. + * @param segments Base of the output vector of DMA address length pairs. + * @param segmentIndex Index to output 'segment' in the 'segments' array. + */ + typedef void (*SegmentFunction)(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); /*! @defined OutputSegmentFunc - @discussion Backward compatibility define for the old non-class scoped type definition. See IOMemoryCursor::SegmentFunction */ + * @discussion Backward compatibility define for the old non-class scoped type definition. See IOMemoryCursor::SegmentFunction */ #define OutputSegmentFunc IOMemoryCursor::SegmentFunction protected: /*! @var outSeg The action method called when an event has been delivered */ - SegmentFunction outSeg; + SegmentFunction outSeg; /*! @var maxSegmentSize Maximum size of one segment in a scatter/gather list */ - IOPhysicalLength maxSegmentSize; + IOPhysicalLength maxSegmentSize; /*! @var maxTransferSize - Maximum size of a transfer that this memory cursor is allowed to generate */ - IOPhysicalLength maxTransferSize; + IOPhysicalLength maxTransferSize; /*! @var alignMask - Currently unused. Reserved for automated aligment restriction code. */ - IOPhysicalLength alignMask; + * Currently unused. Reserved for automated alignment restriction code. */ + IOPhysicalLength alignMask; public: /*! @function withSpecification - @abstract Creates and initializes an IOMemoryCursor in one operation. - @discussion Factory function to create and initialize an IOMemoryCursor in one operation. For more information, see IOMemoryCursor::initWithSpecification. - @param outSegFunc SegmentFunction to call to output one physical segment. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum.
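Putting the declarations above together, here is a hedged sketch of a driver-supplied SegmentFunction and a cursor created with the SBP-2-style limits from the class discussion; the hardware scatter/gather element layout is invented for illustration:

    #include <IOKit/IOMemoryCursor.h>

    // Hypothetical hardware scatter/gather element.
    struct MySGEntry {
        UInt32 address;
        UInt32 length;
    };

    static void
    myOutputSegment(IOMemoryCursor::PhysicalSegment segment,
        void * segments, UInt32 segmentIndex)
    {
        MySGEntry * sg = &((MySGEntry *) segments)[segmentIndex];
        sg->address = (UInt32) segment.location;
        sg->length  = (UInt32) segment.length;
    }

    static IOMemoryCursor *
    makeSBP2StyleCursor(void)
    {
        // 65535-byte segments and an effectively unlimited transfer size,
        // matching the SBP-2 example in the discussion above.
        return IOMemoryCursor::withSpecification(&myOutputSegment,
            65535 /* maxSegmentSize */, UINT_MAX /* maxTransferSize */);
    }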
- @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. - @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. -*/ - static IOMemoryCursor * + * @abstract Creates and initializes an IOMemoryCursor in one operation. + * @discussion Factory function to create and initialize an IOMemoryCursor in one operation. For more information, see IOMemoryCursor::initWithSpecification. + * @param outSegFunc SegmentFunction to call to output one physical segment. + * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. + * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + * @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. + */ + static IOMemoryCursor * withSpecification(SegmentFunction outSegFunc, - IOPhysicalLength maxSegmentSize = 0, - IOPhysicalLength maxTransferSize = 0, - IOPhysicalLength alignment = 1); + IOPhysicalLength maxSegmentSize = 0, + IOPhysicalLength maxTransferSize = 0, + IOPhysicalLength alignment = 1); /*! @function initWithSpecification - @abstract Primary initializer for the IOMemoryCursor class. - @param outSegFunc SegmentFunction to call to output one physical segment. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. - @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. - @result Returns true if the inherited classes and this instance initialize -successfully. -*/ - virtual bool initWithSpecification(SegmentFunction outSegFunc, - IOPhysicalLength maxSegmentSize = 0, - IOPhysicalLength maxTransferSize = 0, - IOPhysicalLength alignment = 1); + * @abstract Primary initializer for the IOMemoryCursor class. + * @param outSegFunc SegmentFunction to call to output one physical segment. + * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. + * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + * @result Returns true if the inherited classes and this instance initialize + * successfully. + */ + virtual bool initWithSpecification(SegmentFunction outSegFunc, + IOPhysicalLength maxSegmentSize = 0, + IOPhysicalLength maxTransferSize = 0, + IOPhysicalLength alignment = 1); /*! @function genPhysicalSegments - @abstract Generates a physical scatter/gather list given a memory descriptor. - @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. - @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. - @param fromPosition Starting location of the I/O within a memory descriptor. - @param segments Void pointer to base of output physical scatter/gather list. Always passed directly onto the SegmentFunction without interpretation by the cursor. - @param maxSegments Maximum number of segments that can be written to segments array. 
- @param maxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. - @param transferSize Pointer to an IOByteCount variable that can contain the total size of the transfer being described. Defaults to 0 indicating that no transfer size need be returned. - @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. -*/ - virtual UInt32 genPhysicalSegments( - IOMemoryDescriptor *descriptor, - IOByteCount fromPosition, - void * segments, - UInt32 maxSegments, - UInt32 maxTransferSize = 0, - IOByteCount *transferSize = 0); + * @abstract Generates a physical scatter/gather list given a memory descriptor. + * @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. + * @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. + * @param fromPosition Starting location of the I/O within a memory descriptor. + * @param segments Void pointer to base of output physical scatter/gather list. Always passed directly onto the SegmentFunction without interpretation by the cursor. + * @param maxSegments Maximum number of segments that can be written to segments array. + * @param maxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + * @param transferSize Pointer to an IOByteCount variable that can contain the total size of the transfer being described. Defaults to 0 indicating that no transfer size need be returned. + * @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. + */ + virtual UInt32 genPhysicalSegments( + IOMemoryDescriptor *descriptor, + IOByteCount fromPosition, + void * segments, + UInt32 maxSegments, + UInt32 maxTransferSize = 0, + IOByteCount *transferSize = 0); }; /************************ class IONaturalMemoryCursor ************************/ /*! - @class IONaturalMemoryCursor - @abstract An IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the natural byte orientation for the CPU. - @discussion The IONaturalMemoryCursor would be used when it is too difficult to safely describe a SegmentFunction that is more appropriate for your hardware. This cursor just outputs an array of PhysicalSegments. -*/ + * @class IONaturalMemoryCursor + * @abstract An IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the natural byte orientation for the CPU. + * @discussion The IONaturalMemoryCursor would be used when it is too difficult to safely describe a SegmentFunction that is more appropriate for your hardware. This cursor just outputs an array of PhysicalSegments. + */ class IONaturalMemoryCursor : public IOMemoryCursor { - OSDeclareDefaultStructors(IONaturalMemoryCursor) + OSDeclareDefaultStructors(IONaturalMemoryCursor) public: /*! @function outputSegment - @abstract Outputs the given segment into the output segments array in natural byte order. - @param segment The physical address and length that is next to be output. - @param segments Base of the output vector of DMA address length pairs. - @param segmentIndex Index to output 'segment' in the 'segments' array. 
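A sketch of how genPhysicalSegments() is typically driven: the cursor is asked for segments from the current position until the descriptor is exhausted. The 32-entry table, the descriptor, and the DMA-programming step are placeholders:

    // Assumes 'cursor' was built as in the earlier sketch and 'md' is a prepared descriptor.
    static void
    buildSGList(IOMemoryCursor * cursor, IOMemoryDescriptor * md, IOByteCount totalLength)
    {
        MySGEntry   sgList[32];     // hypothetical hardware limit of 32 entries per program
        IOByteCount position = 0;

        while (position < totalLength) {
            IOByteCount chunk = 0;
            UInt32 count = cursor->genPhysicalSegments(md, position, sgList, 32,
                0 /* use the cursor's maxTransferSize */, &chunk);
            if (count == 0) {
                break;              // descriptor exhausted
            }
            // ... hand sgList[0 .. count-1] to the DMA engine here ...
            position += chunk;
        }
    }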
-*/ - static void outputSegment(PhysicalSegment segment, - void * segments, - UInt32 segmentIndex); + * @abstract Outputs the given segment into the output segments array in natural byte order. + * @param segment The physical address and length that is next to be output. + * @param segments Base of the output vector of DMA address length pairs. + * @param segmentIndex Index to output 'segment' in the 'segments' array. + */ + static void outputSegment(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); /*! @defined naturalOutputSegment - @discussion Backward compatibility define for the old global function definition. See IONaturalMemoryCursor::outputSegment. -*/ + * @discussion Backward compatibility define for the old global function definition. See IONaturalMemoryCursor::outputSegment. + */ #define naturalOutputSegment IONaturalMemoryCursor::outputSegment /*! @function withSpecification - @abstract Creates and initializes an IONaturalMemoryCursor in one operation. - @discussion Factory function to create and initialize an IONaturalMemoryCursor in one operation. For more information, see IONaturalMemoryCursor::initWithSpecification. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. - @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. - @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. -*/ - static IONaturalMemoryCursor * + * @abstract Creates and initializes an IONaturalMemoryCursor in one operation. + * @discussion Factory function to create and initialize an IONaturalMemoryCursor in one operation. For more information, see IONaturalMemoryCursor::initWithSpecification. + * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. + * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + * @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. + */ + static IONaturalMemoryCursor * withSpecification(IOPhysicalLength maxSegmentSize, - IOPhysicalLength maxTransferSize, - IOPhysicalLength alignment = 1); + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); /*! @function initWithSpecification - @abstract Primary initializer for the IONaturalMemoryCursor class. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. - @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. - @result Returns true if the inherited classes and this instance initialize successfully. -*/ - virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, - IOPhysicalLength maxTransferSize, - IOPhysicalLength alignment = 1); + * @abstract Primary initializer for the IONaturalMemoryCursor class. + * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. + * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. 
+ * @result Returns true if the inherited classes and this instance initialize successfully. + */ + virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); /*! @function getPhysicalSegments - @abstract Generates a CPU natural physical scatter/gather list given a memory descriptor. - @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps IOMemoryCursor::genPhysicalSegments. - @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. - @param fromPosition Starting location of the I/O within a memory descriptor. - @param segments Pointer to an array of IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. - @param maxSegments Maximum number of segments that can be written to segments array. - @param inMaxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. - @param transferSize Pointer to an IOByteCount variable that can contain the total size of the transfer being described. Defaults to 0 indicating that no transfer size need be returned. - @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. -*/ - virtual UInt32 getPhysicalSegments(IOMemoryDescriptor *descriptor, - IOByteCount fromPosition, - PhysicalSegment *segments, - UInt32 maxSegments, - UInt32 inMaxTransferSize = 0, - IOByteCount *transferSize = 0) - { - return genPhysicalSegments(descriptor, fromPosition, segments, - maxSegments, inMaxTransferSize, transferSize); - } + * @abstract Generates a CPU natural physical scatter/gather list given a memory descriptor. + * @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps IOMemoryCursor::genPhysicalSegments. + * @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. + * @param fromPosition Starting location of the I/O within a memory descriptor. + * @param segments Pointer to an array of IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. + * @param maxSegments Maximum number of segments that can be written to segments array. + * @param inMaxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + * @param transferSize Pointer to an IOByteCount variable that can contain the total size of the transfer being described. Defaults to 0 indicating that no transfer size need be returned. + * @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. + */ + virtual UInt32 + getPhysicalSegments(IOMemoryDescriptor *descriptor, + IOByteCount fromPosition, + PhysicalSegment *segments, + UInt32 maxSegments, + UInt32 inMaxTransferSize = 0, + IOByteCount *transferSize = 0) + { + return genPhysicalSegments(descriptor, fromPosition, segments, + maxSegments, inMaxTransferSize, transferSize); + } }; /************************** class IOBigMemoryCursor **************************/ /*! - @class IOBigMemoryCursor - @abstract An IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the big endian byte order. 
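Where no custom output format is needed, the typed getPhysicalSegments() wrapper above removes the cast; a minimal sketch under the same assumptions as before. The IOBigMemoryCursor and IOLittleMemoryCursor subclasses that follow are used identically when the DMA engine dictates a byte order:

    static UInt32
    firstEightSegments(IOMemoryDescriptor * md, IOMemoryCursor::PhysicalSegment segs[8])
    {
        IONaturalMemoryCursor * cursor =
            IONaturalMemoryCursor::withSpecification(4096 /* maxSegmentSize */, 0 /* no limit */);
        UInt32 count = cursor ? cursor->getPhysicalSegments(md, 0, segs, 8) : 0;
        if (cursor) {
            cursor->release();
        }
        return count;
    }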
- @discussion The IOBigMemoryCursor would be used when the DMA hardware requires a big endian address and length pair. This cursor outputs an array of PhysicalSegments that are encoded in big-endian format. -*/ + * @class IOBigMemoryCursor + * @abstract An IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the big endian byte order. + * @discussion The IOBigMemoryCursor would be used when the DMA hardware requires a big endian address and length pair. This cursor outputs an array of PhysicalSegments that are encoded in big-endian format. + */ class IOBigMemoryCursor : public IOMemoryCursor { - OSDeclareDefaultStructors(IOBigMemoryCursor) + OSDeclareDefaultStructors(IOBigMemoryCursor) public: /*! @function outputSegment - @abstract Outputs the given segment into the output segments array in big endian byte order. - @param segment The physical address and length that is next to be output. - @param segments Base of the output vector of DMA address length pairs. - @param segmentIndex Index to output 'segment' in the 'segments' array. -*/ - static void outputSegment(PhysicalSegment segment, - void * segments, - UInt32 segmentIndex); + * @abstract Outputs the given segment into the output segments array in big endian byte order. + * @param segment The physical address and length that is next to be output. + * @param segments Base of the output vector of DMA address length pairs. + * @param segmentIndex Index to output 'segment' in the 'segments' array. + */ + static void outputSegment(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); /*! @defined bigOutputSegment - @discussion Backward compatibility define for the old global function definition. See IOBigMemoryCursor::outputSegment -*/ + * @discussion Backward compatibility define for the old global function definition. See IOBigMemoryCursor::outputSegment + */ #define bigOutputSegment IOBigMemoryCursor::outputSegment /*! @function withSpecification - @abstract Creates and initializes an IOBigMemoryCursor in one operation. - @discussion Factory function to create and initialize an IOBigMemoryCursor in one operation. See also IOBigMemoryCursor::initWithSpecification. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. - @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. - @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. -*/ - static IOBigMemoryCursor * + * @abstract Creates and initializes an IOBigMemoryCursor in one operation. + * @discussion Factory function to create and initialize an IOBigMemoryCursor in one operation. See also IOBigMemoryCursor::initWithSpecification. + * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. + * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + * @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. + */ + static IOBigMemoryCursor * withSpecification(IOPhysicalLength maxSegmentSize, - IOPhysicalLength maxTransferSize, - IOPhysicalLength alignment = 1); + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); /*! 
@function initWithSpecification - @abstract Primary initializer for the IOBigMemoryCursor class. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. - @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. - @result Returns true if the inherited classes and this instance initialize -successfully. -*/ - virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, - IOPhysicalLength maxTransferSize, - IOPhysicalLength alignment = 1); + * @abstract Primary initializer for the IOBigMemoryCursor class. + * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. + * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + * @result Returns true if the inherited classes and this instance initialize + * successfully. + */ + virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); /*! @function getPhysicalSegments - @abstract Generates a big endian physical scatter/gather list given a memory descriptor. - @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps IOMemoryCursor::genPhysicalSegments. - @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. - @param fromPosition Starting location of the I/O within a memory descriptor. - @param segments Pointer to an array of IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. - @param maxSegments Maximum number of segments that can be written to segments array. - @param inMaxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. - @param transferSize Pointer to an IOByteCount variable that can contain the total size of the transfer being described. Defaults to 0 indicating that no transfer size need be returned. - @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. -*/ - virtual UInt32 getPhysicalSegments(IOMemoryDescriptor * descriptor, - IOByteCount fromPosition, - PhysicalSegment * segments, - UInt32 maxSegments, - UInt32 inMaxTransferSize = 0, - IOByteCount * transferSize = 0) - { - return genPhysicalSegments(descriptor, fromPosition, segments, - maxSegments, inMaxTransferSize, transferSize); - } + * @abstract Generates a big endian physical scatter/gather list given a memory descriptor. + * @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps IOMemoryCursor::genPhysicalSegments. + * @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. + * @param fromPosition Starting location of the I/O within a memory descriptor. + * @param segments Pointer to an array of IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. + * @param maxSegments Maximum number of segments that can be written to segments array. 
+ * @param inMaxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + * @param transferSize Pointer to an IOByteCount variable that can contain the total size of the transfer being described. Defaults to 0 indicating that no transfer size need be returned. + * @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. + */ + virtual UInt32 + getPhysicalSegments(IOMemoryDescriptor * descriptor, + IOByteCount fromPosition, + PhysicalSegment * segments, + UInt32 maxSegments, + UInt32 inMaxTransferSize = 0, + IOByteCount * transferSize = 0) + { + return genPhysicalSegments(descriptor, fromPosition, segments, + maxSegments, inMaxTransferSize, transferSize); + } }; /************************* class IOLittleMemoryCursor ************************/ /*! - @class IOLittleMemoryCursor - @abstract An IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the little endian byte order. - @discussion The IOLittleMemoryCursor would be used when the DMA hardware requires a little endian address and length pair. This cursor outputs an array of PhysicalSegments that are encoded in little endian format. -*/ + * @class IOLittleMemoryCursor + * @abstract An IOMemoryCursor subclass that outputs a vector of PhysicalSegments in the little endian byte order. + * @discussion The IOLittleMemoryCursor would be used when the DMA hardware requires a little endian address and length pair. This cursor outputs an array of PhysicalSegments that are encoded in little endian format. + */ class IOLittleMemoryCursor : public IOMemoryCursor { - OSDeclareDefaultStructors(IOLittleMemoryCursor) + OSDeclareDefaultStructors(IOLittleMemoryCursor) public: /*! @function outputSegment - @abstract Outputs the given segment into the output segments array in little endian byte order. - @param segment The physical address and length that is next to be output. - @param segments Base of the output vector of DMA address length pairs. - @param segmentIndex Index to output 'segment' in the 'segments' array. -*/ - static void outputSegment(PhysicalSegment segment, - void * segments, - UInt32 segmentIndex); + * @abstract Outputs the given segment into the output segments array in little endian byte order. + * @param segment The physical address and length that is next to be output. + * @param segments Base of the output vector of DMA address length pairs. + * @param segmentIndex Index to output 'segment' in the 'segments' array. + */ + static void outputSegment(PhysicalSegment segment, + void * segments, + UInt32 segmentIndex); /*! @defined littleOutputSegment - @discussion Backward compatibility define for the old global function definition. See also IOLittleMemoryCursor::outputSegment. */ + * @discussion Backward compatibility define for the old global function definition. See also IOLittleMemoryCursor::outputSegment. */ #define littleOutputSegment IOLittleMemoryCursor::outputSegment /*! @function withSpecification - @abstract Creates and initializes an IOLittleMemoryCursor in one operation. - @discussion Factory function to create and initialize an IOLittleMemoryCursor in one operation. See also IOLittleMemoryCursor::initWithSpecification. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. 
- @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. - @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. -*/ - static IOLittleMemoryCursor * + * @abstract Creates and initializes an IOLittleMemoryCursor in one operation. + * @discussion Factory function to create and initialize an IOLittleMemoryCursor in one operation. See also IOLittleMemoryCursor::initWithSpecification. + * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. + * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + * @result Returns a new memory cursor if successfully created and initialized, 0 otherwise. + */ + static IOLittleMemoryCursor * withSpecification(IOPhysicalLength maxSegmentSize, - IOPhysicalLength maxTransferSize, - IOPhysicalLength alignment = 1); + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); /*! @function initWithSpecification - @abstract Primary initializer for the IOLittleMemoryCursor class. - @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. - @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. - @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. - @result Returns true if the inherited classes and this instance initialize successfully. -*/ - virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, - IOPhysicalLength maxTransferSize, - IOPhysicalLength alignment = 1); + * @abstract Primary initializer for the IOLittleMemoryCursor class. + * @param maxSegmentSize Maximum allowable size for one segment. Defaults to 0. + * @param maxTransferSize Maximum size of an entire transfer. Defaults to 0 indicating no maximum. + * @param alignment Alignment restrictions on output physical addresses. Not currently implemented. Defaults to single byte alignment. + * @result Returns true if the inherited classes and this instance initialize successfully. + */ + virtual bool initWithSpecification(IOPhysicalLength maxSegmentSize, + IOPhysicalLength maxTransferSize, + IOPhysicalLength alignment = 1); /*! @function getPhysicalSegments - @abstract Generates a little endian physical scatter/gather list given a memory descriptor. - @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps IOMemoryCursor::genPhysicalSegments. - @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. - @param fromPosition Starting location of the I/O within a memory descriptor. - @param segments Pointer to an array of IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. - @param maxSegments Maximum number of segments that can be written to segments array. - @param inMaxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. - @param transferSize Pointer to an IOByteCount variable that can contain the total size of the transfer being described. Defaults to 0 indicating that no transfer size need be returned. 
- @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. -*/ - virtual UInt32 getPhysicalSegments(IOMemoryDescriptor * descriptor, - IOByteCount fromPosition, - PhysicalSegment * segments, - UInt32 maxSegments, - UInt32 inMaxTransferSize = 0, - IOByteCount * transferSize = 0) - { - return genPhysicalSegments(descriptor, fromPosition, segments, - maxSegments, inMaxTransferSize, transferSize); - } + * @abstract Generates a little endian physical scatter/gather list given a memory descriptor. + * @discussion Generates a list of physical segments from the given memory descriptor, relative to the current position of the descriptor. Wraps IOMemoryCursor::genPhysicalSegments. + * @param descriptor IOMemoryDescriptor that describes the data associated with an I/O request. + * @param fromPosition Starting location of the I/O within a memory descriptor. + * @param segments Pointer to an array of IOMemoryCursor::PhysicalSegments for the output physical scatter/gather list. + * @param maxSegments Maximum number of segments that can be written to segments array. + * @param inMaxTransferSize Maximum transfer size is limited to that many bytes, otherwise it defaults to the maximum transfer size specified when the memory cursor was initialized. + * @param transferSize Pointer to an IOByteCount variable that can contain the total size of the transfer being described. Defaults to 0 indicating that no transfer size need be returned. + * @result If the descriptor is exhausted of memory, a zero is returned, otherwise the number of segments that were filled in is returned. + */ + virtual UInt32 + getPhysicalSegments(IOMemoryDescriptor * descriptor, + IOByteCount fromPosition, + PhysicalSegment * segments, + UInt32 maxSegments, + UInt32 inMaxTransferSize = 0, + IOByteCount * transferSize = 0) + { + return genPhysicalSegments(descriptor, fromPosition, segments, + maxSegments, inMaxTransferSize, transferSize); + } }; #endif /* !_IOMEMORYCURSOR_H */ - diff --git a/iokit/IOKit/IOMemoryDescriptor.h b/iokit/IOKit/IOMemoryDescriptor.h index 35d037da0..b9deeaa4a 100644 --- a/iokit/IOKit/IOMemoryDescriptor.h +++ b/iokit/IOKit/IOMemoryDescriptor.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IOMEMORYDESCRIPTOR_H @@ -53,24 +53,24 @@ enum enum IODirection #endif /* !__LP64__ */ { - kIODirectionNone = 0x0, // same as VM_PROT_NONE - kIODirectionIn = 0x1, // User land 'read', same as VM_PROT_READ - kIODirectionOut = 0x2, // User land 'write', same as VM_PROT_WRITE - kIODirectionOutIn = kIODirectionOut | kIODirectionIn, - kIODirectionInOut = kIODirectionIn | kIODirectionOut, - - // these flags are valid for the prepare() method only - kIODirectionPrepareToPhys32 = 0x00000004, - kIODirectionPrepareNoFault = 0x00000008, - kIODirectionPrepareReserved1 = 0x00000010, -#define IODIRECTIONPREPARENONCOHERENTDEFINED 1 - kIODirectionPrepareNonCoherent = 0x00000020, - - // these flags are valid for the complete() method only -#define IODIRECTIONCOMPLETEWITHERRORDEFINED 1 - kIODirectionCompleteWithError = 0x00000040, -#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1 - kIODirectionCompleteWithDataValid = 0x00000080, + kIODirectionNone = 0x0,// same as VM_PROT_NONE + kIODirectionIn = 0x1,// User land 'read', same as VM_PROT_READ + kIODirectionOut = 0x2,// User land 'write', same as VM_PROT_WRITE + kIODirectionOutIn = kIODirectionOut | kIODirectionIn, + kIODirectionInOut = kIODirectionIn | kIODirectionOut, + + // these flags are valid for the prepare() method only + kIODirectionPrepareToPhys32 = 0x00000004, + kIODirectionPrepareNoFault = 0x00000008, + kIODirectionPrepareReserved1 = 0x00000010, +#define IODIRECTIONPREPARENONCOHERENTDEFINED 1 + kIODirectionPrepareNonCoherent = 0x00000020, + + // these flags are valid for the complete() method only +#define IODIRECTIONCOMPLETEWITHERRORDEFINED 1 + kIODirectionCompleteWithError = 0x00000040, +#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1 + kIODirectionCompleteWithDataValid = 0x00000080, }; #ifdef __LP64__ @@ -81,86 +81,83 @@ typedef IOOptionBits IODirection; * IOOptionBits used in the withOptions variant */ enum { - kIOMemoryDirectionMask = 0x00000007, + kIOMemoryDirectionMask = 0x00000007, #ifdef XNU_KERNEL_PRIVATE - kIOMemoryAutoPrepare = 0x00000008, // Shared with Buffer MD + kIOMemoryAutoPrepare = 0x00000008,// Shared with Buffer MD #endif - kIOMemoryTypeVirtual = 0x00000010, - kIOMemoryTypePhysical = 0x00000020, - kIOMemoryTypeUPL = 0x00000030, - kIOMemoryTypePersistentMD = 0x00000040, // Persistent Memory Descriptor - kIOMemoryTypeUIO = 0x00000050, + kIOMemoryTypeVirtual = 0x00000010, + kIOMemoryTypePhysical = 0x00000020, + kIOMemoryTypeUPL = 0x00000030, + kIOMemoryTypePersistentMD = 0x00000040,// Persistent Memory Descriptor + kIOMemoryTypeUIO = 0x00000050, #ifdef __LP64__ - kIOMemoryTypeVirtual64 = kIOMemoryTypeVirtual, - kIOMemoryTypePhysical64 = kIOMemoryTypePhysical, + kIOMemoryTypeVirtual64 = kIOMemoryTypeVirtual, + kIOMemoryTypePhysical64 = kIOMemoryTypePhysical, #else /* !__LP64__ */ - kIOMemoryTypeVirtual64 = 0x00000060, - kIOMemoryTypePhysical64 = 0x00000070, + kIOMemoryTypeVirtual64 = 0x00000060, + kIOMemoryTypePhysical64 = 0x00000070, #endif /* !__LP64__ */ - kIOMemoryTypeMask = 0x000000f0, + kIOMemoryTypeMask = 0x000000f0, - kIOMemoryAsReference = 0x00000100, - kIOMemoryBufferPageable = 0x00000400, - kIOMemoryMapperNone = 0x00000800, // Shared with Buffer MD - kIOMemoryHostOnly = 0x00001000, // Never DMA accessible + kIOMemoryAsReference = 0x00000100, + kIOMemoryBufferPageable = 0x00000400, + kIOMemoryMapperNone = 0x00000800,// Shared with Buffer MD + kIOMemoryHostOnly = 0x00001000,// Never DMA accessible #ifdef XNU_KERNEL_PRIVATE - kIOMemoryRedirected = 0x00004000, - 
kIOMemoryPreparedReadOnly = 0x00008000, + kIOMemoryRedirected = 0x00004000, + kIOMemoryPreparedReadOnly = 0x00008000, #endif - kIOMemoryPersistent = 0x00010000, - kIOMemoryMapCopyOnWrite = 0x00020000, - kIOMemoryRemote = 0x00040000, - kIOMemoryThreadSafe = 0x00100000, // Shared with Buffer MD - kIOMemoryClearEncrypt = 0x00200000, // Shared with Buffer MD - kIOMemoryUseReserve = 0x00800000, // Shared with Buffer MD -#define IOMEMORYUSERESERVEDEFINED 1 + kIOMemoryPersistent = 0x00010000, + kIOMemoryMapCopyOnWrite = 0x00020000, + kIOMemoryRemote = 0x00040000, + kIOMemoryThreadSafe = 0x00100000,// Shared with Buffer MD + kIOMemoryClearEncrypt = 0x00200000,// Shared with Buffer MD + kIOMemoryUseReserve = 0x00800000,// Shared with Buffer MD +#define IOMEMORYUSERESERVEDEFINED 1 #ifdef XNU_KERNEL_PRIVATE - kIOMemoryBufferPurgeable = 0x00400000, - kIOMemoryBufferCacheMask = 0x70000000, - kIOMemoryBufferCacheShift = 28, + kIOMemoryBufferPurgeable = 0x00400000, + kIOMemoryBufferCacheMask = 0x70000000, + kIOMemoryBufferCacheShift = 28, #endif }; -#define kIOMapperSystem ((IOMapper *) 0) - -enum -{ - kIOMemoryPurgeableKeepCurrent = 1, - - kIOMemoryPurgeableNonVolatile = 2, - kIOMemoryPurgeableVolatile = 3, - kIOMemoryPurgeableEmpty = 4, - - // modifiers for kIOMemoryPurgeableVolatile behavior - kIOMemoryPurgeableVolatileGroup0 = VM_VOLATILE_GROUP_0, - kIOMemoryPurgeableVolatileGroup1 = VM_VOLATILE_GROUP_1, - kIOMemoryPurgeableVolatileGroup2 = VM_VOLATILE_GROUP_2, - kIOMemoryPurgeableVolatileGroup3 = VM_VOLATILE_GROUP_3, - kIOMemoryPurgeableVolatileGroup4 = VM_VOLATILE_GROUP_4, - kIOMemoryPurgeableVolatileGroup5 = VM_VOLATILE_GROUP_5, - kIOMemoryPurgeableVolatileGroup6 = VM_VOLATILE_GROUP_6, - kIOMemoryPurgeableVolatileGroup7 = VM_VOLATILE_GROUP_7, - kIOMemoryPurgeableVolatileBehaviorFifo = VM_PURGABLE_BEHAVIOR_FIFO, - kIOMemoryPurgeableVolatileBehaviorLifo = VM_PURGABLE_BEHAVIOR_LIFO, - kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE, - kIOMemoryPurgeableVolatileOrderingNormal = VM_PURGABLE_ORDERING_NORMAL, - kIOMemoryPurgeableFaultOnAccess = VM_PURGABLE_DEBUG_FAULT, +#define kIOMapperSystem ((IOMapper *) 0) + +enum{ + kIOMemoryPurgeableKeepCurrent = 1, + + kIOMemoryPurgeableNonVolatile = 2, + kIOMemoryPurgeableVolatile = 3, + kIOMemoryPurgeableEmpty = 4, + + // modifiers for kIOMemoryPurgeableVolatile behavior + kIOMemoryPurgeableVolatileGroup0 = VM_VOLATILE_GROUP_0, + kIOMemoryPurgeableVolatileGroup1 = VM_VOLATILE_GROUP_1, + kIOMemoryPurgeableVolatileGroup2 = VM_VOLATILE_GROUP_2, + kIOMemoryPurgeableVolatileGroup3 = VM_VOLATILE_GROUP_3, + kIOMemoryPurgeableVolatileGroup4 = VM_VOLATILE_GROUP_4, + kIOMemoryPurgeableVolatileGroup5 = VM_VOLATILE_GROUP_5, + kIOMemoryPurgeableVolatileGroup6 = VM_VOLATILE_GROUP_6, + kIOMemoryPurgeableVolatileGroup7 = VM_VOLATILE_GROUP_7, + kIOMemoryPurgeableVolatileBehaviorFifo = VM_PURGABLE_BEHAVIOR_FIFO, + kIOMemoryPurgeableVolatileBehaviorLifo = VM_PURGABLE_BEHAVIOR_LIFO, + kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE, + kIOMemoryPurgeableVolatileOrderingNormal = VM_PURGABLE_ORDERING_NORMAL, + kIOMemoryPurgeableFaultOnAccess = VM_PURGABLE_DEBUG_FAULT, }; -enum -{ - kIOMemoryIncoherentIOFlush = 1, - kIOMemoryIncoherentIOStore = 2, +enum{ + kIOMemoryIncoherentIOFlush = 1, + kIOMemoryIncoherentIOStore = 2, - kIOMemoryClearEncrypted = 50, - kIOMemorySetEncrypted = 51, + kIOMemoryClearEncrypted = 50, + kIOMemorySetEncrypted = 51, }; -#define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1 +#define 
IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1 -struct IODMAMapSpecification -{ +struct IODMAMapSpecification { uint64_t alignment; IOService * device; uint32_t options; @@ -169,25 +166,23 @@ struct IODMAMapSpecification uint32_t resvB[4]; }; -struct IODMAMapPageList -{ - uint32_t pageOffset; - uint32_t pageListCount; - const upl_page_info_t * pageList; +struct IODMAMapPageList { + uint32_t pageOffset; + uint32_t pageListCount; + const upl_page_info_t * pageList; }; // mapOptions for iovmMapMemory -enum -{ - kIODMAMapReadAccess = 0x00000001, - kIODMAMapWriteAccess = 0x00000002, - kIODMAMapPhysicallyContiguous = 0x00000010, - kIODMAMapDeviceMemory = 0x00000020, - kIODMAMapPagingPath = 0x00000040, - kIODMAMapIdentityMap = 0x00000080, - - kIODMAMapPageListFullyOccupied = 0x00000100, - kIODMAMapFixedAddress = 0x00000200, +enum{ + kIODMAMapReadAccess = 0x00000001, + kIODMAMapWriteAccess = 0x00000002, + kIODMAMapPhysicallyContiguous = 0x00000010, + kIODMAMapDeviceMemory = 0x00000020, + kIODMAMapPagingPath = 0x00000040, + kIODMAMapIdentityMap = 0x00000080, + + kIODMAMapPageListFullyOccupied = 0x00000100, + kIODMAMapFixedAddress = 0x00000200, }; #ifdef KERNEL_PRIVATE @@ -195,59 +190,57 @@ enum // Used for dmaCommandOperation communications for IODMACommand and mappers enum { - kIOMDWalkSegments = 0x01000000, - kIOMDFirstSegment = 1 | kIOMDWalkSegments, - kIOMDGetCharacteristics = 0x02000000, - kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics, - kIOMDDMAActive = 0x03000000, - kIOMDSetDMAActive = 1 | kIOMDDMAActive, - kIOMDSetDMAInactive = kIOMDDMAActive, - kIOMDAddDMAMapSpec = 0x04000000, - kIOMDDMAMap = 0x05000000, - kIOMDDMAUnmap = 0x06000000, - kIOMDDMACommandOperationMask = 0xFF000000, + kIOMDWalkSegments = 0x01000000, + kIOMDFirstSegment = 1 | kIOMDWalkSegments, + kIOMDGetCharacteristics = 0x02000000, + kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics, + kIOMDDMAActive = 0x03000000, + kIOMDSetDMAActive = 1 | kIOMDDMAActive, + kIOMDSetDMAInactive = kIOMDDMAActive, + kIOMDAddDMAMapSpec = 0x04000000, + kIOMDDMAMap = 0x05000000, + kIOMDDMAUnmap = 0x06000000, + kIOMDDMACommandOperationMask = 0xFF000000, }; struct IOMDDMACharacteristics { - UInt64 fLength; - UInt32 fSGCount; - UInt32 fPages; - UInt32 fPageAlign; - ppnum_t fHighestPage; - IODirection fDirection; - UInt8 fIsPrepared; + UInt64 fLength; + UInt32 fSGCount; + UInt32 fPages; + UInt32 fPageAlign; + ppnum_t fHighestPage; + IODirection fDirection; + UInt8 fIsPrepared; }; struct IOMDDMAMapArgs { - IOMapper * fMapper; - IODMACommand * fCommand; - IODMAMapSpecification fMapSpec; - uint64_t fOffset; - uint64_t fLength; - uint64_t fAlloc; - uint64_t fAllocLength; - uint8_t fMapContig; + IOMapper * fMapper; + IODMACommand * fCommand; + IODMAMapSpecification fMapSpec; + uint64_t fOffset; + uint64_t fLength; + uint64_t fAlloc; + uint64_t fAllocLength; + uint8_t fMapContig; }; struct IOMDDMAWalkSegmentArgs { - UInt64 fOffset; // Input/Output offset - UInt64 fIOVMAddr, fLength; // Output variables - UInt8 fMapped; // Input Variable, Require mapped IOVMA - UInt64 fMappedBase; // Input base of mapping + UInt64 fOffset; // Input/Output offset + UInt64 fIOVMAddr, fLength; // Output variables + UInt8 fMapped; // Input Variable, Require mapped IOVMA + UInt64 fMappedBase; // Input base of mapping }; typedef UInt8 IOMDDMAWalkSegmentState[128]; // fMapped: -enum -{ +enum{ kIOMDDMAWalkMappedLocal = 2 }; #endif /* KERNEL_PRIVATE */ -enum -{ - kIOPreparationIDUnprepared = 0, - kIOPreparationIDUnsupported = 1, - kIOPreparationIDAlwaysPrepared = 2, 
+enum{ + kIOPreparationIDUnprepared = 0, + kIOPreparationIDUnsupported = 1, + kIOPreparationIDAlwaysPrepared = 2, }; #ifdef XNU_KERNEL_PRIVATE @@ -256,720 +249,720 @@ struct IOMemoryReference; /*! @class IOMemoryDescriptor : public OSObject - @abstract An abstract base class defining common methods for describing physical or virtual memory. - @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */ + * @abstract An abstract base class defining common methods for describing physical or virtual memory. + * @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */ class IOMemoryDescriptor : public OSObject { - friend class IOMemoryMap; - friend class IOMultiMemoryDescriptor; + friend class IOMemoryMap; + friend class IOMultiMemoryDescriptor; - OSDeclareDefaultStructors(IOMemoryDescriptor); + OSDeclareDefaultStructors(IOMemoryDescriptor); protected: /*! @var reserved - Reserved for future use. (Internal use only) */ - struct IOMemoryDescriptorReserved * reserved; + * Reserved for future use. (Internal use only) */ + struct IOMemoryDescriptorReserved * reserved; protected: - OSSet * _mappings; - IOOptionBits _flags; + OSSet * _mappings; + IOOptionBits _flags; #ifdef XNU_KERNEL_PRIVATE public: - struct IOMemoryReference * _memRef; - vm_tag_t _kernelTag; - vm_tag_t _userTag; - int16_t _dmaReferences; - uint16_t _internalFlags; - kern_allocation_name_t _mapName; + struct IOMemoryReference * _memRef; + vm_tag_t _kernelTag; + vm_tag_t _userTag; + int16_t _dmaReferences; + uint16_t _internalFlags; + kern_allocation_name_t _mapName; protected: #else /* XNU_KERNEL_PRIVATE */ - void * __iomd_reserved5; - uint16_t __iomd_reserved1[4]; - uintptr_t __iomd_reserved2; + void * __iomd_reserved5; + uint16_t __iomd_reserved1[4]; + uintptr_t __iomd_reserved2; #endif /* XNU_KERNEL_PRIVATE */ - uintptr_t __iomd_reserved3; - uintptr_t __iomd_reserved4; + uintptr_t __iomd_reserved3; + uintptr_t __iomd_reserved4; #ifndef __LP64__ - IODirection _direction; /* use _flags instead */ + IODirection _direction; /* use _flags instead */ #endif /* !__LP64__ */ - IOByteCount _length; /* length of all ranges */ - IOOptionBits _tag; + IOByteCount _length; /* length of all ranges */ + IOOptionBits _tag; public: -typedef IOOptionBits DMACommandOps; + typedef IOOptionBits DMACommandOps; #ifndef __LP64__ - virtual IOPhysicalAddress getSourceSegment( IOByteCount offset, - IOByteCount * length ) APPLE_KEXT_DEPRECATED; + virtual IOPhysicalAddress getSourceSegment( IOByteCount offset, + IOByteCount * length ) APPLE_KEXT_DEPRECATED; #endif /* !__LP64__ */ /*! @function initWithOptions - @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions. - @discussion Note this function can be used to re-init a previously created memory descriptor. - @result true on success, false on failure. 
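For context on how a memory descriptor is used once created, a hedged sketch of the common wire-transfer-unwire pattern; withAddressRange(), prepare(), and complete() are declared elsewhere in this header, and the buffer and helper below are hypothetical:

    static IOReturn
    wireAndUse(mach_vm_address_t buf, mach_vm_size_t len)   // hypothetical helper
    {
        IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
            buf, len, kIODirectionInOut, kernel_task);
        if (md == NULL) {
            return kIOReturnNoMemory;
        }
        IOReturn ret = md->prepare();    // wire the pages for I/O
        if (ret == kIOReturnSuccess) {
            // ... run the transfer, e.g. with an IOMemoryCursor or IODMACommand ...
            md->complete();              // unwire when the transfer is done
        }
        md->release();
        return ret;
    }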
*/ - virtual bool initWithOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits options, - IOMapper * mapper = kIOMapperSystem); + * @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions. + * @discussion Note this function can be used to re-init a previously created memory descriptor. + * @result true on success, false on failure. */ + virtual bool initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper = kIOMapperSystem); #ifndef __LP64__ - virtual addr64_t getPhysicalSegment64( IOByteCount offset, - IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */ + virtual addr64_t getPhysicalSegment64( IOByteCount offset, + IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */ #endif /* !__LP64__ */ /*! @function setPurgeable - @abstract Control the purgeable status of a memory descriptors memory. - @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost. - @param newState - the desired new purgeable state of the memory:
- kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.
- kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.
- kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.
- kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it. - @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:
- kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.
- kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.
- kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.
- @result An IOReturn code. */ - - virtual IOReturn setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ); - + * @abstract Control the purgeable status of a memory descriptor's memory. + * @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost. + * @param newState - the desired new purgeable state of the memory:
+ * kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.
+ * kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.
+ * kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.
+ * kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it. + * @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:
+ * kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.
+ * kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.
+ * kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.
+ * @result An IOReturn code. */ + + virtual IOReturn setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ); + /*! @function getPageCounts - @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. - @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. - @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor. - @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor. - @result An IOReturn code. */ + * @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. + * @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. + * @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor. + * @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor. + * @result An IOReturn code. */ - IOReturn getPageCounts( IOByteCount * residentPageCount, - IOByteCount * dirtyPageCount); + IOReturn getPageCounts( IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount); /*! @function performOperation - @abstract Perform an operation on the memory descriptor's memory. - @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually. - @param options The operation to perform on the memory:
- kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
- kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared. - @param offset A byte offset into the memory descriptor's memory. - @param length The length of the data range. - @result An IOReturn code. */ + * @abstract Perform an operation on the memory descriptor's memory. + * @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually. + * @param options The operation to perform on the memory:
+ * kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
+ * kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared. + * @param offset A byte offset into the memory descriptor's memory. + * @param length The length of the data range. + * @result An IOReturn code. */ - virtual IOReturn performOperation( IOOptionBits options, - IOByteCount offset, IOByteCount length ); + virtual IOReturn performOperation( IOOptionBits options, + IOByteCount offset, IOByteCount length ); - // Used for dedicated communications for IODMACommand - virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const; +// Used for dedicated communications for IODMACommand + virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const; /*! @function getPhysicalSegment - @abstract Break a memory descriptor into its physically contiguous segments. - @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset. - @param offset A byte offset into the memory whose physical address to return. - @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset. - @result A physical address, or zero if the offset is beyond the length of the memory. */ + * @abstract Break a memory descriptor into its physically contiguous segments. + * @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset. + * @param offset A byte offset into the memory whose physical address to return. + * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset. + * @result A physical address, or zero if the offset is beyond the length of the memory.
*/ #ifdef __LP64__ - virtual addr64_t getPhysicalSegment( IOByteCount offset, - IOByteCount * length, - IOOptionBits options = 0 ) = 0; + virtual addr64_t getPhysicalSegment( IOByteCount offset, + IOByteCount * length, + IOOptionBits options = 0 ) = 0; #else /* !__LP64__ */ - virtual addr64_t getPhysicalSegment( IOByteCount offset, - IOByteCount * length, - IOOptionBits options ); + virtual addr64_t getPhysicalSegment( IOByteCount offset, + IOByteCount * length, + IOOptionBits options ); #endif /* !__LP64__ */ - virtual uint64_t getPreparationID( void ); - void setPreparationID( void ); + virtual uint64_t getPreparationID( void ); + void setPreparationID( void ); #ifdef XNU_KERNEL_PRIVATE - IOMemoryDescriptorReserved * getKernelReserved( void ); - IOReturn dmaMap( - IOMapper * mapper, - IODMACommand * command, - const IODMAMapSpecification * mapSpec, - uint64_t offset, - uint64_t length, - uint64_t * mapAddress, - uint64_t * mapLength); - IOReturn dmaUnmap( - IOMapper * mapper, - IODMACommand * command, - uint64_t offset, - uint64_t mapAddress, - uint64_t mapLength); - void dmaMapRecord( - IOMapper * mapper, - IODMACommand * command, - uint64_t mapLength); - - void setVMTags(vm_tag_t kernelTag, vm_tag_t userTag); - vm_tag_t getVMTag(vm_map_t map); + IOMemoryDescriptorReserved * getKernelReserved( void ); + IOReturn dmaMap( + IOMapper * mapper, + IODMACommand * command, + const IODMAMapSpecification * mapSpec, + uint64_t offset, + uint64_t length, + uint64_t * mapAddress, + uint64_t * mapLength); + IOReturn dmaUnmap( + IOMapper * mapper, + IODMACommand * command, + uint64_t offset, + uint64_t mapAddress, + uint64_t mapLength); + void dmaMapRecord( + IOMapper * mapper, + IODMACommand * command, + uint64_t mapLength); + + void setVMTags(vm_tag_t kernelTag, vm_tag_t userTag); + vm_tag_t getVMTag(vm_map_t map); #endif - + private: - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0); #ifdef __LP64__ - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7); #else /* !__LP64__ */ - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 3); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 4); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 5); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 6); - OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 7); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 3); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 4); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 5); + 
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 6); + OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 7); #endif /* !__LP64__ */ - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14); - OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14); + OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15); protected: - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; public: - static void initialize( void ); + static void initialize( void ); public: /*! @function withAddress - @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task. - @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described. - @param address The virtual address of the first byte in the memory. - @param withLength The length of memory. - @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + * @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task. + * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described. + * @param address The virtual address of the first byte in the memory. + * @param withLength The length of memory. + * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - static IOMemoryDescriptor * withAddress(void * address, - IOByteCount withLength, - IODirection withDirection); + static IOMemoryDescriptor * withAddress(void * address, + IOByteCount withLength, + IODirection withDirection); #ifndef __LP64__ - static IOMemoryDescriptor * withAddress(IOVirtualAddress address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */ + static IOMemoryDescriptor * withAddress(IOVirtualAddress address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */ #endif /* !__LP64__ */ /*! 
@function withPhysicalAddress - @abstract Create an IOMemoryDescriptor to describe one physical range. - @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range. - @param address The physical address of the first byte in the memory. - @param withLength The length of memory. - @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - - static IOMemoryDescriptor * withPhysicalAddress( - IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ); + * @abstract Create an IOMemoryDescriptor to describe one physical range. + * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range. + * @param address The physical address of the first byte in the memory. + * @param withLength The length of memory. + * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMemoryDescriptor * withPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ); #ifndef __LP64__ - static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */ + static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */ #endif /* !__LP64__ */ /*! @function withAddressRange - @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map. - @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described. - @param address The virtual address of the first byte in the memory. - @param length The length of memory. - @param options - kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address. - @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - - static IOMemoryDescriptor * withAddressRange( - mach_vm_address_t address, - mach_vm_size_t length, - IOOptionBits options, - task_t task); + * @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map. + * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. 
This memory descriptor needs to be prepared before it can be used to extract data from the memory described. + * @param address The virtual address of the first byte in the memory. + * @param length The length of memory. + * @param options + * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address. + * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMemoryDescriptor * withAddressRange( + mach_vm_address_t address, + mach_vm_size_t length, + IOOptionBits options, + task_t task); /*! @function withAddressRanges - @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges. - @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described. - @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange. - @param rangeCount The member count of the ranges array. - @param options - kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations. - @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address. - @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - - static IOMemoryDescriptor * withAddressRanges( - IOAddressRange * ranges, - UInt32 rangeCount, - IOOptionBits options, - task_t task); + * @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges. + * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described. + * @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange. + * @param rangeCount The member count of the ranges array. + * @param options + * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. 
+ * kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations. + * @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address. + * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOMemoryDescriptor * withAddressRanges( + IOAddressRange * ranges, + UInt32 rangeCount, + IOOptionBits options, + task_t task); /*! @function withOptions - @abstract Master initialiser for all variants of memory descriptors. - @discussion This method creates and initializes an IOMemoryDescriptor for memory it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described. - - - @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels. - - @param count options:type = Virtual or Physical count contains a count of the number of entires in the buffers array. For options:type = UPL this field contains a total length. - - @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl. - - @param task Only used options:type = Virtual, The task each of the virtual ranges are mapped into. - - @param options - kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates that what type of memory basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters. - kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations. - kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map. - - @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present. - - @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + * @abstract Master initialiser for all variants of memory descriptors. + * @discussion This method creates and initializes an IOMemoryDescriptor for memory; it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below.
This memory descriptor needs to be prepared before it can be used to extract data from the memory described. + * + * + * @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels. + * + * @param count options:type = Virtual or Physical count contains a count of the number of entries in the buffers array. For options:type = UPL this field contains a total length. + * + * @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl. + * + * @param task Only used when options:type = Virtual. The task each of the virtual ranges are mapped into. + * + * @param options + * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates what type of basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters. + * kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations. + * kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map. + * + * @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present. + * + * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure.
*/ - static IOMemoryDescriptor *withOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits options, - IOMapper * mapper = kIOMapperSystem); + static IOMemoryDescriptor *withOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper = kIOMapperSystem); #ifndef __LP64__ - static IOMemoryDescriptor * withPhysicalRanges( - IOPhysicalRange * ranges, - UInt32 withCount, - IODirection withDirection, - bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */ + static IOMemoryDescriptor * withPhysicalRanges( + IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */ #endif /* !__LP64__ */ #ifndef __LP64__ - static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor *of, - IOByteCount offset, - IOByteCount length, - IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */ + static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor *of, + IOByteCount offset, + IOByteCount length, + IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */ #endif /* !__LP64__ */ /*! @function withPersistentMemoryDescriptor - @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed. - @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and the reallocated memory at the same address then the original memory descriptor is returned with a additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option. - @param originalMD The memory descriptor to be duplicated. - @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. - static IOMemoryDescriptor * + * @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed. + * @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and then reallocated memory at the same address, then the original memory descriptor is returned with an additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note it is not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option. + * @param originalMD The memory descriptor to be duplicated. + * @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage.
*/ + static IOMemoryDescriptor * withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD); #ifndef __LP64__ - // obsolete initializers - // - initWithOptions is the designated initializer - virtual bool initWithAddress(void * address, - IOByteCount withLength, - IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ - virtual bool initWithAddress(IOVirtualAddress address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ - virtual bool initWithPhysicalAddress( - IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ - virtual bool initWithRanges(IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ - virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges, - UInt32 withCount, - IODirection withDirection, - bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ +// obsolete initializers +// - initWithOptions is the designated initializer + virtual bool initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ + virtual bool initWithAddress(IOVirtualAddress address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ + virtual bool initWithPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ + virtual bool initWithRanges(IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ + virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */ #endif /* __LP64__ */ /*! @function getDirection - @abstract Accessor to get the direction the memory descriptor was created with. - @discussion This method returns the direction the memory descriptor was created with. - @result The direction. */ + * @abstract Accessor to get the direction the memory descriptor was created with. + * @discussion This method returns the direction the memory descriptor was created with. + * @result The direction. */ - virtual IODirection getDirection() const; + virtual IODirection getDirection() const; /*! @function getLength - @abstract Accessor to get the length of the memory descriptor (over all its ranges). - @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths. - @result The byte count. */ + * @abstract Accessor to get the length of the memory descriptor (over all its ranges). + * @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths. + * @result The byte count. */ - virtual IOByteCount getLength() const; + virtual IOByteCount getLength() const; /*! @function setTag - @abstract Set the tag for the memory descriptor. - @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor. - @param tag The tag. 
*/ + * @abstract Set the tag for the memory descriptor. + * @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor. + * @param tag The tag. */ - virtual void setTag( IOOptionBits tag ); + virtual void setTag( IOOptionBits tag ); /*! @function getTag - @abstract Accessor to the retrieve the tag for the memory descriptor. - @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor. - @result The tag. */ + * @abstract Accessor to retrieve the tag for the memory descriptor. + * @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor. + * @result The tag. */ - virtual IOOptionBits getTag( void ); + virtual IOOptionBits getTag( void ); /*! @function getFlags - @abstract Accessor to the retrieve the options the memory descriptor was created with. - @discussion Accessor to the retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum. - @result The flags bitfield. */ + * @abstract Accessor to retrieve the options the memory descriptor was created with. + * @discussion Accessor to retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum. + * @result The flags bitfield. */ - uint64_t getFlags(void); + uint64_t getFlags(void); /*! @function readBytes - @abstract Copy data from the memory descriptor's buffer to the specified buffer. - @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direcction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device. - @param offset A byte offset into the memory descriptor's memory. - @param bytes The caller supplied buffer to copy the data to. - @param withLength The length of the data to copy. - @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */ + * @abstract Copy data from the memory descriptor's buffer to the specified buffer. + * @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device. + * @param offset A byte offset into the memory descriptor's memory. + * @param bytes The caller supplied buffer to copy the data to. + * @param withLength The length of the data to copy. + * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */ - virtual IOByteCount readBytes(IOByteCount offset, - void * bytes, IOByteCount withLength); + virtual IOByteCount readBytes(IOByteCount offset, + void * bytes, IOByteCount withLength); /*!
@function writeBytes - @abstract Copy data to the memory descriptor's buffer from the specified buffer. - @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direcction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers. - @param offset A byte offset into the memory descriptor's memory. - @param bytes The caller supplied buffer to copy the data from. - @param withLength The length of the data to copy. - @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */ + * @abstract Copy data to the memory descriptor's buffer from the specified buffer. + * @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers. + * @param offset A byte offset into the memory descriptor's memory. + * @param bytes The caller supplied buffer to copy the data from. + * @param withLength The length of the data to copy. + * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */ - virtual IOByteCount writeBytes(IOByteCount offset, - const void * bytes, IOByteCount withLength); + virtual IOByteCount writeBytes(IOByteCount offset, + const void * bytes, IOByteCount withLength); #ifndef __LP64__ - virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, - IOByteCount * length); + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length); #endif /* !__LP64__ */ /*! @function getPhysicalAddress - @abstract Return the physical address of the first byte in the memory. - @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous. - @result A physical address. */ + * @abstract Return the physical address of the first byte in the memory. + * @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous. + * @result A physical address. */ - IOPhysicalAddress getPhysicalAddress(); + IOPhysicalAddress getPhysicalAddress(); #ifndef __LP64__ - virtual void * getVirtualSegment(IOByteCount offset, - IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */ + virtual void * getVirtualSegment(IOByteCount offset, + IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */ #endif /* !__LP64__ */ /*! @function prepare - @abstract Prepare the memory for an I/O transfer. - @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes.
Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor. - @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. - @result An IOReturn code. */ + * @abstract Prepare the memory for an I/O transfer. + * @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor. + * @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + * @result An IOReturn code. */ - virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0; + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0; /*! @function complete - @abstract Complete processing of the memory after an I/O transfer finishes. - @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is not longer respected. The direction is totally determined at prepare() time. - @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. - @result An IOReturn code. */ + * @abstract Complete processing of the memory after an I/O transfer finishes. + * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is no longer respected. The direction is totally determined at prepare() time. + * @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + * @result An IOReturn code. */ - virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0; + virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0; - /* - * Mapping functions. - */ +/* + * Mapping functions. + */ /*! @function createMappingInTask - @abstract Maps a IOMemoryDescriptor into a task. - @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. - @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space. - @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored. - @param options Mapping options are defined in IOTypes.h,
- kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.
- kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.
- kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.
- kIOMapReadOnly to allow only read only accesses to the memory - writes will cause and access fault.
- kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.
- kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.
- kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.
- @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory. - @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory. - @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */ - - IOMemoryMap * createMappingInTask( - task_t intoTask, - mach_vm_address_t atAddress, - IOOptionBits options, - mach_vm_size_t offset = 0, - mach_vm_size_t length = 0 ); + * @abstract Maps a IOMemoryDescriptor into a task. + * @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. + * @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space. + * @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored. + * @param options Mapping options are defined in IOTypes.h,
+ * kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.
+ * kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.
+ * kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.
+ * kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.
+ * kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.
+ * kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.
+ * kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.
+ * @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory. + * @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory. + * @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */ + + IOMemoryMap * createMappingInTask( + task_t intoTask, + mach_vm_address_t atAddress, + IOOptionBits options, + mach_vm_size_t offset = 0, + mach_vm_size_t length = 0 ); #ifndef __LP64__ - virtual IOMemoryMap * map( - task_t intoTask, - IOVirtualAddress atAddress, - IOOptionBits options, - IOByteCount offset = 0, - IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED; /* use createMappingInTask() instead */ + virtual IOMemoryMap * map( + task_t intoTask, + IOVirtualAddress atAddress, + IOOptionBits options, + IOByteCount offset = 0, + IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED;/* use createMappingInTask() instead */ #endif /* !__LP64__ */ /*! @function map - @abstract Maps a IOMemoryDescriptor into the kernel map. - @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details. - @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed. - @result See the full version of the createMappingInTask method. */ + * @abstract Maps a IOMemoryDescriptor into the kernel map. + * @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details. + * @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed. + * @result See the full version of the createMappingInTask method. */ - virtual IOMemoryMap * map( - IOOptionBits options = 0 ); + virtual IOMemoryMap * map( + IOOptionBits options = 0 ); /*! @function setMapping - @abstract Establishes an already existing mapping. - @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed. - @param task Address space in which the mapping exists. - @param mapAddress Virtual address of the mapping. - @param options Caching and read-only attributes of the mapping. - @result A IOMemoryMap object created to represent the mapping. */ + * @abstract Establishes an already existing mapping. + * @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed. + * @param task Address space in which the mapping exists. 
+ * @param mapAddress Virtual address of the mapping. + * @param options Caching and read-only attributes of the mapping. + * @result A IOMemoryMap object created to represent the mapping. */ - virtual IOMemoryMap * setMapping( - task_t task, - IOVirtualAddress mapAddress, - IOOptionBits options = 0 ); + virtual IOMemoryMap * setMapping( + task_t task, + IOVirtualAddress mapAddress, + IOOptionBits options = 0 ); - // Following methods are private implementation +// Following methods are private implementation #ifdef __LP64__ - virtual + virtual #endif /* __LP64__ */ - IOReturn redirect( task_t safeTask, bool redirect ); - - IOReturn handleFault( - void * _pager, - mach_vm_size_t sourceOffset, - mach_vm_size_t length); - - IOReturn populateDevicePager( - void * pager, - vm_map_t addressMap, - mach_vm_address_t address, - mach_vm_size_t sourceOffset, - mach_vm_size_t length, - IOOptionBits options ); - - virtual IOMemoryMap * makeMapping( - IOMemoryDescriptor * owner, - task_t intoTask, - IOVirtualAddress atAddress, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ); + IOReturn redirect( task_t safeTask, bool redirect ); + + IOReturn handleFault( + void * _pager, + mach_vm_size_t sourceOffset, + mach_vm_size_t length); + + IOReturn populateDevicePager( + void * pager, + vm_map_t addressMap, + mach_vm_address_t address, + mach_vm_size_t sourceOffset, + mach_vm_size_t length, + IOOptionBits options ); + + virtual IOMemoryMap * makeMapping( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress atAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ); protected: - virtual void addMapping( - IOMemoryMap * mapping ); - - virtual void removeMapping( - IOMemoryMap * mapping ); - - virtual IOReturn doMap( - vm_map_t addressMap, - IOVirtualAddress * atAddress, - IOOptionBits options, - IOByteCount sourceOffset = 0, - IOByteCount length = 0 ); - - virtual IOReturn doUnmap( - vm_map_t addressMap, - IOVirtualAddress logical, - IOByteCount length ); + virtual void addMapping( + IOMemoryMap * mapping ); + + virtual void removeMapping( + IOMemoryMap * mapping ); + + virtual IOReturn doMap( + vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset = 0, + IOByteCount length = 0 ); + + virtual IOReturn doUnmap( + vm_map_t addressMap, + IOVirtualAddress logical, + IOByteCount length ); }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /*! @class IOMemoryMap : public OSObject - @abstract A class defining common methods for describing a memory mapping. - @discussion The IOMemoryMap object represents a mapped range of memory, described by a IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */ + * @abstract A class defining common methods for describing a memory mapping. + * @discussion The IOMemoryMap object represents a mapped range of memory, described by a IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. 
*/ class IOMemoryMap : public OSObject { - OSDeclareDefaultStructors(IOMemoryMap) + OSDeclareDefaultStructors(IOMemoryMap) #ifdef XNU_KERNEL_PRIVATE public: - IOMemoryDescriptor * fMemory; - IOMemoryMap * fSuperMap; - mach_vm_size_t fOffset; - mach_vm_address_t fAddress; - mach_vm_size_t fLength; - task_t fAddressTask; - vm_map_t fAddressMap; - IOOptionBits fOptions; - upl_t fRedirUPL; - ipc_port_t fRedirEntry; - IOMemoryDescriptor * fOwner; - uint8_t fUserClientUnmap; + IOMemoryDescriptor * fMemory; + IOMemoryMap * fSuperMap; + mach_vm_size_t fOffset; + mach_vm_address_t fAddress; + mach_vm_size_t fLength; + task_t fAddressTask; + vm_map_t fAddressMap; + IOOptionBits fOptions; + upl_t fRedirUPL; + ipc_port_t fRedirEntry; + IOMemoryDescriptor * fOwner; + uint8_t fUserClientUnmap; #if IOTRACKING - IOTrackingUser fTracking; + IOTrackingUser fTracking; #endif #endif /* XNU_KERNEL_PRIVATE */ protected: - virtual void taggedRelease(const void *tag = 0) const APPLE_KEXT_OVERRIDE; - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void taggedRelease(const void *tag = 0) const APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; public: /*! @function getVirtualAddress - @abstract Accessor to the virtual address of the first byte in the mapping. - @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings. - @result A virtual address. */ + * @abstract Accessor to the virtual address of the first byte in the mapping. + * @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings. + * @result A virtual address. */ - virtual IOVirtualAddress getVirtualAddress(); + virtual IOVirtualAddress getVirtualAddress(); /*! @function getPhysicalSegment - @abstract Break a mapping into its physically contiguous segments. - @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment. - @param offset A byte offset into the mapping whose physical address to return. - @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset. - @result A physical address, or zero if the offset is beyond the length of the mapping. */ + * @abstract Break a mapping into its physically contiguous segments. + * @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment. + * @param offset A byte offset into the mapping whose physical address to return. + * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset. + * @result A physical address, or zero if the offset is beyond the length of the mapping.
*/ #ifdef __LP64__ - virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, - IOByteCount * length, - IOOptionBits options = 0); + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length, + IOOptionBits options = 0); #else /* !__LP64__ */ - virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, - IOByteCount * length); + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length); #endif /* !__LP64__ */ /*! @function getPhysicalAddress - @abstract Return the physical address of the first byte in the mapping. - @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous. - @result A physical address. */ + * @abstract Return the physical address of the first byte in the mapping. + * @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous. + * @result A physical address. */ - IOPhysicalAddress getPhysicalAddress(); + IOPhysicalAddress getPhysicalAddress(); /*! @function getLength - @abstract Accessor to the length of the mapping. - @discussion This method returns the length of the mapping. - @result A byte count. */ + * @abstract Accessor to the length of the mapping. + * @discussion This method returns the length of the mapping. + * @result A byte count. */ - virtual IOByteCount getLength(); + virtual IOByteCount getLength(); /*! @function getAddressTask - @abstract Accessor to the task of the mapping. - @discussion This method returns the mach task the mapping exists in. - @result A mach task_t. */ + * @abstract Accessor to the task of the mapping. + * @discussion This method returns the mach task the mapping exists in. + * @result A mach task_t. */ - virtual task_t getAddressTask(); + virtual task_t getAddressTask(); /*! @function getMemoryDescriptor - @abstract Accessor to the IOMemoryDescriptor the mapping was created from. - @discussion This method returns the IOMemoryDescriptor the mapping was created from. - @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */ + * @abstract Accessor to the IOMemoryDescriptor the mapping was created from. + * @discussion This method returns the IOMemoryDescriptor the mapping was created from. + * @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */ - virtual IOMemoryDescriptor * getMemoryDescriptor(); + virtual IOMemoryDescriptor * getMemoryDescriptor(); /*! @function getMapOptions - @abstract Accessor to the options the mapping was created with. - @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with. - @result Options for the mapping, including cache settings. */ + * @abstract Accessor to the options the mapping was created with. + * @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with. + * @result Options for the mapping, including cache settings. */ - virtual IOOptionBits getMapOptions(); + virtual IOOptionBits getMapOptions(); /*! @function unmap - @abstract Force the IOMemoryMap to unmap, without destroying the object. - @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. 
This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used. - @result An IOReturn code. */ + * @abstract Force the IOMemoryMap to unmap, without destroying the object. + * @discussion IOMemoryMap instances will unmap themselves upon free, i.e. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used. + * @result An IOReturn code. */ - virtual IOReturn unmap(); + virtual IOReturn unmap(); - virtual void taskDied(); + virtual void taskDied(); /*! @function redirect - @abstract Replace the memory mapped in a process with new backing memory. - @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory. - @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument. - @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map() - @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default. - @result An IOReturn code. */ + * @abstract Replace the memory mapped in a process with new backing memory. + * @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory. + * @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is now to be mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with a non-NULL newBackingMemory argument. + * @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map(). + * @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default. + * @result An IOReturn code. */ #ifndef __LP64__ // For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface; // for 64 bit, these fall together on the 64 bit one.
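+// A sketch of the atomic-copy pattern described above (assumes `uniqueMap`
+// was created with kIOMapUnique and `newMD` is the replacement descriptor):
+//
+//   uniqueMap->redirect((IOMemoryDescriptor *) NULL, 0, (mach_vm_size_t) 0); // block client access
+//   // ... snapshot the old backing memory while clients are blocked ...
+//   uniqueMap->redirect(newMD, 0, (mach_vm_size_t) 0);                       // restore access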
- virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory, - IOOptionBits options, - IOByteCount offset = 0); + virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory, + IOOptionBits options, + IOByteCount offset = 0); #endif - virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory, - IOOptionBits options, - mach_vm_size_t offset = 0); + virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory, + IOOptionBits options, + mach_vm_size_t offset = 0); #ifdef __LP64__ /*! @function getAddress - @abstract Accessor to the virtual address of the first byte in the mapping. - @discussion This method returns the virtual address of the first byte in the mapping. - @result A virtual address. */ - inline mach_vm_address_t getAddress() __attribute__((always_inline)); + * @abstract Accessor to the virtual address of the first byte in the mapping. + * @discussion This method returns the virtual address of the first byte in the mapping. + * @result A virtual address. */ + inline mach_vm_address_t getAddress() __attribute__((always_inline)); /*! @function getSize - @abstract Accessor to the length of the mapping. - @discussion This method returns the length of the mapping. - @result A byte count. */ - inline mach_vm_size_t getSize() __attribute__((always_inline)); + * @abstract Accessor to the length of the mapping. + * @discussion This method returns the length of the mapping. + * @result A byte count. */ + inline mach_vm_size_t getSize() __attribute__((always_inline)); #else /* !__LP64__ */ /*! @function getAddress - @abstract Accessor to the virtual address of the first byte in the mapping. - @discussion This method returns the virtual address of the first byte in the mapping. - @result A virtual address. */ - virtual mach_vm_address_t getAddress(); + * @abstract Accessor to the virtual address of the first byte in the mapping. + * @discussion This method returns the virtual address of the first byte in the mapping. + * @result A virtual address. */ + virtual mach_vm_address_t getAddress(); /*! @function getSize - @abstract Accessor to the length of the mapping. - @discussion This method returns the length of the mapping. - @result A byte count. */ - virtual mach_vm_size_t getSize(); + * @abstract Accessor to the length of the mapping. + * @discussion This method returns the length of the mapping. + * @result A byte count. 
*/ + virtual mach_vm_size_t getSize(); #endif /* !__LP64__ */ #ifdef XNU_KERNEL_PRIVATE - // for IOMemoryDescriptor use - IOMemoryMap * copyCompatible( IOMemoryMap * newMapping ); +// for IOMemoryDescriptor use + IOMemoryMap * copyCompatible( IOMemoryMap * newMapping ); - bool init( - task_t intoTask, - mach_vm_address_t toAddress, - IOOptionBits options, - mach_vm_size_t offset, - mach_vm_size_t length ); + bool init( + task_t intoTask, + mach_vm_address_t toAddress, + IOOptionBits options, + mach_vm_size_t offset, + mach_vm_size_t length ); - bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset); + bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset); - IOReturn redirect( - task_t intoTask, bool redirect ); + IOReturn redirect( + task_t intoTask, bool redirect ); - IOReturn userClientUnmap(); + IOReturn userClientUnmap(); #endif /* XNU_KERNEL_PRIVATE */ - IOReturn wireRange( - uint32_t options, - mach_vm_size_t offset, - mach_vm_size_t length); - - OSMetaClassDeclareReservedUnused(IOMemoryMap, 0); - OSMetaClassDeclareReservedUnused(IOMemoryMap, 1); - OSMetaClassDeclareReservedUnused(IOMemoryMap, 2); - OSMetaClassDeclareReservedUnused(IOMemoryMap, 3); - OSMetaClassDeclareReservedUnused(IOMemoryMap, 4); - OSMetaClassDeclareReservedUnused(IOMemoryMap, 5); - OSMetaClassDeclareReservedUnused(IOMemoryMap, 6); - OSMetaClassDeclareReservedUnused(IOMemoryMap, 7); + IOReturn wireRange( + uint32_t options, + mach_vm_size_t offset, + mach_vm_size_t length); + + OSMetaClassDeclareReservedUnused(IOMemoryMap, 0); + OSMetaClassDeclareReservedUnused(IOMemoryMap, 1); + OSMetaClassDeclareReservedUnused(IOMemoryMap, 2); + OSMetaClassDeclareReservedUnused(IOMemoryMap, 3); + OSMetaClassDeclareReservedUnused(IOMemoryMap, 4); + OSMetaClassDeclareReservedUnused(IOMemoryMap, 5); + OSMetaClassDeclareReservedUnused(IOMemoryMap, 6); + OSMetaClassDeclareReservedUnused(IOMemoryMap, 7); }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -977,213 +970,214 @@ public: // Also these flags should not overlap with the options to // IOMemoryDescriptor::initWithRanges(... IOOptionsBits options); enum { - _kIOMemorySourceSegment = 0x00002000 + _kIOMemorySourceSegment = 0x00002000 }; #endif /* XNU_KERNEL_PRIVATE */ // The following classes are private implementation of IOMemoryDescriptor - they -// should not be referenced directly, just through the public API's in the +// should not be referenced directly, just through the public APIs in the // IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance -// might be created by IOMemoryDescriptor::withAddressRange(), but there should be +// might be created by IOMemoryDescriptor::withAddressRange(), but there should be // no need to reference as anything but a generic IOMemoryDescriptor *. class IOGeneralMemoryDescriptor : public IOMemoryDescriptor { - OSDeclareDefaultStructors(IOGeneralMemoryDescriptor); + OSDeclareDefaultStructors(IOGeneralMemoryDescriptor); public: - union Ranges { - IOVirtualRange *v; - IOAddressRange *v64; - IOPhysicalRange *p; - void *uio; - }; + union Ranges { + IOVirtualRange *v; + IOAddressRange *v64; + IOPhysicalRange *p; + void *uio; + }; protected: - Ranges _ranges; - unsigned _rangesCount; /* number of address ranges in list */ + Ranges _ranges; + unsigned _rangesCount; /* number of address ranges in list */ #ifndef __LP64__ - bool _rangesIsAllocated; /* is list allocated by us? */ + bool _rangesIsAllocated;/* is list allocated by us?
*/ #endif /* !__LP64__ */ - task_t _task; /* task where all ranges are mapped to */ + task_t _task; /* task where all ranges are mapped to */ - union { - IOVirtualRange v; - IOPhysicalRange p; - } _singleRange; /* storage space for a single range */ + union { + IOVirtualRange v; + IOPhysicalRange p; + } _singleRange; /* storage space for a single range */ - unsigned _wireCount; /* number of outstanding wires */ + unsigned _wireCount; /* number of outstanding wires */ #ifndef __LP64__ - uintptr_t _cachedVirtualAddress; + uintptr_t _cachedVirtualAddress; - IOPhysicalAddress _cachedPhysicalAddress; + IOPhysicalAddress _cachedPhysicalAddress; #endif /* !__LP64__ */ - bool _initialized; /* has superclass been initialized? */ + bool _initialized; /* has superclass been initialized? */ public: - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; - virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE; + virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE; - virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE; + virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE; #ifdef XNU_KERNEL_PRIVATE - // Internal APIs may be made virtual at some time in the future. - IOReturn wireVirtual(IODirection forDirection); - IOReturn dmaMap( - IOMapper * mapper, - IODMACommand * command, - const IODMAMapSpecification * mapSpec, - uint64_t offset, - uint64_t length, - uint64_t * mapAddress, - uint64_t * mapLength); - bool initMemoryEntries(size_t size, IOMapper * mapper); - - IOMemoryReference * memoryReferenceAlloc(uint32_t capacity, - IOMemoryReference * realloc); - void memoryReferenceFree(IOMemoryReference * ref); - void memoryReferenceRelease(IOMemoryReference * ref); - - IOReturn memoryReferenceCreate( - IOOptionBits options, - IOMemoryReference ** reference); - - IOReturn memoryReferenceMap(IOMemoryReference * ref, - vm_map_t map, - mach_vm_size_t inoffset, - mach_vm_size_t size, - IOOptionBits options, - mach_vm_address_t * inaddr); - - static IOReturn memoryReferenceSetPurgeable( - IOMemoryReference * ref, - IOOptionBits newState, - IOOptionBits * oldState); - static IOReturn memoryReferenceGetPageCounts( - IOMemoryReference * ref, - IOByteCount * residentPageCount, - IOByteCount * dirtyPageCount); +// Internal APIs may be made virtual at some time in the future. 
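+// As noted above, instances are normally obtained through the generic
+// IOMemoryDescriptor factories rather than referenced directly; a minimal
+// sketch (assumes `addr` and `len` describe an existing kernel buffer):
+//
+//   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
+//       addr, len, kIODirectionOut, kernel_task);
+//   // ... use md, then md->release();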
+ IOReturn wireVirtual(IODirection forDirection); + IOReturn dmaMap( + IOMapper * mapper, + IODMACommand * command, + const IODMAMapSpecification * mapSpec, + uint64_t offset, + uint64_t length, + uint64_t * mapAddress, + uint64_t * mapLength); + bool initMemoryEntries(size_t size, IOMapper * mapper); + + IOMemoryReference * memoryReferenceAlloc(uint32_t capacity, + IOMemoryReference * realloc); + void memoryReferenceFree(IOMemoryReference * ref); + void memoryReferenceRelease(IOMemoryReference * ref); + + IOReturn memoryReferenceCreate( + IOOptionBits options, + IOMemoryReference ** reference); + + IOReturn memoryReferenceMap(IOMemoryReference * ref, + vm_map_t map, + mach_vm_size_t inoffset, + mach_vm_size_t size, + IOOptionBits options, + mach_vm_address_t * inaddr); + + static IOReturn memoryReferenceSetPurgeable( + IOMemoryReference * ref, + IOOptionBits newState, + IOOptionBits * oldState); + static IOReturn memoryReferenceGetPageCounts( + IOMemoryReference * ref, + IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount); #endif private: #ifndef __LP64__ - virtual void setPosition(IOByteCount position); - virtual void mapIntoKernel(unsigned rangeIndex); - virtual void unmapFromKernel(); + virtual void setPosition(IOByteCount position); + virtual void mapIntoKernel(unsigned rangeIndex); + virtual void unmapFromKernel(); #endif /* !__LP64__ */ - // Internal - OSData * _memoryEntries; - unsigned int _pages; - ppnum_t _highestPage; - uint32_t __iomd_reservedA; - uint32_t __iomd_reservedB; +// Internal + OSData * _memoryEntries; + unsigned int _pages; + ppnum_t _highestPage; + uint32_t __iomd_reservedA; + uint32_t __iomd_reservedB; - IOLock * _prepareLock; + IOLock * _prepareLock; public: - /* - * IOMemoryDescriptor required methods - */ - - // Master initaliser - virtual bool initWithOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits options, - IOMapper * mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE; +/* + * IOMemoryDescriptor required methods + */ + +// Master initialiser + virtual bool initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE; #ifndef __LP64__ - // Secondary initialisers - virtual bool initWithAddress(void * address, - IOByteCount withLength, - IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; - - virtual bool initWithAddress(IOVirtualAddress address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; - - virtual bool initWithPhysicalAddress( - IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; - - virtual bool initWithRanges( IOVirtualRange * ranges, - UInt32 withCount, - IODirection withDirection, - task_t withTask, - bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; - - virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges, - UInt32 withCount, - IODirection withDirection, - bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; - - virtual addr64_t getPhysicalSegment64( IOByteCount offset, - IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; - - virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, - IOByteCount * length) APPLE_KEXT_OVERRIDE; - - virtual IOPhysicalAddress getSourceSegment(IOByteCount offset, - IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; - - virtual void *
getVirtualSegment(IOByteCount offset, - IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; +// Secondary initialisers + virtual bool initWithAddress(void * address, + IOByteCount withLength, + IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; + + virtual bool initWithAddress(IOVirtualAddress address, + IOByteCount withLength, + IODirection withDirection, + task_t withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; + + virtual bool initWithPhysicalAddress( + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; + + virtual bool initWithRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection withDirection, + task_t withTask, + bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; + + virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges, + UInt32 withCount, + IODirection withDirection, + bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; + + virtual addr64_t getPhysicalSegment64( IOByteCount offset, + IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; + + virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, + IOByteCount * length) APPLE_KEXT_OVERRIDE; + + virtual IOPhysicalAddress getSourceSegment(IOByteCount offset, + IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; + + virtual void * getVirtualSegment(IOByteCount offset, + IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED; #endif /* !__LP64__ */ - virtual IOReturn setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE; - - virtual addr64_t getPhysicalSegment( IOByteCount offset, - IOByteCount * length, + virtual IOReturn setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE; + + virtual addr64_t getPhysicalSegment( IOByteCount offset, + IOByteCount * length, #ifdef __LP64__ - IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE; + IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE; #else /* !__LP64__ */ - IOOptionBits options ) APPLE_KEXT_OVERRIDE; + IOOptionBits options)APPLE_KEXT_OVERRIDE; #endif /* !__LP64__ */ - virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; - virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; + virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; - virtual IOReturn doMap( - vm_map_t addressMap, - IOVirtualAddress * atAddress, - IOOptionBits options, - IOByteCount sourceOffset = 0, - IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE; + virtual IOReturn doMap( + vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset = 0, + IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE; - virtual IOReturn doUnmap( - vm_map_t addressMap, - IOVirtualAddress logical, - IOByteCount length ) APPLE_KEXT_OVERRIDE; + virtual IOReturn doUnmap( + vm_map_t addressMap, + IOVirtualAddress logical, + IOByteCount length ) APPLE_KEXT_OVERRIDE; - virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; + virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; - // Factory method for cloning a persistent IOMD, see IOMemoryDescriptor - static IOMemoryDescriptor * +// Factory method for cloning a persistent IOMD, see IOMemoryDescriptor + static IOMemoryDescriptor * withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor 
*originalMD); - }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #ifdef __LP64__ -mach_vm_address_t IOMemoryMap::getAddress() +mach_vm_address_t +IOMemoryMap::getAddress() { - return (getVirtualAddress()); + return getVirtualAddress(); } -mach_vm_size_t IOMemoryMap::getSize() +mach_vm_size_t +IOMemoryMap::getSize() { - return (getLength()); + return getLength(); } #else /* !__LP64__ */ #include diff --git a/iokit/IOKit/IOMessage.h b/iokit/IOKit/IOMessage.h index 38808ebcc..cdd53930a 100644 --- a/iokit/IOKit/IOMessage.h +++ b/iokit/IOKit/IOMessage.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -42,12 +42,12 @@ typedef UInt32 IOMessage; #define iokit_common_msg(message) (UInt32)(sys_iokit|sub_iokit_common|message) -#define iokit_family_msg(sub,message) (UInt32)(sys_iokit|sub|message) +#define iokit_family_msg(sub, message) (UInt32)(sys_iokit|sub|message) -/*! +/*! * @defined iokit_vendor_specific_msg * @discussion iokit_vendor_specific_msg passes messages in the sub_iokit_vendor_specific - * subsystem. It can be used to generate messages that are used for private + * subsystem. It can be used to generate messages that are used for private * communication between vendor specific code with the IOService::message() etc. APIs. */ #define iokit_vendor_specific_msg(message) (UInt32)(sys_iokit|sub_iokit_vendor_specific|message) @@ -93,10 +93,10 @@ typedef UInt32 IOMessage; /*! * @defined kIOMessageSystemWillPowerOff - * @discussion Indicates an imminent system shutdown. Recipients have a limited - * amount of time to respond, otherwise the system will timeout and + * @discussion Indicates an imminent system shutdown. Recipients have a limited + * amount of time to respond, otherwise the system will timeout and * shutdown even without a response. - * Delivered to in-kernel IOKit drivers via IOService::systemWillShutdown(), + * Delivered to in-kernel IOKit drivers via IOService::systemWillShutdown(), * and to clients of registerPrioritySleepWakeInterest(). * Never delivered to user space notification clients. */ @@ -104,10 +104,10 @@ typedef UInt32 IOMessage; /*! * @defined kIOMessageSystemWillRestart - * @discussion Indicates an imminent system restart. Recipients have a limited - * amount of time to respond, otherwise the system will timeout and + * @discussion Indicates an imminent system restart. 
Recipients have a limited + * amount of time to respond, otherwise the system will timeout and * restart even without a response. - * Delivered to in-kernel IOKit drivers via IOService::systemWillShutdown(), + * Delivered to in-kernel IOKit drivers via IOService::systemWillShutdown(), * and to clients of registerPrioritySleepWakeInterest(). * Never delivered to user space notification clients. */ @@ -138,7 +138,7 @@ typedef UInt32 IOMessage; /*! * @defined kIOMessageSystemWillNotSleep - * @discussion Announces that the system has retracted a previous attempt to sleep; + * @discussion Announces that the system has retracted a previous attempt to sleep; * it follows kIOMessageCanSystemSleep. * Delivered to in-kernel IOKit drivers via kIOGeneralInterest * and kIOPriorityPowerStateInterest. @@ -157,7 +157,7 @@ typedef UInt32 IOMessage; /*! * @defined kIOMessageSystemWillPowerOn - * @discussion Announces that the system is beginning to power the device tree; most + * @discussion Announces that the system is beginning to power the device tree; most * devices are unavailable at this point.. * Delivered to in-kernel IOKit drivers via kIOGeneralInterest * and kIOPriorityPowerStateInterest. @@ -177,22 +177,22 @@ typedef UInt32 IOMessage; /*! @group Unused and deprecated notifications */ -/*! +/*! * @defined kIOMessageCanDevicePowerOff - * @discussion Delivered to kIOAppPowerStateInterest clients of + * @discussion Delivered to kIOAppPowerStateInterest clients of * devices that implement their own idle timeouts. * This message type is almost never used. */ #define kIOMessageCanDevicePowerOff iokit_common_msg(0x200) -/*! +/*! * @defined kIOMessageDeviceWillNotPowerOff - * @discussion This IOKit interest notification is largely unused; + * @discussion This IOKit interest notification is largely unused; * it's not very interesting. */ #define kIOMessageDeviceWillNotPowerOff iokit_common_msg(0x220) -/*! +/*! * @defined kIOMessageSystemWillNotPowerOff * @deprecated This IOKit message is unused. */ diff --git a/iokit/IOKit/IOMultiMemoryDescriptor.h b/iokit/IOKit/IOMultiMemoryDescriptor.h index 8d3fd47fe..995207421 100644 --- a/iokit/IOKit/IOMultiMemoryDescriptor.h +++ b/iokit/IOKit/IOMultiMemoryDescriptor.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,102 +32,102 @@ #include /*! 
@class IOMultiMemoryDescriptor : public IOMemoryDescriptor - @abstract The IOMultiMemoryDescriptor object describes a memory area made up of several other IOMemoryDescriptors. - @discussion The IOMultiMemoryDescriptor object represents multiple ranges of memory, specified as an ordered list of IOMemoryDescriptors. The descriptors are chained end-to-end to make up a single contiguous buffer. */ + * @abstract The IOMultiMemoryDescriptor object describes a memory area made up of several other IOMemoryDescriptors. + * @discussion The IOMultiMemoryDescriptor object represents multiple ranges of memory, specified as an ordered list of IOMemoryDescriptors. The descriptors are chained end-to-end to make up a single contiguous buffer. */ class IOMultiMemoryDescriptor : public IOMemoryDescriptor { - OSDeclareDefaultStructors(IOMultiMemoryDescriptor); + OSDeclareDefaultStructors(IOMultiMemoryDescriptor); protected: - IOMemoryDescriptor ** _descriptors; - UInt32 _descriptorsCount; - bool _descriptorsIsAllocated; + IOMemoryDescriptor ** _descriptors; + UInt32 _descriptorsCount; + bool _descriptorsIsAllocated; - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; public: /*! @function withDescriptors - @abstract Create an IOMultiMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. - @discussion This method creates and initializes an IOMultiMemoryDescriptor for memory consisting of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. Passing the descriptor array as a reference will avoid an extra allocation. - @param descriptors An array of IOMemoryDescriptors which make up the memory to be described. - @param withCount The object count for the descriptors array. - @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - @param asReference If false, the IOMultiMemoryDescriptor object will make a copy of the descriptors array, otherwise, the array will be used in situ, avoiding an extra allocation. - @result The created IOMultiMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - - static IOMultiMemoryDescriptor * withDescriptors( - IOMemoryDescriptor ** descriptors, - UInt32 withCount, - IODirection withDirection, - bool asReference = false ); + * @abstract Create an IOMultiMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. + * @discussion This method creates and initializes an IOMultiMemoryDescriptor for memory consisting of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. Passing the descriptor array as a reference will avoid an extra allocation. + * @param descriptors An array of IOMemoryDescriptors which make up the memory to be described. + * @param withCount The object count for the descriptors array. + * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * @param asReference If false, the IOMultiMemoryDescriptor object will make a copy of the descriptors array; otherwise, the array will be used in situ, avoiding an extra allocation. + * @result The created IOMultiMemoryDescriptor on success, to be released by the caller, or zero on failure.
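+ *
+ * A minimal sketch (assumes descA and descB are existing IOMemoryDescriptors):
+ *
+ *   IOMemoryDescriptor * parts[] = { descA, descB };
+ *   IOMultiMemoryDescriptor * md =
+ *       IOMultiMemoryDescriptor::withDescriptors(parts, 2, kIODirectionOutIn);
+ *   if (md) {
+ *       // md now behaves as one contiguous buffer spanning both descriptors
+ *       md->release();
+ *   }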
*/ + + static IOMultiMemoryDescriptor * withDescriptors( + IOMemoryDescriptor ** descriptors, + UInt32 withCount, + IODirection withDirection, + bool asReference = false ); /*! @function withDescriptors - @abstract Initialize an IOMultiMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. - @discussion This method initializes an IOMultiMemoryDescriptor for memory consisting of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. Passing the descriptor array as a reference will avoid an extra allocation. - @param descriptors An array of IOMemoryDescriptors which make up the memory to be described. - @param withCount The object count for the descriptors array. - @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - @param asReference If false, the IOMultiMemoryDescriptor object will make a copy of the descriptors array, otherwise, the array will be used in situ, avoiding an extra allocation. - @result The created IOMultiMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - - virtual bool initWithDescriptors( - IOMemoryDescriptor ** descriptors, - UInt32 withCount, - IODirection withDirection, - bool asReference = false ); + * @abstract Initialize an IOMultiMemoryDescriptor to describe a memory area made up of several other IOMemoryDescriptors. + * @discussion This method initializes an IOMultiMemoryDescriptor for memory consisting of a number of other IOMemoryDescriptors, chained end-to-end (in the order they appear in the array) to represent a single contiguous memory buffer. Passing the descriptor array as a reference will avoid an extra allocation. + * @param descriptors An array of IOMemoryDescriptors which make up the memory to be described. + * @param withCount The object count for the descriptors array. + * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * @param asReference If false, the IOMultiMemoryDescriptor object will make a copy of the descriptors array; otherwise, the array will be used in situ, avoiding an extra allocation. + * @result The created IOMultiMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + virtual bool initWithDescriptors( + IOMemoryDescriptor ** descriptors, + UInt32 withCount, + IODirection withDirection, + bool asReference = false ); /*! @function getPhysicalSegment - @abstract Break a memory descriptor into its physically contiguous segments. - @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset. - @param offset A byte offset into the memory whose physical address to return. - @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset. - @result A physical address, or zero if the offset is beyond the length of the memory. */ + * @abstract Break a memory descriptor into its physically contiguous segments. + * @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
+ * @param offset A byte offset into the memory whose physical address to return. + * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset. + * @result A physical address, or zero if the offset is beyond the length of the memory. */ - virtual addr64_t getPhysicalSegment( IOByteCount offset, - IOByteCount * length, - IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE; + virtual addr64_t getPhysicalSegment( IOByteCount offset, + IOByteCount * length, + IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE; /*! @function prepare - @abstract Prepare the memory for an I/O transfer. - @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. This method needn't called for non-pageable memory. - @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. - @result An IOReturn code. */ + * @abstract Prepare the memory for an I/O transfer. + * @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. This method needn't be called for non-pageable memory. + * @param forDirection The direction of the upcoming I/O transfer, or kIODirectionNone for the direction specified by the memory descriptor. + * @result An IOReturn code. */ - virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; /*! @function complete - @abstract Complete processing of the memory after an I/O transfer finishes. - @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. - @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. - @result An IOReturn code. */ + * @abstract Complete processing of the memory after an I/O transfer finishes. + * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. + * @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor. + * @result An IOReturn code. */ - virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; + virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; - virtual IOReturn setPurgeable(IOOptionBits newState, IOOptionBits * oldState) APPLE_KEXT_OVERRIDE; + virtual IOReturn setPurgeable(IOOptionBits newState, IOOptionBits * oldState) APPLE_KEXT_OVERRIDE; /*! @function getPageCounts - @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. - @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. - @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
- @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor. - @result An IOReturn code. */ + * @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. + * @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. + * @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor. + * @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor. + * @result An IOReturn code. */ - IOReturn getPageCounts(IOByteCount * residentPageCount, - IOByteCount * dirtyPageCount); + IOReturn getPageCounts(IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount); - virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE; + virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE; -#define IOMULTIMEMORYDESCRIPTOR_SUPPORTS_GETPAGECOUNTS 1 +#define IOMULTIMEMORYDESCRIPTOR_SUPPORTS_GETPAGECOUNTS 1 private: - virtual IOReturn doMap(vm_map_t addressMap, - IOVirtualAddress * atAddress, - IOOptionBits options, - IOByteCount sourceOffset = 0, - IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE; + virtual IOReturn doMap(vm_map_t addressMap, + IOVirtualAddress * atAddress, + IOOptionBits options, + IOByteCount sourceOffset = 0, + IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE; }; #endif /* !_IOMULTIMEMORYDESCRIPTOR_H */ diff --git a/iokit/IOKit/IONVRAM.h b/iokit/IOKit/IONVRAM.h index b58ea8490..a4da3d4fa 100644 --- a/iokit/IOKit/IONVRAM.h +++ b/iokit/IOKit/IONVRAM.h @@ -3,7 +3,7 @@ * Copyright (c) 2007-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +12,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,7 +23,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -45,140 +45,140 @@ #define MIN_SYNC_NOW_INTERVAL 15*60 /* Minimum 15 Minutes interval mandated */ enum { - kIODTNVRAMImageSize = 0x2000, - kIODTNVRAMXPRAMSize = 0x0100, - kIODTNVRAMNameRegistrySize = 0x0400 + kIODTNVRAMImageSize = 0x2000, + kIODTNVRAMXPRAMSize = 0x0100, + kIODTNVRAMNameRegistrySize = 0x0400 }; enum { - kOFVariableTypeBoolean = 1, - kOFVariableTypeNumber, - kOFVariableTypeString, - kOFVariableTypeData + kOFVariableTypeBoolean = 1, + kOFVariableTypeNumber, + kOFVariableTypeString, + kOFVariableTypeData }; enum { - kOFVariablePermRootOnly = 0, - kOFVariablePermUserRead, - kOFVariablePermUserWrite, - kOFVariablePermKernelOnly + kOFVariablePermRootOnly = 0, + kOFVariablePermUserRead, + kOFVariablePermUserWrite, + kOFVariablePermKernelOnly }; #ifdef __cplusplus class IODTNVRAM : public IOService { - OSDeclareDefaultStructors(IODTNVRAM); - + OSDeclareDefaultStructors(IODTNVRAM); + private: - IONVRAMController *_nvramController; - const OSSymbol *_registryPropertiesKey; - UInt8 *_nvramImage; - __unused bool _nvramImageDirty; - UInt32 _ofPartitionOffset; - UInt32 _ofPartitionSize; - UInt8 *_ofImage; - __unused bool _ofImageDirty; - OSDictionary *_ofDict; - OSDictionary *_nvramPartitionOffsets; - OSDictionary *_nvramPartitionLengths; - UInt32 _resv0 __unused; - UInt32 _resv1 __unused; - IOLock *_ofLock; - UInt32 _resv2 __unused; - UInt32 _resv3 __unused; - UInt8 *_resv4 __unused; - UInt32 _piPartitionOffset; - UInt32 _piPartitionSize; - UInt8 *_piImage; - bool _systemPaniced; - SInt32 _lastDeviceSync; - bool _freshInterval; - bool _isProxied; - - virtual UInt8 calculatePartitionChecksum(UInt8 *partitionHeader); - virtual IOReturn initOFVariables(void); + IONVRAMController *_nvramController; + const OSSymbol *_registryPropertiesKey; + UInt8 *_nvramImage; + __unused bool _nvramImageDirty; + UInt32 _ofPartitionOffset; + UInt32 _ofPartitionSize; + UInt8 *_ofImage; + __unused bool _ofImageDirty; + OSDictionary *_ofDict; + OSDictionary *_nvramPartitionOffsets; + OSDictionary *_nvramPartitionLengths; + UInt32 _resv0 __unused; + UInt32 _resv1 __unused; + IOLock *_ofLock; + UInt32 _resv2 __unused; + UInt32 _resv3 __unused; + UInt8 *_resv4 __unused; + UInt32 _piPartitionOffset; + UInt32 _piPartitionSize; + UInt8 *_piImage; + bool _systemPaniced; + SInt32 _lastDeviceSync; + bool _freshInterval; + bool _isProxied; + + virtual UInt8 calculatePartitionChecksum(UInt8 *partitionHeader); + virtual IOReturn initOFVariables(void); public: - virtual IOReturn syncOFVariables(void); + virtual IOReturn syncOFVariables(void); private: - virtual UInt32 getOFVariableType(const OSSymbol *propSymbol) const; - virtual UInt32 getOFVariablePerm(const OSSymbol *propSymbol) const; - virtual bool getOWVariableInfo(UInt32 variableNumber, const OSSymbol **propSymbol, - UInt32 *propType, UInt32 *propOffset); - virtual bool convertPropToObject(UInt8 *propName, UInt32 propNameLength, - UInt8 *propData, UInt32 propDataLength, - const OSSymbol **propSymbol, - OSObject **propObject); - virtual bool convertObjectToProp(UInt8 *buffer, UInt32 *length, - const OSSymbol *propSymbol, OSObject *propObject); - virtual UInt16 generateOWChecksum(UInt8 *buffer); - virtual bool validateOWChecksum(UInt8 *buffer); - virtual void updateOWBootArgs(const OSSymbol *key, OSObject *value); - virtual bool searchNVRAMProperty(struct IONVRAMDescriptor *hdr, - UInt32 *where); - - virtual IOReturn readNVRAMPropertyType0(IORegistryEntry *entry, - const OSSymbol **name, - OSData **value); - 
virtual IOReturn writeNVRAMPropertyType0(IORegistryEntry *entry, - const OSSymbol *name, - OSData * value); - - virtual OSData *unescapeBytesToData(const UInt8 *bytes, UInt32 length); - virtual OSData *escapeDataToData(OSData * value); - - virtual IOReturn readNVRAMPropertyType1(IORegistryEntry *entry, - const OSSymbol **name, - OSData **value); - virtual IOReturn writeNVRAMPropertyType1(IORegistryEntry *entry, - const OSSymbol *name, - OSData *value); - - void initNVRAMImage(void); - void initProxyData(void); - IOReturn syncVariables(void); - + virtual UInt32 getOFVariableType(const OSSymbol *propSymbol) const; + virtual UInt32 getOFVariablePerm(const OSSymbol *propSymbol) const; + virtual bool getOWVariableInfo(UInt32 variableNumber, const OSSymbol **propSymbol, + UInt32 *propType, UInt32 *propOffset); + virtual bool convertPropToObject(UInt8 *propName, UInt32 propNameLength, + UInt8 *propData, UInt32 propDataLength, + const OSSymbol **propSymbol, + OSObject **propObject); + virtual bool convertObjectToProp(UInt8 *buffer, UInt32 *length, + const OSSymbol *propSymbol, OSObject *propObject); + virtual UInt16 generateOWChecksum(UInt8 *buffer); + virtual bool validateOWChecksum(UInt8 *buffer); + virtual void updateOWBootArgs(const OSSymbol *key, OSObject *value); + virtual bool searchNVRAMProperty(struct IONVRAMDescriptor *hdr, + UInt32 *where); + + virtual IOReturn readNVRAMPropertyType0(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value); + virtual IOReturn writeNVRAMPropertyType0(IORegistryEntry *entry, + const OSSymbol *name, + OSData * value); + + virtual OSData *unescapeBytesToData(const UInt8 *bytes, UInt32 length); + virtual OSData *escapeDataToData(OSData * value); + + virtual IOReturn readNVRAMPropertyType1(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value); + virtual IOReturn writeNVRAMPropertyType1(IORegistryEntry *entry, + const OSSymbol *name, + OSData *value); + + void initNVRAMImage(void); + void initProxyData(void); + IOReturn syncVariables(void); + public: - virtual bool init(IORegistryEntry *old, const IORegistryPlane *plane) APPLE_KEXT_OVERRIDE; - - virtual void registerNVRAMController(IONVRAMController *nvram); - - virtual void sync(void); - - virtual bool serializeProperties(OSSerialize *s) const APPLE_KEXT_OVERRIDE; - virtual OSObject *copyProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; - virtual OSObject *copyProperty(const char *aKey) const APPLE_KEXT_OVERRIDE; - virtual OSObject *getProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; - virtual OSObject *getProperty(const char *aKey) const APPLE_KEXT_OVERRIDE; - virtual bool setProperty(const OSSymbol *aKey, OSObject *anObject) APPLE_KEXT_OVERRIDE; - virtual void removeProperty(const OSSymbol *aKey) APPLE_KEXT_OVERRIDE; - virtual IOReturn setProperties(OSObject *properties) APPLE_KEXT_OVERRIDE; - - virtual IOReturn readXPRAM(IOByteCount offset, UInt8 *buffer, - IOByteCount length); - virtual IOReturn writeXPRAM(IOByteCount offset, UInt8 *buffer, - IOByteCount length); - - virtual IOReturn readNVRAMProperty(IORegistryEntry *entry, - const OSSymbol **name, - OSData **value); - virtual IOReturn writeNVRAMProperty(IORegistryEntry *entry, - const OSSymbol *name, - OSData *value); - - virtual OSDictionary *getNVRAMPartitions(void); - - virtual IOReturn readNVRAMPartition(const OSSymbol *partitionID, - IOByteCount offset, UInt8 *buffer, - IOByteCount length); - - virtual IOReturn writeNVRAMPartition(const OSSymbol *partitionID, - IOByteCount offset, UInt8 *buffer, - IOByteCount 
length); - - virtual IOByteCount savePanicInfo(UInt8 *buffer, IOByteCount length); - virtual bool safeToSync(void); - void syncInternal(bool rateLimit); + virtual bool init(IORegistryEntry *old, const IORegistryPlane *plane) APPLE_KEXT_OVERRIDE; + + virtual void registerNVRAMController(IONVRAMController *nvram); + + virtual void sync(void); + + virtual bool serializeProperties(OSSerialize *s) const APPLE_KEXT_OVERRIDE; + virtual OSObject *copyProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; + virtual OSObject *copyProperty(const char *aKey) const APPLE_KEXT_OVERRIDE; + virtual OSObject *getProperty(const OSSymbol *aKey) const APPLE_KEXT_OVERRIDE; + virtual OSObject *getProperty(const char *aKey) const APPLE_KEXT_OVERRIDE; + virtual bool setProperty(const OSSymbol *aKey, OSObject *anObject) APPLE_KEXT_OVERRIDE; + virtual void removeProperty(const OSSymbol *aKey) APPLE_KEXT_OVERRIDE; + virtual IOReturn setProperties(OSObject *properties) APPLE_KEXT_OVERRIDE; + + virtual IOReturn readXPRAM(IOByteCount offset, UInt8 *buffer, + IOByteCount length); + virtual IOReturn writeXPRAM(IOByteCount offset, UInt8 *buffer, + IOByteCount length); + + virtual IOReturn readNVRAMProperty(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value); + virtual IOReturn writeNVRAMProperty(IORegistryEntry *entry, + const OSSymbol *name, + OSData *value); + + virtual OSDictionary *getNVRAMPartitions(void); + + virtual IOReturn readNVRAMPartition(const OSSymbol *partitionID, + IOByteCount offset, UInt8 *buffer, + IOByteCount length); + + virtual IOReturn writeNVRAMPartition(const OSSymbol *partitionID, + IOByteCount offset, UInt8 *buffer, + IOByteCount length); + + virtual IOByteCount savePanicInfo(UInt8 *buffer, IOByteCount length); + virtual bool safeToSync(void); + void syncInternal(bool rateLimit); }; #endif /* __cplusplus */ diff --git a/iokit/IOKit/IONotifier.h b/iokit/IOKit/IONotifier.h index 8f4378ab7..0ee138617 100644 --- a/iokit/IOKit/IONotifier.h +++ b/iokit/IOKit/IONotifier.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * HISTORY * @@ -38,35 +38,34 @@ #include /*! @class IONotifier : public OSObject - @abstract An abstract base class defining common methods for controlling a notification request. 
- @discussion IOService notification requests are represented as implementations of the IONotifier object. It defines methods to enable, disable and remove notification requests. These actions are synchronized with invocations of the notification handler, so removing a notification request will guarantee the handler is not being executed. */ + * @abstract An abstract base class defining common methods for controlling a notification request. + * @discussion IOService notification requests are represented as implementations of the IONotifier object. It defines methods to enable, disable and remove notification requests. These actions are synchronized with invocations of the notification handler, so removing a notification request will guarantee the handler is not being executed. */ class IONotifier : public OSObject { - OSDeclareAbstractStructors(IONotifier) + OSDeclareAbstractStructors(IONotifier) public: /*! @function remove - @abstract Removes the notification request and releases it. - @discussion Removes the notification request and release it. Since creating an IONotifier instance will leave it with a retain count of one, creating an IONotifier and then removing it will destroy it. This method is synchronous with any handler invocations, so when this method returns its guaranteed the handler will not be in entered. */ + * @abstract Removes the notification request and releases it. + * @discussion Removes the notification request and releases it. Since creating an IONotifier instance will leave it with a retain count of one, creating an IONotifier and then removing it will destroy it. This method is synchronous with any handler invocations, so when this method returns it is guaranteed the handler will not be entered. */ - virtual void remove() = 0; + virtual void remove() = 0; /*! @function disable - @abstract Disables the notification request. - @discussion Disables the notification request. This method is synchronous with any handler invocations, so when this method returns its guaranteed the handler will not be in entered. - @result Returns the previous enable state of the IONotifier. */ + * @abstract Disables the notification request. + * @discussion Disables the notification request. This method is synchronous with any handler invocations, so when this method returns it is guaranteed the handler will not be entered. + * @result Returns the previous enable state of the IONotifier. */ - virtual bool disable() = 0; - -/*! @function enable - @abstract Sets the enable state of the notification request. - @discussion Restores the enable state of the notification request, given the previous state passed in. - @param was The enable state of the notifier to restore. */ + virtual bool disable() = 0; - virtual void enable( bool was ) = 0; +/*! @function enable + * @abstract Sets the enable state of the notification request. + * @discussion Restores the enable state of the notification request, given the previous state passed in. + * @param was The enable state of the notifier to restore. */ + virtual void enable( bool was ) = 0; }; #endif /* ! _IOKIT_IONOTIFIER_H */ diff --git a/iokit/IOKit/IOPlatformExpert.h b/iokit/IOKit/IOPlatformExpert.h index 4f5c8067f..7f47bc62c 100644 --- a/iokit/IOKit/IOPlatformExpert.h +++ b/iokit/IOKit/IOPlatformExpert.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. * * HISTORY * @@ -48,9 +48,9 @@ extern "C" { #include typedef enum { - kCoprocessorVersionNone = 0x00000000, - kCoprocessorVersion1 = 0x00010000, - kCoprocessorVersion2 = 0x00020000, + kCoprocessorVersionNone = 0x00000000, + kCoprocessorVersion1 = 0x00010000, + kCoprocessorVersion2 = 0x00020000, } coprocessor_type_t; @@ -59,16 +59,16 @@ extern boolean_t PEGetModelName( char * name, int maxLength ); extern int PEGetPlatformEpoch( void ); enum { - kPEHaltCPU, - kPERestartCPU, - kPEHangCPU, - kPEUPSDelayHaltCPU, - kPEPanicRestartCPU, - kPEPanicSync, - kPEPagingOff, - kPEPanicBegin, - kPEPanicEnd, - kPEPanicDiskShutdown + kPEHaltCPU, + kPERestartCPU, + kPEHangCPU, + kPEUPSDelayHaltCPU, + kPEPanicRestartCPU, + kPEPanicSync, + kPEPagingOff, + kPEPanicBegin, + kPEPanicEnd, + kPEPanicDiskShutdown }; extern int (*PE_halt_restart)(unsigned int type); extern int PEHaltRestart(unsigned int type); @@ -85,7 +85,7 @@ extern void IOSystemShutdownNotification(int stage); extern UInt32 PESavePanicInfo(UInt8 *buffer, UInt32 length); extern void PESavePanicInfoAction(void *buffer, UInt32 offset, UInt32 length); -/* +/* * SMC requires that all data is flushed in multiples of 16 bytes at 16 byte * boundaries. 
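+ *
+ * For example, a caller flushing a 10-byte record would round its length up
+ * to the next 16-byte multiple before writing:
+ *
+ *   length = (length + 15) & ~15;   // 10 -> 16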
@@ -85,7 +85,7 @@ extern void IOSystemShutdownNotification(int stage);

 extern UInt32 PESavePanicInfo(UInt8 *buffer, UInt32 length);
 extern void PESavePanicInfoAction(void *buffer, UInt32 offset, UInt32 length);

-/* 
+/*
  * SMC requires that all data is flushed in multiples of 16 bytes at 16 byte
  * boundaries.
  */
@@ -100,6 +100,7 @@ extern void PESetUTCTimeOfDay( clock_sec_t secs, clock_usec_t usecs );

 extern boolean_t PEWriteNVRAMBooleanProperty(const char *symbol, boolean_t value);
 extern boolean_t PEWriteNVRAMProperty(const char *symbol, const void *value, const unsigned int len);
+extern boolean_t PEWriteNVRAMPropertyWithCopy(const char *symbol, const void *value, const unsigned int len);

 extern boolean_t PEReadNVRAMProperty(const char *symbol, void *value, unsigned int *len);
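This hunk adds PEWriteNVRAMPropertyWithCopy() alongside the existing property calls. A hedged round-trip sketch of the read/write pair declared above (the key name is an example, not from this patch):

#include <IOKit/IOPlatformExpert.h>

// Sketch: write then read back a small NVRAM property.
static void
ExampleNVRAMRoundTrip(void)
{
        const char * key = "example-kext-flag";    // hypothetical property name
        uint32_t     val = 1;

        // Returns FALSE if no NVRAM controller has registered yet.
        if (PEWriteNVRAMProperty(key, &val, sizeof(val))) {
                uint32_t     back = 0;
                unsigned int len  = sizeof(back);
                if (PEReadNVRAMProperty(key, &back, &len)) {
                        /* 'len' now holds the stored property's length */
                }
        }
}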
@@ -117,12 +118,12 @@ extern coprocessor_type_t PEGetCoprocessorVersion( void );

 extern OSSymbol * gPlatformInterruptControllerName;

-extern const OSSymbol * gIOPlatformSleepActionKey;
-extern const OSSymbol * gIOPlatformWakeActionKey;
-extern const OSSymbol * gIOPlatformQuiesceActionKey;
-extern const OSSymbol * gIOPlatformActiveActionKey;
-extern const OSSymbol * gIOPlatformHaltRestartActionKey;
-extern const OSSymbol * gIOPlatformPanicActionKey;
+extern const OSSymbol * gIOPlatformSleepActionKey;
+extern const OSSymbol * gIOPlatformWakeActionKey;
+extern const OSSymbol * gIOPlatformQuiesceActionKey;
+extern const OSSymbol * gIOPlatformActiveActionKey;
+extern const OSSymbol * gIOPlatformHaltRestartActionKey;
+extern const OSSymbol * gIOPlatformPanicActionKey;

 class IORangeAllocator;
 class IONVRAMController;
@@ -130,106 +131,108 @@ class IOPMrootDomain;

 class IOPlatformExpert : public IOService
 {
-    OSDeclareDefaultStructors(IOPlatformExpert);
+        OSDeclareDefaultStructors(IOPlatformExpert);

 private:
-    long _peBootROMType;
-    long _peChipSetType;
-    long _peMachineType;
+        long _peBootROMType;
+        long _peChipSetType;
+        long _peMachineType;

 protected:
-    IOPMrootDomain * root;
-    int _pePMFeatures;
-    int _pePrivPMFeatures;
-    int _peNumBatteriesSupported;
-    OSArray * thePowerTree;
+        IOPMrootDomain * root;
+        int _pePMFeatures;
+        int _pePrivPMFeatures;
+        int _peNumBatteriesSupported;
+        OSArray * thePowerTree;

-    bool searchingForAdditionalParents;
-    OSNumber * multipleParentKeyValue;
-    int numInstancesRegistered;
+        bool searchingForAdditionalParents;
+        OSNumber * multipleParentKeyValue;
+        int numInstancesRegistered;

-    struct ExpansionData { };
-    ExpansionData *iope_reserved __unused;
+        struct ExpansionData { };
+        ExpansionData *iope_reserved __unused;

-    virtual void setBootROMType(long peBootROMType);
-    virtual void setChipSetType(long peChipSetType);
-    virtual void setMachineType(long peMachineType);
+        virtual void setBootROMType(long peBootROMType);
+        virtual void setChipSetType(long peChipSetType);
+        virtual void setMachineType(long peMachineType);

-    virtual bool CheckSubTree (OSArray * inSubTree, IOService * theNub, IOService * theDevice, OSDictionary * theParent);
-    virtual bool RegisterServiceInTree (IOService * theService, OSDictionary * theTreeNode, OSDictionary * theTreeParentNode, IOService * theProvider);
+        virtual bool CheckSubTree(OSArray * inSubTree, IOService * theNub, IOService * theDevice, OSDictionary * theParent);
+        virtual bool RegisterServiceInTree(IOService * theService, OSDictionary * theTreeNode, OSDictionary * theTreeParentNode, IOService * theProvider);

-    virtual void PMInstantiatePowerDomains ( void );
+        virtual void PMInstantiatePowerDomains( void );

 public:
-    virtual bool attach( IOService * provider ) APPLE_KEXT_OVERRIDE;
-    virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE;
-    virtual bool configure( IOService * provider );
-    virtual IOService * createNub( OSDictionary * from );
+        virtual bool attach( IOService * provider ) APPLE_KEXT_OVERRIDE;
+        virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE;
+        virtual bool configure( IOService * provider );
+        virtual IOService * createNub( OSDictionary * from );
+
+        virtual bool compareNubName( const IOService * nub, OSString * name,
+            OSString ** matched = 0 ) const;
+        virtual IOReturn getNubResources( IOService * nub );

-    virtual bool compareNubName( const IOService * nub, OSString * name,
-                                 OSString ** matched = 0 ) const;
-    virtual IOReturn getNubResources( IOService * nub );
+        virtual long getBootROMType(void);
+        virtual long getChipSetType(void);
+        virtual long getMachineType(void);

-    virtual long getBootROMType(void);
-    virtual long getChipSetType(void);
-    virtual long getMachineType(void);
+        virtual bool getModelName( char * name, int maxLength );
+        virtual bool getMachineName( char * name, int maxLength );

-    virtual bool getModelName( char * name, int maxLength );
-    virtual bool getMachineName( char * name, int maxLength );
+        virtual int haltRestart(unsigned int type);
+        virtual void sleepKernel(void);

-    virtual int haltRestart(unsigned int type);
-    virtual void sleepKernel(void);
+        virtual long getGMTTimeOfDay( void );
+        virtual void setGMTTimeOfDay( long secs );

-    virtual long getGMTTimeOfDay( void );
-    virtual void setGMTTimeOfDay( long secs );
+        virtual IOReturn getConsoleInfo( PE_Video * consoleInfo );
+        virtual IOReturn setConsoleInfo( PE_Video * consoleInfo, unsigned int op );

-    virtual IOReturn getConsoleInfo( PE_Video * consoleInfo );
-    virtual IOReturn setConsoleInfo( PE_Video * consoleInfo, unsigned int op );
+        virtual void registerNVRAMController( IONVRAMController * nvram );

-    virtual void registerNVRAMController( IONVRAMController * nvram );
+        virtual IOReturn registerInterruptController(OSSymbol *name, IOInterruptController *interruptController);
+        virtual LIBKERN_RETURNS_NOT_RETAINED IOInterruptController *
+        lookUpInterruptController(OSSymbol *name);

-    virtual IOReturn registerInterruptController(OSSymbol *name, IOInterruptController *interruptController);
-    virtual IOInterruptController *lookUpInterruptController(OSSymbol *name);
-    virtual void setCPUInterruptProperties(IOService *service);
-    virtual bool atInterruptLevel(void);
+        virtual void setCPUInterruptProperties(IOService *service);
+        virtual bool atInterruptLevel(void);

-    virtual IOReturn callPlatformFunction(const OSSymbol *functionName,
-                                          bool waitForFunction,
-                                          void *param1, void *param2,
-                                          void *param3, void *param4) APPLE_KEXT_OVERRIDE;
+        virtual IOReturn callPlatformFunction(const OSSymbol *functionName,
+            bool waitForFunction,
+            void *param1, void *param2,
+            void *param3, void *param4) APPLE_KEXT_OVERRIDE;

-    virtual IORangeAllocator * getPhysicalRangeAllocator(void);
+        virtual IORangeAllocator * getPhysicalRangeAllocator(void);

-    virtual bool platformAdjustService(IOService *service);
+        virtual bool platformAdjustService(IOService *service);

-    virtual void PMRegisterDevice(IOService * theNub, IOService * theDevice);
-    virtual void PMLog ( const char *,unsigned long, unsigned long, unsigned long );
+        virtual void PMRegisterDevice(IOService * theNub, IOService * theDevice);
+        virtual void PMLog( const char *, unsigned long, unsigned long, unsigned long );

-    virtual bool hasPMFeature (unsigned long featureMask);
-    virtual bool hasPrivPMFeature (unsigned long privFeatureMask);
-    virtual int numBatteriesSupported (void);
+        virtual bool hasPMFeature(unsigned long featureMask);
+        virtual bool hasPrivPMFeature(unsigned long privFeatureMask);
+        virtual int numBatteriesSupported(void);

-    virtual IOByteCount savePanicInfo(UInt8 *buffer,
-                                      IOByteCount length);
+        virtual IOByteCount savePanicInfo(UInt8 *buffer, IOByteCount length);

-    virtual OSString* createSystemSerialNumberString(OSData* myProperty);
+        virtual OSString* createSystemSerialNumberString(OSData* myProperty);

-    virtual IOReturn deregisterInterruptController(OSSymbol *name);
+        virtual IOReturn deregisterInterruptController(OSSymbol *name);

-    virtual void getUTCTimeOfDay( clock_sec_t * secs, clock_nsec_t * nsecs );
-    virtual void setUTCTimeOfDay( clock_sec_t secs, clock_nsec_t nsecs );
+        virtual void getUTCTimeOfDay( clock_sec_t * secs, clock_nsec_t * nsecs );
+        virtual void setUTCTimeOfDay( clock_sec_t secs, clock_nsec_t nsecs );

-    OSMetaClassDeclareReservedUsed(IOPlatformExpert, 0);
-    OSMetaClassDeclareReservedUsed(IOPlatformExpert, 1);
-    OSMetaClassDeclareReservedUsed(IOPlatformExpert, 2);
-    OSMetaClassDeclareReservedUsed(IOPlatformExpert, 3);
-    OSMetaClassDeclareReservedUsed(IOPlatformExpert, 4);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpert, 5);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpert, 6);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpert, 7);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpert, 8);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpert, 9);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpert, 10);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpert, 11);
+        OSMetaClassDeclareReservedUsed(IOPlatformExpert, 0);
+        OSMetaClassDeclareReservedUsed(IOPlatformExpert, 1);
+        OSMetaClassDeclareReservedUsed(IOPlatformExpert, 2);
+        OSMetaClassDeclareReservedUsed(IOPlatformExpert, 3);
+        OSMetaClassDeclareReservedUsed(IOPlatformExpert, 4);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpert, 5);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpert, 6);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpert, 7);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpert, 8);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpert, 9);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpert, 10);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpert, 11);
 };

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

@@ -238,75 +241,75 @@ class IODTNVRAM;

 class IODTPlatformExpert : public IOPlatformExpert
 {
-    OSDeclareAbstractStructors(IODTPlatformExpert);
+        OSDeclareAbstractStructors(IODTPlatformExpert);

 private:
-    IODTNVRAM *dtNVRAM;
+        IODTNVRAM *dtNVRAM;

-    struct ExpansionData { };
-    ExpansionData *iodtpe_reserved;
+        struct ExpansionData { };
+        ExpansionData *iodtpe_reserved;

 public:
-    virtual IOService * probe( IOService * provider,
-                               SInt32 * score ) APPLE_KEXT_OVERRIDE;
-    virtual bool configure( IOService * provider ) APPLE_KEXT_OVERRIDE;
-
-    virtual void processTopLevel( IORegistryEntry * root );
-    virtual const char * deleteList( void ) = 0;
-    virtual const char * excludeList( void ) = 0;
-    virtual IOService * createNub( IORegistryEntry * from );
-    virtual bool createNubs( IOService * parent, OSIterator * iter );
-
-    virtual bool compareNubName( const IOService * nub, OSString * name,
-                                 OSString ** matched = 0 ) const APPLE_KEXT_OVERRIDE;
-
-    virtual IOReturn getNubResources( IOService * nub ) APPLE_KEXT_OVERRIDE;
-
-    virtual bool getModelName( char * name, int maxLength ) APPLE_KEXT_OVERRIDE;
-    virtual bool getMachineName( char * name, int maxLength ) APPLE_KEXT_OVERRIDE;
-
-    virtual void registerNVRAMController( IONVRAMController * nvram ) APPLE_KEXT_OVERRIDE;
-
-    virtual int haltRestart(unsigned int type) APPLE_KEXT_OVERRIDE;
-
-    /* virtual */ IOReturn readXPRAM(IOByteCount offset, UInt8 * buffer,
-                                     IOByteCount length);
-
-    /* virtual */ IOReturn writeXPRAM(IOByteCount offset, UInt8 * buffer,
-                                      IOByteCount length);
-
-    virtual IOReturn readNVRAMProperty(
-        IORegistryEntry * entry,
-        const OSSymbol ** name, OSData ** value );
-
-    virtual IOReturn writeNVRAMProperty(
-        IORegistryEntry * entry,
-        const OSSymbol * name, OSData * value );
-
-    // This returns a dictionary describing all the NVRAM partitions.
-    // The keys will be the partitionIDs of the form "0x52,nvram".
-    // The values will be OSNumbers of the partition's byte count.
-    /* virtual */ OSDictionary *getNVRAMPartitions(void);
-
-    /* virtual */ IOReturn readNVRAMPartition(const OSSymbol * partitionID,
-                                              IOByteCount offset, UInt8 * buffer,
-                                              IOByteCount length);
-
-    /* virtual */ IOReturn writeNVRAMPartition(const OSSymbol * partitionID,
-                                               IOByteCount offset, UInt8 * buffer,
-                                               IOByteCount length);
-
-    virtual IOByteCount savePanicInfo(UInt8 *buffer, IOByteCount length) APPLE_KEXT_OVERRIDE;
-    virtual OSString* createSystemSerialNumberString(OSData* myProperty) APPLE_KEXT_OVERRIDE;
-
-    OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 0);
-    OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 1);
-    OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 2);
-    OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 3);
-    OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 4);
-    OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 5);
-    OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 6);
-    OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 7);
+        virtual IOService * probe( IOService * provider,
+            SInt32 * score ) APPLE_KEXT_OVERRIDE;
+        virtual bool configure( IOService * provider ) APPLE_KEXT_OVERRIDE;
+
+        virtual void processTopLevel( IORegistryEntry * root );
+        virtual const char * deleteList( void ) = 0;
+        virtual const char * excludeList( void ) = 0;
+        virtual IOService * createNub( IORegistryEntry * from );
+        virtual bool createNubs( IOService * parent, LIBKERN_CONSUMED OSIterator * iter );
+
+        virtual bool compareNubName( const IOService * nub, OSString * name,
+            OSString ** matched = 0 ) const APPLE_KEXT_OVERRIDE;
+
+        virtual IOReturn getNubResources( IOService * nub ) APPLE_KEXT_OVERRIDE;
+
+        virtual bool getModelName( char * name, int maxLength ) APPLE_KEXT_OVERRIDE;
+        virtual bool getMachineName( char * name, int maxLength ) APPLE_KEXT_OVERRIDE;
+
+        virtual void registerNVRAMController( IONVRAMController * nvram ) APPLE_KEXT_OVERRIDE;
+
+        virtual int haltRestart(unsigned int type) APPLE_KEXT_OVERRIDE;
+
+/* virtual */ IOReturn readXPRAM(IOByteCount offset, UInt8 * buffer,
+            IOByteCount length);
+
+/* virtual */ IOReturn writeXPRAM(IOByteCount offset, UInt8 * buffer,
+            IOByteCount length);
+
+        virtual IOReturn readNVRAMProperty(
+                IORegistryEntry * entry,
+                const OSSymbol ** name, OSData ** value );
+
+        virtual IOReturn writeNVRAMProperty(
+                IORegistryEntry * entry,
+                const OSSymbol * name, OSData * value );
+
+// This returns a dictionary describing all the NVRAM partitions.
+// The keys will be the partitionIDs of the form "0x52,nvram".
+// The values will be OSNumbers of the partition's byte count.
+/* virtual */ OSDictionary *getNVRAMPartitions(void);
+
+/* virtual */ IOReturn readNVRAMPartition(const OSSymbol * partitionID,
+            IOByteCount offset, UInt8 * buffer,
+            IOByteCount length);
+
+/* virtual */ IOReturn writeNVRAMPartition(const OSSymbol * partitionID,
+            IOByteCount offset, UInt8 * buffer,
+            IOByteCount length);
+
+        virtual IOByteCount savePanicInfo(UInt8 *buffer, IOByteCount length) APPLE_KEXT_OVERRIDE;
+        virtual OSString* createSystemSerialNumberString(OSData* myProperty) APPLE_KEXT_OVERRIDE;
+
+        OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 0);
+        OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 1);
+        OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 2);
+        OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 3);
+        OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 4);
+        OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 5);
+        OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 6);
+        OSMetaClassDeclareReservedUnused(IODTPlatformExpert, 7);
 };

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
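The getNVRAMPartitions() comment above documents the dictionary shape (partition-ID keys such as "0x52,nvram", OSNumber byte counts). A hedged enumeration sketch; dictionary ownership is not stated by the header, so it is treated as borrowed here (an assumption):

#include <IOKit/IOPlatformExpert.h>
#include <libkern/c++/OSCollectionIterator.h>

// Sketch: list NVRAM partitions via the device-tree platform expert.
static void
ExampleListNVRAMPartitions(void)
{
        IODTPlatformExpert * dtPE =
            OSDynamicCast(IODTPlatformExpert, IOService::getPlatform());
        if (!dtPE) {
                return;
        }
        OSDictionary * parts = dtPE->getNVRAMPartitions();
        if (!parts) {
                return;
        }
        OSCollectionIterator * iter = OSCollectionIterator::withCollection(parts);
        if (iter) {
                const OSSymbol * partID;        // keys like "0x52,nvram"
                while ((partID = OSDynamicCast(OSSymbol, iter->getNextObject()))) {
                        OSNumber * byteCount =
                            OSDynamicCast(OSNumber, parts->getObject(partID));
                        (void) byteCount;       // byteCount->unsigned32BitValue() is the size
                }
                iter->release();
        }
}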
@@ -315,33 +318,33 @@ public:

 class IOPlatformExpertDevice : public IOService
 {
-    OSDeclareDefaultStructors(IOPlatformExpertDevice)
+        OSDeclareDefaultStructors(IOPlatformExpertDevice)

 private:
-    IOWorkLoop *workLoop;
+        IOWorkLoop *workLoop;

-    struct ExpansionData { };
-    ExpansionData *ioped_reserved __unused;
+        struct ExpansionData { };
+        ExpansionData *ioped_reserved __unused;

 public:
-    virtual bool initWithArgs( void * p1, void * p2,
-                               void * p3, void *p4 );
-    virtual bool compareName( OSString * name, OSString ** matched = 0 ) const APPLE_KEXT_OVERRIDE;
+        virtual bool initWithArgs( void * p1, void * p2,
+            void * p3, void *p4 );
+        virtual bool compareName( OSString * name, OSString ** matched = 0 ) const APPLE_KEXT_OVERRIDE;

-    virtual IOWorkLoop *getWorkLoop() const APPLE_KEXT_OVERRIDE;
-    virtual IOReturn setProperties( OSObject * properties ) APPLE_KEXT_OVERRIDE;
+        virtual IOWorkLoop *getWorkLoop() const APPLE_KEXT_OVERRIDE;
+        virtual IOReturn setProperties( OSObject * properties ) APPLE_KEXT_OVERRIDE;

-    virtual void free() APPLE_KEXT_OVERRIDE;
+        virtual void free() APPLE_KEXT_OVERRIDE;

-    virtual IOReturn newUserClient( task_t owningTask, void * securityID,
-                                    UInt32 type, OSDictionary * properties,
-                                    IOUserClient ** handler) APPLE_KEXT_OVERRIDE;
+        virtual IOReturn newUserClient( task_t owningTask, void * securityID,
+            UInt32 type, OSDictionary * properties,
+            IOUserClient ** handler) APPLE_KEXT_OVERRIDE;

-    OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 0);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 1);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 2);
-    OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 3);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 0);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 1);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 2);
+        OSMetaClassDeclareReservedUnused(IOPlatformExpertDevice, 3);
 };

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

@@ -350,20 +353,20 @@ public:

 class IOPlatformDevice : public IOService
 {
-    OSDeclareDefaultStructors(IOPlatformDevice)
+        OSDeclareDefaultStructors(IOPlatformDevice)

-    struct ExpansionData { };
-    ExpansionData *iopd_reserved;
+        struct ExpansionData { };
+        ExpansionData *iopd_reserved;

 public:
-    virtual bool compareName( OSString * name, OSString ** matched = 0 ) const APPLE_KEXT_OVERRIDE;
-    virtual IOService * matchLocation( IOService * client ) APPLE_KEXT_OVERRIDE;
-    virtual IOReturn getResources( void ) APPLE_KEXT_OVERRIDE;
-
-    OSMetaClassDeclareReservedUnused(IOPlatformDevice, 0);
-    OSMetaClassDeclareReservedUnused(IOPlatformDevice, 1);
-    OSMetaClassDeclareReservedUnused(IOPlatformDevice, 2);
-    OSMetaClassDeclareReservedUnused(IOPlatformDevice, 3);
+        virtual bool compareName( OSString * name, OSString ** matched = 0 ) const APPLE_KEXT_OVERRIDE;
+        virtual IOService * matchLocation( IOService * client ) APPLE_KEXT_OVERRIDE;
+        virtual IOReturn getResources( void ) APPLE_KEXT_OVERRIDE;
+
+        OSMetaClassDeclareReservedUnused(IOPlatformDevice, 0);
+        OSMetaClassDeclareReservedUnused(IOPlatformDevice, 1);
+        OSMetaClassDeclareReservedUnused(IOPlatformDevice, 2);
+        OSMetaClassDeclareReservedUnused(IOPlatformDevice, 3);
 };

 #endif /* __cplusplus */
diff --git a/iokit/IOKit/IOPolledInterface.h b/iokit/IOKit/IOPolledInterface.h
index f22e999fc..498814c82 100644
--- a/iokit/IOKit/IOPolledInterface.h
+++ b/iokit/IOKit/IOPolledInterface.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,24 +22,23 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */

 #ifndef _IOPOLLEDINTERFACE_H_
 #define _IOPOLLEDINTERFACE_H_

-enum
-{
-    kIOPolledPreflightState           = 1,
-    kIOPolledBeforeSleepState         = 2,
-    kIOPolledAfterSleepState          = 3,
-    kIOPolledPostflightState          = 4,
+enum{
+        kIOPolledPreflightState           = 1,
+        kIOPolledBeforeSleepState         = 2,
+        kIOPolledAfterSleepState          = 3,
+        kIOPolledPostflightState          = 4,

-    kIOPolledPreflightCoreDumpState   = 5,
-    kIOPolledPostflightCoreDumpState  = 6,
+        kIOPolledPreflightCoreDumpState   = 5,
+        kIOPolledPostflightCoreDumpState  = 6,

-    kIOPolledBeforeSleepStateAborted  = 7,
+        kIOPolledBeforeSleepStateAborted  = 7,
 };

 #if defined(__cplusplus)

@@ -51,64 +50,62 @@ enum
 #define kIOPolledInterfaceActiveKey "IOPolledInterfaceActive"
 #define kIOPolledInterfaceStackKey  "IOPolledInterfaceStack"

-enum
-{
-    kIOPolledWrite = 1,
-    kIOPolledRead  = 2,
-    kIOPolledFlush = 3
+enum{
+        kIOPolledWrite = 1,
+        kIOPolledRead  = 2,
+        kIOPolledFlush = 3
 };

 typedef void (*IOPolledCompletionAction)( void * target,
-                                          void * parameter,
-                                          IOReturn status,
-                                          uint64_t actualByteCount);
-struct IOPolledCompletion
-{
-    void * target;
-    IOPolledCompletionAction action;
-    void * parameter;
+    void * parameter,
+    IOReturn status,
+    uint64_t actualByteCount);
+struct IOPolledCompletion {
+        void * target;
+        IOPolledCompletionAction action;
+        void * parameter;
 };

 class IOPolledInterface : public OSObject
 {
-    OSDeclareAbstractStructors(IOPolledInterface);
+        OSDeclareAbstractStructors(IOPolledInterface);

 protected:
-    struct ExpansionData { };
-    ExpansionData * reserved;
+        struct ExpansionData { };
+        ExpansionData * reserved;

 public:
-    virtual IOReturn probe(IOService * target) = 0;
-
-    virtual IOReturn open( IOOptionBits state, IOMemoryDescriptor * buffer) = 0;
-    virtual IOReturn close(IOOptionBits state) = 0;
-
-    virtual IOReturn startIO(uint32_t operation,
-                             uint32_t bufferOffset,
-                             uint64_t deviceOffset,
-                             uint64_t length,
-                             IOPolledCompletion completion) = 0;
-
-    virtual IOReturn checkForWork(void) = 0;
-
-    virtual IOReturn setEncryptionKey(const uint8_t * key, size_t keySize);
-
-    OSMetaClassDeclareReservedUsed(IOPolledInterface, 0);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 1);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 2);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 3);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 4);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 5);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 6);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 7);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 8);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 9);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 10);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 11);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 12);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 13);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 14);
-    OSMetaClassDeclareReservedUnused(IOPolledInterface, 15);
+        virtual IOReturn probe(IOService * target) = 0;
+
+        virtual IOReturn open( IOOptionBits state, IOMemoryDescriptor * buffer) = 0;
+        virtual IOReturn close(IOOptionBits state) = 0;
+
+        virtual IOReturn startIO(uint32_t operation,
+            uint32_t bufferOffset,
+            uint64_t deviceOffset,
+            uint64_t length,
+            IOPolledCompletion completion) = 0;
+
+        virtual IOReturn checkForWork(void) = 0;
+
+        virtual IOReturn setEncryptionKey(const uint8_t * key, size_t keySize);
+
+        OSMetaClassDeclareReservedUsed(IOPolledInterface, 0);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 1);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 2);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 3);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 4);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 5);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 6);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 7);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 8);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 9);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 10);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 11);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 12);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 13);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 14);
+        OSMetaClassDeclareReservedUnused(IOPolledInterface, 15);
 };

 #endif /* defined(__cplusplus) */
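IOPolledInterface's contract is: startIO() kicks off a transfer and the saved completion is delivered later from checkForWork() polling. A hypothetical provider skeleton (not from this patch; the OSDefineMetaClassAndStructors definition would live in the matching .cpp):

#include <IOKit/IOPolledInterface.h>

class ExamplePolledInterface : public IOPolledInterface
{
        OSDeclareDefaultStructors(ExamplePolledInterface)

        IOPolledCompletion fCompletion;         // saved from startIO()
        bool               fBusy;

public:
        virtual IOReturn probe(IOService * target) APPLE_KEXT_OVERRIDE
        {
                return kIOReturnSuccess;
        }
        virtual IOReturn open(IOOptionBits state, IOMemoryDescriptor * buffer) APPLE_KEXT_OVERRIDE
        {
                // prepare hardware for polled operation in the given state
                return kIOReturnSuccess;
        }
        virtual IOReturn close(IOOptionBits state) APPLE_KEXT_OVERRIDE
        {
                return kIOReturnSuccess;
        }
        virtual IOReturn startIO(uint32_t operation, uint32_t bufferOffset,
            uint64_t deviceOffset, uint64_t length,
            IOPolledCompletion completion) APPLE_KEXT_OVERRIDE
        {
                fCompletion = completion;       // delivered later from checkForWork()
                fBusy       = true;
                // ... program a kIOPolledWrite/kIOPolledRead/kIOPolledFlush transfer ...
                return kIOReturnSuccess;
        }
        virtual IOReturn checkForWork(void) APPLE_KEXT_OVERRIDE
        {
                if (fBusy /* && hardware reports done */) {
                        fBusy = false;
                        (*fCompletion.action)(fCompletion.target,
                            fCompletion.parameter, kIOReturnSuccess, 0 /* bytes */);
                }
                return kIOReturnSuccess;
        }
};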
@@ -120,16 +117,14 @@
 #include

 // kern_open_file_for_direct_io() flags
-enum
-{
-    kIOPolledFileCreate    = 0x00000001,
-    kIOPolledFileHibernate = 0x00000002,
+enum{
+        kIOPolledFileCreate    = 0x00000001,
+        kIOPolledFileHibernate = 0x00000002,
 };

 // kern_open_file_for_direct_io() oflags
-enum
-{
-    kIOPolledFileSSD = 0x00000001
+enum{
+        kIOPolledFileSSD = 0x00000001
 };

 #if !defined(__cplusplus)
@@ -142,66 +137,64 @@ typedef struct IOPolledFilePollers IOPolledFilePollers;
 class IOPolledFilePollers;
 #endif

-struct IOPolledFileIOVars
-{
-    IOPolledFilePollers * pollers;
-    struct kern_direct_file_io_ref_t * fileRef;
-    OSData * fileExtents;
-    uint64_t block0;
-    IOByteCount blockSize;
-    uint64_t maxiobytes;
-    IOByteCount bufferLimit;
-    uint8_t * buffer;
-    IOByteCount bufferSize;
-    IOByteCount bufferOffset;
-    IOByteCount bufferHalf;
-    IOByteCount extentRemaining;
-    IOByteCount lastRead;
-    IOByteCount readEnd;
-    uint32_t flags;
-    uint64_t fileSize;
-    uint64_t position;
-    uint64_t extentPosition;
-    uint64_t encryptStart;
-    uint64_t encryptEnd;
-    uint64_t cryptBytes;
-    AbsoluteTime cryptTime;
-    IOPolledFileExtent * extentMap;
-    IOPolledFileExtent * currentExtent;
-    bool allocated;
+struct IOPolledFileIOVars {
+        IOPolledFilePollers * pollers;
+        struct kern_direct_file_io_ref_t * fileRef;
+        OSData * fileExtents;
+        uint64_t block0;
+        IOByteCount blockSize;
+        uint64_t maxiobytes;
+        IOByteCount bufferLimit;
+        uint8_t * buffer;
+        IOByteCount bufferSize;
+        IOByteCount bufferOffset;
+        IOByteCount bufferHalf;
+        IOByteCount extentRemaining;
+        IOByteCount lastRead;
+        IOByteCount readEnd;
+        uint32_t flags;
+        uint64_t fileSize;
+        uint64_t position;
+        uint64_t extentPosition;
+        uint64_t encryptStart;
+        uint64_t encryptEnd;
+        uint64_t cryptBytes;
+        AbsoluteTime cryptTime;
+        IOPolledFileExtent * extentMap;
+        IOPolledFileExtent * currentExtent;
+        bool allocated;
 };
 typedef struct IOPolledFileIOVars IOPolledFileIOVars;

-struct IOPolledFileCryptVars
-{
-    uint8_t aes_iv[AES_BLOCK_SIZE];
-    aes_ctx ctx;
+struct IOPolledFileCryptVars {
+        uint8_t aes_iv[AES_BLOCK_SIZE];
+        aes_ctx ctx;
 };
 typedef struct IOPolledFileCryptVars IOPolledFileCryptVars;

 #if defined(__cplusplus)

 IOReturn IOPolledFileOpen(const char * filename,
-                          uint32_t flags,
-                          uint64_t setFileSize, uint64_t fsFreeSize,
-                          void * write_file_addr, size_t write_file_len,
-                          IOPolledFileIOVars ** fileVars,
-                          OSData ** imagePath,
-                          uint8_t * volumeCryptKey, size_t * keySize);
+    uint32_t flags,
+    uint64_t setFileSize, uint64_t fsFreeSize,
+    void * write_file_addr, size_t write_file_len,
+    IOPolledFileIOVars ** fileVars,
+    OSData ** imagePath,
+    uint8_t * volumeCryptKey, size_t * keySize);

 IOReturn IOPolledFileClose(IOPolledFileIOVars ** pVars,
-                           off_t write_offset, void * addr, size_t write_length,
-                           off_t discard_offset, off_t discard_end);
+    off_t write_offset, void * addr, size_t write_length,
+    off_t discard_offset, off_t discard_end);

 IOReturn IOPolledFilePollersSetup(IOPolledFileIOVars * vars, uint32_t openState);

-IOMemoryDescriptor * IOPolledFileGetIOBuffer(IOPolledFileIOVars * vars);
+LIBKERN_RETURNS_NOT_RETAINED IOMemoryDescriptor * IOPolledFileGetIOBuffer(IOPolledFileIOVars * vars);

 #endif /* defined(__cplusplus) */

 #if defined(__cplusplus)
-#define __C "C"
+#define __C     "C"
 #else
 #define __C
 #endif

@@ -209,11 +202,11 @@ IOMemoryDescriptor * IOPolledFileGetIOBuffer(IOPolledFileIOVars * vars);
 extern __C IOReturn IOPolledFileSeek(IOPolledFileIOVars * vars, uint64_t position);
 extern __C IOReturn IOPolledFileWrite(IOPolledFileIOVars * vars,
-                                      const uint8_t * bytes, IOByteCount size,
-                                      IOPolledFileCryptVars * cryptvars);
+    const uint8_t * bytes, IOByteCount size,
+    IOPolledFileCryptVars * cryptvars);
 extern __C IOReturn IOPolledFileRead(IOPolledFileIOVars * vars,
-                                     uint8_t * bytes, IOByteCount size,
-                                     IOPolledFileCryptVars * cryptvars);
+    uint8_t * bytes, IOByteCount size,
+    IOPolledFileCryptVars * cryptvars);

 extern __C IOReturn IOPolledFileFlush(IOPolledFileIOVars * vars);

@@ -222,7 +215,7 @@ extern __C IOReturn IOPolledFilePollersOpen(IOPolledFileIOVars * vars, uint32_t
 extern __C IOReturn IOPolledFilePollersClose(IOPolledFileIOVars * vars, uint32_t state);

 extern __C IOReturn IOPolledFilePollersSetEncryptionKey(IOPolledFileIOVars * vars,
-                                                        const uint8_t * key, size_t keySize);
+    const uint8_t * key, size_t keySize);

 extern __C IOPolledFileIOVars * gCoreFileVars;
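The C-linkage file API above composes as open, seek, write/read, flush, close. A hedged sketch of a write path (filename and sizes are illustrative assumptions; real callers are the hibernation and core-dump writers, and the pollers must additionally be set up and opened via IOPolledFilePollersSetup/IOPolledFilePollersOpen, omitted here):

#include <IOKit/IOPolledInterface.h>

// Sketch only: write a small, 16-byte-multiple payload through the polled path.
static IOReturn
ExamplePolledFileWrite(void)
{
        IOPolledFileIOVars * vars = NULL;
        OSData *             path = NULL;
        uint8_t              volumeCryptKey[64];     // assumption: buffer size
        size_t               keySize = sizeof(volumeCryptKey);
        static const uint8_t payload[16] = { 0 };
        IOReturn             err;

        err = IOPolledFileOpen("/var/vm/examplefile", kIOPolledFileCreate,
            0 /* setFileSize */, 0 /* fsFreeSize */,
            NULL, 0 /* no prewritten header */,
            &vars, &path, &volumeCryptKey[0], &keySize);
        if (kIOReturnSuccess != err) {
                return err;
        }

        err = IOPolledFileSeek(vars, 0);
        if (kIOReturnSuccess == err) {
                err = IOPolledFileWrite(vars, payload, sizeof(payload),
                    NULL /* no IOPolledFileCryptVars */);
        }
        if (kIOReturnSuccess == err) {
                err = IOPolledFileFlush(vars);
        }

        (void) IOPolledFileClose(&vars, 0, NULL, 0, 0, 0);
        if (path) {
                path->release();
        }
        return err;
}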
@@ -234,23 +227,23 @@ typedef void (*kern_get_file_extents_callback_t)(void * ref, uint64_t start, uint64_t size);

 struct kern_direct_file_io_ref_t *
 kern_open_file_for_direct_io(const char * name,
-                             uint32_t flags,
-                             kern_get_file_extents_callback_t callback,
-                             void * callback_ref,
-                             off_t set_file_size,
-                             off_t fs_free_size,
-                             off_t write_file_offset,
-                             void * write_file_addr,
-                             size_t write_file_len,
-                             dev_t * partition_device_result,
-                             dev_t * image_device_result,
-                             uint64_t * partitionbase_result,
-                             uint64_t * maxiocount_result,
-                             uint32_t * oflags);
+    uint32_t flags,
+    kern_get_file_extents_callback_t callback,
+    void * callback_ref,
+    off_t set_file_size,
+    off_t fs_free_size,
+    off_t write_file_offset,
+    void * write_file_addr,
+    size_t write_file_len,
+    dev_t * partition_device_result,
+    dev_t * image_device_result,
+    uint64_t * partitionbase_result,
+    uint64_t * maxiocount_result,
+    uint32_t * oflags);

 void
 kern_close_file_for_direct_io(struct kern_direct_file_io_ref_t * ref,
-                              off_t write_offset, void * addr, size_t write_length,
-                              off_t discard_offset, off_t discard_end);
+    off_t write_offset, void * addr, size_t write_length,
+    off_t discard_offset, off_t discard_end);
 int
 kern_write_file(struct kern_direct_file_io_ref_t * ref, off_t offset, void * addr, size_t len, int ioflag);
 int
@@ -259,12 +252,11 @@ kern_read_file(struct kern_direct_file_io_ref_t * ref, off_t offset, void * addr, size_t len, int ioflag);

 struct mount *
 kern_file_mount(struct kern_direct_file_io_ref_t * ref);

-enum
-{
-    kIOPolledFileMountChangeMount      = 0x00000101,
-    kIOPolledFileMountChangeUnmount    = 0x00000102,
-    kIOPolledFileMountChangeWillResize = 0x00000201,
-    kIOPolledFileMountChangeDidResize  = 0x00000202,
+enum{
+        kIOPolledFileMountChangeMount      = 0x00000101,
+        kIOPolledFileMountChangeUnmount    = 0x00000102,
+        kIOPolledFileMountChangeWillResize = 0x00000201,
+        kIOPolledFileMountChangeDidResize  = 0x00000202,
 };

 extern void IOPolledFileMountChange(struct mount * mp, uint32_t op);

diff --git a/iokit/IOKit/IORangeAllocator.h b/iokit/IOKit/IORangeAllocator.h
index 290194bb4..2520c5bd8 100644
--- a/iokit/IOKit/IORangeAllocator.h
+++ b/iokit/IOKit/IORangeAllocator.h
@@ -2,7 +2,7 @@
  * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -43,129 +43,128 @@ typedef IOByteCount IORangeScalar;

/*! @class IORangeAllocator
-    @abstract A utility class to manage allocations from a range.
-    @discussion The IORangeAllocator class provides functions for allocating ranges, at a fixed or any offset, and freeing them back to a free list. It is useful for describing ranges of memory or address space without requiring storage in the memory - information describing the free elements is kept elsewhere. Ranges are described by a start offset and a size. IORangeAllocator is optionally protected against multithreaded access.
-*/
+ * @abstract A utility class to manage allocations from a range.
+ * @discussion The IORangeAllocator class provides functions for allocating ranges, at a fixed or any offset, and freeing them back to a free list. It is useful for describing ranges of memory or address space without requiring storage in the memory - information describing the free elements is kept elsewhere. Ranges are described by a start offset and a size. IORangeAllocator is optionally protected against multithreaded access.
+ */

 class IORangeAllocator : public OSObject
 {
-
-    OSDeclareDefaultStructors(IORangeAllocator)
+        OSDeclareDefaultStructors(IORangeAllocator)

 protected:
-    UInt32 numElements;
-    UInt32 capacity;
-    UInt32 capacityIncrement;
-    IORangeScalar defaultAlignmentMask;
-    IOOptionBits options;
+        UInt32 numElements;
+        UInt32 capacity;
+        UInt32 capacityIncrement;
+        IORangeScalar defaultAlignmentMask;
+        IOOptionBits options;

-    struct IORangeAllocatorElement * elements;
+        struct IORangeAllocatorElement * elements;

 private:
-    virtual bool allocElement( UInt32 index );
+        virtual bool allocElement( UInt32 index );

-    virtual void deallocElement( UInt32 index );
+        virtual void deallocElement( UInt32 index );

 public:
-    enum {
-        kLocking = 0x00000001
-    };
+        enum {
+                kLocking = 0x00000001
+        };

/*! @function init
-    @abstract Standard initializer for IORangeAllocator.
-    @discussion This method initializes an IORangeAllocator and optionally sets the free list to contain one fragment, from zero to an endOfRange parameter. The capacity in terms of free fragments and locking options are set for the instance.
-    @param endOfRange If the free list is to contain an initial fragment, set endOfRange to the last offset in the range, ie. size - 1, to create a free fragment for the range zero to endOfRange inclusive. If zero is passed, the free list will be initialized empty, and can be populated with calls to the deallocate method.
-    @param defaultAlignment If this parameter is non-zero it specifies a required alignment for all allocations, for example pass 256 to align allocations on 256 byte boundaries. Zero or one specify unaligned allocations.
-    @param capacity Sets the initial size of the free list in number of noncontiguous fragments. This value is also used for the capacityIncrement.
-    @param options Pass kLocking if the instance can be used by multiple threads.
-    @result Returns true if the instance is successfully initialized, false on failure. */
-
-    virtual bool init( IORangeScalar endOfRange,
-                       IORangeScalar defaultAlignment,
-                       UInt32 capacity,
-                       IOOptionBits options );
+ * @abstract Standard initializer for IORangeAllocator.
+ * @discussion This method initializes an IORangeAllocator and optionally sets the free list to contain one fragment, from zero to an endOfRange parameter. The capacity in terms of free fragments and locking options are set for the instance.
+ * @param endOfRange If the free list is to contain an initial fragment, set endOfRange to the last offset in the range, i.e. size - 1, to create a free fragment for the range zero to endOfRange inclusive. If zero is passed, the free list will be initialized empty, and can be populated with calls to the deallocate method.
+ * @param defaultAlignment If this parameter is non-zero it specifies a required alignment for all allocations, for example pass 256 to align allocations on 256 byte boundaries. Zero or one specify unaligned allocations.
+ * @param capacity Sets the initial size of the free list in number of noncontiguous fragments. This value is also used for the capacityIncrement.
+ * @param options Pass kLocking if the instance can be used by multiple threads.
+ * @result Returns true if the instance is successfully initialized, false on failure. */
+
+        virtual bool init( IORangeScalar endOfRange,
+            IORangeScalar defaultAlignment,
+            UInt32 capacity,
+            IOOptionBits options );

/*! @function withRange
-    @abstract Standard factory method for IORangeAllocator.
-    @discussion This method allocates and initializes an IORangeAllocator and optionally sets the free list to contain one fragment, from zero to an endOfRange parameter. The capacity in terms of free fragments and locking options are set for the instance.
-    @param endOfRange If the free list is to contain an initial fragment, set endOfRange to the last offset in the range, ie. size - 1, to create a free fragment for the range zero to endOfRange inclusive. If zero is passed the free list will be initialized empty, and can be populated with calls to the deallocate method.
-    @param defaultAlignment If this parameter is non-zero it specifies a required alignment for all allocations, for example pass 256 to align allocations on 256 byte boundaries. Zero or one specify unaligned allocations.
-    @param capacity Sets the initial size of the free list in number of non-contiguous fragments. This value is also used for the capacityIncrement.
-    @param options Pass kLocking if the instance can be used by multiple threads.
-    @result Returns the new IORangeAllocator instance, to be released by the caller, or zero on failure. */
-
-    static IORangeAllocator * withRange( IORangeScalar endOfRange,
-                                         IORangeScalar defaultAlignment = 0,
-                                         UInt32 capacity = 0,
-                                         IOOptionBits options = 0 );
-
-    virtual void free() APPLE_KEXT_OVERRIDE;
-    virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;
+ * @abstract Standard factory method for IORangeAllocator.
+ * @discussion This method allocates and initializes an IORangeAllocator and optionally sets the free list to contain one fragment, from zero to an endOfRange parameter. The capacity in terms of free fragments and locking options are set for the instance.
+ * @param endOfRange If the free list is to contain an initial fragment, set endOfRange to the last offset in the range, i.e. size - 1, to create a free fragment for the range zero to endOfRange inclusive. If zero is passed, the free list will be initialized empty, and can be populated with calls to the deallocate method.
+ * @param defaultAlignment If this parameter is non-zero it specifies a required alignment for all allocations, for example pass 256 to align allocations on 256 byte boundaries. Zero or one specify unaligned allocations.
+ * @param capacity Sets the initial size of the free list in number of non-contiguous fragments. This value is also used for the capacityIncrement.
+ * @param options Pass kLocking if the instance can be used by multiple threads.
+ * @result Returns the new IORangeAllocator instance, to be released by the caller, or zero on failure. */
+
+        static IORangeAllocator * withRange( IORangeScalar endOfRange,
+            IORangeScalar defaultAlignment = 0,
+            UInt32 capacity = 0,
+            IOOptionBits options = 0 );
+
+        virtual void free() APPLE_KEXT_OVERRIDE;
+        virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;

/*! @function getFragmentCount
-    @abstract Accessor to return the number of free fragments in the range.
-    @discussion This method returns a count of free fragments. Each fragment describes a non-contiguous free range - deallocations will merge contiguous fragments together.
-    @result Returns the count of free fragments.
-*/
+ * @abstract Accessor to return the number of free fragments in the range.
+ * @discussion This method returns a count of free fragments. Each fragment describes a non-contiguous free range - deallocations will merge contiguous fragments together.
+ * @result Returns the count of free fragments.
+ */

-    virtual UInt32 getFragmentCount( void );
+        virtual UInt32 getFragmentCount( void );

/*! @function getFragmentCapacity
-    @abstract Accessor to return the number of free fragments in the range.
-    @discussion This method returns the current capacity of the free fragment list.
-    @result Returns the current capacity of free fragment list.
-*/
+ * @abstract Accessor to return the capacity of the free fragment list.
+ * @discussion This method returns the current capacity of the free fragment list.
+ * @result Returns the current capacity of the free fragment list.
+ */

-    virtual UInt32 getFragmentCapacity( void );
+        virtual UInt32 getFragmentCapacity( void );

/*! @function setFragmentCapacityIncrement
-    @abstract Sets the count of fragments the free list will increase by when full.
-    @discussion This method sets the number of extra fragments the free list will expand to when full. It defaults to the initial capacity.
-    @param count The number of fragments to increment the capacity by when the free list is full.
-*/
+ * @abstract Sets the count of fragments the free list will increase by when full.
+ * @discussion This method sets the number of extra fragments the free list will expand to when full. It defaults to the initial capacity.
+ * @param count The number of fragments to increment the capacity by when the free list is full.
+ */

-    virtual void setFragmentCapacityIncrement( UInt32 count );
+        virtual void setFragmentCapacityIncrement( UInt32 count );

/*! @function getFreeCount
-    @abstract Totals the sizes of the free fragments.
-    @discussion This method returns the total of the sizes of the fragments on the free list.
-    @result Returns the total of the free fragments sizes.
-*/
+ * @abstract Totals the sizes of the free fragments.
+ * @discussion This method returns the total of the sizes of the fragments on the free list.
+ * @result Returns the total of the free fragments' sizes.
+ */

-    virtual IORangeScalar getFreeCount( void );
+        virtual IORangeScalar getFreeCount( void );

/*! @function allocate
-    @abstract Allocates from the free list, at any offset.
-    @discussion This method allocates a range from the free list. The alignment will default to the alignment set when the allocator was created or may be set here.
-    @param size The size of the range requested.
-    @param result The beginning of the range allocated is returned here on success.
-    @param alignment If zero is passed, default to the allocators alignment, otherwise pass an alignment required for the allocation, for example 4096 to page align.
-    @result Returns true if the allocation was successful, else false.
-*/
-
-    virtual bool allocate( IORangeScalar size,
-                           IORangeScalar * result,
-                           IORangeScalar alignment = 0 );
+ * @abstract Allocates from the free list, at any offset.
+ * @discussion This method allocates a range from the free list. The alignment will default to the alignment set when the allocator was created or may be set here.
+ * @param size The size of the range requested.
+ * @param result The beginning of the range allocated is returned here on success.
+ * @param alignment If zero is passed, this defaults to the allocator's alignment, otherwise pass an alignment required for the allocation, for example 4096 to page align.
+ * @result Returns true if the allocation was successful, else false.
+ */
+
+        virtual bool allocate( IORangeScalar size,
+            IORangeScalar * result,
+            IORangeScalar alignment = 0 );

/*! @function allocateRange
-    @abstract Allocates from the free list, at a set offset.
-    @discussion This method allocates a range from the free list, given a set offset passed in.
-    @param start The beginning of the range requested.
-    @param size The size of the range requested.
-    @result Returns true if the allocation was successful, else false.
-*/
+ * @abstract Allocates from the free list, at a set offset.
+ * @discussion This method allocates a range from the free list, given a set offset passed in.
+ * @param start The beginning of the range requested.
+ * @param size The size of the range requested.
+ * @result Returns true if the allocation was successful, else false.
+ */

-    virtual bool allocateRange( IORangeScalar start,
-                                IORangeScalar size );
+        virtual bool allocateRange( IORangeScalar start,
+            IORangeScalar size );

/*! @function deallocate
-    @abstract Deallocates a range to the free list.
-    @discussion This method deallocates a range to the free list, given a the start offset and length passed in.
-    @param start The beginning of the range requested.
-    @param size Returns the size of the range requested.
-*/
-
-    virtual void deallocate( IORangeScalar start,
-                             IORangeScalar size );
+ * @abstract Deallocates a range to the free list.
+ * @discussion This method deallocates a range to the free list, given the start offset and length passed in.
+ * @param start The beginning of the range requested.
+ * @param size The size of the range requested.
+ */
+
+        virtual void deallocate( IORangeScalar start,
+            IORangeScalar size );
 };

 #endif /* _IOKIT_IORANGEALLOCATOR_H */
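The init/withRange parameters documented above compose naturally: seed one free fragment with endOfRange = size - 1, then carve aligned pieces out of it. A minimal sketch (the 16 MB pool size and 4 KB alignment are illustrative):

#include <IOKit/IORangeAllocator.h>

// Sketch: carve page-aligned ranges out of a 16 MB space.
static void
ExampleRangeAllocatorUse(void)
{
        IORangeAllocator * pool = IORangeAllocator::withRange(
                (16 * 1024 * 1024) - 1,         // endOfRange: size - 1 seeds one free fragment
                0,                              // defaultAlignment: unaligned by default
                0,                              // capacity: default free-list size
                IORangeAllocator::kLocking);    // usable from multiple threads
        if (!pool) {
                return;
        }

        IORangeScalar start;
        if (pool->allocate(4096, &start, 4096)) {       // page-aligned request
                /* ... use [start, start + 4096) ... */
                pool->deallocate(start, 4096);          // merges back into the free list
        }
        pool->release();
}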
diff --git a/iokit/IOKit/IORegistryEntry.h b/iokit/IOKit/IORegistryEntry.h
index 59ba42d99..0812c9579 100644
--- a/iokit/IOKit/IORegistryEntry.h
+++ b/iokit/IOKit/IORegistryEntry.h
@@ -2,7 +2,7 @@
  * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,11 +22,11 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
+ * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
  *
  * HISTORY
  *
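The large hunk below reindents IORegistryEntry and its doc comments; the plane-aware copyProperty and runPropertyAction semantics it documents look like this in use (a minimal sketch; the action function and property names are hypothetical):

#include <IOKit/IORegistryEntry.h>
#include <IOKit/IOService.h>

// Hypothetical action: executed synchronized with registry iterators/serializers.
static IOReturn
ExamplePropertyAction(OSObject * target, void * arg0, void *, void *, void *)
{
        IORegistryEntry * entry = (IORegistryEntry *) target;
        entry->setProperty((const char *) arg0, kOSBooleanTrue);
        return kIOReturnSuccess;
}

static void
ExampleRegistryUse(IORegistryEntry * entry)
{
        // Search upward through the service plane; any result comes back retained.
        OSObject * prop = entry->copyProperty("model", gIOServicePlane,
            kIORegistryIterateRecursively | kIORegistryIterateParents);
        if (prop) {
                /* ... inspect ... */
                prop->release();
        }

        // Serialize a property-table update against concurrent registry access.
        (void) entry->runPropertyAction(&ExamplePropertyAction, entry,
            (void *) "example-flag");
}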
+ * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result The property value found, or zero. A reference on any found property is returned to caller, which should be released. */ + + virtual OSObject * copyProperty( const char * aKey, + const IORegistryPlane * plane, + IOOptionBits options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; /*! @function copyProperty - @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. Available in Mac OS X 10.1 or later. - @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recusively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). - @param aKey The property's name as an OSString. - @param plane The plane to iterate over, eg. gIOServicePlane. - @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. - @result The property value found, or zero. A reference on any found property is returned to caller, which should be released. */ - - virtual OSObject * copyProperty( const OSString * aKey, - const IORegistryPlane * plane, - IOOptionBits options = - kIORegistryIterateRecursively | - kIORegistryIterateParents) const; + * @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. Available in Mac OS X 10.1 or later. + * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recusively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + * @param aKey The property's name as an OSString. + * @param plane The plane to iterate over, eg. gIOServicePlane. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result The property value found, or zero. A reference on any found property is returned to caller, which should be released. */ + + virtual OSObject * copyProperty( const OSString * aKey, + const IORegistryPlane * plane, + IOOptionBits options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; /*! 
@function copyProperty - @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. Available in Mac OS X 10.1 or later. - @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recusively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). - @param aKey The property's name as an OSSymbol. - @param plane The plane to iterate over, eg. gIOServicePlane. - @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. - @result The property value found, or zero. A reference on any found property is returned to caller, which should be released. */ - - virtual OSObject * copyProperty( const OSSymbol * aKey, - const IORegistryPlane * plane, - IOOptionBits options = - kIORegistryIterateRecursively | - kIORegistryIterateParents) const; + * @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy. Available in Mac OS X 10.1 or later. + * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recusively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s). + * @param aKey The property's name as an OSSymbol. + * @param plane The plane to iterate over, eg. gIOServicePlane. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children. + * @result The property value found, or zero. A reference on any found property is returned to caller, which should be released. */ + + virtual OSObject * copyProperty( const OSSymbol * aKey, + const IORegistryPlane * plane, + IOOptionBits options = + kIORegistryIterateRecursively | + kIORegistryIterateParents) const; /*! @function copyParentEntry - @abstract Returns an registry entry's first parent entry in a plane. Available in Mac OS X 10.1 or later. - @discussion This function will return the parent to which a registry entry was first attached. Since the majority of registry entrys have only one provider, this is a useful simplification. - @param plane The plane object. - @result Returns the first parent of the registry entry, or zero if the entry is not attached into the registry in that plane. A reference on the entry is returned to caller, which should be released. */ + * @abstract Returns an registry entry's first parent entry in a plane. 
+ * @discussion This function will return the parent to which a registry entry was first attached. Since the majority of registry entries have only one provider, this is a useful simplification.
+ * @param plane The plane object.
+ * @result Returns the first parent of the registry entry, or zero if the entry is not attached into the registry in that plane. A reference on the entry is returned to the caller, which should be released. */

-    virtual IORegistryEntry * copyParentEntry( const IORegistryPlane * plane ) const;
+    virtual IORegistryEntry * copyParentEntry( const IORegistryPlane * plane ) const;

 /*! @function copyChildEntry
-    @abstract Returns an registry entry's first child entry in a plane. Available in Mac OS X 10.1 or later.
-    @discussion This function will return the child which first attached to a registry entry.
-    @param plane The plane object.
-    @result Returns the first child of the registry entry, or zero if the entry is not attached into the registry in that plane. A reference on the entry is returned to caller, which should be released. */
+ * @abstract Returns a registry entry's first child entry in a plane. Available in Mac OS X 10.1 or later.
+ * @discussion This function will return the child which first attached to a registry entry.
+ * @param plane The plane object.
+ * @result Returns the first child of the registry entry, or zero if the entry is not attached into the registry in that plane. A reference on the entry is returned to the caller, which should be released. */

-    virtual IORegistryEntry * copyChildEntry( const IORegistryPlane * plane ) const;
+    virtual IORegistryEntry * copyChildEntry( const IORegistryPlane * plane ) const;

-    /* method available in Mac OS X 10.4 or later */
+/* method available in Mac OS X 10.4 or later */

 /*!
-    @typedef Action
-    @discussion Type and arguments of callout C function that is used when
-a runCommand is executed by a client. Cast to this type when you want a C++
-member function to be used. Note the arg1 - arg3 parameters are passed straight pass through to the action callout.
-    @param target
-	Target of the function, can be used as a refcon. Note if a C++ function
-was specified, this parameter is implicitly the first parameter in the target
-member function's parameter list.
-    @param arg0 Argument to action from run operation.
-    @param arg1 Argument to action from run operation.
-    @param arg2 Argument to action from run operation.
-    @param arg3 Argument to action from run operation.
-*/
-    typedef IOReturn (*Action)(OSObject *target,
-        void *arg0, void *arg1,
-        void *arg2, void *arg3);
+ * @typedef Action
+ * @discussion Type and arguments of the callout C function that is used when
+ * a runCommand is executed by a client. Cast to this type when you want a C++
+ * member function to be used. Note the arg1 - arg3 parameters are passed straight through to the action callout.
+ * @param target
+ * Target of the function, can be used as a refcon. Note if a C++ function
+ * was specified, this parameter is implicitly the first parameter in the target
+ * member function's parameter list.
+ * @param arg0 Argument to action from run operation.
+ * @param arg1 Argument to action from run operation.
+ * @param arg2 Argument to action from run operation.
+ * @param arg3 Argument to action from run operation.
+ */
+    typedef IOReturn (*Action)(OSObject *target,
+        void *arg0, void *arg1,
+        void *arg2, void *arg3);
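By way of a hedged sketch (counterAction and the "counter" key are illustrative, not part of this interface), a callout matching the Action type might look like:

    // An Action-compatible callout: the target arrives as the first argument,
    // arg0 carries a caller-chosen value; arg1 - arg3 are unused here.
    static IOReturn
    counterAction( OSObject * target, void * arg0, void *, void *, void * )
    {
        IORegistryEntry * entry = (IORegistryEntry *) target;
        entry->setProperty( "counter",
            (unsigned long long) (uintptr_t) arg0, 32 );
        return kIOReturnSuccess;
    }
    // Invoked via runPropertyAction (declared below), which serializes the
    // callout with the registry's property-table synchronization:
    //     entry->runPropertyAction( counterAction, entry, (void *) 1 );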
 /*! @function runPropertyAction
-    @abstract Single thread a call to an action w.r.t. the property lock
-    @discussion Client function that causes the given action to be called in a manner that syncrhonises with the registry iterators and serialisers. This functin can be used to synchronously manipulate the property table of this nub
-    @param action Pointer to function to be executed in work-loop context.
-    @param arg0 Parameter for action parameter, defaults to 0.
-    @param arg1 Parameter for action parameter, defaults to 0.
-    @param arg2 Parameter for action parameter, defaults to 0.
-    @param arg3 Parameter for action parameter, defaults to 0.
-    @result Returns the value of the Action callout.
-*/
-    virtual IOReturn runPropertyAction(Action action, OSObject *target,
-        void *arg0 = 0, void *arg1 = 0,
-        void *arg2 = 0, void *arg3 = 0);
+ * @abstract Single thread a call to an action w.r.t. the property lock
+ * @discussion Client function that causes the given action to be called in a manner that synchronises with the registry iterators and serialisers. This function can be used to synchronously manipulate the property table of this nub.
+ * @param action Pointer to function to be executed in work-loop context.
+ * @param arg0 Parameter for action parameter, defaults to 0.
+ * @param arg1 Parameter for action parameter, defaults to 0.
+ * @param arg2 Parameter for action parameter, defaults to 0.
+ * @param arg3 Parameter for action parameter, defaults to 0.
+ * @result Returns the value of the Action callout.
+ */
+    virtual IOReturn runPropertyAction(Action action, OSObject *target,
+        void *arg0 = 0, void *arg1 = 0,
+        void *arg2 = 0, void *arg3 = 0);

 private:
 #if __LP64__
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 0);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 1);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 2);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 3);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 4);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 5);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 0);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 1);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 2);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 3);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 4);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 5);
 #else
-    OSMetaClassDeclareReservedUsed(IORegistryEntry, 0);
-    OSMetaClassDeclareReservedUsed(IORegistryEntry, 1);
-    OSMetaClassDeclareReservedUsed(IORegistryEntry, 2);
-    OSMetaClassDeclareReservedUsed(IORegistryEntry, 3);
-    OSMetaClassDeclareReservedUsed(IORegistryEntry, 4);
-    OSMetaClassDeclareReservedUsed(IORegistryEntry, 5);
+    OSMetaClassDeclareReservedUsed(IORegistryEntry, 0);
+    OSMetaClassDeclareReservedUsed(IORegistryEntry, 1);
+    OSMetaClassDeclareReservedUsed(IORegistryEntry, 2);
+    OSMetaClassDeclareReservedUsed(IORegistryEntry, 3);
+    OSMetaClassDeclareReservedUsed(IORegistryEntry, 4);
+    OSMetaClassDeclareReservedUsed(IORegistryEntry, 5);
 #endif
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 6);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 7);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 8);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 9);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 10);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 11);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 12);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 13);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 14);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 15);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 16);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 17);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 18);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 19);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 20);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 21);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 22);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 23);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 24);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 25);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 26);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 27);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 28);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 29);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 30);
-    OSMetaClassDeclareReservedUnused(IORegistryEntry, 31);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 6);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 7);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 8);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 9);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 10);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 11);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 12);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 13);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 14);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 15);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 16);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 17);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 18);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 19);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 20);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 21);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 22);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 23);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 24);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 25);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 26);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 27);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 28);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 29);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 30);
+    OSMetaClassDeclareReservedUnused(IORegistryEntry, 31);

 public:
-    /* Registry accessors */
+/* Registry accessors */

 /*! @function getRegistryRoot
-    @abstract Returns a pointer to the root instance of the registry.
-    @discussion This method provides an accessor to the root of the registry for the machine. The root may be passed to a registry iterator when iterating a plane, and contains properties that describe the available planes, and diagnostic information for IOKit. Keys for these properties are in IOKitKeys.h.
-    @result A pointer to the IORegistryEntry root instance. It should not be released by the caller. */
+ * @abstract Returns a pointer to the root instance of the registry.
+ * @discussion This method provides an accessor to the root of the registry for the machine. The root may be passed to a registry iterator when iterating a plane, and contains properties that describe the available planes, and diagnostic information for IOKit. Keys for these properties are in IOKitKeys.h.
+ * @result A pointer to the IORegistryEntry root instance. It should not be released by the caller. */
+
+    static IORegistryEntry * getRegistryRoot( void );
-    static IORegistryEntry * getRegistryRoot( void );
-
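A minimal sketch of these static accessors, using only what this class declares (getPlane is documented just below); neither returned pointer is owned by the caller:

    // Hypothetical helper: log the registry root's name in the service plane.
    static void
    logRegistryRoot( void )
    {
        IORegistryEntry * root = IORegistryEntry::getRegistryRoot();
        const IORegistryPlane * plane = IORegistryEntry::getPlane( "IOService" );
        if (root && plane) {
            IOLog( "registry root: %s\n", root->getName() );
            // Neither root nor plane is released here; they are not owned.
        }
    }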
 /*! @function getGenerationCount
-    @abstract Returns an generation count for all registry changing operations.
-    @discussion This method provides an accessor to the current generation count (or seed) of the registry which changes when any topology change occurs in the registry - this does not include property table changes. It may be used to invalidate any caching of the results from IORegistryEntry methods.
-    @result An integer generation count. */
+ * @abstract Returns a generation count for all registry changing operations.
+ * @discussion This method provides an accessor to the current generation count (or seed) of the registry which changes when any topology change occurs in the registry - this does not include property table changes. It may be used to invalidate any caching of the results from IORegistryEntry methods.
+ * @result An integer generation count. */

-    static SInt32 getGenerationCount( void );
+    static SInt32 getGenerationCount( void );

 /*! @function getPlane
-    @abstract Looks up the plane object by a C-string name.
-    @discussion Planes are usually provided as globals by the creator, eg. gIOServicePlane, gIODeviceTreePlane, or gIOAudioPlane, however they may also be looked up by name with this method.
-    @result A pointer to the plane object, or zero if no such plane exists. The returned plane should not be released. */
+ * @abstract Looks up the plane object by a C-string name.
+ * @discussion Planes are usually provided as globals by the creator, eg. gIOServicePlane, gIODeviceTreePlane, or gIOAudioPlane, however they may also be looked up by name with this method.
+ * @result A pointer to the plane object, or zero if no such plane exists. The returned plane should not be released. */

-    static const IORegistryPlane * getPlane( const char * name );
+    static const IORegistryPlane * getPlane( const char * name );

-    /* Registry Entry allocation & init */
+/* Registry Entry allocation & init */

 /*! @function init
-    @abstract Standard init method for all IORegistryEntry subclasses.
-    @discussion A registry entry must be initialized with this method before it can be used. A property dictionary may passed and will be retained by this method for use as the registry entry's property table, or an empty one will be created.
-    @param dictionary A dictionary that will become the registry entry's property table (retaining it), or zero which will cause an empty property table to be created.
-    @result true on success, or false on a resource failure. */
+ * @abstract Standard init method for all IORegistryEntry subclasses.
+ * @discussion A registry entry must be initialized with this method before it can be used. A property dictionary may be passed and will be retained by this method for use as the registry entry's property table, or an empty one will be created.
+ * @param dictionary A dictionary that will become the registry entry's property table (retaining it), or zero which will cause an empty property table to be created.
+ * @result true on success, or false on a resource failure. */

-    virtual bool init( OSDictionary * dictionary = 0 );
+    virtual bool init( OSDictionary * dictionary = 0 );
 /*! @function free
-    @abstract Standard free method for all IORegistryEntry subclasses.
-    @discussion This method will release any resources of the entry, in particular its property table. Note that the registry entry must always be detached from the registry before free may be called, and subclasses (namely IOService) will have additional protocols for removing registry entries. free should never need be called directly. */
+ * @abstract Standard free method for all IORegistryEntry subclasses.
+ * @discussion This method will release any resources of the entry, in particular its property table. Note that the registry entry must always be detached from the registry before free may be called, and subclasses (namely IOService) will have additional protocols for removing registry entries. free should never need to be called directly. */

-    virtual void free( void ) APPLE_KEXT_OVERRIDE;
+    virtual void free( void ) APPLE_KEXT_OVERRIDE;

 /*! @function setPropertyTable
-    @abstract Replace a registry entry's property table.
-    @discussion This method will release the current property table of a the entry and replace it with another, retaining the new property table.
-    @param dict The new dictionary to be used as the entry's property table. */
+ * @abstract Replace a registry entry's property table.
+ * @discussion This method will release the current property table of the entry and replace it with another, retaining the new property table.
+ * @param dict The new dictionary to be used as the entry's property table. */

-    virtual void setPropertyTable( OSDictionary * dict );
+    virtual void setPropertyTable( OSDictionary * dict );

-    /* Synchronized property accessors; wrappers to OSDictionary
-     * plus property creation helpers */
+/* Synchronized property accessors; wrappers to OSDictionary
+ * plus property creation helpers */

 /*! @function setProperty
-    @abstract Synchronized method to add a property to a registry entry's property table.
-    @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The properties name as an OSSymbol.
-    @param anObject The property value.
-    @result true on success or false on a resource failure. */
+ * @abstract Synchronized method to add a property to a registry entry's property table.
+ * @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as an OSSymbol.
+ * @param anObject The property value.
+ * @result true on success or false on a resource failure. */

-    virtual bool setProperty(const OSSymbol * aKey, OSObject * anObject);
+    virtual bool setProperty(const OSSymbol * aKey,
+        OSObject * anObject);

 #ifdef KERNEL_PRIVATE
-    OSObject * setIndexedProperty(uint32_t index, OSObject * anObject);
-    OSObject * getIndexedProperty(uint32_t index) const;
+    OSObject * setIndexedProperty(uint32_t index, OSObject * anObject);
+    OSObject * getIndexedProperty(uint32_t index) const;
 #endif /* KERNEL_PRIVATE */
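A usage sketch, assuming some initialized IORegistryEntry * entry (the key name is hypothetical). setProperty retains both key and value, so the creator releases its own references:

    // Hypothetical helper demonstrating the OSSymbol-keyed overload.
    static void
    setAnswerProperty( IORegistryEntry * entry )
    {
        const OSSymbol * key = OSSymbol::withCString( "my-key" );
        OSNumber * value = OSNumber::withNumber( 42ULL, 32 );
        if (key && value) {
            entry->setProperty( key, value );   // the table retains key and value
        }
        if (value) value->release();
        if (key)   key->release();
    }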
 /*! @function setProperty
-    @abstract Synchronized method to add a property to a registry entry's property table.
-    @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as an OSString.
-    @param anObject The property value.
-    @result true on success or false on a resource failure. */
+ * @abstract Synchronized method to add a property to a registry entry's property table.
+ * @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as an OSString.
+ * @param anObject The property value.
+ * @result true on success or false on a resource failure. */

-    virtual bool setProperty(const OSString * aKey, OSObject * anObject);
+    virtual bool setProperty(const OSString * aKey, OSObject * anObject);

 /*! @function setProperty
-    @abstract Synchronized method to add a property to a registry entry's property table.
-    @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as a C-string.
-    @param anObject The property value.
-    @result true on success or false on a resource failure. */
+ * @abstract Synchronized method to add a property to a registry entry's property table.
+ * @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as a C-string.
+ * @param anObject The property value.
+ * @result true on success or false on a resource failure. */

-    virtual bool setProperty(const char * aKey, OSObject * anObject);
+    virtual bool setProperty(const char * aKey, OSObject * anObject);

 /*! @function setProperty
-    @abstract Synchronized method to construct and add a OSString property to a registry entry's property table.
-    @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSString from the supplied C-string, set in the property table with the given name, and released.
-    @param aKey The property's name as a C-string.
-    @param aString The property value as a C-string.
-    @result true on success or false on a resource failure. */
+ * @abstract Synchronized method to construct and add an OSString property to a registry entry's property table.
+ * @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSString from the supplied C-string, set in the property table with the given name, and released.
+ * @param aKey The property's name as a C-string.
+ * @param aString The property value as a C-string.
+ * @result true on success or false on a resource failure. */

-    virtual bool setProperty(const char * aKey, const char * aString);
+    virtual bool setProperty(const char * aKey, const char * aString);
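Because the OSString is created and released internally, the common case collapses to a one-liner; an illustrative use with hypothetical key and value:

    // Creates an OSString value internally, sets it, and releases it.
    entry->setProperty( "model", "ACME,rev-1" );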
 /*! @function setProperty
-    @abstract Synchronized method to construct and add an OSBoolean property to a registry entry's property table.
-    @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSBoolean from the supplied value, set in the property table with the given name, and released.
-    @param aKey The property's name as a C-string.
-    @param aBoolean The property's boolean value.
-    @result true on success or false on a resource failure. */
+ * @abstract Synchronized method to construct and add an OSBoolean property to a registry entry's property table.
+ * @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSBoolean from the supplied value, set in the property table with the given name, and released.
+ * @param aKey The property's name as a C-string.
+ * @param aBoolean The property's boolean value.
+ * @result true on success or false on a resource failure. */

-    virtual bool setProperty(const char * aKey, bool aBoolean);
+    virtual bool setProperty(const char * aKey, bool aBoolean);

 /*! @function setProperty
-    @abstract Synchronized method to construct and add an OSNumber property to a registry entry's property table.
-    @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSNumber from the supplied value and size, set in the property table with the given name, and released.
-    @param aKey The property's name as a C-string.
-    @param aValue The property's numeric value.
-    @param aNumberOfBits The property's size in bits, for OSNumber.
-    @result true on success or false on a resource failure. */
+ * @abstract Synchronized method to construct and add an OSNumber property to a registry entry's property table.
+ * @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSNumber from the supplied value and size, set in the property table with the given name, and released.
+ * @param aKey The property's name as a C-string.
+ * @param aValue The property's numeric value.
+ * @param aNumberOfBits The property's size in bits, for OSNumber.
+ * @result true on success or false on a resource failure. */

-    virtual bool setProperty( const char * aKey,
-        unsigned long long aValue,
-        unsigned int aNumberOfBits);
+    virtual bool setProperty( const char * aKey,
+        unsigned long long aValue,
+        unsigned int aNumberOfBits);

 /*! @function setProperty
-    @abstract Synchronized method to construct and add an OSData property to a registry entry's property table.
-    @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSData copied from the supplied data and length, set in the property table with the given name, and released.
-    @param aKey The property's name as a C-string.
-    @param bytes The property's value as a pointer. OSData will copy this data.
-    @param length The property's size in bytes, for OSData.
-    @result true on success or false on a resource failure. */
+ * @abstract Synchronized method to construct and add an OSData property to a registry entry's property table.
+ * @discussion This method will add or replace a property in a registry entry's property table, using the OSDictionary::setObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table. The property is created as an OSData copied from the supplied data and length, set in the property table with the given name, and released.
+ * @param aKey The property's name as a C-string.
+ * @param bytes The property's value as a pointer. OSData will copy this data.
+ * @param length The property's size in bytes, for OSData.
+ * @result true on success or false on a resource failure. */

-    virtual bool setProperty( const char * aKey,
-        void * bytes,
-        unsigned int length);
+    virtual bool setProperty( const char * aKey,
+        void * bytes,
+        unsigned int length);

 /*! @function removeProperty
-    @abstract Synchronized method to remove a property from a registry entry's property table.
-    @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as an OSSymbol. */
+ * @abstract Synchronized method to remove a property from a registry entry's property table.
+ * @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as an OSSymbol. */

-    virtual void removeProperty( const OSSymbol * aKey);
+    virtual void removeProperty( const OSSymbol * aKey);

 /*! @function removeProperty
-    @abstract Synchronized method to remove a property from a registry entry's property table.
-    @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as an OSString. */
+ * @abstract Synchronized method to remove a property from a registry entry's property table.
+ * @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as an OSString. */

-    virtual void removeProperty( const OSString * aKey);
+    virtual void removeProperty( const OSString * aKey);

 /*! @function removeProperty
-    @abstract Synchronized method to remove a property from a registry entry's property table.
-    @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as a C-string. */
+ * @abstract Synchronized method to remove a property from a registry entry's property table.
+ * @discussion This method will remove a property from a registry entry's property table, using the OSDictionary::removeObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as a C-string. */

-    virtual void removeProperty( const char * aKey);
+    virtual void removeProperty( const char * aKey);
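A sketch of the construction helpers together with removal (the helper, key names, and address bytes are illustrative only):

    // Hypothetical helper: set a numeric and a raw-data property, then
    // remove one of them again.
    static void
    tagNetworkNub( IORegistryEntry * entry )
    {
        uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        entry->setProperty( "link-speed", 1000ULL, 32 );             // OSNumber
        entry->setProperty( "local-mac-address", mac, sizeof(mac) ); // OSData copy
        entry->removeProperty( "link-speed" );  // drops the table's reference
    }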
 /*! @function getProperty
-    @abstract Synchronized method to obtain a property from a registry entry's property table.
-    @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as an OSSymbol.
-    @result The property value found, or zero. */
+ * @abstract Synchronized method to obtain a property from a registry entry's property table.
+ * @discussion This method will look up a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as an OSSymbol.
+ * @result The property value found, or zero. */

-    virtual OSObject * getProperty( const OSSymbol * aKey) const;
+    virtual OSObject * getProperty( const OSSymbol * aKey) const;

 /*! @function getProperty
-    @abstract Synchronized method to obtain a property from a registry entry's property table.
-    @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as an OSString.
-    @result The property value found, or zero. */
+ * @abstract Synchronized method to obtain a property from a registry entry's property table.
+ * @discussion This method will look up a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as an OSString.
+ * @result The property value found, or zero. */

-    virtual OSObject * getProperty( const OSString * aKey) const;
+    virtual OSObject * getProperty( const OSString * aKey) const;

 /*! @function getProperty
-    @abstract Synchronized method to obtain a property from a registry entry's property table.
-    @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as a C-string.
-    @result The property value found, or zero. */
+ * @abstract Synchronized method to obtain a property from a registry entry's property table.
+ * @discussion This method will look up a property in a registry entry's property table, using the OSDictionary::getObject semantics. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as a C-string.
+ * @result The property value found, or zero. */

-    virtual OSObject * getProperty( const char * aKey) const;
+    virtual OSObject * getProperty( const char * aKey) const;

 /*! @function getProperty
-    @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy.
-    @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recusively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s).
-    @param aKey The property's name as an OSSymbol.
-    @param plane The plane to iterate over, eg. gIOServicePlane.
-    @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children.
-    @result The property value found, or zero. */
-
-    virtual OSObject * getProperty( const OSSymbol * aKey,
-        const IORegistryPlane * plane,
-        IOOptionBits options =
-        kIORegistryIterateRecursively |
-        kIORegistryIterateParents) const;
+ * @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy.
+ * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will look up and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s).
+ * @param aKey The property's name as an OSSymbol.
+ * @param plane The plane to iterate over, eg. gIOServicePlane.
+ * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children.
+ * @result The property value found, or zero. */
+
+    virtual OSObject * getProperty( const OSSymbol * aKey,
+        const IORegistryPlane * plane,
+        IOOptionBits options =
+        kIORegistryIterateRecursively |
+        kIORegistryIterateParents) const;

 /*! @function getProperty
-    @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy.
-    @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recusively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s).
-    @param aKey The property's name as an OSString.
-    @param plane The plane to iterate over, eg. gIOServicePlane.
-    @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children.
-    @result The property value found, or zero. */
-
-    virtual OSObject * getProperty( const OSString * aKey,
-        const IORegistryPlane * plane,
-        IOOptionBits options =
-        kIORegistryIterateRecursively |
-        kIORegistryIterateParents) const;
+ * @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy.
+ * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will look up and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s).
+ * @param aKey The property's name as an OSString.
+ * @param plane The plane to iterate over, eg. gIOServicePlane.
+ * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children.
+ * @result The property value found, or zero. */
+
+    virtual OSObject * getProperty( const OSString * aKey,
+        const IORegistryPlane * plane,
+        IOOptionBits options =
+        kIORegistryIterateRecursively |
+        kIORegistryIterateParents) const;

 /*! @function getProperty
-    @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy.
-    @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recusively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will lookup and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s).
-    @param aKey The property's name as a C-string.
-    @param plane The plane to iterate over, eg. gIOServicePlane.
-    @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children.
-    @result The property value found, or zero. */
-
-    virtual OSObject * getProperty( const char * aKey,
-        const IORegistryPlane * plane,
-        IOOptionBits options =
-        kIORegistryIterateRecursively |
-        kIORegistryIterateParents) const;
+ * @abstract Synchronized method to obtain a property from a registry entry or one of its parents (or children) in the hierarchy.
+ * @discussion This method will search for a property, starting first with this registry entry's property table, then iterating recursively through either the parent registry entries or the child registry entries of this entry. Once the first occurrence is found, it will look up and return the value of the property, using the OSDictionary::getObject semantics. The iteration keeps track of entries that have been recursed into previously to avoid loops. This method is synchronized with other IORegistryEntry accesses to the property table(s).
+ * @param aKey The property's name as a C-string.
+ * @param plane The plane to iterate over, eg. gIOServicePlane.
+ * @param options kIORegistryIterateRecursively may be set to recurse automatically into the registry hierarchy. Without this option, this method degenerates into the standard getProperty() call. kIORegistryIterateParents may be set to iterate the parents of the entry, in place of the children.
+ * @result The property value found, or zero. */
+
+    virtual OSObject * getProperty( const char * aKey,
+        const IORegistryPlane * plane,
+        IOOptionBits options =
+        kIORegistryIterateRecursively |
+        kIORegistryIterateParents) const;

 /*! @function copyProperty
-    @abstract Synchronized method to obtain a property from a registry entry's property table.
-    @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as an OSSymbol.
-    @result The property value found, or zero. It should be released by the caller. */
+ * @abstract Synchronized method to obtain a property from a registry entry's property table.
+ * @discussion This method will look up a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as an OSSymbol.
+ * @result The property value found, or zero. It should be released by the caller. */

-    virtual OSObject * copyProperty( const OSSymbol * aKey) const;
+    virtual OSObject * copyProperty( const OSSymbol * aKey) const;

 /*! @function copyProperty
-    @abstract Synchronized method to obtain a property from a registry entry's property table.
-    @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as an OSString.
-    @result The property value found, or zero. It should be released by the caller. */
+ * @abstract Synchronized method to obtain a property from a registry entry's property table.
+ * @discussion This method will look up a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as an OSString.
+ * @result The property value found, or zero. It should be released by the caller. */

-    virtual OSObject * copyProperty( const OSString * aKey) const;
+    virtual OSObject * copyProperty( const OSString * aKey) const;
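The copy variants differ from getProperty only in ownership: the returned reference stays valid even if the property is replaced concurrently, at the cost of an explicit release. A sketch (the helper and key are hypothetical; OSSafeReleaseNULL is assumed available from OSMetaClass.h):

    static void
    logModel( IORegistryEntry * entry )
    {
        OSObject * prop = entry->copyProperty( "model" );
        OSString * str = OSDynamicCast( OSString, prop );
        if (str) {
            IOLog( "model: %s\n", str->getCStringNoCopy() );
        }
        OSSafeReleaseNULL( prop );  // safe for NULL or non-OSString results
    }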
 /*! @function copyProperty
-    @abstract Synchronized method to obtain a property from a registry entry's property table.
-    @discussion This method will lookup a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table.
-    @param aKey The property's name as a C-string.
-    @result The property value found, or zero. It should be released by the caller. */
+ * @abstract Synchronized method to obtain a property from a registry entry's property table.
+ * @discussion This method will look up a property in a registry entry's property table, using the OSDictionary::getObject semantics, and return a reference to the caller. This method is synchronized with other IORegistryEntry accesses to the property table.
+ * @param aKey The property's name as a C-string.
+ * @result The property value found, or zero. It should be released by the caller. */

-    virtual OSObject * copyProperty( const char * aKey) const;
+    virtual OSObject * copyProperty( const char * aKey) const;

 /*! @function dictionaryWithProperties
-    @abstract Synchronized method to obtain copy a registry entry's property table.
-    @discussion This method will copy a registry entry's property table, using the OSDictionary::withDictionary semantics. This method is synchronized with other IORegistryEntry accesses to the property table. Since OSDictionary will only copy property values by reference, synchronization is not guaranteed to any collection values.
-    @result The created dictionary, or zero on a resource value. It should be released by the caller. */
+ * @abstract Synchronized method to obtain a copy of a registry entry's property table.
+ * @discussion This method will copy a registry entry's property table, using the OSDictionary::withDictionary semantics. This method is synchronized with other IORegistryEntry accesses to the property table. Since OSDictionary will only copy property values by reference, synchronization is not guaranteed to any collection values.
+ * @result The created dictionary, or zero on a resource failure. It should be released by the caller. */

-    virtual OSDictionary * dictionaryWithProperties( void ) const;
+    virtual OSDictionary * dictionaryWithProperties( void ) const;
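A sketch of taking a property-table snapshot; the copy is shallow, so the dictionary itself is caller-owned while the values remain shared:

    // Hypothetical helper: count the entry's properties via a snapshot.
    static void
    logPropertyCount( IORegistryEntry * entry )
    {
        OSDictionary * props = entry->dictionaryWithProperties();
        if (props) {
            IOLog( "%u properties\n", props->getCount() );
            props->release();   // the snapshot itself belongs to the caller
        }
    }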
 /*! @function serializeProperties
-    @abstract Synchronized method to serialize a registry entry's property table.
-    @discussion This method will serialize a registry entry's property table, using the OSDictionary::serialize semantics. This method is synchronized with other IORegistryEntry accesses to the property table. Many non-kernel clients of IOKit read information from the registry via properties, and will invoke this method in a registry entry to create a serialization of all the entry's properties, which is then reconstructed in the client's task as a CFDictionary. This method may be intercepted by subclasses to update their properties or implement a different serialization method, though it is usually better to implement such functionality by creating objects in the property table and implementing their serialize methods, avoiding any need to implement serializeProperties.
-    @param serialize The OSSerialize instance representing the serialization request.
-    @result True on success, false otherwise. */
+ * @abstract Synchronized method to serialize a registry entry's property table.
+ * @discussion This method will serialize a registry entry's property table, using the OSDictionary::serialize semantics. This method is synchronized with other IORegistryEntry accesses to the property table. Many non-kernel clients of IOKit read information from the registry via properties, and will invoke this method in a registry entry to create a serialization of all the entry's properties, which is then reconstructed in the client's task as a CFDictionary. This method may be intercepted by subclasses to update their properties or implement a different serialization method, though it is usually better to implement such functionality by creating objects in the property table and implementing their serialize methods, avoiding any need to implement serializeProperties.
+ * @param serialize The OSSerialize instance representing the serialization request.
+ * @result True on success, false otherwise.
+ */

-    virtual bool serializeProperties( OSSerialize * serialize ) const;
+    virtual bool serializeProperties( OSSerialize * serialize ) const;

-    /* Unsynchronized(!) property table access */
+/* Unsynchronized(!) property table access */

 /*! @function getPropertyTable
-    @abstract Unsynchronized accessor to a registry entry's property table.
-    @discussion This method will return a pointer to the live property table as an OSDictionery. Its use is not recommended in most cases, instead use the synchronized accessors and helper functions of IORegistryEntry to access properties. It can only safely be used by one thread, which usually means it can only be used before a registry entry is entered into the registry.
-    @result A pointer to the property table as an OSDictionary. The pointer is valid while the registry entry is retained, and should not be released by the caller. */
+ * @abstract Unsynchronized accessor to a registry entry's property table.
+ * @discussion This method will return a pointer to the live property table as an OSDictionary. Its use is not recommended in most cases; instead, use the synchronized accessors and helper functions of IORegistryEntry to access properties. It can only safely be used by one thread, which usually means it can only be used before a registry entry is entered into the registry.
+ * @result A pointer to the property table as an OSDictionary. The pointer is valid while the registry entry is retained, and should not be released by the caller. */

-    /* inline */ OSDictionary * getPropertyTable( void ) const;
-    /* { return(fPropertyTable); } */
+/* inline */ OSDictionary * getPropertyTable( void ) const;
+/* { return(fPropertyTable); } */

-    /* Set properties from user level, to be overridden if supported */
+/* Set properties from user level, to be overridden if supported */

 /*! @function setProperties
-    @abstract Optionally supported external method to set properties in a registry entry.
-    @discussion This method is not implemented by IORegistryEntry, but is available to kernel and non-kernel clients to set properties in a registry entry. IOUserClient provides connection based, more controlled access to this functionality and may be more appropriate for many uses, since there is no differentiation between clients available to this method.
-    @param properties Any OSObject subclass, to be interpreted by the implementing method - for example an OSDictionary, OSData etc. may all be appropriate.
-    @result An IOReturn code to be returned to the caller. */
+ * @abstract Optionally supported external method to set properties in a registry entry.
+ * @discussion This method is not implemented by IORegistryEntry, but is available to kernel and non-kernel clients to set properties in a registry entry. IOUserClient provides connection-based, more controlled access to this functionality and may be more appropriate for many uses, since there is no differentiation between clients available to this method.
+ * @param properties Any OSObject subclass, to be interpreted by the implementing method - for example an OSDictionary, OSData etc. may all be appropriate.
+ * @result An IOReturn code to be returned to the caller. */

-    virtual IOReturn setProperties( OSObject * properties );
+    virtual IOReturn setProperties( OSObject * properties );

-    /* Topology */
+/* Topology */

 /*! @function getParentIterator
-    @abstract Returns an iterator over an registry entry's parent entries in a specified plane.
-    @param plane The plane object.
-    @result Returns an iterator over the parents of the registry entry, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */
+ * @abstract Returns an iterator over a registry entry's parent entries in a specified plane.
+ * @param plane The plane object.
+ * @result Returns an iterator over the parents of the registry entry, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */

-    virtual OSIterator * getParentIterator( const IORegistryPlane * plane )
-    const;
-    virtual void applyToParents( IORegistryEntryApplierFunction applier,
-        void * context,
-        const IORegistryPlane * plane ) const;
+    virtual OSIterator * getParentIterator( const IORegistryPlane * plane )
+    const;
+    virtual void applyToParents( IORegistryEntryApplierFunction applier,
+        void * context,
+        const IORegistryPlane * plane ) const;

 /*! @function getParentEntry
-    @abstract Returns an registry entry's first parent entry in a plane.
-    @discussion This function will return the parent to which a registry entry was first attached. Since the majority of registry entrys have only one provider, this is a useful simplification.
-    @param plane The plane object.
-    @result Returns the first parent of the registry entry, or zero if the entry is not attached into the registry in that plane. The parent is retained while the entry is attached, and should not be released by the caller. */
+ * @abstract Returns a registry entry's first parent entry in a plane.
+ * @discussion This function will return the parent to which a registry entry was first attached. Since the majority of registry entries have only one provider, this is a useful simplification.
+ * @param plane The plane object.
+ * @result Returns the first parent of the registry entry, or zero if the entry is not attached into the registry in that plane. The parent is retained while the entry is attached, and should not be released by the caller. */

-    virtual IORegistryEntry * getParentEntry( const IORegistryPlane * plane ) const;
+    virtual IORegistryEntry * getParentEntry( const IORegistryPlane * plane ) const;
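A sketch of walking toward the root with getParentEntry; this is safe only while the entries stay attached, since the loop itself takes no additional retains (the helper is hypothetical):

    static void
    logAncestors( IORegistryEntry * entry )
    {
        for (IORegistryEntry * e = entry; e;
             e = e->getParentEntry( gIOServicePlane )) {
            IOLog( "%s\n", e->getName() );  // getName is declared later in this class
        }
    }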
 /*! @function getChildIterator
-    @abstract Returns an iterator over an registry entry's child entries in a plane.
-    @discussion This method creates an iterator which will return each of a registry entry's child entries in a specified plane.
-    @param plane The plane object.
-    @result Returns an iterator over the children of the entry, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */
+ * @abstract Returns an iterator over a registry entry's child entries in a plane.
+ * @discussion This method creates an iterator which will return each of a registry entry's child entries in a specified plane.
+ * @param plane The plane object.
+ * @result Returns an iterator over the children of the entry, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */

-    virtual OSIterator * getChildIterator( const IORegistryPlane * plane )
-    const;
+    virtual OSIterator * getChildIterator( const IORegistryPlane * plane )
+    const;

 #if XNU_KERNEL_PRIVATE
-    uint32_t getChildCount( const IORegistryPlane * plane ) const;
-    OSArray * copyPropertyKeys(void) const;
+    uint32_t getChildCount( const IORegistryPlane * plane ) const;
+    OSArray * copyPropertyKeys(void) const;
 #endif

-    virtual void applyToChildren( IORegistryEntryApplierFunction applier,
-        void * context,
-        const IORegistryPlane * plane ) const;
+    virtual void applyToChildren( IORegistryEntryApplierFunction applier,
+        void * context,
+        const IORegistryPlane * plane ) const;

 /*! @function getChildEntry
-    @abstract Returns an registry entry's first child entry in a plane.
-    @discussion This function will return the child which first attached to a registry entry.
-    @param plane The plane object.
-    @result Returns the first child of the registry entry, or zero if the entry is not attached into the registry in that plane. The child is retained while the entry is attached, and should not be released by the caller. */
+ * @abstract Returns a registry entry's first child entry in a plane.
+ * @discussion This function will return the child which first attached to a registry entry.
+ * @param plane The plane object.
+ * @result Returns the first child of the registry entry, or zero if the entry is not attached into the registry in that plane. The child is retained while the entry is attached, and should not be released by the caller. */

-    virtual IORegistryEntry * getChildEntry( const IORegistryPlane * plane ) const;
+    virtual IORegistryEntry * getChildEntry( const IORegistryPlane * plane ) const;

 /*! @function isChild
-    @abstract Determines whether a registry entry is the child of another in a plane.
-    @discussion This method called in the parent entry determines if the specified entry is a child, in a plane. Additionally, it can check if the child is the only child of the parent entry.
-    @param child The possible child registry entry.
-    @param plane The plane object.
-    @param onlyChild If true, check also if the child is the only child.
-    @result If the child argument is not a child of the registry entry, false is returned. If onlyChild is true and the child is not the only child of the entry, false is returned, otherwise true is returned. */
+ * @abstract Determines whether a registry entry is the child of another in a plane.
+ * @discussion This method called in the parent entry determines if the specified entry is a child, in a plane. Additionally, it can check if the child is the only child of the parent entry.
+ * @param child The possible child registry entry.
+ * @param plane The plane object.
+ * @param onlyChild If true, check also if the child is the only child.
+ * @result If the child argument is not a child of the registry entry, false is returned. If onlyChild is true and the child is not the only child of the entry, false is returned, otherwise true is returned. */

-    virtual bool isChild( IORegistryEntry * child,
-        const IORegistryPlane * plane,
-        bool onlyChild = false ) const;
+    virtual bool isChild( IORegistryEntry * child,
+        const IORegistryPlane * plane,
+        bool onlyChild = false ) const;
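A sketch of enumerating children with getChildIterator; objects returned by the iterator are retained by it, but the iterator itself must be released (the helper is hypothetical):

    static void
    logChildren( IORegistryEntry * entry )
    {
        OSIterator * iter = entry->getChildIterator( gIOServicePlane );
        if (iter) {
            OSObject * obj;
            while ((obj = iter->getNextObject())) {
                IORegistryEntry * child = OSDynamicCast( IORegistryEntry, obj );
                if (child) {
                    IOLog( "child: %s\n", child->getName() );
                }
            }
            iter->release();
        }
    }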
 /*! @function isParent
-    @abstract Determines whether a registry entry is the parent of another in a plane.
-    @discussion This method called in the child entry determines if the specified entry is a parent, in a plane. Additionally, it can check if the parent is the only parent of the child entry.
-    @param parent The possible parent registry entry.
-    @param plane The plane object.
-    @param onlyParent If true, check also if the parent is the only parent.
-    @result If the parent argument is not a parent of the registry entry, false is returned. If onlyParent is true and the parent is not the only parent of the entry, false is returned, otherwise true is returned. */
+ * @abstract Determines whether a registry entry is the parent of another in a plane.
+ * @discussion This method called in the child entry determines if the specified entry is a parent, in a plane. Additionally, it can check if the parent is the only parent of the child entry.
+ * @param parent The possible parent registry entry.
+ * @param plane The plane object.
+ * @param onlyParent If true, check also if the parent is the only parent.
+ * @result If the parent argument is not a parent of the registry entry, false is returned. If onlyParent is true and the parent is not the only parent of the entry, false is returned, otherwise true is returned. */

-    virtual bool isParent( IORegistryEntry * parent,
-        const IORegistryPlane * plane,
-        bool onlyParent = false ) const;
+    virtual bool isParent( IORegistryEntry * parent,
+        const IORegistryPlane * plane,
+        bool onlyParent = false ) const;

 /*! @function inPlane
-    @abstract Determines whether a registry entry is attached in a plane.
-    @discussion This method determines if the entry is attached in a plane to any other entry. It can also be used to determine if the entry is a member of any plane.
-    @param plane The plane object, 0 indicates any plane.
-    @result If the entry has a parent in the given plane or if plane = 0 then if entry has any parent; return true, otherwise false. */
+ * @abstract Determines whether a registry entry is attached in a plane.
+ * @discussion This method determines if the entry is attached in a plane to any other entry. It can also be used to determine if the entry is a member of any plane.
+ * @param plane The plane object, 0 indicates any plane.
+ * @result True if the entry has a parent in the given plane, or, when plane is 0, if the entry has any parent at all; otherwise false. */

-    virtual bool inPlane( const IORegistryPlane * plane = 0) const;
+    virtual bool inPlane( const IORegistryPlane * plane = 0) const;

 /*! @function getDepth
-    @abstract Counts the maximum number of entries between an entry and the registry root, in a plane.
-    @discussion This method counts the number of entries between and entry and the registry root, in a plane, for each parent of the entry and returns the maximum value.
-    @param plane The plane object.
-    @result The maximum number of entries between the entry and the root. Zero is returned if the entry is not attached in the plane.
-*/
+ * @abstract Counts the maximum number of entries between an entry and the registry root, in a plane.
+ * @discussion This method counts the number of entries between an entry and the registry root, in a plane, for each parent of the entry and returns the maximum value.
+ * @param plane The plane object.
+ * @result The maximum number of entries between the entry and the root. Zero is returned if the entry is not attached in the plane.
*/ - virtual unsigned int getDepth( const IORegistryPlane * plane ) const; + virtual unsigned int getDepth( const IORegistryPlane * plane ) const; - /* Attach / detach */ +/* Attach / detach */ /*! @function attachToParent - @abstract Attaches a entry to a parent entry in a plane. - @discussion This is the usual method of entering an entry into the registry. It is a no-op and success if the entry is already attached to the parent. Attaching the entry into the registry retains both the child and parent while they are attached. This method will call attachToChild in the parent entry if it is not being called from attachToChild. - @param parent The registry entry to attach to. - @param plane The plane object. - @result true on success, or false on a resource failure, or if the parent is the same as the child. */ + * @abstract Attaches an entry to a parent entry in a plane. + * @discussion This is the usual method of entering an entry into the registry. It is a no-op and success if the entry is already attached to the parent. Attaching the entry into the registry retains both the child and parent while they are attached. This method will call attachToChild in the parent entry if it is not being called from attachToChild. + * @param parent The registry entry to attach to. + * @param plane The plane object. + * @result true on success, or false on a resource failure, or if the parent is the same as the child. */ - virtual bool attachToParent( IORegistryEntry * parent, - const IORegistryPlane * plane ); + virtual bool attachToParent( IORegistryEntry * parent, + const IORegistryPlane * plane ); /*! @function detachFromParent - @abstract Detaches an entry from a parent entry in a plane. - @discussion This is the usual method of removing an entry from the registry. It is a no-op if the entry is not attached to the parent. Detaching the entry will release both the child and parent. This method will call detachFromChild in the parent entry if it is not being called from detachFromChild. - @param parent The registry entry to detach from. - @param plane The plane object. */ + * @abstract Detaches an entry from a parent entry in a plane. + * @discussion This is the usual method of removing an entry from the registry. It is a no-op if the entry is not attached to the parent. Detaching the entry will release both the child and parent. This method will call detachFromChild in the parent entry if it is not being called from detachFromChild. + * @param parent The registry entry to detach from. + * @param plane The plane object. */ - virtual void detachFromParent( IORegistryEntry * parent, - const IORegistryPlane * plane ); + virtual void detachFromParent( IORegistryEntry * parent, + const IORegistryPlane * plane ); /*! @function attachToChild - @abstract Method called in the parent entry when a child attaches. - @discussion This method is called in the parent entry when a child attaches, to make overrides possible. This method will also call attachToParent in the child entry if it is not being called from attachToParent. It is a no-op and success if the entry is already a child. Attaching the entry into the registry retains both the child and parent while they are attached. - @param child The registry entry being attached. - @param plane The plane object. - @result true on success, or false on a resource failure, or if the parent is the same as the child. */ + * @abstract Method called in the parent entry when a child attaches.
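To make the attach/detach contract above concrete, a short sketch (illustrative only; `parent` and `child` are hypothetical valid entries): attaching retains both sides of the link, and detaching releases them again.

    // No-op (and success) if already attached; retains both child and parent.
    if (child->attachToParent(parent, gIOServicePlane)) {
        // ... the child is now reachable in the plane ...
        // Releases the references taken at attach time.
        child->detachFromParent(parent, gIOServicePlane);
    }
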
+ * @discussion This method is called in the parent entry when a child attaches, to make overrides possible. This method will also call attachToParent in the child entry if it is not being called from attachToParent. It is a no-op and success if the entry is already a child. Attaching the entry into the registry retains both the child and parent while they are attached. + * @param child The registry entry being attached. + * @param plane The plane object. + * @result true on success, or false on a resource failure, or if the parent is the same as the child. */ - virtual bool attachToChild( IORegistryEntry * child, - const IORegistryPlane * plane ); + virtual bool attachToChild( IORegistryEntry * child, + const IORegistryPlane * plane ); /*! @function detachFromChild - @abstract Detaches a child entry from its parent in a plane. - @discussion This method is called in the parent entry when a child detaches, to make overrides possible. It is a no-op if the entry is not a child of the parent. Detaching the entry will release both the child and parent. This method will call detachFromParent in the child entry if it is not being called from detachFromParent. - @param child The registry entry to detach. - @param plane The plane object. */ + * @abstract Detaches a child entry from its parent in a plane. + * @discussion This method is called in the parent entry when a child detaches, to make overrides possible. It is a no-op if the entry is not a child of the parent. Detaching the entry will release both the child and parent. This method will call detachFromParent in the child entry if it is not being called from detachFromParent. + * @param child The registry entry to detach. + * @param plane The plane object. */ - virtual void detachFromChild( IORegistryEntry * child, - const IORegistryPlane * plane ); + virtual void detachFromChild( IORegistryEntry * child, + const IORegistryPlane * plane ); /*! @function detachAbove - @abstract Detaches an entry from all its parent entries in a plane. - @discussion This method calls detachFromParent in the entry for each of its parent entries in the plane. - @param plane The plane object. */ + * @abstract Detaches an entry from all its parent entries in a plane. + * @discussion This method calls detachFromParent in the entry for each of its parent entries in the plane. + * @param plane The plane object. */ - virtual void detachAbove( const IORegistryPlane * plane ); + virtual void detachAbove( const IORegistryPlane * plane ); /*! @function detachAll - @abstract Detaches an entry and all its children recursively in a plane. - @discussion This method breaks the registry connections for a subtree. detachAbove is called in the entry, and all child entries and their children in the plane. - @param plane The plane object. */ + * @abstract Detaches an entry and all its children recursively in a plane. + * @discussion This method breaks the registry connections for a subtree. detachAbove is called in the entry, and all child entries and their children in the plane. + * @param plane The plane object. */ - virtual void detachAll( const IORegistryPlane * plane ); + virtual void detachAll( const IORegistryPlane * plane ); - /* Name, location and path accessors */ +/* Name, location and path accessors */ /*! @function getName - @abstract Returns the name assigned to the registry entry as a C-string. - @discussion Entries can be named in a particular plane, or globally. 
If the entry is named in plane and the plane is specified that name will be returned, otherwise the global name is returned. The global name defaults to the entry's meta class name if it has not been named. - @param plane The plane object, or zero for the global name. - @result A C-string name, valid while the entry is retained. */ + * @abstract Returns the name assigned to the registry entry as a C-string. + * @discussion Entries can be named in a particular plane, or globally. If the entry is named in plane and the plane is specified that name will be returned, otherwise the global name is returned. The global name defaults to the entry's meta class name if it has not been named. + * @param plane The plane object, or zero for the global name. + * @result A C-string name, valid while the entry is retained. */ - virtual const char * getName( const IORegistryPlane * plane = 0 ) const; + virtual const char * getName( const IORegistryPlane * plane = 0 ) const; /*! @function copyName - @abstract Returns the name assigned to the registry entry as an OSSymbol. - @discussion Entries can be named in a particular plane, or globally. If the entry is named in plane and the plane is specified that name will be returned, otherwise the global name is returned. The global name defaults to the entry's meta class name if it has not been named. - @param plane The plane object, or zero for the global name. - @result A reference to an OSSymbol for the name, which should be released by the caller. */ + * @abstract Returns the name assigned to the registry entry as an OSSymbol. + * @discussion Entries can be named in a particular plane, or globally. If the entry is named in plane and the plane is specified that name will be returned, otherwise the global name is returned. The global name defaults to the entry's meta class name if it has not been named. + * @param plane The plane object, or zero for the global name. + * @result A reference to an OSSymbol for the name, which should be released by the caller. */ - virtual const OSSymbol * copyName( - const IORegistryPlane * plane = 0 ) const; + virtual const OSSymbol * copyName( + const IORegistryPlane * plane = 0 ) const; /*! @function compareNames - @abstract Compares the name of the entry with one or more names, and optionally returns the matching name. - @discussion This method is called during IOService name matching and elsewhere to compare the entry's global name with a list of names, or a single name. A list of names may be passed as any OSCollection of OSStrings, while a single name may be passed an OSString, in the name parameter. compareNames will call the compareName method for each name, for overrides. - @param name The name or names to compare with as any OSCollection (eg. OSArray, OSSet, OSDictionary) of OSStrings, or a single name may be passed an OSString. - @param matched If the caller wants the successfully matched name returned, pass a non-zero pointer for the matched parameter and an OSString will be returned here. It should be released by the caller. - @result True if one of the names compared true with the entry's global name. */ + * @abstract Compares the name of the entry with one or more names, and optionally returns the matching name. + * @discussion This method is called during IOService name matching and elsewhere to compare the entry's global name with a list of names, or a single name. A list of names may be passed as any OSCollection of OSStrings, while a single name may be passed an OSString, in the name parameter. 
compareNames will call the compareName method for each name, for overrides. + * @param name The name or names to compare with as any OSCollection (eg. OSArray, OSSet, OSDictionary) of OSStrings, or a single name may be passed an OSString. + * @param matched If the caller wants the successfully matched name returned, pass a non-zero pointer for the matched parameter and an OSString will be returned here. It should be released by the caller. + * @result True if one of the names compared true with the entry's global name. */ - virtual bool compareNames( OSObject * name, OSString ** matched = 0 ) const; + virtual bool compareNames( OSObject * name, OSString ** matched = 0 ) const; /*! @function compareName - @abstract Compares the name of the entry with one name, and optionally returns the matching name. - @discussion This method is called during IOService name matching and elsewhere from the compareNames method. It should be overridden to provide non-standard name matching. - @param name The name to compare with as an OSString. - @param matched If the caller wants the successfully matched name returned, pass a non-zero pointer for the matched parameter and an OSString will be returned here. It should be released by the caller. Generally, this will be the same as the name parameter, but may not be if wildcards are used. - @result True if the name compared true with the entry's global name. */ + * @abstract Compares the name of the entry with one name, and optionally returns the matching name. + * @discussion This method is called during IOService name matching and elsewhere from the compareNames method. It should be overridden to provide non-standard name matching. + * @param name The name to compare with as an OSString. + * @param matched If the caller wants the successfully matched name returned, pass a non-zero pointer for the matched parameter and an OSString will be returned here. It should be released by the caller. Generally, this will be the same as the name parameter, but may not be if wildcards are used. + * @result True if the name compared true with the entry's global name. */ - virtual bool compareName( OSString * name, OSString ** matched = 0 ) const; + virtual bool compareName( OSString * name, OSString ** matched = 0 ) const; /*! @function setName - @abstract Sets a name for the registry entry, in a particular plane, or globally. - @discussion Entries can be named in a particular plane, or globally. If the plane is specified the name applies only to that plane, otherwise the global name is set. The global name defaults to the entry's meta class name if it has not been named. - @param name An OSSymbol which will be retained. - @param plane The plane object, or zero to set the global name. */ + * @abstract Sets a name for the registry entry, in a particular plane, or globally. + * @discussion Entries can be named in a particular plane, or globally. If the plane is specified the name applies only to that plane, otherwise the global name is set. The global name defaults to the entry's meta class name if it has not been named. + * @param name An OSSymbol which will be retained. + * @param plane The plane object, or zero to set the global name. */ - virtual void setName( const OSSymbol * name, - const IORegistryPlane * plane = 0 ); + virtual void setName( const OSSymbol * name, + const IORegistryPlane * plane = 0 ); /*! @function setName - @abstract Sets a name for the registry entry, in a particular plane, or globally. 
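A quick sketch of the naming calls documented above (illustrative; `entry` is hypothetical). Note the ownership difference: copyName() transfers a reference to the caller, getName() does not.

    entry->setName("my-device");               // C-string is copied; sets the global name
    const char * n = entry->getName();         // borrowed; valid while the entry is retained
    const OSSymbol * sym = entry->copyName();  // caller owns this reference
    if (sym) {
        // ... use sym, e.g. sym->getCStringNoCopy() ...
        sym->release();
    }
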
- @discussion Entries can be named in a particular plane, or globally. If the plane is specified the name applies only to that plane, otherwise the global name is set. The global name defaults to the entry's meta class name if it has not been named. - @param name A const C-string name which will be copied. - @param plane The plane object, or zero to set the global name. */ + * @abstract Sets a name for the registry entry, in a particular plane, or globally. + * @discussion Entries can be named in a particular plane, or globally. If the plane is specified the name applies only to that plane, otherwise the global name is set. The global name defaults to the entry's meta class name if it has not been named. + * @param name A const C-string name which will be copied. + * @param plane The plane object, or zero to set the global name. */ - virtual void setName( const char * name, - const IORegistryPlane * plane = 0 ); + virtual void setName( const char * name, + const IORegistryPlane * plane = 0 ); /*! @function getLocation - @abstract Returns the location string assigned to the registry entry as a C-string. - @discussion Entries can given a location string in a particular plane, or globally. If the entry has had a location set in a plane and the plane is specified that location string will be returned, otherwise the global location string is returned. If no global location string has been set, zero is returned. - @param plane The plane object, or zero for the global name. - @result A C-string location string, valid while the entry is retained, or zero. */ + * @abstract Returns the location string assigned to the registry entry as a C-string. + * @discussion Entries can be given a location string in a particular plane, or globally. If the entry has had a location set in a plane and the plane is specified that location string will be returned, otherwise the global location string is returned. If no global location string has been set, zero is returned. + * @param plane The plane object, or zero for the global name. + * @result A C-string location string, valid while the entry is retained, or zero. */ - virtual const char * getLocation( const IORegistryPlane * plane = 0 ) const; + virtual const char * getLocation( const IORegistryPlane * plane = 0 ) const; /*! @function copyLocation - @abstract Returns the location string assigned to the registry entry as an OSSymbol. - @discussion Entries can given a location string in a particular plane, or globally. If the entry has had a location set in a plane and the plane is specified that location string will be returned, otherwise the global location string is returned. If no global location string has been set, zero is returned. - @param plane The plane object, or zero for the global name. - @result A reference to an OSSymbol for the location if one exists, which should be released by the caller, or zero. */ + * @abstract Returns the location string assigned to the registry entry as an OSSymbol. + * @discussion Entries can be given a location string in a particular plane, or globally. If the entry has had a location set in a plane and the plane is specified that location string will be returned, otherwise the global location string is returned. If no global location string has been set, zero is returned. + * @param plane The plane object, or zero for the global name. + * @result A reference to an OSSymbol for the location if one exists, which should be released by the caller, or zero.
*/ - virtual const OSSymbol * copyLocation( - const IORegistryPlane * plane = 0 ) const; + virtual const OSSymbol * copyLocation( + const IORegistryPlane * plane = 0 ) const; /*! @function setLocation - @abstract Sets a location string for the registry entry, in a particular plane, or globally. - @discussion Entries can be given a location string in a particular plane, or globally. If the plane is specified the location applies only to that plane, otherwise the global location is set. The location string may be used during path lookups of registry entries, to distinguish between sibling entries with the same name. The default IORegistryEntry parsing of location strings expects a list of hex numbers separated by commas, though subclasses of IORegistryEntry might do their own parsing. - @param location A C-string location string which will be copied, or an OSSymbol which will be retained. - @param plane The plane object, or zero to set the global location string. */ + * @abstract Sets a location string for the registry entry, in a particular plane, or globally. + * @discussion Entries can be given a location string in a particular plane, or globally. If the plane is specified the location applies only to that plane, otherwise the global location is set. The location string may be used during path lookups of registry entries, to distinguish between sibling entries with the same name. The default IORegistryEntry parsing of location strings expects a list of hex numbers separated by commas, though subclasses of IORegistryEntry might do their own parsing. + * @param location A C-string location string which will be copied, or an OSSymbol which will be retained. + * @param plane The plane object, or zero to set the global location string. */ - virtual void setLocation( const OSSymbol * location, - const IORegistryPlane * plane = 0 ); - virtual void setLocation( const char * location, - const IORegistryPlane * plane = 0 ); + virtual void setLocation( const OSSymbol * location, + const IORegistryPlane * plane = 0 ); + virtual void setLocation( const char * location, + const IORegistryPlane * plane = 0 ); /*! @function getPath - @abstract Create a path for a registry entry. - @discussion The path for a registry entry is copied to the caller's buffer. The path describes the entry's attachment in a particular plane, which must be specified. The path begins with the plane name followed by a colon, and then followed by '/' separated path components for each of the entries between the root and the registry entry. Each component is constructed with the getPathComponent method called in each entry. An alias may also exist for the entry, which are described as properties in a registry entry found at /aliases in the plane. If a property value interpreted as a path in a call to IORegistryEntry::fromPath yields the entry, then the property name is used as the entry's path. - @param path A char buffer allocated by the caller. - @param length An in/out parameter - the caller sets the length of the buffer available, and getPath returns the total length of the path copied to the buffer. - @param plane The plane object. - @result getPath will fail if the entry is not attached in the plane, or if the buffer is not large enough to contain the path. */ + * @abstract Create a path for a registry entry. + * @discussion The path for a registry entry is copied to the caller's buffer. The path describes the entry's attachment in a particular plane, which must be specified. 
The path begins with the plane name followed by a colon, and then followed by '/' separated path components for each of the entries between the root and the registry entry. Each component is constructed with the getPathComponent method called in each entry. An alias may also exist for the entry; aliases are described as properties in a registry entry found at /aliases in the plane. If a property value interpreted as a path in a call to IORegistryEntry::fromPath yields the entry, then the property name is used as the entry's path. + * @param path A char buffer allocated by the caller. + * @param length An in/out parameter - the caller sets the length of the buffer available, and getPath returns the total length of the path copied to the buffer. + * @param plane The plane object. + * @result getPath will fail if the entry is not attached in the plane, or if the buffer is not large enough to contain the path. */ - virtual bool getPath( char * path, int * length, - const IORegistryPlane * plane) const; + virtual bool getPath( char * path, int * length, + const IORegistryPlane * plane) const; /*! @function getPathComponent - @abstract Create a path component for a registry entry. - @discussion Each component of a path created with getPath is created with getPathComponent. The default implementation concatenates the entry's name in the the plane, with the "at" symbol and the location string of the entry in the plane if it has been set. - @param path A char buffer allocated by the caller. - @param length An in/out parameter - the caller sets the length of the buffer available, and getPathComponent returns the total length of the path component copied to the buffer. - @param plane The plane object. - @result true if the path fits into the supplied buffer or false on a overflow. */ + * @abstract Create a path component for a registry entry. + * @discussion Each component of a path created with getPath is created with getPathComponent. The default implementation concatenates the entry's name in the plane, with the "at" symbol and the location string of the entry in the plane if it has been set. + * @param path A char buffer allocated by the caller. + * @param length An in/out parameter - the caller sets the length of the buffer available, and getPathComponent returns the total length of the path component copied to the buffer. + * @param plane The plane object. + * @result true if the path fits into the supplied buffer or false on an overflow. */ - virtual bool getPathComponent( char * path, int * length, - const IORegistryPlane * plane ) const; + virtual bool getPathComponent( char * path, int * length, + const IORegistryPlane * plane ) const; /*! @function fromPath - @abstract Looks up a registry entry by path. - @discussion This function parses paths to lookup registry entries. The path may begin with the <plane name>: created by getPath, or the plane may be set by the caller. If there are characters remaining unparsed after an entry has been looked up, this may be considered an invalid lookup, or those characters may be passed back to the caller and the lookup successful. - @param path A C-string path. - @param plane The plane to lookup up the path, or zero, in which case the path must begin with the plane name. - @param residualPath If the path may contain residual characters after the last path component, the residual will be copied back to the caller's residualPath buffer. If there are residual characters and no residual buffer is specified, fromPath will fail.
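A sketch of getPath with its in/out length convention (illustrative; the 512-byte buffer is an arbitrary choice):

    char path[512];
    int  len = sizeof(path);
    if (entry->getPath(path, &len, gIOServicePlane)) {
        // 'len' now holds the length copied; the call fails rather than truncating.
        IOLog("path: %s\n", path);
    }
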
- @param residualLength An in/out parameter - the caller sets the length of the residual buffer available, and fromPath returns the total length of the residual path copied to the buffer. If there is no residualBuffer (residualPath = 0) then residualLength may be zero also. - @param fromEntry The lookup will proceed rooted at this entry if non-zero, otherwise it proceeds from the root of the plane. - @result A retained registry entry is returned on success, or zero on failure. The caller should release the entry. */ - - static IORegistryEntry * fromPath( const char * path, - const IORegistryPlane * plane = 0, - char * residualPath = 0, - int * residualLength = 0, - IORegistryEntry * fromEntry = 0 ); + * @abstract Looks up a registry entry by path. + * @discussion This function parses paths to look up registry entries. The path may begin with the <plane name>: created by getPath, or the plane may be set by the caller. If there are characters remaining unparsed after an entry has been looked up, this may be considered an invalid lookup, or those characters may be passed back to the caller and the lookup successful. + * @param path A C-string path. + * @param plane The plane to look up the path, or zero, in which case the path must begin with the plane name. + * @param residualPath If the path may contain residual characters after the last path component, the residual will be copied back to the caller's residualPath buffer. If there are residual characters and no residual buffer is specified, fromPath will fail. + * @param residualLength An in/out parameter - the caller sets the length of the residual buffer available, and fromPath returns the total length of the residual path copied to the buffer. If there is no residualBuffer (residualPath = 0) then residualLength may be zero also. + * @param fromEntry The lookup will proceed rooted at this entry if non-zero, otherwise it proceeds from the root of the plane. + * @result A retained registry entry is returned on success, or zero on failure. The caller should release the entry. */ + + static IORegistryEntry * fromPath( const char * path, + const IORegistryPlane * plane = 0, + char * residualPath = 0, + int * residualLength = 0, + IORegistryEntry * fromEntry = 0 ); /*! @function fromPath - @abstract Looks up a registry entry by relative path. - @discussion This function looks up a entry below the called entry by a relative path. It is just a convenience that calls IORegistryEntry::fromPath with this as the fromEntry parameter. - @param path See IORegistryEntry::fromPath. - @param plane See IORegistryEntry::fromPath. - @param residualPath See IORegistryEntry::fromPath. - @param residualLength See IORegistryEntry::fromPath. - @result See IORegistryEntry::fromPath. */ - - virtual IORegistryEntry * childFromPath( const char * path, - const IORegistryPlane * plane = 0, - char * residualPath = 0, - int * residualLength = 0 ); + * @abstract Looks up a registry entry by relative path. + * @discussion This function looks up an entry below the called entry by a relative path. It is just a convenience that calls IORegistryEntry::fromPath with this as the fromEntry parameter. + * @param path See IORegistryEntry::fromPath. + * @param plane See IORegistryEntry::fromPath. + * @param residualPath See IORegistryEntry::fromPath. + * @param residualLength See IORegistryEntry::fromPath. + * @result See IORegistryEntry::fromPath.
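And the reverse direction, a fromPath lookup sketch (illustrative; the root path is just an example): the returned entry is retained and must be released by the caller.

    IORegistryEntry * found = IORegistryEntry::fromPath("IOService:/");
    if (found) {
        // ... inspect the entry ...
        found->release();   // fromPath returns a retained object
    }
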
*/ + + virtual IORegistryEntry * childFromPath( const char * path, + const IORegistryPlane * plane = 0, + char * residualPath = 0, + int * residualLength = 0 ); /*! @function dealiasPath - @abstract Strips any aliases from the head of path and returns the full path. - @discussion If the path specified begins with an alias found in the /aliases entry, the value of the alias is returned, and a pointer into the passed in path after the alias is passed back to the caller. If an alias is not found, zero is returned and the path parameter is unchanged. - @param opath An in/out paramter - the caller passes in a pointer to a C-string pointer to a path. If an alias is found, dealiasPath returns a pointer into the path just beyond the end of the alias. - @param plane A plane object must be specified. - @result A C-string pointer to the value of the alias if one is found, or zero if not. */ + * @abstract Strips any aliases from the head of path and returns the full path. + * @discussion If the path specified begins with an alias found in the /aliases entry, the value of the alias is returned, and a pointer into the passed in path after the alias is passed back to the caller. If an alias is not found, zero is returned and the path parameter is unchanged. + * @param opath An in/out parameter - the caller passes in a pointer to a C-string pointer to a path. If an alias is found, dealiasPath returns a pointer into the path just beyond the end of the alias. + * @param plane A plane object must be specified. + * @result A C-string pointer to the value of the alias if one is found, or zero if not. */ - static const char * dealiasPath( const char ** opath, - const IORegistryPlane * plane ); + static const char * dealiasPath( const char ** opath, + const IORegistryPlane * plane ); /*! @function makePlane - @abstract Constructs an IORegistryPlane object. - @discussion Most planes in IOKit are created by the OS, although other planes may be created. - @param name A C-string name for the new plane, to be copied. - @result A new instance of an IORegistryPlane, or zero on failure. */ + * @abstract Constructs an IORegistryPlane object. + * @discussion Most planes in IOKit are created by the OS, although other planes may be created. + * @param name A C-string name for the new plane, to be copied. + * @result A new instance of an IORegistryPlane, or zero on failure. */ - static const IORegistryPlane * makePlane( const char * name ); + static const IORegistryPlane * makePlane( const char * name ); /*! @abstract Returns an ID for the registry entry that is global to all tasks. - @discussion The entry ID returned by getRegistryEntryID can be used to identify a registry entry across all tasks. A registry entry may be looked up by its entry ID by creating a matching dictionary with IORegistryEntryIDMatching() in user space, or IOService::registryEntryIDMatching() in the kernel, to be used with the IOKit matching functions. The ID is valid only until the machine reboots. + * @discussion The entry ID returned by getRegistryEntryID can be used to identify a registry entry across all tasks. A registry entry may be looked up by its entry ID by creating a matching dictionary with IORegistryEntryIDMatching() in user space, or IOService::registryEntryIDMatching() in the kernel, to be used with the IOKit matching functions. The ID is valid only until the machine reboots.
+ * @result An ID for the registry entry, assigned when the entry is first attached in the registry. */ - uint64_t getRegistryEntryID( void ); + uint64_t getRegistryEntryID( void ); - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - /* * * * * * * * * * * * internals * * * * * * * * * * * */ - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * internals * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - virtual bool init( IORegistryEntry * from, - const IORegistryPlane * inPlane ); + virtual bool init( IORegistryEntry * from, + const IORegistryPlane * inPlane ); #ifdef XNU_KERNEL_PRIVATE public: #else private: #endif - static IORegistryEntry * initialize( void ); + static LIBKERN_RETURNS_NOT_RETAINED IORegistryEntry * initialize( void ); #ifdef XNU_KERNEL_PRIVATE - SInt32 getRegistryEntryGenerationCount( void ) const; + SInt32 getRegistryEntryGenerationCount( void ) const; #endif private: - inline bool arrayMember( OSArray * set, - const IORegistryEntry * member, - unsigned int * index = 0 ) const; - - bool makeLink( IORegistryEntry * to, - unsigned int relation, - const IORegistryPlane * plane ) const; - void breakLink( IORegistryEntry * to, - unsigned int relation, - const IORegistryPlane * plane ) const; - - APPLE_KEXT_COMPATIBILITY_VIRTUAL + inline bool arrayMember( OSArray * set, + const IORegistryEntry * member, + unsigned int * index = 0 ) const; + + bool makeLink( IORegistryEntry * to, + unsigned int relation, + const IORegistryPlane * plane ) const; + void breakLink( IORegistryEntry * to, + unsigned int relation, + const IORegistryPlane * plane ) const; + + APPLE_KEXT_COMPATIBILITY_VIRTUAL OSArray * getParentSetReference( const IORegistryPlane * plane ) - const; - - APPLE_KEXT_COMPATIBILITY_VIRTUAL - OSArray * getChildSetReference( const IORegistryPlane * plane ) - const; + const; - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IORegistryEntry * getChildFromComponent( const char ** path, - const IORegistryPlane * plane ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + OSArray * getChildSetReference( const IORegistryPlane * plane ) + const; - APPLE_KEXT_COMPATIBILITY_VIRTUAL - const OSSymbol * hasAlias( const IORegistryPlane * plane, - char * opath = 0, int * length = 0 ) const; + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IORegistryEntry * getChildFromComponent( const char ** path, + const IORegistryPlane * plane ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - const char * matchPathLocation( const char * cmp, - const IORegistryPlane * plane ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + LIBKERN_RETURNS_NOT_RETAINED + const OSSymbol * hasAlias( const IORegistryPlane * plane, + char * opath = 0, int * length = 0 ) const; + APPLE_KEXT_COMPATIBILITY_VIRTUAL + const char * matchPathLocation( const char * cmp, + const IORegistryPlane * plane ); }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /*! @class IORegistryIterator : public OSIterator - @abstract An iterator over the registry. - @discussion An iterator that can traverse the children or parents of a registry entry in a plane, and recurse. Access to the registry is protected against multiple threads, but an IORegistryIterator instance is for use by one thread only. */ + * @abstract An iterator over the registry. + * @discussion An iterator that can traverse the children or parents of a registry entry in a plane, and recurse. 
Access to the registry is protected against multiple threads, but an IORegistryIterator instance is for use by one thread only. */ class IORegistryIterator : public OSIterator { - OSDeclareAbstractStructors(IORegistryIterator) + OSDeclareAbstractStructors(IORegistryIterator) private: - struct IORegCursor { - IORegCursor * next; - IORegistryEntry * current; - OSIterator * iter; - }; - IORegCursor start; - IORegCursor * where; - IORegistryEntry * root; - OSOrderedSet * done; - const IORegistryPlane * plane; - IOOptionBits options; - - virtual void free( void ) APPLE_KEXT_OVERRIDE; + struct IORegCursor { + IORegCursor * next; + IORegistryEntry * current; + OSIterator * iter; + }; + IORegCursor start; + IORegCursor * where; + IORegistryEntry * root; + OSOrderedSet * done; + const IORegistryPlane * plane; + IOOptionBits options; + + virtual void free( void ) APPLE_KEXT_OVERRIDE; public: /*! @function iterateOver - @abstract Create an iterator rooted at a given registry entry. - @discussion This method creates an IORegistryIterator that is set up with options to iterate children or parents of a root entry, and to recurse automatically into entries as they are returned, or only when instructed. The iterator object keeps track of entries that have been recursed into previously to avoid loops. - @param start The root entry to begin the iteration at. - @param plane A plane object must be specified. - @param options kIORegistryIterateRecursively may be set to recurse automatically into each entry as it is returned. This option affects the behaviour of the getNextObject method, which is defined in the OSIterator superclass. Other methods will override this behaviour. kIORegistryIterateParents may be set to iterate the parents of each entry, by default the children are iterated. - @result A created IORegistryIterator instance, to be released by the caller when it has finished with it. */ + * @abstract Create an iterator rooted at a given registry entry. + * @discussion This method creates an IORegistryIterator that is set up with options to iterate children or parents of a root entry, and to recurse automatically into entries as they are returned, or only when instructed. The iterator object keeps track of entries that have been recursed into previously to avoid loops. + * @param start The root entry to begin the iteration at. + * @param plane A plane object must be specified. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into each entry as it is returned. This option affects the behaviour of the getNextObject method, which is defined in the OSIterator superclass. Other methods will override this behaviour. kIORegistryIterateParents may be set to iterate the parents of each entry, by default the children are iterated. + * @result A created IORegistryIterator instance, to be released by the caller when it has finished with it. */ - static IORegistryIterator * iterateOver( IORegistryEntry * start, - const IORegistryPlane * plane, - IOOptionBits options = 0 ); + static IORegistryIterator * iterateOver( IORegistryEntry * start, + const IORegistryPlane * plane, + IOOptionBits options = 0 ); /*! @function iterateOver - @abstract Create an iterator rooted at the registry root. - @discussion This method creates an IORegistryIterator that is set up with options to iterate children of the registry root entry, and to recurse automatically into entries as they are returned, or only when instructed. 
The iterator object keeps track of entries that have been recursed into previously to avoid loops. - @param plane A plane object must be specified. - @param options kIORegistryIterateRecursively may be set to recurse automatically into each entry as it is returned. This option affects the behaviour of the getNextObject method, which is defined in the OSIterator superclass. Other methods will override this behaviour. kIORegistryIterateParents may be set to iterate the parents of each entry, by default the children are iterated. - @result A created IORegistryIterator instance, to be released by the caller when it has finished with it. */ + * @abstract Create an iterator rooted at the registry root. + * @discussion This method creates an IORegistryIterator that is set up with options to iterate children of the registry root entry, and to recurse automatically into entries as they are returned, or only when instructed. The iterator object keeps track of entries that have been recursed into previously to avoid loops. + * @param plane A plane object must be specified. + * @param options kIORegistryIterateRecursively may be set to recurse automatically into each entry as it is returned. This option affects the behaviour of the getNextObject method, which is defined in the OSIterator superclass. Other methods will override this behaviour. kIORegistryIterateParents may be set to iterate the parents of each entry, by default the children are iterated. + * @result A created IORegistryIterator instance, to be released by the caller when it has finished with it. */ - static IORegistryIterator * iterateOver( const IORegistryPlane * plane, - IOOptionBits options = 0 ); + static IORegistryIterator * iterateOver( const IORegistryPlane * plane, + IOOptionBits options = 0 ); /*! @function getNextObject - @abstract Return the next object in the registry iteration. - @discussion This method calls either getNextObjectFlat or getNextObjectRecursive depending on the options the iterator was created with. This implements the OSIterator defined getNextObject method. The object returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. - @result The next registry entry in the iteration (the current entry), or zero if the iteration has finished at this level of recursion. The entry returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. */ + * @abstract Return the next object in the registry iteration. + * @discussion This method calls either getNextObjectFlat or getNextObjectRecursive depending on the options the iterator was created with. This implements the OSIterator defined getNextObject method. The object returned is retained while the iterator is pointing at it (it's the current entry), or recursing into it. The caller should not release it. + * @result The next registry entry in the iteration (the current entry), or zero if the iteration has finished at this level of recursion. The entry returned is retained while the iterator is pointing at it (it's the current entry), or recursing into it. The caller should not release it. */ - virtual IORegistryEntry * getNextObject( void ) APPLE_KEXT_OVERRIDE; + virtual IORegistryEntry * getNextObject( void ) APPLE_KEXT_OVERRIDE; /*! @function getNextObjectFlat - @abstract Return the next object in the registry iteration, ignoring the kIORegistryIterateRecursively option.
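As a usage note for the iterator methods above (an editorial sketch, not patch content): a recursive walk of the service plane, with the documented isValid/reset recovery when the registry changes mid-iteration.

    IORegistryIterator * iter = IORegistryIterator::iterateOver(
        gIOServicePlane, kIORegistryIterateRecursively);
    if (iter) {
        IORegistryEntry * e;
        while ((e = iter->getNextObject())) {
            // Current entries are retained by the iterator; do not release them here.
            IOLog("%s\n", e->getName(gIOServicePlane));
        }
        if (!iter->isValid()) {
            iter->reset();   // registry changed underneath us; restart if a full pass is required
        }
        iter->release();
    }
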
- @discussion This method returns the next child, or parent if the kIORegistryIterateParents option was used to create the iterator, of the current root entry. The object returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. - @result The next registry entry in the iteration (the current entry), or zero if the iteration has finished at this level of recursion, or the iteration is invalid (see isValid). The entry returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. */ + * @abstract Return the next object in the registry iteration, ignoring the kIORegistryIterateRecursively option. + * @discussion This method returns the next child, or parent if the kIORegistryIterateParents option was used to create the iterator, of the current root entry. The object returned is retained while the iterator is pointing at it (it's the current entry), or recursing into it. The caller should not release it. + * @result The next registry entry in the iteration (the current entry), or zero if the iteration has finished at this level of recursion, or the iteration is invalid (see isValid). The entry returned is retained while the iterator is pointing at it (it's the current entry), or recursing into it. The caller should not release it. */ - virtual IORegistryEntry * getNextObjectFlat( void ); + virtual IORegistryEntry * getNextObjectFlat( void ); /*! @function getNextObjectRecursive - @abstract Return the next object in the registry iteration, and enter it. - @discussion If the iterator has a current entry, and the iterator has not already entered previously, enterEntry is called to recurse into it, ie. make it the new root, and the next child, or parent if the kIORegistryIterateParents option was used to create the iterator, at this new level of recursion is returned. If there is no current entry at this level of recursion, exitEntry is called and the process repeats, until the iteration returns to the entry the iterator was created with and zero is returned. The object returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. - @result The next registry entry in the iteration (the current entry), or zero if its finished, or the iteration is invalid (see isValid). The entry returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. */ + * @abstract Return the next object in the registry iteration, and enter it. + * @discussion If the iterator has a current entry, and the iterator has not already entered previously, enterEntry is called to recurse into it, ie. make it the new root, and the next child, or parent if the kIORegistryIterateParents option was used to create the iterator, at this new level of recursion is returned. If there is no current entry at this level of recursion, exitEntry is called and the process repeats, until the iteration returns to the entry the iterator was created with and zero is returned. The object returned is retained while the iterator is pointing at it (it's the current entry), or recursing into it. The caller should not release it. + * @result The next registry entry in the iteration (the current entry), or zero if it's finished, or the iteration is invalid (see isValid).
The entry returned is retained while the iterator is pointing at it (it's the current entry), or recursing into it. The caller should not release it. */ - virtual IORegistryEntry * getNextObjectRecursive( void ); + virtual IORegistryEntry * getNextObjectRecursive( void ); /*! @function getCurrentEntry - @abstract Return the current entry in the registry iteration. - @discussion This method returns the current entry, last returned by getNextObject et al. The object returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. If the iteration is no longer valid (see isValid), the current entry is zero. - @result The current registry entry in the iteration, or zero if the last iteration returned zero, or the iteration is invalid (see isValid). The entry returned is retained while the iterator is pointing at it (its the current entry), or recursing into it. The caller should not release it. */ + * @abstract Return the current entry in the registry iteration. + * @discussion This method returns the current entry, last returned by getNextObject et al. The object returned is retained while the iterator is pointing at it (it's the current entry), or recursing into it. The caller should not release it. If the iteration is no longer valid (see isValid), the current entry is zero. + * @result The current registry entry in the iteration, or zero if the last iteration returned zero, or the iteration is invalid (see isValid). The entry returned is retained while the iterator is pointing at it (it's the current entry), or recursing into it. The caller should not release it. */ - virtual IORegistryEntry * getCurrentEntry( void ); + virtual IORegistryEntry * getCurrentEntry( void ); /*! @function enterEntry - @abstract Recurse into the current entry in the registry iteration. - @discussion This method makes the current entry, ie. the last entry returned by getNextObject et al., the root in a new level of recursion. */ + * @abstract Recurse into the current entry in the registry iteration. + * @discussion This method makes the current entry, ie. the last entry returned by getNextObject et al., the root in a new level of recursion. */ + + virtual void enterEntry( void ); - virtual void enterEntry( void ); - /*! @function enterEntry - @abstract Recurse into the current entry in the registry iteration. - @discussion This method recurses into an entry as with enterEntry, but also switches from the current plane to a new one set by the caller. - @param plane The new plane to switch into. */ + * @abstract Recurse into the current entry in the registry iteration. + * @discussion This method recurses into an entry as with enterEntry, but also switches from the current plane to a new one set by the caller. + * @param plane The new plane to switch into. */ - virtual void enterEntry( const IORegistryPlane * plane ); + virtual void enterEntry( const IORegistryPlane * plane ); /*! @function exitEntry - @abstract Exits a level of recursion, restoring the current entry. - @discussion This method undoes an enterEntry, restoring the current entry.
If there are no more levels of recursion to exit false is returned, otherwise true is returned. + * @result true if a level of recursion was undone, false if no recursive levels are left in the iteration. */ - virtual bool exitEntry( void ); + virtual bool exitEntry( void ); /*! @function reset - @abstract Exits all levels of recursion, restoring the iterator to its state at creation. - @discussion This method exits all levels of recursion, and restores the iterator to its state at creation. */ + * @abstract Exits all levels of recursion, restoring the iterator to its state at creation. + * @discussion This method exits all levels of recursion, and restores the iterator to its state at creation. */ - virtual void reset( void ) APPLE_KEXT_OVERRIDE; + virtual void reset( void ) APPLE_KEXT_OVERRIDE; /*! @function isValid - @abstract Checks that no registry changes have invalidated the iteration. - @discussion If a registry iteration is invalidated by changes to the registry, it will be made invalid, the currentEntry will be considered zero, and further calls to getNextObject et al. will return zero. The iterator should be reset to restart the iteration when this happens. - @result false if the iterator has been invalidated by changes to the registry, true otherwise. */ + * @abstract Checks that no registry changes have invalidated the iteration. + * @discussion If a registry iteration is invalidated by changes to the registry, it will be made invalid, the currentEntry will be considered zero, and further calls to getNextObject et al. will return zero. The iterator should be reset to restart the iteration when this happens. + * @result false if the iterator has been invalidated by changes to the registry, true otherwise. */ - virtual bool isValid( void ) APPLE_KEXT_OVERRIDE; + virtual bool isValid( void ) APPLE_KEXT_OVERRIDE; /*! @function iterateAll - @abstract Iterates all entries (with getNextObject) and returns a set of all returned entries. - @discussion This method will reset, then iterate all entries in the iteration (with getNextObject). - @result A set of entries returned by the iteration. The caller should release the set when it has finished with it. Zero is returned on a resource failure. */ + * @abstract Iterates all entries (with getNextObject) and returns a set of all returned entries. + * @discussion This method will reset, then iterate all entries in the iteration (with getNextObject). + * @result A set of entries returned by the iteration. The caller should release the set when it has finished with it. Zero is returned on a resource failure. */ - virtual OSOrderedSet * iterateAll( void ); + virtual OSOrderedSet * iterateAll( void ); }; #endif /* _IOKIT_IOREGISTRYENTRY_H */ diff --git a/iokit/IOKit/IOReportMacros.h b/iokit/IOKit/IOReportMacros.h index f3b5a015a..db8e8aed9 100644 --- a/iokit/IOKit/IOReportMacros.h +++ b/iokit/IOKit/IOReportMacros.h @@ -2,7 +2,7 @@ * Copyright (c) 2012-2014 Apple Computer, Inc. All Rights Reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -41,19 +41,19 @@ extern "C" { #endif /* - Background - - These macros allow non-I/O Kit software to generate IOReporting - reports. Clients must prevent concurrent access to any given - report buffer from multiple threads. - - While these macros allow non-I/O Kit software to participate - in IOReporting, an IOService instance must lend its driver ID, - respond to the appropriate IOService overrides, and shuttle - data back and forth. In some cases, it may be useful to have - the I/O Kit driver initialize the report buffer with the - appropriate macro. -*/ + * Background + * + * These macros allow non-I/O Kit software to generate IOReporting + * reports. Clients must prevent concurrent access to any given + * report buffer from multiple threads. + * + * While these macros allow non-I/O Kit software to participate + * in IOReporting, an IOService instance must lend its driver ID, + * respond to the appropriate IOService overrides, and shuttle + * data back and forth. In some cases, it may be useful to have + * the I/O Kit driver initialize the report buffer with the + * appropriate macro. + */ /* ----- Reporting Single Integers (SimpleReport) ----- */ @@ -84,19 +84,19 @@ do { \ IOReportElement *__elem = (IOReportElement *)(buf); \ IOSimpleReportValues *__vals; \ if ((bufSize) >= SIMPLEREPORT_BUFSIZE) { \ - __elem->provider_id = (providerID); \ - __elem->channel_id = (channelID); \ - __elem->channel_type.report_format = kIOReportFormatSimple; \ - __elem->channel_type.reserved = 0; \ - __elem->channel_type.categories = (cats); \ - __elem->channel_type.nelements = 1; \ - __elem->channel_type.element_idx = 0; \ - __elem->timestamp = 0; \ - __vals = (IOSimpleReportValues*)&__elem->values; \ - __vals->simple_value = kIOReportInvalidIntValue; \ + __elem->provider_id = (providerID); \ + __elem->channel_id = (channelID); \ + __elem->channel_type.report_format = kIOReportFormatSimple; \ + __elem->channel_type.reserved = 0; \ + __elem->channel_type.categories = (cats); \ + __elem->channel_type.nelements = 1; \ + __elem->channel_type.element_idx = 0; \ + __elem->timestamp = 0; \ + __vals = (IOSimpleReportValues*)&__elem->values; \ + __vals->simple_value = kIOReportInvalidIntValue; \ } \ else { \ - IOREPORT_ABORT("bufSize is smaller than the required size\n"); \ + IOREPORT_ABORT("bufSize is smaller than the required size\n"); \ } \ } while(0) @@ -161,8 +161,8 @@ do { \ #define SIMPLEREPORT_UPDATERES(action, result) \ do { \ if (((action) == kIOReportGetDimensions) || ((action) == kIOReportCopyChannelData)) { \ - int *__nElements = (int *)(result); \ - *__nElements += 1; \ + int *__nElements = (int *)(result); \ + *__nElements += 1; \ } \ } while (0) @@ -194,16 +194,16 @@ do { \ #define SIMPLEREPORT_GETVALUE(simp_buf) \ (((IOSimpleReportValues*)&(((IOReportElement*)(simp_buf))->values)) \ - ->simple_value) + ->simple_value) /* ----- State Machine Reporting (StateReport) ----- */ // Internal struct for StateReport 
typedef struct { - uint16_t curr_state; - uint64_t update_ts; - IOReportElement elem[]; // Array of elements + uint16_t curr_state; + uint64_t update_ts; + IOReportElement elem[]; // Array of elements } IOStateReportInfo; /* @@ -235,27 +235,27 @@ do { \ IOStateReportValues *__rep; \ IOReportElement *__elem; \ if ((bufSize) >= STATEREPORT_BUFSIZE(nstates)) { \ - for (unsigned __no = 0; __no < (nstates); __no++) { \ - __elem = &(__info->elem[__no]); \ - __rep = (IOStateReportValues *) &(__elem->values); \ - __elem->provider_id = (providerID); \ - __elem->channel_id = (channelID); \ - __elem->channel_type.report_format = kIOReportFormatState; \ - __elem->channel_type.reserved = 0; \ - __elem->channel_type.categories = (cats); \ - __elem->channel_type.nelements = (nstates); \ - __elem->channel_type.element_idx = __no; \ - __elem->timestamp = 0; \ - __rep->state_id = __no; \ - __rep->intransitions = 0; \ - __rep->upticks = 0; \ - __rep->last_intransition = 0; \ - } \ - __info->curr_state = 0; \ - __info->update_ts = 0; \ + for (unsigned __no = 0; __no < (nstates); __no++) { \ + __elem = &(__info->elem[__no]); \ + __rep = (IOStateReportValues *) &(__elem->values); \ + __elem->provider_id = (providerID); \ + __elem->channel_id = (channelID); \ + __elem->channel_type.report_format = kIOReportFormatState; \ + __elem->channel_type.reserved = 0; \ + __elem->channel_type.categories = (cats); \ + __elem->channel_type.nelements = (nstates); \ + __elem->channel_type.element_idx = __no; \ + __elem->timestamp = 0; \ + __rep->state_id = __no; \ + __rep->intransitions = 0; \ + __rep->upticks = 0; \ + __rep->last_intransition = 0; \ + } \ + __info->curr_state = 0; \ + __info->update_ts = 0; \ } \ else { \ - IOREPORT_ABORT("bufSize is smaller than the required size\n"); \ + IOREPORT_ABORT("bufSize is smaller than the required size\n"); \ } \ } while(0) @@ -273,8 +273,8 @@ do { \ IOStateReportInfo *__info = (IOStateReportInfo *)(state_buf); \ IOStateReportValues *__rep; \ if ((stateIdx) < __info->elem[0].channel_type.nelements) { \ - __rep = (IOStateReportValues*) &(__info->elem[(stateIdx)].values); \ - __rep->state_id = (stateID); \ + __rep = (IOStateReportValues*) &(__info->elem[(stateIdx)].values); \ + __rep->state_id = (stateID); \ } \ } while (0) @@ -291,14 +291,14 @@ do { \ IOStateReportInfo *__info = (IOStateReportInfo *)(state_buf); \ IOStateReportValues *__rep; \ if ((newStateIdx) < __info->elem[0].channel_type.nelements ) { \ - __rep = (IOStateReportValues*) &(__info->elem[__info->curr_state].values); \ - if (__info->update_ts) \ - __rep->upticks += (changeTime) - __info->update_ts; \ - __info->elem[(newStateIdx)].timestamp = (changeTime); \ - __rep = (IOStateReportValues*) &(__info->elem[(newStateIdx)].values); \ - __rep->intransitions++; \ - __info->curr_state = (newStateIdx); \ - __info->update_ts = (changeTime); \ + __rep = (IOStateReportValues*) &(__info->elem[__info->curr_state].values); \ + if (__info->update_ts) \ + __rep->upticks += (changeTime) - __info->update_ts; \ + __info->elem[(newStateIdx)].timestamp = (changeTime); \ + __rep = (IOStateReportValues*) &(__info->elem[(newStateIdx)].values); \ + __rep->intransitions++; \ + __info->curr_state = (newStateIdx); \ + __info->update_ts = (changeTime); \ } \ } while(0) @@ -319,11 +319,11 @@ do { \ (size2cpy) = __info->elem[0].channel_type.nelements * sizeof(IOReportElement); \ (ptr2cpy) = (void *) &__info->elem[0]; \ if (__info->update_ts) { \ - __elem = &__info->elem[__info->curr_state]; \ - __state = (IOStateReportValues *)&__elem->values; \ - 
__elem->timestamp = (currentTime); \ - __state->upticks += (currentTime) - __info->update_ts; \ - __info->update_ts = (currentTime); \ + __elem = &__info->elem[__info->curr_state]; \ + __state = (IOStateReportValues *)&__elem->values; \ + __elem->timestamp = (currentTime); \ + __state->upticks += (currentTime) - __info->update_ts; \ + __info->update_ts = (currentTime); \ } \ } while(0) @@ -342,8 +342,8 @@ do { \ IOReportElement *__elem; \ int *__nElements = (int *)(result); \ if (((action) == kIOReportGetDimensions) || ((action) == kIOReportCopyChannelData)) { \ - __elem = &(__info->elem[0]); \ - *__nElements += __elem->channel_type.nelements; \ + __elem = &(__info->elem[0]); \ + *__nElements += __elem->channel_type.nelements; \ } \ } while (0) @@ -373,8 +373,8 @@ do { \ */ #define STATEREPORT_GETTRANSITIONS(state_buf, stateIdx) \ (((stateIdx) < ((IOStateReportInfo *)(state_buf))->elem[0].channel_type.nelements) \ - ? ((IOStateReportValues*)&(((IOStateReportInfo*)(state_buf))->elem[(stateIdx)].values))->intransitions \ - : kIOReportInvalidValue) + ? ((IOStateReportValues*)&(((IOStateReportInfo*)(state_buf))->elem[(stateIdx)].values))->intransitions \ + : kIOReportInvalidValue) /* * Get the total number of ticks spent in a given state. @@ -384,8 +384,8 @@ do { \ */ #define STATEREPORT_GETTICKS(state_buf, stateIdx) \ (((stateIdx) < ((IOStateReportInfo*)(state_buf))->elem[0].channel_type.nelements) \ - ? ((IOStateReportValues*)&(((IOStateReportInfo*)(state_buf))->elem[(stateIdx)].values))->upticks \ - : kIOReportInvalidValue) + ? ((IOStateReportValues*)&(((IOStateReportInfo*)(state_buf))->elem[(stateIdx)].values))->upticks \ + : kIOReportInvalidValue) /* ----- Reporting an Array of Integers (SimpleArrayReport) ----- */ @@ -398,7 +398,7 @@ do { \ #define SIMPLEARRAY_BUFSIZE(nValues) \ ((((nValues)/IOR_VALUES_PER_ELEMENT) + (((nValues) % IOR_VALUES_PER_ELEMENT) ? 1:0)) \ - * sizeof(IOReportElement)) + * sizeof(IOReportElement)) /* * Initialize a buffer for use as a SimpleArrayReport. @@ -420,27 +420,27 @@ do { \ IOSimpleArrayReportValues *__rep; \ IOReportElement *__elem; \ uint32_t __nElems = (((nValues) / IOR_VALUES_PER_ELEMENT) + \ - (((nValues) % IOR_VALUES_PER_ELEMENT) ? 1 : 0)); \ + (((nValues) % IOR_VALUES_PER_ELEMENT) ? 
1 : 0)); \ if ((bufSize) >= SIMPLEARRAY_BUFSIZE(nValues)) { \ - for (unsigned __no = 0; __no < __nElems; __no++) { \ - __elem = &(((IOReportElement *)(buf))[__no]); \ - __rep = (IOSimpleArrayReportValues *) &(__elem->values); \ - __elem->provider_id = (providerID); \ - __elem->channel_id = (channelID); \ - __elem->channel_type.report_format = kIOReportFormatSimpleArray; \ - __elem->channel_type.reserved = 0; \ - __elem->channel_type.categories = (cats); \ - __elem->channel_type.nelements = (__nElems); \ - __elem->channel_type.element_idx = __no; \ - __elem->timestamp = 0; \ - __rep->simple_values[0] = kIOReportInvalidIntValue; \ - __rep->simple_values[1] = kIOReportInvalidIntValue; \ - __rep->simple_values[2] = kIOReportInvalidIntValue; \ - __rep->simple_values[3] = kIOReportInvalidIntValue; \ - } \ + for (unsigned __no = 0; __no < __nElems; __no++) { \ + __elem = &(((IOReportElement *)(buf))[__no]); \ + __rep = (IOSimpleArrayReportValues *) &(__elem->values); \ + __elem->provider_id = (providerID); \ + __elem->channel_id = (channelID); \ + __elem->channel_type.report_format = kIOReportFormatSimpleArray; \ + __elem->channel_type.reserved = 0; \ + __elem->channel_type.categories = (cats); \ + __elem->channel_type.nelements = (__nElems); \ + __elem->channel_type.element_idx = __no; \ + __elem->timestamp = 0; \ + __rep->simple_values[0] = kIOReportInvalidIntValue; \ + __rep->simple_values[1] = kIOReportInvalidIntValue; \ + __rep->simple_values[2] = kIOReportInvalidIntValue; \ + __rep->simple_values[3] = kIOReportInvalidIntValue; \ + } \ } \ else { \ - IOREPORT_ABORT("bufSize is smaller than the required size\n"); \ + IOREPORT_ABORT("bufSize is smaller than the required size\n"); \ } \ } while(0) @@ -448,18 +448,18 @@ do { \ /* SimpleArrayReport helpers */ #define __SA_FINDREP(array_buf, idx) \ - IOSimpleArrayReportValues *__rep; \ - IOReportElement *__elem; \ - unsigned __elemIdx = (idx) / IOR_VALUES_PER_ELEMENT; \ - unsigned __valueIdx = (idx) % IOR_VALUES_PER_ELEMENT; \ - __elem = &(((IOReportElement *)(array_buf))[0]); \ - if (__elemIdx < __elem->channel_type.nelements) { \ - __elem = &(((IOReportElement *)(array_buf))[__elemIdx]); \ - __rep = (IOSimpleArrayReportValues *) &(__elem->values); \ + IOSimpleArrayReportValues *__rep; \ + IOReportElement *__elem; \ + unsigned __elemIdx = (idx) / IOR_VALUES_PER_ELEMENT; \ + unsigned __valueIdx = (idx) % IOR_VALUES_PER_ELEMENT; \ + __elem = &(((IOReportElement *)(array_buf))[0]); \ + if (__elemIdx < __elem->channel_type.nelements) { \ + __elem = &(((IOReportElement *)(array_buf))[__elemIdx]); \ + __rep = (IOSimpleArrayReportValues *) &(__elem->values); \ #define __SA_MAXINDEX(array_buf) \ - ((((IOReportElement*)(array_buf))->channel_type.nelements) \ - * IOR_VALUES_PER_ELEMENT) - 1 + ((((IOReportElement*)(array_buf))->channel_type.nelements) \ + * IOR_VALUES_PER_ELEMENT) - 1 /* * Set a value at a specified index in a SimpleArrayReport. 
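
A minimal driver-side sketch of how the state and array report buffers in the surrounding hunks are sized, initialized, and updated; it is not part of the patch. The INIT/SETSTATE parameter orders follow the shipping IOReportMacros.h (the macro headers are elided by these hunks), and the provider/channel IDs and IOMalloc-based allocation are illustrative only:

    uint64_t provider_id = 0; // typically the publishing service's registry entry ID
    uint64_t channel_id  = IOREPORT_MAKEID('M','y','C','h','a','n','0','1');

    // State report: three states, timestamped against mach_absolute_time().
    size_t ssz  = STATEREPORT_BUFSIZE(3);
    void  *sbuf = IOMalloc(ssz);
    STATEREPORT_INIT(3, sbuf, ssz, provider_id, channel_id, kIOReportCategoryPower);
    STATEREPORT_SETSTATE(sbuf, 1, mach_absolute_time()); // enter state 1

    // Simple array report: six values round up to two IOReportElements.
    size_t asz  = SIMPLEARRAY_BUFSIZE(6);
    void  *abuf = IOMalloc(asz);
    SIMPLEARRAY_INIT(6, abuf, asz, provider_id, channel_id, kIOReportCategoryTraffic);
    SIMPLEARRAY_SETVALUE(abuf, 4, 100);     // index 4 lands in elem[1], slot 0
    SIMPLEARRAY_INCREMENTVALUE(abuf, 4, 1); // SIMPLEARRAY_GETVALUE(abuf, 4) == 101
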
@@ -471,7 +471,7 @@ do { \ #define SIMPLEARRAY_SETVALUE(array_buf, idx, newValue) \ do { \ __SA_FINDREP((array_buf), (idx)) \ - __rep->simple_values[__valueIdx] = (newValue); \ + __rep->simple_values[__valueIdx] = (newValue); \ } \ } while(0) @@ -485,7 +485,7 @@ do { \ #define SIMPLEARRAY_INCREMENTVALUE(array_buf, idx, value) \ do { \ __SA_FINDREP((array_buf), (idx)) \ - __rep->simple_values[__valueIdx] += (value); \ + __rep->simple_values[__valueIdx] += (value); \ } \ } while(0) @@ -523,7 +523,7 @@ do { \ int *__nElements = (int *)(result); \ __elem = &(((IOReportElement *)(array_buf))[0]); \ if (((action) == kIOReportGetDimensions) || ((action) == kIOReportCopyChannelData)) { \ - *__nElements += __elem->channel_type.nelements; \ + *__nElements += __elem->channel_type.nelements; \ } \ } while (0) @@ -554,16 +554,16 @@ do { \ #define SIMPLEARRAY_GETVALUE(array_buf, idx) \ (((idx) > __SA_MAXINDEX(array_buf) || (idx) < 0) ? kIOReportInvalidIntValue : \ ((IOSimpleArrayReportValues*)&( \ - ((IOReportElement*)(array_buf))[(idx) / IOR_VALUES_PER_ELEMENT].values)) \ - ->simple_values[(idx) % IOR_VALUES_PER_ELEMENT]) + ((IOReportElement*)(array_buf))[(idx) / IOR_VALUES_PER_ELEMENT].values)) \ + ->simple_values[(idx) % IOR_VALUES_PER_ELEMENT]) /* ----- Histogram Reporting (HistogramReport) ----- */ // Internal struct for HistogramReport typedef struct { - int bucketWidth; - IOReportElement elem[]; // Array of elements + int bucketWidth; + IOReportElement elem[]; // Array of elements } IOHistReportInfo; /* @@ -595,23 +595,23 @@ do { \ IOReportElement *__elem; \ IOHistogramReportValues *__rep; \ if ((bufSize) >= HISTREPORT_BUFSIZE(nbuckets)) { \ - __info->bucketWidth = (bktSize); \ - for (unsigned __no = 0; __no < (nbuckets); __no++) { \ - __elem = &(__info->elem[__no]); \ - __rep = (IOHistogramReportValues *) &(__elem->values); \ - __elem->provider_id = (providerID); \ - __elem->channel_id = (channelID); \ - __elem->channel_type.report_format = kIOReportFormatHistogram; \ - __elem->channel_type.reserved = 0; \ - __elem->channel_type.categories = (cats); \ - __elem->channel_type.nelements = (nbuckets); \ - __elem->channel_type.element_idx = __no; \ - __elem->timestamp = 0; \ - memset(__rep, '\0', sizeof(IOHistogramReportValues)); \ - } \ + __info->bucketWidth = (bktSize); \ + for (unsigned __no = 0; __no < (nbuckets); __no++) { \ + __elem = &(__info->elem[__no]); \ + __rep = (IOHistogramReportValues *) &(__elem->values); \ + __elem->provider_id = (providerID); \ + __elem->channel_id = (channelID); \ + __elem->channel_type.report_format = kIOReportFormatHistogram; \ + __elem->channel_type.reserved = 0; \ + __elem->channel_type.categories = (cats); \ + __elem->channel_type.nelements = (nbuckets); \ + __elem->channel_type.element_idx = __no; \ + __elem->timestamp = 0; \ + memset(__rep, '\0', sizeof(IOHistogramReportValues)); \ + } \ } \ else { \ - IOREPORT_ABORT("bufSize is smaller than the required size\n"); \ + IOREPORT_ABORT("bufSize is smaller than the required size\n"); \ } \ } while (0) @@ -628,22 +628,22 @@ do { \ IOReportElement *__elem; \ IOHistogramReportValues *__rep; \ for (unsigned __no = 0; __no < __info->elem[0].channel_type.nelements; __no++) { \ - if ((value) <= __info->bucketWidth * (__no+1)) { \ - __elem = &(__info->elem[__no]); \ - __rep = (IOHistogramReportValues *) &(__elem->values); \ - if (__rep->bucket_hits == 0) { \ - __rep->bucket_min = __rep->bucket_max = (value); \ - } \ - else if ((value) < __rep->bucket_min) { \ - __rep->bucket_min = (value); \ - } \ - else if ((value) > 
__rep->bucket_max) { \
- __rep->bucket_max = (value); \
- } \
- __rep->bucket_sum += (value); \
- __rep->bucket_hits++; \
- break; \
- } \
+ if ((value) <= __info->bucketWidth * (__no+1)) { \
+ __elem = &(__info->elem[__no]); \
+ __rep = (IOHistogramReportValues *) &(__elem->values); \
+ if (__rep->bucket_hits == 0) { \
+ __rep->bucket_min = __rep->bucket_max = (value); \
+ } \
+ else if ((value) < __rep->bucket_min) { \
+ __rep->bucket_min = (value); \
+ } \
+ else if ((value) > __rep->bucket_max) { \
+ __rep->bucket_max = (value); \
+ } \
+ __rep->bucket_sum += (value); \
+ __rep->bucket_hits++; \
+ break; \
+ } \
 } \
} while (0)

@@ -678,7 +678,7 @@ do { \
 IOHistReportInfo *__info = (IOHistReportInfo *)(hist_buf); \
 int *__nElements = (int *)(result); \
 if (((action) == kIOReportGetDimensions) || ((action) == kIOReportCopyChannelData)) { \
- *__nElements += __info->elem[0].channel_type.nelements; \
+ *__nElements += __info->elem[0].channel_type.nelements; \
 } \
} while (0)

@@ -703,5 +703,3 @@ do { \
 #endif

 #endif // _IOREPORT_MACROS_H_
-
-
diff --git a/iokit/IOKit/IOReportTypes.h b/iokit/IOKit/IOReportTypes.h
index 3d65c3480..dd81759a4 100644
--- a/iokit/IOKit/IOReportTypes.h
+++ b/iokit/IOKit/IOReportTypes.h
@@ -38,28 +38,28 @@ extern "C" {
 #define IOR_VALUES_PER_ELEMENT 4

 /*! @const kIOReportInvalidValue
- @const kIOReportInvalidIntValue
- @abstract cardinal value used to indicate data errors
-
- @discussion
- kIOReportInvalidValue and kIOReportInvalidIntValue have the
- same bit pattern so that clients checking for one or the other
- don't have to worry about getting the signedness right.
-*/
+ * @const kIOReportInvalidIntValue
+ * @abstract cardinal value used to indicate data errors
+ *
+ * @discussion
+ * kIOReportInvalidValue and kIOReportInvalidIntValue have the
+ * same bit pattern so that clients checking for one or the other
+ * don't have to worry about getting the signedness right.
+ */
 #define kIOReportInvalidIntValue INT64_MIN
 #define kIOReportInvalidValue (uint64_t)kIOReportInvalidIntValue

 /*! @typedef IOReportCategories
- @abstract encapsulate important, multi-purpose "tags" for channels
-
- @discussion
- IOReportCategories is the type for the .categories field of
- IOReportChanelType. These categories are inteded to empower a
- limited number of clients to retrieve a broad range of channels
- without knowing much about them. They can be OR'd together as
- needed. Groups and subgroups are a more extensible mechanism
- for aggregating channels produced by different drivers.
-*/
+ * @abstract encapsulate important, multi-purpose "tags" for channels
+ *
+ * @discussion
+ * IOReportCategories is the type for the .categories field of
+ * IOReportChannelType. These categories are intended to empower a
+ * limited number of clients to retrieve a broad range of channels
+ * without knowing much about them. They can be OR'd together as
+ * needed. Groups and subgroups are a more extensible mechanism
+ * for aggregating channels produced by different drivers.
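+ *
+ * (Illustration: since each category is a single bit, a hypothetical bus
+ * controller could publish one channel under
+ * kIOReportCategoryPower | kIOReportCategoryTraffic and be retrieved by
+ * clients watching either category.)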
+ */ typedef uint16_t IOReportCategories; #define kIOReportCategoryPower (1 << 1) // and energy #define kIOReportCategoryTraffic (1 << 2) // I/O at any level @@ -76,40 +76,40 @@ typedef uint16_t IOReportCategories; // IOReportChannelType.report_format typedef uint8_t IOReportFormat; enum { - kIOReportInvalidFormat = 0, - kIOReportFormatSimple = 1, - kIOReportFormatState = 2, - kIOReportFormatHistogram = 3, - kIOReportFormatSimpleArray = 4 + kIOReportInvalidFormat = 0, + kIOReportFormatSimple = 1, + kIOReportFormatState = 2, + kIOReportFormatHistogram = 3, + kIOReportFormatSimpleArray = 4 }; // simple report values typedef struct { - int64_t simple_value; - uint64_t reserved1; - uint64_t reserved2; - uint64_t reserved3; + int64_t simple_value; + uint64_t reserved1; + uint64_t reserved2; + uint64_t reserved3; } __attribute((packed)) IOSimpleReportValues; // simple value array typedef struct { - int64_t simple_values[IOR_VALUES_PER_ELEMENT]; + int64_t simple_values[IOR_VALUES_PER_ELEMENT]; } __attribute((packed)) IOSimpleArrayReportValues; // state report values typedef struct { - uint64_t state_id; // 0..N-1 or 8-char code (see MAKEID()) - uint64_t intransitions; // number of transitions into this state - uint64_t upticks; // ticks spent in state (local timebase) - uint64_t last_intransition; // ticks at last in-transition + uint64_t state_id; // 0..N-1 or 8-char code (see MAKEID()) + uint64_t intransitions; // number of transitions into this state + uint64_t upticks; // ticks spent in state (local timebase) + uint64_t last_intransition;// ticks at last in-transition } __attribute((packed)) IOStateReportValues; // histogram report values typedef struct { - uint64_t bucket_hits; - int64_t bucket_min; - int64_t bucket_max; - int64_t bucket_sum; + uint64_t bucket_hits; + int64_t bucket_min; + int64_t bucket_max; + int64_t bucket_sum; } __attribute((packed)) IOHistogramReportValues; @@ -117,51 +117,51 @@ typedef struct { // configuration actions generally change future behavior typedef uint32_t IOReportConfigureAction; enum { - // basics (in common operational order) - kIOReportEnable = 0x01, - kIOReportGetDimensions = 0x02, - kIOReportDisable = 0x00, + // basics (in common operational order) + kIOReportEnable = 0x01, + kIOReportGetDimensions = 0x02, + kIOReportDisable = 0x00, - // Enable/disable modifiers - kIOReportNotifyHubOnChange = 0x10, // triggered polling + // Enable/disable modifiers + kIOReportNotifyHubOnChange = 0x10, // triggered polling - kIOReportTraceOnChange = 0x20 // kdebug.h tracing + kIOReportTraceOnChange = 0x20 // kdebug.h tracing }; // update actions should not have observable side effects typedef uint32_t IOReportUpdateAction; enum { - kIOReportCopyChannelData = 1, - kIOReportTraceChannelData = 2 + kIOReportCopyChannelData = 1, + kIOReportTraceChannelData = 2 }; typedef struct { - uint8_t report_format; // Histogram, StateResidency, etc. - uint8_t reserved; // must be zero - uint16_t categories; // power, traffic, etc (omnibus obs.) - uint16_t nelements; // internal size of channel - - // only meaningful in the data pipeline - int16_t element_idx; // 0..nelements-1 - // -1..-(nelements) = invalid (13127884) + uint8_t report_format; // Histogram, StateResidency, etc. + uint8_t reserved; // must be zero + uint16_t categories; // power, traffic, etc (omnibus obs.) 
+ uint16_t nelements; // internal size of channel
+
+ // only meaningful in the data pipeline
+ int16_t element_idx; // 0..nelements-1
+ // -1..-(nelements) = invalid (13127884)
 } __attribute((packed)) IOReportChannelType;

 /*!
- @define IOREPORT_MAKECHID
- @abstract convert up to 8 printable characters into a 64-bit channel ID
- @param - printable chars to be packed into a channel ID
- @result a 64-bit channel ID with an implicit ASCII name
- @discussion A simple example:
- IOREPORT_MAKECHID('H', 'i', ' ', 'w', 'o', 'r', 'l', 'd');
- will evaluate to 0x686920776f726c64. Any NUL bytes are
- ignored (by libIOReport) for naming purposes, but will
- appear in the channel ID. Using a non-NUL non-printable
- character will disable the implicit name. Putting NUL
- bytes first eliminates trailing zeros when the channel
- ID is printed as hex. For example:
- IORERPORT_MAKECHID('\0','\0','n','x','f','e','r','s');
- To see the text, use xxd -r -p # not -rp; see 12976241
-*/
+ * @define IOREPORT_MAKEID
+ * @abstract convert up to 8 printable characters into a 64-bit channel ID
+ * @param - printable chars to be packed into a channel ID
+ * @result a 64-bit channel ID with an implicit ASCII name
+ * @discussion A simple example:
+ * IOREPORT_MAKEID('H', 'i', ' ', 'w', 'o', 'r', 'l', 'd');
+ * will evaluate to 0x686920776f726c64. Any NUL bytes are
+ * ignored (by libIOReport) for naming purposes, but will
+ * appear in the channel ID. Using a non-NUL non-printable
+ * character will disable the implicit name. Putting NUL
+ * bytes first eliminates trailing zeros when the channel
+ * ID is printed as hex. For example:
+ * IOREPORT_MAKEID('\0','\0','n','x','f','e','r','s');
+ * To see the text, use xxd -r -p # not -rp; see 12976241
+ */
 #define __IOR_lshiftchr(c, chshift) ((uint64_t)(c) << (8*(chshift)))

 #define IOREPORT_MAKEID(A, B, C, D, E, F, G, H) \
 (__IOR_lshiftchr(A, 7) | __IOR_lshiftchr(B, 6) | __IOR_lshiftchr(C, 5) \
@@ -169,42 +169,42 @@ typedef struct {
 | __IOR_lshiftchr(G, 1) | __IOR_lshiftchr(H, 0))

 typedef struct {
- uint64_t channel_id;
- IOReportChannelType channel_type;
+ uint64_t channel_id;
+ IOReportChannelType channel_type;
 } IOReportChannel;

 typedef struct {
- uint32_t nchannels;
- IOReportChannel channels[];
+ uint32_t nchannels;
+ IOReportChannel channels[];
 } IOReportChannelList;

 typedef struct {
- uint64_t provider_id;
- IOReportChannel channel;
+ uint64_t provider_id;
+ IOReportChannel channel;
 } IOReportInterest;

 typedef struct {
- uint32_t ninterests;
- IOReportInterest interests[];
+ uint32_t ninterests;
+ IOReportInterest interests[];
 } IOReportInterestList;

 typedef struct {
- uint64_t v[IOR_VALUES_PER_ELEMENT];
+ uint64_t v[IOR_VALUES_PER_ELEMENT];
 } __attribute((packed)) IOReportElementValues;

 typedef struct {
- uint64_t provider_id;
- uint64_t channel_id;
- IOReportChannelType channel_type;
- uint64_t timestamp; // mach_absolute_time()
- IOReportElementValues values;
+ uint64_t provider_id;
+ uint64_t channel_id;
+ IOReportChannelType channel_type;
+ uint64_t timestamp;// mach_absolute_time()
+ IOReportElementValues values;
 } __attribute((packed)) IOReportElement;

 /*
- IOReporting unit type and constants
-*/
+ * IOReporting unit type and constants
+ */

 // 1.
Mechanism @@ -214,11 +214,11 @@ typedef struct { typedef uint64_t IOReportUnit; typedef uint64_t IOReportUnits; // deprecated typo, please switch #define __IOR_MAKEUNIT(quantity, scale) \ - (((IOReportUnit)quantity << 56) | (uint64_t)scale) + (((IOReportUnit)quantity << 56) | (uint64_t)scale) #define IOREPORT_GETUNIT_QUANTITY(unit) \ - ((IOReportQuantity)((uint64_t)unit >> 56) & 0xff) + ((IOReportQuantity)((uint64_t)unit >> 56) & 0xff) #define IOREPORT_GETUNIT_SCALE(unit) \ - ((IOReportScaleFactor)unit & 0x00ffffffffffffff) + ((IOReportScaleFactor)unit & 0x00ffffffffffffff) // 8b quantity ID | 32b const val + 8b*2^10 + 8b*2^n | 8b cardinal | 8b unused typedef uint8_t IOReportQuantity; // SI "quantity" is what's measured @@ -227,30 +227,30 @@ typedef uint64_t IOReportScaleFactor; // See for a list // of quantities and their symbols. enum { - // used by state reports, etc - kIOReportQuantityUndefined = 0, - - kIOReportQuantityTime = 1, // Seconds - kIOReportQuantityPower = 2, // Watts - kIOReportQuantityEnergy = 3, // Joules - kIOReportQuantityCurrent = 4, // Amperes - kIOReportQuantityVoltage = 5, // Volts - kIOReportQuantityCapacitance = 6, // Farad - kIOReportQuantityInductance = 7, // Henry - kIOReportQuantityFrequency = 8, // Hertz - kIOReportQuantityData = 9, // bits/bytes (see scale) - kIOReportQuantityTemperature = 10, // Celsius (not Kelvin :) - - kIOReportQuantityEventCount = 100, - kIOReportQuantityPacketCount = 101, - kIOReportQuantityCPUInstrs = 102 + // used by state reports, etc + kIOReportQuantityUndefined = 0, + + kIOReportQuantityTime = 1,// Seconds + kIOReportQuantityPower = 2,// Watts + kIOReportQuantityEnergy = 3,// Joules + kIOReportQuantityCurrent = 4,// Amperes + kIOReportQuantityVoltage = 5,// Volts + kIOReportQuantityCapacitance = 6,// Farad + kIOReportQuantityInductance = 7,// Henry + kIOReportQuantityFrequency = 8,// Hertz + kIOReportQuantityData = 9,// bits/bytes (see scale) + kIOReportQuantityTemperature = 10,// Celsius (not Kelvin :) + + kIOReportQuantityEventCount = 100, + kIOReportQuantityPacketCount = 101, + kIOReportQuantityCPUInstrs = 102 }; /* A number of units end up with both IEC (2^n) and SI (10^n) scale factors. - For example, the "MB" of a 1.44 MB floppy or a 1024MHz clock. We - thus support separate 2^n and 10^n factors. The exponent encoding - scheme is modeled loosely on single-precision IEEE 754. + * For example, the "MB" of a 1.44 MB floppy or a 1024MHz clock. We + * thus support separate 2^n and 10^n factors. The exponent encoding + * scheme is modeled loosely on single-precision IEEE 754. */ #define kIOReportScaleConstMask 0x000000007fffffff // constant ("uint31") #define kIOReportScaleOneOver (1LL << 31) // 1/constant @@ -265,56 +265,56 @@ enum { /* - Scales are described as a factor times unity: - 1ms = kIOReportScaleMilli * s - - A value expressed in a scaled unit can be scaled to unity via - multiplication by the constant: - 100ms * kIOReportScaleMilli [1e-3] = 0.1s. -*/ + * Scales are described as a factor times unity: + * 1ms = kIOReportScaleMilli * s + * + * A value expressed in a scaled unit can be scaled to unity via + * multiplication by the constant: + * 100ms * kIOReportScaleMilli [1e-3] = 0.1s. 
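+ *
+ * (Worked example: kIOReportScaleMilli encodes the biased SI exponent -3,
+ * so a value of 100 on a channel published in kIOReportUnit_ms, i.e.
+ * __IOR_MAKEUNIT(kIOReportQuantityTime, kIOReportScaleMilli), reads as
+ * 100 * 10^-3 = 0.1 s.)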
+ */ // SI / decimal #define kIOReportScalePico ((-12LL + kIOReportExpZeroOffset) \ - << kIOReportScaleSIShift) + << kIOReportScaleSIShift) #define kIOReportScaleNano ((-9LL + kIOReportExpZeroOffset) \ - << kIOReportScaleSIShift) + << kIOReportScaleSIShift) #define kIOReportScaleMicro ((-6LL + kIOReportExpZeroOffset) \ - << kIOReportScaleSIShift) + << kIOReportScaleSIShift) #define kIOReportScaleMilli ((-3LL + kIOReportExpZeroOffset) \ - << kIOReportScaleSIShift) + << kIOReportScaleSIShift) #define kIOReportScaleUnity 0 // 10^0 = 2^0 = 1 // unity = 0 is a special case for which we give up exp = -127 #define kIOReportScaleKilo ((3LL + kIOReportExpZeroOffset) \ - << kIOReportScaleSIShift) + << kIOReportScaleSIShift) #define kIOReportScaleMega ((6LL + kIOReportExpZeroOffset) \ - << kIOReportScaleSIShift) + << kIOReportScaleSIShift) #define kIOReportScaleGiga ((9LL + kIOReportExpZeroOffset) \ - << kIOReportScaleSIShift) + << kIOReportScaleSIShift) #define kIOReportScaleTera ((12LL + kIOReportExpZeroOffset) \ - << kIOReportScaleSIShift) + << kIOReportScaleSIShift) // IEC / computer / binary // It's not clear we'll ever use 2^(-n), but 1..2^~120 should suffice. #define kIOReportScaleBits kIOReportScaleUnity #define kIOReportScaleBytes ((3LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) // (bytes have to be added to the exponents up front, can't just OR in) #define kIOReportScaleKibi ((10LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) #define kIOReportScaleKiBytes ((13LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) #define kIOReportScaleMebi ((20LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) #define kIOReportScaleMiBytes ((23LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) #define kIOReportScaleGibi ((30LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) #define kIOReportScaleGiBytes ((33LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) #define kIOReportScaleTebi ((40LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) #define kIOReportScaleTiBytes ((43LL + kIOReportExpZeroOffset) \ - << kIOReportScaleIECShift) + << kIOReportScaleIECShift) // can't encode more than 2^125 (keeping bits & bytes inside -126..128) // Also, IOReportScaleValue() is currently limited internally by uint64_t. @@ -342,62 +342,62 @@ enum { // 2. 
Unit constants #define kIOReportUnitNone __IOR_MAKEUNIT(kIOReportQuantityUndefined, \ - kIOReportScaleUnity) + kIOReportScaleUnity) #define kIOReportUnit_s __IOR_MAKEUNIT(kIOReportQuantityTime, \ - kIOReportScaleUnity) + kIOReportScaleUnity) #define kIOReportUnit_ms __IOR_MAKEUNIT(kIOReportQuantityTime, \ - kIOReportScaleMilli) + kIOReportScaleMilli) #define kIOReportUnit_us __IOR_MAKEUNIT(kIOReportQuantityTime, \ - kIOReportScaleMicro) + kIOReportScaleMicro) #define kIOReportUnit_ns __IOR_MAKEUNIT(kIOReportQuantityTime, \ - kIOReportScaleNano) + kIOReportScaleNano) #define kIOReportUnit_J __IOR_MAKEUNIT(kIOReportQuantityEnergy, \ - kIOReportScaleUnity) + kIOReportScaleUnity) #define kIOReportUnit_mJ __IOR_MAKEUNIT(kIOReportQuantityEnergy, \ - kIOReportScaleMilli) + kIOReportScaleMilli) #define kIOReportUnit_uJ __IOR_MAKEUNIT(kIOReportQuantityEnergy, \ - kIOReportScaleMicro) + kIOReportScaleMicro) #define kIOReportUnit_nJ __IOR_MAKEUNIT(kIOReportQuantityEnergy, \ - kIOReportScaleNano) + kIOReportScaleNano) #define kIOReportUnit_pJ __IOR_MAKEUNIT(kIOReportQuantityEnergy, \ - kIOReportScalePico) + kIOReportScalePico) #define kIOReportUnitHWTicks __IOR_MAKEUNIT(kIOReportQuantityTime, \ - kIOReportScaleMachHWTicks) + kIOReportScaleMachHWTicks) #define kIOReportUnit24MHzTicks __IOR_MAKEUNIT(kIOReportQuantityTime, \ - kIOReportScale24MHz) + kIOReportScale24MHz) #define kIOReportUnit1GHzTicks __IOR_MAKEUNIT(kIOReportQuantityTime, \ - kIOReportScale1GHz) + kIOReportScale1GHz) #define kIOReportUnitBits __IOR_MAKEUNIT(kIOReportQuantityData, \ - kIOReportScaleBits) + kIOReportScaleBits) #define kIOReportUnitBytes __IOR_MAKEUNIT(kIOReportQuantityData, \ - kIOReportScaleBytes) + kIOReportScaleBytes) #define kIOReportUnit_KiB __IOR_MAKEUNIT(kIOReportQuantityData, \ - kIOReportScaleKiBytes) + kIOReportScaleKiBytes) #define kIOReportUnit_MiB __IOR_MAKEUNIT(kIOReportQuantityData, \ - kIOReportScaleMiBytes) + kIOReportScaleMiBytes) #define kIOReportUnit_GiB __IOR_MAKEUNIT(kIOReportQuantityData, \ - kIOReportScaleGiBytes) + kIOReportScaleGiBytes) #define kIOReportUnit_TiB __IOR_MAKEUNIT(kIOReportQuantityData, \ - kIOReportScaleTiBytes) + kIOReportScaleTiBytes) #define kIOReportUnitEvents __IOR_MAKEUNIT(kIOReportQuantityEventCount, \ - kIOReportScaleUnity) + kIOReportScaleUnity) #define kIOReportUnitPackets __IOR_MAKEUNIT(kIOReportQuantityPacketCount, \ - kIOReportScaleUnity) + kIOReportScaleUnity) #define kIOReportUnitInstrs __IOR_MAKEUNIT(kIOReportQuantityCPUInstrs, \ - kIOReportScaleUnity) + kIOReportScaleUnity) #define kIOReportUnit_KI __IOR_MAKEUNIT(kIOReportQuantityCPUInstrs, \ - kIOReportScaleKilo) + kIOReportScaleKilo) #define kIOReportUnit_MI __IOR_MAKEUNIT(kIOReportQuantityCPUInstrs, \ - kIOReportScaleMega) + kIOReportScaleMega) #define kIOReportUnit_GI __IOR_MAKEUNIT(kIOReportQuantityCPUInstrs, \ - kIOReportScaleGiga) + kIOReportScaleGiga) // Please file bugs (xnu | IOReporting) for additional units. diff --git a/iokit/IOKit/IOReturn.h b/iokit/IOKit/IOReturn.h index 3f1b2877f..94347a72d 100644 --- a/iokit/IOKit/IOReturn.h +++ b/iokit/IOKit/IOReturn.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * HISTORY */ - + /* * Core IOReturn values. Others may be family defined. */ @@ -42,7 +42,7 @@ extern "C" { #include -typedef kern_return_t IOReturn; +typedef kern_return_t IOReturn; #ifndef sys_iokit #define sys_iokit err_system(0x38) @@ -68,7 +68,7 @@ typedef kern_return_t IOReturn; #define sub_iokit_nvme err_sub(28) #endif #define sub_iokit_thunderbolt err_sub(29) -#define sub_iokit_graphics_acceleration err_sub(30) +#define sub_iokit_graphics_acceleration err_sub(30) #define sub_iokit_keystore err_sub(31) #ifdef PRIVATE #define sub_iokit_smc err_sub(32) @@ -87,48 +87,48 @@ typedef kern_return_t IOReturn; #define sub_iokit_vendor_specific err_sub(-2) #define sub_iokit_reserved err_sub(-1) -#define iokit_common_err(return) (sys_iokit|sub_iokit_common|return) -#define iokit_family_err(sub,return) (sys_iokit|sub|return) -#define iokit_vendor_specific_err(return) (sys_iokit|sub_iokit_vendor_specific|return) +#define iokit_common_err(return ) (sys_iokit|sub_iokit_common|return) +#define iokit_family_err(sub, return ) (sys_iokit|sub|return) +#define iokit_vendor_specific_err(return ) (sys_iokit|sub_iokit_vendor_specific|return) #define kIOReturnSuccess KERN_SUCCESS // OK -#define kIOReturnError iokit_common_err(0x2bc) // general error -#define kIOReturnNoMemory iokit_common_err(0x2bd) // can't allocate memory -#define kIOReturnNoResources iokit_common_err(0x2be) // resource shortage -#define kIOReturnIPCError iokit_common_err(0x2bf) // error during IPC -#define kIOReturnNoDevice iokit_common_err(0x2c0) // no such device -#define kIOReturnNotPrivileged iokit_common_err(0x2c1) // privilege violation -#define kIOReturnBadArgument iokit_common_err(0x2c2) // invalid argument -#define kIOReturnLockedRead iokit_common_err(0x2c3) // device read locked -#define kIOReturnLockedWrite iokit_common_err(0x2c4) // device write locked +#define kIOReturnError iokit_common_err(0x2bc) // general error +#define kIOReturnNoMemory iokit_common_err(0x2bd) // can't allocate memory +#define kIOReturnNoResources iokit_common_err(0x2be) // resource shortage +#define kIOReturnIPCError iokit_common_err(0x2bf) // error during IPC +#define kIOReturnNoDevice iokit_common_err(0x2c0) // no such device +#define kIOReturnNotPrivileged iokit_common_err(0x2c1) // privilege violation +#define kIOReturnBadArgument iokit_common_err(0x2c2) // invalid argument +#define kIOReturnLockedRead iokit_common_err(0x2c3) // device read locked +#define kIOReturnLockedWrite iokit_common_err(0x2c4) // device write locked #define kIOReturnExclusiveAccess iokit_common_err(0x2c5) // exclusive access and - // device already open + // device already open #define kIOReturnBadMessageID iokit_common_err(0x2c6) 
// sent/received messages // had different msg_id -#define kIOReturnUnsupported iokit_common_err(0x2c7) // unsupported function -#define kIOReturnVMError iokit_common_err(0x2c8) // misc. VM failure -#define kIOReturnInternalError iokit_common_err(0x2c9) // internal error -#define kIOReturnIOError iokit_common_err(0x2ca) // General I/O error -//#define kIOReturn???Error iokit_common_err(0x2cb) // ??? +#define kIOReturnUnsupported iokit_common_err(0x2c7) // unsupported function +#define kIOReturnVMError iokit_common_err(0x2c8) // misc. VM failure +#define kIOReturnInternalError iokit_common_err(0x2c9) // internal error +#define kIOReturnIOError iokit_common_err(0x2ca) // General I/O error +//#define kIOReturn???Error iokit_common_err(0x2cb) // ??? #define kIOReturnCannotLock iokit_common_err(0x2cc) // can't acquire lock -#define kIOReturnNotOpen iokit_common_err(0x2cd) // device not open -#define kIOReturnNotReadable iokit_common_err(0x2ce) // read not supported -#define kIOReturnNotWritable iokit_common_err(0x2cf) // write not supported -#define kIOReturnNotAligned iokit_common_err(0x2d0) // alignment error -#define kIOReturnBadMedia iokit_common_err(0x2d1) // Media Error -#define kIOReturnStillOpen iokit_common_err(0x2d2) // device(s) still open -#define kIOReturnRLDError iokit_common_err(0x2d3) // rld failure -#define kIOReturnDMAError iokit_common_err(0x2d4) // DMA failure -#define kIOReturnBusy iokit_common_err(0x2d5) // Device Busy -#define kIOReturnTimeout iokit_common_err(0x2d6) // I/O Timeout -#define kIOReturnOffline iokit_common_err(0x2d7) // device offline -#define kIOReturnNotReady iokit_common_err(0x2d8) // not ready -#define kIOReturnNotAttached iokit_common_err(0x2d9) // device not attached +#define kIOReturnNotOpen iokit_common_err(0x2cd) // device not open +#define kIOReturnNotReadable iokit_common_err(0x2ce) // read not supported +#define kIOReturnNotWritable iokit_common_err(0x2cf) // write not supported +#define kIOReturnNotAligned iokit_common_err(0x2d0) // alignment error +#define kIOReturnBadMedia iokit_common_err(0x2d1) // Media Error +#define kIOReturnStillOpen iokit_common_err(0x2d2) // device(s) still open +#define kIOReturnRLDError iokit_common_err(0x2d3) // rld failure +#define kIOReturnDMAError iokit_common_err(0x2d4) // DMA failure +#define kIOReturnBusy iokit_common_err(0x2d5) // Device Busy +#define kIOReturnTimeout iokit_common_err(0x2d6) // I/O Timeout +#define kIOReturnOffline iokit_common_err(0x2d7) // device offline +#define kIOReturnNotReady iokit_common_err(0x2d8) // not ready +#define kIOReturnNotAttached iokit_common_err(0x2d9) // device not attached #define kIOReturnNoChannels iokit_common_err(0x2da) // no DMA channels left -#define kIOReturnNoSpace iokit_common_err(0x2db) // no space for data -//#define kIOReturn???Error iokit_common_err(0x2dc) // ??? +#define kIOReturnNoSpace iokit_common_err(0x2db) // no space for data +//#define kIOReturn???Error iokit_common_err(0x2dc) // ??? 
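
A sketch of how a family header mints its own return codes with the helpers above; the name and code value here are illustrative, not from any shipping header:

    #define kMyFamilyBadFirmware iokit_family_err(sub_iokit_thunderbolt, 0x10)
    // expands to (sys_iokit | sub_iokit_thunderbolt | 0x10), so it can never
    // collide with the kIOReturn* codes, which all live in sub_iokit_common
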
#define kIOReturnPortExists iokit_common_err(0x2dd) // port already exists -#define kIOReturnCannotWire iokit_common_err(0x2de) // can't wire down +#define kIOReturnCannotWire iokit_common_err(0x2de) // can't wire down // physical memory #define kIOReturnNoInterrupt iokit_common_err(0x2df) // no interrupt attached #define kIOReturnNoFrames iokit_common_err(0x2e0) // no DMA frames enqueued @@ -141,13 +141,13 @@ typedef kern_return_t IOReturn; #define kIOReturnUnsupportedMode iokit_common_err(0x2e6) // no such mode #define kIOReturnUnderrun iokit_common_err(0x2e7) // data underrun #define kIOReturnOverrun iokit_common_err(0x2e8) // data overrun -#define kIOReturnDeviceError iokit_common_err(0x2e9) // the device is not working properly! -#define kIOReturnNoCompletion iokit_common_err(0x2ea) // a completion routine is required -#define kIOReturnAborted iokit_common_err(0x2eb) // operation aborted -#define kIOReturnNoBandwidth iokit_common_err(0x2ec) // bus bandwidth would be exceeded -#define kIOReturnNotResponding iokit_common_err(0x2ed) // device not responding -#define kIOReturnIsoTooOld iokit_common_err(0x2ee) // isochronous I/O request for distant past! -#define kIOReturnIsoTooNew iokit_common_err(0x2ef) // isochronous I/O request for distant future +#define kIOReturnDeviceError iokit_common_err(0x2e9) // the device is not working properly! +#define kIOReturnNoCompletion iokit_common_err(0x2ea) // a completion routine is required +#define kIOReturnAborted iokit_common_err(0x2eb) // operation aborted +#define kIOReturnNoBandwidth iokit_common_err(0x2ec) // bus bandwidth would be exceeded +#define kIOReturnNotResponding iokit_common_err(0x2ed) // device not responding +#define kIOReturnIsoTooOld iokit_common_err(0x2ee) // isochronous I/O request for distant past! +#define kIOReturnIsoTooNew iokit_common_err(0x2ef) // isochronous I/O request for distant future #define kIOReturnNotFound iokit_common_err(0x2f0) // data was not found #define kIOReturnInvalid iokit_common_err(0x1) // should never be seen diff --git a/iokit/IOKit/IOService.h b/iokit/IOKit/IOService.h index e06839160..4a6da1574 100644 --- a/iokit/IOKit/IOService.h +++ b/iokit/IOKit/IOService.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,21 +22,21 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998,1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998,1999 Apple Computer, Inc. All rights reserved. * * HISTORY * */ /*! 
- @header - This header contains the definition of the IOService class. IOService is the sole direct subclass of IORegistryEntry and is the base class of almost all I/O Kit family superclasses. IOService defines methods that support the life cycle of I/O Kit drivers. For more information on IOService, see {@linkdoc //apple_ref/doc/uid/TP0000011 I/O Kit Fundamentals}. - - @seealso //apple_ref/doc/header/IORegistryEntry.h IORegistryEntry -*/ + * @header + * This header contains the definition of the IOService class. IOService is the sole direct subclass of IORegistryEntry and is the base class of almost all I/O Kit family superclasses. IOService defines methods that support the life cycle of I/O Kit drivers. For more information on IOService, see {@linkdoc //apple_ref/doc/uid/TP0000011 I/O Kit Fundamentals}. + * + * @seealso //apple_ref/doc/header/IORegistryEntry.h IORegistryEntry + */ #ifndef _IOKIT_IOSERVICE_H #define _IOKIT_IOSERVICE_H @@ -64,41 +64,41 @@ extern "C" { enum { - kIODefaultProbeScore = 0 + kIODefaultProbeScore = 0 }; // masks for getState() enum { - kIOServiceInactiveState = 0x00000001, - kIOServiceRegisteredState = 0x00000002, - kIOServiceMatchedState = 0x00000004, - kIOServiceFirstPublishState = 0x00000008, - kIOServiceFirstMatchState = 0x00000010 + kIOServiceInactiveState = 0x00000001, + kIOServiceRegisteredState = 0x00000002, + kIOServiceMatchedState = 0x00000004, + kIOServiceFirstPublishState = 0x00000008, + kIOServiceFirstMatchState = 0x00000010 }; enum { - // options for registerService() - kIOServiceExclusive = 0x00000001, + // options for registerService() + kIOServiceExclusive = 0x00000001, - // options for terminate() - kIOServiceRequired = 0x00000001, - kIOServiceTerminate = 0x00000004, + // options for terminate() + kIOServiceRequired = 0x00000001, + kIOServiceTerminate = 0x00000004, - // options for registerService() & terminate() - kIOServiceSynchronous = 0x00000002, - // options for registerService() - kIOServiceAsynchronous = 0x00000008 + // options for registerService() & terminate() + kIOServiceSynchronous = 0x00000002, + // options for registerService() + kIOServiceAsynchronous = 0x00000008 }; // options for open() enum { - kIOServiceSeize = 0x00000001, - kIOServiceFamilyOpenOptions = 0xffff0000 + kIOServiceSeize = 0x00000001, + kIOServiceFamilyOpenOptions = 0xffff0000 }; // options for close() enum { - kIOServiceFamilyCloseOptions = 0xffff0000 + kIOServiceFamilyCloseOptions = 0xffff0000 }; typedef void * IONotificationRef; @@ -153,45 +153,45 @@ extern const OSSymbol * gIOBSDUnitKey; extern SInt32 IOServiceOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ); typedef void (*IOInterruptAction)( OSObject * target, void * refCon, - IOService * nub, int source ); + IOService * nub, int source ); #ifdef __BLOCKS__ typedef void (^IOInterruptActionBlock)(IOService * nub, int source); #endif /* __BLOCKS__ */ /*! @typedef IOServiceNotificationHandler - @param target Reference supplied when the notification was registered. - @param refCon Reference constant supplied when the notification was registered. - @param newService The IOService object the notification is delivering. It is retained for the duration of the handler's invocation and doesn't need to be released by the handler. */ + * @param target Reference supplied when the notification was registered. + * @param refCon Reference constant supplied when the notification was registered. + * @param newService The IOService object the notification is delivering. 
It is retained for the duration of the handler's invocation and doesn't need to be released by the handler. */ typedef bool (*IOServiceNotificationHandler)( void * target, void * refCon, - IOService * newService ); + IOService * newService ); typedef bool (*IOServiceMatchingNotificationHandler)( void * target, void * refCon, - IOService * newService, - IONotifier * notifier ); + IOService * newService, + IONotifier * notifier ); #ifdef __BLOCKS__ typedef bool (^IOServiceMatchingNotificationHandlerBlock)(IOService * newService, - IONotifier * notifier ); + IONotifier * notifier ); #endif /* __BLOCKS__ */ /*! @typedef IOServiceInterestHandler - @param target Reference supplied when the notification was registered. - @param refCon Reference constant supplied when the notification was registered. - @param messageType Type of the message - IOKit defined in IOKit/IOMessage.h or family specific. - @param provider The IOService object who is delivering the notification. It is retained for the duration of the handler's invocation and doesn't need to be released by the handler. - @param messageArgument An argument for message, dependent on its type. - @param argSize Non zero if the argument represents a struct of that size, used when delivering messages outside the kernel. */ + * @param target Reference supplied when the notification was registered. + * @param refCon Reference constant supplied when the notification was registered. + * @param messageType Type of the message - IOKit defined in IOKit/IOMessage.h or family specific. + * @param provider The IOService object who is delivering the notification. It is retained for the duration of the handler's invocation and doesn't need to be released by the handler. + * @param messageArgument An argument for message, dependent on its type. + * @param argSize Non zero if the argument represents a struct of that size, used when delivering messages outside the kernel. */ typedef IOReturn (*IOServiceInterestHandler)( void * target, void * refCon, - UInt32 messageType, IOService * provider, - void * messageArgument, vm_size_t argSize ); + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ); #ifdef __BLOCKS__ typedef IOReturn (^IOServiceInterestHandlerBlock)( uint32_t messageType, IOService * provider, - void * messageArgument, size_t argSize ); + void * messageArgument, size_t argSize ); #endif /* __BLOCKS__ */ typedef void (*IOServiceApplierFunction)(IOService * service, void * context); @@ -201,1818 +201,1820 @@ class IOUserClient; class IOPlatformExpert; /*! @class IOService - @abstract The base class for most I/O Kit families, devices, and drivers. - @discussion The IOService base class defines APIs used to publish services, instantiate other services based on the existance of a providing service (ie. driver stacking), destroy a service and its dependent stack, notify interested parties of service state changes, and general utility functions useful across all families. - -Types of service are specified with a matching dictionary that describes properties of the service. For example, a matching dictionary might describe any IOUSBDevice (or subclass), an IOUSBDevice with a certain class code, or a IOPCIDevice with a set of matching names or device & vendor IDs. Since the matching dictionary is interpreted by the family which created the service, as well as generically by IOService, the list of properties considered for matching depends on the familiy. 
- -Matching dictionaries are associated with IOService classes by the catalogue, as driver property tables, and also supplied by clients of the notification APIs. - -IOService provides matching based on C++ class (via OSMetaClass dynamic casting), registry entry name, a registry path to the service (which includes device tree paths), a name assigned by BSD, or by its location (its point of attachment). - -

Driver Instantiation by IOService

- -Drivers are subclasses of IOService, and their availability is managed through the catalogue. They are instantiated based on the publication of an IOService they use (for example, an IOPCIDevice or IOUSBDevice), or when they are added to the catalogue and the IOService(s) they use are already available. - -When an IOService (the "provider") is published with the @link registerService registerService@/link method, the matching and probing process begins, which is always single threaded per provider. A list of matching dictionaries from the catalog and installed publish notification requests, that successfully match the IOService, is constructed, with ordering supplied by kIOProbeScoreKey ("IOProbeScore") property in the dictionary, or supplied with the notification. - -Each entry in the list is then processed in order - for notifications, the notification is delivered, for driver property tables a lot more happens. - -The driver class is instantiated and init() called with its property table. The new driver instance is then attached to the provider, and has its @link probe probe@/link method called with the provider as an argument. The default probe method does nothing but return success, but a driver may implement this method to interrogate the provider to make sure it can work with it. It may also modify its probe score at this time. After probe, the driver is detached and the next in the list is considered (ie. attached, probed, and detached). - -When the probing phase is complete, the list consists of successfully probed drivers, in order of their probe score (after adjustment during the @link probe probe@/link call). The list is then divided into categories based on the kIOMatchCategoryKey property ("IOMatchCategory"); drivers without a match category are all considered in one default category. Match categories allow multiple clients of a provider to be attached and started, though the provider may also enforce open/close semantics to gain active access to it. - -For each category, the highest scoring driver in that category is attached to the provider, and its @link start start@/link method called. If start is successful, the rest of the drivers in the same match category are discarded, otherwise the next highest scoring driver is started, and so on. - -The driver should only consider itself in action when the start method is called, meaning it has been selected for use on the provider, and consuming that particular match category. It should also be prepared to be allocated, probed and freed even if the probe was successful. - -After the drivers have all synchronously been started, the installed "matched" notifications that match the registered IOService are delivered. - -

Properties used by IOService

- - kIOClassKey, extern const OSSymbol * gIOClassKey, "IOClass" -
-
-Class of the driver to instantiate on matching providers. -
-
- kIOProviderClassKey, extern const OSSymbol * gIOProviderClassKey, "IOProviderClass" -
-
-Class of the provider(s) to be considered for matching, checked with OSDynamicCast so subclasses will also match. -
-
- kIOProbeScoreKey, extern const OSSymbol * gIOProbeScoreKey, "IOProbeScore" -
-
-The probe score initially used to order multiple matching drivers. -
-
- kIOMatchCategoryKey, extern const OSSymbol * gIOMatchCategoryKey, "IOMatchCategory" -
-
-A string defining the driver category for matching purposes. All drivers with no IOMatchCategory property are considered to be in the same default category. Only one driver in a category can be started on each provider. -
-
- kIONameMatchKey, extern const OSSymbol * gIONameMatchKey, "IONameMatch" -
-A string or collection of strings that match the provider's name. The comparison is implemented with the @link //apple_ref/cpp/instm/IORegistryEntry/compareNames/virtualbool/(OSObject*,OSString**) IORegistryEntry::compareNames@/link method, which supports a single string, or any collection (OSArray, OSSet, OSDictionary etc.) of strings. IOService objects with device tree properties (eg. IOPCIDevice) will also be matched based on that standard's "compatible", "name", "device_type" properties. The matching name will be left in the driver's property table in the kIONameMatchedKey property. -
-Examples -
-@textblock
-    <key>IONameMatch</key>
-    <string>pci106b,7</string>
-@/textblock
-
- -For a list of possible matching names, a serialized array of strings should used, eg. -
-@textblock
-    <key>IONameMatch</key>
-    <array>
-        <string>APPL,happy16</string>
-        <string>pci106b,7</string>
-    </array>
-@/textblock
-
- -
- kIONameMatchedKey, extern const OSSymbol * gIONameMatchedKey, "IONameMatched" -
-The name successfully matched name from the kIONameMatchKey property will be left in the driver's property table as the kIONameMatchedKey property. -
-
- kIOPropertyMatchKey, extern const OSSymbol * gIOPropertyMatchKey, "IOPropertyMatch" -
-A dictionary of properties that each must exist in the matching IOService and compare successfully with the isEqualTo method. - -
-@textblock
-    <key>IOPropertyMatch</key>
-    <dict>
-        <key>APPL,happy16</key>
-        <string>APPL,meek8</string>
-    </dict>
-@/textblock
-
- -
- kIOUserClientClassKey, extern const OSSymbol * gIOUserClientClassKey, "IOUserClientClass" -
-The class name that the service will attempt to allocate when a user client connection is requested. First the device nub is queried, then the nub's provider is queried by default. -
-
- kIOKitDebugKey, extern const OSSymbol * gIOKitDebugKey, "IOKitDebug" -
-Set some debug flags for logging the driver loading process. Flags are defined in IOKit/IOKitDebug.h, but 65535 works well.*/
+ * @abstract The base class for most I/O Kit families, devices, and drivers.
+ * @discussion The IOService base class defines APIs used to publish services, instantiate other services based on the existence of a providing service (ie. driver stacking), destroy a service and its dependent stack, notify interested parties of service state changes, and general utility functions useful across all families.
+ *
+ * Types of service are specified with a matching dictionary that describes properties of the service. For example, a matching dictionary might describe any IOUSBDevice (or subclass), an IOUSBDevice with a certain class code, or an IOPCIDevice with a set of matching names or device & vendor IDs. Since the matching dictionary is interpreted by the family which created the service, as well as generically by IOService, the list of properties considered for matching depends on the family.
+ *
+ * Matching dictionaries are associated with IOService classes by the catalogue, as driver property tables, and also supplied by clients of the notification APIs.
+ *
+ * IOService provides matching based on C++ class (via OSMetaClass dynamic casting), registry entry name, a registry path to the service (which includes device tree paths), a name assigned by BSD, or by its location (its point of attachment).
+
+ *

Driver Instantiation by IOService

+ * + * Drivers are subclasses of IOService, and their availability is managed through the catalogue. They are instantiated based on the publication of an IOService they use (for example, an IOPCIDevice or IOUSBDevice), or when they are added to the catalogue and the IOService(s) they use are already available. + * + * When an IOService (the "provider") is published with the @link registerService registerService@/link method, the matching and probing process begins, which is always single threaded per provider. A list of matching dictionaries from the catalog and installed publish notification requests, that successfully match the IOService, is constructed, with ordering supplied by kIOProbeScoreKey ("IOProbeScore") property in the dictionary, or supplied with the notification. + * + * Each entry in the list is then processed in order - for notifications, the notification is delivered, for driver property tables a lot more happens. + * + * The driver class is instantiated and init() called with its property table. The new driver instance is then attached to the provider, and has its @link probe probe@/link method called with the provider as an argument. The default probe method does nothing but return success, but a driver may implement this method to interrogate the provider to make sure it can work with it. It may also modify its probe score at this time. After probe, the driver is detached and the next in the list is considered (ie. attached, probed, and detached). + * + * When the probing phase is complete, the list consists of successfully probed drivers, in order of their probe score (after adjustment during the @link probe probe@/link call). The list is then divided into categories based on the kIOMatchCategoryKey property ("IOMatchCategory"); drivers without a match category are all considered in one default category. Match categories allow multiple clients of a provider to be attached and started, though the provider may also enforce open/close semantics to gain active access to it. + * + * For each category, the highest scoring driver in that category is attached to the provider, and its @link start start@/link method called. If start is successful, the rest of the drivers in the same match category are discarded, otherwise the next highest scoring driver is started, and so on. + * + * The driver should only consider itself in action when the start method is called, meaning it has been selected for use on the provider, and consuming that particular match category. It should also be prepared to be allocated, probed and freed even if the probe was successful. + * + * After the drivers have all synchronously been started, the installed "matched" notifications that match the registered IOService are delivered. + * + *
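+ * (A sketch of the resulting driver-side life cycle, assuming a hypothetical
+ * MyDriver subclass with an IOPCIDevice provider; probe() interrogates the
+ * provider and may adjust the score, start() claims the match category:)
+ *
+ *  @textblock
+ *  IOService * MyDriver::probe(IOService * provider, SInt32 * score)
+ *  {
+ *      if (!super::probe(provider, score))        return NULL;
+ *      if (!OSDynamicCast(IOPCIDevice, provider)) return NULL; // wrong nub
+ *      *score += 100;   // reorder among competing matched drivers
+ *      return this;
+ *  }
+ *
+ *  bool MyDriver::start(IOService * provider)
+ *  {
+ *      if (!super::start(provider)) return false;
+ *      registerService();  // publish so matching can begin with us as provider
+ *      return true;
+ *  }
+ *  @/textblock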

Properties used by IOService

+ * + * kIOClassKey, extern const OSSymbol * gIOClassKey, "IOClass" + *
+ *
+ * Class of the driver to instantiate on matching providers. + *
+ *
+ * kIOProviderClassKey, extern const OSSymbol * gIOProviderClassKey, "IOProviderClass" + *
+ *
+ * Class of the provider(s) to be considered for matching, checked with OSDynamicCast so subclasses will also match. + *
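+ * (Illustrative personality fragment, in the style of the examples below,
+ * for a hypothetical driver that wants any PCI nub:)
+ *  @textblock
+ *   <key>IOProviderClass</key>
+ *   <string>IOPCIDevice</string>
+ *  @/textblock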
+ *
+ * kIOProbeScoreKey, extern const OSSymbol * gIOProbeScoreKey, "IOProbeScore" + *
+ *
+ * The probe score initially used to order multiple matching drivers. + *
+ *
+ * kIOMatchCategoryKey, extern const OSSymbol * gIOMatchCategoryKey, "IOMatchCategory" + *
+ *
+ * A string defining the driver category for matching purposes. All drivers with no IOMatchCategory property are considered to be in the same default category. Only one driver in a category can be started on each provider. + *
+ *
+ * kIONameMatchKey, extern const OSSymbol * gIONameMatchKey, "IONameMatch" + *
+ * A string or collection of strings that match the provider's name. The comparison is implemented with the @link //apple_ref/cpp/instm/IORegistryEntry/compareNames/virtualbool/(OSObject*,OSString**) IORegistryEntry::compareNames@/link method, which supports a single string, or any collection (OSArray, OSSet, OSDictionary etc.) of strings. IOService objects with device tree properties (eg. IOPCIDevice) will also be matched based on that standard's "compatible", "name", "device_type" properties. The matching name will be left in the driver's property table in the kIONameMatchedKey property. + *
+ * Examples + *
+ *  @textblock
+ *   <key>IONameMatch</key>
+ *   <string>pci106b,7</string>
+ *  @/textblock
+ *  
+ *
+ * For a list of possible matching names, a serialized array of strings should be used, eg.
+ *
+ *  @textblock
+ *   <key>IONameMatch</key>
+ *   <array>
+ *       <string>APPL,happy16</string>
+ *       <string>pci106b,7</string>
+ *   </array>
+ *  @/textblock
+ *  
+ * + *
+ * kIONameMatchedKey, extern const OSSymbol * gIONameMatchedKey, "IONameMatched" + *
+ * The name successfully matched from the kIONameMatchKey property will be left in the driver's property table as the kIONameMatchedKey property.
+ *
+ *
+ * kIOPropertyMatchKey, extern const OSSymbol * gIOPropertyMatchKey, "IOPropertyMatch" + *
+ * A dictionary of properties, each of which must exist in the matching IOService's property table and compare successfully with the isEqualTo method. + *
+ *  <pre>
+ *  @textblock
+ *   <key>IOPropertyMatch</key>
+ *   <dict>
+ *       <key>APPL,happy16</key>
+ *       <string>APPL,meek8</string>
+ *   </dict>
+ *  @/textblock
+ *  </pre>
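The effect of this comparison is roughly the following sketch; it is simplified and hypothetical, since the real matching code also handles other key types and nested collections:

    #include <IOKit/IOService.h>
    #include <libkern/c++/OSDictionary.h>
    #include <libkern/c++/OSCollectionIterator.h>

    // Simplified sketch of the IOPropertyMatch comparison: every key in the
    // match dictionary must exist on the service and compare equal. Keys of
    // an OSDictionary built from a plist are OSSymbols.
    static bool propertiesMatch(IOService * service, OSDictionary * match)
    {
        OSCollectionIterator * iter = OSCollectionIterator::withCollection(match);
        if (iter == NULL) {
            return false;
        }
        bool ok = true;
        const OSSymbol * key;
        while (ok && (key = OSDynamicCast(OSSymbol, iter->getNextObject()))) {
            OSObject * value = service->copyProperty(key);
            ok = (value != NULL) && match->getObject(key)->isEqualTo(value);
            if (value != NULL) {
                value->release();
            }
        }
        iter->release();
        return ok;
    }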
+ * + *
+ * kIOUserClientClassKey, extern const OSSymbol * gIOUserClientClassKey, "IOUserClientClass" + *
+ * The class name that the service will attempt to allocate when a user client connection is requested. By default, the device nub is queried first, then the nub's provider. + *
+ *
+ * kIOKitDebugKey, extern const OSSymbol * gIOKitDebugKey, "IOKitDebug" + *
+ * Set some debug flags for logging the driver loading process. Flags are defined in IOKit/IOKitDebug.h, but 65535 works well.*/ struct IOInterruptAccountingData; struct IOInterruptAccountingReporter; class IOService : public IORegistryEntry { - OSDeclareDefaultStructors(IOService) + OSDeclareDefaultStructors(IOService) protected: /*! @struct ExpansionData - @discussion This structure will be used to expand the capablilties of this class in the future. - */ - struct ExpansionData { - uint64_t authorizationID; - /* - * Variables associated with interrupt accounting. Consists of an array - * (that pairs reporters with opaque "statistics" objects), the count for - * the array, and a lock to guard both of the former variables. The lock - * is necessary as IOReporting will not update reports in a manner that is - * synchonized with the service (i.e, on a workloop). - */ - IOLock * interruptStatisticsLock; - IOInterruptAccountingReporter * interruptStatisticsArray; - int interruptStatisticsArrayCount; - }; + * @discussion This structure will be used to expand the capabilities of this class in the future. + */ + struct ExpansionData { + uint64_t authorizationID; + /* + * Variables associated with interrupt accounting. Consists of an array + * (that pairs reporters with opaque "statistics" objects), the count for + * the array, and a lock to guard both of the former variables. The lock + * is necessary as IOReporting will not update reports in a manner that is + * synchronized with the service (i.e., on a workloop). + */ + IOLock * interruptStatisticsLock; + IOInterruptAccountingReporter * interruptStatisticsArray; + int interruptStatisticsArrayCount; + }; /*! @var reserved - Reserved for future use. (Internal use only) */ - APPLE_KEXT_WSHADOW_PUSH; - ExpansionData * reserved; - APPLE_KEXT_WSHADOW_POP; + * Reserved for future use. (Internal use only) */ + APPLE_KEXT_WSHADOW_PUSH; + ExpansionData * reserved; + APPLE_KEXT_WSHADOW_POP; private: - IOService * __provider; - SInt32 __providerGeneration; - IOService * __owner; - IOOptionBits __state[2]; - uint64_t __timeBusy; - uint64_t __accumBusy; - IOServicePM * pwrMgt; + IOService * __provider; + SInt32 __providerGeneration; + IOService * __owner; + IOOptionBits __state[2]; + uint64_t __timeBusy; + uint64_t __accumBusy; + IOServicePM * pwrMgt; protected: - // TRUE once PMinit has been called - bool initialized; +// TRUE once PMinit has been called + bool initialized; public: - // DEPRECATED - void * pm_vars; +// DEPRECATED + void * pm_vars; public: - /* methods available in Mac OS X 10.1 or later */ +/* methods available in Mac OS X 10.1 or later */ /*! @function requestTerminate - @abstract Passes a termination up the stack.
+ * @discussion When an IOService is made inactive the default behavior is to also make any of its clients that have it as their only provider also inactive, in this way recursing the termination up the driver stack. This method allows an IOService object to override this behavior. Returning true from this method when passed a just terminated provider will cause the client to also be terminated. + * @param provider The terminated provider of this object. + * @param options Options originally passed to terminate, plus kIOServiceRecursing. + * @result true if this object should be terminated now that its provider has been. */ - virtual bool requestTerminate( IOService * provider, IOOptionBits options ); + virtual bool requestTerminate( IOService * provider, IOOptionBits options ); /*! @function willTerminate - @abstract Passes a termination up the stack. - @discussion Notification that a provider has been terminated, sent before recursing up the stack, in root-to-leaf order. - @param provider The terminated provider of this object. - @param options Options originally passed to terminate. - @result true. */ + * @abstract Passes a termination up the stack. + * @discussion Notification that a provider has been terminated, sent before recursing up the stack, in root-to-leaf order. + * @param provider The terminated provider of this object. + * @param options Options originally passed to terminate. + * @result true. */ - virtual bool willTerminate( IOService * provider, IOOptionBits options ); + virtual bool willTerminate( IOService * provider, IOOptionBits options ); /*! @function didTerminate - @abstract Passes a termination up the stack. - @discussion Notification that a provider has been terminated, sent after recursing up the stack, in leaf-to-root order. - @param provider The terminated provider of this object. - @param options Options originally passed to terminate. - @param defer If there is pending I/O that requires this object to persist, and the provider is not opened by this object set defer to true and call the IOService::didTerminate() implementation when the I/O completes. Otherwise, leave defer set to its default value of false. - @result true. */ + * @abstract Passes a termination up the stack. + * @discussion Notification that a provider has been terminated, sent after recursing up the stack, in leaf-to-root order. + * @param provider The terminated provider of this object. + * @param options Options originally passed to terminate. + * @param defer If there is pending I/O that requires this object to persist, and the provider is not opened by this object set defer to true and call the IOService::didTerminate() implementation when the I/O completes. Otherwise, leave defer set to its default value of false. + * @result true. */ - virtual bool didTerminate( IOService * provider, IOOptionBits options, bool * defer ); + virtual bool didTerminate( IOService * provider, IOOptionBits options, bool * defer ); /*! @function nextIdleTimeout - @availability Mac OS X v10.4 and later - @abstract Allows subclasses to customize idle power management behavior. - @discussion Returns the next time that the device should idle into its next lower power state. Subclasses may override for custom idle behavior. - - A power managed driver might override this method to provide a more sophisticated idle power off algorithm than the one defined by power management. 
- @param currentTime The current time - @param lastActivity The time of last activity on this device - @param powerState The device's current power state. - @result Returns the next time the device should idle off (in seconds, relative to the current time). */ - - virtual SInt32 nextIdleTimeout(AbsoluteTime currentTime, - AbsoluteTime lastActivity, unsigned int powerState); + * @availability Mac OS X v10.4 and later + * @abstract Allows subclasses to customize idle power management behavior. + * @discussion Returns the next time that the device should idle into its next lower power state. Subclasses may override for custom idle behavior. + * + * A power managed driver might override this method to provide a more sophisticated idle power off algorithm than the one defined by power management. + * @param currentTime The current time + * @param lastActivity The time of last activity on this device + * @param powerState The device's current power state. + * @result Returns the next time the device should idle off (in seconds, relative to the current time). */ + + virtual SInt32 nextIdleTimeout(AbsoluteTime currentTime, + AbsoluteTime lastActivity, unsigned int powerState); /*! @function systemWillShutdown - @availability Mac OS X v10.5 and later - @abstract Notifies members of the power plane of system shutdown and restart. - @discussion This function is called for all members of the power plane in leaf-to-root order. If a subclass needs to wait for a pending I/O, then the call to systemWillShutdown should be postponed until the I/O completes. - - Any power managed driver (which has called @link joinPMtree joinPMtree@/link to join the power plane) interested in taking action at system shutdown or restart should override this method. - @param specifier kIOMessageSystemWillPowerOff or kIOMessageSystemWillRestart. */ + * @availability Mac OS X v10.5 and later + * @abstract Notifies members of the power plane of system shutdown and restart. + * @discussion This function is called for all members of the power plane in leaf-to-root order. If a subclass needs to wait for a pending I/O, then the call to systemWillShutdown should be postponed until the I/O completes. + * + * Any power managed driver (which has called @link joinPMtree joinPMtree@/link to join the power plane) interested in taking action at system shutdown or restart should override this method. + * @param specifier kIOMessageSystemWillPowerOff or kIOMessageSystemWillRestart. */ - virtual void systemWillShutdown( IOOptionBits specifier ); + virtual void systemWillShutdown( IOOptionBits specifier ); /*! @function copyClientWithCategory - @availability Mac OS X v10.6 and later - @param category An OSSymbol corresponding to an IOMatchCategory matching property. - @result Returns a reference to the IOService child with the given category. The result should be released by the caller. -*/ + * @availability Mac OS X v10.6 and later + * @param category An OSSymbol corresponding to an IOMatchCategory matching property. + * @result Returns a reference to the IOService child with the given category. The result should be released by the caller. + */ - virtual IOService * copyClientWithCategory( const OSSymbol * category ); + virtual IOService * copyClientWithCategory( const OSSymbol * category ); public: /*! 
@function configureReport - @abstract configure IOReporting channels - @availability SPI on OS X v10.9 / iOS 7 and later - - @param channels - channels to configure - @param action - enable/disable/size, etc - @param result - action-specific returned value - @param destination - action-specific default destination -*/ -virtual IOReturn configureReport(IOReportChannelList *channels, - IOReportConfigureAction action, - void *result, - void *destination); + * @abstract configure IOReporting channels + * @availability SPI on OS X v10.9 / iOS 7 and later + * + * @param channels - channels to configure + * @param action - enable/disable/size, etc + * @param result - action-specific returned value + * @param destination - action-specific default destination + */ + virtual IOReturn configureReport(IOReportChannelList *channels, + IOReportConfigureAction action, + void *result, + void *destination); /*! @function updateReport - @abstract request current data for the specified channels - @availability SPI on OS X 10.9 / iOS 7 and later - - @param channels - channels to be updated - @param action - type/style of update - @param result - returned details about what was updated - @param destination - destination for this update (action-specific) -*/ -virtual IOReturn updateReport(IOReportChannelList *channels, - IOReportUpdateAction action, - void *result, - void *destination); + * @abstract request current data for the specified channels + * @availability SPI on OS X 10.9 / iOS 7 and later + * + * @param channels - channels to be updated + * @param action - type/style of update + * @param result - returned details about what was updated + * @param destination - destination for this update (action-specific) + */ + virtual IOReturn updateReport(IOReportChannelList *channels, + IOReportUpdateAction action, + void *result, + void *destination); private: #if __LP64__ - OSMetaClassDeclareReservedUsed(IOService, 0); - OSMetaClassDeclareReservedUsed(IOService, 1); - OSMetaClassDeclareReservedUnused(IOService, 2); - OSMetaClassDeclareReservedUnused(IOService, 3); - OSMetaClassDeclareReservedUnused(IOService, 4); - OSMetaClassDeclareReservedUnused(IOService, 5); - OSMetaClassDeclareReservedUnused(IOService, 6); - OSMetaClassDeclareReservedUnused(IOService, 7); + OSMetaClassDeclareReservedUsed(IOService, 0); + OSMetaClassDeclareReservedUsed(IOService, 1); + OSMetaClassDeclareReservedUnused(IOService, 2); + OSMetaClassDeclareReservedUnused(IOService, 3); + OSMetaClassDeclareReservedUnused(IOService, 4); + OSMetaClassDeclareReservedUnused(IOService, 5); + OSMetaClassDeclareReservedUnused(IOService, 6); + OSMetaClassDeclareReservedUnused(IOService, 7); #else - OSMetaClassDeclareReservedUsed(IOService, 0); - OSMetaClassDeclareReservedUsed(IOService, 1); - OSMetaClassDeclareReservedUsed(IOService, 2); - OSMetaClassDeclareReservedUsed(IOService, 3); - OSMetaClassDeclareReservedUsed(IOService, 4); - OSMetaClassDeclareReservedUsed(IOService, 5); - OSMetaClassDeclareReservedUsed(IOService, 6); - OSMetaClassDeclareReservedUsed(IOService, 7); + OSMetaClassDeclareReservedUsed(IOService, 0); + OSMetaClassDeclareReservedUsed(IOService, 1); + OSMetaClassDeclareReservedUsed(IOService, 2); + OSMetaClassDeclareReservedUsed(IOService, 3); + OSMetaClassDeclareReservedUsed(IOService, 4); + OSMetaClassDeclareReservedUsed(IOService, 5); + OSMetaClassDeclareReservedUsed(IOService, 6); + OSMetaClassDeclareReservedUsed(IOService, 7); #endif - OSMetaClassDeclareReservedUnused(IOService, 8); - OSMetaClassDeclareReservedUnused(IOService, 
9); - OSMetaClassDeclareReservedUnused(IOService, 10); - OSMetaClassDeclareReservedUnused(IOService, 11); - OSMetaClassDeclareReservedUnused(IOService, 12); - OSMetaClassDeclareReservedUnused(IOService, 13); - OSMetaClassDeclareReservedUnused(IOService, 14); - OSMetaClassDeclareReservedUnused(IOService, 15); - OSMetaClassDeclareReservedUnused(IOService, 16); - OSMetaClassDeclareReservedUnused(IOService, 17); - OSMetaClassDeclareReservedUnused(IOService, 18); - OSMetaClassDeclareReservedUnused(IOService, 19); - OSMetaClassDeclareReservedUnused(IOService, 20); - OSMetaClassDeclareReservedUnused(IOService, 21); - OSMetaClassDeclareReservedUnused(IOService, 22); - OSMetaClassDeclareReservedUnused(IOService, 23); - OSMetaClassDeclareReservedUnused(IOService, 24); - OSMetaClassDeclareReservedUnused(IOService, 25); - OSMetaClassDeclareReservedUnused(IOService, 26); - OSMetaClassDeclareReservedUnused(IOService, 27); - OSMetaClassDeclareReservedUnused(IOService, 28); - OSMetaClassDeclareReservedUnused(IOService, 29); - OSMetaClassDeclareReservedUnused(IOService, 30); - OSMetaClassDeclareReservedUnused(IOService, 31); - OSMetaClassDeclareReservedUnused(IOService, 32); - OSMetaClassDeclareReservedUnused(IOService, 33); - OSMetaClassDeclareReservedUnused(IOService, 34); - OSMetaClassDeclareReservedUnused(IOService, 35); - OSMetaClassDeclareReservedUnused(IOService, 36); - OSMetaClassDeclareReservedUnused(IOService, 37); - OSMetaClassDeclareReservedUnused(IOService, 38); - OSMetaClassDeclareReservedUnused(IOService, 39); - OSMetaClassDeclareReservedUnused(IOService, 40); - OSMetaClassDeclareReservedUnused(IOService, 41); - OSMetaClassDeclareReservedUnused(IOService, 42); - OSMetaClassDeclareReservedUnused(IOService, 43); - OSMetaClassDeclareReservedUnused(IOService, 44); - OSMetaClassDeclareReservedUnused(IOService, 45); - OSMetaClassDeclareReservedUnused(IOService, 46); - OSMetaClassDeclareReservedUnused(IOService, 47); + OSMetaClassDeclareReservedUnused(IOService, 8); + OSMetaClassDeclareReservedUnused(IOService, 9); + OSMetaClassDeclareReservedUnused(IOService, 10); + OSMetaClassDeclareReservedUnused(IOService, 11); + OSMetaClassDeclareReservedUnused(IOService, 12); + OSMetaClassDeclareReservedUnused(IOService, 13); + OSMetaClassDeclareReservedUnused(IOService, 14); + OSMetaClassDeclareReservedUnused(IOService, 15); + OSMetaClassDeclareReservedUnused(IOService, 16); + OSMetaClassDeclareReservedUnused(IOService, 17); + OSMetaClassDeclareReservedUnused(IOService, 18); + OSMetaClassDeclareReservedUnused(IOService, 19); + OSMetaClassDeclareReservedUnused(IOService, 20); + OSMetaClassDeclareReservedUnused(IOService, 21); + OSMetaClassDeclareReservedUnused(IOService, 22); + OSMetaClassDeclareReservedUnused(IOService, 23); + OSMetaClassDeclareReservedUnused(IOService, 24); + OSMetaClassDeclareReservedUnused(IOService, 25); + OSMetaClassDeclareReservedUnused(IOService, 26); + OSMetaClassDeclareReservedUnused(IOService, 27); + OSMetaClassDeclareReservedUnused(IOService, 28); + OSMetaClassDeclareReservedUnused(IOService, 29); + OSMetaClassDeclareReservedUnused(IOService, 30); + OSMetaClassDeclareReservedUnused(IOService, 31); + OSMetaClassDeclareReservedUnused(IOService, 32); + OSMetaClassDeclareReservedUnused(IOService, 33); + OSMetaClassDeclareReservedUnused(IOService, 34); + OSMetaClassDeclareReservedUnused(IOService, 35); + OSMetaClassDeclareReservedUnused(IOService, 36); + OSMetaClassDeclareReservedUnused(IOService, 37); + OSMetaClassDeclareReservedUnused(IOService, 38); + 
OSMetaClassDeclareReservedUnused(IOService, 39); + OSMetaClassDeclareReservedUnused(IOService, 40); + OSMetaClassDeclareReservedUnused(IOService, 41); + OSMetaClassDeclareReservedUnused(IOService, 42); + OSMetaClassDeclareReservedUnused(IOService, 43); + OSMetaClassDeclareReservedUnused(IOService, 44); + OSMetaClassDeclareReservedUnused(IOService, 45); + OSMetaClassDeclareReservedUnused(IOService, 46); + OSMetaClassDeclareReservedUnused(IOService, 47); public: /*! @function getState - @abstract Accessor for IOService state bits, not normally needed or used outside IOService. - @result State bits for the IOService, eg. kIOServiceInactiveState, kIOServiceRegisteredState. */ + * @abstract Accessor for IOService state bits, not normally needed or used outside IOService. + * @result State bits for the IOService, e.g. kIOServiceInactiveState, kIOServiceRegisteredState. */ - virtual IOOptionBits getState( void ) const; + virtual IOOptionBits getState( void ) const; /*! @function isInactive - @abstract Checks if the IOService object has been terminated, and is in the process of being destroyed. - @discussion When an IOService object is successfully terminated, it is immediately made inactive, which blocks further attach()es, matching or notifications occuring on the object. It remains inactive until the last client closes, and is then finalized and destroyed. - @result true if the IOService object has been terminated. */ + * @abstract Checks if the IOService object has been terminated, and is in the process of being destroyed. + * @discussion When an IOService object is successfully terminated, it is immediately made inactive, which blocks further attach()es, matching, or notifications occurring on the object. It remains inactive until the last client closes, and is then finalized and destroyed. + * @result true if the IOService object has been terminated. */ - bool isInactive( void ) const; + bool isInactive( void ) const; - /* Stack creation */ +/* Stack creation */ /*! @function registerService - @abstract Starts the registration process for a newly discovered IOService object. - @discussion This function allows an IOService subclass to be published and made available to possible clients, by starting the registration process and delivering notifications to registered clients. The object should be completely setup and ready to field requests from clients before registerService is called. - @param options The default zero options mask is recommended and should be used in most cases. The registration process is usually asynchronous, with possible driver probing and notification occurring some time later. kIOServiceSynchronous may be passed to carry out the matching and notification process for currently registered clients before returning to the caller. */ + * @abstract Starts the registration process for a newly discovered IOService object. + * @discussion This function allows an IOService subclass to be published and made available to possible clients, by starting the registration process and delivering notifications to registered clients. The object should be completely set up and ready to field requests from clients before registerService is called. + * @param options The default zero options mask is recommended and should be used in most cases. The registration process is usually asynchronous, with possible driver probing and notification occurring some time later.
kIOServiceSynchronous may be passed to carry out the matching and notification process for currently registered clients before returning to the caller. */ - virtual void registerService( IOOptionBits options = 0 ); + virtual void registerService( IOOptionBits options = 0 ); /*! @function probe - @abstract During an IOService object's instantiation, probes a matched service to see if it can be used. - @discussion The registration process for an IOService object (the provider) includes instantiating possible driver clients. The probe method is called in the client instance to check the matched service can be used before the driver is considered to be started. Since matching screens many possible providers, in many cases the probe method can be left unimplemented by IOService subclasses. The client is already attached to the provider when probe is called. - @param provider The registered IOService object that matches a driver personality's matching dictionary. - @param score Pointer to the current driver's probe score, which is used to order multiple matching drivers in the same match category. It defaults to the value of the IOProbeScore property in the drivers property table, or kIODefaultProbeScore if none is specified. The probe method may alter the score to affect start order. - @result An IOService instance or zero when the probe is unsuccessful. In almost all cases the value of this is returned on success. If another IOService object is returned, the probed instance is detached and freed, and the returned instance is used in its stead for start. */ - - virtual IOService * probe( IOService * provider, - SInt32 * score ); + * @abstract During an IOService object's instantiation, probes a matched service to see if it can be used. + * @discussion The registration process for an IOService object (the provider) includes instantiating possible driver clients. The probe method is called in the client instance to check that the matched service can be used before the driver is considered to be started. Since matching screens many possible providers, in many cases the probe method can be left unimplemented by IOService subclasses. The client is already attached to the provider when probe is called. + * @param provider The registered IOService object that matches a driver personality's matching dictionary. + * @param score Pointer to the current driver's probe score, which is used to order multiple matching drivers in the same match category. It defaults to the value of the IOProbeScore property in the driver's property table, or kIODefaultProbeScore if none is specified. The probe method may alter the score to affect start order. + * @result An IOService instance or zero when the probe is unsuccessful. In almost all cases the value of this is returned on success. If another IOService object is returned, the probed instance is detached and freed, and the returned instance is used in its stead for start. */ + + virtual LIBKERN_RETURNS_NOT_RETAINED IOService * probe( IOService * provider, + SInt32 * score ); /*! @function start - @abstract During an IOService object's instantiation, starts the IOService object that has been selected to run on the provider. - @discussion The start method of an IOService instance is called by its provider when it has been selected (due to its probe score and match category) as the winning client. The client is already attached to the provider when start is called.
Implementations of start must call start on their superclass at an appropriate point. If an implementation of start has already called super::start but subsequently determines that it will fail, it must call super::stop to balance the prior call to super::start and prevent reference leaks. - @result true if the start was successful; false otherwise (which will cause the instance to be detached and usually freed). */ - - virtual bool start( IOService * provider ); - + * @abstract During an IOService object's instantiation, starts the IOService object that has been selected to run on the provider. + * @discussion The start method of an IOService instance is called by its provider when it has been selected (due to its probe score and match category) as the winning client. The client is already attached to the provider when start is called.
Implementations of start must call start on their superclass at an appropriate point. If an implementation of start has already called super::start but subsequently determines that it will fail, it must call super::stop to balance the prior call to super::start and prevent reference leaks. + * @result true if the start was successful; false otherwise (which will cause the instance to be detached and usually freed). */ + + virtual bool start( IOService * provider ); + /*! @function stop - @abstract During an IOService termination, the stop method is called in its clients before they are detached & it is destroyed. - @discussion The termination process for an IOService (the provider) will call stop in each of its clients, after they have closed the provider if they had it open, or immediately on termination. */ + * @abstract During an IOService termination, the stop method is called in its clients before they are detached and it is destroyed. + * @discussion The termination process for an IOService (the provider) will call stop in each of its clients, after they have closed the provider if they had it open, or immediately on termination. */ - virtual void stop( IOService * provider ); + virtual void stop( IOService * provider ); - /* Open / Close */ +/* Open / Close */ /*! @function open - @abstract Requests active access to a provider. - @discussion IOService provides generic open and close semantics to track clients of a provider that have established an active datapath. The use of open and @link close close@/link, and rules regarding ownership are family defined, and defined by the @link handleOpen handleOpen@/link and @link handleClose handleClose@/link methods in the provider. Some families will limit access to a provider based on its open state. - @param forClient Designates the client of the provider requesting the open. - @param options Options for the open. The provider family may implement options for open; IOService defines only kIOServiceSeize to request the device be withdrawn from its current owner. - @param arg Family specific arguments which are ignored by IOService. - @result true if the open was successful; false otherwise. */ + * @abstract Requests active access to a provider. + * @discussion IOService provides generic open and close semantics to track clients of a provider that have established an active datapath. The use of open and @link close close@/link, and rules regarding ownership are family defined, and are implemented by the @link handleOpen handleOpen@/link and @link handleClose handleClose@/link methods in the provider. Some families will limit access to a provider based on its open state. + * @param forClient Designates the client of the provider requesting the open. + * @param options Options for the open. The provider family may implement options for open; IOService defines only kIOServiceSeize to request the device be withdrawn from its current owner. + * @param arg Family-specific arguments which are ignored by IOService. + * @result true if the open was successful; false otherwise. */ - virtual bool open( IOService * forClient, - IOOptionBits options = 0, - void * arg = 0 ); + virtual bool open( IOService * forClient, + IOOptionBits options = 0, + void * arg = 0 ); /*! @function close - @abstract Releases active access to a provider. - @discussion IOService provides generic open and close semantics to track clients of a provider that have established an active datapath.
The use of @link open open@/link and close, and rules regarding ownership are family defined, and defined by the @link handleOpen handleOpen@/link and @link handleClose handleClose@/link methods in the provider. - @param forClient Designates the client of the provider requesting the close. - @param options Options available for the close. The provider family may implement options for close; IOService defines none. */ - - virtual void close( IOService * forClient, - IOOptionBits options = 0 ); - + * @abstract Releases active access to a provider. + * @discussion IOService provides generic open and close semantics to track clients of a provider that have established an active datapath. The use of @link open open@/link and close, and rules regarding ownership are family defined, and defined by the @link handleOpen handleOpen@/link and @link handleClose handleClose@/link methods in the provider. + * @param forClient Designates the client of the provider requesting the close. + * @param options Options available for the close. The provider family may implement options for close; IOService defines none. */ + + virtual void close( IOService * forClient, + IOOptionBits options = 0 ); + /*! @function isOpen - @abstract Determines whether a specific, or any, client has an IOService object open. - @discussion Returns the open state of an IOService object with respect to the specified client, or when it is open by any client. - @param forClient If non-zero, isOpen returns the open state for that client. If zero is passed, isOpen returns the open state for all clients. - @result true if the specific, or any, client has the IOService object open. */ + * @abstract Determines whether a specific, or any, client has an IOService object open. + * @discussion Returns the open state of an IOService object with respect to the specified client, or when it is open by any client. + * @param forClient If non-zero, isOpen returns the open state for that client. If zero is passed, isOpen returns the open state for all clients. + * @result true if the specific, or any, client has the IOService object open. */ - virtual bool isOpen( const IOService * forClient = 0 ) const; + virtual bool isOpen( const IOService * forClient = 0 ) const; /*! @function handleOpen - @abstract Controls the open / close behavior of an IOService object (overrideable by subclasses). - @discussion IOService calls this method in its subclasses in response to the @link open open@/link method, so the subclass may implement the request. The default implementation provides single owner access to an IOService object via open. The object is locked via @link lockForArbitration lockForArbitration@/link before handleOpen is called. - @param forClient Designates the client of the provider requesting the open. - @param options Options for the open, may be interpreted by the implementor of handleOpen. - @result trueif the open was successful; false otherwise. */ - - virtual bool handleOpen( IOService * forClient, - IOOptionBits options, - void * arg ); - + * @abstract Controls the open / close behavior of an IOService object (overrideable by subclasses). + * @discussion IOService calls this method in its subclasses in response to the @link open open@/link method, so the subclass may implement the request. The default implementation provides single owner access to an IOService object via open. The object is locked via @link lockForArbitration lockForArbitration@/link before handleOpen is called. 
+ * @param forClient Designates the client of the provider requesting the open. + * @param options Options for the open, may be interpreted by the implementor of handleOpen. + * @result true if the open was successful; false otherwise. */ + + virtual bool handleOpen( IOService * forClient, + IOOptionBits options, + void * arg ); + /*! @function handleClose - @abstract Controls the open / close behavior of an IOService object (overrideable by subclasses). - @discussion IOService calls this method in its subclasses in response to the @link close close@/link method, so the subclass may implement the request. The default implementation provides single owner access to an IOService object via @link open open@/link. The object is locked via @link lockForArbitration lockForArbitration@/link before handleClose is called. - @param forClient Designates the client of the provider requesting the close. - @param options Options for the close, may be interpreted by the implementor of @link handleOpen handleOpen@/link. */ - - virtual void handleClose( IOService * forClient, - IOOptionBits options ); - + * @abstract Controls the open / close behavior of an IOService object (overrideable by subclasses). + * @discussion IOService calls this method in its subclasses in response to the @link close close@/link method, so the subclass may implement the request. The default implementation provides single owner access to an IOService object via @link open open@/link. The object is locked via @link lockForArbitration lockForArbitration@/link before handleClose is called. + * @param forClient Designates the client of the provider requesting the close. + * @param options Options for the close, may be interpreted by the implementor of @link handleOpen handleOpen@/link. */ + + virtual void handleClose( IOService * forClient, + IOOptionBits options ); + /*! @function handleIsOpen - @abstract Controls the open / close behavior of an IOService object (overrideable by subclasses). - @discussion IOService calls this method in its subclasses in response to the @link open open@/link method, so the subclass may implement the request. The default implementation provides single owner access to an IOService object via @link open open@/link. The object is locked via @link lockForArbitration lockForArbitration@/link before handleIsOpen is called. - @param forClient If non-zero, isOpen returns the open state for that client. If zero is passed, isOpen returns the open state for all clients. - @result true if the specific, or any, client has the IOService object open. */ + * @abstract Controls the open / close behavior of an IOService object (overrideable by subclasses). + * @discussion IOService calls this method in its subclasses in response to the @link open open@/link method, so the subclass may implement the request. The default implementation provides single owner access to an IOService object via @link open open@/link. The object is locked via @link lockForArbitration lockForArbitration@/link before handleIsOpen is called. + * @param forClient If non-zero, isOpen returns the open state for that client. If zero is passed, isOpen returns the open state for all clients. + * @result true if the specific, or any, client has the IOService object open. */ - virtual bool handleIsOpen( const IOService * forClient ) const; + virtual bool handleIsOpen( const IOService * forClient ) const; - /* Stacking change */ +/* Stacking change */ /*! @function terminate - @abstract Makes an IOService object inactive and begins its destruction.
- @discussion Registering an IOService object informs possible clients of its existance and instantiates drivers that may be used with it; terminate involves the opposite process of informing clients that an IOService object is no longer able to be used and will be destroyed. By default, if any client has the service open, terminate fails. If the kIOServiceRequired flag is passed however, terminate will be successful though further progress in the destruction of the IOService object will not proceed until the last client has closed it. The service will be made inactive immediately upon successful termination, and all its clients will be notified via their @link message message@/link method with a message of type kIOMessageServiceIsTerminated. Both these actions take place on the caller's thread. After the IOService object is made inactive, further matching or attach calls will fail on it. Each client has its @link stop stop@/link method called upon their close of an inactive IOService object , or on its termination if they do not have it open. After stop, @link detach detach@/link is called in each client. When all clients have been detached, the @link finalize finalize@/link method is called in the inactive service. The termination process is inherently asynchronous because it will be deferred until all clients have chosen to close. - @param options In most cases no options are needed. kIOServiceSynchronous may be passed to cause terminate to not return until the service is finalized. */ + * @abstract Makes an IOService object inactive and begins its destruction. + * @discussion Registering an IOService object informs possible clients of its existence and instantiates drivers that may be used with it; terminate involves the opposite process of informing clients that an IOService object is no longer able to be used and will be destroyed. By default, if any client has the service open, terminate fails. If the kIOServiceRequired flag is passed, however, terminate will be successful though further progress in the destruction of the IOService object will not proceed until the last client has closed it. The service will be made inactive immediately upon successful termination, and all its clients will be notified via their @link message message@/link method with a message of type kIOMessageServiceIsTerminated. Both these actions take place on the caller's thread. After the IOService object is made inactive, further matching or attach calls will fail on it. Each client has its @link stop stop@/link method called upon their close of an inactive IOService object, or on its termination if they do not have it open. After stop, @link detach detach@/link is called in each client. When all clients have been detached, the @link finalize finalize@/link method is called in the inactive service. The termination process is inherently asynchronous because it will be deferred until all clients have chosen to close. + * @param options In most cases no options are needed. kIOServiceSynchronous may be passed to cause terminate to not return until the service is finalized. */ - virtual bool terminate( IOOptionBits options = 0 ); + virtual bool terminate( IOOptionBits options = 0 ); /*! @function finalize - @abstract Finalizes the destruction of an IOService object. - @discussion The finalize method is called in an inactive (ie. terminated) IOService object after the last client has detached.
IOService's implementation will call @link stop stop@/link, @link close close@/link, and @link detach detach@/link on each provider. When finalize returns, the object's retain count will have no references generated by IOService's registration process. - @param options The options passed to the @link terminate terminate@/link method of the IOService object are passed on to finalize. - @result true. */ - - virtual bool finalize( IOOptionBits options ); + * @abstract Finalizes the destruction of an IOService object. + * @discussion The finalize method is called in an inactive (i.e. terminated) IOService object after the last client has detached. IOService's implementation will call @link stop stop@/link, @link close close@/link, and @link detach detach@/link on each provider. When finalize returns, the object's retain count will have no references generated by IOService's registration process. + * @param options The options passed to the @link terminate terminate@/link method of the IOService object are passed on to finalize. + * @result true. */ + + virtual bool finalize( IOOptionBits options ); /*! @function init - @abstract Initializes generic IOService data structures (expansion data, etc). */ - virtual bool init( OSDictionary * dictionary = 0 ) APPLE_KEXT_OVERRIDE; + * @abstract Initializes generic IOService data structures (expansion data, etc). */ + virtual bool init( OSDictionary * dictionary = 0 ) APPLE_KEXT_OVERRIDE; /*! @function init - @abstract Initializes generic IOService data structures (expansion data, etc). */ - virtual bool init( IORegistryEntry * from, - const IORegistryPlane * inPlane ) APPLE_KEXT_OVERRIDE; + * @abstract Initializes generic IOService data structures (expansion data, etc). */ + virtual bool init( IORegistryEntry * from, + const IORegistryPlane * inPlane ) APPLE_KEXT_OVERRIDE; /*! @function free - @abstract Frees data structures that were allocated when power management was initialized on this service. */ - - virtual void free( void ) APPLE_KEXT_OVERRIDE; + * @abstract Frees data structures that were allocated when power management was initialized on this service. */ + + virtual void free( void ) APPLE_KEXT_OVERRIDE; /*! @function lockForArbitration - @abstract Locks an IOService object against changes in state or ownership. - @discussion The registration, termination and open / close functions of IOService use lockForArbtration to single-thread access to an IOService object. lockForArbitration grants recursive access to the same thread. - @param isSuccessRequired If a request for access to an IOService object should be denied if it is terminated, pass false, otherwise pass true. */ - - virtual bool lockForArbitration( bool isSuccessRequired = true ); - + * @abstract Locks an IOService object against changes in state or ownership. + * @discussion The registration, termination, and open / close functions of IOService use lockForArbitration to single-thread access to an IOService object. lockForArbitration grants recursive access to the same thread. + * @param isSuccessRequired If a request for access to an IOService object should be denied if it is terminated, pass false, otherwise pass true. */ + + virtual bool lockForArbitration( bool isSuccessRequired = true ); + /*! @function unlockForArbitration - @abstract Unlocks an IOService obkect after a successful @link lockForArbitration lockForArbitration@/link. - @discussion A thread granted exclusive access to an IOService object should release it with unlockForArbitration.
*/ - - virtual void unlockForArbitration( void ); + * @abstract Unlocks an IOService object after a successful @link lockForArbitration lockForArbitration@/link. + * @discussion A thread granted exclusive access to an IOService object should release it with unlockForArbitration. */ + + virtual void unlockForArbitration( void ); #ifdef XNU_KERNEL_PRIVATE - static uint32_t isLockedForArbitration(IOService * service); + static uint32_t isLockedForArbitration(IOService * service); #endif /* XNU_KERNEL_PRIVATE */ /*! @function terminateClient - @abstract Passes a termination up the stack. - @discussion When an IOService object is made inactive the default behavior is to also make any of its clients that have it as their only provider inactive, in this way recursing the termination up the driver stack. This method allows a terminated IOService object to override this behavior. Note the client may also override this behavior by overriding its @link terminate terminate@/link method. - @param client The client of the terminated provider. - @param options Options originally passed to @link terminate terminate@/link, plus kIOServiceRecursing. - @result result of the terminate request on the client. */ + * @abstract Passes a termination up the stack. + * @discussion When an IOService object is made inactive the default behavior is to also make any of its clients that have it as their only provider inactive, in this way recursing the termination up the driver stack. This method allows a terminated IOService object to override this behavior. Note the client may also override this behavior by overriding its @link terminate terminate@/link method. + * @param client The client of the terminated provider. + * @param options Options originally passed to @link terminate terminate@/link, plus kIOServiceRecursing. + * @result The result of the terminate request on the client. */ - virtual bool terminateClient( IOService * client, IOOptionBits options ); + virtual bool terminateClient( IOService * client, IOOptionBits options ); - /* Busy state indicates discovery, matching or termination is in progress */ +/* Busy state indicates discovery, matching or termination is in progress */ /*! @function getBusyState - @abstract Returns the busyState of an IOService object. - @discussion Many activities in IOService are asynchronous. When registration, matching, or termination is in progress on an IOService object, its busyState is increased by one. Change in busyState to or from zero also changes the IOService object's provider's busyState by one, which means that an IOService object is marked busy when any of the above activities is ocurring on it or any of its clients. - @result The busyState value. */ + * @abstract Returns the busyState of an IOService object. + * @discussion Many activities in IOService are asynchronous. When registration, matching, or termination is in progress on an IOService object, its busyState is increased by one. A change in busyState to or from zero also changes the IOService object's provider's busyState by one, which means that an IOService object is marked busy when any of the above activities is occurring on it or any of its clients. + * @result The busyState value. */ + + virtual UInt32 getBusyState( void ); - virtual UInt32 getBusyState( void ); - /*! @function adjustBusy - @abstract Adjusts the busyState of an IOService object. - @discussion Applies a delta to an IOService object's busyState.
A change in the busyState to or from zero will change the IOService object's provider's busyState by one (in the same direction). - @param delta The delta to be applied to the IOService object's busyState. */ + * @abstract Adjusts the busyState of an IOService object. + * @discussion Applies a delta to an IOService object's busyState. A change in the busyState to or from zero will change the IOService object's provider's busyState by one (in the same direction). + * @param delta The delta to be applied to the IOService object's busyState. */ - virtual void adjustBusy( SInt32 delta ); + virtual void adjustBusy( SInt32 delta ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn waitQuiet(mach_timespec_t * timeout) - APPLE_KEXT_DEPRECATED; + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn waitQuiet(mach_timespec_t * timeout) + APPLE_KEXT_DEPRECATED; /*! @function waitQuiet - @abstract Waits for an IOService object's busyState to be zero. - @discussion Blocks the caller until an IOService object is non busy. - @param timeout The maximum time to wait in nanoseconds. Default is to wait forever. - @result Returns an error code if Mach synchronization primitives fail, kIOReturnTimeout, or kIOReturnSuccess. */ - - IOReturn waitQuiet(uint64_t timeout = UINT64_MAX); + * @abstract Waits for an IOService object's busyState to be zero. + * @discussion Blocks the caller until an IOService object is not busy. + * @param timeout The maximum time to wait in nanoseconds. Default is to wait forever. + * @result Returns an error code if Mach synchronization primitives fail, kIOReturnTimeout, or kIOReturnSuccess. */ + + IOReturn waitQuiet(uint64_t timeout = UINT64_MAX); - /* Matching */ +/* Matching */ /*! @function matchPropertyTable - @abstract Allows a registered IOService object to implement family specific matching. - @discussion All matching on an IOService object will call this method to allow a family writer to implement matching in addition to the generic methods provided by IOService. The implementer should examine the matching dictionary passed to see if it contains properties the family understands for matching, and use them to match with the IOService object if so. Note that since matching is also carried out by other parts of the I/O Kit, the matching dictionary may contain properties the family does not understand - these should not be considered matching failures. + * @abstract Allows a registered IOService object to implement family-specific matching. + * @discussion All matching on an IOService object will call this method to allow a family writer to implement matching in addition to the generic methods provided by IOService. The implementer should examine the matching dictionary passed to see if it contains properties the family understands for matching, and use them to match with the IOService object if so. Note that since matching is also carried out by other parts of the I/O Kit, the matching dictionary may contain properties the family does not understand; these should not be considered matching failures.
+ * @param table The dictionary of properties to be matched against. + * @param score Pointer to the current driver's probe score, which is used to order multiple matching drivers in the same match category. It defaults to the value of the IOProbeScore property in the drivers property table, or kIODefaultProbeScore if none is specified. + * @result false if the family considers the matching dictionary does not match in properties it understands; true otherwise. */ - virtual bool matchPropertyTable( OSDictionary * table, - SInt32 * score ); + virtual bool matchPropertyTable( OSDictionary * table, + SInt32 * score ); - virtual bool matchPropertyTable( OSDictionary * table ); + virtual bool matchPropertyTable( OSDictionary * table ); /*! @function matchLocation - @abstract Allows a registered IOService object to direct location matching. - @discussion By default, a location matching property will be applied to an IOService object's provider. This method allows that behavior to be overridden by families. - @param client The IOService object at which matching is taking place. - @result Returns the IOService instance to be used for location matching. */ + * @abstract Allows a registered IOService object to direct location matching. + * @discussion By default, a location matching property will be applied to an IOService object's provider. This method allows that behavior to be overridden by families. + * @param client The IOService object at which matching is taking place. + * @result Returns the IOService instance to be used for location matching. */ - virtual IOService * matchLocation( IOService * client ); + virtual LIBKERN_RETURNS_NOT_RETAINED IOService * matchLocation( IOService * client ); - /* Resource service */ +/* Resource service */ /*! @function publishResource - @abstract Uses the resource service to publish a property. - @discussion The resource service uses IOService's matching and notification to allow objects to be published and found by any I/O Kit client by a global name. publishResource makes an object available to anyone waiting for it or looking for it in the future. - @param key An OSSymbol key that globally identifies the object. - @param value The object to be published. */ + * @abstract Uses the resource service to publish a property. + * @discussion The resource service uses IOService's matching and notification to allow objects to be published and found by any I/O Kit client by a global name. publishResource makes an object available to anyone waiting for it or looking for it in the future. + * @param key An OSSymbol key that globally identifies the object. + * @param value The object to be published. */ - static void publishResource( const OSSymbol * key, OSObject * value = 0 ); + static void publishResource( const OSSymbol * key, OSObject * value = 0 ); /*! @function publishResource - @abstract Uses the resource service to publish a property. - @discussion The resource service uses IOService object's matching and notification to allow objects to be published and found by any I/O Kit client by a global name. publishResource makes an object available to anyone waiting for it or looking for it in the future. - @param key A C string key that globally identifies the object. - @param value The object to be published. */ + * @abstract Uses the resource service to publish a property. + * @discussion The resource service uses IOService object's matching and notification to allow objects to be published and found by any I/O Kit client by a global name. 
publishResource makes an object available to anyone waiting for it or looking for it in the future. + * @param key A C string key that globally identifies the object. + * @param value The object to be published. */ - static void publishResource( const char * key, OSObject * value = 0 ); - virtual bool addNeededResource( const char * key ); + static void publishResource( const char * key, OSObject * value = 0 ); + virtual bool addNeededResource( const char * key ); - /* Notifications */ +/* Notifications */ /*! @function addNotification - @abstract Deprecated use addMatchingNotification(). Adds a persistant notification handler to be notified of IOService events. - @discussion IOService will deliver notifications of changes in state of an IOService object to registered clients. The type of notification is specified by a symbol, for example gIOMatchedNotification or gIOTerminatedNotification, and notifications will only include IOService objects that match the supplied matching dictionary. Notifications are ordered by a priority set with addNotification. When the notification is installed, its handler will be called with each of any currently existing IOService objects that are in the correct state (eg. registered) and match the supplied matching dictionary, avoiding races between finding preexisting and new IOService events. The notification request is identified by an instance of an IONotifier object, through which it can be enabled, disabled, or removed. addNotification consumes a retain count on the matching dictionary when the notification is removed. - @param type An OSSymbol identifying the type of notification and IOService state: -
gIOPublishNotification Delivered when an IOService object is registered. -
gIOFirstPublishNotification Delivered when an IOService object is registered, but only once per IOService instance. Some IOService objects may be reregistered when their state is changed. -
gIOMatchedNotification Delivered when an IOService object has been matched with all client drivers, and they have been probed and started. -
gIOFirstMatchNotification Delivered when an IOService object has been matched with all client drivers, but only once per IOService instance. Some IOService objects may be reregistered when their state is changed. -
gIOWillTerminateNotification Delivered after an IOService object has been terminated, during its finalize stage. Delivered after any matching on the service has finished. -
gIOTerminatedNotification Delivered immediately when an IOService object has been terminated, making it inactive. - @param matching A matching dictionary to restrict notifications to only matching IOService objects. The dictionary will be released when the notification is removed, consuming the passed-in reference. - @param handler A C function callback to deliver notifications. - @param target An instance reference for the callback's use. - @param ref A reference constant for the callback's use. - @param priority A constant ordering all notifications of a each type. - @result An instance of an IONotifier object that can be used to control or destroy the notification request. */ - - static IONotifier * addNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceNotificationHandler handler, - void * target, void * ref = 0, - SInt32 priority = 0 ) - APPLE_KEXT_DEPRECATED; + * @abstract Deprecated; use addMatchingNotification(). Adds a persistent notification handler to be notified of IOService events. + * @discussion IOService will deliver notifications of changes in state of an IOService object to registered clients. The type of notification is specified by a symbol, for example gIOMatchedNotification or gIOTerminatedNotification, and notifications will only include IOService objects that match the supplied matching dictionary. Notifications are ordered by a priority set with addNotification. When the notification is installed, its handler will be called with each currently existing IOService object that is in the correct state (e.g. registered) and matches the supplied matching dictionary, avoiding races between finding preexisting and new IOService events. The notification request is identified by an instance of an IONotifier object, through which it can be enabled, disabled, or removed. addNotification consumes a retain count on the matching dictionary when the notification is removed. + * @param type An OSSymbol identifying the type of notification and IOService state: + *
gIOPublishNotification Delivered when an IOService object is registered. + *
gIOFirstPublishNotification Delivered when an IOService object is registered, but only once per IOService instance. Some IOService objects may be reregistered when their state is changed. + *
gIOMatchedNotification Delivered when an IOService object has been matched with all client drivers, and they have been probed and started. + *
gIOFirstMatchNotification Delivered when an IOService object has been matched with all client drivers, but only once per IOService instance. Some IOService objects may be reregistered when their state is changed. + *
gIOWillTerminateNotification Delivered after an IOService object has been terminated, during its finalize stage. Delivered after any matching on the service has finished. + *
gIOTerminatedNotification Delivered immediately when an IOService object has been terminated, making it inactive.
+ * @param matching A matching dictionary to restrict notifications to only matching IOService objects. The dictionary will be released when the notification is removed, consuming the passed-in reference.
+ * @param handler A C function callback to deliver notifications.
+ * @param target An instance reference for the callback's use.
+ * @param ref A reference constant for the callback's use.
+ * @param priority A constant ordering all notifications of each type.
+ * @result An instance of an IONotifier object that can be used to control or destroy the notification request. */
+
+ static IONotifier * addNotification(
+ const OSSymbol * type, OSDictionary * matching,
+ IOServiceNotificationHandler handler,
+ void * target, void * ref = 0,
+ SInt32 priority = 0 )
+ APPLE_KEXT_DEPRECATED;
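As a usage sketch for the addMatchingNotification() API documented next (illustrative only, not part of the header: mediaMatched, gMediaNotifier and installMediaNotification are hypothetical names, while serviceMatching, addMatchingNotification, gIOMatchedNotification and IONotifier are the real IOKit interfaces):

    // Sketch: watch for IOMedia objects as they match, from kext context.
    static bool mediaMatched(void * target, void * refCon,
                             IOService * newService, IONotifier * notifier)
    {
        IOLog("IOMedia matched: %s\n", newService->getName());
        return true;    // returning true keeps the notification installed
    }

    static IONotifier * gMediaNotifier;

    static void installMediaNotification(void)
    {
        OSDictionary * matching = IOService::serviceMatching("IOMedia");
        if (!matching) {
            return;
        }
        // addMatchingNotification retains the dictionary while installed,
        // so we release our own reference afterwards.
        gMediaNotifier = IOService::addMatchingNotification(
            gIOMatchedNotification, matching, &mediaMatched,
            /* target */ NULL, /* ref */ NULL, /* priority */ 0);
        matching->release();
        // Later, gMediaNotifier->remove() uninstalls the notification.
    }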
/*! @function addMatchingNotification
- @abstract Adds a persistant notification handler to be notified of IOService events.
- @discussion IOService will deliver notifications of changes in state of an IOService object to registered clients. The type of notification is specified by a symbol, for example gIOMatchedNotification or gIOTerminatedNotification, and notifications will only include IOService objects that match the supplied matching dictionary. Notifications are ordered by a priority set with addNotification. When the notification is installed, its handler will be called with each of any currently existing IOService objects that are in the correct state (eg. registered) and match the supplied matching dictionary, avoiding races between finding preexisting and new IOService events. The notification request is identified by an instance of an IONotifier object, through which it can be enabled, disabled, or removed. addMatchingNotification does not consume a reference on the matching dictionary when the notification is removed, unlike addNotification.
- @param type An OSSymbol identifying the type of notification and IOService state:
- gIOPublishNotification Delivered when an IOService object is registered.
-
gIOFirstPublishNotification Delivered when an IOService object is registered, but only once per IOService instance. Some IOService objects may be reregistered when their state is changed. -
gIOMatchedNotification Delivered when an IOService object has been matched with all client drivers, and they have been probed and started. -
gIOFirstMatchNotification Delivered when an IOService object has been matched with all client drivers, but only once per IOService instance. Some IOService objects may be reregistered when their state is changed. -
gIOWillTerminateNotification Delivered after an IOService object has been terminated, during its finalize stage. Delivered after any matching on the service has finished. -
gIOTerminatedNotification Delivered immediately when an IOService object has been terminated, making it inactive.
- @param matching A matching dictionary to restrict notifications to only matching IOService objects. The dictionary is retained while the notification is installed. (Differs from addNotification).
- @param handler A C function callback to deliver notifications.
- @param target An instance reference for the callback's use.
- @param ref A reference constant for the callback's use.
- @param priority A constant ordering all notifications of a each type.
- @result An instance of an IONotifier object that can be used to control or destroy the notification request. */
-
- static IONotifier * addMatchingNotification(
- const OSSymbol * type, OSDictionary * matching,
- IOServiceMatchingNotificationHandler handler,
- void * target, void * ref = 0,
- SInt32 priority = 0 );
+ * @abstract Adds a persistent notification handler to be notified of IOService events.
+ * @discussion IOService will deliver notifications of changes in state of an IOService object to registered clients. The type of notification is specified by a symbol, for example gIOMatchedNotification or gIOTerminatedNotification, and notifications will only include IOService objects that match the supplied matching dictionary. Notifications are ordered by a priority set with addNotification. When the notification is installed, its handler will be called with each of any currently existing IOService objects that are in the correct state (eg. registered) and match the supplied matching dictionary, avoiding races between finding preexisting and new IOService events. The notification request is identified by an instance of an IONotifier object, through which it can be enabled, disabled, or removed. addMatchingNotification does not consume a reference on the matching dictionary when the notification is removed, unlike addNotification.
+ * @param type An OSSymbol identifying the type of notification and IOService state:
+ *
gIOPublishNotification Delivered when an IOService object is registered. + *
gIOFirstPublishNotification Delivered when an IOService object is registered, but only once per IOService instance. Some IOService objects may be reregistered when their state is changed. + *
gIOMatchedNotification Delivered when an IOService object has been matched with all client drivers, and they have been probed and started. + *
gIOFirstMatchNotification Delivered when an IOService object has been matched with all client drivers, but only once per IOService instance. Some IOService objects may be reregistered when their state is changed. + *
gIOWillTerminateNotification Delivered after an IOService object has been terminated, during its finalize stage. Delivered after any matching on the service has finished. + *
gIOTerminatedNotification Delivered immediately when an IOService object has been terminated, making it inactive.
+ * @param matching A matching dictionary to restrict notifications to only matching IOService objects. The dictionary is retained while the notification is installed. (Differs from addNotification).
+ * @param handler A C function callback to deliver notifications.
+ * @param target An instance reference for the callback's use.
+ * @param ref A reference constant for the callback's use.
+ * @param priority A constant ordering all notifications of each type.
+ * @result An instance of an IONotifier object that can be used to control or destroy the notification request. */
+
+ static IONotifier * addMatchingNotification(
+ const OSSymbol * type, OSDictionary * matching,
+ IOServiceMatchingNotificationHandler handler,
+ void * target, void * ref = 0,
+ SInt32 priority = 0 );
#ifdef __BLOCKS__
- static IONotifier * addMatchingNotification(
- const OSSymbol * type, OSDictionary * matching,
- SInt32 priority,
- IOServiceMatchingNotificationHandlerBlock handler);
+ static IONotifier * addMatchingNotification(
+ const OSSymbol * type, OSDictionary * matching,
+ SInt32 priority,
+ IOServiceMatchingNotificationHandlerBlock handler);
#endif /* __BLOCKS__ */
/*! @function waitForService
- @abstract Deprecated use waitForMatchingService(). Waits for a matching to service to be published.
- @discussion Provides a method of waiting for an IOService object matching the supplied matching dictionary to be registered and fully matched.
- @param matching The matching dictionary describing the desired IOService object. waitForService consumes one reference of the matching dictionary.
- @param timeout The maximum time to wait.
- @result A published IOService object matching the supplied dictionary. */
+ * @abstract Deprecated; use waitForMatchingService(). Waits for a matching service to be published.
+ * @discussion Provides a method of waiting for an IOService object matching the supplied matching dictionary to be registered and fully matched.
+ * @param matching The matching dictionary describing the desired IOService object. waitForService consumes one reference of the matching dictionary.
+ * @param timeout The maximum time to wait.
+ * @result A published IOService object matching the supplied dictionary. */
- static IOService * waitForService( OSDictionary * matching,
- mach_timespec_t * timeout = 0);
+ LIBKERN_RETURNS_NOT_RETAINED
+ static IOService * waitForService(
+ LIBKERN_CONSUMED OSDictionary * matching,
+ mach_timespec_t * timeout = 0);
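A sketch of a blocking wait using the non-deprecated waitForMatchingService() documented next (illustrative only; the class name and ten-second timeout are arbitrary, and kSecondScale is the Mach nanosecond scale constant):

    // Wait up to ten seconds for an IOMedia service to be published and matched.
    OSDictionary * matching = IOService::serviceMatching("IOMedia");
    if (matching) {
        IOService * media = IOService::waitForMatchingService(matching,
            10ULL * kSecondScale);          // timeout in nanoseconds
        matching->release();                // not consumed by waitForMatchingService
        if (media) {
            // ... use the service ...
            media->release();               // returned reference is owned by the caller
        }
    }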
/*! @function waitForMatchingService
- @abstract Waits for a matching to service to be published.
- @discussion Provides a method of waiting for an IOService object matching the supplied matching dictionary to be registered and fully matched.
- @param matching The matching dictionary describing the desired IOService object. (Does not consume a reference of the matching dictionary - differs from waitForService() which does consume a reference on the matching dictionary.)
- @param timeout The maximum time to wait in nanoseconds. Default is to wait forever.
- @result A published IOService object matching the supplied dictionary. waitForMatchingService returns a reference to the IOService which should be released by the caller. (Differs from waitForService() which does not retain the returned object.) */
+ * @abstract Waits for a matching service to be published.
+ * @discussion Provides a method of waiting for an IOService object matching the supplied matching dictionary to be registered and fully matched.
+ * @param matching The matching dictionary describing the desired IOService object. (Does not consume a reference of the matching dictionary - differs from waitForService() which does consume a reference on the matching dictionary.)
+ * @param timeout The maximum time to wait in nanoseconds. Default is to wait forever.
+ * @result A published IOService object matching the supplied dictionary. waitForMatchingService returns a reference to the IOService which should be released by the caller. (Differs from waitForService() which does not retain the returned object.) */
- static IOService * waitForMatchingService( OSDictionary * matching,
- uint64_t timeout = UINT64_MAX);
+ static IOService * waitForMatchingService( OSDictionary * matching,
+ uint64_t timeout = UINT64_MAX);
/*! @function getMatchingServices
- @abstract Finds the set of current published IOService objects matching a matching dictionary.
- @discussion Provides a method of finding the current set of published IOService objects matching the supplied matching dictionary.
- @param matching The matching dictionary describing the desired IOService objects.
- @result An instance of an iterator over a set of IOService objects. To be released by the caller. */
+ * @abstract Finds the set of current published IOService objects matching a matching dictionary.
+ * @discussion Provides a method of finding the current set of published IOService objects matching the supplied matching dictionary.
+ * @param matching The matching dictionary describing the desired IOService objects.
+ * @result An instance of an iterator over a set of IOService objects. To be released by the caller. */
- static OSIterator * getMatchingServices( OSDictionary * matching );
+ static OSIterator * getMatchingServices( OSDictionary * matching );
/*! @function copyMatchingService
- @abstract Finds one of the current published IOService objects matching a matching dictionary.
- @discussion Provides a method to find one member of the set of published IOService objects matching the supplied matching dictionary.
- @param matching The matching dictionary describing the desired IOService object.
- @result The IOService object or NULL. To be released by the caller. */
+ * @abstract Finds one of the current published IOService objects matching a matching dictionary.
+ * @discussion Provides a method to find one member of the set of published IOService objects matching the supplied matching dictionary.
+ * @param matching The matching dictionary describing the desired IOService object.
+ * @result The IOService object or NULL. To be released by the caller. */
- static IOService * copyMatchingService( OSDictionary * matching );
+ static IOService * copyMatchingService( OSDictionary * matching );
public:
- /* Helpers to make matching dictionaries for simple cases,
- * they add keys to an existing dictionary, or create one. */
+/* Helpers to make matching dictionaries for simple cases,
+ * they add keys to an existing dictionary, or create one. */
/*! @function serviceMatching
- @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService class match.
- @discussion A very common matching criteria for IOService object is based on its class. serviceMatching creates a matching dictionary that specifies any IOService object of a class, or its subclasses.
The class is specified by name, and an existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
- @param className The class name, as a const C string. Class matching is successful on IOService objects of this class or any subclass.
- @param table If zero, serviceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
- @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
+ * @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService class match.
+ * @discussion A very common matching criterion for an IOService object is based on its class. serviceMatching creates a matching dictionary that specifies any IOService object of a class, or its subclasses. The class is specified by name, and an existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
+ * @param className The class name, as a const C string. Class matching is successful on IOService objects of this class or any subclass.
+ * @param table If zero, serviceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
+ * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
- static OSDictionary * serviceMatching( const char * className,
- OSDictionary * table = 0 );
+ static OSDictionary * serviceMatching( const char * className,
+ OSDictionary * table = 0 );
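A sketch of enumerating the currently published members of a class with serviceMatching() and getMatchingServices() (illustrative only; the class name is arbitrary):

    // Log every IOUSBHostDevice currently registered in the service plane.
    OSDictionary * matching = IOService::serviceMatching("IOUSBHostDevice");
    if (matching) {
        OSIterator * iter = IOService::getMatchingServices(matching);
        matching->release();                // release our reference to the dictionary
        if (iter) {
            OSObject * entry;
            while ((entry = iter->getNextObject())) {
                IOService * service = OSDynamicCast(IOService, entry);
                if (service) {
                    IOLog("matched service: %s\n", service->getName());
                }
            }
            iter->release();                // the iterator must be released by the caller
        }
    }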
/*! @function serviceMatching
- @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService class match.
- @discussion A very common matching criteria for IOService object is based on its class. serviceMatching creates a matching dictionary that specifies any IOService of a class, or its subclasses. The class is specified by name, and an existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
- @param className The class name, as an OSString (which includes OSSymbol). Class matching is successful on IOService objects of this class or any subclass.
- @param table If zero, serviceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
- @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
+ * @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService class match.
+ * @discussion A very common matching criterion for an IOService object is based on its class. serviceMatching creates a matching dictionary that specifies any IOService of a class, or its subclasses. The class is specified by name, and an existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
+ * @param className The class name, as an OSString (which includes OSSymbol).
+ * @param table If zero, serviceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
+ * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
- static OSDictionary * serviceMatching( const OSString * className,
- OSDictionary * table = 0 );
+ static OSDictionary * serviceMatching( const OSString * className,
+ OSDictionary * table = 0 );
/*! @function nameMatching
- @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService name match.
- @discussion A very common matching criteria for IOService object is based on its name. nameMatching creates a matching dictionary that specifies any IOService object which responds successfully to the @link //apple_ref/cpp/instm/IORegistryEntry/compareName/virtualbool/(OSString*,OSString**) IORegistryEntry::compareName@/link method. An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
- @param name The service's name, as a const C string. Name matching is successful on IOService objects that respond successfully to the IORegistryEntry::compareName method.
- @param table If zero, nameMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
- @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
+ * @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService name match.
+ * @discussion A very common matching criterion for an IOService object is based on its name. nameMatching creates a matching dictionary that specifies any IOService object which responds successfully to the @link //apple_ref/cpp/instm/IORegistryEntry/compareName/virtualbool/(OSString*,OSString**) IORegistryEntry::compareName@/link method. An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
+ * @param name The service's name, as a const C string. Name matching is successful on IOService objects that respond successfully to the IORegistryEntry::compareName method.
+ * @param table If zero, nameMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
+ * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
- static OSDictionary * nameMatching( const char * name,
- OSDictionary * table = 0 );
+ static OSDictionary * nameMatching( const char * name,
+ OSDictionary * table = 0 );
/*! @function nameMatching
- @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService name match.
- @discussion A very common matching criteria for IOService object is based on its name. nameMatching creates a matching dictionary that specifies any IOService object which responds successfully to the @link //apple_ref/cpp/instm/IORegistryEntry/compareName/virtualbool/(OSString*,OSString**) IORegistryEntry::compareName@/link method. An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
- @param name The service's name, as an OSString (which includes OSSymbol).
Name matching is successful on IOService objects that respond successfully to the IORegistryEntry::compareName method.
- @param table If zero, nameMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
- @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
+ * @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService name match.
+ * @discussion A very common matching criterion for an IOService object is based on its name. nameMatching creates a matching dictionary that specifies any IOService object which responds successfully to the @link //apple_ref/cpp/instm/IORegistryEntry/compareName/virtualbool/(OSString*,OSString**) IORegistryEntry::compareName@/link method. An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
+ * @param name The service's name, as an OSString (which includes OSSymbol). Name matching is successful on IOService objects that respond successfully to the IORegistryEntry::compareName method.
+ * @param table If zero, nameMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
+ * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
- static OSDictionary * nameMatching( const OSString* name,
- OSDictionary * table = 0 );
+ static OSDictionary * nameMatching( const OSString* name,
+ OSDictionary * table = 0 );
/*! @function resourceMatching
- @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify a resource service match.
- @discussion IOService maintains a resource service IOResources that allows objects to be published and found globally in the I/O Kit based on a name, using the standard IOService matching and notification calls.
- @param name The resource name, as a const C string. Resource matching is successful when an object by that name has been published with the publishResource method.
- @param table If zero, resourceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
- @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
+ * @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify a resource service match.
+ * @discussion IOService maintains a resource service IOResources that allows objects to be published and found globally in the I/O Kit based on a name, using the standard IOService matching and notification calls.
+ * @param name The resource name, as a const C string. Resource matching is successful when an object by that name has been published with the publishResource method.
+ * @param table If zero, resourceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
+ * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
- static OSDictionary * resourceMatching( const char * name,
- OSDictionary * table = 0 );
+ static OSDictionary * resourceMatching( const char * name,
+ OSDictionary * table = 0 );
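A sketch connecting publishResource() with resource matching (illustrative only; the resource name is hypothetical). A provider publishes an object under a global name, and a client builds a resourceMatching() dictionary to wait for it; the match is delivered on the IOResources service, which holds the published object as a property:

    // Provider side: publish this service under a global resource name.
    IOService::publishResource("com_example_myResource", this);

    // Client side: wait up to five seconds for the resource to be published.
    OSDictionary * matching = IOService::resourceMatching("com_example_myResource");
    if (matching) {
        IOService * resources = IOService::waitForMatchingService(matching,
            5ULL * kSecondScale);
        matching->release();
        if (resources) {
            resources->release();           // release the returned IOResources reference
        }
    }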
/*! @function resourceMatching
- @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify a resource service match.
- @discussion IOService maintains a resource service IOResources that allows objects to be published and found globally in the I/O Kit based on a name, using the standard IOService matching and notification calls.
- @param name The resource name, as an OSString (which includes OSSymbol). Resource matching is successful when an object by that name has been published with the publishResource method.
- @param table If zero, resourceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
- @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
+ * @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify a resource service match.
+ * @discussion IOService maintains a resource service IOResources that allows objects to be published and found globally in the I/O Kit based on a name, using the standard IOService matching and notification calls.
+ * @param name The resource name, as an OSString (which includes OSSymbol). Resource matching is successful when an object by that name has been published with the publishResource method.
+ * @param table If zero, resourceMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
+ * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
- static OSDictionary * resourceMatching( const OSString * name,
- OSDictionary * table = 0 );
+ static OSDictionary * resourceMatching( const OSString * name,
+ OSDictionary * table = 0 );
/*! @function propertyMatching
- @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService phandle match.
- @discussion TODO A very common matching criteria for IOService is based on its name. nameMatching will create a matching dictionary that specifies any IOService which respond successfully to the IORegistryEntry method compareName. An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
- @param key The service's phandle, as a const UInt32. PHandle matching is successful on IOService objects that respond successfully to the IORegistryEntry method compareName.
- @param value The service's phandle, as a const UInt32. PHandle matching is successful on IOService's which respond successfully to the IORegistryEntry method compareName.
- @param table If zero, nameMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary.
- @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
+ * @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IOService property match.
+ * @discussion propertyMatching creates a matching dictionary that specifies any IOService object with a property table entry equal to the supplied key and value. An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
+ * @param key The property key, as an OSSymbol. Property matching is successful on IOService objects whose property table contains the key with a value equal to the supplied value.
+ * @param value The property value, as an OSObject, compared against the service's property with the isEqualTo method.
+ * @param table If zero, propertyMatching will create a matching dictionary and return a reference to it, otherwise the matching properties are added to the specified dictionary.
+ * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
- static OSDictionary * propertyMatching( const OSSymbol * key, const OSObject * value,
- OSDictionary * table = 0 );
+ static OSDictionary * propertyMatching( const OSSymbol * key, const OSObject * value,
+ OSDictionary * table = 0 );
/*! @function registryEntryIDMatching
- @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify a IORegistryEntryID match.
- @discussion registryEntryIDMatching creates a matching dictionary that specifies the IOService object with the assigned registry entry ID (returned by IORegistryEntry::getRegistryEntryID()). An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
- @param entryID The service's ID. Matching is successful on the IOService object that return that ID from the IORegistryEntry::getRegistryEntryID() method.
- @param table If zero, registryEntryIDMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
- @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
+ * @abstract Creates a matching dictionary, or adds matching properties to an existing dictionary, that specify an IORegistryEntryID match.
+ * @discussion registryEntryIDMatching creates a matching dictionary that specifies the IOService object with the assigned registry entry ID (returned by IORegistryEntry::getRegistryEntryID()). An existing dictionary may be passed in, in which case the matching properties will be added to that dictionary rather than creating a new one.
+ * @param entryID The service's ID. Matching is successful on the IOService object that returns that ID from the IORegistryEntry::getRegistryEntryID() method.
+ * @param table If zero, registryEntryIDMatching creates a matching dictionary and returns a reference to it, otherwise the matching properties are added to the specified dictionary.
+ * @result The matching dictionary created, or passed in, is returned on success, or zero on failure. */
- static OSDictionary * registryEntryIDMatching( uint64_t entryID,
- OSDictionary * table = 0 );
+ static OSDictionary * registryEntryIDMatching( uint64_t entryID,
+ OSDictionary * table = 0 );
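A sketch of re-finding a service by its registry entry ID with registryEntryIDMatching() and copyMatchingService() (illustrative only; 'service' stands for any IOService pointer already in hand):

    // Save the stable 64-bit ID now; look the service up again later.
    uint64_t entryID = service->getRegistryEntryID();

    OSDictionary * matching = IOService::registryEntryIDMatching(entryID);
    if (matching) {
        IOService * found = IOService::copyMatchingService(matching);
        matching->release();
        if (found) {
            // 'found' is the same service, if it is still registered.
            found->release();               // copyMatchingService returns a retained object
        }
    }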
/*! @function addLocation
- @abstract Adds a location matching property to an existing dictionary.
- @discussion This function creates matching properties that specify the location of a IOService object, as an embedded matching dictionary. This matching will be successful on an IOService object that attached to an IOService object which matches this location matching dictionary.
- @param table The matching properties are added to the specified dictionary, which must be non-zero.
- @result The location matching dictionary created is returned on success, or zero on failure. */
+ * @abstract Adds a location matching property to an existing dictionary.
+ * @discussion This function creates matching properties that specify the location of an IOService object, as an embedded matching dictionary. This matching will be successful on an IOService object that has attached to an IOService object which matches this location matching dictionary.
+ * @param table The matching properties are added to the specified dictionary, which must be non-zero.
+ * @result The location matching dictionary created is returned on success, or zero on failure. */
- static OSDictionary * addLocation( OSDictionary * table );
+ static OSDictionary * addLocation( OSDictionary * table );
- /* Helpers for matching dictionaries. */
+/* Helpers for matching dictionaries. */
/*! @function compareProperty
- @abstract Compares a property in a matching dictionary with an IOService object's property table.
- @discussion This is a helper function to aid in implementing @link matchPropertyTable matchPropertyTable@/link. If the property specified by key exists in the matching dictionary, it is compared with a property of the same name in the IOService object's property table. The comparison is performed with the isEqualTo method. If the property does not exist in the matching table, success is returned. If the property exists in the matching dictionary but not the IOService property table, failure is returned.
- @param matching The matching dictionary, which must be non-zero.
- @param key The dictionary key specifying the property to be compared, as a C string.
- @result true if the property does not exist in the matching table. If the property exists in the matching dictionary but not the IOService property table, failure is returned. Otherwise the result of calling the property from the matching dictionary's isEqualTo method with the IOService property as an argument is returned. */
-
- virtual bool compareProperty( OSDictionary * matching,
- const char * key );
+ * @abstract Compares a property in a matching dictionary with an IOService object's property table.
+ * @discussion This is a helper function to aid in implementing @link matchPropertyTable matchPropertyTable@/link. If the property specified by key exists in the matching dictionary, it is compared with a property of the same name in the IOService object's property table. The comparison is performed with the isEqualTo method. If the property does not exist in the matching table, success is returned. If the property exists in the matching dictionary but not the IOService property table, failure is returned.
+ * @param matching The matching dictionary, which must be non-zero.
+ * @param key The dictionary key specifying the property to be compared, as a C string.
+ * @result true if the property does not exist in the matching table. If the property exists in the matching dictionary but not the IOService property table, failure is returned. Otherwise the result of calling the property from the matching dictionary's isEqualTo method with the IOService property as an argument is returned. */
+
+ virtual bool compareProperty( OSDictionary * matching,
+ const char * key );
/*! @function compareProperty
- @abstract Compares a property in a matching dictionary with an IOService object's property table.
- @discussion This is a helper function to aid in implementing @link matchPropertyTable matchPropertyTable@/link.
If the property specified by key exists in the matching dictionary, it is compared with a property of the same name in the IOService object's property table. The comparison is performed with the isEqualTo method. If the property does not exist in the matching table, success is returned. If the property exists in the matching dictionary but not the IOService property table, failure is returned.
- @param matching The matching dictionary, which must be non-zero.
- @param key The dictionary key specifying the property to be compared, as an OSString (which includes OSSymbol).
- @result true if the property does not exist in the matching table. If the property exists in the matching dictionary but not the IOService property table, failure is returned. Otherwise the result of calling the property from the matching dictionary's isEqualTo method with the IOService property as an argument is returned. */
+ * @abstract Compares a property in a matching dictionary with an IOService object's property table.
+ * @discussion This is a helper function to aid in implementing @link matchPropertyTable matchPropertyTable@/link. If the property specified by key exists in the matching dictionary, it is compared with a property of the same name in the IOService object's property table. The comparison is performed with the isEqualTo method. If the property does not exist in the matching table, success is returned. If the property exists in the matching dictionary but not the IOService property table, failure is returned.
+ * @param matching The matching dictionary, which must be non-zero.
+ * @param key The dictionary key specifying the property to be compared, as an OSString (which includes OSSymbol).
+ * @result true if the property does not exist in the matching table. If the property exists in the matching dictionary but not the IOService property table, failure is returned. Otherwise the result of calling the property from the matching dictionary's isEqualTo method with the IOService property as an argument is returned. */
- virtual bool compareProperty( OSDictionary * matching,
- const OSString * key );
+ virtual bool compareProperty( OSDictionary * matching,
+ const OSString * key );
/*! @function compareProperties
- @abstract Compares a set of properties in a matching dictionary with an IOService object's property table.
- @discussion This is a helper function to aid in implementing @link matchPropertyTable matchPropertyTable@/link. A collection of dictionary keys specifies properties in a matching dictionary to be compared, with compareProperty, with an IOService object's property table, if compareProperty returns true for each key, success is returned; otherwise failure.
- @param matching The matching dictionary, which must be non-zero.
- @param keys A collection (eg. OSSet, OSArray, OSDictionary) which should contain OSStrings (or OSSymbols) that specify the property keys to be compared.
- @result Success if compareProperty returns true for each key in the collection; otherwise failure. */
+ * @abstract Compares a set of properties in a matching dictionary with an IOService object's property table.
+ * @discussion This is a helper function to aid in implementing @link matchPropertyTable matchPropertyTable@/link. A collection of dictionary keys specifies properties in a matching dictionary to be compared, with compareProperty, against an IOService object's property table; if compareProperty returns true for each key, success is returned, otherwise failure.
+ * @param matching The matching dictionary, which must be non-zero. + * @param keys A collection (eg. OSSet, OSArray, OSDictionary) which should contain OSStrings (or OSSymbols) that specify the property keys to be compared. + * @result Success if compareProperty returns true for each key in the collection; otherwise failure. */ - virtual bool compareProperties( OSDictionary * matching, - OSCollection * keys ); + virtual bool compareProperties( OSDictionary * matching, + OSCollection * keys ); - /* Client / provider accessors */ +/* Client / provider accessors */ /*! @function attach - @abstract Attaches an IOService client to a provider in the I/O Registry. - @discussion This function called in an IOService client enters the client into the I/O Registry as a child of the provider in the service plane. The provider must be active or the attach will fail. Multiple attach calls to the same provider are no-ops and return success. A client may be attached to multiple providers. Entering an object into the I/O Registry retains both the client and provider until they are detached. - @param provider The IOService object which will serve as this object's provider. - @result false if the provider is inactive or on a resource failure; otherwise true. */ + * @abstract Attaches an IOService client to a provider in the I/O Registry. + * @discussion This function called in an IOService client enters the client into the I/O Registry as a child of the provider in the service plane. The provider must be active or the attach will fail. Multiple attach calls to the same provider are no-ops and return success. A client may be attached to multiple providers. Entering an object into the I/O Registry retains both the client and provider until they are detached. + * @param provider The IOService object which will serve as this object's provider. + * @result false if the provider is inactive or on a resource failure; otherwise true. */ + + virtual bool attach( IOService * provider ); - virtual bool attach( IOService * provider ); - /*! @function detach - @abstract Detaches an IOService client from a provider in the I/O Registry. - @discussion This function called in an IOService client removes the client as a child of the provider in the service plane of the I/O Registry. If the provider is not a parent of the client this is a no-op, otherwise the I/O Registry releases both the client and provider. - @param provider The IOService object to detach from. */ + * @abstract Detaches an IOService client from a provider in the I/O Registry. + * @discussion This function called in an IOService client removes the client as a child of the provider in the service plane of the I/O Registry. If the provider is not a parent of the client this is a no-op, otherwise the I/O Registry releases both the client and provider. + * @param provider The IOService object to detach from. */ - virtual void detach( IOService * provider ); + virtual void detach( IOService * provider ); /*! @function getProvider - @abstract Returns an IOService object's primary provider. - @discussion This function called in an IOService client will return the provider to which it was first attached. Because the majority of IOService objects have only one provider, this is a useful simplification and also supports caching of the provider when the I/O Registry is unchanged. - @result The first provider of the client, or zero if the IOService object is not attached into the I/O Registry. 
The provider is retained while the client is attached, and should not be released by the caller. */ + * @abstract Returns an IOService object's primary provider. + * @discussion This function called in an IOService client will return the provider to which it was first attached. Because the majority of IOService objects have only one provider, this is a useful simplification and also supports caching of the provider when the I/O Registry is unchanged. + * @result The first provider of the client, or zero if the IOService object is not attached into the I/O Registry. The provider is retained while the client is attached, and should not be released by the caller. */ - virtual IOService * getProvider( void ) const; + virtual IOService * getProvider( void ) const; /*! @function getWorkLoop - @abstract Returns the current work loop or provider->getWorkLoop. - @discussion This function returns a valid work loop that a client can use to add an IOCommandGate to. The intention is that an IOService client has data that needs to be protected but doesn't want to pay the cost of a dedicated thread. This data has to be accessed from a provider's call-out context as well. So to achieve both of these goals the client creates an IOCommandGate to lock access to his data but he registers it with the provider's work loop, i.e. the work loop which will make the completion call-outs. This avoids a potential deadlock because the work loop gate uses a recursive lock, which allows the same lock to be held multiple times by a single thread. - @result A work loop, either the current work loop or it walks up the @link getProvider getProvider@/link chain calling getWorkLoop. Eventually it will reach a valid work loop-based driver or the root of the I/O tree, where it will return a system-wide work loop. Returns 0 if it fails to find (or create) a work loop.*/ + * @abstract Returns the current work loop or provider->getWorkLoop. + * @discussion This function returns a valid work loop that a client can use to add an IOCommandGate to. The intention is that an IOService client has data that needs to be protected but doesn't want to pay the cost of a dedicated thread. This data has to be accessed from a provider's call-out context as well. So to achieve both of these goals the client creates an IOCommandGate to lock access to his data but he registers it with the provider's work loop, i.e. the work loop which will make the completion call-outs. This avoids a potential deadlock because the work loop gate uses a recursive lock, which allows the same lock to be held multiple times by a single thread. + * @result A work loop, either the current work loop or it walks up the @link getProvider getProvider@/link chain calling getWorkLoop. Eventually it will reach a valid work loop-based driver or the root of the I/O tree, where it will return a system-wide work loop. Returns 0 if it fails to find (or create) a work loop.*/ - virtual IOWorkLoop * getWorkLoop() const; + virtual IOWorkLoop * getWorkLoop() const; /*! @function getProviderIterator - @abstract Returns an iterator over an IOService object's providers. - @discussion For those few IOService objects that obtain service from multiple providers, this method supplies an iterator over a client's providers. - @result An iterator over the providers of the client, or zero if there is a resource failure. The iterator must be released when the iteration is finished. 
All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */
+ * @abstract Returns an iterator over an IOService object's providers.
+ * @discussion For those few IOService objects that obtain service from multiple providers, this method supplies an iterator over a client's providers.
+ * @result An iterator over the providers of the client, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */
- virtual OSIterator * getProviderIterator( void ) const;
+ virtual OSIterator * getProviderIterator( void ) const;
/*! @function getOpenProviderIterator
- @abstract Returns an iterator over an client's providers that are currently opened by the client.
- @discussion For those few IOService objects that obtain service from multiple providers, this method supplies an iterator over a client's providers, locking each in turn with @link lockForArbitration lockForArbitration@/link and returning those that have been opened by the client.
- @result An iterator over the providers the client has open, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, and the current entry in the iteration is locked with lockForArbitration, protecting it from state changes. */
+ * @abstract Returns an iterator over a client's providers that are currently opened by the client.
+ * @discussion For those few IOService objects that obtain service from multiple providers, this method supplies an iterator over a client's providers, locking each in turn with @link lockForArbitration lockForArbitration@/link and returning those that have been opened by the client.
+ * @result An iterator over the providers the client has open, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, and the current entry in the iteration is locked with lockForArbitration, protecting it from state changes. */
- virtual OSIterator * getOpenProviderIterator( void ) const;
+ virtual OSIterator * getOpenProviderIterator( void ) const;
/*! @function getClient
- @abstract Returns an IOService object's primary client.
- @discussion This function called in an IOService provider will return the first client to attach to it. For IOService objects which have only only one client, this may be a useful simplification.
- @result The first client of the provider, or zero if the IOService object is not attached into the I/O Registry. The client is retained while it is attached, and should not be released by the caller. */
+ * @abstract Returns an IOService object's primary client.
+ * @discussion This function called in an IOService provider will return the first client to attach to it. For IOService objects which have only one client, this may be a useful simplification.
+ * @result The first client of the provider, or zero if the IOService object is not attached into the I/O Registry. The client is retained while it is attached, and should not be released by the caller. */
- virtual IOService * getClient( void ) const;
+ virtual IOService * getClient( void ) const;
/*!
@function getClientIterator - @abstract Returns an iterator over an IOService object's clients. - @discussion For IOService objects that may have multiple clients, this method supplies an iterator over a provider's clients. - @result An iterator over the clients of the provider, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ + * @abstract Returns an iterator over an IOService object's clients. + * @discussion For IOService objects that may have multiple clients, this method supplies an iterator over a provider's clients. + * @result An iterator over the clients of the provider, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, though they may no longer be attached during the iteration. */ - virtual OSIterator * getClientIterator( void ) const; + virtual OSIterator * getClientIterator( void ) const; /*! @function getOpenClientIterator - @abstract Returns an iterator over a provider's clients that currently have opened the provider. - @discussion For IOService objects that may have multiple clients, this method supplies an iterator over a provider's clients, locking each in turn with @link lockForArbitration lockForArbitration@/link and returning those that have opened the provider. - @result An iterator over the clients that have opened the provider, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, and the current entry in the iteration is locked with lockForArbitration, protecting it from state changes. */ + * @abstract Returns an iterator over a provider's clients that currently have opened the provider. + * @discussion For IOService objects that may have multiple clients, this method supplies an iterator over a provider's clients, locking each in turn with @link lockForArbitration lockForArbitration@/link and returning those that have opened the provider. + * @result An iterator over the clients that have opened the provider, or zero if there is a resource failure. The iterator must be released when the iteration is finished. All objects returned by the iteration are retained while the iterator is valid, and the current entry in the iteration is locked with lockForArbitration, protecting it from state changes. */ - virtual OSIterator * getOpenClientIterator( void ) const; + virtual OSIterator * getOpenClientIterator( void ) const; /*! @function callPlatformFunction - @abstract Calls the platform function with the given name. - @discussion The platform expert or other drivers may implement various functions to control hardware features. callPlatformFunction allows any IOService object to access these functions. Normally callPlatformFunction is called on a service's provider. The provider services the request or passes it to its provider. The system's IOPlatformExpert subclass catches functions it knows about and redirects them into other parts of the service plane. If the IOPlatformExpert subclass cannot execute the function, the base class is called. The IOPlatformExpert base class attempts to find a service to execute the function by looking up the function name in an IOResources name space. 
A service may publish a service using publishResource(functionName, this). If no service can be found to execute the function an error is returned. - @param functionName Name of the function to be called. When functionName is a C string, callPlatformFunction converts the C string to an OSSymbol and calls the OSSymbol version of callPlatformFunction. This process can block and should not be used from an interrupt context. - @param waitForFunction If true, callPlatformFunction will not return until the function has been called. - @result An IOReturn code; kIOReturnSuccess if the function was successfully executed, kIOReturnUnsupported if a service to execute the function could not be found. Other return codes may be returned by the function.*/ + * @abstract Calls the platform function with the given name. + * @discussion The platform expert or other drivers may implement various functions to control hardware features. callPlatformFunction allows any IOService object to access these functions. Normally callPlatformFunction is called on a service's provider. The provider services the request or passes it to its provider. The system's IOPlatformExpert subclass catches functions it knows about and redirects them into other parts of the service plane. If the IOPlatformExpert subclass cannot execute the function, the base class is called. The IOPlatformExpert base class attempts to find a service to execute the function by looking up the function name in an IOResources name space. A service may publish a service using publishResource(functionName, this). If no service can be found to execute the function an error is returned. + * @param functionName Name of the function to be called. When functionName is a C string, callPlatformFunction converts the C string to an OSSymbol and calls the OSSymbol version of callPlatformFunction. This process can block and should not be used from an interrupt context. + * @param waitForFunction If true, callPlatformFunction will not return until the function has been called. + * @result An IOReturn code; kIOReturnSuccess if the function was successfully executed, kIOReturnUnsupported if a service to execute the function could not be found. Other return codes may be returned by the function.*/ - virtual IOReturn callPlatformFunction( const OSSymbol * functionName, - bool waitForFunction, - void *param1, void *param2, - void *param3, void *param4 ); + virtual IOReturn callPlatformFunction( const OSSymbol * functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4 ); - virtual IOReturn callPlatformFunction( const char * functionName, - bool waitForFunction, - void *param1, void *param2, - void *param3, void *param4 ); + virtual IOReturn callPlatformFunction( const char * functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4 ); - /* Some accessors */ +/* Some accessors */ /*! @function getPlatform - @abstract Returns a pointer to the platform expert instance for the computer. - @discussion This method provides an accessor to the platform expert instance for the computer. - @result A pointer to the IOPlatformExpert instance. It should not be released by the caller. */ + * @abstract Returns a pointer to the platform expert instance for the computer. + * @discussion This method provides an accessor to the platform expert instance for the computer. + * @result A pointer to the IOPlatformExpert instance. It should not be released by the caller. 
*/ - static IOPlatformExpert * getPlatform( void ); + static IOPlatformExpert * getPlatform( void ); /*! @function getPMRootDomain - @abstract Returns a pointer to the power management root domain instance for the computer. - @discussion This method provides an accessor to the power management root domain instance for the computer. - @result A pointer to the power management root domain instance. It should not be released by the caller. */ + * @abstract Returns a pointer to the power management root domain instance for the computer. + * @discussion This method provides an accessor to the power management root domain instance for the computer. + * @result A pointer to the power management root domain instance. It should not be released by the caller. */ - static class IOPMrootDomain * getPMRootDomain( void ); + static class IOPMrootDomain * getPMRootDomain( void ); /*! @function getServiceRoot - @abstract Returns a pointer to the root of the service plane. - @discussion This method provides an accessor to the root of the service plane for the computer. - @result A pointer to the IOService instance at the root of the service plane. It should not be released by the caller. */ + * @abstract Returns a pointer to the root of the service plane. + * @discussion This method provides an accessor to the root of the service plane for the computer. + * @result A pointer to the IOService instance at the root of the service plane. It should not be released by the caller. */ - static IOService * getServiceRoot( void ); + static IOService * getServiceRoot( void ); /*! @function getResourceService - @abstract Returns a pointer to the IOResources service. - @discussion IOService maintains a resource service IOResources that allows objects to be published and found globally in the I/O Kit based on a name, using the standard IOService matching and notification calls. - @result A pointer to the IOResources instance. It should not be released by the caller. */ + * @abstract Returns a pointer to the IOResources service. + * @discussion IOService maintains a resource service IOResources that allows objects to be published and found globally in the I/O Kit based on a name, using the standard IOService matching and notification calls. + * @result A pointer to the IOResources instance. It should not be released by the caller. */ - static IOService * getResourceService( void ); + static IOService * getResourceService( void ); - /* Allocate resources for a matched service */ +/* Allocate resources for a matched service */ /*! @function getResources - @abstract Allocates any needed resources for a published IOService object before clients attach. - @discussion This method is called during the registration process for an IOService object if there are successful driver matches, before any clients attach. It allows for lazy allocation of resources to an IOService object when a matching driver is found. - @result An IOReturn code; kIOReturnSuccess is necessary for the IOService object to be successfully used, otherwise the registration process for the object is halted. */ - - virtual IOReturn getResources( void ); + * @abstract Allocates any needed resources for a published IOService object before clients attach. + * @discussion This method is called during the registration process for an IOService object if there are successful driver matches, before any clients attach. It allows for lazy allocation of resources to an IOService object when a matching driver is found. 
+ * @result An IOReturn code; kIOReturnSuccess is necessary for the IOService object to be successfully used, otherwise the registration process for the object is halted. */ - /* Device memory accessors */ + virtual IOReturn getResources( void ); + +/* Device memory accessors */ /*! @function getDeviceMemoryCount - @abstract Returns a count of the physical memory ranges available for a device. - @discussion This method returns the count of physical memory ranges, each represented by an IODeviceMemory instance, that have been allocated for a memory mapped device. - @result An integer count of the number of ranges available. */ + * @abstract Returns a count of the physical memory ranges available for a device. + * @discussion This method returns the count of physical memory ranges, each represented by an IODeviceMemory instance, that have been allocated for a memory mapped device. + * @result An integer count of the number of ranges available. */ - virtual IOItemCount getDeviceMemoryCount( void ); + virtual IOItemCount getDeviceMemoryCount( void ); /*! @function getDeviceMemoryWithIndex - @abstract Returns an instance of IODeviceMemory representing one of a device's memory mapped ranges. - @discussion This method returns a pointer to an instance of IODeviceMemory for the physical memory range at the given index for a memory mapped device. - @param index An index into the array of ranges assigned to the device. - @result A pointer to an instance of IODeviceMemory, or zero if the index is beyond the count available. The IODeviceMemory is retained by the provider, so is valid while attached, or while any mappings to it exist. It should not be released by the caller. See also @link mapDeviceMemoryWithIndex mapDeviceMemoryWithIndex@/link, which creates a device memory mapping. */ + * @abstract Returns an instance of IODeviceMemory representing one of a device's memory mapped ranges. + * @discussion This method returns a pointer to an instance of IODeviceMemory for the physical memory range at the given index for a memory mapped device. + * @param index An index into the array of ranges assigned to the device. + * @result A pointer to an instance of IODeviceMemory, or zero if the index is beyond the count available. The IODeviceMemory is retained by the provider, so is valid while attached, or while any mappings to it exist. It should not be released by the caller. See also @link mapDeviceMemoryWithIndex mapDeviceMemoryWithIndex@/link, which creates a device memory mapping. */ - virtual IODeviceMemory * getDeviceMemoryWithIndex( unsigned int index ); + virtual IODeviceMemory * getDeviceMemoryWithIndex( unsigned int index ); /*! @function mapDeviceMemoryWithIndex - @abstract Maps a physical range of a device. - @discussion This method creates a mapping for the IODeviceMemory at the given index, with IODeviceMemory::map(options). The mapping is represented by the returned instance of IOMemoryMap, which should not be released until the mapping is no longer required. - @param index An index into the array of ranges assigned to the device. - @result An instance of IOMemoryMap, or zero if the index is beyond the count available. The mapping should be released only when access to it is no longer required. */ + * @abstract Maps a physical range of a device. + * @discussion This method creates a mapping for the IODeviceMemory at the given index, with IODeviceMemory::map(options). 
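A minimal sketch of the device-memory accessors just described, assuming a provider nub with at least one assigned range; the register layout is invented.

    // Sketch: map range 0 and read a (hypothetical) 32-bit status register.
    IOMemoryMap * map = nub->mapDeviceMemoryWithIndex(0);
    if (map != NULL) {
        volatile uint32_t * regs =
            (volatile uint32_t *) map->getVirtualAddress();
        uint32_t status = regs[0];   // hypothetical register at offset 0
        (void) status;
        map->release();              // drop the mapping once access is finished
    }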
The mapping is represented by the returned instance of IOMemoryMap, which should not be released until the mapping is no longer required. + * @param index An index into the array of ranges assigned to the device. + * @result An instance of IOMemoryMap, or zero if the index is beyond the count available. The mapping should be released only when access to it is no longer required. */ - virtual IOMemoryMap * mapDeviceMemoryWithIndex( unsigned int index, - IOOptionBits options = 0 ); + virtual IOMemoryMap * mapDeviceMemoryWithIndex( unsigned int index, + IOOptionBits options = 0 ); /*! @function getDeviceMemory - @abstract Returns the array of IODeviceMemory objects representing a device's memory mapped ranges. - @discussion This method returns an array of IODeviceMemory objects representing the physical memory ranges allocated to a memory mapped device. - @result An OSArray of IODeviceMemory objects, or zero if none are available. The array is retained by the provider, so is valid while attached. */ + * @abstract Returns the array of IODeviceMemory objects representing a device's memory mapped ranges. + * @discussion This method returns an array of IODeviceMemory objects representing the physical memory ranges allocated to a memory mapped device. + * @result An OSArray of IODeviceMemory objects, or zero if none are available. The array is retained by the provider, so is valid while attached. */ - virtual OSArray * getDeviceMemory( void ); + virtual OSArray * getDeviceMemory( void ); /*! @function setDeviceMemory - @abstract Sets the array of IODeviceMemory objects representing a device's memory mapped ranges. - @discussion This method sets an array of IODeviceMemory objects representing the physical memory ranges allocated to a memory mapped device. - @param array An OSArray of IODeviceMemory objects, or zero if none are available. The array will be retained by the object. */ + * @abstract Sets the array of IODeviceMemory objects representing a device's memory mapped ranges. + * @discussion This method sets an array of IODeviceMemory objects representing the physical memory ranges allocated to a memory mapped device. + * @param array An OSArray of IODeviceMemory objects, or zero if none are available. The array will be retained by the object. */ - virtual void setDeviceMemory( OSArray * array ); + virtual void setDeviceMemory( OSArray * array ); - /* Interrupt accessors */ +/* Interrupt accessors */ /*! @function registerInterrupt - @abstract Registers a C function interrupt handler for a device supplying interrupts. - @discussion This method installs a C function interrupt handler to be called at primary interrupt time for a device's interrupt. Only one handler may be installed per interrupt source. IOInterruptEventSource provides a work loop based abstraction for interrupt delivery that may be more appropriate for work loop based drivers. - @param source The index of the interrupt source in the device. - @param target An object instance to be passed to the interrupt handler. - @param handler The C function to be called at primary interrupt time when the interrupt occurs. The handler should process the interrupt by clearing the interrupt, or by disabling the source. - @param refCon A reference constant for the handler's use. - @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid; kIOReturnNoResources is returned if the interrupt already has an installed handler. */ - - virtual IOReturn registerInterrupt(int source, OSObject *target, - IOInterruptAction handler, - void *refCon = 0); + * @abstract Registers a C function interrupt handler for a device supplying interrupts. + * @discussion This method installs a C function interrupt handler to be called at primary interrupt time for a device's interrupt. Only one handler may be installed per interrupt source. IOInterruptEventSource provides a work loop based abstraction for interrupt delivery that may be more appropriate for work loop based drivers. + * @param source The index of the interrupt source in the device. + * @param target An object instance to be passed to the interrupt handler. + * @param handler The C function to be called at primary interrupt time when the interrupt occurs. The handler should process the interrupt by clearing the interrupt, or by disabling the source. + * @param refCon A reference constant for the handler's use. + * @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid; kIOReturnNoResources is returned if the interrupt already has an installed handler. */ + + virtual IOReturn registerInterrupt(int source, OSObject *target, + IOInterruptAction handler, + void *refCon = 0); #ifdef __BLOCKS__ /*! @function registerInterrupt - @abstract Registers a block handler for a device supplying interrupts. - @discussion This method installs a C function interrupt handler to be called at primary interrupt time for a device's interrupt. Only one handler may be installed per interrupt source. IOInterruptEventSource provides a work loop based abstraction for interrupt delivery that may be more appropriate for work loop based drivers. - @param source The index of the interrupt source in the device. - @param target An object instance to be passed to the interrupt handler. - @param handler The block to be invoked at primary interrupt time when the interrupt occurs. The handler should process the interrupt by clearing the interrupt, or by disabling the source. - @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid; kIOReturnNoResources is returned if the interrupt already has an installed handler. */ + * @abstract Registers a block handler for a device supplying interrupts. + * @discussion This method installs a block interrupt handler to be invoked at primary interrupt time for a device's interrupt. Only one handler may be installed per interrupt source. IOInterruptEventSource provides a work loop based abstraction for interrupt delivery that may be more appropriate for work loop based drivers. + * @param source The index of the interrupt source in the device. + * @param target An object instance to be passed to the interrupt handler. + * @param handler The block to be invoked at primary interrupt time when the interrupt occurs. The handler should process the interrupt by clearing the interrupt, or by disabling the source. + * @result An IOReturn code.
kIOReturnNoInterrupt is returned if the source is not valid; kIOReturnNoResources is returned if the interrupt already has an installed handler. */ IOReturn registerInterruptBlock(int source, OSObject *target, - IOInterruptActionBlock handler); + IOInterruptActionBlock handler); #endif /* __BLOCKS__ */ - + /*! @function unregisterInterrupt - @abstract Removes a C function interrupt handler for a device supplying hardware interrupts. - @discussion This method removes a C function interrupt handler previously installed with @link registerInterrupt registerInterrupt@/link. - @param source The index of the interrupt source in the device. - @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ + * @abstract Removes a C function interrupt handler for a device supplying hardware interrupts. + * @discussion This method removes a C function interrupt handler previously installed with @link registerInterrupt registerInterrupt@/link. + * @param source The index of the interrupt source in the device. + * @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ - virtual IOReturn unregisterInterrupt(int source); + virtual IOReturn unregisterInterrupt(int source); /*! @function addInterruptStatistics - @abstract Adds a statistics object to the IOService for the given interrupt. - @discussion This method associates a set of statistics and a reporter for those statistics with an interrupt for this IOService, so that we can interrogate the IOService for statistics pertaining to that interrupt. - @param statistics The IOInterruptAccountingData container we wish to associate the IOService with. - @param source The index of the interrupt source in the device. */ - IOReturn addInterruptStatistics(IOInterruptAccountingData * statistics, int source); + * @abstract Adds a statistics object to the IOService for the given interrupt. + * @discussion This method associates a set of statistics and a reporter for those statistics with an interrupt for this IOService, so that we can interrogate the IOService for statistics pertaining to that interrupt. + * @param statistics The IOInterruptAccountingData container we wish to associate the IOService with. + * @param source The index of the interrupt source in the device. */ + IOReturn addInterruptStatistics(IOInterruptAccountingData * statistics, int source); /*! @function removeInterruptStatistics - @abstract Removes any statistics from the IOService for the given interrupt. - @discussion This method disassociates any IOInterruptAccountingData container we may have for the given interrupt from the IOService; this indicates that the the interrupt target (at the moment, likely an IOInterruptEventSource) is being destroyed. - @param source The index of the interrupt source in the device. */ - IOReturn removeInterruptStatistics(int source); + * @abstract Removes any statistics from the IOService for the given interrupt. + * @discussion This method disassociates any IOInterruptAccountingData container we may have for the given interrupt from the IOService; this indicates that the interrupt target (at the moment, likely an IOInterruptEventSource) is being destroyed. + * @param source The index of the interrupt source in the device. */ + IOReturn removeInterruptStatistics(int source); /*! @function getInterruptType - @abstract Returns the type of interrupt used for a device supplying hardware interrupts. - @param source The index of the interrupt source in the device.
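For illustration, a sketch pairing the block registration above with unregisterInterrupt at teardown. The source index 0 and the block body are placeholders, and the block signature shown assumes IOInterruptActionBlock takes the provider nub and source index.

    // Sketch: install a primary-interrupt block on source 0; remove it in stop().
    nub->registerInterruptBlock(0, this,
        ^(IOService * theNub, int source) {
            // Primary interrupt context: clear or disable the hardware source.
        });
    // ... teardown path:
    nub->unregisterInterrupt(0);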
- @param interruptType The interrupt type for the interrupt source will be stored here by getInterruptType.
kIOInterruptTypeEdge will be returned for edge-trigggered sources.
kIOInterruptTypeLevel will be returned for level-trigggered sources. - @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ + * @abstract Returns the type of interrupt used for a device supplying hardware interrupts. + * @param source The index of the interrupt source in the device. + * @param interruptType The interrupt type for the interrupt source will be stored here by getInterruptType.
kIOInterruptTypeEdge will be returned for edge-triggered sources.
kIOInterruptTypeLevel will be returned for level-triggered sources. + * @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ - virtual IOReturn getInterruptType(int source, int *interruptType); + virtual IOReturn getInterruptType(int source, int *interruptType); /*! @function enableInterrupt - @abstract Enables a device interrupt. - @discussion It is the caller's responsiblity to keep track of the enable state of the interrupt source. - @param source The index of the interrupt source in the device. - @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ + * @abstract Enables a device interrupt. + * @discussion It is the caller's responsibility to keep track of the enable state of the interrupt source. + * @param source The index of the interrupt source in the device. + * @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ - virtual IOReturn enableInterrupt(int source); + virtual IOReturn enableInterrupt(int source); /*! @function disableInterrupt - @abstract Synchronously disables a device interrupt. - @discussion If the interrupt routine is running, the call will block until the routine completes. It is the caller's responsiblity to keep track of the enable state of the interrupt source. - @param source The index of the interrupt source in the device. - @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ + * @abstract Synchronously disables a device interrupt. + * @discussion If the interrupt routine is running, the call will block until the routine completes. It is the caller's responsibility to keep track of the enable state of the interrupt source. + * @param source The index of the interrupt source in the device. + * @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ - virtual IOReturn disableInterrupt(int source); + virtual IOReturn disableInterrupt(int source); /*! @function causeInterrupt - @abstract Causes a device interrupt to occur. - @discussion Emulates a hardware interrupt, to be called from task level. - @param source The index of the interrupt source in the device. - @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ + * @abstract Causes a device interrupt to occur. + * @discussion Emulates a hardware interrupt, to be called from task level. + * @param source The index of the interrupt source in the device. + * @result An IOReturn code (kIOReturnNoInterrupt is returned if the source is not valid). */ - virtual IOReturn causeInterrupt(int source); + virtual IOReturn causeInterrupt(int source); /*! @function requestProbe - @abstract Requests that hardware be re-scanned for devices. - @discussion For bus families that do not usually detect device addition or removal, this method represents an external request (eg. from a utility application) to rescan and publish or remove found devices. - @param options Family defined options, not interpreted by IOService. - @result An IOReturn code. */ + * @abstract Requests that hardware be re-scanned for devices. + * @discussion For bus families that do not usually detect device addition or removal, this method represents an external request (e.g. from a utility application) to rescan and publish or remove found devices. + * @param options Family defined options, not interpreted by IOService. + * @result An IOReturn code.
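Taken together, a sketch of the interrupt-control calls above, assuming source 0 on a provider nub:

    // Sketch: query the trigger type, enable the source, disable at teardown.
    int interruptType = 0;
    if (nub->getInterruptType(0, &interruptType) == kIOReturnSuccess) {
        bool isLevel = (interruptType & kIOInterruptTypeLevel) != 0;
        (void) isLevel;              // level-triggered sources need explicit clearing
        nub->enableInterrupt(0);     // the caller tracks the enable state
    }
    // ... teardown: blocks until any in-flight handler completes.
    nub->disableInterrupt(0);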
*/ - virtual IOReturn requestProbe( IOOptionBits options ); + virtual IOReturn requestProbe( IOOptionBits options ); - /* Generic API for non-data-path upstream calls */ +/* Generic API for non-data-path upstream calls */ /*! @function message - @abstract Receives a generic message delivered from an attached provider. - @discussion A provider may deliver messages via the message method to its clients informing them of state changes, such as kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by the I/O Kit in IOMessage.h while others may be family dependent. This method is implemented in the client to receive messages. - @param type A type defined in IOMessage.h or defined by the provider family. - @param provider The provider from which the message originates. - @param argument An argument defined by the provider family, not used by IOService. - @result An IOReturn code defined by the message type. */ - - virtual IOReturn message( UInt32 type, IOService * provider, - void * argument = 0 ); - + * @abstract Receives a generic message delivered from an attached provider. + * @discussion A provider may deliver messages via the message method to its clients informing them of state changes, such as kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by the I/O Kit in IOMessage.h while others may be family dependent. This method is implemented in the client to receive messages. + * @param type A type defined in IOMessage.h or defined by the provider family. + * @param provider The provider from which the message originates. + * @param argument An argument defined by the provider family, not used by IOService. + * @result An IOReturn code defined by the message type. */ + + virtual IOReturn message( UInt32 type, IOService * provider, + void * argument = 0 ); + /*! @function messageClient - @abstract Sends a generic message to an attached client. - @discussion A provider may deliver messages via the @link message message@/link method to its clients informing them of state changes, such as kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by the I/O Kit in IOMessage.h while others may be family dependent. This method may be called in the provider to send a message to the specified client, which may be useful for overrides. - @param messageType A type defined in IOMessage.h or defined by the provider family. - @param client A client of the IOService to send the message. - @param messageArgument An argument defined by the provider family, not used by IOService. - @param argSize Specifies the size of messageArgument, in bytes. If argSize is non-zero, messageArgument is treated as a pointer to argSize bytes of data. If argSize is 0 (the default), messageArgument is treated as an ordinal and passed by value. - @result The return code from the client message call. */ - - virtual IOReturn messageClient( UInt32 messageType, OSObject * client, - void * messageArgument = 0, vm_size_t argSize = 0 ); + * @abstract Sends a generic message to an attached client. + * @discussion A provider may deliver messages via the @link message message@/link method to its clients informing them of state changes, such as kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by the I/O Kit in IOMessage.h while others may be family dependent. This method may be called in the provider to send a message to the specified client, which may be useful for overrides. 
+ * @param messageType A type defined in IOMessage.h or defined by the provider family. + * @param client A client of the IOService to send the message. + * @param messageArgument An argument defined by the provider family, not used by IOService. + * @param argSize Specifies the size of messageArgument, in bytes. If argSize is non-zero, messageArgument is treated as a pointer to argSize bytes of data. If argSize is 0 (the default), messageArgument is treated as an ordinal and passed by value. + * @result The return code from the client message call. */ + + virtual IOReturn messageClient( UInt32 messageType, OSObject * client, + void * messageArgument = 0, vm_size_t argSize = 0 ); /*! @function messageClients - @abstract Sends a generic message to all attached clients. - @discussion A provider may deliver messages via the @link message message@/link method to its clients informing them of state changes, such as kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by the I/O Kit in IOMessage.h while others may be family dependent. This method may be called in the provider to send a message to all the attached clients, via the @link messageClient messageClient@/link method. - @param type A type defined in IOMessage.h or defined by the provider family. - @param argument An argument defined by the provider family, not used by IOService. - @param argSize Specifies the size of argument, in bytes. If argSize is non-zero, argument is treated as a pointer to argSize bytes of data. If argSize is 0 (the default), argument is treated as an ordinal and passed by value. - @result Any non-kIOReturnSuccess return codes returned by the clients, or kIOReturnSuccess if all return kIOReturnSuccess. */ + * @abstract Sends a generic message to all attached clients. + * @discussion A provider may deliver messages via the @link message message@/link method to its clients informing them of state changes, such as kIOMessageServiceIsTerminated or kIOMessageServiceIsSuspended. Certain messages are defined by the I/O Kit in IOMessage.h while others may be family dependent. This method may be called in the provider to send a message to all the attached clients, via the @link messageClient messageClient@/link method. + * @param type A type defined in IOMessage.h or defined by the provider family. + * @param argument An argument defined by the provider family, not used by IOService. + * @param argSize Specifies the size of argument, in bytes. If argSize is non-zero, argument is treated as a pointer to argSize bytes of data. If argSize is 0 (the default), argument is treated as an ordinal and passed by value. + * @result Any non-kIOReturnSuccess return codes returned by the clients, or kIOReturnSuccess if all return kIOReturnSuccess. 
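A sketch of a provider broadcasting a family-defined message to its attached clients; the message constant is invented, and families typically define their own constants in the vendor-specific range of IOMessage.h.

    // Sketch: notify all attached clients of a hypothetical family event.
    #define kMyFamilyMessageReset  iokit_vendor_specific_msg(0x100)

    IOReturn ret = messageClients(kMyFamilyMessageReset);
    // ret is kIOReturnSuccess only if every client returned kIOReturnSuccess.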
*/ - virtual IOReturn messageClients( UInt32 type, - void * argument = 0, vm_size_t argSize = 0 ); + virtual IOReturn messageClients( UInt32 type, + void * argument = 0, vm_size_t argSize = 0 ); - virtual IONotifier * registerInterest( const OSSymbol * typeOfInterest, - IOServiceInterestHandler handler, - void * target, void * ref = 0 ); + virtual IONotifier * registerInterest( const OSSymbol * typeOfInterest, + IOServiceInterestHandler handler, + void * target, void * ref = 0 ); #ifdef __BLOCKS__ - IONotifier * registerInterest(const OSSymbol * typeOfInterest, - IOServiceInterestHandlerBlock handler); + IONotifier * registerInterest(const OSSymbol * typeOfInterest, + IOServiceInterestHandlerBlock handler); #endif /* __BLOCKS__ */ - virtual void applyToProviders( IOServiceApplierFunction applier, - void * context ); + virtual void applyToProviders( IOServiceApplierFunction applier, + void * context ); - virtual void applyToClients( IOServiceApplierFunction applier, - void * context ); + virtual void applyToClients( IOServiceApplierFunction applier, + void * context ); - virtual void applyToInterested( const OSSymbol * typeOfInterest, - OSObjectApplierFunction applier, - void * context ); + virtual void applyToInterested( const OSSymbol * typeOfInterest, + OSObjectApplierFunction applier, + void * context ); - virtual IOReturn acknowledgeNotification( IONotificationRef notification, - IOOptionBits response ); + virtual IOReturn acknowledgeNotification( IONotificationRef notification, + IOOptionBits response ); - /* User client create */ +/* User client create */ /*! @function newUserClient - @abstract Creates a connection for a non kernel client. - @discussion A non kernel client may request a connection be opened via the @link //apple_ref/c/func/IOServiceOpen IOServiceOpen@/link library function, which will call this method in an IOService object. The rules and capabilities of user level clients are family dependent, and use the functions of the IOUserClient class for support. IOService's implementation returns kIOReturnUnsupported, so any family supporting user clients must implement this method. - @param owningTask The Mach task of the client thread in the process of opening the user client. Note that in Mac OS X, each process is based on a Mach task and one or more Mach threads. For more information on the composition of a Mach task and its relationship with Mach threads, see {@linkdoc //apple_ref/doc/uid/TP30000905-CH209-TPXREF103 "Tasks and Threads"}. - @param securityID A token representing the access level for the task. - @param type A constant specifying the type of connection to be created, specified by the caller of @link //apple_ref/c/func/IOServiceOpen IOServiceOpen@/link and interpreted only by the family. - @param handler An instance of an IOUserClient object to represent the connection, which will be released when the connection is closed, or zero if the connection was not opened. - @param properties A dictionary of additional properties for the connection. - @result A return code to be passed back to the caller of IOServiceOpen. */ + * @abstract Creates a connection for a non kernel client. + * @discussion A non kernel client may request a connection be opened via the @link //apple_ref/c/func/IOServiceOpen IOServiceOpen@/link library function, which will call this method in an IOService object. The rules and capabilities of user level clients are family dependent, and use the functions of the IOUserClient class for support. 
IOService's implementation returns kIOReturnUnsupported, so any family supporting user clients must implement this method. + * @param owningTask The Mach task of the client thread in the process of opening the user client. Note that in Mac OS X, each process is based on a Mach task and one or more Mach threads. For more information on the composition of a Mach task and its relationship with Mach threads, see {@linkdoc //apple_ref/doc/uid/TP30000905-CH209-TPXREF103 "Tasks and Threads"}. + * @param securityID A token representing the access level for the task. + * @param type A constant specifying the type of connection to be created, specified by the caller of @link //apple_ref/c/func/IOServiceOpen IOServiceOpen@/link and interpreted only by the family. + * @param handler An instance of an IOUserClient object to represent the connection, which will be released when the connection is closed, or zero if the connection was not opened. + * @param properties A dictionary of additional properties for the connection. + * @result A return code to be passed back to the caller of IOServiceOpen. */ - virtual IOReturn newUserClient( task_t owningTask, void * securityID, - UInt32 type, OSDictionary * properties, - IOUserClient ** handler ); + virtual IOReturn newUserClient( task_t owningTask, void * securityID, + UInt32 type, OSDictionary * properties, + IOUserClient ** handler ); - virtual IOReturn newUserClient( task_t owningTask, void * securityID, - UInt32 type, IOUserClient ** handler ); + virtual IOReturn newUserClient( task_t owningTask, void * securityID, + UInt32 type, IOUserClient ** handler ); - /* Return code utilities */ +/* Return code utilities */ /*! @function stringFromReturn - @abstract Supplies a programmer-friendly string from an IOReturn code. - @discussion Strings are available for the standard return codes in IOReturn.h in IOService, while subclasses may implement this method to interpret family dependent return codes. - @param rtn The IOReturn code. - @result A pointer to a constant string, or zero if the return code is unknown. */ - - virtual const char * stringFromReturn( IOReturn rtn ); + * @abstract Supplies a programmer-friendly string from an IOReturn code. + * @discussion Strings are available for the standard return codes in IOReturn.h in IOService, while subclasses may implement this method to interpret family dependent return codes. + * @param rtn The IOReturn code. + * @result A pointer to a constant string, or zero if the return code is unknown. */ + + virtual const char * stringFromReturn( IOReturn rtn ); /*! @function errnoFromReturn - @abstract Translates an IOReturn code to a BSD errno. - @discussion BSD defines its own return codes for its functions in sys/errno.h, and I/O Kit families may need to supply compliant results in BSD shims. Results are available for the standard return codes in IOReturn.h in IOService, while subclasses may implement this method to interpret family dependent return codes. - @param rtn The IOReturn code. - @result The BSD errno or EIO if unknown. */ - - virtual int errnoFromReturn( IOReturn rtn ); + * @abstract Translates an IOReturn code to a BSD errno. + * @discussion BSD defines its own return codes for its functions in sys/errno.h, and I/O Kit families may need to supply compliant results in BSD shims. Results are available for the standard return codes in IOReturn.h in IOService, while subclasses may implement this method to interpret family dependent return codes. + * @param rtn The IOReturn code. 
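A sketch of the two translation helpers in a BSD-shim error path; 'service' is a placeholder IOService pointer.

    // Sketch: log a failure and hand a BSD errno back to the shim caller.
    IOReturn ret = service->requestProbe(0);
    if (ret != kIOReturnSuccess) {
        const char * s = service->stringFromReturn(ret);
        IOLog("requestProbe failed: %s\n", s ? s : "unknown IOReturn");
        return service->errnoFromReturn(ret);   // EIO when the code is unknown
    }
    return 0;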
+ * @result The BSD errno or EIO if unknown. */ + + virtual int errnoFromReturn( IOReturn rtn ); - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - /* * * * * * * * * * end of IOService API * * * * * * * */ - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * end of IOService API * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - /* for IOInterruptController implementors */ +/* for IOInterruptController implementors */ - int _numInterruptSources; - IOInterruptSource *_interruptSources; + int _numInterruptSources; + IOInterruptSource *_interruptSources; - /* overrides */ - virtual bool serializeProperties( OSSerialize * s ) const APPLE_KEXT_OVERRIDE; +/* overrides */ + virtual bool serializeProperties( OSSerialize * s ) const APPLE_KEXT_OVERRIDE; #ifdef KERNEL_PRIVATE - /* Apple only SPI to control CPU low power modes */ - void setCPUSnoopDelay(UInt32 ns); - UInt32 getCPUSnoopDelay(); +/* Apple only SPI to control CPU low power modes */ + void setCPUSnoopDelay(UInt32 ns); + UInt32 getCPUSnoopDelay(); #endif - void requireMaxBusStall(UInt32 ns); - void requireMaxInterruptDelay(uint32_t ns); + void requireMaxBusStall(UInt32 ns); + void requireMaxInterruptDelay(uint32_t ns); - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - /* * * * * * * * * * * * Internals * * * * * * * * * * * */ - /* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +/* * * * * * * * * * * * Internals * * * * * * * * * * * */ +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #ifdef XNU_KERNEL_PRIVATE public: - // called from other xnu components - static void initialize( void ); - static void setPlatform( IOPlatformExpert * platform); - static void setPMRootDomain( class IOPMrootDomain * rootDomain ); - static IOReturn catalogNewDrivers( OSOrderedSet * newTables ); - uint64_t getAccumulatedBusyTime( void ); - static void updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage); - static void consoleLockTimer(thread_call_param_t p0, thread_call_param_t p1); - void setTerminateDefer(IOService * provider, bool defer); - uint64_t getAuthorizationID( void ); - IOReturn setAuthorizationID( uint64_t authorizationID ); - void cpusRunning(void); - void scheduleFinalize(bool now); +// called from other xnu components + static void initialize( void ); + static void setPlatform( IOPlatformExpert * platform); + static void setPMRootDomain( class IOPMrootDomain * rootDomain ); + static IOReturn catalogNewDrivers( OSOrderedSet * newTables ); + uint64_t getAccumulatedBusyTime( void ); + static void updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage); + static void consoleLockTimer(thread_call_param_t p0, thread_call_param_t p1); + void setTerminateDefer(IOService * provider, bool defer); + uint64_t getAuthorizationID( void ); + IOReturn setAuthorizationID( uint64_t authorizationID ); + void cpusRunning(void); + void scheduleFinalize(bool now); private: - static IOReturn waitMatchIdle( UInt32 ms ); - static IONotifier * installNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceMatchingNotificationHandler handler, - void * target, void * ref, - SInt32 priority, OSIterator ** existing ); + static IOReturn waitMatchIdle( UInt32 ms ); + static IONotifier * installNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceMatchingNotificationHandler 
handler, + void * target, void * ref, + SInt32 priority, OSIterator ** existing ); #if !defined(__LP64__) - static IONotifier * installNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceNotificationHandler handler, - void * target, void * ref, - SInt32 priority, OSIterator ** existing); + static IONotifier * installNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * ref, + SInt32 priority, OSIterator ** existing); #endif /* !defined(__LP64__) */ #endif private: - APPLE_KEXT_COMPATIBILITY_VIRTUAL - bool checkResources( void ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - bool checkResource( OSObject * matching ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + bool checkResources( void ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + bool checkResource( OSObject * matching ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - void probeCandidates( OSOrderedSet * matches ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - bool startCandidate( IOService * candidate ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + void probeCandidates( OSOrderedSet * matches ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + bool startCandidate( IOService * candidate ); public: - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOService * getClientWithCategory( const OSSymbol * category ) - APPLE_KEXT_DEPRECATED; - // copyClientWithCategory is the public replacement + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOService * getClientWithCategory( const OSSymbol * category ) + APPLE_KEXT_DEPRECATED; +// copyClientWithCategory is the public replacement #ifdef XNU_KERNEL_PRIVATE - /* Callable within xnu source only - but require vtable entries to be visible */ +/* Callable within xnu source only - but require vtable entries to be visible */ public: #else private: #endif - APPLE_KEXT_COMPATIBILITY_VIRTUAL - bool passiveMatch( OSDictionary * matching, bool changesOK = false); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - void startMatching( IOOptionBits options = 0 ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - void doServiceMatch( IOOptionBits options ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - void doServiceTerminate( IOOptionBits options ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + bool passiveMatch( OSDictionary * matching, bool changesOK = false); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + void startMatching( IOOptionBits options = 0 ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + void doServiceMatch( IOOptionBits options ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + void doServiceTerminate( IOOptionBits options ); private: - bool matchPassive(OSDictionary * table, uint32_t options); - bool matchInternal(OSDictionary * table, uint32_t options, unsigned int * did); - static bool instanceMatch(const OSObject * entry, void * context); + bool matchPassive(OSDictionary * table, uint32_t options); + bool matchInternal(OSDictionary * table, uint32_t options, unsigned int * did); + static bool instanceMatch(const OSObject * entry, void * context); - static OSObject * copyExistingServices( OSDictionary * matching, - IOOptionBits inState, IOOptionBits options = 0 ); + static OSObject * copyExistingServices( OSDictionary * matching, + IOOptionBits inState, IOOptionBits options = 0 ); - static IONotifier * setNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceMatchingNotificationHandler handler, - void * target, void * ref, - SInt32 priority = 0 ); + static IONotifier * setNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceMatchingNotificationHandler handler, + void * target, void * ref, + SInt32 priority = 0 ); - 
static IONotifier * doInstallNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceMatchingNotificationHandler handler, - void * target, void * ref, - SInt32 priority, OSIterator ** existing ); + static IONotifier * doInstallNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceMatchingNotificationHandler handler, + void * target, void * ref, + SInt32 priority, OSIterator ** existing ); - static bool syncNotificationHandler( void * target, void * ref, - IOService * newService, IONotifier * notifier ); + static bool syncNotificationHandler( void * target, void * ref, + IOService * newService, IONotifier * notifier ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - void deliverNotification( const OSSymbol * type, - IOOptionBits orNewState, IOOptionBits andNewState ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + void deliverNotification( const OSSymbol * type, + IOOptionBits orNewState, IOOptionBits andNewState ); - OSArray * copyNotifiers(const OSSymbol * type, - IOOptionBits orNewState, IOOptionBits andNewState); + OSArray * copyNotifiers(const OSSymbol * type, + IOOptionBits orNewState, IOOptionBits andNewState); - bool invokeNotifiers(OSArray ** willSend); - bool invokeNotifier( class _IOServiceNotifier * notify ); + bool invokeNotifiers(OSArray ** willSend); + bool invokeNotifier( class _IOServiceNotifier * notify ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - void unregisterAllInterest( void ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + void unregisterAllInterest( void ); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn waitForState( UInt32 mask, UInt32 value, - mach_timespec_t * timeout = 0 ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn waitForState( UInt32 mask, UInt32 value, + mach_timespec_t * timeout = 0 ); - IOReturn waitForState( UInt32 mask, UInt32 value, uint64_t timeout ); + IOReturn waitForState( UInt32 mask, UInt32 value, uint64_t timeout ); - UInt32 _adjustBusy( SInt32 delta ); + UInt32 _adjustBusy( SInt32 delta ); - bool terminatePhase1( IOOptionBits options = 0 ); - void scheduleTerminatePhase2( IOOptionBits options = 0 ); - void scheduleStop( IOService * provider ); + bool terminatePhase1( IOOptionBits options = 0 ); + void scheduleTerminatePhase2( IOOptionBits options = 0 ); + void scheduleStop( IOService * provider ); - static void waitToBecomeTerminateThread( void ); - static void __attribute__((__noreturn__)) terminateThread( void * arg, wait_result_t unused ); - static void terminateWorker( IOOptionBits options ); - static void actionWillTerminate( IOService * victim, IOOptionBits options, - OSArray * doPhase2List, void*, void * ); - static void actionDidTerminate( IOService * victim, IOOptionBits options, - void *, void *, void *); + static void waitToBecomeTerminateThread( void ); + static void __attribute__((__noreturn__)) terminateThread( void * arg, wait_result_t unused ); + static void terminateWorker( IOOptionBits options ); + static void actionWillTerminate( IOService * victim, IOOptionBits options, + OSArray * doPhase2List, void*, void * ); + static void actionDidTerminate( IOService * victim, IOOptionBits options, + void *, void *, void *); - static void actionWillStop( IOService * victim, IOOptionBits options, - void *, void *, void *); - static void actionDidStop( IOService * victim, IOOptionBits options, - void *, void *, void *); + static void actionWillStop( IOService * victim, IOOptionBits options, + void *, void *, void *); + static void actionDidStop( IOService * victim, IOOptionBits options, + void *, void *, void *); - static void 
actionFinalize( IOService * victim, IOOptionBits options, - void *, void *, void *); - static void actionStop( IOService * client, IOService * provider, - void *, void *, void *); + static void actionFinalize( IOService * victim, IOOptionBits options, + void *, void *, void *); + static void actionStop( IOService * client, IOService * provider, + void *, void *, void *); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn resolveInterrupt(IOService *nub, int source); - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn lookupInterrupt(int source, bool resolve, IOInterruptController **interruptController); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn resolveInterrupt(IOService *nub, int source); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn lookupInterrupt(int source, bool resolve, IOInterruptController **interruptController); #ifdef XNU_KERNEL_PRIVATE - /* end xnu internals */ +/* end xnu internals */ #endif - /* power management */ +/* power management */ public: /*! @function PMinit - @abstract Initializes power management for a driver. - @discussion PMinit allocates and initializes the power management instance variables, and it should be called before accessing those variables or calling the power management methods. This method should be called inside the driver's start routine and must be paired with a call to @link PMstop PMstop@/link. - Most calls to PMinit are followed by calls to @link joinPMtree joinPMtree@/link and @link registerPowerDriver registerPowerDriver@/link. */ + * @abstract Initializes power management for a driver. + * @discussion PMinit allocates and initializes the power management instance variables, and it should be called before accessing those variables or calling the power management methods. This method should be called inside the driver's start routine and must be paired with a call to @link PMstop PMstop@/link. + * Most calls to PMinit are followed by calls to @link joinPMtree joinPMtree@/link and @link registerPowerDriver registerPowerDriver@/link. */ - virtual void PMinit( void ); + virtual void PMinit( void ); /*! @function PMstop - @abstract Stop power managing the driver. - @discussion Removes the driver from the power plane and stop its power management. This method is synchronous against any power management method invocations (e.g. setPowerState or setAggressiveness), so when this method returns it is guaranteed those power management methods will not be entered. Driver should not call any power management methods after this call. - Calling PMstop cleans up for the three power management initialization calls: @link PMinit PMinit@/link, @link joinPMtree joinPMtree@/link, and @link registerPowerDriver registerPowerDriver@/link. */ + * @abstract Stops power managing the driver. + * @discussion Removes the driver from the power plane and stops its power management. This method is synchronous against any power management method invocations (e.g. setPowerState or setAggressiveness), so when this method returns it is guaranteed those power management methods will not be entered. The driver should not call any power management methods after this call. + * Calling PMstop cleans up for the three power management initialization calls: @link PMinit PMinit@/link, @link joinPMtree joinPMtree@/link, and @link registerPowerDriver registerPowerDriver@/link. */ - virtual void PMstop( void ); + virtual void PMstop( void ); /*! @function joinPMtree - @abstract Joins the driver into the power plane of the I/O Registry.
- @discussion A driver uses this method to call its nub when initializing (usually in its start routine after calling @link PMinit PMinit@/link), to be attached into the power management hierarchy (i.e., the power plane). A driver usually calls this method on the driver for the device that provides it power (this is frequently the nub). - Before this call returns, the caller will probably be called at @link setPowerParent setPowerParent@/link and @link setAggressiveness setAggressiveness@/link and possibly at @link addPowerChild addPowerChild@/link as it is added to the hierarchy. This method may be overridden by a nub subclass. - @param driver The driver to be added to the power plane, usually this. */ + * @abstract Joins the driver into the power plane of the I/O Registry. + * @discussion A driver uses this method to call its nub when initializing (usually in its start routine after calling @link PMinit PMinit@/link), to be attached into the power management hierarchy (i.e., the power plane). A driver usually calls this method on the driver for the device that provides it power (this is frequently the nub). + * Before this call returns, the caller will probably be called at @link setPowerParent setPowerParent@/link and @link setAggressiveness setAggressiveness@/link and possibly at @link addPowerChild addPowerChild@/link as it is added to the hierarchy. This method may be overridden by a nub subclass. + * @param driver The driver to be added to the power plane, usually this. */ - virtual void joinPMtree( IOService * driver ); + virtual void joinPMtree( IOService * driver ); /*! @function registerPowerDriver - @abstract Registers a set of power states that the driver supports. - @discussion A driver defines its array of supported power states with power management in its power management initialization (its start routine). If successful, power management will call the driver to instruct it to change its power state through @link setPowerState setPowerState@/link. - Most drivers do not need to override registerPowerDriver. A nub may override registerPowerDriver if it needs to arrange its children in the power plane differently than the default placement, but this is uncommon. - @param controllingDriver A pointer to the calling driver, usually this. - @param powerStates A driver-defined array of power states that the driver and device support. Power states are defined in pwr_mgt/IOPMpowerState.h. - @param numberOfStates The number of power states in the array. - @result IOPMNoErr. All errors are logged via kprintf. */ - - virtual IOReturn registerPowerDriver( - IOService * controllingDriver, - IOPMPowerState * powerStates, - unsigned long numberOfStates ); + * @abstract Registers a set of power states that the driver supports. + * @discussion A driver defines its array of supported power states with power management in its power management initialization (its start routine). If successful, power management will call the driver to instruct it to change its power state through @link setPowerState setPowerState@/link. + * Most drivers do not need to override registerPowerDriver. A nub may override registerPowerDriver if it needs to arrange its children in the power plane differently than the default placement, but this is uncommon. + * @param controllingDriver A pointer to the calling driver, usually this. + * @param powerStates A driver-defined array of power states that the driver and device support. Power states are defined in pwr_mgt/IOPMpowerState.h. 
+ * @param numberOfStates The number of power states in the array. + * @result IOPMNoErr. All errors are logged via kprintf. */ + + virtual IOReturn registerPowerDriver( + IOService * controllingDriver, + IOPMPowerState * powerStates, + unsigned long numberOfStates ); /*! @function registerInterestedDriver - @abstract Allows an IOService object to register interest in the changing power state of a power-managed IOService object. - @discussion Call registerInterestedDriver on the IOService object you are interested in receiving power state messages from, and pass a pointer to the interested driver (this) as an argument. - The interested driver is retained until the power interest is removed by calling deRegisterInterestedDriver. - The interested driver should override @link powerStateWillChangeTo powerStateWillChangeTo@/link and @link powerStateDidChangeTo powerStateDidChangeTo@/link to receive these power change messages. - Interested drivers must acknowledge power changes in powerStateWillChangeTo or powerStateDidChangeTo, either via return value or later calls to @link acknowledgePowerChange acknowledgePowerChange@/link. - @param theDriver The driver of interest adds this pointer to the list of interested drivers. It informs drivers on this list before and after the power change. - @result Flags describing the capability of the device in its current power state. If the current power state is not yet defined, zero is returned (this is the case when the driver is not yet in the power domain hierarchy or hasn't fully registered with power management yet). */ + * @abstract Allows an IOService object to register interest in the changing power state of a power-managed IOService object. + * @discussion Call registerInterestedDriver on the IOService object you are interested in receiving power state messages from, and pass a pointer to the interested driver (this) as an argument. + * The interested driver is retained until the power interest is removed by calling deRegisterInterestedDriver. + * The interested driver should override @link powerStateWillChangeTo powerStateWillChangeTo@/link and @link powerStateDidChangeTo powerStateDidChangeTo@/link to receive these power change messages. + * Interested drivers must acknowledge power changes in powerStateWillChangeTo or powerStateDidChangeTo, either via return value or later calls to @link acknowledgePowerChange acknowledgePowerChange@/link. + * @param theDriver The driver of interest adds this pointer to the list of interested drivers. It informs drivers on this list before and after the power change. + * @result Flags describing the capability of the device in its current power state. If the current power state is not yet defined, zero is returned (this is the case when the driver is not yet in the power domain hierarchy or hasn't fully registered with power management yet). */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOPMPowerFlags registerInterestedDriver( IOService * theDriver ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOPMPowerFlags registerInterestedDriver( IOService * theDriver ); /*! @function deRegisterInterestedDriver - @abstract De-registers power state interest from a previous call to registerInterestedDriver. - @discussion The retain from registerInterestedDriver is released. This method is synchronous against any powerStateWillChangeTo or powerStateDidChangeTo call targeting the interested driver, so when this method returns it is guaranteed those interest handlers will not be entered. 
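Putting the three initialization calls together, a sketch of a start routine registering a minimal two-state (off/on) array; the field values shown are illustrative, and the full structure is defined in pwr_mgt/IOPMpowerState.h.

    // Sketch: minimal power management bring-up; pair with PMstop() in stop().
    static IOPMPowerState gPowerStates[2] = {
        { kIOPMPowerStateVersion1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { kIOPMPowerStateVersion1, kIOPMDeviceUsable, kIOPMPowerOn, kIOPMPowerOn,
          0, 0, 0, 0, 0, 0, 0, 0 }
    };

    PMinit();                                  // allocate PM instance variables
    provider->joinPMtree(this);                // attach into the power plane
    registerPowerDriver(this, gPowerStates, 2);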
- Most drivers do not need to override deRegisterInterestedDriver. - @param theDriver The interested driver previously passed into @link registerInterestedDriver registerInterestedDriver@/link. - @result A return code that can be ignored by the caller. */ + * @abstract De-registers power state interest from a previous call to registerInterestedDriver. + * @discussion The retain from registerInterestedDriver is released. This method is synchronous against any powerStateWillChangeTo or powerStateDidChangeTo call targeting the interested driver, so when this method returns it is guaranteed those interest handlers will not be entered. + * Most drivers do not need to override deRegisterInterestedDriver. + * @param theDriver The interested driver previously passed into @link registerInterestedDriver registerInterestedDriver@/link. + * @result A return code that can be ignored by the caller. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn deRegisterInterestedDriver( IOService * theDriver ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn deRegisterInterestedDriver( IOService * theDriver ); /*! @function acknowledgePowerChange - @abstract Acknowledges an in-progress power state change. - @discussion When power management informs an interested object (via @link powerStateWillChangeTo powerStateWillChangeTo@/link or @link powerStateDidChangeTo powerStateDidChangeTo@/link), the object can return an immediate acknowledgement via a return code, or it may return an indication that it will acknowledge later by calling acknowledgePowerChange. - Interested objects are those that have registered as interested drivers, as well as power plane children of the power changing driver. A driver that calls @link registerInterestedDriver registerInterestedDriver@/link must call acknowledgePowerChange, or use an immediate acknowledgement return from powerStateWillChangeTo or powerStateDidChangeTo. - @param whichDriver A pointer to the calling driver. The called object tracks all interested parties to ensure that all have acknowledged the power state change. - @result IOPMNoErr. */ + * @abstract Acknowledges an in-progress power state change. + * @discussion When power management informs an interested object (via @link powerStateWillChangeTo powerStateWillChangeTo@/link or @link powerStateDidChangeTo powerStateDidChangeTo@/link), the object can return an immediate acknowledgement via a return code, or it may return an indication that it will acknowledge later by calling acknowledgePowerChange. + * Interested objects are those that have registered as interested drivers, as well as power plane children of the power changing driver. A driver that calls @link registerInterestedDriver registerInterestedDriver@/link must call acknowledgePowerChange, or use an immediate acknowledgement return from powerStateWillChangeTo or powerStateDidChangeTo. + * @param whichDriver A pointer to the calling driver. The called object tracks all interested parties to ensure that all have acknowledged the power state change. + * @result IOPMNoErr. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn acknowledgePowerChange( IOService * whichDriver ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn acknowledgePowerChange( IOService * whichDriver ); /*! @function acknowledgeSetPowerState - @abstract Acknowledges the belated completion of a driver's setPowerState power state change. 
- @discussion After power management instructs a driver to change its state via @link setPowerState setPowerState@/link, that driver must acknowledge the change when its device has completed its transition. The acknowledgement may be immediate, via a return code from setPowerState, or delayed, via this call to acknowledgeSetPowerState. - Any driver that does not return kIOPMAckImplied from its setPowerState implementation must later call acknowledgeSetPowerState. - @result IOPMNoErr. */ + * @abstract Acknowledges the belated completion of a driver's setPowerState power state change. + * @discussion After power management instructs a driver to change its state via @link setPowerState setPowerState@/link, that driver must acknowledge the change when its device has completed its transition. The acknowledgement may be immediate, via a return code from setPowerState, or delayed, via this call to acknowledgeSetPowerState. + * Any driver that does not return kIOPMAckImplied from its setPowerState implementation must later call acknowledgeSetPowerState. + * @result IOPMNoErr. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn acknowledgeSetPowerState( void ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn acknowledgeSetPowerState( void ); /*! @function requestPowerDomainState - @abstract Tells a driver to adjust its power state. - @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @abstract Tells a driver to adjust its power state. + * @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - virtual IOReturn requestPowerDomainState( - IOPMPowerFlags desiredState, - IOPowerConnection * whichChild, - unsigned long specificationFlags ); + virtual IOReturn requestPowerDomainState( + IOPMPowerFlags desiredState, + IOPowerConnection * whichChild, + unsigned long specificationFlags ); /*! @function makeUsable - @abstract Requests that a device become usable. - @discussion This method is called when some client of a device (or the device's own driver) is asking for the device to become usable. Power management responds by telling the object upon which this method is called to change to its highest power state. - makeUsable is implemented using @link changePowerStateToPriv changePowerStateToPriv@/link. Subsequent requests for lower power, such as from changePowerStateToPriv, will pre-empt this request. - @result A return code that can be ignored by the caller. */ + * @abstract Requests that a device become usable. + * @discussion This method is called when some client of a device (or the device's own driver) is asking for the device to become usable. Power management responds by telling the object upon which this method is called to change to its highest power state. + * makeUsable is implemented using @link changePowerStateToPriv changePowerStateToPriv@/link. Subsequent requests for lower power, such as from changePowerStateToPriv, will pre-empt this request. + * @result A return code that can be ignored by the caller. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn makeUsable( void ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn makeUsable( void ); /*! @function temporaryPowerClampOn - @abstract A driver calls this method to hold itself in the highest power state until it has children. - @discussion Use temporaryPowerClampOn to hold your driver in its highest power state while waiting for child devices to attach.
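For the delayed-acknowledgement path described above, a sketch of a setPowerState override; the class and helper names are invented. By the power management convention, a return value other than kIOPMAckImplied is the upper bound, in microseconds, before the driver will call acknowledgeSetPowerState.

    IOReturn MyDriver::setPowerState(unsigned long powerState, IOService * whatDevice)
    {
        if (startAsyncTransition(powerState)) {   // hypothetical helper
            // The completion path will call acknowledgeSetPowerState().
            return 10 * 1000 * 1000;              // up to 10 seconds, in microseconds
        }
        return kIOPMAckImplied;                   // transition finished synchronously
    }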
After children have attached, the clamp is released and the device's power state is controlled by the children's requirements. - @result A return code that can be ignored by the caller. */ + * @abstract A driver calls this method to hold itself in the highest power state until it has children. + * @discussion Use temporaryPowerClampOn to hold your driver in its highest power state while waiting for child devices to attach. After children have attached, the clamp is released and the device's power state is controlled by the children's requirements. + * @result A return code that can be ignored by the caller. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn temporaryPowerClampOn( void ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn temporaryPowerClampOn( void ); /*! @function changePowerStateTo - @abstract Sets a driver's power state. - @discussion This function is one of several that are used to set a driver's power state. In most circumstances, however, you should call @link changePowerStateToPriv changePowerStateToPriv@/link instead. - Calls to changePowerStateTo, changePowerStateToPriv, and a driver's power children all affect the power state of a driver. For legacy design reasons, they have overlapping functionality. Although you should call changePowerStateToPriv to change your device's power state, you might need to call changePowerStateTo in the following circumstances: -
  • If a driver will be using changePowerStateToPriv to change its power state, it should call changePowerStateTo(0) in its start routine to eliminate the influence changePowerStateTo has on power state calculations. -
  • Call changePowerStateTo in conjunction with @link setIdleTimerPeriod setIdleTimerPeriod@/link and @link activityTickle activityTickle@/link to idle a driver into a low power state. For a driver with 3 power states, for example, changePowerStateTo(1) sets a minimum level of power state 1, such that the idle timer period may not set your device's power any lower than state 1.
- @param ordinal The number of the desired power state in the power state array. - @result A return code that can be ignored by the caller. */ + * @abstract Sets a driver's power state. + * @discussion This function is one of several that are used to set a driver's power state. In most circumstances, however, you should call @link changePowerStateToPriv changePowerStateToPriv@/link instead. + * Calls to changePowerStateTo, changePowerStateToPriv, and a driver's power children all affect the power state of a driver. For legacy design reasons, they have overlapping functionality. Although you should call changePowerStateToPriv to change your device's power state, you might need to call changePowerStateTo in the following circumstances: + *
  • If a driver will be using changePowerStateToPriv to change its power state, it should call changePowerStateTo(0) in its start routine to eliminate the influence changePowerStateTo has on power state calculations. + *
  • Call changePowerStateTo in conjunction with @link setIdleTimerPeriod setIdleTimerPeriod@/link and @link activityTickle activityTickle@/link to idle a driver into a low power state. For a driver with 3 power states, for example, changePowerStateTo(1) sets a minimum level of power state 1, such that the idle timer period may not set your device's power any lower than state 1.
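+ * As an illustration only (a minimal sketch; MyDriver, ourPowerStates, and the 30-second period are hypothetical, not part of this header), the idle-clamp pattern from the second bullet might look like:
+ * @textblock
+ *   static IOPMPowerState ourPowerStates[ 3 ] = {
+ *       { kIOPMPowerStateVersion1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },                                      // 0: off
+ *       { kIOPMPowerStateVersion1, 0, kIOPMPowerOn, kIOPMPowerOn, 0, 0, 0, 0, 0, 0, 0, 0 },                // 1: idle
+ *       { kIOPMPowerStateVersion1, kIOPMDeviceUsable, kIOPMPowerOn, kIOPMPowerOn, 0, 0, 0, 0, 0, 0, 0, 0 } // 2: on
+ *   };
+ *
+ *   bool MyDriver::start( IOService * provider )
+ *   {
+ *       if (!IOService::start( provider )) return false;
+ *       PMinit();                          // initialize power management state
+ *       provider->joinPMtree( this );      // attach this driver to the power plane
+ *       registerPowerDriver( this, ourPowerStates, 3 );
+ *       changePowerStateTo( 1 );           // clamp: the idle timer may not lower the device below state 1
+ *       setIdleTimerPeriod( 30 );          // idle down one state after 30 seconds without activityTickle
+ *       return true;
+ *   }
+ * @/textblock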
+ * @param ordinal The number of the desired power state in the power state array. + * @result A return code that can be ignored by the caller. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOReturn changePowerStateTo( unsigned long ordinal ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOReturn changePowerStateTo( unsigned long ordinal ); /*! @function currentCapability - @abstract Finds out the capability of a device's current power state. - @result A copy of the capabilityFlags field for the current power state in the power state array. */ + * @abstract Finds out the capability of a device's current power state. + * @result A copy of the capabilityFlags field for the current power state in the power state array. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - IOPMPowerFlags currentCapability( void ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + IOPMPowerFlags currentCapability( void ); /*! @function currentPowerConsumption - @abstract Finds out the current power consumption of a device. - @discussion Most Mac OS X power managed drivers do not report their power consumption via the staticPower field. Thus this call will not accurately reflect power consumption for most drivers. - @result A copy of the staticPower field for the current power state in the power state array. */ + * @abstract Finds out the current power consumption of a device. + * @discussion Most Mac OS X power managed drivers do not report their power consumption via the staticPower field. Thus this call will not accurately reflect power consumption for most drivers. + * @result A copy of the staticPower field for the current power state in the power state array. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - unsigned long currentPowerConsumption( void ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + unsigned long currentPowerConsumption( void ); /*! @function activityTickle - @abstract Informs power management when a power-managed device is in use, so that power management can track when it is idle and adjust its power state accordingly. - @discussion The activityTickle method is provided for objects in the system (or for the driver itself) to tell a driver that its device is being used. - The IOService superclass can manage idleness determination with a simple idle timer mechanism and this activityTickle call. To start this up, the driver calls its superclass's setIdleTimerPeriod. This starts a timer for the time interval specified in the call. When the timer expires, the superclass checks to see if there has been any activity since the last timer expiration. (It checks to see if activityTickle has been called). If there has been activity, it restarts the timer, and this process continues. When the timer expires, and there has been no device activity, the superclass lowers the device power state to the next lower state. This can continue until the device is in state zero. - After the device has been powered down by at least one power state, a subsequent call to activityTickle causes the device to be switched to a higher state required for the activity. - If the driver is managing the idleness determination totally on its own, the value of the type parameter should be kIOPMSubclassPolicy, and the driver should override the activityTickle method. The superclass IOService implementation of activityTickle does nothing with the kIOPMSubclassPolicy argument. - @param type When type is kIOPMSubclassPolicy, activityTickle is not handled in IOService and should be intercepted by the subclass. 
When type is kIOPMSuperclassPolicy1, an activity flag is set and the device state is checked. If the device has been powered down, it is powered up again. - @param stateNumber When type is kIOPMSuperclassPolicy1, stateNumber contains the desired power state ordinal for the activity. If the device is in a lower state, the superclass will switch it to this state. This is for devices that can handle some accesses in lower power states; the device is powered up only as far as it needs to be for the activity. - @result When type is kIOPMSuperclassPolicy1, the superclass returns true if the device is currently in the state specified by stateNumber. If the device is in a lower state and must be powered up, the superclass returns false; in this case the superclass will initiate a power change to power the device up. */ - - virtual bool activityTickle( - unsigned long type, - unsigned long stateNumber = 0 ); + * @abstract Informs power management when a power-managed device is in use, so that power management can track when it is idle and adjust its power state accordingly. + * @discussion The activityTickle method is provided for objects in the system (or for the driver itself) to tell a driver that its device is being used. + * The IOService superclass can manage idleness determination with a simple idle timer mechanism and this activityTickle call. To start this up, the driver calls its superclass's setIdleTimerPeriod. This starts a timer for the time interval specified in the call. When the timer expires, the superclass checks to see if there has been any activity since the last timer expiration. (It checks to see if activityTickle has been called). If there has been activity, it restarts the timer, and this process continues. When the timer expires, and there has been no device activity, the superclass lowers the device power state to the next lower state. This can continue until the device is in state zero. + * After the device has been powered down by at least one power state, a subsequent call to activityTickle causes the device to be switched to a higher state required for the activity. + * If the driver is managing the idleness determination totally on its own, the value of the type parameter should be kIOPMSubclassPolicy, and the driver should override the activityTickle method. The superclass IOService implementation of activityTickle does nothing with the kIOPMSubclassPolicy argument. + * @param type When type is kIOPMSubclassPolicy, activityTickle is not handled in IOService and should be intercepted by the subclass. When type is kIOPMSuperclassPolicy1, an activity flag is set and the device state is checked. If the device has been powered down, it is powered up again. + * @param stateNumber When type is kIOPMSuperclassPolicy1, stateNumber contains the desired power state ordinal for the activity. If the device is in a lower state, the superclass will switch it to this state. This is for devices that can handle some accesses in lower power states; the device is powered up only as far as it needs to be for the activity. + * @result When type is kIOPMSuperclassPolicy1, the superclass returns true if the device is currently in the state specified by stateNumber. If the device is in a lower state and must be powered up, the superclass returns false; in this case the superclass will initiate a power change to power the device up. */ + + virtual bool activityTickle( + unsigned long type, + unsigned long stateNumber = 0 ); /*! 
@function setAggressiveness - @abstract Broadcasts an aggressiveness factor from the parent of a driver to the driver. - @discussion Implement setAggressiveness to receive a notification when an "aggressiveness Aggressiveness factors are a loose set of power management variables that contain values for system sleep timeout, display sleep timeout, whether the system is on battery or AC, and other power management features. There are several aggressiveness factors that can be broadcast and a driver may take action on whichever factors apply to it. - A driver that has joined the power plane via @link joinPMtree joinPMtree@/link will receive setAgressiveness calls when aggressiveness factors change. - A driver may override this call if it needs to do something with the new factor (such as change its idle timeout). If overridden, the driver must call its superclass's setAgressiveness method in its own setAgressiveness implementation. - Most drivers do not need to implement setAgressiveness. - @param type The aggressiveness factor type, such as kPMMinutesToDim, kPMMinutesToSpinDown, kPMMinutesToSleep, and kPMPowerSource. (Aggressiveness factors are defined in pwr_mgt/IOPM.h.) - @param newLevel The aggressiveness factor's new value. - @result IOPMNoErr. */ - - virtual IOReturn setAggressiveness( - unsigned long type, - unsigned long newLevel ); + * @abstract Broadcasts an aggressiveness factor from the parent of a driver to the driver. + * @discussion Implement setAggressiveness to receive a notification when an "aggressiveness factor" changes. Aggressiveness factors are a loose set of power management variables that contain values for system sleep timeout, display sleep timeout, whether the system is on battery or AC, and other power management features. There are several aggressiveness factors that can be broadcast and a driver may take action on whichever factors apply to it. + * A driver that has joined the power plane via @link joinPMtree joinPMtree@/link will receive setAggressiveness calls when aggressiveness factors change. + * A driver may override this call if it needs to do something with the new factor (such as change its idle timeout). If overridden, the driver must call its superclass's setAggressiveness method in its own setAggressiveness implementation. + * Most drivers do not need to implement setAggressiveness. + * @param type The aggressiveness factor type, such as kPMMinutesToDim, kPMMinutesToSpinDown, kPMMinutesToSleep, and kPMPowerSource. (Aggressiveness factors are defined in pwr_mgt/IOPM.h.) + * @param newLevel The aggressiveness factor's new value. + * @result IOPMNoErr. */ + + virtual IOReturn setAggressiveness( + unsigned long type, + unsigned long newLevel ); /*! @function getAggressiveness - @abstract Returns the current aggressiveness value for the given type. - @param type The aggressiveness factor to query. - @param currentLevel Upon successful return, contains the value of aggressiveness factor type. - @result kIOReturnSuccess upon success; an I/O Kit error code otherwise. */ + * @abstract Returns the current aggressiveness value for the given type. + * @param type The aggressiveness factor to query. + * @param currentLevel Upon successful return, contains the value of aggressiveness factor type. + * @result kIOReturnSuccess upon success; an I/O Kit error code otherwise. */ - virtual IOReturn getAggressiveness( - unsigned long type, - unsigned long * currentLevel ); + virtual IOReturn getAggressiveness( + unsigned long type, + unsigned long * currentLevel ); #ifndef __LP64__ /*! 
@function systemWake - @abstract Tells every driver in the power plane that the system is waking up. - @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @abstract Tells every driver in the power plane that the system is waking up. + * @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - virtual IOReturn systemWake( void ) - APPLE_KEXT_DEPRECATED; + virtual IOReturn systemWake( void ) + APPLE_KEXT_DEPRECATED; /*! @function temperatureCriticalForZone - @abstract Alerts a driver to a critical temperature in some thermal zone. - @discussion This call is unused by power management. It is not intended to be called or overridden. */ + * @abstract Alerts a driver to a critical temperature in some thermal zone. + * @discussion This call is unused by power management. It is not intended to be called or overridden. */ - virtual IOReturn temperatureCriticalForZone( IOService * whichZone ) - APPLE_KEXT_DEPRECATED; + virtual IOReturn temperatureCriticalForZone( IOService * whichZone ) + APPLE_KEXT_DEPRECATED; /*! @function youAreRoot - @abstract Informs power management which IOService object is the power plane root. - @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @abstract Informs power management which IOService object is the power plane root. + * @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - virtual IOReturn youAreRoot( void ) - APPLE_KEXT_DEPRECATED; + virtual IOReturn youAreRoot( void ) + APPLE_KEXT_DEPRECATED; /*! @function setPowerParent - @abstract This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @abstract This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - virtual IOReturn setPowerParent( - IOPowerConnection * parent, - bool stateKnown, - IOPMPowerFlags currentState ) - APPLE_KEXT_DEPRECATED; + virtual IOReturn setPowerParent( + IOPowerConnection * parent, + bool stateKnown, + IOPMPowerFlags currentState ) + APPLE_KEXT_DEPRECATED; #endif /* !__LP64__ */ /*! @function addPowerChild - @abstract Informs a driver that it has a new child. - @discussion The Platform Expert uses this method to call a driver and introduce it to a new child. This call is handled internally by power management. It is not intended to be overridden or called by drivers. - @param theChild A pointer to the child IOService object. */ + * @abstract Informs a driver that it has a new child. + * @discussion The Platform Expert uses this method to call a driver and introduce it to a new child. This call is handled internally by power management. It is not intended to be overridden or called by drivers. + * @param theChild A pointer to the child IOService object. */ - virtual IOReturn addPowerChild( IOService * theChild ); + virtual IOReturn addPowerChild( IOService * theChild ); /*! @function removePowerChild - @abstract Informs a power managed driver that one of its power plane childen is disappearing. - @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @abstract Informs a power managed driver that one of its power plane children is disappearing. 
+ * @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - virtual IOReturn removePowerChild( IOPowerConnection * theChild ); + virtual IOReturn removePowerChild( IOPowerConnection * theChild ); #ifndef __LP64__ /*! @function command_received - @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - virtual void command_received( void *, void * , void * , void * ); + virtual void command_received( void *, void *, void *, void * ); #endif /*! @function start_PM_idle_timer - @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - APPLE_KEXT_COMPATIBILITY_VIRTUAL - void start_PM_idle_timer( void ); + APPLE_KEXT_COMPATIBILITY_VIRTUAL + void start_PM_idle_timer( void ); #ifndef __LP64__ /*! @function PM_idle_timer_expiration - @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - virtual void PM_idle_timer_expiration( void ) - APPLE_KEXT_DEPRECATED; + virtual void PM_idle_timer_expiration( void ) + APPLE_KEXT_DEPRECATED; /*! @function PM_Clamp_Timer_Expired - @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ + * @discussion This call is handled internally by power management. It is not intended to be overridden or called by drivers. */ - virtual void PM_Clamp_Timer_Expired( void ) - APPLE_KEXT_DEPRECATED; + virtual void PM_Clamp_Timer_Expired( void ) + APPLE_KEXT_DEPRECATED; #endif /*! @function setIdleTimerPeriod - @abstract Sets or changes the idle timer period. - @discussion A driver using the idleness determination provided by IOService calls its superclass with this method to set or change the idle timer period. See @link activityTickle activityTickle@/link for a description of this type of idleness determination. - @param period The desired idle timer period in seconds. - @result kIOReturnSuccess upon success; an I/O Kit error code otherwise. */ + * @abstract Sets or changes the idle timer period. + * @discussion A driver using the idleness determination provided by IOService calls its superclass with this method to set or change the idle timer period. See @link activityTickle activityTickle@/link for a description of this type of idleness determination. + * @param period The desired idle timer period in seconds. + * @result kIOReturnSuccess upon success; an I/O Kit error code otherwise. */ - virtual IOReturn setIdleTimerPeriod( unsigned long period ); + virtual IOReturn setIdleTimerPeriod( unsigned long period ); #ifndef __LP64__ /*! @function getPMworkloop - @abstract Returns a pointer to the system-wide power management work loop. - @availability Deprecated in Mac OS X version 10.6. - @discussion Most drivers should create their own work loops to synchronize their code; drivers should not run arbitrary code on the power management work loop. */ + * @abstract Returns a pointer to the system-wide power management work loop. 
+ * @availability Deprecated in Mac OS X version 10.6. + * @discussion Most drivers should create their own work loops to synchronize their code; drivers should not run arbitrary code on the power management work loop. */ - virtual IOWorkLoop * getPMworkloop( void ) - APPLE_KEXT_DEPRECATED; + virtual IOWorkLoop * getPMworkloop( void ) + APPLE_KEXT_DEPRECATED; #endif /*! @function getPowerState - @abstract Determines a device's power state. - @discussion A device's "current power state" is updated at the end of each power state transition (e.g. transition from state 1 to state 0, or state 0 to state 2). This transition includes the time spent powering on or off any power plane children. Thus, if a child calls getPowerState on its power parent during system wake from sleep, the call will return the index to the device's off state rather than its on state. - @result The current power state's index into the device's power state array. */ + * @abstract Determines a device's power state. + * @discussion A device's "current power state" is updated at the end of each power state transition (e.g. transition from state 1 to state 0, or state 0 to state 2). This transition includes the time spent powering on or off any power plane children. Thus, if a child calls getPowerState on its power parent during system wake from sleep, the call will return the index to the device's off state rather than its on state. + * @result The current power state's index into the device's power state array. */ - UInt32 getPowerState( void ); + UInt32 getPowerState( void ); /*! @function setPowerState - @abstract Requests a power managed driver to change the power state of its device. - @discussion A power managed driver must override setPowerState to take part in system power management. After a driver is registered with power management, the system uses setPowerState to power the device off and on for system sleep and wake. - Calls to @link PMinit PMinit@/link and @link registerPowerDriver registerPowerDriver@/link enable power management to change a device's power state using setPowerState. setPowerState is called in a clean and separate thread context. - @param powerStateOrdinal The number in the power state array of the state the driver is being instructed to switch to. - @param whatDevice A pointer to the power management object which registered to manage power for this device. In most cases, whatDevice will be equal to your driver's own this pointer. - @result The driver must return IOPMAckImplied if it has complied with the request when it returns. Otherwise if it has started the process of changing power state but not finished it, the driver should return a number of microseconds which is an upper limit of the time it will need to finish. Then, when it has completed the power switch, it should call @link acknowledgeSetPowerState acknowledgeSetPowerState@/link. */ + * @abstract Requests a power managed driver to change the power state of its device. + * @discussion A power managed driver must override setPowerState to take part in system power management. After a driver is registered with power management, the system uses setPowerState to power the device off and on for system sleep and wake. + * Calls to @link PMinit PMinit@/link and @link registerPowerDriver registerPowerDriver@/link enable power management to change a device's power state using setPowerState. setPowerState is called in a clean and separate thread context. 
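+ * As an illustration only (a minimal sketch; MyDriver, kMyOnState, and the hardware helpers are hypothetical, not part of this header), a driver with one slow asynchronous transition might acknowledge like this:
+ * @textblock
+ *   IOReturn MyDriver::setPowerState( unsigned long powerStateOrdinal, IOService * whatDevice )
+ *   {
+ *       if (whatDevice != this) return kIOPMAckImplied;
+ *       if (powerStateOrdinal == kMyOnState) {
+ *           beginHardwarePowerUp();        // hypothetical: starts an asynchronous transition whose
+ *                                          // completion handler calls acknowledgeSetPowerState()
+ *           return 5 * 1000 * 1000;        // upper bound on completion: 5 seconds, in microseconds
+ *       }
+ *       powerHardwareDown();               // hypothetical: synchronous, complete on return
+ *       return kIOPMAckImplied;            // transition already done
+ *   }
+ * @/textblock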
+ * @param powerStateOrdinal The number in the power state array of the state the driver is being instructed to switch to. + * @param whatDevice A pointer to the power management object which registered to manage power for this device. In most cases, whatDevice will be equal to your driver's own this pointer. + * @result The driver must return IOPMAckImplied if it has complied with the request when it returns. Otherwise if it has started the process of changing power state but not finished it, the driver should return a number of microseconds which is an upper limit of the time it will need to finish. Then, when it has completed the power switch, it should call @link acknowledgeSetPowerState acknowledgeSetPowerState@/link. */ - virtual IOReturn setPowerState( - unsigned long powerStateOrdinal, - IOService * whatDevice ); + virtual IOReturn setPowerState( + unsigned long powerStateOrdinal, + IOService * whatDevice ); #ifndef __LP64__ /*! @function clampPowerOn - @abstract Deprecated. Do not use. */ + * @abstract Deprecated. Do not use. */ - virtual void clampPowerOn( unsigned long duration ); + virtual void clampPowerOn( unsigned long duration ); #endif /*! @function maxCapabilityForDomainState - @abstract Determines a driver's highest power state possible for a given power domain state. - @discussion This happens when the power domain is changing state and power management needs to determine which state the device is capable of in the new domain state. - Most drivers do not need to implement this method, and can rely upon the default IOService implementation. The IOService implementation scans the power state array looking for the highest state whose inputPowerRequirement field exactly matches the value of the domainState parameter. If more intelligent determination is required, the driver itself should implement the method and override the superclass's implementation. - @param domainState Flags that describe the character of "domain power"; they represent the outputPowerCharacter field of a state in the power domain's power state array. - @result A state number. */ + * @abstract Determines a driver's highest power state possible for a given power domain state. + * @discussion This happens when the power domain is changing state and power management needs to determine which state the device is capable of in the new domain state. + * Most drivers do not need to implement this method, and can rely upon the default IOService implementation. The IOService implementation scans the power state array looking for the highest state whose inputPowerRequirement field exactly matches the value of the domainState parameter. If more intelligent determination is required, the driver itself should implement the method and override the superclass's implementation. + * @param domainState Flags that describe the character of "domain power"; they represent the outputPowerCharacter field of a state in the power domain's power state array. + * @result A state number. */ - virtual unsigned long maxCapabilityForDomainState( IOPMPowerFlags domainState ); + virtual unsigned long maxCapabilityForDomainState( IOPMPowerFlags domainState ); /*! @function initialPowerStateForDomainState - @abstract Determines which power state a device is in, given the current power domain state. - @discussion Power management calls this method once, when the driver is initializing power management. - Most drivers do not need to implement this method, and can rely upon the default IOService implementation. 
The IOService implementation scans the power state array looking for the highest state whose inputPowerRequirement field exactly matches the value of the domainState parameter. If more intelligent determination is required, the power managed driver should implement the method and override the superclass's implementation. - @param domainState Flags that describe the character of "domain power"; they represent the outputPowerCharacter field of a state in the power domain's power state array. - @result A state number. */ + * @abstract Determines which power state a device is in, given the current power domain state. + * @discussion Power management calls this method once, when the driver is initializing power management. + * Most drivers do not need to implement this method, and can rely upon the default IOService implementation. The IOService implementation scans the power state array looking for the highest state whose inputPowerRequirement field exactly matches the value of the domainState parameter. If more intelligent determination is required, the power managed driver should implement the method and override the superclass's implementation. + * @param domainState Flags that describe the character of "domain power"; they represent the outputPowerCharacter field of a state in the power domain's power state array. + * @result A state number. */ - virtual unsigned long initialPowerStateForDomainState( IOPMPowerFlags domainState ); + virtual unsigned long initialPowerStateForDomainState( IOPMPowerFlags domainState ); /*! @function powerStateForDomainState - @abstract Determines what power state the device would be in for a given power domain state. - @discussion This call is unused by power management. Drivers should override initialPowerStateForDomainState and/or maxCapabilityForDomainState instead to change the default mapping of domain state to driver power state. - @param domainState Flags that describe the character of "domain power"; they represent the outputPowerCharacter field of a state in the power domain's power state array. - @result A state number. */ + * @abstract Determines what power state the device would be in for a given power domain state. + * @discussion This call is unused by power management. Drivers should override initialPowerStateForDomainState and/or maxCapabilityForDomainState instead to change the default mapping of domain state to driver power state. + * @param domainState Flags that describe the character of "domain power"; they represent the outputPowerCharacter field of a state in the power domain's power state array. + * @result A state number. */ - virtual unsigned long powerStateForDomainState( IOPMPowerFlags domainState ); + virtual unsigned long powerStateForDomainState( IOPMPowerFlags domainState ); /*! @function powerStateWillChangeTo - @abstract Informs interested parties that a device is about to change its power state. - @discussion Power management informs interested parties that a device is about to change to a different power state. Interested parties are those that have registered for this notification via @link registerInterestedDriver registerInterestedDriver@/link. If you have called registerInterestedDriver on a power managed driver, you must implement powerStateWillChangeTo and @link powerStateDidChangeTo powerStateDidChangeTo@/link to receive the notifications. - powerStateWillChangeTo is called in a clean and separate thread context. 
powerStateWillChangeTo is called before a power state transition takes place; powerStateDidChangeTo is called after the transition has completed. - @param capabilities Flags that describe the capability of the device in the new power state (they come from the capabilityFlags field of the new state in the power state array). - @param stateNumber The number of the state in the state array that the device is switching to. - @param whatDevice A pointer to the driver that is changing. It can be used by a driver that is receiving power state change notifications for multiple devices to distinguish between them. - @result The driver returns IOPMAckImplied if it has prepared for the power change when it returns. If it has started preparing but not finished, it should return a number of microseconds which is an upper limit of the time it will need to finish preparing. Then, when it has completed its preparations, it should call @link acknowledgePowerChange acknowledgePowerChange@/link. */ - - virtual IOReturn powerStateWillChangeTo( - IOPMPowerFlags capabilities, - unsigned long stateNumber, - IOService * whatDevice ); + * @abstract Informs interested parties that a device is about to change its power state. + * @discussion Power management informs interested parties that a device is about to change to a different power state. Interested parties are those that have registered for this notification via @link registerInterestedDriver registerInterestedDriver@/link. If you have called registerInterestedDriver on a power managed driver, you must implement powerStateWillChangeTo and @link powerStateDidChangeTo powerStateDidChangeTo@/link to receive the notifications. + * powerStateWillChangeTo is called in a clean and separate thread context. powerStateWillChangeTo is called before a power state transition takes place; powerStateDidChangeTo is called after the transition has completed. + * @param capabilities Flags that describe the capability of the device in the new power state (they come from the capabilityFlags field of the new state in the power state array). + * @param stateNumber The number of the state in the state array that the device is switching to. + * @param whatDevice A pointer to the driver that is changing. It can be used by a driver that is receiving power state change notifications for multiple devices to distinguish between them. + * @result The driver returns IOPMAckImplied if it has prepared for the power change when it returns. If it has started preparing but not finished, it should return a number of microseconds which is an upper limit of the time it will need to finish preparing. Then, when it has completed its preparations, it should call @link acknowledgePowerChange acknowledgePowerChange@/link. */ + + virtual IOReturn powerStateWillChangeTo( + IOPMPowerFlags capabilities, + unsigned long stateNumber, + IOService * whatDevice ); /*! @function powerStateDidChangeTo - @abstract Informs interested parties that a device has changed to a different power state. - @discussion Power management informs interested parties that a device has changed to a different power state. Interested parties are those that have registered for this notification via @link registerInterestedDriver registerInterestedDriver@/link. If you have called registerInterestedDriver on a power managed driver, you must implemnt @link powerStateWillChangeTo powerStateWillChangeTo@/link and powerStateDidChangeTo to receive the notifications. - powerStateDidChangeTo is called in a clean and separate thread context. 
powerStateWillChangeTo is called before a power state transition takes place; powerStateDidChangeTo is called after the transition has completed. - @param capabilities Flags that describe the capability of the device in the new power state (they come from the capabilityFlags field of the new state in the power state array). - @param stateNumber The number of the state in the state array that the device is switching to. - @param whatDevice A pointer to the driver that is changing. It can be used by a driver that is receiving power state change notifications for multiple devices to distinguish between them. - @result The driver returns IOPMAckImplied if it has prepared for the power change when it returns. If it has started preparing but not finished, it should return a number of microseconds which is an upper limit of the time it will need to finish preparing. Then, when it has completed its preparations, it should call @link acknowledgePowerChange acknowledgePowerChange@/link. */ - - virtual IOReturn powerStateDidChangeTo( - IOPMPowerFlags capabilities, - unsigned long stateNumber, - IOService * whatDevice ); + * @abstract Informs interested parties that a device has changed to a different power state. + * @discussion Power management informs interested parties that a device has changed to a different power state. Interested parties are those that have registered for this notification via @link registerInterestedDriver registerInterestedDriver@/link. If you have called registerInterestedDriver on a power managed driver, you must implement @link powerStateWillChangeTo powerStateWillChangeTo@/link and powerStateDidChangeTo to receive the notifications. + * powerStateDidChangeTo is called in a clean and separate thread context. powerStateWillChangeTo is called before a power state transition takes place; powerStateDidChangeTo is called after the transition has completed. + * @param capabilities Flags that describe the capability of the device in the new power state (they come from the capabilityFlags field of the new state in the power state array). + * @param stateNumber The number of the state in the state array that the device is switching to. + * @param whatDevice A pointer to the driver that is changing. It can be used by a driver that is receiving power state change notifications for multiple devices to distinguish between them. + * @result The driver returns IOPMAckImplied if it has prepared for the power change when it returns. If it has started preparing but not finished, it should return a number of microseconds which is an upper limit of the time it will need to finish preparing. Then, when it has completed its preparations, it should call @link acknowledgePowerChange acknowledgePowerChange@/link. */ + + virtual IOReturn powerStateDidChangeTo( + IOPMPowerFlags capabilities, + unsigned long stateNumber, + IOService * whatDevice ); #ifndef __LP64__ /*! @function didYouWakeSystem - @abstract Asks a driver if its device is the one that just woke the system from sleep. - @availability Deprecated in Mac OS X version 10.6. - @discussion Power management calls a power managed driver with this method to ask if its device is the one that just woke the system from sleep. If a device is capable of waking the system from sleep, its driver should implement didYouWakeSystem and return true if its device was responsible for waking the system. - @result true if the driver's device woke the system and false otherwise. 
*/ + * @abstract Asks a driver if its device is the one that just woke the system from sleep. + * @availability Deprecated in Mac OS X version 10.6. + * @discussion Power management calls a power managed driver with this method to ask if its device is the one that just woke the system from sleep. If a device is capable of waking the system from sleep, its driver should implement didYouWakeSystem and return true if its device was responsible for waking the system. + * @result true if the driver's device woke the system and false otherwise. */ - virtual bool didYouWakeSystem( void ) - APPLE_KEXT_DEPRECATED; + virtual bool didYouWakeSystem( void ) + APPLE_KEXT_DEPRECATED; /*! @function newTemperature - @abstract Tells a power managed driver that the temperature in the thermal zone has changed. - @discussion This call is unused by power management. It is not intended to be called or overridden. */ + * @abstract Tells a power managed driver that the temperature in the thermal zone has changed. + * @discussion This call is unused by power management. It is not intended to be called or overridden. */ - virtual IOReturn newTemperature( long currentTemp, IOService * whichZone ) - APPLE_KEXT_DEPRECATED; + virtual IOReturn newTemperature( long currentTemp, IOService * whichZone ) + APPLE_KEXT_DEPRECATED; #endif - virtual bool askChangeDown( unsigned long ); - virtual bool tellChangeDown( unsigned long ); - virtual void tellNoChangeDown ( unsigned long ); - virtual void tellChangeUp( unsigned long ); - virtual IOReturn allowPowerChange( unsigned long refcon ); - virtual IOReturn cancelPowerChange( unsigned long refcon ); + virtual bool askChangeDown( unsigned long ); + virtual bool tellChangeDown( unsigned long ); + virtual void tellNoChangeDown( unsigned long ); + virtual void tellChangeUp( unsigned long ); + virtual IOReturn allowPowerChange( unsigned long refcon ); + virtual IOReturn cancelPowerChange( unsigned long refcon ); protected: -/*! @function changePowerStateToPriv - @abstract Tells a driver's superclass to change the power state of its device. - @discussion A driver uses this method to tell its superclass to change the power state of the device. This is the recommended way to change the power state of a device. - Three things affect driver power state: @link changePowerStateTo changePowerStateTo@/link, changePowerStateToPriv, and the desires of the driver's power plane children. Power management puts the device into the maximum state governed by those three entities. - Drivers may eliminate the influence of the changePowerStateTo method on power state one of two ways. See @link powerOverrideOnPriv powerOverrideOnPriv@/link to ignore the method's influence, or call changePowerStateTo(0) in the driver's start routine to remove the changePowerStateTo method's power request. - @param ordinal The number of the desired power state in the power state array. - @result A return code that can be ignored by the caller. */ +/*! @function changePowerStateToPriv + * @abstract Tells a driver's superclass to change the power state of its device. + * @discussion A driver uses this method to tell its superclass to change the power state of the device. This is the recommended way to change the power state of a device. + * Three things affect driver power state: @link changePowerStateTo changePowerStateTo@/link, changePowerStateToPriv, and the desires of the driver's power plane children. Power management puts the device into the maximum state governed by those three entities. 
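+ * As an illustration only (a minimal sketch; MyDriver, kMyOnState, and the client hooks are hypothetical, not part of this header), a driver might raise and drop its own desire as its client opens and closes:
+ * @textblock
+ *   void MyDriver::clientOpened( void )
+ *   {
+ *       changePowerStateToPriv( kMyOnState );   // our own desire: fully usable
+ *   }
+ *
+ *   void MyDriver::clientClosed( void )
+ *   {
+ *       changePowerStateToPriv( 0 );            // drop our desire; a changePowerStateTo request or
+ *                                               // a child's desire may still hold the device up
+ *   }
+ * @/textblock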
+ * Drivers may eliminate the influence of the changePowerStateTo method on power state one of two ways. See @link powerOverrideOnPriv powerOverrideOnPriv@/link to ignore the method's influence, or call changePowerStateTo(0) in the driver's start routine to remove the changePowerStateTo method's power request. + * @param ordinal The number of the desired power state in the power state array. + * @result A return code that can be ignored by the caller. */ - IOReturn changePowerStateToPriv( unsigned long ordinal ); + IOReturn changePowerStateToPriv( unsigned long ordinal ); /*! @function powerOverrideOnPriv - @abstract Allows a driver to ignore its children's power management requests and only use changePowerStateToPriv to define its own power state. - @discussion Power management normally keeps a device at the highest state required by its requests via @link changePowerStateTo changePowerStateTo@/link, @link changePowerStateToPriv changePowerStateToPriv@/link, and its children. However, a driver may ensure a lower power state than otherwise required by itself and its children using powerOverrideOnPriv. When the override is on, power management keeps the device's power state in the state specified by changePowerStateToPriv. Turning on the override will initiate a power change if the driver's changePowerStateToPriv desired power state is different from the maximum of the changePowerStateTo desired power state and the children's desires. - @result A return code that can be ignored by the caller. */ + * @abstract Allows a driver to ignore its children's power management requests and only use changePowerStateToPriv to define its own power state. + * @discussion Power management normally keeps a device at the highest state required by its requests via @link changePowerStateTo changePowerStateTo@/link, @link changePowerStateToPriv changePowerStateToPriv@/link, and its children. However, a driver may ensure a lower power state than otherwise required by itself and its children using powerOverrideOnPriv. When the override is on, power management keeps the device's power state in the state specified by changePowerStateToPriv. Turning on the override will initiate a power change if the driver's changePowerStateToPriv desired power state is different from the maximum of the changePowerStateTo desired power state and the children's desires. + * @result A return code that can be ignored by the caller. */ - IOReturn powerOverrideOnPriv( void ); + IOReturn powerOverrideOnPriv( void ); /*! @function powerOverrideOffPriv - @abstract Allows a driver to disable a power override. - @discussion When a driver has enabled an override via @link powerOverrideOnPriv powerOverrideOnPriv@/link, it can disable it again by calling this method in its superclass. Disabling the override reverts to the default algorithm for determining a device's power state. The superclass will now keep the device at the highest state required by changePowerStateTo, changePowerStateToPriv, and its children. Turning off the override will initiate a power change if the driver's desired power state is different from the maximum of the power managed driver's desire and the children's desires. - @result A return code that can be ignored by the caller. */ + * @abstract Allows a driver to disable a power override. + * @discussion When a driver has enabled an override via @link powerOverrideOnPriv powerOverrideOnPriv@/link, it can disable it again by calling this method in its superclass. 
Disabling the override reverts to the default algorithm for determining a device's power state. The superclass will now keep the device at the highest state required by changePowerStateTo, changePowerStateToPriv, and its children. Turning off the override will initiate a power change if the driver's desired power state is different from the maximum of the power managed driver's desire and the children's desires. + * @result A return code that can be ignored by the caller. */ - IOReturn powerOverrideOffPriv( void ); + IOReturn powerOverrideOffPriv( void ); /*! @function powerChangeDone - @abstract Tells a driver when a power state change is complete. - @discussion Power management uses this method to inform a driver when a power change is completely done, when all interested parties have acknowledged the @link powerStateDidChangeTo powerStateDidChangeTo@/link call. The default implementation of this method is null; the method is meant to be overridden by subclassed power managed drivers. A driver should use this method to find out if a power change it initiated is complete. - @param stateNumber The number of the state in the state array that the device has switched from. */ + * @abstract Tells a driver when a power state change is complete. + * @discussion Power management uses this method to inform a driver when a power change is completely done, when all interested parties have acknowledged the @link powerStateDidChangeTo powerStateDidChangeTo@/link call. The default implementation of this method is null; the method is meant to be overridden by subclassed power managed drivers. A driver should use this method to find out if a power change it initiated is complete. + * @param stateNumber The number of the state in the state array that the device has switched from. 
*/ - virtual void powerChangeDone( unsigned long stateNumber ); + virtual void powerChangeDone( unsigned long stateNumber ); #ifdef XNU_KERNEL_PRIVATE - /* Power management internals */ +/* Power management internals */ public: - void idleTimerExpired( void ); - void settleTimerExpired( void ); - IOReturn synchronizePowerTree( IOOptionBits options = 0, IOService * notifyRoot = 0 ); - bool assertPMDriverCall( IOPMDriverCallEntry * callEntry, IOOptionBits options = 0, IOPMinformee * inform = 0 ); - void deassertPMDriverCall( IOPMDriverCallEntry * callEntry ); - IOReturn changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ); - IOReturn changePowerStateForRootDomain( IOPMPowerStateIndex ordinal ); - IOReturn setIgnoreIdleTimer( bool ignore ); - IOReturn quiescePowerTree( void * target, IOPMCompletionAction action, void * param ); - uint32_t getPowerStateForClient( const OSSymbol * client ); - static const char * getIOMessageString( uint32_t msg ); - static void setAdvisoryTickleEnable( bool enable ); - void reset_watchdog_timer(IOService *obj, int timeout); - void start_watchdog_timer ( void ); - void stop_watchdog_timer ( void ); - void start_watchdog_timer(uint64_t deadline); - IOReturn registerInterestForNotifier( IONotifier *notify, const OSSymbol * typeOfInterest, - IOServiceInterestHandler handler, void * target, void * ref ); - - static IOWorkLoop * getIOPMWorkloop( void ); - bool getBlockingDriverCall(thread_t *thread, const void **callMethod); + void idleTimerExpired( void ); + void settleTimerExpired( void ); + IOReturn synchronizePowerTree( IOOptionBits options = 0, IOService * notifyRoot = 0 ); + bool assertPMDriverCall( IOPMDriverCallEntry * callEntry, IOOptionBits options = 0, IOPMinformee * inform = 0 ); + void deassertPMDriverCall( IOPMDriverCallEntry * callEntry ); + IOReturn changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, IOPMRequestTag tag ); + IOReturn changePowerStateForRootDomain( IOPMPowerStateIndex ordinal ); + IOReturn setIgnoreIdleTimer( bool ignore ); + IOReturn quiescePowerTree( void * target, IOPMCompletionAction action, void * param ); + uint32_t getPowerStateForClient( const OSSymbol * client ); + static const char * getIOMessageString( uint32_t msg ); + static void setAdvisoryTickleEnable( bool enable ); + void reset_watchdog_timer(IOService *obj, int timeout); + void start_watchdog_timer( void ); + void stop_watchdog_timer( void ); + void start_watchdog_timer(uint64_t deadline); + IOReturn registerInterestForNotifier( IONotifier *notify, const OSSymbol * typeOfInterest, + IOServiceInterestHandler handler, void * target, void * ref ); + + static IOWorkLoop * getIOPMWorkloop( void ); + bool getBlockingDriverCall(thread_t *thread, const void **callMethod); protected: - bool tellClientsWithResponse( int messageType ); - void tellClients( int messageType ); - void PMDebug( uint32_t event, uintptr_t param1, uintptr_t param2 ); + bool tellClientsWithResponse( int messageType ); + void tellClients( int messageType ); + void PMDebug( uint32_t event, uintptr_t param1, uintptr_t param2 ); private: #ifndef __LP64__ - void ack_timer_ticked ( void ); - IOReturn serializedAllowPowerChange2 ( unsigned long ); - IOReturn serializedCancelPowerChange2 ( unsigned long ); - IOReturn powerDomainWillChangeTo( IOPMPowerFlags, IOPowerConnection * ); - IOReturn powerDomainDidChangeTo( IOPMPowerFlags, IOPowerConnection * ); + void ack_timer_ticked( void ); + IOReturn serializedAllowPowerChange2( unsigned long ); + IOReturn 
serializedCancelPowerChange2( unsigned long ); + IOReturn powerDomainWillChangeTo( IOPMPowerFlags, IOPowerConnection * ); + IOReturn powerDomainDidChangeTo( IOPMPowerFlags, IOPowerConnection * ); #endif - void PMfree( void ); - bool tellChangeDown1 ( unsigned long ); - bool tellChangeDown2 ( unsigned long ); - IOReturn startPowerChange( IOPMPowerChangeFlags, IOPMPowerStateIndex, IOPMPowerFlags, IOPowerConnection *, IOPMPowerFlags ); - void setParentInfo ( IOPMPowerFlags, IOPowerConnection *, bool ); - IOReturn notifyAll ( uint32_t nextMS ); - bool notifyChild ( IOPowerConnection * child ); - IOPMPowerStateIndex getPowerStateForDomainFlags( IOPMPowerFlags flags ); - - // power change initiated by driver - void OurChangeStart( void ); - void OurSyncStart ( void ); - void OurChangeTellClientsPowerDown ( void ); - void OurChangeTellUserPMPolicyPowerDown ( void ); - void OurChangeTellPriorityClientsPowerDown ( void ); - void OurChangeTellCapabilityWillChange ( void ); - void OurChangeNotifyInterestedDriversWillChange ( void ); - void OurChangeSetPowerState ( void ); - void OurChangeWaitForPowerSettle ( void ); - void OurChangeNotifyInterestedDriversDidChange ( void ); - void OurChangeTellCapabilityDidChange ( void ); - void OurChangeFinish ( void ); - - // downward power change initiated by a power parent - IOReturn ParentChangeStart( void ); - void ParentChangeTellPriorityClientsPowerDown ( void ); - void ParentChangeTellCapabilityWillChange ( void ); - void ParentChangeNotifyInterestedDriversWillChange ( void ); - void ParentChangeSetPowerState ( void ); - void ParentChangeWaitForPowerSettle ( void ); - void ParentChangeNotifyInterestedDriversDidChange ( void ); - void ParentChangeTellCapabilityDidChange ( void ); - void ParentChangeAcknowledgePowerChange ( void ); - void ParentChangeRootChangeDown( void ); - - void all_done ( void ); - void start_ack_timer ( void ); - void stop_ack_timer ( void ); - void start_ack_timer( UInt32 value, UInt32 scale ); - void startSettleTimer( void ); - void start_spindump_timer( const char * delay_type ); - void stop_spindump_timer( void ); - bool checkForDone ( void ); - bool responseValid ( uint32_t x, int pid ); - void computeDesiredState( unsigned long tempDesire, bool computeOnly ); - void trackSystemSleepPreventers( IOPMPowerStateIndex, IOPMPowerStateIndex, IOPMPowerChangeFlags ); - void tellSystemCapabilityChange( uint32_t nextMS ); - void restartIdleTimer( void ); - - static void ack_timer_expired( thread_call_param_t, thread_call_param_t ); - static void watchdog_timer_expired ( thread_call_param_t arg0, thread_call_param_t arg1 ); - static void spindump_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ); - static IOReturn actionAckTimerExpired(OSObject *, void *, void *, void *, void * ); - static IOReturn actionSpinDumpTimerExpired(OSObject *, void *, void *, void *, void * ); - - static IOReturn actionDriverCalloutDone(OSObject *, void *, void *, void *, void * ); - static IOPMRequest * acquirePMRequest( IOService * target, IOOptionBits type, IOPMRequest * active = 0 ); - static void releasePMRequest( IOPMRequest * request ); - static void pmDriverCallout( IOService * from ); - static void pmTellAppWithResponse( OSObject * object, void * context ); - static void pmTellClientWithResponse( OSObject * object, void * context ); - static void pmTellCapabilityAppWithResponse ( OSObject * object, void * arg ); - static void pmTellCapabilityClientWithResponse( OSObject * object, void * arg ); - static void submitPMRequest( IOPMRequest * 
request ); - static void submitPMRequests( IOPMRequest ** request, IOItemCount count ); - bool ackTimerTick( void ); - void addPowerChild1( IOPMRequest * request ); - void addPowerChild2( IOPMRequest * request ); - void addPowerChild3( IOPMRequest * request ); - void adjustPowerState( uint32_t clamp = 0 ); - void handlePMstop( IOPMRequest * request ); - void handleRegisterPowerDriver( IOPMRequest * request ); - bool handleAcknowledgePowerChange( IOPMRequest * request ); - void handlePowerDomainWillChangeTo( IOPMRequest * request ); - void handlePowerDomainDidChangeTo( IOPMRequest * request ); - void handleRequestPowerState( IOPMRequest * request ); - void handlePowerOverrideChanged( IOPMRequest * request ); - void handleActivityTickle( IOPMRequest * request ); - void handleInterestChanged( IOPMRequest * request ); - void handleSynchronizePowerTree( IOPMRequest * request ); - void executePMRequest( IOPMRequest * request ); - bool actionPMWorkQueueInvoke( IOPMRequest * request, IOPMWorkQueue * queue ); - bool actionPMWorkQueueRetire( IOPMRequest * request, IOPMWorkQueue * queue ); - bool actionPMRequestQueue( IOPMRequest * request, IOPMRequestQueue * queue ); - bool actionPMReplyQueue( IOPMRequest * request, IOPMRequestQueue * queue ); - bool actionPMCompletionQueue( IOPMRequest * request, IOPMCompletionQueue * queue ); - bool notifyInterestedDrivers( void ); - void notifyInterestedDriversDone( void ); - bool notifyControllingDriver( void ); - void notifyControllingDriverDone( void ); - void driverSetPowerState( void ); - void driverInformPowerChange( void ); - bool isPMBlocked( IOPMRequest * request, int count ); - void notifyChildren( void ); - void notifyChildrenOrdered( void ); - void notifyChildrenDelayed( void ); - void notifyRootDomain( void ); - void notifyRootDomainDone( void ); - void cleanClientResponses ( bool logErrors ); - void updatePowerClient( const OSSymbol * client, uint32_t powerState ); - void removePowerClient( const OSSymbol * client ); - IOReturn requestPowerState( const OSSymbol * client, uint32_t state ); - IOReturn requestDomainPower( IOPMPowerStateIndex ourPowerState, IOOptionBits options = 0 ); - IOReturn configurePowerStatesReport( IOReportConfigureAction action, void *result ); - IOReturn updatePowerStatesReport( IOReportConfigureAction action, void *result, void *destination ); - IOReturn configureSimplePowerReport(IOReportConfigureAction action, void *result ); - IOReturn updateSimplePowerReport( IOReportConfigureAction action, void *result, void *destination ); - void waitForPMDriverCall( IOService * target = 0 ); + void PMfree( void ); + bool tellChangeDown1( unsigned long ); + bool tellChangeDown2( unsigned long ); + IOReturn startPowerChange( IOPMPowerChangeFlags, IOPMPowerStateIndex, IOPMPowerFlags, IOPowerConnection *, IOPMPowerFlags ); + void setParentInfo( IOPMPowerFlags, IOPowerConnection *, bool ); + IOReturn notifyAll( uint32_t nextMS ); + bool notifyChild( IOPowerConnection * child ); + IOPMPowerStateIndex getPowerStateForDomainFlags( IOPMPowerFlags flags ); + +// power change initiated by driver + void OurChangeStart( void ); + void OurSyncStart( void ); + void OurChangeTellClientsPowerDown( void ); + void OurChangeTellUserPMPolicyPowerDown( void ); + void OurChangeTellPriorityClientsPowerDown( void ); + void OurChangeTellCapabilityWillChange( void ); + void OurChangeNotifyInterestedDriversWillChange( void ); + void OurChangeSetPowerState( void ); + void OurChangeWaitForPowerSettle( void ); + void OurChangeNotifyInterestedDriversDidChange( void 
); + void OurChangeTellCapabilityDidChange( void ); + void OurChangeFinish( void ); + +// downward power change initiated by a power parent + IOReturn ParentChangeStart( void ); + void ParentChangeTellPriorityClientsPowerDown( void ); + void ParentChangeTellCapabilityWillChange( void ); + void ParentChangeNotifyInterestedDriversWillChange( void ); + void ParentChangeSetPowerState( void ); + void ParentChangeWaitForPowerSettle( void ); + void ParentChangeNotifyInterestedDriversDidChange( void ); + void ParentChangeTellCapabilityDidChange( void ); + void ParentChangeAcknowledgePowerChange( void ); + void ParentChangeRootChangeDown( void ); + + void all_done( void ); + void start_ack_timer( void ); + void stop_ack_timer( void ); + void start_ack_timer( UInt32 value, UInt32 scale ); + void startSettleTimer( void ); + void start_spindump_timer( const char * delay_type ); + void stop_spindump_timer( void ); + bool checkForDone( void ); + bool responseValid( uint32_t x, int pid ); + void computeDesiredState( unsigned long tempDesire, bool computeOnly ); + void trackSystemSleepPreventers( IOPMPowerStateIndex, IOPMPowerStateIndex, IOPMPowerChangeFlags ); + void tellSystemCapabilityChange( uint32_t nextMS ); + void restartIdleTimer( void ); + + static void ack_timer_expired( thread_call_param_t, thread_call_param_t ); + static void watchdog_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ); + static void spindump_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ); + static IOReturn actionAckTimerExpired(OSObject *, void *, void *, void *, void * ); + static IOReturn actionSpinDumpTimerExpired(OSObject *, void *, void *, void *, void * ); + + static IOReturn actionDriverCalloutDone(OSObject *, void *, void *, void *, void * ); + static IOPMRequest * acquirePMRequest( IOService * target, IOOptionBits type, IOPMRequest * active = 0 ); + static void releasePMRequest( IOPMRequest * request ); + static void pmDriverCallout( IOService * from ); + static void pmTellAppWithResponse( OSObject * object, void * context ); + static void pmTellClientWithResponse( OSObject * object, void * context ); + static void pmTellCapabilityAppWithResponse( OSObject * object, void * arg ); + static void pmTellCapabilityClientWithResponse( OSObject * object, void * arg ); + static void submitPMRequest(LIBKERN_CONSUMED IOPMRequest * request ); + static void submitPMRequests( IOPMRequest ** request, IOItemCount count ); + bool ackTimerTick( void ); + void addPowerChild1( IOPMRequest * request ); + void addPowerChild2( IOPMRequest * request ); + void addPowerChild3( IOPMRequest * request ); + void adjustPowerState( uint32_t clamp = 0 ); + void handlePMstop( IOPMRequest * request ); + void handleRegisterPowerDriver( IOPMRequest * request ); + bool handleAcknowledgePowerChange( IOPMRequest * request ); + void handlePowerDomainWillChangeTo( IOPMRequest * request ); + void handlePowerDomainDidChangeTo( IOPMRequest * request ); + void handleRequestPowerState( IOPMRequest * request ); + void handlePowerOverrideChanged( IOPMRequest * request ); + void handleActivityTickle( IOPMRequest * request ); + void handleInterestChanged( IOPMRequest * request ); + void handleSynchronizePowerTree( IOPMRequest * request ); + void executePMRequest( IOPMRequest * request ); + bool actionPMWorkQueueInvoke( IOPMRequest * request, IOPMWorkQueue * queue ); + bool actionPMWorkQueueRetire( IOPMRequest * request, IOPMWorkQueue * queue ); + bool actionPMRequestQueue( IOPMRequest * request, IOPMRequestQueue * queue ); + bool 
actionPMReplyQueue( IOPMRequest * request, IOPMRequestQueue * queue ); + bool actionPMCompletionQueue( IOPMRequest * request, IOPMCompletionQueue * queue ); + bool notifyInterestedDrivers( void ); + void notifyInterestedDriversDone( void ); + bool notifyControllingDriver( void ); + void notifyControllingDriverDone( void ); + void driverSetPowerState( void ); + void driverInformPowerChange( void ); + bool isPMBlocked( IOPMRequest * request, int count ); + void notifyChildren( void ); + void notifyChildrenOrdered( void ); + void notifyChildrenDelayed( void ); + void notifyRootDomain( void ); + void notifyRootDomainDone( void ); + void cleanClientResponses( bool logErrors ); + void updatePowerClient( const OSSymbol * client, uint32_t powerState ); + void removePowerClient( const OSSymbol * client ); + IOReturn requestPowerState( const OSSymbol * client, uint32_t state ); + IOReturn requestDomainPower( IOPMPowerStateIndex ourPowerState, IOOptionBits options = 0 ); + IOReturn configurePowerStatesReport( IOReportConfigureAction action, void *result ); + IOReturn updatePowerStatesReport( IOReportConfigureAction action, void *result, void *destination ); + IOReturn configureSimplePowerReport(IOReportConfigureAction action, void *result ); + IOReturn updateSimplePowerReport( IOReportConfigureAction action, void *result, void *destination ); + void waitForPMDriverCall( IOService * target = 0 ); #endif /* XNU_KERNEL_PRIVATE */ }; diff --git a/iokit/IOKit/IOServicePM.h b/iokit/IOKit/IOServicePM.h index f04fe93a1..e20f0f086 100644 --- a/iokit/IOKit/IOServicePM.h +++ b/iokit/IOKit/IOServicePM.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
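One substantive change hides in the reindentation above: submitPMRequest() now annotates its IOPMRequest parameter with LIBKERN_CONSUMED, which (where the toolchain supports it) marks the reference as consumed by the callee for Clang's static analyzer. A minimal sketch of the idiom, with a hypothetical MY_CONSUMED macro and Request type standing in for the libkern machinery:

#if defined(__has_attribute) && __has_attribute(os_consumed)
#define MY_CONSUMED __attribute__((os_consumed))  /* analyzer: callee takes over the reference */
#else
#define MY_CONSUMED                               /* no-op on older toolchains */
#endif

class Request;  /* hypothetical reference-counted request type */

/* The caller must not release 'req' after this call; the callee will. */
static void submitRequest(MY_CONSUMED Request * req);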
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -69,10 +69,10 @@ typedef uint32_t IOPMPowerChangeFlags; typedef uint32_t IOPMRequestTag; struct IOPMDriverCallEntry { - queue_chain_t link; - thread_t thread; - IOService * target; - const void *callMethod; + queue_chain_t link; + thread_t thread; + IOService * target; + const void *callMethod; }; // Power clients (desires) @@ -93,20 +93,20 @@ extern const OSSymbol * gIOPMPowerClientRootDomain; /* Deprecated in version 10.5 */ class IOPMprot : public OSObject { - friend class IOService; - - OSDeclareDefaultStructors(IOPMprot) + friend class IOService; + + OSDeclareDefaultStructors(IOPMprot) public: - const char * ourName; - IOPlatformExpert * thePlatform; - unsigned long theNumberOfPowerStates; - IOPMPowerState thePowerStates[IOPMMaxPowerStates]; - IOService * theControllingDriver; - unsigned long aggressiveness; - unsigned long current_aggressiveness_values[kMaxType+1]; - bool current_aggressiveness_valid[kMaxType+1]; - unsigned long myCurrentState; + const char * ourName; + IOPlatformExpert * thePlatform; + unsigned long theNumberOfPowerStates; + IOPMPowerState thePowerStates[IOPMMaxPowerStates]; + IOService * theControllingDriver; + unsigned long aggressiveness; + unsigned long current_aggressiveness_values[kMaxType + 1]; + bool current_aggressiveness_valid[kMaxType + 1]; + unsigned long myCurrentState; }; #endif /* PM_VARS_SUPPORT */ #endif /* XNU_KERNEL_PRIVATE */ diff --git a/iokit/IOKit/IOSharedDataQueue.h b/iokit/IOKit/IOSharedDataQueue.h index 5b8a9f3e6..f0347c8c6 100644 --- a/iokit/IOKit/IOSharedDataQueue.h +++ b/iokit/IOKit/IOSharedDataQueue.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -57,114 +57,118 @@ typedef struct _IODataQueueEntry IODataQueueEntry; */ class IOSharedDataQueue : public IODataQueue { - OSDeclareDefaultStructors(IOSharedDataQueue) + OSDeclareDefaultStructors(IOSharedDataQueue) - struct ExpansionData { - UInt32 queueSize; - }; - /*! @var reserved - Reserved for future use. (Internal use only) */ - ExpansionData * _reserved; + struct ExpansionData { + UInt32 queueSize; + }; +/*! @var reserved + * Reserved for future use. (Internal use only) */ + ExpansionData * _reserved; protected: - virtual void free() APPLE_KEXT_OVERRIDE; - - /*! - * @function getQueueSize - * @abstract Returns the size of the data queue. - * @discussion Use this method to access the size of the data queue. 
Do not access the value of size directly, as it can get modified from userspace and is not reliable. - * @result Returns the size of the data queue, or zero in case of failure. - */ - UInt32 getQueueSize(); - - /*! - * @function setQueueSize - * @abstract Stores the value of the size of the data queue. - * @discussion Use this method to store the value of the size of the data queue. Do not access the value of size directly, as it can get modified from userspace and is not reliable. - * @param size The size of the data queue. - * @result Returns true in case of success, false otherwise. - */ - Boolean setQueueSize(UInt32 size); + virtual void free() APPLE_KEXT_OVERRIDE; + +/*! + * @function getQueueSize + * @abstract Returns the size of the data queue. + * @discussion Use this method to access the size of the data queue. Do not access the value of size directly, as it can get modified from userspace and is not reliable. + * @result Returns the size of the data queue, or zero in case of failure. + */ + UInt32 getQueueSize(); + +/*! + * @function setQueueSize + * @abstract Stores the value of the size of the data queue. + * @discussion Use this method to store the value of the size of the data queue. Do not access the value of size directly, as it can get modified from userspace and is not reliable. + * @param size The size of the data queue. + * @result Returns true in case of success, false otherwise. + */ + Boolean setQueueSize(UInt32 size); public: - /*! - * @function withCapacity - * @abstract Static method that creates a new IOSharedDataQueue instance with the capacity specified in the size parameter. - * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE macro in . The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in .
This method allocates a new IODataQueue instance and then calls initWithCapacity() with the given size parameter. If the initWithCapacity() fails, the new instance is released and zero is returned. - * @param size The size of the data queue memory region. - * @result Returns the newly allocated IOSharedDataQueue instance. Zero is returned on failure. - */ - static IOSharedDataQueue *withCapacity(UInt32 size); - - /*! - * @function withEntries - * @abstract Static method that creates a new IOSharedDataQueue instance with the specified number of entries of the given size. - * @discussion This method will create a new IOSharedDataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size of number of entries that can be added to the queue.
This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters. If the initWithEntries() fails, the new instance is released and zero is returned. - * @param numEntries Number of entries to allocate space for. - * @param entrySize Size of each entry. - * @result Reeturns the newly allocated IOSharedDataQueue instance. Zero is returned on failure. - */ - static IOSharedDataQueue *withEntries(UInt32 numEntries, UInt32 entrySize); - - /*! - * @function initWithCapacity - * @abstract Initializes an IOSharedDataQueue instance with the capacity specified in the size parameter. - * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE and DATA_QUEUE_MEMORY_APPENDIX_SIZE macro in . The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in . - * @param size The size of the data queue memory region. - * @result Returns true on success and false on failure. - */ - virtual Boolean initWithCapacity(UInt32 size) APPLE_KEXT_OVERRIDE; - - /*! - * @function getMemoryDescriptor - * @abstract Returns a memory descriptor covering the IODataQueueMemory region. - * @discussion The IOMemoryDescriptor instance returned by this method is intended to be mapped into a user process. This is the memory region that the IODataQueueClient code operates on. - * @result Returns a newly allocated IOMemoryDescriptor for the IODataQueueMemory region. Returns zero on failure. - */ - virtual IOMemoryDescriptor *getMemoryDescriptor() APPLE_KEXT_OVERRIDE; - - /*! - * @function peek - * @abstract Used to peek at the next entry on the queue. - * @discussion This function can be used to look at the next entry which allows the entry to be received without having to copy it with dequeue. In order to do this, call peek to get the entry. Then call dequeue with a NULL data pointer. That will cause the head to be moved to the next entry, but no memory to be copied. - * @result Returns a pointer to the next IODataQueueEntry if one is available. 0 (NULL) is returned if the queue is empty. - */ - virtual IODataQueueEntry * peek(); - - /*! - * @function dequeue - * @abstract Dequeues the next available entry on the queue and copies it into the given data pointer. - * @discussion This function will dequeue the next available entry on the queue. If a data pointer is provided, it will copy the data into the memory region if there is enough space available as specified in the dataSize parameter. If no data pointer is provided, it will simply move the head value past the current entry. - * @param data A pointer to the data memory region in which to copy the next entry data on the queue. If this parameter is 0 (NULL), it will simply move to the next entry. - * @param dataSize A pointer to the size of the data parameter. On return, this contains the size of the actual entry data - even if the original size was not large enough. - * @result Returns true on success and false on failure. Typically failure means that the queue is empty. - */ - virtual Boolean dequeue(void *data, UInt32 *dataSize); - - /*! - * @function enqueue - * @abstract Enqueues a new entry on the queue. - * @discussion This method adds a new data entry of dataSize to the queue. 
It sets the size parameter of the entry pointed to by the tail value and copies the memory pointed to by the data parameter in place in the queue. Once that is done, it moves the tail to the next available location. When attempting to add a new entry towards the end of the queue and there isn't enough space at the end, it wraps back to the beginning.
If the queue is empty when a new entry is added, sendDataAvailableNotification() is called to send a message to the user process that data is now available. - * @param data Pointer to the data to be added to the queue. - * @param dataSize Size of the data pointed to by data. - * @result Returns true on success and false on failure. Typically failure means that the queue is full. - */ - virtual Boolean enqueue(void *data, UInt32 dataSize) APPLE_KEXT_OVERRIDE; +/*! + * @function withCapacity + * @abstract Static method that creates a new IOSharedDataQueue instance with the capacity specified in the size parameter. + * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE macro in . The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in .
This method allocates a new IODataQueue instance and then calls initWithCapacity() with the given size parameter. If the initWithCapacity() fails, the new instance is released and zero is returned. + * @param size The size of the data queue memory region. + * @result Returns the newly allocated IOSharedDataQueue instance. Zero is returned on failure. + */ + static IOSharedDataQueue *withCapacity(UInt32 size); + +/*! + * @function withEntries + * @abstract Static method that creates a new IOSharedDataQueue instance with the specified number of entries of the given size. + * @discussion This method will create a new IOSharedDataQueue instance with enough capacity for numEntries of entrySize. It does account for the IODataQueueEntry overhead for each entry. Note that the numEntries and entrySize are simply used to determine the data region size. They do not actually restrict the size or number of entries that can be added to the queue.
This method allocates a new IODataQueue instance and then calls initWithEntries() with the given numEntries and entrySize parameters. If the initWithEntries() fails, the new instance is released and zero is returned. + * @param numEntries Number of entries to allocate space for. + * @param entrySize Size of each entry. + * @result Returns the newly allocated IOSharedDataQueue instance. Zero is returned on failure. + */ + static IOSharedDataQueue *withEntries(UInt32 numEntries, UInt32 entrySize); + +/*! + * @function initWithCapacity + * @abstract Initializes an IOSharedDataQueue instance with the capacity specified in the size parameter. + * @discussion The actual size of the entire data queue memory region (to be shared into a user process) is equal to the capacity plus the IODataQueueMemory overhead. This overhead value can be determined from the DATA_QUEUE_MEMORY_HEADER_SIZE and DATA_QUEUE_MEMORY_APPENDIX_SIZE macro in . The size of the data queue memory region must include space for the overhead of each IODataQueueEntry. This entry overhead can be determined from the DATA_QUEUE_ENTRY_HEADER_SIZE macro in . + * @param size The size of the data queue memory region. + * @result Returns true on success and false on failure. + */ + virtual Boolean initWithCapacity(UInt32 size) APPLE_KEXT_OVERRIDE; + +/*! + * @function getMemoryDescriptor + * @abstract Returns a memory descriptor covering the IODataQueueMemory region. + * @discussion The IOMemoryDescriptor instance returned by this method is intended to be mapped into a user process. This is the memory region that the IODataQueueClient code operates on. + * @result Returns a newly allocated IOMemoryDescriptor for the IODataQueueMemory region. Returns zero on failure. + */ + virtual IOMemoryDescriptor *getMemoryDescriptor() APPLE_KEXT_OVERRIDE; + +/*! + * @function peek + * @abstract Used to peek at the next entry on the queue. + * @discussion This function can be used to look at the next entry which allows the entry to be received without having to copy it with dequeue. In order to do this, call peek to get the entry. Then call dequeue with a NULL data pointer. That will cause the head to be moved to the next entry, but no memory to be copied. + * @result Returns a pointer to the next IODataQueueEntry if one is available. 0 (NULL) is returned if the queue is empty. + */ + virtual IODataQueueEntry * peek(); + +/*! + * @function dequeue + * @abstract Dequeues the next available entry on the queue and copies it into the given data pointer. + * @discussion This function will dequeue the next available entry on the queue. If a data pointer is provided, it will copy the data into the memory region if there is enough space available as specified in the dataSize parameter. If no data pointer is provided, it will simply move the head value past the current entry. + * @param data A pointer to the data memory region in which to copy the next entry data on the queue. If this parameter is 0 (NULL), it will simply move to the next entry. + * @param dataSize A pointer to the size of the data parameter. On return, this contains the size of the actual entry data - even if the original size was not large enough. + * @result Returns true on success and false on failure. Typically failure means that the queue is empty. + */ + virtual Boolean dequeue(void *data, UInt32 *dataSize); + +/*! + * @function enqueue + * @abstract Enqueues a new entry on the queue. + * @discussion This method adds a new data entry of dataSize to the queue.
It sets the size parameter of the entry pointed to by the tail value and copies the memory pointed to by the data parameter in place in the queue. Once that is done, it moves the tail to the next available location. When attempting to add a new entry towards the end of the queue and there isn't enough space at the end, it wraps back to the beginning.
If the queue is empty when a new entry is added, sendDataAvailableNotification() is called to send a message to the user process that data is now available. + * @param data Pointer to the data to be added to the queue. + * @param dataSize Size of the data pointed to by data. + * @result Returns true on success and false on failure. Typically failure means that the queue is full. + */ + virtual Boolean enqueue(void *data, UInt32 dataSize) APPLE_KEXT_OVERRIDE; #ifdef PRIVATE - /* workaround for queue.h redefine, please do not use */ - __inline__ Boolean enqueue_tail(void *data, UInt32 dataSize) { return (IOSharedDataQueue::enqueue(data, dataSize)); } +/* workaround for queue.h redefine, please do not use */ + __inline__ Boolean + enqueue_tail(void *data, UInt32 dataSize) + { + return IOSharedDataQueue::enqueue(data, dataSize); + } #endif #if APPLE_KEXT_VTABLE_PADDING - OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 0); - OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 1); - OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 2); - OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 3); - OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 4); - OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 5); - OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 6); - OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 7); + OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 0); + OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 1); + OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 2); + OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 3); + OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 4); + OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 5); + OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 6); + OSMetaClassDeclareReservedUnused(IOSharedDataQueue, 7); #endif }; diff --git a/iokit/IOKit/IOSharedLock.h b/iokit/IOKit/IOSharedLock.h index 795007451..195ad9fd2 100644 --- a/iokit/IOKit/IOSharedLock.h +++ b/iokit/IOKit/IOSharedLock.h @@ -2,14 +2,14 @@ * Copyright (c) 1998-2010 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/iokit/IOKit/IOStatistics.h b/iokit/IOKit/IOStatistics.h index 0c4a1abb2..e3ebdc2b2 100644 --- a/iokit/IOKit/IOStatistics.h +++ b/iokit/IOKit/IOStatistics.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
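Taken together, the IOSharedDataQueue documentation above describes a complete producer/consumer contract. A hedged kernel-side sketch, assuming a kext context (the MyEvent payload and the entry count are illustrative; only calls declared in this header are used):

#include <IOKit/IOSharedDataQueue.h>

struct MyEvent {                      // illustrative payload
    uint32_t code;
    uint64_t timestamp;
};

static void
sharedQueueExample(void)
{
    // Space for 64 entries; per-entry IODataQueueEntry overhead is
    // accounted for by withEntries(), per the @discussion above.
    IOSharedDataQueue * queue = IOSharedDataQueue::withEntries(64, sizeof(MyEvent));
    if (!queue) {
        return;
    }

    MyEvent ev = { 1, 0 };
    (void) queue->enqueue(&ev, sizeof(ev));     // false typically means the queue is full

    // Region a user client would map into its task; IODataQueueClient
    // code operates on this memory.
    IOMemoryDescriptor * md = queue->getMemoryDescriptor();

    // Copying dequeue: dataSize is in/out, set to the entry's actual size.
    MyEvent out;
    UInt32 outSize = sizeof(out);
    (void) queue->dequeue(&out, &outSize);

    // Zero-copy drain, as documented for peek(): inspect the entry in
    // place, then advance the head with a NULL data pointer.
    if (queue->peek()) {
        (void) queue->dequeue(NULL, NULL);
    }

    if (md) {
        md->release();
    }
    queue->release();
}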
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,7 +34,7 @@ #define IOSTATISTICS_SIG_WORKLOOP 'IOSW' /* Update when the binary format changes */ -#define IOSTATISTICS_VER 0x2 +#define IOSTATISTICS_VER 0x2 enum { kIOStatisticsDriverNameLength = 64, @@ -63,17 +63,17 @@ enum { #pragma pack(4) -/* Event Counters */ +/* Event Counters */ typedef struct IOStatisticsInterruptEventSources { uint32_t created; uint32_t produced; - uint32_t checksForWork; + uint32_t checksForWork; } IOStatisticsInterruptEventSources; typedef struct IOStatisticsTimerEventSources { uint32_t created; - uint32_t openGateCalls; + uint32_t openGateCalls; uint32_t closeGateCalls; uint64_t timeOnGate; uint32_t timeouts; @@ -94,12 +94,12 @@ typedef struct IOStatisticsCommandGates { uint64_t timeOnGate; uint32_t actionCalls; } IOStatisticsCommandGates; - + typedef struct IOStatisticsCommandQueues { uint32_t created; uint32_t actionCalls; } IOStatisticsCommandQueues; - + typedef struct IOStatisticsUserClients { uint32_t created; uint32_t clientCalls; @@ -209,7 +209,7 @@ typedef struct IOStatisticsUserClientCall { typedef struct IOStatisticsUserClientHeader { uint32_t sig; /* 'IOSU */ - uint32_t ver; /* incremented with every data revision */ + uint32_t ver; /* incremented with every data revision */ uint32_t seq; /* sequence ID */ uint32_t processes; struct IOStatisticsUserClientCall userClientCalls[]; diff --git a/iokit/IOKit/IOStatisticsPrivate.h b/iokit/IOKit/IOStatisticsPrivate.h index a41230c3d..ba3c5063f 100644 --- a/iokit/IOKit/IOStatisticsPrivate.h +++ b/iokit/IOKit/IOStatisticsPrivate.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -157,13 +157,13 @@ typedef struct IOWorkLoopDependency { OSKextLoadTag loadTag; } IOWorkLoopDependency; -typedef struct IOWorkLoopCounter { +typedef struct IOWorkLoopCounter { SLIST_ENTRY(IOWorkLoopCounter) link; KextNode *parentKext; int attachedEventSources; IOWorkLoop *workLoop; uint64_t startTimeStamp; - uint64_t timeOnGate; + uint64_t timeOnGate; uint32_t closeGateCalls; uint32_t openGateCalls; typedef RB_HEAD(DependencyTree, IOWorkLoopDependency) DependencyTreeHead; @@ -196,7 +196,7 @@ class IOStatistics { static uint32_t attachedEventSources; static KextNode *kextHint; - + static IOWorkLoopDependency *nextWorkLoopDependency; typedef RB_HEAD(KextTree, KextNode) KextTreeHead; @@ -236,7 +236,7 @@ class IOStatistics { static void releaseKextNode(KextNode *node); public: - + static void initialize(); static void onKextLoad(OSKext *kext, kmod_info_t *kmod_info); @@ -246,7 +246,7 @@ public: static IOEventSourceCounter *registerEventSource(OSObject *inOwner); static void unregisterEventSource(IOEventSourceCounter *counter); - + static IOWorkLoopCounter *registerWorkLoop(IOWorkLoop *workLoop); static void unregisterWorkLoop(IOWorkLoopCounter *counter); @@ -257,91 +257,111 @@ public: static int getWorkLoopStatistics(sysctl_req *req); static int getUserClientStatistics(sysctl_req *req); - /* Inlines for counter manipulation. - * - * NOTE: counter access is not expressly guarded here so as not to incur performance penalties - * in the instrumented parent objects. Writes are arranged so as to be protected by pre-existing - * locks in the parent where appropriate, but reads have no such guarantee. Counters should - * therefore be regarded as providing an indication of current state, rather than precisely - * accurate statistics. - */ - - static inline void setCounterType(IOEventSourceCounter *counter, IOStatisticsCounterType type) { +/* Inlines for counter manipulation. + * + * NOTE: counter access is not expressly guarded here so as not to incur performance penalties + * in the instrumented parent objects. Writes are arranged so as to be protected by pre-existing + * locks in the parent where appropriate, but reads have no such guarantee. Counters should + * therefore be regarded as providing an indication of current state, rather than precisely + * accurate statistics. 
+ */ + + static inline void + setCounterType(IOEventSourceCounter *counter, IOStatisticsCounterType type) + { if (counter) { counter->type = type; } } - static inline void countOpenGate(IOEventSourceCounter *counter) { + static inline void + countOpenGate(IOEventSourceCounter *counter) + { if (counter) { counter->timeOnGate += mach_absolute_time() - counter->startTimeStamp; counter->openGateCalls++; } } - static inline void countCloseGate(IOEventSourceCounter *counter) { + static inline void + countCloseGate(IOEventSourceCounter *counter) + { if (counter) { counter->startTimeStamp = mach_absolute_time(); counter->closeGateCalls++; } } - /* Interrupt */ - static inline void countInterruptCheckForWork(IOEventSourceCounter *counter) { +/* Interrupt */ + static inline void + countInterruptCheckForWork(IOEventSourceCounter *counter) + { if (counter) { counter->u.interrupt.checksForWork++; } } - static inline void countInterrupt(IOEventSourceCounter *counter) { + static inline void + countInterrupt(IOEventSourceCounter *counter) + { if (counter) { counter->u.interrupt.produced++; } } - /* CommandQueue */ - static inline void countCommandQueueActionCall(IOEventSourceCounter *counter) { +/* CommandQueue */ + static inline void + countCommandQueueActionCall(IOEventSourceCounter *counter) + { if (counter) { counter->u.commandQueue.actionCalls++; } } - /* CommandGate */ - static inline void countCommandGateActionCall(IOEventSourceCounter *counter) { +/* CommandGate */ + static inline void + countCommandGateActionCall(IOEventSourceCounter *counter) + { if (counter) { counter->u.commandGate.actionCalls++; } } - /* Timer */ - static inline void countTimerTimeout(IOEventSourceCounter *counter) { +/* Timer */ + static inline void + countTimerTimeout(IOEventSourceCounter *counter) + { if (counter) { counter->u.timer.timeouts++; } } - /* WorkLoop */ +/* WorkLoop */ static void attachWorkLoopEventSource(IOWorkLoopCounter *wlc, IOEventSourceCounter *esc); static void detachWorkLoopEventSource(IOWorkLoopCounter *wlc, IOEventSourceCounter *esc); - static inline void countWorkLoopOpenGate(IOWorkLoopCounter *counter) { + static inline void + countWorkLoopOpenGate(IOWorkLoopCounter *counter) + { if (counter) { counter->timeOnGate += mach_absolute_time() - counter->startTimeStamp; counter->openGateCalls++; } } - static inline void countWorkLoopCloseGate(IOWorkLoopCounter *counter) { + static inline void + countWorkLoopCloseGate(IOWorkLoopCounter *counter) + { if (counter) { counter->startTimeStamp = mach_absolute_time(); counter->closeGateCalls++; } } - /* IOLib allocations */ +/* IOLib allocations */ static void countAlloc(uint32_t index, vm_size_t size); - /* UserClient */ +/* UserClient */ static void countUserClientCall(IOUserClient *client); }; @@ -351,7 +371,10 @@ public: class IOStatistics { public: - static void initialize() {} + static void + initialize() + { + } }; #endif /* IOKITSTATS */ diff --git a/iokit/IOKit/IOSubMemoryDescriptor.h b/iokit/IOKit/IOSubMemoryDescriptor.h index 03f1850af..a228cb4ff 100644 --- a/iokit/IOKit/IOSubMemoryDescriptor.h +++ b/iokit/IOKit/IOSubMemoryDescriptor.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
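The countCloseGate()/countOpenGate() inlines above implement a simple time-on-gate accounting pattern: stamp mach_absolute_time() when the gate closes, accumulate the delta when it reopens. The same technique in isolation (GateStats is a hypothetical stand-in for the counter structs above):

#include <mach/mach_time.h>

struct GateStats {                 // hypothetical analogue of the counter fields above
    uint64_t startTimeStamp;
    uint64_t timeOnGate;           // absolute-time ticks spent with the gate closed
    uint32_t closeGateCalls;
    uint32_t openGateCalls;
};

static inline void
statsCloseGate(GateStats * s)
{
    s->startTimeStamp = mach_absolute_time();   // remember when the gate closed
    s->closeGateCalls++;
}

static inline void
statsOpenGate(GateStats * s)
{
    // Accumulate the closed interval; as the NOTE above warns, reads of
    // these counters are unsynchronized and only indicative.
    s->timeOnGate += mach_absolute_time() - s->startTimeStamp;
    s->openGateCalls++;
}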
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,88 +32,88 @@ #include /*! @class IOSubMemoryDescriptor : public IOMemoryDescriptor - @abstract The IOSubMemoryDescriptor object describes a memory area made up of a portion of another IOMemoryDescriptor. - @discussion The IOSubMemoryDescriptor object represents a subrange of memory, specified as a portion of another IOMemoryDescriptor. */ + * @abstract The IOSubMemoryDescriptor object describes a memory area made up of a portion of another IOMemoryDescriptor. + * @discussion The IOSubMemoryDescriptor object represents a subrange of memory, specified as a portion of another IOMemoryDescriptor. */ class IOSubMemoryDescriptor : public IOMemoryDescriptor { - OSDeclareDefaultStructors(IOSubMemoryDescriptor); + OSDeclareDefaultStructors(IOSubMemoryDescriptor); protected: - IOMemoryDescriptor * _parent; - IOByteCount _start; + IOMemoryDescriptor * _parent; + IOByteCount _start; - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; public: /*! @function withSubRange - @abstract Create an IOMemoryDescriptor to describe a subrange of an existing descriptor. - @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a subrange of the specified memory descriptor. The parent memory descriptor is retained by the new descriptor. - @param of The parent IOMemoryDescriptor of which a subrange is to be used for the new descriptor, which will be retained by the subrange IOMemoryDescriptor. - @param offset A byte offset into the parent memory descriptor's memory. - @param length The length of the subrange. - @param options - kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. - @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ - - static IOSubMemoryDescriptor * withSubRange(IOMemoryDescriptor *of, - IOByteCount offset, - IOByteCount length, - IOOptionBits options); - - /* - * Initialize or reinitialize an IOSubMemoryDescriptor to describe - * a subrange of an existing descriptor. - * - * An IOSubMemoryDescriptor can be re-used by calling initSubRange - * again on an existing instance -- note that this behavior is not - * commonly supported in other IOKit classes, although it is here. 
- */ - virtual bool initSubRange( IOMemoryDescriptor * parent, - IOByteCount offset, IOByteCount length, - IODirection withDirection ); - - /* - * IOMemoryDescriptor required methods - */ - - virtual addr64_t getPhysicalSegment( IOByteCount offset, - IOByteCount * length, - IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE; - - virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; - - virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; + * @abstract Create an IOMemoryDescriptor to describe a subrange of an existing descriptor. + * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a subrange of the specified memory descriptor. The parent memory descriptor is retained by the new descriptor. + * @param of The parent IOMemoryDescriptor of which a subrange is to be used for the new descriptor, which will be retained by the subrange IOMemoryDescriptor. + * @param offset A byte offset into the parent memory descriptor's memory. + * @param length The length of the subrange. + * @param options + * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures. + * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */ + + static IOSubMemoryDescriptor * withSubRange(IOMemoryDescriptor *of, + IOByteCount offset, + IOByteCount length, + IOOptionBits options); + +/* + * Initialize or reinitialize an IOSubMemoryDescriptor to describe + * a subrange of an existing descriptor. + * + * An IOSubMemoryDescriptor can be re-used by calling initSubRange + * again on an existing instance -- note that this behavior is not + * commonly supported in other IOKit classes, although it is here. 
+ */ + virtual bool initSubRange( IOMemoryDescriptor * parent, + IOByteCount offset, IOByteCount length, + IODirection withDirection ); + +/* + * IOMemoryDescriptor required methods + */ + + virtual addr64_t getPhysicalSegment( IOByteCount offset, + IOByteCount * length, + IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE; + + virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; + + virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE; #ifdef __LP64__ - virtual IOReturn redirect( task_t safeTask, bool redirect ) APPLE_KEXT_OVERRIDE; + virtual IOReturn redirect( task_t safeTask, bool redirect ) APPLE_KEXT_OVERRIDE; #else - IOReturn redirect( task_t safeTask, bool redirect ); + IOReturn redirect( task_t safeTask, bool redirect ); #endif /* __LP64__ */ - virtual IOReturn setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE; + virtual IOReturn setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE; - // support map() on kIOMemoryTypeVirtual without prepare() - virtual IOMemoryMap * makeMapping( - IOMemoryDescriptor * owner, - task_t intoTask, - IOVirtualAddress atAddress, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ) APPLE_KEXT_OVERRIDE; +// support map() on kIOMemoryTypeVirtual without prepare() + virtual IOMemoryMap * makeMapping( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress atAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ) APPLE_KEXT_OVERRIDE; - virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE; + virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE; /*! @function getPageCounts - @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. - @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. - @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor. - @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor. - @result An IOReturn code. */ - - IOReturn getPageCounts(IOByteCount * residentPageCount, - IOByteCount * dirtyPageCount); + * @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. + * @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor. + * @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor. + * @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor. + * @result An IOReturn code. */ + + IOReturn getPageCounts(IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount); }; #endif /* !_IOSUBMEMORYDESCRIPTOR_H */ diff --git a/iokit/IOKit/IOSyncer.h b/iokit/IOKit/IOSyncer.h index dbdb443c1..f72dcc37f 100644 --- a/iokit/IOKit/IOSyncer.h +++ b/iokit/IOKit/IOSyncer.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
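A short sketch of the withSubRange() contract documented above: the new descriptor retains its parent, the caller owns (and must release) the result, and prepare()/complete() bracket any I/O. The offset and length are illustrative:

#include <IOKit/IOSubMemoryDescriptor.h>

static IOReturn
useSubRange(IOMemoryDescriptor * parent)
{
    // Describe bytes [4096, 4096 + 1024) of 'parent'; the parent is
    // retained by the new descriptor for its lifetime.
    IOSubMemoryDescriptor * sub = IOSubMemoryDescriptor::withSubRange(
        parent, 4096, 1024, kIODirectionOutIn);
    if (!sub) {
        return kIOReturnNoMemory;
    }

    IOReturn ret = sub->prepare();        // wire the subrange for I/O
    if (ret == kIOReturnSuccess) {
        // ... perform I/O against 'sub' ...
        sub->complete();
    }
    sub->release();                       // caller owns the created descriptor
    return ret;
}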
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IOSYNCER_H @@ -34,31 +34,30 @@ class IOSyncer : public OSObject { - OSDeclareDefaultStructors(IOSyncer) + OSDeclareDefaultStructors(IOSyncer) private: - // The spin lock that is used to guard the 'threadMustStop' variable. - IOSimpleLock *guardLock; - volatile bool threadMustStop; - IOReturn fResult; - virtual void free() APPLE_KEXT_OVERRIDE; - virtual void privateSignal(); +// The spin lock that is used to guard the 'threadMustStop' variable. + IOSimpleLock *guardLock; + volatile bool threadMustStop; + IOReturn fResult; + virtual void free() APPLE_KEXT_OVERRIDE; + virtual void privateSignal(); public: - static IOSyncer * create(bool twoRetains = true) + static IOSyncer * create(bool twoRetains = true) APPLE_KEXT_DEPRECATED; - virtual bool init(bool twoRetains) + virtual bool init(bool twoRetains) APPLE_KEXT_DEPRECATED; - virtual void reinit() + virtual void reinit() APPLE_KEXT_DEPRECATED; - virtual IOReturn wait(bool autoRelease = true) + virtual IOReturn wait(bool autoRelease = true) APPLE_KEXT_DEPRECATED; - virtual void signal(IOReturn res = kIOReturnSuccess, - bool autoRelease = true) + virtual void signal(IOReturn res = kIOReturnSuccess, + bool autoRelease = true) APPLE_KEXT_DEPRECATED; }; #endif /* !_IOSYNCER */ - diff --git a/iokit/IOKit/IOTimeStamp.h b/iokit/IOKit/IOTimeStamp.h index 955505f73..95d08197e 100644 --- a/iokit/IOKit/IOTimeStamp.h +++ b/iokit/IOKit/IOTimeStamp.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef IOKIT_IOTIMESTAMP_H @@ -32,91 +32,91 @@ static inline void IOTimeStampStartConstant(unsigned int csc, - uintptr_t a = 0, uintptr_t b = 0, - uintptr_t c = 0, uintptr_t d = 0) + uintptr_t a = 0, uintptr_t b = 0, + uintptr_t c = 0, uintptr_t d = 0) { - KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_START, a, b, c, d, 0); + KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_START, a, b, c, d, 0); } static inline void IOTimeStampEndConstant(uintptr_t csc, - uintptr_t a = 0, uintptr_t b = 0, - uintptr_t c = 0, uintptr_t d = 0) + uintptr_t a = 0, uintptr_t b = 0, + uintptr_t c = 0, uintptr_t d = 0) { - KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_END, a, b, c, d, 0); + KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_END, a, b, c, d, 0); } static inline void IOTimeStampConstant(uintptr_t csc, - uintptr_t a = 0, uintptr_t b = 0, - uintptr_t c = 0, uintptr_t d = 0) + uintptr_t a = 0, uintptr_t b = 0, + uintptr_t c = 0, uintptr_t d = 0) { - KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_NONE, a, b, c, d, 0); + KERNEL_DEBUG_CONSTANT(((uint32_t)csc) | DBG_FUNC_NONE, a, b, c, d, 0); } #if KDEBUG static inline void IOTimeStampStart(uintptr_t csc, - uintptr_t a = 0, uintptr_t b = 0, - uintptr_t c = 0, uintptr_t d = 0) + uintptr_t a = 0, uintptr_t b = 0, + uintptr_t c = 0, uintptr_t d = 0) { - KERNEL_DEBUG(((uint32_t)csc) | DBG_FUNC_START, a, b, c, d, 0); + KERNEL_DEBUG(((uint32_t)csc) | DBG_FUNC_START, a, b, c, d, 0); } static inline void IOTimeStampEnd(uintptr_t csc, - uintptr_t a = 0, uintptr_t b = 0, - uintptr_t c = 0, uintptr_t d = 0) + uintptr_t a = 0, uintptr_t b = 0, + uintptr_t c = 0, uintptr_t d = 0) { - KERNEL_DEBUG(((uint32_t)csc) | DBG_FUNC_END, a, b, c, d, 0); + KERNEL_DEBUG(((uint32_t)csc) | DBG_FUNC_END, a, b, c, d, 0); } static inline void IOTimeStamp(uintptr_t csc, - uintptr_t a = 0, uintptr_t b = 0, - uintptr_t c = 0, uintptr_t d = 0) + uintptr_t a = 0, uintptr_t b = 0, + uintptr_t c = 0, uintptr_t d = 0) { - KERNEL_DEBUG(((uint32_t)csc) | DBG_FUNC_NONE, a, b, c, d, 0); + KERNEL_DEBUG(((uint32_t)csc) | DBG_FUNC_NONE, a, b, c, d, 0); } #endif /* KDEBUG */ -#define IODBG_STORAGE(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSTORAGE, code)) -#define IODBG_NETWORK(code) (KDBG_CODE(DBG_IOKIT, DBG_IONETWORK, code)) -#define IODBG_KEYBOARD(code) (KDBG_CODE(DBG_IOKIT, DBG_IOKEYBOARD, code)) -#define IODBG_HID(code) (KDBG_CODE(DBG_IOKIT, DBG_IOHID, code)) -#define IODBG_AUDIO(code) (KDBG_CODE(DBG_IOKIT, DBG_IOAUDIO, code)) -#define IODBG_SERIAL(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSERIAL, code)) -#define IODBG_TTY(code) (KDBG_CODE(DBG_IOKIT, DBG_IOTTY, code)) -#define IODBG_SAM(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSAM, code)) -#define IODBG_PARALLELATA(code) (KDBG_CODE(DBG_IOKIT, DBG_IOPARALLELATA, code)) -#define IODBG_PARALLELSCSI(code) (KDBG_CODE(DBG_IOKIT, DBG_IOPARALLELSCSI, code)) -#define IODBG_SATA(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSATA, code)) -#define IODBG_SAS(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSAS, code)) -#define IODBG_FIBRECHANNEL(code) (KDBG_CODE(DBG_IOKIT, DBG_IOFIBRECHANNEL, code)) -#define IODBG_USB(code) (KDBG_CODE(DBG_IOKIT, DBG_IOUSB, code)) -#define IODBG_BLUETOOTH(code) (KDBG_CODE(DBG_IOKIT, DBG_IOBLUETOOTH, code)) -#define IODBG_FIREWIRE(code) (KDBG_CODE(DBG_IOKIT, DBG_IOFIREWIRE, code)) -#define IODBG_INFINIBAND(code) (KDBG_CODE(DBG_IOKIT, DBG_IOINFINIBAND, code)) +#define IODBG_STORAGE(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSTORAGE, code)) +#define IODBG_NETWORK(code) (KDBG_CODE(DBG_IOKIT, DBG_IONETWORK, code)) +#define 
IODBG_KEYBOARD(code) (KDBG_CODE(DBG_IOKIT, DBG_IOKEYBOARD, code)) +#define IODBG_HID(code) (KDBG_CODE(DBG_IOKIT, DBG_IOHID, code)) +#define IODBG_AUDIO(code) (KDBG_CODE(DBG_IOKIT, DBG_IOAUDIO, code)) +#define IODBG_SERIAL(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSERIAL, code)) +#define IODBG_TTY(code) (KDBG_CODE(DBG_IOKIT, DBG_IOTTY, code)) +#define IODBG_SAM(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSAM, code)) +#define IODBG_PARALLELATA(code) (KDBG_CODE(DBG_IOKIT, DBG_IOPARALLELATA, code)) +#define IODBG_PARALLELSCSI(code) (KDBG_CODE(DBG_IOKIT, DBG_IOPARALLELSCSI, code)) +#define IODBG_SATA(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSATA, code)) +#define IODBG_SAS(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSAS, code)) +#define IODBG_FIBRECHANNEL(code) (KDBG_CODE(DBG_IOKIT, DBG_IOFIBRECHANNEL, code)) +#define IODBG_USB(code) (KDBG_CODE(DBG_IOKIT, DBG_IOUSB, code)) +#define IODBG_BLUETOOTH(code) (KDBG_CODE(DBG_IOKIT, DBG_IOBLUETOOTH, code)) +#define IODBG_FIREWIRE(code) (KDBG_CODE(DBG_IOKIT, DBG_IOFIREWIRE, code)) +#define IODBG_INFINIBAND(code) (KDBG_CODE(DBG_IOKIT, DBG_IOINFINIBAND, code)) /* Backwards compatibility */ -#define IODBG_DISK(code) IODBG_STORAGE(code) -#define IODBG_POINTING(code) IODBG_HID(code) +#define IODBG_DISK(code) IODBG_STORAGE(code) +#define IODBG_POINTING(code) IODBG_HID(code) /* IOKit infrastructure subclasses */ -#define IODBG_INTC(code) (KDBG_CODE(DBG_IOKIT, DBG_IOINTC, code)) -#define IODBG_WORKLOOP(code) (KDBG_CODE(DBG_IOKIT, DBG_IOWORKLOOP, code)) -#define IODBG_INTES(code) (KDBG_CODE(DBG_IOKIT, DBG_IOINTES, code)) -#define IODBG_TIMES(code) (KDBG_CODE(DBG_IOKIT, DBG_IOCLKES, code)) -#define IODBG_CMDQ(code) (KDBG_CODE(DBG_IOKIT, DBG_IOCMDQ, code)) -#define IODBG_MCURS(code) (KDBG_CODE(DBG_IOKIT, DBG_IOMCURS, code)) -#define IODBG_MDESC(code) (KDBG_CODE(DBG_IOKIT, DBG_IOMDESC, code)) -#define IODBG_POWER(code) (KDBG_CODE(DBG_IOKIT, DBG_IOPOWER, code)) -#define IODBG_IOSERVICE(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSERVICE, code)) -#define IODBG_IOREGISTRY(code) (KDBG_CODE(DBG_IOKIT, DBG_IOREGISTRY, code)) +#define IODBG_INTC(code) (KDBG_CODE(DBG_IOKIT, DBG_IOINTC, code)) +#define IODBG_WORKLOOP(code) (KDBG_CODE(DBG_IOKIT, DBG_IOWORKLOOP, code)) +#define IODBG_INTES(code) (KDBG_CODE(DBG_IOKIT, DBG_IOINTES, code)) +#define IODBG_TIMES(code) (KDBG_CODE(DBG_IOKIT, DBG_IOCLKES, code)) +#define IODBG_CMDQ(code) (KDBG_CODE(DBG_IOKIT, DBG_IOCMDQ, code)) +#define IODBG_MCURS(code) (KDBG_CODE(DBG_IOKIT, DBG_IOMCURS, code)) +#define IODBG_MDESC(code) (KDBG_CODE(DBG_IOKIT, DBG_IOMDESC, code)) +#define IODBG_POWER(code) (KDBG_CODE(DBG_IOKIT, DBG_IOPOWER, code)) +#define IODBG_IOSERVICE(code) (KDBG_CODE(DBG_IOKIT, DBG_IOSERVICE, code)) +#define IODBG_IOREGISTRY(code) (KDBG_CODE(DBG_IOKIT, DBG_IOREGISTRY, code)) /* IOKit specific codes - within each subclass */ @@ -135,35 +135,35 @@ IOTimeStamp(uintptr_t csc, /* DBG_IOKIT/DBG_IOTTY codes */ /* DBG_IOKIT/DBG_IOINTC codes */ -#define IOINTC_HANDLER 1 /* 0x05000004 */ -#define IOINTC_SPURIOUS 2 /* 0x05000008 */ +#define IOINTC_HANDLER 1 /* 0x05000004 */ +#define IOINTC_SPURIOUS 2 /* 0x05000008 */ /* DBG_IOKIT/DBG_IOWORKLOOP codes */ -#define IOWL_CLIENT 1 /* 0x05010004 */ -#define IOWL_WORK 2 /* 0x05010008 */ +#define IOWL_CLIENT 1 /* 0x05010004 */ +#define IOWL_WORK 2 /* 0x05010008 */ /* DBG_IOKIT/DBG_IOINTES codes */ -#define IOINTES_CLIENT 1 /* 0x05020004 */ -#define IOINTES_LAT 2 /* 0x05020008 */ -#define IOINTES_SEMA 3 /* 0x0502000c */ -#define IOINTES_INTCTXT 4 /* 0x05020010 */ -#define IOINTES_INTFLTR 5 /* 0x05020014 */ -#define IOINTES_ACTION 6 /* 
0x05020018 */ -#define IOINTES_FILTER 7 /* 0x0502001c */ +#define IOINTES_CLIENT 1 /* 0x05020004 */ +#define IOINTES_LAT 2 /* 0x05020008 */ +#define IOINTES_SEMA 3 /* 0x0502000c */ +#define IOINTES_INTCTXT 4 /* 0x05020010 */ +#define IOINTES_INTFLTR 5 /* 0x05020014 */ +#define IOINTES_ACTION 6 /* 0x05020018 */ +#define IOINTES_FILTER 7 /* 0x0502001c */ /* DBG_IOKIT/DBG_IOTIMES codes */ -#define IOTIMES_CLIENT 1 /* 0x05030004 */ -#define IOTIMES_LAT 2 /* 0x05030008 */ -#define IOTIMES_SEMA 3 /* 0x0503000c */ -#define IOTIMES_ACTION 4 /* 0x05030010 */ +#define IOTIMES_CLIENT 1 /* 0x05030004 */ +#define IOTIMES_LAT 2 /* 0x05030008 */ +#define IOTIMES_SEMA 3 /* 0x0503000c */ +#define IOTIMES_ACTION 4 /* 0x05030010 */ /* DBG_IOKIT/DBG_IOCMDQ codes */ -#define IOCMDQ_CLIENT 1 /* 0x05040004 */ -#define IOCMDQ_LAT 2 /* 0x05040008 */ -#define IOCMDQ_SEMA 3 /* 0x0504000c */ -#define IOCMDQ_PSEMA 4 /* 0x05040010 */ -#define IOCMDQ_PLOCK 5 /* 0x05040014 */ -#define IOCMDQ_ACTION 6 /* 0x05040018 */ +#define IOCMDQ_CLIENT 1 /* 0x05040004 */ +#define IOCMDQ_LAT 2 /* 0x05040008 */ +#define IOCMDQ_SEMA 3 /* 0x0504000c */ +#define IOCMDQ_PSEMA 4 /* 0x05040010 */ +#define IOCMDQ_PLOCK 5 /* 0x05040014 */ +#define IOCMDQ_ACTION 6 /* 0x05040018 */ /* DBG_IOKIT/DBG_IOMCURS codes */ @@ -173,38 +173,38 @@ IOTimeStamp(uintptr_t csc, // See IOKit/pwr_mgt/IOPMlog.h for the power management codes /* DBG_IOKIT/DBG_IOSERVICE codes */ -#define IOSERVICE_BUSY 1 /* 0x05080004 */ -#define IOSERVICE_NONBUSY 2 /* 0x05080008 */ -#define IOSERVICE_MODULESTALL 3 /* 0x0508000C */ -#define IOSERVICE_MODULEUNSTALL 4 /* 0x05080010 */ - -#define IOSERVICE_TERMINATE_PHASE1 5 /* 0x05080014 */ -#define IOSERVICE_TERMINATE_REQUEST_OK 6 /* 0x05080018 */ -#define IOSERVICE_TERMINATE_REQUEST_FAIL 7 /* 0x0508001C */ -#define IOSERVICE_TERMINATE_SCHEDULE_STOP 8 /* 0x05080020 */ -#define IOSERVICE_TERMINATE_SCHEDULE_FINALIZE 9 /* 0x05080024 */ -#define IOSERVICE_TERMINATE_WILL 10 /* 0x05080028 */ -#define IOSERVICE_TERMINATE_DID 11 /* 0x0508002C */ -#define IOSERVICE_TERMINATE_DID_DEFER 12 /* 0x05080030 */ -#define IOSERVICE_TERMINATE_FINALIZE 13 /* 0x05080034 */ -#define IOSERVICE_TERMINATE_STOP 14 /* 0x05080038 */ -#define IOSERVICE_TERMINATE_STOP_NOP 15 /* 0x0508003C */ -#define IOSERVICE_TERMINATE_STOP_DEFER 16 /* 0x05080040 */ -#define IOSERVICE_TERMINATE_DONE 17 /* 0x05080044 */ - -#define IOSERVICE_KEXTD_ALIVE 18 /* 0x05080048 */ -#define IOSERVICE_KEXTD_READY 19 /* 0x0508004C */ -#define IOSERVICE_REGISTRY_QUIET 20 /* 0x05080050 */ - -#define IOSERVICE_TERM_SET_INACTIVE 21 /* 0x05080054 */ -#define IOSERVICE_TERM_SCHED_PHASE2 22 /* 0x05080058 */ -#define IOSERVICE_TERM_START_PHASE2 23 /* 0x0508005C */ -#define IOSERVICE_TERM_TRY_PHASE2 24 /* 0x05080060 */ -#define IOSERVICE_TERM_UC_DEFER 25 /* 0x05080064 */ -#define IOSERVICE_DETACH 26 /* 0x05080068 */ +#define IOSERVICE_BUSY 1 /* 0x05080004 */ +#define IOSERVICE_NONBUSY 2 /* 0x05080008 */ +#define IOSERVICE_MODULESTALL 3 /* 0x0508000C */ +#define IOSERVICE_MODULEUNSTALL 4 /* 0x05080010 */ + +#define IOSERVICE_TERMINATE_PHASE1 5 /* 0x05080014 */ +#define IOSERVICE_TERMINATE_REQUEST_OK 6 /* 0x05080018 */ +#define IOSERVICE_TERMINATE_REQUEST_FAIL 7 /* 0x0508001C */ +#define IOSERVICE_TERMINATE_SCHEDULE_STOP 8 /* 0x05080020 */ +#define IOSERVICE_TERMINATE_SCHEDULE_FINALIZE 9 /* 0x05080024 */ +#define IOSERVICE_TERMINATE_WILL 10 /* 0x05080028 */ +#define IOSERVICE_TERMINATE_DID 11 /* 0x0508002C */ +#define IOSERVICE_TERMINATE_DID_DEFER 12 /* 0x05080030 */ +#define 
IOSERVICE_TERMINATE_FINALIZE 13 /* 0x05080034 */ +#define IOSERVICE_TERMINATE_STOP 14 /* 0x05080038 */ +#define IOSERVICE_TERMINATE_STOP_NOP 15 /* 0x0508003C */ +#define IOSERVICE_TERMINATE_STOP_DEFER 16 /* 0x05080040 */ +#define IOSERVICE_TERMINATE_DONE 17 /* 0x05080044 */ + +#define IOSERVICE_KEXTD_ALIVE 18 /* 0x05080048 */ +#define IOSERVICE_KEXTD_READY 19 /* 0x0508004C */ +#define IOSERVICE_REGISTRY_QUIET 20 /* 0x05080050 */ + +#define IOSERVICE_TERM_SET_INACTIVE 21 /* 0x05080054 */ +#define IOSERVICE_TERM_SCHED_PHASE2 22 /* 0x05080058 */ +#define IOSERVICE_TERM_START_PHASE2 23 /* 0x0508005C */ +#define IOSERVICE_TERM_TRY_PHASE2 24 /* 0x05080060 */ +#define IOSERVICE_TERM_UC_DEFER 25 /* 0x05080064 */ +#define IOSERVICE_DETACH 26 /* 0x05080068 */ /* DBG_IOKIT/DBG_IOREGISTRY codes */ -#define IOREGISTRYENTRY_NAME_STRING 1 /* 0x05090004 */ -#define IOREGISTRYENTRY_NAME 2 /* 0x05090008 */ +#define IOREGISTRYENTRY_NAME_STRING 1 /* 0x05090004 */ +#define IOREGISTRYENTRY_NAME 2 /* 0x05090008 */ #endif /* ! IOKIT_IOTIMESTAMP_H */ diff --git a/iokit/IOKit/IOTimerEventSource.h b/iokit/IOKit/IOTimerEventSource.h index 8ef49ef19..ed54a6a60 100644 --- a/iokit/IOKit/IOTimerEventSource.h +++ b/iokit/IOKit/IOTimerEventSource.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * IOTimerEventSource.h * @@ -48,285 +48,282 @@ __END_DECLS #include /*! - @enum IOTimerEventSource constructor options - @abstract Constants defining behavior of the IOTimerEventSource. - @constant kIOTimerEventSourceOptionsPriorityHigh Importance above everything but realtime. - Thread calls allocated with this priority execute at extremely high priority, - above everything but realtime threads. They are generally executed in serial. - Though they may execute concurrently under some circumstances, no fan-out is implied. - These work items should do very small amounts of work or risk disrupting system - responsiveness. - @constant kIOTimerEventSourceOptionsPriorityKernelHigh Importance higher than most kernel - threads. - @constant kIOTimerEventSourceOptionsPriorityKernel Importance similar to that of normal kernel - threads. - @constant kIOTimerEventSourceOptionsPriorityUser Importance similar to that of normal user threads. - @constant kIOTimerEventSourceOptionsPriorityLow Very low importance. 
- @constant kIOTimerEventSourceOptionsPriorityWorkLoop Run the callout on the thread of the IOWorkLoop - the event source has been added to. - @constant kIOTimerEventSourceOptionsAllowReenter Allow the callout to be rescheduled and potentially - re-entered, if the IOWorkLoop lock has been released (eg. with commandSleep) during its invocation. - @constant kIOTimerEventSourceOptionsDefault Recommended default options. + * @enum IOTimerEventSource constructor options + * @abstract Constants defining behavior of the IOTimerEventSource. + * @constant kIOTimerEventSourceOptionsPriorityHigh Importance above everything but realtime. + * Thread calls allocated with this priority execute at extremely high priority, + * above everything but realtime threads. They are generally executed in serial. + * Though they may execute concurrently under some circumstances, no fan-out is implied. + * These work items should do very small amounts of work or risk disrupting system + * responsiveness. + * @constant kIOTimerEventSourceOptionsPriorityKernelHigh Importance higher than most kernel + * threads. + * @constant kIOTimerEventSourceOptionsPriorityKernel Importance similar to that of normal kernel + * threads. + * @constant kIOTimerEventSourceOptionsPriorityUser Importance similar to that of normal user threads. + * @constant kIOTimerEventSourceOptionsPriorityLow Very low importance. + * @constant kIOTimerEventSourceOptionsPriorityWorkLoop Run the callout on the thread of the IOWorkLoop + * the event source has been added to. + * @constant kIOTimerEventSourceOptionsAllowReenter Allow the callout to be rescheduled and potentially + * re-entered, if the IOWorkLoop lock has been released (eg. with commandSleep) during its invocation. + * @constant kIOTimerEventSourceOptionsDefault Recommended default options. */ -enum -{ - kIOTimerEventSourceOptionsPriorityMask = 0x000000ff, - kIOTimerEventSourceOptionsPriorityHigh = 0x00000000, - kIOTimerEventSourceOptionsPriorityKernelHigh = 0x00000001, - kIOTimerEventSourceOptionsPriorityKernel = 0x00000002, - kIOTimerEventSourceOptionsPriorityUser = 0x00000003, - kIOTimerEventSourceOptionsPriorityLow = 0x00000004, - kIOTimerEventSourceOptionsPriorityWorkLoop = 0x000000ff, - - kIOTimerEventSourceOptionsAllowReenter = 0x00000100, - - kIOTimerEventSourceOptionsDefault = kIOTimerEventSourceOptionsPriorityKernelHigh +enum{ + kIOTimerEventSourceOptionsPriorityMask = 0x000000ff, + kIOTimerEventSourceOptionsPriorityHigh = 0x00000000, + kIOTimerEventSourceOptionsPriorityKernelHigh = 0x00000001, + kIOTimerEventSourceOptionsPriorityKernel = 0x00000002, + kIOTimerEventSourceOptionsPriorityUser = 0x00000003, + kIOTimerEventSourceOptionsPriorityLow = 0x00000004, + kIOTimerEventSourceOptionsPriorityWorkLoop = 0x000000ff, + + kIOTimerEventSourceOptionsAllowReenter = 0x00000100, + + kIOTimerEventSourceOptionsDefault = kIOTimerEventSourceOptionsPriorityKernelHigh }; -#define IOTIMEREVENTSOURCEOPTIONS_DEFINED 1 +#define IOTIMEREVENTSOURCEOPTIONS_DEFINED 1 /*! - @enum IOTimerEventSource setTimeout/wakeAtTime options - @abstract Constants defining behavior of a scheduled call from IOTimerEventSource. - @constant kIOTimeOptionsWithLeeway Use the leeway parameter to the call. - @constant kIOTimeOptionsContinuous Use mach_continuous_time() to generate the callback. 
-*/ -enum -{ - kIOTimeOptionsWithLeeway = 0x00000020, - kIOTimeOptionsContinuous = 0x00000100, + * @enum IOTimerEventSource setTimeout/wakeAtTime options + * @abstract Constants defining behavior of a scheduled call from IOTimerEventSource. + * @constant kIOTimeOptionsWithLeeway Use the leeway parameter to the call. + * @constant kIOTimeOptionsContinuous Use mach_continuous_time() to generate the callback. + */ +enum{ + kIOTimeOptionsWithLeeway = 0x00000020, + kIOTimeOptionsContinuous = 0x00000100, }; /*! - @class IOTimerEventSource : public IOEventSource - @abstract Time based event source mechanism. - @discussion An event source that implements a simple timer. A timeout handler is called once the timeout period expires. This timeout handler will be called by the work-loop that this event source is attached to. -

- Usually a timer event source will be used to implement a timeout. In general when a driver makes a request it will need to setup a call to keep track of when the I/O doesn't complete. This class is designed to make that somewhat easier. -

- Remember the system doesn't guarantee the accuracy of the callout. It is possible that a higher priority thread is running which will delay the execution of the action routine. In fact the thread will be made runable at the exact requested time, within the accuracy of the CPU's decrementer based interrupt, but the scheduler will then control execution. -*/ + * @class IOTimerEventSource : public IOEventSource + * @abstract Time based event source mechanism. + * @discussion An event source that implements a simple timer. A timeout handler is called once the timeout period expires. This timeout handler will be called by the work-loop that this event source is attached to. + *

+ * Usually a timer event source will be used to implement a timeout. In general when a driver makes a request it will need to set up a call to keep track of when the I/O doesn't complete. This class is designed to make that somewhat easier. + *

+ * Remember the system doesn't guarantee the accuracy of the callout. It is possible that a higher priority thread is running which will delay the execution of the action routine. In fact the thread will be made runnable at the exact requested time, within the accuracy of the CPU's decrementer based interrupt, but the scheduler will then control execution. + */ class IOTimerEventSource : public IOEventSource { - OSDeclareDefaultStructors(IOTimerEventSource) + OSDeclareDefaultStructors(IOTimerEventSource) protected: /*! @var calloutEntry thread_call entry for preregistered thread callouts */ - void *calloutEntry; + void *calloutEntry; /*! @var abstime time to wake up next, see enable. */ - AbsoluteTime abstime; + AbsoluteTime abstime; /*! @struct ExpansionData - @discussion This structure is private to the IOTimerEventSource implementation. - */ - struct ExpansionData - { - SInt32 calloutGeneration; - SInt32 calloutGenerationSignaled; - IOWorkLoop * workLoop; - }; + * @discussion This structure is private to the IOTimerEventSource implementation. + */ + struct ExpansionData { + SInt32 calloutGeneration; + SInt32 calloutGenerationSignaled; + IOWorkLoop * workLoop; + }; /*! @var reserved - Reserved for future use. (Internal use only) */ - APPLE_KEXT_WSHADOW_PUSH; - ExpansionData *reserved; - APPLE_KEXT_WSHADOW_POP; + * Reserved for future use. (Internal use only) */ + APPLE_KEXT_WSHADOW_PUSH; + ExpansionData *reserved; + APPLE_KEXT_WSHADOW_POP; /*! @function timeout - @abstract Function that routes the call from the OS' timeout mechanism into a work-loop context. - @discussion timeout will normally not be called nor overridden by a subclass. If the event source is enabled then close the work-loop's gate and call the action routine. - @param self This argument will be cast to an IOTimerEventSource. */ - static void timeout(void *self); + * @abstract Function that routes the call from the OS' timeout mechanism into a work-loop context. + * @discussion timeout will normally not be called nor overridden by a subclass. If the event source is enabled then close the work-loop's gate and call the action routine. + * @param self This argument will be cast to an IOTimerEventSource. */ + static void timeout(void *self); /*! @function setTimeoutFunc - @abstract Set's timeout as the function of calloutEntry. - @discussion IOTimerEventSource is based upon the kern/thread_call.h APIs currently. This function allocates the calloutEntry member variable by using thread_call_allocate(timeout, this). If you need to write your own subclass of IOTimerEventSource you probably should override this method to allocate an entry that points to your own timeout routine. */ - virtual void setTimeoutFunc(); + * @abstract Sets timeout as the function of calloutEntry. + * @discussion IOTimerEventSource is based upon the kern/thread_call.h APIs currently. This function allocates the calloutEntry member variable by using thread_call_allocate(timeout, this). If you need to write your own subclass of IOTimerEventSource you probably should override this method to allocate an entry that points to your own timeout routine. */ + virtual void setTimeoutFunc();
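/*
 * [Editor's note] A sketch of the subclassing pattern the setTimeoutFunc()
 * discussion above describes. The subclass name and callout routine are
 * hypothetical; thread_call_allocate() is the kern/thread_call.h API named in
 * that discussion:
 *
 *     class MyTimerEventSource : public IOTimerEventSource
 *     {
 *         OSDeclareDefaultStructors(MyTimerEventSource)
 *     protected:
 *         static void myTimeout(void *self);   // custom callout routine
 *         virtual void setTimeoutFunc() APPLE_KEXT_OVERRIDE
 *         {
 *             // Route expirations to myTimeout instead of the default timeout().
 *             calloutEntry = (void *) thread_call_allocate(
 *                 (thread_call_func_t) &MyTimerEventSource::myTimeout, this);
 *         }
 *     };
 */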
/*! @function free - @abstract Sub-class implementation of free method, frees calloutEntry */ - virtual void free() APPLE_KEXT_OVERRIDE; + * @abstract Sub-class implementation of free method, frees calloutEntry */ + virtual void free() APPLE_KEXT_OVERRIDE; - virtual void setWorkLoop(IOWorkLoop *workLoop) APPLE_KEXT_OVERRIDE; + virtual void setWorkLoop(IOWorkLoop *workLoop) APPLE_KEXT_OVERRIDE; public: /*! @typedef Action - @discussion 'C' Function pointer defining the callout routine of this event source. - @param owner Owning target object. Note by a startling coincidence the first parameter in a C callout is currently used to define the target of a C++ member function. - @param sender The object that timed out. */ - typedef void (*Action)(OSObject *owner, IOTimerEventSource *sender); + * @discussion 'C' Function pointer defining the callout routine of this event source. + * @param owner Owning target object. Note by a startling coincidence the first parameter in a C callout is currently used to define the target of a C++ member function. + * @param sender The object that timed out. */ + typedef void (*Action)(OSObject *owner, IOTimerEventSource *sender); #ifdef __BLOCKS__ - typedef void (^ActionBlock)(IOTimerEventSource *sender); + typedef void (^ActionBlock)(IOTimerEventSource *sender); #endif /* __BLOCKS__ */ - static IOTimerEventSource * + static IOTimerEventSource * timerEventSource(OSObject *owner, Action action = 0); /*! @function timerEventSource - @abstract Allocates and returns an initialized timer instance. - @param options Mask of kIOTimerEventSourceOptions* options. - @param owner The object that that will be passed to the Action callback. - @param action 'C' Function pointer for the callout routine of this event source. - */ - static IOTimerEventSource * + * @abstract Allocates and returns an initialized timer instance. + * @param options Mask of kIOTimerEventSourceOptions* options. + * @param owner The object that will be passed to the Action callback. + * @param action 'C' Function pointer for the callout routine of this event source. + */ + static IOTimerEventSource * timerEventSource(uint32_t options, OSObject *owner, Action action = 0); #ifdef __BLOCKS__ /*! @function timerEventSource - @abstract Allocates and returns an initialized timer instance. - @param options Mask of kIOTimerEventSourceOptions* options. - @param inOwner The object that that will be passed to the Action callback. - @param action Block for the callout routine of this event source. - */ - static IOTimerEventSource * + * @abstract Allocates and returns an initialized timer instance. + * @param options Mask of kIOTimerEventSourceOptions* options. + * @param inOwner The object that will be passed to the Action callback. + * @param action Block for the callout routine of this event source. + */ + static IOTimerEventSource * timerEventSource(uint32_t options, OSObject *inOwner, ActionBlock action); #endif /* __BLOCKS__ */ #if XNU_KERNEL_PRIVATE __inline__ void invokeAction(IOTimerEventSource::Action action, IOTimerEventSource * ts, - OSObject * owner, IOWorkLoop * workLoop); + OSObject * owner, IOWorkLoop * workLoop); #endif /* XNU_KERNEL_PRIVATE */ /*! @function init - @abstract Initializes the timer with an owner, and a handler to call when the timeout expires. - */ - virtual bool init(OSObject *owner, Action action = 0); + * @abstract Initializes the timer with an owner, and a handler to call when the timeout expires. + */ + virtual bool init(OSObject *owner, Action action = 0);
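/*
 * [Editor's note] A minimal usage sketch based on the factory methods above
 * (the driver context and names are hypothetical): create the timer, attach it
 * to the driver's work loop, then arm it.
 *
 *     static void ioTimedOut(OSObject *owner, IOTimerEventSource *sender)
 *     {
 *         // Runs in work-loop context: the awaited I/O did not complete in time.
 *     }
 *
 *     IOTimerEventSource *timer =
 *         IOTimerEventSource::timerEventSource(this, &ioTimedOut);
 *     if (!timer || getWorkLoop()->addEventSource(timer) != kIOReturnSuccess) {
 *         // bail out: the timer could not be created or attached
 *     }
 *     timer->setTimeoutMS(500);    // fire in ~500 ms unless cancelled first
 */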
/*! @function enable - @abstract Enables a call to the action. - @discussion Allows the action function to be called. If the timer event source was disabled while a call was outstanding and the call wasn't cancelled then it will be rescheduled. So a disable/enable pair will disable calls from this event source. */ - virtual void enable() APPLE_KEXT_OVERRIDE; + * @abstract Enables a call to the action. + * @discussion Allows the action function to be called. If the timer event source was disabled while a call was outstanding and the call wasn't cancelled then it will be rescheduled. So a disable/enable pair will disable calls from this event source. */ + virtual void enable() APPLE_KEXT_OVERRIDE; /*! @function disable - @abstract Disable a timed callout. - @discussion When disable returns the action will not be called until the next time enable(qv) is called. */ - virtual void disable() APPLE_KEXT_OVERRIDE; + * @abstract Disable a timed callout. + * @discussion When disable returns the action will not be called until the next time enable(qv) is called. */ + virtual void disable() APPLE_KEXT_OVERRIDE; /*! @function checkForWork - @abstract Pure Virtual member function used by IOWorkLoop for issuing a client calls. - @discussion This function called when the work-loop is ready to check for any work to do and then to call out the owner/action. - @result Return true if this function needs to be called again before all its outstanding events have been processed. */ - virtual bool checkForWork() APPLE_KEXT_OVERRIDE; + * @abstract Pure virtual member function used by IOWorkLoop for issuing client calls. + * @discussion This function is called when the work-loop is ready to check for any work to do and then to call out the owner/action. + * @result Return true if this function needs to be called again before all its outstanding events have been processed. */ + virtual bool checkForWork() APPLE_KEXT_OVERRIDE; /*! @function setTimeoutTicks - @abstract Setup a callback at after the delay in scheduler ticks. See wakeAtTime(AbsoluteTime). - @param ticks Delay from now to wake up, in scheduler ticks, whatever that may be. - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn setTimeoutTicks(UInt32 ticks); + * @abstract Set up a callback after the delay in scheduler ticks. See wakeAtTime(AbsoluteTime). + * @param ticks Delay from now to wake up, in scheduler ticks, whatever that may be. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeoutTicks(UInt32 ticks); /*! @function setTimeoutMS - @abstract Setup a callback at after the delay in milliseconds. See wakeAtTime(AbsoluteTime). - @param ms Delay from now to wake up, time in milliseconds. - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn setTimeoutMS(UInt32 ms); + * @abstract Set up a callback after the delay in milliseconds. See wakeAtTime(AbsoluteTime). + * @param ms Delay from now to wake up, time in milliseconds. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeoutMS(UInt32 ms); /*! @function setTimeoutUS - @abstract Setup a callback at after the delay in microseconds. See wakeAtTime(AbsoluteTime). - @param us Delay from now to wake up, time in microseconds.
- @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn setTimeoutUS(UInt32 us); + * @abstract Set up a callback after the delay in microseconds. See wakeAtTime(AbsoluteTime). + * @param us Delay from now to wake up, time in microseconds. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeoutUS(UInt32 us); /*! @function setTimeout - @abstract Setup a callback at after the delay in some unit. See wakeAtTime(AbsoluteTime). - @param interval Delay from now to wake up in some defined unit. - @param scale_factor Define the unit of interval, default to nanoseconds. - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn setTimeout(UInt32 interval, - UInt32 scale_factor = kNanosecondScale); + * @abstract Set up a callback after the delay in some unit. See wakeAtTime(AbsoluteTime). + * @param interval Delay from now to wake up in some defined unit. + * @param scale_factor Defines the unit of interval; defaults to nanoseconds. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeout(UInt32 interval, + UInt32 scale_factor = kNanosecondScale); #if !defined(__LP64__) - virtual IOReturn setTimeout(mach_timespec_t interval) + virtual IOReturn setTimeout(mach_timespec_t interval) APPLE_KEXT_DEPRECATED; #endif /*! @function setTimeout - @abstract Setup a callback at after the delay in decrementer ticks. See wakeAtTime(AbsoluteTime). - @param interval Delay from now to wake up in decrementer ticks. - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn setTimeout(AbsoluteTime interval); + * @abstract Set up a callback after the delay in decrementer ticks. See wakeAtTime(AbsoluteTime). + * @param interval Delay from now to wake up in decrementer ticks. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeout(AbsoluteTime interval); /*! @function wakeAtTimeTicks - @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). - @param ticks Time to wake up in scheduler quantums, whatever that is? - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn wakeAtTimeTicks(UInt32 ticks); + * @abstract Set up a callback at this absolute time. See wakeAtTime(AbsoluteTime). + * @param ticks Time to wake up in scheduler quantums, whatever that is? + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTimeTicks(UInt32 ticks); /*! @function wakeAtTimeMS - @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). - @param ms Time to wake up in milliseconds. - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn wakeAtTimeMS(UInt32 ms); + * @abstract Set up a callback at this absolute time. See wakeAtTime(AbsoluteTime). + * @param ms Time to wake up in milliseconds. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTimeMS(UInt32 ms);
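/*
 * [Editor's note] The tick/MS/US variants above are convenience wrappers over
 * the scale_factor form; e.g. these two calls (sketch) arm the same ~250 ms
 * timeout:
 *
 *     timer->setTimeoutMS(250);
 *     timer->setTimeout(250, kMillisecondScale);
 */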
/*! @function wakeAtTimeUS - @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). - @param us Time to wake up in microseconds. - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn wakeAtTimeUS(UInt32 us); + * @abstract Set up a callback at this absolute time. See wakeAtTime(AbsoluteTime). + * @param us Time to wake up in microseconds. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTimeUS(UInt32 us); /*! @function wakeAtTime - @abstract Setup a callback at this absolute time. See wakeAtTime(AbsoluteTime). - @param abstime Time to wake up in some unit. - @param scale_factor Define the unit of abstime, default to nanoseconds. - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn wakeAtTime(UInt32 abstime, - UInt32 scale_factor = kNanosecondScale); + * @abstract Set up a callback at this absolute time. See wakeAtTime(AbsoluteTime). + * @param abstime Time to wake up in some unit. + * @param scale_factor Defines the unit of abstime; defaults to nanoseconds. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn wakeAtTime(UInt32 abstime, + UInt32 scale_factor = kNanosecondScale); #if !defined(__LP64__) - virtual IOReturn wakeAtTime(mach_timespec_t abstime) + virtual IOReturn wakeAtTime(mach_timespec_t abstime) APPLE_KEXT_DEPRECATED; #endif /*! @function wakeAtTime - @abstract Setup a callback at this absolute time. - @discussion Starts the timer, which will expire at abstime. After it expires, the timer will call the 'action' registered in the init() function. This timer is not periodic, a further call is needed to reset and restart the timer after it expires. - @param abstime Absolute Time when to wake up, counted in 'decrementer' units and starts at zero when system boots. - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared by init or IOEventSource::setAction (qqv). */ - virtual IOReturn wakeAtTime(AbsoluteTime abstime); + * @abstract Set up a callback at this absolute time. + * @discussion Starts the timer, which will expire at abstime. After it expires, the timer will call the 'action' registered in the init() function. This timer is not periodic; a further call is needed to reset and restart the timer after it expires. + * @param abstime Absolute Time when to wake up, counted in 'decrementer' units and starts at zero when system boots. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared by init or IOEventSource::setAction (qqv). */ + virtual IOReturn wakeAtTime(AbsoluteTime abstime); /*! @function cancelTimeout - @abstract Disable any outstanding calls to this event source. - @discussion Clear down any oustanding calls. By the time this function completes it is guaranteed that the action will not be called again. */ - virtual void cancelTimeout(); + * @abstract Disable any outstanding calls to this event source. + * @discussion Clear down any outstanding calls. By the time this function completes it is guaranteed that the action will not be called again. */ + virtual void cancelTimeout();
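/*
 * [Editor's note] A sketch of the absolute-time variant. It assumes the
 * kernel's 64-bit AbsoluteTime and the clock_interval_to_deadline() helper
 * from kern/clock.h; names otherwise as declared above.
 *
 *     AbsoluteTime deadline;
 *     clock_interval_to_deadline(100, kMillisecondScale, &deadline);
 *     timer->wakeAtTime(deadline);     // one-shot: re-arm after it fires
 *
 *     // On the I/O completion path:
 *     timer->cancelTimeout();          // action is guaranteed not to run after this
 */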
/*! @function init - @abstract Initializes the timer with an owner, and a handler to call when the timeout expires. - */ - virtual bool init(uint32_t options, OSObject *inOwner, Action inAction); + * @abstract Initializes the timer with an owner, and a handler to call when the timeout expires. + */ + virtual bool init(uint32_t options, OSObject *inOwner, Action inAction); /*! @function setTimeout - @abstract Setup a callback at after the delay in decrementer ticks. See wakeAtTime(AbsoluteTime). - @param options see kIOTimeOptionsWithLeeway and kIOTimeOptionsContinuous - @param interval Delay from now to wake up in decrementer ticks. - @param leeway Allowable leeway to wake time, if the kIOTimeOptionsWithLeeway option is set - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ - virtual IOReturn setTimeout(uint32_t options, AbsoluteTime interval, AbsoluteTime leeway); + * @abstract Set up a callback after the delay in decrementer ticks. See wakeAtTime(AbsoluteTime). + * @param options See kIOTimeOptionsWithLeeway and kIOTimeOptionsContinuous. + * @param interval Delay from now to wake up in decrementer ticks. + * @param leeway Allowable leeway to wake time, if the kIOTimeOptionsWithLeeway option is set. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared. */ + virtual IOReturn setTimeout(uint32_t options, AbsoluteTime interval, AbsoluteTime leeway); /*! @function wakeAtTime - @abstract Setup a callback at this absolute time. - @discussion Starts the timer, which will expire at abstime. After it expires, the timer will call the 'action' registered in the init() function. This timer is not periodic, a further call is needed to reset and restart the timer after it expires. - @param options see kIOTimeOptionsWithLeeway and kIOTimeOptionsContinuous - @param abstime Absolute Time when to wake up, counted in 'decrementer' units and starts at zero when system boots. - @param leeway Allowable leeway to wake time, if the kIOTimeOptionsWithLeeway option is set - @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared by init or IOEventSource::setAction (qqv). + * @abstract Set up a callback at this absolute time. + * @discussion Starts the timer, which will expire at abstime. After it expires, the timer will call the 'action' registered in the init() function. This timer is not periodic; a further call is needed to reset and restart the timer after it expires. + * @param options See kIOTimeOptionsWithLeeway and kIOTimeOptionsContinuous. + * @param abstime Absolute Time when to wake up, counted in 'decrementer' units and starts at zero when system boots. + * @param leeway Allowable leeway to wake time, if the kIOTimeOptionsWithLeeway option is set. + * @result kIOReturnSuccess if everything is fine, kIOReturnNoResources if action hasn't been declared by init or IOEventSource::setAction (qqv).
*/ + virtual IOReturn wakeAtTime(uint32_t options, AbsoluteTime abstime, AbsoluteTime leeway); private: - static void timeoutAndRelease(void *self, void *c); - static void timeoutSignaled(void *self, void *c); + static void timeoutAndRelease(void *self, void *c); + static void timeoutSignaled(void *self, void *c); private: - OSMetaClassDeclareReservedUsed(IOTimerEventSource, 0); - OSMetaClassDeclareReservedUsed(IOTimerEventSource, 1); - OSMetaClassDeclareReservedUsed(IOTimerEventSource, 2); - OSMetaClassDeclareReservedUnused(IOTimerEventSource, 3); - OSMetaClassDeclareReservedUnused(IOTimerEventSource, 4); - OSMetaClassDeclareReservedUnused(IOTimerEventSource, 5); - OSMetaClassDeclareReservedUnused(IOTimerEventSource, 6); - OSMetaClassDeclareReservedUnused(IOTimerEventSource, 7); + OSMetaClassDeclareReservedUsed(IOTimerEventSource, 0); + OSMetaClassDeclareReservedUsed(IOTimerEventSource, 1); + OSMetaClassDeclareReservedUsed(IOTimerEventSource, 2); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 3); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 4); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 5); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 6); + OSMetaClassDeclareReservedUnused(IOTimerEventSource, 7); }; #endif /* !_IOTIMEREVENTSOURCE */ diff --git a/iokit/IOKit/IOTypes.h b/iokit/IOKit/IOTypes.h index c3f056001..be2137e18 100644 --- a/iokit/IOKit/IOTypes.h +++ b/iokit/IOKit/IOTypes.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef __IOKIT_IOTYPES_H +#ifndef __IOKIT_IOTYPES_H #define __IOKIT_IOTYPES_H #ifndef IOKIT @@ -45,14 +45,14 @@ extern "C" { #endif -#ifndef NULL +#ifndef NULL #if defined (__cplusplus) -#define NULL 0 +#define NULL 0 #else #define NULL ((void *)0) #endif #endif - + /* * Simple data types. 
*/ @@ -63,70 +63,67 @@ extern "C" { #include #endif -typedef UInt32 IOOptionBits; -typedef SInt32 IOFixed; -typedef UInt32 IOVersion; -typedef UInt32 IOItemCount; -typedef UInt32 IOCacheMode; +typedef UInt32 IOOptionBits; +typedef SInt32 IOFixed; +typedef UInt32 IOVersion; +typedef UInt32 IOItemCount; +typedef UInt32 IOCacheMode; -typedef UInt32 IOByteCount32; -typedef UInt64 IOByteCount64; +typedef UInt32 IOByteCount32; +typedef UInt64 IOByteCount64; -typedef UInt32 IOPhysicalAddress32; -typedef UInt64 IOPhysicalAddress64; -typedef UInt32 IOPhysicalLength32; -typedef UInt64 IOPhysicalLength64; +typedef UInt32 IOPhysicalAddress32; +typedef UInt64 IOPhysicalAddress64; +typedef UInt32 IOPhysicalLength32; +typedef UInt64 IOPhysicalLength64; #if !defined(__arm__) && !defined(__i386__) -typedef mach_vm_address_t IOVirtualAddress; +typedef mach_vm_address_t IOVirtualAddress; #else -typedef vm_address_t IOVirtualAddress; +typedef vm_address_t IOVirtualAddress; #endif #if !defined(__arm__) && !defined(__i386__) && !(defined(__x86_64__) && !defined(KERNEL)) && !(defined(__arm64__) && !defined(__LP64__)) -typedef IOByteCount64 IOByteCount; +typedef IOByteCount64 IOByteCount; #else -typedef IOByteCount32 IOByteCount; +typedef IOByteCount32 IOByteCount; #endif typedef IOVirtualAddress IOLogicalAddress; #if !defined(__arm__) && !defined(__i386__) && !(defined(__x86_64__) && !defined(KERNEL)) -typedef IOPhysicalAddress64 IOPhysicalAddress; -typedef IOPhysicalLength64 IOPhysicalLength; -#define IOPhysical32( hi, lo ) ((UInt64) lo + ((UInt64)(hi) << 32)) -#define IOPhysSize 64 +typedef IOPhysicalAddress64 IOPhysicalAddress; +typedef IOPhysicalLength64 IOPhysicalLength; +#define IOPhysical32( hi, lo ) ((UInt64) lo + ((UInt64)(hi) << 32)) +#define IOPhysSize 64 #else -typedef IOPhysicalAddress32 IOPhysicalAddress; -typedef IOPhysicalLength32 IOPhysicalLength; -#define IOPhysical32( hi, lo ) (lo) -#define IOPhysSize 32 +typedef IOPhysicalAddress32 IOPhysicalAddress; +typedef IOPhysicalLength32 IOPhysicalLength; +#define IOPhysical32( hi, lo ) (lo) +#define IOPhysSize 32 #endif -typedef struct -{ - IOPhysicalAddress address; - IOByteCount length; +typedef struct{ + IOPhysicalAddress address; + IOByteCount length; } IOPhysicalRange; -typedef struct -{ - IOVirtualAddress address; - IOByteCount length; +typedef struct{ + IOVirtualAddress address; + IOByteCount length; } IOVirtualRange; #if !defined(__arm__) && !defined(__i386__) -typedef IOVirtualRange IOAddressRange; +typedef IOVirtualRange IOAddressRange; #else -typedef struct -{ - mach_vm_address_t address; - mach_vm_size_t length; +typedef struct{ + mach_vm_address_t address; + mach_vm_size_t length; } IOAddressRange; #endif @@ -142,9 +139,9 @@ typedef struct { /* * Memory alignment -- specified as a power of two. 
*/ -typedef unsigned int IOAlignment; +typedef unsigned int IOAlignment; -#define IO_NULL_VM_TASK ((vm_task_t)0) +#define IO_NULL_VM_TASK ((vm_task_t)0) /* @@ -165,83 +162,83 @@ typedef OSObject * io_object_t; typedef struct OSObject * io_object_t; #endif #else /* KERNEL */ -typedef mach_port_t io_object_t; +typedef mach_port_t io_object_t; #endif /* KERNEL */ #endif /* __IOKIT_PORTS_DEFINED__ */ #include -typedef io_object_t io_connect_t; -typedef io_object_t io_enumerator_t; -typedef io_object_t io_iterator_t; -typedef io_object_t io_registry_entry_t; -typedef io_object_t io_service_t; +typedef io_object_t io_connect_t; +typedef io_object_t io_enumerator_t; +typedef io_object_t io_iterator_t; +typedef io_object_t io_registry_entry_t; +typedef io_object_t io_service_t; -#define IO_OBJECT_NULL ((io_object_t) 0) +#define IO_OBJECT_NULL ((io_object_t) 0) #endif /* MACH_KERNEL */ // IOConnectMapMemory memoryTypes enum { - kIODefaultMemoryType = 0 + kIODefaultMemoryType = 0 }; enum { - kIODefaultCache = 0, - kIOInhibitCache = 1, - kIOWriteThruCache = 2, - kIOCopybackCache = 3, - kIOWriteCombineCache = 4, - kIOCopybackInnerCache = 5, - kIOPostedWrite = 6 + kIODefaultCache = 0, + kIOInhibitCache = 1, + kIOWriteThruCache = 2, + kIOCopybackCache = 3, + kIOWriteCombineCache = 4, + kIOCopybackInnerCache = 5, + kIOPostedWrite = 6 }; // IOMemory mapping options enum { - kIOMapAnywhere = 0x00000001, + kIOMapAnywhere = 0x00000001, - kIOMapCacheMask = 0x00000700, - kIOMapCacheShift = 8, - kIOMapDefaultCache = kIODefaultCache << kIOMapCacheShift, - kIOMapInhibitCache = kIOInhibitCache << kIOMapCacheShift, - kIOMapWriteThruCache = kIOWriteThruCache << kIOMapCacheShift, - kIOMapCopybackCache = kIOCopybackCache << kIOMapCacheShift, - kIOMapWriteCombineCache = kIOWriteCombineCache << kIOMapCacheShift, - kIOMapCopybackInnerCache = kIOCopybackInnerCache << kIOMapCacheShift, - kIOMapPostedWrite = kIOPostedWrite << kIOMapCacheShift, + kIOMapCacheMask = 0x00000700, + kIOMapCacheShift = 8, + kIOMapDefaultCache = kIODefaultCache << kIOMapCacheShift, + kIOMapInhibitCache = kIOInhibitCache << kIOMapCacheShift, + kIOMapWriteThruCache = kIOWriteThruCache << kIOMapCacheShift, + kIOMapCopybackCache = kIOCopybackCache << kIOMapCacheShift, + kIOMapWriteCombineCache = kIOWriteCombineCache << kIOMapCacheShift, + kIOMapCopybackInnerCache = kIOCopybackInnerCache << kIOMapCacheShift, + kIOMapPostedWrite = kIOPostedWrite << kIOMapCacheShift, - kIOMapUserOptionsMask = 0x00000fff, + kIOMapUserOptionsMask = 0x00000fff, - kIOMapReadOnly = 0x00001000, + kIOMapReadOnly = 0x00001000, - kIOMapStatic = 0x01000000, - kIOMapReference = 0x02000000, - kIOMapUnique = 0x04000000, + kIOMapStatic = 0x01000000, + kIOMapReference = 0x02000000, + kIOMapUnique = 0x04000000, #ifdef XNU_KERNEL_PRIVATE - kIOMap64Bit = 0x08000000, + kIOMap64Bit = 0x08000000, #endif - kIOMapPrefault = 0x10000000, - kIOMapOverwrite = 0x20000000 + kIOMapPrefault = 0x10000000, + kIOMapOverwrite = 0x20000000 }; /*! @enum Scale Factors - @discussion Used when a scale_factor parameter is required to define a unit of time. - @constant kNanosecondScale Scale factor for nanosecond based times. - @constant kMicrosecondScale Scale factor for microsecond based times. - @constant kMillisecondScale Scale factor for millisecond based times. - @constant kTickScale Scale factor for the standard (100Hz) tick. - @constant kSecondScale Scale factor for second based times. */ + * @discussion Used when a scale_factor parameter is required to define a unit of time. 
+ * @constant kNanosecondScale Scale factor for nanosecond based times. + * @constant kMicrosecondScale Scale factor for microsecond based times. + * @constant kMillisecondScale Scale factor for millisecond based times. + * @constant kTickScale Scale factor for the standard (100Hz) tick. + * @constant kSecondScale Scale factor for second based times. */ enum { - kNanosecondScale = 1, - kMicrosecondScale = 1000, - kMillisecondScale = 1000 * 1000, - kSecondScale = 1000 * 1000 * 1000, - kTickScale = (kSecondScale / 100) + kNanosecondScale = 1, + kMicrosecondScale = 1000, + kMillisecondScale = 1000 * 1000, + kSecondScale = 1000 * 1000 * 1000, + kTickScale = (kSecondScale / 100) }; enum { - kIOConnectMethodVarOutputSize = -3 + kIOConnectMethodVarOutputSize = -3 }; /* compatibility types */ diff --git a/iokit/IOKit/IOUserClient.h b/iokit/IOKit/IOUserClient.h index 6a7149827..1c17dda61 100644 --- a/iokit/IOKit/IOUserClient.h +++ b/iokit/IOKit/IOUserClient.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -41,415 +41,412 @@ #include #endif -#define _IOUSERCLIENT_SENDASYNCRESULT64WITHOPTIONS_ 1 +#define _IOUSERCLIENT_SENDASYNCRESULT64WITHOPTIONS_ 1 enum { - kIOUCTypeMask = 0x0000000f, - kIOUCScalarIScalarO = 0, - kIOUCScalarIStructO = 2, - kIOUCStructIStructO = 3, - kIOUCScalarIStructI = 4, + kIOUCTypeMask = 0x0000000f, + kIOUCScalarIScalarO = 0, + kIOUCScalarIStructO = 2, + kIOUCStructIStructO = 3, + kIOUCScalarIStructI = 4, - kIOUCForegroundOnly = 0x00000010, + kIOUCForegroundOnly = 0x00000010, }; /*! @enum - @abstract Constant to denote a variable length structure argument to IOUserClient. - @constant kIOUCVariableStructureSize Use in the structures IOExternalMethod, IOExternalAsyncMethod, IOExternalMethodDispatch to specify the size of the structure is variable. -*/ + * @abstract Constant to denote a variable length structure argument to IOUserClient. + * @constant kIOUCVariableStructureSize Use in the structures IOExternalMethod, IOExternalAsyncMethod, IOExternalMethodDispatch to specify the size of the structure is variable. 
+ */ enum { - kIOUCVariableStructureSize = 0xffffffff + kIOUCVariableStructureSize = 0xffffffff }; typedef IOReturn (IOService::*IOMethod)(void * p1, void * p2, void * p3, - void * p4, void * p5, void * p6 ); + void * p4, void * p5, void * p6 ); typedef IOReturn (IOService::*IOAsyncMethod)(OSAsyncReference asyncRef, - void * p1, void * p2, void * p3, - void * p4, void * p5, void * p6 ); + void * p1, void * p2, void * p3, + void * p4, void * p5, void * p6 ); typedef IOReturn (IOService::*IOTrap)(void * p1, void * p2, void * p3, - void * p4, void * p5, void * p6 ); + void * p4, void * p5, void * p6 ); struct IOExternalMethod { - IOService * object; - IOMethod func; - IOOptionBits flags; - IOByteCount count0; - IOByteCount count1; + IOService * object; + IOMethod func; + IOOptionBits flags; + IOByteCount count0; + IOByteCount count1; }; struct IOExternalAsyncMethod { - IOService * object; - IOAsyncMethod func; - IOOptionBits flags; - IOByteCount count0; - IOByteCount count1; + IOService * object; + IOAsyncMethod func; + IOOptionBits flags; + IOByteCount count0; + IOByteCount count1; }; struct IOExternalTrap { - IOService * object; - IOTrap func; + IOService * object; + IOTrap func; }; enum { - kIOUserNotifyMaxMessageSize = 64 + kIOUserNotifyMaxMessageSize = 64 }; enum { - kIOUserNotifyOptionCanDrop = 0x1 /* Fail if queue is full, rather than infinitely queuing. */ + kIOUserNotifyOptionCanDrop = 0x1 /* Fail if queue is full, rather than infinitely queuing. */ }; // keys for clientHasPrivilege -#define kIOClientPrivilegeAdministrator "root" -#define kIOClientPrivilegeLocalUser "local" -#define kIOClientPrivilegeForeground "foreground" +#define kIOClientPrivilegeAdministrator "root" +#define kIOClientPrivilegeLocalUser "local" +#define kIOClientPrivilegeForeground "foreground" /*! @enum - @abstract Constants to specify the maximum number of scalar arguments in the IOExternalMethodArguments structure. These constants are documentary since the scalarInputCount, scalarOutputCount fields reflect the actual number passed. - @constant kIOExternalMethodScalarInputCountMax The maximum number of scalars able to passed on input. - @constant kIOExternalMethodScalarOutputCountMax The maximum number of scalars able to passed on output. -*/ + * @abstract Constants to specify the maximum number of scalar arguments in the IOExternalMethodArguments structure. These constants are documentary since the scalarInputCount, scalarOutputCount fields reflect the actual number passed. + * @constant kIOExternalMethodScalarInputCountMax The maximum number of scalars able to be passed on input. + * @constant kIOExternalMethodScalarOutputCountMax The maximum number of scalars able to be passed on output. + */ enum { - kIOExternalMethodScalarInputCountMax = 16, - kIOExternalMethodScalarOutputCountMax = 16, + kIOExternalMethodScalarInputCountMax = 16, + kIOExternalMethodScalarOutputCountMax = 16, };
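/*
 * [Editor's note] A sketch of the dispatch-table pattern these declarations
 * support (the IOExternalMethodArguments and IOExternalMethodDispatch types
 * are defined just below; the subclass and method names are hypothetical):
 *
 *     static IOReturn sDoIO(OSObject *target, void *reference,
 *                           IOExternalMethodArguments *args)
 *     {
 *         args->scalarOutput[0] = 2 * args->scalarInput[0];
 *         return kIOReturnSuccess;
 *     }
 *
 *     // function, scalar-in, struct-in, scalar-out, struct-out check counts
 *     static const IOExternalMethodDispatch sMethods[] = {
 *         { &sDoIO, 1, 0, 1, 0 },
 *     };
 *
 *     IOReturn MyUserClient::externalMethod(uint32_t selector,
 *         IOExternalMethodArguments *args, IOExternalMethodDispatch *dispatch,
 *         OSObject *target, void *reference)
 *     {
 *         if (selector < (sizeof(sMethods) / sizeof(sMethods[0]))) {
 *             dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *             target = this;
 *         }
 *         // The superclass validates the check* fields before calling sDoIO.
 *         return IOUserClient::externalMethod(selector, args, dispatch,
 *             target, reference);
 *     }
 */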
-struct IOExternalMethodArguments -{ - uint32_t version; +struct IOExternalMethodArguments { + uint32_t version; + + uint32_t selector; - uint32_t selector; + mach_port_t asyncWakePort; + io_user_reference_t * asyncReference; + uint32_t asyncReferenceCount; - mach_port_t asyncWakePort; - io_user_reference_t * asyncReference; - uint32_t asyncReferenceCount; + const uint64_t * scalarInput; + uint32_t scalarInputCount; - const uint64_t * scalarInput; - uint32_t scalarInputCount; + const void * structureInput; + uint32_t structureInputSize; - const void * structureInput; - uint32_t structureInputSize; + IOMemoryDescriptor * structureInputDescriptor; - IOMemoryDescriptor * structureInputDescriptor; - - uint64_t * scalarOutput; - uint32_t scalarOutputCount; + uint64_t * scalarOutput; + uint32_t scalarOutputCount; - void * structureOutput; - uint32_t structureOutputSize; + void * structureOutput; + uint32_t structureOutputSize; - IOMemoryDescriptor * structureOutputDescriptor; - uint32_t structureOutputDescriptorSize; + IOMemoryDescriptor * structureOutputDescriptor; + uint32_t structureOutputDescriptorSize; - uint32_t __reservedA; + uint32_t __reservedA; - OSObject ** structureVariableOutputData; + OSObject ** structureVariableOutputData; - uint32_t __reserved[30]; + uint32_t __reserved[30]; }; -typedef IOReturn (*IOExternalMethodAction)(OSObject * target, void * reference, - IOExternalMethodArguments * arguments); -struct IOExternalMethodDispatch -{ - IOExternalMethodAction function; - uint32_t checkScalarInputCount; - uint32_t checkStructureInputSize; - uint32_t checkScalarOutputCount; - uint32_t checkStructureOutputSize; +typedef IOReturn (*IOExternalMethodAction)(OSObject * target, void * reference, + IOExternalMethodArguments * arguments); +struct IOExternalMethodDispatch { + IOExternalMethodAction function; + uint32_t checkScalarInputCount; + uint32_t checkStructureInputSize; + uint32_t checkScalarOutputCount; + uint32_t checkStructureOutputSize; }; enum { -#define IO_EXTERNAL_METHOD_ARGUMENTS_CURRENT_VERSION 2 - kIOExternalMethodArgumentsCurrentVersion = IO_EXTERNAL_METHOD_ARGUMENTS_CURRENT_VERSION +#define IO_EXTERNAL_METHOD_ARGUMENTS_CURRENT_VERSION 2 + kIOExternalMethodArgumentsCurrentVersion = IO_EXTERNAL_METHOD_ARGUMENTS_CURRENT_VERSION }; /*! - @class IOUserClient - @abstract Provides a basis for communication between client applications and I/O Kit objects. -*/ + * @class IOUserClient + * @abstract Provides a basis for communication between client applications and I/O Kit objects. + */ class IOUserClient : public IOService { - OSDeclareAbstractStructors(IOUserClient) + OSDeclareAbstractStructors(IOUserClient) #if IOKITSTATS - friend class IOStatistics; + friend class IOStatistics; #endif protected: /*! @struct ExpansionData - @discussion This structure will be used to expand the capablilties of this class in the future. -*/ - struct ExpansionData { + * @discussion This structure will be used to expand the capabilities of this class in the future. + */ + struct ExpansionData { #if IOKITSTATS - IOUserClientCounter *counter; + IOUserClientCounter *counter; #else - void *iokitstatsReserved; + void *iokitstatsReserved; #endif - }; + }; /*! @var reserved - Reserved for future use.
(Internal use only) -*/ - APPLE_KEXT_WSHADOW_PUSH; - ExpansionData * reserved; - APPLE_KEXT_WSHADOW_POP; + * Reserved for future use. (Internal use only) + */ + APPLE_KEXT_WSHADOW_PUSH; + ExpansionData * reserved; + APPLE_KEXT_WSHADOW_POP; - bool reserve(); + bool reserve(); #ifdef XNU_KERNEL_PRIVATE public: - OSSet * mappings; - UInt8 sharedInstance; - UInt8 closed; - UInt8 __ipcFinal; - UInt8 __reservedA[1]; - volatile SInt32 __ipc; - queue_head_t owners; - IOLock * lock; + OSSet * mappings; + UInt8 sharedInstance; + UInt8 closed; + UInt8 __ipcFinal; + UInt8 __reservedA[1]; + volatile SInt32 __ipc; + queue_head_t owners; + IOLock * lock; #if __LP64__ - void * __reserved[4]; + void * __reserved[4]; #else - void * __reserved[3]; + void * __reserved[3]; #endif #else /* XNU_KERNEL_PRIVATE */ private: - void * __reserved[9]; + void * __reserved[9]; #endif /* XNU_KERNEL_PRIVATE */ public: - virtual IOReturn externalMethod( uint32_t selector, IOExternalMethodArguments * arguments, - IOExternalMethodDispatch * dispatch = 0, OSObject * target = 0, void * reference = 0 ); + virtual IOReturn externalMethod( uint32_t selector, IOExternalMethodArguments * arguments, + IOExternalMethodDispatch * dispatch = 0, OSObject * target = 0, void * reference = 0 ); - virtual IOReturn registerNotificationPort( - mach_port_t port, UInt32 type, io_user_reference_t refCon); + virtual IOReturn registerNotificationPort( + mach_port_t port, UInt32 type, io_user_reference_t refCon); private: #if __LP64__ - OSMetaClassDeclareReservedUnused(IOUserClient, 0); - OSMetaClassDeclareReservedUnused(IOUserClient, 1); + OSMetaClassDeclareReservedUnused(IOUserClient, 0); + OSMetaClassDeclareReservedUnused(IOUserClient, 1); #else - OSMetaClassDeclareReservedUsed(IOUserClient, 0); - OSMetaClassDeclareReservedUsed(IOUserClient, 1); + OSMetaClassDeclareReservedUsed(IOUserClient, 0); + OSMetaClassDeclareReservedUsed(IOUserClient, 1); #endif - OSMetaClassDeclareReservedUnused(IOUserClient, 2); - OSMetaClassDeclareReservedUnused(IOUserClient, 3); - OSMetaClassDeclareReservedUnused(IOUserClient, 4); - OSMetaClassDeclareReservedUnused(IOUserClient, 5); - OSMetaClassDeclareReservedUnused(IOUserClient, 6); - OSMetaClassDeclareReservedUnused(IOUserClient, 7); - OSMetaClassDeclareReservedUnused(IOUserClient, 8); - OSMetaClassDeclareReservedUnused(IOUserClient, 9); - OSMetaClassDeclareReservedUnused(IOUserClient, 10); - OSMetaClassDeclareReservedUnused(IOUserClient, 11); - OSMetaClassDeclareReservedUnused(IOUserClient, 12); - OSMetaClassDeclareReservedUnused(IOUserClient, 13); - OSMetaClassDeclareReservedUnused(IOUserClient, 14); - OSMetaClassDeclareReservedUnused(IOUserClient, 15); + OSMetaClassDeclareReservedUnused(IOUserClient, 2); + OSMetaClassDeclareReservedUnused(IOUserClient, 3); + OSMetaClassDeclareReservedUnused(IOUserClient, 4); + OSMetaClassDeclareReservedUnused(IOUserClient, 5); + OSMetaClassDeclareReservedUnused(IOUserClient, 6); + OSMetaClassDeclareReservedUnused(IOUserClient, 7); + OSMetaClassDeclareReservedUnused(IOUserClient, 8); + OSMetaClassDeclareReservedUnused(IOUserClient, 9); + OSMetaClassDeclareReservedUnused(IOUserClient, 10); + OSMetaClassDeclareReservedUnused(IOUserClient, 11); + OSMetaClassDeclareReservedUnused(IOUserClient, 12); + OSMetaClassDeclareReservedUnused(IOUserClient, 13); + OSMetaClassDeclareReservedUnused(IOUserClient, 14); + OSMetaClassDeclareReservedUnused(IOUserClient, 15); #ifdef XNU_KERNEL_PRIVATE - /* Available within xnu source only */ +/* Available within xnu source only */ public: - static void 
initialize( void ); - static void destroyUserReferences( OSObject * obj ); - static bool finalizeUserReferences( OSObject * obj ); - IOMemoryMap * mapClientMemory64( IOOptionBits type, - task_t task, - IOOptionBits mapFlags = kIOMapAnywhere, - mach_vm_address_t atAddress = 0 ); - IOReturn registerOwner(task_t task); - void noMoreSenders(void); + static void initialize( void ); + static void destroyUserReferences( OSObject * obj ); + static bool finalizeUserReferences( OSObject * obj ); + IOMemoryMap * mapClientMemory64( IOOptionBits type, + task_t task, + IOOptionBits mapFlags = kIOMapAnywhere, + mach_vm_address_t atAddress = 0 ); + IOReturn registerOwner(task_t task); + void noMoreSenders(void); #endif /* XNU_KERNEL_PRIVATE */ protected: - static IOReturn sendAsyncResult(OSAsyncReference reference, - IOReturn result, void *args[], UInt32 numArgs); - static void setAsyncReference(OSAsyncReference asyncRef, - mach_port_t wakePort, - void *callback, void *refcon); - - static IOReturn sendAsyncResult64(OSAsyncReference64 reference, - IOReturn result, io_user_reference_t args[], UInt32 numArgs); - - /*! - @function sendAsyncResult64WithOptions - @abstract Send a notification as with sendAsyncResult, but with finite queueing. - @discussion IOUserClient::sendAsyncResult64() will infitely queue messages if the client - is not processing them in a timely fashion. This variant will not, for simple - handling of situations where clients may be expected to stop processing messages. - */ - static IOReturn sendAsyncResult64WithOptions(OSAsyncReference64 reference, - IOReturn result, io_user_reference_t args[], UInt32 numArgs, - IOOptionBits options); - - static void setAsyncReference64(OSAsyncReference64 asyncRef, - mach_port_t wakePort, - mach_vm_address_t callback, io_user_reference_t refcon); - - static void setAsyncReference64(OSAsyncReference64 asyncRef, - mach_port_t wakePort, - mach_vm_address_t callback, io_user_reference_t refcon, - task_t task); + static IOReturn sendAsyncResult(OSAsyncReference reference, + IOReturn result, void *args[], UInt32 numArgs); + static void setAsyncReference(OSAsyncReference asyncRef, + mach_port_t wakePort, + void *callback, void *refcon); + + static IOReturn sendAsyncResult64(OSAsyncReference64 reference, + IOReturn result, io_user_reference_t args[], UInt32 numArgs); + +/*! + * @function sendAsyncResult64WithOptions + * @abstract Send a notification as with sendAsyncResult, but with finite queueing. + * @discussion IOUserClient::sendAsyncResult64() will infinitely queue messages if the client + * is not processing them in a timely fashion. This variant will not, for simple + * handling of situations where clients may be expected to stop processing messages. + */ + static IOReturn sendAsyncResult64WithOptions(OSAsyncReference64 reference, + IOReturn result, io_user_reference_t args[], UInt32 numArgs, + IOOptionBits options); + + static void setAsyncReference64(OSAsyncReference64 asyncRef, + mach_port_t wakePort, + mach_vm_address_t callback, io_user_reference_t refcon); + + static void setAsyncReference64(OSAsyncReference64 asyncRef, + mach_port_t wakePort, + mach_vm_address_t callback, io_user_reference_t refcon, + task_t task); public: - static IOReturn clientHasAuthorization( task_t task, - IOService * service ); - - static IOReturn clientHasPrivilege( void * securityToken, - const char * privilegeName ); - - static OSObject * copyClientEntitlement( task_t task, - const char * entitlement ); - - /*!
- @function releaseAsyncReference64 - @abstract Release the mach_port_t reference held within the OSAsyncReference64 structure. - @discussion The OSAsyncReference64 structure passed to async methods holds a reference to the wakeup mach port, which should be released to balance each async method call. Behavior is undefined if these calls are not correctly balanced. - @param reference The reference passed to the subclass IOAsyncMethod, or externalMethod() in the IOExternalMethodArguments.asyncReference field. - @result A return code. - */ - static IOReturn releaseAsyncReference64(OSAsyncReference64 reference); - /*! - @function releaseNotificationPort - @abstract Release the mach_port_t passed to registerNotificationPort(). - @discussion The mach_port_t passed to the registerNotificationPort() methods should be released to balance each call to registerNotificationPort(). Behavior is undefined if these calls are not correctly balanced. - @param port The mach_port_t argument previously passed to the subclass implementation of registerNotificationPort(). - @result A return code. - */ - static IOReturn releaseNotificationPort(mach_port_t port); - - virtual bool init() APPLE_KEXT_OVERRIDE; - virtual bool init( OSDictionary * dictionary ) APPLE_KEXT_OVERRIDE; - // Currently ignores the all args, just passes up to IOService::init() - virtual bool initWithTask( - task_t owningTask, void * securityToken, UInt32 type, - OSDictionary * properties); - - virtual bool initWithTask( - task_t owningTask, void * securityToken, UInt32 type); - - virtual void free() APPLE_KEXT_OVERRIDE; - - virtual IOReturn clientClose( void ); - virtual IOReturn clientDied( void ); - - virtual IOService * getService( void ); - - virtual IOReturn registerNotificationPort( + static IOReturn clientHasAuthorization( task_t task, + IOService * service ); + + static IOReturn clientHasPrivilege( void * securityToken, + const char * privilegeName ); + + static OSObject * copyClientEntitlement( task_t task, + const char * entitlement ); + +/*! + * @function releaseAsyncReference64 + * @abstract Release the mach_port_t reference held within the OSAsyncReference64 structure. + * @discussion The OSAsyncReference64 structure passed to async methods holds a reference to the wakeup mach port, which should be released to balance each async method call. Behavior is undefined if these calls are not correctly balanced. + * @param reference The reference passed to the subclass IOAsyncMethod, or externalMethod() in the IOExternalMethodArguments.asyncReference field. + * @result A return code. + */ + static IOReturn releaseAsyncReference64(OSAsyncReference64 reference); +/*! + * @function releaseNotificationPort + * @abstract Release the mach_port_t passed to registerNotificationPort(). + * @discussion The mach_port_t passed to the registerNotificationPort() methods should be released to balance each call to registerNotificationPort(). Behavior is undefined if these calls are not correctly balanced. + * @param port The mach_port_t argument previously passed to the subclass implementation of registerNotificationPort(). + * @result A return code. + */ + static IOReturn releaseNotificationPort(mach_port_t port);
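/*
 * [Editor's note] A sketch of gating a user client on an entitlement with the
 * copyClientEntitlement() declared above (the subclass name and entitlement
 * string are hypothetical):
 *
 *     bool MyUserClient::initWithTask(task_t owningTask, void *securityToken,
 *                                     UInt32 type, OSDictionary *properties)
 *     {
 *         OSObject *entitlement = IOUserClient::copyClientEntitlement(
 *             owningTask, "com.example.driver.user-access");
 *         bool entitled = (entitlement == kOSBooleanTrue);
 *         OSSafeReleaseNULL(entitlement);   // copy* results must be released
 *         if (!entitled) {
 *             return false;                 // refuse untrusted callers
 *         }
 *         return IOUserClient::initWithTask(owningTask, securityToken,
 *             type, properties);
 *     }
 */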
+ */ + static IOReturn releaseNotificationPort(mach_port_t port); + + virtual bool init() APPLE_KEXT_OVERRIDE; + virtual bool init( OSDictionary * dictionary ) APPLE_KEXT_OVERRIDE; +// Currently ignores all the args, just passes up to IOService::init() + virtual bool initWithTask( + task_t owningTask, void * securityToken, UInt32 type, + OSDictionary * properties); + + virtual bool initWithTask( + task_t owningTask, void * securityToken, UInt32 type); + + virtual void free() APPLE_KEXT_OVERRIDE; + + virtual IOReturn clientClose( void ); + virtual IOReturn clientDied( void ); + + virtual IOService * getService( void ); + + virtual IOReturn registerNotificationPort( mach_port_t port, UInt32 type, UInt32 refCon ); - virtual IOReturn getNotificationSemaphore( UInt32 notification_type, - semaphore_t * semaphore ); + virtual IOReturn getNotificationSemaphore( UInt32 notification_type, + semaphore_t * semaphore ); - virtual IOReturn connectClient( IOUserClient * client ); + virtual IOReturn connectClient( IOUserClient * client ); - // memory will be released by user client when last map is destroyed - virtual IOReturn clientMemoryForType( UInt32 type, - IOOptionBits * options, - IOMemoryDescriptor ** memory ); +// memory will be released by user client when last map is destroyed + virtual IOReturn clientMemoryForType( UInt32 type, + IOOptionBits * options, + IOMemoryDescriptor ** memory ); #if !__LP64__ private: APPLE_KEXT_COMPATIBILITY_VIRTUAL IOMemoryMap * mapClientMemory( IOOptionBits type, - task_t task, - IOOptionBits mapFlags = kIOMapAnywhere, - IOVirtualAddress atAddress = 0 ); + task_t task, + IOOptionBits mapFlags = kIOMapAnywhere, + IOVirtualAddress atAddress = 0 ); #endif - static IOReturn _sendAsyncResult64(OSAsyncReference64 reference, - IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options); + static IOReturn _sendAsyncResult64(OSAsyncReference64 reference, + IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options); public: - /*! - @function removeMappingForDescriptor - Remove the first mapping created from the memory descriptor returned by clientMemoryForType() from IOUserClient's list of mappings. If such a mapping exists, it is retained and the reference currently held by IOUserClient is returned to the caller. - @param memory The memory descriptor instance previously returned by the implementation of clientMemoryForType(). - @result A reference to the first IOMemoryMap instance found in the list of mappings created by IOUserClient from that passed memory descriptor is returned, or zero if none exist. The caller should release this reference. - */ - IOMemoryMap * removeMappingForDescriptor(IOMemoryDescriptor * memory); - - /*! - @function exportObjectToClient - Make an arbitrary OSObject available to the client task. - @param task The task. - @param obj The object we want to export to the client. - @param clientObj Returned value is the client's port name. - */ - virtual IOReturn exportObjectToClient(task_t task, - OSObject *obj, io_object_t *clientObj); +/*! + * @function removeMappingForDescriptor + * Remove the first mapping created from the memory descriptor returned by clientMemoryForType() from IOUserClient's list of mappings. If such a mapping exists, it is retained and the reference currently held by IOUserClient is returned to the caller. + * @param memory The memory descriptor instance previously returned by the implementation of clientMemoryForType().
+ * @result A reference to the first IOMemoryMap instance found in the list of mappings created by IOUserClient from that passed memory descriptor is returned, or zero if none exist. The caller should release this reference. + */ + IOMemoryMap * removeMappingForDescriptor(IOMemoryDescriptor * memory); + +/*! + * @function exportObjectToClient + * Make an arbitrary OSObject available to the client task. + * @param task The task. + * @param obj The object we want to export to the client. + * @param clientObj Returned value is the client's port name. + */ + virtual IOReturn exportObjectToClient(task_t task, + OSObject *obj, io_object_t *clientObj); #if KERNEL_PRIVATE - /*! - @function copyPortNameForObjectInTask - Make an arbitrary OSObject available to the client task as a port name. - The port does not respond to any IOKit IPC calls. - @param task The task. - @param object The object we want to export to the client. - The port holds a reference on the object, this function does not consume any reference on the object. - @param port_name Returned value is the task's port name. It has one send right created by this function. - @result A return code. - */ - static IOReturn copyPortNameForObjectInTask(task_t task, OSObject *object, - mach_port_name_t * port_name); - - /*! - @function copyObjectForPortNameInTask - Look up an OSObject given a task's port name created with copyPortNameForObjectInTask(). - @param task The task. - @param port_name The task's port name. This function does not consume any reference on the port name. - @param object If the port name is valid, a reference to the object is returned. It should be released by the caller. - @result A return code. - */ +/*! + * @function copyPortNameForObjectInTask + * Make an arbitrary OSObject available to the client task as a port name. + * The port does not respond to any IOKit IPC calls. + * @param task The task. + * @param object The object we want to export to the client. + * The port holds a reference on the object, this function does not consume any reference on the object. + * @param port_name Returned value is the task's port name. It has one send right created by this function. + * @result A return code. + */ + static IOReturn copyPortNameForObjectInTask(task_t task, OSObject *object, + mach_port_name_t * port_name); + +/*! + * @function copyObjectForPortNameInTask + * Look up an OSObject given a task's port name created with copyPortNameForObjectInTask(). + * @param task The task. + * @param port_name The task's port name. This function does not consume any reference on the port name. + * @param object If the port name is valid, a reference to the object is returned. It should be released by the caller. + * @result A return code. + */ static IOReturn copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name, - OSObject **object); - - /*! - @function adjustPortNameReferencesInTask - Adjust the send rights for a port name created with copyPortNameForObjectInTask(). - @param task The task. - @param port_name The task's port name. - @param delta Signed value change to the number of user references. - @result A return code. - */ + OSObject **object); + +/*! + * @function adjustPortNameReferencesInTask + * Adjust the send rights for a port name created with copyPortNameForObjectInTask(). + * @param task The task. + * @param port_name The task's port name. + * @param delta Signed value change to the number of user references. + * @result A return code. 
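By way of illustration, the round trip through these KERNEL_PRIVATE port-name helpers looks roughly like the sketch below; the function name and local variables are hypothetical, and error handling is abbreviated.

#if KERNEL_PRIVATE
// Hypothetical sketch: publish an OSObject to a task as a bare port name,
// look it up again, then drop the send right created by the export.
static IOReturn
exportAndRecoverObject(task_t task, OSObject * object)
{
    mach_port_name_t name;
    // Creates one send right in the task; the port retains the object.
    IOReturn ret = IOUserClient::copyPortNameForObjectInTask(task, object, &name);
    if (ret != kIOReturnSuccess) {
        return ret;
    }

    OSObject * recovered = NULL;
    // Returns a retained reference; the port name itself is not consumed.
    ret = IOUserClient::copyObjectForPortNameInTask(task, name, &recovered);
    if (ret == kIOReturnSuccess && recovered) {
        recovered->release();   // balance the reference taken by the lookup
    }

    // Give back the single send right copyPortNameForObjectInTask created.
    return IOUserClient::adjustPortNameReferencesInTask(task, name, -1);
}
#endif /* KERNEL_PRIVATE */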
+ */ static IOReturn adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta); #define IOUC_COPYPORTNAMEFOROBJECTINTASK 1 #endif /* KERNEL_PRIVATE */ - // Old methods for accessing method vector backward compatiblility only - virtual IOExternalMethod * - getExternalMethodForIndex( UInt32 index ) +// Old methods for accessing method vector backward compatibility only + virtual IOExternalMethod * + getExternalMethodForIndex( UInt32 index ) APPLE_KEXT_DEPRECATED; - virtual IOExternalAsyncMethod * - getExternalAsyncMethodForIndex( UInt32 index ) + virtual IOExternalAsyncMethod * + getExternalAsyncMethodForIndex( UInt32 index ) APPLE_KEXT_DEPRECATED; - // Methods for accessing method vector. - virtual IOExternalMethod * - getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ); - virtual IOExternalAsyncMethod * - getAsyncTargetAndMethodForIndex( IOService ** targetP, UInt32 index ); +// Methods for accessing method vector. + virtual IOExternalMethod * + getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ); + virtual IOExternalAsyncMethod * + getAsyncTargetAndMethodForIndex( IOService ** targetP, UInt32 index ); - // Methods for accessing trap vector - old and new style - virtual IOExternalTrap * - getExternalTrapForIndex( UInt32 index ) +// Methods for accessing trap vector - old and new style + virtual IOExternalTrap * + getExternalTrapForIndex( UInt32 index ) APPLE_KEXT_DEPRECATED; - virtual IOExternalTrap * - getTargetAndTrapForIndex( IOService **targetP, UInt32 index ); + virtual IOExternalTrap * + getTargetAndTrapForIndex( IOService **targetP, UInt32 index ); }; #endif /* ! _IOKIT_IOUSERCLIENT_H */ - diff --git a/iokit/IOKit/IOWorkLoop.h b/iokit/IOKit/IOWorkLoop.h index c62c13216..2c1fd64f5 100644 --- a/iokit/IOKit/IOWorkLoop.h +++ b/iokit/IOKit/IOWorkLoop.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -45,319 +45,319 @@ class IOTimerEventSource; class IOCommandGate; /*! @class IOWorkLoop - @discussion An IOWorkLoop is a thread of control that is intended to be used to provide single threaded access to hardware. This class has no knowledge of the nature and type of the events that it marshals and forwards. When a device driver successfully starts (see IOService::start), it is expected to create the event sources it will need to receive events. Then a work loop is initialized and the events are added to the work loop for monitoring.
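As a concrete illustration of that setup, a minimal sketch follows; MyDriver, its fWorkLoop/fInterruptES members, and handleInterrupt are hypothetical names, with IOInterruptEventSource standing in for whatever event sources the family actually needs.

bool
MyDriver::start(IOService * provider)
{
    if (!IOService::start(provider)) {
        return false;
    }

    fWorkLoop = IOWorkLoop::workLoop();        // retained; released in free()
    if (!fWorkLoop) {
        return false;
    }

    // Route the provider's interrupt 0 onto the work loop.
    fInterruptES = IOInterruptEventSource::interruptEventSource(this,
        OSMemberFunctionCast(IOInterruptEventSource::Action, this,
        &MyDriver::handleInterrupt), provider, 0);
    if (!fInterruptES || fWorkLoop->addEventSource(fInterruptES) != kIOReturnSuccess) {
        return false;
    }

    fInterruptES->enable();
    return true;
}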
In general this set up will be automated by the family superclass of the specific device. -

- The thread main method walks the event source linked list and messages each one requesting a work check. At this point each event source is expected to notify its registered owner that the event has occurred. After each event has been walked and each indicates that another loop isn't required (by setting the 'more' flag to false) the thread will go to sleep on a signaling semaphore. -

- When an event source is registered with a work loop it is informed of the semaphore to use to wake up the loop. -*/ + * @discussion An IOWorkLoop is a thread of control that is intended to be used to provide single threaded access to hardware. This class has no knowledge of the nature and type of the events that it marshals and forwards. When a device driver successfully starts (see IOService::start), it is expected to create the event sources it will need to receive events. Then a work loop is initialized and the events are added to the work loop for monitoring. In general this setup will be automated by the family superclass of the specific device. + *

+ * The thread main method walks the event source linked list and messages each one requesting a work check. At this point each event source is expected to notify its registered owner that the event has occurred. After each event has been walked and each indicates that another loop isn't required (by setting the 'more' flag to false) the thread will go to sleep on a signaling semaphore. + *

+ * When an event source is registered with a work loop it is informed of the semaphore to use to wake up the loop. + */ class IOWorkLoop : public OSObject { - OSDeclareDefaultStructors(IOWorkLoop) + OSDeclareDefaultStructors(IOWorkLoop) public: /*! - @typedef Action - @discussion Type and arguments of callout C function that is used when -a runCommand is executed by a client. Cast to this type when you want a C++ -member function to be used. Note the arg1 - arg3 parameters are straight pass -through from the runCommand to the action callout. - @param target - Target of the function, can be used as a refcon. Note if a C++ function -was specified, this parameter is implicitly the first parameter in the target -member function's parameter list. - @param arg0 Argument to action from run operation. - @param arg1 Argument to action from run operation. - @param arg2 Argument to action from run operation. - @param arg3 Argument to action from run operation. -*/ - typedef IOReturn (*Action)(OSObject *target, - void *arg0, void *arg1, - void *arg2, void *arg3); + * @typedef Action + * @discussion Type and arguments of callout C function that is used when + * a runCommand is executed by a client. Cast to this type when you want a C++ + * member function to be used. Note the arg1 - arg3 parameters are straight pass + * through from the runCommand to the action callout. + * @param target + * Target of the function, can be used as a refcon. Note if a C++ function + * was specified, this parameter is implicitly the first parameter in the target + * member function's parameter list. + * @param arg0 Argument to action from run operation. + * @param arg1 Argument to action from run operation. + * @param arg2 Argument to action from run operation. + * @param arg3 Argument to action from run operation. + */ + typedef IOReturn (*Action)(OSObject *target, + void *arg0, void *arg1, + void *arg2, void *arg3); #ifdef __BLOCKS__ - typedef IOReturn (^ActionBlock)(); + typedef IOReturn (^ActionBlock)(); #endif /* __BLOCKS__ */ - enum { - kPreciousStack = 0x00000001, - kTimeLockPanics = 0x00000002, - }; + enum { + kPreciousStack = 0x00000001, + kTimeLockPanics = 0x00000002, + }; private: /*! @function threadMainContinuation - @abstract Static function that calls the threadMain function. -*/ - static void threadMainContinuation(IOWorkLoop *self); - + * @abstract Static function that calls the threadMain function. + */ + static void threadMainContinuation(IOWorkLoop *self); + /*! @function eventSourcePerformsWork - @abstract Checks if the event source passed in overrides checkForWork() to perform any work. -IOWorkLoop uses this to determine if the event source should be polled in runEventSources() or not. - @param inEventSource The event source to check. -*/ + * @abstract Checks if the event source passed in overrides checkForWork() to perform any work. + * IOWorkLoop uses this to determine if the event source should be polled in runEventSources() or not. + * @param inEventSource The event source to check. + */ bool eventSourcePerformsWork(IOEventSource *inEventSource); - + protected: /*! @typedef maintCommandEnum - @discussion Enumeration of commands that _maintCommand can deal with. - @constant mAddEvent Used to tag a Remove event source command. - @constant mRemoveEvent Used to tag a Remove event source command. -*/ - typedef enum { mAddEvent, mRemoveEvent } maintCommandEnum; + * @discussion Enumeration of commands that _maintCommand can deal with. + * @constant mAddEvent Used to tag an Add event source command.
+ * @constant mRemoveEvent Used to tag a Remove event source command. + */ + typedef enum { mAddEvent, mRemoveEvent } maintCommandEnum; /*! @var gateLock - Mutual exclusion lock that is used by close and open Gate functions. - This is a recursive lock, which allows multiple layers of code to share a single IOWorkLoop without deadlock. This is common in IOKit since threads of execution tend to follow the service plane in the IORegistry, and multiple objects along the call path may acquire the gate for the same (shared) workloop. -*/ - IORecursiveLock *gateLock; + * Mutual exclusion lock that is used by close and open Gate functions. + * This is a recursive lock, which allows multiple layers of code to share a single IOWorkLoop without deadlock. This is common in IOKit since threads of execution tend to follow the service plane in the IORegistry, and multiple objects along the call path may acquire the gate for the same (shared) workloop. + */ + IORecursiveLock *gateLock; -/*! @var eventChain - Pointer to first event source in linked list. -*/ - IOEventSource *eventChain; +/*! @var eventChain + * Pointer to first event source in linked list. + */ + IOEventSource *eventChain; -/*! @var controlG - Internal control gate to maintain event system. -*/ - IOCommandGate *controlG; +/*! @var controlG + * Internal control gate to maintain event system. + */ + IOCommandGate *controlG; /*! @var workToDoLock - The spin lock that is used to guard the 'workToDo' variable. -*/ - IOSimpleLock *workToDoLock; + * The spin lock that is used to guard the 'workToDo' variable. + */ + IOSimpleLock *workToDoLock; -/*! @var workThread - Work loop thread. -*/ - IOThread workThread; +/*! @var workThread + * Work loop thread. + */ + IOThread workThread; /*! @var workToDo - Used to to indicate that an interrupt has fired and needs to be processed. -*/ - volatile bool workToDo; + * Used to indicate that an interrupt has fired and needs to be processed. + */ + volatile bool workToDo; /*! @var loopRestart - Set if an event chain has been changed and the system has to be rechecked from start. (Internal use only) -*/ - bool loopRestart; + * Set if an event chain has been changed and the system has to be rechecked from start. (Internal use only) + */ + bool loopRestart; /*! @struct ExpansionData - @discussion This structure will be used to expand the capablilties of the IOWorkLoop in the future. -*/ - struct ExpansionData { - IOOptionBits options; - IOEventSource *passiveEventChain; + * @discussion This structure will be used to expand the capabilities of the IOWorkLoop in the future. + */ + struct ExpansionData { + IOOptionBits options; + IOEventSource *passiveEventChain; #if IOKITSTATS - struct IOWorkLoopCounter *counter; + struct IOWorkLoopCounter *counter; #else - void *iokitstatsReserved; + void *iokitstatsReserved; #endif - uint64_t lockInterval; - uint64_t lockTime; - }; + uint64_t lockInterval; + uint64_t lockTime; + }; /*! @var reserved - Reserved for future use. (Internal use only) -*/ - ExpansionData *reserved; + * Reserved for future use. (Internal use only) + */ + ExpansionData *reserved; /*! @function _maintRequest - @abstract Synchronous implementation of addEventSource and removeEventSource functions. - @discussion This function implements the commands as defined in the maintCommandEnum. It can be subclassed but it isn't an external API in the usual sense.
A subclass implementation of _maintRequest would be called synchronously with respect to the work loop and it should be implemented in the usual way that an ioctl would be. - @return kIOReturnUnsupported if the command given is not implemented, kIOReturnSuccess otherwise. -*/ - virtual IOReturn _maintRequest(void *command, void *data, void *, void *); + * @abstract Synchronous implementation of addEventSource and removeEventSource functions. + * @discussion This function implements the commands as defined in the maintCommandEnum. It can be subclassed but it isn't an external API in the usual sense. A subclass implementation of _maintRequest would be called synchronously with respect to the work loop and it should be implemented in the usual way that an ioctl would be. + * @return kIOReturnUnsupported if the command given is not implemented, kIOReturnSuccess otherwise. + */ + virtual IOReturn _maintRequest(void *command, void *data, void *, void *); /*! @function free - @discussion Mandatory free of the object independent of the current retain count. If the work loop is running, this method will not return until the thread has successfully terminated. Each event source in the chain will be released and the working semaphore will be destroyed. -

- If the client has some outstanding requests on an event they will never be informed of completion. If an external thread is blocked on any of the event sources they will be awakened with a KERN_INTERUPTED status. -*/ - virtual void free() APPLE_KEXT_OVERRIDE; + * @discussion Mandatory free of the object independent of the current retain count. If the work loop is running, this method will not return until the thread has successfully terminated. Each event source in the chain will be released and the working semaphore will be destroyed. + *

+ * If the client has some outstanding requests on an event they will never be informed of completion. If an external thread is blocked on any of the event sources they will be awakened with a KERN_INTERRUPTED status. + */ + virtual void free() APPLE_KEXT_OVERRIDE; /*! @function threadMain - @discussion Work loop threads main function. This function consists of 3 - loops: the outermost loop is the semaphore clear and wait loop, the middle - loop terminates when there is no more work, and the inside loop walks the - event list calling the checkForWork method in each event source. If an - event source has more work to do, it can set the more flag and the middle - loop will repeat. When no more work is outstanding the outermost will - sleep until an event is signalled. -*/ - virtual void threadMain(); + * @discussion Work loop thread's main function. This function consists of 3 + * loops: the outermost loop is the semaphore clear and wait loop, the middle + * loop terminates when there is no more work, and the inside loop walks the + * event list calling the checkForWork method in each event source. If an + * event source has more work to do, it can set the more flag and the middle + * loop will repeat. When no more work is outstanding the outermost will + * sleep until an event is signalled. + */ + virtual void threadMain(); public: /*! @function workLoop - @abstract Factory member function to construct and intialize a work loop. - @result Returns a workLoop instance if constructed successfully, 0 otherwise. -*/ - static IOWorkLoop *workLoop(); + * @abstract Factory member function to construct and initialize a work loop. + * @result Returns a workLoop instance if constructed successfully, 0 otherwise. + */ + static IOWorkLoop *workLoop(); /*! @function workLoopWithOptions(IOOptionBits options) - @abstract Factory member function to constuct and intialize a work loop. - @param options Options - kPreciousStack to avoid stack deallocation on paging path. - @result Returns a workLoop instance if constructed successfully, 0 otherwise. -*/ - static IOWorkLoop *workLoopWithOptions(IOOptionBits options); + * @abstract Factory member function to construct and initialize a work loop. + * @param options Options - kPreciousStack to avoid stack deallocation on paging path. + * @result Returns a workLoop instance if constructed successfully, 0 otherwise. + */ + static IOWorkLoop *workLoopWithOptions(IOOptionBits options); /*! @function init - @discussion Initializes an instance of the workloop. This method creates and initializes the signaling semaphore, the controller gate lock, and spawns the thread that will continue executing. - @result Returns true if initialized successfully, false otherwise. -*/ - virtual bool init() APPLE_KEXT_OVERRIDE; + * @discussion Initializes an instance of the workloop. This method creates and initializes the signaling semaphore, the controller gate lock, and spawns the thread that will continue executing. + * @result Returns true if initialized successfully, false otherwise. + */ + virtual bool init() APPLE_KEXT_OVERRIDE; /*! @function getThread - @abstract Gets the workThread. - @result Returns workThread. -*/ - virtual IOThread getThread() const; + * @abstract Gets the workThread. + * @result Returns workThread. + */ + virtual IOThread getThread() const; /*! @function onThread - @abstract Is the current execution context on the work thread? - @result Returns true if IOThreadSelf() == workThread.
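Since event-source actions are expected to arrive on the work-loop thread, onThread() makes a cheap context check; a minimal sketch, reusing the hypothetical MyDriver and fWorkLoop names from the earlier example.

void
MyDriver::completeIO(void)
{
    // This method is only legal on the work-loop thread (for example,
    // called from an event-source action); trap early if that contract
    // is broken during development.
    assert(fWorkLoop->onThread());
    // ... touch hardware state that the work loop serializes ...
}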
-*/ - virtual bool onThread() const; + * @abstract Is the current execution context on the work thread? + * @result Returns true if IOThreadSelf() == workThread. + */ + virtual bool onThread() const; /*! @function inGate - @abstract Is the current execution context holding the work-loop's gate? - @result Returns true if IOThreadSelf() is gate holder. -*/ - virtual bool inGate() const; - + * @abstract Is the current execution context holding the work-loop's gate? + * @result Returns true if IOThreadSelf() is gate holder. + */ + virtual bool inGate() const; + /*! @function addEventSource - @discussion Add an event source to be monitored by the work loop. This function does not return until the work loop has acknowledged the arrival of the new event source. When a new event has been added the threadMain will always restart its loop and check all outstanding events. The event source is retained by the work loop. - @param newEvent Pointer to IOEventSource subclass to add. - @result Always returns kIOReturnSuccess. -*/ - virtual IOReturn addEventSource(IOEventSource *newEvent); + * @discussion Add an event source to be monitored by the work loop. This function does not return until the work loop has acknowledged the arrival of the new event source. When a new event has been added the threadMain will always restart its loop and check all outstanding events. The event source is retained by the work loop. + * @param newEvent Pointer to IOEventSource subclass to add. + * @result Always returns kIOReturnSuccess. + */ + virtual IOReturn addEventSource(IOEventSource *newEvent); /*! @function removeEventSource - @discussion Remove an event source from the work loop. This function does not return until the work loop has acknowledged the removal of the event source. When an event has been removed the threadMain will always restart its loop and check all outstanding events. The event source will be released before return. - @param toRemove Pointer to IOEventSource subclass to remove. - @result Returns kIOReturnSuccess if successful, kIOReturnBadArgument if toRemove couldn't be found. -*/ - virtual IOReturn removeEventSource(IOEventSource *toRemove); + * @discussion Remove an event source from the work loop. This function does not return until the work loop has acknowledged the removal of the event source. When an event has been removed the threadMain will always restart its loop and check all outstanding events. The event source will be released before return. + * @param toRemove Pointer to IOEventSource subclass to remove. + * @result Returns kIOReturnSuccess if successful, kIOReturnBadArgument if toRemove couldn't be found. + */ + virtual IOReturn removeEventSource(IOEventSource *toRemove); /*! @function enableAllEventSources - @abstract Calls enable() in all event sources. - @discussion For all event sources in eventChain, call enable() function. See IOEventSource::enable(). -*/ - virtual void enableAllEventSources() const; + * @abstract Calls enable() in all event sources. + * @discussion For all event sources in eventChain, call enable() function. See IOEventSource::enable(). + */ + virtual void enableAllEventSources() const; /*! @function disableAllEventSources - @abstract Calls disable() in all event sources. - @discussion For all event sources in eventChain, call disable() function. See IOEventSource::disable(). -*/ - virtual void disableAllEventSources() const; + * @abstract Calls disable() in all event sources. + * @discussion For all event sources in eventChain, call disable() function. 
See IOEventSource::disable(). + */ + virtual void disableAllEventSources() const; /*! @function enableAllInterrupts - @abstract Calls enable() in all interrupt event sources. - @discussion For all event sources (ES) for which OSDynamicCast(IOInterruptEventSource, ES) is valid, in eventChain call enable() function. See IOEventSource::enable(). -*/ - virtual void enableAllInterrupts() const; + * @abstract Calls enable() in all interrupt event sources. + * @discussion For all event sources (ES) for which OSDynamicCast(IOInterruptEventSource, ES) is valid, in eventChain call enable() function. See IOEventSource::enable(). + */ + virtual void enableAllInterrupts() const; /*! @function disableAllInterrupts - @abstract Calls disable() in all interrupt event sources. - @discussion For all event sources (ES) for which OSDynamicCast(IOInterruptEventSource, ES) is valid, in eventChain call disable() function. See IOEventSource::disable(). -*/ - virtual void disableAllInterrupts() const; + * @abstract Calls disable() in all interrupt event sources. + * @discussion For all event sources (ES) for which OSDynamicCast(IOInterruptEventSource, ES) is valid, in eventChain call disable() function. See IOEventSource::disable(). + */ + virtual void disableAllInterrupts() const; protected: - // Internal APIs used by event sources to control the thread - friend class IOEventSource; - friend class IOTimerEventSource; - friend class IOCommandGate; +// Internal APIs used by event sources to control the thread + friend class IOEventSource; + friend class IOTimerEventSource; + friend class IOCommandGate; #if IOKITSTATS - friend class IOStatistics; + friend class IOStatistics; #endif - virtual void signalWorkAvailable(); - virtual void openGate(); - virtual void closeGate(); - virtual bool tryCloseGate(); - virtual int sleepGate(void *event, UInt32 interuptibleType); - virtual void wakeupGate(void *event, bool oneThread); + virtual void signalWorkAvailable(); + virtual void openGate(); + virtual void closeGate(); + virtual bool tryCloseGate(); + virtual int sleepGate(void *event, UInt32 interuptibleType); + virtual void wakeupGate(void *event, bool oneThread); public: - /* methods available in Mac OS X 10.1 or later */ +/* methods available in Mac OS X 10.1 or later */ /*! @function runAction - @abstract Single thread a call to an action with the work-loop. - @discussion Client function that causes the given action to be called in a single threaded manner. Beware: the work-loop's gate is recursive and runAction can cause direct or indirect re-entrancy. When executing on a client's thread, runAction will sleep until the work-loop's gate opens for execution of client actions, the action is single threaded against all other work-loop event sources. - @param action Pointer to function to be executed in work-loop context. - @param arg0 Parameter for action parameter, defaults to 0. - @param arg1 Parameter for action parameter, defaults to 0. - @param arg2 Parameter for action parameter, defaults to 0. - @param arg3 Parameter for action parameter, defaults to 0. - @result Returns the value of the Action callout. -*/ - virtual IOReturn runAction(Action action, OSObject *target, - void *arg0 = 0, void *arg1 = 0, - void *arg2 = 0, void *arg3 = 0); + * @abstract Single thread a call to an action with the work-loop. + * @discussion Client function that causes the given action to be called in a single threaded manner. Beware: the work-loop's gate is recursive and runAction can cause direct or indirect re-entrancy. 
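The common runAction() idiom wraps a C++ member function with OSMemberFunctionCast so it executes under the gate; a hedged sketch follows, in which MyDriver, fCurrentState, and both method names are hypothetical.

IOReturn
MyDriver::setPowerStateGated(void * stateArg, void *, void *, void *)
{
    // Runs single threaded with respect to all other gated work.
    fCurrentState = (uintptr_t) stateArg;
    return kIOReturnSuccess;
}

IOReturn
MyDriver::setPowerState(uintptr_t state)
{
    return fWorkLoop->runAction(
        OSMemberFunctionCast(IOWorkLoop::Action, this,
        &MyDriver::setPowerStateGated),
        this, (void *) state);
}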
When executing on a client's thread, runAction will sleep until the work-loop's gate opens for execution of client actions; the action is single threaded against all other work-loop event sources. + * @param action Pointer to function to be executed in work-loop context. + * @param arg0 Parameter for action parameter, defaults to 0. + * @param arg1 Parameter for action parameter, defaults to 0. + * @param arg2 Parameter for action parameter, defaults to 0. + * @param arg3 Parameter for action parameter, defaults to 0. + * @result Returns the value of the Action callout. + */ + virtual IOReturn runAction(Action action, OSObject *target, + void *arg0 = 0, void *arg1 = 0, + void *arg2 = 0, void *arg3 = 0); #ifdef __BLOCKS__ /*! @function runAction - @abstract Single thread a call to an action with the work-loop. - @discussion Client function that causes the given action to be called in a single threaded manner. Beware: the work-loop's gate is recursive and runAction can cause direct or indirect re-entrancy. When executing on a client's thread, runAction will sleep until the work-loop's gate opens for execution of client actions, the action is single threaded against all other work-loop event sources. - @param action Block to be executed in work-loop context. - @result Returns the result of the action block. -*/ - IOReturn runActionBlock(ActionBlock action); + * @abstract Single thread a call to an action with the work-loop. + * @discussion Client function that causes the given action to be called in a single threaded manner. Beware: the work-loop's gate is recursive and runAction can cause direct or indirect re-entrancy. When executing on a client's thread, runAction will sleep until the work-loop's gate opens for execution of client actions; the action is single threaded against all other work-loop event sources. + * @param action Block to be executed in work-loop context. + * @result Returns the result of the action block. + */ + IOReturn runActionBlock(ActionBlock action); #endif /* __BLOCKS__ */ /*! @function runEventSources - @discussion Consists of the inner 2 loops of the threadMain function(qv). - The outer loop terminates when there is no more work, and the inside loop - walks the event list calling the checkForWork method in each event source. - If an event source has more work to do, it can set the more flag and the - outer loop will repeat. -

- This function can be used to clear a priority inversion between the normal - workloop thread and multimedia's real time threads. The problem is that - the interrupt action routine is often held off by high priority threads. - So if they want to get their data now they will have to call us and ask if - any data is available. The multi-media user client will arrange for this - function to be called, which causes any pending interrupts to be processed - and the completion routines called. By the time the function returns all - outstanding work will have been completed at the real time threads - priority. - - @result Return false if the work loop is shutting down, true otherwise. -*/ - virtual bool runEventSources(); + * @discussion Consists of the inner 2 loops of the threadMain function(qv). + * The outer loop terminates when there is no more work, and the inside loop + * walks the event list calling the checkForWork method in each event source. + * If an event source has more work to do, it can set the more flag and the + * outer loop will repeat. + *

+ * This function can be used to clear a priority inversion between the normal + * workloop thread and multimedia's real time threads. The problem is that + * the interrupt action routine is often held off by high priority threads. + * So if they want to get their data now they will have to call us and ask if + * any data is available. The multi-media user client will arrange for this + * function to be called, which causes any pending interrupts to be processed + * and the completion routines called. By the time the function returns all + * outstanding work will have been completed at the real time threads + * priority. + * + * @result Return false if the work loop is shutting down, true otherwise. + */ + virtual bool runEventSources(); /*! @function setMaximumLockTime - @discussion For diagnostics use in DEVELOPMENT kernels, set a time interval which if the work loop lock is held for this time or greater, IOWorkLoop will panic or log a backtrace. - @param interval An absolute time interval, eg. created with clock_interval_to_absolutetime_interval(). - @param options Pass IOWorkLoop::kTimeLockPanics to panic when the time is exceeded, otherwise a log will be generated with OSReportWithBacktrace(). -*/ - void setMaximumLockTime(uint64_t interval, uint32_t options); + * @discussion For diagnostics use in DEVELOPMENT kernels, set a time interval which if the work loop lock is held for this time or greater, IOWorkLoop will panic or log a backtrace. + * @param interval An absolute time interval, eg. created with clock_interval_to_absolutetime_interval(). + * @param options Pass IOWorkLoop::kTimeLockPanics to panic when the time is exceeded, otherwise a log will be generated with OSReportWithBacktrace(). + */ + void setMaximumLockTime(uint64_t interval, uint32_t options); protected: - // Internal APIs used by event sources to control the thread - virtual int sleepGate(void *event, AbsoluteTime deadline, UInt32 interuptibleType); +// Internal APIs used by event sources to control the thread + virtual int sleepGate(void *event, AbsoluteTime deadline, UInt32 interuptibleType); #if XNU_KERNEL_PRIVATE - void lockTime(void); + void lockTime(void); #endif /* XNU_KERNEL_PRIVATE */ protected: #if __LP64__ - OSMetaClassDeclareReservedUnused(IOWorkLoop, 0); - OSMetaClassDeclareReservedUnused(IOWorkLoop, 1); - OSMetaClassDeclareReservedUnused(IOWorkLoop, 2); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 0); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 1); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 2); #else - OSMetaClassDeclareReservedUsed(IOWorkLoop, 0); - OSMetaClassDeclareReservedUsed(IOWorkLoop, 1); - OSMetaClassDeclareReservedUsed(IOWorkLoop, 2); + OSMetaClassDeclareReservedUsed(IOWorkLoop, 0); + OSMetaClassDeclareReservedUsed(IOWorkLoop, 1); + OSMetaClassDeclareReservedUsed(IOWorkLoop, 2); #endif - OSMetaClassDeclareReservedUnused(IOWorkLoop, 3); - OSMetaClassDeclareReservedUnused(IOWorkLoop, 4); - OSMetaClassDeclareReservedUnused(IOWorkLoop, 5); - OSMetaClassDeclareReservedUnused(IOWorkLoop, 6); - OSMetaClassDeclareReservedUnused(IOWorkLoop, 7); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 3); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 4); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 5); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 6); + OSMetaClassDeclareReservedUnused(IOWorkLoop, 7); }; #endif /* !__IOKIT_IOWORKLOOP_H */ diff --git a/iokit/IOKit/OSMessageNotification.h b/iokit/IOKit/OSMessageNotification.h index bd6bc1cb1..42f1bc85a 100644 --- a/iokit/IOKit/OSMessageNotification.h 
+++ b/iokit/IOKit/OSMessageNotification.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,17 +22,17 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * HISTORY * */ -#ifndef __OS_OSMESSAGENOTIFICATION_H +#ifndef __OS_OSMESSAGENOTIFICATION_H #define __OS_OSMESSAGENOTIFICATION_H #ifdef __cplusplus @@ -44,69 +44,69 @@ extern "C" { #include enum { - kFirstIOKitNotificationType = 100, - kIOServicePublishNotificationType = 100, - kIOServiceMatchedNotificationType = 101, - kIOServiceTerminatedNotificationType = 102, - kIOAsyncCompletionNotificationType = 150, - kIOServiceMessageNotificationType = 160, - kLastIOKitNotificationType = 199, - - // reserved bits - kIOKitNoticationTypeMask = 0x00000FFF, - kIOKitNoticationTypeSizeAdjShift = 30, - kIOKitNoticationMsgSizeMask = 3, + kFirstIOKitNotificationType = 100, + kIOServicePublishNotificationType = 100, + kIOServiceMatchedNotificationType = 101, + kIOServiceTerminatedNotificationType = 102, + kIOAsyncCompletionNotificationType = 150, + kIOServiceMessageNotificationType = 160, + kLastIOKitNotificationType = 199, + + // reserved bits + kIOKitNoticationTypeMask = 0x00000FFF, + kIOKitNoticationTypeSizeAdjShift = 30, + kIOKitNoticationMsgSizeMask = 3, }; enum { - kOSNotificationMessageID = 53, - kOSAsyncCompleteMessageID = 57, - kMaxAsyncArgs = 16 + kOSNotificationMessageID = 53, + kOSAsyncCompleteMessageID = 57, + kMaxAsyncArgs = 16 }; enum { - kIOAsyncReservedIndex = 0, - kIOAsyncReservedCount, - - kIOAsyncCalloutFuncIndex = kIOAsyncReservedCount, - kIOAsyncCalloutRefconIndex, - kIOAsyncCalloutCount, - - kIOMatchingCalloutFuncIndex = kIOAsyncReservedCount, - kIOMatchingCalloutRefconIndex, - kIOMatchingCalloutCount, - - kIOInterestCalloutFuncIndex = kIOAsyncReservedCount, - kIOInterestCalloutRefconIndex, - kIOInterestCalloutServiceIndex, - kIOInterestCalloutCount + kIOAsyncReservedIndex = 0, + kIOAsyncReservedCount, + + kIOAsyncCalloutFuncIndex = kIOAsyncReservedCount, + kIOAsyncCalloutRefconIndex, + kIOAsyncCalloutCount, + + kIOMatchingCalloutFuncIndex = kIOAsyncReservedCount, + kIOMatchingCalloutRefconIndex, + kIOMatchingCalloutCount, + + kIOInterestCalloutFuncIndex = kIOAsyncReservedCount, + kIOInterestCalloutRefconIndex, + kIOInterestCalloutServiceIndex, + kIOInterestCalloutCount }; // -------------- enum { - kOSAsyncRef64Count = 8, - kOSAsyncRef64Size = kOSAsyncRef64Count * 
((int) sizeof(io_user_reference_t)) + kOSAsyncRef64Count = 8, + kOSAsyncRef64Size = kOSAsyncRef64Count * ((int) sizeof(io_user_reference_t)) }; typedef io_user_reference_t OSAsyncReference64[kOSAsyncRef64Count]; struct OSNotificationHeader64 { - mach_msg_size_t size; /* content size */ - natural_t type; - OSAsyncReference64 reference; + mach_msg_size_t size; /* content size */ + natural_t type; + OSAsyncReference64 reference; #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) - unsigned char content[]; + unsigned char content[]; #else - unsigned char content[0]; + unsigned char content[0]; #endif }; #pragma pack(4) struct IOServiceInterestContent64 { - natural_t messageType; - io_user_reference_t messageArgument[1]; + natural_t messageType; + io_user_reference_t messageArgument[1]; }; #pragma pack() // -------------- @@ -114,38 +114,38 @@ struct IOServiceInterestContent64 { #if !KERNEL_USER32 enum { - kOSAsyncRefCount = 8, - kOSAsyncRefSize = 32 + kOSAsyncRefCount = 8, + kOSAsyncRefSize = 32 }; typedef natural_t OSAsyncReference[kOSAsyncRefCount]; struct OSNotificationHeader { - mach_msg_size_t size; /* content size */ - natural_t type; - OSAsyncReference reference; + mach_msg_size_t size; /* content size */ + natural_t type; + OSAsyncReference reference; #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) - unsigned char content[]; + unsigned char content[]; #else - unsigned char content[0]; + unsigned char content[0]; #endif }; #pragma pack(4) struct IOServiceInterestContent { - natural_t messageType; - void * messageArgument[1]; + natural_t messageType; + void * messageArgument[1]; }; #pragma pack() #endif /* KERNEL_USER32 */ struct IOAsyncCompletionContent { - IOReturn result; + IOReturn result; #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) - void * args[] __attribute__ ((packed)); + void * args[] __attribute__ ((packed)); #else - void * args[0] __attribute__ ((packed)); + void * args[0] __attribute__ ((packed)); #endif }; @@ -160,4 +160,3 @@ typedef struct IOAsyncCompletionContent IOAsyncCompletionContent; #endif #endif /* __OS_OSMESSAGENOTIFICATION_H */ - diff --git a/iokit/IOKit/assert.h b/iokit/IOKit/assert.h index f7212675f..cb50d2f89 100644 --- a/iokit/IOKit/assert.h +++ b/iokit/IOKit/assert.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _IO_ASSERT_H_ -#define _IO_ASSERT_H_ +#ifndef _IO_ASSERT_H_ +#define _IO_ASSERT_H_ #include #ifdef __cplusplus @@ -37,7 +37,7 @@ extern "C" { #ifdef KERNEL #if IOASSERT #undef MACH_ASSERT -#define MACH_ASSERT 1 +#define MACH_ASSERT 1 #endif #endif #include @@ -47,10 +47,9 @@ extern "C" { #endif -#if( !defined( OSCompileAssert ) ) -# define OSCompileAssert( TEST ) \ +#if(!defined(OSCompileAssert)) +# define OSCompileAssert( TEST ) \ extern int OSCompileAssertFailed[ ( TEST ) ? 1 : -1 ] __unused; #endif -#endif /* _IO_ASSERT_H_ */ - +#endif /* _IO_ASSERT_H_ */ diff --git a/iokit/IOKit/nvram/IONVRAMController.h b/iokit/IOKit/nvram/IONVRAMController.h index 29b2b722e..fcabedf91 100644 --- a/iokit/IOKit/nvram/IONVRAMController.h +++ b/iokit/IOKit/nvram/IONVRAMController.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,19 +31,19 @@ #include -class IONVRAMController: public IOService +class IONVRAMController : public IOService { - OSDeclareAbstractStructors(IONVRAMController); - + OSDeclareAbstractStructors(IONVRAMController); + public: - virtual void registerService(IOOptionBits options = 0) APPLE_KEXT_OVERRIDE; - - virtual void sync(void); - - virtual IOReturn read(IOByteCount offset, UInt8 *buffer, - IOByteCount length) = 0; - virtual IOReturn write(IOByteCount offset, UInt8 *buffer, - IOByteCount length) = 0; + virtual void registerService(IOOptionBits options = 0) APPLE_KEXT_OVERRIDE; + + virtual void sync(void); + + virtual IOReturn read(IOByteCount offset, UInt8 *buffer, + IOByteCount length) = 0; + virtual IOReturn write(IOByteCount offset, UInt8 *buffer, + IOByteCount length) = 0; }; #endif /* !_IOKIT_IONVRAMCONTROLLER_H */ diff --git a/iokit/IOKit/perfcontrol/IOPerfControl.h b/iokit/IOKit/perfcontrol/IOPerfControl.h index 886d0a0c1..b1501cd52 100644 --- a/iokit/IOKit/perfcontrol/IOPerfControl.h +++ b/iokit/IOKit/perfcontrol/IOPerfControl.h @@ -11,9 +11,8 @@ struct thread_group; -enum -{ - kIOPerfControlClientWorkUntracked = 0, +enum{ + kIOPerfControlClientWorkUntracked = 0, }; /*! @@ -23,195 +22,190 @@ enum */ class IOPerfControlClient final : public OSObject { - OSDeclareDefaultStructors(IOPerfControlClient); + OSDeclareDefaultStructors(IOPerfControlClient); protected: - virtual bool init(IOService *driver, uint64_t maxWorkCapacity); + virtual bool init(IOService *driver, uint64_t maxWorkCapacity); public: - /*! 
- * @function copyClient - * @abstract Return a retained reference to a client object, to be released by the driver. It may be - * shared with other drivers in the system. - * @param driver The device driver that will be using this interface. - * @param maxWorkCapacity The maximum number of concurrent work items supported by the device driver. - * @returns An instance of IOPerfControlClient. - */ - static IOPerfControlClient *copyClient(IOService *driver, uint64_t maxWorkCapacity); - - /*! - * @function registerDevice - * @abstract Inform the system that work will be dispatched to a device in the future. - * @discussion The system will do some one-time setup work associated with the device, and may block the - * current thread during the setup. Devices should not be passed to work workSubmit, workSubmitAndBegin, - * workBegin, or workEnd until they have been successfully registered. The unregistration process happens - * automatically when the device object is deallocated. - * @param device The device object. Some platforms require device to be a specific subclass of IOService. - * @returns kIOReturnSuccess or an IOReturn error code - */ - virtual IOReturn registerDevice(IOService *driver, IOService *device); - - /*! - * @function unregisterDevice - * @abstract Inform the system that work will be no longer be dispatched to a device in the future. - * @discussion This call is optional as the unregistration process happens automatically when the device - * object is deallocated. This call may block the current thread and/or acquire locks. It should not be - * called until after all submitted work has been ended using workEnd. - * @param device The device object. Some platforms require device to be a specific subclass of IOService. - */ - virtual void unregisterDevice(IOService *driver, IOService *device); - - /*! - * @struct WorkSubmitArgs - * @discussion Drivers may submit additional device-specific arguments related to the submission of a work item - * by passing a struct with WorkSubmitArgs as its first member. Note: Drivers are responsible for publishing - * a header file describing these arguments. - */ - struct WorkSubmitArgs - { - uint32_t version; - uint32_t size; - uint64_t submit_time; - uint64_t reserved[4]; - void *driver_data; - }; - - /*! - * @function workSubmit - * @abstract Tell the performance controller that work was submitted. - * @param device The device that will execute the work. Some platforms require device to be a - * specific subclass of IOService. - * @param args Optional device-specific arguments related to the submission of this work item. - * @returns A token representing this work item, which must be passed to workEnd when the work is finished - * unless the token equals kIOPerfControlClientWorkUntracked. Failure to do this will result in memory leaks - * and a degradation of system performance. - */ - virtual uint64_t workSubmit(IOService *device, WorkSubmitArgs *args = nullptr); - - /*! - * @struct WorkBeginArgs - * @discussion Drivers may submit additional device-specific arguments related to the start of a work item - * by passing a struct with WorkBeginArgs as its first member. Note: Drivers are responsible for publishing - * a header file describing these arguments. - */ - struct WorkBeginArgs - { - uint32_t version; - uint32_t size; - uint64_t begin_time; - uint64_t reserved[4]; - void *driver_data; - }; - - /*! - * @function workSubmitAndBegin - * @abstract Tell the performance controller that work was submitted and immediately began executing. 
- * @param device The device that is executing the work. Some platforms require device to be a - * specific subclass of IOService. - * @param submitArgs Optional device-specific arguments related to the submission of this work item. - * @param beginArgs Optional device-specific arguments related to the start of this work item. - * @returns A token representing this work item, which must be passed to workEnd when the work is finished - * unless the token equals kIOPerfControlClientWorkUntracked. Failure to do this will result in memory leaks - * and a degradation of system performance. - */ - virtual uint64_t workSubmitAndBegin(IOService *device, WorkSubmitArgs *submitArgs = nullptr, - WorkBeginArgs *beginArgs = nullptr); - - /*! - * @function workBegin - * @abstract Tell the performance controller that previously submitted work began executing. - * @param device The device that is executing the work. Some platforms require device to be a - * specific subclass of IOService. - * @param args Optional device-specific arguments related to the start of this work item. - */ - virtual void workBegin(IOService *device, uint64_t token, WorkBeginArgs *args = nullptr); - - /*! - * @struct WorkEndArgs - * @discussion Drivers may submit additional device-specific arguments related to the end of a work item - * by passing a struct with WorkEndArgs as its first member. Note: Drivers are responsible for publishing - * a header file describing these arguments. - */ - struct WorkEndArgs - { - uint32_t version; - uint32_t size; - uint64_t end_time; - uint64_t reserved[4]; - void *driver_data; - }; - - /*! - * @function workEnd - * @abstract Tell the performance controller that previously started work finished executing. - * @param device The device that executed the work. Some platforms require device to be a - * specific subclass of IOService. - * @param args Optional device-specific arguments related to the end of this work item. - * @param done Optional Set to false if the work has not yet completed. Drivers are then responsible for - * calling workBegin when the work resumes and workEnd with done set to True when it has completed. - */ - virtual void workEnd(IOService *device, uint64_t token, WorkEndArgs *args = nullptr, bool done = true); - - /*! - * @struct PerfControllerInterface - * @discussion Function pointers necessary to register a performance controller. Not for general driver use. - */ - struct PerfControllerInterface - { - struct WorkState { - uint64_t thread_group_id; - void *thread_group_data; - void *work_data; - uint32_t work_data_size; - }; - - using RegisterDeviceFunction = IOReturn (*)(IOService *); - using WorkCanSubmitFunction = bool (*)(IOService *, WorkState *, WorkSubmitArgs *); - using WorkSubmitFunction = void (*)(IOService *, uint64_t, WorkState *, WorkSubmitArgs *); - using WorkBeginFunction = void (*)(IOService *, uint64_t, WorkState *, WorkBeginArgs *); - using WorkEndFunction = void (*)(IOService *, uint64_t, WorkState *, WorkEndArgs *, bool); - - uint64_t version; - RegisterDeviceFunction registerDevice; - RegisterDeviceFunction unregisterDevice; - WorkCanSubmitFunction workCanSubmit; - WorkSubmitFunction workSubmit; - WorkBeginFunction workBegin; - WorkEndFunction workEnd; - }; - - /*! - * @function registerPerformanceController - * @abstract Register a performance controller to receive callbacks. Not for general driver use. - * @param interface Struct containing callback functions implemented by the performance controller. 
- * @returns kIOReturnSuccess or kIOReturnError if the interface was already registered. - */ - virtual IOReturn registerPerformanceController(PerfControllerInterface interface); +/*! + * @function copyClient + * @abstract Return a retained reference to a client object, to be released by the driver. It may be + * shared with other drivers in the system. + * @param driver The device driver that will be using this interface. + * @param maxWorkCapacity The maximum number of concurrent work items supported by the device driver. + * @returns An instance of IOPerfControlClient. + */ + static IOPerfControlClient *copyClient(IOService *driver, uint64_t maxWorkCapacity); + +/*! + * @function registerDevice + * @abstract Inform the system that work will be dispatched to a device in the future. + * @discussion The system will do some one-time setup work associated with the device, and may block the + * current thread during the setup. Devices should not be passed to workSubmit, workSubmitAndBegin, + * workBegin, or workEnd until they have been successfully registered. The unregistration process happens + * automatically when the device object is deallocated. + * @param device The device object. Some platforms require device to be a specific subclass of IOService. + * @returns kIOReturnSuccess or an IOReturn error code + */ + virtual IOReturn registerDevice(IOService *driver, IOService *device); + +/*! + * @function unregisterDevice + * @abstract Inform the system that work will no longer be dispatched to a device in the future. + * @discussion This call is optional as the unregistration process happens automatically when the device + * object is deallocated. This call may block the current thread and/or acquire locks. It should not be + * called until after all submitted work has been ended using workEnd. + * @param device The device object. Some platforms require device to be a specific subclass of IOService. + */ + virtual void unregisterDevice(IOService *driver, IOService *device); + +/*! + * @struct WorkSubmitArgs + * @discussion Drivers may submit additional device-specific arguments related to the submission of a work item + * by passing a struct with WorkSubmitArgs as its first member. Note: Drivers are responsible for publishing + * a header file describing these arguments. + */ + struct WorkSubmitArgs { + uint32_t version; + uint32_t size; + uint64_t submit_time; + uint64_t reserved[4]; + void *driver_data; + }; + +/*! + * @function workSubmit + * @abstract Tell the performance controller that work was submitted. + * @param device The device that will execute the work. Some platforms require device to be a + * specific subclass of IOService. + * @param args Optional device-specific arguments related to the submission of this work item. + * @returns A token representing this work item, which must be passed to workEnd when the work is finished + * unless the token equals kIOPerfControlClientWorkUntracked. Failure to do this will result in memory leaks + * and a degradation of system performance. + */ + virtual uint64_t workSubmit(IOService *device, WorkSubmitArgs *args = nullptr); + +/*! + * @struct WorkBeginArgs + * @discussion Drivers may submit additional device-specific arguments related to the start of a work item + * by passing a struct with WorkBeginArgs as its first member. Note: Drivers are responsible for publishing + * a header file describing these arguments.
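A short sketch of the first-member convention these Work*Args structs describe; MyDriverSubmitArgs and its trailing fields are hypothetical, published by the driver rather than by this header.

// The common header must lead the struct so the performance controller can
// parse it; the driver-specific payload follows.
struct MyDriverSubmitArgs {
    IOPerfControlClient::WorkSubmitArgs common; // must be the first member
    uint32_t queueDepth;                        // driver-specific payload
    uint32_t priorityHint;
};
// A driver fills this in and passes it to workSubmit() as a WorkSubmitArgs *.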
+ */
+ struct WorkBeginArgs {
+ uint32_t version;
+ uint32_t size;
+ uint64_t begin_time;
+ uint64_t reserved[4];
+ void *driver_data;
+ };
+
+/*!
+ * @function workSubmitAndBegin
+ * @abstract Tell the performance controller that work was submitted and immediately began executing.
+ * @param device The device that is executing the work. Some platforms require device to be a
+ * specific subclass of IOService.
+ * @param submitArgs Optional device-specific arguments related to the submission of this work item.
+ * @param beginArgs Optional device-specific arguments related to the start of this work item.
+ * @returns A token representing this work item, which must be passed to workEnd when the work is finished
+ * unless the token equals kIOPerfControlClientWorkUntracked. Failure to do this will result in memory leaks
+ * and a degradation of system performance.
+ */
+ virtual uint64_t workSubmitAndBegin(IOService *device, WorkSubmitArgs *submitArgs = nullptr,
+ WorkBeginArgs *beginArgs = nullptr);
+
+/*!
+ * @function workBegin
+ * @abstract Tell the performance controller that previously submitted work began executing.
+ * @param device The device that is executing the work. Some platforms require device to be a
+ * specific subclass of IOService.
+ * @param args Optional device-specific arguments related to the start of this work item.
+ */
+ virtual void workBegin(IOService *device, uint64_t token, WorkBeginArgs *args = nullptr);
+
+/*!
+ * @struct WorkEndArgs
+ * @discussion Drivers may submit additional device-specific arguments related to the end of a work item
+ * by passing a struct with WorkEndArgs as its first member. Note: Drivers are responsible for publishing
+ * a header file describing these arguments.
+ */
+ struct WorkEndArgs {
+ uint32_t version;
+ uint32_t size;
+ uint64_t end_time;
+ uint64_t reserved[4];
+ void *driver_data;
+ };
+
+/*!
+ * @function workEnd
+ * @abstract Tell the performance controller that previously started work finished executing.
+ * @param device The device that executed the work. Some platforms require device to be a
+ * specific subclass of IOService.
+ * @param args Optional device-specific arguments related to the end of this work item.
+ * @param done Optional. Set to false if the work has not yet completed. Drivers are then responsible for
+ * calling workBegin when the work resumes and workEnd with done set to true when it has completed.
+ */
+ virtual void workEnd(IOService *device, uint64_t token, WorkEndArgs *args = nullptr, bool done = true);
+
+/*!
+ * @struct PerfControllerInterface
+ * @discussion Function pointers necessary to register a performance controller. Not for general driver use.
+ */
+ struct PerfControllerInterface {
+ struct WorkState {
+ uint64_t thread_group_id;
+ void *thread_group_data;
+ void *work_data;
+ uint32_t work_data_size;
+ };
+
+ using RegisterDeviceFunction = IOReturn (*)(IOService *);
+ using WorkCanSubmitFunction = bool (*)(IOService *, WorkState *, WorkSubmitArgs *);
+ using WorkSubmitFunction = void (*)(IOService *, uint64_t, WorkState *, WorkSubmitArgs *);
+ using WorkBeginFunction = void (*)(IOService *, uint64_t, WorkState *, WorkBeginArgs *);
+ using WorkEndFunction = void (*)(IOService *, uint64_t, WorkState *, WorkEndArgs *, bool);
+
+ uint64_t version;
+ RegisterDeviceFunction registerDevice;
+ RegisterDeviceFunction unregisterDevice;
+ WorkCanSubmitFunction workCanSubmit;
+ WorkSubmitFunction workSubmit;
+ WorkBeginFunction workBegin;
+ WorkEndFunction workEnd;
+ };
+
+/*!
+ * @function registerPerformanceController + * @abstract Register a performance controller to receive callbacks. Not for general driver use. + * @param interface Struct containing callback functions implemented by the performance controller. + * @returns kIOReturnSuccess or kIOReturnError if the interface was already registered. + */ + virtual IOReturn registerPerformanceController(PerfControllerInterface interface); private: - struct WorkTableEntry - { - struct thread_group *thread_group; - bool started; - uint8_t perfcontrol_data[32]; - }; - - // TODO: size of table should match sum(maxWorkCapacity) of all users - static constexpr size_t kWorkTableNumEntries = 1024; - - uint64_t allocateToken(thread_group *thread_group); - void deallocateToken(uint64_t token); - bool getEntryForToken(uint64_t token, WorkTableEntry &entry); - void markEntryStarted(uint64_t token, bool started); - - PerfControllerInterface interface; - IOLock *interfaceLock; - OSSet *deviceRegistrationList; - - // TODO: replace with ltable or pool of objects - WorkTableEntry workTable[kWorkTableNumEntries]; - size_t workTableNextIndex; - IOSimpleLock *workTableLock; + struct WorkTableEntry { + struct thread_group *thread_group; + bool started; + uint8_t perfcontrol_data[32]; + }; + +// TODO: size of table should match sum(maxWorkCapacity) of all users + static constexpr size_t kWorkTableNumEntries = 1024; + + uint64_t allocateToken(thread_group *thread_group); + void deallocateToken(uint64_t token); + bool getEntryForToken(uint64_t token, WorkTableEntry &entry); + void markEntryStarted(uint64_t token, bool started); + + PerfControllerInterface interface; + IOLock *interfaceLock; + OSSet *deviceRegistrationList; + +// TODO: replace with ltable or pool of objects + WorkTableEntry workTable[kWorkTableNumEntries]; + size_t workTableNextIndex; + IOSimpleLock *workTableLock; }; #endif /* __cplusplus */ diff --git a/iokit/IOKit/platform/AppleMacIO.h b/iokit/IOKit/platform/AppleMacIO.h index 2c72d58c5..45e5b9bd0 100644 --- a/iokit/IOKit/platform/AppleMacIO.h +++ b/iokit/IOKit/platform/AppleMacIO.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. 
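/*
 * [Editorial sketch, not part of the patch: a minimal illustration of the
 * IOPerfControlClient work-tracking interface declared above. MyDriver, its
 * members fPerfClient and fDevice, and the startHardware()/waitForHardware()
 * hooks are hypothetical; only the IOPerfControlClient calls follow the
 * header's declarations. fPerfClient is assumed to have been obtained in
 * start() via IOPerfControlClient::copyClient(this, maxWorkCapacity), and
 * fDevice registered with registerDevice(this, fDevice).]
 */
void
MyDriver::issueWork()
{
	// Submit the work item. A token of kIOPerfControlClientWorkUntracked
	// means the system is not tracking this item, so the begin/end calls
	// must be skipped for it.
	uint64_t token = fPerfClient->workSubmit(fDevice);

	startHardware();        // hypothetical: hand the work to the device
	if (token != kIOPerfControlClientWorkUntracked) {
		fPerfClient->workBegin(fDevice, token);
	}

	waitForHardware();      // hypothetical: wait for the device to finish
	if (token != kIOPerfControlClientWorkUntracked) {
		fPerfClient->workEnd(fDevice, token);   // done defaults to true
	}
}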
* * HISTORY * @@ -42,38 +42,38 @@ class AppleMacIO : public IOService { - OSDeclareAbstractStructors(AppleMacIO); + OSDeclareAbstractStructors(AppleMacIO); + + IOService * fNub; + IOMemoryMap * fMemory; - IOService * fNub; - IOMemoryMap * fMemory; + struct ExpansionData { }; + ExpansionData *fReserved; - struct ExpansionData { }; - ExpansionData *fReserved; - protected: - virtual bool selfTest( void ); + virtual bool selfTest( void ); public: - virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE; + virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE; - virtual IOService * createNub( IORegistryEntry * from ); + virtual IOService * createNub( IORegistryEntry * from ); - virtual void processNub( IOService * nub ); + virtual void processNub( IOService * nub ); - virtual void publishBelow( IORegistryEntry * root ); + virtual void publishBelow( IORegistryEntry * root ); - virtual const char * deleteList( void ); - virtual const char * excludeList( void ); + virtual const char * deleteList( void ); + virtual const char * excludeList( void ); - virtual bool compareNubName( const IOService * nub, OSString * name, - OSString ** matched = 0 ) const; + virtual bool compareNubName( const IOService * nub, OSString * name, + OSString ** matched = 0 ) const; - virtual IOReturn getNubResources( IOService * nub ); + virtual IOReturn getNubResources( IOService * nub ); - OSMetaClassDeclareReservedUnused(AppleMacIO, 0); - OSMetaClassDeclareReservedUnused(AppleMacIO, 1); - OSMetaClassDeclareReservedUnused(AppleMacIO, 2); - OSMetaClassDeclareReservedUnused(AppleMacIO, 3); + OSMetaClassDeclareReservedUnused(AppleMacIO, 0); + OSMetaClassDeclareReservedUnused(AppleMacIO, 1); + OSMetaClassDeclareReservedUnused(AppleMacIO, 2); + OSMetaClassDeclareReservedUnused(AppleMacIO, 3); }; #endif /* ! _IOKIT_APPLEMACIO_H */ diff --git a/iokit/IOKit/platform/AppleMacIODevice.h b/iokit/IOKit/platform/AppleMacIODevice.h index 7a676e116..cb6f898da 100644 --- a/iokit/IOKit/platform/AppleMacIODevice.h +++ b/iokit/IOKit/platform/AppleMacIODevice.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 
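/*
 * [Editorial sketch, not part of the patch: AppleMacIO above is the abstract
 * base class for mac-io controller drivers. A concrete subclass, shown here
 * as the hypothetical MyMacIO, typically lets the base class handle provider
 * setup and then publishes AppleMacIODevice nubs for the children found in
 * the device tree; publishBelow() honors deleteList()/excludeList() when
 * deciding what to publish. The body below is illustrative only.]
 */
bool
MyMacIO::start(IOService *provider)
{
	if (!AppleMacIO::start(provider)) {     // base class records fNub, maps fMemory
		return false;
	}
	publishBelow(provider);                 // create and register child nubs
	return true;
}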
* * HISTORY * @@ -40,21 +40,21 @@ class AppleMacIODevice : public IOService { - OSDeclareDefaultStructors(AppleMacIODevice); + OSDeclareDefaultStructors(AppleMacIODevice); -private: - struct ExpansionData { }; - ExpansionData *reserved; +private: + struct ExpansionData { }; + ExpansionData *reserved; public: - virtual bool compareName( OSString * name, OSString ** matched = 0 ) const APPLE_KEXT_OVERRIDE; - virtual IOService *matchLocation(IOService *client) APPLE_KEXT_OVERRIDE; - virtual IOReturn getResources( void ) APPLE_KEXT_OVERRIDE; - - OSMetaClassDeclareReservedUnused(AppleMacIODevice, 0); - OSMetaClassDeclareReservedUnused(AppleMacIODevice, 1); - OSMetaClassDeclareReservedUnused(AppleMacIODevice, 2); - OSMetaClassDeclareReservedUnused(AppleMacIODevice, 3); + virtual bool compareName( OSString * name, OSString ** matched = 0 ) const APPLE_KEXT_OVERRIDE; + virtual IOService *matchLocation(IOService *client) APPLE_KEXT_OVERRIDE; + virtual IOReturn getResources( void ) APPLE_KEXT_OVERRIDE; + + OSMetaClassDeclareReservedUnused(AppleMacIODevice, 0); + OSMetaClassDeclareReservedUnused(AppleMacIODevice, 1); + OSMetaClassDeclareReservedUnused(AppleMacIODevice, 2); + OSMetaClassDeclareReservedUnused(AppleMacIODevice, 3); }; #endif /* ! _IOKIT_APPLEMACIODEVICE_H */ diff --git a/iokit/IOKit/platform/AppleNMI.h b/iokit/IOKit/platform/AppleNMI.h index 216f22074..a21688e1b 100644 --- a/iokit/IOKit/platform/AppleNMI.h +++ b/iokit/IOKit/platform/AppleNMI.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,38 +39,37 @@ #include // NMI Interrupt Constants -enum -{ - kExtInt9_NMIIntSource = 0x800506E0, - kNMIIntLevelMask = 0x00004000, - kNMIIntMask = 0x00000080 +enum{ + kExtInt9_NMIIntSource = 0x800506E0, + kNMIIntLevelMask = 0x00004000, + kNMIIntMask = 0x00000080 }; class AppleNMI : public IOService { - OSDeclareDefaultStructors(AppleNMI); + OSDeclareDefaultStructors(AppleNMI); private: - bool enable_debugger; - bool mask_NMI; + bool enable_debugger; + bool mask_NMI; - struct ExpansionData { }; - ExpansionData * reserved; // Reserved for future use + struct ExpansionData { }; + ExpansionData * reserved; // Reserved for future use public: - IOService *rootDomain; - virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; - virtual IOReturn initNMI(IOInterruptController *parentController, OSData *parentSource); - virtual IOReturn handleInterrupt(void *refCon, IOService *nub, int source); + IOService *rootDomain; + virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; + virtual IOReturn initNMI(IOInterruptController *parentController, OSData *parentSource); + virtual IOReturn handleInterrupt(void *refCon, IOService *nub, int source); - // Power handling methods: - virtual IOReturn powerStateWillChangeTo(IOPMPowerFlags, unsigned long, IOService*) APPLE_KEXT_OVERRIDE; +// Power handling methods: + virtual IOReturn powerStateWillChangeTo(IOPMPowerFlags, unsigned long, IOService*) APPLE_KEXT_OVERRIDE; - OSMetaClassDeclareReservedUnused(AppleNMI, 0); - OSMetaClassDeclareReservedUnused(AppleNMI, 1); - OSMetaClassDeclareReservedUnused(AppleNMI, 2); - OSMetaClassDeclareReservedUnused(AppleNMI, 3); + OSMetaClassDeclareReservedUnused(AppleNMI, 0); + OSMetaClassDeclareReservedUnused(AppleNMI, 1); + OSMetaClassDeclareReservedUnused(AppleNMI, 2); + OSMetaClassDeclareReservedUnused(AppleNMI, 3); }; #endif /* ! _IOKIT_APPLENMI_H */ diff --git a/iokit/IOKit/platform/ApplePlatformExpert.h b/iokit/IOKit/platform/ApplePlatformExpert.h index 0f75950d3..d5ffd5a6a 100644 --- a/iokit/IOKit/platform/ApplePlatformExpert.h +++ b/iokit/IOKit/platform/ApplePlatformExpert.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
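/*
 * [Editorial sketch, not part of the patch: the power-handling pattern that
 * AppleNMI's powerStateWillChangeTo() declaration above exists to support is
 * to mask the NMI source while the root domain drops power and unmask it on
 * wake. The body below is illustrative only; the shipping driver also
 * reprograms the interrupt source hardware (see kExtInt9_NMIIntSource).]
 */
IOReturn
AppleNMI::powerStateWillChangeTo(IOPMPowerFlags theFlags, unsigned long, IOService *)
{
	// No kIOPMPowerOn bit in the upcoming state: the system is sleeping,
	// so remember that NMI delivery must be masked until wake.
	mask_NMI = !(theFlags & kIOPMPowerOn);
	return IOPMAckImplied;
}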
* * HISTORY * @@ -39,52 +39,52 @@ #include enum { - kBootROMTypeOldWorld = 0, - kBootROMTypeNewWorld + kBootROMTypeOldWorld = 0, + kBootROMTypeNewWorld }; enum { - kChipSetTypePowerSurge = 0, - kChipSetTypePowerStar, - kChipSetTypeGossamer, - kChipSetTypePowerExpress, - kChipSetTypeCore99, - kChipSetTypeCore2001 + kChipSetTypePowerSurge = 0, + kChipSetTypePowerStar, + kChipSetTypeGossamer, + kChipSetTypePowerExpress, + kChipSetTypeCore99, + kChipSetTypeCore2001 }; enum { - kMachineTypeUnknown = 0 + kMachineTypeUnknown = 0 }; extern const OSSymbol *gGetDefaultBusSpeedsKey; class ApplePlatformExpert : public IODTPlatformExpert { - OSDeclareAbstractStructors(ApplePlatformExpert); - -private: - SInt32 _timeToGMT; + OSDeclareAbstractStructors(ApplePlatformExpert); + +private: + SInt32 _timeToGMT; - struct ExpansionData { }; - ExpansionData *reserved; + struct ExpansionData { }; + ExpansionData *reserved; public: - virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE; - virtual bool configure( IOService * provider ) APPLE_KEXT_OVERRIDE; - virtual const char * deleteList( void ) APPLE_KEXT_OVERRIDE; - virtual const char * excludeList( void ) APPLE_KEXT_OVERRIDE; - - virtual void registerNVRAMController( IONVRAMController * nvram ) APPLE_KEXT_OVERRIDE; - - virtual long getGMTTimeOfDay(void) APPLE_KEXT_OVERRIDE; - virtual void setGMTTimeOfDay(long secs) APPLE_KEXT_OVERRIDE; - - virtual bool getMachineName(char *name, int maxLength) APPLE_KEXT_OVERRIDE; - - OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 0); - OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 1); - OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 2); - OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 3); + virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE; + virtual bool configure( IOService * provider ) APPLE_KEXT_OVERRIDE; + virtual const char * deleteList( void ) APPLE_KEXT_OVERRIDE; + virtual const char * excludeList( void ) APPLE_KEXT_OVERRIDE; + + virtual void registerNVRAMController( IONVRAMController * nvram ) APPLE_KEXT_OVERRIDE; + + virtual long getGMTTimeOfDay(void) APPLE_KEXT_OVERRIDE; + virtual void setGMTTimeOfDay(long secs) APPLE_KEXT_OVERRIDE; + + virtual bool getMachineName(char *name, int maxLength) APPLE_KEXT_OVERRIDE; + + OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 0); + OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 1); + OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 2); + OSMetaClassDeclareReservedUnused(ApplePlatformExpert, 3); }; diff --git a/iokit/IOKit/power/IOPwrController.h b/iokit/IOKit/power/IOPwrController.h index 709a0fb1c..6fcd0d8ba 100644 --- a/iokit/IOKit/power/IOPwrController.h +++ b/iokit/IOKit/power/IOPwrController.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -31,11 +31,9 @@ #include -class IOPwrController: public IOService +class IOPwrController : public IOService { -OSDeclareAbstractStructors(IOPwrController) + OSDeclareAbstractStructors(IOPwrController) public: - }; - diff --git a/iokit/IOKit/pwr_mgt/IOPM.h b/iokit/IOKit/pwr_mgt/IOPM.h index 930a45d8d..bc1b88397 100644 --- a/iokit/IOKit/pwr_mgt/IOPM.h +++ b/iokit/IOKit/pwr_mgt/IOPM.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IOKIT_IOPM_H @@ -33,82 +33,82 @@ #include /*! @header IOPM.h - @abstract Defines power management constants and keys used by both in-kernel and user space power management. - @discussion IOPM.h defines a range of power management constants used in several in-kernel and user space APIs. Most significantly, the IOPMPowerFlags used to specify the fields of an IOPMPowerState struct are defined here. - - Most of the constants defined in IOPM.h are deprecated or for Apple internal use only, and are not elaborated on in headerdoc. -*/ + * @abstract Defines power management constants and keys used by both in-kernel and user space power management. + * @discussion IOPM.h defines a range of power management constants used in several in-kernel and user space APIs. Most significantly, the IOPMPowerFlags used to specify the fields of an IOPMPowerState struct are defined here. + * + * Most of the constants defined in IOPM.h are deprecated or for Apple internal use only, and are not elaborated on in headerdoc. + */ enum { - kIOPMMaxPowerStates = 10, - IOPMMaxPowerStates = kIOPMMaxPowerStates + kIOPMMaxPowerStates = 10, + IOPMMaxPowerStates = kIOPMMaxPowerStates }; /*! @enum IOPMPowerFlags - @abstract Bits are used in defining capabilityFlags, inputPowerRequirements, and outputPowerCharacter in the IOPMPowerState structure. - @discussion These bits may be bitwise-OR'd together in the IOPMPowerState capabilityFlags field, the outputPowerCharacter field, and/or the inputPowerRequirement field. 
- - The comments clearly mark whether each flag should be used in the capabilityFlags field, outputPowerCharacter field, and inputPowerRequirement field, or all three. - - The value of capabilityFlags, inputPowerRequirement or outputPowerCharacter may be 0. Most drivers implement their 'OFF' state, used when asleep, by defininf each of the 3 fields as 0. - - The bits listed below are only the most common bits used to define a device's power states. Your device's IO family may require that your device specify other input or output power flags to interact properly. Consult family-specific documentation to determine if your IOPower plane parents or children require other power flags; they probably don't. - - @constant kIOPMPowerOn Indicates the device is on, requires power, and provides power. Useful as a: Capability, InputPowerRequirement, OutputPowerCharacter - - @constant kIOPMDeviceUsable Indicates the device is usable in this state. Useful only as a Capability - - @constant kIOPMLowPower - Indicates device is in a low power state. May be bitwis-OR'd together - with kIOPMDeviceUsable flag, to indicate the device is still usable. - - A device with a capability of kIOPMLowPower may: - Require either 0 or kIOPMPowerOn from its power parent - Offer either kIOPMLowPower, kIOPMPowerOn, or 0 (no power at all) - to its power plane children. - - Useful only as a Capability, although USB drivers should consult USB family documentation for other valid circumstances to use the kIOPMLowPower bit. - - @constant kIOPMPreventIdleSleep - In the capability field of a power state, disallows idle system sleep while the device is in that state. - - For example, displays and disks set this capability for their ON power state; since the system may not idle sleep while the display (and thus keyboard or mouse) or the disk is active. - - Useful only as a Capability. - - @constant kIOPMSleepCapability - Used only by certain IOKit Families (USB). Not defined or used by generic Power Management. Read your family documentation to see if you should define a powerstate using these capabilities. - - @constant kIOPMRestartCapability - Used only by certain IOKit Families (USB). Not defined or used by generic Power Management. Read your family documentation to see if you should define a powerstate using these capabilities. - - @constant kIOPMSleep - Used only by certain IOKit Families (USB). Not defined or used by generic Power Management. Read your family documentation to see if you should define a powerstate using these capabilities. - - @constant kIOPMRestart - Used only by certain IOKit Families (USB). Not defined or used by generic Power Management. Read your family documentation to see if you should define a powerstate using these capabilities. - - @constant kIOPMInitialDeviceState - Indicates the initial power state for the device. If initialPowerStateForDomainState() returns a power state with this flag set in the capability field, then the initial power change is performed without calling the driver's setPowerState(). - - @constant kIOPMRootDomainState - An indication that the power flags represent the state of the root power - domain. This bit must not be set in the IOPMPowerState structure. - Power Management may pass this bit to initialPowerStateForDomainState() - to map from a global system state to the desired device state. -*/ + * @abstract Bits are used in defining capabilityFlags, inputPowerRequirements, and outputPowerCharacter in the IOPMPowerState structure. 
+ * @discussion These bits may be bitwise-OR'd together in the IOPMPowerState capabilityFlags field, the outputPowerCharacter field, and/or the inputPowerRequirement field.
+ *
+ * The comments clearly mark whether each flag should be used in the capabilityFlags field, outputPowerCharacter field, and inputPowerRequirement field, or all three.
+ *
+ * The value of capabilityFlags, inputPowerRequirement or outputPowerCharacter may be 0. Most drivers implement their 'OFF' state, used when asleep, by defining each of the 3 fields as 0.
+ *
+ * The bits listed below are only the most common bits used to define a device's power states. Your device's IO family may require that your device specify other input or output power flags to interact properly. Consult family-specific documentation to determine if your IOPower plane parents or children require other power flags; they probably don't.
+ *
+ * @constant kIOPMPowerOn Indicates the device is on, requires power, and provides power. Useful as a: Capability, InputPowerRequirement, OutputPowerCharacter
+ *
+ * @constant kIOPMDeviceUsable Indicates the device is usable in this state. Useful only as a Capability
+ *
+ * @constant kIOPMLowPower
+ * Indicates device is in a low power state. May be bitwise-OR'd together
+ * with kIOPMDeviceUsable flag, to indicate the device is still usable.
+ *
+ * A device with a capability of kIOPMLowPower may:
+ * Require either 0 or kIOPMPowerOn from its power parent
+ * Offer either kIOPMLowPower, kIOPMPowerOn, or 0 (no power at all)
+ * to its power plane children.
+ *
+ * Useful only as a Capability, although USB drivers should consult USB family documentation for other valid circumstances to use the kIOPMLowPower bit.
+ *
+ * @constant kIOPMPreventIdleSleep
+ * In the capability field of a power state, disallows idle system sleep while the device is in that state.
+ *
+ * For example, displays and disks set this capability for their ON power state, since the system may not idle sleep while the display (and thus keyboard or mouse) or the disk is active.
+ *
+ * Useful only as a Capability.
+ *
+ * @constant kIOPMSleepCapability
+ * Used only by certain IOKit Families (USB). Not defined or used by generic Power Management. Read your family documentation to see if you should define a powerstate using these capabilities.
+ *
+ * @constant kIOPMRestartCapability
+ * Used only by certain IOKit Families (USB). Not defined or used by generic Power Management. Read your family documentation to see if you should define a powerstate using these capabilities.
+ *
+ * @constant kIOPMSleep
+ * Used only by certain IOKit Families (USB). Not defined or used by generic Power Management. Read your family documentation to see if you should define a powerstate using these capabilities.
+ *
+ * @constant kIOPMRestart
+ * Used only by certain IOKit Families (USB). Not defined or used by generic Power Management. Read your family documentation to see if you should define a powerstate using these capabilities.
+ *
+ * @constant kIOPMInitialDeviceState
+ * Indicates the initial power state for the device. If initialPowerStateForDomainState() returns a power state with this flag set in the capability field, then the initial power change is performed without calling the driver's setPowerState().
+ *
+ * @constant kIOPMRootDomainState
+ * An indication that the power flags represent the state of the root power
+ * domain. This bit must not be set in the IOPMPowerState structure.
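/*
 * [Editorial sketch, not part of the patch: a typical use of the flags
 * documented above. A driver commonly declares an OFF state with all three
 * fields zero and an ON state that is usable and both requires and provides
 * power. The initializer order follows IOPMPowerState: version,
 * capabilityFlags, outputPowerCharacter, inputPowerRequirement, then the
 * power and timing fields, all zero here. gMyPowerStates is a hypothetical
 * name.]
 */
static IOPMPowerState gMyPowerStates[2] = {
	// OFF: no capabilities, offers no power, requires no power
	{ kIOPMPowerStateVersion1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	// ON: usable, provides power to children, requires power from parent
	{ kIOPMPowerStateVersion1, kIOPMPowerOn | kIOPMDeviceUsable,
	  kIOPMPowerOn, kIOPMPowerOn, 0, 0, 0, 0, 0, 0, 0, 0 }
};
// Registered from the driver's start() after PMinit() and joinPMtree(provider):
//     registerPowerDriver(this, gMyPowerStates, 2);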
+ * Power Management may pass this bit to initialPowerStateForDomainState() + * to map from a global system state to the desired device state. + */ typedef unsigned long IOPMPowerFlags; enum { - kIOPMPowerOn = 0x00000002, - kIOPMDeviceUsable = 0x00008000, - kIOPMLowPower = 0x00010000, - kIOPMPreventIdleSleep = 0x00000040, - kIOPMSleepCapability = 0x00000004, - kIOPMRestartCapability = 0x00000080, - kIOPMSleep = 0x00000001, - kIOPMRestart = 0x00000080, - kIOPMInitialDeviceState = 0x00000100, - kIOPMRootDomainState = 0x00000200 + kIOPMPowerOn = 0x00000002, + kIOPMDeviceUsable = 0x00008000, + kIOPMLowPower = 0x00010000, + kIOPMPreventIdleSleep = 0x00000040, + kIOPMSleepCapability = 0x00000004, + kIOPMRestartCapability = 0x00000080, + kIOPMSleep = 0x00000001, + kIOPMRestart = 0x00000080, + kIOPMInitialDeviceState = 0x00000100, + kIOPMRootDomainState = 0x00000200 }; /* @@ -119,13 +119,13 @@ enum { * Their behavior is undefined */ enum { - kIOPMClockNormal = 0x0004, - kIOPMClockRunning = 0x0008, - kIOPMPreventSystemSleep = 0x0010, - kIOPMDoze = 0x0400, - kIOPMChildClamp = 0x0080, - kIOPMChildClamp2 = 0x0200, - kIOPMNotPowerManaged = 0x0800 + kIOPMClockNormal = 0x0004, + kIOPMClockRunning = 0x0008, + kIOPMPreventSystemSleep = 0x0010, + kIOPMDoze = 0x0400, + kIOPMChildClamp = 0x0080, + kIOPMChildClamp2 = 0x0200, + kIOPMNotPowerManaged = 0x0800 }; /* @@ -134,61 +134,61 @@ enum { * Capability, InputPowerRequirement, or OutputPowerCharacter fields. */ enum { - kIOPMMaxPerformance = 0x4000, - kIOPMPassThrough = 0x0100, - kIOPMAuxPowerOn = 0x0020, - kIOPMNotAttainable = 0x0001, - kIOPMContextRetained = 0x2000, - kIOPMConfigRetained = 0x1000, - kIOPMStaticPowerValid = 0x0800, - kIOPMSoftSleep = 0x0400, - kIOPMCapabilitiesMask = kIOPMPowerOn | kIOPMDeviceUsable | - kIOPMMaxPerformance | kIOPMContextRetained | - kIOPMConfigRetained | kIOPMSleepCapability | - kIOPMRestartCapability + kIOPMMaxPerformance = 0x4000, + kIOPMPassThrough = 0x0100, + kIOPMAuxPowerOn = 0x0020, + kIOPMNotAttainable = 0x0001, + kIOPMContextRetained = 0x2000, + kIOPMConfigRetained = 0x1000, + kIOPMStaticPowerValid = 0x0800, + kIOPMSoftSleep = 0x0400, + kIOPMCapabilitiesMask = kIOPMPowerOn | kIOPMDeviceUsable | + kIOPMMaxPerformance | kIOPMContextRetained | + kIOPMConfigRetained | kIOPMSleepCapability | + kIOPMRestartCapability }; /* * Support for old names of IOPMPowerFlag constants */ enum { - IOPMNotAttainable = kIOPMNotAttainable, - IOPMPowerOn = kIOPMPowerOn, - IOPMClockNormal = kIOPMClockNormal, - IOPMClockRunning = kIOPMClockRunning, - IOPMAuxPowerOn = kIOPMAuxPowerOn, - IOPMDeviceUsable = kIOPMDeviceUsable, - IOPMMaxPerformance = kIOPMMaxPerformance, - IOPMContextRetained = kIOPMContextRetained, - IOPMConfigRetained = kIOPMConfigRetained, - IOPMNotPowerManaged = kIOPMNotPowerManaged, - IOPMSoftSleep = kIOPMSoftSleep + IOPMNotAttainable = kIOPMNotAttainable, + IOPMPowerOn = kIOPMPowerOn, + IOPMClockNormal = kIOPMClockNormal, + IOPMClockRunning = kIOPMClockRunning, + IOPMAuxPowerOn = kIOPMAuxPowerOn, + IOPMDeviceUsable = kIOPMDeviceUsable, + IOPMMaxPerformance = kIOPMMaxPerformance, + IOPMContextRetained = kIOPMContextRetained, + IOPMConfigRetained = kIOPMConfigRetained, + IOPMNotPowerManaged = kIOPMNotPowerManaged, + IOPMSoftSleep = kIOPMSoftSleep }; enum { - kIOPMNextHigherState = 1, - kIOPMHighestState = 2, - kIOPMNextLowerState = 3, - kIOPMLowestState = 4 + kIOPMNextHigherState = 1, + kIOPMHighestState = 2, + kIOPMNextLowerState = 3, + kIOPMLowestState = 4 }; enum { - IOPMNextHigherState = kIOPMNextHigherState, - 
IOPMHighestState = kIOPMHighestState, - IOPMNextLowerState = kIOPMNextLowerState, - IOPMLowestState = kIOPMLowestState + IOPMNextHigherState = kIOPMNextHigherState, + IOPMHighestState = kIOPMHighestState, + IOPMNextLowerState = kIOPMNextLowerState, + IOPMLowestState = kIOPMLowestState }; // Internal commands used by power managment command queue enum { - kIOPMBroadcastAggressiveness = 1, - kIOPMUnidleDevice + kIOPMBroadcastAggressiveness = 1, + kIOPMUnidleDevice }; // Power consumption unknown value enum { - kIOPMUnknown = 0xFFFF + kIOPMUnknown = 0xFFFF }; /******************************************************************************* @@ -197,7 +197,7 @@ enum { * ******************************************************************************/ -/* AppleClamshellState +/* AppleClamshellState * reflects the state of the clamshell (lid) on a portable. * It has a boolean value. * true == clamshell is closed @@ -206,11 +206,11 @@ enum { */ #define kAppleClamshellStateKey "AppleClamshellState" -/* AppleClamshellCausesSleep - * reflects the clamshell close behavior on a portable. +/* AppleClamshellCausesSleep + * reflects the clamshell close behavior on a portable. * It has a boolean value. * true == system will sleep when clamshell is closed - * false == system will not sleep on clamshell close + * false == system will not sleep on clamshell close * (typically external display mode) * not present == no clamshell on this hardware */ @@ -224,7 +224,7 @@ enum { * The key becomes invalid at the completion of a system wakeup. The * property will not be present in the IOPMrootDomain's registry entry * when it is invalid. - * + * * See IOPMrootDomain notification kIOPMMessageSleepWakeUUIDChange */ #define kIOPMSleepWakeUUIDKey "SleepWakeUUID" @@ -232,8 +232,8 @@ enum { /* kIOPMBootSessionUUIDKey * Key refers to a CFStringRef that will uniquely identify * a boot cycle. - * The key becomes valid at boot time and remains valid - * till shutdown. The property value will remain same across + * The key becomes valid at boot time and remains valid + * till shutdown. The property value will remain same across * sleep/wake/hibernate cycle. */ #define kIOPMBootSessionUUIDKey "BootSessionUUID" @@ -315,77 +315,77 @@ enum { * Driver PM assertions are defined by these bits. */ enum { - /*! kIOPMDriverAssertionCPUBit - * When set, PM kernel will prefer to leave the CPU and core hardware - * running in "Dark Wake" state, instead of sleeping. - */ - kIOPMDriverAssertionCPUBit = 0x01, - - /*! kIOPMDriverAssertionUSBExternalDeviceBit - * When set, driver is informing PM that an external USB device is attached. - */ - kIOPMDriverAssertionUSBExternalDeviceBit = 0x04, - - /*! kIOPMDriverAssertionBluetoothHIDDevicePairedBit - * When set, driver is informing PM that a Bluetooth HID device is paired. - */ - kIOPMDriverAssertionBluetoothHIDDevicePairedBit = 0x08, - - /*! kIOPMDriverAssertionExternalMediaMountedBit - * When set, driver is informing PM that an external media is mounted. - */ - kIOPMDriverAssertionExternalMediaMountedBit = 0x10, - - /*! kIOPMDriverAssertionReservedBit5 - * Reserved for Thunderbolt. - */ - kIOPMDriverAssertionReservedBit5 = 0x20, - - /*! kIOPMDriverAssertionPreventDisplaySleepBit - * When set, the display should remain powered on while the system's awake. - */ - kIOPMDriverAssertionPreventDisplaySleepBit = 0x40, - - /*! kIOPMDriverAssertionReservedBit7 - * Reserved for storage family. - */ - kIOPMDriverAssertionReservedBit7 = 0x80, - - /*! 
kIOPMDriverAssertionMagicPacketWakeEnabledBit - * When set, driver is informing PM that magic packet wake is enabled. - */ - kIOPMDriverAssertionMagicPacketWakeEnabledBit = 0x100, - - /*! kIOPMDriverAssertionNetworkKeepAliveActiveBit - * When set, driver is informing PM that it is holding the network - * interface up to do TCPKeepAlive - */ - kIOPMDriverAssertionNetworkKeepAliveActiveBit = 0x200 + /*! kIOPMDriverAssertionCPUBit + * When set, PM kernel will prefer to leave the CPU and core hardware + * running in "Dark Wake" state, instead of sleeping. + */ + kIOPMDriverAssertionCPUBit = 0x01, + + /*! kIOPMDriverAssertionUSBExternalDeviceBit + * When set, driver is informing PM that an external USB device is attached. + */ + kIOPMDriverAssertionUSBExternalDeviceBit = 0x04, + + /*! kIOPMDriverAssertionBluetoothHIDDevicePairedBit + * When set, driver is informing PM that a Bluetooth HID device is paired. + */ + kIOPMDriverAssertionBluetoothHIDDevicePairedBit = 0x08, + + /*! kIOPMDriverAssertionExternalMediaMountedBit + * When set, driver is informing PM that an external media is mounted. + */ + kIOPMDriverAssertionExternalMediaMountedBit = 0x10, + + /*! kIOPMDriverAssertionReservedBit5 + * Reserved for Thunderbolt. + */ + kIOPMDriverAssertionReservedBit5 = 0x20, + + /*! kIOPMDriverAssertionPreventDisplaySleepBit + * When set, the display should remain powered on while the system's awake. + */ + kIOPMDriverAssertionPreventDisplaySleepBit = 0x40, + + /*! kIOPMDriverAssertionReservedBit7 + * Reserved for storage family. + */ + kIOPMDriverAssertionReservedBit7 = 0x80, + + /*! kIOPMDriverAssertionMagicPacketWakeEnabledBit + * When set, driver is informing PM that magic packet wake is enabled. + */ + kIOPMDriverAssertionMagicPacketWakeEnabledBit = 0x100, + + /*! kIOPMDriverAssertionNetworkKeepAliveActiveBit + * When set, driver is informing PM that it is holding the network + * interface up to do TCPKeepAlive + */ + kIOPMDriverAssertionNetworkKeepAliveActiveBit = 0x200 }; - /* kIOPMAssertionsDriverKey - * This kIOPMrootDomain key refers to a CFNumberRef property, containing - * a bitfield describing the aggregate PM assertion levels. - * Example: A value of 0 indicates that no driver has asserted anything. - * Or, a value of kIOPMDriverAssertionCPUBit - * indicates that a driver (or drivers) have asserted a need for CPU and video. - */ +/* kIOPMAssertionsDriverKey + * This kIOPMrootDomain key refers to a CFNumberRef property, containing + * a bitfield describing the aggregate PM assertion levels. + * Example: A value of 0 indicates that no driver has asserted anything. + * Or, a value of kIOPMDriverAssertionCPUBit + * indicates that a driver (or drivers) have asserted a need for CPU and video. + */ #define kIOPMAssertionsDriverKey "DriverPMAssertions" - /* kIOPMAssertionsDriverKey - * This kIOPMrootDomain key refers to a CFNumberRef property, containing - * a bitfield describing the aggregate PM assertion levels. - * Example: A value of 0 indicates that no driver has asserted anything. - * Or, a value of kIOPMDriverAssertionCPUBit - * indicates that a driver (or drivers) have asserted a need for CPU and video. - */ +/* kIOPMAssertionsDriverKey + * This kIOPMrootDomain key refers to a CFNumberRef property, containing + * a bitfield describing the aggregate PM assertion levels. + * Example: A value of 0 indicates that no driver has asserted anything. + * Or, a value of kIOPMDriverAssertionCPUBit + * indicates that a driver (or drivers) have asserted a need for CPU and video. 
+ */ #define kIOPMAssertionsDriverDetailedKey "DriverPMAssertionsDetailed" /******************************************************************************* * * Kernel Driver assertion detailed dictionary keys * - * Keys decode the Array & dictionary data structure under IOPMrootDomain property + * Keys decode the Array & dictionary data structure under IOPMrootDomain property * kIOPMAssertionsDriverKey. * */ @@ -402,8 +402,8 @@ enum { * * Root Domain general interest messages * - * Available by registering for interest type 'gIOGeneralInterest' - * on IOPMrootDomain. + * Available by registering for interest type 'gIOGeneralInterest' + * on IOPMrootDomain. * ******************************************************************************/ @@ -416,12 +416,12 @@ enum { * the message. Check bits 0 and 1 using kClamshellStateBit & kClamshellSleepBit */ enum { - kClamshellStateBit = (1 << 0), - kClamshellSleepBit = (1 << 1) + kClamshellStateBit = (1 << 0), + kClamshellSleepBit = (1 << 1) }; #define kIOPMMessageClamshellStateChange \ - iokit_family_msg(sub_iokit_powermanagement, 0x100) + iokit_family_msg(sub_iokit_powermanagement, 0x100) /* kIOPMMessageFeatureChange * Delivered when the set of supported features ("Supported Features" dictionary @@ -430,47 +430,47 @@ enum { * RootDomain passes no argument with this message. */ #define kIOPMMessageFeatureChange \ - iokit_family_msg(sub_iokit_powermanagement, 0x110) + iokit_family_msg(sub_iokit_powermanagement, 0x110) /* kIOPMMessageInflowDisableCancelled - * The battery has drained completely to its "Fully Discharged" state. - * If a user process has disabled battery inflow for battery + * The battery has drained completely to its "Fully Discharged" state. + * If a user process has disabled battery inflow for battery * calibration, we forcibly re-enable Inflow at this point. * If inflow HAS been forcibly re-enabled, bit 0 * (kInflowForciblyEnabledBit) will be set. */ enum { - kInflowForciblyEnabledBit = (1 << 0) + kInflowForciblyEnabledBit = (1 << 0) }; /* kIOPMMessageInternalBatteryFullyDischarged - * The battery has drained completely to its "Fully Discharged" state. + * The battery has drained completely to its "Fully Discharged" state. */ #define kIOPMMessageInternalBatteryFullyDischarged \ - iokit_family_msg(sub_iokit_powermanagement, 0x120) + iokit_family_msg(sub_iokit_powermanagement, 0x120) /* kIOPMMessageSystemPowerEventOccurred * Some major system thermal property has changed, and interested clients may * modify their behavior. */ #define kIOPMMessageSystemPowerEventOccurred \ - iokit_family_msg(sub_iokit_powermanagement, 0x130) + iokit_family_msg(sub_iokit_powermanagement, 0x130) /* kIOPMMessageSleepWakeUUIDChange * Either a new SleepWakeUUID has been specified at the beginning of a sleep, * or we're removing the existing property upon completion of a wakeup. */ #define kIOPMMessageSleepWakeUUIDChange \ - iokit_family_msg(sub_iokit_powermanagement, 0x140) - + iokit_family_msg(sub_iokit_powermanagement, 0x140) + /* kIOPMMessageSleepWakeUUIDSet - * Argument accompanying the kIOPMMessageSleepWakeUUIDChange notification when + * Argument accompanying the kIOPMMessageSleepWakeUUIDChange notification when * a new UUID has been specified. */ #define kIOPMMessageSleepWakeUUIDSet ((void *)1) /* kIOPMMessageSleepWakeUUIDCleared - * Argument accompanying the kIOPMMessageSleepWakeUUIDChange notification when + * Argument accompanying the kIOPMMessageSleepWakeUUIDChange notification when * the current UUID has been removed. 
*/ #define kIOPMMessageSleepWakeUUIDCleared ((void *)0) @@ -479,14 +479,14 @@ enum { * Sent when kernel PM driver assertions have changed. */ #define kIOPMMessageDriverAssertionsChanged \ - iokit_family_msg(sub_iokit_powermanagement, 0x150) + iokit_family_msg(sub_iokit_powermanagement, 0x150) /*! kIOPMMessageDarkWakeThermalEmergency * Sent when machine becomes unsustainably warm in DarkWake. * Kernel PM might choose to put the machine back to sleep right after. */ #define kIOPMMessageDarkWakeThermalEmergency \ - iokit_family_msg(sub_iokit_powermanagement, 0x160) + iokit_family_msg(sub_iokit_powermanagement, 0x160) /******************************************************************************* * @@ -499,18 +499,18 @@ enum { * TODO: deprecate kIOPMAllowSleep and kIOPMPreventSleep ******************************************************************************/ enum { - kIOPMSleepNow = (1<<0), // put machine to sleep now - kIOPMAllowSleep = (1<<1), // allow idle sleep - kIOPMPreventSleep = (1<<2), // do not allow idle sleep - kIOPMPowerButton = (1<<3), // power button was pressed - kIOPMClamshellClosed = (1<<4), // clamshell was closed - kIOPMPowerEmergency = (1<<5), // battery dangerously low - kIOPMDisableClamshell = (1<<6), // do not sleep on clamshell closure - kIOPMEnableClamshell = (1<<7), // sleep on clamshell closure - kIOPMProcessorSpeedChange = (1<<8), // change the processor speed - kIOPMOverTemp = (1<<9), // system dangerously hot - kIOPMClamshellOpened = (1<<10), // clamshell was opened - kIOPMDWOverTemp = (1<<11) // DarkWake thermal limits exceeded. + kIOPMSleepNow = (1 << 0),// put machine to sleep now + kIOPMAllowSleep = (1 << 1),// allow idle sleep + kIOPMPreventSleep = (1 << 2),// do not allow idle sleep + kIOPMPowerButton = (1 << 3),// power button was pressed + kIOPMClamshellClosed = (1 << 4),// clamshell was closed + kIOPMPowerEmergency = (1 << 5),// battery dangerously low + kIOPMDisableClamshell = (1 << 6),// do not sleep on clamshell closure + kIOPMEnableClamshell = (1 << 7),// sleep on clamshell closure + kIOPMProcessorSpeedChange = (1 << 8),// change the processor speed + kIOPMOverTemp = (1 << 9),// system dangerously hot + kIOPMClamshellOpened = (1 << 10),// clamshell was opened + kIOPMDWOverTemp = (1 << 11)// DarkWake thermal limits exceeded. }; @@ -520,48 +520,48 @@ enum { * ******************************************************************************/ enum { - kIOPMNoErr = 0, - - // Returned by driver's setPowerState(), powerStateWillChangeTo(), - // powerStateDidChangeTo(), or acknowledgeSetPowerState() to - // implicitly acknowledge power change upon function return. - kIOPMAckImplied = 0, - - // Deprecated - kIOPMWillAckLater = 1, - - // Returned by requestPowerDomainState() to indicate - // unrecognized specification parameter. - kIOPMBadSpecification = 4, - - // Returned by requestPowerDomainState() to indicate - // no power state matches search specification. - kIOPMNoSuchState = 5, - - // Deprecated - kIOPMCannotRaisePower = 6, - - // Deprecated - kIOPMParameterError = 7, - - // Returned when power management state is accessed - // before driver has called PMinit(). 
- kIOPMNotYetInitialized = 8, - - // And the old constants; deprecated - IOPMNoErr = kIOPMNoErr, - IOPMAckImplied = kIOPMAckImplied, - IOPMWillAckLater = kIOPMWillAckLater, - IOPMBadSpecification = kIOPMBadSpecification, - IOPMNoSuchState = kIOPMNoSuchState, - IOPMCannotRaisePower = kIOPMCannotRaisePower, - IOPMParameterError = kIOPMParameterError, - IOPMNotYetInitialized = kIOPMNotYetInitialized + kIOPMNoErr = 0, + + // Returned by driver's setPowerState(), powerStateWillChangeTo(), + // powerStateDidChangeTo(), or acknowledgeSetPowerState() to + // implicitly acknowledge power change upon function return. + kIOPMAckImplied = 0, + + // Deprecated + kIOPMWillAckLater = 1, + + // Returned by requestPowerDomainState() to indicate + // unrecognized specification parameter. + kIOPMBadSpecification = 4, + + // Returned by requestPowerDomainState() to indicate + // no power state matches search specification. + kIOPMNoSuchState = 5, + + // Deprecated + kIOPMCannotRaisePower = 6, + + // Deprecated + kIOPMParameterError = 7, + + // Returned when power management state is accessed + // before driver has called PMinit(). + kIOPMNotYetInitialized = 8, + + // And the old constants; deprecated + IOPMNoErr = kIOPMNoErr, + IOPMAckImplied = kIOPMAckImplied, + IOPMWillAckLater = kIOPMWillAckLater, + IOPMBadSpecification = kIOPMBadSpecification, + IOPMNoSuchState = kIOPMNoSuchState, + IOPMCannotRaisePower = kIOPMCannotRaisePower, + IOPMParameterError = kIOPMParameterError, + IOPMNotYetInitialized = kIOPMNotYetInitialized }; // IOPMPowerSource class descriptive strings -// Power Source state is published as properties to the IORegistry under these +// Power Source state is published as properties to the IORegistry under these // keys. #define kIOPMPSExternalConnectedKey "ExternalConnected" #define kIOPMPSExternalChargeCapableKey "ExternalChargeCapable" @@ -589,11 +589,11 @@ enum { #define kIOPMPSLegacyBatteryInfoKey "LegacyBatteryInfo" #define kIOPMPSBatteryHealthKey "BatteryHealth" #define kIOPMPSHealthConfidenceKey "HealthConfidence" -#define kIOPMPSCapacityEstimatedKey "CapacityEstimated" +#define kIOPMPSCapacityEstimatedKey "CapacityEstimated" #define kIOPMPSBatteryChargeStatusKey "ChargeStatus" #define kIOPMPSBatteryTemperatureKey "Temperature" -#define kIOPMPSAdapterDetailsKey "AdapterDetails" -#define kIOPMPSChargerConfigurationKey "ChargerConfiguration" +#define kIOPMPSAdapterDetailsKey "AdapterDetails" +#define kIOPMPSChargerConfigurationKey "ChargerConfiguration" // kIOPMPSBatteryChargeStatusKey may have one of the following values, or may have // no value. If kIOPMBatteryChargeStatusKey has a NULL value (or no value) associated with it @@ -601,71 +601,71 @@ enum { // then the charge may have been interrupted. #define kIOPMBatteryChargeStatusTooHot "HighTemperature" #define kIOPMBatteryChargeStatusTooCold "LowTemperature" -#define kIOPMBatteryChargeStatusTooHotOrCold "HighOrLowTemperature" +#define kIOPMBatteryChargeStatusTooHotOrCold "HighOrLowTemperature" #define kIOPMBatteryChargeStatusGradient "BatteryTemperatureGradient" // Definitions for battery location, in case of multiple batteries. // A location of 0 is unspecified // Location is undefined for single battery systems enum { - kIOPMPSLocationLeft = 1001, - kIOPMPSLocationRight = 1002 + kIOPMPSLocationLeft = 1001, + kIOPMPSLocationRight = 1002 }; // Battery quality health types, specified by BatteryHealth and HealthConfidence // properties in an IOPMPowerSource battery kext. 
enum { - kIOPMUndefinedValue = 0, - kIOPMPoorValue = 1, - kIOPMFairValue = 2, - kIOPMGoodValue = 3 + kIOPMUndefinedValue = 0, + kIOPMPoorValue = 1, + kIOPMFairValue = 2, + kIOPMGoodValue = 3 }; // Keys for kIOPMPSAdapterDetailsKey dictionary -#define kIOPMPSAdapterDetailsIDKey "AdapterID" -#define kIOPMPSAdapterDetailsWattsKey "Watts" -#define kIOPMPSAdapterDetailsRevisionKey "AdapterRevision" -#define kIOPMPSAdapterDetailsSerialNumberKey "SerialNumber" -#define kIOPMPSAdapterDetailsFamilyKey "FamilyCode" -#define kIOPMPSAdapterDetailsAmperageKey "Amperage" -#define kIOPMPSAdapterDetailsDescriptionKey "Description" +#define kIOPMPSAdapterDetailsIDKey "AdapterID" +#define kIOPMPSAdapterDetailsWattsKey "Watts" +#define kIOPMPSAdapterDetailsRevisionKey "AdapterRevision" +#define kIOPMPSAdapterDetailsSerialNumberKey "SerialNumber" +#define kIOPMPSAdapterDetailsFamilyKey "FamilyCode" +#define kIOPMPSAdapterDetailsAmperageKey "Amperage" +#define kIOPMPSAdapterDetailsDescriptionKey "Description" #define kIOPMPSAdapterDetailsPMUConfigurationKey "PMUConfiguration" #define kIOPMPSAdapterDetailsVoltage "AdapterVoltage" -#define kIOPMPSAdapterDetailsSourceIDKey "SourceID" -#define kIOPMPSAdapterDetailsErrorFlagsKey "ErrorFlags" -#define kIOPMPSAdapterDetailsSharedSourceKey "SharedSource" -#define kIOPMPSAdapterDetailsCloakedKey "CloakedSource" +#define kIOPMPSAdapterDetailsSourceIDKey "SourceID" +#define kIOPMPSAdapterDetailsErrorFlagsKey "ErrorFlags" +#define kIOPMPSAdapterDetailsSharedSourceKey "SharedSource" +#define kIOPMPSAdapterDetailsCloakedKey "CloakedSource" // values for kIOPSPowerAdapterFamilyKey enum { - kIOPSFamilyCodeDisconnected = 0, - kIOPSFamilyCodeUnsupported = kIOReturnUnsupported, - kIOPSFamilyCodeFirewire = iokit_family_err(sub_iokit_firewire, 0), - kIOPSFamilyCodeUSBHost = iokit_family_err(sub_iokit_usb, 0), - kIOPSFamilyCodeUSBHostSuspended = iokit_family_err(sub_iokit_usb, 1), - kIOPSFamilyCodeUSBDevice = iokit_family_err(sub_iokit_usb, 2), - kIOPSFamilyCodeUSBAdapter = iokit_family_err(sub_iokit_usb, 3), - kIOPSFamilyCodeUSBChargingPortDedicated = iokit_family_err(sub_iokit_usb, 4), - kIOPSFamilyCodeUSBChargingPortDownstream = iokit_family_err(sub_iokit_usb, 5), - kIOPSFamilyCodeUSBChargingPort = iokit_family_err(sub_iokit_usb, 6), - kIOPSFamilyCodeUSBUnknown = iokit_family_err(sub_iokit_usb, 7), - kIOPSFamilyCodeUSBCBrick = iokit_family_err(sub_iokit_usb, 8), - kIOPSFamilyCodeUSBCTypeC = iokit_family_err(sub_iokit_usb, 9), - kIOPSFamilyCodeUSBCPD = iokit_family_err(sub_iokit_usb, 10), - kIOPSFamilyCodeAC = iokit_family_err(sub_iokit_pmu, 0), - kIOPSFamilyCodeExternal = iokit_family_err(sub_iokit_pmu, 1), - kIOPSFamilyCodeExternal2 = iokit_family_err(sub_iokit_pmu, 2), - kIOPSFamilyCodeExternal3 = iokit_family_err(sub_iokit_pmu, 3), - kIOPSFamilyCodeExternal4 = iokit_family_err(sub_iokit_pmu, 4), - kIOPSFamilyCodeExternal5 = iokit_family_err(sub_iokit_pmu, 5), + kIOPSFamilyCodeDisconnected = 0, + kIOPSFamilyCodeUnsupported = kIOReturnUnsupported, + kIOPSFamilyCodeFirewire = iokit_family_err(sub_iokit_firewire, 0), + kIOPSFamilyCodeUSBHost = iokit_family_err(sub_iokit_usb, 0), + kIOPSFamilyCodeUSBHostSuspended = iokit_family_err(sub_iokit_usb, 1), + kIOPSFamilyCodeUSBDevice = iokit_family_err(sub_iokit_usb, 2), + kIOPSFamilyCodeUSBAdapter = iokit_family_err(sub_iokit_usb, 3), + kIOPSFamilyCodeUSBChargingPortDedicated = iokit_family_err(sub_iokit_usb, 4), + kIOPSFamilyCodeUSBChargingPortDownstream = iokit_family_err(sub_iokit_usb, 5), + kIOPSFamilyCodeUSBChargingPort = 
iokit_family_err(sub_iokit_usb, 6), + kIOPSFamilyCodeUSBUnknown = iokit_family_err(sub_iokit_usb, 7), + kIOPSFamilyCodeUSBCBrick = iokit_family_err(sub_iokit_usb, 8), + kIOPSFamilyCodeUSBCTypeC = iokit_family_err(sub_iokit_usb, 9), + kIOPSFamilyCodeUSBCPD = iokit_family_err(sub_iokit_usb, 10), + kIOPSFamilyCodeAC = iokit_family_err(sub_iokit_pmu, 0), + kIOPSFamilyCodeExternal = iokit_family_err(sub_iokit_pmu, 1), + kIOPSFamilyCodeExternal2 = iokit_family_err(sub_iokit_pmu, 2), + kIOPSFamilyCodeExternal3 = iokit_family_err(sub_iokit_pmu, 3), + kIOPSFamilyCodeExternal4 = iokit_family_err(sub_iokit_pmu, 4), + kIOPSFamilyCodeExternal5 = iokit_family_err(sub_iokit_pmu, 5), }; // values for kIOPMPSAdapterDetailsErrorFlagsKey enum { - kIOPSAdapterErrorFlagNoErrors = 0, - kIOPSAdapterErrorFlagInsufficientAvailablePower = (1 << 1), - kIOPSAdapterErrorFlagForeignObjectDetected = (1 << 2), - kIOPSAdapterErrorFlagDeviceNeedsToBeRepositioned = (1 << 3), + kIOPSAdapterErrorFlagNoErrors = 0, + kIOPSAdapterErrorFlagInsufficientAvailablePower = (1 << 1), + kIOPSAdapterErrorFlagForeignObjectDetected = (1 << 2), + kIOPSAdapterErrorFlagDeviceNeedsToBeRepositioned = (1 << 3), }; // Battery's time remaining estimate is invalid this long (seconds) after a wake @@ -675,7 +675,7 @@ enum { // the battery is settled. #define kIOPMPSPostChargeWaitSecondsKey "PostChargeWaitSeconds" -// Battery must wait this long (seconds) after being completely discharged +// Battery must wait this long (seconds) after being completely discharged // before the battery is settled. #define kIOPMPSPostDishargeWaitSecondsKey "PostDischargeWaitSeconds" @@ -685,7 +685,7 @@ enum { * Or as arguments to IOPMSystemPowerEventOccurred() * Or to decode the dictionary obtained from IOPMCopyCPUPowerStatus() * These keys reflect restrictions placed on the CPU by the system - * to bring the CPU's power consumption within allowable thermal and + * to bring the CPU's power consumption within allowable thermal and * power constraints. */ @@ -712,7 +712,7 @@ enum { */ #define kIOPMCPUPowerLimitsKey "CPU_Power_Limits" -/* kIOPMCPUPowerLimitProcessorSpeedKey defines the speed & voltage limits placed +/* kIOPMCPUPowerLimitProcessorSpeedKey defines the speed & voltage limits placed * on the CPU. * Represented as a percentage (0-100) of maximum CPU speed. */ @@ -723,7 +723,7 @@ enum { */ #define kIOPMCPUPowerLimitProcessorCountKey "CPU_Available_CPUs" -/* kIOPMCPUPowerLimitSchedulerTimeKey represents the percentage (0-100) of CPU time +/* kIOPMCPUPowerLimitSchedulerTimeKey represents the percentage (0-100) of CPU time * available. 100% at normal operation. The OS may limit this time for a percentage * less than 100%. */ @@ -731,7 +731,7 @@ enum { /* Thermal Level Warning Key - * Indicates the thermal constraints placed on the system. This value may + * Indicates the thermal constraints placed on the system. This value may * cause clients to action to consume fewer system resources. * The value associated with this warning is defined by the platform. 
*/ @@ -746,14 +746,14 @@ enum { * Platform specific values are defined from 100 and above */ enum { - kIOPMThermalLevelNormal = 0, - kIOPMThermalLevelDanger = 5, - kIOPMThermalLevelCritical = 10, + kIOPMThermalLevelNormal = 0, + kIOPMThermalLevelDanger = 5, + kIOPMThermalLevelCritical = 10, - kIOPMThermalLevelWarning = 100, - kIOPMThermalLevelTrap = 110, + kIOPMThermalLevelWarning = 100, + kIOPMThermalLevelTrap = 110, - kIOPMThermalLevelUnknown = 255, + kIOPMThermalLevelUnknown = 255, }; #define kIOPMThermalWarningLevelNormal kIOPMThermalLevelNormal @@ -794,34 +794,34 @@ enum { struct IOPMCalendarStruct { - UInt32 year; - UInt8 month; - UInt8 day; - UInt8 hour; - UInt8 minute; - UInt8 second; - UInt8 selector; + UInt32 year; + UInt8 month; + UInt8 day; + UInt8 hour; + UInt8 minute; + UInt8 second; + UInt8 selector; }; typedef struct IOPMCalendarStruct IOPMCalendarStruct; // SetAggressiveness types enum { - kPMGeneralAggressiveness = 0, - kPMMinutesToDim, - kPMMinutesToSpinDown, - kPMMinutesToSleep, - kPMEthernetWakeOnLANSettings, - kPMSetProcessorSpeed, - kPMPowerSource, - kPMMotionSensor, - kPMLastAggressivenessType + kPMGeneralAggressiveness = 0, + kPMMinutesToDim, + kPMMinutesToSpinDown, + kPMMinutesToSleep, + kPMEthernetWakeOnLANSettings, + kPMSetProcessorSpeed, + kPMPowerSource, + kPMMotionSensor, + kPMLastAggressivenessType }; #define kMaxType (kPMLastAggressivenessType-1) // SetAggressiveness values for the kPMPowerSource aggressiveness type enum { - kIOPMInternalPower = 1, - kIOPMExternalPower + kIOPMInternalPower = 1, + kIOPMExternalPower }; #define kIOREMSleepEnabledKey "REMSleepEnabled" @@ -836,9 +836,9 @@ enum { #define kIOBatteryCycleCountKey "Cycle Count" enum { - kIOBatteryInstalled = (1 << 2), - kIOBatteryCharge = (1 << 1), - kIOBatteryChargerConnect = (1 << 0) + kIOBatteryInstalled = (1 << 2), + kIOBatteryCharge = (1 << 1), + kIOBatteryChargerConnect = (1 << 0) }; // Private power management message indicating battery data has changed @@ -853,17 +853,17 @@ enum { // For use with IOPMPowerSource bFlags #define IOPM_POWER_SOURCE_REV 2 enum { - kIOPMACInstalled = kIOBatteryChargerConnect, - kIOPMBatteryCharging = kIOBatteryCharge, - kIOPMBatteryInstalled = kIOBatteryInstalled, - kIOPMUPSInstalled = (1<<3), - kIOPMBatteryAtWarn = (1<<4), - kIOPMBatteryDepleted = (1<<5), - kIOPMACnoChargeCapability = (1<<6), // AC adapter cannot charge battery - kIOPMRawLowBattery = (1<<7), // used only by Platform Expert - kIOPMForceLowSpeed = (1<<8), // set by Platfm Expert, chk'd by Pwr Plugin - kIOPMClosedClamshell = (1<<9), // set by PMU - reflects state of the clamshell - kIOPMClamshellStateOnWake = (1<<10) // used only by Platform Expert + kIOPMACInstalled = kIOBatteryChargerConnect, + kIOPMBatteryCharging = kIOBatteryCharge, + kIOPMBatteryInstalled = kIOBatteryInstalled, + kIOPMUPSInstalled = (1 << 3), + kIOPMBatteryAtWarn = (1 << 4), + kIOPMBatteryDepleted = (1 << 5), + kIOPMACnoChargeCapability = (1 << 6), // AC adapter cannot charge battery + kIOPMRawLowBattery = (1 << 7), // used only by Platform Expert + kIOPMForceLowSpeed = (1 << 8), // set by Platfm Expert, chk'd by Pwr Plugin + kIOPMClosedClamshell = (1 << 9), // set by PMU - reflects state of the clamshell + kIOPMClamshellStateOnWake = (1 << 10) // used only by Platform Expert }; // ********************************************** @@ -874,82 +874,82 @@ enum { class IOService; enum { - kIOPowerEmergencyLevel = 1000 + kIOPowerEmergencyLevel = 1000 }; enum { - kIOPMSubclassPolicy, - kIOPMSuperclassPolicy1 + kIOPMSubclassPolicy, + 
kIOPMSuperclassPolicy1 #ifdef KERNEL_PRIVATE - , kIOPMActivityTickleTypeAdvisory = 128 + , kIOPMActivityTickleTypeAdvisory = 128 #endif }; struct stateChangeNote { - IOPMPowerFlags stateFlags; - unsigned long stateNum; - void * powerRef; + IOPMPowerFlags stateFlags; + unsigned long stateNum; + void * powerRef; }; typedef struct stateChangeNote stateChangeNote; #endif /* KERNEL && __cplusplus */ struct IOPowerStateChangeNotification { - void * powerRef; - unsigned long returnValue; - unsigned long stateNumber; - IOPMPowerFlags stateFlags; + void * powerRef; + unsigned long returnValue; + unsigned long stateNumber; + IOPMPowerFlags stateFlags; }; typedef struct IOPowerStateChangeNotification IOPowerStateChangeNotification; typedef IOPowerStateChangeNotification sleepWakeNote; /*! @struct IOPMSystemCapabilityChangeParameters - @abstract A structure describing a system capability change. - @discussion A system capability change is a system level transition from a set - of system capabilities to a new set of system capabilities. Power management - sends a kIOMessageSystemCapabilityChange message and provides - this structure as the message data (by reference) to - gIOPriorityPowerStateInterest clients when system capability - changes. - @field notifyRef An identifier for this message notification. Clients with pending - I/O can signal completion by calling allowPowerChange() with this - value as the argument. Clients that are able to process the notification - synchronously should ignore this field. - @field maxWaitForReply A return value to the caller indicating the maximum time in - microseconds to wait for the allowPowerChange() call. The default - value is zero, which indicates the client processing has finished, and power - management should not wait for an allowPowerChange() call. - @field changeFlags Flags will be set to indicate whether the notification precedes - the capability change (kIOPMSystemCapabilityWillChange), or after - the capability change has occurred (kIOPMSystemCapabilityDidChange). - @field __reserved1 Set to zero. - @field fromCapabilities The system capabilities at the start of the transition. - @field toCapabilities The system capabilities at the end of the transition. - @field __reserved2 Set to zero. + * @abstract A structure describing a system capability change. + * @discussion A system capability change is a system level transition from a set + * of system capabilities to a new set of system capabilities. Power management + * sends a kIOMessageSystemCapabilityChange message and provides + * this structure as the message data (by reference) to + * gIOPriorityPowerStateInterest clients when system capability + * changes. + * @field notifyRef An identifier for this message notification. Clients with pending + * I/O can signal completion by calling allowPowerChange() with this + * value as the argument. Clients that are able to process the notification + * synchronously should ignore this field. + * @field maxWaitForReply A return value to the caller indicating the maximum time in + * microseconds to wait for the allowPowerChange() call. The default + * value is zero, which indicates the client processing has finished, and power + * management should not wait for an allowPowerChange() call. + * @field changeFlags Flags will be set to indicate whether the notification precedes + * the capability change (kIOPMSystemCapabilityWillChange), or after + * the capability change has occurred (kIOPMSystemCapabilityDidChange). + * @field __reserved1 Set to zero. 
+ * @field fromCapabilities The system capabilities at the start of the transition. + * @field toCapabilities The system capabilities at the end of the transition. + * @field __reserved2 Set to zero. */ struct IOPMSystemCapabilityChangeParameters { - uint32_t notifyRef; - uint32_t maxWaitForReply; - uint32_t changeFlags; - uint32_t __reserved1; - uint32_t fromCapabilities; - uint32_t toCapabilities; - uint32_t __reserved2[4]; + uint32_t notifyRef; + uint32_t maxWaitForReply; + uint32_t changeFlags; + uint32_t __reserved1; + uint32_t fromCapabilities; + uint32_t toCapabilities; + uint32_t __reserved2[4]; }; /*! @enum IOPMSystemCapabilityChangeFlags - @constant kIOPMSystemCapabilityWillChange Indicates the system capability will change. - @constant kIOPMSystemCapabilityDidChange Indicates the system capability has changed. -*/ + * @constant kIOPMSystemCapabilityWillChange Indicates the system capability will change. + * @constant kIOPMSystemCapabilityDidChange Indicates the system capability has changed. + */ enum { - kIOPMSystemCapabilityWillChange = 0x01, - kIOPMSystemCapabilityDidChange = 0x02 + kIOPMSystemCapabilityWillChange = 0x01, + kIOPMSystemCapabilityDidChange = 0x02 }; enum { - kIOPMSystemCapabilityCPU = 0x01, - kIOPMSystemCapabilityGraphics = 0x02, - kIOPMSystemCapabilityAudio = 0x04, - kIOPMSystemCapabilityNetwork = 0x08 + kIOPMSystemCapabilityCPU = 0x01, + kIOPMSystemCapabilityGraphics = 0x02, + kIOPMSystemCapabilityAudio = 0x04, + kIOPMSystemCapabilityNetwork = 0x08 }; #endif /* ! _IOKIT_IOPM_H */ diff --git a/iokit/IOKit/pwr_mgt/IOPMLibDefs.h b/iokit/IOKit/pwr_mgt/IOPMLibDefs.h index b74d580e9..7caa52528 100644 --- a/iokit/IOKit/pwr_mgt/IOPMLibDefs.h +++ b/iokit/IOKit/pwr_mgt/IOPMLibDefs.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,18 +22,18 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
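For illustration of the IOPMSystemCapabilityChangeParameters contract documented above, a minimal sketch of a gIOPriorityPowerStateInterest client follows; the handler name and the quiesce step are hypothetical, not taken from this header:

    #include <IOKit/IOService.h>
    #include <IOKit/IOMessage.h>
    #include <IOKit/pwr_mgt/IOPM.h>

    static IOReturn
    myCapabilityHandler(void *target, void *refCon, UInt32 messageType,
        IOService *provider, void *messageArgument, vm_size_t argSize)
    {
        if (messageType != kIOMessageSystemCapabilityChange) {
            return kIOReturnSuccess;
        }

        // Per the @discussion above, the struct arrives by reference
        // as the message data.
        IOPMSystemCapabilityChangeParameters *params =
            (IOPMSystemCapabilityChangeParameters *) messageArgument;

        // React on the "will change" leg of a transition that drops the
        // graphics capability, i.e. full wake heading to dark wake or sleep.
        if ((params->changeFlags & kIOPMSystemCapabilityWillChange) &&
            (params->fromCapabilities & kIOPMSystemCapabilityGraphics) &&
            !(params->toCapabilities & kIOPMSystemCapabilityGraphics)) {
            // Quiesce work that needs a full wake (hypothetical step).
        }

        // Leaving params->maxWaitForReply at its default of zero tells
        // power management not to wait for an allowPowerChange() call.
        return kIOReturnSuccess;
    }

A driver would plausibly install this with rootDomain->registerInterest(gIOPriorityPowerStateInterest, myCapabilityHandler, this).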
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#define kPMSetAggressiveness 0 -#define kPMGetAggressiveness 1 -#define kPMSleepSystem 2 -#define kPMAllowPowerChange 3 -#define kPMCancelPowerChange 4 -#define kPMShutdownSystem 5 -#define kPMRestartSystem 6 +#define kPMSetAggressiveness 0 +#define kPMGetAggressiveness 1 +#define kPMSleepSystem 2 +#define kPMAllowPowerChange 3 +#define kPMCancelPowerChange 4 +#define kPMShutdownSystem 5 +#define kPMRestartSystem 6 #define kPMSleepSystemOptions 7 #define kPMSetMaintenanceWakeCalendar 8 #define kPMSetUserAssertionLevels 9 diff --git a/iokit/IOKit/pwr_mgt/IOPMPowerSource.h b/iokit/IOKit/pwr_mgt/IOPMPowerSource.h index 015c70a05..7f199e6b8 100644 --- a/iokit/IOKit/pwr_mgt/IOPMPowerSource.h +++ b/iokit/IOKit/pwr_mgt/IOPMPowerSource.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #ifndef _IOPMPowerSource_h_ #define _IOPMPowerSource_h_ @@ -36,8 +36,8 @@ #include enum { - kSecondsPerHour = 3600, - kTenMinutesInSeconds = 600 + kSecondsPerHour = 3600, + kTenMinutesInSeconds = 600 }; @@ -55,7 +55,7 @@ enum { * 3. When battery state changes, change the relevant member variables * through setCurrentCapacity() style accessors. * 4. Call updateStatus() on itself when all such settings have been updated. - * + * * The subclass driver should also initially populate its settings and call * updateStatus() on launch. * @@ -66,7 +66,7 @@ enum { * ExternalConnected * Type: bool * IORegistry Key: kIOPMPSExternalConnectedKey - * True if computer is drawing external power + * True if computer is drawing external power * * ExternalChargeCapable * Type: bool @@ -142,7 +142,7 @@ enum { * Manufactured Date * Type: unsigned 16-bit bitfield * IORegistry Key: kIOPMPSManufactureDateKey - * Date is published in a bitfield per the Smart Battery Data spec rev 1.1 + * Date is published in a bitfield per the Smart Battery Data spec rev 1.1 * in section 5.1.26 * Bits 0...4 => day (value 1-31; 5 bits) * Bits 5...8 => month (value 1-12; 4 bits) @@ -158,146 +158,146 @@ enum { * IORegistry Key: kIOPMPSSerialKey * String describing serial number or unique info * The serial number published here bears no correspondence to the Apple serial - * number printed on each battery. This is a manufacturer serial number with + * number printed on each battery. This is a manufacturer serial number with * no correlation to the printed serial number.
* * LegacyIOBatteryInfo * Type: OSDictionary * IORegistry Key: kIOPMPSLegacyBatteryInfoKey - * Dictionary conforming to the OS X 10.0-10.4 + * Dictionary conforming to the OS X 10.0-10.4 * */ class IOPMPowerSource : public IOService { - OSDeclareDefaultStructors(IOPMPowerSource) + OSDeclareDefaultStructors(IOPMPowerSource) - friend class IOPMPowerSourceList; + friend class IOPMPowerSourceList; - protected: +protected: /*! @var settingsChangedSinceUpdate * Used by subclasses to determine if any settings have been modified via the * accessors below since last call to update(). true if settings have changed; * false otherwise. */ - bool settingsChangedSinceUpdate; - + bool settingsChangedSinceUpdate; + /*! @var properties * Stores power source state */ - OSDictionary *properties; + OSDictionary *properties; - const OSSymbol *externalConnectedKey; - const OSSymbol *externalChargeCapableKey; - const OSSymbol *batteryInstalledKey; - const OSSymbol *chargingKey; - const OSSymbol *warnLevelKey; - const OSSymbol *criticalLevelKey; - const OSSymbol *currentCapacityKey; - const OSSymbol *maxCapacityKey; - const OSSymbol *timeRemainingKey; - const OSSymbol *amperageKey; - const OSSymbol *voltageKey; - const OSSymbol *cycleCountKey; - const OSSymbol *adapterInfoKey; - const OSSymbol *locationKey; - const OSSymbol *errorConditionKey; - const OSSymbol *manufacturerKey; - const OSSymbol *modelKey; - const OSSymbol *serialKey; - const OSSymbol *batteryInfoKey; + const OSSymbol *externalConnectedKey; + const OSSymbol *externalChargeCapableKey; + const OSSymbol *batteryInstalledKey; + const OSSymbol *chargingKey; + const OSSymbol *warnLevelKey; + const OSSymbol *criticalLevelKey; + const OSSymbol *currentCapacityKey; + const OSSymbol *maxCapacityKey; + const OSSymbol *timeRemainingKey; + const OSSymbol *amperageKey; + const OSSymbol *voltageKey; + const OSSymbol *cycleCountKey; + const OSSymbol *adapterInfoKey; + const OSSymbol *locationKey; + const OSSymbol *errorConditionKey; + const OSSymbol *manufacturerKey; + const OSSymbol *modelKey; + const OSSymbol *serialKey; + const OSSymbol *batteryInfoKey; - // Tracking for IOPMPowerSourceList - IOPMPowerSource *nextInList; +// Tracking for IOPMPowerSourceList + IOPMPowerSource *nextInList; - public: +public: /*! @function powerSource - @abstract Creates a new IOPMPowerSource nub. Must be attached to IORegistry, - and registered by provider. -*/ - static IOPMPowerSource *powerSource(void); + * @abstract Creates a new IOPMPowerSource nub. Must be attached to IORegistry, + * and registered by provider. + */ + static IOPMPowerSource *powerSource(void); + + virtual bool init(void) APPLE_KEXT_OVERRIDE; - virtual bool init(void) APPLE_KEXT_OVERRIDE; - - virtual void free(void) APPLE_KEXT_OVERRIDE; + virtual void free(void) APPLE_KEXT_OVERRIDE; /*! @function updateStatus - @abstract Must be called by physical battery controller when battery state - has changed significantly. - @discussion The system will not poll this object for battery updates.
Rather \ + * the battery's controller must call updateStatus() every time state changes \ + * and the settings will be relayed to higher levels of power management. \ + * The subclassing driver should override this only if the driver needs to add \ + * new settings to the base class. + */ + virtual void updateStatus(void); /* Public accessors for battery state */ - bool externalConnected(void); - bool externalChargeCapable(void); - bool batteryInstalled(void); - bool isCharging(void); - bool atWarnLevel(void); - bool atCriticalLevel(void); + bool externalConnected(void); + bool externalChargeCapable(void); + bool batteryInstalled(void); + bool isCharging(void); + bool atWarnLevel(void); + bool atCriticalLevel(void); + + unsigned int currentCapacity(void); + unsigned int maxCapacity(void); + unsigned int capacityPercentRemaining(void); + int timeRemaining(void); + int amperage(void); + unsigned int voltage(void); + unsigned int cycleCount(void); + int adapterInfo(void); + int location(void); + + OSSymbol *errorCondition(void); + OSSymbol *manufacturer(void); + OSSymbol *model(void); + OSSymbol *serial(void); + OSDictionary *legacyIOBatteryInfo(void); + + OSObject *getPSProperty(const OSSymbol *); - unsigned int currentCapacity(void); - unsigned int maxCapacity(void); - unsigned int capacityPercentRemaining(void); - int timeRemaining(void); - int amperage(void); - unsigned int voltage(void); - unsigned int cycleCount(void); - int adapterInfo(void); - int location(void); - - OSSymbol *errorCondition(void); - OSSymbol *manufacturer(void); - OSSymbol *model(void); - OSSymbol *serial(void); - OSDictionary *legacyIOBatteryInfo(void); - - OSObject *getPSProperty(const OSSymbol *); - protected: /* Protected "setter" methods for subclasses * Subclasses should use these setters to modify all battery properties. - * - * Subclasses must follow all property changes with a call to updateStatus() + * + * Subclasses must follow all property changes with a call to updateStatus() * to flush settings changes to upper level battery API clients. * */ - void setExternalConnected(bool); - void setExternalChargeCapable(bool); - void setBatteryInstalled(bool); - void setIsCharging(bool); - void setAtWarnLevel(bool); - void setAtCriticalLevel(bool); + void setExternalConnected(bool); + void setExternalChargeCapable(bool); + void setBatteryInstalled(bool); + void setIsCharging(bool); + void setAtWarnLevel(bool); + void setAtCriticalLevel(bool); - void setCurrentCapacity(unsigned int); - void setMaxCapacity(unsigned int); - void setTimeRemaining(int); - void setAmperage(int); - void setVoltage(unsigned int); - void setCycleCount(unsigned int); - void setAdapterInfo(int); - void setLocation(int); + void setCurrentCapacity(unsigned int); + void setMaxCapacity(unsigned int); + void setTimeRemaining(int); + void setAmperage(int); + void setVoltage(unsigned int); + void setCycleCount(unsigned int); + void setAdapterInfo(int); + void setLocation(int); - void setErrorCondition(OSSymbol *); - void setManufacturer(OSSymbol *); - void setModel(OSSymbol *); - void setSerial(OSSymbol *); - void setLegacyIOBatteryInfo(OSDictionary *); + void setErrorCondition(OSSymbol *); + void setManufacturer(OSSymbol *); + void setModel(OSSymbol *); + void setSerial(OSSymbol *); + void setLegacyIOBatteryInfo(OSDictionary *); /*! All of these methods funnel through the generic accessor method - setPSProperty. 
Caller can pass in any arbitrary OSSymbol key, and - that value will be stored in the PM settings dictionary, and relayed - onto the IORegistry at update time. + * setPSProperty. Caller can pass in any arbitrary OSSymbol key, and + * that value will be stored in the PM settings dictionary, and relayed + * onto the IORegistry at update time. */ - void setPSProperty(const OSSymbol *, OSObject *); + void setPSProperty(const OSSymbol *, OSObject *); }; #endif diff --git a/iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h b/iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h index cede5e137..f78ca2d54 100644 --- a/iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h +++ b/iokit/IOKit/pwr_mgt/IOPMPowerSourceList.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -32,23 +32,22 @@ class IOPMPowerSource; class IOPMPowerSourceList : public OSObject { - OSDeclareDefaultStructors(IOPMPowerSourceList) - private: - // pointer to first power source in list - IOPMPowerSource *firstItem; + OSDeclareDefaultStructors(IOPMPowerSourceList) +private: +// pointer to first power source in list + IOPMPowerSource *firstItem; - // how many power sources are in the list - unsigned long length; +// how many power sources are in the list + unsigned long length; - public: - void initialize(void); - void free(void) APPLE_KEXT_OVERRIDE; +public: + void initialize(void); + void free(void) APPLE_KEXT_OVERRIDE; - unsigned long numberOfItems(void); - IOReturn addToList(IOPMPowerSource *newPowerSource); - IOReturn removeFromList(IOPMPowerSource *theItem); - - IOPMPowerSource *firstInList(void); - IOPMPowerSource *nextInList(IOPMPowerSource *currentItem); -}; + unsigned long numberOfItems(void); + IOReturn addToList(IOPMPowerSource *newPowerSource); + IOReturn removeFromList(IOPMPowerSource *theItem); + LIBKERN_RETURNS_NOT_RETAINED IOPMPowerSource *firstInList(void); + LIBKERN_RETURNS_NOT_RETAINED IOPMPowerSource *nextInList(IOPMPowerSource *currentItem); +}; diff --git a/iokit/IOKit/pwr_mgt/IOPMinformee.h b/iokit/IOKit/pwr_mgt/IOPMinformee.h index 9d4e0c3b9..6280e2ba4 100644 --- a/iokit/IOKit/pwr_mgt/IOPMinformee.h +++ b/iokit/IOKit/pwr_mgt/IOPMinformee.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2007 Apple Inc. All rights reserved. 
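As a concrete reading of the subclassing contract spelled out above (populate state through the protected setters, then flush with updateStatus()), a hypothetical battery controller might look like the sketch below; the class name and the hwRead*() helpers are illustrative stand-ins, not part of this header:

    #include <IOKit/pwr_mgt/IOPMPowerSource.h>

    class MyBatteryController : public IOPMPowerSource
    {
        OSDeclareDefaultStructors(MyBatteryController)

    public:
        // Hypothetical: called from the driver's interrupt or polling path
        // whenever the gas gauge reports a meaningful change (step 3 above).
        void batteryStateDidChange(void)
        {
            setExternalConnected(hwReadACPresent());
            setIsCharging(hwReadChargingBit());
            setCurrentCapacity(hwReadRemainingCapacity());
            setMaxCapacity(hwReadFullChargeCapacity());

            // Step 4: relay the accumulated changes to the IORegistry and
            // to higher-level battery API clients.
            updateStatus();
        }

    private:
        // Stand-ins for real hardware accessors.
        bool hwReadACPresent(void);
        bool hwReadChargingBit(void);
        unsigned int hwReadRemainingCapacity(void);
        unsigned int hwReadFullChargeCapacity(void);
    };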
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IOKIT_IOPMINFORMEE_H @@ -33,22 +33,22 @@ class IOPMinformee : public OSObject { - OSDeclareDefaultStructors(IOPMinformee) - friend class IOPMinformeeList; - + OSDeclareDefaultStructors(IOPMinformee) + friend class IOPMinformeeList; + public: - static IOPMinformee * withObject( IOService * theObject ); + static IOPMinformee * withObject( IOService * theObject ); - void initialize( IOService * theObject ); + void initialize( IOService * theObject ); - void free( void ) APPLE_KEXT_OVERRIDE; + void free( void ) APPLE_KEXT_OVERRIDE; public: - IOService * whatObject; // interested driver - int32_t timer; // -1, 0, or positive number of ticks - IOPMinformee * nextInList; // linkage pointer - AbsoluteTime startTime; // start time of last inform - bool active; // enable flag + IOService * whatObject; // interested driver + int32_t timer; // -1, 0, or positive number of ticks + IOPMinformee * nextInList; // linkage pointer + AbsoluteTime startTime; // start time of last inform + bool active; // enable flag }; #endif /* !_IOKIT_IOPMINFORMEE_H */ diff --git a/iokit/IOKit/pwr_mgt/IOPMinformeeList.h b/iokit/IOKit/pwr_mgt/IOPMinformeeList.h index f06689def..f941a0ad1 100644 --- a/iokit/IOKit/pwr_mgt/IOPMinformeeList.h +++ b/iokit/IOKit/pwr_mgt/IOPMinformeeList.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -35,34 +35,33 @@ extern uint32_t gCanSleepTimeout; class IOPMinformeeList : public OSObject { -OSDeclareDefaultStructors(IOPMinformeeList) - friend class IOPMinformee; + OSDeclareDefaultStructors(IOPMinformeeList) + friend class IOPMinformee; private: - // pointer to first informee in the list - IOPMinformee *firstItem; - // how many informees are in the list - unsigned long length; +// pointer to first informee in the list + IOPMinformee *firstItem; +// how many informees are in the list + unsigned long length; public: - void initialize ( void ); - void free ( void ) APPLE_KEXT_OVERRIDE; + void initialize( void ); + void free( void ) APPLE_KEXT_OVERRIDE; - unsigned long numberOfItems ( void ); + unsigned long numberOfItems( void ); - IOPMinformee *appendNewInformee( IOService * newObject ); - - // OBSOLETE - // do not use addToList(); Use appendNewInformee() instead - IOReturn addToList ( IOPMinformee * newInformee ); - IOReturn removeFromList ( IOService * theItem ); - - IOPMinformee * firstInList ( void ); - IOPMinformee * nextInList ( IOPMinformee * currentItem ); - - IOPMinformee * findItem ( IOService * driverOrChild ); + IOPMinformee *appendNewInformee( IOService * newObject ); - // This lock must be held while modifying list or length - static IORecursiveLock * getSharedRecursiveLock( void ); -}; +// OBSOLETE +// do not use addToList(); Use appendNewInformee() instead + IOReturn addToList(LIBKERN_CONSUMED IOPMinformee * newInformee ); + IOReturn removeFromList( IOService * theItem ); + + LIBKERN_RETURNS_NOT_RETAINED IOPMinformee * firstInList( void ); + LIBKERN_RETURNS_NOT_RETAINED IOPMinformee * nextInList( IOPMinformee * currentItem ); + LIBKERN_RETURNS_NOT_RETAINED IOPMinformee * findItem( IOService * driverOrChild ); + +// This lock must be held while modifying list or length + static IORecursiveLock * getSharedRecursiveLock( void ); +}; diff --git a/iokit/IOKit/pwr_mgt/IOPMlog.h b/iokit/IOKit/pwr_mgt/IOPMlog.h index 9e93f2379..4785a4ecd 100644 --- a/iokit/IOKit/pwr_mgt/IOPMlog.h +++ b/iokit/IOKit/pwr_mgt/IOPMlog.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,72 +22,72 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
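The IOPMinformeeList comments above flag addToList() as obsolete in favor of appendNewInformee(), and require the shared recursive lock around any modification of the list or its length. A sketch of a caller honoring both rules (purely illustrative; real callers of this private class live inside the kernel's power-management core):

    #include <IOKit/IOLocks.h>
    #include <IOKit/pwr_mgt/IOPMinformeeList.h>

    static void
    addInterestedDriver(IOPMinformeeList *list, IOService *driver)
    {
        IORecursiveLock *lock = IOPMinformeeList::getSharedRecursiveLock();

        // The list and its length may only be modified under this lock.
        IORecursiveLockLock(lock);
        (void) list->appendNewInformee(driver); // preferred over addToList()
        IORecursiveLockUnlock(lock);
    }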
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ enum PMLogEnum { - kPMLogSetParent = 1, // 1 0x05070004 - kPMLogAddChild, // 2 0x05070008 - kPMLogRemoveChild, // 3 0x0507000c - kPMLogControllingDriver, // 4 0x05070010 - kPMLogControllingDriverErr1, // 5 0x05070014 - bad power state array version - kPMLogControllingDriverErr2, // 6 0x05070018 - power states already registered - kPMLogControllingDriverErr3, // 7 0x0507001c - kPMLogControllingDriverErr4, // 8 0x05070020 - power driver is invalid - kPMLogInterestedDriver, // 9 0x05070024 - kPMLogAcknowledgeErr1, // 10 0x05070028 - unknown entity called acknowledgePowerChange - kPMLogChildAcknowledge, // 11 0x0507002c - kPMLogDriverAcknowledge, // 12 0x05070030 - interested driver acknowledges - kPMLogAcknowledgeErr2, // 13 0x05070034 - object has already acked - kPMLogAcknowledgeErr3, // 14 0x05070038 - not expecting any acks - kPMLogAcknowledgeErr4, // 15 0x0507003c - not expecting acknowledgeSetPowerState - kPMLogDriverAcknowledgeSet, // 16 0x05070040 - controlling driver acknowledges - kPMLogWillChange, // 17 0x05070044 - kPMLogDidChange, // 18 0x05070048 - kPMLogRequestDomain, // 19 0x0507004c - kPMLogMakeUsable, // 20 0x05070050 - kPMLogChangeStateTo, // 21 0x05070054 - kPMLogChangeStateToPriv, // 22 0x05070058 - kPMLogSetAggressiveness, // 23 0x0507005c - kPMLogCriticalTemp, // 24 0x05070060 - kPMLogOverrideOn, // 25 0x05070064 - kPMLogOverrideOff, // 26 0x05070068 - kPMLogChangeStateForRootDomain, // 27 0x0507006c - kPMLogCSynchronizePowerTree, // 28 0x05070070 - kPMLogChangeDone, // 29 0x05070074 - kPMLogCtrlDriverTardy, // 30 0x05070078 - controlling driver didn't acknowledge - kPMLogIntDriverTardy, // 31 0x0507007c - interested driver didn't acknowledge - kPMLogStartAckTimer, // 32 0x05070080 - kPMLogStartParentChange, // 33 0x05070084 - kPMLogAmendParentChange, // 34 0x05070088 - kPMLogStartDeviceChange, // 35 0x0507008c - kPMLogRequestDenied, // 36 0x05070090 - parent denied domain state change request - kPMLogControllingDriverErr5, // 37 0x05070094 - too few power states - kPMLogProgramHardware, // 38 0x05070098 - kPMLogInformDriverPreChange, // 39 0x0507009c - kPMLogInformDriverPostChange, // 40 0x050700a0 - kPMLogRemoveDriver, // 41 0x050700a4 - NOT USED - kPMLogSetIdleTimerPeriod, // 42 0x050700a8 - kPMLogSystemWake, // 43 0x050700ac - kPMLogAcknowledgeErr5, // 44 0x050700b0 - kPMLogClientAcknowledge, // 45 0x050700b4 - kPMLogClientTardy, // 46 0x050700b8 - application didn't acknowledge - kPMLogClientCancel, // 47 0x050700bc - NOT USED - kPMLogClientNotify, // 48 0x050700c0 - client sent a notification - kPMLogAppNotify, // 49 0x050700c4 - application sent a notification - kPMLogSetClockGating, // 50 0x050700c8 - NOT USED - kPMLogSetPowerGating, // 51 0x050700cc - NOT USED - kPMLogSetPinGroup, // 52 0x050700d0 - NOT USED - kPMLogIdleCancel, // 53 0x050700d4 - device unidle during change - kPMLogSleepWakeTracePoint, // 54 0x050700d8 - kIOPMTracePoint markers - kPMLogQuiescePowerTree, // 55 0x050700dc - kPMLogComponentWakeProgress, // 56 0x050700e0 - kPMLogUserActiveState, // 57 0x050700e4 - kPMLogAppResponseDelay, // 58 0x050700e8 - kPMLogDrvResponseDelay, // 59 0x050700ec - kPMLogPCIDevChangeStart, // 60 0x050700f0 - kPMLogPCIDevChangeDone, // 61 0x050700f4 - kPMLogSleepWakeMessage, // 62 0x050700f8 - kPMLogDrvPSChangeDelay, // 63 0x050700fc - kIOPMlogLastEvent + kPMLogSetParent = 1, // 1 0x05070004 + kPMLogAddChild, // 2 0x05070008 + kPMLogRemoveChild, // 3 0x0507000c + kPMLogControllingDriver, // 4 0x05070010 + 
kPMLogControllingDriverErr1, // 5 0x05070014 - bad power state array version + kPMLogControllingDriverErr2, // 6 0x05070018 - power states already registered + kPMLogControllingDriverErr3, // 7 0x0507001c + kPMLogControllingDriverErr4, // 8 0x05070020 - power driver is invalid + kPMLogInterestedDriver, // 9 0x05070024 + kPMLogAcknowledgeErr1, // 10 0x05070028 - unknown entity called acknowledgePowerChange + kPMLogChildAcknowledge, // 11 0x0507002c + kPMLogDriverAcknowledge, // 12 0x05070030 - interested driver acknowledges + kPMLogAcknowledgeErr2, // 13 0x05070034 - object has already acked + kPMLogAcknowledgeErr3, // 14 0x05070038 - not expecting any acks + kPMLogAcknowledgeErr4, // 15 0x0507003c - not expecting acknowledgeSetPowerState + kPMLogDriverAcknowledgeSet, // 16 0x05070040 - controlling driver acknowledges + kPMLogWillChange, // 17 0x05070044 + kPMLogDidChange, // 18 0x05070048 + kPMLogRequestDomain, // 19 0x0507004c + kPMLogMakeUsable, // 20 0x05070050 + kPMLogChangeStateTo, // 21 0x05070054 + kPMLogChangeStateToPriv, // 22 0x05070058 + kPMLogSetAggressiveness, // 23 0x0507005c + kPMLogCriticalTemp, // 24 0x05070060 + kPMLogOverrideOn, // 25 0x05070064 + kPMLogOverrideOff, // 26 0x05070068 + kPMLogChangeStateForRootDomain, // 27 0x0507006c + kPMLogCSynchronizePowerTree, // 28 0x05070070 + kPMLogChangeDone, // 29 0x05070074 + kPMLogCtrlDriverTardy, // 30 0x05070078 - controlling driver didn't acknowledge + kPMLogIntDriverTardy, // 31 0x0507007c - interested driver didn't acknowledge + kPMLogStartAckTimer, // 32 0x05070080 + kPMLogStartParentChange, // 33 0x05070084 + kPMLogAmendParentChange, // 34 0x05070088 + kPMLogStartDeviceChange, // 35 0x0507008c + kPMLogRequestDenied, // 36 0x05070090 - parent denied domain state change request + kPMLogControllingDriverErr5, // 37 0x05070094 - too few power states + kPMLogProgramHardware, // 38 0x05070098 + kPMLogInformDriverPreChange, // 39 0x0507009c + kPMLogInformDriverPostChange, // 40 0x050700a0 + kPMLogRemoveDriver, // 41 0x050700a4 - NOT USED + kPMLogSetIdleTimerPeriod, // 42 0x050700a8 + kPMLogSystemWake, // 43 0x050700ac + kPMLogAcknowledgeErr5, // 44 0x050700b0 + kPMLogClientAcknowledge, // 45 0x050700b4 + kPMLogClientTardy, // 46 0x050700b8 - application didn't acknowledge + kPMLogClientCancel, // 47 0x050700bc - NOT USED + kPMLogClientNotify, // 48 0x050700c0 - client sent a notification + kPMLogAppNotify, // 49 0x050700c4 - application sent a notification + kPMLogSetClockGating, // 50 0x050700c8 - NOT USED + kPMLogSetPowerGating, // 51 0x050700cc - NOT USED + kPMLogSetPinGroup, // 52 0x050700d0 - NOT USED + kPMLogIdleCancel, // 53 0x050700d4 - device unidle during change + kPMLogSleepWakeTracePoint, // 54 0x050700d8 - kIOPMTracePoint markers + kPMLogQuiescePowerTree, // 55 0x050700dc + kPMLogComponentWakeProgress, // 56 0x050700e0 + kPMLogUserActiveState, // 57 0x050700e4 + kPMLogAppResponseDelay, // 58 0x050700e8 + kPMLogDrvResponseDelay, // 59 0x050700ec + kPMLogPCIDevChangeStart, // 60 0x050700f0 + kPMLogPCIDevChangeDone, // 61 0x050700f4 + kPMLogSleepWakeMessage, // 62 0x050700f8 + kPMLogDrvPSChangeDelay, // 63 0x050700fc + kIOPMlogLastEvent }; diff --git a/iokit/IOKit/pwr_mgt/IOPMpowerState.h b/iokit/IOKit/pwr_mgt/IOPMpowerState.h index f4f8dd0bf..1ca245297 100644 --- a/iokit/IOKit/pwr_mgt/IOPMpowerState.h +++ b/iokit/IOKit/pwr_mgt/IOPMpowerState.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
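The hex values in the PMLogEnum comments above follow the usual kdebug encoding: each enum ordinal is shifted left two bits (the low two bits carry the kdebug function qualifier) and OR'd into 0x05070000, which appears to be the IOKit power-management trace base, so kPMLogSetParent = 1 yields 0x05070004. A small sketch of that arithmetic (the macro name is illustrative, not from this header):

    #include <stdint.h>

    // Illustrative reconstruction of the trace codes in the comments above.
    #define PM_TRACE_CODE(event)  (0x05070000u | ((uint32_t)(event) << 2))

    // PM_TRACE_CODE(1)  == 0x05070004  (kPMLogSetParent)
    // PM_TRACE_CODE(29) == 0x05070074  (kPMLogChangeDone)
    // PM_TRACE_CODE(54) == 0x050700d8  (kPMLogSleepWakeTracePoint)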
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,48 +32,47 @@ #include /*! @header IOPMpowerState.h - @abstract Defines the struct IOPMPowerState that power managed drivers should use to describe their power states. -*/ + * @abstract Defines the struct IOPMPowerState that power managed drivers should use to describe their power states. + */ /*! @struct IOPMPowerState - @abstract Describes a device's power state. - @discussion To take part in system power management, drivers should define an array of 2 or more power states and pass it to kernel power management through IOService::registerPowerDriver. - @field version Defines version number of this struct. Just use the value "1" when defining an IOPMPowerState. - @field capabilityFlags Describes the capability of the device in this state. - @field outputPowerCharacter Describes the power provided in this state. - @field inputPowerRequirement Describes the input power required in this state. - @field staticPower Describes average consumption in milliwatts. Unused; drivers may specify 0. - @field stateOrder Valid in version kIOPMPowerStateVersion2 or greater of this structure. Defines ordering of power states independently of the power state ordinal. - @field powerToAttain Describes dditional power to attain this state from next lower state (in milliWatts). Unused; drivers may specify 0. - @field timeToAttain Describes time required to enter this state from next lower state (in microseconds). Unused; drivers may specify 0. - @field settleUpTime Describes settle time required after entering this state from next lower state (microseconds). Unused; drivers may specify 0. - @field timeToLower Describes time required to enter next lower state from this one (microseconds). Unused; drivers may specify 0. - @field settleDownTime Settle time required after entering next lower state from this state (microseconds). Unused; drivers may specify 0. - @field powerDomainBudget Describes power in milliWatts a domain in this state can deliver to its children. Unused; drivers may specify 0. -} -*/ + * @abstract Describes a device's power state. + * @discussion To take part in system power management, drivers should define an array of 2 or more power states and pass it to kernel power management through IOService::registerPowerDriver. + * @field version Defines version number of this struct. Just use the value "1" when defining an IOPMPowerState. + * @field capabilityFlags Describes the capability of the device in this state. 
+ * @field outputPowerCharacter Describes the power provided in this state. + * @field inputPowerRequirement Describes the input power required in this state. + * @field staticPower Describes average consumption in milliwatts. Unused; drivers may specify 0. + * @field stateOrder Valid in version kIOPMPowerStateVersion2 or greater of this structure. Defines ordering of power states independently of the power state ordinal. + * @field powerToAttain Describes additional power to attain this state from next lower state (in milliWatts). Unused; drivers may specify 0. + * @field timeToAttain Describes time required to enter this state from next lower state (in microseconds). Unused; drivers may specify 0. + * @field settleUpTime Describes settle time required after entering this state from next lower state (microseconds). Unused; drivers may specify 0. + * @field timeToLower Describes time required to enter next lower state from this one (microseconds). Unused; drivers may specify 0. + * @field settleDownTime Settle time required after entering next lower state from this state (microseconds). Unused; drivers may specify 0. + * @field powerDomainBudget Describes power in milliWatts a domain in this state can deliver to its children. Unused; drivers may specify 0. + * } + */ -struct IOPMPowerState -{ - unsigned long version; - IOPMPowerFlags capabilityFlags; - IOPMPowerFlags outputPowerCharacter; - IOPMPowerFlags inputPowerRequirement; - unsigned long staticPower; - unsigned long stateOrder; - unsigned long powerToAttain; - unsigned long timeToAttain; - unsigned long settleUpTime; - unsigned long timeToLower; - unsigned long settleDownTime; - unsigned long powerDomainBudget; +struct IOPMPowerState { + unsigned long version; + IOPMPowerFlags capabilityFlags; + IOPMPowerFlags outputPowerCharacter; + IOPMPowerFlags inputPowerRequirement; + unsigned long staticPower; + unsigned long stateOrder; + unsigned long powerToAttain; + unsigned long timeToAttain; + unsigned long settleUpTime; + unsigned long timeToLower; + unsigned long settleDownTime; + unsigned long powerDomainBudget; }; typedef struct IOPMPowerState IOPMPowerState; enum { - kIOPMPowerStateVersion1 = 1, - kIOPMPowerStateVersion2 = 2 + kIOPMPowerStateVersion1 = 1, + kIOPMPowerStateVersion2 = 2 }; #endif /* _IOKIT_IOPMPOWERSTATE_H */ diff --git a/iokit/IOKit/pwr_mgt/IOPowerConnection.h b/iokit/IOKit/pwr_mgt/IOPowerConnection.h index 4e66198c2..98ebe50b5 100644 --- a/iokit/IOKit/pwr_mgt/IOPowerConnection.h +++ b/iokit/IOKit/pwr_mgt/IOPowerConnection.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
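To show how the struct above is consumed, here is a minimal two-state table of the kind the @discussion says drivers pass to IOService::registerPowerDriver(); the capability flag choices are a common off/on pattern, not something this header mandates:

    #include <IOKit/pwr_mgt/IOPM.h>

    // State 0: device off. State 1: device usable and powered.
    // All the fields documented above as unused are left at 0.
    static IOPMPowerState gMyPowerStates[2] = {
        { kIOPMPowerStateVersion1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { kIOPMPowerStateVersion1, kIOPMDeviceUsable, kIOPMPowerOn,
          kIOPMPowerOn, 0, 0, 0, 0, 0, 0, 0, 0 }
    };

    // Typical registration from a driver's start() routine:
    //     PMinit();
    //     provider->joinPMtree(this);
    //     registerPowerDriver(this, gMyPowerStates, 2);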
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -48,125 +48,124 @@ class IOPowerConnection : public IOService { - OSDeclareDefaultStructors(IOPowerConnection) + OSDeclareDefaultStructors(IOPowerConnection) protected: - /*! @field parentKnowsState true: parent knows state of its domain - used by child */ - bool stateKnown; +/*! @field parentKnowsState true: parent knows state of its domain + * used by child */ + bool stateKnown; - /*! @field currentPowerFlags power flags which describe the current state of the power domain - used by child */ - IOPMPowerFlags currentPowerFlags; +/*! @field currentPowerFlags power flags which describe the current state of the power domain + * used by child */ + IOPMPowerFlags currentPowerFlags; - /*! @field desiredDomainState state number which corresponds to the child's desire - used by parent */ - unsigned long desiredDomainState; +/*! @field desiredDomainState state number which corresponds to the child's desire + * used by parent */ + unsigned long desiredDomainState; - /*! @field requestFlag set to true when desiredDomainState is set */ - bool requestFlag; +/*! @field requestFlag set to true when desiredDomainState is set */ + bool requestFlag; - /*! @field preventIdleSleepFlag true if child has this bit set in its desired state - used by parent */ - unsigned long preventIdleSleepFlag; +/*! @field preventIdleSleepFlag true if child has this bit set in its desired state + * used by parent */ + unsigned long preventIdleSleepFlag; - /*! @field preventSystemSleepFlag true if child has this bit set in its desired state - used by parent */ - unsigned long preventSystemSleepFlag; +/*! @field preventSystemSleepFlag true if child has this bit set in its desired state + * used by parent */ + unsigned long preventSystemSleepFlag; - /*! @field awaitingAck true if child has not yet acked our notification - used by parent */ - bool awaitingAck; +/*! @field awaitingAck true if child has not yet acked our notification + * used by parent */ + bool awaitingAck; - /*! @field readyFlag true if the child has been added as a power child - used by parent */ +/*! @field readyFlag true if the child has been added as a power child + * used by parent */ bool readyFlag; #ifdef XNU_KERNEL_PRIVATE public: - bool delayChildNotification; + bool delayChildNotification; #endif public: - /*! @function setParentKnowsState - @abstract Sets the stateKnown variable. - @discussion Called by the parent when the object is created and called by the child when it discovers that the parent now knows its state. */ - void setParentKnowsState (bool ); - - /*! @function setParentCurrentPowerFlags - @abstract Sets the currentPowerFlags variable. - @discussion Called by the parent when the object is created and called by the child when it discovers that the parent state is changing. */ - void setParentCurrentPowerFlags (IOPMPowerFlags ); - - /*! @function parentKnowsState - @abstract Returns the stateKnown variable. */ - bool parentKnowsState (void ); - - /*! @function parentCurrentPowerFlags - @abstract Returns the currentPowerFlags variable. */ - IOPMPowerFlags parentCurrentPowerFlags (void ); - - /*! @function setDesiredDomainState - @abstract Sets the desiredDomainState variable. - @discussion Called by the parent. */ - void setDesiredDomainState (unsigned long ); - - /*! @function getDesiredDomainState - @abstract Returns the desiredDomainState variable. 
- @discussion Called by the parent. */ - unsigned long getDesiredDomainState ( void ); - - /*! @function setChildHasRequestedPower - @abstract Set the flag that says that the child has called requestPowerDomainState. - @discussion Called by the parent. */ - void setChildHasRequestedPower ( void ); - - /*! @function childHasRequestedPower - @abstract Return the flag that says whether the child has called requestPowerDomainState. - @discussion Called by the PCI Aux Power Supply Driver to see if a device driver - is power managed. */ - bool childHasRequestedPower ( void ); - - /*! @function setPreventIdleSleepFlag - @abstract Sets the preventIdleSleepFlag variable. - @discussion Called by the parent. */ - void setPreventIdleSleepFlag (unsigned long ); - - /*! @function getPreventIdleSleepFlag - @abstract Returns the preventIdleSleepFlag variable. - @discussion Called by the parent. */ - bool getPreventIdleSleepFlag ( void ); - - /*! @function setPreventSystemSleepFlag - @abstract Sets the preventSystemSleepFlag variable. - @discussion Called by the parent. */ - void setPreventSystemSleepFlag (unsigned long ); - - /*! @function getPreventSystemSleepFlag - @abstract Returns the preventSystemSleepFlag variable. - @discussion Called by the parent. */ - bool getPreventSystemSleepFlag ( void ); - - /*! @function setAwaitingAck - @abstract Sets the awaitingAck variable. - @discussion Called by the parent. */ - void setAwaitingAck ( bool ); - - /*! @function getAwaitingAck - @abstract Returns the awaitingAck variable. - @discussion Called by the parent. */ - bool getAwaitingAck ( void ); - - /*! @function setReadyFlag - @abstract Sets the readyFlag variable. - @discussion Called by the parent. */ +/*! @function setParentKnowsState + * @abstract Sets the stateKnown variable. + * @discussion Called by the parent when the object is created and called by the child when it discovers that the parent now knows its state. */ + void setParentKnowsState(bool ); + +/*! @function setParentCurrentPowerFlags + * @abstract Sets the currentPowerFlags variable. + * @discussion Called by the parent when the object is created and called by the child when it discovers that the parent state is changing. */ + void setParentCurrentPowerFlags(IOPMPowerFlags ); + +/*! @function parentKnowsState + * @abstract Returns the stateKnown variable. */ + bool parentKnowsState(void ); + +/*! @function parentCurrentPowerFlags + * @abstract Returns the currentPowerFlags variable. */ + IOPMPowerFlags parentCurrentPowerFlags(void ); + +/*! @function setDesiredDomainState + * @abstract Sets the desiredDomainState variable. + * @discussion Called by the parent. */ + void setDesiredDomainState(unsigned long ); + +/*! @function getDesiredDomainState + * @abstract Returns the desiredDomainState variable. + * @discussion Called by the parent. */ + unsigned long getDesiredDomainState( void ); + +/*! @function setChildHasRequestedPower +* @abstract Set the flag that says that the child has called requestPowerDomainState. +* @discussion Called by the parent. */ + void setChildHasRequestedPower( void ); + +/*! @function childHasRequestedPower + * @abstract Return the flag that says whether the child has called requestPowerDomainState. + * @discussion Called by the PCI Aux Power Supply Driver to see if a device driver + * is power managed. */ + bool childHasRequestedPower( void ); + +/*! @function setPreventIdleSleepFlag + * @abstract Sets the preventIdleSleepFlag variable. + * @discussion Called by the parent. 
*/ + void setPreventIdleSleepFlag(unsigned long ); + +/*! @function getPreventIdleSleepFlag + * @abstract Returns the preventIdleSleepFlag variable. + * @discussion Called by the parent. */ + bool getPreventIdleSleepFlag( void ); + +/*! @function setPreventSystemSleepFlag + * @abstract Sets the preventSystemSleepFlag variable. + * @discussion Called by the parent. */ + void setPreventSystemSleepFlag(unsigned long ); + +/*! @function getPreventSystemSleepFlag + * @abstract Returns the preventSystemSleepFlag variable. + * @discussion Called by the parent. */ + bool getPreventSystemSleepFlag( void ); + +/*! @function setAwaitingAck + * @abstract Sets the awaitingAck variable. + * @discussion Called by the parent. */ + void setAwaitingAck( bool ); + +/*! @function getAwaitingAck + * @abstract Returns the awaitingAck variable. + * @discussion Called by the parent. */ + bool getAwaitingAck( void ); + +/*! @function setReadyFlag + * @abstract Sets the readyFlag variable. + * @discussion Called by the parent. */ void setReadyFlag( bool flag ); - /*! @function getReadyFlag - @abstract Returns the readyFlag variable. - @discussion Called by the parent. */ +/*! @function getReadyFlag + * @abstract Returns the readyFlag variable. + * @discussion Called by the parent. */ bool getReadyFlag( void ) const; }; #endif /* ! _IOKIT_IOPOWERCONNECTION_H */ - diff --git a/iokit/IOKit/pwr_mgt/RootDomain.h b/iokit/IOKit/pwr_mgt/RootDomain.h index eef58a320..504d8d0f2 100644 --- a/iokit/IOKit/pwr_mgt/RootDomain.h +++ b/iokit/IOKit/pwr_mgt/RootDomain.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
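IOPowerConnection objects are the edges of the power plane between a power parent and its children, and the accessors above are how the two sides exchange state. A sketch of a parent walking its child connections (assumes a context where the power plane is stable):

    #include <IOKit/IOService.h>
    #include <IOKit/pwr_mgt/IOPowerConnection.h>

    static void
    visitPowerChildren(IOService *parent)
    {
        OSIterator *iter = parent->getChildIterator(gIOPowerPlane);
        if (!iter) {
            return;
        }
        OSObject *next;
        while ((next = iter->getObject()) != NULL) {
            IOPowerConnection *conn = OSDynamicCast(IOPowerConnection, next);
            // Consider only connections whose child has been fully added
            // as a power child.
            if (conn && conn->getReadyFlag()) {
                // Inspect conn->getDesiredDomainState(), etc.
            }
        }
        iter->release();
    }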
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _IOKIT_ROOTDOMAIN_H @@ -81,14 +81,14 @@ typedef uint32_t IOPMDriverAssertionLevel; * Flags for get/setSleepSupported() */ enum { - kRootDomainSleepNotSupported = 0x00000000, - kRootDomainSleepSupported = 0x00000001, - kFrameBufferDeepSleepSupported = 0x00000002, - kPCICantSleep = 0x00000004 + kRootDomainSleepNotSupported = 0x00000000, + kRootDomainSleepSupported = 0x00000001, + kFrameBufferDeepSleepSupported = 0x00000002, + kPCICantSleep = 0x00000004 }; /* - *IOPMrootDomain registry property keys + * IOPMrootDomain registry property keys */ #define kRootDomainSupportedFeatures "Supported Features" #define kRootDomainSleepReasonKey "Last Sleep Reason" @@ -119,22 +119,22 @@ enum { * Supported Feature bitfields for IOPMrootDomain::publishFeature() */ enum { - kIOPMSupportedOnAC = (1<<0), - kIOPMSupportedOnBatt = (1<<1), - kIOPMSupportedOnUPS = (1<<2) + kIOPMSupportedOnAC = (1 << 0), + kIOPMSupportedOnBatt = (1 << 1), + kIOPMSupportedOnUPS = (1 << 2) }; typedef IOReturn (*IOPMSettingControllerCallback) - (OSObject *target, const OSSymbol *type, - OSObject *val, uintptr_t refcon); +(OSObject *target, const OSSymbol *type, + OSObject *val, uintptr_t refcon); __BEGIN_DECLS IONotifier * registerSleepWakeInterest( - IOServiceInterestHandler, void *, void * = 0); + IOServiceInterestHandler, void *, void * = 0); IONotifier * registerPrioritySleepWakeInterest( - IOServiceInterestHandler handler, - void * self, void * ref = 0); + IOServiceInterestHandler handler, + void * self, void * ref = 0); IOReturn acknowledgeSleepWakeNotification(void * ); @@ -143,755 +143,755 @@ __END_DECLS #define IOPM_ROOTDOMAIN_REV 2 -class IOPMrootDomain: public IOService +class IOPMrootDomain : public IOService { - OSDeclareFinalStructors(IOPMrootDomain) + OSDeclareFinalStructors(IOPMrootDomain) public: - static IOPMrootDomain * construct( void ); + static IOPMrootDomain * construct( void ); - virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE; - virtual IOReturn setAggressiveness( unsigned long, unsigned long ) APPLE_KEXT_OVERRIDE; - virtual IOReturn getAggressiveness( unsigned long, unsigned long * ) APPLE_KEXT_OVERRIDE; + virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE; + virtual IOReturn setAggressiveness( unsigned long, unsigned long ) APPLE_KEXT_OVERRIDE; + virtual IOReturn getAggressiveness( unsigned long, unsigned long * ) APPLE_KEXT_OVERRIDE; - virtual IOReturn sleepSystem( void ); - IOReturn sleepSystemOptions( OSDictionary *options ); + virtual IOReturn sleepSystem( void ); + IOReturn sleepSystemOptions( OSDictionary *options ); - virtual IOReturn setProperties( OSObject * ) APPLE_KEXT_OVERRIDE; - virtual bool serializeProperties( OSSerialize * s ) const APPLE_KEXT_OVERRIDE; - virtual OSObject * copyProperty( const char * aKey ) const APPLE_KEXT_OVERRIDE; + virtual IOReturn setProperties( OSObject * ) APPLE_KEXT_OVERRIDE; + virtual bool serializeProperties( OSSerialize * s ) const APPLE_KEXT_OVERRIDE; + virtual OSObject * copyProperty( const char * aKey ) const APPLE_KEXT_OVERRIDE; /*! @function systemPowerEventOccurred - @abstract Other drivers may inform IOPMrootDomain of system PM events - @discussion systemPowerEventOccurred is a richer alternative to receivePowerNotification() - Only Apple-owned kexts should have reason to call systemPowerEventOccurred. - @param event An OSSymbol describing the type of power event. - @param intValue A 32-bit integer value associated with the event. 
- @result kIOReturnSuccess on success */ + * @abstract Other drivers may inform IOPMrootDomain of system PM events + * @discussion systemPowerEventOccurred is a richer alternative to receivePowerNotification() + * Only Apple-owned kexts should have reason to call systemPowerEventOccurred. + * @param event An OSSymbol describing the type of power event. + * @param intValue A 32-bit integer value associated with the event. + * @result kIOReturnSuccess on success */ - IOReturn systemPowerEventOccurred( - const OSSymbol *event, - uint32_t intValue ); + IOReturn systemPowerEventOccurred( + const OSSymbol *event, + uint32_t intValue ); - IOReturn systemPowerEventOccurred( - const OSSymbol *event, - OSObject *value ); + IOReturn systemPowerEventOccurred( + const OSSymbol *event, + OSObject *value ); #ifdef XNU_KERNEL_PRIVATE // Hide doc from public headers /*! @function claimSystemWakeEvent - @abstract Apple-internal SPI to describe system wake events. - @discussion IOKit drivers may call claimSystemWakeEvent() during system wakeup to - provide human readable debug information describing the event(s) that - caused the system to wake. - - - Drivers should call claimSystemWakeEvent before completing - their setPowerState() acknowledgement. IOPMrootDomain stops - collecting wake events when driver wake is complete. - - - It is only appropriate to claim a wake event when the driver - can positively identify its hardware has generated an event - that can wake the system. - - - This call tracks wake events from a non-S0 state (S0i, S3, S4) into S0. - - This call does not track wake events from DarkWake(S0) to FullWake(S0). - - Examples: - (reason = "WiFi.TCPData", - details = "TCPKeepAlive packet arrived from IP 16.2.1.1") - (reason = "WiFi.ScanOffload", - details = "WiFi station 'AppleWiFi' signal dropped below threshold") - (reason = "Enet.LinkToggle", - details = "Ethernet attached") - - @param device The device/nub that is associated with the wake event. - - @param flags Pass kIOPMWakeEventSource if the device is the source - of the wake event. Pass zero if the device is forwarding or - aggregating wake events from multiple sources, e.g. an USB or - Thunderbolt host controller. - - @param reason Caller should pass a human readable C string describing the - wake reason. Please use a string from the list below, or create - your own string matching this format: - [Hardware].[Event] - WiFi.MagicPacket - WiFi.ScanOffload - WiFi.mDNSConflict - WiFi.mDNSService - WiFi.TCPData - WiFi.TCPTimeout - WiFi.FirmwareCrash - Enet.MagicPacket - Enet.mDNSConflict - Enet.mDNSService - Enet.TCPData - Enet.TCPTimeout - Enet.Service - Enet.LinkToggle - Enet.ConflictResolution - Enet.PatternMatch - Enet.Timer - Enet.LinkUpTimeout - Enet.LinkDown - USB.DeviceAttach - USB.DeviceDetach - - @param details Optional details further describing the wake event. - Please pass an OSString defining the event. - */ + * @abstract Apple-internal SPI to describe system wake events. + * @discussion IOKit drivers may call claimSystemWakeEvent() during system wakeup to + * provide human readable debug information describing the event(s) that + * caused the system to wake. + * + * - Drivers should call claimSystemWakeEvent before completing + * their setPowerState() acknowledgement. IOPMrootDomain stops + * collecting wake events when driver wake is complete. + * + * - It is only appropriate to claim a wake event when the driver + * can positively identify its hardware has generated an event + * that can wake the system. 
+ *
+ * - This call tracks wake events from a non-S0 state (S0i, S3, S4) into S0.
+ * - This call does not track wake events from DarkWake(S0) to FullWake(S0).
+ *
+ * Examples:
+ * (reason = "WiFi.TCPData",
+ * details = "TCPKeepAlive packet arrived from IP 16.2.1.1")
+ * (reason = "WiFi.ScanOffload",
+ * details = "WiFi station 'AppleWiFi' signal dropped below threshold")
+ * (reason = "Enet.LinkToggle",
+ * details = "Ethernet attached")
+ *
+ * @param device The device/nub that is associated with the wake event.
+ *
+ * @param flags Pass kIOPMWakeEventSource if the device is the source
+ * of the wake event. Pass zero if the device is forwarding or
+ * aggregating wake events from multiple sources, e.g. a USB or
+ * Thunderbolt host controller.
+ *
+ * @param reason Caller should pass a human readable C string describing the
+ * wake reason. Please use a string from the list below, or create
+ * your own string matching this format:
+ * [Hardware].[Event]
+ * WiFi.MagicPacket
+ * WiFi.ScanOffload
+ * WiFi.mDNSConflict
+ * WiFi.mDNSService
+ * WiFi.TCPData
+ * WiFi.TCPTimeout
+ * WiFi.FirmwareCrash
+ * Enet.MagicPacket
+ * Enet.mDNSConflict
+ * Enet.mDNSService
+ * Enet.TCPData
+ * Enet.TCPTimeout
+ * Enet.Service
+ * Enet.LinkToggle
+ * Enet.ConflictResolution
+ * Enet.PatternMatch
+ * Enet.Timer
+ * Enet.LinkUpTimeout
+ * Enet.LinkDown
+ * USB.DeviceAttach
+ * USB.DeviceDetach
+ *
+ * @param details Optional details further describing the wake event.
+ * Please pass an OSString defining the event.
+ */
#endif
- void claimSystemWakeEvent( IOService *device,
- IOOptionBits flags,
- const char *reason,
- OSObject *details = 0 );
+ void claimSystemWakeEvent( IOService *device,
+ IOOptionBits flags,
+ const char *reason,
+ OSObject *details = 0 );
- virtual IOReturn receivePowerNotification( UInt32 msg );
+ virtual IOReturn receivePowerNotification( UInt32 msg );
- virtual void setSleepSupported( IOOptionBits flags );
+ virtual void setSleepSupported( IOOptionBits flags );
- virtual IOOptionBits getSleepSupported( void );
+ virtual IOOptionBits getSleepSupported( void );
- void wakeFromDoze( void );
+ void wakeFromDoze( void );
- // KEXT driver announces support of power management feature
+// KEXT driver announces support of power management feature
- void publishFeature( const char *feature );
+ void publishFeature( const char *feature );
- // KEXT driver announces support of power management feature
- // And specifies power sources with kIOPMSupportedOn{AC/Batt/UPS} bitfield.
- // Returns a unique uint32_t identifier for later removing support for this
- // feature.
- // NULL is acceptable for uniqueFeatureID for kexts without plans to unload.
+// KEXT driver announces support of power management feature
+// And specifies power sources with kIOPMSupportedOn{AC/Batt/UPS} bitfield.
+// Returns a unique uint32_t identifier for later removing support for this
+// feature.
+// NULL is acceptable for uniqueFeatureID for kexts without plans to unload.
- void publishFeature( const char *feature,
- uint32_t supportedWhere,
- uint32_t *uniqueFeatureID);
+ void publishFeature( const char *feature,
+ uint32_t supportedWhere,
+ uint32_t *uniqueFeatureID);
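As a usage sketch of the claimSystemWakeEvent() SPI documented above (the driver context is hypothetical; the reason string is one of those listed in the header comment), a network driver that has positively identified its hardware as the wake source might claim the event from its wake path, before completing its setPowerState() acknowledgement:

    // Minimal sketch inside a hypothetical Ethernet driver (an IOService
    // subclass), after confirming the NIC itself raised the wake event.
    OSString * details = OSString::withCString("wake pattern matched on en0");
    getPMRootDomain()->claimSystemWakeEvent(
        this,                    // device/nub associated with the wake event
        kIOPMWakeEventSource,    // this device is the source, not an aggregator
        "Enet.MagicPacket",      // [Hardware].[Event] string from the list above
        details);                // optional OSString with extra detail
    if (details) {
        details->release();      // drop our reference once the call returns
    }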
- // KEXT driver announces removal of a previously published power management
- // feature.
+// KEXT driver announces removal of a previously published power management
+// feature. Pass 'uniqueFeatureID' returned from publishFeature()
- IOReturn removePublishedFeature( uint32_t removeFeatureID );
+ IOReturn removePublishedFeature( uint32_t removeFeatureID );
/*! @function copyPMSetting
- @abstract Copy the current value for a PM setting. Returns an OSNumber or
- OSData depending on the setting.
- @param whichSetting Name of the desired setting.
- @result OSObject value if valid, NULL otherwise. */
+ * @abstract Copy the current value for a PM setting. Returns an OSNumber or
+ * OSData depending on the setting.
+ * @param whichSetting Name of the desired setting.
+ * @result OSObject value if valid, NULL otherwise. */
- OSObject * copyPMSetting( OSSymbol *whichSetting );
+ OSObject * copyPMSetting( OSSymbol *whichSetting );
/*! @function registerPMSettingController
- @abstract Register for callbacks on changes to certain PM settings.
- @param settings NULL terminated array of C strings, each string for a PM
- setting that the caller is interested in and wants to get callbacks for.
- @param callout C function ptr or member function cast as such.
- @param target The target of the callback, usually 'this'
- @param refcon Will be passed to caller in callback; for caller's use.
- @param handle Caller should keep the OSObject * returned here. If non-NULL,
- handle will have a retain count of 1 on return. To deregister, pass to
- unregisterPMSettingController()
- @result kIOReturnSuccess on success. */
-
- IOReturn registerPMSettingController(
- const OSSymbol *settings[],
- IOPMSettingControllerCallback callout,
- OSObject *target,
- uintptr_t refcon,
- OSObject **handle); // out param
+ * @abstract Register for callbacks on changes to certain PM settings.
+ * @param settings NULL terminated array of C strings, each string for a PM
+ * setting that the caller is interested in and wants to get callbacks for.
+ * @param callout C function ptr or member function cast as such.
+ * @param target The target of the callback, usually 'this'
+ * @param refcon Will be passed to caller in callback; for caller's use.
+ * @param handle Caller should keep the OSObject * returned here. If non-NULL,
+ * handle will have a retain count of 1 on return. To deregister, pass to
+ * unregisterPMSettingController()
+ * @result kIOReturnSuccess on success. */
-
- IOReturn registerPMSettingController(
- const OSSymbol *settings[],
- uint32_t supportedPowerSources,
- IOPMSettingControllerCallback callout,
- OSObject *target,
- uintptr_t refcon,
- OSObject **handle); // out param
-
- virtual IONotifier * registerInterest(
- const OSSymbol * typeOfInterest,
- IOServiceInterestHandler handler,
- void * target, void * ref = 0 ) APPLE_KEXT_OVERRIDE;
-
- virtual IOReturn callPlatformFunction(
- const OSSymbol *functionName,
- bool waitForFunction,
- void *param1, void *param2,
- void *param3, void *param4 ) APPLE_KEXT_OVERRIDE;
+ * @abstract Register for callbacks on changes to certain PM settings.
+ * @param settings NULL terminated array of C strings, each string for a PM
+ * setting that the caller is interested in and wants to get callbacks for.
+ * @param supportedPowerSources bitfield indicating which power sources these
+ * settings are supported for (kIOPMSupportedOnAC, etc.)
+ * @param callout C function ptr or member function cast as such.
+ * @param target The target of the callback, usually 'this'
+ * @param refcon Will be passed to caller in callback; for caller's use.
+ * @param handle Caller should keep the OSObject * returned here. If non-NULL,
+ * handle will have a retain count of 1 on return. To deregister, pass to
+ * unregisterPMSettingController()
+ * @result kIOReturnSuccess on success. */
+
+ IOReturn registerPMSettingController(
+ const OSSymbol *settings[],
+ uint32_t supportedPowerSources,
+ IOPMSettingControllerCallback callout,
+ OSObject *target,
+ uintptr_t refcon,
+ OSObject **handle); // out param
+
+ virtual IONotifier * registerInterest(
+ const OSSymbol * typeOfInterest,
+ IOServiceInterestHandler handler,
+ void * target, void * ref = 0 ) APPLE_KEXT_OVERRIDE;
+
+ virtual IOReturn callPlatformFunction(
+ const OSSymbol *functionName,
+ bool waitForFunction,
+ void *param1, void *param2,
+ void *param3, void *param4 ) APPLE_KEXT_OVERRIDE;
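To illustrate the settings-controller registration documented above (a sketch only; MySettingsClient, its settingChanged() member, and the setting name are hypothetical), a caller usually passes a plain C function that bounces to a member function through the target pointer, matching the IOPMSettingControllerCallback typedef:

    // Hypothetical static trampoline matching IOPMSettingControllerCallback.
    static IOReturn
    mySettingCallback(OSObject *target, const OSSymbol *type,
                      OSObject *val, uintptr_t refcon)
    {
        MySettingsClient *self = (MySettingsClient *) target;
        return self->settingChanged(type, val);   // hypothetical member function
    }

    // Registration, e.g. from start(); keep 'handle' to deregister later.
    const OSSymbol *settings[] = {
        OSSymbol::withCString("Example Setting"), // hypothetical setting key;
        NULL                                      // NULL terminated, per the doc
    };
    OSObject *handle = NULL;
    IOReturn ret = getPMRootDomain()->registerPMSettingController(
        settings, mySettingCallback, this, (uintptr_t) 0, &handle);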
/*! @function createPMAssertion
- @abstract Creates an assertion to influence system power behavior.
- @param whichAssertionsBits A bitfield specify the assertion that the caller requests.
- @param assertionLevel An integer detailing the initial assertion level, kIOPMDriverAssertionLevelOn
- or kIOPMDriverAssertionLevelOff.
- @param ownerService A pointer to the caller's IOService class, for tracking.
- @param ownerDescription A reverse-DNS string describing the caller's identity and reason.
- @result On success, returns a new assertion of type IOPMDriverAssertionID
-*/
- IOPMDriverAssertionID createPMAssertion(
- IOPMDriverAssertionType whichAssertionsBits,
- IOPMDriverAssertionLevel assertionLevel,
- IOService *ownerService,
- const char *ownerDescription);
+ * @abstract Creates an assertion to influence system power behavior.
+ * @param whichAssertionsBits A bitfield specifying the assertion that the caller requests.
+ * @param assertionLevel An integer detailing the initial assertion level, kIOPMDriverAssertionLevelOn
+ * or kIOPMDriverAssertionLevelOff.
+ * @param ownerService A pointer to the caller's IOService class, for tracking.
+ * @param ownerDescription A reverse-DNS string describing the caller's identity and reason.
+ * @result On success, returns a new assertion of type IOPMDriverAssertionID
+ */
+ IOPMDriverAssertionID createPMAssertion(
+ IOPMDriverAssertionType whichAssertionsBits,
+ IOPMDriverAssertionLevel assertionLevel,
+ IOService *ownerService,
+ const char *ownerDescription);
/* @function setPMAssertionLevel
- @abstract Modify the level of a pre-existing assertion.
- @discussion Change the value of a PM assertion to influence system behavior,
- without undergoing the work required to create or destroy an assertion. Suggested
- for clients who will assert and de-assert needs for PM behavior several times over
- their lifespan.
- @param assertionID An assertion ID previously returned by createPMAssertion
- @param assertionLevel The new assertion level.
- @result kIOReturnSuccess if it worked; kIOReturnNotFound or other IOReturn error on failure.
-*/
- IOReturn setPMAssertionLevel(IOPMDriverAssertionID assertionID, IOPMDriverAssertionLevel assertionLevel);
+ * @abstract Modify the level of a pre-existing assertion.
+ * @discussion Change the value of a PM assertion to influence system behavior,
+ * without undergoing the work required to create or destroy an assertion. Suggested
+ * for clients who will assert and de-assert needs for PM behavior several times over
+ * their lifespan.
+ * @param assertionID An assertion ID previously returned by createPMAssertion
+ * @param assertionLevel The new assertion level.
+ * @result kIOReturnSuccess if it worked; kIOReturnNotFound or other IOReturn error on failure.
+ */
+ IOReturn setPMAssertionLevel(IOPMDriverAssertionID assertionID, IOPMDriverAssertionLevel assertionLevel);
/*! @function getPMAssertionLevel
- @absract Returns the active level of the specified assertion(s).
- @discussion Returns kIOPMDriverAssertionLevelOff or
- kIOPMDriverAssertionLevelOn. If multiple assertions are specified
- in the bitfield, only returns kIOPMDriverAssertionLevelOn
- if all assertions are active.
- @param whichAssertionBits Bits defining the assertion or assertions the caller is interested in
- the level of. If in doubt, pass kIOPMDriverAssertionCPUBit as the argument.
- @result Returns kIOPMDriverAssertionLevelOff or
- kIOPMDriverAssertionLevelOn indicating the specified assertion's levels, if available.
- If the assertions aren't supported on this machine, or aren't recognized by the OS, the
- result is undefined.
-*/
- IOPMDriverAssertionLevel getPMAssertionLevel(IOPMDriverAssertionType whichAssertionBits);
+ * @abstract Returns the active level of the specified assertion(s).
+ * @discussion Returns kIOPMDriverAssertionLevelOff or
+ * kIOPMDriverAssertionLevelOn. If multiple assertions are specified
+ * in the bitfield, only returns kIOPMDriverAssertionLevelOn
+ * if all assertions are active.
+ * @param whichAssertionBits Bits defining the assertion or assertions the caller is interested in
+ * the level of. If in doubt, pass kIOPMDriverAssertionCPUBit as the argument.
+ * @result Returns kIOPMDriverAssertionLevelOff or
+ * kIOPMDriverAssertionLevelOn indicating the specified assertion's levels, if available.
+ * If the assertions aren't supported on this machine, or aren't recognized by the OS, the
+ * result is undefined.
+ */
+ IOPMDriverAssertionLevel getPMAssertionLevel(IOPMDriverAssertionType whichAssertionBits);
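Putting createPMAssertion() and setPMAssertionLevel() together (plus releasePMAssertion(), documented next), the intended lifecycle looks roughly like this; the surrounding driver and the reverse-DNS string are hypothetical:

    // Sketch: keep the CPU up across a latency-sensitive window.
    IOPMDriverAssertionID a = getPMRootDomain()->createPMAssertion(
        kIOPMDriverAssertionCPUBit,         // which behavior is asserted
        kIOPMDriverAssertionLevelOn,        // initial level: active
        this,                               // owner service, for tracking
        "com.example.mydriver.busy");       // hypothetical reverse-DNS reason

    // ... latency-sensitive work ...

    // Cheaply toggle the same assertion off (and later on again) rather than
    // destroying and re-creating it, as the @discussion above suggests:
    getPMRootDomain()->setPMAssertionLevel(a, kIOPMDriverAssertionLevelOff);

    // When the assertion is no longer needed at all:
    getPMRootDomain()->releasePMAssertion(a);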
/*! @function releasePMAssertion
- @abstract Removes an assertion to influence system power behavior.
- @result On success, returns a new assertion of type IOPMDriverAssertionID *
-*/
- IOReturn releasePMAssertion(IOPMDriverAssertionID releaseAssertion);
+ * @abstract Removes an assertion to influence system power behavior.
+ * @result kIOReturnSuccess on success; kIOReturnNotFound or another IOReturn error on failure.
+ */
+ IOReturn releasePMAssertion(IOPMDriverAssertionID releaseAssertion);
/*! @function restartWithStackshot
- @abstract Take a stackshot of the system and restart the system.
- @result Return kIOReturnSuccess if it work, kIOReturnError if the service is not available.
-*/
- IOReturn restartWithStackshot();
+ * @abstract Take a stackshot of the system and restart the system.
+ * @result Returns kIOReturnSuccess if it works, kIOReturnError if the service is not available.
+ */
+ IOReturn restartWithStackshot();
private:
- virtual IOReturn changePowerStateTo( unsigned long ordinal ) APPLE_KEXT_COMPATIBILITY_OVERRIDE;
- virtual IOReturn changePowerStateToPriv( unsigned long ordinal );
- virtual IOReturn requestPowerDomainState( IOPMPowerFlags, IOPowerConnection *, unsigned long ) APPLE_KEXT_OVERRIDE;
- virtual void powerChangeDone( unsigned long ) APPLE_KEXT_OVERRIDE;
- virtual bool tellChangeDown( unsigned long ) APPLE_KEXT_OVERRIDE;
- virtual bool askChangeDown( unsigned long ) APPLE_KEXT_OVERRIDE;
- virtual void tellChangeUp( unsigned long ) APPLE_KEXT_OVERRIDE;
- virtual void tellNoChangeDown( unsigned long ) APPLE_KEXT_OVERRIDE;
- virtual IOReturn configureReport(IOReportChannelList *channels,
- IOReportConfigureAction action,
- void *result,
- void *destination) APPLE_KEXT_OVERRIDE;
- virtual IOReturn updateReport(IOReportChannelList *channels,
- IOReportUpdateAction action,
- void *result,
- void *destination) APPLE_KEXT_OVERRIDE;
-
- void configureReportGated(uint64_t channel_id,
- uint64_t action,
- void *result);
- IOReturn updateReportGated(uint64_t ch_id,
- void *result,
- IOBufferMemoryDescriptor *dest);
+ virtual IOReturn changePowerStateTo( unsigned long ordinal ) APPLE_KEXT_COMPATIBILITY_OVERRIDE;
+ virtual IOReturn changePowerStateToPriv( unsigned long ordinal );
+ virtual IOReturn requestPowerDomainState( IOPMPowerFlags, IOPowerConnection *, unsigned long ) APPLE_KEXT_OVERRIDE;
+ virtual void powerChangeDone( unsigned long ) APPLE_KEXT_OVERRIDE;
+ virtual bool tellChangeDown( unsigned long ) APPLE_KEXT_OVERRIDE;
+ virtual bool askChangeDown( unsigned long ) APPLE_KEXT_OVERRIDE;
+ virtual void tellChangeUp( unsigned long ) APPLE_KEXT_OVERRIDE;
+ virtual void tellNoChangeDown( unsigned long ) APPLE_KEXT_OVERRIDE;
+ virtual IOReturn configureReport(IOReportChannelList *channels,
+ IOReportConfigureAction action,
+ void *result,
+ void *destination) APPLE_KEXT_OVERRIDE;
+ virtual IOReturn updateReport(IOReportChannelList *channels,
+ IOReportUpdateAction action,
+ void *result,
+ void *destination) APPLE_KEXT_OVERRIDE;
+
+ void configureReportGated(uint64_t channel_id,
+ uint64_t action,
+ void *result);
+ IOReturn updateReportGated(uint64_t ch_id,
+ void *result,
+ IOBufferMemoryDescriptor *dest);
#ifdef XNU_KERNEL_PRIVATE
- /* Root Domain internals */
+/* Root Domain internals */
public:
- void tagPowerPlaneService(
- IOService * service,
- IOPMActions * actions );
-
- void overrideOurPowerChange(
- IOService * service,
- IOPMActions * actions,
- IOPMPowerStateIndex * inOutPowerState,
- IOPMPowerChangeFlags * inOutChangeFlags,
- IOPMRequestTag requestTag );
-
- void handleOurPowerChangeStart(
- IOService * service,
- IOPMActions * actions,
- IOPMPowerStateIndex powerState,
-
IOPMPowerChangeFlags * inOutChangeFlags, - IOPMRequestTag requestTag ); - - void handleOurPowerChangeDone( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags changeFlags, - IOPMRequestTag requestTag ); - - void overridePowerChangeForUIService( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex * inOutPowerState, - IOPMPowerChangeFlags * inOutChangeFlags ); - - void handleActivityTickleForDisplayWrangler( - IOService * service, - IOPMActions * actions ); - - void handleUpdatePowerClientForDisplayWrangler( - IOService * service, - IOPMActions * actions, - const OSSymbol * powerClient, - IOPMPowerStateIndex oldPowerState, - IOPMPowerStateIndex newPowerState ); - - bool shouldDelayChildNotification( - IOService * service ); - - void handlePowerChangeStartForPCIDevice( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags * inOutChangeFlags ); - - void handlePowerChangeDoneForPCIDevice( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags changeFlags ); - - void askChangeDownDone( - IOPMPowerChangeFlags * inOutChangeFlags, - bool * cancel ); - - void handlePublishSleepWakeUUID( - bool shouldPublish); - - void handleQueueSleepWakeUUID( - OSObject *obj); - - void handleDisplayPowerOn( ); - - void willNotifyPowerChildren( IOPMPowerStateIndex newPowerState ); - - IOReturn setMaintenanceWakeCalendar( - const IOPMCalendarStruct * calendar ); - - IOReturn getSystemSleepType(uint32_t * sleepType, uint32_t * standbyTimer); - - // Handle callbacks from IOService::systemWillShutdown() - void acknowledgeSystemWillShutdown( IOService * from ); - - // Handle platform halt and restart notifications - void handlePlatformHaltRestart( UInt32 pe_type ); - - IOReturn shutdownSystem( void ); - IOReturn restartSystem( void ); - void handleSleepTimerExpiration( void ); - - bool activitySinceSleep(void); - bool abortHibernation(void); - void updateConsoleUsers(void); - - IOReturn joinAggressiveness( IOService * service ); - void handleAggressivesRequests( void ); - - void kdebugTrace(uint32_t event, uint64_t regId, - uintptr_t param1, uintptr_t param2, uintptr_t param3 = 0); - void tracePoint(uint8_t point); - void traceDetail(uint32_t msgType, uint32_t msgIndex, uint32_t delay); - void traceDetail(OSObject *notifier, bool start); - void traceAckDelay(OSObject *notifier, uint32_t response, uint32_t delay_ms); - - void startSpinDump(uint32_t spindumpKind); - - bool systemMessageFilter( - void * object, void * arg1, void * arg2, void * arg3 ); - - bool updatePreventIdleSleepList( - IOService * service, bool addNotRemove ); - void updatePreventSystemSleepList( - IOService * service, bool addNotRemove ); + void tagPowerPlaneService( + IOService * service, + IOPMActions * actions ); + + void overrideOurPowerChange( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex * inOutPowerState, + IOPMPowerChangeFlags * inOutChangeFlags, + IOPMRequestTag requestTag ); + + void handleOurPowerChangeStart( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags * inOutChangeFlags, + IOPMRequestTag requestTag ); + + void handleOurPowerChangeDone( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags changeFlags, + IOPMRequestTag requestTag ); + + void overridePowerChangeForUIService( + IOService * service, + IOPMActions * actions, + 
IOPMPowerStateIndex * inOutPowerState, + IOPMPowerChangeFlags * inOutChangeFlags ); + + void handleActivityTickleForDisplayWrangler( + IOService * service, + IOPMActions * actions ); + + void handleUpdatePowerClientForDisplayWrangler( + IOService * service, + IOPMActions * actions, + const OSSymbol * powerClient, + IOPMPowerStateIndex oldPowerState, + IOPMPowerStateIndex newPowerState ); + + bool shouldDelayChildNotification( + IOService * service ); + + void handlePowerChangeStartForPCIDevice( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags * inOutChangeFlags ); + + void handlePowerChangeDoneForPCIDevice( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags changeFlags ); + + void askChangeDownDone( + IOPMPowerChangeFlags * inOutChangeFlags, + bool * cancel ); + + void handlePublishSleepWakeUUID( + bool shouldPublish); + + void handleQueueSleepWakeUUID( + OSObject *obj); + + void handleDisplayPowerOn(); + + void willNotifyPowerChildren( IOPMPowerStateIndex newPowerState ); + + IOReturn setMaintenanceWakeCalendar( + const IOPMCalendarStruct * calendar ); + + IOReturn getSystemSleepType(uint32_t * sleepType, uint32_t * standbyTimer); + +// Handle callbacks from IOService::systemWillShutdown() + void acknowledgeSystemWillShutdown( IOService * from ); + +// Handle platform halt and restart notifications + void handlePlatformHaltRestart( UInt32 pe_type ); + + IOReturn shutdownSystem( void ); + IOReturn restartSystem( void ); + void handleSleepTimerExpiration( void ); + + bool activitySinceSleep(void); + bool abortHibernation(void); + void updateConsoleUsers(void); + + IOReturn joinAggressiveness( IOService * service ); + void handleAggressivesRequests( void ); + + void kdebugTrace(uint32_t event, uint64_t regId, + uintptr_t param1, uintptr_t param2, uintptr_t param3 = 0); + void tracePoint(uint8_t point); + void traceDetail(uint32_t msgType, uint32_t msgIndex, uint32_t delay); + void traceDetail(OSObject *notifier, bool start); + void traceAckDelay(OSObject *notifier, uint32_t response, uint32_t delay_ms); + + void startSpinDump(uint32_t spindumpKind); + + bool systemMessageFilter( + void * object, void * arg1, void * arg2, void * arg3 ); + + bool updatePreventIdleSleepList( + IOService * service, bool addNotRemove ); + void updatePreventSystemSleepList( + IOService * service, bool addNotRemove ); - void publishPMSetting( - const OSSymbol * feature, uint32_t where, uint32_t * featureID ); - - void pmStatsRecordEvent( - int eventIndex, - AbsoluteTime timestamp); + void publishPMSetting( + const OSSymbol * feature, uint32_t where, uint32_t * featureID ); + + void pmStatsRecordEvent( + int eventIndex, + AbsoluteTime timestamp); - void pmStatsRecordApplicationResponse( - const OSSymbol *response, - const char *name, - int messageType, - uint32_t delay_ms, - uint64_t id, - OSObject *object, - IOPMPowerStateIndex ps=0); + void pmStatsRecordApplicationResponse( + const OSSymbol *response, + const char *name, + int messageType, + uint32_t delay_ms, + uint64_t id, + OSObject *object, + IOPMPowerStateIndex ps = 0); - void copyWakeReasonString( char * outBuf, size_t bufSize ); + void copyWakeReasonString( char * outBuf, size_t bufSize ); #if HIBERNATION - bool getHibernateSettings( - uint32_t * hibernateMode, - uint32_t * hibernateFreeRatio, - uint32_t * hibernateFreeTime ); - bool mustHibernate( void ); + bool getHibernateSettings( + uint32_t * hibernateMode, + uint32_t * hibernateFreeRatio, + 
uint32_t * hibernateFreeTime ); + bool mustHibernate( void ); #endif - void takeStackshot(bool restart, bool isOSXWatchdog, bool isSpinDump); - void sleepWakeDebugTrig(bool restart); - void sleepWakeDebugEnableWdog(); - bool sleepWakeDebugIsWdogEnabled(); - void sleepWakeDebugSaveSpinDumpFile(); - bool checkShutdownTimeout(); - void panicWithShutdownLog(uint32_t timeoutInMs); - uint32_t getWatchdogTimeout(); + void takeStackshot(bool restart, bool isOSXWatchdog, bool isSpinDump); + void sleepWakeDebugTrig(bool restart); + void sleepWakeDebugEnableWdog(); + bool sleepWakeDebugIsWdogEnabled(); + void sleepWakeDebugSaveSpinDumpFile(); + bool checkShutdownTimeout(); + void panicWithShutdownLog(uint32_t timeoutInMs); + uint32_t getWatchdogTimeout(); private: - friend class PMSettingObject; - friend class RootDomainUserClient; - friend class PMAssertionsTracker; - - static IOReturn sysPowerDownHandler( void * target, void * refCon, - UInt32 messageType, IOService * service, - void * messageArgument, vm_size_t argSize ); - - static IOReturn displayWranglerNotification( void * target, void * refCon, - UInt32 messageType, IOService * service, - void * messageArgument, vm_size_t argSize ); - - static IOReturn rootBusyStateChangeHandler( void * target, void * refCon, - UInt32 messageType, IOService * service, - void * messageArgument, vm_size_t argSize ); - - static bool displayWranglerMatchPublished( void * target, void * refCon, - IOService * newService, - IONotifier * notifier); - - static bool batteryPublished( void * target, void * refCon, - IOService * resourceService, - IONotifier * notifier); - - void initializeBootSessionUUID( void ); - - void fullWakeDelayedWork( void ); - - IOService * wrangler; - OSDictionary * wranglerIdleSettings; - - IOLock *featuresDictLock; // guards supportedFeatures - IOLock *wakeEventLock; - IOPMPowerStateQueue *pmPowerStateQueue; - - OSArray *allowedPMSettings; - OSArray *noPublishPMSettings; - PMTraceWorker *pmTracer; - PMAssertionsTracker *pmAssertions; - - // Settings controller info - IOLock *settingsCtrlLock; - OSDictionary *settingsCallbacks; - OSDictionary *fPMSettingsDict; - - IONotifier *_batteryPublishNotifier; - IONotifier *_displayWranglerNotifier; - - // Statistics - const OSSymbol *_statsNameKey; - const OSSymbol *_statsPIDKey; - const OSSymbol *_statsTimeMSKey; - const OSSymbol *_statsResponseTypeKey; - const OSSymbol *_statsMessageTypeKey; - const OSSymbol *_statsPowerCapsKey; - uint32_t sleepCnt; - uint32_t darkWakeCnt; - uint32_t displayWakeCnt; - - OSString *queuedSleepWakeUUIDString; - OSArray *pmStatsAppResponses; - IOLock *pmStatsLock; // guards pmStatsAppResponses - - void *sleepDelaysReport; // report to track time taken to go to sleep - uint32_t sleepDelaysClientCnt; // Number of interested clients in sleepDelaysReport - uint64_t ts_sleepStart; - uint64_t wake2DarkwakeDelay; // Time taken to change from full wake -> Dark wake - - - void *assertOnWakeReport; // report to track time spent without any assertions held after wake - uint32_t assertOnWakeClientCnt; // Number of clients interested in assertOnWakeReport - clock_sec_t assertOnWakeSecs; // Num of secs after wake for first assertion - - bool uuidPublished; - - // Pref: idle time before idle sleep - bool idleSleepEnabled; - unsigned long sleepSlider; - unsigned long idleSeconds; - uint64_t autoWakeStart; - uint64_t autoWakeEnd; - - // Difference between sleepSlider and longestNonSleepSlider - unsigned long extraSleepDelay; - - // Used to wait between say display idle and system idle - 
thread_call_t extraSleepTimer; - thread_call_t diskSyncCalloutEntry; - thread_call_t fullWakeThreadCall; - thread_call_t updateConsoleUsersEntry; - - // Track system capabilities. - uint32_t _desiredCapability; - uint32_t _currentCapability; - uint32_t _pendingCapability; - uint32_t _highestCapability; - OSSet * _joinedCapabilityClients; - uint32_t _systemStateGeneration; - - // Type of clients that can receive system messages. - enum { - kSystemMessageClientPowerd = 0x01, - kSystemMessageClientLegacyApp = 0x02, - kSystemMessageClientKernel = 0x04, - kSystemMessageClientAll = 0x07 - }; - uint32_t _systemMessageClientMask; - - // Power state and capability change transitions. - enum { - kSystemTransitionNone = 0, - kSystemTransitionSleep = 1, - kSystemTransitionWake = 2, - kSystemTransitionCapability = 3, - kSystemTransitionNewCapClient = 4 - } _systemTransitionType; - - unsigned int systemBooting :1; - unsigned int systemShutdown :1; - unsigned int systemDarkWake :1; - unsigned int clamshellExists :1; - unsigned int clamshellClosed :1; - unsigned int clamshellDisabled :1; - unsigned int desktopMode :1; - unsigned int acAdaptorConnected :1; - - unsigned int clamshellSleepDisabled :1; - unsigned int idleSleepTimerPending :1; - unsigned int userDisabledAllSleep :1; - unsigned int ignoreTellChangeDown :1; - unsigned int wranglerAsleep :1; - unsigned int wranglerTickled :1; - unsigned int _preventUserActive :1; - unsigned int graphicsSuppressed :1; - - unsigned int capabilityLoss :1; - unsigned int pciCantSleepFlag :1; - unsigned int pciCantSleepValid :1; - unsigned int logGraphicsClamp :1; - unsigned int darkWakeToSleepASAP :1; - unsigned int darkWakeMaintenance :1; - unsigned int darkWakeSleepService :1; - unsigned int darkWakePostTickle :1; - - unsigned int sleepTimerMaintenance :1; - unsigned int sleepToStandby :1; - unsigned int lowBatteryCondition :1; - unsigned int hibernateDisabled :1; - unsigned int hibernateRetry :1; - unsigned int wranglerTickleLatched :1; - unsigned int userIsActive :1; - unsigned int userWasActive :1; - - unsigned int displayIdleForDemandSleep :1; - unsigned int darkWakeHibernateError :1; - unsigned int thermalWarningState:1; - unsigned int toldPowerdCapWillChange :1; - unsigned int displayPowerOnRequested:1; - - uint8_t tasksSuspended; - uint32_t hibernateMode; - AbsoluteTime userActivityTime; - AbsoluteTime userActivityTime_prev; - uint32_t userActivityCount; - uint32_t userActivityAtSleep; - uint32_t lastSleepReason; - uint32_t fullToDarkReason; - uint32_t hibernateAborted; - uint8_t standbyNixed; - uint8_t resetTimers; - - enum FullWakeReason { - kFullWakeReasonNone = 0, - kFullWakeReasonLocalUser = 1, - kFullWakeReasonDisplayOn = 2, - fFullWakeReasonDisplayOnAndLocalUser = 3 - }; - uint32_t fullWakeReason; - - // Info for communicating system state changes to PMCPU - int32_t idxPMCPUClamshell; - int32_t idxPMCPULimitedPower; - - IOOptionBits platformSleepSupport; - uint32_t _debugWakeSeconds; - uint32_t _lastDebugWakeSeconds; - - queue_head_t aggressivesQueue; - thread_call_t aggressivesThreadCall; - OSData * aggressivesData; - - AbsoluteTime userBecameInactiveTime; - - // PCI top-level PM trace - IOService * pciHostBridgeDevice; - IOService * pciHostBridgeDriver; - - IONotifier * systemCapabilityNotifier; - - typedef struct { - uint32_t pid; - uint32_t refcount; - } PMNotifySuspendedStruct; - - uint32_t pmSuspendedCapacity; - uint32_t pmSuspendedSize; - PMNotifySuspendedStruct *pmSuspendedPIDS; - - OSSet * preventIdleSleepList; - OSSet * 
preventSystemSleepList; - - UInt32 _scheduledAlarms; - UInt32 _userScheduledAlarm; + friend class PMSettingObject; + friend class RootDomainUserClient; + friend class PMAssertionsTracker; + + static IOReturn sysPowerDownHandler( void * target, void * refCon, + UInt32 messageType, IOService * service, + void * messageArgument, vm_size_t argSize ); + + static IOReturn displayWranglerNotification( void * target, void * refCon, + UInt32 messageType, IOService * service, + void * messageArgument, vm_size_t argSize ); + + static IOReturn rootBusyStateChangeHandler( void * target, void * refCon, + UInt32 messageType, IOService * service, + void * messageArgument, vm_size_t argSize ); + + static bool displayWranglerMatchPublished( void * target, void * refCon, + IOService * newService, + IONotifier * notifier); + + static bool batteryPublished( void * target, void * refCon, + IOService * resourceService, + IONotifier * notifier); + + void initializeBootSessionUUID( void ); + + void fullWakeDelayedWork( void ); + + IOService * wrangler; + OSDictionary * wranglerIdleSettings; + + IOLock *featuresDictLock;// guards supportedFeatures + IOLock *wakeEventLock; + IOPMPowerStateQueue *pmPowerStateQueue; + + OSArray *allowedPMSettings; + OSArray *noPublishPMSettings; + PMTraceWorker *pmTracer; + PMAssertionsTracker *pmAssertions; + +// Settings controller info + IOLock *settingsCtrlLock; + OSDictionary *settingsCallbacks; + OSDictionary *fPMSettingsDict; + + IONotifier *_batteryPublishNotifier; + IONotifier *_displayWranglerNotifier; + +// Statistics + const OSSymbol *_statsNameKey; + const OSSymbol *_statsPIDKey; + const OSSymbol *_statsTimeMSKey; + const OSSymbol *_statsResponseTypeKey; + const OSSymbol *_statsMessageTypeKey; + const OSSymbol *_statsPowerCapsKey; + uint32_t sleepCnt; + uint32_t darkWakeCnt; + uint32_t displayWakeCnt; + + OSString *queuedSleepWakeUUIDString; + OSArray *pmStatsAppResponses; + IOLock *pmStatsLock;// guards pmStatsAppResponses + + void *sleepDelaysReport; // report to track time taken to go to sleep + uint32_t sleepDelaysClientCnt;// Number of interested clients in sleepDelaysReport + uint64_t ts_sleepStart; + uint64_t wake2DarkwakeDelay; // Time taken to change from full wake -> Dark wake + + + void *assertOnWakeReport;// report to track time spent without any assertions held after wake + uint32_t assertOnWakeClientCnt;// Number of clients interested in assertOnWakeReport + clock_sec_t assertOnWakeSecs; // Num of secs after wake for first assertion + + bool uuidPublished; + +// Pref: idle time before idle sleep + bool idleSleepEnabled; + unsigned long sleepSlider; + unsigned long idleSeconds; + uint64_t autoWakeStart; + uint64_t autoWakeEnd; + +// Difference between sleepSlider and longestNonSleepSlider + unsigned long extraSleepDelay; + +// Used to wait between say display idle and system idle + thread_call_t extraSleepTimer; + thread_call_t diskSyncCalloutEntry; + thread_call_t fullWakeThreadCall; + thread_call_t updateConsoleUsersEntry; + +// Track system capabilities. + uint32_t _desiredCapability; + uint32_t _currentCapability; + uint32_t _pendingCapability; + uint32_t _highestCapability; + OSSet * _joinedCapabilityClients; + uint32_t _systemStateGeneration; + +// Type of clients that can receive system messages. + enum { + kSystemMessageClientPowerd = 0x01, + kSystemMessageClientLegacyApp = 0x02, + kSystemMessageClientKernel = 0x04, + kSystemMessageClientAll = 0x07 + }; + uint32_t _systemMessageClientMask; + +// Power state and capability change transitions. 
+ enum { + kSystemTransitionNone = 0, + kSystemTransitionSleep = 1, + kSystemTransitionWake = 2, + kSystemTransitionCapability = 3, + kSystemTransitionNewCapClient = 4 + } _systemTransitionType; + + unsigned int systemBooting :1; + unsigned int systemShutdown :1; + unsigned int systemDarkWake :1; + unsigned int clamshellExists :1; + unsigned int clamshellClosed :1; + unsigned int clamshellDisabled :1; + unsigned int desktopMode :1; + unsigned int acAdaptorConnected :1; + + unsigned int clamshellSleepDisabled :1; + unsigned int idleSleepTimerPending :1; + unsigned int userDisabledAllSleep :1; + unsigned int ignoreTellChangeDown :1; + unsigned int wranglerAsleep :1; + unsigned int wranglerTickled :1; + unsigned int _preventUserActive :1; + unsigned int graphicsSuppressed :1; + + unsigned int capabilityLoss :1; + unsigned int pciCantSleepFlag :1; + unsigned int pciCantSleepValid :1; + unsigned int logGraphicsClamp :1; + unsigned int darkWakeToSleepASAP :1; + unsigned int darkWakeMaintenance :1; + unsigned int darkWakeSleepService :1; + unsigned int darkWakePostTickle :1; + + unsigned int sleepTimerMaintenance :1; + unsigned int sleepToStandby :1; + unsigned int lowBatteryCondition :1; + unsigned int hibernateDisabled :1; + unsigned int hibernateRetry :1; + unsigned int wranglerTickleLatched :1; + unsigned int userIsActive :1; + unsigned int userWasActive :1; + + unsigned int displayIdleForDemandSleep :1; + unsigned int darkWakeHibernateError :1; + unsigned int thermalWarningState:1; + unsigned int toldPowerdCapWillChange :1; + unsigned int displayPowerOnRequested:1; + + uint8_t tasksSuspended; + uint32_t hibernateMode; + AbsoluteTime userActivityTime; + AbsoluteTime userActivityTime_prev; + uint32_t userActivityCount; + uint32_t userActivityAtSleep; + uint32_t lastSleepReason; + uint32_t fullToDarkReason; + uint32_t hibernateAborted; + uint8_t standbyNixed; + uint8_t resetTimers; + + enum FullWakeReason { + kFullWakeReasonNone = 0, + kFullWakeReasonLocalUser = 1, + kFullWakeReasonDisplayOn = 2, + fFullWakeReasonDisplayOnAndLocalUser = 3 + }; + uint32_t fullWakeReason; + +// Info for communicating system state changes to PMCPU + int32_t idxPMCPUClamshell; + int32_t idxPMCPULimitedPower; + + IOOptionBits platformSleepSupport; + uint32_t _debugWakeSeconds; + uint32_t _lastDebugWakeSeconds; + + queue_head_t aggressivesQueue; + thread_call_t aggressivesThreadCall; + OSData * aggressivesData; + + AbsoluteTime userBecameInactiveTime; + +// PCI top-level PM trace + IOService * pciHostBridgeDevice; + IOService * pciHostBridgeDriver; + + IONotifier * systemCapabilityNotifier; + + typedef struct { + uint32_t pid; + uint32_t refcount; + } PMNotifySuspendedStruct; + + uint32_t pmSuspendedCapacity; + uint32_t pmSuspendedSize; + PMNotifySuspendedStruct *pmSuspendedPIDS; + + OSSet * preventIdleSleepList; + OSSet * preventSystemSleepList; + + UInt32 _scheduledAlarms; + UInt32 _userScheduledAlarm; #if HIBERNATION - clock_sec_t _standbyTimerResetSeconds; + clock_sec_t _standbyTimerResetSeconds; #endif - volatile uint32_t swd_lock; /* Lock to access swd_buffer & and its header */ - void * swd_buffer; /* Memory allocated for dumping sleep/wake logs */ - uint32_t swd_flags; /* Flags defined in IOPMPrivate.h */ - void * swd_compressed_buffer; - void * swd_spindump_buffer; - thread_t notifierThread; - OSObject *notifierObject; + volatile uint32_t swd_lock;/* Lock to access swd_buffer & and its header */ + void * swd_buffer;/* Memory allocated for dumping sleep/wake logs */ + uint32_t swd_flags;/* Flags defined in 
IOPMPrivate.h */ + void * swd_compressed_buffer; + void * swd_spindump_buffer; + thread_t notifierThread; + OSObject *notifierObject; - IOBufferMemoryDescriptor *swd_memDesc; + IOBufferMemoryDescriptor *swd_memDesc; - // Wake Event Reporting - OSArray * _systemWakeEventsArray; - bool _acceptSystemWakeEvents; +// Wake Event Reporting + OSArray * _systemWakeEventsArray; + bool _acceptSystemWakeEvents; - int findSuspendedPID(uint32_t pid, uint32_t *outRefCount); + int findSuspendedPID(uint32_t pid, uint32_t *outRefCount); - // IOPMrootDomain internal sleep call - IOReturn privateSleepSystem( uint32_t sleepReason ); - void reportUserInput( void ); - void setDisableClamShellSleep( bool ); - bool checkSystemSleepAllowed( IOOptionBits options, - uint32_t sleepReason ); - bool checkSystemSleepEnabled( void ); - bool checkSystemCanSleep( uint32_t sleepReason ); - bool checkSystemCanSustainFullWake( void ); +// IOPMrootDomain internal sleep call + IOReturn privateSleepSystem( uint32_t sleepReason ); + void reportUserInput( void ); + void setDisableClamShellSleep( bool ); + bool checkSystemSleepAllowed( IOOptionBits options, + uint32_t sleepReason ); + bool checkSystemSleepEnabled( void ); + bool checkSystemCanSleep( uint32_t sleepReason ); + bool checkSystemCanSustainFullWake( void ); - void adjustPowerState( bool sleepASAP = false ); - void setQuickSpinDownTimeout( void ); - void restoreUserSpinDownTimeout( void ); + void adjustPowerState( bool sleepASAP = false ); + void setQuickSpinDownTimeout( void ); + void restoreUserSpinDownTimeout( void ); - bool shouldSleepOnClamshellClosed(void ); - void sendClientClamshellNotification( void ); + bool shouldSleepOnClamshellClosed(void ); + void sendClientClamshellNotification( void ); - // Inform PMCPU of changes to state like lid, AC vs. battery - void informCPUStateChange( uint32_t type, uint32_t value ); +// Inform PMCPU of changes to state like lid, AC vs. 
battery + void informCPUStateChange( uint32_t type, uint32_t value ); - void dispatchPowerEvent( uint32_t event, void * arg0, uint64_t arg1 ); - void handlePowerNotification( UInt32 msg ); + void dispatchPowerEvent( uint32_t event, void * arg0, uint64_t arg1 ); + void handlePowerNotification( UInt32 msg ); - IOReturn setPMSetting(const OSSymbol *, OSObject *); + IOReturn setPMSetting(const OSSymbol *, OSObject *); - void startIdleSleepTimer( uint32_t inSeconds ); - void cancelIdleSleepTimer( void ); - uint32_t getTimeToIdleSleep( void ); + void startIdleSleepTimer( uint32_t inSeconds ); + void cancelIdleSleepTimer( void ); + uint32_t getTimeToIdleSleep( void ); - IOReturn setAggressiveness( - unsigned long type, - unsigned long value, - IOOptionBits options ); + IOReturn setAggressiveness( + unsigned long type, + unsigned long value, + IOOptionBits options ); - void synchronizeAggressives( - queue_head_t * services, - const AggressivesRecord * array, - int count ); + void synchronizeAggressives( + queue_head_t * services, + const AggressivesRecord * array, + int count ); - void broadcastAggressives( - const AggressivesRecord * array, - int count ); + void broadcastAggressives( + const AggressivesRecord * array, + int count ); - IOReturn setPMAssertionUserLevels(IOPMDriverAssertionType); + IOReturn setPMAssertionUserLevels(IOPMDriverAssertionType); - void publishSleepWakeUUID( bool shouldPublish ); + void publishSleepWakeUUID( bool shouldPublish ); - void evaluatePolicy( int stimulus, uint32_t arg = 0 ); - void requestFullWake( FullWakeReason reason ); - void willEnterFullWake( void ); + void evaluatePolicy( int stimulus, uint32_t arg = 0 ); + void requestFullWake( FullWakeReason reason ); + void willEnterFullWake( void ); - void evaluateAssertions(IOPMDriverAssertionType newAssertions, - IOPMDriverAssertionType oldAssertions); + void evaluateAssertions(IOPMDriverAssertionType newAssertions, + IOPMDriverAssertionType oldAssertions); - void deregisterPMSettingObject( PMSettingObject * pmso ); + void deregisterPMSettingObject( PMSettingObject * pmso ); - uint32_t checkForValidDebugData(const char *fname, vfs_context_t *ctx, - void *tmpBuf, struct vnode **vp); - void getFailureData(thread_t *thread, char *failureStr, size_t strLen); - void saveFailureData2File(); - void tracePhase2String(uint32_t tracePhase, const char **phaseString, const char **description); - void sleepWakeDebugMemAlloc( ); - void sleepWakeDebugSpinDumpMemAlloc( ); - errno_t sleepWakeDebugSaveFile(const char *name, char *buf, int len); + uint32_t checkForValidDebugData(const char *fname, vfs_context_t *ctx, + void *tmpBuf, struct vnode **vp); + void getFailureData(thread_t *thread, char *failureStr, size_t strLen); + void saveFailureData2File(); + void tracePhase2String(uint32_t tracePhase, const char **phaseString, const char **description); + void sleepWakeDebugMemAlloc(); + void sleepWakeDebugSpinDumpMemAlloc(); + errno_t sleepWakeDebugSaveFile(const char *name, char *buf, int len); #if HIBERNATION - bool getSleepOption( const char * key, uint32_t * option ); - bool evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, - int phase, uint32_t * hibMode ); - void evaluateSystemSleepPolicyEarly( void ); - void evaluateSystemSleepPolicyFinal( void ); + bool getSleepOption( const char * key, uint32_t * option ); + bool evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, + int phase, uint32_t * hibMode ); + void evaluateSystemSleepPolicyEarly( void ); + void evaluateSystemSleepPolicyFinal( void ); #endif /* 
HIBERNATION */ - bool latchDisplayWranglerTickle( bool latch ); - void setDisplayPowerOn( uint32_t options ); + bool latchDisplayWranglerTickle( bool latch ); + void setDisplayPowerOn( uint32_t options ); - void acceptSystemWakeEvents( bool accept ); - void systemDidNotSleep( void ); - void preventTransitionToUserActive( bool prevent ); - void setThermalState(OSObject *value); - void copySleepPreventersList(OSArray **idleSleepList, OSArray **systemSleepList); + void acceptSystemWakeEvents( bool accept ); + void systemDidNotSleep( void ); + void preventTransitionToUserActive( bool prevent ); + void setThermalState(OSObject *value); + void copySleepPreventersList(OSArray **idleSleepList, OSArray **systemSleepList); #endif /* XNU_KERNEL_PRIVATE */ }; #ifdef XNU_KERNEL_PRIVATE -class IORootParent: public IOService +class IORootParent : public IOService { - OSDeclareFinalStructors(IORootParent) + OSDeclareFinalStructors(IORootParent) public: - static void initialize( void ); - virtual OSObject * copyProperty( const char * aKey ) const APPLE_KEXT_OVERRIDE; - bool start( IOService * nub ) APPLE_KEXT_OVERRIDE; - void shutDownSystem( void ); - void restartSystem( void ); - void sleepSystem( void ); - void dozeSystem( void ); - void sleepToDoze( void ); - void wakeSystem( void ); + static void initialize( void ); + virtual OSObject * copyProperty( const char * aKey ) const APPLE_KEXT_OVERRIDE; + bool start( IOService * nub ) APPLE_KEXT_OVERRIDE; + void shutDownSystem( void ); + void restartSystem( void ); + void sleepSystem( void ); + void dozeSystem( void ); + void sleepToDoze( void ); + void wakeSystem( void ); }; #endif /* XNU_KERNEL_PRIVATE */ diff --git a/iokit/IOKit/rtc/IORTCController.h b/iokit/IOKit/rtc/IORTCController.h index 159f3eb93..5a73917ec 100644 --- a/iokit/IOKit/rtc/IORTCController.h +++ b/iokit/IOKit/rtc/IORTCController.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2017 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,50 +36,50 @@ typedef void (*RTC_tick_handler)( IOService * ); -class IORTCController: public IOService +class IORTCController : public IOService { -OSDeclareAbstractStructors(IORTCController) + OSDeclareAbstractStructors(IORTCController) public: -virtual IOReturn getRealTimeClock ( UInt8 * currentTime, IOByteCount * length ) = 0; -virtual IOReturn setRealTimeClock ( UInt8 * newTime ) = 0; + virtual IOReturn getRealTimeClock( UInt8 * currentTime, IOByteCount * length ) = 0; + virtual IOReturn setRealTimeClock( UInt8 * newTime ) = 0; }; -class IORTC: public IOService +class IORTC : public IOService { -OSDeclareAbstractStructors(IORTC); + OSDeclareAbstractStructors(IORTC); protected: - /*! @var reserved - Reserved for future use. (Internal use only) */ - struct ExpansionData { }; - ExpansionData *iortc_reserved __unused; +/*! @var reserved + * Reserved for future use. (Internal use only) */ + struct ExpansionData { }; + ExpansionData *iortc_reserved __unused; public: - virtual long getGMTTimeOfDay( void ) = 0; - virtual void setGMTTimeOfDay( long secs ) = 0; + virtual long getGMTTimeOfDay( void ) = 0; + virtual void setGMTTimeOfDay( long secs ) = 0; - virtual void getUTCTimeOfDay( clock_sec_t * secs, clock_nsec_t * nsecs ); - virtual void setUTCTimeOfDay( clock_sec_t secs, clock_nsec_t nsecs ); + virtual void getUTCTimeOfDay( clock_sec_t * secs, clock_nsec_t * nsecs ); + virtual void setUTCTimeOfDay( clock_sec_t secs, clock_nsec_t nsecs ); - virtual void setAlarmEnable( IOOptionBits message ) = 0; + virtual void setAlarmEnable( IOOptionBits message ) = 0; - virtual IOReturn getMonotonicClockOffset( int64_t * usecs ); - virtual IOReturn setMonotonicClockOffset( int64_t usecs ); - virtual IOReturn getMonotonicClockAndTimestamp( uint64_t * usecs, uint64_t *mach_absolute_time ); + virtual IOReturn getMonotonicClockOffset( int64_t * usecs ); + virtual IOReturn setMonotonicClockOffset( int64_t usecs ); + virtual IOReturn getMonotonicClockAndTimestamp( uint64_t * usecs, uint64_t *mach_absolute_time ); - OSMetaClassDeclareReservedUnused(IORTC, 0); - OSMetaClassDeclareReservedUnused(IORTC, 1); - OSMetaClassDeclareReservedUnused(IORTC, 2); - OSMetaClassDeclareReservedUnused(IORTC, 3); - OSMetaClassDeclareReservedUnused(IORTC, 4); - OSMetaClassDeclareReservedUnused(IORTC, 5); - OSMetaClassDeclareReservedUnused(IORTC, 6); - OSMetaClassDeclareReservedUnused(IORTC, 7); + OSMetaClassDeclareReservedUnused(IORTC, 0); + OSMetaClassDeclareReservedUnused(IORTC, 1); + OSMetaClassDeclareReservedUnused(IORTC, 2); + OSMetaClassDeclareReservedUnused(IORTC, 3); + OSMetaClassDeclareReservedUnused(IORTC, 4); + OSMetaClassDeclareReservedUnused(IORTC, 5); + OSMetaClassDeclareReservedUnused(IORTC, 6); + OSMetaClassDeclareReservedUnused(IORTC, 7); }; #endif /* !_IORTCCONTROLLER_H */ diff --git a/iokit/IOKit/system.h b/iokit/IOKit/system.h index eee4d8132..6d620462f 100644 --- a/iokit/IOKit/system.h +++ b/iokit/IOKit/system.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef __IOKIT_SYSTEM_H @@ -58,16 +58,16 @@ __BEGIN_DECLS #include #endif -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #include #include #include #include #endif /* KERNEL_PRIVATE */ -#ifndef _MISC_PROTOS_H_ -extern void _doprnt( const char *format, va_list *arg, - void (*lputc)(char), int radix ); +#ifndef _MISC_PROTOS_H_ +extern void _doprnt( const char *format, va_list *arg, + void (*lputc)(char), int radix ); #endif __END_DECLS diff --git a/iokit/IOKit/system_management/IOWatchDogTimer.h b/iokit/IOKit/system_management/IOWatchDogTimer.h index fe031ce42..0139a7b2c 100644 --- a/iokit/IOKit/system_management/IOWatchDogTimer.h +++ b/iokit/IOKit/system_management/IOWatchDogTimer.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,25 +33,25 @@ class IOWatchDogTimer : public IOService { - OSDeclareAbstractStructors(IOWatchDogTimer); - + OSDeclareAbstractStructors(IOWatchDogTimer); + protected: - IONotifier *notifier; - struct ExpansionData { }; - APPLE_KEXT_WSHADOW_PUSH; - ExpansionData *reserved; - APPLE_KEXT_WSHADOW_POP; - + IONotifier *notifier; + struct ExpansionData { }; + APPLE_KEXT_WSHADOW_PUSH; + ExpansionData *reserved; + APPLE_KEXT_WSHADOW_POP; + public: - virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; - virtual void stop(IOService *provider) APPLE_KEXT_OVERRIDE; - virtual IOReturn setProperties(OSObject *properties) APPLE_KEXT_OVERRIDE; - virtual void setWatchDogTimer(UInt32 timeOut) = 0; - - OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 0); - OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 1); - OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 2); - OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 3); + virtual bool start(IOService *provider) APPLE_KEXT_OVERRIDE; + virtual void stop(IOService *provider) APPLE_KEXT_OVERRIDE; + virtual IOReturn setProperties(OSObject *properties) APPLE_KEXT_OVERRIDE; + virtual void setWatchDogTimer(UInt32 timeOut) = 0; + + OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 0); + OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 1); + OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 2); + OSMetaClassDeclareReservedUnused(IOWatchDogTimer, 3); }; #endif /* !_IOWATCHDOGTIMER_H */ diff --git a/iokit/Kernel/IOBufferMemoryDescriptor.cpp b/iokit/Kernel/IOBufferMemoryDescriptor.cpp index b3ff13dda..91aeca2a8 100644 --- a/iokit/Kernel/IOBufferMemoryDescriptor.cpp +++ b/iokit/Kernel/IOBufferMemoryDescriptor.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -62,325 +62,333 @@ __END_DECLS /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -enum -{ - kInternalFlagPhysical = 0x00000001, - kInternalFlagPageSized = 0x00000002, - kInternalFlagPageAllocated = 0x00000004, - kInternalFlagInit = 0x00000008 +enum{ + kInternalFlagPhysical = 0x00000001, + kInternalFlagPageSized = 0x00000002, + kInternalFlagPageAllocated = 0x00000004, + kInternalFlagInit = 0x00000008 }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #define super IOGeneralMemoryDescriptor OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor, - IOGeneralMemoryDescriptor); + IOGeneralMemoryDescriptor); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static uintptr_t IOBMDPageProc(iopa_t * a) +static uintptr_t +IOBMDPageProc(iopa_t * a) { - kern_return_t kr; - vm_address_t vmaddr = 0; - int options = 0; // KMA_LOMEM; + kern_return_t kr; + vm_address_t vmaddr = 0; + int options = 0;// KMA_LOMEM; - kr = kernel_memory_allocate(kernel_map, &vmaddr, - page_size, 0, options, VM_KERN_MEMORY_IOKIT); + kr = kernel_memory_allocate(kernel_map, &vmaddr, + page_size, 0, options, VM_KERN_MEMORY_IOKIT); - if (KERN_SUCCESS != kr) vmaddr = 0; - else bzero((void *) vmaddr, page_size); + if (KERN_SUCCESS != kr) { + vmaddr = 0; + } else { + bzero((void *) vmaddr, page_size); + } - return ((uintptr_t) vmaddr); + return (uintptr_t) vmaddr; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #ifndef __LP64__ -bool IOBufferMemoryDescriptor::initWithOptions( - IOOptionBits options, - vm_size_t capacity, - vm_offset_t alignment, - task_t inTask) +bool +IOBufferMemoryDescriptor::initWithOptions( + IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment, + task_t inTask) { - mach_vm_address_t physicalMask = 0; - return (initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask)); + mach_vm_address_t physicalMask = 0; + return initWithPhysicalMask(inTask, options, capacity, alignment, physicalMask); } #endif /* !__LP64__ */ -bool IOBufferMemoryDescriptor::initWithPhysicalMask( - task_t inTask, - IOOptionBits options, - mach_vm_size_t capacity, - mach_vm_address_t alignment, - mach_vm_address_t physicalMask) +bool +IOBufferMemoryDescriptor::initWithPhysicalMask( + task_t inTask, + IOOptionBits options, + mach_vm_size_t capacity, + mach_vm_address_t alignment, + mach_vm_address_t physicalMask) { - task_t mapTask = NULL; - vm_map_t vmmap = NULL; - mach_vm_address_t highestMask = 0; - IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference; - IODMAMapSpecification mapSpec; - bool mapped = false; - bool needZero; - - if (!capacity) return false; - - _options = options; - _capacity = capacity; - _internalFlags = 0; - _internalReserved = 0; - _buffer = 0; - - _ranges.v64 = IONew(IOAddressRange, 1); - if (!_ranges.v64) - return (false); - _ranges.v64->address = 0; - _ranges.v64->length = 0; - // make sure super::free doesn't dealloc _ranges before super::init - _flags = kIOMemoryAsReference; - - // Grab IOMD bits from the Buffer MD options - iomdOptions |= (options & kIOBufferDescriptorMemoryFlags); - - if (!(kIOMemoryMapperNone & options)) - { - IOMapper::checkForSystemMapper(); - mapped = (0 != IOMapper::gSystem); - } - needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options))); - - if (physicalMask && (alignment <= 1)) - { - alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1)); - highestMask = 
(physicalMask | alignment); - alignment++; - if (alignment < page_size) - alignment = page_size; - } - - if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) - alignment = page_size; - - if (alignment >= page_size) - capacity = round_page(capacity); - - if (alignment > page_size) - options |= kIOMemoryPhysicallyContiguous; - - _alignment = alignment; - - if ((capacity + alignment) < _capacity) return (false); - - if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) - return false; - - bzero(&mapSpec, sizeof(mapSpec)); - mapSpec.alignment = _alignment; - mapSpec.numAddressBits = 64; - if (highestMask && mapped) - { - if (highestMask <= 0xFFFFFFFF) - mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask)); - else - mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32))); - highestMask = 0; - } - - // set memory entry cache mode, pageable, purgeable - iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift; - if (options & kIOMemoryPageable) - { - iomdOptions |= kIOMemoryBufferPageable; - if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable; - } - else - { - vmmap = kernel_map; - - // Buffer shouldn't auto prepare they should be prepared explicitly - // But it never was enforced so what are you going to do? - iomdOptions |= kIOMemoryAutoPrepare; - - /* Allocate a wired-down buffer inside kernel space. */ - - bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous)); - - if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) - { - contig |= (!mapped); - contig |= (0 != (kIOMemoryMapperNone & options)); -#if 0 - // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now - contig |= true; -#endif + task_t mapTask = NULL; + vm_map_t vmmap = NULL; + mach_vm_address_t highestMask = 0; + IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference; + IODMAMapSpecification mapSpec; + bool mapped = false; + bool needZero; + + if (!capacity) { + return false; } - if (contig || highestMask || (alignment > page_size)) - { - _internalFlags |= kInternalFlagPhysical; - if (highestMask) - { - _internalFlags |= kInternalFlagPageSized; - capacity = round_page(capacity); - } - _buffer = (void *) IOKernelAllocateWithPhysicalRestrict( - capacity, highestMask, alignment, contig); - } - else if (needZero - && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) - { - _internalFlags |= kInternalFlagPageAllocated; - needZero = false; - _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment); - if (_buffer) - { - IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity); -#if IOALLOCDEBUG - OSAddAtomic(capacity, &debug_iomalloc_size); -#endif - } + _options = options; + _capacity = capacity; + _internalFlags = 0; + _internalReserved = 0; + _buffer = 0; + + _ranges.v64 = IONew(IOAddressRange, 1); + if (!_ranges.v64) { + return false; + } + _ranges.v64->address = 0; + _ranges.v64->length = 0; + // make sure super::free doesn't dealloc _ranges before super::init + _flags = kIOMemoryAsReference; + + // Grab IOMD bits from the Buffer MD options + iomdOptions |= (options & kIOBufferDescriptorMemoryFlags); + + if (!(kIOMemoryMapperNone & options)) { + IOMapper::checkForSystemMapper(); + mapped = (0 != IOMapper::gSystem); + } + needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options))); + + if (physicalMask && (alignment <= 1)) { + alignment = 
((physicalMask ^ (-1ULL)) & (physicalMask - 1)); + highestMask = (physicalMask | alignment); + alignment++; + if (alignment < page_size) { + alignment = page_size; + } + } + + if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size)) { + alignment = page_size; } - else if (alignment > 1) - { - _buffer = IOMallocAligned(capacity, alignment); + + if (alignment >= page_size) { + capacity = round_page(capacity); + } + + if (alignment > page_size) { + options |= kIOMemoryPhysicallyContiguous; + } + + _alignment = alignment; + + if ((capacity + alignment) < _capacity) { + return false; } - else - { - _buffer = IOMalloc(capacity); + + if ((inTask != kernel_task) && !(options & kIOMemoryPageable)) { + return false; } - if (!_buffer) - { - return false; + + bzero(&mapSpec, sizeof(mapSpec)); + mapSpec.alignment = _alignment; + mapSpec.numAddressBits = 64; + if (highestMask && mapped) { + if (highestMask <= 0xFFFFFFFF) { + mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask)); + } else { + mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32))); + } + highestMask = 0; } - if (needZero) bzero(_buffer, capacity); - } - if( (options & (kIOMemoryPageable | kIOMapCacheMask))) { - vm_size_t size = round_page(capacity); + // set memory entry cache mode, pageable, purgeable + iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift; + if (options & kIOMemoryPageable) { + iomdOptions |= kIOMemoryBufferPageable; + if (options & kIOMemoryPurgeable) { + iomdOptions |= kIOMemoryBufferPurgeable; + } + } else { + vmmap = kernel_map; + + // Buffer shouldn't auto prepare they should be prepared explicitly + // But it never was enforced so what are you going to do? + iomdOptions |= kIOMemoryAutoPrepare; + + /* Allocate a wired-down buffer inside kernel space. 
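 *
 * A hedged reading of the allocation paths that follow: contiguous,
 * physical-mask constrained, or larger-than-page-aligned requests go
 * through IOKernelAllocateWithPhysicalRestrict; small allocations that
 * must be zeroed and fit under (page_size - gIOPageAllocChunkBytes)
 * come from gIOBMDPageAllocator via IOBMDPageProc; everything else
 * falls back to IOMallocAligned or plain IOMalloc. Earlier in this
 * routine the alignment derived from physicalMask isolates the mask's
 * low zero bits: for an illustrative physicalMask of 0xFFFFF000,
 * ((physicalMask ^ -1ULL) & (physicalMask - 1)) evaluates to 0xFFF,
 * and the subsequent increment yields a 0x1000 (4 KB) alignment.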
*/ - // initWithOptions will create memory entry - iomdOptions |= kIOMemoryPersistent; + bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous)); - if( options & kIOMemoryPageable) { + if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous))) { + contig |= (!mapped); + contig |= (0 != (kIOMemoryMapperNone & options)); +#if 0 + // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now + contig |= true; +#endif + } + + if (contig || highestMask || (alignment > page_size)) { + _internalFlags |= kInternalFlagPhysical; + if (highestMask) { + _internalFlags |= kInternalFlagPageSized; + capacity = round_page(capacity); + } + _buffer = (void *) IOKernelAllocateWithPhysicalRestrict( + capacity, highestMask, alignment, contig); + } else if (needZero + && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes))) { + _internalFlags |= kInternalFlagPageAllocated; + needZero = false; + _buffer = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment); + if (_buffer) { + IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity); #if IOALLOCDEBUG - OSAddAtomicLong(size, &debug_iomallocpageable_size); + OSAddAtomic(capacity, &debug_iomalloc_size); #endif - mapTask = inTask; - if (NULL == inTask) - inTask = kernel_task; + } + } else if (alignment > 1) { + _buffer = IOMallocAligned(capacity, alignment); + } else { + _buffer = IOMalloc(capacity); + } + if (!_buffer) { + return false; + } + if (needZero) { + bzero(_buffer, capacity); + } } - else if (options & kIOMapCacheMask) - { - // Prefetch each page to put entries into the pmap - volatile UInt8 * startAddr = (UInt8 *)_buffer; - volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity; - while (startAddr < endAddr) - { - UInt8 dummyVar = *startAddr; - (void) dummyVar; - startAddr += page_size; - } + if ((options & (kIOMemoryPageable | kIOMapCacheMask))) { + vm_size_t size = round_page(capacity); + + // initWithOptions will create memory entry + iomdOptions |= kIOMemoryPersistent; + + if (options & kIOMemoryPageable) { +#if IOALLOCDEBUG + OSAddAtomicLong(size, &debug_iomallocpageable_size); +#endif + mapTask = inTask; + if (NULL == inTask) { + inTask = kernel_task; + } + } else if (options & kIOMapCacheMask) { + // Prefetch each page to put entries into the pmap + volatile UInt8 * startAddr = (UInt8 *)_buffer; + volatile UInt8 * endAddr = (UInt8 *)_buffer + capacity; + + while (startAddr < endAddr) { + UInt8 dummyVar = *startAddr; + (void) dummyVar; + startAddr += page_size; + } + } } - } - _ranges.v64->address = (mach_vm_address_t) _buffer;; - _ranges.v64->length = _capacity; + _ranges.v64->address = (mach_vm_address_t) _buffer;; + _ranges.v64->length = _capacity; - if (!super::initWithOptions(_ranges.v64, 1, 0, - inTask, iomdOptions, /* System mapper */ 0)) - return false; + if (!super::initWithOptions(_ranges.v64, 1, 0, + inTask, iomdOptions, /* System mapper */ 0)) { + return false; + } - _internalFlags |= kInternalFlagInit; + _internalFlags |= kInternalFlagInit; #if IOTRACKING - if (!(options & kIOMemoryPageable)) trackingAccumSize(capacity); + if (!(options & kIOMemoryPageable)) { + trackingAccumSize(capacity); + } #endif /* IOTRACKING */ - // give any system mapper the allocation params - if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, - &mapSpec, sizeof(mapSpec))) - return false; - - if (mapTask) - { - if (!reserved) { - reserved = IONew( ExpansionData, 1 ); - if( !reserved) - return( false ); - } - reserved->map = createMappingInTask(mapTask, 0, - kIOMapAnywhere 
| (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0); - if (!reserved->map) - { - _buffer = 0; - return( false ); - } - release(); // map took a retain on this - reserved->map->retain(); - removeMapping(reserved->map); - mach_vm_address_t buffer = reserved->map->getAddress(); - _buffer = (void *) buffer; - if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) - _ranges.v64->address = buffer; - } - - setLength(_capacity); - - return true; + // give any system mapper the allocation params + if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, + &mapSpec, sizeof(mapSpec))) { + return false; + } + + if (mapTask) { + if (!reserved) { + reserved = IONew( ExpansionData, 1 ); + if (!reserved) { + return false; + } + } + reserved->map = createMappingInTask(mapTask, 0, + kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0); + if (!reserved->map) { + _buffer = 0; + return false; + } + release(); // map took a retain on this + reserved->map->retain(); + removeMapping(reserved->map); + mach_vm_address_t buffer = reserved->map->getAddress(); + _buffer = (void *) buffer; + if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions)) { + _ranges.v64->address = buffer; + } + } + + setLength(_capacity); + + return true; } -IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions( - task_t inTask, - IOOptionBits options, - vm_size_t capacity, - vm_offset_t alignment) +IOBufferMemoryDescriptor * +IOBufferMemoryDescriptor::inTaskWithOptions( + task_t inTask, + IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment) { - IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; - - if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) { - me->release(); - me = 0; - } - return me; + IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + + if (me && !me->initWithPhysicalMask(inTask, options, capacity, alignment, 0)) { + me->release(); + me = 0; + } + return me; } -IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithPhysicalMask( - task_t inTask, - IOOptionBits options, - mach_vm_size_t capacity, - mach_vm_address_t physicalMask) +IOBufferMemoryDescriptor * +IOBufferMemoryDescriptor::inTaskWithPhysicalMask( + task_t inTask, + IOOptionBits options, + mach_vm_size_t capacity, + mach_vm_address_t physicalMask) { - IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; - - if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) - { - me->release(); - me = 0; - } - return me; + IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + + if (me && !me->initWithPhysicalMask(inTask, options, capacity, 1, physicalMask)) { + me->release(); + me = 0; + } + return me; } #ifndef __LP64__ -bool IOBufferMemoryDescriptor::initWithOptions( - IOOptionBits options, - vm_size_t capacity, - vm_offset_t alignment) +bool +IOBufferMemoryDescriptor::initWithOptions( + IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment) { - return (initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0)); + return initWithPhysicalMask(kernel_task, options, capacity, alignment, (mach_vm_address_t)0); } #endif /* !__LP64__ */ -IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions( - IOOptionBits options, - vm_size_t capacity, - vm_offset_t alignment) +IOBufferMemoryDescriptor * +IOBufferMemoryDescriptor::withOptions( + IOOptionBits options, + vm_size_t capacity, + vm_offset_t alignment) { - IOBufferMemoryDescriptor *me = new 
IOBufferMemoryDescriptor; - - if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) { - me->release(); - me = 0; - } - return me; + IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + + if (me && !me->initWithPhysicalMask(kernel_task, options, capacity, alignment, 0)) { + me->release(); + me = 0; + } + return me; } @@ -392,13 +400,13 @@ IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions( */ IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity, - IODirection inDirection, - bool inContiguous) + IODirection inDirection, + bool inContiguous) { - return( IOBufferMemoryDescriptor::withOptions( - inDirection | kIOMemoryUnshared - | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), - inCapacity, inContiguous ? inCapacity : 1 )); + return IOBufferMemoryDescriptor::withOptions( + inDirection | kIOMemoryUnshared + | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), + inCapacity, inContiguous ? inCapacity : 1 ); } #ifndef __LP64__ @@ -408,23 +416,26 @@ IOBufferMemoryDescriptor::withCapacity(vm_size_t inCapacity, * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied). * The descriptor's length and capacity are set to the input buffer's size. */ -bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes, - vm_size_t inLength, - IODirection inDirection, - bool inContiguous) +bool +IOBufferMemoryDescriptor::initWithBytes(const void * inBytes, + vm_size_t inLength, + IODirection inDirection, + bool inContiguous) { - if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared - | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), - inLength, inLength, (mach_vm_address_t)0)) - return false; + if (!initWithPhysicalMask(kernel_task, inDirection | kIOMemoryUnshared + | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), + inLength, inLength, (mach_vm_address_t)0)) { + return false; + } - // start out with no data - setLength(0); + // start out with no data + setLength(0); - if (!appendBytes(inBytes, inLength)) - return false; + if (!appendBytes(inBytes, inLength)) { + return false; + } - return true; + return true; } #endif /* !__LP64__ */ @@ -436,33 +447,30 @@ bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes, */ IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withBytes(const void * inBytes, - vm_size_t inLength, - IODirection inDirection, - bool inContiguous) + vm_size_t inLength, + IODirection inDirection, + bool inContiguous) { - IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; - - if (me && !me->initWithPhysicalMask( - kernel_task, inDirection | kIOMemoryUnshared - | (inContiguous ? kIOMemoryPhysicallyContiguous : 0), - inLength, inLength, 0 )) - { - me->release(); - me = 0; - } - - if (me) - { - // start out with no data - me->setLength(0); + IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor; + + if (me && !me->initWithPhysicalMask( + kernel_task, inDirection | kIOMemoryUnshared + | (inContiguous ? 
kIOMemoryPhysicallyContiguous : 0), + inLength, inLength, 0 )) { + me->release(); + me = 0; + } + + if (me) { + // start out with no data + me->setLength(0); - if (!me->appendBytes(inBytes, inLength)) - { - me->release(); - me = 0; + if (!me->appendBytes(inBytes, inLength)) { + me->release(); + me = 0; + } } - } - return me; + return me; } /* @@ -470,78 +478,74 @@ IOBufferMemoryDescriptor::withBytes(const void * inBytes, * * Free resources */ -void IOBufferMemoryDescriptor::free() +void +IOBufferMemoryDescriptor::free() { - // Cache all of the relevant information on the stack for use - // after we call super::free()! - IOOptionBits flags = _flags; - IOOptionBits internalFlags = _internalFlags; - IOOptionBits options = _options; - vm_size_t size = _capacity; - void * buffer = _buffer; - IOMemoryMap * map = 0; - IOAddressRange * range = _ranges.v64; - vm_offset_t alignment = _alignment; - - if (alignment >= page_size) - size = round_page(size); - - if (reserved) - { - map = reserved->map; - IODelete( reserved, ExpansionData, 1 ); - if (map) - map->release(); - } - - if ((options & kIOMemoryPageable) - || (kInternalFlagPageSized & internalFlags)) size = round_page(size); + // Cache all of the relevant information on the stack for use + // after we call super::free()! + IOOptionBits flags = _flags; + IOOptionBits internalFlags = _internalFlags; + IOOptionBits options = _options; + vm_size_t size = _capacity; + void * buffer = _buffer; + IOMemoryMap * map = 0; + IOAddressRange * range = _ranges.v64; + vm_offset_t alignment = _alignment; + + if (alignment >= page_size) { + size = round_page(size); + } + + if (reserved) { + map = reserved->map; + IODelete( reserved, ExpansionData, 1 ); + if (map) { + map->release(); + } + } + + if ((options & kIOMemoryPageable) + || (kInternalFlagPageSized & internalFlags)) { + size = round_page(size); + } #if IOTRACKING - if (!(options & kIOMemoryPageable) - && buffer - && (kInternalFlagInit & _internalFlags)) trackingAccumSize(-size); + if (!(options & kIOMemoryPageable) + && buffer + && (kInternalFlagInit & _internalFlags)) { + trackingAccumSize(-size); + } #endif /* IOTRACKING */ - /* super::free may unwire - deallocate buffer afterwards */ - super::free(); + /* super::free may unwire - deallocate buffer afterwards */ + super::free(); - if (options & kIOMemoryPageable) - { + if (options & kIOMemoryPageable) { #if IOALLOCDEBUG - OSAddAtomicLong(-size, &debug_iomallocpageable_size); + OSAddAtomicLong(-size, &debug_iomallocpageable_size); #endif - } - else if (buffer) - { - if (kInternalFlagPhysical & internalFlags) - { - IOKernelFreePhysical((mach_vm_address_t) buffer, size); - } - else if (kInternalFlagPageAllocated & internalFlags) - { - uintptr_t page; - page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size); - if (page) - { - kmem_free(kernel_map, page, page_size); - } + } else if (buffer) { + if (kInternalFlagPhysical & internalFlags) { + IOKernelFreePhysical((mach_vm_address_t) buffer, size); + } else if (kInternalFlagPageAllocated & internalFlags) { + uintptr_t page; + page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size); + if (page) { + kmem_free(kernel_map, page, page_size); + } #if IOALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); + OSAddAtomic(-size, &debug_iomalloc_size); #endif - IOStatisticsAlloc(kIOStatisticsFreeAligned, size); + IOStatisticsAlloc(kIOStatisticsFreeAligned, size); + } else if (alignment > 1) { + IOFreeAligned(buffer, size); + } else { + IOFree(buffer, size); + } } - else if (alignment > 1) - { - 
IOFreeAligned(buffer, size); + if (range && (kIOMemoryAsReference & flags)) { + IODelete(range, IOAddressRange, 1); } - else - { - IOFree(buffer, size); - } - } - if (range && (kIOMemoryAsReference & flags)) - IODelete(range, IOAddressRange, 1); } /* @@ -549,9 +553,10 @@ void IOBufferMemoryDescriptor::free() * * Get the buffer capacity */ -vm_size_t IOBufferMemoryDescriptor::getCapacity() const +vm_size_t +IOBufferMemoryDescriptor::getCapacity() const { - return _capacity; + return _capacity; } /* @@ -564,13 +569,16 @@ vm_size_t IOBufferMemoryDescriptor::getCapacity() const * can reuse an existing one, even for different transfer sizes). Note * that the specified length must not exceed the capacity of the buffer. */ -void IOBufferMemoryDescriptor::setLength(vm_size_t length) +void +IOBufferMemoryDescriptor::setLength(vm_size_t length) { - assert(length <= _capacity); - if (length > _capacity) return; + assert(length <= _capacity); + if (length > _capacity) { + return; + } - _length = length; - _ranges.v64->length = length; + _length = length; + _ranges.v64->length = length; } /* @@ -580,11 +588,12 @@ void IOBufferMemoryDescriptor::setLength(vm_size_t length) * the descriptor's transfer direction. This eliminates the need to destroy * and create new buffers when different transfer directions are needed. */ -void IOBufferMemoryDescriptor::setDirection(IODirection direction) +void +IOBufferMemoryDescriptor::setDirection(IODirection direction) { - _flags = (_flags & ~kIOMemoryDirectionMask) | direction; + _flags = (_flags & ~kIOMemoryDirectionMask) | direction; #ifndef __LP64__ - _direction = (IODirection) (_flags & kIOMemoryDirectionMask); + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); #endif /* !__LP64__ */ } @@ -598,22 +607,23 @@ void IOBufferMemoryDescriptor::setDirection(IODirection direction) bool IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength) { - vm_size_t actualBytesToCopy = min(withLength, _capacity - _length); - IOByteCount offset; + vm_size_t actualBytesToCopy = min(withLength, _capacity - _length); + IOByteCount offset; - assert(_length <= _capacity); + assert(_length <= _capacity); - offset = _length; - _length += actualBytesToCopy; - _ranges.v64->length += actualBytesToCopy; + offset = _length; + _length += actualBytesToCopy; + _ranges.v64->length += actualBytesToCopy; - if (_task == kernel_task) - bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset), - actualBytesToCopy); - else - writeBytes(offset, bytes, actualBytesToCopy); + if (_task == kernel_task) { + bcopy(/* from */ bytes, (void *)(_ranges.v64->address + offset), + actualBytesToCopy); + } else { + writeBytes(offset, bytes, actualBytesToCopy); + } - return true; + return true; } /* @@ -621,12 +631,14 @@ IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength) * * Return the virtual address of the beginning of the buffer */ -void * IOBufferMemoryDescriptor::getBytesNoCopy() +void * +IOBufferMemoryDescriptor::getBytesNoCopy() { - if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) - return _buffer; - else - return (void *)_ranges.v64->address; + if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) { + return _buffer; + } else { + return (void *)_ranges.v64->address; + } } @@ -638,30 +650,36 @@ void * IOBufferMemoryDescriptor::getBytesNoCopy() void * IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength) { - IOVirtualAddress address; + IOVirtualAddress address; - if ((start + withLength) < start) 
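/*
 * Unsigned wraparound guard: (start + withLength) < start can only hold
 * if the sum overflowed. As an illustrative case, on a platform where
 * the operands are 32-bit, start = 0xFFFFFFF0 with withLength = 0x20
 * wraps to 0x10, which is less than start, so the request is rejected
 * before the _length bounds checks below are reached.
 */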
return 0; + if ((start + withLength) < start) { + return 0; + } - if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) - address = (IOVirtualAddress) _buffer; - else - address = _ranges.v64->address; + if (kIOMemoryTypePhysical64 == (_flags & kIOMemoryTypeMask)) { + address = (IOVirtualAddress) _buffer; + } else { + address = _ranges.v64->address; + } - if (start < _length && (start + withLength) <= _length) - return (void *)(address + start); - return 0; + if (start < _length && (start + withLength) <= _length) { + return (void *)(address + start); + } + return 0; } #ifndef __LP64__ -void * IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset, - IOByteCount * lengthOfSegment) +void * +IOBufferMemoryDescriptor::getVirtualSegment(IOByteCount offset, + IOByteCount * lengthOfSegment) { - void * bytes = getBytesNoCopy(offset, 0); - - if (bytes && lengthOfSegment) - *lengthOfSegment = _length - offset; + void * bytes = getBytesNoCopy(offset, 0); + + if (bytes && lengthOfSegment) { + *lengthOfSegment = _length - offset; + } - return bytes; + return bytes; } #endif /* !__LP64__ */ diff --git a/iokit/Kernel/IOCPU.cpp b/iokit/Kernel/IOCPU.cpp index 8ad8d76cd..88ac5d1ff 100644 --- a/iokit/Kernel/IOCPU.cpp +++ b/iokit/Kernel/IOCPU.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1999-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -51,19 +51,18 @@ extern "C" void sched_override_recommended_cores_for_sleep(void); extern "C" void sched_restore_recommended_cores_after_sleep(void); typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority, - void * param1, void * param2, void * param3, - const char * name); - -struct iocpu_platform_action_entry -{ - queue_chain_t link; - iocpu_platform_action_t action; - int32_t priority; - const char * name; - void * refcon0; - void * refcon1; - boolean_t callout_in_progress; - struct iocpu_platform_action_entry * alloc_list; + void * param1, void * param2, void * param3, + const char * name); + +struct iocpu_platform_action_entry { + queue_chain_t link; + iocpu_platform_action_t action; + int32_t priority; + const char * name; + void * refcon0; + void * refcon1; + boolean_t callout_in_progress; + struct iocpu_platform_action_entry * alloc_list; }; typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t; @@ -74,199 +73,202 @@ static OSArray *gIOCPUs; static const OSSymbol *gIOCPUStateKey; static OSString *gIOCPUStateNames[kIOCPUStateCount]; -enum -{ - kQueueSleep = 0, - kQueueWake = 1, - kQueueQuiesce = 2, - kQueueActive = 3, - kQueueHaltRestart = 4, - kQueuePanic = 5, - kQueueCount = 6 +enum{ + kQueueSleep = 0, + kQueueWake = 1, + kQueueQuiesce = 2, + kQueueActive = 3, + kQueueHaltRestart = 4, + kQueuePanic = 5, + kQueueCount = 6 }; -const OSSymbol * gIOPlatformSleepActionKey; -const OSSymbol * gIOPlatformWakeActionKey; -const OSSymbol * gIOPlatformQuiesceActionKey; -const OSSymbol * gIOPlatformActiveActionKey; -const OSSymbol * gIOPlatformHaltRestartActionKey; -const OSSymbol * gIOPlatformPanicActionKey; +const OSSymbol * gIOPlatformSleepActionKey; +const OSSymbol * gIOPlatformWakeActionKey; +const OSSymbol * gIOPlatformQuiesceActionKey; +const OSSymbol * gIOPlatformActiveActionKey; +const OSSymbol * gIOPlatformHaltRestartActionKey; +const OSSymbol * gIOPlatformPanicActionKey; -static queue_head_t gActionQueues[kQueueCount]; -static const OSSymbol * gActionSymbols[kQueueCount]; +static queue_head_t gActionQueues[kQueueCount]; +static const OSSymbol * gActionSymbols[kQueueCount]; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static void iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry) { - iocpu_platform_action_entry_t * next; + iocpu_platform_action_entry_t * next; - queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) - { - if (next->priority > entry->priority) + queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) { - queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link); - return; + if (next->priority > entry->priority) { + queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link); + return; + } } - } - queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail + queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail } static void iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry) { - remque(&entry->link); + remque(&entry->link); } static kern_return_t iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority, - void * param1, void * param2, void * param3, boolean_t allow_nested_callouts) + void * param1, void * param2, void * param3, boolean_t allow_nested_callouts) { - kern_return_t ret = KERN_SUCCESS; - kern_return_t result 
= KERN_SUCCESS; - iocpu_platform_action_entry_t * next; + kern_return_t ret = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; + iocpu_platform_action_entry_t * next; - queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) - { - uint32_t pri = (next->priority < 0) ? -next->priority : next->priority; - if ((pri >= first_priority) && (pri <= last_priority)) + queue_iterate(queue, next, iocpu_platform_action_entry_t *, link) { - //kprintf("[%p]", next->action); - if (!allow_nested_callouts && !next->callout_in_progress) - { - next->callout_in_progress = TRUE; - ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); - next->callout_in_progress = FALSE; - } - else if (allow_nested_callouts) - { - ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); - } - } - if (KERN_SUCCESS == result) - result = ret; - } - return (result); + uint32_t pri = (next->priority < 0) ? -next->priority : next->priority; + if ((pri >= first_priority) && (pri <= last_priority)) { + //kprintf("[%p]", next->action); + if (!allow_nested_callouts && !next->callout_in_progress) { + next->callout_in_progress = TRUE; + ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); + next->callout_in_progress = FALSE; + } else if (allow_nested_callouts) { + ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name); + } + } + if (KERN_SUCCESS == result) { + result = ret; + } + } + return result; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -extern "C" kern_return_t +extern "C" kern_return_t IOCPURunPlatformQuiesceActions(void) { - return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1, - NULL, NULL, NULL, TRUE)); + return iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U - 1, + NULL, NULL, NULL, TRUE); } -extern "C" kern_return_t +extern "C" kern_return_t IOCPURunPlatformActiveActions(void) { - return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1, - NULL, NULL, NULL, TRUE)); + return iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U - 1, + NULL, NULL, NULL, TRUE); } -extern "C" kern_return_t +extern "C" kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message) { - if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady); - return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1, - (void *)(uintptr_t) message, NULL, NULL, TRUE)); + if (!gActionQueues[kQueueHaltRestart].next) { + return kIOReturnNotReady; + } + return iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U - 1, + (void *)(uintptr_t) message, NULL, NULL, TRUE); } -extern "C" kern_return_t +extern "C" kern_return_t IOCPURunPlatformPanicActions(uint32_t message) { - // Don't allow nested calls of panic actions - if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady); - return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1, - (void *)(uintptr_t) message, NULL, NULL, FALSE)); + // Don't allow nested calls of panic actions + if (!gActionQueues[kQueuePanic].next) { + return kIOReturnNotReady; + } + return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1, + (void *)(uintptr_t) message, NULL, NULL, FALSE); } extern "C" kern_return_t IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len) { - PE_panic_save_context_t context = { - .psc_buffer = addr, - .psc_offset = offset, - .psc_length 
= len - }; - - // Don't allow nested calls of panic actions - if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady); - return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1, - (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE)); + PE_panic_save_context_t context = { + .psc_buffer = addr, + .psc_offset = offset, + .psc_length = len + }; + // Don't allow nested calls of panic actions + if (!gActionQueues[kQueuePanic].next) { + return kIOReturnNotReady; + } + return iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U - 1, + (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static kern_return_t +static kern_return_t IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority, - void * param1, void * param2, void * param3, - const char * service_name) + void * param1, void * param2, void * param3, + const char * service_name) { - IOReturn ret; - IOService * service = (IOService *) refcon0; - const OSSymbol * function = (const OSSymbol *) refcon1; + IOReturn ret; + IOService * service = (IOService *) refcon0; + const OSSymbol * function = (const OSSymbol *) refcon1; - kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name); + kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name); - ret = service->callPlatformFunction(function, false, - (void *)(uintptr_t) priority, param1, param2, param3); + ret = service->callPlatformFunction(function, false, + (void *)(uintptr_t) priority, param1, param2, param3); - return (ret); + return ret; } static void IOInstallServicePlatformAction(IOService * service, uint32_t qidx) { - iocpu_platform_action_entry_t * entry; - OSNumber * num; - uint32_t priority; - const OSSymbol * key = gActionSymbols[qidx]; - queue_head_t * queue = &gActionQueues[qidx]; - bool reverse; - bool uniq; - - num = OSDynamicCast(OSNumber, service->getProperty(key)); - if (!num) return; - - reverse = false; - uniq = false; - switch (qidx) - { + iocpu_platform_action_entry_t * entry; + OSNumber * num; + uint32_t priority; + const OSSymbol * key = gActionSymbols[qidx]; + queue_head_t * queue = &gActionQueues[qidx]; + bool reverse; + bool uniq; + + num = OSDynamicCast(OSNumber, service->getProperty(key)); + if (!num) { + return; + } + + reverse = false; + uniq = false; + switch (qidx) { case kQueueWake: case kQueueActive: - reverse = true; - break; + reverse = true; + break; case kQueueHaltRestart: case kQueuePanic: - uniq = true; - break; - } - if (uniq) - { - queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link) - { - if (service == entry->refcon0) return; + uniq = true; + break; + } + if (uniq) { + queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link) + { + if (service == entry->refcon0) { + return; + } + } } - } - entry = IONew(iocpu_platform_action_entry_t, 1); - entry->action = &IOServicePlatformAction; - entry->name = service->getName(); - priority = num->unsigned32BitValue(); - if (reverse) - entry->priority = -priority; - else - entry->priority = priority; - entry->refcon0 = service; - entry->refcon1 = (void *) key; - entry->callout_in_progress = FALSE; + entry = IONew(iocpu_platform_action_entry_t, 1); + entry->action = &IOServicePlatformAction; + entry->name = service->getName(); + priority = num->unsigned32BitValue(); + if (reverse) { + entry->priority = -priority; + } else { + entry->priority = priority; + } + entry->refcon0 = service; + entry->refcon1 = (void *) key; + 
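/*
 * A sketch of how the sign flip above orders the callouts, using
 * hypothetical priorities. If three services publish sleep/wake action
 * priorities 10, 20 and 30, each queue ends up sorted ascending by the
 * stored value:
 *
 *   kQueueSleep entries: 10, 20, 30    -> callouts run 10, 20, 30
 *   kQueueWake entries:  -30, -20, -10 -> callouts run 30, 20, 10
 *
 * iocpu_run_platform_actions() filters on the absolute value of the
 * stored priority but runs entries in queue order, so negating the
 * wake/active priorities reverses their order relative to sleep
 * without needing a second comparison path.
 */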
entry->callout_in_progress = FALSE; - iocpu_add_platform_action(queue, entry); + iocpu_add_platform_action(queue, entry); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -274,169 +276,181 @@ IOInstallServicePlatformAction(IOService * service, uint32_t qidx) void IOCPUInitialize(void) { - gIOCPUsLock = IOLockAlloc(); - gIOCPUs = OSArray::withCapacity(1); - - for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) - { - queue_init(&gActionQueues[qidx]); - } - - gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState"); - - gIOCPUStateNames[kIOCPUStateUnregistered] = - OSString::withCStringNoCopy("Unregistered"); - gIOCPUStateNames[kIOCPUStateUninitalized] = - OSString::withCStringNoCopy("Uninitalized"); - gIOCPUStateNames[kIOCPUStateStopped] = - OSString::withCStringNoCopy("Stopped"); - gIOCPUStateNames[kIOCPUStateRunning] = - OSString::withCStringNoCopy("Running"); - - gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep] - = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey); - gIOPlatformWakeActionKey = gActionSymbols[kQueueWake] - = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey); - gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce] - = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey); - gIOPlatformActiveActionKey = gActionSymbols[kQueueActive] - = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey); - gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart] - = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey); - gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic] - = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey); + gIOCPUsLock = IOLockAlloc(); + gIOCPUs = OSArray::withCapacity(1); + + for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) { + queue_init(&gActionQueues[qidx]); + } + + gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState"); + + gIOCPUStateNames[kIOCPUStateUnregistered] = + OSString::withCStringNoCopy("Unregistered"); + gIOCPUStateNames[kIOCPUStateUninitalized] = + OSString::withCStringNoCopy("Uninitalized"); + gIOCPUStateNames[kIOCPUStateStopped] = + OSString::withCStringNoCopy("Stopped"); + gIOCPUStateNames[kIOCPUStateRunning] = + OSString::withCStringNoCopy("Running"); + + gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep] + = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey); + gIOPlatformWakeActionKey = gActionSymbols[kQueueWake] + = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey); + gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce] + = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey); + gIOPlatformActiveActionKey = gActionSymbols[kQueueActive] + = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey); + gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart] + = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey); + gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic] + = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey); } IOReturn IOInstallServicePlatformActions(IOService * service) { - IOLockLock(gIOCPUsLock); + IOLockLock(gIOCPUsLock); - IOInstallServicePlatformAction(service, kQueueHaltRestart); - IOInstallServicePlatformAction(service, kQueuePanic); + IOInstallServicePlatformAction(service, kQueueHaltRestart); + IOInstallServicePlatformAction(service, kQueuePanic); - IOLockUnlock(gIOCPUsLock); + IOLockUnlock(gIOCPUsLock); - return (kIOReturnSuccess); + return kIOReturnSuccess; } IOReturn IORemoveServicePlatformActions(IOService * service) { - 
iocpu_platform_action_entry_t * entry; - iocpu_platform_action_entry_t * next; + iocpu_platform_action_entry_t * entry; + iocpu_platform_action_entry_t * next; - IOLockLock(gIOCPUsLock); + IOLockLock(gIOCPUsLock); - for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) - { - next = (typeof(entry)) queue_first(&gActionQueues[qidx]); - while (!queue_end(&gActionQueues[qidx], &next->link)) - { - entry = next; - next = (typeof(entry)) queue_next(&entry->link); - if (service == entry->refcon0) - { - iocpu_remove_platform_action(entry); - IODelete(entry, iocpu_platform_action_entry_t, 1); - } + for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++) { + next = (typeof(entry))queue_first(&gActionQueues[qidx]); + while (!queue_end(&gActionQueues[qidx], &next->link)) { + entry = next; + next = (typeof(entry))queue_next(&entry->link); + if (service == entry->refcon0) { + iocpu_remove_platform_action(entry); + IODelete(entry, iocpu_platform_action_entry_t, 1); + } + } } - } - IOLockUnlock(gIOCPUsLock); + IOLockUnlock(gIOCPUsLock); - return (kIOReturnSuccess); + return kIOReturnSuccess; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -kern_return_t PE_cpu_start(cpu_id_t target, - vm_offset_t start_paddr, vm_offset_t arg_paddr) +kern_return_t +PE_cpu_start(cpu_id_t target, + vm_offset_t start_paddr, vm_offset_t arg_paddr) { - IOCPU *targetCPU = (IOCPU *)target; - - if (targetCPU == NULL) return KERN_FAILURE; - return targetCPU->startCPU(start_paddr, arg_paddr); + IOCPU *targetCPU = (IOCPU *)target; + + if (targetCPU == NULL) { + return KERN_FAILURE; + } + return targetCPU->startCPU(start_paddr, arg_paddr); } -void PE_cpu_halt(cpu_id_t target) +void +PE_cpu_halt(cpu_id_t target) { - IOCPU *targetCPU = (IOCPU *)target; - - targetCPU->haltCPU(); + IOCPU *targetCPU = (IOCPU *)target; + + targetCPU->haltCPU(); } -void PE_cpu_signal(cpu_id_t source, cpu_id_t target) +void +PE_cpu_signal(cpu_id_t source, cpu_id_t target) { - IOCPU *sourceCPU = (IOCPU *)source; - IOCPU *targetCPU = (IOCPU *)target; - - sourceCPU->signalCPU(targetCPU); + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; + + sourceCPU->signalCPU(targetCPU); } -void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target) +void +PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target) { - IOCPU *sourceCPU = (IOCPU *)source; - IOCPU *targetCPU = (IOCPU *)target; + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; - sourceCPU->signalCPUDeferred(targetCPU); + sourceCPU->signalCPUDeferred(targetCPU); } -void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target) +void +PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target) { - IOCPU *sourceCPU = (IOCPU *)source; - IOCPU *targetCPU = (IOCPU *)target; + IOCPU *sourceCPU = (IOCPU *)source; + IOCPU *targetCPU = (IOCPU *)target; - sourceCPU->signalCPUCancel(targetCPU); + sourceCPU->signalCPUCancel(targetCPU); } -void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb) +void +PE_cpu_machine_init(cpu_id_t target, boolean_t bootb) { - IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); + IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target); - if (targetCPU == NULL) - panic("%s: invalid target CPU %p", __func__, target); + if (targetCPU == NULL) { + panic("%s: invalid target CPU %p", __func__, target); + } - targetCPU->initCPU(bootb); + targetCPU->initCPU(bootb); #if defined(__arm__) || defined(__arm64__) - if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false); + 
if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) { + ml_set_is_quiescing(false); + } #endif /* defined(__arm__) || defined(__arm64__) */ } -void PE_cpu_machine_quiesce(cpu_id_t target) +void +PE_cpu_machine_quiesce(cpu_id_t target) { - IOCPU *targetCPU = (IOCPU*)target; + IOCPU *targetCPU = (IOCPU*)target; #if defined(__arm__) || defined(__arm64__) - if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true); + if (targetCPU->getCPUNumber() == (UInt32)master_cpu) { + ml_set_is_quiescing(true); + } #endif /* defined(__arm__) || defined(__arm64__) */ - targetCPU->quiesceCPU(); + targetCPU->quiesceCPU(); } #if defined(__arm__) || defined(__arm64__) static perfmon_interrupt_handler_func pmi_handler = 0; -kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler) +kern_return_t +PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler) { - pmi_handler = handler; + pmi_handler = handler; - return KERN_SUCCESS; + return KERN_SUCCESS; } -void PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable) +void +PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable) { - IOCPU *targetCPU = (IOCPU*)target; + IOCPU *targetCPU = (IOCPU*)target; - if (targetCPU == nullptr) { - return; - } + if (targetCPU == nullptr) { + return; + } - if (enable) { - targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0); - targetCPU->getProvider()->enableInterrupt(1); - } else { - targetCPU->getProvider()->disableInterrupt(1); - } + if (enable) { + targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0); + targetCPU->getProvider()->enableInterrupt(1); + } else { + targetCPU->getProvider()->disableInterrupt(1); + } } #endif @@ -456,276 +470,306 @@ OSMetaClassDefineReservedUnused(IOCPU, 7); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IOCPUSleepKernel(void) +void +IOCPUSleepKernel(void) { - long cnt, numCPUs; - IOCPU *target; - IOCPU *bootCPU = NULL; - IOPMrootDomain *rootDomain = IOService::getPMRootDomain(); +#if defined(__x86_64__) + extern IOCPU *currentShutdownTarget; +#endif + long cnt, numCPUs; + IOCPU *target; + IOCPU *bootCPU = NULL; + IOPMrootDomain *rootDomain = IOService::getPMRootDomain(); - kprintf("IOCPUSleepKernel\n"); + kprintf("IOCPUSleepKernel\n"); #if defined(__arm64__) - sched_override_recommended_cores_for_sleep(); + sched_override_recommended_cores_for_sleep(); #endif - IORegistryIterator * iter; - OSOrderedSet * all; - IOService * service; + IORegistryIterator * iter; + OSOrderedSet * all; + IOService * service; + + rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions ); + + iter = IORegistryIterator::iterateOver( gIOServicePlane, + kIORegistryIterateRecursively ); + if (iter) { + all = 0; + do{ + if (all) { + all->release(); + } + all = iter->iterateAll(); + }while (!iter->isValid()); + iter->release(); + + if (all) { + while ((service = (IOService *) all->getFirstObject())) { + for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) { + IOInstallServicePlatformAction(service, qidx); + } + all->removeObject(service); + } + all->release(); + } + } + + iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U - 1, + NULL, NULL, NULL, TRUE); - rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions ); + rootDomain->tracePoint( kIOPMTracePointSleepCPUs ); - iter = IORegistryIterator::iterateOver( gIOServicePlane, - kIORegistryIterateRecursively ); - if( iter) 
- { - all = 0; - do - { - if (all) - all->release(); - all = iter->iterateAll(); + numCPUs = gIOCPUs->getCount(); +#if defined(__x86_64__) + currentShutdownTarget = NULL; +#endif + + // Sleep the CPUs. + cnt = numCPUs; + while (cnt--) { + target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); + + // We make certain that the bootCPU is the last to sleep + // We'll skip it for now, and halt it after finishing the + // non-boot CPU's. + if (target->getCPUNumber() == (UInt32)master_cpu) { + bootCPU = target; + } else if (target->getCPUState() == kIOCPUStateRunning) { +#if defined(__x86_64__) + currentShutdownTarget = target; +#endif + target->haltCPU(); + } } - while (!iter->isValid()); - iter->release(); - if (all) - { - while((service = (IOService *) all->getFirstObject())) - { - for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) - { - IOInstallServicePlatformAction(service, qidx); + assert(bootCPU != NULL); + assert(cpu_number() == master_cpu); + + console_suspend(); + + rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver ); + rootDomain->stop_watchdog_timer(); + + // Now sleep the boot CPU. + bootCPU->haltCPU(); + + rootDomain->start_watchdog_timer(); + rootDomain->tracePoint( kIOPMTracePointWakePlatformActions ); + + console_resume(); + + iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U - 1, + NULL, NULL, NULL, TRUE); + + iocpu_platform_action_entry_t * entry; + for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) { + while (!(queue_empty(&gActionQueues[qidx]))) { + entry = (typeof(entry))queue_first(&gActionQueues[qidx]); + iocpu_remove_platform_action(entry); + IODelete(entry, iocpu_platform_action_entry_t, 1); } - all->removeObject(service); - } - all->release(); - } - } - - iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1, - NULL, NULL, NULL, TRUE); - - rootDomain->tracePoint( kIOPMTracePointSleepCPUs ); - - numCPUs = gIOCPUs->getCount(); - // Sleep the CPUs. - cnt = numCPUs; - while (cnt--) - { - target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); - - // We make certain that the bootCPU is the last to sleep - // We'll skip it for now, and halt it after finishing the - // non-boot CPU's. - if (target->getCPUNumber() == (UInt32)master_cpu) - { - bootCPU = target; - } else if (target->getCPUState() == kIOCPUStateRunning) - { - target->haltCPU(); - } - } - - assert(bootCPU != NULL); - assert(cpu_number() == master_cpu); - - console_suspend(); - - rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver ); - rootDomain->stop_watchdog_timer(); - - // Now sleep the boot CPU. - bootCPU->haltCPU(); - - rootDomain->start_watchdog_timer(); - rootDomain->tracePoint( kIOPMTracePointWakePlatformActions ); - - console_resume(); - - iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1, - NULL, NULL, NULL, TRUE); - - iocpu_platform_action_entry_t * entry; - for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++) - { - while (!(queue_empty(&gActionQueues[qidx]))) - { - entry = (typeof(entry)) queue_first(&gActionQueues[qidx]); - iocpu_remove_platform_action(entry); - IODelete(entry, iocpu_platform_action_entry_t, 1); } - } - rootDomain->tracePoint( kIOPMTracePointWakeCPUs ); + rootDomain->tracePoint( kIOPMTracePointWakeCPUs ); - // Wake the other CPUs. - for (cnt = 0; cnt < numCPUs; cnt++) - { - target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); + // Wake the other CPUs. + for (cnt = 0; cnt < numCPUs; cnt++) { + target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt)); - // Skip the already-woken boot CPU. 
- if (target->getCPUNumber() != (UInt32)master_cpu) { - if (target->getCPUState() == kIOCPUStateRunning) - panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber())); - - if (target->getCPUState() == kIOCPUStateStopped) - processor_start(target->getMachProcessor()); - } - } + // Skip the already-woken boot CPU. + if (target->getCPUNumber() != (UInt32)master_cpu) { + if (target->getCPUState() == kIOCPUStateRunning) { + panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber())); + } + + if (target->getCPUState() == kIOCPUStateStopped) { + processor_start(target->getMachProcessor()); + } + } + } #if defined(__arm64__) - sched_restore_recommended_cores_after_sleep(); + sched_restore_recommended_cores_after_sleep(); #endif } -bool IOCPU::start(IOService *provider) -{ - OSData *busFrequency, *cpuFrequency, *timebaseFrequency; - - if (!super::start(provider)) return false; - - _cpuGroup = gIOCPUs; - cpuNub = provider; - - IOLockLock(gIOCPUsLock); - gIOCPUs->setObject(this); - IOLockUnlock(gIOCPUsLock); - - // Correct the bus, cpu and timebase frequencies in the device tree. - if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) { - busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); - } else { - busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8); - } - provider->setProperty("bus-frequency", busFrequency); - busFrequency->release(); - - if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) { - cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4); - } else { - cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8); - } - provider->setProperty("clock-frequency", cpuFrequency); - cpuFrequency->release(); - - timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4); - provider->setProperty("timebase-frequency", timebaseFrequency); - timebaseFrequency->release(); - - super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8); - - setCPUNumber(0); - setCPUState(kIOCPUStateUnregistered); - - return true; -} - -OSObject *IOCPU::getProperty(const OSSymbol *aKey) const -{ - if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState]; - - return super::getProperty(aKey); -} - -bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject) -{ - if (aKey == gIOCPUStateKey) { - return false; - } - - return super::setProperty(aKey, anObject); -} - -bool IOCPU::serializeProperties(OSSerialize *serialize) const +bool +IOCPU::start(IOService *provider) +{ + OSData *busFrequency, *cpuFrequency, *timebaseFrequency; + + if (!super::start(provider)) { + return false; + } + + _cpuGroup = gIOCPUs; + cpuNub = provider; + + IOLockLock(gIOCPUsLock); + gIOCPUs->setObject(this); + IOLockUnlock(gIOCPUsLock); + + // Correct the bus, cpu and timebase frequencies in the device tree. 
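/*
 * The bus and cpu frequency properties below are sized to the value:
 * frequencies under 2^32 Hz are published as 4-byte OSData from the
 * *_clock_rate_hz fields, anything larger as 8-byte OSData from the
 * *_frequency_hz fields. As an illustrative case, a 100 MHz
 * (100000000 Hz) bus clock fits in 32 bits and takes the 4-byte path,
 * while a hypothetical 5000000000 Hz value exceeds 0xFFFFFFFF and
 * takes the 8-byte path. The timebase is always published as 4 bytes.
 */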
+ if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) { + busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); + } else { + busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8); + } + provider->setProperty("bus-frequency", busFrequency); + busFrequency->release(); + + if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) { + cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4); + } else { + cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8); + } + provider->setProperty("clock-frequency", cpuFrequency); + cpuFrequency->release(); + + timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4); + provider->setProperty("timebase-frequency", timebaseFrequency); + timebaseFrequency->release(); + + super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t) * 8); + + setCPUNumber(0); + setCPUState(kIOCPUStateUnregistered); + + return true; +} + +OSObject * +IOCPU::getProperty(const OSSymbol *aKey) const +{ + if (aKey == gIOCPUStateKey) { + return gIOCPUStateNames[_cpuState]; + } + + return super::getProperty(aKey); +} + +bool +IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject) +{ + if (aKey == gIOCPUStateKey) { + return false; + } + + return super::setProperty(aKey, anObject); +} + +bool +IOCPU::serializeProperties(OSSerialize *serialize) const { bool result; OSDictionary *dict = dictionaryWithProperties(); - if (!dict) return false; + if (!dict) { + return false; + } dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]); result = dict->serialize(serialize); - dict->release(); + dict->release(); return result; } -IOReturn IOCPU::setProperties(OSObject *properties) +IOReturn +IOCPU::setProperties(OSObject *properties) { - OSDictionary *dict = OSDynamicCast(OSDictionary, properties); - OSString *stateStr; - IOReturn result; - - if (dict == 0) return kIOReturnUnsupported; - - stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey)); - if (stateStr != 0) { - result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); - if (result != kIOReturnSuccess) return result; - - if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess; - - return kIOReturnUnsupported; - } - - return kIOReturnUnsupported; + OSDictionary *dict = OSDynamicCast(OSDictionary, properties); + OSString *stateStr; + IOReturn result; + + if (dict == 0) { + return kIOReturnUnsupported; + } + + stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey)); + if (stateStr != 0) { + result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); + if (result != kIOReturnSuccess) { + return result; + } + + if (setProperty(gIOCPUStateKey, stateStr)) { + return kIOReturnSuccess; + } + + return kIOReturnUnsupported; + } + + return kIOReturnUnsupported; } -void IOCPU::signalCPU(IOCPU */*target*/) +void +IOCPU::signalCPU(IOCPU */*target*/) { } -void IOCPU::signalCPUDeferred(IOCPU *target) +void +IOCPU::signalCPUDeferred(IOCPU *target) { - // Our CPU may not support deferred IPIs, - // so send a regular IPI by default - signalCPU(target); + // Our CPU may not support deferred IPIs, + // so send a regular IPI by default + signalCPU(target); } -void IOCPU::signalCPUCancel(IOCPU */*target*/) +void +IOCPU::signalCPUCancel(IOCPU */*target*/) { - // Meant to cancel signals sent by - // signalCPUDeferred; unsupported - // by default 
+ // Meant to cancel signals sent by + // signalCPUDeferred; unsupported + // by default } -void IOCPU::enableCPUTimeBase(bool /*enable*/) +void +IOCPU::enableCPUTimeBase(bool /*enable*/) { } -UInt32 IOCPU::getCPUNumber(void) +UInt32 +IOCPU::getCPUNumber(void) { - return _cpuNumber; + return _cpuNumber; } -void IOCPU::setCPUNumber(UInt32 cpuNumber) +void +IOCPU::setCPUNumber(UInt32 cpuNumber) { - _cpuNumber = cpuNumber; - super::setProperty("IOCPUNumber", _cpuNumber, 32); + _cpuNumber = cpuNumber; + super::setProperty("IOCPUNumber", _cpuNumber, 32); } -UInt32 IOCPU::getCPUState(void) +UInt32 +IOCPU::getCPUState(void) { - return _cpuState; + return _cpuState; } -void IOCPU::setCPUState(UInt32 cpuState) +void +IOCPU::setCPUState(UInt32 cpuState) { - if (cpuState < kIOCPUStateCount) { - _cpuState = cpuState; - } + if (cpuState < kIOCPUStateCount) { + _cpuState = cpuState; + } } -OSArray *IOCPU::getCPUGroup(void) +OSArray * +IOCPU::getCPUGroup(void) { - return _cpuGroup; + return _cpuGroup; } -UInt32 IOCPU::getCPUGroupSize(void) +UInt32 +IOCPU::getCPUGroupSize(void) { - return _cpuGroup->getCount(); + return _cpuGroup->getCount(); } -processor_t IOCPU::getMachProcessor(void) +processor_t +IOCPU::getMachProcessor(void) { - return machProcessor; + return machProcessor; } @@ -746,97 +790,109 @@ OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOReturn IOCPUInterruptController::initCPUInterruptController(int sources) +IOReturn +IOCPUInterruptController::initCPUInterruptController(int sources) { return initCPUInterruptController(sources, sources); } -IOReturn IOCPUInterruptController::initCPUInterruptController(int sources, int cpus) +IOReturn +IOCPUInterruptController::initCPUInterruptController(int sources, int cpus) { - int cnt; - - if (!super::init()) return kIOReturnInvalid; + int cnt; + + if (!super::init()) { + return kIOReturnInvalid; + } - numSources = sources; - numCPUs = cpus; + numSources = sources; + numCPUs = cpus; - vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector)); - if (vectors == 0) return kIOReturnNoMemory; - bzero(vectors, numSources * sizeof(IOInterruptVector)); + vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector)); + if (vectors == 0) { + return kIOReturnNoMemory; + } + bzero(vectors, numSources * sizeof(IOInterruptVector)); + + // Allocate a lock for each vector + for (cnt = 0; cnt < numSources; cnt++) { + vectors[cnt].interruptLock = IOLockAlloc(); + if (vectors[cnt].interruptLock == NULL) { + for (cnt = 0; cnt < numSources; cnt++) { + if (vectors[cnt].interruptLock != NULL) { + IOLockFree(vectors[cnt].interruptLock); + } + } + return kIOReturnNoResources; + } + } - // Allocate a lock for each vector - for (cnt = 0; cnt < numSources; cnt++) { - vectors[cnt].interruptLock = IOLockAlloc(); - if (vectors[cnt].interruptLock == NULL) { - for (cnt = 0; cnt < numSources; cnt++) { - if (vectors[cnt].interruptLock != NULL) - IOLockFree(vectors[cnt].interruptLock); - } - return kIOReturnNoResources; - } - } - - ml_init_max_cpus(numSources); + ml_init_max_cpus(numSources); #if KPERF - /* - * kperf allocates based on the number of CPUs and requires them to all be - * accounted for. 
- */
-	boolean_t found_kperf = FALSE;
-	char kperf_config_str[64];
-	found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
-	if (found_kperf && kperf_config_str[0] != '\0') {
-		kperf_kernel_configure(kperf_config_str);
-	}
-#endif
-
-	return kIOReturnSuccess;
-}
-
-void IOCPUInterruptController::registerCPUInterruptController(void)
-{
-	registerService();
-
-	getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
-		this);
-}
-
-void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
-{
-	int cnt;
-	OSArray *controller;
-	OSArray *specifier;
-	OSData *tmpData;
-	long tmpLong;
-
-	if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
-		(service->getProperty(gIOInterruptSpecifiersKey) != 0))
-		return;
-
-	// Create the interrupt specifer array.
-	specifier = OSArray::withCapacity(numSources);
-	for (cnt = 0; cnt < numSources; cnt++) {
-		tmpLong = cnt;
-		tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
-		specifier->setObject(tmpData);
-		tmpData->release();
-	};
-
-	// Create the interrupt controller array.
-	controller = OSArray::withCapacity(numSources);
-	for (cnt = 0; cnt < numSources; cnt++) {
-		controller->setObject(gPlatformInterruptControllerName);
-	}
-
-	// Put the two arrays into the property table.
-	service->setProperty(gIOInterruptControllersKey, controller);
-	service->setProperty(gIOInterruptSpecifiersKey, specifier);
-	controller->release();
-	specifier->release();
-}
-
-void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
+	/*
+	 * kperf allocates based on the number of CPUs and requires them to all be
+	 * accounted for.
+	 */
+	boolean_t found_kperf = FALSE;
+	char kperf_config_str[64];
+	found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
+	if (found_kperf && kperf_config_str[0] != '\0') {
+		kperf_kernel_configure(kperf_config_str);
+	}
+#endif /* KPERF */
+
+	return kIOReturnSuccess;
+}
+
+void
+IOCPUInterruptController::registerCPUInterruptController(void)
+{
+	registerService();
+
+	getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
+	    this);
+}
+
+void
+IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
+{
+	int cnt;
+	OSArray *controller;
+	OSArray *specifier;
+	OSData *tmpData;
+	long tmpLong;
+
+	if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
+	    (service->getProperty(gIOInterruptSpecifiersKey) != 0)) {
+		return;
+	}
+
+	// Create the interrupt specifier array.
+	specifier = OSArray::withCapacity(numSources);
+	for (cnt = 0; cnt < numSources; cnt++) {
+		tmpLong = cnt;
+		tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
+		specifier->setObject(tmpData);
+		tmpData->release();
+	}
+
+	// Create the interrupt controller array.
+	controller = OSArray::withCapacity(numSources);
+	for (cnt = 0; cnt < numSources; cnt++) {
+		controller->setObject(gPlatformInterruptControllerName);
+	}
+
+	// Put the two arrays into the property table.
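+	// The two arrays are parallel: for source i, IOInterruptSpecifiers[i]
+	// holds the source index packed into an OSData, and
+	// IOInterruptControllers[i] names the controller that services it.
+	// Illustrative read-back sketch (not part of this change):
+	//   OSArray *specs = OSDynamicCast(OSArray,
+	//       service->getProperty(gIOInterruptSpecifiersKey));
+	//   OSData *spec = specs ? OSDynamicCast(OSData, specs->getObject(0)) : NULL;
+	//   if (spec && spec->getLength() == sizeof(long)) {
+	//       long source = *(const long *)spec->getBytesNoCopy(); // == 0
+	//   }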
+ service->setProperty(gIOInterruptControllersKey, controller); + service->setProperty(gIOInterruptSpecifiersKey, specifier); + controller->release(); + specifier->release(); +} + +void +IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu) { IOInterruptHandler handler = OSMemberFunctionCast( IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt); @@ -851,102 +907,115 @@ void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu) if (enabledCPUs == numCPUs) { IOService::cpusRunning(); thread_wakeup(this); - } + } IOUnlock(vectors[0].interruptLock); } -IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub, - int source, - void *target, - IOInterruptHandler handler, - void *refCon) +IOReturn +IOCPUInterruptController::registerInterrupt(IOService *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) { - IOInterruptVector *vector; + IOInterruptVector *vector; - if (source >= numSources) return kIOReturnNoResources; + if (source >= numSources) { + return kIOReturnNoResources; + } - vector = &vectors[source]; + vector = &vectors[source]; - // Get the lock for this vector. - IOTakeLock(vector->interruptLock); + // Get the lock for this vector. + IOTakeLock(vector->interruptLock); - // Make sure the vector is not in use. - if (vector->interruptRegistered) { - IOUnlock(vector->interruptLock); - return kIOReturnNoResources; - } + // Make sure the vector is not in use. + if (vector->interruptRegistered) { + IOUnlock(vector->interruptLock); + return kIOReturnNoResources; + } - // Fill in vector with the client's info. - vector->handler = handler; - vector->nub = nub; - vector->source = source; - vector->target = target; - vector->refCon = refCon; + // Fill in vector with the client's info. + vector->handler = handler; + vector->nub = nub; + vector->source = source; + vector->target = target; + vector->refCon = refCon; - // Get the vector ready. It starts hard disabled. - vector->interruptDisabledHard = 1; - vector->interruptDisabledSoft = 1; - vector->interruptRegistered = 1; + // Get the vector ready. It starts hard disabled. 
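+	// Two flags gate delivery in the shared IOInterruptController logic:
+	// interruptDisabledHard mirrors the state of the hardware source,
+	// while interruptDisabledSoft is the client-visible enable/disable
+	// latch. Both start set, so the vector cannot fire until it is
+	// explicitly enabled.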
+ vector->interruptDisabledHard = 1; + vector->interruptDisabledSoft = 1; + vector->interruptRegistered = 1; - IOUnlock(vector->interruptLock); + IOUnlock(vector->interruptLock); - IOTakeLock(vectors[0].interruptLock); - if (enabledCPUs != numCPUs) { - assert_wait(this, THREAD_UNINT); - IOUnlock(vectors[0].interruptLock); - thread_block(THREAD_CONTINUE_NULL); - } else - IOUnlock(vectors[0].interruptLock); + IOTakeLock(vectors[0].interruptLock); + if (enabledCPUs != numCPUs) { + assert_wait(this, THREAD_UNINT); + IOUnlock(vectors[0].interruptLock); + thread_block(THREAD_CONTINUE_NULL); + } else { + IOUnlock(vectors[0].interruptLock); + } - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/, - int /*source*/, - int *interruptType) +IOReturn +IOCPUInterruptController::getInterruptType(IOService */*nub*/, + int /*source*/, + int *interruptType) { - if (interruptType == 0) return kIOReturnBadArgument; - - *interruptType = kIOInterruptTypeLevel; - - return kIOReturnSuccess; + if (interruptType == 0) { + return kIOReturnBadArgument; + } + + *interruptType = kIOInterruptTypeLevel; + + return kIOReturnSuccess; } -IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/, - int /*source*/) +IOReturn +IOCPUInterruptController::enableInterrupt(IOService */*nub*/, + int /*source*/) { // ml_set_interrupts_enabled(true); - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/, - int /*source*/) +IOReturn +IOCPUInterruptController::disableInterrupt(IOService */*nub*/, + int /*source*/) { // ml_set_interrupts_enabled(false); - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/, - int /*source*/) +IOReturn +IOCPUInterruptController::causeInterrupt(IOService */*nub*/, + int /*source*/) { - ml_cause_interrupt(); - return kIOReturnSuccess; + ml_cause_interrupt(); + return kIOReturnSuccess; } -IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/, - IOService */*nub*/, - int source) +IOReturn +IOCPUInterruptController::handleInterrupt(void */*refCon*/, + IOService */*nub*/, + int source) { - IOInterruptVector *vector; - - vector = &vectors[source]; - - if (!vector->interruptRegistered) return kIOReturnInvalid; - - vector->handler(vector->target, vector->refCon, - vector->nub, vector->source); - - return kIOReturnSuccess; + IOInterruptVector *vector; + + vector = &vectors[source]; + + if (!vector->interruptRegistered) { + return kIOReturnInvalid; + } + + vector->handler(vector->target, vector->refCon, + vector->nub, vector->source); + + return kIOReturnSuccess; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/iokit/Kernel/IOCatalogue.cpp b/iokit/Kernel/IOCatalogue.cpp index b6c335fbf..814494af2 100644 --- a/iokit/Kernel/IOCatalogue.cpp +++ b/iokit/Kernel/IOCatalogue.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Inc. All rights reserved. + * Copyright (c) 1998 Apple Inc. All rights reserved. * * HISTORY * @@ -84,152 +84,165 @@ IORWLock * gIOCatalogLock; OSDefineMetaClassAndStructors(IOCatalogue, OSObject) static bool isModuleLoadedNoOSKextLock(OSDictionary *theKexts, - OSDictionary *theModuleDict); + OSDictionary *theModuleDict); /********************************************************************* *********************************************************************/ -void IOCatalogue::initialize(void) +void +IOCatalogue::initialize(void) { - OSArray * array; - OSString * errorString; - bool rc; + OSArray * array; + OSString * errorString; + bool rc; - extern const char * gIOKernelConfigTables; + extern const char * gIOKernelConfigTables; - array = OSDynamicCast(OSArray, OSUnserialize(gIOKernelConfigTables, &errorString)); - if (!array && errorString) { - IOLog("KernelConfigTables syntax error: %s\n", - errorString->getCStringNoCopy()); - errorString->release(); - } + array = OSDynamicCast(OSArray, OSUnserialize(gIOKernelConfigTables, &errorString)); + if (!array && errorString) { + IOLog("KernelConfigTables syntax error: %s\n", + errorString->getCStringNoCopy()); + errorString->release(); + } - gIOClassKey = OSSymbol::withCStringNoCopy( kIOClassKey ); - gIOProbeScoreKey = OSSymbol::withCStringNoCopy( kIOProbeScoreKey ); - gIOModuleIdentifierKey = OSSymbol::withCStringNoCopy( kCFBundleIdentifierKey ); + gIOClassKey = OSSymbol::withCStringNoCopy( kIOClassKey ); + gIOProbeScoreKey = OSSymbol::withCStringNoCopy( kIOProbeScoreKey ); + gIOModuleIdentifierKey = OSSymbol::withCStringNoCopy( kCFBundleIdentifierKey ); - assert( array && gIOClassKey && gIOProbeScoreKey + assert( array && gIOClassKey && gIOProbeScoreKey && gIOModuleIdentifierKey); - gIOCatalogue = new IOCatalogue; - assert(gIOCatalogue); - rc = gIOCatalogue->init(array); - assert(rc); - array->release(); + gIOCatalogue = new IOCatalogue; + assert(gIOCatalogue); + rc = gIOCatalogue->init(array); + assert(rc); + array->release(); } /********************************************************************* * Initialize the IOCatalog object. 
*********************************************************************/ -OSArray * IOCatalogue::arrayForPersonality(OSDictionary * dict) +OSArray * +IOCatalogue::arrayForPersonality(OSDictionary * dict) { - const OSSymbol * sym; + const OSSymbol * sym; - sym = OSDynamicCast(OSSymbol, dict->getObject(gIOProviderClassKey)); - if (!sym) return (0); + sym = OSDynamicCast(OSSymbol, dict->getObject(gIOProviderClassKey)); + if (!sym) { + return 0; + } - return ((OSArray *) personalities->getObject(sym)); + return (OSArray *) personalities->getObject(sym); } -void IOCatalogue::addPersonality(OSDictionary * dict) +void +IOCatalogue::addPersonality(OSDictionary * dict) { - const OSSymbol * sym; - OSArray * arr; - - sym = OSDynamicCast(OSSymbol, dict->getObject(gIOProviderClassKey)); - if (!sym) return; - arr = (OSArray *) personalities->getObject(sym); - if (arr) arr->setObject(dict); - else - { - arr = OSArray::withObjects((const OSObject **)&dict, 1, 2); - personalities->setObject(sym, arr); - arr->release(); - } + const OSSymbol * sym; + OSArray * arr; + + sym = OSDynamicCast(OSSymbol, dict->getObject(gIOProviderClassKey)); + if (!sym) { + return; + } + arr = (OSArray *) personalities->getObject(sym); + if (arr) { + arr->setObject(dict); + } else { + arr = OSArray::withObjects((const OSObject **)&dict, 1, 2); + personalities->setObject(sym, arr); + arr->release(); + } } /********************************************************************* * Initialize the IOCatalog object. *********************************************************************/ -bool IOCatalogue::init(OSArray * initArray) +bool +IOCatalogue::init(OSArray * initArray) { - OSDictionary * dict; - OSObject * obj; - - if ( !super::init() ) - return false; - - generation = 1; - - personalities = OSDictionary::withCapacity(32); - personalities->setOptions(OSCollection::kSort, OSCollection::kSort); - for (unsigned int idx = 0; (obj = initArray->getObject(idx)); idx++) - { - dict = OSDynamicCast(OSDictionary, obj); - if (!dict) continue; - OSKext::uniquePersonalityProperties(dict); - if( 0 == dict->getObject( gIOClassKey )) - { - IOLog("Missing or bad \"%s\" key\n", - gIOClassKey->getCStringNoCopy()); - continue; - } - dict->setObject("KernelConfigTable", kOSBooleanTrue); - addPersonality(dict); - } - - gIOCatalogLock = IORWLockAlloc(); - lock = gIOCatalogLock; - - return true; + OSDictionary * dict; + OSObject * obj; + + if (!super::init()) { + return false; + } + + generation = 1; + + personalities = OSDictionary::withCapacity(32); + personalities->setOptions(OSCollection::kSort, OSCollection::kSort); + for (unsigned int idx = 0; (obj = initArray->getObject(idx)); idx++) { + dict = OSDynamicCast(OSDictionary, obj); + if (!dict) { + continue; + } + OSKext::uniquePersonalityProperties(dict); + if (0 == dict->getObject( gIOClassKey )) { + IOLog("Missing or bad \"%s\" key\n", + gIOClassKey->getCStringNoCopy()); + continue; + } + dict->setObject("KernelConfigTable", kOSBooleanTrue); + addPersonality(dict); + } + + gIOCatalogLock = IORWLockAlloc(); + lock = gIOCatalogLock; + + return true; } /********************************************************************* * Release all resources used by IOCatalogue and deallocate. * This will probably never be called. 
*********************************************************************/ -void IOCatalogue::free( void ) +void +IOCatalogue::free( void ) { - panic(""); + panic(""); } /********************************************************************* *********************************************************************/ OSOrderedSet * IOCatalogue::findDrivers( - IOService * service, - SInt32 * generationCount) + IOService * service, + SInt32 * generationCount) { - OSDictionary * nextTable; - OSOrderedSet * set; - OSArray * array; - const OSMetaClass * meta; - unsigned int idx; - - set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, - (void *)gIOProbeScoreKey ); - if( !set ) - return( 0 ); + OSDictionary * nextTable; + OSOrderedSet * set; + OSArray * array; + const OSMetaClass * meta; + unsigned int idx; + + set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, + (void *)gIOProbeScoreKey ); + if (!set) { + return 0; + } - IORWLockRead(lock); + IORWLockRead(lock); - meta = service->getMetaClass(); - while (meta) - { - array = (OSArray *) personalities->getObject(meta->getClassNameSymbol()); - if (array) for (idx = 0; (nextTable = (OSDictionary *) array->getObject(idx)); idx++) - { - set->setObject(nextTable); + meta = service->getMetaClass(); + while (meta) { + array = (OSArray *) personalities->getObject(meta->getClassNameSymbol()); + if (array) { + for (idx = 0; (nextTable = (OSDictionary *) array->getObject(idx)); idx++) { + set->setObject(nextTable); + } + } + if (meta == &IOService::gMetaClass) { + break; + } + meta = meta->getSuperClass(); } - if (meta == &IOService::gMetaClass) break; - meta = meta->getSuperClass(); - } - *generationCount = getGenerationCount(); + *generationCount = getGenerationCount(); - IORWLockUnlock(lock); + IORWLockUnlock(lock); - return( set ); + return set; } /********************************************************************* @@ -237,46 +250,48 @@ IOCatalogue::findDrivers( *********************************************************************/ OSOrderedSet * IOCatalogue::findDrivers( - OSDictionary * matching, - SInt32 * generationCount) + OSDictionary * matching, + SInt32 * generationCount) { - OSCollectionIterator * iter; - OSDictionary * dict; - OSOrderedSet * set; - OSArray * array; - const OSSymbol * key; - unsigned int idx; - - OSKext::uniquePersonalityProperties(matching); - - set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, - (void *)gIOProbeScoreKey ); - if (!set) return (0); - iter = OSCollectionIterator::withCollection(personalities); - if (!iter) - { - set->release(); - return (0); - } - - IORWLockRead(lock); - while ((key = (const OSSymbol *) iter->getNextObject())) - { - array = (OSArray *) personalities->getObject(key); - if (array) for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) - { - /* This comparison must be done with only the keys in the - * "matching" dict to enable general searches. 
- */ - if ( dict->isEqualTo(matching, matching) ) - set->setObject(dict); - } - } - *generationCount = getGenerationCount(); - IORWLockUnlock(lock); - - iter->release(); - return set; + OSCollectionIterator * iter; + OSDictionary * dict; + OSOrderedSet * set; + OSArray * array; + const OSSymbol * key; + unsigned int idx; + + OSKext::uniquePersonalityProperties(matching); + + set = OSOrderedSet::withCapacity( 1, IOServiceOrdering, + (void *)gIOProbeScoreKey ); + if (!set) { + return 0; + } + iter = OSCollectionIterator::withCollection(personalities); + if (!iter) { + set->release(); + return 0; + } + + IORWLockRead(lock); + while ((key = (const OSSymbol *) iter->getNextObject())) { + array = (OSArray *) personalities->getObject(key); + if (array) { + for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) { + /* This comparison must be done with only the keys in the + * "matching" dict to enable general searches. + */ + if (dict->isEqualTo(matching, matching)) { + set->setObject(dict); + } + } + } + } + *generationCount = getGenerationCount(); + IORWLockUnlock(lock); + + iter->release(); + return set; } /********************************************************************* @@ -291,100 +306,104 @@ IOCatalogue::findDrivers( * xxx - during safe boot. That would be better implemented here. *********************************************************************/ -bool IOCatalogue::addDrivers( - OSArray * drivers, - bool doNubMatching) +bool +IOCatalogue::addDrivers( + OSArray * drivers, + bool doNubMatching) { - bool result = false; - OSCollectionIterator * iter = NULL; // must release - OSOrderedSet * set = NULL; // must release - OSObject * object = NULL; // do not release - OSArray * persons = NULL; // do not release - - persons = OSDynamicCast(OSArray, drivers); - if (!persons) { - goto finish; - } - - set = OSOrderedSet::withCapacity( 10, IOServiceOrdering, - (void *)gIOProbeScoreKey ); - if (!set) { - goto finish; - } - - iter = OSCollectionIterator::withCollection(persons); - if (!iter) { - goto finish; - } - - /* Start with success; clear it on an error. - */ - result = true; - - IORWLockWrite(lock); - while ( (object = iter->getNextObject()) ) { - - // xxx Deleted OSBundleModuleDemand check; will handle in other ways for SL - - OSDictionary * personality = OSDynamicCast(OSDictionary, object); - - SInt count; - - if (!personality) { - IOLog("IOCatalogue::addDrivers() encountered non-dictionary; bailing.\n"); - result = false; - break; - } - - OSKext::uniquePersonalityProperties(personality); - - // Add driver personality to catalogue. - - OSArray * array = arrayForPersonality(personality); - if (!array) addPersonality(personality); - else - { - count = array->getCount(); - while (count--) { - OSDictionary * driver; - - // Be sure not to double up on personalities. - driver = (OSDictionary *)array->getObject(count); - - /* Unlike in other functions, this comparison must be exact! - * The catalogue must be able to contain personalities that - * are proper supersets of others. - * Do not compare just the properties present in one driver - * personality or the other. 
- */
-            if (personality->isEqualTo(driver)) {
-                break;
+	bool result = false;
+	OSCollectionIterator * iter = NULL; // must release
+	OSOrderedSet * set = NULL; // must release
+	OSObject * object = NULL; // do not release
+	OSArray * persons = NULL;// do not release
+
+	persons = OSDynamicCast(OSArray, drivers);
+	if (!persons) {
+		goto finish;
+	}
+
+	set = OSOrderedSet::withCapacity( 10, IOServiceOrdering,
+	    (void *)gIOProbeScoreKey );
+	if (!set) {
+		goto finish;
+	}
+
+	iter = OSCollectionIterator::withCollection(persons);
+	if (!iter) {
+		goto finish;
+	}
+
+	/* Start with success; clear it on an error.
+	 */
+	result = true;
+
+	IORWLockWrite(lock);
+	while ((object = iter->getNextObject())) {
+		// xxx Deleted OSBundleModuleDemand check; will handle in other ways for SL
+
+		OSDictionary * personality = OSDynamicCast(OSDictionary, object);
+
+		SInt count;
+
+		if (!personality) {
+			IOLog("IOCatalogue::addDrivers() encountered non-dictionary; bailing.\n");
+			result = false;
+			break;
 		}
-            }
-        }
-        if (count >= 0) {
-            // its a dup
-            continue;
-        }
-        result = array->setObject(personality);
-        if (!result) {
-            break;
-        }
-    }
-
-    set->setObject(personality);
-    }
-    // Start device matching.
-    if (result && doNubMatching && (set->getCount() > 0)) {
-        IOService::catalogNewDrivers(set);
-        generation++;
-    }
-    IORWLockUnlock(lock);
+
+		OSKext::uniquePersonalityProperties(personality);
+
+		// Add driver personality to catalogue.
+
+		OSArray * array = arrayForPersonality(personality);
+		if (!array) {
+			addPersonality(personality);
+		} else {
+			count = array->getCount();
+			while (count--) {
+				OSDictionary * driver;
+
+				// Be sure not to double up on personalities.
+				driver = (OSDictionary *)array->getObject(count);
+
+				/* Unlike in other functions, this comparison must be exact!
+				 * The catalogue must be able to contain personalities that
+				 * are proper supersets of others.
+				 * Do not compare just the properties present in one driver
+				 * personality or the other.
+				 */
+				if (personality->isEqualTo(driver)) {
+					break;
+				}
+			}
+			if (count >= 0) {
+				// it's a dup
+				continue;
+			}
+			result = array->setObject(personality);
+			if (!result) {
+				break;
+			}
+		}
+
+		set->setObject(personality);
+	}
+	// Start device matching.
+ if (result && doNubMatching && (set->getCount() > 0)) { + IOService::catalogNewDrivers(set); + generation++; + } + IORWLockUnlock(lock); finish: - if (set) set->release(); - if (iter) iter->release(); + if (set) { + set->release(); + } + if (iter) { + iter->release(); + } - return result; + return result; } /********************************************************************* @@ -393,541 +412,573 @@ finish: *********************************************************************/ bool IOCatalogue::removeDrivers( - OSDictionary * matching, - bool doNubMatching) + OSDictionary * matching, + bool doNubMatching) { - OSOrderedSet * set; - OSCollectionIterator * iter; - OSDictionary * dict; - OSArray * array; - const OSSymbol * key; - unsigned int idx; - - if ( !matching ) - return false; - - set = OSOrderedSet::withCapacity(10, - IOServiceOrdering, - (void *)gIOProbeScoreKey); - if ( !set ) - return false; - iter = OSCollectionIterator::withCollection(personalities); - if (!iter) - { - set->release(); - return (false); - } - - IORWLockWrite(lock); - while ((key = (const OSSymbol *) iter->getNextObject())) - { - array = (OSArray *) personalities->getObject(key); - if (array) for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) - { - /* This comparison must be done with only the keys in the - * "matching" dict to enable general searches. - */ - if ( dict->isEqualTo(matching, matching) ) { - set->setObject(dict); - array->removeObject(idx); - idx--; - } - } - // Start device matching. - if ( doNubMatching && (set->getCount() > 0) ) { - IOService::catalogNewDrivers(set); - generation++; - } - } - IORWLockUnlock(lock); - - set->release(); - iter->release(); - - return true; + OSOrderedSet * set; + OSCollectionIterator * iter; + OSDictionary * dict; + OSArray * array; + const OSSymbol * key; + unsigned int idx; + + if (!matching) { + return false; + } + + set = OSOrderedSet::withCapacity(10, + IOServiceOrdering, + (void *)gIOProbeScoreKey); + if (!set) { + return false; + } + iter = OSCollectionIterator::withCollection(personalities); + if (!iter) { + set->release(); + return false; + } + + IORWLockWrite(lock); + while ((key = (const OSSymbol *) iter->getNextObject())) { + array = (OSArray *) personalities->getObject(key); + if (array) { + for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) { + /* This comparison must be done with only the keys in the + * "matching" dict to enable general searches. + */ + if (dict->isEqualTo(matching, matching)) { + set->setObject(dict); + array->removeObject(idx); + idx--; + } + } + } + // Start device matching. + if (doNubMatching && (set->getCount() > 0)) { + IOService::catalogNewDrivers(set); + generation++; + } + } + IORWLockUnlock(lock); + + set->release(); + iter->release(); + + return true; } // Return the generation count. 
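How the generation count is meant to be used: it lets a caller detect that the catalogue changed between building a candidate set and consuming it. An illustrative caller (not from this file; `service` stands for any IOService being matched):

	SInt32 gen;
	OSOrderedSet *candidates = gIOCatalogue->findDrivers(service, &gen);
	if (candidates != NULL) {
		// ... probe the personalities in probe-score order ...
		if (gIOCatalogue->getGenerationCount() != gen) {
			// New personalities arrived meanwhile; rebuild the set.
		}
		candidates->release();
	}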
-SInt32 IOCatalogue::getGenerationCount(void) const +SInt32 +IOCatalogue::getGenerationCount(void) const { - return( generation ); + return generation; } -bool IOCatalogue::isModuleLoaded(OSString * moduleName) const +bool +IOCatalogue::isModuleLoaded(OSString * moduleName) const { - return isModuleLoaded(moduleName->getCStringNoCopy()); + return isModuleLoaded(moduleName->getCStringNoCopy()); } -bool IOCatalogue::isModuleLoaded(const char * moduleName) const +bool +IOCatalogue::isModuleLoaded(const char * moduleName) const { - OSReturn ret; - ret = OSKext::loadKextWithIdentifier(moduleName); - if (kOSKextReturnDeferred == ret) { - // a request has been queued but the module isn't necessarily - // loaded yet, so stall. - return false; - } - // module is present or never will be - return true; + OSReturn ret; + ret = OSKext::loadKextWithIdentifier(moduleName); + if (kOSKextReturnDeferred == ret) { + // a request has been queued but the module isn't necessarily + // loaded yet, so stall. + return false; + } + // module is present or never will be + return true; } // Check to see if module has been loaded already. -bool IOCatalogue::isModuleLoaded(OSDictionary * driver) const +bool +IOCatalogue::isModuleLoaded(OSDictionary * driver) const { - OSString * moduleName = NULL; - OSString * publisherName = NULL; - - if ( !driver ) - return false; - - /* The personalities of codeless kexts often contain the bundle ID of the - * kext they reference, and not the bundle ID of the codeless kext itself. - * The prelinked kernel needs to know the bundle ID of the codeless kext - * so it can include these personalities, so OSKext stores that bundle ID - * in the IOPersonalityPublisher key, and we record it as requested here. - */ - publisherName = OSDynamicCast(OSString, - driver->getObject(kIOPersonalityPublisherKey)); - OSKext::recordIdentifierRequest(publisherName); - - moduleName = OSDynamicCast(OSString, driver->getObject(gIOModuleIdentifierKey)); - if ( moduleName ) - return isModuleLoaded(moduleName); - - /* If a personality doesn't hold the "CFBundleIdentifier" key - * it is assumed to be an "in-kernel" driver. - */ - return true; + OSString * moduleName = NULL; + OSString * publisherName = NULL; + + if (!driver) { + return false; + } + + /* The personalities of codeless kexts often contain the bundle ID of the + * kext they reference, and not the bundle ID of the codeless kext itself. + * The prelinked kernel needs to know the bundle ID of the codeless kext + * so it can include these personalities, so OSKext stores that bundle ID + * in the IOPersonalityPublisher key, and we record it as requested here. + */ + publisherName = OSDynamicCast(OSString, + driver->getObject(kIOPersonalityPublisherKey)); + OSKext::recordIdentifierRequest(publisherName); + + moduleName = OSDynamicCast(OSString, driver->getObject(gIOModuleIdentifierKey)); + if (moduleName) { + return isModuleLoaded(moduleName); + } + + /* If a personality doesn't hold the "CFBundleIdentifier" key + * it is assumed to be an "in-kernel" driver. + */ + return true; } /* This function is called after a module has been loaded. * Is invoked from user client call, ultimately from IOKitLib's * IOCatalogueModuleLoaded(). Sent from kextd. 
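 * In effect the notification becomes a one-key matching dictionary,
 * roughly (bundle ID illustrative):
 *
 *     { "CFBundleIdentifier" = "com.example.driver" }
 *
 * so startMatching() below re-runs matching only for personalities that
 * name the newly loaded kext.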
*/ -void IOCatalogue::moduleHasLoaded(OSString * moduleName) +void +IOCatalogue::moduleHasLoaded(OSString * moduleName) { - OSDictionary * dict; + OSDictionary * dict; - dict = OSDictionary::withCapacity(2); - dict->setObject(gIOModuleIdentifierKey, moduleName); - startMatching(dict); - dict->release(); + dict = OSDictionary::withCapacity(2); + dict->setObject(gIOModuleIdentifierKey, moduleName); + startMatching(dict); + dict->release(); - (void) OSKext::setDeferredLoadSucceeded(); - (void) OSKext::considerRebuildOfPrelinkedKernel(); + (void) OSKext::setDeferredLoadSucceeded(); + (void) OSKext::considerRebuildOfPrelinkedKernel(); } -void IOCatalogue::moduleHasLoaded(const char * moduleName) +void +IOCatalogue::moduleHasLoaded(const char * moduleName) { - OSString * name; + OSString * name; - name = OSString::withCString(moduleName); - moduleHasLoaded(name); - name->release(); + name = OSString::withCString(moduleName); + moduleHasLoaded(name); + name->release(); } // xxx - return is really OSReturn/kern_return_t -IOReturn IOCatalogue::unloadModule(OSString * moduleName) const +IOReturn +IOCatalogue::unloadModule(OSString * moduleName) const { - return OSKext::removeKextWithIdentifier(moduleName->getCStringNoCopy()); + return OSKext::removeKextWithIdentifier(moduleName->getCStringNoCopy()); } -IOReturn IOCatalogue::_terminateDrivers(OSDictionary * matching) +IOReturn +IOCatalogue::_terminateDrivers(OSDictionary * matching) { - OSDictionary * dict; - OSIterator * iter; - IOService * service; - IOReturn ret; - - if ( !matching ) - return kIOReturnBadArgument; - - ret = kIOReturnSuccess; - dict = 0; - iter = IORegistryIterator::iterateOver(gIOServicePlane, - kIORegistryIterateRecursively); - if ( !iter ) - return kIOReturnNoMemory; - - OSKext::uniquePersonalityProperties( matching ); - - // terminate instances. - do { - iter->reset(); - while( (service = (IOService *)iter->getNextObject()) ) { - dict = service->getPropertyTable(); - if ( !dict ) - continue; - - /* Terminate only for personalities that match the matching dictionary. - * This comparison must be done with only the keys in the - * "matching" dict to enable general matching. - */ - if ( !dict->isEqualTo(matching, matching) ) - continue; - - if ( !service->terminate(kIOServiceRequired|kIOServiceSynchronous) ) { - ret = kIOReturnUnsupported; - break; - } - } - } while( !service && !iter->isValid()); - iter->release(); - - return ret; + OSDictionary * dict; + OSIterator * iter; + IOService * service; + IOReturn ret; + + if (!matching) { + return kIOReturnBadArgument; + } + + ret = kIOReturnSuccess; + dict = 0; + iter = IORegistryIterator::iterateOver(gIOServicePlane, + kIORegistryIterateRecursively); + if (!iter) { + return kIOReturnNoMemory; + } + + OSKext::uniquePersonalityProperties( matching ); + + // terminate instances. + do { + iter->reset(); + while ((service = (IOService *)iter->getNextObject())) { + dict = service->getPropertyTable(); + if (!dict) { + continue; + } + + /* Terminate only for personalities that match the matching dictionary. + * This comparison must be done with only the keys in the + * "matching" dict to enable general matching. 
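+			 * (Example: an illustrative matching dict of just
+			 * { "IOClass" = "AppleExampleDriver" } terminates every service
+			 * whose property table carries that IOClass value, whatever
+			 * other keys the table holds, because
+			 * isEqualTo(matching, matching) compares only the keys
+			 * present in "matching".)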
+ */ + if (!dict->isEqualTo(matching, matching)) { + continue; + } + + if (!service->terminate(kIOServiceRequired | kIOServiceSynchronous)) { + ret = kIOReturnUnsupported; + break; + } + } + } while (!service && !iter->isValid()); + iter->release(); + + return ret; } -IOReturn IOCatalogue::_removeDrivers(OSDictionary * matching) +IOReturn +IOCatalogue::_removeDrivers(OSDictionary * matching) { - IOReturn ret = kIOReturnSuccess; - OSCollectionIterator * iter; - OSDictionary * dict; - OSArray * array; - const OSSymbol * key; - unsigned int idx; - - // remove configs from catalog. - - iter = OSCollectionIterator::withCollection(personalities); - if (!iter) return (kIOReturnNoMemory); - - while ((key = (const OSSymbol *) iter->getNextObject())) - { - array = (OSArray *) personalities->getObject(key); - if (array) for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) - { - - /* Remove from the catalogue's array any personalities - * that match the matching dictionary. - * This comparison must be done with only the keys in the - * "matching" dict to enable general matching. - */ - if (dict->isEqualTo(matching, matching)) - { - array->removeObject(idx); - idx--; - } - } - } - iter->release(); - - return ret; + IOReturn ret = kIOReturnSuccess; + OSCollectionIterator * iter; + OSDictionary * dict; + OSArray * array; + const OSSymbol * key; + unsigned int idx; + + // remove configs from catalog. + + iter = OSCollectionIterator::withCollection(personalities); + if (!iter) { + return kIOReturnNoMemory; + } + + while ((key = (const OSSymbol *) iter->getNextObject())) { + array = (OSArray *) personalities->getObject(key); + if (array) { + for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) { + /* Remove from the catalogue's array any personalities + * that match the matching dictionary. + * This comparison must be done with only the keys in the + * "matching" dict to enable general matching. + */ + if (dict->isEqualTo(matching, matching)) { + array->removeObject(idx); + idx--; + } + } + } + } + iter->release(); + + return ret; } -IOReturn IOCatalogue::terminateDrivers(OSDictionary * matching) +IOReturn +IOCatalogue::terminateDrivers(OSDictionary * matching) { - IOReturn ret; + IOReturn ret; - ret = _terminateDrivers(matching); - IORWLockWrite(lock); - if (kIOReturnSuccess == ret) - ret = _removeDrivers(matching); - IORWLockUnlock(lock); + ret = _terminateDrivers(matching); + IORWLockWrite(lock); + if (kIOReturnSuccess == ret) { + ret = _removeDrivers(matching); + } + IORWLockUnlock(lock); - return ret; + return ret; } -IOReturn IOCatalogue::terminateDriversForModule( - OSString * moduleName, - bool unload) +IOReturn +IOCatalogue::terminateDriversForModule( + OSString * moduleName, + bool unload) { - IOReturn ret; - OSDictionary * dict; - bool isLoaded = false; - - /* Check first if the kext currently has any linkage dependents; - * in such a case the unload would fail so let's not terminate any - * IOServices (since doing so typically results in a panic when there - * are loaded dependencies). Note that we aren't locking the kext here - * so it might lose or gain dependents by the time we call unloadModule(); - * I think that's ok, our unload can fail if a kext comes in on top of - * this one even after we've torn down IOService objects. Conversely, - * if we fail the unload here and then lose a library, the autounload - * thread will get us in short order. 
- */ - if (OSKext::isKextWithIdentifierLoaded(moduleName->getCStringNoCopy())) { - - isLoaded = true; - - if (!OSKext::canUnloadKextWithIdentifier(moduleName, - /* checkClasses */ false)) { - ret = kOSKextReturnInUse; - goto finish; - } - } - dict = OSDictionary::withCapacity(1); - if (!dict) { - ret = kIOReturnNoMemory; - goto finish; - } - - dict->setObject(gIOModuleIdentifierKey, moduleName); - - ret = _terminateDrivers(dict); - - /* No goto between IOLock calls! - */ - IORWLockWrite(lock); - if (kIOReturnSuccess == ret) { - ret = _removeDrivers(dict); - } - - // Unload the module itself. - if (unload && isLoaded && ret == kIOReturnSuccess) { - ret = unloadModule(moduleName); - } - - IORWLockUnlock(lock); - - dict->release(); + IOReturn ret; + OSDictionary * dict; + bool isLoaded = false; + + /* Check first if the kext currently has any linkage dependents; + * in such a case the unload would fail so let's not terminate any + * IOServices (since doing so typically results in a panic when there + * are loaded dependencies). Note that we aren't locking the kext here + * so it might lose or gain dependents by the time we call unloadModule(); + * I think that's ok, our unload can fail if a kext comes in on top of + * this one even after we've torn down IOService objects. Conversely, + * if we fail the unload here and then lose a library, the autounload + * thread will get us in short order. + */ + if (OSKext::isKextWithIdentifierLoaded(moduleName->getCStringNoCopy())) { + isLoaded = true; + + if (!OSKext::canUnloadKextWithIdentifier(moduleName, + /* checkClasses */ false)) { + ret = kOSKextReturnInUse; + goto finish; + } + } + dict = OSDictionary::withCapacity(1); + if (!dict) { + ret = kIOReturnNoMemory; + goto finish; + } + + dict->setObject(gIOModuleIdentifierKey, moduleName); + + ret = _terminateDrivers(dict); + + /* No goto between IOLock calls! + */ + IORWLockWrite(lock); + if (kIOReturnSuccess == ret) { + ret = _removeDrivers(dict); + } + + // Unload the module itself. 
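+	// All three conditions must hold before unloading: the caller asked
+	// for the unload, the kext was actually loaded when checked above,
+	// and both the terminate pass and the personality removal succeeded.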
+ if (unload && isLoaded && ret == kIOReturnSuccess) { + ret = unloadModule(moduleName); + } + + IORWLockUnlock(lock); + + dict->release(); finish: - return ret; + return ret; } -IOReturn IOCatalogue::terminateDriversForModule( - const char * moduleName, - bool unload) +IOReturn +IOCatalogue::terminateDriversForModule( + const char * moduleName, + bool unload) { - OSString * name; - IOReturn ret; + OSString * name; + IOReturn ret; - name = OSString::withCString(moduleName); - if ( !name ) - return kIOReturnNoMemory; + name = OSString::withCString(moduleName); + if (!name) { + return kIOReturnNoMemory; + } - ret = terminateDriversForModule(name, unload); - name->release(); + ret = terminateDriversForModule(name, unload); + name->release(); - return ret; + return ret; } -bool IOCatalogue::startMatching( OSDictionary * matching ) +bool +IOCatalogue::startMatching( OSDictionary * matching ) { - OSCollectionIterator * iter; - OSDictionary * dict; - OSOrderedSet * set; - OSArray * array; - const OSSymbol * key; - unsigned int idx; - - if ( !matching ) - return false; - - set = OSOrderedSet::withCapacity(10, IOServiceOrdering, - (void *)gIOProbeScoreKey); - if ( !set ) - return false; - - iter = OSCollectionIterator::withCollection(personalities); - if (!iter) - { - set->release(); - return false; - } - - IORWLockRead(lock); - - while ((key = (const OSSymbol *) iter->getNextObject())) - { - array = (OSArray *) personalities->getObject(key); - if (array) for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) - { - /* This comparison must be done with only the keys in the - * "matching" dict to enable general matching. - */ - if (dict->isEqualTo(matching, matching)) { - set->setObject(dict); - } - } - } - - // Start device matching. - if ( set->getCount() > 0 ) { - IOService::catalogNewDrivers(set); - generation++; - } - - IORWLockUnlock(lock); - - set->release(); - iter->release(); - - return true; + OSCollectionIterator * iter; + OSDictionary * dict; + OSOrderedSet * set; + OSArray * array; + const OSSymbol * key; + unsigned int idx; + + if (!matching) { + return false; + } + + set = OSOrderedSet::withCapacity(10, IOServiceOrdering, + (void *)gIOProbeScoreKey); + if (!set) { + return false; + } + + iter = OSCollectionIterator::withCollection(personalities); + if (!iter) { + set->release(); + return false; + } + + IORWLockRead(lock); + + while ((key = (const OSSymbol *) iter->getNextObject())) { + array = (OSArray *) personalities->getObject(key); + if (array) { + for (idx = 0; (dict = (OSDictionary *) array->getObject(idx)); idx++) { + /* This comparison must be done with only the keys in the + * "matching" dict to enable general matching. + */ + if (dict->isEqualTo(matching, matching)) { + set->setObject(dict); + } + } + } + } + + // Start device matching. 
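+	// Hand every matched personality to the IOService matching engine in
+	// one batch; bumping the generation lets concurrent callers of
+	// getGenerationCount() observe that the catalogue changed.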
+ if (set->getCount() > 0) { + IOService::catalogNewDrivers(set); + generation++; + } + + IORWLockUnlock(lock); + + set->release(); + iter->release(); + + return true; } -void IOCatalogue::reset(void) +void +IOCatalogue::reset(void) { - IOCatalogue::resetAndAddDrivers(/* no drivers; true reset */ NULL, - /* doMatching */ false); - return; + IOCatalogue::resetAndAddDrivers(/* no drivers; true reset */ NULL, + /* doMatching */ false); + return; } -bool IOCatalogue::resetAndAddDrivers(OSArray * drivers, bool doNubMatching) +bool +IOCatalogue::resetAndAddDrivers(OSArray * drivers, bool doNubMatching) { - bool result = false; - OSArray * newPersonalities = NULL; // do not release - OSCollectionIterator * iter = NULL; // must release - OSOrderedSet * matchSet = NULL; // must release - const OSSymbol * key; - OSArray * array; - OSDictionary * thisNewPersonality = NULL; // do not release - OSDictionary * thisOldPersonality = NULL; // do not release - OSDictionary * myKexts = NULL; // must release - signed int idx, newIdx; - - if (drivers) { - newPersonalities = OSDynamicCast(OSArray, drivers); - if (!newPersonalities) { - goto finish; - } - } - matchSet = OSOrderedSet::withCapacity(10, IOServiceOrdering, - (void *)gIOProbeScoreKey); - if (!matchSet) { - goto finish; - } - iter = OSCollectionIterator::withCollection(personalities); - if (!iter) { - goto finish; - } - - /* need copy of loaded kexts so we can check if for loaded modules without - * taking the OSKext lock. There is a potential of deadlocking if we get - * an OSKext via the normal path. See 14672140. - */ - myKexts = OSKext::copyKexts(); - - result = true; - - IOLog("Resetting IOCatalogue.\n"); - - /* No goto finish from here to unlock. - */ - IORWLockWrite(lock); - - while ((key = (const OSSymbol *) iter->getNextObject())) - { - array = (OSArray *) personalities->getObject(key); - if (!array) continue; - - for (idx = 0; - (thisOldPersonality = (OSDictionary *) array->getObject(idx)); - idx++) - { - if (thisOldPersonality->getObject("KernelConfigTable")) continue; - thisNewPersonality = NULL; - - if (newPersonalities) { - for (newIdx = 0; - (thisNewPersonality = (OSDictionary *) newPersonalities->getObject(newIdx)); - newIdx++) - { - /* Unlike in other functions, this comparison must be exact! - * The catalogue must be able to contain personalities that - * are proper supersets of others. - * Do not compare just the properties present in one driver - * personality or the other. - */ - if (OSDynamicCast(OSDictionary, thisNewPersonality) == NULL) { - /* skip thisNewPersonality if it is not an OSDictionary */ - continue; - } - if (thisNewPersonality->isEqualTo(thisOldPersonality)) - break; - } - } - if (thisNewPersonality) { - // dup, ignore - newPersonalities->removeObject(newIdx); - } - else { - // not in new set - remove - // only remove dictionary if this module in not loaded - 9953845 - if ( isModuleLoadedNoOSKextLock(myKexts, thisOldPersonality) == false ) { - if (matchSet) { - matchSet->setObject(thisOldPersonality); - } - array->removeObject(idx); - idx--; - } - } - } // for... - } // while... 
-
-    // add new
-    if (newPersonalities) {
-        for (newIdx = 0;
-             (thisNewPersonality = (OSDictionary *) newPersonalities->getObject(newIdx));
-             newIdx++)
-        {
-            if (OSDynamicCast(OSDictionary, thisNewPersonality) == NULL) {
-                /* skip thisNewPersonality if it is not an OSDictionary */
-                continue;
-            }
-
-            OSKext::uniquePersonalityProperties(thisNewPersonality);
-            addPersonality(thisNewPersonality);
-            matchSet->setObject(thisNewPersonality);
-        }
-    }
-
-    /* Finally, start device matching on all new & removed personalities.
-     */
-    if (result && doNubMatching && (matchSet->getCount() > 0)) {
-        IOService::catalogNewDrivers(matchSet);
-        generation++;
-    }
-
-    IORWLockUnlock(lock);
+	bool result = false;
+	OSArray * newPersonalities = NULL;// do not release
+	OSCollectionIterator * iter = NULL;// must release
+	OSOrderedSet * matchSet = NULL;// must release
+	const OSSymbol * key;
+	OSArray * array;
+	OSDictionary * thisNewPersonality = NULL;// do not release
+	OSDictionary * thisOldPersonality = NULL;// do not release
+	OSDictionary * myKexts = NULL;// must release
+	signed int idx, newIdx;
+
+	if (drivers) {
+		newPersonalities = OSDynamicCast(OSArray, drivers);
+		if (!newPersonalities) {
+			goto finish;
+		}
+	}
+	matchSet = OSOrderedSet::withCapacity(10, IOServiceOrdering,
+	    (void *)gIOProbeScoreKey);
+	if (!matchSet) {
+		goto finish;
+	}
+	iter = OSCollectionIterator::withCollection(personalities);
+	if (!iter) {
+		goto finish;
+	}
+
+	/* need a copy of the loaded kexts so we can check for loaded modules without
+	 * taking the OSKext lock. There is a potential for deadlock if we get
+	 * an OSKext via the normal path. See 14672140.
+	 */
+	myKexts = OSKext::copyKexts();
+
+	result = true;
+
+	IOLog("Resetting IOCatalogue.\n");
+
+	/* No goto finish from here to unlock.
+	 */
+	IORWLockWrite(lock);
+
+	while ((key = (const OSSymbol *) iter->getNextObject())) {
+		array = (OSArray *) personalities->getObject(key);
+		if (!array) {
+			continue;
+		}
+
+		for (idx = 0;
+		    (thisOldPersonality = (OSDictionary *) array->getObject(idx));
+		    idx++) {
+			if (thisOldPersonality->getObject("KernelConfigTable")) {
+				continue;
+			}
+			thisNewPersonality = NULL;
+
+			if (newPersonalities) {
+				for (newIdx = 0;
+				    (thisNewPersonality = (OSDictionary *) newPersonalities->getObject(newIdx));
+				    newIdx++) {
+					/* Unlike in other functions, this comparison must be exact!
+					 * The catalogue must be able to contain personalities that
+					 * are proper supersets of others.
+					 * Do not compare just the properties present in one driver
+					 * personality or the other.
+					 */
+					if (OSDynamicCast(OSDictionary, thisNewPersonality) == NULL) {
+						/* skip thisNewPersonality if it is not an OSDictionary */
+						continue;
+					}
+					if (thisNewPersonality->isEqualTo(thisOldPersonality)) {
+						break;
+					}
+				}
+			}
+			if (thisNewPersonality) {
+				// dup, ignore
+				newPersonalities->removeObject(newIdx);
+			} else {
+				// not in new set - remove
+				// only remove the dictionary if this module is not loaded - 9953845
+				if (isModuleLoadedNoOSKextLock(myKexts, thisOldPersonality) == false) {
+					if (matchSet) {
+						matchSet->setObject(thisOldPersonality);
+					}
+					array->removeObject(idx);
+					idx--;
+				}
+			}
+		} // for...
+	} // while...
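+	// Phase two of the reset: anything still left in newPersonalities was
+	// not already present (duplicates were pulled out of the array above),
+	// so each survivor is uniqued, filed under its provider class, and
+	// queued for matching.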
+ + // add new + if (newPersonalities) { + for (newIdx = 0; + (thisNewPersonality = (OSDictionary *) newPersonalities->getObject(newIdx)); + newIdx++) { + if (OSDynamicCast(OSDictionary, thisNewPersonality) == NULL) { + /* skip thisNewPersonality if it is not an OSDictionary */ + continue; + } + + OSKext::uniquePersonalityProperties(thisNewPersonality); + addPersonality(thisNewPersonality); + matchSet->setObject(thisNewPersonality); + } + } + + /* Finally, start device matching on all new & removed personalities. + */ + if (result && doNubMatching && (matchSet->getCount() > 0)) { + IOService::catalogNewDrivers(matchSet); + generation++; + } + + IORWLockUnlock(lock); finish: - if (matchSet) matchSet->release(); - if (iter) iter->release(); - if (myKexts) myKexts->release(); + if (matchSet) { + matchSet->release(); + } + if (iter) { + iter->release(); + } + if (myKexts) { + myKexts->release(); + } - return result; + return result; } -bool IOCatalogue::serialize(OSSerialize * s) const +bool +IOCatalogue::serialize(OSSerialize * s) const { - if ( !s ) - return false; + if (!s) { + return false; + } - return super::serialize(s); + return super::serialize(s); } -bool IOCatalogue::serializeData(IOOptionBits kind, OSSerialize * s) const +bool +IOCatalogue::serializeData(IOOptionBits kind, OSSerialize * s) const { - kern_return_t kr = kIOReturnSuccess; + kern_return_t kr = kIOReturnSuccess; - switch ( kind ) - { - case kIOCatalogGetContents: - kr = KERN_NOT_SUPPORTED; - break; + switch (kind) { + case kIOCatalogGetContents: + kr = KERN_NOT_SUPPORTED; + break; - case kIOCatalogGetModuleDemandList: - kr = KERN_NOT_SUPPORTED; - break; + case kIOCatalogGetModuleDemandList: + kr = KERN_NOT_SUPPORTED; + break; - case kIOCatalogGetCacheMissList: - kr = KERN_NOT_SUPPORTED; - break; + case kIOCatalogGetCacheMissList: + kr = KERN_NOT_SUPPORTED; + break; - case kIOCatalogGetROMMkextList: - kr = KERN_NOT_SUPPORTED; - break; + case kIOCatalogGetROMMkextList: + kr = KERN_NOT_SUPPORTED; + break; - default: - kr = kIOReturnBadArgument; - break; - } + default: + kr = kIOReturnBadArgument; + break; + } - return kr; + return kr; } -/* isModuleLoadedNoOSKextLock - used to check to see if a kext is loaded +/* isModuleLoadedNoOSKextLock - used to check to see if a kext is loaded * without taking the OSKext lock. We use this to avoid the problem * where taking the IOCatalog lock then the OSKext lock will dealock when * a kext load or unload is happening at the same time as IOCatalog changing. 
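The helper reworked in the next hunk exists for lock ordering: resetAndAddDrivers() snapshots the loaded-kext table once with OSKext::copyKexts() and afterwards answers "is this personality's kext loaded?" from that snapshot, instead of taking the OSKext lock while the IOCatalogue lock is held (the deadlock scenario cited as 14672140 above). A condensed sketch of the calling pattern, using the names from resetAndAddDrivers:

	OSDictionary *myKexts = OSKext::copyKexts(); // bundle ID -> OSKext snapshot
	IORWLockWrite(lock);
	// ... for each catalogued personality not in the new set ...
	if (!isModuleLoadedNoOSKextLock(myKexts, thisOldPersonality)) {
		// Kext not resident: safe to drop this personality.
	}
	IORWLockUnlock(lock);
	myKexts->release();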
@@ -936,30 +987,31 @@ bool IOCatalogue::serializeData(IOOptionBits kind, OSSerialize * s) const
 * key set to the kext bundle ID and value set to an OSKext object
 * theModuleDict - is an IOKit personality dictionary for a given module (kext)
 */
-static bool isModuleLoadedNoOSKextLock(OSDictionary *theKexts,
-                                       OSDictionary *theModuleDict)
+static bool
+isModuleLoadedNoOSKextLock(OSDictionary *theKexts,
+    OSDictionary *theModuleDict)
 {
-    bool myResult = false;
-    const OSString * myBundleID = NULL; // do not release
-    OSKext * myKext = NULL; // do not release
-
-    if (theKexts == NULL || theModuleDict == NULL) {
-        return( myResult );
-    }
-
-    // gIOModuleIdentifierKey is "CFBundleIdentifier"
-    myBundleID = OSDynamicCast(OSString,
-                               theModuleDict->getObject(gIOModuleIdentifierKey));
-    if (myBundleID == NULL) {
-        return( myResult );
-    }
-
-    myKext = OSDynamicCast(OSKext, theKexts->getObject(myBundleID->getCStringNoCopy()));
-    if (myKext) {
-        myResult = myKext->isLoaded();
-    }
-
-    return( myResult );
+	bool myResult = false;
+	const OSString * myBundleID = NULL;// do not release
+	OSKext * myKext = NULL; // do not release
+
+	if (theKexts == NULL || theModuleDict == NULL) {
+		return myResult;
+	}
+
+	// gIOModuleIdentifierKey is "CFBundleIdentifier"
+	myBundleID = OSDynamicCast(OSString,
+	    theModuleDict->getObject(gIOModuleIdentifierKey));
+	if (myBundleID == NULL) {
+		return myResult;
+	}
+
+	myKext = OSDynamicCast(OSKext, theKexts->getObject(myBundleID->getCStringNoCopy()));
+	if (myKext) {
+		myResult = myKext->isLoaded();
+	}
+
+	return myResult;
 }
 
 
@@ -967,10 +1019,10 @@ static bool isModuleLoadedNoOSKextLock(OSDictionary *theKexts,
 #pragma mark Obsolete Kext Loading Stuff
 #endif
 /*********************************************************************
-**********************************************************************
-*** BINARY COMPATIBILITY SECTION ***
-**********************************************************************
-**********************************************************************
-* These functions are no longer used are necessary for C++ binary
-* compatibility on i386.
-**********************************************************************/
+ **********************************************************************
+ *** BINARY COMPATIBILITY SECTION ***
+ **********************************************************************
+ **********************************************************************
+ * These functions are no longer used but are necessary for C++ binary
+ * compatibility on i386.
+ **********************************************************************/
diff --git a/iokit/Kernel/IOCommand.cpp b/iokit/Kernel/IOCommand.cpp
index ac80d9da0..b08f1eef5 100644
--- a/iokit/Kernel/IOCommand.cpp
+++ b/iokit/Kernel/IOCommand.cpp
@@ -2,7 +2,7 @@
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- *
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,12 +46,13 @@ OSDefineMetaClassAndStructors(IOCommand, OSObject); // init - initialize our data structures //-------------------------------------------------------------------------- -bool IOCommand::init(void) +bool +IOCommand::init(void) { - if (super::init()) { - queue_init(&fCommandChain); - return true; - } - else - return false; + if (super::init()) { + queue_init(&fCommandChain); + return true; + } else { + return false; + } } diff --git a/iokit/Kernel/IOCommandGate.cpp b/iokit/Kernel/IOCommandGate.cpp index e69457efe..265b2f786 100644 --- a/iokit/Kernel/IOCommandGate.cpp +++ b/iokit/Kernel/IOCommandGate.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000, 2009-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -68,245 +68,266 @@ do { \ #endif /* IOKITSTATS */ -bool IOCommandGate::init(OSObject *inOwner, Action inAction) +bool +IOCommandGate::init(OSObject *inOwner, Action inAction) { - bool res = super::init(inOwner, (IOEventSource::Action) inAction); - if (res) { - IOStatisticsInitializeCounter(); - } + bool res = super::init(inOwner, (IOEventSource::Action) inAction); + if (res) { + IOStatisticsInitializeCounter(); + } - return res; + return res; } IOCommandGate * IOCommandGate::commandGate(OSObject *inOwner, Action inAction) { - IOCommandGate *me = new IOCommandGate; + IOCommandGate *me = new IOCommandGate; - if (me && !me->init(inOwner, inAction)) { - me->release(); - return 0; - } + if (me && !me->init(inOwner, inAction)) { + me->release(); + return 0; + } - return me; + return me; } -/* virtual */ void IOCommandGate::disable() +/* virtual */ void +IOCommandGate::disable() { - if (workLoop && !workLoop->inGate()) - OSReportWithBacktrace("IOCommandGate::disable() called when not gated"); + if (workLoop && !workLoop->inGate()) { + OSReportWithBacktrace("IOCommandGate::disable() called when not gated"); + } - super::disable(); + super::disable(); } -/* virtual */ void IOCommandGate::enable() +/* virtual */ void +IOCommandGate::enable() { - if (workLoop) { - closeGate(); - super::enable(); - wakeupGate(&enabled, /* oneThread */ false); // Unblock sleeping threads - openGate(); - } + if (workLoop) { + closeGate(); + super::enable(); + wakeupGate(&enabled, /* oneThread */ false); // Unblock sleeping threads + openGate(); + } } -/* virtual */ void IOCommandGate::free() +/* virtual */ void +IOCommandGate::free() { - if (workLoop) setWorkLoop(0); - super::free(); + if (workLoop) { + setWorkLoop(0); + } + super::free(); } -enum -{ - kSleepersRemoved = 0x00000001, - kSleepersWaitEnabled = 0x00000002, - kSleepersActions = 0x00000100, - kSleepersActionsMask = 0xffffff00, +enum{ + kSleepersRemoved = 0x00000001, + kSleepersWaitEnabled = 0x00000002, + kSleepersActions = 0x00000100, + kSleepersActionsMask = 0xffffff00, }; -/* virtual */ void IOCommandGate::setWorkLoop(IOWorkLoop *inWorkLoop) +/* virtual */ void +IOCommandGate::setWorkLoop(IOWorkLoop *inWorkLoop) { - IOWorkLoop * wl; - uintptr_t * sleepersP = (uintptr_t *) &reserved; - bool defer; - - if (!inWorkLoop && (wl = workLoop)) { // tearing down - wl->closeGate(); - *sleepersP |= kSleepersRemoved; - while (*sleepersP & kSleepersWaitEnabled) { - thread_wakeup_with_result(&enabled, THREAD_INTERRUPTED); - sleepGate(sleepersP, THREAD_UNINT); - } - *sleepersP &= ~kSleepersWaitEnabled; - defer = (0 != (kSleepersActionsMask & *sleepersP)); - if (!defer) - { - super::setWorkLoop(0); - *sleepersP &= ~kSleepersRemoved; + IOWorkLoop * wl; + uintptr_t * sleepersP = (uintptr_t *) &reserved; + bool defer; + + if (!inWorkLoop && (wl = workLoop)) { // tearing down + wl->closeGate(); + *sleepersP |= kSleepersRemoved; + while (*sleepersP & kSleepersWaitEnabled) { + thread_wakeup_with_result(&enabled, THREAD_INTERRUPTED); + sleepGate(sleepersP, THREAD_UNINT); + } + *sleepersP &= ~kSleepersWaitEnabled; + defer = (0 != (kSleepersActionsMask & *sleepersP)); + if (!defer) { + super::setWorkLoop(0); + *sleepersP &= ~kSleepersRemoved; + } + wl->openGate(); + return; } - wl->openGate(); - return; - } - super::setWorkLoop(inWorkLoop); + super::setWorkLoop(inWorkLoop); } -IOReturn IOCommandGate::runCommand(void *arg0, void *arg1, - void *arg2, void *arg3) +IOReturn +IOCommandGate::runCommand(void *arg0, void 
*arg1, + void *arg2, void *arg3) { - return runAction((Action) action, arg0, arg1, arg2, arg3); + return runAction((Action) action, arg0, arg1, arg2, arg3); } -IOReturn IOCommandGate::attemptCommand(void *arg0, void *arg1, - void *arg2, void *arg3) +IOReturn +IOCommandGate::attemptCommand(void *arg0, void *arg1, + void *arg2, void *arg3) { - return attemptAction((Action) action, arg0, arg1, arg2, arg3); + return attemptAction((Action) action, arg0, arg1, arg2, arg3); } -static IOReturn IOCommandGateActionToBlock(OSObject *owner, - void *arg0, void *arg1, - void *arg2, void *arg3) +static IOReturn +IOCommandGateActionToBlock(OSObject *owner, + void *arg0, void *arg1, + void *arg2, void *arg3) { - return ((IOEventSource::ActionBlock) arg0)(); + return ((IOEventSource::ActionBlock) arg0)(); } -IOReturn IOCommandGate::runActionBlock(ActionBlock action) +IOReturn +IOCommandGate::runActionBlock(ActionBlock action) { - return (runAction(&IOCommandGateActionToBlock, action)); + return runAction(&IOCommandGateActionToBlock, action); } -IOReturn IOCommandGate::runAction(Action inAction, - void *arg0, void *arg1, - void *arg2, void *arg3) +IOReturn +IOCommandGate::runAction(Action inAction, + void *arg0, void *arg1, + void *arg2, void *arg3) { - IOWorkLoop * wl; - uintptr_t * sleepersP; - - if (!inAction) - return kIOReturnBadArgument; - if (!(wl = workLoop)) - return kIOReturnNotReady; - - // closeGate is recursive needn't worry if we already hold the lock. - wl->closeGate(); - sleepersP = (uintptr_t *) &reserved; - - // If the command gate is disabled and we aren't on the workloop thread - // itself then sleep until we get enabled. - IOReturn res; - if (!wl->onThread()) - { - while (!enabled) - { - IOReturn sleepResult = kIOReturnSuccess; - if (workLoop) - { - *sleepersP |= kSleepersWaitEnabled; - sleepResult = wl->sleepGate(&enabled, THREAD_INTERRUPTIBLE); - *sleepersP &= ~kSleepersWaitEnabled; - } - bool wakeupTearDown = (!workLoop || (0 != (*sleepersP & kSleepersRemoved))); - if ((kIOReturnSuccess != sleepResult) || wakeupTearDown) { - wl->openGate(); + IOWorkLoop * wl; + uintptr_t * sleepersP; + + if (!inAction) { + return kIOReturnBadArgument; + } + if (!(wl = workLoop)) { + return kIOReturnNotReady; + } + + // closeGate is recursive, so we needn't worry if we already hold the lock. + wl->closeGate(); + sleepersP = (uintptr_t *) &reserved; + + // If the command gate is disabled and we aren't on the workloop thread + // itself then sleep until we get enabled. + IOReturn res; + if (!wl->onThread()) { + while (!enabled) { + IOReturn sleepResult = kIOReturnSuccess; + if (workLoop) { + *sleepersP |= kSleepersWaitEnabled; + sleepResult = wl->sleepGate(&enabled, THREAD_INTERRUPTIBLE); + *sleepersP &= ~kSleepersWaitEnabled; + } + bool wakeupTearDown = (!workLoop || (0 != (*sleepersP & kSleepersRemoved))); + if ((kIOReturnSuccess != sleepResult) || wakeupTearDown) { + wl->openGate(); + + if (wakeupTearDown) { + wl->wakeupGate(sleepersP, false); // No further resources used + } + return kIOReturnAborted; + } + } + } + + bool trace = (gIOKitTrace & kIOTraceCommandGates) ?
true : false; - if (wakeupTearDown) - wl->wakeupGate(sleepersP, false); // No further resources used + if (trace) { + IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION), + VM_KERNEL_ADDRHIDE(inAction), VM_KERNEL_ADDRHIDE(owner)); + } + + IOStatisticsActionCall(); + + // Must be gated and on the work loop or enabled + + *sleepersP += kSleepersActions; + res = (*inAction)(owner, arg0, arg1, arg2, arg3); + *sleepersP -= kSleepersActions; - return kIOReturnAborted; - } + if (trace) { + IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION), + VM_KERNEL_ADDRHIDE(inAction), VM_KERNEL_ADDRHIDE(owner)); } - } - - bool trace = ( gIOKitTrace & kIOTraceCommandGates ) ? true : false; - - if (trace) IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION), - VM_KERNEL_ADDRHIDE(inAction), VM_KERNEL_ADDRHIDE(owner)); - - IOStatisticsActionCall(); - - // Must be gated and on the work loop or enabled - - *sleepersP += kSleepersActions; - res = (*inAction)(owner, arg0, arg1, arg2, arg3); - *sleepersP -= kSleepersActions; - - if (trace) IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION), - VM_KERNEL_ADDRHIDE(inAction), VM_KERNEL_ADDRHIDE(owner)); - - if (kSleepersRemoved == ((kSleepersActionsMask|kSleepersRemoved) & *sleepersP)) - { - // no actions outstanding - *sleepersP &= ~kSleepersRemoved; - super::setWorkLoop(0); - } - - wl->openGate(); - - return res; + + if (kSleepersRemoved == ((kSleepersActionsMask | kSleepersRemoved) & *sleepersP)) { + // no actions outstanding + *sleepersP &= ~kSleepersRemoved; + super::setWorkLoop(0); + } + + wl->openGate(); + + return res; } -IOReturn IOCommandGate::attemptAction(Action inAction, - void *arg0, void *arg1, - void *arg2, void *arg3) +IOReturn +IOCommandGate::attemptAction(Action inAction, + void *arg0, void *arg1, + void *arg2, void *arg3) { - IOReturn res; - IOWorkLoop * wl; - - if (!inAction) - return kIOReturnBadArgument; - if (!(wl = workLoop)) - return kIOReturnNotReady; - - // Try to close the gate if can't get return immediately. - if (!wl->tryCloseGate()) - return kIOReturnCannotLock; - - // If the command gate is disabled then sleep until we get a wakeup - if (!wl->onThread() && !enabled) - res = kIOReturnNotPermitted; - else { - - bool trace = ( gIOKitTrace & kIOTraceCommandGates ) ? true : false; - - if (trace) - IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION), - VM_KERNEL_ADDRHIDE(inAction), VM_KERNEL_ADDRHIDE(owner)); - - IOStatisticsActionCall(); - - res = (*inAction)(owner, arg0, arg1, arg2, arg3); - - if (trace) - IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION), - VM_KERNEL_ADDRHIDE(inAction), VM_KERNEL_ADDRHIDE(owner)); - } - - wl->openGate(); - - return res; + IOReturn res; + IOWorkLoop * wl; + + if (!inAction) { + return kIOReturnBadArgument; + } + if (!(wl = workLoop)) { + return kIOReturnNotReady; + } + + // Try to close the gate; if we can't get it, return immediately. + if (!wl->tryCloseGate()) { + return kIOReturnCannotLock; + } + + // If the command gate is disabled then sleep until we get a wakeup + if (!wl->onThread() && !enabled) { + res = kIOReturnNotPermitted; + } else { + bool trace = (gIOKitTrace & kIOTraceCommandGates) ?
true : false; + + if (trace) { + IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION), + VM_KERNEL_ADDRHIDE(inAction), VM_KERNEL_ADDRHIDE(owner)); + } + + IOStatisticsActionCall(); + + res = (*inAction)(owner, arg0, arg1, arg2, arg3); + + if (trace) { + IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION), + VM_KERNEL_ADDRHIDE(inAction), VM_KERNEL_ADDRHIDE(owner)); + } + } + + wl->openGate(); + + return res; } -IOReturn IOCommandGate::commandSleep(void *event, UInt32 interruptible) +IOReturn +IOCommandGate::commandSleep(void *event, UInt32 interruptible) { - if (!workLoop->inGate()) { - /* The equivalent of 'msleep' while not holding the mutex is invalid */ - panic("invalid commandSleep while not holding the gate"); - } + if (!workLoop->inGate()) { + /* The equivalent of 'msleep' while not holding the mutex is invalid */ + panic("invalid commandSleep while not holding the gate"); + } - return sleepGate(event, interruptible); + return sleepGate(event, interruptible); } -IOReturn IOCommandGate::commandSleep(void *event, AbsoluteTime deadline, UInt32 interruptible) +IOReturn +IOCommandGate::commandSleep(void *event, AbsoluteTime deadline, UInt32 interruptible) { - if (!workLoop->inGate()) { - /* The equivalent of 'msleep' while not holding the mutex is invalid */ - panic("invalid commandSleep while not holding the gate"); - } + if (!workLoop->inGate()) { + /* The equivalent of 'msleep' while not holding the mutex is invalid */ + panic("invalid commandSleep while not holding the gate"); + } - return sleepGate(event, deadline, interruptible); + return sleepGate(event, deadline, interruptible); } -void IOCommandGate::commandWakeup(void *event, bool oneThread) +void +IOCommandGate::commandWakeup(void *event, bool oneThread) { - wakeupGate(event, oneThread); + wakeupGate(event, oneThread); } diff --git a/iokit/Kernel/IOCommandPool.cpp b/iokit/Kernel/IOCommandPool.cpp index 2a3b6e71b..d61f37fc3 100644 --- a/iokit/Kernel/IOCommandPool.cpp +++ b/iokit/Kernel/IOCommandPool.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
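As restyled above, runAction() blocks until the gate can be closed (sleeping on &enabled while the gate is disabled), while attemptAction() fails fast with kIOReturnCannotLock instead of waiting. A hedged usage sketch of the pattern drivers typically build on these entry points; MyDriver, fWorkLoop, fGate, fState, and setStateGated are illustrative names, not part of this patch:

IOReturn
MyDriver::setStateGated(void * arg0, void *, void *, void *)
{
    // Runs with the work-loop gate closed, serialized against every other
    // gated action and event source on the same work loop.
    fState = (uint32_t)(uintptr_t) arg0;
    return kIOReturnSuccess;
}

bool
MyDriver::setUpGate()
{
    fWorkLoop = IOWorkLoop::workLoop();
    fGate = IOCommandGate::commandGate(this);
    if (!fWorkLoop || !fGate ||
        (kIOReturnSuccess != fWorkLoop->addEventSource(fGate))) {
        return false;
    }

    // Member functions must be adapted to the C-style Action signature.
    IOCommandGate::Action action = OSMemberFunctionCast(
        IOCommandGate::Action, this, &MyDriver::setStateGated);

    // Blocks until the gate closes; attemptAction() would instead fail
    // immediately if another thread currently holds the gate.
    return kIOReturnSuccess == fGate->runAction(action, (void *)(uintptr_t) 1);
}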
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -54,62 +54,69 @@ OSMetaClassDefineReservedUnused(IOCommandPool, 7); // withWorkLoop - primary initializer and factory method //-------------------------------------------------------------------------- -IOCommandPool *IOCommandPool:: +IOCommandPool * +IOCommandPool:: withWorkLoop(IOWorkLoop *inWorkLoop) { - IOCommandPool * me = new IOCommandPool; - - if (me && !me->initWithWorkLoop(inWorkLoop)) { - me->release(); - return 0; - } - - return me; + IOCommandPool * me = new IOCommandPool; + + if (me && !me->initWithWorkLoop(inWorkLoop)) { + me->release(); + return 0; + } + + return me; } -bool IOCommandPool:: +bool +IOCommandPool:: initWithWorkLoop(IOWorkLoop *inWorkLoop) { - assert(inWorkLoop); - - if (!super::init()) - return false; - - queue_init(&fQueueHead); - - fSerializer = IOCommandGate::commandGate(this); - assert(fSerializer); - if (!fSerializer) - return false; - - if (kIOReturnSuccess != inWorkLoop->addEventSource(fSerializer)) - return false; - - return true; + assert(inWorkLoop); + + if (!super::init()) { + return false; + } + + queue_init(&fQueueHead); + + fSerializer = IOCommandGate::commandGate(this); + assert(fSerializer); + if (!fSerializer) { + return false; + } + + if (kIOReturnSuccess != inWorkLoop->addEventSource(fSerializer)) { + return false; + } + + return true; } //-------------------------------------------------------------------------- // commandPool & init - obsolete initializer and factory method //-------------------------------------------------------------------------- -IOCommandPool *IOCommandPool:: +IOCommandPool * +IOCommandPool:: commandPool(IOService * inOwner, IOWorkLoop *inWorkLoop, UInt32 inSize) { - IOCommandPool * me = new IOCommandPool; - - if (me && !me->init(inOwner, inWorkLoop, inSize)) { - me->release(); - return 0; - } - - return me; + IOCommandPool * me = new IOCommandPool; + + if (me && !me->init(inOwner, inWorkLoop, inSize)) { + me->release(); + return 0; + } + + return me; } -bool IOCommandPool:: +bool +IOCommandPool:: init(IOService */* inOwner */, IOWorkLoop *inWorkLoop, UInt32 /* inSize */) { - return initWithWorkLoop(inWorkLoop); + return initWithWorkLoop(inWorkLoop); } @@ -120,18 +127,19 @@ init(IOService */* inOwner */, IOWorkLoop *inWorkLoop, UInt32 /* inSize */) void IOCommandPool::free(void) { - if (fSerializer) { - // remove our event source from owner's workloop - IOWorkLoop *wl = fSerializer->getWorkLoop(); - if (wl) - wl->removeEventSource(fSerializer); - - fSerializer->release(); - fSerializer = 0; - } - - // Tell our superclass to cleanup too - super::free(); + if (fSerializer) { + // remove our event source from owner's workloop + IOWorkLoop *wl = fSerializer->getWorkLoop(); + if (wl) { + wl->removeEventSource(fSerializer); + } + + fSerializer->release(); + fSerializer = 0; + } + + // Tell our superclass to cleanup too + super::free(); } @@ -144,17 +152,18 @@ IOCommandPool::free(void) IOCommand * IOCommandPool::getCommand(bool blockForCommand) { - IOReturn result = kIOReturnSuccess; - IOCommand *command = 0; - - IOCommandGate::Action func = OSMemberFunctionCast( - IOCommandGate::Action, this, &IOCommandPool::gatedGetCommand); - result = fSerializer-> - runAction(func, (void *) &command, (void *) blockForCommand); - if (kIOReturnSuccess == result) - return command; - else - return 0; + IOReturn result = kIOReturnSuccess; + IOCommand *command = 0; + + IOCommandGate::Action func = OSMemberFunctionCast( + IOCommandGate::Action, this, &IOCommandPool::gatedGetCommand); + 
result = fSerializer-> + runAction(func, (void *) &command, (void *) blockForCommand); + if (kIOReturnSuccess == result) { + return command; + } else { + return 0; + } } @@ -163,20 +172,22 @@ IOCommandPool::getCommand(bool blockForCommand) // (on safe side of command gate) //-------------------------------------------------------------------------- -IOReturn IOCommandPool:: +IOReturn +IOCommandPool:: gatedGetCommand(IOCommand **command, bool blockForCommand) { - while (queue_empty(&fQueueHead)) { - if (!blockForCommand) - return kIOReturnNoResources; - - fSleepers++; - fSerializer->commandSleep(&fSleepers, THREAD_UNINT); - } - - queue_remove_first(&fQueueHead, - *command, IOCommand *, fCommandChain); - return kIOReturnSuccess; + while (queue_empty(&fQueueHead)) { + if (!blockForCommand) { + return kIOReturnNoResources; + } + + fSleepers++; + fSerializer->commandSleep(&fSleepers, THREAD_UNINT); + } + + queue_remove_first(&fQueueHead, + *command, IOCommand *, fCommandChain); + return kIOReturnSuccess; } @@ -184,27 +195,29 @@ gatedGetCommand(IOCommand **command, bool blockForCommand) // returnCommand - Returns command to the pool. //-------------------------------------------------------------------------- -void IOCommandPool:: +void +IOCommandPool:: returnCommand(IOCommand *command) { - IOCommandGate::Action func = OSMemberFunctionCast( - IOCommandGate::Action, this, &IOCommandPool::gatedReturnCommand); - (void) fSerializer->runAction(func, (void *) command); + IOCommandGate::Action func = OSMemberFunctionCast( + IOCommandGate::Action, this, &IOCommandPool::gatedReturnCommand); + (void) fSerializer->runAction(func, (void *) command); } //-------------------------------------------------------------------------- // gatedReturnCommand - Callthrough function -// (on safe side of command gate) +// (on safe side of command gate) //-------------------------------------------------------------------------- -IOReturn IOCommandPool:: +IOReturn +IOCommandPool:: gatedReturnCommand(IOCommand *command) { - queue_enter_first(&fQueueHead, command, IOCommand *, fCommandChain); - if (fSleepers) { - fSerializer->commandWakeup(&fSleepers, /* oneThread */ true); - fSleepers--; - } - return kIOReturnSuccess; + queue_enter_first(&fQueueHead, command, IOCommand *, fCommandChain); + if (fSleepers) { + fSerializer->commandWakeup(&fSleepers, /* oneThread */ true); + fSleepers--; + } + return kIOReturnSuccess; } diff --git a/iokit/Kernel/IOCommandQueue.cpp b/iokit/Kernel/IOCommandQueue.cpp index 532cd57c0..08ba843c9 100644 --- a/iokit/Kernel/IOCommandQueue.cpp +++ b/iokit/Kernel/IOCommandQueue.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
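The pool reformatted above keeps free commands on fQueueHead and parks blocking callers via commandSleep() on fSleepers; because getCommand() and returnCommand() both route through fSerializer's gate, the free list needs no lock of its own. A short usage sketch, assuming an existing work loop and the hypothetical IOCommand subclass from the earlier sketch (myWorkLoop and MyCommand::withOwner are assumptions, not from this patch):

IOCommandPool * pool = IOCommandPool::withWorkLoop(myWorkLoop);

// Seed the pool: returnCommand() simply enqueues onto the free list.
for (int i = 0; i < 8; i++) {
    pool->returnCommand(MyCommand::withOwner(NULL));
}

// Borrow a command; passing true means block (commandSleep on fSleepers)
// until one is returned, rather than failing with kIOReturnNoResources.
IOCommand * cmd = pool->getCommand(true);

/* ... program and issue cmd ... */

pool->returnCommand(cmd);   // re-enqueue and commandWakeup() one sleeper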
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -50,9 +50,9 @@ #endif /* IOKITSTATS */ -#define NUM_FIELDS_IN_COMMAND 4 +#define NUM_FIELDS_IN_COMMAND 4 typedef struct commandEntryTag { - void *f[NUM_FIELDS_IN_COMMAND]; + void *f[NUM_FIELDS_IN_COMMAND]; } commandEntryT; #define super IOEventSource @@ -60,238 +60,260 @@ typedef struct commandEntryTag { OSDefineMetaClassAndStructors(IOCommandQueue, IOEventSource) /*[ -Instance Methods - -initWithNext:owner:action:size: - - initWithNext: (IOEventSource *) inNext - owner: (id) inOwner - action: (SEL) inAction - size: (int) inSize; - -Primary initialiser for the IOCommandQueue class. Returns an -IOCommandQueue object that is initialised with the next object in -the chain and the owner and action. On return the signalWorkAvailableIMP -has been cached for this function. - -If the object fails to initialise for some reason then [self free] will -be called and nil will be returned. - -See also: initWithNext:owner:action:(IOEventSource) -]*/ -bool IOCommandQueue::init(OSObject *inOwner, - IOCommandQueueAction inAction, - int inSize) + * Instance Methods + * + * initWithNext:owner:action:size: + * - initWithNext: (IOEventSource *) inNext + * owner: (id) inOwner + * action: (SEL) inAction + * size: (int) inSize; + * + * Primary initialiser for the IOCommandQueue class. Returns an + * IOCommandQueue object that is initialised with the next object in + * the chain and the owner and action. On return the signalWorkAvailableIMP + * has been cached for this function. + * + * If the object fails to initialise for some reason then [self free] will + * be called and nil will be returned. 
+ * + * See also: initWithNext:owner:action:(IOEventSource) + * ]*/ +bool +IOCommandQueue::init(OSObject *inOwner, + IOCommandQueueAction inAction, + int inSize) { - if ( !super::init(inOwner, (IOEventSourceAction) inAction) ) - return false; - - if (KERN_SUCCESS - != semaphore_create(kernel_task, &producerSema, SYNC_POLICY_FIFO, inSize)) - return false; + if (!super::init(inOwner, (IOEventSourceAction) inAction)) { + return false; + } - size = inSize + 1; /* Allocate one more entry than needed */ + if (KERN_SUCCESS + != semaphore_create(kernel_task, &producerSema, SYNC_POLICY_FIFO, inSize)) { + return false; + } - queue = (void *)kalloc(size * sizeof(commandEntryT)); - if (!queue) - return false; + size = inSize + 1; /* Allocate one more entry than needed */ - producerLock = IOLockAlloc(); - if (!producerLock) - return false; + queue = (void *)kalloc(size * sizeof(commandEntryT)); + if (!queue) { + return false; + } - producerIndex = consumerIndex = 0; + producerLock = IOLockAlloc(); + if (!producerLock) { + return false; + } - IOStatisticsInitializeCounter(); + producerIndex = consumerIndex = 0; - return true; + IOStatisticsInitializeCounter(); + + return true; } IOCommandQueue * IOCommandQueue::commandQueue(OSObject *inOwner, - IOCommandQueueAction inAction, - int inSize) + IOCommandQueueAction inAction, + int inSize) { - IOCommandQueue *me = new IOCommandQueue; + IOCommandQueue *me = new IOCommandQueue; - if (me && !me->init(inOwner, inAction, inSize)) { - me->free(); - return 0; - } + if (me && !me->init(inOwner, inAction, inSize)) { + me->free(); + return 0; + } - return me; + return me; } /*[ -free - - free - -Mandatory free of the object independent of the current retain count. -Returns nil. -]*/ -void IOCommandQueue::free() + * free + * - free + * + * Mandatory free of the object independent of the current retain count. + * Returns nil. + * ]*/ +void +IOCommandQueue::free() { - if (queue) - kfree(queue, size * sizeof(commandEntryT)); - if (producerSema) - semaphore_destroy(kernel_task, producerSema); - if (producerLock) - IOLockFree(producerLock); - - super::free(); + if (queue) { + kfree(queue, size * sizeof(commandEntryT)); + } + if (producerSema) { + semaphore_destroy(kernel_task, producerSema); + } + if (producerLock) { + IOLockFree(producerLock); + } + + super::free(); } #if NUM_FIELDS_IN_COMMAND != 4 #error IOCommandQueue::checkForWork needs to be updated for new command size #endif -bool IOCommandQueue::checkForWork() +bool +IOCommandQueue::checkForWork() { - void *field0, *field1, *field2, *field3; - bool trace = ( gIOKitTrace & kIOTraceCommandGates ) ? true : false; + void *field0, *field1, *field2, *field3; + bool trace = (gIOKitTrace & kIOTraceCommandGates) ? 
true : false; - if (!enabled || consumerIndex == producerIndex) - return false; + if (!enabled || consumerIndex == producerIndex) { + return false; + } - { - commandEntryT *q = (commandEntryT *) queue; - int localIndex = consumerIndex; + { + commandEntryT *q = (commandEntryT *) queue; + int localIndex = consumerIndex; - field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1]; - field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3]; - semaphore_signal(producerSema); - } + field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1]; + field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3]; + semaphore_signal(producerSema); + } - if (++consumerIndex >= size) - consumerIndex = 0; + if (++consumerIndex >= size) { + consumerIndex = 0; + } - if (trace) + if (trace) { IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION), - VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner)); - - IOStatisticsActionCall(); - (*(IOCommandQueueAction) action)(owner, field0, field1, field2, field3); - - if (trace) - IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION), - VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner)); - - return (consumerIndex != producerIndex); -} + VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner)); + } -/*[ -enqueueSleep:command: - - (kern_return_t) enqueueSleepRaw: (BOOL) gotoSleep - field0: (void *) field0 field1: (void *) field1 - field2: (void *) field2 field3: (void *) field3; + IOStatisticsActionCall(); + (*(IOCommandQueueAction) action)(owner, field0, field1, field2, field3); -Key method that enqueues the four input fields onto the command queue -and calls signalWorkAvailable to indicate that work is available to the -consumer. This routine is safe against multiple threaded producers. + if (trace) { + IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION), + VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner)); + } -A family of convenience functions have been provided to assist with the -enqueueing of an method selector and an integer tag. This relies on the -IODevice rawCommandOccurred... command to forward on the requests. + return consumerIndex != producerIndex; +} -See also: signalWorkAvailable, checkForWork -]*/ +/*[ + * enqueueSleep:command: + * - (kern_return_t) enqueueSleepRaw: (BOOL) gotoSleep + * field0: (void *) field0 field1: (void *) field1 + * field2: (void *) field2 field3: (void *) field3; + * + * Key method that enqueues the four input fields onto the command queue + * and calls signalWorkAvailable to indicate that work is available to the + * consumer. This routine is safe against multiple threaded producers. + * + * A family of convenience functions has been provided to assist with the + * enqueueing of a method selector and an integer tag. This relies on the + * IODevice rawCommandOccurred... command to forward on the requests.
+ * + * See also: signalWorkAvailable, checkForWork + * ]*/ #if NUM_FIELDS_IN_COMMAND != 4 #error IOCommandQueue::enqueueCommand needs to be updated #endif kern_return_t IOCommandQueue::enqueueCommand(bool gotoSleep, - void *field0, void *field1, - void *field2, void *field3) + void *field0, void *field1, + void *field2, void *field3) { - kern_return_t rtn = KERN_SUCCESS; - int retry; - - /* Make sure there is room in the queue before doing anything else */ - - if (gotoSleep) { - retry = 0; - do - rtn = semaphore_wait(producerSema); - while( (KERN_SUCCESS != rtn) - && (KERN_OPERATION_TIMED_OUT != rtn) - && (KERN_SEMAPHORE_DESTROYED != rtn) - && (KERN_TERMINATED != rtn) - && ((retry++) < 4)); - } else - rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO); - - if (KERN_SUCCESS != rtn) - return rtn; - - /* Block other producers */ - IOTakeLock(producerLock); - - /* - * Make sure that we update the current producer entry before we - * increment the producer pointer. This avoids a nasty race as the - * test for work is producerIndex != consumerIndex and a signal. - */ - { - commandEntryT *q = (commandEntryT *) queue; - int localIndex = producerIndex; - - q[localIndex].f[0] = field0; q[localIndex].f[1] = field1; - q[localIndex].f[2] = field2; q[localIndex].f[3] = field3; - } - if (++producerIndex >= size) - producerIndex = 0; - - /* Clear to allow other producers to go now */ - IOUnlock(producerLock); - - /* - * Right we have created some new work, we had better make sure that - * we notify the work loop that it has to test producerIndex. - */ - signalWorkAvailable(); - return rtn; + kern_return_t rtn = KERN_SUCCESS; + int retry; + + /* Make sure there is room in the queue before doing anything else */ + + if (gotoSleep) { + retry = 0; + do{ + rtn = semaphore_wait(producerSema); + } while ((KERN_SUCCESS != rtn) + && (KERN_OPERATION_TIMED_OUT != rtn) + && (KERN_SEMAPHORE_DESTROYED != rtn) + && (KERN_TERMINATED != rtn) + && ((retry++) < 4)); + } else { + rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO); + } + + if (KERN_SUCCESS != rtn) { + return rtn; + } + + /* Block other producers */ + IOTakeLock(producerLock); + + /* + * Make sure that we update the current producer entry before we + * increment the producer pointer. This avoids a nasty race as the + * test for work is producerIndex != consumerIndex and a signal. + */ + { + commandEntryT *q = (commandEntryT *) queue; + int localIndex = producerIndex; + + q[localIndex].f[0] = field0; q[localIndex].f[1] = field1; + q[localIndex].f[2] = field2; q[localIndex].f[3] = field3; + } + if (++producerIndex >= size) { + producerIndex = 0; + } + + /* Clear to allow other producers to go now */ + IOUnlock(producerLock); + + /* + * Right we have created some new work, we had better make sure that + * we notify the work loop that it has to test producerIndex. 
+ */ + signalWorkAvailable(); + return rtn; } -int IOCommandQueue::performAndFlush(OSObject *target, - IOCommandQueueAction inAction) +int +IOCommandQueue::performAndFlush(OSObject *target, + IOCommandQueueAction inAction) { - int numEntries; - kern_return_t rtn; - - // Set the defaults if necessary - if (!target) - target = owner; - if (!inAction) - inAction = (IOCommandQueueAction) action; - - // Lock out the producers first - do { - rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO); - } while (rtn == KERN_SUCCESS); - - // now step over all remaining entries in the command queue - for (numEntries = 0; consumerIndex != producerIndex; ) { - void *field0, *field1, *field2, *field3; - - { - commandEntryT *q = (commandEntryT *) queue; - int localIndex = consumerIndex; - - field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1]; - field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3]; - } - - if (++consumerIndex >= size) - consumerIndex = 0; - - (*inAction)(target, field0, field1, field2, field3); - } - - // finally refill the producer semaphore to size - 1 - for (int i = 1; i < size; i++) - semaphore_signal(producerSema); - - return numEntries; + int numEntries; + kern_return_t rtn; + + // Set the defaults if necessary + if (!target) { + target = owner; + } + if (!inAction) { + inAction = (IOCommandQueueAction) action; + } + + // Lock out the producers first + do { + rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO); + } while (rtn == KERN_SUCCESS); + + // now step over all remaining entries in the command queue + for (numEntries = 0; consumerIndex != producerIndex;) { + void *field0, *field1, *field2, *field3; + + { + commandEntryT *q = (commandEntryT *) queue; + int localIndex = consumerIndex; + + field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1]; + field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3]; + } + + if (++consumerIndex >= size) { + consumerIndex = 0; + } + + (*inAction)(target, field0, field1, field2, field3); + } + + // finally refill the producer semaphore to size - 1 + for (int i = 1; i < size; i++) { + semaphore_signal(producerSema); + } + + return numEntries; } #endif /* !defined(__LP64__) */ diff --git a/iokit/Kernel/IOConditionLock.cpp b/iokit/Kernel/IOConditionLock.cpp index 9e2bd04cb..655a150a4 100644 --- a/iokit/Kernel/IOConditionLock.cpp +++ b/iokit/Kernel/IOConditionLock.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,19 +22,19 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
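The IOCommandQueue code above is a classic bounded ring: producerIndex and consumerIndex advance modulo size, init() deliberately allocates one more entry than requested so that equal indices unambiguously mean "empty" rather than "empty or full", and the counting semaphore producerSema throttles producers (enqueueCommand() waits on it; checkForWork() signals it after copying an entry out). A user-space model of just the index arithmetic, for orientation; this is illustrative C++, not kernel code, and where this model returns false on a full ring the kernel version blocks on the semaphore instead:

#include <cstddef>

template <typename T, size_t N>
struct Ring {
    T slot[N + 1];                   // one spare entry, as in init() above
    size_t producer = 0, consumer = 0;

    bool push(const T &v) {          // producer side of enqueueCommand()
        size_t next = (producer + 1) % (N + 1);
        if (next == consumer) {
            return false;            // full; the kernel waits on producerSema
        }
        slot[producer] = v;          // write the entry before publishing it
        producer = next;
        return true;
    }

    bool pop(T &v) {                 // consumer side of checkForWork()
        if (consumer == producer) {
            return false;            // empty: indices are equal
        }
        v = slot[consumer];
        consumer = (consumer + 1) % (N + 1);
        return true;
    }
};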
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* Copyright (c) 1997 Apple Computer, Inc. All rights reserved. - * Copyright (c) 1994-1996 NeXT Software, Inc. All rights reserved. + * Copyright (c) 1994-1996 NeXT Software, Inc. All rights reserved. * - * AppleIOPSSafeCondLock.m. Lock object with exported condition variable, + * AppleIOPSSafeCondLock.m. Lock object with exported condition variable, * kernel version. * * HISTORY * 1997-11- * 01-Aug-91 Doug Mitchell at NeXT - * Created. + * Created. */ #include @@ -42,159 +42,173 @@ #define super OSObject OSDefineMetaClassAndStructors(IOConditionLock, OSObject) -bool IOConditionLock::initWithCondition(int inCondition, bool inIntr) +bool +IOConditionLock::initWithCondition(int inCondition, bool inIntr) { - if (!super::init()) - return false; + if (!super::init()) { + return false; + } - cond_interlock = IOLockAlloc(); - sleep_interlock = IOLockAlloc(); + cond_interlock = IOLockAlloc(); + sleep_interlock = IOLockAlloc(); - condition = inCondition; - want_lock = false; - waiting = false; - interruptible = (inIntr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT; + condition = inCondition; + want_lock = false; + waiting = false; + interruptible = (inIntr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT; - return cond_interlock && sleep_interlock; + return cond_interlock && sleep_interlock; } -IOConditionLock *IOConditionLock::withCondition(int condition, bool intr) +IOConditionLock * +IOConditionLock::withCondition(int condition, bool intr) { - IOConditionLock *me = new IOConditionLock; + IOConditionLock *me = new IOConditionLock; - if (me && !me->initWithCondition(condition, intr)) { - me->release(); - return 0; - } + if (me && !me->initWithCondition(condition, intr)) { + me->release(); + return 0; + } - return me; + return me; } -void IOConditionLock::free() +void +IOConditionLock::free() { - if (cond_interlock) - IOLockFree(cond_interlock); - if (sleep_interlock) - IOLockFree(sleep_interlock); - super::free(); + if (cond_interlock) { + IOLockFree(cond_interlock); + } + if (sleep_interlock) { + IOLockFree(sleep_interlock); + } + super::free(); } -bool IOConditionLock::getInterruptible() const +bool +IOConditionLock::getInterruptible() const { - return interruptible; + return interruptible; } -int IOConditionLock:: getCondition() const +int +IOConditionLock:: getCondition() const { - return condition; + return condition; } -int IOConditionLock:: setCondition(int inCondition) +int +IOConditionLock:: setCondition(int inCondition) { - int old = condition; + int old = condition; - condition = inCondition; - thread_wakeup_one((void *) &condition); + condition = inCondition; + thread_wakeup_one((void *) &condition); - return old; + return old; } -void IOConditionLock::unlock() +void +IOConditionLock::unlock() { - IOTakeLock(sleep_interlock); + IOTakeLock(sleep_interlock); - thread_wakeup_one((void *) &condition); + thread_wakeup_one((void *) &condition); - want_lock = false; - if (waiting) { - waiting = false; - IOLockWakeup(sleep_interlock, this, /* one-thread */ false); // Wakeup everybody - } + want_lock = false; + if (waiting) { + waiting = false; + IOLockWakeup(sleep_interlock, this, /* one-thread */ false); // Wakeup everybody + } - IOUnlock(sleep_interlock); + IOUnlock(sleep_interlock); } -void IOConditionLock::unlockWith(int inCondition) +void +IOConditionLock::unlockWith(int inCondition) { - IOTakeLock(sleep_interlock); - IOTakeLock(cond_interlock); - - condition = inCondition; + IOTakeLock(sleep_interlock); + IOTakeLock(cond_interlock); + + condition = 
inCondition; - IOUnlock(cond_interlock); - IOUnlock(sleep_interlock); + IOUnlock(cond_interlock); + IOUnlock(sleep_interlock); - unlock(); + unlock(); } -bool IOConditionLock::tryLock() +bool +IOConditionLock::tryLock() { - bool result; + bool result; - IOTakeLock(sleep_interlock); + IOTakeLock(sleep_interlock); - result = !want_lock; - if (result) - want_lock = true; + result = !want_lock; + if (result) { + want_lock = true; + } - IOUnlock(sleep_interlock); + IOUnlock(sleep_interlock); - return result; + return result; } -int IOConditionLock::lock() +int +IOConditionLock::lock() { - int thread_res = THREAD_AWAKENED; - - IOTakeLock(sleep_interlock); - - /* Try to acquire the want_lock bit. */ - while (want_lock && thread_res == THREAD_AWAKENED) - { - waiting = true; - thread_res = IOLockSleep(sleep_interlock, (void *) this, interruptible); - } - if (thread_res == THREAD_AWAKENED) - want_lock = true; - - IOUnlock(sleep_interlock); - - return thread_res; + int thread_res = THREAD_AWAKENED; + + IOTakeLock(sleep_interlock); + + /* Try to acquire the want_lock bit. */ + while (want_lock && thread_res == THREAD_AWAKENED) { + waiting = true; + thread_res = IOLockSleep(sleep_interlock, (void *) this, interruptible); + } + if (thread_res == THREAD_AWAKENED) { + want_lock = true; + } + + IOUnlock(sleep_interlock); + + return thread_res; } -int IOConditionLock::lockWhen(int inCondition) +int +IOConditionLock::lockWhen(int inCondition) { - int thread_res; - - do - { - /* First get the actual lock */ - thread_res = lock(); - if (thread_res != THREAD_AWAKENED) - break; // Failed to acquire lock - - if (inCondition == condition) - break; // Hold lock and condition is expected value - - /* - * Need to hold a IOTakeLock when we call thread_sleep(). - * Both _cond_interlock and want_lock must be held to - * change _condition. - */ - IOTakeLock(cond_interlock); - unlock(); // Release lock and sleep - - /* - * this is the critical section on a multi in which - * another thread could hold _sleep_interlock, but they - * can't change _condition. Holding _cond_interlock here - * (until after assert_wait() is called from - * thread_sleep()) ensures that we'll be notified - * of changes in _condition. - */ - assert_wait((void *) &condition, interruptible); /* assert event */ - IOUnlock(cond_interlock); /* release the lock */ - thread_res = thread_block(THREAD_CONTINUE_NULL); /* block ourselves */ - } while (thread_res == THREAD_AWAKENED); - - return thread_res; + int thread_res; + + do{ + /* First get the actual lock */ + thread_res = lock(); + if (thread_res != THREAD_AWAKENED) { + break; // Failed to acquire lock + } + if (inCondition == condition) { + break; // Hold lock and condition is expected value + } + /* + * Need to hold an IOTakeLock when we call thread_sleep(). + * Both _cond_interlock and want_lock must be held to + * change _condition. + */ + IOTakeLock(cond_interlock); + unlock(); // Release lock and sleep + + /* + * this is the critical section on a multiprocessor in which + * another thread could hold _sleep_interlock, but they + * can't change _condition. Holding _cond_interlock here + * (until after assert_wait() is called from + * thread_sleep()) ensures that we'll be notified + * of changes in _condition.
+ */ + assert_wait((void *) &condition, interruptible); /* assert event */ + IOUnlock(cond_interlock); /* release the lock */ + thread_res = thread_block(THREAD_CONTINUE_NULL); /* block ourselves */ + } while (thread_res == THREAD_AWAKENED); + + return thread_res; } diff --git a/iokit/Kernel/IODMACommand.cpp b/iokit/Kernel/IODMACommand.cpp index ee6642b22..b5383d481 100644 --- a/iokit/Kernel/IODMACommand.cpp +++ b/iokit/Kernel/IODMACommand.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -41,19 +41,18 @@ #include "IOKitKernelInternal.h" -#define MAPTYPE(type) ((UInt) (type) & kTypeMask) -#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent) - -enum -{ - kWalkSyncIn = 0x01, // bounce -> md - kWalkSyncOut = 0x02, // bounce <- md - kWalkSyncAlways = 0x04, - kWalkPreflight = 0x08, - kWalkDoubleBuffer = 0x10, - kWalkPrepare = 0x20, - kWalkComplete = 0x40, - kWalkClient = 0x80 +#define MAPTYPE(type) ((UInt) (type) & kTypeMask) +#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent) + +enum{ + kWalkSyncIn = 0x01,// bounce -> md + kWalkSyncOut = 0x02,// bounce <- md + kWalkSyncAlways = 0x04, + kWalkPreflight = 0x08, + kWalkDoubleBuffer = 0x10, + kWalkPrepare = 0x20, + kWalkComplete = 0x40, + kWalkClient = 0x80 }; @@ -64,20 +63,20 @@ enum #if 1 // no direction => OutIn -#define SHOULD_COPY_DIR(op, direction) \ - ((kIODirectionNone == (direction)) \ - || (kWalkSyncAlways & (op)) \ +#define SHOULD_COPY_DIR(op, direction) \ + ((kIODirectionNone == (direction)) \ + || (kWalkSyncAlways & (op)) \ || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut) \ - & (direction))) + & (direction))) #else #define SHOULD_COPY_DIR(state, direction) (true) #endif #if 0 -#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); } +#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); } #else -#define DEBG(fmt, args...) {} +#define DEBG(fmt, args...) 
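lockWhen() above loops acquire, test, sleep: it holds cond_interlock across unlock() and assert_wait() so a concurrent setCondition() or unlockWith() cannot slip its wakeup between the condition test and the block. A hedged producer/consumer sketch of the API as shown; the kIdle/kReady values and the thread split are illustrative assumptions, not from this patch:

enum { kIdle = 0, kReady = 1 };
IOConditionLock * lk = IOConditionLock::withCondition(kIdle);

// Consumer thread: sleeps until some other thread publishes kReady.
lk->lockWhen(kReady);
/* ... consume the data the lock guards ... */
lk->unlockWith(kIdle);      // release the lock and reset the condition

// Producer thread:
lk->lock();
/* ... produce data ... */
lk->unlockWith(kReady);     // wakes the lockWhen() sleeper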
{} #endif /**************************** class IODMACommand ***************************/ @@ -86,16 +85,16 @@ enum #define super IOCommand OSDefineMetaClassAndStructors(IODMACommand, IOCommand); -OSMetaClassDefineReservedUsed(IODMACommand, 0); -OSMetaClassDefineReservedUsed(IODMACommand, 1); -OSMetaClassDefineReservedUsed(IODMACommand, 2); -OSMetaClassDefineReservedUsed(IODMACommand, 3); -OSMetaClassDefineReservedUsed(IODMACommand, 4); -OSMetaClassDefineReservedUsed(IODMACommand, 5); -OSMetaClassDefineReservedUsed(IODMACommand, 6); -OSMetaClassDefineReservedUnused(IODMACommand, 7); -OSMetaClassDefineReservedUnused(IODMACommand, 8); -OSMetaClassDefineReservedUnused(IODMACommand, 9); +OSMetaClassDefineReservedUsed(IODMACommand, 0); +OSMetaClassDefineReservedUsed(IODMACommand, 1); +OSMetaClassDefineReservedUsed(IODMACommand, 2); +OSMetaClassDefineReservedUsed(IODMACommand, 3); +OSMetaClassDefineReservedUsed(IODMACommand, 4); +OSMetaClassDefineReservedUsed(IODMACommand, 5); +OSMetaClassDefineReservedUsed(IODMACommand, 6); +OSMetaClassDefineReservedUnused(IODMACommand, 7); +OSMetaClassDefineReservedUnused(IODMACommand, 8); +OSMetaClassDefineReservedUnused(IODMACommand, 9); OSMetaClassDefineReservedUnused(IODMACommand, 10); OSMetaClassDefineReservedUnused(IODMACommand, 11); OSMetaClassDefineReservedUnused(IODMACommand, 12); @@ -106,76 +105,73 @@ OSMetaClassDefineReservedUnused(IODMACommand, 15); IODMACommand * IODMACommand::withRefCon(void * refCon) { - IODMACommand * me = new IODMACommand; + IODMACommand * me = new IODMACommand; - if (me && !me->initWithRefCon(refCon)) - { - me->release(); - return 0; - } + if (me && !me->initWithRefCon(refCon)) { + me->release(); + return 0; + } - return me; + return me; } IODMACommand * IODMACommand::withSpecification(SegmentFunction outSegFunc, - const SegmentOptions * segmentOptions, - uint32_t mappingOptions, - IOMapper * mapper, - void * refCon) + const SegmentOptions * segmentOptions, + uint32_t mappingOptions, + IOMapper * mapper, + void * refCon) { - IODMACommand * me = new IODMACommand; + IODMACommand * me = new IODMACommand; - if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions, - mapper, refCon)) - { - me->release(); - return 0; - } + if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions, + mapper, refCon)) { + me->release(); + return 0; + } - return me; + return me; } IODMACommand * IODMACommand::withSpecification(SegmentFunction outSegFunc, - UInt8 numAddressBits, - UInt64 maxSegmentSize, - MappingOptions mappingOptions, - UInt64 maxTransferSize, - UInt32 alignment, - IOMapper *mapper, - void *refCon) + UInt8 numAddressBits, + UInt64 maxSegmentSize, + MappingOptions mappingOptions, + UInt64 maxTransferSize, + UInt32 alignment, + IOMapper *mapper, + void *refCon) { - IODMACommand * me = new IODMACommand; - - if (me && !me->initWithSpecification(outSegFunc, - numAddressBits, maxSegmentSize, - mappingOptions, maxTransferSize, - alignment, mapper, refCon)) - { - me->release(); - return 0; - } - - return me; + IODMACommand * me = new IODMACommand; + + if (me && !me->initWithSpecification(outSegFunc, + numAddressBits, maxSegmentSize, + mappingOptions, maxTransferSize, + alignment, mapper, refCon)) { + me->release(); + return 0; + } + + return me; } IODMACommand * IODMACommand::cloneCommand(void *refCon) { - SegmentOptions segmentOptions = - { - .fStructSize = sizeof(segmentOptions), - .fNumAddressBits = (uint8_t)fNumAddressBits, - .fMaxSegmentSize = fMaxSegmentSize, - .fMaxTransferSize = 
fMaxTransferSize, - .fAlignment = fAlignMask + 1, - .fAlignmentLength = fAlignMaskInternalSegments + 1, - .fAlignmentInternalSegments = fAlignMaskLength + 1 - }; - - return (IODMACommand::withSpecification(fOutSeg, &segmentOptions, - fMappingOptions, fMapper, refCon)); + SegmentOptions segmentOptions = + { + .fStructSize = sizeof(segmentOptions), + .fNumAddressBits = (uint8_t)fNumAddressBits, + .fMaxSegmentSize = fMaxSegmentSize, + .fMaxTransferSize = fMaxTransferSize, + .fAlignment = fAlignMask + 1, + .fAlignmentLength = fAlignMaskInternalSegments + 1, + .fAlignmentInternalSegments = fAlignMaskLength + 1 + }; + + return IODMACommand::withSpecification(fOutSeg, &segmentOptions, + fMappingOptions, fMapper, refCon); } #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction) @@ -183,1311 +179,1314 @@ IODMACommand::cloneCommand(void *refCon) bool IODMACommand::initWithRefCon(void * refCon) { - if (!super::init()) return (false); + if (!super::init()) { + return false; + } - if (!reserved) - { - reserved = IONew(IODMACommandInternal, 1); - if (!reserved) return false; - } - bzero(reserved, sizeof(IODMACommandInternal)); - fRefCon = refCon; + if (!reserved) { + reserved = IONew(IODMACommandInternal, 1); + if (!reserved) { + return false; + } + } + bzero(reserved, sizeof(IODMACommandInternal)); + fRefCon = refCon; - return (true); + return true; } bool -IODMACommand::initWithSpecification(SegmentFunction outSegFunc, - const SegmentOptions * segmentOptions, - uint32_t mappingOptions, - IOMapper * mapper, - void * refCon) +IODMACommand::initWithSpecification(SegmentFunction outSegFunc, + const SegmentOptions * segmentOptions, + uint32_t mappingOptions, + IOMapper * mapper, + void * refCon) { - if (!initWithRefCon(refCon)) return false; + if (!initWithRefCon(refCon)) { + return false; + } - if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions, - mappingOptions, mapper)) return false; + if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions, + mappingOptions, mapper)) { + return false; + } - return (true); + return true; } bool IODMACommand::initWithSpecification(SegmentFunction outSegFunc, - UInt8 numAddressBits, - UInt64 maxSegmentSize, - MappingOptions mappingOptions, - UInt64 maxTransferSize, - UInt32 alignment, - IOMapper *mapper, - void *refCon) + UInt8 numAddressBits, + UInt64 maxSegmentSize, + MappingOptions mappingOptions, + UInt64 maxTransferSize, + UInt32 alignment, + IOMapper *mapper, + void *refCon) { - SegmentOptions segmentOptions = - { - .fStructSize = sizeof(segmentOptions), - .fNumAddressBits = numAddressBits, - .fMaxSegmentSize = maxSegmentSize, - .fMaxTransferSize = maxTransferSize, - .fAlignment = alignment, - .fAlignmentLength = 1, - .fAlignmentInternalSegments = alignment - }; - - return (initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon)); + SegmentOptions segmentOptions = + { + .fStructSize = sizeof(segmentOptions), + .fNumAddressBits = numAddressBits, + .fMaxSegmentSize = maxSegmentSize, + .fMaxTransferSize = maxTransferSize, + .fAlignment = alignment, + .fAlignmentLength = 1, + .fAlignmentInternalSegments = alignment + }; + + return initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon); } IOReturn IODMACommand::setSpecification(SegmentFunction outSegFunc, - const SegmentOptions * segmentOptions, - uint32_t mappingOptions, - IOMapper * mapper) + const SegmentOptions * segmentOptions, + uint32_t mappingOptions, + IOMapper * mapper) { - IOService * device = 0; - UInt8 
numAddressBits; - UInt64 maxSegmentSize; - UInt64 maxTransferSize; - UInt32 alignment; - - bool is32Bit; - - if (!outSegFunc || !segmentOptions) return (kIOReturnBadArgument); - - is32Bit = ((OutputHost32 == outSegFunc) - || (OutputBig32 == outSegFunc) - || (OutputLittle32 == outSegFunc)); - - numAddressBits = segmentOptions->fNumAddressBits; - maxSegmentSize = segmentOptions->fMaxSegmentSize; - maxTransferSize = segmentOptions->fMaxTransferSize; - alignment = segmentOptions->fAlignment; - if (is32Bit) - { - if (!numAddressBits) - numAddressBits = 32; - else if (numAddressBits > 32) - return (kIOReturnBadArgument); // Wrong output function for bits - } - - if (numAddressBits && (numAddressBits < PAGE_SHIFT)) return (kIOReturnBadArgument); - - if (!maxSegmentSize) maxSegmentSize--; // Set Max segment to -1 - if (!maxTransferSize) maxTransferSize--; // Set Max transfer to -1 - - if (mapper && !OSDynamicCast(IOMapper, mapper)) - { - device = mapper; - mapper = 0; - } - if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) - { - IOMapper::checkForSystemMapper(); - mapper = IOMapper::gSystem; - } - - fNumSegments = 0; - fOutSeg = outSegFunc; - fNumAddressBits = numAddressBits; - fMaxSegmentSize = maxSegmentSize; - fMappingOptions = mappingOptions; - fMaxTransferSize = maxTransferSize; - if (!alignment) alignment = 1; - fAlignMask = alignment - 1; - - alignment = segmentOptions->fAlignmentLength; - if (!alignment) alignment = 1; - fAlignMaskLength = alignment - 1; - - alignment = segmentOptions->fAlignmentInternalSegments; - if (!alignment) alignment = (fAlignMask + 1); - fAlignMaskInternalSegments = alignment - 1; - - switch (MAPTYPE(mappingOptions)) - { - case kMapped: break; - case kUnmapped: break; - case kNonCoherent: break; - - case kBypassed: - if (!mapper) break; - return (kIOReturnBadArgument); - - default: - return (kIOReturnBadArgument); - }; - - if (mapper != fMapper) - { - if (mapper) mapper->retain(); - if (fMapper) fMapper->release(); - fMapper = mapper; - } - - fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions)); - fInternalState->fDevice = device; - - return (kIOReturnSuccess); + IOService * device = 0; + UInt8 numAddressBits; + UInt64 maxSegmentSize; + UInt64 maxTransferSize; + UInt32 alignment; + + bool is32Bit; + + if (!outSegFunc || !segmentOptions) { + return kIOReturnBadArgument; + } + + is32Bit = ((OutputHost32 == outSegFunc) + || (OutputBig32 == outSegFunc) + || (OutputLittle32 == outSegFunc)); + + numAddressBits = segmentOptions->fNumAddressBits; + maxSegmentSize = segmentOptions->fMaxSegmentSize; + maxTransferSize = segmentOptions->fMaxTransferSize; + alignment = segmentOptions->fAlignment; + if (is32Bit) { + if (!numAddressBits) { + numAddressBits = 32; + } else if (numAddressBits > 32) { + return kIOReturnBadArgument; // Wrong output function for bits + } + } + + if (numAddressBits && (numAddressBits < PAGE_SHIFT)) { + return kIOReturnBadArgument; + } + + if (!maxSegmentSize) { + maxSegmentSize--; // Set Max segment to -1 + } + if (!maxTransferSize) { + maxTransferSize--; // Set Max transfer to -1 + } + if (mapper && !OSDynamicCast(IOMapper, mapper)) { + device = mapper; + mapper = 0; + } + if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) { + IOMapper::checkForSystemMapper(); + mapper = IOMapper::gSystem; + } + + fNumSegments = 0; + fOutSeg = outSegFunc; + fNumAddressBits = numAddressBits; + fMaxSegmentSize = maxSegmentSize; + fMappingOptions = mappingOptions; + fMaxTransferSize = maxTransferSize; + if (!alignment) { + alignment = 1; + } + 
fAlignMask = alignment - 1; + + alignment = segmentOptions->fAlignmentLength; + if (!alignment) { + alignment = 1; + } + fAlignMaskLength = alignment - 1; + + alignment = segmentOptions->fAlignmentInternalSegments; + if (!alignment) { + alignment = (fAlignMask + 1); + } + fAlignMaskInternalSegments = alignment - 1; + + switch (MAPTYPE(mappingOptions)) { + case kMapped: break; + case kUnmapped: break; + case kNonCoherent: break; + + case kBypassed: + if (!mapper) { + break; + } + return kIOReturnBadArgument; + + default: + return kIOReturnBadArgument; + } + ; + + if (mapper != fMapper) { + if (mapper) { + mapper->retain(); + } + if (fMapper) { + fMapper->release(); + } + fMapper = mapper; + } + + fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions)); + fInternalState->fDevice = device; + + return kIOReturnSuccess; } void IODMACommand::free() { - if (reserved) IODelete(reserved, IODMACommandInternal, 1); + if (reserved) { + IODelete(reserved, IODMACommandInternal, 1); + } - if (fMapper) fMapper->release(); + if (fMapper) { + fMapper->release(); + } - super::free(); + super::free(); } IOReturn IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare) { - IOReturn err = kIOReturnSuccess; - - if (mem == fMemory) - { - if (!autoPrepare) - { - while (fActive) - complete(); + IOReturn err = kIOReturnSuccess; + + if (mem == fMemory) { + if (!autoPrepare) { + while (fActive) { + complete(); + } + } + return kIOReturnSuccess; } - return kIOReturnSuccess; - } - - if (fMemory) { - // As we are almost certainly being called from a work loop thread - // if fActive is true it is probably not a good time to potentially - // block. Just test for it and return an error - if (fActive) - return kIOReturnBusy; - clearMemoryDescriptor(); - } - - if (mem) { - bzero(&fMDSummary, sizeof(fMDSummary)); - err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)), - &fMDSummary, sizeof(fMDSummary)); - if (err) - return err; - - ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage; - - if ((kMapped == MAPTYPE(fMappingOptions)) - && fMapper) - fInternalState->fCheckAddressing = false; - else - fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT)))); - - fInternalState->fNewMD = true; - mem->retain(); - fMemory = mem; - fInternalState->fSetActiveNoMapper = (!fMapper); - if (fInternalState->fSetActiveNoMapper) mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0); - if (autoPrepare) { - err = prepare(); - if (err) { + + if (fMemory) { + // As we are almost certainly being called from a work loop thread + // if fActive is true it is probably not a good time to potentially + // block. Just test for it and return an error + if (fActive) { + return kIOReturnBusy; + } clearMemoryDescriptor(); - } } - } - - return err; + + if (mem) { + bzero(&fMDSummary, sizeof(fMDSummary)); + err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)), + &fMDSummary, sizeof(fMDSummary)); + if (err) { + return err; + } + + ppnum_t highPage = fMDSummary.fHighestPage ? 
fMDSummary.fHighestPage : gIOLastPage; + + if ((kMapped == MAPTYPE(fMappingOptions)) + && fMapper) { + fInternalState->fCheckAddressing = false; + } else { + fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT)))); + } + + fInternalState->fNewMD = true; + mem->retain(); + fMemory = mem; + fInternalState->fSetActiveNoMapper = (!fMapper); + if (fInternalState->fSetActiveNoMapper) { + mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0); + } + if (autoPrepare) { + err = prepare(); + if (err) { + clearMemoryDescriptor(); + } + } + } + + return err; } IOReturn IODMACommand::clearMemoryDescriptor(bool autoComplete) { - if (fActive && !autoComplete) return (kIOReturnNotReady); + if (fActive && !autoComplete) { + return kIOReturnNotReady; + } - if (fMemory) - { - while (fActive) complete(); - if (fInternalState->fSetActiveNoMapper) fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0); - fMemory->release(); - fMemory = 0; - } + if (fMemory) { + while (fActive) { + complete(); + } + if (fInternalState->fSetActiveNoMapper) { + fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0); + } + fMemory->release(); + fMemory = 0; + } - return (kIOReturnSuccess); + return kIOReturnSuccess; } const IOMemoryDescriptor * IODMACommand::getMemoryDescriptor() const { - return fMemory; + return fMemory; } IOMemoryDescriptor * IODMACommand::getIOMemoryDescriptor() const { - IOMemoryDescriptor * mem; + IOMemoryDescriptor * mem; - mem = reserved->fCopyMD; - if (!mem) mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory); + mem = reserved->fCopyMD; + if (!mem) { + mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory); + } - return (mem); + return mem; } IOReturn IODMACommand::segmentOp( - void *reference, - IODMACommand *target, - Segment64 segment, - void *segments, - UInt32 segmentIndex) + void *reference, + IODMACommand *target, + Segment64 segment, + void *segments, + UInt32 segmentIndex) { - IOOptionBits op = (uintptr_t) reference; - addr64_t maxPhys, address; - uint64_t length; - uint32_t numPages; - uint32_t mask; - - IODMACommandInternal * state = target->reserved; - - if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) - maxPhys = (1ULL << target->fNumAddressBits); - else - maxPhys = 0; - maxPhys--; - - address = segment.fIOVMAddr; - length = segment.fLength; - - assert(length); - - if (!state->fMisaligned) - { - mask = (segmentIndex ? 
target->fAlignMaskInternalSegments : state->fSourceAlignMask); - state->fMisaligned |= (0 != (mask & address)); - if (state->fMisaligned) DEBG("misaligned address %qx:%qx, %x\n", address, length, mask); - } - if (!state->fMisaligned) - { - mask = target->fAlignMaskLength; - state->fMisaligned |= (0 != (mask & length)); - if (state->fMisaligned) DEBG("misaligned length %qx:%qx, %x\n", address, length, mask); - } - - if (state->fMisaligned && (kWalkPreflight & op)) - return (kIOReturnNotAligned); - - if (!state->fDoubleBuffer) - { - if ((address + length - 1) <= maxPhys) - { - length = 0; + IOOptionBits op = (uintptr_t) reference; + addr64_t maxPhys, address; + uint64_t length; + uint32_t numPages; + uint32_t mask; + + IODMACommandInternal * state = target->reserved; + + if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) { + maxPhys = (1ULL << target->fNumAddressBits); + } else { + maxPhys = 0; } - else if (address <= maxPhys) - { - DEBG("tail %qx, %qx", address, length); - length = (address + length - maxPhys - 1); - address = maxPhys + 1; - DEBG("-> %qx, %qx\n", address, length); - } - } - - if (!length) - return (kIOReturnSuccess); - - numPages = atop_64(round_page_64((address & PAGE_MASK) + length)); - - if (kWalkPreflight & op) - { - state->fCopyPageCount += numPages; - } - else - { - vm_page_t lastPage; - lastPage = NULL; - if (kWalkPrepare & op) - { - lastPage = state->fCopyNext; - for (IOItemCount idx = 0; idx < numPages; idx++) - { - vm_page_set_offset(lastPage, atop_64(address) + idx); - lastPage = vm_page_get_next(lastPage); - } + maxPhys--; + + address = segment.fIOVMAddr; + length = segment.fLength; + + assert(length); + + if (!state->fMisaligned) { + mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask); + state->fMisaligned |= (0 != (mask & address)); + if (state->fMisaligned) { + DEBG("misaligned address %qx:%qx, %x\n", address, length, mask); + } + } + if (!state->fMisaligned) { + mask = target->fAlignMaskLength; + state->fMisaligned |= (0 != (mask & length)); + if (state->fMisaligned) { + DEBG("misaligned length %qx:%qx, %x\n", address, length, mask); + } } - if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) - { - lastPage = state->fCopyNext; - for (IOItemCount idx = 0; idx < numPages; idx++) - { - if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) - { - addr64_t cpuAddr = address; - addr64_t remapAddr; - uint64_t chunk; - - if ((kMapped == MAPTYPE(target->fMappingOptions)) - && target->fMapper) - { - cpuAddr = target->fMapper->mapToPhysicalAddress(address); - } - - remapAddr = ptoa_64(vm_page_get_phys_page(lastPage)); - if (!state->fDoubleBuffer) - { - remapAddr += (address & PAGE_MASK); - } - chunk = PAGE_SIZE - (address & PAGE_MASK); - if (chunk > length) - chunk = length; - - DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr, - (kWalkSyncIn & op) ? 
"->" : "<-", - address, chunk, op); - - if (kWalkSyncIn & op) - { // cppvNoModSnk - copypv(remapAddr, cpuAddr, chunk, - cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc ); - } - else - { - copypv(cpuAddr, remapAddr, chunk, - cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc ); - } - address += chunk; - length -= chunk; + if (state->fMisaligned && (kWalkPreflight & op)) { + return kIOReturnNotAligned; + } + + if (!state->fDoubleBuffer) { + if ((address + length - 1) <= maxPhys) { + length = 0; + } else if (address <= maxPhys) { + DEBG("tail %qx, %qx", address, length); + length = (address + length - maxPhys - 1); + address = maxPhys + 1; + DEBG("-> %qx, %qx\n", address, length); + } + } + + if (!length) { + return kIOReturnSuccess; + } + + numPages = atop_64(round_page_64((address & PAGE_MASK) + length)); + + if (kWalkPreflight & op) { + state->fCopyPageCount += numPages; + } else { + vm_page_t lastPage; + lastPage = NULL; + if (kWalkPrepare & op) { + lastPage = state->fCopyNext; + for (IOItemCount idx = 0; idx < numPages; idx++) { + vm_page_set_offset(lastPage, atop_64(address) + idx); + lastPage = vm_page_get_next(lastPage); + } + } + + if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) { + lastPage = state->fCopyNext; + for (IOItemCount idx = 0; idx < numPages; idx++) { + if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) { + addr64_t cpuAddr = address; + addr64_t remapAddr; + uint64_t chunk; + + if ((kMapped == MAPTYPE(target->fMappingOptions)) + && target->fMapper) { + cpuAddr = target->fMapper->mapToPhysicalAddress(address); + } + + remapAddr = ptoa_64(vm_page_get_phys_page(lastPage)); + if (!state->fDoubleBuffer) { + remapAddr += (address & PAGE_MASK); + } + chunk = PAGE_SIZE - (address & PAGE_MASK); + if (chunk > length) { + chunk = length; + } + + DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr, + (kWalkSyncIn & op) ? 
"->" : "<-", + address, chunk, op); + + if (kWalkSyncIn & op) { // cppvNoModSnk + copypv(remapAddr, cpuAddr, chunk, + cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc ); + } else { + copypv(cpuAddr, remapAddr, chunk, + cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc ); + } + address += chunk; + length -= chunk; + } + lastPage = vm_page_get_next(lastPage); + } } - lastPage = vm_page_get_next(lastPage); - } + state->fCopyNext = lastPage; } - state->fCopyNext = lastPage; - } - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOBufferMemoryDescriptor * +IOBufferMemoryDescriptor * IODMACommand::createCopyBuffer(IODirection direction, UInt64 length) { - mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask - return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, - direction, length, mask)); + mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask + return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task, + direction, length, mask); } IOReturn IODMACommand::walkAll(UInt8 op) { - IODMACommandInternal * state = fInternalState; - - IOReturn ret = kIOReturnSuccess; - UInt32 numSegments; - UInt64 offset; - - if (kWalkPreflight & op) - { - state->fMisaligned = false; - state->fDoubleBuffer = false; - state->fPrepared = false; - state->fCopyNext = NULL; - state->fCopyPageAlloc = 0; - state->fCopyPageCount = 0; - state->fNextRemapPage = NULL; - state->fCopyMD = 0; - - if (!(kWalkDoubleBuffer & op)) - { - offset = 0; - numSegments = 0-1; - ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments); - } + IODMACommandInternal * state = fInternalState; + + IOReturn ret = kIOReturnSuccess; + UInt32 numSegments; + UInt64 offset; + + if (kWalkPreflight & op) { + state->fMisaligned = false; + state->fDoubleBuffer = false; + state->fPrepared = false; + state->fCopyNext = NULL; + state->fCopyPageAlloc = 0; + state->fCopyPageCount = 0; + state->fNextRemapPage = NULL; + state->fCopyMD = 0; + + if (!(kWalkDoubleBuffer & op)) { + offset = 0; + numSegments = 0 - 1; + ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments); + } - op &= ~kWalkPreflight; + op &= ~kWalkPreflight; - state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer); - state->fForceDoubleBuffer = false; - if (state->fDoubleBuffer) - state->fCopyPageCount = atop_64(round_page(state->fPreparedLength)); + state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer); + state->fForceDoubleBuffer = false; + if (state->fDoubleBuffer) { + state->fCopyPageCount = atop_64(round_page(state->fPreparedLength)); + } - if (state->fCopyPageCount) - { - vm_page_t mapBase = NULL; + if (state->fCopyPageCount) { + vm_page_t mapBase = NULL; - DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount); + DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount); - if (!fMapper && !state->fDoubleBuffer) - { - kern_return_t kr; + if (!fMapper && !state->fDoubleBuffer) { + kern_return_t kr; - if (fMapper) panic("fMapper copying"); + if (fMapper) { + panic("fMapper copying"); + } - kr = vm_page_alloc_list(state->fCopyPageCount, - KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase); - if (KERN_SUCCESS != kr) - { - DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr); - mapBase = NULL; - } - } - - if (mapBase) - { - state->fCopyPageAlloc = mapBase; - state->fCopyNext = state->fCopyPageAlloc; - offset = 0; - numSegments = 0-1; - ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments); - 
state->fPrepared = true; - op &= ~(kWalkSyncIn | kWalkSyncOut); - } - else - { - DEBG("alloc IOBMD\n"); - state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength); - - if (state->fCopyMD) - { - ret = kIOReturnSuccess; - state->fPrepared = true; - } - else - { - DEBG("IODMACommand !alloc IOBMD"); - return (kIOReturnNoResources); + kr = vm_page_alloc_list(state->fCopyPageCount, + KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase); + if (KERN_SUCCESS != kr) { + DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr); + mapBase = NULL; + } + } + + if (mapBase) { + state->fCopyPageAlloc = mapBase; + state->fCopyNext = state->fCopyPageAlloc; + offset = 0; + numSegments = 0 - 1; + ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments); + state->fPrepared = true; + op &= ~(kWalkSyncIn | kWalkSyncOut); + } else { + DEBG("alloc IOBMD\n"); + state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength); + + if (state->fCopyMD) { + ret = kIOReturnSuccess; + state->fPrepared = true; + } else { + DEBG("IODMACommand !alloc IOBMD"); + return kIOReturnNoResources; + } + } + } } - } } - if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) - { - if (state->fCopyPageCount) - { - DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount); - - if (state->fCopyPageAlloc) - { - state->fCopyNext = state->fCopyPageAlloc; - offset = 0; - numSegments = 0-1; - ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments); - } - else if (state->fCopyMD) - { - DEBG("sync IOBMD\n"); - - if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) - { - IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory); - - IOByteCount bytes; - - if (kWalkSyncIn & op) - bytes = poMD->writeBytes(state->fPreparedOffset, + if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) { + if (state->fCopyPageCount) { + DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount); + + if (state->fCopyPageAlloc) { + state->fCopyNext = state->fCopyPageAlloc; + offset = 0; + numSegments = 0 - 1; + ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments); + } else if (state->fCopyMD) { + DEBG("sync IOBMD\n"); + + if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) { + IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory); + + IOByteCount bytes; + + if (kWalkSyncIn & op) { + bytes = poMD->writeBytes(state->fPreparedOffset, state->fCopyMD->getBytesNoCopy(), state->fPreparedLength); - else - bytes = poMD->readBytes(state->fPreparedOffset, + } else { + bytes = poMD->readBytes(state->fPreparedOffset, state->fCopyMD->getBytesNoCopy(), state->fPreparedLength); - DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes); - ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun; + } + DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes); + ret = (bytes == state->fPreparedLength) ? 
kIOReturnSuccess : kIOReturnUnderrun; + } else { + ret = kIOReturnSuccess; + } + } } - else - ret = kIOReturnSuccess; - } } - } - if (kWalkComplete & op) - { - if (state->fCopyPageAlloc) - { - vm_page_free_list(state->fCopyPageAlloc, FALSE); - state->fCopyPageAlloc = 0; - state->fCopyPageCount = 0; - } - if (state->fCopyMD) - { - state->fCopyMD->release(); - state->fCopyMD = 0; - } + if (kWalkComplete & op) { + if (state->fCopyPageAlloc) { + vm_page_free_list(state->fCopyPageAlloc, FALSE); + state->fCopyPageAlloc = 0; + state->fCopyPageCount = 0; + } + if (state->fCopyMD) { + state->fCopyMD->release(); + state->fCopyMD = 0; + } - state->fPrepared = false; - } - return (ret); + state->fPrepared = false; + } + return ret; } UInt8 IODMACommand::getNumAddressBits(void) { - return (fNumAddressBits); + return fNumAddressBits; } UInt32 IODMACommand::getAlignment(void) { - return (fAlignMask + 1); + return fAlignMask + 1; } uint32_t IODMACommand::getAlignmentLength(void) { - return (fAlignMaskLength + 1); + return fAlignMaskLength + 1; } uint32_t IODMACommand::getAlignmentInternalSegments(void) { - return (fAlignMaskInternalSegments + 1); + return fAlignMaskInternalSegments + 1; } IOReturn -IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc, - const SegmentOptions * segmentOptions, - uint32_t mappingOptions, - IOMapper * mapper, - UInt64 offset, - UInt64 length, - bool flushCache, - bool synchronize) +IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc, + const SegmentOptions * segmentOptions, + uint32_t mappingOptions, + IOMapper * mapper, + UInt64 offset, + UInt64 length, + bool flushCache, + bool synchronize) { - IOReturn ret; + IOReturn ret; - if (fActive) return kIOReturnNotPermitted; + if (fActive) { + return kIOReturnNotPermitted; + } - ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper); - if (kIOReturnSuccess != ret) return (ret); + ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper); + if (kIOReturnSuccess != ret) { + return ret; + } - ret = prepare(offset, length, flushCache, synchronize); + ret = prepare(offset, length, flushCache, synchronize); - return (ret); + return ret; } IOReturn -IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc, - UInt8 numAddressBits, - UInt64 maxSegmentSize, - MappingOptions mappingOptions, - UInt64 maxTransferSize, - UInt32 alignment, - IOMapper *mapper, - UInt64 offset, - UInt64 length, - bool flushCache, - bool synchronize) +IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc, + UInt8 numAddressBits, + UInt64 maxSegmentSize, + MappingOptions mappingOptions, + UInt64 maxTransferSize, + UInt32 alignment, + IOMapper *mapper, + UInt64 offset, + UInt64 length, + bool flushCache, + bool synchronize) { - SegmentOptions segmentOptions = - { - .fStructSize = sizeof(segmentOptions), - .fNumAddressBits = numAddressBits, - .fMaxSegmentSize = maxSegmentSize, - .fMaxTransferSize = maxTransferSize, - .fAlignment = alignment, - .fAlignmentLength = 1, - .fAlignmentInternalSegments = alignment - }; - - return (prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, - offset, length, flushCache, synchronize)); + SegmentOptions segmentOptions = + { + .fStructSize = sizeof(segmentOptions), + .fNumAddressBits = numAddressBits, + .fMaxSegmentSize = maxSegmentSize, + .fMaxTransferSize = maxTransferSize, + .fAlignment = alignment, + .fAlignmentLength = 1, + .fAlignmentInternalSegments = alignment + }; + + return prepareWithSpecification(outSegFunc, 
&segmentOptions, mappingOptions, mapper, + offset, length, flushCache, synchronize); } -IOReturn +IOReturn IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize) { - IODMACommandInternal * state = fInternalState; - IOReturn ret = kIOReturnSuccess; - uint32_t mappingOptions = fMappingOptions; - - // check specification has been set - if (!fOutSeg) return (kIOReturnNotReady); - - if (!length) length = fMDSummary.fLength; - - if (length > fMaxTransferSize) return kIOReturnNoSpace; - - if (fActive++) - { - if ((state->fPreparedOffset != offset) - || (state->fPreparedLength != length)) - ret = kIOReturnNotReady; - } - else - { - if (fAlignMaskLength & length) return (kIOReturnNotAligned); - - state->fPreparedOffset = offset; - state->fPreparedLength = length; - - state->fMapContig = false; - state->fMisaligned = false; - state->fDoubleBuffer = false; - state->fPrepared = false; - state->fCopyNext = NULL; - state->fCopyPageAlloc = 0; - state->fCopyPageCount = 0; - state->fNextRemapPage = NULL; - state->fCopyMD = 0; - state->fLocalMapperAlloc = 0; - state->fLocalMapperAllocValid = false; - state->fLocalMapperAllocLength = 0; - - state->fSourceAlignMask = fAlignMask; - if (fMapper) - state->fSourceAlignMask &= page_mask; - - state->fCursor = state->fIterateOnly - || (!state->fCheckAddressing - && (!state->fSourceAlignMask - || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask))))); - - if (!state->fCursor) - { - IOOptionBits op = kWalkPrepare | kWalkPreflight; - if (synchronize) - op |= kWalkSyncOut; - ret = walkAll(op); + IODMACommandInternal * state = fInternalState; + IOReturn ret = kIOReturnSuccess; + uint32_t mappingOptions = fMappingOptions; + + // check specification has been set + if (!fOutSeg) { + return kIOReturnNotReady; } - if (IS_NONCOHERENT(mappingOptions) && flushCache) - { - if (state->fCopyMD) - { - state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length); - } - else - { - IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory); - md->performOperation(kIOMemoryIncoherentIOStore, offset, length); - } - } - - if (fMapper) - { - IOMDDMAMapArgs mapArgs; - bzero(&mapArgs, sizeof(mapArgs)); - mapArgs.fMapper = fMapper; - mapArgs.fCommand = this; - mapArgs.fMapSpec.device = state->fDevice; - mapArgs.fMapSpec.alignment = fAlignMask + 1; - mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? 
fNumAddressBits : 64; - mapArgs.fLength = state->fPreparedLength; - const IOMemoryDescriptor * md = state->fCopyMD; - if (md) { mapArgs.fOffset = 0; } else - { - md = fMemory; - mapArgs.fOffset = state->fPreparedOffset; - } - ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs)); + if (!length) { + length = fMDSummary.fLength; + } + + if (length > fMaxTransferSize) { + return kIOReturnNoSpace; + } + + if (fActive++) { + if ((state->fPreparedOffset != offset) + || (state->fPreparedLength != length)) { + ret = kIOReturnNotReady; + } + } else { + if (fAlignMaskLength & length) { + return kIOReturnNotAligned; + } + + state->fPreparedOffset = offset; + state->fPreparedLength = length; + + state->fMapContig = false; + state->fMisaligned = false; + state->fDoubleBuffer = false; + state->fPrepared = false; + state->fCopyNext = NULL; + state->fCopyPageAlloc = 0; + state->fCopyPageCount = 0; + state->fNextRemapPage = NULL; + state->fCopyMD = 0; + state->fLocalMapperAlloc = 0; + state->fLocalMapperAllocValid = false; + state->fLocalMapperAllocLength = 0; + + state->fSourceAlignMask = fAlignMask; + if (fMapper) { + state->fSourceAlignMask &= page_mask; + } + + state->fCursor = state->fIterateOnly + || (!state->fCheckAddressing + && (!state->fSourceAlignMask + || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask))))); + + if (!state->fCursor) { + IOOptionBits op = kWalkPrepare | kWalkPreflight; + if (synchronize) { + op |= kWalkSyncOut; + } + ret = walkAll(op); + } + + if (IS_NONCOHERENT(mappingOptions) && flushCache) { + if (state->fCopyMD) { + state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length); + } else { + IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory); + md->performOperation(kIOMemoryIncoherentIOStore, offset, length); + } + } + + if (fMapper) { + IOMDDMAMapArgs mapArgs; + bzero(&mapArgs, sizeof(mapArgs)); + mapArgs.fMapper = fMapper; + mapArgs.fCommand = this; + mapArgs.fMapSpec.device = state->fDevice; + mapArgs.fMapSpec.alignment = fAlignMask + 1; + mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? 
fNumAddressBits : 64; + mapArgs.fLength = state->fPreparedLength; + const IOMemoryDescriptor * md = state->fCopyMD; + if (md) { + mapArgs.fOffset = 0; + } else { + md = fMemory; + mapArgs.fOffset = state->fPreparedOffset; + } + ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs)); //IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength); - if (kIOReturnSuccess == ret) - { - state->fLocalMapperAlloc = mapArgs.fAlloc; - state->fLocalMapperAllocValid = true; - state->fLocalMapperAllocLength = mapArgs.fAllocLength; - state->fMapContig = mapArgs.fMapContig; - } - if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess; - } - if (kIOReturnSuccess == ret) state->fPrepared = true; - } - return ret; + if (kIOReturnSuccess == ret) { + state->fLocalMapperAlloc = mapArgs.fAlloc; + state->fLocalMapperAllocValid = true; + state->fLocalMapperAllocLength = mapArgs.fAllocLength; + state->fMapContig = mapArgs.fMapContig; + } + if (NULL != IOMapper::gSystem) { + ret = kIOReturnSuccess; + } + } + if (kIOReturnSuccess == ret) { + state->fPrepared = true; + } + } + return ret; } -IOReturn +IOReturn IODMACommand::complete(bool invalidateCache, bool synchronize) { - IODMACommandInternal * state = fInternalState; - IOReturn ret = kIOReturnSuccess; - IOMemoryDescriptor * copyMD; + IODMACommandInternal * state = fInternalState; + IOReturn ret = kIOReturnSuccess; + IOMemoryDescriptor * copyMD; - if (fActive < 1) - return kIOReturnNotReady; + if (fActive < 1) { + return kIOReturnNotReady; + } - if (!--fActive) - { - copyMD = state->fCopyMD; - if (copyMD) copyMD->retain(); + if (!--fActive) { + copyMD = state->fCopyMD; + if (copyMD) { + copyMD->retain(); + } - if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) - { - if (copyMD) - { - copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength); - } - else - { - IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory); - md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength); - } - } - - if (!state->fCursor) - { - IOOptionBits op = kWalkComplete; - if (synchronize) - op |= kWalkSyncIn; - ret = walkAll(op); - } - - if (state->fLocalMapperAllocValid) - { - IOMDDMAMapArgs mapArgs; - bzero(&mapArgs, sizeof(mapArgs)); - mapArgs.fMapper = fMapper; - mapArgs.fCommand = this; - mapArgs.fAlloc = state->fLocalMapperAlloc; - mapArgs.fAllocLength = state->fLocalMapperAllocLength; - const IOMemoryDescriptor * md = copyMD; - if (md) { mapArgs.fOffset = 0; } - else - { - md = fMemory; - mapArgs.fOffset = state->fPreparedOffset; - } - - ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs)); - - state->fLocalMapperAlloc = 0; - state->fLocalMapperAllocValid = false; - state->fLocalMapperAllocLength = 0; - } - if (copyMD) copyMD->release(); - state->fPrepared = false; - } - - return ret; + if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) { + if (copyMD) { + copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength); + } else { + IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory); + md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength); + } + } + + if (!state->fCursor) { + IOOptionBits op = kWalkComplete; + if (synchronize) { + op |= kWalkSyncIn; + } + ret = walkAll(op); + } + + if (state->fLocalMapperAllocValid) { + IOMDDMAMapArgs mapArgs; + bzero(&mapArgs, sizeof(mapArgs)); + mapArgs.fMapper = fMapper; + mapArgs.fCommand = 
this; + mapArgs.fAlloc = state->fLocalMapperAlloc; + mapArgs.fAllocLength = state->fLocalMapperAllocLength; + const IOMemoryDescriptor * md = copyMD; + if (md) { + mapArgs.fOffset = 0; + } else { + md = fMemory; + mapArgs.fOffset = state->fPreparedOffset; + } + + ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs)); + + state->fLocalMapperAlloc = 0; + state->fLocalMapperAllocValid = false; + state->fLocalMapperAllocLength = 0; + } + if (copyMD) { + copyMD->release(); + } + state->fPrepared = false; + } + + return ret; } IOReturn IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length) { - IODMACommandInternal * state = fInternalState; - if (fActive < 1) - return (kIOReturnNotReady); + IODMACommandInternal * state = fInternalState; + if (fActive < 1) { + return kIOReturnNotReady; + } - if (offset) - *offset = state->fPreparedOffset; - if (length) - *length = state->fPreparedLength; + if (offset) { + *offset = state->fPreparedOffset; + } + if (length) { + *length = state->fPreparedLength; + } - return (kIOReturnSuccess); + return kIOReturnSuccess; } IOReturn IODMACommand::synchronize(IOOptionBits options) { - IODMACommandInternal * state = fInternalState; - IOReturn ret = kIOReturnSuccess; - IOOptionBits op; - - if (kIODirectionOutIn == (kIODirectionOutIn & options)) - return kIOReturnBadArgument; - - if (fActive < 1) - return kIOReturnNotReady; - - op = 0; - if (kForceDoubleBuffer & options) - { - if (state->fDoubleBuffer) return kIOReturnSuccess; - ret = complete(false /* invalidateCache */, true /* synchronize */); - state->fCursor = false; - state->fForceDoubleBuffer = true; - ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */); - - return (ret); - } - else if (state->fCursor) - return kIOReturnSuccess; + IODMACommandInternal * state = fInternalState; + IOReturn ret = kIOReturnSuccess; + IOOptionBits op; - if (kIODirectionIn & options) - op |= kWalkSyncIn | kWalkSyncAlways; - else if (kIODirectionOut & options) - op |= kWalkSyncOut | kWalkSyncAlways; + if (kIODirectionOutIn == (kIODirectionOutIn & options)) { + return kIOReturnBadArgument; + } + + if (fActive < 1) { + return kIOReturnNotReady; + } + + op = 0; + if (kForceDoubleBuffer & options) { + if (state->fDoubleBuffer) { + return kIOReturnSuccess; + } + ret = complete(false /* invalidateCache */, true /* synchronize */); + state->fCursor = false; + state->fForceDoubleBuffer = true; + ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */); + + return ret; + } else if (state->fCursor) { + return kIOReturnSuccess; + } + + if (kIODirectionIn & options) { + op |= kWalkSyncIn | kWalkSyncAlways; + } else if (kIODirectionOut & options) { + op |= kWalkSyncOut | kWalkSyncAlways; + } - ret = walkAll(op); + ret = walkAll(op); - return ret; + return ret; } -struct IODMACommandTransferContext -{ - void * buffer; - UInt64 bufferOffset; - UInt64 remaining; - UInt32 op; +struct IODMACommandTransferContext { + void * buffer; + UInt64 bufferOffset; + UInt64 remaining; + UInt32 op; }; -enum -{ - kIODMACommandTransferOpReadBytes = 1, - kIODMACommandTransferOpWriteBytes = 2 +enum{ + kIODMACommandTransferOpReadBytes = 1, + kIODMACommandTransferOpWriteBytes = 2 }; IOReturn IODMACommand::transferSegment(void *reference, - IODMACommand *target, - Segment64 segment, - void *segments, - UInt32 segmentIndex) + IODMACommand *target, + Segment64 segment, + void *segments, + UInt32 segmentIndex) { - 
IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference; - UInt64 length = min(segment.fLength, context->remaining); - addr64_t ioAddr = segment.fIOVMAddr; - addr64_t cpuAddr = ioAddr; - - context->remaining -= length; - - while (length) - { - UInt64 copyLen = length; - if ((kMapped == MAPTYPE(target->fMappingOptions)) - && target->fMapper) - { - cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr); - copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1))); - ioAddr += copyLen; + IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference; + UInt64 length = min(segment.fLength, context->remaining); + addr64_t ioAddr = segment.fIOVMAddr; + addr64_t cpuAddr = ioAddr; + + context->remaining -= length; + + while (length) { + UInt64 copyLen = length; + if ((kMapped == MAPTYPE(target->fMappingOptions)) + && target->fMapper) { + cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr); + copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1))); + ioAddr += copyLen; + } + + switch (context->op) { + case kIODMACommandTransferOpReadBytes: + copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen, + cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); + break; + case kIODMACommandTransferOpWriteBytes: + copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); + break; + } + length -= copyLen; + context->bufferOffset += copyLen; } - switch (context->op) - { - case kIODMACommandTransferOpReadBytes: - copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen, - cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); - break; - case kIODMACommandTransferOpWriteBytes: - copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen, - cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); - break; - } - length -= copyLen; - context->bufferOffset += copyLen; - } - - return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun); + return context->remaining ? 
kIOReturnSuccess : kIOReturnOverrun; } UInt64 IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length) { - IODMACommandInternal * state = fInternalState; - IODMACommandTransferContext context; - Segment64 segments[1]; - UInt32 numSegments = 0-1; + IODMACommandInternal * state = fInternalState; + IODMACommandTransferContext context; + Segment64 segments[1]; + UInt32 numSegments = 0 - 1; - if (fActive < 1) - return (0); + if (fActive < 1) { + return 0; + } - if (offset >= state->fPreparedLength) - return (0); - length = min(length, state->fPreparedLength - offset); + if (offset >= state->fPreparedLength) { + return 0; + } + length = min(length, state->fPreparedLength - offset); - context.buffer = buffer; - context.bufferOffset = 0; - context.remaining = length; - context.op = transferOp; - (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments); + context.buffer = buffer; + context.bufferOffset = 0; + context.remaining = length; + context.op = transferOp; + (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments); - return (length - context.remaining); + return length - context.remaining; } UInt64 IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length) { - return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length)); + return transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length); } UInt64 IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length) { - return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length)); + return transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length); } IOReturn IODMACommand::genIOVMSegments(UInt64 *offsetP, - void *segmentsP, - UInt32 *numSegmentsP) + void *segmentsP, + UInt32 *numSegmentsP) { - return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg, - offsetP, segmentsP, numSegmentsP)); + return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg, + offsetP, segmentsP, numSegmentsP); } IOReturn IODMACommand::genIOVMSegments(uint32_t op, - InternalSegmentFunction outSegFunc, - void *reference, - UInt64 *offsetP, - void *segmentsP, - UInt32 *numSegmentsP) + InternalSegmentFunction outSegFunc, + void *reference, + UInt64 *offsetP, + void *segmentsP, + UInt32 *numSegmentsP) { - IODMACommandInternal * internalState = fInternalState; - IOOptionBits mdOp = kIOMDWalkSegments; - IOReturn ret = kIOReturnSuccess; + IODMACommandInternal * internalState = fInternalState; + IOOptionBits mdOp = kIOMDWalkSegments; + IOReturn ret = kIOReturnSuccess; - if (!(kWalkComplete & op) && !fActive) - return kIOReturnNotReady; + if (!(kWalkComplete & op) && !fActive) { + return kIOReturnNotReady; + } - if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) - return kIOReturnBadArgument; + if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) { + return kIOReturnBadArgument; + } - IOMDDMAWalkSegmentArgs *state = - (IOMDDMAWalkSegmentArgs *)(void *) fState; + IOMDDMAWalkSegmentArgs *state = + (IOMDDMAWalkSegmentArgs *)(void *) fState; - UInt64 offset = *offsetP + internalState->fPreparedOffset; - UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength; + UInt64 offset = *offsetP + internalState->fPreparedOffset; + UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength; - if (offset >= memLength) - return kIOReturnOverrun; + if (offset >= memLength) { + return 
kIOReturnOverrun; + } - if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) { - state->fOffset = 0; - internalState->fIOVMAddrValid = state->fIOVMAddr = 0; - internalState->fNextRemapPage = NULL; - internalState->fNewMD = false; - mdOp = kIOMDFirstSegment; - if (fMapper) - { - if (internalState->fLocalMapperAllocValid) - { - state->fMapped = kIOMDDMAWalkMappedLocal; - state->fMappedBase = internalState->fLocalMapperAlloc; - } - else state->fMapped = true; - } - }; - - UInt32 segIndex = 0; - UInt32 numSegments = *numSegmentsP; - Segment64 curSeg = { 0, 0 }; - bool curSegValid = false; - addr64_t maxPhys; - - if (fNumAddressBits && (fNumAddressBits < 64)) - maxPhys = (1ULL << fNumAddressBits); - else - maxPhys = 0; - maxPhys--; - - while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) - { - // state = next seg - if (!internalState->fIOVMAddrValid) { - - IOReturn rtn; - - state->fOffset = offset; - state->fLength = memLength - offset; - - if (internalState->fMapContig && internalState->fLocalMapperAllocValid) - { - state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset; - rtn = kIOReturnSuccess; -#if 0 - { - uint64_t checkOffset; - IOPhysicalLength segLen; - for (checkOffset = 0; checkOffset < state->fLength; ) - { - addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone); - if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys) - { - panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset, - state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength, - phys, checkOffset); + if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) { + state->fOffset = 0; + internalState->fIOVMAddrValid = state->fIOVMAddr = 0; + internalState->fNextRemapPage = NULL; + internalState->fNewMD = false; + mdOp = kIOMDFirstSegment; + if (fMapper) { + if (internalState->fLocalMapperAllocValid) { + state->fMapped = kIOMDDMAWalkMappedLocal; + state->fMappedBase = internalState->fLocalMapperAlloc; + } else { + state->fMapped = true; } - checkOffset += page_size - (phys & page_mask); - } - } -#endif - } - else - { - const IOMemoryDescriptor * memory = - internalState->fCopyMD ? 
internalState->fCopyMD : fMemory; - rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState)); - mdOp = kIOMDWalkSegments; - } - - if (rtn == kIOReturnSuccess) - { - internalState->fIOVMAddrValid = true; - assert(state->fLength); - if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) { - UInt64 length = state->fLength; - offset += length; - curSeg.fLength += length; - internalState->fIOVMAddrValid = state->fIOVMAddr = 0; } - } - else if (rtn == kIOReturnOverrun) - internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end - else - return rtn; } - - // seg = state, offset = end of seg - if (!curSegValid) - { - UInt64 length = state->fLength; - offset += length; - curSeg.fIOVMAddr = state->fIOVMAddr; - curSeg.fLength = length; - curSegValid = true; - internalState->fIOVMAddrValid = state->fIOVMAddr = 0; + ; + + UInt32 segIndex = 0; + UInt32 numSegments = *numSegmentsP; + Segment64 curSeg = { 0, 0 }; + bool curSegValid = false; + addr64_t maxPhys; + + if (fNumAddressBits && (fNumAddressBits < 64)) { + maxPhys = (1ULL << fNumAddressBits); + } else { + maxPhys = 0; } + maxPhys--; - if (!internalState->fIOVMAddrValid) - { - // maxPhys - if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) - { - if (internalState->fCursor) - { - curSegValid = curSeg.fIOVMAddr = 0; - ret = kIOReturnMessageTooLarge; - break; - } - else if (curSeg.fIOVMAddr <= maxPhys) - { - UInt64 remain, newLength; - - newLength = (maxPhys + 1 - curSeg.fIOVMAddr); - DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength); - remain = curSeg.fLength - newLength; - state->fIOVMAddr = newLength + curSeg.fIOVMAddr; - internalState->fIOVMAddrValid = true; - curSeg.fLength = newLength; - state->fLength = remain; - offset -= remain; + while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) { + // state = next seg + if (!internalState->fIOVMAddrValid) { + IOReturn rtn; + + state->fOffset = offset; + state->fLength = memLength - offset; + + if (internalState->fMapContig && internalState->fLocalMapperAllocValid) { + state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset; + rtn = kIOReturnSuccess; +#if 0 + { + uint64_t checkOffset; + IOPhysicalLength segLen; + for (checkOffset = 0; checkOffset < state->fLength;) { + addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone); + if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys) { + panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset, + state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength, + phys, checkOffset); + } + checkOffset += page_size - (phys & page_mask); + } + } +#endif + } else { + const IOMemoryDescriptor * memory = + internalState->fCopyMD ? 
internalState->fCopyMD : fMemory; + rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState)); + mdOp = kIOMDWalkSegments; + } + + if (rtn == kIOReturnSuccess) { + internalState->fIOVMAddrValid = true; + assert(state->fLength); + if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) { + UInt64 length = state->fLength; + offset += length; + curSeg.fLength += length; + internalState->fIOVMAddrValid = state->fIOVMAddr = 0; + } + } else if (rtn == kIOReturnOverrun) { + internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end + } else { + return rtn; + } } - else - { - UInt64 addr = curSeg.fIOVMAddr; - ppnum_t addrPage = atop_64(addr); - vm_page_t remap = NULL; - UInt64 remain, newLength; - - DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength); - - remap = internalState->fNextRemapPage; - if (remap && (addrPage == vm_page_get_offset(remap))) - { - } - else for (remap = internalState->fCopyPageAlloc; - remap && (addrPage != vm_page_get_offset(remap)); - remap = vm_page_get_next(remap)) - { - } - - if (!remap) panic("no remap page found"); - - curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap)) - + (addr & PAGE_MASK); - curSegValid = true; - internalState->fNextRemapPage = vm_page_get_next(remap); - - newLength = PAGE_SIZE - (addr & PAGE_MASK); - if (newLength < curSeg.fLength) - { - remain = curSeg.fLength - newLength; - state->fIOVMAddr = addr + newLength; - internalState->fIOVMAddrValid = true; - curSeg.fLength = newLength; - state->fLength = remain; - offset -= remain; - } - DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset); + + // seg = state, offset = end of seg + if (!curSegValid) { + UInt64 length = state->fLength; + offset += length; + curSeg.fIOVMAddr = state->fIOVMAddr; + curSeg.fLength = length; + curSegValid = true; + internalState->fIOVMAddrValid = state->fIOVMAddr = 0; } - } - - // reduce size of output segment - uint64_t reduce, leftover = 0; - - // fMaxSegmentSize - if (curSeg.fLength > fMaxSegmentSize) - { - leftover += curSeg.fLength - fMaxSegmentSize; - curSeg.fLength = fMaxSegmentSize; - state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; - internalState->fIOVMAddrValid = true; - } - - // alignment current length - - reduce = (curSeg.fLength & fAlignMaskLength); - if (reduce && (curSeg.fLength > reduce)) - { - leftover += reduce; - curSeg.fLength -= reduce; - state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; - internalState->fIOVMAddrValid = true; - } - - // alignment next address - - reduce = (state->fIOVMAddr & fAlignMaskInternalSegments); - if (reduce && (curSeg.fLength > reduce)) - { - leftover += reduce; - curSeg.fLength -= reduce; - state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; - internalState->fIOVMAddrValid = true; - } - - if (leftover) - { - DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n", - leftover, offset, - curSeg.fIOVMAddr, curSeg.fLength); - state->fLength = leftover; - offset -= leftover; - } - - // - - if (internalState->fCursor) - { - bool misaligned; - uint32_t mask; - - mask = (segIndex ? 
fAlignMaskInternalSegments : internalState->fSourceAlignMask); - misaligned = (0 != (mask & curSeg.fIOVMAddr)); - if (!misaligned) - { - mask = fAlignMaskLength; - misaligned |= (0 != (mask & curSeg.fLength)); + + if (!internalState->fIOVMAddrValid) { + // maxPhys + if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) { + if (internalState->fCursor) { + curSegValid = curSeg.fIOVMAddr = 0; + ret = kIOReturnMessageTooLarge; + break; + } else if (curSeg.fIOVMAddr <= maxPhys) { + UInt64 remain, newLength; + + newLength = (maxPhys + 1 - curSeg.fIOVMAddr); + DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength); + remain = curSeg.fLength - newLength; + state->fIOVMAddr = newLength + curSeg.fIOVMAddr; + internalState->fIOVMAddrValid = true; + curSeg.fLength = newLength; + state->fLength = remain; + offset -= remain; + } else { + UInt64 addr = curSeg.fIOVMAddr; + ppnum_t addrPage = atop_64(addr); + vm_page_t remap = NULL; + UInt64 remain, newLength; + + DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength); + + remap = internalState->fNextRemapPage; + if (remap && (addrPage == vm_page_get_offset(remap))) { + } else { + for (remap = internalState->fCopyPageAlloc; + remap && (addrPage != vm_page_get_offset(remap)); + remap = vm_page_get_next(remap)) { + } + } + + if (!remap) { + panic("no remap page found"); + } + + curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap)) + + (addr & PAGE_MASK); + curSegValid = true; + internalState->fNextRemapPage = vm_page_get_next(remap); + + newLength = PAGE_SIZE - (addr & PAGE_MASK); + if (newLength < curSeg.fLength) { + remain = curSeg.fLength - newLength; + state->fIOVMAddr = addr + newLength; + internalState->fIOVMAddrValid = true; + curSeg.fLength = newLength; + state->fLength = remain; + offset -= remain; + } + DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset); + } + } + + // reduce size of output segment + uint64_t reduce, leftover = 0; + + // fMaxSegmentSize + if (curSeg.fLength > fMaxSegmentSize) { + leftover += curSeg.fLength - fMaxSegmentSize; + curSeg.fLength = fMaxSegmentSize; + state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; + internalState->fIOVMAddrValid = true; + } + + // alignment current length + + reduce = (curSeg.fLength & fAlignMaskLength); + if (reduce && (curSeg.fLength > reduce)) { + leftover += reduce; + curSeg.fLength -= reduce; + state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; + internalState->fIOVMAddrValid = true; + } + + // alignment next address + + reduce = (state->fIOVMAddr & fAlignMaskInternalSegments); + if (reduce && (curSeg.fLength > reduce)) { + leftover += reduce; + curSeg.fLength -= reduce; + state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr; + internalState->fIOVMAddrValid = true; + } + + if (leftover) { + DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n", + leftover, offset, + curSeg.fIOVMAddr, curSeg.fLength); + state->fLength = leftover; + offset -= leftover; + } + + // + + if (internalState->fCursor) { + bool misaligned; + uint32_t mask; + + mask = (segIndex ? 
fAlignMaskInternalSegments : internalState->fSourceAlignMask); + misaligned = (0 != (mask & curSeg.fIOVMAddr)); + if (!misaligned) { + mask = fAlignMaskLength; + misaligned |= (0 != (mask & curSeg.fLength)); + } + if (misaligned) { + if (misaligned) { + DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength); + } + curSegValid = curSeg.fIOVMAddr = 0; + ret = kIOReturnNotAligned; + break; + } + } + + if (offset >= memLength) { + curSeg.fLength -= (offset - memLength); + offset = memLength; + internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end + break; + } } - if (misaligned) - { - if (misaligned) DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength); - curSegValid = curSeg.fIOVMAddr = 0; - ret = kIOReturnNotAligned; - break; + + if (internalState->fIOVMAddrValid) { + if ((segIndex + 1 == numSegments)) { + break; + } + + ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); + curSegValid = curSeg.fIOVMAddr = 0; + if (kIOReturnSuccess != ret) { + break; + } } - } - - if (offset >= memLength) - { - curSeg.fLength -= (offset - memLength); - offset = memLength; - internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0; // At end - break; - } - } - - if (internalState->fIOVMAddrValid) { - if ((segIndex + 1 == numSegments)) - break; - - ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); - curSegValid = curSeg.fIOVMAddr = 0; - if (kIOReturnSuccess != ret) - break; - } - } - - if (curSegValid) { - ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); - } - - if (kIOReturnSuccess == ret) - { - state->fOffset = offset; - *offsetP = offset - internalState->fPreparedOffset; - *numSegmentsP = segIndex; - } - return ret; + } + + if (curSegValid) { + ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++); + } + + if (kIOReturnSuccess == ret) { + state->fOffset = offset; + *offsetP = offset - internalState->fPreparedOffset; + *numSegmentsP = segIndex; + } + return ret; } -IOReturn +IOReturn IODMACommand::clientOutputSegment( void *reference, IODMACommand *target, Segment64 segment, void *vSegList, UInt32 outSegIndex) { - SegmentFunction segmentFunction = (SegmentFunction) reference; - IOReturn ret = kIOReturnSuccess; - - if (target->fNumAddressBits && (target->fNumAddressBits < 64) - && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits) - && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) - { - DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength); - ret = kIOReturnMessageTooLarge; - } - - if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) - { - DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength); - ret = kIOReturnMessageTooLarge; - } - - return (ret); + SegmentFunction segmentFunction = (SegmentFunction) reference; + IOReturn ret = kIOReturnSuccess; + + if (target->fNumAddressBits && (target->fNumAddressBits < 64) + && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits) + && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) { + DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength); + ret = kIOReturnMessageTooLarge; + } + + if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) { + DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength); + ret = kIOReturnMessageTooLarge; + } + + return ret; } IOReturn 
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction, - UInt64 *offsetP, - void *segmentsP, - UInt32 *numSegmentsP) + UInt64 *offsetP, + void *segmentsP, + UInt32 *numSegmentsP) { - return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction, - offsetP, segmentsP, numSegmentsP)); + return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction, + offsetP, segmentsP, numSegmentsP); } -bool +bool IODMACommand::OutputHost32(IODMACommand *, - Segment64 segment, void *vSegList, UInt32 outSegIndex) + Segment64 segment, void *vSegList, UInt32 outSegIndex) { - Segment32 *base = (Segment32 *) vSegList; - base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr; - base[outSegIndex].fLength = (UInt32) segment.fLength; - return true; + Segment32 *base = (Segment32 *) vSegList; + base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr; + base[outSegIndex].fLength = (UInt32) segment.fLength; + return true; } -bool +bool IODMACommand::OutputBig32(IODMACommand *, - Segment64 segment, void *vSegList, UInt32 outSegIndex) + Segment64 segment, void *vSegList, UInt32 outSegIndex) { - const UInt offAddr = outSegIndex * sizeof(Segment32); - const UInt offLen = offAddr + sizeof(UInt32); - OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); - OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength); - return true; + const UInt offAddr = outSegIndex * sizeof(Segment32); + const UInt offLen = offAddr + sizeof(UInt32); + OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); + OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength); + return true; } bool IODMACommand::OutputLittle32(IODMACommand *, - Segment64 segment, void *vSegList, UInt32 outSegIndex) + Segment64 segment, void *vSegList, UInt32 outSegIndex) { - const UInt offAddr = outSegIndex * sizeof(Segment32); - const UInt offLen = offAddr + sizeof(UInt32); - OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); - OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength); - return true; + const UInt offAddr = outSegIndex * sizeof(Segment32); + const UInt offLen = offAddr + sizeof(UInt32); + OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); + OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength); + return true; } bool IODMACommand::OutputHost64(IODMACommand *, - Segment64 segment, void *vSegList, UInt32 outSegIndex) + Segment64 segment, void *vSegList, UInt32 outSegIndex) { - Segment64 *base = (Segment64 *) vSegList; - base[outSegIndex] = segment; - return true; + Segment64 *base = (Segment64 *) vSegList; + base[outSegIndex] = segment; + return true; } bool IODMACommand::OutputBig64(IODMACommand *, - Segment64 segment, void *vSegList, UInt32 outSegIndex) + Segment64 segment, void *vSegList, UInt32 outSegIndex) { - const UInt offAddr = outSegIndex * sizeof(Segment64); - const UInt offLen = offAddr + sizeof(UInt64); - OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); - OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength); - return true; + const UInt offAddr = outSegIndex * sizeof(Segment64); + const UInt offLen = offAddr + sizeof(UInt64); + OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); + OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength); + return true; } bool IODMACommand::OutputLittle64(IODMACommand *, - Segment64 segment, void *vSegList, UInt32 outSegIndex) + Segment64 segment, void *vSegList, UInt32 outSegIndex) { - const UInt offAddr = outSegIndex * sizeof(Segment64); - const UInt offLen = 
offAddr + sizeof(UInt64); - OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); - OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength); - return true; + const UInt offAddr = outSegIndex * sizeof(Segment64); + const UInt offLen = offAddr + sizeof(UInt64); + OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); + OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength); + return true; } - - diff --git a/iokit/Kernel/IODMAController.cpp b/iokit/Kernel/IODMAController.cpp index faf7c20aa..4ce1edea3 100644 --- a/iokit/Kernel/IODMAController.cpp +++ b/iokit/Kernel/IODMAController.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,75 +32,88 @@ #define super IOService OSDefineMetaClassAndAbstractStructors(IODMAController, IOService); -const OSSymbol *IODMAController::createControllerName(UInt32 phandle) +const OSSymbol * +IODMAController::createControllerName(UInt32 phandle) { #define CREATE_BUF_LEN 48 - char buf[CREATE_BUF_LEN]; - - snprintf(buf, CREATE_BUF_LEN, "IODMAController%08X", (uint32_t)phandle); - - return OSSymbol::withCString(buf); + char buf[CREATE_BUF_LEN]; + + snprintf(buf, CREATE_BUF_LEN, "IODMAController%08X", (uint32_t)phandle); + + return OSSymbol::withCString(buf); } -IODMAController *IODMAController::getController(IOService *provider, UInt32 dmaIndex) +IODMAController * +IODMAController::getController(IOService *provider, UInt32 dmaIndex) { - OSData *dmaParentData; - const OSSymbol *dmaParentName; - IODMAController *dmaController; - - // Find the name of the parent dma controller - dmaParentData = OSDynamicCast(OSData, provider->getProperty("dma-parent")); - if (dmaParentData == 0) return NULL; - - if (dmaParentData->getLength() == sizeof(UInt32)) { - dmaParentName = createControllerName(*(UInt32 *)dmaParentData->getBytesNoCopy()); - } else { - if (dmaIndex >= dmaParentData->getLength() / sizeof(UInt32)) - panic("dmaIndex out of range"); - dmaParentName = createControllerName(*(UInt32 *)dmaParentData->getBytesNoCopy(dmaIndex * sizeof(UInt32), sizeof(UInt32))); - } - if (dmaParentName == 0) return NULL; - - // Wait for the parent dma controller - dmaController = OSDynamicCast(IODMAController, IOService::waitForService(IOService::nameMatching(dmaParentName))); - - return dmaController; + OSData *dmaParentData; + const OSSymbol *dmaParentName; + IODMAController *dmaController; + + // Find the name of the parent dma controller + 
dmaParentData = OSDynamicCast(OSData, provider->getProperty("dma-parent")); + if (dmaParentData == 0) { + return NULL; + } + + if (dmaParentData->getLength() == sizeof(UInt32)) { + dmaParentName = createControllerName(*(UInt32 *)dmaParentData->getBytesNoCopy()); + } else { + if (dmaIndex >= dmaParentData->getLength() / sizeof(UInt32)) { + panic("dmaIndex out of range"); + } + dmaParentName = createControllerName(*(UInt32 *)dmaParentData->getBytesNoCopy(dmaIndex * sizeof(UInt32), sizeof(UInt32))); + } + if (dmaParentName == 0) { + return NULL; + } + + // Wait for the parent dma controller + dmaController = OSDynamicCast(IODMAController, IOService::waitForService(IOService::nameMatching(dmaParentName))); + + return dmaController; } -bool IODMAController::start(IOService *provider) +bool +IODMAController::start(IOService *provider) { - if (!super::start(provider)) return false; - - _provider = provider; - - return true; + if (!super::start(provider)) { + return false; + } + + _provider = provider; + + return true; } // protected -void IODMAController::registerDMAController(IOOptionBits options) +void +IODMAController::registerDMAController(IOOptionBits options) { - OSData *phandleData; - - phandleData = OSDynamicCast(OSData, _provider->getProperty("AAPL,phandle")); - - _dmaControllerName = createControllerName(*(UInt32 *)phandleData->getBytesNoCopy()); - - setName(_dmaControllerName); - - registerService(options | ((options & kIOServiceAsynchronous) ? 0 : kIOServiceSynchronous)); + OSData *phandleData; + + phandleData = OSDynamicCast(OSData, _provider->getProperty("AAPL,phandle")); + + _dmaControllerName = createControllerName(*(UInt32 *)phandleData->getBytesNoCopy()); + + setName(_dmaControllerName); + + registerService(options | ((options & kIOServiceAsynchronous) ? 0 : kIOServiceSynchronous)); } -void IODMAController::completeDMACommand(IODMAEventSource *dmaES, IODMACommand *dmaCommand) +void +IODMAController::completeDMACommand(IODMAEventSource *dmaES, IODMACommand *dmaCommand) { - dmaES->completeDMACommand(dmaCommand); + dmaES->completeDMACommand(dmaCommand); } -void IODMAController::notifyDMACommand(IODMAEventSource *dmaES, IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp) +void +IODMAController::notifyDMACommand(IODMAEventSource *dmaES, IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp) { - dmaES->notifyDMACommand(dmaCommand, status, actualByteCount, timeStamp); + dmaES->notifyDMACommand(dmaCommand, status, actualByteCount, timeStamp); } diff --git a/iokit/Kernel/IODMAEventSource.cpp b/iokit/Kernel/IODMAEventSource.cpp index 6875e0c42..af624aeaa 100644 --- a/iokit/Kernel/IODMAEventSource.cpp +++ b/iokit/Kernel/IODMAEventSource.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,180 +35,227 @@ #define super IOEventSource OSDefineMetaClassAndStructors(IODMAEventSource, IOEventSource); -bool IODMAEventSource::init(OSObject *inOwner, - IOService *inProvider, - Action inCompletion, - Action inNotification, - UInt32 inDMAIndex) +bool +IODMAEventSource::init(OSObject *inOwner, + IOService *inProvider, + Action inCompletion, + Action inNotification, + UInt32 inDMAIndex) { - IOReturn result; - - if (!super::init(inOwner)) return false; - - if (inProvider == 0) return false; - - dmaProvider = inProvider; - dmaIndex = 0xFFFFFFFF; - dmaCompletionAction = inCompletion; - dmaNotificationAction = inNotification; - - dmaController = IODMAController::getController(dmaProvider, inDMAIndex); - if (dmaController == 0) return false; - dmaController->retain(); - - result = dmaController->initDMAChannel(dmaProvider, this, &dmaIndex, inDMAIndex); - if (result != kIOReturnSuccess) return false; - - queue_init(&dmaCommandsCompleted); - dmaCommandsCompletedLock = IOSimpleLockAlloc(); - - return true; + IOReturn result; + + if (!super::init(inOwner)) { + return false; + } + + if (inProvider == 0) { + return false; + } + + dmaProvider = inProvider; + dmaIndex = 0xFFFFFFFF; + dmaCompletionAction = inCompletion; + dmaNotificationAction = inNotification; + + dmaController = IODMAController::getController(dmaProvider, inDMAIndex); + if (dmaController == 0) { + return false; + } + dmaController->retain(); + + result = dmaController->initDMAChannel(dmaProvider, this, &dmaIndex, inDMAIndex); + if (result != kIOReturnSuccess) { + return false; + } + + queue_init(&dmaCommandsCompleted); + dmaCommandsCompletedLock = IOSimpleLockAlloc(); + + return true; } -void IODMAEventSource::free() +void +IODMAEventSource::free() { - if (dmaCommandsCompletedLock != NULL) IOSimpleLockFree(dmaCommandsCompletedLock); - super::free(); + if (dmaCommandsCompletedLock != NULL) { + IOSimpleLockFree(dmaCommandsCompletedLock); + } + super::free(); } -IODMAEventSource *IODMAEventSource::dmaEventSource(OSObject *inOwner, - IOService *inProvider, - Action inCompletion, - Action inNotification, - UInt32 inDMAIndex) +IODMAEventSource * +IODMAEventSource::dmaEventSource(OSObject *inOwner, + IOService *inProvider, + Action inCompletion, + Action inNotification, + UInt32 inDMAIndex) { - IODMAEventSource *dmaES = new IODMAEventSource; - - if (dmaES && !dmaES->init(inOwner, inProvider, inCompletion, inNotification, inDMAIndex)) { - dmaES->release(); - return 0; - } - - return dmaES; + IODMAEventSource *dmaES = new IODMAEventSource; + + if (dmaES && !dmaES->init(inOwner, inProvider, inCompletion, inNotification, inDMAIndex)) { + dmaES->release(); + return 0; + } + + return dmaES; } -IOReturn IODMAEventSource::startDMACommand(IODMACommand *dmaCommand, IODirection direction, IOByteCount byteCount, IOByteCount byteOffset) +IOReturn +IODMAEventSource::startDMACommand(IODMACommand *dmaCommand, IODirection direction, IOByteCount byteCount, IOByteCount byteOffset) { - IOReturn result; - - if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) return kIOReturnError; - - if 
(dmaSynchBusy) return kIOReturnBusy; - - if (dmaCompletionAction == 0) dmaSynchBusy = true; - - result = dmaController->startDMACommand(dmaIndex, dmaCommand, direction, byteCount, byteOffset); - - if (result != kIOReturnSuccess) { - dmaSynchBusy = false; - return result; - } - - while (dmaSynchBusy) sleepGate(&dmaSynchBusy, THREAD_UNINT); - - return kIOReturnSuccess; + IOReturn result; + + if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) { + return kIOReturnError; + } + + if (dmaSynchBusy) { + return kIOReturnBusy; + } + + if (dmaCompletionAction == 0) { + dmaSynchBusy = true; + } + + result = dmaController->startDMACommand(dmaIndex, dmaCommand, direction, byteCount, byteOffset); + + if (result != kIOReturnSuccess) { + dmaSynchBusy = false; + return result; + } + + while (dmaSynchBusy) { + sleepGate(&dmaSynchBusy, THREAD_UNINT); + } + + return kIOReturnSuccess; } -IOReturn IODMAEventSource::stopDMACommand(bool flush, uint64_t timeout) +IOReturn +IODMAEventSource::stopDMACommand(bool flush, uint64_t timeout) { - if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) return kIOReturnError; - - return dmaController->stopDMACommand(dmaIndex, flush, timeout); + if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) { + return kIOReturnError; + } + + return dmaController->stopDMACommand(dmaIndex, flush, timeout); } -IOReturn IODMAEventSource::queryDMACommand(IODMACommand **dmaCommand, IOByteCount *transferCount, bool waitForIdle) +IOReturn +IODMAEventSource::queryDMACommand(IODMACommand **dmaCommand, IOByteCount *transferCount, bool waitForIdle) { - if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) return kIOReturnError; - - return dmaController->queryDMACommand(dmaIndex, dmaCommand, transferCount, waitForIdle); + if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) { + return kIOReturnError; + } + + return dmaController->queryDMACommand(dmaIndex, dmaCommand, transferCount, waitForIdle); } -IOByteCount IODMAEventSource::getFIFODepth(IODirection direction) +IOByteCount +IODMAEventSource::getFIFODepth(IODirection direction) { - if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) return 0; - - return dmaController->getFIFODepth(dmaIndex, direction); + if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) { + return 0; + } + + return dmaController->getFIFODepth(dmaIndex, direction); } -IOReturn IODMAEventSource::setFIFODepth(IOByteCount depth) +IOReturn +IODMAEventSource::setFIFODepth(IOByteCount depth) { - if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) return kIOReturnError; - - return dmaController->setFIFODepth(dmaIndex, depth); + if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) { + return kIOReturnError; + } + + return dmaController->setFIFODepth(dmaIndex, depth); } -IOByteCount IODMAEventSource::validFIFODepth(IOByteCount depth, IODirection direction) +IOByteCount +IODMAEventSource::validFIFODepth(IOByteCount depth, IODirection direction) { - if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) return kIOReturnError; - - return dmaController->validFIFODepth(dmaIndex, depth, direction); + if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) { + return kIOReturnError; + } + + return dmaController->validFIFODepth(dmaIndex, depth, direction); } -IOReturn IODMAEventSource::setFrameSize(UInt8 byteCount) +IOReturn +IODMAEventSource::setFrameSize(UInt8 byteCount) { - if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) return kIOReturnError; - - return dmaController->setFrameSize(dmaIndex, byteCount); + if ((dmaController == 0) || (dmaIndex == 0xFFFFFFFF)) { + return 
kIOReturnError; + } + + return dmaController->setFrameSize(dmaIndex, byteCount); } // protected -bool IODMAEventSource::checkForWork(void) +bool +IODMAEventSource::checkForWork(void) { - IODMACommand *dmaCommand = NULL; - bool work, again; - - IOSimpleLockLock(dmaCommandsCompletedLock); - work = !queue_empty(&dmaCommandsCompleted); - if (work) { - queue_remove_first(&dmaCommandsCompleted, dmaCommand, IODMACommand *, fCommandChain); - again = !queue_empty(&dmaCommandsCompleted); - } else { - again = false; - } - IOSimpleLockUnlock(dmaCommandsCompletedLock); - - if (work) { - (*dmaCompletionAction)(owner, this, dmaCommand, dmaCommand->reserved->fStatus, dmaCommand->reserved->fActualByteCount, dmaCommand->reserved->fTimeStamp); - } - - return again; + IODMACommand *dmaCommand = NULL; + bool work, again; + + IOSimpleLockLock(dmaCommandsCompletedLock); + work = !queue_empty(&dmaCommandsCompleted); + if (work) { + queue_remove_first(&dmaCommandsCompleted, dmaCommand, IODMACommand *, fCommandChain); + again = !queue_empty(&dmaCommandsCompleted); + } else { + again = false; + } + IOSimpleLockUnlock(dmaCommandsCompletedLock); + + if (work) { + (*dmaCompletionAction)(owner, this, dmaCommand, dmaCommand->reserved->fStatus, dmaCommand->reserved->fActualByteCount, dmaCommand->reserved->fTimeStamp); + } + + return again; } -void IODMAEventSource::completeDMACommand(IODMACommand *dmaCommand) +void +IODMAEventSource::completeDMACommand(IODMACommand *dmaCommand) { - if (dmaCompletionAction != 0) { - IOSimpleLockLock(dmaCommandsCompletedLock); - queue_enter(&dmaCommandsCompleted, dmaCommand, IODMACommand *, fCommandChain); - IOSimpleLockUnlock(dmaCommandsCompletedLock); - - signalWorkAvailable(); - } else { - dmaSynchBusy = false; - wakeupGate(&dmaSynchBusy, true); - } + if (dmaCompletionAction != 0) { + IOSimpleLockLock(dmaCommandsCompletedLock); + queue_enter(&dmaCommandsCompleted, dmaCommand, IODMACommand *, fCommandChain); + IOSimpleLockUnlock(dmaCommandsCompletedLock); + + signalWorkAvailable(); + } else { + dmaSynchBusy = false; + wakeupGate(&dmaSynchBusy, true); + } } -void IODMAEventSource::notifyDMACommand(IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp) +void +IODMAEventSource::notifyDMACommand(IODMACommand *dmaCommand, IOReturn status, IOByteCount actualByteCount, AbsoluteTime timeStamp) { - dmaCommand->reserved->fStatus = status; - dmaCommand->reserved->fActualByteCount = actualByteCount; - dmaCommand->reserved->fTimeStamp = timeStamp; - - if (dmaNotificationAction != 0) (*dmaNotificationAction)(owner, this, dmaCommand, status, actualByteCount, timeStamp); + dmaCommand->reserved->fStatus = status; + dmaCommand->reserved->fActualByteCount = actualByteCount; + dmaCommand->reserved->fTimeStamp = timeStamp; + + if (dmaNotificationAction != 0) { + (*dmaNotificationAction)(owner, this, dmaCommand, status, actualByteCount, timeStamp); + } } -IOReturn IODMAEventSource::setDMAConfig(UInt32 newReqIndex) +IOReturn +IODMAEventSource::setDMAConfig(UInt32 newReqIndex) { - return dmaController->setDMAConfig(dmaIndex, dmaProvider, newReqIndex); + return dmaController->setDMAConfig(dmaIndex, dmaProvider, newReqIndex); } -bool IODMAEventSource::validDMAConfig(UInt32 newReqIndex) +bool +IODMAEventSource::validDMAConfig(UInt32 newReqIndex) { - return dmaController->validDMAConfig(dmaIndex, dmaProvider, newReqIndex); + return dmaController->validDMAConfig(dmaIndex, dmaProvider, newReqIndex); } diff --git a/iokit/Kernel/IODataQueue.cpp b/iokit/Kernel/IODataQueue.cpp 
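As context for the two files above: IODMAController registers itself under the name "IODMAController%08X" keyed by its AAPL,phandle, and IODMAEventSource::init() resolves that name from the client's "dma-parent" property via IODMAController::getController(), blocking in waitForService() until the controller publishes. A minimal caller sketch, assuming a hypothetical IOService subclass with a work loop; MyDriver, fWorkLoop and fCmd are illustrative names, not from this patch:

    IOReturn
    MyDriver::doOneTransfer(void)
    {
        // Resolve the controller named by the provider's "dma-parent"
        // phandle (IODMAController::getController above) and set up a
        // channel; passing no completion Action selects synchronous use.
        IODMAEventSource *es = IODMAEventSource::dmaEventSource(
            this, getProvider(), NULL, NULL, 0);
        if (es == NULL) {
            return kIOReturnError;
        }
        fWorkLoop->addEventSource(es);

        // With no completion Action, startDMACommand() sets dmaSynchBusy
        // and sleeps on the gate until completeDMACommand() wakes it, so
        // this returns only once the transfer has finished (or fails).
        IOReturn ret = es->startDMACommand(fCmd, kIODirectionOut,
            fCmd->getMemoryDescriptor()->getLength(), 0);

        fWorkLoop->removeEventSource(es);
        es->release();
        return ret;
    }
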
index 15f68a362..93bd0c268 100644 --- a/iokit/Kernel/IODataQueue.cpp +++ b/iokit/Kernel/IODataQueue.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,10 +37,9 @@ #include #include -struct IODataQueueInternal -{ - mach_msg_header_t msg; - UInt32 queueSize; +struct IODataQueueInternal { + mach_msg_header_t msg; + UInt32 queueSize; }; #ifdef enqueue @@ -57,87 +56,91 @@ OSDefineMetaClassAndStructors(IODataQueue, OSObject) IODataQueue *IODataQueue::withCapacity(UInt32 size) { - IODataQueue *dataQueue = new IODataQueue; + IODataQueue *dataQueue = new IODataQueue; - if (dataQueue) { - if (!dataQueue->initWithCapacity(size)) { - dataQueue->release(); - dataQueue = 0; - } - } + if (dataQueue) { + if (!dataQueue->initWithCapacity(size)) { + dataQueue->release(); + dataQueue = 0; + } + } - return dataQueue; + return dataQueue; } -IODataQueue *IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize) +IODataQueue * +IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize) { - IODataQueue *dataQueue = new IODataQueue; + IODataQueue *dataQueue = new IODataQueue; - if (dataQueue) { - if (!dataQueue->initWithEntries(numEntries, entrySize)) { - dataQueue->release(); - dataQueue = 0; - } - } + if (dataQueue) { + if (!dataQueue->initWithEntries(numEntries, entrySize)) { + dataQueue->release(); + dataQueue = 0; + } + } - return dataQueue; + return dataQueue; } -Boolean IODataQueue::initWithCapacity(UInt32 size) +Boolean +IODataQueue::initWithCapacity(UInt32 size) { - vm_size_t allocSize = 0; + vm_size_t allocSize = 0; - if (!super::init()) { - return false; - } + if (!super::init()) { + return false; + } - if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) { - return false; - } - - allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE); + if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) { + return false; + } + + allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE); - if (allocSize < size) { - return false; - } + if (allocSize < size) { + return false; + } - assert(!notifyMsg); - notifyMsg = IONew(IODataQueueInternal, 1); + assert(!notifyMsg); + notifyMsg = IONew(IODataQueueInternal, 1); if (!notifyMsg) { return false; } - bzero(notifyMsg, sizeof(IODataQueueInternal)); - ((IODataQueueInternal *)notifyMsg)->queueSize = size; + bzero(notifyMsg, sizeof(IODataQueueInternal)); + ((IODataQueueInternal *)notifyMsg)->queueSize = size; - dataQueue = (IODataQueueMemory 
*)IOMallocAligned(allocSize, PAGE_SIZE); - if (dataQueue == 0) { - return false; - } - bzero(dataQueue, allocSize); + dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE); + if (dataQueue == 0) { + return false; + } + bzero(dataQueue, allocSize); - dataQueue->queueSize = size; + dataQueue->queueSize = size; // dataQueue->head = 0; // dataQueue->tail = 0; - return true; + return true; } -Boolean IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize) +Boolean +IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize) { - // Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE): - // check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE) - if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || - // check (numEntries + 1) - (numEntries > UINT32_MAX-1) || - // check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE) - (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX/(numEntries+1))) { - return false; - } - - return (initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize))); + // Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE): + // check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE) + if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || + // check (numEntries + 1) + (numEntries > UINT32_MAX - 1) || + // check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE) + (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX / (numEntries + 1))) { + return false; + } + + return initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize)); } -void IODataQueue::free() +void +IODataQueue::free() { if (notifyMsg) { if (dataQueue) { @@ -147,97 +150,86 @@ void IODataQueue::free() IODelete(notifyMsg, IODataQueueInternal, 1); notifyMsg = NULL; - } + } - super::free(); + super::free(); - return; + return; } -Boolean IODataQueue::enqueue(void * data, UInt32 dataSize) +Boolean +IODataQueue::enqueue(void * data, UInt32 dataSize) { - UInt32 head; - UInt32 tail; - UInt32 newTail; - const UInt32 entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE; - UInt32 queueSize; - IODataQueueEntry * entry; - - // Check for overflow of entrySize - if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) { - return false; - } - - // Force a single read of head and tail - // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers - tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED); - head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE); - - // Check for underflow of (dataQueue->queueSize - tail) - queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize; - if ((queueSize < tail) || (queueSize < head)) { - return false; - } - - if ( tail >= head ) - { - // Is there enough room at the end for the entry? - if ((entrySize <= UINT32_MAX - tail) && - ((tail + entrySize) <= queueSize) ) - { - entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); - - entry->size = dataSize; - memcpy(&entry->data, data, dataSize); - - // The tail can be out of bound when the size of the new entry - // exactly matches the available space at the end of the queue. - // The tail can range from 0 to dataQueue->queueSize inclusive. - - newTail = tail + entrySize; - } - else if ( head > entrySize ) // Is there enough room at the beginning? - { - // Wrap around to the beginning, but do not allow the tail to catch - // up to the head. 
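The overflow pre-checks in initWithEntries above vet each factor of (numEntries + 1) * (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE) before the multiply is performed. The same guard as a standalone sketch; the helper name and the parameterized header size are illustrative, not part of the patch:

    static bool
    dataQueueCapacityForEntries(UInt32 numEntries, UInt32 entrySize,
        UInt32 headerSize, UInt32 *outCapacity)
    {
        // validate each factor so the UInt32 product cannot wrap
        if (entrySize > UINT32_MAX - headerSize) {
            return false;   // entrySize + headerSize overflows
        }
        if (numEntries > UINT32_MAX - 1) {
            return false;   // numEntries + 1 overflows
        }
        if (entrySize + headerSize > UINT32_MAX / (numEntries + 1)) {
            return false;   // the product overflows
        }
        *outCapacity = (numEntries + 1) * (entrySize + headerSize);
        return true;
    }

The extra entry's worth of space exists because the ring never lets the tail catch up to the head, so holding numEntries entries at once requires numEntries + 1 slots.
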
- - dataQueue->queue->size = dataSize; - - // We need to make sure that there is enough room to set the size before - // doing this. The user client checks for this and will look for the size - // at the beginning if there isn't room for it at the end. - - if ( ( queueSize - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE ) - { - ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize; - } - - memcpy(&dataQueue->queue->data, data, dataSize); - newTail = entrySize; - } - else - { - return false; // queue is full - } - } - else - { - // Do not allow the tail to catch up to the head when the queue is full. - // That's why the comparison uses a '>' rather than '>='. - - if ( (head - tail) > entrySize ) - { - entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); - - entry->size = dataSize; - memcpy(&entry->data, data, dataSize); - newTail = tail + entrySize; - } - else - { - return false; // queue is full - } - } + UInt32 head; + UInt32 tail; + UInt32 newTail; + const UInt32 entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE; + UInt32 queueSize; + IODataQueueEntry * entry; + + // Check for overflow of entrySize + if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) { + return false; + } + + // Force a single read of head and tail + // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers + tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED); + head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE); + + // Check for underflow of (dataQueue->queueSize - tail) + queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize; + if ((queueSize < tail) || (queueSize < head)) { + return false; + } + + if (tail >= head) { + // Is there enough room at the end for the entry? + if ((entrySize <= UINT32_MAX - tail) && + ((tail + entrySize) <= queueSize)) { + entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); + + entry->size = dataSize; + memcpy(&entry->data, data, dataSize); + + // The tail can be out of bound when the size of the new entry + // exactly matches the available space at the end of the queue. + // The tail can range from 0 to dataQueue->queueSize inclusive. + + newTail = tail + entrySize; + } else if (head > entrySize) { // Is there enough room at the beginning? + // Wrap around to the beginning, but do not allow the tail to catch + // up to the head. + + dataQueue->queue->size = dataSize; + + // We need to make sure that there is enough room to set the size before + // doing this. The user client checks for this and will look for the size + // at the beginning if there isn't room for it at the end. + + if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) { + ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize; + } + + memcpy(&dataQueue->queue->data, data, dataSize); + newTail = entrySize; + } else { + return false; // queue is full + } + } else { + // Do not allow the tail to catch up to the head when the queue is full. + // That's why the comparison uses a '>' rather than '>='. 
+ + if ((head - tail) > entrySize) { + entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); + + entry->size = dataSize; + memcpy(&entry->data, data, dataSize); + newTail = tail + entrySize; + } else { + return false; // queue is full + } + } // Publish the data we just enqueued __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE); @@ -263,48 +255,49 @@ Boolean IODataQueue::enqueue(void * data, UInt32 dataSize) return true; } -void IODataQueue::setNotificationPort(mach_port_t port) +void +IODataQueue::setNotificationPort(mach_port_t port) { - mach_msg_header_t * msgh; + mach_msg_header_t * msgh; - msgh = &((IODataQueueInternal *) notifyMsg)->msg; + msgh = &((IODataQueueInternal *) notifyMsg)->msg; bzero(msgh, sizeof(mach_msg_header_t)); msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0); msgh->msgh_size = sizeof(mach_msg_header_t); msgh->msgh_remote_port = port; } -void IODataQueue::sendDataAvailableNotification() +void +IODataQueue::sendDataAvailableNotification() { - kern_return_t kr; - mach_msg_header_t * msgh; - - msgh = &((IODataQueueInternal *) notifyMsg)->msg; - if (msgh->msgh_remote_port) { - kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE); - switch(kr) { - case MACH_SEND_TIMED_OUT: // Notification already sent - case MACH_MSG_SUCCESS: - case MACH_SEND_NO_BUFFER: - break; - default: - IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/"IODataQueue", kr); - break; - } - } + kern_return_t kr; + mach_msg_header_t * msgh; + + msgh = &((IODataQueueInternal *) notifyMsg)->msg; + if (msgh->msgh_remote_port) { + kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE); + switch (kr) { + case MACH_SEND_TIMED_OUT: // Notification already sent + case MACH_MSG_SUCCESS: + case MACH_SEND_NO_BUFFER: + break; + default: + IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/ "IODataQueue", kr); + break; + } + } } -IOMemoryDescriptor *IODataQueue::getMemoryDescriptor() +IOMemoryDescriptor * +IODataQueue::getMemoryDescriptor() { - IOMemoryDescriptor *descriptor = 0; - UInt32 queueSize; + IOMemoryDescriptor *descriptor = 0; + UInt32 queueSize; - queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize; - if (dataQueue != 0) { - descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn); - } + queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize; + if (dataQueue != 0) { + descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn); + } - return descriptor; + return descriptor; } - - diff --git a/iokit/Kernel/IODeviceMemory.cpp b/iokit/Kernel/IODeviceMemory.cpp index f7ed8020c..ee178c845 100644 --- a/iokit/Kernel/IODeviceMemory.cpp +++ b/iokit/Kernel/IODeviceMemory.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
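On the ordering in the enqueue hunk above: tail is producer-owned and may be read relaxed, head is read with acquire so it pairs with the consumer's release of head after draining an entry, and the new tail is published with release so the memcpy'd payload is visible before any consumer can observe the updated index (the rdar://40780584 comment). That pairing, reduced to a toy single-slot sketch with illustrative names, in the same cast-to-_Atomic idiom the hunk uses:

    struct ToySlot {
        UInt32 tail;    // publication index, producer-owned
        UInt32 value;   // payload, written before tail is published
    };

    static void
    toyPublish(ToySlot *s, UInt32 v, UInt32 newTail)
    {
        s->value = v;   // payload first...
        // ...then publish; release orders the payload store before this
        __c11_atomic_store((_Atomic UInt32 *)&s->tail, newTail, __ATOMIC_RELEASE);
    }

    static bool
    toyConsume(ToySlot *s, UInt32 expectTail, UInt32 *out)
    {
        // the acquire load pairs with the release store above: observing
        // newTail guarantees the payload write is visible as well
        if (__c11_atomic_load((_Atomic UInt32 *)&s->tail, __ATOMIC_ACQUIRE) != expectTail) {
            return false;
        }
        *out = s->value;
        return true;
    }

The real consumer side lives in the user-space IODataQueueClient code, which this patch does not touch.
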
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,50 +31,53 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IODeviceMemory * IODeviceMemory::withRange( - IOPhysicalAddress start, - IOPhysicalLength length ) +IODeviceMemory * +IODeviceMemory::withRange( + IOPhysicalAddress start, + IOPhysicalLength length ) { - return( (IODeviceMemory *) IOMemoryDescriptor::withAddressRange( - start, length, kIODirectionNone | kIOMemoryMapperNone, NULL )); + return (IODeviceMemory *) IOMemoryDescriptor::withAddressRange( + start, length, kIODirectionNone | kIOMemoryMapperNone, NULL ); } -IODeviceMemory * IODeviceMemory::withSubRange( - IODeviceMemory * of, - IOPhysicalAddress offset, - IOPhysicalLength length ) +IODeviceMemory * +IODeviceMemory::withSubRange( + IODeviceMemory * of, + IOPhysicalAddress offset, + IOPhysicalLength length ) { - return( (IODeviceMemory *) IOSubMemoryDescriptor::withSubRange( - of, offset, length, kIODirectionNone )); + return (IODeviceMemory *) IOSubMemoryDescriptor::withSubRange( + of, offset, length, kIODirectionNone ); } -OSArray * IODeviceMemory::arrayFromList( - InitElement list[], - IOItemCount count ) +OSArray * +IODeviceMemory::arrayFromList( + InitElement list[], + IOItemCount count ) { - OSArray * array; - IODeviceMemory * range; - IOItemCount i; + OSArray * array; + IODeviceMemory * range; + IOItemCount i; - array = OSArray::withCapacity( count ); - if( 0 == array ) - return( 0); + array = OSArray::withCapacity( count ); + if (0 == array) { + return 0; + } - for( i = 0; i < count; i++) { - range = IODeviceMemory::withRange( list[i].start, list[i].length ); - if( range) { - range->setTag( list[i].tag ); - array->setObject( range); - range->release(); - } else { - array->release(); - array = 0; - break; + for (i = 0; i < count; i++) { + range = IODeviceMemory::withRange( list[i].start, list[i].length ); + if (range) { + range->setTag( list[i].tag ); + array->setObject( range); + range->release(); + } else { + array->release(); + array = 0; + break; + } } - } - return( array ); + return array; } - diff --git a/iokit/Kernel/IODeviceTreeSupport.cpp b/iokit/Kernel/IODeviceTreeSupport.cpp index 1a3b426d8..6d49c4fee 100644 --- a/iokit/Kernel/IODeviceTreeSupport.cpp +++ b/iokit/Kernel/IODeviceTreeSupport.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
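The arrayFromList hunk above builds one IODeviceMemory per InitElement, tags it, and unwinds completely (releasing the array and returning 0) if any range fails to allocate. A hypothetical caller sketch, with made-up addresses and an illustrative nub object:

    IODeviceMemory::InitElement ranges[] = {
        { 0x80000000, 0x1000, 0 },  // start, length, tag
        { 0x80001000, 0x0100, 1 },
    };
    OSArray *memArray = IODeviceMemory::arrayFromList(ranges,
        sizeof(ranges) / sizeof(ranges[0]));
    if (memArray != NULL) {
        nub->setDeviceMemory(memArray); // each range is retained by the array
        memArray->release();
    }
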
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,11 +46,9 @@ typedef UInt32 dtptr_t; #include extern "C" { - int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infosize ); void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize ); int IODTGetDefault(const char *key, void *infoAddr, unsigned int infoSize ); - } #include @@ -63,27 +61,27 @@ static OSArray * gIODTPHandles; static OSArray * gIODTPHandleMap; static OSData * gIODTResolvers; -const OSSymbol * gIODTNameKey; -const OSSymbol * gIODTUnitKey; -const OSSymbol * gIODTCompatibleKey; -const OSSymbol * gIODTTypeKey; -const OSSymbol * gIODTModelKey; -const OSSymbol * gIODTTargetTypeKey; +const OSSymbol * gIODTNameKey; +const OSSymbol * gIODTUnitKey; +const OSSymbol * gIODTCompatibleKey; +const OSSymbol * gIODTTypeKey; +const OSSymbol * gIODTModelKey; +const OSSymbol * gIODTTargetTypeKey; -const OSSymbol * gIODTSizeCellKey; -const OSSymbol * gIODTAddressCellKey; -const OSSymbol * gIODTRangeKey; +const OSSymbol * gIODTSizeCellKey; +const OSSymbol * gIODTAddressCellKey; +const OSSymbol * gIODTRangeKey; -const OSSymbol * gIODTPersistKey; +const OSSymbol * gIODTPersistKey; -const OSSymbol * gIODTDefaultInterruptController; -const OSSymbol * gIODTAAPLInterruptsKey; -const OSSymbol * gIODTPHandleKey; -const OSSymbol * gIODTInterruptCellKey; -const OSSymbol * gIODTInterruptParentKey; -const OSSymbol * gIODTNWInterruptMappingKey; +const OSSymbol * gIODTDefaultInterruptController; +const OSSymbol * gIODTAAPLInterruptsKey; +const OSSymbol * gIODTPHandleKey; +const OSSymbol * gIODTInterruptCellKey; +const OSSymbol * gIODTInterruptParentKey; +const OSSymbol * gIODTNWInterruptMappingKey; -OSDictionary * gIODTSharedInterrupts; +OSDictionary * gIODTSharedInterrupts; static IOLock * gIODTResolversLock; @@ -95,701 +93,748 @@ static bool IODTMapInterruptsSharing( IORegistryEntry * regEntry, OSDictionary * IORegistryEntry * IODeviceTreeAlloc( void * dtTop ) { - IORegistryEntry * parent; - IORegistryEntry * child; - IORegistryIterator * regIter; - OpaqueDTEntryIterator iter; - DTEntry dtChild; - DTEntry mapEntry; - OSArray * stack; - OSData * prop; - OSDictionary * allInts; - vm_offset_t * dtMap; - unsigned int propSize; - bool intMap; - bool freeDT; - - gIODTPlane = IORegistryEntry::makePlane( kIODeviceTreePlane ); - - gIODTNameKey = OSSymbol::withCStringNoCopy( "name" ); - gIODTUnitKey = OSSymbol::withCStringNoCopy( "AAPL,unit-string" ); - gIODTCompatibleKey = OSSymbol::withCStringNoCopy( "compatible" ); - gIODTTypeKey = OSSymbol::withCStringNoCopy( "device_type" ); - gIODTModelKey = OSSymbol::withCStringNoCopy( "model" ); - gIODTTargetTypeKey = OSSymbol::withCStringNoCopy( "target-type" ); - gIODTSizeCellKey = OSSymbol::withCStringNoCopy( "#size-cells" ); - gIODTAddressCellKey = OSSymbol::withCStringNoCopy( "#address-cells" ); - gIODTRangeKey = OSSymbol::withCStringNoCopy( "ranges" ); - gIODTPersistKey = OSSymbol::withCStringNoCopy( "IODTPersist" ); - - assert( gIODTPlane && gIODTCompatibleKey - && gIODTTypeKey && gIODTModelKey - && gIODTSizeCellKey && gIODTAddressCellKey && gIODTRangeKey - && gIODTPersistKey ); - 
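The loader-info helpers declared in the extern "C" block above, and implemented in the hunks that follow, hand boot-loader memory recorded under /chosen/memory-map to kernel consumers: IODTGetLoaderInfo returns 0 on success and fills in the address/size pair, and IODTFreeLoaderInfo returns the physical pages via FreePhysicalMemory() and removes the key. A hypothetical consumer sketch; "SomeLoaderData" is an illustrative property key, not from this patch:

    void *infoAddr = NULL;
    int  infoSize = 0;

    if (0 == IODTGetLoaderInfo("SomeLoaderData", &infoAddr, &infoSize)) {
        // ... consume the [infoAddr, infoAddr + infoSize) region ...
        // then give the physical pages back and drop the map entry
        IODTFreeLoaderInfo("SomeLoaderData", infoAddr, infoSize);
    }
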
- gIODTDefaultInterruptController - = OSSymbol::withCStringNoCopy("IOPrimaryInterruptController"); - gIODTNWInterruptMappingKey - = OSSymbol::withCStringNoCopy("IONWInterrupts"); - - gIODTAAPLInterruptsKey - = OSSymbol::withCStringNoCopy("AAPL,interrupts"); - gIODTPHandleKey - = OSSymbol::withCStringNoCopy("AAPL,phandle"); - - gIODTInterruptParentKey - = OSSymbol::withCStringNoCopy("interrupt-parent"); - - gIODTPHandles = OSArray::withCapacity( 1 ); - gIODTPHandleMap = OSArray::withCapacity( 1 ); - gIODTResolvers = OSData::withCapacity(16); - - gIODTResolversLock = IOLockAlloc(); - - gIODTInterruptCellKey - = OSSymbol::withCStringNoCopy("#interrupt-cells"); - - assert( gIODTDefaultInterruptController && gIODTNWInterruptMappingKey + IORegistryEntry * parent; + IORegistryEntry * child; + IORegistryIterator * regIter; + OpaqueDTEntryIterator iter; + DTEntry dtChild; + DTEntry mapEntry; + OSArray * stack; + OSData * prop; + OSDictionary * allInts; + vm_offset_t * dtMap; + unsigned int propSize; + bool intMap; + bool freeDT; + + gIODTPlane = IORegistryEntry::makePlane( kIODeviceTreePlane ); + + gIODTNameKey = OSSymbol::withCStringNoCopy( "name" ); + gIODTUnitKey = OSSymbol::withCStringNoCopy( "AAPL,unit-string" ); + gIODTCompatibleKey = OSSymbol::withCStringNoCopy( "compatible" ); + gIODTTypeKey = OSSymbol::withCStringNoCopy( "device_type" ); + gIODTModelKey = OSSymbol::withCStringNoCopy( "model" ); + gIODTTargetTypeKey = OSSymbol::withCStringNoCopy( "target-type" ); + gIODTSizeCellKey = OSSymbol::withCStringNoCopy( "#size-cells" ); + gIODTAddressCellKey = OSSymbol::withCStringNoCopy( "#address-cells" ); + gIODTRangeKey = OSSymbol::withCStringNoCopy( "ranges" ); + gIODTPersistKey = OSSymbol::withCStringNoCopy( "IODTPersist" ); + + assert( gIODTPlane && gIODTCompatibleKey + && gIODTTypeKey && gIODTModelKey + && gIODTSizeCellKey && gIODTAddressCellKey && gIODTRangeKey + && gIODTPersistKey ); + + gIODTDefaultInterruptController + = OSSymbol::withCStringNoCopy("IOPrimaryInterruptController"); + gIODTNWInterruptMappingKey + = OSSymbol::withCStringNoCopy("IONWInterrupts"); + + gIODTAAPLInterruptsKey + = OSSymbol::withCStringNoCopy("AAPL,interrupts"); + gIODTPHandleKey + = OSSymbol::withCStringNoCopy("AAPL,phandle"); + + gIODTInterruptParentKey + = OSSymbol::withCStringNoCopy("interrupt-parent"); + + gIODTPHandles = OSArray::withCapacity( 1 ); + gIODTPHandleMap = OSArray::withCapacity( 1 ); + gIODTResolvers = OSData::withCapacity(16); + + gIODTResolversLock = IOLockAlloc(); + + gIODTInterruptCellKey + = OSSymbol::withCStringNoCopy("#interrupt-cells"); + + assert( gIODTDefaultInterruptController && gIODTNWInterruptMappingKey && gIODTAAPLInterruptsKey && gIODTPHandleKey && gIODTInterruptParentKey && gIODTPHandles && gIODTPHandleMap && gIODTResolvers && gIODTResolversLock - && gIODTInterruptCellKey - ); - - freeDT = (kSuccess == DTLookupEntry( 0, "/chosen/memory-map", &mapEntry )) - && (kSuccess == DTGetProperty( mapEntry, - "DeviceTree", (void **) &dtMap, &propSize )) - && ((2 * sizeof(uint32_t)) == propSize); - - parent = MakeReferenceTable( (DTEntry)dtTop, freeDT ); - - stack = OSArray::withObjects( (const OSObject **) &parent, 1, 10 ); - DTInitEntryIterator( (DTEntry)dtTop, &iter ); - - do { - parent = (IORegistryEntry *)stack->getObject( stack->getCount() - 1); - //parent->release(); - stack->removeObject( stack->getCount() - 1); - - while( kSuccess == DTIterateEntries( &iter, &dtChild) ) { - - child = MakeReferenceTable( dtChild, freeDT ); - child->attachToParent( parent, gIODTPlane); - - 
AddPHandle( child ); - - if( kSuccess == DTEnterEntry( &iter, dtChild)) { - stack->setObject( parent); - parent = child; - } - // only registry holds retain - child->release(); - } - - } while( stack->getCount() - && (kSuccess == DTExitEntry( &iter, &dtChild))); - - stack->release(); - assert(kSuccess != DTExitEntry(&iter, &dtChild)); - - // parent is now root of the created tree - - // make root name first compatible entry (purely cosmetic) - if( (prop = (OSData *) parent->getProperty( gIODTCompatibleKey))) { - parent->setName( parent->getName(), gIODTPlane ); - parent->setName( (const char *) prop->getBytesNoCopy() ); - } - - // attach tree to meta root - parent->attachToParent( IORegistryEntry::getRegistryRoot(), gIODTPlane); - parent->release(); - - if( freeDT ) { - // free original device tree - DTInit(0); - IODTFreeLoaderInfo( "DeviceTree", - (void *)dtMap[0], (int) round_page(dtMap[1]) ); - } - - // adjust tree - - gIODTSharedInterrupts = OSDictionary::withCapacity(4); - allInts = OSDictionary::withCapacity(4); - intMap = false; - regIter = IORegistryIterator::iterateOver( gIODTPlane, - kIORegistryIterateRecursively ); - assert( regIter && allInts && gIODTSharedInterrupts ); - if( regIter && allInts && gIODTSharedInterrupts ) { - while( (child = regIter->getNextObject())) { - IODTMapInterruptsSharing( child, allInts ); - if( !intMap && child->getProperty( gIODTInterruptParentKey)) - intMap = true; - - } - regIter->release(); - } + && gIODTInterruptCellKey + ); + + freeDT = (kSuccess == DTLookupEntry( 0, "/chosen/memory-map", &mapEntry )) + && (kSuccess == DTGetProperty( mapEntry, + "DeviceTree", (void **) &dtMap, &propSize )) + && ((2 * sizeof(uint32_t)) == propSize); + + parent = MakeReferenceTable((DTEntry)dtTop, freeDT ); + + stack = OSArray::withObjects((const OSObject **) &parent, 1, 10 ); + DTInitEntryIterator((DTEntry)dtTop, &iter ); + + do { + parent = (IORegistryEntry *)stack->getObject( stack->getCount() - 1); + //parent->release(); + stack->removeObject( stack->getCount() - 1); + + while (kSuccess == DTIterateEntries( &iter, &dtChild)) { + child = MakeReferenceTable( dtChild, freeDT ); + child->attachToParent( parent, gIODTPlane); + + AddPHandle( child ); + + if (kSuccess == DTEnterEntry( &iter, dtChild)) { + stack->setObject( parent); + parent = child; + } + // only registry holds retain + child->release(); + } + } while (stack->getCount() + && (kSuccess == DTExitEntry( &iter, &dtChild))); + + stack->release(); + assert(kSuccess != DTExitEntry(&iter, &dtChild)); + + // parent is now root of the created tree + + // make root name first compatible entry (purely cosmetic) + if ((prop = (OSData *) parent->getProperty( gIODTCompatibleKey))) { + parent->setName( parent->getName(), gIODTPlane ); + parent->setName((const char *) prop->getBytesNoCopy()); + } + + // attach tree to meta root + parent->attachToParent( IORegistryEntry::getRegistryRoot(), gIODTPlane); + parent->release(); + + if (freeDT) { + // free original device tree + DTInit(0); + IODTFreeLoaderInfo( "DeviceTree", + (void *)dtMap[0], (int) round_page(dtMap[1])); + } + + // adjust tree + + gIODTSharedInterrupts = OSDictionary::withCapacity(4); + allInts = OSDictionary::withCapacity(4); + intMap = false; + regIter = IORegistryIterator::iterateOver( gIODTPlane, + kIORegistryIterateRecursively ); + assert( regIter && allInts && gIODTSharedInterrupts ); + if (regIter && allInts && gIODTSharedInterrupts) { + while ((child = regIter->getNextObject())) { + IODTMapInterruptsSharing( child, allInts ); + if (!intMap && 
child->getProperty( gIODTInterruptParentKey)) { + intMap = true; + } + } + regIter->release(); + } #if IODTSUPPORTDEBUG - parent->setProperty("allInts", allInts); - parent->setProperty("sharedInts", gIODTSharedInterrupts); - - regIter = IORegistryIterator::iterateOver( gIODTPlane, - kIORegistryIterateRecursively ); - if (regIter) { - while( (child = regIter->getNextObject())) { - OSArray * - array = OSDynamicCast(OSArray, child->getProperty( gIOInterruptSpecifiersKey )); - for( UInt32 i = 0; array && (i < array->getCount()); i++) - { - IOOptionBits options; - IOReturn ret = IODTGetInterruptOptions( child, i, &options ); - if( (ret != kIOReturnSuccess) || options) - IOLog("%s[%ld] %ld (%x)\n", child->getName(), i, options, ret); - } + parent->setProperty("allInts", allInts); + parent->setProperty("sharedInts", gIODTSharedInterrupts); + + regIter = IORegistryIterator::iterateOver( gIODTPlane, + kIORegistryIterateRecursively ); + if (regIter) { + while ((child = regIter->getNextObject())) { + OSArray * + array = OSDynamicCast(OSArray, child->getProperty( gIOInterruptSpecifiersKey )); + for (UInt32 i = 0; array && (i < array->getCount()); i++) { + IOOptionBits options; + IOReturn ret = IODTGetInterruptOptions( child, i, &options ); + if ((ret != kIOReturnSuccess) || options) { + IOLog("%s[%ld] %ld (%x)\n", child->getName(), i, options, ret); + } + } + } + regIter->release(); } - regIter->release(); - } #endif - allInts->release(); + allInts->release(); - if( intMap) - // set a key in the root to indicate we found NW interrupt mapping - parent->setProperty( gIODTNWInterruptMappingKey, - (OSObject *) gIODTNWInterruptMappingKey ); + if (intMap) { + // set a key in the root to indicate we found NW interrupt mapping + parent->setProperty( gIODTNWInterruptMappingKey, + (OSObject *) gIODTNWInterruptMappingKey ); + } - return( parent); + return parent; } -int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infoSize ) +int +IODTGetLoaderInfo( const char *key, void **infoAddr, int *infoSize ) { - IORegistryEntry *chosen; - OSData *propObj; - dtptr_t *propPtr; - unsigned int propSize; - int ret = -1; + IORegistryEntry *chosen; + OSData *propObj; + dtptr_t *propPtr; + unsigned int propSize; + int ret = -1; + + chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ); + if (chosen == 0) { + return -1; + } - chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ); - if ( chosen == 0 ) return -1; + propObj = OSDynamicCast( OSData, chosen->getProperty(key)); + if (propObj == 0) { + goto cleanup; + } - propObj = OSDynamicCast( OSData, chosen->getProperty(key) ); - if ( propObj == 0 ) goto cleanup; + propSize = propObj->getLength(); + if (propSize != (2 * sizeof(dtptr_t))) { + goto cleanup; + } - propSize = propObj->getLength(); - if ( propSize != (2 * sizeof(dtptr_t)) ) goto cleanup; - - propPtr = (dtptr_t *)propObj->getBytesNoCopy(); - if ( propPtr == 0 ) goto cleanup; + propPtr = (dtptr_t *)propObj->getBytesNoCopy(); + if (propPtr == 0) { + goto cleanup; + } - *infoAddr = (void *)(uintptr_t) (propPtr[0]); - *infoSize = (int) (propPtr[1]); + *infoAddr = (void *)(uintptr_t) (propPtr[0]); + *infoSize = (int) (propPtr[1]); - ret = 0; + ret = 0; cleanup: - chosen->release(); + chosen->release(); - return ret; + return ret; } -void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize ) +void +IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize ) { - vm_offset_t range[2]; - IORegistryEntry *chosen; - - range[0] = (vm_offset_t)infoAddr; - range[1] = 
(vm_offset_t)infoSize; - FreePhysicalMemory( range ); - - if ( key != 0 ) { - chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ); - if ( chosen != 0 ) { - chosen->removeProperty(key); - chosen->release(); - } - } + vm_offset_t range[2]; + IORegistryEntry *chosen; + + range[0] = (vm_offset_t)infoAddr; + range[1] = (vm_offset_t)infoSize; + FreePhysicalMemory( range ); + + if (key != 0) { + chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ); + if (chosen != 0) { + chosen->removeProperty(key); + chosen->release(); + } + } } -int IODTGetDefault(const char *key, void *infoAddr, unsigned int infoSize ) +int +IODTGetDefault(const char *key, void *infoAddr, unsigned int infoSize ) { - IORegistryEntry *defaults; - OSData *defaultObj; - unsigned int defaultSize; + IORegistryEntry *defaults; + OSData *defaultObj; + unsigned int defaultSize; - defaults = IORegistryEntry::fromPath( "/defaults", gIODTPlane ); - if ( defaults == 0 ) return -1; + defaults = IORegistryEntry::fromPath( "/defaults", gIODTPlane ); + if (defaults == 0) { + return -1; + } - defaultObj = OSDynamicCast( OSData, defaults->getProperty(key) ); - if ( defaultObj == 0 ) return -1; + defaultObj = OSDynamicCast( OSData, defaults->getProperty(key)); + if (defaultObj == 0) { + return -1; + } - defaultSize = defaultObj->getLength(); - if ( defaultSize > infoSize) return -1; + defaultSize = defaultObj->getLength(); + if (defaultSize > infoSize) { + return -1; + } - memcpy( infoAddr, defaultObj->getBytesNoCopy(), defaultSize ); + memcpy( infoAddr, defaultObj->getBytesNoCopy(), defaultSize ); - return 0; + return 0; } -static void FreePhysicalMemory( vm_offset_t * range ) +static void +FreePhysicalMemory( vm_offset_t * range ) { - vm_offset_t virt; + vm_offset_t virt; - virt = ml_static_ptovirt( range[0] ); - if( virt) { - ml_static_mfree( virt, range[1] ); - } + virt = ml_static_ptovirt( range[0] ); + if (virt) { + ml_static_mfree( virt, range[1] ); + } } static IORegistryEntry * MakeReferenceTable( DTEntry dtEntry, bool copy ) { - IORegistryEntry *regEntry; - OSDictionary *propTable; - const OSSymbol *nameKey; - OSData *data; - const OSSymbol *sym; - OpaqueDTPropertyIterator dtIter; - void *prop; - unsigned int propSize; - char *name; - char location[ 32 ]; - bool noLocation = true; - bool kernelOnly; - - regEntry = new IOService; - - if( regEntry && (false == regEntry->init())) { - regEntry->release(); - regEntry = 0; - } - - if( regEntry && - (kSuccess == DTInitPropertyIterator( dtEntry, &dtIter))) { - - kernelOnly = (kSuccess == DTGetProperty(dtEntry, "kernel-only", &prop, &propSize)); - propTable = regEntry->getPropertyTable(); - - while( kSuccess == DTIterateProperties( &dtIter, &name)) { - - if( kSuccess != DTGetProperty( dtEntry, name, &prop, &propSize )) - continue; - - if( copy) { - nameKey = OSSymbol::withCString(name); - data = OSData::withBytes(prop, propSize); - } else { - nameKey = OSSymbol::withCStringNoCopy(name); - data = OSData::withBytesNoCopy(prop, propSize); - } - assert( nameKey && data ); - - if (kernelOnly) - data->setSerializable(false); - - propTable->setObject( nameKey, data); - data->release(); - nameKey->release(); - - if( nameKey == gIODTNameKey ) { - if( copy) - sym = OSSymbol::withCString( (const char *) prop); - else - sym = OSSymbol::withCStringNoCopy( (const char *) prop); - regEntry->setName( sym ); - sym->release(); - - } else if( nameKey == gIODTUnitKey ) { - // all OF strings are null terminated... 
except this one - if( propSize >= (int) sizeof(location)) - propSize = sizeof(location) - 1; - strncpy( location, (const char *) prop, propSize ); - location[ propSize ] = 0; - regEntry->setLocation( location ); - propTable->removeObject( gIODTUnitKey ); - noLocation = false; - - } else if(noLocation && (!strncmp(name, "reg", sizeof("reg")))) { - // default location - override later - snprintf(location, sizeof(location), "%X", *((uint32_t *) prop)); - regEntry->setLocation( location ); - } - } - } - - return( regEntry); + IORegistryEntry *regEntry; + OSDictionary *propTable; + const OSSymbol *nameKey; + OSData *data; + const OSSymbol *sym; + OpaqueDTPropertyIterator dtIter; + void *prop; + unsigned int propSize; + char *name; + char location[32]; + bool noLocation = true; + bool kernelOnly; + + regEntry = new IOService; + + if (regEntry && (false == regEntry->init())) { + regEntry->release(); + regEntry = 0; + } + + if (regEntry && + (kSuccess == DTInitPropertyIterator( dtEntry, &dtIter))) { + kernelOnly = (kSuccess == DTGetProperty(dtEntry, "kernel-only", &prop, &propSize)); + propTable = regEntry->getPropertyTable(); + + while (kSuccess == DTIterateProperties( &dtIter, &name)) { + if (kSuccess != DTGetProperty( dtEntry, name, &prop, &propSize )) { + continue; + } + + if (copy) { + nameKey = OSSymbol::withCString(name); + data = OSData::withBytes(prop, propSize); + } else { + nameKey = OSSymbol::withCStringNoCopy(name); + data = OSData::withBytesNoCopy(prop, propSize); + } + assert( nameKey && data ); + + if (kernelOnly) { + data->setSerializable(false); + } + + propTable->setObject( nameKey, data); + data->release(); + nameKey->release(); + + if (nameKey == gIODTNameKey) { + if (copy) { + sym = OSSymbol::withCString((const char *) prop); + } else { + sym = OSSymbol::withCStringNoCopy((const char *) prop); + } + regEntry->setName( sym ); + sym->release(); + } else if (nameKey == gIODTUnitKey) { + // all OF strings are null terminated... 
except this one + if (propSize >= (int) sizeof(location)) { + propSize = sizeof(location) - 1; + } + strncpy( location, (const char *) prop, propSize ); + location[propSize] = 0; + regEntry->setLocation( location ); + propTable->removeObject( gIODTUnitKey ); + noLocation = false; + } else if (noLocation && (!strncmp(name, "reg", sizeof("reg")))) { + // default location - override later + snprintf(location, sizeof(location), "%X", *((uint32_t *) prop)); + regEntry->setLocation( location ); + } + } + } + + return regEntry; } -static void AddPHandle( IORegistryEntry * regEntry ) +static void +AddPHandle( IORegistryEntry * regEntry ) { - OSData * data; - - if( regEntry->getProperty( gIODTInterruptCellKey) - && (data = OSDynamicCast( OSData, regEntry->getProperty( gIODTPHandleKey )))) { - // a possible interrupt-parent - gIODTPHandles->setObject( data ); - gIODTPHandleMap->setObject( regEntry ); - } + OSData * data; + + if (regEntry->getProperty( gIODTInterruptCellKey) + && (data = OSDynamicCast( OSData, regEntry->getProperty( gIODTPHandleKey )))) { + // a possible interrupt-parent + gIODTPHandles->setObject( data ); + gIODTPHandleMap->setObject( regEntry ); + } } -static IORegistryEntry * FindPHandle( UInt32 phandle ) +static IORegistryEntry * +FindPHandle( UInt32 phandle ) { - OSData *data; - IORegistryEntry *regEntry = 0; - int i; - - for( i = 0; (data = (OSData *)gIODTPHandles->getObject( i )); i++ ) { - if( phandle == *((UInt32 *)data->getBytesNoCopy())) { - regEntry = (IORegistryEntry *) - gIODTPHandleMap->getObject( i ); - break; - } - } - - return( regEntry ); + OSData *data; + IORegistryEntry *regEntry = 0; + int i; + + for (i = 0; (data = (OSData *)gIODTPHandles->getObject( i )); i++) { + if (phandle == *((UInt32 *)data->getBytesNoCopy())) { + regEntry = (IORegistryEntry *) + gIODTPHandleMap->getObject( i ); + break; + } + } + + return regEntry; } -static bool GetUInt32( IORegistryEntry * regEntry, const OSSymbol * name, - UInt32 * value ) +static bool +GetUInt32( IORegistryEntry * regEntry, const OSSymbol * name, + UInt32 * value ) { - OSObject * obj; - OSData * data; - bool result; + OSObject * obj; + OSData * data; + bool result; - if (!(obj = regEntry->copyProperty(name))) return (false); + if (!(obj = regEntry->copyProperty(name))) { + return false; + } - result = ((data = OSDynamicCast(OSData, obj)) && (sizeof(UInt32) == data->getLength())); - if (result) *value = *((UInt32 *) data->getBytesNoCopy()); + result = ((data = OSDynamicCast(OSData, obj)) && (sizeof(UInt32) == data->getLength())); + if (result) { + *value = *((UInt32 *) data->getBytesNoCopy()); + } - obj->release(); - return(result); + obj->release(); + return result; } -static IORegistryEntry * IODTFindInterruptParent( IORegistryEntry * regEntry, IOItemCount index ) +static IORegistryEntry * +IODTFindInterruptParent( IORegistryEntry * regEntry, IOItemCount index ) { - IORegistryEntry * parent; - UInt32 phandle; - OSData * data; - unsigned int len; - - if( (data = OSDynamicCast( OSData, regEntry->getProperty( gIODTInterruptParentKey ))) - && (sizeof(UInt32) <= (len = data->getLength()))) { - if (((index + 1) * sizeof(UInt32)) > len) - index = 0; - phandle = ((UInt32 *) data->getBytesNoCopy())[index]; - parent = FindPHandle( phandle ); - - } else if( 0 == regEntry->getProperty( "interrupt-controller")) - parent = regEntry->getParentEntry( gIODTPlane); - else - parent = 0; - - return( parent ); + IORegistryEntry * parent; + UInt32 phandle; + OSData * data; + unsigned int len; + + if ((data = OSDynamicCast( OSData, 
regEntry->getProperty( gIODTInterruptParentKey ))) + && (sizeof(UInt32) <= (len = data->getLength()))) { + if (((index + 1) * sizeof(UInt32)) > len) { + index = 0; + } + phandle = ((UInt32 *) data->getBytesNoCopy())[index]; + parent = FindPHandle( phandle ); + } else if (0 == regEntry->getProperty( "interrupt-controller")) { + parent = regEntry->getParentEntry( gIODTPlane); + } else { + parent = 0; + } + + return parent; } -const OSSymbol * IODTInterruptControllerName( IORegistryEntry * regEntry ) +const OSSymbol * +IODTInterruptControllerName( IORegistryEntry * regEntry ) { - const OSSymbol *sym; - UInt32 phandle; - bool ok; - char buf[48]; - - ok = GetUInt32( regEntry, gIODTPHandleKey, &phandle); - assert( ok ); - - if( ok) { - snprintf(buf, sizeof(buf), "IOInterruptController%08X", (uint32_t)phandle); - sym = OSSymbol::withCString( buf ); - } else - sym = 0; + const OSSymbol *sym; + UInt32 phandle; + bool ok; + char buf[48]; + + ok = GetUInt32( regEntry, gIODTPHandleKey, &phandle); + assert( ok ); + + if (ok) { + snprintf(buf, sizeof(buf), "IOInterruptController%08X", (uint32_t)phandle); + sym = OSSymbol::withCString( buf ); + } else { + sym = 0; + } - return( sym ); + return sym; } #define unexpected(a) { kprintf("unexpected %s:%d\n", __FILE__, __LINE__); a; } -static void IODTGetICellCounts( IORegistryEntry * regEntry, - UInt32 * iCellCount, UInt32 * aCellCount) +static void +IODTGetICellCounts( IORegistryEntry * regEntry, + UInt32 * iCellCount, UInt32 * aCellCount) { - if( !GetUInt32( regEntry, gIODTInterruptCellKey, iCellCount)) - unexpected( *iCellCount = 1 ); - if( !GetUInt32( regEntry, gIODTAddressCellKey, aCellCount)) - *aCellCount = 0; + if (!GetUInt32( regEntry, gIODTInterruptCellKey, iCellCount)) { + unexpected( *iCellCount = 1 ); + } + if (!GetUInt32( regEntry, gIODTAddressCellKey, aCellCount)) { + *aCellCount = 0; + } } -static UInt32 IODTMapOneInterrupt( IORegistryEntry * regEntry, UInt32 * intSpec, UInt32 index, - OSData ** spec, const OSSymbol ** controller ) +static UInt32 +IODTMapOneInterrupt( IORegistryEntry * regEntry, UInt32 * intSpec, UInt32 index, + OSData ** spec, const OSSymbol ** controller ) { - IORegistryEntry *parent = 0; - OSData *data; - UInt32 *addrCmp; - UInt32 *maskCmp; - UInt32 *map; - UInt32 *endMap; - UInt32 acells, icells, pacells, picells, cell; - UInt32 i, original_icells; - bool cmp, ok = false; - - parent = IODTFindInterruptParent( regEntry, index ); - IODTGetICellCounts( parent, &icells, &acells ); - addrCmp = 0; - if( acells) { - data = OSDynamicCast( OSData, regEntry->getProperty( "reg" )); - if( data && (data->getLength() >= (acells * sizeof(UInt32)))) - addrCmp = (UInt32 *) data->getBytesNoCopy(); - } - original_icells = icells; - regEntry = parent; - - do { + IORegistryEntry *parent = 0; + OSData *data; + UInt32 *addrCmp; + UInt32 *maskCmp; + UInt32 *map; + UInt32 *endMap; + UInt32 acells, icells, pacells, picells, cell; + UInt32 i, original_icells; + bool cmp, ok = false; + + parent = IODTFindInterruptParent( regEntry, index ); + IODTGetICellCounts( parent, &icells, &acells ); + addrCmp = 0; + if (acells) { + data = OSDynamicCast( OSData, regEntry->getProperty( "reg" )); + if (data && (data->getLength() >= (acells * sizeof(UInt32)))) { + addrCmp = (UInt32 *) data->getBytesNoCopy(); + } + } + original_icells = icells; + regEntry = parent; + + do { #if IODTSUPPORTDEBUG - kprintf ("IODTMapOneInterrupt: current regEntry name %s\n", regEntry->getName()); - kprintf ("acells - icells: "); - for (i = 0; i < acells; i++) kprintf ("0x%08X ", 
addrCmp[i]); - kprintf ("- "); - for (i = 0; i < icells; i++) kprintf ("0x%08X ", intSpec[i]); - kprintf ("\n"); + kprintf("IODTMapOneInterrupt: current regEntry name %s\n", regEntry->getName()); + kprintf("acells - icells: "); + for (i = 0; i < acells; i++) { + kprintf("0x%08X ", addrCmp[i]); + } + kprintf("- "); + for (i = 0; i < icells; i++) { + kprintf("0x%08X ", intSpec[i]); + } + kprintf("\n"); #endif - if( parent && (data = OSDynamicCast( OSData, - regEntry->getProperty( "interrupt-controller")))) { - // found a controller - don't want to follow cascaded controllers - parent = 0; - *spec = OSData::withBytesNoCopy( (void *) intSpec, - icells * sizeof(UInt32)); - *controller = IODTInterruptControllerName( regEntry ); - ok = (*spec && *controller); - } else if( parent && (data = OSDynamicCast( OSData, - regEntry->getProperty( "interrupt-map")))) { - // interrupt-map - map = (UInt32 *) data->getBytesNoCopy(); - endMap = map + (data->getLength() / sizeof(UInt32)); - data = OSDynamicCast( OSData, regEntry->getProperty( "interrupt-map-mask" )); - if( data && (data->getLength() >= ((acells + icells) * sizeof(UInt32)))) - maskCmp = (UInt32 *) data->getBytesNoCopy(); - else - maskCmp = 0; + if (parent && (data = OSDynamicCast( OSData, + regEntry->getProperty( "interrupt-controller")))) { + // found a controller - don't want to follow cascaded controllers + parent = 0; + *spec = OSData::withBytesNoCopy((void *) intSpec, + icells * sizeof(UInt32)); + *controller = IODTInterruptControllerName( regEntry ); + ok = (*spec && *controller); + } else if (parent && (data = OSDynamicCast( OSData, + regEntry->getProperty( "interrupt-map")))) { + // interrupt-map + map = (UInt32 *) data->getBytesNoCopy(); + endMap = map + (data->getLength() / sizeof(UInt32)); + data = OSDynamicCast( OSData, regEntry->getProperty( "interrupt-map-mask" )); + if (data && (data->getLength() >= ((acells + icells) * sizeof(UInt32)))) { + maskCmp = (UInt32 *) data->getBytesNoCopy(); + } else { + maskCmp = 0; + } #if IODTSUPPORTDEBUG - if (maskCmp) { - kprintf (" maskCmp: "); - for (i = 0; i < acells + icells; i++) { - if (i == acells) - kprintf ("- "); - kprintf ("0x%08X ", maskCmp[i]); - } - kprintf ("\n"); - kprintf (" masked: "); - for (i = 0; i < acells + icells; i++) { - if (i == acells) - kprintf ("- "); - kprintf ("0x%08X ", ((i < acells) ? addrCmp[i] : intSpec[i-acells]) & maskCmp[i]); - } - kprintf ("\n"); - } else - kprintf ("no maskCmp\n"); + if (maskCmp) { + kprintf(" maskCmp: "); + for (i = 0; i < acells + icells; i++) { + if (i == acells) { + kprintf("- "); + } + kprintf("0x%08X ", maskCmp[i]); + } + kprintf("\n"); + kprintf(" masked: "); + for (i = 0; i < acells + icells; i++) { + if (i == acells) { + kprintf("- "); + } + kprintf("0x%08X ", ((i < acells) ? addrCmp[i] : intSpec[i - acells]) & maskCmp[i]); + } + kprintf("\n"); + } else { + kprintf("no maskCmp\n"); + } #endif - do { + do { #if IODTSUPPORTDEBUG - kprintf (" map: "); - for (i = 0; i < acells + icells; i++) { - if (i == acells) - kprintf ("- "); - kprintf ("0x%08X ", map[i]); - } - kprintf ("\n"); + kprintf(" map: "); + for (i = 0; i < acells + icells; i++) { + if (i == acells) { + kprintf("- "); + } + kprintf("0x%08X ", map[i]); + } + kprintf("\n"); #endif - for( i = 0, cmp = true; cmp && (i < (acells + icells)); i++) { - cell = (i < acells) ? 
addrCmp[i] : intSpec[ i - acells ]; - if( maskCmp) - cell &= maskCmp[i]; - cmp = (cell == map[i]); - } - - map += acells + icells; - if( 0 == (parent = FindPHandle( *(map++) ))) - unexpected(break); - - IODTGetICellCounts( parent, &picells, &pacells ); - if( cmp) { - addrCmp = map; - intSpec = map + pacells; - regEntry = parent; - } else { - map += pacells + picells; - } - } while( !cmp && (map < endMap) ); - if (!cmp) - parent = 0; - } - - if( parent) { - IODTGetICellCounts( parent, &icells, &acells ); - regEntry = parent; - } - - } while( parent); - - return( ok ? original_icells : 0 ); + for (i = 0, cmp = true; cmp && (i < (acells + icells)); i++) { + cell = (i < acells) ? addrCmp[i] : intSpec[i - acells]; + if (maskCmp) { + cell &= maskCmp[i]; + } + cmp = (cell == map[i]); + } + + map += acells + icells; + if (0 == (parent = FindPHandle( *(map++)))) { + unexpected(break); + } + + IODTGetICellCounts( parent, &picells, &pacells ); + if (cmp) { + addrCmp = map; + intSpec = map + pacells; + regEntry = parent; + } else { + map += pacells + picells; + } + } while (!cmp && (map < endMap)); + if (!cmp) { + parent = 0; + } + } + + if (parent) { + IODTGetICellCounts( parent, &icells, &acells ); + regEntry = parent; + } + } while (parent); + + return ok ? original_icells : 0; } -IOReturn IODTGetInterruptOptions( IORegistryEntry * regEntry, int source, IOOptionBits * options ) +IOReturn +IODTGetInterruptOptions( IORegistryEntry * regEntry, int source, IOOptionBits * options ) { - OSArray * controllers; - OSArray * specifiers; - OSArray * shared; - OSObject * spec; - OSObject * oneSpec; - - *options = 0; - - controllers = OSDynamicCast(OSArray, regEntry->getProperty(gIOInterruptControllersKey)); - specifiers = OSDynamicCast(OSArray, regEntry->getProperty(gIOInterruptSpecifiersKey)); - - if( !controllers || !specifiers) - return (kIOReturnNoInterrupt); - - shared = (OSArray *) gIODTSharedInterrupts->getObject( - (const OSSymbol *) controllers->getObject(source) ); - if (!shared) - return (kIOReturnSuccess); - - spec = specifiers->getObject(source); - if (!spec) - return (kIOReturnNoInterrupt); - - for (unsigned int i = 0; - (oneSpec = shared->getObject(i)) - && (!oneSpec->isEqualTo(spec)); - i++ ) {} - - if (oneSpec) - *options = kIODTInterruptShared; - - return (kIOReturnSuccess); + OSArray * controllers; + OSArray * specifiers; + OSArray * shared; + OSObject * spec; + OSObject * oneSpec; + + *options = 0; + + controllers = OSDynamicCast(OSArray, regEntry->getProperty(gIOInterruptControllersKey)); + specifiers = OSDynamicCast(OSArray, regEntry->getProperty(gIOInterruptSpecifiersKey)); + + if (!controllers || !specifiers) { + return kIOReturnNoInterrupt; + } + + shared = (OSArray *) gIODTSharedInterrupts->getObject( + (const OSSymbol *) controllers->getObject(source)); + if (!shared) { + return kIOReturnSuccess; + } + + spec = specifiers->getObject(source); + if (!spec) { + return kIOReturnNoInterrupt; + } + + for (unsigned int i = 0; + (oneSpec = shared->getObject(i)) + && (!oneSpec->isEqualTo(spec)); + i++) { + } + + if (oneSpec) { + *options = kIODTInterruptShared; + } + + return kIOReturnSuccess; } -static bool IODTMapInterruptsSharing( IORegistryEntry * regEntry, OSDictionary * allInts ) +static bool +IODTMapInterruptsSharing( IORegistryEntry * regEntry, OSDictionary * allInts ) { - IORegistryEntry * parent; - OSData * local; - OSData * local2; - UInt32 * localBits; - UInt32 * localEnd; - IOItemCount index; - OSData * map; - OSObject * oneMap; - OSArray * mapped; - OSArray * controllerInts; - 
const OSSymbol * controller = 0; - OSArray * controllers; - UInt32 skip = 1; - bool ok, nw; - - nw = (0 == (local = OSDynamicCast( OSData, - regEntry->getProperty( gIODTAAPLInterruptsKey)))); - if( nw && (0 == (local = OSDynamicCast( OSData, - regEntry->getProperty( "interrupts"))))) - return( true ); // nothing to see here - - if( nw && (parent = regEntry->getParentEntry( gIODTPlane))) { - // check for bridges on old world - if( (local2 = OSDynamicCast( OSData, - parent->getProperty( gIODTAAPLInterruptsKey)))) { - local = local2; - nw = false; - } - } - - localBits = (UInt32 *) local->getBytesNoCopy(); - localEnd = localBits + (local->getLength() / sizeof(UInt32)); - index = 0; - mapped = OSArray::withCapacity( 1 ); - controllers = OSArray::withCapacity( 1 ); - - ok = (mapped && controllers); - - if( ok) do { - if( nw) { - skip = IODTMapOneInterrupt( regEntry, localBits, index, &map, &controller ); - if( 0 == skip) { - IOLog("%s: error mapping interrupt[%d]\n", - regEntry->getName(), mapped->getCount()); - break; - } - } else { - map = OSData::withData( local, mapped->getCount() * sizeof(UInt32), - sizeof(UInt32)); - controller = gIODTDefaultInterruptController; - controller->retain(); - } - - index++; - localBits += skip; - mapped->setObject( map ); - controllers->setObject( controller ); - - if (allInts) - { - controllerInts = (OSArray *) allInts->getObject( controller ); - if (controllerInts) - { - for (unsigned int i = 0; (oneMap = controllerInts->getObject(i)); i++) - { - if (map->isEqualTo(oneMap)) - { - controllerInts = (OSArray *) gIODTSharedInterrupts->getObject( controller ); - if (controllerInts) - controllerInts->setObject(map); - else - { - controllerInts = OSArray::withObjects( (const OSObject **) &map, 1, 4 ); - if (controllerInts) - { - gIODTSharedInterrupts->setObject( controller, controllerInts ); - controllerInts->release(); - } - } - break; - } - } - if (!oneMap) - controllerInts->setObject(map); - } - else - { - controllerInts = OSArray::withObjects( (const OSObject **) &map, 1, 16 ); - if (controllerInts) - { - allInts->setObject( controller, controllerInts ); - controllerInts->release(); - } - } - } - - map->release(); - controller->release(); - - } while( localBits < localEnd); - - ok &= (localBits == localEnd); - - if( ok ) { - // store results - ok = regEntry->setProperty( gIOInterruptControllersKey, controllers); - ok &= regEntry->setProperty( gIOInterruptSpecifiersKey, mapped); - } - - if( controllers) - controllers->release(); - if( mapped) - mapped->release(); - - return( ok ); + IORegistryEntry * parent; + OSData * local; + OSData * local2; + UInt32 * localBits; + UInt32 * localEnd; + IOItemCount index; + OSData * map; + OSObject * oneMap; + OSArray * mapped; + OSArray * controllerInts; + const OSSymbol * controller = 0; + OSArray * controllers; + UInt32 skip = 1; + bool ok, nw; + + nw = (0 == (local = OSDynamicCast( OSData, + regEntry->getProperty( gIODTAAPLInterruptsKey)))); + if (nw && (0 == (local = OSDynamicCast( OSData, + regEntry->getProperty( "interrupts"))))) { + return true; // nothing to see here + } + if (nw && (parent = regEntry->getParentEntry( gIODTPlane))) { + // check for bridges on old world + if ((local2 = OSDynamicCast( OSData, + parent->getProperty( gIODTAAPLInterruptsKey)))) { + local = local2; + nw = false; + } + } + + localBits = (UInt32 *) local->getBytesNoCopy(); + localEnd = localBits + (local->getLength() / sizeof(UInt32)); + index = 0; + mapped = OSArray::withCapacity( 1 ); + controllers = OSArray::withCapacity( 1 ); + + ok = 
(mapped && controllers); + + if (ok) { + do { + if (nw) { + skip = IODTMapOneInterrupt( regEntry, localBits, index, &map, &controller ); + if (0 == skip) { + IOLog("%s: error mapping interrupt[%d]\n", + regEntry->getName(), mapped->getCount()); + break; + } + } else { + map = OSData::withData( local, mapped->getCount() * sizeof(UInt32), + sizeof(UInt32)); + controller = gIODTDefaultInterruptController; + controller->retain(); + } + + index++; + localBits += skip; + mapped->setObject( map ); + controllers->setObject( controller ); + + if (allInts) { + controllerInts = (OSArray *) allInts->getObject( controller ); + if (controllerInts) { + for (unsigned int i = 0; (oneMap = controllerInts->getObject(i)); i++) { + if (map->isEqualTo(oneMap)) { + controllerInts = (OSArray *) gIODTSharedInterrupts->getObject( controller ); + if (controllerInts) { + controllerInts->setObject(map); + } else { + controllerInts = OSArray::withObjects((const OSObject **) &map, 1, 4 ); + if (controllerInts) { + gIODTSharedInterrupts->setObject( controller, controllerInts ); + controllerInts->release(); + } + } + break; + } + } + if (!oneMap) { + controllerInts->setObject(map); + } + } else { + controllerInts = OSArray::withObjects((const OSObject **) &map, 1, 16 ); + if (controllerInts) { + allInts->setObject( controller, controllerInts ); + controllerInts->release(); + } + } + } + + map->release(); + controller->release(); + } while (localBits < localEnd); + } + + ok &= (localBits == localEnd); + + if (ok) { + // store results + ok = regEntry->setProperty( gIOInterruptControllersKey, controllers); + ok &= regEntry->setProperty( gIOInterruptSpecifiersKey, mapped); + } + + if (controllers) { + controllers->release(); + } + if (mapped) { + mapped->release(); + } + + return ok; } -bool IODTMapInterrupts( IORegistryEntry * regEntry ) +bool +IODTMapInterrupts( IORegistryEntry * regEntry ) { - return( IODTMapInterruptsSharing( regEntry, 0 )); + return IODTMapInterruptsSharing( regEntry, 0 ); } /* @@ -797,30 +842,34 @@ bool IODTMapInterrupts( IORegistryEntry * regEntry ) static bool CompareKey( OSString * key, - const IORegistryEntry * table, const OSSymbol * propName, - OSString ** matchingName ) + const IORegistryEntry * table, const OSSymbol * propName, + OSString ** matchingName ) { - OSObject *prop; - OSData *data; - OSString *string; - const char *ckey; - UInt32 keyLen; - UInt32 nlen; - const char *names; - const char *lastName; - bool wild; - bool matched; - const char *result = 0; - - if( 0 == (prop = table->copyProperty( propName ))) return( 0 ); - - if( (data = OSDynamicCast( OSData, prop ))) { - names = (const char *) data->getBytesNoCopy(); - lastName = names + data->getLength(); - } else if( (string = OSDynamicCast( OSString, prop ))) { - names = string->getCStringNoCopy(); - lastName = names + string->getLength() + 1; - } else names = 0; + OSObject *prop; + OSData *data; + OSString *string; + const char *ckey; + UInt32 keyLen; + UInt32 nlen; + const char *names; + const char *lastName; + bool wild; + bool matched; + const char *result = 0; + + if (0 == (prop = table->copyProperty( propName ))) { + return 0; + } + + if ((data = OSDynamicCast( OSData, prop ))) { + names = (const char *) data->getBytesNoCopy(); + lastName = names + data->getLength(); + } else if ((string = OSDynamicCast( OSString, prop ))) { + names = string->getCStringNoCopy(); + lastName = names + string->getLength() + 1; + } else { + names = 0; + } if (names) { ckey = key->getCStringNoCopy(); @@ -830,215 +879,237 @@ CompareKey( OSString * key, 
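+	// The property value may hold several NUL-separated names.  A key with a
+	// trailing '*' (wild) matches any name it prefixes (for example, a
+	// hypothetical key "ATY,*" matches the name "ATY,RageM3"); otherwise the
+	// key and name must match exactly, length included.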
do { // for each name in the property nlen = strnlen(names, lastName - names); - if( wild) + if (wild) { matched = ((nlen >= (keyLen - 1)) && (0 == strncmp(ckey, names, keyLen - 1))); - else + } else { matched = (keyLen == nlen) && (0 == strncmp(ckey, names, keyLen)); + } - if( matched) + if (matched) { result = names; + } names = names + nlen + 1; - - } while( (names < lastName) && (false == matched)); + } while ((names < lastName) && (false == matched)); } - if (result && matchingName) *matchingName = OSString::withCString( result ); + if (result && matchingName) { + *matchingName = OSString::withCString( result ); + } - if (prop) prop->release(); + if (prop) { + prop->release(); + } - return (result != 0); + return result != 0; } -bool IODTCompareNubName( const IORegistryEntry * regEntry, - OSString * name, OSString ** matchingName ) +bool +IODTCompareNubName( const IORegistryEntry * regEntry, + OSString * name, OSString ** matchingName ) { - bool matched; + bool matched; - matched = CompareKey( name, regEntry, gIODTNameKey, matchingName) - || CompareKey( name, regEntry, gIODTCompatibleKey, matchingName) - || CompareKey( name, regEntry, gIODTTypeKey, matchingName) - || CompareKey( name, regEntry, gIODTModelKey, matchingName); + matched = CompareKey( name, regEntry, gIODTNameKey, matchingName) + || CompareKey( name, regEntry, gIODTCompatibleKey, matchingName) + || CompareKey( name, regEntry, gIODTTypeKey, matchingName) + || CompareKey( name, regEntry, gIODTModelKey, matchingName); - return (matched); + return matched; } -bool IODTMatchNubWithKeys( IORegistryEntry * regEntry, - const char * keys ) +bool +IODTMatchNubWithKeys( IORegistryEntry * regEntry, + const char * keys ) { - OSObject *obj; - bool result = false; + OSObject *obj; + bool result = false; - obj = OSUnserialize( keys, 0 ); + obj = OSUnserialize( keys, 0 ); - if( obj) { - result = regEntry->compareNames( obj ); + if (obj) { + result = regEntry->compareNames( obj ); obj->release(); - } + } #if DEBUG - else IOLog("Couldn't unserialize %s\n", keys ); + else { + IOLog("Couldn't unserialize %s\n", keys ); + } #endif - return( result ); + return result; } -OSCollectionIterator * IODTFindMatchingEntries( IORegistryEntry * from, - IOOptionBits options, const char * keys ) +OSCollectionIterator * +IODTFindMatchingEntries( IORegistryEntry * from, + IOOptionBits options, const char * keys ) { - OSSet *result = 0; - IORegistryEntry *next; - IORegistryIterator *iter; - OSCollectionIterator *cIter; - bool cmp; - bool minus = options & kIODTExclusive; - - - iter = IORegistryIterator::iterateOver( from, gIODTPlane, - (options & kIODTRecursive) ? 
kIORegistryIterateRecursively : 0 ); - if( iter) { - - do { - - if( result) - result->release(); - result = OSSet::withCapacity( 3 ); - if( !result) - break; - - iter->reset(); - while( (next = iter->getNextObject())) { - - // Look for existence of a debug property to skip - if( next->getProperty("AAPL,ignore")) - continue; - - if( keys) { - cmp = IODTMatchNubWithKeys( next, keys ); - if( (minus && (false == cmp)) - || ((false == minus) && (false != cmp)) ) - result->setObject( next); - } else - result->setObject( next); - } - } while( !iter->isValid()); - - iter->release(); - } - - cIter = OSCollectionIterator::withCollection( result); - if (result) result->release(); - - return( cIter); + OSSet *result = 0; + IORegistryEntry *next; + IORegistryIterator *iter; + OSCollectionIterator *cIter; + bool cmp; + bool minus = options & kIODTExclusive; + + + iter = IORegistryIterator::iterateOver( from, gIODTPlane, + (options & kIODTRecursive) ? kIORegistryIterateRecursively : 0 ); + if (iter) { + do { + if (result) { + result->release(); + } + result = OSSet::withCapacity( 3 ); + if (!result) { + break; + } + + iter->reset(); + while ((next = iter->getNextObject())) { + // Look for existence of a debug property to skip + if (next->getProperty("AAPL,ignore")) { + continue; + } + + if (keys) { + cmp = IODTMatchNubWithKeys( next, keys ); + if ((minus && (false == cmp)) + || ((false == minus) && (false != cmp))) { + result->setObject( next); + } + } else { + result->setObject( next); + } + } + } while (!iter->isValid()); + + iter->release(); + } + + cIter = OSCollectionIterator::withCollection( result); + if (result) { + result->release(); + } + + return cIter; } struct IODTPersistent { - IODTCompareAddressCellFunc compareFunc; + IODTCompareAddressCellFunc compareFunc; }; -void IODTSetResolving( IORegistryEntry * regEntry, - IODTCompareAddressCellFunc compareFunc, - IODTNVLocationFunc locationFunc __unused ) +void +IODTSetResolving( IORegistryEntry * regEntry, + IODTCompareAddressCellFunc compareFunc, + IODTNVLocationFunc locationFunc __unused ) { - IODTPersistent persist; - IODTPersistent * entry; - OSNumber * num; - unsigned int index, count; - - IOLockLock(gIODTResolversLock); - - count = (gIODTResolvers->getLength() / sizeof(IODTPersistent)); - entry = (typeof(entry)) gIODTResolvers->getBytesNoCopy(); - for (index = 0; index < count; index++) - { - if (compareFunc == entry->compareFunc) break; - entry++; - } - if (index == count) - { - persist.compareFunc = compareFunc; - if (!gIODTResolvers->appendBytes(&persist, sizeof(IODTPersistent))) panic("IODTSetResolving"); - } - - IOLockUnlock(gIODTResolversLock); - - num = OSNumber::withNumber(index, 32); - regEntry->setProperty(gIODTPersistKey, num); - OSSafeReleaseNULL(num); - - return; + IODTPersistent persist; + IODTPersistent * entry; + OSNumber * num; + unsigned int index, count; + + IOLockLock(gIODTResolversLock); + + count = (gIODTResolvers->getLength() / sizeof(IODTPersistent)); + entry = (typeof(entry))gIODTResolvers->getBytesNoCopy(); + for (index = 0; index < count; index++) { + if (compareFunc == entry->compareFunc) { + break; + } + entry++; + } + if (index == count) { + persist.compareFunc = compareFunc; + if (!gIODTResolvers->appendBytes(&persist, sizeof(IODTPersistent))) { + panic("IODTSetResolving"); + } + } + + IOLockUnlock(gIODTResolversLock); + + num = OSNumber::withNumber(index, 32); + regEntry->setProperty(gIODTPersistKey, num); + OSSafeReleaseNULL(num); + + return; } #if defined(__arm64__) -static SInt64 DefaultCompare( UInt32 
cellCount, UInt32 left[], UInt32 right[] ) +static SInt64 +DefaultCompare( UInt32 cellCount, UInt32 left[], UInt32 right[] ) { - SInt64 diff = 0; - - if (cellCount == 2) { - diff = IOPhysical32(left[1], left[0]) - IOPhysical32(right[1], right[0]); - } else if (cellCount == 1) { - diff = ( left[0] - right[0] ); - } else { - panic("DefaultCompare only knows how to handle 1 or 2 cells."); - } + SInt64 diff = 0; + + if (cellCount == 2) { + diff = IOPhysical32(left[1], left[0]) - IOPhysical32(right[1], right[0]); + } else if (cellCount == 1) { + diff = (left[0] - right[0]); + } else { + panic("DefaultCompare only knows how to handle 1 or 2 cells."); + } - return diff; + return diff; } #elif defined(__arm__) || defined(__i386__) || defined(__x86_64__) -static SInt32 DefaultCompare( UInt32 cellCount, UInt32 left[], UInt32 right[] ) +static SInt32 +DefaultCompare( UInt32 cellCount, UInt32 left[], UInt32 right[] ) { cellCount--; - return( left[ cellCount ] - right[ cellCount ] ); + return left[cellCount] - right[cellCount]; } #else #error Unknown architecture. #endif -static void AddLengthToCells( UInt32 numCells, UInt32 *cells, UInt64 offset) +static void +AddLengthToCells( UInt32 numCells, UInt32 *cells, UInt64 offset) { - if (numCells == 1) - { - cells[0] += (UInt32)offset; - } - else { + if (numCells == 1) { + cells[0] += (UInt32)offset; + } else { #if defined(__arm64__) || defined(__arm__) - UInt64 sum = cells[numCells - 2] + offset; - cells[numCells - 2] = (UInt32)sum; - if (sum > UINT32_MAX) { - cells[numCells - 1] += (UInt32)(sum >> 32); - } + UInt64 sum = cells[numCells - 2] + offset; + cells[numCells - 2] = (UInt32)sum; + if (sum > UINT32_MAX) { + cells[numCells - 1] += (UInt32)(sum >> 32); + } #else - UInt64 sum = cells[numCells - 1] + offset; - cells[numCells - 1] = (UInt32)sum; - if (sum > UINT32_MAX) { - cells[numCells - 2] += (UInt32)(sum >> 32); - } + UInt64 sum = cells[numCells - 1] + offset; + cells[numCells - 1] = (UInt32)sum; + if (sum > UINT32_MAX) { + cells[numCells - 2] += (UInt32)(sum >> 32); + } #endif - } + } } -static IOPhysicalAddress CellsValue( UInt32 numCells, UInt32 *cells) +static IOPhysicalAddress +CellsValue( UInt32 numCells, UInt32 *cells) { - if (numCells == 1) { - return IOPhysical32( 0, cells[0] ); - } else { + if (numCells == 1) { + return IOPhysical32( 0, cells[0] ); + } else { #if defined(__arm64__) || defined(arm) - return IOPhysical32( cells[numCells - 1], cells[numCells - 2] ); + return IOPhysical32( cells[numCells - 1], cells[numCells - 2] ); #else - return IOPhysical32( cells[numCells - 2], cells[numCells - 1] ); + return IOPhysical32( cells[numCells - 2], cells[numCells - 1] ); #endif - } + } } -void IODTGetCellCounts( IORegistryEntry * regEntry, - UInt32 * sizeCount, UInt32 * addressCount) +void +IODTGetCellCounts( IORegistryEntry * regEntry, + UInt32 * sizeCount, UInt32 * addressCount) { - if( !GetUInt32( regEntry, gIODTSizeCellKey, sizeCount)) - *sizeCount = 1; - if( !GetUInt32( regEntry, gIODTAddressCellKey, addressCount)) - *addressCount = 2; - return; + if (!GetUInt32( regEntry, gIODTSizeCellKey, sizeCount)) { + *sizeCount = 1; + } + if (!GetUInt32( regEntry, gIODTAddressCellKey, addressCount)) { + *addressCount = 2; + } + return; } // Given addr & len cells from our child, find it in our ranges property, then @@ -1047,272 +1118,292 @@ void IODTGetCellCounts( IORegistryEntry * regEntry, // Range[]: child-addr our-addr child-len // #cells: child ours child -bool IODTResolveAddressCell( IORegistryEntry * startEntry, - UInt32 cellsIn[], - 
IOPhysicalAddress * phys, IOPhysicalLength * lenOut ) +bool +IODTResolveAddressCell( IORegistryEntry * startEntry, + UInt32 cellsIn[], + IOPhysicalAddress * phys, IOPhysicalLength * lenOut ) { - IORegistryEntry * parent; - IORegistryEntry * regEntry; - OSData * prop; + IORegistryEntry * parent; + IORegistryEntry * regEntry; + OSData * prop; OSNumber * num; unsigned int index, count; - // cells in addresses at regEntry - UInt32 sizeCells, addressCells; - // cells in addresses below regEntry - UInt32 childSizeCells, childAddressCells; - UInt32 childCells; - UInt32 cell[ 8 ], propLen; - UInt64 offset = 0; - UInt32 endCell[ 8 ]; - UInt32 *range; - UInt32 *lookRange; - UInt32 *startRange; - UInt32 *endRanges; - bool ok = true; - SInt64 diff, diff2, endDiff; - UInt64 len, rangeLen; - - IODTPersistent *persist; - IODTCompareAddressCellFunc compare; - - regEntry = startEntry; - IODTGetCellCounts( regEntry, &childSizeCells, &childAddressCells ); - childCells = childAddressCells + childSizeCells; - - if (childCells > sizeof(cell)/sizeof(cell[0])) - panic("IODTResolveAddressCell: Invalid device tree (%u,%u)", (uint32_t)childAddressCells, (uint32_t)childSizeCells); - - bcopy( cellsIn, cell, sizeof(UInt32) * childCells ); - *lenOut = CellsValue( childSizeCells, cellsIn + childAddressCells ); - - do - { - prop = OSDynamicCast( OSData, regEntry->getProperty( gIODTRangeKey )); - if( 0 == prop) { - /* end of the road */ - *phys = CellsValue( childAddressCells, cell ); - *phys += offset; - if (regEntry != startEntry) regEntry->release(); - break; - } - - parent = regEntry->copyParentEntry( gIODTPlane ); - IODTGetCellCounts( parent, &sizeCells, &addressCells ); - - if( (propLen = prop->getLength())) { - // search - startRange = (UInt32 *) prop->getBytesNoCopy(); - range = startRange; - endRanges = range + (propLen / sizeof(UInt32)); - - compare = NULL; - num = OSDynamicCast(OSNumber, regEntry->getProperty(gIODTPersistKey)); - if (num) - { - IOLockLock(gIODTResolversLock); - index = num->unsigned32BitValue(); - count = gIODTResolvers->getLength() / sizeof(IODTPersistent); - if (index < count) - { - persist = ((IODTPersistent *) gIODTResolvers->getBytesNoCopy()) + index; - compare = persist->compareFunc; + // cells in addresses at regEntry + UInt32 sizeCells, addressCells; + // cells in addresses below regEntry + UInt32 childSizeCells, childAddressCells; + UInt32 childCells; + UInt32 cell[8], propLen; + UInt64 offset = 0; + UInt32 endCell[8]; + UInt32 *range; + UInt32 *lookRange; + UInt32 *startRange; + UInt32 *endRanges; + bool ok = true; + SInt64 diff, diff2, endDiff; + UInt64 len, rangeLen; + + IODTPersistent *persist; + IODTCompareAddressCellFunc compare; + + regEntry = startEntry; + IODTGetCellCounts( regEntry, &childSizeCells, &childAddressCells ); + childCells = childAddressCells + childSizeCells; + + if (childCells > sizeof(cell) / sizeof(cell[0])) { + panic("IODTResolveAddressCell: Invalid device tree (%u,%u)", (uint32_t)childAddressCells, (uint32_t)childSizeCells); + } + + bcopy( cellsIn, cell, sizeof(UInt32) * childCells ); + *lenOut = CellsValue( childSizeCells, cellsIn + childAddressCells ); + + do{ + prop = OSDynamicCast( OSData, regEntry->getProperty( gIODTRangeKey )); + if (0 == prop) { + /* end of the road */ + *phys = CellsValue( childAddressCells, cell ); + *phys += offset; + if (regEntry != startEntry) { + regEntry->release(); + } + break; } - IOLockUnlock(gIODTResolversLock); - } - - if (!compare && (addressCells == childAddressCells)) { - compare = DefaultCompare; - } - if (!compare) { - 
panic("There is no mixed comparison function yet..."); - } - - for( ok = false; - range < endRanges; - range += (childCells + addressCells) ) { - - // is cell start within range? - diff = (*compare)( childAddressCells, cell, range ); - - if (childAddressCells > sizeof(endCell)/sizeof(endCell[0])) - panic("IODTResolveAddressCell: Invalid device tree (%u)", (uint32_t)childAddressCells); - - bcopy(range, endCell, childAddressCells * sizeof(UInt32)); - - rangeLen = CellsValue(childSizeCells, range + childAddressCells + addressCells); - AddLengthToCells(childAddressCells, endCell, rangeLen); - - diff2 = (*compare)( childAddressCells, cell, endCell ); - - // if start of cell < start of range, or end of range >= start of cell, skip - if ((diff < 0) || (diff2 >= 0)) - continue; - - len = CellsValue(childSizeCells, cell + childAddressCells); - ok = (0 == len); - - if (!ok) - { - // search for cell end - bcopy(cell, endCell, childAddressCells * sizeof(UInt32)); - - AddLengthToCells(childAddressCells, endCell, len - 1); - - for( lookRange = startRange; - lookRange < endRanges; - lookRange += (childCells + addressCells) ) - { - // make sure end of cell >= range start - endDiff = (*compare)( childAddressCells, endCell, lookRange ); - if( endDiff < 0) - continue; - - UInt64 rangeStart = CellsValue(addressCells, range + childAddressCells); - UInt64 lookRangeStart = CellsValue(addressCells, lookRange + childAddressCells); - if ((endDiff - len + 1 + lookRangeStart) == (diff + rangeStart)) - { - ok = true; - break; - } - } - if (!ok) - continue; - } - offset += diff; - break; - } - - if (addressCells + sizeCells > sizeof(cell)/sizeof(cell[0])) - panic("IODTResolveAddressCell: Invalid device tree (%u, %u)", (uint32_t)addressCells, (uint32_t)sizeCells); - - // Get the physical start of the range from our parent - bcopy( range + childAddressCells, cell, sizeof(UInt32) * addressCells ); - bzero( cell + addressCells, sizeof(UInt32) * sizeCells ); - - } /* else zero length range => pass thru to parent */ - - if (regEntry != startEntry) regEntry->release(); - regEntry = parent; - childSizeCells = sizeCells; - childAddressCells = addressCells; - childCells = childAddressCells + childSizeCells; - } - while( ok && regEntry); - - return( ok); + + parent = regEntry->copyParentEntry( gIODTPlane ); + IODTGetCellCounts( parent, &sizeCells, &addressCells ); + + if ((propLen = prop->getLength())) { + // search + startRange = (UInt32 *) prop->getBytesNoCopy(); + range = startRange; + endRanges = range + (propLen / sizeof(UInt32)); + + compare = NULL; + num = OSDynamicCast(OSNumber, regEntry->getProperty(gIODTPersistKey)); + if (num) { + IOLockLock(gIODTResolversLock); + index = num->unsigned32BitValue(); + count = gIODTResolvers->getLength() / sizeof(IODTPersistent); + if (index < count) { + persist = ((IODTPersistent *) gIODTResolvers->getBytesNoCopy()) + index; + compare = persist->compareFunc; + } + IOLockUnlock(gIODTResolversLock); + } + + if (!compare && (addressCells == childAddressCells)) { + compare = DefaultCompare; + } + if (!compare) { + panic("There is no mixed comparison function yet..."); + } + + for (ok = false; + range < endRanges; + range += (childCells + addressCells)) { + // is cell start within range? 
+ diff = (*compare)( childAddressCells, cell, range ); + + if (childAddressCells > sizeof(endCell) / sizeof(endCell[0])) { + panic("IODTResolveAddressCell: Invalid device tree (%u)", (uint32_t)childAddressCells); + } + + bcopy(range, endCell, childAddressCells * sizeof(UInt32)); + + rangeLen = CellsValue(childSizeCells, range + childAddressCells + addressCells); + AddLengthToCells(childAddressCells, endCell, rangeLen); + + diff2 = (*compare)( childAddressCells, cell, endCell ); + + // if start of cell < start of range, or end of range >= start of cell, skip + if ((diff < 0) || (diff2 >= 0)) { + continue; + } + + len = CellsValue(childSizeCells, cell + childAddressCells); + ok = (0 == len); + + if (!ok) { + // search for cell end + bcopy(cell, endCell, childAddressCells * sizeof(UInt32)); + + AddLengthToCells(childAddressCells, endCell, len - 1); + + for (lookRange = startRange; + lookRange < endRanges; + lookRange += (childCells + addressCells)) { + // make sure end of cell >= range start + endDiff = (*compare)( childAddressCells, endCell, lookRange ); + if (endDiff < 0) { + continue; + } + + UInt64 rangeStart = CellsValue(addressCells, range + childAddressCells); + UInt64 lookRangeStart = CellsValue(addressCells, lookRange + childAddressCells); + if ((endDiff - len + 1 + lookRangeStart) == (diff + rangeStart)) { + ok = true; + break; + } + } + if (!ok) { + continue; + } + } + offset += diff; + break; + } + + if (addressCells + sizeCells > sizeof(cell) / sizeof(cell[0])) { + panic("IODTResolveAddressCell: Invalid device tree (%u, %u)", (uint32_t)addressCells, (uint32_t)sizeCells); + } + + // Get the physical start of the range from our parent + bcopy( range + childAddressCells, cell, sizeof(UInt32) * addressCells ); + bzero( cell + addressCells, sizeof(UInt32) * sizeCells ); + } /* else zero length range => pass thru to parent */ + + if (regEntry != startEntry) { + regEntry->release(); + } + regEntry = parent; + childSizeCells = sizeCells; + childAddressCells = addressCells; + childCells = childAddressCells + childSizeCells; + }while (ok && regEntry); + + return ok; } -OSArray * IODTResolveAddressing( IORegistryEntry * regEntry, - const char * addressPropertyName, - IODeviceMemory * parent ) +OSArray * +IODTResolveAddressing( IORegistryEntry * regEntry, + const char * addressPropertyName, + IODeviceMemory * parent ) { - IORegistryEntry *parentEntry; - OSData *addressProperty; - UInt32 sizeCells, addressCells, cells; - int i, num; - UInt32 *reg; - IOPhysicalAddress phys; - IOPhysicalLength len; - OSArray *array; - IODeviceMemory *range; - - array = 0; - do - { - parentEntry = regEntry->copyParentEntry( gIODTPlane ); - addressProperty = (OSData *) regEntry->getProperty( addressPropertyName ); - if( (0 == addressProperty) || (0 == parentEntry)) break; - - IODTGetCellCounts( parentEntry, &sizeCells, &addressCells ); - if( 0 == sizeCells) break; - - cells = sizeCells + addressCells; - reg = (UInt32 *) addressProperty->getBytesNoCopy(); - num = addressProperty->getLength() / (4 * cells); - - array = OSArray::withCapacity( 1 ); - if( 0 == array) break; - - for( i = 0; i < num; i++) { - if( IODTResolveAddressCell( parentEntry, reg, &phys, &len )) { - range = 0; - if( parent) - range = IODeviceMemory::withSubRange( parent, - phys - parent->getPhysicalSegment(0, 0, kIOMemoryMapperNone), len ); - if( 0 == range) - range = IODeviceMemory::withRange( phys, len ); - if( range) - array->setObject( range ); - } - reg += cells; - } - - regEntry->setProperty( gIODeviceMemoryKey, array); - array->release(); 
/* ??? */ - } - while (false); - - OSSafeReleaseNULL(parentEntry); - - return (array); + IORegistryEntry *parentEntry; + OSData *addressProperty; + UInt32 sizeCells, addressCells, cells; + int i, num; + UInt32 *reg; + IOPhysicalAddress phys; + IOPhysicalLength len; + OSArray *array; + IODeviceMemory *range; + + array = 0; + do{ + parentEntry = regEntry->copyParentEntry( gIODTPlane ); + addressProperty = (OSData *) regEntry->getProperty( addressPropertyName ); + if ((0 == addressProperty) || (0 == parentEntry)) { + break; + } + + IODTGetCellCounts( parentEntry, &sizeCells, &addressCells ); + if (0 == sizeCells) { + break; + } + + cells = sizeCells + addressCells; + reg = (UInt32 *) addressProperty->getBytesNoCopy(); + num = addressProperty->getLength() / (4 * cells); + + array = OSArray::withCapacity( 1 ); + if (0 == array) { + break; + } + + for (i = 0; i < num; i++) { + if (IODTResolveAddressCell( parentEntry, reg, &phys, &len )) { + range = 0; + if (parent) { + range = IODeviceMemory::withSubRange( parent, + phys - parent->getPhysicalSegment(0, 0, kIOMemoryMapperNone), len ); + } + if (0 == range) { + range = IODeviceMemory::withRange( phys, len ); + } + if (range) { + array->setObject( range ); + } + } + reg += cells; + } + + regEntry->setProperty( gIODeviceMemoryKey, array); + array->release(); /* ??? */ + }while (false); + + OSSafeReleaseNULL(parentEntry); + + return array; } -OSData * IODTFindSlotName( IORegistryEntry * regEntry, UInt32 deviceNumber ) +OSData * +IODTFindSlotName( IORegistryEntry * regEntry, UInt32 deviceNumber ) { - IORegistryEntry *parent; - OSData *data; - OSData *ret = 0; - UInt32 *bits; - UInt32 i; - size_t nlen; - char *names; - char *lastName; - UInt32 mask; - - data = (OSData *) regEntry->getProperty("AAPL,slot-name"); - if (data) return (data); - - do - { - parent = regEntry->copyParentEntry( gIODTPlane ); - if (!parent) break; - - data = OSDynamicCast( OSData, parent->getProperty("slot-names")); - if (!data) break; - if (data->getLength() <= 4) break; - - bits = (UInt32 *) data->getBytesNoCopy(); - mask = *bits; - if ((0 == (mask & (1 << deviceNumber)))) break; - - names = (char *)(bits + 1); - lastName = names + (data->getLength() - 4); - - for( i = 0; (i <= deviceNumber) && (names < lastName); i++ ) { - - if( mask & (1 << i)) { - nlen = 1 + strnlen(names, lastName - names); - if( i == deviceNumber) { - data = OSData::withBytesNoCopy(names, nlen); - if( data) { - regEntry->setProperty("AAPL,slot-name", data); - ret = data; - data->release(); - } - } else - names += nlen; - } - } - } - while (false); - - OSSafeReleaseNULL(parent); - - return( ret ); + IORegistryEntry *parent; + OSData *data; + OSData *ret = 0; + UInt32 *bits; + UInt32 i; + size_t nlen; + char *names; + char *lastName; + UInt32 mask; + + data = (OSData *) regEntry->getProperty("AAPL,slot-name"); + if (data) { + return data; + } + + do{ + parent = regEntry->copyParentEntry( gIODTPlane ); + if (!parent) { + break; + } + + data = OSDynamicCast( OSData, parent->getProperty("slot-names")); + if (!data) { + break; + } + if (data->getLength() <= 4) { + break; + } + + bits = (UInt32 *) data->getBytesNoCopy(); + mask = *bits; + if ((0 == (mask & (1 << deviceNumber)))) { + break; + } + + names = (char *)(bits + 1); + lastName = names + (data->getLength() - 4); + + for (i = 0; (i <= deviceNumber) && (names < lastName); i++) { + if (mask & (1 << i)) { + nlen = 1 + strnlen(names, lastName - names); + if (i == deviceNumber) { + data = OSData::withBytesNoCopy(names, nlen); + if (data) { + 
regEntry->setProperty("AAPL,slot-name", data); + ret = data; + data->release(); + } + } else { + names += nlen; + } + } + } + }while (false); + + OSSafeReleaseNULL(parent); + + return ret; } -extern "C" IOReturn IONDRVLibrariesInitialize( IOService * provider ) +extern "C" IOReturn +IONDRVLibrariesInitialize( IOService * provider ) { - return( kIOReturnUnsupported ); + return kIOReturnUnsupported; } diff --git a/iokit/Kernel/IOEventSource.cpp b/iokit/Kernel/IOEventSource.cpp index 76c2d5032..33306ae6c 100644 --- a/iokit/Kernel/IOEventSource.cpp +++ b/iokit/Kernel/IOEventSource.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000, 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* -Copyright (c) 1998 Apple Computer, Inc. All rights reserved. - -HISTORY - 1998-7-13 Godfrey van der Linden(gvdl) - Created. -]*/ + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * + * HISTORY + * 1998-7-13 Godfrey van der Linden(gvdl) + * Created. 
+ * ]*/ #include #include @@ -51,7 +51,11 @@ OSMetaClassDefineReservedUnused(IOEventSource, 5); OSMetaClassDefineReservedUnused(IOEventSource, 6); OSMetaClassDefineReservedUnused(IOEventSource, 7); -bool IOEventSource::checkForWork() { return false; } +bool +IOEventSource::checkForWork() +{ + return false; +} /* inline function implementations */ @@ -65,7 +69,7 @@ do { \ #define IOStatisticsUnregisterCounter() \ do { \ if (reserved) \ - IOStatistics::unregisterEventSource(reserved->counter); \ + IOStatistics::unregisterEventSource(reserved->counter); \ } while (0) #define IOStatisticsOpenGate() \ @@ -87,163 +91,207 @@ do { \ #endif /* IOKITSTATS */ -void IOEventSource::signalWorkAvailable() -{ - workLoop->signalWorkAvailable(); +void +IOEventSource::signalWorkAvailable() +{ + workLoop->signalWorkAvailable(); } -void IOEventSource::openGate() -{ +void +IOEventSource::openGate() +{ IOStatisticsOpenGate(); - workLoop->openGate(); + workLoop->openGate(); } -void IOEventSource::closeGate() -{ - workLoop->closeGate(); - IOStatisticsCloseGate(); +void +IOEventSource::closeGate() +{ + workLoop->closeGate(); + IOStatisticsCloseGate(); } -bool IOEventSource::tryCloseGate() -{ - bool res; +bool +IOEventSource::tryCloseGate() +{ + bool res; if ((res = workLoop->tryCloseGate())) { - IOStatisticsCloseGate(); + IOStatisticsCloseGate(); } - return res; + return res; } -int IOEventSource::sleepGate(void *event, UInt32 type) -{ - int res; - IOStatisticsOpenGate(); - res = workLoop->sleepGate(event, type); - IOStatisticsCloseGate(); - return res; +int +IOEventSource::sleepGate(void *event, UInt32 type) +{ + int res; + IOStatisticsOpenGate(); + res = workLoop->sleepGate(event, type); + IOStatisticsCloseGate(); + return res; } -int IOEventSource::sleepGate(void *event, AbsoluteTime deadline, UInt32 type) -{ - int res; - IOStatisticsOpenGate(); +int +IOEventSource::sleepGate(void *event, AbsoluteTime deadline, UInt32 type) +{ + int res; + IOStatisticsOpenGate(); res = workLoop->sleepGate(event, deadline, type); - IOStatisticsCloseGate(); - return res; + IOStatisticsCloseGate(); + return res; +} + +void +IOEventSource::wakeupGate(void *event, bool oneThread) +{ + workLoop->wakeupGate(event, oneThread); } - -void IOEventSource::wakeupGate(void *event, bool oneThread) { workLoop->wakeupGate(event, oneThread); } -bool IOEventSource::init(OSObject *inOwner, - Action inAction) +bool +IOEventSource::init(OSObject *inOwner, + Action inAction) { - if (!inOwner) - return false; + if (!inOwner) { + return false; + } + + owner = inOwner; - owner = inOwner; + if (!super::init()) { + return false; + } - if ( !super::init() ) - return false; + (void) setAction(inAction); + enabled = true; - (void) setAction(inAction); - enabled = true; + if (!reserved) { + reserved = IONew(ExpansionData, 1); + if (!reserved) { + return false; + } + } - if(!reserved) { - reserved = IONew(ExpansionData, 1); - if (!reserved) { - return false; - } - } + IOStatisticsRegisterCounter(); - IOStatisticsRegisterCounter(); - - return true; + return true; } -void IOEventSource::free( void ) +void +IOEventSource::free( void ) { - IOStatisticsUnregisterCounter(); + IOStatisticsUnregisterCounter(); + + if ((kActionBlock & flags) && actionBlock) { + Block_release(actionBlock); + } - if ((kActionBlock & flags) && actionBlock) Block_release(actionBlock); - - if (reserved) + if (reserved) { IODelete(reserved, ExpansionData, 1); + } - super::free(); + super::free(); } -void IOEventSource::setRefcon(void *newrefcon) +void +IOEventSource::setRefcon(void 
*newrefcon) { refcon = newrefcon; } -void * IOEventSource::getRefcon() const +void * +IOEventSource::getRefcon() const { return refcon; } -IOEventSource::Action IOEventSource::getAction() const +IOEventSource::Action +IOEventSource::getAction() const { - if (kActionBlock & flags) return NULL; - return (action); + if (kActionBlock & flags) { + return NULL; + } + return action; } -IOEventSource::ActionBlock IOEventSource::getActionBlock(ActionBlock) const +IOEventSource::ActionBlock +IOEventSource::getActionBlock(ActionBlock) const { - if (kActionBlock & flags) return actionBlock; - return (NULL); + if (kActionBlock & flags) { + return actionBlock; + } + return NULL; } -void IOEventSource::setAction(Action inAction) +void +IOEventSource::setAction(Action inAction) { - if ((kActionBlock & flags) && actionBlock) Block_release(actionBlock); - action = inAction; + if ((kActionBlock & flags) && actionBlock) { + Block_release(actionBlock); + } + action = inAction; } -void IOEventSource::setActionBlock(ActionBlock block) +void +IOEventSource::setActionBlock(ActionBlock block) { - if ((kActionBlock & flags) && actionBlock) Block_release(actionBlock); + if ((kActionBlock & flags) && actionBlock) { + Block_release(actionBlock); + } actionBlock = Block_copy(block); flags |= kActionBlock; } -IOEventSource *IOEventSource::getNext() const { return eventChainNext; }; +IOEventSource * +IOEventSource::getNext() const +{ + return eventChainNext; +}; -void IOEventSource::setNext(IOEventSource *inNext) +void +IOEventSource::setNext(IOEventSource *inNext) { - eventChainNext = inNext; + eventChainNext = inNext; } -void IOEventSource::enable() +void +IOEventSource::enable() { - enabled = true; - if (workLoop) - return signalWorkAvailable(); + enabled = true; + if (workLoop) { + return signalWorkAvailable(); + } } -void IOEventSource::disable() +void +IOEventSource::disable() { - enabled = false; + enabled = false; } -bool IOEventSource::isEnabled() const +bool +IOEventSource::isEnabled() const { - return enabled; + return enabled; } -void IOEventSource::setWorkLoop(IOWorkLoop *inWorkLoop) +void +IOEventSource::setWorkLoop(IOWorkLoop *inWorkLoop) { - if ( !inWorkLoop ) - disable(); - workLoop = inWorkLoop; + if (!inWorkLoop) { + disable(); + } + workLoop = inWorkLoop; } -IOWorkLoop *IOEventSource::getWorkLoop() const +IOWorkLoop * +IOEventSource::getWorkLoop() const { - return workLoop; + return workLoop; } -bool IOEventSource::onThread() const +bool +IOEventSource::onThread() const { - return (workLoop != 0) && workLoop->onThread(); + return (workLoop != 0) && workLoop->onThread(); } diff --git a/iokit/Kernel/IOFilterInterruptEventSource.cpp b/iokit/Kernel/IOFilterInterruptEventSource.cpp index f3c61367b..e3b9803cf 100644 --- a/iokit/Kernel/IOFilterInterruptEventSource.cpp +++ b/iokit/Kernel/IOFilterInterruptEventSource.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -56,7 +56,7 @@ do { \ #define super IOInterruptEventSource OSDefineMetaClassAndStructors - (IOFilterInterruptEventSource, IOInterruptEventSource) +(IOFilterInterruptEventSource, IOInterruptEventSource) OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 0); OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 1); OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 2); @@ -69,207 +69,236 @@ OSMetaClassDefineReservedUnused(IOFilterInterruptEventSource, 7); /* * Implement the call throughs for the private protection conversion */ -bool IOFilterInterruptEventSource::init(OSObject *inOwner, - Action inAction, - IOService *inProvider, - int inIntIndex) +bool +IOFilterInterruptEventSource::init(OSObject *inOwner, + Action inAction, + IOService *inProvider, + int inIntIndex) { - return false; + return false; } IOInterruptEventSource * IOFilterInterruptEventSource::interruptEventSource(OSObject *inOwner, - Action inAction, - IOService *inProvider, - int inIntIndex) + Action inAction, + IOService *inProvider, + int inIntIndex) { - return 0; + return 0; } bool IOFilterInterruptEventSource::init(OSObject *inOwner, - Action inAction, - Filter inFilterAction, - IOService *inProvider, - int inIntIndex) + Action inAction, + Filter inFilterAction, + IOService *inProvider, + int inIntIndex) { - if ( !super::init(inOwner, inAction, inProvider, inIntIndex) ) - return false; + if (!super::init(inOwner, inAction, inProvider, inIntIndex)) { + return false; + } + + if (!inFilterAction) { + return false; + } - if (!inFilterAction) - return false; + filterAction = inFilterAction; - filterAction = inFilterAction; + IOStatisticsInitializeCounter(); - IOStatisticsInitializeCounter(); - - return true; + return true; } -IOFilterInterruptEventSource *IOFilterInterruptEventSource +IOFilterInterruptEventSource * +IOFilterInterruptEventSource ::filterInterruptEventSource(OSObject *inOwner, - Action inAction, - Filter inFilterAction, - IOService *inProvider, - int inIntIndex) + Action inAction, + Filter inFilterAction, + IOService *inProvider, + int inIntIndex) { - IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; + IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; - if (me - && !me->init(inOwner, inAction, inFilterAction, inProvider, inIntIndex)) { - me->release(); - return 0; - } + if (me + && !me->init(inOwner, inAction, inFilterAction, inProvider, inIntIndex)) { + me->release(); + return 0; + } - return me; + return me; } -IOFilterInterruptEventSource *IOFilterInterruptEventSource +IOFilterInterruptEventSource * +IOFilterInterruptEventSource ::filterInterruptEventSource(OSObject *inOwner, - IOService *inProvider, - int inIntIndex, - ActionBlock inAction, - FilterBlock inFilterAction) + IOService *inProvider, + int inIntIndex, + ActionBlock inAction, + FilterBlock inFilterAction) { - IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; - - FilterBlock filter = Block_copy(inFilterAction); - if (!filter) return 0; - - if (me - && !me->init(inOwner, (Action) NULL, (Filter) filter, 
inProvider, inIntIndex)) { - me->release(); - Block_release(filter); - return 0; - } - me->flags |= kFilterBlock; - me->setActionBlock((IOEventSource::ActionBlock) inAction); - - return me; + IOFilterInterruptEventSource *me = new IOFilterInterruptEventSource; + + FilterBlock filter = Block_copy(inFilterAction); + if (!filter) { + return 0; + } + + if (me + && !me->init(inOwner, (Action) NULL, (Filter) filter, inProvider, inIntIndex)) { + me->release(); + Block_release(filter); + return 0; + } + me->flags |= kFilterBlock; + me->setActionBlock((IOEventSource::ActionBlock) inAction); + + return me; } -void IOFilterInterruptEventSource::free( void ) +void +IOFilterInterruptEventSource::free( void ) { - if ((kFilterBlock & flags) && filterActionBlock) Block_release(filterActionBlock); + if ((kFilterBlock & flags) && filterActionBlock) { + Block_release(filterActionBlock); + } - super::free(); + super::free(); } -void IOFilterInterruptEventSource::signalInterrupt() +void +IOFilterInterruptEventSource::signalInterrupt() { bool trace = (gIOKitTrace & kIOTraceIntEventSource) ? true : false; - - IOStatisticsInterrupt(); - producerCount++; - - if (trace) - IOTimeStampStartConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); - - signalWorkAvailable(); - - if (trace) - IOTimeStampEndConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); - + + IOStatisticsInterrupt(); + producerCount++; + + if (trace) { + IOTimeStampStartConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); + } + + signalWorkAvailable(); + + if (trace) { + IOTimeStampEndConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); + } } IOFilterInterruptEventSource::Filter IOFilterInterruptEventSource::getFilterAction() const { - if (kFilterBlock & flags) return NULL; - return filterAction; + if (kFilterBlock & flags) { + return NULL; + } + return filterAction; } IOFilterInterruptEventSource::FilterBlock IOFilterInterruptEventSource::getFilterActionBlock() const { - if (kFilterBlock & flags) return filterActionBlock; - return (NULL); + if (kFilterBlock & flags) { + return filterActionBlock; + } + return NULL; } -void IOFilterInterruptEventSource::normalInterruptOccurred - (void */*refcon*/, IOService */*prov*/, int /*source*/) +void +IOFilterInterruptEventSource::normalInterruptOccurred +(void */*refcon*/, IOService */*prov*/, int /*source*/) { - bool filterRes; - uint64_t startTime = 0; - uint64_t endTime = 0; - bool trace = (gIOKitTrace & kIOTraceIntEventSource) ? true : false; - - if (trace) + bool filterRes; + uint64_t startTime = 0; + uint64_t endTime = 0; + bool trace = (gIOKitTrace & kIOTraceIntEventSource) ? true : false; + + if (trace) { IOTimeStampStartConstant(IODBG_INTES(IOINTES_FILTER), - VM_KERNEL_UNSLIDE(filterAction), VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); - - if (IOInterruptEventSource::reserved->statistics) { - if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) { - startTime = mach_absolute_time(); - } - } - - // Call the filter. 
- if (kFilterBlock & flags) filterRes = (filterActionBlock)(this); - else filterRes = (*filterAction)(owner, this); - - if (IOInterruptEventSource::reserved->statistics) { - if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) { - IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1); - } - - if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) { - endTime = mach_absolute_time(); - IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelTimeIndex], endTime - startTime); - } - } - - if (trace) + VM_KERNEL_UNSLIDE(filterAction), VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + } + + if (IOInterruptEventSource::reserved->statistics) { + if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) { + startTime = mach_absolute_time(); + } + } + + // Call the filter. + if (kFilterBlock & flags) { + filterRes = (filterActionBlock)(this); + } else { + filterRes = (*filterAction)(owner, this); + } + + if (IOInterruptEventSource::reserved->statistics) { + if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) { + IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1); + } + + if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) { + endTime = mach_absolute_time(); + IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelTimeIndex], endTime - startTime); + } + } + + if (trace) { IOTimeStampEndConstant(IODBG_INTES(IOINTES_FILTER), - VM_KERNEL_ADDRHIDE(filterAction), VM_KERNEL_ADDRHIDE(owner), - VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); - - if (filterRes) - signalInterrupt(); + VM_KERNEL_ADDRHIDE(filterAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + } + + if (filterRes) { + signalInterrupt(); + } } -void IOFilterInterruptEventSource::disableInterruptOccurred - (void */*refcon*/, IOService *prov, int source) +void +IOFilterInterruptEventSource::disableInterruptOccurred +(void */*refcon*/, IOService *prov, int source) { - bool filterRes; - uint64_t startTime = 0; - uint64_t endTime = 0; - bool trace = (gIOKitTrace & kIOTraceIntEventSource) ? true : false; - - if (trace) + bool filterRes; + uint64_t startTime = 0; + uint64_t endTime = 0; + bool trace = (gIOKitTrace & kIOTraceIntEventSource) ? true : false; + + if (trace) { IOTimeStampStartConstant(IODBG_INTES(IOINTES_FILTER), - VM_KERNEL_UNSLIDE(filterAction), VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); - - if (IOInterruptEventSource::reserved->statistics) { - if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) { - startTime = mach_absolute_time(); - } - } - - // Call the filter. 
- if (kFilterBlock & flags) filterRes = (filterActionBlock)(this); - else filterRes = (*filterAction)(owner, this); - - if (IOInterruptEventSource::reserved->statistics) { - if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) { - IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1); - } - - if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) { - endTime = mach_absolute_time(); - IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelTimeIndex], endTime - startTime); - } - } - - if (trace) + VM_KERNEL_UNSLIDE(filterAction), VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + } + + if (IOInterruptEventSource::reserved->statistics) { + if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) { + startTime = mach_absolute_time(); + } + } + + // Call the filter. + if (kFilterBlock & flags) { + filterRes = (filterActionBlock)(this); + } else { + filterRes = (*filterAction)(owner, this); + } + + if (IOInterruptEventSource::reserved->statistics) { + if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) { + IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1); + } + + if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelTimeIndex)) { + endTime = mach_absolute_time(); + IA_ADD_VALUE(&IOInterruptEventSource::reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelTimeIndex], endTime - startTime); + } + } + + if (trace) { IOTimeStampEndConstant(IODBG_INTES(IOINTES_FILTER), - VM_KERNEL_UNSLIDE(filterAction), VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); - - if (filterRes) { - prov->disableInterrupt(source); /* disable the interrupt */ - signalInterrupt(); - } + VM_KERNEL_UNSLIDE(filterAction), VM_KERNEL_ADDRHIDE(owner), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + } + + if (filterRes) { + prov->disableInterrupt(source); /* disable the interrupt */ + signalInterrupt(); + } } diff --git a/iokit/Kernel/IOHibernateIO.cpp b/iokit/Kernel/IOHibernateIO.cpp index 94d5b465e..be2483dbf 100644 --- a/iokit/Kernel/IOHibernateIO.cpp +++ b/iokit/Kernel/IOHibernateIO.cpp @@ -27,111 +27,111 @@ */ /* - -Sleep: - -- PMRootDomain calls IOHibernateSystemSleep() before system sleep -(devices awake, normal execution context) -- IOHibernateSystemSleep opens the hibernation file (or partition) at the bsd level, - grabs its extents and searches for a polling driver willing to work with that IOMedia. - The BSD code makes an ioctl to the storage driver to get the partition base offset to - the disk, and other ioctls to get the transfer constraints - If successful, the file is written to make sure its initially not bootable (in case of - later failure) and nvram set to point to the first block of the file. (Has to be done - here so blocking is possible in nvram support). - hibernate_setup() in osfmk is called to allocate page bitmaps for all dram, and - page out any pages it wants to (currently zero, but probably some percentage of memory). - Its assumed just allocating pages will cause the VM system to naturally select the best - pages for eviction. It also copies processor flags needed for the restore path and sets - a flag in the boot processor proc info. - gIOHibernateState = kIOHibernateStateHibernating. 
-- Regular sleep progresses - some drivers may inspect the root domain property - kIOHibernateStateKey to modify behavior. The platform driver saves state to memory - as usual but leaves motherboard I/O on. -- Eventually the platform calls ml_ppc_sleep() in the shutdown context on the last cpu, - at which point memory is ready to be saved. mapping_hibernate_flush() is called to get - all ppc RC bits out of the hash table and caches into the mapping structures. -- hibernate_write_image() is called (still in shutdown context, no blocking or preemption). - hibernate_page_list_setall() is called to get a bitmap of dram pages that need to be saved. - All pages are assumed to be saved (as part of the wired image) unless explicitly subtracted - by hibernate_page_list_setall(), avoiding having to find arch dependent low level bits. - The image header and block list are written. The header includes the second file extent so - only the header block is needed to read the file, regardless of filesystem. - The kernel segment "__HIB" is written uncompressed to the image. This segment of code and data - (only) is used to decompress the image during wake/boot. - Some additional pages are removed from the bitmaps - the buffers used for hibernation. - The bitmaps are written to the image. - More areas are removed from the bitmaps (after they have been written to the image) - the - segment "__HIB" pages and interrupt stack. - Each wired page is compressed and written and then each non-wired page. Compression and - disk writes are in parallel. - The image header is written to the start of the file and the polling driver closed. - The machine powers down (or sleeps). - -Boot/Wake: - -- BootX sees the boot-image nvram variable containing the device and block number of the image, - reads the header and if the signature is correct proceeds. The boot-image variable is cleared. -- BootX reads the portion of the image used for wired pages, to memory. Its assumed this will fit - in the OF memory environment, and the image is decrypted. There is no decompression in BootX, - that is in the kernel's __HIB section. -- BootX copies the "__HIB" section to its correct position in memory, quiesces and calls its entry - hibernate_kernel_entrypoint(), passing the location of the image in memory. Translation is off, - only code & data in that section is safe to call since all the other wired pages are still - compressed in the image. -- hibernate_kernel_entrypoint() removes pages occupied by the raw image from the page bitmaps. - It uses the bitmaps to work out which pages can be uncompressed from the image to their final - location directly, and copies those that can't to interim free pages. When the image has been - completed, the copies are uncompressed, overwriting the wired image pages. - hibernate_restore_phys_page() (in osfmk since its arch dependent, but part of the "__HIB" section) - is used to get pages into place for 64bit. -- the reset vector is called (at least on ppc), the kernel proceeds on a normal wake, with some - changes conditional on the per proc flag - before VM is turned on the boot cpu, all mappings - are removed from the software strutures, and the hash table is reinitialized. -- After the platform CPU init code is called, hibernate_machine_init() is called to restore the rest - of memory, using the polled mode driver, before other threads can run or any devices are turned on. 
- This reduces the memory usage for BootX and allows decompression in parallel with disk reads,
- for the remaining non wired pages.
-- The polling driver is closed down and regular wake proceeds. When the kernel calls iokit to wake
- (normal execution context) hibernate_teardown() in osmfk is called to release any memory, the file
- is closed via bsd.
-
-Polled Mode I/O:
-
-IOHibernateSystemSleep() finds a polled mode interface to the ATA controller via a property in the
-registry, specifying an object of calls IOPolledInterface.
-
-Before the system goes to sleep it searches from the IOMedia object (could be a filesystem or
-partition) that the image is going to live, looking for polled interface properties. If it finds
-one the IOMedia object is passed to a "probe" call for the interface to accept or reject. All the
-interfaces found are kept in an ordered list.
-
-There is an Open/Close pair of calls made to each of the interfaces at various stages since there are
-few different contexts things happen in:
-
-- there is an Open/Close (Preflight) made before any part of the system has slept (I/O is all
-up and running) and after wake - this is safe to allocate memory and do anything. The device
-ignores sleep requests from that point since its a waste of time if it goes to sleep and
-immediately wakes back up for the image write.
-
-- there is an Open/Close (BeforeSleep) pair made around the image write operations that happen
-immediately before sleep. These can't block or allocate memory - the I/O system is asleep apart
-from the low level bits (motherboard I/O etc). There is only one thread running. The close can be
-used to flush and set the disk to sleep.
-
-- there is an Open/Close (AfterSleep) pair made around the image read operations that happen
-immediately after sleep. These can't block or allocate memory. This is happening after the platform
-expert has woken the low level bits of the system, but most of the I/O system has not. There is only
-one thread running.
-
-For the actual I/O, all the ops are with respect to a single IOMemoryDescriptor that was passed
-(prepared) to the Preflight Open() call. There is a read/write op, buffer offset to the IOMD for
-the data, an offset to the disk and length (block aligned 64 bit numbers), and completion callback.
-Each I/O is async but only one is ever outstanding. The polled interface has a checkForWork call
-that is called for the hardware to check for events, and complete the I/O via the callback.
-The hibernate path uses the same transfer constraints the regular cluster I/O path in BSD uses
-to restrict I/O ops.
-*/
+ *
+ * Sleep:
+ *
+ * - PMRootDomain calls IOHibernateSystemSleep() before system sleep
+ * (devices awake, normal execution context)
+ * - IOHibernateSystemSleep opens the hibernation file (or partition) at the bsd level,
+ * grabs its extents and searches for a polling driver willing to work with that IOMedia.
+ * The BSD code makes an ioctl to the storage driver to get the partition base offset to
+ * the disk, and other ioctls to get the transfer constraints.
+ * If successful, the file is written to make sure it's initially not bootable (in case of
+ * later failure) and nvram set to point to the first block of the file. (Has to be done
+ * here so blocking is possible in nvram support).
+ * hibernate_setup() in osfmk is called to allocate page bitmaps for all dram, and
+ * page out any pages it wants to (currently zero, but probably some percentage of memory).
+ * It's assumed that just allocating pages will cause the VM system to naturally select the best
+ * pages for eviction. It also copies processor flags needed for the restore path and sets
+ * a flag in the boot processor proc info.
+ * gIOHibernateState = kIOHibernateStateHibernating.
+ * - Regular sleep progresses - some drivers may inspect the root domain property
+ * kIOHibernateStateKey to modify behavior. The platform driver saves state to memory
+ * as usual but leaves motherboard I/O on.
+ * - Eventually the platform calls ml_ppc_sleep() in the shutdown context on the last cpu,
+ * at which point memory is ready to be saved. mapping_hibernate_flush() is called to get
+ * all ppc RC bits out of the hash table and caches into the mapping structures.
+ * - hibernate_write_image() is called (still in shutdown context, no blocking or preemption).
+ * hibernate_page_list_setall() is called to get a bitmap of dram pages that need to be saved.
+ * All pages are assumed to be saved (as part of the wired image) unless explicitly subtracted
+ * by hibernate_page_list_setall(), avoiding having to find arch dependent low level bits.
+ * The image header and block list are written. The header includes the second file extent so
+ * only the header block is needed to read the file, regardless of filesystem.
+ * The kernel segment "__HIB" is written uncompressed to the image. This segment of code and data
+ * (only) is used to decompress the image during wake/boot.
+ * Some additional pages are removed from the bitmaps - the buffers used for hibernation.
+ * The bitmaps are written to the image.
+ * More areas are removed from the bitmaps (after they have been written to the image) - the
+ * segment "__HIB" pages and interrupt stack.
+ * Each wired page is compressed and written and then each non-wired page. Compression and
+ * disk writes are in parallel.
+ * The image header is written to the start of the file and the polling driver closed.
+ * The machine powers down (or sleeps).
+ *
+ * Boot/Wake:
+ *
+ * - BootX sees the boot-image nvram variable containing the device and block number of the image,
+ * reads the header and if the signature is correct proceeds. The boot-image variable is cleared.
+ * - BootX reads the portion of the image used for wired pages, to memory. It's assumed this will fit
+ * in the OF memory environment, and the image is decrypted. There is no decompression in BootX,
+ * that is in the kernel's __HIB section.
+ * - BootX copies the "__HIB" section to its correct position in memory, quiesces and calls its entry
+ * hibernate_kernel_entrypoint(), passing the location of the image in memory. Translation is off,
+ * only code & data in that section is safe to call since all the other wired pages are still
+ * compressed in the image.
+ * - hibernate_kernel_entrypoint() removes pages occupied by the raw image from the page bitmaps.
+ * It uses the bitmaps to work out which pages can be uncompressed from the image to their final
+ * location directly, and copies those that can't to interim free pages (sketched below). When the
+ * image has been fully consumed, the copies are uncompressed, overwriting the wired image pages.
+ * hibernate_restore_phys_page() (in osfmk since it's arch dependent, but part of the "__HIB" section)
+ * is used to get pages into place for 64-bit.
+ * - the reset vector is called (at least on ppc), the kernel proceeds on a normal wake, with some
+ * changes conditional on the per proc flag - before VM is turned on the boot cpu, all mappings
+ * are removed from the software structures, and the hash table is reinitialized.
+ * - After the platform CPU init code is called, hibernate_machine_init() is called to restore the rest
+ * of memory, using the polled mode driver, before other threads can run or any devices are turned on.
+ * This reduces the memory usage for BootX and allows decompression in parallel with disk reads,
+ * for the remaining non-wired pages.
+ * - The polling driver is closed down and regular wake proceeds. When the kernel calls iokit to wake
+ * (normal execution context) hibernate_teardown() in osfmk is called to release any memory, the file
+ * is closed via bsd.
+ *
+ * Polled Mode I/O:
+ *
+ * IOHibernateSystemSleep() finds a polled mode interface to the ATA controller via a property in the
+ * registry, specifying an object of class IOPolledInterface.
+ *
+ * Before the system goes to sleep it searches from the IOMedia object (could be a filesystem or
+ * partition) that the image is going to live on, looking for polled interface properties. If it finds
+ * one the IOMedia object is passed to a "probe" call for the interface to accept or reject. All the
+ * interfaces found are kept in an ordered list.
+ *
+ * There is an Open/Close pair of calls made to each of the interfaces at various stages since there are
+ * a few different contexts things happen in:
+ *
+ * - there is an Open/Close (Preflight) made before any part of the system has slept (I/O is all
+ * up and running) and after wake - this is safe to allocate memory and do anything. The device
+ * ignores sleep requests from that point since it's a waste of time if it goes to sleep and
+ * immediately wakes back up for the image write.
+ *
+ * - there is an Open/Close (BeforeSleep) pair made around the image write operations that happen
+ * immediately before sleep. These can't block or allocate memory - the I/O system is asleep apart
+ * from the low level bits (motherboard I/O etc.). There is only one thread running. The close can be
+ * used to flush and set the disk to sleep.
+ *
+ * - there is an Open/Close (AfterSleep) pair made around the image read operations that happen
+ * immediately after sleep. These can't block or allocate memory. This is happening after the platform
+ * expert has woken the low level bits of the system, but most of the I/O system has not. There is only
+ * one thread running.
+ *
+ * For the actual I/O, all the ops are with respect to a single IOMemoryDescriptor that was passed
+ * (prepared) to the Preflight Open() call. There is a read/write op, a buffer offset to the IOMD for
+ * the data, an offset to the disk and a length (block-aligned 64-bit numbers), and a completion callback.
+ * Each I/O is async but only one is ever outstanding (sketched below). The polled interface has a
+ * checkForWork call that the caller makes for the hardware to check for events and complete the I/O
+ * via the callback. The hibernate path uses the same transfer constraints the regular cluster I/O
+ * path in BSD uses to restrict I/O ops.
+ */

#include

@@ -177,63 +177,62 @@ to restrict I/O ops.
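The two-pass restore described in the Boot/Wake notes above is worth seeing in code. Below is a minimal sketch under stated assumptions, not the real __HIB restore path: image_page, page_is_free(), alloc_interim_page(), stash_compressed(), decompress_to() and decompress_interim() are all hypothetical stand-ins for the page bitmaps and the WKdm decompressor.

    #include <stdint.h>

    struct image_page {
            uint32_t     dst_ppnum;   // final physical page frame for this page
            const void * data;        // this page's compressed bytes in the raw image
    };

    // Hypothetical helpers -- not xnu functions.
    bool     page_is_free(uint32_t ppnum);                         // per the page bitmaps
    uint32_t alloc_interim_page(void);                             // grab a free frame
    void     stash_compressed(uint32_t interim_ppnum, const void * data);
    void     decompress_to(uint32_t ppnum, const void * data);
    void     decompress_interim(uint32_t dst_ppnum, uint32_t interim_ppnum);

    enum { kMaxDeferred = 4096 };

    static void
    restore_wired_pages(const image_page * pages, uint32_t count)
    {
            static uint32_t deferred_src[kMaxDeferred];
            static uint32_t deferred_dst[kMaxDeferred];
            uint32_t        ndeferred = 0;

            // Pass 1: when a page's destination frame is already free,
            // decompress straight into place; otherwise park the compressed
            // copy in an interim free page for later.
            for (uint32_t i = 0; i < count; i++) {
                    if (page_is_free(pages[i].dst_ppnum)) {
                            decompress_to(pages[i].dst_ppnum, pages[i].data);
                    } else if (ndeferred < kMaxDeferred) {
                            uint32_t interim = alloc_interim_page();
                            stash_compressed(interim, pages[i].data);
                            deferred_src[ndeferred] = interim;
                            deferred_dst[ndeferred] = pages[i].dst_ppnum;
                            ndeferred++;
                    }
            }

            // Pass 2: the raw image has now been fully consumed, so every
            // frame it occupied is free; the parked copies can be
            // uncompressed over them.
            for (uint32_t i = 0; i < ndeferred; i++) {
                    decompress_interim(deferred_dst[i], deferred_src[i]);
            }
    }

The key invariant is that pass 2 only runs once the raw image is fully consumed, so the frames the image occupied are guaranteed free by the time the deferred copies land on them.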
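The Polled Mode I/O contract above - one async op outstanding at a time, completed from a checkForWork() poll - reduces to a small loop on the caller's side. This is a sketch of that loop only; the class shape, method signatures, and completion type below are assumptions for illustration and do not match the real IOPolledInterface API.

    #include <stdint.h>

    // Hypothetical polled interface modeled on the comment above.
    typedef void (*polled_completion_t)(void * target, int status);

    class PolledInterfaceSketch {
    public:
            // Start one async transfer against the single IOMemoryDescriptor
            // prepared at Preflight Open() time; offsets and length are the
            // block-aligned 64-bit numbers the comment describes.
            virtual void startIO(bool doWrite, uint64_t mdOffset,
                uint64_t diskOffset, uint64_t length,
                polled_completion_t completion, void * target) = 0;

            // Poll the hardware for events; fires the completion callback
            // when the outstanding op finishes.
            virtual void checkForWork(void) = 0;

    protected:
            virtual ~PolledInterfaceSketch() { }
    };

    struct polled_state {
            volatile bool done;     // set by the completion callback
            int           status;   // final status of the op
    };

    static void
    polled_done(void * target, int status)
    {
            polled_state * state = (polled_state *) target;
            state->status = status;
            state->done   = true;
    }

    // Drive a single transfer to completion: exactly one op outstanding,
    // no blocking, no interrupt delivery -- just the checkForWork() poll.
    static int
    polled_transfer(PolledInterfaceSketch * poller, bool doWrite,
        uint64_t mdOffset, uint64_t diskOffset, uint64_t length)
    {
            polled_state state = { false, 0 };

            poller->startIO(doWrite, mdOffset, diskOffset, length,
                &polled_done, &state);
            while (!state.done) {
                    poller->checkForWork();
            }
            return state.status;
    }

Both the BeforeSleep write path and the AfterSleep read path reduce to this same loop; only the Open/Close bracketing around it differs.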
#include #include -extern "C" addr64_t kvtophys(vm_offset_t va); -extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); +extern "C" addr64_t kvtophys(vm_offset_t va); +extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define DISABLE_TRIM 0 -#define TRIM_DELAY 25000 +#define DISABLE_TRIM 0 +#define TRIM_DELAY 25000 -extern unsigned int save_kdebug_enable; -extern uint32_t gIOHibernateState; -uint32_t gIOHibernateMode; -static char gIOHibernateBootSignature[256+1]; -static char gIOHibernateFilename[MAXPATHLEN+1]; +extern unsigned int save_kdebug_enable; +extern uint32_t gIOHibernateState; +uint32_t gIOHibernateMode; +static char gIOHibernateBootSignature[256 + 1]; +static char gIOHibernateFilename[MAXPATHLEN + 1]; -static uuid_string_t gIOHibernateBridgeBootSessionUUIDString; +static uuid_string_t gIOHibernateBridgeBootSessionUUIDString; -static uint32_t gIOHibernateFreeRatio = 0; // free page target (percent) -uint32_t gIOHibernateFreeTime = 0*1000; // max time to spend freeing pages (ms) -static uint64_t gIOHibernateCompression = 0x80; // default compression 50% +static uint32_t gIOHibernateFreeRatio = 0; // free page target (percent) +uint32_t gIOHibernateFreeTime = 0 * 1000; // max time to spend freeing pages (ms) +static uint64_t gIOHibernateCompression = 0x80; // default compression 50% boolean_t gIOHibernateStandbyDisabled; -static IODTNVRAM * gIOOptionsEntry; -static IORegistryEntry * gIOChosenEntry; +static IODTNVRAM * gIOOptionsEntry; +static IORegistryEntry * gIOChosenEntry; -static const OSSymbol * gIOHibernateBootImageKey; -static const OSSymbol * gIOHibernateBootSignatureKey; -static const OSSymbol * gIOBridgeBootSessionUUIDKey; +static const OSSymbol * gIOHibernateBootImageKey; +static const OSSymbol * gIOHibernateBootSignatureKey; +static const OSSymbol * gIOBridgeBootSessionUUIDKey; #if defined(__i386__) || defined(__x86_64__) -static const OSSymbol * gIOHibernateRTCVariablesKey; +static const OSSymbol * gIOHibernateRTCVariablesKey; static const OSSymbol * gIOHibernateBoot0082Key; static const OSSymbol * gIOHibernateBootNextKey; -static OSData * gIOHibernateBoot0082Data; -static OSData * gIOHibernateBootNextData; -static OSObject * gIOHibernateBootNextSave; +static OSData * gIOHibernateBoot0082Data; +static OSData * gIOHibernateBootNextData; +static OSObject * gIOHibernateBootNextSave; #endif /* defined(__i386__) || defined(__x86_64__) */ static IOLock * gFSLock; - uint32_t gFSState; +uint32_t gFSState; static thread_call_t gIOHibernateTrimCalloutEntry; -static IOPolledFileIOVars gFileVars; -static IOHibernateVars gIOHibernateVars; -static IOPolledFileCryptVars gIOHibernateCryptWakeContext; -static hibernate_graphics_t _hibernateGraphics; -static hibernate_graphics_t * gIOHibernateGraphicsInfo = &_hibernateGraphics; -static hibernate_statistics_t _hibernateStats; -static hibernate_statistics_t * gIOHibernateStats = &_hibernateStats; - -enum -{ - kFSIdle = 0, - kFSOpening = 2, - kFSOpened = 3, - kFSTimedOut = 4, - kFSTrimDelay = 5 +static IOPolledFileIOVars gFileVars; +static IOHibernateVars gIOHibernateVars; +static IOPolledFileCryptVars gIOHibernateCryptWakeContext; +static hibernate_graphics_t _hibernateGraphics; +static hibernate_graphics_t * gIOHibernateGraphicsInfo = &_hibernateGraphics; +static hibernate_statistics_t _hibernateStats; +static hibernate_statistics_t * gIOHibernateStats = &_hibernateStats; + +enum{ + kFSIdle = 0, + kFSOpening = 2, + kFSOpened = 3, + kFSTimedOut 
= 4, + kFSTrimDelay = 5 }; static IOReturn IOHibernateDone(IOHibernateVars * vars); @@ -252,140 +251,142 @@ enum { kVideoMapSize = 80 * 1024 * 1024 }; static IOReturn IOMemoryDescriptorWriteFromPhysical(IOMemoryDescriptor * md, - IOByteCount offset, addr64_t bytes, IOByteCount length) + IOByteCount offset, addr64_t bytes, IOByteCount length) { - addr64_t srcAddr = bytes; - IOByteCount remaining; + addr64_t srcAddr = bytes; + IOByteCount remaining; - remaining = length = min(length, md->getLength() - offset); - while (remaining) { // (process another target segment?) - addr64_t dstAddr64; - IOByteCount dstLen; + remaining = length = min(length, md->getLength() - offset); + while (remaining) { // (process another target segment?) + addr64_t dstAddr64; + IOByteCount dstLen; - dstAddr64 = md->getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); - if (!dstAddr64) - break; + dstAddr64 = md->getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); + if (!dstAddr64) { + break; + } - // Clip segment length to remaining - if (dstLen > remaining) - dstLen = remaining; + // Clip segment length to remaining + if (dstLen > remaining) { + dstLen = remaining; + } #if 1 - bcopy_phys(srcAddr, dstAddr64, dstLen); + bcopy_phys(srcAddr, dstAddr64, dstLen); #else - copypv(srcAddr, dstAddr64, dstLen, - cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); + copypv(srcAddr, dstAddr64, dstLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); #endif - srcAddr += dstLen; - offset += dstLen; - remaining -= dstLen; - } + srcAddr += dstLen; + offset += dstLen; + remaining -= dstLen; + } - assert(!remaining); + assert(!remaining); - return remaining ? kIOReturnUnderrun : kIOReturnSuccess; + return remaining ? kIOReturnUnderrun : kIOReturnSuccess; } // copy from MD to phys addr static IOReturn IOMemoryDescriptorReadToPhysical(IOMemoryDescriptor * md, - IOByteCount offset, addr64_t bytes, IOByteCount length) + IOByteCount offset, addr64_t bytes, IOByteCount length) { - addr64_t dstAddr = bytes; - IOByteCount remaining; + addr64_t dstAddr = bytes; + IOByteCount remaining; - remaining = length = min(length, md->getLength() - offset); - while (remaining) { // (process another target segment?) - addr64_t srcAddr64; - IOByteCount dstLen; + remaining = length = min(length, md->getLength() - offset); + while (remaining) { // (process another target segment?) + addr64_t srcAddr64; + IOByteCount dstLen; - srcAddr64 = md->getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); - if (!srcAddr64) - break; + srcAddr64 = md->getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); + if (!srcAddr64) { + break; + } - // Clip segment length to remaining - if (dstLen > remaining) - dstLen = remaining; + // Clip segment length to remaining + if (dstLen > remaining) { + dstLen = remaining; + } #if 1 - bcopy_phys(srcAddr64, dstAddr, dstLen); + bcopy_phys(srcAddr64, dstAddr, dstLen); #else - copypv(srcAddr, dstAddr64, dstLen, - cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); + copypv(srcAddr, dstAddr64, dstLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); #endif - dstAddr += dstLen; - offset += dstLen; - remaining -= dstLen; - } + dstAddr += dstLen; + offset += dstLen; + remaining -= dstLen; + } - assert(!remaining); + assert(!remaining); - return remaining ? kIOReturnUnderrun : kIOReturnSuccess; + return remaining ? 
kIOReturnUnderrun : kIOReturnSuccess; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ void hibernate_set_page_state(hibernate_page_list_t * page_list, hibernate_page_list_t * page_list_wired, - vm_offset_t ppnum, vm_offset_t count, uint32_t kind) + vm_offset_t ppnum, vm_offset_t count, uint32_t kind) { - count += ppnum; - switch (kind) - { - case kIOHibernatePageStateUnwiredSave: - // unwired save - for (; ppnum < count; ppnum++) - { - hibernate_page_bitset(page_list, FALSE, ppnum); - hibernate_page_bitset(page_list_wired, TRUE, ppnum); - } - break; - case kIOHibernatePageStateWiredSave: - // wired save - for (; ppnum < count; ppnum++) - { - hibernate_page_bitset(page_list, FALSE, ppnum); - hibernate_page_bitset(page_list_wired, FALSE, ppnum); - } - break; - case kIOHibernatePageStateFree: - // free page - for (; ppnum < count; ppnum++) - { - hibernate_page_bitset(page_list, TRUE, ppnum); - hibernate_page_bitset(page_list_wired, TRUE, ppnum); - } - break; - default: - panic("hibernate_set_page_state"); - } + count += ppnum; + switch (kind) { + case kIOHibernatePageStateUnwiredSave: + // unwired save + for (; ppnum < count; ppnum++) { + hibernate_page_bitset(page_list, FALSE, ppnum); + hibernate_page_bitset(page_list_wired, TRUE, ppnum); + } + break; + case kIOHibernatePageStateWiredSave: + // wired save + for (; ppnum < count; ppnum++) { + hibernate_page_bitset(page_list, FALSE, ppnum); + hibernate_page_bitset(page_list_wired, FALSE, ppnum); + } + break; + case kIOHibernatePageStateFree: + // free page + for (; ppnum < count; ppnum++) { + hibernate_page_bitset(page_list, TRUE, ppnum); + hibernate_page_bitset(page_list_wired, TRUE, ppnum); + } + break; + default: + panic("hibernate_set_page_state"); + } } static vm_offset_t hibernate_page_list_iterate(hibernate_page_list_t * list, vm_offset_t * pPage) { - uint32_t page = *pPage; - uint32_t count; - hibernate_bitmap_t * bitmap; - - while ((bitmap = hibernate_page_bitmap_pin(list, &page))) - { - count = hibernate_page_bitmap_count(bitmap, TRUE, page); - if (!count) - break; - page += count; - if (page <= bitmap->last_page) - break; - } - - *pPage = page; - if (bitmap) - count = hibernate_page_bitmap_count(bitmap, FALSE, page); - else - count = 0; - - return (count); + uint32_t page = *pPage; + uint32_t count; + hibernate_bitmap_t * bitmap; + + while ((bitmap = hibernate_page_bitmap_pin(list, &page))) { + count = hibernate_page_bitmap_count(bitmap, TRUE, page); + if (!count) { + break; + } + page += count; + if (page <= bitmap->last_page) { + break; + } + } + + *pPage = page; + if (bitmap) { + count = hibernate_page_bitmap_count(bitmap, FALSE, page); + } else { + count = 0; + } + + return count; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -393,403 +394,414 @@ hibernate_page_list_iterate(hibernate_page_list_t * list, vm_offset_t * pPage) IOReturn IOHibernateSystemSleep(void) { - IOReturn err; - OSData * nvramData; - OSObject * obj; - OSString * str; - OSNumber * num; - bool dsSSD, vmflush, swapPinned; - IOHibernateVars * vars; - uint64_t setFileSize = 0; - - gIOHibernateState = kIOHibernateStateInactive; - - gIOHibernateDebugFlags = 0; - if (kIOLogHibernate & gIOKitDebug) - gIOHibernateDebugFlags |= kIOHibernateDebugRestoreLogs; - - if (IOService::getPMRootDomain()->getHibernateSettings( - &gIOHibernateMode, &gIOHibernateFreeRatio, &gIOHibernateFreeTime)) - { - if (kIOHibernateModeSleep & gIOHibernateMode) - // default to discard clean for safe sleep - gIOHibernateMode ^= 
(kIOHibernateModeDiscardCleanInactive - | kIOHibernateModeDiscardCleanActive); - } - - if ((obj = IOService::getPMRootDomain()->copyProperty(kIOHibernateFileKey))) - { - if ((str = OSDynamicCast(OSString, obj))) - strlcpy(gIOHibernateFilename, str->getCStringNoCopy(), + IOReturn err; + OSData * nvramData; + OSObject * obj; + OSString * str; + OSNumber * num; + bool dsSSD, vmflush, swapPinned; + IOHibernateVars * vars; + uint64_t setFileSize = 0; + + gIOHibernateState = kIOHibernateStateInactive; + + gIOHibernateDebugFlags = 0; + if (kIOLogHibernate & gIOKitDebug) { + gIOHibernateDebugFlags |= kIOHibernateDebugRestoreLogs; + } + + if (IOService::getPMRootDomain()->getHibernateSettings( + &gIOHibernateMode, &gIOHibernateFreeRatio, &gIOHibernateFreeTime)) { + if (kIOHibernateModeSleep & gIOHibernateMode) { + // default to discard clean for safe sleep + gIOHibernateMode ^= (kIOHibernateModeDiscardCleanInactive + | kIOHibernateModeDiscardCleanActive); + } + } + + if ((obj = IOService::getPMRootDomain()->copyProperty(kIOHibernateFileKey))) { + if ((str = OSDynamicCast(OSString, obj))) { + strlcpy(gIOHibernateFilename, str->getCStringNoCopy(), sizeof(gIOHibernateFilename)); - obj->release(); - } - - if (!gIOHibernateMode || !gIOHibernateFilename[0]) - return (kIOReturnUnsupported); - - HIBLOG("hibernate image path: %s\n", gIOHibernateFilename); - - vars = IONew(IOHibernateVars, 1); - if (!vars) return (kIOReturnNoMemory); - bzero(vars, sizeof(*vars)); - - IOLockLock(gFSLock); - if (!gIOHibernateTrimCalloutEntry) - { - gIOHibernateTrimCalloutEntry = thread_call_allocate(&IOHibernateSystemPostWakeTrim, &gFSLock); - } - IOHibernateSystemPostWakeTrim(NULL, NULL); - thread_call_cancel(gIOHibernateTrimCalloutEntry); - if (kFSIdle != gFSState) - { - HIBLOG("hibernate file busy\n"); - IOLockUnlock(gFSLock); - IODelete(vars, IOHibernateVars, 1); - return (kIOReturnBusy); - } - gFSState = kFSOpening; - IOLockUnlock(gFSLock); - - swapPinned = false; - do - { - vars->srcBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, - 2 * page_size + WKdm_SCRATCH_BUF_SIZE_INTERNAL, page_size); - - vars->handoffBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, - ptoa_64(gIOHibernateHandoffPageCount), page_size); - - if (!vars->srcBuffer || !vars->handoffBuffer) - { - err = kIOReturnNoMemory; - break; - } - - if ((obj = IOService::getPMRootDomain()->copyProperty(kIOHibernateFileMinSizeKey))) - { - if ((num = OSDynamicCast(OSNumber, obj))) vars->fileMinSize = num->unsigned64BitValue(); - obj->release(); - } - if ((obj = IOService::getPMRootDomain()->copyProperty(kIOHibernateFileMaxSizeKey))) - { - if ((num = OSDynamicCast(OSNumber, obj))) vars->fileMaxSize = num->unsigned64BitValue(); - obj->release(); - } - - boolean_t encryptedswap = true; - uint32_t pageCount; - AbsoluteTime startTime, endTime; - uint64_t nsec; - - bzero(gIOHibernateCurrentHeader, sizeof(IOHibernateImageHeader)); - gIOHibernateCurrentHeader->debugFlags = gIOHibernateDebugFlags; - gIOHibernateCurrentHeader->signature = kIOHibernateHeaderInvalidSignature; + } + obj->release(); + } - vmflush = ((kOSBooleanTrue == IOService::getPMRootDomain()->getProperty(kIOPMDeepSleepEnabledKey))); - err = hibernate_alloc_page_lists(&vars->page_list, - &vars->page_list_wired, - &vars->page_list_pal); - if (KERN_SUCCESS != err) break; - - err = hibernate_pin_swap(TRUE); - if (KERN_SUCCESS != err) break; - swapPinned = true; - - if (vars->fileMinSize || (kIOHibernateModeFileResize & gIOHibernateMode)) - { - 
hibernate_page_list_setall(vars->page_list, - vars->page_list_wired, - vars->page_list_pal, - true /* preflight */, - vmflush /* discard */, - &pageCount); - PE_Video consoleInfo; - bzero(&consoleInfo, sizeof(consoleInfo)); - IOService::getPlatform()->getConsoleInfo(&consoleInfo); - - // estimate: 6% increase in pages compressed - // screen preview 2 images compressed 0% - setFileSize = ((ptoa_64((106 * pageCount) / 100) * gIOHibernateCompression) >> 8) - + vars->page_list->list_size - + (consoleInfo.v_width * consoleInfo.v_height * 8); - enum { setFileRound = 1024*1024ULL }; - setFileSize = ((setFileSize + setFileRound) & ~(setFileRound - 1)); - - HIBLOG("hibernate_page_list_setall preflight pageCount %d est comp %qd setfile %qd min %qd\n", - pageCount, (100ULL * gIOHibernateCompression) >> 8, - setFileSize, vars->fileMinSize); - - if (!(kIOHibernateModeFileResize & gIOHibernateMode) - && (setFileSize < vars->fileMinSize)) - { - setFileSize = vars->fileMinSize; - } - } - - vars->volumeCryptKeySize = sizeof(vars->volumeCryptKey); - err = IOPolledFileOpen(gIOHibernateFilename, - (kIOPolledFileCreate | kIOPolledFileHibernate), - setFileSize, 0, - gIOHibernateCurrentHeader, sizeof(gIOHibernateCurrentHeader), - &vars->fileVars, &nvramData, - &vars->volumeCryptKey[0], &vars->volumeCryptKeySize); - - if (KERN_SUCCESS != err) - { - IOLockLock(gFSLock); - if (kFSOpening != gFSState) err = kIOReturnTimeout; - IOLockUnlock(gFSLock); - } - - if (KERN_SUCCESS != err) - { - HIBLOG("IOPolledFileOpen(%x)\n", err); - break; - } - - // write extents for debug data usage in EFI - IOWriteExtentsToFile(vars->fileVars, kIOHibernateHeaderOpenSignature); - - err = IOPolledFilePollersSetup(vars->fileVars, kIOPolledPreflightState); - if (KERN_SUCCESS != err) break; - - clock_get_uptime(&startTime); - err = hibernate_setup(gIOHibernateCurrentHeader, - vmflush, - vars->page_list, vars->page_list_wired, vars->page_list_pal); - clock_get_uptime(&endTime); - SUB_ABSOLUTETIME(&endTime, &startTime); - absolutetime_to_nanoseconds(endTime, &nsec); - - boolean_t haveSwapPin, hibFileSSD; - haveSwapPin = vm_swap_files_pinned(); - - hibFileSSD = (kIOPolledFileSSD & vars->fileVars->flags); - - HIBLOG("hibernate_setup(%d) took %qd ms, swapPin(%d) ssd(%d)\n", - err, nsec / 1000000ULL, - haveSwapPin, hibFileSSD); - if (KERN_SUCCESS != err) break; - - gIOHibernateStandbyDisabled = ((!haveSwapPin || !hibFileSSD)); - - dsSSD = ((0 != (kIOPolledFileSSD & vars->fileVars->flags)) - && (kOSBooleanTrue == IOService::getPMRootDomain()->getProperty(kIOPMDeepSleepEnabledKey))); - - if (dsSSD) gIOHibernateCurrentHeader->options |= kIOHibernateOptionSSD | kIOHibernateOptionColor; - else gIOHibernateCurrentHeader->options |= kIOHibernateOptionProgress; + if (!gIOHibernateMode || !gIOHibernateFilename[0]) { + return kIOReturnUnsupported; + } + HIBLOG("hibernate image path: %s\n", gIOHibernateFilename); -#if defined(__i386__) || defined(__x86_64__) - if (vars->volumeCryptKeySize && - (kOSBooleanTrue != IOService::getPMRootDomain()->getProperty(kIOPMDestroyFVKeyOnStandbyKey))) - { - uintptr_t smcVars[2]; - smcVars[0] = vars->volumeCryptKeySize; - smcVars[1] = (uintptr_t)(void *) &gIOHibernateVars.volumeCryptKey[0]; + vars = IONew(IOHibernateVars, 1); + if (!vars) { + return kIOReturnNoMemory; + } + bzero(vars, sizeof(*vars)); - IOService::getPMRootDomain()->setProperty(kIOHibernateSMCVariablesKey, smcVars, sizeof(smcVars)); - bzero(smcVars, sizeof(smcVars)); + IOLockLock(gFSLock); + if (!gIOHibernateTrimCalloutEntry) { + 
gIOHibernateTrimCalloutEntry = thread_call_allocate(&IOHibernateSystemPostWakeTrim, &gFSLock); + } + IOHibernateSystemPostWakeTrim(NULL, NULL); + thread_call_cancel(gIOHibernateTrimCalloutEntry); + if (kFSIdle != gFSState) { + HIBLOG("hibernate file busy\n"); + IOLockUnlock(gFSLock); + IODelete(vars, IOHibernateVars, 1); + return kIOReturnBusy; } + gFSState = kFSOpening; + IOLockUnlock(gFSLock); + + swapPinned = false; + do{ + vars->srcBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, + 2 * page_size + WKdm_SCRATCH_BUF_SIZE_INTERNAL, page_size); + + vars->handoffBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionOutIn, + ptoa_64(gIOHibernateHandoffPageCount), page_size); + + if (!vars->srcBuffer || !vars->handoffBuffer) { + err = kIOReturnNoMemory; + break; + } + + if ((obj = IOService::getPMRootDomain()->copyProperty(kIOHibernateFileMinSizeKey))) { + if ((num = OSDynamicCast(OSNumber, obj))) { + vars->fileMinSize = num->unsigned64BitValue(); + } + obj->release(); + } + if ((obj = IOService::getPMRootDomain()->copyProperty(kIOHibernateFileMaxSizeKey))) { + if ((num = OSDynamicCast(OSNumber, obj))) { + vars->fileMaxSize = num->unsigned64BitValue(); + } + obj->release(); + } + + boolean_t encryptedswap = true; + uint32_t pageCount; + AbsoluteTime startTime, endTime; + uint64_t nsec; + + bzero(gIOHibernateCurrentHeader, sizeof(IOHibernateImageHeader)); + gIOHibernateCurrentHeader->debugFlags = gIOHibernateDebugFlags; + gIOHibernateCurrentHeader->signature = kIOHibernateHeaderInvalidSignature; + + vmflush = ((kOSBooleanTrue == IOService::getPMRootDomain()->getProperty(kIOPMDeepSleepEnabledKey))); + err = hibernate_alloc_page_lists(&vars->page_list, + &vars->page_list_wired, + &vars->page_list_pal); + if (KERN_SUCCESS != err) { + break; + } + + err = hibernate_pin_swap(TRUE); + if (KERN_SUCCESS != err) { + break; + } + swapPinned = true; + + if (vars->fileMinSize || (kIOHibernateModeFileResize & gIOHibernateMode)) { + hibernate_page_list_setall(vars->page_list, + vars->page_list_wired, + vars->page_list_pal, + true /* preflight */, + vmflush /* discard */, + &pageCount); + PE_Video consoleInfo; + bzero(&consoleInfo, sizeof(consoleInfo)); + IOService::getPlatform()->getConsoleInfo(&consoleInfo); + + // estimate: 6% increase in pages compressed + // screen preview 2 images compressed 0% + setFileSize = ((ptoa_64((106 * pageCount) / 100) * gIOHibernateCompression) >> 8) + + vars->page_list->list_size + + (consoleInfo.v_width * consoleInfo.v_height * 8); + enum { setFileRound = 1024 * 1024ULL }; + setFileSize = ((setFileSize + setFileRound) & ~(setFileRound - 1)); + + HIBLOG("hibernate_page_list_setall preflight pageCount %d est comp %qd setfile %qd min %qd\n", + pageCount, (100ULL * gIOHibernateCompression) >> 8, + setFileSize, vars->fileMinSize); + + if (!(kIOHibernateModeFileResize & gIOHibernateMode) + && (setFileSize < vars->fileMinSize)) { + setFileSize = vars->fileMinSize; + } + } + + vars->volumeCryptKeySize = sizeof(vars->volumeCryptKey); + err = IOPolledFileOpen(gIOHibernateFilename, + (kIOPolledFileCreate | kIOPolledFileHibernate), + setFileSize, 0, + gIOHibernateCurrentHeader, sizeof(gIOHibernateCurrentHeader), + &vars->fileVars, &nvramData, + &vars->volumeCryptKey[0], &vars->volumeCryptKeySize); + + if (KERN_SUCCESS != err) { + IOLockLock(gFSLock); + if (kFSOpening != gFSState) { + err = kIOReturnTimeout; + } + IOLockUnlock(gFSLock); + } + + if (KERN_SUCCESS != err) { + HIBLOG("IOPolledFileOpen(%x)\n", err); + break; + } + + // write extents for debug data usage 
in EFI + IOWriteExtentsToFile(vars->fileVars, kIOHibernateHeaderOpenSignature); + + err = IOPolledFilePollersSetup(vars->fileVars, kIOPolledPreflightState); + if (KERN_SUCCESS != err) { + break; + } + + clock_get_uptime(&startTime); + err = hibernate_setup(gIOHibernateCurrentHeader, + vmflush, + vars->page_list, vars->page_list_wired, vars->page_list_pal); + clock_get_uptime(&endTime); + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nsec); + + boolean_t haveSwapPin, hibFileSSD; + haveSwapPin = vm_swap_files_pinned(); + + hibFileSSD = (kIOPolledFileSSD & vars->fileVars->flags); + + HIBLOG("hibernate_setup(%d) took %qd ms, swapPin(%d) ssd(%d)\n", + err, nsec / 1000000ULL, + haveSwapPin, hibFileSSD); + if (KERN_SUCCESS != err) { + break; + } + + gIOHibernateStandbyDisabled = ((!haveSwapPin || !hibFileSSD)); + + dsSSD = ((0 != (kIOPolledFileSSD & vars->fileVars->flags)) + && (kOSBooleanTrue == IOService::getPMRootDomain()->getProperty(kIOPMDeepSleepEnabledKey))); + + if (dsSSD) { + gIOHibernateCurrentHeader->options |= kIOHibernateOptionSSD | kIOHibernateOptionColor; + } else { + gIOHibernateCurrentHeader->options |= kIOHibernateOptionProgress; + } + + +#if defined(__i386__) || defined(__x86_64__) + if (vars->volumeCryptKeySize && + (kOSBooleanTrue != IOService::getPMRootDomain()->getProperty(kIOPMDestroyFVKeyOnStandbyKey))) { + uintptr_t smcVars[2]; + smcVars[0] = vars->volumeCryptKeySize; + smcVars[1] = (uintptr_t)(void *) &gIOHibernateVars.volumeCryptKey[0]; + + IOService::getPMRootDomain()->setProperty(kIOHibernateSMCVariablesKey, smcVars, sizeof(smcVars)); + bzero(smcVars, sizeof(smcVars)); + } #endif - if (encryptedswap || vars->volumeCryptKeySize) - gIOHibernateMode ^= kIOHibernateModeEncrypt; + if (encryptedswap || vars->volumeCryptKeySize) { + gIOHibernateMode ^= kIOHibernateModeEncrypt; + } - if (kIOHibernateOptionProgress & gIOHibernateCurrentHeader->options) - { - vars->videoAllocSize = kVideoMapSize; - if (KERN_SUCCESS != kmem_alloc_pageable(kernel_map, &vars->videoMapping, vars->videoAllocSize, VM_KERN_MEMORY_IOKIT)) - vars->videoMapping = 0; - } + if (kIOHibernateOptionProgress & gIOHibernateCurrentHeader->options) { + vars->videoAllocSize = kVideoMapSize; + if (KERN_SUCCESS != kmem_alloc_pageable(kernel_map, &vars->videoMapping, vars->videoAllocSize, VM_KERN_MEMORY_IOKIT)) { + vars->videoMapping = 0; + } + } - // generate crypt keys - for (uint32_t i = 0; i < sizeof(vars->wiredCryptKey); i++) - vars->wiredCryptKey[i] = random(); - for (uint32_t i = 0; i < sizeof(vars->cryptKey); i++) - vars->cryptKey[i] = random(); + // generate crypt keys + for (uint32_t i = 0; i < sizeof(vars->wiredCryptKey); i++) { + vars->wiredCryptKey[i] = random(); + } + for (uint32_t i = 0; i < sizeof(vars->cryptKey); i++) { + vars->cryptKey[i] = random(); + } - // set nvram + // set nvram - IOSetBootImageNVRAM(nvramData); - nvramData->release(); + IOSetBootImageNVRAM(nvramData); + nvramData->release(); #if defined(__i386__) || defined(__x86_64__) - { - struct AppleRTCHibernateVars - { - uint8_t signature[4]; - uint32_t revision; - uint8_t booterSignature[20]; - uint8_t wiredCryptKey[16]; - }; - AppleRTCHibernateVars rtcVars; - OSData * data; - - rtcVars.signature[0] = 'A'; - rtcVars.signature[1] = 'A'; - rtcVars.signature[2] = 'P'; - rtcVars.signature[3] = 'L'; - rtcVars.revision = 1; - bcopy(&vars->wiredCryptKey[0], &rtcVars.wiredCryptKey[0], sizeof(rtcVars.wiredCryptKey)); - - if (gIOChosenEntry - && (data = OSDynamicCast(OSData, 
gIOChosenEntry->getProperty(gIOHibernateBootSignatureKey))) - && (sizeof(rtcVars.booterSignature) <= data->getLength())) - { - bcopy(data->getBytesNoCopy(), &rtcVars.booterSignature[0], sizeof(rtcVars.booterSignature)); - } - else if (gIOHibernateBootSignature[0]) - { - char c; - uint8_t value = 0; - uint32_t in, out, digits; - for (in = out = digits = 0; - (c = gIOHibernateBootSignature[in]) && (in < sizeof(gIOHibernateBootSignature)); - in++) { - if ((c >= 'a') && (c <= 'f')) c -= 'a' - 10; - else if ((c >= 'A') && (c <= 'F')) c -= 'A' - 10; - else if ((c >= '0') && (c <= '9')) c -= '0'; - else - { - if (c == '=') out = digits = value = 0; - continue; - } - value = (value << 4) | c; - if (digits & 1) - { - rtcVars.booterSignature[out++] = value; - if (out >= sizeof(rtcVars.booterSignature)) break; - } - digits++; - } - } + struct AppleRTCHibernateVars { + uint8_t signature[4]; + uint32_t revision; + uint8_t booterSignature[20]; + uint8_t wiredCryptKey[16]; + }; + AppleRTCHibernateVars rtcVars; + OSData * data; + + rtcVars.signature[0] = 'A'; + rtcVars.signature[1] = 'A'; + rtcVars.signature[2] = 'P'; + rtcVars.signature[3] = 'L'; + rtcVars.revision = 1; + bcopy(&vars->wiredCryptKey[0], &rtcVars.wiredCryptKey[0], sizeof(rtcVars.wiredCryptKey)); + + if (gIOChosenEntry + && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(gIOHibernateBootSignatureKey))) + && (sizeof(rtcVars.booterSignature) <= data->getLength())) { + bcopy(data->getBytesNoCopy(), &rtcVars.booterSignature[0], sizeof(rtcVars.booterSignature)); + } else if (gIOHibernateBootSignature[0]) { + char c; + uint8_t value = 0; + uint32_t in, out, digits; + for (in = out = digits = 0; + (c = gIOHibernateBootSignature[in]) && (in < sizeof(gIOHibernateBootSignature)); + in++) { + if ((c >= 'a') && (c <= 'f')) { + c -= 'a' - 10; + } else if ((c >= 'A') && (c <= 'F')) { + c -= 'A' - 10; + } else if ((c >= '0') && (c <= '9')) { + c -= '0'; + } else { + if (c == '=') { + out = digits = value = 0; + } + continue; + } + value = (value << 4) | c; + if (digits & 1) { + rtcVars.booterSignature[out++] = value; + if (out >= sizeof(rtcVars.booterSignature)) { + break; + } + } + digits++; + } + } #if DEBUG || DEVELOPMENT - if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> rtc:", - &rtcVars, sizeof(rtcVars), &kprintf); + if (kIOLogHibernate & gIOKitDebug) { + IOKitKernelLogBuffer("H> rtc:", + &rtcVars, sizeof(rtcVars), &kprintf); + } #endif /* DEBUG || DEVELOPMENT */ - data = OSData::withBytes(&rtcVars, sizeof(rtcVars)); - if (data) - { - if (gIOHibernateRTCVariablesKey) - IOService::getPMRootDomain()->setProperty(gIOHibernateRTCVariablesKey, data); - data->release(); - } - if (gIOChosenEntry && gIOOptionsEntry) - { - data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(kIOHibernateMachineSignatureKey)); - if (data) gIOHibernateCurrentHeader->machineSignature = *((UInt32 *)data->getBytesNoCopy()); - // set BootNext - if (!gIOHibernateBoot0082Data) - { - OSData * fileData = 0; - data = OSDynamicCast(OSData, gIOChosenEntry->getProperty("boot-device-path")); - if (data && data->getLength() >= 4) fileData = OSDynamicCast(OSData, gIOChosenEntry->getProperty("boot-file-path")); - if (data) - { - // AppleNVRAM_EFI_LOAD_OPTION - struct { - uint32_t Attributes; - uint16_t FilePathLength; - uint16_t Desc; - } loadOptionHeader; - loadOptionHeader.Attributes = 1; - loadOptionHeader.FilePathLength = data->getLength(); - loadOptionHeader.Desc = 0; - if (fileData) - { - loadOptionHeader.FilePathLength -= 4; - loadOptionHeader.FilePathLength += 
fileData->getLength(); - } - gIOHibernateBoot0082Data = OSData::withCapacity(sizeof(loadOptionHeader) + loadOptionHeader.FilePathLength); - if (gIOHibernateBoot0082Data) - { - gIOHibernateBoot0082Data->appendBytes(&loadOptionHeader, sizeof(loadOptionHeader)); - if (fileData) - { - gIOHibernateBoot0082Data->appendBytes(data->getBytesNoCopy(), data->getLength() - 4); - gIOHibernateBoot0082Data->appendBytes(fileData); - } - else gIOHibernateBoot0082Data->appendBytes(data); + data = OSData::withBytes(&rtcVars, sizeof(rtcVars)); + if (data) { + if (gIOHibernateRTCVariablesKey) { + IOService::getPMRootDomain()->setProperty(gIOHibernateRTCVariablesKey, data); + } + data->release(); } - } - } - if (!gIOHibernateBootNextData) - { - uint16_t bits = 0x0082; - gIOHibernateBootNextData = OSData::withBytes(&bits, sizeof(bits)); - } + if (gIOChosenEntry && gIOOptionsEntry) { + data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(kIOHibernateMachineSignatureKey)); + if (data) { + gIOHibernateCurrentHeader->machineSignature = *((UInt32 *)data->getBytesNoCopy()); + } + // set BootNext + if (!gIOHibernateBoot0082Data) { + OSData * fileData = 0; + data = OSDynamicCast(OSData, gIOChosenEntry->getProperty("boot-device-path")); + if (data && data->getLength() >= 4) { + fileData = OSDynamicCast(OSData, gIOChosenEntry->getProperty("boot-file-path")); + } + if (data) { + // AppleNVRAM_EFI_LOAD_OPTION + struct { + uint32_t Attributes; + uint16_t FilePathLength; + uint16_t Desc; + } loadOptionHeader; + loadOptionHeader.Attributes = 1; + loadOptionHeader.FilePathLength = data->getLength(); + loadOptionHeader.Desc = 0; + if (fileData) { + loadOptionHeader.FilePathLength -= 4; + loadOptionHeader.FilePathLength += fileData->getLength(); + } + gIOHibernateBoot0082Data = OSData::withCapacity(sizeof(loadOptionHeader) + loadOptionHeader.FilePathLength); + if (gIOHibernateBoot0082Data) { + gIOHibernateBoot0082Data->appendBytes(&loadOptionHeader, sizeof(loadOptionHeader)); + if (fileData) { + gIOHibernateBoot0082Data->appendBytes(data->getBytesNoCopy(), data->getLength() - 4); + gIOHibernateBoot0082Data->appendBytes(fileData); + } else { + gIOHibernateBoot0082Data->appendBytes(data); + } + } + } + } + if (!gIOHibernateBootNextData) { + uint16_t bits = 0x0082; + gIOHibernateBootNextData = OSData::withBytes(&bits, sizeof(bits)); + } #if DEBUG || DEVELOPMENT - if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> bootnext:", - gIOHibernateBoot0082Data->getBytesNoCopy(), gIOHibernateBoot0082Data->getLength(), &kprintf); + if (kIOLogHibernate & gIOKitDebug) { + IOKitKernelLogBuffer("H> bootnext:", + gIOHibernateBoot0082Data->getBytesNoCopy(), gIOHibernateBoot0082Data->getLength(), &kprintf); + } #endif /* DEBUG || DEVELOPMENT */ - if (gIOHibernateBoot0082Key && gIOHibernateBoot0082Data && gIOHibernateBootNextKey && gIOHibernateBootNextData) - { - gIOHibernateBootNextSave = gIOOptionsEntry->copyProperty(gIOHibernateBootNextKey); - gIOOptionsEntry->setProperty(gIOHibernateBoot0082Key, gIOHibernateBoot0082Data); - gIOOptionsEntry->setProperty(gIOHibernateBootNextKey, gIOHibernateBootNextData); + if (gIOHibernateBoot0082Key && gIOHibernateBoot0082Data && gIOHibernateBootNextKey && gIOHibernateBootNextData) { + gIOHibernateBootNextSave = gIOOptionsEntry->copyProperty(gIOHibernateBootNextKey); + gIOOptionsEntry->setProperty(gIOHibernateBoot0082Key, gIOHibernateBoot0082Data); + gIOOptionsEntry->setProperty(gIOHibernateBootNextKey, gIOHibernateBootNextData); + } + // BootNext + } } - // BootNext - } - } #endif /* !i386 && !x86_64 
*/ - } - while (false); - - if (swapPinned) hibernate_pin_swap(FALSE); - - IOLockLock(gFSLock); - if ((kIOReturnSuccess == err) && (kFSOpening != gFSState)) - { - HIBLOG("hibernate file close due timeout\n"); - err = kIOReturnTimeout; - } - if (kIOReturnSuccess == err) - { - gFSState = kFSOpened; - gIOHibernateVars = *vars; - gFileVars = *vars->fileVars; - gFileVars.allocated = false; - gIOHibernateVars.fileVars = &gFileVars; - gIOHibernateCurrentHeader->signature = kIOHibernateHeaderSignature; - gIOHibernateState = kIOHibernateStateHibernating; + }while (false); + + if (swapPinned) { + hibernate_pin_swap(FALSE); + } + + IOLockLock(gFSLock); + if ((kIOReturnSuccess == err) && (kFSOpening != gFSState)) { + HIBLOG("hibernate file close due timeout\n"); + err = kIOReturnTimeout; + } + if (kIOReturnSuccess == err) { + gFSState = kFSOpened; + gIOHibernateVars = *vars; + gFileVars = *vars->fileVars; + gFileVars.allocated = false; + gIOHibernateVars.fileVars = &gFileVars; + gIOHibernateCurrentHeader->signature = kIOHibernateHeaderSignature; + gIOHibernateState = kIOHibernateStateHibernating; #if DEBUG || DEVELOPMENT - if (kIOLogHibernate & gIOKitDebug) - { - OSData * data = OSDynamicCast(OSData, IOService::getPMRootDomain()->getProperty(kIOHibernateSMCVariablesKey)); - if (data) - { - uintptr_t * smcVars = (typeof(smcVars)) data->getBytesNoCopy(); - IOKitKernelLogBuffer("H> smc:", - (const void *)smcVars[1], smcVars[0], &kprintf); - } - } + if (kIOLogHibernate & gIOKitDebug) { + OSData * data = OSDynamicCast(OSData, IOService::getPMRootDomain()->getProperty(kIOHibernateSMCVariablesKey)); + if (data) { + uintptr_t * smcVars = (typeof(smcVars))data->getBytesNoCopy(); + IOKitKernelLogBuffer("H> smc:", + (const void *)smcVars[1], smcVars[0], &kprintf); + } + } #endif /* DEBUG || DEVELOPMENT */ - } - else - { - IOPolledFileIOVars * fileVars = vars->fileVars; - IOHibernateDone(vars); - IOPolledFileClose(&fileVars, + } else { + IOPolledFileIOVars * fileVars = vars->fileVars; + IOHibernateDone(vars); + IOPolledFileClose(&fileVars, #if DISABLE_TRIM - 0, NULL, 0, 0, 0); + 0, NULL, 0, 0, 0); #else - 0, NULL, 0, sizeof(IOHibernateImageHeader), setFileSize); + 0, NULL, 0, sizeof(IOHibernateImageHeader), setFileSize); #endif - gFSState = kFSIdle; - } - IOLockUnlock(gFSLock); + gFSState = kFSIdle; + } + IOLockUnlock(gFSLock); - if (vars->fileVars) IODelete(vars->fileVars, IOPolledFileIOVars, 1); - IODelete(vars, IOHibernateVars, 1); + if (vars->fileVars) { + IODelete(vars->fileVars, IOPolledFileIOVars, 1); + } + IODelete(vars, IOHibernateVars, 1); - return (err); + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -797,31 +809,29 @@ IOHibernateSystemSleep(void) static void IOSetBootImageNVRAM(OSData * data) { - IORegistryEntry * regEntry; - - if (!gIOOptionsEntry) - { - regEntry = IORegistryEntry::fromPath("/options", gIODTPlane); - gIOOptionsEntry = OSDynamicCast(IODTNVRAM, regEntry); - if (regEntry && !gIOOptionsEntry) - regEntry->release(); - } - if (gIOOptionsEntry && gIOHibernateBootImageKey) - { - if (data) - { - gIOOptionsEntry->setProperty(gIOHibernateBootImageKey, data); + IORegistryEntry * regEntry; + + if (!gIOOptionsEntry) { + regEntry = IORegistryEntry::fromPath("/options", gIODTPlane); + gIOOptionsEntry = OSDynamicCast(IODTNVRAM, regEntry); + if (regEntry && !gIOOptionsEntry) { + regEntry->release(); + } + } + if (gIOOptionsEntry && gIOHibernateBootImageKey) { + if (data) { + gIOOptionsEntry->setProperty(gIOHibernateBootImageKey, data); #if DEBUG || 
DEVELOPMENT - if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> boot-image:", - data->getBytesNoCopy(), data->getLength(), &kprintf); + if (kIOLogHibernate & gIOKitDebug) { + IOKitKernelLogBuffer("H> boot-image:", + data->getBytesNoCopy(), data->getLength(), &kprintf); + } #endif /* DEBUG || DEVELOPMENT */ - } - else - { - gIOOptionsEntry->removeProperty(gIOHibernateBootImageKey); - gIOOptionsEntry->sync(); + } else { + gIOOptionsEntry->removeProperty(gIOHibernateBootImageKey); + gIOOptionsEntry->sync(); + } } - } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -832,51 +842,49 @@ IOSetBootImageNVRAM(OSData * data) static IOReturn IOWriteExtentsToFile(IOPolledFileIOVars * vars, uint32_t signature) { - IOHibernateImageHeader hdr; - IOItemCount count; - IOReturn err = kIOReturnSuccess; - int rc; - IOPolledFileExtent * fileExtents; - - fileExtents = (typeof(fileExtents)) vars->fileExtents->getBytesNoCopy(); - - memset(&hdr, 0, sizeof(IOHibernateImageHeader)); - count = vars->fileExtents->getLength(); - if (count > sizeof(hdr.fileExtentMap)) - { - hdr.fileExtentMapSize = count; - count = sizeof(hdr.fileExtentMap); - } - else - hdr.fileExtentMapSize = sizeof(hdr.fileExtentMap); - - bcopy(fileExtents, &hdr.fileExtentMap[0], count); - - // copy file block extent list if larger than header - if (hdr.fileExtentMapSize > sizeof(hdr.fileExtentMap)) - { - count = hdr.fileExtentMapSize - sizeof(hdr.fileExtentMap); - rc = kern_write_file(vars->fileRef, vars->blockSize, - (caddr_t)(((uint8_t *)fileExtents) + sizeof(hdr.fileExtentMap)), - count, IO_SKIP_ENCRYPTION); - if (rc != 0) { - HIBLOG("kern_write_file returned %d\n", rc); - err = kIOReturnIOError; - goto exit; - } - } - hdr.signature = signature; - hdr.deviceBlockSize = vars->blockSize; - - rc = kern_write_file(vars->fileRef, 0, (char *)&hdr, sizeof(hdr), IO_SKIP_ENCRYPTION); - if (rc != 0) { - HIBLOG("kern_write_file returned %d\n", rc); - err = kIOReturnIOError; - goto exit; - } + IOHibernateImageHeader hdr; + IOItemCount count; + IOReturn err = kIOReturnSuccess; + int rc; + IOPolledFileExtent * fileExtents; + + fileExtents = (typeof(fileExtents))vars->fileExtents->getBytesNoCopy(); + + memset(&hdr, 0, sizeof(IOHibernateImageHeader)); + count = vars->fileExtents->getLength(); + if (count > sizeof(hdr.fileExtentMap)) { + hdr.fileExtentMapSize = count; + count = sizeof(hdr.fileExtentMap); + } else { + hdr.fileExtentMapSize = sizeof(hdr.fileExtentMap); + } + + bcopy(fileExtents, &hdr.fileExtentMap[0], count); + + // copy file block extent list if larger than header + if (hdr.fileExtentMapSize > sizeof(hdr.fileExtentMap)) { + count = hdr.fileExtentMapSize - sizeof(hdr.fileExtentMap); + rc = kern_write_file(vars->fileRef, vars->blockSize, + (caddr_t)(((uint8_t *)fileExtents) + sizeof(hdr.fileExtentMap)), + count, IO_SKIP_ENCRYPTION); + if (rc != 0) { + HIBLOG("kern_write_file returned %d\n", rc); + err = kIOReturnIOError; + goto exit; + } + } + hdr.signature = signature; + hdr.deviceBlockSize = vars->blockSize; + + rc = kern_write_file(vars->fileRef, 0, (char *)&hdr, sizeof(hdr), IO_SKIP_ENCRYPTION); + if (rc != 0) { + HIBLOG("kern_write_file returned %d\n", rc); + err = kIOReturnIOError; + goto exit; + } exit: - return err; + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -886,115 +894,105 @@ DECLARE_IOHIBERNATEPROGRESSALPHA static void ProgressInit(hibernate_graphics_t * display, uint8_t * screen, uint8_t * saveunder, uint32_t savelen) { - uint32_t rowBytes, 
pixelShift; - uint32_t x, y; - int32_t blob; - uint32_t alpha, in, color, result; - uint8_t * out; - uint32_t saveindex[kIOHibernateProgressCount] = { 0 }; - - rowBytes = display->rowBytes; - pixelShift = display->depth >> 4; - if (pixelShift < 1) return; - - screen += ((display->width - - kIOHibernateProgressCount * (kIOHibernateProgressWidth + kIOHibernateProgressSpacing)) << (pixelShift - 1)) - + (display->height - kIOHibernateProgressOriginY - kIOHibernateProgressHeight) * rowBytes; - - for (y = 0; y < kIOHibernateProgressHeight; y++) - { - out = screen + y * rowBytes; - for (blob = 0; blob < kIOHibernateProgressCount; blob++) - { - color = blob ? kIOHibernateProgressDarkGray : kIOHibernateProgressMidGray; - for (x = 0; x < kIOHibernateProgressWidth; x++) - { - alpha = gIOHibernateProgressAlpha[y][x]; - result = color; - if (alpha) - { - if (0xff != alpha) - { - if (1 == pixelShift) - { - in = *((uint16_t *)out) & 0x1f; // 16 - in = (in << 3) | (in >> 2); - } - else - in = *((uint32_t *)out) & 0xff; // 32 - saveunder[blob * kIOHibernateProgressSaveUnderSize + saveindex[blob]++] = in; - result = ((255 - alpha) * in + alpha * result + 0xff) >> 8; - } - if (1 == pixelShift) - { - result >>= 3; - *((uint16_t *)out) = (result << 10) | (result << 5) | result; // 16 - } - else - *((uint32_t *)out) = (result << 16) | (result << 8) | result; // 32 - } - out += (1 << pixelShift); - } - out += (kIOHibernateProgressSpacing << pixelShift); - } - } + uint32_t rowBytes, pixelShift; + uint32_t x, y; + int32_t blob; + uint32_t alpha, in, color, result; + uint8_t * out; + uint32_t saveindex[kIOHibernateProgressCount] = { 0 }; + + rowBytes = display->rowBytes; + pixelShift = display->depth >> 4; + if (pixelShift < 1) { + return; + } + + screen += ((display->width + - kIOHibernateProgressCount * (kIOHibernateProgressWidth + kIOHibernateProgressSpacing)) << (pixelShift - 1)) + + (display->height - kIOHibernateProgressOriginY - kIOHibernateProgressHeight) * rowBytes; + + for (y = 0; y < kIOHibernateProgressHeight; y++) { + out = screen + y * rowBytes; + for (blob = 0; blob < kIOHibernateProgressCount; blob++) { + color = blob ? 
kIOHibernateProgressDarkGray : kIOHibernateProgressMidGray; + for (x = 0; x < kIOHibernateProgressWidth; x++) { + alpha = gIOHibernateProgressAlpha[y][x]; + result = color; + if (alpha) { + if (0xff != alpha) { + if (1 == pixelShift) { + in = *((uint16_t *)out) & 0x1f; // 16 + in = (in << 3) | (in >> 2); + } else { + in = *((uint32_t *)out) & 0xff; // 32 + } + saveunder[blob * kIOHibernateProgressSaveUnderSize + saveindex[blob]++] = in; + result = ((255 - alpha) * in + alpha * result + 0xff) >> 8; + } + if (1 == pixelShift) { + result >>= 3; + *((uint16_t *)out) = (result << 10) | (result << 5) | result; // 16 + } else { + *((uint32_t *)out) = (result << 16) | (result << 8) | result; // 32 + } + } + out += (1 << pixelShift); + } + out += (kIOHibernateProgressSpacing << pixelShift); + } + } } static void ProgressUpdate(hibernate_graphics_t * display, uint8_t * screen, int32_t firstBlob, int32_t select) { - uint32_t rowBytes, pixelShift; - uint32_t x, y; - int32_t blob, lastBlob; - uint32_t alpha, in, color, result; - uint8_t * out; - uint32_t saveindex[kIOHibernateProgressCount] = { 0 }; - - pixelShift = display->depth >> 4; - if (pixelShift < 1) - return; - - rowBytes = display->rowBytes; - - screen += ((display->width - - kIOHibernateProgressCount * (kIOHibernateProgressWidth + kIOHibernateProgressSpacing)) << (pixelShift - 1)) - + (display->height - kIOHibernateProgressOriginY - kIOHibernateProgressHeight) * rowBytes; - - lastBlob = (select < kIOHibernateProgressCount) ? select : (kIOHibernateProgressCount - 1); - - screen += (firstBlob * (kIOHibernateProgressWidth + kIOHibernateProgressSpacing)) << pixelShift; - - for (y = 0; y < kIOHibernateProgressHeight; y++) - { - out = screen + y * rowBytes; - for (blob = firstBlob; blob <= lastBlob; blob++) - { - color = (blob < select) ? kIOHibernateProgressLightGray : kIOHibernateProgressMidGray; - for (x = 0; x < kIOHibernateProgressWidth; x++) - { - alpha = gIOHibernateProgressAlpha[y][x]; - result = color; - if (alpha) - { - if (0xff != alpha) - { - in = display->progressSaveUnder[blob][saveindex[blob]++]; - result = ((255 - alpha) * in + alpha * result + 0xff) / 255; - } - if (1 == pixelShift) - { - result >>= 3; - *((uint16_t *)out) = (result << 10) | (result << 5) | result; // 16 - } - else - *((uint32_t *)out) = (result << 16) | (result << 8) | result; // 32 - } - out += (1 << pixelShift); - } - out += (kIOHibernateProgressSpacing << pixelShift); - } - } + uint32_t rowBytes, pixelShift; + uint32_t x, y; + int32_t blob, lastBlob; + uint32_t alpha, in, color, result; + uint8_t * out; + uint32_t saveindex[kIOHibernateProgressCount] = { 0 }; + + pixelShift = display->depth >> 4; + if (pixelShift < 1) { + return; + } + + rowBytes = display->rowBytes; + + screen += ((display->width + - kIOHibernateProgressCount * (kIOHibernateProgressWidth + kIOHibernateProgressSpacing)) << (pixelShift - 1)) + + (display->height - kIOHibernateProgressOriginY - kIOHibernateProgressHeight) * rowBytes; + + lastBlob = (select < kIOHibernateProgressCount) ? select : (kIOHibernateProgressCount - 1); + + screen += (firstBlob * (kIOHibernateProgressWidth + kIOHibernateProgressSpacing)) << pixelShift; + + for (y = 0; y < kIOHibernateProgressHeight; y++) { + out = screen + y * rowBytes; + for (blob = firstBlob; blob <= lastBlob; blob++) { + color = (blob < select) ? 
kIOHibernateProgressLightGray : kIOHibernateProgressMidGray; + for (x = 0; x < kIOHibernateProgressWidth; x++) { + alpha = gIOHibernateProgressAlpha[y][x]; + result = color; + if (alpha) { + if (0xff != alpha) { + in = display->progressSaveUnder[blob][saveindex[blob]++]; + result = ((255 - alpha) * in + alpha * result + 0xff) / 255; + } + if (1 == pixelShift) { + result >>= 3; + *((uint16_t *)out) = (result << 10) | (result << 5) | result; // 16 + } else { + *((uint32_t *)out) = (result << 16) | (result << 8) | result; // 32 + } + } + out += (1 << pixelShift); + } + out += (kIOHibernateProgressSpacing << pixelShift); + } + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1002,16 +1000,15 @@ ProgressUpdate(hibernate_graphics_t * display, uint8_t * screen, int32_t firstBl IOReturn IOHibernateIOKitSleep(void) { - IOReturn ret = kIOReturnSuccess; - IOLockLock(gFSLock); - if (kFSOpening == gFSState) - { - gFSState = kFSTimedOut; - HIBLOG("hibernate file open timed out\n"); - ret = kIOReturnTimeout; - } - IOLockUnlock(gFSLock); - return (ret); + IOReturn ret = kIOReturnSuccess; + IOLockLock(gFSLock); + if (kFSOpening == gFSState) { + gFSState = kFSTimedOut; + HIBLOG("hibernate file open timed out\n"); + ret = kIOReturnTimeout; + } + IOLockUnlock(gFSLock); + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1019,67 +1016,70 @@ IOHibernateIOKitSleep(void) IOReturn IOHibernateSystemHasSlept(void) { - IOReturn ret = kIOReturnSuccess; - IOHibernateVars * vars = &gIOHibernateVars; - OSObject * obj = 0; - OSData * data; - - IOLockLock(gFSLock); - if ((kFSOpened != gFSState) && gIOHibernateMode) - { - ret = kIOReturnTimeout; - } - IOLockUnlock(gFSLock); - if (kIOReturnSuccess != ret) return (ret); - - if (gIOHibernateMode) obj = IOService::getPMRootDomain()->copyProperty(kIOHibernatePreviewBufferKey); - vars->previewBuffer = OSDynamicCast(IOMemoryDescriptor, obj); - if (obj && !vars->previewBuffer) - obj->release(); - - vars->consoleMapping = NULL; - if (vars->previewBuffer && (kIOReturnSuccess != vars->previewBuffer->prepare())) - { - vars->previewBuffer->release(); - vars->previewBuffer = 0; - } - - if ((kIOHibernateOptionProgress & gIOHibernateCurrentHeader->options) - && vars->previewBuffer - && (data = OSDynamicCast(OSData, - IOService::getPMRootDomain()->getProperty(kIOHibernatePreviewActiveKey)))) - { - UInt32 flags = *((UInt32 *)data->getBytesNoCopy()); - HIBPRINT("kIOHibernatePreviewActiveKey %08lx\n", (long)flags); - - IOService::getPMRootDomain()->removeProperty(kIOHibernatePreviewActiveKey); - - if (kIOHibernatePreviewUpdates & flags) - { - PE_Video consoleInfo; - hibernate_graphics_t * graphicsInfo = gIOHibernateGraphicsInfo; - - IOService::getPlatform()->getConsoleInfo(&consoleInfo); - - graphicsInfo->width = consoleInfo.v_width; - graphicsInfo->height = consoleInfo.v_height; - graphicsInfo->rowBytes = consoleInfo.v_rowBytes; - graphicsInfo->depth = consoleInfo.v_depth; - vars->consoleMapping = (uint8_t *) consoleInfo.v_baseAddr; - - HIBPRINT("video %p %d %d %d\n", - vars->consoleMapping, graphicsInfo->depth, - graphicsInfo->width, graphicsInfo->height); - if (vars->consoleMapping) - ProgressInit(graphicsInfo, vars->consoleMapping, - &graphicsInfo->progressSaveUnder[0][0], sizeof(graphicsInfo->progressSaveUnder)); - } - } - - if (gIOOptionsEntry) - gIOOptionsEntry->sync(); - - return (ret); + IOReturn ret = kIOReturnSuccess; + IOHibernateVars * vars = &gIOHibernateVars; + OSObject * obj = 0; + OSData * data; + + 
IOLockLock(gFSLock); + if ((kFSOpened != gFSState) && gIOHibernateMode) { + ret = kIOReturnTimeout; + } + IOLockUnlock(gFSLock); + if (kIOReturnSuccess != ret) { + return ret; + } + + if (gIOHibernateMode) { + obj = IOService::getPMRootDomain()->copyProperty(kIOHibernatePreviewBufferKey); + } + vars->previewBuffer = OSDynamicCast(IOMemoryDescriptor, obj); + if (obj && !vars->previewBuffer) { + obj->release(); + } + + vars->consoleMapping = NULL; + if (vars->previewBuffer && (kIOReturnSuccess != vars->previewBuffer->prepare())) { + vars->previewBuffer->release(); + vars->previewBuffer = 0; + } + + if ((kIOHibernateOptionProgress & gIOHibernateCurrentHeader->options) + && vars->previewBuffer + && (data = OSDynamicCast(OSData, + IOService::getPMRootDomain()->getProperty(kIOHibernatePreviewActiveKey)))) { + UInt32 flags = *((UInt32 *)data->getBytesNoCopy()); + HIBPRINT("kIOHibernatePreviewActiveKey %08lx\n", (long)flags); + + IOService::getPMRootDomain()->removeProperty(kIOHibernatePreviewActiveKey); + + if (kIOHibernatePreviewUpdates & flags) { + PE_Video consoleInfo; + hibernate_graphics_t * graphicsInfo = gIOHibernateGraphicsInfo; + + IOService::getPlatform()->getConsoleInfo(&consoleInfo); + + graphicsInfo->width = consoleInfo.v_width; + graphicsInfo->height = consoleInfo.v_height; + graphicsInfo->rowBytes = consoleInfo.v_rowBytes; + graphicsInfo->depth = consoleInfo.v_depth; + vars->consoleMapping = (uint8_t *) consoleInfo.v_baseAddr; + + HIBPRINT("video %p %d %d %d\n", + vars->consoleMapping, graphicsInfo->depth, + graphicsInfo->width, graphicsInfo->height); + if (vars->consoleMapping) { + ProgressInit(graphicsInfo, vars->consoleMapping, + &graphicsInfo->progressSaveUnder[0][0], sizeof(graphicsInfo->progressSaveUnder)); + } + } + } + + if (gIOOptionsEntry) { + gIOOptionsEntry->sync(); + } + + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1087,98 +1087,87 @@ IOHibernateSystemHasSlept(void) static DeviceTreeNode * MergeDeviceTree(DeviceTreeNode * entry, IORegistryEntry * regEntry) { - DeviceTreeNodeProperty * prop; - DeviceTreeNode * child; - IORegistryEntry * childRegEntry; - const char * nameProp; - unsigned int propLen, idx; - - prop = (DeviceTreeNodeProperty *) (entry + 1); - for (idx = 0; idx < entry->nProperties; idx++) - { - if (regEntry && (0 != strcmp("name", prop->name))) - { - regEntry->setProperty((const char *) prop->name, (void *) (prop + 1), prop->length); + DeviceTreeNodeProperty * prop; + DeviceTreeNode * child; + IORegistryEntry * childRegEntry; + const char * nameProp; + unsigned int propLen, idx; + + prop = (DeviceTreeNodeProperty *) (entry + 1); + for (idx = 0; idx < entry->nProperties; idx++) { + if (regEntry && (0 != strcmp("name", prop->name))) { + regEntry->setProperty((const char *) prop->name, (void *) (prop + 1), prop->length); // HIBPRINT("%s: %s, %d\n", regEntry->getName(), prop->name, prop->length); + } + prop = (DeviceTreeNodeProperty *) (((uintptr_t)(prop + 1)) + ((prop->length + 3) & ~3)); } - prop = (DeviceTreeNodeProperty *) (((uintptr_t)(prop + 1)) + ((prop->length + 3) & ~3)); - } - child = (DeviceTreeNode *) prop; - for (idx = 0; idx < entry->nChildren; idx++) - { - if (kSuccess != DTGetProperty(child, "name", (void **) &nameProp, &propLen)) - panic("no name"); - childRegEntry = regEntry ? 
regEntry->childFromPath(nameProp, gIODTPlane) : NULL; + child = (DeviceTreeNode *) prop; + for (idx = 0; idx < entry->nChildren; idx++) { + if (kSuccess != DTGetProperty(child, "name", (void **) &nameProp, &propLen)) { + panic("no name"); + } + childRegEntry = regEntry ? regEntry->childFromPath(nameProp, gIODTPlane) : NULL; // HIBPRINT("%s == %p\n", nameProp, childRegEntry); - child = MergeDeviceTree(child, childRegEntry); - } - return (child); + child = MergeDeviceTree(child, childRegEntry); + } + return child; } IOReturn IOHibernateSystemWake(void) { - if (kFSOpened == gFSState) - { - IOPolledFilePollersClose(gIOHibernateVars.fileVars, kIOPolledPostflightState); - IOHibernateDone(&gIOHibernateVars); - } - else - { - IOService::getPMRootDomain()->removeProperty(kIOHibernateOptionsKey); - IOService::getPMRootDomain()->removeProperty(kIOHibernateGfxStatusKey); - } - return (kIOReturnSuccess); + if (kFSOpened == gFSState) { + IOPolledFilePollersClose(gIOHibernateVars.fileVars, kIOPolledPostflightState); + IOHibernateDone(&gIOHibernateVars); + } else { + IOService::getPMRootDomain()->removeProperty(kIOHibernateOptionsKey); + IOService::getPMRootDomain()->removeProperty(kIOHibernateGfxStatusKey); + } + return kIOReturnSuccess; } static IOReturn IOHibernateDone(IOHibernateVars * vars) { - IOReturn err; - OSData * data; - - hibernate_teardown(vars->page_list, vars->page_list_wired, vars->page_list_pal); - - if (vars->videoMapping) - { - if (vars->videoMapSize) - // remove mappings - IOUnmapPages(kernel_map, vars->videoMapping, vars->videoMapSize); - if (vars->videoAllocSize) - // dealloc range - kmem_free(kernel_map, trunc_page(vars->videoMapping), vars->videoAllocSize); - } - - if (vars->previewBuffer) - { - vars->previewBuffer->release(); - vars->previewBuffer = 0; - } - - if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) - { - IOService::getPMRootDomain()->setProperty(kIOHibernateOptionsKey, - gIOHibernateCurrentHeader->options, 32); - } - else - { - IOService::getPMRootDomain()->removeProperty(kIOHibernateOptionsKey); - } - - if ((kIOHibernateStateWakingFromHibernate == gIOHibernateState) - && (kIOHibernateGfxStatusUnknown != gIOHibernateGraphicsInfo->gfxStatus)) - { - IOService::getPMRootDomain()->setProperty(kIOHibernateGfxStatusKey, - &gIOHibernateGraphicsInfo->gfxStatus, - sizeof(gIOHibernateGraphicsInfo->gfxStatus)); - } - else - { - IOService::getPMRootDomain()->removeProperty(kIOHibernateGfxStatusKey); - } - - // invalidate nvram properties - (gIOOptionsEntry != 0) => nvram was touched + IOReturn err; + OSData * data; + + hibernate_teardown(vars->page_list, vars->page_list_wired, vars->page_list_pal); + + if (vars->videoMapping) { + if (vars->videoMapSize) { + // remove mappings + IOUnmapPages(kernel_map, vars->videoMapping, vars->videoMapSize); + } + if (vars->videoAllocSize) { + // dealloc range + kmem_free(kernel_map, trunc_page(vars->videoMapping), vars->videoAllocSize); + } + } + + if (vars->previewBuffer) { + vars->previewBuffer->release(); + vars->previewBuffer = 0; + } + + if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) { + IOService::getPMRootDomain()->setProperty(kIOHibernateOptionsKey, + gIOHibernateCurrentHeader->options, 32); + } else { + IOService::getPMRootDomain()->removeProperty(kIOHibernateOptionsKey); + } + + if ((kIOHibernateStateWakingFromHibernate == gIOHibernateState) + && (kIOHibernateGfxStatusUnknown != gIOHibernateGraphicsInfo->gfxStatus)) { + IOService::getPMRootDomain()->setProperty(kIOHibernateGfxStatusKey, + 
&gIOHibernateGraphicsInfo->gfxStatus, + sizeof(gIOHibernateGraphicsInfo->gfxStatus)); + } else { + IOService::getPMRootDomain()->removeProperty(kIOHibernateGfxStatusKey); + } + + // invalidate nvram properties - (gIOOptionsEntry != 0) => nvram was touched #if defined(__i386__) || defined(__x86_64__) IOService::getPMRootDomain()->removeProperty(gIOHibernateRTCVariablesKey); @@ -1189,270 +1178,263 @@ IOHibernateDone(IOHibernateVars * vars) * is not backed by coin cell. Remove Hibernate data from NVRAM. */ if (gIOOptionsEntry) { - - if (gIOHibernateRTCVariablesKey) { - if (gIOOptionsEntry->getProperty(gIOHibernateRTCVariablesKey)) { - gIOOptionsEntry->removeProperty(gIOHibernateRTCVariablesKey); + if (gIOHibernateRTCVariablesKey) { + if (gIOOptionsEntry->getProperty(gIOHibernateRTCVariablesKey)) { + gIOOptionsEntry->removeProperty(gIOHibernateRTCVariablesKey); + } } - } - if (gIOHibernateBootNextKey) - { - if (gIOHibernateBootNextSave) - { - gIOOptionsEntry->setProperty(gIOHibernateBootNextKey, gIOHibernateBootNextSave); - gIOHibernateBootNextSave->release(); - gIOHibernateBootNextSave = NULL; + if (gIOHibernateBootNextKey) { + if (gIOHibernateBootNextSave) { + gIOOptionsEntry->setProperty(gIOHibernateBootNextKey, gIOHibernateBootNextSave); + gIOHibernateBootNextSave->release(); + gIOHibernateBootNextSave = NULL; + } else { + gIOOptionsEntry->removeProperty(gIOHibernateBootNextKey); + } + } + if (kIOHibernateStateWakingFromHibernate != gIOHibernateState) { + gIOOptionsEntry->sync(); } - else - gIOOptionsEntry->removeProperty(gIOHibernateBootNextKey); - } - if (kIOHibernateStateWakingFromHibernate != gIOHibernateState) gIOOptionsEntry->sync(); } #endif - if (vars->srcBuffer) vars->srcBuffer->release(); - bzero(&gIOHibernateHandoffPages[0], gIOHibernateHandoffPageCount * sizeof(gIOHibernateHandoffPages[0])); - if (vars->handoffBuffer) - { - if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) - { - IOHibernateHandoff * handoff; - bool done = false; - for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy(); - !done; - handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount]) - { - HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount); - uint8_t * data = &handoff->data[0]; - switch (handoff->type) - { - case kIOHibernateHandoffTypeEnd: - done = true; - break; - - case kIOHibernateHandoffTypeDeviceTree: - MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot()); - break; - - case kIOHibernateHandoffTypeKeyStore: + if (vars->srcBuffer) { + vars->srcBuffer->release(); + } + bzero(&gIOHibernateHandoffPages[0], gIOHibernateHandoffPageCount * sizeof(gIOHibernateHandoffPages[0])); + if (vars->handoffBuffer) { + if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) { + IOHibernateHandoff * handoff; + bool done = false; + for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy(); + !done; + handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount]) { + HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount); + uint8_t * data = &handoff->data[0]; + switch (handoff->type) { + case kIOHibernateHandoffTypeEnd: + done = true; + break; + + case kIOHibernateHandoffTypeDeviceTree: + MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot()); + break; + + case kIOHibernateHandoffTypeKeyStore: #if defined(__i386__) || defined(__x86_64__) - { - IOBufferMemoryDescriptor * - md = IOBufferMemoryDescriptor::withBytes(data, handoff->bytecount, kIODirectionOutIn); - if (md) - { 
- IOSetKeyStoreData(md); - } - } + { + IOBufferMemoryDescriptor * + md = IOBufferMemoryDescriptor::withBytes(data, handoff->bytecount, kIODirectionOutIn); + if (md) { + IOSetKeyStoreData(md); + } + } #endif - break; + break; - default: - done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000)); - break; - } - } + default: + done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000)); + break; + } + } #if defined(__i386__) || defined(__x86_64__) - if (vars->volumeCryptKeySize) - { - IOBufferMemoryDescriptor * - bmd = IOBufferMemoryDescriptor::withBytes(&vars->volumeCryptKey[0], - vars->volumeCryptKeySize, kIODirectionOutIn); - if (!bmd) panic("IOBufferMemoryDescriptor"); - IOSetAPFSKeyStoreData(bmd); - bzero(&vars->volumeCryptKey[0], sizeof(vars->volumeCryptKey)); - } + if (vars->volumeCryptKeySize) { + IOBufferMemoryDescriptor * + bmd = IOBufferMemoryDescriptor::withBytes(&vars->volumeCryptKey[0], + vars->volumeCryptKeySize, kIODirectionOutIn); + if (!bmd) { + panic("IOBufferMemoryDescriptor"); + } + IOSetAPFSKeyStoreData(bmd); + bzero(&vars->volumeCryptKey[0], sizeof(vars->volumeCryptKey)); + } #endif - + } + vars->handoffBuffer->release(); } - vars->handoffBuffer->release(); - } - if (gIOChosenEntry - && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(gIOBridgeBootSessionUUIDKey))) - && (sizeof(gIOHibernateBridgeBootSessionUUIDString) <= data->getLength())) - { - bcopy(data->getBytesNoCopy(), &gIOHibernateBridgeBootSessionUUIDString[0], - sizeof(gIOHibernateBridgeBootSessionUUIDString)); - } + if (gIOChosenEntry + && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(gIOBridgeBootSessionUUIDKey))) + && (sizeof(gIOHibernateBridgeBootSessionUUIDString) <= data->getLength())) { + bcopy(data->getBytesNoCopy(), &gIOHibernateBridgeBootSessionUUIDString[0], + sizeof(gIOHibernateBridgeBootSessionUUIDString)); + } - if (vars->hwEncrypt) - { - err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, NULL, 0); - HIBLOG("IOPolledFilePollersSetEncryptionKey(0,%x)\n", err); - } + if (vars->hwEncrypt) { + err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, NULL, 0); + HIBLOG("IOPolledFilePollersSetEncryptionKey(0,%x)\n", err); + } - bzero(vars, sizeof(*vars)); + bzero(vars, sizeof(*vars)); // gIOHibernateState = kIOHibernateStateInactive; // leave it for post wake code to see - return (kIOReturnSuccess); + return kIOReturnSuccess; } static void IOHibernateSystemPostWakeTrim(void * p1, void * p2) { - // invalidate & close the image file - if (p1) IOLockLock(gFSLock); - if (kFSTrimDelay == gFSState) - { - IOPolledFileIOVars * vars = &gFileVars; - IOPolledFileClose(&vars, + // invalidate & close the image file + if (p1) { + IOLockLock(gFSLock); + } + if (kFSTrimDelay == gFSState) { + IOPolledFileIOVars * vars = &gFileVars; + IOPolledFileClose(&vars, #if DISABLE_TRIM - 0, NULL, 0, 0, 0); + 0, NULL, 0, 0, 0); #else - 0, (caddr_t)gIOHibernateCurrentHeader, sizeof(IOHibernateImageHeader), - sizeof(IOHibernateImageHeader), gIOHibernateCurrentHeader->imageSize); + 0, (caddr_t)gIOHibernateCurrentHeader, sizeof(IOHibernateImageHeader), + sizeof(IOHibernateImageHeader), gIOHibernateCurrentHeader->imageSize); #endif - gFSState = kFSIdle; - } - if (p1) IOLockUnlock(gFSLock); + gFSState = kFSIdle; + } + if (p1) { + IOLockUnlock(gFSLock); + } } IOReturn IOHibernateSystemPostWake(bool now) { - gIOHibernateCurrentHeader->signature = kIOHibernateHeaderInvalidSignature; - IOSetBootImageNVRAM(0); - - IOLockLock(gFSLock); - if (kFSTrimDelay == gFSState) - { - 
thread_call_cancel(gIOHibernateTrimCalloutEntry); - IOHibernateSystemPostWakeTrim(NULL, NULL); - } - else if (kFSOpened != gFSState) gFSState = kFSIdle; - else - { - gFSState = kFSTrimDelay; - if (now) - { - thread_call_cancel(gIOHibernateTrimCalloutEntry); - IOHibernateSystemPostWakeTrim(NULL, NULL); - } - else - { - AbsoluteTime deadline; - clock_interval_to_deadline(TRIM_DELAY, kMillisecondScale, &deadline ); - thread_call_enter1_delayed(gIOHibernateTrimCalloutEntry, NULL, deadline); - } - } - IOLockUnlock(gFSLock); - - return (kIOReturnSuccess); + gIOHibernateCurrentHeader->signature = kIOHibernateHeaderInvalidSignature; + IOSetBootImageNVRAM(0); + + IOLockLock(gFSLock); + if (kFSTrimDelay == gFSState) { + thread_call_cancel(gIOHibernateTrimCalloutEntry); + IOHibernateSystemPostWakeTrim(NULL, NULL); + } else if (kFSOpened != gFSState) { + gFSState = kFSIdle; + } else { + gFSState = kFSTrimDelay; + if (now) { + thread_call_cancel(gIOHibernateTrimCalloutEntry); + IOHibernateSystemPostWakeTrim(NULL, NULL); + } else { + AbsoluteTime deadline; + clock_interval_to_deadline(TRIM_DELAY, kMillisecondScale, &deadline ); + thread_call_enter1_delayed(gIOHibernateTrimCalloutEntry, NULL, deadline); + } + } + IOLockUnlock(gFSLock); + + return kIOReturnSuccess; } -uint32_t IOHibernateWasScreenLocked(void) +uint32_t +IOHibernateWasScreenLocked(void) { - uint32_t ret = 0; - if ((kIOHibernateStateWakingFromHibernate == gIOHibernateState) && gIOChosenEntry) - { - OSData * - data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(kIOScreenLockStateKey)); - if (data) - { - ret = ((uint32_t *)data->getBytesNoCopy())[0]; - gIOChosenEntry->setProperty(kIOBooterScreenLockStateKey, data); - } - } - else gIOChosenEntry->removeProperty(kIOBooterScreenLockStateKey); - - return (ret); + uint32_t ret = 0; + if ((kIOHibernateStateWakingFromHibernate == gIOHibernateState) && gIOChosenEntry) { + OSData * + data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(kIOScreenLockStateKey)); + if (data) { + ret = ((uint32_t *)data->getBytesNoCopy())[0]; + gIOChosenEntry->setProperty(kIOBooterScreenLockStateKey, data); + } + } else { + gIOChosenEntry->removeProperty(kIOBooterScreenLockStateKey); + } + + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ SYSCTL_STRING(_kern, OID_AUTO, hibernatefile, - CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - gIOHibernateFilename, sizeof(gIOHibernateFilename), ""); + CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + gIOHibernateFilename, sizeof(gIOHibernateFilename), ""); SYSCTL_STRING(_kern, OID_AUTO, bootsignature, - CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - gIOHibernateBootSignature, sizeof(gIOHibernateBootSignature), ""); + CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + gIOHibernateBootSignature, sizeof(gIOHibernateBootSignature), ""); SYSCTL_UINT(_kern, OID_AUTO, hibernatemode, - CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - &gIOHibernateMode, 0, ""); + CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + &gIOHibernateMode, 0, ""); SYSCTL_STRUCT(_kern, OID_AUTO, hibernatestatistics, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - &_hibernateStats, hibernate_statistics_t, ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + &_hibernateStats, hibernate_statistics_t, ""); SYSCTL_STRING(_kern_bridge, OID_AUTO, bootsessionuuid, - CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 
gIOHibernateBridgeBootSessionUUIDString, sizeof(gIOHibernateBridgeBootSessionUUIDString), ""); + CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + gIOHibernateBridgeBootSessionUUIDString, sizeof(gIOHibernateBridgeBootSessionUUIDString), ""); SYSCTL_UINT(_kern, OID_AUTO, hibernategraphicsready, - CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, - &_hibernateStats.graphicsReadyTime, 0, ""); + CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, + &_hibernateStats.graphicsReadyTime, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, hibernatewakenotification, - CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, - &_hibernateStats.wakeNotificationTime, 0, ""); + CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, + &_hibernateStats.wakeNotificationTime, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, hibernatelockscreenready, - CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, - &_hibernateStats.lockScreenReadyTime, 0, ""); + CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, + &_hibernateStats.lockScreenReadyTime, 0, ""); SYSCTL_UINT(_kern, OID_AUTO, hibernatehidready, - CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, - &_hibernateStats.hidReadyTime, 0, ""); + CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_ANYBODY, + &_hibernateStats.hidReadyTime, 0, ""); void IOHibernateSystemInit(IOPMrootDomain * rootDomain) { - gIOHibernateBootImageKey = OSSymbol::withCStringNoCopy(kIOHibernateBootImageKey); - gIOHibernateBootSignatureKey = OSSymbol::withCStringNoCopy(kIOHibernateBootSignatureKey); - gIOBridgeBootSessionUUIDKey = OSSymbol::withCStringNoCopy(kIOBridgeBootSessionUUIDKey); + gIOHibernateBootImageKey = OSSymbol::withCStringNoCopy(kIOHibernateBootImageKey); + gIOHibernateBootSignatureKey = OSSymbol::withCStringNoCopy(kIOHibernateBootSignatureKey); + gIOBridgeBootSessionUUIDKey = OSSymbol::withCStringNoCopy(kIOBridgeBootSessionUUIDKey); #if defined(__i386__) || defined(__x86_64__) - gIOHibernateRTCVariablesKey = OSSymbol::withCStringNoCopy(kIOHibernateRTCVariablesKey); - gIOHibernateBoot0082Key = OSSymbol::withCString("8BE4DF61-93CA-11D2-AA0D-00E098032B8C:Boot0082"); - gIOHibernateBootNextKey = OSSymbol::withCString("8BE4DF61-93CA-11D2-AA0D-00E098032B8C:BootNext"); - gIOHibernateRTCVariablesKey = OSSymbol::withCStringNoCopy(kIOHibernateRTCVariablesKey); + gIOHibernateRTCVariablesKey = OSSymbol::withCStringNoCopy(kIOHibernateRTCVariablesKey); + gIOHibernateBoot0082Key = OSSymbol::withCString("8BE4DF61-93CA-11D2-AA0D-00E098032B8C:Boot0082"); + gIOHibernateBootNextKey = OSSymbol::withCString("8BE4DF61-93CA-11D2-AA0D-00E098032B8C:BootNext"); + gIOHibernateRTCVariablesKey = OSSymbol::withCStringNoCopy(kIOHibernateRTCVariablesKey); #endif /* defined(__i386__) || defined(__x86_64__) */ - OSData * data = OSData::withBytesNoCopy(&gIOHibernateState, sizeof(gIOHibernateState)); - if (data) - { - rootDomain->setProperty(kIOHibernateStateKey, data); - data->release(); - } - - if (PE_parse_boot_argn("hfile", gIOHibernateFilename, sizeof(gIOHibernateFilename))) - gIOHibernateMode = kIOHibernateModeOn; - else - gIOHibernateFilename[0] = 0; - - sysctl_register_oid(&sysctl__kern_hibernatefile); - sysctl_register_oid(&sysctl__kern_bootsignature); - sysctl_register_oid(&sysctl__kern_hibernatemode); - sysctl_register_oid(&sysctl__kern_hibernatestatistics); - sysctl_register_oid(&sysctl__kern_hibernategraphicsready); - sysctl_register_oid(&sysctl__kern_hibernatewakenotification); - sysctl_register_oid(&sysctl__kern_hibernatelockscreenready); - 
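/*
 * Context for the sysctl declarations above: each OID carries CTLFLAG_NOAUTO,
 * so none of them appear automatically at boot. IOHibernateSystemInit()
 * registers every one by hand, e.g.
 *
 *     sysctl_register_oid(&sysctl__kern_hibernatemode);
 *
 * which is also what lets kern.bridge.bootsessionuuid be registered
 * conditionally, only when /chosen actually carries a bridge boot-session
 * UUID of the expected length.
 */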
sysctl_register_oid(&sysctl__kern_hibernatehidready); - - gIOChosenEntry = IORegistryEntry::fromPath("/chosen", gIODTPlane); - - if (gIOChosenEntry - && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(gIOBridgeBootSessionUUIDKey))) - && (sizeof(gIOHibernateBridgeBootSessionUUIDString) <= data->getLength())) - { - sysctl_register_oid(&sysctl__kern_bridge_bootsessionuuid); - bcopy(data->getBytesNoCopy(), &gIOHibernateBridgeBootSessionUUIDString[0], sizeof(gIOHibernateBridgeBootSessionUUIDString)); - } - - gFSLock = IOLockAlloc(); + OSData * data = OSData::withBytesNoCopy(&gIOHibernateState, sizeof(gIOHibernateState)); + if (data) { + rootDomain->setProperty(kIOHibernateStateKey, data); + data->release(); + } + + if (PE_parse_boot_argn("hfile", gIOHibernateFilename, sizeof(gIOHibernateFilename))) { + gIOHibernateMode = kIOHibernateModeOn; + } else { + gIOHibernateFilename[0] = 0; + } + + sysctl_register_oid(&sysctl__kern_hibernatefile); + sysctl_register_oid(&sysctl__kern_bootsignature); + sysctl_register_oid(&sysctl__kern_hibernatemode); + sysctl_register_oid(&sysctl__kern_hibernatestatistics); + sysctl_register_oid(&sysctl__kern_hibernategraphicsready); + sysctl_register_oid(&sysctl__kern_hibernatewakenotification); + sysctl_register_oid(&sysctl__kern_hibernatelockscreenready); + sysctl_register_oid(&sysctl__kern_hibernatehidready); + + gIOChosenEntry = IORegistryEntry::fromPath("/chosen", gIODTPlane); + + if (gIOChosenEntry + && (data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(gIOBridgeBootSessionUUIDKey))) + && (sizeof(gIOHibernateBridgeBootSessionUUIDString) <= data->getLength())) { + sysctl_register_oid(&sysctl__kern_bridge_bootsessionuuid); + bcopy(data->getBytesNoCopy(), &gIOHibernateBridgeBootSessionUUIDString[0], sizeof(gIOHibernateBridgeBootSessionUUIDString)); + } + + gFSLock = IOLockAlloc(); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static IOReturn IOHibernatePolledFileWrite(IOPolledFileIOVars * vars, - const uint8_t * bytes, IOByteCount size, - IOPolledFileCryptVars * cryptvars) + const uint8_t * bytes, IOByteCount size, + IOPolledFileCryptVars * cryptvars) { - IOReturn err; + IOReturn err; - err = IOPolledFileWrite(vars, bytes, size, cryptvars); - if ((kIOReturnSuccess == err) && hibernate_should_abort()) err = kIOReturnAborted; + err = IOPolledFileWrite(vars, bytes, size, cryptvars); + if ((kIOReturnSuccess == err) && hibernate_should_abort()) { + err = kIOReturnAborted; + } - return (err); + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1460,686 +1442,658 @@ IOHibernatePolledFileWrite(IOPolledFileIOVars * vars, extern "C" uint32_t hibernate_write_image(void) { - IOHibernateImageHeader * header = gIOHibernateCurrentHeader; - IOHibernateVars * vars = &gIOHibernateVars; - IOPolledFileExtent * fileExtents; - - _static_assert_1_arg(sizeof(IOHibernateImageHeader) == 512); - - uint32_t pageCount, pagesDone; - IOReturn err; - vm_offset_t ppnum, page; - IOItemCount count; - uint8_t * src; - uint8_t * data; - uint8_t * compressed; - uint8_t * scratch; - IOByteCount pageCompressedSize; - uint64_t compressedSize, uncompressedSize; - uint64_t image1Size = 0; - uint32_t bitmap_size; - bool iterDone, pollerOpen, needEncrypt; - uint32_t restore1Sum, sum, sum1, sum2; - int wkresult; - uint32_t tag; - uint32_t pageType; - uint32_t pageAndCount[2]; - addr64_t phys64; - IOByteCount segLen; - uintptr_t hibernateBase; - uintptr_t hibernateEnd; - - AbsoluteTime startTime, endTime; - AbsoluteTime 
allTime, compTime; - uint64_t compBytes; - uint64_t nsec; - uint32_t lastProgressStamp = 0; - uint32_t progressStamp; - uint32_t blob, lastBlob = (uint32_t) -1L; - - uint32_t wiredPagesEncrypted; - uint32_t dirtyPagesEncrypted; - uint32_t wiredPagesClear; - uint32_t svPageCount; - uint32_t zvPageCount; - - IOPolledFileCryptVars _cryptvars; - IOPolledFileCryptVars * cryptvars = 0; - - wiredPagesEncrypted = 0; - dirtyPagesEncrypted = 0; - wiredPagesClear = 0; - svPageCount = 0; - zvPageCount = 0; - - if (!vars->fileVars - || !vars->fileVars->pollers - || !(kIOHibernateModeOn & gIOHibernateMode)) return (kIOHibernatePostWriteSleep); - - if (kIOHibernateModeSleep & gIOHibernateMode) - kdebug_enable = save_kdebug_enable; - - KDBG(IOKDBG_CODE(DBG_HIBERNATE, 1) | DBG_FUNC_START); - IOService::getPMRootDomain()->tracePoint(kIOPMTracePointHibernate); - - restore1Sum = sum1 = sum2 = 0; + IOHibernateImageHeader * header = gIOHibernateCurrentHeader; + IOHibernateVars * vars = &gIOHibernateVars; + IOPolledFileExtent * fileExtents; + + _static_assert_1_arg(sizeof(IOHibernateImageHeader) == 512); + + uint32_t pageCount, pagesDone; + IOReturn err; + vm_offset_t ppnum, page; + IOItemCount count; + uint8_t * src; + uint8_t * data; + uint8_t * compressed; + uint8_t * scratch; + IOByteCount pageCompressedSize; + uint64_t compressedSize, uncompressedSize; + uint64_t image1Size = 0; + uint32_t bitmap_size; + bool iterDone, pollerOpen, needEncrypt; + uint32_t restore1Sum, sum, sum1, sum2; + int wkresult; + uint32_t tag; + uint32_t pageType; + uint32_t pageAndCount[2]; + addr64_t phys64; + IOByteCount segLen; + uintptr_t hibernateBase; + uintptr_t hibernateEnd; + + AbsoluteTime startTime, endTime; + AbsoluteTime allTime, compTime; + uint64_t compBytes; + uint64_t nsec; + uint32_t lastProgressStamp = 0; + uint32_t progressStamp; + uint32_t blob, lastBlob = (uint32_t) -1L; + + uint32_t wiredPagesEncrypted; + uint32_t dirtyPagesEncrypted; + uint32_t wiredPagesClear; + uint32_t svPageCount; + uint32_t zvPageCount; + + IOPolledFileCryptVars _cryptvars; + IOPolledFileCryptVars * cryptvars = 0; + + wiredPagesEncrypted = 0; + dirtyPagesEncrypted = 0; + wiredPagesClear = 0; + svPageCount = 0; + zvPageCount = 0; + + if (!vars->fileVars + || !vars->fileVars->pollers + || !(kIOHibernateModeOn & gIOHibernateMode)) { + return kIOHibernatePostWriteSleep; + } + + if (kIOHibernateModeSleep & gIOHibernateMode) { + kdebug_enable = save_kdebug_enable; + } + + KDBG(IOKDBG_CODE(DBG_HIBERNATE, 1) | DBG_FUNC_START); + IOService::getPMRootDomain()->tracePoint(kIOPMTracePointHibernate); + + restore1Sum = sum1 = sum2 = 0; #if CRYPTO - // encryption data. "iv" is the "initial vector". 
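/*
 * What the CRYPTO block below sets up, in brief: two AES contexts. The
 * per-boot cryptKey seeds gIOHibernateCryptWakeContext (encrypt + decrypt,
 * used for the dirty/unwired half of the image), while the wired half is
 * keyed by wiredCryptKey first mixed byte-for-byte with the volume key:
 *
 *     for (i = 0; i < sizeof(vars->wiredCryptKey); i++)
 *         vars->wiredCryptKey[i] ^= vars->volumeCryptKey[i];
 *
 * (the original reuses pageCount as the loop index). first_iv only seeds the
 * first block; the running aes_iv is chained block to block (CBC-style),
 * which is why the live IV is copied into the wake context when the unwired
 * image starts. Both raw keys are bzero()ed as soon as the key schedules are
 * derived.
 */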
- if (kIOHibernateModeEncrypt & gIOHibernateMode) - { - static const unsigned char first_iv[AES_BLOCK_SIZE] - = { 0xa3, 0x63, 0x65, 0xa9, 0x0b, 0x71, 0x7b, 0x1c, - 0xdf, 0x9e, 0x5f, 0x32, 0xd7, 0x61, 0x63, 0xda }; - - cryptvars = &gIOHibernateCryptWakeContext; - bzero(cryptvars, sizeof(IOPolledFileCryptVars)); - aes_encrypt_key(vars->cryptKey, - kIOHibernateAESKeySize, - &cryptvars->ctx.encrypt); - aes_decrypt_key(vars->cryptKey, - kIOHibernateAESKeySize, - &cryptvars->ctx.decrypt); - - cryptvars = &_cryptvars; - bzero(cryptvars, sizeof(IOPolledFileCryptVars)); - for (pageCount = 0; pageCount < sizeof(vars->wiredCryptKey); pageCount++) - vars->wiredCryptKey[pageCount] ^= vars->volumeCryptKey[pageCount]; - aes_encrypt_key(vars->wiredCryptKey, - kIOHibernateAESKeySize, - &cryptvars->ctx.encrypt); - - bcopy(&first_iv[0], &cryptvars->aes_iv[0], AES_BLOCK_SIZE); - bzero(&vars->wiredCryptKey[0], sizeof(vars->wiredCryptKey)); - bzero(&vars->cryptKey[0], sizeof(vars->cryptKey)); - } + // encryption data. "iv" is the "initial vector". + if (kIOHibernateModeEncrypt & gIOHibernateMode) { + static const unsigned char first_iv[AES_BLOCK_SIZE] + = { 0xa3, 0x63, 0x65, 0xa9, 0x0b, 0x71, 0x7b, 0x1c, + 0xdf, 0x9e, 0x5f, 0x32, 0xd7, 0x61, 0x63, 0xda }; + + cryptvars = &gIOHibernateCryptWakeContext; + bzero(cryptvars, sizeof(IOPolledFileCryptVars)); + aes_encrypt_key(vars->cryptKey, + kIOHibernateAESKeySize, + &cryptvars->ctx.encrypt); + aes_decrypt_key(vars->cryptKey, + kIOHibernateAESKeySize, + &cryptvars->ctx.decrypt); + + cryptvars = &_cryptvars; + bzero(cryptvars, sizeof(IOPolledFileCryptVars)); + for (pageCount = 0; pageCount < sizeof(vars->wiredCryptKey); pageCount++) { + vars->wiredCryptKey[pageCount] ^= vars->volumeCryptKey[pageCount]; + } + aes_encrypt_key(vars->wiredCryptKey, + kIOHibernateAESKeySize, + &cryptvars->ctx.encrypt); + + bcopy(&first_iv[0], &cryptvars->aes_iv[0], AES_BLOCK_SIZE); + bzero(&vars->wiredCryptKey[0], sizeof(vars->wiredCryptKey)); + bzero(&vars->cryptKey[0], sizeof(vars->cryptKey)); + } #endif /* CRYPTO */ - hibernate_page_list_setall(vars->page_list, - vars->page_list_wired, - vars->page_list_pal, - false /* !preflight */, - /* discard_all */ - ((0 == (kIOHibernateModeSleep & gIOHibernateMode)) - && (0 != ((kIOHibernateModeDiscardCleanActive | kIOHibernateModeDiscardCleanInactive) & gIOHibernateMode))), - &pageCount); + hibernate_page_list_setall(vars->page_list, + vars->page_list_wired, + vars->page_list_pal, + false /* !preflight */, + /* discard_all */ + ((0 == (kIOHibernateModeSleep & gIOHibernateMode)) + && (0 != ((kIOHibernateModeDiscardCleanActive | kIOHibernateModeDiscardCleanInactive) & gIOHibernateMode))), + &pageCount); - HIBLOG("hibernate_page_list_setall found pageCount %d\n", pageCount); + HIBLOG("hibernate_page_list_setall found pageCount %d\n", pageCount); - fileExtents = (IOPolledFileExtent *) vars->fileVars->fileExtents->getBytesNoCopy(); + fileExtents = (IOPolledFileExtent *) vars->fileVars->fileExtents->getBytesNoCopy(); #if 0 - count = vars->fileExtents->getLength() / sizeof(IOPolledFileExtent); - for (page = 0; page < count; page++) - { - HIBLOG("fileExtents[%d] %qx, %qx (%qx)\n", page, - fileExtents[page].start, fileExtents[page].length, - fileExtents[page].start + fileExtents[page].length); - } + count = vars->fileExtents->getLength() / sizeof(IOPolledFileExtent); + for (page = 0; page < count; page++) { + HIBLOG("fileExtents[%d] %qx, %qx (%qx)\n", page, + fileExtents[page].start, fileExtents[page].length, + fileExtents[page].start + 
fileExtents[page].length); + } #endif - needEncrypt = (0 != (kIOHibernateModeEncrypt & gIOHibernateMode)); - AbsoluteTime_to_scalar(&compTime) = 0; - compBytes = 0; - - clock_get_uptime(&allTime); - IOService::getPMRootDomain()->pmStatsRecordEvent( - kIOPMStatsHibernateImageWrite | kIOPMStatsEventStartFlag, allTime); - do - { - compressedSize = 0; - uncompressedSize = 0; - svPageCount = 0; - zvPageCount = 0; - - IOPolledFileSeek(vars->fileVars, vars->fileVars->blockSize); - - HIBLOG("IOHibernatePollerOpen, ml_get_interrupts_enabled %d\n", - ml_get_interrupts_enabled()); - err = IOPolledFilePollersOpen(vars->fileVars, kIOPolledBeforeSleepState, - // abortable if not low battery - !IOService::getPMRootDomain()->mustHibernate()); - HIBLOG("IOHibernatePollerOpen(%x)\n", err); - pollerOpen = (kIOReturnSuccess == err); - if (!pollerOpen) - break; - - if (vars->volumeCryptKeySize) - { - err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, &vars->volumeCryptKey[0], vars->volumeCryptKeySize); - HIBLOG("IOPolledFilePollersSetEncryptionKey(%x)\n", err); - vars->hwEncrypt = (kIOReturnSuccess == err); - bzero(&vars->volumeCryptKey[0], sizeof(vars->volumeCryptKey)); - if (vars->hwEncrypt) header->options |= kIOHibernateOptionHWEncrypt; - } - - // copy file block extent list if larger than header - - count = vars->fileVars->fileExtents->getLength(); - if (count > sizeof(header->fileExtentMap)) - { - count -= sizeof(header->fileExtentMap); - err = IOHibernatePolledFileWrite(vars->fileVars, - ((uint8_t *) &fileExtents[0]) + sizeof(header->fileExtentMap), count, cryptvars); - if (kIOReturnSuccess != err) - break; - } - - hibernateBase = HIB_BASE; /* Defined in PAL headers */ - hibernateEnd = (segHIBB + segSizeHIB); - - // copy out restore1 code - - for (count = 0; - (phys64 = vars->handoffBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) - { - for (pagesDone = 0; pagesDone < atop_32(segLen); pagesDone++) - { - gIOHibernateHandoffPages[atop_32(count) + pagesDone] = atop_64(phys64) + pagesDone; - } - } - - page = atop_32(kvtophys(hibernateBase)); - count = atop_32(round_page(hibernateEnd) - hibernateBase); - header->restore1CodePhysPage = page; - header->restore1CodeVirt = hibernateBase; - header->restore1PageCount = count; - header->restore1CodeOffset = ((uintptr_t) &hibernate_machine_entrypoint) - hibernateBase; - header->restore1StackOffset = ((uintptr_t) &gIOHibernateRestoreStackEnd[0]) - 64 - hibernateBase; - - if (uuid_parse(&gIOHibernateBridgeBootSessionUUIDString[0], &header->bridgeBootSessionUUID[0])) - { - bzero(&header->bridgeBootSessionUUID[0], sizeof(header->bridgeBootSessionUUID)); - } - - // sum __HIB seg, with zeros for the stack - src = (uint8_t *) trunc_page(hibernateBase); - for (page = 0; page < count; page++) - { - if ((src < &gIOHibernateRestoreStack[0]) || (src >= &gIOHibernateRestoreStackEnd[0])) - restore1Sum += hibernate_sum_page(src, header->restore1CodeVirt + page); - else - restore1Sum += 0x00000000; - src += page_size; - } - sum1 = restore1Sum; - - // write the __HIB seg, with zeros for the stack - - src = (uint8_t *) trunc_page(hibernateBase); - count = ((uintptr_t) &gIOHibernateRestoreStack[0]) - trunc_page(hibernateBase); - if (count) - { - err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); - if (kIOReturnSuccess != err) - break; - } - err = IOHibernatePolledFileWrite(vars->fileVars, - (uint8_t *) 0, - &gIOHibernateRestoreStackEnd[0] - &gIOHibernateRestoreStack[0], - cryptvars); - if (kIOReturnSuccess != err) - 
break; - src = &gIOHibernateRestoreStackEnd[0]; - count = round_page(hibernateEnd) - ((uintptr_t) src); - if (count) - { - err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); - if (kIOReturnSuccess != err) - break; - } - - if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) - { - vars->fileVars->encryptStart = (vars->fileVars->position & ~(AES_BLOCK_SIZE - 1)); - vars->fileVars->encryptEnd = UINT64_MAX; - HIBLOG("encryptStart %qx\n", vars->fileVars->encryptStart); - } - - // write the preview buffer - - if (vars->previewBuffer) - { - ppnum = 0; - count = 0; - do - { - phys64 = vars->previewBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone); - pageAndCount[0] = atop_64(phys64); - pageAndCount[1] = atop_32(segLen); - err = IOHibernatePolledFileWrite(vars->fileVars, - (const uint8_t *) &pageAndCount, sizeof(pageAndCount), - cryptvars); - if (kIOReturnSuccess != err) - break; - count += segLen; - ppnum += sizeof(pageAndCount); - } - while (phys64); - if (kIOReturnSuccess != err) - break; - - src = (uint8_t *) vars->previewBuffer->getPhysicalSegment(0, NULL, _kIOMemorySourceSegment); + needEncrypt = (0 != (kIOHibernateModeEncrypt & gIOHibernateMode)); + AbsoluteTime_to_scalar(&compTime) = 0; + compBytes = 0; + + clock_get_uptime(&allTime); + IOService::getPMRootDomain()->pmStatsRecordEvent( + kIOPMStatsHibernateImageWrite | kIOPMStatsEventStartFlag, allTime); + do{ + compressedSize = 0; + uncompressedSize = 0; + svPageCount = 0; + zvPageCount = 0; + + IOPolledFileSeek(vars->fileVars, vars->fileVars->blockSize); + + HIBLOG("IOHibernatePollerOpen, ml_get_interrupts_enabled %d\n", + ml_get_interrupts_enabled()); + err = IOPolledFilePollersOpen(vars->fileVars, kIOPolledBeforeSleepState, + // abortable if not low battery + !IOService::getPMRootDomain()->mustHibernate()); + HIBLOG("IOHibernatePollerOpen(%x)\n", err); + pollerOpen = (kIOReturnSuccess == err); + if (!pollerOpen) { + break; + } + + if (vars->volumeCryptKeySize) { + err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, &vars->volumeCryptKey[0], vars->volumeCryptKeySize); + HIBLOG("IOPolledFilePollersSetEncryptionKey(%x)\n", err); + vars->hwEncrypt = (kIOReturnSuccess == err); + bzero(&vars->volumeCryptKey[0], sizeof(vars->volumeCryptKey)); + if (vars->hwEncrypt) { + header->options |= kIOHibernateOptionHWEncrypt; + } + } + + // copy file block extent list if larger than header + + count = vars->fileVars->fileExtents->getLength(); + if (count > sizeof(header->fileExtentMap)) { + count -= sizeof(header->fileExtentMap); + err = IOHibernatePolledFileWrite(vars->fileVars, + ((uint8_t *) &fileExtents[0]) + sizeof(header->fileExtentMap), count, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + } + + hibernateBase = HIB_BASE; /* Defined in PAL headers */ + hibernateEnd = (segHIBB + segSizeHIB); + + // copy out restore1 code + + for (count = 0; + (phys64 = vars->handoffBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); + count += segLen) { + for (pagesDone = 0; pagesDone < atop_32(segLen); pagesDone++) { + gIOHibernateHandoffPages[atop_32(count) + pagesDone] = atop_64(phys64) + pagesDone; + } + } + + page = atop_32(kvtophys(hibernateBase)); + count = atop_32(round_page(hibernateEnd) - hibernateBase); + header->restore1CodePhysPage = page; + header->restore1CodeVirt = hibernateBase; + header->restore1PageCount = count; + header->restore1CodeOffset = ((uintptr_t) &hibernate_machine_entrypoint) - hibernateBase; + header->restore1StackOffset = ((uintptr_t) 
&gIOHibernateRestoreStackEnd[0]) - 64 - hibernateBase; + + if (uuid_parse(&gIOHibernateBridgeBootSessionUUIDString[0], &header->bridgeBootSessionUUID[0])) { + bzero(&header->bridgeBootSessionUUID[0], sizeof(header->bridgeBootSessionUUID)); + } + + // sum __HIB seg, with zeros for the stack + src = (uint8_t *) trunc_page(hibernateBase); + for (page = 0; page < count; page++) { + if ((src < &gIOHibernateRestoreStack[0]) || (src >= &gIOHibernateRestoreStackEnd[0])) { + restore1Sum += hibernate_sum_page(src, header->restore1CodeVirt + page); + } else { + restore1Sum += 0x00000000; + } + src += page_size; + } + sum1 = restore1Sum; + + // write the __HIB seg, with zeros for the stack + + src = (uint8_t *) trunc_page(hibernateBase); + count = ((uintptr_t) &gIOHibernateRestoreStack[0]) - trunc_page(hibernateBase); + if (count) { + err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + } + err = IOHibernatePolledFileWrite(vars->fileVars, + (uint8_t *) 0, + &gIOHibernateRestoreStackEnd[0] - &gIOHibernateRestoreStack[0], + cryptvars); + if (kIOReturnSuccess != err) { + break; + } + src = &gIOHibernateRestoreStackEnd[0]; + count = round_page(hibernateEnd) - ((uintptr_t) src); + if (count) { + err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + } + + if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) { + vars->fileVars->encryptStart = (vars->fileVars->position & ~(AES_BLOCK_SIZE - 1)); + vars->fileVars->encryptEnd = UINT64_MAX; + HIBLOG("encryptStart %qx\n", vars->fileVars->encryptStart); + } + + // write the preview buffer + + if (vars->previewBuffer) { + ppnum = 0; + count = 0; + do{ + phys64 = vars->previewBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone); + pageAndCount[0] = atop_64(phys64); + pageAndCount[1] = atop_32(segLen); + err = IOHibernatePolledFileWrite(vars->fileVars, + (const uint8_t *) &pageAndCount, sizeof(pageAndCount), + cryptvars); + if (kIOReturnSuccess != err) { + break; + } + count += segLen; + ppnum += sizeof(pageAndCount); + }while (phys64); + if (kIOReturnSuccess != err) { + break; + } + + src = (uint8_t *) vars->previewBuffer->getPhysicalSegment(0, NULL, _kIOMemorySourceSegment); ((hibernate_preview_t *)src)->lockTime = gIOConsoleLockTime; - count = vars->previewBuffer->getLength(); - - header->previewPageListSize = ppnum; - header->previewSize = count + ppnum; - - for (page = 0; page < count; page += page_size) - { - phys64 = vars->previewBuffer->getPhysicalSegment(page, NULL, kIOMemoryMapperNone); - sum1 += hibernate_sum_page(src + page, atop_64(phys64)); - } - err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); - if (kIOReturnSuccess != err) - break; - } - - // mark areas for no save - IOMemoryDescriptor * ioBuffer; - ioBuffer = IOPolledFileGetIOBuffer(vars->fileVars); - for (count = 0; - (phys64 = ioBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) - { - hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(phys64), atop_32(segLen), - kIOHibernatePageStateFree); - pageCount -= atop_32(segLen); - } - - for (count = 0; - (phys64 = vars->srcBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) - { - hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(phys64), atop_32(segLen), - kIOHibernatePageStateFree); - pageCount -= atop_32(segLen); - } - - // copy out bitmap of pages available 
for trashing during restore - - bitmap_size = vars->page_list_wired->list_size; - src = (uint8_t *) vars->page_list_wired; - err = IOHibernatePolledFileWrite(vars->fileVars, src, bitmap_size, cryptvars); - if (kIOReturnSuccess != err) - break; - - // mark more areas for no save, but these are not available - // for trashing during restore - - hibernate_page_list_set_volatile(vars->page_list, vars->page_list_wired, &pageCount); - - - page = atop_32(KERNEL_IMAGE_TO_PHYS(hibernateBase)); - count = atop_32(round_page(KERNEL_IMAGE_TO_PHYS(hibernateEnd))) - page; - hibernate_set_page_state(vars->page_list, vars->page_list_wired, - page, count, - kIOHibernatePageStateFree); - pageCount -= count; - - if (vars->previewBuffer) for (count = 0; - (phys64 = vars->previewBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) - { - hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(phys64), atop_32(segLen), - kIOHibernatePageStateFree); - pageCount -= atop_32(segLen); - } - - for (count = 0; - (phys64 = vars->handoffBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); - count += segLen) - { - hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(phys64), atop_32(segLen), - kIOHibernatePageStateFree); - pageCount -= atop_32(segLen); - } + count = vars->previewBuffer->getLength(); + + header->previewPageListSize = ppnum; + header->previewSize = count + ppnum; + + for (page = 0; page < count; page += page_size) { + phys64 = vars->previewBuffer->getPhysicalSegment(page, NULL, kIOMemoryMapperNone); + sum1 += hibernate_sum_page(src + page, atop_64(phys64)); + } + err = IOHibernatePolledFileWrite(vars->fileVars, src, count, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + } + + // mark areas for no save + IOMemoryDescriptor * ioBuffer; + ioBuffer = IOPolledFileGetIOBuffer(vars->fileVars); + for (count = 0; + (phys64 = ioBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); + count += segLen) { + hibernate_set_page_state(vars->page_list, vars->page_list_wired, + atop_64(phys64), atop_32(segLen), + kIOHibernatePageStateFree); + pageCount -= atop_32(segLen); + } + + for (count = 0; + (phys64 = vars->srcBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); + count += segLen) { + hibernate_set_page_state(vars->page_list, vars->page_list_wired, + atop_64(phys64), atop_32(segLen), + kIOHibernatePageStateFree); + pageCount -= atop_32(segLen); + } + + // copy out bitmap of pages available for trashing during restore + + bitmap_size = vars->page_list_wired->list_size; + src = (uint8_t *) vars->page_list_wired; + err = IOHibernatePolledFileWrite(vars->fileVars, src, bitmap_size, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + + // mark more areas for no save, but these are not available + // for trashing during restore + + hibernate_page_list_set_volatile(vars->page_list, vars->page_list_wired, &pageCount); + + + page = atop_32(KERNEL_IMAGE_TO_PHYS(hibernateBase)); + count = atop_32(round_page(KERNEL_IMAGE_TO_PHYS(hibernateEnd))) - page; + hibernate_set_page_state(vars->page_list, vars->page_list_wired, + page, count, + kIOHibernatePageStateFree); + pageCount -= count; + + if (vars->previewBuffer) { + for (count = 0; + (phys64 = vars->previewBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); + count += segLen) { + hibernate_set_page_state(vars->page_list, vars->page_list_wired, + atop_64(phys64), atop_32(segLen), + kIOHibernatePageStateFree); + pageCount -= atop_32(segLen); 
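/*
 * Common idiom in this stretch: every physical range the restore path itself
 * touches (the polled-I/O buffer, srcBuffer, the __HIB segment, the preview
 * and handoff buffers) is marked kIOHibernatePageStateFree in the page
 * bitmaps and subtracted from pageCount, so those pages are neither saved
 * into the image nor restored over. atop_32()/atop_64() are plain
 * address-to-page conversions; with 4K pages:
 *
 *     atop_64(phys64) == phys64 >> 12   // first page frame of the extent
 *     atop_32(segLen) == segLen >> 12   // number of pages it spans
 */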
+ } + } + + for (count = 0; + (phys64 = vars->handoffBuffer->getPhysicalSegment(count, &segLen, kIOMemoryMapperNone)); + count += segLen) { + hibernate_set_page_state(vars->page_list, vars->page_list_wired, + atop_64(phys64), atop_32(segLen), + kIOHibernatePageStateFree); + pageCount -= atop_32(segLen); + } #if KASAN vm_size_t shadow_pages_free = atop_64(shadow_ptop) - atop_64(shadow_pnext); /* no need to save unused shadow pages */ hibernate_set_page_state(vars->page_list, vars->page_list_wired, - atop_64(shadow_pnext), - shadow_pages_free, - kIOHibernatePageStateFree); + atop_64(shadow_pnext), + shadow_pages_free, + kIOHibernatePageStateFree); #endif - src = (uint8_t *) vars->srcBuffer->getBytesNoCopy(); - compressed = src + page_size; - scratch = compressed + page_size; - - pagesDone = 0; - lastBlob = 0; - - HIBLOG("bitmap_size 0x%x, previewSize 0x%x, writing %d pages @ 0x%llx\n", - bitmap_size, header->previewSize, - pageCount, vars->fileVars->position); - - enum - // pageType - { - kWired = 0x02, - kEncrypt = 0x01, - kWiredEncrypt = kWired | kEncrypt, - kWiredClear = kWired, - kUnwiredEncrypt = kEncrypt - }; - - bool cpuAES = (0 != (CPUID_FEATURE_AES & cpuid_features())); - - for (pageType = kWiredEncrypt; pageType >= kUnwiredEncrypt; pageType--) - { - if (kUnwiredEncrypt == pageType) - { - // start unwired image - if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) - { - vars->fileVars->encryptStart = (vars->fileVars->position & ~(((uint64_t)AES_BLOCK_SIZE) - 1)); - vars->fileVars->encryptEnd = UINT64_MAX; - HIBLOG("encryptStart %qx\n", vars->fileVars->encryptStart); - } - bcopy(&cryptvars->aes_iv[0], - &gIOHibernateCryptWakeContext.aes_iv[0], - sizeof(cryptvars->aes_iv)); - cryptvars = &gIOHibernateCryptWakeContext; - } - for (iterDone = false, ppnum = 0; !iterDone; ) - { - if (cpuAES && (pageType == kWiredClear)) - { - count = 0; - } - else + src = (uint8_t *) vars->srcBuffer->getBytesNoCopy(); + compressed = src + page_size; + scratch = compressed + page_size; + + pagesDone = 0; + lastBlob = 0; + + HIBLOG("bitmap_size 0x%x, previewSize 0x%x, writing %d pages @ 0x%llx\n", + bitmap_size, header->previewSize, + pageCount, vars->fileVars->position); + + enum + // pageType { - count = hibernate_page_list_iterate((kWired & pageType) ? vars->page_list_wired : vars->page_list, - &ppnum); - } + kWired = 0x02, + kEncrypt = 0x01, + kWiredEncrypt = kWired | kEncrypt, + kWiredClear = kWired, + kUnwiredEncrypt = kEncrypt + }; + + bool cpuAES = (0 != (CPUID_FEATURE_AES & cpuid_features())); + + for (pageType = kWiredEncrypt; pageType >= kUnwiredEncrypt; pageType--) { + if (kUnwiredEncrypt == pageType) { + // start unwired image + if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) { + vars->fileVars->encryptStart = (vars->fileVars->position & ~(((uint64_t)AES_BLOCK_SIZE) - 1)); + vars->fileVars->encryptEnd = UINT64_MAX; + HIBLOG("encryptStart %qx\n", vars->fileVars->encryptStart); + } + bcopy(&cryptvars->aes_iv[0], + &gIOHibernateCryptWakeContext.aes_iv[0], + sizeof(cryptvars->aes_iv)); + cryptvars = &gIOHibernateCryptWakeContext; + } + for (iterDone = false, ppnum = 0; !iterDone;) { + if (cpuAES && (pageType == kWiredClear)) { + count = 0; + } else { + count = hibernate_page_list_iterate((kWired & pageType) ? 
vars->page_list_wired : vars->page_list, + &ppnum); + } // kprintf("[%d](%x : %x)\n", pageType, ppnum, count); - iterDone = !count; + iterDone = !count; + + if (!cpuAES) { + if (count && (kWired & pageType) && needEncrypt) { + uint32_t checkIndex; + for (checkIndex = 0; + (checkIndex < count) + && (((kEncrypt & pageType) == 0) == pmap_is_noencrypt(ppnum + checkIndex)); + checkIndex++) { + } + if (!checkIndex) { + ppnum++; + continue; + } + count = checkIndex; + } + } + + switch (pageType) { + case kWiredEncrypt: wiredPagesEncrypted += count; break; + case kWiredClear: wiredPagesClear += count; break; + case kUnwiredEncrypt: dirtyPagesEncrypted += count; break; + } + + if (iterDone && (kWiredEncrypt == pageType)) {/* not yet end of wired list */ + } else { + pageAndCount[0] = ppnum; + pageAndCount[1] = count; + err = IOHibernatePolledFileWrite(vars->fileVars, + (const uint8_t *) &pageAndCount, sizeof(pageAndCount), + cryptvars); + if (kIOReturnSuccess != err) { + break; + } + } + + for (page = ppnum; page < (ppnum + count); page++) { + err = IOMemoryDescriptorWriteFromPhysical(vars->srcBuffer, 0, ptoa_64(page), page_size); + if (err) { + HIBLOG("IOMemoryDescriptorWriteFromPhysical %d [%ld] %x\n", __LINE__, (long)page, err); + break; + } + + sum = hibernate_sum_page(src, page); + if (kWired & pageType) { + sum1 += sum; + } else { + sum2 += sum; + } + + clock_get_uptime(&startTime); + wkresult = WKdm_compress_new((const WK_word*) src, + (WK_word*) compressed, + (WK_word*) scratch, + page_size - 4); + + clock_get_uptime(&endTime); + ADD_ABSOLUTETIME(&compTime, &endTime); + SUB_ABSOLUTETIME(&compTime, &startTime); + + compBytes += page_size; + pageCompressedSize = (-1 == wkresult) ? page_size : wkresult; + + if (pageCompressedSize == 0) { + pageCompressedSize = 4; + data = src; + + if (*(uint32_t *)src) { + svPageCount++; + } else { + zvPageCount++; + } + } else { + if (pageCompressedSize != page_size) { + data = compressed; + } else { + data = src; + } + } + + tag = pageCompressedSize | kIOHibernateTagSignature; + err = IOHibernatePolledFileWrite(vars->fileVars, (const uint8_t *) &tag, sizeof(tag), cryptvars); + if (kIOReturnSuccess != err) { + break; + } + + err = IOHibernatePolledFileWrite(vars->fileVars, data, (pageCompressedSize + 3) & ~3, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + + compressedSize += pageCompressedSize; + uncompressedSize += page_size; + pagesDone++; + + if (vars->consoleMapping && (0 == (1023 & pagesDone))) { + blob = ((pagesDone * kIOHibernateProgressCount) / pageCount); + if (blob != lastBlob) { + ProgressUpdate(gIOHibernateGraphicsInfo, vars->consoleMapping, lastBlob, blob); + lastBlob = blob; + } + } + if (0 == (8191 & pagesDone)) { + clock_get_uptime(&endTime); + SUB_ABSOLUTETIME(&endTime, &allTime); + absolutetime_to_nanoseconds(endTime, &nsec); + progressStamp = nsec / 750000000ULL; + if (progressStamp != lastProgressStamp) { + lastProgressStamp = progressStamp; + HIBPRINT("pages %d (%d%%)\n", pagesDone, (100 * pagesDone) / pageCount); + } + } + } + if (kIOReturnSuccess != err) { + break; + } + ppnum = page; + } - if (!cpuAES) - { - if (count && (kWired & pageType) && needEncrypt) - { - uint32_t checkIndex; - for (checkIndex = 0; - (checkIndex < count) - && (((kEncrypt & pageType) == 0) == pmap_is_noencrypt(ppnum + checkIndex)); - checkIndex++) - {} - if (!checkIndex) - { - ppnum++; - continue; + if (kIOReturnSuccess != err) { + break; + } + + if ((kEncrypt & pageType) && vars->fileVars->encryptStart) { + vars->fileVars->encryptEnd = 
((vars->fileVars->position + 511) & ~511ULL); + HIBLOG("encryptEnd %qx\n", vars->fileVars->encryptEnd); } - count = checkIndex; - } - } - - switch (pageType) - { - case kWiredEncrypt: wiredPagesEncrypted += count; break; - case kWiredClear: wiredPagesClear += count; break; - case kUnwiredEncrypt: dirtyPagesEncrypted += count; break; - } - - if (iterDone && (kWiredEncrypt == pageType)) {/* not yet end of wired list */} - else - { - pageAndCount[0] = ppnum; - pageAndCount[1] = count; - err = IOHibernatePolledFileWrite(vars->fileVars, - (const uint8_t *) &pageAndCount, sizeof(pageAndCount), - cryptvars); - if (kIOReturnSuccess != err) - break; - } - - for (page = ppnum; page < (ppnum + count); page++) - { - err = IOMemoryDescriptorWriteFromPhysical(vars->srcBuffer, 0, ptoa_64(page), page_size); - if (err) - { - HIBLOG("IOMemoryDescriptorWriteFromPhysical %d [%ld] %x\n", __LINE__, (long)page, err); - break; - } - - sum = hibernate_sum_page(src, page); - if (kWired & pageType) - sum1 += sum; - else - sum2 += sum; - - clock_get_uptime(&startTime); - wkresult = WKdm_compress_new((const WK_word*) src, - (WK_word*) compressed, - (WK_word*) scratch, - page_size - 4); - - clock_get_uptime(&endTime); - ADD_ABSOLUTETIME(&compTime, &endTime); - SUB_ABSOLUTETIME(&compTime, &startTime); - - compBytes += page_size; - pageCompressedSize = (-1 == wkresult) ? page_size : wkresult; - - if (pageCompressedSize == 0) - { - pageCompressedSize = 4; - data = src; - - if (*(uint32_t *)src) - svPageCount++; - else - zvPageCount++; - } - else - { - if (pageCompressedSize != page_size) - data = compressed; - else - data = src; - } - - tag = pageCompressedSize | kIOHibernateTagSignature; - err = IOHibernatePolledFileWrite(vars->fileVars, (const uint8_t *) &tag, sizeof(tag), cryptvars); - if (kIOReturnSuccess != err) - break; - - err = IOHibernatePolledFileWrite(vars->fileVars, data, (pageCompressedSize + 3) & ~3, cryptvars); - if (kIOReturnSuccess != err) - break; - - compressedSize += pageCompressedSize; - uncompressedSize += page_size; - pagesDone++; - - if (vars->consoleMapping && (0 == (1023 & pagesDone))) - { - blob = ((pagesDone * kIOHibernateProgressCount) / pageCount); - if (blob != lastBlob) - { - ProgressUpdate(gIOHibernateGraphicsInfo, vars->consoleMapping, lastBlob, blob); - lastBlob = blob; - } - } - if (0 == (8191 & pagesDone)) - { - clock_get_uptime(&endTime); - SUB_ABSOLUTETIME(&endTime, &allTime); - absolutetime_to_nanoseconds(endTime, &nsec); - progressStamp = nsec / 750000000ULL; - if (progressStamp != lastProgressStamp) - { - lastProgressStamp = progressStamp; - HIBPRINT("pages %d (%d%%)\n", pagesDone, (100 * pagesDone) / pageCount); - } - } - } - if (kIOReturnSuccess != err) - break; - ppnum = page; - } - - if (kIOReturnSuccess != err) - break; - - if ((kEncrypt & pageType) && vars->fileVars->encryptStart) - { - vars->fileVars->encryptEnd = ((vars->fileVars->position + 511) & ~511ULL); - HIBLOG("encryptEnd %qx\n", vars->fileVars->encryptEnd); - } - - if (kWiredEncrypt != pageType) - { - // end of image1/2 - fill to next block - err = IOHibernatePolledFileWrite(vars->fileVars, 0, 0, cryptvars); - if (kIOReturnSuccess != err) - break; - } - if (kWiredClear == pageType) - { - // enlarge wired image for test + + if (kWiredEncrypt != pageType) { + // end of image1/2 - fill to next block + err = IOHibernatePolledFileWrite(vars->fileVars, 0, 0, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + } + if (kWiredClear == pageType) { + // enlarge wired image for test // err = 
IOHibernatePolledFileWrite(vars->fileVars, 0, 0x60000000, cryptvars); - // end wired image - header->encryptStart = vars->fileVars->encryptStart; - header->encryptEnd = vars->fileVars->encryptEnd; - image1Size = vars->fileVars->position; - HIBLOG("image1Size 0x%qx, encryptStart1 0x%qx, End1 0x%qx\n", - image1Size, header->encryptStart, header->encryptEnd); - } - } - if (kIOReturnSuccess != err) - { - if (kIOReturnOverrun == err) - { - // update actual compression ratio on not enough space (for retry) - gIOHibernateCompression = (compressedSize << 8) / uncompressedSize; - } - - // update partial amount written (for IOPolledFileClose cleanup/unmap) - header->imageSize = vars->fileVars->position; - break; - } - - // Header: - - header->imageSize = vars->fileVars->position; - header->image1Size = image1Size; - header->bitmapSize = bitmap_size; - header->pageCount = pageCount; - - header->restore1Sum = restore1Sum; - header->image1Sum = sum1; - header->image2Sum = sum2; - header->sleepTime = gIOLastSleepTime.tv_sec; - - header->compression = (compressedSize << 8) / uncompressedSize; - gIOHibernateCompression = header->compression; - - count = vars->fileVars->fileExtents->getLength(); - if (count > sizeof(header->fileExtentMap)) - { - header->fileExtentMapSize = count; - count = sizeof(header->fileExtentMap); - } - else - header->fileExtentMapSize = sizeof(header->fileExtentMap); - bcopy(&fileExtents[0], &header->fileExtentMap[0], count); - - header->deviceBase = vars->fileVars->block0; - header->deviceBlockSize = vars->fileVars->blockSize; - - IOPolledFileSeek(vars->fileVars, 0); - err = IOHibernatePolledFileWrite(vars->fileVars, - (uint8_t *) header, sizeof(IOHibernateImageHeader), - cryptvars); - if (kIOReturnSuccess != err) - break; - err = IOHibernatePolledFileWrite(vars->fileVars, 0, 0, cryptvars); - } - while (false); - - clock_get_uptime(&endTime); - - IOService::getPMRootDomain()->pmStatsRecordEvent( - kIOPMStatsHibernateImageWrite | kIOPMStatsEventStopFlag, endTime); - - SUB_ABSOLUTETIME(&endTime, &allTime); - absolutetime_to_nanoseconds(endTime, &nsec); - HIBLOG("all time: %qd ms, ", nsec / 1000000ULL); - - absolutetime_to_nanoseconds(compTime, &nsec); - HIBLOG("comp bytes: %qd time: %qd ms %qd Mb/s, ", - compBytes, - nsec / 1000000ULL, - nsec ? (((compBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); - - absolutetime_to_nanoseconds(vars->fileVars->cryptTime, &nsec); - HIBLOG("crypt bytes: %qd time: %qd ms %qd Mb/s, ", - vars->fileVars->cryptBytes, - nsec / 1000000ULL, - nsec ? (((vars->fileVars->cryptBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); - - HIBLOG("\nimage %qd (%lld%%), uncompressed %qd (%d), compressed %qd (%d%%), sum1 %x, sum2 %x\n", - header->imageSize, (header->imageSize * 100) / vars->fileVars->fileSize, - uncompressedSize, atop_32(uncompressedSize), compressedSize, - uncompressedSize ? ((int) ((compressedSize * 100ULL) / uncompressedSize)) : 0, - sum1, sum2); - - HIBLOG("svPageCount %d, zvPageCount %d, wiredPagesEncrypted %d, wiredPagesClear %d, dirtyPagesEncrypted %d\n", - svPageCount, zvPageCount, wiredPagesEncrypted, wiredPagesClear, dirtyPagesEncrypted); - - if (pollerOpen) - IOPolledFilePollersClose(vars->fileVars, (kIOReturnSuccess == err) ? kIOPolledBeforeSleepState : kIOPolledBeforeSleepStateAborted ); - - if (vars->consoleMapping) - ProgressUpdate(gIOHibernateGraphicsInfo, - vars->consoleMapping, 0, kIOHibernateProgressCount); - - HIBLOG("hibernate_write_image done(%x)\n", err); - - // should we come back via regular wake, set the state in memory. 
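/*
 * header->compression in the code here is an 8.8 fixed-point ratio,
 *
 *     header->compression = (compressedSize << 8) / uncompressedSize;
 *
 * so 0x100 means no compression and, say, 0x66 (= 102/256) means the image
 * shrank to roughly 40% of its raw size. Per the comment on the
 * kIOReturnOverrun path, the same value is stashed in gIOHibernateCompression
 * before bailing out, so the retry can size the file from the measured ratio
 * rather than the previous estimate.
 */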
- gIOHibernateState = kIOHibernateStateInactive; + // end wired image + header->encryptStart = vars->fileVars->encryptStart; + header->encryptEnd = vars->fileVars->encryptEnd; + image1Size = vars->fileVars->position; + HIBLOG("image1Size 0x%qx, encryptStart1 0x%qx, End1 0x%qx\n", + image1Size, header->encryptStart, header->encryptEnd); + } + } + if (kIOReturnSuccess != err) { + if (kIOReturnOverrun == err) { + // update actual compression ratio on not enough space (for retry) + gIOHibernateCompression = (compressedSize << 8) / uncompressedSize; + } + + // update partial amount written (for IOPolledFileClose cleanup/unmap) + header->imageSize = vars->fileVars->position; + break; + } + + // Header: + + header->imageSize = vars->fileVars->position; + header->image1Size = image1Size; + header->bitmapSize = bitmap_size; + header->pageCount = pageCount; + + header->restore1Sum = restore1Sum; + header->image1Sum = sum1; + header->image2Sum = sum2; + header->sleepTime = gIOLastSleepTime.tv_sec; + + header->compression = (compressedSize << 8) / uncompressedSize; + gIOHibernateCompression = header->compression; + + count = vars->fileVars->fileExtents->getLength(); + if (count > sizeof(header->fileExtentMap)) { + header->fileExtentMapSize = count; + count = sizeof(header->fileExtentMap); + } else { + header->fileExtentMapSize = sizeof(header->fileExtentMap); + } + bcopy(&fileExtents[0], &header->fileExtentMap[0], count); + + header->deviceBase = vars->fileVars->block0; + header->deviceBlockSize = vars->fileVars->blockSize; + + IOPolledFileSeek(vars->fileVars, 0); + err = IOHibernatePolledFileWrite(vars->fileVars, + (uint8_t *) header, sizeof(IOHibernateImageHeader), + cryptvars); + if (kIOReturnSuccess != err) { + break; + } + err = IOHibernatePolledFileWrite(vars->fileVars, 0, 0, cryptvars); + }while (false); + + clock_get_uptime(&endTime); + + IOService::getPMRootDomain()->pmStatsRecordEvent( + kIOPMStatsHibernateImageWrite | kIOPMStatsEventStopFlag, endTime); + + SUB_ABSOLUTETIME(&endTime, &allTime); + absolutetime_to_nanoseconds(endTime, &nsec); + HIBLOG("all time: %qd ms, ", nsec / 1000000ULL); + + absolutetime_to_nanoseconds(compTime, &nsec); + HIBLOG("comp bytes: %qd time: %qd ms %qd Mb/s, ", + compBytes, + nsec / 1000000ULL, + nsec ? (((compBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); + + absolutetime_to_nanoseconds(vars->fileVars->cryptTime, &nsec); + HIBLOG("crypt bytes: %qd time: %qd ms %qd Mb/s, ", + vars->fileVars->cryptBytes, + nsec / 1000000ULL, + nsec ? (((vars->fileVars->cryptBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); + + HIBLOG("\nimage %qd (%lld%%), uncompressed %qd (%d), compressed %qd (%d%%), sum1 %x, sum2 %x\n", + header->imageSize, (header->imageSize * 100) / vars->fileVars->fileSize, + uncompressedSize, atop_32(uncompressedSize), compressedSize, + uncompressedSize ? ((int) ((compressedSize * 100ULL) / uncompressedSize)) : 0, + sum1, sum2); + + HIBLOG("svPageCount %d, zvPageCount %d, wiredPagesEncrypted %d, wiredPagesClear %d, dirtyPagesEncrypted %d\n", + svPageCount, zvPageCount, wiredPagesEncrypted, wiredPagesClear, dirtyPagesEncrypted); + + if (pollerOpen) { + IOPolledFilePollersClose(vars->fileVars, (kIOReturnSuccess == err) ? kIOPolledBeforeSleepState : kIOPolledBeforeSleepStateAborted ); + } + + if (vars->consoleMapping) { + ProgressUpdate(gIOHibernateGraphicsInfo, + vars->consoleMapping, 0, kIOHibernateProgressCount); + } + + HIBLOG("hibernate_write_image done(%x)\n", err); + + // should we come back via regular wake, set the state in memory. 
+ gIOHibernateState = kIOHibernateStateInactive; KDBG(IOKDBG_CODE(DBG_HIBERNATE, 1) | DBG_FUNC_END, wiredPagesEncrypted, - wiredPagesClear, dirtyPagesEncrypted); - - if (kIOReturnSuccess == err) - { - if (kIOHibernateModeSleep & gIOHibernateMode) - { - return (kIOHibernatePostWriteSleep); - } - else if(kIOHibernateModeRestart & gIOHibernateMode) - { - return (kIOHibernatePostWriteRestart); - } - else - { - /* by default, power down */ - return (kIOHibernatePostWriteHalt); - } - } - else if (kIOReturnAborted == err) - { - return (kIOHibernatePostWriteWake); - } - else - { - /* on error, sleep */ - return (kIOHibernatePostWriteSleep); - } + wiredPagesClear, dirtyPagesEncrypted); + + if (kIOReturnSuccess == err) { + if (kIOHibernateModeSleep & gIOHibernateMode) { + return kIOHibernatePostWriteSleep; + } else if (kIOHibernateModeRestart & gIOHibernateMode) { + return kIOHibernatePostWriteRestart; + } else { + /* by default, power down */ + return kIOHibernatePostWriteHalt; + } + } else if (kIOReturnAborted == err) { + return kIOHibernatePostWriteWake; + } else { + /* on error, sleep */ + return kIOHibernatePostWriteSleep; + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -2147,422 +2101,434 @@ hibernate_write_image(void) extern "C" void hibernate_machine_init(void) { - IOReturn err; - uint32_t sum; - uint32_t pagesDone; - uint32_t pagesRead = 0; - AbsoluteTime startTime, compTime; - AbsoluteTime allTime, endTime; - AbsoluteTime startIOTime, endIOTime; - uint64_t nsec, nsecIO; - uint64_t compBytes; - uint32_t lastProgressStamp = 0; - uint32_t progressStamp; - IOPolledFileCryptVars * cryptvars = 0; - - IOHibernateVars * vars = &gIOHibernateVars; - bzero(gIOHibernateStats, sizeof(hibernate_statistics_t)); - - if (!vars->fileVars || !vars->fileVars->pollers) - return; - - sum = gIOHibernateCurrentHeader->actualImage1Sum; - pagesDone = gIOHibernateCurrentHeader->actualUncompressedPages; - - if (kIOHibernateStateWakingFromHibernate != gIOHibernateState) - { - HIBLOG("regular wake\n"); - return; - } - - HIBPRINT("diag %x %x %x %x\n", + IOReturn err; + uint32_t sum; + uint32_t pagesDone; + uint32_t pagesRead = 0; + AbsoluteTime startTime, compTime; + AbsoluteTime allTime, endTime; + AbsoluteTime startIOTime, endIOTime; + uint64_t nsec, nsecIO; + uint64_t compBytes; + uint32_t lastProgressStamp = 0; + uint32_t progressStamp; + IOPolledFileCryptVars * cryptvars = 0; + + IOHibernateVars * vars = &gIOHibernateVars; + bzero(gIOHibernateStats, sizeof(hibernate_statistics_t)); + + if (!vars->fileVars || !vars->fileVars->pollers) { + return; + } + + sum = gIOHibernateCurrentHeader->actualImage1Sum; + pagesDone = gIOHibernateCurrentHeader->actualUncompressedPages; + + if (kIOHibernateStateWakingFromHibernate != gIOHibernateState) { + HIBLOG("regular wake\n"); + return; + } + + HIBPRINT("diag %x %x %x %x\n", gIOHibernateCurrentHeader->diag[0], gIOHibernateCurrentHeader->diag[1], gIOHibernateCurrentHeader->diag[2], gIOHibernateCurrentHeader->diag[3]); -#define t40ms(x) (tmrCvt((((uint64_t)(x)) << 8), tscFCvtt2n) / 1000000) -#define tStat(x, y) gIOHibernateStats->x = t40ms(gIOHibernateCurrentHeader->y); - tStat(booterStart, booterStart); - gIOHibernateStats->smcStart = gIOHibernateCurrentHeader->smcStart; - tStat(booterDuration0, booterTime0); - tStat(booterDuration1, booterTime1); - tStat(booterDuration2, booterTime2); - tStat(booterDuration, booterTime); - tStat(booterConnectDisplayDuration, connectDisplayTime); - tStat(booterSplashDuration, splashTime); - 
tStat(trampolineDuration, trampolineTime); - - gIOHibernateStats->image1Size = gIOHibernateCurrentHeader->image1Size; - gIOHibernateStats->imageSize = gIOHibernateCurrentHeader->imageSize; - gIOHibernateStats->image1Pages = pagesDone; +#define t40ms(x) (tmrCvt((((uint64_t)(x)) << 8), tscFCvtt2n) / 1000000) +#define tStat(x, y) gIOHibernateStats->x = t40ms(gIOHibernateCurrentHeader->y); + tStat(booterStart, booterStart); + gIOHibernateStats->smcStart = gIOHibernateCurrentHeader->smcStart; + tStat(booterDuration0, booterTime0); + tStat(booterDuration1, booterTime1); + tStat(booterDuration2, booterTime2); + tStat(booterDuration, booterTime); + tStat(booterConnectDisplayDuration, connectDisplayTime); + tStat(booterSplashDuration, splashTime); + tStat(trampolineDuration, trampolineTime); + + gIOHibernateStats->image1Size = gIOHibernateCurrentHeader->image1Size; + gIOHibernateStats->imageSize = gIOHibernateCurrentHeader->imageSize; + gIOHibernateStats->image1Pages = pagesDone; /* HIBERNATE_stats */ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 14), gIOHibernateStats->smcStart, - gIOHibernateStats->booterStart, gIOHibernateStats->booterDuration, - gIOHibernateStats->trampolineDuration); - - HIBLOG("booter start at %d ms smc %d ms, [%d, %d, %d] total %d ms, dsply %d, %d ms, tramp %d ms\n", - gIOHibernateStats->booterStart, - gIOHibernateStats->smcStart, - gIOHibernateStats->booterDuration0, - gIOHibernateStats->booterDuration1, - gIOHibernateStats->booterDuration2, - gIOHibernateStats->booterDuration, - gIOHibernateStats->booterConnectDisplayDuration, - gIOHibernateStats->booterSplashDuration, - gIOHibernateStats->trampolineDuration); - - HIBLOG("hibernate_machine_init: state %d, image pages %d, sum was %x, imageSize 0x%qx, image1Size 0x%qx, conflictCount %d, nextFree %x\n", + gIOHibernateStats->booterStart, gIOHibernateStats->booterDuration, + gIOHibernateStats->trampolineDuration); + + HIBLOG("booter start at %d ms smc %d ms, [%d, %d, %d] total %d ms, dsply %d, %d ms, tramp %d ms\n", + gIOHibernateStats->booterStart, + gIOHibernateStats->smcStart, + gIOHibernateStats->booterDuration0, + gIOHibernateStats->booterDuration1, + gIOHibernateStats->booterDuration2, + gIOHibernateStats->booterDuration, + gIOHibernateStats->booterConnectDisplayDuration, + gIOHibernateStats->booterSplashDuration, + gIOHibernateStats->trampolineDuration); + + HIBLOG("hibernate_machine_init: state %d, image pages %d, sum was %x, imageSize 0x%qx, image1Size 0x%qx, conflictCount %d, nextFree %x\n", gIOHibernateState, pagesDone, sum, gIOHibernateStats->imageSize, gIOHibernateStats->image1Size, gIOHibernateCurrentHeader->conflictCount, gIOHibernateCurrentHeader->nextFree); - if ((0 != (kIOHibernateModeSleep & gIOHibernateMode)) - && (0 != ((kIOHibernateModeDiscardCleanActive | kIOHibernateModeDiscardCleanInactive) & gIOHibernateMode))) - { - hibernate_page_list_discard(vars->page_list); - } + if ((0 != (kIOHibernateModeSleep & gIOHibernateMode)) + && (0 != ((kIOHibernateModeDiscardCleanActive | kIOHibernateModeDiscardCleanInactive) & gIOHibernateMode))) { + hibernate_page_list_discard(vars->page_list); + } - cryptvars = (kIOHibernateModeEncrypt & gIOHibernateMode) ? &gIOHibernateCryptWakeContext : 0; + cryptvars = (kIOHibernateModeEncrypt & gIOHibernateMode) ? 
&gIOHibernateCryptWakeContext : 0; - if (gIOHibernateCurrentHeader->handoffPageCount > gIOHibernateHandoffPageCount) - panic("handoff overflow"); + if (gIOHibernateCurrentHeader->handoffPageCount > gIOHibernateHandoffPageCount) { + panic("handoff overflow"); + } - IOHibernateHandoff * handoff; - bool done = false; - bool foundCryptData = false; - bool foundVolumeEncryptData = false; + IOHibernateHandoff * handoff; + bool done = false; + bool foundCryptData = false; + bool foundVolumeEncryptData = false; - for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy(); - !done; - handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount]) - { + for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy(); + !done; + handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount]) { // HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount); - uint8_t * data = &handoff->data[0]; - switch (handoff->type) - { - case kIOHibernateHandoffTypeEnd: - done = true; - break; + uint8_t * data = &handoff->data[0]; + switch (handoff->type) { + case kIOHibernateHandoffTypeEnd: + done = true; + break; - case kIOHibernateHandoffTypeGraphicsInfo: - if (handoff->bytecount == sizeof(*gIOHibernateGraphicsInfo)) - { - bcopy(data, gIOHibernateGraphicsInfo, sizeof(*gIOHibernateGraphicsInfo)); - } - break; + case kIOHibernateHandoffTypeGraphicsInfo: + if (handoff->bytecount == sizeof(*gIOHibernateGraphicsInfo)) { + bcopy(data, gIOHibernateGraphicsInfo, sizeof(*gIOHibernateGraphicsInfo)); + } + break; - case kIOHibernateHandoffTypeCryptVars: - if (cryptvars) - { - hibernate_cryptwakevars_t * - wakevars = (hibernate_cryptwakevars_t *) &handoff->data[0]; - bcopy(&wakevars->aes_iv[0], &cryptvars->aes_iv[0], sizeof(cryptvars->aes_iv)); - } - foundCryptData = true; - bzero(data, handoff->bytecount); - break; + case kIOHibernateHandoffTypeCryptVars: + if (cryptvars) { + hibernate_cryptwakevars_t * + wakevars = (hibernate_cryptwakevars_t *) &handoff->data[0]; + bcopy(&wakevars->aes_iv[0], &cryptvars->aes_iv[0], sizeof(cryptvars->aes_iv)); + } + foundCryptData = true; + bzero(data, handoff->bytecount); + break; - case kIOHibernateHandoffTypeVolumeCryptKey: - if (handoff->bytecount == vars->volumeCryptKeySize) - { - bcopy(data, &vars->volumeCryptKey[0], vars->volumeCryptKeySize); - foundVolumeEncryptData = true; - } - else panic("kIOHibernateHandoffTypeVolumeCryptKey(%d)", handoff->bytecount); - break; + case kIOHibernateHandoffTypeVolumeCryptKey: + if (handoff->bytecount == vars->volumeCryptKeySize) { + bcopy(data, &vars->volumeCryptKey[0], vars->volumeCryptKeySize); + foundVolumeEncryptData = true; + } else { + panic("kIOHibernateHandoffTypeVolumeCryptKey(%d)", handoff->bytecount); + } + break; - case kIOHibernateHandoffTypeMemoryMap: + case kIOHibernateHandoffTypeMemoryMap: - clock_get_uptime(&allTime); + clock_get_uptime(&allTime); - hibernate_newruntime_map(data, handoff->bytecount, - gIOHibernateCurrentHeader->systemTableOffset); + hibernate_newruntime_map(data, handoff->bytecount, + gIOHibernateCurrentHeader->systemTableOffset); - clock_get_uptime(&endTime); + clock_get_uptime(&endTime); - SUB_ABSOLUTETIME(&endTime, &allTime); - absolutetime_to_nanoseconds(endTime, &nsec); + SUB_ABSOLUTETIME(&endTime, &allTime); + absolutetime_to_nanoseconds(endTime, &nsec); - HIBLOG("hibernate_newruntime_map time: %qd ms, ", nsec / 1000000ULL); + HIBLOG("hibernate_newruntime_map time: %qd ms, ", nsec / 1000000ULL); - break; + break; - case 
kIOHibernateHandoffTypeDeviceTree: + case kIOHibernateHandoffTypeDeviceTree: { // DTEntry chosen = NULL; // HIBPRINT("DTLookupEntry %d\n", DTLookupEntry((const DTEntry) data, "/chosen", &chosen)); } - break; + break; - default: - done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000)); - break; + default: + done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000)); + break; + } } - } - if (vars->hwEncrypt && !foundVolumeEncryptData) - panic("no volumeCryptKey"); - else if (cryptvars && !foundCryptData) - panic("hibernate handoff"); + if (vars->hwEncrypt && !foundVolumeEncryptData) { + panic("no volumeCryptKey"); + } else if (cryptvars && !foundCryptData) { + panic("hibernate handoff"); + } - HIBPRINT("video 0x%llx %d %d %d status %x\n", + HIBPRINT("video 0x%llx %d %d %d status %x\n", gIOHibernateGraphicsInfo->physicalAddress, gIOHibernateGraphicsInfo->depth, gIOHibernateGraphicsInfo->width, gIOHibernateGraphicsInfo->height, gIOHibernateGraphicsInfo->gfxStatus); - if (vars->videoMapping && gIOHibernateGraphicsInfo->physicalAddress) - { - vars->videoMapSize = round_page(gIOHibernateGraphicsInfo->height - * gIOHibernateGraphicsInfo->rowBytes); - if (vars->videoMapSize > vars->videoAllocSize) vars->videoMapSize = 0; - else - { - IOMapPages(kernel_map, - vars->videoMapping, gIOHibernateGraphicsInfo->physicalAddress, - vars->videoMapSize, kIOMapInhibitCache ); - } - } - - if (vars->videoMapSize) - ProgressUpdate(gIOHibernateGraphicsInfo, - (uint8_t *) vars->videoMapping, 0, kIOHibernateProgressCount); - - uint8_t * src = (uint8_t *) vars->srcBuffer->getBytesNoCopy(); - uint8_t * compressed = src + page_size; - uint8_t * scratch = compressed + page_size; - uint32_t decoOffset; - - clock_get_uptime(&allTime); - AbsoluteTime_to_scalar(&compTime) = 0; - compBytes = 0; - - HIBLOG("IOPolledFilePollersOpen(), ml_get_interrupts_enabled %d\n", ml_get_interrupts_enabled()); - err = IOPolledFilePollersOpen(vars->fileVars, kIOPolledAfterSleepState, false); - clock_get_uptime(&startIOTime); - endTime = startIOTime; - SUB_ABSOLUTETIME(&endTime, &allTime); - absolutetime_to_nanoseconds(endTime, &nsec); - HIBLOG("IOPolledFilePollersOpen(%x) %qd ms\n", err, nsec / 1000000ULL); - - if (vars->hwEncrypt) - { - err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, - &vars->volumeCryptKey[0], vars->volumeCryptKeySize); - HIBLOG("IOPolledFilePollersSetEncryptionKey(%x) %ld\n", err, vars->volumeCryptKeySize); - if (kIOReturnSuccess != err) panic("IOPolledFilePollersSetEncryptionKey(0x%x)", err); - cryptvars = 0; - } - - IOPolledFileSeek(vars->fileVars, gIOHibernateCurrentHeader->image1Size); - - // kick off the read ahead - vars->fileVars->bufferHalf = 0; - vars->fileVars->bufferLimit = 0; - vars->fileVars->lastRead = 0; - vars->fileVars->readEnd = gIOHibernateCurrentHeader->imageSize; - vars->fileVars->bufferOffset = vars->fileVars->bufferLimit; - vars->fileVars->cryptBytes = 0; - AbsoluteTime_to_scalar(&vars->fileVars->cryptTime) = 0; - - err = IOPolledFileRead(vars->fileVars, 0, 0, cryptvars); - vars->fileVars->bufferOffset = vars->fileVars->bufferLimit; - // -- - - HIBLOG("hibernate_machine_init reading\n"); - - uint32_t * header = (uint32_t *) src; - sum = 0; - - while (kIOReturnSuccess == err) - { - unsigned int count; - unsigned int page; - uint32_t tag; - vm_offset_t ppnum, compressedSize; - - err = IOPolledFileRead(vars->fileVars, src, 8, cryptvars); - if (kIOReturnSuccess != err) - break; - - ppnum = header[0]; - count = header[1]; + if (vars->videoMapping && 
gIOHibernateGraphicsInfo->physicalAddress) { + vars->videoMapSize = round_page(gIOHibernateGraphicsInfo->height + * gIOHibernateGraphicsInfo->rowBytes); + if (vars->videoMapSize > vars->videoAllocSize) { + vars->videoMapSize = 0; + } else { + IOMapPages(kernel_map, + vars->videoMapping, gIOHibernateGraphicsInfo->physicalAddress, + vars->videoMapSize, kIOMapInhibitCache ); + } + } -// HIBPRINT("(%x, %x)\n", ppnum, count); + if (vars->videoMapSize) { + ProgressUpdate(gIOHibernateGraphicsInfo, + (uint8_t *) vars->videoMapping, 0, kIOHibernateProgressCount); + } - if (!count) - break; + uint8_t * src = (uint8_t *) vars->srcBuffer->getBytesNoCopy(); + uint8_t * compressed = src + page_size; + uint8_t * scratch = compressed + page_size; + uint32_t decoOffset; + + clock_get_uptime(&allTime); + AbsoluteTime_to_scalar(&compTime) = 0; + compBytes = 0; + + HIBLOG("IOPolledFilePollersOpen(), ml_get_interrupts_enabled %d\n", ml_get_interrupts_enabled()); + err = IOPolledFilePollersOpen(vars->fileVars, kIOPolledAfterSleepState, false); + clock_get_uptime(&startIOTime); + endTime = startIOTime; + SUB_ABSOLUTETIME(&endTime, &allTime); + absolutetime_to_nanoseconds(endTime, &nsec); + HIBLOG("IOPolledFilePollersOpen(%x) %qd ms\n", err, nsec / 1000000ULL); + + if (vars->hwEncrypt) { + err = IOPolledFilePollersSetEncryptionKey(vars->fileVars, + &vars->volumeCryptKey[0], vars->volumeCryptKeySize); + HIBLOG("IOPolledFilePollersSetEncryptionKey(%x) %ld\n", err, vars->volumeCryptKeySize); + if (kIOReturnSuccess != err) { + panic("IOPolledFilePollersSetEncryptionKey(0x%x)", err); + } + cryptvars = 0; + } - for (page = 0; page < count; page++) - { - err = IOPolledFileRead(vars->fileVars, (uint8_t *) &tag, 4, cryptvars); - if (kIOReturnSuccess != err) - break; + IOPolledFileSeek(vars->fileVars, gIOHibernateCurrentHeader->image1Size); - compressedSize = kIOHibernateTagLength & tag; - if (kIOHibernateTagSignature != (tag & ~kIOHibernateTagLength)) - { - err = kIOReturnIPCError; - break; - } + // kick off the read ahead + vars->fileVars->bufferHalf = 0; + vars->fileVars->bufferLimit = 0; + vars->fileVars->lastRead = 0; + vars->fileVars->readEnd = gIOHibernateCurrentHeader->imageSize; + vars->fileVars->bufferOffset = vars->fileVars->bufferLimit; + vars->fileVars->cryptBytes = 0; + AbsoluteTime_to_scalar(&vars->fileVars->cryptTime) = 0; - err = IOPolledFileRead(vars->fileVars, src, (compressedSize + 3) & ~3, cryptvars); - if (kIOReturnSuccess != err) break; + err = IOPolledFileRead(vars->fileVars, 0, 0, cryptvars); + vars->fileVars->bufferOffset = vars->fileVars->bufferLimit; + // -- - if (compressedSize < page_size) - { - decoOffset = page_size; - clock_get_uptime(&startTime); + HIBLOG("hibernate_machine_init reading\n"); - if (compressedSize == 4) { - int i; - uint32_t *s, *d; + uint32_t * header = (uint32_t *) src; + sum = 0; - s = (uint32_t *)src; - d = (uint32_t *)(uintptr_t)compressed; + while (kIOReturnSuccess == err) { + unsigned int count; + unsigned int page; + uint32_t tag; + vm_offset_t ppnum, compressedSize; - for (i = 0; i < (int)(PAGE_SIZE / sizeof(int32_t)); i++) - *d++ = *s; + err = IOPolledFileRead(vars->fileVars, src, 8, cryptvars); + if (kIOReturnSuccess != err) { + break; } - else - WKdm_decompress_new((WK_word*) src, (WK_word*) compressed, (WK_word*) scratch, compressedSize); - clock_get_uptime(&endTime); - ADD_ABSOLUTETIME(&compTime, &endTime); - SUB_ABSOLUTETIME(&compTime, &startTime); - compBytes += page_size; - } - else decoOffset = 0; - - sum += hibernate_sum_page((src + decoOffset), ppnum); - 
err = IOMemoryDescriptorReadToPhysical(vars->srcBuffer, decoOffset, ptoa_64(ppnum), page_size); - if (err) - { - HIBLOG("IOMemoryDescriptorReadToPhysical [%ld] %x\n", (long)ppnum, err); - break; - } - - ppnum++; - pagesDone++; - pagesRead++; - - if (0 == (8191 & pagesDone)) - { - clock_get_uptime(&endTime); - SUB_ABSOLUTETIME(&endTime, &allTime); - absolutetime_to_nanoseconds(endTime, &nsec); - progressStamp = nsec / 750000000ULL; - if (progressStamp != lastProgressStamp) - { - lastProgressStamp = progressStamp; - HIBPRINT("pages %d (%d%%)\n", pagesDone, - (100 * pagesDone) / gIOHibernateCurrentHeader->pageCount); + + ppnum = header[0]; + count = header[1]; + +// HIBPRINT("(%x, %x)\n", ppnum, count); + + if (!count) { + break; + } + + for (page = 0; page < count; page++) { + err = IOPolledFileRead(vars->fileVars, (uint8_t *) &tag, 4, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + + compressedSize = kIOHibernateTagLength & tag; + if (kIOHibernateTagSignature != (tag & ~kIOHibernateTagLength)) { + err = kIOReturnIPCError; + break; + } + + err = IOPolledFileRead(vars->fileVars, src, (compressedSize + 3) & ~3, cryptvars); + if (kIOReturnSuccess != err) { + break; + } + + if (compressedSize < page_size) { + decoOffset = page_size; + clock_get_uptime(&startTime); + + if (compressedSize == 4) { + int i; + uint32_t *s, *d; + + s = (uint32_t *)src; + d = (uint32_t *)(uintptr_t)compressed; + + for (i = 0; i < (int)(PAGE_SIZE / sizeof(int32_t)); i++) { + *d++ = *s; + } + } else { + WKdm_decompress_new((WK_word*) src, (WK_word*) compressed, (WK_word*) scratch, compressedSize); + } + clock_get_uptime(&endTime); + ADD_ABSOLUTETIME(&compTime, &endTime); + SUB_ABSOLUTETIME(&compTime, &startTime); + compBytes += page_size; + } else { + decoOffset = 0; + } + + sum += hibernate_sum_page((src + decoOffset), ppnum); + err = IOMemoryDescriptorReadToPhysical(vars->srcBuffer, decoOffset, ptoa_64(ppnum), page_size); + if (err) { + HIBLOG("IOMemoryDescriptorReadToPhysical [%ld] %x\n", (long)ppnum, err); + break; + } + + ppnum++; + pagesDone++; + pagesRead++; + + if (0 == (8191 & pagesDone)) { + clock_get_uptime(&endTime); + SUB_ABSOLUTETIME(&endTime, &allTime); + absolutetime_to_nanoseconds(endTime, &nsec); + progressStamp = nsec / 750000000ULL; + if (progressStamp != lastProgressStamp) { + lastProgressStamp = progressStamp; + HIBPRINT("pages %d (%d%%)\n", pagesDone, + (100 * pagesDone) / gIOHibernateCurrentHeader->pageCount); + } + } } - } } - } - if ((kIOReturnSuccess == err) && (pagesDone == gIOHibernateCurrentHeader->actualUncompressedPages)) - err = kIOReturnLockedRead; + if ((kIOReturnSuccess == err) && (pagesDone == gIOHibernateCurrentHeader->actualUncompressedPages)) { + err = kIOReturnLockedRead; + } - if (kIOReturnSuccess != err) - panic("Hibernate restore error %x", err); + if (kIOReturnSuccess != err) { + panic("Hibernate restore error %x", err); + } - gIOHibernateCurrentHeader->actualImage2Sum = sum; - gIOHibernateCompression = gIOHibernateCurrentHeader->compression; + gIOHibernateCurrentHeader->actualImage2Sum = sum; + gIOHibernateCompression = gIOHibernateCurrentHeader->compression; - clock_get_uptime(&endIOTime); + clock_get_uptime(&endIOTime); - err = IOPolledFilePollersClose(vars->fileVars, kIOPolledAfterSleepState); + err = IOPolledFilePollersClose(vars->fileVars, kIOPolledAfterSleepState); - clock_get_uptime(&endTime); + clock_get_uptime(&endTime); - IOService::getPMRootDomain()->pmStatsRecordEvent( - kIOPMStatsHibernateImageRead | kIOPMStatsEventStartFlag, allTime); - 
IOService::getPMRootDomain()->pmStatsRecordEvent( - kIOPMStatsHibernateImageRead | kIOPMStatsEventStopFlag, endTime); + IOService::getPMRootDomain()->pmStatsRecordEvent( + kIOPMStatsHibernateImageRead | kIOPMStatsEventStartFlag, allTime); + IOService::getPMRootDomain()->pmStatsRecordEvent( + kIOPMStatsHibernateImageRead | kIOPMStatsEventStopFlag, endTime); - SUB_ABSOLUTETIME(&endTime, &allTime); - absolutetime_to_nanoseconds(endTime, &nsec); + SUB_ABSOLUTETIME(&endTime, &allTime); + absolutetime_to_nanoseconds(endTime, &nsec); - SUB_ABSOLUTETIME(&endIOTime, &startIOTime); - absolutetime_to_nanoseconds(endIOTime, &nsecIO); + SUB_ABSOLUTETIME(&endIOTime, &startIOTime); + absolutetime_to_nanoseconds(endIOTime, &nsecIO); - gIOHibernateStats->kernelImageReadDuration = nsec / 1000000ULL; - gIOHibernateStats->imagePages = pagesDone; + gIOHibernateStats->kernelImageReadDuration = nsec / 1000000ULL; + gIOHibernateStats->imagePages = pagesDone; - HIBLOG("hibernate_machine_init pagesDone %d sum2 %x, time: %d ms, disk(0x%x) %qd Mb/s, ", - pagesDone, sum, gIOHibernateStats->kernelImageReadDuration, kDefaultIOSize, - nsecIO ? ((((gIOHibernateCurrentHeader->imageSize - gIOHibernateCurrentHeader->image1Size) * 1000000000ULL) / 1024 / 1024) / nsecIO) : 0); + HIBLOG("hibernate_machine_init pagesDone %d sum2 %x, time: %d ms, disk(0x%x) %qd Mb/s, ", + pagesDone, sum, gIOHibernateStats->kernelImageReadDuration, kDefaultIOSize, + nsecIO ? ((((gIOHibernateCurrentHeader->imageSize - gIOHibernateCurrentHeader->image1Size) * 1000000000ULL) / 1024 / 1024) / nsecIO) : 0); - absolutetime_to_nanoseconds(compTime, &nsec); - HIBLOG("comp bytes: %qd time: %qd ms %qd Mb/s, ", - compBytes, - nsec / 1000000ULL, - nsec ? (((compBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); + absolutetime_to_nanoseconds(compTime, &nsec); + HIBLOG("comp bytes: %qd time: %qd ms %qd Mb/s, ", + compBytes, + nsec / 1000000ULL, + nsec ? (((compBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); - absolutetime_to_nanoseconds(vars->fileVars->cryptTime, &nsec); - HIBLOG("crypt bytes: %qd time: %qd ms %qd Mb/s\n", - vars->fileVars->cryptBytes, - nsec / 1000000ULL, - nsec ? (((vars->fileVars->cryptBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); + absolutetime_to_nanoseconds(vars->fileVars->cryptTime, &nsec); + HIBLOG("crypt bytes: %qd time: %qd ms %qd Mb/s\n", + vars->fileVars->cryptBytes, + nsec / 1000000ULL, + nsec ? 
(((vars->fileVars->cryptBytes * 1000000000ULL) / 1024 / 1024) / nsec) : 0); - KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2), pagesRead, pagesDone); + KDBG(IOKDBG_CODE(DBG_HIBERNATE, 2), pagesRead, pagesDone); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IOHibernateSetWakeCapabilities(uint32_t capability) +void +IOHibernateSetWakeCapabilities(uint32_t capability) { - if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) - { - gIOHibernateStats->wakeCapability = capability; + if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) { + gIOHibernateStats->wakeCapability = capability; - if (kIOPMSystemCapabilityGraphics & capability) - { - vm_compressor_do_warmup(); + if (kIOPMSystemCapabilityGraphics & capability) { + vm_compressor_do_warmup(); + } } - } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IOHibernateSystemRestart(void) +void +IOHibernateSystemRestart(void) { - static uint8_t noteStore[32] __attribute__((aligned(32))); - IORegistryEntry * regEntry; - const OSSymbol * sym; - OSData * noteProp; - OSData * data; - uintptr_t * smcVars; - uint8_t * smcBytes; - size_t len; - addr64_t element; - - data = OSDynamicCast(OSData, IOService::getPMRootDomain()->getProperty(kIOHibernateSMCVariablesKey)); - if (!data) return; - - smcVars = (typeof(smcVars)) data->getBytesNoCopy(); - smcBytes = (typeof(smcBytes)) smcVars[1]; - len = smcVars[0]; - if (len > sizeof(noteStore)) len = sizeof(noteStore); - noteProp = OSData::withCapacity(3 * sizeof(element)); - if (!noteProp) return; - element = len; - noteProp->appendBytes(&element, sizeof(element)); - element = crc32(0, smcBytes, len); - noteProp->appendBytes(&element, sizeof(element)); - - bcopy(smcBytes, noteStore, len); - element = (addr64_t) &noteStore[0]; - element = (element & page_mask) | ptoa_64(pmap_find_phys(kernel_pmap, element)); - noteProp->appendBytes(&element, sizeof(element)); - - if (!gIOOptionsEntry) - { - regEntry = IORegistryEntry::fromPath("/options", gIODTPlane); - gIOOptionsEntry = OSDynamicCast(IODTNVRAM, regEntry); - if (regEntry && !gIOOptionsEntry) - regEntry->release(); - } - - sym = OSSymbol::withCStringNoCopy(kIOHibernateBootNoteKey); - if (gIOOptionsEntry && sym) gIOOptionsEntry->setProperty(sym, noteProp); - if (noteProp) noteProp->release(); - if (sym) sym->release(); + static uint8_t noteStore[32] __attribute__((aligned(32))); + IORegistryEntry * regEntry; + const OSSymbol * sym; + OSData * noteProp; + OSData * data; + uintptr_t * smcVars; + uint8_t * smcBytes; + size_t len; + addr64_t element; + + data = OSDynamicCast(OSData, IOService::getPMRootDomain()->getProperty(kIOHibernateSMCVariablesKey)); + if (!data) { + return; + } + + smcVars = (typeof(smcVars))data->getBytesNoCopy(); + smcBytes = (typeof(smcBytes))smcVars[1]; + len = smcVars[0]; + if (len > sizeof(noteStore)) { + len = sizeof(noteStore); + } + noteProp = OSData::withCapacity(3 * sizeof(element)); + if (!noteProp) { + return; + } + element = len; + noteProp->appendBytes(&element, sizeof(element)); + element = crc32(0, smcBytes, len); + noteProp->appendBytes(&element, sizeof(element)); + + bcopy(smcBytes, noteStore, len); + element = (addr64_t) &noteStore[0]; + element = (element & page_mask) | ptoa_64(pmap_find_phys(kernel_pmap, element)); + noteProp->appendBytes(&element, sizeof(element)); + + if (!gIOOptionsEntry) { + regEntry = IORegistryEntry::fromPath("/options", gIODTPlane); + gIOOptionsEntry = OSDynamicCast(IODTNVRAM, regEntry); + if (regEntry && !gIOOptionsEntry) { +
regEntry->release(); + } + } + + sym = OSSymbol::withCStringNoCopy(kIOHibernateBootNoteKey); + if (gIOOptionsEntry && sym) { + gIOOptionsEntry->setProperty(sym, noteProp); + } + if (noteProp) { + noteProp->release(); + } + if (sym) { + sym->release(); + } } diff --git a/iokit/Kernel/IOHibernateInternal.h b/iokit/Kernel/IOHibernateInternal.h index 7fd0ec461..a96d843bd 100644 --- a/iokit/Kernel/IOHibernateInternal.h +++ b/iokit/Kernel/IOHibernateInternal.h @@ -2,7 +2,7 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,48 +30,46 @@ #ifdef __cplusplus -enum { kIOHibernateAESKeySize = 128 }; /* bits */ +enum { kIOHibernateAESKeySize = 128 }; /* bits */ -struct IOHibernateVars -{ - hibernate_page_list_t * page_list; - hibernate_page_list_t * page_list_wired; - hibernate_page_list_t * page_list_pal; - class IOBufferMemoryDescriptor * ioBuffer; - class IOBufferMemoryDescriptor * srcBuffer; - class IOBufferMemoryDescriptor * handoffBuffer; - class IOMemoryDescriptor * previewBuffer; - OSData * previewData; - OSObject * saveBootDevice; +struct IOHibernateVars { + hibernate_page_list_t * page_list; + hibernate_page_list_t * page_list_wired; + hibernate_page_list_t * page_list_pal; + class IOBufferMemoryDescriptor * ioBuffer; + class IOBufferMemoryDescriptor * srcBuffer; + class IOBufferMemoryDescriptor * handoffBuffer; + class IOMemoryDescriptor * previewBuffer; + OSData * previewData; + OSObject * saveBootDevice; - struct IOPolledFileIOVars * fileVars; - uint64_t fileMinSize; - uint64_t fileMaxSize; - vm_offset_t videoMapping; - vm_size_t videoAllocSize; - vm_size_t videoMapSize; - uint8_t * consoleMapping; - uint8_t haveFastBoot; - uint8_t saveBootAudioVolume; - uint8_t hwEncrypt; - uint8_t wiredCryptKey[kIOHibernateAESKeySize / 8]; - uint8_t cryptKey[kIOHibernateAESKeySize / 8]; - size_t volumeCryptKeySize; - uint8_t volumeCryptKey[64]; + struct IOPolledFileIOVars * fileVars; + uint64_t fileMinSize; + uint64_t fileMaxSize; + vm_offset_t videoMapping; + vm_size_t videoAllocSize; + vm_size_t videoMapSize; + uint8_t * consoleMapping; + uint8_t haveFastBoot; + uint8_t saveBootAudioVolume; + uint8_t hwEncrypt; + uint8_t wiredCryptKey[kIOHibernateAESKeySize / 8]; + uint8_t cryptKey[kIOHibernateAESKeySize / 8]; + size_t volumeCryptKeySize; + uint8_t volumeCryptKey[64]; }; typedef struct IOHibernateVars IOHibernateVars; -#endif /* __cplusplus */ +#endif /* __cplusplus */ -enum -{ - kIOHibernateTagSignature = 0x53000000, 
- kIOHibernateTagLength = 0x00001fff, +enum{ + kIOHibernateTagSignature = 0x53000000, + kIOHibernateTagLength = 0x00001fff, }; #ifdef __cplusplus extern "C" -#endif /* __cplusplus */ +#endif /* __cplusplus */ uint32_t hibernate_sum_page(uint8_t *buf, uint32_t ppnum); diff --git a/iokit/Kernel/IOHibernateRestoreKernel.c b/iokit/Kernel/IOHibernateRestoreKernel.c index fc5a1b7f2..e330b5467 100644 --- a/iokit/Kernel/IOHibernateRestoreKernel.c +++ b/iokit/Kernel/IOHibernateRestoreKernel.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,11 +40,11 @@ #include /* -This code is linked into the kernel but part of the "__HIB" section, which means -its used by code running in the special context of restoring the kernel text and data -from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything -it calls or references needs to be careful to only touch memory also in the "__HIB" section. -*/ + * This code is linked into the kernel but part of the "__HIB" section, which means + * its used by code running in the special context of restoring the kernel text and data + * from the hibernation image read by the booter. hibernate_kernel_entrypoint() and everything + * it calls or references needs to be careful to only touch memory also in the "__HIB" section. 
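The comment block above states this file's central constraint: hibernate_kernel_entrypoint() and everything it reaches must live in, and touch only, memory in the "__HIB" section. xnu assigns whole object files to that segment through its build configuration; purely as an illustration of the underlying Mach-O mechanism (this is not how this file is placed, and the symbol names below are hypothetical), clang can also pin individual symbols into a named segment/section:

#include <stdint.h>

/* hypothetical: data that must stay resident while the image is restored */
uint32_t hib_resident_counter __attribute__((section("__HIB,__data")));

/* hypothetical: restore-path code, confined to touching __HIB memory */
__attribute__((section("__HIB,__text")))
void
hib_restore_step(void)
{
	hib_resident_counter++;	/* __HIB code touching __HIB data only */
}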
+ */ uint32_t gIOHibernateState; @@ -54,8 +54,8 @@ static IOHibernateImageHeader _hibernateHeader; IOHibernateImageHeader * gIOHibernateCurrentHeader = &_hibernateHeader; ppnum_t gIOHibernateHandoffPages[64]; -uint32_t gIOHibernateHandoffPageCount = sizeof(gIOHibernateHandoffPages) - / sizeof(gIOHibernateHandoffPages[0]); +uint32_t gIOHibernateHandoffPageCount = sizeof(gIOHibernateHandoffPages) + / sizeof(gIOHibernateHandoffPages[0]); #if CONFIG_DEBUG void hibprintf(const char *fmt, ...); @@ -76,156 +76,168 @@ extern void acpi_wake_prot_entry(void); #include #else -static inline uint64_t rdtsc64(void) +static inline uint64_t +rdtsc64(void) { - return (0); + return 0; } #endif /* defined(__i386__) || defined(__x86_64__) */ #if defined(__i386__) || defined(__x86_64__) -#define DBGLOG 1 +#define DBGLOG 1 #include /* standard port addresses */ enum { - COM1_PORT_ADDR = 0x3f8, - COM2_PORT_ADDR = 0x2f8 + COM1_PORT_ADDR = 0x3f8, + COM2_PORT_ADDR = 0x2f8 }; /* UART register offsets */ enum { - UART_RBR = 0, /* receive buffer Register (R) */ - UART_THR = 0, /* transmit holding register (W) */ - UART_DLL = 0, /* DLAB = 1, divisor latch (LSB) */ - UART_IER = 1, /* interrupt enable register */ - UART_DLM = 1, /* DLAB = 1, divisor latch (MSB) */ - UART_IIR = 2, /* interrupt ident register (R) */ - UART_FCR = 2, /* fifo control register (W) */ - UART_LCR = 3, /* line control register */ - UART_MCR = 4, /* modem control register */ - UART_LSR = 5, /* line status register */ - UART_MSR = 6, /* modem status register */ - UART_SCR = 7 /* scratch register */ + UART_RBR = 0, /* receive buffer Register (R) */ + UART_THR = 0, /* transmit holding register (W) */ + UART_DLL = 0, /* DLAB = 1, divisor latch (LSB) */ + UART_IER = 1, /* interrupt enable register */ + UART_DLM = 1, /* DLAB = 1, divisor latch (MSB) */ + UART_IIR = 2, /* interrupt ident register (R) */ + UART_FCR = 2, /* fifo control register (W) */ + UART_LCR = 3, /* line control register */ + UART_MCR = 4, /* modem control register */ + UART_LSR = 5, /* line status register */ + UART_MSR = 6, /* modem status register */ + UART_SCR = 7 /* scratch register */ }; enum { - UART_LCR_8BITS = 0x03, - UART_LCR_DLAB = 0x80 + UART_LCR_8BITS = 0x03, + UART_LCR_DLAB = 0x80 }; enum { - UART_MCR_DTR = 0x01, - UART_MCR_RTS = 0x02, - UART_MCR_OUT1 = 0x04, - UART_MCR_OUT2 = 0x08, - UART_MCR_LOOP = 0x10 + UART_MCR_DTR = 0x01, + UART_MCR_RTS = 0x02, + UART_MCR_OUT1 = 0x04, + UART_MCR_OUT2 = 0x08, + UART_MCR_LOOP = 0x10 }; enum { - UART_LSR_DR = 0x01, - UART_LSR_OE = 0x02, - UART_LSR_PE = 0x04, - UART_LSR_FE = 0x08, - UART_LSR_THRE = 0x20 + UART_LSR_DR = 0x01, + UART_LSR_OE = 0x02, + UART_LSR_PE = 0x04, + UART_LSR_FE = 0x08, + UART_LSR_THRE = 0x20 }; -static void uart_putc(char c) +static void +uart_putc(char c) { - while (!(inb(COM1_PORT_ADDR + UART_LSR) & UART_LSR_THRE)) - {} - outb(COM1_PORT_ADDR + UART_THR, c); + while (!(inb(COM1_PORT_ADDR + UART_LSR) & UART_LSR_THRE)) { + } + outb(COM1_PORT_ADDR + UART_THR, c); } -static int debug_probe( void ) +static int +debug_probe( void ) { - /* Verify that the Scratch Register is accessible */ - outb(COM1_PORT_ADDR + UART_SCR, 0x5a); - if (inb(COM1_PORT_ADDR + UART_SCR) != 0x5a) return false; - outb(COM1_PORT_ADDR + UART_SCR, 0xa5); - if (inb(COM1_PORT_ADDR + UART_SCR) != 0xa5) return false; - uart_putc('\n'); - return true; + /* Verify that the Scratch Register is accessible */ + outb(COM1_PORT_ADDR + UART_SCR, 0x5a); + if (inb(COM1_PORT_ADDR + UART_SCR) != 0x5a) { + return false; + } + outb(COM1_PORT_ADDR + UART_SCR, 0xa5); 
+ if (inb(COM1_PORT_ADDR + UART_SCR) != 0xa5) { + return false; + } + uart_putc('\n'); + return true; } -static void uart_puthex(uint64_t num) +static void +uart_puthex(uint64_t num) { - int bit; - char c; - bool leading = true; - - for (bit = 60; bit >= 0; bit -= 4) - { - c = 0xf & (num >> bit); - if (c) - leading = false; - else if (leading && bit) - continue; - if (c <= 9) - c += '0'; - else - c+= 'a' - 10; - uart_putc(c); - } + int bit; + char c; + bool leading = true; + + for (bit = 60; bit >= 0; bit -= 4) { + c = 0xf & (num >> bit); + if (c) { + leading = false; + } else if (leading && bit) { + continue; + } + if (c <= 9) { + c += '0'; + } else { + c += 'a' - 10; + } + uart_putc(c); + } } -static void debug_code(uint32_t code, uint64_t value) +static void +debug_code(uint32_t code, uint64_t value) { - int bit; - char c; - - if (!(kIOHibernateDebugRestoreLogs & gIOHibernateDebugFlags)) - return; - - for (bit = 24; bit >= 0; bit -= 8) - { - c = 0xFF & (code >> bit); - if (c) - uart_putc(c); - } - uart_putc('='); - uart_puthex(value); - uart_putc('\n'); - uart_putc('\r'); + int bit; + char c; + + if (!(kIOHibernateDebugRestoreLogs & gIOHibernateDebugFlags)) { + return; + } + + for (bit = 24; bit >= 0; bit -= 8) { + c = 0xFF & (code >> bit); + if (c) { + uart_putc(c); + } + } + uart_putc('='); + uart_puthex(value); + uart_putc('\n'); + uart_putc('\r'); } #endif /* defined(__i386__) || defined(__x86_64__) */ #if !defined(DBGLOG) -#define debug_probe() (false) +#define debug_probe() (false) #define debug_code(c, v) {} #endif -enum -{ - kIOHibernateRestoreCodeImageStart = 'imgS', - kIOHibernateRestoreCodeImageEnd = 'imgE', - kIOHibernateRestoreCodePageIndexStart = 'pgiS', - kIOHibernateRestoreCodePageIndexEnd = 'pgiE', - kIOHibernateRestoreCodeMapStart = 'mapS', - kIOHibernateRestoreCodeMapEnd = 'mapE', - kIOHibernateRestoreCodeWakeMapSize = 'wkms', - kIOHibernateRestoreCodeConflictPage = 'cfpg', - kIOHibernateRestoreCodeConflictSource = 'cfsr', - kIOHibernateRestoreCodeNoMemory = 'nomm', - kIOHibernateRestoreCodeTag = 'tag ', - kIOHibernateRestoreCodeSignature = 'sign', - kIOHibernateRestoreCodeMapVirt = 'mapV', - kIOHibernateRestoreCodeHandoffPages = 'hand', - kIOHibernateRestoreCodeHandoffCount = 'hndc', +enum{ + kIOHibernateRestoreCodeImageStart = 'imgS', + kIOHibernateRestoreCodeImageEnd = 'imgE', + kIOHibernateRestoreCodePageIndexStart = 'pgiS', + kIOHibernateRestoreCodePageIndexEnd = 'pgiE', + kIOHibernateRestoreCodeMapStart = 'mapS', + kIOHibernateRestoreCodeMapEnd = 'mapE', + kIOHibernateRestoreCodeWakeMapSize = 'wkms', + kIOHibernateRestoreCodeConflictPage = 'cfpg', + kIOHibernateRestoreCodeConflictSource = 'cfsr', + kIOHibernateRestoreCodeNoMemory = 'nomm', + kIOHibernateRestoreCodeTag = 'tag ', + kIOHibernateRestoreCodeSignature = 'sign', + kIOHibernateRestoreCodeMapVirt = 'mapV', + kIOHibernateRestoreCodeHandoffPages = 'hand', + kIOHibernateRestoreCodeHandoffCount = 'hndc', }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static void fatal(void) +static void +fatal(void) { #if defined(__i386__) || defined(__x86_64__) - outb(0xcf9, 6); + outb(0xcf9, 6); #else - while (true) {} + while (true) { + } #endif } @@ -234,7 +246,7 @@ static void fatal(void) uint32_t hibernate_sum_page(uint8_t *buf, uint32_t ppnum) { - return (((uint32_t *)buf)[((PAGE_SIZE >> 2) - 1) & ppnum]); + return ((uint32_t *)buf)[((PAGE_SIZE >> 2) - 1) & ppnum]; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -242,474 +254,471 @@ 
hibernate_sum_page(uint8_t *buf, uint32_t ppnum) static hibernate_bitmap_t * hibernate_page_bitmap(hibernate_page_list_t * list, uint32_t page) { - uint32_t bank; - hibernate_bitmap_t * bitmap = &list->bank_bitmap[0]; - - for (bank = 0; bank < list->bank_count; bank++) - { - if ((page >= bitmap->first_page) && (page <= bitmap->last_page)) - break; - bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; - } - if (bank == list->bank_count) - bitmap = NULL; - - return (bitmap); + uint32_t bank; + hibernate_bitmap_t * bitmap = &list->bank_bitmap[0]; + + for (bank = 0; bank < list->bank_count; bank++) { + if ((page >= bitmap->first_page) && (page <= bitmap->last_page)) { + break; + } + bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; + } + if (bank == list->bank_count) { + bitmap = NULL; + } + + return bitmap; } hibernate_bitmap_t * hibernate_page_bitmap_pin(hibernate_page_list_t * list, uint32_t * pPage) { - uint32_t bank, page = *pPage; - hibernate_bitmap_t * bitmap = &list->bank_bitmap[0]; - - for (bank = 0; bank < list->bank_count; bank++) - { - if (page <= bitmap->first_page) - { - *pPage = bitmap->first_page; - break; + uint32_t bank, page = *pPage; + hibernate_bitmap_t * bitmap = &list->bank_bitmap[0]; + + for (bank = 0; bank < list->bank_count; bank++) { + if (page <= bitmap->first_page) { + *pPage = bitmap->first_page; + break; + } + if (page <= bitmap->last_page) { + break; + } + bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; } - if (page <= bitmap->last_page) - break; - bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; - } - if (bank == list->bank_count) - bitmap = NULL; - - return (bitmap); + if (bank == list->bank_count) { + bitmap = NULL; + } + + return bitmap; } -void +void hibernate_page_bitset(hibernate_page_list_t * list, boolean_t set, uint32_t page) { - hibernate_bitmap_t * bitmap; - - bitmap = hibernate_page_bitmap(list, page); - if (bitmap) - { - page -= bitmap->first_page; - if (set) - bitmap->bitmap[page >> 5] |= (0x80000000 >> (page & 31)); - //setbit(page - bitmap->first_page, (int *) &bitmap->bitmap[0]); - else - bitmap->bitmap[page >> 5] &= ~(0x80000000 >> (page & 31)); - //clrbit(page - bitmap->first_page, (int *) &bitmap->bitmap[0]); - } + hibernate_bitmap_t * bitmap; + + bitmap = hibernate_page_bitmap(list, page); + if (bitmap) { + page -= bitmap->first_page; + if (set) { + bitmap->bitmap[page >> 5] |= (0x80000000 >> (page & 31)); + } + //setbit(page - bitmap->first_page, (int *) &bitmap->bitmap[0]); + else { + bitmap->bitmap[page >> 5] &= ~(0x80000000 >> (page & 31)); + } + //clrbit(page - bitmap->first_page, (int *) &bitmap->bitmap[0]); + } } -boolean_t +boolean_t hibernate_page_bittst(hibernate_page_list_t * list, uint32_t page) { - boolean_t result = TRUE; - hibernate_bitmap_t * bitmap; - - bitmap = hibernate_page_bitmap(list, page); - if (bitmap) - { - page -= bitmap->first_page; - result = (0 != (bitmap->bitmap[page >> 5] & (0x80000000 >> (page & 31)))); - } - return (result); + boolean_t result = TRUE; + hibernate_bitmap_t * bitmap; + + bitmap = hibernate_page_bitmap(list, page); + if (bitmap) { + page -= bitmap->first_page; + result = (0 != (bitmap->bitmap[page >> 5] & (0x80000000 >> (page & 31)))); + } + return result; } // count bits clear or set (set == TRUE) starting at page. 
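The bitmap helpers above all share one convention: within each 32-bit word of a bank's bitmap, the bank's lowest-numbered page is the most significant bit, which is what lets the run counter that follows lean directly on __builtin_clz. A self-contained sketch of that convention (hypothetical names; the real hibernate_page_bitmap_count additionally clamps the result at the bank's last_page):

#include <stdint.h>

/* set (set != 0) or clear one page bit, MSB-first within each word */
static void
page_bitset(uint32_t *bm, int set, uint32_t page)
{
	uint32_t mask = 0x80000000u >> (page & 31);

	if (set) {
		bm[page >> 5] |= mask;
	} else {
		bm[page >> 5] &= ~mask;
	}
}

/* count consecutive bits equal to 'set' starting at 'page', over nwords words */
static uint32_t
page_run(const uint32_t *bm, uint32_t nwords, int set, uint32_t page)
{
	uint32_t index = page >> 5;
	uint32_t bit = page & 31;
	uint32_t count = 0;
	uint32_t bits = bm[index];

	if (set) {
		bits = ~bits;	/* turn the run we want into a run of zeros */
	}
	bits <<= bit;	/* put 'page' at the most significant bit */
	if (bits) {
		count += __builtin_clz(bits);	/* zeros up to the first mismatch */
	} else {
		count += 32 - bit;	/* run covers the rest of this word */
		while (++index < nwords) {
			bits = bm[index];
			if (set) {
				bits = ~bits;
			}
			if (bits) {
				count += __builtin_clz(bits);
				break;
			}
			count += 32;
		}
	}
	return count;
}

hibernate_page_list_grab then uses exactly this kind of run count to skip over a stretch of clear (in-use) bits and land on the next set (free) page.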
uint32_t hibernate_page_bitmap_count(hibernate_bitmap_t * bitmap, uint32_t set, uint32_t page) { - uint32_t index, bit, bits; - uint32_t count; - - count = 0; - - index = (page - bitmap->first_page) >> 5; - bit = (page - bitmap->first_page) & 31; - - bits = bitmap->bitmap[index]; - if (set) - bits = ~bits; - bits = (bits << bit); - if (bits) - count += __builtin_clz(bits); - else - { - count += 32 - bit; - while (++index < bitmap->bitmapwords) - { - bits = bitmap->bitmap[index]; - if (set) + uint32_t index, bit, bits; + uint32_t count; + + count = 0; + + index = (page - bitmap->first_page) >> 5; + bit = (page - bitmap->first_page) & 31; + + bits = bitmap->bitmap[index]; + if (set) { bits = ~bits; - if (bits) - { + } + bits = (bits << bit); + if (bits) { count += __builtin_clz(bits); - break; - } - count += 32; + } else { + count += 32 - bit; + while (++index < bitmap->bitmapwords) { + bits = bitmap->bitmap[index]; + if (set) { + bits = ~bits; + } + if (bits) { + count += __builtin_clz(bits); + break; + } + count += 32; + } } - } - if ((page + count) > (bitmap->last_page + 1)) count = (bitmap->last_page + 1) - page; + if ((page + count) > (bitmap->last_page + 1)) { + count = (bitmap->last_page + 1) - page; + } - return (count); + return count; } static ppnum_t hibernate_page_list_grab(hibernate_page_list_t * list, uint32_t * pNextFree) { - uint32_t nextFree = *pNextFree; - uint32_t nextFreeInBank; - hibernate_bitmap_t * bitmap; - - nextFreeInBank = nextFree + 1; - while ((bitmap = hibernate_page_bitmap_pin(list, &nextFreeInBank))) - { - nextFreeInBank += hibernate_page_bitmap_count(bitmap, FALSE, nextFreeInBank); - if (nextFreeInBank <= bitmap->last_page) - { - *pNextFree = nextFreeInBank; - break; + uint32_t nextFree = *pNextFree; + uint32_t nextFreeInBank; + hibernate_bitmap_t * bitmap; + + nextFreeInBank = nextFree + 1; + while ((bitmap = hibernate_page_bitmap_pin(list, &nextFreeInBank))) { + nextFreeInBank += hibernate_page_bitmap_count(bitmap, FALSE, nextFreeInBank); + if (nextFreeInBank <= bitmap->last_page) { + *pNextFree = nextFreeInBank; + break; + } } - } - if (!bitmap) - { - debug_code(kIOHibernateRestoreCodeNoMemory, nextFree); - fatal(); - nextFree = 0; - } + if (!bitmap) { + debug_code(kIOHibernateRestoreCodeNoMemory, nextFree); + fatal(); + nextFree = 0; + } - return (nextFree); + return nextFree; } static uint32_t -store_one_page(uint32_t procFlags, uint32_t * src, uint32_t compressedSize, - uint32_t * buffer, uint32_t ppnum) +store_one_page(uint32_t procFlags, uint32_t * src, uint32_t compressedSize, + uint32_t * buffer, uint32_t ppnum) { uint64_t dst = ptoa_64(ppnum); - uint8_t scratch[WKdm_SCRATCH_BUF_SIZE_INTERNAL] __attribute__ ((aligned (16))); + uint8_t scratch[WKdm_SCRATCH_BUF_SIZE_INTERNAL] __attribute__ ((aligned(16))); - if (compressedSize != PAGE_SIZE) - { + if (compressedSize != PAGE_SIZE) { dst = pal_hib_map(DEST_COPY_AREA, dst); - if (compressedSize != 4) WKdm_decompress_new((WK_word*) src, (WK_word*)(uintptr_t)dst, (WK_word*) &scratch[0], compressedSize); - else - { + if (compressedSize != 4) { + WKdm_decompress_new((WK_word*) src, (WK_word*)(uintptr_t)dst, (WK_word*) &scratch[0], compressedSize); + } else { size_t i; uint32_t s, *d; s = *src; d = (uint32_t *)(uintptr_t)dst; - if (!s) __nosan_bzero((void *) dst, PAGE_SIZE); - else for (i = 0; i < (PAGE_SIZE / sizeof(int32_t)); i++) *d++ = s; + if (!s) { + __nosan_bzero((void *) dst, PAGE_SIZE); + } else { + for (i = 0; i < (PAGE_SIZE / sizeof(int32_t)); i++) { + *d++ = s; + } + } } - } - else - { + } else { 
dst = hibernate_restore_phys_page((uint64_t) (uintptr_t) src, dst, PAGE_SIZE, procFlags); } return hibernate_sum_page((uint8_t *)(uintptr_t)dst, ppnum); } -long -hibernate_kernel_entrypoint(uint32_t p1, - uint32_t p2, uint32_t p3, uint32_t p4) +long +hibernate_kernel_entrypoint(uint32_t p1, + uint32_t p2, uint32_t p3, uint32_t p4) { - uint64_t headerPhys; - uint64_t mapPhys; - uint64_t srcPhys; - uint64_t imageReadPhys; - uint64_t pageIndexPhys; - uint32_t * pageIndexSource; - hibernate_page_list_t * map; - uint32_t stage; - uint32_t count; - uint32_t ppnum; - uint32_t page; - uint32_t conflictCount; - uint32_t compressedSize; - uint32_t uncompressedPages; - uint32_t copyPageListHeadPage; - uint32_t pageListPage; - uint32_t * copyPageList; - uint32_t * src; - uint32_t copyPageIndex; - uint32_t sum; - uint32_t pageSum; - uint32_t nextFree; - uint32_t lastImagePage; - uint32_t lastMapPage; - uint32_t lastPageIndexPage; - uint32_t handoffPages; - uint32_t handoffPageCount; - - uint64_t timeStart; - timeStart = rdtsc64(); - - static_assert(sizeof(IOHibernateImageHeader) == 512); - - headerPhys = ptoa_64(p1); - - if ((kIOHibernateDebugRestoreLogs & gIOHibernateDebugFlags) && !debug_probe()) - gIOHibernateDebugFlags &= ~kIOHibernateDebugRestoreLogs; - - debug_code(kIOHibernateRestoreCodeImageStart, headerPhys); - - __nosan_memcpy(gIOHibernateCurrentHeader, - (void *) pal_hib_map(IMAGE_AREA, headerPhys), - sizeof(IOHibernateImageHeader)); - - debug_code(kIOHibernateRestoreCodeSignature, gIOHibernateCurrentHeader->signature); - - mapPhys = headerPhys - + (offsetof(IOHibernateImageHeader, fileExtentMap) - + gIOHibernateCurrentHeader->fileExtentMapSize - + ptoa_32(gIOHibernateCurrentHeader->restore1PageCount) - + gIOHibernateCurrentHeader->previewSize); - - map = (hibernate_page_list_t *) pal_hib_map(BITMAP_AREA, mapPhys); - - lastImagePage = atop_64(headerPhys + gIOHibernateCurrentHeader->image1Size); - lastMapPage = atop_64(mapPhys + gIOHibernateCurrentHeader->bitmapSize); - - handoffPages = gIOHibernateCurrentHeader->handoffPages; - handoffPageCount = gIOHibernateCurrentHeader->handoffPageCount; - - debug_code(kIOHibernateRestoreCodeImageEnd, ptoa_64(lastImagePage)); - debug_code(kIOHibernateRestoreCodeMapStart, mapPhys); - debug_code(kIOHibernateRestoreCodeMapEnd, ptoa_64(lastMapPage)); - - debug_code(kIOHibernateRestoreCodeMapVirt, (uintptr_t) map); - debug_code(kIOHibernateRestoreCodeHandoffPages, ptoa_64(handoffPages)); - debug_code(kIOHibernateRestoreCodeHandoffCount, handoffPageCount); - - // knock all the image pages to be used out of free map - for (ppnum = atop_64(headerPhys); ppnum <= lastImagePage; ppnum++) - { - hibernate_page_bitset(map, FALSE, ppnum); - } - // knock all the handoff pages to be used out of free map - for (ppnum = handoffPages; ppnum < (handoffPages + handoffPageCount); ppnum++) - { - hibernate_page_bitset(map, FALSE, ppnum); - } - - nextFree = 0; - hibernate_page_list_grab(map, &nextFree); - - sum = gIOHibernateCurrentHeader->actualRestore1Sum; - gIOHibernateCurrentHeader->diag[0] = atop_64(headerPhys); - gIOHibernateCurrentHeader->diag[1] = sum; - gIOHibernateCurrentHeader->trampolineTime = 0; - - uncompressedPages = 0; - conflictCount = 0; - copyPageListHeadPage = 0; - copyPageList = 0; - copyPageIndex = PAGE_SIZE >> 2; - - compressedSize = PAGE_SIZE; - stage = 2; - count = 0; - srcPhys = 0; - - if (gIOHibernateCurrentHeader->previewSize) - { - pageIndexPhys = headerPhys - + (offsetof(IOHibernateImageHeader, fileExtentMap) - + 
gIOHibernateCurrentHeader->fileExtentMapSize - + ptoa_32(gIOHibernateCurrentHeader->restore1PageCount)); - imageReadPhys = (pageIndexPhys + gIOHibernateCurrentHeader->previewPageListSize); - lastPageIndexPage = atop_64(imageReadPhys); - pageIndexSource = (uint32_t *) pal_hib_map(IMAGE2_AREA, pageIndexPhys); - } - else - { - pageIndexPhys = 0; - lastPageIndexPage = 0; - imageReadPhys = (mapPhys + gIOHibernateCurrentHeader->bitmapSize); - } - - debug_code(kIOHibernateRestoreCodePageIndexStart, pageIndexPhys); - debug_code(kIOHibernateRestoreCodePageIndexEnd, ptoa_64(lastPageIndexPage)); - - while (1) - { - switch (stage) - { - case 2: - // copy handoff data - count = srcPhys ? 0 : handoffPageCount; - if (!count) - break; - if (count > gIOHibernateHandoffPageCount) count = gIOHibernateHandoffPageCount; - srcPhys = ptoa_64(handoffPages); - break; - - case 1: - // copy pageIndexSource pages == preview image data - if (!srcPhys) - { - if (!pageIndexPhys) break; - srcPhys = imageReadPhys; - } - ppnum = pageIndexSource[0]; - count = pageIndexSource[1]; - pageIndexSource += 2; - pageIndexPhys += 2 * sizeof(pageIndexSource[0]); - imageReadPhys = srcPhys; - break; - - case 0: - // copy pages - if (!srcPhys) srcPhys = (mapPhys + gIOHibernateCurrentHeader->bitmapSize); - src = (uint32_t *) pal_hib_map(IMAGE_AREA, srcPhys); - ppnum = src[0]; - count = src[1]; - srcPhys += 2 * sizeof(*src); - imageReadPhys = srcPhys; - break; + uint64_t headerPhys; + uint64_t mapPhys; + uint64_t srcPhys; + uint64_t imageReadPhys; + uint64_t pageIndexPhys; + uint32_t * pageIndexSource; + hibernate_page_list_t * map; + uint32_t stage; + uint32_t count; + uint32_t ppnum; + uint32_t page; + uint32_t conflictCount; + uint32_t compressedSize; + uint32_t uncompressedPages; + uint32_t copyPageListHeadPage; + uint32_t pageListPage; + uint32_t * copyPageList; + uint32_t * src; + uint32_t copyPageIndex; + uint32_t sum; + uint32_t pageSum; + uint32_t nextFree; + uint32_t lastImagePage; + uint32_t lastMapPage; + uint32_t lastPageIndexPage; + uint32_t handoffPages; + uint32_t handoffPageCount; + + uint64_t timeStart; + timeStart = rdtsc64(); + + static_assert(sizeof(IOHibernateImageHeader) == 512); + + headerPhys = ptoa_64(p1); + + if ((kIOHibernateDebugRestoreLogs & gIOHibernateDebugFlags) && !debug_probe()) { + gIOHibernateDebugFlags &= ~kIOHibernateDebugRestoreLogs; } + debug_code(kIOHibernateRestoreCodeImageStart, headerPhys); + + __nosan_memcpy(gIOHibernateCurrentHeader, + (void *) pal_hib_map(IMAGE_AREA, headerPhys), + sizeof(IOHibernateImageHeader)); + + debug_code(kIOHibernateRestoreCodeSignature, gIOHibernateCurrentHeader->signature); + + mapPhys = headerPhys + + (offsetof(IOHibernateImageHeader, fileExtentMap) + + gIOHibernateCurrentHeader->fileExtentMapSize + + ptoa_32(gIOHibernateCurrentHeader->restore1PageCount) + + gIOHibernateCurrentHeader->previewSize); + + map = (hibernate_page_list_t *) pal_hib_map(BITMAP_AREA, mapPhys); + + lastImagePage = atop_64(headerPhys + gIOHibernateCurrentHeader->image1Size); + lastMapPage = atop_64(mapPhys + gIOHibernateCurrentHeader->bitmapSize); + + handoffPages = gIOHibernateCurrentHeader->handoffPages; + handoffPageCount = gIOHibernateCurrentHeader->handoffPageCount; + + debug_code(kIOHibernateRestoreCodeImageEnd, ptoa_64(lastImagePage)); + debug_code(kIOHibernateRestoreCodeMapStart, mapPhys); + debug_code(kIOHibernateRestoreCodeMapEnd, ptoa_64(lastMapPage)); - if (!count) - { - if (!stage) - break; - stage--; - srcPhys = 0; - continue; + debug_code(kIOHibernateRestoreCodeMapVirt, 
(uintptr_t) map); + debug_code(kIOHibernateRestoreCodeHandoffPages, ptoa_64(handoffPages)); + debug_code(kIOHibernateRestoreCodeHandoffCount, handoffPageCount); + + // knock all the image pages to be used out of free map + for (ppnum = atop_64(headerPhys); ppnum <= lastImagePage; ppnum++) { + hibernate_page_bitset(map, FALSE, ppnum); + } + // knock all the handoff pages to be used out of free map + for (ppnum = handoffPages; ppnum < (handoffPages + handoffPageCount); ppnum++) { + hibernate_page_bitset(map, FALSE, ppnum); + } + + nextFree = 0; + hibernate_page_list_grab(map, &nextFree); + + sum = gIOHibernateCurrentHeader->actualRestore1Sum; + gIOHibernateCurrentHeader->diag[0] = atop_64(headerPhys); + gIOHibernateCurrentHeader->diag[1] = sum; + gIOHibernateCurrentHeader->trampolineTime = 0; + + uncompressedPages = 0; + conflictCount = 0; + copyPageListHeadPage = 0; + copyPageList = 0; + copyPageIndex = PAGE_SIZE >> 2; + + compressedSize = PAGE_SIZE; + stage = 2; + count = 0; + srcPhys = 0; + + if (gIOHibernateCurrentHeader->previewSize) { + pageIndexPhys = headerPhys + + (offsetof(IOHibernateImageHeader, fileExtentMap) + + gIOHibernateCurrentHeader->fileExtentMapSize + + ptoa_32(gIOHibernateCurrentHeader->restore1PageCount)); + imageReadPhys = (pageIndexPhys + gIOHibernateCurrentHeader->previewPageListSize); + lastPageIndexPage = atop_64(imageReadPhys); + pageIndexSource = (uint32_t *) pal_hib_map(IMAGE2_AREA, pageIndexPhys); + } else { + pageIndexPhys = 0; + lastPageIndexPage = 0; + imageReadPhys = (mapPhys + gIOHibernateCurrentHeader->bitmapSize); } - for (page = 0; page < count; page++, ppnum++) - { - uint32_t tag; - int conflicts; + debug_code(kIOHibernateRestoreCodePageIndexStart, pageIndexPhys); + debug_code(kIOHibernateRestoreCodePageIndexEnd, ptoa_64(lastPageIndexPage)); - src = (uint32_t *) pal_hib_map(IMAGE_AREA, srcPhys); + while (1) { + switch (stage) { + case 2: + // copy handoff data + count = srcPhys ? 
0 : handoffPageCount; + if (!count) { + break; + } + if (count > gIOHibernateHandoffPageCount) { + count = gIOHibernateHandoffPageCount; + } + srcPhys = ptoa_64(handoffPages); + break; - if (2 == stage) ppnum = gIOHibernateHandoffPages[page]; - else if (!stage) - { - tag = *src++; + case 1: + // copy pageIndexSource pages == preview image data + if (!srcPhys) { + if (!pageIndexPhys) { + break; + } + srcPhys = imageReadPhys; + } + ppnum = pageIndexSource[0]; + count = pageIndexSource[1]; + pageIndexSource += 2; + pageIndexPhys += 2 * sizeof(pageIndexSource[0]); + imageReadPhys = srcPhys; + break; + + case 0: + // copy pages + if (!srcPhys) { + srcPhys = (mapPhys + gIOHibernateCurrentHeader->bitmapSize); + } + src = (uint32_t *) pal_hib_map(IMAGE_AREA, srcPhys); + ppnum = src[0]; + count = src[1]; + srcPhys += 2 * sizeof(*src); + imageReadPhys = srcPhys; + break; + } + + + if (!count) { + if (!stage) { + break; + } + stage--; + srcPhys = 0; + continue; + } + + for (page = 0; page < count; page++, ppnum++) { + uint32_t tag; + int conflicts; + + src = (uint32_t *) pal_hib_map(IMAGE_AREA, srcPhys); + + if (2 == stage) { + ppnum = gIOHibernateHandoffPages[page]; + } else if (!stage) { + tag = *src++; // debug_code(kIOHibernateRestoreCodeTag, (uintptr_t) tag); - srcPhys += sizeof(*src); - compressedSize = kIOHibernateTagLength & tag; - } + srcPhys += sizeof(*src); + compressedSize = kIOHibernateTagLength & tag; + } - conflicts = (ppnum >= atop_64(mapPhys)) && (ppnum <= lastMapPage); + conflicts = (ppnum >= atop_64(mapPhys)) && (ppnum <= lastMapPage); - conflicts |= ((ppnum >= atop_64(imageReadPhys)) && (ppnum <= lastImagePage)); + conflicts |= ((ppnum >= atop_64(imageReadPhys)) && (ppnum <= lastImagePage)); - if (stage >= 2) - conflicts |= ((ppnum >= atop_64(srcPhys)) && (ppnum <= (handoffPages + handoffPageCount - 1))); + if (stage >= 2) { + conflicts |= ((ppnum >= atop_64(srcPhys)) && (ppnum <= (handoffPages + handoffPageCount - 1))); + } - if (stage >= 1) - conflicts |= ((ppnum >= atop_64(pageIndexPhys)) && (ppnum <= lastPageIndexPage)); + if (stage >= 1) { + conflicts |= ((ppnum >= atop_64(pageIndexPhys)) && (ppnum <= lastPageIndexPage)); + } - if (!conflicts) - { - pageSum = store_one_page(gIOHibernateCurrentHeader->processorFlags, - src, compressedSize, 0, ppnum); - if (stage != 2) - sum += pageSum; - uncompressedPages++; - } - else - { - uint32_t bufferPage = 0; - uint32_t * dst; + if (!conflicts) { + pageSum = store_one_page(gIOHibernateCurrentHeader->processorFlags, + src, compressedSize, 0, ppnum); + if (stage != 2) { + sum += pageSum; + } + uncompressedPages++; + } else { + uint32_t bufferPage = 0; + uint32_t * dst; // debug_code(kIOHibernateRestoreCodeConflictPage, ppnum); // debug_code(kIOHibernateRestoreCodeConflictSource, (uintptr_t) src); - conflictCount++; - if (compressedSize) - { - // alloc new buffer page - bufferPage = hibernate_page_list_grab(map, &nextFree); - dst = (uint32_t *)pal_hib_map(DEST_COPY_AREA, ptoa_64(bufferPage)); - __nosan_memcpy(dst, src, compressedSize); - } - if (copyPageIndex > ((PAGE_SIZE >> 2) - 3)) - { - // alloc new copy list page - pageListPage = hibernate_page_list_grab(map, &nextFree); - // link to current - if (copyPageList) { - copyPageList[1] = pageListPage; - } else { - copyPageListHeadPage = pageListPage; - } - copyPageList = (uint32_t *)pal_hib_map(SRC_COPY_AREA, - ptoa_64(pageListPage)); - copyPageList[1] = 0; - copyPageIndex = 2; + conflictCount++; + if (compressedSize) { + // alloc new buffer page + bufferPage = 
hibernate_page_list_grab(map, &nextFree); + dst = (uint32_t *)pal_hib_map(DEST_COPY_AREA, ptoa_64(bufferPage)); + __nosan_memcpy(dst, src, compressedSize); + } + if (copyPageIndex > ((PAGE_SIZE >> 2) - 3)) { + // alloc new copy list page + pageListPage = hibernate_page_list_grab(map, &nextFree); + // link to current + if (copyPageList) { + copyPageList[1] = pageListPage; + } else { + copyPageListHeadPage = pageListPage; + } + copyPageList = (uint32_t *)pal_hib_map(SRC_COPY_AREA, + ptoa_64(pageListPage)); + copyPageList[1] = 0; + copyPageIndex = 2; + } + copyPageList[copyPageIndex++] = ppnum; + copyPageList[copyPageIndex++] = bufferPage; + copyPageList[copyPageIndex++] = (compressedSize | (stage << 24)); + copyPageList[0] = copyPageIndex; + } + srcPhys += ((compressedSize + 3) & ~3); + src += ((compressedSize + 3) >> 2); } - copyPageList[copyPageIndex++] = ppnum; - copyPageList[copyPageIndex++] = bufferPage; - copyPageList[copyPageIndex++] = (compressedSize | (stage << 24)); - copyPageList[0] = copyPageIndex; - } - srcPhys += ((compressedSize + 3) & ~3); - src += ((compressedSize + 3) >> 2); } - } - - /* src points to the last page restored, so we need to skip over that */ - hibernateRestorePALState(src); - - // -- copy back conflicts - - pageListPage = copyPageListHeadPage; - while (pageListPage) - { - copyPageList = (uint32_t *)pal_hib_map(COPY_PAGE_AREA, ptoa_64(pageListPage)); - for (copyPageIndex = 2; copyPageIndex < copyPageList[0]; copyPageIndex += 3) - { - ppnum = copyPageList[copyPageIndex + 0]; - srcPhys = ptoa_64(copyPageList[copyPageIndex + 1]); - src = (uint32_t *) pal_hib_map(SRC_COPY_AREA, srcPhys); - compressedSize = copyPageList[copyPageIndex + 2]; - stage = compressedSize >> 24; - compressedSize &= 0x1FFF; - pageSum = store_one_page(gIOHibernateCurrentHeader->processorFlags, - src, compressedSize, 0, ppnum); - if (stage != 2) - sum += pageSum; - uncompressedPages++; + + /* src points to the last page restored, so we need to skip over that */ + hibernateRestorePALState(src); + + // -- copy back conflicts + + pageListPage = copyPageListHeadPage; + while (pageListPage) { + copyPageList = (uint32_t *)pal_hib_map(COPY_PAGE_AREA, ptoa_64(pageListPage)); + for (copyPageIndex = 2; copyPageIndex < copyPageList[0]; copyPageIndex += 3) { + ppnum = copyPageList[copyPageIndex + 0]; + srcPhys = ptoa_64(copyPageList[copyPageIndex + 1]); + src = (uint32_t *) pal_hib_map(SRC_COPY_AREA, srcPhys); + compressedSize = copyPageList[copyPageIndex + 2]; + stage = compressedSize >> 24; + compressedSize &= 0x1FFF; + pageSum = store_one_page(gIOHibernateCurrentHeader->processorFlags, + src, compressedSize, 0, ppnum); + if (stage != 2) { + sum += pageSum; + } + uncompressedPages++; + } + pageListPage = copyPageList[1]; } - pageListPage = copyPageList[1]; - } - pal_hib_patchup(); + pal_hib_patchup(); - // -- image has been destroyed... + // -- image has been destroyed... 
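/*
 * Editor's sketch (illustrative, not part of the patch): the conflict copy
 * list rebuilt in the copy-back loop above is a chain of page-sized arrays
 * of 32-bit words. Word 0 holds the index one past the last used entry,
 * word 1 the page number of the next list page (0 terminates the chain),
 * and entries are (ppnum, bufferPage, compressedSize | stage << 24) triples
 * starting at index 2. map_page() is a hypothetical stand-in for
 * pal_hib_map(); the decode mirrors the loop above, including the 0x1FFF
 * size mask.
 */
#include <stdint.h>

extern uint32_t *map_page(uint32_t ppnum);      /* hypothetical mapping helper */

static void
walk_copy_list(uint32_t headPage)
{
	uint32_t pageListPage = headPage;

	while (pageListPage) {
		uint32_t *list = map_page(pageListPage);
		for (uint32_t idx = 2; idx < list[0]; idx += 3) {
			uint32_t ppnum   = list[idx + 0];  /* destination physical page */
			uint32_t bufPage = list[idx + 1];  /* page holding the staged copy */
			uint32_t word    = list[idx + 2];
			uint32_t stage   = word >> 24;     /* 2 = handoff, 1 = preview, 0 = image */
			uint32_t size    = word & 0x1FFF;  /* compressed byte count */
			/* a real restore would call store_one_page() here */
			(void)ppnum; (void)bufPage; (void)stage; (void)size;
		}
		pageListPage = list[1];                /* follow the chain */
	}
}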
- gIOHibernateCurrentHeader->actualImage1Sum = sum; - gIOHibernateCurrentHeader->actualUncompressedPages = uncompressedPages; - gIOHibernateCurrentHeader->conflictCount = conflictCount; - gIOHibernateCurrentHeader->nextFree = nextFree; + gIOHibernateCurrentHeader->actualImage1Sum = sum; + gIOHibernateCurrentHeader->actualUncompressedPages = uncompressedPages; + gIOHibernateCurrentHeader->conflictCount = conflictCount; + gIOHibernateCurrentHeader->nextFree = nextFree; - gIOHibernateState = kIOHibernateStateWakingFromHibernate; + gIOHibernateState = kIOHibernateStateWakingFromHibernate; - gIOHibernateCurrentHeader->trampolineTime = (((rdtsc64() - timeStart)) >> 8); + gIOHibernateCurrentHeader->trampolineTime = (((rdtsc64() - timeStart)) >> 8); // debug_code('done', 0); #if CONFIG_SLEEP #if defined(__i386__) || defined(__x86_64__) - typedef void (*ResetProc)(void); - ResetProc proc; - proc = HIB_ENTRYPOINT; - // flush caches - __asm__("wbinvd"); - proc(); + typedef void (*ResetProc)(void); + ResetProc proc; + proc = HIB_ENTRYPOINT; + // flush caches + __asm__("wbinvd"); + proc(); #else // implement me #endif #endif - return -1; + return -1; } #if CONFIG_DEBUG @@ -758,13 +767,14 @@ static size_t hibstrlen(const char *s) { size_t l = 0; - while (*s++) + while (*s++) { l++; + } return l; } /* Max number conversion buffer length: a u_quad_t in base 2, plus NUL byte. */ -#define MAXNBUF (sizeof(intmax_t) * NBBY + 1) +#define MAXNBUF (sizeof(intmax_t) * NBBY + 1) /* * Put a NUL-terminated ASCII number (base <= 36) in a buffer in reverse @@ -790,9 +800,10 @@ ksprintn(char *nbuf, uintmax_t num, int base, int *lenp, int upper) c = hibhex2ascii(num2 % base); *++p = upper ? toupper(c) : c; } while (num2 /= base); - if (lenp) + if (lenp) { *lenp = (int)(p - nbuf); - return (p); + } + return p; } /* @@ -838,30 +849,34 @@ hibkvprintf(char const *fmt, void (*func)(int, void*), void *arg, int radix, va_ int stop = 0, retval = 0; num = 0; - if (!func) + if (!func) { d = (char *) arg; - else + } else { d = NULL; + } - if (fmt == NULL) + if (fmt == NULL) { fmt = "(fmt null)\n"; + } - if (radix < 2 || radix > 36) + if (radix < 2 || radix > 36) { radix = 10; + } for (;;) { padc = ' '; width = 0; - while ((ch = (u_char)*fmt++) != '%' || stop) { - if (ch == '\0') - return (retval); + while ((ch = (u_char) * fmt++) != '%' || stop) { + if (ch == '\0') { + return retval; + } PCHAR(ch); } percent = fmt - 1; qflag = 0; lflag = 0; ladjust = 0; sharpflag = 0; neg = 0; sign = 0; dot = 0; dwidth = 0; upper = 0; cflag = 0; hflag = 0; jflag = 0; tflag = 0; zflag = 0; -reswitch: switch (ch = (u_char)*fmt++) { +reswitch: switch (ch = (u_char) * fmt++) { case '.': dot = 1; goto reswitch; @@ -895,39 +910,47 @@ reswitch: switch (ch = (u_char)*fmt++) { } case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': - for (n = 0;; ++fmt) { - n = n * 10 + ch - '0'; - ch = *fmt; - if (ch < '0' || ch > '9') - break; + for (n = 0;; ++fmt) { + n = n * 10 + ch - '0'; + ch = *fmt; + if (ch < '0' || ch > '9') { + break; } - if (dot) + } + if (dot) { dwidth = n; - else + } else { width = n; + } goto reswitch; case 'b': num = (u_int)va_arg(ap, int); p = va_arg(ap, char *); - for (q = ksprintn(nbuf, num, *p++, NULL, 0); *q;) + for (q = ksprintn(nbuf, num, *p++, NULL, 0); *q;) { PCHAR(*q--); + } - if (num == 0) + if (num == 0) { break; + } for (tmp = 0; *p;) { n = *p++; if (num & (1 << (n - 1))) { PCHAR(tmp ? 
',' : '<'); - for (; (n = *p) > ' '; ++p) + for (; (n = *p) > ' '; ++p) { PCHAR(n); + } tmp = 1; - } else - for (; *p > ' '; ++p) + } else { + for (; *p > ' '; ++p) { continue; + } + } } - if (tmp) + if (tmp) { PCHAR('>'); + } break; case 'c': PCHAR(va_arg(ap, int)); @@ -935,15 +958,18 @@ reswitch: switch (ch = (u_char)*fmt++) { case 'D': up = va_arg(ap, u_char *); p = va_arg(ap, char *); - if (!width) + if (!width) { width = 16; - while(width--) { + } + while (width--) { PCHAR(hibhex2ascii(*up >> 4)); PCHAR(hibhex2ascii(*up & 0x0f)); up++; - if (width) - for (q=p;*q;q++) + if (width) { + for (q = p; *q; q++) { PCHAR(*q); + } + } } break; case 'd': @@ -955,8 +981,9 @@ reswitch: switch (ch = (u_char)*fmt++) { if (hflag) { hflag = 0; cflag = 1; - } else + } else { hflag = 1; + } goto reswitch; case 'j': jflag = 1; @@ -965,24 +992,26 @@ reswitch: switch (ch = (u_char)*fmt++) { if (lflag) { lflag = 0; qflag = 1; - } else + } else { lflag = 1; + } goto reswitch; case 'n': - if (jflag) + if (jflag) { *(va_arg(ap, intmax_t *)) = retval; - else if (qflag) + } else if (qflag) { *(va_arg(ap, quad_t *)) = retval; - else if (lflag) + } else if (lflag) { *(va_arg(ap, long *)) = retval; - else if (zflag) + } else if (zflag) { *(va_arg(ap, size_t *)) = retval; - else if (hflag) + } else if (hflag) { *(va_arg(ap, short *)) = retval; - else if (cflag) + } else if (cflag) { *(va_arg(ap, char *)) = retval; - else + } else { *(va_arg(ap, int *)) = retval; + } break; case 'o': base = 8; @@ -998,29 +1027,38 @@ reswitch: switch (ch = (u_char)*fmt++) { goto reswitch; case 'r': base = radix; - if (sign) + if (sign) { goto handle_sign; + } goto handle_nosign; case 's': p = va_arg(ap, char *); - if (p == NULL) + if (p == NULL) { p = "(null)"; - if (!dot) - n = (typeof(n))hibstrlen (p); - else - for (n = 0; n < dwidth && p[n]; n++) + } + if (!dot) { + n = (typeof(n))hibstrlen(p); + } else { + for (n = 0; n < dwidth && p[n]; n++) { continue; + } + } width -= n; - if (!ladjust && width > 0) - while (width--) + if (!ladjust && width > 0) { + while (width--) { PCHAR(padc); - while (n--) + } + } + while (n--) { PCHAR(*p++); - if (ladjust && width > 0) - while (width--) + } + if (ladjust && width > 0) { + while (width--) { PCHAR(padc); + } + } break; case 't': tflag = 1; @@ -1042,40 +1080,42 @@ reswitch: switch (ch = (u_char)*fmt++) { goto reswitch; handle_nosign: sign = 0; - if (jflag) + if (jflag) { num = va_arg(ap, uintmax_t); - else if (qflag) + } else if (qflag) { num = va_arg(ap, u_quad_t); - else if (tflag) + } else if (tflag) { num = va_arg(ap, ptrdiff_t); - else if (lflag) + } else if (lflag) { num = va_arg(ap, u_long); - else if (zflag) + } else if (zflag) { num = va_arg(ap, size_t); - else if (hflag) + } else if (hflag) { num = (u_short)va_arg(ap, int); - else if (cflag) + } else if (cflag) { num = (u_char)va_arg(ap, int); - else + } else { num = va_arg(ap, u_int); + } goto number; handle_sign: - if (jflag) + if (jflag) { num = va_arg(ap, intmax_t); - else if (qflag) + } else if (qflag) { num = va_arg(ap, quad_t); - else if (tflag) + } else if (tflag) { num = va_arg(ap, ptrdiff_t); - else if (lflag) + } else if (lflag) { num = va_arg(ap, long); - else if (zflag) + } else if (zflag) { num = va_arg(ap, ssize_t); - else if (hflag) + } else if (hflag) { num = (short)va_arg(ap, int); - else if (cflag) + } else if (cflag) { num = (char)va_arg(ap, int); - else + } else { num = va_arg(ap, int); + } number: if (sign && (intmax_t)num < 0) { neg = 1; @@ -1083,20 +1123,25 @@ number: } p = ksprintn(nbuf, num, base, &tmp, 
upper); if (sharpflag && num != 0) { - if (base == 8) + if (base == 8) { tmp++; - else if (base == 16) + } else if (base == 16) { tmp += 2; + } } - if (neg) + if (neg) { tmp++; + } if (!ladjust && padc != '0' && width - && (width -= tmp) > 0) - while (width--) + && (width -= tmp) > 0) { + while (width--) { PCHAR(padc); - if (neg) + } + } + if (neg) { PCHAR('-'); + } if (sharpflag && num != 0) { if (base == 8) { PCHAR('0'); @@ -1105,21 +1150,27 @@ number: PCHAR('x'); } } - if (!ladjust && width && (width -= tmp) > 0) - while (width--) + if (!ladjust && width && (width -= tmp) > 0) { + while (width--) { PCHAR(padc); + } + } - while (*p) + while (*p) { PCHAR(*p--); + } - if (ladjust && width && (width -= tmp) > 0) - while (width--) + if (ladjust && width && (width -= tmp) > 0) { + while (width--) { PCHAR(padc); + } + } break; default: - while (percent < fmt) + while (percent < fmt) { PCHAR(*percent++); + } /* * Since we ignore an formatting argument it is no * longer safe to obey the remaining formatting @@ -1152,4 +1203,3 @@ hibprintf(const char *fmt, ...) va_end(ap); } #endif /* CONFIG_DEBUG */ - diff --git a/iokit/Kernel/IOHistogramReporter.cpp b/iokit/Kernel/IOHistogramReporter.cpp index 4c288ccdb..efbd6de16 100644 --- a/iokit/Kernel/IOHistogramReporter.cpp +++ b/iokit/Kernel/IOHistogramReporter.cpp @@ -1,8 +1,8 @@ /* * Copyright (c) 2012-2013 Apple Computer, Inc. All Rights Reserved. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,340 +40,354 @@ OSDefineMetaClassAndStructors(IOHistogramReporter, IOReporter); /* static */ IOHistogramReporter* IOHistogramReporter::with(IOService *reportingService, - IOReportCategories categories, - uint64_t channelID, - const char *channelName, - IOReportUnit unit, - int nSegments, - IOHistogramSegmentConfig *config) + IOReportCategories categories, + uint64_t channelID, + const char *channelName, + IOReportUnit unit, + int nSegments, + IOHistogramSegmentConfig *config) { - IOHistogramReporter *reporter = new IOHistogramReporter; - - const OSSymbol *tmpChannelName = NULL; - - if (reporter) { - - if (channelName) - tmpChannelName = OSSymbol::withCString(channelName); - - if(reporter->initWith(reportingService, categories, - channelID, tmpChannelName, - unit, nSegments, config)) { - return reporter; - } - } - OSSafeReleaseNULL(reporter); - OSSafeReleaseNULL(tmpChannelName); - - return 0; + IOHistogramReporter *reporter = new IOHistogramReporter; + + const OSSymbol *tmpChannelName = NULL; + + if (reporter) { + if (channelName) { + tmpChannelName = OSSymbol::withCString(channelName); + } + + if (reporter->initWith(reportingService, categories, + channelID, tmpChannelName, + unit, nSegments, config)) { + return reporter; + } + } + OSSafeReleaseNULL(reporter); + OSSafeReleaseNULL(tmpChannelName); + + return 0; } bool IOHistogramReporter::initWith(IOService *reportingService, - IOReportCategories categories, - uint64_t channelID, - const OSSymbol *channelName, - IOReportUnit unit, - int nSegments, - IOHistogramSegmentConfig *config) + IOReportCategories categories, + uint64_t channelID, + const OSSymbol *channelName, + IOReportUnit unit, + int nSegments, + IOHistogramSegmentConfig *config) { - bool result = false; - IOReturn res; // for PREFL_MEMOP - size_t configSize, elementsSize, eCountsSize, boundsSize; - int cnt, cnt2, cnt3 = 0; - int64_t bucketBound = 0, previousBucketBound = 0; - - // analyzer appeasement - configSize = elementsSize = eCountsSize = boundsSize = 0; - - IORLOG("IOHistogramReporter::initWith"); - - // For now, this reporter is currently limited to a single channel - _nChannels = 1; - - IOReportChannelType channelType = { - .categories = categories, - .report_format = kIOReportFormatHistogram, - .nelements = 0, // Initialized when Config is unpacked - .element_idx = 0 - }; - - if (super::init(reportingService, channelType, unit) != true) { - IORLOG("%s - ERROR: super::init failed", __func__); - result = false; - goto finish; - } - - // Make sure to call this after the commit init phase - if (channelName) _channelNames->setObject(channelName); - - _segmentCount = nSegments; - if (_segmentCount == 0) { - IORLOG("IOReportHistogram init ERROR. 
No configuration provided!"); - result = false; - goto finish; - } - - IORLOG("%s - %u segment(s)", __func__, _segmentCount); - - PREFL_MEMOP_FAIL(_segmentCount, IOHistogramSegmentConfig); - configSize = (size_t)_segmentCount * sizeof(IOHistogramSegmentConfig); - _histogramSegmentsConfig = (IOHistogramSegmentConfig*)IOMalloc(configSize); - if (!_histogramSegmentsConfig) goto finish; - memcpy(_histogramSegmentsConfig, config, configSize); - - // Find out how many elements are need to store the histogram - for (cnt = 0; cnt < _segmentCount; cnt++) { - - _nElements += _histogramSegmentsConfig[cnt].segment_bucket_count; - _channelDimension += _histogramSegmentsConfig[cnt].segment_bucket_count; - - IORLOG("\t\t bucket_base_width: %u | log_scale: %u | buckets: %u", - _histogramSegmentsConfig[cnt].base_bucket_width, - _histogramSegmentsConfig[cnt].scale_flag, - _histogramSegmentsConfig[cnt].segment_bucket_count); - - if (_histogramSegmentsConfig[cnt].scale_flag > 1 - || _histogramSegmentsConfig[cnt].base_bucket_width == 0) { - result = false; - goto finish; - } - - } - - // Update the channel type with discovered dimension - _channelType.nelements = _channelDimension; - - IORLOG("%s - %u channel(s) of dimension %u", - __func__, _nChannels, _channelDimension); - - IORLOG("%s %d segments for a total dimension of %d elements", - __func__, _nChannels, _nElements); - - // Allocate memory for the array of report elements - PREFL_MEMOP_FAIL(_nElements, IOReportElement); - elementsSize = (size_t)_nElements * sizeof(IOReportElement); - _elements = (IOReportElement *)IOMalloc(elementsSize); - if (!_elements) goto finish; - memset(_elements, 0, elementsSize); - - // Allocate memory for the array of element watch count - PREFL_MEMOP_FAIL(_nElements, int); - eCountsSize = (size_t)_nChannels * sizeof(int); - _enableCounts = (int *)IOMalloc(eCountsSize); - if (!_enableCounts) goto finish; - memset(_enableCounts, 0, eCountsSize); - - lockReporter(); - for (cnt2 = 0; cnt2 < _channelDimension; cnt2++) { - IOHistogramReportValues hist_values; - if (copyElementValues(cnt2, (IOReportElementValues*)&hist_values)){ - goto finish; - } - hist_values.bucket_min = kIOReportInvalidIntValue; - hist_values.bucket_max = kIOReportInvalidIntValue; - hist_values.bucket_sum = kIOReportInvalidIntValue; - if (setElementValues(cnt2, (IOReportElementValues*)&hist_values)){ - goto finish; - } - - // Setup IOReporter's channel IDs - _elements[cnt2].channel_id = channelID; - - // Setup IOReporter's reporting provider service - _elements[cnt2].provider_id = _driver_id; - - // Setup IOReporter's channel type - _elements[cnt2].channel_type = _channelType; - _elements[cnt2].channel_type.element_idx = cnt2; - - //IOREPORTER_DEBUG_ELEMENT(cnt2); - } - unlockReporter(); - - // Allocate memory for the bucket upper bounds - PREFL_MEMOP_FAIL(_nElements, uint64_t); - boundsSize = (size_t)_nElements * sizeof(uint64_t); - _bucketBounds = (int64_t*)IOMalloc(boundsSize); - if (!_bucketBounds) goto finish; - memset(_bucketBounds, 0, boundsSize); - _bucketCount = _nElements; - - for (cnt = 0; cnt < _segmentCount; cnt++) { - - if (_histogramSegmentsConfig[cnt].segment_bucket_count > INT_MAX - || _histogramSegmentsConfig[cnt].base_bucket_width > INT_MAX) { - goto finish; - } - for (cnt2 = 0; cnt2 < (int)_histogramSegmentsConfig[cnt].segment_bucket_count; cnt2++) { - - if (cnt3 >= _nElements) { - IORLOG("ERROR: _bucketBounds init"); - result = false; - goto finish; - } - - if (_histogramSegmentsConfig[cnt].scale_flag) { - // FIXME: Could use pow() but not 
sure how to include math.h - int64_t power = 1; - int exponent = cnt2 + 1; - while (exponent) { - power *= _histogramSegmentsConfig[cnt].base_bucket_width; - exponent--; - } - bucketBound = power; - } - - else { - bucketBound = _histogramSegmentsConfig[cnt].base_bucket_width * - ((unsigned)cnt2 + 1); - } - - if (previousBucketBound >= bucketBound) { - IORLOG("Histogram ERROR: bucket bound does not increase linearly (segment %u / bucket # %u)", - cnt, cnt2); - result = false; - goto finish; - } - - _bucketBounds[cnt3] = bucketBound; - // IORLOG("_bucketBounds[%u] = %llu", cnt3, bucketBound); - previousBucketBound = _bucketBounds[cnt3]; - cnt3++; - } - } - - // success - result = true; - + bool result = false; + IOReturn res; // for PREFL_MEMOP + size_t configSize, elementsSize, eCountsSize, boundsSize; + int cnt, cnt2, cnt3 = 0; + int64_t bucketBound = 0, previousBucketBound = 0; + + // analyzer appeasement + configSize = elementsSize = eCountsSize = boundsSize = 0; + + IORLOG("IOHistogramReporter::initWith"); + + // For now, this reporter is currently limited to a single channel + _nChannels = 1; + + IOReportChannelType channelType = { + .categories = categories, + .report_format = kIOReportFormatHistogram, + .nelements = 0, // Initialized when Config is unpacked + .element_idx = 0 + }; + + if (super::init(reportingService, channelType, unit) != true) { + IORLOG("%s - ERROR: super::init failed", __func__); + result = false; + goto finish; + } + + // Make sure to call this after the commit init phase + if (channelName) { + _channelNames->setObject(channelName); + } + + _segmentCount = nSegments; + if (_segmentCount == 0) { + IORLOG("IOReportHistogram init ERROR. No configuration provided!"); + result = false; + goto finish; + } + + IORLOG("%s - %u segment(s)", __func__, _segmentCount); + + PREFL_MEMOP_FAIL(_segmentCount, IOHistogramSegmentConfig); + configSize = (size_t)_segmentCount * sizeof(IOHistogramSegmentConfig); + _histogramSegmentsConfig = (IOHistogramSegmentConfig*)IOMalloc(configSize); + if (!_histogramSegmentsConfig) { + goto finish; + } + memcpy(_histogramSegmentsConfig, config, configSize); + + // Find out how many elements are need to store the histogram + for (cnt = 0; cnt < _segmentCount; cnt++) { + _nElements += _histogramSegmentsConfig[cnt].segment_bucket_count; + _channelDimension += _histogramSegmentsConfig[cnt].segment_bucket_count; + + IORLOG("\t\t bucket_base_width: %u | log_scale: %u | buckets: %u", + _histogramSegmentsConfig[cnt].base_bucket_width, + _histogramSegmentsConfig[cnt].scale_flag, + _histogramSegmentsConfig[cnt].segment_bucket_count); + + if (_histogramSegmentsConfig[cnt].scale_flag > 1 + || _histogramSegmentsConfig[cnt].base_bucket_width == 0) { + result = false; + goto finish; + } + } + + // Update the channel type with discovered dimension + _channelType.nelements = _channelDimension; + + IORLOG("%s - %u channel(s) of dimension %u", + __func__, _nChannels, _channelDimension); + + IORLOG("%s %d segments for a total dimension of %d elements", + __func__, _nChannels, _nElements); + + // Allocate memory for the array of report elements + PREFL_MEMOP_FAIL(_nElements, IOReportElement); + elementsSize = (size_t)_nElements * sizeof(IOReportElement); + _elements = (IOReportElement *)IOMalloc(elementsSize); + if (!_elements) { + goto finish; + } + memset(_elements, 0, elementsSize); + + // Allocate memory for the array of element watch count + PREFL_MEMOP_FAIL(_nElements, int); + eCountsSize = (size_t)_nChannels * sizeof(int); + _enableCounts = (int 
*)IOMalloc(eCountsSize); + if (!_enableCounts) { + goto finish; + } + memset(_enableCounts, 0, eCountsSize); + + lockReporter(); + for (cnt2 = 0; cnt2 < _channelDimension; cnt2++) { + IOHistogramReportValues hist_values; + if (copyElementValues(cnt2, (IOReportElementValues*)&hist_values)) { + goto finish; + } + hist_values.bucket_min = kIOReportInvalidIntValue; + hist_values.bucket_max = kIOReportInvalidIntValue; + hist_values.bucket_sum = kIOReportInvalidIntValue; + if (setElementValues(cnt2, (IOReportElementValues*)&hist_values)) { + goto finish; + } + + // Setup IOReporter's channel IDs + _elements[cnt2].channel_id = channelID; + + // Setup IOReporter's reporting provider service + _elements[cnt2].provider_id = _driver_id; + + // Setup IOReporter's channel type + _elements[cnt2].channel_type = _channelType; + _elements[cnt2].channel_type.element_idx = cnt2; + + //IOREPORTER_DEBUG_ELEMENT(cnt2); + } + unlockReporter(); + + // Allocate memory for the bucket upper bounds + PREFL_MEMOP_FAIL(_nElements, uint64_t); + boundsSize = (size_t)_nElements * sizeof(uint64_t); + _bucketBounds = (int64_t*)IOMalloc(boundsSize); + if (!_bucketBounds) { + goto finish; + } + memset(_bucketBounds, 0, boundsSize); + _bucketCount = _nElements; + + for (cnt = 0; cnt < _segmentCount; cnt++) { + if (_histogramSegmentsConfig[cnt].segment_bucket_count > INT_MAX + || _histogramSegmentsConfig[cnt].base_bucket_width > INT_MAX) { + goto finish; + } + for (cnt2 = 0; cnt2 < (int)_histogramSegmentsConfig[cnt].segment_bucket_count; cnt2++) { + if (cnt3 >= _nElements) { + IORLOG("ERROR: _bucketBounds init"); + result = false; + goto finish; + } + + if (_histogramSegmentsConfig[cnt].scale_flag) { + // FIXME: Could use pow() but not sure how to include math.h + int64_t power = 1; + int exponent = cnt2 + 1; + while (exponent) { + power *= _histogramSegmentsConfig[cnt].base_bucket_width; + exponent--; + } + bucketBound = power; + } else { + bucketBound = _histogramSegmentsConfig[cnt].base_bucket_width * + ((unsigned)cnt2 + 1); + } + + if (previousBucketBound >= bucketBound) { + IORLOG("Histogram ERROR: bucket bound does not increase linearly (segment %u / bucket # %u)", + cnt, cnt2); + result = false; + goto finish; + } + + _bucketBounds[cnt3] = bucketBound; + // IORLOG("_bucketBounds[%u] = %llu", cnt3, bucketBound); + previousBucketBound = _bucketBounds[cnt3]; + cnt3++; + } + } + + // success + result = true; + finish: - return result; + return result; } void IOHistogramReporter::free(void) { - if (_bucketBounds) { - PREFL_MEMOP_PANIC(_nElements, int64_t); - IOFree(_bucketBounds, (size_t)_nElements * sizeof(int64_t)); - } - if (_histogramSegmentsConfig) { - PREFL_MEMOP_PANIC(_segmentCount, IOHistogramSegmentConfig); - IOFree(_histogramSegmentsConfig, - (size_t)_segmentCount * sizeof(IOHistogramSegmentConfig)); - } - - super::free(); + if (_bucketBounds) { + PREFL_MEMOP_PANIC(_nElements, int64_t); + IOFree(_bucketBounds, (size_t)_nElements * sizeof(int64_t)); + } + if (_histogramSegmentsConfig) { + PREFL_MEMOP_PANIC(_segmentCount, IOHistogramSegmentConfig); + IOFree(_histogramSegmentsConfig, + (size_t)_segmentCount * sizeof(IOHistogramSegmentConfig)); + } + + super::free(); } IOReportLegendEntry* IOHistogramReporter::handleCreateLegend(void) { - IOReportLegendEntry *rval = NULL, *legendEntry = NULL; - OSData *tmpConfigData = NULL; - OSDictionary *tmpDict; // no refcount - - legendEntry = super::handleCreateLegend(); - if (!legendEntry) goto finish; - - PREFL_MEMOP_PANIC(_segmentCount, IOHistogramSegmentConfig); - tmpConfigData 
= OSData::withBytes(_histogramSegmentsConfig, - (unsigned)_segmentCount * - sizeof(IOHistogramSegmentConfig)); - if (!tmpConfigData) goto finish; - - tmpDict = OSDynamicCast(OSDictionary, - legendEntry->getObject(kIOReportLegendInfoKey)); - if (!tmpDict) goto finish; - - tmpDict->setObject(kIOReportLegendConfigKey, tmpConfigData); - - // success - rval = legendEntry; + IOReportLegendEntry *rval = NULL, *legendEntry = NULL; + OSData *tmpConfigData = NULL; + OSDictionary *tmpDict; // no refcount + + legendEntry = super::handleCreateLegend(); + if (!legendEntry) { + goto finish; + } + + PREFL_MEMOP_PANIC(_segmentCount, IOHistogramSegmentConfig); + tmpConfigData = OSData::withBytes(_histogramSegmentsConfig, + (unsigned)_segmentCount * + sizeof(IOHistogramSegmentConfig)); + if (!tmpConfigData) { + goto finish; + } + + tmpDict = OSDynamicCast(OSDictionary, + legendEntry->getObject(kIOReportLegendInfoKey)); + if (!tmpDict) { + goto finish; + } + + tmpDict->setObject(kIOReportLegendConfigKey, tmpConfigData); + + // success + rval = legendEntry; finish: - if (tmpConfigData) tmpConfigData->release(); - if (!rval && legendEntry) { - legendEntry->release(); - } + if (tmpConfigData) { + tmpConfigData->release(); + } + if (!rval && legendEntry) { + legendEntry->release(); + } - return rval; + return rval; } IOReturn -IOHistogramReporter::overrideBucketValues(unsigned int index, - uint64_t bucket_hits, - int64_t bucket_min, - int64_t bucket_max, - int64_t bucket_sum) +IOHistogramReporter::overrideBucketValues(unsigned int index, + uint64_t bucket_hits, + int64_t bucket_min, + int64_t bucket_max, + int64_t bucket_sum) { - IOReturn result; - IOHistogramReportValues bucket; - lockReporter(); + IOReturn result; + IOHistogramReportValues bucket; + lockReporter(); - if (index >= (unsigned int)_bucketCount) { - result = kIOReturnBadArgument; - goto finish; - } + if (index >= (unsigned int)_bucketCount) { + result = kIOReturnBadArgument; + goto finish; + } - bucket.bucket_hits = bucket_hits; - bucket.bucket_min = bucket_min; - bucket.bucket_max = bucket_max; - bucket.bucket_sum = bucket_sum; + bucket.bucket_hits = bucket_hits; + bucket.bucket_min = bucket_min; + bucket.bucket_max = bucket_max; + bucket.bucket_sum = bucket_sum; - result = setElementValues(index, (IOReportElementValues *)&bucket); + result = setElementValues(index, (IOReportElementValues *)&bucket); finish: - unlockReporter(); - return result; + unlockReporter(); + return result; } int IOHistogramReporter::tallyValue(int64_t value) { - int result = -1; - int cnt = 0, element_index = 0; - IOHistogramReportValues hist_values; - - lockReporter(); - - // Iterate over _bucketCount minus one to make last bucket of infinite width - for (cnt = 0; cnt < _bucketCount - 1; cnt++) { - if (value <= _bucketBounds[cnt]) break; - } - - element_index = cnt; - - if (copyElementValues(element_index, (IOReportElementValues *)&hist_values) != kIOReturnSuccess) { - goto finish; - } - - // init stats on first hit - if (hist_values.bucket_hits == 0) { - hist_values.bucket_min = hist_values.bucket_max = value; - hist_values.bucket_sum = 0; // += is below - } - - // update all values - if (value < hist_values.bucket_min) { - hist_values.bucket_min = value; - } else if (value > hist_values.bucket_max) { - hist_values.bucket_max = value; - } - hist_values.bucket_sum += value; - hist_values.bucket_hits++; - - if (setElementValues(element_index, (IOReportElementValues *)&hist_values) - != kIOReturnSuccess) { - goto finish; - } - - // success! 
- result = element_index; - + int result = -1; + int cnt = 0, element_index = 0; + IOHistogramReportValues hist_values; + + lockReporter(); + + // Iterate over _bucketCount minus one to make last bucket of infinite width + for (cnt = 0; cnt < _bucketCount - 1; cnt++) { + if (value <= _bucketBounds[cnt]) { + break; + } + } + + element_index = cnt; + + if (copyElementValues(element_index, (IOReportElementValues *)&hist_values) != kIOReturnSuccess) { + goto finish; + } + + // init stats on first hit + if (hist_values.bucket_hits == 0) { + hist_values.bucket_min = hist_values.bucket_max = value; + hist_values.bucket_sum = 0; // += is below + } + + // update all values + if (value < hist_values.bucket_min) { + hist_values.bucket_min = value; + } else if (value > hist_values.bucket_max) { + hist_values.bucket_max = value; + } + hist_values.bucket_sum += value; + hist_values.bucket_hits++; + + if (setElementValues(element_index, (IOReportElementValues *)&hist_values) + != kIOReturnSuccess) { + goto finish; + } + + // success! + result = element_index; + finish: - unlockReporter(); - return result; + unlockReporter(); + return result; } diff --git a/iokit/Kernel/IOInterleavedMemoryDescriptor.cpp b/iokit/Kernel/IOInterleavedMemoryDescriptor.cpp index 50dbcf2fd..d4ad771ff 100644 --- a/iokit/Kernel/IOInterleavedMemoryDescriptor.cpp +++ b/iokit/Kernel/IOInterleavedMemoryDescriptor.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,242 +33,256 @@ OSDefineMetaClassAndStructors(IOInterleavedMemoryDescriptor, IOMemoryDescriptor) IOInterleavedMemoryDescriptor * IOInterleavedMemoryDescriptor::withCapacity( - IOByteCount capacity, - IODirection direction ) + IOByteCount capacity, + IODirection direction ) { - // - // Create a new IOInterleavedMemoryDescriptor. The "buffer" will be made up - // of several memory descriptors, that are to be chained end-to-end to make up - // a single memory descriptor. - // - - IOInterleavedMemoryDescriptor * me = new IOInterleavedMemoryDescriptor; - - if ( me && !me->initWithCapacity( - /* capacity */ capacity, - /* direction */ direction )) - { - me->release(); - me = 0; - } - - return me; + // + // Create a new IOInterleavedMemoryDescriptor. The "buffer" will be made up + // of several memory descriptors, that are to be chained end-to-end to make up + // a single memory descriptor. 
+ // + + IOInterleavedMemoryDescriptor * me = new IOInterleavedMemoryDescriptor; + + if (me && !me->initWithCapacity( + /* capacity */ capacity, + /* direction */ direction )) { + me->release(); + me = 0; + } + + return me; } -bool IOInterleavedMemoryDescriptor::initWithCapacity( - IOByteCount capacity, - IODirection direction ) +bool +IOInterleavedMemoryDescriptor::initWithCapacity( + IOByteCount capacity, + IODirection direction ) { - // - // Initialize an IOInterleavedMemoryDescriptor. The "buffer" will be made up - // of several memory descriptors, that are to be chained end-to-end to make up - // a single memory descriptor. - // + // + // Initialize an IOInterleavedMemoryDescriptor. The "buffer" will be made up + // of several memory descriptors, that are to be chained end-to-end to make up + // a single memory descriptor. + // + + assert(capacity); - assert(capacity); + // Ask our superclass' opinion. + if (super::init() == false) { + return false; + } - // Ask our superclass' opinion. - if ( super::init() == false ) return false; - - // Initialize our minimal state. + // Initialize our minimal state. - _flags = direction; + _flags = direction; #ifndef __LP64__ - _direction = (IODirection) (_flags & kIOMemoryDirectionMask); + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); #endif /* !__LP64__ */ - _length = 0; - _mappings = 0; - _tag = 0; - _descriptorCount = 0; - _descriptors = IONew(IOMemoryDescriptor *, capacity); - _descriptorOffsets = IONew(IOByteCount, capacity); - _descriptorLengths = IONew(IOByteCount, capacity); + _length = 0; + _mappings = 0; + _tag = 0; + _descriptorCount = 0; + _descriptors = IONew(IOMemoryDescriptor *, capacity); + _descriptorOffsets = IONew(IOByteCount, capacity); + _descriptorLengths = IONew(IOByteCount, capacity); - if ( (_descriptors == 0) || (_descriptorOffsets == 0) || (_descriptorLengths == 0) ) - return false; + if ((_descriptors == 0) || (_descriptorOffsets == 0) || (_descriptorLengths == 0)) { + return false; + } - _descriptorCapacity = capacity; + _descriptorCapacity = capacity; - return true; + return true; } -void IOInterleavedMemoryDescriptor::clearMemoryDescriptors( IODirection direction ) +void +IOInterleavedMemoryDescriptor::clearMemoryDescriptors( IODirection direction ) { - UInt32 index; + UInt32 index; - for ( index = 0; index < _descriptorCount; index++ ) - { - if ( _descriptorPrepared ) - _descriptors[index]->complete(getDirection()); + for (index = 0; index < _descriptorCount; index++) { + if (_descriptorPrepared) { + _descriptors[index]->complete(getDirection()); + } - _descriptors[index]->release(); - _descriptors[index] = 0; + _descriptors[index]->release(); + _descriptors[index] = 0; - _descriptorOffsets[index] = 0; - _descriptorLengths[index] = 0; - } + _descriptorOffsets[index] = 0; + _descriptorLengths[index] = 0; + } - if ( direction != kIODirectionNone ) - { - _flags = (_flags & ~kIOMemoryDirectionMask) | direction; + if (direction != kIODirectionNone) { + _flags = (_flags & ~kIOMemoryDirectionMask) | direction; #ifndef __LP64__ - _direction = (IODirection) (_flags & kIOMemoryDirectionMask); + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); #endif /* !__LP64__ */ - } - - _descriptorCount = 0; - _length = 0; - _mappings = 0; - _tag = 0; + } + _descriptorCount = 0; + _length = 0; + _mappings = 0; + _tag = 0; }; -bool IOInterleavedMemoryDescriptor::setMemoryDescriptor( - IOMemoryDescriptor * descriptor, - IOByteCount offset, - IOByteCount length ) +bool 
+IOInterleavedMemoryDescriptor::setMemoryDescriptor( + IOMemoryDescriptor * descriptor, + IOByteCount offset, + IOByteCount length ) { - if ( _descriptorPrepared || (_descriptorCount == _descriptorCapacity) ) - return false; + if (_descriptorPrepared || (_descriptorCount == _descriptorCapacity)) { + return false; + } - if ( (offset + length) > descriptor->getLength() ) - return false; + if ((offset + length) > descriptor->getLength()) { + return false; + } // if ( descriptor->getDirection() != getDirection() ) // return false; - descriptor->retain(); - _descriptors[_descriptorCount] = descriptor; - _descriptorOffsets[_descriptorCount] = offset; - _descriptorLengths[_descriptorCount] = length; + descriptor->retain(); + _descriptors[_descriptorCount] = descriptor; + _descriptorOffsets[_descriptorCount] = offset; + _descriptorLengths[_descriptorCount] = length; - _descriptorCount++; + _descriptorCount++; - _length += length; + _length += length; - return true; + return true; } -void IOInterleavedMemoryDescriptor::free() +void +IOInterleavedMemoryDescriptor::free() { - // - // Free all of this object's outstanding resources. - // + // + // Free all of this object's outstanding resources. + // - if ( _descriptors ) - { - for ( unsigned index = 0; index < _descriptorCount; index++ ) - _descriptors[index]->release(); + if (_descriptors) { + for (unsigned index = 0; index < _descriptorCount; index++) { + _descriptors[index]->release(); + } - if ( _descriptors != 0 ) - IODelete(_descriptors, IOMemoryDescriptor *, _descriptorCapacity); + if (_descriptors != 0) { + IODelete(_descriptors, IOMemoryDescriptor *, _descriptorCapacity); + } - if ( _descriptorOffsets != 0 ) - IODelete(_descriptorOffsets, IOMemoryDescriptor *, _descriptorCapacity); + if (_descriptorOffsets != 0) { + IODelete(_descriptorOffsets, IOMemoryDescriptor *, _descriptorCapacity); + } - if ( _descriptorLengths != 0 ) - IODelete(_descriptorLengths, IOMemoryDescriptor *, _descriptorCapacity); - } + if (_descriptorLengths != 0) { + IODelete(_descriptorLengths, IOMemoryDescriptor *, _descriptorCapacity); + } + } - super::free(); + super::free(); } -IOReturn IOInterleavedMemoryDescriptor::prepare(IODirection forDirection) +IOReturn +IOInterleavedMemoryDescriptor::prepare(IODirection forDirection) { - // - // Prepare the memory for an I/O transfer. - // - // This involves paging in the memory and wiring it down for the duration - // of the transfer. The complete() method finishes the processing of the - // memory after the I/O transfer finishes. - // - - unsigned index; - IOReturn status = kIOReturnSuccess; - IOReturn statusUndo; - - if ( forDirection == kIODirectionNone ) - { - forDirection = getDirection(); - } - - for ( index = 0; index < _descriptorCount; index++ ) - { - status = _descriptors[index]->prepare(forDirection); - if ( status != kIOReturnSuccess ) break; - } - - if ( status != kIOReturnSuccess ) - { - for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ ) - { - statusUndo = _descriptors[index]->complete(forDirection); - assert(statusUndo == kIOReturnSuccess); - } - } - - if ( status == kIOReturnSuccess ) _descriptorPrepared = true; - - return status; + // + // Prepare the memory for an I/O transfer. + // + // This involves paging in the memory and wiring it down for the duration + // of the transfer. The complete() method finishes the processing of the + // memory after the I/O transfer finishes. 
+ // + + unsigned index; + IOReturn status = kIOReturnSuccess; + IOReturn statusUndo; + + if (forDirection == kIODirectionNone) { + forDirection = getDirection(); + } + + for (index = 0; index < _descriptorCount; index++) { + status = _descriptors[index]->prepare(forDirection); + if (status != kIOReturnSuccess) { + break; + } + } + + if (status != kIOReturnSuccess) { + for (unsigned indexUndo = 0; indexUndo < index; indexUndo++) { + statusUndo = _descriptors[index]->complete(forDirection); + assert(statusUndo == kIOReturnSuccess); + } + } + + if (status == kIOReturnSuccess) { + _descriptorPrepared = true; + } + + return status; } -IOReturn IOInterleavedMemoryDescriptor::complete(IODirection forDirection) +IOReturn +IOInterleavedMemoryDescriptor::complete(IODirection forDirection) { - // - // Complete processing of the memory after an I/O transfer finishes. - // - // This method shouldn't be called unless a prepare() was previously issued; - // the prepare() and complete() must occur in pairs, before and after an I/O - // transfer. - // - - IOReturn status; - IOReturn statusFinal = kIOReturnSuccess; - - if ( forDirection == kIODirectionNone ) - { - forDirection = getDirection(); - } - - for ( unsigned index = 0; index < _descriptorCount; index++ ) - { - status = _descriptors[index]->complete(forDirection); - if ( status != kIOReturnSuccess ) statusFinal = status; - assert(status == kIOReturnSuccess); - } - - _descriptorPrepared = false; - - return statusFinal; + // + // Complete processing of the memory after an I/O transfer finishes. + // + // This method shouldn't be called unless a prepare() was previously issued; + // the prepare() and complete() must occur in pairs, before and after an I/O + // transfer. + // + + IOReturn status; + IOReturn statusFinal = kIOReturnSuccess; + + if (forDirection == kIODirectionNone) { + forDirection = getDirection(); + } + + for (unsigned index = 0; index < _descriptorCount; index++) { + status = _descriptors[index]->complete(forDirection); + if (status != kIOReturnSuccess) { + statusFinal = status; + } + assert(status == kIOReturnSuccess); + } + + _descriptorPrepared = false; + + return statusFinal; } -addr64_t IOInterleavedMemoryDescriptor::getPhysicalSegment( - IOByteCount offset, - IOByteCount * length, - IOOptionBits options ) +addr64_t +IOInterleavedMemoryDescriptor::getPhysicalSegment( + IOByteCount offset, + IOByteCount * length, + IOOptionBits options ) { - // - // This method returns the physical address of the byte at the given offset - // into the memory, and optionally the length of the physically contiguous - // segment from that offset. - // - - addr64_t pa; - - assert(offset <= _length); - - for ( unsigned index = 0; index < _descriptorCount; index++ ) - { - if ( offset < _descriptorLengths[index] ) - { - pa = _descriptors[index]->getPhysicalSegment(_descriptorOffsets[index] + offset, length, options); - if ((_descriptorLengths[index] - offset) < *length) *length = _descriptorLengths[index] - offset; - return pa; - } - offset -= _descriptorLengths[index]; - } - - if ( length ) *length = 0; - - return 0; + // + // This method returns the physical address of the byte at the given offset + // into the memory, and optionally the length of the physically contiguous + // segment from that offset. 
+ // + + addr64_t pa; + + assert(offset <= _length); + + for (unsigned index = 0; index < _descriptorCount; index++) { + if (offset < _descriptorLengths[index]) { + pa = _descriptors[index]->getPhysicalSegment(_descriptorOffsets[index] + offset, length, options); + if ((_descriptorLengths[index] - offset) < *length) { + *length = _descriptorLengths[index] - offset; + } + return pa; + } + offset -= _descriptorLengths[index]; + } + + if (length) { + *length = 0; + } + + return 0; } diff --git a/iokit/Kernel/IOInterruptAccounting.cpp b/iokit/Kernel/IOInterruptAccounting.cpp index bfaf153b2..3d7f57cdb 100644 --- a/iokit/Kernel/IOInterruptAccounting.cpp +++ b/iokit/Kernel/IOInterruptAccounting.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,23 +31,25 @@ uint32_t gInterruptAccountingStatisticBitmask = #if !defined(__arm__) - /* Disable timestamps for older ARM platforms; they are expensive. */ - IA_GET_ENABLE_BIT(kInterruptAccountingFirstLevelTimeIndex) | - IA_GET_ENABLE_BIT(kInterruptAccountingSecondLevelCPUTimeIndex) | - IA_GET_ENABLE_BIT(kInterruptAccountingSecondLevelSystemTimeIndex) | + /* Disable timestamps for older ARM platforms; they are expensive. 
*/ + IA_GET_ENABLE_BIT(kInterruptAccountingFirstLevelTimeIndex) | + IA_GET_ENABLE_BIT(kInterruptAccountingSecondLevelCPUTimeIndex) | + IA_GET_ENABLE_BIT(kInterruptAccountingSecondLevelSystemTimeIndex) | #endif - IA_GET_ENABLE_BIT(kInterruptAccountingFirstLevelCountIndex) | - IA_GET_ENABLE_BIT(kInterruptAccountingSecondLevelCountIndex); + IA_GET_ENABLE_BIT(kInterruptAccountingFirstLevelCountIndex) | + IA_GET_ENABLE_BIT(kInterruptAccountingSecondLevelCountIndex); IOLock * gInterruptAccountingDataListLock = NULL; queue_head_t gInterruptAccountingDataList; -void interruptAccountingInit(void) +void +interruptAccountingInit(void) { int bootArgValue = 0; - if (PE_parse_boot_argn("interrupt_accounting", &bootArgValue, sizeof(bootArgValue))) - gInterruptAccountingStatisticBitmask = bootArgValue; + if (PE_parse_boot_argn("interrupt_accounting", &bootArgValue, sizeof(bootArgValue))) { + gInterruptAccountingStatisticBitmask = bootArgValue; + } gInterruptAccountingDataListLock = IOLockAlloc(); @@ -56,37 +58,42 @@ void interruptAccountingInit(void) queue_init(&gInterruptAccountingDataList); } -void interruptAccountingDataAddToList(IOInterruptAccountingData * data) +void +interruptAccountingDataAddToList(IOInterruptAccountingData * data) { IOLockLock(gInterruptAccountingDataListLock); - queue_enter(&gInterruptAccountingDataList, data, IOInterruptAccountingData *, chain); + queue_enter(&gInterruptAccountingDataList, data, IOInterruptAccountingData *, chain); IOLockUnlock(gInterruptAccountingDataListLock); } -void interruptAccountingDataRemoveFromList(IOInterruptAccountingData * data) +void +interruptAccountingDataRemoveFromList(IOInterruptAccountingData * data) { IOLockLock(gInterruptAccountingDataListLock); - queue_remove(&gInterruptAccountingDataList, data, IOInterruptAccountingData *, chain); + queue_remove(&gInterruptAccountingDataList, data, IOInterruptAccountingData *, chain); IOLockUnlock(gInterruptAccountingDataListLock); } -void interruptAccountingDataUpdateChannels(IOInterruptAccountingData * data, IOSimpleReporter * reporter) +void +interruptAccountingDataUpdateChannels(IOInterruptAccountingData * data, IOSimpleReporter * reporter) { uint64_t i = 0; for (i = 0; i < IA_NUM_INTERRUPT_ACCOUNTING_STATISTICS; i++) { - if (IA_GET_STATISTIC_ENABLED(i)) - reporter->setValue(IA_GET_CHANNEL_ID(data->interruptIndex, i), data->interruptStatistics[i]); + if (IA_GET_STATISTIC_ENABLED(i)) { + reporter->setValue(IA_GET_CHANNEL_ID(data->interruptIndex, i), data->interruptStatistics[i]); + } } } -void interruptAccountingDataInheritChannels(IOInterruptAccountingData * data, IOSimpleReporter * reporter) +void +interruptAccountingDataInheritChannels(IOInterruptAccountingData * data, IOSimpleReporter * reporter) { uint64_t i = 0; for (i = 0; i < IA_NUM_INTERRUPT_ACCOUNTING_STATISTICS; i++) { - if (IA_GET_STATISTIC_ENABLED(i)) - data->interruptStatistics[i] = reporter->getValue(IA_GET_CHANNEL_ID(data->interruptIndex, i)); + if (IA_GET_STATISTIC_ENABLED(i)) { + data->interruptStatistics[i] = reporter->getValue(IA_GET_CHANNEL_ID(data->interruptIndex, i)); + } } } - diff --git a/iokit/Kernel/IOInterruptController.cpp b/iokit/Kernel/IOInterruptController.cpp index 2ee2e1466..f84357e38 100644 --- a/iokit/Kernel/IOInterruptController.cpp +++ b/iokit/Kernel/IOInterruptController.cpp @@ -3,7 +3,7 @@ * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +12,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,7 +23,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -52,398 +52,428 @@ OSMetaClassDefineReservedUnused(IOInterruptController, 5); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOReturn IOInterruptController::registerInterrupt(IOService *nub, int source, - void *target, - IOInterruptHandler handler, - void *refCon) +IOReturn +IOInterruptController::registerInterrupt(IOService *nub, int source, + void *target, + IOInterruptHandler handler, + void *refCon) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - int wasDisabledSoft; - IOReturn error; - OSData *vectorData; - IOOptionBits options; - bool canBeShared, shouldBeShared, wasAlreadyRegisterd; - - IOService *originalNub = NULL; // Protected by wasAlreadyRegisterd - int originalSource = 0; // Protected by wasAlreadyRegisterd - - - interruptSources = nub->_interruptSources; - vectorData = interruptSources[source].vectorData; - vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); - vector = &vectors[vectorNumber]; - - // Get the lock for this vector. - IOLockLock(vector->interruptLock); - - // Check if the interrupt source can/should be shared. - canBeShared = vectorCanBeShared(vectorNumber, vector); - IODTGetInterruptOptions(nub, source, &options); + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + int wasDisabledSoft; + IOReturn error; + OSData *vectorData; + IOOptionBits options; + bool canBeShared, shouldBeShared, wasAlreadyRegisterd; + + IOService *originalNub = NULL;// Protected by wasAlreadyRegisterd + int originalSource = 0;// Protected by wasAlreadyRegisterd + + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + // Get the lock for this vector. + IOLockLock(vector->interruptLock); + + // Check if the interrupt source can/should be shared. 
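	/*
	 * Editor's note (illustrative, not in the original patch): on entry,
	 * the hunk below reduces to roughly this decision, where "registered"
	 * is vector->interruptRegistered at the time of the call:
	 *
	 *   registered && !canBeShared            -> return kIOReturnNoResources
	 *   registered || (canBeShared &&
	 *                  shouldBeShared)        -> route the vector through an
	 *                                            IOSharedInterruptController
	 *   otherwise                             -> take the vector exclusively
	 */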
+ canBeShared = vectorCanBeShared(vectorNumber, vector); + IODTGetInterruptOptions(nub, source, &options); #if defined(__i386__) || defined(__x86_64__) - int interruptType; - if (OSDynamicCast(IOPlatformDevice, getProvider()) && - (getInterruptType(nub, source, &interruptType) == kIOReturnSuccess) && - (kIOInterruptTypeLevel & interruptType)) - { - options |= kIODTInterruptShared; - } + int interruptType; + if (OSDynamicCast(IOPlatformDevice, getProvider()) && + (getInterruptType(nub, source, &interruptType) == kIOReturnSuccess) && + (kIOInterruptTypeLevel & interruptType)) { + options |= kIODTInterruptShared; + } #endif - shouldBeShared = canBeShared && (options & kIODTInterruptShared); - wasAlreadyRegisterd = vector->interruptRegistered; - - // If the vector is registered and can not be shared return error. - if (wasAlreadyRegisterd && !canBeShared) { - IOLockUnlock(vector->interruptLock); - return kIOReturnNoResources; - } - - // If this vector is already in use, and can be shared (implied), - // or it is not registered and should be shared, - // register as a shared interrupt. - if (wasAlreadyRegisterd || shouldBeShared) { - // If this vector is not already shared, break it out. - if (vector->sharedController == 0) { - // Make the IOShareInterruptController instance - vector->sharedController = new IOSharedInterruptController; - if (vector->sharedController == 0) { - IOLockUnlock(vector->interruptLock); - return kIOReturnNoMemory; - } - - if (wasAlreadyRegisterd) { - // Save the nub and source for the original consumer. - originalNub = vector->nub; - originalSource = vector->source; - - // Physically disable the interrupt, but mark it as being enabled in the hardware. - // The interruptDisabledSoft now indicates the driver's request for enablement. - disableVectorHard(vectorNumber, vector); - vector->interruptDisabledHard = 0; - } - - // Initialize the new shared interrupt controller. - error = vector->sharedController->initInterruptController(this, vectorData); - // If the IOSharedInterruptController could not be initalized, - // if needed, put the original consumer's interrupt back to normal and - // get rid of whats left of the shared controller. - if (error != kIOReturnSuccess) { - if (wasAlreadyRegisterd) enableInterrupt(originalNub, originalSource); - vector->sharedController->release(); - vector->sharedController = 0; - IOLockUnlock(vector->interruptLock); - return error; - } - - // If there was an original consumer try to register it on the shared controller. - if (wasAlreadyRegisterd) { - error = vector->sharedController->registerInterrupt(originalNub, - originalSource, - vector->target, - vector->handler, - vector->refCon); - // If the original consumer could not be moved to the shared controller, - // put the original consumor's interrupt back to normal and - // get rid of whats left of the shared controller. - if (error != kIOReturnSuccess) { - // Save the driver's interrupt enablement state. - wasDisabledSoft = vector->interruptDisabledSoft; - - // Make the interrupt really hard disabled. - vector->interruptDisabledSoft = 1; - vector->interruptDisabledHard = 1; - - // Enable the original consumer's interrupt if needed. 
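/*
 * [Editor's aside - illustration only, not part of the patch]
 * The sharing decision above reduces to: a vector may be shared only when the
 * subclass allows it (vectorCanBeShared), and it should be shared when the
 * device tree asks for it - with x86 additionally forcing the shared option
 * for level-triggered interrupts on IOPlatformDevice providers. A compact
 * restatement of that predicate (names invented for illustration):
 */
struct SharingDecision {
	bool registerExclusive; // take the vector directly
	bool registerShared;    // go through an IOSharedInterruptController
	bool fail;              // kIOReturnNoResources
};

static SharingDecision
decide(bool canBeShared, bool optionShared, bool wasAlreadyRegistered)
{
	SharingDecision d = {false, false, false};
	bool shouldBeShared = canBeShared && optionShared;

	if (wasAlreadyRegistered && !canBeShared) {
		d.fail = true;               // vector taken and not shareable
	} else if (wasAlreadyRegistered || shouldBeShared) {
		d.registerShared = true;     // promote to / join a shared vector
	} else {
		d.registerExclusive = true;  // plain single-owner registration
	}
	return d;
}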
- if (!wasDisabledSoft) originalNub->enableInterrupt(originalSource); - enableInterrupt(originalNub, originalSource); - - vector->sharedController->release(); - vector->sharedController = 0; - IOLockUnlock(vector->interruptLock); - return error; + shouldBeShared = canBeShared && (options & kIODTInterruptShared); + wasAlreadyRegisterd = vector->interruptRegistered; + + // If the vector is registered and can not be shared return error. + if (wasAlreadyRegisterd && !canBeShared) { + IOLockUnlock(vector->interruptLock); + return kIOReturnNoResources; } - } - - // Fill in vector with the shared controller's info. - vector->handler = (IOInterruptHandler)vector->sharedController->getInterruptHandlerAddress(); - vector->nub = vector->sharedController; - vector->source = 0; - vector->target = vector->sharedController; - vector->refCon = 0; - - // If the interrupt was already registered, - // save the driver's interrupt enablement state. - if (wasAlreadyRegisterd) wasDisabledSoft = vector->interruptDisabledSoft; - else wasDisabledSoft = true; - - // Do any specific initalization for this vector if it has not yet been used. - if (!wasAlreadyRegisterd) initVector(vectorNumber, vector); - - // Make the interrupt really hard disabled. - vector->interruptDisabledSoft = 1; - vector->interruptDisabledHard = 1; - vector->interruptRegistered = 1; - - // Enable the original consumer's interrupt if needed. - // originalNub is protected by wasAlreadyRegisterd here (see line 184). - if (!wasDisabledSoft) originalNub->enableInterrupt(originalSource); - } - - error = vector->sharedController->registerInterrupt(nub, source, target, - handler, refCon); - IOLockUnlock(vector->interruptLock); - return error; - } - - // Fill in vector with the client's info. - vector->handler = handler; - vector->nub = nub; - vector->source = source; - vector->target = target; - vector->refCon = refCon; - - // Do any specific initalization for this vector. - initVector(vectorNumber, vector); - - // Get the vector ready. It starts hard disabled. - vector->interruptDisabledHard = 1; - vector->interruptDisabledSoft = 1; - vector->interruptRegistered = 1; - - IOLockUnlock(vector->interruptLock); - return kIOReturnSuccess; + + // If this vector is already in use, and can be shared (implied), + // or it is not registered and should be shared, + // register as a shared interrupt. + if (wasAlreadyRegisterd || shouldBeShared) { + // If this vector is not already shared, break it out. + if (vector->sharedController == 0) { + // Make the IOShareInterruptController instance + vector->sharedController = new IOSharedInterruptController; + if (vector->sharedController == 0) { + IOLockUnlock(vector->interruptLock); + return kIOReturnNoMemory; + } + + if (wasAlreadyRegisterd) { + // Save the nub and source for the original consumer. + originalNub = vector->nub; + originalSource = vector->source; + + // Physically disable the interrupt, but mark it as being enabled in the hardware. + // The interruptDisabledSoft now indicates the driver's request for enablement. + disableVectorHard(vectorNumber, vector); + vector->interruptDisabledHard = 0; + } + + // Initialize the new shared interrupt controller. + error = vector->sharedController->initInterruptController(this, vectorData); + // If the IOSharedInterruptController could not be initalized, + // if needed, put the original consumer's interrupt back to normal and + // get rid of whats left of the shared controller. 
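/*
 * [Editor's aside - illustration only, not part of the patch]
 * A subtle point in the promotion path: disableVectorHard() masks the
 * interrupt physically, but interruptDisabledHard is then cleared, so the
 * bookkeeping treats the vector as hard-enabled and interruptDisabledSoft
 * alone carries the original driver's intent. That saved intent
 * (wasDisabledSoft) decides at the end whether the original consumer's
 * source comes back on. Restated with simplified types and invented names:
 */
struct PromotionState {
	int interruptDisabledSoft; // 1 = driver wants the source off
	int interruptDisabledHard; // 1 = masked at the controller
};

static bool
finishPromotion(PromotionState &v, bool wasDisabledSoft, bool (*reenable)(void))
{
	// The vector is handed to the shared controller fully disabled...
	v.interruptDisabledSoft = 1;
	v.interruptDisabledHard = 1;
	// ...and the original consumer's interrupt is re-enabled only if the
	// driver had it enabled before promotion began.
	if (!wasDisabledSoft) {
		return reenable(); // stands in for originalNub->enableInterrupt()
	}
	return true;
}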
+ if (error != kIOReturnSuccess) { + if (wasAlreadyRegisterd) { + enableInterrupt(originalNub, originalSource); + } + vector->sharedController->release(); + vector->sharedController = 0; + IOLockUnlock(vector->interruptLock); + return error; + } + + // If there was an original consumer try to register it on the shared controller. + if (wasAlreadyRegisterd) { + error = vector->sharedController->registerInterrupt(originalNub, + originalSource, + vector->target, + vector->handler, + vector->refCon); + // If the original consumer could not be moved to the shared controller, + // put the original consumor's interrupt back to normal and + // get rid of whats left of the shared controller. + if (error != kIOReturnSuccess) { + // Save the driver's interrupt enablement state. + wasDisabledSoft = vector->interruptDisabledSoft; + + // Make the interrupt really hard disabled. + vector->interruptDisabledSoft = 1; + vector->interruptDisabledHard = 1; + + // Enable the original consumer's interrupt if needed. + if (!wasDisabledSoft) { + originalNub->enableInterrupt(originalSource); + } + enableInterrupt(originalNub, originalSource); + + vector->sharedController->release(); + vector->sharedController = 0; + IOLockUnlock(vector->interruptLock); + return error; + } + } + + // Fill in vector with the shared controller's info. + vector->handler = (IOInterruptHandler)vector->sharedController->getInterruptHandlerAddress(); + vector->nub = vector->sharedController; + vector->source = 0; + vector->target = vector->sharedController; + vector->refCon = 0; + + // If the interrupt was already registered, + // save the driver's interrupt enablement state. + if (wasAlreadyRegisterd) { + wasDisabledSoft = vector->interruptDisabledSoft; + } else { + wasDisabledSoft = true; + } + + // Do any specific initalization for this vector if it has not yet been used. + if (!wasAlreadyRegisterd) { + initVector(vectorNumber, vector); + } + + // Make the interrupt really hard disabled. + vector->interruptDisabledSoft = 1; + vector->interruptDisabledHard = 1; + vector->interruptRegistered = 1; + + // Enable the original consumer's interrupt if needed. + // originalNub is protected by wasAlreadyRegisterd here (see line 184). + if (!wasDisabledSoft) { + originalNub->enableInterrupt(originalSource); + } + } + + error = vector->sharedController->registerInterrupt(nub, source, target, + handler, refCon); + IOLockUnlock(vector->interruptLock); + return error; + } + + // Fill in vector with the client's info. + vector->handler = handler; + vector->nub = nub; + vector->source = source; + vector->target = target; + vector->refCon = refCon; + + // Do any specific initalization for this vector. + initVector(vectorNumber, vector); + + // Get the vector ready. It starts hard disabled. + vector->interruptDisabledHard = 1; + vector->interruptDisabledSoft = 1; + vector->interruptRegistered = 1; + + IOLockUnlock(vector->interruptLock); + return kIOReturnSuccess; } -IOReturn IOInterruptController::unregisterInterrupt(IOService *nub, int source) +IOReturn +IOInterruptController::unregisterInterrupt(IOService *nub, int source) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - OSData *vectorData; - - interruptSources = nub->_interruptSources; - vectorData = interruptSources[source].vectorData; - vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); - vector = &vectors[vectorNumber]; - - // Get the lock for this vector. 
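/*
 * [Editor's aside - illustration only, not part of the patch]
 * Note the locking convention in registerInterrupt()/unregisterInterrupt()
 * above: every mutation of a vector's state happens between
 * IOLockLock(vector->interruptLock) and IOLockUnlock(...), and each early
 * error return unlocks explicitly before leaving. In standard C++ the same
 * protocol is usually expressed with a scope guard; a sketch, with
 * std::mutex standing in for IOLock:
 */
#include <mutex>

struct GuardedVector {
	std::mutex interruptLock;
	bool interruptRegistered = false;
};

static bool
unregisterVector(GuardedVector &v)
{
	std::lock_guard<std::mutex> guard(v.interruptLock); // unlocks on every path
	if (!v.interruptRegistered) {
		return true; // already gone: report success, mirroring the diff
	}
	v.interruptRegistered = false; // clear state, as the kernel code does
	return true;
}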
- IOLockLock(vector->interruptLock); - - // Return success if it is not already registered - if (!vector->interruptRegistered) { - IOLockUnlock(vector->interruptLock); - return kIOReturnSuccess; - } - - // Soft disable the source. - disableInterrupt(nub, source); - - // Turn the source off at hardware. - disableVectorHard(vectorNumber, vector); - - // Clear all the storage for the vector except for interruptLock. - vector->interruptActive = 0; - vector->interruptDisabledSoft = 0; - vector->interruptDisabledHard = 0; - vector->interruptRegistered = 0; - vector->nub = 0; - vector->source = 0; - vector->handler = 0; - vector->target = 0; - vector->refCon = 0; - - IOLockUnlock(vector->interruptLock); - return kIOReturnSuccess; + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + // Get the lock for this vector. + IOLockLock(vector->interruptLock); + + // Return success if it is not already registered + if (!vector->interruptRegistered) { + IOLockUnlock(vector->interruptLock); + return kIOReturnSuccess; + } + + // Soft disable the source. + disableInterrupt(nub, source); + + // Turn the source off at hardware. + disableVectorHard(vectorNumber, vector); + + // Clear all the storage for the vector except for interruptLock. + vector->interruptActive = 0; + vector->interruptDisabledSoft = 0; + vector->interruptDisabledHard = 0; + vector->interruptRegistered = 0; + vector->nub = 0; + vector->source = 0; + vector->handler = 0; + vector->target = 0; + vector->refCon = 0; + + IOLockUnlock(vector->interruptLock); + return kIOReturnSuccess; } -IOReturn IOInterruptController::getInterruptType(IOService *nub, int source, - int *interruptType) +IOReturn +IOInterruptController::getInterruptType(IOService *nub, int source, + int *interruptType) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - OSData *vectorData; - - if (interruptType == 0) return kIOReturnBadArgument; - - interruptSources = nub->_interruptSources; - vectorData = interruptSources[source].vectorData; - vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); - vector = &vectors[vectorNumber]; - - *interruptType = getVectorType(vectorNumber, vector); - - return kIOReturnSuccess; + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + if (interruptType == 0) { + return kIOReturnBadArgument; + } + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + *interruptType = getVectorType(vectorNumber, vector); + + return kIOReturnSuccess; } -IOReturn IOInterruptController::enableInterrupt(IOService *nub, int source) +IOReturn +IOInterruptController::enableInterrupt(IOService *nub, int source) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - OSData *vectorData; - - interruptSources = nub->_interruptSources; - vectorData = interruptSources[source].vectorData; - vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); - vector = &vectors[vectorNumber]; - - if 
(vector->interruptDisabledSoft) { - vector->interruptDisabledSoft = 0; + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + if (vector->interruptDisabledSoft) { + vector->interruptDisabledSoft = 0; #if !defined(__i386__) && !defined(__x86_64__) - OSMemoryBarrier(); + OSMemoryBarrier(); #endif - if (!getPlatform()->atInterruptLevel()) { - while (vector->interruptActive) - {} - } - if (vector->interruptDisabledHard) { - vector->interruptDisabledHard = 0; - - enableVector(vectorNumber, vector); - } - } - - return kIOReturnSuccess; + if (!getPlatform()->atInterruptLevel()) { + while (vector->interruptActive) { + } + } + if (vector->interruptDisabledHard) { + vector->interruptDisabledHard = 0; + + enableVector(vectorNumber, vector); + } + } + + return kIOReturnSuccess; } -IOReturn IOInterruptController::disableInterrupt(IOService *nub, int source) +IOReturn +IOInterruptController::disableInterrupt(IOService *nub, int source) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - OSData *vectorData; - - interruptSources = nub->_interruptSources; - vectorData = interruptSources[source].vectorData; - vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); - vector = &vectors[vectorNumber]; - - vector->interruptDisabledSoft = 1; + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + vector->interruptDisabledSoft = 1; #if !defined(__i386__) && !defined(__x86_64__) - OSMemoryBarrier(); + OSMemoryBarrier(); #endif - - if (!getPlatform()->atInterruptLevel()) { - while (vector->interruptActive) - {} - } - - return kIOReturnSuccess; + + if (!getPlatform()->atInterruptLevel()) { + while (vector->interruptActive) { + } + } + + return kIOReturnSuccess; } -IOReturn IOInterruptController::causeInterrupt(IOService *nub, int source) +IOReturn +IOInterruptController::causeInterrupt(IOService *nub, int source) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - OSData *vectorData; - - interruptSources = nub->_interruptSources; - vectorData = interruptSources[source].vectorData; - vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); - vector = &vectors[vectorNumber]; - - causeVector(vectorNumber, vector); - - return kIOReturnSuccess; + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + causeVector(vectorNumber, vector); + + return kIOReturnSuccess; } -IOInterruptAction IOInterruptController::getInterruptHandlerAddress(void) +IOInterruptAction +IOInterruptController::getInterruptHandlerAddress(void) { - return 0; + return 0; } -IOReturn IOInterruptController::handleInterrupt(void *refCon, IOService *nub, - int 
source) +IOReturn +IOInterruptController::handleInterrupt(void *refCon, IOService *nub, + int source) { - return kIOReturnInvalid; + return kIOReturnInvalid; } // Methods to be overridden for simplifed interrupt controller subclasses. -bool IOInterruptController::vectorCanBeShared(IOInterruptVectorNumber /*vectorNumber*/, - IOInterruptVector */*vector*/) +bool +IOInterruptController::vectorCanBeShared(IOInterruptVectorNumber /*vectorNumber*/, + IOInterruptVector */*vector*/) { - return false; + return false; } -void IOInterruptController::initVector(IOInterruptVectorNumber /*vectorNumber*/, - IOInterruptVector */*vector*/) +void +IOInterruptController::initVector(IOInterruptVectorNumber /*vectorNumber*/, + IOInterruptVector */*vector*/) { } -int IOInterruptController::getVectorType(IOInterruptVectorNumber /*vectorNumber*/, - IOInterruptVector */*vector*/) +int +IOInterruptController::getVectorType(IOInterruptVectorNumber /*vectorNumber*/, + IOInterruptVector */*vector*/) { - return kIOInterruptTypeEdge; + return kIOInterruptTypeEdge; } -void IOInterruptController::disableVectorHard(IOInterruptVectorNumber /*vectorNumber*/, - IOInterruptVector */*vector*/) +void +IOInterruptController::disableVectorHard(IOInterruptVectorNumber /*vectorNumber*/, + IOInterruptVector */*vector*/) { } -void IOInterruptController::enableVector(IOInterruptVectorNumber /*vectorNumber*/, - IOInterruptVector */*vector*/) +void +IOInterruptController::enableVector(IOInterruptVectorNumber /*vectorNumber*/, + IOInterruptVector */*vector*/) { } -void IOInterruptController::causeVector(IOInterruptVectorNumber /*vectorNumber*/, - IOInterruptVector */*vector*/) +void +IOInterruptController::causeVector(IOInterruptVectorNumber /*vectorNumber*/, + IOInterruptVector */*vector*/) { } -void IOInterruptController::timeStampSpuriousInterrupt(void) +void +IOInterruptController::timeStampSpuriousInterrupt(void) { - uint64_t providerID = 0; - IOService * provider = getProvider(); + uint64_t providerID = 0; + IOService * provider = getProvider(); - if (provider) { - providerID = provider->getRegistryEntryID(); - } + if (provider) { + providerID = provider->getRegistryEntryID(); + } - IOTimeStampConstant(IODBG_INTC(IOINTC_SPURIOUS), providerID); + IOTimeStampConstant(IODBG_INTC(IOINTC_SPURIOUS), providerID); } -void IOInterruptController::timeStampInterruptHandlerInternal(bool isStart, IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) +void +IOInterruptController::timeStampInterruptHandlerInternal(bool isStart, IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) { - uint64_t providerID = 0; - vm_offset_t unslidHandler = 0; - vm_offset_t unslidTarget = 0; + uint64_t providerID = 0; + vm_offset_t unslidHandler = 0; + vm_offset_t unslidTarget = 0; - IOService * provider = getProvider(); + IOService * provider = getProvider(); - if (provider) { - providerID = provider->getRegistryEntryID(); - } + if (provider) { + providerID = provider->getRegistryEntryID(); + } - if (vector) { - unslidHandler = VM_KERNEL_UNSLIDE((vm_offset_t)vector->handler); - unslidTarget = VM_KERNEL_UNSLIDE_OR_PERM((vm_offset_t)vector->target); - } + if (vector) { + unslidHandler = VM_KERNEL_UNSLIDE((vm_offset_t)vector->handler); + unslidTarget = VM_KERNEL_UNSLIDE_OR_PERM((vm_offset_t)vector->target); + } - if (isStart) { - IOTimeStampStartConstant(IODBG_INTC(IOINTC_HANDLER), (uintptr_t)vectorNumber, (uintptr_t)unslidHandler, - (uintptr_t)unslidTarget, (uintptr_t)providerID); - } else { - IOTimeStampEndConstant(IODBG_INTC(IOINTC_HANDLER), 
(uintptr_t)vectorNumber, (uintptr_t)unslidHandler, - (uintptr_t)unslidTarget, (uintptr_t)providerID); - } + if (isStart) { + IOTimeStampStartConstant(IODBG_INTC(IOINTC_HANDLER), (uintptr_t)vectorNumber, (uintptr_t)unslidHandler, + (uintptr_t)unslidTarget, (uintptr_t)providerID); + } else { + IOTimeStampEndConstant(IODBG_INTC(IOINTC_HANDLER), (uintptr_t)vectorNumber, (uintptr_t)unslidHandler, + (uintptr_t)unslidTarget, (uintptr_t)providerID); + } } -void IOInterruptController::timeStampInterruptHandlerStart(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) +void +IOInterruptController::timeStampInterruptHandlerStart(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) { - timeStampInterruptHandlerInternal(true, vectorNumber, vector); + timeStampInterruptHandlerInternal(true, vectorNumber, vector); } -void IOInterruptController::timeStampInterruptHandlerEnd(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) +void +IOInterruptController::timeStampInterruptHandlerEnd(IOInterruptVectorNumber vectorNumber, IOInterruptVector *vector) { - timeStampInterruptHandlerInternal(false, vectorNumber, vector); + timeStampInterruptHandlerInternal(false, vectorNumber, vector); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -462,310 +492,330 @@ OSMetaClassDefineReservedUnused(IOSharedInterruptController, 3); #define kIOSharedInterruptControllerDefaultVectors (128) -IOReturn IOSharedInterruptController::initInterruptController(IOInterruptController *parentController, OSData *parentSource) +IOReturn +IOSharedInterruptController::initInterruptController(IOInterruptController *parentController, OSData *parentSource) { - int cnt, interruptType; - IOReturn error; - - if (!super::init()) - return kIOReturnNoResources; - - // Set provider to this so enable/disable nub stuff works. - provider = this; - - // Allocate the IOInterruptSource so this can act like a nub. - _interruptSources = (IOInterruptSource *)IOMalloc(sizeof(IOInterruptSource)); - if (_interruptSources == 0) return kIOReturnNoMemory; - _numInterruptSources = 1; - - // Set up the IOInterruptSource to point at this. - parentController->retain(); - parentSource->retain(); - _interruptSources[0].interruptController = parentController; - _interruptSources[0].vectorData = parentSource; - - sourceIsLevel = false; - error = provider->getInterruptType(0, &interruptType); - if (error == kIOReturnSuccess) { - if (interruptType & kIOInterruptTypeLevel) - sourceIsLevel = true; - } - - // Allocate the memory for the vectors - numVectors = kIOSharedInterruptControllerDefaultVectors; // For now a constant number. - vectors = (IOInterruptVector *)IOMalloc(numVectors * sizeof(IOInterruptVector)); - if (vectors == NULL) { - IOFree(_interruptSources, sizeof(IOInterruptSource)); - return kIOReturnNoMemory; - } - bzero(vectors, numVectors * sizeof(IOInterruptVector)); - - // Allocate the lock for the controller. - controllerLock = IOSimpleLockAlloc(); - if (controllerLock == 0) return kIOReturnNoResources; - - // Allocate locks for the vectors. 
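/*
 * [Editor's aside - illustration only, not part of the patch]
 * initInterruptController() below follows a common kernel allocation shape:
 * allocate the vector array, zero it, then allocate one lock per vector,
 * freeing every lock allocated so far if any single allocation fails. The
 * same shape in portable C++, with new/delete standing in for
 * IOMalloc/IOLockAlloc:
 */
#include <cstddef>
#include <new>

struct Slot {
	int *lock = nullptr; // stands in for IOLock *
};

static Slot *
allocSlots(size_t n)
{
	Slot *slots = new (std::nothrow) Slot[n](); // zeroed, like bzero()
	if (slots == nullptr) {
		return nullptr;
	}
	for (size_t i = 0; i < n; i++) {
		slots[i].lock = new (std::nothrow) int(0);
		if (slots[i].lock == nullptr) {
			for (size_t j = 0; j < n; j++) { // unwind: free what exists
				delete slots[j].lock;    // deleting nullptr is a no-op
			}
			delete[] slots;
			return nullptr;
		}
	}
	return slots;
}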
- for (cnt = 0; cnt < numVectors; cnt++) { - vectors[cnt].interruptLock = IOLockAlloc(); - if (vectors[cnt].interruptLock == NULL) { - for (cnt = 0; cnt < numVectors; cnt++) { - if (vectors[cnt].interruptLock != NULL) - IOLockFree(vectors[cnt].interruptLock); - } - return kIOReturnNoResources; - } - } - - numVectors = 0; // reset the high water mark for used vectors - vectorsRegistered = 0; - vectorsEnabled = 0; - controllerDisabled = 1; - - return kIOReturnSuccess; + int cnt, interruptType; + IOReturn error; + + if (!super::init()) { + return kIOReturnNoResources; + } + + // Set provider to this so enable/disable nub stuff works. + provider = this; + + // Allocate the IOInterruptSource so this can act like a nub. + _interruptSources = (IOInterruptSource *)IOMalloc(sizeof(IOInterruptSource)); + if (_interruptSources == 0) { + return kIOReturnNoMemory; + } + _numInterruptSources = 1; + + // Set up the IOInterruptSource to point at this. + parentController->retain(); + parentSource->retain(); + _interruptSources[0].interruptController = parentController; + _interruptSources[0].vectorData = parentSource; + + sourceIsLevel = false; + error = provider->getInterruptType(0, &interruptType); + if (error == kIOReturnSuccess) { + if (interruptType & kIOInterruptTypeLevel) { + sourceIsLevel = true; + } + } + + // Allocate the memory for the vectors + numVectors = kIOSharedInterruptControllerDefaultVectors; // For now a constant number. + vectors = (IOInterruptVector *)IOMalloc(numVectors * sizeof(IOInterruptVector)); + if (vectors == NULL) { + IOFree(_interruptSources, sizeof(IOInterruptSource)); + return kIOReturnNoMemory; + } + bzero(vectors, numVectors * sizeof(IOInterruptVector)); + + // Allocate the lock for the controller. + controllerLock = IOSimpleLockAlloc(); + if (controllerLock == 0) { + return kIOReturnNoResources; + } + + // Allocate locks for the vectors. + for (cnt = 0; cnt < numVectors; cnt++) { + vectors[cnt].interruptLock = IOLockAlloc(); + if (vectors[cnt].interruptLock == NULL) { + for (cnt = 0; cnt < numVectors; cnt++) { + if (vectors[cnt].interruptLock != NULL) { + IOLockFree(vectors[cnt].interruptLock); + } + } + return kIOReturnNoResources; + } + } + + numVectors = 0; // reset the high water mark for used vectors + vectorsRegistered = 0; + vectorsEnabled = 0; + controllerDisabled = 1; + + return kIOReturnSuccess; } -IOReturn IOSharedInterruptController::registerInterrupt(IOService *nub, - int source, - void *target, - IOInterruptHandler handler, - void *refCon) +IOReturn +IOSharedInterruptController::registerInterrupt(IOService *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector = 0; - OSData *vectorData; - IOInterruptState interruptState; - - interruptSources = nub->_interruptSources; - - // Find a free vector. - vectorNumber = kIOSharedInterruptControllerDefaultVectors; - while (vectorsRegistered != kIOSharedInterruptControllerDefaultVectors) { - for (vectorNumber = 0; vectorNumber < kIOSharedInterruptControllerDefaultVectors; vectorNumber++) { - vector = &vectors[vectorNumber]; - - // Get the lock for this vector. - IOLockLock(vector->interruptLock); - - // Is it unregistered? - if (!vector->interruptRegistered) break; - - // Move along to the next one. - IOLockUnlock(vector->interruptLock); - } - - if (vectorNumber != kIOSharedInterruptControllerDefaultVectors) break; - } - - // Could not find a free one, so give up. 
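/*
 * [Editor's aside - illustration only, not part of the patch]
 * The free-vector scan in registerInterrupt() locks each candidate in turn,
 * keeps the lock if the slot is unregistered (so the caller still holds it
 * while filling the slot in), and otherwise unlocks and moves on. The same
 * pattern with std::mutex:
 */
#include <mutex>

struct Cell {
	std::mutex lock;
	bool registered = false;
};

// Returns the index of a free cell with its lock HELD, or -1 if none.
static int
findFree(Cell *cells, int count)
{
	for (int i = 0; i < count; i++) {
		cells[i].lock.lock();
		if (!cells[i].registered) {
			return i; // caller must unlock cells[i].lock when done
		}
		cells[i].lock.unlock(); // occupied: move along to the next one
	}
	return -1;
}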
- if (vectorNumber == kIOSharedInterruptControllerDefaultVectors) { - return kIOReturnNoResources; - } - - // Create the vectorData for the IOInterruptSource. - vectorData = OSData::withBytes(&vectorNumber, sizeof(vectorNumber)); - if (vectorData == 0) { - IOLockUnlock(vector->interruptLock); - return kIOReturnNoMemory; - } - - // Fill in the IOInterruptSource with the controller's info. - interruptSources[source].interruptController = this; - interruptSources[source].vectorData = vectorData; - - // Fill in vector with the client's info. - vector->handler = handler; - vector->nub = nub; - vector->source = source; - vector->target = target; - vector->refCon = refCon; - - // Get the vector ready. It starts off soft disabled. - vector->interruptDisabledSoft = 1; - vector->interruptRegistered = 1; - - interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); - // Move the high water mark if needed - if (++vectorsRegistered > numVectors) numVectors = vectorsRegistered; - IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); - - IOLockUnlock(vector->interruptLock); - return kIOReturnSuccess; + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector = 0; + OSData *vectorData; + IOInterruptState interruptState; + + interruptSources = nub->_interruptSources; + + // Find a free vector. + vectorNumber = kIOSharedInterruptControllerDefaultVectors; + while (vectorsRegistered != kIOSharedInterruptControllerDefaultVectors) { + for (vectorNumber = 0; vectorNumber < kIOSharedInterruptControllerDefaultVectors; vectorNumber++) { + vector = &vectors[vectorNumber]; + + // Get the lock for this vector. + IOLockLock(vector->interruptLock); + + // Is it unregistered? + if (!vector->interruptRegistered) { + break; + } + + // Move along to the next one. + IOLockUnlock(vector->interruptLock); + } + + if (vectorNumber != kIOSharedInterruptControllerDefaultVectors) { + break; + } + } + + // Could not find a free one, so give up. + if (vectorNumber == kIOSharedInterruptControllerDefaultVectors) { + return kIOReturnNoResources; + } + + // Create the vectorData for the IOInterruptSource. + vectorData = OSData::withBytes(&vectorNumber, sizeof(vectorNumber)); + if (vectorData == 0) { + IOLockUnlock(vector->interruptLock); + return kIOReturnNoMemory; + } + + // Fill in the IOInterruptSource with the controller's info. + interruptSources[source].interruptController = this; + interruptSources[source].vectorData = vectorData; + + // Fill in vector with the client's info. + vector->handler = handler; + vector->nub = nub; + vector->source = source; + vector->target = target; + vector->refCon = refCon; + + // Get the vector ready. It starts off soft disabled. 
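/*
 * [Editor's aside - illustration only, not part of the patch]
 * numVectors is reused here as a high-water mark: it is reset to zero at the
 * end of init and only ever raised to the current registration count, under
 * a spinlock taken with interrupts disabled
 * (IOSimpleLockLockDisableInterrupt). The counter logic, restated:
 */
struct Counters {
	int vectorsRegistered = 0;
	int numVectors = 0; // high-water mark of concurrently registered vectors
};

static void
noteRegistration(Counters &c)
{
	// In the kernel this runs under controllerLock with interrupts disabled.
	if (++c.vectorsRegistered > c.numVectors) {
		c.numVectors = c.vectorsRegistered;
	}
}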
+ vector->interruptDisabledSoft = 1; + vector->interruptRegistered = 1; + + interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); + // Move the high water mark if needed + if (++vectorsRegistered > numVectors) { + numVectors = vectorsRegistered; + } + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + + IOLockUnlock(vector->interruptLock); + return kIOReturnSuccess; } -IOReturn IOSharedInterruptController::unregisterInterrupt(IOService *nub, - int source) +IOReturn +IOSharedInterruptController::unregisterInterrupt(IOService *nub, + int source) { - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - IOInterruptState interruptState; - - for (vectorNumber = 0; vectorNumber < kIOSharedInterruptControllerDefaultVectors; vectorNumber++) { - vector = &vectors[vectorNumber]; - - // Get the lock for this vector. - IOLockLock(vector->interruptLock); - - // Return success if it is not already registered - if (!vector->interruptRegistered - || (vector->nub != nub) || (vector->source != source)) { - IOLockUnlock(vector->interruptLock); - continue; - } - - // Soft disable the source and the controller too. - disableInterrupt(nub, source); - - // Clear all the storage for the vector except for interruptLock. - vector->interruptActive = 0; - vector->interruptDisabledSoft = 0; - vector->interruptDisabledHard = 0; - vector->interruptRegistered = 0; - vector->nub = 0; - vector->source = 0; - vector->handler = 0; - vector->target = 0; - vector->refCon = 0; - - interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); - vectorsRegistered--; - IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); - - // Move along to the next one. - IOLockUnlock(vector->interruptLock); - } - - // Re-enable the controller if all vectors are enabled. - if (vectorsEnabled == vectorsRegistered) { - controllerDisabled = 0; - provider->enableInterrupt(0); - } - - return kIOReturnSuccess; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + IOInterruptState interruptState; + + for (vectorNumber = 0; vectorNumber < kIOSharedInterruptControllerDefaultVectors; vectorNumber++) { + vector = &vectors[vectorNumber]; + + // Get the lock for this vector. + IOLockLock(vector->interruptLock); + + // Return success if it is not already registered + if (!vector->interruptRegistered + || (vector->nub != nub) || (vector->source != source)) { + IOLockUnlock(vector->interruptLock); + continue; + } + + // Soft disable the source and the controller too. + disableInterrupt(nub, source); + + // Clear all the storage for the vector except for interruptLock. + vector->interruptActive = 0; + vector->interruptDisabledSoft = 0; + vector->interruptDisabledHard = 0; + vector->interruptRegistered = 0; + vector->nub = 0; + vector->source = 0; + vector->handler = 0; + vector->target = 0; + vector->refCon = 0; + + interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); + vectorsRegistered--; + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + + // Move along to the next one. + IOLockUnlock(vector->interruptLock); + } + + // Re-enable the controller if all vectors are enabled. 
+ if (vectorsEnabled == vectorsRegistered) { + controllerDisabled = 0; + provider->enableInterrupt(0); + } + + return kIOReturnSuccess; } -IOReturn IOSharedInterruptController::getInterruptType(IOService */*nub*/, - int /*source*/, - int *interruptType) +IOReturn +IOSharedInterruptController::getInterruptType(IOService */*nub*/, + int /*source*/, + int *interruptType) { - return provider->getInterruptType(0, interruptType); + return provider->getInterruptType(0, interruptType); } -IOReturn IOSharedInterruptController::enableInterrupt(IOService *nub, - int source) +IOReturn +IOSharedInterruptController::enableInterrupt(IOService *nub, + int source) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - OSData *vectorData; - IOInterruptState interruptState; - - interruptSources = nub->_interruptSources; - vectorData = interruptSources[source].vectorData; - vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); - vector = &vectors[vectorNumber]; - - interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); - if (!vector->interruptDisabledSoft) { - IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); - return kIOReturnSuccess; - } - - vector->interruptDisabledSoft = 0; - vectorsEnabled++; - IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); - - if (controllerDisabled && (vectorsEnabled == vectorsRegistered)) { - controllerDisabled = 0; - provider->enableInterrupt(0); - } - - return kIOReturnSuccess; + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + IOInterruptState interruptState; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); + if (!vector->interruptDisabledSoft) { + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + return kIOReturnSuccess; + } + + vector->interruptDisabledSoft = 0; + vectorsEnabled++; + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + + if (controllerDisabled && (vectorsEnabled == vectorsRegistered)) { + controllerDisabled = 0; + provider->enableInterrupt(0); + } + + return kIOReturnSuccess; } -IOReturn IOSharedInterruptController::disableInterrupt(IOService *nub, - int source) +IOReturn +IOSharedInterruptController::disableInterrupt(IOService *nub, + int source) { - IOInterruptSource *interruptSources; - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - OSData *vectorData; - IOInterruptState interruptState; - - interruptSources = nub->_interruptSources; - vectorData = interruptSources[source].vectorData; - vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); - vector = &vectors[vectorNumber]; - - interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); - if (!vector->interruptDisabledSoft) { - vector->interruptDisabledSoft = 1; + IOInterruptSource *interruptSources; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + OSData *vectorData; + IOInterruptState interruptState; + + interruptSources = nub->_interruptSources; + vectorData = interruptSources[source].vectorData; + vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy(); + vector = &vectors[vectorNumber]; + + interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); + 
if (!vector->interruptDisabledSoft) { + vector->interruptDisabledSoft = 1; #if !defined(__i386__) && !defined(__x86_64__) - OSMemoryBarrier(); + OSMemoryBarrier(); #endif - vectorsEnabled--; - } - IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); - - if (!getPlatform()->atInterruptLevel()) { - while (vector->interruptActive) - {} - } - - return kIOReturnSuccess; + vectorsEnabled--; + } + IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState); + + if (!getPlatform()->atInterruptLevel()) { + while (vector->interruptActive) { + } + } + + return kIOReturnSuccess; } -IOInterruptAction IOSharedInterruptController::getInterruptHandlerAddress(void) +IOInterruptAction +IOSharedInterruptController::getInterruptHandlerAddress(void) { - return OSMemberFunctionCast(IOInterruptAction, - this, &IOSharedInterruptController::handleInterrupt); + return OSMemberFunctionCast(IOInterruptAction, + this, &IOSharedInterruptController::handleInterrupt); } -IOReturn IOSharedInterruptController::handleInterrupt(void * /*refCon*/, - IOService * nub, - int /*source*/) +IOReturn +IOSharedInterruptController::handleInterrupt(void * /*refCon*/, + IOService * nub, + int /*source*/) { - IOInterruptVectorNumber vectorNumber; - IOInterruptVector *vector; - - for (vectorNumber = 0; vectorNumber < numVectors; vectorNumber++) { - vector = &vectors[vectorNumber]; - - vector->interruptActive = 1; + IOInterruptVectorNumber vectorNumber; + IOInterruptVector *vector; + + for (vectorNumber = 0; vectorNumber < numVectors; vectorNumber++) { + vector = &vectors[vectorNumber]; + + vector->interruptActive = 1; #if !defined(__i386__) && !defined(__x86_64__) - OSMemoryBarrier(); + OSMemoryBarrier(); #endif - if (!vector->interruptDisabledSoft) { - - // Call the handler if it exists. - if (vector->interruptRegistered) { - - bool trace = (gIOKitTrace & kIOTraceInterrupts) ? true : false; - - if (trace) - timeStampInterruptHandlerStart(vectorNumber, vector); - - // Call handler. - vector->handler(vector->target, vector->refCon, vector->nub, vector->source); - - if (trace) - timeStampInterruptHandlerEnd(vectorNumber, vector); - } - } - - vector->interruptActive = 0; - } - - // if any of the vectors are dissabled, then dissable this controller. - IOSimpleLockLock(controllerLock); - if (vectorsEnabled != vectorsRegistered) { - nub->disableInterrupt(0); - controllerDisabled = 1; - } - IOSimpleLockUnlock(controllerLock); - - return kIOReturnSuccess; -} + if (!vector->interruptDisabledSoft) { + // Call the handler if it exists. + if (vector->interruptRegistered) { + bool trace = (gIOKitTrace & kIOTraceInterrupts) ? true : false; + + if (trace) { + timeStampInterruptHandlerStart(vectorNumber, vector); + } + + // Call handler. + vector->handler(vector->target, vector->refCon, vector->nub, vector->source); + + if (trace) { + timeStampInterruptHandlerEnd(vectorNumber, vector); + } + } + } + vector->interruptActive = 0; + } + + // if any of the vectors are dissabled, then dissable this controller. + IOSimpleLockLock(controllerLock); + if (vectorsEnabled != vectorsRegistered) { + nub->disableInterrupt(0); + controllerDisabled = 1; + } + IOSimpleLockUnlock(controllerLock); + + return kIOReturnSuccess; +} diff --git a/iokit/Kernel/IOInterruptEventSource.cpp b/iokit/Kernel/IOInterruptEventSource.cpp index 0d96bbb86..19d5d597d 100644 --- a/iokit/Kernel/IOInterruptEventSource.cpp +++ b/iokit/Kernel/IOInterruptEventSource.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2014 Apple Inc. All rights reserved. 
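/*
 * [Editor's aside - illustration only, not part of the patch]
 * The shared-controller handleInterrupt() that closes the previous file
 * walks every vector on each hardware interrupt: mark the vector active,
 * publish that with a barrier on weakly-ordered CPUs, run the handler only
 * if the slot is both soft-enabled and registered, then mark it inactive.
 * (Afterwards the controller disables itself whenever vectorsEnabled !=
 * vectorsRegistered.) The dispatch skeleton, with std::atomic supplying the
 * OSMemoryBarrier() ordering:
 */
#include <atomic>

struct SharedVector {
	std::atomic<int> interruptActive{0};
	bool interruptDisabledSoft = true;
	bool interruptRegistered = false;
	void (*handler)() = nullptr;
};

static void
dispatchAll(SharedVector *vectors, int numVectors)
{
	for (int i = 0; i < numVectors; i++) {
		SharedVector &v = vectors[i];
		v.interruptActive.store(1); // visible before the enabled check
		if (!v.interruptDisabledSoft && v.interruptRegistered && v.handler) {
			v.handler(); // call the handler if it exists
		}
		v.interruptActive.store(0);
	}
}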
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -72,258 +72,277 @@ OSMetaClassDefineReservedUnused(IOInterruptEventSource, 5); OSMetaClassDefineReservedUnused(IOInterruptEventSource, 6); OSMetaClassDefineReservedUnused(IOInterruptEventSource, 7); -bool IOInterruptEventSource::init(OSObject *inOwner, - Action inAction, - IOService *inProvider, - int inIntIndex) +bool +IOInterruptEventSource::init(OSObject *inOwner, + Action inAction, + IOService *inProvider, + int inIntIndex) { - bool res = true; + bool res = true; + + if (!super::init(inOwner, (IOEventSourceAction) inAction)) { + return false; + } - if ( !super::init(inOwner, (IOEventSourceAction) inAction) ) - return false; + reserved = IONew(ExpansionData, 1); - reserved = IONew(ExpansionData, 1); + if (!reserved) { + return false; + } - if (!reserved) { - return false; - } + bzero(reserved, sizeof(ExpansionData)); + + provider = inProvider; + producerCount = consumerCount = 0; + autoDisable = explicitDisable = false; + intIndex = ~inIntIndex; + + // Assumes inOwner holds a reference(retain) on the provider + if (inProvider) { + if (IA_ANY_STATISTICS_ENABLED) { + /* + * We only treat this as an "interrupt" if it has a provider; if it does, + * set up the objects necessary to track interrupt statistics. Interrupt + * event sources without providers are most likely being used as simple + * event source in order to poke at workloops and kick off work. + * + * We also avoid try to avoid interrupt accounting overhead if none of + * the statistics are enabled. + */ + reserved->statistics = IONew(IOInterruptAccountingData, 1); + + if (!reserved->statistics) { + /* + * We rely on the free() routine to clean up after us if init fails + * midway. + */ + return false; + } - bzero(reserved, sizeof(ExpansionData)); + bzero(reserved->statistics, sizeof(IOInterruptAccountingData)); - provider = inProvider; - producerCount = consumerCount = 0; - autoDisable = explicitDisable = false; - intIndex = ~inIntIndex; + reserved->statistics->owner = this; + } - // Assumes inOwner holds a reference(retain) on the provider - if (inProvider) { - if (IA_ANY_STATISTICS_ENABLED) { - /* - * We only treat this as an "interrupt" if it has a provider; if it does, - * set up the objects necessary to track interrupt statistics. Interrupt - * event sources without providers are most likely being used as simple - * event source in order to poke at workloops and kick off work. 
- * - * We also avoid try to avoid interrupt accounting overhead if none of - * the statistics are enabled. - */ - reserved->statistics = IONew(IOInterruptAccountingData, 1); + res = (kIOReturnSuccess == registerInterruptHandler(inProvider, inIntIndex)); - if (!reserved->statistics) { - /* - * We rely on the free() routine to clean up after us if init fails - * midway. - */ - return false; - } + if (res) { + intIndex = inIntIndex; + } + } - bzero(reserved->statistics, sizeof(IOInterruptAccountingData)); + IOStatisticsInitializeCounter(); - reserved->statistics->owner = this; - } + return res; +} - res = (kIOReturnSuccess == registerInterruptHandler(inProvider, inIntIndex)); +IOReturn +IOInterruptEventSource::registerInterruptHandler(IOService *inProvider, + int inIntIndex) +{ + IOReturn ret; + int intType; + IOInterruptAction intHandler; - if (res) { - intIndex = inIntIndex; - } - } + ret = inProvider->getInterruptType(inIntIndex, &intType); + if (kIOReturnSuccess != ret) { + return ret; + } - IOStatisticsInitializeCounter(); + autoDisable = (intType == kIOInterruptTypeLevel); + if (autoDisable) { + intHandler = OSMemberFunctionCast(IOInterruptAction, + this, &IOInterruptEventSource::disableInterruptOccurred); + } else { + intHandler = OSMemberFunctionCast(IOInterruptAction, + this, &IOInterruptEventSource::normalInterruptOccurred); + } - return res; -} + ret = provider->registerInterrupt(inIntIndex, this, intHandler); + + /* + * Add statistics to the provider. The setWorkLoop convention should ensure + * that we always go down the unregister path before we register (outside of + * init()), so we don't have to worry that we will invoke addInterruptStatistics + * erroneously. + */ + if ((ret == kIOReturnSuccess) && (reserved->statistics)) { + /* + * Stash the normal index value, for the sake of debugging. + */ + reserved->statistics->interruptIndex = inIntIndex; + + /* + * We need to hook the interrupt information up to the provider so that it + * can find the statistics for this interrupt when desired. The provider is + * responsible for maintaining the reporter for a particular interrupt, and + * needs a handle on the statistics so that it can request that the reporter + * be updated as needed. Errors are considered "soft" for the moment (it + * will either panic, or fail in a way such that we can still service the + * interrupt). + */ + provider->addInterruptStatistics(reserved->statistics, inIntIndex); + + /* + * Add the statistics object to the global list of statistics objects; this + * is an aid to debugging (we can trivially find statistics for all eligible + * interrupts, and dump them; potentially helpful if the system is wedged + * due to interrupt activity). + */ + interruptAccountingDataAddToList(reserved->statistics); + } -IOReturn IOInterruptEventSource::registerInterruptHandler(IOService *inProvider, - int inIntIndex) -{ - IOReturn ret; - int intType; - IOInterruptAction intHandler; - - ret = inProvider->getInterruptType(inIntIndex, &intType); - if (kIOReturnSuccess != ret) - return (ret); - - autoDisable = (intType == kIOInterruptTypeLevel); - if (autoDisable) { - intHandler = OSMemberFunctionCast(IOInterruptAction, - this, &IOInterruptEventSource::disableInterruptOccurred); - } - else - intHandler = OSMemberFunctionCast(IOInterruptAction, - this, &IOInterruptEventSource::normalInterruptOccurred); - - ret = provider->registerInterrupt(inIntIndex, this, intHandler); - - /* - * Add statistics to the provider. 
The setWorkLoop convention should ensure - * that we always go down the unregister path before we register (outside of - * init()), so we don't have to worry that we will invoke addInterruptStatistics - * erroneously. - */ - if ((ret == kIOReturnSuccess) && (reserved->statistics)) { - /* - * Stash the normal index value, for the sake of debugging. - */ - reserved->statistics->interruptIndex = inIntIndex; - - /* - * We need to hook the interrupt information up to the provider so that it - * can find the statistics for this interrupt when desired. The provider is - * responsible for maintaining the reporter for a particular interrupt, and - * needs a handle on the statistics so that it can request that the reporter - * be updated as needed. Errors are considered "soft" for the moment (it - * will either panic, or fail in a way such that we can still service the - * interrupt). - */ - provider->addInterruptStatistics(reserved->statistics, inIntIndex); - - /* - * Add the statistics object to the global list of statistics objects; this - * is an aid to debugging (we can trivially find statistics for all eligible - * interrupts, and dump them; potentially helpful if the system is wedged - * due to interrupt activity). - */ - interruptAccountingDataAddToList(reserved->statistics); - } - - return (ret); + return ret; } void IOInterruptEventSource::unregisterInterruptHandler(IOService *inProvider, - int inIntIndex) + int inIntIndex) { - if (reserved->statistics) { - interruptAccountingDataRemoveFromList(reserved->statistics); - provider->removeInterruptStatistics(reserved->statistics->interruptIndex); - } + if (reserved->statistics) { + interruptAccountingDataRemoveFromList(reserved->statistics); + provider->removeInterruptStatistics(reserved->statistics->interruptIndex); + } - provider->unregisterInterrupt(inIntIndex); + provider->unregisterInterrupt(inIntIndex); } IOInterruptEventSource * IOInterruptEventSource::interruptEventSource(OSObject *inOwner, - Action inAction, - IOService *inProvider, - int inIntIndex) + Action inAction, + IOService *inProvider, + int inIntIndex) { - IOInterruptEventSource *me = new IOInterruptEventSource; + IOInterruptEventSource *me = new IOInterruptEventSource; - if (me && !me->init(inOwner, inAction, inProvider, inIntIndex)) { - me->release(); - return 0; - } + if (me && !me->init(inOwner, inAction, inProvider, inIntIndex)) { + me->release(); + return 0; + } - return me; + return me; } IOInterruptEventSource * IOInterruptEventSource::interruptEventSource(OSObject *inOwner, - IOService *inProvider, - int inIntIndex, - ActionBlock inAction) + IOService *inProvider, + int inIntIndex, + ActionBlock inAction) { - IOInterruptEventSource * ies; - ies = IOInterruptEventSource::interruptEventSource(inOwner, (Action) NULL, inProvider, inIntIndex); - if (ies) ies->setActionBlock((IOEventSource::ActionBlock) inAction); + IOInterruptEventSource * ies; + ies = IOInterruptEventSource::interruptEventSource(inOwner, (Action) NULL, inProvider, inIntIndex); + if (ies) { + ies->setActionBlock((IOEventSource::ActionBlock) inAction); + } - return ies; + return ies; } -void IOInterruptEventSource::free() +void +IOInterruptEventSource::free() { - if (provider && intIndex >= 0) - unregisterInterruptHandler(provider, intIndex); + if (provider && intIndex >= 0) { + unregisterInterruptHandler(provider, intIndex); + } - if (reserved) { - if (reserved->statistics) { - IODelete(reserved->statistics, IOInterruptAccountingData, 1); - } + if (reserved) { + if (reserved->statistics) { + 
IODelete(reserved->statistics, IOInterruptAccountingData, 1); + } - IODelete(reserved, ExpansionData, 1); - } + IODelete(reserved, ExpansionData, 1); + } - super::free(); + super::free(); } -void IOInterruptEventSource::enable() +void +IOInterruptEventSource::enable() { - if (provider && intIndex >= 0) { - provider->enableInterrupt(intIndex); - explicitDisable = false; - enabled = true; - } + if (provider && intIndex >= 0) { + provider->enableInterrupt(intIndex); + explicitDisable = false; + enabled = true; + } } -void IOInterruptEventSource::disable() +void +IOInterruptEventSource::disable() { - if (provider && intIndex >= 0) { - provider->disableInterrupt(intIndex); - explicitDisable = true; - enabled = false; - } + if (provider && intIndex >= 0) { + provider->disableInterrupt(intIndex); + explicitDisable = true; + enabled = false; + } } -void IOInterruptEventSource::setWorkLoop(IOWorkLoop *inWorkLoop) +void +IOInterruptEventSource::setWorkLoop(IOWorkLoop *inWorkLoop) { - if (inWorkLoop) super::setWorkLoop(inWorkLoop); + if (inWorkLoop) { + super::setWorkLoop(inWorkLoop); + } - if (provider) { - if (!inWorkLoop) { - if (intIndex >= 0) { - /* - * It isn't necessarily safe to wait until free() to unregister the interrupt; - * our provider may disappear. - */ - unregisterInterruptHandler(provider, intIndex); - intIndex = ~intIndex; - } - } else if ((intIndex < 0) && (kIOReturnSuccess == registerInterruptHandler(provider, ~intIndex))) { - intIndex = ~intIndex; + if (provider) { + if (!inWorkLoop) { + if (intIndex >= 0) { + /* + * It isn't necessarily safe to wait until free() to unregister the interrupt; + * our provider may disappear. + */ + unregisterInterruptHandler(provider, intIndex); + intIndex = ~intIndex; + } + } else if ((intIndex < 0) && (kIOReturnSuccess == registerInterruptHandler(provider, ~intIndex))) { + intIndex = ~intIndex; + } } - } - if (!inWorkLoop) super::setWorkLoop(inWorkLoop); + if (!inWorkLoop) { + super::setWorkLoop(inWorkLoop); + } } -const IOService *IOInterruptEventSource::getProvider() const +const IOService * +IOInterruptEventSource::getProvider() const { - return provider; + return provider; } -int IOInterruptEventSource::getIntIndex() const +int +IOInterruptEventSource::getIntIndex() const { - return intIndex; + return intIndex; } -bool IOInterruptEventSource::getAutoDisable() const +bool +IOInterruptEventSource::getAutoDisable() const { - return autoDisable; + return autoDisable; } -bool IOInterruptEventSource::checkForWork() +bool +IOInterruptEventSource::checkForWork() { - uint64_t startSystemTime = 0; - uint64_t endSystemTime = 0; - uint64_t startCPUTime = 0; - uint64_t endCPUTime = 0; - unsigned int cacheProdCount = producerCount; - int numInts = cacheProdCount - consumerCount; - IOInterruptEventAction intAction = (IOInterruptEventAction) action; - ActionBlock intActionBlock = (ActionBlock) actionBlock; + uint64_t startSystemTime = 0; + uint64_t endSystemTime = 0; + uint64_t startCPUTime = 0; + uint64_t endCPUTime = 0; + unsigned int cacheProdCount = producerCount; + int numInts = cacheProdCount - consumerCount; + IOInterruptEventAction intAction = (IOInterruptEventAction) action; + ActionBlock intActionBlock = (ActionBlock) actionBlock; bool trace = (gIOKitTrace & kIOTraceIntEventSource) ? 
true : false; - - IOStatisticsCheckForWork(); - - if ( numInts > 0 ) - { - if (trace) + + IOStatisticsCheckForWork(); + + if (numInts > 0) { + if (trace) { IOTimeStampStartConstant(IODBG_INTES(IOINTES_ACTION), - VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), - VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + } if (reserved->statistics) { if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingSecondLevelSystemTimeIndex)) { @@ -336,8 +355,11 @@ bool IOInterruptEventSource::checkForWork() } // Call the handler - if (kActionBlock & flags) (intActionBlock)(this, numInts); - else (*intAction)(owner, this, numInts); + if (kActionBlock & flags) { + (intActionBlock)(this, numInts); + } else { + (*intAction)(owner, this, numInts); + } if (reserved->statistics) { if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingSecondLevelCountIndex)) { @@ -354,23 +376,23 @@ bool IOInterruptEventSource::checkForWork() IA_ADD_VALUE(&reserved->statistics->interruptStatistics[kInterruptAccountingSecondLevelSystemTimeIndex], endSystemTime - startSystemTime); } } - - if (trace) + + if (trace) { IOTimeStampEndConstant(IODBG_INTES(IOINTES_ACTION), - VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), - VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); - + VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + } + consumerCount = cacheProdCount; - if (autoDisable && !explicitDisable) + if (autoDisable && !explicitDisable) { enable(); - } - - else if ( numInts < 0 ) - { - if (trace) + } + } else if (numInts < 0) { + if (trace) { IOTimeStampStartConstant(IODBG_INTES(IOINTES_ACTION), - VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), - VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + } if (reserved->statistics) { if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingSecondLevelSystemTimeIndex)) { @@ -381,10 +403,13 @@ bool IOInterruptEventSource::checkForWork() startCPUTime = thread_get_runtime_self(); } } - + // Call the handler - if (kActionBlock & flags) (intActionBlock)(this, numInts); - else (*intAction)(owner, this, numInts); + if (kActionBlock & flags) { + (intActionBlock)(this, numInts); + } else { + (*intAction)(owner, this, numInts); + } if (reserved->statistics) { if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingSecondLevelCountIndex)) { @@ -401,80 +426,90 @@ bool IOInterruptEventSource::checkForWork() IA_ADD_VALUE(&reserved->statistics->interruptStatistics[kInterruptAccountingSecondLevelSystemTimeIndex], endSystemTime - startSystemTime); } } - - if (trace) + + if (trace) { IOTimeStampEndConstant(IODBG_INTES(IOINTES_ACTION), - VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), - VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); - + VM_KERNEL_ADDRHIDE(intAction), VM_KERNEL_ADDRHIDE(owner), + VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(workLoop)); + } + consumerCount = cacheProdCount; - if (autoDisable && !explicitDisable) + if (autoDisable && !explicitDisable) { enable(); + } } - - return false; + + return false; } -void IOInterruptEventSource::normalInterruptOccurred - (void */*refcon*/, IOService */*prov*/, int /*source*/) +void +IOInterruptEventSource::normalInterruptOccurred +(void */*refcon*/, IOService */*prov*/, int /*source*/) { bool trace = (gIOKitTrace & 
kIOTraceIntEventSource) ? true : false; - - IOStatisticsInterrupt(); - producerCount++; - - if (trace) - IOTimeStampStartConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); - - if (reserved->statistics) { - if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) { - IA_ADD_VALUE(&reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1); - } - } - - signalWorkAvailable(); - - if (trace) - IOTimeStampEndConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); + + IOStatisticsInterrupt(); + producerCount++; + + if (trace) { + IOTimeStampStartConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); + } + + if (reserved->statistics) { + if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) { + IA_ADD_VALUE(&reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1); + } + } + + signalWorkAvailable(); + + if (trace) { + IOTimeStampEndConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); + } } -void IOInterruptEventSource::disableInterruptOccurred - (void */*refcon*/, IOService *prov, int source) +void +IOInterruptEventSource::disableInterruptOccurred +(void */*refcon*/, IOService *prov, int source) { bool trace = (gIOKitTrace & kIOTraceIntEventSource) ? true : false; - - prov->disableInterrupt(source); /* disable the interrupt */ - - IOStatisticsInterrupt(); - producerCount++; - - if (trace) - IOTimeStampStartConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); - - if (reserved->statistics) { - if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) { - IA_ADD_VALUE(&reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1); - } - } - - signalWorkAvailable(); - - if (trace) - IOTimeStampEndConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); + + prov->disableInterrupt(source); /* disable the interrupt */ + + IOStatisticsInterrupt(); + producerCount++; + + if (trace) { + IOTimeStampStartConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); + } + + if (reserved->statistics) { + if (IA_GET_STATISTIC_ENABLED(kInterruptAccountingFirstLevelCountIndex)) { + IA_ADD_VALUE(&reserved->statistics->interruptStatistics[kInterruptAccountingFirstLevelCountIndex], 1); + } + } + + signalWorkAvailable(); + + if (trace) { + IOTimeStampEndConstant(IODBG_INTES(IOINTES_SEMA), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(owner)); + } } -void IOInterruptEventSource::interruptOccurred - (void *refcon, IOService *prov, int source) +void +IOInterruptEventSource::interruptOccurred +(void *refcon, IOService *prov, int source) { - if (autoDisable && prov) - disableInterruptOccurred(refcon, prov, source); - else - normalInterruptOccurred(refcon, prov, source); + if (autoDisable && prov) { + disableInterruptOccurred(refcon, prov, source); + } else { + normalInterruptOccurred(refcon, prov, source); + } } -IOReturn IOInterruptEventSource::warmCPU - (uint64_t abstime) +IOReturn +IOInterruptEventSource::warmCPU +(uint64_t abstime) { - return ml_interrupt_prewarm(abstime); } diff --git a/iokit/Kernel/IOKitDebug.cpp b/iokit/Kernel/IOKitDebug.cpp index 0cf42b685..bfbac5edf 100644 --- a/iokit/Kernel/IOKitDebug.cpp +++ b/iokit/Kernel/IOKitDebug.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. 
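/*
 * disableInterruptOccurred() above masks the source at interrupt time
 * and checkForWork() re-enables it once the second-level handler has
 * run -- the usual flow for level-triggered interrupts.  A sketch of
 * that shape; maskHardwareInterrupt(), unmaskHardwareInterrupt(), and
 * serviceDevice() are hypothetical stand-ins for the provider calls.
 */
#include <cstdio>

static bool sAutoDisable     = true;  // level-triggered: mask until serviced
static bool sExplicitDisable = false; // set by an explicit disable() call

static void maskHardwareInterrupt(void)   { printf("masked\n"); }
static void unmaskHardwareInterrupt(void) { printf("unmasked\n"); }
static void serviceDevice(void)           { printf("serviced\n"); }

// Primary (interrupt-time) handler: mask first so the line cannot
// re-fire while the second-level handler is still pending.
static void primaryHandler(void)
{
	if (sAutoDisable) {
		maskHardwareInterrupt();
	}
	// ...signal the workloop here...
}

// Second-level (workloop) handler: service, then conditionally unmask.
static void secondLevelHandler(void)
{
	serviceDevice();
	if (sAutoDisable && !sExplicitDisable) {
		unmaskHardwareInterrupt();
	}
}

int main(void)
{
	primaryHandler();
	secondLevelHandler();
	return 0;
}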
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -68,17 +68,17 @@ static int sysctl_debug_iokit (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - SInt64 newValue; - int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed); - if (changed) { - gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions)); - } - return (error); + SInt64 newValue; + int changed, error = sysctl_io_number(req, gIOKitDebug, sizeof(gIOKitDebug), &newValue, &changed); + if (changed) { + gIOKitDebug = ((gIOKitDebug & ~kIOKitDebugUserOptions) | (newValue & kIOKitDebugUserOptions)); + } + return error; } SYSCTL_PROC(_debug, OID_AUTO, iokit, - CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io"); + CTLTYPE_QUAD | IODEBUG_CTLFLAGS | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + &gIOKitDebug, 0, sysctl_debug_iokit, "Q", "boot_arg io"); int debug_malloc_size; int debug_iomalloc_size; @@ -88,72 +88,76 @@ int debug_container_malloc_size; // int debug_ivars_size; // in OSObject.cpp extern "C" { - #if 0 #define DEBG(fmt, args...) { kprintf(fmt, ## args); } #else #define DEBG(fmt, args...) 
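/*
 * sysctl_debug_iokit above lets user space flip only the bits inside
 * kIOKitDebugUserOptions while kernel-reserved bits survive the write.
 * A self-contained sketch of that masked read-modify-write; the mask
 * value is illustrative, not the real kIOKitDebugUserOptions.
 */
#include <cstdint>
#include <cstdio>

static const uint64_t kUserMask = 0x00000000ffffffffULL; // illustrative
static uint64_t gFlags = 0xabcd000000000000ULL;          // kernel-owned bits set

static void setUserVisibleFlags(uint64_t newValue)
{
	gFlags = (gFlags & ~kUserMask) | (newValue & kUserMask);
}

int main(void)
{
	setUserVisibleFlags(0x1234);          // only the low half takes effect
	printf("flags = 0x%016llx\n", (unsigned long long)gFlags);
	return 0;
}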
{ IOLog(fmt, ## args); } #endif -void IOPrintPlane( const IORegistryPlane * plane ) +void +IOPrintPlane( const IORegistryPlane * plane ) { - IORegistryEntry * next; - IORegistryIterator * iter; - OSOrderedSet * all; - char format[] = "%xxxs"; - IOService * service; - - iter = IORegistryIterator::iterateOver( plane ); - assert( iter ); - all = iter->iterateAll(); - if( all) { - DEBG("Count %d\n", all->getCount() ); - all->release(); - } else - DEBG("Empty\n"); - - iter->reset(); - while( (next = iter->getNextObjectRecursive())) { - snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * next->getDepth( plane )); - DEBG( format, ""); - DEBG( "\033[33m%s", next->getName( plane )); - if( (next->getLocation( plane ))) - DEBG("@%s", next->getLocation( plane )); - DEBG("\033[0m getMetaClass()->getClassName()); - if( (service = OSDynamicCast(IOService, next))) - DEBG(", busy %ld", (long) service->getBusyState()); - DEBG( ">\n"); + IORegistryEntry * next; + IORegistryIterator * iter; + OSOrderedSet * all; + char format[] = "%xxxs"; + IOService * service; + + iter = IORegistryIterator::iterateOver( plane ); + assert( iter ); + all = iter->iterateAll(); + if (all) { + DEBG("Count %d\n", all->getCount()); + all->release(); + } else { + DEBG("Empty\n"); + } + + iter->reset(); + while ((next = iter->getNextObjectRecursive())) { + snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * next->getDepth( plane )); + DEBG( format, ""); + DEBG( "\033[33m%s", next->getName( plane )); + if ((next->getLocation( plane ))) { + DEBG("@%s", next->getLocation( plane )); + } + DEBG("\033[0m getMetaClass()->getClassName()); + if ((service = OSDynamicCast(IOService, next))) { + DEBG(", busy %ld", (long) service->getBusyState()); + } + DEBG( ">\n"); // IOSleep(250); - } - iter->release(); + } + iter->release(); } -void db_piokjunk(void) +void +db_piokjunk(void) { } -void db_dumpiojunk( const IORegistryPlane * plane __unused ) +void +db_dumpiojunk( const IORegistryPlane * plane __unused ) { } -void IOPrintMemory( void ) +void +IOPrintMemory( void ) { - // OSMetaClass::printInstanceCounts(); - IOLog("\n" - "ivar kalloc() 0x%08x\n" - "malloc() 0x%08x\n" - "containers kalloc() 0x%08x\n" - "IOMalloc() 0x%08x\n" - "----------------------------------------\n", - debug_ivars_size, - debug_malloc_size, - debug_container_malloc_size, - debug_iomalloc_size - ); + IOLog("\n" + "ivar kalloc() 0x%08x\n" + "malloc() 0x%08x\n" + "containers kalloc() 0x%08x\n" + "IOMalloc() 0x%08x\n" + "----------------------------------------\n", + debug_ivars_size, + debug_malloc_size, + debug_container_malloc_size, + debug_iomalloc_size + ); } - } /* extern "C" */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -165,51 +169,55 @@ OSDefineMetaClassAndStructors(IOKitDiagnostics, OSObject) OSObject * IOKitDiagnostics::diagnostics( void ) { - IOKitDiagnostics * diags; + IOKitDiagnostics * diags; - diags = new IOKitDiagnostics; - if( diags && !diags->init()) { - diags->release(); - diags = 0; - } + diags = new IOKitDiagnostics; + if (diags && !diags->init()) { + diags->release(); + diags = 0; + } - return( diags ); + return diags; } -void IOKitDiagnostics::updateOffset( OSDictionary * dict, - UInt64 value, const char * name ) +void +IOKitDiagnostics::updateOffset( OSDictionary * dict, + UInt64 value, const char * name ) { - OSNumber * off; + OSNumber * off; - off = OSNumber::withNumber( value, 64 ); - if( !off) - return; + off = OSNumber::withNumber( value, 64 ); + if (!off) { + return; + } - dict->setObject( name, off 
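/*
 * IOPrintPlane() above builds its indentation at runtime: the "xxx" in
 * "%xxxs" is overwritten with the wanted field width, then an empty
 * string is printed padded to that width.  A standalone sketch:
 */
#include <cstdio>

static void printIndented(int depth, const char *name)
{
	char format[] = "%xxxs";                        // room for 3 width digits
	snprintf(format + 1, sizeof(format) - 1, "%ds", 2 * depth);
	printf(format, "");                             // emits 2*depth spaces
	printf("%s\n", name);
}

int main(void)
{
	printIndented(0, "Root");
	printIndented(1, "IOService");
	printIndented(2, "IOResources");
	return 0;
}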
); - off->release(); + dict->setObject( name, off ); + off->release(); } -bool IOKitDiagnostics::serialize(OSSerialize *s) const +bool +IOKitDiagnostics::serialize(OSSerialize *s) const { - OSDictionary * dict; - bool ok; + OSDictionary * dict; + bool ok; - dict = OSDictionary::withCapacity( 5 ); - if( !dict) - return( false ); + dict = OSDictionary::withCapacity( 5 ); + if (!dict) { + return false; + } - updateOffset( dict, debug_ivars_size, "Instance allocation" ); - updateOffset( dict, debug_container_malloc_size, "Container allocation" ); - updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" ); - updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" ); + updateOffset( dict, debug_ivars_size, "Instance allocation" ); + updateOffset( dict, debug_container_malloc_size, "Container allocation" ); + updateOffset( dict, debug_iomalloc_size, "IOMalloc allocation" ); + updateOffset( dict, debug_iomallocpageable_size, "Pageable allocation" ); - OSMetaClass::serializeClassDictionary(dict); + OSMetaClass::serializeClassDictionary(dict); - ok = dict->serialize( s ); + ok = dict->serialize( s ); - dict->release(); + dict->release(); - return( ok ); + return ok; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -221,102 +229,96 @@ bool IOKitDiagnostics::serialize(OSSerialize *s) const #include __private_extern__ "C" void qsort( - void * array, - size_t nmembers, - size_t member_size, - int (*)(const void *, const void *)); + void * array, + size_t nmembers, + size_t member_size, + int (*)(const void *, const void *)); extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); extern "C" ppnum_t pmap_valid_page(ppnum_t pn); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -struct IOTRecursiveLock -{ - lck_mtx_t * mutex; - thread_t thread; - UInt32 count; +struct IOTRecursiveLock { + lck_mtx_t * mutex; + thread_t thread; + UInt32 count; }; -struct IOTrackingQueue -{ - queue_chain_t link; - IOTRecursiveLock lock; - const char * name; - uintptr_t btEntry; - size_t allocSize; - size_t minCaptureSize; - uint32_t siteCount; - uint32_t type; - uint32_t numSiteQs; - uint8_t captureOn; - queue_head_t sites[]; +struct IOTrackingQueue { + queue_chain_t link; + IOTRecursiveLock lock; + const char * name; + uintptr_t btEntry; + size_t allocSize; + size_t minCaptureSize; + uint32_t siteCount; + uint32_t type; + uint32_t numSiteQs; + uint8_t captureOn; + queue_head_t sites[]; }; -struct IOTrackingCallSite -{ - queue_chain_t link; - IOTrackingQueue * queue; - uint32_t crc; +struct IOTrackingCallSite { + queue_chain_t link; + IOTrackingQueue * queue; + uint32_t crc; - vm_tag_t tag; - uint32_t count; - size_t size[2]; - uintptr_t bt[kIOTrackingCallSiteBTs]; + vm_tag_t tag; + uint32_t count; + size_t size[2]; + uintptr_t bt[kIOTrackingCallSiteBTs]; - queue_head_t instances; - IOTracking * addresses; + queue_head_t instances; + IOTracking * addresses; }; -struct IOTrackingLeaksRef -{ - uintptr_t * instances; - uint32_t zoneSize; - uint32_t count; - uint32_t found; - uint32_t foundzlen; - size_t bytes; +struct IOTrackingLeaksRef { + uintptr_t * instances; + uint32_t zoneSize; + uint32_t count; + uint32_t found; + uint32_t foundzlen; + size_t bytes; }; lck_mtx_t * gIOTrackingLock; queue_head_t gIOTrackingQ; -enum -{ - kTrackingAddressFlagAllocated = 0x00000001 +enum{ + kTrackingAddressFlagAllocated = 0x00000001 }; #if defined(__LP64__) -#define IOTrackingAddressFlags(ptr) (ptr->flags) +#define 
IOTrackingAddressFlags(ptr) (ptr->flags) #else -#define IOTrackingAddressFlags(ptr) (ptr->tracking.flags) +#define IOTrackingAddressFlags(ptr) (ptr->tracking.flags) #endif /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static void +static void IOTRecursiveLockLock(IOTRecursiveLock * lock) { - if (lock->thread == current_thread()) lock->count++; - else - { - lck_mtx_lock(lock->mutex); - assert(lock->thread == 0); - assert(lock->count == 0); - lock->thread = current_thread(); - lock->count = 1; - } + if (lock->thread == current_thread()) { + lock->count++; + } else { + lck_mtx_lock(lock->mutex); + assert(lock->thread == 0); + assert(lock->count == 0); + lock->thread = current_thread(); + lock->count = 1; + } } -static void +static void IOTRecursiveLockUnlock(IOTRecursiveLock * lock) { - assert(lock->thread == current_thread()); - if (0 == (--lock->count)) - { - lock->thread = 0; - lck_mtx_unlock(lock->mutex); - } + assert(lock->thread == current_thread()); + if (0 == (--lock->count)) { + lock->thread = 0; + lck_mtx_unlock(lock->mutex); + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -324,42 +326,46 @@ IOTRecursiveLockUnlock(IOTRecursiveLock * lock) void IOTrackingInit(void) { - queue_init(&gIOTrackingQ); - gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); + queue_init(&gIOTrackingQ); + gIOTrackingLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOTrackingQueue * IOTrackingQueueAlloc(const char * name, uintptr_t btEntry, - size_t allocSize, size_t minCaptureSize, - uint32_t type, uint32_t numSiteQs) + size_t allocSize, size_t minCaptureSize, + uint32_t type, uint32_t numSiteQs) { - IOTrackingQueue * queue; - uint32_t idx; - - if (!numSiteQs) numSiteQs = 1; - queue = (typeof(queue)) kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0])); - bzero(queue, sizeof(IOTrackingQueue)); - - queue->name = name; - queue->btEntry = btEntry; - queue->allocSize = allocSize; - queue->minCaptureSize = minCaptureSize; - queue->lock.mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); - queue->numSiteQs = numSiteQs; - queue->type = type; - enum { kFlags = (kIOTracking | kIOTrackingBoot) }; - queue->captureOn = (kFlags == (kFlags & gIOKitDebug)) - || (kIOTrackingQueueTypeDefaultOn & type); - - for (idx = 0; idx < numSiteQs; idx++) queue_init(&queue->sites[idx]); - - lck_mtx_lock(gIOTrackingLock); - queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link); - lck_mtx_unlock(gIOTrackingLock); - - return (queue); + IOTrackingQueue * queue; + uint32_t idx; + + if (!numSiteQs) { + numSiteQs = 1; + } + queue = (typeof(queue))kalloc(sizeof(IOTrackingQueue) + numSiteQs * sizeof(queue->sites[0])); + bzero(queue, sizeof(IOTrackingQueue)); + + queue->name = name; + queue->btEntry = btEntry; + queue->allocSize = allocSize; + queue->minCaptureSize = minCaptureSize; + queue->lock.mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); + queue->numSiteQs = numSiteQs; + queue->type = type; + enum { kFlags = (kIOTracking | kIOTrackingBoot) }; + queue->captureOn = (kFlags == (kFlags & gIOKitDebug)) + || (kIOTrackingQueueTypeDefaultOn & type); + + for (idx = 0; idx < numSiteQs; idx++) { + queue_init(&queue->sites[idx]); + } + + lck_mtx_lock(gIOTrackingLock); + queue_enter(&gIOTrackingQ, queue, IOTrackingQueue *, link); + lck_mtx_unlock(gIOTrackingLock); + + return queue; }; /* * * * * * * * * * * * * * * * 
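/*
 * IOTRecursiveLockLock/Unlock above layer an owner/count pair over a
 * plain mutex so the owning thread may re-enter; only the outermost
 * unlock releases the mutex.  A user-space analogue with pthreads
 * standing in for lck_mtx (the count guard on the owner test is an
 * adaptation -- comparing an unset pthread_t is not meaningful):
 */
#include <pthread.h>
#include <cassert>

struct RecursiveLock {
	pthread_mutex_t mutex;
	pthread_t       thread;
	unsigned int    count;
};

static void
recursiveLockLock(RecursiveLock *lock)
{
	if (lock->count && pthread_equal(lock->thread, pthread_self())) {
		lock->count++;                  // re-entry by the owner
	} else {
		pthread_mutex_lock(&lock->mutex);
		assert(lock->count == 0);
		lock->thread = pthread_self();
		lock->count  = 1;
	}
}

static void
recursiveLockUnlock(RecursiveLock *lock)
{
	assert(lock->count && pthread_equal(lock->thread, pthread_self()));
	if (0 == --lock->count) {           // outermost unlock releases
		pthread_mutex_unlock(&lock->mutex);
	}
}

int main(void)
{
	RecursiveLock lock = { PTHREAD_MUTEX_INITIALIZER, pthread_self(), 0 };
	recursiveLockLock(&lock);
	recursiveLockLock(&lock);           // nested acquire is fine
	recursiveLockUnlock(&lock);
	recursiveLockUnlock(&lock);         // mutex released here
	return 0;
}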
* * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -367,102 +373,102 @@ IOTrackingQueueAlloc(const char * name, uintptr_t btEntry, void IOTrackingQueueFree(IOTrackingQueue * queue) { - lck_mtx_lock(gIOTrackingLock); - IOTrackingReset(queue); - remque(&queue->link); - lck_mtx_unlock(gIOTrackingLock); + lck_mtx_lock(gIOTrackingLock); + IOTrackingReset(queue); + remque(&queue->link); + lck_mtx_unlock(gIOTrackingLock); - lck_mtx_free(queue->lock.mutex, IOLockGroup); + lck_mtx_free(queue->lock.mutex, IOLockGroup); - kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0])); + kfree(queue, sizeof(IOTrackingQueue) + queue->numSiteQs * sizeof(queue->sites[0])); }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* fasthash - The MIT License - - Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com) - - Permission is hereby granted, free of charge, to any person - obtaining a copy of this software and associated documentation - files (the "Software"), to deal in the Software without - restriction, including without limitation the rights to use, copy, - modify, merge, publish, distribute, sublicense, and/or sell copies - of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. -*/ + * The MIT License + * + * Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com) + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ // Compression function for Merkle-Damgard construction. // This function is generated using the framework provided. 
#define mix(h) ({ \ - (h) ^= (h) >> 23; \ - (h) *= 0x2127599bf4325c37ULL; \ - (h) ^= (h) >> 47; }) + (h) ^= (h) >> 23; \ + (h) *= 0x2127599bf4325c37ULL; \ + (h) ^= (h) >> 47; }) static uint64_t fasthash64(const void *buf, size_t len, uint64_t seed) { - const uint64_t m = 0x880355f21e6d1965ULL; - const uint64_t *pos = (const uint64_t *)buf; - const uint64_t *end = pos + (len / 8); - const unsigned char *pos2; - uint64_t h = seed ^ (len * m); - uint64_t v; - - while (pos != end) { - v = *pos++; - h ^= mix(v); - h *= m; - } - - pos2 = (const unsigned char*)pos; - v = 0; - - switch (len & 7) { - case 7: v ^= (uint64_t)pos2[6] << 48; - [[clang::fallthrough]]; - case 6: v ^= (uint64_t)pos2[5] << 40; - [[clang::fallthrough]]; - case 5: v ^= (uint64_t)pos2[4] << 32; - [[clang::fallthrough]]; - case 4: v ^= (uint64_t)pos2[3] << 24; - [[clang::fallthrough]]; - case 3: v ^= (uint64_t)pos2[2] << 16; - [[clang::fallthrough]]; - case 2: v ^= (uint64_t)pos2[1] << 8; - [[clang::fallthrough]]; - case 1: v ^= (uint64_t)pos2[0]; - h ^= mix(v); - h *= m; - } - - return mix(h); -} + const uint64_t m = 0x880355f21e6d1965ULL; + const uint64_t *pos = (const uint64_t *)buf; + const uint64_t *end = pos + (len / 8); + const unsigned char *pos2; + uint64_t h = seed ^ (len * m); + uint64_t v; + + while (pos != end) { + v = *pos++; + h ^= mix(v); + h *= m; + } + + pos2 = (const unsigned char*)pos; + v = 0; + + switch (len & 7) { + case 7: v ^= (uint64_t)pos2[6] << 48; + [[clang::fallthrough]]; + case 6: v ^= (uint64_t)pos2[5] << 40; + [[clang::fallthrough]]; + case 5: v ^= (uint64_t)pos2[4] << 32; + [[clang::fallthrough]]; + case 4: v ^= (uint64_t)pos2[3] << 24; + [[clang::fallthrough]]; + case 3: v ^= (uint64_t)pos2[2] << 16; + [[clang::fallthrough]]; + case 2: v ^= (uint64_t)pos2[1] << 8; + [[clang::fallthrough]]; + case 1: v ^= (uint64_t)pos2[0]; + h ^= mix(v); + h *= m; + } + + return mix(h); +} /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static uint32_t fasthash32(const void *buf, size_t len, uint32_t seed) { - // the following trick converts the 64-bit hashcode to Fermat - // residue, which shall retain information from both the higher - // and lower parts of hashcode. - uint64_t h = fasthash64(buf, len, seed); - return h - (h >> 32); + // the following trick converts the 64-bit hashcode to Fermat + // residue, which shall retain information from both the higher + // and lower parts of hashcode. 
+ uint64_t h = fasthash64(buf, len, seed); + return h - (h >> 32); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -470,47 +476,51 @@ fasthash32(const void *buf, size_t len, uint32_t seed) void IOTrackingAddUser(IOTrackingQueue * queue, IOTrackingUser * mem, vm_size_t size) { - uint32_t num; - proc_t self; - - if (!queue->captureOn) return; - if (size < queue->minCaptureSize) return; - - assert(!mem->link.next); - - num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs); - num = 0; - if ((kernel_task != current_task()) && (self = proc_self())) - { - bool user_64; - mem->btPID = proc_pid(self); - (void)backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, &num, - &user_64); - mem->user32 = !user_64; - proc_rele(self); - } - assert(num <= kIOTrackingCallSiteBTs); - mem->userCount = num; - - IOTRecursiveLockLock(&queue->lock); - queue_enter/*last*/(&queue->sites[0], mem, IOTrackingUser *, link); - queue->siteCount++; - IOTRecursiveLockUnlock(&queue->lock); + uint32_t num; + proc_t self; + + if (!queue->captureOn) { + return; + } + if (size < queue->minCaptureSize) { + return; + } + + assert(!mem->link.next); + + num = backtrace(&mem->bt[0], kIOTrackingCallSiteBTs); + num = 0; + if ((kernel_task != current_task()) && (self = proc_self())) { + bool user_64; + mem->btPID = proc_pid(self); + (void)backtrace_user(&mem->btUser[0], kIOTrackingCallSiteBTs - 1, &num, + &user_64); + mem->user32 = !user_64; + proc_rele(self); + } + assert(num <= kIOTrackingCallSiteBTs); + mem->userCount = num; + + IOTRecursiveLockLock(&queue->lock); + queue_enter/*last*/ (&queue->sites[0], mem, IOTrackingUser *, link); + queue->siteCount++; + IOTRecursiveLockUnlock(&queue->lock); } void IOTrackingRemoveUser(IOTrackingQueue * queue, IOTrackingUser * mem) { - if (!mem->link.next) return; - - IOTRecursiveLockLock(&queue->lock); - if (mem->link.next) - { - remque(&mem->link); - assert(queue->siteCount); - queue->siteCount--; - } - IOTRecursiveLockUnlock(&queue->lock); + if (!mem->link.next) { + return; + } + + IOTRecursiveLockLock(&queue->lock); + if (mem->link.next) { + remque(&mem->link); + assert(queue->siteCount); + queue->siteCount--; + } + IOTRecursiveLockUnlock(&queue->lock); } uint64_t gIOTrackingAddTime; @@ -518,61 +528,74 @@ uint64_t gIOTrackingAddTime; void IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool address, vm_tag_t tag) { - IOTrackingCallSite * site; - uint32_t crc, num; - uintptr_t bt[kIOTrackingCallSiteBTs + 1]; - queue_head_t * que; - - if (mem->site) return; - if (!queue->captureOn) return; - if (size < queue->minCaptureSize) return; - - assert(!mem->link.next); - - num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1); - if (!num) return; - num--; - crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7); - - IOTRecursiveLockLock(&queue->lock); - que = &queue->sites[crc % queue->numSiteQs]; - queue_iterate(que, site, IOTrackingCallSite *, link) - { - if (tag != site->tag) continue; - if (crc == site->crc) break; - } - - if (queue_end(que, (queue_entry_t) site)) - { - site = (typeof(site)) kalloc(sizeof(IOTrackingCallSite)); - - queue_init(&site->instances); - site->addresses = (IOTracking *) &site->instances; - site->queue = queue; - site->crc = crc; - site->count = 0; - site->tag = tag; - memset(&site->size[0], 0, sizeof(site->size)); - bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0])); - assert(num <= kIOTrackingCallSiteBTs); - bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0])); - - 
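/*
 * IOTrackingAdd() above keys call sites by hashing the captured
 * backtrace (minus its first frame) with fasthash32(), seeded with the
 * CRC-32 polynomial 0x04C11DB7, then buckets sites by crc % numSiteQs.
 * An illustration of that use, assuming the fasthash32() defined above
 * is visible here; the frame values are fake:
 */
#include <cstdint>
#include <cstdio>

enum { kBTs = 16, kSiteQs = 7 };

int main(void)
{
	uintptr_t bt[kBTs + 1] = { 0xfff01000, 0xfff02000, 0xfff03000, 0xfff04000 };
	uint32_t  num = 4;

	num--;                               // drop the capture frame itself
	uint32_t crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7);
	printf("call-site key 0x%08x -> bucket %u of %u\n",
	    crc, crc % kSiteQs, (unsigned)kSiteQs);
	return 0;
}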
queue_enter_first(que, site, IOTrackingCallSite *, link); - queue->siteCount++; - } - - if (address) - { - queue_enter/*last*/(&site->instances, mem, IOTracking *, link); - if (queue_end(&site->instances, (queue_entry_t)site->addresses)) site->addresses = mem; - } - else queue_enter_first(&site->instances, mem, IOTracking *, link); - - mem->site = site; - site->size[0] += size; - site->count++; - - IOTRecursiveLockUnlock(&queue->lock); + IOTrackingCallSite * site; + uint32_t crc, num; + uintptr_t bt[kIOTrackingCallSiteBTs + 1]; + queue_head_t * que; + + if (mem->site) { + return; + } + if (!queue->captureOn) { + return; + } + if (size < queue->minCaptureSize) { + return; + } + + assert(!mem->link.next); + + num = backtrace(&bt[0], kIOTrackingCallSiteBTs + 1); + if (!num) { + return; + } + num--; + crc = fasthash32(&bt[1], num * sizeof(bt[0]), 0x04C11DB7); + + IOTRecursiveLockLock(&queue->lock); + que = &queue->sites[crc % queue->numSiteQs]; + queue_iterate(que, site, IOTrackingCallSite *, link) + { + if (tag != site->tag) { + continue; + } + if (crc == site->crc) { + break; + } + } + + if (queue_end(que, (queue_entry_t) site)) { + site = (typeof(site))kalloc(sizeof(IOTrackingCallSite)); + + queue_init(&site->instances); + site->addresses = (IOTracking *) &site->instances; + site->queue = queue; + site->crc = crc; + site->count = 0; + site->tag = tag; + memset(&site->size[0], 0, sizeof(site->size)); + bcopy(&bt[1], &site->bt[0], num * sizeof(site->bt[0])); + assert(num <= kIOTrackingCallSiteBTs); + bzero(&site->bt[num], (kIOTrackingCallSiteBTs - num) * sizeof(site->bt[0])); + + queue_enter_first(que, site, IOTrackingCallSite *, link); + queue->siteCount++; + } + + if (address) { + queue_enter/*last*/ (&site->instances, mem, IOTracking *, link); + if (queue_end(&site->instances, (queue_entry_t)site->addresses)) { + site->addresses = mem; + } + } else { + queue_enter_first(&site->instances, mem, IOTracking *, link); + } + + mem->site = site; + site->size[0] += size; + site->count++; + + IOTRecursiveLockUnlock(&queue->lock); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -580,34 +603,36 @@ IOTrackingAdd(IOTrackingQueue * queue, IOTracking * mem, size_t size, bool addre void IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size) { - if (!mem->link.next) return; - - IOTRecursiveLockLock(&queue->lock); - if (mem->link.next) - { - assert(mem->site); - - if (mem == mem->site->addresses) mem->site->addresses = (IOTracking *) queue_next(&mem->link); - remque(&mem->link); - - assert(mem->site->count); - mem->site->count--; - assert(mem->site->size[0] >= size); - mem->site->size[0] -= size; - if (!mem->site->count) - { - assert(queue_empty(&mem->site->instances)); - assert(!mem->site->size[0]); - assert(!mem->site->size[1]); - - remque(&mem->site->link); - assert(queue->siteCount); - queue->siteCount--; - kfree(mem->site, sizeof(IOTrackingCallSite)); - } - mem->site = NULL; - } - IOTRecursiveLockUnlock(&queue->lock); + if (!mem->link.next) { + return; + } + + IOTRecursiveLockLock(&queue->lock); + if (mem->link.next) { + assert(mem->site); + + if (mem == mem->site->addresses) { + mem->site->addresses = (IOTracking *) queue_next(&mem->link); + } + remque(&mem->link); + + assert(mem->site->count); + mem->site->count--; + assert(mem->site->size[0] >= size); + mem->site->size[0] -= size; + if (!mem->site->count) { + assert(queue_empty(&mem->site->instances)); + assert(!mem->site->size[0]); + assert(!mem->site->size[1]); + + 
remque(&mem->site->link); + assert(queue->siteCount); + queue->siteCount--; + kfree(mem->site, sizeof(IOTrackingCallSite)); + } + mem->site = NULL; + } + IOTRecursiveLockUnlock(&queue->lock); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -615,19 +640,23 @@ IOTrackingRemove(IOTrackingQueue * queue, IOTracking * mem, size_t size) void IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size) { - IOTrackingAddress * tracking; - - if (!queue->captureOn) return; - if (size < queue->minCaptureSize) return; - - address = ~address; - tracking = (typeof(tracking)) kalloc(sizeof(IOTrackingAddress)); - bzero(tracking, sizeof(IOTrackingAddress)); - IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated; - tracking->address = address; - tracking->size = size; - - IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE); + IOTrackingAddress * tracking; + + if (!queue->captureOn) { + return; + } + if (size < queue->minCaptureSize) { + return; + } + + address = ~address; + tracking = (typeof(tracking))kalloc(sizeof(IOTrackingAddress)); + bzero(tracking, sizeof(IOTrackingAddress)); + IOTrackingAddressFlags(tracking) |= kTrackingAddressFlagAllocated; + tracking->address = address; + tracking->size = size; + + IOTrackingAdd(queue, &tracking->tracking, size, true, VM_KERN_MEMORY_NONE); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -635,33 +664,36 @@ IOTrackingAlloc(IOTrackingQueue * queue, uintptr_t address, size_t size) void IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size) { - IOTrackingCallSite * site; - IOTrackingAddress * tracking; - uint32_t idx; - bool done; - - address = ~address; - IOTRecursiveLockLock(&queue->lock); - done = false; - for (idx = 0; idx < queue->numSiteQs; idx++) - { - queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link) - { - for (tracking = (IOTrackingAddress *) site->addresses; - !done && !queue_end(&site->instances, &tracking->tracking.link); - tracking = (IOTrackingAddress *) queue_next(&tracking->tracking.link)) - { - if ((done = (address == tracking->address))) - { - IOTrackingRemove(queue, &tracking->tracking, size); - kfree(tracking, sizeof(IOTrackingAddress)); - } - } - if (done) break; - } - if (done) break; - } - IOTRecursiveLockUnlock(&queue->lock); + IOTrackingCallSite * site; + IOTrackingAddress * tracking; + uint32_t idx; + bool done; + + address = ~address; + IOTRecursiveLockLock(&queue->lock); + done = false; + for (idx = 0; idx < queue->numSiteQs; idx++) { + queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link) + { + tracking = (IOTrackingAddress *) site->addresses; + while (!queue_end(&site->instances, &tracking->tracking.link)) { + if ((done = (address == tracking->address))) { + IOTrackingRemove(queue, &tracking->tracking, size); + kfree(tracking, sizeof(IOTrackingAddress)); + break; + } else { + tracking = (IOTrackingAddress *) queue_next(&tracking->tracking.link); + } + } + if (done) { + break; + } + } + if (done) { + break; + } + } + IOTRecursiveLockUnlock(&queue->lock); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -669,14 +701,14 @@ IOTrackingFree(IOTrackingQueue * queue, uintptr_t address, size_t size) void IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size) { - IOTRecursiveLockLock(&queue->lock); - if (mem->link.next) - { - assert(mem->site); - assert((size > 0) || (mem->site->size[1] >= 
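/*
 * Note the address = ~address in IOTrackingAlloc/IOTrackingFree above:
 * the bookkeeping record stores the complement so it never holds a
 * value that looks like a pointer into the tracked allocation --
 * otherwise the leak scanner would find a false reference in its own
 * metadata.  A standalone sketch with a hypothetical address:
 */
#include <cstdint>
#include <cstdio>

struct TrackedAddress {
	uintptr_t address;  // stored complemented
	size_t    size;
};

int main(void)
{
	uintptr_t p = 0xffffff8012345678UL;         // hypothetical allocation
	TrackedAddress t = { ~p, 64 };

	printf("stored  0x%lx\n", (unsigned long)t.address);  // not a pointer
	printf("actual  0x%lx\n", (unsigned long)~t.address); // recovered
	return 0;
}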
-size)); - mem->site->size[1] += size; - }; - IOTRecursiveLockUnlock(&queue->lock); + IOTRecursiveLockLock(&queue->lock); + if (mem->link.next) { + assert(mem->site); + assert((size > 0) || (mem->site->size[1] >= -size)); + mem->site->size[1] += size; + } + ; + IOTRecursiveLockUnlock(&queue->lock); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -684,46 +716,40 @@ IOTrackingAccumSize(IOTrackingQueue * queue, IOTracking * mem, size_t size) void IOTrackingReset(IOTrackingQueue * queue) { - IOTrackingCallSite * site; - IOTrackingUser * user; - IOTracking * tracking; - IOTrackingAddress * trackingAddress; - uint32_t idx; - bool addresses; - - IOTRecursiveLockLock(&queue->lock); - for (idx = 0; idx < queue->numSiteQs; idx++) - { - while (!queue_empty(&queue->sites[idx])) - { - if (kIOTrackingQueueTypeMap & queue->type) - { - queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link); - user->link.next = user->link.prev = NULL; - } - else - { - queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link); - addresses = false; - while (!queue_empty(&site->instances)) - { - queue_remove_first(&site->instances, tracking, IOTracking *, link); - if (tracking == site->addresses) addresses = true; - if (addresses) - { - trackingAddress = (typeof(trackingAddress)) tracking; - if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) - { - kfree(tracking, sizeof(IOTrackingAddress)); - } - } - } - kfree(site, sizeof(IOTrackingCallSite)); - } - } - } - queue->siteCount = 0; - IOTRecursiveLockUnlock(&queue->lock); + IOTrackingCallSite * site; + IOTrackingUser * user; + IOTracking * tracking; + IOTrackingAddress * trackingAddress; + uint32_t idx; + bool addresses; + + IOTRecursiveLockLock(&queue->lock); + for (idx = 0; idx < queue->numSiteQs; idx++) { + while (!queue_empty(&queue->sites[idx])) { + if (kIOTrackingQueueTypeMap & queue->type) { + queue_remove_first(&queue->sites[idx], user, IOTrackingUser *, link); + user->link.next = user->link.prev = NULL; + } else { + queue_remove_first(&queue->sites[idx], site, IOTrackingCallSite *, link); + addresses = false; + while (!queue_empty(&site->instances)) { + queue_remove_first(&site->instances, tracking, IOTracking *, link); + if (tracking == site->addresses) { + addresses = true; + } + if (addresses) { + trackingAddress = (typeof(trackingAddress))tracking; + if (kTrackingAddressFlagAllocated & IOTrackingAddressFlags(trackingAddress)) { + kfree(tracking, sizeof(IOTrackingAddress)); + } + } + } + kfree(site, sizeof(IOTrackingCallSite)); + } + } + } + queue->siteCount = 0; + IOTRecursiveLockUnlock(&queue->lock); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -731,14 +757,14 @@ IOTrackingReset(IOTrackingQueue * queue) static int IOTrackingCallSiteInfoCompare(const void * left, const void * right) { - IOTrackingCallSiteInfo * l = (typeof(l)) left; - IOTrackingCallSiteInfo * r = (typeof(r)) right; - size_t lsize, rsize; + IOTrackingCallSiteInfo * l = (typeof(l))left; + IOTrackingCallSiteInfo * r = (typeof(r))right; + size_t lsize, rsize; - rsize = r->size[0] + r->size[1]; - lsize = l->size[0] + l->size[1]; + rsize = r->size[0] + r->size[1]; + lsize = l->size[0] + l->size[1]; - return ((rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1)); + return (rsize > lsize) ? 1 : ((rsize == lsize) ? 
0 : -1); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -746,34 +772,40 @@ IOTrackingCallSiteInfoCompare(const void * left, const void * right) static int IOTrackingAddressCompare(const void * left, const void * right) { - IOTracking * instance; - uintptr_t inst, laddr, raddr; - - inst = ((typeof(inst) *) left)[0]; - instance = (typeof(instance)) INSTANCE_GET(inst); - if (kInstanceFlagAddress & inst) laddr = ~((IOTrackingAddress *)instance)->address; - else laddr = (uintptr_t) (instance + 1); + IOTracking * instance; + uintptr_t inst, laddr, raddr; + + inst = ((typeof(inst) *)left)[0]; + instance = (typeof(instance))INSTANCE_GET(inst); + if (kInstanceFlagAddress & inst) { + laddr = ~((IOTrackingAddress *)instance)->address; + } else { + laddr = (uintptr_t) (instance + 1); + } - inst = ((typeof(inst) *) right)[0]; - instance = (typeof(instance)) (inst & ~kInstanceFlags); - if (kInstanceFlagAddress & inst) raddr = ~((IOTrackingAddress *)instance)->address; - else raddr = (uintptr_t) (instance + 1); + inst = ((typeof(inst) *)right)[0]; + instance = (typeof(instance))(inst & ~kInstanceFlags); + if (kInstanceFlagAddress & inst) { + raddr = ~((IOTrackingAddress *)instance)->address; + } else { + raddr = (uintptr_t) (instance + 1); + } - return ((laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1)); + return (laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1); } static int IOTrackingZoneElementCompare(const void * left, const void * right) { - uintptr_t inst, laddr, raddr; + uintptr_t inst, laddr, raddr; - inst = ((typeof(inst) *) left)[0]; - laddr = INSTANCE_PUT(inst); - inst = ((typeof(inst) *) right)[0]; - raddr = INSTANCE_PUT(inst); + inst = ((typeof(inst) *)left)[0]; + laddr = INSTANCE_PUT(inst); + inst = ((typeof(inst) *)right)[0]; + raddr = INSTANCE_PUT(inst); - return ((laddr > raddr) ? 1 : ((laddr == raddr) ? 0 : -1)); + return (laddr > raddr) ? 1 : ((laddr == raddr) ? 
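/*
 * IOTrackingCallSiteInfoCompare above is a plain qsort() comparator
 * that sorts descending by total size: it returns +1 when the *right*
 * element is larger, so the biggest sites come first.  A compilable
 * sketch of the same comparator shape:
 */
#include <cstdlib>
#include <cstdio>

struct SiteInfo {
	size_t size[2];
};

static int
compareBySizeDesc(const void *left, const void *right)
{
	const SiteInfo *l = (const SiteInfo *)left;
	const SiteInfo *r = (const SiteInfo *)right;
	size_t lsize = l->size[0] + l->size[1];
	size_t rsize = r->size[0] + r->size[1];

	return (rsize > lsize) ? 1 : ((rsize == lsize) ? 0 : -1);
}

int main(void)
{
	SiteInfo sites[] = { {{ 10, 0 }}, {{ 300, 5 }}, {{ 42, 1 }} };

	qsort(sites, 3, sizeof(sites[0]), &compareBySizeDesc);
	for (int i = 0; i < 3; i++) {
		printf("%zu\n", sites[i].size[0] + sites[i].size[1]); // 305 43 10
	}
	return 0;
}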
0 : -1); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -781,21 +813,19 @@ IOTrackingZoneElementCompare(const void * left, const void * right) static void CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteInfo) { - uint32_t j; - mach_vm_address_t bt, btEntry; - - btEntry = site->queue->btEntry; - for (j = 0; j < kIOTrackingCallSiteBTs; j++) - { - bt = site->bt[j]; - if (btEntry - && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) - { - bt = btEntry; - btEntry = 0; - } - siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt); - } + uint32_t j; + mach_vm_address_t bt, btEntry; + + btEntry = site->queue->btEntry; + for (j = 0; j < kIOTrackingCallSiteBTs; j++) { + bt = site->bt[j]; + if (btEntry + && (!bt || (j == (kIOTrackingCallSiteBTs - 1)))) { + bt = btEntry; + btEntry = 0; + } + siteInfo->bt[0][j] = VM_KERNEL_UNSLIDE(bt); + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -803,91 +833,88 @@ CopyOutKernelBacktrace(IOTrackingCallSite * site, IOTrackingCallSiteInfo * siteI static void IOTrackingLeakScan(void * refcon) { - IOTrackingLeaksRef * ref = (typeof(ref)) refcon; - uintptr_t * instances; - IOTracking * instance; - uint64_t vaddr, vincr; - ppnum_t ppn; - uintptr_t ptr, addr, vphysaddr, inst; - size_t size, origsize; - uint32_t baseIdx, lim, ptrIdx, count; - boolean_t is; - AbsoluteTime deadline; - - instances = ref->instances; - count = ref->count; - size = origsize = ref->zoneSize; - - for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS; - ; - vaddr += vincr) - { - if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) - { - if (deadline) - { - ml_set_interrupts_enabled(is); - IODelay(10); - } - if (vaddr >= VM_MAX_KERNEL_ADDRESS) break; - is = ml_set_interrupts_enabled(false); - clock_interval_to_deadline(10, kMillisecondScale, &deadline); - } - - ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr); - // check noencrypt to avoid VM structs (map entries) with pointers - if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) ppn = 0; - if (!ppn) continue; - - for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) - { - ptr = ((uintptr_t *)vphysaddr)[ptrIdx]; - - for (lim = count, baseIdx = 0; lim; lim >>= 1) - { - inst = instances[baseIdx + (lim >> 1)]; - instance = (typeof(instance)) INSTANCE_GET(inst); - - if (ref->zoneSize) - { - addr = INSTANCE_PUT(inst) & ~kInstanceFlags; - } - else if (kInstanceFlagAddress & inst) - { - addr = ~((IOTrackingAddress *)instance)->address; - origsize = size = ((IOTrackingAddress *)instance)->size; - if (!size) size = 1; - } - else - { - addr = (uintptr_t) (instance + 1); - origsize = size = instance->site->queue->allocSize; - } - if ((ptr >= addr) && (ptr < (addr + size)) - - && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr) - || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) - { - if (!(kInstanceFlagReferenced & inst)) - { - inst |= kInstanceFlagReferenced; - instances[baseIdx + (lim >> 1)] = inst; - ref->found++; - if (!origsize) ref->foundzlen++; - } - break; - } - if (ptr > addr) - { - // move right - baseIdx += (lim >> 1) + 1; - lim--; - } - // else move left - } - } - ref->bytes += page_size; - } + IOTrackingLeaksRef * ref = (typeof(ref))refcon; + uintptr_t * instances; + IOTracking * instance; + uint64_t vaddr, vincr; + ppnum_t ppn; + uintptr_t ptr, addr, vphysaddr, inst; + size_t size, origsize; + uint32_t baseIdx, lim, ptrIdx, count; + boolean_t 
is; + AbsoluteTime deadline; + + instances = ref->instances; + count = ref->count; + size = origsize = ref->zoneSize; + + for (deadline = 0, vaddr = VM_MIN_KERNEL_AND_KEXT_ADDRESS; + ; + vaddr += vincr) { + if ((mach_absolute_time() > deadline) || (vaddr >= VM_MAX_KERNEL_ADDRESS)) { + if (deadline) { + ml_set_interrupts_enabled(is); + IODelay(10); + } + if (vaddr >= VM_MAX_KERNEL_ADDRESS) { + break; + } + is = ml_set_interrupts_enabled(false); + clock_interval_to_deadline(10, kMillisecondScale, &deadline); + } + + ppn = kernel_pmap_present_mapping(vaddr, &vincr, &vphysaddr); + // check noencrypt to avoid VM structs (map entries) with pointers + if (ppn && (!pmap_valid_page(ppn) || (!ref->zoneSize && pmap_is_noencrypt(ppn)))) { + ppn = 0; + } + if (!ppn) { + continue; + } + + for (ptrIdx = 0; ptrIdx < (page_size / sizeof(uintptr_t)); ptrIdx++) { + ptr = ((uintptr_t *)vphysaddr)[ptrIdx]; + + for (lim = count, baseIdx = 0; lim; lim >>= 1) { + inst = instances[baseIdx + (lim >> 1)]; + instance = (typeof(instance))INSTANCE_GET(inst); + + if (ref->zoneSize) { + addr = INSTANCE_PUT(inst) & ~kInstanceFlags; + } else if (kInstanceFlagAddress & inst) { + addr = ~((IOTrackingAddress *)instance)->address; + origsize = size = ((IOTrackingAddress *)instance)->size; + if (!size) { + size = 1; + } + } else { + addr = (uintptr_t) (instance + 1); + origsize = size = instance->site->queue->allocSize; + } + if ((ptr >= addr) && (ptr < (addr + size)) + + && (((vaddr + ptrIdx * sizeof(uintptr_t)) < addr) + || ((vaddr + ptrIdx * sizeof(uintptr_t)) >= (addr + size)))) { + if (!(kInstanceFlagReferenced & inst)) { + inst |= kInstanceFlagReferenced; + instances[baseIdx + (lim >> 1)] = inst; + ref->found++; + if (!origsize) { + ref->foundzlen++; + } + } + break; + } + if (ptr > addr) { + // move right + baseIdx += (lim >> 1) + 1; + lim--; + } + // else move left + } + } + ref->bytes += page_size; + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -895,118 +922,119 @@ IOTrackingLeakScan(void * refcon) extern "C" void zone_leaks_scan(uintptr_t * instances, uint32_t count, uint32_t zoneSize, uint32_t * found) { - IOTrackingLeaksRef ref; - IOTrackingCallSiteInfo siteInfo; - uint32_t idx; - - qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare); - - bzero(&siteInfo, sizeof(siteInfo)); - bzero(&ref, sizeof(ref)); - ref.instances = instances; - ref.count = count; - ref.zoneSize = zoneSize; - - for (idx = 0; idx < 2; idx++) - { - ref.bytes = 0; - IOTrackingLeakScan(&ref); - IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found); - if (count <= ref.found) break; - } - - *found = ref.found; + IOTrackingLeaksRef ref; + IOTrackingCallSiteInfo siteInfo; + uint32_t idx; + + qsort(instances, count, sizeof(*instances), &IOTrackingZoneElementCompare); + + bzero(&siteInfo, sizeof(siteInfo)); + bzero(&ref, sizeof(ref)); + ref.instances = instances; + ref.count = count; + ref.zoneSize = zoneSize; + + for (idx = 0; idx < 2; idx++) { + ref.bytes = 0; + IOTrackingLeakScan(&ref); + IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d\n", idx, ref.bytes / 1024 / 1024, count, ref.found); + if (count <= ref.found) { + break; + } + } + + *found = ref.found; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static void ZoneSiteProc(void * refCon, uint32_t siteCount, uint32_t zoneSize, - uintptr_t * backtrace, uint32_t btCount) + uintptr_t * backtrace, uint32_t btCount) { 
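/*
 * The inner loop of IOTrackingLeakScan() above is a binary search: the
 * instance list is sorted by start address, and for every scanned word
 * the (baseIdx, lim) pair halves the range looking for an allocation
 * [addr, addr + size) containing the value.  A sketch with plain
 * ranges standing in for the tagged instance pointers:
 */
#include <cstdint>
#include <cstdio>

struct Range {
	uintptr_t addr;
	size_t    size;
};

// ranges[] must be sorted by addr, mirroring the qsort() before the scan.
static bool
pointsIntoTracked(uintptr_t ptr, const Range *ranges, uint32_t count)
{
	uint32_t baseIdx = 0;

	for (uint32_t lim = count; lim; lim >>= 1) {
		const Range *r = &ranges[baseIdx + (lim >> 1)];
		if ((ptr >= r->addr) && (ptr < (r->addr + r->size))) {
			return true;                 // ptr references this allocation
		}
		if (ptr > r->addr) {
			baseIdx += (lim >> 1) + 1;   // move right
			lim--;
		}
		// else move left
	}
	return false;
}

int main(void)
{
	Range ranges[] = { { 0x1000, 0x100 }, { 0x4000, 0x40 }, { 0x9000, 0x1000 } };

	printf("%d %d\n",
	    pointsIntoTracked(0x4020, ranges, 3),    // 1: inside second range
	    pointsIntoTracked(0x5000, ranges, 3));   // 0: in no range
	return 0;
}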
- IOTrackingCallSiteInfo siteInfo; - OSData * leakData; - uint32_t idx; + IOTrackingCallSiteInfo siteInfo; + OSData * leakData; + uint32_t idx; - leakData = (typeof(leakData)) refCon; + leakData = (typeof(leakData))refCon; - bzero(&siteInfo, sizeof(siteInfo)); - siteInfo.count = siteCount; - siteInfo.size[0] = zoneSize * siteCount; + bzero(&siteInfo, sizeof(siteInfo)); + siteInfo.count = siteCount; + siteInfo.size[0] = zoneSize * siteCount; - for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++) - { - siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]); - } + for (idx = 0; (idx < btCount) && (idx < kIOTrackingCallSiteBTs); idx++) { + siteInfo.bt[0][idx] = VM_KERNEL_UNSLIDE(backtrace[idx]); + } - leakData->appendBytes(&siteInfo, sizeof(siteInfo)); + leakData->appendBytes(&siteInfo, sizeof(siteInfo)); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static OSData * -IOTrackingLeaks(OSData * data) +IOTrackingLeaks(LIBKERN_CONSUMED OSData * data) { - IOTrackingLeaksRef ref; - IOTrackingCallSiteInfo siteInfo; - IOTrackingCallSite * site; - OSData * leakData; - uintptr_t * instances; - IOTracking * instance; - uintptr_t inst; - uint32_t count, idx, numSites, dups, siteCount; - - instances = (typeof(instances)) data->getBytesNoCopy(); - count = (data->getLength() / sizeof(*instances)); - qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare); - - bzero(&siteInfo, sizeof(siteInfo)); - bzero(&ref, sizeof(ref)); - ref.instances = instances; - ref.count = count; - for (idx = 0; idx < 2; idx++) - { - ref.bytes = 0; - IOTrackingLeakScan(&ref); - IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen); - if (count <= ref.found) break; - } - - leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo)); - - for (numSites = 0, idx = 0; idx < count; idx++) - { - inst = instances[idx]; - if (kInstanceFlagReferenced & inst) continue; - instance = (typeof(instance)) INSTANCE_GET(inst); - site = instance->site; - instances[numSites] = (uintptr_t) site; - numSites++; - } - - for (idx = 0; idx < numSites; idx++) - { - inst = instances[idx]; - if (!inst) continue; - site = (typeof(site)) inst; - for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) - { - if (instances[dups] == (uintptr_t) site) - { - siteCount++; - instances[dups] = 0; - } + IOTrackingLeaksRef ref; + IOTrackingCallSiteInfo siteInfo; + IOTrackingCallSite * site; + OSData * leakData; + uintptr_t * instances; + IOTracking * instance; + uintptr_t inst; + uint32_t count, idx, numSites, dups, siteCount; + + instances = (typeof(instances))data->getBytesNoCopy(); + count = (data->getLength() / sizeof(*instances)); + qsort(instances, count, sizeof(*instances), &IOTrackingAddressCompare); + + bzero(&siteInfo, sizeof(siteInfo)); + bzero(&ref, sizeof(ref)); + ref.instances = instances; + ref.count = count; + for (idx = 0; idx < 2; idx++) { + ref.bytes = 0; + IOTrackingLeakScan(&ref); + IOLog("leaks(%d) scanned %ld MB, instance count %d, found %d (zlen %d)\n", idx, ref.bytes / 1024 / 1024, count, ref.found, ref.foundzlen); + if (count <= ref.found) { + break; + } + } + + leakData = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo)); + + for (numSites = 0, idx = 0; idx < count; idx++) { + inst = instances[idx]; + if (kInstanceFlagReferenced & inst) { + continue; + } + instance = (typeof(instance))INSTANCE_GET(inst); + site = instance->site; + instances[numSites] = 
(uintptr_t) site; + numSites++; } - siteInfo.count = siteCount; - siteInfo.size[0] = (site->size[0] * site->count) / siteCount; - siteInfo.size[1] = (site->size[1] * site->count) / siteCount;; - CopyOutKernelBacktrace(site, &siteInfo); - leakData->appendBytes(&siteInfo, sizeof(siteInfo)); - } - data->release(); - - return (leakData); + + for (idx = 0; idx < numSites; idx++) { + inst = instances[idx]; + if (!inst) { + continue; + } + site = (typeof(site))inst; + for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) { + if (instances[dups] == (uintptr_t) site) { + siteCount++; + instances[dups] = 0; + } + } + siteInfo.count = siteCount; + siteInfo.size[0] = (site->size[0] * site->count) / siteCount; + siteInfo.size[1] = (site->size[1] * site->count) / siteCount;; + CopyOutKernelBacktrace(site, &siteInfo); + leakData->appendBytes(&siteInfo, sizeof(siteInfo)); + } + data->release(); + + return leakData; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1014,29 +1042,31 @@ IOTrackingLeaks(OSData * data) static bool SkipName(uint32_t options, const char * name, size_t namesLen, const char * names) { - const char * scan; - const char * next; - bool exclude, found; - size_t qLen, sLen; - - if (!namesLen || !names) return (false); - // ...<0> - exclude = (0 != (kIOTrackingExcludeNames & options)); - qLen = strlen(name); - scan = names; - found = false; - do - { - sLen = scan[0]; - scan++; - next = scan + sLen; - if (next >= (names + namesLen)) break; - found = ((sLen == qLen) && !strncmp(scan, name, sLen)); - scan = next; - } - while (!found && (scan < (names + namesLen))); - - return (!(exclude ^ found)); + const char * scan; + const char * next; + bool exclude, found; + size_t qLen, sLen; + + if (!namesLen || !names) { + return false; + } + // ...<0> + exclude = (0 != (kIOTrackingExcludeNames & options)); + qLen = strlen(name); + scan = names; + found = false; + do{ + sLen = scan[0]; + scan++; + next = scan + sLen; + if (next >= (names + namesLen)) { + break; + } + found = ((sLen == qLen) && !strncmp(scan, name, sLen)); + scan = next; + }while (!found && (scan < (names + namesLen))); + + return !(exclude ^ found); } #endif /* IOTRACKING */ @@ -1045,286 +1075,335 @@ SkipName(uint32_t options, const char * name, size_t namesLen, const char * name static kern_return_t IOTrackingDebug(uint32_t selector, uint32_t options, uint64_t value, - uint32_t intag, uint32_t inzsize, - const char * names, size_t namesLen, - size_t size, OSObject ** result) + uint32_t intag, uint32_t inzsize, + const char * names, size_t namesLen, + size_t size, OSObject ** result) { - kern_return_t ret; - OSData * data; + kern_return_t ret; + OSData * data; - if (result) *result = 0; - data = 0; - ret = kIOReturnNotReady; + if (result) { + *result = 0; + } + data = 0; + ret = kIOReturnNotReady; #if IOTRACKING - kern_return_t kr; - IOTrackingQueue * queue; - IOTracking * instance; - IOTrackingCallSite * site; - IOTrackingCallSiteInfo siteInfo; - IOTrackingUser * user; - task_t mapTask; - mach_vm_address_t mapAddress; - mach_vm_size_t mapSize; - uint32_t num, idx, qIdx; - uintptr_t instFlags; - proc_t proc; - bool addresses; - - ret = kIOReturnNotFound; - proc = NULL; - if (kIOTrackingGetMappings == selector) - { - if (value != -1ULL) - { - proc = proc_find(value); - if (!proc) return (kIOReturnNotFound); + kern_return_t kr; + IOTrackingQueue * queue; + IOTracking * instance; + IOTrackingCallSite * site; + IOTrackingCallSiteInfo siteInfo; + IOTrackingUser * user; + task_t 
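/*
 * SkipName() above walks a blob of length-prefixed names (one length
 * byte, then that many characters, ending in a zero-length terminator)
 * and XORs the match result with the exclude option.  A sketch of just
 * the lookup over that wire format:
 */
#include <cstring>
#include <cstdio>

static bool
nameInList(const char *name, const char *names, size_t namesLen)
{
	size_t qLen = strlen(name);
	const char *scan = names;

	while (scan < (names + namesLen)) {
		size_t sLen = (unsigned char) scan[0];
		scan++;
		if ((scan + sLen) > (names + namesLen)) {
			break;                       // truncated entry
		}
		if ((sLen == qLen) && !strncmp(scan, name, sLen)) {
			return true;
		}
		scan += sLen;
	}
	return false;
}

int main(void)
{
	const char list[] = "\x05" "IOBMD" "\x08" "IOMalloc";

	printf("%d %d\n",
	    nameInList("IOMalloc", list, sizeof(list) - 1),   // 1
	    nameInList("IOSurface", list, sizeof(list) - 1)); // 0
	return 0;
}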
mapTask; + mach_vm_address_t mapAddress; + mach_vm_size_t mapSize; + uint32_t num, idx, qIdx; + uintptr_t instFlags; + proc_t proc; + bool addresses; + + ret = kIOReturnNotFound; + proc = NULL; + if (kIOTrackingGetMappings == selector) { + if (value != -1ULL) { + proc = proc_find(value); + if (!proc) { + return kIOReturnNotFound; + } + } } - } - - bzero(&siteInfo, sizeof(siteInfo)); - lck_mtx_lock(gIOTrackingLock); - queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link) - { - if (SkipName(options, queue->name, namesLen, names)) continue; - - if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) continue; - - switch (selector) - { - case kIOTrackingResetTracking: - { - IOTrackingReset(queue); - ret = kIOReturnSuccess; - break; - } - - case kIOTrackingStartCapture: - case kIOTrackingStopCapture: - { - queue->captureOn = (kIOTrackingStartCapture == selector); - ret = kIOReturnSuccess; - break; - } - - case kIOTrackingSetMinCaptureSize: - { - queue->minCaptureSize = size; - ret = kIOReturnSuccess; - break; - } - - case kIOTrackingLeaks: - { - if (!(kIOTrackingQueueTypeAlloc & queue->type)) break; - - if (!data) data = OSData::withCapacity(1024 * sizeof(uintptr_t)); - - IOTRecursiveLockLock(&queue->lock); - for (idx = 0; idx < queue->numSiteQs; idx++) - { - queue_iterate(&queue->sites[idx], site, IOTrackingCallSite *, link) - { - addresses = false; - queue_iterate(&site->instances, instance, IOTracking *, link) - { - if (instance == site->addresses) addresses = true; - instFlags = (typeof(instFlags)) instance; - if (addresses) instFlags |= kInstanceFlagAddress; - data->appendBytes(&instFlags, sizeof(instFlags)); - } - } - } - // queue is locked - ret = kIOReturnSuccess; - break; - } - - - case kIOTrackingGetTracking: - { - if (kIOTrackingQueueTypeMap & queue->type) break; - - if (!data) data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo)); - - IOTRecursiveLockLock(&queue->lock); - num = queue->siteCount; - idx = 0; - for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) - { - queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link) - { - assert(idx < num); - idx++; - - size_t tsize[2]; - uint32_t count = site->count; - tsize[0] = site->size[0]; - tsize[1] = site->size[1]; - - if (intag || inzsize) - { - uintptr_t addr; - vm_size_t size, zoneSize; - vm_tag_t tag; - - if (kIOTrackingQueueTypeAlloc & queue->type) - { - addresses = false; - count = 0; - tsize[0] = tsize[1] = 0; - queue_iterate(&site->instances, instance, IOTracking *, link) - { - if (instance == site->addresses) addresses = true; - - if (addresses) addr = ~((IOTrackingAddress *)instance)->address; - else addr = (uintptr_t) (instance + 1); - - kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize); - if (KERN_SUCCESS != kr) continue; - - if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) continue; - if (inzsize && (inzsize != zoneSize)) continue; - - count++; - tsize[0] += size; - } - } - else - { - if (!intag || inzsize || (intag != site->tag)) continue; - } - } - - if (!count) continue; - if (size && ((tsize[0] + tsize[1]) < size)) continue; - - siteInfo.count = count; - siteInfo.size[0] = tsize[0]; - siteInfo.size[1] = tsize[1]; - - CopyOutKernelBacktrace(site, &siteInfo); - data->appendBytes(&siteInfo, sizeof(siteInfo)); - } - } - assert(idx == num); - IOTRecursiveLockUnlock(&queue->lock); - ret = kIOReturnSuccess; - break; - } - - case kIOTrackingGetMappings: - { - if (!(kIOTrackingQueueTypeMap & queue->type)) break; - if (!data) data = OSData::withCapacity(page_size); - 
- IOTRecursiveLockLock(&queue->lock); - num = queue->siteCount; - idx = 0; - for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) - { - queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link) - { - assert(idx < num); - idx++; - - kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize); - if (kIOReturnSuccess != kr) continue; - if (proc && (mapTask != proc_task(proc))) continue; - if (size && (mapSize < size)) continue; - - siteInfo.count = 1; - siteInfo.size[0] = mapSize; - siteInfo.address = mapAddress; - siteInfo.addressPID = task_pid(mapTask); - siteInfo.btPID = user->btPID; - - for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) - { - siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]); - } - uint32_t * bt32 = (typeof(bt32)) &user->btUser[0]; - uint64_t * bt64 = (typeof(bt64)) ((void *) &user->btUser[0]); - for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) - { - if (j >= user->userCount) siteInfo.bt[1][j] = 0; - else if (user->user32) siteInfo.bt[1][j] = bt32[j]; - else siteInfo.bt[1][j] = bt64[j]; - } - data->appendBytes(&siteInfo, sizeof(siteInfo)); - } - } - assert(idx == num); - IOTRecursiveLockUnlock(&queue->lock); - ret = kIOReturnSuccess; - break; - } - - default: - ret = kIOReturnUnsupported; - break; - } - } - - if ((kIOTrackingLeaks == selector) && data) - { - data = IOTrackingLeaks(data); - queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link) - { - if (SkipName(options, queue->name, namesLen, names)) continue; - if (!(kIOTrackingQueueTypeAlloc & queue->type)) continue; - IOTRecursiveLockUnlock(&queue->lock); - } - } - - lck_mtx_unlock(gIOTrackingLock); - - if ((kIOTrackingLeaks == selector) && namesLen && names) - { - const char * scan; - const char * next; - size_t sLen; - - if (!data) data = OSData::withCapacity(4096 * sizeof(uintptr_t)); - - // ...<0> - scan = names; - do - { - sLen = scan[0]; - scan++; - next = scan + sLen; - if (next >= (names + namesLen)) break; - kr = zone_leaks(scan, sLen, &ZoneSiteProc, data); - if (KERN_SUCCESS == kr) ret = kIOReturnSuccess; - else if (KERN_INVALID_NAME != kr) ret = kIOReturnVMError; - scan = next; - } - while (scan < (names + namesLen)); - } - - if (data) switch (selector) - { - case kIOTrackingLeaks: - case kIOTrackingGetTracking: - case kIOTrackingGetMappings: + + bzero(&siteInfo, sizeof(siteInfo)); + lck_mtx_lock(gIOTrackingLock); + queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link) { - IOTrackingCallSiteInfo * siteInfos; - siteInfos = (typeof(siteInfos)) data->getBytesNoCopy(); - num = (data->getLength() / sizeof(*siteInfos)); - qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare); - break; + if (SkipName(options, queue->name, namesLen, names)) { + continue; + } + + if (!(kIOTracking & gIOKitDebug) && (kIOTrackingQueueTypeAlloc & queue->type)) { + continue; + } + + switch (selector) { + case kIOTrackingResetTracking: + { + IOTrackingReset(queue); + ret = kIOReturnSuccess; + break; + } + + case kIOTrackingStartCapture: + case kIOTrackingStopCapture: + { + queue->captureOn = (kIOTrackingStartCapture == selector); + ret = kIOReturnSuccess; + break; + } + + case kIOTrackingSetMinCaptureSize: + { + queue->minCaptureSize = size; + ret = kIOReturnSuccess; + break; + } + + case kIOTrackingLeaks: + { + if (!(kIOTrackingQueueTypeAlloc & queue->type)) { + break; + } + + if (!data) { + data = OSData::withCapacity(1024 * sizeof(uintptr_t)); + } + + IOTRecursiveLockLock(&queue->lock); + for (idx = 0; idx < queue->numSiteQs; idx++) { + queue_iterate(&queue->sites[idx], site, 
IOTrackingCallSite *, link) + { + addresses = false; + queue_iterate(&site->instances, instance, IOTracking *, link) + { + if (instance == site->addresses) { + addresses = true; + } + instFlags = (typeof(instFlags))instance; + if (addresses) { + instFlags |= kInstanceFlagAddress; + } + data->appendBytes(&instFlags, sizeof(instFlags)); + } + } + } + // queue is locked + ret = kIOReturnSuccess; + break; + } + + + case kIOTrackingGetTracking: + { + if (kIOTrackingQueueTypeMap & queue->type) { + break; + } + + if (!data) { + data = OSData::withCapacity(128 * sizeof(IOTrackingCallSiteInfo)); + } + + IOTRecursiveLockLock(&queue->lock); + num = queue->siteCount; + idx = 0; + for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) { + queue_iterate(&queue->sites[qIdx], site, IOTrackingCallSite *, link) + { + assert(idx < num); + idx++; + + size_t tsize[2]; + uint32_t count = site->count; + tsize[0] = site->size[0]; + tsize[1] = site->size[1]; + + if (intag || inzsize) { + uintptr_t addr; + vm_size_t size, zoneSize; + vm_tag_t tag; + + if (kIOTrackingQueueTypeAlloc & queue->type) { + addresses = false; + count = 0; + tsize[0] = tsize[1] = 0; + queue_iterate(&site->instances, instance, IOTracking *, link) + { + if (instance == site->addresses) { + addresses = true; + } + + if (addresses) { + addr = ~((IOTrackingAddress *)instance)->address; + } else { + addr = (uintptr_t) (instance + 1); + } + + kr = vm_kern_allocation_info(addr, &size, &tag, &zoneSize); + if (KERN_SUCCESS != kr) { + continue; + } + + if ((VM_KERN_MEMORY_NONE != intag) && (intag != tag)) { + continue; + } + if (inzsize && (inzsize != zoneSize)) { + continue; + } + + count++; + tsize[0] += size; + } + } else { + if (!intag || inzsize || (intag != site->tag)) { + continue; + } + } + } + + if (!count) { + continue; + } + if (size && ((tsize[0] + tsize[1]) < size)) { + continue; + } + + siteInfo.count = count; + siteInfo.size[0] = tsize[0]; + siteInfo.size[1] = tsize[1]; + + CopyOutKernelBacktrace(site, &siteInfo); + data->appendBytes(&siteInfo, sizeof(siteInfo)); + } + } + assert(idx == num); + IOTRecursiveLockUnlock(&queue->lock); + ret = kIOReturnSuccess; + break; + } + + case kIOTrackingGetMappings: + { + if (!(kIOTrackingQueueTypeMap & queue->type)) { + break; + } + if (!data) { + data = OSData::withCapacity(page_size); + } + + IOTRecursiveLockLock(&queue->lock); + num = queue->siteCount; + idx = 0; + for (qIdx = 0; qIdx < queue->numSiteQs; qIdx++) { + queue_iterate(&queue->sites[qIdx], user, IOTrackingUser *, link) + { + assert(idx < num); + idx++; + + kr = IOMemoryMapTracking(user, &mapTask, &mapAddress, &mapSize); + if (kIOReturnSuccess != kr) { + continue; + } + if (proc && (mapTask != proc_task(proc))) { + continue; + } + if (size && (mapSize < size)) { + continue; + } + + siteInfo.count = 1; + siteInfo.size[0] = mapSize; + siteInfo.address = mapAddress; + siteInfo.addressPID = task_pid(mapTask); + siteInfo.btPID = user->btPID; + + for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) { + siteInfo.bt[0][j] = VM_KERNEL_UNSLIDE(user->bt[j]); + } + uint32_t * bt32 = (typeof(bt32)) & user->btUser[0]; + uint64_t * bt64 = (typeof(bt64))((void *) &user->btUser[0]); + for (uint32_t j = 0; j < kIOTrackingCallSiteBTs; j++) { + if (j >= user->userCount) { + siteInfo.bt[1][j] = 0; + } else if (user->user32) { + siteInfo.bt[1][j] = bt32[j]; + } else { + siteInfo.bt[1][j] = bt64[j]; + } + } + data->appendBytes(&siteInfo, sizeof(siteInfo)); + } + } + assert(idx == num); + IOTRecursiveLockUnlock(&queue->lock); + ret = kIOReturnSuccess; + break; + } + 
+ default: + ret = kIOReturnUnsupported; + break; + } + } + + if ((kIOTrackingLeaks == selector) && data) { + data = IOTrackingLeaks(data); + queue_iterate(&gIOTrackingQ, queue, IOTrackingQueue *, link) + { + if (SkipName(options, queue->name, namesLen, names)) { + continue; + } + if (!(kIOTrackingQueueTypeAlloc & queue->type)) { + continue; + } + IOTRecursiveLockUnlock(&queue->lock); + } + } + + lck_mtx_unlock(gIOTrackingLock); + + if ((kIOTrackingLeaks == selector) && namesLen && names) { + const char * scan; + const char * next; + size_t sLen; + + if (!data) { + data = OSData::withCapacity(4096 * sizeof(uintptr_t)); + } + + // ...<0> + scan = names; + do{ + sLen = scan[0]; + scan++; + next = scan + sLen; + if (next >= (names + namesLen)) { + break; + } + kr = zone_leaks(scan, sLen, &ZoneSiteProc, data); + if (KERN_SUCCESS == kr) { + ret = kIOReturnSuccess; + } else if (KERN_INVALID_NAME != kr) { + ret = kIOReturnVMError; + } + scan = next; + }while (scan < (names + namesLen)); + } + + if (data) { + switch (selector) { + case kIOTrackingLeaks: + case kIOTrackingGetTracking: + case kIOTrackingGetMappings: + { + IOTrackingCallSiteInfo * siteInfos; + siteInfos = (typeof(siteInfos))data->getBytesNoCopy(); + num = (data->getLength() / sizeof(*siteInfos)); + qsort(siteInfos, num, sizeof(*siteInfos), &IOTrackingCallSiteInfoCompare); + break; + } + default: assert(false); break; + } } - default: assert(false); break; - } - *result = data; - if (proc) proc_rele(proc); + *result = data; + if (proc) { + proc_rele(proc); + } #endif /* IOTRACKING */ - return (ret); + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1342,53 +1421,64 @@ OSDefineMetaClassAndStructors(IOKitDiagnosticsClient, IOUserClient) IOUserClient * IOKitDiagnosticsClient::withTask(task_t owningTask) { - IOKitDiagnosticsClient * inst; + IOKitDiagnosticsClient * inst; - inst = new IOKitDiagnosticsClient; - if (inst && !inst->init()) - { - inst->release(); - inst = 0; - } + inst = new IOKitDiagnosticsClient; + if (inst && !inst->init()) { + inst->release(); + inst = 0; + } - return (inst); + return inst; } -IOReturn IOKitDiagnosticsClient::clientClose(void) +IOReturn +IOKitDiagnosticsClient::clientClose(void) { - terminate(); - return (kIOReturnSuccess); + terminate(); + return kIOReturnSuccess; } -IOReturn IOKitDiagnosticsClient::setProperties(OSObject * properties) +IOReturn +IOKitDiagnosticsClient::setProperties(OSObject * properties) { - IOReturn kr = kIOReturnUnsupported; - return (kr); + IOReturn kr = kIOReturnUnsupported; + return kr; } -IOReturn IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args, - IOExternalMethodDispatch * dispatch, OSObject * target, void * reference) +IOReturn +IOKitDiagnosticsClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args, + IOExternalMethodDispatch * dispatch, OSObject * target, void * reference) { - IOReturn ret = kIOReturnBadArgument; - const IOKitDiagnosticsParameters * params; - const char * names; - size_t namesLen; - OSObject * result; - - if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) return (kIOReturnBadArgument); - params = (typeof(params)) args->structureInput; - if (!params) return (kIOReturnBadArgument); + IOReturn ret = kIOReturnBadArgument; + const IOKitDiagnosticsParameters * params; + const char * names; + size_t namesLen; + OSObject * result; + + if (args->structureInputSize < sizeof(IOKitDiagnosticsParameters)) { + return kIOReturnBadArgument; 
+ } + params = (typeof(params))args->structureInput; + if (!params) { + return kIOReturnBadArgument; + } - names = 0; - namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters); - if (namesLen) names = (typeof(names))(params + 1); + names = 0; + namesLen = args->structureInputSize - sizeof(IOKitDiagnosticsParameters); + if (namesLen) { + names = (typeof(names))(params + 1); + } - ret = IOTrackingDebug(selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result); + ret = IOTrackingDebug(selector, params->options, params->value, params->tag, params->zsize, names, namesLen, params->size, &result); - if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) *args->structureVariableOutputData = result; - else if (result) result->release(); + if ((kIOReturnSuccess == ret) && args->structureVariableOutputData) { + *args->structureVariableOutputData = result; + } else if (result) { + result->release(); + } - return (ret); + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ diff --git a/iokit/Kernel/IOKitKernelInternal.h b/iokit/Kernel/IOKitKernelInternal.h index 4b0cf6ffc..436b19793 100644 --- a/iokit/Kernel/IOKitKernelInternal.h +++ b/iokit/Kernel/IOKitKernelInternal.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
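Note on the method above: IOKitDiagnosticsClient::externalMethod expects its structure input to begin with an IOKitDiagnosticsParameters block followed immediately by the packed queue names, each encoded as a one-byte length followed by the name bytes — the same encoding the zone_leaks scan walks with `sLen = scan[0]; scan++`. A minimal user-space sketch of that packing follows; the params struct is a stand-in (the real field layout lives in the IOKit headers and may differ), and the buffer size and names are invented for illustration.

    #include <stdint.h>
    #include <string.h>

    /* Stand-in for IOKitDiagnosticsParameters; hypothetical layout. */
    struct diag_params_stub {
        uint32_t options;
        uint64_t value;
        uint32_t tag;
        uint32_t zsize;
        uint64_t size;
    };

    /* Append one length-prefixed name; returns the new offset. */
    static size_t append_name(uint8_t *buf, size_t off, const char *name)
    {
        size_t len = strlen(name);      /* must fit in one byte */
        buf[off++] = (uint8_t)len;
        memcpy(buf + off, name, len);
        return off + len;
    }

    /* Usage sketch: params first, then the packed names.
     *   uint8_t req[256];
     *   memcpy(req, &params, sizeof(struct diag_params_stub));
     *   size_t off = append_name(req, sizeof(struct diag_params_stub),
     *                            "kalloc.48");
     */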
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -44,19 +44,19 @@ __BEGIN_DECLS #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) -#define IOServiceTrace(csc, a, b, c, d) do { \ - if(kIOTraceIOService & gIOKitTrace) { \ - KERNEL_DEBUG_CONSTANT(IODBG_IOSERVICE(csc), a, b, c, d, 0); \ - } \ +#define IOServiceTrace(csc, a, b, c, d) do { \ + if(kIOTraceIOService & gIOKitTrace) { \ + KERNEL_DEBUG_CONSTANT(IODBG_IOSERVICE(csc), a, b, c, d, 0); \ + } \ } while(0) #else /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ -#define IOServiceTrace(csc, a, b, c, d) do { \ - (void)a; \ - (void)b; \ - (void)c; \ - (void)d; \ +#define IOServiceTrace(csc, a, b, c, d) do { \ + (void)a; \ + (void)b; \ + (void)c; \ + (void)d; \ } while (0) #endif /* (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) */ @@ -67,33 +67,32 @@ typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref) void IOLibInit(void); kern_return_t IOIteratePageableMaps(vm_size_t size, - IOIteratePageableMapsCallback callback, void * ref); + IOIteratePageableMapsCallback callback, void * ref); vm_map_t IOPageableMapForAddress(uintptr_t address); -struct IOMemoryDescriptorMapAllocRef -{ - vm_map_t map; - mach_vm_address_t mapped; - mach_vm_size_t size; - vm_prot_t prot; - vm_tag_t tag; - IOOptionBits options; +struct IOMemoryDescriptorMapAllocRef { + vm_map_t map; + mach_vm_address_t mapped; + mach_vm_size_t size; + vm_prot_t prot; + vm_tag_t tag; + IOOptionBits options; }; -kern_return_t +kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * ref); mach_vm_address_t -IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, - mach_vm_size_t alignment, bool contiguous); +IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, + mach_vm_size_t alignment, bool contiguous); void IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size); #if IOTRACKING IOReturn IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task, - mach_vm_address_t * address, mach_vm_size_t * size); + mach_vm_address_t * address, mach_vm_size_t * size); #endif /* IOTRACKING */ extern vm_size_t debug_iomallocpageable_size; @@ -108,90 +107,86 @@ extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t size); __END_DECLS -#define __IODEQUALIFY(type, expr) \ - ({ typeof(expr) expr_ = (type)(uintptr_t)(expr); \ +#define __IODEQUALIFY(type, expr) \ + ({ typeof(expr) expr_ = (type)(uintptr_t)(expr); \ (type)(uintptr_t)(expr_); }) -struct IODMACommandInternal -{ - IOMDDMAWalkSegmentState fState; - IOMDDMACharacteristics fMDSummary; +struct IODMACommandInternal { + IOMDDMAWalkSegmentState fState; + IOMDDMACharacteristics fMDSummary; + + UInt64 fPreparedOffset; + UInt64 fPreparedLength; - UInt64 fPreparedOffset; - UInt64 fPreparedLength; + UInt32 fSourceAlignMask; - UInt32 fSourceAlignMask; - - UInt8 fCursor; - UInt8 fCheckAddressing; - UInt8 fIterateOnly; - UInt8 fMisaligned; - UInt8 fMapContig; - UInt8 fPrepared; - UInt8 fDoubleBuffer; - UInt8 fNewMD; - UInt8 fLocalMapperAllocValid; - UInt8 fIOVMAddrValid; - UInt8 fForceDoubleBuffer; - UInt8 fSetActiveNoMapper; + UInt8 fCursor; + UInt8 fCheckAddressing; + UInt8 fIterateOnly; + UInt8 fMisaligned; + UInt8 fMapContig; + UInt8 fPrepared; + UInt8 fDoubleBuffer; + UInt8 fNewMD; + UInt8 fLocalMapperAllocValid; + UInt8 fIOVMAddrValid; + UInt8 fForceDoubleBuffer; + UInt8 fSetActiveNoMapper; - vm_page_t fCopyPageAlloc; - vm_page_t fCopyNext; - vm_page_t fNextRemapPage; + vm_page_t fCopyPageAlloc; + vm_page_t fCopyNext; + vm_page_t fNextRemapPage; - ppnum_t 
fCopyPageCount; + ppnum_t fCopyPageCount; - uint64_t fLocalMapperAlloc; - uint64_t fLocalMapperAllocLength; + uint64_t fLocalMapperAlloc; + uint64_t fLocalMapperAllocLength; - class IOBufferMemoryDescriptor * fCopyMD; + class IOBufferMemoryDescriptor * fCopyMD; - IOService * fDevice; + IOService * fDevice; - // IODMAEventSource use - IOReturn fStatus; - UInt64 fActualByteCount; - AbsoluteTime fTimeStamp; + // IODMAEventSource use + IOReturn fStatus; + UInt64 fActualByteCount; + AbsoluteTime fTimeStamp; }; struct IOMemoryDescriptorDevicePager { - void * devicePager; - unsigned int pagerContig:1; - unsigned int unused:31; - IOMemoryDescriptor * memory; + void * devicePager; + unsigned int pagerContig:1; + unsigned int unused:31; + IOMemoryDescriptor * memory; }; struct IOMemoryDescriptorReserved { - IOMemoryDescriptorDevicePager dp; - uint64_t preparationID; - // for kernel IOMD subclasses... they have no expansion - uint64_t kernReserved[4]; - vm_tag_t kernelTag; - vm_tag_t userTag; + IOMemoryDescriptorDevicePager dp; + uint64_t preparationID; + // for kernel IOMD subclasses... they have no expansion + uint64_t kernReserved[4]; + vm_tag_t kernelTag; + vm_tag_t userTag; }; -struct iopa_t -{ - IOLock * lock; - queue_head_t list; - vm_size_t pagecount; - vm_size_t bytecount; +struct iopa_t { + IOLock * lock; + queue_head_t list; + vm_size_t pagecount; + vm_size_t bytecount; }; -struct iopa_page_t -{ - queue_chain_t link; - uint64_t avail; - uint32_t signature; +struct iopa_page_t { + queue_chain_t link; + uint64_t avail; + uint32_t signature; }; typedef struct iopa_page_t iopa_page_t; typedef uintptr_t (*iopa_proc_t)(iopa_t * a); -enum -{ - kIOPageAllocSignature = 'iopa' +enum{ + kIOPageAllocSignature = 'iopa' }; extern "C" void iopa_init(iopa_t * a); @@ -215,7 +210,7 @@ extern "C" void IOKitInitializeTime( void ); extern "C" OSString * IOCopyLogNameForPID(int pid); extern "C" void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size, - void (*output)(const char *format, ...)); + void (*output)(const char *format, ...)); #if defined(__i386__) || defined(__x86_64__) #ifndef __cplusplus @@ -223,8 +218,8 @@ extern "C" void IOKitKernelLogBuffer(const char * title, const void * buffer, si #endif extern const OSSymbol * gIOCreateEFIDevicePathSymbol; -extern "C" void IOSetKeyStoreData(IOMemoryDescriptor * data); -extern "C" void IOSetAPFSKeyStoreData(IOMemoryDescriptor* data); +extern "C" void IOSetKeyStoreData(LIBKERN_CONSUMED IOMemoryDescriptor * data); +extern "C" void IOSetAPFSKeyStoreData(LIBKERN_CONSUMED IOMemoryDescriptor* data); #endif extern const OSSymbol * gAKSGetKey; diff --git a/iokit/Kernel/IOLib.cpp b/iokit/Kernel/IOLib.cpp index 385ce056f..0dedff70f 100644 --- a/iokit/Kernel/IOLib.cpp +++ b/iokit/Kernel/IOLib.cpp @@ -1,8 +1,8 @@ -/* +/* * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
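The LIBKERN_CONSUMED annotations added to IOSetKeyStoreData and IOSetAPFSKeyStoreData above document, for the static analyzer, that these functions take over the caller's reference to the memory descriptor. A hedged C sketch of what that contract means in practice; the refcounted object and the names store_consumed/g_stored are invented for the illustration.

    #include <stdlib.h>

    typedef struct { int refcount; } obj_t;

    static void obj_release(obj_t *o)
    {
        if (--o->refcount == 0) {
            free(o);
        }
    }

    static obj_t *g_stored;      /* like the saved keystore descriptor */

    /* Consumed parameter: the caller must NOT release `data` afterwards. */
    void store_consumed(obj_t *data)
    {
        if (g_stored) {
            obj_release(g_stored);  /* drop the previously owned reference */
        }
        g_stored = data;            /* keep data's reference; no extra retain */
    }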
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -42,11 +42,11 @@ #include #include -#include -#include +#include +#include #include #include -#include +#include #include "IOKitKernelInternal.h" @@ -76,25 +76,23 @@ do { \ #endif /* IOKITSTATS */ -#define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug)) +#define TRACK_ALLOC (IOTRACKING && (kIOTracking & gIOKitDebug)) extern "C" { - - mach_timespec_t IOZeroTvalspec = { 0, 0 }; extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); extern int __doprnt( - const char *fmt, - va_list argp, - void (*putc)(int, void *), + const char *fmt, + va_list argp, + void (*putc)(int, void *), void *arg, - int radix, - int is_log); + int radix, + int is_log); extern void cons_putc_locked(char); extern void bsd_log_lock(void); @@ -103,7 +101,7 @@ extern void bsd_log_unlock(void); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -lck_grp_t *IOLockGroup; +lck_grp_t *IOLockGroup; /* * Global variables for use by iLogger @@ -111,10 +109,10 @@ lck_grp_t *IOLockGroup; * Binary compatibility is not guaranteed for kexts that reference these symbols. */ -void *_giDebugLogInternal = NULL; -void *_giDebugLogDataInternal = NULL; -void *_giDebugReserved1 = NULL; -void *_giDebugReserved2 = NULL; +void *_giDebugLogInternal = NULL; +void *_giDebugLogDataInternal = NULL; +void *_giDebugReserved1 = NULL; +void *_giDebugReserved2 = NULL; iopa_t gIOBMDPageAllocator; @@ -136,16 +134,16 @@ enum { kIOPageableMaxMapSize = 96 * 1024 * 1024 }; #endif typedef struct { - vm_map_t map; - vm_offset_t address; - vm_offset_t end; + vm_map_t map; + vm_offset_t address; + vm_offset_t end; } IOMapData; static struct { - UInt32 count; - UInt32 hint; - IOMapData maps[ kIOMaxPageableMaps ]; - lck_mtx_t * lock; + UInt32 count; + UInt32 hint; + IOMapData maps[kIOMaxPageableMaps]; + lck_mtx_t * lock; } gIOKitPageableSpace; static iopa_t gIOPageablePageAllocator; @@ -160,333 +158,350 @@ IOTrackingQueue * gIOMapTracking; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IOLibInit(void) +void +IOLibInit(void) { - kern_return_t ret; + kern_return_t ret; - static bool libInitialized; + static bool libInitialized; - if(libInitialized) - return; + if (libInitialized) { + return; + } - IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL); + IOLockGroup = lck_grp_alloc_init("IOKit", LCK_GRP_ATTR_NULL); #if IOTRACKING - IOTrackingInit(); - gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0, - kIOTrackingQueueTypeAlloc, - 37); - gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0); - - size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? 
page_size : (1024*1024); - gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize, - kIOTrackingQueueTypeDefaultOn - | kIOTrackingQueueTypeMap - | kIOTrackingQueueTypeUser, - 0); + IOTrackingInit(); + gIOMallocTracking = IOTrackingQueueAlloc(kIOMallocTrackingName, 0, 0, 0, + kIOTrackingQueueTypeAlloc, + 37); + gIOWireTracking = IOTrackingQueueAlloc(kIOWireTrackingName, 0, 0, page_size, 0, 0); + + size_t mapCaptureSize = (kIOTracking & gIOKitDebug) ? page_size : (1024 * 1024); + gIOMapTracking = IOTrackingQueueAlloc(kIOMapTrackingName, 0, 0, mapCaptureSize, + kIOTrackingQueueTypeDefaultOn + | kIOTrackingQueueTypeMap + | kIOTrackingQueueTypeUser, + 0); #endif - gIOKitPageableSpace.maps[0].address = 0; - ret = kmem_suballoc(kernel_map, - &gIOKitPageableSpace.maps[0].address, - kIOPageableMapSize, - TRUE, - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_IOKIT, - &gIOKitPageableSpace.maps[0].map); - if (ret != KERN_SUCCESS) - panic("failed to allocate iokit pageable map\n"); + gIOKitPageableSpace.maps[0].address = 0; + ret = kmem_suballoc(kernel_map, + &gIOKitPageableSpace.maps[0].address, + kIOPageableMapSize, + TRUE, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_IOKIT, + &gIOKitPageableSpace.maps[0].map); + if (ret != KERN_SUCCESS) { + panic("failed to allocate iokit pageable map\n"); + } - gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); - gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize; - gIOKitPageableSpace.hint = 0; - gIOKitPageableSpace.count = 1; + gIOKitPageableSpace.lock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); + gIOKitPageableSpace.maps[0].end = gIOKitPageableSpace.maps[0].address + kIOPageableMapSize; + gIOKitPageableSpace.hint = 0; + gIOKitPageableSpace.count = 1; - gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); - queue_init( &gIOMallocContiguousEntries ); + gIOMallocContiguousEntriesLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); + queue_init( &gIOMallocContiguousEntries ); - gIOPageAllocChunkBytes = PAGE_SIZE/64; - assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes); - iopa_init(&gIOBMDPageAllocator); - iopa_init(&gIOPageablePageAllocator); + gIOPageAllocChunkBytes = PAGE_SIZE / 64; + assert(sizeof(iopa_page_t) <= gIOPageAllocChunkBytes); + iopa_init(&gIOBMDPageAllocator); + iopa_init(&gIOPageablePageAllocator); - libInitialized = true; + libInitialized = true; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static uint32_t +static uint32_t log2up(uint32_t size) { - if (size <= 1) size = 0; - else size = 32 - __builtin_clz(size - 1); - return (size); + if (size <= 1) { + size = 0; + } else { + size = 32 - __builtin_clz(size - 1); + } + return size; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOThread IOCreateThread(IOThreadFunc fcn, void *arg) +IOThread +IOCreateThread(IOThreadFunc fcn, void *arg) { - kern_return_t result; - thread_t thread; + kern_return_t result; + thread_t thread; result = kernel_thread_start((thread_continue_t)fcn, arg, &thread); - if (result != KERN_SUCCESS) - return (NULL); + if (result != KERN_SUCCESS) { + return NULL; + } thread_deallocate(thread); - return (thread); + return thread; } -void IOExitThread(void) +void +IOExitThread(void) { - (void) thread_terminate(current_thread()); + (void) thread_terminate(current_thread()); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #if 
IOTRACKING -struct IOLibMallocHeader -{ - IOTrackingAddress tracking; +struct IOLibMallocHeader { + IOTrackingAddress tracking; }; #endif #if IOTRACKING -#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress))) +#define sizeofIOLibMallocHeader (sizeof(IOLibMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress))) #else -#define sizeofIOLibMallocHeader (0) +#define sizeofIOLibMallocHeader (0) #endif /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void * IOMalloc(vm_size_t size) +void * +IOMalloc(vm_size_t size) { - void * address; - vm_size_t allocSize; + void * address; + vm_size_t allocSize; - allocSize = size + sizeofIOLibMallocHeader; + allocSize = size + sizeofIOLibMallocHeader; #if IOTRACKING - if (sizeofIOLibMallocHeader && (allocSize <= size)) return (NULL); // overflow + if (sizeofIOLibMallocHeader && (allocSize <= size)) { + return NULL; // overflow + } #endif - address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT); + address = kalloc_tag_bt(allocSize, VM_KERN_MEMORY_IOKIT); - if ( address ) { + if (address) { #if IOTRACKING - if (TRACK_ALLOC) { - IOLibMallocHeader * hdr; - hdr = (typeof(hdr)) address; - bzero(&hdr->tracking, sizeof(hdr->tracking)); - hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader); - hdr->tracking.size = size; - IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE); - } + if (TRACK_ALLOC) { + IOLibMallocHeader * hdr; + hdr = (typeof(hdr))address; + bzero(&hdr->tracking, sizeof(hdr->tracking)); + hdr->tracking.address = ~(((uintptr_t) address) + sizeofIOLibMallocHeader); + hdr->tracking.size = size; + IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE); + } #endif - address = (typeof(address)) (((uintptr_t) address) + sizeofIOLibMallocHeader); + address = (typeof(address))(((uintptr_t) address) + sizeofIOLibMallocHeader); #if IOALLOCDEBUG - OSAddAtomic(size, &debug_iomalloc_size); + OSAddAtomic(size, &debug_iomalloc_size); #endif - IOStatisticsAlloc(kIOStatisticsMalloc, size); - } + IOStatisticsAlloc(kIOStatisticsMalloc, size); + } - return address; + return address; } -void IOFree(void * inAddress, vm_size_t size) +void +IOFree(void * inAddress, vm_size_t size) { - void * address; + void * address; + + if ((address = inAddress)) { + address = (typeof(address))(((uintptr_t) address) - sizeofIOLibMallocHeader); - if ((address = inAddress)) - { - address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader); - #if IOTRACKING - if (TRACK_ALLOC) - { - IOLibMallocHeader * hdr; - struct ptr_reference{ void * ptr; }; - volatile struct ptr_reference ptr; - - // we're about to block in IOTrackingRemove(), make sure the original pointer - // exists in memory or a register for leak scanning to find - ptr.ptr = inAddress; - - hdr = (typeof(hdr)) address; - if (size != hdr->tracking.size) - { - OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size); - size = hdr->tracking.size; - } - IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); - ptr.ptr = NULL; - } + if (TRACK_ALLOC) { + IOLibMallocHeader * hdr; + struct ptr_reference { void * ptr; }; + volatile struct ptr_reference ptr; + + // we're about to block in IOTrackingRemove(), make sure the original pointer + // exists in memory or a register for leak scanning to find + ptr.ptr = inAddress; + + hdr = (typeof(hdr))address; + if (size != hdr->tracking.size) { + 
OSReportWithBacktrace("bad IOFree size 0x%lx should be 0x%lx", size, hdr->tracking.size); + size = hdr->tracking.size; + } + IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); + ptr.ptr = NULL; + } #endif - kfree(address, size + sizeofIOLibMallocHeader); + kfree(address, size + sizeofIOLibMallocHeader); #if IOALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); + OSAddAtomic(-size, &debug_iomalloc_size); #endif - IOStatisticsAlloc(kIOStatisticsFree, size); - } + IOStatisticsAlloc(kIOStatisticsFree, size); + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -vm_tag_t +vm_tag_t IOMemoryTag(vm_map_t map) { - vm_tag_t tag; + vm_tag_t tag; - if (!vm_kernel_map_is_kernel(map)) return (VM_MEMORY_IOKIT); + if (!vm_kernel_map_is_kernel(map)) { + return VM_MEMORY_IOKIT; + } - tag = vm_tag_bt(); - if (tag == VM_KERN_MEMORY_NONE) tag = VM_KERN_MEMORY_IOKIT; + tag = vm_tag_bt(); + if (tag == VM_KERN_MEMORY_NONE) { + tag = VM_KERN_MEMORY_IOKIT; + } - return (tag); + return tag; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -struct IOLibPageMallocHeader -{ - mach_vm_size_t allocationSize; - mach_vm_address_t allocationAddress; +struct IOLibPageMallocHeader { + mach_vm_size_t allocationSize; + mach_vm_address_t allocationAddress; #if IOTRACKING - IOTrackingAddress tracking; + IOTrackingAddress tracking; #endif }; #if IOTRACKING -#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress))) +#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader) - (TRACK_ALLOC ? 0 : sizeof(IOTrackingAddress))) #else -#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader)) +#define sizeofIOLibPageMallocHeader (sizeof(IOLibPageMallocHeader)) #endif /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void * IOMallocAligned(vm_size_t size, vm_size_t alignment) +void * +IOMallocAligned(vm_size_t size, vm_size_t alignment) { - kern_return_t kr; - vm_offset_t address; - vm_offset_t allocationAddress; - vm_size_t adjustedSize; - uintptr_t alignMask; - IOLibPageMallocHeader * hdr; - - if (size == 0) - return 0; - - alignment = (1UL << log2up(alignment)); - alignMask = alignment - 1; - adjustedSize = size + sizeofIOLibPageMallocHeader; - - if (size > adjustedSize) { - address = 0; /* overflow detected */ - } - else if (adjustedSize >= page_size) { - - kr = kernel_memory_allocate(kernel_map, &address, - size, alignMask, 0, IOMemoryTag(kernel_map)); - if (KERN_SUCCESS != kr) address = 0; + kern_return_t kr; + vm_offset_t address; + vm_offset_t allocationAddress; + vm_size_t adjustedSize; + uintptr_t alignMask; + IOLibPageMallocHeader * hdr; + + if (size == 0) { + return 0; + } + + alignment = (1UL << log2up(alignment)); + alignMask = alignment - 1; + adjustedSize = size + sizeofIOLibPageMallocHeader; + + if (size > adjustedSize) { + address = 0; /* overflow detected */ + } else if (adjustedSize >= page_size) { + kr = kernel_memory_allocate(kernel_map, &address, + size, alignMask, 0, IOMemoryTag(kernel_map)); + if (KERN_SUCCESS != kr) { + address = 0; + } #if IOTRACKING - else if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size); + else if (TRACK_ALLOC) { + IOTrackingAlloc(gIOMallocTracking, address, size); + } #endif + } else { + adjustedSize += alignMask; + + if (adjustedSize >= page_size) { + kr = kernel_memory_allocate(kernel_map, &allocationAddress, + adjustedSize, 0, 0, IOMemoryTag(kernel_map)); + if (KERN_SUCCESS != kr) { + 
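The IOMalloc/IOFree pair above hides an IOLibMallocHeader in front of each tracked allocation: the real block is header plus payload, the caller only ever sees the payload pointer, and the recorded address is stored bitwise-complemented so the leak scanner does not mistake the tracking record itself for a live reference. A standalone sketch of the same bookkeeping, with malloc/free standing in for kalloc_tag_bt/kfree:

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {
        uintptr_t complemented_addr; /* ~(payload), like hdr->tracking.address */
        size_t    size;
    } tracking_hdr_t;

    void *tracked_malloc(size_t size)
    {
        tracking_hdr_t *hdr = malloc(sizeof(*hdr) + size);
        if (!hdr) {
            return NULL;
        }
        void *payload = hdr + 1;                       /* caller-visible pointer */
        hdr->complemented_addr = ~(uintptr_t)payload;  /* hide from leak scans */
        hdr->size = size;
        return payload;
    }

    void tracked_free(void *payload, size_t size)
    {
        tracking_hdr_t *hdr = ((tracking_hdr_t *)payload) - 1;
        (void)size;   /* the real code cross-checks size against hdr->size */
        free(hdr);
    }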
allocationAddress = 0; + } + } else { + allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT); + } - } else { - - adjustedSize += alignMask; - - if (adjustedSize >= page_size) { - - kr = kernel_memory_allocate(kernel_map, &allocationAddress, - adjustedSize, 0, 0, IOMemoryTag(kernel_map)); - if (KERN_SUCCESS != kr) allocationAddress = 0; - - } else - allocationAddress = (vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT); - - if (allocationAddress) { - address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader) - & (~alignMask); + if (allocationAddress) { + address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader) + & (~alignMask); - hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader); - hdr->allocationSize = adjustedSize; - hdr->allocationAddress = allocationAddress; + hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader); + hdr->allocationSize = adjustedSize; + hdr->allocationAddress = allocationAddress; #if IOTRACKING - if (TRACK_ALLOC) { - bzero(&hdr->tracking, sizeof(hdr->tracking)); - hdr->tracking.address = ~address; - hdr->tracking.size = size; - IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE); - } + if (TRACK_ALLOC) { + bzero(&hdr->tracking, sizeof(hdr->tracking)); + hdr->tracking.address = ~address; + hdr->tracking.size = size; + IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE); + } #endif - } else - address = 0; - } + } else { + address = 0; + } + } - assert(0 == (address & alignMask)); + assert(0 == (address & alignMask)); - if( address) { + if (address) { #if IOALLOCDEBUG OSAddAtomic(size, &debug_iomalloc_size); #endif - IOStatisticsAlloc(kIOStatisticsMallocAligned, size); + IOStatisticsAlloc(kIOStatisticsMallocAligned, size); } - return (void *) address; + return (void *) address; } -void IOFreeAligned(void * address, vm_size_t size) +void +IOFreeAligned(void * address, vm_size_t size) { - vm_address_t allocationAddress; - vm_size_t adjustedSize; - IOLibPageMallocHeader * hdr; + vm_address_t allocationAddress; + vm_size_t adjustedSize; + IOLibPageMallocHeader * hdr; - if( !address) - return; + if (!address) { + return; + } - assert(size); + assert(size); - adjustedSize = size + sizeofIOLibPageMallocHeader; - if (adjustedSize >= page_size) { + adjustedSize = size + sizeofIOLibPageMallocHeader; + if (adjustedSize >= page_size) { #if IOTRACKING - if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size); + if (TRACK_ALLOC) { + IOTrackingFree(gIOMallocTracking, (uintptr_t) address, size); + } #endif - kmem_free( kernel_map, (vm_offset_t) address, size); - - } else { - hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader); - adjustedSize = hdr->allocationSize; - allocationAddress = hdr->allocationAddress; + kmem_free( kernel_map, (vm_offset_t) address, size); + } else { + hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader); + adjustedSize = hdr->allocationSize; + allocationAddress = hdr->allocationAddress; #if IOTRACKING - if (TRACK_ALLOC) - { - if (size != hdr->tracking.size) - { - OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size); - size = hdr->tracking.size; - } - IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); - } + if (TRACK_ALLOC) { + if (size != hdr->tracking.size) { + OSReportWithBacktrace("bad IOFreeAligned size 0x%lx should be 0x%lx", size, hdr->tracking.size); + size = 
hdr->tracking.size; + } + IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); + } #endif - if (adjustedSize >= page_size) { - kmem_free( kernel_map, allocationAddress, adjustedSize); - } else { - kfree((void *)allocationAddress, adjustedSize); + if (adjustedSize >= page_size) { + kmem_free( kernel_map, allocationAddress, adjustedSize); + } else { + kfree(allocationAddress, adjustedSize); + } } - } #if IOALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); + OSAddAtomic(-size, &debug_iomalloc_size); #endif - IOStatisticsAlloc(kIOStatisticsFreeAligned, size); + IOStatisticsAlloc(kIOStatisticsFreeAligned, size); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -494,36 +509,39 @@ void IOFreeAligned(void * address, vm_size_t size) void IOKernelFreePhysical(mach_vm_address_t address, mach_vm_size_t size) { - mach_vm_address_t allocationAddress; - mach_vm_size_t adjustedSize; - IOLibPageMallocHeader * hdr; + mach_vm_address_t allocationAddress; + mach_vm_size_t adjustedSize; + IOLibPageMallocHeader * hdr; - if (!address) - return; + if (!address) { + return; + } - assert(size); + assert(size); - adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader; - if (adjustedSize >= page_size) { + adjustedSize = (2 * size) + sizeofIOLibPageMallocHeader; + if (adjustedSize >= page_size) { #if IOTRACKING - if (TRACK_ALLOC) IOTrackingFree(gIOMallocTracking, address, size); + if (TRACK_ALLOC) { + IOTrackingFree(gIOMallocTracking, address, size); + } #endif - kmem_free( kernel_map, (vm_offset_t) address, size); - - } else { - - hdr = (typeof(hdr)) (((uintptr_t)address) - sizeofIOLibPageMallocHeader); - adjustedSize = hdr->allocationSize; - allocationAddress = hdr->allocationAddress; + kmem_free( kernel_map, (vm_offset_t) address, size); + } else { + hdr = (typeof(hdr))(((uintptr_t)address) - sizeofIOLibPageMallocHeader); + adjustedSize = hdr->allocationSize; + allocationAddress = hdr->allocationAddress; #if IOTRACKING - if (TRACK_ALLOC) IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); + if (TRACK_ALLOC) { + IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); + } #endif - kfree((void *)allocationAddress, adjustedSize); - } + kfree(allocationAddress, adjustedSize); + } - IOStatisticsAlloc(kIOStatisticsFreeContiguous, size); + IOStatisticsAlloc(kIOStatisticsFreeContiguous, size); #if IOALLOCDEBUG - OSAddAtomic(-size, &debug_iomalloc_size); + OSAddAtomic(-size, &debug_iomalloc_size); #endif } @@ -532,581 +550,598 @@ extern unsigned long gPhysBase, gPhysSize; #endif mach_vm_address_t -IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, - mach_vm_size_t alignment, bool contiguous) +IOKernelAllocateWithPhysicalRestrict(mach_vm_size_t size, mach_vm_address_t maxPhys, + mach_vm_size_t alignment, bool contiguous) { - kern_return_t kr; - mach_vm_address_t address; - mach_vm_address_t allocationAddress; - mach_vm_size_t adjustedSize; - mach_vm_address_t alignMask; - IOLibPageMallocHeader * hdr; - - if (size == 0) - return (0); - if (alignment == 0) - alignment = 1; + kern_return_t kr; + mach_vm_address_t address; + mach_vm_address_t allocationAddress; + mach_vm_size_t adjustedSize; + mach_vm_address_t alignMask; + IOLibPageMallocHeader * hdr; + + if (size == 0) { + return 0; + } + if (alignment == 0) { + alignment = 1; + } - alignMask = alignment - 1; + alignMask = alignment - 1; - if (os_mul_and_add_overflow(2, size, sizeofIOLibPageMallocHeader, &adjustedSize)) return (0); + if (os_mul_and_add_overflow(2, 
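The sub-page path of IOMallocAligned, reformatted above, over-allocates by the alignment slack plus a small header and records the raw allocation address and size just below the returned pointer; that is how IOFreeAligned and IOKernelFreePhysical recover the original block. A standalone sketch of the trick, assuming a power-of-two alignment, with malloc/free standing in for kalloc/kfree:

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct {
        size_t    alloc_size;    /* like hdr->allocationSize */
        uintptr_t alloc_addr;    /* like hdr->allocationAddress */
    } page_hdr_t;

    void *aligned_alloc_hdr(size_t size, size_t alignment /* power of two */)
    {
        uintptr_t mask = alignment - 1;
        size_t adjusted = size + sizeof(page_hdr_t) + mask;
        uintptr_t raw = (uintptr_t)malloc(adjusted);
        if (!raw) {
            return NULL;
        }
        /* Round up past the header, then down to the alignment boundary;
         * the header is guaranteed to fit between raw and payload. */
        uintptr_t payload = (raw + mask + sizeof(page_hdr_t)) & ~mask;
        page_hdr_t *hdr = (page_hdr_t *)(payload - sizeof(page_hdr_t));
        hdr->alloc_size = adjusted;
        hdr->alloc_addr = raw;
        return (void *)payload;
    }

    void aligned_free_hdr(void *payload)
    {
        page_hdr_t *hdr = ((page_hdr_t *)payload) - 1;
        free((void *)hdr->alloc_addr);   /* free the original raw block */
    }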
size, sizeofIOLibPageMallocHeader, &adjustedSize)) { + return 0; + } - contiguous = (contiguous && (adjustedSize > page_size)) - || (alignment > page_size); + contiguous = (contiguous && (adjustedSize > page_size)) + || (alignment > page_size); - if (contiguous || maxPhys) - { - int options = 0; - vm_offset_t virt; + if (contiguous || maxPhys) { + int options = 0; + vm_offset_t virt; - adjustedSize = size; - contiguous = (contiguous && (adjustedSize > page_size)) - || (alignment > page_size); + adjustedSize = size; + contiguous = (contiguous && (adjustedSize > page_size)) + || (alignment > page_size); - if (!contiguous) - { + if (!contiguous) { #if __arm__ || __arm64__ - if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) - { - maxPhys = 0; - } - else + if (maxPhys >= (mach_vm_address_t)(gPhysBase + gPhysSize)) { + maxPhys = 0; + } else #endif - if (maxPhys <= 0xFFFFFFFF) - { - maxPhys = 0; - options |= KMA_LOMEM; - } - else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) - { - maxPhys = 0; - } - } - if (contiguous || maxPhys) - { - kr = kmem_alloc_contig(kernel_map, &virt, size, - alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map)); - } - else - { - kr = kernel_memory_allocate(kernel_map, &virt, - size, alignMask, options, IOMemoryTag(kernel_map)); - } - if (KERN_SUCCESS == kr) - { - address = virt; + if (maxPhys <= 0xFFFFFFFF) { + maxPhys = 0; + options |= KMA_LOMEM; + } else if (gIOLastPage && (atop_64(maxPhys) > gIOLastPage)) { + maxPhys = 0; + } + } + if (contiguous || maxPhys) { + kr = kmem_alloc_contig(kernel_map, &virt, size, + alignMask, atop(maxPhys), atop(alignMask), 0, IOMemoryTag(kernel_map)); + } else { + kr = kernel_memory_allocate(kernel_map, &virt, + size, alignMask, options, IOMemoryTag(kernel_map)); + } + if (KERN_SUCCESS == kr) { + address = virt; #if IOTRACKING - if (TRACK_ALLOC) IOTrackingAlloc(gIOMallocTracking, address, size); + if (TRACK_ALLOC) { + IOTrackingAlloc(gIOMallocTracking, address, size); + } #endif - } - else - address = 0; - } - else - { - adjustedSize += alignMask; - if (adjustedSize < size) return (0); - allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT); - - if (allocationAddress) { - + } else { + address = 0; + } + } else { + adjustedSize += alignMask; + if (adjustedSize < size) { + return 0; + } + allocationAddress = (mach_vm_address_t) kalloc_tag_bt(adjustedSize, VM_KERN_MEMORY_IOKIT); - address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader) - & (~alignMask); + if (allocationAddress) { + address = (allocationAddress + alignMask + sizeofIOLibPageMallocHeader) + & (~alignMask); - if (atop_32(address) != atop_32(address + size - 1)) - address = round_page(address); + if (atop_32(address) != atop_32(address + size - 1)) { + address = round_page(address); + } - hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader); - hdr->allocationSize = adjustedSize; - hdr->allocationAddress = allocationAddress; + hdr = (typeof(hdr))(address - sizeofIOLibPageMallocHeader); + hdr->allocationSize = adjustedSize; + hdr->allocationAddress = allocationAddress; #if IOTRACKING - if (TRACK_ALLOC) { - bzero(&hdr->tracking, sizeof(hdr->tracking)); - hdr->tracking.address = ~address; - hdr->tracking.size = size; - IOTrackingAdd(gIOMallocTracking, &hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE); - } + if (TRACK_ALLOC) { + bzero(&hdr->tracking, sizeof(hdr->tracking)); + hdr->tracking.address = ~address; + hdr->tracking.size = size; + IOTrackingAdd(gIOMallocTracking, 
&hdr->tracking.tracking, size, true, VM_KERN_MEMORY_NONE); + } #endif - } else - address = 0; - } + } else { + address = 0; + } + } - if (address) { - IOStatisticsAlloc(kIOStatisticsMallocContiguous, size); + if (address) { + IOStatisticsAlloc(kIOStatisticsMallocContiguous, size); #if IOALLOCDEBUG - OSAddAtomic(size, &debug_iomalloc_size); + OSAddAtomic(size, &debug_iomalloc_size); #endif - } + } - return (address); + return address; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -struct _IOMallocContiguousEntry -{ - mach_vm_address_t virtualAddr; - IOBufferMemoryDescriptor * md; - queue_chain_t link; +struct _IOMallocContiguousEntry { + mach_vm_address_t virtualAddr; + IOBufferMemoryDescriptor * md; + queue_chain_t link; }; typedef struct _IOMallocContiguousEntry _IOMallocContiguousEntry; -void * IOMallocContiguous(vm_size_t size, vm_size_t alignment, - IOPhysicalAddress * physicalAddress) +void * +IOMallocContiguous(vm_size_t size, vm_size_t alignment, + IOPhysicalAddress * physicalAddress) { - mach_vm_address_t address = 0; - - if (size == 0) - return 0; - if (alignment == 0) - alignment = 1; - - /* Do we want a physical address? */ - if (!physicalAddress) - { - address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true); - } - else do - { - IOBufferMemoryDescriptor * bmd; - mach_vm_address_t physicalMask; - vm_offset_t alignMask; + mach_vm_address_t address = 0; - alignMask = alignment - 1; - physicalMask = (0xFFFFFFFF ^ alignMask); - - bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask( - kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask); - if (!bmd) - break; - - _IOMallocContiguousEntry * - entry = IONew(_IOMallocContiguousEntry, 1); - if (!entry) - { - bmd->release(); - break; + if (size == 0) { + return 0; + } + if (alignment == 0) { + alignment = 1; } - entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy(); - entry->md = bmd; - lck_mtx_lock(gIOMallocContiguousEntriesLock); - queue_enter( &gIOMallocContiguousEntries, entry, - _IOMallocContiguousEntry *, link ); - lck_mtx_unlock(gIOMallocContiguousEntriesLock); - address = (mach_vm_address_t) entry->virtualAddr; - *physicalAddress = bmd->getPhysicalAddress(); - } - while (false); + /* Do we want a physical address? 
*/ + if (!physicalAddress) { + address = IOKernelAllocateWithPhysicalRestrict(size, 0 /*maxPhys*/, alignment, true); + } else { + do { + IOBufferMemoryDescriptor * bmd; + mach_vm_address_t physicalMask; + vm_offset_t alignMask; + + alignMask = alignment - 1; + physicalMask = (0xFFFFFFFF ^ alignMask); + + bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask( + kernel_task, kIOMemoryPhysicallyContiguous, size, physicalMask); + if (!bmd) { + break; + } + + _IOMallocContiguousEntry * + entry = IONew(_IOMallocContiguousEntry, 1); + if (!entry) { + bmd->release(); + break; + } + entry->virtualAddr = (mach_vm_address_t) bmd->getBytesNoCopy(); + entry->md = bmd; + lck_mtx_lock(gIOMallocContiguousEntriesLock); + queue_enter( &gIOMallocContiguousEntries, entry, + _IOMallocContiguousEntry *, link ); + lck_mtx_unlock(gIOMallocContiguousEntriesLock); + + address = (mach_vm_address_t) entry->virtualAddr; + *physicalAddress = bmd->getPhysicalAddress(); + }while (false); + } - return (void *) address; + return (void *) address; } -void IOFreeContiguous(void * _address, vm_size_t size) +void +IOFreeContiguous(void * _address, vm_size_t size) { - _IOMallocContiguousEntry * entry; - IOMemoryDescriptor * md = NULL; + _IOMallocContiguousEntry * entry; + IOMemoryDescriptor * md = NULL; - mach_vm_address_t address = (mach_vm_address_t) _address; + mach_vm_address_t address = (mach_vm_address_t) _address; - if( !address) - return; + if (!address) { + return; + } - assert(size); + assert(size); - lck_mtx_lock(gIOMallocContiguousEntriesLock); - queue_iterate( &gIOMallocContiguousEntries, entry, - _IOMallocContiguousEntry *, link ) - { - if( entry->virtualAddr == address ) { - md = entry->md; - queue_remove( &gIOMallocContiguousEntries, entry, + lck_mtx_lock(gIOMallocContiguousEntriesLock); + queue_iterate( &gIOMallocContiguousEntries, entry, + _IOMallocContiguousEntry *, link ) + { + if (entry->virtualAddr == address) { + md = entry->md; + queue_remove( &gIOMallocContiguousEntries, entry, _IOMallocContiguousEntry *, link ); - break; + break; + } + } + lck_mtx_unlock(gIOMallocContiguousEntriesLock); + + if (md) { + md->release(); + IODelete(entry, _IOMallocContiguousEntry, 1); + } else { + IOKernelFreePhysical((mach_vm_address_t) address, size); } - } - lck_mtx_unlock(gIOMallocContiguousEntriesLock); - - if (md) - { - md->release(); - IODelete(entry, _IOMallocContiguousEntry, 1); - } - else - { - IOKernelFreePhysical((mach_vm_address_t) address, size); - } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -kern_return_t IOIteratePageableMaps(vm_size_t size, - IOIteratePageableMapsCallback callback, void * ref) +kern_return_t +IOIteratePageableMaps(vm_size_t size, + IOIteratePageableMapsCallback callback, void * ref) { - kern_return_t kr = kIOReturnNotReady; - vm_size_t segSize; - UInt32 attempts; - UInt32 index; - vm_offset_t min; - vm_map_t map; - - if (size > kIOPageableMaxMapSize) - return( kIOReturnBadArgument ); - - do { - index = gIOKitPageableSpace.hint; - attempts = gIOKitPageableSpace.count; - while( attempts--) { - kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref); - if( KERN_SUCCESS == kr) { - gIOKitPageableSpace.hint = index; - break; - } - if( index) - index--; - else - index = gIOKitPageableSpace.count - 1; - } - if (KERN_NO_SPACE != kr) - break; - - lck_mtx_lock( gIOKitPageableSpace.lock ); - - index = gIOKitPageableSpace.count; - if( index >= (kIOMaxPageableMaps - 1)) { - lck_mtx_unlock( gIOKitPageableSpace.lock ); - break; - } - - if( size < kIOPageableMapSize) 
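When IOMallocContiguous is asked for a physical address, it turns the requested alignment into a physical mask for IOBufferMemoryDescriptor::inTaskWithPhysicalMask: XOR-ing alignment-minus-one into 32 set bits yields a mask that simultaneously caps the physical address below 4 GB and forces the low alignment bits to zero. A worked example of that arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t alignment = 0x1000;    /* e.g. 4 KB alignment */
        uint64_t alignMask = alignment - 1;
        uint64_t physicalMask = 0xFFFFFFFFull ^ alignMask;
        /* prints 0xfffff000: addresses must be 4 KB aligned and < 4 GB */
        printf("physicalMask = 0x%llx\n", (unsigned long long)physicalMask);
        return 0;
    }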
- segSize = kIOPageableMapSize; - else - segSize = size; - - min = 0; - kr = kmem_suballoc(kernel_map, - &min, - segSize, - TRUE, - VM_FLAGS_ANYWHERE, + kern_return_t kr = kIOReturnNotReady; + vm_size_t segSize; + UInt32 attempts; + UInt32 index; + vm_offset_t min; + vm_map_t map; + + if (size > kIOPageableMaxMapSize) { + return kIOReturnBadArgument; + } + + do { + index = gIOKitPageableSpace.hint; + attempts = gIOKitPageableSpace.count; + while (attempts--) { + kr = (*callback)(gIOKitPageableSpace.maps[index].map, ref); + if (KERN_SUCCESS == kr) { + gIOKitPageableSpace.hint = index; + break; + } + if (index) { + index--; + } else { + index = gIOKitPageableSpace.count - 1; + } + } + if (KERN_NO_SPACE != kr) { + break; + } + + lck_mtx_lock( gIOKitPageableSpace.lock ); + + index = gIOKitPageableSpace.count; + if (index >= (kIOMaxPageableMaps - 1)) { + lck_mtx_unlock( gIOKitPageableSpace.lock ); + break; + } + + if (size < kIOPageableMapSize) { + segSize = kIOPageableMapSize; + } else { + segSize = size; + } + + min = 0; + kr = kmem_suballoc(kernel_map, + &min, + segSize, + TRUE, + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_IOKIT, - &map); - if( KERN_SUCCESS != kr) { - lck_mtx_unlock( gIOKitPageableSpace.lock ); - break; - } - - gIOKitPageableSpace.maps[index].map = map; - gIOKitPageableSpace.maps[index].address = min; - gIOKitPageableSpace.maps[index].end = min + segSize; - gIOKitPageableSpace.hint = index; - gIOKitPageableSpace.count = index + 1; + &map); + if (KERN_SUCCESS != kr) { + lck_mtx_unlock( gIOKitPageableSpace.lock ); + break; + } - lck_mtx_unlock( gIOKitPageableSpace.lock ); + gIOKitPageableSpace.maps[index].map = map; + gIOKitPageableSpace.maps[index].address = min; + gIOKitPageableSpace.maps[index].end = min + segSize; + gIOKitPageableSpace.hint = index; + gIOKitPageableSpace.count = index + 1; - } while( true ); + lck_mtx_unlock( gIOKitPageableSpace.lock ); + } while (true); - return kr; + return kr; } -struct IOMallocPageableRef -{ - vm_offset_t address; - vm_size_t size; - vm_tag_t tag; +struct IOMallocPageableRef { + vm_offset_t address; + vm_size_t size; + vm_tag_t tag; }; -static kern_return_t IOMallocPageableCallback(vm_map_t map, void * _ref) +static kern_return_t +IOMallocPageableCallback(vm_map_t map, void * _ref) { - struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref; - kern_return_t kr; + struct IOMallocPageableRef * ref = (struct IOMallocPageableRef *) _ref; + kern_return_t kr; - kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag ); + kr = kmem_alloc_pageable( map, &ref->address, ref->size, ref->tag ); - return( kr ); + return kr; } -static void * IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag) +static void * +IOMallocPageablePages(vm_size_t size, vm_size_t alignment, vm_tag_t tag) { - kern_return_t kr = kIOReturnNotReady; - struct IOMallocPageableRef ref; + kern_return_t kr = kIOReturnNotReady; + struct IOMallocPageableRef ref; - if (alignment > page_size) - return( 0 ); - if (size > kIOPageableMaxMapSize) - return( 0 ); + if (alignment > page_size) { + return 0; + } + if (size > kIOPageableMaxMapSize) { + return 0; + } - ref.size = size; - ref.tag = tag; - kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref ); - if( kIOReturnSuccess != kr) - ref.address = 0; + ref.size = size; + ref.tag = tag; + kr = IOIteratePageableMaps( size, &IOMallocPageableCallback, &ref ); + if (kIOReturnSuccess != kr) { + ref.address = 0; + } - return( (void *) ref.address ); + return (void *) 
ref.address; } -vm_map_t IOPageableMapForAddress( uintptr_t address ) +vm_map_t +IOPageableMapForAddress( uintptr_t address ) { - vm_map_t map = 0; - UInt32 index; - - for( index = 0; index < gIOKitPageableSpace.count; index++) { - if( (address >= gIOKitPageableSpace.maps[index].address) - && (address < gIOKitPageableSpace.maps[index].end) ) { - map = gIOKitPageableSpace.maps[index].map; - break; - } - } - if( !map) - panic("IOPageableMapForAddress: null"); - - return( map ); + vm_map_t map = 0; + UInt32 index; + + for (index = 0; index < gIOKitPageableSpace.count; index++) { + if ((address >= gIOKitPageableSpace.maps[index].address) + && (address < gIOKitPageableSpace.maps[index].end)) { + map = gIOKitPageableSpace.maps[index].map; + break; + } + } + if (!map) { + panic("IOPageableMapForAddress: null"); + } + + return map; } -static void IOFreePageablePages(void * address, vm_size_t size) +static void +IOFreePageablePages(void * address, vm_size_t size) { - vm_map_t map; - - map = IOPageableMapForAddress( (vm_address_t) address); - if( map) - kmem_free( map, (vm_offset_t) address, size); + vm_map_t map; + + map = IOPageableMapForAddress((vm_address_t) address); + if (map) { + kmem_free( map, (vm_offset_t) address, size); + } } -static uintptr_t IOMallocOnePageablePage(iopa_t * a) +static uintptr_t +IOMallocOnePageablePage(iopa_t * a) { - return ((uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT)); + return (uintptr_t) IOMallocPageablePages(page_size, page_size, VM_KERN_MEMORY_IOKIT); } -void * IOMallocPageable(vm_size_t size, vm_size_t alignment) +void * +IOMallocPageable(vm_size_t size, vm_size_t alignment) { - void * addr; + void * addr; - if (size >= (page_size - 4*gIOPageAllocChunkBytes)) addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map)); - else addr = ((void * ) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment)); + if (size >= (page_size - 4 * gIOPageAllocChunkBytes)) { + addr = IOMallocPageablePages(size, alignment, IOMemoryTag(kernel_map)); + } else { + addr = ((void *) iopa_alloc(&gIOPageablePageAllocator, &IOMallocOnePageablePage, size, alignment)); + } - if (addr) { + if (addr) { #if IOALLOCDEBUG - OSAddAtomicLong(size, &debug_iomallocpageable_size); + OSAddAtomicLong(size, &debug_iomallocpageable_size); #endif - IOStatisticsAlloc(kIOStatisticsMallocPageable, size); - } + IOStatisticsAlloc(kIOStatisticsMallocPageable, size); + } - return (addr); + return addr; } -void IOFreePageable(void * address, vm_size_t size) +void +IOFreePageable(void * address, vm_size_t size) { #if IOALLOCDEBUG OSAddAtomicLong(-size, &debug_iomallocpageable_size); #endif - IOStatisticsAlloc(kIOStatisticsFreePageable, size); - - if (size < (page_size - 4*gIOPageAllocChunkBytes)) - { - address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size); - size = page_size; - } - if (address) IOFreePageablePages(address, size); + IOStatisticsAlloc(kIOStatisticsFreePageable, size); + + if (size < (page_size - 4 * gIOPageAllocChunkBytes)) { + address = (void *) iopa_free(&gIOPageablePageAllocator, (uintptr_t) address, size); + size = page_size; + } + if (address) { + IOFreePageablePages(address, size); + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -extern "C" void +extern "C" void iopa_init(iopa_t * a) { - bzero(a, sizeof(*a)); - a->lock = IOLockAlloc(); - queue_init(&a->list); + bzero(a, sizeof(*a)); + a->lock = IOLockAlloc(); + queue_init(&a->list); } static uintptr_t 
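IOMallocPageable, above, splits requests at page_size - 4*gIOPageAllocChunkBytes: anything at or above the threshold goes straight to the pageable maps, while smaller requests are carved out of shared pages by iopa_alloc. With gIOPageAllocChunkBytes set to PAGE_SIZE/64 in IOLibInit, the numbers work out as below (assuming 4 KB pages):

    #include <stdio.h>

    int main(void)
    {
        unsigned page_size = 4096;          /* assumption: 4 KB pages */
        unsigned chunk = page_size / 64;    /* gIOPageAllocChunkBytes = 64 */
        unsigned threshold = page_size - 4 * chunk;
        printf("chunk=%u threshold=%u\n", chunk, threshold); /* 64, 3840 */
        return 0;
    }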
iopa_allocinpage(iopa_page_t * pa, uint32_t count, uint64_t align) { - uint32_t n, s; - uint64_t avail = pa->avail; - - assert(avail); - - // find strings of count 1 bits in avail - for (n = count; n > 1; n -= s) - { - s = n >> 1; - avail = avail & (avail << s); - } - // and aligned - avail &= align; - - if (avail) - { - n = __builtin_clzll(avail); - pa->avail &= ~((-1ULL << (64 - count)) >> n); - if (!pa->avail && pa->link.next) - { - remque(&pa->link); - pa->link.next = 0; + uint32_t n, s; + uint64_t avail = pa->avail; + + assert(avail); + + // find strings of count 1 bits in avail + for (n = count; n > 1; n -= s) { + s = n >> 1; + avail = avail & (avail << s); + } + // and aligned + avail &= align; + + if (avail) { + n = __builtin_clzll(avail); + pa->avail &= ~((-1ULL << (64 - count)) >> n); + if (!pa->avail && pa->link.next) { + remque(&pa->link); + pa->link.next = 0; + } + return n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa); } - return (n * gIOPageAllocChunkBytes + trunc_page((uintptr_t) pa)); - } - return (0); + return 0; } -uintptr_t +uintptr_t iopa_alloc(iopa_t * a, iopa_proc_t alloc, vm_size_t bytes, uint32_t balign) { - static const uint64_t align_masks[] = { - 0xFFFFFFFFFFFFFFFF, - 0xAAAAAAAAAAAAAAAA, - 0x8888888888888888, - 0x8080808080808080, - 0x8000800080008000, - 0x8000000080000000, - 0x8000000000000000, - }; - iopa_page_t * pa; - uintptr_t addr = 0; - uint32_t count; - uint64_t align; - - if (!bytes) bytes = 1; - count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; - align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)]; - - IOLockLock(a->lock); - __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_first(&a->list)); - while (!queue_end(&a->list, &pa->link)) - { - addr = iopa_allocinpage(pa, count, align); - if (addr) - { - a->bytecount += bytes; - break; + static const uint64_t align_masks[] = { + 0xFFFFFFFFFFFFFFFF, + 0xAAAAAAAAAAAAAAAA, + 0x8888888888888888, + 0x8080808080808080, + 0x8000800080008000, + 0x8000000080000000, + 0x8000000000000000, + }; + iopa_page_t * pa; + uintptr_t addr = 0; + uint32_t count; + uint64_t align; + + if (!bytes) { + bytes = 1; } - __IGNORE_WCASTALIGN(pa = (typeof(pa)) queue_next(&pa->link)); - } - IOLockUnlock(a->lock); - - if (!addr) - { - addr = alloc(a); - if (addr) - { - pa = (typeof(pa)) (addr + page_size - gIOPageAllocChunkBytes); - pa->signature = kIOPageAllocSignature; - pa->avail = -2ULL; - - addr = iopa_allocinpage(pa, count, align); - IOLockLock(a->lock); - if (pa->avail) enqueue_head(&a->list, &pa->link); - a->pagecount++; - if (addr) a->bytecount += bytes; - IOLockUnlock(a->lock); + count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; + align = align_masks[log2up((balign + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes)]; + + IOLockLock(a->lock); + __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_first(&a->list)); + while (!queue_end(&a->list, &pa->link)) { + addr = iopa_allocinpage(pa, count, align); + if (addr) { + a->bytecount += bytes; + break; + } + __IGNORE_WCASTALIGN(pa = (typeof(pa))queue_next(&pa->link)); + } + IOLockUnlock(a->lock); + + if (!addr) { + addr = alloc(a); + if (addr) { + pa = (typeof(pa))(addr + page_size - gIOPageAllocChunkBytes); + pa->signature = kIOPageAllocSignature; + pa->avail = -2ULL; + + addr = iopa_allocinpage(pa, count, align); + IOLockLock(a->lock); + if (pa->avail) { + enqueue_head(&a->list, &pa->link); + } + a->pagecount++; + if (addr) { + a->bytecount += bytes; + } + IOLockUnlock(a->lock); + } } - } - assert((addr & 
((1 << log2up(balign)) - 1)) == 0); - return (addr); + assert((addr & ((1 << log2up(balign)) - 1)) == 0); + return addr; } -uintptr_t +uintptr_t iopa_free(iopa_t * a, uintptr_t addr, vm_size_t bytes) { - iopa_page_t * pa; - uint32_t count; - uintptr_t chunk; - - if (!bytes) bytes = 1; - - chunk = (addr & page_mask); - assert(0 == (chunk & (gIOPageAllocChunkBytes - 1))); - - pa = (typeof(pa)) (addr | (page_size - gIOPageAllocChunkBytes)); - assert(kIOPageAllocSignature == pa->signature); - - count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; - chunk /= gIOPageAllocChunkBytes; - - IOLockLock(a->lock); - if (!pa->avail) - { - assert(!pa->link.next); - enqueue_tail(&a->list, &pa->link); - } - pa->avail |= ((-1ULL << (64 - count)) >> chunk); - if (pa->avail != -2ULL) pa = 0; - else - { - remque(&pa->link); - pa->link.next = 0; - pa->signature = 0; - a->pagecount--; - // page to free - pa = (typeof(pa)) trunc_page(pa); - } - a->bytecount -= bytes; - IOLockUnlock(a->lock); - - return ((uintptr_t) pa); + iopa_page_t * pa; + uint32_t count; + uintptr_t chunk; + + if (!bytes) { + bytes = 1; + } + + chunk = (addr & page_mask); + assert(0 == (chunk & (gIOPageAllocChunkBytes - 1))); + + pa = (typeof(pa))(addr | (page_size - gIOPageAllocChunkBytes)); + assert(kIOPageAllocSignature == pa->signature); + + count = (bytes + gIOPageAllocChunkBytes - 1) / gIOPageAllocChunkBytes; + chunk /= gIOPageAllocChunkBytes; + + IOLockLock(a->lock); + if (!pa->avail) { + assert(!pa->link.next); + enqueue_tail(&a->list, &pa->link); + } + pa->avail |= ((-1ULL << (64 - count)) >> chunk); + if (pa->avail != -2ULL) { + pa = 0; + } else { + remque(&pa->link); + pa->link.next = 0; + pa->signature = 0; + a->pagecount--; + // page to free + pa = (typeof(pa))trunc_page(pa); + } + a->bytecount -= bytes; + IOLockUnlock(a->lock); + + return (uintptr_t) pa; } - + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOReturn IOSetProcessorCacheMode( task_t task, IOVirtualAddress address, - IOByteCount length, IOOptionBits cacheMode ) +IOReturn +IOSetProcessorCacheMode( task_t task, IOVirtualAddress address, + IOByteCount length, IOOptionBits cacheMode ) { - IOReturn ret = kIOReturnSuccess; - ppnum_t pagenum; + IOReturn ret = kIOReturnSuccess; + ppnum_t pagenum; - if( task != kernel_task) - return( kIOReturnUnsupported ); - if ((address | length) & PAGE_MASK) - { + if (task != kernel_task) { + return kIOReturnUnsupported; + } + if ((address | length) & PAGE_MASK) { // OSReportWithBacktrace("IOSetProcessorCacheMode(0x%x, 0x%x, 0x%x) fails\n", address, length, cacheMode); - return( kIOReturnUnsupported ); - } - length = round_page(address + length) - trunc_page( address ); - address = trunc_page( address ); - - // make map mode - cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask; - - while( (kIOReturnSuccess == ret) && (length > 0) ) { - - // Get the physical page number - pagenum = pmap_find_phys(kernel_pmap, (addr64_t)address); - if( pagenum) { - ret = IOUnmapPages( get_task_map(task), address, page_size ); - ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode ); - } else - ret = kIOReturnVMError; + return kIOReturnUnsupported; + } + length = round_page(address + length) - trunc_page( address ); + address = trunc_page( address ); + + // make map mode + cacheMode = (cacheMode << kIOMapCacheShift) & kIOMapCacheMask; + + while ((kIOReturnSuccess == ret) && (length > 0)) { + // Get the physical page number + pagenum = pmap_find_phys(kernel_pmap, 
(addr64_t)address); + if (pagenum) { + ret = IOUnmapPages( get_task_map(task), address, page_size ); + ret = IOMapPages( get_task_map(task), address, ptoa_64(pagenum), page_size, cacheMode ); + } else { + ret = kIOReturnVMError; + } - address += page_size; - length -= page_size; - } + address += page_size; + length -= page_size; + } - return( ret ); + return ret; } -IOReturn IOFlushProcessorCache( task_t task, IOVirtualAddress address, - IOByteCount length ) +IOReturn +IOFlushProcessorCache( task_t task, IOVirtualAddress address, + IOByteCount length ) { - if( task != kernel_task) - return( kIOReturnUnsupported ); + if (task != kernel_task) { + return kIOReturnUnsupported; + } - flush_dcache64( (addr64_t) address, (unsigned) length, false ); + flush_dcache64((addr64_t) address, (unsigned) length, false ); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -vm_offset_t OSKernelStackRemaining( void ) +vm_offset_t +OSKernelStackRemaining( void ) { - return (ml_stack_remaining()); + return ml_stack_remaining(); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1114,77 +1149,85 @@ vm_offset_t OSKernelStackRemaining( void ) /* * Spin for indicated number of milliseconds. */ -void IOSleep(unsigned milliseconds) +void +IOSleep(unsigned milliseconds) { - delay_for_interval(milliseconds, kMillisecondScale); + delay_for_interval(milliseconds, kMillisecondScale); } /* * Spin for indicated number of milliseconds, and potentially an * additional number of milliseconds up to the leeway values. */ -void IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds) +void +IOSleepWithLeeway(unsigned intervalMilliseconds, unsigned leewayMilliseconds) { - delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale); + delay_for_interval_with_leeway(intervalMilliseconds, leewayMilliseconds, kMillisecondScale); } /* * Spin for indicated number of microseconds. */ -void IODelay(unsigned microseconds) +void +IODelay(unsigned microseconds) { - delay_for_interval(microseconds, kMicrosecondScale); + delay_for_interval(microseconds, kMicrosecondScale); } /* * Spin for indicated number of nanoseconds. */ -void IOPause(unsigned nanoseconds) +void +IOPause(unsigned nanoseconds) { - delay_for_interval(nanoseconds, kNanosecondScale); + delay_for_interval(nanoseconds, kNanosecondScale); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1,0); +static void _IOLogv(const char *format, va_list ap, void *caller) __printflike(1, 0); -__attribute__((noinline,not_tail_called)) -void IOLog(const char *format, ...) +__attribute__((noinline, not_tail_called)) +void +IOLog(const char *format, ...) 
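/*
 * Illustrative sketch, not part of the original change: a hypothetical
 * driver polling a device with the delay and logging KPIs above. readReg(),
 * kBusy, and dev are assumed stand-ins, not xnu symbols:
 *
 *     uint32_t status;
 *     while ((status = readReg(dev)) & kBusy) {
 *         IOSleep(10);        // give up the CPU ~10 ms between polls
 *     }
 *     IOLog("%s: ready, status 0x%x\n", dev->getName(), status);
 *
 * IOLog can take the console output path, which is why _IOLogv() below
 * asserts that interrupts are enabled when it is called.
 */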
{ - void *caller = __builtin_return_address(0); - va_list ap; + void *caller = __builtin_return_address(0); + va_list ap; - va_start(ap, format); - _IOLogv(format, ap, caller); - va_end(ap); + va_start(ap, format); + _IOLogv(format, ap, caller); + va_end(ap); } -__attribute__((noinline,not_tail_called)) -void IOLogv(const char *format, va_list ap) +__attribute__((noinline, not_tail_called)) +void +IOLogv(const char *format, va_list ap) { - void *caller = __builtin_return_address(0); - _IOLogv(format, ap, caller); + void *caller = __builtin_return_address(0); + _IOLogv(format, ap, caller); } -void _IOLogv(const char *format, va_list ap, void *caller) +void +_IOLogv(const char *format, va_list ap, void *caller) { - va_list ap2; - struct console_printbuf_state info_data; - console_printbuf_state_init(&info_data, TRUE, TRUE); + va_list ap2; + struct console_printbuf_state info_data; + console_printbuf_state_init(&info_data, TRUE, TRUE); - va_copy(ap2, ap); + va_copy(ap2, ap); - os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller); + os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, format, ap, caller); - __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE); - console_printbuf_clear(&info_data); - va_end(ap2); + __doprnt(format, ap2, console_printbuf_putc, &info_data, 16, TRUE); + console_printbuf_clear(&info_data); + va_end(ap2); - assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled"); + assertf(ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !gCPUsRunning, "IOLog called with interrupts disabled"); } #if !__LP64__ -void IOPanic(const char *reason) +void +IOPanic(const char *reason) { panic("%s", reason); } @@ -1192,36 +1235,44 @@ void IOPanic(const char *reason) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size, - void (*output)(const char *format, ...)) +void +IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size, + void (*output)(const char *format, ...)) { - uint8_t c, chars[17]; - size_t idx; - - output("%s(0x%x):\n", title, size); - if (size > 4096) size = 4096; - chars[16] = idx = 0; - while (true) { - if (!(idx & 15)) { - if (idx) output(" |%s|\n", chars); - if (idx >= size) break; - output("%04x: ", idx); - } - else if (!(idx & 7)) output(" "); - - c = ((char *)buffer)[idx]; - output("%02x ", c); - chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' '; - - idx++; - if ((idx == size) && (idx & 15)) { - chars[idx & 15] = 0; - while (idx & 15) { - idx++; - output(" "); - } - } - } + uint8_t c, chars[17]; + size_t idx; + + output("%s(0x%x):\n", title, size); + if (size > 4096) { + size = 4096; + } + chars[16] = idx = 0; + while (true) { + if (!(idx & 15)) { + if (idx) { + output(" |%s|\n", chars); + } + if (idx >= size) { + break; + } + output("%04x: ", idx); + } else if (!(idx & 7)) { + output(" "); + } + + c = ((char *)buffer)[idx]; + output("%02x ", c); + chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' '; + + idx++; + if ((idx == size) && (idx & 15)) { + chars[idx & 15] = 0; + while (idx & 15) { + idx++; + output(" "); + } + } + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1229,24 +1280,27 @@ void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size, /* * Convert an integer constant (typically a #define or enum) to a string. 
*/ -static char noValue[80]; // that's pretty +static char noValue[80]; // that's pretty -const char *IOFindNameForValue(int value, const IONamedValue *regValueArray) +const char * +IOFindNameForValue(int value, const IONamedValue *regValueArray) { - for( ; regValueArray->name; regValueArray++) { - if(regValueArray->value == value) - return(regValueArray->name); + for (; regValueArray->name; regValueArray++) { + if (regValueArray->value == value) { + return regValueArray->name; + } } snprintf(noValue, sizeof(noValue), "0x%x (UNDEFINED)", value); - return((const char *)noValue); + return (const char *)noValue; } -IOReturn IOFindValueForName(const char *string, - const IONamedValue *regValueArray, - int *value) +IOReturn +IOFindValueForName(const char *string, + const IONamedValue *regValueArray, + int *value) { - for( ; regValueArray->name; regValueArray++) { - if(!strcmp(regValueArray->name, string)) { + for (; regValueArray->name; regValueArray++) { + if (!strcmp(regValueArray->name, string)) { *value = regValueArray->value; return kIOReturnSuccess; } @@ -1254,42 +1308,42 @@ IOReturn IOFindValueForName(const char *string, return kIOReturnBadArgument; } -OSString * IOCopyLogNameForPID(int pid) +OSString * +IOCopyLogNameForPID(int pid) { - char buf[128]; - size_t len; - snprintf(buf, sizeof(buf), "pid %d, ", pid); - len = strlen(buf); - proc_name(pid, buf + len, sizeof(buf) - len); - return (OSString::withCString(buf)); + char buf[128]; + size_t len; + snprintf(buf, sizeof(buf), "pid %d, ", pid); + len = strlen(buf); + proc_name(pid, buf + len, sizeof(buf) - len); + return OSString::withCString(buf); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOAlignment IOSizeToAlignment(unsigned int size) +IOAlignment +IOSizeToAlignment(unsigned int size) { - int shift; - const int intsize = sizeof(unsigned int) * 8; - - for (shift = 1; shift < intsize; shift++) { - if (size & 0x80000000) - return (IOAlignment)(intsize - shift); - size <<= 1; - } - return 0; + int shift; + const int intsize = sizeof(unsigned int) * 8; + + for (shift = 1; shift < intsize; shift++) { + if (size & 0x80000000) { + return (IOAlignment)(intsize - shift); + } + size <<= 1; + } + return 0; } -unsigned int IOAlignmentToSize(IOAlignment align) +unsigned int +IOAlignmentToSize(IOAlignment align) { - unsigned int size; - - for (size = 1; align; align--) { - size <<= 1; - } - return size; -} + unsigned int size; + for (size = 1; align; align--) { + size <<= 1; + } + return size; +} } /* extern "C" */ - - - diff --git a/iokit/Kernel/IOLocks.cpp b/iokit/Kernel/IOLocks.cpp index 2bbb712bb..8871fb684 100644 --- a/iokit/Kernel/IOLocks.cpp +++ b/iokit/Kernel/IOLocks.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include #include -#include +#include #include #include @@ -42,43 +42,51 @@ extern "C" { static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0; #endif -void IOLockInitWithState( IOLock * lock, IOLockState state) +void +IOLockInitWithState( IOLock * lock, IOLockState state) { - if( state == kIOLockStateLocked) - lck_mtx_lock( lock); + if (state == kIOLockStateLocked) { + lck_mtx_lock( lock); + } } -IOLock * IOLockAlloc( void ) +IOLock * +IOLockAlloc( void ) { - return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) ); + return lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); } -void IOLockFree( IOLock * lock) +void +IOLockFree( IOLock * lock) { - lck_mtx_free( lock, IOLockGroup); + lck_mtx_free( lock, IOLockGroup); } -lck_mtx_t * IOLockGetMachLock( IOLock * lock) +lck_mtx_t * +IOLockGetMachLock( IOLock * lock) { - return( (lck_mtx_t *)lock); + return (lck_mtx_t *)lock; } -int IOLockSleep( IOLock * lock, void *event, UInt32 interType) +int +IOLockSleep( IOLock * lock, void *event, UInt32 interType) { - return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType); + return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType); } -int IOLockSleepDeadline( IOLock * lock, void *event, - AbsoluteTime deadline, UInt32 interType) +int +IOLockSleepDeadline( IOLock * lock, void *event, + AbsoluteTime deadline, UInt32 interType) { - return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, - (wait_interrupt_t) interType, __OSAbsoluteTime(deadline)); + return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, + (wait_interrupt_t) interType, __OSAbsoluteTime(deadline)); } -void IOLockWakeup(IOLock * lock, void *event, bool oneThread) -{ +void +IOLockWakeup(IOLock * lock, void *event, bool oneThread) +{ thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED); -} +} #if defined(__x86_64__) @@ -86,202 +94,224 @@ void IOLockWakeup(IOLock * lock, void *event, bool oneThread) * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime to this function, * which supports a NULL event, */ -int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep"); -int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event, - AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline"); -void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup"); +int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep"); +int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event, + AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline"); +void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup"); -int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) +int +IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) { - if (event == NULL) - event = (void 
*)&IOLockSleep_NO_EVENT; + if (event == NULL) { + event = (void *)&IOLockSleep_NO_EVENT; + } - return IOLockSleep(lock, event, interType); + return IOLockSleep(lock, event, interType); } -int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event, - AbsoluteTime deadline, UInt32 interType) +int +IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event, + AbsoluteTime deadline, UInt32 interType) { - if (event == NULL) - event = (void *)&IOLockSleep_NO_EVENT; + if (event == NULL) { + event = (void *)&IOLockSleep_NO_EVENT; + } - return IOLockSleepDeadline(lock, event, deadline, interType); + return IOLockSleepDeadline(lock, event, deadline, interType); } -void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) -{ - if (event == NULL) - event = (void *)&IOLockSleep_NO_EVENT; +void +IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) +{ + if (event == NULL) { + event = (void *)&IOLockSleep_NO_EVENT; + } - IOLockWakeup(lock, event, oneThread); -} + IOLockWakeup(lock, event, oneThread); +} #endif /* defined(__x86_64__) */ struct _IORecursiveLock { - lck_mtx_t mutex; - lck_grp_t *group; - thread_t thread; - UInt32 count; + lck_mtx_t mutex; + lck_grp_t *group; + thread_t thread; + UInt32 count; }; -IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup ) +IORecursiveLock * +IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup ) { - _IORecursiveLock * lock; + _IORecursiveLock * lock; - if( lockGroup == 0 ) - return( 0 ); + if (lockGroup == 0) { + return 0; + } - lock = IONew( _IORecursiveLock, 1 ); - if( !lock ) - return( 0 ); + lock = IONew( _IORecursiveLock, 1 ); + if (!lock) { + return 0; + } - lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL ); - lock->group = lockGroup; - lock->thread = 0; - lock->count = 0; + lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL ); + lock->group = lockGroup; + lock->thread = 0; + lock->count = 0; - return( (IORecursiveLock *) lock ); + return (IORecursiveLock *) lock; } -IORecursiveLock * IORecursiveLockAlloc( void ) +IORecursiveLock * +IORecursiveLockAlloc( void ) { - return IORecursiveLockAllocWithLockGroup( IOLockGroup ); + return IORecursiveLockAllocWithLockGroup( IOLockGroup ); } -void IORecursiveLockFree( IORecursiveLock * _lock ) +void +IORecursiveLockFree( IORecursiveLock * _lock ) { - _IORecursiveLock * lock = (_IORecursiveLock *)_lock; - - lck_mtx_destroy(&lock->mutex, lock->group); - IODelete( lock, _IORecursiveLock, 1 ); + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + + lck_mtx_destroy(&lock->mutex, lock->group); + IODelete( lock, _IORecursiveLock, 1 ); } -lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock ) +lck_mtx_t * +IORecursiveLockGetMachLock( IORecursiveLock * lock ) { - return( &lock->mutex ); + return &lock->mutex; } -void IORecursiveLockLock( IORecursiveLock * _lock) +void +IORecursiveLockLock( IORecursiveLock * _lock) { - _IORecursiveLock * lock = (_IORecursiveLock *)_lock; - - if( lock->thread == IOThreadSelf()) - lock->count++; - else { - lck_mtx_lock( &lock->mutex ); - assert( lock->thread == 0 ); - assert( lock->count == 0 ); - lock->thread = IOThreadSelf(); - lock->count = 1; - } + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + + if (lock->thread == IOThreadSelf()) { + lock->count++; + } else { + lck_mtx_lock( &lock->mutex ); + assert( lock->thread == 0 ); + assert( lock->count == 0 ); + lock->thread = IOThreadSelf(); + lock->count = 1; + } } -boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock) +boolean_t 
+IORecursiveLockTryLock( IORecursiveLock * _lock) { - _IORecursiveLock * lock = (_IORecursiveLock *)_lock; - - if( lock->thread == IOThreadSelf()) { - lock->count++; - return( true ); - } else { - if( lck_mtx_try_lock( &lock->mutex )) { - assert( lock->thread == 0 ); - assert( lock->count == 0 ); - lock->thread = IOThreadSelf(); - lock->count = 1; - return( true ); + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + + if (lock->thread == IOThreadSelf()) { + lock->count++; + return true; + } else { + if (lck_mtx_try_lock( &lock->mutex )) { + assert( lock->thread == 0 ); + assert( lock->count == 0 ); + lock->thread = IOThreadSelf(); + lock->count = 1; + return true; + } } - } - return( false ); + return false; } -void IORecursiveLockUnlock( IORecursiveLock * _lock) +void +IORecursiveLockUnlock( IORecursiveLock * _lock) { - _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; - assert( lock->thread == IOThreadSelf() ); + assert( lock->thread == IOThreadSelf()); - if( 0 == (--lock->count)) { - lock->thread = 0; - lck_mtx_unlock( &lock->mutex ); - } + if (0 == (--lock->count)) { + lock->thread = 0; + lck_mtx_unlock( &lock->mutex ); + } } -boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock) +boolean_t +IORecursiveLockHaveLock( const IORecursiveLock * _lock) { - _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; - return( lock->thread == IOThreadSelf()); + return lock->thread == IOThreadSelf(); } -int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType) +int +IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType) { - _IORecursiveLock * lock = (_IORecursiveLock *)_lock; - UInt32 count = lock->count; - int res; - - assert(lock->thread == IOThreadSelf()); - - lock->count = 0; - lock->thread = 0; - res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType); - - // Must re-establish the recursive lock no matter why we woke up - // otherwise we would potentially leave the return path corrupted. - assert(lock->thread == 0); - assert(lock->count == 0); - lock->thread = IOThreadSelf(); - lock->count = count; - return res; + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + UInt32 count = lock->count; + int res; + + assert(lock->thread == IOThreadSelf()); + + lock->count = 0; + lock->thread = 0; + res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType); + + // Must re-establish the recursive lock no matter why we woke up + // otherwise we would potentially leave the return path corrupted. + assert(lock->thread == 0); + assert(lock->count == 0); + lock->thread = IOThreadSelf(); + lock->count = count; + return res; } -int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event, - AbsoluteTime deadline, UInt32 interType) +int +IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event, + AbsoluteTime deadline, UInt32 interType) { - _IORecursiveLock * lock = (_IORecursiveLock *)_lock; - UInt32 count = lock->count; - int res; - - assert(lock->thread == IOThreadSelf()); - - lock->count = 0; - lock->thread = 0; - res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, - (wait_interrupt_t) interType, __OSAbsoluteTime(deadline)); - - // Must re-establish the recursive lock no matter why we woke up - // otherwise we would potentially leave the return path corrupted. 
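/*
 * Illustrative sketch (assumed caller code, not part of this change):
 * IORecursiveLockSleep() and IORecursiveLockSleepDeadline() drop the mutex
 * while the thread waits, so the recursion count is stashed before the
 * sleep and re-established on wakeup. A hypothetical caller, where
 * 'condition' is assumed driver state protected by the lock:
 *
 *     IORecursiveLockLock(lock);
 *     IORecursiveLockLock(lock);      // recursion count is now 2
 *     while (!condition) {
 *         IORecursiveLockSleep(lock, &condition, THREAD_UNINT);
 *     }                               // count restored to 2 on each wakeup
 *     IORecursiveLockUnlock(lock);
 *     IORecursiveLockUnlock(lock);    // count back to 0, mutex released
 */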
- assert(lock->thread == 0); - assert(lock->count == 0); - lock->thread = IOThreadSelf(); - lock->count = count; - return res; + _IORecursiveLock * lock = (_IORecursiveLock *)_lock; + UInt32 count = lock->count; + int res; + + assert(lock->thread == IOThreadSelf()); + + lock->count = 0; + lock->thread = 0; + res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, + (wait_interrupt_t) interType, __OSAbsoluteTime(deadline)); + + // Must re-establish the recursive lock no matter why we woke up + // otherwise we would potentially leave the return path corrupted. + assert(lock->thread == 0); + assert(lock->count == 0); + lock->thread = IOThreadSelf(); + lock->count = count; + return res; } -void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread) +void +IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread) { - thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED); + thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED); } /* * Complex (read/write) lock operations */ -IORWLock * IORWLockAlloc( void ) +IORWLock * +IORWLockAlloc( void ) { - return( lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL) ); + return lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL); } -void IORWLockFree( IORWLock * lock) +void +IORWLockFree( IORWLock * lock) { - lck_rw_free( lock, IOLockGroup); + lck_rw_free( lock, IOLockGroup); } -lck_rw_t * IORWLockGetMachLock( IORWLock * lock) +lck_rw_t * +IORWLockGetMachLock( IORWLock * lock) { - return( (lck_rw_t *)lock); + return (lck_rw_t *)lock; } @@ -289,24 +319,28 @@ lck_rw_t * IORWLockGetMachLock( IORWLock * lock) * Spin locks */ -IOSimpleLock * IOSimpleLockAlloc( void ) +IOSimpleLock * +IOSimpleLockAlloc( void ) { - return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) ); + return lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL); } -void IOSimpleLockInit( IOSimpleLock * lock) +void +IOSimpleLockInit( IOSimpleLock * lock) { - lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL); + lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL); } -void IOSimpleLockFree( IOSimpleLock * lock ) +void +IOSimpleLockFree( IOSimpleLock * lock ) { - lck_spin_free( lock, IOLockGroup); + lck_spin_free( lock, IOLockGroup); } -lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock) +lck_spin_t * +IOSimpleLockGetMachLock( IOSimpleLock * lock) { - return( (lck_spin_t *)lock); + return (lck_spin_t *)lock; } #ifndef IOLOCKS_INLINE @@ -317,22 +351,19 @@ lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock) void IOLockAssert(IOLock * lock, IOLockAssertState type) { - LCK_MTX_ASSERT(lock, type); + LCK_MTX_ASSERT(lock, type); } void IORWLockAssert(IORWLock * lock, IORWLockAssertState type) { - LCK_RW_ASSERT(lock, type); + LCK_RW_ASSERT(lock, type); } void IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type) { - LCK_SPIN_ASSERT(l, type); + LCK_SPIN_ASSERT(lock, type); } #endif /* !IOLOCKS_INLINE */ - } /* extern "C" */ - - diff --git a/iokit/Kernel/IOMapper.cpp b/iokit/Kernel/IOMapper.cpp index 7f944e831..cbf31f3d4 100644 --- a/iokit/Kernel/IOMapper.cpp +++ b/iokit/Kernel/IOMapper.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -60,176 +60,218 @@ OSMetaClassDefineReservedUnused(IOMapper, 15); IOMapper * IOMapper::gSystem = (IOMapper *) IOMapper::kUnknown; class IOMapperLock { - IOLock *fWaitLock; + IOLock *fWaitLock; public: - IOMapperLock() { fWaitLock = IOLockAlloc(); } - ~IOMapperLock() { IOLockFree(fWaitLock); } - - void lock() { IOLockLock(fWaitLock); } - void unlock() { IOLockUnlock(fWaitLock); } - void sleep(void *event) { IOLockSleep(fWaitLock, event, THREAD_UNINT); } - void wakeup(void *event) { IOLockWakeup(fWaitLock, event, false); } + IOMapperLock() + { + fWaitLock = IOLockAlloc(); + } + ~IOMapperLock() + { + IOLockFree(fWaitLock); + } + + void + lock() + { + IOLockLock(fWaitLock); + } + void + unlock() + { + IOLockUnlock(fWaitLock); + } + void + sleep(void *event) + { + IOLockSleep(fWaitLock, event, THREAD_UNINT); + } + void + wakeup(void *event) + { + IOLockWakeup(fWaitLock, event, false); + } }; static IOMapperLock sMapperLock; -bool IOMapper::start(IOService *provider) +bool +IOMapper::start(IOService *provider) { - OSObject * obj; - if (!super::start(provider)) - return false; - - if (!initHardware(provider)) - return false; - - fPageSize = getPageSize(); - - if (fIsSystem) { - sMapperLock.lock(); - IOMapper::gSystem = this; - sMapperLock.wakeup(&IOMapper::gSystem); - sMapperLock.unlock(); - } - - if (provider) - { - obj = provider->getProperty("iommu-id"); - if (!obj) - obj = provider->getProperty("AAPL,phandle"); - if (obj) - setProperty(gIOMapperIDKey, obj); - } - return true; + OSObject * obj; + if (!super::start(provider)) { + return false; + } + + if (!initHardware(provider)) { + return false; + } + + fPageSize = getPageSize(); + + if (fIsSystem) { + sMapperLock.lock(); + IOMapper::gSystem = this; + sMapperLock.wakeup(&IOMapper::gSystem); + sMapperLock.unlock(); + } + + if (provider) { + obj = provider->getProperty("iommu-id"); + if (!obj) { + obj = provider->getProperty("AAPL,phandle"); + } + if (obj) { + setProperty(gIOMapperIDKey, obj); + } + } + return true; } -void IOMapper::free() +void +IOMapper::free() { - super::free(); + super::free(); } -void IOMapper::setMapperRequired(bool hasMapper) +void +IOMapper::setMapperRequired(bool hasMapper) { - if (hasMapper) - IOMapper::gSystem = (IOMapper *) kHasMapper; - else { - sMapperLock.lock(); - IOMapper::gSystem = (IOMapper *) kNoMapper; - sMapperLock.unlock(); - sMapperLock.wakeup(&IOMapper::gSystem); - } + if (hasMapper) { + IOMapper::gSystem = (IOMapper *) kHasMapper; + } else { + sMapperLock.lock(); + IOMapper::gSystem = (IOMapper *) kNoMapper; + sMapperLock.unlock(); + sMapperLock.wakeup(&IOMapper::gSystem); + } } -void IOMapper::waitForSystemMapper() +void 
+IOMapper::waitForSystemMapper() { - sMapperLock.lock(); - while ((uintptr_t) IOMapper::gSystem & kWaitMask) - { + sMapperLock.lock(); + while ((uintptr_t) IOMapper::gSystem & kWaitMask) { OSReportWithBacktrace("waitForSystemMapper"); - sMapperLock.sleep(&IOMapper::gSystem); - } - sMapperLock.unlock(); + sMapperLock.sleep(&IOMapper::gSystem); + } + sMapperLock.unlock(); } -IOMapper * IOMapper::copyMapperForDevice(IOService * device) +IOMapper * +IOMapper::copyMapperForDevice(IOService * device) { - return copyMapperForDeviceWithIndex(device, 0); + return copyMapperForDeviceWithIndex(device, 0); } -IOMapper * IOMapper::copyMapperForDeviceWithIndex(IOService * device, unsigned int index) +IOMapper * +IOMapper::copyMapperForDeviceWithIndex(IOService * device, unsigned int index) { - OSData *data; - OSObject * obj; - IOMapper * mapper = NULL; - OSDictionary * matching; - - obj = device->copyProperty("iommu-parent"); - if (!obj) return (NULL); - - if ((mapper = OSDynamicCast(IOMapper, obj))) goto found; - - if ((data = OSDynamicCast(OSData, obj))) - { - if (index >= data->getLength() / sizeof(UInt32)) goto done; - - data = OSData::withBytesNoCopy((UInt32 *)data->getBytesNoCopy() + index, sizeof(UInt32)); - if (!data) goto done; - - matching = IOService::propertyMatching(gIOMapperIDKey, data); - data->release(); - } - else - matching = IOService::propertyMatching(gIOMapperIDKey, obj); - - if (matching) - { - mapper = OSDynamicCast(IOMapper, IOService::waitForMatchingService(matching)); - matching->release(); - } + OSData *data; + OSObject * obj; + IOMapper * mapper = NULL; + OSDictionary * matching; + + obj = device->copyProperty("iommu-parent"); + if (!obj) { + return NULL; + } + + if ((mapper = OSDynamicCast(IOMapper, obj))) { + goto found; + } + + if ((data = OSDynamicCast(OSData, obj))) { + if (index >= data->getLength() / sizeof(UInt32)) { + goto done; + } + + data = OSData::withBytesNoCopy((UInt32 *)data->getBytesNoCopy() + index, sizeof(UInt32)); + if (!data) { + goto done; + } + + matching = IOService::propertyMatching(gIOMapperIDKey, data); + data->release(); + } else { + matching = IOService::propertyMatching(gIOMapperIDKey, obj); + } + + if (matching) { + mapper = OSDynamicCast(IOMapper, IOService::waitForMatchingService(matching)); + matching->release(); + } done: - if (obj) obj->release(); + if (obj) { + obj->release(); + } found: - if (mapper) - { - if (!mapper->fAllocName) - { - char name[MACH_ZONE_NAME_MAX_LEN]; - char kmodname[KMOD_MAX_NAME]; - vm_tag_t tag; - uint32_t kmodid; - - tag = IOMemoryTag(kernel_map); - if (!(kmodid = vm_tag_get_kext(tag, &kmodname[0], KMOD_MAX_NAME))) - { - snprintf(kmodname, sizeof(kmodname), "%d", tag); - } - snprintf(name, sizeof(name), "%s.DMA.%s", kmodname, device->getName()); - mapper->fAllocName = kern_allocation_name_allocate(name, 16); - } - } - - return (mapper); + if (mapper) { + if (!mapper->fAllocName) { + char name[MACH_ZONE_NAME_MAX_LEN]; + char kmodname[KMOD_MAX_NAME]; + vm_tag_t tag; + uint32_t kmodid; + + tag = IOMemoryTag(kernel_map); + if (!(kmodid = vm_tag_get_kext(tag, &kmodname[0], KMOD_MAX_NAME))) { + snprintf(kmodname, sizeof(kmodname), "%d", tag); + } + snprintf(name, sizeof(name), "%s.DMA.%s", kmodname, device->getName()); + mapper->fAllocName = kern_allocation_name_allocate(name, 16); + } + } + + return mapper; } __BEGIN_DECLS // These are C accessors to the system mapper for non-IOKit clients -ppnum_t IOMapperIOVMAlloc(unsigned pages) +ppnum_t +IOMapperIOVMAlloc(unsigned pages) { - IOReturn ret; - uint64_t dmaAddress, 
dmaLength; - - IOMapper::checkForSystemMapper(); - - ret = kIOReturnUnsupported; - if (IOMapper::gSystem) - { - ret = IOMapper::gSystem->iovmMapMemory( - NULL, 0, ptoa_64(pages), - (kIODMAMapReadAccess | kIODMAMapWriteAccess), - NULL, NULL, NULL, - &dmaAddress, &dmaLength); - } - - if (kIOReturnSuccess == ret) return (atop_64(dmaAddress)); - return (0); + IOReturn ret; + uint64_t dmaAddress, dmaLength; + + IOMapper::checkForSystemMapper(); + + ret = kIOReturnUnsupported; + if (IOMapper::gSystem) { + ret = IOMapper::gSystem->iovmMapMemory( + NULL, 0, ptoa_64(pages), + (kIODMAMapReadAccess | kIODMAMapWriteAccess), + NULL, NULL, NULL, + &dmaAddress, &dmaLength); + } + + if (kIOReturnSuccess == ret) { + return atop_64(dmaAddress); + } + return 0; } -void IOMapperIOVMFree(ppnum_t addr, unsigned pages) +void +IOMapperIOVMFree(ppnum_t addr, unsigned pages) { - if (IOMapper::gSystem) - { - IOMapper::gSystem->iovmUnmapMemory(NULL, NULL, ptoa_64(addr), ptoa_64(pages)); - } + if (IOMapper::gSystem) { + IOMapper::gSystem->iovmUnmapMemory(NULL, NULL, ptoa_64(addr), ptoa_64(pages)); + } } -ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page) +ppnum_t +IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page) { - if (!IOMapper::gSystem) return (page); - if (!addr) panic("!addr"); - IOMapper::gSystem->iovmInsert((kIODMAMapReadAccess | kIODMAMapWriteAccess), - ptoa_64(addr), ptoa_64(offset), ptoa_64(page), ptoa_64(1)); - return (addr + offset); + if (!IOMapper::gSystem) { + return page; + } + if (!addr) { + panic("!addr"); + } + IOMapper::gSystem->iovmInsert((kIODMAMapReadAccess | kIODMAMapWriteAccess), + ptoa_64(addr), ptoa_64(offset), ptoa_64(page), ptoa_64(1)); + return addr + offset; } ///////////////////////////////////////////////////////////////////////////// @@ -242,100 +284,108 @@ ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page) #include -UInt8 IOMappedRead8(IOPhysicalAddress address) +UInt8 +IOMappedRead8(IOPhysicalAddress address) { - IOMapper::checkForSystemMapper(); - - if (IOMapper::gSystem) { - addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); - return (UInt8) ml_phys_read_byte_64(addr); - } - else - return (UInt8) ml_phys_read_byte((vm_offset_t) address); + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); + return (UInt8) ml_phys_read_byte_64(addr); + } else { + return (UInt8) ml_phys_read_byte((vm_offset_t) address); + } } -UInt16 IOMappedRead16(IOPhysicalAddress address) +UInt16 +IOMappedRead16(IOPhysicalAddress address) { - IOMapper::checkForSystemMapper(); - - if (IOMapper::gSystem) { - addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); - return (UInt16) ml_phys_read_half_64(addr); - } - else - return (UInt16) ml_phys_read_half((vm_offset_t) address); + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); + return (UInt16) ml_phys_read_half_64(addr); + } else { + return (UInt16) ml_phys_read_half((vm_offset_t) address); + } } -UInt32 IOMappedRead32(IOPhysicalAddress address) +UInt32 +IOMappedRead32(IOPhysicalAddress address) { - IOMapper::checkForSystemMapper(); - - if (IOMapper::gSystem) { - addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); - return (UInt32) ml_phys_read_word_64(addr); - } - else - return (UInt32) ml_phys_read_word((vm_offset_t) address); + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr 
= IOMapper::gSystem->mapToPhysicalAddress(address); + return (UInt32) ml_phys_read_word_64(addr); + } else { + return (UInt32) ml_phys_read_word((vm_offset_t) address); + } } -UInt64 IOMappedRead64(IOPhysicalAddress address) +UInt64 +IOMappedRead64(IOPhysicalAddress address) { - IOMapper::checkForSystemMapper(); - - if (IOMapper::gSystem) { - addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); - return (UInt64) ml_phys_read_double_64(addr); - } - else - return (UInt64) ml_phys_read_double((vm_offset_t) address); + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); + return (UInt64) ml_phys_read_double_64(addr); + } else { + return (UInt64) ml_phys_read_double((vm_offset_t) address); + } } -void IOMappedWrite8(IOPhysicalAddress address, UInt8 value) +void +IOMappedWrite8(IOPhysicalAddress address, UInt8 value) { - IOMapper::checkForSystemMapper(); - - if (IOMapper::gSystem) { - addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); - ml_phys_write_byte_64(addr, value); - } - else - ml_phys_write_byte((vm_offset_t) address, value); + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); + ml_phys_write_byte_64(addr, value); + } else { + ml_phys_write_byte((vm_offset_t) address, value); + } } -void IOMappedWrite16(IOPhysicalAddress address, UInt16 value) +void +IOMappedWrite16(IOPhysicalAddress address, UInt16 value) { - IOMapper::checkForSystemMapper(); - - if (IOMapper::gSystem) { - addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); - ml_phys_write_half_64(addr, value); - } - else - ml_phys_write_half((vm_offset_t) address, value); + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); + ml_phys_write_half_64(addr, value); + } else { + ml_phys_write_half((vm_offset_t) address, value); + } } -void IOMappedWrite32(IOPhysicalAddress address, UInt32 value) +void +IOMappedWrite32(IOPhysicalAddress address, UInt32 value) { - IOMapper::checkForSystemMapper(); - - if (IOMapper::gSystem) { - addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); - ml_phys_write_word_64(addr, value); - } - else - ml_phys_write_word((vm_offset_t) address, value); + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); + ml_phys_write_word_64(addr, value); + } else { + ml_phys_write_word((vm_offset_t) address, value); + } } -void IOMappedWrite64(IOPhysicalAddress address, UInt64 value) +void +IOMappedWrite64(IOPhysicalAddress address, UInt64 value) { - IOMapper::checkForSystemMapper(); - - if (IOMapper::gSystem) { - addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); - ml_phys_write_double_64(addr, value); - } - else - ml_phys_write_double((vm_offset_t) address, value); + IOMapper::checkForSystemMapper(); + + if (IOMapper::gSystem) { + addr64_t addr = IOMapper::gSystem->mapToPhysicalAddress(address); + ml_phys_write_double_64(addr, value); + } else { + ml_phys_write_double((vm_offset_t) address, value); + } } __END_DECLS diff --git a/iokit/Kernel/IOMemoryCursor.cpp b/iokit/Kernel/IOMemoryCursor.cpp index 3314a68a1..1e4e94840 100644 --- a/iokit/Kernel/IOMemoryCursor.cpp +++ b/iokit/Kernel/IOMemoryCursor.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOMemoryCursor.cpp created by wgulland on 1999-3-02 */ @@ -41,151 +41,153 @@ OSDefineMetaClassAndStructors(IOMemoryCursor, OSObject) IOMemoryCursor * IOMemoryCursor::withSpecification(SegmentFunction inSegFunc, - IOPhysicalLength inMaxSegmentSize, - IOPhysicalLength inMaxTransferSize, - IOPhysicalLength inAlignment) + IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) { - IOMemoryCursor * me = new IOMemoryCursor; - - if (me && !me->initWithSpecification(inSegFunc, - inMaxSegmentSize, - inMaxTransferSize, - inAlignment)) - { - me->release(); - return 0; - } - - return me; + IOMemoryCursor * me = new IOMemoryCursor; + + if (me && !me->initWithSpecification(inSegFunc, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) { + me->release(); + return 0; + } + + return me; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ bool IOMemoryCursor::initWithSpecification(SegmentFunction inSegFunc, - IOPhysicalLength inMaxSegmentSize, - IOPhysicalLength inMaxTransferSize, - IOPhysicalLength inAlignment) + IOPhysicalLength inMaxSegmentSize, + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) { // @@@ gvdl: Remove me #if 1 -static UInt sMaxDBDMASegment; -if (!sMaxDBDMASegment) { - sMaxDBDMASegment = (UInt) -1; - if (PE_parse_boot_argn("mseg", &sMaxDBDMASegment, sizeof (sMaxDBDMASegment))) - IOLog("Setting MaxDBDMASegment to %d\n", sMaxDBDMASegment); -} - -if (inMaxSegmentSize > sMaxDBDMASegment) inMaxSegmentSize = sMaxDBDMASegment; + static UInt sMaxDBDMASegment; + if (!sMaxDBDMASegment) { + sMaxDBDMASegment = (UInt) - 1; + if (PE_parse_boot_argn("mseg", &sMaxDBDMASegment, sizeof(sMaxDBDMASegment))) { + IOLog("Setting MaxDBDMASegment to %d\n", sMaxDBDMASegment); + } + } + + if (inMaxSegmentSize > sMaxDBDMASegment) { + inMaxSegmentSize = sMaxDBDMASegment; + } #endif - if (!super::init()) - return false; - - if (!inSegFunc) - return false; - - outSeg = inSegFunc; - maxSegmentSize = inMaxSegmentSize; - if (inMaxTransferSize) - maxTransferSize = inMaxTransferSize; - else - maxTransferSize = (IOPhysicalLength) -1; - alignMask = inAlignment - 1; - assert(alignMask == 0); // No alignment code yet! 
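/*
 * Usage sketch (hypothetical limits, not part of this change): a DMA engine
 * restricted to 4 KB segments and 1 MB per transfer could build its cursor
 * with
 *
 *     IONaturalMemoryCursor * cursor =
 *         IONaturalMemoryCursor::withSpecification(4096, 1024 * 1024, 1);
 *
 * and then call cursor->genPhysicalSegments() against a prepared memory
 * descriptor. Note the assert above: inAlignment must be 1 (so that
 * alignMask == 0), since no alignment code exists yet.
 */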
- - return true; + if (!super::init()) { + return false; + } + + if (!inSegFunc) { + return false; + } + + outSeg = inSegFunc; + maxSegmentSize = inMaxSegmentSize; + if (inMaxTransferSize) { + maxTransferSize = inMaxTransferSize; + } else { + maxTransferSize = (IOPhysicalLength) - 1; + } + alignMask = inAlignment - 1; + assert(alignMask == 0); // No alignment code yet! + + return true; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -UInt32 +UInt32 IOMemoryCursor::genPhysicalSegments(IOMemoryDescriptor *inDescriptor, - IOByteCount fromPosition, - void * inSegments, - UInt32 inMaxSegments, - UInt32 inMaxTransferSize, - IOByteCount *outTransferSize) + IOByteCount fromPosition, + void * inSegments, + UInt32 inMaxSegments, + UInt32 inMaxTransferSize, + IOByteCount *outTransferSize) { - if (!inDescriptor) - return 0; - - if (!inMaxSegments) - return 0; - - if (!inMaxTransferSize) - inMaxTransferSize = maxTransferSize; - - /* - * Iterate over the packet, translating segments where allowed - * - * If we finished cleanly return number of segments found - * and update the position in the descriptor. - */ - PhysicalSegment curSeg = { 0, 0 }; - UInt curSegIndex = 0; - UInt curTransferSize = 0; - IOByteCount inDescriptorLength = inDescriptor->getLength(); - PhysicalSegment seg = { 0, 0 }; - - while ((seg.location) || (fromPosition < inDescriptorLength)) - { - if (!seg.location) - { - seg.location = inDescriptor->getPhysicalSegment( - fromPosition, (IOByteCount*)&seg.length); - assert(seg.location); - assert(seg.length); - fromPosition += seg.length; - } - - if (!curSeg.location) - { - curTransferSize += seg.length; - curSeg = seg; - seg.location = 0; - } - else if ((curSeg.location + curSeg.length == seg.location)) - { - curTransferSize += seg.length; - curSeg.length += seg.length; - seg.location = 0; - } - - if (!seg.location) - { - if ((curSeg.length > maxSegmentSize)) - { - seg.location = curSeg.location + maxSegmentSize; - seg.length = curSeg.length - maxSegmentSize; - curTransferSize -= seg.length; - curSeg.length -= seg.length; - } - - if ((curTransferSize >= inMaxTransferSize)) - { - curSeg.length -= curTransferSize - inMaxTransferSize; - curTransferSize = inMaxTransferSize; - break; - } - } - - if (seg.location) - { - if ((curSegIndex + 1 == inMaxSegments)) - break; - (*outSeg)(curSeg, inSegments, curSegIndex++); - curSeg.location = 0; - } - } - - if (curSeg.location) - (*outSeg)(curSeg, inSegments, curSegIndex++); - - if (outTransferSize) - *outTransferSize = curTransferSize; - - return curSegIndex; + if (!inDescriptor) { + return 0; + } + + if (!inMaxSegments) { + return 0; + } + + if (!inMaxTransferSize) { + inMaxTransferSize = maxTransferSize; + } + + /* + * Iterate over the packet, translating segments where allowed + * + * If we finished cleanly return number of segments found + * and update the position in the descriptor. 
+ */ + PhysicalSegment curSeg = { 0, 0 }; + UInt curSegIndex = 0; + UInt curTransferSize = 0; + IOByteCount inDescriptorLength = inDescriptor->getLength(); + PhysicalSegment seg = { 0, 0 }; + + while ((seg.location) || (fromPosition < inDescriptorLength)) { + if (!seg.location) { + seg.location = inDescriptor->getPhysicalSegment( + fromPosition, (IOByteCount*)&seg.length); + assert(seg.location); + assert(seg.length); + fromPosition += seg.length; + } + + if (!curSeg.location) { + curTransferSize += seg.length; + curSeg = seg; + seg.location = 0; + } else if ((curSeg.location + curSeg.length == seg.location)) { + curTransferSize += seg.length; + curSeg.length += seg.length; + seg.location = 0; + } + + if (!seg.location) { + if ((curSeg.length > maxSegmentSize)) { + seg.location = curSeg.location + maxSegmentSize; + seg.length = curSeg.length - maxSegmentSize; + curTransferSize -= seg.length; + curSeg.length -= seg.length; + } + + if ((curTransferSize >= inMaxTransferSize)) { + curSeg.length -= curTransferSize - inMaxTransferSize; + curTransferSize = inMaxTransferSize; + break; + } + } + + if (seg.location) { + if ((curSegIndex + 1 == inMaxSegments)) { + break; + } + (*outSeg)(curSeg, inSegments, curSegIndex++); + curSeg.location = 0; + } + } + + if (curSeg.location) { + (*outSeg)(curSeg, inSegments, curSegIndex++); + } + + if (outTransferSize) { + *outTransferSize = curTransferSize; + } + + return curSegIndex; } /************************ class IONaturalMemoryCursor ************************/ @@ -194,40 +196,40 @@ IOMemoryCursor::genPhysicalSegments(IOMemoryDescriptor *inDescriptor, #define super IOMemoryCursor OSDefineMetaClassAndStructors(IONaturalMemoryCursor, IOMemoryCursor) -void IONaturalMemoryCursor::outputSegment(PhysicalSegment segment, - void * outSegments, - UInt32 outSegmentIndex) +void +IONaturalMemoryCursor::outputSegment(PhysicalSegment segment, + void * outSegments, + UInt32 outSegmentIndex) { - ((PhysicalSegment *) outSegments)[outSegmentIndex] = segment; + ((PhysicalSegment *) outSegments)[outSegmentIndex] = segment; } -IONaturalMemoryCursor * +IONaturalMemoryCursor * IONaturalMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, - IOPhysicalLength inMaxTransferSize, - IOPhysicalLength inAlignment) + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) { - IONaturalMemoryCursor *me = new IONaturalMemoryCursor; + IONaturalMemoryCursor *me = new IONaturalMemoryCursor; - if (me && !me->initWithSpecification(inMaxSegmentSize, - inMaxTransferSize, - inAlignment)) - { - me->release(); - return 0; - } + if (me && !me->initWithSpecification(inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) { + me->release(); + return 0; + } - return me; + return me; } -bool +bool IONaturalMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, - IOPhysicalLength inMaxTransferSize, - IOPhysicalLength inAlignment) + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) { - return super::initWithSpecification(&IONaturalMemoryCursor::outputSegment, - inMaxSegmentSize, - inMaxTransferSize, - inAlignment); + return super::initWithSpecification(&IONaturalMemoryCursor::outputSegment, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment); } /************************** class IOBigMemoryCursor **************************/ @@ -236,50 +238,49 @@ IONaturalMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, #define super IOMemoryCursor OSDefineMetaClassAndStructors(IOBigMemoryCursor, IOMemoryCursor) -void +void 
IOBigMemoryCursor::outputSegment(PhysicalSegment inSegment, - void * inSegments, - UInt32 inSegmentIndex) + void * inSegments, + UInt32 inSegmentIndex) { - IOPhysicalAddress * segment; + IOPhysicalAddress * segment; - segment = &((PhysicalSegment *) inSegments)[inSegmentIndex].location; + segment = &((PhysicalSegment *) inSegments)[inSegmentIndex].location; #if IOPhysSize == 64 - OSWriteBigInt64(segment, 0, inSegment.location); - OSWriteBigInt64(segment, sizeof(IOPhysicalAddress), inSegment.length); + OSWriteBigInt64(segment, 0, inSegment.location); + OSWriteBigInt64(segment, sizeof(IOPhysicalAddress), inSegment.length); #else - OSWriteBigInt(segment, 0, inSegment.location); - OSWriteBigInt(segment, sizeof(IOPhysicalAddress), inSegment.length); + OSWriteBigInt(segment, 0, inSegment.location); + OSWriteBigInt(segment, sizeof(IOPhysicalAddress), inSegment.length); #endif } IOBigMemoryCursor * IOBigMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, - IOPhysicalLength inMaxTransferSize, - IOPhysicalLength inAlignment) + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) { - IOBigMemoryCursor * me = new IOBigMemoryCursor; + IOBigMemoryCursor * me = new IOBigMemoryCursor; - if (me && !me->initWithSpecification(inMaxSegmentSize, - inMaxTransferSize, - inAlignment)) - { - me->release(); - return 0; - } + if (me && !me->initWithSpecification(inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) { + me->release(); + return 0; + } - return me; + return me; } -bool +bool IOBigMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, - IOPhysicalLength inMaxTransferSize, - IOPhysicalLength inAlignment) + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) { - return super::initWithSpecification(&IOBigMemoryCursor::outputSegment, - inMaxSegmentSize, - inMaxTransferSize, - inAlignment); + return super::initWithSpecification(&IOBigMemoryCursor::outputSegment, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment); } /************************* class IOLittleMemoryCursor ************************/ @@ -288,50 +289,49 @@ IOBigMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, #define super IOMemoryCursor OSDefineMetaClassAndStructors(IOLittleMemoryCursor, IOMemoryCursor) -void +void IOLittleMemoryCursor::outputSegment(PhysicalSegment inSegment, - void * inSegments, - UInt32 inSegmentIndex) + void * inSegments, + UInt32 inSegmentIndex) { - IOPhysicalAddress * segment; + IOPhysicalAddress * segment; - segment = &((PhysicalSegment *) inSegments)[inSegmentIndex].location; + segment = &((PhysicalSegment *) inSegments)[inSegmentIndex].location; #if IOPhysSize == 64 - OSWriteLittleInt64(segment, 0, inSegment.location); - OSWriteLittleInt64(segment, sizeof(IOPhysicalAddress), inSegment.length); + OSWriteLittleInt64(segment, 0, inSegment.location); + OSWriteLittleInt64(segment, sizeof(IOPhysicalAddress), inSegment.length); #else - OSWriteLittleInt(segment, 0, inSegment.location); - OSWriteLittleInt(segment, sizeof(IOPhysicalAddress), inSegment.length); + OSWriteLittleInt(segment, 0, inSegment.location); + OSWriteLittleInt(segment, sizeof(IOPhysicalAddress), inSegment.length); #endif } IOLittleMemoryCursor * IOLittleMemoryCursor::withSpecification(IOPhysicalLength inMaxSegmentSize, - IOPhysicalLength inMaxTransferSize, - IOPhysicalLength inAlignment) + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) { - IOLittleMemoryCursor * me = new IOLittleMemoryCursor; + IOLittleMemoryCursor * me = new IOLittleMemoryCursor; - if 
(me && !me->initWithSpecification(inMaxSegmentSize, - inMaxTransferSize, - inAlignment)) - { - me->release(); - return 0; - } + if (me && !me->initWithSpecification(inMaxSegmentSize, + inMaxTransferSize, + inAlignment)) { + me->release(); + return 0; + } - return me; + return me; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool +bool IOLittleMemoryCursor::initWithSpecification(IOPhysicalLength inMaxSegmentSize, - IOPhysicalLength inMaxTransferSize, - IOPhysicalLength inAlignment) + IOPhysicalLength inMaxTransferSize, + IOPhysicalLength inAlignment) { - return super::initWithSpecification(&IOLittleMemoryCursor::outputSegment, - inMaxSegmentSize, - inMaxTransferSize, - inAlignment); + return super::initWithSpecification(&IOLittleMemoryCursor::outputSegment, + inMaxSegmentSize, + inMaxTransferSize, + inAlignment); } diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp index 3c1c4674b..3ff1f79ca 100644 --- a/iokit/Kernel/IOMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -71,11 +71,11 @@ extern void ipc_port_release_send(ipc_port_t port); __END_DECLS -#define kIOMapperWaitSystem ((IOMapper *) 1) +#define kIOMapperWaitSystem ((IOMapper *) 1) static IOMapper * gIOSystemMapper = NULL; -ppnum_t gIOLastPage; +ppnum_t gIOLastPage; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -89,16 +89,16 @@ OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor) static IORecursiveLock * gIOMemoryLock; -#define LOCK IORecursiveLockLock( gIOMemoryLock) -#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock) -#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT) -#define WAKEUP \ +#define LOCK IORecursiveLockLock( gIOMemoryLock) +#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock) +#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT) +#define WAKEUP \ IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false) #if 0 -#define DEBG(fmt, args...) { kprintf(fmt, ## args); } +#define DEBG(fmt, args...) { kprintf(fmt, ## args); } #else -#define DEBG(fmt, args...) {} +#define DEBG(fmt, args...) 
{} #endif /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -107,60 +107,58 @@ static IORecursiveLock * gIOMemoryLock; // Function enum ioPLBlockFlags { - kIOPLOnDevice = 0x00000001, - kIOPLExternUPL = 0x00000002, + kIOPLOnDevice = 0x00000001, + kIOPLExternUPL = 0x00000002, }; -struct IOMDPersistentInitData -{ - const IOGeneralMemoryDescriptor * fMD; - IOMemoryReference * fMemRef; +struct IOMDPersistentInitData { + const IOGeneralMemoryDescriptor * fMD; + IOMemoryReference * fMemRef; }; struct ioPLBlock { - upl_t fIOPL; - vm_address_t fPageInfo; // Pointer to page list or index into it - uint32_t fIOMDOffset; // The offset of this iopl in descriptor - ppnum_t fMappedPage; // Page number of first page in this iopl - unsigned int fPageOffset; // Offset within first page of iopl - unsigned int fFlags; // Flags + upl_t fIOPL; + vm_address_t fPageInfo; // Pointer to page list or index into it + uint32_t fIOMDOffset; // The offset of this iopl in descriptor + ppnum_t fMappedPage; // Page number of first page in this iopl + unsigned int fPageOffset; // Offset within first page of iopl + unsigned int fFlags; // Flags }; enum { kMaxWireTags = 6 }; -struct ioGMDData -{ - IOMapper * fMapper; - uint64_t fDMAMapAlignment; - uint64_t fMappedBase; - uint64_t fMappedLength; - uint64_t fPreparationID; +struct ioGMDData { + IOMapper * fMapper; + uint64_t fDMAMapAlignment; + uint64_t fMappedBase; + uint64_t fMappedLength; + uint64_t fPreparationID; #if IOTRACKING - IOTracking fWireTracking; + IOTracking fWireTracking; #endif /* IOTRACKING */ - unsigned int fPageCnt; - uint8_t fDMAMapNumAddressBits; - unsigned char fDiscontig:1; - unsigned char fCompletionError:1; - unsigned char fMappedBaseValid:1; - unsigned char _resv:3; - unsigned char fDMAAccess:2; - - /* variable length arrays */ - upl_page_info_t fPageList[1] + unsigned int fPageCnt; + uint8_t fDMAMapNumAddressBits; + unsigned char fDiscontig:1; + unsigned char fCompletionError:1; + unsigned char fMappedBaseValid:1; + unsigned char _resv:3; + unsigned char fDMAAccess:2; + + /* variable length arrays */ + upl_page_info_t fPageList[1] #if __LP64__ - // align fPageList as for ioPLBlock - __attribute__((aligned(sizeof(upl_t)))) + // align fPageList as for ioPLBlock + __attribute__((aligned(sizeof(upl_t)))) #endif - ; - ioPLBlock fBlocks[1]; + ; + ioPLBlock fBlocks[1]; }; -#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) -#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt])) -#define getNumIOPL(osd, d) \ +#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) +#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt])) +#define getNumIOPL(osd, d) \ (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) -#define getPageList(d) (&(d->fPageList[0])) +#define getPageList(d) (&(d->fPageList[0])) #define computeDataSize(p, u) \ (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) @@ -171,43 +169,43 @@ enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote }; #define next_page(a) ( trunc_page(a) + PAGE_SIZE ) extern "C" { - -kern_return_t device_data_action( - uintptr_t device_handle, - ipc_port_t device_pager, - vm_prot_t protection, - vm_object_offset_t offset, - vm_size_t size) +kern_return_t +device_data_action( + uintptr_t device_handle, + ipc_port_t device_pager, + vm_prot_t protection, + vm_object_offset_t offset, + vm_size_t size) { - kern_return_t kr; - IOMemoryDescriptorReserved * ref = 
(IOMemoryDescriptorReserved *) device_handle; - IOMemoryDescriptor * memDesc; - - LOCK; - memDesc = ref->dp.memory; - if( memDesc) - { - memDesc->retain(); - kr = memDesc->handleFault(device_pager, offset, size); - memDesc->release(); - } - else - kr = KERN_ABORTED; - UNLOCK; - - return( kr ); + kern_return_t kr; + IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; + IOMemoryDescriptor * memDesc; + + LOCK; + memDesc = ref->dp.memory; + if (memDesc) { + memDesc->retain(); + kr = memDesc->handleFault(device_pager, offset, size); + memDesc->release(); + } else { + kr = KERN_ABORTED; + } + UNLOCK; + + return kr; } -kern_return_t device_close( - uintptr_t device_handle) +kern_return_t +device_close( + uintptr_t device_handle) { - IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; + IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle; - IODelete( ref, IOMemoryDescriptorReserved, 1 ); + IODelete( ref, IOMemoryDescriptorReserved, 1 ); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } -}; // end extern "C" +}; // end extern "C" /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -216,198 +214,191 @@ kern_return_t device_close( // checked for as a NULL reference is illegal. static inline void getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables - UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind) + UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind) { - assert(kIOMemoryTypeUIO == type - || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type - || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type); - if (kIOMemoryTypeUIO == type) { - user_size_t us; - user_addr_t ad; - uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us; - } + assert(kIOMemoryTypeUIO == type + || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type + || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type); + if (kIOMemoryTypeUIO == type) { + user_size_t us; + user_addr_t ad; + uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us; + } #ifndef __LP64__ - else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) { - IOAddressRange cur = r.v64[ind]; - addr = cur.address; - len = cur.length; - } + else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) { + IOAddressRange cur = r.v64[ind]; + addr = cur.address; + len = cur.length; + } #endif /* !__LP64__ */ - else { - IOVirtualRange cur = r.v[ind]; - addr = cur.address; - len = cur.length; - } + else { + IOVirtualRange cur = r.v[ind]; + addr = cur.address; + len = cur.length; + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static IOReturn +static IOReturn purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state) { - IOReturn err = kIOReturnSuccess; + IOReturn err = kIOReturnSuccess; - *control = VM_PURGABLE_SET_STATE; + *control = VM_PURGABLE_SET_STATE; - enum { kIOMemoryPurgeableControlMask = 15 }; + enum { kIOMemoryPurgeableControlMask = 15 }; - switch (kIOMemoryPurgeableControlMask & newState) - { + switch (kIOMemoryPurgeableControlMask & newState) { case kIOMemoryPurgeableKeepCurrent: - *control = VM_PURGABLE_GET_STATE; - break; + *control = VM_PURGABLE_GET_STATE; + break; case kIOMemoryPurgeableNonVolatile: - *state = VM_PURGABLE_NONVOLATILE; - break; + *state = VM_PURGABLE_NONVOLATILE; + break; case kIOMemoryPurgeableVolatile: - *state = 
VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask); - break; + *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask); + break; case kIOMemoryPurgeableEmpty: - *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask); - break; + *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask); + break; default: - err = kIOReturnBadArgument; - break; - } - - if (*control == VM_PURGABLE_SET_STATE) { - // let VM know this call is from the kernel and is allowed to alter - // the volatility of the memory entry even if it was created with - // MAP_MEM_PURGABLE_KERNEL_ONLY - *control = VM_PURGABLE_SET_STATE_FROM_KERNEL; - } - - return (err); + err = kIOReturnBadArgument; + break; + } + + if (*control == VM_PURGABLE_SET_STATE) { + // let VM know this call is from the kernel and is allowed to alter + // the volatility of the memory entry even if it was created with + // MAP_MEM_PURGABLE_KERNEL_ONLY + *control = VM_PURGABLE_SET_STATE_FROM_KERNEL; + } + + return err; } -static IOReturn +static IOReturn purgeableStateBits(int * state) { - IOReturn err = kIOReturnSuccess; + IOReturn err = kIOReturnSuccess; - switch (VM_PURGABLE_STATE_MASK & *state) - { + switch (VM_PURGABLE_STATE_MASK & *state) { case VM_PURGABLE_NONVOLATILE: - *state = kIOMemoryPurgeableNonVolatile; - break; + *state = kIOMemoryPurgeableNonVolatile; + break; case VM_PURGABLE_VOLATILE: - *state = kIOMemoryPurgeableVolatile; - break; + *state = kIOMemoryPurgeableVolatile; + break; case VM_PURGABLE_EMPTY: - *state = kIOMemoryPurgeableEmpty; - break; + *state = kIOMemoryPurgeableEmpty; + break; default: - *state = kIOMemoryPurgeableNonVolatile; - err = kIOReturnNotReady; - break; - } - return (err); + *state = kIOMemoryPurgeableNonVolatile; + err = kIOReturnNotReady; + break; + } + return err; } -static vm_prot_t +static vm_prot_t vmProtForCacheMode(IOOptionBits cacheMode) { - vm_prot_t prot = 0; - switch (cacheMode) - { + vm_prot_t prot = 0; + switch (cacheMode) { case kIOInhibitCache: - SET_MAP_MEM(MAP_MEM_IO, prot); - break; + SET_MAP_MEM(MAP_MEM_IO, prot); + break; case kIOWriteThruCache: - SET_MAP_MEM(MAP_MEM_WTHRU, prot); - break; + SET_MAP_MEM(MAP_MEM_WTHRU, prot); + break; case kIOWriteCombineCache: - SET_MAP_MEM(MAP_MEM_WCOMB, prot); - break; + SET_MAP_MEM(MAP_MEM_WCOMB, prot); + break; case kIOCopybackCache: - SET_MAP_MEM(MAP_MEM_COPYBACK, prot); - break; + SET_MAP_MEM(MAP_MEM_COPYBACK, prot); + break; case kIOCopybackInnerCache: - SET_MAP_MEM(MAP_MEM_INNERWBACK, prot); - break; + SET_MAP_MEM(MAP_MEM_INNERWBACK, prot); + break; case kIOPostedWrite: - SET_MAP_MEM(MAP_MEM_POSTED, prot); - break; + SET_MAP_MEM(MAP_MEM_POSTED, prot); + break; case kIODefaultCache: default: - SET_MAP_MEM(MAP_MEM_NOOP, prot); - break; - } + SET_MAP_MEM(MAP_MEM_NOOP, prot); + break; + } - return (prot); + return prot; } static unsigned int pagerFlagsForCacheMode(IOOptionBits cacheMode) { - unsigned int pagerFlags = 0; - switch (cacheMode) - { + unsigned int pagerFlags = 0; + switch (cacheMode) { case kIOInhibitCache: - pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; case kIOWriteThruCache: - pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; - break; + pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; + break; case kIOWriteCombineCache: - pagerFlags = DEVICE_PAGER_CACHE_INHIB | 
DEVICE_PAGER_COHERENT; - break; + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT; + break; case kIOCopybackCache: - pagerFlags = DEVICE_PAGER_COHERENT; - break; + pagerFlags = DEVICE_PAGER_COHERENT; + break; case kIOCopybackInnerCache: - pagerFlags = DEVICE_PAGER_COHERENT; - break; + pagerFlags = DEVICE_PAGER_COHERENT; + break; case kIOPostedWrite: - pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK; - break; + pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK; + break; case kIODefaultCache: default: - pagerFlags = -1U; - break; - } - return (pagerFlags); + pagerFlags = -1U; + break; + } + return pagerFlags; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -struct IOMemoryEntry -{ - ipc_port_t entry; - int64_t offset; - uint64_t size; +struct IOMemoryEntry { + ipc_port_t entry; + int64_t offset; + uint64_t size; }; -struct IOMemoryReference -{ - volatile SInt32 refCount; - vm_prot_t prot; - uint32_t capacity; - uint32_t count; - struct IOMemoryReference * mapRef; - IOMemoryEntry entries[0]; +struct IOMemoryReference { + volatile SInt32 refCount; + vm_prot_t prot; + uint32_t capacity; + uint32_t count; + struct IOMemoryReference * mapRef; + IOMemoryEntry entries[0]; }; -enum -{ - kIOMemoryReferenceReuse = 0x00000001, - kIOMemoryReferenceWrite = 0x00000002, - kIOMemoryReferenceCOW = 0x00000004, +enum{ + kIOMemoryReferenceReuse = 0x00000001, + kIOMemoryReferenceWrite = 0x00000002, + kIOMemoryReferenceCOW = 0x00000004, }; SInt32 gIOMemoryReferenceCount; @@ -415,774 +406,806 @@ SInt32 gIOMemoryReferenceCount; IOMemoryReference * IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc) { - IOMemoryReference * ref; - size_t newSize, oldSize, copySize; - - newSize = (sizeof(IOMemoryReference) - - sizeof(ref->entries) - + capacity * sizeof(ref->entries[0])); - ref = (typeof(ref)) IOMalloc(newSize); - if (realloc) - { - oldSize = (sizeof(IOMemoryReference) - - sizeof(realloc->entries) - + realloc->capacity * sizeof(realloc->entries[0])); - copySize = oldSize; - if (copySize > newSize) copySize = newSize; - if (ref) bcopy(realloc, ref, copySize); - IOFree(realloc, oldSize); - } - else if (ref) - { - bzero(ref, sizeof(*ref)); - ref->refCount = 1; - OSIncrementAtomic(&gIOMemoryReferenceCount); - } - if (!ref) return (0); - ref->capacity = capacity; - return (ref); + IOMemoryReference * ref; + size_t newSize, oldSize, copySize; + + newSize = (sizeof(IOMemoryReference) + - sizeof(ref->entries) + + capacity * sizeof(ref->entries[0])); + ref = (typeof(ref))IOMalloc(newSize); + if (realloc) { + oldSize = (sizeof(IOMemoryReference) + - sizeof(realloc->entries) + + realloc->capacity * sizeof(realloc->entries[0])); + copySize = oldSize; + if (copySize > newSize) { + copySize = newSize; + } + if (ref) { + bcopy(realloc, ref, copySize); + } + IOFree(realloc, oldSize); + } else if (ref) { + bzero(ref, sizeof(*ref)); + ref->refCount = 1; + OSIncrementAtomic(&gIOMemoryReferenceCount); + } + if (!ref) { + return 0; + } + ref->capacity = capacity; + return ref; } -void +void IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref) { - IOMemoryEntry * entries; - size_t size; - - if (ref->mapRef) - { - memoryReferenceFree(ref->mapRef); - ref->mapRef = 0; - } - - entries = ref->entries + ref->count; - while (entries > 
&ref->entries[0]) - { - entries--; - ipc_port_release_send(entries->entry); - } - size = (sizeof(IOMemoryReference) - - sizeof(ref->entries) - + ref->capacity * sizeof(ref->entries[0])); - IOFree(ref, size); - - OSDecrementAtomic(&gIOMemoryReferenceCount); + IOMemoryEntry * entries; + size_t size; + + if (ref->mapRef) { + memoryReferenceFree(ref->mapRef); + ref->mapRef = 0; + } + + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) { + entries--; + ipc_port_release_send(entries->entry); + } + size = (sizeof(IOMemoryReference) + - sizeof(ref->entries) + + ref->capacity * sizeof(ref->entries[0])); + IOFree(ref, size); + + OSDecrementAtomic(&gIOMemoryReferenceCount); } -void +void IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref) { - if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref); + if (1 == OSDecrementAtomic(&ref->refCount)) { + memoryReferenceFree(ref); + } } IOReturn IOGeneralMemoryDescriptor::memoryReferenceCreate( - IOOptionBits options, - IOMemoryReference ** reference) + IOOptionBits options, + IOMemoryReference ** reference) { - enum { kCapacity = 4, kCapacityInc = 4 }; - - kern_return_t err; - IOMemoryReference * ref; - IOMemoryEntry * entries; - IOMemoryEntry * cloneEntries; - vm_map_t map; - ipc_port_t entry, cloneEntry; - vm_prot_t prot; - memory_object_size_t actualSize; - uint32_t rangeIdx; - uint32_t count; - mach_vm_address_t entryAddr, endAddr, entrySize; - mach_vm_size_t srcAddr, srcLen; - mach_vm_size_t nextAddr, nextLen; - mach_vm_size_t offset, remain; - IOByteCount physLen; - IOOptionBits type = (_flags & kIOMemoryTypeMask); - IOOptionBits cacheMode; - unsigned int pagerFlags; - vm_tag_t tag; - - ref = memoryReferenceAlloc(kCapacity, NULL); - if (!ref) return (kIOReturnNoMemory); - - tag = getVMTag(kernel_map); - entries = &ref->entries[0]; - count = 0; - err = KERN_SUCCESS; - - offset = 0; - rangeIdx = 0; - if (_task) - { - getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); - } - else - { - nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); - nextLen = physLen; - - // default cache mode for physical - if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) - { - IOOptionBits mode; - pagerFlags = IODefaultCacheBits(nextAddr); - if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) - { - if (DEVICE_PAGER_EARLY_ACK & pagerFlags) - mode = kIOPostedWrite; - else if (DEVICE_PAGER_GUARDED & pagerFlags) - mode = kIOInhibitCache; - else - mode = kIOWriteCombineCache; - } - else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags) - mode = kIOWriteThruCache; - else - mode = kIOCopybackCache; - _flags |= (mode << kIOMemoryBufferCacheShift); - } - } - - // cache mode & vm_prot - prot = VM_PROT_READ; - cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); - prot |= vmProtForCacheMode(cacheMode); - // VM system requires write access to change cache mode - if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE; - if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE; - if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE; - if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY; - - if ((kIOMemoryReferenceReuse & options) && _memRef) - { - cloneEntries = &_memRef->entries[0]; - prot |= MAP_MEM_NAMED_REUSE; - } - - if (_task) - { - // virtual ranges - - if (kIOMemoryBufferPageable & _flags) - { - // IOBufferMemoryDescriptor alloc - set flags for entry + object create - prot |= MAP_MEM_NAMED_CREATE; - if 
(kIOMemoryBufferPurgeable & _flags) - { - prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY); - if (VM_KERN_MEMORY_SKYWALK == tag) - { - prot |= MAP_MEM_LEDGER_TAG_NETWORK; + enum { kCapacity = 4, kCapacityInc = 4 }; + + kern_return_t err; + IOMemoryReference * ref; + IOMemoryEntry * entries; + IOMemoryEntry * cloneEntries; + vm_map_t map; + ipc_port_t entry, cloneEntry; + vm_prot_t prot; + memory_object_size_t actualSize; + uint32_t rangeIdx; + uint32_t count; + mach_vm_address_t entryAddr, endAddr, entrySize; + mach_vm_size_t srcAddr, srcLen; + mach_vm_size_t nextAddr, nextLen; + mach_vm_size_t offset, remain; + IOByteCount physLen; + IOOptionBits type = (_flags & kIOMemoryTypeMask); + IOOptionBits cacheMode; + unsigned int pagerFlags; + vm_tag_t tag; + + ref = memoryReferenceAlloc(kCapacity, NULL); + if (!ref) { + return kIOReturnNoMemory; + } + + tag = getVMTag(kernel_map); + entries = &ref->entries[0]; + count = 0; + err = KERN_SUCCESS; + + offset = 0; + rangeIdx = 0; + if (_task) { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + } else { + nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + nextLen = physLen; + + // default cache mode for physical + if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) { + IOOptionBits mode; + pagerFlags = IODefaultCacheBits(nextAddr); + if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) { + if (DEVICE_PAGER_EARLY_ACK & pagerFlags) { + mode = kIOPostedWrite; + } else if (DEVICE_PAGER_GUARDED & pagerFlags) { + mode = kIOInhibitCache; + } else { + mode = kIOWriteCombineCache; + } + } else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags) { + mode = kIOWriteThruCache; + } else { + mode = kIOCopybackCache; + } + _flags |= (mode << kIOMemoryBufferCacheShift); } - } - if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED; + } + + // cache mode & vm_prot + prot = VM_PROT_READ; + cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); + prot |= vmProtForCacheMode(cacheMode); + // VM system requires write access to change cache mode + if (kIODefaultCache != cacheMode) { + prot |= VM_PROT_WRITE; + } + if (kIODirectionOut != (kIODirectionOutIn & _flags)) { + prot |= VM_PROT_WRITE; + } + if (kIOMemoryReferenceWrite & options) { + prot |= VM_PROT_WRITE; + } + if (kIOMemoryReferenceCOW & options) { + prot |= MAP_MEM_VM_COPY; + } - prot |= VM_PROT_WRITE; - map = NULL; + if ((kIOMemoryReferenceReuse & options) && _memRef) { + cloneEntries = &_memRef->entries[0]; + prot |= MAP_MEM_NAMED_REUSE; } - else map = get_task_map(_task); - remain = _length; - while (remain) - { - srcAddr = nextAddr; - srcLen = nextLen; - nextAddr = 0; - nextLen = 0; - // coalesce addr range - for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) - { - getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); - if ((srcAddr + srcLen) != nextAddr) break; - srcLen += nextLen; - } - entryAddr = trunc_page_64(srcAddr); - endAddr = round_page_64(srcAddr + srcLen); - do - { - entrySize = (endAddr - entryAddr); - if (!entrySize) break; - actualSize = entrySize; - - cloneEntry = MACH_PORT_NULL; - if (MAP_MEM_NAMED_REUSE & prot) - { - if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry; - else prot &= ~MAP_MEM_NAMED_REUSE; + if (_task) { + // virtual ranges + + if (kIOMemoryBufferPageable & _flags) { + // IOBufferMemoryDescriptor alloc - set flags for entry + object create + prot |= MAP_MEM_NAMED_CREATE; + if (kIOMemoryBufferPurgeable & _flags) { + prot |= 
(MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY); + if (VM_KERN_MEMORY_SKYWALK == tag) { + prot |= MAP_MEM_LEDGER_TAG_NETWORK; + } + } + if (kIOMemoryUseReserve & _flags) { + prot |= MAP_MEM_GRAB_SECLUDED; + } + + prot |= VM_PROT_WRITE; + map = NULL; + } else { + map = get_task_map(_task); + } + + remain = _length; + while (remain) { + srcAddr = nextAddr; + srcLen = nextLen; + nextAddr = 0; + nextLen = 0; + // coalesce addr range + for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if ((srcAddr + srcLen) != nextAddr) { + break; + } + srcLen += nextLen; + } + entryAddr = trunc_page_64(srcAddr); + endAddr = round_page_64(srcAddr + srcLen); + do{ + entrySize = (endAddr - entryAddr); + if (!entrySize) { + break; + } + actualSize = entrySize; + + cloneEntry = MACH_PORT_NULL; + if (MAP_MEM_NAMED_REUSE & prot) { + if (cloneEntries < &_memRef->entries[_memRef->count]) { + cloneEntry = cloneEntries->entry; + } else { + prot &= ~MAP_MEM_NAMED_REUSE; + } + } + + err = mach_make_memory_entry_internal(map, + &actualSize, entryAddr, prot, &entry, cloneEntry); + + if (KERN_SUCCESS != err) { + break; + } + if (actualSize > entrySize) { + panic("mach_make_memory_entry_64 actualSize"); + } + + if (count >= ref->capacity) { + ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref); + entries = &ref->entries[count]; + } + entries->entry = entry; + entries->size = actualSize; + entries->offset = offset + (entryAddr - srcAddr); + entryAddr += actualSize; + if (MAP_MEM_NAMED_REUSE & prot) { + if ((cloneEntries->entry == entries->entry) + && (cloneEntries->size == entries->size) + && (cloneEntries->offset == entries->offset)) { + cloneEntries++; + } else { + prot &= ~MAP_MEM_NAMED_REUSE; + } + } + entries++; + count++; + }while (true); + offset += srcLen; + remain -= srcLen; } + } else { + // _task == 0, physical or kIOMemoryTypeUPL + memory_object_t pager; + vm_size_t size = ptoa_32(_pages); - err = mach_make_memory_entry_internal(map, - &actualSize, entryAddr, prot, &entry, cloneEntry); + if (!getKernelReserved()) { + panic("getKernelReserved"); + } - if (KERN_SUCCESS != err) break; - if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize"); + reserved->dp.pagerContig = (1 == _rangesCount); + reserved->dp.memory = this; - if (count >= ref->capacity) - { - ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref); - entries = &ref->entries[count]; - } - entries->entry = entry; - entries->size = actualSize; - entries->offset = offset + (entryAddr - srcAddr); - entryAddr += actualSize; - if (MAP_MEM_NAMED_REUSE & prot) - { - if ((cloneEntries->entry == entries->entry) - && (cloneEntries->size == entries->size) - && (cloneEntries->offset == entries->offset)) cloneEntries++; - else prot &= ~MAP_MEM_NAMED_REUSE; - } - entries++; - count++; - } - while (true); - offset += srcLen; - remain -= srcLen; - } - } - else - { - // _task == 0, physical or kIOMemoryTypeUPL - memory_object_t pager; - vm_size_t size = ptoa_32(_pages); + pagerFlags = pagerFlagsForCacheMode(cacheMode); + if (-1U == pagerFlags) { + panic("phys is kIODefaultCache"); + } + if (reserved->dp.pagerContig) { + pagerFlags |= DEVICE_PAGER_CONTIGUOUS; + } - if (!getKernelReserved()) panic("getKernelReserved"); + pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved, + size, pagerFlags); + assert(pager); + if (!pager) { + err = kIOReturnVMError; + } else { + srcAddr = nextAddr; + entryAddr = trunc_page_64(srcAddr); + err = 
mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/, + size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry); + assert(KERN_SUCCESS == err); + if (KERN_SUCCESS != err) { + device_pager_deallocate(pager); + } else { + reserved->dp.devicePager = pager; + entries->entry = entry; + entries->size = size; + entries->offset = offset + (entryAddr - srcAddr); + entries++; + count++; + } + } + } - reserved->dp.pagerContig = (1 == _rangesCount); - reserved->dp.memory = this; + ref->count = count; + ref->prot = prot; - pagerFlags = pagerFlagsForCacheMode(cacheMode); - if (-1U == pagerFlags) panic("phys is kIODefaultCache"); - if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS; + if (_task && (KERN_SUCCESS == err) + && (kIOMemoryMapCopyOnWrite & _flags) + && !(kIOMemoryReferenceCOW & options)) { + err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef); + } - pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved, - size, pagerFlags); - assert (pager); - if (!pager) err = kIOReturnVMError; - else - { - srcAddr = nextAddr; - entryAddr = trunc_page_64(srcAddr); - err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/, - size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry); - assert (KERN_SUCCESS == err); - if (KERN_SUCCESS != err) device_pager_deallocate(pager); - else - { - reserved->dp.devicePager = pager; - entries->entry = entry; - entries->size = size; - entries->offset = offset + (entryAddr - srcAddr); - entries++; - count++; - } - } - } - - ref->count = count; - ref->prot = prot; - - if (_task && (KERN_SUCCESS == err) - && (kIOMemoryMapCopyOnWrite & _flags) - && !(kIOMemoryReferenceCOW & options)) - { - err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef); - } - - if (KERN_SUCCESS == err) - { - if (MAP_MEM_NAMED_REUSE & prot) - { - memoryReferenceFree(ref); - OSIncrementAtomic(&_memRef->refCount); - ref = _memRef; + if (KERN_SUCCESS == err) { + if (MAP_MEM_NAMED_REUSE & prot) { + memoryReferenceFree(ref); + OSIncrementAtomic(&_memRef->refCount); + ref = _memRef; + } + } else { + memoryReferenceFree(ref); + ref = NULL; } - } - else - { - memoryReferenceFree(ref); - ref = NULL; - } - *reference = ref; + *reference = ref; - return (err); + return err; } -kern_return_t +kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref) { - IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref; - IOReturn err; - vm_map_offset_t addr; - - addr = ref->mapped; - - err = vm_map_enter_mem_object(map, &addr, ref->size, - (vm_map_offset_t) 0, - (((ref->options & kIOMapAnywhere) - ? VM_FLAGS_ANYWHERE - : VM_FLAGS_FIXED)), - VM_MAP_KERNEL_FLAGS_NONE, - ref->tag, - IPC_PORT_NULL, - (memory_object_offset_t) 0, - false, /* copy */ - ref->prot, - ref->prot, - VM_INHERIT_NONE); - if (KERN_SUCCESS == err) - { - ref->mapped = (mach_vm_address_t) addr; - ref->map = map; - } - - return( err ); + IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref; + IOReturn err; + vm_map_offset_t addr; + + addr = ref->mapped; + + err = vm_map_enter_mem_object(map, &addr, ref->size, + (vm_map_offset_t) 0, + (((ref->options & kIOMapAnywhere) + ? 
VM_FLAGS_ANYWHERE + : VM_FLAGS_FIXED)), + VM_MAP_KERNEL_FLAGS_NONE, + ref->tag, + IPC_PORT_NULL, + (memory_object_offset_t) 0, + false, /* copy */ + ref->prot, + ref->prot, + VM_INHERIT_NONE); + if (KERN_SUCCESS == err) { + ref->mapped = (mach_vm_address_t) addr; + ref->map = map; + } + + return err; } -IOReturn +IOReturn IOGeneralMemoryDescriptor::memoryReferenceMap( - IOMemoryReference * ref, - vm_map_t map, - mach_vm_size_t inoffset, - mach_vm_size_t size, - IOOptionBits options, - mach_vm_address_t * inaddr) + IOMemoryReference * ref, + vm_map_t map, + mach_vm_size_t inoffset, + mach_vm_size_t size, + IOOptionBits options, + mach_vm_address_t * inaddr) { - IOReturn err; - int64_t offset = inoffset; - uint32_t rangeIdx, entryIdx; - vm_map_offset_t addr, mapAddr; - vm_map_offset_t pageOffset, entryOffset, remain, chunk; - - mach_vm_address_t nextAddr; - mach_vm_size_t nextLen; - IOByteCount physLen; - IOMemoryEntry * entry; - vm_prot_t prot, memEntryCacheMode; - IOOptionBits type; - IOOptionBits cacheMode; - vm_tag_t tag; - // for the kIOMapPrefault option. - upl_page_info_t * pageList = NULL; - UInt currentPageIndex = 0; - bool didAlloc; - - if (ref->mapRef) - { - err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); - return (err); - } - - type = _flags & kIOMemoryTypeMask; - - prot = VM_PROT_READ; - if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE; - prot &= ref->prot; - - cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift); - if (kIODefaultCache != cacheMode) - { - // VM system requires write access to update named entry cache mode - memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); - } - - tag = getVMTag(map); - - if (_task) - { - // Find first range for offset - if (!_rangesCount) return (kIOReturnBadArgument); - for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) - { - getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); - if (remain < nextLen) break; - remain -= nextLen; - } - } - else - { - rangeIdx = 0; - remain = 0; - nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); - nextLen = size; - } - - assert(remain < nextLen); - if (remain >= nextLen) return (kIOReturnBadArgument); - - nextAddr += remain; - nextLen -= remain; - pageOffset = (page_mask & nextAddr); - addr = 0; - didAlloc = false; - - if (!(options & kIOMapAnywhere)) - { - addr = *inaddr; - if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned); - addr -= pageOffset; - } - - // find first entry for offset - for (entryIdx = 0; - (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset); - entryIdx++) {} - entryIdx--; - entry = &ref->entries[entryIdx]; - - // allocate VM - size = round_page_64(size + pageOffset); - if (kIOMapOverwrite & options) - { - if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) - { - map = IOPageableMapForAddress(addr); - } - err = KERN_SUCCESS; - } - else - { - IOMemoryDescriptorMapAllocRef ref; - ref.map = map; - ref.tag = tag; - ref.options = options; - ref.size = size; - ref.prot = prot; - if (options & kIOMapAnywhere) - // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE - ref.mapped = 0; - else - ref.mapped = addr; - if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) - err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); - else - err = IOMemoryDescriptorMapAlloc(ref.map, &ref); - if (KERN_SUCCESS == err) - { - addr = ref.mapped; - map = ref.map; - didAlloc = true; - } - } - - 
/* - * If the memory is associated with a device pager but doesn't have a UPL, - * it will be immediately faulted in through the pager via populateDevicePager(). - * kIOMapPrefault is redundant in that case, so don't try to use it for UPL - * operations. - */ - if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) - options &= ~kIOMapPrefault; - - /* - * Prefaulting is only possible if we wired the memory earlier. Check the - * memory type, and the underlying data. - */ - if (options & kIOMapPrefault) - { - /* - * The memory must have been wired by calling ::prepare(), otherwise - * we don't have the UPL. Without UPLs, pages cannot be pre-faulted - */ - assert(_wireCount != 0); - assert(_memoryEntries != NULL); - if ((_wireCount == 0) || - (_memoryEntries == NULL)) - { - return kIOReturnBadArgument; - } - - // Get the page list. - ioGMDData* dataP = getDataP(_memoryEntries); - ioPLBlock const* ioplList = getIOPLList(dataP); - pageList = getPageList(dataP); - - // Get the number of IOPLs. - UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); - - /* - * Scan through the IOPL Info Blocks, looking for the first block containing - * the offset. The research will go past it, so we'll need to go back to the - * right range at the end. - */ - UInt ioplIndex = 0; - while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) - ioplIndex++; - ioplIndex--; - - // Retrieve the IOPL info block. - ioPLBlock ioplInfo = ioplList[ioplIndex]; - - /* - * For external UPLs, the fPageInfo points directly to the UPL's page_info_t - * array. - */ - if (ioplInfo.fFlags & kIOPLExternUPL) - pageList = (upl_page_info_t*) ioplInfo.fPageInfo; - else - pageList = &pageList[ioplInfo.fPageInfo]; - - // Rebase [offset] into the IOPL in order to looks for the first page index. - mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset; - - // Retrieve the index of the first page corresponding to the offset. - currentPageIndex = atop_32(offsetInIOPL); - } - - // enter mappings - remain = size; - mapAddr = addr; - addr += pageOffset; - - while (remain && (KERN_SUCCESS == err)) - { - entryOffset = offset - entry->offset; - if ((page_mask & entryOffset) != pageOffset) - { - err = kIOReturnNotAligned; - break; - } - - if (kIODefaultCache != cacheMode) - { - vm_size_t unused = 0; - err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/, - memEntryCacheMode, NULL, entry->entry); - assert (KERN_SUCCESS == err); - } - - entryOffset -= pageOffset; - if (entryOffset >= entry->size) panic("entryOffset"); - chunk = entry->size - entryOffset; - if (chunk) - { - vm_map_kernel_flags_t vmk_flags; - - vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; - vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */ - - if (chunk > remain) chunk = remain; - if (options & kIOMapPrefault) - { - UInt nb_pages = round_page(chunk) / PAGE_SIZE; - - err = vm_map_enter_mem_object_prefault(map, - &mapAddr, - chunk, 0 /* mask */, - (VM_FLAGS_FIXED - | VM_FLAGS_OVERWRITE), - vmk_flags, - tag, - entry->entry, - entryOffset, - prot, // cur - prot, // max - &pageList[currentPageIndex], - nb_pages); - - // Compute the next index in the page list. 
- currentPageIndex += nb_pages; - assert(currentPageIndex <= _pages); - } - else - { - err = vm_map_enter_mem_object(map, - &mapAddr, - chunk, 0 /* mask */, - (VM_FLAGS_FIXED - | VM_FLAGS_OVERWRITE), - vmk_flags, - tag, - entry->entry, - entryOffset, - false, // copy - prot, // cur - prot, // max - VM_INHERIT_NONE); - } - if (KERN_SUCCESS != err) break; - remain -= chunk; - if (!remain) break; - mapAddr += chunk; - offset += chunk - pageOffset; - } - pageOffset = 0; - entry++; - entryIdx++; - if (entryIdx >= ref->count) - { - err = kIOReturnOverrun; - break; - } - } - - if ((KERN_SUCCESS != err) && didAlloc) - { - (void) mach_vm_deallocate(map, trunc_page_64(addr), size); - addr = 0; - } - *inaddr = addr; - - return (err); + IOReturn err; + int64_t offset = inoffset; + uint32_t rangeIdx, entryIdx; + vm_map_offset_t addr, mapAddr; + vm_map_offset_t pageOffset, entryOffset, remain, chunk; + + mach_vm_address_t nextAddr; + mach_vm_size_t nextLen; + IOByteCount physLen; + IOMemoryEntry * entry; + vm_prot_t prot, memEntryCacheMode; + IOOptionBits type; + IOOptionBits cacheMode; + vm_tag_t tag; + // for the kIOMapPrefault option. + upl_page_info_t * pageList = NULL; + UInt currentPageIndex = 0; + bool didAlloc; + + if (ref->mapRef) { + err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); + return err; + } + + type = _flags & kIOMemoryTypeMask; + + prot = VM_PROT_READ; + if (!(kIOMapReadOnly & options)) { + prot |= VM_PROT_WRITE; + } + prot &= ref->prot; + + cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift); + if (kIODefaultCache != cacheMode) { + // VM system requires write access to update named entry cache mode + memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode)); + } + + tag = getVMTag(map); + + if (_task) { + // Find first range for offset + if (!_rangesCount) { + return kIOReturnBadArgument; + } + for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if (remain < nextLen) { + break; + } + remain -= nextLen; + } + } else { + rangeIdx = 0; + remain = 0; + nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + nextLen = size; + } + + assert(remain < nextLen); + if (remain >= nextLen) { + return kIOReturnBadArgument; + } + + nextAddr += remain; + nextLen -= remain; + pageOffset = (page_mask & nextAddr); + addr = 0; + didAlloc = false; + + if (!(options & kIOMapAnywhere)) { + addr = *inaddr; + if (pageOffset != (page_mask & addr)) { + return kIOReturnNotAligned; + } + addr -= pageOffset; + } + + // find first entry for offset + for (entryIdx = 0; + (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset); + entryIdx++) { + } + entryIdx--; + entry = &ref->entries[entryIdx]; + + // allocate VM + size = round_page_64(size + pageOffset); + if (kIOMapOverwrite & options) { + if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) { + map = IOPageableMapForAddress(addr); + } + err = KERN_SUCCESS; + } else { + IOMemoryDescriptorMapAllocRef ref; + ref.map = map; + ref.tag = tag; + ref.options = options; + ref.size = size; + ref.prot = prot; + if (options & kIOMapAnywhere) { + // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE + ref.mapped = 0; + } else { + ref.mapped = addr; + } + if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) { + err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref ); + } else { + err = IOMemoryDescriptorMapAlloc(ref.map, &ref); + } 
+ if (KERN_SUCCESS == err) { + addr = ref.mapped; + map = ref.map; + didAlloc = true; + } + } + + /* + * If the memory is associated with a device pager but doesn't have a UPL, + * it will be immediately faulted in through the pager via populateDevicePager(). + * kIOMapPrefault is redundant in that case, so don't try to use it for UPL + * operations. + */ + if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) { + options &= ~kIOMapPrefault; + } + + /* + * Prefaulting is only possible if we wired the memory earlier. Check the + * memory type, and the underlying data. + */ + if (options & kIOMapPrefault) { + /* + * The memory must have been wired by calling ::prepare(), otherwise + * we don't have the UPL. Without UPLs, pages cannot be pre-faulted + */ + assert(_wireCount != 0); + assert(_memoryEntries != NULL); + if ((_wireCount == 0) || + (_memoryEntries == NULL)) { + return kIOReturnBadArgument; + } + + // Get the page list. + ioGMDData* dataP = getDataP(_memoryEntries); + ioPLBlock const* ioplList = getIOPLList(dataP); + pageList = getPageList(dataP); + + // Get the number of IOPLs. + UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); + + /* + * Scan through the IOPL Info Blocks, looking for the first block containing + * the offset. The search will go past it, so we'll need to go back to the + * right range at the end. + */ + UInt ioplIndex = 0; + while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) { + ioplIndex++; + } + ioplIndex--; + + // Retrieve the IOPL info block. + ioPLBlock ioplInfo = ioplList[ioplIndex]; + + /* + * For external UPLs, the fPageInfo points directly to the UPL's page_info_t + * array. + */ + if (ioplInfo.fFlags & kIOPLExternUPL) { + pageList = (upl_page_info_t*) ioplInfo.fPageInfo; + } else { + pageList = &pageList[ioplInfo.fPageInfo]; + } + + // Rebase [offset] into the IOPL in order to look for the first page index. + mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset; + + // Retrieve the index of the first page corresponding to the offset. + currentPageIndex = atop_32(offsetInIOPL); + } + + // enter mappings + remain = size; + mapAddr = addr; + addr += pageOffset; + + while (remain && (KERN_SUCCESS == err)) { + entryOffset = offset - entry->offset; + if ((page_mask & entryOffset) != pageOffset) { + err = kIOReturnNotAligned; + break; + } + + if (kIODefaultCache != cacheMode) { + vm_size_t unused = 0; + err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/, + memEntryCacheMode, NULL, entry->entry); + assert(KERN_SUCCESS == err); + } + + entryOffset -= pageOffset; + if (entryOffset >= entry->size) { + panic("entryOffset"); + } + chunk = entry->size - entryOffset; + if (chunk) { + vm_map_kernel_flags_t vmk_flags; + + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */ + + if (chunk > remain) { + chunk = remain; + } + if (options & kIOMapPrefault) { + UInt nb_pages = round_page(chunk) / PAGE_SIZE; + + err = vm_map_enter_mem_object_prefault(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE), + vmk_flags, + tag, + entry->entry, + entryOffset, + prot, // cur + prot, // max + &pageList[currentPageIndex], + nb_pages); + + // Compute the next index in the page list.
+ currentPageIndex += nb_pages; + assert(currentPageIndex <= _pages); + } else { + err = vm_map_enter_mem_object(map, + &mapAddr, + chunk, 0 /* mask */, + (VM_FLAGS_FIXED + | VM_FLAGS_OVERWRITE), + vmk_flags, + tag, + entry->entry, + entryOffset, + false, // copy + prot, // cur + prot, // max + VM_INHERIT_NONE); + } + if (KERN_SUCCESS != err) { + break; + } + remain -= chunk; + if (!remain) { + break; + } + mapAddr += chunk; + offset += chunk - pageOffset; + } + pageOffset = 0; + entry++; + entryIdx++; + if (entryIdx >= ref->count) { + err = kIOReturnOverrun; + break; + } + } + + if ((KERN_SUCCESS != err) && didAlloc) { + (void) mach_vm_deallocate(map, trunc_page_64(addr), size); + addr = 0; + } + *inaddr = addr; + + return err; } -IOReturn +IOReturn IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts( - IOMemoryReference * ref, - IOByteCount * residentPageCount, - IOByteCount * dirtyPageCount) + IOMemoryReference * ref, + IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount) { - IOReturn err; - IOMemoryEntry * entries; - unsigned int resident, dirty; - unsigned int totalResident, totalDirty; - - totalResident = totalDirty = 0; - err = kIOReturnSuccess; - entries = ref->entries + ref->count; - while (entries > &ref->entries[0]) - { - entries--; - err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty); - if (KERN_SUCCESS != err) break; - totalResident += resident; - totalDirty += dirty; - } - - if (residentPageCount) *residentPageCount = totalResident; - if (dirtyPageCount) *dirtyPageCount = totalDirty; - return (err); + IOReturn err; + IOMemoryEntry * entries; + unsigned int resident, dirty; + unsigned int totalResident, totalDirty; + + totalResident = totalDirty = 0; + err = kIOReturnSuccess; + entries = ref->entries + ref->count; + while (entries > &ref->entries[0]) { + entries--; + err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty); + if (KERN_SUCCESS != err) { + break; + } + totalResident += resident; + totalDirty += dirty; + } + + if (residentPageCount) { + *residentPageCount = totalResident; + } + if (dirtyPageCount) { + *dirtyPageCount = totalDirty; + } + return err; } IOReturn IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable( - IOMemoryReference * ref, - IOOptionBits newState, - IOOptionBits * oldState) + IOMemoryReference * ref, + IOOptionBits newState, + IOOptionBits * oldState) { - IOReturn err; - IOMemoryEntry * entries; - vm_purgable_t control; - int totalState, state; - - totalState = kIOMemoryPurgeableNonVolatile; - err = kIOReturnSuccess; - entries = ref->entries + ref->count; - while (entries > &ref->entries[0]) - { - entries--; - - err = purgeableControlBits(newState, &control, &state); - if (KERN_SUCCESS != err) break; - err = memory_entry_purgeable_control_internal(entries->entry, control, &state); - if (KERN_SUCCESS != err) break; - err = purgeableStateBits(&state); - if (KERN_SUCCESS != err) break; - - if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty; - else if (kIOMemoryPurgeableEmpty == totalState) continue; - else if (kIOMemoryPurgeableVolatile == totalState) continue; - else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile; - else totalState = kIOMemoryPurgeableNonVolatile; - } - - if (oldState) *oldState = totalState; - return (err); + IOReturn err; + IOMemoryEntry * entries; + vm_purgable_t control; + int totalState, state; + + totalState = kIOMemoryPurgeableNonVolatile; + err = kIOReturnSuccess; + entries = ref->entries + ref->count; + 
while (entries > &ref->entries[0]) { + entries--; + + err = purgeableControlBits(newState, &control, &state); + if (KERN_SUCCESS != err) { + break; + } + err = memory_entry_purgeable_control_internal(entries->entry, control, &state); + if (KERN_SUCCESS != err) { + break; + } + err = purgeableStateBits(&state); + if (KERN_SUCCESS != err) { + break; + } + + if (kIOMemoryPurgeableEmpty == state) { + totalState = kIOMemoryPurgeableEmpty; + } else if (kIOMemoryPurgeableEmpty == totalState) { + continue; + } else if (kIOMemoryPurgeableVolatile == totalState) { + continue; + } else if (kIOMemoryPurgeableVolatile == state) { + totalState = kIOMemoryPurgeableVolatile; + } else { + totalState = kIOMemoryPurgeableNonVolatile; + } + } + + if (oldState) { + *oldState = totalState; + } + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOMemoryDescriptor * IOMemoryDescriptor::withAddress(void * address, - IOByteCount length, - IODirection direction) + IOByteCount length, + IODirection direction) { - return IOMemoryDescriptor:: - withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task); + return IOMemoryDescriptor:: + withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task); } #ifndef __LP64__ IOMemoryDescriptor * IOMemoryDescriptor::withAddress(IOVirtualAddress address, - IOByteCount length, - IODirection direction, - task_t task) + IOByteCount length, + IODirection direction, + task_t task) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) - { - if (that->initWithAddress(address, length, direction, task)) - return that; - - that->release(); - } - return 0; + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) { + if (that->initWithAddress(address, length, direction, task)) { + return that; + } + + that->release(); + } + return 0; } #endif /* !__LP64__ */ IOMemoryDescriptor * IOMemoryDescriptor::withPhysicalAddress( - IOPhysicalAddress address, - IOByteCount length, - IODirection direction ) + IOPhysicalAddress address, + IOByteCount length, + IODirection direction ) { - return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL)); + return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL); } #ifndef __LP64__ IOMemoryDescriptor * -IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, - UInt32 withCount, - IODirection direction, - task_t task, - bool asReference) +IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, + UInt32 withCount, + IODirection direction, + task_t task, + bool asReference) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) - { - if (that->initWithRanges(ranges, withCount, direction, task, asReference)) - return that; - - that->release(); - } - return 0; + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) { + if (that->initWithRanges(ranges, withCount, direction, task, asReference)) { + return that; + } + + that->release(); + } + return 0; } #endif /* !__LP64__ */ IOMemoryDescriptor * IOMemoryDescriptor::withAddressRange(mach_vm_address_t address, - mach_vm_size_t length, - IOOptionBits options, - task_t task) + mach_vm_size_t length, + IOOptionBits options, + task_t task) { - IOAddressRange range = { address, length }; - return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task)); + IOAddressRange range = { address, length }; + return 
IOMemoryDescriptor::withAddressRanges(&range, 1, options, task); } IOMemoryDescriptor * IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges, - UInt32 rangeCount, - IOOptionBits options, - task_t task) + UInt32 rangeCount, + IOOptionBits options, + task_t task) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) - { - if (task) - options |= kIOMemoryTypeVirtual64; - else - options |= kIOMemoryTypePhysical64; + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) { + if (task) { + options |= kIOMemoryTypeVirtual64; + } else { + options |= kIOMemoryTypePhysical64; + } - if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0)) - return that; + if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0)) { + return that; + } - that->release(); - } + that->release(); + } - return 0; + return 0; } @@ -1195,178 +1218,183 @@ IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges, * Passing the ranges as a reference will avoid an extra allocation. */ IOMemoryDescriptor * -IOMemoryDescriptor::withOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits opts, - IOMapper * mapper) +IOMemoryDescriptor::withOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits opts, + IOMapper * mapper) { - IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; + IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; - if (self - && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) - { - self->release(); - return 0; - } + if (self + && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) { + self->release(); + return 0; + } - return self; + return self; } -bool IOMemoryDescriptor::initWithOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits options, - IOMapper * mapper) +bool +IOMemoryDescriptor::initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper) { - return( false ); + return false; } #ifndef __LP64__ IOMemoryDescriptor * -IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, - UInt32 withCount, - IODirection direction, - bool asReference) +IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, + UInt32 withCount, + IODirection direction, + bool asReference) { - IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; - if (that) - { - if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) - return that; - - that->release(); - } - return 0; + IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; + if (that) { + if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) { + return that; + } + + that->release(); + } + return 0; } IOMemoryDescriptor * -IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, - IOByteCount offset, - IOByteCount length, - IODirection direction) +IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, + IOByteCount offset, + IOByteCount length, + IODirection direction) { - return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction)); + return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction); } #endif /* !__LP64__ */ IOMemoryDescriptor * IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD) { - IOGeneralMemoryDescriptor *origGenMD = - OSDynamicCast(IOGeneralMemoryDescriptor, originalMD); - - if (origGenMD) - return 
IOGeneralMemoryDescriptor:: - withPersistentMemoryDescriptor(origGenMD); - else - return 0; + IOGeneralMemoryDescriptor *origGenMD = + OSDynamicCast(IOGeneralMemoryDescriptor, originalMD); + + if (origGenMD) { + return IOGeneralMemoryDescriptor:: + withPersistentMemoryDescriptor(origGenMD); + } else { + return 0; + } } IOMemoryDescriptor * IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD) { - IOMemoryReference * memRef; - - if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0); - - if (memRef == originalMD->_memRef) - { - originalMD->retain(); // Add a new reference to ourselves - originalMD->memoryReferenceRelease(memRef); - return originalMD; - } - - IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor; - IOMDPersistentInitData initData = { originalMD, memRef }; - - if (self - && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) { - self->release(); - self = 0; - } - return self; + IOMemoryReference * memRef; + + if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) { + return 0; + } + + if (memRef == originalMD->_memRef) { + originalMD->retain(); // Add a new reference to ourselves + originalMD->memoryReferenceRelease(memRef); + return originalMD; + } + + IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor; + IOMDPersistentInitData initData = { originalMD, memRef }; + + if (self + && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) { + self->release(); + self = 0; + } + return self; } #ifndef __LP64__ bool IOGeneralMemoryDescriptor::initWithAddress(void * address, - IOByteCount withLength, - IODirection withDirection) + IOByteCount withLength, + IODirection withDirection) { - _singleRange.v.address = (vm_offset_t) address; - _singleRange.v.length = withLength; + _singleRange.v.address = (vm_offset_t) address; + _singleRange.v.length = withLength; - return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true); + return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true); } bool IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address, - IOByteCount withLength, - IODirection withDirection, - task_t withTask) + IOByteCount withLength, + IODirection withDirection, + task_t withTask) { - _singleRange.v.address = address; - _singleRange.v.length = withLength; + _singleRange.v.address = address; + _singleRange.v.length = withLength; - return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true); + return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true); } bool IOGeneralMemoryDescriptor::initWithPhysicalAddress( - IOPhysicalAddress address, - IOByteCount withLength, - IODirection withDirection ) + IOPhysicalAddress address, + IOByteCount withLength, + IODirection withDirection ) { - _singleRange.p.address = address; - _singleRange.p.length = withLength; + _singleRange.p.address = address; + _singleRange.p.length = withLength; - return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); + return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); } bool IOGeneralMemoryDescriptor::initWithPhysicalRanges( - IOPhysicalRange * ranges, - UInt32 count, - IODirection direction, - bool reference) + IOPhysicalRange * ranges, + UInt32 count, + IODirection direction, + bool reference) { - IOOptionBits mdOpts = direction | kIOMemoryTypePhysical; + IOOptionBits mdOpts = direction | kIOMemoryTypePhysical; 
- if (reference) - mdOpts |= kIOMemoryAsReference; + if (reference) { + mdOpts |= kIOMemoryAsReference; + } - return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0); + return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0); } bool IOGeneralMemoryDescriptor::initWithRanges( - IOVirtualRange * ranges, - UInt32 count, - IODirection direction, - task_t task, - bool reference) + IOVirtualRange * ranges, + UInt32 count, + IODirection direction, + task_t task, + bool reference) { - IOOptionBits mdOpts = direction; - - if (reference) - mdOpts |= kIOMemoryAsReference; - - if (task) { - mdOpts |= kIOMemoryTypeVirtual; - - // Auto-prepare if this is a kernel memory descriptor as very few - // clients bother to prepare() kernel memory. - // But it was not enforced so what are you going to do? - if (task == kernel_task) - mdOpts |= kIOMemoryAutoPrepare; - } - else - mdOpts |= kIOMemoryTypePhysical; - - return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0); + IOOptionBits mdOpts = direction; + + if (reference) { + mdOpts |= kIOMemoryAsReference; + } + + if (task) { + mdOpts |= kIOMemoryTypeVirtual; + + // Auto-prepare if this is a kernel memory descriptor as very few + // clients bother to prepare() kernel memory. + // But it was not enforced so what are you going to do? + if (task == kernel_task) { + mdOpts |= kIOMemoryAutoPrepare; + } + } else { + mdOpts |= kIOMemoryTypePhysical; + } + + return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0); } #endif /* !__LP64__ */ @@ -1385,343 +1413,359 @@ IOGeneralMemoryDescriptor::initWithRanges( */ bool -IOGeneralMemoryDescriptor::initWithOptions(void * buffers, - UInt32 count, - UInt32 offset, - task_t task, - IOOptionBits options, - IOMapper * mapper) +IOGeneralMemoryDescriptor::initWithOptions(void * buffers, + UInt32 count, + UInt32 offset, + task_t task, + IOOptionBits options, + IOMapper * mapper) { - IOOptionBits type = options & kIOMemoryTypeMask; + IOOptionBits type = options & kIOMemoryTypeMask; #ifndef __LP64__ - if (task - && (kIOMemoryTypeVirtual == type) - && vm_map_is_64bit(get_task_map(task)) - && ((IOVirtualRange *) buffers)->address) - { - OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()"); - return false; - } + if (task + && (kIOMemoryTypeVirtual == type) + && vm_map_is_64bit(get_task_map(task)) + && ((IOVirtualRange *) buffers)->address) { + OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()"); + return false; + } #endif /* !__LP64__ */ - // Grab the original MD's configuation data to initialse the - // arguments to this function. - if (kIOMemoryTypePersistentMD == type) { - - IOMDPersistentInitData *initData = (typeof(initData)) buffers; - const IOGeneralMemoryDescriptor *orig = initData->fMD; - ioGMDData *dataP = getDataP(orig->_memoryEntries); + // Grab the original MD's configuation data to initialse the + // arguments to this function. + if (kIOMemoryTypePersistentMD == type) { + IOMDPersistentInitData *initData = (typeof(initData))buffers; + const IOGeneralMemoryDescriptor *orig = initData->fMD; + ioGMDData *dataP = getDataP(orig->_memoryEntries); - // Only accept persistent memory descriptors with valid dataP data. - assert(orig->_rangesCount == 1); - if ( !(orig->_flags & kIOMemoryPersistent) || !dataP) - return false; + // Only accept persistent memory descriptors with valid dataP data. 
+ assert(orig->_rangesCount == 1); + if (!(orig->_flags & kIOMemoryPersistent) || !dataP) { + return false; + } - _memRef = initData->fMemRef; // Grab the new named entry - options = orig->_flags & ~kIOMemoryAsReference; - type = options & kIOMemoryTypeMask; - buffers = orig->_ranges.v; - count = orig->_rangesCount; + _memRef = initData->fMemRef; // Grab the new named entry + options = orig->_flags & ~kIOMemoryAsReference; + type = options & kIOMemoryTypeMask; + buffers = orig->_ranges.v; + count = orig->_rangesCount; - // Now grab the original task and whatever mapper was previously used - task = orig->_task; - mapper = dataP->fMapper; + // Now grab the original task and whatever mapper was previously used + task = orig->_task; + mapper = dataP->fMapper; - // We are ready to go through the original initialisation now - } + // We are ready to go through the original initialisation now + } - switch (type) { - case kIOMemoryTypeUIO: - case kIOMemoryTypeVirtual: + switch (type) { + case kIOMemoryTypeUIO: + case kIOMemoryTypeVirtual: #ifndef __LP64__ - case kIOMemoryTypeVirtual64: + case kIOMemoryTypeVirtual64: #endif /* !__LP64__ */ - assert(task); - if (!task) - return false; - break; + assert(task); + if (!task) { + return false; + } + break; - case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task + case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task #ifndef __LP64__ - case kIOMemoryTypePhysical64: + case kIOMemoryTypePhysical64: #endif /* !__LP64__ */ - case kIOMemoryTypeUPL: - assert(!task); - break; - default: - return false; /* bad argument */ - } - - assert(buffers); - assert(count); - - /* - * We can check the _initialized instance variable before having ever set - * it to an initial value because I/O Kit guarantees that all our instance - * variables are zeroed on an object's allocation. - */ - - if (_initialized) { - /* - * An existing memory descriptor is being retargeted to point to - * somewhere else. Clean up our present state. - */ - IOOptionBits type = _flags & kIOMemoryTypeMask; - if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) - { - while (_wireCount) - complete(); + case kIOMemoryTypeUPL: + assert(!task); + break; + default: + return false; /* bad argument */ } - if (_ranges.v && !(kIOMemoryAsReference & _flags)) - { - if (kIOMemoryTypeUIO == type) - uio_free((uio_t) _ranges.v); + + assert(buffers); + assert(count); + + /* + * We can check the _initialized instance variable before having ever set + * it to an initial value because I/O Kit guarantees that all our instance + * variables are zeroed on an object's allocation. + */ + + if (_initialized) { + /* + * An existing memory descriptor is being retargeted to point to + * somewhere else. Clean up our present state. 
+ */ + IOOptionBits type = _flags & kIOMemoryTypeMask; + if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) { + while (_wireCount) { + complete(); + } + } + if (_ranges.v && !(kIOMemoryAsReference & _flags)) { + if (kIOMemoryTypeUIO == type) { + uio_free((uio_t) _ranges.v); + } #ifndef __LP64__ - else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) - IODelete(_ranges.v64, IOAddressRange, _rangesCount); + else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) { + IODelete(_ranges.v64, IOAddressRange, _rangesCount); + } #endif /* !__LP64__ */ - else - IODelete(_ranges.v, IOVirtualRange, _rangesCount); + else { + IODelete(_ranges.v, IOVirtualRange, _rangesCount); + } + } + + options |= (kIOMemoryRedirected & _flags); + if (!(kIOMemoryRedirected & options)) { + if (_memRef) { + memoryReferenceRelease(_memRef); + _memRef = 0; + } + if (_mappings) { + _mappings->flushCollection(); + } + } + } else { + if (!super::init()) { + return false; + } + _initialized = true; } - options |= (kIOMemoryRedirected & _flags); - if (!(kIOMemoryRedirected & options)) - { - if (_memRef) - { - memoryReferenceRelease(_memRef); - _memRef = 0; - } - if (_mappings) - _mappings->flushCollection(); - } - } - else { - if (!super::init()) - return false; - _initialized = true; - } - - // Grab the appropriate mapper - if (kIOMemoryHostOrRemote & options) options |= kIOMemoryMapperNone; - if (kIOMemoryMapperNone & options) - mapper = 0; // No Mapper - else if (mapper == kIOMapperSystem) { - IOMapper::checkForSystemMapper(); - gIOSystemMapper = mapper = IOMapper::gSystem; - } - - // Remove the dynamic internal use flags from the initial setting - options &= ~(kIOMemoryPreparedReadOnly); - _flags = options; - _task = task; + // Grab the appropriate mapper + if (kIOMemoryHostOrRemote & options) { + options |= kIOMemoryMapperNone; + } + if (kIOMemoryMapperNone & options) { + mapper = 0; // No Mapper + } else if (mapper == kIOMapperSystem) { + IOMapper::checkForSystemMapper(); + gIOSystemMapper = mapper = IOMapper::gSystem; + } -#ifndef __LP64__ - _direction = (IODirection) (_flags & kIOMemoryDirectionMask); -#endif /* !__LP64__ */ + // Remove the dynamic internal use flags from the initial setting + options &= ~(kIOMemoryPreparedReadOnly); + _flags = options; + _task = task; - _dmaReferences = 0; - __iomd_reservedA = 0; - __iomd_reservedB = 0; - _highestPage = 0; - - if (kIOMemoryThreadSafe & options) - { - if (!_prepareLock) - _prepareLock = IOLockAlloc(); - } - else if (_prepareLock) - { - IOLockFree(_prepareLock); - _prepareLock = NULL; - } - - if (kIOMemoryTypeUPL == type) { - - ioGMDData *dataP; - unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1); - - if (!initMemoryEntries(dataSize, mapper)) return (false); - dataP = getDataP(_memoryEntries); - dataP->fPageCnt = 0; - switch (kIOMemoryDirectionMask & options) - { - case kIODirectionOut: - dataP->fDMAAccess = kIODMAMapReadAccess; - break; - case kIODirectionIn: - dataP->fDMAAccess = kIODMAMapWriteAccess; - break; - case kIODirectionNone: - case kIODirectionOutIn: - default: - panic("bad dir for upl 0x%x\n", (int) options); - break; - } - // _wireCount++; // UPLs start out life wired - - _length = count; - _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset); - - ioPLBlock iopl; - iopl.fIOPL = (upl_t) buffers; - upl_set_referenced(iopl.fIOPL, true); - upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL); - - if (upl_get_size(iopl.fIOPL) < (count + offset)) - 
panic("short external upl"); - - _highestPage = upl_get_highest_page(iopl.fIOPL); - - // Set the flag kIOPLOnDevice convieniently equal to 1 - iopl.fFlags = pageList->device | kIOPLExternUPL; - if (!pageList->device) { - // Pre-compute the offset into the UPL's page list - pageList = &pageList[atop_32(offset)]; - offset &= PAGE_MASK; - } - iopl.fIOMDOffset = 0; - iopl.fMappedPage = 0; - iopl.fPageInfo = (vm_address_t) pageList; - iopl.fPageOffset = offset; - _memoryEntries->appendBytes(&iopl, sizeof(iopl)); - } - else { - // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO - // kIOMemoryTypePhysical | kIOMemoryTypePhysical64 - - // Initialize the memory descriptor - if (options & kIOMemoryAsReference) { #ifndef __LP64__ - _rangesIsAllocated = false; + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); #endif /* !__LP64__ */ - // Hack assignment to get the buffer arg into _ranges. - // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't - // work, C++ sigh. - // This also initialises the uio & physical ranges. - _ranges.v = (IOVirtualRange *) buffers; + _dmaReferences = 0; + __iomd_reservedA = 0; + __iomd_reservedB = 0; + _highestPage = 0; + + if (kIOMemoryThreadSafe & options) { + if (!_prepareLock) { + _prepareLock = IOLockAlloc(); + } + } else if (_prepareLock) { + IOLockFree(_prepareLock); + _prepareLock = NULL; } - else { -#ifndef __LP64__ - _rangesIsAllocated = true; -#endif /* !__LP64__ */ - switch (type) - { - case kIOMemoryTypeUIO: - _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers); - break; -#ifndef __LP64__ - case kIOMemoryTypeVirtual64: - case kIOMemoryTypePhysical64: - if (count == 1 -#ifndef __arm__ - && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL -#endif - ) { - if (kIOMemoryTypeVirtual64 == type) - type = kIOMemoryTypeVirtual; - else - type = kIOMemoryTypePhysical; - _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference; - _rangesIsAllocated = false; - _ranges.v = &_singleRange.v; - _singleRange.v.address = ((IOAddressRange *) buffers)->address; - _singleRange.v.length = ((IOAddressRange *) buffers)->length; - break; - } - _ranges.v64 = IONew(IOAddressRange, count); - if (!_ranges.v64) - return false; - bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange)); - break; -#endif /* !__LP64__ */ - case kIOMemoryTypeVirtual: - case kIOMemoryTypePhysical: - if (count == 1) { - _flags |= kIOMemoryAsReference; -#ifndef __LP64__ - _rangesIsAllocated = false; + if (kIOMemoryTypeUPL == type) { + ioGMDData *dataP; + unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1); + + if (!initMemoryEntries(dataSize, mapper)) { + return false; + } + dataP = getDataP(_memoryEntries); + dataP->fPageCnt = 0; + switch (kIOMemoryDirectionMask & options) { + case kIODirectionOut: + dataP->fDMAAccess = kIODMAMapReadAccess; + break; + case kIODirectionIn: + dataP->fDMAAccess = kIODMAMapWriteAccess; + break; + case kIODirectionNone: + case kIODirectionOutIn: + default: + panic("bad dir for upl 0x%x\n", (int) options); + break; + } + // _wireCount++; // UPLs start out life wired + + _length = count; + _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset); + + ioPLBlock iopl; + iopl.fIOPL = (upl_t) buffers; + upl_set_referenced(iopl.fIOPL, true); + upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL); + + if (upl_get_size(iopl.fIOPL) < (count + offset)) { + panic("short external upl"); + } + + _highestPage = upl_get_highest_page(iopl.fIOPL); + + // Set 
the flag kIOPLOnDevice conveniently equal to 1
+ iopl.fFlags = pageList->device | kIOPLExternUPL;
+ if (!pageList->device) {
+ // Pre-compute the offset into the UPL's page list
+ pageList = &pageList[atop_32(offset)];
+ offset &= PAGE_MASK;
+ }
+ iopl.fIOMDOffset = 0;
+ iopl.fMappedPage = 0;
+ iopl.fPageInfo = (vm_address_t) pageList;
+ iopl.fPageOffset = offset;
+ _memoryEntries->appendBytes(&iopl, sizeof(iopl));
+ } else {
+ // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
+ // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
+
+ // Initialize the memory descriptor
+ if (options & kIOMemoryAsReference) {
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+
+ // Hack assignment to get the buffer arg into _ranges.
+ // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
+ // work, C++ sigh.
+ // This also initialises the uio & physical ranges.
+ _ranges.v = (IOVirtualRange *) buffers;
 } else {
+#ifndef __LP64__
+ _rangesIsAllocated = true;
+#endif /* !__LP64__ */
+ switch (type) {
+ case kIOMemoryTypeUIO:
+ _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
+ break;
+
+#ifndef __LP64__
+ case kIOMemoryTypeVirtual64:
+ case kIOMemoryTypePhysical64:
+ if (count == 1
+#ifndef __arm__
+ && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
+#endif
+ ) {
+ if (kIOMemoryTypeVirtual64 == type) {
+ type = kIOMemoryTypeVirtual;
+ } else {
+ type = kIOMemoryTypePhysical;
+ }
+ _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
+ _rangesIsAllocated = false;
+ _ranges.v = &_singleRange.v;
+ _singleRange.v.address = ((IOAddressRange *) buffers)->address;
+ _singleRange.v.length = ((IOAddressRange *) buffers)->length;
+ break;
+ }
+ _ranges.v64 = IONew(IOAddressRange, count);
+ if (!_ranges.v64) {
+ return false;
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
+ break;
+#endif /* !__LP64__ */
+ case kIOMemoryTypeVirtual:
+ case kIOMemoryTypePhysical:
+ if (count == 1) {
+ _flags |= kIOMemoryAsReference;
+#ifndef __LP64__
+ _rangesIsAllocated = false;
+#endif /* !__LP64__ */
+ _ranges.v = &_singleRange.v;
+ } else {
+ _ranges.v = IONew(IOVirtualRange, count);
+ if (!_ranges.v) {
+ return false;
+ }
+ }
+ bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
+ break;
+ }
+ }

- _rangesCount = count;
+ _rangesCount = count;
+
+ // Find starting address within the vector of ranges
+ Ranges vec = _ranges;
+ mach_vm_size_t totalLength = 0;
+ unsigned int ind, pages = 0;
+ for (ind = 0; ind < count; ind++) {
+ mach_vm_address_t addr;
+ mach_vm_address_t endAddr;
+ mach_vm_size_t len;
+
+ // addr & len are returned by this function
+ getAddrLenForInd(addr, len, type, vec, ind);
+ if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
+ break;
+ }
+ if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
+ break;
+ }
+ if (os_add_overflow(totalLength, len, &totalLength)) {
+ break;
+ }
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ ppnum_t highPage = atop_64(addr + len - 1);
+ if (highPage > _highestPage) {
+ _highestPage = highPage;
+ }
+ }
+ }
+ if ((ind < count)
+ || (totalLength != ((IOByteCount) totalLength))) {
+ return false; /* overflow */
+ }
+ _length = totalLength;
+ _pages = pages;
+
+ // Auto-prepare memory at creation 
time.
+ // Implied completion when descriptor is freed
+
+
+ if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
+ _wireCount++; // Physical MDs are, by definition, wired
+ } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
+ ioGMDData *dataP;
+ unsigned dataSize;
+
+ if (_pages > atop_64(max_mem)) {
+ return false;
+ }
+
+ dataSize = computeDataSize(_pages, /* upls */ count * 2);
+ if (!initMemoryEntries(dataSize, mapper)) {
+ return false;
+ }
+ dataP = getDataP(_memoryEntries);
+ dataP->fPageCnt = _pages;
+
+ if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
+ && (VM_KERN_MEMORY_NONE == _kernelTag)) {
+ _kernelTag = IOMemoryTag(kernel_map);
+ if (_kernelTag == gIOSurfaceTag) {
+ _userTag = VM_MEMORY_IOSURFACE;
+ }
+ }
+
+ if ((kIOMemoryPersistent & _flags) && !_memRef) {
+ IOReturn
+ err = memoryReferenceCreate(0, &_memRef);
+ if (kIOReturnSuccess != err) {
+ return false;
+ }
+ }
+
+ if ((_flags & kIOMemoryAutoPrepare)
+ && prepare() != kIOReturnSuccess) {
+ return false;
+ }
+ }
+ }

- // Find starting address within the vector of ranges
- Ranges vec = _ranges;
- mach_vm_size_t totalLength = 0;
- unsigned int ind, pages = 0;
- for (ind = 0; ind < count; ind++) {
- mach_vm_address_t addr;
- mach_vm_address_t endAddr;
- mach_vm_size_t len;
-
- // addr & len are returned by this function
- getAddrLenForInd(addr, len, type, vec, ind);
- if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break;
- if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break;
- if (os_add_overflow(totalLength, len, &totalLength)) break;
- if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
- {
- ppnum_t highPage = atop_64(addr + len - 1);
- if (highPage > _highestPage)
- _highestPage = highPage;
- }
- }
- if ((ind < count)
- || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */
-
- _length = totalLength;
- _pages = pages;
-
- // Auto-prepare memory at creation time.
- // Implied completion when descriptor is free-ed
-
-
- if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
- _wireCount++; // Physical MDs are, by definition, wired
- else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
- ioGMDData *dataP;
- unsigned dataSize;
-
- if (_pages > atop_64(max_mem)) return false;
-
- dataSize = computeDataSize(_pages, /* upls */ count * 2);
- if (!initMemoryEntries(dataSize, mapper)) return false;
- dataP = getDataP(_memoryEntries);
- dataP->fPageCnt = _pages;
-
- if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
- && (VM_KERN_MEMORY_NONE == _kernelTag))
- {
- _kernelTag = IOMemoryTag(kernel_map);
- if (_kernelTag == gIOSurfaceTag) _userTag = VM_MEMORY_IOSURFACE;
- }
-
- if ( (kIOMemoryPersistent & _flags) && !_memRef)
- {
- IOReturn
- err = memoryReferenceCreate(0, &_memRef);
- if (kIOReturnSuccess != err) return false;
- }
-
- if ((_flags & kIOMemoryAutoPrepare)
- && prepare() != kIOReturnSuccess)
- return false;
- }
- }
-
- return true;
+ return true;
}

/*
@@ -1729,74 +1773,80 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
 *
 * Free resources. 
*/ -void IOGeneralMemoryDescriptor::free() +void +IOGeneralMemoryDescriptor::free() { - IOOptionBits type = _flags & kIOMemoryTypeMask; + IOOptionBits type = _flags & kIOMemoryTypeMask; - if( reserved) - { - LOCK; - reserved->dp.memory = 0; - UNLOCK; - } - if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) - { - ioGMDData * dataP; - if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) - { - dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength); - dataP->fMappedBaseValid = dataP->fMappedBase = 0; + if (reserved) { + LOCK; + reserved->dp.memory = 0; + UNLOCK; + } + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { + ioGMDData * dataP; + if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) { + dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength); + dataP->fMappedBaseValid = dataP->fMappedBase = 0; + } + } else { + while (_wireCount) { + complete(); + } } - } - else - { - while (_wireCount) complete(); - } - if (_memoryEntries) _memoryEntries->release(); + if (_memoryEntries) { + _memoryEntries->release(); + } - if (_ranges.v && !(kIOMemoryAsReference & _flags)) - { - if (kIOMemoryTypeUIO == type) - uio_free((uio_t) _ranges.v); + if (_ranges.v && !(kIOMemoryAsReference & _flags)) { + if (kIOMemoryTypeUIO == type) { + uio_free((uio_t) _ranges.v); + } #ifndef __LP64__ - else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) - IODelete(_ranges.v64, IOAddressRange, _rangesCount); + else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) { + IODelete(_ranges.v64, IOAddressRange, _rangesCount); + } #endif /* !__LP64__ */ - else - IODelete(_ranges.v, IOVirtualRange, _rangesCount); - - _ranges.v = NULL; - } - - if (reserved) - { - if (reserved->dp.devicePager) - { - // memEntry holds a ref on the device pager which owns reserved - // (IOMemoryDescriptorReserved) so no reserved access after this point - device_pager_deallocate( (memory_object_t) reserved->dp.devicePager ); - } - else - IODelete(reserved, IOMemoryDescriptorReserved, 1); - reserved = NULL; - } - - if (_memRef) memoryReferenceRelease(_memRef); - if (_prepareLock) IOLockFree(_prepareLock); - - super::free(); + else { + IODelete(_ranges.v, IOVirtualRange, _rangesCount); + } + + _ranges.v = NULL; + } + + if (reserved) { + if (reserved->dp.devicePager) { + // memEntry holds a ref on the device pager which owns reserved + // (IOMemoryDescriptorReserved) so no reserved access after this point + device_pager_deallocate((memory_object_t) reserved->dp.devicePager ); + } else { + IODelete(reserved, IOMemoryDescriptorReserved, 1); + } + reserved = NULL; + } + + if (_memRef) { + memoryReferenceRelease(_memRef); + } + if (_prepareLock) { + IOLockFree(_prepareLock); + } + + super::free(); } #ifndef __LP64__ -void IOGeneralMemoryDescriptor::unmapFromKernel() +void +IOGeneralMemoryDescriptor::unmapFromKernel() { - panic("IOGMD::unmapFromKernel deprecated"); + panic("IOGMD::unmapFromKernel deprecated"); } -void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) +void +IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) { - panic("IOGMD::mapIntoKernel deprecated"); + panic("IOGMD::mapIntoKernel deprecated"); } #endif /* !__LP64__ */ @@ -1805,13 +1855,15 @@ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) * * Get the direction of the transfer. 
*/
-IODirection IOMemoryDescriptor::getDirection() const
+IODirection
+IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
- if (_direction)
- return _direction;
+ if (_direction) {
+ return _direction;
+ }
#endif /* !__LP64__ */
- return (IODirection) (_flags & kIOMemoryDirectionMask);
+ return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
@@ -1819,24 +1871,28 @@ IODirection IOMemoryDescriptor::getDirection() const
 *
 * Get the length of the transfer (over all ranges).
 */
-IOByteCount IOMemoryDescriptor::getLength() const
+IOByteCount
+IOMemoryDescriptor::getLength() const
{
- return _length;
+ return _length;
}

-void IOMemoryDescriptor::setTag( IOOptionBits tag )
+void
+IOMemoryDescriptor::setTag( IOOptionBits tag )
{
- _tag = tag;
+ _tag = tag;
}

-IOOptionBits IOMemoryDescriptor::getTag( void )
+IOOptionBits
+IOMemoryDescriptor::getTag( void )
{
- return( _tag);
+ return _tag;
}

-uint64_t IOMemoryDescriptor::getFlags(void)
+uint64_t
+IOMemoryDescriptor::getFlags(void)
{
- return (_flags);
+ return _flags;
}

#ifndef __LP64__
@@ -1847,136 +1903,153 @@ uint64_t IOMemoryDescriptor::getFlags(void)
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
- addr64_t physAddr = 0;
+ addr64_t physAddr = 0;

- if( prepare() == kIOReturnSuccess) {
- physAddr = getPhysicalSegment64( offset, length );
- complete();
- }
+ if (prepare() == kIOReturnSuccess) {
+ physAddr = getPhysicalSegment64( offset, length );
+ complete();
+ }

- return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
+ return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}
#pragma clang diagnostic pop
#endif /* !__LP64__ */

-IOByteCount IOMemoryDescriptor::readBytes
- (IOByteCount offset, void *bytes, IOByteCount length)
+IOByteCount
+IOMemoryDescriptor::readBytes
+(IOByteCount offset, void *bytes, IOByteCount length)
{
- addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
- IOByteCount remaining;
-
- // Assert that this entire I/O is withing the available range
- assert(offset <= _length);
- assert(offset + length <= _length);
- if ((offset >= _length)
- || ((offset + length) > _length)) {
- return 0;
- }
+ addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount remaining;
+
+ // Assert that this entire I/O is within the available range
+ assert(offset <= _length);
+ assert(offset + length <= _length);
+ if ((offset >= _length)
+ || ((offset + length) > _length)) {
+ return 0;
+ }

- assert (!(kIOMemoryRemote & _flags));
- if (kIOMemoryRemote & _flags) return (0);
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return 0;
+ }

- if (kIOMemoryThreadSafe & _flags)
- LOCK;
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }

- remaining = length = min(length, _length - offset);
- while (remaining) { // (process another target segment?)
- addr64_t srcAddr64;
- IOByteCount srcLen;
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?) 
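
// Caller-side sketch for readBytes(), whose segment loop this is: each
// physical segment is copypv()'d into a kernel-virtual buffer, and the
// request is clipped to [offset, getLength()), so a short return means
// the bounds check rejected or truncated it (`md` and `header` are
// illustrative names):
//
//     uint8_t header[64];
//     IOByteCount done = md->readBytes(0, header, sizeof(header));
//     if (done != sizeof(header)) {
//         // descriptor shorter than the request, or bounds check failed
//     }
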
+ addr64_t srcAddr64;
+ IOByteCount srcLen;

- srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
- if (!srcAddr64)
- break;
+ srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
+ if (!srcAddr64) {
+ break;
+ }

- // Clip segment length to remaining
- if (srcLen > remaining)
- srcLen = remaining;
+ // Clip segment length to remaining
+ if (srcLen > remaining) {
+ srcLen = remaining;
+ }

- copypv(srcAddr64, dstAddr, srcLen,
- cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
+ copypv(srcAddr64, dstAddr, srcLen,
+ cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

- dstAddr += srcLen;
- offset += srcLen;
- remaining -= srcLen;
- }
+ dstAddr += srcLen;
+ offset += srcLen;
+ remaining -= srcLen;
+ }

- if (kIOMemoryThreadSafe & _flags)
- UNLOCK;
+ if (kIOMemoryThreadSafe & _flags) {
+ UNLOCK;
+ }

- assert(!remaining);
+ assert(!remaining);

- return length - remaining;
+ return length - remaining;
}

-IOByteCount IOMemoryDescriptor::writeBytes
- (IOByteCount inoffset, const void *bytes, IOByteCount length)
+IOByteCount
+IOMemoryDescriptor::writeBytes
+(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
- addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
- IOByteCount remaining;
- IOByteCount offset = inoffset;
+ addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
+ IOByteCount remaining;
+ IOByteCount offset = inoffset;

- // Assert that this entire I/O is withing the available range
- assert(offset <= _length);
- assert(offset + length <= _length);
+ // Assert that this entire I/O is within the available range
+ assert(offset <= _length);
+ assert(offset + length <= _length);

- assert( !(kIOMemoryPreparedReadOnly & _flags) );
+ assert( !(kIOMemoryPreparedReadOnly & _flags));

- if ( (kIOMemoryPreparedReadOnly & _flags)
- || (offset >= _length)
- || ((offset + length) > _length)) {
- return 0;
- }
+ if ((kIOMemoryPreparedReadOnly & _flags)
+ || (offset >= _length)
+ || ((offset + length) > _length)) {
+ return 0;
+ }

- assert (!(kIOMemoryRemote & _flags));
- if (kIOMemoryRemote & _flags) return (0);
+ assert(!(kIOMemoryRemote & _flags));
+ if (kIOMemoryRemote & _flags) {
+ return 0;
+ }

- if (kIOMemoryThreadSafe & _flags)
- LOCK;
+ if (kIOMemoryThreadSafe & _flags) {
+ LOCK;
+ }

- remaining = length = min(length, _length - offset);
- while (remaining) { // (process another target segment?)
- addr64_t dstAddr64;
- IOByteCount dstLen;
+ remaining = length = min(length, _length - offset);
+ while (remaining) { // (process another target segment?) 
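
// writeBytes(), whose segment loop this is, mirrors readBytes() with one
// extra convenience visible just below: a NULL source pointer makes it
// bzero_phys() each segment instead of copying. Sketch (`md` and `hdr`
// are illustrative):
//
//     md->writeBytes(0, NULL, md->getLength()); // zero-fill whole buffer
//     md->writeBytes(0, &hdr, sizeof(hdr));     // then lay down a header
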
+ addr64_t dstAddr64; + IOByteCount dstLen; - dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); - if (!dstAddr64) - break; + dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); + if (!dstAddr64) { + break; + } - // Clip segment length to remaining - if (dstLen > remaining) - dstLen = remaining; + // Clip segment length to remaining + if (dstLen > remaining) { + dstLen = remaining; + } - if (!srcAddr) bzero_phys(dstAddr64, dstLen); - else - { - copypv(srcAddr, (addr64_t) dstAddr64, dstLen, - cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); - srcAddr += dstLen; + if (!srcAddr) { + bzero_phys(dstAddr64, dstLen); + } else { + copypv(srcAddr, (addr64_t) dstAddr64, dstLen, + cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap); + srcAddr += dstLen; + } + offset += dstLen; + remaining -= dstLen; } - offset += dstLen; - remaining -= dstLen; - } - if (kIOMemoryThreadSafe & _flags) - UNLOCK; + if (kIOMemoryThreadSafe & _flags) { + UNLOCK; + } - assert(!remaining); + assert(!remaining); #if defined(__x86_64__) - // copypv does not cppvFsnk on intel + // copypv does not cppvFsnk on intel #else - if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length); + if (!srcAddr) { + performOperation(kIOMemoryIncoherentIOFlush, inoffset, length); + } #endif - return length - remaining; + return length - remaining; } #ifndef __LP64__ -void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) +void +IOGeneralMemoryDescriptor::setPosition(IOByteCount position) { - panic("IOGMD::setPosition deprecated"); + panic("IOGMD::setPosition deprecated"); } #endif /* !__LP64__ */ @@ -1985,550 +2058,571 @@ static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << uint64_t IOGeneralMemoryDescriptor::getPreparationID( void ) { - ioGMDData *dataP; - - if (!_wireCount) - return (kIOPreparationIDUnprepared); - - if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical) - || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) - { - IOMemoryDescriptor::setPreparationID(); - return (IOMemoryDescriptor::getPreparationID()); - } - - if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) - return (kIOPreparationIDUnprepared); - - if (kIOPreparationIDUnprepared == dataP->fPreparationID) - { - dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID); - } - return (dataP->fPreparationID); + ioGMDData *dataP; + + if (!_wireCount) { + return kIOPreparationIDUnprepared; + } + + if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical) + || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) { + IOMemoryDescriptor::setPreparationID(); + return IOMemoryDescriptor::getPreparationID(); + } + + if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) { + return kIOPreparationIDUnprepared; + } + + if (kIOPreparationIDUnprepared == dataP->fPreparationID) { + dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID); + } + return dataP->fPreparationID; } -IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void ) +IOMemoryDescriptorReserved * +IOMemoryDescriptor::getKernelReserved( void ) { - if (!reserved) - { - reserved = IONew(IOMemoryDescriptorReserved, 1); - if (reserved) - bzero(reserved, sizeof(IOMemoryDescriptorReserved)); - } - return (reserved); + if (!reserved) { + reserved = IONew(IOMemoryDescriptorReserved, 1); + if (reserved) { + bzero(reserved, sizeof(IOMemoryDescriptorReserved)); + } + } + return reserved; } -void IOMemoryDescriptor::setPreparationID( void ) +void 
+IOMemoryDescriptor::setPreparationID( void ) { - if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) - { - reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID); - } + if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) { + reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID); + } } -uint64_t IOMemoryDescriptor::getPreparationID( void ) +uint64_t +IOMemoryDescriptor::getPreparationID( void ) { - if (reserved) - return (reserved->preparationID); - else - return (kIOPreparationIDUnsupported); + if (reserved) { + return reserved->preparationID; + } else { + return kIOPreparationIDUnsupported; + } } -void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag) +void +IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag) { - _kernelTag = kernelTag; - _userTag = userTag; + _kernelTag = kernelTag; + _userTag = userTag; } -vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map) +vm_tag_t +IOMemoryDescriptor::getVMTag(vm_map_t map) { - if (vm_kernel_map_is_kernel(map)) - { - if (VM_KERN_MEMORY_NONE != _kernelTag) return (_kernelTag); - } - else - { - if (VM_KERN_MEMORY_NONE != _userTag) return (_userTag); - } - return (IOMemoryTag(map)); + if (vm_kernel_map_is_kernel(map)) { + if (VM_KERN_MEMORY_NONE != _kernelTag) { + return _kernelTag; + } + } else { + if (VM_KERN_MEMORY_NONE != _userTag) { + return _userTag; + } + } + return IOMemoryTag(map); } -IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const +IOReturn +IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const { - IOReturn err = kIOReturnSuccess; - DMACommandOps params; - IOGeneralMemoryDescriptor * md = const_cast(this); - ioGMDData *dataP; + IOReturn err = kIOReturnSuccess; + DMACommandOps params; + IOGeneralMemoryDescriptor * md = const_cast(this); + ioGMDData *dataP; - params = (op & ~kIOMDDMACommandOperationMask & op); - op &= kIOMDDMACommandOperationMask; + params = (op & ~kIOMDDMACommandOperationMask & op); + op &= kIOMDDMACommandOperationMask; - if (kIOMDDMAMap == op) - { - if (dataSize < sizeof(IOMDDMAMapArgs)) - return kIOReturnUnderrun; + if (kIOMDDMAMap == op) { + if (dataSize < sizeof(IOMDDMAMapArgs)) { + return kIOReturnUnderrun; + } - IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; - if (!_memoryEntries - && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) { + return kIOReturnNoMemory; + } - if (_memoryEntries && data->fMapper) - { - bool remap, keepMap; - dataP = getDataP(_memoryEntries); + if (_memoryEntries && data->fMapper) { + bool remap, keepMap; + dataP = getDataP(_memoryEntries); - if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits; - if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment; + if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) { + dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits; + } + if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) { + dataP->fDMAMapAlignment = data->fMapSpec.alignment; + } - keepMap = (data->fMapper == gIOSystemMapper); - keepMap &= ((data->fOffset == 0) && (data->fLength == _length)); + keepMap = (data->fMapper == 
gIOSystemMapper); + keepMap &= ((data->fOffset == 0) && (data->fLength == _length)); - if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockLock(_prepareLock); + if ((data->fMapper == gIOSystemMapper) && _prepareLock) { + IOLockLock(_prepareLock); + } - remap = (!keepMap); - remap |= (dataP->fDMAMapNumAddressBits < 64) - && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); - remap |= (dataP->fDMAMapAlignment > page_size); + remap = (!keepMap); + remap |= (dataP->fDMAMapNumAddressBits < 64) + && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits)); + remap |= (dataP->fDMAMapAlignment > page_size); - if (remap || !dataP->fMappedBaseValid) - { + if (remap || !dataP->fMappedBaseValid) { // if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params); - err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); - if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) - { - dataP->fMappedBase = data->fAlloc; - dataP->fMappedBaseValid = true; - dataP->fMappedLength = data->fAllocLength; - data->fAllocLength = 0; // IOMD owns the alloc now - } - } - else - { - data->fAlloc = dataP->fMappedBase; - data->fAllocLength = 0; // give out IOMD map - md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength); - } - data->fMapContig = !dataP->fDiscontig; - - if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockUnlock(_prepareLock); - } - return (err); - } - if (kIOMDDMAUnmap == op) - { - if (dataSize < sizeof(IOMDDMAMapArgs)) - return kIOReturnUnderrun; - IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; - - err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength); - - return kIOReturnSuccess; - } - - if (kIOMDAddDMAMapSpec == op) - { - if (dataSize < sizeof(IODMAMapSpecification)) - return kIOReturnUnderrun; - - IODMAMapSpecification * data = (IODMAMapSpecification *) vData; - - if (!_memoryEntries - && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); - - if (_memoryEntries) - { - dataP = getDataP(_memoryEntries); - if (data->numAddressBits < dataP->fDMAMapNumAddressBits) - dataP->fDMAMapNumAddressBits = data->numAddressBits; - if (data->alignment > dataP->fDMAMapAlignment) - dataP->fDMAMapAlignment = data->alignment; + err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) { + dataP->fMappedBase = data->fAlloc; + dataP->fMappedBaseValid = true; + dataP->fMappedLength = data->fAllocLength; + data->fAllocLength = 0; // IOMD owns the alloc now + } + } else { + data->fAlloc = dataP->fMappedBase; + data->fAllocLength = 0; // give out IOMD map + md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength); + } + data->fMapContig = !dataP->fDiscontig; + + if ((data->fMapper == gIOSystemMapper) && _prepareLock) { + IOLockUnlock(_prepareLock); + } + } + return err; } - return kIOReturnSuccess; - } - - if (kIOMDGetCharacteristics == op) { - - if (dataSize < sizeof(IOMDDMACharacteristics)) - return kIOReturnUnderrun; + if (kIOMDDMAUnmap == op) { + if (dataSize < sizeof(IOMDDMAMapArgs)) { + return kIOReturnUnderrun; + } + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; - IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData; - data->fLength = 
_length; - data->fSGCount = _rangesCount; - data->fPages = _pages; - data->fDirection = getDirection(); - if (!_wireCount) - data->fIsPrepared = false; - else { - data->fIsPrepared = true; - data->fHighestPage = _highestPage; - if (_memoryEntries) - { - dataP = getDataP(_memoryEntries); - ioPLBlock *ioplList = getIOPLList(dataP); - UInt count = getNumIOPL(_memoryEntries, dataP); - if (count == 1) - data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK; - } - } - - return kIOReturnSuccess; - } - - else if (kIOMDDMAActive == op) - { - if (params) - { - int16_t prior; - prior = OSAddAtomic16(1, &md->_dmaReferences); - if (!prior) md->_mapName = NULL; - } - else - { - if (md->_dmaReferences) OSAddAtomic16(-1, &md->_dmaReferences); - else panic("_dmaReferences underflow"); - } - } - else if (kIOMDWalkSegments != op) - return kIOReturnBadArgument; - - // Get the next segment - struct InternalState { - IOMDDMAWalkSegmentArgs fIO; - UInt fOffset2Index; - UInt fIndex; - UInt fNextOffset; - } *isP; - - // Find the next segment - if (dataSize < sizeof(*isP)) - return kIOReturnUnderrun; - - isP = (InternalState *) vData; - UInt offset = isP->fIO.fOffset; - uint8_t mapped = isP->fIO.fMapped; - uint64_t mappedBase; - - if (mapped && (kIOMemoryRemote & _flags)) return (kIOReturnNotAttached); - - if (IOMapper::gSystem && mapped - && (!(kIOMemoryHostOnly & _flags)) - && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) -// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid)) - { - if (!_memoryEntries - && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory); + err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength); - dataP = getDataP(_memoryEntries); - if (dataP->fMapper) - { - IODMAMapSpecification mapSpec; - bzero(&mapSpec, sizeof(mapSpec)); - mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; - mapSpec.alignment = dataP->fDMAMapAlignment; - err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength); - if (kIOReturnSuccess != err) return (err); - dataP->fMappedBaseValid = true; - } - } - - if (kIOMDDMAWalkMappedLocal == mapped) mappedBase = isP->fIO.fMappedBase; - else if (mapped) - { - if (IOMapper::gSystem - && (!(kIOMemoryHostOnly & _flags)) - && _memoryEntries - && (dataP = getDataP(_memoryEntries)) - && dataP->fMappedBaseValid) - { - mappedBase = dataP->fMappedBase; + return kIOReturnSuccess; } - else mapped = 0; - } - - if (offset >= _length) - return (offset == _length)? 
kIOReturnOverrun : kIOReturnInternalError; - // Validate the previous offset - UInt ind, off2Ind = isP->fOffset2Index; - if (!params - && offset - && (offset == isP->fNextOffset || off2Ind <= offset)) - ind = isP->fIndex; - else - ind = off2Ind = 0; // Start from beginning - - UInt length; - UInt64 address; + if (kIOMDAddDMAMapSpec == op) { + if (dataSize < sizeof(IODMAMapSpecification)) { + return kIOReturnUnderrun; + } - if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { + IODMAMapSpecification * data = (IODMAMapSpecification *) vData; - // Physical address based memory descriptor - const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0]; + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) { + return kIOReturnNoMemory; + } - // Find the range after the one that contains the offset - mach_vm_size_t len; - for (len = 0; off2Ind <= offset; ind++) { - len = physP[ind].length; - off2Ind += len; + if (_memoryEntries) { + dataP = getDataP(_memoryEntries); + if (data->numAddressBits < dataP->fDMAMapNumAddressBits) { + dataP->fDMAMapNumAddressBits = data->numAddressBits; + } + if (data->alignment > dataP->fDMAMapAlignment) { + dataP->fDMAMapAlignment = data->alignment; + } + } + return kIOReturnSuccess; } - // Calculate length within range and starting address - length = off2Ind - offset; - address = physP[ind - 1].address + len - length; - - if (true && mapped) - { - address = mappedBase + offset; - } - else - { - // see how far we can coalesce ranges - while (ind < _rangesCount && address + length == physP[ind].address) { - len = physP[ind].length; - length += len; - off2Ind += len; - ind++; - } - } - - // correct contiguous check overshoot - ind--; - off2Ind -= len; - } -#ifndef __LP64__ - else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) { + if (kIOMDGetCharacteristics == op) { + if (dataSize < sizeof(IOMDDMACharacteristics)) { + return kIOReturnUnderrun; + } - // Physical address based memory descriptor - const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0]; + IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData; + data->fLength = _length; + data->fSGCount = _rangesCount; + data->fPages = _pages; + data->fDirection = getDirection(); + if (!_wireCount) { + data->fIsPrepared = false; + } else { + data->fIsPrepared = true; + data->fHighestPage = _highestPage; + if (_memoryEntries) { + dataP = getDataP(_memoryEntries); + ioPLBlock *ioplList = getIOPLList(dataP); + UInt count = getNumIOPL(_memoryEntries, dataP); + if (count == 1) { + data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK; + } + } + } - // Find the range after the one that contains the offset - mach_vm_size_t len; - for (len = 0; off2Ind <= offset; ind++) { - len = physP[ind].length; - off2Ind += len; + return kIOReturnSuccess; + } else if (kIOMDDMAActive == op) { + if (params) { + int16_t prior; + prior = OSAddAtomic16(1, &md->_dmaReferences); + if (!prior) { + md->_mapName = NULL; + } + } else { + if (md->_dmaReferences) { + OSAddAtomic16(-1, &md->_dmaReferences); + } else { + panic("_dmaReferences underflow"); + } + } + } else if (kIOMDWalkSegments != op) { + return kIOReturnBadArgument; } - // Calculate length within range and starting address - length = off2Ind - offset; - address = physP[ind - 1].address + len - length; - - if (true && mapped) - { - address = mappedBase + offset; + // Get the next segment + struct InternalState { + IOMDDMAWalkSegmentArgs fIO; + UInt fOffset2Index; + UInt fIndex; + UInt 
fNextOffset; + } *isP; + + // Find the next segment + if (dataSize < sizeof(*isP)) { + return kIOReturnUnderrun; } - else - { - // see how far we can coalesce ranges - while (ind < _rangesCount && address + length == physP[ind].address) { - len = physP[ind].length; - length += len; - off2Ind += len; - ind++; - } - } - // correct contiguous check overshoot - ind--; - off2Ind -= len; - } -#endif /* !__LP64__ */ - else do { - if (!_wireCount) - panic("IOGMD: not wired for the IODMACommand"); - assert(_memoryEntries); + isP = (InternalState *) vData; + UInt offset = isP->fIO.fOffset; + uint8_t mapped = isP->fIO.fMapped; + uint64_t mappedBase; - dataP = getDataP(_memoryEntries); - const ioPLBlock *ioplList = getIOPLList(dataP); - UInt numIOPLs = getNumIOPL(_memoryEntries, dataP); - upl_page_info_t *pageList = getPageList(dataP); - - assert(numIOPLs > 0); + if (mapped && (kIOMemoryRemote & _flags)) { + return kIOReturnNotAttached; + } - // Scan through iopl info blocks looking for block containing offset - while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) - ind++; + if (IOMapper::gSystem && mapped + && (!(kIOMemoryHostOnly & _flags)) + && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) { +// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid)) + if (!_memoryEntries + && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) { + return kIOReturnNoMemory; + } - // Go back to actual range as search goes past it - ioPLBlock ioplInfo = ioplList[ind - 1]; - off2Ind = ioplInfo.fIOMDOffset; + dataP = getDataP(_memoryEntries); + if (dataP->fMapper) { + IODMAMapSpecification mapSpec; + bzero(&mapSpec, sizeof(mapSpec)); + mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; + mapSpec.alignment = dataP->fDMAMapAlignment; + err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength); + if (kIOReturnSuccess != err) { + return err; + } + dataP->fMappedBaseValid = true; + } + } - if (ind < numIOPLs) - length = ioplList[ind].fIOMDOffset; - else - length = _length; - length -= offset; // Remainder within iopl + if (kIOMDDMAWalkMappedLocal == mapped) { + mappedBase = isP->fIO.fMappedBase; + } else if (mapped) { + if (IOMapper::gSystem + && (!(kIOMemoryHostOnly & _flags)) + && _memoryEntries + && (dataP = getDataP(_memoryEntries)) + && dataP->fMappedBaseValid) { + mappedBase = dataP->fMappedBase; + } else { + mapped = 0; + } + } - // Subtract offset till this iopl in total list - offset -= off2Ind; + if (offset >= _length) { + return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError; + } - // If a mapped address is requested and this is a pre-mapped IOPL - // then just need to compute an offset relative to the mapped base. 
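
// The "validate the previous offset" step above is what makes
// kIOMDWalkSegments O(1) for monotone callers: the InternalState caches
// the range index, the cumulative offset of that range (fOffset2Index)
// and the expected next offset, and the scan restarts from zero only on
// random access. A standalone sketch of the same resume rule (types and
// names illustrative, not kernel API):
//
//     struct WalkState { unsigned ind; uint64_t off2Ind, nextOffset; };
//
//     static void seekTo(WalkState &w, uint64_t offset) {
//         if (!offset || (offset != w.nextOffset && offset < w.off2Ind)) {
//             w.ind = 0;      // random access: restart the forward scan
//             w.off2Ind = 0;
//         }
//         // ... then roll w.ind forward range by range, as above ...
//     }
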
- if (mapped) { - offset += (ioplInfo.fPageOffset & PAGE_MASK); - address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset; - continue; // Done leave do/while(false) now + // Validate the previous offset + UInt ind, off2Ind = isP->fOffset2Index; + if (!params + && offset + && (offset == isP->fNextOffset || off2Ind <= offset)) { + ind = isP->fIndex; + } else { + ind = off2Ind = 0; // Start from beginning } + UInt length; + UInt64 address; + + if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { + // Physical address based memory descriptor + const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0]; + + // Find the range after the one that contains the offset + mach_vm_size_t len; + for (len = 0; off2Ind <= offset; ind++) { + len = physP[ind].length; + off2Ind += len; + } - // The offset is rebased into the current iopl. - // Now add the iopl 1st page offset. - offset += ioplInfo.fPageOffset; + // Calculate length within range and starting address + length = off2Ind - offset; + address = physP[ind - 1].address + len - length; - // For external UPLs the fPageInfo field points directly to - // the upl's upl_page_info_t array. - if (ioplInfo.fFlags & kIOPLExternUPL) - pageList = (upl_page_info_t *) ioplInfo.fPageInfo; - else - pageList = &pageList[ioplInfo.fPageInfo]; + if (true && mapped) { + address = mappedBase + offset; + } else { + // see how far we can coalesce ranges + while (ind < _rangesCount && address + length == physP[ind].address) { + len = physP[ind].length; + length += len; + off2Ind += len; + ind++; + } + } - // Check for direct device non-paged memory - if ( ioplInfo.fFlags & kIOPLOnDevice ) { - address = ptoa_64(pageList->phys_addr) + offset; - continue; // Done leave do/while(false) now + // correct contiguous check overshoot + ind--; + off2Ind -= len; } +#ifndef __LP64__ + else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) { + // Physical address based memory descriptor + const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0]; + + // Find the range after the one that contains the offset + mach_vm_size_t len; + for (len = 0; off2Ind <= offset; ind++) { + len = physP[ind].length; + off2Ind += len; + } - // Now we need compute the index into the pageList - UInt pageInd = atop_32(offset); - offset &= PAGE_MASK; + // Calculate length within range and starting address + length = off2Ind - offset; + address = physP[ind - 1].address + len - length; - // Compute the starting address of this segment - IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr; - if (!pageAddr) { - panic("!pageList phys_addr"); + if (true && mapped) { + address = mappedBase + offset; + } else { + // see how far we can coalesce ranges + while (ind < _rangesCount && address + length == physP[ind].address) { + len = physP[ind].length; + length += len; + off2Ind += len; + ind++; + } + } + // correct contiguous check overshoot + ind--; + off2Ind -= len; } - - address = ptoa_64(pageAddr) + offset; - - // length is currently set to the length of the remainider of the iopl. - // We need to check that the remainder of the iopl is contiguous. - // This is indicated by pageList[ind].phys_addr being sequential. 
- IOByteCount contigLength = PAGE_SIZE - offset;
- while (contigLength < length
- && ++pageAddr == pageList[++pageInd].phys_addr)
- {
- contigLength += PAGE_SIZE;
+#endif /* !__LP64__ */
+ else {
+ do {
+ if (!_wireCount) {
+ panic("IOGMD: not wired for the IODMACommand");
+ }
+
+ assert(_memoryEntries);
+
+ dataP = getDataP(_memoryEntries);
+ const ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
+ upl_page_info_t *pageList = getPageList(dataP);
+
+ assert(numIOPLs > 0);
+
+ // Scan through iopl info blocks looking for block containing offset
+ while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
+ ind++;
+ }
+
+ // Go back to actual range as search goes past it
+ ioPLBlock ioplInfo = ioplList[ind - 1];
+ off2Ind = ioplInfo.fIOMDOffset;
+
+ if (ind < numIOPLs) {
+ length = ioplList[ind].fIOMDOffset;
+ } else {
+ length = _length;
+ }
+ length -= offset; // Remainder within iopl
+
+ // Subtract offset till this iopl in total list
+ offset -= off2Ind;
+
+ // If a mapped address is requested and this is a pre-mapped IOPL
+ // then just need to compute an offset relative to the mapped base.
+ if (mapped) {
+ offset += (ioplInfo.fPageOffset & PAGE_MASK);
+ address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
+ continue; // Done; leave do/while(false) now
+ }
+
+ // The offset is rebased into the current iopl.
+ // Now add the iopl 1st page offset.
+ offset += ioplInfo.fPageOffset;
+
+ // For external UPLs the fPageInfo field points directly to
+ // the upl's upl_page_info_t array.
+ if (ioplInfo.fFlags & kIOPLExternUPL) {
+ pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
+ } else {
+ pageList = &pageList[ioplInfo.fPageInfo];
+ }
+
+ // Check for direct device non-paged memory
+ if (ioplInfo.fFlags & kIOPLOnDevice) {
+ address = ptoa_64(pageList->phys_addr) + offset;
+ continue; // Done; leave do/while(false) now
+ }
+
+ // Now we need to compute the index into the pageList
+ UInt pageInd = atop_32(offset);
+ offset &= PAGE_MASK;
+
+ // Compute the starting address of this segment
+ IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
+ if (!pageAddr) {
+ panic("!pageList phys_addr");
+ }
+
+ address = ptoa_64(pageAddr) + offset;
+
+ // length is currently set to the length of the remainder of the iopl.
+ // We need to check that the remainder of the iopl is contiguous.
+ // This is indicated by pageList[ind].phys_addr being sequential. 
+ IOByteCount contigLength = PAGE_SIZE - offset; + while (contigLength < length + && ++pageAddr == pageList[++pageInd].phys_addr) { + contigLength += PAGE_SIZE; + } + + if (contigLength < length) { + length = contigLength; + } + + + assert(address); + assert(length); + } while (false); } - if (contigLength < length) - length = contigLength; - - - assert(address); - assert(length); - - } while (false); + // Update return values and state + isP->fIO.fIOVMAddr = address; + isP->fIO.fLength = length; + isP->fIndex = ind; + isP->fOffset2Index = off2Ind; + isP->fNextOffset = isP->fIO.fOffset + length; - // Update return values and state - isP->fIO.fIOVMAddr = address; - isP->fIO.fLength = length; - isP->fIndex = ind; - isP->fOffset2Index = off2Ind; - isP->fNextOffset = isP->fIO.fOffset + length; - - return kIOReturnSuccess; + return kIOReturnSuccess; } addr64_t IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) { - IOReturn ret; - mach_vm_address_t address = 0; - mach_vm_size_t length = 0; - IOMapper * mapper = gIOSystemMapper; - IOOptionBits type = _flags & kIOMemoryTypeMask; - - if (lengthOfSegment) - *lengthOfSegment = 0; - - if (offset >= _length) - return 0; - - // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must - // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use - // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation - // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up - - if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) - { - unsigned rangesIndex = 0; - Ranges vec = _ranges; - mach_vm_address_t addr; + IOReturn ret; + mach_vm_address_t address = 0; + mach_vm_size_t length = 0; + IOMapper * mapper = gIOSystemMapper; + IOOptionBits type = _flags & kIOMemoryTypeMask; + + if (lengthOfSegment) { + *lengthOfSegment = 0; + } - // Find starting address within the vector of ranges - for (;;) { - getAddrLenForInd(addr, length, type, vec, rangesIndex); - if (offset < length) - break; - offset -= length; // (make offset relative) - rangesIndex++; - } + if (offset >= _length) { + return 0; + } - // Now that we have the starting range, - // lets find the last contiguous range - addr += offset; - length -= offset; + // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must + // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use + // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation + // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up + + if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) { + unsigned rangesIndex = 0; + Ranges vec = _ranges; + mach_vm_address_t addr; + + // Find starting address within the vector of ranges + for (;;) { + getAddrLenForInd(addr, length, type, vec, rangesIndex); + if (offset < length) { + break; + } + offset -= length; // (make offset relative) + rangesIndex++; + } - for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) { - mach_vm_address_t newAddr; - mach_vm_size_t newLen; + // Now that we have the starting range, + // lets find the last contiguous range + addr += offset; + length -= offset; - getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); 
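
// Downstream code walks a prepared descriptor with exactly this
// coalescing in mind; a hedged usage sketch built only on the accessors
// shown in this file (`md` illustrative):
//
//     for (IOByteCount off = 0; off < md->getLength();) {
//         IOByteCount segLen = 0;
//         addr64_t seg = md->getPhysicalSegment(off, &segLen,
//             kIOMemoryMapperNone);
//         if (!seg || !segLen) {
//             break;          // hole or end of descriptor: stop walking
//         }
//         // ... program one DMA element covering [seg, seg + segLen) ...
//         off += segLen;
//     }
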
- if (addr + length != newAddr) - break; - length += newLen; - } - if (addr) - address = (IOPhysicalAddress) addr; // Truncate address to 32bit - } - else - { - IOMDDMAWalkSegmentState _state; - IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state; - - state->fOffset = offset; - state->fLength = _length - offset; - state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote); - - ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state)); - - if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) - DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n", - ret, this, state->fOffset, - state->fIOVMAddr, state->fLength); - if (kIOReturnSuccess == ret) - { - address = state->fIOVMAddr; - length = state->fLength; - } + for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) { + mach_vm_address_t newAddr; + mach_vm_size_t newLen; + + getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); + if (addr + length != newAddr) { + break; + } + length += newLen; + } + if (addr) { + address = (IOPhysicalAddress) addr; // Truncate address to 32bit + } + } else { + IOMDDMAWalkSegmentState _state; + IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state; - // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even - // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up + state->fOffset = offset; + state->fLength = _length - offset; + state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote); - if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) - { - if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) - { - addr64_t origAddr = address; - IOByteCount origLen = length; + ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state)); - address = mapper->mapToPhysicalAddress(origAddr); - length = page_size - (address & (page_size - 1)); - while ((length < origLen) - && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) - length += page_size; - if (length > origLen) - length = origLen; - } + if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) { + DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n", + ret, this, state->fOffset, + state->fIOVMAddr, state->fLength); + } + if (kIOReturnSuccess == ret) { + address = state->fIOVMAddr; + length = state->fLength; + } + + // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even + // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up + + if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) { + if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) { + addr64_t origAddr = address; + IOByteCount origLen = length; + + address = mapper->mapToPhysicalAddress(origAddr); + length = page_size - (address & (page_size - 1)); + while ((length < origLen) + && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) { + length += page_size; + } + if (length > origLen) { + length = origLen; + } + } + } } - } - if (!address) - length = 0; + if (!address) { + length = 0; + } - if (lengthOfSegment) - *lengthOfSegment = length; + if (lengthOfSegment) { + *lengthOfSegment = length; + } - return (address); + return address; } #ifndef __LP64__ @@ -2538,290 
+2632,297 @@ IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *l addr64_t IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) { - addr64_t address = 0; - - if (options & _kIOMemorySourceSegment) - { - address = getSourceSegment(offset, lengthOfSegment); - } - else if (options & kIOMemoryMapperNone) - { - address = getPhysicalSegment64(offset, lengthOfSegment); - } - else - { - address = getPhysicalSegment(offset, lengthOfSegment); - } - - return (address); + addr64_t address = 0; + + if (options & _kIOMemorySourceSegment) { + address = getSourceSegment(offset, lengthOfSegment); + } else if (options & kIOMemoryMapperNone) { + address = getPhysicalSegment64(offset, lengthOfSegment); + } else { + address = getPhysicalSegment(offset, lengthOfSegment); + } + + return address; } #pragma clang diagnostic pop addr64_t IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) { - return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone)); + return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone); } IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment) { - addr64_t address = 0; - IOByteCount length = 0; + addr64_t address = 0; + IOByteCount length = 0; - address = getPhysicalSegment(offset, lengthOfSegment, 0); + address = getPhysicalSegment(offset, lengthOfSegment, 0); - if (lengthOfSegment) - length = *lengthOfSegment; + if (lengthOfSegment) { + length = *lengthOfSegment; + } - if ((address + length) > 0x100000000ULL) - { - panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s", + if ((address + length) > 0x100000000ULL) { + panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s", address, (long) length, (getMetaClass())->getClassName()); - } + } - return ((IOPhysicalAddress) address); + return (IOPhysicalAddress) address; } addr64_t IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) { - IOPhysicalAddress phys32; - IOByteCount length; - addr64_t phys64; - IOMapper * mapper = 0; - - phys32 = getPhysicalSegment(offset, lengthOfSegment); - if (!phys32) - return 0; + IOPhysicalAddress phys32; + IOByteCount length; + addr64_t phys64; + IOMapper * mapper = 0; + + phys32 = getPhysicalSegment(offset, lengthOfSegment); + if (!phys32) { + return 0; + } - if (gIOSystemMapper) - mapper = gIOSystemMapper; + if (gIOSystemMapper) { + mapper = gIOSystemMapper; + } - if (mapper) - { - IOByteCount origLen; + if (mapper) { + IOByteCount origLen; - phys64 = mapper->mapToPhysicalAddress(phys32); - origLen = *lengthOfSegment; - length = page_size - (phys64 & (page_size - 1)); - while ((length < origLen) - && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) - length += page_size; - if (length > origLen) - length = origLen; + phys64 = mapper->mapToPhysicalAddress(phys32); + origLen = *lengthOfSegment; + length = page_size - (phys64 & (page_size - 1)); + while ((length < origLen) + && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) { + length += page_size; + } + if (length > origLen) { + length = origLen; + } - *lengthOfSegment = length; - } - else - phys64 = (addr64_t) phys32; + *lengthOfSegment = length; + } else { + phys64 = (addr64_t) phys32; + } - return phys64; + return phys64; } IOPhysicalAddress IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment) 
{
- return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
+ return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
- return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
+ return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

-void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
- IOByteCount * lengthOfSegment)
+void *
+IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
+ IOByteCount * lengthOfSegment)
{
- if (_task == kernel_task)
- return (void *) getSourceSegment(offset, lengthOfSegment);
- else
- panic("IOGMD::getVirtualSegment deprecated");
+ if (_task == kernel_task) {
+ return (void *) getSourceSegment(offset, lengthOfSegment);
+ } else {
+ panic("IOGMD::getVirtualSegment deprecated");
+ }

- return 0;
+ return 0;
}
#pragma clang diagnostic pop

#endif /* !__LP64__ */

-IOReturn
+IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
- IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
- DMACommandOps params;
- IOReturn err;
-
- params = (op & ~kIOMDDMACommandOperationMask & op);
- op &= kIOMDDMACommandOperationMask;
-
- if (kIOMDGetCharacteristics == op) {
- if (dataSize < sizeof(IOMDDMACharacteristics))
- return kIOReturnUnderrun;
-
- IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
- data->fLength = getLength();
- data->fSGCount = 0;
- data->fDirection = getDirection();
- data->fIsPrepared = true; // Assume prepared - fails safe
- }
- else if (kIOMDWalkSegments == op) {
- if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
- return kIOReturnUnderrun;
-
- IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
- IOByteCount offset = (IOByteCount) data->fOffset;
-
- IOPhysicalLength length;
- if (data->fMapped && IOMapper::gSystem)
- data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
- else
- data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
- data->fLength = length;
- }
- else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
- else if (kIOMDDMAMap == op)
- {
- if (dataSize < sizeof(IOMDDMAMapArgs))
- return kIOReturnUnderrun;
- IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
-
- if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
-
- data->fMapContig = true;
- err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
-
- return (err);
- }
- else if (kIOMDDMAUnmap == op)
- {
- if (dataSize < sizeof(IOMDDMAMapArgs))
- return kIOReturnUnderrun;
- IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
-
- err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
-
- return (kIOReturnSuccess);
- }
- else return kIOReturnBadArgument;
-
- return kIOReturnSuccess;
+ IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
+ DMACommandOps params;
+ IOReturn err;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDGetCharacteristics == op) {
+ if (dataSize < sizeof(IOMDDMACharacteristics)) {
+ return kIOReturnUnderrun;
+ }
+
+ IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
+ data->fLength =
getLength(); + data->fSGCount = 0; + data->fDirection = getDirection(); + data->fIsPrepared = true; // Assume prepared - fails safe + } else if (kIOMDWalkSegments == op) { + if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) { + return kIOReturnUnderrun; + } + + IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData; + IOByteCount offset = (IOByteCount) data->fOffset; + + IOPhysicalLength length; + if (data->fMapped && IOMapper::gSystem) { + data->fIOVMAddr = md->getPhysicalSegment(offset, &length); + } else { + data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone); + } + data->fLength = length; + } else if (kIOMDAddDMAMapSpec == op) { + return kIOReturnUnsupported; + } else if (kIOMDDMAMap == op) { + if (dataSize < sizeof(IOMDDMAMapArgs)) { + return kIOReturnUnderrun; + } + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; + + if (params) { + panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName()); + } + + data->fMapContig = true; + err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength); + + return err; + } else if (kIOMDDMAUnmap == op) { + if (dataSize < sizeof(IOMDDMAMapArgs)) { + return kIOReturnUnderrun; + } + IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData; + + err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength); + + return kIOReturnSuccess; + } else { + return kIOReturnBadArgument; + } + + return kIOReturnSuccess; } -IOReturn +IOReturn IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ) + IOOptionBits * oldState ) { - IOReturn err = kIOReturnSuccess; - - vm_purgable_t control; - int state; - - assert (!(kIOMemoryRemote & _flags)); - if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); - - if (_memRef) - { - err = super::setPurgeable(newState, oldState); - } - else - { - if (kIOMemoryThreadSafe & _flags) - LOCK; - do - { - // Find the appropriate vm_map for the given task - vm_map_t curMap; - if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) - { - err = kIOReturnNotReady; - break; - } - else if (!_task) - { - err = kIOReturnUnsupported; - break; - } - else - { - curMap = get_task_map(_task); - if (NULL == curMap) - { - err = KERN_INVALID_ARGUMENT; - break; - } - } + IOReturn err = kIOReturnSuccess; - // can only do one range - Ranges vec = _ranges; - IOOptionBits type = _flags & kIOMemoryTypeMask; - mach_vm_address_t addr; - mach_vm_size_t len; - getAddrLenForInd(addr, len, type, vec, 0); + vm_purgable_t control; + int state; - err = purgeableControlBits(newState, &control, &state); - if (kIOReturnSuccess != err) - break; - err = vm_map_purgable_control(curMap, addr, control, &state); - if (oldState) - { - if (kIOReturnSuccess == err) - { - err = purgeableStateBits(&state); - *oldState = state; + assert(!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) { + return kIOReturnNotAttached; + } + + if (_memRef) { + err = super::setPurgeable(newState, oldState); + } else { + if (kIOMemoryThreadSafe & _flags) { + LOCK; + } + do{ + // Find the appropriate vm_map for the given task + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) { + err = kIOReturnNotReady; + break; + } else if (!_task) { + err = kIOReturnUnsupported; + break; + } else { + curMap = get_task_map(_task); + if (NULL == curMap) { + err = KERN_INVALID_ARGUMENT; + break; + } + } + + // can only do one range + Ranges vec = _ranges; + 
IOOptionBits type = _flags & kIOMemoryTypeMask; + mach_vm_address_t addr; + mach_vm_size_t len; + getAddrLenForInd(addr, len, type, vec, 0); + + err = purgeableControlBits(newState, &control, &state); + if (kIOReturnSuccess != err) { + break; + } + err = vm_map_purgable_control(curMap, addr, control, &state); + if (oldState) { + if (kIOReturnSuccess == err) { + err = purgeableStateBits(&state); + *oldState = state; + } + } + }while (false); + if (kIOMemoryThreadSafe & _flags) { + UNLOCK; } - } } - while (false); - if (kIOMemoryThreadSafe & _flags) - UNLOCK; - } - return (err); + return err; } -IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ) +IOReturn +IOMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) { - IOReturn err = kIOReturnNotReady; + IOReturn err = kIOReturnNotReady; - if (kIOMemoryThreadSafe & _flags) LOCK; - if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState); - if (kIOMemoryThreadSafe & _flags) UNLOCK; + if (kIOMemoryThreadSafe & _flags) { + LOCK; + } + if (_memRef) { + err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState); + } + if (kIOMemoryThreadSafe & _flags) { + UNLOCK; + } - return (err); + return err; } - -IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount, - IOByteCount * dirtyPageCount ) + +IOReturn +IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount, + IOByteCount * dirtyPageCount ) { - IOReturn err = kIOReturnNotReady; + IOReturn err = kIOReturnNotReady; - assert (!(kIOMemoryRemote & _flags)); - if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); + assert(!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) { + return kIOReturnNotAttached; + } - if (kIOMemoryThreadSafe & _flags) LOCK; - if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount); - else - { - IOMultiMemoryDescriptor * mmd; - IOSubMemoryDescriptor * smd; - if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) - { - err = smd->getPageCounts(residentPageCount, dirtyPageCount); + if (kIOMemoryThreadSafe & _flags) { + LOCK; } - else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) - { - err = mmd->getPageCounts(residentPageCount, dirtyPageCount); + if (_memRef) { + err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount); + } else { + IOMultiMemoryDescriptor * mmd; + IOSubMemoryDescriptor * smd; + if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) { + err = smd->getPageCounts(residentPageCount, dirtyPageCount); + } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) { + err = mmd->getPageCounts(residentPageCount, dirtyPageCount); + } + } + if (kIOMemoryThreadSafe & _flags) { + UNLOCK; } - } - if (kIOMemoryThreadSafe & _flags) UNLOCK; - return (err); + return err; } - + #if defined(__arm__) || defined(__arm64__) extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res); @@ -2831,129 +2932,137 @@ extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); #endif /* defined(__arm__) || defined(__arm64__) */ -static void SetEncryptOp(addr64_t pa, unsigned int count) +static void +SetEncryptOp(addr64_t pa, unsigned int count) { - ppnum_t page, end; - - page = atop_64(round_page_64(pa)); - end = 
atop_64(trunc_page_64(pa + count)); - for (; page < end; page++) - { - pmap_clear_noencrypt(page); - } + ppnum_t page, end; + + page = atop_64(round_page_64(pa)); + end = atop_64(trunc_page_64(pa + count)); + for (; page < end; page++) { + pmap_clear_noencrypt(page); + } } -static void ClearEncryptOp(addr64_t pa, unsigned int count) +static void +ClearEncryptOp(addr64_t pa, unsigned int count) { - ppnum_t page, end; - - page = atop_64(round_page_64(pa)); - end = atop_64(trunc_page_64(pa + count)); - for (; page < end; page++) - { - pmap_set_noencrypt(page); - } + ppnum_t page, end; + + page = atop_64(round_page_64(pa)); + end = atop_64(trunc_page_64(pa + count)); + for (; page < end; page++) { + pmap_set_noencrypt(page); + } } -IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, - IOByteCount offset, IOByteCount length ) +IOReturn +IOMemoryDescriptor::performOperation( IOOptionBits options, + IOByteCount offset, IOByteCount length ) { - IOByteCount remaining; - unsigned int res; - void (*func)(addr64_t pa, unsigned int count) = 0; + IOByteCount remaining; + unsigned int res; + void (*func)(addr64_t pa, unsigned int count) = 0; #if defined(__arm__) || defined(__arm64__) - void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0; + void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0; #endif - assert (!(kIOMemoryRemote & _flags)); - if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); + assert(!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) { + return kIOReturnNotAttached; + } - switch (options) - { - case kIOMemoryIncoherentIOFlush: + switch (options) { + case kIOMemoryIncoherentIOFlush: #if defined(__arm__) || defined(__arm64__) - func_ext = &dcache_incoherent_io_flush64; + func_ext = &dcache_incoherent_io_flush64; #if __ARM_COHERENT_IO__ - func_ext(0, 0, 0, &res); - return kIOReturnSuccess; + func_ext(0, 0, 0, &res); + return kIOReturnSuccess; #else /* __ARM_COHERENT_IO__ */ - break; + break; #endif /* __ARM_COHERENT_IO__ */ #else /* defined(__arm__) || defined(__arm64__) */ - func = &dcache_incoherent_io_flush64; - break; + func = &dcache_incoherent_io_flush64; + break; #endif /* defined(__arm__) || defined(__arm64__) */ - case kIOMemoryIncoherentIOStore: + case kIOMemoryIncoherentIOStore: #if defined(__arm__) || defined(__arm64__) - func_ext = &dcache_incoherent_io_store64; + func_ext = &dcache_incoherent_io_store64; #if __ARM_COHERENT_IO__ - func_ext(0, 0, 0, &res); - return kIOReturnSuccess; + func_ext(0, 0, 0, &res); + return kIOReturnSuccess; #else /* __ARM_COHERENT_IO__ */ - break; + break; #endif /* __ARM_COHERENT_IO__ */ #else /* defined(__arm__) || defined(__arm64__) */ - func = &dcache_incoherent_io_store64; - break; + func = &dcache_incoherent_io_store64; + break; #endif /* defined(__arm__) || defined(__arm64__) */ - case kIOMemorySetEncrypted: - func = &SetEncryptOp; - break; - case kIOMemoryClearEncrypted: - func = &ClearEncryptOp; - break; - } + case kIOMemorySetEncrypted: + func = &SetEncryptOp; + break; + case kIOMemoryClearEncrypted: + func = &ClearEncryptOp; + break; + } #if defined(__arm__) || defined(__arm64__) - if ((func == 0) && (func_ext == 0)) - return (kIOReturnUnsupported); + if ((func == 0) && (func_ext == 0)) { + return kIOReturnUnsupported; + } #else /* defined(__arm__) || defined(__arm64__) */ - if (!func) - return (kIOReturnUnsupported); + if (!func) { + return kIOReturnUnsupported; + } #endif /* defined(__arm__) || 
defined(__arm64__) */ - if (kIOMemoryThreadSafe & _flags) - LOCK; + if (kIOMemoryThreadSafe & _flags) { + LOCK; + } - res = 0x0UL; - remaining = length = min(length, getLength() - offset); - while (remaining) - // (process another target segment?) - { - addr64_t dstAddr64; - IOByteCount dstLen; + res = 0x0UL; + remaining = length = min(length, getLength() - offset); + while (remaining) { + // (process another target segment?) + addr64_t dstAddr64; + IOByteCount dstLen; - dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); - if (!dstAddr64) - break; + dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone); + if (!dstAddr64) { + break; + } - // Clip segment length to remaining - if (dstLen > remaining) - dstLen = remaining; + // Clip segment length to remaining + if (dstLen > remaining) { + dstLen = remaining; + } #if defined(__arm__) || defined(__arm64__) - if (func) - (*func)(dstAddr64, dstLen); - if (func_ext) { - (*func_ext)(dstAddr64, dstLen, remaining, &res); - if (res != 0x0UL) { - remaining = 0; - break; - } - } + if (func) { + (*func)(dstAddr64, dstLen); + } + if (func_ext) { + (*func_ext)(dstAddr64, dstLen, remaining, &res); + if (res != 0x0UL) { + remaining = 0; + break; + } + } #else /* defined(__arm__) || defined(__arm64__) */ - (*func)(dstAddr64, dstLen); + (*func)(dstAddr64, dstLen); #endif /* defined(__arm__) || defined(__arm64__) */ - offset += dstLen; - remaining -= dstLen; - } + offset += dstLen; + remaining -= dstLen; + } - if (kIOMemoryThreadSafe & _flags) - UNLOCK; + if (kIOMemoryThreadSafe & _flags) { + UNLOCK; + } - return (remaining ? kIOReturnUnderrun : kIOReturnSuccess); + return remaining ? kIOReturnUnderrun : kIOReturnSuccess; } /* @@ -2962,12 +3071,12 @@ IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options, #if defined(__i386__) || defined(__x86_64__) -#define io_kernel_static_start vm_kernel_stext -#define io_kernel_static_end vm_kernel_etext +#define io_kernel_static_start vm_kernel_stext +#define io_kernel_static_end vm_kernel_etext #elif defined(__arm__) || defined(__arm64__) -extern vm_offset_t static_memory_end; +extern vm_offset_t static_memory_end; #if defined(__arm64__) #define io_kernel_static_start vm_kext_base @@ -2975,7 +3084,7 @@ extern vm_offset_t static_memory_end; #define io_kernel_static_start vm_kernel_stext #endif /* defined(__arm64__) */ -#define io_kernel_static_end static_memory_end +#define io_kernel_static_end static_memory_end #else #error io_kernel_static_end is undefined for this architecture @@ -2983,494 +3092,536 @@ extern vm_offset_t static_memory_end; static kern_return_t io_get_kernel_static_upl( - vm_map_t /* map */, - uintptr_t offset, - upl_size_t *upl_size, - upl_t *upl, - upl_page_info_array_t page_list, - unsigned int *count, - ppnum_t *highest_page) + vm_map_t /* map */, + uintptr_t offset, + upl_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count, + ppnum_t *highest_page) { - unsigned int pageCount, page; - ppnum_t phys; - ppnum_t highestPage = 0; - - pageCount = atop_32(*upl_size); - if (pageCount > *count) - pageCount = *count; - - *upl = NULL; - - for (page = 0; page < pageCount; page++) - { - phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page)); - if (!phys) - break; - page_list[page].phys_addr = phys; - page_list[page].free_when_done = 0; - page_list[page].absent = 0; - page_list[page].dirty = 0; - page_list[page].precious = 0; - page_list[page].device = 0; - if (phys > highestPage) - highestPage = phys; - } - - 
*highest_page = highestPage; - - return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError); + unsigned int pageCount, page; + ppnum_t phys; + ppnum_t highestPage = 0; + + pageCount = atop_32(*upl_size); + if (pageCount > *count) { + pageCount = *count; + } + + *upl = NULL; + + for (page = 0; page < pageCount; page++) { + phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page)); + if (!phys) { + break; + } + page_list[page].phys_addr = phys; + page_list[page].free_when_done = 0; + page_list[page].absent = 0; + page_list[page].dirty = 0; + page_list[page].precious = 0; + page_list[page].device = 0; + if (phys > highestPage) { + highestPage = phys; + } + } + + *highest_page = highestPage; + + return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError; } -IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) +IOReturn +IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection) { - IOOptionBits type = _flags & kIOMemoryTypeMask; - IOReturn error = kIOReturnSuccess; - ioGMDData *dataP; - upl_page_info_array_t pageInfo; - ppnum_t mapBase; - vm_tag_t tag = VM_KERN_MEMORY_NONE; - - assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type); - - if ((kIODirectionOutIn & forDirection) == kIODirectionNone) - forDirection = (IODirection) (forDirection | getDirection()); - - dataP = getDataP(_memoryEntries); - upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation - switch (kIODirectionOutIn & forDirection) - { + IOOptionBits type = _flags & kIOMemoryTypeMask; + IOReturn error = kIOReturnSuccess; + ioGMDData *dataP; + upl_page_info_array_t pageInfo; + ppnum_t mapBase; + vm_tag_t tag = VM_KERN_MEMORY_NONE; + + assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type); + + if ((kIODirectionOutIn & forDirection) == kIODirectionNone) { + forDirection = (IODirection) (forDirection | getDirection()); + } + + dataP = getDataP(_memoryEntries); + upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation + switch (kIODirectionOutIn & forDirection) { case kIODirectionOut: - // Pages do not need to be marked as dirty on commit - uplFlags = UPL_COPYOUT_FROM; - dataP->fDMAAccess = kIODMAMapReadAccess; - break; + // Pages do not need to be marked as dirty on commit + uplFlags = UPL_COPYOUT_FROM; + dataP->fDMAAccess = kIODMAMapReadAccess; + break; case kIODirectionIn: - dataP->fDMAAccess = kIODMAMapWriteAccess; - uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM - break; + dataP->fDMAAccess = kIODMAMapWriteAccess; + uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM + break; default: - dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess; - uplFlags = 0; // i.e. 
~UPL_COPYOUT_FROM - break; - } - - if (_wireCount) - { - if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) - { - OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this)); - error = kIOReturnNotWritable; - } - } - else - { - IOMapper *mapper; - - mapper = dataP->fMapper; - dataP->fMappedBaseValid = dataP->fMappedBase = 0; - - uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; - tag = _kernelTag; - if (VM_KERN_MEMORY_NONE == tag) tag = IOMemoryTag(kernel_map); - - if (kIODirectionPrepareToPhys32 & forDirection) - { - if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR; - if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32; - } - if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT; - if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO; - if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY; - - mapBase = 0; - - // Note that appendBytes(NULL) zeros the data up to the desired length - // and the length parameter is an unsigned int - size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); - if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory); - if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory); - dataP = 0; - - // Find the appropriate vm_map for the given task - vm_map_t curMap; - if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0; - else curMap = get_task_map(_task); - - // Iterate over the vector of virtual ranges - Ranges vec = _ranges; - unsigned int pageIndex = 0; - IOByteCount mdOffset = 0; - ppnum_t highestPage = 0; - - IOMemoryEntry * memRefEntry = 0; - if (_memRef) memRefEntry = &_memRef->entries[0]; - - for (UInt range = 0; range < _rangesCount; range++) { - ioPLBlock iopl; - mach_vm_address_t startPage, startPageOffset; - mach_vm_size_t numBytes; - ppnum_t highPage = 0; - - // Get the startPage address and length of vec[range] - getAddrLenForInd(startPage, numBytes, type, vec, range); - startPageOffset = startPage & PAGE_MASK; - iopl.fPageOffset = startPageOffset; - numBytes += startPageOffset; - startPage = trunc_page_64(startPage); - - if (mapper) - iopl.fMappedPage = mapBase + pageIndex; - else - iopl.fMappedPage = 0; - - // Iterate over the current range, creating UPLs - while (numBytes) { - vm_address_t kernelStart = (vm_address_t) startPage; - vm_map_t theMap; - if (curMap) theMap = curMap; - else if (_memRef) - { - theMap = NULL; - } - else - { - assert(_task == kernel_task); - theMap = IOPageableMapForAddress(kernelStart); - } - - // ioplFlags is an in/out parameter - upl_control_flags_t ioplFlags = uplFlags; - dataP = getDataP(_memoryEntries); - pageInfo = getPageList(dataP); - upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; - - mach_vm_size_t _ioplSize = round_page(numBytes); - upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? 
_ioplSize : MAX_UPL_SIZE_BYTES; - unsigned int numPageInfo = atop_32(ioplSize); - - if ((theMap == kernel_map) - && (kernelStart >= io_kernel_static_start) - && (kernelStart < io_kernel_static_end)) { - error = io_get_kernel_static_upl(theMap, - kernelStart, - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &highPage); - } - else if (_memRef) { - memory_object_offset_t entryOffset; - - entryOffset = mdOffset; - entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset); - if (entryOffset >= memRefEntry->size) { - memRefEntry++; - if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry"); - entryOffset = 0; - } - if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset); - error = memory_object_iopl_request(memRefEntry->entry, - entryOffset, - &ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &ioplFlags, - tag); - } - else { - assert(theMap); - error = vm_map_create_upl(theMap, - startPage, - (upl_size_t*)&ioplSize, - &iopl.fIOPL, - baseInfo, - &numPageInfo, - &ioplFlags, - tag); - } - - if (error != KERN_SUCCESS) goto abortExit; - - assert(ioplSize); - - if (iopl.fIOPL) - highPage = upl_get_highest_page(iopl.fIOPL); - if (highPage > highestPage) - highestPage = highPage; - - if (baseInfo->device) { - numPageInfo = 1; - iopl.fFlags = kIOPLOnDevice; - } - else { - iopl.fFlags = 0; - } - - iopl.fIOMDOffset = mdOffset; - iopl.fPageInfo = pageIndex; - if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) dataP->fDiscontig = true; - - if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { - // Clean up partial created and unsaved iopl - if (iopl.fIOPL) { - upl_abort(iopl.fIOPL, 0); - upl_deallocate(iopl.fIOPL); - } - goto abortExit; - } - dataP = 0; - - // Check for a multiple iopl's in one virtual range - pageIndex += numPageInfo; - mdOffset -= iopl.fPageOffset; - if (ioplSize < numBytes) { - numBytes -= ioplSize; - startPage += ioplSize; - mdOffset += ioplSize; - iopl.fPageOffset = 0; - if (mapper) iopl.fMappedPage = mapBase + pageIndex; - } - else { - mdOffset += numBytes; - break; - } - } - } - - _highestPage = highestPage; - - if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly; - } + dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess; + uplFlags = 0; // i.e. 
~UPL_COPYOUT_FROM + break; + } + + if (_wireCount) { + if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) { + OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this)); + error = kIOReturnNotWritable; + } + } else { + IOMapper *mapper; + + mapper = dataP->fMapper; + dataP->fMappedBaseValid = dataP->fMappedBase = 0; + + uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE; + tag = _kernelTag; + if (VM_KERN_MEMORY_NONE == tag) { + tag = IOMemoryTag(kernel_map); + } + + if (kIODirectionPrepareToPhys32 & forDirection) { + if (!mapper) { + uplFlags |= UPL_NEED_32BIT_ADDR; + } + if (dataP->fDMAMapNumAddressBits > 32) { + dataP->fDMAMapNumAddressBits = 32; + } + } + if (kIODirectionPrepareNoFault & forDirection) { + uplFlags |= UPL_REQUEST_NO_FAULT; + } + if (kIODirectionPrepareNoZeroFill & forDirection) { + uplFlags |= UPL_NOZEROFILLIO; + } + if (kIODirectionPrepareNonCoherent & forDirection) { + uplFlags |= UPL_REQUEST_FORCE_COHERENCY; + } + + mapBase = 0; + + // Note that appendBytes(NULL) zeros the data up to the desired length + // and the length parameter is an unsigned int + size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t); + if (uplPageSize > ((unsigned int)uplPageSize)) { + return kIOReturnNoMemory; + } + if (!_memoryEntries->appendBytes(0, uplPageSize)) { + return kIOReturnNoMemory; + } + dataP = 0; + + // Find the appropriate vm_map for the given task + vm_map_t curMap; + if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) { + curMap = 0; + } else { + curMap = get_task_map(_task); + } + + // Iterate over the vector of virtual ranges + Ranges vec = _ranges; + unsigned int pageIndex = 0; + IOByteCount mdOffset = 0; + ppnum_t highestPage = 0; + + IOMemoryEntry * memRefEntry = 0; + if (_memRef) { + memRefEntry = &_memRef->entries[0]; + } + + for (UInt range = 0; range < _rangesCount; range++) { + ioPLBlock iopl; + mach_vm_address_t startPage, startPageOffset; + mach_vm_size_t numBytes; + ppnum_t highPage = 0; + + // Get the startPage address and length of vec[range] + getAddrLenForInd(startPage, numBytes, type, vec, range); + startPageOffset = startPage & PAGE_MASK; + iopl.fPageOffset = startPageOffset; + numBytes += startPageOffset; + startPage = trunc_page_64(startPage); + + if (mapper) { + iopl.fMappedPage = mapBase + pageIndex; + } else { + iopl.fMappedPage = 0; + } + + // Iterate over the current range, creating UPLs + while (numBytes) { + vm_address_t kernelStart = (vm_address_t) startPage; + vm_map_t theMap; + if (curMap) { + theMap = curMap; + } else if (_memRef) { + theMap = NULL; + } else { + assert(_task == kernel_task); + theMap = IOPageableMapForAddress(kernelStart); + } + + // ioplFlags is an in/out parameter + upl_control_flags_t ioplFlags = uplFlags; + dataP = getDataP(_memoryEntries); + pageInfo = getPageList(dataP); + upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex]; + + mach_vm_size_t _ioplSize = round_page(numBytes); + upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? 
_ioplSize : MAX_UPL_SIZE_BYTES; + unsigned int numPageInfo = atop_32(ioplSize); + + if ((theMap == kernel_map) + && (kernelStart >= io_kernel_static_start) + && (kernelStart < io_kernel_static_end)) { + error = io_get_kernel_static_upl(theMap, + kernelStart, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &highPage); + } else if (_memRef) { + memory_object_offset_t entryOffset; + + entryOffset = mdOffset; + entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset); + if (entryOffset >= memRefEntry->size) { + memRefEntry++; + if (memRefEntry >= &_memRef->entries[_memRef->count]) { + panic("memRefEntry"); + } + entryOffset = 0; + } + if (ioplSize > (memRefEntry->size - entryOffset)) { + ioplSize = (memRefEntry->size - entryOffset); + } + error = memory_object_iopl_request(memRefEntry->entry, + entryOffset, + &ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags, + tag); + } else { + assert(theMap); + error = vm_map_create_upl(theMap, + startPage, + (upl_size_t*)&ioplSize, + &iopl.fIOPL, + baseInfo, + &numPageInfo, + &ioplFlags, + tag); + } + + if (error != KERN_SUCCESS) { + goto abortExit; + } + + assert(ioplSize); + + if (iopl.fIOPL) { + highPage = upl_get_highest_page(iopl.fIOPL); + } + if (highPage > highestPage) { + highestPage = highPage; + } + + if (baseInfo->device) { + numPageInfo = 1; + iopl.fFlags = kIOPLOnDevice; + } else { + iopl.fFlags = 0; + } + + iopl.fIOMDOffset = mdOffset; + iopl.fPageInfo = pageIndex; + if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) { + dataP->fDiscontig = true; + } + + if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) { + // Clean up partial created and unsaved iopl + if (iopl.fIOPL) { + upl_abort(iopl.fIOPL, 0); + upl_deallocate(iopl.fIOPL); + } + goto abortExit; + } + dataP = 0; + + // Check for a multiple iopl's in one virtual range + pageIndex += numPageInfo; + mdOffset -= iopl.fPageOffset; + if (ioplSize < numBytes) { + numBytes -= ioplSize; + startPage += ioplSize; + mdOffset += ioplSize; + iopl.fPageOffset = 0; + if (mapper) { + iopl.fMappedPage = mapBase + pageIndex; + } + } else { + mdOffset += numBytes; + break; + } + } + } + + _highestPage = highestPage; + + if (UPL_COPYOUT_FROM & uplFlags) { + _flags |= kIOMemoryPreparedReadOnly; + } + } #if IOTRACKING - if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) - { - dataP = getDataP(_memoryEntries); - if (!dataP->fWireTracking.link.next) - { - IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag); + if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) { + dataP = getDataP(_memoryEntries); + if (!dataP->fWireTracking.link.next) { + IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag); + } } - } #endif /* IOTRACKING */ - return (error); + return error; abortExit: - { - dataP = getDataP(_memoryEntries); - UInt done = getNumIOPL(_memoryEntries, dataP); - ioPLBlock *ioplList = getIOPLList(dataP); - - for (UInt range = 0; range < done; range++) { - if (ioplList[range].fIOPL) { - upl_abort(ioplList[range].fIOPL, 0); - upl_deallocate(ioplList[range].fIOPL); - } + dataP = getDataP(_memoryEntries); + UInt done = getNumIOPL(_memoryEntries, dataP); + ioPLBlock *ioplList = getIOPLList(dataP); + + for (UInt range = 0; range < done; range++) { + if (ioplList[range].fIOPL) { + upl_abort(ioplList[range].fIOPL, 0); + upl_deallocate(ioplList[range].fIOPL); + } + } + (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() } - (void) 
_memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() - } - if (error == KERN_FAILURE) - error = kIOReturnCannotWire; - else if (error == KERN_MEMORY_ERROR) - error = kIOReturnNoResources; + if (error == KERN_FAILURE) { + error = kIOReturnCannotWire; + } else if (error == KERN_MEMORY_ERROR) { + error = kIOReturnNoResources; + } - return error; + return error; } -bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper) +bool +IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper) { - ioGMDData * dataP; - unsigned dataSize = size; - - if (!_memoryEntries) { - _memoryEntries = OSData::withCapacity(dataSize); - if (!_memoryEntries) - return false; - } - else if (!_memoryEntries->initWithCapacity(dataSize)) - return false; + ioGMDData * dataP; + unsigned dataSize = size; + + if (!_memoryEntries) { + _memoryEntries = OSData::withCapacity(dataSize); + if (!_memoryEntries) { + return false; + } + } else if (!_memoryEntries->initWithCapacity(dataSize)) { + return false; + } + + _memoryEntries->appendBytes(0, computeDataSize(0, 0)); + dataP = getDataP(_memoryEntries); - _memoryEntries->appendBytes(0, computeDataSize(0, 0)); - dataP = getDataP(_memoryEntries); - - if (mapper == kIOMapperWaitSystem) { - IOMapper::checkForSystemMapper(); - mapper = IOMapper::gSystem; - } - dataP->fMapper = mapper; - dataP->fPageCnt = 0; - dataP->fMappedBase = 0; - dataP->fDMAMapNumAddressBits = 64; - dataP->fDMAMapAlignment = 0; - dataP->fPreparationID = kIOPreparationIDUnprepared; - dataP->fDiscontig = false; - dataP->fCompletionError = false; - dataP->fMappedBaseValid = false; - - return (true); + if (mapper == kIOMapperWaitSystem) { + IOMapper::checkForSystemMapper(); + mapper = IOMapper::gSystem; + } + dataP->fMapper = mapper; + dataP->fPageCnt = 0; + dataP->fMappedBase = 0; + dataP->fDMAMapNumAddressBits = 64; + dataP->fDMAMapAlignment = 0; + dataP->fPreparationID = kIOPreparationIDUnprepared; + dataP->fDiscontig = false; + dataP->fCompletionError = false; + dataP->fMappedBaseValid = false; + + return true; } -IOReturn IOMemoryDescriptor::dmaMap( - IOMapper * mapper, - IODMACommand * command, - const IODMAMapSpecification * mapSpec, - uint64_t offset, - uint64_t length, - uint64_t * mapAddress, - uint64_t * mapLength) +IOReturn +IOMemoryDescriptor::dmaMap( + IOMapper * mapper, + IODMACommand * command, + const IODMAMapSpecification * mapSpec, + uint64_t offset, + uint64_t length, + uint64_t * mapAddress, + uint64_t * mapLength) { - IOReturn err; - uint32_t mapOptions; + IOReturn err; + uint32_t mapOptions; - mapOptions = 0; - mapOptions |= kIODMAMapReadAccess; - if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess; + mapOptions = 0; + mapOptions |= kIODMAMapReadAccess; + if (!(kIOMemoryPreparedReadOnly & _flags)) { + mapOptions |= kIODMAMapWriteAccess; + } - err = mapper->iovmMapMemory(this, offset, length, mapOptions, - mapSpec, command, NULL, mapAddress, mapLength); + err = mapper->iovmMapMemory(this, offset, length, mapOptions, + mapSpec, command, NULL, mapAddress, mapLength); - if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength); + if (kIOReturnSuccess == err) { + dmaMapRecord(mapper, command, *mapLength); + } - return (err); + return err; } -void IOMemoryDescriptor::dmaMapRecord( - IOMapper * mapper, - IODMACommand * command, - uint64_t mapLength) +void +IOMemoryDescriptor::dmaMapRecord( + IOMapper * mapper, + IODMACommand * command, + uint64_t mapLength) { - kern_allocation_name_t alloc; - 
int16_t prior; - - if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) - { - kern_allocation_update_size(mapper->fAllocName, mapLength); - } - - if (!command) return; - prior = OSAddAtomic16(1, &_dmaReferences); - if (!prior) - { - if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) - { - _mapName = alloc; - mapLength = _length; - kern_allocation_update_subtotal(alloc, _kernelTag, mapLength); + kern_allocation_name_t alloc; + int16_t prior; + + if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) { + kern_allocation_update_size(mapper->fAllocName, mapLength); + } + + if (!command) { + return; + } + prior = OSAddAtomic16(1, &_dmaReferences); + if (!prior) { + if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) { + _mapName = alloc; + mapLength = _length; + kern_allocation_update_subtotal(alloc, _kernelTag, mapLength); + } else { + _mapName = NULL; + } } - else _mapName = NULL; - } } -IOReturn IOMemoryDescriptor::dmaUnmap( - IOMapper * mapper, - IODMACommand * command, - uint64_t offset, - uint64_t mapAddress, - uint64_t mapLength) +IOReturn +IOMemoryDescriptor::dmaUnmap( + IOMapper * mapper, + IODMACommand * command, + uint64_t offset, + uint64_t mapAddress, + uint64_t mapLength) { - IOReturn ret; - kern_allocation_name_t alloc; - kern_allocation_name_t mapName; - int16_t prior; - - mapName = 0; - prior = 0; - if (command) - { - mapName = _mapName; - if (_dmaReferences) prior = OSAddAtomic16(-1, &_dmaReferences); - else panic("_dmaReferences underflow"); - } - - if (!mapLength) return (kIOReturnSuccess); - - ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength); - - if ((alloc = mapper->fAllocName)) - { - kern_allocation_update_size(alloc, -mapLength); - if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) - { - mapLength = _length; - kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength); - } - } - - return (ret); + IOReturn ret; + kern_allocation_name_t alloc; + kern_allocation_name_t mapName; + int16_t prior; + + mapName = 0; + prior = 0; + if (command) { + mapName = _mapName; + if (_dmaReferences) { + prior = OSAddAtomic16(-1, &_dmaReferences); + } else { + panic("_dmaReferences underflow"); + } + } + + if (!mapLength) { + return kIOReturnSuccess; + } + + ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength); + + if ((alloc = mapper->fAllocName)) { + kern_allocation_update_size(alloc, -mapLength); + if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) { + mapLength = _length; + kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength); + } + } + + return ret; } -IOReturn IOGeneralMemoryDescriptor::dmaMap( - IOMapper * mapper, - IODMACommand * command, - const IODMAMapSpecification * mapSpec, - uint64_t offset, - uint64_t length, - uint64_t * mapAddress, - uint64_t * mapLength) +IOReturn +IOGeneralMemoryDescriptor::dmaMap( + IOMapper * mapper, + IODMACommand * command, + const IODMAMapSpecification * mapSpec, + uint64_t offset, + uint64_t length, + uint64_t * mapAddress, + uint64_t * mapLength) { - IOReturn err = kIOReturnSuccess; - ioGMDData * dataP; - IOOptionBits type = _flags & kIOMemoryTypeMask; - - *mapAddress = 0; - if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess); - if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); - - if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64) - || offset || (length != _length)) - { - err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength); - } - else if (_memoryEntries && 
_pages && (dataP = getDataP(_memoryEntries))) - { - const ioPLBlock * ioplList = getIOPLList(dataP); - upl_page_info_t * pageList; - uint32_t mapOptions = 0; - - IODMAMapSpecification mapSpec; - bzero(&mapSpec, sizeof(mapSpec)); - mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; - mapSpec.alignment = dataP->fDMAMapAlignment; - - // For external UPLs the fPageInfo field points directly to - // the upl's upl_page_info_t array. - if (ioplList->fFlags & kIOPLExternUPL) - { - pageList = (upl_page_info_t *) ioplList->fPageInfo; - mapOptions |= kIODMAMapPagingPath; - } - else pageList = getPageList(dataP); + IOReturn err = kIOReturnSuccess; + ioGMDData * dataP; + IOOptionBits type = _flags & kIOMemoryTypeMask; - if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) - { - mapOptions |= kIODMAMapPageListFullyOccupied; + *mapAddress = 0; + if (kIOMemoryHostOnly & _flags) { + return kIOReturnSuccess; + } + if (kIOMemoryRemote & _flags) { + return kIOReturnNotAttached; } - assert(dataP->fDMAAccess); - mapOptions |= dataP->fDMAAccess; + if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64) + || offset || (length != _length)) { + err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength); + } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) { + const ioPLBlock * ioplList = getIOPLList(dataP); + upl_page_info_t * pageList; + uint32_t mapOptions = 0; + + IODMAMapSpecification mapSpec; + bzero(&mapSpec, sizeof(mapSpec)); + mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits; + mapSpec.alignment = dataP->fDMAMapAlignment; + + // For external UPLs the fPageInfo field points directly to + // the upl's upl_page_info_t array. + if (ioplList->fFlags & kIOPLExternUPL) { + pageList = (upl_page_info_t *) ioplList->fPageInfo; + mapOptions |= kIODMAMapPagingPath; + } else { + pageList = getPageList(dataP); + } - // Check for direct device non-paged memory - if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous; + if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) { + mapOptions |= kIODMAMapPageListFullyOccupied; + } - IODMAMapPageList dmaPageList = - { - .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask), - .pageListCount = _pages, - .pageList = &pageList[0] - }; - err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec, - command, &dmaPageList, mapAddress, mapLength); + assert(dataP->fDMAAccess); + mapOptions |= dataP->fDMAAccess; - if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength); - } + // Check for direct device non-paged memory + if (ioplList->fFlags & kIOPLOnDevice) { + mapOptions |= kIODMAMapPhysicallyContiguous; + } - return (err); + IODMAMapPageList dmaPageList = + { + .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask), + .pageListCount = _pages, + .pageList = &pageList[0] + }; + err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec, + command, &dmaPageList, mapAddress, mapLength); + + if (kIOReturnSuccess == err) { + dmaMapRecord(mapper, command, *mapLength); + } + } + + return err; } /* @@ -3483,38 +3634,42 @@ IOReturn IOGeneralMemoryDescriptor::dmaMap( * called for non-pageable memory. 
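 *
 * A minimal sketch of the intended pairing (illustrative only; `md` stands
 * for some IOMemoryDescriptor and is not a name from this patch):
 *
 *     if (kIOReturnSuccess == md->prepare()) {
 *         // pages are wired here; run the DMA or physical-segment walk
 *         md->complete(); // balance every successful prepare()
 *     }
 *
 * prepare() and complete() nest through _wireCount, so each successful
 * prepare() must be balanced by exactly one complete().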
*/ -IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) +IOReturn +IOGeneralMemoryDescriptor::prepare(IODirection forDirection) { - IOReturn error = kIOReturnSuccess; - IOOptionBits type = _flags & kIOMemoryTypeMask; + IOReturn error = kIOReturnSuccess; + IOOptionBits type = _flags & kIOMemoryTypeMask; - if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) - return kIOReturnSuccess; + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { + return kIOReturnSuccess; + } - assert (!(kIOMemoryRemote & _flags)); - if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); + assert(!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) { + return kIOReturnNotAttached; + } - if (_prepareLock) IOLockLock(_prepareLock); + if (_prepareLock) { + IOLockLock(_prepareLock); + } - if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) - { - error = wireVirtual(forDirection); - } + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { + error = wireVirtual(forDirection); + } - if (kIOReturnSuccess == error) - { - if (1 == ++_wireCount) - { - if (kIOMemoryClearEncrypt & _flags) - { - performOperation(kIOMemoryClearEncrypted, 0, _length); - } - } - } + if (kIOReturnSuccess == error) { + if (1 == ++_wireCount) { + if (kIOMemoryClearEncrypt & _flags) { + performOperation(kIOMemoryClearEncrypted, 0, _length); + } + } + } - if (_prepareLock) IOLockUnlock(_prepareLock); + if (_prepareLock) { + IOLockUnlock(_prepareLock); + } - return error; + return error; } /* @@ -3526,271 +3681,284 @@ IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection) * before and after an I/O transfer involving pageable memory. */ -IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection) +IOReturn +IOGeneralMemoryDescriptor::complete(IODirection forDirection) { - IOOptionBits type = _flags & kIOMemoryTypeMask; - ioGMDData * dataP; + IOOptionBits type = _flags & kIOMemoryTypeMask; + ioGMDData * dataP; - if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) - return kIOReturnSuccess; + if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) { + return kIOReturnSuccess; + } + + assert(!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) { + return kIOReturnNotAttached; + } + + if (_prepareLock) { + IOLockLock(_prepareLock); + } + do{ + assert(_wireCount); + if (!_wireCount) { + break; + } + dataP = getDataP(_memoryEntries); + if (!dataP) { + break; + } + + if (kIODirectionCompleteWithError & forDirection) { + dataP->fCompletionError = true; + } + + if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) { + performOperation(kIOMemorySetEncrypted, 0, _length); + } - assert (!(kIOMemoryRemote & _flags)); - if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached); - - if (_prepareLock) IOLockLock(_prepareLock); - do - { - assert(_wireCount); - if (!_wireCount) break; - dataP = getDataP(_memoryEntries); - if (!dataP) break; - - if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true; - - if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) - { - performOperation(kIOMemorySetEncrypted, 0, _length); - } - - _wireCount--; - if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) - { - ioPLBlock *ioplList = getIOPLList(dataP); - UInt ind, count = getNumIOPL(_memoryEntries, dataP); - - if (_wireCount) - { - // kIODirectionCompleteWithDataValid & forDirection - if 
(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) - { - vm_tag_t tag; - tag = getVMTag(kernel_map); - for (ind = 0; ind < count; ind++) - { - if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL, tag); - } - } - } - else - { - if (_dmaReferences) panic("complete() while dma active"); - - if (dataP->fMappedBaseValid) { - dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength); - dataP->fMappedBaseValid = dataP->fMappedBase = 0; - } + _wireCount--; + if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) { + ioPLBlock *ioplList = getIOPLList(dataP); + UInt ind, count = getNumIOPL(_memoryEntries, dataP); + + if (_wireCount) { + // kIODirectionCompleteWithDataValid & forDirection + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { + vm_tag_t tag; + tag = getVMTag(kernel_map); + for (ind = 0; ind < count; ind++) { + if (ioplList[ind].fIOPL) { + iopl_valid_data(ioplList[ind].fIOPL, tag); + } + } + } + } else { + if (_dmaReferences) { + panic("complete() while dma active"); + } + + if (dataP->fMappedBaseValid) { + dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength); + dataP->fMappedBaseValid = dataP->fMappedBase = 0; + } #if IOTRACKING - if (dataP->fWireTracking.link.next) IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages)); + if (dataP->fWireTracking.link.next) { + IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages)); + } #endif /* IOTRACKING */ - // Only complete iopls that we created which are for TypeVirtual - if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) - { - for (ind = 0; ind < count; ind++) - if (ioplList[ind].fIOPL) { - if (dataP->fCompletionError) - upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/); - else - upl_commit(ioplList[ind].fIOPL, 0, 0); - upl_deallocate(ioplList[ind].fIOPL); - } - } else if (kIOMemoryTypeUPL == type) { - upl_set_referenced(ioplList[0].fIOPL, false); - } - - (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() - - dataP->fPreparationID = kIOPreparationIDUnprepared; - _flags &= ~kIOMemoryPreparedReadOnly; - } - } - } - while (false); - - if (_prepareLock) IOLockUnlock(_prepareLock); - - return kIOReturnSuccess; + // Only complete iopls that we created which are for TypeVirtual + if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) { + for (ind = 0; ind < count; ind++) { + if (ioplList[ind].fIOPL) { + if (dataP->fCompletionError) { + upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/); + } else { + upl_commit(ioplList[ind].fIOPL, 0, 0); + } + upl_deallocate(ioplList[ind].fIOPL); + } + } + } else if (kIOMemoryTypeUPL == type) { + upl_set_referenced(ioplList[0].fIOPL, false); + } + + (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength() + + dataP->fPreparationID = kIOPreparationIDUnprepared; + _flags &= ~kIOMemoryPreparedReadOnly; + } + } + }while (false); + + if (_prepareLock) { + IOLockUnlock(_prepareLock); + } + + return kIOReturnSuccess; } -IOReturn IOGeneralMemoryDescriptor::doMap( - vm_map_t __addressMap, - IOVirtualAddress * __address, - IOOptionBits options, - IOByteCount __offset, - IOByteCount __length ) +IOReturn +IOGeneralMemoryDescriptor::doMap( + vm_map_t __addressMap, + IOVirtualAddress * __address, + IOOptionBits options, + IOByteCount __offset, + IOByteCount __length ) { #ifndef 
__LP64__ - if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit"); + if (!(kIOMap64Bit & options)) { + panic("IOGeneralMemoryDescriptor::doMap !64bit"); + } #endif /* !__LP64__ */ - kern_return_t err; + kern_return_t err; - IOMemoryMap * mapping = (IOMemoryMap *) *__address; - mach_vm_size_t offset = mapping->fOffset + __offset; - mach_vm_size_t length = mapping->fLength; + IOMemoryMap * mapping = (IOMemoryMap *) *__address; + mach_vm_size_t offset = mapping->fOffset + __offset; + mach_vm_size_t length = mapping->fLength; - IOOptionBits type = _flags & kIOMemoryTypeMask; - Ranges vec = _ranges; - - mach_vm_address_t range0Addr = 0; - mach_vm_size_t range0Len = 0; + IOOptionBits type = _flags & kIOMemoryTypeMask; + Ranges vec = _ranges; - if ((offset >= _length) || ((offset + length) > _length)) - return( kIOReturnBadArgument ); + mach_vm_address_t range0Addr = 0; + mach_vm_size_t range0Len = 0; - assert (!(kIOMemoryRemote & _flags)); - if (kIOMemoryRemote & _flags) return (0); + if ((offset >= _length) || ((offset + length) > _length)) { + return kIOReturnBadArgument; + } - if (vec.v) - getAddrLenForInd(range0Addr, range0Len, type, vec, 0); + assert(!(kIOMemoryRemote & _flags)); + if (kIOMemoryRemote & _flags) { + return 0; + } - // mapping source == dest? (could be much better) - if (_task - && (mapping->fAddressTask == _task) - && (mapping->fAddressMap == get_task_map(_task)) - && (options & kIOMapAnywhere) - && (!(kIOMapUnique & options)) - && (1 == _rangesCount) - && (0 == offset) - && range0Addr - && (length <= range0Len)) - { - mapping->fAddress = range0Addr; - mapping->fOptions |= kIOMapStatic; + if (vec.v) { + getAddrLenForInd(range0Addr, range0Len, type, vec, 0); + } - return( kIOReturnSuccess ); - } + // mapping source == dest? (could be much better) + if (_task + && (mapping->fAddressTask == _task) + && (mapping->fAddressMap == get_task_map(_task)) + && (options & kIOMapAnywhere) + && (!(kIOMapUnique & options)) + && (1 == _rangesCount) + && (0 == offset) + && range0Addr + && (length <= range0Len)) { + mapping->fAddress = range0Addr; + mapping->fOptions |= kIOMapStatic; + + return kIOReturnSuccess; + } - if (!_memRef) - { - IOOptionBits createOptions = 0; - if (!(kIOMapReadOnly & options)) - { - createOptions |= kIOMemoryReferenceWrite; + if (!_memRef) { + IOOptionBits createOptions = 0; + if (!(kIOMapReadOnly & options)) { + createOptions |= kIOMemoryReferenceWrite; #if DEVELOPMENT || DEBUG - if (kIODirectionOut == (kIODirectionOutIn & _flags)) - { - OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction"); - } + if (kIODirectionOut == (kIODirectionOutIn & _flags)) { + OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction"); + } #endif + } + err = memoryReferenceCreate(createOptions, &_memRef); + if (kIOReturnSuccess != err) { + return err; + } } - err = memoryReferenceCreate(createOptions, &_memRef); - if (kIOReturnSuccess != err) return (err); - } - memory_object_t pager; - pager = (memory_object_t) (reserved ? 
reserved->dp.devicePager : 0);
-
- // <upl_transpose //
- if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
- {
- do
- {
- upl_t redirUPL2;
- upl_size_t size;
- upl_control_flags_t flags;
- unsigned int lock_count;
-
- if (!_memRef || (1 != _memRef->count))
- {
- err = kIOReturnNotReadable;
- break;
- }
-
- size = round_page(mapping->fLength);
- flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
- | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
-
- if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
- NULL, NULL,
- &flags, getVMTag(kernel_map)))
- redirUPL2 = NULL;
-
- for (lock_count = 0;
- IORecursiveLockHaveLock(gIOMemoryLock);
- lock_count++) {
- UNLOCK;
- }
- err = upl_transpose(redirUPL2, mapping->fRedirUPL);
- for (;
- lock_count;
- lock_count--) {
- LOCK;
- }
-
- if (kIOReturnSuccess != err)
- {
- IOLog("upl_transpose(%x)\n", err);
- err = kIOReturnSuccess;
- }
-
- if (redirUPL2)
- {
- upl_commit(redirUPL2, NULL, 0);
- upl_deallocate(redirUPL2);
- redirUPL2 = 0;
- }
- {
- // swap the memEntries since they now refer to different vm_objects
- IOMemoryReference * me = _memRef;
- _memRef = mapping->fMemory->_memRef;
- mapping->fMemory->_memRef = me;
- }
- if (pager)
- err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
- }
- while (false);
- }
- // upl_transpose> //
- else
- {
- err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
-#if IOTRACKING
- if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
- {
- // only dram maps in the default on developement case
- IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
+ memory_object_t pager;
+ pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
+
+ // <upl_transpose //
+ if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
+ do{
+ upl_t redirUPL2;
+ upl_size_t size;
+ upl_control_flags_t flags;
+ unsigned int lock_count;
+
+ if (!_memRef || (1 != _memRef->count)) {
+ err = kIOReturnNotReadable;
+ break;
+ }
+
+ size = round_page(mapping->fLength);
+ flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
+ | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
+
+ if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
+ NULL, NULL,
+ &flags, getVMTag(kernel_map))) {
+ redirUPL2 = NULL;
+ }
+
+ for (lock_count = 0;
+ IORecursiveLockHaveLock(gIOMemoryLock);
+ lock_count++) {
+ UNLOCK;
+ }
+ err = upl_transpose(redirUPL2, mapping->fRedirUPL);
+ for (;
+ lock_count;
+ lock_count--) {
+ LOCK;
+ }
+
+ if (kIOReturnSuccess != err) {
+ IOLog("upl_transpose(%x)\n", err);
+ err = kIOReturnSuccess;
+ }
+
+ if (redirUPL2) {
+ upl_commit(redirUPL2, NULL, 0);
+ upl_deallocate(redirUPL2);
+ redirUPL2 = 0;
+ }
+ {
+ // swap the memEntries since they now refer to different vm_objects
+ IOMemoryReference * me = _memRef;
+ _memRef = mapping->fMemory->_memRef;
+ mapping->fMemory->_memRef = me;
+ }
+ if (pager) {
+ err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
+ }
+ }while (false);
}
+ // upl_transpose> //
+ else {
+ err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
+#if IOTRACKING
+ if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
+ // only dram maps in the default on developement case
+ IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
+ }
#endif /* IOTRACKING */
- if ((err == KERN_SUCCESS) && pager)
- {
- err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
-
- if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
- else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
- {
- mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
- }
+ if ((err == KERN_SUCCESS) && pager) {
+ err =
populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options); + + if (err != KERN_SUCCESS) { + doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0); + } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) { + mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift); + } + } } - } - return (err); + return err; } #if IOTRACKING IOReturn IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task, - mach_vm_address_t * address, mach_vm_size_t * size) + mach_vm_address_t * address, mach_vm_size_t * size) { #define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field)) - IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking)); + IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking)); - if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady); + if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) { + return kIOReturnNotReady; + } - *task = map->fAddressTask; - *address = map->fAddress; - *size = map->fLength; + *task = map->fAddressTask; + *address = map->fAddress; + *size = map->fLength; - return (kIOReturnSuccess); + return kIOReturnSuccess; } #endif /* IOTRACKING */ -IOReturn IOGeneralMemoryDescriptor::doUnmap( - vm_map_t addressMap, - IOVirtualAddress __address, - IOByteCount __length ) +IOReturn +IOGeneralMemoryDescriptor::doUnmap( + vm_map_t addressMap, + IOVirtualAddress __address, + IOByteCount __length ) { - return (super::doUnmap(addressMap, __address, __length)); + return super::doUnmap(addressMap, __address, __length); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -3810,549 +3978,585 @@ OSMetaClassDefineReservedUnused(IOMemoryMap, 6); OSMetaClassDefineReservedUnused(IOMemoryMap, 7); /* ex-inline function implementation */ -IOPhysicalAddress IOMemoryMap::getPhysicalAddress() - { return( getPhysicalSegment( 0, 0 )); } +IOPhysicalAddress +IOMemoryMap::getPhysicalAddress() +{ + return getPhysicalSegment( 0, 0 ); +} /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IOMemoryMap::init( - task_t intoTask, - mach_vm_address_t toAddress, - IOOptionBits _options, - mach_vm_size_t _offset, - mach_vm_size_t _length ) +bool +IOMemoryMap::init( + task_t intoTask, + mach_vm_address_t toAddress, + IOOptionBits _options, + mach_vm_size_t _offset, + mach_vm_size_t _length ) { - if (!intoTask) - return( false); + if (!intoTask) { + return false; + } - if (!super::init()) - return(false); + if (!super::init()) { + return false; + } - fAddressMap = get_task_map(intoTask); - if (!fAddressMap) - return(false); - vm_map_reference(fAddressMap); + fAddressMap = get_task_map(intoTask); + if (!fAddressMap) { + return false; + } + vm_map_reference(fAddressMap); - fAddressTask = intoTask; - fOptions = _options; - fLength = _length; - fOffset = _offset; - fAddress = toAddress; + fAddressTask = intoTask; + fOptions = _options; + fLength = _length; + fOffset = _offset; + fAddress = toAddress; - return (true); + return true; } -bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset) +bool +IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset) { - if (!_memory) - return(false); - - if (!fSuperMap) - { - if( (_offset + fLength) > _memory->getLength()) - return( false); - fOffset = _offset; - } - - _memory->retain(); - if (fMemory) - { - if 
(fMemory != _memory) - fMemory->removeMapping(this); - fMemory->release(); - } - fMemory = _memory; - - return( true ); + if (!_memory) { + return false; + } + + if (!fSuperMap) { + if ((_offset + fLength) > _memory->getLength()) { + return false; + } + fOffset = _offset; + } + + _memory->retain(); + if (fMemory) { + if (fMemory != _memory) { + fMemory->removeMapping(this); + } + fMemory->release(); + } + fMemory = _memory; + + return true; } -IOReturn IOMemoryDescriptor::doMap( - vm_map_t __addressMap, - IOVirtualAddress * __address, - IOOptionBits options, - IOByteCount __offset, - IOByteCount __length ) +IOReturn +IOMemoryDescriptor::doMap( + vm_map_t __addressMap, + IOVirtualAddress * __address, + IOOptionBits options, + IOByteCount __offset, + IOByteCount __length ) { - return (kIOReturnUnsupported); + return kIOReturnUnsupported; } -IOReturn IOMemoryDescriptor::handleFault( - void * _pager, - mach_vm_size_t sourceOffset, - mach_vm_size_t length) +IOReturn +IOMemoryDescriptor::handleFault( + void * _pager, + mach_vm_size_t sourceOffset, + mach_vm_size_t length) { - if( kIOMemoryRedirected & _flags) - { + if (kIOMemoryRedirected & _flags) { #if DEBUG - IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset); + IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset); #endif - do { - SLEEP; - } while( kIOMemoryRedirected & _flags ); - } - return (kIOReturnSuccess); + do { + SLEEP; + } while (kIOMemoryRedirected & _flags); + } + return kIOReturnSuccess; } -IOReturn IOMemoryDescriptor::populateDevicePager( - void * _pager, - vm_map_t addressMap, - mach_vm_address_t address, - mach_vm_size_t sourceOffset, - mach_vm_size_t length, - IOOptionBits options ) +IOReturn +IOMemoryDescriptor::populateDevicePager( + void * _pager, + vm_map_t addressMap, + mach_vm_address_t address, + mach_vm_size_t sourceOffset, + mach_vm_size_t length, + IOOptionBits options ) { - IOReturn err = kIOReturnSuccess; - memory_object_t pager = (memory_object_t) _pager; - mach_vm_size_t size; - mach_vm_size_t bytes; - mach_vm_size_t page; - mach_vm_size_t pageOffset; - mach_vm_size_t pagerOffset; - IOPhysicalLength segLen, chunk; - addr64_t physAddr; - IOOptionBits type; - - type = _flags & kIOMemoryTypeMask; - - if (reserved->dp.pagerContig) - { - sourceOffset = 0; - pagerOffset = 0; - } - - physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ); - assert( physAddr ); - pageOffset = physAddr - trunc_page_64( physAddr ); - pagerOffset = sourceOffset; - - size = length + pageOffset; - physAddr -= pageOffset; - - segLen += pageOffset; - bytes = size; - do - { - // in the middle of the loop only map whole pages - if( segLen >= bytes) segLen = bytes; - else if (segLen != trunc_page(segLen)) err = kIOReturnVMError; - if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument; - - if (kIOReturnSuccess != err) break; + IOReturn err = kIOReturnSuccess; + memory_object_t pager = (memory_object_t) _pager; + mach_vm_size_t size; + mach_vm_size_t bytes; + mach_vm_size_t page; + mach_vm_size_t pageOffset; + mach_vm_size_t pagerOffset; + IOPhysicalLength segLen, chunk; + addr64_t physAddr; + IOOptionBits type; + + type = _flags & kIOMemoryTypeMask; + + if (reserved->dp.pagerContig) { + sourceOffset = 0; + pagerOffset = 0; + } + + physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ); + assert( physAddr ); + pageOffset = physAddr - trunc_page_64( physAddr ); + pagerOffset = sourceOffset; + + size = length + pageOffset; + physAddr -= pageOffset; + + segLen += pageOffset; + bytes = 
size; + do{ + // in the middle of the loop only map whole pages + if (segLen >= bytes) { + segLen = bytes; + } else if (segLen != trunc_page(segLen)) { + err = kIOReturnVMError; + } + if (physAddr != trunc_page_64(physAddr)) { + err = kIOReturnBadArgument; + } + + if (kIOReturnSuccess != err) { + break; + } #if DEBUG || DEVELOPMENT - if ((kIOMemoryTypeUPL != type) - && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) - { - OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen); - } + if ((kIOMemoryTypeUPL != type) + && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) { + OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen); + } #endif /* DEBUG || DEVELOPMENT */ - chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size); - for (page = 0; - (page < segLen) && (KERN_SUCCESS == err); - page += chunk) - { - err = device_pager_populate_object(pager, pagerOffset, - (ppnum_t)(atop_64(physAddr + page)), chunk); - pagerOffset += chunk; - } - - assert (KERN_SUCCESS == err); - if (err) break; - - // This call to vm_fault causes an early pmap level resolution - // of the mappings created above for kernel mappings, since - // faulting in later can't take place from interrupt level. - if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) - { - err = vm_fault(addressMap, - (vm_map_offset_t)trunc_page_64(address), - options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, - (vm_map_offset_t)0); + chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size); + for (page = 0; + (page < segLen) && (KERN_SUCCESS == err); + page += chunk) { + err = device_pager_populate_object(pager, pagerOffset, + (ppnum_t)(atop_64(physAddr + page)), chunk); + pagerOffset += chunk; + } - if (KERN_SUCCESS != err) break; - } + assert(KERN_SUCCESS == err); + if (err) { + break; + } + + // This call to vm_fault causes an early pmap level resolution + // of the mappings created above for kernel mappings, since + // faulting in later can't take place from interrupt level. + if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) { + err = vm_fault(addressMap, + (vm_map_offset_t)trunc_page_64(address), + options & kIOMapReadOnly ? 
VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, + (vm_map_offset_t)0); + + if (KERN_SUCCESS != err) { + break; + } + } - sourceOffset += segLen - pageOffset; - address += segLen; - bytes -= segLen; - pageOffset = 0; - } - while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ))); + sourceOffset += segLen - pageOffset; + address += segLen; + bytes -= segLen; + pageOffset = 0; + }while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone ))); - if (bytes) - err = kIOReturnBadArgument; + if (bytes) { + err = kIOReturnBadArgument; + } - return (err); + return err; } -IOReturn IOMemoryDescriptor::doUnmap( - vm_map_t addressMap, - IOVirtualAddress __address, - IOByteCount __length ) +IOReturn +IOMemoryDescriptor::doUnmap( + vm_map_t addressMap, + IOVirtualAddress __address, + IOByteCount __length ) { - IOReturn err; - IOMemoryMap * mapping; - mach_vm_address_t address; - mach_vm_size_t length; - - if (__length) panic("doUnmap"); - - mapping = (IOMemoryMap *) __address; - addressMap = mapping->fAddressMap; - address = mapping->fAddress; - length = mapping->fLength; - - if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS; - else - { - if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) - addressMap = IOPageableMapForAddress( address ); + IOReturn err; + IOMemoryMap * mapping; + mach_vm_address_t address; + mach_vm_size_t length; + + if (__length) { + panic("doUnmap"); + } + + mapping = (IOMemoryMap *) __address; + addressMap = mapping->fAddressMap; + address = mapping->fAddress; + length = mapping->fLength; + + if (kIOMapOverwrite & mapping->fOptions) { + err = KERN_SUCCESS; + } else { + if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) { + addressMap = IOPageableMapForAddress( address ); + } #if DEBUG - if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n", - addressMap, address, length ); + if (kIOLogMapping & gIOKitDebug) { + IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n", + addressMap, address, length ); + } #endif - err = mach_vm_deallocate( addressMap, address, length ); - } + err = mach_vm_deallocate( addressMap, address, length ); + } #if IOTRACKING - IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking); + IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking); #endif /* IOTRACKING */ - return (err); + return err; } -IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) +IOReturn +IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) { - IOReturn err = kIOReturnSuccess; - IOMemoryMap * mapping = 0; - OSIterator * iter; - - LOCK; - - if( doRedirect) - _flags |= kIOMemoryRedirected; - else - _flags &= ~kIOMemoryRedirected; + IOReturn err = kIOReturnSuccess; + IOMemoryMap * mapping = 0; + OSIterator * iter; - do { - if( (iter = OSCollectionIterator::withCollection( _mappings))) { - - memory_object_t pager; + LOCK; - if( reserved) - pager = (memory_object_t) reserved->dp.devicePager; - else - pager = MACH_PORT_NULL; + if (doRedirect) { + _flags |= kIOMemoryRedirected; + } else { + _flags &= ~kIOMemoryRedirected; + } - while( (mapping = (IOMemoryMap *) iter->getNextObject())) - { - mapping->redirect( safeTask, doRedirect ); - if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) - { - err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache ); + do { + 
if ((iter = OSCollectionIterator::withCollection( _mappings))) { + memory_object_t pager; + + if (reserved) { + pager = (memory_object_t) reserved->dp.devicePager; + } else { + pager = MACH_PORT_NULL; + } + + while ((mapping = (IOMemoryMap *) iter->getNextObject())) { + mapping->redirect( safeTask, doRedirect ); + if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) { + err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache ); + } + } + + iter->release(); } - } + } while (false); - iter->release(); + if (!doRedirect) { + WAKEUP; } - } while( false ); - - if (!doRedirect) - { - WAKEUP; - } - UNLOCK; + UNLOCK; #ifndef __LP64__ - // temporary binary compatibility - IOSubMemoryDescriptor * subMem; - if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) - err = subMem->redirect( safeTask, doRedirect ); - else - err = kIOReturnSuccess; + // temporary binary compatibility + IOSubMemoryDescriptor * subMem; + if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) { + err = subMem->redirect( safeTask, doRedirect ); + } else { + err = kIOReturnSuccess; + } #endif /* !__LP64__ */ - return( err ); + return err; } -IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) +IOReturn +IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) { - IOReturn err = kIOReturnSuccess; + IOReturn err = kIOReturnSuccess; - if( fSuperMap) { + if (fSuperMap) { // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect ); - } else { - - LOCK; - - do - { - if (!fAddress) - break; - if (!fAddressMap) - break; - - if ((!safeTask || (get_task_map(safeTask) != fAddressMap)) - && (0 == (fOptions & kIOMapStatic))) - { - IOUnmapPages( fAddressMap, fAddress, fLength ); - err = kIOReturnSuccess; + } else { + LOCK; + + do{ + if (!fAddress) { + break; + } + if (!fAddressMap) { + break; + } + + if ((!safeTask || (get_task_map(safeTask) != fAddressMap)) + && (0 == (fOptions & kIOMapStatic))) { + IOUnmapPages( fAddressMap, fAddress, fLength ); + err = kIOReturnSuccess; #if DEBUG - IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap); + IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap); #endif - } - else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) - { - IOOptionBits newMode; - newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache); - IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode); - } - } - while (false); - UNLOCK; - } + } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) { + IOOptionBits newMode; + newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? 
kIOMapInhibitCache : kIOMapWriteCombineCache); + IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode); + } + }while (false); + UNLOCK; + } - if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) - || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) - && safeTask - && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) - fMemory->redirect(safeTask, doRedirect); + if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) + || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) + && safeTask + && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) { + fMemory->redirect(safeTask, doRedirect); + } - return( err ); + return err; } -IOReturn IOMemoryMap::unmap( void ) +IOReturn +IOMemoryMap::unmap( void ) { - IOReturn err; - - LOCK; + IOReturn err; - if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory - && (0 == (kIOMapStatic & fOptions))) { - - err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0); + LOCK; - } else - err = kIOReturnSuccess; + if (fAddress && fAddressMap && (0 == fSuperMap) && fMemory + && (0 == (kIOMapStatic & fOptions))) { + err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0); + } else { + err = kIOReturnSuccess; + } - if (fAddressMap) - { - vm_map_deallocate(fAddressMap); - fAddressMap = 0; - } + if (fAddressMap) { + vm_map_deallocate(fAddressMap); + fAddressMap = 0; + } - fAddress = 0; + fAddress = 0; - UNLOCK; + UNLOCK; - return( err ); + return err; } -void IOMemoryMap::taskDied( void ) +void +IOMemoryMap::taskDied( void ) { - LOCK; - if (fUserClientUnmap) unmap(); + LOCK; + if (fUserClientUnmap) { + unmap(); + } #if IOTRACKING - else IOTrackingRemoveUser(gIOMapTracking, &fTracking); + else { + IOTrackingRemoveUser(gIOMapTracking, &fTracking); + } #endif /* IOTRACKING */ - if( fAddressMap) { - vm_map_deallocate(fAddressMap); - fAddressMap = 0; - } - fAddressTask = 0; - fAddress = 0; - UNLOCK; + if (fAddressMap) { + vm_map_deallocate(fAddressMap); + fAddressMap = 0; + } + fAddressTask = 0; + fAddress = 0; + UNLOCK; } -IOReturn IOMemoryMap::userClientUnmap( void ) +IOReturn +IOMemoryMap::userClientUnmap( void ) { - fUserClientUnmap = true; - return (kIOReturnSuccess); + fUserClientUnmap = true; + return kIOReturnSuccess; } // Overload the release mechanism. All mappings must be a member // of a memory descriptors _mappings set. This means that we // always have 2 references on a mapping. When either of these mappings // are released we need to free ourselves. 
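// (A hedged illustration of the two-reference rule described above, from the
// client side; `desc`, `task`, and use() are hypothetical names, not from
// this patch. The map returned by createMappingInTask() is also retained by
// the descriptor's _mappings set, so taggedRelease(tag, 2) below frees the
// object only once both the client's reference and the set's are gone:
//     IOMemoryMap * map = desc->createMappingInTask(task, 0, kIOMapAnywhere);
//     if (map) {
//         use(map->getAddress(), map->getLength());
//         map->release(); // client ref; the set's ref drops in IOMemoryMap::free()
//     }
// )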
-void IOMemoryMap::taggedRelease(const void *tag) const +void +IOMemoryMap::taggedRelease(const void *tag) const { - LOCK; - super::taggedRelease(tag, 2); - UNLOCK; + LOCK; + super::taggedRelease(tag, 2); + UNLOCK; } -void IOMemoryMap::free() +void +IOMemoryMap::free() { - unmap(); + unmap(); - if (fMemory) - { - LOCK; - fMemory->removeMapping(this); - UNLOCK; - fMemory->release(); - } + if (fMemory) { + LOCK; + fMemory->removeMapping(this); + UNLOCK; + fMemory->release(); + } - if (fOwner && (fOwner != fMemory)) - { - LOCK; - fOwner->removeMapping(this); - UNLOCK; - } + if (fOwner && (fOwner != fMemory)) { + LOCK; + fOwner->removeMapping(this); + UNLOCK; + } - if (fSuperMap) - fSuperMap->release(); + if (fSuperMap) { + fSuperMap->release(); + } - if (fRedirUPL) { - upl_commit(fRedirUPL, NULL, 0); - upl_deallocate(fRedirUPL); - } + if (fRedirUPL) { + upl_commit(fRedirUPL, NULL, 0); + upl_deallocate(fRedirUPL); + } - super::free(); + super::free(); } -IOByteCount IOMemoryMap::getLength() +IOByteCount +IOMemoryMap::getLength() { - return( fLength ); + return fLength; } -IOVirtualAddress IOMemoryMap::getVirtualAddress() +IOVirtualAddress +IOMemoryMap::getVirtualAddress() { #ifndef __LP64__ - if (fSuperMap) - fSuperMap->getVirtualAddress(); - else if (fAddressMap - && vm_map_is_64bit(fAddressMap) - && (sizeof(IOVirtualAddress) < 8)) - { - OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress); - } + if (fSuperMap) { + fSuperMap->getVirtualAddress(); + } else if (fAddressMap + && vm_map_is_64bit(fAddressMap) + && (sizeof(IOVirtualAddress) < 8)) { + OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress); + } #endif /* !__LP64__ */ - return (fAddress); + return fAddress; } #ifndef __LP64__ -mach_vm_address_t IOMemoryMap::getAddress() +mach_vm_address_t +IOMemoryMap::getAddress() { - return( fAddress); + return fAddress; } -mach_vm_size_t IOMemoryMap::getSize() +mach_vm_size_t +IOMemoryMap::getSize() { - return( fLength ); + return fLength; } #endif /* !__LP64__ */ -task_t IOMemoryMap::getAddressTask() +task_t +IOMemoryMap::getAddressTask() { - if( fSuperMap) - return( fSuperMap->getAddressTask()); - else - return( fAddressTask); + if (fSuperMap) { + return fSuperMap->getAddressTask(); + } else { + return fAddressTask; + } } -IOOptionBits IOMemoryMap::getMapOptions() +IOOptionBits +IOMemoryMap::getMapOptions() { - return( fOptions); + return fOptions; } -IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor() +IOMemoryDescriptor * +IOMemoryMap::getMemoryDescriptor() { - return( fMemory ); + return fMemory; } -IOMemoryMap * IOMemoryMap::copyCompatible( - IOMemoryMap * newMapping ) +IOMemoryMap * +IOMemoryMap::copyCompatible( + IOMemoryMap * newMapping ) { - task_t task = newMapping->getAddressTask(); - mach_vm_address_t toAddress = newMapping->fAddress; - IOOptionBits _options = newMapping->fOptions; - mach_vm_size_t _offset = newMapping->fOffset; - mach_vm_size_t _length = newMapping->fLength; - - if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) - return( 0 ); - if( (fOptions ^ _options) & kIOMapReadOnly) - return( 0 ); - if( (kIOMapDefaultCache != (_options & kIOMapCacheMask)) - && ((fOptions ^ _options) & kIOMapCacheMask)) - return( 0 ); - - if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) - return( 0 ); - - if( _offset < fOffset) - return( 0 ); - - _offset -= fOffset; - - if( (_offset + _length) > fLength) - return( 0 ); - - retain(); - if( 
(fLength == _length) && (!_offset)) - { - newMapping = this; - } - else - { - newMapping->fSuperMap = this; - newMapping->fOffset = fOffset + _offset; - newMapping->fAddress = fAddress + _offset; - } - - return( newMapping ); + task_t task = newMapping->getAddressTask(); + mach_vm_address_t toAddress = newMapping->fAddress; + IOOptionBits _options = newMapping->fOptions; + mach_vm_size_t _offset = newMapping->fOffset; + mach_vm_size_t _length = newMapping->fLength; + + if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) { + return 0; + } + if ((fOptions ^ _options) & kIOMapReadOnly) { + return 0; + } + if ((kIOMapDefaultCache != (_options & kIOMapCacheMask)) + && ((fOptions ^ _options) & kIOMapCacheMask)) { + return 0; + } + + if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) { + return 0; + } + + if (_offset < fOffset) { + return 0; + } + + _offset -= fOffset; + + if ((_offset + _length) > fLength) { + return 0; + } + + retain(); + if ((fLength == _length) && (!_offset)) { + newMapping = this; + } else { + newMapping->fSuperMap = this; + newMapping->fOffset = fOffset + _offset; + newMapping->fAddress = fAddress + _offset; + } + + return newMapping; } -IOReturn IOMemoryMap::wireRange( - uint32_t options, - mach_vm_size_t offset, - mach_vm_size_t length) +IOReturn +IOMemoryMap::wireRange( + uint32_t options, + mach_vm_size_t offset, + mach_vm_size_t length) { - IOReturn kr; - mach_vm_address_t start = trunc_page_64(fAddress + offset); - mach_vm_address_t end = round_page_64(fAddress + offset + length); - vm_prot_t prot; - - prot = (kIODirectionOutIn & options); - if (prot) - { - kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE); - } - else - { - kr = vm_map_unwire(fAddressMap, start, end, FALSE); - } - - return (kr); + IOReturn kr; + mach_vm_address_t start = trunc_page_64(fAddress + offset); + mach_vm_address_t end = round_page_64(fAddress + offset + length); + vm_prot_t prot; + + prot = (kIODirectionOutIn & options); + if (prot) { + kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE); + } else { + kr = vm_map_unwire(fAddressMap, start, end, FALSE); + } + + return kr; } -IOPhysicalAddress +IOPhysicalAddress #ifdef __LP64__ IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options) #else /* !__LP64__ */ IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length) #endif /* !__LP64__ */ { - IOPhysicalAddress address; + IOPhysicalAddress address; - LOCK; + LOCK; #ifdef __LP64__ - address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options ); + address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options ); #else /* !__LP64__ */ - address = fMemory->getPhysicalSegment( fOffset + _offset, _length ); + address = fMemory->getPhysicalSegment( fOffset + _offset, _length ); #endif /* !__LP64__ */ - UNLOCK; + UNLOCK; - return( address ); + return address; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -4362,457 +4566,477 @@ IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IOMemoryDescriptor::initialize( void ) +void +IOMemoryDescriptor::initialize( void ) { - if( 0 == gIOMemoryLock) - gIOMemoryLock = IORecursiveLockAlloc(); + if (0 == gIOMemoryLock) { + gIOMemoryLock = IORecursiveLockAlloc(); + } - gIOLastPage = IOGetLastPageNumber(); + 
gIOLastPage = IOGetLastPageNumber(); } -void IOMemoryDescriptor::free( void ) +void +IOMemoryDescriptor::free( void ) { - if( _mappings) _mappings->release(); - - if (reserved) - { - IODelete(reserved, IOMemoryDescriptorReserved, 1); - reserved = NULL; - } - super::free(); + if (_mappings) { + _mappings->release(); + } + + if (reserved) { + IODelete(reserved, IOMemoryDescriptorReserved, 1); + reserved = NULL; + } + super::free(); } -IOMemoryMap * IOMemoryDescriptor::setMapping( - task_t intoTask, - IOVirtualAddress mapAddress, - IOOptionBits options ) +IOMemoryMap * +IOMemoryDescriptor::setMapping( + task_t intoTask, + IOVirtualAddress mapAddress, + IOOptionBits options ) { - return (createMappingInTask( intoTask, mapAddress, - options | kIOMapStatic, - 0, getLength() )); + return createMappingInTask( intoTask, mapAddress, + options | kIOMapStatic, + 0, getLength()); } -IOMemoryMap * IOMemoryDescriptor::map( - IOOptionBits options ) +IOMemoryMap * +IOMemoryDescriptor::map( + IOOptionBits options ) { - return (createMappingInTask( kernel_task, 0, - options | kIOMapAnywhere, - 0, getLength() )); + return createMappingInTask( kernel_task, 0, + options | kIOMapAnywhere, + 0, getLength()); } #ifndef __LP64__ -IOMemoryMap * IOMemoryDescriptor::map( - task_t intoTask, - IOVirtualAddress atAddress, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ) +IOMemoryMap * +IOMemoryDescriptor::map( + task_t intoTask, + IOVirtualAddress atAddress, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ) { - if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) - { - OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()"); - return (0); - } - - return (createMappingInTask(intoTask, atAddress, - options, offset, length)); + if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) { + OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()"); + return 0; + } + + return createMappingInTask(intoTask, atAddress, + options, offset, length); } #endif /* !__LP64__ */ -IOMemoryMap * IOMemoryDescriptor::createMappingInTask( - task_t intoTask, - mach_vm_address_t atAddress, - IOOptionBits options, - mach_vm_size_t offset, - mach_vm_size_t length) +IOMemoryMap * +IOMemoryDescriptor::createMappingInTask( + task_t intoTask, + mach_vm_address_t atAddress, + IOOptionBits options, + mach_vm_size_t offset, + mach_vm_size_t length) { - IOMemoryMap * result; - IOMemoryMap * mapping; + IOMemoryMap * result; + IOMemoryMap * mapping; - if (0 == length) - length = getLength(); + if (0 == length) { + length = getLength(); + } - mapping = new IOMemoryMap; + mapping = new IOMemoryMap; - if( mapping - && !mapping->init( intoTask, atAddress, - options, offset, length )) { - mapping->release(); - mapping = 0; - } + if (mapping + && !mapping->init( intoTask, atAddress, + options, offset, length )) { + mapping->release(); + mapping = 0; + } - if (mapping) - result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0); - else - result = 0; + if (mapping) { + result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0); + } else { + result = 0; + } #if DEBUG - if (!result) - IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n", - this, atAddress, (uint32_t) options, offset, length); + if (!result) { + IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n", + 
this, atAddress, (uint32_t) options, offset, length); + } #endif - return (result); + return result; } #ifndef __LP64__ // there is only a 64 bit version for LP64 -IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, - IOOptionBits options, - IOByteCount offset) +IOReturn +IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, + IOOptionBits options, + IOByteCount offset) { - return (redirect(newBackingMemory, options, (mach_vm_size_t)offset)); + return redirect(newBackingMemory, options, (mach_vm_size_t)offset); } #endif -IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, - IOOptionBits options, - mach_vm_size_t offset) +IOReturn +IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, + IOOptionBits options, + mach_vm_size_t offset) { - IOReturn err = kIOReturnSuccess; - IOMemoryDescriptor * physMem = 0; - - LOCK; + IOReturn err = kIOReturnSuccess; + IOMemoryDescriptor * physMem = 0; - if (fAddress && fAddressMap) do - { - if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) - || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) - { - physMem = fMemory; - physMem->retain(); - } + LOCK; - if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) - { - upl_size_t size = round_page(fLength); - upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL - | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; - if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL, - NULL, NULL, - &flags, fMemory->getVMTag(kernel_map))) - fRedirUPL = 0; - - if (physMem) - { - IOUnmapPages( fAddressMap, fAddress, fLength ); - if ((false)) - physMem->redirect(0, true); - } - } - - if (newBackingMemory) - { - if (newBackingMemory != fMemory) - { - fOffset = 0; - if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this, - options | kIOMapUnique | kIOMapReference | kIOMap64Bit, - offset, fLength)) - err = kIOReturnError; - } - if (fRedirUPL) - { - upl_commit(fRedirUPL, NULL, 0); - upl_deallocate(fRedirUPL); - fRedirUPL = 0; - } - if ((false) && physMem) - physMem->redirect(0, false); + if (fAddress && fAddressMap) { + do{ + if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) + || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) { + physMem = fMemory; + physMem->retain(); + } + + if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) { + upl_size_t size = round_page(fLength); + upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL + | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; + if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL, + NULL, NULL, + &flags, fMemory->getVMTag(kernel_map))) { + fRedirUPL = 0; + } + + if (physMem) { + IOUnmapPages( fAddressMap, fAddress, fLength ); + if ((false)) { + physMem->redirect(0, true); + } + } + } + + if (newBackingMemory) { + if (newBackingMemory != fMemory) { + fOffset = 0; + if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this, + options | kIOMapUnique | kIOMapReference | kIOMap64Bit, + offset, fLength)) { + err = kIOReturnError; + } + } + if (fRedirUPL) { + upl_commit(fRedirUPL, NULL, 0); + upl_deallocate(fRedirUPL); + fRedirUPL = 0; + } + if ((false) && physMem) { + physMem->redirect(0, false); + } + } + }while (false); } - } - while (false); - UNLOCK; + UNLOCK; - if (physMem) - physMem->release(); + if (physMem) { + 
physMem->release(); + } - return (err); + return err; } -IOMemoryMap * IOMemoryDescriptor::makeMapping( - IOMemoryDescriptor * owner, - task_t __intoTask, - IOVirtualAddress __address, - IOOptionBits options, - IOByteCount __offset, - IOByteCount __length ) +IOMemoryMap * +IOMemoryDescriptor::makeMapping( + IOMemoryDescriptor * owner, + task_t __intoTask, + IOVirtualAddress __address, + IOOptionBits options, + IOByteCount __offset, + IOByteCount __length ) { #ifndef __LP64__ - if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit"); + if (!(kIOMap64Bit & options)) { + panic("IOMemoryDescriptor::makeMapping !64bit"); + } #endif /* !__LP64__ */ - IOMemoryDescriptor * mapDesc = 0; - __block IOMemoryMap * result = 0; + IOMemoryDescriptor * mapDesc = 0; + __block IOMemoryMap * result = 0; - IOMemoryMap * mapping = (IOMemoryMap *) __address; - mach_vm_size_t offset = mapping->fOffset + __offset; - mach_vm_size_t length = mapping->fLength; + IOMemoryMap * mapping = (IOMemoryMap *) __address; + mach_vm_size_t offset = mapping->fOffset + __offset; + mach_vm_size_t length = mapping->fLength; - mapping->fOffset = offset; + mapping->fOffset = offset; - LOCK; + LOCK; - do - { - if (kIOMapStatic & options) - { - result = mapping; - addMapping(mapping); - mapping->setMemoryDescriptor(this, 0); - continue; - } + do{ + if (kIOMapStatic & options) { + result = mapping; + addMapping(mapping); + mapping->setMemoryDescriptor(this, 0); + continue; + } - if (kIOMapUnique & options) - { - addr64_t phys; - IOByteCount physLen; + if (kIOMapUnique & options) { + addr64_t phys; + IOByteCount physLen; // if (owner != this) continue; - if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) - || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) - { - phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); - if (!phys || (physLen < length)) - continue; - - mapDesc = IOMemoryDescriptor::withAddressRange( - phys, length, getDirection() | kIOMemoryMapperNone, NULL); - if (!mapDesc) - continue; - offset = 0; - mapping->fOffset = offset; - } - } - else - { - // look for a compatible existing mapping - if (_mappings) _mappings->iterateObjects(^(OSObject * object) - { - IOMemoryMap * lookMapping = (IOMemoryMap *) object; - if ((result = lookMapping->copyCompatible(mapping))) - { - addMapping(result); - result->setMemoryDescriptor(this, offset); - return (true); - } - return (false); - }); - if (result || (options & kIOMapReference)) - { - if (result != mapping) - { - mapping->release(); - mapping = NULL; - } - continue; - } - } - - if (!mapDesc) - { - mapDesc = this; - mapDesc->retain(); - } - IOReturn - kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 ); - if (kIOReturnSuccess == kr) - { - result = mapping; - mapDesc->addMapping(result); - result->setMemoryDescriptor(mapDesc, offset); - } - else - { - mapping->release(); - mapping = NULL; - } - } - while( false ); + if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) + || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) { + phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); + if (!phys || (physLen < length)) { + continue; + } + + mapDesc = IOMemoryDescriptor::withAddressRange( + phys, length, getDirection() | kIOMemoryMapperNone, NULL); + if (!mapDesc) { + continue; + } + offset = 0; + mapping->fOffset = offset; + } + } else { + // look for a compatible existing mapping + if (_mappings) { + _mappings->iterateObjects(^(OSObject * object) + { + IOMemoryMap * lookMapping = 
(IOMemoryMap *) object; + if ((result = lookMapping->copyCompatible(mapping))) { + addMapping(result); + result->setMemoryDescriptor(this, offset); + return true; + } + return false; + }); + } + if (result || (options & kIOMapReference)) { + if (result != mapping) { + mapping->release(); + mapping = NULL; + } + continue; + } + } + + if (!mapDesc) { + mapDesc = this; + mapDesc->retain(); + } + IOReturn + kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 ); + if (kIOReturnSuccess == kr) { + result = mapping; + mapDesc->addMapping(result); + result->setMemoryDescriptor(mapDesc, offset); + } else { + mapping->release(); + mapping = NULL; + } + }while (false); - UNLOCK; + UNLOCK; - if (mapDesc) - mapDesc->release(); + if (mapDesc) { + mapDesc->release(); + } - return (result); + return result; } -void IOMemoryDescriptor::addMapping( +void +IOMemoryDescriptor::addMapping( IOMemoryMap * mapping ) { - if( mapping) - { - if( 0 == _mappings) - _mappings = OSSet::withCapacity(1); - if( _mappings ) - _mappings->setObject( mapping ); - } + if (mapping) { + if (0 == _mappings) { + _mappings = OSSet::withCapacity(1); + } + if (_mappings) { + _mappings->setObject( mapping ); + } + } } -void IOMemoryDescriptor::removeMapping( +void +IOMemoryDescriptor::removeMapping( IOMemoryMap * mapping ) { - if( _mappings) - _mappings->removeObject( mapping); + if (_mappings) { + _mappings->removeObject( mapping); + } } #ifndef __LP64__ // obsolete initializers -// - initWithOptions is the designated initializer +// - initWithOptions is the designated initializer bool IOMemoryDescriptor::initWithAddress(void * address, - IOByteCount length, - IODirection direction) + IOByteCount length, + IODirection direction) { - return( false ); + return false; } bool IOMemoryDescriptor::initWithAddress(IOVirtualAddress address, - IOByteCount length, - IODirection direction, - task_t task) + IOByteCount length, + IODirection direction, + task_t task) { - return( false ); + return false; } bool IOMemoryDescriptor::initWithPhysicalAddress( - IOPhysicalAddress address, - IOByteCount length, - IODirection direction ) + IOPhysicalAddress address, + IOByteCount length, + IODirection direction ) { - return( false ); + return false; } bool IOMemoryDescriptor::initWithRanges( - IOVirtualRange * ranges, - UInt32 withCount, - IODirection direction, - task_t task, - bool asReference) + IOVirtualRange * ranges, + UInt32 withCount, + IODirection direction, + task_t task, + bool asReference) { - return( false ); + return false; } bool -IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, - UInt32 withCount, - IODirection direction, - bool asReference) +IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, + UInt32 withCount, + IODirection direction, + bool asReference) { - return( false ); + return false; } -void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset, - IOByteCount * lengthOfSegment) +void * +IOMemoryDescriptor::getVirtualSegment(IOByteCount offset, + IOByteCount * lengthOfSegment) { - return( 0 ); + return 0; } #endif /* !__LP64__ */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const +bool +IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const { - OSSymbol const *keys[2] = {0}; - OSObject *values[2] = {0}; - OSArray * array; - vm_size_t vcopy_size; - - struct SerData { - user_addr_t address; - user_size_t length; - } *vcopy = NULL; - unsigned int index, nRanges; - bool result = 
false; - - IOOptionBits type = _flags & kIOMemoryTypeMask; - - if (s == NULL) return false; - - array = OSArray::withCapacity(4); - if (!array) return (false); - - nRanges = _rangesCount; - if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) { - result = false; - goto bail; - } - vcopy = (SerData *) IOMalloc(vcopy_size); - if (vcopy == 0) { - result = false; - goto bail; - } - - keys[0] = OSSymbol::withCString("address"); - keys[1] = OSSymbol::withCString("length"); - - // Copy the volatile data so we don't have to allocate memory - // while the lock is held. - LOCK; - if (nRanges == _rangesCount) { - Ranges vec = _ranges; - for (index = 0; index < nRanges; index++) { - mach_vm_address_t addr; mach_vm_size_t len; - getAddrLenForInd(addr, len, type, vec, index); - vcopy[index].address = addr; - vcopy[index].length = len; - } - } else { - // The descriptor changed out from under us. Give up. - UNLOCK; - result = false; - goto bail; - } - UNLOCK; - - for (index = 0; index < nRanges; index++) - { - user_addr_t addr = vcopy[index].address; - IOByteCount len = (IOByteCount) vcopy[index].length; - values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8); - if (values[0] == 0) { - result = false; - goto bail; - } - values[1] = OSNumber::withNumber(len, sizeof(len) * 8); - if (values[1] == 0) { - result = false; - goto bail; - } - OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2); - if (dict == 0) { - result = false; - goto bail; - } - array->setObject(dict); - dict->release(); - values[0]->release(); - values[1]->release(); - values[0] = values[1] = 0; - } - - result = array->serialize(s); - - bail: - if (array) - array->release(); - if (values[0]) - values[0]->release(); - if (values[1]) - values[1]->release(); - if (keys[0]) - keys[0]->release(); - if (keys[1]) - keys[1]->release(); - if (vcopy) - IOFree(vcopy, vcopy_size); - - return result; + OSSymbol const *keys[2] = {0}; + OSObject *values[2] = {0}; + OSArray * array; + vm_size_t vcopy_size; + + struct SerData { + user_addr_t address; + user_size_t length; + } *vcopy = NULL; + unsigned int index, nRanges; + bool result = false; + + IOOptionBits type = _flags & kIOMemoryTypeMask; + + if (s == NULL) { + return false; + } + + array = OSArray::withCapacity(4); + if (!array) { + return false; + } + + nRanges = _rangesCount; + if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) { + result = false; + goto bail; + } + vcopy = (SerData *) IOMalloc(vcopy_size); + if (vcopy == 0) { + result = false; + goto bail; + } + + keys[0] = OSSymbol::withCString("address"); + keys[1] = OSSymbol::withCString("length"); + + // Copy the volatile data so we don't have to allocate memory + // while the lock is held. + LOCK; + if (nRanges == _rangesCount) { + Ranges vec = _ranges; + for (index = 0; index < nRanges; index++) { + mach_vm_address_t addr; mach_vm_size_t len; + getAddrLenForInd(addr, len, type, vec, index); + vcopy[index].address = addr; + vcopy[index].length = len; + } + } else { + // The descriptor changed out from under us. Give up. 
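// (Sketch of the copy-then-work idiom used above, not part of the patch:
// vcopy is IOMalloc'ed before taking the lock, the ranges are snapshotted and
// _rangesCount is rechecked under LOCK, and only after UNLOCK are the
// OSNumber/OSDictionary objects created — so nothing allocates while
// gIOMemoryLock is held:
//     vcopy = (SerData *) IOMalloc(vcopy_size); // allocate up front
//     LOCK;
//     /* copy ranges into vcopy; bail out if _rangesCount changed */
//     UNLOCK;
//     /* build OSObjects from vcopy, lock-free */
// )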
+ UNLOCK; + result = false; + goto bail; + } + UNLOCK; + + for (index = 0; index < nRanges; index++) { + user_addr_t addr = vcopy[index].address; + IOByteCount len = (IOByteCount) vcopy[index].length; + values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8); + if (values[0] == 0) { + result = false; + goto bail; + } + values[1] = OSNumber::withNumber(len, sizeof(len) * 8); + if (values[1] == 0) { + result = false; + goto bail; + } + OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2); + if (dict == 0) { + result = false; + goto bail; + } + array->setObject(dict); + dict->release(); + values[0]->release(); + values[1]->release(); + values[0] = values[1] = 0; + } + + result = array->serialize(s); + +bail: + if (array) { + array->release(); + } + if (values[0]) { + values[0]->release(); + } + if (values[1]) { + values[1]->release(); + } + if (keys[0]) { + keys[0]->release(); + } + if (keys[1]) { + keys[1]->release(); + } + if (vcopy) { + IOFree(vcopy, vcopy_size); + } + + return result; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -4845,6 +5069,8 @@ OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14); OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15); /* ex-inline function implementation */ -IOPhysicalAddress +IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress() - { return( getPhysicalSegment( 0, 0 )); } +{ + return getPhysicalSegment( 0, 0 ); +} diff --git a/iokit/Kernel/IOMultiMemoryDescriptor.cpp b/iokit/Kernel/IOMultiMemoryDescriptor.cpp index d54824088..3418e41a8 100644 --- a/iokit/Kernel/IOMultiMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMultiMemoryDescriptor.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,392 +33,418 @@ OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor) IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors( - IOMemoryDescriptor ** descriptors, - UInt32 withCount, - IODirection withDirection, - bool asReference ) + IOMemoryDescriptor * *descriptors, + UInt32 withCount, + IODirection withDirection, + bool asReference ) { - // - // Create a new IOMultiMemoryDescriptor. The "buffer" is made up of several - // memory descriptors, that are to be chained end-to-end to make up a single - // memory descriptor. - // - // Passing the ranges as a reference will avoid an extra allocation. 
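// (Illustrative only — chaining two existing sub-descriptors `a` and `b`,
// hypothetical names not taken from this patch, into one logical buffer of
// length a->getLength() + b->getLength():
//     IOMemoryDescriptor * parts[2] = { a, b };
//     IOMultiMemoryDescriptor * multi =
//         IOMultiMemoryDescriptor::withDescriptors(parts, 2,
//             kIODirectionOutIn, false /* copy the array */);
// Every sub-descriptor must share the direction passed in, per the assert in
// initWithDescriptors() below.)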
- // - - IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor; - - if ( me && me->initWithDescriptors( - /* descriptors */ descriptors, - /* withCount */ withCount, - /* withDirection */ withDirection, - /* asReference */ asReference ) == false ) - { - me->release(); - me = 0; - } - - return me; + // + // Create a new IOMultiMemoryDescriptor. The "buffer" is made up of several + // memory descriptors, that are to be chained end-to-end to make up a single + // memory descriptor. + // + // Passing the ranges as a reference will avoid an extra allocation. + // + + IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor; + + if (me && me->initWithDescriptors( + /* descriptors */ descriptors, + /* withCount */ withCount, + /* withDirection */ withDirection, + /* asReference */ asReference ) == false) { + me->release(); + me = 0; + } + + return me; } -bool IOMultiMemoryDescriptor::initWithDescriptors( - IOMemoryDescriptor ** descriptors, - UInt32 withCount, - IODirection withDirection, - bool asReference ) +bool +IOMultiMemoryDescriptor::initWithDescriptors( + IOMemoryDescriptor ** descriptors, + UInt32 withCount, + IODirection withDirection, + bool asReference ) { - unsigned index; - IOOptionBits copyFlags; - // - // Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several - // memory descriptors, that are to be chained end-to-end to make up a single - // memory descriptor. - // - // Passing the ranges as a reference will avoid an extra allocation. - // - - assert(descriptors); - - // Release existing descriptors, if any - if ( _descriptors ) - { - for ( unsigned index = 0; index < _descriptorsCount; index++ ) - _descriptors[index]->release(); - - if ( _descriptorsIsAllocated ) - IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount); - } else { - // Ask our superclass' opinion. - if ( super::init() == false ) return false; - } - - // Initialize our minimal state. - - _descriptors = 0; - _descriptorsCount = withCount; - _descriptorsIsAllocated = asReference ? false : true; - _flags = withDirection; + unsigned index; + IOOptionBits copyFlags; + // + // Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several + // memory descriptors, that are to be chained end-to-end to make up a single + // memory descriptor. + // + // Passing the ranges as a reference will avoid an extra allocation. + // + + assert(descriptors); + + // Release existing descriptors, if any + if (_descriptors) { + for (unsigned index = 0; index < _descriptorsCount; index++) { + _descriptors[index]->release(); + } + + if (_descriptorsIsAllocated) { + IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount); + } + } else { + // Ask our superclass' opinion. + if (super::init() == false) { + return false; + } + } + + // Initialize our minimal state. + + _descriptors = 0; + _descriptorsCount = withCount; + _descriptorsIsAllocated = asReference ? 
false : true; + _flags = withDirection; #ifndef __LP64__ - _direction = (IODirection) (_flags & kIOMemoryDirectionMask); + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); #endif /* !__LP64__ */ - _length = 0; - _mappings = 0; - _tag = 0; - - if ( asReference ) - { - _descriptors = descriptors; - } - else - { - _descriptors = IONew(IOMemoryDescriptor *, withCount); - if ( _descriptors == 0 ) return false; - - bcopy( /* from */ descriptors, - /* to */ _descriptors, - /* bytes */ withCount * sizeof(IOMemoryDescriptor *) ); - } - - for ( index = 0; index < withCount; index++ ) - { - descriptors[index]->retain(); - _length += descriptors[index]->getLength(); - if ( _tag == 0 ) _tag = descriptors[index]->getTag(); - assert(descriptors[index]->getDirection() == - (withDirection & kIOMemoryDirectionMask)); - } - - enum { kCopyFlags = kIOMemoryBufferPageable }; - copyFlags = 0; - for ( index = 0; index < withCount; index++ ) - { - if (!index) copyFlags = (kCopyFlags & descriptors[index]->_flags); - else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) break; - } - if (index < withCount) return (false); - _flags |= copyFlags; - - return true; -} - -void IOMultiMemoryDescriptor::free() -{ - // - // Free all of this object's outstanding resources. - // + _length = 0; + _mappings = 0; + _tag = 0; + + if (asReference) { + _descriptors = descriptors; + } else { + _descriptors = IONew(IOMemoryDescriptor *, withCount); + if (_descriptors == 0) { + return false; + } + + bcopy( /* from */ descriptors, + /* to */ _descriptors, + /* bytes */ withCount * sizeof(IOMemoryDescriptor *)); + } - if ( _descriptors ) - { - for ( unsigned index = 0; index < _descriptorsCount; index++ ) - _descriptors[index]->release(); + for (index = 0; index < withCount; index++) { + descriptors[index]->retain(); + _length += descriptors[index]->getLength(); + if (_tag == 0) { + _tag = descriptors[index]->getTag(); + } + assert(descriptors[index]->getDirection() == + (withDirection & kIOMemoryDirectionMask)); + } - if ( _descriptorsIsAllocated ) - IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount); - } + enum { kCopyFlags = kIOMemoryBufferPageable }; + copyFlags = 0; + for (index = 0; index < withCount; index++) { + if (!index) { + copyFlags = (kCopyFlags & descriptors[index]->_flags); + } else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) { + break; + } + } + if (index < withCount) { + return false; + } + _flags |= copyFlags; - super::free(); + return true; } -IOReturn IOMultiMemoryDescriptor::prepare(IODirection forDirection) +void +IOMultiMemoryDescriptor::free() { - // - // Prepare the memory for an I/O transfer. - // - // This involves paging in the memory and wiring it down for the duration - // of the transfer. The complete() method finishes the processing of the - // memory after the I/O transfer finishes. - // - - unsigned index; - IOReturn status = kIOReturnInternalError; - IOReturn statusUndo; - - if ( forDirection == kIODirectionNone ) - { - forDirection = getDirection(); - } - - for ( index = 0; index < _descriptorsCount; index++ ) - { - status = _descriptors[index]->prepare(forDirection); - if ( status != kIOReturnSuccess ) break; - } - - if ( status != kIOReturnSuccess ) - { - for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ ) - { - statusUndo = _descriptors[indexUndo]->complete(forDirection); - assert(statusUndo == kIOReturnSuccess); - } - } - - return status; + // + // Free all of this object's outstanding resources. 
+ // + + if (_descriptors) { + for (unsigned index = 0; index < _descriptorsCount; index++) { + _descriptors[index]->release(); + } + + if (_descriptorsIsAllocated) { + IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount); + } + } + + super::free(); } -IOReturn IOMultiMemoryDescriptor::complete(IODirection forDirection) +IOReturn +IOMultiMemoryDescriptor::prepare(IODirection forDirection) { - // - // Complete processing of the memory after an I/O transfer finishes. - // - // This method shouldn't be called unless a prepare() was previously issued; - // the prepare() and complete() must occur in pairs, before and after an I/O - // transfer. - // - - IOReturn status; - IOReturn statusFinal = kIOReturnSuccess; - - if ( forDirection == kIODirectionNone ) - { - forDirection = getDirection(); - } - - for ( unsigned index = 0; index < _descriptorsCount; index++ ) - { - status = _descriptors[index]->complete(forDirection); - if ( status != kIOReturnSuccess ) statusFinal = status; - assert(status == kIOReturnSuccess); - } - - return statusFinal; + // + // Prepare the memory for an I/O transfer. + // + // This involves paging in the memory and wiring it down for the duration + // of the transfer. The complete() method finishes the processing of the + // memory after the I/O transfer finishes. + // + + unsigned index; + IOReturn status = kIOReturnInternalError; + IOReturn statusUndo; + + if (forDirection == kIODirectionNone) { + forDirection = getDirection(); + } + + for (index = 0; index < _descriptorsCount; index++) { + status = _descriptors[index]->prepare(forDirection); + if (status != kIOReturnSuccess) { + break; + } + } + + if (status != kIOReturnSuccess) { + for (unsigned indexUndo = 0; indexUndo < index; indexUndo++) { + statusUndo = _descriptors[indexUndo]->complete(forDirection); + assert(statusUndo == kIOReturnSuccess); + } + } + + return status; } -addr64_t IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount offset, - IOByteCount * length, - IOOptionBits options) +IOReturn +IOMultiMemoryDescriptor::complete(IODirection forDirection) { - // - // This method returns the physical address of the byte at the given offset - // into the memory, and optionally the length of the physically contiguous - // segment from that offset. - // - - assert(offset <= _length); - - for ( unsigned index = 0; index < _descriptorsCount; index++ ) - { - if ( offset < _descriptors[index]->getLength() ) - { - return _descriptors[index]->getPhysicalSegment(offset, length, options); - } - offset -= _descriptors[index]->getLength(); - } - - if ( length ) *length = 0; - - return 0; -} + // + // Complete processing of the memory after an I/O transfer finishes. + // + // This method shouldn't be called unless a prepare() was previously issued; + // the prepare() and complete() must occur in pairs, before and after an I/O + // transfer. 
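// (A minimal sketch of that prepare()/complete() pairing, with hypothetical
// `md` and startDMA(); not part of the patch:
//     if (kIOReturnSuccess == md->prepare(kIODirectionOutIn)) {
//         startDMA(md);                    // pages remain wired here
//         md->complete(kIODirectionOutIn); // always pair with prepare()
//     }
// Passing kIODirectionNone makes both calls fall back to getDirection(), as
// the code below does.)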
+ // + + IOReturn status; + IOReturn statusFinal = kIOReturnSuccess; + + if (forDirection == kIODirectionNone) { + forDirection = getDirection(); + } -#include "IOKitKernelInternal.h" + for (unsigned index = 0; index < _descriptorsCount; index++) { + status = _descriptors[index]->complete(forDirection); + if (status != kIOReturnSuccess) { + statusFinal = status; + } + assert(status == kIOReturnSuccess); + } + + return statusFinal; +} -IOReturn IOMultiMemoryDescriptor::doMap(vm_map_t __addressMap, - IOVirtualAddress * __address, - IOOptionBits options, - IOByteCount __offset, - IOByteCount __length) +addr64_t +IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount offset, + IOByteCount * length, + IOOptionBits options) { - IOMemoryMap * mapping = (IOMemoryMap *) *__address; - vm_map_t map = mapping->fAddressMap; - mach_vm_size_t offset = mapping->fOffset; - mach_vm_size_t length = mapping->fLength; - mach_vm_address_t address = mapping->fAddress; - - kern_return_t err; - IOOptionBits subOptions; - mach_vm_size_t mapOffset; - mach_vm_size_t bytesRemaining, chunk; - mach_vm_address_t nextAddress; - IOMemoryDescriptorMapAllocRef ref; - vm_prot_t prot; - - do - { - prot = VM_PROT_READ; - if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE; - - if (kIOMapOverwrite & options) - { - if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) - { - map = IOPageableMapForAddress(address); - } - err = KERN_SUCCESS; + // + // This method returns the physical address of the byte at the given offset + // into the memory, and optionally the length of the physically contiguous + // segment from that offset. + // + + assert(offset <= _length); + + for (unsigned index = 0; index < _descriptorsCount; index++) { + if (offset < _descriptors[index]->getLength()) { + return _descriptors[index]->getPhysicalSegment(offset, length, options); + } + offset -= _descriptors[index]->getLength(); } - else - { - ref.map = map; - ref.tag = IOMemoryTag(map); - ref.options = options; - ref.size = length; - ref.prot = prot; - if (options & kIOMapAnywhere) - // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE - ref.mapped = 0; - else - ref.mapped = mapping->fAddress; - - if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) - err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref); - else - err = IOMemoryDescriptorMapAlloc(ref.map, &ref); - - if (KERN_SUCCESS != err) break; - - address = ref.mapped; - mapping->fAddress = address; + + if (length) { + *length = 0; } - mapOffset = offset; - bytesRemaining = length; - nextAddress = address; - assert(mapOffset <= _length); - subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite; - - for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++) - { - chunk = _descriptors[index]->getLength(); - if (mapOffset >= chunk) - { - mapOffset -= chunk; - continue; - } - chunk -= mapOffset; - if (chunk > bytesRemaining) chunk = bytesRemaining; - IOMemoryMap * subMap; - subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk ); - if (!subMap) break; - subMap->release(); // kIOMapOverwrite means it will not deallocate - - bytesRemaining -= chunk; - nextAddress += chunk; - mapOffset = 0; - } - if (bytesRemaining) err = kIOReturnUnderrun; - } - while (false); - - if (kIOReturnSuccess == err) - { + return 0; +} + +#include "IOKitKernelInternal.h" + +IOReturn +IOMultiMemoryDescriptor::doMap(vm_map_t __addressMap, + IOVirtualAddress * __address, + IOOptionBits 
options, + IOByteCount __offset, + IOByteCount __length) +{ + IOMemoryMap * mapping = (IOMemoryMap *) *__address; + vm_map_t map = mapping->fAddressMap; + mach_vm_size_t offset = mapping->fOffset; + mach_vm_size_t length = mapping->fLength; + mach_vm_address_t address = mapping->fAddress; + + kern_return_t err; + IOOptionBits subOptions; + mach_vm_size_t mapOffset; + mach_vm_size_t bytesRemaining, chunk; + mach_vm_address_t nextAddress; + IOMemoryDescriptorMapAllocRef ref; + vm_prot_t prot; + + do{ + prot = VM_PROT_READ; + if (!(kIOMapReadOnly & options)) { + prot |= VM_PROT_WRITE; + } + + if (kIOMapOverwrite & options) { + if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) { + map = IOPageableMapForAddress(address); + } + err = KERN_SUCCESS; + } else { + ref.map = map; + ref.tag = IOMemoryTag(map); + ref.options = options; + ref.size = length; + ref.prot = prot; + if (options & kIOMapAnywhere) { + // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE + ref.mapped = 0; + } else { + ref.mapped = mapping->fAddress; + } + + if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) { + err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref); + } else { + err = IOMemoryDescriptorMapAlloc(ref.map, &ref); + } + + if (KERN_SUCCESS != err) { + break; + } + + address = ref.mapped; + mapping->fAddress = address; + } + + mapOffset = offset; + bytesRemaining = length; + nextAddress = address; + assert(mapOffset <= _length); + subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite; + + for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++) { + chunk = _descriptors[index]->getLength(); + if (mapOffset >= chunk) { + mapOffset -= chunk; + continue; + } + chunk -= mapOffset; + if (chunk > bytesRemaining) { + chunk = bytesRemaining; + } + IOMemoryMap * subMap; + subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk ); + if (!subMap) { + break; + } + subMap->release(); // kIOMapOverwrite means it will not deallocate + + bytesRemaining -= chunk; + nextAddress += chunk; + mapOffset = 0; + } + if (bytesRemaining) { + err = kIOReturnUnderrun; + } + }while (false); + + if (kIOReturnSuccess == err) { #if IOTRACKING - IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength); + IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength); #endif - } + } - return (err); + return err; } -IOReturn IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ) +IOReturn +IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) { - IOReturn err; - IOOptionBits totalState, state; - - totalState = kIOMemoryPurgeableNonVolatile; - err = kIOReturnSuccess; - for (unsigned index = 0; index < _descriptorsCount; index++) - { - err = _descriptors[index]->setPurgeable(newState, &state); - if (kIOReturnSuccess != err) break; - - if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty; - else if (kIOMemoryPurgeableEmpty == totalState) continue; - else if (kIOMemoryPurgeableVolatile == totalState) continue; - else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile; - else totalState = kIOMemoryPurgeableNonVolatile; - } - if (oldState) *oldState = totalState; - - return (err); + IOReturn err; + IOOptionBits totalState, state; + + totalState = kIOMemoryPurgeableNonVolatile; + err = kIOReturnSuccess; + for (unsigned index = 0; index < _descriptorsCount; 
index++) { + err = _descriptors[index]->setPurgeable(newState, &state); + if (kIOReturnSuccess != err) { + break; + } + + if (kIOMemoryPurgeableEmpty == state) { + totalState = kIOMemoryPurgeableEmpty; + } else if (kIOMemoryPurgeableEmpty == totalState) { + continue; + } else if (kIOMemoryPurgeableVolatile == totalState) { + continue; + } else if (kIOMemoryPurgeableVolatile == state) { + totalState = kIOMemoryPurgeableVolatile; + } else { + totalState = kIOMemoryPurgeableNonVolatile; + } + } + if (oldState) { + *oldState = totalState; + } + + return err; } -IOReturn IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount, - IOByteCount * pDirtyPageCount) +IOReturn +IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount, + IOByteCount * pDirtyPageCount) { - IOReturn err; - IOByteCount totalResidentPageCount, totalDirtyPageCount; - IOByteCount residentPageCount, dirtyPageCount; - - err = kIOReturnSuccess; - totalResidentPageCount = totalDirtyPageCount = 0; - for (unsigned index = 0; index < _descriptorsCount; index++) - { - err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount); - if (kIOReturnSuccess != err) break; - totalResidentPageCount += residentPageCount; - totalDirtyPageCount += dirtyPageCount; - } - - if (pResidentPageCount) *pResidentPageCount = totalResidentPageCount; - if (pDirtyPageCount) *pDirtyPageCount = totalDirtyPageCount; - - return (err); + IOReturn err; + IOByteCount totalResidentPageCount, totalDirtyPageCount; + IOByteCount residentPageCount, dirtyPageCount; + + err = kIOReturnSuccess; + totalResidentPageCount = totalDirtyPageCount = 0; + for (unsigned index = 0; index < _descriptorsCount; index++) { + err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount); + if (kIOReturnSuccess != err) { + break; + } + totalResidentPageCount += residentPageCount; + totalDirtyPageCount += dirtyPageCount; + } + + if (pResidentPageCount) { + *pResidentPageCount = totalResidentPageCount; + } + if (pDirtyPageCount) { + *pDirtyPageCount = totalDirtyPageCount; + } + + return err; } -uint64_t IOMultiMemoryDescriptor::getPreparationID( void ) +uint64_t +IOMultiMemoryDescriptor::getPreparationID( void ) { + if (!super::getKernelReserved()) { + return kIOPreparationIDUnsupported; + } - if (!super::getKernelReserved()) - { - return (kIOPreparationIDUnsupported); - } - - for (unsigned index = 0; index < _descriptorsCount; index++) - { - uint64_t preparationID = _descriptors[index]->getPreparationID(); + for (unsigned index = 0; index < _descriptorsCount; index++) { + uint64_t preparationID = _descriptors[index]->getPreparationID(); - if ( preparationID == kIOPreparationIDUnsupported ) - { - return (kIOPreparationIDUnsupported); - } + if (preparationID == kIOPreparationIDUnsupported) { + return kIOPreparationIDUnsupported; + } - if ( preparationID == kIOPreparationIDUnprepared ) - { - return (kIOPreparationIDUnprepared); - } - } + if (preparationID == kIOPreparationIDUnprepared) { + return kIOPreparationIDUnprepared; + } + } - super::setPreparationID(); + super::setPreparationID(); - return (super::getPreparationID()); + return super::getPreparationID(); } diff --git a/iokit/Kernel/IONVRAM.cpp b/iokit/Kernel/IONVRAM.cpp index 4814258d1..88d58595f 100644 --- a/iokit/Kernel/IONVRAM.cpp +++ b/iokit/Kernel/IONVRAM.cpp @@ -3,7 +3,7 @@ * Copyright (c) 2007-2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +12,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,7 +23,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,1497 +38,1713 @@ #define super IOService -#define kIONVRAMPrivilege kIOClientPrivilegeAdministrator +#define kIONVRAMPrivilege kIOClientPrivilegeAdministrator //#define kIONVRAMPrivilege kIOClientPrivilegeLocalUser OSDefineMetaClassAndStructors(IODTNVRAM, IOService); -bool IODTNVRAM::init(IORegistryEntry *old, const IORegistryPlane *plane) +bool +IODTNVRAM::init(IORegistryEntry *old, const IORegistryPlane *plane) { - OSDictionary *dict; - - if (!super::init(old, plane)) return false; - - dict = OSDictionary::withCapacity(1); - if (dict == 0) return false; - setPropertyTable(dict); - - _nvramImage = IONew(UInt8, kIODTNVRAMImageSize); - if (_nvramImage == 0) return false; - - _nvramPartitionOffsets = OSDictionary::withCapacity(1); - if (_nvramPartitionOffsets == 0) return false; - - _nvramPartitionLengths = OSDictionary::withCapacity(1); - if (_nvramPartitionLengths == 0) return false; - - _registryPropertiesKey = OSSymbol::withCStringNoCopy("aapl,pci"); - if (_registryPropertiesKey == 0) return false; - - // race condition possible between - // IODTNVRAM and IONVRAMController (restore loses boot-args) - initProxyData(); - - return true; + OSDictionary *dict; + + if (!super::init(old, plane)) { + return false; + } + + dict = OSDictionary::withCapacity(1); + if (dict == 0) { + return false; + } + setPropertyTable(dict); + + _nvramImage = IONew(UInt8, kIODTNVRAMImageSize); + if (_nvramImage == 0) { + return false; + } + + _nvramPartitionOffsets = OSDictionary::withCapacity(1); + if (_nvramPartitionOffsets == 0) { + return false; + } + + _nvramPartitionLengths = OSDictionary::withCapacity(1); + if (_nvramPartitionLengths == 0) { + return false; + } + + _registryPropertiesKey = OSSymbol::withCStringNoCopy("aapl,pci"); + if (_registryPropertiesKey == 0) { + return false; + } + + // race condition possible between + // IODTNVRAM and IONVRAMController (restore loses boot-args) + initProxyData(); + + return true; } -void IODTNVRAM::initProxyData(void) +void +IODTNVRAM::initProxyData(void) { - IORegistryEntry *entry; - const char *key = "nvram-proxy-data"; - OSObject *prop; - OSData *data; - const void *bytes; - - entry = IORegistryEntry::fromPath("/chosen", gIODTPlane); - if (entry != 0) { - prop = entry->getProperty(key); - if (prop != 0) { - data = OSDynamicCast(OSData, prop); - if (data != 0) { - bytes = data->getBytesNoCopy(); - if ((bytes != 0) && (data->getLength() <= kIODTNVRAMImageSize)) { - bcopy(bytes, _nvramImage, 
data->getLength()); - initNVRAMImage(); - _isProxied = true; - } - } - } - entry->removeProperty(key); - entry->release(); - } + IORegistryEntry *entry; + const char *key = "nvram-proxy-data"; + OSObject *prop; + OSData *data; + const void *bytes; + + entry = IORegistryEntry::fromPath("/chosen", gIODTPlane); + if (entry != 0) { + prop = entry->getProperty(key); + if (prop != 0) { + data = OSDynamicCast(OSData, prop); + if (data != 0) { + bytes = data->getBytesNoCopy(); + if ((bytes != 0) && (data->getLength() <= kIODTNVRAMImageSize)) { + bcopy(bytes, _nvramImage, data->getLength()); + initNVRAMImage(); + _isProxied = true; + } + } + } + entry->removeProperty(key); + entry->release(); + } } -void IODTNVRAM::registerNVRAMController(IONVRAMController *nvram) +void +IODTNVRAM::registerNVRAMController(IONVRAMController *nvram) { - if (_nvramController != 0) return; - - _nvramController = nvram; - - // race condition possible between - // IODTNVRAM and IONVRAMController (restore loses boot-args) - if (!_isProxied) { - _nvramController->read(0, _nvramImage, kIODTNVRAMImageSize); - initNVRAMImage(); - } else { - IOLockLock(_ofLock); - (void) syncVariables(); - IOLockUnlock(_ofLock); - } + if (_nvramController != 0) { + return; + } + + _nvramController = nvram; + + // race condition possible between + // IODTNVRAM and IONVRAMController (restore loses boot-args) + if (!_isProxied) { + _nvramController->read(0, _nvramImage, kIODTNVRAMImageSize); + initNVRAMImage(); + } else { + IOLockLock(_ofLock); + (void) syncVariables(); + IOLockUnlock(_ofLock); + } } -void IODTNVRAM::initNVRAMImage(void) +void +IODTNVRAM::initNVRAMImage(void) { - char partitionID[18]; - UInt32 partitionOffset, partitionLength; - UInt32 freePartitionOffset, freePartitionSize; - UInt32 currentLength, currentOffset = 0; - OSNumber *partitionOffsetNumber, *partitionLengthNumber; - - // Find the offsets for the OF, XPRAM, NameRegistry and PanicInfo partitions. - _ofPartitionOffset = 0xFFFFFFFF; - _piPartitionOffset = 0xFFFFFFFF; - freePartitionOffset = 0xFFFFFFFF; - freePartitionSize = 0; - - // Look through the partitions to find the OF, MacOS partitions. - while (currentOffset < kIODTNVRAMImageSize) { - currentLength = ((UInt16 *)(_nvramImage + currentOffset))[1] * 16; - - if (currentLength < 16) break; - partitionOffset = currentOffset + 16; - partitionLength = currentLength - 16; - if ((partitionOffset + partitionLength) > kIODTNVRAMImageSize) break; - - if (strncmp((const char *)_nvramImage + currentOffset + 4, - kIODTNVRAMOFPartitionName, 12) == 0) { - _ofPartitionOffset = partitionOffset; - _ofPartitionSize = partitionLength; - } else if (strncmp((const char *)_nvramImage + currentOffset + 4, - kIODTNVRAMXPRAMPartitionName, 12) == 0) { - } else if (strncmp((const char *)_nvramImage + currentOffset + 4, - kIODTNVRAMPanicInfoPartitonName, 12) == 0) { - _piPartitionOffset = partitionOffset; - _piPartitionSize = partitionLength; - } else if (strncmp((const char *)_nvramImage + currentOffset + 4, - kIODTNVRAMFreePartitionName, 12) == 0) { - freePartitionOffset = currentOffset; - freePartitionSize = currentLength; - } else { - // Construct the partition ID from the signature and name. 
- snprintf(partitionID, sizeof(partitionID), "0x%02x,", - *(UInt8 *)(_nvramImage + currentOffset)); - strncpy(partitionID + 5, - (const char *)(_nvramImage + currentOffset + 4), 12); - partitionID[17] = '\0'; - - partitionOffsetNumber = OSNumber::withNumber(partitionOffset, 32); - partitionLengthNumber = OSNumber::withNumber(partitionLength, 32); - - // Save the partition offset and length - _nvramPartitionOffsets->setObject(partitionID, partitionOffsetNumber); - _nvramPartitionLengths->setObject(partitionID, partitionLengthNumber); - - partitionOffsetNumber->release(); - partitionLengthNumber->release(); - } - currentOffset += currentLength; - } - - if (_ofPartitionOffset != 0xFFFFFFFF) - _ofImage = _nvramImage + _ofPartitionOffset; - - if (_piPartitionOffset == 0xFFFFFFFF) { - if (freePartitionSize > 0x20) { - // Set the signature to 0xa1. - _nvramImage[freePartitionOffset] = 0xa1; - // Set the checksum to 0. - _nvramImage[freePartitionOffset + 1] = 0; - // Set the name for the Panic Info partition. - strncpy((char *)(_nvramImage + freePartitionOffset + 4), - kIODTNVRAMPanicInfoPartitonName, 12); - - // Calculate the partition offset and size. - _piPartitionOffset = freePartitionOffset + 0x10; - _piPartitionSize = 0x800; - if (_piPartitionSize + 0x20 > freePartitionSize) - _piPartitionSize = freePartitionSize - 0x20; - - _piImage = _nvramImage + _piPartitionOffset; - - // Zero the new partition. - bzero(_piImage, _piPartitionSize); - - // Set the partition size. - *(UInt16 *)(_nvramImage + freePartitionOffset + 2) = - (_piPartitionSize / 0x10) + 1; - - // Set the partition checksum. - _nvramImage[freePartitionOffset + 1] = - calculatePartitionChecksum(_nvramImage + freePartitionOffset); - - // Calculate the free partition offset and size. - freePartitionOffset += _piPartitionSize + 0x10; - freePartitionSize -= _piPartitionSize + 0x10; - - // Set the signature to 0x7f. - _nvramImage[freePartitionOffset] = 0x7f; - // Set the checksum to 0. - _nvramImage[freePartitionOffset + 1] = 0; - // Set the name for the free partition. - strncpy((char *)(_nvramImage + freePartitionOffset + 4), - kIODTNVRAMFreePartitionName, 12); - // Set the partition size. - *(UInt16 *)(_nvramImage + freePartitionOffset + 2) = - freePartitionSize / 0x10; - // Set the partition checksum. - _nvramImage[freePartitionOffset + 1] = - calculatePartitionChecksum(_nvramImage + freePartitionOffset); - - if (_nvramController != 0) { - _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); - } - } - } else { - _piImage = _nvramImage + _piPartitionOffset; - } - - _lastDeviceSync = 0; - _freshInterval = TRUE; // we will allow sync() even before the first 15 minutes have passed. - - initOFVariables(); + char partitionID[18]; + UInt32 partitionOffset, partitionLength; + UInt32 freePartitionOffset, freePartitionSize; + UInt32 currentLength, currentOffset = 0; + OSNumber *partitionOffsetNumber, *partitionLengthNumber; + + // Find the offsets for the OF, XPRAM, NameRegistry and PanicInfo partitions. + _ofPartitionOffset = 0xFFFFFFFF; + _piPartitionOffset = 0xFFFFFFFF; + freePartitionOffset = 0xFFFFFFFF; + freePartitionSize = 0; + + // Look through the partitions to find the OF, MacOS partitions. 
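// The scan below walks a chain of 16-byte partition headers laid end to end:
// byte 0 is a signature, byte 1 a checksum (computed with this byte zeroed;
// see calculatePartitionChecksum() later in this file), bytes 2-3 the total
// partition length in 16-byte units (header included), and bytes 4-15 the
// name. A hypothetical standalone reader, with the layout inferred from the
// parsing code:
#include <stdint.h>
#include <string.h>

struct NVPartHeader {
	uint8_t  signature;
	uint8_t  checksum;
	uint16_t lengthUnits;   // total size / 16, header included, native-endian
	char     name[12];      // not NUL-terminated
};

static bool
nextPartition(const uint8_t *image, uint32_t imageSize, uint32_t *offset,
    struct NVPartHeader *out)
{
	if (*offset + sizeof(*out) > imageSize) {
		return false;
	}
	memcpy(out, image + *offset, sizeof(*out));
	uint32_t bytes = (uint32_t)out->lengthUnits * 16;
	if (bytes < 16 || *offset + bytes > imageSize) {
		return false;   // corrupt chain: stop scanning, as the loop below does
	}
	*offset += bytes;       // advance to the next header
	return true;
}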
+ while (currentOffset < kIODTNVRAMImageSize) { + currentLength = ((UInt16 *)(_nvramImage + currentOffset))[1] * 16; + + if (currentLength < 16) { + break; + } + partitionOffset = currentOffset + 16; + partitionLength = currentLength - 16; + if ((partitionOffset + partitionLength) > kIODTNVRAMImageSize) { + break; + } + + if (strncmp((const char *)_nvramImage + currentOffset + 4, + kIODTNVRAMOFPartitionName, 12) == 0) { + _ofPartitionOffset = partitionOffset; + _ofPartitionSize = partitionLength; + } else if (strncmp((const char *)_nvramImage + currentOffset + 4, + kIODTNVRAMXPRAMPartitionName, 12) == 0) { + } else if (strncmp((const char *)_nvramImage + currentOffset + 4, + kIODTNVRAMPanicInfoPartitonName, 12) == 0) { + _piPartitionOffset = partitionOffset; + _piPartitionSize = partitionLength; + } else if (strncmp((const char *)_nvramImage + currentOffset + 4, + kIODTNVRAMFreePartitionName, 12) == 0) { + freePartitionOffset = currentOffset; + freePartitionSize = currentLength; + } else { + // Construct the partition ID from the signature and name. + snprintf(partitionID, sizeof(partitionID), "0x%02x,", + *(UInt8 *)(_nvramImage + currentOffset)); + strncpy(partitionID + 5, + (const char *)(_nvramImage + currentOffset + 4), 12); + partitionID[17] = '\0'; + + partitionOffsetNumber = OSNumber::withNumber(partitionOffset, 32); + partitionLengthNumber = OSNumber::withNumber(partitionLength, 32); + + // Save the partition offset and length + _nvramPartitionOffsets->setObject(partitionID, partitionOffsetNumber); + _nvramPartitionLengths->setObject(partitionID, partitionLengthNumber); + + partitionOffsetNumber->release(); + partitionLengthNumber->release(); + } + currentOffset += currentLength; + } + + if (_ofPartitionOffset != 0xFFFFFFFF) { + _ofImage = _nvramImage + _ofPartitionOffset; + } + + if (_piPartitionOffset == 0xFFFFFFFF) { + if (freePartitionSize > 0x20) { + // Set the signature to 0xa1. + _nvramImage[freePartitionOffset] = 0xa1; + // Set the checksum to 0. + _nvramImage[freePartitionOffset + 1] = 0; + // Set the name for the Panic Info partition. + strncpy((char *)(_nvramImage + freePartitionOffset + 4), + kIODTNVRAMPanicInfoPartitonName, 12); + + // Calculate the partition offset and size. + _piPartitionOffset = freePartitionOffset + 0x10; + _piPartitionSize = 0x800; + if (_piPartitionSize + 0x20 > freePartitionSize) { + _piPartitionSize = freePartitionSize - 0x20; + } + + _piImage = _nvramImage + _piPartitionOffset; + + // Zero the new partition. + bzero(_piImage, _piPartitionSize); + + // Set the partition size. + *(UInt16 *)(_nvramImage + freePartitionOffset + 2) = + (_piPartitionSize / 0x10) + 1; + + // Set the partition checksum. + _nvramImage[freePartitionOffset + 1] = + calculatePartitionChecksum(_nvramImage + freePartitionOffset); + + // Calculate the free partition offset and size. + freePartitionOffset += _piPartitionSize + 0x10; + freePartitionSize -= _piPartitionSize + 0x10; + + // Set the signature to 0x7f. + _nvramImage[freePartitionOffset] = 0x7f; + // Set the checksum to 0. + _nvramImage[freePartitionOffset + 1] = 0; + // Set the name for the free partition. + strncpy((char *)(_nvramImage + freePartitionOffset + 4), + kIODTNVRAMFreePartitionName, 12); + // Set the partition size. + *(UInt16 *)(_nvramImage + freePartitionOffset + 2) = + freePartitionSize / 0x10; + // Set the partition checksum. 
+ _nvramImage[freePartitionOffset + 1] = + calculatePartitionChecksum(_nvramImage + freePartitionOffset); + + if (_nvramController != 0) { + _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); + } + } + } else { + _piImage = _nvramImage + _piPartitionOffset; + } + + _lastDeviceSync = 0; + _freshInterval = TRUE; // we will allow sync() even before the first 15 minutes have passed. + + initOFVariables(); } -void IODTNVRAM::syncInternal(bool rateLimit) +void +IODTNVRAM::syncInternal(bool rateLimit) { - // Don't try to perform controller operations if none has been registered. - if (_nvramController == 0) return; - - // Rate limit requests to sync. Drivers that need this rate limiting will - // shadow the data and only write to flash when they get a sync call - if (rateLimit && !safeToSync()) return; - - _nvramController->sync(); + // Don't try to perform controller operations if none has been registered. + if (_nvramController == 0) { + return; + } + + // Rate limit requests to sync. Drivers that need this rate limiting will + // shadow the data and only write to flash when they get a sync call + if (rateLimit && !safeToSync()) { + return; + } + + _nvramController->sync(); } -void IODTNVRAM::sync(void) +void +IODTNVRAM::sync(void) { - syncInternal(false); + syncInternal(false); } -bool IODTNVRAM::serializeProperties(OSSerialize *s) const +bool +IODTNVRAM::serializeProperties(OSSerialize *s) const { - bool result, hasPrivilege; - UInt32 variablePerm; - const OSSymbol *key; - OSDictionary *dict; - OSCollectionIterator *iter = 0; - - // Verify permissions. - hasPrivilege = (kIOReturnSuccess == IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege)); - - if (_ofDict == 0) { - /* No nvram. Return an empty dictionary. */ - dict = OSDictionary::withCapacity(1); - if (dict == 0) return false; - } else { - IOLockLock(_ofLock); - dict = OSDictionary::withDictionary(_ofDict); - IOLockUnlock(_ofLock); - if (dict == 0) return false; - - /* Copy properties with client privilege. */ - iter = OSCollectionIterator::withCollection(dict); - if (iter == 0) { - dict->release(); - return false; - } - while (1) { - key = OSDynamicCast(OSSymbol, iter->getNextObject()); - if (key == 0) break; - - variablePerm = getOFVariablePerm(key); - if ((hasPrivilege || (variablePerm != kOFVariablePermRootOnly)) && - ( ! (variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task) )) { } - else { - dict->removeObject(key); - iter->reset(); - } - } - } - - result = dict->serialize(s); - - dict->release(); - if (iter != 0) iter->release(); - - return result; + bool result, hasPrivilege; + UInt32 variablePerm; + const OSSymbol *key; + OSDictionary *dict; + OSCollectionIterator *iter = 0; + + // Verify permissions. + hasPrivilege = (kIOReturnSuccess == IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege)); + + if (_ofDict == 0) { + /* No nvram. Return an empty dictionary. */ + dict = OSDictionary::withCapacity(1); + if (dict == 0) { + return false; + } + } else { + IOLockLock(_ofLock); + dict = OSDictionary::withDictionary(_ofDict); + IOLockUnlock(_ofLock); + if (dict == 0) { + return false; + } + + /* Copy properties with client privilege. 
*/ + iter = OSCollectionIterator::withCollection(dict); + if (iter == 0) { + dict->release(); + return false; + } + while (1) { + key = OSDynamicCast(OSSymbol, iter->getNextObject()); + if (key == 0) { + break; + } + + variablePerm = getOFVariablePerm(key); + if ((hasPrivilege || (variablePerm != kOFVariablePermRootOnly)) && + (!(variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task))) { + } else { + dict->removeObject(key); + iter->reset(); + } + } + } + + result = dict->serialize(s); + + dict->release(); + if (iter != 0) { + iter->release(); + } + + return result; } -OSObject *IODTNVRAM::copyProperty(const OSSymbol *aKey) const +OSObject * +IODTNVRAM::copyProperty(const OSSymbol *aKey) const { - IOReturn result; - UInt32 variablePerm; - OSObject *theObject; - - if (_ofDict == 0) return 0; - - // Verify permissions. - variablePerm = getOFVariablePerm(aKey); - result = IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege); - if (result != kIOReturnSuccess) { - if (variablePerm == kOFVariablePermRootOnly) return 0; - } - if (variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task) return 0; - - IOLockLock(_ofLock); - theObject = _ofDict->getObject(aKey); - if (theObject) theObject->retain(); - IOLockUnlock(_ofLock); - - return theObject; + IOReturn result; + UInt32 variablePerm; + OSObject *theObject; + + if (_ofDict == 0) { + return 0; + } + + // Verify permissions. + variablePerm = getOFVariablePerm(aKey); + result = IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege); + if (result != kIOReturnSuccess) { + if (variablePerm == kOFVariablePermRootOnly) { + return 0; + } + } + if (variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task) { + return 0; + } + + IOLockLock(_ofLock); + theObject = _ofDict->getObject(aKey); + if (theObject) { + theObject->retain(); + } + IOLockUnlock(_ofLock); + + return theObject; } -OSObject *IODTNVRAM::copyProperty(const char *aKey) const +OSObject * +IODTNVRAM::copyProperty(const char *aKey) const { - const OSSymbol *keySymbol; - OSObject *theObject = 0; - - keySymbol = OSSymbol::withCString(aKey); - if (keySymbol != 0) { - theObject = copyProperty(keySymbol); - keySymbol->release(); - } - - return theObject; + const OSSymbol *keySymbol; + OSObject *theObject = 0; + + keySymbol = OSSymbol::withCString(aKey); + if (keySymbol != 0) { + theObject = copyProperty(keySymbol); + keySymbol->release(); + } + + return theObject; } -OSObject *IODTNVRAM::getProperty(const OSSymbol *aKey) const +OSObject * +IODTNVRAM::getProperty(const OSSymbol *aKey) const { - OSObject *theObject; + OSObject *theObject; - theObject = copyProperty(aKey); - if (theObject) theObject->release(); + theObject = copyProperty(aKey); + if (theObject) { + theObject->release(); + } - return theObject; + return theObject; } -OSObject *IODTNVRAM::getProperty(const char *aKey) const +OSObject * +IODTNVRAM::getProperty(const char *aKey) const { - OSObject *theObject; + OSObject *theObject; - theObject = copyProperty(aKey); - if (theObject) theObject->release(); + theObject = copyProperty(aKey); + if (theObject) { + theObject->release(); + } - return theObject; + return theObject; } -bool IODTNVRAM::setProperty(const OSSymbol *aKey, OSObject *anObject) +bool +IODTNVRAM::setProperty(const OSSymbol *aKey, OSObject *anObject) { - bool result; - UInt32 propType, propPerm; - OSString *tmpString = 0; - OSObject *propObject = 0, *oldObject; - - if (_ofDict == 0) return false; - - // Verify permissions. 
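// The same four-tier permission gate recurs in serializeProperties() and
// copyProperty() above and in setProperty()/removeProperty() here: kernel-only
// variables are never visible outside kernel_task, root-only variables need
// the admin privilege, and unprivileged writers may touch only user-write
// variables. A hypothetical condensed form, using the permission constants
// from the IONVRAM headers:
static bool
nvramAccessOK(UInt32 perm, bool hasAdmin, bool isKernelTask, bool forWrite)
{
	if (perm == kOFVariablePermKernelOnly && !isKernelTask) {
		return false;   // kernel-only, regardless of direction
	}
	if (forWrite) {
		return hasAdmin || (perm == kOFVariablePermUserWrite);
	}
	return hasAdmin || (perm != kOFVariablePermRootOnly);
}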
- propPerm = getOFVariablePerm(aKey); - if (IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege) != kIOReturnSuccess) { - if (propPerm != kOFVariablePermUserWrite) return false; - } - if (propPerm == kOFVariablePermKernelOnly && current_task() != kernel_task) return 0; - - // Don't allow change of 'aapl,panic-info'. - if (aKey->isEqualTo(kIODTNVRAMPanicInfoKey)) return false; - - // Make sure the object is of the correct type. - propType = getOFVariableType(aKey); - switch (propType) { - case kOFVariableTypeBoolean : - propObject = OSDynamicCast(OSBoolean, anObject); - break; - - case kOFVariableTypeNumber : - propObject = OSDynamicCast(OSNumber, anObject); - break; - - case kOFVariableTypeString : - propObject = OSDynamicCast(OSString, anObject); - break; - - case kOFVariableTypeData : - propObject = OSDynamicCast(OSData, anObject); - if (propObject == 0) { - tmpString = OSDynamicCast(OSString, anObject); - if (tmpString != 0) { - propObject = OSData::withBytes(tmpString->getCStringNoCopy(), - tmpString->getLength()); - } - } - break; - } - - if (propObject == 0) return false; - - IOLockLock(_ofLock); - - oldObject = _ofDict->getObject(aKey); - if (oldObject) { - oldObject->retain(); - } - result = _ofDict->setObject(aKey, propObject); - - if (result) { - if (syncVariables() != kIOReturnSuccess) { - if (oldObject) { - _ofDict->setObject(aKey, oldObject); - } - else { - _ofDict->removeObject(aKey); - } - (void) syncVariables(); - result = false; - } - } - - if (oldObject) { - oldObject->release(); - } - if (tmpString) { - propObject->release(); - } - - IOLockUnlock(_ofLock); - - return result; + bool result; + UInt32 propType, propPerm; + OSString *tmpString = 0; + OSObject *propObject = 0, *oldObject; + + if (_ofDict == 0) { + return false; + } + + // Verify permissions. + propPerm = getOFVariablePerm(aKey); + if (IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege) != kIOReturnSuccess) { + if (propPerm != kOFVariablePermUserWrite) { + return false; + } + } + if (propPerm == kOFVariablePermKernelOnly && current_task() != kernel_task) { + return 0; + } + + // Don't allow change of 'aapl,panic-info'. + if (aKey->isEqualTo(kIODTNVRAMPanicInfoKey)) { + return false; + } + + // Make sure the object is of the correct type. 
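// For variables typed kOFVariableTypeData, the switch below also accepts an
// OSString and converts it with OSData::withBytes(), so callers may hand in
// either representation. A hypothetical caller-side sketch ('nvram' stands
// for the registered IODTNVRAM service; the variable name is illustrative):
static bool
setDataVariableFromString(IODTNVRAM *nvram, const char *name, const char *value)
{
	const OSSymbol *key = OSSymbol::withCString(name);
	OSString *str = OSString::withCString(value);
	bool ok = false;

	if (key && str) {
		ok = nvram->setProperty(key, str);   // coerced to OSData internally
	}
	if (str) {
		str->release();
	}
	if (key) {
		key->release();
	}
	return ok;
}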
+ propType = getOFVariableType(aKey); + switch (propType) { + case kOFVariableTypeBoolean: + propObject = OSDynamicCast(OSBoolean, anObject); + break; + + case kOFVariableTypeNumber: + propObject = OSDynamicCast(OSNumber, anObject); + break; + + case kOFVariableTypeString: + propObject = OSDynamicCast(OSString, anObject); + break; + + case kOFVariableTypeData: + propObject = OSDynamicCast(OSData, anObject); + if (propObject == 0) { + tmpString = OSDynamicCast(OSString, anObject); + if (tmpString != 0) { + propObject = OSData::withBytes(tmpString->getCStringNoCopy(), + tmpString->getLength()); + } + } + break; + } + + if (propObject == 0) { + return false; + } + + IOLockLock(_ofLock); + + oldObject = _ofDict->getObject(aKey); + if (oldObject) { + oldObject->retain(); + } + result = _ofDict->setObject(aKey, propObject); + + if (result) { + if (syncVariables() != kIOReturnSuccess) { + if (oldObject) { + _ofDict->setObject(aKey, oldObject); + } else { + _ofDict->removeObject(aKey); + } + (void) syncVariables(); + result = false; + } + } + + if (oldObject) { + oldObject->release(); + } + if (tmpString) { + propObject->release(); + } + + IOLockUnlock(_ofLock); + + return result; } -void IODTNVRAM::removeProperty(const OSSymbol *aKey) +void +IODTNVRAM::removeProperty(const OSSymbol *aKey) { - bool result; - UInt32 propPerm; - - if (_ofDict == 0) return; - - // Verify permissions. - propPerm = getOFVariablePerm(aKey); - result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); - if (result != kIOReturnSuccess) { - if (propPerm != kOFVariablePermUserWrite) return; - } - if (propPerm == kOFVariablePermKernelOnly && current_task() != kernel_task) return; - - // Don't allow change of 'aapl,panic-info'. - if (aKey->isEqualTo(kIODTNVRAMPanicInfoKey)) return; - - // If the object exists, remove it from the dictionary. - - IOLockLock(_ofLock); - result = _ofDict->getObject(aKey) != 0; - if (result) { - _ofDict->removeObject(aKey); - } - - if (result) { - (void) syncVariables(); - } - - IOLockUnlock(_ofLock); + bool result; + UInt32 propPerm; + + if (_ofDict == 0) { + return; + } + + // Verify permissions. + propPerm = getOFVariablePerm(aKey); + result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); + if (result != kIOReturnSuccess) { + if (propPerm != kOFVariablePermUserWrite) { + return; + } + } + if (propPerm == kOFVariablePermKernelOnly && current_task() != kernel_task) { + return; + } + + // Don't allow change of 'aapl,panic-info'. + if (aKey->isEqualTo(kIODTNVRAMPanicInfoKey)) { + return; + } + + // If the object exists, remove it from the dictionary. 
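// Note the rollback above: when syncVariables() cannot commit the new image,
// setProperty() restores the previous value (or removes the key if there was
// none) and syncs again, so the in-memory dictionary and the on-flash image
// never diverge. removeProperty(), being rewritten here, uses the same
// lock-mutate-sync sequence but without the restore step.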
+ + IOLockLock(_ofLock); + result = _ofDict->getObject(aKey) != 0; + if (result) { + _ofDict->removeObject(aKey); + } + + if (result) { + (void) syncVariables(); + } + + IOLockUnlock(_ofLock); } -IOReturn IODTNVRAM::setProperties(OSObject *properties) +IOReturn +IODTNVRAM::setProperties(OSObject *properties) { - bool result = true; - OSObject *object; - const OSSymbol *key; - const OSString *tmpStr; - OSDictionary *dict; - OSCollectionIterator *iter; - - dict = OSDynamicCast(OSDictionary, properties); - if (dict == 0) return kIOReturnBadArgument; - - iter = OSCollectionIterator::withCollection(dict); - if (iter == 0) return kIOReturnBadArgument; - - while (result) { - key = OSDynamicCast(OSSymbol, iter->getNextObject()); - if (key == 0) break; - - object = dict->getObject(key); - if (object == 0) continue; - - if (key->isEqualTo(kIONVRAMDeletePropertyKey)) { - tmpStr = OSDynamicCast(OSString, object); - if (tmpStr != 0) { - key = OSSymbol::withString(tmpStr); - removeProperty(key); - key->release(); - result = true; - } else { - result = false; - } - } else if(key->isEqualTo(kIONVRAMSyncNowPropertyKey) || key->isEqualTo(kIONVRAMForceSyncNowPropertyKey)) { - tmpStr = OSDynamicCast(OSString, object); - if (tmpStr != 0) { + bool result = true; + OSObject *object; + const OSSymbol *key; + const OSString *tmpStr; + OSDictionary *dict; + OSCollectionIterator *iter; + + dict = OSDynamicCast(OSDictionary, properties); + if (dict == 0) { + return kIOReturnBadArgument; + } - result = true; + iter = OSCollectionIterator::withCollection(dict); + if (iter == 0) { + return kIOReturnBadArgument; + } - // We still want to throttle NVRAM commit rate for SyncNow. ForceSyncNow is provided as a really big hammer. + while (result) { + key = OSDynamicCast(OSSymbol, iter->getNextObject()); + if (key == 0) { + break; + } - syncInternal(key->isEqualTo(kIONVRAMSyncNowPropertyKey)); + object = dict->getObject(key); + if (object == 0) { + continue; + } + if (key->isEqualTo(kIONVRAMDeletePropertyKey)) { + tmpStr = OSDynamicCast(OSString, object); + if (tmpStr != 0) { + key = OSSymbol::withString(tmpStr); + removeProperty(key); + key->release(); + result = true; + } else { + result = false; + } + } else if (key->isEqualTo(kIONVRAMSyncNowPropertyKey) || key->isEqualTo(kIONVRAMForceSyncNowPropertyKey)) { + tmpStr = OSDynamicCast(OSString, object); + if (tmpStr != 0) { + result = true; + + // We still want to throttle NVRAM commit rate for SyncNow. ForceSyncNow is provided as a really big hammer. 
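// setProperties(), being rewritten here, treats three keys specially:
// kIONVRAMDeletePropertyKey removes the variable named by the value, while
// the SyncNow / ForceSyncNow keys trigger a commit (only SyncNow passes
// rateLimit=true to syncInternal(), so only it is throttled). A hypothetical
// sketch of a delete request:
#include <IOKit/IOKitKeys.h>
#include <IOKit/IONVRAM.h>

static IOReturn
deleteNVRAMVariable(IODTNVRAM *nvram, const char *name)
{
	OSDictionary *req = OSDictionary::withCapacity(1);
	OSString *victim = OSString::withCString(name);
	IOReturn ret = kIOReturnNoMemory;

	if (req && victim) {
		req->setObject(kIONVRAMDeletePropertyKey, victim);
		ret = nvram->setProperties(req);
	}
	if (victim) {
		victim->release();
	}
	if (req) {
		req->release();
	}
	return ret;
}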
+ + syncInternal(key->isEqualTo(kIONVRAMSyncNowPropertyKey)); + } else { + result = false; + } } else { - result = false; + result = setProperty(key, object); } } - else { - result = setProperty(key, object); - } - } - - iter->release(); - - if (result) return kIOReturnSuccess; - else return kIOReturnError; + iter->release(); + + if (result) { + return kIOReturnSuccess; + } else { + return kIOReturnError; + } } -IOReturn IODTNVRAM::readXPRAM(IOByteCount offset, UInt8 *buffer, - IOByteCount length) +IOReturn +IODTNVRAM::readXPRAM(IOByteCount offset, UInt8 *buffer, + IOByteCount length) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } -IOReturn IODTNVRAM::writeXPRAM(IOByteCount offset, UInt8 *buffer, - IOByteCount length) +IOReturn +IODTNVRAM::writeXPRAM(IOByteCount offset, UInt8 *buffer, + IOByteCount length) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } -IOReturn IODTNVRAM::readNVRAMProperty(IORegistryEntry *entry, - const OSSymbol **name, - OSData **value) +IOReturn +IODTNVRAM::readNVRAMProperty(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value) { - IOReturn err; + IOReturn err; + + err = readNVRAMPropertyType1(entry, name, value); - err = readNVRAMPropertyType1(entry, name, value); - - return err; + return err; } -IOReturn IODTNVRAM::writeNVRAMProperty(IORegistryEntry *entry, - const OSSymbol *name, - OSData *value) +IOReturn +IODTNVRAM::writeNVRAMProperty(IORegistryEntry *entry, + const OSSymbol *name, + OSData *value) { - IOReturn err; - - err = writeNVRAMPropertyType1(entry, name, value); - - return err; + IOReturn err; + + err = writeNVRAMPropertyType1(entry, name, value); + + return err; } -OSDictionary *IODTNVRAM::getNVRAMPartitions(void) +OSDictionary * +IODTNVRAM::getNVRAMPartitions(void) { - return _nvramPartitionLengths; + return _nvramPartitionLengths; } -IOReturn IODTNVRAM::readNVRAMPartition(const OSSymbol *partitionID, - IOByteCount offset, UInt8 *buffer, - IOByteCount length) +IOReturn +IODTNVRAM::readNVRAMPartition(const OSSymbol *partitionID, + IOByteCount offset, UInt8 *buffer, + IOByteCount length) { - OSNumber *partitionOffsetNumber, *partitionLengthNumber; - UInt32 partitionOffset, partitionLength, end; - - partitionOffsetNumber = - (OSNumber *)_nvramPartitionOffsets->getObject(partitionID); - partitionLengthNumber = - (OSNumber *)_nvramPartitionLengths->getObject(partitionID); - - if ((partitionOffsetNumber == 0) || (partitionLengthNumber == 0)) - return kIOReturnNotFound; - - partitionOffset = partitionOffsetNumber->unsigned32BitValue(); - partitionLength = partitionLengthNumber->unsigned32BitValue(); - - if (os_add_overflow(offset, length, &end)) return kIOReturnBadArgument; - if ((buffer == 0) || (length == 0) || (end > partitionLength)) - return kIOReturnBadArgument; - - bcopy(_nvramImage + partitionOffset + offset, buffer, length); - - return kIOReturnSuccess; + OSNumber *partitionOffsetNumber, *partitionLengthNumber; + UInt32 partitionOffset, partitionLength, end; + + partitionOffsetNumber = + (OSNumber *)_nvramPartitionOffsets->getObject(partitionID); + partitionLengthNumber = + (OSNumber *)_nvramPartitionLengths->getObject(partitionID); + + if ((partitionOffsetNumber == 0) || (partitionLengthNumber == 0)) { + return kIOReturnNotFound; + } + + partitionOffset = partitionOffsetNumber->unsigned32BitValue(); + partitionLength = partitionLengthNumber->unsigned32BitValue(); + + if (os_add_overflow(offset, length, &end)) { + return kIOReturnBadArgument; + } + if ((buffer == 0) || (length == 0) || (end > 
partitionLength)) { + return kIOReturnBadArgument; + } + + bcopy(_nvramImage + partitionOffset + offset, buffer, length); + + return kIOReturnSuccess; } -IOReturn IODTNVRAM::writeNVRAMPartition(const OSSymbol *partitionID, - IOByteCount offset, UInt8 *buffer, - IOByteCount length) +IOReturn +IODTNVRAM::writeNVRAMPartition(const OSSymbol *partitionID, + IOByteCount offset, UInt8 *buffer, + IOByteCount length) { - OSNumber *partitionOffsetNumber, *partitionLengthNumber; - UInt32 partitionOffset, partitionLength, end; - - partitionOffsetNumber = - (OSNumber *)_nvramPartitionOffsets->getObject(partitionID); - partitionLengthNumber = - (OSNumber *)_nvramPartitionLengths->getObject(partitionID); - - if ((partitionOffsetNumber == 0) || (partitionLengthNumber == 0)) - return kIOReturnNotFound; - - partitionOffset = partitionOffsetNumber->unsigned32BitValue(); - partitionLength = partitionLengthNumber->unsigned32BitValue(); - - if (os_add_overflow(offset, length, &end)) return kIOReturnBadArgument; - if ((buffer == 0) || (length == 0) || (end > partitionLength)) - return kIOReturnBadArgument; - - bcopy(buffer, _nvramImage + partitionOffset + offset, length); - - if (_nvramController != 0) { - _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); - } - - return kIOReturnSuccess; + OSNumber *partitionOffsetNumber, *partitionLengthNumber; + UInt32 partitionOffset, partitionLength, end; + + partitionOffsetNumber = + (OSNumber *)_nvramPartitionOffsets->getObject(partitionID); + partitionLengthNumber = + (OSNumber *)_nvramPartitionLengths->getObject(partitionID); + + if ((partitionOffsetNumber == 0) || (partitionLengthNumber == 0)) { + return kIOReturnNotFound; + } + + partitionOffset = partitionOffsetNumber->unsigned32BitValue(); + partitionLength = partitionLengthNumber->unsigned32BitValue(); + + if (os_add_overflow(offset, length, &end)) { + return kIOReturnBadArgument; + } + if ((buffer == 0) || (length == 0) || (end > partitionLength)) { + return kIOReturnBadArgument; + } + + bcopy(buffer, _nvramImage + partitionOffset + offset, length); + + if (_nvramController != 0) { + _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); + } + + return kIOReturnSuccess; } -IOByteCount IODTNVRAM::savePanicInfo(UInt8 *buffer, IOByteCount length) +IOByteCount +IODTNVRAM::savePanicInfo(UInt8 *buffer, IOByteCount length) { - if ((_piImage == 0) || (length <= 0)) return 0; - - if (length > (_piPartitionSize - 4)) - length = _piPartitionSize - 4; - - // Save the Panic Info. - bcopy(buffer, _piImage + 4, length); - - // Save the Panic Info length. - *(UInt32 *)_piImage = length; - - if (_nvramController != 0) { - _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); - } - /* - * This prevents OF variables from being committed if the system has panicked - */ - _systemPaniced = true; - /* The call to sync() forces the NVRAM controller to write the panic info - * partition to NVRAM. - */ - sync(); - - return length; + if ((_piImage == 0) || (length <= 0)) { + return 0; + } + + if (length > (_piPartitionSize - 4)) { + length = _piPartitionSize - 4; + } + + // Save the Panic Info. + bcopy(buffer, _piImage + 4, length); + + // Save the Panic Info length. + *(UInt32 *)_piImage = length; + + if (_nvramController != 0) { + _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); + } + /* + * This prevents OF variables from being committed if the system has panicked + */ + _systemPaniced = true; + /* The call to sync() forces the NVRAM controller to write the panic info + * partition to NVRAM. 
+ */ + sync(); + + return length; } // Private methods -UInt8 IODTNVRAM::calculatePartitionChecksum(UInt8 *partitionHeader) +UInt8 +IODTNVRAM::calculatePartitionChecksum(UInt8 *partitionHeader) { - UInt8 cnt, isum, csum = 0; - - for (cnt = 0; cnt < 0x10; cnt++) { - isum = csum + partitionHeader[cnt]; - if (isum < csum) isum++; - csum = isum; - } - - return csum; + UInt8 cnt, isum, csum = 0; + + for (cnt = 0; cnt < 0x10; cnt++) { + isum = csum + partitionHeader[cnt]; + if (isum < csum) { + isum++; + } + csum = isum; + } + + return csum; } -IOReturn IODTNVRAM::initOFVariables(void) +IOReturn +IODTNVRAM::initOFVariables(void) { - UInt32 cnt; - UInt8 *propName, *propData; - UInt32 propNameLength, propDataLength; - const OSSymbol *propSymbol; - OSObject *propObject; - - if (_ofImage == 0) return kIOReturnNotReady; - - _ofDict = OSDictionary::withCapacity(1); - _ofLock = IOLockAlloc(); - if (!_ofDict || !_ofLock) return kIOReturnNoMemory; - - cnt = 0; - while (cnt < _ofPartitionSize) { - // Break if there is no name. - if (_ofImage[cnt] == '\0') break; - - // Find the length of the name. - propName = _ofImage + cnt; - for (propNameLength = 0; (cnt + propNameLength) < _ofPartitionSize; - propNameLength++) { - if (_ofImage[cnt + propNameLength] == '=') break; - } - - // Break if the name goes past the end of the partition. - if ((cnt + propNameLength) >= _ofPartitionSize) break; - cnt += propNameLength + 1; - - propData = _ofImage + cnt; - for (propDataLength = 0; (cnt + propDataLength) < _ofPartitionSize; - propDataLength++) { - if (_ofImage[cnt + propDataLength] == '\0') break; - } - - // Break if the data goes past the end of the partition. - if ((cnt + propDataLength) >= _ofPartitionSize) break; - cnt += propDataLength + 1; - - if (convertPropToObject(propName, propNameLength, - propData, propDataLength, - &propSymbol, &propObject)) { - _ofDict->setObject(propSymbol, propObject); - propSymbol->release(); - propObject->release(); - } - } - - // Create the boot-args property if it is not in the dictionary. - if (_ofDict->getObject("boot-args") == 0) { - propObject = OSString::withCStringNoCopy(""); - if (propObject != 0) { - _ofDict->setObject("boot-args", propObject); - propObject->release(); - } - } - - if (_piImage != 0) { - propDataLength = *(UInt32 *)_piImage; - if ((propDataLength != 0) && (propDataLength <= (_piPartitionSize - 4))) { - propObject = OSData::withBytes(_piImage + 4, propDataLength); - _ofDict->setObject(kIODTNVRAMPanicInfoKey, propObject); - propObject->release(); - - // Clear the length from _piImage and mark dirty. - *(UInt32 *)_piImage = 0; - if (_nvramController != 0) { - _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); - } - } - } - - return kIOReturnSuccess; + UInt32 cnt; + UInt8 *propName, *propData; + UInt32 propNameLength, propDataLength; + const OSSymbol *propSymbol; + OSObject *propObject; + + if (_ofImage == 0) { + return kIOReturnNotReady; + } + + _ofDict = OSDictionary::withCapacity(1); + _ofLock = IOLockAlloc(); + if (!_ofDict || !_ofLock) { + return kIOReturnNoMemory; + } + + cnt = 0; + while (cnt < _ofPartitionSize) { + // Break if there is no name. + if (_ofImage[cnt] == '\0') { + break; + } + + // Find the length of the name. + propName = _ofImage + cnt; + for (propNameLength = 0; (cnt + propNameLength) < _ofPartitionSize; + propNameLength++) { + if (_ofImage[cnt + propNameLength] == '=') { + break; + } + } + + // Break if the name goes past the end of the partition. 
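// The loop being reindented here decodes the OF partition as a packed
// sequence of "name=value\0" records, stopping at the first empty name or
// when a field would run past the partition. A hypothetical standalone form
// of one decoding step:
#include <stdint.h>

static bool
nextOFRecord(const uint8_t *part, uint32_t size, uint32_t *cursor,
    const uint8_t **name, uint32_t *nameLen,
    const uint8_t **value, uint32_t *valueLen)
{
	uint32_t pos = *cursor;
	uint32_t n, v;

	if (pos >= size || part[pos] == '\0') {
		return false;           // end of the variable list
	}
	*name = part + pos;
	for (n = 0; pos + n < size && part[pos + n] != '='; n++) {
	}
	if (pos + n >= size) {
		return false;           // name ran past the partition
	}
	*nameLen = n;
	pos += n + 1;                   // skip '='
	*value = part + pos;
	for (v = 0; pos + v < size && part[pos + v] != '\0'; v++) {
	}
	if (pos + v >= size) {
		return false;           // value ran past the partition
	}
	*valueLen = v;
	*cursor = pos + v + 1;          // skip the terminating NUL
	return true;
}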
+ if ((cnt + propNameLength) >= _ofPartitionSize) { + break; + } + cnt += propNameLength + 1; + + propData = _ofImage + cnt; + for (propDataLength = 0; (cnt + propDataLength) < _ofPartitionSize; + propDataLength++) { + if (_ofImage[cnt + propDataLength] == '\0') { + break; + } + } + + // Break if the data goes past the end of the partition. + if ((cnt + propDataLength) >= _ofPartitionSize) { + break; + } + cnt += propDataLength + 1; + + if (convertPropToObject(propName, propNameLength, + propData, propDataLength, + &propSymbol, &propObject)) { + _ofDict->setObject(propSymbol, propObject); + propSymbol->release(); + propObject->release(); + } + } + + // Create the boot-args property if it is not in the dictionary. + if (_ofDict->getObject("boot-args") == 0) { + propObject = OSString::withCStringNoCopy(""); + if (propObject != 0) { + _ofDict->setObject("boot-args", propObject); + propObject->release(); + } + } + + if (_piImage != 0) { + propDataLength = *(UInt32 *)_piImage; + if ((propDataLength != 0) && (propDataLength <= (_piPartitionSize - 4))) { + propObject = OSData::withBytes(_piImage + 4, propDataLength); + _ofDict->setObject(kIODTNVRAMPanicInfoKey, propObject); + propObject->release(); + + // Clear the length from _piImage and mark dirty. + *(UInt32 *)_piImage = 0; + if (_nvramController != 0) { + _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); + } + } + } + + return kIOReturnSuccess; } -IOReturn IODTNVRAM::syncOFVariables(void) +IOReturn +IODTNVRAM::syncOFVariables(void) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } -IOReturn IODTNVRAM::syncVariables(void) +IOReturn +IODTNVRAM::syncVariables(void) { - bool ok; - UInt32 length, maxLength; - UInt8 *buffer, *tmpBuffer; - const OSSymbol *tmpSymbol; - OSObject *tmpObject; - OSCollectionIterator *iter; + bool ok; + UInt32 length, maxLength; + UInt8 *buffer, *tmpBuffer; + const OSSymbol *tmpSymbol; + OSObject *tmpObject; + OSCollectionIterator *iter; - IOLockAssert(_ofLock, kIOLockAssertOwned); + IOLockAssert(_ofLock, kIOLockAssertOwned); - if ((_ofImage == 0) || (_ofDict == 0) || _systemPaniced) return kIOReturnNotReady; + if ((_ofImage == 0) || (_ofDict == 0) || _systemPaniced) { + return kIOReturnNotReady; + } - buffer = tmpBuffer = IONew(UInt8, _ofPartitionSize); - if (buffer == 0) return kIOReturnNoMemory; - bzero(buffer, _ofPartitionSize); + buffer = tmpBuffer = IONew(UInt8, _ofPartitionSize); + if (buffer == 0) { + return kIOReturnNoMemory; + } + bzero(buffer, _ofPartitionSize); - ok = true; - maxLength = _ofPartitionSize; + ok = true; + maxLength = _ofPartitionSize; - iter = OSCollectionIterator::withCollection(_ofDict); - if (iter == 0) ok = false; + iter = OSCollectionIterator::withCollection(_ofDict); + if (iter == 0) { + ok = false; + } - while (ok) { - tmpSymbol = OSDynamicCast(OSSymbol, iter->getNextObject()); - if (tmpSymbol == 0) break; + while (ok) { + tmpSymbol = OSDynamicCast(OSSymbol, iter->getNextObject()); + if (tmpSymbol == 0) { + break; + } - // Don't save 'aapl,panic-info'. - if (tmpSymbol->isEqualTo(kIODTNVRAMPanicInfoKey)) continue; + // Don't save 'aapl,panic-info'. 
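// syncVariables(), being rewritten here, renders every variable back into a
// scratch buffer sized to the OF partition as "name=value\0" records; if any
// record does not fit, the whole sync fails and nothing is copied to the
// image. A hypothetical helper showing the bounds bookkeeping:
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool
appendOFRecord(uint8_t *buf, uint32_t *used, uint32_t cap,
    const char *name, const char *value)
{
	// "name" + '=' + "value" + '\0'
	uint32_t need = (uint32_t)(strlen(name) + 1 + strlen(value) + 1);

	if (*used + need > cap) {
		return false;   // does not fit: caller aborts the entire sync
	}
	snprintf((char *)buf + *used, cap - *used, "%s=%s", name, value);
	*used += need;
	return true;
}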
+ if (tmpSymbol->isEqualTo(kIODTNVRAMPanicInfoKey)) { + continue; + } - tmpObject = _ofDict->getObject(tmpSymbol); + tmpObject = _ofDict->getObject(tmpSymbol); - length = maxLength; - ok = convertObjectToProp(tmpBuffer, &length, tmpSymbol, tmpObject); - if (ok) { - tmpBuffer += length; - maxLength -= length; - } - } - iter->release(); + length = maxLength; + ok = convertObjectToProp(tmpBuffer, &length, tmpSymbol, tmpObject); + if (ok) { + tmpBuffer += length; + maxLength -= length; + } + } + iter->release(); - if (ok) { - bcopy(buffer, _ofImage, _ofPartitionSize); - } + if (ok) { + bcopy(buffer, _ofImage, _ofPartitionSize); + } - IODelete(buffer, UInt8, _ofPartitionSize); + IODelete(buffer, UInt8, _ofPartitionSize); - if (!ok) return kIOReturnBadArgument; + if (!ok) { + return kIOReturnBadArgument; + } - if (_nvramController != 0) { - return _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); - } + if (_nvramController != 0) { + return _nvramController->write(0, _nvramImage, kIODTNVRAMImageSize); + } - return kIOReturnNotReady; + return kIOReturnNotReady; } struct OFVariable { - const char *variableName; - UInt32 variableType; - UInt32 variablePerm; - SInt32 variableOffset; + const char *variableName; + UInt32 variableType; + UInt32 variablePerm; + SInt32 variableOffset; }; typedef struct OFVariable OFVariable; enum { - kOWVariableOffsetNumber = 8, - kOWVariableOffsetString = 17 + kOWVariableOffsetNumber = 8, + kOWVariableOffsetString = 17 }; static const OFVariable gOFVariables[] = { - {"little-endian?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 0}, - {"real-mode?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 1}, - {"auto-boot?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 2}, - {"diag-switch?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 3}, - {"fcode-debug?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 4}, - {"oem-banner?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 5}, - {"oem-logo?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 6}, - {"use-nvramrc?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 7}, - {"use-generic?", kOFVariableTypeBoolean, kOFVariablePermUserRead, -1}, - {"default-mac-address?", kOFVariableTypeBoolean, kOFVariablePermUserRead,-1}, - {"real-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 8}, - {"real-size", kOFVariableTypeNumber, kOFVariablePermUserRead, 9}, - {"virt-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 10}, - {"virt-size", kOFVariableTypeNumber, kOFVariablePermUserRead, 11}, - {"load-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 12}, - {"pci-probe-list", kOFVariableTypeNumber, kOFVariablePermUserRead, 13}, - {"pci-probe-mask", kOFVariableTypeNumber, kOFVariablePermUserRead, -1}, - {"screen-#columns", kOFVariableTypeNumber, kOFVariablePermUserRead, 14}, - {"screen-#rows", kOFVariableTypeNumber, kOFVariablePermUserRead, 15}, - {"selftest-#megs", kOFVariableTypeNumber, kOFVariablePermUserRead, 16}, - {"boot-device", kOFVariableTypeString, kOFVariablePermUserRead, 17}, - {"boot-file", kOFVariableTypeString, kOFVariablePermUserRead, 18}, - {"boot-screen", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"console-screen", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"diag-device", kOFVariableTypeString, kOFVariablePermUserRead, 19}, - {"diag-file", kOFVariableTypeString, kOFVariablePermUserRead, 20}, - {"input-device", kOFVariableTypeString, kOFVariablePermUserRead, 21}, - {"output-device", kOFVariableTypeString, kOFVariablePermUserRead, 22}, - {"input-device-1", 
kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"output-device-1", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"mouse-device", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"oem-banner", kOFVariableTypeString, kOFVariablePermUserRead, 23}, - {"oem-logo", kOFVariableTypeString, kOFVariablePermUserRead, 24}, - {"nvramrc", kOFVariableTypeString, kOFVariablePermUserRead, 25}, - {"boot-command", kOFVariableTypeString, kOFVariablePermUserRead, 26}, - {"default-client-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"default-server-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"default-gateway-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"default-subnet-mask", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"default-router-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"boot-script", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"boot-args", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"aapl,pci", kOFVariableTypeData, kOFVariablePermRootOnly, -1}, - {"security-mode", kOFVariableTypeString, kOFVariablePermUserRead, -1}, - {"security-password", kOFVariableTypeData, kOFVariablePermRootOnly, -1}, - {"boot-image", kOFVariableTypeData, kOFVariablePermUserWrite, -1}, - {"com.apple.System.fp-state", kOFVariableTypeData, kOFVariablePermKernelOnly, -1}, + {"little-endian?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 0}, + {"real-mode?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 1}, + {"auto-boot?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 2}, + {"diag-switch?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 3}, + {"fcode-debug?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 4}, + {"oem-banner?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 5}, + {"oem-logo?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 6}, + {"use-nvramrc?", kOFVariableTypeBoolean, kOFVariablePermUserRead, 7}, + {"use-generic?", kOFVariableTypeBoolean, kOFVariablePermUserRead, -1}, + {"default-mac-address?", kOFVariableTypeBoolean, kOFVariablePermUserRead, -1}, + {"real-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 8}, + {"real-size", kOFVariableTypeNumber, kOFVariablePermUserRead, 9}, + {"virt-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 10}, + {"virt-size", kOFVariableTypeNumber, kOFVariablePermUserRead, 11}, + {"load-base", kOFVariableTypeNumber, kOFVariablePermUserRead, 12}, + {"pci-probe-list", kOFVariableTypeNumber, kOFVariablePermUserRead, 13}, + {"pci-probe-mask", kOFVariableTypeNumber, kOFVariablePermUserRead, -1}, + {"screen-#columns", kOFVariableTypeNumber, kOFVariablePermUserRead, 14}, + {"screen-#rows", kOFVariableTypeNumber, kOFVariablePermUserRead, 15}, + {"selftest-#megs", kOFVariableTypeNumber, kOFVariablePermUserRead, 16}, + {"boot-device", kOFVariableTypeString, kOFVariablePermUserRead, 17}, + {"boot-file", kOFVariableTypeString, kOFVariablePermUserRead, 18}, + {"boot-screen", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"console-screen", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"diag-device", kOFVariableTypeString, kOFVariablePermUserRead, 19}, + {"diag-file", kOFVariableTypeString, kOFVariablePermUserRead, 20}, + {"input-device", kOFVariableTypeString, kOFVariablePermUserRead, 21}, + {"output-device", kOFVariableTypeString, kOFVariablePermUserRead, 22}, + {"input-device-1", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"output-device-1", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"mouse-device", 
kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"oem-banner", kOFVariableTypeString, kOFVariablePermUserRead, 23}, + {"oem-logo", kOFVariableTypeString, kOFVariablePermUserRead, 24}, + {"nvramrc", kOFVariableTypeString, kOFVariablePermUserRead, 25}, + {"boot-command", kOFVariableTypeString, kOFVariablePermUserRead, 26}, + {"default-client-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"default-server-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"default-gateway-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"default-subnet-mask", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"default-router-ip", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"boot-script", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"boot-args", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"aapl,pci", kOFVariableTypeData, kOFVariablePermRootOnly, -1}, + {"security-mode", kOFVariableTypeString, kOFVariablePermUserRead, -1}, + {"security-password", kOFVariableTypeData, kOFVariablePermRootOnly, -1}, + {"boot-image", kOFVariableTypeData, kOFVariablePermUserWrite, -1}, + {"com.apple.System.fp-state", kOFVariableTypeData, kOFVariablePermKernelOnly, -1}, #if CONFIG_EMBEDDED - {"backlight-level", kOFVariableTypeData, kOFVariablePermUserWrite, -1}, - {"com.apple.System.sep.art", kOFVariableTypeData, kOFVariablePermKernelOnly, -1}, - {"com.apple.System.boot-nonce", kOFVariableTypeString, kOFVariablePermKernelOnly, -1}, - {"darkboot", kOFVariableTypeBoolean, kOFVariablePermUserWrite, -1}, - {"acc-mb-ld-lifetime", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, - {"acc-cm-override-charger-count", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, - {"acc-cm-override-count", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, - {"enter-tdm-mode", kOFVariableTypeBoolean, kOFVariablePermUserWrite, -1}, + {"backlight-level", kOFVariableTypeData, kOFVariablePermUserWrite, -1}, + {"com.apple.System.sep.art", kOFVariableTypeData, kOFVariablePermKernelOnly, -1}, + {"com.apple.System.boot-nonce", kOFVariableTypeString, kOFVariablePermKernelOnly, -1}, + {"darkboot", kOFVariableTypeBoolean, kOFVariablePermUserWrite, -1}, + {"acc-mb-ld-lifetime", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, + {"acc-cm-override-charger-count", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, + {"acc-cm-override-count", kOFVariableTypeNumber, kOFVariablePermKernelOnly, -1}, + {"enter-tdm-mode", kOFVariableTypeBoolean, kOFVariablePermUserWrite, -1}, + {"nonce-seeds", kOFVariableTypeData, kOFVariablePermKernelOnly, -1}, #endif - {0, kOFVariableTypeData, kOFVariablePermUserRead, -1} + {0, kOFVariableTypeData, kOFVariablePermUserRead, -1} }; -UInt32 IODTNVRAM::getOFVariableType(const OSSymbol *propSymbol) const +UInt32 +IODTNVRAM::getOFVariableType(const OSSymbol *propSymbol) const { - const OFVariable *ofVar; - - ofVar = gOFVariables; - while (1) { - if ((ofVar->variableName == 0) || - propSymbol->isEqualTo(ofVar->variableName)) break; - ofVar++; - } - - return ofVar->variableType; + const OFVariable *ofVar; + + ofVar = gOFVariables; + while (1) { + if ((ofVar->variableName == 0) || + propSymbol->isEqualTo(ofVar->variableName)) { + break; + } + ofVar++; + } + + return ofVar->variableType; } -UInt32 IODTNVRAM::getOFVariablePerm(const OSSymbol *propSymbol) const +UInt32 +IODTNVRAM::getOFVariablePerm(const OSSymbol *propSymbol) const { - const OFVariable *ofVar; - - ofVar = gOFVariables; - while (1) { - if ((ofVar->variableName == 0) || - 
propSymbol->isEqualTo(ofVar->variableName)) break; - ofVar++; - } - - return ofVar->variablePerm; + const OFVariable *ofVar; + + ofVar = gOFVariables; + while (1) { + if ((ofVar->variableName == 0) || + propSymbol->isEqualTo(ofVar->variableName)) { + break; + } + ofVar++; + } + + return ofVar->variablePerm; } -bool IODTNVRAM::getOWVariableInfo(UInt32 variableNumber, const OSSymbol **propSymbol, - UInt32 *propType, UInt32 *propOffset) +bool +IODTNVRAM::getOWVariableInfo(UInt32 variableNumber, const OSSymbol **propSymbol, + UInt32 *propType, UInt32 *propOffset) { - const OFVariable *ofVar; - - ofVar = gOFVariables; - while (1) { - if (ofVar->variableName == 0) return false; - - if (ofVar->variableOffset == (SInt32) variableNumber) break; - - ofVar++; - } - - *propSymbol = OSSymbol::withCStringNoCopy(ofVar->variableName); - *propType = ofVar->variableType; - - switch (*propType) { - case kOFVariableTypeBoolean : - *propOffset = 1 << (31 - variableNumber); - break; - - case kOFVariableTypeNumber : - *propOffset = variableNumber - kOWVariableOffsetNumber; - break; - - case kOFVariableTypeString : - *propOffset = variableNumber - kOWVariableOffsetString; - break; - } - - return true; + const OFVariable *ofVar; + + ofVar = gOFVariables; + while (1) { + if (ofVar->variableName == 0) { + return false; + } + + if (ofVar->variableOffset == (SInt32) variableNumber) { + break; + } + + ofVar++; + } + + *propSymbol = OSSymbol::withCStringNoCopy(ofVar->variableName); + *propType = ofVar->variableType; + + switch (*propType) { + case kOFVariableTypeBoolean: + *propOffset = 1 << (31 - variableNumber); + break; + + case kOFVariableTypeNumber: + *propOffset = variableNumber - kOWVariableOffsetNumber; + break; + + case kOFVariableTypeString: + *propOffset = variableNumber - kOWVariableOffsetString; + break; + } + + return true; } -bool IODTNVRAM::convertPropToObject(UInt8 *propName, UInt32 propNameLength, - UInt8 *propData, UInt32 propDataLength, - const OSSymbol **propSymbol, - OSObject **propObject) +bool +IODTNVRAM::convertPropToObject(UInt8 *propName, UInt32 propNameLength, + UInt8 *propData, UInt32 propDataLength, + const OSSymbol **propSymbol, + OSObject **propObject) { - UInt32 propType; - const OSSymbol *tmpSymbol; - OSObject *tmpObject; - OSNumber *tmpNumber; - OSString *tmpString; - - // Create the symbol. - propName[propNameLength] = '\0'; - tmpSymbol = OSSymbol::withCString((const char *)propName); - propName[propNameLength] = '='; - if (tmpSymbol == 0) { - return false; - } - - propType = getOFVariableType(tmpSymbol); - - // Create the object. 
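/*
 * Illustrative sketch (not part of this patch): convertPropToObject()
 * dispatches on the variable's declared type to turn raw "name=value" text
 * into a typed object, as the switch just below does. A minimal user-space
 * model of that dispatch, with std::optional standing in for the libkern
 * OSObject containers; ParsedValue and parseVariable are invented names.
 */
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <optional>
#include <string>

enum VarType { kBoolean, kNumber, kString };

struct ParsedValue {                 // hypothetical stand-in for OSObject
    VarType     type;
    bool        b = false;
    int32_t     n = 0;
    std::string s;
};

static std::optional<ParsedValue>
parseVariable(VarType type, const char *data, size_t len)
{
    ParsedValue v{};
    v.type = type;
    switch (type) {
    case kBoolean:
        // Same literal comparison the driver performs.
        if (!strncmp("true", data, len))  { v.b = true;  return v; }
        if (!strncmp("false", data, len)) { v.b = false; return v; }
        return std::nullopt;             // unrecognized boolean text
    case kNumber:
        // Base 0 accepts decimal, 0x-prefixed hex, and octal, matching
        // the strtol(..., 0, 0) call in the kernel code.
        v.n = (int32_t)strtol(data, nullptr, 0);
        return v;
    case kString:
        v.s.assign(data, len);
        return v;
    }
    return std::nullopt;
}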
- tmpObject = 0; - switch (propType) { - case kOFVariableTypeBoolean : - if (!strncmp("true", (const char *)propData, propDataLength)) { - tmpObject = kOSBooleanTrue; - } else if (!strncmp("false", (const char *)propData, propDataLength)) { - tmpObject = kOSBooleanFalse; - } - break; - - case kOFVariableTypeNumber : - tmpNumber = OSNumber::withNumber(strtol((const char *)propData, 0, 0), 32); - if (tmpNumber != 0) tmpObject = tmpNumber; - break; - - case kOFVariableTypeString : - tmpString = OSString::withCString((const char *)propData); - if (tmpString != 0) tmpObject = tmpString; - break; - - case kOFVariableTypeData : - tmpObject = unescapeBytesToData(propData, propDataLength); - break; - } - - if (tmpObject == 0) { - tmpSymbol->release(); - return false; - } - - *propSymbol = tmpSymbol; - *propObject = tmpObject; - - return true; + UInt32 propType; + const OSSymbol *tmpSymbol; + OSObject *tmpObject; + OSNumber *tmpNumber; + OSString *tmpString; + + // Create the symbol. + propName[propNameLength] = '\0'; + tmpSymbol = OSSymbol::withCString((const char *)propName); + propName[propNameLength] = '='; + if (tmpSymbol == 0) { + return false; + } + + propType = getOFVariableType(tmpSymbol); + + // Create the object. + tmpObject = 0; + switch (propType) { + case kOFVariableTypeBoolean: + if (!strncmp("true", (const char *)propData, propDataLength)) { + tmpObject = kOSBooleanTrue; + } else if (!strncmp("false", (const char *)propData, propDataLength)) { + tmpObject = kOSBooleanFalse; + } + break; + + case kOFVariableTypeNumber: + tmpNumber = OSNumber::withNumber(strtol((const char *)propData, 0, 0), 32); + if (tmpNumber != 0) { + tmpObject = tmpNumber; + } + break; + + case kOFVariableTypeString: + tmpString = OSString::withCString((const char *)propData); + if (tmpString != 0) { + tmpObject = tmpString; + } + break; + + case kOFVariableTypeData: + tmpObject = unescapeBytesToData(propData, propDataLength); + break; + } + + if (tmpObject == 0) { + tmpSymbol->release(); + return false; + } + + *propSymbol = tmpSymbol; + *propObject = tmpObject; + + return true; } -bool IODTNVRAM::convertObjectToProp(UInt8 *buffer, UInt32 *length, - const OSSymbol *propSymbol, OSObject *propObject) +bool +IODTNVRAM::convertObjectToProp(UInt8 *buffer, UInt32 *length, + const OSSymbol *propSymbol, OSObject *propObject) { - const UInt8 *propName; - UInt32 propNameLength, propDataLength, remaining; - UInt32 propType, tmpValue; - OSBoolean *tmpBoolean = 0; - OSNumber *tmpNumber = 0; - OSString *tmpString = 0; - OSData *tmpData = 0; - - propName = (const UInt8 *)propSymbol->getCStringNoCopy(); - propNameLength = propSymbol->getLength(); - propType = getOFVariableType(propSymbol); - - // Get the size of the data. - propDataLength = 0xFFFFFFFF; - switch (propType) { - case kOFVariableTypeBoolean : - tmpBoolean = OSDynamicCast(OSBoolean, propObject); - if (tmpBoolean != 0) propDataLength = 5; - break; - - case kOFVariableTypeNumber : - tmpNumber = OSDynamicCast(OSNumber, propObject); - if (tmpNumber != 0) propDataLength = 10; - break; - - case kOFVariableTypeString : - tmpString = OSDynamicCast(OSString, propObject); - if (tmpString != 0) propDataLength = tmpString->getLength(); - break; - - case kOFVariableTypeData : - tmpData = OSDynamicCast(OSData, propObject); - if (tmpData != 0) { - tmpData = escapeDataToData(tmpData); - propDataLength = tmpData->getLength(); - } - break; - } - - // Make sure the propertySize is known and will fit. 
- if (propDataLength == 0xFFFFFFFF) return false; - if ((propNameLength + propDataLength + 2) > *length) return false; - - // Copy the property name equal sign. - buffer += snprintf((char *)buffer, *length, "%s=", propName); - remaining = *length - propNameLength - 1; - - switch (propType) { - case kOFVariableTypeBoolean : - if (tmpBoolean->getValue()) { - strlcpy((char *)buffer, "true", remaining); - } else { - strlcpy((char *)buffer, "false", remaining); - } - break; - - case kOFVariableTypeNumber : - tmpValue = tmpNumber->unsigned32BitValue(); - if (tmpValue == 0xFFFFFFFF) { - strlcpy((char *)buffer, "-1", remaining); - } else if (tmpValue < 1000) { - snprintf((char *)buffer, remaining, "%d", (uint32_t)tmpValue); - } else { - snprintf((char *)buffer, remaining, "0x%x", (uint32_t)tmpValue); - } - break; - - case kOFVariableTypeString : - strlcpy((char *)buffer, tmpString->getCStringNoCopy(), remaining); - break; - - case kOFVariableTypeData : - bcopy(tmpData->getBytesNoCopy(), buffer, propDataLength); - tmpData->release(); - break; - } - - propDataLength = strlen((const char *)buffer); - - *length = propNameLength + propDataLength + 2; - - return true; + const UInt8 *propName; + UInt32 propNameLength, propDataLength, remaining; + UInt32 propType, tmpValue; + OSBoolean *tmpBoolean = 0; + OSNumber *tmpNumber = 0; + OSString *tmpString = 0; + OSData *tmpData = 0; + + propName = (const UInt8 *)propSymbol->getCStringNoCopy(); + propNameLength = propSymbol->getLength(); + propType = getOFVariableType(propSymbol); + + // Get the size of the data. + propDataLength = 0xFFFFFFFF; + switch (propType) { + case kOFVariableTypeBoolean: + tmpBoolean = OSDynamicCast(OSBoolean, propObject); + if (tmpBoolean != 0) { + propDataLength = 5; + } + break; + + case kOFVariableTypeNumber: + tmpNumber = OSDynamicCast(OSNumber, propObject); + if (tmpNumber != 0) { + propDataLength = 10; + } + break; + + case kOFVariableTypeString: + tmpString = OSDynamicCast(OSString, propObject); + if (tmpString != 0) { + propDataLength = tmpString->getLength(); + } + break; + + case kOFVariableTypeData: + tmpData = OSDynamicCast(OSData, propObject); + if (tmpData != 0) { + tmpData = escapeDataToData(tmpData); + propDataLength = tmpData->getLength(); + } + break; + } + + // Make sure the propertySize is known and will fit. + if (propDataLength == 0xFFFFFFFF) { + return false; + } + if ((propNameLength + propDataLength + 2) > *length) { + return false; + } + + // Copy the property name equal sign. 
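/*
 * Illustrative sketch (not part of this patch): the snprintf-based write
 * path below emits "name=value" text, special-casing 0xFFFFFFFF as "-1",
 * printing small numbers in decimal and larger ones in hex. A compact
 * user-space version of just the number formatting; formatNumberVariable
 * is an invented name.
 */
#include <cstdint>
#include <cstdio>

static int
formatNumberVariable(char *buf, size_t len, const char *name, uint32_t value)
{
    if (value == 0xFFFFFFFF) {
        return snprintf(buf, len, "%s=-1", name);         // sentinel prints as -1
    } else if (value < 1000) {
        return snprintf(buf, len, "%s=%u", name, value);  // small values: decimal
    }
    return snprintf(buf, len, "%s=0x%x", name, value);    // otherwise: hex
}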
+ buffer += snprintf((char *)buffer, *length, "%s=", propName); + remaining = *length - propNameLength - 1; + + switch (propType) { + case kOFVariableTypeBoolean: + if (tmpBoolean->getValue()) { + strlcpy((char *)buffer, "true", remaining); + } else { + strlcpy((char *)buffer, "false", remaining); + } + break; + + case kOFVariableTypeNumber: + tmpValue = tmpNumber->unsigned32BitValue(); + if (tmpValue == 0xFFFFFFFF) { + strlcpy((char *)buffer, "-1", remaining); + } else if (tmpValue < 1000) { + snprintf((char *)buffer, remaining, "%d", (uint32_t)tmpValue); + } else { + snprintf((char *)buffer, remaining, "0x%x", (uint32_t)tmpValue); + } + break; + + case kOFVariableTypeString: + strlcpy((char *)buffer, tmpString->getCStringNoCopy(), remaining); + break; + + case kOFVariableTypeData: + bcopy(tmpData->getBytesNoCopy(), buffer, propDataLength); + tmpData->release(); + break; + } + + propDataLength = strlen((const char *)buffer); + + *length = propNameLength + propDataLength + 2; + + return true; } -UInt16 IODTNVRAM::generateOWChecksum(UInt8 *buffer) +UInt16 +IODTNVRAM::generateOWChecksum(UInt8 *buffer) { - UInt32 cnt, checksum = 0; - UInt16 *tmpBuffer = (UInt16 *)buffer; - - for (cnt = 0; cnt < _ofPartitionSize / 2; cnt++) - checksum += tmpBuffer[cnt]; - - return checksum % 0x0000FFFF; + UInt32 cnt, checksum = 0; + UInt16 *tmpBuffer = (UInt16 *)buffer; + + for (cnt = 0; cnt < _ofPartitionSize / 2; cnt++) { + checksum += tmpBuffer[cnt]; + } + + return checksum % 0x0000FFFF; } -bool IODTNVRAM::validateOWChecksum(UInt8 *buffer) +bool +IODTNVRAM::validateOWChecksum(UInt8 *buffer) { - UInt32 cnt, checksum, sum = 0; - UInt16 *tmpBuffer = (UInt16 *)buffer; - - for (cnt = 0; cnt < _ofPartitionSize / 2; cnt++) - sum += tmpBuffer[cnt]; - - checksum = (sum >> 16) + (sum & 0x0000FFFF); - if (checksum == 0x10000) checksum--; - checksum = (checksum ^ 0x0000FFFF) & 0x0000FFFF; - - return checksum == 0; + UInt32 cnt, checksum, sum = 0; + UInt16 *tmpBuffer = (UInt16 *)buffer; + + for (cnt = 0; cnt < _ofPartitionSize / 2; cnt++) { + sum += tmpBuffer[cnt]; + } + + checksum = (sum >> 16) + (sum & 0x0000FFFF); + if (checksum == 0x10000) { + checksum--; + } + checksum = (checksum ^ 0x0000FFFF) & 0x0000FFFF; + + return checksum == 0; } -void IODTNVRAM::updateOWBootArgs(const OSSymbol *key, OSObject *value) +void +IODTNVRAM::updateOWBootArgs(const OSSymbol *key, OSObject *value) { - bool wasBootArgs, bootr = false; - UInt32 cnt; - OSString *tmpString, *bootCommand, *bootArgs = 0; - const UInt8 *bootCommandData, *bootArgsData; - UInt8 *tmpData; - UInt32 bootCommandDataLength, bootArgsDataLength, tmpDataLength; - - tmpString = OSDynamicCast(OSString, value); - if (tmpString == 0) return; - - if (key->isEqualTo("boot-command")) { - wasBootArgs = false; - bootCommand = tmpString; - } else if (key->isEqualTo("boot-args")) { - wasBootArgs = true; - bootArgs = tmpString; - bootCommand = OSDynamicCast(OSString, _ofDict->getObject("boot-command")); - if (bootCommand == 0) return; - } else return; - - bootCommandData = (const UInt8 *)bootCommand->getCStringNoCopy(); - bootCommandDataLength = bootCommand->getLength(); - - if (bootCommandData == 0) return; - - for (cnt = 0; cnt < bootCommandDataLength; cnt++) { - if ((bootCommandData[cnt] == 'b') && - !strncmp("bootr", (const char *)bootCommandData + cnt, 5)) { - cnt += 5; - while (bootCommandData[cnt] == ' ') cnt++; - bootr = true; - break; - } - } - if (!bootr) { - _ofDict->removeObject("boot-args"); - return; - } - - if (wasBootArgs) { - bootArgsData = (const UInt8 
*)bootArgs->getCStringNoCopy(); - bootArgsDataLength = bootArgs->getLength(); - if (bootArgsData == 0) return; - - tmpDataLength = cnt + bootArgsDataLength; - tmpData = IONew(UInt8, tmpDataLength + 1); - if (tmpData == 0) return; - - cnt -= strlcpy((char *)tmpData, (const char *)bootCommandData, cnt); - strlcat((char *)tmpData, (const char *)bootArgsData, cnt); - - bootCommand = OSString::withCString((const char *)tmpData); - if (bootCommand != 0) { - _ofDict->setObject("boot-command", bootCommand); - bootCommand->release(); - } - - IODelete(tmpData, UInt8, tmpDataLength + 1); - } else { - bootArgs = OSString::withCString((const char *)(bootCommandData + cnt)); - if (bootArgs != 0) { - _ofDict->setObject("boot-args", bootArgs); - bootArgs->release(); - } - } + bool wasBootArgs, bootr = false; + UInt32 cnt; + OSString *tmpString, *bootCommand, *bootArgs = 0; + const UInt8 *bootCommandData, *bootArgsData; + UInt8 *tmpData; + UInt32 bootCommandDataLength, bootArgsDataLength, tmpDataLength; + + tmpString = OSDynamicCast(OSString, value); + if (tmpString == 0) { + return; + } + + if (key->isEqualTo("boot-command")) { + wasBootArgs = false; + bootCommand = tmpString; + } else if (key->isEqualTo("boot-args")) { + wasBootArgs = true; + bootArgs = tmpString; + bootCommand = OSDynamicCast(OSString, _ofDict->getObject("boot-command")); + if (bootCommand == 0) { + return; + } + } else { + return; + } + + bootCommandData = (const UInt8 *)bootCommand->getCStringNoCopy(); + bootCommandDataLength = bootCommand->getLength(); + + if (bootCommandData == 0) { + return; + } + + for (cnt = 0; cnt < bootCommandDataLength; cnt++) { + if ((bootCommandData[cnt] == 'b') && + !strncmp("bootr", (const char *)bootCommandData + cnt, 5)) { + cnt += 5; + while (bootCommandData[cnt] == ' ') { + cnt++; + } + bootr = true; + break; + } + } + if (!bootr) { + _ofDict->removeObject("boot-args"); + return; + } + + if (wasBootArgs) { + bootArgsData = (const UInt8 *)bootArgs->getCStringNoCopy(); + bootArgsDataLength = bootArgs->getLength(); + if (bootArgsData == 0) { + return; + } + + tmpDataLength = cnt + bootArgsDataLength; + tmpData = IONew(UInt8, tmpDataLength + 1); + if (tmpData == 0) { + return; + } + + cnt -= strlcpy((char *)tmpData, (const char *)bootCommandData, cnt); + strlcat((char *)tmpData, (const char *)bootArgsData, cnt); + + bootCommand = OSString::withCString((const char *)tmpData); + if (bootCommand != 0) { + _ofDict->setObject("boot-command", bootCommand); + bootCommand->release(); + } + + IODelete(tmpData, UInt8, tmpDataLength + 1); + } else { + bootArgs = OSString::withCString((const char *)(bootCommandData + cnt)); + if (bootArgs != 0) { + _ofDict->setObject("boot-args", bootArgs); + bootArgs->release(); + } + } } -bool IODTNVRAM::searchNVRAMProperty(IONVRAMDescriptor *hdr, UInt32 *where) +bool +IODTNVRAM::searchNVRAMProperty(IONVRAMDescriptor *hdr, UInt32 *where) { - return false; + return false; } -IOReturn IODTNVRAM::readNVRAMPropertyType0(IORegistryEntry *entry, - const OSSymbol **name, - OSData **value) +IOReturn +IODTNVRAM::readNVRAMPropertyType0(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } -IOReturn IODTNVRAM::writeNVRAMPropertyType0(IORegistryEntry *entry, - const OSSymbol *name, - OSData *value) +IOReturn +IODTNVRAM::writeNVRAMPropertyType0(IORegistryEntry *entry, + const OSSymbol *name, + OSData *value) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } -OSData *IODTNVRAM::unescapeBytesToData(const 
UInt8 *bytes, UInt32 length) +OSData * +IODTNVRAM::unescapeBytesToData(const UInt8 *bytes, UInt32 length) { - OSData *data = 0; - UInt32 totalLength = 0; - UInt32 cnt, cnt2; - UInt8 byte; - bool ok; - - // Calculate the actual length of the data. - ok = true; - totalLength = 0; - for (cnt = 0; cnt < length;) { - byte = bytes[cnt++]; - if (byte == 0xFF) { - byte = bytes[cnt++]; - if (byte == 0x00) { - ok = false; - break; - } - cnt2 = byte & 0x7F; - } else - cnt2 = 1; - totalLength += cnt2; - } - - if (ok) { - // Create an empty OSData of the correct size. - data = OSData::withCapacity(totalLength); - if (data != 0) { - for (cnt = 0; cnt < length;) { - byte = bytes[cnt++]; - if (byte == 0xFF) { - byte = bytes[cnt++]; - cnt2 = byte & 0x7F; - byte = (byte & 0x80) ? 0xFF : 0x00; - } else - cnt2 = 1; - data->appendByte(byte, cnt2); - } - } - } - - return data; + OSData *data = 0; + UInt32 totalLength = 0; + UInt32 cnt, cnt2; + UInt8 byte; + bool ok; + + // Calculate the actual length of the data. + ok = true; + totalLength = 0; + for (cnt = 0; cnt < length;) { + byte = bytes[cnt++]; + if (byte == 0xFF) { + byte = bytes[cnt++]; + if (byte == 0x00) { + ok = false; + break; + } + cnt2 = byte & 0x7F; + } else { + cnt2 = 1; + } + totalLength += cnt2; + } + + if (ok) { + // Create an empty OSData of the correct size. + data = OSData::withCapacity(totalLength); + if (data != 0) { + for (cnt = 0; cnt < length;) { + byte = bytes[cnt++]; + if (byte == 0xFF) { + byte = bytes[cnt++]; + cnt2 = byte & 0x7F; + byte = (byte & 0x80) ? 0xFF : 0x00; + } else { + cnt2 = 1; + } + data->appendByte(byte, cnt2); + } + } + } + + return data; } -OSData * IODTNVRAM::escapeDataToData(OSData * value) +OSData * +IODTNVRAM::escapeDataToData(OSData * value) { - OSData * result; - const UInt8 * startPtr; - const UInt8 * endPtr; - const UInt8 * wherePtr; - UInt8 byte; - bool ok = true; - - wherePtr = (const UInt8 *) value->getBytesNoCopy(); - endPtr = wherePtr + value->getLength(); - - result = OSData::withCapacity(endPtr - wherePtr); - if (!result) - return result; - - while (wherePtr < endPtr) { - startPtr = wherePtr; - byte = *wherePtr++; - if ((byte == 0x00) || (byte == 0xFF)) { - for (; - ((wherePtr - startPtr) < 0x80) && (wherePtr < endPtr) && (byte == *wherePtr); - wherePtr++) {} - ok &= result->appendByte(0xff, 1); - byte = (byte & 0x80) | (wherePtr - startPtr); - } - ok &= result->appendByte(byte, 1); - } - ok &= result->appendByte(0, 1); - - if (!ok) { - result->release(); - result = 0; - } - - return result; + OSData * result; + const UInt8 * startPtr; + const UInt8 * endPtr; + const UInt8 * wherePtr; + UInt8 byte; + bool ok = true; + + wherePtr = (const UInt8 *) value->getBytesNoCopy(); + endPtr = wherePtr + value->getLength(); + + result = OSData::withCapacity(endPtr - wherePtr); + if (!result) { + return result; + } + + while (wherePtr < endPtr) { + startPtr = wherePtr; + byte = *wherePtr++; + if ((byte == 0x00) || (byte == 0xFF)) { + for (; + ((wherePtr - startPtr) < 0x80) && (wherePtr < endPtr) && (byte == *wherePtr); + wherePtr++) { + } + ok &= result->appendByte(0xff, 1); + byte = (byte & 0x80) | (wherePtr - startPtr); + } + ok &= result->appendByte(byte, 1); + } + ok &= result->appendByte(0, 1); + + if (!ok) { + result->release(); + result = 0; + } + + return result; } -static bool IsApplePropertyName(const char * propName) +static bool +IsApplePropertyName(const char * propName) { - char c; - while ((c = *propName++)) { - if ((c >= 'A') && (c <= 'Z')) - break; - } + char c; + while ((c = *propName++)) { + if 
((c >= 'A') && (c <= 'Z')) { + break; + } + } - return (c == 0); + return c == 0; } -IOReturn IODTNVRAM::readNVRAMPropertyType1(IORegistryEntry *entry, - const OSSymbol **name, - OSData **value) +IOReturn +IODTNVRAM::readNVRAMPropertyType1(IORegistryEntry *entry, + const OSSymbol **name, + OSData **value) { - IOReturn err = kIOReturnNoResources; - OSData *data; - const UInt8 *startPtr; - const UInt8 *endPtr; - const UInt8 *wherePtr; - const UInt8 *nvPath = 0; - const char *nvName = 0; - const char *resultName = 0; - const UInt8 *resultValue = 0; - UInt32 resultValueLen = 0; - UInt8 byte; - - if (_ofDict == 0) return err; - - IOLockLock(_ofLock); - data = OSDynamicCast(OSData, _ofDict->getObject(_registryPropertiesKey)); - IOLockUnlock(_ofLock); - - if (data == 0) return err; - - startPtr = (const UInt8 *) data->getBytesNoCopy(); - endPtr = startPtr + data->getLength(); - - wherePtr = startPtr; - while (wherePtr < endPtr) { - byte = *(wherePtr++); - if (byte) - continue; - - if (nvPath == 0) - nvPath = startPtr; - else if (nvName == 0) - nvName = (const char *) startPtr; - else { - IORegistryEntry * compareEntry = IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane); - if (compareEntry) - compareEntry->release(); - if (entry == compareEntry) { - bool appleProp = IsApplePropertyName(nvName); - if (!appleProp || !resultName) { - resultName = nvName; - resultValue = startPtr; - resultValueLen = wherePtr - startPtr - 1; - } - if (!appleProp) - break; - } - nvPath = 0; - nvName = 0; - } - startPtr = wherePtr; - } - if (resultName) { - *name = OSSymbol::withCString(resultName); - *value = unescapeBytesToData(resultValue, resultValueLen); - if ((*name != 0) && (*value != 0)) - err = kIOReturnSuccess; - else - err = kIOReturnNoMemory; - } - return err; + IOReturn err = kIOReturnNoResources; + OSData *data; + const UInt8 *startPtr; + const UInt8 *endPtr; + const UInt8 *wherePtr; + const UInt8 *nvPath = 0; + const char *nvName = 0; + const char *resultName = 0; + const UInt8 *resultValue = 0; + UInt32 resultValueLen = 0; + UInt8 byte; + + if (_ofDict == 0) { + return err; + } + + IOLockLock(_ofLock); + data = OSDynamicCast(OSData, _ofDict->getObject(_registryPropertiesKey)); + IOLockUnlock(_ofLock); + + if (data == 0) { + return err; + } + + startPtr = (const UInt8 *) data->getBytesNoCopy(); + endPtr = startPtr + data->getLength(); + + wherePtr = startPtr; + while (wherePtr < endPtr) { + byte = *(wherePtr++); + if (byte) { + continue; + } + + if (nvPath == 0) { + nvPath = startPtr; + } else if (nvName == 0) { + nvName = (const char *) startPtr; + } else { + IORegistryEntry * compareEntry = IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane); + if (compareEntry) { + compareEntry->release(); + } + if (entry == compareEntry) { + bool appleProp = IsApplePropertyName(nvName); + if (!appleProp || !resultName) { + resultName = nvName; + resultValue = startPtr; + resultValueLen = wherePtr - startPtr - 1; + } + if (!appleProp) { + break; + } + } + nvPath = 0; + nvName = 0; + } + startPtr = wherePtr; + } + if (resultName) { + *name = OSSymbol::withCString(resultName); + *value = unescapeBytesToData(resultValue, resultValueLen); + if ((*name != 0) && (*value != 0)) { + err = kIOReturnSuccess; + } else { + err = kIOReturnNoMemory; + } + } + return err; } -IOReturn IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry, - const OSSymbol *propName, - OSData *value) +IOReturn +IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry, + const OSSymbol *propName, + OSData *value) { - OSData 
*oldData, *escapedData; - OSData *data = 0; - const UInt8 *startPtr; - const UInt8 *propStart; - const UInt8 *endPtr; - const UInt8 *wherePtr; - const UInt8 *nvPath = 0; - const char *nvName = 0; - const char * comp; - const char * name; - UInt8 byte; - bool ok = true; - bool settingAppleProp; - - if (_ofDict == 0) return kIOReturnNoResources; - - settingAppleProp = IsApplePropertyName(propName->getCStringNoCopy()); - - // copy over existing properties for other entries - - IOLockLock(_ofLock); - - oldData = OSDynamicCast(OSData, _ofDict->getObject(_registryPropertiesKey)); - if (oldData) { - - startPtr = (const UInt8 *) oldData->getBytesNoCopy(); - endPtr = startPtr + oldData->getLength(); - - propStart = startPtr; - wherePtr = startPtr; - while (wherePtr < endPtr) { - byte = *(wherePtr++); - if (byte) - continue; - if (nvPath == 0) - nvPath = startPtr; - else if (nvName == 0) - nvName = (const char *) startPtr; - else { - IORegistryEntry * compareEntry = IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane); - if (compareEntry) - compareEntry->release(); - if (entry == compareEntry) { - if ((settingAppleProp && propName->isEqualTo(nvName)) - || (!settingAppleProp && !IsApplePropertyName(nvName))) { - // delete old property (nvPath -> wherePtr) - data = OSData::withBytes(propStart, nvPath - propStart); - if (data) - ok &= data->appendBytes(wherePtr, endPtr - wherePtr); - break; - } - } - nvPath = 0; - nvName = 0; - } - - startPtr = wherePtr; - } - } - - // make the new property - - if (!data) { - if (oldData) - data = OSData::withData(oldData); - else - data = OSData::withCapacity(16); - if (!data) ok = false; - } - - if (ok && value && value->getLength()) do { - // get entries in path - OSArray *array = OSArray::withCapacity(5); - if (!array) { - ok = false; - break; - } - do - array->setObject(entry); - while ((entry = entry->getParentEntry(gIODTPlane))); - - // append path - for (int i = array->getCount() - 3; - (entry = (IORegistryEntry *) array->getObject(i)); - i--) { - - name = entry->getName(gIODTPlane); - comp = entry->getLocation(gIODTPlane); - if (comp) ok &= data->appendBytes("/@", 2); - else { - if (!name) continue; - ok &= data->appendByte('/', 1); - comp = name; - } - ok &= data->appendBytes(comp, strlen(comp)); - } - ok &= data->appendByte(0, 1); - array->release(); - - // append prop name - ok &= data->appendBytes(propName->getCStringNoCopy(), propName->getLength() + 1); - - // append escaped data - escapedData = escapeDataToData(value); - ok &= (escapedData != 0); - if (ok) ok &= data->appendBytes(escapedData); - - } while (false); - - oldData->retain(); - if (ok) { - ok = _ofDict->setObject(_registryPropertiesKey, data); - } - - if (data) data->release(); - - if (ok) { - if (syncVariables() != kIOReturnSuccess) { - if (oldData) { - _ofDict->setObject(_registryPropertiesKey, oldData); - } - else { - _ofDict->removeObject(_registryPropertiesKey); - } - (void) syncVariables(); - ok = false; - } - } - - if (oldData) { - oldData->release(); - } - - IOLockUnlock(_ofLock); - - return ok ? 
kIOReturnSuccess : kIOReturnNoMemory; + OSData *oldData, *escapedData; + OSData *data = 0; + const UInt8 *startPtr; + const UInt8 *propStart; + const UInt8 *endPtr; + const UInt8 *wherePtr; + const UInt8 *nvPath = 0; + const char *nvName = 0; + const char * comp; + const char * name; + UInt8 byte; + bool ok = true; + bool settingAppleProp; + + if (_ofDict == 0) { + return kIOReturnNoResources; + } + + settingAppleProp = IsApplePropertyName(propName->getCStringNoCopy()); + + // copy over existing properties for other entries + + IOLockLock(_ofLock); + + oldData = OSDynamicCast(OSData, _ofDict->getObject(_registryPropertiesKey)); + if (oldData) { + startPtr = (const UInt8 *) oldData->getBytesNoCopy(); + endPtr = startPtr + oldData->getLength(); + + propStart = startPtr; + wherePtr = startPtr; + while (wherePtr < endPtr) { + byte = *(wherePtr++); + if (byte) { + continue; + } + if (nvPath == 0) { + nvPath = startPtr; + } else if (nvName == 0) { + nvName = (const char *) startPtr; + } else { + IORegistryEntry * compareEntry = IORegistryEntry::fromPath((const char *) nvPath, gIODTPlane); + if (compareEntry) { + compareEntry->release(); + } + if (entry == compareEntry) { + if ((settingAppleProp && propName->isEqualTo(nvName)) + || (!settingAppleProp && !IsApplePropertyName(nvName))) { + // delete old property (nvPath -> wherePtr) + data = OSData::withBytes(propStart, nvPath - propStart); + if (data) { + ok &= data->appendBytes(wherePtr, endPtr - wherePtr); + } + break; + } + } + nvPath = 0; + nvName = 0; + } + + startPtr = wherePtr; + } + } + + // make the new property + + if (!data) { + if (oldData) { + data = OSData::withData(oldData); + } else { + data = OSData::withCapacity(16); + } + if (!data) { + ok = false; + } + } + + if (ok && value && value->getLength()) { + do { + // get entries in path + OSArray *array = OSArray::withCapacity(5); + if (!array) { + ok = false; + break; + } + do{ + array->setObject(entry); + } while ((entry = entry->getParentEntry(gIODTPlane))); + + // append path + for (int i = array->getCount() - 3; + (entry = (IORegistryEntry *) array->getObject(i)); + i--) { + name = entry->getName(gIODTPlane); + comp = entry->getLocation(gIODTPlane); + if (comp) { + ok &= data->appendBytes("/@", 2); + } else { + if (!name) { + continue; + } + ok &= data->appendByte('/', 1); + comp = name; + } + ok &= data->appendBytes(comp, strlen(comp)); + } + ok &= data->appendByte(0, 1); + array->release(); + + // append prop name + ok &= data->appendBytes(propName->getCStringNoCopy(), propName->getLength() + 1); + + // append escaped data + escapedData = escapeDataToData(value); + ok &= (escapedData != 0); + if (ok) { + ok &= data->appendBytes(escapedData); + } + } while (false); + } + + oldData->retain(); + if (ok) { + ok = _ofDict->setObject(_registryPropertiesKey, data); + } + + if (data) { + data->release(); + } + + if (ok) { + if (syncVariables() != kIOReturnSuccess) { + if (oldData) { + _ofDict->setObject(_registryPropertiesKey, oldData); + } else { + _ofDict->removeObject(_registryPropertiesKey); + } + (void) syncVariables(); + ok = false; + } + } + + if (oldData) { + oldData->release(); + } + + IOLockUnlock(_ofLock); + + return ok ? kIOReturnSuccess : kIOReturnNoMemory; } -bool IODTNVRAM::safeToSync(void) +bool +IODTNVRAM::safeToSync(void) { - AbsoluteTime delta; - UInt64 delta_ns; - SInt32 delta_secs; - + AbsoluteTime delta; + UInt64 delta_ns; + SInt32 delta_secs; + // delta interval went by clock_get_uptime(&delta); - - // Figure it in seconds. 
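/*
 * Illustrative sketch (not part of this patch): safeToSync() converts the
 * uptime delta to whole seconds, as the code below does, and permits a sync
 * only when more than MIN_SYNC_NOW_INTERVAL seconds have elapsed since the
 * last one, or when _freshInterval overrides the check. A user-space
 * analogue with std::chrono; the 15-minute figure is an assumption for
 * illustration, not xnu's actual constant.
 */
#include <chrono>

struct SyncThrottle {
    std::chrono::steady_clock::time_point lastSync{};
    bool freshInterval = true;                  // first call always syncs
    std::chrono::seconds minInterval{15 * 60};  // assumed interval

    bool safeToSync() {
        auto now = std::chrono::steady_clock::now();
        if (freshInterval || now - lastSync > minInterval) {
            lastSync = now;                     // record this sync
            freshInterval = false;
            return true;
        }
        return false;                           // throttled
    }
};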
- absolutetime_to_nanoseconds(delta, &delta_ns); - delta_secs = (SInt32)(delta_ns / NSEC_PER_SEC); - if ((delta_secs > (_lastDeviceSync + MIN_SYNC_NOW_INTERVAL)) || _freshInterval) - { + // Figure it in seconds. + absolutetime_to_nanoseconds(delta, &delta_ns); + delta_secs = (SInt32)(delta_ns / NSEC_PER_SEC); + + if ((delta_secs > (_lastDeviceSync + MIN_SYNC_NOW_INTERVAL)) || _freshInterval) { _lastDeviceSync = delta_secs; _freshInterval = FALSE; return TRUE; diff --git a/iokit/Kernel/IOPMPowerSource.cpp b/iokit/Kernel/IOPMPowerSource.cpp index 614f4caa3..e89680c08 100644 --- a/iokit/Kernel/IOPMPowerSource.cpp +++ b/iokit/Kernel/IOPMPowerSource.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -44,80 +44,124 @@ OSDefineMetaClassAndStructors(IOPMPowerSource, IOService) IOPMPowerSource *IOPMPowerSource::powerSource(void) { - IOPMPowerSource *ps = new IOPMPowerSource; + IOPMPowerSource *ps = new IOPMPowerSource; - if(ps) { - ps->init(); - return ps; - } - return NULL; + if (ps) { + ps->init(); + return ps; + } + return NULL; } // ***************************************************************************** // init // // ***************************************************************************** -bool IOPMPowerSource::init (void) -{ - if (!super::init()) { - return false; - } - - nextInList = NULL; - - properties = OSDictionary::withCapacity(10); - if(!properties) return false; - properties->setCapacityIncrement(1); - - externalConnectedKey = OSSymbol::withCString(kIOPMPSExternalConnectedKey); - externalChargeCapableKey = OSSymbol::withCString(kIOPMPSExternalChargeCapableKey); - batteryInstalledKey = OSSymbol::withCString(kIOPMPSBatteryInstalledKey); - chargingKey = OSSymbol::withCString(kIOPMPSIsChargingKey); - warnLevelKey = OSSymbol::withCString(kIOPMPSAtWarnLevelKey); - criticalLevelKey = OSSymbol::withCString(kIOPMPSAtCriticalLevelKey); - currentCapacityKey = OSSymbol::withCString(kIOPMPSCurrentCapacityKey); - maxCapacityKey = OSSymbol::withCString(kIOPMPSMaxCapacityKey); - timeRemainingKey = OSSymbol::withCString(kIOPMPSTimeRemainingKey); - amperageKey = OSSymbol::withCString(kIOPMPSAmperageKey); - voltageKey = OSSymbol::withCString(kIOPMPSVoltageKey); - cycleCountKey = OSSymbol::withCString(kIOPMPSCycleCountKey); - adapterInfoKey = OSSymbol::withCString(kIOPMPSAdapterInfoKey); - locationKey = OSSymbol::withCString(kIOPMPSLocationKey); - errorConditionKey = 
OSSymbol::withCString(kIOPMPSErrorConditionKey); - manufacturerKey = OSSymbol::withCString(kIOPMPSManufacturerKey); - modelKey = OSSymbol::withCString(kIOPMPSModelKey); - serialKey = OSSymbol::withCString(kIOPMPSSerialKey); - batteryInfoKey = OSSymbol::withCString(kIOPMPSLegacyBatteryInfoKey); - - return true; +bool +IOPMPowerSource::init(void) +{ + if (!super::init()) { + return false; + } + + nextInList = NULL; + + properties = OSDictionary::withCapacity(10); + if (!properties) { + return false; + } + properties->setCapacityIncrement(1); + + externalConnectedKey = OSSymbol::withCString(kIOPMPSExternalConnectedKey); + externalChargeCapableKey = OSSymbol::withCString(kIOPMPSExternalChargeCapableKey); + batteryInstalledKey = OSSymbol::withCString(kIOPMPSBatteryInstalledKey); + chargingKey = OSSymbol::withCString(kIOPMPSIsChargingKey); + warnLevelKey = OSSymbol::withCString(kIOPMPSAtWarnLevelKey); + criticalLevelKey = OSSymbol::withCString(kIOPMPSAtCriticalLevelKey); + currentCapacityKey = OSSymbol::withCString(kIOPMPSCurrentCapacityKey); + maxCapacityKey = OSSymbol::withCString(kIOPMPSMaxCapacityKey); + timeRemainingKey = OSSymbol::withCString(kIOPMPSTimeRemainingKey); + amperageKey = OSSymbol::withCString(kIOPMPSAmperageKey); + voltageKey = OSSymbol::withCString(kIOPMPSVoltageKey); + cycleCountKey = OSSymbol::withCString(kIOPMPSCycleCountKey); + adapterInfoKey = OSSymbol::withCString(kIOPMPSAdapterInfoKey); + locationKey = OSSymbol::withCString(kIOPMPSLocationKey); + errorConditionKey = OSSymbol::withCString(kIOPMPSErrorConditionKey); + manufacturerKey = OSSymbol::withCString(kIOPMPSManufacturerKey); + modelKey = OSSymbol::withCString(kIOPMPSModelKey); + serialKey = OSSymbol::withCString(kIOPMPSSerialKey); + batteryInfoKey = OSSymbol::withCString(kIOPMPSLegacyBatteryInfoKey); + + return true; } // ***************************************************************************** // free // // ***************************************************************************** -void IOPMPowerSource::free(void) -{ - if(properties) properties->release(); - if(externalConnectedKey) externalConnectedKey->release(); - if(externalChargeCapableKey) externalChargeCapableKey->release(); - if(batteryInstalledKey) batteryInstalledKey->release(); - if(chargingKey) chargingKey->release(); - if(warnLevelKey) warnLevelKey->release(); - if(criticalLevelKey) criticalLevelKey->release(); - if(currentCapacityKey) currentCapacityKey->release(); - if(maxCapacityKey) maxCapacityKey->release(); - if(timeRemainingKey) timeRemainingKey->release(); - if(amperageKey) amperageKey->release(); - if(voltageKey) voltageKey->release(); - if(cycleCountKey) cycleCountKey->release(); - if(adapterInfoKey) adapterInfoKey->release(); - if(errorConditionKey) errorConditionKey->release(); - if(manufacturerKey) manufacturerKey->release(); - if(modelKey) modelKey->release(); - if(serialKey) serialKey->release(); - if(locationKey) locationKey->release(); - if(batteryInfoKey) batteryInfoKey->release(); +void +IOPMPowerSource::free(void) +{ + if (properties) { + properties->release(); + } + if (externalConnectedKey) { + externalConnectedKey->release(); + } + if (externalChargeCapableKey) { + externalChargeCapableKey->release(); + } + if (batteryInstalledKey) { + batteryInstalledKey->release(); + } + if (chargingKey) { + chargingKey->release(); + } + if (warnLevelKey) { + warnLevelKey->release(); + } + if (criticalLevelKey) { + criticalLevelKey->release(); + } + if (currentCapacityKey) { + currentCapacityKey->release(); + } + if (maxCapacityKey) { 
+ maxCapacityKey->release(); + } + if (timeRemainingKey) { + timeRemainingKey->release(); + } + if (amperageKey) { + amperageKey->release(); + } + if (voltageKey) { + voltageKey->release(); + } + if (cycleCountKey) { + cycleCountKey->release(); + } + if (adapterInfoKey) { + adapterInfoKey->release(); + } + if (errorConditionKey) { + errorConditionKey->release(); + } + if (manufacturerKey) { + manufacturerKey->release(); + } + if (modelKey) { + modelKey->release(); + } + if (serialKey) { + serialKey->release(); + } + if (locationKey) { + locationKey->release(); + } + if (batteryInfoKey) { + batteryInfoKey->release(); + } } // ***************************************************************************** @@ -126,33 +170,42 @@ void IOPMPowerSource::free(void) // Update power source state in IORegistry and message interested clients // notifying them of our change. // ***************************************************************************** -void IOPMPowerSource::updateStatus (void) +void +IOPMPowerSource::updateStatus(void) { - OSCollectionIterator *iterator; - OSObject *iteratorKey; - OSObject *obj; + OSCollectionIterator *iterator; + OSObject *iteratorKey; + OSObject *obj; - // do nothing if settings haven't changed - if(!settingsChangedSinceUpdate) return; + // do nothing if settings haven't changed + if (!settingsChangedSinceUpdate) { + return; + } - iterator = OSCollectionIterator::withCollection(properties); - if(!iterator) return; + iterator = OSCollectionIterator::withCollection(properties); + if (!iterator) { + return; + } - while ((iteratorKey = iterator->getNextObject())) { - OSSymbol *key; - - key = OSDynamicCast(OSSymbol, iteratorKey); - if (!key) continue; - obj = properties->getObject(key); - if(!obj) continue; - setProperty(key, obj); - } - iterator->release(); + while ((iteratorKey = iterator->getNextObject())) { + OSSymbol *key; + + key = OSDynamicCast(OSSymbol, iteratorKey); + if (!key) { + continue; + } + obj = properties->getObject(key); + if (!obj) { + continue; + } + setProperty(key, obj); + } + iterator->release(); - settingsChangedSinceUpdate = false; + settingsChangedSinceUpdate = false; - // And up goes the flare - messageClients(kIOPMMessageBatteryStatusHasChanged); + // And up goes the flare + messageClients(kIOPMMessageBatteryStatusHasChanged); } @@ -161,133 +214,174 @@ void IOPMPowerSource::updateStatus (void) * PROTECTED Accessors. All the setters! Yay! * ******************************************************************************/ - -void IOPMPowerSource::setPSProperty(const OSSymbol *key, OSObject *val) + +void +IOPMPowerSource::setPSProperty(const OSSymbol *key, OSObject *val) { - OSObject *lastVal; + OSObject *lastVal; - if(!key || !val) return; + if (!key || !val) { + return; + } - // Compare new setting with existing setting; update - // 'settingsChangedSinceUpdate' if the setting has changed. - // If values are OSNumbers, do equality comparison. - // Otherwise, just compare pointers. - - if( (lastVal = properties->getObject(key)) ) { - if(val->isEqualTo(lastVal)) { - // settings didn't change + // Compare new setting with existing setting; update + // 'settingsChangedSinceUpdate' if the setting has changed. + // If values are OSNumbers, do equality comparison. + // Otherwise, just compare pointers. 
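/*
 * Illustrative sketch (not part of this patch): setPSProperty() below only
 * raises settingsChangedSinceUpdate when the incoming value is new or
 * differs from the stored one, which is what lets updateStatus() skip
 * registry and client traffic for no-op updates. The same bookkeeping over
 * a std::map; PowerSourceModel is an invented name.
 */
#include <map>
#include <string>

struct PowerSourceModel {
    std::map<std::string, std::string> properties;
    bool settingsChangedSinceUpdate = false;

    void setProperty(const std::string &key, const std::string &val) {
        auto it = properties.find(key);
        if (it == properties.end() || it->second != val) {
            settingsChangedSinceUpdate = true;  // new key or changed value
        }
        properties[key] = val;                  // stored unconditionally
    }
};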
+ + if ((lastVal = properties->getObject(key))) { + if (val->isEqualTo(lastVal)) { + // settings didn't change + } else { + // num val is not equal to last val + settingsChangedSinceUpdate = true; + } } else { - // num val is not equal to last val - settingsChangedSinceUpdate = true; + // new setting; no last value + settingsChangedSinceUpdate = true; } - } else { - // new setting; no last value - settingsChangedSinceUpdate = true; - } - - // here's the part where we go crazy. - properties->setObject(key, val); + + // here's the part where we go crazy. + properties->setObject(key, val); } - -void IOPMPowerSource::setExternalConnected(bool b) { - setPSProperty(externalConnectedKey, - b ? kOSBooleanTrue : kOSBooleanFalse); + +void +IOPMPowerSource::setExternalConnected(bool b) +{ + setPSProperty(externalConnectedKey, + b ? kOSBooleanTrue : kOSBooleanFalse); } -void IOPMPowerSource::setExternalChargeCapable(bool b) { - setPSProperty(externalChargeCapableKey, - b ? kOSBooleanTrue : kOSBooleanFalse); +void +IOPMPowerSource::setExternalChargeCapable(bool b) +{ + setPSProperty(externalChargeCapableKey, + b ? kOSBooleanTrue : kOSBooleanFalse); } -void IOPMPowerSource::setBatteryInstalled(bool b) { - setPSProperty(batteryInstalledKey, - b ? kOSBooleanTrue : kOSBooleanFalse); +void +IOPMPowerSource::setBatteryInstalled(bool b) +{ + setPSProperty(batteryInstalledKey, + b ? kOSBooleanTrue : kOSBooleanFalse); } -void IOPMPowerSource::setIsCharging(bool b) { - setPSProperty(chargingKey, - b ? kOSBooleanTrue : kOSBooleanFalse); +void +IOPMPowerSource::setIsCharging(bool b) +{ + setPSProperty(chargingKey, + b ? kOSBooleanTrue : kOSBooleanFalse); } -void IOPMPowerSource::setAtWarnLevel(bool b) { - setPSProperty(warnLevelKey, - b ? kOSBooleanTrue : kOSBooleanFalse); +void +IOPMPowerSource::setAtWarnLevel(bool b) +{ + setPSProperty(warnLevelKey, + b ? kOSBooleanTrue : kOSBooleanFalse); } -void IOPMPowerSource::setAtCriticalLevel(bool b) { - setPSProperty(criticalLevelKey, - b ? kOSBooleanTrue : kOSBooleanFalse); +void +IOPMPowerSource::setAtCriticalLevel(bool b) +{ + setPSProperty(criticalLevelKey, + b ? 
kOSBooleanTrue : kOSBooleanFalse); } -void IOPMPowerSource::setCurrentCapacity(unsigned int val) { - OSNumber *n = OSNumber::withNumber(val, 32); - setPSProperty(currentCapacityKey, n); - n->release(); +void +IOPMPowerSource::setCurrentCapacity(unsigned int val) +{ + OSNumber *n = OSNumber::withNumber(val, 32); + setPSProperty(currentCapacityKey, n); + n->release(); } -void IOPMPowerSource::setMaxCapacity(unsigned int val) { - OSNumber *n = OSNumber::withNumber(val, 32); - setPSProperty(maxCapacityKey, n); - n->release(); +void +IOPMPowerSource::setMaxCapacity(unsigned int val) +{ + OSNumber *n = OSNumber::withNumber(val, 32); + setPSProperty(maxCapacityKey, n); + n->release(); } -void IOPMPowerSource::setTimeRemaining(int val) { - OSNumber *n = OSNumber::withNumber(val, 32); - setPSProperty(timeRemainingKey, n); - n->release(); +void +IOPMPowerSource::setTimeRemaining(int val) +{ + OSNumber *n = OSNumber::withNumber(val, 32); + setPSProperty(timeRemainingKey, n); + n->release(); } -void IOPMPowerSource::setAmperage(int val) { - OSNumber *n = OSNumber::withNumber(val, 32); - setPSProperty(amperageKey, n); - n->release(); +void +IOPMPowerSource::setAmperage(int val) +{ + OSNumber *n = OSNumber::withNumber(val, 32); + setPSProperty(amperageKey, n); + n->release(); } -void IOPMPowerSource::setVoltage(unsigned int val) { - OSNumber *n = OSNumber::withNumber(val, 32); - setPSProperty(voltageKey, n); - n->release(); +void +IOPMPowerSource::setVoltage(unsigned int val) +{ + OSNumber *n = OSNumber::withNumber(val, 32); + setPSProperty(voltageKey, n); + n->release(); } -void IOPMPowerSource::setCycleCount(unsigned int val) { - OSNumber *n = OSNumber::withNumber(val, 32); - setPSProperty(cycleCountKey, n); - n->release(); +void +IOPMPowerSource::setCycleCount(unsigned int val) +{ + OSNumber *n = OSNumber::withNumber(val, 32); + setPSProperty(cycleCountKey, n); + n->release(); } -void IOPMPowerSource::setAdapterInfo(int val) { - OSNumber *n = OSNumber::withNumber(val, 32); - setPSProperty(adapterInfoKey, n); - n->release(); +void +IOPMPowerSource::setAdapterInfo(int val) +{ + OSNumber *n = OSNumber::withNumber(val, 32); + setPSProperty(adapterInfoKey, n); + n->release(); } -void IOPMPowerSource::setLocation(int val) { - OSNumber *n = OSNumber::withNumber(val, 32); - setPSProperty(locationKey, n); - n->release(); +void +IOPMPowerSource::setLocation(int val) +{ + OSNumber *n = OSNumber::withNumber(val, 32); + setPSProperty(locationKey, n); + n->release(); } -void IOPMPowerSource::setErrorCondition(OSSymbol *s) { - setPSProperty(errorConditionKey, s); +void +IOPMPowerSource::setErrorCondition(OSSymbol *s) +{ + setPSProperty(errorConditionKey, s); } -void IOPMPowerSource::setManufacturer(OSSymbol *s) { - setPSProperty(manufacturerKey, s); +void +IOPMPowerSource::setManufacturer(OSSymbol *s) +{ + setPSProperty(manufacturerKey, s); } -void IOPMPowerSource::setModel(OSSymbol *s) { - setPSProperty(modelKey, s); +void +IOPMPowerSource::setModel(OSSymbol *s) +{ + setPSProperty(modelKey, s); } -void IOPMPowerSource::setSerial(OSSymbol *s) { - setPSProperty(serialKey, s); +void +IOPMPowerSource::setSerial(OSSymbol *s) +{ + setPSProperty(serialKey, s); } -void IOPMPowerSource::setLegacyIOBatteryInfo(OSDictionary *d) { - setPSProperty(batteryInfoKey, d); +void +IOPMPowerSource::setLegacyIOBatteryInfo(OSDictionary *d) +{ + setPSProperty(batteryInfoKey, d); } @@ -299,118 +393,185 @@ void IOPMPowerSource::setLegacyIOBatteryInfo(OSDictionary *d) { * 
******************************************************************************/ -OSObject *IOPMPowerSource::getPSProperty(const OSSymbol *symmie) { - if(!symmie) return NULL; - return properties->getObject(symmie); +OSObject * +IOPMPowerSource::getPSProperty(const OSSymbol *symmie) +{ + if (!symmie) { + return NULL; + } + return properties->getObject(symmie); } -bool IOPMPowerSource::externalConnected(void) { - return (kOSBooleanTrue == properties->getObject(externalConnectedKey)); +bool +IOPMPowerSource::externalConnected(void) +{ + return kOSBooleanTrue == properties->getObject(externalConnectedKey); } -bool IOPMPowerSource::externalChargeCapable(void) { - return (kOSBooleanTrue == properties->getObject(externalChargeCapableKey)); +bool +IOPMPowerSource::externalChargeCapable(void) +{ + return kOSBooleanTrue == properties->getObject(externalChargeCapableKey); } -bool IOPMPowerSource::batteryInstalled(void) { - return (kOSBooleanTrue == properties->getObject(batteryInstalledKey)); +bool +IOPMPowerSource::batteryInstalled(void) +{ + return kOSBooleanTrue == properties->getObject(batteryInstalledKey); } -bool IOPMPowerSource::isCharging(void) { - return (kOSBooleanTrue == properties->getObject(chargingKey)); +bool +IOPMPowerSource::isCharging(void) +{ + return kOSBooleanTrue == properties->getObject(chargingKey); } -bool IOPMPowerSource::atWarnLevel(void) { - return (kOSBooleanTrue == properties->getObject(warnLevelKey)); +bool +IOPMPowerSource::atWarnLevel(void) +{ + return kOSBooleanTrue == properties->getObject(warnLevelKey); } -bool IOPMPowerSource::atCriticalLevel(void) { - return (kOSBooleanTrue == properties->getObject(criticalLevelKey)); +bool +IOPMPowerSource::atCriticalLevel(void) +{ + return kOSBooleanTrue == properties->getObject(criticalLevelKey); } -unsigned int IOPMPowerSource::currentCapacity(void) { - OSNumber *n; - n = OSDynamicCast(OSNumber, properties->getObject(currentCapacityKey)); - if(!n) return 0; - else return (unsigned int)n->unsigned32BitValue(); +unsigned int +IOPMPowerSource::currentCapacity(void) +{ + OSNumber *n; + n = OSDynamicCast(OSNumber, properties->getObject(currentCapacityKey)); + if (!n) { + return 0; + } else { + return (unsigned int)n->unsigned32BitValue(); + } } -unsigned int IOPMPowerSource::maxCapacity(void) { - OSNumber *n; - n = OSDynamicCast(OSNumber, properties->getObject(maxCapacityKey)); - if(!n) return 0; - else return (unsigned int)n->unsigned32BitValue(); +unsigned int +IOPMPowerSource::maxCapacity(void) +{ + OSNumber *n; + n = OSDynamicCast(OSNumber, properties->getObject(maxCapacityKey)); + if (!n) { + return 0; + } else { + return (unsigned int)n->unsigned32BitValue(); + } } -unsigned int IOPMPowerSource::capacityPercentRemaining(void) +unsigned int +IOPMPowerSource::capacityPercentRemaining(void) { - unsigned int _currentCapacity = currentCapacity(); - unsigned int _maxCapacity = maxCapacity(); - if(0 == _maxCapacity) { - return 0; - } else { - return ((100*_currentCapacity) / _maxCapacity); - } + unsigned int _currentCapacity = currentCapacity(); + unsigned int _maxCapacity = maxCapacity(); + if (0 == _maxCapacity) { + return 0; + } else { + return (100 * _currentCapacity) / _maxCapacity; + } } -int IOPMPowerSource::timeRemaining(void) { - OSNumber *n; - n = OSDynamicCast(OSNumber, properties->getObject(timeRemainingKey)); - if(!n) return 0; - else return (int)n->unsigned32BitValue(); +int +IOPMPowerSource::timeRemaining(void) +{ + OSNumber *n; + n = OSDynamicCast(OSNumber, properties->getObject(timeRemainingKey)); + if (!n) { + 
return 0; + } else { + return (int)n->unsigned32BitValue(); + } } -int IOPMPowerSource::amperage(void) { - OSNumber *n; - n = OSDynamicCast(OSNumber, properties->getObject(amperageKey)); - if(!n) return 0; - else return (int)n->unsigned32BitValue(); +int +IOPMPowerSource::amperage(void) +{ + OSNumber *n; + n = OSDynamicCast(OSNumber, properties->getObject(amperageKey)); + if (!n) { + return 0; + } else { + return (int)n->unsigned32BitValue(); + } } -unsigned int IOPMPowerSource::voltage(void) { - OSNumber *n; - n = OSDynamicCast(OSNumber, properties->getObject(voltageKey)); - if(!n) return 0; - else return (unsigned int)n->unsigned32BitValue(); +unsigned int +IOPMPowerSource::voltage(void) +{ + OSNumber *n; + n = OSDynamicCast(OSNumber, properties->getObject(voltageKey)); + if (!n) { + return 0; + } else { + return (unsigned int)n->unsigned32BitValue(); + } } -unsigned int IOPMPowerSource::cycleCount(void) { - OSNumber *n; - n = OSDynamicCast(OSNumber, properties->getObject(cycleCountKey)); - if(!n) return 0; - else return (unsigned int)n->unsigned32BitValue(); +unsigned int +IOPMPowerSource::cycleCount(void) +{ + OSNumber *n; + n = OSDynamicCast(OSNumber, properties->getObject(cycleCountKey)); + if (!n) { + return 0; + } else { + return (unsigned int)n->unsigned32BitValue(); + } } -int IOPMPowerSource::adapterInfo(void) { - OSNumber *n; - n = OSDynamicCast(OSNumber, properties->getObject(adapterInfoKey)); - if(!n) return 0; - else return (int)n->unsigned32BitValue(); +int +IOPMPowerSource::adapterInfo(void) +{ + OSNumber *n; + n = OSDynamicCast(OSNumber, properties->getObject(adapterInfoKey)); + if (!n) { + return 0; + } else { + return (int)n->unsigned32BitValue(); + } } -int IOPMPowerSource::location(void) { - OSNumber *n; - n = OSDynamicCast(OSNumber, properties->getObject(locationKey)); - if(!n) return 0; - else return (unsigned int)n->unsigned32BitValue(); +int +IOPMPowerSource::location(void) +{ + OSNumber *n; + n = OSDynamicCast(OSNumber, properties->getObject(locationKey)); + if (!n) { + return 0; + } else { + return (unsigned int)n->unsigned32BitValue(); + } } -OSSymbol *IOPMPowerSource::errorCondition(void) { - return OSDynamicCast(OSSymbol, properties->getObject(errorConditionKey)); +OSSymbol * +IOPMPowerSource::errorCondition(void) +{ + return OSDynamicCast(OSSymbol, properties->getObject(errorConditionKey)); } -OSSymbol *IOPMPowerSource::manufacturer(void) { - return OSDynamicCast(OSSymbol, properties->getObject(manufacturerKey)); +OSSymbol * +IOPMPowerSource::manufacturer(void) +{ + return OSDynamicCast(OSSymbol, properties->getObject(manufacturerKey)); } -OSSymbol *IOPMPowerSource::model(void) { - return OSDynamicCast(OSSymbol, properties->getObject(modelKey)); +OSSymbol * +IOPMPowerSource::model(void) +{ + return OSDynamicCast(OSSymbol, properties->getObject(modelKey)); } -OSSymbol *IOPMPowerSource::serial(void) { - return OSDynamicCast(OSSymbol, properties->getObject(serialKey)); +OSSymbol * +IOPMPowerSource::serial(void) +{ + return OSDynamicCast(OSSymbol, properties->getObject(serialKey)); } -OSDictionary *IOPMPowerSource::legacyIOBatteryInfo(void) { - return OSDynamicCast(OSDictionary, properties->getObject(batteryInfoKey)); +OSDictionary * +IOPMPowerSource::legacyIOBatteryInfo(void) +{ + return OSDynamicCast(OSDictionary, properties->getObject(batteryInfoKey)); } diff --git a/iokit/Kernel/IOPMPowerSourceList.cpp b/iokit/Kernel/IOPMPowerSourceList.cpp index a933ebf76..e24770e7d 100644 --- a/iokit/Kernel/IOPMPowerSourceList.cpp +++ b/iokit/Kernel/IOPMPowerSourceList.cpp 
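/*
 * Illustrative sketch (not part of this patch): IOPMPowerSourceList,
 * reformatted in the hunks that follow, is an intrusive singly linked
 * list: each IOPMPowerSource carries its own nextInList link, and
 * addToList() scans for duplicates before pushing onto the head. The
 * same shape in plain C++; Node and List are invented names.
 */
struct Node {
    Node *nextInList = nullptr;    // intrusive link, as in IOPMPowerSource
};

struct List {
    Node *firstItem = nullptr;
    unsigned long length = 0;

    void addToList(Node *n) {
        // Duplicate check: walk the chain looking for the same pointer.
        for (Node *p = firstItem; p != nullptr; p = p->nextInList) {
            if (p == n) {
                return;            // already present; nothing to do
            }
        }
        n->nextInList = firstItem; // O(1) push-front
        firstItem = n;
        length++;
    }
};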
@@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -30,16 +30,17 @@ #include #define super OSObject -OSDefineMetaClassAndStructors(IOPMPowerSourceList,OSObject) +OSDefineMetaClassAndStructors(IOPMPowerSourceList, OSObject) //****************************************************************************** // init // //****************************************************************************** -void IOPMPowerSourceList::initialize ( void ) +void +IOPMPowerSourceList::initialize( void ) { - firstItem = NULL; - length = 0; + firstItem = NULL; + length = 0; } //****************************************************************************** @@ -47,27 +48,26 @@ void IOPMPowerSourceList::initialize ( void ) // //****************************************************************************** -IOReturn IOPMPowerSourceList::addToList(IOPMPowerSource *newPowerSource) +IOReturn +IOPMPowerSourceList::addToList(IOPMPowerSource *newPowerSource) { - IOPMPowerSource * nextPowerSource; - - // Is new object already in the list? - nextPowerSource = firstItem; - while ( nextPowerSource != NULL ) - { - if ( nextPowerSource == newPowerSource ) - { - // yes, just return - return IOPMNoErr; - } - nextPowerSource = nextInList(nextPowerSource); - } - - // add it to list - newPowerSource->nextInList = firstItem; - firstItem = newPowerSource; - length++; - return IOPMNoErr; + IOPMPowerSource * nextPowerSource; + + // Is new object already in the list? 
+ nextPowerSource = firstItem; + while (nextPowerSource != NULL) { + if (nextPowerSource == newPowerSource) { + // yes, just return + return IOPMNoErr; + } + nextPowerSource = nextInList(nextPowerSource); + } + + // add it to list + newPowerSource->nextInList = firstItem; + firstItem = newPowerSource; + length++; + return IOPMNoErr; } @@ -76,9 +76,10 @@ IOReturn IOPMPowerSourceList::addToList(IOPMPowerSource *newPowerSource) // //****************************************************************************** -IOPMPowerSource * IOPMPowerSourceList::firstInList ( void ) +IOPMPowerSource * +IOPMPowerSourceList::firstInList( void ) { - return firstItem; + return firstItem; } //****************************************************************************** @@ -86,12 +87,13 @@ IOPMPowerSource * IOPMPowerSourceList::firstInList ( void ) // //****************************************************************************** -IOPMPowerSource * IOPMPowerSourceList::nextInList(IOPMPowerSource *currentItem) +IOPMPowerSource * +IOPMPowerSourceList::nextInList(IOPMPowerSource *currentItem) { - if ( currentItem != NULL ) { - return (currentItem->nextInList); - } - return NULL; + if (currentItem != NULL) { + return currentItem->nextInList; + } + return NULL; } //****************************************************************************** @@ -99,9 +101,10 @@ IOPMPowerSource * IOPMPowerSourceList::nextInList(IOPMPowerSource *currentItem) // //****************************************************************************** -unsigned long IOPMPowerSourceList::numberOfItems ( void ) +unsigned long +IOPMPowerSourceList::numberOfItems( void ) { - return length; + return length; } //****************************************************************************** @@ -110,32 +113,35 @@ unsigned long IOPMPowerSourceList::numberOfItems ( void ) // Find the item in the list, unlink it, and free it. 
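/*
 * Illustrative sketch (not part of this patch), companion to the List
 * sketch earlier: removeFromList() handles the head slot specially, then
 * walks predecessor links to splice the target out; the kernel version
 * additionally release()s the unlinked object. Reuses the invented
 * Node/List shapes from the sketch above.
 */
static bool
removeFromList(List &list, Node *target)
{
    if (list.firstItem == nullptr) {
        return false;                            // empty list
    }
    if (list.firstItem == target) {              // head case
        list.firstItem = target->nextInList;
        list.length--;
        return true;
    }
    for (Node *p = list.firstItem; p->nextInList != nullptr; p = p->nextInList) {
        if (p->nextInList == target) {           // found the predecessor
            p->nextInList = target->nextInList;  // splice out
            list.length--;
            return true;
        }
    }
    return false;                                // not in the list
}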
//****************************************************************************** -IOReturn IOPMPowerSourceList::removeFromList ( IOPMPowerSource * theItem ) +IOReturn +IOPMPowerSourceList::removeFromList( IOPMPowerSource * theItem ) { - IOPMPowerSource * item = firstItem; - IOPMPowerSource * temp; - - if ( NULL == item) goto exit; - - if ( item == theItem ) { - firstItem = item->nextInList; - length--; - item->release(); - return IOPMNoErr; - } - while ( item->nextInList != NULL ) { - if ( item->nextInList == theItem ) { - temp = item->nextInList; - item->nextInList = temp->nextInList; - length--; - temp->release(); - return IOPMNoErr; - } - item = item->nextInList; - } + IOPMPowerSource * item = firstItem; + IOPMPowerSource * temp; + + if (NULL == item) { + goto exit; + } + + if (item == theItem) { + firstItem = item->nextInList; + length--; + item->release(); + return IOPMNoErr; + } + while (item->nextInList != NULL) { + if (item->nextInList == theItem) { + temp = item->nextInList; + item->nextInList = temp->nextInList; + length--; + temp->release(); + return IOPMNoErr; + } + item = item->nextInList; + } exit: - return IOPMNoErr; + return IOPMNoErr; } @@ -145,22 +151,16 @@ exit: // Free all items in the list, and then free the list itself //****************************************************************************** -void IOPMPowerSourceList::free (void ) +void +IOPMPowerSourceList::free(void ) { - IOPMPowerSource * next = firstItem; - - while ( next != NULL ) { - firstItem = next->nextInList; - length--; - next->release(); - next = firstItem; - } - super::free(); + IOPMPowerSource * next = firstItem; + + while (next != NULL) { + firstItem = next->nextInList; + length--; + next->release(); + next = firstItem; + } + super::free(); } - - - - - - - diff --git a/iokit/Kernel/IOPMPowerStateQueue.cpp b/iokit/Kernel/IOPMPowerStateQueue.cpp index cd24b8c53..fc106dc1a 100644 --- a/iokit/Kernel/IOPMPowerStateQueue.cpp +++ b/iokit/Kernel/IOPMPowerStateQueue.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2001-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,83 +22,87 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
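For orientation while reading the IOPMPowerSourceList hunks above: the class is an intrusive singly-linked list in which each IOPMPowerSource carries its own nextInList pointer; addToList() scans for duplicates and then prepends in O(1), and removeFromList() unlinks either the head or an interior node before releasing it. Below is a minimal standalone sketch of that pattern, with a plain Node/List pair as stand-ins for the OSObject-based types; it is illustrative only, not code from this patch.

    #include <cstdio>

    // Stand-in for an object that embeds its own list linkage, as
    // IOPMPowerSource embeds nextInList.
    struct Node {
        Node *nextInList = nullptr;
        int   id = 0;
    };

    struct List {
        Node         *firstItem = nullptr;
        unsigned long length    = 0;

        void add(Node *n) {
            // Duplicate scan first, exactly as addToList() does.
            for (Node *p = firstItem; p != nullptr; p = p->nextInList) {
                if (p == n) {
                    return;                 // already present; nothing to do
                }
            }
            n->nextInList = firstItem;      // O(1) push onto the head
            firstItem = n;
            length++;
        }

        void remove(Node *n) {
            // The kernel version also release()s the removed item; omitted here.
            if (firstItem == nullptr) {
                return;
            }
            if (firstItem == n) {           // unlink the head
                firstItem = n->nextInList;
                length--;
                return;
            }
            for (Node *p = firstItem; p->nextInList != nullptr; p = p->nextInList) {
                if (p->nextInList == n) {   // unlink an interior node
                    p->nextInList = n->nextInList;
                    length--;
                    return;
                }
            }
        }
    };

    int main() {
        Node a{nullptr, 1}, b{nullptr, 2};
        List l;
        l.add(&a);
        l.add(&b);
        l.add(&a);                          // duplicate insert is a no-op
        l.remove(&a);
        std::printf("length=%lu head=%d\n", l.length, l.firstItem->id);
        return 0;
    }
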
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #include "IOPMPowerStateQueue.h" #define super IOEventSource OSDefineMetaClassAndStructors( IOPMPowerStateQueue, IOEventSource ) IOPMPowerStateQueue * IOPMPowerStateQueue::PMPowerStateQueue( - OSObject * inOwner, Action inAction ) + OSObject * inOwner, Action inAction ) { - IOPMPowerStateQueue * me = new IOPMPowerStateQueue; + IOPMPowerStateQueue * me = new IOPMPowerStateQueue; - if (me && !me->init(inOwner, inAction)) - { - me->release(); - return NULL; - } + if (me && !me->init(inOwner, inAction)) { + me->release(); + return NULL; + } - return me; + return me; } -bool IOPMPowerStateQueue::init( OSObject * inOwner, Action inAction ) +bool +IOPMPowerStateQueue::init( OSObject * inOwner, Action inAction ) { - if (!inAction || !(super::init(inOwner, inAction))) - return false; + if (!inAction || !(super::init(inOwner, inAction))) { + return false; + } - queue_init( &queueHead ); + queue_init( &queueHead ); - queueLock = IOLockAlloc(); - if (!queueLock) - return false; + queueLock = IOLockAlloc(); + if (!queueLock) { + return false; + } - return true; + return true; } -bool IOPMPowerStateQueue::submitPowerEvent( - uint32_t eventType, - void * arg0, - uint64_t arg1 ) +bool +IOPMPowerStateQueue::submitPowerEvent( + uint32_t eventType, + void * arg0, + uint64_t arg1 ) { - PowerEventEntry * entry; + PowerEventEntry * entry; - entry = IONew(PowerEventEntry, 1); - if (!entry) - return false; + entry = IONew(PowerEventEntry, 1); + if (!entry) { + return false; + } - entry->eventType = eventType; - entry->arg0 = arg0; - entry->arg1 = arg1; + entry->eventType = eventType; + entry->arg0 = arg0; + entry->arg1 = arg1; - IOLockLock(queueLock); - queue_enter(&queueHead, entry, PowerEventEntry *, chain); - IOLockUnlock(queueLock); - signalWorkAvailable(); + IOLockLock(queueLock); + queue_enter(&queueHead, entry, PowerEventEntry *, chain); + IOLockUnlock(queueLock); + signalWorkAvailable(); - return true; + return true; } -bool IOPMPowerStateQueue::checkForWork( void ) +bool +IOPMPowerStateQueue::checkForWork( void ) { - IOPMPowerStateQueueAction queueAction = (IOPMPowerStateQueueAction) action; - PowerEventEntry * entry; + IOPMPowerStateQueueAction queueAction = (IOPMPowerStateQueueAction) action; + PowerEventEntry * entry; IOLockLock(queueLock); - while (!queue_empty(&queueHead)) - { - queue_remove_first(&queueHead, entry, PowerEventEntry *, chain); + while (!queue_empty(&queueHead)) { + queue_remove_first(&queueHead, entry, PowerEventEntry *, chain); IOLockUnlock(queueLock); - (*queueAction)(owner, entry->eventType, entry->arg0, entry->arg1); - IODelete(entry, PowerEventEntry, 1); + (*queueAction)(owner, entry->eventType, entry->arg0, entry->arg1); + IODelete(entry, PowerEventEntry, 1); - IOLockLock(queueLock); + IOLockLock(queueLock); } IOLockUnlock(queueLock); - return false; + return false; } diff --git a/iokit/Kernel/IOPMPowerStateQueue.h b/iokit/Kernel/IOPMPowerStateQueue.h index bccf6f45f..db2e57412 100644 --- a/iokit/Kernel/IOPMPowerStateQueue.h +++ b/iokit/Kernel/IOPMPowerStateQueue.h @@ -2,7 +2,7 @@ * Copyright (c) 2001-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
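The IOPMPowerStateQueue hunks above preserve a detail worth calling out: checkForWork() pops one entry at a time and drops queueLock around the client callback, so a callback may safely call submitPowerEvent() again without deadlocking. Here is a rough user-space sketch of that drain discipline, with std::mutex and std::deque standing in for IOLock and the queue_* macros; the names Event and EventQueue are invented for illustration.

    #include <cstdint>
    #include <cstdio>
    #include <deque>
    #include <functional>
    #include <mutex>

    // Mirrors PowerEventEntry's payload.
    struct Event {
        uint32_t type;
        void    *arg0;
        uint64_t arg1;
    };

    class EventQueue {
        std::mutex                         lock;    // stands in for queueLock
        std::deque<Event>                  queue;   // stands in for queueHead
        std::function<void(const Event &)> action;
    public:
        explicit EventQueue(std::function<void(const Event &)> a) : action(a) {}

        void submit(const Event &e) {
            std::lock_guard<std::mutex> g(lock);
            queue.push_back(e);
            // The kernel version would call signalWorkAvailable() here to
            // wake the work loop that eventually runs drain().
        }

        void drain() {                               // cf. checkForWork()
            std::unique_lock<std::mutex> g(lock);
            while (!queue.empty()) {
                Event e = queue.front();
                queue.pop_front();
                g.unlock();        // never hold the queue lock in client code
                action(e);         // callback may re-enter submit()
                g.lock();
            }
        }
    };

    int main() {
        EventQueue q([](const Event &e) { std::printf("event %u\n", e.type); });
        q.submit(Event{1, nullptr, 0});
        q.submit(Event{2, nullptr, 0});
        q.drain();
        return 0;
    }
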
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #ifndef _IOPMPOWERSTATEQUEUE_H_ #define _IOPMPOWERSTATEQUEUE_H_ - + #include #include #include @@ -37,27 +37,27 @@ typedef void (*IOPMPowerStateQueueAction)(OSObject *, uint32_t event, void *, ui class IOPMPowerStateQueue : public IOEventSource { - OSDeclareDefaultStructors(IOPMPowerStateQueue) + OSDeclareDefaultStructors(IOPMPowerStateQueue) private: - struct PowerEventEntry { - queue_chain_t chain; - uint32_t eventType; - void * arg0; - uint64_t arg1; - }; + struct PowerEventEntry { + queue_chain_t chain; + uint32_t eventType; + void * arg0; + uint64_t arg1; + }; - queue_head_t queueHead; - IOLock * queueLock; + queue_head_t queueHead; + IOLock * queueLock; protected: - virtual bool checkForWork( void ) APPLE_KEXT_OVERRIDE; - virtual bool init( OSObject * owner, Action action ) APPLE_KEXT_OVERRIDE; + virtual bool checkForWork( void ) APPLE_KEXT_OVERRIDE; + virtual bool init( OSObject * owner, Action action ) APPLE_KEXT_OVERRIDE; public: - static IOPMPowerStateQueue * PMPowerStateQueue( OSObject * owner, Action action ); + static IOPMPowerStateQueue * PMPowerStateQueue( OSObject * owner, Action action ); - bool submitPowerEvent( uint32_t eventType, void * arg0 = 0, uint64_t arg1 = 0 ); + bool submitPowerEvent( uint32_t eventType, void * arg0 = 0, uint64_t arg1 = 0 ); }; #endif /* _IOPMPOWERSTATEQUEUE_H_ */ diff --git a/iokit/Kernel/IOPMinformee.cpp b/iokit/Kernel/IOPMinformee.cpp index 3889ffe44..adb85b28e 100644 --- a/iokit/Kernel/IOPMinformee.cpp +++ b/iokit/Kernel/IOPMinformee.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -36,15 +36,17 @@ OSDefineMetaClassAndStructors(IOPMinformee, OSObject) // static constructor // //********************************************************************************* -IOPMinformee *IOPMinformee::withObject( IOService *theObject ) +IOPMinformee *IOPMinformee::withObject( IOService * theObject ) { - IOPMinformee *newInformee = new IOPMinformee; - - if (!newInformee) return NULL; - newInformee->init(); - newInformee->initialize( theObject ); - - return newInformee; + IOPMinformee *newInformee = new IOPMinformee; + + if (!newInformee) { + return NULL; + } + newInformee->init(); + newInformee->initialize( theObject ); + + return newInformee; } @@ -52,12 +54,13 @@ IOPMinformee *IOPMinformee::withObject( IOService *theObject ) // constructor // //********************************************************************************* -void IOPMinformee::initialize ( IOService * theObject ) +void +IOPMinformee::initialize( IOService * theObject ) { - whatObject = theObject; - timer = 0; - active = true; - whatObject->retain(); + whatObject = theObject; + timer = 0; + active = true; + whatObject->retain(); } @@ -65,9 +68,9 @@ void IOPMinformee::initialize ( IOService * theObject ) // free // //********************************************************************************* -void IOPMinformee::free (void ) +void +IOPMinformee::free(void ) { - whatObject->release(); - super::free(); + whatObject->release(); + super::free(); } - diff --git a/iokit/Kernel/IOPMinformeeList.cpp b/iokit/Kernel/IOPMinformeeList.cpp index 5857bb6b4..18cf810ed 100644 --- a/iokit/Kernel/IOPMinformeeList.cpp +++ b/iokit/Kernel/IOPMinformeeList.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
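One behavioral point survives all the reformatting in the IOPMinformee hunks above: the informee retains the IOService it wraps in initialize() and releases it in free(), so a driver on an informee list cannot be freed out from under power management. A toy sketch of that pairing follows, with a hand-rolled reference count in place of OSObject; all names here are illustrative.

    #include <cstdio>

    // Minimal stand-in for OSObject's retain()/release().
    struct RefCounted {
        int refs = 1;
        void retain()  { ++refs; }
        void release() { if (--refs == 0) { delete this; } }
        virtual ~RefCounted() {}
    };

    // Wrapper that owns a reference for as long as it lives, mirroring
    // IOPMinformee::initialize() and IOPMinformee::free().
    struct Informee : RefCounted {
        RefCounted *whatObject = nullptr;
        bool        active     = false;

        static Informee *withObject(RefCounted *target) {
            Informee *i = new Informee;
            i->whatObject = target;
            i->active = true;
            target->retain();            // hold the target while tracked
            return i;
        }

        ~Informee() override {
            if (whatObject) {
                whatObject->release();   // paired with the retain above
            }
        }
    };

    int main() {
        RefCounted *svc = new RefCounted;            // refs == 1
        Informee   *inf = Informee::withObject(svc); // refs == 2
        inf->release();  // destroys the informee, drops one reference
        svc->release();  // drops the creator's reference, frees svc
        std::printf("done\n");
        return 0;
    }
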
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -30,64 +30,69 @@ #include #define super OSObject -OSDefineMetaClassAndStructors(IOPMinformeeList,OSObject) +OSDefineMetaClassAndStructors(IOPMinformeeList, OSObject) //********************************************************************************* // init // //********************************************************************************* -void IOPMinformeeList::initialize ( void ) +void +IOPMinformeeList::initialize( void ) { - firstItem = NULL; - length = 0; + firstItem = NULL; + length = 0; } //****************************************************************************** // getSharedRecursiveLock // //****************************************************************************** -IORecursiveLock *IOPMinformeeList::getSharedRecursiveLock( void ) +IORecursiveLock * +IOPMinformeeList::getSharedRecursiveLock( void ) { - static IORecursiveLock *sharedListLock = NULL; - - /* A running system could have 50-60+ instances of IOPMInformeeList. - * They'll share this lock, since list insertion and removal is relatively - * rare, and generally tied to major events like device discovery. - * - * getSharedRecursiveLock() is called from IOStartIOKit to initialize - * the sharedListLock before any IOPMinformeeLists are instantiated. - * - * The IOPMinformeeList class will be around for the lifetime of the system, - * we don't worry about freeing this lock. - */ - - if ( NULL == sharedListLock ) - { - sharedListLock = IORecursiveLockAlloc(); - } - return sharedListLock; + static IORecursiveLock *sharedListLock = NULL; + + /* A running system could have 50-60+ instances of IOPMInformeeList. + * They'll share this lock, since list insertion and removal is relatively + * rare, and generally tied to major events like device discovery. + * + * getSharedRecursiveLock() is called from IOStartIOKit to initialize + * the sharedListLock before any IOPMinformeeLists are instantiated. + * + * The IOPMinformeeList class will be around for the lifetime of the system, + * we don't worry about freeing this lock. + */ + + if (NULL == sharedListLock) { + sharedListLock = IORecursiveLockAlloc(); + } + return sharedListLock; } - //********************************************************************************* +//********************************************************************************* // appendNewInformee - // - //********************************************************************************* -IOPMinformee *IOPMinformeeList::appendNewInformee ( IOService * newObject ) +// +//********************************************************************************* +IOPMinformee * +IOPMinformeeList::appendNewInformee( IOService * newObject ) { - IOPMinformee * newInformee; - - if (!newObject) - return NULL; - - newInformee = IOPMinformee::withObject (newObject); - - if (!newInformee) - return NULL; - - if( IOPMNoErr == addToList (newInformee)) - return newInformee; - else - return NULL; + IOPMinformee * newInformee; + + if (!newObject) { + return NULL; + } + + newInformee = IOPMinformee::withObject(newObject); + + if (!newInformee) { + return NULL; + } + + if (IOPMNoErr == addToList(newInformee)) { + return newInformee; + } else { + return NULL; + } } @@ -96,36 +101,36 @@ IOPMinformee *IOPMinformeeList::appendNewInformee ( IOService * newObject ) // *OBSOLETE* do not call from outside of this file. 
// Try appendNewInformee() instead //********************************************************************************* -IOReturn IOPMinformeeList::addToList ( IOPMinformee * newInformee ) +IOReturn +IOPMinformeeList::addToList( IOPMinformee * newInformee ) { - IOPMinformee * nextInformee; - IORecursiveLock *listLock = getSharedRecursiveLock(); - - if(!listLock) - return kIOReturnError; - - IORecursiveLockLock(listLock); - nextInformee = firstItem; - - // Is new object already in the list? - while ( nextInformee != NULL ) - { - if ( nextInformee->whatObject == newInformee->whatObject ) - { - // object is present; just exit - goto unlock_and_exit; - } - nextInformee = nextInList(nextInformee); - } - - // add it to the front of the list - newInformee->nextInList = firstItem; - firstItem = newInformee; - length++; + IOPMinformee * nextInformee; + IORecursiveLock *listLock = getSharedRecursiveLock(); + + if (!listLock) { + return kIOReturnError; + } + + IORecursiveLockLock(listLock); + nextInformee = firstItem; + + // Is new object already in the list? + while (nextInformee != NULL) { + if (nextInformee->whatObject == newInformee->whatObject) { + // object is present; just exit + goto unlock_and_exit; + } + nextInformee = nextInList(nextInformee); + } + + // add it to the front of the list + newInformee->nextInList = firstItem; + firstItem = newInformee; + length++; unlock_and_exit: - IORecursiveLockUnlock(listLock); - return IOPMNoErr; + IORecursiveLockUnlock(listLock); + return IOPMNoErr; } @@ -135,43 +140,43 @@ unlock_and_exit: // Find the item in the list, unlink it, and free it. //********************************************************************************* -IOReturn IOPMinformeeList::removeFromList ( IOService * theItem ) +IOReturn +IOPMinformeeList::removeFromList( IOService * theItem ) { - IOPMinformee * item = firstItem; - IOPMinformee * temp; - IORecursiveLock *listLock = getSharedRecursiveLock(); - - if ( NULL == item ) - return IOPMNoErr; - if(!listLock) - return kIOReturnError; - - IORecursiveLockLock( listLock ); - - if ( item->whatObject == theItem ) - { - firstItem = item->nextInList; - length--; - item->release(); - goto unlock_and_exit; - } - - while ( item->nextInList != NULL ) - { - if ( item->nextInList->whatObject == theItem ) - { - temp = item->nextInList; - item->nextInList = temp->nextInList; - length--; - temp->release(); - goto unlock_and_exit; - } - item = item->nextInList; - } + IOPMinformee * item = firstItem; + IOPMinformee * temp; + IORecursiveLock *listLock = getSharedRecursiveLock(); + + if (NULL == item) { + return IOPMNoErr; + } + if (!listLock) { + return kIOReturnError; + } + + IORecursiveLockLock( listLock ); + + if (item->whatObject == theItem) { + firstItem = item->nextInList; + length--; + item->release(); + goto unlock_and_exit; + } + + while (item->nextInList != NULL) { + if (item->nextInList->whatObject == theItem) { + temp = item->nextInList; + item->nextInList = temp->nextInList; + length--; + temp->release(); + goto unlock_and_exit; + } + item = item->nextInList; + } unlock_and_exit: - IORecursiveLockUnlock(listLock); - return IOPMNoErr; + IORecursiveLockUnlock(listLock); + return IOPMNoErr; } @@ -180,9 +185,10 @@ unlock_and_exit: // //********************************************************************************* -IOPMinformee * IOPMinformeeList::firstInList ( void ) +IOPMinformee * +IOPMinformeeList::firstInList( void ) { - return firstItem; + return firstItem; } 
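The getSharedRecursiveLock() hunk above deserves a second look: a single recursive lock is shared by every informee list, and the unsynchronized NULL check is only safe because, per the original comment, the function is first called from IOStartIOKit before any lists exist or any contention is possible. A rough pthread-based sketch of the same lazy shared-lock idiom and of the single-exit locking shape used by addToList()/removeFromList() follows; pthread_mutex is a stand-in for IORecursiveLock, and this is not the kernel implementation.

    #include <pthread.h>
    #include <cstdio>

    static pthread_mutex_t *sharedListLock = nullptr;

    // Lazy, intentionally unsynchronized creation. Safe only under the same
    // assumption the kernel comment states: the first call happens during
    // single-threaded startup, and the lock is never freed afterwards.
    static pthread_mutex_t *getSharedLock(void)
    {
        if (sharedListLock == nullptr) {
            pthread_mutexattr_t attr;
            pthread_mutexattr_init(&attr);
            pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
            pthread_mutex_t *m = new pthread_mutex_t;
            pthread_mutex_init(m, &attr);
            pthread_mutexattr_destroy(&attr);
            sharedListLock = m;          // leaked on purpose, like the original
        }
        return sharedListLock;
    }

    // Every path funnels through one unlock, as in addToList() above.
    static int addItem(bool alreadyPresent)
    {
        pthread_mutex_lock(getSharedLock());
        if (alreadyPresent) {
            goto unlock_and_exit;        // object present; just exit
        }
        // ... link the new item onto the list head here ...
    unlock_and_exit:
        pthread_mutex_unlock(getSharedLock());
        return 0;
    }

    int main() {
        addItem(false);
        addItem(true);
        std::printf("ok\n");
        return 0;
    }
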
//********************************************************************************* @@ -190,12 +196,13 @@ IOPMinformee * IOPMinformeeList::firstInList ( void ) // //********************************************************************************* -IOPMinformee * IOPMinformeeList::nextInList ( IOPMinformee * currentItem ) +IOPMinformee * +IOPMinformeeList::nextInList( IOPMinformee * currentItem ) { - if ( currentItem != NULL ) { - return (currentItem->nextInList); - } - return NULL; + if (currentItem != NULL) { + return currentItem->nextInList; + } + return NULL; } //********************************************************************************* @@ -203,9 +210,10 @@ IOPMinformee * IOPMinformeeList::nextInList ( IOPMinformee * currentItem ) // //********************************************************************************* -unsigned long IOPMinformeeList::numberOfItems ( void ) +unsigned long +IOPMinformeeList::numberOfItems( void ) { - return length; + return length; } //********************************************************************************* @@ -215,18 +223,19 @@ unsigned long IOPMinformeeList::numberOfItems ( void ) // by the parameter. Return a pointer to the list item or NULL. //********************************************************************************* -IOPMinformee * IOPMinformeeList::findItem ( IOService * driverOrChild ) +IOPMinformee * +IOPMinformeeList::findItem( IOService * driverOrChild ) { - IOPMinformee * nextObject; - - nextObject = firstInList(); - while ( nextObject != NULL ) { - if ( nextObject->whatObject == driverOrChild ) { - return nextObject; - } - nextObject = nextInList(nextObject); - } - return NULL; + IOPMinformee * nextObject; + + nextObject = firstInList(); + while (nextObject != NULL) { + if (nextObject->whatObject == driverOrChild) { + return nextObject; + } + nextObject = nextInList(nextObject); + } + return NULL; } @@ -237,16 +246,16 @@ IOPMinformee * IOPMinformeeList::findItem ( IOService * driverOrChild ) // Free all items in the list, and then free the list itself //********************************************************************************* -void IOPMinformeeList::free (void ) +void +IOPMinformeeList::free(void ) { - IOPMinformee * next = firstItem; - - while ( next != NULL ) { - firstItem = next->nextInList; - length--; - next->release(); - next = firstItem; - } -super::free(); + IOPMinformee * next = firstItem; + + while (next != NULL) { + firstItem = next->nextInList; + length--; + next->release(); + next = firstItem; + } + super::free(); } - diff --git a/iokit/Kernel/IOPMrootDomain.cpp b/iokit/Kernel/IOPMrootDomain.cpp index 85669860f..320e8f3f2 100644 --- a/iokit/Kernel/IOPMrootDomain.cpp +++ b/iokit/Kernel/IOPMrootDomain.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -75,6 +75,7 @@ __END_DECLS #if defined(__i386__) || defined(__x86_64__) __BEGIN_DECLS #include "IOPMrootDomainInternal.h" +const char *processor_to_datastring(const char *prefix, processor_t target_processor); __END_DECLS #endif @@ -91,20 +92,20 @@ __END_DECLS #if DEVELOPMENT #define DLOG(x...) do { \ if (kIOLogPMRootDomain & gIOKitDebug) \ - kprintf(LOG_PREFIX x); \ + kprintf(LOG_PREFIX x); \ else \ - os_log(OS_LOG_DEFAULT, LOG_PREFIX x); \ + os_log(OS_LOG_DEFAULT, LOG_PREFIX x); \ } while (false) #else #define DLOG(x...) do { \ if (kIOLogPMRootDomain & gIOKitDebug) \ - kprintf(LOG_PREFIX x); \ + kprintf(LOG_PREFIX x); \ } while (false) #endif #define DMSG(x...) do { \ if (kIOLogPMRootDomain & gIOKitDebug) { \ - kprintf(LOG_PREFIX x); \ + kprintf(LOG_PREFIX x); \ } \ } while (false) @@ -117,7 +118,7 @@ static IOWorkLoop * gIOPMWorkLoop = 0; #define ASSERT_GATED() \ do { \ if (gIOPMWorkLoop && gIOPMWorkLoop->inGate() != true) { \ - panic("RootDomain: not inside PM gate"); \ + panic("RootDomain: not inside PM gate"); \ } \ } while(false) #else @@ -125,21 +126,21 @@ do { \ #endif /* CHECK_THREAD_CONTEXT */ #define CAP_LOSS(c) \ - (((_pendingCapability & (c)) == 0) && \ - ((_currentCapability & (c)) != 0)) + (((_pendingCapability & (c)) == 0) && \ + ((_currentCapability & (c)) != 0)) #define CAP_GAIN(c) \ - (((_currentCapability & (c)) == 0) && \ - ((_pendingCapability & (c)) != 0)) + (((_currentCapability & (c)) == 0) && \ + ((_pendingCapability & (c)) != 0)) #define CAP_CHANGE(c) \ - (((_currentCapability ^ _pendingCapability) & (c)) != 0) + (((_currentCapability ^ _pendingCapability) & (c)) != 0) #define CAP_CURRENT(c) \ - ((_currentCapability & (c)) != 0) + ((_currentCapability & (c)) != 0) #define CAP_HIGHEST(c) \ - ((_highestCapability & (c)) != 0) + ((_highestCapability & (c)) != 0) #if defined(__i386__) || defined(__x86_64__) #define DARK_TO_FULL_EVALUATE_CLAMSHELL 1 @@ -147,37 +148,37 @@ do { \ // Event types for IOPMPowerStateQueue::submitPowerEvent() enum { - kPowerEventFeatureChanged = 1, // 1 - kPowerEventReceivedPowerNotification, // 2 - kPowerEventSystemBootCompleted, // 3 - kPowerEventSystemShutdown, // 4 - kPowerEventUserDisabledSleep, // 5 - kPowerEventRegisterSystemCapabilityClient, // 6 - kPowerEventRegisterKernelCapabilityClient, // 7 - kPowerEventPolicyStimulus, // 8 - kPowerEventAssertionCreate, // 9 - kPowerEventAssertionRelease, // 10 - kPowerEventAssertionSetLevel, // 11 - kPowerEventQueueSleepWakeUUID, // 12 - kPowerEventPublishSleepWakeUUID, // 13 - kPowerEventSetDisplayPowerOn // 14 + kPowerEventFeatureChanged = 1, // 1 + kPowerEventReceivedPowerNotification, // 2 + kPowerEventSystemBootCompleted, // 3 + kPowerEventSystemShutdown, // 4 + kPowerEventUserDisabledSleep, // 5 + kPowerEventRegisterSystemCapabilityClient, // 6 + kPowerEventRegisterKernelCapabilityClient, // 7 + kPowerEventPolicyStimulus, // 8 + kPowerEventAssertionCreate, // 9 + kPowerEventAssertionRelease, // 10 + kPowerEventAssertionSetLevel, // 11 + kPowerEventQueueSleepWakeUUID, // 12 + kPowerEventPublishSleepWakeUUID, // 13 + 
kPowerEventSetDisplayPowerOn // 14 }; // For evaluatePolicy() // List of stimuli that affects the root domain policy. enum { - kStimulusDisplayWranglerSleep, // 0 - kStimulusDisplayWranglerWake, // 1 - kStimulusAggressivenessChanged, // 2 - kStimulusDemandSystemSleep, // 3 - kStimulusAllowSystemSleepChanged, // 4 - kStimulusDarkWakeActivityTickle, // 5 - kStimulusDarkWakeEntry, // 6 - kStimulusDarkWakeReentry, // 7 - kStimulusDarkWakeEvaluate, // 8 - kStimulusNoIdleSleepPreventers, // 9 - kStimulusEnterUserActiveState, // 10 - kStimulusLeaveUserActiveState // 11 + kStimulusDisplayWranglerSleep, // 0 + kStimulusDisplayWranglerWake, // 1 + kStimulusAggressivenessChanged, // 2 + kStimulusDemandSystemSleep, // 3 + kStimulusAllowSystemSleepChanged, // 4 + kStimulusDarkWakeActivityTickle, // 5 + kStimulusDarkWakeEntry, // 6 + kStimulusDarkWakeReentry, // 7 + kStimulusDarkWakeEvaluate, // 8 + kStimulusNoIdleSleepPreventers, // 9 + kStimulusEnterUserActiveState, // 10 + kStimulusLeaveUserActiveState // 11 }; extern "C" { @@ -196,50 +197,50 @@ static void pmEventTimeStamp(uint64_t *recordTS); static const OSSymbol *sleepSupportedPEFunction = NULL; static const OSSymbol *sleepMessagePEFunction = NULL; -static const OSSymbol * gIOPMPSExternalConnectedKey; -static const OSSymbol * gIOPMPSExternalChargeCapableKey; -static const OSSymbol * gIOPMPSBatteryInstalledKey; -static const OSSymbol * gIOPMPSIsChargingKey; -static const OSSymbol * gIOPMPSAtWarnLevelKey; -static const OSSymbol * gIOPMPSAtCriticalLevelKey; -static const OSSymbol * gIOPMPSCurrentCapacityKey; -static const OSSymbol * gIOPMPSMaxCapacityKey; -static const OSSymbol * gIOPMPSDesignCapacityKey; -static const OSSymbol * gIOPMPSTimeRemainingKey; -static const OSSymbol * gIOPMPSAmperageKey; -static const OSSymbol * gIOPMPSVoltageKey; -static const OSSymbol * gIOPMPSCycleCountKey; -static const OSSymbol * gIOPMPSMaxErrKey; -static const OSSymbol * gIOPMPSAdapterInfoKey; -static const OSSymbol * gIOPMPSLocationKey; -static const OSSymbol * gIOPMPSErrorConditionKey; -static const OSSymbol * gIOPMPSManufacturerKey; -static const OSSymbol * gIOPMPSManufactureDateKey; -static const OSSymbol * gIOPMPSModelKey; -static const OSSymbol * gIOPMPSSerialKey; -static const OSSymbol * gIOPMPSLegacyBatteryInfoKey; -static const OSSymbol * gIOPMPSBatteryHealthKey; -static const OSSymbol * gIOPMPSHealthConfidenceKey; -static const OSSymbol * gIOPMPSCapacityEstimatedKey; -static const OSSymbol * gIOPMPSBatteryChargeStatusKey; -static const OSSymbol * gIOPMPSBatteryTemperatureKey; -static const OSSymbol * gIOPMPSAdapterDetailsKey; -static const OSSymbol * gIOPMPSChargerConfigurationKey; -static const OSSymbol * gIOPMPSAdapterDetailsIDKey; -static const OSSymbol * gIOPMPSAdapterDetailsWattsKey; -static const OSSymbol * gIOPMPSAdapterDetailsRevisionKey; -static const OSSymbol * gIOPMPSAdapterDetailsSerialNumberKey; -static const OSSymbol * gIOPMPSAdapterDetailsFamilyKey; -static const OSSymbol * gIOPMPSAdapterDetailsAmperageKey; -static const OSSymbol * gIOPMPSAdapterDetailsDescriptionKey; -static const OSSymbol * gIOPMPSAdapterDetailsPMUConfigurationKey; -static const OSSymbol * gIOPMPSAdapterDetailsSourceIDKey; -static const OSSymbol * gIOPMPSAdapterDetailsErrorFlagsKey; -static const OSSymbol * gIOPMPSAdapterDetailsSharedSourceKey; -static const OSSymbol * gIOPMPSAdapterDetailsCloakedKey; -static const OSSymbol * gIOPMPSInvalidWakeSecondsKey; -static const OSSymbol * gIOPMPSPostChargeWaitSecondsKey; -static const OSSymbol * gIOPMPSPostDishargeWaitSecondsKey; 
+static const OSSymbol * gIOPMPSExternalConnectedKey; +static const OSSymbol * gIOPMPSExternalChargeCapableKey; +static const OSSymbol * gIOPMPSBatteryInstalledKey; +static const OSSymbol * gIOPMPSIsChargingKey; +static const OSSymbol * gIOPMPSAtWarnLevelKey; +static const OSSymbol * gIOPMPSAtCriticalLevelKey; +static const OSSymbol * gIOPMPSCurrentCapacityKey; +static const OSSymbol * gIOPMPSMaxCapacityKey; +static const OSSymbol * gIOPMPSDesignCapacityKey; +static const OSSymbol * gIOPMPSTimeRemainingKey; +static const OSSymbol * gIOPMPSAmperageKey; +static const OSSymbol * gIOPMPSVoltageKey; +static const OSSymbol * gIOPMPSCycleCountKey; +static const OSSymbol * gIOPMPSMaxErrKey; +static const OSSymbol * gIOPMPSAdapterInfoKey; +static const OSSymbol * gIOPMPSLocationKey; +static const OSSymbol * gIOPMPSErrorConditionKey; +static const OSSymbol * gIOPMPSManufacturerKey; +static const OSSymbol * gIOPMPSManufactureDateKey; +static const OSSymbol * gIOPMPSModelKey; +static const OSSymbol * gIOPMPSSerialKey; +static const OSSymbol * gIOPMPSLegacyBatteryInfoKey; +static const OSSymbol * gIOPMPSBatteryHealthKey; +static const OSSymbol * gIOPMPSHealthConfidenceKey; +static const OSSymbol * gIOPMPSCapacityEstimatedKey; +static const OSSymbol * gIOPMPSBatteryChargeStatusKey; +static const OSSymbol * gIOPMPSBatteryTemperatureKey; +static const OSSymbol * gIOPMPSAdapterDetailsKey; +static const OSSymbol * gIOPMPSChargerConfigurationKey; +static const OSSymbol * gIOPMPSAdapterDetailsIDKey; +static const OSSymbol * gIOPMPSAdapterDetailsWattsKey; +static const OSSymbol * gIOPMPSAdapterDetailsRevisionKey; +static const OSSymbol * gIOPMPSAdapterDetailsSerialNumberKey; +static const OSSymbol * gIOPMPSAdapterDetailsFamilyKey; +static const OSSymbol * gIOPMPSAdapterDetailsAmperageKey; +static const OSSymbol * gIOPMPSAdapterDetailsDescriptionKey; +static const OSSymbol * gIOPMPSAdapterDetailsPMUConfigurationKey; +static const OSSymbol * gIOPMPSAdapterDetailsSourceIDKey; +static const OSSymbol * gIOPMPSAdapterDetailsErrorFlagsKey; +static const OSSymbol * gIOPMPSAdapterDetailsSharedSourceKey; +static const OSSymbol * gIOPMPSAdapterDetailsCloakedKey; +static const OSSymbol * gIOPMPSInvalidWakeSecondsKey; +static const OSSymbol * gIOPMPSPostChargeWaitSecondsKey; +static const OSSymbol * gIOPMPSPostDishargeWaitSecondsKey; #define kIOSleepSupportedKey "IOSleepSupported" #define kIOPMSystemCapabilitiesKey "System Capabilities" @@ -252,25 +253,25 @@ static const OSSymbol * gIOPMPSPostDishargeWaitSecondsKey; #define kIOEFIBootRomFailureKey "wake-failure" #define kRD_AllPowerSources (kIOPMSupportedOnAC \ - | kIOPMSupportedOnBatt \ - | kIOPMSupportedOnUPS) + | kIOPMSupportedOnBatt \ + | kIOPMSupportedOnUPS) #define kLocalEvalClamshellCommand (1 << 15) #define kIdleSleepRetryInterval (3 * 60) enum { - kWranglerPowerStateMin = 0, - kWranglerPowerStateSleep = 2, - kWranglerPowerStateDim = 3, - kWranglerPowerStateMax = 4 + kWranglerPowerStateMin = 0, + kWranglerPowerStateSleep = 2, + kWranglerPowerStateDim = 3, + kWranglerPowerStateMax = 4 }; enum { - OFF_STATE = 0, - RESTART_STATE = 1, - SLEEP_STATE = 2, - ON_STATE = 3, - NUM_POWER_STATES + OFF_STATE = 0, + RESTART_STATE = 1, + SLEEP_STATE = 2, + ON_STATE = 3, + NUM_POWER_STATES }; #define ON_POWER kIOPMPowerOn @@ -279,10 +280,10 @@ enum { static IOPMPowerState ourPowerStates[NUM_POWER_STATES] = { - {1, 0, 0, 0, 0,0,0,0,0,0,0,0}, - {1, kIOPMRestartCapability, kIOPMRestart, RESTART_POWER, 0,0,0,0,0,0,0,0}, - {1, kIOPMSleepCapability, kIOPMSleep, SLEEP_POWER, 
0,0,0,0,0,0,0,0}, - {1, kIOPMPowerOn, kIOPMPowerOn, ON_POWER, 0,0,0,0,0,0,0,0} + {1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {1, kIOPMRestartCapability, kIOPMRestart, RESTART_POWER, 0, 0, 0, 0, 0, 0, 0, 0}, + {1, kIOPMSleepCapability, kIOPMSleep, SLEEP_POWER, 0, 0, 0, 0, 0, 0, 0, 0}, + {1, kIOPMPowerOn, kIOPMPowerOn, ON_POWER, 0, 0, 0, 0, 0, 0, 0, 0} }; #define kIOPMRootDomainWakeTypeSleepService "SleepService" @@ -313,52 +314,52 @@ static IOPMPowerState ourPowerStates[NUM_POWER_STATES] = #define kAggressivesMinValue 1 enum { - kAggressivesStateBusy = 0x01, - kAggressivesStateQuickSpindown = 0x02 + kAggressivesStateBusy = 0x01, + kAggressivesStateQuickSpindown = 0x02 }; struct AggressivesRecord { - uint32_t flags; - uint32_t type; - uint32_t value; + uint32_t flags; + uint32_t type; + uint32_t value; }; struct AggressivesRequest { - queue_chain_t chain; - uint32_t options; - uint32_t dataType; - union { - IOService * service; - AggressivesRecord record; - } data; + queue_chain_t chain; + uint32_t options; + uint32_t dataType; + union { + IOService * service; + AggressivesRecord record; + } data; }; enum { - kAggressivesRequestTypeService = 1, - kAggressivesRequestTypeRecord + kAggressivesRequestTypeService = 1, + kAggressivesRequestTypeRecord }; enum { - kAggressivesOptionSynchronous = 0x00000001, - kAggressivesOptionQuickSpindownEnable = 0x00000100, - kAggressivesOptionQuickSpindownDisable = 0x00000200, - kAggressivesOptionQuickSpindownMask = 0x00000300 + kAggressivesOptionSynchronous = 0x00000001, + kAggressivesOptionQuickSpindownEnable = 0x00000100, + kAggressivesOptionQuickSpindownDisable = 0x00000200, + kAggressivesOptionQuickSpindownMask = 0x00000300 }; enum { - kAggressivesRecordFlagModified = 0x00000001, - kAggressivesRecordFlagMinValue = 0x00000002 + kAggressivesRecordFlagModified = 0x00000001, + kAggressivesRecordFlagMinValue = 0x00000002 }; // gDarkWakeFlags enum { - kDarkWakeFlagHIDTickleEarly = 0x01, // hid tickle before gfx suppression - kDarkWakeFlagHIDTickleLate = 0x02, // hid tickle after gfx suppression - kDarkWakeFlagHIDTickleNone = 0x03, // hid tickle is not posted - kDarkWakeFlagHIDTickleMask = 0x03, - kDarkWakeFlagAlarmIsDark = 0x0100, - kDarkWakeFlagGraphicsPowerState1 = 0x0200, - kDarkWakeFlagAudioNotSuppressed = 0x0400 + kDarkWakeFlagHIDTickleEarly = 0x01,// hid tickle before gfx suppression + kDarkWakeFlagHIDTickleLate = 0x02,// hid tickle after gfx suppression + kDarkWakeFlagHIDTickleNone = 0x03,// hid tickle is not posted + kDarkWakeFlagHIDTickleMask = 0x03, + kDarkWakeFlagAlarmIsDark = 0x0100, + kDarkWakeFlagGraphicsPowerState1 = 0x0200, + kDarkWakeFlagAudioNotSuppressed = 0x0400 }; static IOPMrootDomain * gRootDomain; @@ -412,6 +413,9 @@ z_stream swd_zs; vm_offset_t swd_zs_zmem; //size_t swd_zs_zsize; size_t swd_zs_zoffset; +#if defined(__i386__) || defined(__x86_64__) +IOCPU *currentShutdownTarget = NULL; +#endif static unsigned int gPMHaltBusyCount; static unsigned int gPMHaltIdleCount; @@ -425,9 +429,9 @@ static bool gPMQuiesced; // Constants used as arguments to IOPMrootDomain::informCPUStateChange #define kCPUUnknownIndex 9999999 enum { - kInformAC = 0, - kInformLid = 1, - kInformableCount = 2 + kInformAC = 0, + kInformLid = 1, + kInformableCount = 2 }; const OSSymbol *gIOPMStatsResponseTimedOut; @@ -444,12 +448,12 @@ const OSSymbol *gIOPMStatsDriverPSChangeSlow; */ class PMSettingHandle : public OSObject { - OSDeclareFinalStructors( PMSettingHandle ) - friend class PMSettingObject; + OSDeclareFinalStructors( PMSettingHandle ) + friend class PMSettingObject; 
private: - PMSettingObject *pmso; - void free(void) APPLE_KEXT_OVERRIDE; + PMSettingObject *pmso; + void free(void) APPLE_KEXT_OVERRIDE; }; /* @@ -458,40 +462,40 @@ private: */ class PMSettingObject : public OSObject { - OSDeclareFinalStructors( PMSettingObject ) - friend class IOPMrootDomain; + OSDeclareFinalStructors( PMSettingObject ) + friend class IOPMrootDomain; private: - queue_head_t calloutQueue; - thread_t waitThread; - IOPMrootDomain *parent; - PMSettingHandle *pmsh; - IOPMSettingControllerCallback func; - OSObject *target; - uintptr_t refcon; - uint32_t *publishedFeatureID; - uint32_t settingCount; - bool disabled; - - void free(void) APPLE_KEXT_OVERRIDE; + queue_head_t calloutQueue; + thread_t waitThread; + IOPMrootDomain *parent; + PMSettingHandle *pmsh; + IOPMSettingControllerCallback func; + OSObject *target; + uintptr_t refcon; + uint32_t *publishedFeatureID; + uint32_t settingCount; + bool disabled; + + void free(void) APPLE_KEXT_OVERRIDE; public: - static PMSettingObject *pmSettingObject( - IOPMrootDomain *parent_arg, - IOPMSettingControllerCallback handler_arg, - OSObject *target_arg, - uintptr_t refcon_arg, - uint32_t supportedPowerSources, - const OSSymbol *settings[], - OSObject **handle_obj); - - void dispatchPMSetting(const OSSymbol *type, OSObject *object); - void clientHandleFreed(void); + static PMSettingObject *pmSettingObject( + IOPMrootDomain *parent_arg, + IOPMSettingControllerCallback handler_arg, + OSObject *target_arg, + uintptr_t refcon_arg, + uint32_t supportedPowerSources, + const OSSymbol *settings[], + OSObject **handle_obj); + + void dispatchPMSetting(const OSSymbol *type, OSObject *object); + void clientHandleFreed(void); }; struct PMSettingCallEntry { - queue_chain_t link; - thread_t thread; + queue_chain_t link; + thread_t thread; }; #define PMSETTING_LOCK() IOLockLock(settingsCtrlLock) @@ -507,39 +511,39 @@ struct PMSettingCallEntry { */ typedef void (*IOPMTracePointHandler)( - void * target, uint32_t code, uint32_t data ); + void * target, uint32_t code, uint32_t data ); class PMTraceWorker : public OSObject { - OSDeclareDefaultStructors(PMTraceWorker) + OSDeclareDefaultStructors(PMTraceWorker) public: - typedef enum { kPowerChangeStart, kPowerChangeCompleted } change_t; - - static PMTraceWorker *tracer( IOPMrootDomain * ); - void tracePCIPowerChange(change_t, IOService *, uint32_t, uint32_t); - void tracePoint(uint8_t phase); - void traceDetail(uint32_t detail); - void traceComponentWakeProgress(uint32_t component, uint32_t data); - int recordTopLevelPCIDevice(IOService *); - void RTC_TRACE(void); - virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; - - IOPMTracePointHandler tracePointHandler; - void * tracePointTarget; - uint64_t getPMStatusCode(); - uint8_t getTracePhase(); - uint32_t getTraceData(); + typedef enum { kPowerChangeStart, kPowerChangeCompleted } change_t; + + static PMTraceWorker *tracer( IOPMrootDomain * ); + void tracePCIPowerChange(change_t, IOService *, uint32_t, uint32_t); + void tracePoint(uint8_t phase); + void traceDetail(uint32_t detail); + void traceComponentWakeProgress(uint32_t component, uint32_t data); + int recordTopLevelPCIDevice(IOService *); + void RTC_TRACE(void); + virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; + + IOPMTracePointHandler tracePointHandler; + void * tracePointTarget; + uint64_t getPMStatusCode(); + uint8_t getTracePhase(); + uint32_t getTraceData(); private: - IOPMrootDomain *owner; - IOLock *pmTraceWorkerLock; - OSArray *pciDeviceBitMappings; - - uint8_t 
addedToRegistry; - uint8_t tracePhase; - uint32_t traceData32; - uint8_t loginWindowData; - uint8_t coreDisplayData; - uint8_t coreGraphicsData; + IOPMrootDomain *owner; + IOLock *pmTraceWorkerLock; + OSArray *pciDeviceBitMappings; + + uint8_t addedToRegistry; + uint8_t tracePhase; + uint32_t traceData32; + uint8_t loginWindowData; + uint8_t coreDisplayData; + uint8_t coreGraphicsData; }; /* @@ -548,50 +552,50 @@ private: */ class PMAssertionsTracker : public OSObject { - OSDeclareFinalStructors(PMAssertionsTracker) + OSDeclareFinalStructors(PMAssertionsTracker) public: - static PMAssertionsTracker *pmAssertionsTracker( IOPMrootDomain * ); + static PMAssertionsTracker *pmAssertionsTracker( IOPMrootDomain * ); - IOReturn createAssertion(IOPMDriverAssertionType, IOPMDriverAssertionLevel, IOService *, const char *, IOPMDriverAssertionID *); - IOReturn releaseAssertion(IOPMDriverAssertionID); - IOReturn setAssertionLevel(IOPMDriverAssertionID, IOPMDriverAssertionLevel); - IOReturn setUserAssertionLevels(IOPMDriverAssertionType); + IOReturn createAssertion(IOPMDriverAssertionType, IOPMDriverAssertionLevel, IOService *, const char *, IOPMDriverAssertionID *); + IOReturn releaseAssertion(IOPMDriverAssertionID); + IOReturn setAssertionLevel(IOPMDriverAssertionID, IOPMDriverAssertionLevel); + IOReturn setUserAssertionLevels(IOPMDriverAssertionType); - OSArray *copyAssertionsArray(void); - IOPMDriverAssertionType getActivatedAssertions(void); - IOPMDriverAssertionLevel getAssertionLevel(IOPMDriverAssertionType); + OSArray *copyAssertionsArray(void); + IOPMDriverAssertionType getActivatedAssertions(void); + IOPMDriverAssertionLevel getAssertionLevel(IOPMDriverAssertionType); - IOReturn handleCreateAssertion(OSData *); - IOReturn handleReleaseAssertion(IOPMDriverAssertionID); - IOReturn handleSetAssertionLevel(IOPMDriverAssertionID, IOPMDriverAssertionLevel); - IOReturn handleSetUserAssertionLevels(void * arg0); - void publishProperties(void); + IOReturn handleCreateAssertion(OSData *); + IOReturn handleReleaseAssertion(IOPMDriverAssertionID); + IOReturn handleSetAssertionLevel(IOPMDriverAssertionID, IOPMDriverAssertionLevel); + IOReturn handleSetUserAssertionLevels(void * arg0); + void publishProperties(void); private: - typedef struct { - IOPMDriverAssertionID id; - IOPMDriverAssertionType assertionBits; - uint64_t createdTime; - uint64_t modifiedTime; - const OSSymbol *ownerString; - IOService *ownerService; - uint64_t registryEntryID; - IOPMDriverAssertionLevel level; - } PMAssertStruct; - - uint32_t tabulateProducerCount; - uint32_t tabulateConsumerCount; - - PMAssertStruct *detailsForID(IOPMDriverAssertionID, int *); - void tabulate(void); - - IOPMrootDomain *owner; - OSArray *assertionsArray; - IOLock *assertionsArrayLock; - IOPMDriverAssertionID issuingUniqueID __attribute__((aligned(8))); /* aligned for atomic access */ - IOPMDriverAssertionType assertionsKernel; - IOPMDriverAssertionType assertionsUser; - IOPMDriverAssertionType assertionsCombined; + typedef struct { + IOPMDriverAssertionID id; + IOPMDriverAssertionType assertionBits; + uint64_t createdTime; + uint64_t modifiedTime; + const OSSymbol *ownerString; + IOService *ownerService; + uint64_t registryEntryID; + IOPMDriverAssertionLevel level; + } PMAssertStruct; + + uint32_t tabulateProducerCount; + uint32_t tabulateConsumerCount; + + PMAssertStruct *detailsForID(IOPMDriverAssertionID, int *); + void tabulate(void); + + IOPMrootDomain *owner; + OSArray *assertionsArray; + IOLock *assertionsArrayLock; + IOPMDriverAssertionID 
issuingUniqueID __attribute__((aligned(8)));/* aligned for atomic access */ + IOPMDriverAssertionType assertionsKernel; + IOPMDriverAssertionType assertionsUser; + IOPMDriverAssertionType assertionsCombined; }; OSDefineMetaClassAndFinalStructors(PMAssertionsTracker, OSObject); @@ -605,21 +609,21 @@ OSDefineMetaClassAndFinalStructors(PMAssertionsTracker, OSObject); class PMHaltWorker : public OSObject { - OSDeclareFinalStructors( PMHaltWorker ) + OSDeclareFinalStructors( PMHaltWorker ) public: - IOService * service; // service being worked on - AbsoluteTime startTime; // time when work started - int depth; // work on nubs at this PM-tree depth - int visits; // number of nodes visited (debug) - IOLock * lock; - bool timeout; // service took too long - - static PMHaltWorker * worker( void ); - static void main( void * arg, wait_result_t waitResult ); - static void work( PMHaltWorker * me ); - static void checkTimeout( PMHaltWorker * me, AbsoluteTime * now ); - virtual void free( void ) APPLE_KEXT_OVERRIDE; + IOService * service;// service being worked on + AbsoluteTime startTime; // time when work started + int depth; // work on nubs at this PM-tree depth + int visits; // number of nodes visited (debug) + IOLock * lock; + bool timeout;// service took too long + + static PMHaltWorker * worker( void ); + static void main( void * arg, wait_result_t waitResult ); + static void work( PMHaltWorker * me ); + static void checkTimeout( PMHaltWorker * me, AbsoluteTime * now ); + virtual void free( void ) APPLE_KEXT_OVERRIDE; }; OSDefineMetaClassAndFinalStructors( PMHaltWorker, OSObject ) @@ -628,202 +632,216 @@ OSDefineMetaClassAndFinalStructors( PMHaltWorker, OSObject ) #define super IOService OSDefineMetaClassAndFinalStructors(IOPMrootDomain, IOService) -static void IOPMRootDomainWillShutdown(void) +static void +IOPMRootDomainWillShutdown(void) { - if (OSCompareAndSwap(0, 1, &gWillShutdown)) - { - OSKext::willShutdown(); - for (int i = 0; i < 100; i++) - { - if (OSCompareAndSwap(0, 1, &gSleepOrShutdownPending)) break; - IOSleep( 100 ); - } - } + if (OSCompareAndSwap(0, 1, &gWillShutdown)) { + OSKext::willShutdown(); + for (int i = 0; i < 100; i++) { + if (OSCompareAndSwap(0, 1, &gSleepOrShutdownPending)) { + break; + } + IOSleep( 100 ); + } + } } -extern "C" IONotifier * registerSleepWakeInterest(IOServiceInterestHandler handler, void * self, void * ref) +extern "C" IONotifier * +registerSleepWakeInterest(IOServiceInterestHandler handler, void * self, void * ref) { - return gRootDomain->registerInterest( gIOGeneralInterest, handler, self, ref ); + return gRootDomain->registerInterest( gIOGeneralInterest, handler, self, ref ); } -extern "C" IONotifier * registerPrioritySleepWakeInterest(IOServiceInterestHandler handler, void * self, void * ref) +extern "C" IONotifier * +registerPrioritySleepWakeInterest(IOServiceInterestHandler handler, void * self, void * ref) { - return gRootDomain->registerInterest( gIOPriorityPowerStateInterest, handler, self, ref ); + return gRootDomain->registerInterest( gIOPriorityPowerStateInterest, handler, self, ref ); } -extern "C" IOReturn acknowledgeSleepWakeNotification(void * PMrefcon) +extern "C" IOReturn +acknowledgeSleepWakeNotification(void * PMrefcon) { - return gRootDomain->allowPowerChange ( (unsigned long)PMrefcon ); + return gRootDomain->allowPowerChange((unsigned long)PMrefcon ); } -extern "C" IOReturn vetoSleepWakeNotification(void * PMrefcon) +extern "C" IOReturn +vetoSleepWakeNotification(void * PMrefcon) { - return gRootDomain->cancelPowerChange ( 
(unsigned long)PMrefcon ); + return gRootDomain->cancelPowerChange((unsigned long)PMrefcon ); } -extern "C" IOReturn rootDomainRestart ( void ) +extern "C" IOReturn +rootDomainRestart( void ) { - return gRootDomain->restartSystem(); + return gRootDomain->restartSystem(); } -extern "C" IOReturn rootDomainShutdown ( void ) +extern "C" IOReturn +rootDomainShutdown( void ) { - return gRootDomain->shutdownSystem(); + return gRootDomain->shutdownSystem(); } -static void halt_log_putc(char c) +static void +halt_log_putc(char c) { - if (gHaltLogPos >= (kHaltLogSize - 2)) return; - gHaltLog[gHaltLogPos++] = c; + if (gHaltLogPos >= (kHaltLogSize - 2)) { + return; + } + gHaltLog[gHaltLogPos++] = c; } extern "C" void _doprnt_log(const char *fmt, - va_list *argp, - void (*putc)(char), - int radix); + va_list *argp, + void (*putc)(char), + int radix); static int halt_log(const char *fmt, ...) { - va_list listp; + va_list listp; - va_start(listp, fmt); - _doprnt_log(fmt, &listp, &halt_log_putc, 16); - va_end(listp); + va_start(listp, fmt); + _doprnt_log(fmt, &listp, &halt_log_putc, 16); + va_end(listp); - return (0); + return 0; } extern "C" void halt_log_enter(const char * what, const void * pc, uint64_t time) { - uint64_t nano, millis; + uint64_t nano, millis; - if (!gHaltLog) return; - absolutetime_to_nanoseconds(time, &nano); - millis = nano / NSEC_PER_MSEC; - if (millis < 100) return; + if (!gHaltLog) { + return; + } + absolutetime_to_nanoseconds(time, &nano); + millis = nano / NSEC_PER_MSEC; + if (millis < 100) { + return; + } - IOLockLock(gHaltLogLock); - if (pc) { - halt_log("%s: %qd ms @ 0x%lx, ", what, millis, VM_KERNEL_UNSLIDE(pc)); - OSKext::printKextsInBacktrace((vm_offset_t *) &pc, 1, &halt_log, - OSKext::kPrintKextsLock | OSKext::kPrintKextsUnslide | OSKext::kPrintKextsTerse); - } - else { - halt_log("%s: %qd ms\n", what, millis); - } + IOLockLock(gHaltLogLock); + if (pc) { + halt_log("%s: %qd ms @ 0x%lx, ", what, millis, VM_KERNEL_UNSLIDE(pc)); + OSKext::printKextsInBacktrace((vm_offset_t *) &pc, 1, &halt_log, + OSKext::kPrintKextsLock | OSKext::kPrintKextsUnslide | OSKext::kPrintKextsTerse); + } else { + halt_log("%s: %qd ms\n", what, millis); + } - gHaltLog[gHaltLogPos] = 0; - IOLockUnlock(gHaltLogLock); + gHaltLog[gHaltLogPos] = 0; + IOLockUnlock(gHaltLogLock); } extern uint32_t gFSState; -extern "C" void IOSystemShutdownNotification(int stage) +extern "C" void +IOSystemShutdownNotification(int stage) { - uint64_t startTime; + uint64_t startTime; - if (kIOSystemShutdownNotificationStageRootUnmount == stage) - { + if (kIOSystemShutdownNotificationStageRootUnmount == stage) { #if !CONFIG_EMBEDDED - uint64_t nano, millis; - startTime = mach_absolute_time(); - IOService::getPlatform()->waitQuiet(30 * NSEC_PER_SEC); - absolutetime_to_nanoseconds(mach_absolute_time() - startTime, &nano); - millis = nano / NSEC_PER_MSEC; - if (gHaltTimeMaxLog && (millis >= gHaltTimeMaxLog)) - { - printf("waitQuiet() for unmount %qd ms\n", millis); - } + uint64_t nano, millis; + startTime = mach_absolute_time(); + IOService::getPlatform()->waitQuiet(30 * NSEC_PER_SEC); + absolutetime_to_nanoseconds(mach_absolute_time() - startTime, &nano); + millis = nano / NSEC_PER_MSEC; + if (gHaltTimeMaxLog && (millis >= gHaltTimeMaxLog)) { + printf("waitQuiet() for unmount %qd ms\n", millis); + } #endif - return; - } - - assert(kIOSystemShutdownNotificationStageProcessExit == stage); - - IOLockLock(gHaltLogLock); - if (!gHaltLog) - { - gHaltLog = IONew(char, kHaltLogSize); - gHaltStartTime = mach_absolute_time(); - if 
(gHaltLog) halt_log_putc('\n'); - } - IOLockUnlock(gHaltLogLock); - - startTime = mach_absolute_time(); - IOPMRootDomainWillShutdown(); - halt_log_enter("IOPMRootDomainWillShutdown", 0, mach_absolute_time() - startTime); + return; + } + + assert(kIOSystemShutdownNotificationStageProcessExit == stage); + + IOLockLock(gHaltLogLock); + if (!gHaltLog) { + gHaltLog = IONew(char, kHaltLogSize); + gHaltStartTime = mach_absolute_time(); + if (gHaltLog) { + halt_log_putc('\n'); + } + } + IOLockUnlock(gHaltLogLock); + + startTime = mach_absolute_time(); + IOPMRootDomainWillShutdown(); + halt_log_enter("IOPMRootDomainWillShutdown", 0, mach_absolute_time() - startTime); #if HIBERNATION - startTime = mach_absolute_time(); - IOHibernateSystemPostWake(true); - halt_log_enter("IOHibernateSystemPostWake", 0, mach_absolute_time() - startTime); + startTime = mach_absolute_time(); + IOHibernateSystemPostWake(true); + halt_log_enter("IOHibernateSystemPostWake", 0, mach_absolute_time() - startTime); #endif - if (OSCompareAndSwap(0, 1, &gPagingOff)) - { + if (OSCompareAndSwap(0, 1, &gPagingOff)) { #if !CONFIG_EMBEDDED - gRootDomain->handlePlatformHaltRestart(kPEPagingOff); + gRootDomain->handlePlatformHaltRestart(kPEPagingOff); #endif - } + } } extern "C" int sync_internal(void); /* -A device is always in the highest power state which satisfies its driver, -its policy-maker, and any power children it has, but within the constraint -of the power state provided by its parent. The driver expresses its desire by -calling changePowerStateTo(), the policy-maker expresses its desire by calling -changePowerStateToPriv(), and the children express their desires by calling -requestPowerDomainState(). - -The Root Power Domain owns the policy for idle and demand sleep for the system. -It is a power-managed IOService just like the others in the system. -It implements several power states which map to what we see as Sleep and On. - -The sleep policy is as follows: -1. Sleep is prevented if the case is open so that nobody will think the machine - is off and plug/unplug cards. -2. Sleep is prevented if the sleep timeout slider in the prefs panel is zero. -3. System cannot Sleep if some object in the tree is in a power state marked - kIOPMPreventSystemSleep. - -These three conditions are enforced using the "driver clamp" by calling -changePowerStateTo(). For example, if the case is opened, -changePowerStateTo(ON_STATE) is called to hold the system on regardless -of the desires of the children of the root or the state of the other clamp. - -Demand Sleep is initiated by pressing the front panel power button, closing -the clamshell, or selecting the menu item. In this case the root's parent -actually initiates the power state change so that the root domain has no -choice and does not give applications the opportunity to veto the change. - -Idle Sleep occurs if no objects in the tree are in a state marked -kIOPMPreventIdleSleep. When this is true, the root's children are not holding -the root on, so it sets the "policy-maker clamp" by calling -changePowerStateToPriv(ON_STATE) to hold itself on until the sleep timer expires. -This timer is set for the difference between the sleep timeout slider and the -display dim timeout slider. When the timer expires, it releases its clamp and -now nothing is holding it awake, so it falls asleep. - -Demand sleep is prevented when the system is booting. When preferences are -transmitted by the loginwindow at the end of boot, a flag is cleared, -and this allows subsequent Demand Sleep. 
-*/ + * A device is always in the highest power state which satisfies its driver, + * its policy-maker, and any power children it has, but within the constraint + * of the power state provided by its parent. The driver expresses its desire by + * calling changePowerStateTo(), the policy-maker expresses its desire by calling + * changePowerStateToPriv(), and the children express their desires by calling + * requestPowerDomainState(). + * + * The Root Power Domain owns the policy for idle and demand sleep for the system. + * It is a power-managed IOService just like the others in the system. + * It implements several power states which map to what we see as Sleep and On. + * + * The sleep policy is as follows: + * 1. Sleep is prevented if the case is open so that nobody will think the machine + * is off and plug/unplug cards. + * 2. Sleep is prevented if the sleep timeout slider in the prefs panel is zero. + * 3. System cannot Sleep if some object in the tree is in a power state marked + * kIOPMPreventSystemSleep. + * + * These three conditions are enforced using the "driver clamp" by calling + * changePowerStateTo(). For example, if the case is opened, + * changePowerStateTo(ON_STATE) is called to hold the system on regardless + * of the desires of the children of the root or the state of the other clamp. + * + * Demand Sleep is initiated by pressing the front panel power button, closing + * the clamshell, or selecting the menu item. In this case the root's parent + * actually initiates the power state change so that the root domain has no + * choice and does not give applications the opportunity to veto the change. + * + * Idle Sleep occurs if no objects in the tree are in a state marked + * kIOPMPreventIdleSleep. When this is true, the root's children are not holding + * the root on, so it sets the "policy-maker clamp" by calling + * changePowerStateToPriv(ON_STATE) to hold itself on until the sleep timer expires. + * This timer is set for the difference between the sleep timeout slider and the + * display dim timeout slider. When the timer expires, it releases its clamp and + * now nothing is holding it awake, so it falls asleep. + * + * Demand sleep is prevented when the system is booting. When preferences are + * transmitted by the loginwindow at the end of boot, a flag is cleared, + * and this allows subsequent Demand Sleep. 
+ */ //****************************************************************************** -IOPMrootDomain * IOPMrootDomain::construct( void ) +IOPMrootDomain * +IOPMrootDomain::construct( void ) { - IOPMrootDomain *root; + IOPMrootDomain *root; - root = new IOPMrootDomain; - if( root) - root->init(); + root = new IOPMrootDomain; + if (root) { + root->init(); + } - return( root ); + return root; } //****************************************************************************** @@ -831,71 +849,73 @@ IOPMrootDomain * IOPMrootDomain::construct( void ) // //****************************************************************************** -static void updateConsoleUsersCallout(thread_call_param_t p0, thread_call_param_t p1) +static void +updateConsoleUsersCallout(thread_call_param_t p0, thread_call_param_t p1) { - IOPMrootDomain * rootDomain = (IOPMrootDomain *) p0; - rootDomain->updateConsoleUsers(); + IOPMrootDomain * rootDomain = (IOPMrootDomain *) p0; + rootDomain->updateConsoleUsers(); } -void IOPMrootDomain::updateConsoleUsers(void) +void +IOPMrootDomain::updateConsoleUsers(void) { - IOService::updateConsoleUsers(NULL, kIOMessageSystemHasPoweredOn); - if (tasksSuspended) - { - tasksSuspended = FALSE; - tasks_system_suspend(tasksSuspended); - } + IOService::updateConsoleUsers(NULL, kIOMessageSystemHasPoweredOn); + if (tasksSuspended) { + tasksSuspended = FALSE; + tasks_system_suspend(tasksSuspended); + } } //****************************************************************************** -static void disk_sync_callout( thread_call_param_t p0, thread_call_param_t p1 ) +static void +disk_sync_callout( thread_call_param_t p0, thread_call_param_t p1 ) { - IOService * rootDomain = (IOService *) p0; - uint32_t notifyRef = (uint32_t)(uintptr_t) p1; - uint32_t powerState = rootDomain->getPowerState(); + IOService * rootDomain = (IOService *) p0; + uint32_t notifyRef = (uint32_t)(uintptr_t) p1; + uint32_t powerState = rootDomain->getPowerState(); - DLOG("disk_sync_callout ps=%u\n", powerState); + DLOG("disk_sync_callout ps=%u\n", powerState); - if (ON_STATE == powerState) - { - sync_internal(); + if (ON_STATE == powerState) { + sync_internal(); #if HIBERNATION - // Block sleep until trim issued on previous wake path is completed. - IOHibernateSystemPostWake(true); + // Block sleep until trim issued on previous wake path is completed. 
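updateConsoleUsersCallout and disk_sync_callout above both follow the kernel's thread_call pattern: the owner is bound as the fixed parameter at allocation, and a per-invocation parameter is passed when the call is entered. A self-contained sketch of that pattern, with my_callout, my_setup and my_trigger invented for illustration:

    #include <kern/thread_call.h>

    // The fixed parameter (p0) is bound at allocation time; the
    // per-invocation parameter (p1) is supplied by thread_call_enter1().
    static void
    my_callout(thread_call_param_t p0, thread_call_param_t p1)
    {
        // runs on a kernel thread, outside the caller's context
    }

    static thread_call_t gMyCall;

    static void
    my_setup(void *owner)
    {
        gMyCall = thread_call_allocate(&my_callout, (thread_call_param_t) owner);
    }

    static void
    my_trigger(void *arg)
    {
        // queues my_callout(owner, arg) and returns immediately
        thread_call_enter1(gMyCall, (thread_call_param_t) arg);
    }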
+ IOHibernateSystemPostWake(true); #endif - } + } #if HIBERNATION - else - { - IOHibernateSystemPostWake(false); + else { + IOHibernateSystemPostWake(false); - if (gRootDomain) - gRootDomain->sleepWakeDebugSaveSpinDumpFile(); - } + if (gRootDomain) { + gRootDomain->sleepWakeDebugSaveSpinDumpFile(); + } + } #endif - rootDomain->allowPowerChange(notifyRef); - DLOG("disk_sync_callout finish\n"); + rootDomain->allowPowerChange(notifyRef); + DLOG("disk_sync_callout finish\n"); } //****************************************************************************** -static UInt32 computeDeltaTimeMS( const AbsoluteTime * startTime, AbsoluteTime * elapsedTime ) +static UInt32 +computeDeltaTimeMS( const AbsoluteTime * startTime, AbsoluteTime * elapsedTime ) { - AbsoluteTime endTime; - UInt64 nano = 0; + AbsoluteTime endTime; + UInt64 nano = 0; - clock_get_uptime(&endTime); - if (CMP_ABSOLUTETIME(&endTime, startTime) <= 0) *elapsedTime = 0; - else - { - SUB_ABSOLUTETIME(&endTime, startTime); - absolutetime_to_nanoseconds(endTime, &nano); - *elapsedTime = endTime; - } + clock_get_uptime(&endTime); + if (CMP_ABSOLUTETIME(&endTime, startTime) <= 0) { + *elapsedTime = 0; + } else { + SUB_ABSOLUTETIME(&endTime, startTime); + absolutetime_to_nanoseconds(endTime, &nano); + *elapsedTime = endTime; + } - return (UInt32)(nano / NSEC_PER_MSEC); + return (UInt32)(nano / NSEC_PER_MSEC); } //****************************************************************************** @@ -903,55 +923,56 @@ static UInt32 computeDeltaTimeMS( const AbsoluteTime * startTime, AbsoluteTime * static int sysctl_sleepwaketime SYSCTL_HANDLER_ARGS { - struct timeval *swt = (struct timeval *)arg1; - struct proc *p = req->p; - - if (p == kernproc) { - return sysctl_io_opaque(req, swt, sizeof(*swt), NULL); - } else if(proc_is64bit(p)) { - struct user64_timeval t = {}; - t.tv_sec = swt->tv_sec; - t.tv_usec = swt->tv_usec; - return sysctl_io_opaque(req, &t, sizeof(t), NULL); - } else { - struct user32_timeval t = {}; - t.tv_sec = swt->tv_sec; - t.tv_usec = swt->tv_usec; - return sysctl_io_opaque(req, &t, sizeof(t), NULL); - } + struct timeval *swt = (struct timeval *)arg1; + struct proc *p = req->p; + + if (p == kernproc) { + return sysctl_io_opaque(req, swt, sizeof(*swt), NULL); + } else if (proc_is64bit(p)) { + struct user64_timeval t = {}; + t.tv_sec = swt->tv_sec; + t.tv_usec = swt->tv_usec; + return sysctl_io_opaque(req, &t, sizeof(t), NULL); + } else { + struct user32_timeval t = {}; + t.tv_sec = swt->tv_sec; + t.tv_usec = swt->tv_usec; + return sysctl_io_opaque(req, &t, sizeof(t), NULL); + } } static SYSCTL_PROC(_kern, OID_AUTO, sleeptime, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - &gIOLastSleepTime, 0, sysctl_sleepwaketime, "S,timeval", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + &gIOLastSleepTime, 0, sysctl_sleepwaketime, "S,timeval", ""); static SYSCTL_PROC(_kern, OID_AUTO, waketime, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - &gIOLastWakeTime, 0, sysctl_sleepwaketime, "S,timeval", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + &gIOLastWakeTime, 0, sysctl_sleepwaketime, "S,timeval", ""); -SYSCTL_QUAD(_kern, OID_AUTO, wake_abs_time, CTLFLAG_RD|CTLFLAG_LOCKED, &gIOLastWakeAbsTime, ""); -SYSCTL_QUAD(_kern, OID_AUTO, sleep_abs_time, CTLFLAG_RD|CTLFLAG_LOCKED, &gIOLastSleepAbsTime, ""); -SYSCTL_QUAD(_kern, OID_AUTO, useractive_abs_time, CTLFLAG_RD|CTLFLAG_LOCKED, &gUserActiveAbsTime, ""); 
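The sysctl handlers in this file are declared with CTLFLAG_NOAUTO and registered explicitly by IOPMrootDomain::start() via the sysctl_register_oid() calls later in this hunk. A sketch of that deferred-registration pattern, with my_value and sysctl_myvalue invented for illustration:

    #include <sys/sysctl.h>

    static int my_value;

    static int
    sysctl_myvalue SYSCTL_HANDLER_ARGS
    {
    #pragma unused(oidp, arg1, arg2)
        int new_value, changed;
        int error = sysctl_io_number(req, my_value, sizeof(int), &new_value, &changed);
        if (!error && changed) {
            my_value = new_value;
        }
        return error;
    }

    static SYSCTL_PROC(_kern, OID_AUTO, myvalue,
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED,
        0, 0, sysctl_myvalue, "I", "");

    // CTLFLAG_NOAUTO keeps the OID unregistered until the owner is ready:
    //     sysctl_register_oid(&sysctl__kern_myvalue);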
-SYSCTL_QUAD(_kern, OID_AUTO, userinactive_abs_time, CTLFLAG_RD|CTLFLAG_LOCKED, &gUserInactiveAbsTime, ""); +SYSCTL_QUAD(_kern, OID_AUTO, wake_abs_time, CTLFLAG_RD | CTLFLAG_LOCKED, &gIOLastWakeAbsTime, ""); +SYSCTL_QUAD(_kern, OID_AUTO, sleep_abs_time, CTLFLAG_RD | CTLFLAG_LOCKED, &gIOLastSleepAbsTime, ""); +SYSCTL_QUAD(_kern, OID_AUTO, useractive_abs_time, CTLFLAG_RD | CTLFLAG_LOCKED, &gUserActiveAbsTime, ""); +SYSCTL_QUAD(_kern, OID_AUTO, userinactive_abs_time, CTLFLAG_RD | CTLFLAG_LOCKED, &gUserInactiveAbsTime, ""); static int sysctl_willshutdown (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - int new_value, changed; - int error = sysctl_io_number(req, gWillShutdown, sizeof(int), &new_value, &changed); - if (changed) { - if (!gWillShutdown && (new_value == 1)) { - IOPMRootDomainWillShutdown(); - } else - error = EINVAL; - } - return(error); + int new_value, changed; + int error = sysctl_io_number(req, gWillShutdown, sizeof(int), &new_value, &changed); + if (changed) { + if (!gWillShutdown && (new_value == 1)) { + IOPMRootDomainWillShutdown(); + } else { + error = EINVAL; + } + } + return error; } static SYSCTL_PROC(_kern, OID_AUTO, willshutdown, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_willshutdown, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_willshutdown, "I", ""); extern struct sysctl_oid sysctl__kern_iokittest; extern struct sysctl_oid sysctl__debug_iokit; @@ -962,37 +983,41 @@ static int sysctl_progressmeterenable (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - int error; - int new_value, changed; + int error; + int new_value, changed; - error = sysctl_io_number(req, vc_progressmeter_enable, sizeof(int), &new_value, &changed); + error = sysctl_io_number(req, vc_progressmeter_enable, sizeof(int), &new_value, &changed); - if (changed) vc_enable_progressmeter(new_value); + if (changed) { + vc_enable_progressmeter(new_value); + } - return (error); + return error; } static int sysctl_progressmeter (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - int error; - int new_value, changed; + int error; + int new_value, changed; - error = sysctl_io_number(req, vc_progressmeter_value, sizeof(int), &new_value, &changed); + error = sysctl_io_number(req, vc_progressmeter_value, sizeof(int), &new_value, &changed); - if (changed) vc_set_progressmeter(new_value); + if (changed) { + vc_set_progressmeter(new_value); + } - return (error); + return error; } static SYSCTL_PROC(_kern, OID_AUTO, progressmeterenable, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_progressmeterenable, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_progressmeterenable, "I", ""); static SYSCTL_PROC(_kern, OID_AUTO, progressmeter, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_progressmeter, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_progressmeter, "I", ""); #endif /* !CONFIG_EMBEDDED */ @@ -1002,71 +1027,72 @@ static int sysctl_consoleoptions (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - int error, changed; - uint32_t new_value; + int error, changed; + uint32_t new_value; - error = sysctl_io_number(req, 
vc_user_options.options, sizeof(uint32_t), &new_value, &changed); + error = sysctl_io_number(req, vc_user_options.options, sizeof(uint32_t), &new_value, &changed); - if (changed) vc_user_options.options = new_value; + if (changed) { + vc_user_options.options = new_value; + } - return (error); + return error; } static SYSCTL_PROC(_kern, OID_AUTO, consoleoptions, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_consoleoptions, "I", ""); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_consoleoptions, "I", ""); static int sysctl_progressoptions SYSCTL_HANDLER_ARGS { - return sysctl_io_opaque(req, &vc_user_options, sizeof(vc_user_options), NULL); + return sysctl_io_opaque(req, &vc_user_options, sizeof(vc_user_options), NULL); } static SYSCTL_PROC(_kern, OID_AUTO, progressoptions, - CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, - NULL, 0, sysctl_progressoptions, "S,vc_progress_user_options", ""); + CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, + NULL, 0, sysctl_progressoptions, "S,vc_progress_user_options", ""); static int sysctl_wakereason SYSCTL_HANDLER_ARGS { - char wr[ sizeof(gWakeReasonString) ]; + char wr[sizeof(gWakeReasonString)]; - wr[0] = '\0'; - if (gRootDomain) - gRootDomain->copyWakeReasonString(wr, sizeof(wr)); + wr[0] = '\0'; + if (gRootDomain) { + gRootDomain->copyWakeReasonString(wr, sizeof(wr)); + } - return sysctl_io_string(req, wr, 0, 0, NULL); + return sysctl_io_string(req, wr, 0, 0, NULL); } SYSCTL_PROC(_kern, OID_AUTO, wakereason, - CTLTYPE_STRING| CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, 0, sysctl_wakereason, "A", "wakereason"); static int sysctl_targettype SYSCTL_HANDLER_ARGS { - IOService * root; - OSObject * obj; - OSData * data; - char tt[32]; + IOService * root; + OSObject * obj; + OSData * data; + char tt[32]; - tt[0] = '\0'; - root = IOService::getServiceRoot(); - if (root && (obj = root->copyProperty(gIODTTargetTypeKey))) - { - if ((data = OSDynamicCast(OSData, obj))) - { - strlcpy(tt, (const char *) data->getBytesNoCopy(), sizeof(tt)); + tt[0] = '\0'; + root = IOService::getServiceRoot(); + if (root && (obj = root->copyProperty(gIODTTargetTypeKey))) { + if ((data = OSDynamicCast(OSData, obj))) { + strlcpy(tt, (const char *) data->getBytesNoCopy(), sizeof(tt)); + } + obj->release(); } - obj->release(); - } - return sysctl_io_string(req, tt, 0, 0, NULL); + return sysctl_io_string(req, tt, 0, 0, NULL); } SYSCTL_PROC(_hw, OID_AUTO, targettype, - CTLTYPE_STRING| CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, NULL, 0, sysctl_targettype, "A", "targettype"); static SYSCTL_INT(_debug, OID_AUTO, darkwake, CTLFLAG_RW, &gDarkWakeFlags, 0, ""); @@ -1093,260 +1119,263 @@ static const OSSymbol * gIOPMUserIsActiveKey; #define kRootDomainSettingsCount 17 -bool IOPMrootDomain::start( IOService * nub ) -{ - OSIterator *psIterator; - OSDictionary *tmpDict; - IORootParent * patriarch; - - super::start(nub); - - gRootDomain = this; - gIOPMSettingAutoWakeCalendarKey = OSSymbol::withCString(kIOPMSettingAutoWakeCalendarKey); - gIOPMSettingAutoWakeSecondsKey = OSSymbol::withCString(kIOPMSettingAutoWakeSecondsKey); - gIOPMSettingDebugWakeRelativeKey = OSSymbol::withCString(kIOPMSettingDebugWakeRelativeKey); - 
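sysctl_targettype above shows the standard copyProperty()/OSDynamicCast() idiom: the copied property must be released whether or not the cast succeeds. The same idiom in isolation; copy_string_property and its arguments are invented names:

    #include <IOKit/IOService.h>
    #include <libkern/c++/OSData.h>
    #include <string.h>

    static void
    copy_string_property(IOService *service, const char *key, char *buf, size_t len)
    {
        OSObject *obj;
        OSData   *data;

        buf[0] = '\0';
        if (service && (obj = service->copyProperty(key))) {
            if ((data = OSDynamicCast(OSData, obj))) {
                strlcpy(buf, (const char *) data->getBytesNoCopy(), len);
            }
            obj->release(); // balances copyProperty()'s retain
        }
    }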
gIOPMSettingMaintenanceWakeCalendarKey = OSSymbol::withCString(kIOPMSettingMaintenanceWakeCalendarKey); - gIOPMSettingSleepServiceWakeCalendarKey = OSSymbol::withCString(kIOPMSettingSleepServiceWakeCalendarKey); - gIOPMSettingSilentRunningKey = OSSymbol::withCStringNoCopy(kIOPMSettingSilentRunningKey); - gIOPMUserTriggeredFullWakeKey = OSSymbol::withCStringNoCopy(kIOPMUserTriggeredFullWakeKey); - gIOPMUserIsActiveKey = OSSymbol::withCStringNoCopy(kIOPMUserIsActiveKey); - - gIOPMStatsResponseTimedOut = OSSymbol::withCString(kIOPMStatsResponseTimedOut); - gIOPMStatsResponseCancel = OSSymbol::withCString(kIOPMStatsResponseCancel); - gIOPMStatsResponseSlow = OSSymbol::withCString(kIOPMStatsResponseSlow); - gIOPMStatsResponsePrompt = OSSymbol::withCString(kIOPMStatsResponsePrompt); - gIOPMStatsDriverPSChangeSlow = OSSymbol::withCString(kIOPMStatsDriverPSChangeSlow); - - sleepSupportedPEFunction = OSSymbol::withCString("IOPMSetSleepSupported"); - sleepMessagePEFunction = OSSymbol::withCString("IOPMSystemSleepMessage"); - - const OSSymbol *settingsArr[kRootDomainSettingsCount] = - { - OSSymbol::withCString(kIOPMSettingSleepOnPowerButtonKey), - gIOPMSettingAutoWakeSecondsKey, - OSSymbol::withCString(kIOPMSettingAutoPowerSecondsKey), - gIOPMSettingAutoWakeCalendarKey, - OSSymbol::withCString(kIOPMSettingAutoPowerCalendarKey), - gIOPMSettingDebugWakeRelativeKey, - OSSymbol::withCString(kIOPMSettingDebugPowerRelativeKey), - OSSymbol::withCString(kIOPMSettingWakeOnRingKey), - OSSymbol::withCString(kIOPMSettingRestartOnPowerLossKey), - OSSymbol::withCString(kIOPMSettingWakeOnClamshellKey), - OSSymbol::withCString(kIOPMSettingWakeOnACChangeKey), - OSSymbol::withCString(kIOPMSettingTimeZoneOffsetKey), - OSSymbol::withCString(kIOPMSettingDisplaySleepUsesDimKey), - OSSymbol::withCString(kIOPMSettingMobileMotionModuleKey), - OSSymbol::withCString(kIOPMSettingGraphicsSwitchKey), - OSSymbol::withCString(kIOPMStateConsoleShutdown), - gIOPMSettingSilentRunningKey - }; - - PE_parse_boot_argn("darkwake", &gDarkWakeFlags, sizeof(gDarkWakeFlags)); - PE_parse_boot_argn("noidle", &gNoIdleFlag, sizeof(gNoIdleFlag)); - PE_parse_boot_argn("swd_sleeptimeout", &gSwdSleepTimeout, sizeof(gSwdSleepTimeout)); - PE_parse_boot_argn("swd_waketimeout", &gSwdWakeTimeout, sizeof(gSwdWakeTimeout)); - PE_parse_boot_argn("swd_timeout", &gSwdSleepWakeTimeout, sizeof(gSwdSleepWakeTimeout)); - PE_parse_boot_argn("haltmspanic", &gHaltTimeMaxPanic, sizeof(gHaltTimeMaxPanic)); - PE_parse_boot_argn("haltmslog", &gHaltTimeMaxLog, sizeof(gHaltTimeMaxLog)); - - queue_init(&aggressivesQueue); - aggressivesThreadCall = thread_call_allocate(handleAggressivesFunction, this); - aggressivesData = OSData::withCapacity( - sizeof(AggressivesRecord) * (kPMLastAggressivenessType + 4)); - - featuresDictLock = IOLockAlloc(); - settingsCtrlLock = IOLockAlloc(); - wakeEventLock = IOLockAlloc(); - gHaltLogLock = IOLockAlloc(); - setPMRootDomain(this); - - extraSleepTimer = thread_call_allocate( - idleSleepTimerExpired, - (thread_call_param_t) this); - - diskSyncCalloutEntry = thread_call_allocate( - &disk_sync_callout, - (thread_call_param_t) this); - updateConsoleUsersEntry = thread_call_allocate( - &updateConsoleUsersCallout, - (thread_call_param_t) this); +bool +IOPMrootDomain::start( IOService * nub ) +{ + OSIterator *psIterator; + OSDictionary *tmpDict; + IORootParent * patriarch; + + super::start(nub); + + gRootDomain = this; + gIOPMSettingAutoWakeCalendarKey = OSSymbol::withCString(kIOPMSettingAutoWakeCalendarKey); + gIOPMSettingAutoWakeSecondsKey = 
OSSymbol::withCString(kIOPMSettingAutoWakeSecondsKey); + gIOPMSettingDebugWakeRelativeKey = OSSymbol::withCString(kIOPMSettingDebugWakeRelativeKey); + gIOPMSettingMaintenanceWakeCalendarKey = OSSymbol::withCString(kIOPMSettingMaintenanceWakeCalendarKey); + gIOPMSettingSleepServiceWakeCalendarKey = OSSymbol::withCString(kIOPMSettingSleepServiceWakeCalendarKey); + gIOPMSettingSilentRunningKey = OSSymbol::withCStringNoCopy(kIOPMSettingSilentRunningKey); + gIOPMUserTriggeredFullWakeKey = OSSymbol::withCStringNoCopy(kIOPMUserTriggeredFullWakeKey); + gIOPMUserIsActiveKey = OSSymbol::withCStringNoCopy(kIOPMUserIsActiveKey); + + gIOPMStatsResponseTimedOut = OSSymbol::withCString(kIOPMStatsResponseTimedOut); + gIOPMStatsResponseCancel = OSSymbol::withCString(kIOPMStatsResponseCancel); + gIOPMStatsResponseSlow = OSSymbol::withCString(kIOPMStatsResponseSlow); + gIOPMStatsResponsePrompt = OSSymbol::withCString(kIOPMStatsResponsePrompt); + gIOPMStatsDriverPSChangeSlow = OSSymbol::withCString(kIOPMStatsDriverPSChangeSlow); + + sleepSupportedPEFunction = OSSymbol::withCString("IOPMSetSleepSupported"); + sleepMessagePEFunction = OSSymbol::withCString("IOPMSystemSleepMessage"); + + const OSSymbol *settingsArr[kRootDomainSettingsCount] = + { + OSSymbol::withCString(kIOPMSettingSleepOnPowerButtonKey), + gIOPMSettingAutoWakeSecondsKey, + OSSymbol::withCString(kIOPMSettingAutoPowerSecondsKey), + gIOPMSettingAutoWakeCalendarKey, + OSSymbol::withCString(kIOPMSettingAutoPowerCalendarKey), + gIOPMSettingDebugWakeRelativeKey, + OSSymbol::withCString(kIOPMSettingDebugPowerRelativeKey), + OSSymbol::withCString(kIOPMSettingWakeOnRingKey), + OSSymbol::withCString(kIOPMSettingRestartOnPowerLossKey), + OSSymbol::withCString(kIOPMSettingWakeOnClamshellKey), + OSSymbol::withCString(kIOPMSettingWakeOnACChangeKey), + OSSymbol::withCString(kIOPMSettingTimeZoneOffsetKey), + OSSymbol::withCString(kIOPMSettingDisplaySleepUsesDimKey), + OSSymbol::withCString(kIOPMSettingMobileMotionModuleKey), + OSSymbol::withCString(kIOPMSettingGraphicsSwitchKey), + OSSymbol::withCString(kIOPMStateConsoleShutdown), + gIOPMSettingSilentRunningKey + }; + + PE_parse_boot_argn("darkwake", &gDarkWakeFlags, sizeof(gDarkWakeFlags)); + PE_parse_boot_argn("noidle", &gNoIdleFlag, sizeof(gNoIdleFlag)); + PE_parse_boot_argn("swd_sleeptimeout", &gSwdSleepTimeout, sizeof(gSwdSleepTimeout)); + PE_parse_boot_argn("swd_waketimeout", &gSwdWakeTimeout, sizeof(gSwdWakeTimeout)); + PE_parse_boot_argn("swd_timeout", &gSwdSleepWakeTimeout, sizeof(gSwdSleepWakeTimeout)); + PE_parse_boot_argn("haltmspanic", &gHaltTimeMaxPanic, sizeof(gHaltTimeMaxPanic)); + PE_parse_boot_argn("haltmslog", &gHaltTimeMaxLog, sizeof(gHaltTimeMaxLog)); + + queue_init(&aggressivesQueue); + aggressivesThreadCall = thread_call_allocate(handleAggressivesFunction, this); + aggressivesData = OSData::withCapacity( + sizeof(AggressivesRecord) * (kPMLastAggressivenessType + 4)); + + featuresDictLock = IOLockAlloc(); + settingsCtrlLock = IOLockAlloc(); + wakeEventLock = IOLockAlloc(); + gHaltLogLock = IOLockAlloc(); + setPMRootDomain(this); + + extraSleepTimer = thread_call_allocate( + idleSleepTimerExpired, + (thread_call_param_t) this); + + diskSyncCalloutEntry = thread_call_allocate( + &disk_sync_callout, + (thread_call_param_t) this); + updateConsoleUsersEntry = thread_call_allocate( + &updateConsoleUsersCallout, + (thread_call_param_t) this); #if DARK_TO_FULL_EVALUATE_CLAMSHELL - fullWakeThreadCall = thread_call_allocate( - OSMemberFunctionCast(thread_call_func_t, this, - 
&IOPMrootDomain::fullWakeDelayedWork), - (thread_call_param_t) this); + fullWakeThreadCall = thread_call_allocate( + OSMemberFunctionCast(thread_call_func_t, this, + &IOPMrootDomain::fullWakeDelayedWork), + (thread_call_param_t) this); #endif - setProperty(kIOSleepSupportedKey, true); - - bzero(&gPMStats, sizeof(gPMStats)); - - pmTracer = PMTraceWorker::tracer(this); - - pmAssertions = PMAssertionsTracker::pmAssertionsTracker(this); - - userDisabledAllSleep = false; - systemBooting = true; - idleSleepEnabled = false; - sleepSlider = 0; - idleSleepTimerPending = false; - wrangler = NULL; - clamshellClosed = false; - clamshellExists = false; - clamshellDisabled = true; - acAdaptorConnected = true; - clamshellSleepDisabled = false; - gWakeReasonString[0] = '\0'; - - // Initialize to user active. - // Will never transition to user inactive w/o wrangler. - fullWakeReason = kFullWakeReasonLocalUser; - userIsActive = userWasActive = true; - clock_get_uptime(&gUserActiveAbsTime); - setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue); - - // Set the default system capabilities at boot. - _currentCapability = kIOPMSystemCapabilityCPU | - kIOPMSystemCapabilityGraphics | - kIOPMSystemCapabilityAudio | - kIOPMSystemCapabilityNetwork; - - _pendingCapability = _currentCapability; - _desiredCapability = _currentCapability; - _highestCapability = _currentCapability; - setProperty(kIOPMSystemCapabilitiesKey, _currentCapability, 64); - - queuedSleepWakeUUIDString = NULL; - initializeBootSessionUUID(); - pmStatsAppResponses = OSArray::withCapacity(5); - _statsNameKey = OSSymbol::withCString(kIOPMStatsNameKey); - _statsPIDKey = OSSymbol::withCString(kIOPMStatsPIDKey); - _statsTimeMSKey = OSSymbol::withCString(kIOPMStatsTimeMSKey); - _statsResponseTypeKey = OSSymbol::withCString(kIOPMStatsApplicationResponseTypeKey); - _statsMessageTypeKey = OSSymbol::withCString(kIOPMStatsMessageTypeKey); - _statsPowerCapsKey = OSSymbol::withCString(kIOPMStatsPowerCapabilityKey); - assertOnWakeSecs = -1; // Invalid value to prevent updates - - pmStatsLock = IOLockAlloc(); - idxPMCPUClamshell = kCPUUnknownIndex; - idxPMCPULimitedPower = kCPUUnknownIndex; - - tmpDict = OSDictionary::withCapacity(1); - setProperty(kRootDomainSupportedFeatures, tmpDict); - tmpDict->release(); - - settingsCallbacks = OSDictionary::withCapacity(1); - - // Create a list of the valid PM settings that we'll relay to - // interested clients in setProperties() => setPMSetting() - allowedPMSettings = OSArray::withObjects( - (const OSObject **)settingsArr, - kRootDomainSettingsCount, - 0); - - // List of PM settings that should not automatically publish itself - // as a feature when registered by a listener. - noPublishPMSettings = OSArray::withObjects( - (const OSObject **) &gIOPMSettingSilentRunningKey, 1, 0); - - fPMSettingsDict = OSDictionary::withCapacity(5); - preventIdleSleepList = OSSet::withCapacity(8); - preventSystemSleepList = OSSet::withCapacity(2); - - PMinit(); // creates gIOPMWorkLoop - gIOPMWorkLoop = getIOPMWorkloop(); - - // Create IOPMPowerStateQueue used to queue external power - // events, and to handle those events on the PM work loop. 
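start() pulls its debug knobs (darkwake, noidle, the swd_* timeouts, haltmspanic, haltmslog) from the kernel command line with PE_parse_boot_argn(), which leaves the destination untouched when the boot-arg is absent, so the initializer doubles as the default. A minimal sketch with an invented knob:

    #include <pexpert/pexpert.h>

    static uint32_t gMyTimeout = 30; // default if "mytimeout=" is not passed

    static void
    parse_my_boot_args(void)
    {
        // Overwrites gMyTimeout only when "mytimeout=<n>" appears in boot-args.
        PE_parse_boot_argn("mytimeout", &gMyTimeout, sizeof(gMyTimeout));
    }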
- pmPowerStateQueue = IOPMPowerStateQueue::PMPowerStateQueue( - this, OSMemberFunctionCast(IOEventSource::Action, this, - &IOPMrootDomain::dispatchPowerEvent)); - gIOPMWorkLoop->addEventSource(pmPowerStateQueue); - - // create our power parent - patriarch = new IORootParent; - patriarch->init(); - patriarch->attach(this); - patriarch->start(this); - patriarch->addPowerChild(this); - - registerPowerDriver(this, ourPowerStates, NUM_POWER_STATES); - changePowerStateToPriv(ON_STATE); - - // install power change handler - gSysPowerDownNotifier = registerPrioritySleepWakeInterest( &sysPowerDownHandler, this, 0); + setProperty(kIOSleepSupportedKey, true); + + bzero(&gPMStats, sizeof(gPMStats)); + + pmTracer = PMTraceWorker::tracer(this); + + pmAssertions = PMAssertionsTracker::pmAssertionsTracker(this); + + userDisabledAllSleep = false; + systemBooting = true; + idleSleepEnabled = false; + sleepSlider = 0; + idleSleepTimerPending = false; + wrangler = NULL; + clamshellClosed = false; + clamshellExists = false; + clamshellDisabled = true; + acAdaptorConnected = true; + clamshellSleepDisabled = false; + gWakeReasonString[0] = '\0'; + + // Initialize to user active. + // Will never transition to user inactive w/o wrangler. + fullWakeReason = kFullWakeReasonLocalUser; + userIsActive = userWasActive = true; + clock_get_uptime(&gUserActiveAbsTime); + setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue); + + // Set the default system capabilities at boot. + _currentCapability = kIOPMSystemCapabilityCPU | + kIOPMSystemCapabilityGraphics | + kIOPMSystemCapabilityAudio | + kIOPMSystemCapabilityNetwork; + + _pendingCapability = _currentCapability; + _desiredCapability = _currentCapability; + _highestCapability = _currentCapability; + setProperty(kIOPMSystemCapabilitiesKey, _currentCapability, 64); + + queuedSleepWakeUUIDString = NULL; + initializeBootSessionUUID(); + pmStatsAppResponses = OSArray::withCapacity(5); + _statsNameKey = OSSymbol::withCString(kIOPMStatsNameKey); + _statsPIDKey = OSSymbol::withCString(kIOPMStatsPIDKey); + _statsTimeMSKey = OSSymbol::withCString(kIOPMStatsTimeMSKey); + _statsResponseTypeKey = OSSymbol::withCString(kIOPMStatsApplicationResponseTypeKey); + _statsMessageTypeKey = OSSymbol::withCString(kIOPMStatsMessageTypeKey); + _statsPowerCapsKey = OSSymbol::withCString(kIOPMStatsPowerCapabilityKey); + assertOnWakeSecs = -1;// Invalid value to prevent updates + + pmStatsLock = IOLockAlloc(); + idxPMCPUClamshell = kCPUUnknownIndex; + idxPMCPULimitedPower = kCPUUnknownIndex; + + tmpDict = OSDictionary::withCapacity(1); + setProperty(kRootDomainSupportedFeatures, tmpDict); + tmpDict->release(); + + settingsCallbacks = OSDictionary::withCapacity(1); + + // Create a list of the valid PM settings that we'll relay to + // interested clients in setProperties() => setPMSetting() + allowedPMSettings = OSArray::withObjects( + (const OSObject **)settingsArr, + kRootDomainSettingsCount, + 0); + + // List of PM settings that should not automatically publish itself + // as a feature when registered by a listener. + noPublishPMSettings = OSArray::withObjects( + (const OSObject **) &gIOPMSettingSilentRunningKey, 1, 0); + + fPMSettingsDict = OSDictionary::withCapacity(5); + preventIdleSleepList = OSSet::withCapacity(8); + preventSystemSleepList = OSSet::withCapacity(2); + + PMinit(); // creates gIOPMWorkLoop + gIOPMWorkLoop = getIOPMWorkloop(); + + // Create IOPMPowerStateQueue used to queue external power + // events, and to handle those events on the PM work loop. 
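The pmPowerStateQueue created just below binds a C++ member function as an event-source action with OSMemberFunctionCast, so queued power events are handled on the PM work loop. The same binding pattern with a stock IOTimerEventSource; MyDriver and myTimerFired are invented, with the class assumed to be an IOService subclass declared elsewhere:

    #include <IOKit/IOWorkLoop.h>
    #include <IOKit/IOTimerEventSource.h>

    bool
    MyDriver::setupTimer(IOWorkLoop *workLoop)
    {
        IOTimerEventSource *timer = IOTimerEventSource::timerEventSource(this,
            OSMemberFunctionCast(IOTimerEventSource::Action, this,
            &MyDriver::myTimerFired));
        if (!timer || (workLoop->addEventSource(timer) != kIOReturnSuccess)) {
            if (timer) {
                timer->release();
            }
            return false;
        }
        timer->setTimeoutMS(1000); // myTimerFired() then runs on the work loop
        return true;
    }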
+ pmPowerStateQueue = IOPMPowerStateQueue::PMPowerStateQueue( + this, OSMemberFunctionCast(IOEventSource::Action, this, + &IOPMrootDomain::dispatchPowerEvent)); + gIOPMWorkLoop->addEventSource(pmPowerStateQueue); + + // create our power parent + patriarch = new IORootParent; + patriarch->init(); + patriarch->attach(this); + patriarch->start(this); + patriarch->addPowerChild(this); + + registerPowerDriver(this, ourPowerStates, NUM_POWER_STATES); + changePowerStateToPriv(ON_STATE); + + // install power change handler + gSysPowerDownNotifier = registerPrioritySleepWakeInterest( &sysPowerDownHandler, this, 0); #if !NO_KERNEL_HID - // Register for a notification when IODisplayWrangler is published - if ((tmpDict = serviceMatching("IODisplayWrangler"))) - { - _displayWranglerNotifier = addMatchingNotification( - gIOPublishNotification, tmpDict, - (IOServiceMatchingNotificationHandler) &displayWranglerMatchPublished, - this, 0); - tmpDict->release(); - } + // Register for a notification when IODisplayWrangler is published + if ((tmpDict = serviceMatching("IODisplayWrangler"))) { + _displayWranglerNotifier = addMatchingNotification( + gIOPublishNotification, tmpDict, + (IOServiceMatchingNotificationHandler) & displayWranglerMatchPublished, + this, 0); + tmpDict->release(); + } #endif #if defined(__i386__) || defined(__x86_64__) - wranglerIdleSettings = NULL; - OSNumber * wranglerIdlePeriod = NULL; - wranglerIdleSettings = OSDictionary::withCapacity(1); - wranglerIdlePeriod = OSNumber::withNumber(kDefaultWranglerIdlePeriod, 32); + wranglerIdleSettings = NULL; + OSNumber * wranglerIdlePeriod = NULL; + wranglerIdleSettings = OSDictionary::withCapacity(1); + wranglerIdlePeriod = OSNumber::withNumber(kDefaultWranglerIdlePeriod, 32); - if(wranglerIdleSettings && wranglerIdlePeriod) - wranglerIdleSettings->setObject(kIORequestWranglerIdleKey, - wranglerIdlePeriod); + if (wranglerIdleSettings && wranglerIdlePeriod) { + wranglerIdleSettings->setObject(kIORequestWranglerIdleKey, + wranglerIdlePeriod); + } - if(wranglerIdlePeriod) - wranglerIdlePeriod->release(); + if (wranglerIdlePeriod) { + wranglerIdlePeriod->release(); + } #endif - const OSSymbol *ucClassName = OSSymbol::withCStringNoCopy("RootDomainUserClient"); - setProperty(gIOUserClientClassKey, (OSObject *) ucClassName); - ucClassName->release(); - - // IOBacklightDisplay can take a long time to load at boot, or it may - // not load at all if you're booting with clamshell closed. We publish - // 'DisplayDims' here redundantly to get it published early and at all. - OSDictionary * matching; - matching = serviceMatching("IOPMPowerSource"); - psIterator = getMatchingServices( matching ); - if (matching) matching->release(); - if( psIterator && psIterator->getNextObject() ) - { - // There's at least one battery on the system, so we publish - // 'DisplayDims' support for the LCD. - publishFeature("DisplayDims"); - } - if(psIterator) { - psIterator->release(); - } - - sysctl_register_oid(&sysctl__kern_sleeptime); - sysctl_register_oid(&sysctl__kern_waketime); - sysctl_register_oid(&sysctl__kern_willshutdown); - sysctl_register_oid(&sysctl__kern_iokittest); - sysctl_register_oid(&sysctl__debug_iokit); - sysctl_register_oid(&sysctl__hw_targettype); + const OSSymbol *ucClassName = OSSymbol::withCStringNoCopy("RootDomainUserClient"); + setProperty(gIOUserClientClassKey, (OSObject *) ucClassName); + ucClassName->release(); + + // IOBacklightDisplay can take a long time to load at boot, or it may + // not load at all if you're booting with clamshell closed. 
We publish + // 'DisplayDims' here redundantly to get it published early and at all. + OSDictionary * matching; + matching = serviceMatching("IOPMPowerSource"); + psIterator = getMatchingServices( matching ); + if (matching) { + matching->release(); + } + if (psIterator && psIterator->getNextObject()) { + // There's at least one battery on the system, so we publish + // 'DisplayDims' support for the LCD. + publishFeature("DisplayDims"); + } + if (psIterator) { + psIterator->release(); + } + + sysctl_register_oid(&sysctl__kern_sleeptime); + sysctl_register_oid(&sysctl__kern_waketime); + sysctl_register_oid(&sysctl__kern_willshutdown); + sysctl_register_oid(&sysctl__kern_iokittest); + sysctl_register_oid(&sysctl__debug_iokit); + sysctl_register_oid(&sysctl__hw_targettype); #if !CONFIG_EMBEDDED - sysctl_register_oid(&sysctl__kern_progressmeterenable); - sysctl_register_oid(&sysctl__kern_progressmeter); - sysctl_register_oid(&sysctl__kern_wakereason); + sysctl_register_oid(&sysctl__kern_progressmeterenable); + sysctl_register_oid(&sysctl__kern_progressmeter); + sysctl_register_oid(&sysctl__kern_wakereason); #endif /* !CONFIG_EMBEDDED */ - sysctl_register_oid(&sysctl__kern_consoleoptions); - sysctl_register_oid(&sysctl__kern_progressoptions); + sysctl_register_oid(&sysctl__kern_consoleoptions); + sysctl_register_oid(&sysctl__kern_progressoptions); #if HIBERNATION - IOHibernateSystemInit(this); + IOHibernateSystemInit(this); #endif - registerService(); // let clients find us + registerService(); // let clients find us - return true; + return true; } //****************************************************************************** @@ -1356,220 +1385,220 @@ bool IOPMrootDomain::start( IOService * nub ) // The "System Boot" property means the system is completely booted. 
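setProperties(), which follows, walks the caller's dictionary with an OSCollectionIterator and dispatches on each key. The iteration skeleton in isolation; handle_key() is an invented stand-in for the per-key dispatch:

    #include <IOKit/IOReturn.h>
    #include <libkern/c++/OSDictionary.h>
    #include <libkern/c++/OSCollectionIterator.h>

    static IOReturn
    iterate_properties(OSDictionary *dict)
    {
        const OSSymbol *key;
        OSObject *obj;
        OSCollectionIterator *iter;

        iter = OSCollectionIterator::withCollection(dict);
        if (!iter) {
            return kIOReturnNoMemory;
        }
        // OSDictionary keys enumerate as OSSymbols; fetch each value by key.
        while ((key = (const OSSymbol *) iter->getNextObject()) &&
            (obj = dict->getObject(key))) {
            // handle_key(key, obj);
        }
        iter->release();
        return kIOReturnSuccess;
    }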
//****************************************************************************** -IOReturn IOPMrootDomain::setProperties( OSObject * props_obj ) -{ - IOReturn return_value = kIOReturnSuccess; - OSDictionary *dict = OSDynamicCast(OSDictionary, props_obj); - OSBoolean *b; - OSNumber *n; - const OSSymbol *key; - OSObject *obj; - OSCollectionIterator * iter = 0; - - const OSSymbol *publish_simulated_battery_string = OSSymbol::withCString("SoftwareSimulatedBatteries"); - const OSSymbol *boot_complete_string = OSSymbol::withCString("System Boot Complete"); - const OSSymbol *sys_shutdown_string = OSSymbol::withCString("System Shutdown"); - const OSSymbol *stall_halt_string = OSSymbol::withCString("StallSystemAtHalt"); - const OSSymbol *battery_warning_disabled_string = OSSymbol::withCString("BatteryWarningsDisabled"); - const OSSymbol *idle_seconds_string = OSSymbol::withCString("System Idle Seconds"); - const OSSymbol *sleepdisabled_string = OSSymbol::withCString("SleepDisabled"); - const OSSymbol *ondeck_sleepwake_uuid_string = OSSymbol::withCString(kIOPMSleepWakeUUIDKey); - const OSSymbol *loginwindow_progress_string = OSSymbol::withCString(kIOPMLoginWindowProgressKey); - const OSSymbol *coredisplay_progress_string = OSSymbol::withCString(kIOPMCoreDisplayProgressKey); - const OSSymbol *coregraphics_progress_string = OSSymbol::withCString(kIOPMCoreGraphicsProgressKey); +IOReturn +IOPMrootDomain::setProperties( OSObject * props_obj ) +{ + IOReturn return_value = kIOReturnSuccess; + OSDictionary *dict = OSDynamicCast(OSDictionary, props_obj); + OSBoolean *b; + OSNumber *n; + const OSSymbol *key; + OSObject *obj; + OSCollectionIterator * iter = 0; + + const OSSymbol *publish_simulated_battery_string = OSSymbol::withCString("SoftwareSimulatedBatteries"); + const OSSymbol *boot_complete_string = OSSymbol::withCString("System Boot Complete"); + const OSSymbol *sys_shutdown_string = OSSymbol::withCString("System Shutdown"); + const OSSymbol *stall_halt_string = OSSymbol::withCString("StallSystemAtHalt"); + const OSSymbol *battery_warning_disabled_string = OSSymbol::withCString("BatteryWarningsDisabled"); + const OSSymbol *idle_seconds_string = OSSymbol::withCString("System Idle Seconds"); + const OSSymbol *sleepdisabled_string = OSSymbol::withCString("SleepDisabled"); + const OSSymbol *ondeck_sleepwake_uuid_string = OSSymbol::withCString(kIOPMSleepWakeUUIDKey); + const OSSymbol *loginwindow_progress_string = OSSymbol::withCString(kIOPMLoginWindowProgressKey); + const OSSymbol *coredisplay_progress_string = OSSymbol::withCString(kIOPMCoreDisplayProgressKey); + const OSSymbol *coregraphics_progress_string = OSSymbol::withCString(kIOPMCoreGraphicsProgressKey); #if HIBERNATION - const OSSymbol *hibernatemode_string = OSSymbol::withCString(kIOHibernateModeKey); - const OSSymbol *hibernatefile_string = OSSymbol::withCString(kIOHibernateFileKey); - const OSSymbol *hibernatefilemin_string = OSSymbol::withCString(kIOHibernateFileMinSizeKey); - const OSSymbol *hibernatefilemax_string = OSSymbol::withCString(kIOHibernateFileMaxSizeKey); - const OSSymbol *hibernatefreeratio_string = OSSymbol::withCString(kIOHibernateFreeRatioKey); - const OSSymbol *hibernatefreetime_string = OSSymbol::withCString(kIOHibernateFreeTimeKey); + const OSSymbol *hibernatemode_string = OSSymbol::withCString(kIOHibernateModeKey); + const OSSymbol *hibernatefile_string = OSSymbol::withCString(kIOHibernateFileKey); + const OSSymbol *hibernatefilemin_string = OSSymbol::withCString(kIOHibernateFileMinSizeKey); + const OSSymbol 
*hibernatefilemax_string = OSSymbol::withCString(kIOHibernateFileMaxSizeKey); + const OSSymbol *hibernatefreeratio_string = OSSymbol::withCString(kIOHibernateFreeRatioKey); + const OSSymbol *hibernatefreetime_string = OSSymbol::withCString(kIOHibernateFreeTimeKey); #endif - if (!dict) - { - return_value = kIOReturnBadArgument; - goto exit; - } - - iter = OSCollectionIterator::withCollection(dict); - if (!iter) - { - return_value = kIOReturnNoMemory; - goto exit; - } - - while ((key = (const OSSymbol *) iter->getNextObject()) && - (obj = dict->getObject(key))) - { - if (key->isEqualTo(publish_simulated_battery_string)) - { - if (OSDynamicCast(OSBoolean, obj)) - publishResource(key, kOSBooleanTrue); - } - else if (key->isEqualTo(idle_seconds_string)) - { - if ((n = OSDynamicCast(OSNumber, obj))) - { - setProperty(key, n); - idleSeconds = n->unsigned32BitValue(); - } - } - else if (key->isEqualTo(boot_complete_string)) - { - pmPowerStateQueue->submitPowerEvent(kPowerEventSystemBootCompleted); - } - else if (key->isEqualTo(sys_shutdown_string)) - { - if ((b = OSDynamicCast(OSBoolean, obj))) - pmPowerStateQueue->submitPowerEvent(kPowerEventSystemShutdown, (void *) b); - } - else if (key->isEqualTo(battery_warning_disabled_string)) - { - setProperty(key, obj); - } + if (!dict) { + return_value = kIOReturnBadArgument; + goto exit; + } + + iter = OSCollectionIterator::withCollection(dict); + if (!iter) { + return_value = kIOReturnNoMemory; + goto exit; + } + + while ((key = (const OSSymbol *) iter->getNextObject()) && + (obj = dict->getObject(key))) { + if (key->isEqualTo(publish_simulated_battery_string)) { + if (OSDynamicCast(OSBoolean, obj)) { + publishResource(key, kOSBooleanTrue); + } + } else if (key->isEqualTo(idle_seconds_string)) { + if ((n = OSDynamicCast(OSNumber, obj))) { + setProperty(key, n); + idleSeconds = n->unsigned32BitValue(); + } + } else if (key->isEqualTo(boot_complete_string)) { + pmPowerStateQueue->submitPowerEvent(kPowerEventSystemBootCompleted); + } else if (key->isEqualTo(sys_shutdown_string)) { + if ((b = OSDynamicCast(OSBoolean, obj))) { + pmPowerStateQueue->submitPowerEvent(kPowerEventSystemShutdown, (void *) b); + } + } else if (key->isEqualTo(battery_warning_disabled_string)) { + setProperty(key, obj); + } #if HIBERNATION - else if (key->isEqualTo(hibernatemode_string) || - key->isEqualTo(hibernatefilemin_string) || - key->isEqualTo(hibernatefilemax_string) || - key->isEqualTo(hibernatefreeratio_string) || - key->isEqualTo(hibernatefreetime_string)) - { - if ((n = OSDynamicCast(OSNumber, obj))) - setProperty(key, n); - } - else if (key->isEqualTo(hibernatefile_string)) - { - OSString * str = OSDynamicCast(OSString, obj); - if (str) setProperty(key, str); - } + else if (key->isEqualTo(hibernatemode_string) || + key->isEqualTo(hibernatefilemin_string) || + key->isEqualTo(hibernatefilemax_string) || + key->isEqualTo(hibernatefreeratio_string) || + key->isEqualTo(hibernatefreetime_string)) { + if ((n = OSDynamicCast(OSNumber, obj))) { + setProperty(key, n); + } + } else if (key->isEqualTo(hibernatefile_string)) { + OSString * str = OSDynamicCast(OSString, obj); + if (str) { + setProperty(key, str); + } + } #endif - else if (key->isEqualTo(sleepdisabled_string)) - { - if ((b = OSDynamicCast(OSBoolean, obj))) - { - setProperty(key, b); - pmPowerStateQueue->submitPowerEvent(kPowerEventUserDisabledSleep, (void *) b); - } - } - else if (key->isEqualTo(ondeck_sleepwake_uuid_string)) - { - obj->retain(); - pmPowerStateQueue->submitPowerEvent(kPowerEventQueueSleepWakeUUID, 
(void *)obj); - } - else if (key->isEqualTo(loginwindow_progress_string)) - { - if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { - uint32_t data = n->unsigned32BitValue(); - pmTracer->traceComponentWakeProgress(kIOPMLoginWindowProgress, data); - kdebugTrace(kPMLogComponentWakeProgress, 0, kIOPMLoginWindowProgress, data); - } - } - else if (key->isEqualTo(coredisplay_progress_string)) - { - if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { - uint32_t data = n->unsigned32BitValue(); - pmTracer->traceComponentWakeProgress(kIOPMCoreDisplayProgress, data); - kdebugTrace(kPMLogComponentWakeProgress, 0, kIOPMCoreDisplayProgress, data); - } - } - else if (key->isEqualTo(coregraphics_progress_string)) - { - if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { - uint32_t data = n->unsigned32BitValue(); - pmTracer->traceComponentWakeProgress(kIOPMCoreGraphicsProgress, data); - kdebugTrace(kPMLogComponentWakeProgress, 0, kIOPMCoreGraphicsProgress, data); - } - } - else if (key->isEqualTo(kIOPMDeepSleepEnabledKey) || - key->isEqualTo(kIOPMDestroyFVKeyOnStandbyKey) || - key->isEqualTo(kIOPMAutoPowerOffEnabledKey) || - key->isEqualTo(stall_halt_string)) - { - if ((b = OSDynamicCast(OSBoolean, obj))) - setProperty(key, b); - } - else if (key->isEqualTo(kIOPMDeepSleepDelayKey) || - key->isEqualTo(kIOPMDeepSleepTimerKey) || - key->isEqualTo(kIOPMAutoPowerOffDelayKey) || - key->isEqualTo(kIOPMAutoPowerOffTimerKey)) - { - if ((n = OSDynamicCast(OSNumber, obj))) - setProperty(key, n); - } - else if (key->isEqualTo(kIOPMUserWakeAlarmScheduledKey)) - { - if (kOSBooleanTrue == obj) - OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_userScheduledAlarm); - else - OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_userScheduledAlarm); - DLOG("_userScheduledAlarm = 0x%x\n", (uint32_t) _userScheduledAlarm); - } - - // Relay our allowed PM settings onto our registered PM clients - else if ((allowedPMSettings->getNextIndexOfObject(key, 0) != (unsigned int) -1)) - { - return_value = setPMSetting(key, obj); - if (kIOReturnSuccess != return_value) - break; - - if (gIOPMSettingDebugWakeRelativeKey == key) - { - if ((n = OSDynamicCast(OSNumber, obj)) && - (_debugWakeSeconds = n->unsigned32BitValue())) - { - OSBitOrAtomic(kIOPMAlarmBitDebugWake, &_scheduledAlarms); - } - else - { - _debugWakeSeconds = 0; - OSBitAndAtomic(~kIOPMAlarmBitDebugWake, &_scheduledAlarms); - } - DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); - } - else if (gIOPMSettingAutoWakeCalendarKey == key) - { - OSData * data; - if ((data = OSDynamicCast(OSData, obj)) && - (data->getLength() == sizeof(IOPMCalendarStruct))) - { - const IOPMCalendarStruct * cs = - (const IOPMCalendarStruct *) data->getBytesNoCopy(); - - if (cs->year) - OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_scheduledAlarms); - else - OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_scheduledAlarms); - DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); - } - } - } - else - { - DLOG("setProperties(%s) not handled\n", key->getCStringNoCopy()); - } - } + else if (key->isEqualTo(sleepdisabled_string)) { + if ((b = OSDynamicCast(OSBoolean, obj))) { + setProperty(key, b); + pmPowerStateQueue->submitPowerEvent(kPowerEventUserDisabledSleep, (void *) b); + } + } else if (key->isEqualTo(ondeck_sleepwake_uuid_string)) { + obj->retain(); + pmPowerStateQueue->submitPowerEvent(kPowerEventQueueSleepWakeUUID, (void *)obj); + } else if (key->isEqualTo(loginwindow_progress_string)) { + if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { + uint32_t data = n->unsigned32BitValue(); + 
pmTracer->traceComponentWakeProgress(kIOPMLoginWindowProgress, data); + kdebugTrace(kPMLogComponentWakeProgress, 0, kIOPMLoginWindowProgress, data); + } + } else if (key->isEqualTo(coredisplay_progress_string)) { + if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { + uint32_t data = n->unsigned32BitValue(); + pmTracer->traceComponentWakeProgress(kIOPMCoreDisplayProgress, data); + kdebugTrace(kPMLogComponentWakeProgress, 0, kIOPMCoreDisplayProgress, data); + } + } else if (key->isEqualTo(coregraphics_progress_string)) { + if (pmTracer && (n = OSDynamicCast(OSNumber, obj))) { + uint32_t data = n->unsigned32BitValue(); + pmTracer->traceComponentWakeProgress(kIOPMCoreGraphicsProgress, data); + kdebugTrace(kPMLogComponentWakeProgress, 0, kIOPMCoreGraphicsProgress, data); + } + } else if (key->isEqualTo(kIOPMDeepSleepEnabledKey) || + key->isEqualTo(kIOPMDestroyFVKeyOnStandbyKey) || + key->isEqualTo(kIOPMAutoPowerOffEnabledKey) || + key->isEqualTo(stall_halt_string)) { + if ((b = OSDynamicCast(OSBoolean, obj))) { + setProperty(key, b); + } + } else if (key->isEqualTo(kIOPMDeepSleepDelayKey) || + key->isEqualTo(kIOPMDeepSleepTimerKey) || + key->isEqualTo(kIOPMAutoPowerOffDelayKey) || + key->isEqualTo(kIOPMAutoPowerOffTimerKey)) { + if ((n = OSDynamicCast(OSNumber, obj))) { + setProperty(key, n); + } + } else if (key->isEqualTo(kIOPMUserWakeAlarmScheduledKey)) { + if (kOSBooleanTrue == obj) { + OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_userScheduledAlarm); + } else { + OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_userScheduledAlarm); + } + DLOG("_userScheduledAlarm = 0x%x\n", (uint32_t) _userScheduledAlarm); + } + // Relay our allowed PM settings onto our registered PM clients + else if ((allowedPMSettings->getNextIndexOfObject(key, 0) != (unsigned int) -1)) { + return_value = setPMSetting(key, obj); + if (kIOReturnSuccess != return_value) { + break; + } + + if (gIOPMSettingDebugWakeRelativeKey == key) { + if ((n = OSDynamicCast(OSNumber, obj)) && + (_debugWakeSeconds = n->unsigned32BitValue())) { + OSBitOrAtomic(kIOPMAlarmBitDebugWake, &_scheduledAlarms); + } else { + _debugWakeSeconds = 0; + OSBitAndAtomic(~kIOPMAlarmBitDebugWake, &_scheduledAlarms); + } + DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); + } else if (gIOPMSettingAutoWakeCalendarKey == key) { + OSData * data; + if ((data = OSDynamicCast(OSData, obj)) && + (data->getLength() == sizeof(IOPMCalendarStruct))) { + const IOPMCalendarStruct * cs = + (const IOPMCalendarStruct *) data->getBytesNoCopy(); + + if (cs->year) { + OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_scheduledAlarms); + } else { + OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_scheduledAlarms); + } + DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); + } + } + } else { + DLOG("setProperties(%s) not handled\n", key->getCStringNoCopy()); + } + } exit: - if(publish_simulated_battery_string) publish_simulated_battery_string->release(); - if(boot_complete_string) boot_complete_string->release(); - if(sys_shutdown_string) sys_shutdown_string->release(); - if(stall_halt_string) stall_halt_string->release(); - if(battery_warning_disabled_string) battery_warning_disabled_string->release(); - if(idle_seconds_string) idle_seconds_string->release(); - if(sleepdisabled_string) sleepdisabled_string->release(); - if(ondeck_sleepwake_uuid_string) ondeck_sleepwake_uuid_string->release(); - if(loginwindow_progress_string) loginwindow_progress_string->release(); - if(coredisplay_progress_string) coredisplay_progress_string->release(); - 
if(coregraphics_progress_string) coregraphics_progress_string->release(); + if (publish_simulated_battery_string) { + publish_simulated_battery_string->release(); + } + if (boot_complete_string) { + boot_complete_string->release(); + } + if (sys_shutdown_string) { + sys_shutdown_string->release(); + } + if (stall_halt_string) { + stall_halt_string->release(); + } + if (battery_warning_disabled_string) { + battery_warning_disabled_string->release(); + } + if (idle_seconds_string) { + idle_seconds_string->release(); + } + if (sleepdisabled_string) { + sleepdisabled_string->release(); + } + if (ondeck_sleepwake_uuid_string) { + ondeck_sleepwake_uuid_string->release(); + } + if (loginwindow_progress_string) { + loginwindow_progress_string->release(); + } + if (coredisplay_progress_string) { + coredisplay_progress_string->release(); + } + if (coregraphics_progress_string) { + coregraphics_progress_string->release(); + } #if HIBERNATION - if(hibernatemode_string) hibernatemode_string->release(); - if(hibernatefile_string) hibernatefile_string->release(); - if(hibernatefreeratio_string) hibernatefreeratio_string->release(); - if(hibernatefreetime_string) hibernatefreetime_string->release(); + if (hibernatemode_string) { + hibernatemode_string->release(); + } + if (hibernatefile_string) { + hibernatefile_string->release(); + } + if (hibernatefreeratio_string) { + hibernatefreeratio_string->release(); + } + if (hibernatefreetime_string) { + hibernatefreetime_string->release(); + } #endif - if (iter) iter->release(); - return return_value; + if (iter) { + iter->release(); + } + return return_value; } // MARK: - @@ -1581,81 +1610,83 @@ exit: // Override IOService::setAggressiveness() //****************************************************************************** -IOReturn IOPMrootDomain::setAggressiveness( - unsigned long type, - unsigned long value ) +IOReturn +IOPMrootDomain::setAggressiveness( + unsigned long type, + unsigned long value ) { - return setAggressiveness( type, value, 0 ); + return setAggressiveness( type, value, 0 ); } /* * Private setAggressiveness() with an internal options argument. */ -IOReturn IOPMrootDomain::setAggressiveness( - unsigned long type, - unsigned long value, - IOOptionBits options ) -{ - AggressivesRequest * entry; - AggressivesRequest * request; - bool found = false; - - DLOG("setAggressiveness(%x) 0x%x = %u\n", - (uint32_t) options, (uint32_t) type, (uint32_t) value); - - request = IONew(AggressivesRequest, 1); - if (!request) - return kIOReturnNoMemory; - - memset(request, 0, sizeof(*request)); - request->options = options; - request->dataType = kAggressivesRequestTypeRecord; - request->data.record.type = (uint32_t) type; - request->data.record.value = (uint32_t) value; - - AGGRESSIVES_LOCK(); - - // Update disk quick spindown flag used by getAggressiveness(). - // Never merge requests with quick spindown flags set. - - if (options & kAggressivesOptionQuickSpindownEnable) - gAggressivesState |= kAggressivesStateQuickSpindown; - else if (options & kAggressivesOptionQuickSpindownDisable) - gAggressivesState &= ~kAggressivesStateQuickSpindown; - else - { - // Coalesce requests with identical aggressives types. - // Deal with callers that calls us too "aggressively". 
- - queue_iterate(&aggressivesQueue, entry, AggressivesRequest *, chain) - { - if ((entry->dataType == kAggressivesRequestTypeRecord) && - (entry->data.record.type == type) && - ((entry->options & kAggressivesOptionQuickSpindownMask) == 0)) - { - entry->data.record.value = value; - found = true; - break; - } - } - } - - if (!found) - { - queue_enter(&aggressivesQueue, request, AggressivesRequest *, chain); - } - - AGGRESSIVES_UNLOCK(); - - if (found) - IODelete(request, AggressivesRequest, 1); - - if (options & kAggressivesOptionSynchronous) - handleAggressivesRequests(); // not truly synchronous - else - thread_call_enter(aggressivesThreadCall); - - return kIOReturnSuccess; +IOReturn +IOPMrootDomain::setAggressiveness( + unsigned long type, + unsigned long value, + IOOptionBits options ) +{ + AggressivesRequest * entry; + AggressivesRequest * request; + bool found = false; + + DLOG("setAggressiveness(%x) 0x%x = %u\n", + (uint32_t) options, (uint32_t) type, (uint32_t) value); + + request = IONew(AggressivesRequest, 1); + if (!request) { + return kIOReturnNoMemory; + } + + memset(request, 0, sizeof(*request)); + request->options = options; + request->dataType = kAggressivesRequestTypeRecord; + request->data.record.type = (uint32_t) type; + request->data.record.value = (uint32_t) value; + + AGGRESSIVES_LOCK(); + + // Update disk quick spindown flag used by getAggressiveness(). + // Never merge requests with quick spindown flags set. + + if (options & kAggressivesOptionQuickSpindownEnable) { + gAggressivesState |= kAggressivesStateQuickSpindown; + } else if (options & kAggressivesOptionQuickSpindownDisable) { + gAggressivesState &= ~kAggressivesStateQuickSpindown; + } else { + // Coalesce requests with identical aggressives types. + // Deal with callers that calls us too "aggressively". + + queue_iterate(&aggressivesQueue, entry, AggressivesRequest *, chain) + { + if ((entry->dataType == kAggressivesRequestTypeRecord) && + (entry->data.record.type == type) && + ((entry->options & kAggressivesOptionQuickSpindownMask) == 0)) { + entry->data.record.value = value; + found = true; + break; + } + } + } + + if (!found) { + queue_enter(&aggressivesQueue, request, AggressivesRequest *, chain); + } + + AGGRESSIVES_UNLOCK(); + + if (found) { + IODelete(request, AggressivesRequest, 1); + } + + if (options & kAggressivesOptionSynchronous) { + handleAggressivesRequests(); // not truly synchronous + } else { + thread_call_enter(aggressivesThreadCall); + } + + return kIOReturnSuccess; } //****************************************************************************** @@ -1665,82 +1696,75 @@ IOReturn IOPMrootDomain::setAggressiveness( // Fetch the aggressiveness factor with the given type. //****************************************************************************** -IOReturn IOPMrootDomain::getAggressiveness ( - unsigned long type, - unsigned long * outLevel ) -{ - uint32_t value = 0; - int source = 0; - - if (!outLevel) - return kIOReturnBadArgument; - - AGGRESSIVES_LOCK(); - - // Disk quick spindown in effect, report value = 1 - - if ((gAggressivesState & kAggressivesStateQuickSpindown) && - (type == kPMMinutesToSpinDown)) - { - value = kAggressivesMinValue; - source = 1; - } - - // Consult the pending request queue. 
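setAggressiveness() above coalesces repeated records of the same type by scanning the pending queue before enqueueing, so callers that call it too "aggressively" collapse into a single record. The queue idiom reduced to its core; MyRequest, gQueue and coalesce_or_enqueue are invented:

    #include <kern/queue.h>

    struct MyRequest {
        queue_chain_t chain;
        uint32_t      type;
        uint32_t      value;
    };

    static queue_head_t gQueue; // queue_init(&gQueue) at setup time

    static bool
    coalesce_or_enqueue(struct MyRequest *request)
    {
        struct MyRequest *entry;

        // Overwrite an existing entry of the same type instead of
        // queueing a duplicate.
        queue_iterate(&gQueue, entry, struct MyRequest *, chain) {
            if (entry->type == request->type) {
                entry->value = request->value;
                return true; // coalesced; caller frees request
            }
        }
        queue_enter(&gQueue, request, struct MyRequest *, chain);
        return false; // queued; ownership transferred
    }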
- - if (!source) - { - AggressivesRequest * entry; - - queue_iterate(&aggressivesQueue, entry, AggressivesRequest *, chain) - { - if ((entry->dataType == kAggressivesRequestTypeRecord) && - (entry->data.record.type == type) && - ((entry->options & kAggressivesOptionQuickSpindownMask) == 0)) - { - value = entry->data.record.value; - source = 2; - break; - } - } - } - - // Consult the backend records. - - if (!source && aggressivesData) - { - AggressivesRecord * record; - int i, count; - - count = aggressivesData->getLength() / sizeof(AggressivesRecord); - record = (AggressivesRecord *) aggressivesData->getBytesNoCopy(); - - for (i = 0; i < count; i++, record++) - { - if (record->type == type) - { - value = record->value; - source = 3; - break; - } - } - } - - AGGRESSIVES_UNLOCK(); - - if (source) - { - DLOG("getAggressiveness(%d) 0x%x = %u\n", - source, (uint32_t) type, value); - *outLevel = (unsigned long) value; - return kIOReturnSuccess; - } - else - { - DLOG("getAggressiveness type 0x%x not found\n", (uint32_t) type); - *outLevel = 0; // default return = 0, driver may not check for error - return kIOReturnInvalid; - } +IOReturn +IOPMrootDomain::getAggressiveness( + unsigned long type, + unsigned long * outLevel ) +{ + uint32_t value = 0; + int source = 0; + + if (!outLevel) { + return kIOReturnBadArgument; + } + + AGGRESSIVES_LOCK(); + + // Disk quick spindown in effect, report value = 1 + + if ((gAggressivesState & kAggressivesStateQuickSpindown) && + (type == kPMMinutesToSpinDown)) { + value = kAggressivesMinValue; + source = 1; + } + + // Consult the pending request queue. + + if (!source) { + AggressivesRequest * entry; + + queue_iterate(&aggressivesQueue, entry, AggressivesRequest *, chain) + { + if ((entry->dataType == kAggressivesRequestTypeRecord) && + (entry->data.record.type == type) && + ((entry->options & kAggressivesOptionQuickSpindownMask) == 0)) { + value = entry->data.record.value; + source = 2; + break; + } + } + } + + // Consult the backend records. + + if (!source && aggressivesData) { + AggressivesRecord * record; + int i, count; + + count = aggressivesData->getLength() / sizeof(AggressivesRecord); + record = (AggressivesRecord *) aggressivesData->getBytesNoCopy(); + + for (i = 0; i < count; i++, record++) { + if (record->type == type) { + value = record->value; + source = 3; + break; + } + } + } + + AGGRESSIVES_UNLOCK(); + + if (source) { + DLOG("getAggressiveness(%d) 0x%x = %u\n", + source, (uint32_t) type, value); + *outLevel = (unsigned long) value; + return kIOReturnSuccess; + } else { + DLOG("getAggressiveness type 0x%x not found\n", (uint32_t) type); + *outLevel = 0; // default return = 0, driver may not check for error + return kIOReturnInvalid; + } } //****************************************************************************** @@ -1749,33 +1773,36 @@ IOReturn IOPMrootDomain::getAggressiveness ( // Request from IOService to join future aggressiveness broadcasts. 
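Services that register through joinAggressiveness(), shown next, receive these factors through their own setAggressiveness() override when the root domain broadcasts. A hypothetical receiver; MyDiskDriver and reprogramSpindownTimer() are invented:

    #include <IOKit/pwr_mgt/IOPM.h>

    IOReturn
    MyDiskDriver::setAggressiveness(unsigned long type, unsigned long value)
    {
        if (type == kPMMinutesToSpinDown) {
            // value drops to 1 while the root domain's quick-spindown
            // clamp is active (see getAggressiveness() above).
            reprogramSpindownTimer(value);
        }
        return super::setAggressiveness(type, value);
    }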
//****************************************************************************** -IOReturn IOPMrootDomain::joinAggressiveness( - IOService * service ) +IOReturn +IOPMrootDomain::joinAggressiveness( + IOService * service ) { - AggressivesRequest * request; + AggressivesRequest * request; - if (!service || (service == this)) - return kIOReturnBadArgument; + if (!service || (service == this)) { + return kIOReturnBadArgument; + } - DLOG("joinAggressiveness %s %p\n", service->getName(), OBFUSCATE(service)); + DLOG("joinAggressiveness %s %p\n", service->getName(), OBFUSCATE(service)); - request = IONew(AggressivesRequest, 1); - if (!request) - return kIOReturnNoMemory; + request = IONew(AggressivesRequest, 1); + if (!request) { + return kIOReturnNoMemory; + } - service->retain(); // released by synchronizeAggressives() + service->retain(); // released by synchronizeAggressives() - memset(request, 0, sizeof(*request)); - request->dataType = kAggressivesRequestTypeService; - request->data.service = service; + memset(request, 0, sizeof(*request)); + request->dataType = kAggressivesRequestTypeService; + request->data.service = service; - AGGRESSIVES_LOCK(); - queue_enter(&aggressivesQueue, request, AggressivesRequest *, chain); - AGGRESSIVES_UNLOCK(); + AGGRESSIVES_LOCK(); + queue_enter(&aggressivesQueue, request, AggressivesRequest *, chain); + AGGRESSIVES_UNLOCK(); - thread_call_enter(aggressivesThreadCall); + thread_call_enter(aggressivesThreadCall); - return kIOReturnSuccess; + return kIOReturnSuccess; } //****************************************************************************** @@ -1786,169 +1813,155 @@ IOReturn IOPMrootDomain::joinAggressiveness( static void handleAggressivesFunction( - thread_call_param_t param1, - thread_call_param_t param2 ) -{ - if (param1) - { - ((IOPMrootDomain *) param1)->handleAggressivesRequests(); - } -} - -void IOPMrootDomain::handleAggressivesRequests( void ) -{ - AggressivesRecord * start; - AggressivesRecord * record; - AggressivesRequest * request; - queue_head_t joinedQueue; - int i, count; - bool broadcast; - bool found; - bool pingSelf = false; - - AGGRESSIVES_LOCK(); - - if ((gAggressivesState & kAggressivesStateBusy) || !aggressivesData || - queue_empty(&aggressivesQueue)) - goto unlock_done; - - gAggressivesState |= kAggressivesStateBusy; - count = aggressivesData->getLength() / sizeof(AggressivesRecord); - start = (AggressivesRecord *) aggressivesData->getBytesNoCopy(); - - do - { - broadcast = false; - queue_init(&joinedQueue); - - do - { - // Remove request from the incoming queue in FIFO order. - queue_remove_first(&aggressivesQueue, request, AggressivesRequest *, chain); - switch (request->dataType) - { - case kAggressivesRequestTypeRecord: - // Update existing record if found. 
- found = false; - for (i = 0, record = start; i < count; i++, record++) - { - if (record->type == request->data.record.type) - { - found = true; - - if (request->options & kAggressivesOptionQuickSpindownEnable) - { - if ((record->flags & kAggressivesRecordFlagMinValue) == 0) - { - broadcast = true; - record->flags |= (kAggressivesRecordFlagMinValue | - kAggressivesRecordFlagModified); - DLOG("disk spindown accelerated, was %u min\n", - record->value); - } - } - else if (request->options & kAggressivesOptionQuickSpindownDisable) - { - if (record->flags & kAggressivesRecordFlagMinValue) - { - broadcast = true; - record->flags |= kAggressivesRecordFlagModified; - record->flags &= ~kAggressivesRecordFlagMinValue; - DLOG("disk spindown restored to %u min\n", - record->value); - } - } - else if (record->value != request->data.record.value) - { - record->value = request->data.record.value; - if ((record->flags & kAggressivesRecordFlagMinValue) == 0) - { - broadcast = true; - record->flags |= kAggressivesRecordFlagModified; - } - } - break; - } - } - - // No matching record, append a new record. - if (!found && - ((request->options & kAggressivesOptionQuickSpindownDisable) == 0)) - { - AggressivesRecord newRecord; - - newRecord.flags = kAggressivesRecordFlagModified; - newRecord.type = request->data.record.type; - newRecord.value = request->data.record.value; - if (request->options & kAggressivesOptionQuickSpindownEnable) - { - newRecord.flags |= kAggressivesRecordFlagMinValue; - DLOG("disk spindown accelerated\n"); - } - - aggressivesData->appendBytes(&newRecord, sizeof(newRecord)); - - // OSData may have switched to another (larger) buffer. - count = aggressivesData->getLength() / sizeof(AggressivesRecord); - start = (AggressivesRecord *) aggressivesData->getBytesNoCopy(); - broadcast = true; - } - - // Finished processing the request, release it. - IODelete(request, AggressivesRequest, 1); - break; - - case kAggressivesRequestTypeService: - // synchronizeAggressives() will free request. - queue_enter(&joinedQueue, request, AggressivesRequest *, chain); - break; - - default: - panic("bad aggressives request type %x\n", request->dataType); - break; - } - } while (!queue_empty(&aggressivesQueue)); - - // Release the lock to perform work, with busy flag set. - if (!queue_empty(&joinedQueue) || broadcast) - { - AGGRESSIVES_UNLOCK(); - if (!queue_empty(&joinedQueue)) - synchronizeAggressives(&joinedQueue, start, count); - if (broadcast) - broadcastAggressives(start, count); - AGGRESSIVES_LOCK(); - } - - // Remove the modified flag from all records. - for (i = 0, record = start; i < count; i++, record++) - { - if ((record->flags & kAggressivesRecordFlagModified) && - ((record->type == kPMMinutesToDim) || - (record->type == kPMMinutesToSleep))) - pingSelf = true; - - record->flags &= ~kAggressivesRecordFlagModified; - } - - // Check the incoming queue again since new entries may have been - // added while lock was released above. 
- - } while (!queue_empty(&aggressivesQueue)); - - gAggressivesState &= ~kAggressivesStateBusy; + thread_call_param_t param1, + thread_call_param_t param2 ) +{ + if (param1) { + ((IOPMrootDomain *) param1)->handleAggressivesRequests(); + } +} + +void +IOPMrootDomain::handleAggressivesRequests( void ) +{ + AggressivesRecord * start; + AggressivesRecord * record; + AggressivesRequest * request; + queue_head_t joinedQueue; + int i, count; + bool broadcast; + bool found; + bool pingSelf = false; + + AGGRESSIVES_LOCK(); + + if ((gAggressivesState & kAggressivesStateBusy) || !aggressivesData || + queue_empty(&aggressivesQueue)) { + goto unlock_done; + } + + gAggressivesState |= kAggressivesStateBusy; + count = aggressivesData->getLength() / sizeof(AggressivesRecord); + start = (AggressivesRecord *) aggressivesData->getBytesNoCopy(); + + do{ + broadcast = false; + queue_init(&joinedQueue); + + do{ + // Remove request from the incoming queue in FIFO order. + queue_remove_first(&aggressivesQueue, request, AggressivesRequest *, chain); + switch (request->dataType) { + case kAggressivesRequestTypeRecord: + // Update existing record if found. + found = false; + for (i = 0, record = start; i < count; i++, record++) { + if (record->type == request->data.record.type) { + found = true; + + if (request->options & kAggressivesOptionQuickSpindownEnable) { + if ((record->flags & kAggressivesRecordFlagMinValue) == 0) { + broadcast = true; + record->flags |= (kAggressivesRecordFlagMinValue | + kAggressivesRecordFlagModified); + DLOG("disk spindown accelerated, was %u min\n", + record->value); + } + } else if (request->options & kAggressivesOptionQuickSpindownDisable) { + if (record->flags & kAggressivesRecordFlagMinValue) { + broadcast = true; + record->flags |= kAggressivesRecordFlagModified; + record->flags &= ~kAggressivesRecordFlagMinValue; + DLOG("disk spindown restored to %u min\n", + record->value); + } + } else if (record->value != request->data.record.value) { + record->value = request->data.record.value; + if ((record->flags & kAggressivesRecordFlagMinValue) == 0) { + broadcast = true; + record->flags |= kAggressivesRecordFlagModified; + } + } + break; + } + } + + // No matching record, append a new record. + if (!found && + ((request->options & kAggressivesOptionQuickSpindownDisable) == 0)) { + AggressivesRecord newRecord; + + newRecord.flags = kAggressivesRecordFlagModified; + newRecord.type = request->data.record.type; + newRecord.value = request->data.record.value; + if (request->options & kAggressivesOptionQuickSpindownEnable) { + newRecord.flags |= kAggressivesRecordFlagMinValue; + DLOG("disk spindown accelerated\n"); + } + + aggressivesData->appendBytes(&newRecord, sizeof(newRecord)); + + // OSData may have switched to another (larger) buffer. + count = aggressivesData->getLength() / sizeof(AggressivesRecord); + start = (AggressivesRecord *) aggressivesData->getBytesNoCopy(); + broadcast = true; + } + + // Finished processing the request, release it. + IODelete(request, AggressivesRequest, 1); + break; + + case kAggressivesRequestTypeService: + // synchronizeAggressives() will free request. + queue_enter(&joinedQueue, request, AggressivesRequest *, chain); + break; + + default: + panic("bad aggressives request type %x\n", request->dataType); + break; + } + } while (!queue_empty(&aggressivesQueue)); + + // Release the lock to perform work, with busy flag set. 
+	if (!queue_empty(&joinedQueue) || broadcast) {
+		AGGRESSIVES_UNLOCK();
+		if (!queue_empty(&joinedQueue)) {
+			synchronizeAggressives(&joinedQueue, start, count);
+		}
+		if (broadcast) {
+			broadcastAggressives(start, count);
+		}
+		AGGRESSIVES_LOCK();
+	}
+
+	// Remove the modified flag from all records.
+	for (i = 0, record = start; i < count; i++, record++) {
+		if ((record->flags & kAggressivesRecordFlagModified) &&
+		    ((record->type == kPMMinutesToDim) ||
+		    (record->type == kPMMinutesToSleep))) {
+			pingSelf = true;
+		}
+
+		record->flags &= ~kAggressivesRecordFlagModified;
+	}
+
+	// Check the incoming queue again since new entries may have been
+	// added while lock was released above.
+	} while (!queue_empty(&aggressivesQueue));
+
+	gAggressivesState &= ~kAggressivesStateBusy;

unlock_done:
-    AGGRESSIVES_UNLOCK();
+	AGGRESSIVES_UNLOCK();

-    // Root domain is interested in system and display sleep slider changes.
-    // Submit a power event to handle those changes on the PM work loop.
+	// Root domain is interested in system and display sleep slider changes.
+	// Submit a power event to handle those changes on the PM work loop.

-    if (pingSelf && pmPowerStateQueue) {
-        pmPowerStateQueue->submitPowerEvent(
-            kPowerEventPolicyStimulus,
-            (void *) kStimulusAggressivenessChanged );
-    }
+	if (pingSelf && pmPowerStateQueue) {
+		pmPowerStateQueue->submitPowerEvent(
+			kPowerEventPolicyStimulus,
+			(void *) kStimulusAggressivenessChanged );
+	}
}

//******************************************************************************
@@ -1957,48 +1970,47 @@ unlock_done:
// Push all known aggressiveness records to one or more IOService.
//******************************************************************************

-void IOPMrootDomain::synchronizeAggressives(
-    queue_head_t * joinedQueue,
-    const AggressivesRecord * array,
-    int count )
-{
-    IOService * service;
-    AggressivesRequest * request;
-    const AggressivesRecord * record;
-    IOPMDriverCallEntry callEntry;
-    uint32_t value;
-    int i;
-
-    while (!queue_empty(joinedQueue))
-    {
-        queue_remove_first(joinedQueue, request, AggressivesRequest *, chain);
-        if (request->dataType == kAggressivesRequestTypeService)
-            service = request->data.service;
-        else
-            service = 0;
-
-        IODelete(request, AggressivesRequest, 1);
-        request = 0;
-
-        if (service)
-        {
-            if (service->assertPMDriverCall(&callEntry))
-            {
-                for (i = 0, record = array; i < count; i++, record++)
-                {
-                    value = record->value;
-                    if (record->flags & kAggressivesRecordFlagMinValue)
-                        value = kAggressivesMinValue;
-
-                    _LOG("synchronizeAggressives 0x%x = %u to %s\n",
-                        record->type, value, service->getName());
-                    service->setAggressiveness(record->type, value);
-                }
-                service->deassertPMDriverCall(&callEntry);
-            }
-            service->release(); // retained by joinAggressiveness()
-        }
-    }
+void
+IOPMrootDomain::synchronizeAggressives(
+	queue_head_t * joinedQueue,
+	const AggressivesRecord * array,
+	int count )
+{
+	IOService * service;
+	AggressivesRequest * request;
+	const AggressivesRecord * record;
+	IOPMDriverCallEntry callEntry;
+	uint32_t value;
+	int i;
+
+	while (!queue_empty(joinedQueue)) {
+		queue_remove_first(joinedQueue, request, AggressivesRequest *, chain);
+		if (request->dataType == kAggressivesRequestTypeService) {
+			service = request->data.service;
+		} else {
+			service = 0;
+		}
+
+		IODelete(request, AggressivesRequest, 1);
+		request = 0;
+
+		if (service) {
+			if (service->assertPMDriverCall(&callEntry)) {
+				for (i = 0, record = array; i < count; i++, record++) {
+					value = record->value;
+					if (record->flags & kAggressivesRecordFlagMinValue) {
+						value = kAggressivesMinValue;
+					}
+
+					_LOG("synchronizeAggressives 0x%x = %u to %s\n",
+					    record->type, value, service->getName());
+					service->setAggressiveness(record->type, value);
+				}
+				service->deassertPMDriverCall(&callEntry);
+			}
+			service->release(); // retained by joinAggressiveness()
+		}
+	}
}

//******************************************************************************
@@ -2007,57 +2019,52 @@ void IOPMrootDomain::synchronizeAggressives(
// Traverse PM tree and call setAggressiveness() for records that have changed.
//******************************************************************************

-void IOPMrootDomain::broadcastAggressives(
-    const AggressivesRecord * array,
-    int count )
-{
-    IORegistryIterator * iter;
-    IORegistryEntry * entry;
-    IOPowerConnection * connect;
-    IOService * service;
-    const AggressivesRecord * record;
-    IOPMDriverCallEntry callEntry;
-    uint32_t value;
-    int i;
-
-    iter = IORegistryIterator::iterateOver(
-        this, gIOPowerPlane, kIORegistryIterateRecursively);
-    if (iter)
-    {
-        do
-        {
-            iter->reset();
-            while ((entry = iter->getNextObject()))
-            {
-                connect = OSDynamicCast(IOPowerConnection, entry);
-                if (!connect || !connect->getReadyFlag())
-                    continue;
-
-                if ((service = OSDynamicCast(IOService, connect->copyChildEntry(gIOPowerPlane))))
-                {
-                    if (service->assertPMDriverCall(&callEntry))
-                    {
-                        for (i = 0, record = array; i < count; i++, record++)
-                        {
-                            if (record->flags & kAggressivesRecordFlagModified)
-                            {
-                                value = record->value;
-                                if (record->flags & kAggressivesRecordFlagMinValue)
-                                    value = kAggressivesMinValue;
-                                _LOG("broadcastAggressives %x = %u to %s\n",
-                                    record->type, value, service->getName());
-                                service->setAggressiveness(record->type, value);
-                            }
-                        }
-                        service->deassertPMDriverCall(&callEntry);
-                    }
-                    service->release();
-                }
-            }
-        }
-        while (!entry && !iter->isValid());
-        iter->release();
-    }
+void
+IOPMrootDomain::broadcastAggressives(
+	const AggressivesRecord * array,
+	int count )
+{
+	IORegistryIterator * iter;
+	IORegistryEntry * entry;
+	IOPowerConnection * connect;
+	IOService * service;
+	const AggressivesRecord * record;
+	IOPMDriverCallEntry callEntry;
+	uint32_t value;
+	int i;
+
+	iter = IORegistryIterator::iterateOver(
+		this, gIOPowerPlane, kIORegistryIterateRecursively);
+	if (iter) {
+		do{
+			iter->reset();
+			while ((entry = iter->getNextObject())) {
+				connect = OSDynamicCast(IOPowerConnection, entry);
+				if (!connect || !connect->getReadyFlag()) {
+					continue;
+				}
+
+				if ((service = OSDynamicCast(IOService, connect->copyChildEntry(gIOPowerPlane)))) {
+					if (service->assertPMDriverCall(&callEntry)) {
+						for (i = 0, record = array; i < count; i++, record++) {
+							if (record->flags & kAggressivesRecordFlagModified) {
+								value = record->value;
+								if (record->flags & kAggressivesRecordFlagMinValue) {
+									value = kAggressivesMinValue;
+								}
+								_LOG("broadcastAggressives %x = %u to %s\n",
+								    record->type, value, service->getName());
+								service->setAggressiveness(record->type, value);
+							}
+						}
+						service->deassertPMDriverCall(&callEntry);
+					}
+					service->release();
+				}
+			}
+		}while (!entry && !iter->isValid());
+		iter->release();
+	}
}

// MARK: -
@@ -2068,26 +2075,24 @@ void IOPMrootDomain::broadcastAggressives(
//
//******************************************************************************

-void IOPMrootDomain::startIdleSleepTimer( uint32_t inSeconds )
+void
+IOPMrootDomain::startIdleSleepTimer( uint32_t inSeconds )
{
-    AbsoluteTime deadline;
+	AbsoluteTime deadline;

-    ASSERT_GATED();
-    if (gNoIdleFlag) {
-        DLOG("idle timer not set (noidle=%d)\n", gNoIdleFlag);
-        return;
-    }
-    if (inSeconds)
-    {
-        clock_interval_to_deadline(inSeconds, kSecondScale, &deadline);
-        thread_call_enter_delayed(extraSleepTimer, deadline);
-        idleSleepTimerPending = true;
-    }
-    else
-    {
-        thread_call_enter(extraSleepTimer);
-    }
-    DLOG("idle timer set for %u seconds\n", inSeconds);
+	ASSERT_GATED();
+	if (gNoIdleFlag) {
+		DLOG("idle timer not set (noidle=%d)\n", gNoIdleFlag);
+		return;
+	}
+	if (inSeconds) {
+		clock_interval_to_deadline(inSeconds, kSecondScale, &deadline);
+		thread_call_enter_delayed(extraSleepTimer, deadline);
+		idleSleepTimerPending = true;
+	} else {
+		thread_call_enter(extraSleepTimer);
+	}
+	DLOG("idle timer set for %u seconds\n", inSeconds);
}

//******************************************************************************
@@ -2095,27 +2100,27 @@ void IOPMrootDomain::startIdleSleepTimer( uint32_t inSeconds )
//
//******************************************************************************

-void IOPMrootDomain::cancelIdleSleepTimer( void )
-{
-    ASSERT_GATED();
-    if (idleSleepTimerPending)
-    {
-        DLOG("idle timer cancelled\n");
-        thread_call_cancel(extraSleepTimer);
-        idleSleepTimerPending = false;
-
-        if (!assertOnWakeSecs && gIOLastWakeAbsTime) {
-            AbsoluteTime now;
-            clock_usec_t microsecs;
-            clock_get_uptime(&now);
-            SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime);
-            absolutetime_to_microtime(now, &assertOnWakeSecs, &microsecs);
-            if (assertOnWakeReport) {
-                HISTREPORT_TALLYVALUE(assertOnWakeReport, (int64_t)assertOnWakeSecs);
-                DLOG("Updated assertOnWake %lu\n", (unsigned long)assertOnWakeSecs);
-            }
-        }
-    }
+void
+IOPMrootDomain::cancelIdleSleepTimer( void )
+{
+	ASSERT_GATED();
+	if (idleSleepTimerPending) {
+		DLOG("idle timer cancelled\n");
+		thread_call_cancel(extraSleepTimer);
+		idleSleepTimerPending = false;
+
+		if (!assertOnWakeSecs && gIOLastWakeAbsTime) {
+			AbsoluteTime now;
+			clock_usec_t microsecs;
+			clock_get_uptime(&now);
+			SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime);
+			absolutetime_to_microtime(now, &assertOnWakeSecs, &microsecs);
+			if (assertOnWakeReport) {
+				HISTREPORT_TALLYVALUE(assertOnWakeReport, (int64_t)assertOnWakeSecs);
+				DLOG("Updated assertOnWake %lu\n", (unsigned long)assertOnWakeSecs);
+			}
+		}
+	}
}

//******************************************************************************
@@ -2123,10 +2128,11 @@ void IOPMrootDomain::cancelIdleSleepTimer( void )
//
//******************************************************************************

-static void idleSleepTimerExpired(
-    thread_call_param_t us, thread_call_param_t )
+static void
+idleSleepTimerExpired(
+	thread_call_param_t us, thread_call_param_t )
{
-    ((IOPMrootDomain *)us)->handleSleepTimerExpiration();
+	((IOPMrootDomain *)us)->handleSleepTimerExpiration();
}

//******************************************************************************
@@ -2136,27 +2142,27 @@ static void idleSleepTimerExpired(
// It's time to sleep. Start that by removing the clamp that's holding us awake.
//****************************************************************************** -void IOPMrootDomain::handleSleepTimerExpiration( void ) +void +IOPMrootDomain::handleSleepTimerExpiration( void ) { - if (!gIOPMWorkLoop->inGate()) - { - gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, this, - &IOPMrootDomain::handleSleepTimerExpiration), - this); - return; - } + if (!gIOPMWorkLoop->inGate()) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, + &IOPMrootDomain::handleSleepTimerExpiration), + this); + return; + } - AbsoluteTime time; + AbsoluteTime time; - DLOG("sleep timer expired\n"); - ASSERT_GATED(); + DLOG("sleep timer expired\n"); + ASSERT_GATED(); - idleSleepTimerPending = false; + idleSleepTimerPending = false; - clock_get_uptime(&time); - setQuickSpinDownTimeout(); - adjustPowerState(true); + clock_get_uptime(&time); + setQuickSpinDownTimeout(); + adjustPowerState(true); } //****************************************************************************** @@ -2167,43 +2173,43 @@ void IOPMrootDomain::handleSleepTimerExpiration( void ) // this function //****************************************************************************** -uint32_t IOPMrootDomain::getTimeToIdleSleep( void ) +uint32_t +IOPMrootDomain::getTimeToIdleSleep( void ) { + AbsoluteTime now, lastActivityTime; + uint64_t nanos; + uint32_t minutesSinceUserInactive = 0; + uint32_t sleepDelay = 0; - AbsoluteTime now, lastActivityTime; - uint64_t nanos; - uint32_t minutesSinceUserInactive = 0; - uint32_t sleepDelay = 0; - - if (!idleSleepEnabled) - return 0xffffffff; + if (!idleSleepEnabled) { + return 0xffffffff; + } - if (userActivityTime) - lastActivityTime = userActivityTime; - else - lastActivityTime = userBecameInactiveTime; + if (userActivityTime) { + lastActivityTime = userActivityTime; + } else { + lastActivityTime = userBecameInactiveTime; + } - clock_get_uptime(&now); - if (CMP_ABSOLUTETIME(&now, &lastActivityTime) > 0) - { - SUB_ABSOLUTETIME(&now, &lastActivityTime); - absolutetime_to_nanoseconds(now, &nanos); - minutesSinceUserInactive = nanos / (60000000000ULL); + clock_get_uptime(&now); + if (CMP_ABSOLUTETIME(&now, &lastActivityTime) > 0) { + SUB_ABSOLUTETIME(&now, &lastActivityTime); + absolutetime_to_nanoseconds(now, &nanos); + minutesSinceUserInactive = nanos / (60000000000ULL); - if (minutesSinceUserInactive >= sleepSlider) - sleepDelay = 0; - else - sleepDelay = sleepSlider - minutesSinceUserInactive; - } - else - { - sleepDelay = sleepSlider; - } + if (minutesSinceUserInactive >= sleepSlider) { + sleepDelay = 0; + } else { + sleepDelay = sleepSlider - minutesSinceUserInactive; + } + } else { + sleepDelay = sleepSlider; + } - DLOG("user inactive %u min, time to idle sleep %u min\n", - minutesSinceUserInactive, sleepDelay); + DLOG("user inactive %u min, time to idle sleep %u min\n", + minutesSinceUserInactive, sleepDelay); - return (sleepDelay * 60); + return sleepDelay * 60; } //****************************************************************************** @@ -2211,11 +2217,12 @@ uint32_t IOPMrootDomain::getTimeToIdleSleep( void ) // //****************************************************************************** -void IOPMrootDomain::setQuickSpinDownTimeout( void ) +void +IOPMrootDomain::setQuickSpinDownTimeout( void ) { - ASSERT_GATED(); - setAggressiveness( - kPMMinutesToSpinDown, 0, kAggressivesOptionQuickSpindownEnable ); + ASSERT_GATED(); + setAggressiveness( + kPMMinutesToSpinDown, 0, kAggressivesOptionQuickSpindownEnable ); } 
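The getTimeToIdleSleep() hunk above reduces to a small piece of arithmetic that is easy to lose among the diff markers. The sketch below restates it as a standalone function; timeToIdleSleepSeconds is a hypothetical name used for illustration only and is not part of this patch. The slider is expressed in minutes, the inactivity age arrives in nanoseconds, and the caller expects seconds:

#include <cstdint>

// Mirror of the reformatted getTimeToIdleSleep() logic: 0xffffffff means
// idle sleep is disabled; otherwise return the remaining delay in seconds.
static uint32_t
timeToIdleSleepSeconds(bool idleSleepEnabled, uint64_t nanosSinceActivity,
    uint32_t sleepSliderMinutes)
{
	if (!idleSleepEnabled) {
		return 0xffffffff;
	}
	// 60 * 10^9 nanoseconds per minute, as in the kernel code.
	uint32_t minutesSinceUserInactive =
	    (uint32_t)(nanosSinceActivity / 60000000000ULL);
	uint32_t sleepDelay = (minutesSinceUserInactive >= sleepSliderMinutes) ?
	    0 : (sleepSliderMinutes - minutesSinceUserInactive);
	return sleepDelay * 60;
}

A user inactive for 8 minutes against a 10-minute slider therefore yields 2 * 60 = 120 seconds, matching the minutes-based values the DLOG in the hunk reports.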
//****************************************************************************** @@ -2223,11 +2230,12 @@ void IOPMrootDomain::setQuickSpinDownTimeout( void ) // //****************************************************************************** -void IOPMrootDomain::restoreUserSpinDownTimeout( void ) +void +IOPMrootDomain::restoreUserSpinDownTimeout( void ) { - ASSERT_GATED(); - setAggressiveness( - kPMMinutesToSpinDown, 0, kAggressivesOptionQuickSpindownDisable ); + ASSERT_GATED(); + setAggressiveness( + kPMMinutesToSpinDown, 0, kAggressivesOptionQuickSpindownDisable ); } //****************************************************************************** @@ -2236,56 +2244,57 @@ void IOPMrootDomain::restoreUserSpinDownTimeout( void ) //****************************************************************************** /* public */ -IOReturn IOPMrootDomain::sleepSystem( void ) +IOReturn +IOPMrootDomain::sleepSystem( void ) { - return sleepSystemOptions(NULL); + return sleepSystemOptions(NULL); } /* private */ -IOReturn IOPMrootDomain::sleepSystemOptions( OSDictionary *options ) -{ - OSObject *obj = NULL; - OSString *reason = NULL; - /* sleepSystem is a public function, and may be called by any kernel driver. - * And that's bad - drivers should sleep the system by calling - * receivePowerNotification() instead. Drivers should not use sleepSystem. - * - * Note that user space app calls to IOPMSleepSystem() will also travel - * this code path and thus be correctly identified as software sleeps. - */ - - if (options && options->getObject("OSSwitch")) - { - // Log specific sleep cause for OS Switch hibernation - return privateSleepSystem( kIOPMSleepReasonOSSwitchHibernate); - } - - if (options && (obj = options->getObject("Sleep Reason"))) - { - reason = OSDynamicCast(OSString, obj); - if (reason && reason->isEqualTo(kIOPMDarkWakeThermalEmergencyKey)) - return privateSleepSystem(kIOPMSleepReasonDarkWakeThermalEmergency); - } - - return privateSleepSystem( kIOPMSleepReasonSoftware); +IOReturn +IOPMrootDomain::sleepSystemOptions( OSDictionary *options ) +{ + OSObject *obj = NULL; + OSString *reason = NULL; + /* sleepSystem is a public function, and may be called by any kernel driver. + * And that's bad - drivers should sleep the system by calling + * receivePowerNotification() instead. Drivers should not use sleepSystem. + * + * Note that user space app calls to IOPMSleepSystem() will also travel + * this code path and thus be correctly identified as software sleeps. 
+ */
+
+	if (options && options->getObject("OSSwitch")) {
+		// Log specific sleep cause for OS Switch hibernation
+		return privateSleepSystem( kIOPMSleepReasonOSSwitchHibernate);
+	}
+
+	if (options && (obj = options->getObject("Sleep Reason"))) {
+		reason = OSDynamicCast(OSString, obj);
+		if (reason && reason->isEqualTo(kIOPMDarkWakeThermalEmergencyKey)) {
+			return privateSleepSystem(kIOPMSleepReasonDarkWakeThermalEmergency);
+		}
+	}
+
+	return privateSleepSystem( kIOPMSleepReasonSoftware);
}

/* private */
-IOReturn IOPMrootDomain::privateSleepSystem( uint32_t sleepReason )
+IOReturn
+IOPMrootDomain::privateSleepSystem( uint32_t sleepReason )
{
-    /* Called from both gated and non-gated context */
+	/* Called from both gated and non-gated context */

-    if (!checkSystemSleepEnabled() || !pmPowerStateQueue)
-    {
-        return kIOReturnNotPermitted;
-    }
+	if (!checkSystemSleepEnabled() || !pmPowerStateQueue) {
+		return kIOReturnNotPermitted;
+	}

-    pmPowerStateQueue->submitPowerEvent(
-        kPowerEventPolicyStimulus,
-        (void *) kStimulusDemandSystemSleep,
-        sleepReason);
+	pmPowerStateQueue->submitPowerEvent(
+		kPowerEventPolicyStimulus,
+		(void *) kStimulusDemandSystemSleep,
+		sleepReason);

-    return kIOReturnSuccess;
+	return kIOReturnSuccess;
}

//******************************************************************************
@@ -2294,303 +2303,266 @@ IOReturn IOPMrootDomain::privateSleepSystem( uint32_t sleepReason )
// This overrides powerChangeDone in IOService.
//******************************************************************************

-void IOPMrootDomain::powerChangeDone( unsigned long previousPowerState )
+void
+IOPMrootDomain::powerChangeDone( unsigned long previousPowerState )
{
#if !__i386__ && !__x86_64__
-    uint64_t timeSinceReset = 0;
+	uint64_t timeSinceReset = 0;
#endif
-    uint64_t now;
-    ASSERT_GATED();
-    DLOG("PowerChangeDone: %u->%u\n",
-        (uint32_t) previousPowerState, (uint32_t) getPowerState());
-
-    notifierThread = current_thread();
-    switch ( getPowerState() )
-    {
-    case SLEEP_STATE: {
-        if (previousPowerState != ON_STATE)
-            break;
-
-        acceptSystemWakeEvents(true);
-
-        // re-enable this timer for next sleep
-        cancelIdleSleepTimer();
-
-        clock_sec_t secs;
-        clock_usec_t microsecs;
-        clock_get_calendar_absolute_and_microtime(&secs, &microsecs, &now);
-        logtime(secs);
-        gIOLastSleepTime.tv_sec  = secs;
-        gIOLastSleepTime.tv_usec = microsecs;
-        gIOLastWakeTime.tv_sec = 0;
-        gIOLastWakeTime.tv_usec = 0;
-        gIOLastSleepAbsTime = now;
-
-        if (wake2DarkwakeDelay && sleepDelaysReport) {
-            clock_usec_t microsecs;
-            clock_sec_t wake2DarkwakeSecs, darkwake2SleepSecs;
-            // Update 'wake2DarkwakeDelay' histogram if this is a fullwake->sleep transition
-
-            SUB_ABSOLUTETIME(&now, &ts_sleepStart);
-            absolutetime_to_microtime(now, &darkwake2SleepSecs, &microsecs);
-            absolutetime_to_microtime(wake2DarkwakeDelay, &wake2DarkwakeSecs, &microsecs);
-            HISTREPORT_TALLYVALUE(sleepDelaysReport,
-                (int64_t)(wake2DarkwakeSecs+darkwake2SleepSecs));
-
-            DLOG("Updated sleepDelaysReport %lu %lu\n", (unsigned long)wake2DarkwakeSecs, (unsigned long)darkwake2SleepSecs);
-            wake2DarkwakeDelay = 0;
-        }
+	uint64_t now;
+	ASSERT_GATED();
+	DLOG("PowerChangeDone: %u->%u\n",
+	    (uint32_t) previousPowerState, (uint32_t) getPowerState());
+
+	notifierThread = current_thread();
+	switch (getPowerState()) {
+	case SLEEP_STATE: {
+		if (previousPowerState != ON_STATE) {
+			break;
+		}
+
+		acceptSystemWakeEvents(true);
+
+		// re-enable this timer for next sleep
+		cancelIdleSleepTimer();
+
+		clock_sec_t secs;
+		clock_usec_t microsecs;
+		clock_get_calendar_absolute_and_microtime(&secs, &microsecs, &now);
+		logtime(secs);
+		gIOLastSleepTime.tv_sec = secs;
+		gIOLastSleepTime.tv_usec = microsecs;
+		gIOLastWakeTime.tv_sec = 0;
+		gIOLastWakeTime.tv_usec = 0;
+		gIOLastSleepAbsTime = now;
+
+		if (wake2DarkwakeDelay && sleepDelaysReport) {
+			clock_usec_t microsecs;
+			clock_sec_t wake2DarkwakeSecs, darkwake2SleepSecs;
+			// Update 'wake2DarkwakeDelay' histogram if this is a fullwake->sleep transition
+
+			SUB_ABSOLUTETIME(&now, &ts_sleepStart);
+			absolutetime_to_microtime(now, &darkwake2SleepSecs, &microsecs);
+			absolutetime_to_microtime(wake2DarkwakeDelay, &wake2DarkwakeSecs, &microsecs);
+			HISTREPORT_TALLYVALUE(sleepDelaysReport,
+			    (int64_t)(wake2DarkwakeSecs + darkwake2SleepSecs));
+
+			DLOG("Updated sleepDelaysReport %lu %lu\n", (unsigned long)wake2DarkwakeSecs, (unsigned long)darkwake2SleepSecs);
+			wake2DarkwakeDelay = 0;
+		}

#if HIBERNATION
-        LOG("System %sSleep\n", gIOHibernateState ? "Safe" : "");
+		LOG("System %sSleep\n", gIOHibernateState ? "Safe" : "");

-        IOHibernateSystemHasSlept();
+		IOHibernateSystemHasSlept();

-        evaluateSystemSleepPolicyFinal();
+		evaluateSystemSleepPolicyFinal();
#else
-        LOG("System Sleep\n");
+		LOG("System Sleep\n");
#endif
-        if (thermalWarningState) {
-            const OSSymbol *event = OSSymbol::withCString(kIOPMThermalLevelWarningKey);
-            if (event) {
-                systemPowerEventOccurred(event, kIOPMThermalLevelUnknown);
-                event->release();
-            }
-        }
-        assertOnWakeSecs = 0;
-        lowBatteryCondition = false;
+		if (thermalWarningState) {
+			const OSSymbol *event = OSSymbol::withCString(kIOPMThermalLevelWarningKey);
+			if (event) {
+				systemPowerEventOccurred(event, kIOPMThermalLevelUnknown);
+				event->release();
+			}
+		}
+		assertOnWakeSecs = 0;
+		lowBatteryCondition = false;

#if DEVELOPMENT || DEBUG
-        extern int g_should_log_clock_adjustments;
-        if (g_should_log_clock_adjustments) {
-            clock_sec_t secs = 0;
-            clock_usec_t microsecs = 0;
-            uint64_t now_b = mach_absolute_time();
-
-            PEGetUTCTimeOfDay(&secs, &microsecs);
-
-            uint64_t now_a = mach_absolute_time();
-            os_log(OS_LOG_DEFAULT, "%s PMU before going to sleep %lu s %d u %llu abs_b_PEG %llu abs_a_PEG \n",
-                __func__, (unsigned long)secs, microsecs, now_b, now_a);
-        }
+		extern int g_should_log_clock_adjustments;
+		if (g_should_log_clock_adjustments) {
+			clock_sec_t secs = 0;
+			clock_usec_t microsecs = 0;
+			uint64_t now_b = mach_absolute_time();
+
+			PEGetUTCTimeOfDay(&secs, &microsecs);
+
+			uint64_t now_a = mach_absolute_time();
+			os_log(OS_LOG_DEFAULT, "%s PMU before going to sleep %lu s %d u %llu abs_b_PEG %llu abs_a_PEG \n",
+			    __func__, (unsigned long)secs, microsecs, now_b, now_a);
+		}
#endif

-        getPlatform()->sleepKernel();
+		getPlatform()->sleepKernel();

-        // The CPU(s) are off at this point,
-        // Code will resume execution here upon wake.
-        clock_get_uptime(&gIOLastWakeAbsTime);
-        IOLog("gIOLastWakeAbsTime: %lld\n", gIOLastWakeAbsTime);
-        _highestCapability = 0;
+		// The CPU(s) are off at this point,
+		// Code will resume execution here upon wake.
+		clock_get_uptime(&gIOLastWakeAbsTime);
+		IOLog("gIOLastWakeAbsTime: %lld\n", gIOLastWakeAbsTime);
+		_highestCapability = 0;

#if HIBERNATION
-        IOHibernateSystemWake();
+		IOHibernateSystemWake();
#endif

-        // sleep transition complete
-        gSleepOrShutdownPending = 0;
+		// sleep transition complete
+		gSleepOrShutdownPending = 0;

-        // trip the reset of the calendar clock
-        {
-            clock_sec_t  wakeSecs;
-            clock_usec_t wakeMicrosecs;
+		// trip the reset of the calendar clock
+		{
+			clock_sec_t wakeSecs;
+			clock_usec_t wakeMicrosecs;

-            clock_wakeup_calendar();
+			clock_wakeup_calendar();

-            clock_get_calendar_microtime(&wakeSecs, &wakeMicrosecs);
-            gIOLastWakeTime.tv_sec  = wakeSecs;
-            gIOLastWakeTime.tv_usec = wakeMicrosecs;
-        }
+			clock_get_calendar_microtime(&wakeSecs, &wakeMicrosecs);
+			gIOLastWakeTime.tv_sec = wakeSecs;
+			gIOLastWakeTime.tv_usec = wakeMicrosecs;
+		}

#if HIBERNATION
-        LOG("System %sWake\n", gIOHibernateState ? "SafeSleep " : "");
+		LOG("System %sWake\n", gIOHibernateState ? "SafeSleep " : "");
#endif

-        lastSleepReason = 0;
+		lastSleepReason = 0;

-        _lastDebugWakeSeconds = _debugWakeSeconds;
-        _debugWakeSeconds = 0;
-        _scheduledAlarms = 0;
+		_lastDebugWakeSeconds = _debugWakeSeconds;
+		_debugWakeSeconds = 0;
+		_scheduledAlarms = 0;

#if defined(__i386__) || defined(__x86_64__)
-        kdebugTrace(kPMLogSystemWake, 0, 0, 0);
-        wranglerTickled         = false;
-        graphicsSuppressed      = false;
-        darkWakePostTickle      = false;
-        darkWakeHibernateError  = false;
-        darkWakeToSleepASAP     = true;
-        logGraphicsClamp        = true;
-        sleepTimerMaintenance   = false;
-        sleepToStandby          = false;
-        wranglerTickleLatched   = false;
-        userWasActive           = false;
-        fullWakeReason = kFullWakeReasonNone;
-
-        OSString * wakeType = OSDynamicCast(
-            OSString, getProperty(kIOPMRootDomainWakeTypeKey));
-        OSString * wakeReason = OSDynamicCast(
-            OSString, getProperty(kIOPMRootDomainWakeReasonKey));
-
-        if (wakeReason && (wakeReason->getLength() >= 2) &&
-            gWakeReasonString[0] == '\0')
-        {
-            // Until the platform driver can claim its wake reasons
-            strlcat(gWakeReasonString, wakeReason->getCStringNoCopy(),
-                sizeof(gWakeReasonString));
-        }
-
-        if (wakeType && wakeType->isEqualTo(kIOPMrootDomainWakeTypeLowBattery))
-        {
-            lowBatteryCondition = true;
-            darkWakeMaintenance = true;
-        }
-        else if ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) != 0)
-        {
+		kdebugTrace(kPMLogSystemWake, 0, 0, 0);
+		wranglerTickled = false;
+		graphicsSuppressed = false;
+		darkWakePostTickle = false;
+		darkWakeHibernateError = false;
+		darkWakeToSleepASAP = true;
+		logGraphicsClamp = true;
+		sleepTimerMaintenance = false;
+		sleepToStandby = false;
+		wranglerTickleLatched = false;
+		userWasActive = false;
+		fullWakeReason = kFullWakeReasonNone;
+
+		OSString * wakeType = OSDynamicCast(
+			OSString, getProperty(kIOPMRootDomainWakeTypeKey));
+		OSString * wakeReason = OSDynamicCast(
+			OSString, getProperty(kIOPMRootDomainWakeReasonKey));
+
+		if (wakeReason && (wakeReason->getLength() >= 2) &&
+		    gWakeReasonString[0] == '\0') {
+			// Until the platform driver can claim its wake reasons
+			strlcat(gWakeReasonString, wakeReason->getCStringNoCopy(),
+			    sizeof(gWakeReasonString));
+		}
+
+		if (wakeType && wakeType->isEqualTo(kIOPMrootDomainWakeTypeLowBattery)) {
+			lowBatteryCondition = true;
+			darkWakeMaintenance = true;
+		} else if ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) != 0) {
#if HIBERNATION
-            OSNumber * hibOptions = OSDynamicCast(
-                OSNumber, getProperty(kIOHibernateOptionsKey));
-            if (hibernateAborted || ((hibOptions &&
-                !(hibOptions->unsigned32BitValue() & kIOHibernateOptionDarkWake))))
-            {
-                // Hibernate aborted, or EFI brought up graphics
-                wranglerTickled = true;
-                DLOG("hibernation aborted %d, options 0x%x\n",
-                    hibernateAborted,
-                    hibOptions ? hibOptions->unsigned32BitValue() : 0);
-            }
-            else
+			OSNumber * hibOptions = OSDynamicCast(
+				OSNumber, getProperty(kIOHibernateOptionsKey));
+			if (hibernateAborted || ((hibOptions &&
+			    !(hibOptions->unsigned32BitValue() & kIOHibernateOptionDarkWake)))) {
+				// Hibernate aborted, or EFI brought up graphics
+				wranglerTickled = true;
+				DLOG("hibernation aborted %d, options 0x%x\n",
+				    hibernateAborted,
+				    hibOptions ? hibOptions->unsigned32BitValue() : 0);
+			} else
#endif
-            if (wakeType && (
-                wakeType->isEqualTo(kIOPMRootDomainWakeTypeUser) ||
-                wakeType->isEqualTo(kIOPMRootDomainWakeTypeAlarm)))
-            {
-                // User wake or RTC alarm
-                wranglerTickled = true;
-            }
-            else
-            if (wakeType &&
-                wakeType->isEqualTo(kIOPMRootDomainWakeTypeSleepTimer))
-            {
-                // SMC standby timer trumps SleepX
-                darkWakeMaintenance = true;
-                sleepTimerMaintenance = true;
-            }
-            else
-            if ((_lastDebugWakeSeconds != 0) &&
-                ((gDarkWakeFlags & kDarkWakeFlagAlarmIsDark) == 0))
-            {
-                // SleepX before maintenance
-                wranglerTickled = true;
-            }
-            else
-            if (wakeType &&
-                wakeType->isEqualTo(kIOPMRootDomainWakeTypeMaintenance))
-            {
-                darkWakeMaintenance = true;
-            }
-            else
-            if (wakeType &&
-                wakeType->isEqualTo(kIOPMRootDomainWakeTypeSleepService))
-            {
-                darkWakeMaintenance = true;
-                darkWakeSleepService = true;
+			if (wakeType && (
+				    wakeType->isEqualTo(kIOPMRootDomainWakeTypeUser) ||
+				    wakeType->isEqualTo(kIOPMRootDomainWakeTypeAlarm))) {
+				// User wake or RTC alarm
+				wranglerTickled = true;
+			} else if (wakeType &&
+			    wakeType->isEqualTo(kIOPMRootDomainWakeTypeSleepTimer)) {
+				// SMC standby timer trumps SleepX
+				darkWakeMaintenance = true;
+				sleepTimerMaintenance = true;
+			} else if ((_lastDebugWakeSeconds != 0) &&
+			    ((gDarkWakeFlags & kDarkWakeFlagAlarmIsDark) == 0)) {
+				// SleepX before maintenance
+				wranglerTickled = true;
+			} else if (wakeType &&
+			    wakeType->isEqualTo(kIOPMRootDomainWakeTypeMaintenance)) {
+				darkWakeMaintenance = true;
+			} else if (wakeType &&
+			    wakeType->isEqualTo(kIOPMRootDomainWakeTypeSleepService)) {
+				darkWakeMaintenance = true;
+				darkWakeSleepService = true;
#if HIBERNATION
-                if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) {
-                    sleepToStandby = true;
-                }
+				if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) {
+					sleepToStandby = true;
+				}
#endif
-            }
-            else
-            if (wakeType &&
-                wakeType->isEqualTo(kIOPMRootDomainWakeTypeHibernateError))
-            {
-                darkWakeMaintenance = true;
-                darkWakeHibernateError = true;
-            }
-            else
-            {
-                // Unidentified wake source, resume to full wake if debug
-                // alarm is pending.
-
-                if (_lastDebugWakeSeconds &&
-                    (!wakeReason || wakeReason->isEqualTo("")))
-                    wranglerTickled = true;
-            }
-        }
-        else
-        {
-            if (wakeType &&
-                wakeType->isEqualTo(kIOPMRootDomainWakeTypeSleepTimer))
-            {
-                darkWakeMaintenance = true;
-                sleepTimerMaintenance = true;
-            }
-            else if (hibernateAborted || !wakeType ||
-                !wakeType->isEqualTo(kIOPMRootDomainWakeTypeMaintenance) ||
-                !wakeReason || !wakeReason->isEqualTo("RTC"))
-            {
-                // Post a HID tickle immediately - except for RTC maintenance wake.
- wranglerTickled = true; - } - else - { - darkWakeMaintenance = true; - } - } - - if (wranglerTickled) - { - darkWakeToSleepASAP = false; - fullWakeReason = kFullWakeReasonLocalUser; - reportUserInput(); - } - else if (displayPowerOnRequested && checkSystemCanSustainFullWake()) - { - handleDisplayPowerOn(); - } - else if (!darkWakeMaintenance) - { - // Early/late tickle for non-maintenance wake. - if (((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == - kDarkWakeFlagHIDTickleEarly) || - ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == - kDarkWakeFlagHIDTickleLate)) - { - darkWakePostTickle = true; - } - } + } else if (wakeType && + wakeType->isEqualTo(kIOPMRootDomainWakeTypeHibernateError)) { + darkWakeMaintenance = true; + darkWakeHibernateError = true; + } else { + // Unidentified wake source, resume to full wake if debug + // alarm is pending. + + if (_lastDebugWakeSeconds && + (!wakeReason || wakeReason->isEqualTo(""))) { + wranglerTickled = true; + } + } + } else { + if (wakeType && + wakeType->isEqualTo(kIOPMRootDomainWakeTypeSleepTimer)) { + darkWakeMaintenance = true; + sleepTimerMaintenance = true; + } else if (hibernateAborted || !wakeType || + !wakeType->isEqualTo(kIOPMRootDomainWakeTypeMaintenance) || + !wakeReason || !wakeReason->isEqualTo("RTC")) { + // Post a HID tickle immediately - except for RTC maintenance wake. + wranglerTickled = true; + } else { + darkWakeMaintenance = true; + } + } + + if (wranglerTickled) { + darkWakeToSleepASAP = false; + fullWakeReason = kFullWakeReasonLocalUser; + reportUserInput(); + } else if (displayPowerOnRequested && checkSystemCanSustainFullWake()) { + handleDisplayPowerOn(); + } else if (!darkWakeMaintenance) { + // Early/late tickle for non-maintenance wake. + if (((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == + kDarkWakeFlagHIDTickleEarly) || + ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == + kDarkWakeFlagHIDTickleLate)) { + darkWakePostTickle = true; + } + } #else /* !__i386__ && !__x86_64__ */ - timeSinceReset = ml_get_time_since_reset(); + timeSinceReset = ml_get_time_since_reset(); - kdebugTrace(kPMLogSystemWake, 0, timeSinceReset >> 32, timeSinceReset); - // stay awake for at least 30 seconds - wranglerTickled = true; - fullWakeReason = kFullWakeReasonLocalUser; - startIdleSleepTimer(30); + kdebugTrace(kPMLogSystemWake, 0, timeSinceReset >> 32, timeSinceReset); + // stay awake for at least 30 seconds + wranglerTickled = true; + fullWakeReason = kFullWakeReasonLocalUser; + startIdleSleepTimer(30); #endif - sleepCnt++; + sleepCnt++; - thread_call_enter(updateConsoleUsersEntry); + thread_call_enter(updateConsoleUsersEntry); - changePowerStateToPriv(ON_STATE); - } break; + changePowerStateToPriv(ON_STATE); + } break; #if !__i386__ && !__x86_64__ - case ON_STATE: { - if (previousPowerState != ON_STATE) - { - DLOG("Force re-evaluating aggressiveness\n"); - /* Force re-evaluate the aggressiveness values to set appropriate idle sleep timer */ - pmPowerStateQueue->submitPowerEvent( - kPowerEventPolicyStimulus, - (void *) kStimulusNoIdleSleepPreventers ); - } - break; - } + case ON_STATE: { + if (previousPowerState != ON_STATE) { + DLOG("Force re-evaluating aggressiveness\n"); + /* Force re-evaluate the aggressiveness values to set appropriate idle sleep timer */ + pmPowerStateQueue->submitPowerEvent( + kPowerEventPolicyStimulus, + (void *) kStimulusNoIdleSleepPreventers ); + } + break; + } #endif - - } - notifierThread = NULL; + } + notifierThread = NULL; } 
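On the receiving side of the synchronizeAggressives() and broadcastAggressives() calls above, each changed record arrives through the standard IOService::setAggressiveness() virtual. A minimal sketch of a consumer follows; MyDiskDriver is a hypothetical class for illustration only (registration boilerplate such as OSDeclareDefaultStructors/OSDefineMetaClassAndStructors is elided), not code from this patch:

#include <IOKit/IOService.h>
#include <IOKit/pwr_mgt/IOPM.h>

// Hypothetical target of the aggressiveness broadcasts. The root domain
// invokes setAggressiveness() once per modified record; for
// kPMMinutesToSpinDown, a value of kAggressivesMinValue (1) signals the
// quick-spindown window, and a later broadcast restores the user setting.
class MyDiskDriver : public IOService
{
	unsigned long fSpinDownMinutes;

public:
	virtual IOReturn setAggressiveness(unsigned long type,
	    unsigned long newLevel)
	{
		if (type == kPMMinutesToSpinDown) {
			fSpinDownMinutes = newLevel;
			// reprogram the device idle timer here
		}
		return IOService::setAggressiveness(type, newLevel);
	}
};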
//****************************************************************************** @@ -2599,15 +2571,16 @@ void IOPMrootDomain::powerChangeDone( unsigned long previousPowerState ) // Extend implementation in IOService. Running on PM work loop thread. //****************************************************************************** -IOReturn IOPMrootDomain::requestPowerDomainState ( - IOPMPowerFlags childDesire, - IOPowerConnection * childConnection, - unsigned long specification ) +IOReturn +IOPMrootDomain::requestPowerDomainState( + IOPMPowerFlags childDesire, + IOPowerConnection * childConnection, + unsigned long specification ) { - // Idle and system sleep prevention flags affects driver desire. - // Children desire are irrelevant so they are cleared. + // Idle and system sleep prevention flags affects driver desire. + // Children desire are irrelevant so they are cleared. - return super::requestPowerDomainState(0, childConnection, specification); + return super::requestPowerDomainState(0, childConnection, specification); } @@ -2619,74 +2592,68 @@ IOReturn IOPMrootDomain::requestPowerDomainState ( // sleep and updated the list of idle sleep preventers. Returns false otherwise //****************************************************************************** -bool IOPMrootDomain::updatePreventIdleSleepList( - IOService * service, bool addNotRemove ) +bool +IOPMrootDomain::updatePreventIdleSleepList( + IOService * service, bool addNotRemove ) { - unsigned int oldCount, newCount; + unsigned int oldCount, newCount; - ASSERT_GATED(); + ASSERT_GATED(); #if defined(__i386__) || defined(__x86_64__) - // Disregard disk I/O (besides the display wrangler) as a factor preventing - // idle sleep, except in the case of legacy disk I/O - if ((service != wrangler) && (service != this)) - { - return false; - } + // Disregard disk I/O (besides the display wrangler) as a factor preventing + // idle sleep, except in the case of legacy disk I/O + if ((service != wrangler) && (service != this)) { + return false; + } #endif - oldCount = preventIdleSleepList->getCount(); - if (addNotRemove) - { - preventIdleSleepList->setObject(service); - DLOG("prevent idle sleep list: %s+ (%u)\n", - service->getName(), preventIdleSleepList->getCount()); - } - else if (preventIdleSleepList->member(service)) - { - preventIdleSleepList->removeObject(service); - DLOG("prevent idle sleep list: %s- (%u)\n", - service->getName(), preventIdleSleepList->getCount()); - } - newCount = preventIdleSleepList->getCount(); - - if ((oldCount == 0) && (newCount != 0)) - { - // Driver added to empty prevent list. - // Update the driver desire to prevent idle sleep. - // Driver desire does not prevent demand sleep. - - changePowerStateTo(ON_STATE); - } - else if ((oldCount != 0) && (newCount == 0)) - { - // Last driver removed from prevent list. - // Drop the driver clamp to allow idle sleep. 
-
-        changePowerStateTo(SLEEP_STATE);
-        evaluatePolicy( kStimulusNoIdleSleepPreventers );
-    }
-    messageClient(kIOPMMessageIdleSleepPreventers, systemCapabilityNotifier,
-        &newCount, sizeof(newCount));
+	oldCount = preventIdleSleepList->getCount();
+	if (addNotRemove) {
+		preventIdleSleepList->setObject(service);
+		DLOG("prevent idle sleep list: %s+ (%u)\n",
+		    service->getName(), preventIdleSleepList->getCount());
+	} else if (preventIdleSleepList->member(service)) {
+		preventIdleSleepList->removeObject(service);
+		DLOG("prevent idle sleep list: %s- (%u)\n",
+		    service->getName(), preventIdleSleepList->getCount());
+	}
+	newCount = preventIdleSleepList->getCount();
+
+	if ((oldCount == 0) && (newCount != 0)) {
+		// Driver added to empty prevent list.
+		// Update the driver desire to prevent idle sleep.
+		// Driver desire does not prevent demand sleep.
+
+		changePowerStateTo(ON_STATE);
+	} else if ((oldCount != 0) && (newCount == 0)) {
+		// Last driver removed from prevent list.
+		// Drop the driver clamp to allow idle sleep.
+
+		changePowerStateTo(SLEEP_STATE);
+		evaluatePolicy( kStimulusNoIdleSleepPreventers );
+	}
+	messageClient(kIOPMMessageIdleSleepPreventers, systemCapabilityNotifier,
+	    &newCount, sizeof(newCount));

#if defined(__i386__) || defined(__x86_64__)
-    if (addNotRemove && (service == wrangler) && !checkSystemCanSustainFullWake())
-    {
-        DLOG("Cannot cancel idle sleep\n");
-        return false; // do not idle-cancel
-    }
+	if (addNotRemove && (service == wrangler) && !checkSystemCanSustainFullWake()) {
+		DLOG("Cannot cancel idle sleep\n");
+		return false; // do not idle-cancel
+	}
#endif

-    return true;
+	return true;
}

//******************************************************************************
// startSpinDump
//******************************************************************************

-void IOPMrootDomain::startSpinDump(uint32_t spindumpKind)
+void
+IOPMrootDomain::startSpinDump(uint32_t spindumpKind)
{
-    messageClients(kIOPMMessageLaunchBootSpinDump, (void *)(uintptr_t)spindumpKind);
+	messageClients(kIOPMMessageLaunchBootSpinDump, (void *)(uintptr_t)spindumpKind);
}

//******************************************************************************
@@ -2695,164 +2662,158 @@ void IOPMrootDomain::startSpinDump(uint32_t spindumpKind)
// Called by IOService on PM work loop.
//******************************************************************************

-void IOPMrootDomain::updatePreventSystemSleepList(
-    IOService * service, bool addNotRemove )
-{
-    unsigned int oldCount, newCount;
-
-    ASSERT_GATED();
-    if (this == service)
-        return;
-
-    oldCount = preventSystemSleepList->getCount();
-    if (addNotRemove)
-    {
-        preventSystemSleepList->setObject(service);
-        DLOG("prevent system sleep list: %s+ (%u)\n",
-            service->getName(), preventSystemSleepList->getCount());
-        if (!assertOnWakeSecs && gIOLastWakeAbsTime) {
-            AbsoluteTime now;
-            clock_usec_t microsecs;
-            clock_get_uptime(&now);
-            SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime);
-            absolutetime_to_microtime(now, &assertOnWakeSecs, &microsecs);
-            if (assertOnWakeReport) {
-                HISTREPORT_TALLYVALUE(assertOnWakeReport, (int64_t)assertOnWakeSecs);
-                DLOG("Updated assertOnWake %lu\n", (unsigned long)assertOnWakeSecs);
-            }
-        }
-    }
-    else if (preventSystemSleepList->member(service))
-    {
-        preventSystemSleepList->removeObject(service);
-        DLOG("prevent system sleep list: %s- (%u)\n",
-            service->getName(), preventSystemSleepList->getCount());
-
-        if ((oldCount != 0) && (preventSystemSleepList->getCount() == 0))
-        {
-            // Lost all system sleep preventers.
-            // Send stimulus if system sleep was blocked, and is in dark wake.
-            evaluatePolicy( kStimulusDarkWakeEvaluate );
-        }
-    }
-    newCount = preventSystemSleepList->getCount();
-    messageClient(kIOPMMessageSystemSleepPreventers, systemCapabilityNotifier,
-        &newCount, sizeof(newCount));
-}
-
-void IOPMrootDomain::copySleepPreventersList(OSArray **idleSleepList, OSArray **systemSleepList)
-{
-
-    OSCollectionIterator *iterator = NULL;
-    OSObject *object = NULL;
-    OSArray *array = NULL;
-
-    if (!gIOPMWorkLoop->inGate())
-    {
-        gIOPMWorkLoop->runAction(
-            OSMemberFunctionCast(IOWorkLoop::Action, this,
-                &IOPMrootDomain::IOPMrootDomain::copySleepPreventersList),
-            this, (void *)idleSleepList, (void *)systemSleepList);
-        return;
-    }
-
-    if (idleSleepList && preventIdleSleepList && (preventIdleSleepList->getCount() != 0))
-    {
-        iterator = OSCollectionIterator::withCollection(preventIdleSleepList);
-        array = OSArray::withCapacity(5);
-
-        while ((object = iterator->getNextObject()))
-        {
-            IOService *service = OSDynamicCast(IOService, object);
-            if (object)
-            {
-                array->setObject(OSSymbol::withCString(service->getName()));
-            }
-        }
-
-        iterator->release();
-        *idleSleepList = array;
-    }
-
-    if (systemSleepList && preventSystemSleepList && (preventSystemSleepList->getCount() != 0))
-    {
-        iterator = OSCollectionIterator::withCollection(preventSystemSleepList);
-        array = OSArray::withCapacity(5);
-
-        while ((object = iterator->getNextObject()))
-        {
-            IOService *service = OSDynamicCast(IOService, object);
-            if (object)
-            {
-                array->setObject(OSSymbol::withCString(service->getName()));
-            }
-        }
-
-        iterator->release();
-        *systemSleepList = array;
    }
-}
+void
+IOPMrootDomain::updatePreventSystemSleepList(
+	IOService * service, bool addNotRemove )
+{
+	unsigned int oldCount, newCount;

-//******************************************************************************
-// tellChangeDown
-//
-// Override the superclass implementation to send a different message type.
-//******************************************************************************
+	ASSERT_GATED();
+	if (this == service) {
+		return;
+	}
+
+	oldCount = preventSystemSleepList->getCount();
+	if (addNotRemove) {
+		preventSystemSleepList->setObject(service);
+		DLOG("prevent system sleep list: %s+ (%u)\n",
+		    service->getName(), preventSystemSleepList->getCount());
+		if (!assertOnWakeSecs && gIOLastWakeAbsTime) {
+			AbsoluteTime now;
+			clock_usec_t microsecs;
+			clock_get_uptime(&now);
+			SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime);
+			absolutetime_to_microtime(now, &assertOnWakeSecs, &microsecs);
+			if (assertOnWakeReport) {
+				HISTREPORT_TALLYVALUE(assertOnWakeReport, (int64_t)assertOnWakeSecs);
+				DLOG("Updated assertOnWake %lu\n", (unsigned long)assertOnWakeSecs);
+			}
+		}
+	} else if (preventSystemSleepList->member(service)) {
+		preventSystemSleepList->removeObject(service);
+		DLOG("prevent system sleep list: %s- (%u)\n",
+		    service->getName(), preventSystemSleepList->getCount());
+
+		if ((oldCount != 0) && (preventSystemSleepList->getCount() == 0)) {
+			// Lost all system sleep preventers.
+			// Send stimulus if system sleep was blocked, and is in dark wake.
+ evaluatePolicy( kStimulusDarkWakeEvaluate ); + } + } + newCount = preventSystemSleepList->getCount(); + messageClient(kIOPMMessageSystemSleepPreventers, systemCapabilityNotifier, + &newCount, sizeof(newCount)); +} -bool IOPMrootDomain::tellChangeDown( unsigned long stateNum ) +void +IOPMrootDomain::copySleepPreventersList(OSArray **idleSleepList, OSArray **systemSleepList) { - DLOG("tellChangeDown %u->%u\n", - (uint32_t) getPowerState(), (uint32_t) stateNum); + OSCollectionIterator *iterator = NULL; + OSObject *object = NULL; + OSArray *array = NULL; - if (SLEEP_STATE == stateNum) - { - // Legacy apps were already told in the full->dark transition - if (!ignoreTellChangeDown) - tracePoint( kIOPMTracePointSleepApplications ); - else - tracePoint( kIOPMTracePointSleepPriorityClients ); - } + if (!gIOPMWorkLoop->inGate()) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, + &IOPMrootDomain::IOPMrootDomain::copySleepPreventersList), + this, (void *)idleSleepList, (void *)systemSleepList); + return; + } - if (!ignoreTellChangeDown) { - userActivityAtSleep = userActivityCount; - DLOG("tellChangeDown::userActivityAtSleep %d\n", userActivityAtSleep); + if (idleSleepList && preventIdleSleepList && (preventIdleSleepList->getCount() != 0)) { + iterator = OSCollectionIterator::withCollection(preventIdleSleepList); + array = OSArray::withCapacity(5); - if (SLEEP_STATE == stateNum) { - hibernateAborted = false; + while ((object = iterator->getNextObject())) { + IOService *service = OSDynamicCast(IOService, object); + if (object) { + array->setObject(OSSymbol::withCString(service->getName())); + } + } - // Direct callout into OSKext so it can disable kext unloads - // during sleep/wake to prevent deadlocks. - OSKextSystemSleepOrWake( kIOMessageSystemWillSleep ); + iterator->release(); + *idleSleepList = array; + } - IOService::updateConsoleUsers(NULL, kIOMessageSystemWillSleep); + if (systemSleepList && preventSystemSleepList && (preventSystemSleepList->getCount() != 0)) { + iterator = OSCollectionIterator::withCollection(preventSystemSleepList); + array = OSArray::withCapacity(5); - // Two change downs are sent by IOServicePM. Ignore the 2nd. - // But tellClientsWithResponse() must be called for both. - ignoreTellChangeDown = true; - } - } + while ((object = iterator->getNextObject())) { + IOService *service = OSDynamicCast(IOService, object); + if (object) { + array->setObject(OSSymbol::withCString(service->getName())); + } + } - return super::tellClientsWithResponse( kIOMessageSystemWillSleep ); + iterator->release(); + *systemSleepList = array; + } } //****************************************************************************** -// askChangeDown +// tellChangeDown // // Override the superclass implementation to send a different message type. -// This must be idle sleep since we don't ask during any other power change. 
//****************************************************************************** -bool IOPMrootDomain::askChangeDown( unsigned long stateNum ) +bool +IOPMrootDomain::tellChangeDown( unsigned long stateNum ) { - DLOG("askChangeDown %u->%u\n", - (uint32_t) getPowerState(), (uint32_t) stateNum); + DLOG("tellChangeDown %u->%u\n", + (uint32_t) getPowerState(), (uint32_t) stateNum); - // Don't log for dark wake entry - if (kSystemTransitionSleep == _systemTransitionType) - tracePoint( kIOPMTracePointSleepApplications ); - - return super::tellClientsWithResponse( kIOMessageCanSystemSleep ); -} + if (SLEEP_STATE == stateNum) { + // Legacy apps were already told in the full->dark transition + if (!ignoreTellChangeDown) { + tracePoint( kIOPMTracePointSleepApplications ); + } else { + tracePoint( kIOPMTracePointSleepPriorityClients ); + } + } + + if (!ignoreTellChangeDown) { + userActivityAtSleep = userActivityCount; + DLOG("tellChangeDown::userActivityAtSleep %d\n", userActivityAtSleep); + + if (SLEEP_STATE == stateNum) { + hibernateAborted = false; + + // Direct callout into OSKext so it can disable kext unloads + // during sleep/wake to prevent deadlocks. + OSKextSystemSleepOrWake( kIOMessageSystemWillSleep ); + + IOService::updateConsoleUsers(NULL, kIOMessageSystemWillSleep); + + // Two change downs are sent by IOServicePM. Ignore the 2nd. + // But tellClientsWithResponse() must be called for both. + ignoreTellChangeDown = true; + } + } + + return super::tellClientsWithResponse( kIOMessageSystemWillSleep ); +} + +//****************************************************************************** +// askChangeDown +// +// Override the superclass implementation to send a different message type. +// This must be idle sleep since we don't ask during any other power change. +//****************************************************************************** + +bool +IOPMrootDomain::askChangeDown( unsigned long stateNum ) +{ + DLOG("askChangeDown %u->%u\n", + (uint32_t) getPowerState(), (uint32_t) stateNum); + + // Don't log for dark wake entry + if (kSystemTransitionSleep == _systemTransitionType) { + tracePoint( kIOPMTracePointSleepApplications ); + } + + return super::tellClientsWithResponse( kIOMessageCanSystemSleep ); +} //****************************************************************************** // askChangeDownDone @@ -2878,29 +2839,28 @@ bool IOPMrootDomain::askChangeDown( unsigned long stateNum ) // 2. askChangeDownDone() //****************************************************************************** -void IOPMrootDomain::askChangeDownDone( - IOPMPowerChangeFlags * inOutChangeFlags, bool * cancel ) +void +IOPMrootDomain::askChangeDownDone( + IOPMPowerChangeFlags * inOutChangeFlags, bool * cancel ) { - DLOG("askChangeDownDone(0x%x, %u) type %x, cap %x->%x\n", - *inOutChangeFlags, *cancel, - _systemTransitionType, - _currentCapability, _pendingCapability); + DLOG("askChangeDownDone(0x%x, %u) type %x, cap %x->%x\n", + *inOutChangeFlags, *cancel, + _systemTransitionType, + _currentCapability, _pendingCapability); - if ((false == *cancel) && (kSystemTransitionSleep == _systemTransitionType)) - { - // Dark->Sleep transition. - // Check if there are any deny sleep assertions. - // lastSleepReason already set by handleOurPowerChangeStart() + if ((false == *cancel) && (kSystemTransitionSleep == _systemTransitionType)) { + // Dark->Sleep transition. + // Check if there are any deny sleep assertions. 
+ // lastSleepReason already set by handleOurPowerChangeStart() - if (!checkSystemCanSleep(lastSleepReason)) - { - // Cancel dark wake to sleep transition. - // Must re-scan assertions upon entering dark wake. + if (!checkSystemCanSleep(lastSleepReason)) { + // Cancel dark wake to sleep transition. + // Must re-scan assertions upon entering dark wake. - *cancel = true; - DLOG("cancel dark->sleep\n"); - } - } + *cancel = true; + DLOG("cancel dark->sleep\n"); + } + } } //****************************************************************************** @@ -2909,55 +2869,50 @@ void IOPMrootDomain::askChangeDownDone( // Work common to both canceled or aborted sleep. //****************************************************************************** -void IOPMrootDomain::systemDidNotSleep( void ) +void +IOPMrootDomain::systemDidNotSleep( void ) { - // reset console lock state - thread_call_enter(updateConsoleUsersEntry); + // reset console lock state + thread_call_enter(updateConsoleUsersEntry); - if (!wrangler) - { - if (idleSleepEnabled) - { - // stay awake for at least idleSeconds - startIdleSleepTimer(idleSeconds); - } - } - else - { - if (idleSleepEnabled && !userIsActive) - { - // Manually start the idle sleep timer besides waiting for - // the user to become inactive. - startIdleSleepTimer( kIdleSleepRetryInterval ); - } - } + if (!wrangler) { + if (idleSleepEnabled) { + // stay awake for at least idleSeconds + startIdleSleepTimer(idleSeconds); + } + } else { + if (idleSleepEnabled && !userIsActive) { + // Manually start the idle sleep timer besides waiting for + // the user to become inactive. + startIdleSleepTimer( kIdleSleepRetryInterval ); + } + } - preventTransitionToUserActive(false); - IOService::setAdvisoryTickleEnable( true ); + preventTransitionToUserActive(false); + IOService::setAdvisoryTickleEnable( true ); - // After idle revert and cancel, send a did-change message to powerd - // to balance the previous will-change message. Kernel clients do not - // need this since sleep cannot be canceled once they are notified. + // After idle revert and cancel, send a did-change message to powerd + // to balance the previous will-change message. Kernel clients do not + // need this since sleep cannot be canceled once they are notified. - if (toldPowerdCapWillChange && systemCapabilityNotifier && - (_pendingCapability != _currentCapability) && - ((_systemMessageClientMask & kSystemMessageClientPowerd) != 0)) - { - // Differs from a real capability gain change where notifyRef != 0, - // but it is zero here since no response is expected. + if (toldPowerdCapWillChange && systemCapabilityNotifier && + (_pendingCapability != _currentCapability) && + ((_systemMessageClientMask & kSystemMessageClientPowerd) != 0)) { + // Differs from a real capability gain change where notifyRef != 0, + // but it is zero here since no response is expected. 
- IOPMSystemCapabilityChangeParameters params; + IOPMSystemCapabilityChangeParameters params; - bzero(&params, sizeof(params)); - params.fromCapabilities = _pendingCapability; - params.toCapabilities = _currentCapability; - params.changeFlags = kIOPMSystemCapabilityDidChange; + bzero(&params, sizeof(params)); + params.fromCapabilities = _pendingCapability; + params.toCapabilities = _currentCapability; + params.changeFlags = kIOPMSystemCapabilityDidChange; - DLOG("MESG cap %x->%x did change\n", - params.fromCapabilities, params.toCapabilities); - messageClient(kIOMessageSystemCapabilityChange, systemCapabilityNotifier, - &params, sizeof(params)); - } + DLOG("MESG cap %x->%x did change\n", + params.fromCapabilities, params.toCapabilities); + messageClient(kIOMessageSystemCapabilityChange, systemCapabilityNotifier, + &params, sizeof(params)); + } } //****************************************************************************** @@ -2972,16 +2927,17 @@ void IOPMrootDomain::systemDidNotSleep( void ) // This must be a vetoed idle sleep, since no other power change can be vetoed. //****************************************************************************** -void IOPMrootDomain::tellNoChangeDown( unsigned long stateNum ) +void +IOPMrootDomain::tellNoChangeDown( unsigned long stateNum ) { - DLOG("tellNoChangeDown %u->%u\n", - (uint32_t) getPowerState(), (uint32_t) stateNum); + DLOG("tellNoChangeDown %u->%u\n", + (uint32_t) getPowerState(), (uint32_t) stateNum); - // Sleep canceled, clear the sleep trace point. - tracePoint(kIOPMTracePointSystemUp); + // Sleep canceled, clear the sleep trace point. + tracePoint(kIOPMTracePointSystemUp); - systemDidNotSleep(); - return tellClients( kIOMessageSystemWillNotSleep ); + systemDidNotSleep(); + return tellClients( kIOMessageSystemWillNotSleep ); } //****************************************************************************** @@ -2993,35 +2949,34 @@ void IOPMrootDomain::tellNoChangeDown( unsigned long stateNum ) // type to the client or application being notified. //****************************************************************************** -void IOPMrootDomain::tellChangeUp( unsigned long stateNum ) +void +IOPMrootDomain::tellChangeUp( unsigned long stateNum ) { - DLOG("tellChangeUp %u->%u\n", - (uint32_t) getPowerState(), (uint32_t) stateNum); + DLOG("tellChangeUp %u->%u\n", + (uint32_t) getPowerState(), (uint32_t) stateNum); - ignoreTellChangeDown = false; + ignoreTellChangeDown = false; - if ( stateNum == ON_STATE ) - { - // Direct callout into OSKext so it can disable kext unloads - // during sleep/wake to prevent deadlocks. - OSKextSystemSleepOrWake( kIOMessageSystemHasPoweredOn ); + if (stateNum == ON_STATE) { + // Direct callout into OSKext so it can disable kext unloads + // during sleep/wake to prevent deadlocks. + OSKextSystemSleepOrWake( kIOMessageSystemHasPoweredOn ); - // Notify platform that sleep was cancelled or resumed. - getPlatform()->callPlatformFunction( - sleepMessagePEFunction, false, - (void *)(uintptr_t) kIOMessageSystemHasPoweredOn, - NULL, NULL, NULL); + // Notify platform that sleep was cancelled or resumed.
+ getPlatform()->callPlatformFunction( + sleepMessagePEFunction, false, + (void *)(uintptr_t) kIOMessageSystemHasPoweredOn, + NULL, NULL, NULL); - if (getPowerState() == ON_STATE) - { - // this is a quick wake from aborted sleep - systemDidNotSleep(); - tellClients( kIOMessageSystemWillPowerOn ); - } + if (getPowerState() == ON_STATE) { + // this is a quick wake from aborted sleep + systemDidNotSleep(); + tellClients( kIOMessageSystemWillPowerOn ); + } - tracePoint( kIOPMTracePointWakeApplications ); - tellClients( kIOMessageSystemHasPoweredOn ); - } + tracePoint( kIOPMTracePointWakeApplications ); + tellClients( kIOMessageSystemHasPoweredOn ); + } } #define CAP_WILL_CHANGE_TO_OFF(params, flag) \ @@ -3050,90 +3005,87 @@ void IOPMrootDomain::tellChangeUp( unsigned long stateNum ) // Perform a vfs sync before system sleep. //****************************************************************************** -IOReturn IOPMrootDomain::sysPowerDownHandler( - void * target, void * refCon, - UInt32 messageType, IOService * service, - void * messageArgs, vm_size_t argSize ) +IOReturn +IOPMrootDomain::sysPowerDownHandler( + void * target, void * refCon, + UInt32 messageType, IOService * service, + void * messageArgs, vm_size_t argSize ) { - IOReturn ret = 0; + IOReturn ret = 0; - DLOG("sysPowerDownHandler message %s\n", getIOMessageString(messageType)); + DLOG("sysPowerDownHandler message %s\n", getIOMessageString(messageType)); - if (!gRootDomain) - return kIOReturnUnsupported; + if (!gRootDomain) { + return kIOReturnUnsupported; + } - if (messageType == kIOMessageSystemCapabilityChange) - { - IOPMSystemCapabilityChangeParameters * params = - (IOPMSystemCapabilityChangeParameters *) messageArgs; + if (messageType == kIOMessageSystemCapabilityChange) { + IOPMSystemCapabilityChangeParameters * params = + (IOPMSystemCapabilityChangeParameters *) messageArgs; - // Interested applications have been notified of an impending power - // change and have acked (when applicable). - // This is our chance to save whatever state we can before powering - // down. - // We call sync_internal defined in xnu/bsd/vfs/vfs_syscalls.c, - // via callout + // Interested applications have been notified of an impending power + // change and have acked (when applicable). + // This is our chance to save whatever state we can before powering + // down. 
+ // We call sync_internal defined in xnu/bsd/vfs/vfs_syscalls.c, + // via callout - DLOG("sysPowerDownHandler cap %x -> %x (flags %x)\n", - params->fromCapabilities, params->toCapabilities, - params->changeFlags); + DLOG("sysPowerDownHandler cap %x -> %x (flags %x)\n", + params->fromCapabilities, params->toCapabilities, + params->changeFlags); - if (CAP_WILL_CHANGE_TO_OFF(params, kIOPMSystemCapabilityCPU)) - { - // We will ack within 20 seconds - params->maxWaitForReply = 20 * 1000 * 1000; + if (CAP_WILL_CHANGE_TO_OFF(params, kIOPMSystemCapabilityCPU)) { + // We will ack within 20 seconds + params->maxWaitForReply = 20 * 1000 * 1000; #if HIBERNATION - gRootDomain->evaluateSystemSleepPolicyEarly(); - - // add in time we could spend freeing pages - if (gRootDomain->hibernateMode && !gRootDomain->hibernateDisabled) - { - params->maxWaitForReply = kCapabilityClientMaxWait; - } - DLOG("sysPowerDownHandler max wait %d s\n", - (int) (params->maxWaitForReply / 1000 / 1000)); + gRootDomain->evaluateSystemSleepPolicyEarly(); + + // add in time we could spend freeing pages + if (gRootDomain->hibernateMode && !gRootDomain->hibernateDisabled) { + params->maxWaitForReply = kCapabilityClientMaxWait; + } + DLOG("sysPowerDownHandler max wait %d s\n", + (int) (params->maxWaitForReply / 1000 / 1000)); #endif - // Notify platform that sleep has begun, after the early - // sleep policy evaluation. - getPlatform()->callPlatformFunction( - sleepMessagePEFunction, false, - (void *)(uintptr_t) kIOMessageSystemWillSleep, - NULL, NULL, NULL); - - if ( !OSCompareAndSwap( 0, 1, &gSleepOrShutdownPending ) ) - { - // Purposely delay the ack and hope that shutdown occurs quickly. - // Another option is not to schedule the thread and wait for - // ack timeout... - AbsoluteTime deadline; - clock_interval_to_deadline( 30, kSecondScale, &deadline ); - thread_call_enter1_delayed( - gRootDomain->diskSyncCalloutEntry, - (thread_call_param_t)(uintptr_t) params->notifyRef, - deadline ); - } - else - thread_call_enter1( - gRootDomain->diskSyncCalloutEntry, - (thread_call_param_t)(uintptr_t) params->notifyRef); - } + // Notify platform that sleep has begun, after the early + // sleep policy evaluation. + getPlatform()->callPlatformFunction( + sleepMessagePEFunction, false, + (void *)(uintptr_t) kIOMessageSystemWillSleep, + NULL, NULL, NULL); + + if (!OSCompareAndSwap( 0, 1, &gSleepOrShutdownPending )) { + // Purposely delay the ack and hope that shutdown occurs quickly. + // Another option is not to schedule the thread and wait for + // ack timeout... 
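A note on units, with a minimal compile-time check (assumes only C++11 static_assert): maxWaitForReply is expressed in microseconds, which is why the 20 second budget is written as 20 * 1000 * 1000 and why the DLOG above divides by 1000 twice before printing seconds.

static_assert((20 * 1000 * 1000) / (1000 * 1000) == 20,
    "ack reply budget is stored in microseconds");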
+ AbsoluteTime deadline; + clock_interval_to_deadline( 30, kSecondScale, &deadline ); + thread_call_enter1_delayed( + gRootDomain->diskSyncCalloutEntry, + (thread_call_param_t)(uintptr_t) params->notifyRef, + deadline ); + } else { + thread_call_enter1( + gRootDomain->diskSyncCalloutEntry, + (thread_call_param_t)(uintptr_t) params->notifyRef); + } + } #if HIBERNATION - else if (CAP_DID_CHANGE_TO_ON(params, kIOPMSystemCapabilityCPU)) - { - // We will ack within 110 seconds - params->maxWaitForReply = 110 * 1000 * 1000; - - thread_call_enter1( - gRootDomain->diskSyncCalloutEntry, - (thread_call_param_t)(uintptr_t) params->notifyRef); - } + else if (CAP_DID_CHANGE_TO_ON(params, kIOPMSystemCapabilityCPU)) { + // We will ack within 110 seconds + params->maxWaitForReply = 110 * 1000 * 1000; + + thread_call_enter1( + gRootDomain->diskSyncCalloutEntry, + (thread_call_param_t)(uintptr_t) params->notifyRef); + } #endif - ret = kIOReturnSuccess; - } + ret = kIOReturnSuccess; + } - return ret; + return ret; } //****************************************************************************** @@ -3148,32 +3100,29 @@ IOReturn IOPMrootDomain::sysPowerDownHandler( // @param obj has a retain on it. We're responsible for releasing that retain. //****************************************************************************** -void IOPMrootDomain::handleQueueSleepWakeUUID(OSObject *obj) +void +IOPMrootDomain::handleQueueSleepWakeUUID(OSObject *obj) { - OSString *str = NULL; - - if (kOSBooleanFalse == obj) - { - handlePublishSleepWakeUUID(NULL); - } - else if ((str = OSDynamicCast(OSString, obj))) - { - // This branch caches the UUID for an upcoming sleep/wake - if (queuedSleepWakeUUIDString) { - queuedSleepWakeUUIDString->release(); - queuedSleepWakeUUIDString = NULL; - } - queuedSleepWakeUUIDString = str; - queuedSleepWakeUUIDString->retain(); + OSString *str = NULL; - DLOG("SleepWake UUID queued: %s\n", queuedSleepWakeUUIDString->getCStringNoCopy()); - } + if (kOSBooleanFalse == obj) { + handlePublishSleepWakeUUID(NULL); + } else if ((str = OSDynamicCast(OSString, obj))) { + // This branch caches the UUID for an upcoming sleep/wake + if (queuedSleepWakeUUIDString) { + queuedSleepWakeUUIDString->release(); + queuedSleepWakeUUIDString = NULL; + } + queuedSleepWakeUUIDString = str; + queuedSleepWakeUUIDString->retain(); - if (obj) { - obj->release(); - } - return; + DLOG("SleepWake UUID queued: %s\n", queuedSleepWakeUUIDString->getCStringNoCopy()); + } + if (obj) { + obj->release(); + } + return; } //****************************************************************************** // handlePublishSleepWakeUUID @@ -3185,45 +3134,43 @@ void IOPMrootDomain::handleQueueSleepWakeUUID(OSObject *obj) // sleep/wake. 
//****************************************************************************** -void IOPMrootDomain::handlePublishSleepWakeUUID( bool shouldPublish ) +void +IOPMrootDomain::handlePublishSleepWakeUUID( bool shouldPublish ) { - ASSERT_GATED(); - - /* - * Clear the current UUID - */ - if (gSleepWakeUUIDIsSet) - { - DLOG("SleepWake UUID cleared\n"); + ASSERT_GATED(); - gSleepWakeUUIDIsSet = false; + /* + * Clear the current UUID + */ + if (gSleepWakeUUIDIsSet) { + DLOG("SleepWake UUID cleared\n"); - removeProperty(kIOPMSleepWakeUUIDKey); - messageClients(kIOPMMessageSleepWakeUUIDChange, kIOPMMessageSleepWakeUUIDCleared); - } + gSleepWakeUUIDIsSet = false; - /* - * Optionally, publish a new UUID - */ - if (queuedSleepWakeUUIDString && shouldPublish) { + removeProperty(kIOPMSleepWakeUUIDKey); + messageClients(kIOPMMessageSleepWakeUUIDChange, kIOPMMessageSleepWakeUUIDCleared); + } - OSString *publishThisUUID = NULL; + /* + * Optionally, publish a new UUID + */ + if (queuedSleepWakeUUIDString && shouldPublish) { + OSString *publishThisUUID = NULL; - publishThisUUID = queuedSleepWakeUUIDString; - publishThisUUID->retain(); + publishThisUUID = queuedSleepWakeUUIDString; + publishThisUUID->retain(); - if (publishThisUUID) - { - setProperty(kIOPMSleepWakeUUIDKey, publishThisUUID); - publishThisUUID->release(); - } + if (publishThisUUID) { + setProperty(kIOPMSleepWakeUUIDKey, publishThisUUID); + publishThisUUID->release(); + } - gSleepWakeUUIDIsSet = true; - messageClients(kIOPMMessageSleepWakeUUIDChange, kIOPMMessageSleepWakeUUIDSet); + gSleepWakeUUIDIsSet = true; + messageClients(kIOPMMessageSleepWakeUUIDChange, kIOPMMessageSleepWakeUUIDSet); - queuedSleepWakeUUIDString->release(); - queuedSleepWakeUUIDString = NULL; - } + queuedSleepWakeUUIDString->release(); + queuedSleepWakeUUIDString = NULL; + } } //****************************************************************************** @@ -3239,7 +3186,7 @@ extern "C" bool IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len) { if (!gSleepWakeUUIDIsSet) { - return (false); + return false; } if (buffer != NULL) { @@ -3257,7 +3204,7 @@ IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len) } } - return (true); + return true; } //****************************************************************************** @@ -3266,16 +3213,17 @@ IOPMCopySleepWakeUUIDKey(char *buffer, size_t buf_len) // Initialize the boot session uuid at boot up and sets it into registry. //****************************************************************************** -void IOPMrootDomain::initializeBootSessionUUID(void) +void +IOPMrootDomain::initializeBootSessionUUID(void) { - uuid_t new_uuid; - uuid_string_t new_uuid_string; + uuid_t new_uuid; + uuid_string_t new_uuid_string; - uuid_generate(new_uuid); - uuid_unparse_upper(new_uuid, new_uuid_string); - memcpy(bootsessionuuid_string, new_uuid_string, sizeof(uuid_string_t)); + uuid_generate(new_uuid); + uuid_unparse_upper(new_uuid, new_uuid_string); + memcpy(bootsessionuuid_string, new_uuid_string, sizeof(uuid_string_t)); - setProperty(kIOPMBootSessionUUIDKey, new_uuid_string); + setProperty(kIOPMBootSessionUUIDKey, new_uuid_string); } //****************************************************************************** @@ -3284,24 +3232,28 @@ void IOPMrootDomain::initializeBootSessionUUID(void) // Override of these methods for logging purposes. 
//****************************************************************************** -IOReturn IOPMrootDomain::changePowerStateTo( unsigned long ordinal ) +IOReturn +IOPMrootDomain::changePowerStateTo( unsigned long ordinal ) { - DLOG("changePowerStateTo(%lu)\n", ordinal); + DLOG("changePowerStateTo(%lu)\n", ordinal); - if ((ordinal != ON_STATE) && (ordinal != SLEEP_STATE)) - return kIOReturnUnsupported; + if ((ordinal != ON_STATE) && (ordinal != SLEEP_STATE)) { + return kIOReturnUnsupported; + } - return super::changePowerStateTo(ordinal); + return super::changePowerStateTo(ordinal); } -IOReturn IOPMrootDomain::changePowerStateToPriv( unsigned long ordinal ) +IOReturn +IOPMrootDomain::changePowerStateToPriv( unsigned long ordinal ) { - DLOG("changePowerStateToPriv(%lu)\n", ordinal); + DLOG("changePowerStateToPriv(%lu)\n", ordinal); - if ((ordinal != ON_STATE) && (ordinal != SLEEP_STATE)) - return kIOReturnUnsupported; + if ((ordinal != ON_STATE) && (ordinal != SLEEP_STATE)) { + return kIOReturnUnsupported; + } - return super::changePowerStateToPriv(ordinal); + return super::changePowerStateToPriv(ordinal); } //****************************************************************************** @@ -3309,30 +3261,32 @@ IOReturn IOPMrootDomain::changePowerStateToPriv( unsigned long ordinal ) // //****************************************************************************** -bool IOPMrootDomain::activitySinceSleep(void) +bool +IOPMrootDomain::activitySinceSleep(void) { - return (userActivityCount != userActivityAtSleep); + return userActivityCount != userActivityAtSleep; } -bool IOPMrootDomain::abortHibernation(void) +bool +IOPMrootDomain::abortHibernation(void) { - bool ret = activitySinceSleep(); + bool ret = activitySinceSleep(); - if (ret && !hibernateAborted && checkSystemCanSustainFullWake()) - { - DLOG("activitySinceSleep ABORT [%d, %d]\n", userActivityCount, userActivityAtSleep); - hibernateAborted = true; - } - return (ret); + if (ret && !hibernateAborted && checkSystemCanSustainFullWake()) { + DLOG("activitySinceSleep ABORT [%d, %d]\n", userActivityCount, userActivityAtSleep); + hibernateAborted = true; + } + return ret; } extern "C" int hibernate_should_abort(void) { - if (gRootDomain) - return (gRootDomain->abortHibernation()); - else - return (0); + if (gRootDomain) { + return gRootDomain->abortHibernation(); + } else { + return 0; + } } //****************************************************************************** @@ -3344,45 +3298,48 @@ hibernate_should_abort(void) // machine (not thread) will block w/o timeout until this function returns. 
//****************************************************************************** -void IOPMrootDomain::willNotifyPowerChildren( IOPMPowerStateIndex newPowerState ) +void +IOPMrootDomain::willNotifyPowerChildren( IOPMPowerStateIndex newPowerState ) { - OSDictionary *dict; - OSNumber *secs; + OSDictionary *dict; + OSNumber *secs; - if (SLEEP_STATE == newPowerState) - { - notifierThread = current_thread(); - if (!tasksSuspended) - { - AbsoluteTime deadline; - tasksSuspended = TRUE; - tasks_system_suspend(tasksSuspended); + if (SLEEP_STATE == newPowerState) { + notifierThread = current_thread(); + if (!tasksSuspended) { + AbsoluteTime deadline; + tasksSuspended = TRUE; + tasks_system_suspend(tasksSuspended); - clock_interval_to_deadline(10, kSecondScale, &deadline); + clock_interval_to_deadline(10, kSecondScale, &deadline); #if !CONFIG_EMBEDDED - vm_pageout_wait(AbsoluteTime_to_scalar(&deadline)); + vm_pageout_wait(AbsoluteTime_to_scalar(&deadline)); #endif /* !CONFIG_EMBEDDED */ - } + } #if HIBERNATION - IOHibernateSystemSleep(); - IOHibernateIOKitSleep(); + IOHibernateSystemSleep(); + IOHibernateIOKitSleep(); #endif - if (gRootDomain->activitySinceSleep()) { - dict = OSDictionary::withCapacity(1); - secs = OSNumber::withNumber(1, 32); - - if (dict && secs) { - dict->setObject(gIOPMSettingDebugWakeRelativeKey, secs); - gRootDomain->setProperties(dict); - MSG("Reverting sleep with relative wake\n"); - } - if (dict) dict->release(); - if (secs) secs->release(); - } + if (gRootDomain->activitySinceSleep()) { + dict = OSDictionary::withCapacity(1); + secs = OSNumber::withNumber(1, 32); + + if (dict && secs) { + dict->setObject(gIOPMSettingDebugWakeRelativeKey, secs); + gRootDomain->setProperties(dict); + MSG("Reverting sleep with relative wake\n"); + } + if (dict) { + dict->release(); + } + if (secs) { + secs->release(); + } + } - notifierThread = NULL; - } + notifierThread = NULL; + } } //****************************************************************************** @@ -3392,35 +3349,39 @@ void IOPMrootDomain::willNotifyPowerChildren( IOPMPowerStateIndex newPowerState // is closed. //****************************************************************************** -bool IOPMrootDomain::shouldSleepOnClamshellClosed( void ) +bool +IOPMrootDomain::shouldSleepOnClamshellClosed( void ) { - if (!clamshellExists) - return false; + if (!clamshellExists) { + return false; + } - DLOG("clamshell closed %d, disabled %d, desktopMode %d, ac %d sleepDisabled %d\n", - clamshellClosed, clamshellDisabled, desktopMode, acAdaptorConnected, clamshellSleepDisabled); + DLOG("clamshell closed %d, disabled %d, desktopMode %d, ac %d sleepDisabled %d\n", + clamshellClosed, clamshellDisabled, desktopMode, acAdaptorConnected, clamshellSleepDisabled); - return ( !clamshellDisabled && !(desktopMode && acAdaptorConnected) && !clamshellSleepDisabled ); + return !clamshellDisabled && !(desktopMode && acAdaptorConnected) && !clamshellSleepDisabled; } -void IOPMrootDomain::sendClientClamshellNotification( void ) +void +IOPMrootDomain::sendClientClamshellNotification( void ) { - /* Only broadcast clamshell alert if clamshell exists. */ - if (!clamshellExists) - return; + /* Only broadcast clamshell alert if clamshell exists. */ + if (!clamshellExists) { + return; + } - setProperty(kAppleClamshellStateKey, - clamshellClosed ? kOSBooleanTrue : kOSBooleanFalse); + setProperty(kAppleClamshellStateKey, + clamshellClosed ? kOSBooleanTrue : kOSBooleanFalse); - setProperty(kAppleClamshellCausesSleepKey, - shouldSleepOnClamshellClosed() ? 
kOSBooleanTrue : kOSBooleanFalse); + setProperty(kAppleClamshellCausesSleepKey, + shouldSleepOnClamshellClosed() ? kOSBooleanTrue : kOSBooleanFalse); - /* Argument to message is a bitfield of - * ( kClamshellStateBit | kClamshellSleepBit ) - */ - messageClients(kIOPMMessageClamshellStateChange, - (void *)(uintptr_t) ( (clamshellClosed ? kClamshellStateBit : 0) - | ( shouldSleepOnClamshellClosed() ? kClamshellSleepBit : 0)) ); + /* Argument to message is a bitfield of + * ( kClamshellStateBit | kClamshellSleepBit ) + */ + messageClients(kIOPMMessageClamshellStateChange, + (void *)(uintptr_t) ((clamshellClosed ? kClamshellStateBit : 0) + | (shouldSleepOnClamshellClosed() ? kClamshellSleepBit : 0))); } //****************************************************************************** @@ -3429,9 +3390,10 @@ void IOPMrootDomain::sendClientClamshellNotification( void ) // Deprecated //****************************************************************************** -IOOptionBits IOPMrootDomain::getSleepSupported( void ) +IOOptionBits +IOPMrootDomain::getSleepSupported( void ) { - return( platformSleepSupport ); + return platformSleepSupport; } //****************************************************************************** @@ -3440,10 +3402,11 @@ IOOptionBits IOPMrootDomain::getSleepSupported( void ) // Deprecated //****************************************************************************** -void IOPMrootDomain::setSleepSupported( IOOptionBits flags ) +void +IOPMrootDomain::setSleepSupported( IOOptionBits flags ) { - DLOG("setSleepSupported(%x)\n", (uint32_t) flags); - OSBitOrAtomic(flags, &platformSleepSupport); + DLOG("setSleepSupported(%x)\n", (uint32_t) flags); + OSBitOrAtomic(flags, &platformSleepSupport); } //****************************************************************************** @@ -3451,28 +3414,27 @@ void IOPMrootDomain::setSleepSupported( IOOptionBits flags ) // //****************************************************************************** -void IOPMrootDomain::setDisableClamShellSleep( bool val ) -{ - if (gIOPMWorkLoop->inGate() == false) { - - gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::setDisableClamShellSleep), - (OSObject *)this, - (void *)val); - - return; - } - else { - DLOG("setDisableClamShellSleep(%x)\n", (uint32_t) val); - if ( clamshellSleepDisabled != val ) - { - clamshellSleepDisabled = val; - // If clamshellSleepDisabled is reset to 0, reevaluate if - // system needs to go to sleep due to clamshell state - if ( !clamshellSleepDisabled && clamshellClosed) - handlePowerNotification(kLocalEvalClamshellCommand); - } - } +void +IOPMrootDomain::setDisableClamShellSleep( bool val ) +{ + if (gIOPMWorkLoop->inGate() == false) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::setDisableClamShellSleep), + (OSObject *)this, + (void *)val); + + return; + } else { + DLOG("setDisableClamShellSleep(%x)\n", (uint32_t) val); + if (clamshellSleepDisabled != val) { + clamshellSleepDisabled = val; + // If clamshellSleepDisabled is reset to 0, reevaluate if + // system needs to go to sleep due to clamshell state + if (!clamshellSleepDisabled && clamshellClosed) { + handlePowerNotification(kLocalEvalClamshellCommand); + } + } + } } //****************************************************************************** @@ -3481,9 +3443,10 @@ void IOPMrootDomain::setDisableClamShellSleep( bool val ) // Deprecated.
//****************************************************************************** -void IOPMrootDomain::wakeFromDoze( void ) +void +IOPMrootDomain::wakeFromDoze( void ) { - // Preserve symbol for families (IOUSBFamily and IOGraphics) + // Preserve symbol for families (IOUSBFamily and IOGraphics) } // MARK: - @@ -3495,9 +3458,10 @@ void IOPMrootDomain::wakeFromDoze( void ) // publishFeature // // Adds a new feature to the supported features dictionary //****************************************************************************** -void IOPMrootDomain::publishFeature( const char * feature ) +void +IOPMrootDomain::publishFeature( const char * feature ) { - publishFeature(feature, kRD_AllPowerSources, NULL); + publishFeature(feature, kRD_AllPowerSources, NULL); } //****************************************************************************** @@ -3506,102 +3470,103 @@ void IOPMrootDomain::publishFeature( const char * feature ) // Adds a new feature to the supported features dictionary //****************************************************************************** -void IOPMrootDomain::publishFeature( - const char *feature, - uint32_t supportedWhere, - uint32_t *uniqueFeatureID) -{ - static uint16_t next_feature_id = 500; - - OSNumber *new_feature_data = NULL; - OSNumber *existing_feature = NULL; - OSArray *existing_feature_arr = NULL; - OSObject *osObj = NULL; - uint32_t feature_value = 0; - - supportedWhere &= kRD_AllPowerSources; // mask off any craziness! - - if(!supportedWhere) { - // Feature isn't supported anywhere! - return; - } - - if(next_feature_id > 5000) { - // Far, far too many features! - return; - } - - if(featuresDictLock) IOLockLock(featuresDictLock); - - OSDictionary *features = - (OSDictionary *) getProperty(kRootDomainSupportedFeatures); - - // Create new features dict if necessary - if ( features && OSDynamicCast(OSDictionary, features)) { - features = OSDictionary::withDictionary(features); - } else { - features = OSDictionary::withCapacity(1); - } - - // Create OSNumber to track new feature - - next_feature_id += 1; - if( uniqueFeatureID ) { - // We don't really mind if the calling kext didn't give us a place - // to stash their unique id. Many kexts don't plan to unload, and thus - // have no need to remove themselves later. - *uniqueFeatureID = next_feature_id; - } - - feature_value = (uint32_t)next_feature_id; - feature_value <<= 16; - feature_value += supportedWhere; - - new_feature_data = OSNumber::withNumber( - (unsigned long long)feature_value, 32); - - // Does features object already exist? - if( (osObj = features->getObject(feature)) ) - { - if(( existing_feature = OSDynamicCast(OSNumber, osObj) )) - { - // We need to create an OSArray to hold the now 2 elements. - existing_feature_arr = OSArray::withObjects( - (const OSObject **)&existing_feature, 1, 2); - } else if(( existing_feature_arr = OSDynamicCast(OSArray, osObj) )) - { - // Add object to existing array - existing_feature_arr = OSArray::withArray( - existing_feature_arr, - existing_feature_arr->getCount() + 1); - } - - if (existing_feature_arr) - { - existing_feature_arr->setObject(new_feature_data); - features->setObject(feature, existing_feature_arr); - existing_feature_arr->release(); - existing_feature_arr = 0; - } - } else { - // The easy case: no previously existing features listed. We simply - // set the OSNumber at key 'feature' and we're on our way.
- features->setObject(feature, new_feature_data); - } - - new_feature_data->release(); - - setProperty(kRootDomainSupportedFeatures, features); - - features->release(); - - if(featuresDictLock) IOLockUnlock(featuresDictLock); - - // Notify EnergySaver and all those in user space so they might - // re-populate their feature specific UI - if(pmPowerStateQueue) { - pmPowerStateQueue->submitPowerEvent( kPowerEventFeatureChanged ); - } +void +IOPMrootDomain::publishFeature( + const char *feature, + uint32_t supportedWhere, + uint32_t *uniqueFeatureID) +{ + static uint16_t next_feature_id = 500; + + OSNumber *new_feature_data = NULL; + OSNumber *existing_feature = NULL; + OSArray *existing_feature_arr = NULL; + OSObject *osObj = NULL; + uint32_t feature_value = 0; + + supportedWhere &= kRD_AllPowerSources; // mask off any craziness! + + if (!supportedWhere) { + // Feature isn't supported anywhere! + return; + } + + if (next_feature_id > 5000) { + // Far, far too many features! + return; + } + + if (featuresDictLock) { + IOLockLock(featuresDictLock); + } + + OSDictionary *features = + (OSDictionary *) getProperty(kRootDomainSupportedFeatures); + + // Create new features dict if necessary + if (features && OSDynamicCast(OSDictionary, features)) { + features = OSDictionary::withDictionary(features); + } else { + features = OSDictionary::withCapacity(1); + } + + // Create OSNumber to track new feature + + next_feature_id += 1; + if (uniqueFeatureID) { + // We don't really mind if the calling kext didn't give us a place + // to stash their unique id. Many kexts don't plan to unload, and thus + // have no need to remove themselves later. + *uniqueFeatureID = next_feature_id; + } + + feature_value = (uint32_t)next_feature_id; + feature_value <<= 16; + feature_value += supportedWhere; + + new_feature_data = OSNumber::withNumber( + (unsigned long long)feature_value, 32); + + // Does features object already exist? + if ((osObj = features->getObject(feature))) { + if ((existing_feature = OSDynamicCast(OSNumber, osObj))) { + // We need to create an OSArray to hold the now 2 elements. + existing_feature_arr = OSArray::withObjects( + (const OSObject **)&existing_feature, 1, 2); + } else if ((existing_feature_arr = OSDynamicCast(OSArray, osObj))) { + // Add object to existing array + existing_feature_arr = OSArray::withArray( + existing_feature_arr, + existing_feature_arr->getCount() + 1); + } + + if (existing_feature_arr) { + existing_feature_arr->setObject(new_feature_data); + features->setObject(feature, existing_feature_arr); + existing_feature_arr->release(); + existing_feature_arr = 0; + } + } else { + // The easy case: no previously existing features listed. We simply + // set the OSNumber at key 'feature' and we're on our way. 
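A worked example of the 32-bit packing computed above (hypothetical values; the 0xFFFF mask mirrors the shift-by-16 split, and kIOPMSupportedOnAC is one of the power-source bits covered by kRD_AllPowerSources):

uint32_t example_value = (501u << 16) + kIOPMSupportedOnAC; // feature id 501, AC only
uint16_t example_id    = (uint16_t)(example_value >> 16);   // == 501, as removePublishedFeature() decodes
uint32_t example_where = example_value & 0xFFFF;            // == kIOPMSupportedOnAC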
+ features->setObject(feature, new_feature_data); + } + + new_feature_data->release(); + + setProperty(kRootDomainSupportedFeatures, features); + + features->release(); + + if (featuresDictLock) { + IOLockUnlock(featuresDictLock); + } + + // Notify EnergySaver and all those in user space so they might + // re-populate their feature specific UI + if (pmPowerStateQueue) { + pmPowerStateQueue->submitPowerEvent( kPowerEventFeatureChanged ); + } } //****************************************************************************** @@ -3610,129 +3575,128 @@ void IOPMrootDomain::publishFeature( // Removes previously published feature //****************************************************************************** -IOReturn IOPMrootDomain::removePublishedFeature( uint32_t removeFeatureID ) -{ - IOReturn ret = kIOReturnError; - uint32_t feature_value = 0; - uint16_t feature_id = 0; - bool madeAChange = false; - - OSSymbol *dictKey = NULL; - OSCollectionIterator *dictIterator = NULL; - OSArray *arrayMember = NULL; - OSNumber *numberMember = NULL; - OSObject *osObj = NULL; - OSNumber *osNum = NULL; - OSArray *arrayMemberCopy; - - if (kBadPMFeatureID == removeFeatureID) - return kIOReturnNotFound; - - if(featuresDictLock) IOLockLock(featuresDictLock); - - OSDictionary *features = - (OSDictionary *) getProperty(kRootDomainSupportedFeatures); - - if ( features && OSDynamicCast(OSDictionary, features) ) - { - // Any modifications to the dictionary are made to the copy to prevent - // races & crashes with userland clients. Dictionary updated - // atomically later. - features = OSDictionary::withDictionary(features); - } else { - features = NULL; - ret = kIOReturnNotFound; - goto exit; - } - - // We iterate 'features' dictionary looking for an entry tagged - // with 'removeFeatureID'. If found, we remove it from our tracking - // structures and notify the OS via a general interest message. - - dictIterator = OSCollectionIterator::withCollection(features); - if(!dictIterator) { - goto exit; - } - - while( (dictKey = OSDynamicCast(OSSymbol, dictIterator->getNextObject())) ) - { - osObj = features->getObject(dictKey); - - // Each Feature is either tracked by an OSNumber - if( osObj && (numberMember = OSDynamicCast(OSNumber, osObj)) ) - { - feature_value = numberMember->unsigned32BitValue(); - feature_id = (uint16_t)(feature_value >> 16); - - if( feature_id == (uint16_t)removeFeatureID ) - { - // Remove this node - features->removeObject(dictKey); - madeAChange = true; - break; - } - - // Or tracked by an OSArray of OSNumbers - } else if( osObj && (arrayMember = OSDynamicCast(OSArray, osObj)) ) - { - unsigned int arrayCount = arrayMember->getCount(); - - for(unsigned int i=0; i<arrayCount; i++) - { - osNum = OSDynamicCast(OSNumber, arrayMember->getObject(i)); - if(!osNum) { - continue; - } - - feature_value = osNum->unsigned32BitValue(); - feature_id = (uint16_t)(feature_value >> 16); - - if( feature_id == (uint16_t)removeFeatureID ) - { - // Remove this node - if( 1 == arrayCount ) { - // If the array only contains one element, remove - // the whole thing. - features->removeObject(dictKey); - } else { - // Otherwise remove the element from a copy of the array.
- arrayMemberCopy = OSArray::withArray(arrayMember); - if (arrayMemberCopy) - { - arrayMemberCopy->removeObject(i); - features->setObject(dictKey, arrayMemberCopy); - arrayMemberCopy->release(); - } - } - - madeAChange = true; - break; - } - } - } - } - - dictIterator->release(); - - if( madeAChange ) - { - ret = kIOReturnSuccess; - - setProperty(kRootDomainSupportedFeatures, features); - - // Notify EnergySaver and all those in user space so they might - // re-populate their feature specific UI - if(pmPowerStateQueue) { - pmPowerStateQueue->submitPowerEvent( kPowerEventFeatureChanged ); - } - } else { - ret = kIOReturnNotFound; - } +IOReturn +IOPMrootDomain::removePublishedFeature( uint32_t removeFeatureID ) +{ + IOReturn ret = kIOReturnError; + uint32_t feature_value = 0; + uint16_t feature_id = 0; + bool madeAChange = false; + + OSSymbol *dictKey = NULL; + OSCollectionIterator *dictIterator = NULL; + OSArray *arrayMember = NULL; + OSNumber *numberMember = NULL; + OSObject *osObj = NULL; + OSNumber *osNum = NULL; + OSArray *arrayMemberCopy; + + if (kBadPMFeatureID == removeFeatureID) { + return kIOReturnNotFound; + } + + if (featuresDictLock) { + IOLockLock(featuresDictLock); + } + + OSDictionary *features = + (OSDictionary *) getProperty(kRootDomainSupportedFeatures); + + if (features && OSDynamicCast(OSDictionary, features)) { + // Any modifications to the dictionary are made to the copy to prevent + // races & crashes with userland clients. Dictionary updated + // atomically later. + features = OSDictionary::withDictionary(features); + } else { + features = NULL; + ret = kIOReturnNotFound; + goto exit; + } + + // We iterate 'features' dictionary looking for an entry tagged + // with 'removeFeatureID'. If found, we remove it from our tracking + // structures and notify the OS via a general interest message. + + dictIterator = OSCollectionIterator::withCollection(features); + if (!dictIterator) { + goto exit; + } + + while ((dictKey = OSDynamicCast(OSSymbol, dictIterator->getNextObject()))) { + osObj = features->getObject(dictKey); + + // Each Feature is either tracked by an OSNumber + if (osObj && (numberMember = OSDynamicCast(OSNumber, osObj))) { + feature_value = numberMember->unsigned32BitValue(); + feature_id = (uint16_t)(feature_value >> 16); + + if (feature_id == (uint16_t)removeFeatureID) { + // Remove this node + features->removeObject(dictKey); + madeAChange = true; + break; + } + + // Or tracked by an OSArray of OSNumbers + } else if (osObj && (arrayMember = OSDynamicCast(OSArray, osObj))) { + unsigned int arrayCount = arrayMember->getCount(); + + for (unsigned int i = 0; i < arrayCount; i++) { + osNum = OSDynamicCast(OSNumber, arrayMember->getObject(i)); + if (!osNum) { + continue; + } + + feature_value = osNum->unsigned32BitValue(); + feature_id = (uint16_t)(feature_value >> 16); + + if (feature_id == (uint16_t)removeFeatureID) { + // Remove this node + if (1 == arrayCount) { + // If the array only contains one element, remove + // the whole thing. + features->removeObject(dictKey); + } else { + // Otherwise remove the element from a copy of the array.
+ arrayMemberCopy = OSArray::withArray(arrayMember); + if (arrayMemberCopy) { + arrayMemberCopy->removeObject(i); + features->setObject(dictKey, arrayMemberCopy); + arrayMemberCopy->release(); + } + } + + madeAChange = true; + break; + } + } + } + } + + dictIterator->release(); + + if (madeAChange) { + ret = kIOReturnSuccess; + + setProperty(kRootDomainSupportedFeatures, features); + + // Notify EnergySaver and all those in user space so they might + // re-populate their feature specific UI + if (pmPowerStateQueue) { + pmPowerStateQueue->submitPowerEvent( kPowerEventFeatureChanged ); + } + } else { + ret = kIOReturnNotFound; + } exit: - if(features) features->release(); - if(featuresDictLock) IOLockUnlock(featuresDictLock); - return ret; + if (features) { + features->release(); + } + if (featuresDictLock) { + IOLockUnlock(featuresDictLock); + } + return ret; } //****************************************************************************** @@ -3742,19 +3706,19 @@ exit: // supported feature. //****************************************************************************** -void IOPMrootDomain::publishPMSetting( - const OSSymbol * feature, uint32_t where, uint32_t * featureID ) +void +IOPMrootDomain::publishPMSetting( + const OSSymbol * feature, uint32_t where, uint32_t * featureID ) { - if (noPublishPMSettings && - (noPublishPMSettings->getNextIndexOfObject(feature, 0) != (unsigned int)-1)) - { - // Setting found in noPublishPMSettings array - *featureID = kBadPMFeatureID; - return; - } + if (noPublishPMSettings && + (noPublishPMSettings->getNextIndexOfObject(feature, 0) != (unsigned int)-1)) { + // Setting found in noPublishPMSettings array + *featureID = kBadPMFeatureID; + return; + } - publishFeature( - feature->getCStringNoCopy(), where, featureID); + publishFeature( + feature->getCStringNoCopy(), where, featureID); } //****************************************************************************** @@ -3764,82 +3728,88 @@ void IOPMrootDomain::publishPMSetting( // drivers. Should be called only by IOPMrootDomain::setProperties. //****************************************************************************** -IOReturn IOPMrootDomain::setPMSetting( - const OSSymbol *type, - OSObject *object ) -{ - PMSettingCallEntry *entries = 0; - OSArray *chosen = 0; - const OSArray *array; - PMSettingObject *pmso; - thread_t thisThread; - int i, j, count, capacity; - - if (NULL == type) - return kIOReturnBadArgument; - - PMSETTING_LOCK(); - - // Update settings dict so changes are visible from copyPMSetting(). - fPMSettingsDict->setObject(type, object); - - // Prep all PMSetting objects with the given 'type' for callout. - array = OSDynamicCast(OSArray, settingsCallbacks->getObject(type)); - if (!array || ((capacity = array->getCount()) == 0)) - goto unlock_exit; - - // Array to retain PMSetting objects targeted for callout. - chosen = OSArray::withCapacity(capacity); - if (!chosen) - goto unlock_exit; // error - - entries = IONew(PMSettingCallEntry, capacity); - if (!entries) - goto unlock_exit; // error - memset(entries, 0, sizeof(PMSettingCallEntry) * capacity); - - thisThread = current_thread(); - - for (i = 0, j = 0; i<capacity; i++) - { - pmso = (PMSettingObject *) array->getObject(i); - if (pmso->disabled) - continue; - entries[j].thread = thisThread; - queue_enter(&pmso->calloutQueue, &entries[j], PMSettingCallEntry *, link); - chosen->setObject(pmso); - j++; - } - count = j; - if (!count) - goto unlock_exit; - - PMSETTING_UNLOCK(); - - // Call each pmso in the chosen array.
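For illustration (hypothetical handler, not part of this patch): the dispatch loop below ultimately invokes callbacks of this shape; the four-argument signature is an assumption based on the IOPMSettingControllerCallback typedef that registerPMSettingController(), shown later in this diff, accepts.

static IOReturn
mySettingHandler(OSObject * target, const OSSymbol * type,
    OSObject * object, uintptr_t refcon)
{
    // PMSettingObject::dispatchPMSetting() hands each registered handler
    // the setting symbol and its new value.
    if (OSDynamicCast(OSNumber, object)) {
        // React to the new numeric setting value here.
    }
    return kIOReturnSuccess;
}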
- for (i=0; i<count; i++) - { - pmso = (PMSettingObject *) chosen->getObject(i); - pmso->dispatchPMSetting(type, object); - } - - PMSETTING_LOCK(); - for (i=0; i<count; i++) - { - pmso = (PMSettingObject *) chosen->getObject(i); - queue_remove(&pmso->calloutQueue, &entries[i], PMSettingCallEntry *, link); - if (pmso->waitThread) - { - PMSETTING_WAKEUP(pmso); - } - } +IOReturn +IOPMrootDomain::setPMSetting( + const OSSymbol *type, + OSObject *object ) +{ + PMSettingCallEntry *entries = 0; + OSArray *chosen = 0; + const OSArray *array; + PMSettingObject *pmso; + thread_t thisThread; + int i, j, count, capacity; + + if (NULL == type) { + return kIOReturnBadArgument; + } + + PMSETTING_LOCK(); + + // Update settings dict so changes are visible from copyPMSetting(). + fPMSettingsDict->setObject(type, object); + + // Prep all PMSetting objects with the given 'type' for callout. + array = OSDynamicCast(OSArray, settingsCallbacks->getObject(type)); + if (!array || ((capacity = array->getCount()) == 0)) { + goto unlock_exit; + } + + // Array to retain PMSetting objects targeted for callout. + chosen = OSArray::withCapacity(capacity); + if (!chosen) { + goto unlock_exit; // error + } + entries = IONew(PMSettingCallEntry, capacity); + if (!entries) { + goto unlock_exit; // error + } + memset(entries, 0, sizeof(PMSettingCallEntry) * capacity); + + thisThread = current_thread(); + + for (i = 0, j = 0; i < capacity; i++) { + pmso = (PMSettingObject *) array->getObject(i); + if (pmso->disabled) { + continue; + } + entries[j].thread = thisThread; + queue_enter(&pmso->calloutQueue, &entries[j], PMSettingCallEntry *, link); + chosen->setObject(pmso); + j++; + } + count = j; + if (!count) { + goto unlock_exit; + } + + PMSETTING_UNLOCK(); + + // Call each pmso in the chosen array. + for (i = 0; i < count; i++) { + pmso = (PMSettingObject *) chosen->getObject(i); + pmso->dispatchPMSetting(type, object); + } + + PMSETTING_LOCK(); + for (i = 0; i < count; i++) { + pmso = (PMSettingObject *) chosen->getObject(i); + queue_remove(&pmso->calloutQueue, &entries[i], PMSettingCallEntry *, link); + if (pmso->waitThread) { + PMSETTING_WAKEUP(pmso); + } + } unlock_exit: - PMSETTING_UNLOCK(); + PMSETTING_UNLOCK(); - if (chosen) chosen->release(); - if (entries) IODelete(entries, PMSettingCallEntry, capacity); + if (chosen) { + chosen->release(); + } + if (entries) { + IODelete(entries, PMSettingCallEntry, capacity); + } - return kIOReturnSuccess; + return kIOReturnSuccess; } //****************************************************************************** @@ -3849,21 +3819,24 @@ unlock_exit: // notifications.
//****************************************************************************** -OSObject * IOPMrootDomain::copyPMSetting( - OSSymbol *whichSetting) +OSObject * +IOPMrootDomain::copyPMSetting( + OSSymbol *whichSetting) { - OSObject *obj = NULL; + OSObject *obj = NULL; - if(!whichSetting) return NULL; + if (!whichSetting) { + return NULL; + } - PMSETTING_LOCK(); - obj = fPMSettingsDict->getObject(whichSetting); - if(obj) { - obj->retain(); - } - PMSETTING_UNLOCK(); + PMSETTING_LOCK(); + obj = fPMSettingsDict->getObject(whichSetting); + if (obj) { + obj->retain(); + } + PMSETTING_UNLOCK(); - return obj; + return obj; } //****************************************************************************** @@ -3872,17 +3845,18 @@ OSObject * IOPMrootDomain::copyPMSetting( // direct wrapper to registerPMSettingController with uint32_t power source arg //****************************************************************************** -IOReturn IOPMrootDomain::registerPMSettingController( - const OSSymbol * settings[], - IOPMSettingControllerCallback func, - OSObject *target, - uintptr_t refcon, - OSObject **handle) +IOReturn +IOPMrootDomain::registerPMSettingController( + const OSSymbol * settings[], + IOPMSettingControllerCallback func, + OSObject *target, + uintptr_t refcon, + OSObject **handle) { - return registerPMSettingController( - settings, - (kIOPMSupportedOnAC | kIOPMSupportedOnBatt | kIOPMSupportedOnUPS), - func, target, refcon, handle); + return registerPMSettingController( + settings, + (kIOPMSupportedOnAC | kIOPMSupportedOnBatt | kIOPMSupportedOnUPS), + func, target, refcon, handle); } //****************************************************************************** @@ -3905,55 +3879,54 @@ IOReturn IOPMrootDomain::registerPMSettingController( // kIOReturnSuccess on success //****************************************************************************** -IOReturn IOPMrootDomain::registerPMSettingController( - const OSSymbol * settings[], - uint32_t supportedPowerSources, - IOPMSettingControllerCallback func, - OSObject *target, - uintptr_t refcon, - OSObject **handle) -{ - PMSettingObject *pmso = NULL; - OSObject *pmsh = NULL; - OSArray *list = NULL; - int i; - - if (NULL == settings || - NULL == func || - NULL == handle) - { - return kIOReturnBadArgument; - } +IOReturn +IOPMrootDomain::registerPMSettingController( + const OSSymbol * settings[], + uint32_t supportedPowerSources, + IOPMSettingControllerCallback func, + OSObject *target, + uintptr_t refcon, + OSObject **handle) +{ + PMSettingObject *pmso = NULL; + OSObject *pmsh = NULL; + OSArray *list = NULL; + int i; + + if (NULL == settings || + NULL == func || + NULL == handle) { + return kIOReturnBadArgument; + } - pmso = PMSettingObject::pmSettingObject( - (IOPMrootDomain *) this, func, target, - refcon, supportedPowerSources, settings, &pmsh); + pmso = PMSettingObject::pmSettingObject( + (IOPMrootDomain *) this, func, target, + refcon, supportedPowerSources, settings, &pmsh); - if (!pmso) { - *handle = NULL; - return kIOReturnInternalError; - } + if (!pmso) { + *handle = NULL; + return kIOReturnInternalError; + } - PMSETTING_LOCK(); - for (i=0; settings[i]; i++) - { - list = OSDynamicCast(OSArray, settingsCallbacks->getObject(settings[i])); - if (!list) { - // New array of callbacks for this setting - list = OSArray::withCapacity(1); - settingsCallbacks->setObject(settings[i], list); - list->release(); - } + PMSETTING_LOCK(); + for (i = 0; settings[i]; i++) { + list = OSDynamicCast(OSArray, settingsCallbacks->getObject(settings[i])); + 
if (!list) { + // New array of callbacks for this setting + list = OSArray::withCapacity(1); + settingsCallbacks->setObject(settings[i], list); + list->release(); + } - // Add caller to the callback list - list->setObject(pmso); - } - PMSETTING_UNLOCK(); + // Add caller to the callback list + list->setObject(pmso); + } + PMSETTING_UNLOCK(); - // Return handle to the caller, the setting object is private. - *handle = pmsh; + // Return handle to the caller, the setting object is private. + *handle = pmsh; - return kIOReturnSuccess; + return kIOReturnSuccess; } //****************************************************************************** @@ -3962,58 +3935,55 @@ IOReturn IOPMrootDomain::registerPMSettingController( // Only called from PMSettingObject. //****************************************************************************** -void IOPMrootDomain::deregisterPMSettingObject( PMSettingObject * pmso ) -{ - thread_t thisThread = current_thread(); - PMSettingCallEntry *callEntry; - OSCollectionIterator *iter; - OSSymbol *sym; - OSArray *array; - int index; - bool wait; - - PMSETTING_LOCK(); - - pmso->disabled = true; - - // Wait for all callout threads to finish. - do { - wait = false; - queue_iterate(&pmso->calloutQueue, callEntry, PMSettingCallEntry *, link) - { - if (callEntry->thread != thisThread) - { - wait = true; - break; - } - } - if (wait) - { - assert(0 == pmso->waitThread); - pmso->waitThread = thisThread; - PMSETTING_WAIT(pmso); - pmso->waitThread = 0; - } - } while (wait); - - // Search each PM settings array in the kernel. - iter = OSCollectionIterator::withCollection(settingsCallbacks); - if (iter) - { - while ((sym = OSDynamicCast(OSSymbol, iter->getNextObject()))) - { - array = OSDynamicCast(OSArray, settingsCallbacks->getObject(sym)); - index = array->getNextIndexOfObject(pmso, 0); - if (-1 != index) { - array->removeObject(index); - } - } - iter->release(); - } - - PMSETTING_UNLOCK(); - - pmso->release(); +void +IOPMrootDomain::deregisterPMSettingObject( PMSettingObject * pmso ) +{ + thread_t thisThread = current_thread(); + PMSettingCallEntry *callEntry; + OSCollectionIterator *iter; + OSSymbol *sym; + OSArray *array; + int index; + bool wait; + + PMSETTING_LOCK(); + + pmso->disabled = true; + + // Wait for all callout threads to finish. + do { + wait = false; + queue_iterate(&pmso->calloutQueue, callEntry, PMSettingCallEntry *, link) + { + if (callEntry->thread != thisThread) { + wait = true; + break; + } + } + if (wait) { + assert(0 == pmso->waitThread); + pmso->waitThread = thisThread; + PMSETTING_WAIT(pmso); + pmso->waitThread = 0; + } + } while (wait); + + // Search each PM settings array in the kernel. 
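For illustration (hypothetical names, assumed to run inside a driver member function): a caller of registerPMSettingController() as shown above. The settings array is NULL-terminated, matching the for (i = 0; settings[i]; i++) registration loop, and the returned opaque handle identifies the registration that the deregistration path below tears down.

const OSSymbol * settings[2] =
    { OSSymbol::withCString("MyHypotheticalSetting"), NULL };
OSObject * handle = NULL;

IOReturn rc = getPMRootDomain()->registerPMSettingController(
    settings, &mySettingHandler, this, 0 /* refcon */, &handle);
// On success, rc == kIOReturnSuccess; release the OSSymbol when done
// (omitted here for brevity).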
+ iter = OSCollectionIterator::withCollection(settingsCallbacks); + if (iter) { + while ((sym = OSDynamicCast(OSSymbol, iter->getNextObject()))) { + array = OSDynamicCast(OSArray, settingsCallbacks->getObject(sym)); + index = array->getNextIndexOfObject(pmso, 0); + if (-1 != index) { + array->removeObject(index); + } + } + iter->release(); + } + + PMSETTING_UNLOCK(); + + pmso->release(); } //****************************************************************************** @@ -4026,57 +3996,56 @@ void IOPMrootDomain::deregisterPMSettingObject( PMSettingObject * pmso ) // only x86 has explicit support in the IntelCPUPowerManagement kext //****************************************************************************** -void IOPMrootDomain::informCPUStateChange( - uint32_t type, - uint32_t value ) +void +IOPMrootDomain::informCPUStateChange( + uint32_t type, + uint32_t value ) { #if defined(__i386__) || defined(__x86_64__) - pmioctlVariableInfo_t varInfoStruct; - int pmCPUret = 0; - const char *varNameStr = NULL; - int32_t *varIndex = NULL; - - if (kInformAC == type) { - varNameStr = kIOPMRootDomainBatPowerCString; - varIndex = &idxPMCPULimitedPower; - } else if (kInformLid == type) { - varNameStr = kIOPMRootDomainLidCloseCString; - varIndex = &idxPMCPUClamshell; - } else { - return; - } - - // Set the new value! - // pmCPUControl will assign us a new ID if one doesn't exist yet - bzero(&varInfoStruct, sizeof(pmioctlVariableInfo_t)); - varInfoStruct.varID = *varIndex; - varInfoStruct.varType = vBool; - varInfoStruct.varInitValue = value; - varInfoStruct.varCurValue = value; - strlcpy( (char *)varInfoStruct.varName, - (const char *)varNameStr, - sizeof(varInfoStruct.varName)); - - // Set! - pmCPUret = pmCPUControl( PMIOCSETVARINFO, (void *)&varInfoStruct ); - - // pmCPU only assigns numerical id's when a new varName is specified - if ((0 == pmCPUret) - && (*varIndex == kCPUUnknownIndex)) - { - // pmCPUControl has assigned us a new variable ID. - // Let's re-read the structure we just SET to learn that ID. - pmCPUret = pmCPUControl( PMIOCGETVARNAMEINFO, (void *)&varInfoStruct ); - - if (0 == pmCPUret) - { - // Store it in idxPMCPUClamshell or idxPMCPULimitedPower - *varIndex = varInfoStruct.varID; - } - } - - return; + pmioctlVariableInfo_t varInfoStruct; + int pmCPUret = 0; + const char *varNameStr = NULL; + int32_t *varIndex = NULL; + + if (kInformAC == type) { + varNameStr = kIOPMRootDomainBatPowerCString; + varIndex = &idxPMCPULimitedPower; + } else if (kInformLid == type) { + varNameStr = kIOPMRootDomainLidCloseCString; + varIndex = &idxPMCPUClamshell; + } else { + return; + } + + // Set the new value! + // pmCPUControl will assign us a new ID if one doesn't exist yet + bzero(&varInfoStruct, sizeof(pmioctlVariableInfo_t)); + varInfoStruct.varID = *varIndex; + varInfoStruct.varType = vBool; + varInfoStruct.varInitValue = value; + varInfoStruct.varCurValue = value; + strlcpy((char *)varInfoStruct.varName, + (const char *)varNameStr, + sizeof(varInfoStruct.varName)); + + // Set! + pmCPUret = pmCPUControl( PMIOCSETVARINFO, (void *)&varInfoStruct ); + + // pmCPU only assigns numerical id's when a new varName is specified + if ((0 == pmCPUret) + && (*varIndex == kCPUUnknownIndex)) { + // pmCPUControl has assigned us a new variable ID. + // Let's re-read the structure we just SET to learn that ID. 
+ pmCPUret = pmCPUControl( PMIOCGETVARNAMEINFO, (void *)&varInfoStruct ); + + if (0 == pmCPUret) { + // Store it in idxPMCPUClamshell or idxPMCPULimitedPower + *varIndex = varInfoStruct.varID; + } + } + + return; #endif /* __i386__ || __x86_64__ */ } @@ -4094,764 +4063,756 @@ void IOPMrootDomain::informCPUStateChange( // Sleep flags enum { - kIOPMSleepFlagHibernate = 0x00000001, - kIOPMSleepFlagSleepTimerEnable = 0x00000002 + kIOPMSleepFlagHibernate = 0x00000001, + kIOPMSleepFlagSleepTimerEnable = 0x00000002 }; -struct IOPMSystemSleepPolicyEntry -{ - uint32_t factorMask; - uint32_t factorBits; - uint32_t sleepFlags; - uint32_t wakeEvents; +struct IOPMSystemSleepPolicyEntry { + uint32_t factorMask; + uint32_t factorBits; + uint32_t sleepFlags; + uint32_t wakeEvents; } __attribute__((packed)); -struct IOPMSystemSleepPolicyTable -{ - uint32_t signature; - uint16_t version; - uint16_t entryCount; - IOPMSystemSleepPolicyEntry entries[]; +struct IOPMSystemSleepPolicyTable { + uint32_t signature; + uint16_t version; + uint16_t entryCount; + IOPMSystemSleepPolicyEntry entries[]; } __attribute__((packed)); enum { - kIOPMSleepAttributeHibernateSetup = 0x00000001, - kIOPMSleepAttributeHibernateSleep = 0x00000002 + kIOPMSleepAttributeHibernateSetup = 0x00000001, + kIOPMSleepAttributeHibernateSleep = 0x00000002 }; static uint32_t getSleepTypeAttributes( uint32_t sleepType ) { - static const uint32_t sleepTypeAttributes[ kIOPMSleepTypeLast ] = - { - /* invalid */ 0, - /* abort */ 0, - /* normal */ 0, - /* safesleep */ kIOPMSleepAttributeHibernateSetup, - /* hibernate */ kIOPMSleepAttributeHibernateSetup | kIOPMSleepAttributeHibernateSleep, - /* standby */ kIOPMSleepAttributeHibernateSetup | kIOPMSleepAttributeHibernateSleep, - /* poweroff */ kIOPMSleepAttributeHibernateSetup | kIOPMSleepAttributeHibernateSleep, - /* deepidle */ 0 - }; - - if (sleepType >= kIOPMSleepTypeLast) - return 0; - - return sleepTypeAttributes[sleepType]; -} - -bool IOPMrootDomain::evaluateSystemSleepPolicy( - IOPMSystemSleepParameters * params, int sleepPhase, uint32_t * hibMode ) -{ - const IOPMSystemSleepPolicyTable * pt; - OSObject * prop = 0; - OSData * policyData; - uint64_t currentFactors = 0; - uint32_t standbyDelay = 0; - uint32_t powerOffDelay = 0; - uint32_t powerOffTimer = 0; - uint32_t standbyTimer = 0; - uint32_t mismatch; - bool standbyEnabled; - bool powerOffEnabled; - bool found = false; - - // Get platform's sleep policy table - if (!gSleepPolicyHandler) - { - prop = getServiceRoot()->copyProperty(kIOPlatformSystemSleepPolicyKey); - if (!prop) goto done; - } - - // Fetch additional settings - standbyEnabled = (getSleepOption(kIOPMDeepSleepDelayKey, &standbyDelay) - && (getProperty(kIOPMDeepSleepEnabledKey) == kOSBooleanTrue)); - powerOffEnabled = (getSleepOption(kIOPMAutoPowerOffDelayKey, &powerOffDelay) - && (getProperty(kIOPMAutoPowerOffEnabledKey) == kOSBooleanTrue)); - if (!getSleepOption(kIOPMAutoPowerOffTimerKey, &powerOffTimer)) - powerOffTimer = powerOffDelay; - if (!getSleepOption(kIOPMDeepSleepTimerKey, &standbyTimer)) - standbyTimer = standbyDelay; - - DLOG("phase %d, standby %d delay %u timer %u, poweroff %d delay %u timer %u, hibernate 0x%x\n", - sleepPhase, standbyEnabled, standbyDelay, standbyTimer, - powerOffEnabled, powerOffDelay, powerOffTimer, *hibMode); - - // pmset level overrides - if ((*hibMode & kIOHibernateModeOn) == 0) - { - if (!gSleepPolicyHandler) - { - standbyEnabled = false; - powerOffEnabled = false; - } - } - else if (!(*hibMode & kIOHibernateModeSleep)) - { - // Force hibernate 
(i.e. mode 25) - // If standby is enabled, force standby. - // If poweroff is enabled, force poweroff. - if (standbyEnabled) - currentFactors |= kIOPMSleepFactorStandbyForced; - else if (powerOffEnabled) - currentFactors |= kIOPMSleepFactorAutoPowerOffForced; - else - currentFactors |= kIOPMSleepFactorHibernateForced; - } - - // Current factors based on environment and assertions - if (sleepTimerMaintenance) - currentFactors |= kIOPMSleepFactorSleepTimerWake; - if (standbyEnabled && sleepToStandby && !gSleepPolicyHandler) - currentFactors |= kIOPMSleepFactorSleepTimerWake; - if (!clamshellClosed) - currentFactors |= kIOPMSleepFactorLidOpen; - if (acAdaptorConnected) - currentFactors |= kIOPMSleepFactorACPower; - if (lowBatteryCondition) - currentFactors |= kIOPMSleepFactorBatteryLow; - if (!standbyDelay || !standbyTimer) - currentFactors |= kIOPMSleepFactorStandbyNoDelay; - if (standbyNixed || !standbyEnabled) - currentFactors |= kIOPMSleepFactorStandbyDisabled; - if (resetTimers) - { - currentFactors |= kIOPMSleepFactorLocalUserActivity; - currentFactors &= ~kIOPMSleepFactorSleepTimerWake; - } - if (getPMAssertionLevel(kIOPMDriverAssertionUSBExternalDeviceBit) != - kIOPMDriverAssertionLevelOff) - currentFactors |= kIOPMSleepFactorUSBExternalDevice; - if (getPMAssertionLevel(kIOPMDriverAssertionBluetoothHIDDevicePairedBit) != - kIOPMDriverAssertionLevelOff) - currentFactors |= kIOPMSleepFactorBluetoothHIDDevice; - if (getPMAssertionLevel(kIOPMDriverAssertionExternalMediaMountedBit) != - kIOPMDriverAssertionLevelOff) - currentFactors |= kIOPMSleepFactorExternalMediaMounted; - if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit5) != - kIOPMDriverAssertionLevelOff) - currentFactors |= kIOPMSleepFactorThunderboltDevice; - if (_scheduledAlarms != 0) - currentFactors |= kIOPMSleepFactorRTCAlarmScheduled; - if (getPMAssertionLevel(kIOPMDriverAssertionMagicPacketWakeEnabledBit) != - kIOPMDriverAssertionLevelOff) - currentFactors |= kIOPMSleepFactorMagicPacketWakeEnabled; -#define TCPKEEPALIVE 1 -#if TCPKEEPALIVE - if (getPMAssertionLevel(kIOPMDriverAssertionNetworkKeepAliveActiveBit) != - kIOPMDriverAssertionLevelOff) - currentFactors |= kIOPMSleepFactorNetworkKeepAliveActive; -#endif - if (!powerOffEnabled) - currentFactors |= kIOPMSleepFactorAutoPowerOffDisabled; - if (desktopMode) - currentFactors |= kIOPMSleepFactorExternalDisplay; - if (userWasActive) - currentFactors |= kIOPMSleepFactorLocalUserActivity; - if (darkWakeHibernateError && !CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) - currentFactors |= kIOPMSleepFactorHibernateFailed; - if (thermalWarningState) - currentFactors |= kIOPMSleepFactorThermalWarning; - - DLOG("sleep factors 0x%llx\n", currentFactors); - - if (gSleepPolicyHandler) - { - uint32_t savedHibernateMode; - IOReturn result; - - if (!gSleepPolicyVars) - { - gSleepPolicyVars = IONew(IOPMSystemSleepPolicyVariables, 1); - if (!gSleepPolicyVars) - goto done; - bzero(gSleepPolicyVars, sizeof(*gSleepPolicyVars)); - } - gSleepPolicyVars->signature = kIOPMSystemSleepPolicySignature; - gSleepPolicyVars->version = kIOPMSystemSleepPolicyVersion; - gSleepPolicyVars->currentCapability = _currentCapability; - gSleepPolicyVars->highestCapability = _highestCapability; - gSleepPolicyVars->sleepFactors = currentFactors; - gSleepPolicyVars->sleepReason = lastSleepReason; - gSleepPolicyVars->sleepPhase = sleepPhase; - gSleepPolicyVars->standbyDelay = standbyDelay; - gSleepPolicyVars->standbyTimer = standbyTimer; - gSleepPolicyVars->poweroffDelay = powerOffDelay; -
gSleepPolicyVars->scheduledAlarms = _scheduledAlarms | _userScheduledAlarm; - gSleepPolicyVars->poweroffTimer = powerOffTimer; - - if (kIOPMSleepPhase0 == sleepPhase) - { - // preserve hibernateMode - savedHibernateMode = gSleepPolicyVars->hibernateMode; - gSleepPolicyVars->hibernateMode = *hibMode; - } - else if (kIOPMSleepPhase1 == sleepPhase) - { - // use original hibernateMode for phase2 - gSleepPolicyVars->hibernateMode = *hibMode; - } - - result = gSleepPolicyHandler(gSleepPolicyTarget, gSleepPolicyVars, params); - - if (kIOPMSleepPhase0 == sleepPhase) - { - // restore hibernateMode - gSleepPolicyVars->hibernateMode = savedHibernateMode; - } - - if ((result != kIOReturnSuccess) || - (kIOPMSleepTypeInvalid == params->sleepType) || - (params->sleepType >= kIOPMSleepTypeLast) || - (kIOPMSystemSleepParametersVersion != params->version)) - { - MSG("sleep policy handler error\n"); - goto done; - } - - if ((getSleepTypeAttributes(params->sleepType) & - kIOPMSleepAttributeHibernateSetup) && - ((*hibMode & kIOHibernateModeOn) == 0)) - { - *hibMode |= (kIOHibernateModeOn | kIOHibernateModeSleep); - } - - DLOG("sleep params v%u, type %u, flags 0x%x, wake 0x%x, timer %u, poweroff %u\n", - params->version, params->sleepType, params->sleepFlags, - params->ecWakeEvents, params->ecWakeTimer, params->ecPoweroffTimer); - found = true; - goto done; - } - - // Policy table is meaningless without standby enabled - if (!standbyEnabled) - goto done; - - // Validate the sleep policy table - policyData = OSDynamicCast(OSData, prop); - if (!policyData || (policyData->getLength() <= sizeof(IOPMSystemSleepPolicyTable))) - goto done; - - pt = (const IOPMSystemSleepPolicyTable *) policyData->getBytesNoCopy(); - if ((pt->signature != kIOPMSystemSleepPolicySignature) || - (pt->version != 1) || (0 == pt->entryCount)) - goto done; - - if (((policyData->getLength() - sizeof(IOPMSystemSleepPolicyTable)) != - (sizeof(IOPMSystemSleepPolicyEntry) * pt->entryCount))) - goto done; - - for (uint32_t i = 0; i < pt->entryCount; i++) - { - const IOPMSystemSleepPolicyEntry * entry = &pt->entries[i]; - mismatch = (((uint32_t)currentFactors ^ entry->factorBits) & entry->factorMask); - - DLOG("mask 0x%08x, bits 0x%08x, flags 0x%08x, wake 0x%08x, mismatch 0x%08x\n", - entry->factorMask, entry->factorBits, - entry->sleepFlags, entry->wakeEvents, mismatch); - if (mismatch) - continue; - - DLOG("^ found match\n"); - found = true; - - params->version = kIOPMSystemSleepParametersVersion; - params->reserved1 = 1; - if (entry->sleepFlags & kIOPMSleepFlagHibernate) - params->sleepType = kIOPMSleepTypeStandby; - else - params->sleepType = kIOPMSleepTypeNormalSleep; - - params->ecWakeEvents = entry->wakeEvents; - if (entry->sleepFlags & kIOPMSleepFlagSleepTimerEnable) - { - if (kIOPMSleepPhase2 == sleepPhase) - { - clock_sec_t now_secs = gIOLastSleepTime.tv_sec; - - if (!_standbyTimerResetSeconds || - (now_secs <= _standbyTimerResetSeconds)) - { - // Reset standby timer adjustment - _standbyTimerResetSeconds = now_secs; - DLOG("standby delay %u, reset %u\n", - standbyDelay, (uint32_t) _standbyTimerResetSeconds); - } - else if (standbyDelay) - { - // Shorten the standby delay timer - clock_sec_t elapsed = now_secs - _standbyTimerResetSeconds; - if (standbyDelay > elapsed) - standbyDelay -= elapsed; - else - standbyDelay = 1; // must be > 0 - - DLOG("standby delay %u, elapsed %u\n", - standbyDelay, (uint32_t) elapsed); - } - } - params->ecWakeTimer = standbyDelay; - } - else if (kIOPMSleepPhase2 == sleepPhase) - { - // A sleep that does not 
enable the sleep timer will reset - // the standby delay adjustment. - _standbyTimerResetSeconds = 0; - } - break; - } + static const uint32_t sleepTypeAttributes[kIOPMSleepTypeLast] = + { + /* invalid */ 0, + /* abort */ 0, + /* normal */ 0, + /* safesleep */ kIOPMSleepAttributeHibernateSetup, + /* hibernate */ kIOPMSleepAttributeHibernateSetup | kIOPMSleepAttributeHibernateSleep, + /* standby */ kIOPMSleepAttributeHibernateSetup | kIOPMSleepAttributeHibernateSleep, + /* poweroff */ kIOPMSleepAttributeHibernateSetup | kIOPMSleepAttributeHibernateSleep, + /* deepidle */ 0 + }; + + if (sleepType >= kIOPMSleepTypeLast) { + return 0; + } -done: - if (prop) - prop->release(); + return sleepTypeAttributes[sleepType]; +} + +bool +IOPMrootDomain::evaluateSystemSleepPolicy( + IOPMSystemSleepParameters * params, int sleepPhase, uint32_t * hibMode ) +{ + const IOPMSystemSleepPolicyTable * pt; + OSObject * prop = 0; + OSData * policyData; + uint64_t currentFactors = 0; + uint32_t standbyDelay = 0; + uint32_t powerOffDelay = 0; + uint32_t powerOffTimer = 0; + uint32_t standbyTimer = 0; + uint32_t mismatch; + bool standbyEnabled; + bool powerOffEnabled; + bool found = false; + + // Get platform's sleep policy table + if (!gSleepPolicyHandler) { + prop = getServiceRoot()->copyProperty(kIOPlatformSystemSleepPolicyKey); + if (!prop) { + goto done; + } + } - return found; -} + // Fetch additional settings + standbyEnabled = (getSleepOption(kIOPMDeepSleepDelayKey, &standbyDelay) + && (getProperty(kIOPMDeepSleepEnabledKey) == kOSBooleanTrue)); + powerOffEnabled = (getSleepOption(kIOPMAutoPowerOffDelayKey, &powerOffDelay) + && (getProperty(kIOPMAutoPowerOffEnabledKey) == kOSBooleanTrue)); + if (!getSleepOption(kIOPMAutoPowerOffTimerKey, &powerOffTimer)) { + powerOffTimer = powerOffDelay; + } + if (!getSleepOption(kIOPMDeepSleepTimerKey, &standbyTimer)) { + standbyTimer = standbyDelay; + } -static IOPMSystemSleepParameters gEarlySystemSleepParams; + DLOG("phase %d, standby %d delay %u timer %u, poweroff %d delay %u timer %u, hibernate 0x%x\n", + sleepPhase, standbyEnabled, standbyDelay, standbyTimer, + powerOffEnabled, powerOffDelay, powerOffTimer, *hibMode); -void IOPMrootDomain::evaluateSystemSleepPolicyEarly( void ) -{ - // Evaluate early (priority interest phase), before drivers sleep. - - DLOG("%s\n", __FUNCTION__); - removeProperty(kIOPMSystemSleepParametersKey); - - // Full wake resets the standby timer delay adjustment - if (_highestCapability & kIOPMSystemCapabilityGraphics) - _standbyTimerResetSeconds = 0; - - hibernateDisabled = false; - hibernateMode = 0; - getSleepOption(kIOHibernateModeKey, &hibernateMode); - - // Save for late evaluation if sleep is aborted - bzero(&gEarlySystemSleepParams, sizeof(gEarlySystemSleepParams)); - - if (evaluateSystemSleepPolicy(&gEarlySystemSleepParams, kIOPMSleepPhase1, - &hibernateMode)) - { - if (!hibernateRetry && - ((getSleepTypeAttributes(gEarlySystemSleepParams.sleepType) & - kIOPMSleepAttributeHibernateSetup) == 0)) - { - // skip hibernate setup - hibernateDisabled = true; - } - } - - // Publish IOPMSystemSleepType - uint32_t sleepType = gEarlySystemSleepParams.sleepType; - if (sleepType == kIOPMSleepTypeInvalid) - { - // no sleep policy - sleepType = kIOPMSleepTypeNormalSleep; - if (hibernateMode & kIOHibernateModeOn) - sleepType = (hibernateMode & kIOHibernateModeSleep) ? 
-                kIOPMSleepTypeSafeSleep : kIOPMSleepTypeHibernate;
-    }
-    else if ((sleepType == kIOPMSleepTypeStandby) &&
-        (gEarlySystemSleepParams.ecPoweroffTimer))
-    {
-        // report the lowest possible sleep state
-        sleepType = kIOPMSleepTypePowerOff;
-    }
-
-    setProperty(kIOPMSystemSleepTypeKey, sleepType, 32);
-}
-
-void IOPMrootDomain::evaluateSystemSleepPolicyFinal( void )
-{
-    IOPMSystemSleepParameters   params;
-    OSData *                    paramsData;
-    bool                        wakeNow;
-    // Evaluate sleep policy after sleeping drivers but before platform sleep.
-
-    DLOG("%s\n", __FUNCTION__);
-
-    bzero(&params, sizeof(params));
-    wakeNow = false;
-    if (evaluateSystemSleepPolicy(&params, kIOPMSleepPhase2, &hibernateMode))
-    {
-        if ((kIOPMSleepTypeStandby == params.sleepType)
-         && gIOHibernateStandbyDisabled && gSleepPolicyVars
-         && (!((kIOPMSleepFactorStandbyForced|kIOPMSleepFactorAutoPowerOffForced|kIOPMSleepFactorHibernateForced)
-               & gSleepPolicyVars->sleepFactors)))
-        {
-            standbyNixed = true;
-            wakeNow = true;
-        }
-        if (wakeNow
-        || ((hibernateDisabled || hibernateAborted) &&
-            (getSleepTypeAttributes(params.sleepType) &
-             kIOPMSleepAttributeHibernateSetup)))
-        {
-            // Final evaluation picked a state requiring hibernation,
-            // but hibernate isn't going to proceed. Arm a short sleep using
-            // the early non-hibernate sleep parameters.
-            bcopy(&gEarlySystemSleepParams, &params, sizeof(params));
-            params.sleepType = kIOPMSleepTypeAbortedSleep;
-            params.ecWakeTimer = 1;
-            if (standbyNixed)
-            {
-                resetTimers = true;
-            }
-            else
-            {
-                // Set hibernateRetry flag to force hibernate setup on the
-                // next sleep.
-                hibernateRetry = true;
-            }
-            DLOG("wake in %u secs for hibernateDisabled %d, hibernateAborted %d, standbyNixed %d\n",
-               params.ecWakeTimer, hibernateDisabled, hibernateAborted, standbyNixed);
-        }
-        else
-        {
-            hibernateRetry = false;
-        }
-
-        if (kIOPMSleepTypeAbortedSleep != params.sleepType)
-        {
-            resetTimers = false;
-        }
-
-        paramsData = OSData::withBytes(&params, sizeof(params));
-        if (paramsData)
-        {
-            setProperty(kIOPMSystemSleepParametersKey, paramsData);
-            paramsData->release();
-        }
-
-        if (getSleepTypeAttributes(params.sleepType) &
-            kIOPMSleepAttributeHibernateSleep)
-        {
-            // Disable sleep to force hibernation
-            gIOHibernateMode &= ~kIOHibernateModeSleep;
-        }
-    }
-}
-
-bool IOPMrootDomain::getHibernateSettings(
-    uint32_t * hibernateModePtr,
-    uint32_t * hibernateFreeRatio,
-    uint32_t * hibernateFreeTime )
-{
-    // Called by IOHibernateSystemSleep() after evaluateSystemSleepPolicyEarly()
-    // has updated the hibernateDisabled flag.
- - bool ok = getSleepOption(kIOHibernateModeKey, hibernateModePtr); - getSleepOption(kIOHibernateFreeRatioKey, hibernateFreeRatio); - getSleepOption(kIOHibernateFreeTimeKey, hibernateFreeTime); - if (hibernateDisabled) - *hibernateModePtr = 0; - else if (gSleepPolicyHandler) - *hibernateModePtr = hibernateMode; - DLOG("hibernateMode 0x%x\n", *hibernateModePtr); - return ok; -} - -bool IOPMrootDomain::getSleepOption( const char * key, uint32_t * option ) -{ - OSObject * optionsProp; - OSDictionary * optionsDict; - OSObject * obj = 0; - OSNumber * num; - bool ok = false; - - optionsProp = copyProperty(kRootDomainSleepOptionsKey); - optionsDict = OSDynamicCast(OSDictionary, optionsProp); - - if (optionsDict) - { - obj = optionsDict->getObject(key); - if (obj) obj->retain(); - } - if (!obj) - { - obj = copyProperty(key); - } - if (obj) - { - if ((num = OSDynamicCast(OSNumber, obj))) - { - *option = num->unsigned32BitValue(); - ok = true; - } - else if (OSDynamicCast(OSBoolean, obj)) - { - *option = (obj == kOSBooleanTrue) ? 1 : 0; - ok = true; - } - } - - if (obj) - obj->release(); - if (optionsProp) - optionsProp->release(); - - return ok; -} -#endif /* HIBERNATION */ + // pmset level overrides + if ((*hibMode & kIOHibernateModeOn) == 0) { + if (!gSleepPolicyHandler) { + standbyEnabled = false; + powerOffEnabled = false; + } + } else if (!(*hibMode & kIOHibernateModeSleep)) { + // Force hibernate (i.e. mode 25) + // If standby is enabled, force standy. + // If poweroff is enabled, force poweroff. + if (standbyEnabled) { + currentFactors |= kIOPMSleepFactorStandbyForced; + } else if (powerOffEnabled) { + currentFactors |= kIOPMSleepFactorAutoPowerOffForced; + } else { + currentFactors |= kIOPMSleepFactorHibernateForced; + } + } -IOReturn IOPMrootDomain::getSystemSleepType( uint32_t * sleepType, uint32_t * standbyTimer ) -{ -#if HIBERNATION - IOPMSystemSleepParameters params; - uint32_t hibMode = 0; - bool ok; - - if (gIOPMWorkLoop->inGate() == false) - { - IOReturn ret = gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, this, - &IOPMrootDomain::getSystemSleepType), - (OSObject *) this, - (void *) sleepType, (void *) standbyTimer); - return ret; - } - - getSleepOption(kIOHibernateModeKey, &hibMode); - bzero(¶ms, sizeof(params)); - - ok = evaluateSystemSleepPolicy(¶ms, kIOPMSleepPhase0, &hibMode); - if (ok) - { - *sleepType = params.sleepType; - if (!getSleepOption(kIOPMDeepSleepTimerKey, standbyTimer) && - !getSleepOption(kIOPMDeepSleepDelayKey, standbyTimer)) { - DLOG("Standby delay is not set\n"); - *standbyTimer = 0; - } - return kIOReturnSuccess; - } + // Current factors based on environment and assertions + if (sleepTimerMaintenance) { + currentFactors |= kIOPMSleepFactorSleepTimerWake; + } + if (standbyEnabled && sleepToStandby && !gSleepPolicyHandler) { + currentFactors |= kIOPMSleepFactorSleepTimerWake; + } + if (!clamshellClosed) { + currentFactors |= kIOPMSleepFactorLidOpen; + } + if (acAdaptorConnected) { + currentFactors |= kIOPMSleepFactorACPower; + } + if (lowBatteryCondition) { + currentFactors |= kIOPMSleepFactorBatteryLow; + } + if (!standbyDelay || !standbyTimer) { + currentFactors |= kIOPMSleepFactorStandbyNoDelay; + } + if (standbyNixed || !standbyEnabled) { + currentFactors |= kIOPMSleepFactorStandbyDisabled; + } + if (resetTimers) { + currentFactors |= kIOPMSleepFactorLocalUserActivity; + currentFactors &= ~kIOPMSleepFactorSleepTimerWake; + } + if (getPMAssertionLevel(kIOPMDriverAssertionUSBExternalDeviceBit) != + kIOPMDriverAssertionLevelOff) { + 
currentFactors |= kIOPMSleepFactorUSBExternalDevice; + } + if (getPMAssertionLevel(kIOPMDriverAssertionBluetoothHIDDevicePairedBit) != + kIOPMDriverAssertionLevelOff) { + currentFactors |= kIOPMSleepFactorBluetoothHIDDevice; + } + if (getPMAssertionLevel(kIOPMDriverAssertionExternalMediaMountedBit) != + kIOPMDriverAssertionLevelOff) { + currentFactors |= kIOPMSleepFactorExternalMediaMounted; + } + if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit5) != + kIOPMDriverAssertionLevelOff) { + currentFactors |= kIOPMSleepFactorThunderboltDevice; + } + if (_scheduledAlarms != 0) { + currentFactors |= kIOPMSleepFactorRTCAlarmScheduled; + } + if (getPMAssertionLevel(kIOPMDriverAssertionMagicPacketWakeEnabledBit) != + kIOPMDriverAssertionLevelOff) { + currentFactors |= kIOPMSleepFactorMagicPacketWakeEnabled; + } +#define TCPKEEPALIVE 1 +#if TCPKEEPALIVE + if (getPMAssertionLevel(kIOPMDriverAssertionNetworkKeepAliveActiveBit) != + kIOPMDriverAssertionLevelOff) { + currentFactors |= kIOPMSleepFactorNetworkKeepAliveActive; + } #endif + if (!powerOffEnabled) { + currentFactors |= kIOPMSleepFactorAutoPowerOffDisabled; + } + if (desktopMode) { + currentFactors |= kIOPMSleepFactorExternalDisplay; + } + if (userWasActive) { + currentFactors |= kIOPMSleepFactorLocalUserActivity; + } + if (darkWakeHibernateError && !CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) { + currentFactors |= kIOPMSleepFactorHibernateFailed; + } + if (thermalWarningState) { + currentFactors |= kIOPMSleepFactorThermalWarning; + } - return kIOReturnUnsupported; -} - -// MARK: - -// MARK: Shutdown and Restart + DLOG("sleep factors 0x%llx\n", currentFactors); -//****************************************************************************** -// handlePlatformHaltRestart -// -//****************************************************************************** + if (gSleepPolicyHandler) { + uint32_t savedHibernateMode; + IOReturn result; -// Phases while performing shutdown/restart -typedef enum { - kNotifyDone = 0x00, - kNotifyPriorityClients = 0x10, - kNotifyPowerPlaneDrivers = 0x20, - kNotifyHaltRestartAction = 0x30, - kQuiescePM = 0x40, -} shutdownPhase_t; + if (!gSleepPolicyVars) { + gSleepPolicyVars = IONew(IOPMSystemSleepPolicyVariables, 1); + if (!gSleepPolicyVars) { + goto done; + } + bzero(gSleepPolicyVars, sizeof(*gSleepPolicyVars)); + } + gSleepPolicyVars->signature = kIOPMSystemSleepPolicySignature; + gSleepPolicyVars->version = kIOPMSystemSleepPolicyVersion; + gSleepPolicyVars->currentCapability = _currentCapability; + gSleepPolicyVars->highestCapability = _highestCapability; + gSleepPolicyVars->sleepFactors = currentFactors; + gSleepPolicyVars->sleepReason = lastSleepReason; + gSleepPolicyVars->sleepPhase = sleepPhase; + gSleepPolicyVars->standbyDelay = standbyDelay; + gSleepPolicyVars->standbyTimer = standbyTimer; + gSleepPolicyVars->poweroffDelay = powerOffDelay; + gSleepPolicyVars->scheduledAlarms = _scheduledAlarms | _userScheduledAlarm; + gSleepPolicyVars->poweroffTimer = powerOffTimer; + + if (kIOPMSleepPhase0 == sleepPhase) { + // preserve hibernateMode + savedHibernateMode = gSleepPolicyVars->hibernateMode; + gSleepPolicyVars->hibernateMode = *hibMode; + } else if (kIOPMSleepPhase1 == sleepPhase) { + // use original hibernateMode for phase2 + gSleepPolicyVars->hibernateMode = *hibMode; + } + result = gSleepPolicyHandler(gSleepPolicyTarget, gSleepPolicyVars, params); -struct HaltRestartApplierContext { - IOPMrootDomain * RootDomain; - unsigned long PowerState; - IOPMPowerFlags PowerFlags; - UInt32 MessageType; - UInt32 
Counter; - const char * LogString; - shutdownPhase_t phase; - - IOServiceInterestHandler handler; -} gHaltRestartCtx; + if (kIOPMSleepPhase0 == sleepPhase) { + // restore hibernateMode + gSleepPolicyVars->hibernateMode = savedHibernateMode; + } -const char *shutdownPhase2String(shutdownPhase_t phase) -{ - switch(phase) { - case kNotifyDone: - return "Notifications completed"; - case kNotifyPriorityClients: - return "Notifying priority clients"; - case kNotifyPowerPlaneDrivers: - return "Notifying power plane drivers"; - case kNotifyHaltRestartAction: - return "Notifying HaltRestart action handlers"; - case kQuiescePM: - return "Quiescing PM"; - default: - return "Unknown"; - } + if ((result != kIOReturnSuccess) || + (kIOPMSleepTypeInvalid == params->sleepType) || + (params->sleepType >= kIOPMSleepTypeLast) || + (kIOPMSystemSleepParametersVersion != params->version)) { + MSG("sleep policy handler error\n"); + goto done; + } -} + if ((getSleepTypeAttributes(params->sleepType) & + kIOPMSleepAttributeHibernateSetup) && + ((*hibMode & kIOHibernateModeOn) == 0)) { + *hibMode |= (kIOHibernateModeOn | kIOHibernateModeSleep); + } -static void -platformHaltRestartApplier( OSObject * object, void * context ) -{ - IOPowerStateChangeNotification notify; - HaltRestartApplierContext * ctx; - AbsoluteTime startTime, elapsedTime; - uint32_t deltaTime; + DLOG("sleep params v%u, type %u, flags 0x%x, wake 0x%x, timer %u, poweroff %u\n", + params->version, params->sleepType, params->sleepFlags, + params->ecWakeEvents, params->ecWakeTimer, params->ecPoweroffTimer); + found = true; + goto done; + } - ctx = (HaltRestartApplierContext *) context; + // Policy table is meaningless without standby enabled + if (!standbyEnabled) { + goto done; + } - _IOServiceInterestNotifier * notifier; - notifier = OSDynamicCast(_IOServiceInterestNotifier, object); - memset(¬ify, 0, sizeof(notify)); - notify.powerRef = (void *)(uintptr_t)ctx->Counter; - notify.returnValue = 0; - notify.stateNumber = ctx->PowerState; - notify.stateFlags = ctx->PowerFlags; + // Validate the sleep policy table + policyData = OSDynamicCast(OSData, prop); + if (!policyData || (policyData->getLength() <= sizeof(IOPMSystemSleepPolicyTable))) { + goto done; + } - if (notifier) { - ctx->handler = notifier->handler; - } + pt = (const IOPMSystemSleepPolicyTable *) policyData->getBytesNoCopy(); + if ((pt->signature != kIOPMSystemSleepPolicySignature) || + (pt->version != 1) || (0 == pt->entryCount)) { + goto done; + } - clock_get_uptime(&startTime); - ctx->RootDomain->messageClient( ctx->MessageType, object, (void *)¬ify ); - deltaTime = computeDeltaTimeMS(&startTime, &elapsedTime); + if (((policyData->getLength() - sizeof(IOPMSystemSleepPolicyTable)) != + (sizeof(IOPMSystemSleepPolicyEntry) * pt->entryCount))) { + goto done; + } - if ((deltaTime > kPMHaltTimeoutMS) && notifier) { + for (uint32_t i = 0; i < pt->entryCount; i++) { + const IOPMSystemSleepPolicyEntry * entry = &pt->entries[i]; + mismatch = (((uint32_t)currentFactors ^ entry->factorBits) & entry->factorMask); - LOG("%s handler %p took %u ms\n", - ctx->LogString, OBFUSCATE(notifier->handler), deltaTime); - halt_log_enter("PowerOff/Restart message to priority client", (const void *) notifier->handler, elapsedTime); - } + DLOG("mask 0x%08x, bits 0x%08x, flags 0x%08x, wake 0x%08x, mismatch 0x%08x\n", + entry->factorMask, entry->factorBits, + entry->sleepFlags, entry->wakeEvents, mismatch); + if (mismatch) { + continue; + } - ctx->handler = 0; - ctx->Counter++; -} + DLOG("^ found match\n"); + found = 
true; -static void quiescePowerTreeCallback( void * target, void * param ) -{ - IOLockLock(gPMHaltLock); - gPMQuiesced = true; - thread_wakeup(param); - IOLockUnlock(gPMHaltLock); -} + params->version = kIOPMSystemSleepParametersVersion; + params->reserved1 = 1; + if (entry->sleepFlags & kIOPMSleepFlagHibernate) { + params->sleepType = kIOPMSleepTypeStandby; + } else { + params->sleepType = kIOPMSleepTypeNormalSleep; + } -void IOPMrootDomain::handlePlatformHaltRestart( UInt32 pe_type ) -{ - AbsoluteTime startTime, elapsedTime; - uint32_t deltaTime; + params->ecWakeEvents = entry->wakeEvents; + if (entry->sleepFlags & kIOPMSleepFlagSleepTimerEnable) { + if (kIOPMSleepPhase2 == sleepPhase) { + clock_sec_t now_secs = gIOLastSleepTime.tv_sec; + + if (!_standbyTimerResetSeconds || + (now_secs <= _standbyTimerResetSeconds)) { + // Reset standby timer adjustment + _standbyTimerResetSeconds = now_secs; + DLOG("standby delay %u, reset %u\n", + standbyDelay, (uint32_t) _standbyTimerResetSeconds); + } else if (standbyDelay) { + // Shorten the standby delay timer + clock_sec_t elapsed = now_secs - _standbyTimerResetSeconds; + if (standbyDelay > elapsed) { + standbyDelay -= elapsed; + } else { + standbyDelay = 1; // must be > 0 + } + DLOG("standby delay %u, elapsed %u\n", + standbyDelay, (uint32_t) elapsed); + } + } + params->ecWakeTimer = standbyDelay; + } else if (kIOPMSleepPhase2 == sleepPhase) { + // A sleep that does not enable the sleep timer will reset + // the standby delay adjustment. + _standbyTimerResetSeconds = 0; + } + break; + } - memset(&gHaltRestartCtx, 0, sizeof(gHaltRestartCtx)); - gHaltRestartCtx.RootDomain = this; +done: + if (prop) { + prop->release(); + } + + return found; +} + +static IOPMSystemSleepParameters gEarlySystemSleepParams; + +void +IOPMrootDomain::evaluateSystemSleepPolicyEarly( void ) +{ + // Evaluate early (priority interest phase), before drivers sleep. + + DLOG("%s\n", __FUNCTION__); + removeProperty(kIOPMSystemSleepParametersKey); + + // Full wake resets the standby timer delay adjustment + if (_highestCapability & kIOPMSystemCapabilityGraphics) { + _standbyTimerResetSeconds = 0; + } + + hibernateDisabled = false; + hibernateMode = 0; + getSleepOption(kIOHibernateModeKey, &hibernateMode); + + // Save for late evaluation if sleep is aborted + bzero(&gEarlySystemSleepParams, sizeof(gEarlySystemSleepParams)); + + if (evaluateSystemSleepPolicy(&gEarlySystemSleepParams, kIOPMSleepPhase1, + &hibernateMode)) { + if (!hibernateRetry && + ((getSleepTypeAttributes(gEarlySystemSleepParams.sleepType) & + kIOPMSleepAttributeHibernateSetup) == 0)) { + // skip hibernate setup + hibernateDisabled = true; + } + } + + // Publish IOPMSystemSleepType + uint32_t sleepType = gEarlySystemSleepParams.sleepType; + if (sleepType == kIOPMSleepTypeInvalid) { + // no sleep policy + sleepType = kIOPMSleepTypeNormalSleep; + if (hibernateMode & kIOHibernateModeOn) { + sleepType = (hibernateMode & kIOHibernateModeSleep) ? + kIOPMSleepTypeSafeSleep : kIOPMSleepTypeHibernate; + } + } else if ((sleepType == kIOPMSleepTypeStandby) && + (gEarlySystemSleepParams.ecPoweroffTimer)) { + // report the lowest possible sleep state + sleepType = kIOPMSleepTypePowerOff; + } + + setProperty(kIOPMSystemSleepTypeKey, sleepType, 32); +} + +void +IOPMrootDomain::evaluateSystemSleepPolicyFinal( void ) +{ + IOPMSystemSleepParameters params; + OSData * paramsData; + bool wakeNow; + // Evaluate sleep policy after sleeping drivers but before platform sleep. 
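+	// Note: evaluateSystemSleepPolicy() runs up to three times per sleep
+	// attempt: kIOPMSleepPhase0 from getSystemSleepType(), kIOPMSleepPhase1
+	// from evaluateSystemSleepPolicyEarly() before drivers sleep, and
+	// kIOPMSleepPhase2 here. Only this final pass publishes
+	// kIOPMSystemSleepParametersKey and may disable sleep below to force
+	// hibernation.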
+
+	DLOG("%s\n", __FUNCTION__);
+
+	bzero(&params, sizeof(params));
+	wakeNow = false;
+	if (evaluateSystemSleepPolicy(&params, kIOPMSleepPhase2, &hibernateMode)) {
+		if ((kIOPMSleepTypeStandby == params.sleepType)
+		    && gIOHibernateStandbyDisabled && gSleepPolicyVars
+		    && (!((kIOPMSleepFactorStandbyForced | kIOPMSleepFactorAutoPowerOffForced | kIOPMSleepFactorHibernateForced)
+		    & gSleepPolicyVars->sleepFactors))) {
+			standbyNixed = true;
+			wakeNow = true;
+		}
+		if (wakeNow
+		    || ((hibernateDisabled || hibernateAborted) &&
+		    (getSleepTypeAttributes(params.sleepType) &
+		    kIOPMSleepAttributeHibernateSetup))) {
+			// Final evaluation picked a state requiring hibernation,
+			// but hibernate isn't going to proceed. Arm a short sleep using
+			// the early non-hibernate sleep parameters.
+			bcopy(&gEarlySystemSleepParams, &params, sizeof(params));
+			params.sleepType = kIOPMSleepTypeAbortedSleep;
+			params.ecWakeTimer = 1;
+			if (standbyNixed) {
+				resetTimers = true;
+			} else {
+				// Set hibernateRetry flag to force hibernate setup on the
+				// next sleep.
+				hibernateRetry = true;
+			}
+			DLOG("wake in %u secs for hibernateDisabled %d, hibernateAborted %d, standbyNixed %d\n",
+			    params.ecWakeTimer, hibernateDisabled, hibernateAborted, standbyNixed);
+		} else {
+			hibernateRetry = false;
+		}
+
+		if (kIOPMSleepTypeAbortedSleep != params.sleepType) {
+			resetTimers = false;
+		}
+
+		paramsData = OSData::withBytes(&params, sizeof(params));
+		if (paramsData) {
+			setProperty(kIOPMSystemSleepParametersKey, paramsData);
+			paramsData->release();
+		}
+
+		if (getSleepTypeAttributes(params.sleepType) &
+		    kIOPMSleepAttributeHibernateSleep) {
+			// Disable sleep to force hibernation
+			gIOHibernateMode &= ~kIOHibernateModeSleep;
+		}
+	}
+}
+
+bool
+IOPMrootDomain::getHibernateSettings(
+	uint32_t * hibernateModePtr,
+	uint32_t * hibernateFreeRatio,
+	uint32_t * hibernateFreeTime )
+{
+	// Called by IOHibernateSystemSleep() after evaluateSystemSleepPolicyEarly()
+	// has updated the hibernateDisabled flag.
+
+	bool ok = getSleepOption(kIOHibernateModeKey, hibernateModePtr);
+	getSleepOption(kIOHibernateFreeRatioKey, hibernateFreeRatio);
+	getSleepOption(kIOHibernateFreeTimeKey, hibernateFreeTime);
+	if (hibernateDisabled) {
+		*hibernateModePtr = 0;
+	} else if (gSleepPolicyHandler) {
+		*hibernateModePtr = hibernateMode;
+	}
+	DLOG("hibernateMode 0x%x\n", *hibernateModePtr);
+	return ok;
+}
+
+bool
+IOPMrootDomain::getSleepOption( const char * key, uint32_t * option )
+{
+	OSObject *      optionsProp;
+	OSDictionary *  optionsDict;
+	OSObject *      obj = 0;
+	OSNumber *      num;
+	bool            ok = false;
+
+	optionsProp = copyProperty(kRootDomainSleepOptionsKey);
+	optionsDict = OSDynamicCast(OSDictionary, optionsProp);
+
+	if (optionsDict) {
+		obj = optionsDict->getObject(key);
+		if (obj) {
+			obj->retain();
+		}
+	}
+	if (!obj) {
+		obj = copyProperty(key);
+	}
+	if (obj) {
+		if ((num = OSDynamicCast(OSNumber, obj))) {
+			*option = num->unsigned32BitValue();
+			ok = true;
+		} else if (OSDynamicCast(OSBoolean, obj)) {
+			*option = (obj == kOSBooleanTrue) ?
1 : 0; + ok = true; + } + } - clock_get_uptime(&startTime); - switch (pe_type) - { - case kPEHaltCPU: - case kPEUPSDelayHaltCPU: - gHaltRestartCtx.PowerState = OFF_STATE; - gHaltRestartCtx.MessageType = kIOMessageSystemWillPowerOff; - gHaltRestartCtx.LogString = "PowerOff"; - break; + if (obj) { + obj->release(); + } + if (optionsProp) { + optionsProp->release(); + } - case kPERestartCPU: - gHaltRestartCtx.PowerState = RESTART_STATE; - gHaltRestartCtx.MessageType = kIOMessageSystemWillRestart; - gHaltRestartCtx.LogString = "Restart"; - break; + return ok; +} +#endif /* HIBERNATION */ - case kPEPagingOff: - gHaltRestartCtx.PowerState = ON_STATE; - gHaltRestartCtx.MessageType = kIOMessageSystemPagingOff; - gHaltRestartCtx.LogString = "PagingOff"; - IOService::updateConsoleUsers(NULL, kIOMessageSystemPagingOff); +IOReturn +IOPMrootDomain::getSystemSleepType( uint32_t * sleepType, uint32_t * standbyTimer ) +{ #if HIBERNATION - IOHibernateSystemRestart(); + IOPMSystemSleepParameters params; + uint32_t hibMode = 0; + bool ok; + + if (gIOPMWorkLoop->inGate() == false) { + IOReturn ret = gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, + &IOPMrootDomain::getSystemSleepType), + (OSObject *) this, + (void *) sleepType, (void *) standbyTimer); + return ret; + } + + getSleepOption(kIOHibernateModeKey, &hibMode); + bzero(¶ms, sizeof(params)); + + ok = evaluateSystemSleepPolicy(¶ms, kIOPMSleepPhase0, &hibMode); + if (ok) { + *sleepType = params.sleepType; + if (!getSleepOption(kIOPMDeepSleepTimerKey, standbyTimer) && + !getSleepOption(kIOPMDeepSleepDelayKey, standbyTimer)) { + DLOG("Standby delay is not set\n"); + *standbyTimer = 0; + } + return kIOReturnSuccess; + } #endif - break; - - default: - return; - } - - gHaltRestartCtx.phase = kNotifyPriorityClients; - // Notify legacy clients - applyToInterested(gIOPriorityPowerStateInterest, platformHaltRestartApplier, &gHaltRestartCtx); - - // For normal shutdown, turn off File Server Mode. 
- if (kPEHaltCPU == pe_type) - { - const OSSymbol * setting = OSSymbol::withCString(kIOPMSettingRestartOnPowerLossKey); - OSNumber * num = OSNumber::withNumber((unsigned long long) 0, 32); - if (setting && num) - { - setPMSetting(setting, num); - setting->release(); - num->release(); - } - } - - - if (kPEPagingOff != pe_type) - { - gHaltRestartCtx.phase = kNotifyPowerPlaneDrivers; - // Notify in power tree order - notifySystemShutdown(this, gHaltRestartCtx.MessageType); - } - - gHaltRestartCtx.phase = kNotifyHaltRestartAction; - IOCPURunPlatformHaltRestartActions(pe_type); - - // Wait for PM to quiesce - if ((kPEPagingOff != pe_type) && gPMHaltLock) - { - gHaltRestartCtx.phase = kQuiescePM; - AbsoluteTime quiesceTime = mach_absolute_time(); - - IOLockLock(gPMHaltLock); - gPMQuiesced = false; - if (quiescePowerTree(this, &quiescePowerTreeCallback, &gPMQuiesced) == - kIOReturnSuccess) - { - while (!gPMQuiesced) - { - IOLockSleep(gPMHaltLock, &gPMQuiesced, THREAD_UNINT); - } - } - IOLockUnlock(gPMHaltLock); - deltaTime = computeDeltaTimeMS(&quiesceTime, &elapsedTime); - DLOG("PM quiesce took %u ms\n", deltaTime); - halt_log_enter("Quiesce", NULL, elapsedTime); - } - gHaltRestartCtx.phase = kNotifyDone; - - deltaTime = computeDeltaTimeMS(&startTime, &elapsedTime); - LOG("%s all drivers took %u ms\n", gHaltRestartCtx.LogString, deltaTime); - - halt_log_enter(gHaltRestartCtx.LogString, NULL, elapsedTime); - - deltaTime = computeDeltaTimeMS(&gHaltStartTime, &elapsedTime); - LOG("%s total %u ms\n", gHaltRestartCtx.LogString, deltaTime); - - if (gHaltLog && gHaltTimeMaxLog && (deltaTime >= gHaltTimeMaxLog)) - { - printf("%s total %d ms:%s\n", gHaltRestartCtx.LogString, deltaTime, gHaltLog); - } - - checkShutdownTimeout(); -} - -bool IOPMrootDomain::checkShutdownTimeout() -{ - AbsoluteTime elapsedTime; - uint32_t deltaTime = computeDeltaTimeMS(&gHaltStartTime, &elapsedTime); - - if (gHaltTimeMaxPanic && (deltaTime >= gHaltTimeMaxPanic)) { - return true; - } - return false; -} - -void IOPMrootDomain::panicWithShutdownLog(uint32_t timeoutInMs) -{ - if (gHaltLog) { - if ((gHaltRestartCtx.phase == kNotifyPriorityClients) && gHaltRestartCtx.handler) { - halt_log_enter("Blocked on priority client", (void *)gHaltRestartCtx.handler, mach_absolute_time() - gHaltStartTime); - } - panic("%s timed out in phase '%s'. Total %d ms:%s", - gHaltRestartCtx.LogString, shutdownPhase2String(gHaltRestartCtx.phase), timeoutInMs, gHaltLog); - } - else { - panic("%s timed out in phase \'%s\'. 
Total %d ms",
-        gHaltRestartCtx.LogString, shutdownPhase2String(gHaltRestartCtx.phase), timeoutInMs);
-    }
+
+	return kIOReturnUnsupported;
+}
+
+// MARK: -
+// MARK: Shutdown and Restart
+
+//******************************************************************************
+// handlePlatformHaltRestart
+//
+//******************************************************************************
+
+// Phases while performing shutdown/restart
+typedef enum {
+	kNotifyDone                 = 0x00,
+	kNotifyPriorityClients      = 0x10,
+	kNotifyPowerPlaneDrivers    = 0x20,
+	kNotifyHaltRestartAction    = 0x30,
+	kQuiescePM                  = 0x40,
+} shutdownPhase_t;
+
+
+struct HaltRestartApplierContext {
+	IOPMrootDomain *    RootDomain;
+	unsigned long       PowerState;
+	IOPMPowerFlags      PowerFlags;
+	UInt32              MessageType;
+	UInt32              Counter;
+	const char *        LogString;
+	shutdownPhase_t     phase;
+
+	IOServiceInterestHandler    handler;
+} gHaltRestartCtx;
+
+const char *
+shutdownPhase2String(shutdownPhase_t phase)
+{
+	switch (phase) {
+	case kNotifyDone:
+		return "Notifications completed";
+	case kNotifyPriorityClients:
+		return "Notifying priority clients";
+	case kNotifyPowerPlaneDrivers:
+		return "Notifying power plane drivers";
+	case kNotifyHaltRestartAction:
+		return "Notifying HaltRestart action handlers";
+	case kQuiescePM:
+		return "Quiescing PM";
+	default:
+		return "Unknown";
+	}
+}
+
+static void
+platformHaltRestartApplier( OSObject * object, void * context )
+{
+	IOPowerStateChangeNotification  notify;
+	HaltRestartApplierContext *     ctx;
+	AbsoluteTime                    startTime, elapsedTime;
+	uint32_t                        deltaTime;
+
+	ctx = (HaltRestartApplierContext *) context;
+
+	_IOServiceInterestNotifier * notifier;
+	notifier = OSDynamicCast(_IOServiceInterestNotifier, object);
+	memset(&notify, 0, sizeof(notify));
+	notify.powerRef    = (void *)(uintptr_t)ctx->Counter;
+	notify.returnValue = 0;
+	notify.stateNumber = ctx->PowerState;
+	notify.stateFlags  = ctx->PowerFlags;
+
+	if (notifier) {
+		ctx->handler = notifier->handler;
+	}
+
+	clock_get_uptime(&startTime);
+	ctx->RootDomain->messageClient( ctx->MessageType, object, (void *)&notify );
+	deltaTime = computeDeltaTimeMS(&startTime, &elapsedTime);
+
+	if ((deltaTime > kPMHaltTimeoutMS) && notifier) {
+		LOG("%s handler %p took %u ms\n",
+		    ctx->LogString, OBFUSCATE(notifier->handler), deltaTime);
+		halt_log_enter("PowerOff/Restart message to priority client", (const void *) notifier->handler, elapsedTime);
+	}
+
+	ctx->handler = 0;
+	ctx->Counter++;
+}
+
+static void
+quiescePowerTreeCallback( void * target, void * param )
+{
+	IOLockLock(gPMHaltLock);
+	gPMQuiesced = true;
+	thread_wakeup(param);
+	IOLockUnlock(gPMHaltLock);
+}
+
+void
+IOPMrootDomain::handlePlatformHaltRestart( UInt32 pe_type )
+{
+	AbsoluteTime startTime, elapsedTime;
+	uint32_t     deltaTime;
+
+	memset(&gHaltRestartCtx, 0, sizeof(gHaltRestartCtx));
+	gHaltRestartCtx.RootDomain = this;
+
+	clock_get_uptime(&startTime);
+	switch (pe_type) {
+	case kPEHaltCPU:
+	case kPEUPSDelayHaltCPU:
+		gHaltRestartCtx.PowerState  = OFF_STATE;
+		gHaltRestartCtx.MessageType = kIOMessageSystemWillPowerOff;
+		gHaltRestartCtx.LogString   = "PowerOff";
+		break;

+	case kPERestartCPU:
+		gHaltRestartCtx.PowerState  = RESTART_STATE;
+		gHaltRestartCtx.MessageType = kIOMessageSystemWillRestart;
+		gHaltRestartCtx.LogString   = "Restart";
+		break;
+
+	case kPEPagingOff:
+		gHaltRestartCtx.PowerState  = ON_STATE;
+		gHaltRestartCtx.MessageType = kIOMessageSystemPagingOff;
+		gHaltRestartCtx.LogString   = "PagingOff";
+		IOService::updateConsoleUsers(NULL, kIOMessageSystemPagingOff);
+#if HIBERNATION
+
IOHibernateSystemRestart(); +#endif + break; + + default: + return; + } + + gHaltRestartCtx.phase = kNotifyPriorityClients; + // Notify legacy clients + applyToInterested(gIOPriorityPowerStateInterest, platformHaltRestartApplier, &gHaltRestartCtx); + + // For normal shutdown, turn off File Server Mode. + if (kPEHaltCPU == pe_type) { + const OSSymbol * setting = OSSymbol::withCString(kIOPMSettingRestartOnPowerLossKey); + OSNumber * num = OSNumber::withNumber((unsigned long long) 0, 32); + if (setting && num) { + setPMSetting(setting, num); + setting->release(); + num->release(); + } + } + + + if (kPEPagingOff != pe_type) { + gHaltRestartCtx.phase = kNotifyPowerPlaneDrivers; + // Notify in power tree order + notifySystemShutdown(this, gHaltRestartCtx.MessageType); + } + + gHaltRestartCtx.phase = kNotifyHaltRestartAction; + IOCPURunPlatformHaltRestartActions(pe_type); + + // Wait for PM to quiesce + if ((kPEPagingOff != pe_type) && gPMHaltLock) { + gHaltRestartCtx.phase = kQuiescePM; + AbsoluteTime quiesceTime = mach_absolute_time(); + + IOLockLock(gPMHaltLock); + gPMQuiesced = false; + if (quiescePowerTree(this, &quiescePowerTreeCallback, &gPMQuiesced) == + kIOReturnSuccess) { + while (!gPMQuiesced) { + IOLockSleep(gPMHaltLock, &gPMQuiesced, THREAD_UNINT); + } + } + IOLockUnlock(gPMHaltLock); + deltaTime = computeDeltaTimeMS(&quiesceTime, &elapsedTime); + DLOG("PM quiesce took %u ms\n", deltaTime); + halt_log_enter("Quiesce", NULL, elapsedTime); + } + gHaltRestartCtx.phase = kNotifyDone; + + deltaTime = computeDeltaTimeMS(&startTime, &elapsedTime); + LOG("%s all drivers took %u ms\n", gHaltRestartCtx.LogString, deltaTime); + + halt_log_enter(gHaltRestartCtx.LogString, NULL, elapsedTime); + + deltaTime = computeDeltaTimeMS(&gHaltStartTime, &elapsedTime); + LOG("%s total %u ms\n", gHaltRestartCtx.LogString, deltaTime); + + if (gHaltLog && gHaltTimeMaxLog && (deltaTime >= gHaltTimeMaxLog)) { + printf("%s total %d ms:%s\n", gHaltRestartCtx.LogString, deltaTime, gHaltLog); + } + + checkShutdownTimeout(); +} + +bool +IOPMrootDomain::checkShutdownTimeout() +{ + AbsoluteTime elapsedTime; + uint32_t deltaTime = computeDeltaTimeMS(&gHaltStartTime, &elapsedTime); + + if (gHaltTimeMaxPanic && (deltaTime >= gHaltTimeMaxPanic)) { + return true; + } + return false; +} + +void +IOPMrootDomain::panicWithShutdownLog(uint32_t timeoutInMs) +{ + if (gHaltLog) { + if ((gHaltRestartCtx.phase == kNotifyPriorityClients) && gHaltRestartCtx.handler) { + halt_log_enter("Blocked on priority client", (void *)gHaltRestartCtx.handler, mach_absolute_time() - gHaltStartTime); + } + panic("%s timed out in phase '%s'. Total %d ms:%s", + gHaltRestartCtx.LogString, shutdownPhase2String(gHaltRestartCtx.phase), timeoutInMs, gHaltLog); + } else { + panic("%s timed out in phase \'%s\'. 
Total %d ms", + gHaltRestartCtx.LogString, shutdownPhase2String(gHaltRestartCtx.phase), timeoutInMs); + } } //****************************************************************************** @@ -4859,9 +4820,10 @@ void IOPMrootDomain::panicWithShutdownLog(uint32_t timeoutInMs) // //****************************************************************************** -IOReturn IOPMrootDomain::shutdownSystem( void ) +IOReturn +IOPMrootDomain::shutdownSystem( void ) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } //****************************************************************************** @@ -4869,9 +4831,10 @@ IOReturn IOPMrootDomain::shutdownSystem( void ) // //****************************************************************************** -IOReturn IOPMrootDomain::restartSystem( void ) +IOReturn +IOPMrootDomain::restartSystem( void ) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } // MARK: - @@ -4883,875 +4846,800 @@ IOReturn IOPMrootDomain::restartSystem( void ) // Running on PM work loop thread. //****************************************************************************** -void IOPMrootDomain::tagPowerPlaneService( - IOService * service, - IOPMActions * actions ) +void +IOPMrootDomain::tagPowerPlaneService( + IOService * service, + IOPMActions * actions ) { - uint32_t flags = 0; - bool isDisplayWrangler; + uint32_t flags = 0; + bool isDisplayWrangler; - memset(actions, 0, sizeof(*actions)); - actions->target = this; + memset(actions, 0, sizeof(*actions)); + actions->target = this; - if (service == this) - { - actions->actionPowerChangeStart = - OSMemberFunctionCast( - IOPMActionPowerChangeStart, this, - &IOPMrootDomain::handleOurPowerChangeStart); + if (service == this) { + actions->actionPowerChangeStart = + OSMemberFunctionCast( + IOPMActionPowerChangeStart, this, + &IOPMrootDomain::handleOurPowerChangeStart); - actions->actionPowerChangeDone = - OSMemberFunctionCast( - IOPMActionPowerChangeDone, this, - &IOPMrootDomain::handleOurPowerChangeDone); + actions->actionPowerChangeDone = + OSMemberFunctionCast( + IOPMActionPowerChangeDone, this, + &IOPMrootDomain::handleOurPowerChangeDone); - actions->actionPowerChangeOverride = - OSMemberFunctionCast( - IOPMActionPowerChangeOverride, this, - &IOPMrootDomain::overrideOurPowerChange); - return; - } + actions->actionPowerChangeOverride = + OSMemberFunctionCast( + IOPMActionPowerChangeOverride, this, + &IOPMrootDomain::overrideOurPowerChange); + return; + } #if !NO_KERNEL_HID - isDisplayWrangler = (0 != service->metaCast("IODisplayWrangler")); - if (isDisplayWrangler) - { - wrangler = service; + isDisplayWrangler = (0 != service->metaCast("IODisplayWrangler")); + if (isDisplayWrangler) { + wrangler = service; // found the display wrangler, check for any display assertions already created if (pmAssertions->getActivatedAssertions() & kIOPMDriverAssertionPreventDisplaySleepBit) { DLOG("wrangler setIgnoreIdleTimer\(1) due to pre-existing assertion\n"); wrangler->setIgnoreIdleTimer( true ); } - } + } #else - isDisplayWrangler = false; + isDisplayWrangler = false; #endif #if defined(__i386__) || defined(__x86_64__) - if (isDisplayWrangler) - flags |= kPMActionsFlagIsDisplayWrangler; - if (service->getProperty("IOPMStrictTreeOrder")) - flags |= kPMActionsFlagIsGraphicsDevice; - if (service->getProperty("IOPMUnattendedWakePowerState")) - flags |= kPMActionsFlagIsAudioDevice; + if (isDisplayWrangler) { + flags |= kPMActionsFlagIsDisplayWrangler; + } + if (service->getProperty("IOPMStrictTreeOrder")) { + flags |= 
kPMActionsFlagIsGraphicsDevice; + } + if (service->getProperty("IOPMUnattendedWakePowerState")) { + flags |= kPMActionsFlagIsAudioDevice; + } #endif - // Find the power connection object that is a child of the PCI host - // bridge, and has a graphics/audio device attached below. Mark the - // power branch for delayed child notifications. - - if (flags) - { - IORegistryEntry * child = service; - IORegistryEntry * parent = child->getParentEntry(gIOPowerPlane); - - while (child != this) - { - if (parent->metaCast("IOPCIDevice") || - (parent == this)) - { - if (OSDynamicCast(IOPowerConnection, child)) - { - IOPowerConnection * conn = (IOPowerConnection *) child; - conn->delayChildNotification = true; - DLOG("delayChildNotification for 0x%llx\n", conn->getRegistryEntryID()); - } - break; - } - child = parent; - parent = child->getParentEntry(gIOPowerPlane); - } - } - - if (flags) - { - DLOG("%s tag flags %x\n", service->getName(), flags); - actions->parameter |= flags; - actions->actionPowerChangeOverride = - OSMemberFunctionCast( - IOPMActionPowerChangeOverride, this, - &IOPMrootDomain::overridePowerChangeForUIService); - - if (flags & kPMActionsFlagIsDisplayWrangler) - { - actions->actionActivityTickle = - OSMemberFunctionCast( - IOPMActionActivityTickle, this, - &IOPMrootDomain::handleActivityTickleForDisplayWrangler); - - actions->actionUpdatePowerClient = - OSMemberFunctionCast( - IOPMActionUpdatePowerClient, this, - &IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler); - } - return; - } - - // Locate the first PCI host bridge for PMTrace. - if (!pciHostBridgeDevice && service->metaCast("IOPCIBridge")) - { - IOService * provider = service->getProvider(); - if (OSDynamicCast(IOPlatformDevice, provider) && - provider->inPlane(gIODTPlane)) - { - pciHostBridgeDevice = provider; - pciHostBridgeDriver = service; - DLOG("PMTrace found PCI host bridge %s->%s\n", - provider->getName(), service->getName()); - } - } - - // Tag top-level PCI devices. The order of PMinit() call does not - // change across boots and is used as the PCI bit number. - if (pciHostBridgeDevice && service->metaCast("IOPCIDevice")) - { - // Would prefer to check built-in property, but tagPowerPlaneService() - // is called before pciDevice->registerService(). - IORegistryEntry * parent = service->getParentEntry(gIODTPlane); - if ((parent == pciHostBridgeDevice) && service->getProperty("acpi-device")) - { - int bit = pmTracer->recordTopLevelPCIDevice( service ); - if (bit >= 0) - { - // Save the assigned bit for fast lookup. - actions->parameter |= (bit & kPMActionsPCIBitNumberMask); - - actions->actionPowerChangeStart = - OSMemberFunctionCast( - IOPMActionPowerChangeStart, this, - &IOPMrootDomain::handlePowerChangeStartForPCIDevice); - - actions->actionPowerChangeDone = - OSMemberFunctionCast( - IOPMActionPowerChangeDone, this, - &IOPMrootDomain::handlePowerChangeDoneForPCIDevice); - } - } - } + // Find the power connection object that is a child of the PCI host + // bridge, and has a graphics/audio device attached below. Mark the + // power branch for delayed child notifications. + + if (flags) { + IORegistryEntry * child = service; + IORegistryEntry * parent = child->getParentEntry(gIOPowerPlane); + + while (child != this) { + if (child->getProperty("IOPCITunnelled") == kOSBooleanTrue) { + // Skip delaying notifications and clamping power on external graphics and audio devices. + DLOG("Avoiding delayChildNotification on object 0x%llx. 
flags: 0x%x\n", service->getRegistryEntryID(), flags); + flags = 0; + break; + } + if ((parent == pciHostBridgeDriver) || + (parent == this)) { + if (OSDynamicCast(IOPowerConnection, child)) { + IOPowerConnection * conn = (IOPowerConnection *) child; + conn->delayChildNotification = true; + DLOG("delayChildNotification for 0x%llx\n", conn->getRegistryEntryID()); + } + break; + } + child = parent; + parent = child->getParentEntry(gIOPowerPlane); + } + } + + if (flags) { + DLOG("%s tag flags %x\n", service->getName(), flags); + actions->parameter |= flags; + actions->actionPowerChangeOverride = + OSMemberFunctionCast( + IOPMActionPowerChangeOverride, this, + &IOPMrootDomain::overridePowerChangeForUIService); + + if (flags & kPMActionsFlagIsDisplayWrangler) { + actions->actionActivityTickle = + OSMemberFunctionCast( + IOPMActionActivityTickle, this, + &IOPMrootDomain::handleActivityTickleForDisplayWrangler); + + actions->actionUpdatePowerClient = + OSMemberFunctionCast( + IOPMActionUpdatePowerClient, this, + &IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler); + } + return; + } + + // Locate the first PCI host bridge for PMTrace. + if (!pciHostBridgeDevice && service->metaCast("IOPCIBridge")) { + IOService * provider = service->getProvider(); + if (OSDynamicCast(IOPlatformDevice, provider) && + provider->inPlane(gIODTPlane)) { + pciHostBridgeDevice = provider; + pciHostBridgeDriver = service; + DLOG("PMTrace found PCI host bridge %s->%s\n", + provider->getName(), service->getName()); + } + } + + // Tag top-level PCI devices. The order of PMinit() call does not + // change across boots and is used as the PCI bit number. + if (pciHostBridgeDevice && service->metaCast("IOPCIDevice")) { + // Would prefer to check built-in property, but tagPowerPlaneService() + // is called before pciDevice->registerService(). + IORegistryEntry * parent = service->getParentEntry(gIODTPlane); + if ((parent == pciHostBridgeDevice) && service->getProperty("acpi-device")) { + int bit = pmTracer->recordTopLevelPCIDevice( service ); + if (bit >= 0) { + // Save the assigned bit for fast lookup. + actions->parameter |= (bit & kPMActionsPCIBitNumberMask); + + actions->actionPowerChangeStart = + OSMemberFunctionCast( + IOPMActionPowerChangeStart, this, + &IOPMrootDomain::handlePowerChangeStartForPCIDevice); + + actions->actionPowerChangeDone = + OSMemberFunctionCast( + IOPMActionPowerChangeDone, this, + &IOPMrootDomain::handlePowerChangeDoneForPCIDevice); + } + } + } } //****************************************************************************** // PM actions for root domain //****************************************************************************** -void IOPMrootDomain::overrideOurPowerChange( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex * inOutPowerState, - IOPMPowerChangeFlags * inOutChangeFlags, - IOPMRequestTag requestTag ) -{ - uint32_t powerState = (uint32_t) *inOutPowerState; - uint32_t changeFlags = *inOutChangeFlags; - uint32_t currentPowerState = (uint32_t) getPowerState(); - - if (changeFlags & kIOPMParentInitiated) - { - // Root parent is permanently pegged at max power, - // a parent initiated power change is unexpected. - *inOutChangeFlags |= kIOPMNotDone; - return; - } - - if (powerState < currentPowerState) - { - if (CAP_CURRENT(kIOPMSystemCapabilityGraphics)) - { - // Root domain is dropping power state ON->SLEEP. - // If system is in full wake, first enter dark wake by - // converting the power drop to a capability change. 
- // Once in dark wake, transition to sleep state ASAP. - - darkWakeToSleepASAP = true; - - // Drop graphics and audio capability - _desiredCapability &= ~( - kIOPMSystemCapabilityGraphics | - kIOPMSystemCapabilityAudio ); - - // Convert to capability change (ON->ON) - *inOutPowerState = ON_STATE; - *inOutChangeFlags |= kIOPMSynchronize; - - // Revert device desire from SLEEP to ON - changePowerStateToPriv(ON_STATE); - } - else - { - // System is in dark wake, ok to drop power state. - // Broadcast root powering down to entire tree. - *inOutChangeFlags |= kIOPMRootChangeDown; - } - } - else if (powerState > currentPowerState) - { - if ((_currentCapability & kIOPMSystemCapabilityCPU) == 0) - { - // Broadcast power up when waking from sleep, but not for the - // initial power change at boot by checking for cpu capability. - *inOutChangeFlags |= kIOPMRootChangeUp; - } - } -} - -void IOPMrootDomain::handleOurPowerChangeStart( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags * inOutChangeFlags, - IOPMRequestTag requestTag ) -{ - uint32_t changeFlags = *inOutChangeFlags; - uint32_t currentPowerState = (uint32_t) getPowerState(); - uint32_t sleepReason = requestTag ? requestTag : kIOPMSleepReasonIdle; - bool publishSleepReason = false; - - _systemTransitionType = kSystemTransitionNone; - _systemMessageClientMask = 0; - capabilityLoss = false; - toldPowerdCapWillChange = false; - - if (lowBatteryCondition) - { - // Low battery notification may arrive after the initial sleep request - // has been queued. Override the sleep reason so powerd and others can - // treat this as an emergency sleep. - sleepReason = kIOPMSleepReasonLowPower; - } - - // 1. Explicit capability change. - - if (changeFlags & kIOPMSynchronize) - { - if (powerState == ON_STATE) - { - if (changeFlags & kIOPMSyncNoChildNotify) - _systemTransitionType = kSystemTransitionNewCapClient; - else - _systemTransitionType = kSystemTransitionCapability; - } - } - - // 2. Going to sleep (cancellation still possible). - - else if (powerState < currentPowerState) - _systemTransitionType = kSystemTransitionSleep; - - // 3. Woke from (idle or demand) sleep. - - else if (!systemBooting && - (changeFlags & kIOPMSelfInitiated) && - (powerState > currentPowerState)) - { - _systemTransitionType = kSystemTransitionWake; - _desiredCapability = kIOPMSystemCapabilityCPU | - kIOPMSystemCapabilityNetwork; - - // Early exit from dark wake to full (e.g. LID open) - if (kFullWakeReasonNone != fullWakeReason) - { - _desiredCapability |= ( - kIOPMSystemCapabilityGraphics | - kIOPMSystemCapabilityAudio ); - } +void +IOPMrootDomain::overrideOurPowerChange( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex * inOutPowerState, + IOPMPowerChangeFlags * inOutChangeFlags, + IOPMRequestTag requestTag ) +{ + uint32_t powerState = (uint32_t) *inOutPowerState; + uint32_t changeFlags = *inOutChangeFlags; + uint32_t currentPowerState = (uint32_t) getPowerState(); + + if (changeFlags & kIOPMParentInitiated) { + // Root parent is permanently pegged at max power, + // a parent initiated power change is unexpected. + *inOutChangeFlags |= kIOPMNotDone; + return; + } + + if (powerState < currentPowerState) { + if (CAP_CURRENT(kIOPMSystemCapabilityGraphics)) { + // Root domain is dropping power state ON->SLEEP. + // If system is in full wake, first enter dark wake by + // converting the power drop to a capability change. + // Once in dark wake, transition to sleep state ASAP. 
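+			// Note: the conversion below reverts the requested power state
+			// to ON_STATE and sets kIOPMSynchronize, so the power tree sees
+			// a capability change (graphics and audio dropped) rather than
+			// a state drop; darkWakeToSleepASAP then drives the actual
+			// sleep request once dark wake is reached.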
+ + darkWakeToSleepASAP = true; + + // Drop graphics and audio capability + _desiredCapability &= ~( + kIOPMSystemCapabilityGraphics | + kIOPMSystemCapabilityAudio); + + // Convert to capability change (ON->ON) + *inOutPowerState = ON_STATE; + *inOutChangeFlags |= kIOPMSynchronize; + + // Revert device desire from SLEEP to ON + changePowerStateToPriv(ON_STATE); + } else { + // System is in dark wake, ok to drop power state. + // Broadcast root powering down to entire tree. + *inOutChangeFlags |= kIOPMRootChangeDown; + } + } else if (powerState > currentPowerState) { + if ((_currentCapability & kIOPMSystemCapabilityCPU) == 0) { + // Broadcast power up when waking from sleep, but not for the + // initial power change at boot by checking for cpu capability. + *inOutChangeFlags |= kIOPMRootChangeUp; + } + } +} + +void +IOPMrootDomain::handleOurPowerChangeStart( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags * inOutChangeFlags, + IOPMRequestTag requestTag ) +{ + uint32_t changeFlags = *inOutChangeFlags; + uint32_t currentPowerState = (uint32_t) getPowerState(); + uint32_t sleepReason = requestTag ? requestTag : kIOPMSleepReasonIdle; + bool publishSleepReason = false; + + _systemTransitionType = kSystemTransitionNone; + _systemMessageClientMask = 0; + capabilityLoss = false; + toldPowerdCapWillChange = false; + + if (lowBatteryCondition) { + // Low battery notification may arrive after the initial sleep request + // has been queued. Override the sleep reason so powerd and others can + // treat this as an emergency sleep. + sleepReason = kIOPMSleepReasonLowPower; + } + + // 1. Explicit capability change. + + if (changeFlags & kIOPMSynchronize) { + if (powerState == ON_STATE) { + if (changeFlags & kIOPMSyncNoChildNotify) { + _systemTransitionType = kSystemTransitionNewCapClient; + } else { + _systemTransitionType = kSystemTransitionCapability; + } + } + } + // 2. Going to sleep (cancellation still possible). + else if (powerState < currentPowerState) { + _systemTransitionType = kSystemTransitionSleep; + } + // 3. Woke from (idle or demand) sleep. + else if (!systemBooting && + (changeFlags & kIOPMSelfInitiated) && + (powerState > currentPowerState)) { + _systemTransitionType = kSystemTransitionWake; + _desiredCapability = kIOPMSystemCapabilityCPU | + kIOPMSystemCapabilityNetwork; + + // Early exit from dark wake to full (e.g. LID open) + if (kFullWakeReasonNone != fullWakeReason) { + _desiredCapability |= ( + kIOPMSystemCapabilityGraphics | + kIOPMSystemCapabilityAudio); + } #if HIBERNATION - IOHibernateSetWakeCapabilities(_desiredCapability); + IOHibernateSetWakeCapabilities(_desiredCapability); #endif - } - - // Update pending wake capability at the beginning of every - // state transition (including synchronize). This will become - // the current capability at the end of the transition. - - if (kSystemTransitionSleep == _systemTransitionType) - { - _pendingCapability = 0; - capabilityLoss = true; - - } - else if (kSystemTransitionNewCapClient != _systemTransitionType) - { - _pendingCapability = _desiredCapability | - kIOPMSystemCapabilityCPU | - kIOPMSystemCapabilityNetwork; - - if (_pendingCapability & kIOPMSystemCapabilityGraphics) - _pendingCapability |= kIOPMSystemCapabilityAudio; - - if ((kSystemTransitionCapability == _systemTransitionType) && - (_pendingCapability == _currentCapability)) - { - // Cancel the PM state change. 
- _systemTransitionType = kSystemTransitionNone; - *inOutChangeFlags |= kIOPMNotDone; - } - if (__builtin_popcount(_pendingCapability) < - __builtin_popcount(_currentCapability)) - capabilityLoss = true; - } - - // 1. Capability change. - - if (kSystemTransitionCapability == _systemTransitionType) - { - // Dark to Full transition. - if (CAP_GAIN(kIOPMSystemCapabilityGraphics)) - { - tracePoint( kIOPMTracePointDarkWakeExit ); - - willEnterFullWake(); - } - - // Full to Dark transition. - if (CAP_LOSS(kIOPMSystemCapabilityGraphics)) - { - // Clear previous stats - IOLockLock(pmStatsLock); - if (pmStatsAppResponses) - { - pmStatsAppResponses->release(); - pmStatsAppResponses = OSArray::withCapacity(5); - } - IOLockUnlock(pmStatsLock); - - - tracePoint( kIOPMTracePointDarkWakeEntry ); - *inOutChangeFlags |= kIOPMSyncTellPowerDown; - _systemMessageClientMask = kSystemMessageClientPowerd | - kSystemMessageClientLegacyApp; - - - // rdar://15971327 - // Prevent user active transitions before notifying clients - // that system will sleep. - preventTransitionToUserActive(true); - - IOService::setAdvisoryTickleEnable( false ); - - // Publish the sleep reason for full to dark wake - publishSleepReason = true; - lastSleepReason = fullToDarkReason = sleepReason; - - // Publish a UUID for the Sleep --> Wake cycle - handlePublishSleepWakeUUID(true); - if (sleepDelaysReport) { - clock_get_uptime(&ts_sleepStart); - DLOG("sleepDelaysReport f->9 start at 0x%llx\n", ts_sleepStart); - } - - wranglerTickled = false; - } - } - - // 2. System sleep. - - else if (kSystemTransitionSleep == _systemTransitionType) - { - // Beginning of a system sleep transition. - // Cancellation is still possible. - tracePoint( kIOPMTracePointSleepStarted ); - - _systemMessageClientMask = kSystemMessageClientAll; - if ((_currentCapability & kIOPMSystemCapabilityGraphics) == 0) - _systemMessageClientMask &= ~kSystemMessageClientLegacyApp; - if ((_highestCapability & kIOPMSystemCapabilityGraphics) == 0) - _systemMessageClientMask &= ~kSystemMessageClientKernel; + } + + // Update pending wake capability at the beginning of every + // state transition (including synchronize). This will become + // the current capability at the end of the transition. + + if (kSystemTransitionSleep == _systemTransitionType) { + _pendingCapability = 0; + capabilityLoss = true; + } else if (kSystemTransitionNewCapClient != _systemTransitionType) { + _pendingCapability = _desiredCapability | + kIOPMSystemCapabilityCPU | + kIOPMSystemCapabilityNetwork; + + if (_pendingCapability & kIOPMSystemCapabilityGraphics) { + _pendingCapability |= kIOPMSystemCapabilityAudio; + } + + if ((kSystemTransitionCapability == _systemTransitionType) && + (_pendingCapability == _currentCapability)) { + // Cancel the PM state change. + _systemTransitionType = kSystemTransitionNone; + *inOutChangeFlags |= kIOPMNotDone; + } + if (__builtin_popcount(_pendingCapability) < + __builtin_popcount(_currentCapability)) { + capabilityLoss = true; + } + } + + // 1. Capability change. + + if (kSystemTransitionCapability == _systemTransitionType) { + // Dark to Full transition. + if (CAP_GAIN(kIOPMSystemCapabilityGraphics)) { + tracePoint( kIOPMTracePointDarkWakeExit ); + + willEnterFullWake(); + } + + // Full to Dark transition. 
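+		// Note: a full to dark transition messages only powerd and legacy
+		// apps (kIOPMSyncTellPowerDown), clears the app response stats,
+		// and publishes a fresh sleep/wake UUID before wranglerTickled
+		// is cleared.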
+ if (CAP_LOSS(kIOPMSystemCapabilityGraphics)) { + // Clear previous stats + IOLockLock(pmStatsLock); + if (pmStatsAppResponses) { + pmStatsAppResponses->release(); + pmStatsAppResponses = OSArray::withCapacity(5); + } + IOLockUnlock(pmStatsLock); + + + tracePoint( kIOPMTracePointDarkWakeEntry ); + *inOutChangeFlags |= kIOPMSyncTellPowerDown; + _systemMessageClientMask = kSystemMessageClientPowerd | + kSystemMessageClientLegacyApp; + + + // rdar://15971327 + // Prevent user active transitions before notifying clients + // that system will sleep. + preventTransitionToUserActive(true); + + IOService::setAdvisoryTickleEnable( false ); + + // Publish the sleep reason for full to dark wake + publishSleepReason = true; + lastSleepReason = fullToDarkReason = sleepReason; + + // Publish a UUID for the Sleep --> Wake cycle + handlePublishSleepWakeUUID(true); + if (sleepDelaysReport) { + clock_get_uptime(&ts_sleepStart); + DLOG("sleepDelaysReport f->9 start at 0x%llx\n", ts_sleepStart); + } + + wranglerTickled = false; + } + } + // 2. System sleep. + else if (kSystemTransitionSleep == _systemTransitionType) { + // Beginning of a system sleep transition. + // Cancellation is still possible. + tracePoint( kIOPMTracePointSleepStarted ); + + _systemMessageClientMask = kSystemMessageClientAll; + if ((_currentCapability & kIOPMSystemCapabilityGraphics) == 0) { + _systemMessageClientMask &= ~kSystemMessageClientLegacyApp; + } + if ((_highestCapability & kIOPMSystemCapabilityGraphics) == 0) { + _systemMessageClientMask &= ~kSystemMessageClientKernel; + } #if HIBERNATION - gIOHibernateState = 0; + gIOHibernateState = 0; #endif - // Record the reason for dark wake back to sleep - // System may not have ever achieved full wake - - publishSleepReason = true; - lastSleepReason = sleepReason; - if (sleepDelaysReport) { - clock_get_uptime(&ts_sleepStart); - DLOG("sleepDelaysReport 9->0 start at 0x%llx\n", ts_sleepStart); - } - } - - // 3. System wake. - - else if (kSystemTransitionWake == _systemTransitionType) - { - tracePoint( kIOPMTracePointWakeWillPowerOnClients ); - // Clear stats about sleep - - if (_pendingCapability & kIOPMSystemCapabilityGraphics) - { - willEnterFullWake(); - } - else - { - // Message powerd only - _systemMessageClientMask = kSystemMessageClientPowerd; - tellClients(kIOMessageSystemWillPowerOn); - } - } - - // The only location where the sleep reason is published. At this point - // sleep can still be cancelled, but sleep reason should be published - // early for logging purposes. 
- - if (publishSleepReason) - { - static const char * IOPMSleepReasons[] = - { - kIOPMClamshellSleepKey, - kIOPMPowerButtonSleepKey, - kIOPMSoftwareSleepKey, - kIOPMOSSwitchHibernationKey, - kIOPMIdleSleepKey, - kIOPMLowPowerSleepKey, - kIOPMThermalEmergencySleepKey, - kIOPMMaintenanceSleepKey, - kIOPMSleepServiceExitKey, - kIOPMDarkWakeThermalEmergencyKey - }; - - // Record sleep cause in IORegistry - uint32_t reasonIndex = sleepReason - kIOPMSleepReasonClamshell; - if (reasonIndex < sizeof(IOPMSleepReasons)/sizeof(IOPMSleepReasons[0])) { - DLOG("sleep reason %s\n", IOPMSleepReasons[reasonIndex]); - setProperty(kRootDomainSleepReasonKey, IOPMSleepReasons[reasonIndex]); - } - } - - if ((kSystemTransitionNone != _systemTransitionType) && - (kSystemTransitionNewCapClient != _systemTransitionType)) - { - _systemStateGeneration++; - systemDarkWake = false; - - DLOG("=== START (%u->%u, 0x%x) type %u, gen %u, msg %x, " - "dcp %x:%x:%x\n", - currentPowerState, (uint32_t) powerState, *inOutChangeFlags, - _systemTransitionType, _systemStateGeneration, - _systemMessageClientMask, - _desiredCapability, _currentCapability, _pendingCapability); - } -} - -void IOPMrootDomain::handleOurPowerChangeDone( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags changeFlags, - IOPMRequestTag requestTag __unused ) -{ - if (kSystemTransitionNewCapClient == _systemTransitionType) - { - _systemTransitionType = kSystemTransitionNone; - return; - } - - if (_systemTransitionType != kSystemTransitionNone) - { - uint32_t currentPowerState = (uint32_t) getPowerState(); - - if (changeFlags & kIOPMNotDone) - { - // Power down was cancelled or vetoed. - _pendingCapability = _currentCapability; - lastSleepReason = 0; - - if (!CAP_CURRENT(kIOPMSystemCapabilityGraphics) && - CAP_CURRENT(kIOPMSystemCapabilityCPU)) - { + // Record the reason for dark wake back to sleep + // System may not have ever achieved full wake + + publishSleepReason = true; + lastSleepReason = sleepReason; + if (sleepDelaysReport) { + clock_get_uptime(&ts_sleepStart); + DLOG("sleepDelaysReport 9->0 start at 0x%llx\n", ts_sleepStart); + } + } + // 3. System wake. + else if (kSystemTransitionWake == _systemTransitionType) { + tracePoint( kIOPMTracePointWakeWillPowerOnClients ); + // Clear stats about sleep + + if (_pendingCapability & kIOPMSystemCapabilityGraphics) { + willEnterFullWake(); + } else { + // Message powerd only + _systemMessageClientMask = kSystemMessageClientPowerd; + tellClients(kIOMessageSystemWillPowerOn); + } + } + + // The only location where the sleep reason is published. At this point + // sleep can still be cancelled, but sleep reason should be published + // early for logging purposes. 
+ + if (publishSleepReason) { + static const char * IOPMSleepReasons[] = + { + kIOPMClamshellSleepKey, + kIOPMPowerButtonSleepKey, + kIOPMSoftwareSleepKey, + kIOPMOSSwitchHibernationKey, + kIOPMIdleSleepKey, + kIOPMLowPowerSleepKey, + kIOPMThermalEmergencySleepKey, + kIOPMMaintenanceSleepKey, + kIOPMSleepServiceExitKey, + kIOPMDarkWakeThermalEmergencyKey + }; + + // Record sleep cause in IORegistry + uint32_t reasonIndex = sleepReason - kIOPMSleepReasonClamshell; + if (reasonIndex < sizeof(IOPMSleepReasons) / sizeof(IOPMSleepReasons[0])) { + DLOG("sleep reason %s\n", IOPMSleepReasons[reasonIndex]); + setProperty(kRootDomainSleepReasonKey, IOPMSleepReasons[reasonIndex]); + } + } + + if ((kSystemTransitionNone != _systemTransitionType) && + (kSystemTransitionNewCapClient != _systemTransitionType)) { + _systemStateGeneration++; + systemDarkWake = false; + + DLOG("=== START (%u->%u, 0x%x) type %u, gen %u, msg %x, " + "dcp %x:%x:%x\n", + currentPowerState, (uint32_t) powerState, *inOutChangeFlags, + _systemTransitionType, _systemStateGeneration, + _systemMessageClientMask, + _desiredCapability, _currentCapability, _pendingCapability); + } +} + +void +IOPMrootDomain::handleOurPowerChangeDone( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags changeFlags, + IOPMRequestTag requestTag __unused ) +{ + if (kSystemTransitionNewCapClient == _systemTransitionType) { + _systemTransitionType = kSystemTransitionNone; + return; + } + + if (_systemTransitionType != kSystemTransitionNone) { + uint32_t currentPowerState = (uint32_t) getPowerState(); + + if (changeFlags & kIOPMNotDone) { + // Power down was cancelled or vetoed. + _pendingCapability = _currentCapability; + lastSleepReason = 0; + + if (!CAP_CURRENT(kIOPMSystemCapabilityGraphics) && + CAP_CURRENT(kIOPMSystemCapabilityCPU)) { #if !CONFIG_EMBEDDED - pmPowerStateQueue->submitPowerEvent( - kPowerEventPolicyStimulus, - (void *) kStimulusDarkWakeReentry, - _systemStateGeneration ); + pmPowerStateQueue->submitPowerEvent( + kPowerEventPolicyStimulus, + (void *) kStimulusDarkWakeReentry, + _systemStateGeneration ); #else - // On embedded, there are no factors that can prolong a - // "darkWake" when a power down is vetoed. We need to - // promote to "fullWake" at least once so that factors - // that prevent idle sleep can assert themselves if required - pmPowerStateQueue->submitPowerEvent( - kPowerEventPolicyStimulus, - (void *) kStimulusDarkWakeActivityTickle); + // On embedded, there are no factors that can prolong a + // "darkWake" when a power down is vetoed. We need to + // promote to "fullWake" at least once so that factors + // that prevent idle sleep can assert themselves if required + pmPowerStateQueue->submitPowerEvent( + kPowerEventPolicyStimulus, + (void *) kStimulusDarkWakeActivityTickle); #endif - } - - // Revert device desire to max. - changePowerStateToPriv(ON_STATE); - } - else - { - // Send message on dark wake to full wake promotion. - // tellChangeUp() handles the normal SLEEP->ON case. 
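// A minimal standalone sketch (not part of this patch) of the sleep-reason
// lookup published above. It assumes the kIOPMSleepReason* constants are
// contiguous and start at kIOPMSleepReasonClamshell (101 in this source);
// the names and helper below are illustrative only.
#include <stdint.h>

#define kAssumedSleepReasonBase 101u

static const char * const kSleepReasonNames[] = {
	"Clamshell Sleep", "Power Button Sleep", "Software Sleep",
	"OS Switch Sleep", "Idle Sleep", "Low Power Sleep",
	"Thermal Emergency Sleep", "Maintenance Sleep",
	"Sleep Service Back to Sleep", "Dark Wake Thermal Emergency"
};

static const char *
sleepReasonName(uint32_t sleepReason)
{
	// Unsigned subtraction wraps for reasons below the base, so the single
	// bounds check also rejects them, exactly like the reasonIndex test above.
	uint32_t idx = sleepReason - kAssumedSleepReasonBase;
	if (idx < sizeof(kSleepReasonNames) / sizeof(kSleepReasonNames[0])) {
		return kSleepReasonNames[idx];
	}
	return "Unknown";
}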
- - if (kSystemTransitionCapability == _systemTransitionType) - { - if (CAP_GAIN(kIOPMSystemCapabilityGraphics)) - { - lastSleepReason = 0; // stop logging wrangler tickles - tellClients(kIOMessageSystemHasPoweredOn); - } - if (CAP_LOSS(kIOPMSystemCapabilityGraphics)) - { - // Going dark, reset full wake state - // userIsActive will be cleared by wrangler powering down - fullWakeReason = kFullWakeReasonNone; - - if (ts_sleepStart) { - clock_get_uptime(&wake2DarkwakeDelay); - SUB_ABSOLUTETIME(&wake2DarkwakeDelay, &ts_sleepStart); - DLOG("sleepDelaysReport f->9 end 0x%llx\n", wake2DarkwakeDelay); - ts_sleepStart = 0; - } - } - } - - // Reset state after exiting from dark wake. - - if (CAP_GAIN(kIOPMSystemCapabilityGraphics) || - CAP_LOSS(kIOPMSystemCapabilityCPU)) - { - darkWakeMaintenance = false; - darkWakeToSleepASAP = false; - pciCantSleepValid = false; - darkWakeSleepService = false; - - if (CAP_LOSS(kIOPMSystemCapabilityCPU)) - { - // Remove the influence of display power assertion - // before next system wake. - if (wrangler) wrangler->changePowerStateForRootDomain( - kWranglerPowerStateMin ); - removeProperty(gIOPMUserTriggeredFullWakeKey); - } - } - - // Entered dark mode. - - if (((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0) && - (_pendingCapability & kIOPMSystemCapabilityCPU)) - { - // Queue an evaluation of whether to remain in dark wake, - // and for how long. This serves the purpose of draining - // any assertions from the queue. - - pmPowerStateQueue->submitPowerEvent( - kPowerEventPolicyStimulus, - (void *) kStimulusDarkWakeEntry, - _systemStateGeneration ); - } - } - - DLOG("=== FINISH (%u->%u, 0x%x) type %u, gen %u, msg %x, " - "dcp %x:%x:%x, dbgtimer %u\n", - currentPowerState, (uint32_t) powerState, changeFlags, - _systemTransitionType, _systemStateGeneration, - _systemMessageClientMask, - _desiredCapability, _currentCapability, _pendingCapability, - _lastDebugWakeSeconds); - - if (_pendingCapability & kIOPMSystemCapabilityGraphics) - { - displayWakeCnt++; + } + + // Revert device desire to max. + changePowerStateToPriv(ON_STATE); + } else { + // Send message on dark wake to full wake promotion. + // tellChangeUp() handles the normal SLEEP->ON case. + + if (kSystemTransitionCapability == _systemTransitionType) { + if (CAP_GAIN(kIOPMSystemCapabilityGraphics)) { + lastSleepReason = 0; // stop logging wrangler tickles + tellClients(kIOMessageSystemHasPoweredOn); + } + if (CAP_LOSS(kIOPMSystemCapabilityGraphics)) { + // Going dark, reset full wake state + // userIsActive will be cleared by wrangler powering down + fullWakeReason = kFullWakeReasonNone; + + if (ts_sleepStart) { + clock_get_uptime(&wake2DarkwakeDelay); + SUB_ABSOLUTETIME(&wake2DarkwakeDelay, &ts_sleepStart); + DLOG("sleepDelaysReport f->9 end 0x%llx\n", wake2DarkwakeDelay); + ts_sleepStart = 0; + } + } + } + + // Reset state after exiting from dark wake. + + if (CAP_GAIN(kIOPMSystemCapabilityGraphics) || + CAP_LOSS(kIOPMSystemCapabilityCPU)) { + darkWakeMaintenance = false; + darkWakeToSleepASAP = false; + pciCantSleepValid = false; + darkWakeSleepService = false; + + if (CAP_LOSS(kIOPMSystemCapabilityCPU)) { + // Remove the influence of display power assertion + // before next system wake. + if (wrangler) { + wrangler->changePowerStateForRootDomain( + kWranglerPowerStateMin ); + } + removeProperty(gIOPMUserTriggeredFullWakeKey); + } + } + + // Entered dark mode. 
+ + if (((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0) && + (_pendingCapability & kIOPMSystemCapabilityCPU)) { + // Queue an evaluation of whether to remain in dark wake, + // and for how long. This serves the purpose of draining + // any assertions from the queue. + + pmPowerStateQueue->submitPowerEvent( + kPowerEventPolicyStimulus, + (void *) kStimulusDarkWakeEntry, + _systemStateGeneration ); + } + } + + DLOG("=== FINISH (%u->%u, 0x%x) type %u, gen %u, msg %x, " + "dcp %x:%x:%x, dbgtimer %u\n", + currentPowerState, (uint32_t) powerState, changeFlags, + _systemTransitionType, _systemStateGeneration, + _systemMessageClientMask, + _desiredCapability, _currentCapability, _pendingCapability, + _lastDebugWakeSeconds); + + if (_pendingCapability & kIOPMSystemCapabilityGraphics) { + displayWakeCnt++; #if DARK_TO_FULL_EVALUATE_CLAMSHELL - if (clamshellExists && fullWakeThreadCall && - CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) - { - // Not the initial graphics full power, graphics won't - // send a power notification to trigger a lid state - // evaluation. - - AbsoluteTime deadline; - clock_interval_to_deadline(45, kSecondScale, &deadline); - thread_call_enter_delayed(fullWakeThreadCall, deadline); - } + if (clamshellExists && fullWakeThreadCall && + CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) { + // Not the initial graphics full power, graphics won't + // send a power notification to trigger a lid state + // evaluation. + + AbsoluteTime deadline; + clock_interval_to_deadline(45, kSecondScale, &deadline); + thread_call_enter_delayed(fullWakeThreadCall, deadline); + } #endif - } - else if (CAP_GAIN(kIOPMSystemCapabilityCPU)) - darkWakeCnt++; - - // Update current system capability. - if (_currentCapability != _pendingCapability) - _currentCapability = _pendingCapability; + } else if (CAP_GAIN(kIOPMSystemCapabilityCPU)) { + darkWakeCnt++; + } - // Update highest system capability. + // Update current system capability. + if (_currentCapability != _pendingCapability) { + _currentCapability = _pendingCapability; + } - _highestCapability |= _currentCapability; + // Update highest system capability. - if (darkWakePostTickle && - (kSystemTransitionWake == _systemTransitionType) && - (gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == - kDarkWakeFlagHIDTickleLate) - { - darkWakePostTickle = false; - reportUserInput(); - } - else if (wranglerTickled) { - requestFullWake( kFullWakeReasonLocalUser ); - } + _highestCapability |= _currentCapability; - // Reset tracepoint at completion of capability change, - // completion of wake transition, and aborted sleep transition. + if (darkWakePostTickle && + (kSystemTransitionWake == _systemTransitionType) && + (gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == + kDarkWakeFlagHIDTickleLate) { + darkWakePostTickle = false; + reportUserInput(); + } else if (wranglerTickled) { + requestFullWake( kFullWakeReasonLocalUser ); + } - if ((_systemTransitionType == kSystemTransitionCapability) || - (_systemTransitionType == kSystemTransitionWake) || - ((_systemTransitionType == kSystemTransitionSleep) && - (changeFlags & kIOPMNotDone))) - { - setProperty(kIOPMSystemCapabilitiesKey, _currentCapability, 64); - tracePoint( kIOPMTracePointSystemUp ); - } + // Reset tracepoint at completion of capability change, + // completion of wake transition, and aborted sleep transition. 
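// A minimal sketch (not part of this patch) of the capability bookkeeping
// above. CAP_GAIN() and CAP_LOSS() are macros defined earlier in this file;
// assuming they compare the current and pending capability masks, they
// reduce to the bit tests below, and "capability loss" is the popcount
// comparison used in handleOurPowerChangeStart().
#include <stdint.h>
#include <stdbool.h>

static inline bool
capGain(uint32_t cur, uint32_t pend, uint32_t bit)
{
	return ((cur & bit) == 0) && ((pend & bit) != 0); /* bit turning on */
}

static inline bool
capLoss(uint32_t cur, uint32_t pend, uint32_t bit)
{
	return ((cur & bit) != 0) && ((pend & bit) == 0); /* bit turning off */
}

static inline bool
isCapabilityLoss(uint32_t cur, uint32_t pend)
{
	// Fewer bits set in the pending mask than in the current mask.
	return __builtin_popcount(pend) < __builtin_popcount(cur);
}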
- _systemTransitionType = kSystemTransitionNone; - _systemMessageClientMask = 0; - toldPowerdCapWillChange = false; + if ((_systemTransitionType == kSystemTransitionCapability) || + (_systemTransitionType == kSystemTransitionWake) || + ((_systemTransitionType == kSystemTransitionSleep) && + (changeFlags & kIOPMNotDone))) { + setProperty(kIOPMSystemCapabilitiesKey, _currentCapability, 64); + tracePoint( kIOPMTracePointSystemUp ); + } - logGraphicsClamp = false; + _systemTransitionType = kSystemTransitionNone; + _systemMessageClientMask = 0; + toldPowerdCapWillChange = false; - if (lowBatteryCondition) { - privateSleepSystem (kIOPMSleepReasonLowPower); - } - else if ((fullWakeReason == kFullWakeReasonDisplayOn) && (!displayPowerOnRequested)) { - // Request for full wake is removed while system is waking up to full wake - DLOG("DisplayOn fullwake request is removed\n"); - handleDisplayPowerOn(); - } + logGraphicsClamp = false; - } + if (lowBatteryCondition) { + privateSleepSystem(kIOPMSleepReasonLowPower); + } else if ((fullWakeReason == kFullWakeReasonDisplayOn) && (!displayPowerOnRequested)) { + // Request for full wake is removed while system is waking up to full wake + DLOG("DisplayOn fullwake request is removed\n"); + handleDisplayPowerOn(); + } + } } //****************************************************************************** // PM actions for graphics and audio. //****************************************************************************** -void IOPMrootDomain::overridePowerChangeForUIService( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex * inOutPowerState, - IOPMPowerChangeFlags * inOutChangeFlags ) -{ - uint32_t powerState = (uint32_t) *inOutPowerState; - uint32_t changeFlags = (uint32_t) *inOutChangeFlags; - - if (kSystemTransitionNone == _systemTransitionType) - { - // Not in midst of a system transition. - // Do not modify power limit enable state. - } - else if ((actions->parameter & kPMActionsFlagLimitPower) == 0) - { - // Activate power limiter. - - if ((actions->parameter & kPMActionsFlagIsDisplayWrangler) && - ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0) && - (changeFlags & kIOPMSynchronize)) - { - actions->parameter |= kPMActionsFlagLimitPower; - } - else if ((actions->parameter & kPMActionsFlagIsAudioDevice) && - ((gDarkWakeFlags & kDarkWakeFlagAudioNotSuppressed) == 0) && - ((_pendingCapability & kIOPMSystemCapabilityAudio) == 0) && - (changeFlags & kIOPMSynchronize)) - { - actions->parameter |= kPMActionsFlagLimitPower; - } - else if ((actions->parameter & kPMActionsFlagIsGraphicsDevice) && - (_systemTransitionType == kSystemTransitionSleep)) - { - // For graphics devices, arm the limiter when entering - // system sleep. Not when dropping to dark wake. - actions->parameter |= kPMActionsFlagLimitPower; - } - - if (actions->parameter & kPMActionsFlagLimitPower) - { - DLOG("+ plimit %s %p\n", - service->getName(), OBFUSCATE(service)); - } - } - else - { - // Remove power limit. 
- - if ((actions->parameter & ( - kPMActionsFlagIsDisplayWrangler | - kPMActionsFlagIsGraphicsDevice )) && - (_pendingCapability & kIOPMSystemCapabilityGraphics)) - { - actions->parameter &= ~kPMActionsFlagLimitPower; - } - else if ((actions->parameter & kPMActionsFlagIsAudioDevice) && - (_pendingCapability & kIOPMSystemCapabilityAudio)) - { - actions->parameter &= ~kPMActionsFlagLimitPower; - } - - if ((actions->parameter & kPMActionsFlagLimitPower) == 0) - { - DLOG("- plimit %s %p\n", - service->getName(), OBFUSCATE(service)); - } - } - - if (actions->parameter & kPMActionsFlagLimitPower) - { - uint32_t maxPowerState = (uint32_t)(-1); - - if (changeFlags & (kIOPMDomainDidChange | kIOPMDomainWillChange)) - { - // Enforce limit for system power/cap transitions. - - maxPowerState = 0; - if ((service->getPowerState() > maxPowerState) && - (actions->parameter & kPMActionsFlagIsDisplayWrangler)) - { - maxPowerState++; - - // Remove lingering effects of any tickle before entering - // dark wake. It will take a new tickle to return to full - // wake, so the existing tickle state is useless. - - if (changeFlags & kIOPMDomainDidChange) - *inOutChangeFlags |= kIOPMExpireIdleTimer; - } - else if (actions->parameter & kPMActionsFlagIsGraphicsDevice) - { - maxPowerState++; - } - } - else - { - // Deny all self-initiated changes when power is limited. - // Wrangler tickle should never defeat the limiter. - - maxPowerState = service->getPowerState(); - } - - if (powerState > maxPowerState) - { - DLOG("> plimit %s %p (%u->%u, 0x%x)\n", - service->getName(), OBFUSCATE(service), powerState, maxPowerState, - changeFlags); - *inOutPowerState = maxPowerState; - - if (darkWakePostTickle && - (actions->parameter & kPMActionsFlagIsDisplayWrangler) && - (changeFlags & kIOPMDomainWillChange) && - ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == - kDarkWakeFlagHIDTickleEarly)) - { - darkWakePostTickle = false; - reportUserInput(); - } - } - - if (!graphicsSuppressed && (changeFlags & kIOPMDomainDidChange)) - { - if (logGraphicsClamp) - { - AbsoluteTime now; - uint64_t nsec; - - clock_get_uptime(&now); - SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime); - absolutetime_to_nanoseconds(now, &nsec); - if (kIOLogPMRootDomain & gIOKitDebug) - MSG("Graphics suppressed %u ms\n", - ((int)((nsec) / NSEC_PER_MSEC))); - } - graphicsSuppressed = true; - } - } -} - -void IOPMrootDomain::handleActivityTickleForDisplayWrangler( - IOService * service, - IOPMActions * actions ) +void +IOPMrootDomain::overridePowerChangeForUIService( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex * inOutPowerState, + IOPMPowerChangeFlags * inOutChangeFlags ) +{ + uint32_t powerState = (uint32_t) *inOutPowerState; + uint32_t changeFlags = (uint32_t) *inOutChangeFlags; + + if (kSystemTransitionNone == _systemTransitionType) { + // Not in midst of a system transition. + // Do not modify power limit enable state. + } else if ((actions->parameter & kPMActionsFlagLimitPower) == 0) { + // Activate power limiter. 
+ + if ((actions->parameter & kPMActionsFlagIsDisplayWrangler) && + ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0) && + (changeFlags & kIOPMSynchronize)) { + actions->parameter |= kPMActionsFlagLimitPower; + } else if ((actions->parameter & kPMActionsFlagIsAudioDevice) && + ((gDarkWakeFlags & kDarkWakeFlagAudioNotSuppressed) == 0) && + ((_pendingCapability & kIOPMSystemCapabilityAudio) == 0) && + (changeFlags & kIOPMSynchronize)) { + actions->parameter |= kPMActionsFlagLimitPower; + } else if ((actions->parameter & kPMActionsFlagIsGraphicsDevice) && + (_systemTransitionType == kSystemTransitionSleep)) { + // For graphics devices, arm the limiter when entering + // system sleep. Not when dropping to dark wake. + actions->parameter |= kPMActionsFlagLimitPower; + } + + if (actions->parameter & kPMActionsFlagLimitPower) { + DLOG("+ plimit %s %p\n", + service->getName(), OBFUSCATE(service)); + } + } else { + // Remove power limit. + + if ((actions->parameter & ( + kPMActionsFlagIsDisplayWrangler | + kPMActionsFlagIsGraphicsDevice)) && + (_pendingCapability & kIOPMSystemCapabilityGraphics)) { + actions->parameter &= ~kPMActionsFlagLimitPower; + } else if ((actions->parameter & kPMActionsFlagIsAudioDevice) && + (_pendingCapability & kIOPMSystemCapabilityAudio)) { + actions->parameter &= ~kPMActionsFlagLimitPower; + } + + if ((actions->parameter & kPMActionsFlagLimitPower) == 0) { + DLOG("- plimit %s %p\n", + service->getName(), OBFUSCATE(service)); + } + } + + if (actions->parameter & kPMActionsFlagLimitPower) { + uint32_t maxPowerState = (uint32_t)(-1); + + if (changeFlags & (kIOPMDomainDidChange | kIOPMDomainWillChange)) { + // Enforce limit for system power/cap transitions. + + maxPowerState = 0; + if ((service->getPowerState() > maxPowerState) && + (actions->parameter & kPMActionsFlagIsDisplayWrangler)) { + maxPowerState++; + + // Remove lingering effects of any tickle before entering + // dark wake. It will take a new tickle to return to full + // wake, so the existing tickle state is useless. + + if (changeFlags & kIOPMDomainDidChange) { + *inOutChangeFlags |= kIOPMExpireIdleTimer; + } + } else if (actions->parameter & kPMActionsFlagIsGraphicsDevice) { + maxPowerState++; + } + } else { + // Deny all self-initiated changes when power is limited. + // Wrangler tickle should never defeat the limiter. + + maxPowerState = service->getPowerState(); + } + + if (powerState > maxPowerState) { + DLOG("> plimit %s %p (%u->%u, 0x%x)\n", + service->getName(), OBFUSCATE(service), powerState, maxPowerState, + changeFlags); + *inOutPowerState = maxPowerState; + + if (darkWakePostTickle && + (actions->parameter & kPMActionsFlagIsDisplayWrangler) && + (changeFlags & kIOPMDomainWillChange) && + ((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) == + kDarkWakeFlagHIDTickleEarly)) { + darkWakePostTickle = false; + reportUserInput(); + } + } + + if (!graphicsSuppressed && (changeFlags & kIOPMDomainDidChange)) { + if (logGraphicsClamp) { + AbsoluteTime now; + uint64_t nsec; + + clock_get_uptime(&now); + SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime); + absolutetime_to_nanoseconds(now, &nsec); + if (kIOLogPMRootDomain & gIOKitDebug) { + MSG("Graphics suppressed %u ms\n", + ((int)((nsec) / NSEC_PER_MSEC))); + } + } + graphicsSuppressed = true; + } + } +} + +void +IOPMrootDomain::handleActivityTickleForDisplayWrangler( + IOService * service, + IOPMActions * actions ) { #if !NO_KERNEL_HID - // Warning: Not running in PM work loop context - don't modify state !!! 
- // Trap tickle directed to IODisplayWrangler while running with graphics - // capability suppressed. - - assert(service == wrangler); - - clock_get_uptime(&userActivityTime); - bool aborting = ((lastSleepReason == kIOPMSleepReasonIdle) - || (lastSleepReason == kIOPMSleepReasonMaintenance) - || (lastSleepReason == kIOPMSleepReasonSoftware)); - if (aborting) { - userActivityCount++; - DLOG("display wrangler tickled1 %d lastSleepReason %d\n", - userActivityCount, lastSleepReason); - } - - if (!wranglerTickled && - ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0)) - { - DLOG("display wrangler tickled\n"); - if (kIOLogPMRootDomain & gIOKitDebug) - OSReportWithBacktrace("Dark wake display tickle"); - if (pmPowerStateQueue) - { - pmPowerStateQueue->submitPowerEvent( - kPowerEventPolicyStimulus, - (void *) kStimulusDarkWakeActivityTickle, - true /* set wake type */ ); - } - } + // Warning: Not running in PM work loop context - don't modify state !!! + // Trap tickle directed to IODisplayWrangler while running with graphics + // capability suppressed. + + assert(service == wrangler); + + clock_get_uptime(&userActivityTime); + bool aborting = ((lastSleepReason == kIOPMSleepReasonIdle) + || (lastSleepReason == kIOPMSleepReasonMaintenance) + || (lastSleepReason == kIOPMSleepReasonSoftware)); + if (aborting) { + userActivityCount++; + DLOG("display wrangler tickled1 %d lastSleepReason %d\n", + userActivityCount, lastSleepReason); + } + + if (!wranglerTickled && + ((_pendingCapability & kIOPMSystemCapabilityGraphics) == 0)) { + DLOG("display wrangler tickled\n"); + if (kIOLogPMRootDomain & gIOKitDebug) { + OSReportWithBacktrace("Dark wake display tickle"); + } + if (pmPowerStateQueue) { + pmPowerStateQueue->submitPowerEvent( + kPowerEventPolicyStimulus, + (void *) kStimulusDarkWakeActivityTickle, + true /* set wake type */ ); + } + } #endif } -void IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler( - IOService * service, - IOPMActions * actions, - const OSSymbol * powerClient, - IOPMPowerStateIndex oldPowerState, - IOPMPowerStateIndex newPowerState ) +void +IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler( + IOService * service, + IOPMActions * actions, + const OSSymbol * powerClient, + IOPMPowerStateIndex oldPowerState, + IOPMPowerStateIndex newPowerState ) { #if !NO_KERNEL_HID - assert(service == wrangler); - - // This function implements half of the user active detection - // by monitoring changes to the display wrangler's device desire. - // - // User becomes active when either: - // 1. Wrangler's DeviceDesire increases to max, but wrangler is already - // in max power state. This desire change in absence of a power state - // change is detected within. This handles the case when user becomes - // active while the display is already lit by setDisplayPowerOn(). - // - // 2. Power state change to max, and DeviceDesire is also at max. - // Handled by displayWranglerNotification(). - // - // User becomes inactive when DeviceDesire drops to sleep state or below. 
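// A minimal sketch (not part of this patch) of the user-active rules that
// the comment above describes for handleUpdatePowerClientForDisplayWrangler().
// The constants are stand-ins for the wrangler power state levels.
#include <stdint.h>
#include <stdbool.h>

#define kSketchWranglerMax   4u  /* stand-in for kWranglerPowerStateMax */
#define kSketchWranglerSleep 2u  /* stand-in for kWranglerPowerStateSleep */

static inline bool
userBecameActive(uint32_t oldDesire, uint32_t newDesire, uint32_t powerState)
{
	// DeviceDesire rose to max while the wrangler is already at max power.
	return (newDesire > oldDesire) && (newDesire == kSketchWranglerMax) &&
	       (powerState == kSketchWranglerMax);
}

static inline bool
userBecameInactive(uint32_t oldDesire, uint32_t newDesire)
{
	// DeviceDesire dropped to the sleep state or below.
	return (newDesire < oldDesire) && (newDesire <= kSketchWranglerSleep);
}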
- - DLOG("wrangler %s (ps %u, %u->%u)\n", - powerClient->getCStringNoCopy(), - (uint32_t) service->getPowerState(), - (uint32_t) oldPowerState, (uint32_t) newPowerState); - - if (powerClient == gIOPMPowerClientDevice) - { - if ((newPowerState > oldPowerState) && - (newPowerState == kWranglerPowerStateMax) && - (service->getPowerState() == kWranglerPowerStateMax)) - { - evaluatePolicy( kStimulusEnterUserActiveState ); - } - else - if ((newPowerState < oldPowerState) && - (newPowerState <= kWranglerPowerStateSleep)) - { - evaluatePolicy( kStimulusLeaveUserActiveState ); - } - } - - if (newPowerState <= kWranglerPowerStateSleep) { - evaluatePolicy( kStimulusDisplayWranglerSleep ); - } - else if (newPowerState == kWranglerPowerStateMax) { - evaluatePolicy( kStimulusDisplayWranglerWake ); - } + assert(service == wrangler); + + // This function implements half of the user active detection + // by monitoring changes to the display wrangler's device desire. + // + // User becomes active when either: + // 1. Wrangler's DeviceDesire increases to max, but wrangler is already + // in max power state. This desire change in absence of a power state + // change is detected within. This handles the case when user becomes + // active while the display is already lit by setDisplayPowerOn(). + // + // 2. Power state change to max, and DeviceDesire is also at max. + // Handled by displayWranglerNotification(). + // + // User becomes inactive when DeviceDesire drops to sleep state or below. + + DLOG("wrangler %s (ps %u, %u->%u)\n", + powerClient->getCStringNoCopy(), + (uint32_t) service->getPowerState(), + (uint32_t) oldPowerState, (uint32_t) newPowerState); + + if (powerClient == gIOPMPowerClientDevice) { + if ((newPowerState > oldPowerState) && + (newPowerState == kWranglerPowerStateMax) && + (service->getPowerState() == kWranglerPowerStateMax)) { + evaluatePolicy( kStimulusEnterUserActiveState ); + } else if ((newPowerState < oldPowerState) && + (newPowerState <= kWranglerPowerStateSleep)) { + evaluatePolicy( kStimulusLeaveUserActiveState ); + } + } + + if (newPowerState <= kWranglerPowerStateSleep) { + evaluatePolicy( kStimulusDisplayWranglerSleep ); + } else if (newPowerState == kWranglerPowerStateMax) { + evaluatePolicy( kStimulusDisplayWranglerWake ); + } #endif } @@ -5759,24 +5647,23 @@ void IOPMrootDomain::handleUpdatePowerClientForDisplayWrangler( // User active state management //****************************************************************************** -void IOPMrootDomain::preventTransitionToUserActive( bool prevent ) +void +IOPMrootDomain::preventTransitionToUserActive( bool prevent ) { #if !NO_KERNEL_HID - _preventUserActive = prevent; - if (wrangler && !_preventUserActive) - { - // Allowing transition to user active, but the wrangler may have - // already powered ON in case of sleep cancel/revert. Poll the - // same conditions checked for in displayWranglerNotification() - // to bring the user active state up to date. - - if ((wrangler->getPowerState() == kWranglerPowerStateMax) && - (wrangler->getPowerStateForClient(gIOPMPowerClientDevice) == - kWranglerPowerStateMax)) - { - evaluatePolicy( kStimulusEnterUserActiveState ); - } - } + _preventUserActive = prevent; + if (wrangler && !_preventUserActive) { + // Allowing transition to user active, but the wrangler may have + // already powered ON in case of sleep cancel/revert. Poll the + // same conditions checked for in displayWranglerNotification() + // to bring the user active state up to date. 
+ + if ((wrangler->getPowerState() == kWranglerPowerStateMax) && + (wrangler->getPowerStateForClient(gIOPMPowerClientDevice) == + kWranglerPowerStateMax)) { + evaluatePolicy( kStimulusEnterUserActiveState ); + } + } #endif } @@ -5784,306 +5671,299 @@ void IOPMrootDomain::preventTransitionToUserActive( bool prevent ) // Approve usage of delayed child notification by PM. //****************************************************************************** -bool IOPMrootDomain::shouldDelayChildNotification( - IOService * service ) +bool +IOPMrootDomain::shouldDelayChildNotification( + IOService * service ) { - if (((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) != 0) && - (kFullWakeReasonNone == fullWakeReason) && - (kSystemTransitionWake == _systemTransitionType)) - { - DLOG("%s: delay child notify\n", service->getName()); - return true; - } - return false; + if (((gDarkWakeFlags & kDarkWakeFlagHIDTickleMask) != 0) && + (kFullWakeReasonNone == fullWakeReason) && + (kSystemTransitionWake == _systemTransitionType)) { + DLOG("%s: delay child notify\n", service->getName()); + return true; + } + return false; } //****************************************************************************** // PM actions for PCI device. //****************************************************************************** -void IOPMrootDomain::handlePowerChangeStartForPCIDevice( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags * inOutChangeFlags ) +void +IOPMrootDomain::handlePowerChangeStartForPCIDevice( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags * inOutChangeFlags ) { - pmTracer->tracePCIPowerChange( - PMTraceWorker::kPowerChangeStart, - service, *inOutChangeFlags, - (actions->parameter & kPMActionsPCIBitNumberMask)); + pmTracer->tracePCIPowerChange( + PMTraceWorker::kPowerChangeStart, + service, *inOutChangeFlags, + (actions->parameter & kPMActionsPCIBitNumberMask)); } -void IOPMrootDomain::handlePowerChangeDoneForPCIDevice( - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags changeFlags ) -{ - pmTracer->tracePCIPowerChange( - PMTraceWorker::kPowerChangeCompleted, - service, changeFlags, - (actions->parameter & kPMActionsPCIBitNumberMask)); +void +IOPMrootDomain::handlePowerChangeDoneForPCIDevice( + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags changeFlags ) +{ + pmTracer->tracePCIPowerChange( + PMTraceWorker::kPowerChangeCompleted, + service, changeFlags, + (actions->parameter & kPMActionsPCIBitNumberMask)); +} + +//****************************************************************************** +// registerInterest +// +// Override IOService::registerInterest() to intercept special clients. 
+//******************************************************************************
+
+class IOPMServiceInterestNotifier : public _IOServiceInterestNotifier
+{
+ friend class IOPMrootDomain;
+ OSDeclareDefaultStructors(IOPMServiceInterestNotifier)
+
+protected:
+ uint32_t ackTimeoutCnt;
+ uint32_t msgType; // Message pending ack
+
+ uint64_t uuid0;
+ uint64_t uuid1;
+ const OSSymbol *identifier;
+};
+
+OSDefineMetaClassAndStructors(IOPMServiceInterestNotifier, _IOServiceInterestNotifier)
+
+IONotifier * IOPMrootDomain::registerInterest(
+ const OSSymbol * typeOfInterest,
+ IOServiceInterestHandler handler,
+ void * target, void * ref )
+{
+ IOPMServiceInterestNotifier *notifier = 0;
+ bool isSystemCapabilityClient;
+ bool isKernelCapabilityClient;
+ IOReturn rc = kIOReturnError;
+
+ isSystemCapabilityClient =
+ typeOfInterest &&
+ typeOfInterest->isEqualTo(kIOPMSystemCapabilityInterest);
+
+ isKernelCapabilityClient =
+ typeOfInterest &&
+ typeOfInterest->isEqualTo(gIOPriorityPowerStateInterest);
+
+ if (isSystemCapabilityClient) {
+ typeOfInterest = gIOAppPowerStateInterest;
+ }
+
+ notifier = new IOPMServiceInterestNotifier;
+ if (!notifier) {
+ return NULL;
+ }
+
+ if (notifier->init()) {
+ rc = super::registerInterestForNotifier(notifier, typeOfInterest, handler, target, ref);
+ }
+ if (rc != kIOReturnSuccess) {
+ notifier->release();
+ notifier = 0;
+
+ return NULL;
+ }
+ if (pmPowerStateQueue) {
+ notifier->ackTimeoutCnt = 0;
+ if (isSystemCapabilityClient) {
+ notifier->retain();
+ if (pmPowerStateQueue->submitPowerEvent(
+ kPowerEventRegisterSystemCapabilityClient, notifier) == false) {
+ notifier->release();
+ }
+ }
+
+ if (isKernelCapabilityClient) {
+ notifier->retain();
+ if (pmPowerStateQueue->submitPowerEvent(
+ kPowerEventRegisterKernelCapabilityClient, notifier) == false) {
+ notifier->release();
+ }
+ }
+ }
+
+ OSData *data = NULL;
+ uint8_t *uuid = NULL;
+ OSKext *kext = OSKext::lookupKextWithAddress((vm_address_t)handler);
+ if (kext) {
+ data = kext->copyUUID();
+ }
+ if (data && (data->getLength() == sizeof(uuid_t))) {
+ uuid = (uint8_t *)(data->getBytesNoCopy());
+
+ notifier->uuid0 = ((uint64_t)(uuid[0]) << 56) | ((uint64_t)(uuid[1]) << 48) | ((uint64_t)(uuid[2]) << 40) |
+ ((uint64_t)(uuid[3]) << 32) | ((uint64_t)(uuid[4]) << 24) | ((uint64_t)(uuid[5]) << 16) |
+ ((uint64_t)(uuid[6]) << 8) | (uuid[7]);
+ notifier->uuid1 = ((uint64_t)(uuid[8]) << 56) | ((uint64_t)(uuid[9]) << 48) | ((uint64_t)(uuid[10]) << 40) |
+ ((uint64_t)(uuid[11]) << 32) | ((uint64_t)(uuid[12]) << 24) | ((uint64_t)(uuid[13]) << 16) |
+ ((uint64_t)(uuid[14]) << 8) | (uuid[15]);
+
+ notifier->identifier = kext->getIdentifier();
+ }
+ if (kext) {
+ kext->release();
+ }
+ if (data) {
+ data->release();
+ }
+
+ return notifier;
}

//******************************************************************************
-// registerInterest
+// systemMessageFilter
//
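// A hypothetical in-kernel client of the registerInterest() override above
// (illustrative only, not part of this patch). It assumes the private
// kIOPMSystemCapabilityInterest type and the IOPMSystemCapabilityChangeParameters
// structure declared in IOKit/pwr_mgt/IOPMPrivate.h.
#include <IOKit/IOService.h>
#include <IOKit/IOMessage.h>
#include <IOKit/pwr_mgt/IOPMPrivate.h>

static IOReturn
myCapabilityHandler(void * target, void * refCon,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t argSize)
{
	if (messageType == kIOMessageSystemCapabilityChange) {
		IOPMSystemCapabilityChangeParameters * params =
		    (IOPMSystemCapabilityChangeParameters *) messageArgument;
		if ((params->changeFlags & kIOPMSystemCapabilityDidChange) &&
		    (params->toCapabilities & kIOPMSystemCapabilityGraphics)) {
			// Full (graphics) wake has been reached.
		}
	}
	return kIOReturnSuccess;
}

// Registration against the root domain might then look like:
//   IONotifier * n = rootDomain->registerInterest(
//       OSSymbol::withCStringNoCopy(kIOPMSystemCapabilityInterest),
//       &myCapabilityHandler, this /* target */, NULL /* ref */);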
//****************************************************************************** -class IOPMServiceInterestNotifier: public _IOServiceInterestNotifier +bool +IOPMrootDomain::systemMessageFilter( + void * object, void * arg1, void * arg2, void * arg3 ) { + const IOPMInterestContext * context = (const IOPMInterestContext *) arg1; + bool isCapMsg = (context->messageType == kIOMessageSystemCapabilityChange); + bool isCapClient = false; + bool allow = false; + IOPMServiceInterestNotifier *notifier; - friend class IOPMrootDomain; - OSDeclareDefaultStructors(IOPMServiceInterestNotifier) + notifier = OSDynamicCast(IOPMServiceInterestNotifier, (OSObject *)object); + do { + if ((kSystemTransitionNewCapClient == _systemTransitionType) && + (!isCapMsg || !_joinedCapabilityClients || + !_joinedCapabilityClients->containsObject((OSObject *) object))) { + break; + } -protected: - uint32_t ackTimeoutCnt; - uint32_t msgType; // Message pending ack + // Capability change message for app and kernel clients. - uint64_t uuid0; - uint64_t uuid1; - const OSSymbol *identifier; -}; + if (isCapMsg) { + if ((context->notifyType == kNotifyPriority) || + (context->notifyType == kNotifyCapabilityChangePriority)) { + isCapClient = true; + } -OSDefineMetaClassAndStructors(IOPMServiceInterestNotifier, _IOServiceInterestNotifier) + if ((context->notifyType == kNotifyCapabilityChangeApps) && + (object == (void *) systemCapabilityNotifier)) { + isCapClient = true; + } + } -IONotifier * IOPMrootDomain::registerInterest( - const OSSymbol * typeOfInterest, - IOServiceInterestHandler handler, - void * target, void * ref ) -{ - IOPMServiceInterestNotifier *notifier = 0; - bool isSystemCapabilityClient; - bool isKernelCapabilityClient; - IOReturn rc = kIOReturnError;; - - isSystemCapabilityClient = - typeOfInterest && - typeOfInterest->isEqualTo(kIOPMSystemCapabilityInterest); - - isKernelCapabilityClient = - typeOfInterest && - typeOfInterest->isEqualTo(gIOPriorityPowerStateInterest); - - if (isSystemCapabilityClient) - typeOfInterest = gIOAppPowerStateInterest; - - notifier = new IOPMServiceInterestNotifier; - if (!notifier) return NULL; - - if (notifier->init()) { - rc = super::registerInterestForNotifier(notifier, typeOfInterest, handler, target, ref); - } - if (rc != kIOReturnSuccess) { - notifier->release(); - notifier = 0; - - return NULL; - } - if (pmPowerStateQueue) - { - notifier->ackTimeoutCnt = 0; - if (isSystemCapabilityClient) - { - notifier->retain(); - if (pmPowerStateQueue->submitPowerEvent( - kPowerEventRegisterSystemCapabilityClient, notifier) == false) - notifier->release(); - } - - if (isKernelCapabilityClient) - { - notifier->retain(); - if (pmPowerStateQueue->submitPowerEvent( - kPowerEventRegisterKernelCapabilityClient, notifier) == false) - notifier->release(); - } - } - - OSData *data = NULL; - uint8_t *uuid = NULL; - OSKext *kext = OSKext::lookupKextWithAddress((vm_address_t)handler); - if (kext) { - data = kext->copyUUID(); - } - if (data && (data->getLength() == sizeof(uuid_t))) { - uuid = (uint8_t *)(data->getBytesNoCopy()); - - notifier->uuid0 = ((uint64_t)(uuid[0]) << 56) | ((uint64_t)(uuid[1]) << 48) | ((uint64_t)(uuid[2]) << 40)| - ((uint64_t)(uuid[3]) << 32) | ((uint64_t)(uuid[4]) << 24) | ((uint64_t)(uuid[5]) << 16) | - ((uint64_t)(uuid[6]) << 8) | (uuid[7]); - notifier->uuid1 = ((uint64_t)(uuid[8]) << 56) | ((uint64_t)(uuid[9]) << 48) | ((uint64_t)(uuid[10]) << 40)| - ((uint64_t)(uuid[11]) << 32) | ((uint64_t)(uuid[12]) << 24) | ((uint64_t)(uuid[13]) << 16) | - ((uint64_t)(uuid[14]) << 8) | 
(uuid[15]);
-
- notifier->identifier = kext->getIdentifier();
-
- }
- if (kext) kext->release();
- if (data) data->release();
-
- return notifier;
-}
+ if (isCapClient) {
+ IOPMSystemCapabilityChangeParameters * capArgs =
+ (IOPMSystemCapabilityChangeParameters *) arg2;
+
+ if (kSystemTransitionNewCapClient == _systemTransitionType) {
+ capArgs->fromCapabilities = 0;
+ capArgs->toCapabilities = _currentCapability;
+ capArgs->changeFlags = 0;
+ } else {
+ capArgs->fromCapabilities = _currentCapability;
+ capArgs->toCapabilities = _pendingCapability;
+
+ if (context->isPreChange) {
+ capArgs->changeFlags = kIOPMSystemCapabilityWillChange;
+ } else {
+ capArgs->changeFlags = kIOPMSystemCapabilityDidChange;
+ }
+
+ if ((object == (void *) systemCapabilityNotifier) &&
+ context->isPreChange) {
+ toldPowerdCapWillChange = true;
+ }
+ }
+
+ // Capability change messages only go to the PM configd plugin.
+ // Wait for response post-change if capability is increasing.
+ // Wait for response pre-change if capability is decreasing.
+
+ if ((context->notifyType == kNotifyCapabilityChangeApps) && arg3 &&
+ ((capabilityLoss && context->isPreChange) ||
+ (!capabilityLoss && !context->isPreChange))) {
+ // app has not replied yet, wait for it
+ *((OSObject **) arg3) = kOSBooleanFalse;
+ }
+
+ allow = true;
+ break;
+ }
+
+ // Capability client will always see kIOMessageCanSystemSleep,
+ // even for demand sleep. It will also have a chance to veto
+ // sleep one last time after all clients have responded to
+ // kIOMessageSystemWillSleep
+
+ if ((kIOMessageCanSystemSleep == context->messageType) ||
+ (kIOMessageSystemWillNotSleep == context->messageType)) {
+ if (object == (OSObject *) systemCapabilityNotifier) {
+ allow = true;
+ break;
+ }
+
+ // Not idle sleep, don't ask apps.
+ if (context->changeFlags & kIOPMSkipAskPowerDown) {
+ break;
+ }
+ }
+
+ if (kIOPMMessageLastCallBeforeSleep == context->messageType) {
+ if ((object == (OSObject *) systemCapabilityNotifier) &&
+ CAP_HIGHEST(kIOPMSystemCapabilityGraphics) &&
+ (fullToDarkReason == kIOPMSleepReasonIdle)) {
+ allow = true;
+ }
+ break;
+ }
+
+ // Reject capability change messages for legacy clients.
+ // Reject legacy system sleep messages for capability client.
+
+ if (isCapMsg || (object == (OSObject *) systemCapabilityNotifier)) {
+ break;
+ }
+
+ // Filter system sleep messages.
+
+ if ((context->notifyType == kNotifyApps) &&
+ (_systemMessageClientMask & kSystemMessageClientLegacyApp)) {
+ allow = true;
+
+ if (notifier) {
+ if (arg3) {
+ if (notifier->ackTimeoutCnt >= 3) {
+ *((OSObject **) arg3) = kOSBooleanFalse;
+ } else {
+ *((OSObject **) arg3) = kOSBooleanTrue;
+ }
+ }
+ }
+ } else if ((context->notifyType == kNotifyPriority) &&
+ (_systemMessageClientMask & kSystemMessageClientKernel)) {
+ allow = true;
+ }
+ } while (false);
+
+ if (allow && isCapMsg && _joinedCapabilityClients) {
+ _joinedCapabilityClients->removeObject((OSObject *) object);
+ if (_joinedCapabilityClients->getCount() == 0) {
+ DLOG("destroyed capability client set %p\n",
+ OBFUSCATE(_joinedCapabilityClients));
+ _joinedCapabilityClients->release();
+ _joinedCapabilityClients = 0;
+ }
+ }
+ if (notifier) {
+ notifier->msgType = context->messageType;
+ }

-bool IOPMrootDomain::systemMessageFilter(
- void * object, void * arg1, void * arg2, void * arg3 )
-{
- const IOPMInterestContext * context = (const IOPMInterestContext *) arg1;
- bool isCapMsg = (context->messageType == kIOMessageSystemCapabilityChange);
- bool isCapClient = false;
- bool allow = false;
- IOPMServiceInterestNotifier *notifier;
-
- notifier = OSDynamicCast(IOPMServiceInterestNotifier, (OSObject *)object);
- do {
- if ((kSystemTransitionNewCapClient == _systemTransitionType) &&
- (!isCapMsg || !_joinedCapabilityClients ||
- !_joinedCapabilityClients->containsObject((OSObject *) object)))
- break;
-
- // Capability change message for app and kernel clients.
-
- if (isCapMsg)
- {
- if ((context->notifyType == kNotifyPriority) ||
- (context->notifyType == kNotifyCapabilityChangePriority))
- isCapClient = true;
-
- if ((context->notifyType == kNotifyCapabilityChangeApps) &&
- (object == (void *) systemCapabilityNotifier))
- isCapClient = true;
- }
-
- if (isCapClient)
- {
- IOPMSystemCapabilityChangeParameters * capArgs =
- (IOPMSystemCapabilityChangeParameters *) arg2;
-
- if (kSystemTransitionNewCapClient == _systemTransitionType)
- {
- capArgs->fromCapabilities = 0;
- capArgs->toCapabilities = _currentCapability;
- capArgs->changeFlags = 0;
- }
- else
- {
- capArgs->fromCapabilities = _currentCapability;
- capArgs->toCapabilities = _pendingCapability;
-
- if (context->isPreChange)
- capArgs->changeFlags = kIOPMSystemCapabilityWillChange;
- else
- capArgs->changeFlags = kIOPMSystemCapabilityDidChange;
-
- if ((object == (void *) systemCapabilityNotifier) &&
- context->isPreChange)
- {
- toldPowerdCapWillChange = true;
- }
- }
-
- // Capability change messages only go to the PM configd plugin.
- // Wait for response post-change if capabilitiy is increasing.
- // Wait for response pre-change if capability is decreasing.
-
- if ((context->notifyType == kNotifyCapabilityChangeApps) && arg3 &&
- ( (capabilityLoss && context->isPreChange) ||
- (!capabilityLoss && !context->isPreChange) ) )
- {
- // app has not replied yet, wait for it
- *((OSObject **) arg3) = kOSBooleanFalse;
-
- }
-
- allow = true;
- break;
- }
-
- // Capability client will always see kIOMessageCanSystemSleep,
- // even for demand sleep. It will also have a chance to veto
- // sleep one last time after all clients have responded to
- // kIOMessageSystemWillSleep
-
- if ((kIOMessageCanSystemSleep == context->messageType) ||
- (kIOMessageSystemWillNotSleep == context->messageType))
- {
- if (object == (OSObject *) systemCapabilityNotifier)
- {
- allow = true;
- break;
- }
-
- // Not idle sleep, don't ask apps.
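// A minimal sketch (not part of this patch) of the reply-gating condition in
// the capability branch above. The compound test
//   (capabilityLoss && isPreChange) || (!capabilityLoss && !isPreChange)
// is simply an equality of the two flags: powerd's reply is awaited
// pre-change when capability is decreasing, and post-change when it is
// increasing.
#include <stdbool.h>

static inline bool
mustWaitForReply(bool capabilityLoss, bool isPreChange)
{
	return capabilityLoss == isPreChange;
}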
- if (context->changeFlags & kIOPMSkipAskPowerDown) - { - break; - } - } - - if (kIOPMMessageLastCallBeforeSleep == context->messageType) - { - if ((object == (OSObject *) systemCapabilityNotifier) && - CAP_HIGHEST(kIOPMSystemCapabilityGraphics) && - (fullToDarkReason == kIOPMSleepReasonIdle)) { - allow = true; - } - break; - } - - // Reject capability change messages for legacy clients. - // Reject legacy system sleep messages for capability client. - - if (isCapMsg || (object == (OSObject *) systemCapabilityNotifier)) - { - break; - } - - // Filter system sleep messages. - - if ((context->notifyType == kNotifyApps) && - (_systemMessageClientMask & kSystemMessageClientLegacyApp)) - { - allow = true; - - if (notifier) { - if (arg3) { - if (notifier->ackTimeoutCnt >= 3) - *((OSObject **) arg3) = kOSBooleanFalse; - else - *((OSObject **) arg3) = kOSBooleanTrue; - } - } - } - else if ((context->notifyType == kNotifyPriority) && - (_systemMessageClientMask & kSystemMessageClientKernel)) - { - allow = true; - } - } - while (false); - - if (allow && isCapMsg && _joinedCapabilityClients) - { - _joinedCapabilityClients->removeObject((OSObject *) object); - if (_joinedCapabilityClients->getCount() == 0) - { - DLOG("destroyed capability client set %p\n", - OBFUSCATE(_joinedCapabilityClients)); - _joinedCapabilityClients->release(); - _joinedCapabilityClients = 0; - } - } - if (notifier) { - notifier->msgType = context->messageType; - } - - return allow; + return allow; } //****************************************************************************** @@ -6091,34 +5971,37 @@ bool IOPMrootDomain::systemMessageFilter( // //****************************************************************************** -IOReturn IOPMrootDomain::setMaintenanceWakeCalendar( - const IOPMCalendarStruct * calendar ) +IOReturn +IOPMrootDomain::setMaintenanceWakeCalendar( + const IOPMCalendarStruct * calendar ) { - OSData * data; - IOReturn ret = 0; + OSData * data; + IOReturn ret = 0; - if (!calendar) - return kIOReturnBadArgument; + if (!calendar) { + return kIOReturnBadArgument; + } - data = OSData::withBytesNoCopy((void *) calendar, sizeof(*calendar)); - if (!data) - return kIOReturnNoMemory; + data = OSData::withBytes((void *) calendar, sizeof(*calendar)); + if (!data) { + return kIOReturnNoMemory; + } - if (kPMCalendarTypeMaintenance == calendar->selector) { - ret = setPMSetting(gIOPMSettingMaintenanceWakeCalendarKey, data); - if (kIOReturnSuccess == ret) - OSBitOrAtomic(kIOPMAlarmBitMaintenanceWake, &_scheduledAlarms); - } else - if (kPMCalendarTypeSleepService == calendar->selector) - { - ret = setPMSetting(gIOPMSettingSleepServiceWakeCalendarKey, data); - if (kIOReturnSuccess == ret) - OSBitOrAtomic(kIOPMAlarmBitSleepServiceWake, &_scheduledAlarms); - } - DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); + if (kPMCalendarTypeMaintenance == calendar->selector) { + ret = setPMSetting(gIOPMSettingMaintenanceWakeCalendarKey, data); + if (kIOReturnSuccess == ret) { + OSBitOrAtomic(kIOPMAlarmBitMaintenanceWake, &_scheduledAlarms); + } + } else if (kPMCalendarTypeSleepService == calendar->selector) { + ret = setPMSetting(gIOPMSettingSleepServiceWakeCalendarKey, data); + if (kIOReturnSuccess == ret) { + OSBitOrAtomic(kIOPMAlarmBitSleepServiceWake, &_scheduledAlarms); + } + } + DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms); - data->release(); - return ret; + data->release(); + return ret; } // MARK: - @@ -6130,62 +6013,64 @@ IOReturn IOPMrootDomain::setMaintenanceWakeCalendar( // Handle the 
notification when the IODisplayWrangler changes power state. //****************************************************************************** -IOReturn IOPMrootDomain::displayWranglerNotification( - void * target, void * refCon, - UInt32 messageType, IOService * service, - void * messageArgument, vm_size_t argSize ) +IOReturn +IOPMrootDomain::displayWranglerNotification( + void * target, void * refCon, + UInt32 messageType, IOService * service, + void * messageArgument, vm_size_t argSize ) { #if !NO_KERNEL_HID - int displayPowerState; - IOPowerStateChangeNotification * params = - (IOPowerStateChangeNotification *) messageArgument; - - if ((messageType != kIOMessageDeviceWillPowerOff) && - (messageType != kIOMessageDeviceHasPoweredOn)) - return kIOReturnUnsupported; - - ASSERT_GATED(); - if (!gRootDomain) - return kIOReturnUnsupported; - - displayPowerState = params->stateNumber; - DLOG("wrangler %s ps %d\n", - getIOMessageString(messageType), displayPowerState); - - switch (messageType) { - case kIOMessageDeviceWillPowerOff: - // Display wrangler has dropped power due to display idle - // or force system sleep. - // - // 4 Display ON kWranglerPowerStateMax - // 3 Display Dim kWranglerPowerStateDim - // 2 Display Sleep kWranglerPowerStateSleep - // 1 Not visible to user - // 0 Not visible to user kWranglerPowerStateMin - - if (displayPowerState <= kWranglerPowerStateSleep) - gRootDomain->evaluatePolicy( kStimulusDisplayWranglerSleep ); - break; - - case kIOMessageDeviceHasPoweredOn: - // Display wrangler has powered on due to user activity - // or wake from sleep. - - if (kWranglerPowerStateMax == displayPowerState) - { - gRootDomain->evaluatePolicy( kStimulusDisplayWranglerWake ); - - // See comment in handleUpdatePowerClientForDisplayWrangler - if (service->getPowerStateForClient(gIOPMPowerClientDevice) == - kWranglerPowerStateMax) - { - gRootDomain->evaluatePolicy( kStimulusEnterUserActiveState ); - } - } - break; - } + int displayPowerState; + IOPowerStateChangeNotification * params = + (IOPowerStateChangeNotification *) messageArgument; + + if ((messageType != kIOMessageDeviceWillPowerOff) && + (messageType != kIOMessageDeviceHasPoweredOn)) { + return kIOReturnUnsupported; + } + + ASSERT_GATED(); + if (!gRootDomain) { + return kIOReturnUnsupported; + } + + displayPowerState = params->stateNumber; + DLOG("wrangler %s ps %d\n", + getIOMessageString(messageType), displayPowerState); + + switch (messageType) { + case kIOMessageDeviceWillPowerOff: + // Display wrangler has dropped power due to display idle + // or force system sleep. + // + // 4 Display ON kWranglerPowerStateMax + // 3 Display Dim kWranglerPowerStateDim + // 2 Display Sleep kWranglerPowerStateSleep + // 1 Not visible to user + // 0 Not visible to user kWranglerPowerStateMin + + if (displayPowerState <= kWranglerPowerStateSleep) { + gRootDomain->evaluatePolicy( kStimulusDisplayWranglerSleep ); + } + break; + + case kIOMessageDeviceHasPoweredOn: + // Display wrangler has powered on due to user activity + // or wake from sleep. 
+ + if (kWranglerPowerStateMax == displayPowerState) { + gRootDomain->evaluatePolicy( kStimulusDisplayWranglerWake ); + + // See comment in handleUpdatePowerClientForDisplayWrangler + if (service->getPowerStateForClient(gIOPMPowerClientDevice) == + kWranglerPowerStateMax) { + gRootDomain->evaluatePolicy( kStimulusEnterUserActiveState ); + } + } + break; + } #endif - return kIOReturnUnsupported; + return kIOReturnUnsupported; } //****************************************************************************** @@ -6195,21 +6080,21 @@ IOReturn IOPMrootDomain::displayWranglerNotification( // When it's published we install a power state change handler. //****************************************************************************** -bool IOPMrootDomain::displayWranglerMatchPublished( - void * target, - void * refCon, - IOService * newService, - IONotifier * notifier __unused) +bool +IOPMrootDomain::displayWranglerMatchPublished( + void * target, + void * refCon, + IOService * newService, + IONotifier * notifier __unused) { #if !NO_KERNEL_HID - // install a handler - if( !newService->registerInterest( gIOGeneralInterest, - &displayWranglerNotification, target, 0) ) - { - return false; - } + // install a handler + if (!newService->registerInterest( gIOGeneralInterest, + &displayWranglerNotification, target, 0)) { + return false; + } #endif - return true; + return true; } //****************************************************************************** @@ -6217,26 +6102,28 @@ bool IOPMrootDomain::displayWranglerMatchPublished( // //****************************************************************************** -void IOPMrootDomain::reportUserInput( void ) +void +IOPMrootDomain::reportUserInput( void ) { #if !NO_KERNEL_HID - OSIterator * iter; - OSDictionary * matching; - - if(!wrangler) - { - matching = serviceMatching("IODisplayWrangler"); - iter = getMatchingServices(matching); - if (matching) matching->release(); - if(iter) - { - wrangler = OSDynamicCast(IOService, iter->getNextObject()); - iter->release(); - } - } - - if(wrangler) - wrangler->activityTickle(0,0); + OSIterator * iter; + OSDictionary * matching; + + if (!wrangler) { + matching = serviceMatching("IODisplayWrangler"); + iter = getMatchingServices(matching); + if (matching) { + matching->release(); + } + if (iter) { + wrangler = OSDynamicCast(IOService, iter->getNextObject()); + iter->release(); + } + } + + if (wrangler) { + wrangler->activityTickle(0, 0); + } #endif } @@ -6244,37 +6131,32 @@ void IOPMrootDomain::reportUserInput( void ) // latchDisplayWranglerTickle //****************************************************************************** -bool IOPMrootDomain::latchDisplayWranglerTickle( bool latch ) +bool +IOPMrootDomain::latchDisplayWranglerTickle( bool latch ) { #if !NO_KERNEL_HID - if (latch) - { - if (!(_currentCapability & kIOPMSystemCapabilityGraphics) && - !(_pendingCapability & kIOPMSystemCapabilityGraphics) && - !checkSystemCanSustainFullWake()) - { - // Currently in dark wake, and not transitioning to full wake. - // Full wake is unsustainable, so latch the tickle to prevent - // the display from lighting up momentarily. 
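// A minimal sketch (not part of this patch) of the latch condition in
// latchDisplayWranglerTickle() above: a tickle is latched (suppressed) only
// while both the current and pending capability masks lack graphics and a
// full wake could not be sustained (e.g. clamshell closed on battery).
#include <stdint.h>
#include <stdbool.h>

static inline bool
shouldLatchTickle(uint32_t currentCap, uint32_t pendingCap,
    uint32_t graphicsBit, bool canSustainFullWake)
{
	return ((currentCap & graphicsBit) == 0) &&
	       ((pendingCap & graphicsBit) == 0) &&
	       !canSustainFullWake;
}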
- wranglerTickleLatched = true; - } - else - { - wranglerTickleLatched = false; - } - } - else if (wranglerTickleLatched && checkSystemCanSustainFullWake()) - { - wranglerTickleLatched = false; - - pmPowerStateQueue->submitPowerEvent( - kPowerEventPolicyStimulus, - (void *) kStimulusDarkWakeActivityTickle ); - } - - return wranglerTickleLatched; + if (latch) { + if (!(_currentCapability & kIOPMSystemCapabilityGraphics) && + !(_pendingCapability & kIOPMSystemCapabilityGraphics) && + !checkSystemCanSustainFullWake()) { + // Currently in dark wake, and not transitioning to full wake. + // Full wake is unsustainable, so latch the tickle to prevent + // the display from lighting up momentarily. + wranglerTickleLatched = true; + } else { + wranglerTickleLatched = false; + } + } else if (wranglerTickleLatched && checkSystemCanSustainFullWake()) { + wranglerTickleLatched = false; + + pmPowerStateQueue->submitPowerEvent( + kPowerEventPolicyStimulus, + (void *) kStimulusDarkWakeActivityTickle ); + } + + return wranglerTickleLatched; #else - return false; + return false; #endif } @@ -6284,10 +6166,11 @@ bool IOPMrootDomain::latchDisplayWranglerTickle( bool latch ) // For root domain user client //****************************************************************************** -void IOPMrootDomain::setDisplayPowerOn( uint32_t options ) +void +IOPMrootDomain::setDisplayPowerOn( uint32_t options ) { - pmPowerStateQueue->submitPowerEvent( kPowerEventSetDisplayPowerOn, - (void *) 0, options ); + pmPowerStateQueue->submitPowerEvent( kPowerEventSetDisplayPowerOn, + (void *) 0, options ); } // MARK: - @@ -6299,20 +6182,21 @@ void IOPMrootDomain::setDisplayPowerOn( uint32_t options ) // Notification on battery class IOPowerSource appearance //****************************************************************************** -bool IOPMrootDomain::batteryPublished( - void * target, - void * root_domain, - IOService * resourceService, - IONotifier * notifier __unused ) +bool +IOPMrootDomain::batteryPublished( + void * target, + void * root_domain, + IOService * resourceService, + IONotifier * notifier __unused ) { - // rdar://2936060&4435589 - // All laptops have dimmable LCD displays - // All laptops have batteries - // So if this machine has a battery, publish the fact that the backlight - // supports dimming. - ((IOPMrootDomain *)root_domain)->publishFeature("DisplayDims"); + // rdar://2936060&4435589 + // All laptops have dimmable LCD displays + // All laptops have batteries + // So if this machine has a battery, publish the fact that the backlight + // supports dimming. + ((IOPMrootDomain *)root_domain)->publishFeature("DisplayDims"); - return (true); + return true; } // MARK: - @@ -6323,1092 +6207,1046 @@ bool IOPMrootDomain::batteryPublished( // //****************************************************************************** -bool IOPMrootDomain::checkSystemSleepAllowed( IOOptionBits options, - uint32_t sleepReason ) +bool +IOPMrootDomain::checkSystemSleepAllowed( IOOptionBits options, + uint32_t sleepReason ) { - int err = 0; + int err = 0; - // Conditions that prevent idle and demand system sleep. + // Conditions that prevent idle and demand system sleep. - do { - if (userDisabledAllSleep) - { - err = 1; // 1. user-space sleep kill switch - break; - } + do { + if (userDisabledAllSleep) { + err = 1; // 1. user-space sleep kill switch + break; + } - if (systemBooting || systemShutdown || gWillShutdown) - { - err = 2; // 2. 
restart or shutdown in progress - break; - } + if (systemBooting || systemShutdown || gWillShutdown) { + err = 2; // 2. restart or shutdown in progress + break; + } - if (options == 0) - break; + if (options == 0) { + break; + } - // Conditions above pegs the system at full wake. - // Conditions below prevent system sleep but does not prevent - // dark wake, and must be called from gated context. + // Conditions above pegs the system at full wake. + // Conditions below prevent system sleep but does not prevent + // dark wake, and must be called from gated context. #if !CONFIG_SLEEP - err = 3; // 3. config does not support sleep - break; + err = 3; // 3. config does not support sleep + break; #endif - if (lowBatteryCondition || thermalWarningState) - { - break; // always sleep on low battery or when in thermal warning state - } - - if (sleepReason == kIOPMSleepReasonDarkWakeThermalEmergency) - { - break; // always sleep on dark wake thermal emergencies - } - - if (preventSystemSleepList->getCount() != 0) - { - err = 4; // 4. child prevent system sleep clamp - break; - } - - if (getPMAssertionLevel( kIOPMDriverAssertionCPUBit ) == - kIOPMDriverAssertionLevelOn) - { - err = 5; // 5. CPU assertion - break; - } - - if (pciCantSleepValid) - { - if (pciCantSleepFlag) - err = 6; // 6. PCI card does not support PM (cached) - break; - } - else if (sleepSupportedPEFunction && - CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) - { - IOReturn ret; - OSBitAndAtomic(~kPCICantSleep, &platformSleepSupport); - ret = getPlatform()->callPlatformFunction( - sleepSupportedPEFunction, false, - NULL, NULL, NULL, NULL); - pciCantSleepValid = true; - pciCantSleepFlag = false; - if ((platformSleepSupport & kPCICantSleep) || - ((ret != kIOReturnSuccess) && (ret != kIOReturnUnsupported))) - { - err = 6; // 6. PCI card does not support PM - pciCantSleepFlag = true; - break; - } - } - } - while (false); - - if (err) - { - DLOG("System sleep prevented by %d\n", err); - return false; - } - return true; -} - -bool IOPMrootDomain::checkSystemSleepEnabled( void ) -{ - return checkSystemSleepAllowed(0, 0); -} - -bool IOPMrootDomain::checkSystemCanSleep( uint32_t sleepReason ) -{ - ASSERT_GATED(); - return checkSystemSleepAllowed(1, sleepReason); + if (lowBatteryCondition || thermalWarningState) { + break; // always sleep on low battery or when in thermal warning state + } + + if (sleepReason == kIOPMSleepReasonDarkWakeThermalEmergency) { + break; // always sleep on dark wake thermal emergencies + } + + if (preventSystemSleepList->getCount() != 0) { + err = 4; // 4. child prevent system sleep clamp + break; + } + + if (getPMAssertionLevel( kIOPMDriverAssertionCPUBit ) == + kIOPMDriverAssertionLevelOn) { + err = 5; // 5. CPU assertion + break; + } + + if (pciCantSleepValid) { + if (pciCantSleepFlag) { + err = 6; // 6. PCI card does not support PM (cached) + } + break; + } else if (sleepSupportedPEFunction && + CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) { + IOReturn ret; + OSBitAndAtomic(~kPCICantSleep, &platformSleepSupport); + ret = getPlatform()->callPlatformFunction( + sleepSupportedPEFunction, false, + NULL, NULL, NULL, NULL); + pciCantSleepValid = true; + pciCantSleepFlag = false; + if ((platformSleepSupport & kPCICantSleep) || + ((ret != kIOReturnSuccess) && (ret != kIOReturnUnsupported))) { + err = 6; // 6. 
PCI card does not support PM
+ pciCantSleepFlag = true;
+ break;
+ }
+ }
+ } while (false);
+
+ if (err) {
+ DLOG("System sleep prevented by %d\n", err);
+ return false;
+ }
+ return true;
+}
+
+bool
+IOPMrootDomain::checkSystemSleepEnabled( void )
+{
+ return checkSystemSleepAllowed(0, 0);
+}
+
+bool
+IOPMrootDomain::checkSystemCanSleep( uint32_t sleepReason )
+{
+ ASSERT_GATED();
+ return checkSystemSleepAllowed(1, sleepReason);
 }
 
 //******************************************************************************
 // checkSystemCanSustainFullWake
 //******************************************************************************
 
-bool IOPMrootDomain::checkSystemCanSustainFullWake( void )
+bool
+IOPMrootDomain::checkSystemCanSustainFullWake( void )
 {
 #if !NO_KERNEL_HID
- if (lowBatteryCondition || thermalWarningState)
- {
- // Low battery wake, or received a low battery notification
- // while system is awake. This condition will persist until
- // the following wake.
- return false;
- }
-
- if (clamshellExists && clamshellClosed && !clamshellSleepDisabled)
- {
- // Graphics state is unknown and external display might not be probed.
- // Do not incorporate state that requires graphics to be in max power
- // such as desktopMode or clamshellDisabled.
-
- if (!acAdaptorConnected)
- {
- DLOG("full wake check: no AC\n");
- return false;
- }
- }
+ if (lowBatteryCondition || thermalWarningState) {
+ // Low battery wake, or received a low battery notification
+ // while system is awake. This condition will persist until
+ // the following wake.
+ return false;
+ }
+
+ if (clamshellExists && clamshellClosed && !clamshellSleepDisabled) {
+ // Graphics state is unknown and external display might not be probed.
+ // Do not incorporate state that requires graphics to be in max power
+ // such as desktopMode or clamshellDisabled.
+
+ if (!acAdaptorConnected) {
+ DLOG("full wake check: no AC\n");
+ return false;
+ }
+ }
 #endif
- return true;
+ return true;
 }
 
 //******************************************************************************
 // mustHibernate
 //******************************************************************************
 
-#if HIBERNATION
+#if HIBERNATION
+
+bool
+IOPMrootDomain::mustHibernate( void )
+{
+ return lowBatteryCondition || thermalWarningState;
+}
+
+#endif /* HIBERNATION */
+
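The clamp logic in the method that follows reduces to a three-way decision; a standalone sketch with illustrative names, assuming the same flag semantics as adjustPowerState() below:

// Sketch only, not part of the patch: the decision implemented by
// adjustPowerState(). ON_STATE / SLEEP_STATE are the root domain's states.
enum RootClampAction {
    kRootClampForceOn,    // changePowerStateToPriv(ON_STATE)
    kRootClampAllowSleep, // changePowerStateToPriv(SLEEP_STATE)
    kRootClampNoChange    // leave the idle sleep timer in charge
};

static RootClampAction
rootClampFor(bool idleSleepEnabled, bool sleepAllowed, bool sleepASAP)
{
    if (!idleSleepEnabled || !sleepAllowed) {
        return kRootClampForceOn;
    }
    if (sleepASAP) {
        return kRootClampAllowSleep;
    }
    return kRootClampNoChange;
}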
+//******************************************************************************
+// adjustPowerState
+//
+// Conditions that affect our wake/sleep decision have changed.
+// If conditions dictate that the system must remain awake, clamp power
+// state to max with changePowerStateToPriv(ON). Otherwise if sleepASAP
+// is TRUE, then remove the power clamp and allow the power state to drop
+// to SLEEP_STATE.
+//******************************************************************************
+
+void
+IOPMrootDomain::adjustPowerState( bool sleepASAP )
+{
+ DLOG("adjustPowerState ps %u, asap %d, idleSleepEnabled %d\n",
+ (uint32_t) getPowerState(), sleepASAP, idleSleepEnabled);
+
+ ASSERT_GATED();
+
+ if ((!idleSleepEnabled) || !checkSystemSleepEnabled()) {
+ changePowerStateToPriv(ON_STATE);
+ } else if (sleepASAP) {
+ changePowerStateToPriv(SLEEP_STATE);
+ }
+}
+
+void
+IOPMrootDomain::handleDisplayPowerOn()
+{
+ if (!wrangler) {
+ return;
+ }
+ if (displayPowerOnRequested) {
+ if (!checkSystemCanSustainFullWake()) {
+ return;
+ }
+
+ // Force wrangler to max power state. If system is in dark wake
+ // this alone won't raise the wrangler's power state.
+
+ wrangler->changePowerStateForRootDomain(kWranglerPowerStateMax);
+
+ // System in dark wake, always requesting full wake should
+ // not have any bad side-effects, even if the request fails.
+
+ if (!CAP_CURRENT(kIOPMSystemCapabilityGraphics)) {
+ setProperty(kIOPMRootDomainWakeTypeKey, kIOPMRootDomainWakeTypeNotification);
+ requestFullWake( kFullWakeReasonDisplayOn );
+ }
+ } else {
+ // Relinquish desire to power up display.
+ // Must first transition to state 1 since wrangler doesn't
+ // power off the displays at state 0. At state 0 the root
+ // domain is removed from the wrangler's power client list.
+
+ wrangler->changePowerStateForRootDomain(kWranglerPowerStateMin + 1);
+ wrangler->changePowerStateForRootDomain(kWranglerPowerStateMin);
+ }
+}
+
+//******************************************************************************
+// dispatchPowerEvent
+//
+// IOPMPowerStateQueue callback function. Running on PM work loop thread.
+//******************************************************************************
+
+void
+IOPMrootDomain::dispatchPowerEvent(
+ uint32_t event, void * arg0, uint64_t arg1 )
+{
+ ASSERT_GATED();
+
+ switch (event) {
+ case kPowerEventFeatureChanged:
+ DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1);
+ messageClients(kIOPMMessageFeatureChange, this);
+ break;
+
+ case kPowerEventReceivedPowerNotification:
+ DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1);
+ handlePowerNotification((UInt32)(uintptr_t) arg0 );
+ break;
+
+ case kPowerEventSystemBootCompleted:
+ DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1);
+ if (systemBooting) {
+ systemBooting = false;
+
+ // read noidle setting from Device Tree
+ IORegistryEntry *defaults = IORegistryEntry::fromPath("IODeviceTree:/defaults");
+ if (defaults != NULL) {
+ OSData *data = OSDynamicCast(OSData, defaults->getProperty("no-idle"));
+ if ((data != NULL) && (data->getLength() == 4)) {
+ gNoIdleFlag = *(uint32_t*)data->getBytesNoCopy();
+ DLOG("Setting gNoIdleFlag to %u from device tree\n", gNoIdleFlag);
+ }
+ defaults->release();
+ }
+ if (lowBatteryCondition) {
+ privateSleepSystem(kIOPMSleepReasonLowPower);
+
+ // The rest is unnecessary since the system is expected
+ // to sleep immediately. The following wake will update
+ // everything.
+ break;
+ }
+
+ sleepWakeDebugMemAlloc();
+ saveFailureData2File();
+
+ // If lid is closed, re-send lid closed notification
+ // now that booting is complete.
+ if (clamshellClosed) {
+ handlePowerNotification(kLocalEvalClamshellCommand);
+ }
+ evaluatePolicy( kStimulusAllowSystemSleepChanged );
+ }
+ break;
+
+ case kPowerEventSystemShutdown:
+ DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1);
+ if (kOSBooleanTrue == (OSBoolean *) arg0) {
+ /* We set systemShutdown = true during shutdown
+ * to prevent sleep at unexpected times while loginwindow is trying
+ * to shut down apps and while the OS is trying to transition to
+ * complete power off.
+ *
+ * Set to true during shutdown, as soon as loginwindow shows
+ * the "shutdown countdown dialog", through individual app
+ * termination, and through black screen kernel shutdown.
+ */
+ systemShutdown = true;
+ } else {
+ /*
+ * A shutdown was initiated, but then the shutdown
+ * was cancelled, clearing systemShutdown to false here.
+ */ + systemShutdown = false; + } + break; + + case kPowerEventUserDisabledSleep: + DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + userDisabledAllSleep = (kOSBooleanTrue == (OSBoolean *) arg0); + break; + + case kPowerEventRegisterSystemCapabilityClient: + DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + if (systemCapabilityNotifier) { + systemCapabilityNotifier->release(); + systemCapabilityNotifier = 0; + } + if (arg0) { + systemCapabilityNotifier = (IONotifier *) arg0; + systemCapabilityNotifier->retain(); + } + /* intentional fall-through */ + [[clang::fallthrough]]; + + case kPowerEventRegisterKernelCapabilityClient: + DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + if (!_joinedCapabilityClients) { + _joinedCapabilityClients = OSSet::withCapacity(8); + } + if (arg0) { + IONotifier * notify = (IONotifier *) arg0; + if (_joinedCapabilityClients) { + _joinedCapabilityClients->setObject(notify); + synchronizePowerTree( kIOPMSyncNoChildNotify ); + } + notify->release(); + } + break; + + case kPowerEventPolicyStimulus: + DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + if (arg0) { + int stimulus = (uintptr_t) arg0; + evaluatePolicy( stimulus, (uint32_t) arg1 ); + } + break; + + case kPowerEventAssertionCreate: + DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + if (pmAssertions) { + pmAssertions->handleCreateAssertion((OSData *)arg0); + } + break; + + + case kPowerEventAssertionRelease: + DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + if (pmAssertions) { + pmAssertions->handleReleaseAssertion(arg1); + } + break; + + case kPowerEventAssertionSetLevel: + DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + if (pmAssertions) { + pmAssertions->handleSetAssertionLevel(arg1, (IOPMDriverAssertionLevel)(uintptr_t)arg0); + } + break; + + case kPowerEventQueueSleepWakeUUID: + DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + handleQueueSleepWakeUUID((OSObject *)arg0); + break; + case kPowerEventPublishSleepWakeUUID: + DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + handlePublishSleepWakeUUID((bool)arg0); + break; + + case kPowerEventSetDisplayPowerOn: + DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); + if (!wrangler) { + break; + } + if (arg1 != 0) { + displayPowerOnRequested = true; + } else { + displayPowerOnRequested = false; + } + handleDisplayPowerOn(); + break; + } +} + +//****************************************************************************** +// systemPowerEventOccurred +// +// The power controller is notifying us of a hardware-related power management +// event that we must handle. +// +// systemPowerEventOccurred covers the same functionality that +// receivePowerNotification does; it simply provides a richer API for conveying +// more information. 
+//****************************************************************************** + +IOReturn +IOPMrootDomain::systemPowerEventOccurred( + const OSSymbol *event, + uint32_t intValue) +{ + IOReturn attempt = kIOReturnSuccess; + OSNumber *newNumber = NULL; + + if (!event) { + return kIOReturnBadArgument; + } + + newNumber = OSNumber::withNumber(intValue, 8 * sizeof(intValue)); + if (!newNumber) { + return kIOReturnInternalError; + } + + attempt = systemPowerEventOccurred(event, (OSObject *)newNumber); + + newNumber->release(); + + return attempt; +} + +void +IOPMrootDomain::setThermalState(OSObject *value) +{ + OSNumber * num; + + if (gIOPMWorkLoop->inGate() == false) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::setThermalState), + (OSObject *)this, + (void *)value); + + return; + } + if (value && (num = OSDynamicCast(OSNumber, value))) { + thermalWarningState = ((num->unsigned32BitValue() == kIOPMThermalLevelWarning) || + (num->unsigned32BitValue() == kIOPMThermalLevelTrap)) ? 1 : 0; + } +} + +IOReturn +IOPMrootDomain::systemPowerEventOccurred( + const OSSymbol *event, + OSObject *value) +{ + OSDictionary *thermalsDict = NULL; + bool shouldUpdate = true; + + if (!event || !value) { + return kIOReturnBadArgument; + } + + // LOCK + // We reuse featuresDict Lock because it already exists and guards + // the very infrequently used publish/remove feature mechanism; so there's zero rsk + // of stepping on that lock. + if (featuresDictLock) { + IOLockLock(featuresDictLock); + } + + thermalsDict = (OSDictionary *)getProperty(kIOPMRootDomainPowerStatusKey); + + if (thermalsDict && OSDynamicCast(OSDictionary, thermalsDict)) { + thermalsDict = OSDictionary::withDictionary(thermalsDict); + } else { + thermalsDict = OSDictionary::withCapacity(1); + } + + if (!thermalsDict) { + shouldUpdate = false; + goto exit; + } + + thermalsDict->setObject(event, value); + + setProperty(kIOPMRootDomainPowerStatusKey, thermalsDict); + + thermalsDict->release(); + +exit: + // UNLOCK + if (featuresDictLock) { + IOLockUnlock(featuresDictLock); + } + + if (shouldUpdate) { + if (event && + event->isEqualTo(kIOPMThermalLevelWarningKey)) { + setThermalState(value); + } + messageClients(kIOPMMessageSystemPowerEventOccurred, (void *)NULL); + } + + return kIOReturnSuccess; +} + +//****************************************************************************** +// receivePowerNotification +// +// The power controller is notifying us of a hardware-related power management +// event that we must handle. This may be a result of an 'environment' interrupt +// from the power mgt micro. +//****************************************************************************** + +IOReturn +IOPMrootDomain::receivePowerNotification( UInt32 msg ) +{ + pmPowerStateQueue->submitPowerEvent( + kPowerEventReceivedPowerNotification, (void *)(uintptr_t) msg ); + return kIOReturnSuccess; +} + +void +IOPMrootDomain::handlePowerNotification( UInt32 msg ) +{ + bool eval_clamshell = false; + + ASSERT_GATED(); + + /* + * Local (IOPMrootDomain only) eval clamshell command + */ + if (msg & kLocalEvalClamshellCommand) { + eval_clamshell = true; + } + + /* + * Overtemp + */ + if (msg & kIOPMOverTemp) { + MSG("PowerManagement emergency overtemp signal. 
Going to sleep!"); + privateSleepSystem(kIOPMSleepReasonThermalEmergency); + } + + /* + * Forward DW thermal notification to client, if system is not going to sleep + */ + if ((msg & kIOPMDWOverTemp) && (_systemTransitionType != kSystemTransitionSleep)) { + DLOG("DarkWake thermal limits message received!\n"); + + messageClients(kIOPMMessageDarkWakeThermalEmergency); + } + + /* + * Sleep Now! + */ + if (msg & kIOPMSleepNow) { + privateSleepSystem(kIOPMSleepReasonSoftware); + } + + /* + * Power Emergency + */ + if (msg & kIOPMPowerEmergency) { + lowBatteryCondition = true; + privateSleepSystem(kIOPMSleepReasonLowPower); + } + + /* + * Clamshell OPEN + */ + if (msg & kIOPMClamshellOpened) { + DLOG("Clamshell opened\n"); + // Received clamshel open message from clamshell controlling driver + // Update our internal state and tell general interest clients + clamshellClosed = false; + clamshellExists = true; + + // Don't issue a hid tickle when lid is open and polled on wake + if (msg & kIOPMSetValue) { + setProperty(kIOPMRootDomainWakeTypeKey, "Lid Open"); + reportUserInput(); + } + + // Tell PMCPU + informCPUStateChange(kInformLid, 0); + + // Tell general interest clients + sendClientClamshellNotification(); + + bool aborting = ((lastSleepReason == kIOPMSleepReasonClamshell) + || (lastSleepReason == kIOPMSleepReasonIdle) + || (lastSleepReason == kIOPMSleepReasonMaintenance)); + if (aborting) { + userActivityCount++; + } + DLOG("clamshell tickled %d lastSleepReason %d\n", userActivityCount, lastSleepReason); + } + + /* + * Clamshell CLOSED + * Send the clamshell interest notification since the lid is closing. + */ + if (msg & kIOPMClamshellClosed) { + if (clamshellClosed && clamshellExists) { + DLOG("Ignoring redundant Clamshell close event\n"); + } else { + DLOG("Clamshell closed\n"); + // Received clamshel open message from clamshell controlling driver + // Update our internal state and tell general interest clients + clamshellClosed = true; + clamshellExists = true; + + // Tell PMCPU + informCPUStateChange(kInformLid, 1); + + // Tell general interest clients + sendClientClamshellNotification(); + + // And set eval_clamshell = so we can attempt + eval_clamshell = true; + } + } -bool IOPMrootDomain::mustHibernate( void ) -{ - return (lowBatteryCondition || thermalWarningState); -} + /* + * Set Desktop mode (sent from graphics) + * + * -> reevaluate lid state + */ + if (msg & kIOPMSetDesktopMode) { + DLOG("Desktop mode\n"); + desktopMode = (0 != (msg & kIOPMSetValue)); + msg &= ~(kIOPMSetDesktopMode | kIOPMSetValue); -#endif /* HIBERNATION */ + sendClientClamshellNotification(); -//****************************************************************************** -// adjustPowerState -// -// Conditions that affect our wake/sleep decision has changed. -// If conditions dictate that the system must remain awake, clamp power -// state to max with changePowerStateToPriv(ON). Otherwise if sleepASAP -// is TRUE, then remove the power clamp and allow the power state to drop -// to SLEEP_STATE. 
-//****************************************************************************** + // Re-evaluate the lid state + eval_clamshell = true; + } -void IOPMrootDomain::adjustPowerState( bool sleepASAP ) -{ - DLOG("adjustPowerState ps %u, asap %d, idleSleepEnabled %d\n", - (uint32_t) getPowerState(), sleepASAP, idleSleepEnabled); + /* + * AC Adaptor connected + * + * -> reevaluate lid state + */ + if (msg & kIOPMSetACAdaptorConnected) { + acAdaptorConnected = (0 != (msg & kIOPMSetValue)); + msg &= ~(kIOPMSetACAdaptorConnected | kIOPMSetValue); - ASSERT_GATED(); + // Tell CPU PM + informCPUStateChange(kInformAC, !acAdaptorConnected); - if ((!idleSleepEnabled) || !checkSystemSleepEnabled()) - { - changePowerStateToPriv(ON_STATE); - } - else if ( sleepASAP ) - { - changePowerStateToPriv(SLEEP_STATE); - } -} + // Tell BSD if AC is connected + // 0 == external power source; 1 == on battery + post_sys_powersource(acAdaptorConnected ? 0:1); -void IOPMrootDomain::handleDisplayPowerOn( ) -{ - if (!wrangler) return; - if (displayPowerOnRequested) - { - if (!checkSystemCanSustainFullWake()) return; + sendClientClamshellNotification(); + + // Re-evaluate the lid state + eval_clamshell = true; - // Force wrangler to max power state. If system is in dark wake - // this alone won't raise the wrangler's power state. + // Lack of AC may have latched a display wrangler tickle. + // This mirrors the hardware's USB wake event latch, where a latched + // USB wake event followed by an AC attach will trigger a full wake. + latchDisplayWranglerTickle( false ); - wrangler->changePowerStateForRootDomain(kWranglerPowerStateMax); +#if HIBERNATION + // AC presence will reset the standy timer delay adjustment. + _standbyTimerResetSeconds = 0; +#endif + if (!userIsActive) { + // Reset userActivityTime when power supply is changed(rdr 13789330) + clock_get_uptime(&userActivityTime); + } + } - // System in dark wake, always requesting full wake should - // not have any bad side-effects, even if the request fails. + /* + * Enable Clamshell (external display disappear) + * + * -> reevaluate lid state + */ + if (msg & kIOPMEnableClamshell) { + DLOG("Clamshell enabled\n"); + // Re-evaluate the lid state + // System should sleep on external display disappearance + // in lid closed operation. + if (true == clamshellDisabled) { + eval_clamshell = true; + } - if (!CAP_CURRENT(kIOPMSystemCapabilityGraphics)) - { - setProperty(kIOPMRootDomainWakeTypeKey, kIOPMRootDomainWakeTypeNotification); - requestFullWake( kFullWakeReasonDisplayOn ); - } - } - else - { - // Relenquish desire to power up display. - // Must first transition to state 1 since wrangler doesn't - // power off the displays at state 0. At state 0 the root - // domain is removed from the wrangler's power client list. + clamshellDisabled = false; + sendClientClamshellNotification(); + } - wrangler->changePowerStateForRootDomain(kWranglerPowerStateMin + 1); - wrangler->changePowerStateForRootDomain(kWranglerPowerStateMin); + /* + * Disable Clamshell (external display appeared) + * We don't bother re-evaluating clamshell state. If the system is awake, + * the lid is probably open. 
+ */ + if (msg & kIOPMDisableClamshell) { + DLOG("Clamshell disabled\n"); + clamshellDisabled = true; + sendClientClamshellNotification(); + } - } + /* + * Evaluate clamshell and SLEEP if appropiate + */ + if (eval_clamshell && clamshellClosed) { + if (shouldSleepOnClamshellClosed()) { + privateSleepSystem(kIOPMSleepReasonClamshell); + } else { + evaluatePolicy( kStimulusDarkWakeEvaluate ); + } + } + /* + * Power Button + */ + if (msg & kIOPMPowerButton) { + DLOG("Powerbutton press\n"); + if (!wranglerAsleep) { + OSString *pbs = OSString::withCString("DisablePowerButtonSleep"); + // Check that power button sleep is enabled + if (pbs) { + if (kOSBooleanTrue != getProperty(pbs)) { + privateSleepSystem(kIOPMSleepReasonPowerButton); + } + } + } else { + reportUserInput(); + } + } } //****************************************************************************** -// dispatchPowerEvent +// evaluatePolicy // -// IOPMPowerStateQueue callback function. Running on PM work loop thread. +// Evaluate root-domain policy in response to external changes. //****************************************************************************** -void IOPMrootDomain::dispatchPowerEvent( - uint32_t event, void * arg0, uint64_t arg1 ) -{ - ASSERT_GATED(); - - switch (event) - { - case kPowerEventFeatureChanged: - DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - messageClients(kIOPMMessageFeatureChange, this); - break; - - case kPowerEventReceivedPowerNotification: - DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - handlePowerNotification( (UInt32)(uintptr_t) arg0 ); - break; - - case kPowerEventSystemBootCompleted: - DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (systemBooting) - { - systemBooting = false; - - if (lowBatteryCondition) - { - privateSleepSystem (kIOPMSleepReasonLowPower); - - // The rest is unnecessary since the system is expected - // to sleep immediately. The following wake will update - // everything. - break; - } - - sleepWakeDebugMemAlloc(); - saveFailureData2File(); - - // If lid is closed, re-send lid closed notification - // now that booting is complete. - if ( clamshellClosed ) - { - handlePowerNotification(kLocalEvalClamshellCommand); - } - evaluatePolicy( kStimulusAllowSystemSleepChanged ); - - } - break; - - case kPowerEventSystemShutdown: - DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (kOSBooleanTrue == (OSBoolean *) arg0) - { - /* We set systemShutdown = true during shutdown - to prevent sleep at unexpected times while loginwindow is trying - to shutdown apps and while the OS is trying to transition to - complete power of. - - Set to true during shutdown, as soon as loginwindow shows - the "shutdown countdown dialog", through individual app - termination, and through black screen kernel shutdown. - */ - systemShutdown = true; - } else { - /* - A shutdown was initiated, but then the shutdown - was cancelled, clearing systemShutdown to false here. 
- */ - systemShutdown = false; - } - break; - - case kPowerEventUserDisabledSleep: - DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - userDisabledAllSleep = (kOSBooleanTrue == (OSBoolean *) arg0); - break; - - case kPowerEventRegisterSystemCapabilityClient: - DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (systemCapabilityNotifier) - { - systemCapabilityNotifier->release(); - systemCapabilityNotifier = 0; - } - if (arg0) - { - systemCapabilityNotifier = (IONotifier *) arg0; - systemCapabilityNotifier->retain(); - } - /* intentional fall-through */ - [[clang::fallthrough]]; - - case kPowerEventRegisterKernelCapabilityClient: - DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (!_joinedCapabilityClients) - _joinedCapabilityClients = OSSet::withCapacity(8); - if (arg0) - { - IONotifier * notify = (IONotifier *) arg0; - if (_joinedCapabilityClients) - { - _joinedCapabilityClients->setObject(notify); - synchronizePowerTree( kIOPMSyncNoChildNotify ); - } - notify->release(); - } - break; - - case kPowerEventPolicyStimulus: - DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (arg0) - { - int stimulus = (uintptr_t) arg0; - evaluatePolicy( stimulus, (uint32_t) arg1 ); - } - break; - - case kPowerEventAssertionCreate: - DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (pmAssertions) { - pmAssertions->handleCreateAssertion((OSData *)arg0); - } - break; - - - case kPowerEventAssertionRelease: - DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (pmAssertions) { - pmAssertions->handleReleaseAssertion(arg1); - } - break; - - case kPowerEventAssertionSetLevel: - DMSG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (pmAssertions) { - pmAssertions->handleSetAssertionLevel(arg1, (IOPMDriverAssertionLevel)(uintptr_t)arg0); - } - break; - - case kPowerEventQueueSleepWakeUUID: - DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - handleQueueSleepWakeUUID((OSObject *)arg0); - break; - case kPowerEventPublishSleepWakeUUID: - DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - handlePublishSleepWakeUUID((bool)arg0); - break; - - case kPowerEventSetDisplayPowerOn: - DLOG("power event %u args %p 0x%llx\n", event, OBFUSCATE(arg0), arg1); - if (!wrangler) break; - if (arg1 != 0) - { - displayPowerOnRequested = true; - } - else - { - displayPowerOnRequested = false; - } - handleDisplayPowerOn(); - break; - } -} +void +IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) +{ + union { + struct { + int idleSleepEnabled : 1; + int idleSleepDisabled : 1; + int displaySleep : 1; + int sleepDelayChanged : 1; + int evaluateDarkWake : 1; + int adjustPowerState : 1; + int userBecameInactive : 1; + } bit; + uint32_t u32; + } flags; + + + ASSERT_GATED(); + flags.u32 = 0; + + switch (stimulus) { + case kStimulusDisplayWranglerSleep: + DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); + if (!wranglerAsleep) { + // first transition to wrangler sleep or lower + flags.bit.displaySleep = true; + } + break; + + case kStimulusDisplayWranglerWake: + DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); + displayIdleForDemandSleep = false; + wranglerAsleep = false; + break; + + case kStimulusEnterUserActiveState: + DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); + if (_preventUserActive) { + DLOG("user active dropped\n"); + break; + } + if (!userIsActive) { + userIsActive = true; + userWasActive = true; + 
clock_get_uptime(&gUserActiveAbsTime);
+
+ // Stay awake after dropping demand for display power on
+ if (kFullWakeReasonDisplayOn == fullWakeReason) {
+ fullWakeReason = fFullWakeReasonDisplayOnAndLocalUser;
+ DLOG("User activity while in notification wake\n");
+ changePowerStateWithOverrideTo( ON_STATE, 0);
+ }
-//******************************************************************************
-// systemPowerEventOccurred
-//
-// The power controller is notifying us of a hardware-related power management
-// event that we must handle.
-//
-// systemPowerEventOccurred covers the same functionality that
-// receivePowerNotification does; it simply provides a richer API for conveying
-// more information.
-//******************************************************************************
+ kdebugTrace(kPMLogUserActiveState, 0, 1, 0);
+ setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue);
+ messageClients(kIOPMMessageUserIsActiveChanged);
+ }
+ flags.bit.idleSleepDisabled = true;
+ break;
+
+ case kStimulusLeaveUserActiveState:
+ DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg);
+ if (userIsActive) {
+ clock_get_uptime(&gUserInactiveAbsTime);
+ userIsActive = false;
+ clock_get_uptime(&userBecameInactiveTime);
+ flags.bit.userBecameInactive = true;
+
+ kdebugTrace(kPMLogUserActiveState, 0, 0, 0);
+ setProperty(gIOPMUserIsActiveKey, kOSBooleanFalse);
+ messageClients(kIOPMMessageUserIsActiveChanged);
+ }
+ break;
-IOReturn IOPMrootDomain::systemPowerEventOccurred(
- const OSSymbol *event,
- uint32_t intValue)
-{
- IOReturn attempt = kIOReturnSuccess;
- OSNumber *newNumber = NULL;
+ case kStimulusAggressivenessChanged:
+ {
+ DMSG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg);
+ unsigned long minutesToIdleSleep = 0;
+ unsigned long minutesToDisplayDim = 0;
+ unsigned long minutesDelta = 0;
+
+ // Fetch latest display and system sleep slider values.
+ getAggressiveness(kPMMinutesToSleep, &minutesToIdleSleep);
+ getAggressiveness(kPMMinutesToDim, &minutesToDisplayDim);
+ DLOG("aggressiveness changed: system %u->%u, display %u\n",
+ (uint32_t) sleepSlider,
+ (uint32_t) minutesToIdleSleep,
+ (uint32_t) minutesToDisplayDim);
+
+ DLOG("idle time -> %ld secs (ena %d)\n",
+ idleSeconds, (minutesToIdleSleep != 0));
+
+
+ // How long to wait before sleeping the system once
+ // the display turns off is indicated by 'extraSleepDelay'.
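A worked example of the slider arithmetic that follows (minute values are illustrative): with the system slider at 10 minutes and the display slider at 4, extraSleepDelay becomes 6; equal nonzero sliders are clamped to a 1 minute gap; a system slider below the display slider yields 0.

// Sketch only, not part of the patch: the minutesDelta derivation below as
// a pure function. The 0x7fffffff sentinel handled by the real code is
// omitted here.
static unsigned long
extraSleepDelayFor(unsigned long minutesToIdleSleep,
    unsigned long minutesToDisplayDim)
{
    if (minutesToIdleSleep > minutesToDisplayDim) {
        return minutesToIdleSleep - minutesToDisplayDim;
    }
    if (minutesToIdleSleep == minutesToDisplayDim) {
        return 1; // keep a minimal gap between display and system sleep
    }
    return 0;
}
// extraSleepDelayFor(10, 4) == 6; extraSleepDelayFor(5, 5) == 1.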
+
+ if (minutesToIdleSleep > minutesToDisplayDim) {
+ minutesDelta = minutesToIdleSleep - minutesToDisplayDim;
+ } else if (minutesToIdleSleep == minutesToDisplayDim) {
+ minutesDelta = 1;
+ }
- if (!event)
- return kIOReturnBadArgument;
+ if ((!idleSleepEnabled) && (minutesToIdleSleep != 0)) {
+ idleSleepEnabled = flags.bit.idleSleepEnabled = true;
+ }
- newNumber = OSNumber::withNumber(intValue, 8*sizeof(intValue));
- if (!newNumber)
- return kIOReturnInternalError;
+ if ((idleSleepEnabled) && (minutesToIdleSleep == 0)) {
+ flags.bit.idleSleepDisabled = true;
+ idleSleepEnabled = false;
+ }
+ if (0x7fffffff == minutesToIdleSleep) {
+ minutesToIdleSleep = idleSeconds;
+ }
- attempt = systemPowerEventOccurred(event, (OSObject *)newNumber);
+ if (((minutesDelta != extraSleepDelay) ||
+ (userActivityTime != userActivityTime_prev)) &&
+ !flags.bit.idleSleepEnabled && !flags.bit.idleSleepDisabled) {
+ flags.bit.sleepDelayChanged = true;
+ }
- newNumber->release();
+ if (systemDarkWake && !darkWakeToSleepASAP &&
+ (flags.bit.idleSleepEnabled || flags.bit.idleSleepDisabled)) {
+ // Reconsider decision to remain in dark wake
+ flags.bit.evaluateDarkWake = true;
+ }
- return attempt;
-}
+ sleepSlider = minutesToIdleSleep;
+ extraSleepDelay = minutesDelta;
+ userActivityTime_prev = userActivityTime;
+ } break;
+
+ case kStimulusDemandSystemSleep:
+ DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg);
+ displayIdleForDemandSleep = true;
+ if (wrangler && wranglerIdleSettings) {
+ // Request wrangler idle only when demand sleep is triggered
+ // from full wake.
+ if (CAP_CURRENT(kIOPMSystemCapabilityGraphics)) {
+ wrangler->setProperties(wranglerIdleSettings);
+ DLOG("Requested wrangler idle\n");
+ }
+ }
+ // arg = sleepReason
+ changePowerStateWithOverrideTo( SLEEP_STATE, arg );
+ break;
+
+ case kStimulusAllowSystemSleepChanged:
+ DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg);
+ flags.bit.adjustPowerState = true;
+ break;
+
+ case kStimulusDarkWakeActivityTickle:
+ DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg);
+ // arg == true implies real and not self-generated wrangler tickle.
+ // Update wake type on PM work loop instead of the tickle thread to
+ // eliminate the possibility of an early tickle clobbering the wake
+ // type set by the platform driver.
+ if (arg == true) {
+ setProperty(kIOPMRootDomainWakeTypeKey, kIOPMRootDomainWakeTypeHIDActivity);
+ }
-void IOPMrootDomain::setThermalState(OSObject *value)
-{
- OSNumber * num;
+ if (false == wranglerTickled) {
+ if (latchDisplayWranglerTickle(true)) {
+ DLOG("latched tickle\n");
+ break;
+ }
- if (gIOPMWorkLoop->inGate() == false) {
- gIOPMWorkLoop->runAction(
- OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::setThermalState),
- (OSObject *)this,
- (void *)value);
+ wranglerTickled = true;
+ DLOG("Requesting full wake after dark wake activity tickle\n");
+ requestFullWake( kFullWakeReasonLocalUser );
+ }
+ break;
- return;
- }
- if (value && (num = OSDynamicCast(OSNumber, value))) {
- thermalWarningState = ((num->unsigned32BitValue() == kIOPMThermalLevelWarning) ||
- (num->unsigned32BitValue() == kIOPMThermalLevelTrap)) ? 1 : 0;
- }
-}
+ case kStimulusDarkWakeEntry:
+ case kStimulusDarkWakeReentry:
+ DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg);
+ // Any system transitions since the last dark wake transition
+ // will invalidate the stimulus.
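The generation test that follows is a standard stale-event guard; a minimal sketch of the pattern with hypothetical names (the kernel compares against _systemStateGeneration, captured when the stimulus is queued):

// Sketch only, not part of the patch: ignore a queued stimulus if any
// system transition bumped the generation after it was enqueued.
struct QueuedStimulus {
    int      stimulus;
    uint32_t generation; // snapshot of the generation at enqueue time
};

static bool
stimulusStillValid(const QueuedStimulus & q, uint32_t currentGeneration)
{
    return q.generation == currentGeneration;
}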
-IOReturn IOPMrootDomain::systemPowerEventOccurred( - const OSSymbol *event, - OSObject *value) -{ - OSDictionary *thermalsDict = NULL; - bool shouldUpdate = true; + if (arg == _systemStateGeneration) { + DLOG("dark wake entry\n"); + systemDarkWake = true; + + // Keep wranglerAsleep an invariant when wrangler is absent + if (wrangler) { + wranglerAsleep = true; + } - if (!event || !value) - return kIOReturnBadArgument; + if (kStimulusDarkWakeEntry == stimulus) { + clock_get_uptime(&userBecameInactiveTime); + flags.bit.evaluateDarkWake = true; + if (activitySinceSleep()) { + DLOG("User activity recorded while going to darkwake\n"); + reportUserInput(); + } + } - // LOCK - // We reuse featuresDict Lock because it already exists and guards - // the very infrequently used publish/remove feature mechanism; so there's zero rsk - // of stepping on that lock. - if (featuresDictLock) IOLockLock(featuresDictLock); + // Always accelerate disk spindown while in dark wake, + // even if system does not support/allow sleep. - thermalsDict = (OSDictionary *)getProperty(kIOPMRootDomainPowerStatusKey); + cancelIdleSleepTimer(); + setQuickSpinDownTimeout(); + } + break; - if (thermalsDict && OSDynamicCast(OSDictionary, thermalsDict)) { - thermalsDict = OSDictionary::withDictionary(thermalsDict); - } else { - thermalsDict = OSDictionary::withCapacity(1); - } + case kStimulusDarkWakeEvaluate: + DMSG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); + if (systemDarkWake) { + flags.bit.evaluateDarkWake = true; + } + break; + + case kStimulusNoIdleSleepPreventers: + DMSG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); + flags.bit.adjustPowerState = true; + break; + } /* switch(stimulus) */ + + if (flags.bit.evaluateDarkWake && (kFullWakeReasonNone == fullWakeReason)) { + if (darkWakeToSleepASAP || + (clamshellClosed && !(desktopMode && acAdaptorConnected))) { + uint32_t newSleepReason; + + if (CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) { + // System was previously in full wake. Sleep reason from + // full to dark already recorded in fullToDarkReason. + + if (lowBatteryCondition) { + newSleepReason = kIOPMSleepReasonLowPower; + } else { + newSleepReason = fullToDarkReason; + } + } else { + // In dark wake from system sleep. + + if (darkWakeSleepService) { + newSleepReason = kIOPMSleepReasonSleepServiceExit; + } else { + newSleepReason = kIOPMSleepReasonMaintenance; + } + } - if (!thermalsDict) { - shouldUpdate = false; - goto exit; - } + if (checkSystemCanSleep(newSleepReason)) { + privateSleepSystem(newSleepReason); + } + } else { // non-maintenance (network) dark wake + if (checkSystemCanSleep(kIOPMSleepReasonIdle)) { + // Release power clamp, and wait for children idle. + adjustPowerState(true); + } else { + changePowerStateToPriv(ON_STATE); + } + } + } - thermalsDict->setObject (event, value); + if (systemDarkWake) { + // The rest are irrelevant while system is in dark wake. + flags.u32 = 0; + } - setProperty (kIOPMRootDomainPowerStatusKey, thermalsDict); + if ((flags.bit.displaySleep) && + (kFullWakeReasonDisplayOn == fullWakeReason)) { + // kIOPMSleepReasonMaintenance? + DLOG("Display sleep while in notification wake\n"); + changePowerStateWithOverrideTo( SLEEP_STATE, kIOPMSleepReasonMaintenance ); + } - thermalsDict->release(); + if (flags.bit.userBecameInactive || flags.bit.sleepDelayChanged) { + bool cancelQuickSpindown = false; -exit: - // UNLOCK - if (featuresDictLock) IOLockUnlock(featuresDictLock); + if (flags.bit.sleepDelayChanged) { + // Cancel existing idle sleep timer and quick disk spindown. 
+ // New settings will be applied by the idleSleepEnabled flag + // handler below if idle sleep is enabled. - if (shouldUpdate) { - if (event && - event->isEqualTo(kIOPMThermalLevelWarningKey)) { - setThermalState(value); - } - messageClients (kIOPMMessageSystemPowerEventOccurred, (void *)NULL); - } + DLOG("extra sleep timer changed\n"); + cancelIdleSleepTimer(); + cancelQuickSpindown = true; + } else { + DLOG("user inactive\n"); + } - return kIOReturnSuccess; -} + if (!userIsActive && idleSleepEnabled) { + startIdleSleepTimer(getTimeToIdleSleep()); + } -//****************************************************************************** -// receivePowerNotification -// -// The power controller is notifying us of a hardware-related power management -// event that we must handle. This may be a result of an 'environment' interrupt -// from the power mgt micro. -//****************************************************************************** + if (cancelQuickSpindown) { + restoreUserSpinDownTimeout(); + } + } -IOReturn IOPMrootDomain::receivePowerNotification( UInt32 msg ) -{ - pmPowerStateQueue->submitPowerEvent( - kPowerEventReceivedPowerNotification, (void *)(uintptr_t) msg ); - return kIOReturnSuccess; -} - -void IOPMrootDomain::handlePowerNotification( UInt32 msg ) -{ - bool eval_clamshell = false; - - ASSERT_GATED(); - - /* - * Local (IOPMrootDomain only) eval clamshell command - */ - if (msg & kLocalEvalClamshellCommand) - { - eval_clamshell = true; - } - - /* - * Overtemp - */ - if (msg & kIOPMOverTemp) - { - MSG("PowerManagement emergency overtemp signal. Going to sleep!"); - privateSleepSystem (kIOPMSleepReasonThermalEmergency); - } - - /* - * Forward DW thermal notification to client, if system is not going to sleep - */ - if ((msg & kIOPMDWOverTemp) && (_systemTransitionType != kSystemTransitionSleep)) - { - DLOG("DarkWake thermal limits message received!\n"); - - messageClients(kIOPMMessageDarkWakeThermalEmergency); - } - - /* - * Sleep Now! - */ - if (msg & kIOPMSleepNow) - { - privateSleepSystem (kIOPMSleepReasonSoftware); - } - - /* - * Power Emergency - */ - if (msg & kIOPMPowerEmergency) - { - lowBatteryCondition = true; - privateSleepSystem (kIOPMSleepReasonLowPower); - } - - /* - * Clamshell OPEN - */ - if (msg & kIOPMClamshellOpened) - { - DLOG("Clamshell opened\n"); - // Received clamshel open message from clamshell controlling driver - // Update our internal state and tell general interest clients - clamshellClosed = false; - clamshellExists = true; - - // Don't issue a hid tickle when lid is open and polled on wake - if (msg & kIOPMSetValue) - { - setProperty(kIOPMRootDomainWakeTypeKey, "Lid Open"); - reportUserInput(); - } - - // Tell PMCPU - informCPUStateChange(kInformLid, 0); - - // Tell general interest clients - sendClientClamshellNotification(); - - bool aborting = ((lastSleepReason == kIOPMSleepReasonClamshell) - || (lastSleepReason == kIOPMSleepReasonIdle) - || (lastSleepReason == kIOPMSleepReasonMaintenance)); - if (aborting) userActivityCount++; - DLOG("clamshell tickled %d lastSleepReason %d\n", userActivityCount, lastSleepReason); - } - - /* - * Clamshell CLOSED - * Send the clamshell interest notification since the lid is closing. 
- */ - if (msg & kIOPMClamshellClosed) - { - if (clamshellClosed && clamshellExists) { - DLOG("Ignoring redundant Clamshell close event\n"); - } - else { - DLOG("Clamshell closed\n"); - // Received clamshel open message from clamshell controlling driver - // Update our internal state and tell general interest clients - clamshellClosed = true; - clamshellExists = true; - - // Tell PMCPU - informCPUStateChange(kInformLid, 1); - - // Tell general interest clients - sendClientClamshellNotification(); - - // And set eval_clamshell = so we can attempt - eval_clamshell = true; - } - } - - /* - * Set Desktop mode (sent from graphics) - * - * -> reevaluate lid state - */ - if (msg & kIOPMSetDesktopMode) - { - DLOG("Desktop mode\n"); - desktopMode = (0 != (msg & kIOPMSetValue)); - msg &= ~(kIOPMSetDesktopMode | kIOPMSetValue); - - sendClientClamshellNotification(); - - // Re-evaluate the lid state - eval_clamshell = true; - } - - /* - * AC Adaptor connected - * - * -> reevaluate lid state - */ - if (msg & kIOPMSetACAdaptorConnected) - { - acAdaptorConnected = (0 != (msg & kIOPMSetValue)); - msg &= ~(kIOPMSetACAdaptorConnected | kIOPMSetValue); - - // Tell CPU PM - informCPUStateChange(kInformAC, !acAdaptorConnected); - - // Tell BSD if AC is connected - // 0 == external power source; 1 == on battery - post_sys_powersource(acAdaptorConnected ? 0:1); - - sendClientClamshellNotification(); - - // Re-evaluate the lid state - eval_clamshell = true; - - // Lack of AC may have latched a display wrangler tickle. - // This mirrors the hardware's USB wake event latch, where a latched - // USB wake event followed by an AC attach will trigger a full wake. - latchDisplayWranglerTickle( false ); + if (flags.bit.idleSleepEnabled) { + DLOG("idle sleep timer enabled\n"); + if (!wrangler) { + changePowerStateToPriv(ON_STATE); + startIdleSleepTimer( idleSeconds ); + } else { + // Start idle timer if prefs now allow system sleep + // and user is already inactive. Disk spindown is + // accelerated upon timer expiration. -#if HIBERNATION - // AC presence will reset the standy timer delay adjustment. - _standbyTimerResetSeconds = 0; -#endif - if (!userIsActive) { - // Reset userActivityTime when power supply is changed(rdr 13789330) - clock_get_uptime(&userActivityTime); - } - } - - /* - * Enable Clamshell (external display disappear) - * - * -> reevaluate lid state - */ - if (msg & kIOPMEnableClamshell) - { - DLOG("Clamshell enabled\n"); - // Re-evaluate the lid state - // System should sleep on external display disappearance - // in lid closed operation. - if (true == clamshellDisabled) - { - eval_clamshell = true; - } - - clamshellDisabled = false; - sendClientClamshellNotification(); - } - - /* - * Disable Clamshell (external display appeared) - * We don't bother re-evaluating clamshell state. If the system is awake, - * the lid is probably open. 
- */ - if (msg & kIOPMDisableClamshell) - { - DLOG("Clamshell disabled\n"); - clamshellDisabled = true; - sendClientClamshellNotification(); - } - - /* - * Evaluate clamshell and SLEEP if appropiate - */ - if (eval_clamshell && clamshellClosed) - { - if (shouldSleepOnClamshellClosed()) - privateSleepSystem (kIOPMSleepReasonClamshell); - else - evaluatePolicy( kStimulusDarkWakeEvaluate ); - } - - /* - * Power Button - */ - if (msg & kIOPMPowerButton) - { - DLOG("Powerbutton press\n"); - if (!wranglerAsleep) - { - OSString *pbs = OSString::withCString("DisablePowerButtonSleep"); - // Check that power button sleep is enabled - if( pbs ) { - if( kOSBooleanTrue != getProperty(pbs)) - privateSleepSystem (kIOPMSleepReasonPowerButton); - } - } - else - reportUserInput(); - } -} + if (!userIsActive) { + startIdleSleepTimer(getTimeToIdleSleep()); + } + } + } -//****************************************************************************** -// evaluatePolicy -// -// Evaluate root-domain policy in response to external changes. -//****************************************************************************** + if (flags.bit.idleSleepDisabled) { + DLOG("idle sleep timer disabled\n"); + cancelIdleSleepTimer(); + restoreUserSpinDownTimeout(); + adjustPowerState(); + } + + if (flags.bit.adjustPowerState) { + bool sleepASAP = false; + + if (!systemBooting && (preventIdleSleepList->getCount() == 0)) { + if (!wrangler) { + changePowerStateToPriv(ON_STATE); + if (idleSleepEnabled) { + // stay awake for at least idleSeconds + startIdleSleepTimer(idleSeconds); + } + } else if (!extraSleepDelay && !idleSleepTimerPending && !systemDarkWake) { + sleepASAP = true; + } + } -void IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) -{ - union { - struct { - int idleSleepEnabled : 1; - int idleSleepDisabled : 1; - int displaySleep : 1; - int sleepDelayChanged : 1; - int evaluateDarkWake : 1; - int adjustPowerState : 1; - int userBecameInactive : 1; - } bit; - uint32_t u32; - } flags; - - - ASSERT_GATED(); - flags.u32 = 0; - - switch (stimulus) - { - case kStimulusDisplayWranglerSleep: - DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - if (!wranglerAsleep) - { - // first transition to wrangler sleep or lower - flags.bit.displaySleep = true; - } - break; - - case kStimulusDisplayWranglerWake: - DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - displayIdleForDemandSleep = false; - wranglerAsleep = false; - break; - - case kStimulusEnterUserActiveState: - DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - if (_preventUserActive) - { - DLOG("user active dropped\n"); - break; - } - if (!userIsActive) - { - userIsActive = true; - userWasActive = true; - clock_get_uptime(&gUserActiveAbsTime); - - // Stay awake after dropping demand for display power on - if (kFullWakeReasonDisplayOn == fullWakeReason) { - fullWakeReason = fFullWakeReasonDisplayOnAndLocalUser; - DLOG("User activity while in notification wake\n"); - changePowerStateWithOverrideTo( ON_STATE, 0); - } - - kdebugTrace(kPMLogUserActiveState, 0, 1, 0); - setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue); - messageClients(kIOPMMessageUserIsActiveChanged); - } - flags.bit.idleSleepDisabled = true; - break; - - case kStimulusLeaveUserActiveState: - DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - if (userIsActive) - { - clock_get_uptime(&gUserInactiveAbsTime); - userIsActive = false; - clock_get_uptime(&userBecameInactiveTime); - flags.bit.userBecameInactive = true; - - kdebugTrace(kPMLogUserActiveState, 0, 0, 0); - 
setProperty(gIOPMUserIsActiveKey, kOSBooleanFalse); - messageClients(kIOPMMessageUserIsActiveChanged); - } - break; - - case kStimulusAggressivenessChanged: - { - DMSG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - unsigned long minutesToIdleSleep = 0; - unsigned long minutesToDisplayDim = 0; - unsigned long minutesDelta = 0; - - // Fetch latest display and system sleep slider values. - getAggressiveness(kPMMinutesToSleep, &minutesToIdleSleep); - getAggressiveness(kPMMinutesToDim, &minutesToDisplayDim); - DLOG("aggressiveness changed: system %u->%u, display %u\n", - (uint32_t) sleepSlider, - (uint32_t) minutesToIdleSleep, - (uint32_t) minutesToDisplayDim); - - DLOG("idle time -> %ld secs (ena %d)\n", - idleSeconds, (minutesToIdleSleep != 0)); - - - // How long to wait before sleeping the system once - // the displays turns off is indicated by 'extraSleepDelay'. - - if ( minutesToIdleSleep > minutesToDisplayDim ) - minutesDelta = minutesToIdleSleep - minutesToDisplayDim; - else if ( minutesToIdleSleep == minutesToDisplayDim ) - minutesDelta = 1; - - if ((!idleSleepEnabled) && (minutesToIdleSleep != 0)) - idleSleepEnabled = flags.bit.idleSleepEnabled = true; - - if ((idleSleepEnabled) && (minutesToIdleSleep == 0)) { - flags.bit.idleSleepDisabled = true; - idleSleepEnabled = false; - } - if (0x7fffffff == minutesToIdleSleep) - minutesToIdleSleep = idleSeconds; - - if (((minutesDelta != extraSleepDelay) || - (userActivityTime != userActivityTime_prev)) && - !flags.bit.idleSleepEnabled && !flags.bit.idleSleepDisabled) - flags.bit.sleepDelayChanged = true; - - if (systemDarkWake && !darkWakeToSleepASAP && - (flags.bit.idleSleepEnabled || flags.bit.idleSleepDisabled)) - { - // Reconsider decision to remain in dark wake - flags.bit.evaluateDarkWake = true; - } - - sleepSlider = minutesToIdleSleep; - extraSleepDelay = minutesDelta; - userActivityTime_prev = userActivityTime; - } break; - - case kStimulusDemandSystemSleep: - DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - displayIdleForDemandSleep = true; - if (wrangler && wranglerIdleSettings) - { - // Request wrangler idle only when demand sleep is triggered - // from full wake. - if(CAP_CURRENT(kIOPMSystemCapabilityGraphics)) - { - wrangler->setProperties(wranglerIdleSettings); - DLOG("Requested wrangler idle\n"); - } - } - // arg = sleepReason - changePowerStateWithOverrideTo( SLEEP_STATE, arg ); - break; - - case kStimulusAllowSystemSleepChanged: - DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - flags.bit.adjustPowerState = true; - break; - - case kStimulusDarkWakeActivityTickle: - DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - // arg == true implies real and not self generated wrangler tickle. - // Update wake type on PM work loop instead of the tickle thread to - // eliminate the possibility of an early tickle clobbering the wake - // type set by the platform driver. - if (arg == true) - setProperty(kIOPMRootDomainWakeTypeKey, kIOPMRootDomainWakeTypeHIDActivity); - - if (false == wranglerTickled) - { - if (latchDisplayWranglerTickle(true)) - { - DLOG("latched tickle\n"); - break; - } - - wranglerTickled = true; - DLOG("Requesting full wake after dark wake activity tickle\n"); - requestFullWake( kFullWakeReasonLocalUser ); - } - break; - - case kStimulusDarkWakeEntry: - case kStimulusDarkWakeReentry: - DLOG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - // Any system transitions since the last dark wake transition - // will invalid the stimulus. 
- - if (arg == _systemStateGeneration) - { - DLOG("dark wake entry\n"); - systemDarkWake = true; - - // Keep wranglerAsleep an invariant when wrangler is absent - if (wrangler) - wranglerAsleep = true; - - if (kStimulusDarkWakeEntry == stimulus) - { - clock_get_uptime(&userBecameInactiveTime); - flags.bit.evaluateDarkWake = true; - if (activitySinceSleep()) { - DLOG("User activity recorded while going to darkwake\n"); - reportUserInput(); - } - } - - // Always accelerate disk spindown while in dark wake, - // even if system does not support/allow sleep. - - cancelIdleSleepTimer(); - setQuickSpinDownTimeout(); - } - break; - - case kStimulusDarkWakeEvaluate: - DMSG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - if (systemDarkWake) - { - flags.bit.evaluateDarkWake = true; - } - break; - - case kStimulusNoIdleSleepPreventers: - DMSG("evaluatePolicy( %d, 0x%x )\n", stimulus, arg); - flags.bit.adjustPowerState = true; - break; - - } /* switch(stimulus) */ - - if (flags.bit.evaluateDarkWake && (kFullWakeReasonNone == fullWakeReason)) - { - if (darkWakeToSleepASAP || - (clamshellClosed && !(desktopMode && acAdaptorConnected))) - { - uint32_t newSleepReason; - - if (CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) - { - // System was previously in full wake. Sleep reason from - // full to dark already recorded in fullToDarkReason. - - if (lowBatteryCondition) - newSleepReason = kIOPMSleepReasonLowPower; - else - newSleepReason = fullToDarkReason; - } - else - { - // In dark wake from system sleep. - - if (darkWakeSleepService) - newSleepReason = kIOPMSleepReasonSleepServiceExit; - else - newSleepReason = kIOPMSleepReasonMaintenance; - } - - if (checkSystemCanSleep(newSleepReason)) - { - privateSleepSystem(newSleepReason); - } - } - else // non-maintenance (network) dark wake - { - if (checkSystemCanSleep(kIOPMSleepReasonIdle)) - { - // Release power clamp, and wait for children idle. - adjustPowerState(true); - } - else - { - changePowerStateToPriv(ON_STATE); - } - } - } - - if (systemDarkWake) - { - // The rest are irrelevant while system is in dark wake. - flags.u32 = 0; - } - - if ((flags.bit.displaySleep) && - (kFullWakeReasonDisplayOn == fullWakeReason)) - { - // kIOPMSleepReasonMaintenance? - DLOG("Display sleep while in notification wake\n"); - changePowerStateWithOverrideTo( SLEEP_STATE, kIOPMSleepReasonMaintenance ); - } - - if (flags.bit.userBecameInactive || flags.bit.sleepDelayChanged) - { - bool cancelQuickSpindown = false; - - if (flags.bit.sleepDelayChanged) - { - // Cancel existing idle sleep timer and quick disk spindown. - // New settings will be applied by the idleSleepEnabled flag - // handler below if idle sleep is enabled. - - DLOG("extra sleep timer changed\n"); - cancelIdleSleepTimer(); - cancelQuickSpindown = true; - } - else - { - DLOG("user inactive\n"); - } - - if (!userIsActive && idleSleepEnabled) - { - startIdleSleepTimer(getTimeToIdleSleep()); - } - - if (cancelQuickSpindown) - restoreUserSpinDownTimeout(); - } - - if (flags.bit.idleSleepEnabled) - { - DLOG("idle sleep timer enabled\n"); - if (!wrangler) - { - changePowerStateToPriv(ON_STATE); - startIdleSleepTimer( idleSeconds ); - } - else - { - // Start idle timer if prefs now allow system sleep - // and user is already inactive. Disk spindown is - // accelerated upon timer expiration. 
- - if (!userIsActive) - { - startIdleSleepTimer(getTimeToIdleSleep()); - } - } - } - - if (flags.bit.idleSleepDisabled) - { - DLOG("idle sleep timer disabled\n"); - cancelIdleSleepTimer(); - restoreUserSpinDownTimeout(); - adjustPowerState(); - } - - if (flags.bit.adjustPowerState) - { - bool sleepASAP = false; - - if (!systemBooting && (preventIdleSleepList->getCount() == 0)) - { - if (!wrangler) - { - changePowerStateToPriv(ON_STATE); - if (idleSleepEnabled) - { - // stay awake for at least idleSeconds - startIdleSleepTimer(idleSeconds); - } - } - else if (!extraSleepDelay && !idleSleepTimerPending && !systemDarkWake) - { - sleepASAP = true; - } - } - - adjustPowerState(sleepASAP); - } + adjustPowerState(sleepASAP); + } } //****************************************************************************** @@ -7417,82 +7255,80 @@ void IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) // Request transition from dark wake to full wake //****************************************************************************** -void IOPMrootDomain::requestFullWake( FullWakeReason reason ) -{ - uint32_t options = 0; - IOService * pciRoot = 0; - bool promotion = false; - - // System must be in dark wake and a valid reason for entering full wake - if ((kFullWakeReasonNone == reason) || - (kFullWakeReasonNone != fullWakeReason) || - (CAP_CURRENT(kIOPMSystemCapabilityGraphics))) - { - return; - } - - // Will clear reason upon exit from full wake - fullWakeReason = reason; - - _desiredCapability |= (kIOPMSystemCapabilityGraphics | - kIOPMSystemCapabilityAudio); - - if ((kSystemTransitionWake == _systemTransitionType) && - !(_pendingCapability & kIOPMSystemCapabilityGraphics) && - !graphicsSuppressed) - { - // Promote to full wake while waking up to dark wake due to tickle. - // PM will hold off notifying the graphics subsystem about system wake - // as late as possible, so if a HID tickle does arrive, graphics can - // power up on this same wake cycle. The latency to power up graphics - // on the next cycle can be huge on some systems. However, once any - // graphics suppression has taken effect, it is too late. All other - // graphics devices must be similarly suppressed. But the delay till - // the following cycle should be short. - - _pendingCapability |= (kIOPMSystemCapabilityGraphics | - kIOPMSystemCapabilityAudio); - - // Immediately bring up audio and graphics - pciRoot = pciHostBridgeDriver; - willEnterFullWake(); - promotion = true; - } - - // Unsafe to cancel once graphics was powered. - // If system woke from dark wake, the return to sleep can - // be cancelled. "awake -> dark -> sleep" transition - // can be canceled also, during the "dark --> sleep" phase - // *prior* to driver power down. - if (!CAP_HIGHEST(kIOPMSystemCapabilityGraphics) || - _pendingCapability == 0) { - options |= kIOPMSyncCancelPowerDown; - } - - synchronizePowerTree(options, pciRoot); - if (kFullWakeReasonLocalUser == fullWakeReason) - { - // IOGraphics doesn't light the display even though graphics is - // enabled in kIOMessageSystemCapabilityChange message(radar 9502104) - // So, do an explicit activity tickle - if (wrangler) - wrangler->activityTickle(0,0); - } - - // Log a timestamp for the initial full wake request. - // System may not always honor this full wake request. 
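The timestamp block that follows converts an uptime delta to milliseconds for the "full wake" log line; the same pattern in isolation, as a minimal sketch using the kernel clock APIs already used in this file:

// Sketch only, not part of the patch: elapsed milliseconds since 'then'.
static uint64_t
millisecondsSince(AbsoluteTime then)
{
    AbsoluteTime now;
    uint64_t     nsec;

    clock_get_uptime(&now);
    SUB_ABSOLUTETIME(&now, &then);           // now -= then
    absolutetime_to_nanoseconds(now, &nsec); // ticks -> nanoseconds
    return nsec / NSEC_PER_MSEC;
}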
- if (!CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) - { - AbsoluteTime now; - uint64_t nsec; - - clock_get_uptime(&now); - SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime); - absolutetime_to_nanoseconds(now, &nsec); - MSG("full wake %s (reason %u) %u ms\n", - promotion ? "promotion" : "request", - fullWakeReason, ((int)((nsec) / NSEC_PER_MSEC))); - } +void +IOPMrootDomain::requestFullWake( FullWakeReason reason ) +{ + uint32_t options = 0; + IOService * pciRoot = 0; + bool promotion = false; + + // System must be in dark wake and a valid reason for entering full wake + if ((kFullWakeReasonNone == reason) || + (kFullWakeReasonNone != fullWakeReason) || + (CAP_CURRENT(kIOPMSystemCapabilityGraphics))) { + return; + } + + // Will clear reason upon exit from full wake + fullWakeReason = reason; + + _desiredCapability |= (kIOPMSystemCapabilityGraphics | + kIOPMSystemCapabilityAudio); + + if ((kSystemTransitionWake == _systemTransitionType) && + !(_pendingCapability & kIOPMSystemCapabilityGraphics) && + !graphicsSuppressed) { + // Promote to full wake while waking up to dark wake due to tickle. + // PM will hold off notifying the graphics subsystem about system wake + // as late as possible, so if a HID tickle does arrive, graphics can + // power up on this same wake cycle. The latency to power up graphics + // on the next cycle can be huge on some systems. However, once any + // graphics suppression has taken effect, it is too late. All other + // graphics devices must be similarly suppressed. But the delay till + // the following cycle should be short. + + _pendingCapability |= (kIOPMSystemCapabilityGraphics | + kIOPMSystemCapabilityAudio); + + // Immediately bring up audio and graphics + pciRoot = pciHostBridgeDriver; + willEnterFullWake(); + promotion = true; + } + + // Unsafe to cancel once graphics was powered. + // If system woke from dark wake, the return to sleep can + // be cancelled. "awake -> dark -> sleep" transition + // can be canceled also, during the "dark --> sleep" phase + // *prior* to driver power down. + if (!CAP_HIGHEST(kIOPMSystemCapabilityGraphics) || + _pendingCapability == 0) { + options |= kIOPMSyncCancelPowerDown; + } + + synchronizePowerTree(options, pciRoot); + if (kFullWakeReasonLocalUser == fullWakeReason) { + // IOGraphics doesn't light the display even though graphics is + // enabled in kIOMessageSystemCapabilityChange message(radar 9502104) + // So, do an explicit activity tickle + if (wrangler) { + wrangler->activityTickle(0, 0); + } + } + + // Log a timestamp for the initial full wake request. + // System may not always honor this full wake request. + if (!CAP_HIGHEST(kIOPMSystemCapabilityGraphics)) { + AbsoluteTime now; + uint64_t nsec; + + clock_get_uptime(&now); + SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime); + absolutetime_to_nanoseconds(now, &nsec); + MSG("full wake %s (reason %u) %u ms\n", + promotion ? 
"promotion" : "request", + fullWakeReason, ((int)((nsec) / NSEC_PER_MSEC))); + } } //****************************************************************************** @@ -7505,34 +7341,34 @@ void IOPMrootDomain::requestFullWake( FullWakeReason reason ) // Assumptions: fullWakeReason was updated //****************************************************************************** -void IOPMrootDomain::willEnterFullWake( void ) +void +IOPMrootDomain::willEnterFullWake( void ) { - hibernateRetry = false; - sleepToStandby = false; - standbyNixed = false; - resetTimers = false; - sleepTimerMaintenance = false; + hibernateRetry = false; + sleepToStandby = false; + standbyNixed = false; + resetTimers = false; + sleepTimerMaintenance = false; - _systemMessageClientMask = kSystemMessageClientPowerd | - kSystemMessageClientLegacyApp; + _systemMessageClientMask = kSystemMessageClientPowerd | + kSystemMessageClientLegacyApp; - if ((_highestCapability & kIOPMSystemCapabilityGraphics) == 0) - { - // Initial graphics full power - _systemMessageClientMask |= kSystemMessageClientKernel; + if ((_highestCapability & kIOPMSystemCapabilityGraphics) == 0) { + // Initial graphics full power + _systemMessageClientMask |= kSystemMessageClientKernel; - // Set kIOPMUserTriggeredFullWakeKey before full wake for IOGraphics - setProperty(gIOPMUserTriggeredFullWakeKey, - (kFullWakeReasonLocalUser == fullWakeReason) ? - kOSBooleanTrue : kOSBooleanFalse); - } + // Set kIOPMUserTriggeredFullWakeKey before full wake for IOGraphics + setProperty(gIOPMUserTriggeredFullWakeKey, + (kFullWakeReasonLocalUser == fullWakeReason) ? + kOSBooleanTrue : kOSBooleanFalse); + } #if HIBERNATION - IOHibernateSetWakeCapabilities(_pendingCapability); + IOHibernateSetWakeCapabilities(_pendingCapability); #endif - IOService::setAdvisoryTickleEnable( true ); - tellClients(kIOMessageSystemWillPowerOn); - preventTransitionToUserActive(false); + IOService::setAdvisoryTickleEnable( true ); + tellClients(kIOMessageSystemWillPowerOn); + preventTransitionToUserActive(false); } //****************************************************************************** @@ -7541,15 +7377,15 @@ void IOPMrootDomain::willEnterFullWake( void ) // System has already entered full wake. Invoked by a delayed thread call. //****************************************************************************** -void IOPMrootDomain::fullWakeDelayedWork( void ) +void +IOPMrootDomain::fullWakeDelayedWork( void ) { #if DARK_TO_FULL_EVALUATE_CLAMSHELL - // Not gated, don't modify state - if ((kSystemTransitionNone == _systemTransitionType) && - CAP_CURRENT(kIOPMSystemCapabilityGraphics)) - { - receivePowerNotification( kLocalEvalClamshellCommand ); - } + // Not gated, don't modify state + if ((kSystemTransitionNone == _systemTransitionType) && + CAP_CURRENT(kIOPMSystemCapabilityGraphics)) { + receivePowerNotification( kLocalEvalClamshellCommand ); + } #endif } @@ -7557,48 +7393,47 @@ void IOPMrootDomain::fullWakeDelayedWork( void ) // evaluateAssertions // //****************************************************************************** -void IOPMrootDomain::evaluateAssertions(IOPMDriverAssertionType newAssertions, IOPMDriverAssertionType oldAssertions) -{ - IOPMDriverAssertionType changedBits = newAssertions ^ oldAssertions; - - messageClients(kIOPMMessageDriverAssertionsChanged); - - if (changedBits & kIOPMDriverAssertionPreventDisplaySleepBit) { - - if (wrangler) { - bool value = (newAssertions & kIOPMDriverAssertionPreventDisplaySleepBit) ? 
true : false;
-
-            DLOG("wrangler->setIgnoreIdleTimer\(%d)\n", value);
-            wrangler->setIgnoreIdleTimer( value );
-        }
-    }
-
-    if (changedBits & kIOPMDriverAssertionCPUBit) {
-        evaluatePolicy(kStimulusDarkWakeEvaluate);
-        if (!assertOnWakeSecs && gIOLastWakeAbsTime) {
-            AbsoluteTime now;
-            clock_usec_t microsecs;
-            clock_get_uptime(&now);
-            SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime);
-            absolutetime_to_microtime(now, &assertOnWakeSecs, &microsecs);
-            if (assertOnWakeReport) {
-                HISTREPORT_TALLYVALUE(assertOnWakeReport, (int64_t)assertOnWakeSecs);
-                DLOG("Updated assertOnWake %lu\n", (unsigned long)assertOnWakeSecs);
-            }
-        }
-    }
-
-    if (changedBits & kIOPMDriverAssertionReservedBit7) {
-        bool value = (newAssertions & kIOPMDriverAssertionReservedBit7) ? true : false;
-        if (value) {
-            DLOG("Driver assertion ReservedBit7 raised. Legacy IO preventing sleep\n");
-            updatePreventIdleSleepList(this, true);
-        }
-        else {
-            DLOG("Driver assertion ReservedBit7 dropped\n");
-            updatePreventIdleSleepList(this, false);
-        }
-    }
+void
+IOPMrootDomain::evaluateAssertions(IOPMDriverAssertionType newAssertions, IOPMDriverAssertionType oldAssertions)
+{
+    IOPMDriverAssertionType changedBits = newAssertions ^ oldAssertions;
+
+    messageClients(kIOPMMessageDriverAssertionsChanged);
+
+    if (changedBits & kIOPMDriverAssertionPreventDisplaySleepBit) {
+        if (wrangler) {
+            bool value = (newAssertions & kIOPMDriverAssertionPreventDisplaySleepBit) ? true : false;
+
+            DLOG("wrangler->setIgnoreIdleTimer\(%d)\n", value);
+            wrangler->setIgnoreIdleTimer( value );
+        }
+    }
+
+    if (changedBits & kIOPMDriverAssertionCPUBit) {
+        evaluatePolicy(kStimulusDarkWakeEvaluate);
+        if (!assertOnWakeSecs && gIOLastWakeAbsTime) {
+            AbsoluteTime now;
+            clock_usec_t microsecs;
+            clock_get_uptime(&now);
+            SUB_ABSOLUTETIME(&now, &gIOLastWakeAbsTime);
+            absolutetime_to_microtime(now, &assertOnWakeSecs, &microsecs);
+            if (assertOnWakeReport) {
+                HISTREPORT_TALLYVALUE(assertOnWakeReport, (int64_t)assertOnWakeSecs);
+                DLOG("Updated assertOnWake %lu\n", (unsigned long)assertOnWakeSecs);
+            }
+        }
+    }
+
+    if (changedBits & kIOPMDriverAssertionReservedBit7) {
+        bool value = (newAssertions & kIOPMDriverAssertionReservedBit7) ? true : false;
+        if (value) {
+            DLOG("Driver assertion ReservedBit7 raised. Legacy IO preventing sleep\n");
+            updatePreventIdleSleepList(this, true);
+        } else {
+            DLOG("Driver assertion ReservedBit7 dropped\n");
+            updatePreventIdleSleepList(this, false);
+        }
+    }
 }

 // MARK: -
@@ -7609,191 +7444,192 @@ void IOPMrootDomain::evaluateAssertions(IOPMDriverAssertionType newAssertions, I
 //
 //******************************************************************************

-void IOPMrootDomain::pmStatsRecordEvent(
-    int eventIndex,
-    AbsoluteTime timestamp)
+void
+IOPMrootDomain::pmStatsRecordEvent(
+    int eventIndex,
+    AbsoluteTime timestamp)
 {
-    bool starting = eventIndex & kIOPMStatsEventStartFlag ? true:false;
-    bool stopping = eventIndex & kIOPMStatsEventStopFlag ?
true:false; + uint64_t delta; + uint64_t nsec; + OSData *publishPMStats = NULL; - eventIndex &= ~(kIOPMStatsEventStartFlag | kIOPMStatsEventStopFlag); + eventIndex &= ~(kIOPMStatsEventStartFlag | kIOPMStatsEventStopFlag); - absolutetime_to_nanoseconds(timestamp, &nsec); + absolutetime_to_nanoseconds(timestamp, &nsec); - switch (eventIndex) { - case kIOPMStatsHibernateImageWrite: - if (starting) - gPMStats.hibWrite.start = nsec; - else if (stopping) - gPMStats.hibWrite.stop = nsec; + switch (eventIndex) { + case kIOPMStatsHibernateImageWrite: + if (starting) { + gPMStats.hibWrite.start = nsec; + } else if (stopping) { + gPMStats.hibWrite.stop = nsec; + } - if (stopping) { - delta = gPMStats.hibWrite.stop - gPMStats.hibWrite.start; - IOLog("PMStats: Hibernate write took %qd ms\n", delta/NSEC_PER_MSEC); - } - break; - case kIOPMStatsHibernateImageRead: - if (starting) - gPMStats.hibRead.start = nsec; - else if (stopping) - gPMStats.hibRead.stop = nsec; + if (stopping) { + delta = gPMStats.hibWrite.stop - gPMStats.hibWrite.start; + IOLog("PMStats: Hibernate write took %qd ms\n", delta / NSEC_PER_MSEC); + } + break; + case kIOPMStatsHibernateImageRead: + if (starting) { + gPMStats.hibRead.start = nsec; + } else if (stopping) { + gPMStats.hibRead.stop = nsec; + } - if (stopping) { - delta = gPMStats.hibRead.stop - gPMStats.hibRead.start; - IOLog("PMStats: Hibernate read took %qd ms\n", delta/NSEC_PER_MSEC); + if (stopping) { + delta = gPMStats.hibRead.stop - gPMStats.hibRead.start; + IOLog("PMStats: Hibernate read took %qd ms\n", delta / NSEC_PER_MSEC); - publishPMStats = OSData::withBytes(&gPMStats, sizeof(gPMStats)); - setProperty(kIOPMSleepStatisticsKey, publishPMStats); - publishPMStats->release(); - bzero(&gPMStats, sizeof(gPMStats)); - } - break; - } + publishPMStats = OSData::withBytes(&gPMStats, sizeof(gPMStats)); + setProperty(kIOPMSleepStatisticsKey, publishPMStats); + publishPMStats->release(); + bzero(&gPMStats, sizeof(gPMStats)); + } + break; + } } /* * Appends a record of the application response to * IOPMrootDomain::pmStatsAppResponses */ -void IOPMrootDomain::pmStatsRecordApplicationResponse( - const OSSymbol *response, - const char *name, - int messageType, - uint32_t delay_ms, - uint64_t id, - OSObject *object, - IOPMPowerStateIndex powerState) -{ - OSDictionary *responseDescription = NULL; - OSNumber *delayNum = NULL; - OSNumber *powerCaps = NULL; - OSNumber *pidNum = NULL; - OSNumber *msgNum = NULL; - const OSSymbol *appname; - const OSSymbol *sleep = NULL, *wake = NULL; - IOPMServiceInterestNotifier *notify = 0; - - if (object && (notify = OSDynamicCast(IOPMServiceInterestNotifier, object))) - { - if (response->isEqualTo(gIOPMStatsResponseTimedOut)) - notify->ackTimeoutCnt++; - else - notify->ackTimeoutCnt = 0; - - } - - if (response->isEqualTo(gIOPMStatsResponsePrompt) || - (_systemTransitionType == kSystemTransitionNone) || (_systemTransitionType == kSystemTransitionNewCapClient)) - return; - - - if (response->isEqualTo(gIOPMStatsDriverPSChangeSlow)) { - kdebugTrace(kPMLogDrvPSChangeDelay, id, messageType, delay_ms); - } - else if (notify) { - // User space app or kernel capability client - if (id) { - kdebugTrace(kPMLogAppResponseDelay, id, notify->msgType, delay_ms); - } - else { - kdebugTrace(kPMLogDrvResponseDelay, notify->uuid0, messageType, delay_ms); - } - notify->msgType = 0; - } - - responseDescription = OSDictionary::withCapacity(5); - if (responseDescription) - { - if (response) { - responseDescription->setObject(_statsResponseTypeKey, response); - } - - msgNum 
= OSNumber::withNumber(messageType, 32); - if (msgNum) { - responseDescription->setObject(_statsMessageTypeKey, msgNum); - msgNum->release(); - } - - if (!name && notify && notify->identifier) { - name = notify->identifier->getCStringNoCopy(); - } - - if (name && (strlen(name) > 0)) - { - appname = OSSymbol::withCString(name); - if (appname) { - responseDescription->setObject(_statsNameKey, appname); - appname->release(); - } - } - - if (!id && notify) { - id = notify->uuid0; - } - if (id != 0) { - pidNum = OSNumber::withNumber(id, 64); - if (pidNum) { - responseDescription->setObject(_statsPIDKey, pidNum); - pidNum->release(); - } - } - - delayNum = OSNumber::withNumber(delay_ms, 32); - if (delayNum) { - responseDescription->setObject(_statsTimeMSKey, delayNum); - delayNum->release(); - } - - if (response->isEqualTo(gIOPMStatsDriverPSChangeSlow)) { - powerCaps = OSNumber::withNumber(powerState, 32); +void +IOPMrootDomain::pmStatsRecordApplicationResponse( + const OSSymbol *response, + const char *name, + int messageType, + uint32_t delay_ms, + uint64_t id, + OSObject *object, + IOPMPowerStateIndex powerState) +{ + OSDictionary *responseDescription = NULL; + OSNumber *delayNum = NULL; + OSNumber *powerCaps = NULL; + OSNumber *pidNum = NULL; + OSNumber *msgNum = NULL; + const OSSymbol *appname; + const OSSymbol *sleep = NULL, *wake = NULL; + IOPMServiceInterestNotifier *notify = 0; + + if (object && (notify = OSDynamicCast(IOPMServiceInterestNotifier, object))) { + if (response->isEqualTo(gIOPMStatsResponseTimedOut)) { + notify->ackTimeoutCnt++; + } else { + notify->ackTimeoutCnt = 0; + } + } + + if (response->isEqualTo(gIOPMStatsResponsePrompt) || + (_systemTransitionType == kSystemTransitionNone) || (_systemTransitionType == kSystemTransitionNewCapClient)) { + return; + } + + + if (response->isEqualTo(gIOPMStatsDriverPSChangeSlow)) { + kdebugTrace(kPMLogDrvPSChangeDelay, id, messageType, delay_ms); + } else if (notify) { + // User space app or kernel capability client + if (id) { + kdebugTrace(kPMLogAppResponseDelay, id, notify->msgType, delay_ms); + } else { + kdebugTrace(kPMLogDrvResponseDelay, notify->uuid0, messageType, delay_ms); + } + notify->msgType = 0; + } + + responseDescription = OSDictionary::withCapacity(5); + if (responseDescription) { + if (response) { + responseDescription->setObject(_statsResponseTypeKey, response); + } + + msgNum = OSNumber::withNumber(messageType, 32); + if (msgNum) { + responseDescription->setObject(_statsMessageTypeKey, msgNum); + msgNum->release(); + } + + if (!name && notify && notify->identifier) { + name = notify->identifier->getCStringNoCopy(); + } + + if (name && (strlen(name) > 0)) { + appname = OSSymbol::withCString(name); + if (appname) { + responseDescription->setObject(_statsNameKey, appname); + appname->release(); + } + } + + if (!id && notify) { + id = notify->uuid0; + } + if (id != 0) { + pidNum = OSNumber::withNumber(id, 64); + if (pidNum) { + responseDescription->setObject(_statsPIDKey, pidNum); + pidNum->release(); + } + } + + delayNum = OSNumber::withNumber(delay_ms, 32); + if (delayNum) { + responseDescription->setObject(_statsTimeMSKey, delayNum); + delayNum->release(); + } + + if (response->isEqualTo(gIOPMStatsDriverPSChangeSlow)) { + powerCaps = OSNumber::withNumber(powerState, 32); #if !defined(__i386__) && !defined(__x86_64__) && (DEVELOPMENT || DEBUG) - IOLog("%s::powerStateChange type(%d) to(%lu) async took %d ms\n", - name, messageType, - powerState, delay_ms); + IOLog("%s::powerStateChange type(%d) to(%lu) async took %d 
ms\n", + name, messageType, + powerState, delay_ms); #endif + } else { + powerCaps = OSNumber::withNumber(_pendingCapability, 32); + } + if (powerCaps) { + responseDescription->setObject(_statsPowerCapsKey, powerCaps); + powerCaps->release(); + } - } - else { - powerCaps = OSNumber::withNumber(_pendingCapability, 32); - } - if (powerCaps) { - responseDescription->setObject(_statsPowerCapsKey, powerCaps); - powerCaps->release(); - } - - sleep = OSSymbol::withCString("Sleep"); - wake = OSSymbol::withCString("Wake"); - if (_systemTransitionType == kSystemTransitionSleep) { - responseDescription->setObject(kIOPMStatsSystemTransitionKey, sleep); - } - else if (_systemTransitionType == kSystemTransitionWake) { - responseDescription->setObject(kIOPMStatsSystemTransitionKey, wake); - } - else if (_systemTransitionType == kSystemTransitionCapability) { - if (CAP_LOSS(kIOPMSystemCapabilityGraphics)) - responseDescription->setObject(kIOPMStatsSystemTransitionKey, sleep); - else if (CAP_GAIN(kIOPMSystemCapabilityGraphics)) - responseDescription->setObject(kIOPMStatsSystemTransitionKey, wake); - } - if (sleep) sleep->release(); - if (wake) wake->release(); - - - - IOLockLock(pmStatsLock); - if (pmStatsAppResponses && pmStatsAppResponses->getCount() < 50) { - pmStatsAppResponses->setObject(responseDescription); - } - IOLockUnlock(pmStatsLock); - - responseDescription->release(); - } - - return; + sleep = OSSymbol::withCString("Sleep"); + wake = OSSymbol::withCString("Wake"); + if (_systemTransitionType == kSystemTransitionSleep) { + responseDescription->setObject(kIOPMStatsSystemTransitionKey, sleep); + } else if (_systemTransitionType == kSystemTransitionWake) { + responseDescription->setObject(kIOPMStatsSystemTransitionKey, wake); + } else if (_systemTransitionType == kSystemTransitionCapability) { + if (CAP_LOSS(kIOPMSystemCapabilityGraphics)) { + responseDescription->setObject(kIOPMStatsSystemTransitionKey, sleep); + } else if (CAP_GAIN(kIOPMSystemCapabilityGraphics)) { + responseDescription->setObject(kIOPMStatsSystemTransitionKey, wake); + } + } + if (sleep) { + sleep->release(); + } + if (wake) { + wake->release(); + } + + + + IOLockLock(pmStatsLock); + if (pmStatsAppResponses && pmStatsAppResponses->getCount() < 50) { + pmStatsAppResponses->setObject(responseDescription); + } + IOLockUnlock(pmStatsLock); + + responseDescription->release(); + } + + return; } // MARK: - @@ -7804,336 +7640,344 @@ void IOPMrootDomain::pmStatsRecordApplicationResponse( // //****************************************************************************** -#define kIOPMRegisterNVRAMTracePointHandlerKey \ - "IOPMRegisterNVRAMTracePointHandler" - -IOReturn IOPMrootDomain::callPlatformFunction( - const OSSymbol * functionName, - bool waitForFunction, - void * param1, void * param2, - void * param3, void * param4 ) -{ - uint32_t bootFailureCode = 0xffffffff; - if (pmTracer && functionName && - functionName->isEqualTo(kIOPMRegisterNVRAMTracePointHandlerKey) && - !pmTracer->tracePointHandler && !pmTracer->tracePointTarget) - { - uint32_t tracePointPhases, tracePointPCI; - uint64_t statusCode; - - pmTracer->tracePointHandler = (IOPMTracePointHandler) param1; - pmTracer->tracePointTarget = (void *) param2; - tracePointPCI = (uint32_t)(uintptr_t) param3; - tracePointPhases = (uint32_t)(uintptr_t) param4; - if ((tracePointPhases & 0xff) == kIOPMTracePointSystemSleep) { - - IORegistryEntry *node = IORegistryEntry::fromPath( "/chosen", gIODTPlane ); - if ( node ) { - OSData *data = OSDynamicCast( OSData, 
node->getProperty(kIOEFIBootRomFailureKey) ); - if ( data && data->getLength() == sizeof(bootFailureCode) ) { - memcpy(&bootFailureCode, data->getBytesNoCopy(), sizeof(bootFailureCode)); - } - node->release(); - } - // Failure code from EFI/BootRom is a four byte structure - tracePointPCI = OSSwapBigToHostInt32(bootFailureCode); - } - statusCode = (((uint64_t)tracePointPCI) << 32) | tracePointPhases; - if ((tracePointPhases & 0xff) != kIOPMTracePointSystemUp) { - MSG("Sleep failure code 0x%08x 0x%08x\n", - tracePointPCI, tracePointPhases); - } - setProperty(kIOPMSleepWakeFailureCodeKey, statusCode, 64); - pmTracer->tracePointHandler( pmTracer->tracePointTarget, 0, 0 ); - - return kIOReturnSuccess; - } -#if HIBERNATION - else if (functionName && - functionName->isEqualTo(kIOPMInstallSystemSleepPolicyHandlerKey)) - { - if (gSleepPolicyHandler) - return kIOReturnExclusiveAccess; - if (!param1) - return kIOReturnBadArgument; - gSleepPolicyHandler = (IOPMSystemSleepPolicyHandler) param1; - gSleepPolicyTarget = (void *) param2; - setProperty("IOPMSystemSleepPolicyHandler", kOSBooleanTrue); - return kIOReturnSuccess; - } -#endif +#define kIOPMRegisterNVRAMTracePointHandlerKey \ + "IOPMRegisterNVRAMTracePointHandler" + +IOReturn +IOPMrootDomain::callPlatformFunction( + const OSSymbol * functionName, + bool waitForFunction, + void * param1, void * param2, + void * param3, void * param4 ) +{ + uint32_t bootFailureCode = 0xffffffff; + if (pmTracer && functionName && + functionName->isEqualTo(kIOPMRegisterNVRAMTracePointHandlerKey) && + !pmTracer->tracePointHandler && !pmTracer->tracePointTarget) { + uint32_t tracePointPhases, tracePointPCI; + uint64_t statusCode; + + pmTracer->tracePointHandler = (IOPMTracePointHandler) param1; + pmTracer->tracePointTarget = (void *) param2; + tracePointPCI = (uint32_t)(uintptr_t) param3; + tracePointPhases = (uint32_t)(uintptr_t) param4; + if ((tracePointPhases & 0xff) == kIOPMTracePointSystemSleep) { + IORegistryEntry *node = IORegistryEntry::fromPath( "/chosen", gIODTPlane ); + if (node) { + OSData *data = OSDynamicCast( OSData, node->getProperty(kIOEFIBootRomFailureKey)); + if (data && data->getLength() == sizeof(bootFailureCode)) { + memcpy(&bootFailureCode, data->getBytesNoCopy(), sizeof(bootFailureCode)); + } + node->release(); + } + // Failure code from EFI/BootRom is a four byte structure + tracePointPCI = OSSwapBigToHostInt32(bootFailureCode); + } + statusCode = (((uint64_t)tracePointPCI) << 32) | tracePointPhases; + if ((tracePointPhases & 0xff) != kIOPMTracePointSystemUp) { + MSG("Sleep failure code 0x%08x 0x%08x\n", + tracePointPCI, tracePointPhases); + } + setProperty(kIOPMSleepWakeFailureCodeKey, statusCode, 64); + pmTracer->tracePointHandler( pmTracer->tracePointTarget, 0, 0 ); + + return kIOReturnSuccess; + } +#if HIBERNATION + else if (functionName && + functionName->isEqualTo(kIOPMInstallSystemSleepPolicyHandlerKey)) { + if (gSleepPolicyHandler) { + return kIOReturnExclusiveAccess; + } + if (!param1) { + return kIOReturnBadArgument; + } + gSleepPolicyHandler = (IOPMSystemSleepPolicyHandler) param1; + gSleepPolicyTarget = (void *) param2; + setProperty("IOPMSystemSleepPolicyHandler", kOSBooleanTrue); + return kIOReturnSuccess; + } +#endif + + return super::callPlatformFunction( + functionName, waitForFunction, param1, param2, param3, param4); +} + +void +IOPMrootDomain::kdebugTrace(uint32_t event, uint64_t id, + uintptr_t param1, uintptr_t param2, uintptr_t param3) +{ + uint32_t code = IODBG_POWER(event); + uint64_t regId = id; + if (regId == 0) { + 
regId = getRegistryEntryID(); + } + IOTimeStampConstant(code, (uintptr_t) regId, param1, param2, param3); +} + + +void +IOPMrootDomain::tracePoint( uint8_t point ) +{ + if (systemBooting) { + return; + } + + if (kIOPMTracePointWakeCapabilityClients == point) { + acceptSystemWakeEvents(false); + } + + kdebugTrace(kPMLogSleepWakeTracePoint, 0, point, 0); + pmTracer->tracePoint(point); +} + +void +IOPMrootDomain::traceDetail(OSObject *object, bool start) +{ + IOPMServiceInterestNotifier *notifier; + + if (systemBooting) { + return; + } + + notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); + if (!notifier) { + return; + } + + if (start) { + pmTracer->traceDetail( notifier->uuid0 >> 32 ); + kdebugTrace(kPMLogSleepWakeMessage, pmTracer->getTracePhase(), notifier->msgType, notifier->uuid0, notifier->uuid1); + if (notifier->identifier) { + DLOG("trace point 0x%02x msg 0x%x to %s\n", pmTracer->getTracePhase(), notifier->msgType, + notifier->identifier->getCStringNoCopy()); + } else { + DLOG("trace point 0x%02x msg 0x%x\n", pmTracer->getTracePhase(), notifier->msgType); + } + notifierThread = current_thread(); + notifierObject = notifier; + notifier->retain(); + } else { + notifierThread = NULL; + notifierObject = NULL; + notifier->release(); + } +} + + +void +IOPMrootDomain::traceAckDelay(OSObject *object, uint32_t response, uint32_t delay_ms) +{ + IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); + if (!notifier) { + DLOG("Unknown notifier\n"); + return; + } + + if (!systemBooting) { + kdebugTrace(kPMLogDrvResponseDelay, notifier->uuid0, notifier->uuid1, response, delay_ms); + if (notifier->identifier) { + DLOG("Response from %s took %d ms(response:%d)\n", + notifier->identifier->getCStringNoCopy(), delay_ms, response); + } else { + DLOG("Response from kext UUID %llx-%llx took %d ms(response:%d)\n", + notifier->uuid0, notifier->uuid1, delay_ms, response); + } + } +} + +void +IOPMrootDomain::traceDetail(uint32_t msgType, uint32_t msgIndex, uint32_t delay) +{ + if (!systemBooting) { + uint32_t detail = ((msgType & 0xffff) << 16) | (delay & 0xffff); + pmTracer->traceDetail( detail ); + kdebugTrace(kPMLogSleepWakeTracePoint, pmTracer->getTracePhase(), msgType, delay); + DLOG("trace point 0x%02x msgType 0x%x detail 0x%08x\n", pmTracer->getTracePhase(), msgType, delay); + } +} + + +void +IOPMrootDomain::configureReportGated(uint64_t channel_id, uint64_t action, void *result) +{ + size_t reportSize; + void **report = NULL; + uint32_t bktCnt; + uint32_t bktSize; + uint32_t *clientCnt; + + ASSERT_GATED(); + + report = NULL; + if (channel_id == kAssertDelayChID) { + report = &assertOnWakeReport; + bktCnt = kAssertDelayBcktCnt; + bktSize = kAssertDelayBcktSize; + clientCnt = &assertOnWakeClientCnt; + } else if (channel_id == kSleepDelaysChID) { + report = &sleepDelaysReport; + bktCnt = kSleepDelaysBcktCnt; + bktSize = kSleepDelaysBcktSize; + clientCnt = &sleepDelaysClientCnt; + } + + switch (action) { + case kIOReportEnable: + + if (*report) { + (*clientCnt)++; + break; + } + + reportSize = HISTREPORT_BUFSIZE(bktCnt); + *report = IOMalloc(reportSize); + if (*report == NULL) { + break; + } + bzero(*report, reportSize); + HISTREPORT_INIT(bktCnt, bktSize, *report, reportSize, + getRegistryEntryID(), channel_id, kIOReportCategoryPower); + + if (channel_id == kAssertDelayChID) { + assertOnWakeSecs = 0; + } + + break; + + case kIOReportDisable: + if (*clientCnt == 0) { + break; + } + if (*clientCnt == 1) { + IOFree(*report, HISTREPORT_BUFSIZE(bktCnt)); + 
*report = NULL; + } + (*clientCnt)--; + + if (channel_id == kAssertDelayChID) { + assertOnWakeSecs = -1; // Invalid value to prevent updates + } + break; + + case kIOReportGetDimensions: + if (*report) { + HISTREPORT_UPDATERES(*report, kIOReportGetDimensions, result); + } + break; + } - return super::callPlatformFunction( - functionName, waitForFunction, param1, param2, param3, param4); + return; } -void IOPMrootDomain::kdebugTrace(uint32_t event, uint64_t id, - uintptr_t param1, uintptr_t param2, uintptr_t param3) +IOReturn +IOPMrootDomain::configureReport(IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination) { - uint32_t code = IODBG_POWER(event); - uint64_t regId = id; - if (regId == 0) { - regId = getRegistryEntryID(); - } - IOTimeStampConstant(code, (uintptr_t) regId, param1, param2, param3); -} + unsigned cnt; + uint64_t configAction = (uint64_t)action; + + for (cnt = 0; cnt < channelList->nchannels; cnt++) { + if ((channelList->channels[cnt].channel_id == kSleepCntChID) || + (channelList->channels[cnt].channel_id == kDarkWkCntChID) || + (channelList->channels[cnt].channel_id == kUserWkCntChID)) { + if (action != kIOReportGetDimensions) { + continue; + } + SIMPLEREPORT_UPDATERES(kIOReportGetDimensions, result); + } else if ((channelList->channels[cnt].channel_id == kAssertDelayChID) || + (channelList->channels[cnt].channel_id == kSleepDelaysChID)) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::configureReportGated), + (OSObject *)this, (void *)channelList->channels[cnt].channel_id, + (void *)configAction, (void *)result); + } + } + return super::configureReport(channelList, action, result, destination); +} -void IOPMrootDomain::tracePoint( uint8_t point ) +IOReturn +IOPMrootDomain::updateReportGated(uint64_t ch_id, void *result, IOBufferMemoryDescriptor *dest) { - if (systemBooting) return; + uint32_t size2cpy; + void *data2cpy; + void **report; + + ASSERT_GATED(); + + report = NULL; + if (ch_id == kAssertDelayChID) { + report = &assertOnWakeReport; + } else if (ch_id == kSleepDelaysChID) { + report = &sleepDelaysReport; + } + + if (*report == NULL) { + return kIOReturnNotOpen; + } - if (kIOPMTracePointWakeCapabilityClients == point) - acceptSystemWakeEvents(false); + HISTREPORT_UPDATEPREP(*report, data2cpy, size2cpy); + if (size2cpy > (dest->getCapacity() - dest->getLength())) { + return kIOReturnOverrun; + } + + HISTREPORT_UPDATERES(*report, kIOReportCopyChannelData, result); + dest->appendBytes(data2cpy, size2cpy); - kdebugTrace(kPMLogSleepWakeTracePoint, 0, point, 0); - pmTracer->tracePoint(point); + return kIOReturnSuccess; } -void IOPMrootDomain::traceDetail(OSObject *object, bool start) +IOReturn +IOPMrootDomain::updateReport(IOReportChannelList *channelList, + IOReportUpdateAction action, + void *result, + void *destination) { - IOPMServiceInterestNotifier *notifier; + uint32_t size2cpy; + void *data2cpy; + uint8_t buf[SIMPLEREPORT_BUFSIZE]; + IOBufferMemoryDescriptor *dest = OSDynamicCast(IOBufferMemoryDescriptor, (OSObject *)destination); + unsigned cnt; + uint64_t ch_id; - if (systemBooting) { - return; - } - - notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); - if (!notifier) { - return; - } + if (action != kIOReportCopyChannelData) { + goto exit; + } - if (start) { - pmTracer->traceDetail( notifier->uuid0 >> 32 ); - kdebugTrace(kPMLogSleepWakeMessage, pmTracer->getTracePhase(), notifier->msgType, notifier->uuid0, notifier->uuid1); - if (notifier->identifier) { 
- DLOG("trace point 0x%02x msg 0x%x to %s\n", pmTracer->getTracePhase(), notifier->msgType, - notifier->identifier->getCStringNoCopy()); - } - else { - DLOG("trace point 0x%02x msg 0x%x\n", pmTracer->getTracePhase(), notifier->msgType); - } - notifierThread = current_thread(); - notifierObject = notifier; - notifier->retain(); - } - else { - notifierThread = NULL; - notifierObject = NULL; - notifier->release(); - } -} - - -void IOPMrootDomain::traceAckDelay(OSObject *object, uint32_t response, uint32_t delay_ms) -{ - IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, object); - if (!notifier) { - DLOG("Unknown notifier\n"); - return; - } + for (cnt = 0; cnt < channelList->nchannels; cnt++) { + ch_id = channelList->channels[cnt].channel_id; + + if ((ch_id == kAssertDelayChID) || (ch_id == kSleepDelaysChID)) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::updateReportGated), + (OSObject *)this, (void *)ch_id, + (void *)result, (void *)dest); + continue; + } else if ((ch_id == kSleepCntChID) || + (ch_id == kDarkWkCntChID) || (ch_id == kUserWkCntChID)) { + SIMPLEREPORT_INIT(buf, sizeof(buf), getRegistryEntryID(), ch_id, kIOReportCategoryPower); + } else { + continue; + } - if (!systemBooting) { - kdebugTrace(kPMLogDrvResponseDelay, notifier->uuid0, notifier->uuid1, response, delay_ms); - if (notifier->identifier) { - DLOG("Response from %s took %d ms(response:%d)\n", - notifier->identifier->getCStringNoCopy(), delay_ms, response); - } - else { - DLOG("Response from kext UUID %llx-%llx took %d ms(response:%d)\n", - notifier->uuid0, notifier->uuid1, delay_ms, response); - } - } -} - -void IOPMrootDomain::traceDetail(uint32_t msgType, uint32_t msgIndex, uint32_t delay) -{ - if (!systemBooting) { - uint32_t detail = ((msgType & 0xffff) << 16) | (delay & 0xffff); - pmTracer->traceDetail( detail ); - kdebugTrace(kPMLogSleepWakeTracePoint, pmTracer->getTracePhase(), msgType, delay); - DLOG("trace point 0x%02x msgType 0x%x detail 0x%08x\n", pmTracer->getTracePhase(), msgType, delay); - } -} - - -void IOPMrootDomain::configureReportGated(uint64_t channel_id, uint64_t action, void *result) -{ - size_t reportSize; - void **report = NULL; - uint32_t bktCnt; - uint32_t bktSize; - uint32_t *clientCnt; - - ASSERT_GATED(); - - report = NULL; - if (channel_id == kAssertDelayChID) { - report = &assertOnWakeReport; - bktCnt = kAssertDelayBcktCnt; - bktSize = kAssertDelayBcktSize; - clientCnt = &assertOnWakeClientCnt; - } - else if (channel_id == kSleepDelaysChID) { - report = &sleepDelaysReport; - bktCnt = kSleepDelaysBcktCnt; - bktSize = kSleepDelaysBcktSize; - clientCnt = &sleepDelaysClientCnt; - } - - switch (action) - { - case kIOReportEnable: - - if (*report) { - (*clientCnt)++; - break; - } - - reportSize = HISTREPORT_BUFSIZE(bktCnt); - *report = IOMalloc(reportSize); - if (*report == NULL) { - break; - } - bzero(*report, reportSize); - HISTREPORT_INIT(bktCnt, bktSize, *report, reportSize, - getRegistryEntryID(), channel_id, kIOReportCategoryPower); - - if (channel_id == kAssertDelayChID) - assertOnWakeSecs = 0; - - break; - - case kIOReportDisable: - if (*clientCnt == 0) { - break; - } - if (*clientCnt == 1) - { - IOFree(*report, HISTREPORT_BUFSIZE(bktCnt)); - *report = NULL; - } - (*clientCnt)--; - - if (channel_id == kAssertDelayChID) - assertOnWakeSecs = -1; // Invalid value to prevent updates - - break; - - case kIOReportGetDimensions: - if (*report) { - HISTREPORT_UPDATERES(*report, kIOReportGetDimensions, result); - } - 
break; - } - - return; -} - -IOReturn IOPMrootDomain::configureReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination) -{ - unsigned cnt; - uint64_t configAction = (uint64_t)action; - - for (cnt = 0; cnt < channelList->nchannels; cnt++) { - if ( (channelList->channels[cnt].channel_id == kSleepCntChID) || - (channelList->channels[cnt].channel_id == kDarkWkCntChID) || - (channelList->channels[cnt].channel_id == kUserWkCntChID) ) { - if (action != kIOReportGetDimensions) continue; - SIMPLEREPORT_UPDATERES(kIOReportGetDimensions, result); - } - else if ((channelList->channels[cnt].channel_id == kAssertDelayChID) || - (channelList->channels[cnt].channel_id == kSleepDelaysChID)) { - gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::configureReportGated), - (OSObject *)this, (void *)channelList->channels[cnt].channel_id, - (void *)configAction, (void *)result); - } - } - - return super::configureReport(channelList, action, result, destination); -} - -IOReturn IOPMrootDomain::updateReportGated(uint64_t ch_id, void *result, IOBufferMemoryDescriptor *dest) -{ - - uint32_t size2cpy; - void *data2cpy; - void **report; - - ASSERT_GATED(); - - report = NULL; - if (ch_id == kAssertDelayChID) { - report = &assertOnWakeReport; - } - else if (ch_id == kSleepDelaysChID) { - report = &sleepDelaysReport; - } - - if (*report == NULL) { - return kIOReturnNotOpen; - } - - HISTREPORT_UPDATEPREP(*report, data2cpy, size2cpy); - if (size2cpy > (dest->getCapacity() - dest->getLength()) ) { - return kIOReturnOverrun; - } - - HISTREPORT_UPDATERES(*report, kIOReportCopyChannelData, result); - dest->appendBytes(data2cpy, size2cpy); - - return kIOReturnSuccess; -} - -IOReturn IOPMrootDomain::updateReport(IOReportChannelList *channelList, - IOReportUpdateAction action, - void *result, - void *destination) -{ - uint32_t size2cpy; - void *data2cpy; - uint8_t buf[SIMPLEREPORT_BUFSIZE]; - IOBufferMemoryDescriptor *dest = OSDynamicCast(IOBufferMemoryDescriptor, (OSObject *)destination); - unsigned cnt; - uint64_t ch_id; - - if (action != kIOReportCopyChannelData) goto exit; - - for (cnt = 0; cnt < channelList->nchannels; cnt++) { - ch_id = channelList->channels[cnt].channel_id ; - - if ((ch_id == kAssertDelayChID) || (ch_id == kSleepDelaysChID)) { - gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::updateReportGated), - (OSObject *)this, (void *)ch_id, - (void *)result, (void *)dest); - continue; - - } - else if ((ch_id == kSleepCntChID) || - (ch_id == kDarkWkCntChID) || (ch_id == kUserWkCntChID)) { - SIMPLEREPORT_INIT(buf, sizeof(buf), getRegistryEntryID(), ch_id, kIOReportCategoryPower); - } - else continue; - - if (ch_id == kSleepCntChID) - SIMPLEREPORT_SETVALUE(buf, sleepCnt); - else if (ch_id == kDarkWkCntChID) - SIMPLEREPORT_SETVALUE(buf, darkWakeCnt); - else if (ch_id == kUserWkCntChID) - SIMPLEREPORT_SETVALUE(buf, displayWakeCnt); + if (ch_id == kSleepCntChID) { + SIMPLEREPORT_SETVALUE(buf, sleepCnt); + } else if (ch_id == kDarkWkCntChID) { + SIMPLEREPORT_SETVALUE(buf, darkWakeCnt); + } else if (ch_id == kUserWkCntChID) { + SIMPLEREPORT_SETVALUE(buf, displayWakeCnt); + } - SIMPLEREPORT_UPDATEPREP(buf, data2cpy, size2cpy); - SIMPLEREPORT_UPDATERES(kIOReportCopyChannelData, result); - dest->appendBytes(data2cpy, size2cpy); - } + SIMPLEREPORT_UPDATEPREP(buf, data2cpy, size2cpy); + SIMPLEREPORT_UPDATERES(kIOReportCopyChannelData, result); + dest->appendBytes(data2cpy, size2cpy); + } 
exit: - return super::updateReport(channelList, action, result, destination); + return super::updateReport(channelList, action, result, destination); } @@ -8149,195 +7993,202 @@ OSDefineMetaClassAndStructors(PMTraceWorker, OSObject) #define kPMBestGuessPCIDevicesCount 25 #define kPMMaxRTCBitfieldSize 32 -PMTraceWorker *PMTraceWorker::tracer(IOPMrootDomain *owner) +PMTraceWorker * PMTraceWorker::tracer(IOPMrootDomain * owner) { - PMTraceWorker *me; + PMTraceWorker *me; - me = OSTypeAlloc( PMTraceWorker ); - if (!me || !me->init()) - { - return NULL; - } + me = OSTypeAlloc( PMTraceWorker ); + if (!me || !me->init()) { + return NULL; + } - DLOG("PMTraceWorker %p\n", OBFUSCATE(me)); + DLOG("PMTraceWorker %p\n", OBFUSCATE(me)); - // Note that we cannot instantiate the PCI device -> bit mappings here, since - // the IODeviceTree has not yet been created by IOPlatformExpert. We create - // this dictionary lazily. - me->owner = owner; - me->pciDeviceBitMappings = NULL; - me->pmTraceWorkerLock = IOLockAlloc(); - me->tracePhase = kIOPMTracePointSystemUp; - me->traceData32 = 0; - me->loginWindowData = 0; - me->coreDisplayData = 0; - me->coreGraphicsData = 0; - return me; + // Note that we cannot instantiate the PCI device -> bit mappings here, since + // the IODeviceTree has not yet been created by IOPlatformExpert. We create + // this dictionary lazily. + me->owner = owner; + me->pciDeviceBitMappings = NULL; + me->pmTraceWorkerLock = IOLockAlloc(); + me->tracePhase = kIOPMTracePointSystemUp; + me->traceData32 = 0; + me->loginWindowData = 0; + me->coreDisplayData = 0; + me->coreGraphicsData = 0; + return me; } -void PMTraceWorker::RTC_TRACE(void) +void +PMTraceWorker::RTC_TRACE(void) { - if (tracePointHandler && tracePointTarget) - { - uint32_t wordA; + if (tracePointHandler && tracePointTarget) { + uint32_t wordA; - IOLockLock(pmTraceWorkerLock); - wordA = (loginWindowData << 24) | (coreDisplayData << 16) | - (coreGraphicsData << 8) | tracePhase; - IOLockUnlock(pmTraceWorkerLock); + IOLockLock(pmTraceWorkerLock); + wordA = (loginWindowData << 24) | (coreDisplayData << 16) | + (coreGraphicsData << 8) | tracePhase; + IOLockUnlock(pmTraceWorkerLock); - tracePointHandler( tracePointTarget, traceData32, wordA ); - _LOG("RTC_TRACE wrote 0x%08x 0x%08x\n", traceData32, wordA); - } + tracePointHandler( tracePointTarget, traceData32, wordA ); + _LOG("RTC_TRACE wrote 0x%08x 0x%08x\n", traceData32, wordA); + } } -int PMTraceWorker::recordTopLevelPCIDevice(IOService * pciDevice) +int +PMTraceWorker::recordTopLevelPCIDevice(IOService * pciDevice) { - const OSSymbol * deviceName; - int index = -1; + const OSSymbol * deviceName; + int index = -1; - IOLockLock(pmTraceWorkerLock); + IOLockLock(pmTraceWorkerLock); - if (!pciDeviceBitMappings) - { - pciDeviceBitMappings = OSArray::withCapacity(kPMBestGuessPCIDevicesCount); - if (!pciDeviceBitMappings) - goto exit; - } + if (!pciDeviceBitMappings) { + pciDeviceBitMappings = OSArray::withCapacity(kPMBestGuessPCIDevicesCount); + if (!pciDeviceBitMappings) { + goto exit; + } + } - // Check for bitmask overflow. - if (pciDeviceBitMappings->getCount() >= kPMMaxRTCBitfieldSize) - goto exit; + // Check for bitmask overflow. 
+ if (pciDeviceBitMappings->getCount() >= kPMMaxRTCBitfieldSize) { + goto exit; + } - if ((deviceName = pciDevice->copyName()) && - (pciDeviceBitMappings->getNextIndexOfObject(deviceName, 0) == (unsigned int)-1) && - pciDeviceBitMappings->setObject(deviceName)) - { - index = pciDeviceBitMappings->getCount() - 1; - _LOG("PMTrace PCI array: set object %s => %d\n", - deviceName->getCStringNoCopy(), index); - } - if (deviceName) - deviceName->release(); - if (!addedToRegistry && (index >= 0)) - addedToRegistry = owner->setProperty("PCITopLevel", this); + if ((deviceName = pciDevice->copyName()) && + (pciDeviceBitMappings->getNextIndexOfObject(deviceName, 0) == (unsigned int)-1) && + pciDeviceBitMappings->setObject(deviceName)) { + index = pciDeviceBitMappings->getCount() - 1; + _LOG("PMTrace PCI array: set object %s => %d\n", + deviceName->getCStringNoCopy(), index); + } + if (deviceName) { + deviceName->release(); + } + if (!addedToRegistry && (index >= 0)) { + addedToRegistry = owner->setProperty("PCITopLevel", this); + } exit: - IOLockUnlock(pmTraceWorkerLock); - return index; + IOLockUnlock(pmTraceWorkerLock); + return index; } - -bool PMTraceWorker::serialize(OSSerialize *s) const + +bool +PMTraceWorker::serialize(OSSerialize *s) const { - bool ok = false; - if (pciDeviceBitMappings) - { - IOLockLock(pmTraceWorkerLock); - ok = pciDeviceBitMappings->serialize(s); - IOLockUnlock(pmTraceWorkerLock); - } - return ok; + bool ok = false; + if (pciDeviceBitMappings) { + IOLockLock(pmTraceWorkerLock); + ok = pciDeviceBitMappings->serialize(s); + IOLockUnlock(pmTraceWorkerLock); + } + return ok; } -void PMTraceWorker::tracePoint(uint8_t phase) +void +PMTraceWorker::tracePoint(uint8_t phase) { - // clear trace detail when phase begins - if (tracePhase != phase) - traceData32 = 0; + // clear trace detail when phase begins + if (tracePhase != phase) { + traceData32 = 0; + } - tracePhase = phase; + tracePhase = phase; - DLOG("trace point 0x%02x\n", tracePhase); - RTC_TRACE(); + DLOG("trace point 0x%02x\n", tracePhase); + RTC_TRACE(); } -void PMTraceWorker::traceDetail(uint32_t detail) +void +PMTraceWorker::traceDetail(uint32_t detail) { - if (detail == traceData32) { - return; - } - traceData32 = detail; - RTC_TRACE(); -} + if (detail == traceData32) { + return; + } + traceData32 = detail; + RTC_TRACE(); +} + +void +PMTraceWorker::traceComponentWakeProgress(uint32_t component, uint32_t data) +{ + switch (component) { + case kIOPMLoginWindowProgress: + loginWindowData = data & kIOPMLoginWindowProgressMask; + break; + case kIOPMCoreDisplayProgress: + coreDisplayData = data & kIOPMCoreDisplayProgressMask; + break; + case kIOPMCoreGraphicsProgress: + coreGraphicsData = data & kIOPMCoreGraphicsProgressMask; + break; + default: + return; + } -void PMTraceWorker::traceComponentWakeProgress(uint32_t component, uint32_t data) -{ - switch (component) { - case kIOPMLoginWindowProgress: - loginWindowData = data & kIOPMLoginWindowProgressMask; - break; - case kIOPMCoreDisplayProgress: - coreDisplayData = data & kIOPMCoreDisplayProgressMask; - break; - case kIOPMCoreGraphicsProgress: - coreGraphicsData = data & kIOPMCoreGraphicsProgressMask; - break; - default: - return; - } - - DLOG("component trace point 0x%02x data 0x%08x\n", component, data); - RTC_TRACE(); + DLOG("component trace point 0x%02x data 0x%08x\n", component, data); + RTC_TRACE(); } -void PMTraceWorker::tracePCIPowerChange( - change_t type, IOService *service, uint32_t changeFlags, uint32_t bitNum) +void +PMTraceWorker::tracePCIPowerChange( + 
change_t type, IOService *service, uint32_t changeFlags, uint32_t bitNum) { - uint32_t bitMask; - uint32_t expectedFlag; + uint32_t bitMask; + uint32_t expectedFlag; - // Ignore PCI changes outside of system sleep/wake. - if ((kIOPMTracePointSleepPowerPlaneDrivers != tracePhase) && - (kIOPMTracePointWakePowerPlaneDrivers != tracePhase)) - return; + // Ignore PCI changes outside of system sleep/wake. + if ((kIOPMTracePointSleepPowerPlaneDrivers != tracePhase) && + (kIOPMTracePointWakePowerPlaneDrivers != tracePhase)) { + return; + } - // Only record the WillChange transition when going to sleep, - // and the DidChange on the way up. - changeFlags &= (kIOPMDomainWillChange | kIOPMDomainDidChange); - expectedFlag = (kIOPMTracePointSleepPowerPlaneDrivers == tracePhase) ? - kIOPMDomainWillChange : kIOPMDomainDidChange; - if (changeFlags != expectedFlag) - return; + // Only record the WillChange transition when going to sleep, + // and the DidChange on the way up. + changeFlags &= (kIOPMDomainWillChange | kIOPMDomainDidChange); + expectedFlag = (kIOPMTracePointSleepPowerPlaneDrivers == tracePhase) ? + kIOPMDomainWillChange : kIOPMDomainDidChange; + if (changeFlags != expectedFlag) { + return; + } - // Mark this device off in our bitfield - if (bitNum < kPMMaxRTCBitfieldSize) - { - bitMask = (1 << bitNum); + // Mark this device off in our bitfield + if (bitNum < kPMMaxRTCBitfieldSize) { + bitMask = (1 << bitNum); - if (kPowerChangeStart == type) - { - traceData32 |= bitMask; - _LOG("PMTrace: Device %s started - bit %2d mask 0x%08x => 0x%08x\n", - service->getName(), bitNum, bitMask, traceData32); - owner->kdebugTrace(kPMLogPCIDevChangeStart, service->getRegistryEntryID(), traceData32, 0); - } - else - { - traceData32 &= ~bitMask; - _LOG("PMTrace: Device %s finished - bit %2d mask 0x%08x => 0x%08x\n", - service->getName(), bitNum, bitMask, traceData32); - owner->kdebugTrace(kPMLogPCIDevChangeDone, service->getRegistryEntryID(), traceData32, 0); - } + if (kPowerChangeStart == type) { + traceData32 |= bitMask; + _LOG("PMTrace: Device %s started - bit %2d mask 0x%08x => 0x%08x\n", + service->getName(), bitNum, bitMask, traceData32); + owner->kdebugTrace(kPMLogPCIDevChangeStart, service->getRegistryEntryID(), traceData32, 0); + } else { + traceData32 &= ~bitMask; + _LOG("PMTrace: Device %s finished - bit %2d mask 0x%08x => 0x%08x\n", + service->getName(), bitNum, bitMask, traceData32); + owner->kdebugTrace(kPMLogPCIDevChangeDone, service->getRegistryEntryID(), traceData32, 0); + } - DLOG("trace point 0x%02x detail 0x%08x\n", tracePhase, traceData32); - RTC_TRACE(); - } + DLOG("trace point 0x%02x detail 0x%08x\n", tracePhase, traceData32); + RTC_TRACE(); + } } -uint64_t PMTraceWorker::getPMStatusCode( ) +uint64_t +PMTraceWorker::getPMStatusCode() { - return (((uint64_t)traceData32 << 32) | ((uint64_t)tracePhase)); - + return ((uint64_t)traceData32 << 32) | ((uint64_t)tracePhase); } -uint8_t PMTraceWorker::getTracePhase() +uint8_t +PMTraceWorker::getTracePhase() { - return tracePhase; + return tracePhase; } -uint32_t PMTraceWorker::getTraceData() +uint32_t +PMTraceWorker::getTraceData() { - return traceData32; + return traceData32; } // MARK: - @@ -8348,190 +8199,183 @@ uint32_t PMTraceWorker::getTraceData() // //****************************************************************************** -PMHaltWorker * PMHaltWorker::worker( void ) -{ - PMHaltWorker * me; - IOThread thread; - - do { - me = OSTypeAlloc( PMHaltWorker ); - if (!me || !me->init()) - break; - - me->lock = IOLockAlloc(); - if (!me->lock) - 
break; - - DLOG("PMHaltWorker %p\n", OBFUSCATE(me)); - me->retain(); // thread holds extra retain - if (KERN_SUCCESS != kernel_thread_start(&PMHaltWorker::main, (void *) me, &thread)) - { - me->release(); - break; - } - thread_deallocate(thread); - return me; - - } while (false); - - if (me) me->release(); - return 0; -} - -void PMHaltWorker::free( void ) -{ - DLOG("PMHaltWorker free %p\n", OBFUSCATE(this)); - if (lock) - { - IOLockFree(lock); - lock = 0; - } - return OSObject::free(); -} - -void PMHaltWorker::main( void * arg, wait_result_t waitResult ) -{ - PMHaltWorker * me = (PMHaltWorker *) arg; - - IOLockLock( gPMHaltLock ); - gPMHaltBusyCount++; - me->depth = gPMHaltDepth; - IOLockUnlock( gPMHaltLock ); - - while (me->depth >= 0) - { - PMHaltWorker::work( me ); - - IOLockLock( gPMHaltLock ); - if (++gPMHaltIdleCount >= gPMHaltBusyCount) - { - // This is the last thread to finish work on this level, - // inform everyone to start working on next lower level. - gPMHaltDepth--; - me->depth = gPMHaltDepth; - gPMHaltIdleCount = 0; - thread_wakeup((event_t) &gPMHaltIdleCount); - } - else - { - // One or more threads are still working on this level, - // this thread must wait. - me->depth = gPMHaltDepth - 1; - do { - IOLockSleep(gPMHaltLock, &gPMHaltIdleCount, THREAD_UNINT); - } while (me->depth != gPMHaltDepth); - } - IOLockUnlock( gPMHaltLock ); - } - - // No more work to do, terminate thread - DLOG("All done for worker: %p (visits = %u)\n", OBFUSCATE(me), me->visits); - thread_wakeup( &gPMHaltDepth ); - me->release(); -} - -void PMHaltWorker::work( PMHaltWorker * me ) -{ - IOService * service; - OSSet * inner; - AbsoluteTime startTime, elapsedTime; - UInt32 deltaTime; - bool timeout; - - while (true) - { - service = 0; - timeout = false; - - // Claim an unit of work from the shared pool - IOLockLock( gPMHaltLock ); - inner = (OSSet *)gPMHaltArray->getObject(me->depth); - if (inner) - { - service = OSDynamicCast(IOService, inner->getAnyObject()); - if (service) - { - service->retain(); - inner->removeObject(service); - } - } - IOLockUnlock( gPMHaltLock ); - if (!service) - break; // no more work at this depth - - clock_get_uptime(&startTime); - - if (!service->isInactive() && - service->setProperty(gPMHaltClientAcknowledgeKey, me)) - { - IOLockLock(me->lock); - me->startTime = startTime; - me->service = service; - me->timeout = false; - IOLockUnlock(me->lock); - - service->systemWillShutdown( gPMHaltMessageType ); - - // Wait for driver acknowledgement - IOLockLock(me->lock); - while (service->getProperty(gPMHaltClientAcknowledgeKey)) - { - IOLockSleep(me->lock, me, THREAD_UNINT); - } - me->service = 0; - timeout = me->timeout; - IOLockUnlock(me->lock); - } - - deltaTime = computeDeltaTimeMS(&startTime, &elapsedTime); - if ((deltaTime > kPMHaltTimeoutMS) || timeout) - { - LOG("%s driver %s (0x%llx) took %u ms\n", - (gPMHaltMessageType == kIOMessageSystemWillPowerOff) ? 
- "PowerOff" : "Restart", - service->getName(), service->getRegistryEntryID(), - (uint32_t) deltaTime ); - halt_log_enter("PowerOff/Restart handler completed", - OSMemberFunctionCast(const void *, service, &IOService::systemWillShutdown), - elapsedTime); - } - - service->release(); - me->visits++; - } -} - -void PMHaltWorker::checkTimeout( PMHaltWorker * me, AbsoluteTime * now ) -{ - UInt64 nano; - AbsoluteTime startTime; - AbsoluteTime endTime; - - endTime = *now; - - IOLockLock(me->lock); - if (me->service && !me->timeout) - { - startTime = me->startTime; - nano = 0; - if (CMP_ABSOLUTETIME(&endTime, &startTime) > 0) - { - SUB_ABSOLUTETIME(&endTime, &startTime); - absolutetime_to_nanoseconds(endTime, &nano); - } - if (nano > 3000000000ULL) - { - me->timeout = true; - - halt_log_enter("PowerOff/Restart still waiting on handler", - OSMemberFunctionCast(const void *, me->service, &IOService::systemWillShutdown), - endTime); - MSG("%s still waiting on %s\n", - (gPMHaltMessageType == kIOMessageSystemWillPowerOff) ? "PowerOff" : "Restart", - me->service->getName()); - } - } - IOLockUnlock(me->lock); +PMHaltWorker * +PMHaltWorker::worker( void ) +{ + PMHaltWorker * me; + IOThread thread; + + do { + me = OSTypeAlloc( PMHaltWorker ); + if (!me || !me->init()) { + break; + } + + me->lock = IOLockAlloc(); + if (!me->lock) { + break; + } + + DLOG("PMHaltWorker %p\n", OBFUSCATE(me)); + me->retain(); // thread holds extra retain + if (KERN_SUCCESS != kernel_thread_start(&PMHaltWorker::main, (void *) me, &thread)) { + me->release(); + break; + } + thread_deallocate(thread); + return me; + } while (false); + + if (me) { + me->release(); + } + return 0; +} + +void +PMHaltWorker::free( void ) +{ + DLOG("PMHaltWorker free %p\n", OBFUSCATE(this)); + if (lock) { + IOLockFree(lock); + lock = 0; + } + return OSObject::free(); +} + +void +PMHaltWorker::main( void * arg, wait_result_t waitResult ) +{ + PMHaltWorker * me = (PMHaltWorker *) arg; + + IOLockLock( gPMHaltLock ); + gPMHaltBusyCount++; + me->depth = gPMHaltDepth; + IOLockUnlock( gPMHaltLock ); + + while (me->depth >= 0) { + PMHaltWorker::work( me ); + + IOLockLock( gPMHaltLock ); + if (++gPMHaltIdleCount >= gPMHaltBusyCount) { + // This is the last thread to finish work on this level, + // inform everyone to start working on next lower level. + gPMHaltDepth--; + me->depth = gPMHaltDepth; + gPMHaltIdleCount = 0; + thread_wakeup((event_t) &gPMHaltIdleCount); + } else { + // One or more threads are still working on this level, + // this thread must wait. 
+ me->depth = gPMHaltDepth - 1; + do { + IOLockSleep(gPMHaltLock, &gPMHaltIdleCount, THREAD_UNINT); + } while (me->depth != gPMHaltDepth); + } + IOLockUnlock( gPMHaltLock ); + } + + // No more work to do, terminate thread + DLOG("All done for worker: %p (visits = %u)\n", OBFUSCATE(me), me->visits); + thread_wakeup( &gPMHaltDepth ); + me->release(); +} + +void +PMHaltWorker::work( PMHaltWorker * me ) +{ + IOService * service; + OSSet * inner; + AbsoluteTime startTime, elapsedTime; + UInt32 deltaTime; + bool timeout; + + while (true) { + service = 0; + timeout = false; + + // Claim an unit of work from the shared pool + IOLockLock( gPMHaltLock ); + inner = (OSSet *)gPMHaltArray->getObject(me->depth); + if (inner) { + service = OSDynamicCast(IOService, inner->getAnyObject()); + if (service) { + service->retain(); + inner->removeObject(service); + } + } + IOLockUnlock( gPMHaltLock ); + if (!service) { + break; // no more work at this depth + } + clock_get_uptime(&startTime); + + if (!service->isInactive() && + service->setProperty(gPMHaltClientAcknowledgeKey, me)) { + IOLockLock(me->lock); + me->startTime = startTime; + me->service = service; + me->timeout = false; + IOLockUnlock(me->lock); + + service->systemWillShutdown( gPMHaltMessageType ); + + // Wait for driver acknowledgement + IOLockLock(me->lock); + while (service->getProperty(gPMHaltClientAcknowledgeKey)) { + IOLockSleep(me->lock, me, THREAD_UNINT); + } + me->service = 0; + timeout = me->timeout; + IOLockUnlock(me->lock); + } + + deltaTime = computeDeltaTimeMS(&startTime, &elapsedTime); + if ((deltaTime > kPMHaltTimeoutMS) || timeout) { + LOG("%s driver %s (0x%llx) took %u ms\n", + (gPMHaltMessageType == kIOMessageSystemWillPowerOff) ? + "PowerOff" : "Restart", + service->getName(), service->getRegistryEntryID(), + (uint32_t) deltaTime ); + halt_log_enter("PowerOff/Restart handler completed", + OSMemberFunctionCast(const void *, service, &IOService::systemWillShutdown), + elapsedTime); + } + + service->release(); + me->visits++; + } +} + +void +PMHaltWorker::checkTimeout( PMHaltWorker * me, AbsoluteTime * now ) +{ + UInt64 nano; + AbsoluteTime startTime; + AbsoluteTime endTime; + + endTime = *now; + + IOLockLock(me->lock); + if (me->service && !me->timeout) { + startTime = me->startTime; + nano = 0; + if (CMP_ABSOLUTETIME(&endTime, &startTime) > 0) { + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nano); + } + if (nano > 3000000000ULL) { + me->timeout = true; + + halt_log_enter("PowerOff/Restart still waiting on handler", + OSMemberFunctionCast(const void *, me->service, &IOService::systemWillShutdown), + endTime); + MSG("%s still waiting on %s\n", + (gPMHaltMessageType == kIOMessageSystemWillPowerOff) ? "PowerOff" : "Restart", + me->service->getName()); + } + } + IOLockUnlock(me->lock); } //****************************************************************************** @@ -8540,30 +8384,29 @@ void PMHaltWorker::checkTimeout( PMHaltWorker * me, AbsoluteTime * now ) // Acknowledgement from drivers that they have prepared for shutdown/restart. 
//****************************************************************************** -void IOPMrootDomain::acknowledgeSystemWillShutdown( IOService * from ) +void +IOPMrootDomain::acknowledgeSystemWillShutdown( IOService * from ) { - PMHaltWorker * worker; - OSObject * prop; + PMHaltWorker * worker; + OSObject * prop; - if (!from) - return; + if (!from) { + return; + } - //DLOG("%s acknowledged\n", from->getName()); - prop = from->copyProperty( gPMHaltClientAcknowledgeKey ); - if (prop) - { - worker = (PMHaltWorker *) prop; - IOLockLock(worker->lock); - from->removeProperty( gPMHaltClientAcknowledgeKey ); - thread_wakeup((event_t) worker); - IOLockUnlock(worker->lock); - worker->release(); - } - else - { - DLOG("%s acknowledged without worker property\n", - from->getName()); - } + //DLOG("%s acknowledged\n", from->getName()); + prop = from->copyProperty( gPMHaltClientAcknowledgeKey ); + if (prop) { + worker = (PMHaltWorker *) prop; + IOLockLock(worker->lock); + from->removeProperty( gPMHaltClientAcknowledgeKey ); + thread_wakeup((event_t) worker); + IOLockUnlock(worker->lock); + worker->release(); + } else { + DLOG("%s acknowledged without worker property\n", + from->getName()); + } } @@ -8577,198 +8420,203 @@ static void notifySystemShutdown( IOService * root, uint32_t messageType ) { #define PLACEHOLDER ((OSSet *)gPMHaltArray) - IORegistryIterator * iter; - IORegistryEntry * entry; - IOService * node; - OSSet * inner; - PMHaltWorker * workers[kPMHaltMaxWorkers]; - AbsoluteTime deadline; - unsigned int totalNodes = 0; - unsigned int depth; - unsigned int rootDepth; - unsigned int numWorkers; - unsigned int count; - int waitResult; - void * baseFunc; - bool ok; - - DLOG("%s msgType = 0x%x\n", __FUNCTION__, messageType); - - baseFunc = OSMemberFunctionCast(void *, root, &IOService::systemWillShutdown); - - // Iterate the entire PM tree starting from root - - rootDepth = root->getDepth( gIOPowerPlane ); - if (!rootDepth) goto done; - - // debug - for repeated test runs - while (PMHaltWorker::metaClass->getInstanceCount()) - IOSleep(1); - - if (!gPMHaltArray) - { - gPMHaltArray = OSArray::withCapacity(40); - if (!gPMHaltArray) goto done; - } - else // debug - gPMHaltArray->flushCollection(); - - if (!gPMHaltLock) - { - gPMHaltLock = IOLockAlloc(); - if (!gPMHaltLock) goto done; - } - - if (!gPMHaltClientAcknowledgeKey) - { - gPMHaltClientAcknowledgeKey = - OSSymbol::withCStringNoCopy("PMShutdown"); - if (!gPMHaltClientAcknowledgeKey) goto done; - } - - gPMHaltMessageType = messageType; - - // Depth-first walk of PM plane - - iter = IORegistryIterator::iterateOver( - root, gIOPowerPlane, kIORegistryIterateRecursively); - - if (iter) - { - while ((entry = iter->getNextObject())) - { - node = OSDynamicCast(IOService, entry); - if (!node) - continue; - - if (baseFunc == - OSMemberFunctionCast(void *, node, &IOService::systemWillShutdown)) - continue; - - depth = node->getDepth( gIOPowerPlane ); - if (depth <= rootDepth) - continue; - - ok = false; - - // adjust to zero based depth - depth -= (rootDepth + 1); - - // gPMHaltArray is an array of containers, each container - // refers to nodes with the same depth. 
- - count = gPMHaltArray->getCount(); - while (depth >= count) - { - // expand array and insert placeholders - gPMHaltArray->setObject(PLACEHOLDER); - count++; - } - count = gPMHaltArray->getCount(); - if (depth < count) - { - inner = (OSSet *)gPMHaltArray->getObject(depth); - if (inner == PLACEHOLDER) - { - inner = OSSet::withCapacity(40); - if (inner) - { - gPMHaltArray->replaceObject(depth, inner); - inner->release(); - } - } - - // PM nodes that appear more than once in the tree will have - // the same depth, OSSet will refuse to add the node twice. - if (inner) - ok = inner->setObject(node); - } - if (!ok) - DLOG("Skipped PM node %s\n", node->getName()); - } - iter->release(); - } - - // debug only - for (int i = 0; (inner = (OSSet *)gPMHaltArray->getObject(i)); i++) - { - count = 0; - if (inner != PLACEHOLDER) - count = inner->getCount(); - DLOG("Nodes at depth %u = %u\n", i, count); - } - - // strip placeholders (not all depths are populated) - numWorkers = 0; - for (int i = 0; (inner = (OSSet *)gPMHaltArray->getObject(i)); ) - { - if (inner == PLACEHOLDER) - { - gPMHaltArray->removeObject(i); - continue; - } - count = inner->getCount(); - if (count > numWorkers) - numWorkers = count; - totalNodes += count; - i++; - } - - if (gPMHaltArray->getCount() == 0 || !numWorkers) - goto done; - - gPMHaltBusyCount = 0; - gPMHaltIdleCount = 0; - gPMHaltDepth = gPMHaltArray->getCount() - 1; - - // Create multiple workers (and threads) - - if (numWorkers > kPMHaltMaxWorkers) - numWorkers = kPMHaltMaxWorkers; - - DLOG("PM nodes %u, maxDepth %u, workers %u\n", - totalNodes, gPMHaltArray->getCount(), numWorkers); - - for (unsigned int i = 0; i < numWorkers; i++) - workers[i] = PMHaltWorker::worker(); - - // Wait for workers to exhaust all available work - - IOLockLock(gPMHaltLock); - while (gPMHaltDepth >= 0) - { - clock_interval_to_deadline(1000, kMillisecondScale, &deadline); - - waitResult = IOLockSleepDeadline( - gPMHaltLock, &gPMHaltDepth, deadline, THREAD_UNINT); - if (THREAD_TIMED_OUT == waitResult) - { - AbsoluteTime now; - clock_get_uptime(&now); - - IOLockUnlock(gPMHaltLock); - for (unsigned int i = 0 ; i < numWorkers; i++) - { - if (workers[i]) - PMHaltWorker::checkTimeout(workers[i], &now); - } - IOLockLock(gPMHaltLock); - } - } - IOLockUnlock(gPMHaltLock); - - // Release all workers - - for (unsigned int i = 0; i < numWorkers; i++) - { - if (workers[i]) - workers[i]->release(); - // worker also retained by it's own thread - } + IORegistryIterator * iter; + IORegistryEntry * entry; + IOService * node; + OSSet * inner; + PMHaltWorker * workers[kPMHaltMaxWorkers]; + AbsoluteTime deadline; + unsigned int totalNodes = 0; + unsigned int depth; + unsigned int rootDepth; + unsigned int numWorkers; + unsigned int count; + int waitResult; + void * baseFunc; + bool ok; + + DLOG("%s msgType = 0x%x\n", __FUNCTION__, messageType); + + baseFunc = OSMemberFunctionCast(void *, root, &IOService::systemWillShutdown); + + // Iterate the entire PM tree starting from root + + rootDepth = root->getDepth( gIOPowerPlane ); + if (!rootDepth) { + goto done; + } + + // debug - for repeated test runs + while (PMHaltWorker::metaClass->getInstanceCount()) { + IOSleep(1); + } + + if (!gPMHaltArray) { + gPMHaltArray = OSArray::withCapacity(40); + if (!gPMHaltArray) { + goto done; + } + } else { // debug + gPMHaltArray->flushCollection(); + } + + if (!gPMHaltLock) { + gPMHaltLock = IOLockAlloc(); + if (!gPMHaltLock) { + goto done; + } + } + + if (!gPMHaltClientAcknowledgeKey) { + gPMHaltClientAcknowledgeKey = + 
OSSymbol::withCStringNoCopy("PMShutdown"); + if (!gPMHaltClientAcknowledgeKey) { + goto done; + } + } + + gPMHaltMessageType = messageType; + + // Depth-first walk of PM plane + + iter = IORegistryIterator::iterateOver( + root, gIOPowerPlane, kIORegistryIterateRecursively); + + if (iter) { + while ((entry = iter->getNextObject())) { + node = OSDynamicCast(IOService, entry); + if (!node) { + continue; + } + + if (baseFunc == + OSMemberFunctionCast(void *, node, &IOService::systemWillShutdown)) { + continue; + } + + depth = node->getDepth( gIOPowerPlane ); + if (depth <= rootDepth) { + continue; + } + + ok = false; + + // adjust to zero based depth + depth -= (rootDepth + 1); + + // gPMHaltArray is an array of containers, each container + // refers to nodes with the same depth. + + count = gPMHaltArray->getCount(); + while (depth >= count) { + // expand array and insert placeholders + gPMHaltArray->setObject(PLACEHOLDER); + count++; + } + count = gPMHaltArray->getCount(); + if (depth < count) { + inner = (OSSet *)gPMHaltArray->getObject(depth); + if (inner == PLACEHOLDER) { + inner = OSSet::withCapacity(40); + if (inner) { + gPMHaltArray->replaceObject(depth, inner); + inner->release(); + } + } + + // PM nodes that appear more than once in the tree will have + // the same depth, OSSet will refuse to add the node twice. + if (inner) { + ok = inner->setObject(node); + } + } + if (!ok) { + DLOG("Skipped PM node %s\n", node->getName()); + } + } + iter->release(); + } + + // debug only + for (int i = 0; (inner = (OSSet *)gPMHaltArray->getObject(i)); i++) { + count = 0; + if (inner != PLACEHOLDER) { + count = inner->getCount(); + } + DLOG("Nodes at depth %u = %u\n", i, count); + } + + // strip placeholders (not all depths are populated) + numWorkers = 0; + for (int i = 0; (inner = (OSSet *)gPMHaltArray->getObject(i));) { + if (inner == PLACEHOLDER) { + gPMHaltArray->removeObject(i); + continue; + } + count = inner->getCount(); + if (count > numWorkers) { + numWorkers = count; + } + totalNodes += count; + i++; + } + + if (gPMHaltArray->getCount() == 0 || !numWorkers) { + goto done; + } + + gPMHaltBusyCount = 0; + gPMHaltIdleCount = 0; + gPMHaltDepth = gPMHaltArray->getCount() - 1; + + // Create multiple workers (and threads) + + if (numWorkers > kPMHaltMaxWorkers) { + numWorkers = kPMHaltMaxWorkers; + } + + DLOG("PM nodes %u, maxDepth %u, workers %u\n", + totalNodes, gPMHaltArray->getCount(), numWorkers); + + for (unsigned int i = 0; i < numWorkers; i++) { + workers[i] = PMHaltWorker::worker(); + } + + // Wait for workers to exhaust all available work + + IOLockLock(gPMHaltLock); + while (gPMHaltDepth >= 0) { + clock_interval_to_deadline(1000, kMillisecondScale, &deadline); + + waitResult = IOLockSleepDeadline( + gPMHaltLock, &gPMHaltDepth, deadline, THREAD_UNINT); + if (THREAD_TIMED_OUT == waitResult) { + AbsoluteTime now; + clock_get_uptime(&now); + + IOLockUnlock(gPMHaltLock); + for (unsigned int i = 0; i < numWorkers; i++) { + if (workers[i]) { + PMHaltWorker::checkTimeout(workers[i], &now); + } + } + IOLockLock(gPMHaltLock); + } + } + IOLockUnlock(gPMHaltLock); + + // Release all workers + + for (unsigned int i = 0; i < numWorkers; i++) { + if (workers[i]) { + workers[i]->release(); + } + // worker also retained by its own thread + } done: - DLOG("%s done\n", __FUNCTION__); - return; + DLOG("%s done\n", __FUNCTION__); + return; } // MARK: - @@ -8776,175 +8624,186 @@ done: /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOPMDriverAssertionID
IOPMrootDomain::createPMAssertion( - IOPMDriverAssertionType whichAssertionBits, - IOPMDriverAssertionLevel assertionLevel, - IOService *ownerService, - const char *ownerDescription) +IOPMDriverAssertionID +IOPMrootDomain::createPMAssertion( + IOPMDriverAssertionType whichAssertionBits, + IOPMDriverAssertionLevel assertionLevel, + IOService *ownerService, + const char *ownerDescription) { - IOReturn ret; - IOPMDriverAssertionID newAssertion; + IOReturn ret; + IOPMDriverAssertionID newAssertion; - if (!pmAssertions) - return 0; + if (!pmAssertions) { + return 0; + } - ret = pmAssertions->createAssertion(whichAssertionBits, assertionLevel, ownerService, ownerDescription, &newAssertion); + ret = pmAssertions->createAssertion(whichAssertionBits, assertionLevel, ownerService, ownerDescription, &newAssertion); - if (kIOReturnSuccess == ret) - return newAssertion; - else - return 0; + if (kIOReturnSuccess == ret) { + return newAssertion; + } else { + return 0; + } } -IOReturn IOPMrootDomain::releasePMAssertion(IOPMDriverAssertionID releaseAssertion) +IOReturn +IOPMrootDomain::releasePMAssertion(IOPMDriverAssertionID releaseAssertion) { - if (!pmAssertions) - return kIOReturnInternalError; + if (!pmAssertions) { + return kIOReturnInternalError; + } - return pmAssertions->releaseAssertion(releaseAssertion); + return pmAssertions->releaseAssertion(releaseAssertion); } -IOReturn IOPMrootDomain::setPMAssertionLevel( - IOPMDriverAssertionID assertionID, - IOPMDriverAssertionLevel assertionLevel) +IOReturn +IOPMrootDomain::setPMAssertionLevel( + IOPMDriverAssertionID assertionID, + IOPMDriverAssertionLevel assertionLevel) { - return pmAssertions->setAssertionLevel(assertionID, assertionLevel); + return pmAssertions->setAssertionLevel(assertionID, assertionLevel); } -IOPMDriverAssertionLevel IOPMrootDomain::getPMAssertionLevel(IOPMDriverAssertionType whichAssertion) +IOPMDriverAssertionLevel +IOPMrootDomain::getPMAssertionLevel(IOPMDriverAssertionType whichAssertion) { - IOPMDriverAssertionType sysLevels; + IOPMDriverAssertionType sysLevels; - if (!pmAssertions || whichAssertion == 0) - return kIOPMDriverAssertionLevelOff; + if (!pmAssertions || whichAssertion == 0) { + return kIOPMDriverAssertionLevelOff; + } - sysLevels = pmAssertions->getActivatedAssertions(); + sysLevels = pmAssertions->getActivatedAssertions(); - // Check that every bit set in argument 'whichAssertion' is asserted - // in the aggregate bits. - if ((sysLevels & whichAssertion) == whichAssertion) - return kIOPMDriverAssertionLevelOn; - else - return kIOPMDriverAssertionLevelOff; + // Check that every bit set in argument 'whichAssertion' is asserted + // in the aggregate bits. 
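The createPMAssertion() / releasePMAssertion() pair above, together with the aggregate bit test just below, is the whole driver-facing contract. A minimal usage sketch, assuming a hypothetical driver method with access to getPMRootDomain(); the bit constant is one of the published IOPMDriverAssertionType bits:

IOPMDriverAssertionID assertionID =
    getPMRootDomain()->createPMAssertion(
        kIOPMDriverAssertionCPUBit,       // which behavior to pin
        kIOPMDriverAssertionLevelOn,
        this,                             // ownerService, recorded for diagnostics
        "com.example.driver");            // ownerDescription
if (assertionID) {
    // ... work that must not be interrupted by idle sleep ...
    getPMRootDomain()->releasePMAssertion(assertionID);
}

createPMAssertion() returns 0 on failure, so the guard doubles as the error check.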
+ if ((sysLevels & whichAssertion) == whichAssertion) { + return kIOPMDriverAssertionLevelOn; + } else { + return kIOPMDriverAssertionLevelOff; + } } -IOReturn IOPMrootDomain::setPMAssertionUserLevels(IOPMDriverAssertionType inLevels) +IOReturn +IOPMrootDomain::setPMAssertionUserLevels(IOPMDriverAssertionType inLevels) { - if (!pmAssertions) - return kIOReturnNotFound; + if (!pmAssertions) { + return kIOReturnNotFound; + } - return pmAssertions->setUserAssertionLevels(inLevels); + return pmAssertions->setUserAssertionLevels(inLevels); } -bool IOPMrootDomain::serializeProperties( OSSerialize * s ) const +bool +IOPMrootDomain::serializeProperties( OSSerialize * s ) const { - if (pmAssertions) - { - pmAssertions->publishProperties(); - } - return( IOService::serializeProperties(s) ); + if (pmAssertions) { + pmAssertions->publishProperties(); + } + return IOService::serializeProperties(s); } -OSObject * IOPMrootDomain::copyProperty( const char * aKey) const +OSObject * +IOPMrootDomain::copyProperty( const char * aKey) const { - OSObject *obj = NULL; - obj = IOService::copyProperty(aKey); + OSObject *obj = NULL; + obj = IOService::copyProperty(aKey); - if (obj) return obj; + if (obj) { + return obj; + } + + if (!strncmp(aKey, kIOPMSleepWakeWdogRebootKey, + sizeof(kIOPMSleepWakeWdogRebootKey))) { + if (swd_flags & SWD_BOOT_BY_SW_WDOG) { + return kOSBooleanTrue; + } else { + return kOSBooleanFalse; + } + } + + if (!strncmp(aKey, kIOPMSleepWakeWdogLogsValidKey, + sizeof(kIOPMSleepWakeWdogLogsValidKey))) { + if (swd_flags & SWD_VALID_LOGS) { + return kOSBooleanTrue; + } else { + return kOSBooleanFalse; + } + } - if (!strncmp(aKey, kIOPMSleepWakeWdogRebootKey, - sizeof(kIOPMSleepWakeWdogRebootKey))) { - if (swd_flags & SWD_BOOT_BY_SW_WDOG) - return kOSBooleanTrue; - else - return kOSBooleanFalse; + /* + * XXX: We should get rid of "DesktopMode" property when 'kAppleClamshellCausesSleepKey' + * is set properly in darkwake from sleep. For that, kIOPMEnableClamshell msg has to be + * issued by DisplayWrangler on darkwake.
+ */ + if (!strcmp(aKey, "DesktopMode")) { + if (desktopMode) { + return kOSBooleanTrue; + } else { + return kOSBooleanFalse; + } + } + if (!strcmp(aKey, "DisplayIdleForDemandSleep")) { + if (displayIdleForDemandSleep) { + return kOSBooleanTrue; + } else { + return kOSBooleanFalse; + } + } - } + if (!strcmp(aKey, kIOPMDriverWakeEventsKey)) { + OSArray * array = 0; + WAKEEVENT_LOCK(); + if (_systemWakeEventsArray && _systemWakeEventsArray->getCount()) { + OSCollection *collection = _systemWakeEventsArray->copyCollection(); + if (collection && !(array = OSDynamicCast(OSArray, collection))) { + collection->release(); + } + } + WAKEEVENT_UNLOCK(); + return array; + } - if (!strncmp(aKey, kIOPMSleepWakeWdogLogsValidKey, - sizeof(kIOPMSleepWakeWdogLogsValidKey))) { - if (swd_flags & SWD_VALID_LOGS) - return kOSBooleanTrue; - else - return kOSBooleanFalse; + if (!strcmp(aKey, kIOPMSleepStatisticsAppsKey)) { + OSArray * array = 0; + IOLockLock(pmStatsLock); + if (pmStatsAppResponses && pmStatsAppResponses->getCount()) { + OSCollection *collection = pmStatsAppResponses->copyCollection(); + if (collection && !(array = OSDynamicCast(OSArray, collection))) { + collection->release(); + } + pmStatsAppResponses->flushCollection(); + } + IOLockUnlock(pmStatsLock); + return array; + } - } + if (!strcmp(aKey, kIOPMIdleSleepPreventersKey)) { + OSArray *idleSleepList = NULL; + gRootDomain->copySleepPreventersList(&idleSleepList, NULL); + return idleSleepList; + } - /* - * XXX: We should get rid of "DesktopMode" property when 'kAppleClamshellCausesSleepKey' - * is set properly in darwake from sleep. For that, kIOPMEnableClamshell msg has to be - * issued by DisplayWrangler on darkwake. - */ - if (!strcmp(aKey, "DesktopMode")) { - if (desktopMode) - return kOSBooleanTrue; - else - return kOSBooleanFalse; - } - if (!strcmp(aKey, "DisplayIdleForDemandSleep")) { - if (displayIdleForDemandSleep) { - return kOSBooleanTrue; - } - else { - return kOSBooleanFalse; - } - } + if (!strcmp(aKey, kIOPMSystemSleepPreventersKey)) { + OSArray *systemSleepList = NULL; + gRootDomain->copySleepPreventersList(NULL, &systemSleepList); + return systemSleepList; + } - if (!strcmp(aKey, kIOPMDriverWakeEventsKey)) - { - OSArray * array = 0; - WAKEEVENT_LOCK(); - if (_systemWakeEventsArray && _systemWakeEventsArray->getCount()) { - OSCollection *collection = _systemWakeEventsArray->copyCollection(); - if (collection && !(array = OSDynamicCast(OSArray, collection))) { - collection->release(); - } - } - WAKEEVENT_UNLOCK(); - return array; - } - - if (!strcmp(aKey, kIOPMSleepStatisticsAppsKey)) - { - OSArray * array = 0; - IOLockLock(pmStatsLock); - if (pmStatsAppResponses && pmStatsAppResponses->getCount()) { - OSCollection *collection = pmStatsAppResponses->copyCollection(); - if (collection && !(array = OSDynamicCast(OSArray, collection))) { - collection->release(); - } - pmStatsAppResponses->flushCollection(); - } - IOLockUnlock(pmStatsLock); - return array; - } - - if (!strcmp(aKey, kIOPMIdleSleepPreventersKey)) - { - OSArray *idleSleepList = NULL; - gRootDomain->copySleepPreventersList(&idleSleepList, NULL); - return idleSleepList; - } - - if (!strcmp(aKey, kIOPMSystemSleepPreventersKey)) - { - OSArray *systemSleepList = NULL; - gRootDomain->copySleepPreventersList(NULL, &systemSleepList); - return systemSleepList; - } - - return NULL; + return NULL; } // MARK: - // MARK: Wake Event Reporting -void IOPMrootDomain::copyWakeReasonString( char * outBuf, size_t bufSize ) +void +IOPMrootDomain::copyWakeReasonString( char * outBuf, size_t 
bufSize ) { - WAKEEVENT_LOCK(); - strlcpy(outBuf, gWakeReasonString, bufSize); - WAKEEVENT_UNLOCK(); + WAKEEVENT_LOCK(); + strlcpy(outBuf, gWakeReasonString, bufSize); + WAKEEVENT_UNLOCK(); } //****************************************************************************** @@ -8953,50 +8812,52 @@ void IOPMrootDomain::copyWakeReasonString( char * outBuf, size_t bufSize ) // Private control for the acceptance of driver wake event claims. //****************************************************************************** -void IOPMrootDomain::acceptSystemWakeEvents( bool accept ) +void +IOPMrootDomain::acceptSystemWakeEvents( bool accept ) { - bool logWakeReason = false; + bool logWakeReason = false; - WAKEEVENT_LOCK(); - if (accept) - { - gWakeReasonString[0] = '\0'; - if (!_systemWakeEventsArray) - _systemWakeEventsArray = OSArray::withCapacity(4); - if ((_acceptSystemWakeEvents = (_systemWakeEventsArray != 0))) - _systemWakeEventsArray->flushCollection(); - } - else - { - _acceptSystemWakeEvents = false; + WAKEEVENT_LOCK(); + if (accept) { + gWakeReasonString[0] = '\0'; + if (!_systemWakeEventsArray) { + _systemWakeEventsArray = OSArray::withCapacity(4); + } + if ((_acceptSystemWakeEvents = (_systemWakeEventsArray != 0))) { + _systemWakeEventsArray->flushCollection(); + } + } else { + _acceptSystemWakeEvents = false; #if CONFIG_EMBEDDED - logWakeReason = gWakeReasonSysctlRegistered; + logWakeReason = gWakeReasonSysctlRegistered; #if DEVELOPMENT - static int panic_allowed = -1; - - if ((panic_allowed == -1) && - (PE_parse_boot_argn("swd_wakereason_panic", &panic_allowed, sizeof(panic_allowed)) == false)) { - panic_allowed = 1; - } - - if (panic_allowed) { - size_t i = 0; - // Panic if wake reason is null or empty - for (i = 0; (i < strlen(gWakeReasonString)); i++) { - if ((gWakeReasonString[i] != ' ') && (gWakeReasonString[i] != '\t')) - break; - } - if (i >= strlen(gWakeReasonString)) { - panic("Wake reason is empty\n"); - } - } + static int panic_allowed = -1; + + if ((panic_allowed == -1) && + (PE_parse_boot_argn("swd_wakereason_panic", &panic_allowed, sizeof(panic_allowed)) == false)) { + panic_allowed = 1; + } + + if (panic_allowed) { + size_t i = 0; + // Panic if wake reason is null or empty + for (i = 0; (i < strlen(gWakeReasonString)); i++) { + if ((gWakeReasonString[i] != ' ') && (gWakeReasonString[i] != '\t')) { + break; + } + } + if (i >= strlen(gWakeReasonString)) { + panic("Wake reason is empty\n"); + } + } #endif #endif - } - WAKEEVENT_UNLOCK(); + } + WAKEEVENT_UNLOCK(); - if (logWakeReason) - MSG("system wake events:%s\n", gWakeReasonString); + if (logWakeReason) { + MSG("system wake events:%s\n", gWakeReasonString); + } } //****************************************************************************** @@ -9005,68 +8866,84 @@ void IOPMrootDomain::acceptSystemWakeEvents( bool accept ) // For a driver to claim a device is the source/conduit of a system wake event. 
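A sketch of the call that this banner describes, assuming a hypothetical driver that has just decoded its device's wake interrupt; the reason string is what ends up space-concatenated into gWakeReasonString and logged by acceptSystemWakeEvents():

// hypothetical: invoked from the driver's wake-interrupt handler
getPMRootDomain()->claimSystemWakeEvent( this,
    /* flags   */ 0,
    /* reason  */ "GPIO17",   // short, stable identifier for the wake source
    /* details */ NULL );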
//****************************************************************************** -void IOPMrootDomain::claimSystemWakeEvent( - IOService * device, - IOOptionBits flags, - const char * reason, - OSObject * details ) -{ - const OSSymbol * deviceName = 0; - OSNumber * deviceRegId = 0; - OSNumber * claimTime = 0; - OSData * flagsData = 0; - OSString * reasonString = 0; - OSDictionary * d = 0; - uint64_t timestamp; - bool ok = false; - - pmEventTimeStamp(×tamp); - - if (!device || !reason) return; - - deviceName = device->copyName(gIOServicePlane); - deviceRegId = OSNumber::withNumber(device->getRegistryEntryID(), 64); - claimTime = OSNumber::withNumber(timestamp, 64); - flagsData = OSData::withBytes(&flags, sizeof(flags)); - reasonString = OSString::withCString(reason); - d = OSDictionary::withCapacity(5 + (details ? 1 : 0)); - if (!deviceName || !deviceRegId || !claimTime || !flagsData || !reasonString) - goto done; - - d->setObject(gIONameKey, deviceName); - d->setObject(gIORegistryEntryIDKey, deviceRegId); - d->setObject(kIOPMWakeEventTimeKey, claimTime); - d->setObject(kIOPMWakeEventFlagsKey, flagsData); - d->setObject(kIOPMWakeEventReasonKey, reasonString); - if (details) - d->setObject(kIOPMWakeEventDetailsKey, details); - - WAKEEVENT_LOCK(); - if (!gWakeReasonSysctlRegistered) - { - // Lazy registration until the platform driver stops registering - // the same name. - gWakeReasonSysctlRegistered = true; +void +IOPMrootDomain::claimSystemWakeEvent( + IOService * device, + IOOptionBits flags, + const char * reason, + OSObject * details ) +{ + const OSSymbol * deviceName = 0; + OSNumber * deviceRegId = 0; + OSNumber * claimTime = 0; + OSData * flagsData = 0; + OSString * reasonString = 0; + OSDictionary * d = 0; + uint64_t timestamp; + bool ok = false; + + pmEventTimeStamp(×tamp); + + if (!device || !reason) { + return; + } + + deviceName = device->copyName(gIOServicePlane); + deviceRegId = OSNumber::withNumber(device->getRegistryEntryID(), 64); + claimTime = OSNumber::withNumber(timestamp, 64); + flagsData = OSData::withBytes(&flags, sizeof(flags)); + reasonString = OSString::withCString(reason); + d = OSDictionary::withCapacity(5 + (details ? 1 : 0)); + if (!deviceName || !deviceRegId || !claimTime || !flagsData || !reasonString) { + goto done; + } + + d->setObject(gIONameKey, deviceName); + d->setObject(gIORegistryEntryIDKey, deviceRegId); + d->setObject(kIOPMWakeEventTimeKey, claimTime); + d->setObject(kIOPMWakeEventFlagsKey, flagsData); + d->setObject(kIOPMWakeEventReasonKey, reasonString); + if (details) { + d->setObject(kIOPMWakeEventDetailsKey, details); + } + + WAKEEVENT_LOCK(); + if (!gWakeReasonSysctlRegistered) { + // Lazy registration until the platform driver stops registering + // the same name. 
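Once the OID registered just below is in place, the accumulated reason string is visible from user space; a quick check, assuming a CONFIG_EMBEDDED build (the only configuration that registers sysctl__kern_wakereason here) and the hypothetical "GPIO17" claim sketched earlier:

$ sysctl kern.wakereason
kern.wakereason: GPIO17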
+ gWakeReasonSysctlRegistered = true; #if CONFIG_EMBEDDED - sysctl_register_oid(&sysctl__kern_wakereason); + sysctl_register_oid(&sysctl__kern_wakereason); #endif - } - if (_acceptSystemWakeEvents) - { - ok = _systemWakeEventsArray->setObject(d); - if (gWakeReasonString[0] != '\0') - strlcat(gWakeReasonString, " ", sizeof(gWakeReasonString)); - strlcat(gWakeReasonString, reason, sizeof(gWakeReasonString)); - } - WAKEEVENT_UNLOCK(); + } + if (_acceptSystemWakeEvents) { + ok = _systemWakeEventsArray->setObject(d); + if (gWakeReasonString[0] != '\0') { + strlcat(gWakeReasonString, " ", sizeof(gWakeReasonString)); + } + strlcat(gWakeReasonString, reason, sizeof(gWakeReasonString)); + } + WAKEEVENT_UNLOCK(); done: - if (deviceName) deviceName->release(); - if (deviceRegId) deviceRegId->release(); - if (claimTime) claimTime->release(); - if (flagsData) flagsData->release(); - if (reasonString) reasonString->release(); - if (d) d->release(); + if (deviceName) { + deviceName->release(); + } + if (deviceRegId) { + deviceRegId->release(); + } + if (claimTime) { + claimTime->release(); + } + if (flagsData) { + flagsData->release(); + } + if (reasonString) { + reasonString->release(); + } + if (d) { + d->release(); + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -9076,16 +8953,16 @@ done: OSDefineMetaClassAndStructors( PMSettingHandle, OSObject ) -void PMSettingHandle::free( void ) +void +PMSettingHandle::free( void ) { - if (pmso) - { - pmso->clientHandleFreed(); - pmso->release(); - pmso = 0; - } + if (pmso) { + pmso->clientHandleFreed(); + pmso->release(); + pmso = 0; + } - OSObject::free(); + OSObject::free(); } // MARK: - @@ -9099,89 +8976,100 @@ OSDefineMetaClassAndFinalStructors( PMSettingObject, OSObject ) * Static constructor/initializer for PMSettingObject */ PMSettingObject *PMSettingObject::pmSettingObject( - IOPMrootDomain *parent_arg, - IOPMSettingControllerCallback handler_arg, - OSObject *target_arg, - uintptr_t refcon_arg, - uint32_t supportedPowerSources, - const OSSymbol * settings[], - OSObject **handle_obj) -{ - uint32_t settingCount = 0; - PMSettingObject *pmso = 0; - PMSettingHandle *pmsh = 0; - - if ( !parent_arg || !handler_arg || !settings || !handle_obj ) - return NULL; - - // count OSSymbol entries in NULL terminated settings array - while (settings[settingCount]) { - settingCount++; - } - if (0 == settingCount) - return NULL; - - pmso = new PMSettingObject; - if (!pmso || !pmso->init()) - goto fail; - - pmsh = new PMSettingHandle; - if (!pmsh || !pmsh->init()) - goto fail; - - queue_init(&pmso->calloutQueue); - pmso->parent = parent_arg; - pmso->func = handler_arg; - pmso->target = target_arg; - pmso->refcon = refcon_arg; - pmso->settingCount = settingCount; - - pmso->retain(); // handle holds a retain on pmso - pmsh->pmso = pmso; - pmso->pmsh = pmsh; - - pmso->publishedFeatureID = (uint32_t *)IOMalloc(sizeof(uint32_t)*settingCount); - if (pmso->publishedFeatureID) { - for (unsigned int i=0; ipublishPMSetting( settings[i], - supportedPowerSources, &pmso->publishedFeatureID[i] ); - } - } - - *handle_obj = pmsh; - return pmso; + IOPMrootDomain * parent_arg, + IOPMSettingControllerCallback handler_arg, + OSObject * target_arg, + uintptr_t refcon_arg, + uint32_t supportedPowerSources, + const OSSymbol * settings[], + OSObject * *handle_obj) +{ + uint32_t settingCount = 0; + PMSettingObject *pmso = 0; + PMSettingHandle *pmsh = 0; + + if (!parent_arg || !handler_arg || !settings || !handle_obj) { + return NULL; + } + + // count OSSymbol 
entries in NULL terminated settings array + while (settings[settingCount]) { + settingCount++; + } + if (0 == settingCount) { + return NULL; + } + + pmso = new PMSettingObject; + if (!pmso || !pmso->init()) { + goto fail; + } + + pmsh = new PMSettingHandle; + if (!pmsh || !pmsh->init()) { + goto fail; + } + + queue_init(&pmso->calloutQueue); + pmso->parent = parent_arg; + pmso->func = handler_arg; + pmso->target = target_arg; + pmso->refcon = refcon_arg; + pmso->settingCount = settingCount; + + pmso->retain(); // handle holds a retain on pmso + pmsh->pmso = pmso; + pmso->pmsh = pmsh; + + pmso->publishedFeatureID = (uint32_t *)IOMalloc(sizeof(uint32_t) * settingCount); + if (pmso->publishedFeatureID) { + for (unsigned int i = 0; i < settingCount; i++) { + // Since there is now at least one listener to this setting, publish + // PM root domain support for it. + parent_arg->publishPMSetting( settings[i], + supportedPowerSources, &pmso->publishedFeatureID[i] ); + } + } + + *handle_obj = pmsh; + return pmso; fail: - if (pmso) pmso->release(); - if (pmsh) pmsh->release(); - return NULL; + if (pmso) { + pmso->release(); + } + if (pmsh) { + pmsh->release(); + } + return NULL; } -void PMSettingObject::free( void ) +void +PMSettingObject::free( void ) { - if (publishedFeatureID) { - for (uint32_t i=0; iremovePublishedFeature( publishedFeatureID[i] ); - } - } + if (publishedFeatureID) { + for (uint32_t i = 0; i < settingCount; i++) { + if (publishedFeatureID[i]) { + parent->removePublishedFeature( publishedFeatureID[i] ); + } + } - IOFree(publishedFeatureID, sizeof(uint32_t) * settingCount); - } + IOFree(publishedFeatureID, sizeof(uint32_t) * settingCount); + } - super::free(); + super::free(); } -void PMSettingObject::dispatchPMSetting( const OSSymbol * type, OSObject * object ) +void +PMSettingObject::dispatchPMSetting( const OSSymbol * type, OSObject * object ) { - (*func)(target, type, object, refcon); + (*func)(target, type, object, refcon); } -void PMSettingObject::clientHandleFreed( void ) +void +PMSettingObject::clientHandleFreed( void ) { - parent->deregisterPMSettingObject(this); + parent->deregisterPMSettingObject(this); } // MARK: - @@ -9194,397 +9082,402 @@ void PMSettingObject::clientHandleFreed( void ) #define kAssertUniqueIDStart 500 -PMAssertionsTracker *PMAssertionsTracker::pmAssertionsTracker( IOPMrootDomain *rootDomain ) +PMAssertionsTracker * +PMAssertionsTracker::pmAssertionsTracker( IOPMrootDomain *rootDomain ) +{ + PMAssertionsTracker *myself; + + myself = new PMAssertionsTracker; + + if (myself) { + myself->init(); + myself->owner = rootDomain; + myself->issuingUniqueID = kAssertUniqueIDStart; + myself->assertionsArray = OSArray::withCapacity(5); + myself->assertionsKernel = 0; + myself->assertionsUser = 0; + myself->assertionsCombined = 0; + myself->assertionsArrayLock = IOLockAlloc(); + myself->tabulateProducerCount = myself->tabulateConsumerCount = 0; + + if (!myself->assertionsArray || !myself->assertionsArrayLock) { + myself = NULL; + } + } + + return myself; +} + +/* tabulate + * - Update assertionsKernel to reflect the state of all + * assertions in the kernel. + * - Update assertionsCombined to reflect both kernel & user space. 
+ */ +void +PMAssertionsTracker::tabulate(void) +{ + int i; + int count; + PMAssertStruct *_a = NULL; + OSData *_d = NULL; + + IOPMDriverAssertionType oldKernel = assertionsKernel; + IOPMDriverAssertionType oldCombined = assertionsCombined; + + ASSERT_GATED(); + + assertionsKernel = 0; + assertionsCombined = 0; + + if (!assertionsArray) { + return; + } + + if ((count = assertionsArray->getCount())) { + for (i = 0; i < count; i++) { + _d = OSDynamicCast(OSData, assertionsArray->getObject(i)); + if (_d) { + _a = (PMAssertStruct *)_d->getBytesNoCopy(); + if (_a && (kIOPMDriverAssertionLevelOn == _a->level)) { + assertionsKernel |= _a->assertionBits; + } + } + } + } + + tabulateProducerCount++; + assertionsCombined = assertionsKernel | assertionsUser; + + if ((assertionsKernel != oldKernel) || + (assertionsCombined != oldCombined)) { + owner->evaluateAssertions(assertionsCombined, oldCombined); + } +} + +void +PMAssertionsTracker::publishProperties( void ) { - PMAssertionsTracker *myself; + OSArray *assertionsSummary = NULL; - myself = new PMAssertionsTracker; + if (tabulateConsumerCount != tabulateProducerCount) { + IOLockLock(assertionsArrayLock); - if (myself) { - myself->init(); - myself->owner = rootDomain; - myself->issuingUniqueID = kAssertUniqueIDStart; - myself->assertionsArray = OSArray::withCapacity(5); - myself->assertionsKernel = 0; - myself->assertionsUser = 0; - myself->assertionsCombined = 0; - myself->assertionsArrayLock = IOLockAlloc(); - myself->tabulateProducerCount = myself->tabulateConsumerCount = 0; + tabulateConsumerCount = tabulateProducerCount; + + /* Publish the IOPMrootDomain property "DriverPMAssertionsDetailed" + */ + assertionsSummary = copyAssertionsArray(); + if (assertionsSummary) { + owner->setProperty(kIOPMAssertionsDriverDetailedKey, assertionsSummary); + assertionsSummary->release(); + } else { + owner->removeProperty(kIOPMAssertionsDriverDetailedKey); + } - if (!myself->assertionsArray || !myself->assertionsArrayLock) - myself = NULL; - } + /* Publish the IOPMrootDomain property "DriverPMAssertions" + */ + owner->setProperty(kIOPMAssertionsDriverKey, assertionsKernel, 64); - return myself; + IOLockUnlock(assertionsArrayLock); + } } -/* tabulate - * - Update assertionsKernel to reflect the state of all - * assertions in the kernel. - * - Update assertionsCombined to reflect both kernel & user space. 
- */ -void PMAssertionsTracker::tabulate(void) -{ - int i; - int count; - PMAssertStruct *_a = NULL; - OSData *_d = NULL; - - IOPMDriverAssertionType oldKernel = assertionsKernel; - IOPMDriverAssertionType oldCombined = assertionsCombined; - - ASSERT_GATED(); - - assertionsKernel = 0; - assertionsCombined = 0; - - if (!assertionsArray) - return; - - if ((count = assertionsArray->getCount())) - { - for (i=0; igetObject(i)); - if (_d) - { - _a = (PMAssertStruct *)_d->getBytesNoCopy(); - if (_a && (kIOPMDriverAssertionLevelOn == _a->level)) - assertionsKernel |= _a->assertionBits; - } - } - } - - tabulateProducerCount++; - assertionsCombined = assertionsKernel | assertionsUser; - - if ((assertionsKernel != oldKernel) || - (assertionsCombined != oldCombined)) - { - owner->evaluateAssertions(assertionsCombined, oldCombined); - } -} - -void PMAssertionsTracker::publishProperties( void ) -{ - OSArray *assertionsSummary = NULL; - - if (tabulateConsumerCount != tabulateProducerCount) - { - IOLockLock(assertionsArrayLock); - - tabulateConsumerCount = tabulateProducerCount; - - /* Publish the IOPMrootDomain property "DriverPMAssertionsDetailed" - */ - assertionsSummary = copyAssertionsArray(); - if (assertionsSummary) - { - owner->setProperty(kIOPMAssertionsDriverDetailedKey, assertionsSummary); - assertionsSummary->release(); - } - else - { - owner->removeProperty(kIOPMAssertionsDriverDetailedKey); - } - - /* Publish the IOPMrootDomain property "DriverPMAssertions" - */ - owner->setProperty(kIOPMAssertionsDriverKey, assertionsKernel, 64); - - IOLockUnlock(assertionsArrayLock); - } -} - -PMAssertionsTracker::PMAssertStruct *PMAssertionsTracker::detailsForID(IOPMDriverAssertionID _id, int *index) -{ - PMAssertStruct *_a = NULL; - OSData *_d = NULL; - int found = -1; - int count = 0; - int i = 0; - - if (assertionsArray - && (count = assertionsArray->getCount())) - { - for (i=0; igetObject(i)); - if (_d) - { - _a = (PMAssertStruct *)_d->getBytesNoCopy(); - if (_a && (_id == _a->id)) { - found = i; - break; - } - } - } - } - - if (-1 == found) { - return NULL; - } else { - if (index) - *index = found; - return _a; - } +PMAssertionsTracker::PMAssertStruct * +PMAssertionsTracker::detailsForID(IOPMDriverAssertionID _id, int *index) +{ + PMAssertStruct *_a = NULL; + OSData *_d = NULL; + int found = -1; + int count = 0; + int i = 0; + + if (assertionsArray + && (count = assertionsArray->getCount())) { + for (i = 0; i < count; i++) { + _d = OSDynamicCast(OSData, assertionsArray->getObject(i)); + if (_d) { + _a = (PMAssertStruct *)_d->getBytesNoCopy(); + if (_a && (_id == _a->id)) { + found = i; + break; + } + } + } + } + + if (-1 == found) { + return NULL; + } else { + if (index) { + *index = found; + } + return _a; + } } /* PMAssertionsTracker::handleCreateAssertion * Perform assertion work on the PM workloop. Do not call directly. 
*/ -IOReturn PMAssertionsTracker::handleCreateAssertion(OSData *newAssertion) +IOReturn +PMAssertionsTracker::handleCreateAssertion(OSData *newAssertion) { - ASSERT_GATED(); + ASSERT_GATED(); - if (newAssertion) - { - IOLockLock(assertionsArrayLock); - assertionsArray->setObject(newAssertion); - IOLockUnlock(assertionsArrayLock); - newAssertion->release(); + if (newAssertion) { + IOLockLock(assertionsArrayLock); + assertionsArray->setObject(newAssertion); + IOLockUnlock(assertionsArrayLock); + newAssertion->release(); - tabulate(); - } - return kIOReturnSuccess; + tabulate(); + } + return kIOReturnSuccess; } /* PMAssertionsTracker::createAssertion * createAssertion allocates memory for a new PM assertion, and affects system behavior, if * appropiate. */ -IOReturn PMAssertionsTracker::createAssertion( - IOPMDriverAssertionType which, - IOPMDriverAssertionLevel level, - IOService *serviceID, - const char *whoItIs, - IOPMDriverAssertionID *outID) -{ - OSData *dataStore = NULL; - PMAssertStruct track; - - // Warning: trillions and trillions of created assertions may overflow the unique ID. - track.id = OSIncrementAtomic64((SInt64*) &issuingUniqueID); - track.level = level; - track.assertionBits = which; - track.ownerString = whoItIs ? OSSymbol::withCString(whoItIs):0; - track.ownerService = serviceID; - track.registryEntryID = serviceID ? serviceID->getRegistryEntryID():0; - track.modifiedTime = 0; - pmEventTimeStamp(&track.createdTime); - - dataStore = OSData::withBytes(&track, sizeof(PMAssertStruct)); - if (!dataStore) - { - if (track.ownerString) - track.ownerString->release(); - return kIOReturnNoMemory; - } - - *outID = track.id; - - if (owner && owner->pmPowerStateQueue) { - owner->pmPowerStateQueue->submitPowerEvent(kPowerEventAssertionCreate, (void *)dataStore); - } - - return kIOReturnSuccess; +IOReturn +PMAssertionsTracker::createAssertion( + IOPMDriverAssertionType which, + IOPMDriverAssertionLevel level, + IOService *serviceID, + const char *whoItIs, + IOPMDriverAssertionID *outID) +{ + OSData *dataStore = NULL; + PMAssertStruct track; + + // Warning: trillions and trillions of created assertions may overflow the unique ID. + track.id = OSIncrementAtomic64((SInt64*) &issuingUniqueID); + track.level = level; + track.assertionBits = which; + track.ownerString = whoItIs ? OSSymbol::withCString(whoItIs):0; + track.ownerService = serviceID; + track.registryEntryID = serviceID ? serviceID->getRegistryEntryID():0; + track.modifiedTime = 0; + pmEventTimeStamp(&track.createdTime); + + dataStore = OSData::withBytes(&track, sizeof(PMAssertStruct)); + if (!dataStore) { + if (track.ownerString) { + track.ownerString->release(); + } + return kIOReturnNoMemory; + } + + *outID = track.id; + + if (owner && owner->pmPowerStateQueue) { + owner->pmPowerStateQueue->submitPowerEvent(kPowerEventAssertionCreate, (void *)dataStore); + } + + return kIOReturnSuccess; } /* PMAssertionsTracker::handleReleaseAssertion * Runs in PM workloop. Do not call directly. 
*/ -IOReturn PMAssertionsTracker::handleReleaseAssertion( - IOPMDriverAssertionID _id) +IOReturn +PMAssertionsTracker::handleReleaseAssertion( + IOPMDriverAssertionID _id) { - ASSERT_GATED(); + ASSERT_GATED(); - int index; - PMAssertStruct *assertStruct = detailsForID(_id, &index); + int index; + PMAssertStruct *assertStruct = detailsForID(_id, &index); - if (!assertStruct) - return kIOReturnNotFound; + if (!assertStruct) { + return kIOReturnNotFound; + } - IOLockLock(assertionsArrayLock); - if (assertStruct->ownerString) - assertStruct->ownerString->release(); + IOLockLock(assertionsArrayLock); + if (assertStruct->ownerString) { + assertStruct->ownerString->release(); + } - assertionsArray->removeObject(index); - IOLockUnlock(assertionsArrayLock); + assertionsArray->removeObject(index); + IOLockUnlock(assertionsArrayLock); - tabulate(); - return kIOReturnSuccess; + tabulate(); + return kIOReturnSuccess; } /* PMAssertionsTracker::releaseAssertion * Releases an assertion and affects system behavior if appropiate. * Actual work happens on PM workloop. */ -IOReturn PMAssertionsTracker::releaseAssertion( - IOPMDriverAssertionID _id) +IOReturn +PMAssertionsTracker::releaseAssertion( + IOPMDriverAssertionID _id) { - if (owner && owner->pmPowerStateQueue) { - owner->pmPowerStateQueue->submitPowerEvent(kPowerEventAssertionRelease, 0, _id); - } - return kIOReturnSuccess; + if (owner && owner->pmPowerStateQueue) { + owner->pmPowerStateQueue->submitPowerEvent(kPowerEventAssertionRelease, 0, _id); + } + return kIOReturnSuccess; } /* PMAssertionsTracker::handleSetAssertionLevel * Runs in PM workloop. Do not call directly. */ -IOReturn PMAssertionsTracker::handleSetAssertionLevel( - IOPMDriverAssertionID _id, - IOPMDriverAssertionLevel _level) +IOReturn +PMAssertionsTracker::handleSetAssertionLevel( + IOPMDriverAssertionID _id, + IOPMDriverAssertionLevel _level) { - PMAssertStruct *assertStruct = detailsForID(_id, NULL); + PMAssertStruct *assertStruct = detailsForID(_id, NULL); - ASSERT_GATED(); + ASSERT_GATED(); - if (!assertStruct) { - return kIOReturnNotFound; - } + if (!assertStruct) { + return kIOReturnNotFound; + } - IOLockLock(assertionsArrayLock); - pmEventTimeStamp(&assertStruct->modifiedTime); - assertStruct->level = _level; - IOLockUnlock(assertionsArrayLock); + IOLockLock(assertionsArrayLock); + pmEventTimeStamp(&assertStruct->modifiedTime); + assertStruct->level = _level; + IOLockUnlock(assertionsArrayLock); - tabulate(); - return kIOReturnSuccess; + tabulate(); + return kIOReturnSuccess; } /* PMAssertionsTracker::setAssertionLevel */ -IOReturn PMAssertionsTracker::setAssertionLevel( - IOPMDriverAssertionID _id, - IOPMDriverAssertionLevel _level) -{ - if (owner && owner->pmPowerStateQueue) { - owner->pmPowerStateQueue->submitPowerEvent(kPowerEventAssertionSetLevel, - (void *)(uintptr_t)_level, _id); - } - - return kIOReturnSuccess; -} - -IOReturn PMAssertionsTracker::handleSetUserAssertionLevels(void * arg0) -{ - IOPMDriverAssertionType new_user_levels = *(IOPMDriverAssertionType *) arg0; - - ASSERT_GATED(); - - if (new_user_levels != assertionsUser) - { - assertionsUser = new_user_levels; - DLOG("assertionsUser 0x%llx\n", assertionsUser); - } - - tabulate(); - return kIOReturnSuccess; -} - -IOReturn PMAssertionsTracker::setUserAssertionLevels( - IOPMDriverAssertionType new_user_levels) -{ - if (gIOPMWorkLoop) { - gIOPMWorkLoop->runAction( - OSMemberFunctionCast( - IOWorkLoop::Action, - this, - &PMAssertionsTracker::handleSetUserAssertionLevels), - this, - (void *) &new_user_levels, 0, 0, 
0); - } - - return kIOReturnSuccess; -} - - -OSArray *PMAssertionsTracker::copyAssertionsArray(void) -{ - int count; - int i; - OSArray *outArray = NULL; - - if (!assertionsArray || - (0 == (count = assertionsArray->getCount())) || - (NULL == (outArray = OSArray::withCapacity(count)))) - { - goto exit; - } - - for (i=0; igetObject(i)); - if (_d && (_a = (PMAssertStruct *)_d->getBytesNoCopy())) - { - OSNumber *_n = NULL; - - details = OSDictionary::withCapacity(7); - if (!details) - continue; - - outArray->setObject(details); - details->release(); - - _n = OSNumber::withNumber(_a->id, 64); - if (_n) { - details->setObject(kIOPMDriverAssertionIDKey, _n); - _n->release(); - } - _n = OSNumber::withNumber(_a->createdTime, 64); - if (_n) { - details->setObject(kIOPMDriverAssertionCreatedTimeKey, _n); - _n->release(); - } - _n = OSNumber::withNumber(_a->modifiedTime, 64); - if (_n) { - details->setObject(kIOPMDriverAssertionModifiedTimeKey, _n); - _n->release(); - } - _n = OSNumber::withNumber((uintptr_t)_a->registryEntryID, 64); - if (_n) { - details->setObject(kIOPMDriverAssertionRegistryEntryIDKey, _n); - _n->release(); - } - _n = OSNumber::withNumber(_a->level, 64); - if (_n) { - details->setObject(kIOPMDriverAssertionLevelKey, _n); - _n->release(); - } - _n = OSNumber::withNumber(_a->assertionBits, 64); - if (_n) { - details->setObject(kIOPMDriverAssertionAssertedKey, _n); - _n->release(); - } - - if (_a->ownerString) { - details->setObject(kIOPMDriverAssertionOwnerStringKey, _a->ownerString); - } - } - } +IOReturn +PMAssertionsTracker::setAssertionLevel( + IOPMDriverAssertionID _id, + IOPMDriverAssertionLevel _level) +{ + if (owner && owner->pmPowerStateQueue) { + owner->pmPowerStateQueue->submitPowerEvent(kPowerEventAssertionSetLevel, + (void *)(uintptr_t)_level, _id); + } + + return kIOReturnSuccess; +} + +IOReturn +PMAssertionsTracker::handleSetUserAssertionLevels(void * arg0) +{ + IOPMDriverAssertionType new_user_levels = *(IOPMDriverAssertionType *) arg0; + + ASSERT_GATED(); + + if (new_user_levels != assertionsUser) { + assertionsUser = new_user_levels; + DLOG("assertionsUser 0x%llx\n", assertionsUser); + } + + tabulate(); + return kIOReturnSuccess; +} + +IOReturn +PMAssertionsTracker::setUserAssertionLevels( + IOPMDriverAssertionType new_user_levels) +{ + if (gIOPMWorkLoop) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast( + IOWorkLoop::Action, + this, + &PMAssertionsTracker::handleSetUserAssertionLevels), + this, + (void *) &new_user_levels, 0, 0, 0); + } + + return kIOReturnSuccess; +} + + +OSArray * +PMAssertionsTracker::copyAssertionsArray(void) +{ + int count; + int i; + OSArray *outArray = NULL; + + if (!assertionsArray || + (0 == (count = assertionsArray->getCount())) || + (NULL == (outArray = OSArray::withCapacity(count)))) { + goto exit; + } + + for (i = 0; i < count; i++) { + PMAssertStruct *_a = NULL; + OSData *_d = NULL; + OSDictionary *details = NULL; + + _d = OSDynamicCast(OSData, assertionsArray->getObject(i)); + if (_d && (_a = (PMAssertStruct *)_d->getBytesNoCopy())) { + OSNumber *_n = NULL; + + details = OSDictionary::withCapacity(7); + if (!details) { + continue; + } + + outArray->setObject(details); + details->release(); + + _n = OSNumber::withNumber(_a->id, 64); + if (_n) { + details->setObject(kIOPMDriverAssertionIDKey, _n); + _n->release(); + } + _n = OSNumber::withNumber(_a->createdTime, 64); + if (_n) { + details->setObject(kIOPMDriverAssertionCreatedTimeKey, _n); + _n->release(); + } + _n = OSNumber::withNumber(_a->modifiedTime, 64); + if (_n) { + 
details->setObject(kIOPMDriverAssertionModifiedTimeKey, _n); + _n->release(); + } + _n = OSNumber::withNumber((uintptr_t)_a->registryEntryID, 64); + if (_n) { + details->setObject(kIOPMDriverAssertionRegistryEntryIDKey, _n); + _n->release(); + } + _n = OSNumber::withNumber(_a->level, 64); + if (_n) { + details->setObject(kIOPMDriverAssertionLevelKey, _n); + _n->release(); + } + _n = OSNumber::withNumber(_a->assertionBits, 64); + if (_n) { + details->setObject(kIOPMDriverAssertionAssertedKey, _n); + _n->release(); + } + + if (_a->ownerString) { + details->setObject(kIOPMDriverAssertionOwnerStringKey, _a->ownerString); + } + } + } exit: - return outArray; + return outArray; } -IOPMDriverAssertionType PMAssertionsTracker::getActivatedAssertions(void) +IOPMDriverAssertionType +PMAssertionsTracker::getActivatedAssertions(void) { - return assertionsCombined; + return assertionsCombined; } -IOPMDriverAssertionLevel PMAssertionsTracker::getAssertionLevel( - IOPMDriverAssertionType type) +IOPMDriverAssertionLevel +PMAssertionsTracker::getAssertionLevel( + IOPMDriverAssertionType type) { - if (type && ((type & assertionsKernel) == assertionsKernel)) - { - return kIOPMDriverAssertionLevelOn; - } else { - return kIOPMDriverAssertionLevelOff; - } + if (type && ((type & assertionsKernel) == assertionsKernel)) { + return kIOPMDriverAssertionLevelOn; + } else { + return kIOPMDriverAssertionLevelOff; + } } //********************************************************************************* @@ -9592,25 +9485,27 @@ IOPMDriverAssertionLevel PMAssertionsTracker::getAssertionLevel( //********************************************************************************* -static void pmEventTimeStamp(uint64_t *recordTS) +static void +pmEventTimeStamp(uint64_t *recordTS) { - clock_sec_t tsec; - clock_usec_t tusec; + clock_sec_t tsec; + clock_usec_t tusec; - if (!recordTS) - return; + if (!recordTS) { + return; + } - // We assume tsec fits into 32 bits; 32 bits holds enough - // seconds for 136 years since the epoch in 1970. - clock_get_calendar_microtime(&tsec, &tusec); + // We assume tsec fits into 32 bits; 32 bits holds enough + // seconds for 136 years since the epoch in 1970. + clock_get_calendar_microtime(&tsec, &tusec); - // Pack the sec & microsec calendar time into a uint64_t, for fun. - *recordTS = 0; - *recordTS |= (uint32_t)tusec; - *recordTS |= ((uint64_t)tsec << 32); + // Pack the sec & microsec calendar time into a uint64_t, for fun. 
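For consumers of these packed records, the inverse transform is worth spelling out (a sketch, assuming the layout established just below: seconds in the high 32 bits, microseconds in the low 32):

uint64_t     ts;   // a value produced by pmEventTimeStamp()
clock_sec_t  sec  = (clock_sec_t)(ts >> 32);
clock_usec_t usec = (clock_usec_t)(ts & 0xFFFFFFFFULL);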
+ *recordTS = 0; + *recordTS |= (uint32_t)tusec; + *recordTS |= ((uint64_t)tsec << 32); - return; + return; } // MARK: - @@ -9630,437 +9525,464 @@ OSDefineMetaClassAndFinalStructors(IORootParent, IOService) static IOPMPowerState patriarchPowerStates[2] = { - {1,0,ON_POWER,0,0,0,0,0,0,0,0,0}, - {1,0,ON_POWER,0,0,0,0,0,0,0,0,0}, + {1, 0, ON_POWER, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + {1, 0, ON_POWER, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }; -void IORootParent::initialize( void ) +void +IORootParent::initialize( void ) { - gIOPMPSExternalConnectedKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalConnectedKey); - gIOPMPSExternalChargeCapableKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalChargeCapableKey); - gIOPMPSBatteryInstalledKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryInstalledKey); - gIOPMPSIsChargingKey = OSSymbol::withCStringNoCopy(kIOPMPSIsChargingKey); - gIOPMPSAtWarnLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtWarnLevelKey); - gIOPMPSAtCriticalLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtCriticalLevelKey); - gIOPMPSCurrentCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSCurrentCapacityKey); - gIOPMPSMaxCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxCapacityKey); - gIOPMPSDesignCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSDesignCapacityKey); - gIOPMPSTimeRemainingKey = OSSymbol::withCStringNoCopy(kIOPMPSTimeRemainingKey); - gIOPMPSAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAmperageKey); - gIOPMPSVoltageKey = OSSymbol::withCStringNoCopy(kIOPMPSVoltageKey); - gIOPMPSCycleCountKey = OSSymbol::withCStringNoCopy(kIOPMPSCycleCountKey); - gIOPMPSMaxErrKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxErrKey); - gIOPMPSAdapterInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterInfoKey); - gIOPMPSLocationKey = OSSymbol::withCStringNoCopy(kIOPMPSLocationKey); - gIOPMPSErrorConditionKey = OSSymbol::withCStringNoCopy(kIOPMPSErrorConditionKey); - gIOPMPSManufacturerKey = OSSymbol::withCStringNoCopy(kIOPMPSManufacturerKey); - gIOPMPSManufactureDateKey = OSSymbol::withCStringNoCopy(kIOPMPSManufactureDateKey); - gIOPMPSModelKey = OSSymbol::withCStringNoCopy(kIOPMPSModelKey); - gIOPMPSSerialKey = OSSymbol::withCStringNoCopy(kIOPMPSSerialKey); - gIOPMPSLegacyBatteryInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSLegacyBatteryInfoKey); - gIOPMPSBatteryHealthKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryHealthKey); - gIOPMPSHealthConfidenceKey = OSSymbol::withCStringNoCopy(kIOPMPSHealthConfidenceKey); - gIOPMPSCapacityEstimatedKey = OSSymbol::withCStringNoCopy(kIOPMPSCapacityEstimatedKey); - gIOPMPSBatteryChargeStatusKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryChargeStatusKey); - gIOPMPSBatteryTemperatureKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryTemperatureKey); - gIOPMPSAdapterDetailsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsKey); - gIOPMPSChargerConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSChargerConfigurationKey); - gIOPMPSAdapterDetailsIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsIDKey); - gIOPMPSAdapterDetailsWattsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsWattsKey); - gIOPMPSAdapterDetailsRevisionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsRevisionKey); - gIOPMPSAdapterDetailsSerialNumberKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSerialNumberKey); - gIOPMPSAdapterDetailsFamilyKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsFamilyKey); - gIOPMPSAdapterDetailsAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsAmperageKey); - gIOPMPSAdapterDetailsDescriptionKey = 
OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsDescriptionKey); - gIOPMPSAdapterDetailsPMUConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsPMUConfigurationKey); - gIOPMPSAdapterDetailsSourceIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSourceIDKey); - gIOPMPSAdapterDetailsErrorFlagsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsErrorFlagsKey); - gIOPMPSAdapterDetailsSharedSourceKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSharedSourceKey); - gIOPMPSAdapterDetailsCloakedKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsCloakedKey); - gIOPMPSInvalidWakeSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSInvalidWakeSecondsKey); - gIOPMPSPostChargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostChargeWaitSecondsKey); - gIOPMPSPostDishargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostDishargeWaitSecondsKey); + gIOPMPSExternalConnectedKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalConnectedKey); + gIOPMPSExternalChargeCapableKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalChargeCapableKey); + gIOPMPSBatteryInstalledKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryInstalledKey); + gIOPMPSIsChargingKey = OSSymbol::withCStringNoCopy(kIOPMPSIsChargingKey); + gIOPMPSAtWarnLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtWarnLevelKey); + gIOPMPSAtCriticalLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtCriticalLevelKey); + gIOPMPSCurrentCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSCurrentCapacityKey); + gIOPMPSMaxCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxCapacityKey); + gIOPMPSDesignCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSDesignCapacityKey); + gIOPMPSTimeRemainingKey = OSSymbol::withCStringNoCopy(kIOPMPSTimeRemainingKey); + gIOPMPSAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAmperageKey); + gIOPMPSVoltageKey = OSSymbol::withCStringNoCopy(kIOPMPSVoltageKey); + gIOPMPSCycleCountKey = OSSymbol::withCStringNoCopy(kIOPMPSCycleCountKey); + gIOPMPSMaxErrKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxErrKey); + gIOPMPSAdapterInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterInfoKey); + gIOPMPSLocationKey = OSSymbol::withCStringNoCopy(kIOPMPSLocationKey); + gIOPMPSErrorConditionKey = OSSymbol::withCStringNoCopy(kIOPMPSErrorConditionKey); + gIOPMPSManufacturerKey = OSSymbol::withCStringNoCopy(kIOPMPSManufacturerKey); + gIOPMPSManufactureDateKey = OSSymbol::withCStringNoCopy(kIOPMPSManufactureDateKey); + gIOPMPSModelKey = OSSymbol::withCStringNoCopy(kIOPMPSModelKey); + gIOPMPSSerialKey = OSSymbol::withCStringNoCopy(kIOPMPSSerialKey); + gIOPMPSLegacyBatteryInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSLegacyBatteryInfoKey); + gIOPMPSBatteryHealthKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryHealthKey); + gIOPMPSHealthConfidenceKey = OSSymbol::withCStringNoCopy(kIOPMPSHealthConfidenceKey); + gIOPMPSCapacityEstimatedKey = OSSymbol::withCStringNoCopy(kIOPMPSCapacityEstimatedKey); + gIOPMPSBatteryChargeStatusKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryChargeStatusKey); + gIOPMPSBatteryTemperatureKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryTemperatureKey); + gIOPMPSAdapterDetailsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsKey); + gIOPMPSChargerConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSChargerConfigurationKey); + gIOPMPSAdapterDetailsIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsIDKey); + gIOPMPSAdapterDetailsWattsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsWattsKey); + gIOPMPSAdapterDetailsRevisionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsRevisionKey); + 
gIOPMPSAdapterDetailsSerialNumberKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSerialNumberKey); + gIOPMPSAdapterDetailsFamilyKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsFamilyKey); + gIOPMPSAdapterDetailsAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsAmperageKey); + gIOPMPSAdapterDetailsDescriptionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsDescriptionKey); + gIOPMPSAdapterDetailsPMUConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsPMUConfigurationKey); + gIOPMPSAdapterDetailsSourceIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSourceIDKey); + gIOPMPSAdapterDetailsErrorFlagsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsErrorFlagsKey); + gIOPMPSAdapterDetailsSharedSourceKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSharedSourceKey); + gIOPMPSAdapterDetailsCloakedKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsCloakedKey); + gIOPMPSInvalidWakeSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSInvalidWakeSecondsKey); + gIOPMPSPostChargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostChargeWaitSecondsKey); + gIOPMPSPostDishargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostDishargeWaitSecondsKey); } -bool IORootParent::start( IOService * nub ) +bool +IORootParent::start( IOService * nub ) { - IOService::start(nub); - attachToParent( getRegistryRoot(), gIOPowerPlane ); - PMinit(); - registerPowerDriver(this, patriarchPowerStates, 2); - makeUsable(); - return true; + IOService::start(nub); + attachToParent( getRegistryRoot(), gIOPowerPlane ); + PMinit(); + registerPowerDriver(this, patriarchPowerStates, 2); + makeUsable(); + return true; } -void IORootParent::shutDownSystem( void ) +void +IORootParent::shutDownSystem( void ) { } -void IORootParent::restartSystem( void ) +void +IORootParent::restartSystem( void ) { } -void IORootParent::sleepSystem( void ) +void +IORootParent::sleepSystem( void ) { } -void IORootParent::dozeSystem( void ) +void +IORootParent::dozeSystem( void ) { } -void IORootParent::sleepToDoze( void ) +void +IORootParent::sleepToDoze( void ) { } -void IORootParent::wakeSystem( void ) +void +IORootParent::wakeSystem( void ) { } -OSObject * IORootParent::copyProperty( const char * aKey) const +OSObject * +IORootParent::copyProperty( const char * aKey) const { - return (IOService::copyProperty(aKey)); + return IOService::copyProperty(aKey); } -uint32_t IOPMrootDomain::getWatchdogTimeout() +uint32_t +IOPMrootDomain::getWatchdogTimeout() { - if (gSwdSleepWakeTimeout) { - gSwdSleepTimeout = gSwdWakeTimeout = gSwdSleepWakeTimeout; - } - if ((pmTracer->getTracePhase() < kIOPMTracePointSystemSleep) || - (pmTracer->getTracePhase() == kIOPMTracePointDarkWakeEntry)) { - return gSwdSleepTimeout ? gSwdSleepTimeout : WATCHDOG_SLEEP_TIMEOUT; - } - else { - return gSwdWakeTimeout ? gSwdWakeTimeout : WATCHDOG_WAKE_TIMEOUT; - } + if (gSwdSleepWakeTimeout) { + gSwdSleepTimeout = gSwdWakeTimeout = gSwdSleepWakeTimeout; + } + if ((pmTracer->getTracePhase() < kIOPMTracePointSystemSleep) || + (pmTracer->getTracePhase() == kIOPMTracePointDarkWakeEntry)) { + return gSwdSleepTimeout ? gSwdSleepTimeout : WATCHDOG_SLEEP_TIMEOUT; + } else { + return gSwdWakeTimeout ? 
gSwdWakeTimeout : WATCHDOG_WAKE_TIMEOUT; + } } #if defined(__i386__) || defined(__x86_64__) -IOReturn IOPMrootDomain::restartWithStackshot() +IOReturn +IOPMrootDomain::restartWithStackshot() { - takeStackshot(true, true, false); + takeStackshot(true, true, false); + + return kIOReturnSuccess; +} - return kIOReturnSuccess; +void +IOPMrootDomain::sleepWakeDebugTrig(bool wdogTrigger) +{ + takeStackshot(wdogTrigger, false, false); } -void IOPMrootDomain::sleepWakeDebugTrig(bool wdogTrigger) +void +IOPMrootDomain::tracePhase2String(uint32_t tracePhase, const char **phaseString, const char **description) { - takeStackshot(wdogTrigger, false, false); + switch (tracePhase) { + case kIOPMTracePointSleepStarted: + *phaseString = "kIOPMTracePointSleepStarted"; + *description = "starting sleep"; + break; + + case kIOPMTracePointSleepApplications: + *phaseString = "kIOPMTracePointSleepApplications"; + *description = "notifying applications"; + break; + + case kIOPMTracePointSleepPriorityClients: + *phaseString = "kIOPMTracePointSleepPriorityClients"; + *description = "notifying clients about upcoming system capability changes"; + break; + + case kIOPMTracePointSleepWillChangeInterests: + *phaseString = "kIOPMTracePointSleepWillChangeInterests"; + *description = "creating hibernation file or while calling rootDomain's clients about upcoming rootDomain's state changes"; + break; + + case kIOPMTracePointSleepPowerPlaneDrivers: + *phaseString = "kIOPMTracePointSleepPowerPlaneDrivers"; + *description = "calling power state change callbacks"; + break; + + case kIOPMTracePointSleepDidChangeInterests: + *phaseString = "kIOPMTracePointSleepDidChangeInterests"; + *description = "calling rootDomain's clients about rootDomain's state changes"; + break; + + case kIOPMTracePointSleepCapabilityClients: + *phaseString = "kIOPMTracePointSleepCapabilityClients"; + *description = "notifying clients about current system capabilities"; + break; + + case kIOPMTracePointSleepPlatformActions: + *phaseString = "kIOPMTracePointSleepPlatformActions"; + *description = "calling Quiesce/Sleep action callbacks"; + break; + + case kIOPMTracePointSleepCPUs: + { + *phaseString = "kIOPMTracePointSleepCPUs"; +#if defined(__i386__) || defined(__x86_64__) + /* + * We cannot use the getCPUNumber() method to get the cpu number, since + * that cpu number is unrelated to the cpu number we need (we need the cpu + * number as enumerated by the scheduler, NOT the CPU number enumerated + * by ACPIPlatform as the CPUs are enumerated in MADT order). + * Instead, pass the Mach processor pointer associated with the current + * shutdown target so its associated cpu_id can be used in + * processor_to_datastring. 
+ */ + if (currentShutdownTarget != NULL && + currentShutdownTarget->getMachProcessor() != NULL) { + const char *sbuf = processor_to_datastring("halting all non-boot CPUs", + currentShutdownTarget->getMachProcessor()); + *description = sbuf; + } else { + *description = "halting all non-boot CPUs"; + } +#else + *description = "halting all non-boot CPUs"; +#endif + break; + } + case kIOPMTracePointSleepPlatformDriver: + *phaseString = "kIOPMTracePointSleepPlatformDriver"; + *description = "executing platform specific code"; + break; + + case kIOPMTracePointHibernate: + *phaseString = "kIOPMTracePointHibernate"; + *description = "writing the hibernation image"; + break; + + case kIOPMTracePointSystemSleep: + *phaseString = "kIOPMTracePointSystemSleep"; + *description = "in EFI/Bootrom after last point of entry to sleep"; + break; + + case kIOPMTracePointWakePlatformDriver: + *phaseString = "kIOPMTracePointWakePlatformDriver"; + *description = "executing platform specific code"; + break; + + + case kIOPMTracePointWakePlatformActions: + *phaseString = "kIOPMTracePointWakePlatformActions"; + *description = "calling Wake action callbacks"; + break; + + case kIOPMTracePointWakeCPUs: + *phaseString = "kIOPMTracePointWakeCPUs"; + *description = "starting non-boot CPUs"; + break; + + case kIOPMTracePointWakeWillPowerOnClients: + *phaseString = "kIOPMTracePointWakeWillPowerOnClients"; + *description = "sending kIOMessageSystemWillPowerOn message to kernel and userspace clients"; + break; + + case kIOPMTracePointWakeWillChangeInterests: + *phaseString = "kIOPMTracePointWakeWillChangeInterests"; + *description = "calling rootDomain's clients about upcoming rootDomain's state changes"; + break; + + case kIOPMTracePointWakeDidChangeInterests: + *phaseString = "kIOPMTracePointWakeDidChangeInterests"; + *description = "calling rootDomain's clients about completed rootDomain's state changes"; + break; + + case kIOPMTracePointWakePowerPlaneDrivers: + *phaseString = "kIOPMTracePointWakePowerPlaneDrivers"; + *description = "calling power state change callbacks"; + break; + + case kIOPMTracePointWakeCapabilityClients: + *phaseString = "kIOPMTracePointWakeCapabilityClients"; + *description = "informing clients about current system capabilities"; + break; + + case kIOPMTracePointWakeApplications: + *phaseString = "kIOPMTracePointWakeApplications"; + *description = "sending asynchronous kIOMessageSystemHasPoweredOn message to userspace clients"; + break; + + case kIOPMTracePointDarkWakeEntry: + *phaseString = "kIOPMTracePointDarkWakeEntry"; + *description = "entering darkwake on way to sleep"; + break; + + case kIOPMTracePointDarkWakeExit: + *phaseString = "kIOPMTracePointDarkWakeExit"; + *description = "entering fullwake from darkwake"; + break; + + default: + *phaseString = NULL; + *description = NULL; + } } -void IOPMrootDomain::tracePhase2String(uint32_t tracePhase, const char **phaseString, const char **description) +void +IOPMrootDomain::saveFailureData2File() { - switch (tracePhase) { + unsigned int len = 0; + char failureStr[512]; + errno_t error; + char *outbuf; + bool oswatchdog = false; - case kIOPMTracePointSleepStarted: - *phaseString = "kIOPMTracePointSleepStarted"; - *description = "starting sleep"; - break; + if (!PEReadNVRAMProperty(kIOSleepWakeFailureString, NULL, &len) && + !PEReadNVRAMProperty(kIOOSWatchdogFailureString, NULL, &len)) { + DLOG("No SleepWake failure or OSWatchdog failure string to read\n"); + return; + } + + if (len == 0) { + DLOG("Ignoring zero byte SleepWake failure string\n"); 
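// A minimal sketch, not part of the patch: one way the tracePhase2String()
// helper above might be consumed when formatting a failure message. The local
// names and the buffer size are illustrative assumptions.
const char *phaseStr = NULL;
const char *phaseDesc = NULL;
char traceMsg[256];
tracePhase2String(pmTracer->getTracePhase(), &phaseStr, &phaseDesc);
if (phaseStr && phaseDesc) {
	// e.g. "kIOPMTracePointSleepCPUs: halting all non-boot CPUs"
	snprintf(traceMsg, sizeof(traceMsg), "%s: %s", phaseStr, phaseDesc);
}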
+ goto exit; + } - case kIOPMTracePointSleepApplications: - *phaseString = "kIOPMTracePointSleepApplications"; - *description = "notifying applications"; - break; + if (len > sizeof(failureStr)) { + len = sizeof(failureStr); + } + failureStr[0] = 0; + if (PEReadNVRAMProperty(kIOSleepWakeFailureString, failureStr, &len) == false) { + if (PEReadNVRAMProperty(kIOOSWatchdogFailureString, failureStr, &len)) { + oswatchdog = true; + } + } + if (failureStr[0] != 0) { + error = sleepWakeDebugSaveFile(oswatchdog ? kOSWatchdogFailureStringFile : kSleepWakeFailureStringFile, + failureStr, len); + if (error) { + DLOG("Failed to save SleepWake failure string to file. error:%d\n", error); + } else { + DLOG("Saved SleepWake failure string to file.\n"); + } + if (!oswatchdog) { + swd_flags |= SWD_BOOT_BY_SW_WDOG; + } + } - case kIOPMTracePointSleepPriorityClients: - *phaseString = "kIOPMTracePointSleepPriorityClients"; - *description = "notifying clients about upcoming system capability changes"; - break; + if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) { + goto exit; + } - case kIOPMTracePointSleepWillChangeInterests: - *phaseString = "kIOPMTracePointSleepWillChangeInterests"; - *description = "creating hibernation file or while calling rootDomain's clients about upcoming rootDomain's state changes"; - break; - - case kIOPMTracePointSleepPowerPlaneDrivers: - *phaseString = "kIOPMTracePointSleepPowerPlaneDrivers"; - *description = "calling power state change callbacks"; - break; + if (swd_buffer) { + unsigned int len = 0; + errno_t error; + char nvram_var_name_buffer[20]; + unsigned int concat_len = 0; + swd_hdr *hdr = NULL; - case kIOPMTracePointSleepDidChangeInterests: - *phaseString = "kIOPMTracePointSleepDidChangeInterests"; - *description = "calling rootDomain's clients about rootDomain's state changes"; - break; - case kIOPMTracePointSleepCapabilityClients: - *phaseString = "kIOPMTracePointSleepCapabilityClients"; - *description = "notifying clients about current system capabilities"; - break; + hdr = (swd_hdr *)swd_buffer; + outbuf = (char *)hdr + hdr->spindump_offset; - case kIOPMTracePointSleepPlatformActions: - *phaseString = "kIOPMTracePointSleepPlatformActions"; - *description = "calling Quiesce/Sleep action callbacks"; - break; + for (int i = 0; i < 8; i++) { + snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, i + 1); + if (!PEReadNVRAMProperty(nvram_var_name_buffer, NULL, &len)) { + LOG("No SleepWake blob to read beyond chunk %d\n", i); + break; + } + if (PEReadNVRAMProperty(nvram_var_name_buffer, outbuf + concat_len, &len) == FALSE) { + PERemoveNVRAMProperty(nvram_var_name_buffer); + LOG("Could not read the property :-(\n"); + break; + } + PERemoveNVRAMProperty(nvram_var_name_buffer); + concat_len += len; + } + LOG("Concatenated length for the SWD blob %d\n", concat_len); + + if (concat_len) { + error = sleepWakeDebugSaveFile(oswatchdog ? kOSWatchdogStacksFilename : kSleepWakeStacksFilename, + outbuf, concat_len); + if (error) { + LOG("Failed to save SleepWake zipped data to file. 
error:%d\n", error); + } else { + LOG("Saved SleepWake zipped data to file.\n"); + } + } + } else { + LOG("No buffer allocated to save failure stackshot\n"); + } - case kIOPMTracePointSleepCPUs: - *phaseString = "kIOPMTracePointSleepCPUs"; - *description = "halting all non-boot CPUs"; - break; - case kIOPMTracePointSleepPlatformDriver: - *phaseString = "kIOPMTracePointSleepPlatformDriver"; - *description = "executing platform specific code"; - break; - - case kIOPMTracePointHibernate: - *phaseString = "kIOPMTracePointHibernate"; - *description = "writing the hibernation image"; - break; - - case kIOPMTracePointSystemSleep: - *phaseString = "kIOPMTracePointSystemSleep"; - *description = "in EFI/Bootrom after last point of entry to sleep"; - break; - - case kIOPMTracePointWakePlatformDriver: - *phaseString = "kIOPMTracePointWakePlatformDriver"; - *description = "executing platform specific code"; - break; - - - case kIOPMTracePointWakePlatformActions: - *phaseString = "kIOPMTracePointWakePlatformActions"; - *description = "calling Wake action callbacks"; - break; - - case kIOPMTracePointWakeCPUs: - *phaseString = "kIOPMTracePointWakeCPUs"; - *description = "starting non-boot CPUs"; - break; - - case kIOPMTracePointWakeWillPowerOnClients: - *phaseString = "kIOPMTracePointWakeWillPowerOnClients"; - *description = "sending kIOMessageSystemWillPowerOn message to kernel and userspace clients"; - break; - - case kIOPMTracePointWakeWillChangeInterests: - *phaseString = "kIOPMTracePointWakeWillChangeInterests"; - *description = "calling rootDomain's clients about upcoming rootDomain's state changes"; - break; - - case kIOPMTracePointWakeDidChangeInterests: - *phaseString = "kIOPMTracePointWakeDidChangeInterests"; - *description = "calling rootDomain's clients about completed rootDomain's state changes"; - break; - - case kIOPMTracePointWakePowerPlaneDrivers: - *phaseString = "kIOPMTracePointWakePowerPlaneDrivers"; - *description = "calling power state change callbacks"; - break; - - case kIOPMTracePointWakeCapabilityClients: - *phaseString = "kIOPMTracePointWakeCapabilityClients"; - *description = "informing clients about current system capabilities"; - break; - - case kIOPMTracePointWakeApplications: - *phaseString = "kIOPMTracePointWakeApplications"; - *description = "sending asynchronous kIOMessageSystemHasPoweredOn message to userspace clients"; - break; - - case kIOPMTracePointDarkWakeEntry: - *phaseString = "kIOPMTracePointDarkWakeEntry"; - *description = "entering darkwake on way to sleep"; - break; - - case kIOPMTracePointDarkWakeExit: - *phaseString = "kIOPMTracePointDarkWakeExit"; - *description = "entering fullwake from darkwake"; - break; - - default: - *phaseString = NULL; - *description = NULL; - } - -} - -void IOPMrootDomain::saveFailureData2File( ) -{ - unsigned int len = 0; - char failureStr[512]; - errno_t error; - char *outbuf; - bool oswatchdog = false; - - if (!PEReadNVRAMProperty(kIOSleepWakeFailureString, NULL, &len) && - !PEReadNVRAMProperty(kIOOSWatchdogFailureString, NULL, &len) ) { - DLOG("No SleepWake failure or OSWatchdog failure string to read\n"); - return; - } - - if (len == 0) { - DLOG("Ignoring zero byte SleepWake failure string\n"); - goto exit; - } - - if (len > sizeof(failureStr)) { - len = sizeof(failureStr); - } - failureStr[0] = 0; - if (PEReadNVRAMProperty(kIOSleepWakeFailureString, failureStr, &len) == false) { - if (PEReadNVRAMProperty(kIOOSWatchdogFailureString, failureStr, &len)) { - oswatchdog = true; - } - } - if (failureStr[0] != 0) { - error = 
sleepWakeDebugSaveFile(oswatchdog ? kOSWatchdogFailureStringFile : kSleepWakeFailureStringFile, - failureStr, len); - if (error) { - DLOG("Failed to save SleepWake failure string to file. error:%d\n", error); - } - else { - DLOG("Saved SleepWake failure string to file.\n"); - } - if (!oswatchdog) { - swd_flags |= SWD_BOOT_BY_SW_WDOG; - } - } - - if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) - goto exit; - - if (swd_buffer) { - unsigned int len = 0; - errno_t error; - char nvram_var_name_buffer[20]; - unsigned int concat_len = 0; - swd_hdr *hdr = NULL; - - - hdr = (swd_hdr *)swd_buffer; - outbuf = (char *)hdr + hdr->spindump_offset; - - for (int i=0; i < 8; i++) { - snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, i+1); - if (!PEReadNVRAMProperty(nvram_var_name_buffer, NULL, &len)) { - LOG("No SleepWake blob to read beyond chunk %d\n", i); - break; - } - if (PEReadNVRAMProperty(nvram_var_name_buffer, outbuf+concat_len, &len) == FALSE) { - PERemoveNVRAMProperty(nvram_var_name_buffer); - LOG("Could not read the property :-(\n"); - break; - } - PERemoveNVRAMProperty(nvram_var_name_buffer); - concat_len += len; - } - LOG("Concatenated length for the SWD blob %d\n", concat_len); - - if (concat_len) { - error = sleepWakeDebugSaveFile(oswatchdog ? kOSWatchdogStacksFilename : kSleepWakeStacksFilename, - outbuf, concat_len); - if (error) { - LOG("Failed to save SleepWake zipped data to file. error:%d\n", error); - } else { - LOG("Saved SleepWake zipped data to file.\n"); - } - } - - } - else { - LOG("No buffer allocated to save failure stackshot\n"); - } - - - gRootDomain->swd_lock = 0; + gRootDomain->swd_lock = 0; exit: - PERemoveNVRAMProperty(oswatchdog ? kIOOSWatchdogFailureString : kIOSleepWakeFailureString); - return; -} - - -void IOPMrootDomain::getFailureData(thread_t *thread, char *failureStr, size_t strLen) -{ - IORegistryIterator * iter; - IORegistryEntry * entry; - IOService * node; - bool nodeFound = false; - - const void * callMethod = NULL; - const char * objectName = NULL; - uint32_t timeout = getWatchdogTimeout(); - const char * phaseString = NULL; - const char * phaseDescription = NULL; - - IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, notifierObject); - uint32_t tracePhase = pmTracer->getTracePhase(); - - *thread = NULL; - if ((tracePhase < kIOPMTracePointSystemSleep) || (tracePhase == kIOPMTracePointDarkWakeEntry)) { - snprintf(failureStr, strLen, "%sSleep transition timed out after %d seconds", failureStr, timeout); - } - else { - snprintf(failureStr, strLen, "%sWake transition timed out after %d seconds", failureStr,timeout); - } - tracePhase2String(tracePhase, &phaseString, &phaseDescription); - - if (notifierThread) { - if (notifier && (notifier->identifier)) { - objectName = notifier->identifier->getCStringNoCopy(); - } - *thread = notifierThread; - } - else { - - iter = IORegistryIterator::iterateOver( - getPMRootDomain(), gIOPowerPlane, kIORegistryIterateRecursively); - - if (iter) - { - while ((entry = iter->getNextObject())) - { - node = OSDynamicCast(IOService, entry); - if (!node) - continue; - if (OSDynamicCast(IOPowerConnection, node)) { - continue; - } - - if(node->getBlockingDriverCall(thread, &callMethod)) { - nodeFound = true; - break; - } - } - iter->release(); - } - if (nodeFound) { - OSKext *kext = OSKext::lookupKextWithAddress((vm_address_t)callMethod); - if (kext) { - objectName = kext->getIdentifierCString(); - } - } - } - if (phaseDescription) { - snprintf(failureStr, strLen, "%s while %s.", 
failureStr, phaseDescription); - } - if (objectName) { - snprintf(failureStr, strLen, "%s Suspected bundle: %s.", failureStr, objectName); - } - if (*thread) { - snprintf(failureStr, strLen, "%s Thread 0x%llx.", failureStr, thread_tid(*thread)); - } - - DLOG("%s\n", failureStr); -} - -struct swd_stackshot_compressed_data -{ - z_output_func zoutput; - size_t zipped; - uint64_t totalbytes; - uint64_t lastpercent; - IOReturn error; - unsigned outremain; - unsigned outlen; - unsigned writes; - Bytef * outbuf; + PERemoveNVRAMProperty(oswatchdog ? kIOOSWatchdogFailureString : kIOSleepWakeFailureString); + return; +} + + +void +IOPMrootDomain::getFailureData(thread_t *thread, char *failureStr, size_t strLen) +{ + IORegistryIterator * iter; + IORegistryEntry * entry; + IOService * node; + bool nodeFound = false; + + const void * callMethod = NULL; + const char * objectName = NULL; + uint32_t timeout = getWatchdogTimeout(); + const char * phaseString = NULL; + const char * phaseDescription = NULL; + + IOPMServiceInterestNotifier *notifier = OSDynamicCast(IOPMServiceInterestNotifier, notifierObject); + uint32_t tracePhase = pmTracer->getTracePhase(); + + *thread = NULL; + if ((tracePhase < kIOPMTracePointSystemSleep) || (tracePhase == kIOPMTracePointDarkWakeEntry)) { + snprintf(failureStr, strLen, "%sSleep transition timed out after %d seconds", failureStr, timeout); + } else { + snprintf(failureStr, strLen, "%sWake transition timed out after %d seconds", failureStr, timeout); + } + tracePhase2String(tracePhase, &phaseString, &phaseDescription); + + if (notifierThread) { + if (notifier && (notifier->identifier)) { + objectName = notifier->identifier->getCStringNoCopy(); + } + *thread = notifierThread; + } else { + iter = IORegistryIterator::iterateOver( + getPMRootDomain(), gIOPowerPlane, kIORegistryIterateRecursively); + + if (iter) { + while ((entry = iter->getNextObject())) { + node = OSDynamicCast(IOService, entry); + if (!node) { + continue; + } + if (OSDynamicCast(IOPowerConnection, node)) { + continue; + } + + if (node->getBlockingDriverCall(thread, &callMethod)) { + nodeFound = true; + break; + } + } + iter->release(); + } + if (nodeFound) { + OSKext *kext = OSKext::lookupKextWithAddress((vm_address_t)callMethod); + if (kext) { + objectName = kext->getIdentifierCString(); + } + } + } + if (phaseDescription) { + snprintf(failureStr, strLen, "%s while %s.", failureStr, phaseDescription); + } + if (objectName) { + snprintf(failureStr, strLen, "%s Suspected bundle: %s.", failureStr, objectName); + } + if (*thread) { + snprintf(failureStr, strLen, "%s Thread 0x%llx.", failureStr, thread_tid(*thread)); + } + + DLOG("%s\n", failureStr); +} + +struct swd_stackshot_compressed_data { + z_output_func zoutput; + size_t zipped; + uint64_t totalbytes; + uint64_t lastpercent; + IOReturn error; + unsigned outremain; + unsigned outlen; + unsigned writes; + Bytef * outbuf; }; struct swd_stackshot_compressed_data swd_zip_var = { }; -static void *swd_zs_alloc(void *__unused ref, u_int items, u_int size) +static void * +swd_zs_alloc(void *__unused ref, u_int items, u_int size) { void *result; LOG("Alloc in zipping %d items of size %d\n", items, size); @@ -10068,592 +9990,615 @@ static void *swd_zs_alloc(void *__unused ref, u_int items, u_int size) result = (void *)(swd_zs_zmem + swd_zs_zoffset); swd_zs_zoffset += ~31L & (31 + (items * size)); // 32b align for vector crc LOG("Offset %zu\n", swd_zs_zoffset); - return (result); + return result; } -static int swd_zinput(z_streamp strm, Bytef *buf, unsigned size) 
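// A standalone sketch of the 32-byte round-up that swd_zs_alloc() above uses to
// carve zlib scratch memory out of its fixed arena ("32b align for vector crc");
// the concrete numbers are illustrative assumptions.
size_t request = 100;                    // e.g. items = 25, size = 4
size_t rounded = ~31L & (31 + request);  // (100 + 31) & ~31 == 128
// Adding 31 and then clearing the low five bits rounds any request up to the
// next multiple of 32, so successive offsets into swd_zs_zmem stay 32-byte aligned.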
+static int +swd_zinput(z_streamp strm, Bytef *buf, unsigned size) { unsigned len; len = strm->avail_in; - if (len > size) + if (len > size) { len = size; - if (len == 0) + } + if (len == 0) { return 0; + } - if (strm->next_in != (Bytef *) strm) + if (strm->next_in != (Bytef *) strm) { memcpy(buf, strm->next_in, len); - else + } else { bzero(buf, len); + } - strm->adler = z_crc32(strm->adler, buf, len); + strm->adler = z_crc32(strm->adler, buf, len); - strm->avail_in -= len; - strm->next_in += len; - strm->total_in += len; + strm->avail_in -= len; + strm->next_in += len; + strm->total_in += len; - return (int)len; + return (int)len; } -static int swd_zoutput(z_streamp strm, Bytef *buf, unsigned len) +static int +swd_zoutput(z_streamp strm, Bytef *buf, unsigned len) { unsigned int i = 0; // if outlen > max size don't add to the buffer if (strm && buf) { if (swd_zip_var.outlen + len > SWD_COMPRESSED_BUFSIZE) { LOG("No space to GZIP... not writing to NVRAM\n"); - return (len); + return len; } } for (i = 0; i < len; i++) { - *(swd_zip_var.outbuf + swd_zip_var.outlen + i) = *(buf +i); + *(swd_zip_var.outbuf + swd_zip_var.outlen + i) = *(buf + i); } swd_zip_var.outlen += len; - return (len); -} -static void swd_zs_free(void * __unused ref, void * __unused ptr) {} - -static int swd_compress(char *inPtr, char *outPtr, size_t numBytes) -{ - int wbits = 12; - int memlevel = 3; - - if (!swd_zs.zalloc) { - swd_zs.zalloc = swd_zs_alloc; - swd_zs.zfree = swd_zs_free; - if (deflateInit2(&swd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits + 16, memlevel, Z_DEFAULT_STRATEGY)) { - // allocation failed - bzero(&swd_zs, sizeof(swd_zs)); - // swd_zs_zoffset = 0; - } else { - LOG("PMRD inited the zlib allocation routines\n"); - } - } - - - - swd_zip_var.zipped = 0; - swd_zip_var.totalbytes = 0; // should this be the max that we have? - swd_zip_var.lastpercent = 0; - swd_zip_var.error = kIOReturnSuccess; - swd_zip_var.outremain = 0; - swd_zip_var.outlen = 0; - swd_zip_var.writes = 0; - swd_zip_var.outbuf = (Bytef *)outPtr; - - swd_zip_var.totalbytes = numBytes; - - swd_zs.avail_in = 0; - swd_zs.next_in = NULL; - swd_zs.avail_out = 0; - swd_zs.next_out = NULL; - - deflateResetWithIO(&swd_zs, swd_zinput, swd_zoutput); - - z_stream *zs; - int zr; - zs = &swd_zs; - - zr = Z_OK; - - while (swd_zip_var.error >= 0) { - if (!zs->avail_in) { - zs->next_in = (unsigned char *)inPtr ? (Bytef *)inPtr : (Bytef *)zs; /* zero marker? 
*/ - zs->avail_in = numBytes; - } - if (!zs->avail_out) { - zs->next_out = (Bytef *)zs; - zs->avail_out = UINT32_MAX; - } - zr = deflate(zs, Z_NO_FLUSH); - if (Z_STREAM_END == zr) - break; - if (zr != Z_OK) { - LOG("ZERR %d\n", zr); - swd_zip_var.error = zr; - } else { - if (zs->total_in == numBytes) { - break; - } - } - } - zr = Z_OK; - //now flush the stream - while (swd_zip_var.error >= 0) { - if (!zs->avail_out) { - zs->next_out = (Bytef *)zs; - zs->avail_out = UINT32_MAX; - } - zr = deflate(zs, Z_FINISH); - if (Z_STREAM_END == zr) { - break; - } - if (zr != Z_OK) { - LOG("ZERR %d\n", zr); - swd_zip_var.error = zr; - } else { - if (zs->total_in == numBytes) { - LOG("Total output size %d\n", swd_zip_var.outlen); - break; - } - } - } - - return swd_zip_var.outlen; -} - -void IOPMrootDomain::takeStackshot(bool wdogTrigger, bool isOSXWatchdog, bool isSpinDump) -{ - swd_hdr * hdr = NULL; - int wdog_panic = -1; - int cnt = 0; - pid_t pid = 0; - kern_return_t kr = KERN_SUCCESS; - uint32_t flags; - - char * dstAddr; - uint32_t size; - uint32_t bytesRemaining; - unsigned bytesWritten = 0; - unsigned totalBytes = 0; - OSString * UUIDstring = NULL; - - char failureStr[512]; - thread_t thread = NULL; - const char * uuid; - - - uint32_t bufSize; - uint32_t initialStackSize; - - - - failureStr[0] = 0; - if (isSpinDump) { - if (_systemTransitionType != kSystemTransitionSleep && - _systemTransitionType != kSystemTransitionWake) - return; - - if (gSpinDumpBufferFull) - return; - if (swd_spindump_buffer == NULL) { - sleepWakeDebugSpinDumpMemAlloc(); - if (swd_spindump_buffer == NULL) return; - } - - bufSize = SWD_SPINDUMP_SIZE; - initialStackSize = SWD_INITIAL_SPINDUMP_SIZE; - hdr = (swd_hdr *)swd_spindump_buffer; - - } else { - if ( (kIOSleepWakeWdogOff & gIOKitDebug) || systemBooting || systemShutdown || gWillShutdown) - return; - - if (isOSXWatchdog) { - snprintf(failureStr, sizeof(failureStr), "Stackshot Reason: "); - snprintf(failureStr, sizeof(failureStr), "%smacOS watchdog triggered failure\n", failureStr); - } - else if (wdogTrigger) { - if ((UUIDstring = OSDynamicCast(OSString, getProperty(kIOPMSleepWakeUUIDKey))) != NULL ) { - uuid = UUIDstring->getCStringNoCopy(); - snprintf(failureStr, sizeof(failureStr), "UUID: %s\n", uuid); - } - - snprintf(failureStr, sizeof(failureStr), "%sStackshot Reason: ", failureStr); - getFailureData(&thread, failureStr, sizeof(failureStr)); - if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) { - goto skip_stackshot; - } - - } - else { - snprintf(failureStr, sizeof(failureStr), "%sStackshot triggered for debugging stackshot collection.\n", failureStr); - } - // Take only one stackshot in this case. - cnt = SWD_MAX_STACKSHOTS-1; - - if (swd_buffer == NULL) { - sleepWakeDebugMemAlloc(); - if (swd_buffer == NULL) return; - } - hdr = (swd_hdr *)swd_buffer; - - bufSize = hdr->alloc_size;; - initialStackSize = bufSize; - - } - - - if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) - return; - - - dstAddr = (char*)hdr + hdr->spindump_offset; - bytesRemaining = bufSize - hdr->spindump_offset; - - DLOG("Taking snapshot. bytesRemaining: %d\n", bytesRemaining); - - flags = STACKSHOT_KCDATA_FORMAT|STACKSHOT_NO_IO_STATS|STACKSHOT_SAVE_KEXT_LOADINFO|STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY|STACKSHOT_THREAD_WAITINFO; - while (kr == KERN_SUCCESS) { - - if (cnt == 0) { - /* - * Take stackshot of all process on first sample. Size is restricted - * to SWD_INITIAL_STACK_SIZE - */ - pid = -1; - size = (bytesRemaining > initialStackSize) ? 
initialStackSize : bytesRemaining; - } - else { - /* Take sample of kernel threads only */ - pid = 0; - size = bytesRemaining; - } - - kr = stack_snapshot_from_kernel(pid, dstAddr, size, flags, 0, &bytesWritten); - DLOG("stack_snapshot_from_kernel returned 0x%x. pid: %d bufsize:0x%x flags:0x%x bytesWritten: %d\n", - kr, pid, size, flags, bytesWritten); - if (kr == KERN_INSUFFICIENT_BUFFER_SIZE) { - if (pid == -1) { - // Insufficient buffer when trying to take stackshot of user & kernel space threads. - // Continue to take stackshot of just kernel threads - ++cnt; - kr = KERN_SUCCESS; - continue; - } - else if (totalBytes == 0) { - MSG("Failed to get stackshot(0x%x) bufsize:0x%x flags:0x%x\n", kr, size, flags); - } - } - - dstAddr += bytesWritten; - totalBytes += bytesWritten; - bytesRemaining -= bytesWritten; - - if (++cnt == SWD_MAX_STACKSHOTS) { - break; - } - IOSleep(10); // 10 ms - } - - hdr->spindump_size = (bufSize - bytesRemaining - hdr->spindump_offset); - - memset(hdr->reason, 0x20, sizeof(hdr->reason)); - if (isSpinDump) { - snprintf(hdr->reason, sizeof(hdr->reason), "\nStackshot reason: Power State Change Delay\n\n"); - gRootDomain->swd_lock = 0; - gSpinDumpBufferFull = true; - return; - } - - // Compress stackshot and save to NVRAM - { - char *outbuf = (char *)swd_compressed_buffer; - int outlen = 0; - int num_chunks = 0; - int max_chunks = 0; - int leftover = 0; - char nvram_var_name_buffer[20]; - - outlen = swd_compress((char*)hdr + hdr->spindump_offset, outbuf, bytesWritten); - - if (outlen) { - max_chunks = outlen / (2096 - 200); - leftover = outlen % (2096 - 200); - - if (max_chunks < 8) { - for (num_chunks = 0; num_chunks < max_chunks; num_chunks++) { - snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks+1); - if (PEWriteNVRAMProperty(nvram_var_name_buffer, (outbuf + (num_chunks * (2096-200))), (2096 - 200)) == FALSE) { - LOG("Failed to update NVRAM %d\n", num_chunks); - break; - } - } - if (leftover) { - snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks+1); - if (PEWriteNVRAMProperty(nvram_var_name_buffer, (outbuf + (num_chunks * (2096-200))), leftover) == FALSE) { - LOG("Failed to update NVRAM with leftovers\n"); - } - } - } - else { - LOG("Compressed failure stackshot is too large. 
size=%d bytes\n", outlen); - } - } - } - - if (failureStr[0]) { - - if (!isOSXWatchdog) { - // append sleep-wake failure code - snprintf(failureStr, sizeof(failureStr), "%s\nFailure code:: 0x%08x %08x\n", - failureStr, pmTracer->getTraceData(), pmTracer->getTracePhase()); - if (PEWriteNVRAMProperty(kIOSleepWakeFailureString, failureStr, strlen(failureStr)) == false) { - DLOG("Failed to write SleepWake failure string\n"); - } - } - else { - if (PEWriteNVRAMProperty(kIOOSWatchdogFailureString, failureStr, strlen(failureStr)) == false) { - DLOG("Failed to write OSWatchdog failure string\n"); - } - } - } - gRootDomain->swd_lock = 0; + return len; +} +static void +swd_zs_free(void * __unused ref, void * __unused ptr) +{ +} + +static int +swd_compress(char *inPtr, char *outPtr, size_t numBytes) +{ + int wbits = 12; + int memlevel = 3; + + if (!swd_zs.zalloc) { + swd_zs.zalloc = swd_zs_alloc; + swd_zs.zfree = swd_zs_free; + if (deflateInit2(&swd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits + 16, memlevel, Z_DEFAULT_STRATEGY)) { + // allocation failed + bzero(&swd_zs, sizeof(swd_zs)); + // swd_zs_zoffset = 0; + } else { + LOG("PMRD inited the zlib allocation routines\n"); + } + } + + + + swd_zip_var.zipped = 0; + swd_zip_var.totalbytes = 0; // should this be the max that we have? + swd_zip_var.lastpercent = 0; + swd_zip_var.error = kIOReturnSuccess; + swd_zip_var.outremain = 0; + swd_zip_var.outlen = 0; + swd_zip_var.writes = 0; + swd_zip_var.outbuf = (Bytef *)outPtr; + + swd_zip_var.totalbytes = numBytes; + + swd_zs.avail_in = 0; + swd_zs.next_in = NULL; + swd_zs.avail_out = 0; + swd_zs.next_out = NULL; + + deflateResetWithIO(&swd_zs, swd_zinput, swd_zoutput); + + z_stream *zs; + int zr; + zs = &swd_zs; + + zr = Z_OK; + + while (swd_zip_var.error >= 0) { + if (!zs->avail_in) { + zs->next_in = (unsigned char *)inPtr ? (Bytef *)inPtr : (Bytef *)zs; /* zero marker? 
*/ + zs->avail_in = numBytes; + } + if (!zs->avail_out) { + zs->next_out = (Bytef *)zs; + zs->avail_out = UINT32_MAX; + } + zr = deflate(zs, Z_NO_FLUSH); + if (Z_STREAM_END == zr) { + break; + } + if (zr != Z_OK) { + LOG("ZERR %d\n", zr); + swd_zip_var.error = zr; + } else { + if (zs->total_in == numBytes) { + break; + } + } + } + zr = Z_OK; + //now flush the stream + while (swd_zip_var.error >= 0) { + if (!zs->avail_out) { + zs->next_out = (Bytef *)zs; + zs->avail_out = UINT32_MAX; + } + zr = deflate(zs, Z_FINISH); + if (Z_STREAM_END == zr) { + break; + } + if (zr != Z_OK) { + LOG("ZERR %d\n", zr); + swd_zip_var.error = zr; + } else { + if (zs->total_in == numBytes) { + LOG("Total output size %d\n", swd_zip_var.outlen); + break; + } + } + } + + return swd_zip_var.outlen; +} + +void +IOPMrootDomain::takeStackshot(bool wdogTrigger, bool isOSXWatchdog, bool isSpinDump) +{ + swd_hdr * hdr = NULL; + int wdog_panic = -1; + int cnt = 0; + pid_t pid = 0; + kern_return_t kr = KERN_SUCCESS; + uint32_t flags; + + char * dstAddr; + uint32_t size; + uint32_t bytesRemaining; + unsigned bytesWritten = 0; + unsigned totalBytes = 0; + OSString * UUIDstring = NULL; + + char failureStr[512]; + thread_t thread = NULL; + const char * uuid; + + + uint32_t bufSize; + uint32_t initialStackSize; + + + + failureStr[0] = 0; + if (isSpinDump) { + if (_systemTransitionType != kSystemTransitionSleep && + _systemTransitionType != kSystemTransitionWake) { + return; + } + + if (gSpinDumpBufferFull) { + return; + } + if (swd_spindump_buffer == NULL) { + sleepWakeDebugSpinDumpMemAlloc(); + if (swd_spindump_buffer == NULL) { + return; + } + } + + bufSize = SWD_SPINDUMP_SIZE; + initialStackSize = SWD_INITIAL_SPINDUMP_SIZE; + hdr = (swd_hdr *)swd_spindump_buffer; + } else { + if ((kIOSleepWakeWdogOff & gIOKitDebug) || systemBooting || systemShutdown || gWillShutdown) { + return; + } + + if (isOSXWatchdog) { + snprintf(failureStr, sizeof(failureStr), "Stackshot Reason: "); + snprintf(failureStr, sizeof(failureStr), "%smacOS watchdog triggered failure\n", failureStr); + } else if (wdogTrigger) { + if ((UUIDstring = OSDynamicCast(OSString, getProperty(kIOPMSleepWakeUUIDKey))) != NULL) { + uuid = UUIDstring->getCStringNoCopy(); + snprintf(failureStr, sizeof(failureStr), "UUID: %s\n", uuid); + } + + snprintf(failureStr, sizeof(failureStr), "%sStackshot Reason: ", failureStr); + getFailureData(&thread, failureStr, sizeof(failureStr)); + if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) { + goto skip_stackshot; + } + } else { + snprintf(failureStr, sizeof(failureStr), "%sStackshot triggered for debugging stackshot collection.\n", failureStr); + } + // Take only one stackshot in this case. + cnt = SWD_MAX_STACKSHOTS - 1; + + if (swd_buffer == NULL) { + sleepWakeDebugMemAlloc(); + if (swd_buffer == NULL) { + return; + } + } + hdr = (swd_hdr *)swd_buffer; + + bufSize = hdr->alloc_size; + initialStackSize = bufSize; + } + + + if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) { + return; + } + + + dstAddr = (char*)hdr + hdr->spindump_offset; + bytesRemaining = bufSize - hdr->spindump_offset; + + DLOG("Taking snapshot. bytesRemaining: %d\n", bytesRemaining); + + flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_NO_IO_STATS | STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY | STACKSHOT_THREAD_WAITINFO; + while (kr == KERN_SUCCESS) { + if (cnt == 0) { + /* + * Take stackshot of all processes on first sample. Size is restricted + * to SWD_INITIAL_STACK_SIZE. + */ + pid = -1; + size = (bytesRemaining > initialStackSize) ?
initialStackSize : bytesRemaining; + } else { + /* Take sample of kernel threads only */ + pid = 0; + size = bytesRemaining; + } + + kr = stack_snapshot_from_kernel(pid, dstAddr, size, flags, 0, &bytesWritten); + DLOG("stack_snapshot_from_kernel returned 0x%x. pid: %d bufsize:0x%x flags:0x%x bytesWritten: %d\n", + kr, pid, size, flags, bytesWritten); + if (kr == KERN_INSUFFICIENT_BUFFER_SIZE) { + if (pid == -1) { + // Insufficient buffer when trying to take stackshot of user & kernel space threads. + // Continue to take stackshot of just kernel threads + ++cnt; + kr = KERN_SUCCESS; + continue; + } else if (totalBytes == 0) { + MSG("Failed to get stackshot(0x%x) bufsize:0x%x flags:0x%x\n", kr, size, flags); + } + } + + dstAddr += bytesWritten; + totalBytes += bytesWritten; + bytesRemaining -= bytesWritten; + + if (++cnt == SWD_MAX_STACKSHOTS) { + break; + } + IOSleep(10); // 10 ms + } + + hdr->spindump_size = (bufSize - bytesRemaining - hdr->spindump_offset); + + memset(hdr->reason, 0x20, sizeof(hdr->reason)); + if (isSpinDump) { + snprintf(hdr->reason, sizeof(hdr->reason), "\nStackshot reason: Power State Change Delay\n\n"); + gRootDomain->swd_lock = 0; + gSpinDumpBufferFull = true; + return; + } + + // Compress stackshot and save to NVRAM + { + char *outbuf = (char *)swd_compressed_buffer; + int outlen = 0; + int num_chunks = 0; + int max_chunks = 0; + int leftover = 0; + char nvram_var_name_buffer[20]; + + outlen = swd_compress((char*)hdr + hdr->spindump_offset, outbuf, bytesWritten); + + if (outlen) { + max_chunks = outlen / (2096 - 200); + leftover = outlen % (2096 - 200); + + if (max_chunks < 8) { + for (num_chunks = 0; num_chunks < max_chunks; num_chunks++) { + snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks + 1); + if (PEWriteNVRAMPropertyWithCopy(nvram_var_name_buffer, (outbuf + (num_chunks * (2096 - 200))), (2096 - 200)) == FALSE) { + LOG("Failed to update NVRAM %d\n", num_chunks); + break; + } + } + if (leftover) { + snprintf(nvram_var_name_buffer, 20, "%s%02d", SWD_STACKSHOT_VAR_PREFIX, num_chunks + 1); + if (PEWriteNVRAMPropertyWithCopy(nvram_var_name_buffer, (outbuf + (num_chunks * (2096 - 200))), leftover) == FALSE) { + LOG("Failed to update NVRAM with leftovers\n"); + } + } + } else { + LOG("Compressed failure stackshot is too large. 
size=%d bytes\n", outlen); + } + } + } + + if (failureStr[0]) { + if (!isOSXWatchdog) { + // append sleep-wake failure code + snprintf(failureStr, sizeof(failureStr), "%s\nFailure code:: 0x%08x %08x\n", + failureStr, pmTracer->getTraceData(), pmTracer->getTracePhase()); + if (PEWriteNVRAMProperty(kIOSleepWakeFailureString, failureStr, strlen(failureStr)) == false) { + DLOG("Failed to write SleepWake failure string\n"); + } + } else { + if (PEWriteNVRAMProperty(kIOOSWatchdogFailureString, failureStr, strlen(failureStr)) == false) { + DLOG("Failed to write OSWatchdog failure string\n"); + } + } + } + gRootDomain->swd_lock = 0; skip_stackshot: - if (wdogTrigger) { - PE_parse_boot_argn("swd_panic", &wdog_panic, sizeof(wdog_panic)); - - if ((wdog_panic == 1) || (PEGetCoprocessorVersion() >= kCoprocessorVersion2)) { - if (thread) { - panic_with_thread_context(0, NULL, DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT, thread, "%s", failureStr); - } - else { - panic_with_options(0, NULL, DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT, "%s", failureStr); - } - return; - } - else if (swd_flags & SWD_BOOT_BY_SW_WDOG) { - // If current boot is due to this watch dog trigger restart in previous boot, - // then don't trigger again until at least 1 successful sleep & wake. - if (!(sleepCnt && (displayWakeCnt || darkWakeCnt))) { - LOG("Shutting down due to repeated Sleep/Wake failures\n"); - if (!tasksSuspended) { - tasksSuspended = TRUE; - tasks_system_suspend(true); - } - PEHaltRestart(kPEHaltCPU); - return; - } - } - } - - - if (wdogTrigger) { - LOG("Restarting to collect Sleep wake debug logs\n"); - if (!tasksSuspended) { - tasksSuspended = TRUE; - tasks_system_suspend(true); - } - - PEHaltRestart(kPERestartCPU); - } - else { - saveFailureData2File(); - } -} - -void IOPMrootDomain::sleepWakeDebugMemAlloc( ) -{ - vm_size_t size = SWD_STACKSHOT_SIZE + SWD_COMPRESSED_BUFSIZE + SWD_ZLIB_BUFSIZE; - - swd_hdr *hdr = NULL; - void *bufPtr = NULL; - - IOBufferMemoryDescriptor *memDesc = NULL; - - - if ( kIOSleepWakeWdogOff & gIOKitDebug ) - return; - - if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) - return; - - if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) - return; - - memDesc = IOBufferMemoryDescriptor::inTaskWithOptions( - kernel_task, kIODirectionIn|kIOMemoryMapperNone, - size); - if (memDesc == NULL) - { - DLOG("Failed to allocate Memory descriptor for sleepWake debug\n"); - goto exit; - } - - bufPtr = memDesc->getBytesNoCopy(); - - // Carve out memory for zlib routines - swd_zs_zmem = (vm_offset_t)bufPtr; - bufPtr = (char *)bufPtr + SWD_ZLIB_BUFSIZE; - - // Carve out memory for compressed stackshots - swd_compressed_buffer = bufPtr; - bufPtr = (char *)bufPtr + SWD_COMPRESSED_BUFSIZE; - - // Remaining is used for holding stackshot - hdr = (swd_hdr *)bufPtr; - memset(hdr, 0, sizeof(swd_hdr)); - - hdr->signature = SWD_HDR_SIGNATURE; - hdr->alloc_size = SWD_STACKSHOT_SIZE; - - hdr->spindump_offset = sizeof(swd_hdr); - swd_buffer = (void *)hdr; - swd_memDesc = memDesc; - DLOG("SleepWake debug buffer size:0x%x spindump offset:0x%x\n", hdr->alloc_size, hdr->spindump_offset); + if (wdogTrigger) { + PE_parse_boot_argn("swd_panic", &wdog_panic, sizeof(wdog_panic)); + + if ((wdog_panic == 1) || (PEGetCoprocessorVersion() >= kCoprocessorVersion2)) { + if (thread) { + panic_with_thread_context(0, NULL, DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT, thread, "%s", failureStr); + } else { + panic_with_options(0, NULL, DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT, "%s", failureStr); + } + return; + } else if (swd_flags & 
SWD_BOOT_BY_SW_WDOG) { + // If the current boot is due to this watchdog triggering a restart in the previous boot, + // then don't trigger again until at least one successful sleep & wake. + if (!(sleepCnt && (displayWakeCnt || darkWakeCnt))) { + LOG("Shutting down due to repeated Sleep/Wake failures\n"); + if (!tasksSuspended) { + tasksSuspended = TRUE; + tasks_system_suspend(true); + } + PEHaltRestart(kPEHaltCPU); + return; + } + } + } + + + if (wdogTrigger) { + LOG("Restarting to collect Sleep wake debug logs\n"); + if (!tasksSuspended) { + tasksSuspended = TRUE; + tasks_system_suspend(true); + } + + PEHaltRestart(kPERestartCPU); + } else { + saveFailureData2File(); + } +} + +void +IOPMrootDomain::sleepWakeDebugMemAlloc() +{ + vm_size_t size = SWD_STACKSHOT_SIZE + SWD_COMPRESSED_BUFSIZE + SWD_ZLIB_BUFSIZE; + + swd_hdr *hdr = NULL; + void *bufPtr = NULL; + + IOBufferMemoryDescriptor *memDesc = NULL; + + + if (kIOSleepWakeWdogOff & gIOKitDebug) { + return; + } + + if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) { + return; + } + + if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) { + return; + } + + memDesc = IOBufferMemoryDescriptor::inTaskWithOptions( + kernel_task, kIODirectionIn | kIOMemoryMapperNone, + size); + if (memDesc == NULL) { + DLOG("Failed to allocate Memory descriptor for sleepWake debug\n"); + goto exit; + } + + bufPtr = memDesc->getBytesNoCopy(); + + // Carve out memory for zlib routines + swd_zs_zmem = (vm_offset_t)bufPtr; + bufPtr = (char *)bufPtr + SWD_ZLIB_BUFSIZE; + + // Carve out memory for compressed stackshots + swd_compressed_buffer = bufPtr; + bufPtr = (char *)bufPtr + SWD_COMPRESSED_BUFSIZE; + + // Remaining is used for holding stackshot + hdr = (swd_hdr *)bufPtr; + memset(hdr, 0, sizeof(swd_hdr)); + + hdr->signature = SWD_HDR_SIGNATURE; + hdr->alloc_size = SWD_STACKSHOT_SIZE; + + hdr->spindump_offset = sizeof(swd_hdr); + swd_buffer = (void *)hdr; + swd_memDesc = memDesc; + DLOG("SleepWake debug buffer size:0x%x spindump offset:0x%x\n", hdr->alloc_size, hdr->spindump_offset); exit: - gRootDomain->swd_lock = 0; + gRootDomain->swd_lock = 0; } -void IOPMrootDomain::sleepWakeDebugSpinDumpMemAlloc( ) +void +IOPMrootDomain::sleepWakeDebugSpinDumpMemAlloc() { - vm_size_t size = SWD_SPINDUMP_SIZE; + vm_size_t size = SWD_SPINDUMP_SIZE; - swd_hdr *hdr = NULL; + swd_hdr *hdr = NULL; - IOBufferMemoryDescriptor *memDesc = NULL; + IOBufferMemoryDescriptor *memDesc = NULL; - if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) - return; + if (!OSCompareAndSwap(0, 1, &gRootDomain->swd_lock)) { + return; + } - memDesc = IOBufferMemoryDescriptor::inTaskWithOptions( - kernel_task, kIODirectionIn|kIOMemoryMapperNone, - SWD_SPINDUMP_SIZE); + memDesc = IOBufferMemoryDescriptor::inTaskWithOptions( + kernel_task, kIODirectionIn | kIOMemoryMapperNone, + SWD_SPINDUMP_SIZE); - if (memDesc == NULL) - { - DLOG("Failed to allocate Memory descriptor for sleepWake debug spindump\n"); - goto exit; - } + if (memDesc == NULL) { + DLOG("Failed to allocate Memory descriptor for sleepWake debug spindump\n"); + goto exit; + } - hdr = (swd_hdr *)memDesc->getBytesNoCopy(); - memset(hdr, 0, sizeof(swd_hdr)); + hdr = (swd_hdr *)memDesc->getBytesNoCopy(); + memset(hdr, 0, sizeof(swd_hdr)); - hdr->signature = SWD_HDR_SIGNATURE; - hdr->alloc_size = size; + hdr->signature = SWD_HDR_SIGNATURE; + hdr->alloc_size = size; - hdr->spindump_offset = sizeof(swd_hdr); - swd_spindump_buffer = (void *)hdr; + hdr->spindump_offset = sizeof(swd_hdr); + swd_spindump_buffer = (void *)hdr; exit: - gRootDomain->swd_lock = 0; + 
gRootDomain->swd_lock = 0; } -void IOPMrootDomain::sleepWakeDebugEnableWdog() +void +IOPMrootDomain::sleepWakeDebugEnableWdog() { } -bool IOPMrootDomain::sleepWakeDebugIsWdogEnabled() +bool +IOPMrootDomain::sleepWakeDebugIsWdogEnabled() { - return (!systemBooting && !systemShutdown && !gWillShutdown); + return !systemBooting && !systemShutdown && !gWillShutdown; } -void IOPMrootDomain::sleepWakeDebugSaveSpinDumpFile() +void +IOPMrootDomain::sleepWakeDebugSaveSpinDumpFile() { - swd_hdr *hdr = NULL; - errno_t error = EIO; + swd_hdr *hdr = NULL; + errno_t error = EIO; - if (swd_spindump_buffer && gSpinDumpBufferFull) { - hdr = (swd_hdr *)swd_spindump_buffer; + if (swd_spindump_buffer && gSpinDumpBufferFull) { + hdr = (swd_hdr *)swd_spindump_buffer; - error = sleepWakeDebugSaveFile("/var/tmp/SleepWakeDelayStacks.dump", - (char*)hdr+hdr->spindump_offset, hdr->spindump_size); + error = sleepWakeDebugSaveFile("/var/tmp/SleepWakeDelayStacks.dump", + (char*)hdr + hdr->spindump_offset, hdr->spindump_size); - if (error) return; + if (error) { + return; + } - sleepWakeDebugSaveFile("/var/tmp/SleepWakeDelayLog.dump", - (char*)hdr+offsetof(swd_hdr, UUID), - sizeof(swd_hdr)-offsetof(swd_hdr, UUID)); + sleepWakeDebugSaveFile("/var/tmp/SleepWakeDelayLog.dump", + (char*)hdr + offsetof(swd_hdr, UUID), + sizeof(swd_hdr) - offsetof(swd_hdr, UUID)); - gSpinDumpBufferFull = false; - } + gSpinDumpBufferFull = false; + } } -errno_t IOPMrootDomain::sleepWakeDebugSaveFile(const char *name, char *buf, int len) +errno_t +IOPMrootDomain::sleepWakeDebugSaveFile(const char *name, char *buf, int len) { - struct vnode *vp = NULL; - vfs_context_t ctx = vfs_context_create(vfs_context_current()); - kauth_cred_t cred = vfs_context_ucred(ctx); - struct vnode_attr va; - errno_t error = EIO; + struct vnode *vp = NULL; + vfs_context_t ctx = vfs_context_create(vfs_context_current()); + kauth_cred_t cred = vfs_context_ucred(ctx); + struct vnode_attr va; + errno_t error = EIO; - if (vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), - S_IRUSR|S_IRGRP|S_IROTH, VNODE_LOOKUP_NOFOLLOW, &vp, ctx) != 0) - { - LOG("Failed to open the file %s\n", name); - swd_flags |= SWD_FILEOP_ERROR; - goto exit; - } - VATTR_INIT(&va); - VATTR_WANTED(&va, va_nlink); - /* Don't dump to non-regular files or files with links. */ - if (vp->v_type != VREG || - vnode_getattr(vp, &va, ctx) || va.va_nlink != 1) { - LOG("Bailing as this is not a regular file\n"); - swd_flags |= SWD_FILEOP_ERROR; - goto exit; - } - VATTR_INIT(&va); - VATTR_SET(&va, va_data_size, 0); - vnode_setattr(vp, &va, ctx); + if (vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), + S_IRUSR | S_IRGRP | S_IROTH, VNODE_LOOKUP_NOFOLLOW, &vp, ctx) != 0) { + LOG("Failed to open the file %s\n", name); + swd_flags |= SWD_FILEOP_ERROR; + goto exit; + } + VATTR_INIT(&va); + VATTR_WANTED(&va, va_nlink); + /* Don't dump to non-regular files or files with links. */ + if (vp->v_type != VREG || + vnode_getattr(vp, &va, ctx) || va.va_nlink != 1) { + LOG("Bailing as this is not a regular file\n"); + swd_flags |= SWD_FILEOP_ERROR; + goto exit; + } + VATTR_INIT(&va); + VATTR_SET(&va, va_data_size, 0); + vnode_setattr(vp, &va, ctx); - if (buf != NULL) { - error = vn_rdwr(UIO_WRITE, vp, buf, len, 0, - UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL, vfs_context_proc(ctx)); - if (error != 0) { - LOG("Failed to save sleep wake log. 
err 0x%x\n", error); - swd_flags |= SWD_FILEOP_ERROR; - } - else { - DLOG("Saved %d bytes to file %s\n",len, name); - } - } + if (buf != NULL) { + error = vn_rdwr(UIO_WRITE, vp, buf, len, 0, + UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, cred, (int *) NULL, vfs_context_proc(ctx)); + if (error != 0) { + LOG("Failed to save sleep wake log. err 0x%x\n", error); + swd_flags |= SWD_FILEOP_ERROR; + } else { + DLOG("Saved %d bytes to file %s\n", len, name); + } + } exit: - if (vp) vnode_close(vp, FWRITE, ctx); - if (ctx) vfs_context_rele(ctx); - - return error; + if (vp) { + vnode_close(vp, FWRITE, ctx); + } + if (ctx) { + vfs_context_rele(ctx); + } + return error; } #else -void IOPMrootDomain::sleepWakeDebugTrig(bool restart) +void +IOPMrootDomain::sleepWakeDebugTrig(bool restart) { - uint32_t wdog_panic = 1; + uint32_t wdog_panic = 1; - if (restart) { - if (PE_parse_boot_argn("swd_panic", &wdog_panic, sizeof(wdog_panic)) && - (wdog_panic == 0)) { - return; - } - panic("Sleep/Wake hang detected"); - return; - } + if (restart) { + if (PE_parse_boot_argn("swd_panic", &wdog_panic, sizeof(wdog_panic)) && + (wdog_panic == 0)) { + return; + } + panic("Sleep/Wake hang detected"); + return; + } } -void IOPMrootDomain::takeStackshot(bool restart, bool isOSXWatchdog, bool isSpinDump) +void +IOPMrootDomain::takeStackshot(bool restart, bool isOSXWatchdog, bool isSpinDump) { #pragma unused(restart) #pragma unused(isOSXWatchdog) } -void IOPMrootDomain::sleepWakeDebugMemAlloc( ) +void +IOPMrootDomain::sleepWakeDebugMemAlloc() { } -void IOPMrootDomain::saveFailureData2File( ) +void +IOPMrootDomain::saveFailureData2File() { } -void IOPMrootDomain::sleepWakeDebugEnableWdog() +void +IOPMrootDomain::sleepWakeDebugEnableWdog() { } -bool IOPMrootDomain::sleepWakeDebugIsWdogEnabled() +bool +IOPMrootDomain::sleepWakeDebugIsWdogEnabled() { - return false; + return false; } -errno_t IOPMrootDomain::sleepWakeDebugSaveFile(const char *name, char *buf, int len) +errno_t +IOPMrootDomain::sleepWakeDebugSaveFile(const char *name, char *buf, int len) { - return 0; + return 0; } #endif diff --git a/iokit/Kernel/IOPMrootDomainInternal.h b/iokit/Kernel/IOPMrootDomainInternal.h index 240b254bd..7849fae71 100644 --- a/iokit/Kernel/IOPMrootDomainInternal.h +++ b/iokit/Kernel/IOPMrootDomainInternal.h @@ -2,14 +2,14 @@ * Copyright (c) 2006-2007 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -28,7 +28,7 @@ * Changes may only be made to the original, pmioctl.h. * This file must be updated only when pmioctl.h changes. */ - + /* * Defines the IOCTLs for dealing with the CPU power management KEXT. 
*/ @@ -38,34 +38,32 @@ #include #include -#define PMIOCGETVARIDINFO _IOW('P', 25, uint64_t) -#define PMIOCGETVARNAMEINFO _IOW('P', 26, uint64_t) -#define PMIOCSETVARINFO _IOW('P', 27, uint64_t) +#define PMIOCGETVARIDINFO _IOW('P', 25, uint64_t) +#define PMIOCGETVARNAMEINFO _IOW('P', 26, uint64_t) +#define PMIOCSETVARINFO _IOW('P', 27, uint64_t) /* * Data structures used by IOCTLs */ #pragma pack(4) -#define PMVARNAMELEN 16 +#define PMVARNAMELEN 16 -typedef enum -{ - vUnknown = 0, /* Unknown type */ - vBool = 1, /* Boolean value */ - vInt = 2, /* signed integer value */ - vUInt = 3, /* Unsigned integer value */ - vChars = 4, /* 8 characters */ - vInvalid = -1 /* invalid type */ +typedef enum{ + vUnknown = 0, /* Unknown type */ + vBool = 1, /* Boolean value */ + vInt = 2, /* signed integer value */ + vUInt = 3, /* Unsigned integer value */ + vChars = 4, /* 8 characters */ + vInvalid = -1 /* invalid type */ } pmioctlVarType_t; -typedef struct pmioctlVaribleInfo -{ - uint32_t varID; /* ID of variable */ - uint8_t varName[PMVARNAMELEN+1]; - pmioctlVarType_t varType; /* type of variable's value */ - uint64_t varInitValue; /* variable's initial value */ - uint64_t varCurValue; /* variable's current value */ +typedef struct pmioctlVaribleInfo { + uint32_t varID; /* ID of variable */ + uint8_t varName[PMVARNAMELEN + 1]; + pmioctlVarType_t varType; /* type of variable's value */ + uint64_t varInitValue;/* variable's initial value */ + uint64_t varCurValue;/* variable's current value */ } pmioctlVariableInfo_t; #pragma pack() diff --git a/iokit/Kernel/IOPerfControl.cpp b/iokit/Kernel/IOPerfControl.cpp index e5ece3480..f90699c34 100644 --- a/iokit/Kernel/IOPerfControl.cpp +++ b/iokit/Kernel/IOPerfControl.cpp @@ -12,192 +12,220 @@ #define super OSObject OSDefineMetaClassAndStructors(IOPerfControlClient, OSObject); -bool IOPerfControlClient::init(IOService *driver, uint64_t maxWorkCapacity) +bool +IOPerfControlClient::init(IOService *driver, uint64_t maxWorkCapacity) { - if (!super::init()) - return false; - - interface = PerfControllerInterface{ - .version = 0, - .registerDevice = - [](IOService *device) { - return kIOReturnSuccess; - }, - .unregisterDevice = - [](IOService *device) { - return kIOReturnSuccess; - }, - .workCanSubmit = - [](IOService *device, PerfControllerInterface::WorkState *state, WorkSubmitArgs *args) { - return false; - }, - .workSubmit = - [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkSubmitArgs *args) { - }, - .workBegin = - [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkBeginArgs *args) { - }, - .workEnd = - [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkEndArgs *args, bool done) { - }, - }; - - interfaceLock = IOLockAlloc(); - if (!interfaceLock) - goto error; - - deviceRegistrationList = OSSet::withCapacity(4); - if (!deviceRegistrationList) - goto error; - - bzero(workTable, sizeof(workTable)); - memset(&workTable[kIOPerfControlClientWorkUntracked], ~0, sizeof(WorkTableEntry)); - workTableNextIndex = kIOPerfControlClientWorkUntracked + 1; - - workTableLock = IOSimpleLockAlloc(); - if (!workTableLock) - goto error; - - // TODO: check sum(maxWorkCapacities) < table size - - return true; + if (!super::init()) { + return false; + } + + interface = PerfControllerInterface{ + .version = 0, + .registerDevice = + [](IOService *device) { + return kIOReturnSuccess; + }, + .unregisterDevice = + [](IOService *device) { + return kIOReturnSuccess; + }, + .workCanSubmit = + 
[](IOService *device, PerfControllerInterface::WorkState *state, WorkSubmitArgs *args) { + return false; + }, + .workSubmit = + [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkSubmitArgs *args) { + }, + .workBegin = + [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkBeginArgs *args) { + }, + .workEnd = + [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkEndArgs *args, bool done) { + }, + }; + + interfaceLock = IOLockAlloc(); + if (!interfaceLock) { + goto error; + } + + deviceRegistrationList = OSSet::withCapacity(4); + if (!deviceRegistrationList) { + goto error; + } + + bzero(workTable, sizeof(workTable)); + memset(&workTable[kIOPerfControlClientWorkUntracked], ~0, sizeof(WorkTableEntry)); + workTableNextIndex = kIOPerfControlClientWorkUntracked + 1; + + workTableLock = IOSimpleLockAlloc(); + if (!workTableLock) { + goto error; + } + + // TODO: check sum(maxWorkCapacities) < table size + + return true; error: - if (interfaceLock) - IOLockFree(interfaceLock); - if (deviceRegistrationList) - deviceRegistrationList->release(); - if (workTableLock) - IOSimpleLockFree(workTableLock); - return false; + if (interfaceLock) { + IOLockFree(interfaceLock); + } + if (deviceRegistrationList) { + deviceRegistrationList->release(); + } + if (workTableLock) { + IOSimpleLockFree(workTableLock); + } + return false; } IOPerfControlClient *_Atomic gSharedClient = nullptr; -IOPerfControlClient *IOPerfControlClient::copyClient(IOService *driver, uint64_t maxWorkCapacity) +IOPerfControlClient * +IOPerfControlClient::copyClient(IOService *driver, uint64_t maxWorkCapacity) { - IOPerfControlClient *client = atomic_load_explicit(&gSharedClient, memory_order_acquire); - if (client == nullptr) { - IOPerfControlClient *expected = client; - client = new IOPerfControlClient; - if (!client || !client->init(driver, maxWorkCapacity)) - panic("could not create IOPerfControlClient"); - if (!atomic_compare_exchange_strong_explicit(&gSharedClient, &expected, client, memory_order_acq_rel, - memory_order_acquire)) { - client->release(); - client = expected; - } - } - // TODO: add maxWorkCapacity to existing client - client->retain(); - return client; + IOPerfControlClient *client = atomic_load_explicit(&gSharedClient, memory_order_acquire); + if (client == nullptr) { + IOPerfControlClient *expected = client; + client = new IOPerfControlClient; + if (!client || !client->init(driver, maxWorkCapacity)) { + panic("could not create IOPerfControlClient"); + } + if (!atomic_compare_exchange_strong_explicit(&gSharedClient, &expected, client, memory_order_acq_rel, + memory_order_acquire)) { + client->release(); + client = expected; + } + } + // TODO: add maxWorkCapacity to existing client + client->retain(); + return client; } -uint64_t IOPerfControlClient::allocateToken(thread_group *thread_group) +uint64_t +IOPerfControlClient::allocateToken(thread_group *thread_group) { - uint64_t token = kIOPerfControlClientWorkUntracked; + uint64_t token = kIOPerfControlClientWorkUntracked; - return token; + return token; } -void IOPerfControlClient::deallocateToken(uint64_t token) +void +IOPerfControlClient::deallocateToken(uint64_t token) { } -bool IOPerfControlClient::getEntryForToken(uint64_t token, IOPerfControlClient::WorkTableEntry &entry) +bool +IOPerfControlClient::getEntryForToken(uint64_t token, IOPerfControlClient::WorkTableEntry &entry) { - if (token == kIOPerfControlClientWorkUntracked) - return false; - - if (token >= 
kWorkTableNumEntries) - panic("Invalid work token (%llu): index out of bounds.", token); - - entry = workTable[token]; - auto *thread_group = entry.thread_group; - assertf(thread_group, "Invalid work token: %llu", token); - return thread_group != nullptr; + if (token == kIOPerfControlClientWorkUntracked) { + return false; + } + + if (token >= kWorkTableNumEntries) { + panic("Invalid work token (%llu): index out of bounds.", token); + } + + entry = workTable[token]; + auto *thread_group = entry.thread_group; + assertf(thread_group, "Invalid work token: %llu", token); + return thread_group != nullptr; } -void IOPerfControlClient::markEntryStarted(uint64_t token, bool started) +void +IOPerfControlClient::markEntryStarted(uint64_t token, bool started) { - if (token == kIOPerfControlClientWorkUntracked) - return; + if (token == kIOPerfControlClientWorkUntracked) { + return; + } - if (token >= kWorkTableNumEntries) - panic("Invalid work token (%llu): index out of bounds.", token); + if (token >= kWorkTableNumEntries) { + panic("Invalid work token (%llu): index out of bounds.", token); + } - workTable[token].started = started; + workTable[token].started = started; } -IOReturn IOPerfControlClient::registerDevice(__unused IOService *driver, IOService *device) +IOReturn +IOPerfControlClient::registerDevice(__unused IOService *driver, IOService *device) { - IOReturn ret = kIOReturnSuccess; + IOReturn ret = kIOReturnSuccess; - IOLockLock(interfaceLock); + IOLockLock(interfaceLock); - if (interface.version > 0) - ret = interface.registerDevice(device); - else - deviceRegistrationList->setObject(device); + if (interface.version > 0) { + ret = interface.registerDevice(device); + } else { + deviceRegistrationList->setObject(device); + } - IOLockUnlock(interfaceLock); + IOLockUnlock(interfaceLock); - return ret; + return ret; } -void IOPerfControlClient::unregisterDevice(__unused IOService *driver, IOService *device) +void +IOPerfControlClient::unregisterDevice(__unused IOService *driver, IOService *device) { - IOLockLock(interfaceLock); + IOLockLock(interfaceLock); - if (interface.version > 0) - interface.unregisterDevice(device); - else - deviceRegistrationList->removeObject(device); + if (interface.version > 0) { + interface.unregisterDevice(device); + } else { + deviceRegistrationList->removeObject(device); + } - IOLockUnlock(interfaceLock); + IOLockUnlock(interfaceLock); } -uint64_t IOPerfControlClient::workSubmit(IOService *device, WorkSubmitArgs *args) +uint64_t +IOPerfControlClient::workSubmit(IOService *device, WorkSubmitArgs *args) { - return kIOPerfControlClientWorkUntracked; + return kIOPerfControlClientWorkUntracked; } -uint64_t IOPerfControlClient::workSubmitAndBegin(IOService *device, WorkSubmitArgs *submitArgs, WorkBeginArgs *beginArgs) +uint64_t +IOPerfControlClient::workSubmitAndBegin(IOService *device, WorkSubmitArgs *submitArgs, WorkBeginArgs *beginArgs) { - return kIOPerfControlClientWorkUntracked; + return kIOPerfControlClientWorkUntracked; } -void IOPerfControlClient::workBegin(IOService *device, uint64_t token, WorkBeginArgs *args) +void +IOPerfControlClient::workBegin(IOService *device, uint64_t token, WorkBeginArgs *args) { } -void IOPerfControlClient::workEnd(IOService *device, uint64_t token, WorkEndArgs *args, bool done) +void +IOPerfControlClient::workEnd(IOService *device, uint64_t token, WorkEndArgs *args, bool done) { } -IOReturn IOPerfControlClient::registerPerformanceController(PerfControllerInterface pci) +IOReturn 
+IOPerfControlClient::registerPerformanceController(PerfControllerInterface pci) { - IOReturn result = kIOReturnError; + IOReturn result = kIOReturnError; - IOLockLock(interfaceLock); + IOLockLock(interfaceLock); - if (interface.version == 0 && pci.version > 0) { - assert(pci.registerDevice && pci.unregisterDevice && pci.workCanSubmit && pci.workSubmit && pci.workBegin && pci.workEnd); - result = kIOReturnSuccess; + if (interface.version == 0 && pci.version > 0) { + assert(pci.registerDevice && pci.unregisterDevice && pci.workCanSubmit && pci.workSubmit && pci.workBegin && pci.workEnd); + result = kIOReturnSuccess; - OSObject *obj; - while ((obj = deviceRegistrationList->getAnyObject())) { - IOService *device = OSDynamicCast(IOService, obj); - if (device) - pci.registerDevice(device); - deviceRegistrationList->removeObject(obj); - } + OSObject *obj; + while ((obj = deviceRegistrationList->getAnyObject())) { + IOService *device = OSDynamicCast(IOService, obj); + if (device) { + pci.registerDevice(device); + } + deviceRegistrationList->removeObject(obj); + } - interface = pci; - } + interface = pci; + } - IOLockUnlock(interfaceLock); + IOLockUnlock(interfaceLock); - return result; + return result; } diff --git a/iokit/Kernel/IOPlatformExpert.cpp b/iokit/Kernel/IOPlatformExpert.cpp index 035177e8f..d7087fbb2 100644 --- a/iokit/Kernel/IOPlatformExpert.cpp +++ b/iokit/Kernel/IOPlatformExpert.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
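// The registerDevice()/registerPerformanceController() pair in the
// IOPerfControlClient hunks above implements deferred registration:
// devices that arrive before any performance controller exists are
// parked in deviceRegistrationList and replayed, under the same lock,
// once a controller registers. A minimal user-space sketch of that
// hand-off (std::mutex standing in for IOLock; every name here is
// illustrative, not kernel API):
#include <functional>
#include <mutex>
#include <vector>

struct PendingRegistry {
    std::mutex lock;
    std::function<void(int)> controller;   // empty until one registers
    std::vector<int> pending;              // devices that arrived early

    void registerDevice(int dev) {
        std::lock_guard<std::mutex> g(lock);
        if (controller) {
            controller(dev);               // controller present: direct path
        } else {
            pending.push_back(dev);        // park until registration
        }
    }

    void registerController(std::function<void(int)> c) {
        std::lock_guard<std::mutex> g(lock);
        controller = std::move(c);
        for (int dev : pending) {
            controller(dev);               // replay the backlog exactly once
        }
        pending.clear();
    }
};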
- * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #include #include #include @@ -62,7 +62,7 @@ boolean_t coprocessor_cross_panic_enabled = TRUE; #define APPLE_SECURE_BOOT_VARIABLE_GUID "94b73556-2197-4702-82a8-3e1337dafbfb" #endif /* !CONFIG_EMBEDDED */ -void printDictionaryKeys (OSDictionary * inDictionary, char * inMsg); +void printDictionaryKeys(OSDictionary * inDictionary, char * inMsg); static void getCStringForObject(OSObject *inObj, char *outStr, size_t outStrLen); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -71,17 +71,17 @@ static void getCStringForObject(OSObject *inObj, char *outStr, size_t outStrLen) OSDefineMetaClassAndStructors(IOPlatformExpert, IOService) -OSMetaClassDefineReservedUsed(IOPlatformExpert, 0); -OSMetaClassDefineReservedUsed(IOPlatformExpert, 1); -OSMetaClassDefineReservedUsed(IOPlatformExpert, 2); -OSMetaClassDefineReservedUsed(IOPlatformExpert, 3); -OSMetaClassDefineReservedUsed(IOPlatformExpert, 4); - -OSMetaClassDefineReservedUnused(IOPlatformExpert, 5); -OSMetaClassDefineReservedUnused(IOPlatformExpert, 6); -OSMetaClassDefineReservedUnused(IOPlatformExpert, 7); -OSMetaClassDefineReservedUnused(IOPlatformExpert, 8); -OSMetaClassDefineReservedUnused(IOPlatformExpert, 9); +OSMetaClassDefineReservedUsed(IOPlatformExpert, 0); +OSMetaClassDefineReservedUsed(IOPlatformExpert, 1); +OSMetaClassDefineReservedUsed(IOPlatformExpert, 2); +OSMetaClassDefineReservedUsed(IOPlatformExpert, 3); +OSMetaClassDefineReservedUsed(IOPlatformExpert, 4); + +OSMetaClassDefineReservedUnused(IOPlatformExpert, 5); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 6); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 7); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 8); +OSMetaClassDefineReservedUnused(IOPlatformExpert, 9); OSMetaClassDefineReservedUnused(IOPlatformExpert, 10); OSMetaClassDefineReservedUnused(IOPlatformExpert, 11); @@ -94,322 +94,367 @@ OSSymbol * gPlatformInterruptControllerName; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IOPlatformExpert::attach( IOService * provider ) +bool +IOPlatformExpert::attach( IOService * provider ) { + if (!super::attach( provider )) { + return false; + } - if( !super::attach( provider )) - return( false); - - return( true); + return true; } -bool IOPlatformExpert::start( IOService * provider ) +bool +IOPlatformExpert::start( IOService * provider ) { - IORangeAllocator * physicalRanges; - OSData * busFrequency; - uint32_t debugFlags; + IORangeAllocator * physicalRanges; + OSData * busFrequency; + uint32_t debugFlags; + + + if (!super::start(provider)) { + return false; + } - - if (!super::start(provider)) - return false; - - // Override the mapper present flag is requested by boot arguments, if SIP disabled. + // Override the mapper present flag if requested by boot arguments, if SIP disabled.
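// Note on the gate that follows: csr_check() returns 0 when System
// Integrity Protection permits the given capability, so the "dart"
// boot-arg override below is honored only on SIP-relaxed systems (the
// "-x" variant additionally requires DEBUG/DEVELOPMENT kernels).
// Condensed shape of the check, with the body reduced to a comment:
#if CONFIG_CSR
if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) == 0)   // 0 == capability allowed
#endif
{
    // ... parse "dart"/-x boot-args and drop kIOPlatformMapperPresentKey ...
}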
#if CONFIG_CSR - if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) == 0) + if (csr_check(CSR_ALLOW_UNRESTRICTED_FS) == 0) #endif /* CONFIG_CSR */ - { - if (PE_parse_boot_argn("dart", &debugFlags, sizeof (debugFlags)) && (debugFlags == 0)) - removeProperty(kIOPlatformMapperPresentKey); + { + if (PE_parse_boot_argn("dart", &debugFlags, sizeof(debugFlags)) && (debugFlags == 0)) { + removeProperty(kIOPlatformMapperPresentKey); + } #if DEBUG || DEVELOPMENT - if (PE_parse_boot_argn("-x", &debugFlags, sizeof (debugFlags))) - removeProperty(kIOPlatformMapperPresentKey); + if (PE_parse_boot_argn("-x", &debugFlags, sizeof(debugFlags))) { + removeProperty(kIOPlatformMapperPresentKey); + } #endif /* DEBUG || DEVELOPMENT */ - } - - // Register the presence or lack thereof a system - // PCI address mapper with the IOMapper class - IOMapper::setMapperRequired(0 != getProperty(kIOPlatformMapperPresentKey)); - - gIOInterruptControllers = OSDictionary::withCapacity(1); - gIOInterruptControllersLock = IOLockAlloc(); - - // Correct the bus frequency in the device tree. - busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); - provider->setProperty("clock-frequency", busFrequency); - busFrequency->release(); - - gPlatformInterruptControllerName = (OSSymbol *)OSSymbol::withCStringNoCopy("IOPlatformInterruptController"); - - physicalRanges = IORangeAllocator::withRange(0xffffffff, 1, 16, - IORangeAllocator::kLocking); - assert(physicalRanges); - setProperty("Platform Memory Ranges", physicalRanges); - - setPlatform( this ); - gIOPlatform = this; - - PMInstantiatePowerDomains(); - - // Parse the serial-number data and publish a user-readable string - OSData* mydata = (OSData*) (provider->getProperty("serial-number")); - if (mydata != NULL) { - OSString *serNoString = createSystemSerialNumberString(mydata); - if (serNoString != NULL) { - provider->setProperty(kIOPlatformSerialNumberKey, serNoString); - serNoString->release(); - } - } + } + + // Register the presence or lack thereof of a system + // PCI address mapper with the IOMapper class + IOMapper::setMapperRequired(0 != getProperty(kIOPlatformMapperPresentKey)); + + gIOInterruptControllers = OSDictionary::withCapacity(1); + gIOInterruptControllersLock = IOLockAlloc(); + + // Correct the bus frequency in the device tree.
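// The "clock-frequency" publication just below follows the standard
// OSObject ownership rule: a with...() factory returns an object holding
// one reference, setProperty() takes its own retain, and the creator
// releases immediately. Standalone miniature of that create/set/release
// idiom (Obj and Registry are stand-ins, not IOKit classes):
#include <cassert>

struct Obj {
    int refs = 1;                                 // factory-style creation
    void retain()  { ++refs; }
    void release() { if (--refs == 0) { delete this; } }
};

struct Registry {
    Obj *prop = nullptr;
    void setProperty(Obj *o) { o->retain(); prop = o; }   // container's ref
};

int main()
{
    Registry r;
    Obj *busFrequency = new Obj;     // create: refs == 1
    r.setProperty(busFrequency);     // container retains: refs == 2
    busFrequency->release();         // creator drops its ref: refs == 1
    assert(r.prop->refs == 1);       // the registry keeps it alive
    r.prop->release();               // container teardown would do this
    return 0;
}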
+ busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4); + provider->setProperty("clock-frequency", busFrequency); + busFrequency->release(); + + gPlatformInterruptControllerName = (OSSymbol *)OSSymbol::withCStringNoCopy("IOPlatformInterruptController"); + + physicalRanges = IORangeAllocator::withRange(0xffffffff, 1, 16, + IORangeAllocator::kLocking); + assert(physicalRanges); + setProperty("Platform Memory Ranges", physicalRanges); + + setPlatform( this ); + gIOPlatform = this; + + PMInstantiatePowerDomains(); + + // Parse the serial-number data and publish a user-readable string + OSData* mydata = (OSData*) (provider->getProperty("serial-number")); + if (mydata != NULL) { + OSString *serNoString = createSystemSerialNumberString(mydata); + if (serNoString != NULL) { + provider->setProperty(kIOPlatformSerialNumberKey, serNoString); + serNoString->release(); + } + } #if !CONFIG_EMBEDDED - if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) { - coprocessor_paniclog_flush = TRUE; - extended_debug_log_init(); - } + if (PEGetCoprocessorVersion() >= kCoprocessorVersion2) { + coprocessor_paniclog_flush = TRUE; + extended_debug_log_init(); + } #endif - return( configure(provider) ); + return configure(provider); } -bool IOPlatformExpert::configure( IOService * provider ) -{ - OSSet * topLevel; - OSDictionary * dict; - IOService * nub; - - topLevel = OSDynamicCast( OSSet, getProperty("top-level")); - - if( topLevel) { - while( (dict = OSDynamicCast( OSDictionary, - topLevel->getAnyObject()))) { - dict->retain(); - topLevel->removeObject( dict ); - nub = createNub( dict ); - if( 0 == nub) - continue; - dict->release(); - nub->attach( this ); - nub->registerService(); - } - } +bool +IOPlatformExpert::configure( IOService * provider ) +{ + OSSet * topLevel; + OSDictionary * dict; + IOService * nub; + + topLevel = OSDynamicCast( OSSet, getProperty("top-level")); + + if (topLevel) { + while ((dict = OSDynamicCast( OSDictionary, + topLevel->getAnyObject()))) { + dict->retain(); + topLevel->removeObject( dict ); + nub = createNub( dict ); + if (0 == nub) { + continue; + } + dict->release(); + nub->attach( this ); + nub->registerService(); + } + } - return( true ); + return true; } -IOService * IOPlatformExpert::createNub( OSDictionary * from ) +IOService * +IOPlatformExpert::createNub( OSDictionary * from ) { - IOService * nub; + IOService * nub; - nub = new IOPlatformDevice; - if(nub) { - if( !nub->init( from )) { - nub->release(); - nub = 0; + nub = new IOPlatformDevice; + if (nub) { + if (!nub->init( from )) { + nub->release(); + nub = 0; + } } - } - return( nub); + return nub; } -bool IOPlatformExpert::compareNubName( const IOService * nub, - OSString * name, OSString ** matched ) const +bool +IOPlatformExpert::compareNubName( const IOService * nub, + OSString * name, OSString ** matched ) const { - return( nub->IORegistryEntry::compareName( name, matched )); + return nub->IORegistryEntry::compareName( name, matched ); } -IOReturn IOPlatformExpert::getNubResources( IOService * nub ) +IOReturn +IOPlatformExpert::getNubResources( IOService * nub ) { - return( kIOReturnSuccess ); + return kIOReturnSuccess; } -long IOPlatformExpert::getBootROMType(void) +long +IOPlatformExpert::getBootROMType(void) { - return _peBootROMType; + return _peBootROMType; } -long IOPlatformExpert::getChipSetType(void) +long +IOPlatformExpert::getChipSetType(void) { - return _peChipSetType; + return _peChipSetType; } -long IOPlatformExpert::getMachineType(void) +long 
+IOPlatformExpert::getMachineType(void) { - return _peMachineType; + return _peMachineType; } -void IOPlatformExpert::setBootROMType(long peBootROMType) +void +IOPlatformExpert::setBootROMType(long peBootROMType) { - _peBootROMType = peBootROMType; + _peBootROMType = peBootROMType; } -void IOPlatformExpert::setChipSetType(long peChipSetType) +void +IOPlatformExpert::setChipSetType(long peChipSetType) { - _peChipSetType = peChipSetType; + _peChipSetType = peChipSetType; } -void IOPlatformExpert::setMachineType(long peMachineType) +void +IOPlatformExpert::setMachineType(long peMachineType) { - _peMachineType = peMachineType; + _peMachineType = peMachineType; } -bool IOPlatformExpert::getMachineName( char * /*name*/, int /*maxLength*/) +bool +IOPlatformExpert::getMachineName( char * /*name*/, int /*maxLength*/) { - return( false ); + return false; } -bool IOPlatformExpert::getModelName( char * /*name*/, int /*maxLength*/) +bool +IOPlatformExpert::getModelName( char * /*name*/, int /*maxLength*/) { - return( false ); + return false; } -OSString* IOPlatformExpert::createSystemSerialNumberString(OSData* myProperty) +OSString* +IOPlatformExpert::createSystemSerialNumberString(OSData* myProperty) { - return NULL; + return NULL; } -IORangeAllocator * IOPlatformExpert::getPhysicalRangeAllocator(void) +IORangeAllocator * +IOPlatformExpert::getPhysicalRangeAllocator(void) { - return(OSDynamicCast(IORangeAllocator, - getProperty("Platform Memory Ranges"))); + return OSDynamicCast(IORangeAllocator, + getProperty("Platform Memory Ranges")); } int (*PE_halt_restart)(unsigned int type) = 0; -int IOPlatformExpert::haltRestart(unsigned int type) +int +IOPlatformExpert::haltRestart(unsigned int type) { - if (type == kPEPanicSync) return 0; + if (type == kPEPanicSync) { + return 0; + } - if (type == kPEHangCPU) while (true) {} + if (type == kPEHangCPU) { + while (true) { + } + } - if (type == kPEUPSDelayHaltCPU) { - // RestartOnPowerLoss feature was turned on, proceed with shutdown. - type = kPEHaltCPU; - } + if (type == kPEUPSDelayHaltCPU) { + // RestartOnPowerLoss feature was turned on, proceed with shutdown. 
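// haltRestart() ultimately defers to PE_halt_restart, the classic XNU
// platform-hook pattern: a global function pointer that stays NULL until
// a platform driver installs a handler, with -1 returned when none has.
// Standalone sketch of that shape (names hypothetical):
static int (*halt_hook)(unsigned int type) = nullptr;   // set by a driver at boot

static int
dispatch_halt(unsigned int type)
{
    if (halt_hook) {
        return (*halt_hook)(type);   // platform-specific halt/restart path
    }
    return -1;                       // no handler registered
}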
+ type = kPEHaltCPU; + } #if !CONFIG_EMBEDDED - // On ARM kPEPanicRestartCPU is supported in the drivers - if (type == kPEPanicRestartCPU) - type = kPERestartCPU; + // On ARM kPEPanicRestartCPU is supported in the drivers + if (type == kPEPanicRestartCPU) { + type = kPERestartCPU; + } #endif - if (PE_halt_restart) return (*PE_halt_restart)(type); - else return -1; + if (PE_halt_restart) { + return (*PE_halt_restart)(type); + } else { + return -1; + } } -void IOPlatformExpert::sleepKernel(void) +void +IOPlatformExpert::sleepKernel(void) { #if 0 - long cnt; - boolean_t intState; - - intState = ml_set_interrupts_enabled(false); - - for (cnt = 0; cnt < 10000; cnt++) { - IODelay(1000); - } - - ml_set_interrupts_enabled(intState); + long cnt; + boolean_t intState; + + intState = ml_set_interrupts_enabled(false); + + for (cnt = 0; cnt < 10000; cnt++) { + IODelay(1000); + } + + ml_set_interrupts_enabled(intState); #else // PE_initialize_console(0, kPEDisableScreen); - - IOCPUSleepKernel(); - + + IOCPUSleepKernel(); + // PE_initialize_console(0, kPEEnableScreen); #endif } -long IOPlatformExpert::getGMTTimeOfDay(void) +long +IOPlatformExpert::getGMTTimeOfDay(void) { - return(0); + return 0; } -void IOPlatformExpert::setGMTTimeOfDay(long secs) +void +IOPlatformExpert::setGMTTimeOfDay(long secs) { } -IOReturn IOPlatformExpert::getConsoleInfo( PE_Video * consoleInfo ) +IOReturn +IOPlatformExpert::getConsoleInfo( PE_Video * consoleInfo ) { - return( PE_current_console( consoleInfo)); + return PE_current_console( consoleInfo); } -IOReturn IOPlatformExpert::setConsoleInfo( PE_Video * consoleInfo, - unsigned int op) +IOReturn +IOPlatformExpert::setConsoleInfo( PE_Video * consoleInfo, + unsigned int op) { - return( PE_initialize_console( consoleInfo, op )); + return PE_initialize_console( consoleInfo, op ); } -IOReturn IOPlatformExpert::registerInterruptController(OSSymbol *name, IOInterruptController *interruptController) +IOReturn +IOPlatformExpert::registerInterruptController(OSSymbol *name, IOInterruptController *interruptController) { - IOLockLock(gIOInterruptControllersLock); - - gIOInterruptControllers->setObject(name, interruptController); - - IOLockWakeup(gIOInterruptControllersLock, - gIOInterruptControllers, /* one-thread */ false); + IOLockLock(gIOInterruptControllersLock); + + gIOInterruptControllers->setObject(name, interruptController); + + IOLockWakeup(gIOInterruptControllersLock, + gIOInterruptControllers, /* one-thread */ false); + + IOLockUnlock(gIOInterruptControllersLock); - IOLockUnlock(gIOInterruptControllersLock); - - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOPlatformExpert::deregisterInterruptController(OSSymbol *name) +IOReturn +IOPlatformExpert::deregisterInterruptController(OSSymbol *name) { - IOLockLock(gIOInterruptControllersLock); - - gIOInterruptControllers->removeObject(name); - - IOLockUnlock(gIOInterruptControllersLock); - - return kIOReturnSuccess; + IOLockLock(gIOInterruptControllersLock); + + gIOInterruptControllers->removeObject(name); + + IOLockUnlock(gIOInterruptControllersLock); + + return kIOReturnSuccess; } -IOInterruptController *IOPlatformExpert::lookUpInterruptController(OSSymbol *name) +IOInterruptController * +IOPlatformExpert::lookUpInterruptController(OSSymbol *name) { - OSObject *object; - - IOLockLock(gIOInterruptControllersLock); - while (1) { - - object = gIOInterruptControllers->getObject(name); - - if (object != 0) - break; - - IOLockSleep(gIOInterruptControllersLock, - gIOInterruptControllers, THREAD_UNINT); - } - - 
IOLockUnlock(gIOInterruptControllersLock); - return OSDynamicCast(IOInterruptController, object); + OSObject *object; + + IOLockLock(gIOInterruptControllersLock); + while (1) { + object = gIOInterruptControllers->getObject(name); + + if (object != 0) { + break; + } + + IOLockSleep(gIOInterruptControllersLock, + gIOInterruptControllers, THREAD_UNINT); + } + + IOLockUnlock(gIOInterruptControllersLock); + return OSDynamicCast(IOInterruptController, object); } -void IOPlatformExpert::setCPUInterruptProperties(IOService *service) +void +IOPlatformExpert::setCPUInterruptProperties(IOService *service) { - IOCPUInterruptController *controller; - - controller = OSDynamicCast(IOCPUInterruptController, waitForService(serviceMatching("IOCPUInterruptController"))); - if (controller) controller->setCPUInterruptProperties(service); + IOCPUInterruptController *controller; + + controller = OSDynamicCast(IOCPUInterruptController, waitForService(serviceMatching("IOCPUInterruptController"))); + if (controller) { + controller->setCPUInterruptProperties(service); + } } -bool IOPlatformExpert::atInterruptLevel(void) +bool +IOPlatformExpert::atInterruptLevel(void) { - return ml_at_interrupt_context(); + return ml_at_interrupt_context(); } -bool IOPlatformExpert::platformAdjustService(IOService */*service*/) +bool +IOPlatformExpert::platformAdjustService(IOService */*service*/) { - return true; + return true; } -void IOPlatformExpert::getUTCTimeOfDay(clock_sec_t * secs, clock_nsec_t * nsecs) +void +IOPlatformExpert::getUTCTimeOfDay(clock_sec_t * secs, clock_nsec_t * nsecs) { - *secs = getGMTTimeOfDay(); - *nsecs = 0; + *secs = getGMTTimeOfDay(); + *nsecs = 0; } -void IOPlatformExpert::setUTCTimeOfDay(clock_sec_t secs, __unused clock_nsec_t nsecs) +void +IOPlatformExpert::setUTCTimeOfDay(clock_sec_t secs, __unused clock_nsec_t nsecs) { - setGMTTimeOfDay(secs); + setGMTTimeOfDay(secs); } @@ -418,18 +463,19 @@ void IOPlatformExpert::setUTCTimeOfDay(clock_sec_t secs, __unused clock_nsec_t n // //********************************************************************************* -void IOPlatformExpert:: +void +IOPlatformExpert:: PMLog(const char *who, unsigned long event, - unsigned long param1, unsigned long param2) + unsigned long param1, unsigned long param2) { clock_sec_t nows; clock_usec_t nowus; clock_get_system_microtime(&nows, &nowus); nowus += (nows % 1000) * 1000000; - kprintf("pm%u %p %.30s %d %lx %lx\n", - nowus, OBFUSCATE(current_thread()), who, // Identity - (int) event, (long)OBFUSCATE(param1), (long)OBFUSCATE(param2)); // Args + kprintf("pm%u %p %.30s %d %lx %lx\n", + nowus, OBFUSCATE(current_thread()), who, // Identity + (int) event, (long)OBFUSCATE(param1), (long)OBFUSCATE(param2)); // Args } @@ -438,16 +484,17 @@ PMLog(const char *who, unsigned long event, // // In this vanilla implementation, a Root Power Domain is instantiated. // All other objects which register will be children of this Root. -// Where this is inappropriate, PMInstantiatePowerDomains is overridden +// Where this is inappropriate, PMInstantiatePowerDomains is overridden // in a platform-specific subclass. 
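// On the PMLog() timestamp in the hunk above: nowus += (nows % 1000) *
// 1000000 folds the seconds into the microsecond field, giving
// microseconds within a rolling 1000-second window (always below 10^9,
// so it prints safely as %u). Quick standalone check of the arithmetic:
#include <cassert>
#include <cstdint>

int main()
{
    uint64_t nows  = 1234;   // seconds
    uint64_t nowus = 567;    // microseconds within the current second
    nowus += (nows % 1000) * 1000000;
    assert(nowus == 234000567);   // 234 s into the window, plus 567 us
    return 0;
}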
//********************************************************************************* -void IOPlatformExpert::PMInstantiatePowerDomains ( void ) +void +IOPlatformExpert::PMInstantiatePowerDomains( void ) { - root = new IOPMrootDomain; - root->init(); - root->attach(this); - root->start(this); + root = new IOPMrootDomain; + root->init(); + root->attach(this); + root->start(this); } @@ -458,9 +505,10 @@ void IOPlatformExpert::PMInstantiatePowerDomains ( void ) // Where this is inappropriate, PMRegisterDevice is overridden in a platform-specific subclass. //********************************************************************************* -void IOPlatformExpert::PMRegisterDevice(IOService * theNub, IOService * theDevice) +void +IOPlatformExpert::PMRegisterDevice(IOService * theNub, IOService * theDevice) { - root->addPowerChild ( theDevice ); + root->addPowerChild( theDevice ); } //********************************************************************************* @@ -468,9 +516,10 @@ void IOPlatformExpert::PMRegisterDevice(IOService * theNub, IOService * theDevic // //********************************************************************************* -bool IOPlatformExpert::hasPMFeature (unsigned long featureMask) +bool +IOPlatformExpert::hasPMFeature(unsigned long featureMask) { - return ((_pePMFeatures & featureMask) != 0); + return (_pePMFeatures & featureMask) != 0; } //********************************************************************************* @@ -478,9 +527,10 @@ bool IOPlatformExpert::hasPMFeature (unsigned long featureMask) // //********************************************************************************* -bool IOPlatformExpert::hasPrivPMFeature (unsigned long privFeatureMask) +bool +IOPlatformExpert::hasPrivPMFeature(unsigned long privFeatureMask) { - return ((_pePrivPMFeatures & privFeatureMask) != 0); + return (_pePrivPMFeatures & privFeatureMask) != 0; } //********************************************************************************* @@ -488,9 +538,10 @@ bool IOPlatformExpert::hasPrivPMFeature (unsigned long privFeatureMask) // //********************************************************************************* -int IOPlatformExpert::numBatteriesSupported (void) +int +IOPlatformExpert::numBatteriesSupported(void) { - return (_peNumBatteriesSupported); + return _peNumBatteriesSupported; } //********************************************************************************* @@ -504,99 +555,102 @@ int IOPlatformExpert::numBatteriesSupported (void) // registered for the given service. 
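// The rule CheckSubTree() implements below: any criterion a power-tree
// node omits counts as satisfied, so a node claims the device only when
// the device, provider, and multiple-parent checks all pass. Condensed
// standalone model of that three-way match (-1 models an absent
// criterion; the real code compares OSDictionary contents):
#include <cassert>

static bool
node_matches(int deviceOK, int providerOK, int multiParentOK)
{
    bool device   = (deviceOK      == -1) || (deviceOK      != 0);
    bool provider = (providerOK    == -1) || (providerOK    != 0);
    bool multi    = (multiParentOK == -1) || (multiParentOK != 0);
    return device && provider && multi;
}

int main()
{
    assert(node_matches(-1, -1, -1));   // no criteria: matches anything
    assert(!node_matches(0, -1, -1));   // failing device dictionary rejects
    assert(node_matches(1, 1, -1));     // both dictionaries matched
    return 0;
}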
//********************************************************************************* -bool IOPlatformExpert::CheckSubTree (OSArray * inSubTree, IOService * theNub, IOService * theDevice, OSDictionary * theParent) -{ - unsigned int i; - unsigned int numPowerTreeNodes; - OSDictionary * entry; - OSDictionary * matchingDictionary; - OSDictionary * providerDictionary; - OSDictionary * deviceDictionary; - OSDictionary * nubDictionary; - OSArray * children; - bool nodeFound = false; - bool continueSearch = false; - bool deviceMatch = false; - bool providerMatch = false; - bool multiParentMatch = false; - - if ( (NULL == theDevice) || (NULL == inSubTree) ) - return false; - - numPowerTreeNodes = inSubTree->getCount (); - - // iterate through the power tree to find a home for this device - - for ( i = 0; i < numPowerTreeNodes; i++ ) { - - entry = (OSDictionary *) inSubTree->getObject (i); - - matchingDictionary = (OSDictionary *) entry->getObject ("device"); - providerDictionary = (OSDictionary *) entry->getObject ("provider"); - - deviceMatch = true; // if no matching dictionary, this is not a criteria and so must match - if ( matchingDictionary ) { - deviceMatch = false; - if ( NULL != (deviceDictionary = theDevice->dictionaryWithProperties ())) { - deviceMatch = deviceDictionary->isEqualTo ( matchingDictionary, matchingDictionary ); - deviceDictionary->release (); - } - } - - providerMatch = true; // we indicate a match if there is no nub or provider - if ( theNub && providerDictionary ) { - providerMatch = false; - if ( NULL != (nubDictionary = theNub->dictionaryWithProperties ()) ) { - providerMatch = nubDictionary->isEqualTo ( providerDictionary, providerDictionary ); - nubDictionary->release (); - } - } - - multiParentMatch = true; // again we indicate a match if there is no multi-parent node - if (deviceMatch && providerMatch) { - if (NULL != multipleParentKeyValue) { - OSNumber * aNumber = (OSNumber *) entry->getObject ("multiple-parent"); - multiParentMatch = (NULL != aNumber) ? multipleParentKeyValue->isEqualTo (aNumber) : false; - } - } - - nodeFound = (deviceMatch && providerMatch && multiParentMatch); - - // if the power tree specifies a provider dictionary but theNub is - // NULL then we cannot match with this entry. 
- - if ( theNub == NULL && providerDictionary != NULL ) - nodeFound = false; - - // if this node is THE ONE...then register the device - - if ( nodeFound ) { - if (RegisterServiceInTree (theDevice, entry, theParent, theNub) ) { - - if ( kIOLogPower & gIOKitDebug) - IOLog ("PMRegisterDevice/CheckSubTree - service registered!\n"); - - numInstancesRegistered++; - - // determine if we need to search for additional nodes for this item - multipleParentKeyValue = (OSNumber *) entry->getObject ("multiple-parent"); - } - else - nodeFound = false; - } - - continueSearch = ( (false == nodeFound) || (NULL != multipleParentKeyValue) ); - - if ( continueSearch && (NULL != (children = (OSArray *) entry->getObject ("children"))) ) { - nodeFound = CheckSubTree ( children, theNub, theDevice, entry ); - continueSearch = ( (false == nodeFound) || (NULL != multipleParentKeyValue) ); - } +bool +IOPlatformExpert::CheckSubTree(OSArray * inSubTree, IOService * theNub, IOService * theDevice, OSDictionary * theParent) +{ + unsigned int i; + unsigned int numPowerTreeNodes; + OSDictionary * entry; + OSDictionary * matchingDictionary; + OSDictionary * providerDictionary; + OSDictionary * deviceDictionary; + OSDictionary * nubDictionary; + OSArray * children; + bool nodeFound = false; + bool continueSearch = false; + bool deviceMatch = false; + bool providerMatch = false; + bool multiParentMatch = false; + + if ((NULL == theDevice) || (NULL == inSubTree)) { + return false; + } - if ( false == continueSearch ) - break; - } + numPowerTreeNodes = inSubTree->getCount(); + + // iterate through the power tree to find a home for this device + + for (i = 0; i < numPowerTreeNodes; i++) { + entry = (OSDictionary *) inSubTree->getObject(i); + + matchingDictionary = (OSDictionary *) entry->getObject("device"); + providerDictionary = (OSDictionary *) entry->getObject("provider"); + + deviceMatch = true; // if no matching dictionary, this is not a criteria and so must match + if (matchingDictionary) { + deviceMatch = false; + if (NULL != (deviceDictionary = theDevice->dictionaryWithProperties())) { + deviceMatch = deviceDictionary->isEqualTo( matchingDictionary, matchingDictionary ); + deviceDictionary->release(); + } + } + + providerMatch = true; // we indicate a match if there is no nub or provider + if (theNub && providerDictionary) { + providerMatch = false; + if (NULL != (nubDictionary = theNub->dictionaryWithProperties())) { + providerMatch = nubDictionary->isEqualTo( providerDictionary, providerDictionary ); + nubDictionary->release(); + } + } + + multiParentMatch = true; // again we indicate a match if there is no multi-parent node + if (deviceMatch && providerMatch) { + if (NULL != multipleParentKeyValue) { + OSNumber * aNumber = (OSNumber *) entry->getObject("multiple-parent"); + multiParentMatch = (NULL != aNumber) ? multipleParentKeyValue->isEqualTo(aNumber) : false; + } + } + + nodeFound = (deviceMatch && providerMatch && multiParentMatch); + + // if the power tree specifies a provider dictionary but theNub is + // NULL then we cannot match with this entry. 
+ + if (theNub == NULL && providerDictionary != NULL) { + nodeFound = false; + } + + // if this node is THE ONE...then register the device + + if (nodeFound) { + if (RegisterServiceInTree(theDevice, entry, theParent, theNub)) { + if (kIOLogPower & gIOKitDebug) { + IOLog("PMRegisterDevice/CheckSubTree - service registered!\n"); + } + + numInstancesRegistered++; + + // determine if we need to search for additional nodes for this item + multipleParentKeyValue = (OSNumber *) entry->getObject("multiple-parent"); + } else { + nodeFound = false; + } + } + + continueSearch = ((false == nodeFound) || (NULL != multipleParentKeyValue)); + + if (continueSearch && (NULL != (children = (OSArray *) entry->getObject("children")))) { + nodeFound = CheckSubTree( children, theNub, theDevice, entry ); + continueSearch = ((false == nodeFound) || (NULL != multipleParentKeyValue)); + } + + if (false == continueSearch) { + break; + } + } - return ( nodeFound ); + return nodeFound; } //********************************************************************************* @@ -605,51 +659,53 @@ bool IOPlatformExpert::CheckSubTree (OSArray * inSubTree, IOService * theNub, IO // Register a device at the specified node of our power tree. //********************************************************************************* -bool IOPlatformExpert::RegisterServiceInTree (IOService * theService, OSDictionary * theTreeNode, OSDictionary * theTreeParentNode, IOService * theProvider) -{ - IOService * aService; - bool registered = false; - OSArray * children; - unsigned int numChildren; - OSDictionary * child; - - // make sure someone is not already registered here - - if ( NULL == theTreeNode->getObject ("service") ) { - - if ( theTreeNode->setObject ("service", OSDynamicCast ( OSObject, theService)) ) { - - // 1. CHILDREN ------------------ - - // we registered the node in the tree...now if the node has children - // registered we must tell this service to add them. - - if ( NULL != (children = (OSArray *) theTreeNode->getObject ("children")) ) { - numChildren = children->getCount (); - for ( unsigned int i = 0; i < numChildren; i++ ) { - if ( NULL != (child = (OSDictionary *) children->getObject (i)) ) { - if ( NULL != (aService = (IOService *) child->getObject ("service")) ) - theService->addPowerChild (aService); - } - } - } - - // 2. PARENT -------------------- - - // also we must notify the parent of this node (if a registered service - // exists there) of a new child. - - if ( theTreeParentNode ) { - if ( NULL != (aService = (IOService *) theTreeParentNode->getObject ("service")) ) - if (aService != theProvider) - aService->addPowerChild (theService); - } - - registered = true; - } - } +bool +IOPlatformExpert::RegisterServiceInTree(IOService * theService, OSDictionary * theTreeNode, OSDictionary * theTreeParentNode, IOService * theProvider) +{ + IOService * aService; + bool registered = false; + OSArray * children; + unsigned int numChildren; + OSDictionary * child; + + // make sure someone is not already registered here + + if (NULL == theTreeNode->getObject("service")) { + if (theTreeNode->setObject("service", OSDynamicCast( OSObject, theService))) { + // 1. CHILDREN ------------------ + + // we registered the node in the tree...now if the node has children + // registered we must tell this service to add them. 
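// RegisterServiceInTree() (this hunk) stitches the power tree in both
// directions once a service claims a node: it adopts every service
// already registered at child nodes, then asks the service registered at
// the parent node, if any and if it is not the provider, to adopt the
// newcomer. Minimal model of that two-phase wiring (Svc is a stand-in,
// not an IOKit class):
#include <vector>

struct Svc {
    std::vector<Svc *> powerChildren;            // addPowerChild() targets
    void addPowerChild(Svc *c) { powerChildren.push_back(c); }
};

static void
register_in_tree(Svc &self, const std::vector<Svc *> &childServices,
    Svc *parentService, Svc *provider)
{
    for (Svc *child : childServices) {           // 1. adopt registered children
        self.addPowerChild(child);
    }
    if (parentService && parentService != provider) {
        parentService->addPowerChild(&self);     // 2. notify registered parent
    }
}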
+ + if (NULL != (children = (OSArray *) theTreeNode->getObject("children"))) { + numChildren = children->getCount(); + for (unsigned int i = 0; i < numChildren; i++) { + if (NULL != (child = (OSDictionary *) children->getObject(i))) { + if (NULL != (aService = (IOService *) child->getObject("service"))) { + theService->addPowerChild(aService); + } + } + } + } + + // 2. PARENT -------------------- + + // also we must notify the parent of this node (if a registered service + // exists there) of a new child. + + if (theTreeParentNode) { + if (NULL != (aService = (IOService *) theTreeParentNode->getObject("service"))) { + if (aService != theProvider) { + aService->addPowerChild(theService); + } + } + } + + registered = true; + } + } - return registered; + return registered; } //********************************************************************************* @@ -657,341 +713,372 @@ bool IOPlatformExpert::RegisterServiceInTree (IOService * theService, OSDictiona // // Print the keys for the given dictionary and selected contents. //********************************************************************************* -void printDictionaryKeys (OSDictionary * inDictionary, char * inMsg) +void +printDictionaryKeys(OSDictionary * inDictionary, char * inMsg) { - OSCollectionIterator * mcoll = OSCollectionIterator::withCollection (inDictionary); - OSSymbol * mkey; - OSString * ioClass; - unsigned int i = 0; - - mcoll->reset (); + OSCollectionIterator * mcoll = OSCollectionIterator::withCollection(inDictionary); + OSSymbol * mkey; + OSString * ioClass; + unsigned int i = 0; - mkey = OSDynamicCast (OSSymbol, mcoll->getNextObject ()); + mcoll->reset(); - while (mkey) { + mkey = OSDynamicCast(OSSymbol, mcoll->getNextObject()); - // kprintf ("dictionary key #%d: %s\n", i, mkey->getCStringNoCopy () ); + while (mkey) { + // kprintf ("dictionary key #%d: %s\n", i, mkey->getCStringNoCopy () ); - // if this is the IOClass key, print it's contents + // if this is the IOClass key, print it's contents - if ( mkey->isEqualTo ("IOClass") ) { - ioClass = (OSString *) inDictionary->getObject ("IOClass"); - if ( ioClass ) IOLog ("%s IOClass is %s\n", inMsg, ioClass->getCStringNoCopy () ); - } + if (mkey->isEqualTo("IOClass")) { + ioClass = (OSString *) inDictionary->getObject("IOClass"); + if (ioClass) { + IOLog("%s IOClass is %s\n", inMsg, ioClass->getCStringNoCopy()); + } + } - // if this is an IOProviderClass key print it + // if this is an IOProviderClass key print it - if ( mkey->isEqualTo ("IOProviderClass") ) { - ioClass = (OSString *) inDictionary->getObject ("IOProviderClass"); - if ( ioClass ) IOLog ("%s IOProviderClass is %s\n", inMsg, ioClass->getCStringNoCopy () ); + if (mkey->isEqualTo("IOProviderClass")) { + ioClass = (OSString *) inDictionary->getObject("IOProviderClass"); + if (ioClass) { + IOLog("%s IOProviderClass is %s\n", inMsg, ioClass->getCStringNoCopy()); + } + } - } + // also print IONameMatch keys + if (mkey->isEqualTo("IONameMatch")) { + ioClass = (OSString *) inDictionary->getObject("IONameMatch"); + if (ioClass) { + IOLog("%s IONameMatch is %s\n", inMsg, ioClass->getCStringNoCopy()); + } + } - // also print IONameMatch keys - if ( mkey->isEqualTo ("IONameMatch") ) { - ioClass = (OSString *) inDictionary->getObject ("IONameMatch"); - if ( ioClass ) IOLog ("%s IONameMatch is %s\n", inMsg, ioClass->getCStringNoCopy () ); - } + // also print IONameMatched keys - // also print IONameMatched keys - - if ( mkey->isEqualTo ("IONameMatched") ) { - ioClass = (OSString *) inDictionary->getObject 
("IONameMatched"); - if ( ioClass ) IOLog ("%s IONameMatched is %s\n", inMsg, ioClass->getCStringNoCopy () ); - } + if (mkey->isEqualTo("IONameMatched")) { + ioClass = (OSString *) inDictionary->getObject("IONameMatched"); + if (ioClass) { + IOLog("%s IONameMatched is %s\n", inMsg, ioClass->getCStringNoCopy()); + } + } #if 0 - // print clock-id - - if ( mkey->isEqualTo ("AAPL,clock-id") ) { - char * cstr; - cstr = getCStringForObject (inDictionary->getObject ("AAPL,clock-id")); - if (cstr) - kprintf (" ===> AAPL,clock-id is %s\n", cstr ); - } + // print clock-id + + if (mkey->isEqualTo("AAPL,clock-id")) { + char * cstr; + cstr = getCStringForObject(inDictionary->getObject("AAPL,clock-id")); + if (cstr) { + kprintf(" ===> AAPL,clock-id is %s\n", cstr ); + } + } #endif - // print name + // print name - if ( mkey->isEqualTo ("name") ) { - char nameStr[64]; - nameStr[0] = 0; - getCStringForObject(inDictionary->getObject("name"), nameStr, - sizeof(nameStr)); - if (strlen(nameStr) > 0) - IOLog ("%s name is %s\n", inMsg, nameStr); - } + if (mkey->isEqualTo("name")) { + char nameStr[64]; + nameStr[0] = 0; + getCStringForObject(inDictionary->getObject("name"), nameStr, + sizeof(nameStr)); + if (strlen(nameStr) > 0) { + IOLog("%s name is %s\n", inMsg, nameStr); + } + } - mkey = (OSSymbol *) mcoll->getNextObject (); + mkey = (OSSymbol *) mcoll->getNextObject(); - i++; - } + i++; + } - mcoll->release (); + mcoll->release(); } static void getCStringForObject(OSObject *inObj, char *outStr, size_t outStrLen) { - char * buffer; - unsigned int len, i; - - if ( (NULL == inObj) || (NULL == outStr)) - return; - - char * objString = (char *) (inObj->getMetaClass())->getClassName(); + char * buffer; + unsigned int len, i; - if ((0 == strncmp(objString, "OSString", sizeof("OSString"))) || - (0 == strncmp(objString, "OSSymbol", sizeof("OSSymbol")))) - strlcpy(outStr, ((OSString *)inObj)->getCStringNoCopy(), outStrLen); + if ((NULL == inObj) || (NULL == outStr)) { + return; + } - else if (0 == strncmp(objString, "OSData", sizeof("OSData"))) { - len = ((OSData *)inObj)->getLength(); - buffer = (char *)((OSData *)inObj)->getBytesNoCopy(); - if (buffer && (len > 0)) { - for (i=0; i < len; i++) { - outStr[i] = buffer[i]; - } - outStr[len] = 0; - } - } + char * objString = (char *) (inObj->getMetaClass())->getClassName(); + + if ((0 == strncmp(objString, "OSString", sizeof("OSString"))) || + (0 == strncmp(objString, "OSSymbol", sizeof("OSSymbol")))) { + strlcpy(outStr, ((OSString *)inObj)->getCStringNoCopy(), outStrLen); + } else if (0 == strncmp(objString, "OSData", sizeof("OSData"))) { + len = ((OSData *)inObj)->getLength(); + buffer = (char *)((OSData *)inObj)->getBytesNoCopy(); + if (buffer && (len > 0)) { + for (i = 0; i < len; i++) { + outStr[i] = buffer[i]; + } + outStr[len] = 0; + } + } } /* IOShutdownNotificationsTimedOut * - Called from a timer installed by PEHaltRestart */ -static void IOShutdownNotificationsTimedOut( - thread_call_param_t p0, - thread_call_param_t p1) +static void +IOShutdownNotificationsTimedOut( + thread_call_param_t p0, + thread_call_param_t p1) { #ifdef CONFIG_EMBEDDED - /* 30 seconds has elapsed - panic */ - panic("Halt/Restart Timed Out"); + /* 30 seconds has elapsed - panic */ + panic("Halt/Restart Timed Out"); #else /* ! 
CONFIG_EMBEDDED */ - int type = (int)(long)p0; - uint32_t timeout = (uint32_t)(uintptr_t)p1; - - IOPMrootDomain *pmRootDomain = IOService::getPMRootDomain(); - if (pmRootDomain) { - if ((PEGetCoprocessorVersion() >= kCoprocessorVersion2) || pmRootDomain->checkShutdownTimeout()) { - pmRootDomain->panicWithShutdownLog(timeout * 1000); - } - } - - /* 30 seconds has elapsed - resume shutdown */ - if(gIOPlatform) gIOPlatform->haltRestart(type); + int type = (int)(long)p0; + uint32_t timeout = (uint32_t)(uintptr_t)p1; + + IOPMrootDomain *pmRootDomain = IOService::getPMRootDomain(); + if (pmRootDomain) { + if ((PEGetCoprocessorVersion() >= kCoprocessorVersion2) || pmRootDomain->checkShutdownTimeout()) { + pmRootDomain->panicWithShutdownLog(timeout * 1000); + } + } + + /* 30 seconds has elapsed - resume shutdown */ + if (gIOPlatform) { + gIOPlatform->haltRestart(type); + } #endif /* CONFIG_EMBEDDED */ } extern "C" { - /* * Callouts from BSD for machine name & model - */ - -boolean_t PEGetMachineName( char * name, int maxLength ) -{ - if( gIOPlatform) - return( gIOPlatform->getMachineName( name, maxLength )); - else - return( false ); -} - -boolean_t PEGetModelName( char * name, int maxLength ) -{ - if( gIOPlatform) - return( gIOPlatform->getModelName( name, maxLength )); - else - return( false ); -} - -int PEGetPlatformEpoch(void) -{ - if( gIOPlatform) - return( gIOPlatform->getBootROMType()); - else - return( -1 ); -} - -int PEHaltRestart(unsigned int type) -{ - IOPMrootDomain *pmRootDomain; - AbsoluteTime deadline; - thread_call_t shutdown_hang; - IORegistryEntry *node; - OSData *data; - uint32_t timeout = kShutdownTimeout; - static boolean_t panic_begin_called = FALSE; - - if(type == kPEHaltCPU || type == kPERestartCPU || type == kPEUPSDelayHaltCPU) - { - pmRootDomain = IOService::getPMRootDomain(); - /* Notify IOKit PM clients of shutdown/restart - Clients subscribe to this message with a call to - IOService::registerInterest() - */ - - /* Spawn a thread that will panic in 30 seconds. - If all goes well the machine will be off by the time - the timer expires. If the device wants a different - timeout, use that value instead of 30 seconds. - */ + */ + +boolean_t +PEGetMachineName( char * name, int maxLength ) +{ + if (gIOPlatform) { + return gIOPlatform->getMachineName( name, maxLength ); + } else { + return false; + } +} + +boolean_t +PEGetModelName( char * name, int maxLength ) +{ + if (gIOPlatform) { + return gIOPlatform->getModelName( name, maxLength ); + } else { + return false; + } +} + +int +PEGetPlatformEpoch(void) +{ + if (gIOPlatform) { + return gIOPlatform->getBootROMType(); + } else { + return -1; + } +} + +int +PEHaltRestart(unsigned int type) +{ + IOPMrootDomain *pmRootDomain; + AbsoluteTime deadline; + thread_call_t shutdown_hang; + IORegistryEntry *node; + OSData *data; + uint32_t timeout = kShutdownTimeout; + static boolean_t panic_begin_called = FALSE; + + if (type == kPEHaltCPU || type == kPERestartCPU || type == kPEUPSDelayHaltCPU) { + pmRootDomain = IOService::getPMRootDomain(); + /* Notify IOKit PM clients of shutdown/restart + * Clients subscribe to this message with a call to + * IOService::registerInterest() + */ + + /* Spawn a thread that will panic in 30 seconds. + * If all goes well the machine will be off by the time + * the timer expires. If the device wants a different + * timeout, use that value instead of 30 seconds. 
+ */ #if CONFIG_EMBEDDED #define RESTART_NODE_PATH "/defaults" #else #define RESTART_NODE_PATH "/chosen" #endif - node = IORegistryEntry::fromPath( RESTART_NODE_PATH, gIODTPlane ); - if ( node ) { - data = OSDynamicCast( OSData, node->getProperty( "halt-restart-timeout" ) ); - if ( data && data->getLength() == 4 ) - timeout = *((uint32_t *) data->getBytesNoCopy()); - } - - shutdown_hang = thread_call_allocate( &IOShutdownNotificationsTimedOut, - (thread_call_param_t)(uintptr_t) type); - clock_interval_to_deadline( timeout, kSecondScale, &deadline ); - thread_call_enter1_delayed( shutdown_hang, (thread_call_param_t)(uintptr_t)timeout, deadline ); - - pmRootDomain->handlePlatformHaltRestart(type); - /* This notification should have few clients who all do - their work synchronously. - - In this "shutdown notification" context we don't give - drivers the option of working asynchronously and responding - later. PM internals make it very hard to wait for asynchronous - replies. - */ - } - else if(type == kPEPanicRestartCPU || type == kPEPanicSync) - { - if (type == kPEPanicRestartCPU) { - // Notify any listeners that we're done collecting - // panic data before we call through to do the restart + node = IORegistryEntry::fromPath( RESTART_NODE_PATH, gIODTPlane ); + if (node) { + data = OSDynamicCast( OSData, node->getProperty( "halt-restart-timeout" )); + if (data && data->getLength() == 4) { + timeout = *((uint32_t *) data->getBytesNoCopy()); + } + } + + shutdown_hang = thread_call_allocate( &IOShutdownNotificationsTimedOut, + (thread_call_param_t)(uintptr_t) type); + clock_interval_to_deadline( timeout, kSecondScale, &deadline ); + thread_call_enter1_delayed( shutdown_hang, (thread_call_param_t)(uintptr_t)timeout, deadline ); + + pmRootDomain->handlePlatformHaltRestart(type); + /* This notification should have few clients who all do + * their work synchronously. + * + * In this "shutdown notification" context we don't give + * drivers the option of working asynchronously and responding + * later. PM internals make it very hard to wait for asynchronous + * replies. + */ + } else if (type == kPEPanicRestartCPU || type == kPEPanicSync) { + if (type == kPEPanicRestartCPU) { + // Notify any listeners that we're done collecting + // panic data before we call through to do the restart #if !CONFIG_EMBEDDED - if (coprocessor_cross_panic_enabled) + if (coprocessor_cross_panic_enabled) #endif - IOCPURunPlatformPanicActions(kPEPanicEnd); - - // Callout to shutdown the disk driver once we've returned from the - // kPEPanicEnd callback (and we know all core dumps on this system - // are complete). - IOCPURunPlatformPanicActions(kPEPanicDiskShutdown); - } - - // Do an initial sync to flush as much panic data as possible, - // in case we have a problem in one of the platorm panic handlers. - // After running the platform handlers, do a final sync w/ - // platform hardware quiesced for the panic. - PE_sync_panic_buffers(); - IOCPURunPlatformPanicActions(type); - PE_sync_panic_buffers(); - } - else if (type == kPEPanicEnd) { + IOCPURunPlatformPanicActions(kPEPanicEnd); + + // Callout to shutdown the disk driver once we've returned from the + // kPEPanicEnd callback (and we know all core dumps on this system + // are complete). + IOCPURunPlatformPanicActions(kPEPanicDiskShutdown); + } + + // Do an initial sync to flush as much panic data as possible, + // in case we have a problem in one of the platform panic handlers.
+ // After running the platform handlers, do a final sync w/ + // platform hardware quiesced for the panic. + PE_sync_panic_buffers(); + IOCPURunPlatformPanicActions(type); + PE_sync_panic_buffers(); + } else if (type == kPEPanicEnd) { #if !CONFIG_EMBEDDED - if (coprocessor_cross_panic_enabled) + if (coprocessor_cross_panic_enabled) #endif - IOCPURunPlatformPanicActions(type); - - } else if (type == kPEPanicBegin) { + IOCPURunPlatformPanicActions(type); + } else if (type == kPEPanicBegin) { #if !CONFIG_EMBEDDED - if (coprocessor_cross_panic_enabled) + if (coprocessor_cross_panic_enabled) #endif - { - // Only call the kPEPanicBegin callout once - if (!panic_begin_called) { - panic_begin_called = TRUE; - IOCPURunPlatformPanicActions(type); - } - } - } + { + // Only call the kPEPanicBegin callout once + if (!panic_begin_called) { + panic_begin_called = TRUE; + IOCPURunPlatformPanicActions(type); + } + } + } - if (gIOPlatform) return gIOPlatform->haltRestart(type); - else return -1; + if (gIOPlatform) { + return gIOPlatform->haltRestart(type); + } else { + return -1; + } } -UInt32 PESavePanicInfo(UInt8 *buffer, UInt32 length) +UInt32 +PESavePanicInfo(UInt8 *buffer, UInt32 length) { - if (gIOPlatform != 0) return gIOPlatform->savePanicInfo(buffer, length); - else return 0; + if (gIOPlatform != 0) { + return gIOPlatform->savePanicInfo(buffer, length); + } else { + return 0; + } } -void PESavePanicInfoAction(void *buffer, UInt32 offset, UInt32 length) +void +PESavePanicInfoAction(void *buffer, UInt32 offset, UInt32 length) { IOCPURunPlatformPanicSyncAction(buffer, offset, length); return; } -inline static int init_gIOOptionsEntry(void) +inline static int +init_gIOOptionsEntry(void) { - IORegistryEntry *entry; - void *nvram_entry; - volatile void **options; - int ret = -1; + IORegistryEntry *entry; + void *nvram_entry; + volatile void **options; + int ret = -1; - if (gIOOptionsEntry) - return 0; + if (gIOOptionsEntry) { + return 0; + } - entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); - if (!entry) - return -1; + entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); + if (!entry) { + return -1; + } - nvram_entry = (void *) OSDynamicCast(IODTNVRAM, entry); - if (!nvram_entry) - goto release; + nvram_entry = (void *) OSDynamicCast(IODTNVRAM, entry); + if (!nvram_entry) { + goto release; + } - options = (volatile void **) &gIOOptionsEntry; - if (!OSCompareAndSwapPtr(NULL, nvram_entry, options)) { - ret = 0; - goto release; - } + options = (volatile void **) &gIOOptionsEntry; + if (!OSCompareAndSwapPtr(NULL, nvram_entry, options)) { + ret = 0; + goto release; + } - return 0; + return 0; release: - entry->release(); - return ret; - + entry->release(); + return ret; } /* pass in a NULL value if you just want to figure out the len */ -boolean_t PEReadNVRAMProperty(const char *symbol, void *value, - unsigned int *len) +boolean_t +PEReadNVRAMProperty(const char *symbol, void *value, + unsigned int *len) { - OSObject *obj; - OSData *data; - unsigned int vlen; + OSObject *obj; + OSData *data; + unsigned int vlen; - if (!symbol || !len) - goto err; + if (!symbol || !len) { + goto err; + } - if (init_gIOOptionsEntry() < 0) - goto err; + if (init_gIOOptionsEntry() < 0) { + goto err; + } - vlen = *len; - *len = 0; + vlen = *len; + *len = 0; - obj = gIOOptionsEntry->getProperty(symbol); - if (!obj) - goto err; + obj = gIOOptionsEntry->getProperty(symbol); + if (!obj) { + goto err; + } - /* convert to data */ - data = OSDynamicCast(OSData, obj); - if (!data) - goto err; + /* convert to data */ 
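// Usage note for PEReadNVRAMProperty(): as its comment says, a NULL
// value pointer performs a pure length query, so callers typically make
// two calls: one for the size, one for the bytes. Hypothetical kernel
// caller ("boot-args" is only an example property name; IOMalloc/IOFree
// are the usual IOKit allocators):
#include <IOKit/IOLib.h>               // IOMalloc / IOFree
#include <IOKit/IOPlatformExpert.h>    // PEReadNVRAMProperty

static void
read_nvram_example(void)
{
    unsigned int len = 0;

    // First call: NULL buffer, pure length query.
    if (PEReadNVRAMProperty("boot-args", NULL, &len) && len > 0) {
        char *buf = (char *)IOMalloc(len + 1);
        if (buf != NULL) {
            // Second call: fetch the bytes now that the size is known.
            if (PEReadNVRAMProperty("boot-args", buf, &len)) {
                buf[len] = '\0';   // NVRAM payloads are not NUL-terminated
                // ... consume buf ...
            }
            IOFree(buf, len + 1);
        }
    }
}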
+ data = OSDynamicCast(OSData, obj); + if (!data) { + goto err; + } - *len = data->getLength(); - vlen = min(vlen, *len); - if (value && vlen) - memcpy((void *) value, data->getBytesNoCopy(), vlen); + *len = data->getLength(); + vlen = min(vlen, *len); + if (value && vlen) { + memcpy((void *) value, data->getBytesNoCopy(), vlen); + } - return TRUE; + return TRUE; err: - return FALSE; + return FALSE; } boolean_t @@ -1027,265 +1114,293 @@ exit: return ret; } -boolean_t PEWriteNVRAMProperty(const char *symbol, const void *value, - const unsigned int len) +static boolean_t +PEWriteNVRAMPropertyInternal(const char *symbol, boolean_t copySymbol, const void *value, + const unsigned int len) { - const OSSymbol *sym; - OSData *data; - bool ret = false; + const OSSymbol *sym; + OSData *data; + bool ret = false; - if (!symbol || !value || !len) - goto err; + if (!symbol || !value || !len) { + goto err; + } - if (init_gIOOptionsEntry() < 0) - goto err; + if (init_gIOOptionsEntry() < 0) { + goto err; + } - sym = OSSymbol::withCStringNoCopy(symbol); - if (!sym) - goto err; + if (copySymbol == TRUE) { + sym = OSSymbol::withCString(symbol); + } else { + sym = OSSymbol::withCStringNoCopy(symbol); + } - data = OSData::withBytes((void *) value, len); - if (!data) - goto sym_done; + if (!sym) { + goto err; + } + + data = OSData::withBytes((void *) value, len); + if (!data) { + goto sym_done; + } - ret = gIOOptionsEntry->setProperty(sym, data); - data->release(); + ret = gIOOptionsEntry->setProperty(sym, data); + data->release(); sym_done: - sym->release(); + sym->release(); - if (ret == true) { - gIOOptionsEntry->sync(); - return TRUE; - } + if (ret == true) { + gIOOptionsEntry->sync(); + return TRUE; + } err: - return FALSE; + return FALSE; } +boolean_t +PEWriteNVRAMProperty(const char *symbol, const void *value, + const unsigned int len) +{ + return PEWriteNVRAMPropertyInternal(symbol, FALSE, value, len); +} -boolean_t PERemoveNVRAMProperty(const char *symbol) +boolean_t +PEWriteNVRAMPropertyWithCopy(const char *symbol, const void *value, + const unsigned int len) { - const OSSymbol *sym; + return PEWriteNVRAMPropertyInternal(symbol, TRUE, value, len); +} - if (!symbol) - goto err; +boolean_t +PERemoveNVRAMProperty(const char *symbol) +{ + const OSSymbol *sym; - if (init_gIOOptionsEntry() < 0) - goto err; + if (!symbol) { + goto err; + } - sym = OSSymbol::withCStringNoCopy(symbol); - if (!sym) - goto err; + if (init_gIOOptionsEntry() < 0) { + goto err; + } - gIOOptionsEntry->removeProperty(sym); + sym = OSSymbol::withCStringNoCopy(symbol); + if (!sym) { + goto err; + } - sym->release(); + gIOOptionsEntry->removeProperty(sym); - gIOOptionsEntry->sync(); - return TRUE; + sym->release(); -err: - return FALSE; + gIOOptionsEntry->sync(); + return TRUE; +err: + return FALSE; } -long PEGetGMTTimeOfDay(void) +long +PEGetGMTTimeOfDay(void) { - clock_sec_t secs; - clock_usec_t usecs; + clock_sec_t secs; + clock_usec_t usecs; - PEGetUTCTimeOfDay(&secs, &usecs); - return secs; + PEGetUTCTimeOfDay(&secs, &usecs); + return secs; } -void PESetGMTTimeOfDay(long secs) +void +PESetGMTTimeOfDay(long secs) { - PESetUTCTimeOfDay(secs, 0); + PESetUTCTimeOfDay(secs, 0); } -void PEGetUTCTimeOfDay(clock_sec_t * secs, clock_usec_t * usecs) +void +PEGetUTCTimeOfDay(clock_sec_t * secs, clock_usec_t * usecs) { - clock_nsec_t nsecs = 0; + clock_nsec_t nsecs = 0; - *secs = 0; - if (gIOPlatform) - gIOPlatform->getUTCTimeOfDay(secs, &nsecs); + *secs = 0; + if (gIOPlatform) { + gIOPlatform->getUTCTimeOfDay(secs, &nsecs); + } - assert(nsecs 
< NSEC_PER_SEC); - *usecs = nsecs / NSEC_PER_USEC; + assert(nsecs < NSEC_PER_SEC); + *usecs = nsecs / NSEC_PER_USEC; } -void PESetUTCTimeOfDay(clock_sec_t secs, clock_usec_t usecs) +void +PESetUTCTimeOfDay(clock_sec_t secs, clock_usec_t usecs) { - assert(usecs < USEC_PER_SEC); - if (gIOPlatform) - gIOPlatform->setUTCTimeOfDay(secs, usecs * NSEC_PER_USEC); + assert(usecs < USEC_PER_SEC); + if (gIOPlatform) { + gIOPlatform->setUTCTimeOfDay(secs, usecs * NSEC_PER_USEC); + } } -coprocessor_type_t PEGetCoprocessorVersion( void ) +coprocessor_type_t +PEGetCoprocessorVersion( void ) { - coprocessor_type_t coprocessor_version = kCoprocessorVersionNone; + coprocessor_type_t coprocessor_version = kCoprocessorVersionNone; #if !CONFIG_EMBEDDED - IORegistryEntry *platform_entry = NULL; - OSData *coprocessor_version_obj = NULL; - - platform_entry = IORegistryEntry::fromPath(kIODeviceTreePlane ":/efi/platform"); - if (platform_entry != NULL) { - coprocessor_version_obj = OSDynamicCast(OSData, platform_entry->getProperty("apple-coprocessor-version")); - if ((coprocessor_version_obj != NULL) && (coprocessor_version_obj->getLength() <= sizeof(uint64_t))) { - memcpy(&coprocessor_version, coprocessor_version_obj->getBytesNoCopy(), coprocessor_version_obj->getLength()); - } - platform_entry->release(); - } + IORegistryEntry *platform_entry = NULL; + OSData *coprocessor_version_obj = NULL; + + platform_entry = IORegistryEntry::fromPath(kIODeviceTreePlane ":/efi/platform"); + if (platform_entry != NULL) { + coprocessor_version_obj = OSDynamicCast(OSData, platform_entry->getProperty("apple-coprocessor-version")); + if ((coprocessor_version_obj != NULL) && (coprocessor_version_obj->getLength() <= sizeof(uint64_t))) { + memcpy(&coprocessor_version, coprocessor_version_obj->getBytesNoCopy(), coprocessor_version_obj->getLength()); + } + platform_entry->release(); + } #endif - return coprocessor_version; + return coprocessor_version; } - } /* extern "C" */ -void IOPlatformExpert::registerNVRAMController(IONVRAMController * caller) +void +IOPlatformExpert::registerNVRAMController(IONVRAMController * caller) { - OSData * data; - IORegistryEntry * entry; - OSString * string = 0; - uuid_string_t uuid; + OSData * data; + IORegistryEntry * entry; + OSString * string = 0; + uuid_string_t uuid; #if CONFIG_EMBEDDED - entry = IORegistryEntry::fromPath( "/chosen", gIODTPlane ); - if ( entry ) - { - OSData * data1; - - data1 = OSDynamicCast( OSData, entry->getProperty( "unique-chip-id" ) ); - if ( data1 && data1->getLength( ) == 8 ) - { - OSData * data2; - - data2 = OSDynamicCast( OSData, entry->getProperty( "chip-id" ) ); - if ( data2 && data2->getLength( ) == 4 ) - { - SHA1_CTX context; - uint8_t digest[ SHA_DIGEST_LENGTH ]; - const uuid_t space = { 0xA6, 0xDD, 0x4C, 0xCB, 0xB5, 0xE8, 0x4A, 0xF5, 0xAC, 0xDD, 0xB6, 0xDC, 0x6A, 0x05, 0x42, 0xB8 }; - - SHA1Init( &context ); - SHA1Update( &context, space, sizeof( space ) ); - SHA1Update( &context, data1->getBytesNoCopy( ), data1->getLength( ) ); - SHA1Update( &context, data2->getBytesNoCopy( ), data2->getLength( ) ); - SHA1Final( digest, &context ); - - digest[ 6 ] = ( digest[ 6 ] & 0x0F ) | 0x50; - digest[ 8 ] = ( digest[ 8 ] & 0x3F ) | 0x80; - - uuid_unparse( digest, uuid ); - string = OSString::withCString( uuid ); - } - } - - entry->release( ); - } + entry = IORegistryEntry::fromPath( "/chosen", gIODTPlane ); + if (entry) { + OSData * data1; + + data1 = OSDynamicCast( OSData, entry->getProperty( "unique-chip-id" )); + if (data1 && data1->getLength() == 8) { + OSData * data2; + 
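// The registerNVRAMController() code around this point derives the
// platform UUID RFC 4122-style: SHA-1 over a fixed namespace UUID plus
// device identifiers, after which byte 6 is forced to version 5 and
// byte 8 to the 10xx variant. Standalone check of the two bit
// operations used below:
#include <cassert>
#include <cstdint>

int main()
{
    uint8_t b6 = 0xAB, b8 = 0xCD;   // arbitrary digest bytes
    b6 = (b6 & 0x0F) | 0x50;        // keep low nibble, version := 5
    b8 = (b8 & 0x3F) | 0x80;        // keep low 6 bits, variant := 0b10
    assert(b6 == 0x5B);
    assert(b8 == 0x8D);
    return 0;
}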
+ data2 = OSDynamicCast( OSData, entry->getProperty( "chip-id" )); + if (data2 && data2->getLength() == 4) { + SHA1_CTX context; + uint8_t digest[SHA_DIGEST_LENGTH]; + const uuid_t space = { 0xA6, 0xDD, 0x4C, 0xCB, 0xB5, 0xE8, 0x4A, 0xF5, 0xAC, 0xDD, 0xB6, 0xDC, 0x6A, 0x05, 0x42, 0xB8 }; + + SHA1Init( &context ); + SHA1Update( &context, space, sizeof(space)); + SHA1Update( &context, data1->getBytesNoCopy(), data1->getLength()); + SHA1Update( &context, data2->getBytesNoCopy(), data2->getLength()); + SHA1Final( digest, &context ); + + digest[6] = (digest[6] & 0x0F) | 0x50; + digest[8] = (digest[8] & 0x3F) | 0x80; + + uuid_unparse( digest, uuid ); + string = OSString::withCString( uuid ); + } + } + + entry->release(); + } #else /* !CONFIG_EMBEDDED */ - /* - * If we have panic debugging enabled and a prod-fused coprocessor, - * disable cross panics so that the co-processor doesn't cause the system - * to reset when we enter the debugger or hit a panic on the x86 side. - */ - if ( panicDebugging ) - { - entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); - if ( entry ) - { - data = OSDynamicCast( OSData, entry->getProperty( APPLE_SECURE_BOOT_VARIABLE_GUID":EffectiveProductionStatus" ) ); - if ( data && ( data->getLength( ) == sizeof( UInt8 ) ) ) { - UInt8 *isProdFused = (UInt8 *) data->getBytesNoCopy( ); - UInt32 debug_flags = 0; - if ( *isProdFused || ( PE_i_can_has_debugger(&debug_flags) && - ( debug_flags & DB_DISABLE_CROSS_PANIC ) ) ) { - coprocessor_cross_panic_enabled = FALSE; - } - } - entry->release( ); - } - } - - entry = IORegistryEntry::fromPath( "/efi/platform", gIODTPlane ); - if ( entry ) - { - data = OSDynamicCast( OSData, entry->getProperty( "system-id" ) ); - if ( data && data->getLength( ) == 16 ) - { - SHA1_CTX context; - uint8_t digest[ SHA_DIGEST_LENGTH ]; - const uuid_t space = { 0x2A, 0x06, 0x19, 0x90, 0xD3, 0x8D, 0x44, 0x40, 0xA1, 0x39, 0xC4, 0x97, 0x70, 0x37, 0x65, 0xAC }; - - SHA1Init( &context ); - SHA1Update( &context, space, sizeof( space ) ); - SHA1Update( &context, data->getBytesNoCopy( ), data->getLength( ) ); - SHA1Final( digest, &context ); - - digest[ 6 ] = ( digest[ 6 ] & 0x0F ) | 0x50; - digest[ 8 ] = ( digest[ 8 ] & 0x3F ) | 0x80; - - uuid_unparse( digest, uuid ); - string = OSString::withCString( uuid ); - } - - entry->release( ); - } -#endif /* !CONFIG_EMBEDDED */ + /* + * If we have panic debugging enabled and a prod-fused coprocessor, + * disable cross panics so that the co-processor doesn't cause the system + * to reset when we enter the debugger or hit a panic on the x86 side. 
+ */ + if (panicDebugging) { + entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); + if (entry) { + data = OSDynamicCast( OSData, entry->getProperty( APPLE_SECURE_BOOT_VARIABLE_GUID":EffectiveProductionStatus" )); + if (data && (data->getLength() == sizeof(UInt8))) { + UInt8 *isProdFused = (UInt8 *) data->getBytesNoCopy(); + UInt32 debug_flags = 0; + if (*isProdFused || (PE_i_can_has_debugger(&debug_flags) && + (debug_flags & DB_DISABLE_CROSS_PANIC))) { + coprocessor_cross_panic_enabled = FALSE; + } + } + entry->release(); + } + } + + entry = IORegistryEntry::fromPath( "/efi/platform", gIODTPlane ); + if (entry) { + data = OSDynamicCast( OSData, entry->getProperty( "system-id" )); + if (data && data->getLength() == 16) { + SHA1_CTX context; + uint8_t digest[SHA_DIGEST_LENGTH]; + const uuid_t space = { 0x2A, 0x06, 0x19, 0x90, 0xD3, 0x8D, 0x44, 0x40, 0xA1, 0x39, 0xC4, 0x97, 0x70, 0x37, 0x65, 0xAC }; - if ( string == 0 ) - { - entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); - if ( entry ) - { - data = OSDynamicCast( OSData, entry->getProperty( "platform-uuid" ) ); - if ( data && data->getLength( ) == sizeof( uuid_t ) ) - { - uuid_unparse( ( uint8_t * ) data->getBytesNoCopy( ), uuid ); - string = OSString::withCString( uuid ); - } + SHA1Init( &context ); + SHA1Update( &context, space, sizeof(space)); + SHA1Update( &context, data->getBytesNoCopy(), data->getLength()); + SHA1Final( digest, &context ); - entry->release( ); - } - } + digest[6] = (digest[6] & 0x0F) | 0x50; + digest[8] = (digest[8] & 0x3F) | 0x80; - if ( string ) - { - getProvider( )->setProperty( kIOPlatformUUIDKey, string ); - publishResource( kIOPlatformUUIDKey, string ); + uuid_unparse( digest, uuid ); + string = OSString::withCString( uuid ); + } - string->release( ); - } + entry->release(); + } +#endif /* !CONFIG_EMBEDDED */ + + if (string == 0) { + entry = IORegistryEntry::fromPath( "/options", gIODTPlane ); + if (entry) { + data = OSDynamicCast( OSData, entry->getProperty( "platform-uuid" )); + if (data && data->getLength() == sizeof(uuid_t)) { + uuid_unparse((uint8_t *) data->getBytesNoCopy(), uuid ); + string = OSString::withCString( uuid ); + } + + entry->release(); + } + } + + if (string) { + getProvider()->setProperty( kIOPlatformUUIDKey, string ); + publishResource( kIOPlatformUUIDKey, string ); + + string->release(); + } - publishResource("IONVRAM"); + publishResource("IONVRAM"); } -IOReturn IOPlatformExpert::callPlatformFunction(const OSSymbol *functionName, - bool waitForFunction, - void *param1, void *param2, - void *param3, void *param4) +IOReturn +IOPlatformExpert::callPlatformFunction(const OSSymbol *functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4) { - IOService *service, *_resources; - - if (waitForFunction) { - _resources = waitForService(resourceMatching(functionName)); - } else { - _resources = getResourceService(); - } - if (_resources == 0) return kIOReturnUnsupported; - - service = OSDynamicCast(IOService, _resources->getProperty(functionName)); - if (service == 0) return kIOReturnUnsupported; - - return service->callPlatformFunction(functionName, waitForFunction, - param1, param2, param3, param4); + IOService *service, *_resources; + + if (waitForFunction) { + _resources = waitForService(resourceMatching(functionName)); + } else { + _resources = getResourceService(); + } + if (_resources == 0) { + return kIOReturnUnsupported; + } + + service = OSDynamicCast(IOService, _resources->getProperty(functionName)); + if (service == 0) { + 
return kIOReturnUnsupported; + } + + return service->callPlatformFunction(functionName, waitForFunction, + param1, param2, param3, param4); } -IOByteCount IOPlatformExpert::savePanicInfo(UInt8 *buffer, IOByteCount length) +IOByteCount +IOPlatformExpert::savePanicInfo(UInt8 *buffer, IOByteCount length) { - return 0; + return 0; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1295,295 +1410,354 @@ IOByteCount IOPlatformExpert::savePanicInfo(UInt8 *buffer, IOByteCount length) OSDefineMetaClassAndAbstractStructors( IODTPlatformExpert, IOPlatformExpert ) -OSMetaClassDefineReservedUnused(IODTPlatformExpert, 0); -OSMetaClassDefineReservedUnused(IODTPlatformExpert, 1); -OSMetaClassDefineReservedUnused(IODTPlatformExpert, 2); -OSMetaClassDefineReservedUnused(IODTPlatformExpert, 3); -OSMetaClassDefineReservedUnused(IODTPlatformExpert, 4); -OSMetaClassDefineReservedUnused(IODTPlatformExpert, 5); -OSMetaClassDefineReservedUnused(IODTPlatformExpert, 6); -OSMetaClassDefineReservedUnused(IODTPlatformExpert, 7); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 0); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 1); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 2); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 3); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 4); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 5); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 6); +OSMetaClassDefineReservedUnused(IODTPlatformExpert, 7); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOService * IODTPlatformExpert::probe( IOService * provider, - SInt32 * score ) +IOService * +IODTPlatformExpert::probe( IOService * provider, + SInt32 * score ) { - if( !super::probe( provider, score)) - return( 0 ); + if (!super::probe( provider, score)) { + return 0; + } - // check machine types - if( !provider->compareNames( getProperty( gIONameMatchKey ) )) - return( 0 ); + // check machine types + if (!provider->compareNames( getProperty( gIONameMatchKey ))) { + return 0; + } - return( this); + return this; } -bool IODTPlatformExpert::configure( IOService * provider ) +bool +IODTPlatformExpert::configure( IOService * provider ) { - if( !super::configure( provider)) - return( false); + if (!super::configure( provider)) { + return false; + } - processTopLevel( provider ); + processTopLevel( provider ); - return( true ); + return true; } -IOService * IODTPlatformExpert::createNub( IORegistryEntry * from ) +IOService * +IODTPlatformExpert::createNub( IORegistryEntry * from ) { - IOService * nub; + IOService * nub; - nub = new IOPlatformDevice; - if( nub) { - if( !nub->init( from, gIODTPlane )) { - nub->free(); - nub = 0; + nub = new IOPlatformDevice; + if (nub) { + if (!nub->init( from, gIODTPlane )) { + nub->free(); + nub = 0; + } } - } - return( nub); + return nub; } -bool IODTPlatformExpert::createNubs( IOService * parent, OSIterator * iter ) -{ - IORegistryEntry * next; - IOService * nub; - bool ok = true; - - if( iter) { - while( (next = (IORegistryEntry *) iter->getNextObject())) { - - if( 0 == (nub = createNub( next ))) - continue; - - nub->attach( parent ); - nub->registerService(); - } - iter->release(); - } +bool +IODTPlatformExpert::createNubs( IOService * parent, OSIterator * iter ) +{ + IORegistryEntry * next; + IOService * nub; + bool ok = true; + + if (iter) { + while ((next = (IORegistryEntry *) iter->getNextObject())) { + if (0 == (nub = createNub( next ))) { + continue; + } + + nub->attach( parent ); + 
nub->registerService(); + } + iter->release(); + } - return( ok ); + return ok; } -void IODTPlatformExpert::processTopLevel( IORegistryEntry * rootEntry ) +void +IODTPlatformExpert::processTopLevel( IORegistryEntry * rootEntry ) { - OSIterator * kids; - IORegistryEntry * next; - IORegistryEntry * cpus; - IORegistryEntry * options; + OSIterator * kids; + IORegistryEntry * next; + IORegistryEntry * cpus; + IORegistryEntry * options; - // infanticide - kids = IODTFindMatchingEntries( rootEntry, 0, deleteList() ); - if( kids) { - while( (next = (IORegistryEntry *)kids->getNextObject())) { - next->detachAll( gIODTPlane); + // infanticide + kids = IODTFindMatchingEntries( rootEntry, 0, deleteList()); + if (kids) { + while ((next = (IORegistryEntry *)kids->getNextObject())) { + next->detachAll( gIODTPlane); + } + kids->release(); } - kids->release(); - } - // Publish an IODTNVRAM class on /options. - options = rootEntry->childFromPath("options", gIODTPlane); - if (options) { - dtNVRAM = new IODTNVRAM; - if (dtNVRAM) { - if (!dtNVRAM->init(options, gIODTPlane)) { - dtNVRAM->release(); - dtNVRAM = 0; - } else { - dtNVRAM->attach(this); - dtNVRAM->registerService(); - options->release(); + // Publish an IODTNVRAM class on /options. + options = rootEntry->childFromPath("options", gIODTPlane); + if (options) { + dtNVRAM = new IODTNVRAM; + if (dtNVRAM) { + if (!dtNVRAM->init(options, gIODTPlane)) { + dtNVRAM->release(); + dtNVRAM = 0; + } else { + dtNVRAM->attach(this); + dtNVRAM->registerService(); + options->release(); + } + } } - } - } - // Publish the cpus. - cpus = rootEntry->childFromPath( "cpus", gIODTPlane); - if ( cpus) - { - createNubs( this, IODTFindMatchingEntries( cpus, kIODTExclusive, 0)); - cpus->release(); - } + // Publish the cpus. + cpus = rootEntry->childFromPath( "cpus", gIODTPlane); + if (cpus) { + createNubs( this, IODTFindMatchingEntries( cpus, kIODTExclusive, 0)); + cpus->release(); + } - // publish top level, minus excludeList - createNubs( this, IODTFindMatchingEntries( rootEntry, kIODTExclusive, excludeList())); + // publish top level, minus excludeList + createNubs( this, IODTFindMatchingEntries( rootEntry, kIODTExclusive, excludeList())); } -IOReturn IODTPlatformExpert::getNubResources( IOService * nub ) +IOReturn +IODTPlatformExpert::getNubResources( IOService * nub ) { - if( nub->getDeviceMemory()) - return( kIOReturnSuccess ); + if (nub->getDeviceMemory()) { + return kIOReturnSuccess; + } - IODTResolveAddressing( nub, "reg", 0); + IODTResolveAddressing( nub, "reg", 0); - return( kIOReturnSuccess); + return kIOReturnSuccess; } -bool IODTPlatformExpert::compareNubName( const IOService * nub, - OSString * name, OSString ** matched ) const +bool +IODTPlatformExpert::compareNubName( const IOService * nub, + OSString * name, OSString ** matched ) const { - return( IODTCompareNubName( nub, name, matched ) - || super::compareNubName( nub, name, matched) ); + return IODTCompareNubName( nub, name, matched ) + || super::compareNubName( nub, name, matched); } -bool IODTPlatformExpert::getModelName( char * name, int maxLength ) -{ - OSData * prop; - const char * str; - int len; - char c; - bool ok = false; - - maxLength--; - - prop = (OSData *) getProvider()->getProperty( gIODTCompatibleKey ); - if( prop ) { - str = (const char *) prop->getBytesNoCopy(); - - if( 0 == strncmp( str, "AAPL,", strlen( "AAPL," ) )) - str += strlen( "AAPL," ); - - len = 0; - while( (c = *str++)) { - if( (c == '/') || (c == ' ')) - c = '-'; - - name[ len++ ] = c; - if( len >= maxLength) - break; +bool 
+IODTPlatformExpert::getModelName( char * name, int maxLength ) +{ + OSData * prop; + const char * str; + int len; + char c; + bool ok = false; + + maxLength--; + + prop = (OSData *) getProvider()->getProperty( gIODTCompatibleKey ); + if (prop) { + str = (const char *) prop->getBytesNoCopy(); + + if (0 == strncmp( str, "AAPL,", strlen( "AAPL," ))) { + str += strlen( "AAPL," ); + } + + len = 0; + while ((c = *str++)) { + if ((c == '/') || (c == ' ')) { + c = '-'; + } + + name[len++] = c; + if (len >= maxLength) { + break; + } + } + + name[len] = 0; + ok = true; } - - name[ len ] = 0; - ok = true; - } - return( ok ); + return ok; } -bool IODTPlatformExpert::getMachineName( char * name, int maxLength ) +bool +IODTPlatformExpert::getMachineName( char * name, int maxLength ) { - OSData * prop; - bool ok = false; + OSData * prop; + bool ok = false; - maxLength--; - prop = (OSData *) getProvider()->getProperty( gIODTModelKey ); - ok = (0 != prop); + maxLength--; + prop = (OSData *) getProvider()->getProperty( gIODTModelKey ); + ok = (0 != prop); - if( ok ) - strlcpy( name, (const char *) prop->getBytesNoCopy(), maxLength ); + if (ok) { + strlcpy( name, (const char *) prop->getBytesNoCopy(), maxLength ); + } - return( ok ); + return ok; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IODTPlatformExpert::registerNVRAMController( IONVRAMController * nvram ) +void +IODTPlatformExpert::registerNVRAMController( IONVRAMController * nvram ) { - if (dtNVRAM) dtNVRAM->registerNVRAMController(nvram); - - super::registerNVRAMController(nvram); + if (dtNVRAM) { + dtNVRAM->registerNVRAMController(nvram); + } + + super::registerNVRAMController(nvram); } -int IODTPlatformExpert::haltRestart(unsigned int type) +int +IODTPlatformExpert::haltRestart(unsigned int type) { - if (dtNVRAM) dtNVRAM->sync(); - - return super::haltRestart(type); + if (dtNVRAM) { + dtNVRAM->sync(); + } + + return super::haltRestart(type); } -IOReturn IODTPlatformExpert::readXPRAM(IOByteCount offset, UInt8 * buffer, - IOByteCount length) +IOReturn +IODTPlatformExpert::readXPRAM(IOByteCount offset, UInt8 * buffer, + IOByteCount length) { - if (dtNVRAM) return dtNVRAM->readXPRAM(offset, buffer, length); - else return kIOReturnNotReady; + if (dtNVRAM) { + return dtNVRAM->readXPRAM(offset, buffer, length); + } else { + return kIOReturnNotReady; + } } -IOReturn IODTPlatformExpert::writeXPRAM(IOByteCount offset, UInt8 * buffer, - IOByteCount length) +IOReturn +IODTPlatformExpert::writeXPRAM(IOByteCount offset, UInt8 * buffer, + IOByteCount length) { - if (dtNVRAM) return dtNVRAM->writeXPRAM(offset, buffer, length); - else return kIOReturnNotReady; + if (dtNVRAM) { + return dtNVRAM->writeXPRAM(offset, buffer, length); + } else { + return kIOReturnNotReady; + } } -IOReturn IODTPlatformExpert::readNVRAMProperty( +IOReturn +IODTPlatformExpert::readNVRAMProperty( IORegistryEntry * entry, const OSSymbol ** name, OSData ** value ) { - if (dtNVRAM) return dtNVRAM->readNVRAMProperty(entry, name, value); - else return kIOReturnNotReady; + if (dtNVRAM) { + return dtNVRAM->readNVRAMProperty(entry, name, value); + } else { + return kIOReturnNotReady; + } } -IOReturn IODTPlatformExpert::writeNVRAMProperty( +IOReturn +IODTPlatformExpert::writeNVRAMProperty( IORegistryEntry * entry, const OSSymbol * name, OSData * value ) { - if (dtNVRAM) return dtNVRAM->writeNVRAMProperty(entry, name, value); - else return kIOReturnNotReady; + if (dtNVRAM) { + return dtNVRAM->writeNVRAMProperty(entry, name, value); + } else { + return 
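/*
 * The copies below are purely positional and assume the raw property holds
 * at least 20 (CTO) or 21 (plain) bytes; the only length guard above is
 * serialNumberSize > 0. With raw bytes P[0..n): the CTO branch emits
 * P[12..19] P[0..2] '-' P[3..10] (20 characters), the plain branch emits
 * P[13..20] P[0..2] (11 characters); in both cases the trailing serial
 * block moves to the front and the 3-byte prefix follows it.
 */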
kIOReturnNotReady; + } } -OSDictionary *IODTPlatformExpert::getNVRAMPartitions(void) +OSDictionary * +IODTPlatformExpert::getNVRAMPartitions(void) { - if (dtNVRAM) return dtNVRAM->getNVRAMPartitions(); - else return 0; + if (dtNVRAM) { + return dtNVRAM->getNVRAMPartitions(); + } else { + return 0; + } } -IOReturn IODTPlatformExpert::readNVRAMPartition(const OSSymbol * partitionID, - IOByteCount offset, UInt8 * buffer, - IOByteCount length) +IOReturn +IODTPlatformExpert::readNVRAMPartition(const OSSymbol * partitionID, + IOByteCount offset, UInt8 * buffer, + IOByteCount length) { - if (dtNVRAM) return dtNVRAM->readNVRAMPartition(partitionID, offset, - buffer, length); - else return kIOReturnNotReady; + if (dtNVRAM) { + return dtNVRAM->readNVRAMPartition(partitionID, offset, + buffer, length); + } else { + return kIOReturnNotReady; + } } -IOReturn IODTPlatformExpert::writeNVRAMPartition(const OSSymbol * partitionID, - IOByteCount offset, UInt8 * buffer, - IOByteCount length) +IOReturn +IODTPlatformExpert::writeNVRAMPartition(const OSSymbol * partitionID, + IOByteCount offset, UInt8 * buffer, + IOByteCount length) { - if (dtNVRAM) return dtNVRAM->writeNVRAMPartition(partitionID, offset, - buffer, length); - else return kIOReturnNotReady; + if (dtNVRAM) { + return dtNVRAM->writeNVRAMPartition(partitionID, offset, + buffer, length); + } else { + return kIOReturnNotReady; + } } -IOByteCount IODTPlatformExpert::savePanicInfo(UInt8 *buffer, IOByteCount length) +IOByteCount +IODTPlatformExpert::savePanicInfo(UInt8 *buffer, IOByteCount length) { - IOByteCount lengthSaved = 0; - - if (dtNVRAM) lengthSaved = dtNVRAM->savePanicInfo(buffer, length); - - if (lengthSaved == 0) lengthSaved = super::savePanicInfo(buffer, length); - - return lengthSaved; -} + IOByteCount lengthSaved = 0; + + if (dtNVRAM) { + lengthSaved = dtNVRAM->savePanicInfo(buffer, length); + } + + if (lengthSaved == 0) { + lengthSaved = super::savePanicInfo(buffer, length); + } -OSString* IODTPlatformExpert::createSystemSerialNumberString(OSData* myProperty) { - UInt8* serialNumber; - unsigned int serialNumberSize; - unsigned short pos = 0; - char* temp; - char SerialNo[30]; - - if (myProperty != NULL) { - serialNumberSize = myProperty->getLength(); - serialNumber = (UInt8*)(myProperty->getBytesNoCopy()); - temp = (char*)serialNumber; - if (serialNumberSize > 0) { - // check to see if this is a CTO serial number... - while (pos < serialNumberSize && temp[pos] != '-') pos++; - - if (pos < serialNumberSize) { // there was a hyphen, so it's a CTO serial number - memcpy(SerialNo, serialNumber + 12, 8); - memcpy(&SerialNo[8], serialNumber, 3); - SerialNo[11] = '-'; - memcpy(&SerialNo[12], serialNumber + 3, 8); - SerialNo[20] = 0; - } else { // just a normal serial number - memcpy(SerialNo, serialNumber + 13, 8); - memcpy(&SerialNo[8], serialNumber, 3); - SerialNo[11] = 0; - } - return OSString::withCString(SerialNo); - } - } - return NULL; + return lengthSaved; +} + +OSString* +IODTPlatformExpert::createSystemSerialNumberString(OSData* myProperty) +{ + UInt8* serialNumber; + unsigned int serialNumberSize; + unsigned short pos = 0; + char* temp; + char SerialNo[30]; + + if (myProperty != NULL) { + serialNumberSize = myProperty->getLength(); + serialNumber = (UInt8*)(myProperty->getBytesNoCopy()); + temp = (char*)serialNumber; + if (serialNumberSize > 0) { + // check to see if this is a CTO serial number... 
+ while (pos < serialNumberSize && temp[pos] != '-') { + pos++; + } + + if (pos < serialNumberSize) { // there was a hyphen, so it's a CTO serial number + memcpy(SerialNo, serialNumber + 12, 8); + memcpy(&SerialNo[8], serialNumber, 3); + SerialNo[11] = '-'; + memcpy(&SerialNo[12], serialNumber + 3, 8); + SerialNo[20] = 0; + } else { // just a normal serial number + memcpy(SerialNo, serialNumber + 13, 8); + memcpy(&SerialNo[8], serialNumber, 3); + SerialNo[11] = 0; + } + return OSString::withCString(SerialNo); + } + } + return NULL; } @@ -1594,91 +1768,99 @@ OSString* IODTPlatformExpert::createSystemSerialNumberString(OSData* myProperty) OSDefineMetaClassAndStructors(IOPlatformExpertDevice, IOService) -OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 0); -OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 1); -OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 2); -OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 3); +OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 0); +OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 1); +OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 2); +OSMetaClassDefineReservedUnused(IOPlatformExpertDevice, 3); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IOPlatformExpertDevice::compareName( OSString * name, - OSString ** matched ) const +bool +IOPlatformExpertDevice::compareName( OSString * name, + OSString ** matched ) const { - return( IODTCompareNubName( this, name, matched )); + return IODTCompareNubName( this, name, matched ); } bool IOPlatformExpertDevice::initWithArgs( - void * dtTop, void * p2, void * p3, void * p4 ) + void * dtTop, void * p2, void * p3, void * p4 ) { - IORegistryEntry * dt = 0; - bool ok; + IORegistryEntry * dt = 0; + bool ok; - // dtTop may be zero on non- device tree systems - if( dtTop && (dt = IODeviceTreeAlloc( dtTop ))) - ok = super::init( dt, gIODTPlane ); - else - ok = super::init(); + // dtTop may be zero on non- device tree systems + if (dtTop && (dt = IODeviceTreeAlloc( dtTop ))) { + ok = super::init( dt, gIODTPlane ); + } else { + ok = super::init(); + } - if( !ok) - return( false); + if (!ok) { + return false; + } - workLoop = IOWorkLoop::workLoop(); - if (!workLoop) - return false; + workLoop = IOWorkLoop::workLoop(); + if (!workLoop) { + return false; + } - return( true); + return true; } -IOWorkLoop *IOPlatformExpertDevice::getWorkLoop() const +IOWorkLoop * +IOPlatformExpertDevice::getWorkLoop() const { - return workLoop; + return workLoop; } -IOReturn IOPlatformExpertDevice::setProperties( OSObject * properties ) +IOReturn +IOPlatformExpertDevice::setProperties( OSObject * properties ) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } -IOReturn IOPlatformExpertDevice::newUserClient( task_t owningTask, void * securityID, - UInt32 type, OSDictionary * properties, - IOUserClient ** handler ) +IOReturn +IOPlatformExpertDevice::newUserClient( task_t owningTask, void * securityID, + UInt32 type, OSDictionary * properties, + IOUserClient ** handler ) { - IOReturn err = kIOReturnSuccess; - IOUserClient * newConnect = 0; - IOUserClient * theConnect = 0; + IOReturn err = kIOReturnSuccess; + IOUserClient * newConnect = 0; + IOUserClient * theConnect = 0; - switch (type) - { - case kIOKitDiagnosticsClientType: - newConnect = IOKitDiagnosticsClient::withTask(owningTask); - if (!newConnect) err = kIOReturnNotPermitted; - break; - default: - err = kIOReturnBadArgument; - } + switch (type) { + case kIOKitDiagnosticsClientType: + newConnect = 
IOKitDiagnosticsClient::withTask(owningTask); + if (!newConnect) { + err = kIOReturnNotPermitted; + } + break; + default: + err = kIOReturnBadArgument; + } - if (newConnect) - { - if ((false == newConnect->attach(this)) - || (false == newConnect->start(this))) - { - newConnect->detach( this ); - newConnect->release(); - err = kIOReturnNotPermitted; - } - else - theConnect = newConnect; - } + if (newConnect) { + if ((false == newConnect->attach(this)) + || (false == newConnect->start(this))) { + newConnect->detach( this ); + newConnect->release(); + err = kIOReturnNotPermitted; + } else { + theConnect = newConnect; + } + } - *handler = theConnect; - return (err); + *handler = theConnect; + return err; } -void IOPlatformExpertDevice::free() +void +IOPlatformExpertDevice::free() { - if (workLoop) - workLoop->release(); + if (workLoop) { + workLoop->release(); + } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1688,28 +1870,31 @@ void IOPlatformExpertDevice::free() OSDefineMetaClassAndStructors(IOPlatformDevice, IOService) -OSMetaClassDefineReservedUnused(IOPlatformDevice, 0); -OSMetaClassDefineReservedUnused(IOPlatformDevice, 1); -OSMetaClassDefineReservedUnused(IOPlatformDevice, 2); -OSMetaClassDefineReservedUnused(IOPlatformDevice, 3); +OSMetaClassDefineReservedUnused(IOPlatformDevice, 0); +OSMetaClassDefineReservedUnused(IOPlatformDevice, 1); +OSMetaClassDefineReservedUnused(IOPlatformDevice, 2); +OSMetaClassDefineReservedUnused(IOPlatformDevice, 3); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IOPlatformDevice::compareName( OSString * name, - OSString ** matched ) const +bool +IOPlatformDevice::compareName( OSString * name, + OSString ** matched ) const { - return( ((IOPlatformExpert *)getProvider())-> - compareNubName( this, name, matched )); + return ((IOPlatformExpert *)getProvider())-> + compareNubName( this, name, matched ); } -IOService * IOPlatformDevice::matchLocation( IOService * /* client */ ) +IOService * +IOPlatformDevice::matchLocation( IOService * /* client */ ) { - return( this ); + return this; } -IOReturn IOPlatformDevice::getResources( void ) +IOReturn +IOPlatformDevice::getResources( void ) { - return( ((IOPlatformExpert *)getProvider())->getNubResources( this )); + return ((IOPlatformExpert *)getProvider())->getNubResources( this ); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1722,24 +1907,27 @@ IOReturn IOPlatformDevice::getResources( void ) *********************************************************************/ class IOPanicPlatform : IOPlatformExpert { - OSDeclareDefaultStructors(IOPanicPlatform); + OSDeclareDefaultStructors(IOPanicPlatform); public: - bool start(IOService * provider) APPLE_KEXT_OVERRIDE; + bool start(IOService * provider) APPLE_KEXT_OVERRIDE; }; OSDefineMetaClassAndStructors(IOPanicPlatform, IOPlatformExpert); -bool IOPanicPlatform::start(IOService * provider) { - const char * platform_name = "(unknown platform name)"; +bool +IOPanicPlatform::start(IOService * provider) +{ + const char * platform_name = "(unknown platform name)"; - if (provider) platform_name = provider->getName(); + if (provider) { + platform_name = provider->getName(); + } - panic("Unable to find driver for this platform: \"%s\".\n", - platform_name); + panic("Unable to find driver for this platform: \"%s\".\n", + platform_name); - return false; + return false; } - diff --git a/iokit/Kernel/IOPolledInterface.cpp b/iokit/Kernel/IOPolledInterface.cpp index 
8218c5dde..cb982b0d6 100644 --- a/iokit/Kernel/IOPolledInterface.cpp +++ b/iokit/Kernel/IOPolledInterface.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2006-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -64,27 +64,27 @@ OSMetaClassDefineReservedUnused(IOPolledInterface, 15); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #ifndef kIOMediaPreferredBlockSizeKey -#define kIOMediaPreferredBlockSizeKey "Preferred Block Size" +#define kIOMediaPreferredBlockSizeKey "Preferred Block Size" #endif -enum { kDefaultIOSize = 128*1024 }; +enum { kDefaultIOSize = 128 * 1024 }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ class IOPolledFilePollers : public OSObject { - OSDeclareDefaultStructors(IOPolledFilePollers) + OSDeclareDefaultStructors(IOPolledFilePollers) public: - IOService * media; - OSArray * pollers; - IOBufferMemoryDescriptor * ioBuffer; - bool abortable; - bool io; - IOReturn ioStatus; - uint32_t openCount; - - static IOPolledFilePollers * copyPollers(IOService * media); + IOService * media; + OSArray * pollers; + IOBufferMemoryDescriptor * ioBuffer; + bool abortable; + bool io; + IOReturn ioStatus; + uint32_t openCount; + + static IOPolledFilePollers * copyPollers(IOService * media); }; OSDefineMetaClassAndStructors(IOPolledFilePollers, OSObject) @@ -94,70 +94,64 @@ OSDefineMetaClassAndStructors(IOPolledFilePollers, OSObject) IOPolledFilePollers * IOPolledFilePollers::copyPollers(IOService * media) { - IOPolledFilePollers * vars; - IOReturn err; - IOService * service; - OSObject * obj; - IORegistryEntry * next; - IORegistryEntry * child; - - if ((obj = media->copyProperty(kIOPolledInterfaceStackKey))) - { - return (OSDynamicCast(IOPolledFilePollers, obj)); - } - - do - { - vars = OSTypeAlloc(IOPolledFilePollers); - vars->init(); - - vars->pollers = OSArray::withCapacity(4); - if (!vars->pollers) - { - err = kIOReturnNoMemory; - break; - } - - next = vars->media = media; - do - { - IOPolledInterface * poller; - OSObject * obj; - - obj = next->getProperty(kIOPolledInterfaceSupportKey); - if (kOSBooleanFalse == obj) - { - vars->pollers->flushCollection(); - break; - } - else if ((poller = OSDynamicCast(IOPolledInterface, obj))) - vars->pollers->setObject(poller); - - if ((service = OSDynamicCast(IOService, next)) - && service->getDeviceMemory() - && !vars->pollers->getCount()) break; - - child = next; - } - while ((next = child->getParentEntry(gIOServicePlane)) - && child->isParent(next, 
gIOServicePlane, true)); - - if (!vars->pollers->getCount()) - { - err = kIOReturnUnsupported; - break; + IOPolledFilePollers * vars; + IOReturn err; + IOService * service; + OSObject * obj; + IORegistryEntry * next; + IORegistryEntry * child; + + if ((obj = media->copyProperty(kIOPolledInterfaceStackKey))) { + return OSDynamicCast(IOPolledFilePollers, obj); } - } - while (false); - media->setProperty(kIOPolledInterfaceStackKey, vars); - - return (vars); + do{ + vars = OSTypeAlloc(IOPolledFilePollers); + vars->init(); + + vars->pollers = OSArray::withCapacity(4); + if (!vars->pollers) { + err = kIOReturnNoMemory; + break; + } + + next = vars->media = media; + do{ + IOPolledInterface * poller; + OSObject * obj; + + obj = next->getProperty(kIOPolledInterfaceSupportKey); + if (kOSBooleanFalse == obj) { + vars->pollers->flushCollection(); + break; + } else if ((poller = OSDynamicCast(IOPolledInterface, obj))) { + vars->pollers->setObject(poller); + } + + if ((service = OSDynamicCast(IOService, next)) + && service->getDeviceMemory() + && !vars->pollers->getCount()) { + break; + } + + child = next; + }while ((next = child->getParentEntry(gIOServicePlane)) + && child->isParent(next, gIOServicePlane, true)); + + if (!vars->pollers->getCount()) { + err = kIOReturnUnsupported; + break; + } + }while (false); + + media->setProperty(kIOPolledInterfaceStackKey, vars); + + return vars; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static IOReturn +static IOReturn IOPolledFilePollersIODone(IOPolledFilePollers * vars, bool abortable); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -165,22 +159,20 @@ IOPolledFilePollersIODone(IOPolledFilePollers * vars, bool abortable); static IOReturn IOPolledFilePollersProbe(IOPolledFilePollers * vars) { - IOReturn err = kIOReturnError; - int32_t idx; - IOPolledInterface * poller; - - for (idx = vars->pollers->getCount() - 1; idx >= 0; idx--) - { - poller = (IOPolledInterface *) vars->pollers->getObject(idx); - err = poller->probe(vars->media); - if (err) - { - HIBLOG("IOPolledInterface::probe[%d] 0x%x\n", idx, err); - break; - } - } - - return (err); + IOReturn err = kIOReturnError; + int32_t idx; + IOPolledInterface * poller; + + for (idx = vars->pollers->getCount() - 1; idx >= 0; idx--) { + poller = (IOPolledInterface *) vars->pollers->getObject(idx); + err = poller->probe(vars->media); + if (err) { + HIBLOG("IOPolledInterface::probe[%d] 0x%x\n", idx, err); + break; + } + } + + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -188,56 +180,50 @@ IOPolledFilePollersProbe(IOPolledFilePollers * vars) IOReturn IOPolledFilePollersOpen(IOPolledFileIOVars * filevars, uint32_t state, bool abortable) { + IOPolledFilePollers * vars = filevars->pollers; + IOBufferMemoryDescriptor * ioBuffer; + IOPolledInterface * poller; + IOService * next; + IOReturn err = kIOReturnError; + int32_t idx; + + vars->abortable = abortable; + ioBuffer = 0; + + if (kIOPolledAfterSleepState == state) { + vars->ioStatus = 0; + vars->io = false; + } + (void) IOPolledFilePollersIODone(vars, false); + + if ((kIOPolledPreflightState == state) || (kIOPolledPreflightCoreDumpState == state)) { + ioBuffer = vars->ioBuffer; + if (!ioBuffer) { + vars->ioBuffer = ioBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionInOut, + 2 * kDefaultIOSize, page_size); + if (!ioBuffer) { + return kIOReturnNoMemory; + } + } + } - IOPolledFilePollers * vars = filevars->pollers; - IOBufferMemoryDescriptor * ioBuffer; 
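/*
 * Several functions in this file (copyPollers above, IOCopyMediaForDev and
 * IOPolledFileOpen below) rely on the do { ... } while (false) idiom that
 * the reformatting makes explicit: a single-pass pseudo-loop whose break
 * statements stand in for goto-based cleanup, so every failure path falls
 * through to one shared release site. Minimal shape of the idiom, with
 * hypothetical allocate()/configure() helpers standing in for the real work:
 *
 *   IOReturn err = kIOReturnSuccess;
 *   OSObject * obj = NULL;
 *
 *   do {
 *       obj = allocate();                        // hypothetical
 *       if (!obj) {
 *           err = kIOReturnNoMemory;
 *           break;                               // jump to shared cleanup
 *       }
 *       if (!configure(obj)) {                   // hypothetical
 *           err = kIOReturnError;
 *           break;
 *       }
 *   } while (false);
 *
 *   if (err && obj) {
 *       obj->release();                          // single cleanup path
 *   }
 *   return err;
 */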
- IOPolledInterface * poller; - IOService * next; - IOReturn err = kIOReturnError; - int32_t idx; - - vars->abortable = abortable; - ioBuffer = 0; - - if (kIOPolledAfterSleepState == state) - { - vars->ioStatus = 0; - vars->io = false; - } - (void) IOPolledFilePollersIODone(vars, false); - - if ((kIOPolledPreflightState == state) || (kIOPolledPreflightCoreDumpState == state)) - { - ioBuffer = vars->ioBuffer; - if (!ioBuffer) - { - vars->ioBuffer = ioBuffer = IOBufferMemoryDescriptor::withOptions(kIODirectionInOut, - 2 * kDefaultIOSize, page_size); - if (!ioBuffer) return (kIOReturnNoMemory); - } - } - - for (idx = vars->pollers->getCount() - 1; idx >= 0; idx--) - { - poller = (IOPolledInterface *) vars->pollers->getObject(idx); - err = poller->open(state, ioBuffer); - if (kIOReturnSuccess != err) - { - HIBLOG("IOPolledInterface::open[%d] 0x%x\n", idx, err); - break; - } - } - if ((kIOReturnSuccess == err) && (kIOPolledPreflightState == state)) - { - next = vars->media; - while (next) - { - next->setProperty(kIOPolledInterfaceActiveKey, kOSBooleanTrue); - next = next->getProvider(); + for (idx = vars->pollers->getCount() - 1; idx >= 0; idx--) { + poller = (IOPolledInterface *) vars->pollers->getObject(idx); + err = poller->open(state, ioBuffer); + if (kIOReturnSuccess != err) { + HIBLOG("IOPolledInterface::open[%d] 0x%x\n", idx, err); + break; + } + } + if ((kIOReturnSuccess == err) && (kIOPolledPreflightState == state)) { + next = vars->media; + while (next) { + next->setProperty(kIOPolledInterfaceActiveKey, kOSBooleanTrue); + next = next->getProvider(); + } } - } - return (err); + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -245,84 +231,84 @@ IOPolledFilePollersOpen(IOPolledFileIOVars * filevars, uint32_t state, bool abor IOReturn IOPolledFilePollersClose(IOPolledFileIOVars * filevars, uint32_t state) { - IOPolledFilePollers * vars = filevars->pollers; - IOPolledInterface * poller; - IORegistryEntry * next; - IOReturn err; - int32_t idx; - - (void) IOPolledFilePollersIODone(vars, false); - - if ((kIOPolledPostflightState == state) || (kIOPolledPostflightCoreDumpState == state)) - { - vars->openCount--; - } - - for (idx = 0, err = kIOReturnSuccess; - (poller = (IOPolledInterface *) vars->pollers->getObject(idx)); - idx++) - { - err = poller->close(state); - if ((kIOReturnSuccess != err) && (kIOPolledBeforeSleepStateAborted == state)) - { - err = poller->close(kIOPolledBeforeSleepState); - } - if (err) HIBLOG("IOPolledInterface::close[%d] 0x%x\n", idx, err); - } - - if (kIOPolledPostflightState == state) - { - next = vars->media; - while (next) - { - next->removeProperty(kIOPolledInterfaceActiveKey); - next = next->getParentEntry(gIOServicePlane); + IOPolledFilePollers * vars = filevars->pollers; + IOPolledInterface * poller; + IORegistryEntry * next; + IOReturn err; + int32_t idx; + + (void) IOPolledFilePollersIODone(vars, false); + + if ((kIOPolledPostflightState == state) || (kIOPolledPostflightCoreDumpState == state)) { + vars->openCount--; } - } - if ((kIOPolledPostflightState == state) || (kIOPolledPostflightCoreDumpState == state)) do - { - if (vars->openCount) break; - if (vars->ioBuffer) - { - vars->ioBuffer->release(); - vars->ioBuffer = 0; + for (idx = 0, err = kIOReturnSuccess; + (poller = (IOPolledInterface *) vars->pollers->getObject(idx)); + idx++) { + err = poller->close(state); + if ((kIOReturnSuccess != err) && (kIOPolledBeforeSleepStateAborted == state)) { + err = poller->close(kIOPolledBeforeSleepState); + } + if (err) { + 
HIBLOG("IOPolledInterface::close[%d] 0x%x\n", idx, err); + } } - } - while (false); - return (err); + if (kIOPolledPostflightState == state) { + next = vars->media; + while (next) { + next->removeProperty(kIOPolledInterfaceActiveKey); + next = next->getParentEntry(gIOServicePlane); + } + } + + if ((kIOPolledPostflightState == state) || (kIOPolledPostflightCoreDumpState == state)) { + do{ + if (vars->openCount) { + break; + } + if (vars->ioBuffer) { + vars->ioBuffer->release(); + vars->ioBuffer = 0; + } + }while (false); + } + + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IOReturn IOPolledInterface::setEncryptionKey(const uint8_t * key, size_t keySize) +IOReturn +IOPolledInterface::setEncryptionKey(const uint8_t * key, size_t keySize) { - return (kIOReturnUnsupported); + return kIOReturnUnsupported; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOReturn IOPolledFilePollersSetEncryptionKey(IOPolledFileIOVars * filevars, - const uint8_t * key, size_t keySize) + const uint8_t * key, size_t keySize) { - IOReturn ret = kIOReturnUnsupported; - IOReturn err; - int32_t idx; - IOPolledFilePollers * vars = filevars->pollers; - IOPolledInterface * poller; - - for (idx = 0; - (poller = (IOPolledInterface *) vars->pollers->getObject(idx)); - idx++) - { - poller = (IOPolledInterface *) vars->pollers->getObject(idx); - err = poller->setEncryptionKey(key, keySize); - if (kIOReturnSuccess == err) ret = err; - } - - return (ret); + IOReturn ret = kIOReturnUnsupported; + IOReturn err; + int32_t idx; + IOPolledFilePollers * vars = filevars->pollers; + IOPolledInterface * poller; + + for (idx = 0; + (poller = (IOPolledInterface *) vars->pollers->getObject(idx)); + idx++) { + poller = (IOPolledInterface *) vars->pollers->getObject(idx); + err = poller->setEncryptionKey(key, keySize); + if (kIOReturnSuccess == err) { + ret = err; + } + } + + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -330,53 +316,55 @@ IOPolledFilePollersSetEncryptionKey(IOPolledFileIOVars * filevars, IOMemoryDescriptor * IOPolledFileGetIOBuffer(IOPolledFileIOVars * vars) { - return (vars->pollers->ioBuffer); + return vars->pollers->ioBuffer; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static void IOPolledIOComplete(void * target, - void * parameter, - IOReturn status, - UInt64 actualByteCount) + void * parameter, + IOReturn status, + UInt64 actualByteCount) { - IOPolledFilePollers * vars = (IOPolledFilePollers *) parameter; + IOPolledFilePollers * vars = (IOPolledFilePollers *) parameter; - vars->ioStatus = status; + vars->ioStatus = status; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ static IOReturn -IOStartPolledIO(IOPolledFilePollers * vars, - uint32_t operation, uint32_t bufferOffset, - uint64_t deviceOffset, uint64_t length) +IOStartPolledIO(IOPolledFilePollers * vars, + uint32_t operation, uint32_t bufferOffset, + uint64_t deviceOffset, uint64_t length) { - IOReturn err; - IOPolledInterface * poller; - IOPolledCompletion completion; - - err = vars->ioStatus; - if (kIOReturnSuccess != err) return (err); + IOReturn err; + IOPolledInterface * poller; + IOPolledCompletion completion; - completion.target = 0; - completion.action = &IOPolledIOComplete; - completion.parameter = vars; - - vars->ioStatus = -1; + err = vars->ioStatus; + if (kIOReturnSuccess != err) { + return err; + } - poller = (IOPolledInterface *) vars->pollers->getObject(0); - err = 
poller->startIO(operation, bufferOffset, deviceOffset, length, completion); - if (err) { - if (kernel_debugger_entry_count) { - HIBLOG("IOPolledInterface::startIO[%d] 0x%x\n", 0, err); - } else { - HIBLOGFROMPANIC("IOPolledInterface::IOStartPolledIO(0x%p, %d, 0x%x, 0x%llx, %llu) : poller->startIO(%d, 0x%x, 0x%llx, %llu, completion) returned 0x%x", - vars, operation, bufferOffset, deviceOffset, length, operation, bufferOffset, deviceOffset, length, err); + completion.target = 0; + completion.action = &IOPolledIOComplete; + completion.parameter = vars; + + vars->ioStatus = -1; + + poller = (IOPolledInterface *) vars->pollers->getObject(0); + err = poller->startIO(operation, bufferOffset, deviceOffset, length, completion); + if (err) { + if (kernel_debugger_entry_count) { + HIBLOG("IOPolledInterface::startIO[%d] 0x%x\n", 0, err); + } else { + HIBLOGFROMPANIC("IOPolledInterface::IOStartPolledIO(0x%p, %d, 0x%x, 0x%llx, %llu) : poller->startIO(%d, 0x%x, 0x%llx, %llu, completion) returned 0x%x", + vars, operation, bufferOffset, deviceOffset, length, operation, bufferOffset, deviceOffset, length, err); + } } - } - return (err); + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -384,423 +372,437 @@ IOStartPolledIO(IOPolledFilePollers * vars, static IOReturn IOPolledFilePollersIODone(IOPolledFilePollers * vars, bool abortable) { - IOReturn err = kIOReturnSuccess; - int32_t idx = 0; - IOPolledInterface * poller; - AbsoluteTime deadline; - - if (!vars->io) return (kIOReturnSuccess); + IOReturn err = kIOReturnSuccess; + int32_t idx = 0; + IOPolledInterface * poller; + AbsoluteTime deadline; - abortable &= vars->abortable; - - clock_interval_to_deadline(2000, kMillisecondScale, &deadline); + if (!vars->io) { + return kIOReturnSuccess; + } - while (-1 == vars->ioStatus) - { - for (idx = 0; - (poller = (IOPolledInterface *) vars->pollers->getObject(idx)); - idx++) - { - IOReturn newErr; - newErr = poller->checkForWork(); - if ((newErr == kIOReturnAborted) && !abortable) - newErr = kIOReturnSuccess; - if (kIOReturnSuccess == err) - err = newErr; - } - if ((false) && (kIOReturnSuccess == err) && (mach_absolute_time() > AbsoluteTime_to_scalar(&deadline))) - { - HIBLOG("IOPolledInterface::forced timeout\n"); - vars->ioStatus = kIOReturnTimeout; - } - } - vars->io = false; + abortable &= vars->abortable; + + clock_interval_to_deadline(2000, kMillisecondScale, &deadline); + + while (-1 == vars->ioStatus) { + for (idx = 0; + (poller = (IOPolledInterface *) vars->pollers->getObject(idx)); + idx++) { + IOReturn newErr; + newErr = poller->checkForWork(); + if ((newErr == kIOReturnAborted) && !abortable) { + newErr = kIOReturnSuccess; + } + if (kIOReturnSuccess == err) { + err = newErr; + } + } + if ((false) && (kIOReturnSuccess == err) && (mach_absolute_time() > AbsoluteTime_to_scalar(&deadline))) { + HIBLOG("IOPolledInterface::forced timeout\n"); + vars->ioStatus = kIOReturnTimeout; + } + } + vars->io = false; #if HIBERNATION - if ((kIOReturnSuccess == err) && abortable && hibernate_should_abort()) - { - err = kIOReturnAborted; - HIBLOG("IOPolledInterface::checkForWork sw abort\n"); - } + if ((kIOReturnSuccess == err) && abortable && hibernate_should_abort()) { + err = kIOReturnAborted; + HIBLOG("IOPolledInterface::checkForWork sw abort\n"); + } #endif - if (err) - { - HIBLOG("IOPolledInterface::checkForWork[%d] 0x%x\n", idx, err); - } - else - { - err = vars->ioStatus; - if (kIOReturnSuccess != err) HIBLOG("IOPolledInterface::ioStatus 0x%x\n", err); - } + if (err) { + 
HIBLOG("IOPolledInterface::checkForWork[%d] 0x%x\n", idx, err); + } else { + err = vars->ioStatus; + if (kIOReturnSuccess != err) { + HIBLOG("IOPolledInterface::ioStatus 0x%x\n", err); + } + } - return (err); + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -struct _OpenFileContext -{ - OSData * extents; - uint64_t size; +struct _OpenFileContext { + OSData * extents; + uint64_t size; }; static void file_extent_callback(void * ref, uint64_t start, uint64_t length) { - _OpenFileContext * ctx = (_OpenFileContext *) ref; - IOPolledFileExtent extent; + _OpenFileContext * ctx = (_OpenFileContext *) ref; + IOPolledFileExtent extent; - extent.start = start; - extent.length = length; - ctx->extents->appendBytes(&extent, sizeof(extent)); - ctx->size += length; + extent.start = start; + extent.length = length; + ctx->extents->appendBytes(&extent, sizeof(extent)); + ctx->size += length; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static IOService * +static IOService * IOCopyMediaForDev(dev_t device) { - OSDictionary * matching; - OSNumber * num; - OSIterator * iter; - IOService * result = 0; - - matching = IOService::serviceMatching("IOMedia"); - if (!matching) - return (0); - do - { - num = OSNumber::withNumber(major(device), 32); - if (!num) - break; - matching->setObject(kIOBSDMajorKey, num); - num->release(); - num = OSNumber::withNumber(minor(device), 32); - if (!num) - break; - matching->setObject(kIOBSDMinorKey, num); - num->release(); - if (!num) - break; - iter = IOService::getMatchingServices(matching); - if (iter) - { - result = (IOService *) iter->getNextObject(); - result->retain(); - iter->release(); - } - } - while (false); - matching->release(); - - return (result); + OSDictionary * matching; + OSNumber * num; + OSIterator * iter; + IOService * result = 0; + + matching = IOService::serviceMatching("IOMedia"); + if (!matching) { + return 0; + } + do{ + num = OSNumber::withNumber(major(device), 32); + if (!num) { + break; + } + matching->setObject(kIOBSDMajorKey, num); + num->release(); + num = OSNumber::withNumber(minor(device), 32); + if (!num) { + break; + } + matching->setObject(kIOBSDMinorKey, num); + num->release(); + if (!num) { + break; + } + iter = IOService::getMatchingServices(matching); + if (iter) { + result = (IOService *) iter->getNextObject(); + result->retain(); + iter->release(); + } + }while (false); + matching->release(); + + return result; } #define APFSMEDIA_GETHIBERKEY "getHiberKey" -static IOReturn -IOGetVolumeCryptKey(dev_t block_dev, OSString ** pKeyUUID, - uint8_t * volumeCryptKey, size_t * keySize) +static IOReturn +IOGetVolumeCryptKey(dev_t block_dev, OSString ** pKeyUUID, + uint8_t * volumeCryptKey, size_t * keySize) { - IOReturn err; - IOService * part; - OSString * keyUUID = 0; - OSString * keyStoreUUID = 0; - uuid_t volumeKeyUUID; - aks_volume_key_t vek; - size_t callerKeySize; - - static IOService * sKeyStore; - - part = IOCopyMediaForDev(block_dev); - if (!part) return (kIOReturnNotFound); - - callerKeySize = *keySize; - // Try APFS first - { - uuid_t volUuid = {0}; - err = part->callPlatformFunction(APFSMEDIA_GETHIBERKEY, false, &volUuid, volumeCryptKey, keySize, keySize); - if (kIOReturnBadArgument == err) + IOReturn err; + IOService * part; + OSString * keyUUID = 0; + OSString * keyStoreUUID = 0; + uuid_t volumeKeyUUID; + aks_volume_key_t vek; + size_t callerKeySize; + + static IOService * 
sKeyStore; + + part = IOCopyMediaForDev(block_dev); + if (!part) { + return kIOReturnNotFound; + } + + callerKeySize = *keySize; + // Try APFS first { - // apfs fails on buffer size >32 - *keySize = 32; - err = part->callPlatformFunction(APFSMEDIA_GETHIBERKEY, false, &volUuid, volumeCryptKey, keySize, keySize); + uuid_t volUuid = {0}; + err = part->callPlatformFunction(APFSMEDIA_GETHIBERKEY, false, &volUuid, volumeCryptKey, keySize, keySize); + if (kIOReturnBadArgument == err) { + // apfs fails on buffer size >32 + *keySize = 32; + err = part->callPlatformFunction(APFSMEDIA_GETHIBERKEY, false, &volUuid, volumeCryptKey, keySize, keySize); + } + if (err != kIOReturnSuccess) { + *keySize = 0; + } else { + // No need to create uuid string if it's not requested + if (pKeyUUID) { + uuid_string_t volUuidStr; + uuid_unparse(volUuid, volUuidStr); + *pKeyUUID = OSString::withCString(volUuidStr); + } + + part->release(); + return kIOReturnSuccess; + } } - if (err != kIOReturnSuccess) *keySize = 0; - else - { - // No need to create uuid string if it's not requested - if (pKeyUUID) - { - uuid_string_t volUuidStr; - uuid_unparse(volUuid, volUuidStr); - *pKeyUUID = OSString::withCString(volUuidStr); - } - - part->release(); - return kIOReturnSuccess; - } - } - - // Then old CS path - err = part->callPlatformFunction(PLATFORM_FUNCTION_GET_MEDIA_ENCRYPTION_KEY_UUID, false, - (void *) &keyUUID, (void *) &keyStoreUUID, NULL, NULL); - if ((kIOReturnSuccess == err) && keyUUID && keyStoreUUID) - { + + // Then old CS path + err = part->callPlatformFunction(PLATFORM_FUNCTION_GET_MEDIA_ENCRYPTION_KEY_UUID, false, + (void *) &keyUUID, (void *) &keyStoreUUID, NULL, NULL); + if ((kIOReturnSuccess == err) && keyUUID && keyStoreUUID) { // IOLog("got volume key %s\n", keyStoreUUID->getCStringNoCopy()); - if (!sKeyStore) - sKeyStore = (IOService *) IORegistryEntry::fromPath(AKS_SERVICE_PATH, gIOServicePlane); - if (sKeyStore) - err = uuid_parse(keyStoreUUID->getCStringNoCopy(), volumeKeyUUID); - else - err = kIOReturnNoResources; - if (kIOReturnSuccess == err) - err = sKeyStore->callPlatformFunction(gAKSGetKey, true, volumeKeyUUID, &vek, NULL, NULL); - if (kIOReturnSuccess != err) - IOLog("volume key err 0x%x\n", err); - else - { - if (vek.key.keybytecount <= callerKeySize) *keySize = vek.key.keybytecount; - bcopy(&vek.key.keybytes[0], volumeCryptKey, *keySize); - } - bzero(&vek, sizeof(vek)); - - if (pKeyUUID) - { - // Create a copy because the caller would release it - *pKeyUUID = OSString::withString(keyUUID); - } - } - - part->release(); - return (err); + if (!sKeyStore) { + sKeyStore = (IOService *) IORegistryEntry::fromPath(AKS_SERVICE_PATH, gIOServicePlane); + } + if (sKeyStore) { + err = uuid_parse(keyStoreUUID->getCStringNoCopy(), volumeKeyUUID); + } else { + err = kIOReturnNoResources; + } + if (kIOReturnSuccess == err) { + err = sKeyStore->callPlatformFunction(gAKSGetKey, true, volumeKeyUUID, &vek, NULL, NULL); + } + if (kIOReturnSuccess != err) { + IOLog("volume key err 0x%x\n", err); + } else { + if (vek.key.keybytecount <= callerKeySize) { + *keySize = vek.key.keybytecount; + } + bcopy(&vek.key.keybytes[0], volumeCryptKey, *keySize); + } + bzero(&vek, sizeof(vek)); + + if (pKeyUUID) { + // Create a copy because the caller would release it + *pKeyUUID = OSString::withString(keyUUID); + } + } + + part->release(); + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOReturn IOPolledFileOpen(const char * filename, - uint32_t flags, - uint64_t setFileSize, uint64_t 
fsFreeSize, - void * write_file_addr, size_t write_file_len, - IOPolledFileIOVars ** fileVars, - OSData ** imagePath, - uint8_t * volumeCryptKey, size_t * keySize) + uint32_t flags, + uint64_t setFileSize, uint64_t fsFreeSize, + void * write_file_addr, size_t write_file_len, + IOPolledFileIOVars ** fileVars, + OSData ** imagePath, + uint8_t * volumeCryptKey, size_t * keySize) { - IOReturn err = kIOReturnSuccess; - IOPolledFileIOVars * vars; - _OpenFileContext ctx; - OSData * extentsData = NULL; - OSNumber * num; - IOService * part = 0; - dev_t block_dev; - dev_t image_dev; - AbsoluteTime startTime, endTime; - uint64_t nsec; - - vars = IONew(IOPolledFileIOVars, 1); - if (!vars) return (kIOReturnNoMemory); - bzero(vars, sizeof(*vars)); - vars->allocated = true; - - do - { - extentsData = OSData::withCapacity(32); - ctx.extents = extentsData; - ctx.size = 0; - clock_get_uptime(&startTime); - - vars->fileRef = kern_open_file_for_direct_io(filename, - flags, - &file_extent_callback, &ctx, - setFileSize, - fsFreeSize, - // write file: - 0, write_file_addr, write_file_len, - // results - &block_dev, - &image_dev, - &vars->block0, - &vars->maxiobytes, - &vars->flags); + IOReturn err = kIOReturnSuccess; + IOPolledFileIOVars * vars; + _OpenFileContext ctx; + OSData * extentsData = NULL; + OSNumber * num; + IOService * part = 0; + dev_t block_dev; + dev_t image_dev; + AbsoluteTime startTime, endTime; + uint64_t nsec; + + vars = IONew(IOPolledFileIOVars, 1); + if (!vars) { + return kIOReturnNoMemory; + } + bzero(vars, sizeof(*vars)); + vars->allocated = true; + + do{ + extentsData = OSData::withCapacity(32); + ctx.extents = extentsData; + ctx.size = 0; + clock_get_uptime(&startTime); + + vars->fileRef = kern_open_file_for_direct_io(filename, + flags, + &file_extent_callback, &ctx, + setFileSize, + fsFreeSize, + // write file: + 0, write_file_addr, write_file_len, + // results + &block_dev, + &image_dev, + &vars->block0, + &vars->maxiobytes, + &vars->flags); #if 0 - uint32_t msDelay = (131071 & random()); - HIBLOG("sleep %d\n", msDelay); - IOSleep(msDelay); + uint32_t msDelay = (131071 & random()); + HIBLOG("sleep %d\n", msDelay); + IOSleep(msDelay); #endif - clock_get_uptime(&endTime); - SUB_ABSOLUTETIME(&endTime, &startTime); - absolutetime_to_nanoseconds(endTime, &nsec); - - if (!vars->fileRef) err = kIOReturnNoSpace; - - HIBLOG("kern_open_file_for_direct_io took %qd ms\n", nsec / 1000000ULL); - if (kIOReturnSuccess != err) break; - - HIBLOG("Opened file %s, size %qd, extents %ld, maxio %qx ssd %d\n", filename, ctx.size, - (extentsData->getLength() / sizeof(IOPolledFileExtent)) - 1, - vars->maxiobytes, kIOPolledFileSSD & vars->flags); - assert(!vars->block0); - if (extentsData->getLength() < sizeof(IOPolledFileExtent)) - { - err = kIOReturnNoSpace; - break; - } - - vars->fileSize = ctx.size; - vars->extentMap = (IOPolledFileExtent *) extentsData->getBytesNoCopy(); - - part = IOCopyMediaForDev(image_dev); - if (!part) - { - err = kIOReturnNotFound; - break; - } - - if (!(vars->pollers = IOPolledFilePollers::copyPollers(part))) break; - - if ((num = OSDynamicCast(OSNumber, part->getProperty(kIOMediaPreferredBlockSizeKey)))) - vars->blockSize = num->unsigned32BitValue(); - if (vars->blockSize < 4096) vars->blockSize = 4096; - - HIBLOG("polled file major %d, minor %d, blocksize %ld, pollers %d\n", - major(image_dev), minor(image_dev), (long)vars->blockSize, - vars->pollers->pollers->getCount()); - - OSString * keyUUID = NULL; - if (volumeCryptKey) - { - err = IOGetVolumeCryptKey(block_dev, &keyUUID, 
volumeCryptKey, keySize); - } - - *fileVars = vars; - vars->fileExtents = extentsData; - - // make imagePath - OSData * data; - if (imagePath) - { + clock_get_uptime(&endTime); + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nsec); + + if (!vars->fileRef) { + err = kIOReturnNoSpace; + } + + HIBLOG("kern_open_file_for_direct_io took %qd ms\n", nsec / 1000000ULL); + if (kIOReturnSuccess != err) { + break; + } + + HIBLOG("Opened file %s, size %qd, extents %ld, maxio %qx ssd %d\n", filename, ctx.size, + (extentsData->getLength() / sizeof(IOPolledFileExtent)) - 1, + vars->maxiobytes, kIOPolledFileSSD & vars->flags); + assert(!vars->block0); + if (extentsData->getLength() < sizeof(IOPolledFileExtent)) { + err = kIOReturnNoSpace; + break; + } + + vars->fileSize = ctx.size; + vars->extentMap = (IOPolledFileExtent *) extentsData->getBytesNoCopy(); + + part = IOCopyMediaForDev(image_dev); + if (!part) { + err = kIOReturnNotFound; + break; + } + + if (!(vars->pollers = IOPolledFilePollers::copyPollers(part))) { + break; + } + + if ((num = OSDynamicCast(OSNumber, part->getProperty(kIOMediaPreferredBlockSizeKey)))) { + vars->blockSize = num->unsigned32BitValue(); + } + if (vars->blockSize < 4096) { + vars->blockSize = 4096; + } + + HIBLOG("polled file major %d, minor %d, blocksize %ld, pollers %d\n", + major(image_dev), minor(image_dev), (long)vars->blockSize, + vars->pollers->pollers->getCount()); + + OSString * keyUUID = NULL; + if (volumeCryptKey) { + err = IOGetVolumeCryptKey(block_dev, &keyUUID, volumeCryptKey, keySize); + } + + *fileVars = vars; + vars->fileExtents = extentsData; + + // make imagePath + OSData * data; + if (imagePath) { #if defined(__i386__) || defined(__x86_64__) - char str2[24 + sizeof(uuid_string_t) + 2]; - - if (keyUUID) - snprintf(str2, sizeof(str2), "%qx:%s", - vars->extentMap[0].start, keyUUID->getCStringNoCopy()); - else - snprintf(str2, sizeof(str2), "%qx", vars->extentMap[0].start); - - err = IOService::getPlatform()->callPlatformFunction( - gIOCreateEFIDevicePathSymbol, false, - (void *) part, (void *) str2, - (void *) (uintptr_t) true, (void *) &data); + char str2[24 + sizeof(uuid_string_t) + 2]; + + if (keyUUID) { + snprintf(str2, sizeof(str2), "%qx:%s", + vars->extentMap[0].start, keyUUID->getCStringNoCopy()); + } else { + snprintf(str2, sizeof(str2), "%qx", vars->extentMap[0].start); + } + + err = IOService::getPlatform()->callPlatformFunction( + gIOCreateEFIDevicePathSymbol, false, + (void *) part, (void *) str2, + (void *) (uintptr_t) true, (void *) &data); #else - data = 0; - err = kIOReturnSuccess; + data = 0; + err = kIOReturnSuccess; #endif - if (kIOReturnSuccess != err) - { - HIBLOG("error 0x%x getting path\n", err); - break; - } - *imagePath = data; - } - - // Release key UUID if we have one - if (keyUUID) - { - keyUUID->release(); - keyUUID = NULL; // Just in case - } - } - while (false); - - if (kIOReturnSuccess != err) - { - HIBLOG("error 0x%x opening polled file\n", err); - IOPolledFileClose(&vars, 0, 0, 0, 0, 0); - if (extentsData) extentsData->release(); - } - - if (part) part->release(); - - return (err); + if (kIOReturnSuccess != err) { + HIBLOG("error 0x%x getting path\n", err); + break; + } + *imagePath = data; + } + + // Release key UUID if we have one + if (keyUUID) { + keyUUID->release(); + keyUUID = NULL; // Just in case + } + }while (false); + + if (kIOReturnSuccess != err) { + HIBLOG("error 0x%x opening polled file\n", err); + IOPolledFileClose(&vars, 0, 0, 0, 0, 0); + if (extentsData) { + 
extentsData->release(); + } + } + + if (part) { + part->release(); + } + + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOReturn IOPolledFileClose(IOPolledFileIOVars ** pVars, - off_t write_offset, void * addr, size_t write_length, - off_t discard_offset, off_t discard_end) + off_t write_offset, void * addr, size_t write_length, + off_t discard_offset, off_t discard_end) { - IOPolledFileIOVars * vars; - - vars = *pVars; - if (!vars) return(kIOReturnSuccess); - - if (vars->fileRef) - { - kern_close_file_for_direct_io(vars->fileRef, write_offset, addr, write_length, - discard_offset, discard_end); - vars->fileRef = NULL; - } - if (vars->fileExtents) - { - vars->fileExtents->release(); - vars->fileExtents = 0; - } - if (vars->pollers) - { - vars->pollers->release(); - vars->pollers = 0; - } - - if (vars->allocated) IODelete(vars, IOPolledFileIOVars, 1); - else bzero(vars, sizeof(IOPolledFileIOVars)); - *pVars = NULL; - - return (kIOReturnSuccess); + IOPolledFileIOVars * vars; + + vars = *pVars; + if (!vars) { + return kIOReturnSuccess; + } + + if (vars->fileRef) { + kern_close_file_for_direct_io(vars->fileRef, write_offset, addr, write_length, + discard_offset, discard_end); + vars->fileRef = NULL; + } + if (vars->fileExtents) { + vars->fileExtents->release(); + vars->fileExtents = 0; + } + if (vars->pollers) { + vars->pollers->release(); + vars->pollers = 0; + } + + if (vars->allocated) { + IODelete(vars, IOPolledFileIOVars, 1); + } else { + bzero(vars, sizeof(IOPolledFileIOVars)); + } + *pVars = NULL; + + return kIOReturnSuccess; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOReturn IOPolledFilePollersSetup(IOPolledFileIOVars * vars, - uint32_t openState) + uint32_t openState) { - IOReturn err; - - err = kIOReturnSuccess; - do - { - if (!vars->pollers->openCount) - { - err = IOPolledFilePollersProbe(vars->pollers); - if (kIOReturnSuccess != err) break; - } - err = IOPolledFilePollersOpen(vars, openState, false); - if (kIOReturnSuccess != err) break; - if ((kIOPolledPreflightState == openState) || (kIOPolledPreflightCoreDumpState == openState)) - { - vars->pollers->openCount++; + IOReturn err; + + err = kIOReturnSuccess; + do{ + if (!vars->pollers->openCount) { + err = IOPolledFilePollersProbe(vars->pollers); + if (kIOReturnSuccess != err) { + break; + } + } + err = IOPolledFilePollersOpen(vars, openState, false); + if (kIOReturnSuccess != err) { + break; + } + if ((kIOPolledPreflightState == openState) || (kIOPolledPreflightCoreDumpState == openState)) { + vars->pollers->openCount++; + } + vars->pollers->io = false; + vars->buffer = (uint8_t *) vars->pollers->ioBuffer->getBytesNoCopy(); + vars->bufferHalf = 0; + vars->bufferOffset = 0; + vars->bufferSize = (vars->pollers->ioBuffer->getLength() >> 1); + + if (vars->maxiobytes < vars->bufferSize) { + vars->bufferSize = vars->maxiobytes; + } + }while (false); + + if (kIOReturnSuccess != err) { + HIBLOG("IOPolledFilePollersSetup(%d) error 0x%x\n", openState, err); } - vars->pollers->io = false; - vars->buffer = (uint8_t *) vars->pollers->ioBuffer->getBytesNoCopy(); - vars->bufferHalf = 0; - vars->bufferOffset = 0; - vars->bufferSize = (vars->pollers->ioBuffer->getLength() >> 1); - if (vars->maxiobytes < vars->bufferSize) vars->bufferSize = vars->maxiobytes; - } - while (false); - - if (kIOReturnSuccess != err) HIBLOG("IOPolledFilePollersSetup(%d) error 0x%x\n", openState, err); - - return (err); + return err; } @@ -809,169 +811,169 @@ 
IOPolledFilePollersSetup(IOPolledFileIOVars * vars, IOReturn IOPolledFileSeek(IOPolledFileIOVars * vars, uint64_t position) { - IOPolledFileExtent * extentMap; + IOPolledFileExtent * extentMap; - extentMap = vars->extentMap; + extentMap = vars->extentMap; - vars->position = position; + vars->position = position; - if (position > vars->fileSize) { - HIBLOG("IOPolledFileSeek: called to seek to 0x%llx greater than file size of 0x%llx\n", vars->position, vars->fileSize); - return kIOReturnNoSpace; - } + if (position > vars->fileSize) { + HIBLOG("IOPolledFileSeek: called to seek to 0x%llx greater than file size of 0x%llx\n", vars->position, vars->fileSize); + return kIOReturnNoSpace; + } - while (position >= extentMap->length) - { - position -= extentMap->length; - extentMap++; - } + while (position >= extentMap->length) { + position -= extentMap->length; + extentMap++; + } - vars->currentExtent = extentMap; - vars->extentRemaining = extentMap->length - position; - vars->extentPosition = vars->position - position; + vars->currentExtent = extentMap; + vars->extentRemaining = extentMap->length - position; + vars->extentPosition = vars->position - position; - if (vars->bufferSize <= vars->extentRemaining) - vars->bufferLimit = vars->bufferSize; - else - vars->bufferLimit = vars->extentRemaining; + if (vars->bufferSize <= vars->extentRemaining) { + vars->bufferLimit = vars->bufferSize; + } else { + vars->bufferLimit = vars->extentRemaining; + } - return (kIOReturnSuccess); + return kIOReturnSuccess; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOReturn IOPolledFileWrite(IOPolledFileIOVars * vars, - const uint8_t * bytes, IOByteCount size, - IOPolledFileCryptVars * cryptvars) + const uint8_t * bytes, IOByteCount size, + IOPolledFileCryptVars * cryptvars) { - IOReturn err = kIOReturnSuccess; - IOByteCount copy, original_size = size; - bool flush = false; - - do - { - if (!bytes && !size) - { - // seek to end of block & flush - size = vars->position & (vars->blockSize - 1); - if (size) - size = vars->blockSize - size; - flush = true; - // use some garbage for the fill - bytes = vars->buffer + vars->bufferOffset; - } - - copy = vars->bufferLimit - vars->bufferOffset; - if (copy > size) - copy = size; - else - flush = true; - - if (bytes) - { + IOReturn err = kIOReturnSuccess; + IOByteCount copy, original_size = size; + bool flush = false; + + do{ + if (!bytes && !size) { + // seek to end of block & flush + size = vars->position & (vars->blockSize - 1); + if (size) { + size = vars->blockSize - size; + } + flush = true; + // use some garbage for the fill + bytes = vars->buffer + vars->bufferOffset; + } + + copy = vars->bufferLimit - vars->bufferOffset; + if (copy > size) { + copy = size; + } else { + flush = true; + } + + if (bytes) { #if KASAN - /* Since this may copy mach-o segments in bulk, use the nosan variants of bcopy to - * avoid triggering global redzone sanitizer violations when accessing - * interstices between 'C' structures - */ - __nosan_bcopy(bytes, vars->buffer + vars->bufferHalf + vars->bufferOffset, copy); + /* Since this may copy mach-o segments in bulk, use the nosan variants of bcopy to + * avoid triggering global redzone sanitizer violations when accessing + * interstices between 'C' structures + */ + __nosan_bcopy(bytes, vars->buffer + vars->bufferHalf + vars->bufferOffset, copy); #else - bcopy(bytes, vars->buffer + vars->bufferHalf + vars->bufferOffset, copy); + bcopy(bytes, vars->buffer + vars->bufferHalf + vars->bufferOffset, copy); #endif - 
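IOPolledFileSeek, reformatted above, translates a logical file offset into an (extent, offset) pair by peeling whole extents off the front of the position, then caches extentRemaining/extentPosition and clamps bufferLimit to whichever is smaller, the buffer or what is left of the extent. Below is a self-contained userland model of that walk with made-up extent values; the kernel has already range-checked position against fileSize before walking.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Extent { uint64_t start; uint64_t length; };   // device offset, byte count

    int main()
    {
        std::vector<Extent> map = { { 4096, 8192 }, { 65536, 4096 } };
        uint64_t position   = 10000;   // logical file offset (already <= fileSize)
        uint64_t bufferSize = 4096;

        size_t i = 0;
        uint64_t off = position;
        while (off >= map[i].length) {                 // peel whole extents
            off -= map[i].length;
            i++;
        }
        uint64_t extentRemaining = map[i].length - off;
        uint64_t extentPosition  = position - off;     // logical start of this extent
        uint64_t bufferLimit     = (bufferSize <= extentRemaining)
            ? bufferSize : extentRemaining;

        printf("extent %zu, device offset %llu, remaining %llu, limit %llu\n",
            i, (unsigned long long)(map[i].start + off),
            (unsigned long long)extentRemaining, (unsigned long long)bufferLimit);
        return 0;
    }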
bytes += copy; - } - else - bzero(vars->buffer + vars->bufferHalf + vars->bufferOffset, copy); - - size -= copy; - vars->bufferOffset += copy; - vars->position += copy; - - if (flush && vars->bufferOffset) - { - uint64_t offset = (vars->position - vars->bufferOffset - - vars->extentPosition + vars->currentExtent->start); - uint32_t length = (vars->bufferOffset); + bytes += copy; + } else { + bzero(vars->buffer + vars->bufferHalf + vars->bufferOffset, copy); + } + + size -= copy; + vars->bufferOffset += copy; + vars->position += copy; + + if (flush && vars->bufferOffset) { + uint64_t offset = (vars->position - vars->bufferOffset + - vars->extentPosition + vars->currentExtent->start); + uint32_t length = (vars->bufferOffset); #if CRYPTO - if (cryptvars && vars->encryptStart - && (vars->position > vars->encryptStart) - && ((vars->position - length) < vars->encryptEnd)) - { - AbsoluteTime startTime, endTime; - - uint64_t encryptLen, encryptStart; - encryptLen = vars->position - vars->encryptStart; - if (encryptLen > length) - encryptLen = length; - encryptStart = length - encryptLen; - if (vars->position > vars->encryptEnd) - encryptLen -= (vars->position - vars->encryptEnd); - - clock_get_uptime(&startTime); - - // encrypt the buffer - aes_encrypt_cbc(vars->buffer + vars->bufferHalf + encryptStart, - &cryptvars->aes_iv[0], - encryptLen / AES_BLOCK_SIZE, - vars->buffer + vars->bufferHalf + encryptStart, - &cryptvars->ctx.encrypt); - - clock_get_uptime(&endTime); - ADD_ABSOLUTETIME(&vars->cryptTime, &endTime); - SUB_ABSOLUTETIME(&vars->cryptTime, &startTime); - vars->cryptBytes += encryptLen; - - // save initial vector for following encrypts - bcopy(vars->buffer + vars->bufferHalf + encryptStart + encryptLen - AES_BLOCK_SIZE, - &cryptvars->aes_iv[0], - AES_BLOCK_SIZE); - } + if (cryptvars && vars->encryptStart + && (vars->position > vars->encryptStart) + && ((vars->position - length) < vars->encryptEnd)) { + AbsoluteTime startTime, endTime; + + uint64_t encryptLen, encryptStart; + encryptLen = vars->position - vars->encryptStart; + if (encryptLen > length) { + encryptLen = length; + } + encryptStart = length - encryptLen; + if (vars->position > vars->encryptEnd) { + encryptLen -= (vars->position - vars->encryptEnd); + } + + clock_get_uptime(&startTime); + + // encrypt the buffer + aes_encrypt_cbc(vars->buffer + vars->bufferHalf + encryptStart, + &cryptvars->aes_iv[0], + encryptLen / AES_BLOCK_SIZE, + vars->buffer + vars->bufferHalf + encryptStart, + &cryptvars->ctx.encrypt); + + clock_get_uptime(&endTime); + ADD_ABSOLUTETIME(&vars->cryptTime, &endTime); + SUB_ABSOLUTETIME(&vars->cryptTime, &startTime); + vars->cryptBytes += encryptLen; + + // save initial vector for following encrypts + bcopy(vars->buffer + vars->bufferHalf + encryptStart + encryptLen - AES_BLOCK_SIZE, + &cryptvars->aes_iv[0], + AES_BLOCK_SIZE); + } #endif /* CRYPTO */ - err = IOPolledFilePollersIODone(vars->pollers, true); - if (kIOReturnSuccess != err) - break; + err = IOPolledFilePollersIODone(vars->pollers, true); + if (kIOReturnSuccess != err) { + break; + } -if (vars->position & (vars->blockSize - 1)) HIBLOG("misaligned file pos %qx\n", vars->position); + if (vars->position & (vars->blockSize - 1)) { + HIBLOG("misaligned file pos %qx\n", vars->position); + } //if (length != vars->bufferSize) HIBLOG("short write of %qx ends@ %qx\n", length, offset + length); - err = IOStartPolledIO(vars->pollers, kIOPolledWrite, vars->bufferHalf, offset, length); - if (kIOReturnSuccess != err) { - HIBLOGFROMPANIC("IOPolledFileWrite(0x%p, 
0x%p, %llu, 0x%p) : IOStartPolledIO(0x%p, kIOPolledWrite, %llu, 0x%llx, %d) returned 0x%x\n", - vars, bytes, (uint64_t) original_size, cryptvars, vars->pollers, (uint64_t) vars->bufferHalf, offset, length, err); - break; - } - vars->pollers->io = true; - - vars->extentRemaining -= vars->bufferOffset; - if (!vars->extentRemaining) - { - vars->currentExtent++; - vars->extentRemaining = vars->currentExtent->length; - vars->extentPosition = vars->position; - } - - vars->bufferHalf = vars->bufferHalf ? 0 : vars->bufferSize; - vars->bufferOffset = 0; - if (vars->bufferSize <= vars->extentRemaining) - vars->bufferLimit = vars->bufferSize; - else - vars->bufferLimit = vars->extentRemaining; - - if (!vars->extentRemaining) - { - err = kIOReturnOverrun; - break; - } - - flush = false; - } - } - while (size); - - return (err); + err = IOStartPolledIO(vars->pollers, kIOPolledWrite, vars->bufferHalf, offset, length); + if (kIOReturnSuccess != err) { + HIBLOGFROMPANIC("IOPolledFileWrite(0x%p, 0x%p, %llu, 0x%p) : IOStartPolledIO(0x%p, kIOPolledWrite, %llu, 0x%llx, %d) returned 0x%x\n", + vars, bytes, (uint64_t) original_size, cryptvars, vars->pollers, (uint64_t) vars->bufferHalf, offset, length, err); + break; + } + vars->pollers->io = true; + + vars->extentRemaining -= vars->bufferOffset; + if (!vars->extentRemaining) { + vars->currentExtent++; + vars->extentRemaining = vars->currentExtent->length; + vars->extentPosition = vars->position; + } + + vars->bufferHalf = vars->bufferHalf ? 0 : vars->bufferSize; + vars->bufferOffset = 0; + if (vars->bufferSize <= vars->extentRemaining) { + vars->bufferLimit = vars->bufferSize; + } else { + vars->bufferLimit = vars->extentRemaining; + } + + if (!vars->extentRemaining) { + err = kIOReturnOverrun; + break; + } + + flush = false; + } + }while (size); + + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -979,136 +981,137 @@ if (vars->position & (vars->blockSize - 1)) HIBLOG("misaligned file pos %qx\n", IOReturn IOPolledFileFlush(IOPolledFileIOVars * vars) { - // Only supported by the underlying polled mode driver on embedded currently (expect kIOReturnUnsupported on other platforms) - IOReturn err = kIOReturnSuccess; - - err = IOPolledFilePollersIODone(vars->pollers, true); - if (kIOReturnSuccess != err) - return err; - - err = IOStartPolledIO(vars->pollers, kIOPolledFlush, 0, 0, 0); - if (kIOReturnSuccess != err) { - HIBLOGFROMPANIC("IOPolledFileFlush(0x%p) : IOStartPolledIO(0x%p, kIOPolledFlush, 0, 0, 0) returned 0x%x\n", - vars, vars->pollers, err); - return err; - } - vars->pollers->io = true; - - return err; + // Only supported by the underlying polled mode driver on embedded currently (expect kIOReturnUnsupported on other platforms) + IOReturn err = kIOReturnSuccess; + + err = IOPolledFilePollersIODone(vars->pollers, true); + if (kIOReturnSuccess != err) { + return err; + } + + err = IOStartPolledIO(vars->pollers, kIOPolledFlush, 0, 0, 0); + if (kIOReturnSuccess != err) { + HIBLOGFROMPANIC("IOPolledFileFlush(0x%p) : IOStartPolledIO(0x%p, kIOPolledFlush, 0, 0, 0) returned 0x%x\n", + vars, vars->pollers, err); + return err; + } + vars->pollers->io = true; + + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ IOReturn IOPolledFileRead(IOPolledFileIOVars * vars, - uint8_t * bytes, IOByteCount size, - IOPolledFileCryptVars * cryptvars) + uint8_t * bytes, IOByteCount size, + IOPolledFileCryptVars * cryptvars) { - IOReturn err = kIOReturnSuccess; - IOByteCount copy; + IOReturn err = 
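The CRYPTO block in IOPolledFileWrite above encrypts each flushed buffer in place with aes_encrypt_cbc and then copies the final ciphertext block back into cryptvars->aes_iv, so successive buffers chain as one continuous CBC stream. The standalone model below shows only that IV bookkeeping; encrypt_block is a toy stand-in, not AES.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    enum { BLOCK = 16 };

    static void encrypt_block(uint8_t b[BLOCK])        // toy stand-in, not AES
    {
        for (int i = 0; i < BLOCK; i++) b[i] = (uint8_t)(b[i] ^ 0xA5);
    }

    static void cbc_encrypt(uint8_t *buf, size_t len, uint8_t iv[BLOCK])
    {
        for (size_t o = 0; o < len; o += BLOCK) {
            for (int i = 0; i < BLOCK; i++) buf[o + i] ^= iv[i];  // CBC chain
            encrypt_block(buf + o);
            memcpy(iv, buf + o, BLOCK);    // last ciphertext block seeds the next call
        }
    }

    int main()
    {
        uint8_t iv[BLOCK] = { 0 }, first[32] = { 1 }, second[32] = { 2 };
        cbc_encrypt(first,  sizeof first,  iv);   // one flushed buffer
        cbc_encrypt(second, sizeof second, iv);   // next buffer continues the stream
        printf("chained iv[0] = %02x\n", iv[0]);
        return 0;
    }

Run back to back, the second call's output depends on the first call's last block, which is exactly the continuity the hibernation image relies on across buffer flushes.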
kIOReturnSuccess; + IOByteCount copy; // bytesWritten += size; - do - { - copy = vars->bufferLimit - vars->bufferOffset; - if (copy > size) - copy = size; + do{ + copy = vars->bufferLimit - vars->bufferOffset; + if (copy > size) { + copy = size; + } - if (bytes) - { + if (bytes) { #if KASAN - __nosan_bcopy(vars->buffer + vars->bufferHalf + vars->bufferOffset, bytes, copy); + __nosan_bcopy(vars->buffer + vars->bufferHalf + vars->bufferOffset, bytes, copy); #else - bcopy(vars->buffer + vars->bufferHalf + vars->bufferOffset, bytes, copy); + bcopy(vars->buffer + vars->bufferHalf + vars->bufferOffset, bytes, copy); #endif - bytes += copy; - } - size -= copy; - vars->bufferOffset += copy; + bytes += copy; + } + size -= copy; + vars->bufferOffset += copy; // vars->position += copy; - if ((vars->bufferOffset == vars->bufferLimit) && (vars->position < vars->readEnd)) - { - if (!vars->pollers->io) cryptvars = 0; - err = IOPolledFilePollersIODone(vars->pollers, true); - if (kIOReturnSuccess != err) - break; - -if (vars->position & (vars->blockSize - 1)) HIBLOG("misaligned file pos %qx\n", vars->position); - - vars->position += vars->lastRead; - vars->extentRemaining -= vars->lastRead; - vars->bufferLimit = vars->lastRead; - - if (!vars->extentRemaining) - { - vars->currentExtent++; - vars->extentRemaining = vars->currentExtent->length; - vars->extentPosition = vars->position; - if (!vars->extentRemaining) - { - err = kIOReturnOverrun; - break; - } - } - - uint64_t length; - uint64_t lastReadLength = vars->lastRead; - uint64_t offset = (vars->position - - vars->extentPosition + vars->currentExtent->start); - if (vars->extentRemaining <= vars->bufferSize) - length = vars->extentRemaining; - else - length = vars->bufferSize; - if ((length + vars->position) > vars->readEnd) - length = vars->readEnd - vars->position; - - vars->lastRead = length; - if (length) - { + if ((vars->bufferOffset == vars->bufferLimit) && (vars->position < vars->readEnd)) { + if (!vars->pollers->io) { + cryptvars = 0; + } + err = IOPolledFilePollersIODone(vars->pollers, true); + if (kIOReturnSuccess != err) { + break; + } + + if (vars->position & (vars->blockSize - 1)) { + HIBLOG("misaligned file pos %qx\n", vars->position); + } + + vars->position += vars->lastRead; + vars->extentRemaining -= vars->lastRead; + vars->bufferLimit = vars->lastRead; + + if (!vars->extentRemaining) { + vars->currentExtent++; + vars->extentRemaining = vars->currentExtent->length; + vars->extentPosition = vars->position; + if (!vars->extentRemaining) { + err = kIOReturnOverrun; + break; + } + } + + uint64_t length; + uint64_t lastReadLength = vars->lastRead; + uint64_t offset = (vars->position + - vars->extentPosition + vars->currentExtent->start); + if (vars->extentRemaining <= vars->bufferSize) { + length = vars->extentRemaining; + } else { + length = vars->bufferSize; + } + if ((length + vars->position) > vars->readEnd) { + length = vars->readEnd - vars->position; + } + + vars->lastRead = length; + if (length) { //if (length != vars->bufferSize) HIBLOG("short read of %qx ends@ %qx\n", length, offset + length); - err = IOStartPolledIO(vars->pollers, kIOPolledRead, vars->bufferHalf, offset, length); - if (kIOReturnSuccess != err) - break; - vars->pollers->io = true; - } + err = IOStartPolledIO(vars->pollers, kIOPolledRead, vars->bufferHalf, offset, length); + if (kIOReturnSuccess != err) { + break; + } + vars->pollers->io = true; + } - vars->bufferHalf = vars->bufferHalf ? 
0 : vars->bufferSize; - vars->bufferOffset = 0; + vars->bufferHalf = vars->bufferHalf ? 0 : vars->bufferSize; + vars->bufferOffset = 0; #if CRYPTO - if (cryptvars) - { - uint8_t thisVector[AES_BLOCK_SIZE]; - AbsoluteTime startTime, endTime; - - // save initial vector for following decrypts - bcopy(&cryptvars->aes_iv[0], &thisVector[0], AES_BLOCK_SIZE); - bcopy(vars->buffer + vars->bufferHalf + lastReadLength - AES_BLOCK_SIZE, - &cryptvars->aes_iv[0], AES_BLOCK_SIZE); - - // decrypt the buffer - clock_get_uptime(&startTime); - - aes_decrypt_cbc(vars->buffer + vars->bufferHalf, - &thisVector[0], - lastReadLength / AES_BLOCK_SIZE, - vars->buffer + vars->bufferHalf, - &cryptvars->ctx.decrypt); - - clock_get_uptime(&endTime); - ADD_ABSOLUTETIME(&vars->cryptTime, &endTime); - SUB_ABSOLUTETIME(&vars->cryptTime, &startTime); - vars->cryptBytes += lastReadLength; - } + if (cryptvars) { + uint8_t thisVector[AES_BLOCK_SIZE]; + AbsoluteTime startTime, endTime; + + // save initial vector for following decrypts + bcopy(&cryptvars->aes_iv[0], &thisVector[0], AES_BLOCK_SIZE); + bcopy(vars->buffer + vars->bufferHalf + lastReadLength - AES_BLOCK_SIZE, + &cryptvars->aes_iv[0], AES_BLOCK_SIZE); + + // decrypt the buffer + clock_get_uptime(&startTime); + + aes_decrypt_cbc(vars->buffer + vars->bufferHalf, + &thisVector[0], + lastReadLength / AES_BLOCK_SIZE, + vars->buffer + vars->bufferHalf, + &cryptvars->ctx.decrypt); + + clock_get_uptime(&endTime); + ADD_ABSOLUTETIME(&vars->cryptTime, &endTime); + SUB_ABSOLUTETIME(&vars->cryptTime, &startTime); + vars->cryptBytes += lastReadLength; + } #endif /* CRYPTO */ - } - } - while (size); + } + }while (size); - return (err); + return err; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - diff --git a/iokit/Kernel/IOPowerConnection.cpp b/iokit/Kernel/IOPowerConnection.cpp index db5a7696b..8c7bc2b05 100644 --- a/iokit/Kernel/IOPowerConnection.cpp +++ b/iokit/Kernel/IOPowerConnection.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
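Both IOPolledFileWrite and IOPolledFileRead above treat the pollers' I/O buffer as two halves and flip vars->bufferHalf between 0 and bufferSize after queueing each transfer, so copying into one half can overlap the polled I/O draining the other. A minimal sketch of that ping-pong:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t bufferSize = 4096;   // one half; the buffer is 2 * bufferSize
        uint32_t bufferHalf = 0;

        for (int io = 0; io < 4; io++) {
            // ...fill buffer + bufferHalf, then start polled I/O on that half...
            printf("I/O %d uses bytes [%u, %u)\n", io, bufferHalf, bufferHalf + bufferSize);
            bufferHalf = bufferHalf ? 0 : bufferSize;   // the same flip as above
        }
        return 0;
    }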
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include #define super IOService -OSDefineMetaClassAndStructors(IOPowerConnection,IOService) +OSDefineMetaClassAndStructors(IOPowerConnection, IOService) // ********************************************************************************** @@ -37,9 +37,10 @@ OSDefineMetaClassAndStructors(IOPowerConnection,IOService) // // Parent of the connection calls here to save the childs desire // ********************************************************************************** -void IOPowerConnection::setDesiredDomainState (unsigned long stateNumber ) +void +IOPowerConnection::setDesiredDomainState(unsigned long stateNumber ) { - desiredDomainState = stateNumber; + desiredDomainState = stateNumber; } @@ -47,9 +48,10 @@ void IOPowerConnection::setDesiredDomainState (unsigned long stateNumber ) // getDesiredDomainState // // ********************************************************************************** -unsigned long IOPowerConnection::getDesiredDomainState ( void ) +unsigned long +IOPowerConnection::getDesiredDomainState( void ) { - return desiredDomainState; + return desiredDomainState; } @@ -58,9 +60,10 @@ unsigned long IOPowerConnection::getDesiredDomainState ( void ) // // Parent of the connection calls here when the child requests power // ********************************************************************************** -void IOPowerConnection::setChildHasRequestedPower ( void ) +void +IOPowerConnection::setChildHasRequestedPower( void ) { - requestFlag = true; + requestFlag = true; } // ********************************************************************************** @@ -68,9 +71,10 @@ void IOPowerConnection::setChildHasRequestedPower ( void ) // // Parent of the connection calls here when the child requests power // ********************************************************************************** -bool IOPowerConnection::childHasRequestedPower ( void ) +bool +IOPowerConnection::childHasRequestedPower( void ) { - return requestFlag; + return requestFlag; } @@ -78,9 +82,10 @@ bool IOPowerConnection::childHasRequestedPower ( void ) // setPreventIdleSleepFlag // // ********************************************************************************** -void IOPowerConnection::setPreventIdleSleepFlag ( unsigned long flag ) +void +IOPowerConnection::setPreventIdleSleepFlag( unsigned long flag ) { - preventIdleSleepFlag = (flag != 0); + preventIdleSleepFlag = (flag != 0); } @@ -88,9 +93,10 @@ void IOPowerConnection::setPreventIdleSleepFlag ( unsigned long flag ) // getPreventIdleSleepFlag // // ********************************************************************************** -bool IOPowerConnection::getPreventIdleSleepFlag ( void ) +bool +IOPowerConnection::getPreventIdleSleepFlag( void ) { - return preventIdleSleepFlag; + return preventIdleSleepFlag; } @@ -98,9 +104,10 @@ bool IOPowerConnection::getPreventIdleSleepFlag ( void ) // setPreventSystemSleepFlag // // ********************************************************************************** -void IOPowerConnection::setPreventSystemSleepFlag ( unsigned long flag ) +void +IOPowerConnection::setPreventSystemSleepFlag( unsigned long flag ) { - preventSystemSleepFlag = (flag != 0); + preventSystemSleepFlag = (flag != 0); } @@ -108,9 +115,10 @@ void IOPowerConnection::setPreventSystemSleepFlag ( unsigned long flag ) // getPreventSystemSleepFlag // // ********************************************************************************** -bool IOPowerConnection::getPreventSystemSleepFlag ( void ) +bool 
+IOPowerConnection::getPreventSystemSleepFlag( void ) { - return preventSystemSleepFlag; + return preventSystemSleepFlag; } @@ -120,9 +128,10 @@ bool IOPowerConnection::getPreventSystemSleepFlag ( void ) // Child of the connection calls here to set its reminder that the parent does // or does not yet know the state if its domain. // ********************************************************************************** -void IOPowerConnection::setParentKnowsState (bool flag ) +void +IOPowerConnection::setParentKnowsState(bool flag ) { - stateKnown = flag; + stateKnown = flag; } @@ -132,9 +141,10 @@ void IOPowerConnection::setParentKnowsState (bool flag ) // Child of the connection calls here to save what the parent says // is the state if its domain. // ********************************************************************************** -void IOPowerConnection::setParentCurrentPowerFlags (IOPMPowerFlags flags ) +void +IOPowerConnection::setParentCurrentPowerFlags(IOPMPowerFlags flags ) { - currentPowerFlags = flags; + currentPowerFlags = flags; } @@ -142,9 +152,10 @@ void IOPowerConnection::setParentCurrentPowerFlags (IOPMPowerFlags flags ) // parentKnowsState // // ********************************************************************************** -bool IOPowerConnection::parentKnowsState (void ) +bool +IOPowerConnection::parentKnowsState(void ) { - return stateKnown; + return stateKnown; } @@ -152,9 +163,10 @@ bool IOPowerConnection::parentKnowsState (void ) // parentCurrentPowerFlags // // ********************************************************************************** -IOPMPowerFlags IOPowerConnection::parentCurrentPowerFlags (void ) +IOPMPowerFlags +IOPowerConnection::parentCurrentPowerFlags(void ) { - return currentPowerFlags; + return currentPowerFlags; } @@ -162,9 +174,10 @@ IOPMPowerFlags IOPowerConnection::parentCurrentPowerFlags (void ) // setAwaitingAck // // ********************************************************************************** -void IOPowerConnection::setAwaitingAck ( bool value ) +void +IOPowerConnection::setAwaitingAck( bool value ) { - awaitingAck = value; + awaitingAck = value; } @@ -172,9 +185,10 @@ void IOPowerConnection::setAwaitingAck ( bool value ) // getAwaitingAck // // ********************************************************************************** -bool IOPowerConnection::getAwaitingAck ( void ) +bool +IOPowerConnection::getAwaitingAck( void ) { - return awaitingAck; + return awaitingAck; } @@ -182,7 +196,8 @@ bool IOPowerConnection::getAwaitingAck ( void ) // setReadyFlag // // ********************************************************************************** -void IOPowerConnection::setReadyFlag( bool flag ) +void +IOPowerConnection::setReadyFlag( bool flag ) { readyFlag = flag; } @@ -192,7 +207,8 @@ void IOPowerConnection::setReadyFlag( bool flag ) // getReadyFlag // // ********************************************************************************** -bool IOPowerConnection::getReadyFlag( void ) const +bool +IOPowerConnection::getReadyFlag( void ) const { return readyFlag; } diff --git a/iokit/Kernel/IORTC.cpp b/iokit/Kernel/IORTC.cpp index 335bfc317..b7b6a023c 100644 --- a/iokit/Kernel/IORTC.cpp +++ b/iokit/Kernel/IORTC.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,30 +32,35 @@ OSDefineMetaClassAndAbstractStructors(IORTC, IOService); -void IORTC::getUTCTimeOfDay( clock_sec_t * secs, clock_nsec_t * nsecs ) +void +IORTC::getUTCTimeOfDay( clock_sec_t * secs, clock_nsec_t * nsecs ) { - *nsecs = 0; - *secs = getGMTTimeOfDay(); + *nsecs = 0; + *secs = getGMTTimeOfDay(); } -void IORTC::setUTCTimeOfDay( clock_sec_t secs, clock_nsec_t nsecs ) +void +IORTC::setUTCTimeOfDay( clock_sec_t secs, clock_nsec_t nsecs ) { - setGMTTimeOfDay(secs); + setGMTTimeOfDay(secs); } -IOReturn IORTC::getMonotonicClockOffset( int64_t * usecs ) +IOReturn +IORTC::getMonotonicClockOffset( int64_t * usecs ) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } -IOReturn IORTC::setMonotonicClockOffset( int64_t usecs ) +IOReturn +IORTC::setMonotonicClockOffset( int64_t usecs ) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } -IOReturn IORTC::getMonotonicClockAndTimestamp( uint64_t * usecs, uint64_t *mach_absolute_time ) +IOReturn +IORTC::getMonotonicClockAndTimestamp( uint64_t * usecs, uint64_t *mach_absolute_time ) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } OSMetaClassDefineReservedUnused(IORTC, 0); diff --git a/iokit/Kernel/IORangeAllocator.cpp b/iokit/Kernel/IORangeAllocator.cpp index 393a9c03b..aca4f6790 100644 --- a/iokit/Kernel/IORangeAllocator.cpp +++ b/iokit/Kernel/IORangeAllocator.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
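The IORTC hunk above gives the UTC accessors default implementations that simply wrap the whole-second GMT ones, zeroing the nanosecond part, while the monotonic-clock hooks return kIOReturnUnsupported until a subclass overrides them. A standalone model of that delegation pattern; RTCModel and FixedRTC are illustrative names, not IOKit classes.

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t clock_sec_t;
    typedef uint32_t clock_nsec_t;

    struct RTCModel {
        virtual ~RTCModel() {}
        virtual clock_sec_t getGMTTimeOfDay() = 0;      // subclasses supply seconds
        virtual void setGMTTimeOfDay(clock_sec_t) = 0;

        virtual void getUTCTimeOfDay(clock_sec_t *secs, clock_nsec_t *nsecs)
        {
            *nsecs = 0;                                 // default: no subsecond info
            *secs = getGMTTimeOfDay();
        }
        virtual void setUTCTimeOfDay(clock_sec_t secs, clock_nsec_t)
        {
            setGMTTimeOfDay(secs);                      // subseconds dropped by default
        }
    };

    struct FixedRTC : RTCModel {
        clock_sec_t t = 0;
        clock_sec_t getGMTTimeOfDay() override { return t; }
        void setGMTTimeOfDay(clock_sec_t s) override { t = s; }
    };

    int main()
    {
        FixedRTC rtc;
        rtc.setUTCTimeOfDay(1580493240, 500);           // nanoseconds are discarded
        clock_sec_t s; clock_nsec_t ns;
        rtc.getUTCTimeOfDay(&s, &ns);
        printf("%llu.%09u\n", (unsigned long long)s, ns);
        return 0;
    }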
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -49,328 +49,355 @@ OSDefineMetaClassAndStructors( IORangeAllocator, OSObject ) struct IORangeAllocatorElement { - // closed range - IORangeScalar start; - IORangeScalar end; + // closed range + IORangeScalar start; + IORangeScalar end; }; -IOLock * gIORangeAllocatorLock; +IOLock * gIORangeAllocatorLock; -#define LOCK() \ +#define LOCK() \ if( options & kLocking) IOTakeLock( gIORangeAllocatorLock ) -#define UNLOCK() \ +#define UNLOCK() \ if( options & kLocking) IOUnlock( gIORangeAllocatorLock ) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IORangeAllocator::init( IORangeScalar endOfRange, - IORangeScalar _defaultAlignment, - UInt32 _capacity, - IOOptionBits _options ) +bool +IORangeAllocator::init( IORangeScalar endOfRange, + IORangeScalar _defaultAlignment, + UInt32 _capacity, + IOOptionBits _options ) { - if( !super::init()) - return( false ); - - if( !_capacity) - _capacity = 1; - if( !_defaultAlignment) - _defaultAlignment = 1; - capacity = 0; - capacityIncrement = _capacity; - numElements = 0; - elements = 0; - defaultAlignmentMask = _defaultAlignment - 1; - options = _options; - - if( (!gIORangeAllocatorLock) && (options & kLocking)) - gIORangeAllocatorLock = IOLockAlloc(); - - if( endOfRange) - deallocate( 0, endOfRange + 1 ); - - return( true ); + if (!super::init()) { + return false; + } + + if (!_capacity) { + _capacity = 1; + } + if (!_defaultAlignment) { + _defaultAlignment = 1; + } + capacity = 0; + capacityIncrement = _capacity; + numElements = 0; + elements = 0; + defaultAlignmentMask = _defaultAlignment - 1; + options = _options; + + if ((!gIORangeAllocatorLock) && (options & kLocking)) { + gIORangeAllocatorLock = IOLockAlloc(); + } + + if (endOfRange) { + deallocate( 0, endOfRange + 1 ); + } + + return true; } -IORangeAllocator * IORangeAllocator::withRange( - IORangeScalar endOfRange, - IORangeScalar defaultAlignment, - UInt32 capacity, - IOOptionBits options ) +IORangeAllocator * +IORangeAllocator::withRange( + IORangeScalar endOfRange, + IORangeScalar defaultAlignment, + UInt32 capacity, + IOOptionBits options ) { - IORangeAllocator * thingy; + IORangeAllocator * thingy; - thingy = new IORangeAllocator; - if( thingy && ! 
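withRange() in the IORangeAllocator hunk is the stock IOKit two-phase construction idiom: operator new, then init(), and release() on init failure, so callers only ever see a fully constructed allocator or NULL. A minimal model of the idiom; Thing is a placeholder, with delete standing in for OSObject reference counting.

    #include <cstdio>

    struct Thing {
        bool init(int endOfRange) { return endOfRange != 0; }
        void release() { delete this; }       // stand-in for OSObject::release

        static Thing *withRange(int endOfRange)
        {
            Thing *t = new Thing;
            if (t && !t->init(endOfRange)) {  // construction failed: drop it
                t->release();
                t = nullptr;
            }
            return t;                         // fully built object, or nullptr
        }
    };

    int main()
    {
        Thing *ok  = Thing::withRange(4096);
        Thing *bad = Thing::withRange(0);
        printf("%p %p\n", (void *)ok, (void *)bad);
        if (ok) ok->release();
        return 0;
    }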
thingy->init( endOfRange, defaultAlignment, - capacity, options )) { - thingy->release(); - thingy = 0; - } + thingy = new IORangeAllocator; + if (thingy && !thingy->init( endOfRange, defaultAlignment, + capacity, options )) { + thingy->release(); + thingy = 0; + } - return( thingy ); + return thingy; } -void IORangeAllocator::free() +void +IORangeAllocator::free() { - if( elements) - IODelete( elements, IORangeAllocatorElement, capacity ); + if (elements) { + IODelete( elements, IORangeAllocatorElement, capacity ); + } - super::free(); + super::free(); } -UInt32 IORangeAllocator::getFragmentCount( void ) +UInt32 +IORangeAllocator::getFragmentCount( void ) { - return( numElements ); + return numElements; } -UInt32 IORangeAllocator::getFragmentCapacity( void ) +UInt32 +IORangeAllocator::getFragmentCapacity( void ) { - return( capacity ); + return capacity; } -void IORangeAllocator::setFragmentCapacityIncrement( UInt32 count ) +void +IORangeAllocator::setFragmentCapacityIncrement( UInt32 count ) { - capacityIncrement = count; + capacityIncrement = count; } // allocate element at index -bool IORangeAllocator::allocElement( UInt32 index ) +bool +IORangeAllocator::allocElement( UInt32 index ) { - UInt32 newCapacity; - IORangeAllocatorElement * newElements; - - if( ((numElements == capacity) && capacityIncrement) - || (!elements)) { - - if (os_add_overflow(capacity, capacityIncrement, &newCapacity)) - return( false ); - newElements = IONew( IORangeAllocatorElement, newCapacity ); - if( !newElements) - return( false ); - - if( elements) { - bcopy( elements, - newElements, - index * sizeof( IORangeAllocatorElement)); - bcopy( elements + index, - newElements + index + 1, - (numElements - index) * sizeof( IORangeAllocatorElement)); - - IODelete( elements, IORangeAllocatorElement, capacity ); + UInt32 newCapacity; + IORangeAllocatorElement * newElements; + + if (((numElements == capacity) && capacityIncrement) + || (!elements)) { + if (os_add_overflow(capacity, capacityIncrement, &newCapacity)) { + return false; + } + newElements = IONew( IORangeAllocatorElement, newCapacity ); + if (!newElements) { + return false; + } + + if (elements) { + bcopy( elements, + newElements, + index * sizeof(IORangeAllocatorElement)); + bcopy( elements + index, + newElements + index + 1, + (numElements - index) * sizeof(IORangeAllocatorElement)); + + IODelete( elements, IORangeAllocatorElement, capacity ); + } + + elements = newElements; + capacity = newCapacity; + } else { + bcopy( elements + index, + elements + index + 1, + (numElements - index) * sizeof(IORangeAllocatorElement)); } + numElements++; - elements = newElements; - capacity = newCapacity; - - } else { - - bcopy( elements + index, - elements + index + 1, - (numElements - index) * sizeof( IORangeAllocatorElement)); - } - numElements++; - - return( true ); + return true; } // destroy element at index -void IORangeAllocator::deallocElement( UInt32 index ) +void +IORangeAllocator::deallocElement( UInt32 index ) { - numElements--; - bcopy( elements + index + 1, - elements + index, - (numElements - index) * sizeof( IORangeAllocatorElement)); + numElements--; + bcopy( elements + index + 1, + elements + index, + (numElements - index) * sizeof(IORangeAllocatorElement)); } -bool IORangeAllocator::allocate( IORangeScalar size, - IORangeScalar * result, - IORangeScalar alignment ) +bool +IORangeAllocator::allocate( IORangeScalar size, + IORangeScalar * result, + IORangeScalar alignment ) { - IORangeScalar data, dataEnd; - IORangeScalar thisStart, thisEnd; - UInt32 
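allocElement() above grows the sorted fragment array by capacityIncrement, guarding the capacity addition with os_add_overflow, and opens a one-slot hole at index for a new fragment; deallocElement() closes the hole again. A userland model using __builtin_add_overflow (which os_add_overflow wraps) and memmove; open_hole is an illustrative name.

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct Range { uint64_t start, end; };

    static bool
    open_hole(Range *&elems, uint32_t &num, uint32_t &cap, uint32_t inc, uint32_t index)
    {
        if (num == cap || !elems) {                    // need a bigger array
            uint32_t newCap;
            if (__builtin_add_overflow(cap, inc, &newCap)) return false;
            Range *bigger = (Range *)malloc((size_t)newCap * sizeof(Range));
            if (!bigger) return false;
            if (elems) {                               // copy around the hole
                memcpy(bigger, elems, index * sizeof(Range));
                memcpy(bigger + index + 1, elems + index, (num - index) * sizeof(Range));
                free(elems);
            }
            elems = bigger;
            cap = newCap;
        } else {                                       // room already: shift the tail
            memmove(elems + index + 1, elems + index, (num - index) * sizeof(Range));
        }
        num++;
        return true;
    }

    int main()
    {
        Range *e = nullptr;
        uint32_t n = 0, c = 0;
        bool ok = open_hole(e, n, c, 4, 0);            // grows 0 -> 4, opens slot 0
        free(e);
        return ok ? 0 : 1;
    }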
index; - bool ok = false; - - if( !size || !result) - return( false ); - - if( 0 == alignment) - alignment = defaultAlignmentMask; - else - alignment--; - - size = (size + defaultAlignmentMask) & ~defaultAlignmentMask; - - LOCK(); - - for( index = 0; index < numElements; index++ ) { - - thisStart = elements[index].start; - thisEnd = elements[index].end; - data = (thisStart + alignment) & ~alignment; - dataEnd = (data + size - 1); - - ok = (dataEnd <= thisEnd); - if( ok) { - if( data != thisStart) { - if( dataEnd != thisEnd) { - if( allocElement( index + 1 )) { - elements[index++].end = data - 1; - elements[index].start = dataEnd + 1; - elements[index].end = thisEnd; - } else - ok = false; - } else - elements[index].end = data - 1; - } else { - if( dataEnd != thisEnd) - elements[index].start = dataEnd + 1; - else - deallocElement( index ); - } - if( ok) - *result = data; - break; - } - } - - UNLOCK(); - - return( ok ); -} + IORangeScalar data, dataEnd; + IORangeScalar thisStart, thisEnd; + UInt32 index; + bool ok = false; -bool IORangeAllocator::allocateRange( IORangeScalar data, - IORangeScalar size ) -{ - IORangeScalar thisStart, thisEnd; - IORangeScalar dataEnd; - UInt32 index; - bool found = false; - - if( !size) - return( 0 ); - - size = (size + defaultAlignmentMask) & ~defaultAlignmentMask; - dataEnd = data + size - 1; - - LOCK(); - - for( index = 0; - (!found) && (index < numElements); - index++ ) { - - thisStart = elements[index].start; - thisEnd = elements[index].end; - - if( thisStart > data) - break; - found = (dataEnd <= thisEnd); - - if( found) { - if( data != thisStart) { - if( dataEnd != thisEnd) { - found = allocElement( index + 1 ); - if( found) { - elements[index++].end = data - 1; - elements[index].start = dataEnd + 1; - elements[index].end = thisEnd; - } - } else - elements[index].end = data - 1; - } else if( dataEnd != thisEnd) - elements[index].start = dataEnd + 1; - else - deallocElement( index ); - } - } - - UNLOCK(); - - return( found ); + if (!size || !result) { + return false; + } + + if (0 == alignment) { + alignment = defaultAlignmentMask; + } else { + alignment--; + } + + size = (size + defaultAlignmentMask) & ~defaultAlignmentMask; + + LOCK(); + + for (index = 0; index < numElements; index++) { + thisStart = elements[index].start; + thisEnd = elements[index].end; + data = (thisStart + alignment) & ~alignment; + dataEnd = (data + size - 1); + + ok = (dataEnd <= thisEnd); + if (ok) { + if (data != thisStart) { + if (dataEnd != thisEnd) { + if (allocElement( index + 1 )) { + elements[index++].end = data - 1; + elements[index].start = dataEnd + 1; + elements[index].end = thisEnd; + } else { + ok = false; + } + } else { + elements[index].end = data - 1; + } + } else { + if (dataEnd != thisEnd) { + elements[index].start = dataEnd + 1; + } else { + deallocElement( index ); + } + } + if (ok) { + *result = data; + } + break; + } + } + + UNLOCK(); + + return ok; } -void IORangeAllocator::deallocate( IORangeScalar data, - IORangeScalar size ) +bool +IORangeAllocator::allocateRange( IORangeScalar data, + IORangeScalar size ) { - IORangeScalar dataEnd; - UInt32 index; - bool headContig = false; - bool tailContig = false; - - size = (size + defaultAlignmentMask) & ~defaultAlignmentMask; - dataEnd = data + size - 1; + IORangeScalar thisStart, thisEnd; + IORangeScalar dataEnd; + UInt32 index; + bool found = false; - LOCK(); + if (!size) { + return 0; + } - for( index = 0; index < numElements; index++ ) { - if( elements[index].start < data) { - headContig = (data <= 
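allocate() above is a first-fit search: alignment is carried as a mask (align - 1), the candidate address is the fragment start rounded up to that alignment, and the request fits if the rounded closed range still ends inside the fragment. The model below reproduces just the fit test, leaving out the fragment splitting.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Frag { uint64_t start, end; };   // closed range, as in the kernel

    static bool
    first_fit(const std::vector<Frag> &free_list, uint64_t size, uint64_t align,
        uint64_t *result)
    {
        if (!size) return false;            // the kernel rejects empty requests too
        uint64_t mask = align - 1;          // alignment is kept as a mask
        for (const Frag &f : free_list) {
            uint64_t data = (f.start + mask) & ~mask;  // round start up
            uint64_t dataEnd = data + size - 1;
            if (dataEnd <= f.end) { *result = data; return true; }
        }
        return false;
    }

    int main()
    {
        std::vector<Frag> free_list = { { 3, 10 }, { 100, 4095 } };
        uint64_t where;
        if (first_fit(free_list, 256, 64, &where))     // lands at 128
            printf("allocated 256 bytes @ %llu\n", (unsigned long long)where);
        return 0;
    }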
(elements[index].end + 1)); - continue; + size = (size + defaultAlignmentMask) & ~defaultAlignmentMask; + dataEnd = data + size - 1; + + LOCK(); + + for (index = 0; + (!found) && (index < numElements); + index++) { + thisStart = elements[index].start; + thisEnd = elements[index].end; + + if (thisStart > data) { + break; + } + found = (dataEnd <= thisEnd); + + if (found) { + if (data != thisStart) { + if (dataEnd != thisEnd) { + found = allocElement( index + 1 ); + if (found) { + elements[index++].end = data - 1; + elements[index].start = dataEnd + 1; + elements[index].end = thisEnd; + } + } else { + elements[index].end = data - 1; + } + } else if (dataEnd != thisEnd) { + elements[index].start = dataEnd + 1; + } else { + deallocElement( index ); + } + } } - tailContig = ((data + size) >= elements[index].start); - break; - } - - if( headContig) { - if( tailContig) { - elements[index-1].end = elements[index].end; - deallocElement( index ); - } else /*safe*/ if( dataEnd > elements[index-1].end) - elements[index-1].end = dataEnd; - - } else if( tailContig) { - if( data < elements[index].start) /*safe*/ - elements[index].start = data; - - } else if( allocElement( index)) { - elements[index].start = data; - elements[index].end = dataEnd; - } - - UNLOCK(); + + UNLOCK(); + + return found; } -bool IORangeAllocator::serialize(OSSerialize *s) const +void +IORangeAllocator::deallocate( IORangeScalar data, + IORangeScalar size ) { - OSArray * array = OSArray::withCapacity( numElements * 2 ); - OSNumber * num; - UInt32 index; - bool ret; + IORangeScalar dataEnd; + UInt32 index; + bool headContig = false; + bool tailContig = false; + + size = (size + defaultAlignmentMask) & ~defaultAlignmentMask; + dataEnd = data + size - 1; + + LOCK(); + + for (index = 0; index < numElements; index++) { + if (elements[index].start < data) { + headContig = (data <= (elements[index].end + 1)); + continue; + } + tailContig = ((data + size) >= elements[index].start); + break; + } - if( !array) - return( false ); + if (headContig) { + if (tailContig) { + elements[index - 1].end = elements[index].end; + deallocElement( index ); + } else /*safe*/ if (dataEnd > elements[index - 1].end) { + elements[index - 1].end = dataEnd; + } + } else if (tailContig) { + if (data < elements[index].start) { /*safe*/ + elements[index].start = data; + } + } else if (allocElement( index)) { + elements[index].start = data; + elements[index].end = dataEnd; + } - LOCK(); + UNLOCK(); +} - for( index = 0; index < numElements; index++) { - if( (num = OSNumber::withNumber( elements[index].start, - 8 * sizeof(IORangeScalar) ))) { - array->setObject(num); - num->release(); +bool +IORangeAllocator::serialize(OSSerialize *s) const +{ + OSArray * array = OSArray::withCapacity( numElements * 2 ); + OSNumber * num; + UInt32 index; + bool ret; + + if (!array) { + return false; } - if( (num = OSNumber::withNumber( elements[index].end, - 8 * sizeof(IORangeScalar) ))) { - array->setObject(num); - num->release(); + + LOCK(); + + for (index = 0; index < numElements; index++) { + if ((num = OSNumber::withNumber( elements[index].start, + 8 * sizeof(IORangeScalar)))) { + array->setObject(num); + num->release(); + } + if ((num = OSNumber::withNumber( elements[index].end, + 8 * sizeof(IORangeScalar)))) { + array->setObject(num); + num->release(); + } } - } - UNLOCK(); + UNLOCK(); - ret = array->serialize(s); - array->release(); + ret = array->serialize(s); + array->release(); - return( ret ); + return ret; } -IORangeScalar IORangeAllocator::getFreeCount( void ) 
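deallocate() above re-inserts a range and coalesces with its neighbors: headContig means the freed range touches the free fragment before it, tailContig the one after, and when both hold the two fragments merge into one. A sketch over a sorted vector; give_back is an illustrative name.

    #include <cstdint>
    #include <vector>

    struct Frag { uint64_t start, end; };

    static void
    give_back(std::vector<Frag> &v, uint64_t data, uint64_t dataEnd)
    {
        size_t i = 0;
        bool head = false, tail = false;
        for (; i < v.size(); i++) {
            if (v[i].start < data) { head = (data <= v[i].end + 1); continue; }
            tail = (dataEnd + 1 >= v[i].start);
            break;
        }
        if (head && tail) {                    // bridge two fragments into one
            v[i - 1].end = v[i].end;
            v.erase(v.begin() + i);
        } else if (head) {                     // extend the previous fragment
            if (dataEnd > v[i - 1].end) v[i - 1].end = dataEnd;
        } else if (tail) {                     // extend the next fragment backward
            if (data < v[i].start) v[i].start = data;
        } else {                               // isolated: insert a new fragment
            v.insert(v.begin() + i, { data, dataEnd });
        }
    }

    int main()
    {
        std::vector<Frag> v = { { 0, 9 }, { 20, 29 } };
        give_back(v, 10, 19);                  // bridges: v becomes {{0, 29}}
        return (v.size() == 1 && v[0].end == 29) ? 0 : 1;
    }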
+IORangeScalar +IORangeAllocator::getFreeCount( void ) { - UInt32 index; - IORangeScalar sum = 0; + UInt32 index; + IORangeScalar sum = 0; - for( index = 0; index < numElements; index++) - sum += elements[index].end - elements[index].start + 1; + for (index = 0; index < numElements; index++) { + sum += elements[index].end - elements[index].start + 1; + } - return( sum ); + return sum; } - diff --git a/iokit/Kernel/IORegistryEntry.cpp b/iokit/Kernel/IORegistryEntry.cpp index 03bb8724f..31e8ca1a9 100644 --- a/iokit/Kernel/IORegistryEntry.cpp +++ b/iokit/Kernel/IORegistryEntry.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,42 +46,41 @@ OSDefineMetaClassAndStructors(IORegistryEntry, OSObject) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define kIORegPlaneParentSuffix "ParentLinks" -#define kIORegPlaneChildSuffix "ChildLinks" -#define kIORegPlaneNameSuffix "Name" -#define kIORegPlaneLocationSuffix "Location" +#define kIORegPlaneParentSuffix "ParentLinks" +#define kIORegPlaneChildSuffix "ChildLinks" +#define kIORegPlaneNameSuffix "Name" +#define kIORegPlaneLocationSuffix "Location" -#define kIORegPlaneParentSuffixLen (sizeof(kIORegPlaneParentSuffix) - 1) -#define kIORegPlaneChildSuffixLen (sizeof(kIORegPlaneChildSuffix) - 1) -#define kIORegPlaneNameSuffixLen (sizeof(kIORegPlaneNameSuffix) - 1) -#define kIORegPlaneLocationSuffixLen (sizeof(kIORegPlaneLocationSuffix) - 1) +#define kIORegPlaneParentSuffixLen (sizeof(kIORegPlaneParentSuffix) - 1) +#define kIORegPlaneChildSuffixLen (sizeof(kIORegPlaneChildSuffix) - 1) +#define kIORegPlaneNameSuffixLen (sizeof(kIORegPlaneNameSuffix) - 1) +#define kIORegPlaneLocationSuffixLen (sizeof(kIORegPlaneLocationSuffix) - 1) #define KASLR_IOREG_DEBUG 0 -struct IORegistryEntry::ExpansionData -{ - IORecursiveLock * fLock; - uint64_t fRegistryEntryID; - SInt32 fRegistryEntryGenerationCount; - OSObject **_Atomic fIndexedProperties; +struct IORegistryEntry::ExpansionData { + IORecursiveLock * fLock; + uint64_t fRegistryEntryID; + SInt32 fRegistryEntryGenerationCount; + OSObject **_Atomic fIndexedProperties; }; static IORegistryEntry * gRegistryRoot; -static OSDictionary * gIORegistryPlanes; +static OSDictionary * gIORegistryPlanes; -const OSSymbol * gIONameKey; -const OSSymbol * gIOLocationKey; -const OSSymbol * gIORegistryEntryIDKey; -const OSSymbol * gIORegistryEntryPropertyKeysKey; +const OSSymbol * gIONameKey; +const 
OSSymbol * gIOLocationKey; +const OSSymbol * gIORegistryEntryIDKey; +const OSSymbol * gIORegistryEntryPropertyKeysKey; enum { - kParentSetIndex = 0, - kChildSetIndex = 1, - kNumSetIndex + kParentSetIndex = 0, + kChildSetIndex = 1, + kNumSetIndex }; enum { - kIOMaxPlaneName = 32 + kIOMaxPlaneName = 32 }; enum { kIORegistryIDReserved = (1ULL << 32) + 255 }; @@ -89,49 +88,48 @@ enum { kIORegistryIDReserved = (1ULL << 32) + 255 }; static uint64_t gIORegistryLastID = kIORegistryIDReserved; class IORegistryPlane : public OSObject { + friend class IORegistryEntry; - friend class IORegistryEntry; - - OSDeclareAbstractStructors(IORegistryPlane) + OSDeclareAbstractStructors(IORegistryPlane) - const OSSymbol * nameKey; - const OSSymbol * keys[ kNumSetIndex ]; - const OSSymbol * pathNameKey; - const OSSymbol * pathLocationKey; - int reserved[2]; + const OSSymbol * nameKey; + const OSSymbol * keys[kNumSetIndex]; + const OSSymbol * pathNameKey; + const OSSymbol * pathLocationKey; + int reserved[2]; public: - virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; + virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE; }; OSDefineMetaClassAndStructors(IORegistryPlane, OSObject) -static IORecursiveLock * gPropertiesLock; -static SInt32 gIORegistryGenerationCount; +static IORecursiveLock * gPropertiesLock; +static SInt32 gIORegistryGenerationCount; -#define UNLOCK lck_rw_done( &gIORegistryLock ) -#define RLOCK lck_rw_lock_shared( &gIORegistryLock ) -#define WLOCK lck_rw_lock_exclusive( &gIORegistryLock ); \ - gIORegistryGenerationCount++ - // make atomic +#define UNLOCK lck_rw_done( &gIORegistryLock ) +#define RLOCK lck_rw_lock_shared( &gIORegistryLock ) +#define WLOCK lck_rw_lock_exclusive( &gIORegistryLock ); \ + gIORegistryGenerationCount++ +// make atomic -#define PUNLOCK IORecursiveLockUnlock( reserved->fLock ) -#define PLOCK IORecursiveLockLock( reserved->fLock ) +#define PUNLOCK IORecursiveLockUnlock( reserved->fLock ) +#define PLOCK IORecursiveLockLock( reserved->fLock ) #define IOREGSPLITTABLES #ifdef IOREGSPLITTABLES -#define registryTable() fRegistryTable +#define registryTable() fRegistryTable #else -#define registryTable() fPropertyTable +#define registryTable() fPropertyTable #endif -#define DEBUG_FREE 1 +#define DEBUG_FREE 1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -lck_rw_t gIORegistryLock; +lck_rw_t gIORegistryLock; lck_grp_t *gIORegistryLockGrp; lck_grp_attr_t *gIORegistryLockGrpAttr; lck_attr_t *gIORegistryLockAttr; @@ -139,408 +137,446 @@ lck_attr_t *gIORegistryLockAttr; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -IORegistryEntry * IORegistryEntry::initialize( void ) +IORegistryEntry * +IORegistryEntry::initialize( void ) { - bool ok; + bool ok; - if( !gRegistryRoot) { + if (!gRegistryRoot) { + gIORegistryLockGrpAttr = lck_grp_attr_alloc_init(); + gIORegistryLockGrp = lck_grp_alloc_init("IORegistryLock", gIORegistryLockGrpAttr); + gIORegistryLockAttr = lck_attr_alloc_init(); + lck_attr_rw_shared_priority(gIORegistryLockAttr); + lck_rw_init( &gIORegistryLock, gIORegistryLockGrp, gIORegistryLockAttr); + gRegistryRoot = new IORegistryEntry; + gPropertiesLock = IORecursiveLockAlloc(); + gIORegistryPlanes = OSDictionary::withCapacity( 1 ); - gIORegistryLockGrpAttr = lck_grp_attr_alloc_init(); - //lck_grp_attr_setstat(gIORegistryLockGrpAttr); - gIORegistryLockGrp = lck_grp_alloc_init("IORegistryLock", gIORegistryLockGrpAttr); - gIORegistryLockAttr = lck_attr_alloc_init(); - 
lck_attr_rw_shared_priority(gIORegistryLockAttr); - //lck_attr_setdebug(gIORegistryLockAttr); - lck_rw_init( &gIORegistryLock, gIORegistryLockGrp, gIORegistryLockAttr); + assert( gRegistryRoot && gPropertiesLock + && gIORegistryPlanes ); + ok = gRegistryRoot->init(); - gRegistryRoot = new IORegistryEntry; - gPropertiesLock = IORecursiveLockAlloc(); - gIORegistryPlanes = OSDictionary::withCapacity( 1 ); - - assert( gRegistryRoot && gPropertiesLock - && gIORegistryPlanes ); - ok = gRegistryRoot->init(); - - if (ok) - gRegistryRoot->reserved->fRegistryEntryID = ++gIORegistryLastID; + if (ok) { + gRegistryRoot->reserved->fRegistryEntryID = ++gIORegistryLastID; + } - gIONameKey = OSSymbol::withCStringNoCopy( "IOName" ); - gIOLocationKey = OSSymbol::withCStringNoCopy( "IOLocation" ); - gIORegistryEntryIDKey = OSSymbol::withCStringNoCopy( kIORegistryEntryIDKey ); - gIORegistryEntryPropertyKeysKey = OSSymbol::withCStringNoCopy( kIORegistryEntryPropertyKeysKey ); + gIONameKey = OSSymbol::withCStringNoCopy( "IOName" ); + gIOLocationKey = OSSymbol::withCStringNoCopy( "IOLocation" ); + gIORegistryEntryIDKey = OSSymbol::withCStringNoCopy( kIORegistryEntryIDKey ); + gIORegistryEntryPropertyKeysKey = OSSymbol::withCStringNoCopy( kIORegistryEntryPropertyKeysKey ); - assert( ok && gIONameKey && gIOLocationKey ); + assert( ok && gIONameKey && gIOLocationKey ); - gRegistryRoot->setName( "Root" ); - gRegistryRoot->setProperty( kIORegistryPlanesKey, gIORegistryPlanes ); - } + gRegistryRoot->setName( "Root" ); + gRegistryRoot->setProperty( kIORegistryPlanesKey, gIORegistryPlanes ); + } - return( gRegistryRoot ); + return gRegistryRoot; } -IORegistryEntry * IORegistryEntry::getRegistryRoot( void ) +IORegistryEntry * +IORegistryEntry::getRegistryRoot( void ) { - return( gRegistryRoot ); + return gRegistryRoot; } -SInt32 IORegistryEntry::getGenerationCount( void ) +SInt32 +IORegistryEntry::getGenerationCount( void ) { - return( gIORegistryGenerationCount ); + return gIORegistryGenerationCount; } -SInt32 IORegistryEntry::getRegistryEntryGenerationCount(void) const +SInt32 +IORegistryEntry::getRegistryEntryGenerationCount(void) const { - return (reserved->fRegistryEntryGenerationCount); + return reserved->fRegistryEntryGenerationCount; } -const IORegistryPlane * IORegistryEntry::makePlane( const char * name ) +const IORegistryPlane * +IORegistryEntry::makePlane( const char * name ) { - IORegistryPlane * plane; - const OSSymbol * nameKey; - const OSSymbol * parentKey; - const OSSymbol * childKey; - const OSSymbol * pathNameKey; - const OSSymbol * pathLocationKey; - char key[ kIOMaxPlaneName + 16 ]; - char * end; - - strlcpy( key, name, kIOMaxPlaneName + 1 ); - end = key + strlen( key ); - - nameKey = OSSymbol::withCString( key); + IORegistryPlane * plane; + const OSSymbol * nameKey; + const OSSymbol * parentKey; + const OSSymbol * childKey; + const OSSymbol * pathNameKey; + const OSSymbol * pathLocationKey; + char key[kIOMaxPlaneName + 16]; + char * end; - strlcpy( end, kIORegPlaneParentSuffix, kIORegPlaneParentSuffixLen + 1 ); - parentKey = OSSymbol::withCString( key); + strlcpy( key, name, kIOMaxPlaneName + 1 ); + end = key + strlen( key ); - strlcpy( end, kIORegPlaneChildSuffix, kIORegPlaneChildSuffixLen + 1 ); - childKey = OSSymbol::withCString( key); + nameKey = OSSymbol::withCString( key); - strlcpy( end, kIORegPlaneNameSuffix, kIORegPlaneNameSuffixLen + 1 ); - pathNameKey = OSSymbol::withCString( key); + strlcpy( end, kIORegPlaneParentSuffix, kIORegPlaneParentSuffixLen + 1 ); + parentKey = 
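initialize() above sets up the registry's concurrency scheme: one global reader/writer lock guards topology (the RLOCK/WLOCK/UNLOCK macros, with WLOCK bumping gIORegistryGenerationCount), and each entry additionally carries a recursive lock for its property table (PLOCK/PUNLOCK). A rough userland analogue, with std::shared_mutex standing in for lck_rw_t:

    #include <atomic>
    #include <cstdio>
    #include <mutex>
    #include <shared_mutex>

    static std::shared_mutex gRegistryLock;    // stands in for gIORegistryLock
    static std::atomic<int>  gGeneration { 0 };

    static void
    read_topology()
    {
        std::shared_lock<std::shared_mutex> r(gRegistryLock);   // RLOCK
        // ...walk parent/child links...
    }

    static void
    mutate_topology()
    {
        std::unique_lock<std::shared_mutex> w(gRegistryLock);   // WLOCK
        gGeneration++;            // writers bump the generation, as WLOCK does
        // ...edit parent/child links...
    }

    int main()
    {
        mutate_topology();
        read_topology();
        printf("generation %d\n", gGeneration.load());
        return 0;
    }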
OSSymbol::withCString( key); - strlcpy( end, kIORegPlaneLocationSuffix, kIORegPlaneLocationSuffixLen + 1 ); - pathLocationKey = OSSymbol::withCString( key); + strlcpy( end, kIORegPlaneChildSuffix, kIORegPlaneChildSuffixLen + 1 ); + childKey = OSSymbol::withCString( key); - plane = new IORegistryPlane; + strlcpy( end, kIORegPlaneNameSuffix, kIORegPlaneNameSuffixLen + 1 ); + pathNameKey = OSSymbol::withCString( key); - if( plane && plane->init() - && nameKey && parentKey && childKey - && pathNameKey && pathLocationKey ) { + strlcpy( end, kIORegPlaneLocationSuffix, kIORegPlaneLocationSuffixLen + 1 ); + pathLocationKey = OSSymbol::withCString( key); - plane->nameKey = nameKey; - plane->keys[ kParentSetIndex ] = parentKey; - plane->keys[ kChildSetIndex ] = childKey; - plane->pathNameKey = pathNameKey; - plane->pathLocationKey = pathLocationKey; + plane = new IORegistryPlane; - WLOCK; - gIORegistryPlanes->setObject( nameKey, plane ); - UNLOCK; - - } else { + if (plane && plane->init() + && nameKey && parentKey && childKey + && pathNameKey && pathLocationKey) { + plane->nameKey = nameKey; + plane->keys[kParentSetIndex] = parentKey; + plane->keys[kChildSetIndex] = childKey; + plane->pathNameKey = pathNameKey; + plane->pathLocationKey = pathLocationKey; - if( plane) - plane->release(); - if( pathLocationKey) - pathLocationKey->release(); - if( pathNameKey) - pathNameKey->release(); - if( parentKey) - parentKey->release(); - if( childKey) - childKey->release(); - if( nameKey) - nameKey->release(); - plane = 0; - } + WLOCK; + gIORegistryPlanes->setObject( nameKey, plane ); + UNLOCK; + } else { + if (plane) { + plane->release(); + } + if (pathLocationKey) { + pathLocationKey->release(); + } + if (pathNameKey) { + pathNameKey->release(); + } + if (parentKey) { + parentKey->release(); + } + if (childKey) { + childKey->release(); + } + if (nameKey) { + nameKey->release(); + } + plane = 0; + } - return( plane); + return plane; } -const IORegistryPlane * IORegistryEntry::getPlane( const char * name ) +const IORegistryPlane * +IORegistryEntry::getPlane( const char * name ) { - const IORegistryPlane * plane; + const IORegistryPlane * plane; - RLOCK; - plane = (const IORegistryPlane *) gIORegistryPlanes->getObject( name ); - UNLOCK; + RLOCK; + plane = (const IORegistryPlane *) gIORegistryPlanes->getObject( name ); + UNLOCK; - return( plane ); + return plane; } -bool IORegistryPlane::serialize(OSSerialize *s) const +bool +IORegistryPlane::serialize(OSSerialize *s) const { - return( nameKey->serialize(s) ); + return nameKey->serialize(s); } enum { kIORegCapacityIncrement = 4 }; -bool IORegistryEntry::init( OSDictionary * dict ) -{ - OSString * prop; - - if( !super::init()) - return( false); - - if (!reserved) - { - reserved = IONew(ExpansionData, 1); - if (!reserved) - return (false); - bzero(reserved, sizeof(ExpansionData)); - reserved->fLock = IORecursiveLockAlloc(); - if (!reserved->fLock) return (false); - } - if( dict) { - if (OSCollection::kImmutable & dict->setOptions(0, 0)) { - dict = (OSDictionary *) dict->copyCollection(); - if (!dict) - return (false); - } else - dict->retain(); - if( fPropertyTable) - fPropertyTable->release(); - fPropertyTable = dict; +bool +IORegistryEntry::init( OSDictionary * dict ) +{ + OSString * prop; - } else if( !fPropertyTable) { - fPropertyTable = OSDictionary::withCapacity( kIORegCapacityIncrement ); - if( fPropertyTable) - fPropertyTable->setCapacityIncrement( kIORegCapacityIncrement ); - } + if (!super::init()) { + return false; + } + + if (!reserved) { + reserved = 
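makePlane() above derives its per-plane registry keys by copying the plane name into a scratch buffer once and overwriting the tail with each suffix in turn, producing e.g. IOServiceParentLinks / IOServiceChildLinks for the IOService plane. A tiny model with std::string in place of the OSSymbol machinery:

    #include <cstdio>
    #include <string>

    int main()
    {
        const std::string name = "IOService";   // example plane name
        const char *suffixes[] = { "ParentLinks", "ChildLinks", "Name", "Location" };
        for (const char *s : suffixes)
            printf("%s\n", (name + s).c_str()); // one derived key per suffix
        return 0;
    }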
IONew(ExpansionData, 1); + if (!reserved) { + return false; + } + bzero(reserved, sizeof(ExpansionData)); + reserved->fLock = IORecursiveLockAlloc(); + if (!reserved->fLock) { + return false; + } + } + if (dict) { + if (OSCollection::kImmutable & dict->setOptions(0, 0)) { + dict = (OSDictionary *) dict->copyCollection(); + if (!dict) { + return false; + } + } else { + dict->retain(); + } + if (fPropertyTable) { + fPropertyTable->release(); + } + fPropertyTable = dict; + } else if (!fPropertyTable) { + fPropertyTable = OSDictionary::withCapacity( kIORegCapacityIncrement ); + if (fPropertyTable) { + fPropertyTable->setCapacityIncrement( kIORegCapacityIncrement ); + } + } - if( !fPropertyTable) - return( false); + if (!fPropertyTable) { + return false; + } #ifdef IOREGSPLITTABLES - if( !fRegistryTable) { - fRegistryTable = OSDictionary::withCapacity( kIORegCapacityIncrement ); - if( fRegistryTable) - fRegistryTable->setCapacityIncrement( kIORegCapacityIncrement ); - } - - if( (prop = OSDynamicCast( OSString, getProperty( gIONameKey)))) { - OSSymbol * sym = (OSSymbol *)OSSymbol::withString( prop); - // ok for OSSymbol too - setName( sym); - sym->release(); - } + if (!fRegistryTable) { + fRegistryTable = OSDictionary::withCapacity( kIORegCapacityIncrement ); + if (fRegistryTable) { + fRegistryTable->setCapacityIncrement( kIORegCapacityIncrement ); + } + } + + if ((prop = OSDynamicCast( OSString, getProperty( gIONameKey)))) { + OSSymbol * sym = (OSSymbol *)OSSymbol::withString( prop); + // ok for OSSymbol too + setName( sym); + sym->release(); + } #endif /* IOREGSPLITTABLES */ - return( true); + return true; } -bool IORegistryEntry::init( IORegistryEntry * old, - const IORegistryPlane * plane ) +bool +IORegistryEntry::init( IORegistryEntry * old, + const IORegistryPlane * plane ) { - OSArray * all; - IORegistryEntry * next; - unsigned int index; + OSArray * all; + IORegistryEntry * next; + unsigned int index; - if( !super::init()) - return( false); + if (!super::init()) { + return false; + } - if (!reserved) - { - reserved = IONew(ExpansionData, 1); - if (!reserved) return (false); - bzero(reserved, sizeof(ExpansionData)); - reserved->fLock = IORecursiveLockAlloc(); - if (!reserved->fLock) return (false); - } + if (!reserved) { + reserved = IONew(ExpansionData, 1); + if (!reserved) { + return false; + } + bzero(reserved, sizeof(ExpansionData)); + reserved->fLock = IORecursiveLockAlloc(); + if (!reserved->fLock) { + return false; + } + } - WLOCK; + WLOCK; - reserved->fRegistryEntryID = old->reserved->fRegistryEntryID; + reserved->fRegistryEntryID = old->reserved->fRegistryEntryID; - fPropertyTable = old->dictionaryWithProperties(); + fPropertyTable = old->dictionaryWithProperties(); #ifdef IOREGSPLITTABLES - fRegistryTable = old->fRegistryTable; - old->fRegistryTable = (OSDictionary *) fRegistryTable->copyCollection(); + fRegistryTable = old->fRegistryTable; + old->fRegistryTable = (OSDictionary *) fRegistryTable->copyCollection(); #endif /* IOREGSPLITTABLES */ - old->registryTable()->removeObject( plane->keys[ kParentSetIndex ] ); - old->registryTable()->removeObject( plane->keys[ kChildSetIndex ] ); + old->registryTable()->removeObject( plane->keys[kParentSetIndex] ); + old->registryTable()->removeObject( plane->keys[kChildSetIndex] ); - all = getParentSetReference( plane ); - if( all) for( index = 0; - (next = (IORegistryEntry *) all->getObject(index)); - index++ ) { - next->makeLink( this, kChildSetIndex, plane ); - next->breakLink( old, kChildSetIndex, plane ); - } + all = 
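init(dict) above adopts the caller's property table carefully: if OSCollection reports the dictionary immutable, the entry deep-copies it so it owns a mutable table; otherwise it just takes a retain. A model with shared_ptr standing in for retain/release:

    #include <cstdio>
    #include <map>
    #include <memory>
    #include <string>

    using Dict = std::map<std::string, int>;

    struct Entry {
        std::shared_ptr<Dict> props;

        void adopt(std::shared_ptr<Dict> d, bool immutable)
        {
            // immutable input -> private mutable copy; otherwise share ("retain")
            props = immutable ? std::make_shared<Dict>(*d) : d;
        }
    };

    int main()
    {
        auto caller = std::make_shared<Dict>(Dict { { "IOName", 1 } });
        Entry e;
        e.adopt(caller, /*immutable=*/true);
        (*e.props)["added"] = 2;        // safe: the caller's dictionary is untouched
        printf("caller %zu entry %zu\n", caller->size(), e.props->size());
        return 0;
    }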
getParentSetReference( plane ); + if (all) { + for (index = 0; + (next = (IORegistryEntry *) all->getObject(index)); + index++) { + next->makeLink( this, kChildSetIndex, plane ); + next->breakLink( old, kChildSetIndex, plane ); + } + } - all = getChildSetReference( plane ); - if( all) for( index = 0; - (next = (IORegistryEntry *) all->getObject(index)); - index++ ) { - next->makeLink( this, kParentSetIndex, plane ); - next->breakLink( old, kParentSetIndex, plane ); - } + all = getChildSetReference( plane ); + if (all) { + for (index = 0; + (next = (IORegistryEntry *) all->getObject(index)); + index++) { + next->makeLink( this, kParentSetIndex, plane ); + next->breakLink( old, kParentSetIndex, plane ); + } + } - UNLOCK; + UNLOCK; - return( true ); + return true; } -void IORegistryEntry::free( void ) +void +IORegistryEntry::free( void ) { #if DEBUG_FREE - if( registryTable() && gIOServicePlane) { - if( getParentSetReference( gIOServicePlane ) - || getChildSetReference( gIOServicePlane )) { - panic("%s: attached at free()", getName()); - } - } + if (registryTable() && gIOServicePlane) { + if (getParentSetReference( gIOServicePlane ) + || getChildSetReference( gIOServicePlane )) { + panic("%s: attached at free()", getName()); + } + } #endif - if( getPropertyTable()) - getPropertyTable()->release(); + if (getPropertyTable()) { + getPropertyTable()->release(); + } #ifdef IOREGSPLITTABLES - if( registryTable()) - registryTable()->release(); + if (registryTable()) { + registryTable()->release(); + } #endif /* IOREGSPLITTABLES */ - if (reserved) - { - if (reserved->fIndexedProperties) - { - for (int idx = 0; idx < kIORegistryEntryIndexedPropertyCount; idx++) - { - if (reserved->fIndexedProperties[idx]) reserved->fIndexedProperties[idx]->release(); - } - IODelete(reserved->fIndexedProperties, OSObject *, kIORegistryEntryIndexedPropertyCount); + if (reserved) { + if (reserved->fIndexedProperties) { + for (int idx = 0; idx < kIORegistryEntryIndexedPropertyCount; idx++) { + if (reserved->fIndexedProperties[idx]) { + reserved->fIndexedProperties[idx]->release(); + } + } + IODelete(reserved->fIndexedProperties, OSObject *, kIORegistryEntryIndexedPropertyCount); + } + if (reserved->fLock) { + IORecursiveLockFree(reserved->fLock); + } + IODelete(reserved, ExpansionData, 1); } - if (reserved->fLock) IORecursiveLockFree(reserved->fLock); - IODelete(reserved, ExpansionData, 1); - } - super::free(); + super::free(); } -void IORegistryEntry::setPropertyTable( OSDictionary * dict ) +void +IORegistryEntry::setPropertyTable( OSDictionary * dict ) { - if( dict) - dict->retain(); - if( fPropertyTable) - fPropertyTable->release(); + if (dict) { + dict->retain(); + } + if (fPropertyTable) { + fPropertyTable->release(); + } - fPropertyTable = dict; + fPropertyTable = dict; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* Wrappers to synchronize property table */ -#define wrap2(type, constant) \ -OSObject * \ -IORegistryEntry::copyProperty( type * aKey) constant \ -{ \ - OSObject * obj; \ - \ - PLOCK; \ - obj = getProperty( aKey ); \ - if( obj) \ - obj->retain(); \ - PUNLOCK; \ - \ - return( obj ); \ -} - -#define wrap4(type,constant) \ +#define wrap2(type, constant) \ +OSObject * \ +IORegistryEntry::copyProperty( type * aKey) constant \ +{ \ + OSObject * obj; \ + \ + PLOCK; \ + obj = getProperty( aKey ); \ + if( obj) \ + obj->retain(); \ + PUNLOCK; \ + \ + return( obj ); \ +} + +#define wrap4(type, constant) \ OSObject * \ IORegistryEntry::getProperty( type * aKey, \ - const 
IORegistryPlane * plane, \ - IOOptionBits options ) constant \ + const IORegistryPlane * plane, \ + IOOptionBits options ) constant \ { \ OSObject * obj = getProperty( aKey ); \ \ if ( (0 == obj) && plane && (options & kIORegistryIterateRecursively) ) { \ - IORegistryEntry * entry = (IORegistryEntry *) this; \ - IORegistryIterator * iter; \ - iter = IORegistryIterator::iterateOver( entry, plane, options ); \ + IORegistryEntry * entry = (IORegistryEntry *) this; \ + IORegistryIterator * iter; \ + iter = IORegistryIterator::iterateOver( entry, plane, options ); \ \ - if(iter) { \ - while ( (0 == obj) && (entry = iter->getNextObject()) ) { \ - obj = entry->getProperty( aKey ); \ - } \ - iter->release(); \ - } \ + if(iter) { \ + while ( (0 == obj) && (entry = iter->getNextObject()) ) { \ + obj = entry->getProperty( aKey ); \ + } \ + iter->release(); \ + } \ } \ \ return( obj ); \ } -#define wrap5(type,constant) \ +#define wrap5(type, constant) \ OSObject * \ IORegistryEntry::copyProperty( type * aKey, \ - const IORegistryPlane * plane, \ - IOOptionBits options ) constant \ + const IORegistryPlane * plane, \ + IOOptionBits options ) constant \ { \ OSObject * obj = copyProperty( aKey ); \ \ if ( (0 == obj) && plane && (options & kIORegistryIterateRecursively) ) { \ - IORegistryEntry * entry = (IORegistryEntry *) this; \ - IORegistryIterator * iter; \ - iter = IORegistryIterator::iterateOver( entry, plane, options ); \ + IORegistryEntry * entry = (IORegistryEntry *) this; \ + IORegistryIterator * iter; \ + iter = IORegistryIterator::iterateOver( entry, plane, options ); \ \ - if(iter) { \ - while ( (0 == obj) && (entry = iter->getNextObject()) ) { \ - obj = entry->copyProperty( aKey ); \ - } \ - iter->release(); \ - } \ + if(iter) { \ + while ( (0 == obj) && (entry = iter->getNextObject()) ) { \ + obj = entry->copyProperty( aKey ); \ + } \ + iter->release(); \ + } \ } \ \ return( obj ); \ } -bool IORegistryEntry::serializeProperties( OSSerialize * s ) const +bool +IORegistryEntry::serializeProperties( OSSerialize * s ) const { // setProperty( getRetainCount(), 32, "__retain" ); - PLOCK; - OSCollection *snapshotProperties = getPropertyTable()->copyCollection(); - PUNLOCK; + PLOCK; + OSCollection *snapshotProperties = getPropertyTable()->copyCollection(); + PUNLOCK; - if (!snapshotProperties) return (false); + if (!snapshotProperties) { + return false; + } - bool ok = snapshotProperties->serialize( s ); - snapshotProperties->release(); - return( ok ); + bool ok = snapshotProperties->serialize( s ); + snapshotProperties->release(); + return ok; } -OSArray * IORegistryEntry::copyPropertyKeys(void) const +OSArray * +IORegistryEntry::copyPropertyKeys(void) const { - PLOCK; - OSArray * keys = getPropertyTable()->copyKeys(); - PUNLOCK; + PLOCK; + OSArray * keys = getPropertyTable()->copyKeys(); + PUNLOCK; - return (keys); + return keys; } -OSDictionary * IORegistryEntry::dictionaryWithProperties( void ) const +OSDictionary * +IORegistryEntry::dictionaryWithProperties( void ) const { - OSDictionary * dict; + OSDictionary * dict; - PLOCK; - dict = OSDictionary::withDictionary( getPropertyTable(), - getPropertyTable()->getCapacity() ); - PUNLOCK; + PLOCK; + dict = OSDictionary::withDictionary( getPropertyTable(), + getPropertyTable()->getCapacity()); + PUNLOCK; - return( dict ); + return dict; } -IOReturn IORegistryEntry::setProperties( OSObject * properties ) +IOReturn +IORegistryEntry::setProperties( OSObject * properties ) { - return( kIOReturnUnsupported ); + return kIOReturnUnsupported; } wrap2(const 
OSSymbol, const) // copyProperty() definition wrap2(const OSString, const) // copyProperty() definition -wrap2(const char, const) // copyProperty() definition +wrap2(const char, const) // copyProperty() definition wrap4(const OSSymbol, const) // getProperty() w/plane definition wrap4(const OSString, const) // getProperty() w/plane definition @@ -554,1409 +590,1530 @@ wrap5(const char, const) // copyProperty() w/plane definition OSObject * IORegistryEntry::getProperty( const OSSymbol * aKey) const { - OSObject * obj; + OSObject * obj; - PLOCK; - obj = getPropertyTable()->getObject( aKey ); - PUNLOCK; + PLOCK; + obj = getPropertyTable()->getObject( aKey ); + PUNLOCK; - return( obj ); + return obj; } void IORegistryEntry::removeProperty( const OSSymbol * aKey) { - PLOCK; - getPropertyTable()->removeObject( aKey ); - PUNLOCK; + PLOCK; + getPropertyTable()->removeObject( aKey ); + PUNLOCK; } #if KASLR_IOREG_DEBUG extern "C" { - -bool ScanForAddrInObject(OSObject * theObject, - int indent); - +bool ScanForAddrInObject(OSObject * theObject, + int indent); }; /* extern "C" */ #endif bool IORegistryEntry::setProperty( const OSSymbol * aKey, OSObject * anObject) { - bool ret = false; + bool ret = false; - // If we are inserting a collection class and the current entry - // is attached into the registry (inPlane()) then mark the collection - // as immutable. - OSCollection *coll = OSDynamicCast(OSCollection, anObject); - bool makeImmutable = (coll && inPlane()); + // If we are inserting a collection class and the current entry + // is attached into the registry (inPlane()) then mark the collection + // as immutable. + OSCollection *coll = OSDynamicCast(OSCollection, anObject); + bool makeImmutable = (coll && inPlane()); - PLOCK; - if( makeImmutable ) - coll->setOptions( OSCollection::kMASK, OSCollection::kImmutable ); + PLOCK; + if (makeImmutable) { + coll->setOptions( OSCollection::kMASK, OSCollection::kImmutable ); + } - ret = getPropertyTable()->setObject( aKey, anObject ); - PUNLOCK; + ret = getPropertyTable()->setObject( aKey, anObject ); + PUNLOCK; #if KASLR_IOREG_DEBUG - if ( anObject && strcmp(kIOKitDiagnosticsKey, aKey->getCStringNoCopy()) != 0 ) { - if (ScanForAddrInObject(anObject, 0)) { - IOLog("%s: IORegistryEntry name %s with key \"%s\" \n", - __FUNCTION__, - getName(0), - aKey->getCStringNoCopy() ); - } - } + if (anObject && strcmp(kIOKitDiagnosticsKey, aKey->getCStringNoCopy()) != 0) { + if (ScanForAddrInObject(anObject, 0)) { + IOLog("%s: IORegistryEntry name %s with key \"%s\" \n", + __FUNCTION__, + getName(0), + aKey->getCStringNoCopy()); + } + } #endif - return ret; + return ret; } -IOReturn IORegistryEntry:: +IOReturn +IORegistryEntry:: runPropertyAction(Action inAction, OSObject *target, - void *arg0, void *arg1, void *arg2, void *arg3) + void *arg0, void *arg1, void *arg2, void *arg3) { - IOReturn res; + IOReturn res; - // closeGate is recursive so don't worry if we already hold the lock. - PLOCK; - res = (*inAction)(target, arg0, arg1, arg2, arg3); - PUNLOCK; + // closeGate is recursive so don't worry if we already hold the lock. 
+ PLOCK; + res = (*inAction)(target, arg0, arg1, arg2, arg3); + PUNLOCK; - return res; + return res; } OSObject * IORegistryEntry::getProperty( const OSString * aKey) const { - const OSSymbol * tmpKey = OSSymbol::withString( aKey ); - OSObject * obj = getProperty( tmpKey ); + const OSSymbol * tmpKey = OSSymbol::withString( aKey ); + OSObject * obj = getProperty( tmpKey ); - tmpKey->release(); - return( obj ); + tmpKey->release(); + return obj; } OSObject * IORegistryEntry::getProperty( const char * aKey) const { - const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); - OSObject * obj = getProperty( tmpKey ); + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + OSObject * obj = getProperty( tmpKey ); - tmpKey->release(); - return( obj ); + tmpKey->release(); + return obj; } void IORegistryEntry::removeProperty( const OSString * aKey) { - const OSSymbol * tmpKey = OSSymbol::withString( aKey ); - removeProperty( tmpKey ); - tmpKey->release(); + const OSSymbol * tmpKey = OSSymbol::withString( aKey ); + removeProperty( tmpKey ); + tmpKey->release(); } void IORegistryEntry::removeProperty( const char * aKey) { - const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); - removeProperty( tmpKey ); - tmpKey->release(); + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + removeProperty( tmpKey ); + tmpKey->release(); } bool IORegistryEntry::setProperty( const OSString * aKey, OSObject * anObject) { - const OSSymbol * tmpKey = OSSymbol::withString( aKey ); - bool ret = setProperty( tmpKey, anObject ); + const OSSymbol * tmpKey = OSSymbol::withString( aKey ); + bool ret = setProperty( tmpKey, anObject ); - tmpKey->release(); - return ret; + tmpKey->release(); + return ret; } bool -IORegistryEntry::setProperty( const char * aKey, OSObject * anObject) +IORegistryEntry::setProperty( const char * aKey, OSObject * anObject) { - const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); - bool ret = setProperty( tmpKey, anObject ); + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + bool ret = setProperty( tmpKey, anObject ); - tmpKey->release(); - return ret; + tmpKey->release(); + return ret; } bool IORegistryEntry::setProperty(const char * aKey, const char * aString) { - bool ret = false; - OSSymbol * aSymbol = (OSSymbol *) OSSymbol::withCString( aString ); + bool ret = false; + OSSymbol * aSymbol = (OSSymbol *) OSSymbol::withCString( aString ); - if( aSymbol) { - const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); - ret = setProperty( tmpKey, aSymbol ); + if (aSymbol) { + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + ret = setProperty( tmpKey, aSymbol ); - tmpKey->release(); - aSymbol->release(); - } - return( ret ); + tmpKey->release(); + aSymbol->release(); + } + return ret; } bool IORegistryEntry::setProperty(const char * aKey, bool aBoolean) { - bool ret = false; - OSBoolean * aBooleanObj = OSBoolean::withBoolean( aBoolean ); + bool ret = false; + OSBoolean * aBooleanObj = OSBoolean::withBoolean( aBoolean ); - if( aBooleanObj) { - const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); - ret = setProperty( tmpKey, aBooleanObj ); + if (aBooleanObj) { + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + ret = setProperty( tmpKey, aBooleanObj ); - tmpKey->release(); - aBooleanObj->release(); - } - return( ret ); + tmpKey->release(); + aBooleanObj->release(); + } + return ret; } bool IORegistryEntry::setProperty( const char * aKey, - unsigned long long aValue, - unsigned int aNumberOfBits) + unsigned long long aValue, + unsigned int aNumberOfBits) { 
- bool ret = false; - OSNumber * anOffset = OSNumber::withNumber( aValue, aNumberOfBits ); + bool ret = false; + OSNumber * anOffset = OSNumber::withNumber( aValue, aNumberOfBits ); - if( anOffset) { - const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); - ret = setProperty( tmpKey, anOffset ); + if (anOffset) { + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + ret = setProperty( tmpKey, anOffset ); - tmpKey->release(); - anOffset->release(); - } - return( ret ); + tmpKey->release(); + anOffset->release(); + } + return ret; } bool IORegistryEntry::setProperty( const char * aKey, - void * bytes, - unsigned int length) + void * bytes, + unsigned int length) { - bool ret = false; - OSData * data = OSData::withBytes( bytes, length ); + bool ret = false; + OSData * data = OSData::withBytes( bytes, length ); - if( data) { - const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); - ret = setProperty( tmpKey, data ); + if (data) { + const OSSymbol * tmpKey = OSSymbol::withCString( aKey ); + ret = setProperty( tmpKey, data ); - tmpKey->release(); - data->release(); - } - return( ret ); + tmpKey->release(); + data->release(); + } + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -OSObject * IORegistryEntry::setIndexedProperty(uint32_t index, OSObject * anObject) +OSObject * +IORegistryEntry::setIndexedProperty(uint32_t index, OSObject * anObject) { - OSObject ** array; - OSObject * prior; + OSObject ** array; + OSObject * prior; - if (index >= kIORegistryEntryIndexedPropertyCount) return (0); + if (index >= kIORegistryEntryIndexedPropertyCount) { + return 0; + } - array = atomic_load_explicit(&reserved->fIndexedProperties, memory_order_acquire); - if (!array) - { - array = IONew(OSObject *, kIORegistryEntryIndexedPropertyCount); - if (!array) return (0); - bzero(array, kIORegistryEntryIndexedPropertyCount * sizeof(array[0])); - if (!OSCompareAndSwapPtr(NULL, array, &reserved->fIndexedProperties)) IODelete(array, OSObject *, kIORegistryEntryIndexedPropertyCount); - } - if (!reserved->fIndexedProperties) return (0); + array = atomic_load_explicit(&reserved->fIndexedProperties, memory_order_acquire); + if (!array) { + array = IONew(OSObject *, kIORegistryEntryIndexedPropertyCount); + if (!array) { + return 0; + } + bzero(array, kIORegistryEntryIndexedPropertyCount * sizeof(array[0])); + if (!OSCompareAndSwapPtr(NULL, array, &reserved->fIndexedProperties)) { + IODelete(array, OSObject *, kIORegistryEntryIndexedPropertyCount); + } + } + if (!reserved->fIndexedProperties) { + return 0; + } - prior = reserved->fIndexedProperties[index]; - if (anObject) anObject->retain(); - reserved->fIndexedProperties[index] = anObject; + prior = reserved->fIndexedProperties[index]; + if (anObject) { + anObject->retain(); + } + reserved->fIndexedProperties[index] = anObject; - return (prior); + return prior; } -OSObject * IORegistryEntry::getIndexedProperty(uint32_t index) const +OSObject * +IORegistryEntry::getIndexedProperty(uint32_t index) const { - if (index >= kIORegistryEntryIndexedPropertyCount) return (0); - if (!reserved->fIndexedProperties) return (0); + if (index >= kIORegistryEntryIndexedPropertyCount) { + return 0; + } + if (!reserved->fIndexedProperties) { + return 0; + } - return (reserved->fIndexedProperties[index]); + return reserved->fIndexedProperties[index]; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ /* Name, location, paths */ -const char * IORegistryEntry::getName( const IORegistryPlane * plane ) const +const 
char * +IORegistryEntry::getName( const IORegistryPlane * plane ) const { - OSSymbol * sym = 0; + OSSymbol * sym = 0; - RLOCK; - if( plane) - sym = (OSSymbol *) registryTable()->getObject( plane->pathNameKey ); - if( !sym) - sym = (OSSymbol *) registryTable()->getObject( gIONameKey ); - UNLOCK; + RLOCK; + if (plane) { + sym = (OSSymbol *) registryTable()->getObject( plane->pathNameKey ); + } + if (!sym) { + sym = (OSSymbol *) registryTable()->getObject( gIONameKey ); + } + UNLOCK; - if( sym) - return( sym->getCStringNoCopy()); - else - return( (getMetaClass())->getClassName()); + if (sym) { + return sym->getCStringNoCopy(); + } else { + return (getMetaClass())->getClassName(); + } } -const OSSymbol * IORegistryEntry::copyName( - const IORegistryPlane * plane ) const +const OSSymbol * +IORegistryEntry::copyName( + const IORegistryPlane * plane ) const { - OSSymbol * sym = 0; + OSSymbol * sym = 0; - RLOCK; - if( plane) - sym = (OSSymbol *) registryTable()->getObject( plane->pathNameKey ); - if( !sym) - sym = (OSSymbol *) registryTable()->getObject( gIONameKey ); - if( sym) - sym->retain(); - UNLOCK; + RLOCK; + if (plane) { + sym = (OSSymbol *) registryTable()->getObject( plane->pathNameKey ); + } + if (!sym) { + sym = (OSSymbol *) registryTable()->getObject( gIONameKey ); + } + if (sym) { + sym->retain(); + } + UNLOCK; - if( sym) - return( sym ); - else - return( OSSymbol::withCString((getMetaClass())->getClassName()) ); + if (sym) { + return sym; + } else { + return OSSymbol::withCString((getMetaClass())->getClassName()); + } } -const OSSymbol * IORegistryEntry::copyLocation( - const IORegistryPlane * plane ) const +const OSSymbol * +IORegistryEntry::copyLocation( + const IORegistryPlane * plane ) const { - OSSymbol * sym = 0; + OSSymbol * sym = 0; - RLOCK; - if( plane) - sym = (OSSymbol *) registryTable()->getObject( plane->pathLocationKey ); - if( !sym) - sym = (OSSymbol *) registryTable()->getObject( gIOLocationKey ); - if( sym) - sym->retain(); - UNLOCK; + RLOCK; + if (plane) { + sym = (OSSymbol *) registryTable()->getObject( plane->pathLocationKey ); + } + if (!sym) { + sym = (OSSymbol *) registryTable()->getObject( gIOLocationKey ); + } + if (sym) { + sym->retain(); + } + UNLOCK; - return( sym ); + return sym; } -const char * IORegistryEntry::getLocation( const IORegistryPlane * plane ) const +const char * +IORegistryEntry::getLocation( const IORegistryPlane * plane ) const { - const OSSymbol * sym = copyLocation( plane ); - const char * result = 0; + const OSSymbol * sym = copyLocation( plane ); + const char * result = 0; - if( sym) { - result = sym->getCStringNoCopy(); - sym->release(); - } + if (sym) { + result = sym->getCStringNoCopy(); + sym->release(); + } - return( result ); + return result; } -void IORegistryEntry::setName( const OSSymbol * name, - const IORegistryPlane * plane ) +void +IORegistryEntry::setName( const OSSymbol * name, + const IORegistryPlane * plane ) { - const OSSymbol * key; + const OSSymbol * key; - if( name) { - if( plane) - key = plane->pathNameKey; - else - key = gIONameKey; + if (name) { + if (plane) { + key = plane->pathNameKey; + } else { + key = gIONameKey; + } - if (gIOKitTrace && reserved && reserved->fRegistryEntryID) - { - uint64_t str_id = 0; - uint64_t __unused regID = getRegistryEntryID(); - kernel_debug_string(IODBG_IOREGISTRY(IOREGISTRYENTRY_NAME_STRING), &str_id, name->getCStringNoCopy()); - KERNEL_DEBUG_CONSTANT(IODBG_IOREGISTRY(IOREGISTRYENTRY_NAME), - (uintptr_t) regID, - (uintptr_t) (regID >> 32), - (uintptr_t) str_id, - (uintptr_t) 
(str_id >> 32), - 0); - } + if (gIOKitTrace && reserved && reserved->fRegistryEntryID) { + uint64_t str_id = 0; + uint64_t __unused regID = getRegistryEntryID(); + kernel_debug_string(IODBG_IOREGISTRY(IOREGISTRYENTRY_NAME_STRING), &str_id, name->getCStringNoCopy()); + KERNEL_DEBUG_CONSTANT(IODBG_IOREGISTRY(IOREGISTRYENTRY_NAME), + (uintptr_t) regID, + (uintptr_t) (regID >> 32), + (uintptr_t) str_id, + (uintptr_t) (str_id >> 32), + 0); + } - WLOCK; - registryTable()->setObject( key, (OSObject *) name); - UNLOCK; - } + WLOCK; + registryTable()->setObject( key, (OSObject *) name); + UNLOCK; + } } -void IORegistryEntry::setName( const char * name, - const IORegistryPlane * plane ) +void +IORegistryEntry::setName( const char * name, + const IORegistryPlane * plane ) { - OSSymbol * sym = (OSSymbol *)OSSymbol::withCString( name ); - if ( sym ) { - setName( sym, plane ); - sym->release(); - } + OSSymbol * sym = (OSSymbol *)OSSymbol::withCString( name ); + if (sym) { + setName( sym, plane ); + sym->release(); + } } -void IORegistryEntry::setLocation( const OSSymbol * location, - const IORegistryPlane * plane ) +void +IORegistryEntry::setLocation( const OSSymbol * location, + const IORegistryPlane * plane ) { - const OSSymbol * key; + const OSSymbol * key; - if( location) { - if( plane) - key = plane->pathLocationKey; - else - key = gIOLocationKey; + if (location) { + if (plane) { + key = plane->pathLocationKey; + } else { + key = gIOLocationKey; + } - WLOCK; - registryTable()->setObject( key, (OSObject *) location); - UNLOCK; - } + WLOCK; + registryTable()->setObject( key, (OSObject *) location); + UNLOCK; + } } -void IORegistryEntry::setLocation( const char * location, - const IORegistryPlane * plane ) +void +IORegistryEntry::setLocation( const char * location, + const IORegistryPlane * plane ) { - OSSymbol * sym = (OSSymbol *)OSSymbol::withCString( location ); - if ( sym ) { - setLocation( sym, plane ); - sym->release(); - } + OSSymbol * sym = (OSSymbol *)OSSymbol::withCString( location ); + if (sym) { + setLocation( sym, plane ); + sym->release(); + } } bool IORegistryEntry::compareName( OSString * name, OSString ** matched ) const { - const OSSymbol * sym = copyName(); - bool isEqual; + const OSSymbol * sym = copyName(); + bool isEqual; - isEqual = (sym && sym->isEqualTo(name)); + isEqual = (sym && sym->isEqualTo(name)); - if( isEqual && matched) { - name->retain(); - *matched = name; - } + if (isEqual && matched) { + name->retain(); + *matched = name; + } - if( sym) - sym->release(); + if (sym) { + sym->release(); + } - return( isEqual ); + return isEqual; } bool IORegistryEntry::compareNames( OSObject * names, OSString ** matched ) const { - OSString * string; - OSCollection * collection; - OSIterator * iter = 0; - bool result = false; - - if( (collection = OSDynamicCast( OSCollection, names))) { - iter = OSCollectionIterator::withCollection( collection ); - string = 0; - } else - string = OSDynamicCast( OSString, names); - - do { - if( string) - result = compareName( string, matched ); - - } while( (false == result) - && iter && (string = OSDynamicCast( OSString, iter->getNextObject()))); - - if( iter) - iter->release(); - - return( result); -} - - -bool IORegistryEntry::getPath( char * path, int * length, - const IORegistryPlane * plane ) const -{ - OSArray * stack; - IORegistryEntry * root; - const IORegistryEntry * entry; - const IORegistryEntry * parent; - const OSSymbol * alias; - int index; - int len, maxLength, compLen, aliasLen; - char * nextComp; - bool ok; - - if( !path || !length 
|| !plane) - return( false); - - len = 0; - maxLength = *length - 2; - nextComp = path; - - len = plane->nameKey->getLength(); - if( len >= maxLength) - return( false); - strlcpy( nextComp, plane->nameKey->getCStringNoCopy(), len + 1); - nextComp[ len++ ] = ':'; - nextComp += len; - - if( (alias = hasAlias( plane ))) { - aliasLen = alias->getLength(); - len += aliasLen; - ok = (maxLength > len); - *length = len; - if( ok) - strlcpy( nextComp, alias->getCStringNoCopy(), aliasLen + 1); - return( ok ); - } - - stack = OSArray::withCapacity( getDepth( plane )); - if (!stack) return( false); - - RLOCK; - - parent = entry = this; - root = gRegistryRoot->getChildEntry( plane ); - while (parent && (parent != root)) - { - // stop below root - entry = parent; - parent = entry->getParentEntry( plane ); - stack->setObject( (OSObject *) entry ); - } - - ok = (0 != parent); - if (ok) - { - index = stack->getCount(); - if( 0 == index) { - - *nextComp++ = '/'; - *nextComp = 0; - len++; - - } else while( ok && ((--index) >= 0)) { - - entry = (IORegistryEntry *) stack->getObject((unsigned int) index ); - assert( entry ); - - if( (alias = entry->hasAlias( plane ))) { - len = plane->nameKey->getLength() + 1; - nextComp = path + len; - - compLen = alias->getLength(); - ok = (maxLength > (len + compLen)); - if( ok) - strlcpy( nextComp, alias->getCStringNoCopy(), compLen + 1); - } else { - compLen = maxLength - len; - ok = entry->getPathComponent( nextComp + 1, &compLen, plane ); - - if( ok && compLen) { - compLen++; - *nextComp = '/'; - } - } - - if( ok) { - len += compLen; - nextComp += compLen; - } - } - *length = len; - } - UNLOCK; - stack->release(); - - return( ok ); -} - -bool IORegistryEntry::getPathComponent( char * path, int * length, - const IORegistryPlane * plane ) const -{ - int len, locLen, maxLength; - const char * compName; - const char * loc; - bool ok; - - maxLength = *length; - - compName = getName( plane ); - len = strlen( compName ); - if( (loc = getLocation( plane ))) - locLen = 1 + strlen( loc ); - else - locLen = 0; - - ok = ((len + locLen + 1) < maxLength); - if( ok) { - strlcpy( path, compName, len + 1 ); - if( loc) { - path += len; - len += locLen; - *path++ = '@'; - strlcpy( path, loc, locLen ); - } - *length = len; - } - - return( ok ); -} - -const char * IORegistryEntry::matchPathLocation( const char * cmp, - const IORegistryPlane * plane ) -{ - const char * str; - const char * result = 0; - u_quad_t num1, num2; - char lastPathChar, lastLocationChar; - - str = getLocation( plane ); - if( str) { - lastPathChar = cmp[0]; - lastLocationChar = str[0]; + OSString * string; + OSCollection * collection; + OSIterator * iter = 0; + bool result = false; + + if ((collection = OSDynamicCast( OSCollection, names))) { + iter = OSCollectionIterator::withCollection( collection ); + string = 0; + } else { + string = OSDynamicCast( OSString, names); + } + do { - if( lastPathChar) { - num1 = strtouq( cmp, (char **) &cmp, 16 ); - lastPathChar = *cmp++; - } else - num1 = 0; - - if( lastLocationChar) { - num2 = strtouq( str, (char **) &str, 16 ); - lastLocationChar = *str++; - } else - num2 = 0; - - if( num1 != num2) - break; - - if (!lastPathChar && !lastLocationChar) { - result = cmp - 1; - break; - } - - if( (',' != lastPathChar) && (':' != lastPathChar)) - lastPathChar = 0; - - if (lastPathChar && lastLocationChar && (lastPathChar != lastLocationChar)) - break; - - } while( true); - } - - return( result ); -} - -IORegistryEntry * IORegistryEntry::getChildFromComponent( const char ** opath, - const 
IORegistryPlane * plane ) -{ - IORegistryEntry * entry = 0; - OSArray * set; - unsigned int index; - const char * path; - const char * cmp = 0; - char c; - size_t len; - const char * str; - - set = getChildSetReference( plane ); - if( set) { - - path = *opath; - - for( index = 0; - (entry = (IORegistryEntry *) set->getObject(index)); - index++ ) { - - cmp = path; - - if( *cmp != '@') { - str = entry->getName( plane ); - len = strlen( str ); - if( strncmp( str, cmp, len )) - continue; - cmp += len; - - c = *cmp; - if( (c == 0) || (c == '/') || (c == ':')) - break; - if( c != '@') - continue; - } - cmp++; - if( (cmp = entry->matchPathLocation( cmp, plane ))) - break; - } - if( entry) - *opath = cmp; - } - - return( entry ); -} - -const OSSymbol * IORegistryEntry::hasAlias( const IORegistryPlane * plane, - char * opath, int * length ) const -{ - IORegistryEntry * entry; - IORegistryEntry * entry2; - const OSSymbol * key; - const OSSymbol * bestKey = 0; - OSIterator * iter; - OSData * data; - const char * path = "/aliases"; - - entry = IORegistryEntry::fromPath( path, plane ); - if( entry) { - RLOCK; - if( (iter = OSCollectionIterator::withCollection( - entry->getPropertyTable() ))) { - - while( (key = (OSSymbol *) iter->getNextObject())) { - - data = (OSData *) entry->getProperty( key ); - path = (const char *) data->getBytesNoCopy(); - if( (entry2 = IORegistryEntry::fromPath( path, plane, - opath, length ))) { - if( this == entry2) { - if( !bestKey - || (bestKey->getLength() > key->getLength())) - // pick the smallest alias - bestKey = key; - } - entry2->release(); + if (string) { + result = compareName( string, matched ); } - } - iter->release(); - } - entry->release(); - UNLOCK; - } - return( bestKey ); -} - -const char * IORegistryEntry::dealiasPath( - const char ** opath, - const IORegistryPlane * plane ) -{ - IORegistryEntry * entry; - OSData * data; - const char * path = *opath; - const char * rpath = 0; - const char * end; - char c; - char temp[ kIOMaxPlaneName + 1 ]; - - if( path[0] == '/') - return( rpath ); - - // check for alias - end = path; - while( (c = *end++) && (c != '/') && (c != ':')) - {} - end--; - if( (end - path) < kIOMaxPlaneName) { - strlcpy( temp, path, end - path + 1 ); - - RLOCK; - entry = IORegistryEntry::fromPath( "/aliases", plane ); - if( entry) { - data = (OSData *) entry->getProperty( temp ); - if( data ) { - rpath = (const char *) data->getBytesNoCopy(); - if( rpath) - *opath = end; - } - entry->release(); - } - UNLOCK; - } - - return( rpath ); -} - -IORegistryEntry * IORegistryEntry::fromPath( - const char * path, - const IORegistryPlane * plane, - char * opath, - int * length, - IORegistryEntry * fromEntry ) -{ - IORegistryEntry * where = 0; - IORegistryEntry * aliasEntry = 0; - IORegistryEntry * next; - const char * alias; - const char * end; - int len = 0; - int len2; - char c; - char temp[ kIOMaxPlaneName + 1 ]; - - if( 0 == path) - return( 0 ); - - if( 0 == plane) { - // get plane name - end = strchr( path, ':' ); - if( end && ((end - path) < kIOMaxPlaneName)) { - strlcpy( temp, path, end - path + 1 ); - plane = getPlane( temp ); - path = end + 1; - } - } - if( 0 == plane) - return( 0 ); - - // check for alias - end = path; - if( (alias = dealiasPath( &end, plane))) { - if( length) - len = *length; - aliasEntry = IORegistryEntry::fromPath( alias, plane, - opath, &len, fromEntry ); - where = aliasEntry; - if( where) - path = end; - else - len = 0; - } - - RLOCK; - - do { - if( 0 == where) { - if( (0 == fromEntry) && (*path++ == '/')) - fromEntry = 
gRegistryRoot->getChildEntry( plane ); - where = fromEntry; - if( 0 == where) - break; - } else { - c = *path++; - if( c != '/') { - if( c && (c != ':')) // check valid terminator - where = 0; - break; - } - } - next = where->getChildFromComponent( &path, plane ); - if( next) - where = next; - } while( next ); - - if( where) { - // check residual path - if( where != fromEntry) - path--; - - if( opath && length) { - // copy out residual path - len2 = strlen( path ); - if( (len + len2) < *length) - strlcpy( opath + len, path, len2 + 1 ); - *length = (len + len2); - - } else if( path[0]) - // no residual path => must be no tail for success - where = 0; - } - - if( where) - where->retain(); - if( aliasEntry) - aliasEntry->release(); - - UNLOCK; - - return( where ); -} - -IORegistryEntry * IORegistryEntry::childFromPath( - const char * path, - const IORegistryPlane * plane, - char * opath, - int * len ) -{ - return( IORegistryEntry::fromPath( path, plane, opath, len, this )); + } while ((false == result) + && iter && (string = OSDynamicCast( OSString, iter->getNextObject()))); + + if (iter) { + iter->release(); + } + + return result; } -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define IOLinkIterator OSCollectionIterator +bool +IORegistryEntry::getPath( char * path, int * length, + const IORegistryPlane * plane ) const +{ + OSArray * stack; + IORegistryEntry * root; + const IORegistryEntry * entry; + const IORegistryEntry * parent; + const OSSymbol * alias; + int index; + int len, maxLength, compLen, aliasLen; + char * nextComp; + bool ok; + + if (!path || !length || !plane) { + return false; + } -#undef super -#define super OSObject + len = 0; + maxLength = *length - 2; + nextComp = path; + + len = plane->nameKey->getLength(); + if (len >= maxLength) { + return false; + } + strlcpy( nextComp, plane->nameKey->getCStringNoCopy(), len + 1); + nextComp[len++] = ':'; + nextComp += len; + + if ((alias = hasAlias( plane ))) { + aliasLen = alias->getLength(); + len += aliasLen; + ok = (maxLength > len); + *length = len; + if (ok) { + strlcpy( nextComp, alias->getCStringNoCopy(), aliasLen + 1); + } + return ok; + } + + stack = OSArray::withCapacity( getDepth( plane )); + if (!stack) { + return false; + } + + RLOCK; + + parent = entry = this; + root = gRegistryRoot->getChildEntry( plane ); + while (parent && (parent != root)) { + // stop below root + entry = parent; + parent = entry->getParentEntry( plane ); + stack->setObject((OSObject *) entry ); + } + + ok = (0 != parent); + if (ok) { + index = stack->getCount(); + if (0 == index) { + *nextComp++ = '/'; + *nextComp = 0; + len++; + } else { + while (ok && ((--index) >= 0)) { + entry = (IORegistryEntry *) stack->getObject((unsigned int) index ); + assert( entry ); + + if ((alias = entry->hasAlias( plane ))) { + len = plane->nameKey->getLength() + 1; + nextComp = path + len; + + compLen = alias->getLength(); + ok = (maxLength > (len + compLen)); + if (ok) { + strlcpy( nextComp, alias->getCStringNoCopy(), compLen + 1); + } + } else { + compLen = maxLength - len; + ok = entry->getPathComponent( nextComp + 1, &compLen, plane ); + + if (ok && compLen) { + compLen++; + *nextComp = '/'; + } + } + + if (ok) { + len += compLen; + nextComp += compLen; + } + } + } + *length = len; + } + UNLOCK; + stack->release(); -inline bool IORegistryEntry::arrayMember( OSArray * set, - const IORegistryEntry * member, - unsigned int * index ) const + return ok; +} + +bool +IORegistryEntry::getPathComponent( char * path, int * length, + const 
IORegistryPlane * plane ) const { - int i; - OSObject * probeObject; + int len, locLen, maxLength; + const char * compName; + const char * loc; + bool ok; + + maxLength = *length; + + compName = getName( plane ); + len = strlen( compName ); + if ((loc = getLocation( plane ))) { + locLen = 1 + strlen( loc ); + } else { + locLen = 0; + } + + ok = ((len + locLen + 1) < maxLength); + if (ok) { + strlcpy( path, compName, len + 1 ); + if (loc) { + path += len; + len += locLen; + *path++ = '@'; + strlcpy( path, loc, locLen ); + } + *length = len; + } + + return ok; +} + +const char * +IORegistryEntry::matchPathLocation( const char * cmp, + const IORegistryPlane * plane ) +{ + const char * str; + const char * result = 0; + u_quad_t num1, num2; + char lastPathChar, lastLocationChar; + + str = getLocation( plane ); + if (str) { + lastPathChar = cmp[0]; + lastLocationChar = str[0]; + do { + if (lastPathChar) { + num1 = strtouq( cmp, (char **) &cmp, 16 ); + lastPathChar = *cmp++; + } else { + num1 = 0; + } + + if (lastLocationChar) { + num2 = strtouq( str, (char **) &str, 16 ); + lastLocationChar = *str++; + } else { + num2 = 0; + } + + if (num1 != num2) { + break; + } + + if (!lastPathChar && !lastLocationChar) { + result = cmp - 1; + break; + } + + if ((',' != lastPathChar) && (':' != lastPathChar)) { + lastPathChar = 0; + } + + if (lastPathChar && lastLocationChar && (lastPathChar != lastLocationChar)) { + break; + } + } while (true); + } + + return result; +} + +IORegistryEntry * +IORegistryEntry::getChildFromComponent( const char ** opath, + const IORegistryPlane * plane ) +{ + IORegistryEntry * entry = 0; + OSArray * set; + unsigned int index; + const char * path; + const char * cmp = 0; + char c; + size_t len; + const char * str; + + set = getChildSetReference( plane ); + if (set) { + path = *opath; + + for (index = 0; + (entry = (IORegistryEntry *) set->getObject(index)); + index++) { + cmp = path; + + if (*cmp != '@') { + str = entry->getName( plane ); + len = strlen( str ); + if (strncmp( str, cmp, len )) { + continue; + } + cmp += len; + + c = *cmp; + if ((c == 0) || (c == '/') || (c == ':')) { + break; + } + if (c != '@') { + continue; + } + } + cmp++; + if ((cmp = entry->matchPathLocation( cmp, plane ))) { + break; + } + } + if (entry) { + *opath = cmp; + } + } - for( i = 0; (probeObject = set->getObject(i)); i++) { - if (probeObject == (OSObject *) member) { - if( index) - *index = i; - return( true ); + return entry; +} + +const OSSymbol * +IORegistryEntry::hasAlias( const IORegistryPlane * plane, + char * opath, int * length ) const +{ + IORegistryEntry * entry; + IORegistryEntry * entry2; + const OSSymbol * key; + const OSSymbol * bestKey = 0; + OSIterator * iter; + OSData * data; + const char * path = "/aliases"; + + entry = IORegistryEntry::fromPath( path, plane ); + if (entry) { + RLOCK; + if ((iter = OSCollectionIterator::withCollection( + entry->getPropertyTable()))) { + while ((key = (OSSymbol *) iter->getNextObject())) { + data = (OSData *) entry->getProperty( key ); + path = (const char *) data->getBytesNoCopy(); + if ((entry2 = IORegistryEntry::fromPath( path, plane, + opath, length ))) { + if (this == entry2) { + if (!bestKey + || (bestKey->getLength() > key->getLength())) { + // pick the smallest alias + bestKey = key; + } + } + entry2->release(); + } + } + iter->release(); + } + entry->release(); + UNLOCK; } - } - return( false ); + return bestKey; } -bool IORegistryEntry::makeLink( IORegistryEntry * to, - unsigned int relation, - const IORegistryPlane * plane ) const 
+const char * +IORegistryEntry::dealiasPath( + const char ** opath, + const IORegistryPlane * plane ) { - OSArray * links; - bool result = false; + IORegistryEntry * entry; + OSData * data; + const char * path = *opath; + const char * rpath = 0; + const char * end; + char c; + char temp[kIOMaxPlaneName + 1]; + + if (path[0] == '/') { + return rpath; + } + + // check for alias + end = path; + while ((c = *end++) && (c != '/') && (c != ':')) { + } + end--; + if ((end - path) < kIOMaxPlaneName) { + strlcpy( temp, path, end - path + 1 ); + + RLOCK; + entry = IORegistryEntry::fromPath( "/aliases", plane ); + if (entry) { + data = (OSData *) entry->getProperty( temp ); + if (data) { + rpath = (const char *) data->getBytesNoCopy(); + if (rpath) { + *opath = end; + } + } + entry->release(); + } + UNLOCK; + } + + return rpath; +} + +IORegistryEntry * +IORegistryEntry::fromPath( + const char * path, + const IORegistryPlane * plane, + char * opath, + int * length, + IORegistryEntry * fromEntry ) +{ + IORegistryEntry * where = 0; + IORegistryEntry * aliasEntry = 0; + IORegistryEntry * next; + const char * alias; + const char * end; + int len = 0; + int len2; + char c; + char temp[kIOMaxPlaneName + 1]; + + if (0 == path) { + return 0; + } + + if (0 == plane) { + // get plane name + end = strchr( path, ':' ); + if (end && ((end - path) < kIOMaxPlaneName)) { + strlcpy( temp, path, end - path + 1 ); + plane = getPlane( temp ); + path = end + 1; + } + } + if (0 == plane) { + return 0; + } + + // check for alias + end = path; + if ((alias = dealiasPath( &end, plane))) { + if (length) { + len = *length; + } + aliasEntry = IORegistryEntry::fromPath( alias, plane, + opath, &len, fromEntry ); + where = aliasEntry; + if (where) { + path = end; + } else { + len = 0; + } + } - if( (links = (OSArray *) - registryTable()->getObject( plane->keys[ relation ] ))) { + RLOCK; - result = arrayMember( links, to ); - if( !result) - result = links->setObject( to ); + do { + if (0 == where) { + if ((0 == fromEntry) && (*path++ == '/')) { + fromEntry = gRegistryRoot->getChildEntry( plane ); + } + where = fromEntry; + if (0 == where) { + break; + } + } else { + c = *path++; + if (c != '/') { + if (c && (c != ':')) { // check valid terminator + where = 0; + } + break; + } + } + next = where->getChildFromComponent( &path, plane ); + if (next) { + where = next; + } + } while (next); - } else { + if (where) { + // check residual path + if (where != fromEntry) { + path--; + } - links = OSArray::withObjects( (const OSObject **) &to, 1, 1 ); - result = (links != 0); - if( result) { - result = registryTable()->setObject( plane->keys[ relation ], - links ); - links->release(); + if (opath && length) { + // copy out residual path + len2 = strlen( path ); + if ((len + len2) < *length) { + strlcpy( opath + len, path, len2 + 1 ); + } + *length = (len + len2); + } else if (path[0]) { + // no residual path => must be no tail for success + where = 0; + } } - } - reserved->fRegistryEntryGenerationCount++; - return( result); + if (where) { + where->retain(); + } + if (aliasEntry) { + aliasEntry->release(); + } + + UNLOCK; + + return where; +} + +IORegistryEntry * +IORegistryEntry::childFromPath( + const char * path, + const IORegistryPlane * plane, + char * opath, + int * len ) +{ + return IORegistryEntry::fromPath( path, plane, opath, len, this ); } -void IORegistryEntry::breakLink( IORegistryEntry * to, - unsigned int relation, - const IORegistryPlane * plane ) const +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ 
+ +#define IOLinkIterator OSCollectionIterator + +#undef super +#define super OSObject + +inline bool +IORegistryEntry::arrayMember( OSArray * set, + const IORegistryEntry * member, + unsigned int * index ) const { - OSArray * links; - unsigned int index; + int i; + OSObject * probeObject; + + for (i = 0; (probeObject = set->getObject(i)); i++) { + if (probeObject == (OSObject *) member) { + if (index) { + *index = i; + } + return true; + } + } + return false; +} - if( (links = (OSArray *) - registryTable()->getObject( plane->keys[ relation ]))) { +bool +IORegistryEntry::makeLink( IORegistryEntry * to, + unsigned int relation, + const IORegistryPlane * plane ) const +{ + OSArray * links; + bool result = false; + + if ((links = (OSArray *) + registryTable()->getObject( plane->keys[relation] ))) { + result = arrayMember( links, to ); + if (!result) { + result = links->setObject( to ); + } + } else { + links = OSArray::withObjects((const OSObject **) &to, 1, 1 ); + result = (links != 0); + if (result) { + result = registryTable()->setObject( plane->keys[relation], + links ); + links->release(); + } + } + reserved->fRegistryEntryGenerationCount++; + + return result; +} - if( arrayMember( links, to, &index )) { - links->removeObject( index ); - if( 0 == links->getCount()) - registryTable()->removeObject( plane->keys[ relation ]); - } - } - reserved->fRegistryEntryGenerationCount++; +void +IORegistryEntry::breakLink( IORegistryEntry * to, + unsigned int relation, + const IORegistryPlane * plane ) const +{ + OSArray * links; + unsigned int index; + + if ((links = (OSArray *) + registryTable()->getObject( plane->keys[relation]))) { + if (arrayMember( links, to, &index )) { + links->removeObject( index ); + if (0 == links->getCount()) { + registryTable()->removeObject( plane->keys[relation]); + } + } + } + reserved->fRegistryEntryGenerationCount++; } -OSArray * IORegistryEntry::getParentSetReference( - const IORegistryPlane * plane ) const +OSArray * +IORegistryEntry::getParentSetReference( + const IORegistryPlane * plane ) const { - if( plane) - return( (OSArray *) registryTable()->getObject( - plane->keys[ kParentSetIndex ])); - else - return( 0 ); + if (plane) { + return (OSArray *) registryTable()->getObject( + plane->keys[kParentSetIndex]); + } else { + return 0; + } } -OSIterator * IORegistryEntry::getParentIterator( - const IORegistryPlane * plane ) const +OSIterator * +IORegistryEntry::getParentIterator( + const IORegistryPlane * plane ) const { - OSArray * links; - OSIterator * iter; + OSArray * links; + OSIterator * iter; - if( !plane) - return( 0 ); + if (!plane) { + return 0; + } - RLOCK; - links = getParentSetReference( plane ); - if( 0 == links) - links = OSArray::withCapacity( 1 ); - else - links = OSArray::withArray( links, links->getCount() ); - UNLOCK; + RLOCK; + links = getParentSetReference( plane ); + if (0 == links) { + links = OSArray::withCapacity( 1 ); + } else { + links = OSArray::withArray( links, links->getCount()); + } + UNLOCK; - iter = IOLinkIterator::withCollection( links ); + iter = IOLinkIterator::withCollection( links ); - if( links) - links->release(); + if (links) { + links->release(); + } - return( iter ); + return iter; } -IORegistryEntry * IORegistryEntry::copyParentEntry( const IORegistryPlane * plane ) const +IORegistryEntry * +IORegistryEntry::copyParentEntry( const IORegistryPlane * plane ) const { - IORegistryEntry * entry = 0; - OSArray * links; + IORegistryEntry * entry = 0; + OSArray * links; - RLOCK; + RLOCK; - if( (links = getParentSetReference( 
plane ))) { - entry = (IORegistryEntry *) links->getObject( 0 ); - entry->retain(); - } + if ((links = getParentSetReference( plane ))) { + entry = (IORegistryEntry *) links->getObject( 0 ); + entry->retain(); + } - UNLOCK; + UNLOCK; - return( entry); + return entry; } -IORegistryEntry * IORegistryEntry::getParentEntry( const IORegistryPlane * plane ) const +IORegistryEntry * +IORegistryEntry::getParentEntry( const IORegistryPlane * plane ) const { - IORegistryEntry * entry; + IORegistryEntry * entry; - entry = copyParentEntry( plane ); - if( entry) - entry->release(); + entry = copyParentEntry( plane ); + if (entry) { + entry->release(); + } - return( entry ); + return entry; } -OSArray * IORegistryEntry::getChildSetReference( const IORegistryPlane * plane ) const +OSArray * +IORegistryEntry::getChildSetReference( const IORegistryPlane * plane ) const { - if( plane) - return( (OSArray *) registryTable()->getObject( - plane->keys[ kChildSetIndex ])); - else - return( 0 ); + if (plane) { + return (OSArray *) registryTable()->getObject( + plane->keys[kChildSetIndex]); + } else { + return 0; + } } -OSIterator * IORegistryEntry::getChildIterator( const IORegistryPlane * plane ) const +OSIterator * +IORegistryEntry::getChildIterator( const IORegistryPlane * plane ) const { - OSArray * links; - OSIterator * iter; + OSArray * links; + OSIterator * iter; - if( !plane) - return( 0 ); + if (!plane) { + return 0; + } - RLOCK; - links = getChildSetReference( plane ); - if( 0 == links) - links = OSArray::withCapacity( 1 ); - else - links = OSArray::withArray( links, links->getCount() ); - UNLOCK; + RLOCK; + links = getChildSetReference( plane ); + if (0 == links) { + links = OSArray::withCapacity( 1 ); + } else { + links = OSArray::withArray( links, links->getCount()); + } + UNLOCK; - iter = IOLinkIterator::withCollection( links ); + iter = IOLinkIterator::withCollection( links ); - if( links) - links->release(); + if (links) { + links->release(); + } - return( iter ); + return iter; } -uint32_t IORegistryEntry::getChildCount( const IORegistryPlane * plane ) const +uint32_t +IORegistryEntry::getChildCount( const IORegistryPlane * plane ) const { - OSArray * links; - uint32_t count = 0; + OSArray * links; + uint32_t count = 0; - RLOCK; - links = getChildSetReference( plane ); - if (links) count = links->getCount(); - UNLOCK; + RLOCK; + links = getChildSetReference( plane ); + if (links) { + count = links->getCount(); + } + UNLOCK; - return (count); + return count; } -IORegistryEntry * IORegistryEntry::copyChildEntry( - const IORegistryPlane * plane ) const +IORegistryEntry * +IORegistryEntry::copyChildEntry( + const IORegistryPlane * plane ) const { - IORegistryEntry * entry = 0; - OSArray * links; + IORegistryEntry * entry = 0; + OSArray * links; - RLOCK; + RLOCK; - if( (links = getChildSetReference( plane ))) { - entry = (IORegistryEntry *) links->getObject( 0 ); - entry->retain(); - } + if ((links = getChildSetReference( plane ))) { + entry = (IORegistryEntry *) links->getObject( 0 ); + entry->retain(); + } - UNLOCK; + UNLOCK; - return( entry); + return entry; } -IORegistryEntry * IORegistryEntry::getChildEntry( - const IORegistryPlane * plane ) const +IORegistryEntry * +IORegistryEntry::getChildEntry( + const IORegistryPlane * plane ) const { - IORegistryEntry * entry; + IORegistryEntry * entry; - entry = copyChildEntry( plane ); - if( entry) - entry->release(); - - return( entry ); + entry = copyChildEntry( plane ); + if (entry) { + entry->release(); + } + + return entry; } -void 
IORegistryEntry::applyToChildren( IORegistryEntryApplierFunction applier, - void * context, - const IORegistryPlane * plane ) const +void +IORegistryEntry::applyToChildren( IORegistryEntryApplierFunction applier, + void * context, + const IORegistryPlane * plane ) const { - OSArray * array; - unsigned int index; - IORegistryEntry * next; + OSArray * array; + unsigned int index; + IORegistryEntry * next; - if( !plane) - return; + if (!plane) { + return; + } - RLOCK; - array = OSArray::withArray( getChildSetReference( plane )); - UNLOCK; - if( array) { - for( index = 0; - (next = (IORegistryEntry *) array->getObject( index )); - index++) - (*applier)(next, context); - array->release(); - } + RLOCK; + array = OSArray::withArray( getChildSetReference( plane )); + UNLOCK; + if (array) { + for (index = 0; + (next = (IORegistryEntry *) array->getObject( index )); + index++) { + (*applier)(next, context); + } + array->release(); + } } -void IORegistryEntry::applyToParents( IORegistryEntryApplierFunction applier, - void * context, - const IORegistryPlane * plane ) const +void +IORegistryEntry::applyToParents( IORegistryEntryApplierFunction applier, + void * context, + const IORegistryPlane * plane ) const { - OSArray * array; - unsigned int index; - IORegistryEntry * next; + OSArray * array; + unsigned int index; + IORegistryEntry * next; - if( !plane) - return; + if (!plane) { + return; + } - RLOCK; - array = OSArray::withArray( getParentSetReference( plane )); - UNLOCK; - if( array) { - for( index = 0; - (next = (IORegistryEntry *) array->getObject( index )); - index++) - (*applier)(next, context); - array->release(); - } + RLOCK; + array = OSArray::withArray( getParentSetReference( plane )); + UNLOCK; + if (array) { + for (index = 0; + (next = (IORegistryEntry *) array->getObject( index )); + index++) { + (*applier)(next, context); + } + array->release(); + } } -bool IORegistryEntry::isChild( IORegistryEntry * child, - const IORegistryPlane * plane, - bool onlyChild ) const +bool +IORegistryEntry::isChild( IORegistryEntry * child, + const IORegistryPlane * plane, + bool onlyChild ) const { - OSArray * links; - bool ret = false; + OSArray * links; + bool ret = false; - RLOCK; + RLOCK; - if( (links = getChildSetReference( plane ))) { - if( (!onlyChild) || (1 == links->getCount())) - ret = arrayMember( links, child ); - } - if( ret && (links = child->getParentSetReference( plane ))) - ret = arrayMember( links, this ); + if ((links = getChildSetReference( plane ))) { + if ((!onlyChild) || (1 == links->getCount())) { + ret = arrayMember( links, child ); + } + } + if (ret && (links = child->getParentSetReference( plane ))) { + ret = arrayMember( links, this ); + } - UNLOCK; + UNLOCK; - return( ret); + return ret; } -bool IORegistryEntry::isParent( IORegistryEntry * parent, - const IORegistryPlane * plane, - bool onlyParent ) const - +bool +IORegistryEntry::isParent( IORegistryEntry * parent, + const IORegistryPlane * plane, + bool onlyParent ) const { - OSArray * links; - bool ret = false; + OSArray * links; + bool ret = false; - RLOCK; + RLOCK; - if( (links = getParentSetReference( plane ))) { - if( (!onlyParent) || (1 == links->getCount())) - ret = arrayMember( links, parent ); - } - if( ret && (links = parent->getChildSetReference( plane ))) - ret = arrayMember( links, this ); + if ((links = getParentSetReference( plane ))) { + if ((!onlyParent) || (1 == links->getCount())) { + ret = arrayMember( links, parent ); + } + } + if (ret && (links = parent->getChildSetReference( plane ))) { + ret = 
arrayMember( links, this ); + } - UNLOCK; + UNLOCK; - return( ret); + return ret; } -bool IORegistryEntry::inPlane( const IORegistryPlane * plane ) const +bool +IORegistryEntry::inPlane( const IORegistryPlane * plane ) const { - bool ret; - - RLOCK; + bool ret; - if( plane) - ret = (0 != getParentSetReference( plane )); - else { + RLOCK; - // Check to see if this is in any plane. If it is in a plane - // then the registryTable will contain a key with the ParentLinks - // suffix. When we iterate over the keys looking for that suffix - ret = false; - - OSCollectionIterator *iter = - OSCollectionIterator::withCollection( registryTable()); - if( iter) { - const OSSymbol *key; - - while( (key = (OSSymbol *) iter->getNextObject()) ) { - size_t keysuffix; - - // Get a pointer to this keys suffix - keysuffix = key->getLength(); - if (keysuffix <= kIORegPlaneParentSuffixLen) - continue; - keysuffix -= kIORegPlaneParentSuffixLen; - if( !strncmp(key->getCStringNoCopy() + keysuffix, - kIORegPlaneParentSuffix, - kIORegPlaneParentSuffixLen + 1) ) { - ret = true; - break; + if (plane) { + ret = (0 != getParentSetReference( plane )); + } else { + // Check to see if this is in any plane. If it is in a plane + // then the registryTable will contain a key with the ParentLinks + // suffix, so we iterate over the keys looking for that suffix. + ret = false; + + OSCollectionIterator *iter = + OSCollectionIterator::withCollection( registryTable()); + if (iter) { + const OSSymbol *key; + + while ((key = (OSSymbol *) iter->getNextObject())) { + size_t keysuffix; + + // Get a pointer to this key's suffix + keysuffix = key->getLength(); + if (keysuffix <= kIORegPlaneParentSuffixLen) { + continue; + } + keysuffix -= kIORegPlaneParentSuffixLen; + if (!strncmp(key->getCStringNoCopy() + keysuffix, + kIORegPlaneParentSuffix, + kIORegPlaneParentSuffixLen + 1)) { + ret = true; + break; + } + } + iter->release(); } - } - iter->release(); - } - } + } - UNLOCK; + UNLOCK; - return( ret ); + return ret; } -bool IORegistryEntry::attachToParent( IORegistryEntry * parent, - const IORegistryPlane * plane ) +bool +IORegistryEntry::attachToParent( IORegistryEntry * parent, + const IORegistryPlane * plane ) { - OSArray * links; - bool ret; - bool needParent; - bool traceName = false; - - if( this == parent) - return( false ); - - WLOCK; + OSArray * links; + bool ret; + bool needParent; + bool traceName = false; - if (!reserved->fRegistryEntryID) - { - reserved->fRegistryEntryID = ++gIORegistryLastID; - traceName = (0 != gIOKitTrace); - } + if (this == parent) { + return false; + } - ret = makeLink( parent, kParentSetIndex, plane ); + WLOCK; - if( (links = parent->getChildSetReference( plane ))) - needParent = (false == arrayMember( links, this )); - else - needParent = true; + if (!reserved->fRegistryEntryID) { + reserved->fRegistryEntryID = ++gIORegistryLastID; + traceName = (0 != gIOKitTrace); + } - UNLOCK; + ret = makeLink( parent, kParentSetIndex, plane ); - if (traceName) - { - uint64_t str_id = 0; - uint64_t __unused regID = getRegistryEntryID(); - kernel_debug_string(IODBG_IOREGISTRY(IOREGISTRYENTRY_NAME_STRING), &str_id, getName()); - KERNEL_DEBUG_CONSTANT(IODBG_IOREGISTRY(IOREGISTRYENTRY_NAME), - (uintptr_t) regID, - (uintptr_t) (regID >> 32), - (uintptr_t) str_id, - (uintptr_t) (str_id >> 32), - 0); - } + if ((links = parent->getChildSetReference( plane ))) { + needParent = (false == arrayMember( links, this )); + } else { + needParent = true; + } - PLOCK; + UNLOCK; - // Mark any collections in the property list as immutable 
- OSDictionary *ptable = getPropertyTable(); - OSCollectionIterator *iter = - OSCollectionIterator::withCollection( ptable ); - if( iter) { - const OSSymbol *key; + if (traceName) { + uint64_t str_id = 0; + uint64_t __unused regID = getRegistryEntryID(); + kernel_debug_string(IODBG_IOREGISTRY(IOREGISTRYENTRY_NAME_STRING), &str_id, getName()); + KERNEL_DEBUG_CONSTANT(IODBG_IOREGISTRY(IOREGISTRYENTRY_NAME), + (uintptr_t) regID, + (uintptr_t) (regID >> 32), + (uintptr_t) str_id, + (uintptr_t) (str_id >> 32), + 0); + } - while( (key = (OSSymbol *) iter->getNextObject( ))) { - // Is object for key a collection? - OSCollection *coll = - OSDynamicCast( OSCollection, ptable->getObject( key )); + PLOCK; - if( coll) { - // Yup so mark it as immutable - coll->setOptions( OSCollection::kMASK, - OSCollection::kImmutable ); - } + // Mark any collections in the property list as immutable + OSDictionary *ptable = getPropertyTable(); + OSCollectionIterator *iter = + OSCollectionIterator::withCollection( ptable ); + if (iter) { + const OSSymbol *key; + + while ((key = (OSSymbol *) iter->getNextObject())) { + // Is object for key a collection? + OSCollection *coll = + OSDynamicCast( OSCollection, ptable->getObject( key )); + + if (coll) { + // Yup so mark it as immutable + coll->setOptions( OSCollection::kMASK, + OSCollection::kImmutable ); + } + } + iter->release(); } - iter->release(); - } - PUNLOCK; + PUNLOCK; - if( needParent) - ret &= parent->attachToChild( this, plane ); + if (needParent) { + ret &= parent->attachToChild( this, plane ); + } - return( ret ); + return ret; } -uint64_t IORegistryEntry::getRegistryEntryID( void ) +uint64_t +IORegistryEntry::getRegistryEntryID( void ) { - if (reserved) - return (reserved->fRegistryEntryID); - else - return (0); + if (reserved) { + return reserved->fRegistryEntryID; + } else { + return 0; + } } -bool IORegistryEntry::attachToChild( IORegistryEntry * child, - const IORegistryPlane * plane ) +bool +IORegistryEntry::attachToChild( IORegistryEntry * child, + const IORegistryPlane * plane ) { - OSArray * links; - bool ret; - bool needChild; + OSArray * links; + bool ret; + bool needChild; - if( this == child) - return( false ); + if (this == child) { + return false; + } - WLOCK; + WLOCK; - ret = makeLink( child, kChildSetIndex, plane ); + ret = makeLink( child, kChildSetIndex, plane ); - if( (links = child->getParentSetReference( plane ))) - needChild = (false == arrayMember( links, this )); - else - needChild = true; + if ((links = child->getParentSetReference( plane ))) { + needChild = (false == arrayMember( links, this )); + } else { + needChild = true; + } - UNLOCK; + UNLOCK; - if( needChild) - ret &= child->attachToParent( this, plane ); + if (needChild) { + ret &= child->attachToParent( this, plane ); + } - return( ret ); + return ret; } -void IORegistryEntry::detachFromParent( IORegistryEntry * parent, - const IORegistryPlane * plane ) +void +IORegistryEntry::detachFromParent( IORegistryEntry * parent, + const IORegistryPlane * plane ) { - OSArray * links; - bool needParent; + OSArray * links; + bool needParent; - WLOCK; + WLOCK; - parent->retain(); + parent->retain(); - breakLink( parent, kParentSetIndex, plane ); + breakLink( parent, kParentSetIndex, plane ); - if( (links = parent->getChildSetReference( plane ))) - needParent = arrayMember( links, this ); - else - needParent = false; + if ((links = parent->getChildSetReference( plane ))) { + needParent = arrayMember( links, this ); + } else { + needParent = false; + } // parent->breakLink( this, 
kChildSetIndex, plane ); - UNLOCK; + UNLOCK; - if( needParent) - parent->detachFromChild( this, plane ); + if (needParent) { + parent->detachFromChild( this, plane ); + } - parent->release(); + parent->release(); } -void IORegistryEntry::detachFromChild( IORegistryEntry * child, - const IORegistryPlane * plane ) +void +IORegistryEntry::detachFromChild( IORegistryEntry * child, + const IORegistryPlane * plane ) { - OSArray * links; - bool needChild; + OSArray * links; + bool needChild; - WLOCK; + WLOCK; - child->retain(); + child->retain(); - breakLink( child, kChildSetIndex, plane ); + breakLink( child, kChildSetIndex, plane ); - if( (links = child->getParentSetReference( plane ))) - needChild = arrayMember( links, this ); - else - needChild = false; + if ((links = child->getParentSetReference( plane ))) { + needChild = arrayMember( links, this ); + } else { + needChild = false; + } - UNLOCK; + UNLOCK; - if( needChild) - child->detachFromParent( this, plane ); + if (needChild) { + child->detachFromParent( this, plane ); + } - child->release(); + child->release(); } -void IORegistryEntry::detachAbove( const IORegistryPlane * plane ) +void +IORegistryEntry::detachAbove( const IORegistryPlane * plane ) { - IORegistryEntry * parent; + IORegistryEntry * parent; - retain(); - while( (parent = copyParentEntry( plane ))) - { - detachFromParent( parent, plane ); - parent->release(); - } - release(); + retain(); + while ((parent = copyParentEntry( plane ))) { + detachFromParent( parent, plane ); + parent->release(); + } + release(); } -void IORegistryEntry::detachAll( const IORegistryPlane * plane ) +void +IORegistryEntry::detachAll( const IORegistryPlane * plane ) { - OSOrderedSet * all; - IORegistryEntry * next; - IORegistryIterator * regIter; + OSOrderedSet * all; + IORegistryEntry * next; + IORegistryIterator * regIter; - regIter = IORegistryIterator::iterateOver( this, plane, true ); - if( 0 == regIter) - return; - all = regIter->iterateAll(); - regIter->release(); - - detachAbove( plane ); - if( all) { - while( (next = (IORegistryEntry *) all->getLastObject())) { + regIter = IORegistryIterator::iterateOver( this, plane, true ); + if (0 == regIter) { + return; + } + all = regIter->iterateAll(); + regIter->release(); - next->retain(); - all->removeObject(next); + detachAbove( plane ); + if (all) { + while ((next = (IORegistryEntry *) all->getLastObject())) { + next->retain(); + all->removeObject(next); - next->detachAbove( plane ); - next->release(); - } - all->release(); - } + next->detachAbove( plane ); + next->release(); + } + all->release(); + } } -unsigned int IORegistryEntry::getDepth( const IORegistryPlane * plane ) const +unsigned int +IORegistryEntry::getDepth( const IORegistryPlane * plane ) const { - unsigned int depth = 1; - OSArray * parents; - unsigned int oneDepth, maxParentDepth, count; - IORegistryEntry * one; - const IORegistryEntry * next; - unsigned int index; - - RLOCK; + unsigned int depth = 1; + OSArray * parents; + unsigned int oneDepth, maxParentDepth, count; + IORegistryEntry * one; + const IORegistryEntry * next; + unsigned int index; - next = this; - while( (parents = next->getParentSetReference( plane ))) { + RLOCK; - count = parents->getCount(); - if( 0 == count) - break; - if( 1 == count) { - depth++; - next = (IORegistryEntry *) parents->getObject( 0 ); - } else { - // painful - maxParentDepth = 0; - for( index = 0; - (one = (IORegistryEntry *) parents->getObject( index )); - index++ ) { - oneDepth = one->getDepth( plane ); - if( oneDepth > maxParentDepth) - 
maxParentDepth = oneDepth; - } - depth += maxParentDepth; - break; + next = this; + while ((parents = next->getParentSetReference( plane ))) { + count = parents->getCount(); + if (0 == count) { + break; + } + if (1 == count) { + depth++; + next = (IORegistryEntry *) parents->getObject( 0 ); + } else { + // painful + maxParentDepth = 0; + for (index = 0; + (one = (IORegistryEntry *) parents->getObject( index )); + index++) { + oneDepth = one->getDepth( plane ); + if (oneDepth > maxParentDepth) { + maxParentDepth = oneDepth; + } + } + depth += maxParentDepth; + break; + } } - } - UNLOCK; + UNLOCK; - return( depth); + return depth; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1972,212 +2129,230 @@ enum { kIORegistryIteratorInvalidFlag = 0x80000000 }; IORegistryIterator * IORegistryIterator::iterateOver( IORegistryEntry * root, - const IORegistryPlane * plane, - IOOptionBits options ) + const IORegistryPlane * plane, + IOOptionBits options ) { - IORegistryIterator * create; - - if( 0 == root) - return( 0); - if( 0 == plane) - return( 0); - - create = new IORegistryIterator; - if( create) { - if( create->init()) { + IORegistryIterator * create; - root->retain(); - create->root = root; - create->where = &create->start; - create->start.current = root; - create->plane = plane; - create->options = options & ~kIORegistryIteratorInvalidFlag; + if (0 == root) { + return 0; + } + if (0 == plane) { + return 0; + } - } else { - create->release(); - create = 0; + create = new IORegistryIterator; + if (create) { + if (create->init()) { + root->retain(); + create->root = root; + create->where = &create->start; + create->start.current = root; + create->plane = plane; + create->options = options & ~kIORegistryIteratorInvalidFlag; + } else { + create->release(); + create = 0; + } } - } - return( create); + return create; } IORegistryIterator * IORegistryIterator::iterateOver( const IORegistryPlane * plane, - IOOptionBits options ) + IOOptionBits options ) { - return( iterateOver( gRegistryRoot, plane, options )); + return iterateOver( gRegistryRoot, plane, options ); } -bool IORegistryIterator::isValid( void ) +bool +IORegistryIterator::isValid( void ) { - bool ok; - IORegCursor * next; + bool ok; + IORegCursor * next; - next = where; + next = where; - RLOCK; + RLOCK; - ok = (0 == (kIORegistryIteratorInvalidFlag & options)); + ok = (0 == (kIORegistryIteratorInvalidFlag & options)); - while( ok && next) { - if( where->iter) - ok = where->iter->isValid(); - next = next->next; - } - UNLOCK; + while (ok && next) { + if (where->iter) { + ok = where->iter->isValid(); + } + next = next->next; + } + UNLOCK; - return( ok); + return ok; } -void IORegistryIterator::enterEntry( const IORegistryPlane * enterPlane ) +void +IORegistryIterator::enterEntry( const IORegistryPlane * enterPlane ) { - IORegCursor * prev; + IORegCursor * prev; - prev = where; - where = (IORegCursor *) IOMalloc( sizeof(IORegCursor)); - assert( where); + prev = where; + where = (IORegCursor *) IOMalloc( sizeof(IORegCursor)); + assert( where); - if( where) { - where->iter = 0; - where->next = prev; - where->current = prev->current; - plane = enterPlane; - } + if (where) { + where->iter = 0; + where->next = prev; + where->current = prev->current; + plane = enterPlane; + } } -void IORegistryIterator::enterEntry( void ) +void +IORegistryIterator::enterEntry( void ) { - enterEntry( plane ); + enterEntry( plane ); } -bool IORegistryIterator::exitEntry( void ) +bool +IORegistryIterator::exitEntry( void ) { - IORegCursor 
* gone; - - if( where->iter) { - where->iter->release(); - where->iter = 0; - if( where->current)// && (where != &start)) - where->current->release(); - } + IORegCursor * gone; - if( where != &start) { - gone = where; - where = gone->next; - IOFree( gone, sizeof(IORegCursor)); - return( true); + if (where->iter) { + where->iter->release(); + where->iter = 0; + if (where->current) {// && (where != &start)) + where->current->release(); + } + } - } else - return( false); + if (where != &start) { + gone = where; + where = gone->next; + IOFree( gone, sizeof(IORegCursor)); + return true; + } else { + return false; + } } -void IORegistryIterator::reset( void ) +void +IORegistryIterator::reset( void ) { - while( exitEntry()) - {} + while (exitEntry()) { + } - if( done) { - done->release(); - done = 0; - } + if (done) { + done->release(); + done = 0; + } - where->current = root; - options &= ~kIORegistryIteratorInvalidFlag; + where->current = root; + options &= ~kIORegistryIteratorInvalidFlag; } -void IORegistryIterator::free( void ) +void +IORegistryIterator::free( void ) { - reset(); + reset(); - if( root) - root->release(); + if (root) { + root->release(); + } - super::free(); + super::free(); } -IORegistryEntry * IORegistryIterator::getNextObjectFlat( void ) +IORegistryEntry * +IORegistryIterator::getNextObjectFlat( void ) { - IORegistryEntry * next = 0; - OSArray * links = 0; + IORegistryEntry * next = 0; + OSArray * links = 0; - RLOCK; + RLOCK; - if( (0 == where->iter)) { - // just entered - create new iter - if( isValid() - && where->current - && (links = ( (options & kIORegistryIterateParents) ? - where->current->getParentSetReference( plane ) : - where->current->getChildSetReference( plane ) )) ) - - where->iter = OSCollectionIterator::withCollection( links ); - - } else + if ((0 == where->iter)) { + // just entered - create new iter + if (isValid() + && where->current + && (links = ((options & kIORegistryIterateParents) ? 
+ where->current->getParentSetReference( plane ) : + where->current->getChildSetReference( plane )))) { + where->iter = OSCollectionIterator::withCollection( links ); + } + } else // next sibling - release current - if( where->current) - where->current->release(); - - if( where->iter) { + if (where->current) { + where->current->release(); + } - next = (IORegistryEntry *) where->iter->getNextObject(); + if (where->iter) { + next = (IORegistryEntry *) where->iter->getNextObject(); - if( next) - next->retain(); - else if( !where->iter->isValid()) - options |= kIORegistryIteratorInvalidFlag; - } + if (next) { + next->retain(); + } else if (!where->iter->isValid()) { + options |= kIORegistryIteratorInvalidFlag; + } + } - where->current = next; + where->current = next; - UNLOCK; + UNLOCK; - return( next); + return next; } -IORegistryEntry * IORegistryIterator::getNextObjectRecursive( void ) +IORegistryEntry * +IORegistryIterator::getNextObjectRecursive( void ) { - IORegistryEntry * next; + IORegistryEntry * next; - do - next = getNextObjectFlat(); - while( (0 == next) && exitEntry()); + do{ + next = getNextObjectFlat(); + } while ((0 == next) && exitEntry()); - if( next) { - if( 0 == done) - done = OSOrderedSet::withCapacity( 10 ); - if( done->setObject((OSObject *) next)) { - // done set didn't contain this one, so recurse - enterEntry(); + if (next) { + if (0 == done) { + done = OSOrderedSet::withCapacity( 10 ); + } + if (done->setObject((OSObject *) next)) { + // done set didn't contain this one, so recurse + enterEntry(); + } } - } - return( next); + return next; } -IORegistryEntry * IORegistryIterator::getNextObject( void ) +IORegistryEntry * +IORegistryIterator::getNextObject( void ) { - if( options & kIORegistryIterateRecursively) - return( getNextObjectRecursive()); - else - return( getNextObjectFlat()); + if (options & kIORegistryIterateRecursively) { + return getNextObjectRecursive(); + } else { + return getNextObjectFlat(); + } } -IORegistryEntry * IORegistryIterator::getCurrentEntry( void ) +IORegistryEntry * +IORegistryIterator::getCurrentEntry( void ) { - if( isValid()) - return( where->current); - else - return( 0); + if (isValid()) { + return where->current; + } else { + return 0; + } } -OSOrderedSet * IORegistryIterator::iterateAll( void ) +OSOrderedSet * +IORegistryIterator::iterateAll( void ) { - reset(); - while( getNextObjectRecursive()) - {} - if( done) - done->retain(); - return( done); + reset(); + while (getNextObjectRecursive()) { + } + if (done) { + done->retain(); + } + return done; } #if __LP64__ @@ -2223,5 +2398,8 @@ OSMetaClassDefineReservedUnused(IORegistryEntry, 30); OSMetaClassDefineReservedUnused(IORegistryEntry, 31); /* inline function implementation */ -OSDictionary * IORegistryEntry::getPropertyTable( void ) const -{ return(fPropertyTable); } +OSDictionary * +IORegistryEntry::getPropertyTable( void ) const +{ + return fPropertyTable; +} diff --git a/iokit/Kernel/IOReportLegend.cpp b/iokit/Kernel/IOReportLegend.cpp index 47d5e206a..9c0eeb9c5 100644 --- a/iokit/Kernel/IOReportLegend.cpp +++ b/iokit/Kernel/IOReportLegend.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2012-2013 Apple Computer, Inc. All Rights Reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,9 +35,9 @@ #ifdef IORDEBUG_LEGEND #define IORLEGENDLOG(fmt, args...) \ do { \ - IOLog("IOReportLegend | "); \ - IOLog(fmt, ##args); \ - IOLog("\n"); \ + IOLog("IOReportLegend | "); \ + IOLog(fmt, ##args); \ + IOLog("\n"); \ } while(0) #else #define IORLEGENDLOG(fmt, args...) @@ -50,176 +50,194 @@ OSDefineMetaClassAndStructors(IOReportLegend, OSObject); IOReportLegend* IOReportLegend::with(OSArray *legend) { - IOReportLegend *iorLegend = new IOReportLegend; - - if (iorLegend) { - - if (legend != NULL) { - if (iorLegend->initWith(legend) != kIOReturnSuccess) { - OSSafeReleaseNULL(iorLegend); - return NULL; - } - } - - return iorLegend; - } - - else return NULL; + IOReportLegend *iorLegend = new IOReportLegend; + + if (iorLegend) { + if (legend != NULL) { + if (iorLegend->initWith(legend) != kIOReturnSuccess) { + OSSafeReleaseNULL(iorLegend); + return NULL; + } + } + + return iorLegend; + } else { + return NULL; + } } /* must clean up everything if it fails */ IOReturn IOReportLegend::initWith(OSArray *legend) { - if (legend) _reportLegend = OSArray::withArray(legend); - - if (_reportLegend == NULL) - return kIOReturnError; - - else return kIOReturnSuccess; + if (legend) { + _reportLegend = OSArray::withArray(legend); + } + + if (_reportLegend == NULL) { + return kIOReturnError; + } else { + return kIOReturnSuccess; + } } void IOReportLegend::free(void) { - if (_reportLegend) _reportLegend->release(); - super::free(); + if (_reportLegend) { + _reportLegend->release(); + } + super::free(); } OSArray* IOReportLegend::getLegend(void) { - return _reportLegend; + return _reportLegend; } IOReturn IOReportLegend::addReporterLegend(IOService *reportingService, - IOReporter *reporter, - const char *groupName, - const char *subGroupName) + IOReporter *reporter, + const char *groupName, + const char *subGroupName) { - IOReturn res = kIOReturnError; - IOReportLegend *legend = NULL; - OSObject *curLegend = NULL; - - // No need to check groupName and subGroupName because optional params - if (!reportingService || !reporter) { - goto finish; - } - - // It's fine if the legend doesn't exist (IOReportLegend::with(NULL) - // is how you make an empty legend). If it's not an array, then - // we're just going to replace it. - curLegend = reportingService->copyProperty(kIOReportLegendKey); - legend = IOReportLegend::with(OSDynamicCast(OSArray, curLegend)); - if (!legend) goto finish; - - // Add the reporter's entries and update the service property. - // The overwrite triggers a release of the old legend array. 
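/*
 * Sketch of the intended call site for this static helper; MyDriver and
 * the group name are illustrative assumptions. The helper copies any
 * existing kIOReportLegendKey array, appends the reporter's entries, and
 * republishes the property on the service.
 *
 *     IOReturn
 *     MyDriver::publishReportLegend(IOReporter *reporter)
 *     {
 *         // subGroupName is optional and may be NULL
 *         return IOReportLegend::addReporterLegend(this, reporter,
 *             "MyDriver Statistics", NULL);
 *     }
 */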
- legend->addReporterLegend(reporter, groupName, subGroupName); - reportingService->setProperty(kIOReportLegendKey, legend->getLegend()); - reportingService->setProperty(kIOReportLegendPublicKey, true); - - res = kIOReturnSuccess; - -finish: - if (legend) legend->release(); - if (curLegend) curLegend->release(); + IOReturn res = kIOReturnError; + IOReportLegend *legend = NULL; + OSObject *curLegend = NULL; + + // No need to check groupName and subGroupName because optional params + if (!reportingService || !reporter) { + goto finish; + } + + // It's fine if the legend doesn't exist (IOReportLegend::with(NULL) + // is how you make an empty legend). If it's not an array, then + // we're just going to replace it. + curLegend = reportingService->copyProperty(kIOReportLegendKey); + legend = IOReportLegend::with(OSDynamicCast(OSArray, curLegend)); + if (!legend) { + goto finish; + } + + // Add the reporter's entries and update the service property. + // The overwrite triggers a release of the old legend array. + legend->addReporterLegend(reporter, groupName, subGroupName); + reportingService->setProperty(kIOReportLegendKey, legend->getLegend()); + reportingService->setProperty(kIOReportLegendPublicKey, true); + + res = kIOReturnSuccess; - return res; +finish: + if (legend) { + legend->release(); + } + if (curLegend) { + curLegend->release(); + } + + return res; } IOReturn IOReportLegend::addLegendEntry(IOReportLegendEntry *legendEntry, - const char *groupName, - const char *subGroupName) + const char *groupName, + const char *subGroupName) { - kern_return_t res = kIOReturnError; - const OSSymbol *tmpGroupName = NULL; - const OSSymbol *tmpSubGroupName = NULL; - - if (!legendEntry) goto finish; - - if (groupName) { - tmpGroupName = OSSymbol::withCString(groupName); - } - - if (subGroupName) { - tmpSubGroupName = OSSymbol::withCString(subGroupName); - } - - // It is ok to call appendLegendWith() if tmpGroups are NULL - if (legendEntry) { - res = organizeLegend(legendEntry, tmpGroupName, tmpSubGroupName); - - if (tmpGroupName) tmpGroupName->release(); - if (tmpSubGroupName) tmpSubGroupName->release(); - } + kern_return_t res = kIOReturnError; + const OSSymbol *tmpGroupName = NULL; + const OSSymbol *tmpSubGroupName = NULL; + + if (!legendEntry) { + goto finish; + } + + if (groupName) { + tmpGroupName = OSSymbol::withCString(groupName); + } + + if (subGroupName) { + tmpSubGroupName = OSSymbol::withCString(subGroupName); + } + + // It is ok to call appendLegendWith() if tmpGroups are NULL + if (legendEntry) { + res = organizeLegend(legendEntry, tmpGroupName, tmpSubGroupName); + + if (tmpGroupName) { + tmpGroupName->release(); + } + if (tmpSubGroupName) { + tmpSubGroupName->release(); + } + } finish: - return res; + return res; } IOReturn IOReportLegend::addReporterLegend(IOReporter *reporter, - const char *groupName, - const char *subGroupName) + const char *groupName, + const char *subGroupName) { - IOReturn res = kIOReturnError; - IOReportLegendEntry *legendEntry = NULL; - - if (reporter) { - - legendEntry = reporter->createLegend(); - - if (legendEntry) { - - res = addLegendEntry(legendEntry, groupName, subGroupName); - legendEntry->release(); - } - } - - return res; + IOReturn res = kIOReturnError; + IOReportLegendEntry *legendEntry = NULL; + + if (reporter) { + legendEntry = reporter->createLegend(); + + if (legendEntry) { + res = addLegendEntry(legendEntry, groupName, subGroupName); + legendEntry->release(); + } + } + + return res; } IOReturn IOReportLegend::organizeLegend(IOReportLegendEntry 
*legendEntry, - const OSSymbol *groupName, - const OSSymbol *subGroupName) + const OSSymbol *groupName, + const OSSymbol *subGroupName) { - IOReturn res = kIOReturnError; - - if (!legendEntry) - return res = kIOReturnBadArgument; - - if (!groupName && subGroupName) - return res = kIOReturnBadArgument; - - IORLEGENDLOG("IOReportLegend::organizeLegend"); - // Legend is empty, enter first node - if (_reportLegend == NULL) { - IORLEGENDLOG("IOReportLegend::new legend creation"); - _reportLegend = OSArray::withCapacity(1); - - if (!_reportLegend) - return kIOReturnNoMemory; - } - - if (groupName) - legendEntry->setObject(kIOReportLegendGroupNameKey, groupName); - - if (subGroupName) - legendEntry->setObject(kIOReportLegendSubGroupNameKey, subGroupName); - - _reportLegend->setObject(legendEntry); - - // callers can now safely release legendEntry (it is part of _reportLegend) - - return res = kIOReturnSuccess; -} + IOReturn res = kIOReturnError; + if (!legendEntry) { + return res = kIOReturnBadArgument; + } + + if (!groupName && subGroupName) { + return res = kIOReturnBadArgument; + } + + IORLEGENDLOG("IOReportLegend::organizeLegend"); + // Legend is empty, enter first node + if (_reportLegend == NULL) { + IORLEGENDLOG("IOReportLegend::new legend creation"); + _reportLegend = OSArray::withCapacity(1); + + if (!_reportLegend) { + return kIOReturnNoMemory; + } + } + + if (groupName) { + legendEntry->setObject(kIOReportLegendGroupNameKey, groupName); + } + + if (subGroupName) { + legendEntry->setObject(kIOReportLegendSubGroupNameKey, subGroupName); + } + + _reportLegend->setObject(legendEntry); + + // callers can now safely release legendEntry (it is part of _reportLegend) + + return res = kIOReturnSuccess; +} diff --git a/iokit/Kernel/IOReporter.cpp b/iokit/Kernel/IOReporter.cpp index dd6ccf764..7d2bf3268 100644 --- a/iokit/Kernel/IOReporter.cpp +++ b/iokit/Kernel/IOReporter.cpp @@ -1,8 +1,8 @@ /* * Copyright (c) 2012-2013 Apple Computer, Inc. All Rights Reserved. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,111 +46,111 @@ static const OSSymbol *gIOReportNoChannelName = OSSymbol::withCString("_NO_NAME_ /**************************************/ IOReturn IOReporter::configureAllReports(OSSet *reporters, - IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination) + IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination) { - IOReturn rval = kIOReturnError; - OSCollectionIterator *iterator = NULL; - - if (reporters == NULL || channelList == NULL || result == NULL) { - rval = kIOReturnBadArgument; - goto finish; - } - - switch (action) { - - case kIOReportGetDimensions: - case kIOReportEnable: - case kIOReportDisable: - { - OSObject * object; - iterator = OSCollectionIterator::withCollection(reporters); - - while ((object = iterator->getNextObject())) { - - IOReporter *rep = OSDynamicCast(IOReporter, object); - - if (rep) { - (void)rep->configureReport(channelList, action, result, destination); - } else { - rval = kIOReturnUnsupported; // kIOReturnNotFound? - goto finish; - } - } - - break; - } - - case kIOReportTraceOnChange: - case kIOReportNotifyHubOnChange: - default: - rval = kIOReturnUnsupported; - goto finish; - } - - rval = kIOReturnSuccess; - + IOReturn rval = kIOReturnError; + OSCollectionIterator *iterator = NULL; + + if (reporters == NULL || channelList == NULL || result == NULL) { + rval = kIOReturnBadArgument; + goto finish; + } + + switch (action) { + case kIOReportGetDimensions: + case kIOReportEnable: + case kIOReportDisable: + { + OSObject * object; + iterator = OSCollectionIterator::withCollection(reporters); + + while ((object = iterator->getNextObject())) { + IOReporter *rep = OSDynamicCast(IOReporter, object); + + if (rep) { + (void)rep->configureReport(channelList, action, result, destination); + } else { + rval = kIOReturnUnsupported; // kIOReturnNotFound? + goto finish; + } + } + + break; + } + + case kIOReportTraceOnChange: + case kIOReportNotifyHubOnChange: + default: + rval = kIOReturnUnsupported; + goto finish; + } + + rval = kIOReturnSuccess; + finish: - if (iterator) iterator->release(); + if (iterator) { + iterator->release(); + } - return rval; + return rval; } // the duplication in these functions almost makes one want Objective-C SEL* ;) IOReturn IOReporter::updateAllReports(OSSet *reporters, - IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination) + IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination) { - IOReturn rval = kIOReturnError; - OSCollectionIterator *iterator = NULL; - - if (reporters == NULL || - channelList == NULL || - result == NULL || - destination == NULL) { - rval = kIOReturnBadArgument; - goto finish; - } - - switch (action) { - - case kIOReportCopyChannelData: - { - OSObject * object; - iterator = OSCollectionIterator::withCollection(reporters); - - while ((object = iterator->getNextObject())) { - - IOReporter *rep = OSDynamicCast(IOReporter, object); - - if (rep) { - (void)rep->updateReport(channelList, action, result, destination); - } else { - rval = kIOReturnUnsupported; // kIOReturnNotFound? 
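/*
 * Sketch of the IOService::configureReport() override these helpers are
 * built for (MyDriver and _reporters are assumed names): a driver keeps
 * its reporters in an OSSet and forwards the whole channel list, letting
 * each reporter claim the channels it recognizes.
 *
 *     IOReturn
 *     MyDriver::configureReport(IOReportChannelList *channels,
 *         IOReportConfigureAction action, void *result, void *destination)
 *     {
 *         IOReturn res = IOReporter::configureAllReports(_reporters,
 *             channels, action, result, destination);
 *         if (res != kIOReturnSuccess) {
 *             return res;
 *         }
 *         return super::configureReport(channels, action, result,
 *             destination);
 *     }
 */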
- goto finish; - } - } - - break; - } - - case kIOReportTraceChannelData: - default: - rval = kIOReturnUnsupported; - goto finish; - } - - rval = kIOReturnSuccess; + IOReturn rval = kIOReturnError; + OSCollectionIterator *iterator = NULL; + + if (reporters == NULL || + channelList == NULL || + result == NULL || + destination == NULL) { + rval = kIOReturnBadArgument; + goto finish; + } + + switch (action) { + case kIOReportCopyChannelData: + { + OSObject * object; + iterator = OSCollectionIterator::withCollection(reporters); + + while ((object = iterator->getNextObject())) { + IOReporter *rep = OSDynamicCast(IOReporter, object); + + if (rep) { + (void)rep->updateReport(channelList, action, result, destination); + } else { + rval = kIOReturnUnsupported; // kIOReturnNotFound? + goto finish; + } + } + + break; + } + + case kIOReportTraceChannelData: + default: + rval = kIOReturnUnsupported; + goto finish; + } + + rval = kIOReturnSuccess; finish: - if (iterator) iterator->release(); + if (iterator) { + iterator->release(); + } - return rval; + return rval; } @@ -160,65 +160,73 @@ finish: bool IOReporter::init(IOService *reportingService, - IOReportChannelType channelType, - IOReportUnit unit) + IOReportChannelType channelType, + IOReportUnit unit) { - bool success = false; - - // ::free() relies on these being initialized - _reporterLock = NULL; - _configLock = NULL; - _elements = NULL; - _enableCounts = NULL; - _channelNames = NULL; - - if (channelType.report_format == kIOReportInvalidFormat) { - IORLOG("init ERROR: Channel Type ill-defined"); - goto finish; - } - - _driver_id = reportingService->getRegistryEntryID(); - if (_driver_id == 0) { - IORLOG("init() ERROR: no registry ID"); - goto finish; - } - - if (!super::init()) return false; - - _channelDimension = channelType.nelements; - _channelType = channelType; - // FIXME: need to look up dynamically - if (unit == kIOReportUnitHWTicks) { + bool success = false; + + // ::free() relies on these being initialized + _reporterLock = NULL; + _configLock = NULL; + _elements = NULL; + _enableCounts = NULL; + _channelNames = NULL; + + if (channelType.report_format == kIOReportInvalidFormat) { + IORLOG("init ERROR: Channel Type ill-defined"); + goto finish; + } + + _driver_id = reportingService->getRegistryEntryID(); + if (_driver_id == 0) { + IORLOG("init() ERROR: no registry ID"); + goto finish; + } + + if (!super::init()) { + return false; + } + + _channelDimension = channelType.nelements; + _channelType = channelType; + // FIXME: need to look up dynamically + if (unit == kIOReportUnitHWTicks) { #if defined(__arm__) || defined(__arm64__) - unit = kIOReportUnit24MHzTicks; + unit = kIOReportUnit24MHzTicks; #elif defined(__i386__) || defined(__x86_64__) - // Most, but not all Macs use 1GHz - unit = kIOReportUnit1GHzTicks; + // Most, but not all Macs use 1GHz + unit = kIOReportUnit1GHzTicks; #else #error kIOReportUnitHWTicks not defined #endif - } - _unit = unit; - - // Allocate a reporter (data) lock - _reporterLock = IOSimpleLockAlloc(); - if (!_reporterLock) goto finish; - _reporterIsLocked = false; - - // Allocate a config lock - _configLock = IOLockAlloc(); - if (!_configLock) goto finish; - _reporterConfigIsLocked = false; - - // Allocate channel names array - _channelNames = OSArray::withCapacity(1); - if (!_channelNames) goto finish; - - // success - success = true; + } + _unit = unit; + + // Allocate a reporter (data) lock + _reporterLock = IOSimpleLockAlloc(); + if (!_reporterLock) { + goto finish; + } + _reporterIsLocked = false; + + // 
Allocate a config lock + _configLock = IOLockAlloc(); + if (!_configLock) { + goto finish; + } + _reporterConfigIsLocked = false; + + // Allocate channel names array + _channelNames = OSArray::withCapacity(1); + if (!_channelNames) { + goto finish; + } + + // success + success = true; finish: - return success; + return success; } @@ -230,150 +238,154 @@ finish: // to ensure that _ = NULL void IOReporter::free(void) -{ - OSSafeReleaseNULL(_channelNames); - - if (_configLock) IOLockFree(_configLock); - if (_reporterLock) IOSimpleLockFree(_reporterLock); - - if (_elements) { - PREFL_MEMOP_PANIC(_nElements, IOReportElement); - IOFree(_elements, (size_t)_nElements * sizeof(IOReportElement)); - } - if (_enableCounts) { - PREFL_MEMOP_PANIC(_nChannels, int); - IOFree(_enableCounts, (size_t)_nChannels * sizeof(int)); - } - - super::free(); +{ + OSSafeReleaseNULL(_channelNames); + + if (_configLock) { + IOLockFree(_configLock); + } + if (_reporterLock) { + IOSimpleLockFree(_reporterLock); + } + + if (_elements) { + PREFL_MEMOP_PANIC(_nElements, IOReportElement); + IOFree(_elements, (size_t)_nElements * sizeof(IOReportElement)); + } + if (_enableCounts) { + PREFL_MEMOP_PANIC(_nChannels, int); + IOFree(_enableCounts, (size_t)_nChannels * sizeof(int)); + } + + super::free(); } /* -#define TESTALLOC() do { \ - void *tbuf; \ - tbuf = IOMalloc(10); \ - IOFree(tbuf, 10); \ - IORLOG("%s:%d - _reporterIsLocked = %d & allocation successful", \ - __PRETTY_FUNCTION__, __LINE__, _reporterIsLocked); \ -} while (0); -*/ + #define TESTALLOC() do { \ + * void *tbuf; \ + * tbuf = IOMalloc(10); \ + * IOFree(tbuf, 10); \ + * IORLOG("%s:%d - _reporterIsLocked = %d & allocation successful", \ + * __PRETTY_FUNCTION__, __LINE__, _reporterIsLocked); \ + * } while (0); + */ IOReturn IOReporter::addChannel(uint64_t channelID, - const char *channelName /* = NULL */) + const char *channelName /* = NULL */) { - IOReturn res = kIOReturnError, kerr; - const OSSymbol *symChannelName = NULL; - int oldNChannels, newNChannels = 0, freeNChannels = 0; - - IORLOG("IOReporter::addChannel %llx", channelID); - - // protect instance variables (but not contents) - lockReporterConfig(); - - // FIXME: Check if any channel is already present and return error - - // addChannel() always adds one channel - oldNChannels = _nChannels; - if (oldNChannels < 0 || oldNChannels > INT_MAX - 1) { - res = kIOReturnOverrun; - goto finish; - } - newNChannels = oldNChannels + 1; - freeNChannels = newNChannels; // until swap success - - // Expand addChannel()-specific data structure - if (_channelNames->ensureCapacity((unsigned)newNChannels) < - (unsigned)newNChannels) { - res = kIOReturnNoMemory; goto finish; - } - if (channelName) { - symChannelName = OSSymbol::withCString(channelName); - if (!symChannelName) { - res = kIOReturnNoMemory; goto finish; - } - } else { - // grab a reference to our shared global - symChannelName = gIOReportNoChannelName; - symChannelName->retain(); - } - - // allocate new buffers into _swap* variables - if ((kerr = handleSwapPrepare(newNChannels))) { - // on error, channels are *not* swapped - res = kerr; goto finish; - } - - // exchange main and _swap* buffers with buffer contents protected - // IOReporter::handleAddChannelSwap() also increments _nElements, etc - lockReporter(); - res = handleAddChannelSwap(channelID, symChannelName); - unlockReporter(); - // On failure, handleAddChannelSwap() leaves *new* buffers in _swap*. - // On success, it's the old buffers, so we put the right size in here. 
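/*
 * Sketch of a typical addChannel() client, assuming the common
 * IOSimpleReporter subclass and invented channel IDs: channels are added
 * up front (each call pays for one buffer swap under the config lock),
 * and values are then updated from the data path.
 *
 *     IOSimpleReporter *rep = IOSimpleReporter::with(this,
 *         kIOReportCategoryPower, kIOReportUnitNone);
 *     if (rep) {
 *         rep->addChannel(0x1001, "ResetCount");
 *         rep->addChannel(0x1002, "ErrorCount");
 *         // later, on the data path:
 *         rep->incrementValue(0x1001, 1);
 *     }
 */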
- if (res == kIOReturnSuccess) { - freeNChannels = oldNChannels; - } + IOReturn res = kIOReturnError, kerr; + const OSSymbol *symChannelName = NULL; + int oldNChannels, newNChannels = 0, freeNChannels = 0; + + IORLOG("IOReporter::addChannel %llx", channelID); + + // protect instance variables (but not contents) + lockReporterConfig(); + + // FIXME: Check if any channel is already present and return error + + // addChannel() always adds one channel + oldNChannels = _nChannels; + if (oldNChannels < 0 || oldNChannels > INT_MAX - 1) { + res = kIOReturnOverrun; + goto finish; + } + newNChannels = oldNChannels + 1; + freeNChannels = newNChannels; // until swap success + + // Expand addChannel()-specific data structure + if (_channelNames->ensureCapacity((unsigned)newNChannels) < + (unsigned)newNChannels) { + res = kIOReturnNoMemory; goto finish; + } + if (channelName) { + symChannelName = OSSymbol::withCString(channelName); + if (!symChannelName) { + res = kIOReturnNoMemory; goto finish; + } + } else { + // grab a reference to our shared global + symChannelName = gIOReportNoChannelName; + symChannelName->retain(); + } + + // allocate new buffers into _swap* variables + if ((kerr = handleSwapPrepare(newNChannels))) { + // on error, channels are *not* swapped + res = kerr; goto finish; + } + + // exchange main and _swap* buffers with buffer contents protected + // IOReporter::handleAddChannelSwap() also increments _nElements, etc + lockReporter(); + res = handleAddChannelSwap(channelID, symChannelName); + unlockReporter(); + // On failure, handleAddChannelSwap() leaves *new* buffers in _swap*. + // On success, it's the old buffers, so we put the right size in here. + if (res == kIOReturnSuccess) { + freeNChannels = oldNChannels; + } finish: - // free up not-in-use buffers (tracked by _swap*) - handleSwapCleanup(freeNChannels); - if (symChannelName) symChannelName->release(); - unlockReporterConfig(); - - return res; + // free up not-in-use buffers (tracked by _swap*) + handleSwapCleanup(freeNChannels); + if (symChannelName) { + symChannelName->release(); + } + unlockReporterConfig(); + + return res; } IOReportLegendEntry* IOReporter::createLegend(void) { - IOReportLegendEntry *legendEntry = NULL; - - lockReporterConfig(); - - legendEntry = handleCreateLegend(); - - unlockReporterConfig(); - - return legendEntry; + IOReportLegendEntry *legendEntry = NULL; + + lockReporterConfig(); + + legendEntry = handleCreateLegend(); + + unlockReporterConfig(); + + return legendEntry; } IOReturn IOReporter::configureReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination) + IOReportConfigureAction action, + void *result, + void *destination) { - IOReturn res = kIOReturnError; - - lockReporterConfig(); - - res = handleConfigureReport(channelList, action, result, destination); - - unlockReporterConfig(); - - return res; - + IOReturn res = kIOReturnError; + + lockReporterConfig(); + + res = handleConfigureReport(channelList, action, result, destination); + + unlockReporterConfig(); + + return res; } IOReturn IOReporter::updateReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination) + IOReportConfigureAction action, + void *result, + void *destination) { - IOReturn res = kIOReturnError; - - lockReporter(); - - res = handleUpdateReport(channelList, action, result, destination); - - unlockReporter(); - - return res; - + IOReturn res = kIOReturnError; + + lockReporter(); + + res = 
handleUpdateReport(channelList, action, result, destination); + + unlockReporter(); + + return res; } @@ -385,210 +397,209 @@ IOReporter::updateReport(IOReportChannelList *channelList, void IOReporter::lockReporter() { - _interruptState = IOSimpleLockLockDisableInterrupt(_reporterLock); - _reporterIsLocked = true; + _interruptState = IOSimpleLockLockDisableInterrupt(_reporterLock); + _reporterIsLocked = true; } void IOReporter::unlockReporter() { - _reporterIsLocked = false; - IOSimpleLockUnlockEnableInterrupt(_reporterLock, _interruptState); + _reporterIsLocked = false; + IOSimpleLockUnlockEnableInterrupt(_reporterLock, _interruptState); } void IOReporter::lockReporterConfig() { - IOLockLock(_configLock); - _reporterConfigIsLocked = true; + IOLockLock(_configLock); + _reporterConfigIsLocked = true; } void IOReporter::unlockReporterConfig() { - _reporterConfigIsLocked = false; - IOLockUnlock(_configLock); + _reporterConfigIsLocked = false; + IOLockUnlock(_configLock); } IOReturn IOReporter::handleSwapPrepare(int newNChannels) { - IOReturn res = kIOReturnError; - int newNElements; - size_t newElementsSize, newECSize; - - // analyzer appeasement - newElementsSize = newECSize = 0; - - //IORLOG("IOReporter::handleSwapPrepare"); - - IOREPORTER_CHECK_CONFIG_LOCK(); - - if (newNChannels < _nChannels) { - panic("%s doesn't support shrinking", __func__); - } - if (newNChannels <= 0 || _channelDimension <= 0) { - res = kIOReturnUnderrun; - goto finish; - } - if (_swapElements || _swapEnableCounts) { - panic("IOReporter::_swap* already in use"); - } - - // calculate the number of elements given #ch & the dimension of each - if (newNChannels < 0 || newNChannels > INT_MAX / _channelDimension) { - res = kIOReturnOverrun; - goto finish; - } - newNElements = newNChannels * _channelDimension; - - // Allocate memory for the new array of report elements - PREFL_MEMOP_FAIL(newNElements, IOReportElement); - newElementsSize = (size_t)newNElements * sizeof(IOReportElement); - _swapElements = (IOReportElement *)IOMalloc(newElementsSize); - if (_swapElements == NULL) { - res = kIOReturnNoMemory; goto finish; - } - memset(_swapElements, 0, newElementsSize); - - // Allocate memory for the new array of channel watch counts - PREFL_MEMOP_FAIL(newNChannels, int); - newECSize = (size_t)newNChannels * sizeof(int); - _swapEnableCounts = (int *)IOMalloc(newECSize); - if (_swapEnableCounts == NULL){ - res = kIOReturnNoMemory; goto finish; - } - memset(_swapEnableCounts, 0, newECSize); - - // success - res = kIOReturnSuccess; + IOReturn res = kIOReturnError; + int newNElements; + size_t newElementsSize, newECSize; + + // analyzer appeasement + newElementsSize = newECSize = 0; + + //IORLOG("IOReporter::handleSwapPrepare"); + + IOREPORTER_CHECK_CONFIG_LOCK(); + + if (newNChannels < _nChannels) { + panic("%s doesn't support shrinking", __func__); + } + if (newNChannels <= 0 || _channelDimension <= 0) { + res = kIOReturnUnderrun; + goto finish; + } + if (_swapElements || _swapEnableCounts) { + panic("IOReporter::_swap* already in use"); + } + + // calculate the number of elements given #ch & the dimension of each + if (newNChannels < 0 || newNChannels > INT_MAX / _channelDimension) { + res = kIOReturnOverrun; + goto finish; + } + newNElements = newNChannels * _channelDimension; + + // Allocate memory for the new array of report elements + PREFL_MEMOP_FAIL(newNElements, IOReportElement); + newElementsSize = (size_t)newNElements * sizeof(IOReportElement); + _swapElements = (IOReportElement *)IOMalloc(newElementsSize); + if 
(_swapElements == NULL) { + res = kIOReturnNoMemory; goto finish; + } + memset(_swapElements, 0, newElementsSize); + + // Allocate memory for the new array of channel watch counts + PREFL_MEMOP_FAIL(newNChannels, int); + newECSize = (size_t)newNChannels * sizeof(int); + _swapEnableCounts = (int *)IOMalloc(newECSize); + if (_swapEnableCounts == NULL) { + res = kIOReturnNoMemory; goto finish; + } + memset(_swapEnableCounts, 0, newECSize); + + // success + res = kIOReturnSuccess; finish: - if (res) { - if (_swapElements) { - IOFree(_swapElements, newElementsSize); - _swapElements = NULL; - } - if (_swapEnableCounts) { - IOFree(_swapEnableCounts, newECSize); - _swapEnableCounts = NULL; - } - } - - return res; + if (res) { + if (_swapElements) { + IOFree(_swapElements, newElementsSize); + _swapElements = NULL; + } + if (_swapEnableCounts) { + IOFree(_swapEnableCounts, newECSize); + _swapEnableCounts = NULL; + } + } + + return res; } IOReturn IOReporter::handleAddChannelSwap(uint64_t channel_id, - const OSSymbol *symChannelName) + const OSSymbol *symChannelName) { - IOReturn res = kIOReturnError; - int cnt; - int *tmpWatchCounts = NULL; - IOReportElement *tmpElements = NULL; - bool swapComplete = false; - - //IORLOG("IOReporter::handleSwap"); - - IOREPORTER_CHECK_CONFIG_LOCK(); - IOREPORTER_CHECK_LOCK(); - - if (!_swapElements || !_swapEnableCounts) { - IORLOG("IOReporter::handleSwap ERROR swap variables uninitialized!"); - goto finish; - } - - // Copy any existing elements to the new location - //IORLOG("handleSwap (base) -> copying %u elements over...", _nChannels); - if (_elements) { - PREFL_MEMOP_PANIC(_nElements, IOReportElement); - memcpy(_swapElements, _elements, - (size_t)_nElements * sizeof(IOReportElement)); - - PREFL_MEMOP_PANIC(_nElements, int); - memcpy(_swapEnableCounts, _enableCounts, - (size_t)_nChannels * sizeof(int)); - } - - // Update principal instance variables, keep old buffers for cleanup - tmpElements = _elements; - _elements = _swapElements; - _swapElements = tmpElements; - - tmpWatchCounts = _enableCounts; - _enableCounts = _swapEnableCounts; - _swapEnableCounts = tmpWatchCounts; - - swapComplete = true; - - // but _nChannels & _nElements is still the old (one smaller) size - - // Initialize new element metadata (existing elements copied above) - for (cnt = 0; cnt < _channelDimension; cnt++) { - - _elements[_nElements + cnt].channel_id = channel_id; - _elements[_nElements + cnt].provider_id = _driver_id; - _elements[_nElements + cnt].channel_type = _channelType; - _elements[_nElements + cnt].channel_type.element_idx = cnt; - - //IOREPORTER_DEBUG_ELEMENT(_swapNElements + cnt); - } - - // Store a channel name at the end - if (!_channelNames->setObject((unsigned)_nChannels, symChannelName)) { - // Should never happen because we ensured capacity in addChannel() - res = kIOReturnNoMemory; - goto finish; - } - - // And update the metadata: addChannel() always adds just one channel - _nChannels += 1; - _nElements += _channelDimension; - - // success - res = kIOReturnSuccess; + IOReturn res = kIOReturnError; + int cnt; + int *tmpWatchCounts = NULL; + IOReportElement *tmpElements = NULL; + bool swapComplete = false; + + //IORLOG("IOReporter::handleSwap"); + + IOREPORTER_CHECK_CONFIG_LOCK(); + IOREPORTER_CHECK_LOCK(); + + if (!_swapElements || !_swapEnableCounts) { + IORLOG("IOReporter::handleSwap ERROR swap variables uninitialized!"); + goto finish; + } + + // Copy any existing elements to the new location + //IORLOG("handleSwap (base) -> copying %u elements over...", 
_nChannels); + if (_elements) { + PREFL_MEMOP_PANIC(_nElements, IOReportElement); + memcpy(_swapElements, _elements, + (size_t)_nElements * sizeof(IOReportElement)); + + PREFL_MEMOP_PANIC(_nElements, int); + memcpy(_swapEnableCounts, _enableCounts, + (size_t)_nChannels * sizeof(int)); + } + + // Update principal instance variables, keep old buffers for cleanup + tmpElements = _elements; + _elements = _swapElements; + _swapElements = tmpElements; + + tmpWatchCounts = _enableCounts; + _enableCounts = _swapEnableCounts; + _swapEnableCounts = tmpWatchCounts; + + swapComplete = true; + + // but _nChannels & _nElements is still the old (one smaller) size + + // Initialize new element metadata (existing elements copied above) + for (cnt = 0; cnt < _channelDimension; cnt++) { + _elements[_nElements + cnt].channel_id = channel_id; + _elements[_nElements + cnt].provider_id = _driver_id; + _elements[_nElements + cnt].channel_type = _channelType; + _elements[_nElements + cnt].channel_type.element_idx = cnt; + + //IOREPORTER_DEBUG_ELEMENT(_swapNElements + cnt); + } + + // Store a channel name at the end + if (!_channelNames->setObject((unsigned)_nChannels, symChannelName)) { + // Should never happen because we ensured capacity in addChannel() + res = kIOReturnNoMemory; + goto finish; + } + + // And update the metadata: addChannel() always adds just one channel + _nChannels += 1; + _nElements += _channelDimension; + + // success + res = kIOReturnSuccess; finish: - if (res && swapComplete) { - // unswap so new buffers get cleaned up instead of old - tmpElements = _elements; - _elements = _swapElements; - _swapElements = tmpElements; - - tmpWatchCounts = _enableCounts; - _enableCounts = _swapEnableCounts; - _swapEnableCounts = tmpWatchCounts; - } - return res; + if (res && swapComplete) { + // unswap so new buffers get cleaned up instead of old + tmpElements = _elements; + _elements = _swapElements; + _swapElements = tmpElements; + + tmpWatchCounts = _enableCounts; + _enableCounts = _swapEnableCounts; + _swapEnableCounts = tmpWatchCounts; + } + return res; } void IOReporter::handleSwapCleanup(int swapNChannels) { - int swapNElements; - - if (!_channelDimension || swapNChannels > INT_MAX / _channelDimension) { - panic("%s - can't free %d channels of dimension %d", __func__, - swapNChannels, _channelDimension); - } - swapNElements = swapNChannels * _channelDimension; - - IOREPORTER_CHECK_CONFIG_LOCK(); - - // release buffers no longer used after swapping - if (_swapElements) { - PREFL_MEMOP_PANIC(swapNElements, IOReportElement); - IOFree(_swapElements, (size_t)swapNElements * sizeof(IOReportElement)); - _swapElements = NULL; - } - if (_swapEnableCounts) { - PREFL_MEMOP_PANIC(swapNChannels, int); - IOFree(_swapEnableCounts, (size_t)swapNChannels * sizeof(int)); - _swapEnableCounts = NULL; - } + int swapNElements; + + if (!_channelDimension || swapNChannels > INT_MAX / _channelDimension) { + panic("%s - can't free %d channels of dimension %d", __func__, + swapNChannels, _channelDimension); + } + swapNElements = swapNChannels * _channelDimension; + + IOREPORTER_CHECK_CONFIG_LOCK(); + + // release buffers no longer used after swapping + if (_swapElements) { + PREFL_MEMOP_PANIC(swapNElements, IOReportElement); + IOFree(_swapElements, (size_t)swapNElements * sizeof(IOReportElement)); + _swapElements = NULL; + } + if (_swapEnableCounts) { + PREFL_MEMOP_PANIC(swapNChannels, int); + IOFree(_swapEnableCounts, (size_t)swapNChannels * sizeof(int)); + _swapEnableCounts = NULL; + } } @@ -597,353 +608,360 @@ 
IOReporter::handleSwapCleanup(int swapNChannels) // clients can use to cull unused reporters after configureReport(disable). IOReturn IOReporter::handleConfigureReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination) + IOReportConfigureAction action, + void *result, + void *destination) { - IOReturn res = kIOReturnError; - int channel_index = 0; - uint32_t chIdx; - int *nElements, *nChannels; - - // Check on channelList and result because used below - if (!channelList || !result) goto finish; - - //IORLOG("IOReporter::configureReport action %u for %u channels", - // action, channelList->nchannels); - - // Make sure channel is present, increase matching watch count, 'result' - for (chIdx = 0; chIdx < channelList->nchannels; chIdx++) { - - if (getChannelIndex(channelList->channels[chIdx].channel_id, - &channel_index) == kIOReturnSuccess) { - // IORLOG("reporter %p recognizes channel %lld", this, channelList->channels[chIdx].channel_id); - - switch (action) { - - case kIOReportEnable: - nChannels = (int*)result; - _enabled++; - _enableCounts[channel_index]++; - (*nChannels)++; - break; - - case kIOReportDisable: - nChannels = (int*)result; - _enabled--; - _enableCounts[channel_index]--; - (*nChannels)++; - break; - - case kIOReportGetDimensions: - nElements = (int *)result; - *nElements += _channelDimension; - break; - - default: - IORLOG("ERROR configureReport unknown action!"); - break; - } - } - } - - // success - res = kIOReturnSuccess; - + IOReturn res = kIOReturnError; + int channel_index = 0; + uint32_t chIdx; + int *nElements, *nChannels; + + // Check on channelList and result because used below + if (!channelList || !result) { + goto finish; + } + + //IORLOG("IOReporter::configureReport action %u for %u channels", + // action, channelList->nchannels); + + // Make sure channel is present, increase matching watch count, 'result' + for (chIdx = 0; chIdx < channelList->nchannels; chIdx++) { + if (getChannelIndex(channelList->channels[chIdx].channel_id, + &channel_index) == kIOReturnSuccess) { + // IORLOG("reporter %p recognizes channel %lld", this, channelList->channels[chIdx].channel_id); + + switch (action) { + case kIOReportEnable: + nChannels = (int*)result; + _enabled++; + _enableCounts[channel_index]++; + (*nChannels)++; + break; + + case kIOReportDisable: + nChannels = (int*)result; + _enabled--; + _enableCounts[channel_index]--; + (*nChannels)++; + break; + + case kIOReportGetDimensions: + nElements = (int *)result; + *nElements += _channelDimension; + break; + + default: + IORLOG("ERROR configureReport unknown action!"); + break; + } + } + } + + // success + res = kIOReturnSuccess; + finish: - return res; + return res; } IOReturn IOReporter::handleUpdateReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination) + IOReportConfigureAction action, + void *result, + void *destination) { - IOReturn res = kIOReturnError; - int *nElements = (int *)result; - int channel_index = 0; - uint32_t chIdx; - IOBufferMemoryDescriptor *dest; - - if (!channelList || !result || !destination) goto finish; - - dest = OSDynamicCast(IOBufferMemoryDescriptor, (OSObject *)destination); - if (dest == NULL) { - // Invalid destination - res = kIOReturnBadArgument; - goto finish; - } - - if (!_enabled) { - goto finish; - } - - for (chIdx = 0; chIdx < channelList->nchannels; chIdx++) { - - if (getChannelIndex(channelList->channels[chIdx].channel_id, - &channel_index) == kIOReturnSuccess) { - - 
//IORLOG("%s - found channel_id %llx @ index %d", __func__, - // channelList->channels[chIdx].channel_id, - // channel_index); - - switch(action) { - - case kIOReportCopyChannelData: - res = updateChannelValues(channel_index); - if (res) { - IORLOG("ERROR: updateChannelValues() failed: %x", res); - goto finish; - } - - res = updateReportChannel(channel_index, nElements, dest); - if (res) { - IORLOG("ERROR: updateReportChannel() failed: %x", res); - goto finish; - } - break; - - default: - IORLOG("ERROR updateReport unknown action!"); - res = kIOReturnError; - goto finish; - } - } - } - - // success - res = kIOReturnSuccess; - + IOReturn res = kIOReturnError; + int *nElements = (int *)result; + int channel_index = 0; + uint32_t chIdx; + IOBufferMemoryDescriptor *dest; + + if (!channelList || !result || !destination) { + goto finish; + } + + dest = OSDynamicCast(IOBufferMemoryDescriptor, (OSObject *)destination); + if (dest == NULL) { + // Invalid destination + res = kIOReturnBadArgument; + goto finish; + } + + if (!_enabled) { + goto finish; + } + + for (chIdx = 0; chIdx < channelList->nchannels; chIdx++) { + if (getChannelIndex(channelList->channels[chIdx].channel_id, + &channel_index) == kIOReturnSuccess) { + //IORLOG("%s - found channel_id %llx @ index %d", __func__, + // channelList->channels[chIdx].channel_id, + // channel_index); + + switch (action) { + case kIOReportCopyChannelData: + res = updateChannelValues(channel_index); + if (res) { + IORLOG("ERROR: updateChannelValues() failed: %x", res); + goto finish; + } + + res = updateReportChannel(channel_index, nElements, dest); + if (res) { + IORLOG("ERROR: updateReportChannel() failed: %x", res); + goto finish; + } + break; + + default: + IORLOG("ERROR updateReport unknown action!"); + res = kIOReturnError; + goto finish; + } + } + } + + // success + res = kIOReturnSuccess; + finish: - return res; + return res; } IOReportLegendEntry* IOReporter::handleCreateLegend(void) { - IOReportLegendEntry *legendEntry = NULL; - OSArray *channelIDs; - - channelIDs = copyChannelIDs(); - - if (channelIDs) { - legendEntry = IOReporter::legendWith(channelIDs, _channelNames, _channelType, _unit); - channelIDs->release(); - } - - return legendEntry; + IOReportLegendEntry *legendEntry = NULL; + OSArray *channelIDs; + + channelIDs = copyChannelIDs(); + + if (channelIDs) { + legendEntry = IOReporter::legendWith(channelIDs, _channelNames, _channelType, _unit); + channelIDs->release(); + } + + return legendEntry; } IOReturn IOReporter::setElementValues(int element_index, - IOReportElementValues *values, - uint64_t record_time /* = 0 */) + IOReportElementValues *values, + uint64_t record_time /* = 0 */) { - IOReturn res = kIOReturnError; - - IOREPORTER_CHECK_LOCK(); - - if (record_time == 0) { - record_time = mach_absolute_time(); - } - - if (element_index >= _nElements || values == NULL) { - res = kIOReturnBadArgument; - goto finish; - } - - memcpy(&_elements[element_index].values, values, sizeof(IOReportElementValues)); - - _elements[element_index].timestamp = record_time; - - //IOREPORTER_DEBUG_ELEMENT(index); - - res = kIOReturnSuccess; - + IOReturn res = kIOReturnError; + + IOREPORTER_CHECK_LOCK(); + + if (record_time == 0) { + record_time = mach_absolute_time(); + } + + if (element_index >= _nElements || values == NULL) { + res = kIOReturnBadArgument; + goto finish; + } + + memcpy(&_elements[element_index].values, values, sizeof(IOReportElementValues)); + + _elements[element_index].timestamp = record_time; + + //IOREPORTER_DEBUG_ELEMENT(index); + + res 
= kIOReturnSuccess; + finish: - return res; + return res; } const IOReportElementValues* IOReporter::getElementValues(int element_index) { - IOReportElementValues *elementValues = NULL; - - IOREPORTER_CHECK_LOCK(); - - if (element_index < 0 || element_index >= _nElements) { - IORLOG("ERROR getElementValues out of bounds!"); - goto finish; - } - - elementValues = &_elements[element_index].values; - + IOReportElementValues *elementValues = NULL; + + IOREPORTER_CHECK_LOCK(); + + if (element_index < 0 || element_index >= _nElements) { + IORLOG("ERROR getElementValues out of bounds!"); + goto finish; + } + + elementValues = &_elements[element_index].values; + finish: - return elementValues; + return elementValues; } IOReturn IOReporter::updateChannelValues(int channel_index) { - return kIOReturnSuccess; + return kIOReturnSuccess; } IOReturn IOReporter::updateReportChannel(int channel_index, - int *nElements, - IOBufferMemoryDescriptor *destination) + int *nElements, + IOBufferMemoryDescriptor *destination) { - IOReturn res = kIOReturnError; - int start_element_idx, chElems; - size_t size2cpy; - - res = kIOReturnBadArgument; - if (!nElements || !destination) { - goto finish; - } - if (channel_index > _nChannels) { - goto finish; - } - - IOREPORTER_CHECK_LOCK(); - - res = kIOReturnOverrun; - - start_element_idx = channel_index * _channelDimension; - if (start_element_idx >= _nElements) goto finish; - - chElems = _elements[start_element_idx].channel_type.nelements; - - // make sure we don't go beyond the end of _elements[_nElements-1] - if (start_element_idx + chElems > _nElements) { - goto finish; - } - - PREFL_MEMOP_FAIL(chElems, IOReportElement); - size2cpy = (size_t)chElems * sizeof(IOReportElement); - - // make sure there's space in the destination - if (size2cpy > (destination->getCapacity() - destination->getLength())) { - IORLOG("CRITICAL ERROR: Report Buffer Overflow (buffer cap %luB, length %luB, size2cpy %luB", - (unsigned long)destination->getCapacity(), - (unsigned long)destination->getLength(), - (unsigned long)size2cpy); - goto finish; - } - - destination->appendBytes(&_elements[start_element_idx], size2cpy); - *nElements += chElems; - - res = kIOReturnSuccess; - + IOReturn res = kIOReturnError; + int start_element_idx, chElems; + size_t size2cpy; + + res = kIOReturnBadArgument; + if (!nElements || !destination) { + goto finish; + } + if (channel_index > _nChannels) { + goto finish; + } + + IOREPORTER_CHECK_LOCK(); + + res = kIOReturnOverrun; + + start_element_idx = channel_index * _channelDimension; + if (start_element_idx >= _nElements) { + goto finish; + } + + chElems = _elements[start_element_idx].channel_type.nelements; + + // make sure we don't go beyond the end of _elements[_nElements-1] + if (start_element_idx + chElems > _nElements) { + goto finish; + } + + PREFL_MEMOP_FAIL(chElems, IOReportElement); + size2cpy = (size_t)chElems * sizeof(IOReportElement); + + // make sure there's space in the destination + if (size2cpy > (destination->getCapacity() - destination->getLength())) { + IORLOG("CRITICAL ERROR: Report Buffer Overflow (buffer cap %luB, length %luB, size2cpy %luB", + (unsigned long)destination->getCapacity(), + (unsigned long)destination->getLength(), + (unsigned long)size2cpy); + goto finish; + } + + destination->appendBytes(&_elements[start_element_idx], size2cpy); + *nElements += chElems; + + res = kIOReturnSuccess; + finish: - return res; + return res; } IOReturn IOReporter::copyElementValues(int element_index, - IOReportElementValues *elementValues) + 
IOReportElementValues *elementValues) { - IOReturn res = kIOReturnError; - - if (!elementValues) goto finish; - - IOREPORTER_CHECK_LOCK(); - - if (element_index >= _nElements) { - IORLOG("ERROR getElementValues out of bounds!"); - res = kIOReturnBadArgument; - goto finish; - } - - memcpy(elementValues, &_elements[element_index].values, sizeof(IOReportElementValues)); - res = kIOReturnSuccess; + IOReturn res = kIOReturnError; + + if (!elementValues) { + goto finish; + } + + IOREPORTER_CHECK_LOCK(); + + if (element_index >= _nElements) { + IORLOG("ERROR getElementValues out of bounds!"); + res = kIOReturnBadArgument; + goto finish; + } + + memcpy(elementValues, &_elements[element_index].values, sizeof(IOReportElementValues)); + res = kIOReturnSuccess; finish: - return res; + return res; } IOReturn IOReporter::getFirstElementIndex(uint64_t channel_id, - int *index) + int *index) { - IOReturn res = kIOReturnError; - int channel_index = 0, element_index = 0; - - if (!index) goto finish; - - res = getChannelIndices(channel_id, &channel_index, &element_index); - - if (res == kIOReturnSuccess) { - *index = element_index; - } + IOReturn res = kIOReturnError; + int channel_index = 0, element_index = 0; + + if (!index) { + goto finish; + } + + res = getChannelIndices(channel_id, &channel_index, &element_index); + + if (res == kIOReturnSuccess) { + *index = element_index; + } finish: - return res; + return res; } IOReturn IOReporter::getChannelIndex(uint64_t channel_id, - int *index) + int *index) { - IOReturn res = kIOReturnError; - int channel_index = 0, element_index = 0; - - if (!index) goto finish; - - res = getChannelIndices(channel_id, &channel_index, &element_index); - - if (res == kIOReturnSuccess) { - *index = channel_index; - } + IOReturn res = kIOReturnError; + int channel_index = 0, element_index = 0; + + if (!index) { + goto finish; + } + + res = getChannelIndices(channel_id, &channel_index, &element_index); + + if (res == kIOReturnSuccess) { + *index = channel_index; + } finish: - return res; + return res; } IOReturn IOReporter::getChannelIndices(uint64_t channel_id, - int *channel_index, - int *element_index) + int *channel_index, + int *element_index) { - IOReturn res = kIOReturnNotFound; - int chIdx, elemIdx; - - if (!channel_index || !element_index) goto finish; - - for (chIdx = 0; chIdx < _nChannels; chIdx++) { - - elemIdx = chIdx * _channelDimension; - if (elemIdx >= _nElements) { - IORLOG("ERROR getChannelIndices out of bounds!"); - res = kIOReturnOverrun; - goto finish; - } - - if (channel_id == _elements[elemIdx].channel_id) { - - // The channel index does not care about the depth of elements... - *channel_index = chIdx; - *element_index = elemIdx; - - res = kIOReturnSuccess; - goto finish; - } - } + IOReturn res = kIOReturnNotFound; + int chIdx, elemIdx; + + if (!channel_index || !element_index) { + goto finish; + } + + for (chIdx = 0; chIdx < _nChannels; chIdx++) { + elemIdx = chIdx * _channelDimension; + if (elemIdx >= _nElements) { + IORLOG("ERROR getChannelIndices out of bounds!"); + res = kIOReturnOverrun; + goto finish; + } + + if (channel_id == _elements[elemIdx].channel_id) { + // The channel index does not care about the depth of elements... 
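getChannelIndices() above leans on the reporter's flat element layout: channel n owns element indices [n * _channelDimension, (n + 1) * _channelDimension), and every element of a channel carries the channel's ID, so probing element 0 of each channel suffices. A minimal standalone sketch of the same scan; the struct and function names here are illustrative stand-ins, not the kernel types:

#include <cstdint>

struct Element {
    uint64_t channel_id;
    // per-element values would follow here
};

// Mirrors the lookup: walk channels, test the first element of each.
// Returns the channel index, or -1 when channel_id is not present.
static int
findChannel(const Element *elements, int nChannels, int dimension,
    uint64_t channel_id, int *element_index)
{
    for (int ch = 0; ch < nChannels; ch++) {
        int elem = ch * dimension;      // first element of channel 'ch'
        if (elements[elem].channel_id == channel_id) {
            *element_index = elem;
            return ch;
        }
    }
    return -1;
}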
+ *channel_index = chIdx; + *element_index = elemIdx; + + res = kIOReturnSuccess; + goto finish; + } + } finish: - return res; + return res; } /********************************/ @@ -955,114 +973,121 @@ finish: OSArray* IOReporter::copyChannelIDs() { - int cnt, cnt2; - OSArray *channelIDs = NULL; - OSNumber *tmpNum; - - channelIDs = OSArray::withCapacity((unsigned)_nChannels); - - if (!channelIDs) goto finish; - - for (cnt = 0; cnt < _nChannels; cnt++) { - - cnt2 = cnt * _channelDimension; - - // Encapsulate the Channel ID in OSNumber - tmpNum = OSNumber::withNumber(_elements[cnt2].channel_id, 64); - if (!tmpNum) { - IORLOG("ERROR: Could not create array of channelIDs"); - channelIDs->release(); - channelIDs = NULL; - goto finish; - } - - channelIDs->setObject((unsigned)cnt, tmpNum); - tmpNum->release(); - } + int cnt, cnt2; + OSArray *channelIDs = NULL; + OSNumber *tmpNum; + + channelIDs = OSArray::withCapacity((unsigned)_nChannels); + + if (!channelIDs) { + goto finish; + } + + for (cnt = 0; cnt < _nChannels; cnt++) { + cnt2 = cnt * _channelDimension; + + // Encapsulate the Channel ID in OSNumber + tmpNum = OSNumber::withNumber(_elements[cnt2].channel_id, 64); + if (!tmpNum) { + IORLOG("ERROR: Could not create array of channelIDs"); + channelIDs->release(); + channelIDs = NULL; + goto finish; + } + + channelIDs->setObject((unsigned)cnt, tmpNum); + tmpNum->release(); + } finish: - return channelIDs; + return channelIDs; } // DO NOT REMOVE THIS METHOD WHICH IS THE MAIN LEGEND CREATION FUNCTION /*static */ IOReportLegendEntry* IOReporter::legendWith(OSArray *channelIDs, - OSArray *channelNames, - IOReportChannelType channelType, - IOReportUnit unit) + OSArray *channelNames, + IOReportChannelType channelType, + IOReportUnit unit) { - unsigned int cnt, chCnt; - uint64_t type64; - OSNumber *tmpNum; - const OSSymbol *tmpSymbol; - OSArray *channelLegendArray = NULL, *tmpChannelArray = NULL; - OSDictionary *channelInfoDict = NULL; - IOReportLegendEntry *legendEntry = NULL; - - // No need to check validity of channelNames because param is optional - if (!channelIDs) goto finish; - chCnt = channelIDs->getCount(); - - channelLegendArray = OSArray::withCapacity(chCnt); - - for (cnt = 0; cnt < chCnt; cnt++) { - - tmpChannelArray = OSArray::withCapacity(3); - - // Encapsulate the Channel ID in OSNumber - tmpChannelArray->setObject(kIOReportChannelIDIdx, channelIDs->getObject(cnt)); - - // Encapsulate the Channel Type in OSNumber - memcpy(&type64, &channelType, sizeof(type64)); - tmpNum = OSNumber::withNumber(type64, 64); - if (!tmpNum) { - goto finish; - } - tmpChannelArray->setObject(kIOReportChannelTypeIdx, tmpNum); - tmpNum->release(); - - // Encapsulate the Channel Name in OSSymbol - // Use channelNames if provided - if (channelNames != NULL) { - tmpSymbol = OSDynamicCast(OSSymbol, channelNames->getObject(cnt)); - if (tmpSymbol && tmpSymbol != gIOReportNoChannelName) { - tmpChannelArray->setObject(kIOReportChannelNameIdx, tmpSymbol); - } // Else, skip and leave name field empty - } - - channelLegendArray->setObject(cnt, tmpChannelArray); - tmpChannelArray->release(); - tmpChannelArray = NULL; - } - - // Stuff the legend entry only if we have channels... 
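Each record in the channel legend is a three-slot OSArray addressed by kIOReportChannelIDIdx, kIOReportChannelTypeIdx, and kIOReportChannelNameIdx; note that the 64-bit IOReportChannelType is bit-copied into an OSNumber rather than translated field by field. A condensed sketch of the per-channel step shown above, with idNum, channelType, cnt, and channelLegendArray as in the surrounding code and allocation checks elided for brevity:

OSArray *entry = OSArray::withCapacity(3);
uint64_t type64;

entry->setObject(kIOReportChannelIDIdx, idNum);     // OSNumber carrying the channel ID
memcpy(&type64, &channelType, sizeof(type64));      // reinterpret the packed type struct
OSNumber *typeNum = OSNumber::withNumber(type64, 64);
entry->setObject(kIOReportChannelTypeIdx, typeNum);
typeNum->release();                                 // setObject() retained it; drop our ref
// the name slot stays empty unless a real (non-placeholder) OSSymbol was supplied
channelLegendArray->setObject(cnt, entry);
entry->release();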
- if (channelLegendArray->getCount() != 0) { - - channelInfoDict = OSDictionary::withCapacity(1); - - if (!channelInfoDict) { - goto finish; - } - - tmpNum = OSNumber::withNumber(unit, 64); - if (tmpNum) { - channelInfoDict->setObject(kIOReportLegendUnitKey, tmpNum); - tmpNum->release(); - } - - legendEntry = OSDictionary::withCapacity(1); - - if (legendEntry) { - legendEntry->setObject(kIOReportLegendChannelsKey, channelLegendArray); - legendEntry->setObject(kIOReportLegendInfoKey, channelInfoDict); - } - } - + unsigned int cnt, chCnt; + uint64_t type64; + OSNumber *tmpNum; + const OSSymbol *tmpSymbol; + OSArray *channelLegendArray = NULL, *tmpChannelArray = NULL; + OSDictionary *channelInfoDict = NULL; + IOReportLegendEntry *legendEntry = NULL; + + // No need to check validity of channelNames because param is optional + if (!channelIDs) { + goto finish; + } + chCnt = channelIDs->getCount(); + + channelLegendArray = OSArray::withCapacity(chCnt); + + for (cnt = 0; cnt < chCnt; cnt++) { + tmpChannelArray = OSArray::withCapacity(3); + + // Encapsulate the Channel ID in OSNumber + tmpChannelArray->setObject(kIOReportChannelIDIdx, channelIDs->getObject(cnt)); + + // Encapsulate the Channel Type in OSNumber + memcpy(&type64, &channelType, sizeof(type64)); + tmpNum = OSNumber::withNumber(type64, 64); + if (!tmpNum) { + goto finish; + } + tmpChannelArray->setObject(kIOReportChannelTypeIdx, tmpNum); + tmpNum->release(); + + // Encapsulate the Channel Name in OSSymbol + // Use channelNames if provided + if (channelNames != NULL) { + tmpSymbol = OSDynamicCast(OSSymbol, channelNames->getObject(cnt)); + if (tmpSymbol && tmpSymbol != gIOReportNoChannelName) { + tmpChannelArray->setObject(kIOReportChannelNameIdx, tmpSymbol); + } // Else, skip and leave name field empty + } + + channelLegendArray->setObject(cnt, tmpChannelArray); + tmpChannelArray->release(); + tmpChannelArray = NULL; + } + + // Stuff the legend entry only if we have channels... + if (channelLegendArray->getCount() != 0) { + channelInfoDict = OSDictionary::withCapacity(1); + + if (!channelInfoDict) { + goto finish; + } + + tmpNum = OSNumber::withNumber(unit, 64); + if (tmpNum) { + channelInfoDict->setObject(kIOReportLegendUnitKey, tmpNum); + tmpNum->release(); + } + + legendEntry = OSDictionary::withCapacity(1); + + if (legendEntry) { + legendEntry->setObject(kIOReportLegendChannelsKey, channelLegendArray); + legendEntry->setObject(kIOReportLegendInfoKey, channelInfoDict); + } + } + finish: - if (tmpChannelArray) tmpChannelArray->release(); - if (channelInfoDict) channelInfoDict->release(); - if (channelLegendArray) channelLegendArray->release(); - - return legendEntry; + if (tmpChannelArray) { + tmpChannelArray->release(); + } + if (channelInfoDict) { + channelInfoDict->release(); + } + if (channelLegendArray) { + channelLegendArray->release(); + } + + return legendEntry; } diff --git a/iokit/Kernel/IOReporterDefs.h b/iokit/Kernel/IOReporterDefs.h index ec94a3d24..1bac12706 100644 --- a/iokit/Kernel/IOReporterDefs.h +++ b/iokit/Kernel/IOReporterDefs.h @@ -1,8 +1,8 @@ /* * Copyright (c) 2012-2013 Apple Computer, Inc. All Rights Reserved. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,7 +33,7 @@ //#define IORDEBUG_IOLOG -#if defined(IORDEBUG_IOLOG) +#if defined(IORDEBUG_IOLOG) #define IORLOG(fmt, args...) \ do { \ IOLog((fmt), ##args); \ @@ -41,7 +41,7 @@ do { \ } while(0) #else -#define IORLOG(fmt, args...) +#define IORLOG(fmt, args...) #endif #define IORERROR_LOG @@ -59,21 +59,21 @@ do { \ #define PREFL_MEMOP_FAIL(__val, __type) do { \ if (__val <= 0) { \ - IORERROR("%s - %s <= 0!\n", __func__, #__val); \ - res = kIOReturnUnderrun; \ - goto finish; \ + IORERROR("%s - %s <= 0!\n", __func__, #__val); \ + res = kIOReturnUnderrun; \ + goto finish; \ } else if (__val > INT_MAX / (int)sizeof(__type)) { \ - IORERROR("%s - %s > INT_MAX / sizeof(%s)!\n",__func__,#__val,#__type);\ - res = kIOReturnOverrun; \ - goto finish; \ + IORERROR("%s - %s > INT_MAX / sizeof(%s)!\n",__func__,#__val,#__type);\ + res = kIOReturnOverrun; \ + goto finish; \ } \ } while(0) #define PREFL_MEMOP_PANIC(__val, __type) do { \ if (__val <= 0) { \ - panic("%s - %s <= 0!", __func__, #__val); \ + panic("%s - %s <= 0!", __func__, #__val); \ } else if (__val > INT_MAX / (int)sizeof(__type)) { \ - panic("%s - %s > INT_MAX / sizeof(%s)!", __func__, #__val, #__type); \ + panic("%s - %s > INT_MAX / sizeof(%s)!", __func__, #__val, #__type); \ } \ } while(0) @@ -99,16 +99,15 @@ _elements[idx].values.v[3]); \ #define IOREPORTER_CHECK_LOCK() \ do { \ if (!_reporterIsLocked) { \ - panic("%s was called out of locked context!", __PRETTY_FUNCTION__); \ + panic("%s was called out of locked context!", __PRETTY_FUNCTION__); \ } \ } while(0) \ #define IOREPORTER_CHECK_CONFIG_LOCK() \ do { \ if (!_reporterConfigIsLocked) { \ - panic("%s was called out of config locked context!", __PRETTY_FUNCTION__); \ + panic("%s was called out of config locked context!", __PRETTY_FUNCTION__); \ } \ } while(0) \ #endif /* ! _IOEPORTERDEFS_H */ - diff --git a/iokit/Kernel/IOService.cpp b/iokit/Kernel/IOService.cpp index cfb2d9c0f..5ef9f4325 100644 --- a/iokit/Kernel/IOService.cpp +++ b/iokit/Kernel/IOService.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
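PREFL_MEMOP_FAIL above rejects a count before it is ever multiplied by an element size: val <= 0 catches sign and underrun errors, and val > INT_MAX / sizeof(type) catches products that would overflow int. The same guard as a standalone predicate, a sketch rather than the kernel macro itself:

#include <climits>
#include <cstddef>

// True when 'count * size' fits in an int, mirroring PREFL_MEMOP_FAIL's checks.
static bool
memopPreflight(int count, size_t size)
{
    if (count <= 0) {
        return false;                    // underrun: nothing sensible to copy
    }
    if (count > INT_MAX / (int)size) {
        return false;                    // overrun: count * size would overflow
    }
    return true;
}

// e.g. before: size2cpy = (size_t)chElems * sizeof(IOReportElement);
// guard with:  if (!memopPreflight(chElems, sizeof(IOReportElement))) { /* bail */ }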
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #include #include @@ -64,11 +64,11 @@ #define LOG kprintf //#define LOG IOLog -#define MATCH_DEBUG 0 +#define MATCH_DEBUG 0 #define IOSERVICE_OBFUSCATE(x) ((void *)(VM_KERNEL_ADDRPERM(x))) // disabled since lockForArbitration() can be held externally -#define DEBUG_NOTIFIER_LOCKED 0 +#define DEBUG_NOTIFIER_LOCKED 0 #include "IOServicePrivate.h" #include "IOKitKernelInternal.h" @@ -98,125 +98,125 @@ OSDefineMetaClassAndAbstractStructors(IONotifier, OSObject) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -static IOPlatformExpert * gIOPlatform; -static class IOPMrootDomain * gIOPMRootDomain; -const IORegistryPlane * gIOServicePlane; -const IORegistryPlane * gIOPowerPlane; -const OSSymbol * gIODeviceMemoryKey; -const OSSymbol * gIOInterruptControllersKey; -const OSSymbol * gIOInterruptSpecifiersKey; - -const OSSymbol * gIOResourcesKey; -const OSSymbol * gIOResourceMatchKey; -const OSSymbol * gIOResourceMatchedKey; -const OSSymbol * gIOProviderClassKey; -const OSSymbol * gIONameMatchKey; -const OSSymbol * gIONameMatchedKey; -const OSSymbol * gIOPropertyMatchKey; -const OSSymbol * gIOPropertyExistsMatchKey; -const OSSymbol * gIOLocationMatchKey; -const OSSymbol * gIOParentMatchKey; -const OSSymbol * gIOPathMatchKey; -const OSSymbol * gIOMatchCategoryKey; -const OSSymbol * gIODefaultMatchCategoryKey; -const OSSymbol * gIOMatchedServiceCountKey; +static IOPlatformExpert * gIOPlatform; +static class IOPMrootDomain * gIOPMRootDomain; +const IORegistryPlane * gIOServicePlane; +const IORegistryPlane * gIOPowerPlane; +const OSSymbol * gIODeviceMemoryKey; +const OSSymbol * gIOInterruptControllersKey; +const OSSymbol * gIOInterruptSpecifiersKey; + +const OSSymbol * gIOResourcesKey; +const OSSymbol * gIOResourceMatchKey; +const OSSymbol * gIOResourceMatchedKey; +const OSSymbol * gIOProviderClassKey; +const OSSymbol * gIONameMatchKey; +const OSSymbol * gIONameMatchedKey; +const OSSymbol * gIOPropertyMatchKey; +const OSSymbol * gIOPropertyExistsMatchKey; +const OSSymbol * gIOLocationMatchKey; +const OSSymbol * gIOParentMatchKey; +const OSSymbol * gIOPathMatchKey; +const OSSymbol * gIOMatchCategoryKey; +const OSSymbol * gIODefaultMatchCategoryKey; +const OSSymbol * gIOMatchedServiceCountKey; #if !CONFIG_EMBEDDED -const OSSymbol * gIOServiceLegacyMatchingRegistryIDKey; +const OSSymbol * gIOServiceLegacyMatchingRegistryIDKey; #endif -const OSSymbol * gIOMapperIDKey; -const OSSymbol * gIOUserClientClassKey; -const OSSymbol * gIOKitDebugKey; - -const OSSymbol * gIOCommandPoolSizeKey; - -const OSSymbol * gIOConsoleLockedKey; -const OSSymbol * gIOConsoleUsersKey; -const OSSymbol * gIOConsoleSessionUIDKey; -const OSSymbol * gIOConsoleSessionAuditIDKey; -const OSSymbol * gIOConsoleUsersSeedKey; -const OSSymbol * gIOConsoleSessionOnConsoleKey; -const OSSymbol * gIOConsoleSessionLoginDoneKey; -const OSSymbol * gIOConsoleSessionSecureInputPIDKey; -const OSSymbol * gIOConsoleSessionScreenLockedTimeKey; -const OSSymbol * gIOConsoleSessionScreenIsLockedKey; -clock_sec_t gIOConsoleLockTime; -static bool gIOConsoleLoggedIn; 
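These globals intern each matching key ("IOProviderClass", "IONameMatch", "IOPropertyMatch", and so on) as a single OSSymbol, so key lookups during matching reduce to pointer comparison rather than string compares. A hedged, kernel-side sketch of a matching dictionary that lines up with them; the class name and location value are illustrative:

// serviceMatching() files the class name under gIOProviderClassKey.
OSDictionary *match = IOService::serviceMatching("IOMedia");
if (match) {
    OSString *loc = OSString::withCString("1");
    if (loc) {
        match->setObject(gIOLocationMatchKey, loc); // same interned symbol as above
        loc->release();
    }
    IOService *svc = IOService::waitForMatchingService(match, NSEC_PER_SEC);
    if (svc) {
        svc->release();     // waitForMatchingService() returns a retained object
    }
    match->release();       // the dictionary is not consumed by the wait
}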
+const OSSymbol * gIOMapperIDKey; +const OSSymbol * gIOUserClientClassKey; +const OSSymbol * gIOKitDebugKey; + +const OSSymbol * gIOCommandPoolSizeKey; + +const OSSymbol * gIOConsoleLockedKey; +const OSSymbol * gIOConsoleUsersKey; +const OSSymbol * gIOConsoleSessionUIDKey; +const OSSymbol * gIOConsoleSessionAuditIDKey; +const OSSymbol * gIOConsoleUsersSeedKey; +const OSSymbol * gIOConsoleSessionOnConsoleKey; +const OSSymbol * gIOConsoleSessionLoginDoneKey; +const OSSymbol * gIOConsoleSessionSecureInputPIDKey; +const OSSymbol * gIOConsoleSessionScreenLockedTimeKey; +const OSSymbol * gIOConsoleSessionScreenIsLockedKey; +clock_sec_t gIOConsoleLockTime; +static bool gIOConsoleLoggedIn; #if HIBERNATION -static OSBoolean * gIOConsoleBooterLockState; -static uint32_t gIOScreenLockState; +static OSBoolean * gIOConsoleBooterLockState; +static uint32_t gIOScreenLockState; #endif static IORegistryEntry * gIOChosenEntry; -static int gIOResourceGenerationCount; +static int gIOResourceGenerationCount; -const OSSymbol * gIOServiceKey; -const OSSymbol * gIOPublishNotification; -const OSSymbol * gIOFirstPublishNotification; -const OSSymbol * gIOMatchedNotification; -const OSSymbol * gIOFirstMatchNotification; -const OSSymbol * gIOTerminatedNotification; -const OSSymbol * gIOWillTerminateNotification; +const OSSymbol * gIOServiceKey; +const OSSymbol * gIOPublishNotification; +const OSSymbol * gIOFirstPublishNotification; +const OSSymbol * gIOMatchedNotification; +const OSSymbol * gIOFirstMatchNotification; +const OSSymbol * gIOTerminatedNotification; +const OSSymbol * gIOWillTerminateNotification; -const OSSymbol * gIOGeneralInterest; -const OSSymbol * gIOBusyInterest; -const OSSymbol * gIOAppPowerStateInterest; -const OSSymbol * gIOPriorityPowerStateInterest; -const OSSymbol * gIOConsoleSecurityInterest; +const OSSymbol * gIOGeneralInterest; +const OSSymbol * gIOBusyInterest; +const OSSymbol * gIOAppPowerStateInterest; +const OSSymbol * gIOPriorityPowerStateInterest; +const OSSymbol * gIOConsoleSecurityInterest; -const OSSymbol * gIOBSDKey; -const OSSymbol * gIOBSDNameKey; -const OSSymbol * gIOBSDMajorKey; -const OSSymbol * gIOBSDMinorKey; -const OSSymbol * gIOBSDUnitKey; +const OSSymbol * gIOBSDKey; +const OSSymbol * gIOBSDNameKey; +const OSSymbol * gIOBSDMajorKey; +const OSSymbol * gIOBSDMinorKey; +const OSSymbol * gIOBSDUnitKey; const OSSymbol * gAKSGetKey; #if defined(__i386__) || defined(__x86_64__) const OSSymbol * gIOCreateEFIDevicePathSymbol; #endif -static OSDictionary * gNotifications; -static IORecursiveLock * gNotificationLock; +static OSDictionary * gNotifications; +static IORecursiveLock * gNotificationLock; -static IOService * gIOResources; -static IOService * gIOServiceRoot; +static IOService * gIOResources; +static IOService * gIOServiceRoot; -static OSOrderedSet * gJobs; -static semaphore_port_t gJobsSemaphore; -static IOLock * gJobsLock; -static int gOutstandingJobs; -static int gNumConfigThreads; -static int gNumWaitingThreads; -static IOLock * gIOServiceBusyLock; -bool gCPUsRunning; +static OSOrderedSet * gJobs; +static semaphore_port_t gJobsSemaphore; +static IOLock * gJobsLock; +static int gOutstandingJobs; +static int gNumConfigThreads; +static int gNumWaitingThreads; +static IOLock * gIOServiceBusyLock; +bool gCPUsRunning; -static thread_t gIOTerminateThread; -static thread_t gIOTerminateWorkerThread; -static UInt32 gIOTerminateWork; -static OSArray * gIOTerminatePhase2List; -static OSArray * gIOStopList; -static OSArray * gIOStopProviderList; -static OSArray * gIOFinalizeList; 
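gIOPublishNotification through gIOWillTerminateNotification name the lifecycle phases a client can subscribe to; gNotifications maps each phase symbol to its installed notifiers under gNotificationLock. A sketch of the consumer side, with the handler body and class name illustrative:

static bool
mediaMatched(void *target, void *refCon, IOService *newService, IONotifier *notifier)
{
    // invoked once per IOMedia object that reaches the 'matched' phase
    return true;            // returning false would uninstall the notification
}

static IONotifier *
installMediaNotification(void)
{
    OSDictionary *match = IOService::serviceMatching("IOMedia");
    IONotifier *note = NULL;
    if (match) {
        note = IOService::addMatchingNotification(
            gIOMatchedNotification,         // one of the phase symbols declared above
            match, &mediaMatched, /* target */ NULL);
        match->release();                   // not consumed; drop our reference
    }
    return note;                            // caller disarms with note->remove()
}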
+static thread_t gIOTerminateThread; +static thread_t gIOTerminateWorkerThread; +static UInt32 gIOTerminateWork; +static OSArray * gIOTerminatePhase2List; +static OSArray * gIOStopList; +static OSArray * gIOStopProviderList; +static OSArray * gIOFinalizeList; -static SInt32 gIOConsoleUsersSeed; -static OSData * gIOConsoleUsersSeedValue; +static SInt32 gIOConsoleUsersSeed; +static OSData * gIOConsoleUsersSeedValue; -extern const OSSymbol * gIODTPHandleKey; +extern const OSSymbol * gIODTPHandleKey; -const OSSymbol * gIOPlatformFunctionHandlerSet; +const OSSymbol * gIOPlatformFunctionHandlerSet; -static IOLock * gIOConsoleUsersLock; -static thread_call_t gIOConsoleLockCallout; +static IOLock * gIOConsoleUsersLock; +static thread_call_t gIOConsoleLockCallout; static IONotifier * gIOServiceNullNotifier; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define LOCKREADNOTIFY() \ +#define LOCKREADNOTIFY() \ IORecursiveLockLock( gNotificationLock ) -#define LOCKWRITENOTIFY() \ +#define LOCKWRITENOTIFY() \ IORecursiveLockLock( gNotificationLock ) #define LOCKWRITE2READNOTIFY() -#define UNLOCKNOTIFY() \ +#define UNLOCKNOTIFY() \ IORecursiveLockUnlock( gNotificationLock ) #define SLEEPNOTIFY(event) \ IORecursiveLockSleep( gNotificationLock, (void *)(event), THREAD_UNINT ) @@ -225,38 +225,38 @@ static IONotifier * gIOServiceNullNotifier; #define WAKEUPNOTIFY(event) \ IORecursiveLockWakeup( gNotificationLock, (void *)(event), /* wake one */ false ) -#define randomDelay() \ - int del = read_processor_clock(); \ - del = (((int)IOThreadSelf()) ^ del ^ (del >> 10)) & 0x3ff; \ - IOSleep( del ); +#define randomDelay() \ + int del = read_processor_clock(); \ + del = (((int)IOThreadSelf()) ^ del ^ (del >> 10)) & 0x3ff; \ + IOSleep( del ); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define queue_element(entry, element, type, field) do { \ - vm_address_t __ele = (vm_address_t) (entry); \ - __ele -= -4 + ((size_t)(&((type) 4)->field)); \ - (element) = (type) __ele; \ +#define queue_element(entry, element, type, field) do { \ + vm_address_t __ele = (vm_address_t) (entry); \ + __ele -= -4 + ((size_t)(&((type) 4)->field)); \ + (element) = (type) __ele; \ } while(0) -#define iterqueue(que, elt) \ - for (queue_entry_t elt = queue_first(que); \ - !queue_end(que, elt); \ +#define iterqueue(que, elt) \ + for (queue_entry_t elt = queue_first(que); \ + !queue_end(que, elt); \ elt = queue_next(elt)) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ struct IOInterruptAccountingReporter { - IOSimpleReporter * reporter; /* Reporter responsible for communicating the statistics */ - IOInterruptAccountingData * statistics; /* The live statistics values, if any */ + IOSimpleReporter * reporter; /* Reporter responsible for communicating the statistics */ + IOInterruptAccountingData * statistics; /* The live statistics values, if any */ }; struct ArbitrationLockQueueElement { - queue_chain_t link; - IOThread thread; - IOService * service; - unsigned count; - bool required; - bool aborted; + queue_chain_t link; + IOThread thread; + IOService * service; + unsigned count; + bool required; + bool aborted; }; static queue_head_t gArbitrationLockQueueActive; @@ -264,8 +264,11 @@ static queue_head_t gArbitrationLockQueueWaiting; static queue_head_t gArbitrationLockQueueFree; static IOLock * gArbitrationLockQueueLock; -bool IOService::isInactive( void ) const - { return( 0 != (kIOServiceInactiveState & getState())); } +bool +IOService::isInactive( void ) 
const +{ + return 0 != (kIOServiceInactiveState & getState()); +} /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -274,16 +277,15 @@ bool IOService::isInactive( void ) const // Only used by the intel implementation of // IOService::requireMaxBusStall(UInt32 ns) // IOService::requireMaxInterruptDelay(uint32_t ns) -struct CpuDelayEntry -{ - IOService * fService; - UInt32 fMaxDelay; - UInt32 fDelayType; +struct CpuDelayEntry { + IOService * fService; + UInt32 fMaxDelay; + UInt32 fDelayType; }; enum { - kCpuDelayBusStall, kCpuDelayInterrupt, - kCpuNumDelayTypes + kCpuDelayBusStall, kCpuDelayInterrupt, + kCpuNumDelayTypes }; static OSData *sCpuDelayData = OSData::withCapacity(8 * sizeof(CpuDelayEntry)); @@ -303,210 +305,216 @@ setLatencyHandler(UInt32 delayType, IOService * target, bool enable); /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IOService::initialize( void ) +void +IOService::initialize( void ) { - kern_return_t err; + kern_return_t err; - gIOServicePlane = IORegistryEntry::makePlane( kIOServicePlane ); - gIOPowerPlane = IORegistryEntry::makePlane( kIOPowerPlane ); + gIOServicePlane = IORegistryEntry::makePlane( kIOServicePlane ); + gIOPowerPlane = IORegistryEntry::makePlane( kIOPowerPlane ); - gIOProviderClassKey = OSSymbol::withCStringNoCopy( kIOProviderClassKey ); - gIONameMatchKey = OSSymbol::withCStringNoCopy( kIONameMatchKey ); - gIONameMatchedKey = OSSymbol::withCStringNoCopy( kIONameMatchedKey ); - gIOPropertyMatchKey = OSSymbol::withCStringNoCopy( kIOPropertyMatchKey ); - gIOPropertyExistsMatchKey = OSSymbol::withCStringNoCopy( kIOPropertyExistsMatchKey ); - gIOPathMatchKey = OSSymbol::withCStringNoCopy( kIOPathMatchKey ); - gIOLocationMatchKey = OSSymbol::withCStringNoCopy( kIOLocationMatchKey ); - gIOParentMatchKey = OSSymbol::withCStringNoCopy( kIOParentMatchKey ); + gIOProviderClassKey = OSSymbol::withCStringNoCopy( kIOProviderClassKey ); + gIONameMatchKey = OSSymbol::withCStringNoCopy( kIONameMatchKey ); + gIONameMatchedKey = OSSymbol::withCStringNoCopy( kIONameMatchedKey ); + gIOPropertyMatchKey = OSSymbol::withCStringNoCopy( kIOPropertyMatchKey ); + gIOPropertyExistsMatchKey = OSSymbol::withCStringNoCopy( kIOPropertyExistsMatchKey ); + gIOPathMatchKey = OSSymbol::withCStringNoCopy( kIOPathMatchKey ); + gIOLocationMatchKey = OSSymbol::withCStringNoCopy( kIOLocationMatchKey ); + gIOParentMatchKey = OSSymbol::withCStringNoCopy( kIOParentMatchKey ); - gIOMatchCategoryKey = OSSymbol::withCStringNoCopy( kIOMatchCategoryKey ); - gIODefaultMatchCategoryKey = OSSymbol::withCStringNoCopy( - kIODefaultMatchCategoryKey ); - gIOMatchedServiceCountKey = OSSymbol::withCStringNoCopy( - kIOMatchedServiceCountKey ); + gIOMatchCategoryKey = OSSymbol::withCStringNoCopy( kIOMatchCategoryKey ); + gIODefaultMatchCategoryKey = OSSymbol::withCStringNoCopy( + kIODefaultMatchCategoryKey ); + gIOMatchedServiceCountKey = OSSymbol::withCStringNoCopy( + kIOMatchedServiceCountKey ); #if !CONFIG_EMBEDDED - gIOServiceLegacyMatchingRegistryIDKey = OSSymbol::withCStringNoCopy( - kIOServiceLegacyMatchingRegistryIDKey ); + gIOServiceLegacyMatchingRegistryIDKey = OSSymbol::withCStringNoCopy( + kIOServiceLegacyMatchingRegistryIDKey ); #endif - gIOUserClientClassKey = OSSymbol::withCStringNoCopy( kIOUserClientClassKey ); - - gIOResourcesKey = OSSymbol::withCStringNoCopy( kIOResourcesClass ); - gIOResourceMatchKey = OSSymbol::withCStringNoCopy( kIOResourceMatchKey ); - gIOResourceMatchedKey = OSSymbol::withCStringNoCopy( kIOResourceMatchedKey 
); - - gIODeviceMemoryKey = OSSymbol::withCStringNoCopy( "IODeviceMemory" ); - gIOInterruptControllersKey - = OSSymbol::withCStringNoCopy("IOInterruptControllers"); - gIOInterruptSpecifiersKey - = OSSymbol::withCStringNoCopy("IOInterruptSpecifiers"); - - gIOMapperIDKey = OSSymbol::withCStringNoCopy(kIOMapperIDKey); - - gIOKitDebugKey = OSSymbol::withCStringNoCopy( kIOKitDebugKey ); - - gIOCommandPoolSizeKey = OSSymbol::withCStringNoCopy( kIOCommandPoolSizeKey ); - - gIOGeneralInterest = OSSymbol::withCStringNoCopy( kIOGeneralInterest ); - gIOBusyInterest = OSSymbol::withCStringNoCopy( kIOBusyInterest ); - gIOAppPowerStateInterest = OSSymbol::withCStringNoCopy( kIOAppPowerStateInterest ); - gIOPriorityPowerStateInterest = OSSymbol::withCStringNoCopy( kIOPriorityPowerStateInterest ); - gIOConsoleSecurityInterest = OSSymbol::withCStringNoCopy( kIOConsoleSecurityInterest ); - - gIOBSDKey = OSSymbol::withCStringNoCopy(kIOBSDKey); - gIOBSDNameKey = OSSymbol::withCStringNoCopy(kIOBSDNameKey); - gIOBSDMajorKey = OSSymbol::withCStringNoCopy(kIOBSDMajorKey); - gIOBSDMinorKey = OSSymbol::withCStringNoCopy(kIOBSDMinorKey); - gIOBSDUnitKey = OSSymbol::withCStringNoCopy(kIOBSDUnitKey); - - gNotifications = OSDictionary::withCapacity( 1 ); - gIOPublishNotification = OSSymbol::withCStringNoCopy( - kIOPublishNotification ); - gIOFirstPublishNotification = OSSymbol::withCStringNoCopy( - kIOFirstPublishNotification ); - gIOMatchedNotification = OSSymbol::withCStringNoCopy( - kIOMatchedNotification ); - gIOFirstMatchNotification = OSSymbol::withCStringNoCopy( - kIOFirstMatchNotification ); - gIOTerminatedNotification = OSSymbol::withCStringNoCopy( - kIOTerminatedNotification ); - gIOWillTerminateNotification = OSSymbol::withCStringNoCopy( - kIOWillTerminateNotification ); - gIOServiceKey = OSSymbol::withCStringNoCopy( kIOServiceClass); - - gIOConsoleLockedKey = OSSymbol::withCStringNoCopy( kIOConsoleLockedKey); - gIOConsoleUsersKey = OSSymbol::withCStringNoCopy( kIOConsoleUsersKey); - gIOConsoleSessionUIDKey = OSSymbol::withCStringNoCopy( kIOConsoleSessionUIDKey); - gIOConsoleSessionAuditIDKey = OSSymbol::withCStringNoCopy( kIOConsoleSessionAuditIDKey); - - gIOConsoleUsersSeedKey = OSSymbol::withCStringNoCopy(kIOConsoleUsersSeedKey); - gIOConsoleSessionOnConsoleKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionOnConsoleKey); - gIOConsoleSessionLoginDoneKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionLoginDoneKey); - gIOConsoleSessionSecureInputPIDKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionSecureInputPIDKey); - gIOConsoleSessionScreenLockedTimeKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionScreenLockedTimeKey); - gIOConsoleSessionScreenIsLockedKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionScreenIsLockedKey); - - gIOConsoleUsersSeedValue = OSData::withBytesNoCopy(&gIOConsoleUsersSeed, sizeof(gIOConsoleUsersSeed)); - - gIOPlatformFunctionHandlerSet = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerSet); + gIOUserClientClassKey = OSSymbol::withCStringNoCopy( kIOUserClientClassKey ); + + gIOResourcesKey = OSSymbol::withCStringNoCopy( kIOResourcesClass ); + gIOResourceMatchKey = OSSymbol::withCStringNoCopy( kIOResourceMatchKey ); + gIOResourceMatchedKey = OSSymbol::withCStringNoCopy( kIOResourceMatchedKey ); + + gIODeviceMemoryKey = OSSymbol::withCStringNoCopy( "IODeviceMemory" ); + gIOInterruptControllersKey + = OSSymbol::withCStringNoCopy("IOInterruptControllers"); + gIOInterruptSpecifiersKey + = OSSymbol::withCStringNoCopy("IOInterruptSpecifiers"); + + gIOMapperIDKey = 
OSSymbol::withCStringNoCopy(kIOMapperIDKey); + + gIOKitDebugKey = OSSymbol::withCStringNoCopy( kIOKitDebugKey ); + + gIOCommandPoolSizeKey = OSSymbol::withCStringNoCopy( kIOCommandPoolSizeKey ); + + gIOGeneralInterest = OSSymbol::withCStringNoCopy( kIOGeneralInterest ); + gIOBusyInterest = OSSymbol::withCStringNoCopy( kIOBusyInterest ); + gIOAppPowerStateInterest = OSSymbol::withCStringNoCopy( kIOAppPowerStateInterest ); + gIOPriorityPowerStateInterest = OSSymbol::withCStringNoCopy( kIOPriorityPowerStateInterest ); + gIOConsoleSecurityInterest = OSSymbol::withCStringNoCopy( kIOConsoleSecurityInterest ); + + gIOBSDKey = OSSymbol::withCStringNoCopy(kIOBSDKey); + gIOBSDNameKey = OSSymbol::withCStringNoCopy(kIOBSDNameKey); + gIOBSDMajorKey = OSSymbol::withCStringNoCopy(kIOBSDMajorKey); + gIOBSDMinorKey = OSSymbol::withCStringNoCopy(kIOBSDMinorKey); + gIOBSDUnitKey = OSSymbol::withCStringNoCopy(kIOBSDUnitKey); + + gNotifications = OSDictionary::withCapacity( 1 ); + gIOPublishNotification = OSSymbol::withCStringNoCopy( + kIOPublishNotification ); + gIOFirstPublishNotification = OSSymbol::withCStringNoCopy( + kIOFirstPublishNotification ); + gIOMatchedNotification = OSSymbol::withCStringNoCopy( + kIOMatchedNotification ); + gIOFirstMatchNotification = OSSymbol::withCStringNoCopy( + kIOFirstMatchNotification ); + gIOTerminatedNotification = OSSymbol::withCStringNoCopy( + kIOTerminatedNotification ); + gIOWillTerminateNotification = OSSymbol::withCStringNoCopy( + kIOWillTerminateNotification ); + gIOServiceKey = OSSymbol::withCStringNoCopy( kIOServiceClass); + + gIOConsoleLockedKey = OSSymbol::withCStringNoCopy( kIOConsoleLockedKey); + gIOConsoleUsersKey = OSSymbol::withCStringNoCopy( kIOConsoleUsersKey); + gIOConsoleSessionUIDKey = OSSymbol::withCStringNoCopy( kIOConsoleSessionUIDKey); + gIOConsoleSessionAuditIDKey = OSSymbol::withCStringNoCopy( kIOConsoleSessionAuditIDKey); + + gIOConsoleUsersSeedKey = OSSymbol::withCStringNoCopy(kIOConsoleUsersSeedKey); + gIOConsoleSessionOnConsoleKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionOnConsoleKey); + gIOConsoleSessionLoginDoneKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionLoginDoneKey); + gIOConsoleSessionSecureInputPIDKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionSecureInputPIDKey); + gIOConsoleSessionScreenLockedTimeKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionScreenLockedTimeKey); + gIOConsoleSessionScreenIsLockedKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionScreenIsLockedKey); + + gIOConsoleUsersSeedValue = OSData::withBytesNoCopy(&gIOConsoleUsersSeed, sizeof(gIOConsoleUsersSeed)); + + gIOPlatformFunctionHandlerSet = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerSet); #if defined(__i386__) || defined(__x86_64__) - sCPULatencyFunctionName[kCpuDelayBusStall] = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerMaxBusDelay); - sCPULatencyFunctionName[kCpuDelayInterrupt] = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerMaxInterruptDelay); - uint32_t idx; - for (idx = 0; idx < kCpuNumDelayTypes; idx++) - { - sCPULatencySet[idx] = OSNumber::withNumber(-1U, 32); - sCPULatencyHolder[idx] = OSNumber::withNumber(0ULL, 64); - assert(sCPULatencySet[idx] && sCPULatencyHolder[idx]); - } - gIOCreateEFIDevicePathSymbol = OSSymbol::withCString("CreateEFIDevicePath"); + sCPULatencyFunctionName[kCpuDelayBusStall] = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerMaxBusDelay); + sCPULatencyFunctionName[kCpuDelayInterrupt] = OSSymbol::withCStringNoCopy(kIOPlatformFunctionHandlerMaxInterruptDelay); + uint32_t idx; + for 
(idx = 0; idx < kCpuNumDelayTypes; idx++) { + sCPULatencySet[idx] = OSNumber::withNumber(-1U, 32); + sCPULatencyHolder[idx] = OSNumber::withNumber(0ULL, 64); + assert(sCPULatencySet[idx] && sCPULatencyHolder[idx]); + } + gIOCreateEFIDevicePathSymbol = OSSymbol::withCString("CreateEFIDevicePath"); #endif - gNotificationLock = IORecursiveLockAlloc(); + gNotificationLock = IORecursiveLockAlloc(); - gAKSGetKey = OSSymbol::withCStringNoCopy(AKS_PLATFORM_FUNCTION_GETKEY); + gAKSGetKey = OSSymbol::withCStringNoCopy(AKS_PLATFORM_FUNCTION_GETKEY); - assert( gIOServicePlane && gIODeviceMemoryKey - && gIOInterruptControllersKey && gIOInterruptSpecifiersKey - && gIOResourcesKey && gNotifications && gNotificationLock - && gIOProviderClassKey && gIONameMatchKey && gIONameMatchedKey - && gIOMatchCategoryKey && gIODefaultMatchCategoryKey - && gIOPublishNotification && gIOMatchedNotification - && gIOTerminatedNotification && gIOServiceKey - && gIOConsoleUsersKey && gIOConsoleSessionUIDKey - && gIOConsoleSessionOnConsoleKey && gIOConsoleSessionSecureInputPIDKey - && gIOConsoleUsersSeedKey && gIOConsoleUsersSeedValue); + assert( gIOServicePlane && gIODeviceMemoryKey + && gIOInterruptControllersKey && gIOInterruptSpecifiersKey + && gIOResourcesKey && gNotifications && gNotificationLock + && gIOProviderClassKey && gIONameMatchKey && gIONameMatchedKey + && gIOMatchCategoryKey && gIODefaultMatchCategoryKey + && gIOPublishNotification && gIOMatchedNotification + && gIOTerminatedNotification && gIOServiceKey + && gIOConsoleUsersKey && gIOConsoleSessionUIDKey + && gIOConsoleSessionOnConsoleKey && gIOConsoleSessionSecureInputPIDKey + && gIOConsoleUsersSeedKey && gIOConsoleUsersSeedValue); - gJobsLock = IOLockAlloc(); - gJobs = OSOrderedSet::withCapacity( 10 ); + gJobsLock = IOLockAlloc(); + gJobs = OSOrderedSet::withCapacity( 10 ); - gIOServiceBusyLock = IOLockAlloc(); + gIOServiceBusyLock = IOLockAlloc(); - gIOConsoleUsersLock = IOLockAlloc(); + gIOConsoleUsersLock = IOLockAlloc(); - err = semaphore_create(kernel_task, &gJobsSemaphore, SYNC_POLICY_FIFO, 0); + err = semaphore_create(kernel_task, &gJobsSemaphore, SYNC_POLICY_FIFO, 0); - gIOConsoleLockCallout = thread_call_allocate(&IOService::consoleLockTimer, NULL); + gIOConsoleLockCallout = thread_call_allocate(&IOService::consoleLockTimer, NULL); - IORegistryEntry::getRegistryRoot()->setProperty(gIOConsoleLockedKey, kOSBooleanTrue); + IORegistryEntry::getRegistryRoot()->setProperty(gIOConsoleLockedKey, kOSBooleanTrue); - assert( gIOServiceBusyLock && gJobs && gJobsLock && gIOConsoleUsersLock - && gIOConsoleLockCallout && (err == KERN_SUCCESS) ); + assert( gIOServiceBusyLock && gJobs && gJobsLock && gIOConsoleUsersLock + && gIOConsoleLockCallout && (err == KERN_SUCCESS)); - gIOResources = IOResources::resources(); - assert( gIOResources ); + gIOResources = IOResources::resources(); + assert( gIOResources ); - gIOServiceNullNotifier = OSTypeAlloc(_IOServiceNullNotifier); - assert(gIOServiceNullNotifier); + gIOServiceNullNotifier = OSTypeAlloc(_IOServiceNullNotifier); + assert(gIOServiceNullNotifier); - gArbitrationLockQueueLock = IOLockAlloc(); - queue_init(&gArbitrationLockQueueActive); - queue_init(&gArbitrationLockQueueWaiting); - queue_init(&gArbitrationLockQueueFree); + gArbitrationLockQueueLock = IOLockAlloc(); + queue_init(&gArbitrationLockQueueActive); + queue_init(&gArbitrationLockQueueWaiting); + queue_init(&gArbitrationLockQueueFree); - assert( gArbitrationLockQueueLock ); + assert( gArbitrationLockQueueLock ); - gIOTerminatePhase2List = 
OSArray::withCapacity( 2 ); - gIOStopList = OSArray::withCapacity( 16 ); - gIOStopProviderList = OSArray::withCapacity( 16 ); - gIOFinalizeList = OSArray::withCapacity( 16 ); - assert( gIOTerminatePhase2List && gIOStopList && gIOStopProviderList && gIOFinalizeList ); + gIOTerminatePhase2List = OSArray::withCapacity( 2 ); + gIOStopList = OSArray::withCapacity( 16 ); + gIOStopProviderList = OSArray::withCapacity( 16 ); + gIOFinalizeList = OSArray::withCapacity( 16 ); + assert( gIOTerminatePhase2List && gIOStopList && gIOStopProviderList && gIOFinalizeList ); - // worker thread that is responsible for terminating / cleaning up threads - kernel_thread_start(&terminateThread, NULL, &gIOTerminateWorkerThread); - assert(gIOTerminateWorkerThread); - thread_set_thread_name(gIOTerminateWorkerThread, "IOServiceTerminateThread"); + // worker thread that is responsible for terminating / cleaning up threads + kernel_thread_start(&terminateThread, NULL, &gIOTerminateWorkerThread); + assert(gIOTerminateWorkerThread); + thread_set_thread_name(gIOTerminateWorkerThread, "IOServiceTerminateThread"); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #if defined(__i386__) || defined(__x86_64__) extern "C" { - const char *getCpuDelayBusStallHolderName(void); -const char *getCpuDelayBusStallHolderName(void) { - return sCPULatencyHolderName[kCpuDelayBusStall]; +const char * +getCpuDelayBusStallHolderName(void) +{ + return sCPULatencyHolderName[kCpuDelayBusStall]; } const char *getCpuInterruptDelayHolderName(void); -const char *getCpuInterruptDelayHolderName(void) { - return sCPULatencyHolderName[kCpuDelayInterrupt]; +const char * +getCpuInterruptDelayHolderName(void) +{ + return sCPULatencyHolderName[kCpuDelayInterrupt]; } - } #endif #if IOMATCHDEBUG -static UInt64 getDebugFlags( OSDictionary * props ) -{ - OSNumber * debugProp; - UInt64 debugFlags; - - debugProp = OSDynamicCast( OSNumber, - props->getObject( gIOKitDebugKey )); - if( debugProp) - debugFlags = debugProp->unsigned64BitValue(); - else - debugFlags = gIOKitDebug; +static UInt64 +getDebugFlags( OSDictionary * props ) +{ + OSNumber * debugProp; + UInt64 debugFlags; + + debugProp = OSDynamicCast( OSNumber, + props->getObject( gIOKitDebugKey )); + if (debugProp) { + debugFlags = debugProp->unsigned64BitValue(); + } else { + debugFlags = gIOKitDebug; + } - return( debugFlags ); + return debugFlags; } -static UInt64 getDebugFlags( IOService * inst ) +static UInt64 +getDebugFlags( IOService * inst ) { - OSObject * prop; - OSNumber * debugProp; - UInt64 debugFlags; + OSObject * prop; + OSNumber * debugProp; + UInt64 debugFlags; - prop = inst->copyProperty(gIOKitDebugKey); - debugProp = OSDynamicCast(OSNumber, prop); - if( debugProp) - debugFlags = debugProp->unsigned64BitValue(); - else - debugFlags = gIOKitDebug; + prop = inst->copyProperty(gIOKitDebugKey); + debugProp = OSDynamicCast(OSNumber, prop); + if (debugProp) { + debugFlags = debugProp->unsigned64BitValue(); + } else { + debugFlags = gIOKitDebug; + } - OSSafeReleaseNULL(prop); + OSSafeReleaseNULL(prop); - return( debugFlags ); + return debugFlags; } #endif @@ -516,464 +524,501 @@ static UInt64 getDebugFlags( IOService * inst ) // The default score is from the property table, & may be altered // during probe to change the start order. 
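As the comment notes, probe() receives the personality's default score and may raise or lower it to reorder competing drivers; returning NULL withdraws from matching entirely. A minimal subclass sketch of the three hooks defined below; the class name and bid value are hypothetical, and the matching OSDefineMetaClassAndStructors() would live in the implementation file:

class com_example_Driver : public IOService {
    OSDeclareDefaultStructors(com_example_Driver)

public:
    virtual IOService * probe(IOService *provider, SInt32 *score)
    {
        *score += 100;          // outbid sibling personalities
        return this;            // NULL would take us out of the running
    }

    virtual bool start(IOService *provider)
    {
        if (!IOService::start(provider)) {
            return false;
        }
        registerService();      // publish so clients can match against us
        return true;
    }

    virtual void stop(IOService *provider)
    {
        IOService::stop(provider);
    }
};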
-IOService * IOService::probe( IOService * provider, - SInt32 * score ) +IOService * +IOService::probe( IOService * provider, + SInt32 * score ) { - return( this ); + return this; } -bool IOService::start( IOService * provider ) +bool +IOService::start( IOService * provider ) { - return( true ); + return true; } -void IOService::stop( IOService * provider ) +void +IOService::stop( IOService * provider ) { } -bool IOService::init( OSDictionary * dictionary ) +bool +IOService::init( OSDictionary * dictionary ) { - bool ret; - - ret = super::init(dictionary); - if (!ret) return (false); - if (reserved) return (true); + bool ret; - reserved = IONew(ExpansionData, 1); - if (!reserved) return (false); - bzero(reserved, sizeof(*reserved)); + ret = super::init(dictionary); + if (!ret) { + return false; + } + if (reserved) { + return true; + } - /* - * TODO: Improve on this. Previous efforts to more lazily allocate this - * lock based on the presence of specifiers ran into issues as some - * platforms set up the specifiers after IOService initialization. - * - * We may be able to get away with a global lock, as this should only be - * contended by IOReporting clients and driver start/stop (unless a - * driver wants to remove/add handlers in the course of normal operation, - * which should be unlikely). - */ - reserved->interruptStatisticsLock = IOLockAlloc(); - if (!reserved->interruptStatisticsLock) return (false); + reserved = IONew(ExpansionData, 1); + if (!reserved) { + return false; + } + bzero(reserved, sizeof(*reserved)); + + /* + * TODO: Improve on this. Previous efforts to more lazily allocate this + * lock based on the presence of specifiers ran into issues as some + * platforms set up the specifiers after IOService initialization. + * + * We may be able to get away with a global lock, as this should only be + * contended by IOReporting clients and driver start/stop (unless a + * driver wants to remove/add handlers in the course of normal operation, + * which should be unlikely). + */ + reserved->interruptStatisticsLock = IOLockAlloc(); + if (!reserved->interruptStatisticsLock) { + return false; + } - return (true); + return true; } -bool IOService::init( IORegistryEntry * from, - const IORegistryPlane * inPlane ) +bool +IOService::init( IORegistryEntry * from, + const IORegistryPlane * inPlane ) { - bool ret; + bool ret; - ret = super::init(from, inPlane); - if (!ret) return (false); - if (reserved) return (true); - - reserved = IONew(ExpansionData, 1); - if (!reserved) return (false); - bzero(reserved, sizeof(*reserved)); + ret = super::init(from, inPlane); + if (!ret) { + return false; + } + if (reserved) { + return true; + } - /* - * TODO: Improve on this. Previous efforts to more lazily allocate this - * lock based on the presence of specifiers ran into issues as some - * platforms set up the specifiers after IOService initialization. - * - * We may be able to get away with a global lock, as this should only be - * contended by IOReporting clients and driver start/stop (unless a - * driver wants to remove/add handlers in the course of normal operation, - * which should be unlikely). - */ - reserved->interruptStatisticsLock = IOLockAlloc(); - if (!reserved->interruptStatisticsLock) return (false); + reserved = IONew(ExpansionData, 1); + if (!reserved) { + return false; + } + bzero(reserved, sizeof(*reserved)); + + /* + * TODO: Improve on this. 
Previous efforts to more lazily allocate this + * lock based on the presence of specifiers ran into issues as some + * platforms set up the specifiers after IOService initialization. + * + * We may be able to get away with a global lock, as this should only be + * contended by IOReporting clients and driver start/stop (unless a + * driver wants to remove/add handlers in the course of normal operation, + * which should be unlikely). + */ + reserved->interruptStatisticsLock = IOLockAlloc(); + if (!reserved->interruptStatisticsLock) { + return false; + } - return (true); + return true; } -void IOService::free( void ) +void +IOService::free( void ) { - int i = 0; - requireMaxBusStall(0); - requireMaxInterruptDelay(0); - if( getPropertyTable()) - unregisterAllInterest(); - PMfree(); + int i = 0; + requireMaxBusStall(0); + requireMaxInterruptDelay(0); + if (getPropertyTable()) { + unregisterAllInterest(); + } + PMfree(); - if (reserved) { - if (reserved->interruptStatisticsArray) { - for (i = 0; i < reserved->interruptStatisticsArrayCount; i++) { - if (reserved->interruptStatisticsArray[i].reporter) - reserved->interruptStatisticsArray[i].reporter->release(); - } + if (reserved) { + if (reserved->interruptStatisticsArray) { + for (i = 0; i < reserved->interruptStatisticsArrayCount; i++) { + if (reserved->interruptStatisticsArray[i].reporter) { + reserved->interruptStatisticsArray[i].reporter->release(); + } + } - IODelete(reserved->interruptStatisticsArray, IOInterruptAccountingReporter, reserved->interruptStatisticsArrayCount); - } + IODelete(reserved->interruptStatisticsArray, IOInterruptAccountingReporter, reserved->interruptStatisticsArrayCount); + } - if (reserved->interruptStatisticsLock) - IOLockFree(reserved->interruptStatisticsLock); - IODelete(reserved, ExpansionData, 1); - } + if (reserved->interruptStatisticsLock) { + IOLockFree(reserved->interruptStatisticsLock); + } + IODelete(reserved, ExpansionData, 1); + } - if (_numInterruptSources && _interruptSources) - { - for (i = 0; i < _numInterruptSources; i++) { - void * block = _interruptSourcesPrivate(this)[i].vectorBlock; - if (block) Block_release(block); + if (_numInterruptSources && _interruptSources) { + for (i = 0; i < _numInterruptSources; i++) { + void * block = _interruptSourcesPrivate(this)[i].vectorBlock; + if (block) { + Block_release(block); + } + } + IOFree(_interruptSources, + _numInterruptSources * sizeofAllIOInterruptSource); + _interruptSources = 0; } - IOFree(_interruptSources, - _numInterruptSources * sizeofAllIOInterruptSource); - _interruptSources = 0; - } - super::free(); + super::free(); } /* * Attach in service plane */ -bool IOService::attach( IOService * provider ) -{ - bool ok; - uint32_t count; - AbsoluteTime deadline; - int waitResult = THREAD_AWAKENED; - bool wait, computeDeadline = true; - - if( provider) { - - if( gIOKitDebug & kIOLogAttach) - LOG( "%s::attach(%s)\n", getName(), - provider->getName()); - - ok = false; - do - { - wait = false; - provider->lockForArbitration(); - if (provider->__state[0] & kIOServiceInactiveState) ok = false; - else - { - count = provider->getChildCount(gIOServicePlane); - wait = (count > (kIOServiceBusyMax - 4)); - if (!wait) ok = attachToParent(provider, gIOServicePlane); - else - { - IOLog("stalling for detach from %s\n", provider->getName()); - IOLockLock( gIOServiceBusyLock ); - provider->__state[1] |= kIOServiceWaitDetachState; +bool +IOService::attach( IOService * provider ) +{ + bool ok; + uint32_t count; + AbsoluteTime deadline; + int waitResult = 
THREAD_AWAKENED; + bool wait, computeDeadline = true; + + if (provider) { + if (gIOKitDebug & kIOLogAttach) { + LOG( "%s::attach(%s)\n", getName(), + provider->getName()); } - } - provider->unlockForArbitration(); - if (wait) - { - if (computeDeadline) - { - clock_interval_to_deadline(15, kSecondScale, &deadline); - computeDeadline = false; - } - assert_wait_deadline((event_t)&provider->__provider, THREAD_UNINT, deadline); - IOLockUnlock( gIOServiceBusyLock ); - waitResult = thread_block(THREAD_CONTINUE_NULL); - wait = (waitResult != THREAD_TIMED_OUT); - } - } - while (wait); - } else { - gIOServiceRoot = this; - ok = attachToParent( getRegistryRoot(), gIOServicePlane); - } + ok = false; + do{ + wait = false; + provider->lockForArbitration(); + if (provider->__state[0] & kIOServiceInactiveState) { + ok = false; + } else { + count = provider->getChildCount(gIOServicePlane); + wait = (count > (kIOServiceBusyMax - 4)); + if (!wait) { + ok = attachToParent(provider, gIOServicePlane); + } else { + IOLog("stalling for detach from %s\n", provider->getName()); + IOLockLock( gIOServiceBusyLock ); + provider->__state[1] |= kIOServiceWaitDetachState; + } + } + provider->unlockForArbitration(); + if (wait) { + if (computeDeadline) { + clock_interval_to_deadline(15, kSecondScale, &deadline); + computeDeadline = false; + } + assert_wait_deadline((event_t)&provider->__provider, THREAD_UNINT, deadline); + IOLockUnlock( gIOServiceBusyLock ); + waitResult = thread_block(THREAD_CONTINUE_NULL); + wait = (waitResult != THREAD_TIMED_OUT); + } + }while (wait); + } else { + gIOServiceRoot = this; + ok = attachToParent( getRegistryRoot(), gIOServicePlane); + } - if (ok && !__provider) (void) getProvider(); + if (ok && !__provider) { + (void) getProvider(); + } - return( ok ); + return ok; } -IOService * IOService::getServiceRoot( void ) +IOService * +IOService::getServiceRoot( void ) { - return( gIOServiceRoot ); + return gIOServiceRoot; } -void IOService::detach( IOService * provider ) +void +IOService::detach( IOService * provider ) { - IOService * newProvider = 0; - SInt32 busy; - bool adjParent; + IOService * newProvider = 0; + SInt32 busy; + bool adjParent; - if( gIOKitDebug & kIOLogAttach) - LOG("%s::detach(%s)\n", getName(), provider->getName()); - - lockForArbitration(); + if (gIOKitDebug & kIOLogAttach) { + LOG("%s::detach(%s)\n", getName(), provider->getName()); + } - uint64_t regID1 = provider->getRegistryEntryID(); - uint64_t regID2 = getRegistryEntryID(); - IOServiceTrace( - IOSERVICE_DETACH, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); + lockForArbitration(); - adjParent = ((busy = (__state[1] & kIOServiceBusyStateMask)) - && (provider == getProvider())); + uint64_t regID1 = provider->getRegistryEntryID(); + uint64_t regID2 = getRegistryEntryID(); + IOServiceTrace( + IOSERVICE_DETACH, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); - detachFromParent( provider, gIOServicePlane ); + adjParent = ((busy = (__state[1] & kIOServiceBusyStateMask)) + && (provider == getProvider())); - if( busy) { - newProvider = getProvider(); - if( busy && (__state[1] & kIOServiceTermPhase3State) && (0 == newProvider)) - _adjustBusy( -busy ); - } + detachFromParent( provider, gIOServicePlane ); - if (kIOServiceInactiveState & __state[0]) { - getMetaClass()->removeInstance(this); - IORemoveServicePlatformActions(this); - } + if (busy) { + newProvider = getProvider(); + if (busy && (__state[1] & 
kIOServiceTermPhase3State) && (0 == newProvider)) { + _adjustBusy( -busy ); + } + } - unlockForArbitration(); + if (kIOServiceInactiveState & __state[0]) { + getMetaClass()->removeInstance(this); + IORemoveServicePlatformActions(this); + } - if( newProvider && adjParent) { - newProvider->lockForArbitration(); - newProvider->_adjustBusy(1); - newProvider->unlockForArbitration(); - } + unlockForArbitration(); - // check for last client detach from a terminated service - if( provider->lockForArbitration( true )) - { - if (kIOServiceStartState & __state[1]) - { - provider->scheduleTerminatePhase2(); + if (newProvider && adjParent) { + newProvider->lockForArbitration(); + newProvider->_adjustBusy(1); + newProvider->unlockForArbitration(); } - if( adjParent) provider->_adjustBusy( -1 ); - if( (provider->__state[1] & kIOServiceTermPhase3State) - && (0 == provider->getClient())) { - provider->scheduleFinalize(false); - } - IOLockLock( gIOServiceBusyLock ); - if (kIOServiceWaitDetachState & provider->__state[1]) - { - provider->__state[1] &= ~kIOServiceWaitDetachState; - thread_wakeup(&provider->__provider); - } - IOLockUnlock( gIOServiceBusyLock ); + // check for last client detach from a terminated service + if (provider->lockForArbitration( true )) { + if (kIOServiceStartState & __state[1]) { + provider->scheduleTerminatePhase2(); + } + if (adjParent) { + provider->_adjustBusy( -1 ); + } + if ((provider->__state[1] & kIOServiceTermPhase3State) + && (0 == provider->getClient())) { + provider->scheduleFinalize(false); + } - provider->unlockForArbitration(); - } + IOLockLock( gIOServiceBusyLock ); + if (kIOServiceWaitDetachState & provider->__state[1]) { + provider->__state[1] &= ~kIOServiceWaitDetachState; + thread_wakeup(&provider->__provider); + } + IOLockUnlock( gIOServiceBusyLock ); + + provider->unlockForArbitration(); + } } /* * Register instance - publish it for matching */ -void IOService::registerService( IOOptionBits options ) +void +IOService::registerService( IOOptionBits options ) { - char * pathBuf; - const char * path; - char * skip; - int len; - enum { kMaxPathLen = 256 }; - enum { kMaxChars = 63 }; - - IORegistryEntry * parent = this; - IORegistryEntry * root = getRegistryRoot(); - while( parent && (parent != root)) - parent = parent->getParentEntry( gIOServicePlane); + char * pathBuf; + const char * path; + char * skip; + int len; + enum { kMaxPathLen = 256 }; + enum { kMaxChars = 63 }; - if( parent != root) { - IOLog("%s: not registry member at registerService()\n", getName()); - return; - } - - // Allow the Platform Expert to adjust this node. - if( gIOPlatform && (!gIOPlatform->platformAdjustService(this))) - return; + IORegistryEntry * parent = this; + IORegistryEntry * root = getRegistryRoot(); + while (parent && (parent != root)) { + parent = parent->getParentEntry( gIOServicePlane); + } - IOInstallServicePlatformActions(this); + if (parent != root) { + IOLog("%s: not registry member at registerService()\n", getName()); + return; + } - if( (this != gIOResources) - && (kIOLogRegister & gIOKitDebug)) { + // Allow the Platform Expert to adjust this node. 
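The kIOLogRegister path printed above is clamped to its last kMaxChars characters and then resynchronized on a '/' so the logged tail starts at a component boundary. The same trimming as a standalone sketch:

#include <cstdio>
#include <cstring>

enum { kMaxChars = 63 };

// Print a registry path, trimmed from the left to at most kMaxChars bytes.
static void
logTrimmedPath(const char *path, int len)
{
    if (len > kMaxChars) {
        printf("..");
        path += len - kMaxChars;                // keep only the tail
        const char *skip = strchr(path, '/');   // realign on a separator
        if (skip) {
            path = skip;
        }
    }
    printf("%s\n", path);
}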
+ if (gIOPlatform && (!gIOPlatform->platformAdjustService(this))) { + return; + } - pathBuf = (char *) IOMalloc( kMaxPathLen ); + IOInstallServicePlatformActions(this); - IOLog( "Registering: " ); + if ((this != gIOResources) + && (kIOLogRegister & gIOKitDebug)) { + pathBuf = (char *) IOMalloc( kMaxPathLen ); - len = kMaxPathLen; - if( pathBuf && getPath( pathBuf, &len, gIOServicePlane)) { + IOLog( "Registering: " ); - path = pathBuf; - if( len > kMaxChars) { - IOLog(".."); - len -= kMaxChars; - path += len; - if( (skip = strchr( path, '/'))) - path = skip; - } - } else - path = getName(); + len = kMaxPathLen; + if (pathBuf && getPath( pathBuf, &len, gIOServicePlane)) { + path = pathBuf; + if (len > kMaxChars) { + IOLog(".."); + len -= kMaxChars; + path += len; + if ((skip = strchr( path, '/'))) { + path = skip; + } + } + } else { + path = getName(); + } - IOLog( "%s\n", path ); + IOLog( "%s\n", path ); - if( pathBuf) - IOFree( pathBuf, kMaxPathLen ); - } + if (pathBuf) { + IOFree( pathBuf, kMaxPathLen ); + } + } - startMatching( options ); + startMatching( options ); } -void IOService::startMatching( IOOptionBits options ) +void +IOService::startMatching( IOOptionBits options ) { - IOService * provider; - UInt32 prevBusy = 0; - bool needConfig; - bool needWake = false; - bool ok; - bool sync; - bool waitAgain; + IOService * provider; + UInt32 prevBusy = 0; + bool needConfig; + bool needWake = false; + bool ok; + bool sync; + bool waitAgain; - lockForArbitration(); + lockForArbitration(); - sync = (options & kIOServiceSynchronous) - || ((provider = getProvider()) - && (provider->__state[1] & kIOServiceSynchronousState)); + sync = (options & kIOServiceSynchronous) + || ((provider = getProvider()) + && (provider->__state[1] & kIOServiceSynchronousState)); - if ( options & kIOServiceAsynchronous ) + if (options & kIOServiceAsynchronous) { sync = false; + } - needConfig = (0 == (__state[1] & (kIOServiceNeedConfigState | kIOServiceConfigRunning))) - && (0 == (__state[0] & kIOServiceInactiveState)); + needConfig = (0 == (__state[1] & (kIOServiceNeedConfigState | kIOServiceConfigRunning))) + && (0 == (__state[0] & kIOServiceInactiveState)); - __state[1] |= kIOServiceNeedConfigState; + __state[1] |= kIOServiceNeedConfigState; // __state[0] &= ~kIOServiceInactiveState; // if( sync) LOG("OSKernelStackRemaining = %08x @ %s\n", // OSKernelStackRemaining(), getName()); - if( needConfig) { - needWake = (0 != (kIOServiceSyncPubState & __state[1])); - } - - if( sync) - __state[1] |= kIOServiceSynchronousState; - else - __state[1] &= ~kIOServiceSynchronousState; - - if( needConfig) prevBusy = _adjustBusy( 1 ); - - unlockForArbitration(); - - if( needConfig) { - - if( needWake) { - IOLockLock( gIOServiceBusyLock ); - thread_wakeup( (event_t) this/*&__state[1]*/ ); - IOLockUnlock( gIOServiceBusyLock ); + if (needConfig) { + needWake = (0 != (kIOServiceSyncPubState & __state[1])); + } - } else if( !sync || (kIOServiceAsynchronous & options)) { + if (sync) { + __state[1] |= kIOServiceSynchronousState; + } else { + __state[1] &= ~kIOServiceSynchronousState; + } - ok = (0 != _IOServiceJob::startJob( this, kMatchNubJob, options )); - - } else do { + if (needConfig) { + prevBusy = _adjustBusy( 1 ); + } - if( (__state[1] & kIOServiceNeedConfigState)) - doServiceMatch( options ); + unlockForArbitration(); - lockForArbitration(); - IOLockLock( gIOServiceBusyLock ); + if (needConfig) { + if (needWake) { + IOLockLock( gIOServiceBusyLock ); + thread_wakeup((event_t) this /*&__state[1]*/ ); + IOLockUnlock( 
gIOServiceBusyLock ); + } else if (!sync || (kIOServiceAsynchronous & options)) { + ok = (0 != _IOServiceJob::startJob( this, kMatchNubJob, options )); + } else { + do { + if ((__state[1] & kIOServiceNeedConfigState)) { + doServiceMatch( options ); + } - waitAgain = ((prevBusy < (__state[1] & kIOServiceBusyStateMask)) - && (0 == (__state[0] & kIOServiceInactiveState))); + lockForArbitration(); + IOLockLock( gIOServiceBusyLock ); - if( waitAgain) - __state[1] |= kIOServiceSyncPubState | kIOServiceBusyWaiterState; - else - __state[1] &= ~kIOServiceSyncPubState; + waitAgain = ((prevBusy < (__state[1] & kIOServiceBusyStateMask)) + && (0 == (__state[0] & kIOServiceInactiveState))); - unlockForArbitration(); + if (waitAgain) { + __state[1] |= kIOServiceSyncPubState | kIOServiceBusyWaiterState; + } else { + __state[1] &= ~kIOServiceSyncPubState; + } - if( waitAgain) - assert_wait( (event_t) this/*&__state[1]*/, THREAD_UNINT); + unlockForArbitration(); - IOLockUnlock( gIOServiceBusyLock ); - if( waitAgain) - thread_block(THREAD_CONTINUE_NULL); + if (waitAgain) { + assert_wait((event_t) this /*&__state[1]*/, THREAD_UNINT); + } - } while( waitAgain ); - } + IOLockUnlock( gIOServiceBusyLock ); + if (waitAgain) { + thread_block(THREAD_CONTINUE_NULL); + } + } while (waitAgain); + } + } } -IOReturn IOService::catalogNewDrivers( OSOrderedSet * newTables ) +IOReturn +IOService::catalogNewDrivers( OSOrderedSet * newTables ) { - OSDictionary * table; - OSSet * set; - OSSet * allSet = 0; - IOService * service; + OSDictionary * table; + OSSet * set; + OSSet * allSet = 0; + IOService * service; #if IOMATCHDEBUG - SInt32 count = 0; + SInt32 count = 0; #endif - newTables->retain(); - - while( (table = (OSDictionary *) newTables->getFirstObject())) { - - LOCKWRITENOTIFY(); - set = (OSSet *) copyExistingServices( table, - kIOServiceRegisteredState, - kIOServiceExistingSet); - UNLOCKNOTIFY(); - if( set) { + newTables->retain(); + while ((table = (OSDictionary *) newTables->getFirstObject())) { + LOCKWRITENOTIFY(); + set = (OSSet *) copyExistingServices( table, + kIOServiceRegisteredState, + kIOServiceExistingSet); + UNLOCKNOTIFY(); + if (set) { #if IOMATCHDEBUG - count += set->getCount(); + count += set->getCount(); #endif - if (allSet) { - allSet->merge((const OSSet *) set); - set->release(); - } - else - allSet = set; - } + if (allSet) { + allSet->merge((const OSSet *) set); + set->release(); + } else { + allSet = set; + } + } #if IOMATCHDEBUG - if( getDebugFlags( table ) & kIOLogMatch) - LOG("Matching service count = %ld\n", (long)count); + if (getDebugFlags( table ) & kIOLogMatch) { + LOG("Matching service count = %ld\n", (long)count); + } #endif - newTables->removeObject(table); - } + newTables->removeObject(table); + } - if (allSet) { - while( (service = (IOService *) allSet->getAnyObject())) { - service->startMatching(kIOServiceAsynchronous); - allSet->removeObject(service); - } - allSet->release(); - } + if (allSet) { + while ((service = (IOService *) allSet->getAnyObject())) { + service->startMatching(kIOServiceAsynchronous); + allSet->removeObject(service); + } + allSet->release(); + } - newTables->release(); + newTables->release(); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } - _IOServiceJob * _IOServiceJob::startJob( IOService * nub, int type, - IOOptionBits options ) +_IOServiceJob * +_IOServiceJob::startJob( IOService * nub, int type, + IOOptionBits options ) { - _IOServiceJob * job; + _IOServiceJob * job; - job = new _IOServiceJob; - if( job && !job->init()) { - job->release(); - job = 
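// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] startJob() uses the libkern
// construction idiom that recurs throughout this file: allocation itself is
// assumed to succeed or return NULL, init() reports failure, and on failure
// the caller must release() the half-built object. A userland sketch with a
// hypothetical refcounted base (not atomic; a model only):
//
//   struct RefObject {
//       int refs = 1;
//       void retain()  { ++refs; }
//       void release() { if (--refs == 0) delete this; }
//       virtual bool init() { return true; }
//       virtual ~RefObject() = default;
//   };
//
//   template <typename T>
//   static T *
//   makeObject()
//   {
//       T * obj = new T;
//       if (obj && !obj->init()) {   // init() failed: drop the allocation
//           obj->release();
//           obj = nullptr;           // xnu spells this 0
//       }
//       return obj;                  // caller owns one reference, or null
//   }
// ---------------------------------------------------------------------------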
0; - } + job = new _IOServiceJob; + if (job && !job->init()) { + job->release(); + job = 0; + } - if( job) { - job->type = type; - job->nub = nub; - job->options = options; - nub->retain(); // thread will release() - pingConfig( job ); - } + if (job) { + job->type = type; + job->nub = nub; + job->options = options; + nub->retain(); // thread will release() + pingConfig( job ); + } - return( job ); + return job; } /* @@ -981,14 +1026,16 @@ IOReturn IOService::catalogNewDrivers( OSOrderedSet * newTables ) * a property table. */ -bool IOService::matchPropertyTable( OSDictionary * table, SInt32 * score ) +bool +IOService::matchPropertyTable( OSDictionary * table, SInt32 * score ) { - return( matchPropertyTable(table) ); + return matchPropertyTable(table); } -bool IOService::matchPropertyTable( OSDictionary * table ) +bool +IOService::matchPropertyTable( OSDictionary * table ) { - return( true ); + return true; } /* @@ -996,186 +1043,207 @@ bool IOService::matchPropertyTable( OSDictionary * table ) * before first driver is attached. */ -IOReturn IOService::getResources( void ) +IOReturn +IOService::getResources( void ) { - return( kIOReturnSuccess); + return kIOReturnSuccess; } /* * Client/provider accessors */ -IOService * IOService::getProvider( void ) const +IOService * +IOService::getProvider( void ) const { - IOService * self = (IOService *) this; - IOService * parent; - SInt32 generation; + IOService * self = (IOService *) this; + IOService * parent; + SInt32 generation; - generation = getRegistryEntryGenerationCount(); - if( __providerGeneration == generation) - return( __provider ); + generation = getRegistryEntryGenerationCount(); + if (__providerGeneration == generation) { + return __provider; + } - parent = (IOService *) getParentEntry( gIOServicePlane); - if( parent == IORegistryEntry::getRegistryRoot()) - /* root is not an IOService */ - parent = 0; + parent = (IOService *) getParentEntry( gIOServicePlane); + if (parent == IORegistryEntry::getRegistryRoot()) { + /* root is not an IOService */ + parent = 0; + } - self->__provider = parent; - OSMemoryBarrier(); - // save the count from before call to getParentEntry() - self->__providerGeneration = generation; + self->__provider = parent; + OSMemoryBarrier(); + // save the count from before call to getParentEntry() + self->__providerGeneration = generation; - return( parent ); + return parent; } -IOWorkLoop * IOService::getWorkLoop() const -{ - IOService *provider = getProvider(); +IOWorkLoop * +IOService::getWorkLoop() const +{ + IOService *provider = getProvider(); - if (provider) - return provider->getWorkLoop(); - else - return 0; + if (provider) { + return provider->getWorkLoop(); + } else { + return 0; + } } -OSIterator * IOService::getProviderIterator( void ) const +OSIterator * +IOService::getProviderIterator( void ) const { - return( getParentIterator( gIOServicePlane)); + return getParentIterator( gIOServicePlane); } -IOService * IOService::getClient( void ) const +IOService * +IOService::getClient( void ) const { - return( (IOService *) getChildEntry( gIOServicePlane)); + return (IOService *) getChildEntry( gIOServicePlane); } -OSIterator * IOService::getClientIterator( void ) const +OSIterator * +IOService::getClientIterator( void ) const { - return( getChildIterator( gIOServicePlane)); + return getChildIterator( gIOServicePlane); } -OSIterator * _IOOpenServiceIterator::iterator( OSIterator * _iter, - const IOService * client, - const IOService * provider ) +OSIterator * +_IOOpenServiceIterator::iterator( OSIterator * 
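// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] getProvider() above is a
// generation-validated cache: the parent pointer is recomputed only when the
// registry generation count has moved, and the store order (pointer first,
// then generation, separated by OSMemoryBarrier()) keeps a reader from
// pairing a fresh generation with a stale pointer. A userland model using
// C++11 atomics as stand-ins (all names hypothetical):
//
//   #include <atomic>
//
//   struct GenCache {
//       std::atomic<long>  cachedGen { -1 };
//       std::atomic<void*> cachedVal { nullptr };
//
//       void * lookup(long currentGen, void * (*recompute)()) {
//           if (cachedGen.load(std::memory_order_acquire) == currentGen) {
//               return cachedVal.load(std::memory_order_relaxed);
//           }
//           void * v = recompute();
//           cachedVal.store(v, std::memory_order_relaxed);
//           // release ordering plays the role of OSMemoryBarrier()
//           cachedGen.store(currentGen, std::memory_order_release);
//           return v;
//       }
//   };
// ---------------------------------------------------------------------------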
_iter, + const IOService * client, + const IOService * provider ) { - _IOOpenServiceIterator * inst; + _IOOpenServiceIterator * inst; - if( !_iter) - return( 0 ); + if (!_iter) { + return 0; + } - inst = new _IOOpenServiceIterator; + inst = new _IOOpenServiceIterator; - if( inst && !inst->init()) { - inst->release(); - inst = 0; - } - if( inst) { - inst->iter = _iter; - inst->client = client; - inst->provider = provider; - } + if (inst && !inst->init()) { + inst->release(); + inst = 0; + } + if (inst) { + inst->iter = _iter; + inst->client = client; + inst->provider = provider; + } - return( inst ); + return inst; } -void _IOOpenServiceIterator::free() +void +_IOOpenServiceIterator::free() { - iter->release(); - if( last) - last->unlockForArbitration(); - OSIterator::free(); + iter->release(); + if (last) { + last->unlockForArbitration(); + } + OSIterator::free(); } -OSObject * _IOOpenServiceIterator::getNextObject() +OSObject * +_IOOpenServiceIterator::getNextObject() { - IOService * next; - - if( last) - last->unlockForArbitration(); + IOService * next; - while( (next = (IOService *) iter->getNextObject())) { + if (last) { + last->unlockForArbitration(); + } - next->lockForArbitration(); - if( (client && (next->isOpen( client ))) - || (provider && (provider->isOpen( next ))) ) - break; - next->unlockForArbitration(); - } + while ((next = (IOService *) iter->getNextObject())) { + next->lockForArbitration(); + if ((client && (next->isOpen( client ))) + || (provider && (provider->isOpen( next )))) { + break; + } + next->unlockForArbitration(); + } - last = next; + last = next; - return( next ); + return next; } -bool _IOOpenServiceIterator::isValid() +bool +_IOOpenServiceIterator::isValid() { - return( iter->isValid() ); + return iter->isValid(); } -void _IOOpenServiceIterator::reset() +void +_IOOpenServiceIterator::reset() { - if( last) { - last->unlockForArbitration(); - last = 0; - } - iter->reset(); + if (last) { + last->unlockForArbitration(); + last = 0; + } + iter->reset(); } -OSIterator * IOService::getOpenProviderIterator( void ) const +OSIterator * +IOService::getOpenProviderIterator( void ) const { - return( _IOOpenServiceIterator::iterator( getProviderIterator(), this, 0 )); + return _IOOpenServiceIterator::iterator( getProviderIterator(), this, 0 ); } -OSIterator * IOService::getOpenClientIterator( void ) const +OSIterator * +IOService::getOpenClientIterator( void ) const { - return( _IOOpenServiceIterator::iterator( getClientIterator(), 0, this )); + return _IOOpenServiceIterator::iterator( getClientIterator(), 0, this ); } -IOReturn IOService::callPlatformFunction( const OSSymbol * functionName, - bool waitForFunction, - void *param1, void *param2, - void *param3, void *param4 ) +IOReturn +IOService::callPlatformFunction( const OSSymbol * functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4 ) { - IOReturn result = kIOReturnUnsupported; - IOService *provider; + IOReturn result = kIOReturnUnsupported; + IOService *provider; - if (gIOPlatformFunctionHandlerSet == functionName) - { + if (gIOPlatformFunctionHandlerSet == functionName) { #if defined(__i386__) || defined(__x86_64__) - const OSSymbol * functionHandlerName = (const OSSymbol *) param1; - IOService * target = (IOService *) param2; - bool enable = (param3 != 0); - - if (sCPULatencyFunctionName[kCpuDelayBusStall] == functionHandlerName) - result = setLatencyHandler(kCpuDelayBusStall, target, enable); - else if (sCPULatencyFunctionName[kCpuDelayInterrupt] == param1) - result = 
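// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] _IOOpenServiceIterator above
// yields each element while still holding its per-object arbitration lock,
// releasing it only on the next call (or on reset()/free(), via `last`), so
// the caller can inspect isOpen() state race-free. A userland model of that
// contract (hypothetical types, std::mutex standing in for arbitration):
//
//   #include <mutex>
//   #include <vector>
//
//   struct Node { std::mutex lock; bool open; };
//
//   struct OpenIter {
//       std::vector<Node*> nodes;
//       size_t idx = 0;
//       Node * last = nullptr;
//
//       Node * next() {
//           if (last) last->lock.unlock();   // drop the previous element
//           while (idx < nodes.size()) {
//               Node * n = nodes[idx++];
//               n->lock.lock();              // lock before inspecting state
//               if (n->open) { last = n; return n; }  // yield still locked
//               n->lock.unlock();            // filtered out: move on
//           }
//           last = nullptr;
//           return nullptr;
//       }
//       ~OpenIter() { if (last) last->lock.unlock(); }  // mirrors ::free()
//   };
// ---------------------------------------------------------------------------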
setLatencyHandler(kCpuDelayInterrupt, target, enable); + const OSSymbol * functionHandlerName = (const OSSymbol *) param1; + IOService * target = (IOService *) param2; + bool enable = (param3 != 0); + + if (sCPULatencyFunctionName[kCpuDelayBusStall] == functionHandlerName) { + result = setLatencyHandler(kCpuDelayBusStall, target, enable); + } else if (sCPULatencyFunctionName[kCpuDelayInterrupt] == param1) { + result = setLatencyHandler(kCpuDelayInterrupt, target, enable); + } #endif /* defined(__i386__) || defined(__x86_64__) */ - } + } + + if ((kIOReturnUnsupported == result) && (provider = getProvider())) { + result = provider->callPlatformFunction(functionName, waitForFunction, + param1, param2, param3, param4); + } - if ((kIOReturnUnsupported == result) && (provider = getProvider())) { - result = provider->callPlatformFunction(functionName, waitForFunction, - param1, param2, param3, param4); - } - - return result; + return result; } -IOReturn IOService::callPlatformFunction( const char * functionName, - bool waitForFunction, - void *param1, void *param2, - void *param3, void *param4 ) +IOReturn +IOService::callPlatformFunction( const char * functionName, + bool waitForFunction, + void *param1, void *param2, + void *param3, void *param4 ) { - IOReturn result = kIOReturnNoMemory; - const OSSymbol *functionSymbol = OSSymbol::withCString(functionName); - - if (functionSymbol != 0) { - result = callPlatformFunction(functionSymbol, waitForFunction, - param1, param2, param3, param4); - functionSymbol->release(); - } - - return result; + IOReturn result = kIOReturnNoMemory; + const OSSymbol *functionSymbol = OSSymbol::withCString(functionName); + + if (functionSymbol != 0) { + result = callPlatformFunction(functionSymbol, waitForFunction, + param1, param2, param3, param4); + functionSymbol->release(); + } + + return result; } @@ -1183,468 +1251,459 @@ IOReturn IOService::callPlatformFunction( const char * functionName, * Accessors for global services */ -IOPlatformExpert * IOService::getPlatform( void ) +IOPlatformExpert * +IOService::getPlatform( void ) { - return( gIOPlatform); + return gIOPlatform; } -class IOPMrootDomain * IOService::getPMRootDomain( void ) +class IOPMrootDomain * + IOService::getPMRootDomain( void ) { - return( gIOPMRootDomain); + return gIOPMRootDomain; } -IOService * IOService::getResourceService( void ) +IOService * +IOService::getResourceService( void ) { - return( gIOResources ); + return gIOResources; } -void IOService::setPlatform( IOPlatformExpert * platform) +void +IOService::setPlatform( IOPlatformExpert * platform) { - gIOPlatform = platform; - gIOResources->attachToParent( gIOServiceRoot, gIOServicePlane ); + gIOPlatform = platform; + gIOResources->attachToParent( gIOServiceRoot, gIOServicePlane ); #if defined(__i386__) || defined(__x86_64__) - static const char * keys[kCpuNumDelayTypes] = { - kIOPlatformMaxBusDelay, kIOPlatformMaxInterruptDelay }; - const OSObject * objs[2]; - OSArray * array; - uint32_t idx; - - for (idx = 0; idx < kCpuNumDelayTypes; idx++) - { - objs[0] = sCPULatencySet[idx]; - objs[1] = sCPULatencyHolder[idx]; - array = OSArray::withObjects(objs, 2); - if (!array) break; - platform->setProperty(keys[idx], array); - array->release(); - } + static const char * keys[kCpuNumDelayTypes] = { + kIOPlatformMaxBusDelay, kIOPlatformMaxInterruptDelay }; + const OSObject * objs[2]; + OSArray * array; + uint32_t idx; + + for (idx = 0; idx < kCpuNumDelayTypes; idx++) { + objs[0] = sCPULatencySet[idx]; + objs[1] = sCPULatencyHolder[idx]; + array = 
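// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] callPlatformFunction() above
// is a chain of responsibility: a node that returns kIOReturnUnsupported
// forwards the call to its provider, so a handler anywhere up the service
// tree can claim it. A compact sketch of that shape (hypothetical names):
//
//   #include <string>
//
//   struct Service {
//       Service * provider = nullptr;
//       virtual int handle(const std::string &) { return -1; /*unsupported*/ }
//       int callPlatformFunction(const std::string & fn) {
//           int result = handle(fn);
//           if (result == -1 && provider) {               // not handled here
//               result = provider->callPlatformFunction(fn);  // walk upward
//           }
//           return result;
//       }
//       virtual ~Service() = default;
//   };
// ---------------------------------------------------------------------------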
OSArray::withObjects(objs, 2); + if (!array) { + break; + } + platform->setProperty(keys[idx], array); + array->release(); + } #endif /* defined(__i386__) || defined(__x86_64__) */ } -void IOService::setPMRootDomain( class IOPMrootDomain * rootDomain) +void +IOService::setPMRootDomain( class IOPMrootDomain * rootDomain) { - gIOPMRootDomain = rootDomain; - publishResource("IOKit"); + gIOPMRootDomain = rootDomain; + publishResource("IOKit"); } /* * Stacking change */ -bool IOService::lockForArbitration( bool isSuccessRequired ) -{ - bool found; - bool success; - ArbitrationLockQueueElement * element; - ArbitrationLockQueueElement * active; - ArbitrationLockQueueElement * waiting; - - enum { kPutOnFreeQueue, kPutOnActiveQueue, kPutOnWaitingQueue } action; - - // lock global access - IOTakeLock( gArbitrationLockQueueLock ); - - // obtain an unused queue element - if( !queue_empty( &gArbitrationLockQueueFree )) { - queue_remove_first( &gArbitrationLockQueueFree, - element, - ArbitrationLockQueueElement *, - link ); - } else { - element = IONew( ArbitrationLockQueueElement, 1 ); - assert( element ); - } - - // prepare the queue element - element->thread = IOThreadSelf(); - element->service = this; - element->count = 1; - element->required = isSuccessRequired; - element->aborted = false; - - // determine whether this object is already locked (ie. on active queue) - found = false; - queue_iterate( &gArbitrationLockQueueActive, - active, - ArbitrationLockQueueElement *, - link ) - { - if( active->service == element->service ) { - found = true; - break; - } - } - - if( found ) { // this object is already locked - - // determine whether it is the same or a different thread trying to lock - if( active->thread != element->thread ) { // it is a different thread - - ArbitrationLockQueueElement * victim = 0; - - // before placing this new thread on the waiting queue, we look for - // a deadlock cycle... - - while( 1 ) { - // determine whether the active thread holding the object we - // want is waiting for another object to be unlocked - found = false; - queue_iterate( &gArbitrationLockQueueWaiting, - waiting, - ArbitrationLockQueueElement *, - link ) - { - if( waiting->thread == active->thread ) { - assert( false == waiting->aborted ); - found = true; - break; - } - } - - if( found ) { // yes, active thread waiting for another object - - // this may be a candidate for rejection if the required - // flag is not set, should we detect a deadlock later on - if( false == waiting->required ) - victim = waiting; - - // find the thread that is holding this other object, that - // is blocking the active thread from proceeding (fun :-) - found = false; - queue_iterate( &gArbitrationLockQueueActive, - active, // (reuse active queue element) - ArbitrationLockQueueElement *, - link ) - { - if( active->service == waiting->service ) { - found = true; - break; - } - } - - // someone must be holding it or it wouldn't be waiting - assert( found ); - - if( active->thread == element->thread ) { - - // doh, it's waiting for the thread that originated - // this whole lock (ie. current thread) -> deadlock - if( false == element->required ) { // willing to fail? 
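// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] The arbitration lock being
// reindented here is a recursive, queue-based lock: one element per
// (thread, service) request lives on a global active list, re-entry by the
// owning thread bumps a recursion count, and everyone else parks on a
// waiting list. A compact userland model of the recursion part only
// (hypothetical names; the deadlock walk is sketched further below):
//
//   #include <thread>
//   #include <mutex>
//   #include <condition_variable>
//
//   struct ArbLock {
//       std::mutex              m;          // plays gArbitrationLockQueueLock
//       std::condition_variable cv;
//       std::thread::id         owner;
//       int                     count = 0;  // recursion depth
//
//       void lock() {
//           std::unique_lock<std::mutex> g(m);
//           auto self = std::this_thread::get_id();
//           if (count && owner == self) { ++count; return; } // recursive entry
//           cv.wait(g, [&]{ return count == 0; });           // waiting queue
//           owner = self;
//           count = 1;
//       }
//       void unlock() {
//           std::unique_lock<std::mutex> g(m);
//           if (--count == 0) cv.notify_one();  // plays IOLockWakeup()
//       }
//   };
// ---------------------------------------------------------------------------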
- - // the originating thread doesn't have the required - // flag, so it can fail - success = false; // (fail originating lock request) - break; // (out of while) - - } else { // originating thread is not willing to fail - - // see if we came across a waiting thread that did - // not have the 'required' flag set: we'll fail it - if( victim ) { - - // we do have a willing victim, fail it's lock - victim->aborted = true; - - // take the victim off the waiting queue - queue_remove( &gArbitrationLockQueueWaiting, - victim, - ArbitrationLockQueueElement *, - link ); - - // wake the victim - IOLockWakeup( gArbitrationLockQueueLock, - victim, - /* one thread */ true ); - - // allow this thread to proceed (ie. wait) - success = true; // (put request on wait queue) - break; // (out of while) - } else { - - // all the waiting threads we came across in - // finding this loop had the 'required' flag - // set, so we've got a deadlock we can't avoid - panic("I/O Kit: Unrecoverable deadlock."); - } - } - } else { - // repeat while loop, redefining active thread to be the - // thread holding "this other object" (see above), and - // looking for threads waiting on it; note the active - // variable points to "this other object" already... so - // there nothing to do in this else clause. - } - } else { // no, active thread is not waiting for another object - - success = true; // (put request on wait queue) - break; // (out of while) - } - } // while forever - - if( success ) { // put the request on the waiting queue? - kern_return_t wait_result; - - // place this thread on the waiting queue and put it to sleep; - // we place it at the tail of the queue... - queue_enter( &gArbitrationLockQueueWaiting, - element, - ArbitrationLockQueueElement *, - link ); - - // declare that this thread will wait for a given event -restart_sleep: wait_result = assert_wait( element, - element->required ? THREAD_UNINT - : THREAD_INTERRUPTIBLE ); - - // unlock global access - IOUnlock( gArbitrationLockQueueLock ); - - // put thread to sleep, waiting for our event to fire... - if (wait_result == THREAD_WAITING) - wait_result = thread_block(THREAD_CONTINUE_NULL); - - - // ...and we've been woken up; we might be in one of two states: - // (a) we've been aborted and our queue element is not on - // any of the three queues, but is floating around - // (b) we're allowed to proceed with the lock and we have - // already been moved from the waiting queue to the - // active queue. 
- // ...plus a 3rd state, should the thread have been interrupted: - // (c) we're still on the waiting queue - - // determine whether we were interrupted out of our sleep - if( THREAD_INTERRUPTED == wait_result ) { - - // re-lock global access - IOTakeLock( gArbitrationLockQueueLock ); - - // determine whether we're still on the waiting queue - found = false; - queue_iterate( &gArbitrationLockQueueWaiting, - waiting, // (reuse waiting queue element) - ArbitrationLockQueueElement *, - link ) - { - if( waiting == element ) { - found = true; - break; - } - } - - if( found ) { // yes, we're still on the waiting queue - - // determine whether we're willing to fail - if( false == element->required ) { - - // mark us as aborted - element->aborted = true; - - // take us off the waiting queue - queue_remove( &gArbitrationLockQueueWaiting, - element, - ArbitrationLockQueueElement *, - link ); - } else { // we are not willing to fail - - // ignore interruption, go back to sleep - goto restart_sleep; - } - } - - // unlock global access - IOUnlock( gArbitrationLockQueueLock ); - - // proceed as though this were a normal wake up - wait_result = THREAD_AWAKENED; - } - - assert( THREAD_AWAKENED == wait_result ); - - // determine whether we've been aborted while we were asleep - if( element->aborted ) { - assert( false == element->required ); - - // re-lock global access - IOTakeLock( gArbitrationLockQueueLock ); - - action = kPutOnFreeQueue; - success = false; - } else { // we weren't aborted, so we must be ready to go :-) - - // we've already been moved from waiting to active queue - return true; - } - - } else { // the lock request is to be failed - - // return unused queue element to queue - action = kPutOnFreeQueue; - } - } else { // it is the same thread, recursive access is allowed - - // add one level of recursion - active->count++; - - // return unused queue element to queue - action = kPutOnFreeQueue; - success = true; - } - } else { // this object is not already locked, so let this thread through - action = kPutOnActiveQueue; - success = true; - } - - // put the new element on a queue - if( kPutOnActiveQueue == action ) { - queue_enter( &gArbitrationLockQueueActive, - element, - ArbitrationLockQueueElement *, - link ); - } else if( kPutOnFreeQueue == action ) { - queue_enter( &gArbitrationLockQueueFree, - element, - ArbitrationLockQueueElement *, - link ); - } else { - assert( 0 ); // kPutOnWaitingQueue never occurs, handled specially above - } - - // unlock global access - IOUnlock( gArbitrationLockQueueLock ); - - return( success ); -} - -void IOService::unlockForArbitration( void ) -{ - bool found; - ArbitrationLockQueueElement * element; - - // lock global access - IOTakeLock( gArbitrationLockQueueLock ); - - // find the lock element for this object (ie. 
on active queue) - found = false; - queue_iterate( &gArbitrationLockQueueActive, - element, - ArbitrationLockQueueElement *, - link ) - { - if( element->service == this ) { - found = true; - break; - } - } - - assert( found ); - - // determine whether the lock has been taken recursively - if( element->count > 1 ) { - // undo one level of recursion - element->count--; - - } else { - - // remove it from the active queue - queue_remove( &gArbitrationLockQueueActive, - element, - ArbitrationLockQueueElement *, - link ); - - // put it on the free queue - queue_enter( &gArbitrationLockQueueFree, - element, - ArbitrationLockQueueElement *, - link ); - - // determine whether a thread is waiting for object (head to tail scan) - found = false; - queue_iterate( &gArbitrationLockQueueWaiting, - element, - ArbitrationLockQueueElement *, - link ) - { - if( element->service == this ) { - found = true; - break; - } - } - - if ( found ) { // we found an interested thread on waiting queue - - // remove it from the waiting queue - queue_remove( &gArbitrationLockQueueWaiting, - element, - ArbitrationLockQueueElement *, - link ); - - // put it on the active queue - queue_enter( &gArbitrationLockQueueActive, - element, - ArbitrationLockQueueElement *, - link ); - - // wake the waiting thread - IOLockWakeup( gArbitrationLockQueueLock, - element, - /* one thread */ true ); - } - } - - // unlock global access - IOUnlock( gArbitrationLockQueueLock ); -} - -uint32_t IOService::isLockedForArbitration(IOService * service) +bool +IOService::lockForArbitration( bool isSuccessRequired ) +{ + bool found; + bool success; + ArbitrationLockQueueElement * element; + ArbitrationLockQueueElement * active; + ArbitrationLockQueueElement * waiting; + + enum { kPutOnFreeQueue, kPutOnActiveQueue, kPutOnWaitingQueue } action; + + // lock global access + IOTakeLock( gArbitrationLockQueueLock ); + + // obtain an unused queue element + if (!queue_empty( &gArbitrationLockQueueFree )) { + queue_remove_first( &gArbitrationLockQueueFree, + element, + ArbitrationLockQueueElement *, + link ); + } else { + element = IONew( ArbitrationLockQueueElement, 1 ); + assert( element ); + } + + // prepare the queue element + element->thread = IOThreadSelf(); + element->service = this; + element->count = 1; + element->required = isSuccessRequired; + element->aborted = false; + + // determine whether this object is already locked (ie. on active queue) + found = false; + queue_iterate( &gArbitrationLockQueueActive, + active, + ArbitrationLockQueueElement *, + link ) + { + if (active->service == element->service) { + found = true; + break; + } + } + + if (found) { // this object is already locked + // determine whether it is the same or a different thread trying to lock + if (active->thread != element->thread) { // it is a different thread + ArbitrationLockQueueElement * victim = 0; + + // before placing this new thread on the waiting queue, we look for + // a deadlock cycle... 
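// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] The loop that follows walks
// the wait-for graph: "the holder of the object I want — is it itself
// waiting? then who holds *that* object?" — until the walk either leaves the
// graph (no cycle: safe to wait) or arrives back at the current thread
// (cycle: fail a non-required waiter, else panic). A standalone model over
// hypothetical tables (a pre-existing cycle not involving `self` is assumed
// impossible, since it would have been caught when it formed):
//
//   #include <map>
//   #include <thread>
//
//   using Tid = std::thread::id;
//   struct Obj;                              // an arbitrated object
//   std::map<Obj*, Tid> holder;              // active queue: obj -> owner
//   std::map<Tid, Obj*> waitingFor;          // waiting queue: thread -> obj
//
//   // Would 'self' blocking on 'want' close a cycle?
//   static bool
//   wouldDeadlock(Tid self, Obj * want)
//   {
//       Tid t = holder[want];                // who holds what I want?
//       while (true) {
//           auto w = waitingFor.find(t);
//           if (w == waitingFor.end()) {
//               return false;                // holder runs free: just wait
//           }
//           t = holder[w->second];           // follow to the next holder...
//           if (t == self) {
//               return true;                 // ...which is me: deadlock
//           }
//       }
//   }
// ---------------------------------------------------------------------------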
+ + while (1) { + // determine whether the active thread holding the object we + // want is waiting for another object to be unlocked + found = false; + queue_iterate( &gArbitrationLockQueueWaiting, + waiting, + ArbitrationLockQueueElement *, + link ) + { + if (waiting->thread == active->thread) { + assert( false == waiting->aborted ); + found = true; + break; + } + } + + if (found) { // yes, active thread waiting for another object + // this may be a candidate for rejection if the required + // flag is not set, should we detect a deadlock later on + if (false == waiting->required) { + victim = waiting; + } + + // find the thread that is holding this other object, that + // is blocking the active thread from proceeding (fun :-) + found = false; + queue_iterate( &gArbitrationLockQueueActive, + active, // (reuse active queue element) + ArbitrationLockQueueElement *, + link ) + { + if (active->service == waiting->service) { + found = true; + break; + } + } + + // someone must be holding it or it wouldn't be waiting + assert( found ); + + if (active->thread == element->thread) { + // doh, it's waiting for the thread that originated + // this whole lock (ie. current thread) -> deadlock + if (false == element->required) { // willing to fail? + // the originating thread doesn't have the required + // flag, so it can fail + success = false; // (fail originating lock request) + break; // (out of while) + } else { // originating thread is not willing to fail + // see if we came across a waiting thread that did + // not have the 'required' flag set: we'll fail it + if (victim) { + // we do have a willing victim, fail it's lock + victim->aborted = true; + + // take the victim off the waiting queue + queue_remove( &gArbitrationLockQueueWaiting, + victim, + ArbitrationLockQueueElement *, + link ); + + // wake the victim + IOLockWakeup( gArbitrationLockQueueLock, + victim, + /* one thread */ true ); + + // allow this thread to proceed (ie. wait) + success = true; // (put request on wait queue) + break; // (out of while) + } else { + // all the waiting threads we came across in + // finding this loop had the 'required' flag + // set, so we've got a deadlock we can't avoid + panic("I/O Kit: Unrecoverable deadlock."); + } + } + } else { + // repeat while loop, redefining active thread to be the + // thread holding "this other object" (see above), and + // looking for threads waiting on it; note the active + // variable points to "this other object" already... so + // there nothing to do in this else clause. + } + } else { // no, active thread is not waiting for another object + success = true; // (put request on wait queue) + break; // (out of while) + } + } // while forever + + if (success) { // put the request on the waiting queue? + kern_return_t wait_result; + + // place this thread on the waiting queue and put it to sleep; + // we place it at the tail of the queue... + queue_enter( &gArbitrationLockQueueWaiting, + element, + ArbitrationLockQueueElement *, + link ); + + // declare that this thread will wait for a given event +restart_sleep: wait_result = assert_wait( element, + element->required ? THREAD_UNINT + : THREAD_INTERRUPTIBLE ); + + // unlock global access + IOUnlock( gArbitrationLockQueueLock ); + + // put thread to sleep, waiting for our event to fire... 
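// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] assert_wait()/thread_block()
// here is a two-phase sleep: the wait is registered on the event while the
// queue lock is still held, the lock is dropped, and only then does the
// thread block — so a wakeup issued in the window between unlock and block
// is not lost. In userland C++, condition_variable::wait(lock) performs the
// same register-release-block sequence in one call (names hypothetical):
//
//   #include <mutex>
//   #include <condition_variable>
//
//   std::mutex              qlock;    // plays gArbitrationLockQueueLock
//   std::condition_variable event;    // plays the assert_wait event
//   bool                    granted;  // set by the waker under qlock
//
//   static void
//   sleepForGrant()
//   {
//       std::unique_lock<std::mutex> g(qlock);
//       // atomically: register on 'event', release qlock, block, re-lock
//       event.wait(g, [] { return granted; });
//   }
//
//   static void
//   wake()
//   {
//       { std::lock_guard<std::mutex> g(qlock); granted = true; }
//       event.notify_one();           // plays thread_wakeup()
//   }
// ---------------------------------------------------------------------------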
+ if (wait_result == THREAD_WAITING) { + wait_result = thread_block(THREAD_CONTINUE_NULL); + } + + + // ...and we've been woken up; we might be in one of two states: + // (a) we've been aborted and our queue element is not on + // any of the three queues, but is floating around + // (b) we're allowed to proceed with the lock and we have + // already been moved from the waiting queue to the + // active queue. + // ...plus a 3rd state, should the thread have been interrupted: + // (c) we're still on the waiting queue + + // determine whether we were interrupted out of our sleep + if (THREAD_INTERRUPTED == wait_result) { + // re-lock global access + IOTakeLock( gArbitrationLockQueueLock ); + + // determine whether we're still on the waiting queue + found = false; + queue_iterate( &gArbitrationLockQueueWaiting, + waiting, // (reuse waiting queue element) + ArbitrationLockQueueElement *, + link ) + { + if (waiting == element) { + found = true; + break; + } + } + + if (found) { // yes, we're still on the waiting queue + // determine whether we're willing to fail + if (false == element->required) { + // mark us as aborted + element->aborted = true; + + // take us off the waiting queue + queue_remove( &gArbitrationLockQueueWaiting, + element, + ArbitrationLockQueueElement *, + link ); + } else { // we are not willing to fail + // ignore interruption, go back to sleep + goto restart_sleep; + } + } + + // unlock global access + IOUnlock( gArbitrationLockQueueLock ); + + // proceed as though this were a normal wake up + wait_result = THREAD_AWAKENED; + } + + assert( THREAD_AWAKENED == wait_result ); + + // determine whether we've been aborted while we were asleep + if (element->aborted) { + assert( false == element->required ); + + // re-lock global access + IOTakeLock( gArbitrationLockQueueLock ); + + action = kPutOnFreeQueue; + success = false; + } else { // we weren't aborted, so we must be ready to go :-) + // we've already been moved from waiting to active queue + return true; + } + } else { // the lock request is to be failed + // return unused queue element to queue + action = kPutOnFreeQueue; + } + } else { // it is the same thread, recursive access is allowed + // add one level of recursion + active->count++; + + // return unused queue element to queue + action = kPutOnFreeQueue; + success = true; + } + } else { // this object is not already locked, so let this thread through + action = kPutOnActiveQueue; + success = true; + } + + // put the new element on a queue + if (kPutOnActiveQueue == action) { + queue_enter( &gArbitrationLockQueueActive, + element, + ArbitrationLockQueueElement *, + link ); + } else if (kPutOnFreeQueue == action) { + queue_enter( &gArbitrationLockQueueFree, + element, + ArbitrationLockQueueElement *, + link ); + } else { + assert( 0 ); // kPutOnWaitingQueue never occurs, handled specially above + } + + // unlock global access + IOUnlock( gArbitrationLockQueueLock ); + + return success; +} + +void +IOService::unlockForArbitration( void ) +{ + bool found; + ArbitrationLockQueueElement * element; + + // lock global access + IOTakeLock( gArbitrationLockQueueLock ); + + // find the lock element for this object (ie. 
on active queue) + found = false; + queue_iterate( &gArbitrationLockQueueActive, + element, + ArbitrationLockQueueElement *, + link ) + { + if (element->service == this) { + found = true; + break; + } + } + + assert( found ); + + // determine whether the lock has been taken recursively + if (element->count > 1) { + // undo one level of recursion + element->count--; + } else { + // remove it from the active queue + queue_remove( &gArbitrationLockQueueActive, + element, + ArbitrationLockQueueElement *, + link ); + + // put it on the free queue + queue_enter( &gArbitrationLockQueueFree, + element, + ArbitrationLockQueueElement *, + link ); + + // determine whether a thread is waiting for object (head to tail scan) + found = false; + queue_iterate( &gArbitrationLockQueueWaiting, + element, + ArbitrationLockQueueElement *, + link ) + { + if (element->service == this) { + found = true; + break; + } + } + + if (found) { // we found an interested thread on waiting queue + // remove it from the waiting queue + queue_remove( &gArbitrationLockQueueWaiting, + element, + ArbitrationLockQueueElement *, + link ); + + // put it on the active queue + queue_enter( &gArbitrationLockQueueActive, + element, + ArbitrationLockQueueElement *, + link ); + + // wake the waiting thread + IOLockWakeup( gArbitrationLockQueueLock, + element, + /* one thread */ true ); + } + } + + // unlock global access + IOUnlock( gArbitrationLockQueueLock ); +} + +uint32_t +IOService::isLockedForArbitration(IOService * service) { #if DEBUG_NOTIFIER_LOCKED - uint32_t count; - ArbitrationLockQueueElement * active; - - // lock global access - IOLockLock(gArbitrationLockQueueLock); - - // determine whether this object is already locked (ie. on active queue) - count = 0; - queue_iterate(&gArbitrationLockQueueActive, - active, - ArbitrationLockQueueElement *, - link) - { - if ((active->thread == IOThreadSelf()) - && (!service || (active->service == service))) - { - count += 0x10000; - count += active->count; - } - } - - IOLockUnlock(gArbitrationLockQueueLock); - - return (count); + uint32_t count; + ArbitrationLockQueueElement * active; + + // lock global access + IOLockLock(gArbitrationLockQueueLock); + + // determine whether this object is already locked (ie. 
on active queue) + count = 0; + queue_iterate(&gArbitrationLockQueueActive, + active, + ArbitrationLockQueueElement *, + link) + { + if ((active->thread == IOThreadSelf()) + && (!service || (active->service == service))) { + count += 0x10000; + count += active->count; + } + } + + IOLockUnlock(gArbitrationLockQueueLock); + + return count; #else /* DEBUG_NOTIFIER_LOCKED */ - return (0); + return 0; #endif /* DEBUG_NOTIFIER_LOCKED */ } -void IOService::applyToProviders( IOServiceApplierFunction applier, - void * context ) +void +IOService::applyToProviders( IOServiceApplierFunction applier, + void * context ) { - applyToParents( (IORegistryEntryApplierFunction) applier, - context, gIOServicePlane ); + applyToParents((IORegistryEntryApplierFunction) applier, + context, gIOServicePlane ); } -void IOService::applyToClients( IOServiceApplierFunction applier, - void * context ) +void +IOService::applyToClients( IOServiceApplierFunction applier, + void * context ) { - applyToChildren( (IORegistryEntryApplierFunction) applier, - context, gIOServicePlane ); + applyToChildren((IORegistryEntryApplierFunction) applier, + context, gIOServicePlane ); } @@ -1654,293 +1713,314 @@ void IOService::applyToClients( IOServiceApplierFunction applier, // send a message to a client or interested party of this service -IOReturn IOService::messageClient( UInt32 type, OSObject * client, - void * argument, vm_size_t argSize ) -{ - IOReturn ret; - IOService * service; - _IOServiceInterestNotifier * notify; - - if( (service = OSDynamicCast( IOService, client))) - ret = service->message( type, this, argument ); - - else if( (notify = OSDynamicCast( _IOServiceInterestNotifier, client))) { - - _IOServiceNotifierInvocation invocation; - bool willNotify; - - invocation.thread = current_thread(); - - LOCKWRITENOTIFY(); - willNotify = (0 != (kIOServiceNotifyEnable & notify->state)); - - if( willNotify) { - queue_enter( ¬ify->handlerInvocations, &invocation, - _IOServiceNotifierInvocation *, link ); - } - UNLOCKNOTIFY(); - - if( willNotify) { - - ret = (*notify->handler)( notify->target, notify->ref, - type, this, argument, argSize ); - - LOCKWRITENOTIFY(); - queue_remove( ¬ify->handlerInvocations, &invocation, - _IOServiceNotifierInvocation *, link ); - if( kIOServiceNotifyWaiter & notify->state) { - notify->state &= ~kIOServiceNotifyWaiter; - WAKEUPNOTIFY( notify ); - } - UNLOCKNOTIFY(); - - } else - ret = kIOReturnSuccess; - - } else - ret = kIOReturnBadArgument; - - return( ret ); +IOReturn +IOService::messageClient( UInt32 type, OSObject * client, + void * argument, vm_size_t argSize ) +{ + IOReturn ret; + IOService * service; + _IOServiceInterestNotifier * notify; + + if ((service = OSDynamicCast( IOService, client))) { + ret = service->message( type, this, argument ); + } else if ((notify = OSDynamicCast( _IOServiceInterestNotifier, client))) { + _IOServiceNotifierInvocation invocation; + bool willNotify; + + invocation.thread = current_thread(); + + LOCKWRITENOTIFY(); + willNotify = (0 != (kIOServiceNotifyEnable & notify->state)); + + if (willNotify) { + queue_enter( ¬ify->handlerInvocations, &invocation, + _IOServiceNotifierInvocation *, link ); + } + UNLOCKNOTIFY(); + + if (willNotify) { + ret = (*notify->handler)( notify->target, notify->ref, + type, this, argument, argSize ); + + LOCKWRITENOTIFY(); + queue_remove( ¬ify->handlerInvocations, &invocation, + _IOServiceNotifierInvocation *, link ); + if (kIOServiceNotifyWaiter & notify->state) { + notify->state &= ~kIOServiceNotifyWaiter; + WAKEUPNOTIFY( notify ); + } + 
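// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] messageClient() brackets
// every handler call with an invocation record keyed by the calling thread;
// remove()/disable() later use ::wait() to drain those records, so a
// notifier is never torn down while some other thread is still inside its
// handler. A userland model (hypothetical names; unlike the kernel queue,
// a std::set cannot represent nested invocations by one thread):
//
//   #include <mutex>
//   #include <condition_variable>
//   #include <set>
//   #include <thread>
//   #include <functional>
//
//   struct Notifier {
//       std::mutex                m;
//       std::condition_variable   cv;
//       std::set<std::thread::id> inFlight;   // plays handlerInvocations
//
//       void invoke(const std::function<void()> & handler) {
//           { std::lock_guard<std::mutex> g(m);
//             inFlight.insert(std::this_thread::get_id()); }
//           handler();                         // call outside the lock
//           { std::lock_guard<std::mutex> g(m);
//             inFlight.erase(std::this_thread::get_id()); }
//           cv.notify_all();                   // plays WAKEUPNOTIFY
//       }
//       void drain() {                         // plays ::wait()
//           std::unique_lock<std::mutex> g(m);
//           auto self = std::this_thread::get_id();
//           cv.wait(g, [&] {                   // ignore our own frame
//               return inFlight.empty()
//                   || (inFlight.size() == 1 && inFlight.count(self));
//           });
//       }
//   };
// ---------------------------------------------------------------------------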
UNLOCKNOTIFY(); + } else { + ret = kIOReturnSuccess; + } + } else { + ret = kIOReturnBadArgument; + } + + return ret; } static void applyToInterestNotifiers(const IORegistryEntry *target, - const OSSymbol * typeOfInterest, - OSObjectApplierFunction applier, - void * context ) + const OSSymbol * typeOfInterest, + OSObjectApplierFunction applier, + void * context ) { - OSArray * copyArray = 0; - OSObject * prop; + OSArray * copyArray = 0; + OSObject * prop; - LOCKREADNOTIFY(); + LOCKREADNOTIFY(); - prop = target->copyProperty(typeOfInterest); - IOCommand *notifyList = OSDynamicCast(IOCommand, prop); + prop = target->copyProperty(typeOfInterest); + IOCommand *notifyList = OSDynamicCast(IOCommand, prop); - if( notifyList) { - copyArray = OSArray::withCapacity(1); + if (notifyList) { + copyArray = OSArray::withCapacity(1); - // iterate over queue, entry is set to each element in the list - iterqueue(¬ifyList->fCommandChain, entry) { - _IOServiceInterestNotifier * notify; + // iterate over queue, entry is set to each element in the list + iterqueue(¬ifyList->fCommandChain, entry) { + _IOServiceInterestNotifier * notify; - queue_element(entry, notify, _IOServiceInterestNotifier *, chain); - copyArray->setObject(notify); + queue_element(entry, notify, _IOServiceInterestNotifier *, chain); + copyArray->setObject(notify); + } } - } - UNLOCKNOTIFY(); + UNLOCKNOTIFY(); - if( copyArray) { - unsigned int index; - OSObject * next; + if (copyArray) { + unsigned int index; + OSObject * next; - for( index = 0; (next = copyArray->getObject( index )); index++) - (*applier)(next, context); - copyArray->release(); - } + for (index = 0; (next = copyArray->getObject( index )); index++) { + (*applier)(next, context); + } + copyArray->release(); + } - OSSafeReleaseNULL(prop); + OSSafeReleaseNULL(prop); } -void IOService::applyToInterested( const OSSymbol * typeOfInterest, - OSObjectApplierFunction applier, - void * context ) +void +IOService::applyToInterested( const OSSymbol * typeOfInterest, + OSObjectApplierFunction applier, + void * context ) { - if (gIOGeneralInterest == typeOfInterest) - applyToClients( (IOServiceApplierFunction) applier, context ); - applyToInterestNotifiers(this, typeOfInterest, applier, context); + if (gIOGeneralInterest == typeOfInterest) { + applyToClients((IOServiceApplierFunction) applier, context ); + } + applyToInterestNotifiers(this, typeOfInterest, applier, context); } struct MessageClientsContext { - IOService * service; - UInt32 type; - void * argument; - vm_size_t argSize; - IOReturn ret; + IOService * service; + UInt32 type; + void * argument; + vm_size_t argSize; + IOReturn ret; }; -static void messageClientsApplier( OSObject * object, void * ctx ) +static void +messageClientsApplier( OSObject * object, void * ctx ) { - IOReturn ret; - MessageClientsContext * context = (MessageClientsContext *) ctx; + IOReturn ret; + MessageClientsContext * context = (MessageClientsContext *) ctx; - ret = context->service->messageClient( context->type, - object, context->argument, context->argSize ); - if( kIOReturnSuccess != ret) - context->ret = ret; + ret = context->service->messageClient( context->type, + object, context->argument, context->argSize ); + if (kIOReturnSuccess != ret) { + context->ret = ret; + } } // send a message to all clients -IOReturn IOService::messageClients( UInt32 type, - void * argument, vm_size_t argSize ) +IOReturn +IOService::messageClients( UInt32 type, + void * argument, vm_size_t argSize ) { - MessageClientsContext context; + MessageClientsContext context; - 
context.service = this; - context.type = type; - context.argument = argument; - context.argSize = argSize; - context.ret = kIOReturnSuccess; + context.service = this; + context.type = type; + context.argument = argument; + context.argSize = argSize; + context.ret = kIOReturnSuccess; - applyToInterested( gIOGeneralInterest, - &messageClientsApplier, &context ); + applyToInterested( gIOGeneralInterest, + &messageClientsApplier, &context ); - return( context.ret ); + return context.ret; } -IOReturn IOService::acknowledgeNotification( IONotificationRef notification, - IOOptionBits response ) +IOReturn +IOService::acknowledgeNotification( IONotificationRef notification, + IOOptionBits response ) { - return( kIOReturnUnsupported ); + return kIOReturnUnsupported; } -IONotifier * IOService::registerInterest( const OSSymbol * typeOfInterest, - IOServiceInterestHandler handler, void * target, void * ref ) +IONotifier * +IOService::registerInterest( const OSSymbol * typeOfInterest, + IOServiceInterestHandler handler, void * target, void * ref ) { - _IOServiceInterestNotifier * notify = 0; - IOReturn rc = kIOReturnError; + _IOServiceInterestNotifier * notify = 0; + IOReturn rc = kIOReturnError; - notify = new _IOServiceInterestNotifier; - if (!notify) return NULL; + notify = new _IOServiceInterestNotifier; + if (!notify) { + return NULL; + } - if(notify->init()) { - rc = registerInterestForNotifier(notify, typeOfInterest, - handler, target, ref); - } + if (notify->init()) { + rc = registerInterestForNotifier(notify, typeOfInterest, + handler, target, ref); + } - if (rc != kIOReturnSuccess) { - notify->release(); - notify = 0; - } + if (rc != kIOReturnSuccess) { + notify->release(); + notify = 0; + } - return( notify ); + return notify; } static IOReturn IOServiceInterestHandlerToBlock( void * target __unused, void * refCon, - UInt32 messageType, IOService * provider, - void * messageArgument, vm_size_t argSize ) + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ) { - return ((IOServiceInterestHandlerBlock) refCon)(messageType, provider, messageArgument, argSize); + return ((IOServiceInterestHandlerBlock) refCon)(messageType, provider, messageArgument, argSize); } -IONotifier * IOService::registerInterest(const OSSymbol * typeOfInterest, - IOServiceInterestHandlerBlock handler) +IONotifier * +IOService::registerInterest(const OSSymbol * typeOfInterest, + IOServiceInterestHandlerBlock handler) { - IONotifier * notify; - void * block; + IONotifier * notify; + void * block; - block = Block_copy(handler); - if (!block) return (NULL); + block = Block_copy(handler); + if (!block) { + return NULL; + } - notify = registerInterest(typeOfInterest, &IOServiceInterestHandlerToBlock, NULL, block); + notify = registerInterest(typeOfInterest, &IOServiceInterestHandlerToBlock, NULL, block); - if (!notify) Block_release(block); + if (!notify) { + Block_release(block); + } - return (notify); + return notify; } -IOReturn IOService::registerInterestForNotifier( IONotifier *svcNotify, const OSSymbol * typeOfInterest, - IOServiceInterestHandler handler, void * target, void * ref ) +IOReturn +IOService::registerInterestForNotifier( IONotifier *svcNotify, const OSSymbol * typeOfInterest, + IOServiceInterestHandler handler, void * target, void * ref ) { - IOReturn rc = kIOReturnSuccess; - _IOServiceInterestNotifier *notify = 0; - - if (!svcNotify || !(notify = OSDynamicCast(_IOServiceInterestNotifier, svcNotify))) - return( kIOReturnBadArgument ); - - notify->handler = handler; - 
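// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] The block-based
// registerInterest() above adapts a closure to the C function-pointer
// callback by stashing the copied block in the refcon and routing through a
// single static trampoline; ::free() releases the block only when the
// handler is that trampoline. A userland model with std::function standing
// in for the block (all names hypothetical):
//
//   #include <functional>
//
//   using Handler   = int (*)(void * target, void * refcon, unsigned msg);
//   using HandlerFn = std::function<int(unsigned)>;
//
//   static int
//   trampoline(void * /*target*/, void * refcon, unsigned msg)
//   {
//       return (*static_cast<HandlerFn *>(refcon))(msg);  // unwrap and call
//   }
//
//   struct Registration { Handler handler; void * refcon; };
//
//   static Registration
//   registerInterest(HandlerFn fn)
//   {
//       // plays Block_copy(): move the closure to the heap to outlive us
//       auto * heapFn = new HandlerFn(std::move(fn));
//       return Registration { &trampoline, heapFn };
//   }
//
//   static void
//   unregister(Registration & r)
//   {
//       if (r.handler == &trampoline) {        // plays the check in ::free()
//           delete static_cast<HandlerFn *>(r.refcon);  // plays Block_release
//       }
//   }
// ---------------------------------------------------------------------------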
notify->target = target; - notify->ref = ref; - - if( (typeOfInterest != gIOGeneralInterest) - && (typeOfInterest != gIOBusyInterest) - && (typeOfInterest != gIOAppPowerStateInterest) - && (typeOfInterest != gIOConsoleSecurityInterest) - && (typeOfInterest != gIOPriorityPowerStateInterest)) - return( kIOReturnBadArgument ); + IOReturn rc = kIOReturnSuccess; + _IOServiceInterestNotifier *notify = 0; - lockForArbitration(); - if( 0 == (__state[0] & kIOServiceInactiveState)) { - - notify->state = kIOServiceNotifyEnable; - - ////// queue + if (!svcNotify || !(notify = OSDynamicCast(_IOServiceInterestNotifier, svcNotify))) { + return kIOReturnBadArgument; + } - LOCKWRITENOTIFY(); + notify->handler = handler; + notify->target = target; + notify->ref = ref; + + if ((typeOfInterest != gIOGeneralInterest) + && (typeOfInterest != gIOBusyInterest) + && (typeOfInterest != gIOAppPowerStateInterest) + && (typeOfInterest != gIOConsoleSecurityInterest) + && (typeOfInterest != gIOPriorityPowerStateInterest)) { + return kIOReturnBadArgument; + } - // Get the head of the notifier linked list - IOCommand * notifyList; - OSObject * obj = copyProperty( typeOfInterest ); - if (!(notifyList = OSDynamicCast(IOCommand, obj))) { - notifyList = OSTypeAlloc(IOCommand); - if (notifyList) { - notifyList->init(); - bool ok = setProperty( typeOfInterest, notifyList); - notifyList->release(); - if (!ok) notifyList = 0; - } - } - if (obj) obj->release(); + lockForArbitration(); + if (0 == (__state[0] & kIOServiceInactiveState)) { + notify->state = kIOServiceNotifyEnable; + + ////// queue + + LOCKWRITENOTIFY(); + + // Get the head of the notifier linked list + IOCommand * notifyList; + OSObject * obj = copyProperty( typeOfInterest ); + if (!(notifyList = OSDynamicCast(IOCommand, obj))) { + notifyList = OSTypeAlloc(IOCommand); + if (notifyList) { + notifyList->init(); + bool ok = setProperty( typeOfInterest, notifyList); + notifyList->release(); + if (!ok) { + notifyList = 0; + } + } + } + if (obj) { + obj->release(); + } - if (notifyList) { - enqueue(¬ifyList->fCommandChain, ¬ify->chain); - notify->retain(); // ref'ed while in list - } + if (notifyList) { + enqueue(¬ifyList->fCommandChain, ¬ify->chain); + notify->retain(); // ref'ed while in list + } - UNLOCKNOTIFY(); - } - else { - rc = kIOReturnNotReady; - } - unlockForArbitration(); + UNLOCKNOTIFY(); + } else { + rc = kIOReturnNotReady; + } + unlockForArbitration(); - return rc; + return rc; } -static void cleanInterestList( OSObject * head ) +static void +cleanInterestList( OSObject * head ) { - IOCommand *notifyHead = OSDynamicCast(IOCommand, head); - if (!notifyHead) - return; + IOCommand *notifyHead = OSDynamicCast(IOCommand, head); + if (!notifyHead) { + return; + } - LOCKWRITENOTIFY(); - while ( queue_entry_t entry = dequeue(¬ifyHead->fCommandChain) ) { - queue_next(entry) = queue_prev(entry) = 0; + LOCKWRITENOTIFY(); + while (queue_entry_t entry = dequeue(¬ifyHead->fCommandChain)) { + queue_next(entry) = queue_prev(entry) = 0; - _IOServiceInterestNotifier * notify; + _IOServiceInterestNotifier * notify; - queue_element(entry, notify, _IOServiceInterestNotifier *, chain); - notify->release(); - } - UNLOCKNOTIFY(); + queue_element(entry, notify, _IOServiceInterestNotifier *, chain); + notify->release(); + } + UNLOCKNOTIFY(); } -void IOService::unregisterAllInterest( void ) +void +IOService::unregisterAllInterest( void ) { - OSObject * prop; + OSObject * prop; - prop = copyProperty(gIOGeneralInterest); - cleanInterestList(prop); - OSSafeReleaseNULL(prop); + prop = 
copyProperty(gIOGeneralInterest); + cleanInterestList(prop); + OSSafeReleaseNULL(prop); - prop = copyProperty(gIOBusyInterest); - cleanInterestList(prop); - OSSafeReleaseNULL(prop); + prop = copyProperty(gIOBusyInterest); + cleanInterestList(prop); + OSSafeReleaseNULL(prop); - prop = copyProperty(gIOAppPowerStateInterest); - cleanInterestList(prop); - OSSafeReleaseNULL(prop); + prop = copyProperty(gIOAppPowerStateInterest); + cleanInterestList(prop); + OSSafeReleaseNULL(prop); - prop = copyProperty(gIOPriorityPowerStateInterest); - cleanInterestList(prop); - OSSafeReleaseNULL(prop); + prop = copyProperty(gIOPriorityPowerStateInterest); + cleanInterestList(prop); + OSSafeReleaseNULL(prop); - prop = copyProperty(gIOConsoleSecurityInterest); - cleanInterestList(prop); - OSSafeReleaseNULL(prop); + prop = copyProperty(gIOConsoleSecurityInterest); + cleanInterestList(prop); + OSSafeReleaseNULL(prop); } /* @@ -1950,86 +2030,95 @@ void IOService::unregisterAllInterest( void ) // wait for all threads, other than the current one, // to exit the handler -void _IOServiceInterestNotifier::wait() +void +_IOServiceInterestNotifier::wait() { - _IOServiceNotifierInvocation * next; - bool doWait; + _IOServiceNotifierInvocation * next; + bool doWait; - do { - doWait = false; - queue_iterate( &handlerInvocations, next, - _IOServiceNotifierInvocation *, link) { - if( next->thread != current_thread() ) { - doWait = true; - break; - } - } - if( doWait) { - state |= kIOServiceNotifyWaiter; - SLEEPNOTIFY(this); - } - - } while( doWait ); + do { + doWait = false; + queue_iterate( &handlerInvocations, next, + _IOServiceNotifierInvocation *, link) { + if (next->thread != current_thread()) { + doWait = true; + break; + } + } + if (doWait) { + state |= kIOServiceNotifyWaiter; + SLEEPNOTIFY(this); + } + } while (doWait); } -void _IOServiceInterestNotifier::free() +void +_IOServiceInterestNotifier::free() { - assert( queue_empty( &handlerInvocations )); + assert( queue_empty( &handlerInvocations )); - if (handler == &IOServiceInterestHandlerToBlock) Block_release(ref); + if (handler == &IOServiceInterestHandlerToBlock) { + Block_release(ref); + } - OSObject::free(); + OSObject::free(); } -void _IOServiceInterestNotifier::remove() +void +_IOServiceInterestNotifier::remove() { - LOCKWRITENOTIFY(); + LOCKWRITENOTIFY(); - if( queue_next( &chain )) { - remqueue(&chain); - queue_next( &chain) = queue_prev( &chain) = 0; - release(); - } + if (queue_next( &chain )) { + remqueue(&chain); + queue_next( &chain) = queue_prev( &chain) = 0; + release(); + } + + state &= ~kIOServiceNotifyEnable; - state &= ~kIOServiceNotifyEnable; + wait(); - wait(); + UNLOCKNOTIFY(); - UNLOCKNOTIFY(); - - release(); + release(); } -bool _IOServiceInterestNotifier::disable() +bool +_IOServiceInterestNotifier::disable() { - bool ret; + bool ret; - LOCKWRITENOTIFY(); + LOCKWRITENOTIFY(); - ret = (0 != (kIOServiceNotifyEnable & state)); - state &= ~kIOServiceNotifyEnable; - if( ret) - wait(); + ret = (0 != (kIOServiceNotifyEnable & state)); + state &= ~kIOServiceNotifyEnable; + if (ret) { + wait(); + } - UNLOCKNOTIFY(); + UNLOCKNOTIFY(); - return( ret ); + return ret; } -void _IOServiceInterestNotifier::enable( bool was ) +void +_IOServiceInterestNotifier::enable( bool was ) { - LOCKWRITENOTIFY(); - if( was) - state |= kIOServiceNotifyEnable; - else - state &= ~kIOServiceNotifyEnable; - UNLOCKNOTIFY(); + LOCKWRITENOTIFY(); + if (was) { + state |= kIOServiceNotifyEnable; + } else { + state &= ~kIOServiceNotifyEnable; + } + UNLOCKNOTIFY(); } -bool 
_IOServiceInterestNotifier::init() +bool +_IOServiceInterestNotifier::init() { - queue_init( &handlerInvocations ); - return (OSObject::init()); + queue_init( &handlerInvocations ); + return OSObject::init(); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -2037,864 +2126,878 @@ bool _IOServiceInterestNotifier::init() * Termination */ -#define tailQ(o) setObject(o) -#define headQ(o) setObject(0, o) -#define TLOG(fmt, args...) { if(kIOLogYield & gIOKitDebug) { IOLog("[%llx] ", thread_tid(current_thread())); IOLog(fmt, ## args); }} - -static void _workLoopAction( IOWorkLoop::Action action, - IOService * service, - void * p0 = 0, void * p1 = 0, - void * p2 = 0, void * p3 = 0 ) -{ - IOWorkLoop * wl; +#define tailQ(o) setObject(o) +#define headQ(o) setObject(0, o) +#define TLOG(fmt, args...) { if(kIOLogYield & gIOKitDebug) { IOLog("[%llx] ", thread_tid(current_thread())); IOLog(fmt, ## args); }} - if( (wl = service->getWorkLoop())) { - wl->retain(); - wl->runAction( action, service, p0, p1, p2, p3 ); - wl->release(); - } else - (*action)( service, p0, p1, p2, p3 ); +static void +_workLoopAction( IOWorkLoop::Action action, + IOService * service, + void * p0 = 0, void * p1 = 0, + void * p2 = 0, void * p3 = 0 ) +{ + IOWorkLoop * wl; + + if ((wl = service->getWorkLoop())) { + wl->retain(); + wl->runAction( action, service, p0, p1, p2, p3 ); + wl->release(); + } else { + (*action)( service, p0, p1, p2, p3 ); + } } -bool IOService::requestTerminate( IOService * provider, IOOptionBits options ) +bool +IOService::requestTerminate( IOService * provider, IOOptionBits options ) { - bool ok; + bool ok; - // if its our only provider - ok = isParent( provider, gIOServicePlane, true); + // if its our only provider + ok = isParent( provider, gIOServicePlane, true); - // -- compat - if( ok) { - provider->terminateClient( this, options | kIOServiceRecursing ); - ok = (0 != (kIOServiceInactiveState & __state[0])); - } - // -- + // -- compat + if (ok) { + provider->terminateClient( this, options | kIOServiceRecursing ); + ok = (0 != (kIOServiceInactiveState & __state[0])); + } + // -- - return( ok ); + return ok; } -bool IOService::terminatePhase1( IOOptionBits options ) +bool +IOService::terminatePhase1( IOOptionBits options ) { - IOService * victim; - IOService * client; - OSIterator * iter; - OSArray * makeInactive; - OSArray * waitingInactive; - int waitResult = THREAD_AWAKENED; - bool wait; - bool ok; - bool didInactive; - bool startPhase2 = false; + IOService * victim; + IOService * client; + OSIterator * iter; + OSArray * makeInactive; + OSArray * waitingInactive; + int waitResult = THREAD_AWAKENED; + bool wait; + bool ok; + bool didInactive; + bool startPhase2 = false; - TLOG("%s[0x%qx]::terminatePhase1(%08llx)\n", getName(), getRegistryEntryID(), (long long)options); + TLOG("%s[0x%qx]::terminatePhase1(%08llx)\n", getName(), getRegistryEntryID(), (long long)options); - uint64_t regID = getRegistryEntryID(); - IOServiceTrace( - IOSERVICE_TERMINATE_PHASE1, - (uintptr_t) regID, - (uintptr_t) (regID >> 32), - (uintptr_t) this, - (uintptr_t) options); - - // -- compat - if( options & kIOServiceRecursing) { - lockForArbitration(); - if (0 == (kIOServiceInactiveState & __state[0])) - { - __state[0] |= kIOServiceInactiveState; - __state[1] |= kIOServiceRecursing | kIOServiceTermPhase1State; - } - unlockForArbitration(); - - return( true ); - } - // -- - - makeInactive = OSArray::withCapacity( 16 ); - waitingInactive = OSArray::withCapacity( 16 ); - if(!makeInactive || 
!waitingInactive) return( false ); - - victim = this; - victim->retain(); - - while( victim ) - { - didInactive = victim->lockForArbitration( true ); - if( didInactive) - { - uint64_t regID1 = victim->getRegistryEntryID(); - IOServiceTrace(IOSERVICE_TERM_SET_INACTIVE, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) victim->__state[1], - (uintptr_t) 0); + uint64_t regID = getRegistryEntryID(); + IOServiceTrace( + IOSERVICE_TERMINATE_PHASE1, + (uintptr_t) regID, + (uintptr_t) (regID >> 32), + (uintptr_t) this, + (uintptr_t) options); + + // -- compat + if (options & kIOServiceRecursing) { + lockForArbitration(); + if (0 == (kIOServiceInactiveState & __state[0])) { + __state[0] |= kIOServiceInactiveState; + __state[1] |= kIOServiceRecursing | kIOServiceTermPhase1State; + } + unlockForArbitration(); - enum { kRP1 = kIOServiceRecursing | kIOServiceTermPhase1State }; - didInactive = (kRP1 == (victim->__state[1] & kRP1)) - || (0 == (victim->__state[0] & kIOServiceInactiveState)); + return true; + } + // -- - if (!didInactive) - { - // a multiply attached IOService can be visited twice - if (-1U == waitingInactive->getNextIndexOfObject(victim, 0)) do - { - IOLockLock(gIOServiceBusyLock); - wait = (victim->__state[1] & kIOServiceTermPhase1State); - if( wait) { - TLOG("%s[0x%qx]::waitPhase1(%s[0x%qx])\n", - getName(), getRegistryEntryID(), victim->getName(), victim->getRegistryEntryID()); - victim->__state[1] |= kIOServiceTerm1WaiterState; - victim->unlockForArbitration(); - assert_wait((event_t)&victim->__state[1], THREAD_UNINT); - } - IOLockUnlock(gIOServiceBusyLock); - if( wait) { - waitResult = thread_block(THREAD_CONTINUE_NULL); - TLOG("%s[0x%qx]::did waitPhase1(%s[0x%qx])\n", - getName(), getRegistryEntryID(), victim->getName(), victim->getRegistryEntryID()); - victim->lockForArbitration(); - } - } - while (wait && (waitResult != THREAD_TIMED_OUT)); - } - else - { - victim->__state[0] |= kIOServiceInactiveState; - victim->__state[0] &= ~(kIOServiceRegisteredState | kIOServiceMatchedState - | kIOServiceFirstPublishState | kIOServiceFirstMatchState); - victim->__state[1] &= ~kIOServiceRecursing; - victim->__state[1] |= kIOServiceTermPhase1State; - waitingInactive->headQ(victim); - if (victim == this) - { - if (kIOServiceTerminateNeedWillTerminate & options) - { - victim->__state[1] |= kIOServiceNeedWillTerminate; - } - } - victim->_adjustBusy( 1 ); - } - victim->unlockForArbitration(); - } - if( victim == this) startPhase2 = didInactive; - if (didInactive) - { - OSArray * notifiers; - notifiers = victim->copyNotifiers(gIOTerminatedNotification, 0, 0xffffffff); - victim->invokeNotifiers(¬ifiers); - - IOUserClient::destroyUserReferences( victim ); - - iter = victim->getClientIterator(); - if( iter) { - while( (client = (IOService *) iter->getNextObject())) { - TLOG("%s[0x%qx]::requestTerminate(%s[0x%qx], %08llx)\n", - client->getName(), client->getRegistryEntryID(), - victim->getName(), victim->getRegistryEntryID(), (long long)options); - ok = client->requestTerminate( victim, options ); - TLOG("%s[0x%qx]::requestTerminate(%s[0x%qx], ok = %d)\n", - client->getName(), client->getRegistryEntryID(), - victim->getName(), victim->getRegistryEntryID(), ok); - - uint64_t regID1 = client->getRegistryEntryID(); - uint64_t regID2 = victim->getRegistryEntryID(); - IOServiceTrace( - (ok ? 
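// ---------------------------------------------------------------------------
// [Illustrative aside — not part of this patch] terminatePhase1() here is a
// work-list traversal: mark the victim inactive under its arbitration lock,
// ask each client whether it consents to terminate, append consenting
// clients to the list, and pop the next victim until the list drains. A
// skeleton of that shape, minus the locking and tracing (names hypothetical):
//
//   #include <vector>
//
//   struct Svc {
//       bool inactive = false;
//       std::vector<Svc*> clients;
//       bool requestTerminate(Svc * /*provider*/) { return true; } // consent
//   };
//
//   static void
//   terminatePhase1(Svc * root)
//   {
//       std::vector<Svc*> makeInactive { root };   // plays the OSArray
//       while (!makeInactive.empty()) {
//           Svc * victim = makeInactive.front();
//           makeInactive.erase(makeInactive.begin());
//           if (victim->inactive) continue;        // reached via another path
//           victim->inactive = true;               // kIOServiceInactiveState
//           for (Svc * client : victim->clients) {
//               if (client->requestTerminate(victim)) {
//                   makeInactive.push_back(client); // client joins the sweep
//               }
//           }
//       }
//   }
// ---------------------------------------------------------------------------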
IOSERVICE_TERMINATE_REQUEST_OK - : IOSERVICE_TERMINATE_REQUEST_FAIL), - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); - - if( ok) - makeInactive->setObject( client ); - } - iter->release(); - } - } - victim->release(); - victim = (IOService *) makeInactive->getObject(0); - if( victim) { - victim->retain(); - makeInactive->removeObject(0); - } - } - makeInactive->release(); - - while ((victim = (IOService *) waitingInactive->getObject(0))) - { + makeInactive = OSArray::withCapacity( 16 ); + waitingInactive = OSArray::withCapacity( 16 ); + if (!makeInactive || !waitingInactive) { + return false; + } + + victim = this; victim->retain(); - waitingInactive->removeObject(0); - victim->lockForArbitration(); - victim->__state[1] &= ~kIOServiceTermPhase1State; - if (kIOServiceTerm1WaiterState & victim->__state[1]) - { - victim->__state[1] &= ~kIOServiceTerm1WaiterState; - TLOG("%s[0x%qx]::wakePhase1\n", victim->getName(), victim->getRegistryEntryID()); - IOLockLock( gIOServiceBusyLock ); - thread_wakeup( (event_t) &victim->__state[1]); - IOLockUnlock( gIOServiceBusyLock ); - } - victim->unlockForArbitration(); - victim->release(); - } - waitingInactive->release(); - - if( startPhase2) - { - retain(); - lockForArbitration(); - scheduleTerminatePhase2(options); - unlockForArbitration(); - release(); - } + while (victim) { + didInactive = victim->lockForArbitration( true ); + if (didInactive) { + uint64_t regID1 = victim->getRegistryEntryID(); + IOServiceTrace(IOSERVICE_TERM_SET_INACTIVE, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) victim->__state[1], + (uintptr_t) 0); + + enum { kRP1 = kIOServiceRecursing | kIOServiceTermPhase1State }; + didInactive = (kRP1 == (victim->__state[1] & kRP1)) + || (0 == (victim->__state[0] & kIOServiceInactiveState)); + + if (!didInactive) { + // a multiply attached IOService can be visited twice + if (-1U == waitingInactive->getNextIndexOfObject(victim, 0)) { + do{ + IOLockLock(gIOServiceBusyLock); + wait = (victim->__state[1] & kIOServiceTermPhase1State); + if (wait) { + TLOG("%s[0x%qx]::waitPhase1(%s[0x%qx])\n", + getName(), getRegistryEntryID(), victim->getName(), victim->getRegistryEntryID()); + victim->__state[1] |= kIOServiceTerm1WaiterState; + victim->unlockForArbitration(); + assert_wait((event_t)&victim->__state[1], THREAD_UNINT); + } + IOLockUnlock(gIOServiceBusyLock); + if (wait) { + waitResult = thread_block(THREAD_CONTINUE_NULL); + TLOG("%s[0x%qx]::did waitPhase1(%s[0x%qx])\n", + getName(), getRegistryEntryID(), victim->getName(), victim->getRegistryEntryID()); + victim->lockForArbitration(); + } + }while (wait && (waitResult != THREAD_TIMED_OUT)); + } + } else { + victim->__state[0] |= kIOServiceInactiveState; + victim->__state[0] &= ~(kIOServiceRegisteredState | kIOServiceMatchedState + | kIOServiceFirstPublishState | kIOServiceFirstMatchState); + victim->__state[1] &= ~kIOServiceRecursing; + victim->__state[1] |= kIOServiceTermPhase1State; + waitingInactive->headQ(victim); + if (victim == this) { + if (kIOServiceTerminateNeedWillTerminate & options) { + victim->__state[1] |= kIOServiceNeedWillTerminate; + } + } + victim->_adjustBusy( 1 ); + } + victim->unlockForArbitration(); + } + if (victim == this) { + startPhase2 = didInactive; + } + if (didInactive) { + OSArray * notifiers; + notifiers = victim->copyNotifiers(gIOTerminatedNotification, 0, 0xffffffff); + victim->invokeNotifiers(¬ifiers); + + IOUserClient::destroyUserReferences( victim ); + + iter = 
victim->getClientIterator(); + if (iter) { + while ((client = (IOService *) iter->getNextObject())) { + TLOG("%s[0x%qx]::requestTerminate(%s[0x%qx], %08llx)\n", + client->getName(), client->getRegistryEntryID(), + victim->getName(), victim->getRegistryEntryID(), (long long)options); + ok = client->requestTerminate( victim, options ); + TLOG("%s[0x%qx]::requestTerminate(%s[0x%qx], ok = %d)\n", + client->getName(), client->getRegistryEntryID(), + victim->getName(), victim->getRegistryEntryID(), ok); + + uint64_t regID1 = client->getRegistryEntryID(); + uint64_t regID2 = victim->getRegistryEntryID(); + IOServiceTrace( + (ok ? IOSERVICE_TERMINATE_REQUEST_OK + : IOSERVICE_TERMINATE_REQUEST_FAIL), + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); + + if (ok) { + makeInactive->setObject( client ); + } + } + iter->release(); + } + } + victim->release(); + victim = (IOService *) makeInactive->getObject(0); + if (victim) { + victim->retain(); + makeInactive->removeObject(0); + } + } + makeInactive->release(); + + while ((victim = (IOService *) waitingInactive->getObject(0))) { + victim->retain(); + waitingInactive->removeObject(0); + + victim->lockForArbitration(); + victim->__state[1] &= ~kIOServiceTermPhase1State; + if (kIOServiceTerm1WaiterState & victim->__state[1]) { + victim->__state[1] &= ~kIOServiceTerm1WaiterState; + TLOG("%s[0x%qx]::wakePhase1\n", victim->getName(), victim->getRegistryEntryID()); + IOLockLock( gIOServiceBusyLock ); + thread_wakeup((event_t) &victim->__state[1]); + IOLockUnlock( gIOServiceBusyLock ); + } + victim->unlockForArbitration(); + victim->release(); + } + waitingInactive->release(); + + if (startPhase2) { + retain(); + lockForArbitration(); + scheduleTerminatePhase2(options); + unlockForArbitration(); + release(); + } - return( true ); + return true; } -void IOService::setTerminateDefer(IOService * provider, bool defer) +void +IOService::setTerminateDefer(IOService * provider, bool defer) { - lockForArbitration(); - if (defer) __state[1] |= kIOServiceStartState; - else __state[1] &= ~kIOServiceStartState; - unlockForArbitration(); + lockForArbitration(); + if (defer) { + __state[1] |= kIOServiceStartState; + } else { + __state[1] &= ~kIOServiceStartState; + } + unlockForArbitration(); - if (provider && !defer) - { - provider->lockForArbitration(); - provider->scheduleTerminatePhase2(); - provider->unlockForArbitration(); - } + if (provider && !defer) { + provider->lockForArbitration(); + provider->scheduleTerminatePhase2(); + provider->unlockForArbitration(); + } } // Must call this while holding gJobsLock -void IOService::waitToBecomeTerminateThread(void) -{ - IOLockAssert(gJobsLock, kIOLockAssertOwned); - bool wait; - do { - wait = (gIOTerminateThread != THREAD_NULL); - if (wait) { - IOLockSleep(gJobsLock, &gIOTerminateThread, THREAD_UNINT); - } - } while (wait); - gIOTerminateThread = current_thread(); +void +IOService::waitToBecomeTerminateThread(void) +{ + IOLockAssert(gJobsLock, kIOLockAssertOwned); + bool wait; + do { + wait = (gIOTerminateThread != THREAD_NULL); + if (wait) { + IOLockSleep(gJobsLock, &gIOTerminateThread, THREAD_UNINT); + } + } while (wait); + gIOTerminateThread = current_thread(); } // call with lockForArbitration -void IOService::scheduleTerminatePhase2( IOOptionBits options ) -{ - AbsoluteTime deadline; - uint64_t regID1; - int waitResult = THREAD_AWAKENED; - bool wait = false, haveDeadline = false; - - if (!(__state[0] & kIOServiceInactiveState)) return; - - regID1 = 
getRegistryEntryID(); - IOServiceTrace( - IOSERVICE_TERM_SCHED_PHASE2, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) __state[1], - (uintptr_t) options); - - if (__state[1] & kIOServiceTermPhase1State) return; - - retain(); - unlockForArbitration(); - options |= kIOServiceRequired; - IOLockLock( gJobsLock ); - - if( (options & kIOServiceSynchronous) - && (current_thread() != gIOTerminateThread)) { - - waitToBecomeTerminateThread(); - gIOTerminatePhase2List->setObject( this ); - gIOTerminateWork++; - - do { - while( gIOTerminateWork ) - terminateWorker( options ); - wait = (0 != (__state[1] & kIOServiceBusyStateMask)); - if( wait) { - /* wait for the victim to go non-busy */ - if( !haveDeadline) { - clock_interval_to_deadline( 15, kSecondScale, &deadline ); - haveDeadline = true; - } - /* let others do work while we wait */ - gIOTerminateThread = 0; - IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); - waitResult = IOLockSleepDeadline( gJobsLock, &gIOTerminateWork, - deadline, THREAD_UNINT ); - if (__improbable(waitResult == THREAD_TIMED_OUT)) { - panic("%s[0x%qx]::terminate(kIOServiceSynchronous) timeout\n", getName(), getRegistryEntryID()); - } - waitToBecomeTerminateThread(); - } - } while(gIOTerminateWork || (wait && (waitResult != THREAD_TIMED_OUT))); - - gIOTerminateThread = 0; - IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); - - } else { - // ! kIOServiceSynchronous - - gIOTerminatePhase2List->setObject( this ); - if( 0 == gIOTerminateWork++) { - assert(gIOTerminateWorkerThread); - IOLockWakeup(gJobsLock, (event_t)&gIOTerminateWork, /* one-thread */ false ); - } - } - - IOLockUnlock( gJobsLock ); - lockForArbitration(); - release(); +void +IOService::scheduleTerminatePhase2( IOOptionBits options ) +{ + AbsoluteTime deadline; + uint64_t regID1; + int waitResult = THREAD_AWAKENED; + bool wait = false, haveDeadline = false; + + if (!(__state[0] & kIOServiceInactiveState)) { + return; + } + + regID1 = getRegistryEntryID(); + IOServiceTrace( + IOSERVICE_TERM_SCHED_PHASE2, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) __state[1], + (uintptr_t) options); + + if (__state[1] & kIOServiceTermPhase1State) { + return; + } + + retain(); + unlockForArbitration(); + options |= kIOServiceRequired; + IOLockLock( gJobsLock ); + + if ((options & kIOServiceSynchronous) + && (current_thread() != gIOTerminateThread)) { + waitToBecomeTerminateThread(); + gIOTerminatePhase2List->setObject( this ); + gIOTerminateWork++; + + do { + while (gIOTerminateWork) { + terminateWorker( options ); + } + wait = (0 != (__state[1] & kIOServiceBusyStateMask)); + if (wait) { + /* wait for the victim to go non-busy */ + if (!haveDeadline) { + clock_interval_to_deadline( 15, kSecondScale, &deadline ); + haveDeadline = true; + } + /* let others do work while we wait */ + gIOTerminateThread = 0; + IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); + waitResult = IOLockSleepDeadline( gJobsLock, &gIOTerminateWork, + deadline, THREAD_UNINT ); + if (__improbable(waitResult == THREAD_TIMED_OUT)) { + panic("%s[0x%qx]::terminate(kIOServiceSynchronous) timeout\n", getName(), getRegistryEntryID()); + } + waitToBecomeTerminateThread(); + } + } while (gIOTerminateWork || (wait && (waitResult != THREAD_TIMED_OUT))); + + gIOTerminateThread = 0; + IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); + } else { + // ! 
kIOServiceSynchronous + + gIOTerminatePhase2List->setObject( this ); + if (0 == gIOTerminateWork++) { + assert(gIOTerminateWorkerThread); + IOLockWakeup(gJobsLock, (event_t)&gIOTerminateWork, /* one-thread */ false ); + } + } + + IOLockUnlock( gJobsLock ); + lockForArbitration(); + release(); } __attribute__((__noreturn__)) -void IOService::terminateThread( void * arg, wait_result_t waitResult ) -{ - // IOLockSleep re-acquires the lock on wakeup, so we only need to do this once - IOLockLock(gJobsLock); - while (true) { - if (gIOTerminateThread != gIOTerminateWorkerThread) { - waitToBecomeTerminateThread(); - } +void +IOService::terminateThread( void * arg, wait_result_t waitResult ) +{ + // IOLockSleep re-acquires the lock on wakeup, so we only need to do this once + IOLockLock(gJobsLock); + while (true) { + if (gIOTerminateThread != gIOTerminateWorkerThread) { + waitToBecomeTerminateThread(); + } - while (gIOTerminateWork) - terminateWorker( (uintptr_t)arg ); + while (gIOTerminateWork) { + terminateWorker((uintptr_t)arg ); + } - gIOTerminateThread = 0; - IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); - IOLockSleep(gJobsLock, &gIOTerminateWork, THREAD_UNINT); - } + gIOTerminateThread = 0; + IOLockWakeup( gJobsLock, (event_t) &gIOTerminateThread, /* one-thread */ false); + IOLockSleep(gJobsLock, &gIOTerminateWork, THREAD_UNINT); + } } -void IOService::scheduleStop( IOService * provider ) +void +IOService::scheduleStop( IOService * provider ) { - uint64_t regID1 = getRegistryEntryID(); - uint64_t regID2 = provider->getRegistryEntryID(); + uint64_t regID1 = getRegistryEntryID(); + uint64_t regID2 = provider->getRegistryEntryID(); - TLOG("%s[0x%qx]::scheduleStop(%s[0x%qx])\n", getName(), regID1, provider->getName(), regID2); - IOServiceTrace( - IOSERVICE_TERMINATE_SCHEDULE_STOP, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); + TLOG("%s[0x%qx]::scheduleStop(%s[0x%qx])\n", getName(), regID1, provider->getName(), regID2); + IOServiceTrace( + IOSERVICE_TERMINATE_SCHEDULE_STOP, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); - IOLockLock( gJobsLock ); - gIOStopList->tailQ( this ); - gIOStopProviderList->tailQ( provider ); + IOLockLock( gJobsLock ); + gIOStopList->tailQ( this ); + gIOStopProviderList->tailQ( provider ); - if( 0 == gIOTerminateWork++) { - assert(gIOTerminateWorkerThread); - IOLockWakeup(gJobsLock, (event_t)&gIOTerminateWork, /* one-thread */ false ); - } + if (0 == gIOTerminateWork++) { + assert(gIOTerminateWorkerThread); + IOLockWakeup(gJobsLock, (event_t)&gIOTerminateWork, /* one-thread */ false ); + } - IOLockUnlock( gJobsLock ); + IOLockUnlock( gJobsLock ); } -void IOService::scheduleFinalize(bool now) +void +IOService::scheduleFinalize(bool now) { - uint64_t regID1 = getRegistryEntryID(); - - TLOG("%s[0x%qx]::scheduleFinalize\n", getName(), regID1); - IOServiceTrace( - IOSERVICE_TERMINATE_SCHEDULE_FINALIZE, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - 0, 0); + uint64_t regID1 = getRegistryEntryID(); - if (now || IOUserClient::finalizeUserReferences(this)) - { - IOLockLock( gJobsLock ); - gIOFinalizeList->tailQ(this); - if( 0 == gIOTerminateWork++) { - assert(gIOTerminateWorkerThread); - IOLockWakeup(gJobsLock, (event_t)&gIOTerminateWork, /* one-thread */ false ); + TLOG("%s[0x%qx]::scheduleFinalize\n", getName(), regID1); + IOServiceTrace( + IOSERVICE_TERMINATE_SCHEDULE_FINALIZE, + (uintptr_t) regID1, + (uintptr_t) (regID1 
>> 32), + 0, 0); + + if (now || IOUserClient::finalizeUserReferences(this)) { + IOLockLock( gJobsLock ); + gIOFinalizeList->tailQ(this); + if (0 == gIOTerminateWork++) { + assert(gIOTerminateWorkerThread); + IOLockWakeup(gJobsLock, (event_t)&gIOTerminateWork, /* one-thread */ false ); + } + IOLockUnlock( gJobsLock ); } - IOLockUnlock( gJobsLock ); - } } -bool IOService::willTerminate( IOService * provider, IOOptionBits options ) +bool +IOService::willTerminate( IOService * provider, IOOptionBits options ) { - return( true ); + return true; } -bool IOService::didTerminate( IOService * provider, IOOptionBits options, bool * defer ) +bool +IOService::didTerminate( IOService * provider, IOOptionBits options, bool * defer ) { - if( false == *defer) { + if (false == *defer) { + if (lockForArbitration( true )) { + if (false == provider->handleIsOpen( this )) { + scheduleStop( provider ); + } + // -- compat + else { + message( kIOMessageServiceIsRequestingClose, provider, (void *)(uintptr_t) options ); + if (false == provider->handleIsOpen( this )) { + scheduleStop( provider ); + } + } + // -- + unlockForArbitration(); + } + } + + return true; +} - if( lockForArbitration( true )) { - if( false == provider->handleIsOpen( this )) - scheduleStop( provider ); - // -- compat - else { - message( kIOMessageServiceIsRequestingClose, provider, (void *)(uintptr_t) options ); - if( false == provider->handleIsOpen( this )) - scheduleStop( provider ); - } - // -- - unlockForArbitration(); - } - } +void +IOService::actionWillTerminate( IOService * victim, IOOptionBits options, + OSArray * doPhase2List, + void *unused2 __unused, + void *unused3 __unused ) +{ + OSIterator * iter; + IOService * client; + bool ok; + uint64_t regID1, regID2 = victim->getRegistryEntryID(); - return( true ); + iter = victim->getClientIterator(); + if (iter) { + while ((client = (IOService *) iter->getNextObject())) { + regID1 = client->getRegistryEntryID(); + TLOG("%s[0x%qx]::willTerminate(%s[0x%qx], %08llx)\n", + client->getName(), regID1, + victim->getName(), regID2, (long long)options); + IOServiceTrace( + IOSERVICE_TERMINATE_WILL, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); + + ok = client->willTerminate( victim, options ); + doPhase2List->tailQ( client ); + } + iter->release(); + } } -void IOService::actionWillTerminate( IOService * victim, IOOptionBits options, - OSArray * doPhase2List, - void *unused2 __unused, - void *unused3 __unused ) +void +IOService::actionDidTerminate( IOService * victim, IOOptionBits options, + void *unused1 __unused, void *unused2 __unused, + void *unused3 __unused ) { - OSIterator * iter; - IOService * client; - bool ok; - uint64_t regID1, regID2 = victim->getRegistryEntryID(); + OSIterator * iter; + IOService * client; + bool defer; + uint64_t regID1, regID2 = victim->getRegistryEntryID(); - iter = victim->getClientIterator(); - if( iter) { - while( (client = (IOService *) iter->getNextObject())) { + victim->messageClients( kIOMessageServiceIsTerminated, (void *)(uintptr_t) options ); - regID1 = client->getRegistryEntryID(); - TLOG("%s[0x%qx]::willTerminate(%s[0x%qx], %08llx)\n", - client->getName(), regID1, - victim->getName(), regID2, (long long)options); - IOServiceTrace( - IOSERVICE_TERMINATE_WILL, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); + iter = victim->getClientIterator(); + if (iter) { + while ((client = (IOService *) iter->getNextObject())) { + regID1 = 
client->getRegistryEntryID(); + TLOG("%s[0x%qx]::didTerminate(%s[0x%qx], %08llx)\n", + client->getName(), regID1, + victim->getName(), regID2, (long long)options); + defer = false; + client->didTerminate( victim, options, &defer ); + + IOServiceTrace( + (defer ? IOSERVICE_TERMINATE_DID_DEFER + : IOSERVICE_TERMINATE_DID), + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); + + TLOG("%s[0x%qx]::didTerminate(%s[0x%qx], defer %d)\n", + client->getName(), regID1, + victim->getName(), regID2, defer); + } + iter->release(); + } +} + + +void +IOService::actionWillStop( IOService * victim, IOOptionBits options, + void *unused1 __unused, void *unused2 __unused, + void *unused3 __unused ) +{ + OSIterator * iter; + IOService * provider; + bool ok; + uint64_t regID1, regID2 = victim->getRegistryEntryID(); - ok = client->willTerminate( victim, options ); - doPhase2List->tailQ( client ); - } - iter->release(); - } + iter = victim->getProviderIterator(); + if (iter) { + while ((provider = (IOService *) iter->getNextObject())) { + regID1 = provider->getRegistryEntryID(); + TLOG("%s[0x%qx]::willTerminate(%s[0x%qx], %08llx)\n", + victim->getName(), regID2, + provider->getName(), regID1, (long long)options); + IOServiceTrace( + IOSERVICE_TERMINATE_WILL, + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32), + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32)); + + ok = victim->willTerminate( provider, options ); + } + iter->release(); + } } -void IOService::actionDidTerminate( IOService * victim, IOOptionBits options, - void *unused1 __unused, void *unused2 __unused, - void *unused3 __unused ) +void +IOService::actionDidStop( IOService * victim, IOOptionBits options, + void *unused1 __unused, void *unused2 __unused, + void *unused3 __unused ) { - OSIterator * iter; - IOService * client; - bool defer; - uint64_t regID1, regID2 = victim->getRegistryEntryID(); + OSIterator * iter; + IOService * provider; + bool defer = false; + uint64_t regID1, regID2 = victim->getRegistryEntryID(); - victim->messageClients( kIOMessageServiceIsTerminated, (void *)(uintptr_t) options ); + iter = victim->getProviderIterator(); + if (iter) { + while ((provider = (IOService *) iter->getNextObject())) { + regID1 = provider->getRegistryEntryID(); + TLOG("%s[0x%qx]::didTerminate(%s[0x%qx], %08llx)\n", + victim->getName(), regID2, + provider->getName(), regID1, (long long)options); + victim->didTerminate( provider, options, &defer ); - iter = victim->getClientIterator(); - if( iter) { - while( (client = (IOService *) iter->getNextObject())) { + IOServiceTrace( + (defer ? IOSERVICE_TERMINATE_DID_DEFER + : IOSERVICE_TERMINATE_DID), + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32), + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32)); + + TLOG("%s[0x%qx]::didTerminate(%s[0x%qx], defer %d)\n", + victim->getName(), regID2, + provider->getName(), regID1, defer); + } + iter->release(); + } +} - regID1 = client->getRegistryEntryID(); - TLOG("%s[0x%qx]::didTerminate(%s[0x%qx], %08llx)\n", - client->getName(), regID1, - victim->getName(), regID2, (long long)options); - defer = false; - client->didTerminate( victim, options, &defer ); - IOServiceTrace( - (defer ? 
IOSERVICE_TERMINATE_DID_DEFER - : IOSERVICE_TERMINATE_DID), - (uintptr_t) regID1, +void +IOService::actionFinalize( IOService * victim, IOOptionBits options, + void *unused1 __unused, void *unused2 __unused, + void *unused3 __unused ) +{ + uint64_t regID1 = victim->getRegistryEntryID(); + TLOG("%s[0x%qx]::finalize(%08llx)\n", victim->getName(), regID1, (long long)options); + IOServiceTrace( + IOSERVICE_TERMINATE_FINALIZE, + (uintptr_t) regID1, (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); + 0, 0); + + victim->finalize( options ); +} - TLOG("%s[0x%qx]::didTerminate(%s[0x%qx], defer %d)\n", - client->getName(), regID1, - victim->getName(), regID2, defer); - } - iter->release(); - } -} - - -void IOService::actionWillStop( IOService * victim, IOOptionBits options, - void *unused1 __unused, void *unused2 __unused, - void *unused3 __unused ) +void +IOService::actionStop( IOService * provider, IOService * client, + void *unused1 __unused, void *unused2 __unused, + void *unused3 __unused ) { - OSIterator * iter; - IOService * provider; - bool ok; - uint64_t regID1, regID2 = victim->getRegistryEntryID(); + uint64_t regID1 = provider->getRegistryEntryID(); + uint64_t regID2 = client->getRegistryEntryID(); - iter = victim->getProviderIterator(); - if( iter) { - while( (provider = (IOService *) iter->getNextObject())) { - - regID1 = provider->getRegistryEntryID(); - TLOG("%s[0x%qx]::willTerminate(%s[0x%qx], %08llx)\n", - victim->getName(), regID2, - provider->getName(), regID1, (long long)options); - IOServiceTrace( - IOSERVICE_TERMINATE_WILL, - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32), - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32)); - - ok = victim->willTerminate( provider, options ); - } - iter->release(); - } -} - -void IOService::actionDidStop( IOService * victim, IOOptionBits options, - void *unused1 __unused, void *unused2 __unused, - void *unused3 __unused ) -{ - OSIterator * iter; - IOService * provider; - bool defer = false; - uint64_t regID1, regID2 = victim->getRegistryEntryID(); - - iter = victim->getProviderIterator(); - if( iter) { - while( (provider = (IOService *) iter->getNextObject())) { - - regID1 = provider->getRegistryEntryID(); - TLOG("%s[0x%qx]::didTerminate(%s[0x%qx], %08llx)\n", - victim->getName(), regID2, - provider->getName(), regID1, (long long)options); - victim->didTerminate( provider, options, &defer ); - - IOServiceTrace( - (defer ? 
IOSERVICE_TERMINATE_DID_DEFER - : IOSERVICE_TERMINATE_DID), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32), - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32)); - - TLOG("%s[0x%qx]::didTerminate(%s[0x%qx], defer %d)\n", - victim->getName(), regID2, - provider->getName(), regID1, defer); - } - iter->release(); - } -} - - -void IOService::actionFinalize( IOService * victim, IOOptionBits options, - void *unused1 __unused, void *unused2 __unused, - void *unused3 __unused ) -{ - uint64_t regID1 = victim->getRegistryEntryID(); - TLOG("%s[0x%qx]::finalize(%08llx)\n", victim->getName(), regID1, (long long)options); - IOServiceTrace( - IOSERVICE_TERMINATE_FINALIZE, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - 0, 0); - - victim->finalize( options ); -} - -void IOService::actionStop( IOService * provider, IOService * client, - void *unused1 __unused, void *unused2 __unused, - void *unused3 __unused ) -{ - uint64_t regID1 = provider->getRegistryEntryID(); - uint64_t regID2 = client->getRegistryEntryID(); - - TLOG("%s[0x%qx]::stop(%s[0x%qx])\n", client->getName(), regID2, provider->getName(), regID1); - IOServiceTrace( - IOSERVICE_TERMINATE_STOP, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); - - client->stop( provider ); - if( provider->isOpen( client )) - provider->close( client ); - - TLOG("%s[0x%qx]::detach(%s[0x%qx])\n", client->getName(), regID2, provider->getName(), regID1); - client->detach( provider ); -} - -void IOService::terminateWorker( IOOptionBits options ) -{ - OSArray * doPhase2List; - OSArray * didPhase2List; - OSSet * freeList; - OSIterator * iter; - UInt32 workDone; - IOService * victim; - IOService * client; - IOService * provider; - unsigned int idx; - bool moreToDo; - bool doPhase2; - bool doPhase3; - - options |= kIOServiceRequired; - - doPhase2List = OSArray::withCapacity( 16 ); - didPhase2List = OSArray::withCapacity( 16 ); - freeList = OSSet::withCapacity( 16 ); - if( (0 == doPhase2List) || (0 == didPhase2List) || (0 == freeList)) - return; - - do { - workDone = gIOTerminateWork; - - while( (victim = (IOService *) gIOTerminatePhase2List->getObject(0) )) { - - victim->retain(); - gIOTerminatePhase2List->removeObject(0); - IOLockUnlock( gJobsLock ); - - uint64_t regID1 = victim->getRegistryEntryID(); - IOServiceTrace( - IOSERVICE_TERM_START_PHASE2, + TLOG("%s[0x%qx]::stop(%s[0x%qx])\n", client->getName(), regID2, provider->getName(), regID1); + IOServiceTrace( + IOSERVICE_TERMINATE_STOP, (uintptr_t) regID1, (uintptr_t) (regID1 >> 32), - (uintptr_t) 0, - (uintptr_t) 0); + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); - while( victim ) { - - doPhase2 = victim->lockForArbitration( true ); - if( doPhase2) { - doPhase2 = (0 != (kIOServiceInactiveState & victim->__state[0])); - if( doPhase2) { + client->stop( provider ); + if (provider->isOpen( client )) { + provider->close( client ); + } + + TLOG("%s[0x%qx]::detach(%s[0x%qx])\n", client->getName(), regID2, provider->getName(), regID1); + client->detach( provider ); +} + +void +IOService::terminateWorker( IOOptionBits options ) +{ + OSArray * doPhase2List; + OSArray * didPhase2List; + OSSet * freeList; + OSIterator * iter; + UInt32 workDone; + IOService * victim; + IOService * client; + IOService * provider; + unsigned int idx; + bool moreToDo; + bool doPhase2; + bool doPhase3; + + options |= kIOServiceRequired; + + doPhase2List = OSArray::withCapacity( 16 ); + didPhase2List = OSArray::withCapacity( 16 ); + freeList = OSSet::withCapacity( 16 ); + if ((0 == 
doPhase2List) || (0 == didPhase2List) || (0 == freeList)) { + return; + } + + do { + workDone = gIOTerminateWork; + + while ((victim = (IOService *) gIOTerminatePhase2List->getObject(0))) { + victim->retain(); + gIOTerminatePhase2List->removeObject(0); + IOLockUnlock( gJobsLock ); uint64_t regID1 = victim->getRegistryEntryID(); IOServiceTrace( - IOSERVICE_TERM_TRY_PHASE2, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) victim->__state[1], - (uintptr_t) 0); + IOSERVICE_TERM_START_PHASE2, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) 0, + (uintptr_t) 0); + + while (victim) { + doPhase2 = victim->lockForArbitration( true ); + if (doPhase2) { + doPhase2 = (0 != (kIOServiceInactiveState & victim->__state[0])); + if (doPhase2) { + uint64_t regID1 = victim->getRegistryEntryID(); + IOServiceTrace( + IOSERVICE_TERM_TRY_PHASE2, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) victim->__state[1], + (uintptr_t) 0); + + doPhase2 = (0 == (victim->__state[1] & + (kIOServiceTermPhase1State + | kIOServiceTermPhase2State + | kIOServiceConfigState))); + + if (doPhase2 && (iter = victim->getClientIterator())) { + while (doPhase2 && (client = (IOService *) iter->getNextObject())) { + doPhase2 = (0 == (client->__state[1] & kIOServiceStartState)); + if (!doPhase2) { + uint64_t regID1 = client->getRegistryEntryID(); + IOServiceTrace( + IOSERVICE_TERM_UC_DEFER, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) client->__state[1], + (uintptr_t) 0); + TLOG("%s[0x%qx]::defer phase2(%s[0x%qx])\n", + victim->getName(), victim->getRegistryEntryID(), + client->getName(), client->getRegistryEntryID()); + } + } + iter->release(); + } + if (doPhase2) { + victim->__state[1] |= kIOServiceTermPhase2State; + } + } + victim->unlockForArbitration(); + } + if (doPhase2) { + if (kIOServiceNeedWillTerminate & victim->__state[1]) { + _workLoopAction((IOWorkLoop::Action) &actionWillStop, + victim, (void *)(uintptr_t) options, NULL ); + } - doPhase2 = (0 == (victim->__state[1] & - (kIOServiceTermPhase1State - | kIOServiceTermPhase2State - | kIOServiceConfigState))); + OSArray * notifiers; + notifiers = victim->copyNotifiers(gIOWillTerminateNotification, 0, 0xffffffff); + victim->invokeNotifiers(¬ifiers); - if (doPhase2 && (iter = victim->getClientIterator())) { - while (doPhase2 && (client = (IOService *) iter->getNextObject())) { - doPhase2 = (0 == (client->__state[1] & kIOServiceStartState)); - if (!doPhase2) - { - uint64_t regID1 = client->getRegistryEntryID(); - IOServiceTrace( - IOSERVICE_TERM_UC_DEFER, + _workLoopAction((IOWorkLoop::Action) &actionWillTerminate, + victim, (void *)(uintptr_t) options, (void *)(uintptr_t) doPhase2List ); + + didPhase2List->headQ( victim ); + } + victim->release(); + victim = (IOService *) doPhase2List->getObject(0); + if (victim) { + victim->retain(); + doPhase2List->removeObject(0); + } + } + + while ((victim = (IOService *) didPhase2List->getObject(0))) { + bool scheduleFinalize = false; + if (victim->lockForArbitration( true )) { + victim->__state[1] |= kIOServiceTermPhase3State; + scheduleFinalize = (0 == victim->getClient()); + victim->unlockForArbitration(); + } + _workLoopAction((IOWorkLoop::Action) &actionDidTerminate, + victim, (void *)(uintptr_t) options ); + if (kIOServiceNeedWillTerminate & victim->__state[1]) { + _workLoopAction((IOWorkLoop::Action) &actionDidStop, + victim, (void *)(uintptr_t) options, NULL ); + } + // no clients - will go to finalize + if (scheduleFinalize) { + victim->scheduleFinalize(false); + } 
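// The didPhase2List walk above completes the two-phase handshake: each
// client has now received willTerminate() and didTerminate(), and a victim
// whose client list emptied is queued for finalize. A minimal sketch of the
// driver-side overrides this protocol expects (MyDriver, fQuiescing and
// fOutstandingIO are hypothetical names, not part of this patch):
//
//     bool MyDriver::willTerminate(IOService * provider, IOOptionBits options)
//     {
//         fQuiescing = true;               // stop issuing new I/O now
//         return super::willTerminate(provider, options);
//     }
//
//     bool MyDriver::didTerminate(IOService * provider, IOOptionBits options, bool * defer)
//     {
//         *defer = (fOutstandingIO != 0);  // hold off stop()/detach() while busy
//         return super::didTerminate(provider, options, defer);
//     }
//
// A client that defers keeps its provider open; once the last request
// drains it calls provider->close(this), and IOService::close() (later in
// this file) schedules the deferred stop via scheduleStop().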
+ didPhase2List->removeObject(0); + } + IOLockLock( gJobsLock ); + } + + // phase 3 + do { + doPhase3 = false; + // finalize leaves + while ((victim = (IOService *) gIOFinalizeList->getObject(0))) { + bool sendFinal = false; + IOLockUnlock( gJobsLock ); + if (victim->lockForArbitration(true)) { + sendFinal = (0 == (victim->__state[1] & kIOServiceFinalized)); + if (sendFinal) { + victim->__state[1] |= kIOServiceFinalized; + } + victim->unlockForArbitration(); + } + if (sendFinal) { + _workLoopAction((IOWorkLoop::Action) &actionFinalize, + victim, (void *)(uintptr_t) options ); + } + IOLockLock( gJobsLock ); + // hold off free + freeList->setObject( victim ); + // safe if finalize list is append only + gIOFinalizeList->removeObject(0); + } + + for (idx = 0; + (!doPhase3) && (client = (IOService *) gIOStopList->getObject(idx));) { + provider = (IOService *) gIOStopProviderList->getObject(idx); + assert( provider ); + + uint64_t regID1 = provider->getRegistryEntryID(); + uint64_t regID2 = client->getRegistryEntryID(); + + if (!provider->isChild( client, gIOServicePlane )) { + // may be multiply queued - nop it + TLOG("%s[0x%qx]::nop stop(%s[0x%qx])\n", client->getName(), regID2, provider->getName(), regID1); + IOServiceTrace( + IOSERVICE_TERMINATE_STOP_NOP, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); + } else { + // a terminated client is not ready for stop if it has clients, skip it + bool deferStop = (0 != (kIOServiceInactiveState & client->__state[0])); + IOLockUnlock( gJobsLock ); + if (deferStop && client->lockForArbitration(true)) { + deferStop = (0 == (client->__state[1] & kIOServiceFinalized)); + //deferStop = (!deferStop && (0 != client->getClient())); + //deferStop = (0 != client->getClient()); + client->unlockForArbitration(); + if (deferStop) { + TLOG("%s[0x%qx]::defer stop()\n", client->getName(), regID2); + IOServiceTrace(IOSERVICE_TERMINATE_STOP_DEFER, + (uintptr_t) regID1, + (uintptr_t) (regID1 >> 32), + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); + + idx++; + IOLockLock( gJobsLock ); + continue; + } + } + _workLoopAction((IOWorkLoop::Action) &actionStop, + provider, (void *) client ); + IOLockLock( gJobsLock ); + // check the finalize list now + doPhase3 = true; + } + // hold off free + freeList->setObject( client ); + freeList->setObject( provider ); + + // safe if stop list is append only + gIOStopList->removeObject( idx ); + gIOStopProviderList->removeObject( idx ); + idx = 0; + } + } while (doPhase3); + + gIOTerminateWork -= workDone; + moreToDo = (gIOTerminateWork != 0); + + if (!moreToDo) { + TLOG("iokit terminate done, %d stops remain\n", gIOStopList->getCount()); + IOServiceTrace( + IOSERVICE_TERMINATE_DONE, + (uintptr_t) gIOStopList->getCount(), 0, 0, 0); + } + } while (moreToDo); + + IOLockUnlock( gJobsLock ); + + freeList->release(); + doPhase2List->release(); + didPhase2List->release(); + + IOLockLock( gJobsLock ); +} + +bool +IOService::finalize( IOOptionBits options ) +{ + OSIterator * iter; + IOService * provider; + uint64_t regID1, regID2 = getRegistryEntryID(); + + iter = getProviderIterator(); + assert( iter ); + + if (iter) { + while ((provider = (IOService *) iter->getNextObject())) { + // -- compat + if (0 == (__state[1] & kIOServiceTermPhase3State)) { + /* we come down here on programmatic terminate */ + + regID1 = provider->getRegistryEntryID(); + TLOG("%s[0x%qx]::stop1(%s[0x%qx])\n", getName(), regID2, provider->getName(), regID1); + IOServiceTrace( + IOSERVICE_TERMINATE_STOP, 
(uintptr_t) regID1, (uintptr_t) (regID1 >> 32), - (uintptr_t) client->__state[1], - (uintptr_t) 0); - TLOG("%s[0x%qx]::defer phase2(%s[0x%qx])\n", - victim->getName(), victim->getRegistryEntryID(), - client->getName(), client->getRegistryEntryID()); + (uintptr_t) regID2, + (uintptr_t) (regID2 >> 32)); + + stop( provider ); + if (provider->isOpen( this )) { + provider->close( this ); + } + detach( provider ); + } else { + //-- + if (provider->lockForArbitration( true )) { + if (0 == (provider->__state[1] & kIOServiceTermPhase3State)) { + scheduleStop( provider ); + } + provider->unlockForArbitration(); } - } - iter->release(); } - if( doPhase2) - victim->__state[1] |= kIOServiceTermPhase2State; - } - victim->unlockForArbitration(); - } - if( doPhase2) { - - if (kIOServiceNeedWillTerminate & victim->__state[1]) { - _workLoopAction( (IOWorkLoop::Action) &actionWillStop, - victim, (void *)(uintptr_t) options, NULL ); - } - - OSArray * notifiers; - notifiers = victim->copyNotifiers(gIOWillTerminateNotification, 0, 0xffffffff); - victim->invokeNotifiers(¬ifiers); - - _workLoopAction( (IOWorkLoop::Action) &actionWillTerminate, - victim, (void *)(uintptr_t) options, (void *)(uintptr_t) doPhase2List ); - - didPhase2List->headQ( victim ); - } - victim->release(); - victim = (IOService *) doPhase2List->getObject(0); - if( victim) { - victim->retain(); - doPhase2List->removeObject(0); - } - } - - while( (victim = (IOService *) didPhase2List->getObject(0)) ) { - bool scheduleFinalize = false; - if( victim->lockForArbitration( true )) { - victim->__state[1] |= kIOServiceTermPhase3State; - scheduleFinalize = (0 == victim->getClient()); - victim->unlockForArbitration(); - } - _workLoopAction( (IOWorkLoop::Action) &actionDidTerminate, - victim, (void *)(uintptr_t) options ); - if (kIOServiceNeedWillTerminate & victim->__state[1]) { - _workLoopAction( (IOWorkLoop::Action) &actionDidStop, - victim, (void *)(uintptr_t) options, NULL ); - } - // no clients - will go to finalize - if (scheduleFinalize) victim->scheduleFinalize(false); - didPhase2List->removeObject(0); - } - IOLockLock( gJobsLock ); - } - - // phase 3 - do { - doPhase3 = false; - // finalize leaves - while( (victim = (IOService *) gIOFinalizeList->getObject(0))) { - bool sendFinal = false; - IOLockUnlock( gJobsLock ); - if (victim->lockForArbitration(true)) { - sendFinal = (0 == (victim->__state[1] & kIOServiceFinalized)); - if (sendFinal) victim->__state[1] |= kIOServiceFinalized; - victim->unlockForArbitration(); - } - if (sendFinal) { - _workLoopAction( (IOWorkLoop::Action) &actionFinalize, - victim, (void *)(uintptr_t) options ); - } - IOLockLock( gJobsLock ); - // hold off free - freeList->setObject( victim ); - // safe if finalize list is append only - gIOFinalizeList->removeObject(0); - } - - for( idx = 0; - (!doPhase3) && (client = (IOService *) gIOStopList->getObject(idx)); ) { - - provider = (IOService *) gIOStopProviderList->getObject(idx); - assert( provider ); - - uint64_t regID1 = provider->getRegistryEntryID(); - uint64_t regID2 = client->getRegistryEntryID(); - - if( !provider->isChild( client, gIOServicePlane )) { - // may be multiply queued - nop it - TLOG("%s[0x%qx]::nop stop(%s[0x%qx])\n", client->getName(), regID2, provider->getName(), regID1); - IOServiceTrace( - IOSERVICE_TERMINATE_STOP_NOP, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); - - } else { - // a terminated client is not ready for stop if it has clients, skip it - bool deferStop = (0 != 
(kIOServiceInactiveState & client->__state[0])); - IOLockUnlock( gJobsLock ); - if (deferStop && client->lockForArbitration(true)) { - deferStop = (0 == (client->__state[1] & kIOServiceFinalized)); - //deferStop = (!deferStop && (0 != client->getClient())); - //deferStop = (0 != client->getClient()); - client->unlockForArbitration(); - if (deferStop) { - TLOG("%s[0x%qx]::defer stop()\n", client->getName(), regID2); - IOServiceTrace(IOSERVICE_TERMINATE_STOP_DEFER, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); - - idx++; - IOLockLock( gJobsLock ); - continue; - } - } - _workLoopAction( (IOWorkLoop::Action) &actionStop, - provider, (void *) client ); - IOLockLock( gJobsLock ); - // check the finalize list now - doPhase3 = true; - } - // hold off free - freeList->setObject( client ); - freeList->setObject( provider ); - - // safe if stop list is append only - gIOStopList->removeObject( idx ); - gIOStopProviderList->removeObject( idx ); - idx = 0; - } - - } while( doPhase3 ); - - gIOTerminateWork -= workDone; - moreToDo = (gIOTerminateWork != 0); - - if( !moreToDo) { - TLOG("iokit terminate done, %d stops remain\n", gIOStopList->getCount()); - IOServiceTrace( - IOSERVICE_TERMINATE_DONE, - (uintptr_t) gIOStopList->getCount(), 0, 0, 0); - } - - } while( moreToDo ); - - IOLockUnlock( gJobsLock ); - - freeList->release(); - doPhase2List->release(); - didPhase2List->release(); - - IOLockLock( gJobsLock ); -} - -bool IOService::finalize( IOOptionBits options ) -{ - OSIterator * iter; - IOService * provider; - uint64_t regID1, regID2 = getRegistryEntryID(); - - iter = getProviderIterator(); - assert( iter ); - - if( iter) { - while( (provider = (IOService *) iter->getNextObject())) { - - // -- compat - if( 0 == (__state[1] & kIOServiceTermPhase3State)) { - /* we come down here on programmatic terminate */ - - regID1 = provider->getRegistryEntryID(); - TLOG("%s[0x%qx]::stop1(%s[0x%qx])\n", getName(), regID2, provider->getName(), regID1); - IOServiceTrace( - IOSERVICE_TERMINATE_STOP, - (uintptr_t) regID1, - (uintptr_t) (regID1 >> 32), - (uintptr_t) regID2, - (uintptr_t) (regID2 >> 32)); - - stop( provider ); - if( provider->isOpen( this )) - provider->close( this ); - detach( provider ); - } else { - //-- - if( provider->lockForArbitration( true )) { - if( 0 == (provider->__state[1] & kIOServiceTermPhase3State)) - scheduleStop( provider ); - provider->unlockForArbitration(); - } - } - } - iter->release(); - } - - return( true ); + } + iter->release(); + } + + return true; } #undef tailQ @@ -2904,29 +3007,33 @@ bool IOService::finalize( IOOptionBits options ) * Terminate */ -void IOService::doServiceTerminate( IOOptionBits options ) +void +IOService::doServiceTerminate( IOOptionBits options ) { } // a method in case someone needs to override it -bool IOService::terminateClient( IOService * client, IOOptionBits options ) +bool +IOService::terminateClient( IOService * client, IOOptionBits options ) { - bool ok; + bool ok; - if( client->isParent( this, gIOServicePlane, true)) - // we are the clients only provider - ok = client->terminate( options ); - else - ok = true; + if (client->isParent( this, gIOServicePlane, true)) { + // we are the clients only provider + ok = client->terminate( options ); + } else { + ok = true; + } - return( ok ); + return ok; } -bool IOService::terminate( IOOptionBits options ) +bool +IOService::terminate( IOOptionBits options ) { - options |= kIOServiceTerminate; + options |= kIOServiceTerminate; - return( 
terminatePhase1( options )); + return terminatePhase1( options ); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -2935,299 +3042,320 @@ bool IOService::terminate( IOOptionBits options ) * Open & close */ -struct ServiceOpenMessageContext -{ - IOService * service; - UInt32 type; - IOService * excludeClient; - IOOptionBits options; +struct ServiceOpenMessageContext { + IOService * service; + UInt32 type; + IOService * excludeClient; + IOOptionBits options; }; -static void serviceOpenMessageApplier( OSObject * object, void * ctx ) +static void +serviceOpenMessageApplier( OSObject * object, void * ctx ) { - ServiceOpenMessageContext * context = (ServiceOpenMessageContext *) ctx; + ServiceOpenMessageContext * context = (ServiceOpenMessageContext *) ctx; - if( object != context->excludeClient) - context->service->messageClient( context->type, object, (void *)(uintptr_t) context->options ); + if (object != context->excludeClient) { + context->service->messageClient( context->type, object, (void *)(uintptr_t) context->options ); + } } -bool IOService::open( IOService * forClient, - IOOptionBits options, - void * arg ) +bool +IOService::open( IOService * forClient, + IOOptionBits options, + void * arg ) { - bool ok; - ServiceOpenMessageContext context; + bool ok; + ServiceOpenMessageContext context; - context.service = this; - context.type = kIOMessageServiceIsAttemptingOpen; - context.excludeClient = forClient; - context.options = options; + context.service = this; + context.type = kIOMessageServiceIsAttemptingOpen; + context.excludeClient = forClient; + context.options = options; - applyToInterested( gIOGeneralInterest, - &serviceOpenMessageApplier, &context ); + applyToInterested( gIOGeneralInterest, + &serviceOpenMessageApplier, &context ); - if( false == lockForArbitration(false) ) - return false; + if (false == lockForArbitration(false)) { + return false; + } - ok = (0 == (__state[0] & kIOServiceInactiveState)); - if( ok) - ok = handleOpen( forClient, options, arg ); + ok = (0 == (__state[0] & kIOServiceInactiveState)); + if (ok) { + ok = handleOpen( forClient, options, arg ); + } - unlockForArbitration(); + unlockForArbitration(); - return( ok ); + return ok; } -void IOService::close( IOService * forClient, - IOOptionBits options ) +void +IOService::close( IOService * forClient, + IOOptionBits options ) { - bool wasClosed; - bool last = false; - - lockForArbitration(); + bool wasClosed; + bool last = false; - wasClosed = handleIsOpen( forClient ); - if( wasClosed) { - handleClose( forClient, options ); - last = (__state[1] & kIOServiceTermPhase3State); - } + lockForArbitration(); - unlockForArbitration(); + wasClosed = handleIsOpen( forClient ); + if (wasClosed) { + handleClose( forClient, options ); + last = (__state[1] & kIOServiceTermPhase3State); + } - if( last) - forClient->scheduleStop( this ); + unlockForArbitration(); - else if( wasClosed) { + if (last) { + forClient->scheduleStop( this ); + } else if (wasClosed) { + ServiceOpenMessageContext context; - ServiceOpenMessageContext context; - - context.service = this; - context.type = kIOMessageServiceWasClosed; - context.excludeClient = forClient; - context.options = options; + context.service = this; + context.type = kIOMessageServiceWasClosed; + context.excludeClient = forClient; + context.options = options; - applyToInterested( gIOGeneralInterest, - &serviceOpenMessageApplier, &context ); - } + applyToInterested( gIOGeneralInterest, + &serviceOpenMessageApplier, &context ); + } } -bool 
IOService::isOpen( const IOService * forClient ) const +bool +IOService::isOpen( const IOService * forClient ) const { - IOService * self = (IOService *) this; - bool ok; + IOService * self = (IOService *) this; + bool ok; - self->lockForArbitration(); + self->lockForArbitration(); - ok = handleIsOpen( forClient ); + ok = handleIsOpen( forClient ); - self->unlockForArbitration(); + self->unlockForArbitration(); - return( ok ); + return ok; } -bool IOService::handleOpen( IOService * forClient, - IOOptionBits options, - void * arg ) +bool +IOService::handleOpen( IOService * forClient, + IOOptionBits options, + void * arg ) { - bool ok; + bool ok; - ok = (0 == __owner); - if( ok ) - __owner = forClient; - - else if( options & kIOServiceSeize ) { - ok = (kIOReturnSuccess == messageClient( kIOMessageServiceIsRequestingClose, - __owner, (void *)(uintptr_t) options )); - if( ok && (0 == __owner )) - __owner = forClient; - else - ok = false; - } - return( ok ); + ok = (0 == __owner); + if (ok) { + __owner = forClient; + } else if (options & kIOServiceSeize) { + ok = (kIOReturnSuccess == messageClient( kIOMessageServiceIsRequestingClose, + __owner, (void *)(uintptr_t) options )); + if (ok && (0 == __owner)) { + __owner = forClient; + } else { + ok = false; + } + } + return ok; } -void IOService::handleClose( IOService * forClient, - IOOptionBits options ) +void +IOService::handleClose( IOService * forClient, + IOOptionBits options ) { - if( __owner == forClient) - __owner = 0; + if (__owner == forClient) { + __owner = 0; + } } -bool IOService::handleIsOpen( const IOService * forClient ) const +bool +IOService::handleIsOpen( const IOService * forClient ) const { - if( forClient) - return( __owner == forClient ); - else - return( __owner != forClient ); + if (forClient) { + return __owner == forClient; + } else { + return __owner != forClient; + } } /* * Probing & starting */ -static SInt32 IONotifyOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ) +static SInt32 +IONotifyOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ) { - const _IOServiceNotifier * obj1 = (const _IOServiceNotifier *) inObj1; - const _IOServiceNotifier * obj2 = (const _IOServiceNotifier *) inObj2; - SInt32 val1; - SInt32 val2; - - val1 = 0; - val2 = 0; + const _IOServiceNotifier * obj1 = (const _IOServiceNotifier *) inObj1; + const _IOServiceNotifier * obj2 = (const _IOServiceNotifier *) inObj2; + SInt32 val1; + SInt32 val2; - if ( obj1 ) - val1 = obj1->priority; + val1 = 0; + val2 = 0; - if ( obj2 ) - val2 = obj2->priority; - - return ( val1 - val2 ); -} + if (obj1) { + val1 = obj1->priority; + } -static SInt32 IOServiceObjectOrder( const OSObject * entry, void * ref) -{ - OSDictionary * dict; - IOService * service; - _IOServiceNotifier * notify; - OSSymbol * key = (OSSymbol *) ref; - OSNumber * offset; - OSObject * prop; - SInt32 result; + if (obj2) { + val2 = obj2->priority; + } - prop = 0; - result = kIODefaultProbeScore; - if( (dict = OSDynamicCast( OSDictionary, entry))) - offset = OSDynamicCast(OSNumber, dict->getObject( key )); - else if( (notify = OSDynamicCast( _IOServiceNotifier, entry))) - return( notify->priority ); - else if( (service = OSDynamicCast( IOService, entry))) - { - prop = service->copyProperty(key); - offset = OSDynamicCast(OSNumber, prop); - } - else { - assert( false ); - offset = 0; - } + return val1 - val2; +} + +static SInt32 +IOServiceObjectOrder( const OSObject * entry, void * ref) +{ + OSDictionary * dict; + IOService * 
service; + _IOServiceNotifier * notify; + OSSymbol * key = (OSSymbol *) ref; + OSNumber * offset; + OSObject * prop; + SInt32 result; + + prop = 0; + result = kIODefaultProbeScore; + if ((dict = OSDynamicCast( OSDictionary, entry))) { + offset = OSDynamicCast(OSNumber, dict->getObject( key )); + } else if ((notify = OSDynamicCast( _IOServiceNotifier, entry))) { + return notify->priority; + } else if ((service = OSDynamicCast( IOService, entry))) { + prop = service->copyProperty(key); + offset = OSDynamicCast(OSNumber, prop); + } else { + assert( false ); + offset = 0; + } - if (offset) result = offset->unsigned32BitValue(); + if (offset) { + result = offset->unsigned32BitValue(); + } - OSSafeReleaseNULL(prop); + OSSafeReleaseNULL(prop); - return (result); + return result; } -SInt32 IOServiceOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ) +SInt32 +IOServiceOrdering( const OSMetaClassBase * inObj1, const OSMetaClassBase * inObj2, void * ref ) { - const OSObject * obj1 = (const OSObject *) inObj1; - const OSObject * obj2 = (const OSObject *) inObj2; - SInt32 val1; - SInt32 val2; + const OSObject * obj1 = (const OSObject *) inObj1; + const OSObject * obj2 = (const OSObject *) inObj2; + SInt32 val1; + SInt32 val2; - val1 = 0; - val2 = 0; + val1 = 0; + val2 = 0; - if ( obj1 ) - val1 = IOServiceObjectOrder( obj1, ref ); + if (obj1) { + val1 = IOServiceObjectOrder( obj1, ref ); + } - if ( obj2 ) - val2 = IOServiceObjectOrder( obj2, ref ); + if (obj2) { + val2 = IOServiceObjectOrder( obj2, ref ); + } - return ( val1 - val2 ); + return val1 - val2; } -IOService * IOService::copyClientWithCategory( const OSSymbol * category ) +IOService * +IOService::copyClientWithCategory( const OSSymbol * category ) { - IOService * service = 0; - OSIterator * iter; - const OSSymbol * nextCat; + IOService * service = 0; + OSIterator * iter; + const OSSymbol * nextCat; - iter = getClientIterator(); - if( iter) { - while( (service = (IOService *) iter->getNextObject())) { - if( kIOServiceInactiveState & service->__state[0]) - continue; - nextCat = (const OSSymbol *) OSDynamicCast( OSSymbol, - service->getProperty( gIOMatchCategoryKey )); - if( category == nextCat) - { - service->retain(); - break; - } + iter = getClientIterator(); + if (iter) { + while ((service = (IOService *) iter->getNextObject())) { + if (kIOServiceInactiveState & service->__state[0]) { + continue; + } + nextCat = (const OSSymbol *) OSDynamicCast( OSSymbol, + service->getProperty( gIOMatchCategoryKey )); + if (category == nextCat) { + service->retain(); + break; + } + } + iter->release(); } - iter->release(); - } - return( service ); + return service; } -IOService * IOService::getClientWithCategory( const OSSymbol * category ) +IOService * +IOService::getClientWithCategory( const OSSymbol * category ) { - IOService * - service = copyClientWithCategory(category); - if (service) - service->release(); - return (service); + IOService * + service = copyClientWithCategory(category); + if (service) { + service->release(); + } + return service; } -bool IOService::invokeNotifier( _IOServiceNotifier * notify ) +bool +IOService::invokeNotifier( _IOServiceNotifier * notify ) { - _IOServiceNotifierInvocation invocation; - bool willNotify; - bool ret = true; - invocation.thread = current_thread(); + _IOServiceNotifierInvocation invocation; + bool willNotify; + bool ret = true; + invocation.thread = current_thread(); #if DEBUG_NOTIFIER_LOCKED - uint32_t count; - if ((count = isLockedForArbitration(0))) - { - IOLog("[%s, 
0x%x]\n", notify->type->getCStringNoCopy(), count); - panic("[%s, 0x%x]\n", notify->type->getCStringNoCopy(), count); - } + uint32_t count; + if ((count = isLockedForArbitration(0))) { + IOLog("[%s, 0x%x]\n", notify->type->getCStringNoCopy(), count); + panic("[%s, 0x%x]\n", notify->type->getCStringNoCopy(), count); + } #endif /* DEBUG_NOTIFIER_LOCKED */ - LOCKWRITENOTIFY(); - willNotify = (0 != (kIOServiceNotifyEnable & notify->state)); - - if( willNotify) { - queue_enter( ¬ify->handlerInvocations, &invocation, - _IOServiceNotifierInvocation *, link ); - } - UNLOCKNOTIFY(); + LOCKWRITENOTIFY(); + willNotify = (0 != (kIOServiceNotifyEnable & notify->state)); - if( willNotify) { + if (willNotify) { + queue_enter( ¬ify->handlerInvocations, &invocation, + _IOServiceNotifierInvocation *, link ); + } + UNLOCKNOTIFY(); - ret = (*notify->handler)(notify->target, notify->ref, this, notify); + if (willNotify) { + ret = (*notify->handler)(notify->target, notify->ref, this, notify); - LOCKWRITENOTIFY(); - queue_remove( ¬ify->handlerInvocations, &invocation, - _IOServiceNotifierInvocation *, link ); - if( kIOServiceNotifyWaiter & notify->state) { - notify->state &= ~kIOServiceNotifyWaiter; - WAKEUPNOTIFY( notify ); - } - UNLOCKNOTIFY(); - } + LOCKWRITENOTIFY(); + queue_remove( ¬ify->handlerInvocations, &invocation, + _IOServiceNotifierInvocation *, link ); + if (kIOServiceNotifyWaiter & notify->state) { + notify->state &= ~kIOServiceNotifyWaiter; + WAKEUPNOTIFY( notify ); + } + UNLOCKNOTIFY(); + } - return( ret ); + return ret; } -bool IOService::invokeNotifiers(OSArray ** willSend) +bool +IOService::invokeNotifiers(OSArray ** willSend) { - OSArray * array; - _IOServiceNotifier * notify; - bool ret = true; + OSArray * array; + _IOServiceNotifier * notify; + bool ret = true; - array = *willSend; - if (!array) return (true); - *willSend = 0; + array = *willSend; + if (!array) { + return true; + } + *willSend = 0; - for( unsigned int idx = 0; - (notify = (_IOServiceNotifier *) array->getObject(idx)); - idx++) { - ret &= invokeNotifier(notify); - } - array->release(); + for (unsigned int idx = 0; + (notify = (_IOServiceNotifier *) array->getObject(idx)); + idx++) { + ret &= invokeNotifier(notify); + } + array->release(); - return (ret); + return ret; } @@ -3236,302 +3364,317 @@ bool IOService::invokeNotifiers(OSArray ** willSend) * called on the provider instance */ -void IOService::probeCandidates( OSOrderedSet * matches ) -{ - OSDictionary * match = 0; - OSSymbol * symbol; - IOService * inst; - IOService * newInst; - OSDictionary * props; - SInt32 score; - OSNumber * newPri; - OSOrderedSet * familyMatches = 0; - OSOrderedSet * startList; - OSDictionary * startDict = 0; - const OSSymbol * category; - OSIterator * iter; - _IOServiceNotifier * notify; - OSObject * nextMatch = 0; - bool started; - bool needReloc = false; +void +IOService::probeCandidates( OSOrderedSet * matches ) +{ + OSDictionary * match = 0; + OSSymbol * symbol; + IOService * inst; + IOService * newInst; + OSDictionary * props; + SInt32 score; + OSNumber * newPri; + OSOrderedSet * familyMatches = 0; + OSOrderedSet * startList; + OSDictionary * startDict = 0; + const OSSymbol * category; + OSIterator * iter; + _IOServiceNotifier * notify; + OSObject * nextMatch = 0; + bool started; + bool needReloc = false; #if IOMATCHDEBUG - SInt64 debugFlags; + SInt64 debugFlags; #endif - IOService * client = NULL; + IOService * client = NULL; - assert( matches ); - while( !needReloc && (nextMatch = matches->getFirstObject())) { + assert( matches ); + 
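// The matching loop below drains the ordered set: _IOServiceNotifier
// entries have their registration callbacks invoked, while OSDictionary
// personalities are filtered by match category, re-ranked on
// gIOProbeScoreKey, then instantiated and probed further down. A minimal
// sketch of the probe() override a personality pairs with (MyDriver and
// deviceHasPreferredFeature() are hypothetical, not part of this patch):
//
//     IOService * MyDriver::probe(IOService * provider, SInt32 * score)
//     {
//         if (!super::probe(provider, score)) {
//             return NULL;                 // decline this match
//         }
//         if (deviceHasPreferredFeature(provider)) {
//             *score += 1000;              // outbid competing personalities
//         }
//         return this;                     // returned instance is a start() candidate
//     }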
while (!needReloc && (nextMatch = matches->getFirstObject())) { + nextMatch->retain(); + matches->removeObject(nextMatch); - nextMatch->retain(); - matches->removeObject(nextMatch); - - if( (notify = OSDynamicCast( _IOServiceNotifier, nextMatch ))) { - - if (0 == (__state[0] & kIOServiceInactiveState)) invokeNotifier( notify ); - nextMatch->release(); - nextMatch = 0; - continue; - - } else if( !(match = OSDynamicCast( OSDictionary, nextMatch ))) { - nextMatch->release(); - nextMatch = 0; - continue; - } + if ((notify = OSDynamicCast( _IOServiceNotifier, nextMatch ))) { + if (0 == (__state[0] & kIOServiceInactiveState)) { + invokeNotifier( notify ); + } + nextMatch->release(); + nextMatch = 0; + continue; + } else if (!(match = OSDynamicCast( OSDictionary, nextMatch ))) { + nextMatch->release(); + nextMatch = 0; + continue; + } - props = 0; + props = 0; #if IOMATCHDEBUG - debugFlags = getDebugFlags( match ); + debugFlags = getDebugFlags( match ); #endif - do { - category = OSDynamicCast( OSSymbol, - match->getObject( gIOMatchCategoryKey )); - if( 0 == category) - category = gIODefaultMatchCategoryKey; - - if( (client = copyClientWithCategory(category)) ) { + do { + category = OSDynamicCast( OSSymbol, + match->getObject( gIOMatchCategoryKey )); + if (0 == category) { + category = gIODefaultMatchCategoryKey; + } + + if ((client = copyClientWithCategory(category))) { #if IOMATCHDEBUG - if( (debugFlags & kIOLogMatch) && (this != gIOResources)) - LOG("%s: match category %s exists\n", getName(), - category->getCStringNoCopy()); + if ((debugFlags & kIOLogMatch) && (this != gIOResources)) { + LOG("%s: match category %s exists\n", getName(), + category->getCStringNoCopy()); + } #endif - nextMatch->release(); - nextMatch = 0; + nextMatch->release(); + nextMatch = 0; - client->release(); - client = NULL; + client->release(); + client = NULL; - continue; - } + continue; + } - // create a copy now in case its modified during matching - props = OSDictionary::withDictionary( match, match->getCount()); - if( 0 == props) - continue; - props->setCapacityIncrement(1); + // create a copy now in case its modified during matching + props = OSDictionary::withDictionary( match, match->getCount()); + if (0 == props) { + continue; + } + props->setCapacityIncrement(1); - // check the nub matches - if( false == matchPassive(props, kIOServiceChangesOK | kIOServiceClassDone)) - continue; + // check the nub matches + if (false == matchPassive(props, kIOServiceChangesOK | kIOServiceClassDone)) { + continue; + } - // Check to see if driver reloc has been loaded. - needReloc = (false == gIOCatalogue->isModuleLoaded( match )); - if( needReloc) { -#if IOMATCHDEBUG - if( debugFlags & kIOLogCatalogue) - LOG("%s: stalling for module\n", getName()); -#endif - // If reloc hasn't been loaded, exit; - // reprobing will occur after reloc has been loaded. - continue; - } - - // reorder on family matchPropertyTable score. - if( 0 == familyMatches) - familyMatches = OSOrderedSet::withCapacity( 1, - IOServiceOrdering, (void *) gIOProbeScoreKey ); - if( familyMatches) - familyMatches->setObject( props ); - - } while( false ); - - if (nextMatch) { - nextMatch->release(); - nextMatch = 0; - } - if( props) - props->release(); - } - matches->release(); - matches = 0; - - if( familyMatches) { - - while( !needReloc - && (props = (OSDictionary *) familyMatches->getFirstObject())) { - - props->retain(); - familyMatches->removeObject( props ); - - inst = 0; - newInst = 0; + // Check to see if driver reloc has been loaded. 
+ needReloc = (false == gIOCatalogue->isModuleLoaded( match )); + if (needReloc) { #if IOMATCHDEBUG - debugFlags = getDebugFlags( props ); -#endif - do { - symbol = OSDynamicCast( OSSymbol, - props->getObject( gIOClassKey)); - if( !symbol) - continue; - - //IOLog("%s alloc (symbol %p props %p)\n", symbol->getCStringNoCopy(), IOSERVICE_OBFUSCATE(symbol), IOSERVICE_OBFUSCATE(props)); - - // alloc the driver instance - inst = (IOService *) OSMetaClass::allocClassWithName( symbol); - - if( !inst || !OSDynamicCast(IOService, inst)) { - IOLog("Couldn't alloc class \"%s\"\n", - symbol->getCStringNoCopy()); - continue; - } - - // init driver instance - if( !(inst->init( props ))) { -#if IOMATCHDEBUG - if( debugFlags & kIOLogStart) - IOLog("%s::init fails\n", symbol->getCStringNoCopy()); + if (debugFlags & kIOLogCatalogue) { + LOG("%s: stalling for module\n", getName()); + } #endif - continue; - } - if( __state[1] & kIOServiceSynchronousState) - inst->__state[1] |= kIOServiceSynchronousState; - - // give the driver the default match category if not specified - category = OSDynamicCast( OSSymbol, - props->getObject( gIOMatchCategoryKey )); - if( 0 == category) - category = gIODefaultMatchCategoryKey; - inst->setProperty( gIOMatchCategoryKey, (OSObject *) category ); - // attach driver instance - if( !(inst->attach( this ))) - continue; - - // pass in score from property table - score = familyMatches->orderObject( props ); - - // & probe the new driver instance + // If reloc hasn't been loaded, exit; + // reprobing will occur after reloc has been loaded. + continue; + } + + // reorder on family matchPropertyTable score. + if (0 == familyMatches) { + familyMatches = OSOrderedSet::withCapacity( 1, + IOServiceOrdering, (void *) gIOProbeScoreKey ); + } + if (familyMatches) { + familyMatches->setObject( props ); + } + } while (false); + + if (nextMatch) { + nextMatch->release(); + nextMatch = 0; + } + if (props) { + props->release(); + } + } + matches->release(); + matches = 0; + + if (familyMatches) { + while (!needReloc + && (props = (OSDictionary *) familyMatches->getFirstObject())) { + props->retain(); + familyMatches->removeObject( props ); + + inst = 0; + newInst = 0; #if IOMATCHDEBUG - if( debugFlags & kIOLogProbe) - LOG("%s::probe(%s)\n", - inst->getMetaClass()->getClassName(), getName()); + debugFlags = getDebugFlags( props ); #endif - - newInst = inst->probe( this, &score ); - inst->detach( this ); - if( 0 == newInst) { + do { + symbol = OSDynamicCast( OSSymbol, + props->getObject( gIOClassKey)); + if (!symbol) { + continue; + } + + //IOLog("%s alloc (symbol %p props %p)\n", symbol->getCStringNoCopy(), IOSERVICE_OBFUSCATE(symbol), IOSERVICE_OBFUSCATE(props)); + + // alloc the driver instance + inst = (IOService *) OSMetaClass::allocClassWithName( symbol); + + if (!inst || !OSDynamicCast(IOService, inst)) { + IOLog("Couldn't alloc class \"%s\"\n", + symbol->getCStringNoCopy()); + continue; + } + + // init driver instance + if (!(inst->init( props ))) { #if IOMATCHDEBUG - if( debugFlags & kIOLogProbe) - IOLog("%s::probe fails\n", symbol->getCStringNoCopy()); + if (debugFlags & kIOLogStart) { + IOLog("%s::init fails\n", symbol->getCStringNoCopy()); + } #endif - continue; - } - - // save the score - newPri = OSNumber::withNumber( score, 32 ); - if( newPri) { - newInst->setProperty( gIOProbeScoreKey, newPri ); - newPri->release(); - } - - // add to start list for the match category - if( 0 == startDict) - startDict = OSDictionary::withCapacity( 1 ); - assert( startDict ); - startList = (OSOrderedSet 
*) - startDict->getObject( category ); - if( 0 == startList) { - startList = OSOrderedSet::withCapacity( 1, - IOServiceOrdering, (void *) gIOProbeScoreKey ); - if( startDict && startList) { - startDict->setObject( category, startList ); - startList->release(); - } - } - assert( startList ); - if( startList) - startList->setObject( newInst ); - - } while( false ); - - props->release(); - if( inst) - inst->release(); - } - familyMatches->release(); - familyMatches = 0; - } - - // start the best (until success) of each category - - iter = OSCollectionIterator::withCollection( startDict ); - if( iter) { - while( (category = (const OSSymbol *) iter->getNextObject())) { - - startList = (OSOrderedSet *) startDict->getObject( category ); - assert( startList ); - if( !startList) - continue; - - started = false; - while( true // (!started) - && (inst = (IOService *)startList->getFirstObject())) { - - inst->retain(); - startList->removeObject(inst); + continue; + } + if (__state[1] & kIOServiceSynchronousState) { + inst->__state[1] |= kIOServiceSynchronousState; + } + + // give the driver the default match category if not specified + category = OSDynamicCast( OSSymbol, + props->getObject( gIOMatchCategoryKey )); + if (0 == category) { + category = gIODefaultMatchCategoryKey; + } + inst->setProperty( gIOMatchCategoryKey, (OSObject *) category ); + // attach driver instance + if (!(inst->attach( this ))) { + continue; + } + // pass in score from property table + score = familyMatches->orderObject( props ); + + // & probe the new driver instance #if IOMATCHDEBUG - debugFlags = getDebugFlags( inst ); - - if( debugFlags & kIOLogStart) { - if( started) - LOG( "match category exists, skipping " ); - LOG( "%s::start(%s) <%d>\n", inst->getName(), - getName(), inst->getRetainCount()); - } + if (debugFlags & kIOLogProbe) { + LOG("%s::probe(%s)\n", + inst->getMetaClass()->getClassName(), getName()); + } #endif - if( false == started) - started = startCandidate( inst ); + + newInst = inst->probe( this, &score ); + inst->detach( this ); + if (0 == newInst) { #if IOMATCHDEBUG - if( (debugFlags & kIOLogStart) && (false == started)) - LOG( "%s::start(%s) <%d> failed\n", inst->getName(), getName(), - inst->getRetainCount()); + if (debugFlags & kIOLogProbe) { + IOLog("%s::probe fails\n", symbol->getCStringNoCopy()); + } #endif - inst->release(); - } - } - iter->release(); - } + continue; + } + + // save the score + newPri = OSNumber::withNumber( score, 32 ); + if (newPri) { + newInst->setProperty( gIOProbeScoreKey, newPri ); + newPri->release(); + } + + // add to start list for the match category + if (0 == startDict) { + startDict = OSDictionary::withCapacity( 1 ); + } + assert( startDict ); + startList = (OSOrderedSet *) + startDict->getObject( category ); + if (0 == startList) { + startList = OSOrderedSet::withCapacity( 1, + IOServiceOrdering, (void *) gIOProbeScoreKey ); + if (startDict && startList) { + startDict->setObject( category, startList ); + startList->release(); + } + } + assert( startList ); + if (startList) { + startList->setObject( newInst ); + } + } while (false); + props->release(); + if (inst) { + inst->release(); + } + } + familyMatches->release(); + familyMatches = 0; + } - // adjust the busy count by +1 if matching is stalled for a module, - // or -1 if a previously stalled matching is complete. 
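// A driver opts into the scoring walk above by overriding probe(): it can
// veto itself (return NULL) so the next candidate in the category is
// tried, or bias *score before the start list is ordered. Sketch only —
// MyDriver and deviceLooksSupported() are hypothetical:

#include <IOKit/IOService.h>

class MyDriver : public IOService {
    OSDeclareDefaultStructors(MyDriver)
public:
    virtual IOService * probe(IOService * provider, SInt32 * score);
private:
    bool deviceLooksSupported(IOService * provider);
};

IOService *
MyDriver::probe(IOService * provider, SInt32 * score)
{
    if (!IOService::probe(provider, score)) {
        return NULL;
    }
    if (!deviceLooksSupported(provider)) {
        return NULL;    // fall through to a lower-scored candidate
    }
    *score += 1000;     // outrank generic fallbacks in this match category
    return this;
}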
- lockForArbitration(); - SInt32 adjBusy = 0; - uint64_t regID = getRegistryEntryID(); + // start the best (until success) of each category - if( needReloc) { - adjBusy = (__state[1] & kIOServiceModuleStallState) ? 0 : 1; - if( adjBusy) { + iter = OSCollectionIterator::withCollection( startDict ); + if (iter) { + while ((category = (const OSSymbol *) iter->getNextObject())) { + startList = (OSOrderedSet *) startDict->getObject( category ); + assert( startList ); + if (!startList) { + continue; + } - IOServiceTrace( - IOSERVICE_MODULESTALL, - (uintptr_t) regID, - (uintptr_t) (regID >> 32), - (uintptr_t) this, - 0); + started = false; + while (true // (!started) + && (inst = (IOService *)startList->getFirstObject())) { + inst->retain(); + startList->removeObject(inst); - __state[1] |= kIOServiceModuleStallState; +#if IOMATCHDEBUG + debugFlags = getDebugFlags( inst ); + + if (debugFlags & kIOLogStart) { + if (started) { + LOG( "match category exists, skipping " ); + } + LOG( "%s::start(%s) <%d>\n", inst->getName(), + getName(), inst->getRetainCount()); + } +#endif + if (false == started) { + started = startCandidate( inst ); + } +#if IOMATCHDEBUG + if ((debugFlags & kIOLogStart) && (false == started)) { + LOG( "%s::start(%s) <%d> failed\n", inst->getName(), getName(), + inst->getRetainCount()); + } +#endif + inst->release(); + } + } + iter->release(); } - } else if( __state[1] & kIOServiceModuleStallState) { - IOServiceTrace( - IOSERVICE_MODULEUNSTALL, - (uintptr_t) regID, - (uintptr_t) (regID >> 32), - (uintptr_t) this, - 0); + // adjust the busy count by +1 if matching is stalled for a module, + // or -1 if a previously stalled matching is complete. + lockForArbitration(); + SInt32 adjBusy = 0; + uint64_t regID = getRegistryEntryID(); + + if (needReloc) { + adjBusy = (__state[1] & kIOServiceModuleStallState) ? 
0 : 1; + if (adjBusy) { + IOServiceTrace( + IOSERVICE_MODULESTALL, + (uintptr_t) regID, + (uintptr_t) (regID >> 32), + (uintptr_t) this, + 0); - __state[1] &= ~kIOServiceModuleStallState; - adjBusy = -1; - } - if( adjBusy) - _adjustBusy( adjBusy ); - unlockForArbitration(); + __state[1] |= kIOServiceModuleStallState; + } + } else if (__state[1] & kIOServiceModuleStallState) { + IOServiceTrace( + IOSERVICE_MODULEUNSTALL, + (uintptr_t) regID, + (uintptr_t) (regID >> 32), + (uintptr_t) this, + 0); + + __state[1] &= ~kIOServiceModuleStallState; + adjBusy = -1; + } + if (adjBusy) { + _adjustBusy( adjBusy ); + } + unlockForArbitration(); - if( startDict) - startDict->release(); + if (startDict) { + startDict->release(); + } } /* @@ -3539,1477 +3682,1553 @@ void IOService::probeCandidates( OSOrderedSet * matches ) * called on exporting object instance */ -bool IOService::startCandidate( IOService * service ) +bool +IOService::startCandidate( IOService * service ) { - bool ok; + bool ok; - ok = service->attach( this ); + ok = service->attach( this ); - if( ok) - { - if (this != gIOResources) - { - // stall for any nub resources - checkResources(); - // stall for any driver resources - service->checkResources(); - } - - AbsoluteTime startTime; - AbsoluteTime endTime; - UInt64 nano; + if (ok) { + if (this != gIOResources) { + // stall for any nub resources + checkResources(); + // stall for any driver resources + service->checkResources(); + } - if (kIOLogStart & gIOKitDebug) - clock_get_uptime(&startTime); + AbsoluteTime startTime; + AbsoluteTime endTime; + UInt64 nano; - ok = service->start(this); + if (kIOLogStart & gIOKitDebug) { + clock_get_uptime(&startTime); + } - if (kIOLogStart & gIOKitDebug) - { - clock_get_uptime(&endTime); - - if (CMP_ABSOLUTETIME(&endTime, &startTime) > 0) - { - SUB_ABSOLUTETIME(&endTime, &startTime); - absolutetime_to_nanoseconds(endTime, &nano); - if (nano > 500000000ULL) - IOLog("%s::start took %ld ms\n", service->getName(), (long)(UInt32)(nano / 1000000ULL)); - } + ok = service->start(this); + + if (kIOLogStart & gIOKitDebug) { + clock_get_uptime(&endTime); + + if (CMP_ABSOLUTETIME(&endTime, &startTime) > 0) { + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nano); + if (nano > 500000000ULL) { + IOLog("%s::start took %ld ms\n", service->getName(), (long)(UInt32)(nano / 1000000ULL)); + } + } + } + if (!ok) { + service->detach( this ); + } } - if( !ok) - service->detach( this ); - } - return( ok ); + return ok; } -void IOService::publishResource( const char * key, OSObject * value ) +void +IOService::publishResource( const char * key, OSObject * value ) { - const OSSymbol * sym; + const OSSymbol * sym; - if( (sym = OSSymbol::withCString( key))) { - publishResource( sym, value); - sym->release(); - } + if ((sym = OSSymbol::withCString( key))) { + publishResource( sym, value); + sym->release(); + } } -void IOService::publishResource( const OSSymbol * key, OSObject * value ) +void +IOService::publishResource( const OSSymbol * key, OSObject * value ) { - if( 0 == value) - value = (OSObject *) gIOServiceKey; + if (0 == value) { + value = (OSObject *) gIOServiceKey; + } - gIOResources->setProperty( key, value); + gIOResources->setProperty( key, value); - if( IORecursiveLockHaveLock( gNotificationLock)) - return; + if (IORecursiveLockHaveLock( gNotificationLock)) { + return; + } - gIOResourceGenerationCount++; - gIOResources->registerService(); + gIOResourceGenerationCount++; + gIOResources->registerService(); } -bool 
IOService::addNeededResource( const char * key ) +bool +IOService::addNeededResource( const char * key ) { - OSObject * resourcesProp; - OSSet * set; - OSString * newKey; - bool ret; + OSObject * resourcesProp; + OSSet * set; + OSString * newKey; + bool ret; - resourcesProp = copyProperty( gIOResourceMatchKey ); - if (!resourcesProp) return(false); + resourcesProp = copyProperty( gIOResourceMatchKey ); + if (!resourcesProp) { + return false; + } - newKey = OSString::withCString( key ); - if (!newKey) - { - resourcesProp->release(); - return( false); - } + newKey = OSString::withCString( key ); + if (!newKey) { + resourcesProp->release(); + return false; + } - set = OSDynamicCast( OSSet, resourcesProp ); - if( !set) { - set = OSSet::withCapacity( 1 ); - if( set) - set->setObject( resourcesProp ); - } - else - set->retain(); + set = OSDynamicCast( OSSet, resourcesProp ); + if (!set) { + set = OSSet::withCapacity( 1 ); + if (set) { + set->setObject( resourcesProp ); + } + } else { + set->retain(); + } - set->setObject( newKey ); - newKey->release(); - ret = setProperty( gIOResourceMatchKey, set ); - set->release(); - resourcesProp->release(); + set->setObject( newKey ); + newKey->release(); + ret = setProperty( gIOResourceMatchKey, set ); + set->release(); + resourcesProp->release(); - return( ret ); + return ret; } -bool IOService::checkResource( OSObject * matching ) +bool +IOService::checkResource( OSObject * matching ) { - OSString * str; - OSDictionary * table; + OSString * str; + OSDictionary * table; - if( (str = OSDynamicCast( OSString, matching ))) { - if( gIOResources->getProperty( str )) - return( true ); - } + if ((str = OSDynamicCast( OSString, matching ))) { + if (gIOResources->getProperty( str )) { + return true; + } + } - if( str) - table = resourceMatching( str ); - else if( (table = OSDynamicCast( OSDictionary, matching ))) - table->retain(); - else { - IOLog("%s: Can't match using: %s\n", getName(), - matching->getMetaClass()->getClassName()); - /* false would stall forever */ - return( true ); - } + if (str) { + table = resourceMatching( str ); + } else if ((table = OSDynamicCast( OSDictionary, matching ))) { + table->retain(); + } else { + IOLog("%s: Can't match using: %s\n", getName(), + matching->getMetaClass()->getClassName()); + /* false would stall forever */ + return true; + } - if( gIOKitDebug & kIOLogConfig) - LOG("config(%p): stalling %s\n", IOSERVICE_OBFUSCATE(IOThreadSelf()), getName()); + if (gIOKitDebug & kIOLogConfig) { + LOG("config(%p): stalling %s\n", IOSERVICE_OBFUSCATE(IOThreadSelf()), getName()); + } - waitForService( table ); + waitForService( table ); - if( gIOKitDebug & kIOLogConfig) - LOG("config(%p): waking\n", IOSERVICE_OBFUSCATE(IOThreadSelf()) ); + if (gIOKitDebug & kIOLogConfig) { + LOG("config(%p): waking\n", IOSERVICE_OBFUSCATE(IOThreadSelf())); + } - return( true ); + return true; } -bool IOService::checkResources( void ) +bool +IOService::checkResources( void ) { - OSObject * resourcesProp; - OSSet * set; - OSIterator * iter; - bool ok; - - resourcesProp = copyProperty( gIOResourceMatchKey ); - if( 0 == resourcesProp) - return( true ); + OSObject * resourcesProp; + OSSet * set; + OSIterator * iter; + bool ok; - if( (set = OSDynamicCast( OSSet, resourcesProp ))) { - - iter = OSCollectionIterator::withCollection( set ); - ok = (0 != iter); - while( ok && (resourcesProp = iter->getNextObject()) ) - ok = checkResource( resourcesProp ); - if( iter) - iter->release(); + resourcesProp = copyProperty( gIOResourceMatchKey ); + if (0 == 
resourcesProp) { + return true; + } - } else - ok = checkResource( resourcesProp ); + if ((set = OSDynamicCast( OSSet, resourcesProp ))) { + iter = OSCollectionIterator::withCollection( set ); + ok = (0 != iter); + while (ok && (resourcesProp = iter->getNextObject())) { + ok = checkResource( resourcesProp ); + } + if (iter) { + iter->release(); + } + } else { + ok = checkResource( resourcesProp ); + } - OSSafeReleaseNULL(resourcesProp); + OSSafeReleaseNULL(resourcesProp); - return( ok ); + return ok; } -void _IOConfigThread::configThread( void ) +void +_IOConfigThread::configThread( void ) { - _IOConfigThread * inst; - - do { - if( !(inst = new _IOConfigThread)) - continue; - if( !inst->init()) - continue; - thread_t unused; - if (KERN_SUCCESS != kernel_thread_start(&_IOConfigThread::main, inst, &unused)) - continue; + _IOConfigThread * inst; - return; + do { + if (!(inst = new _IOConfigThread)) { + continue; + } + if (!inst->init()) { + continue; + } + thread_t unused; + if (KERN_SUCCESS != kernel_thread_start(&_IOConfigThread::main, inst, &unused)) { + continue; + } - } while( false); + return; + } while (false); - if( inst) - inst->release(); + if (inst) { + inst->release(); + } - return; + return; } -void _IOConfigThread::free( void ) +void +_IOConfigThread::free( void ) { - thread_deallocate(current_thread()); - OSObject::free(); + thread_deallocate(current_thread()); + OSObject::free(); } -void IOService::doServiceMatch( IOOptionBits options ) +void +IOService::doServiceMatch( IOOptionBits options ) { - _IOServiceNotifier * notify; - OSIterator * iter; - OSOrderedSet * matches; - OSArray * resourceKeys = 0; - SInt32 catalogGeneration; - bool keepGuessing = true; - bool reRegistered = true; - bool didRegister; - OSArray * notifiers[2] = {0}; + _IOServiceNotifier * notify; + OSIterator * iter; + OSOrderedSet * matches; + OSArray * resourceKeys = 0; + SInt32 catalogGeneration; + bool keepGuessing = true; + bool reRegistered = true; + bool didRegister; + OSArray * notifiers[2] = {0}; // job->nub->deliverNotification( gIOPublishNotification, -// kIOServiceRegisteredState, 0xffffffff ); - - while( keepGuessing ) { - - matches = gIOCatalogue->findDrivers( this, &catalogGeneration ); - // the matches list should always be created by findDrivers() - if( matches) { - - lockForArbitration(); - if( 0 == (__state[0] & kIOServiceFirstPublishState)) { - getMetaClass()->addInstance(this); - notifiers[0] = copyNotifiers(gIOFirstPublishNotification, - kIOServiceFirstPublishState, 0xffffffff ); - } - LOCKREADNOTIFY(); - __state[1] &= ~kIOServiceNeedConfigState; - __state[1] |= kIOServiceConfigState | kIOServiceConfigRunning; - didRegister = (0 == (kIOServiceRegisteredState & __state[0])); - __state[0] |= kIOServiceRegisteredState; - - keepGuessing &= (0 == (__state[0] & kIOServiceInactiveState)); - if (reRegistered && keepGuessing) { - iter = OSCollectionIterator::withCollection( (OSOrderedSet *) - gNotifications->getObject( gIOPublishNotification ) ); - if( iter) { - while((notify = (_IOServiceNotifier *) - iter->getNextObject())) { - - if( matchPassive(notify->matching, 0) - && (kIOServiceNotifyEnable & notify->state)) - matches->setObject( notify ); - } - iter->release(); - } - } - - UNLOCKNOTIFY(); - unlockForArbitration(); - invokeNotifiers(¬ifiers[0]); - - if (keepGuessing && matches->getCount() && (kIOReturnSuccess == getResources())) - { - if (this == gIOResources) - { - if (resourceKeys) resourceKeys->release(); - resourceKeys = copyPropertyKeys(); - } - probeCandidates( matches ); - } - else 
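// The stall in checkResource() above is the consumer half of the resource
// protocol; publishResource() is the producer half. A sketch of both sides
// ("FirmwareLoaded", MyProvider, and MyConsumer are made-up names):

void
MyProvider::firmwareDidLoad(void)
{
    // Once published, drivers stalled in checkResource() on this key are
    // re-matched and released.
    publishResource("FirmwareLoaded", kOSBooleanTrue);
}

bool
MyConsumer::init(OSDictionary * props)
{
    if (!IOService::init(props)) {
        return false;
    }
    // startCandidate() -> checkResources() will now stall this driver's
    // start() until the resource appears.
    return addNeededResource("FirmwareLoaded");
}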
- matches->release(); - } - - lockForArbitration(); - reRegistered = (0 != (__state[1] & kIOServiceNeedConfigState)); - keepGuessing = - (reRegistered || (catalogGeneration != - gIOCatalogue->getGenerationCount())) - && (0 == (__state[0] & kIOServiceInactiveState)); - - if( keepGuessing) - unlockForArbitration(); - } - - if( (0 == (__state[0] & kIOServiceInactiveState)) - && (0 == (__state[1] & kIOServiceModuleStallState)) ) { - - if (resourceKeys) setProperty(gIOResourceMatchedKey, resourceKeys); - - notifiers[0] = copyNotifiers(gIOMatchedNotification, - kIOServiceMatchedState, 0xffffffff); - if( 0 == (__state[0] & kIOServiceFirstMatchState)) - notifiers[1] = copyNotifiers(gIOFirstMatchNotification, - kIOServiceFirstMatchState, 0xffffffff); - } - - __state[1] &= ~kIOServiceConfigRunning; - unlockForArbitration(); - - if (resourceKeys) resourceKeys->release(); - - invokeNotifiers(¬ifiers[0]); - invokeNotifiers(¬ifiers[1]); - - lockForArbitration(); - __state[1] &= ~kIOServiceConfigState; - scheduleTerminatePhase2(); - - _adjustBusy( -1 ); - unlockForArbitration(); -} - -UInt32 IOService::_adjustBusy( SInt32 delta ) -{ - IOService * next; - UInt32 count; - UInt32 result; - bool wasQuiet, nowQuiet, needWake; - - next = this; - result = __state[1] & kIOServiceBusyStateMask; - - if( delta) do { - if( next != this) - next->lockForArbitration(); - count = next->__state[1] & kIOServiceBusyStateMask; - wasQuiet = (0 == count); - if (((delta < 0) && wasQuiet) || ((delta > 0) && (kIOServiceBusyMax == count))) - OSReportWithBacktrace("%s: bad busy count (%d,%d)\n", next->getName(), count, delta); - else - count += delta; - next->__state[1] = (next->__state[1] & ~kIOServiceBusyStateMask) | count; - nowQuiet = (0 == count); - needWake = (0 != (kIOServiceBusyWaiterState & next->__state[1])); - - if( needWake) { - next->__state[1] &= ~kIOServiceBusyWaiterState; - IOLockLock( gIOServiceBusyLock ); - thread_wakeup( (event_t) next); - IOLockUnlock( gIOServiceBusyLock ); - } - if( next != this) - next->unlockForArbitration(); - - if( (wasQuiet || nowQuiet) ) { - - uint64_t regID = next->getRegistryEntryID(); - IOServiceTrace( - ((wasQuiet/*nowBusy*/) ? 
IOSERVICE_BUSY : IOSERVICE_NONBUSY), - (uintptr_t) regID, - (uintptr_t) (regID >> 32), - (uintptr_t) next, - 0); +// kIOServiceRegisteredState, 0xffffffff ); + + while (keepGuessing) { + matches = gIOCatalogue->findDrivers( this, &catalogGeneration ); + // the matches list should always be created by findDrivers() + if (matches) { + lockForArbitration(); + if (0 == (__state[0] & kIOServiceFirstPublishState)) { + getMetaClass()->addInstance(this); + notifiers[0] = copyNotifiers(gIOFirstPublishNotification, + kIOServiceFirstPublishState, 0xffffffff ); + } + LOCKREADNOTIFY(); + __state[1] &= ~kIOServiceNeedConfigState; + __state[1] |= kIOServiceConfigState | kIOServiceConfigRunning; + didRegister = (0 == (kIOServiceRegisteredState & __state[0])); + __state[0] |= kIOServiceRegisteredState; + + keepGuessing &= (0 == (__state[0] & kIOServiceInactiveState)); + if (reRegistered && keepGuessing) { + iter = OSCollectionIterator::withCollection((OSOrderedSet *) + gNotifications->getObject( gIOPublishNotification )); + if (iter) { + while ((notify = (_IOServiceNotifier *) + iter->getNextObject())) { + if (matchPassive(notify->matching, 0) + && (kIOServiceNotifyEnable & notify->state)) { + matches->setObject( notify ); + } + } + iter->release(); + } + } + + UNLOCKNOTIFY(); + unlockForArbitration(); + invokeNotifiers(¬ifiers[0]); + + if (keepGuessing && matches->getCount() && (kIOReturnSuccess == getResources())) { + if (this == gIOResources) { + if (resourceKeys) { + resourceKeys->release(); + } + resourceKeys = copyPropertyKeys(); + } + probeCandidates( matches ); + } else { + matches->release(); + } + } + + lockForArbitration(); + reRegistered = (0 != (__state[1] & kIOServiceNeedConfigState)); + keepGuessing = + (reRegistered || (catalogGeneration != + gIOCatalogue->getGenerationCount())) + && (0 == (__state[0] & kIOServiceInactiveState)); + + if (keepGuessing) { + unlockForArbitration(); + } + } + + if ((0 == (__state[0] & kIOServiceInactiveState)) + && (0 == (__state[1] & kIOServiceModuleStallState))) { + if (resourceKeys) { + setProperty(gIOResourceMatchedKey, resourceKeys); + } + + notifiers[0] = copyNotifiers(gIOMatchedNotification, + kIOServiceMatchedState, 0xffffffff); + if (0 == (__state[0] & kIOServiceFirstMatchState)) { + notifiers[1] = copyNotifiers(gIOFirstMatchNotification, + kIOServiceFirstMatchState, 0xffffffff); + } + } - if (wasQuiet) - { - next->__timeBusy = mach_absolute_time(); - } - else - { - next->__accumBusy += mach_absolute_time() - next->__timeBusy; - next->__timeBusy = 0; - } + __state[1] &= ~kIOServiceConfigRunning; + unlockForArbitration(); + + if (resourceKeys) { + resourceKeys->release(); + } + + invokeNotifiers(¬ifiers[0]); + invokeNotifiers(¬ifiers[1]); + + lockForArbitration(); + __state[1] &= ~kIOServiceConfigState; + scheduleTerminatePhase2(); + + _adjustBusy( -1 ); + unlockForArbitration(); +} + +UInt32 +IOService::_adjustBusy( SInt32 delta ) +{ + IOService * next; + UInt32 count; + UInt32 result; + bool wasQuiet, nowQuiet, needWake; + + next = this; + result = __state[1] & kIOServiceBusyStateMask; + + if (delta) { + do { + if (next != this) { + next->lockForArbitration(); + } + count = next->__state[1] & kIOServiceBusyStateMask; + wasQuiet = (0 == count); + if (((delta < 0) && wasQuiet) || ((delta > 0) && (kIOServiceBusyMax == count))) { + OSReportWithBacktrace("%s: bad busy count (%d,%d)\n", next->getName(), count, delta); + } else { + count += delta; + } + next->__state[1] = (next->__state[1] & ~kIOServiceBusyStateMask) | count; + nowQuiet = (0 == count); + 
needWake = (0 != (kIOServiceBusyWaiterState & next->__state[1])); + + if (needWake) { + next->__state[1] &= ~kIOServiceBusyWaiterState; + IOLockLock( gIOServiceBusyLock ); + thread_wakeup((event_t) next); + IOLockUnlock( gIOServiceBusyLock ); + } + if (next != this) { + next->unlockForArbitration(); + } + + if ((wasQuiet || nowQuiet)) { + uint64_t regID = next->getRegistryEntryID(); + IOServiceTrace( + ((wasQuiet /*nowBusy*/) ? IOSERVICE_BUSY : IOSERVICE_NONBUSY), + (uintptr_t) regID, + (uintptr_t) (regID >> 32), + (uintptr_t) next, + 0); + + if (wasQuiet) { + next->__timeBusy = mach_absolute_time(); + } else { + next->__accumBusy += mach_absolute_time() - next->__timeBusy; + next->__timeBusy = 0; + } - MessageClientsContext context; + MessageClientsContext context; - context.service = next; - context.type = kIOMessageServiceBusyStateChange; - context.argument = (void *) wasQuiet; /*nowBusy*/ - context.argSize = 0; + context.service = next; + context.type = kIOMessageServiceBusyStateChange; + context.argument = (void *) wasQuiet; /*nowBusy*/ + context.argSize = 0; - applyToInterestNotifiers( next, gIOBusyInterest, - &messageClientsApplier, &context ); + applyToInterestNotifiers( next, gIOBusyInterest, + &messageClientsApplier, &context ); #if !NO_KEXTD - if( nowQuiet && (next == gIOServiceRoot)) { - OSKext::considerUnloads(); - IOServiceTrace(IOSERVICE_REGISTRY_QUIET, 0, 0, 0, 0); - } + if (nowQuiet && (next == gIOServiceRoot)) { + OSKext::considerUnloads(); + IOServiceTrace(IOSERVICE_REGISTRY_QUIET, 0, 0, 0, 0); + } #endif - } + } + + delta = nowQuiet ? -1 : +1; + } while ((wasQuiet || nowQuiet) && (next = next->getProvider())); + } + + return result; +} + +void +IOService::adjustBusy( SInt32 delta ) +{ + lockForArbitration(); + _adjustBusy( delta ); + unlockForArbitration(); +} + +uint64_t +IOService::getAccumulatedBusyTime( void ) +{ + uint64_t accumBusy = __accumBusy; + uint64_t timeBusy = __timeBusy; + uint64_t nano; - delta = nowQuiet ? 
-1 : +1; + do{ + accumBusy = __accumBusy; + timeBusy = __timeBusy; + if (timeBusy) { + accumBusy += mach_absolute_time() - timeBusy; + } + }while (timeBusy != __timeBusy); + + absolutetime_to_nanoseconds(*(AbsoluteTime *)&accumBusy, &nano); + + return nano; +} - } while( (wasQuiet || nowQuiet) && (next = next->getProvider())); +UInt32 +IOService::getBusyState( void ) +{ + return __state[1] & kIOServiceBusyStateMask; +} - return( result ); +IOReturn +IOService::waitForState( UInt32 mask, UInt32 value, + mach_timespec_t * timeout ) +{ + panic("waitForState"); + return kIOReturnUnsupported; } -void IOService::adjustBusy( SInt32 delta ) +IOReturn +IOService::waitForState( UInt32 mask, UInt32 value, + uint64_t timeout ) { - lockForArbitration(); - _adjustBusy( delta ); - unlockForArbitration(); + bool wait; + int waitResult = THREAD_AWAKENED; + bool computeDeadline = true; + AbsoluteTime abstime; + + do { + lockForArbitration(); + IOLockLock( gIOServiceBusyLock ); + wait = (value != (__state[1] & mask)); + if (wait) { + __state[1] |= kIOServiceBusyWaiterState; + unlockForArbitration(); + if (timeout != UINT64_MAX) { + if (computeDeadline) { + AbsoluteTime nsinterval; + nanoseconds_to_absolutetime(timeout, &nsinterval ); + clock_absolutetime_interval_to_deadline(nsinterval, &abstime); + computeDeadline = false; + } + assert_wait_deadline((event_t)this, THREAD_UNINT, __OSAbsoluteTime(abstime)); + } else { + assert_wait((event_t)this, THREAD_UNINT ); + } + } else { + unlockForArbitration(); + } + IOLockUnlock( gIOServiceBusyLock ); + if (wait) { + waitResult = thread_block(THREAD_CONTINUE_NULL); + } + } while (wait && (waitResult != THREAD_TIMED_OUT)); + + if (waitResult == THREAD_TIMED_OUT) { + return kIOReturnTimeout; + } else { + return kIOReturnSuccess; + } +} + +IOReturn +IOService::waitQuiet( uint64_t timeout ) +{ + IOReturn ret; + uint32_t loops; + char * string = NULL; + char * panicString = NULL; + size_t len; + size_t panicStringLen; + uint64_t time; + uint64_t nano; + bool kextdWait; + bool dopanic; + + enum { kTimeoutExtensions = 4 }; + + time = mach_absolute_time(); + kextdWait = false; + for (loops = 0; loops < kTimeoutExtensions; loops++) { + ret = waitForState( kIOServiceBusyStateMask, 0, timeout ); + + if (loops && (kIOReturnSuccess == ret)) { + time = mach_absolute_time() - time; + absolutetime_to_nanoseconds(*(AbsoluteTime *)&time, &nano); + IOLog("busy extended ok[%d], (%llds, %llds)\n", + loops, timeout / 1000000000ULL, nano / 1000000000ULL); + break; + } else if (kIOReturnTimeout != ret) { + break; + } else if (timeout < 41000000000) { + break; + } + + { + IORegistryIterator * iter; + OSOrderedSet * set; + OSOrderedSet * leaves; + IOService * next; + IOService * nextParent; + char * s; + size_t l; + + len = 256; + panicStringLen = 256; + if (!string) { + string = IONew(char, len); + } + if (!panicString) { + panicString = IONew(char, panicStringLen); + } + set = NULL; + kextdWait = OSKext::isWaitingKextd(); + iter = IORegistryIterator::iterateOver(this, gIOServicePlane, kIORegistryIterateRecursively); + leaves = OSOrderedSet::withCapacity(4); + if (iter) { + set = iter->iterateAll(); + } + if (string && panicString && leaves && set) { + string[0] = panicString[0] = 0; + set->setObject(this); + while ((next = (IOService *) set->getLastObject())) { + if (next->getBusyState()) { + if (kIOServiceModuleStallState & next->__state[1]) { + kextdWait = true; + } + leaves->setObject(next); + nextParent = next; + while ((nextParent = nextParent->getProvider())) { + 
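// Callers of the wait machinery above normally just bound their wait; the
// timeout is in nanoseconds. Sketch, where "nub" stands for some IOService:

IOReturn rc = nub->waitQuiet(30ULL * kSecondScale);  // wait up to 30 s
if (kIOReturnTimeout == rc) {
    IOLog("%s: subtree still busy after 30s\n", nub->getName());
}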
set->removeObject(nextParent); + leaves->removeObject(nextParent); + } + } + set->removeObject(next); + } + s = string; + while ((next = (IOService *) leaves->getLastObject())) { + l = snprintf(s, len, "%s'%s'", ((s == string) ? "" : ", "), next->getName()); + if (l >= len) { + break; + } + s += l; + len -= l; + leaves->removeObject(next); + } + } + OSSafeReleaseNULL(leaves); + OSSafeReleaseNULL(set); + OSSafeReleaseNULL(iter); + } + + dopanic = ((loops >= (kTimeoutExtensions - 1)) && (kIOWaitQuietPanics & gIOKitDebug)); + snprintf(panicString, panicStringLen, + "%s[%d], (%llds): %s", + kextdWait ? "kextd stall" : "busy timeout", + loops, timeout / 1000000000ULL, + string ? string : ""); + IOLog("%s\n", panicString); + if (dopanic) { + panic("%s", panicString); + } else if (!loops) { + getPMRootDomain()->startSpinDump(1); + } + } + + if (string) { + IODelete(string, char, 256); + } + if (panicString) { + IODelete(panicString, char, panicStringLen); + } + + return ret; } -uint64_t IOService::getAccumulatedBusyTime( void ) +IOReturn +IOService::waitQuiet( mach_timespec_t * timeout ) { - uint64_t accumBusy = __accumBusy; - uint64_t timeBusy = __timeBusy; - uint64_t nano; + uint64_t timeoutNS; + + if (timeout) { + timeoutNS = timeout->tv_sec; + timeoutNS *= kSecondScale; + timeoutNS += timeout->tv_nsec; + } else { + timeoutNS = UINT64_MAX; + } - do - { - accumBusy = __accumBusy; - timeBusy = __timeBusy; - if (timeBusy) - accumBusy += mach_absolute_time() - timeBusy; - } - while (timeBusy != __timeBusy); + return waitQuiet(timeoutNS); +} - absolutetime_to_nanoseconds(*(AbsoluteTime *)&accumBusy, &nano); - - return (nano); -} - -UInt32 IOService::getBusyState( void ) -{ - return( __state[1] & kIOServiceBusyStateMask ); -} - -IOReturn IOService::waitForState( UInt32 mask, UInt32 value, - mach_timespec_t * timeout ) -{ - panic("waitForState"); - return (kIOReturnUnsupported); -} - -IOReturn IOService::waitForState( UInt32 mask, UInt32 value, - uint64_t timeout ) -{ - bool wait; - int waitResult = THREAD_AWAKENED; - bool computeDeadline = true; - AbsoluteTime abstime; - - do { - lockForArbitration(); - IOLockLock( gIOServiceBusyLock ); - wait = (value != (__state[1] & mask)); - if( wait) { - __state[1] |= kIOServiceBusyWaiterState; - unlockForArbitration(); - if( timeout != UINT64_MAX ) { - if( computeDeadline ) { - AbsoluteTime nsinterval; - nanoseconds_to_absolutetime(timeout, &nsinterval ); - clock_absolutetime_interval_to_deadline(nsinterval, &abstime); - computeDeadline = false; - } - assert_wait_deadline((event_t)this, THREAD_UNINT, __OSAbsoluteTime(abstime)); - } - else - assert_wait((event_t)this, THREAD_UNINT ); - } else - unlockForArbitration(); - IOLockUnlock( gIOServiceBusyLock ); - if( wait) - waitResult = thread_block(THREAD_CONTINUE_NULL); - - } while( wait && (waitResult != THREAD_TIMED_OUT)); - - if( waitResult == THREAD_TIMED_OUT) - return( kIOReturnTimeout ); - else - return( kIOReturnSuccess ); -} - -IOReturn IOService::waitQuiet( uint64_t timeout ) -{ - IOReturn ret; - uint32_t loops; - char * string = NULL; - char * panicString = NULL; - size_t len; - size_t panicStringLen; - uint64_t time; - uint64_t nano; - bool kextdWait; - bool dopanic; - - enum { kTimeoutExtensions = 4 }; - - time = mach_absolute_time(); - kextdWait = false; - for (loops = 0; loops < kTimeoutExtensions; loops++) - { - ret = waitForState( kIOServiceBusyStateMask, 0, timeout ); - - if (loops && (kIOReturnSuccess == ret)) - { - time = mach_absolute_time() - time; - absolutetime_to_nanoseconds(*(AbsoluteTime 
*)&time, &nano); - IOLog("busy extended ok[%d], (%llds, %llds)\n", - loops, timeout / 1000000000ULL, nano / 1000000000ULL); - break; - } - else if (kIOReturnTimeout != ret) break; - else if (timeout < 41000000000) break; - - { - IORegistryIterator * iter; - OSOrderedSet * set; - OSOrderedSet * leaves; - IOService * next; - IOService * nextParent; - char * s; - size_t l; - - len = 256; - panicStringLen = 256; - if (!string) string = IONew(char, len); - if (!panicString) panicString = IONew(char, panicStringLen); - set = NULL; - kextdWait = OSKext::isWaitingKextd(); - iter = IORegistryIterator::iterateOver(this, gIOServicePlane, kIORegistryIterateRecursively); - leaves = OSOrderedSet::withCapacity(4); - if (iter) set = iter->iterateAll(); - if (string && panicString && leaves && set) - { - string[0] = panicString[0] = 0; - set->setObject(this); - while ((next = (IOService *) set->getLastObject())) - { - if (next->getBusyState()) - { - if (kIOServiceModuleStallState & next->__state[1]) kextdWait = true; - leaves->setObject(next); - nextParent = next; - while ((nextParent = nextParent->getProvider())) - { - set->removeObject(nextParent); - leaves->removeObject(nextParent); - } - } - set->removeObject(next); - } - s = string; - while ((next = (IOService *) leaves->getLastObject())) - { - l = snprintf(s, len, "%s'%s'", ((s == string) ? "" : ", "), next->getName()); - if (l >= len) break; - s += l; - len -= l; - leaves->removeObject(next); - } - } - OSSafeReleaseNULL(leaves); - OSSafeReleaseNULL(set); - OSSafeReleaseNULL(iter); - } - - dopanic = ((loops >= (kTimeoutExtensions - 1)) && (kIOWaitQuietPanics & gIOKitDebug)); - snprintf(panicString, panicStringLen, - "%s[%d], (%llds): %s", - kextdWait ? "kextd stall" : "busy timeout", - loops, timeout / 1000000000ULL, - string ? 
string : ""); - IOLog("%s\n", panicString); - if (dopanic) panic("%s", panicString); - else if (!loops) getPMRootDomain()->startSpinDump(1); - } - - if (string) IODelete(string, char, 256); - if (panicString) IODelete(panicString, char, panicStringLen); - - return (ret); -} - -IOReturn IOService::waitQuiet( mach_timespec_t * timeout ) -{ - uint64_t timeoutNS; - - if (timeout) - { - timeoutNS = timeout->tv_sec; - timeoutNS *= kSecondScale; - timeoutNS += timeout->tv_nsec; - } - else - timeoutNS = UINT64_MAX; - - return (waitQuiet(timeoutNS)); -} - -bool IOService::serializeProperties( OSSerialize * s ) const +bool +IOService::serializeProperties( OSSerialize * s ) const { #if 0 - ((IOService *)this)->setProperty( ((IOService *)this)->__state, - sizeof( __state), "__state"); + ((IOService *)this)->setProperty(((IOService *)this)->__state, + sizeof(__state), "__state"); #endif - return( super::serializeProperties(s) ); + return super::serializeProperties(s); } -void _IOConfigThread::main(void * arg, wait_result_t result) +void +_IOConfigThread::main(void * arg, wait_result_t result) { - _IOConfigThread * self = (_IOConfigThread *) arg; - _IOServiceJob * job; - IOService * nub; - bool alive = true; - kern_return_t kr; - thread_precedence_policy_data_t precedence = { -1 }; + _IOConfigThread * self = (_IOConfigThread *) arg; + _IOServiceJob * job; + IOService * nub; + bool alive = true; + kern_return_t kr; + thread_precedence_policy_data_t precedence = { -1 }; - kr = thread_policy_set(current_thread(), - THREAD_PRECEDENCE_POLICY, - (thread_policy_t) &precedence, - THREAD_PRECEDENCE_POLICY_COUNT); - if (KERN_SUCCESS != kr) - IOLog("thread_policy_set(%d)\n", kr); - - do { + kr = thread_policy_set(current_thread(), + THREAD_PRECEDENCE_POLICY, + (thread_policy_t) &precedence, + THREAD_PRECEDENCE_POLICY_COUNT); + if (KERN_SUCCESS != kr) { + IOLog("thread_policy_set(%d)\n", kr); + } + do { // randomDelay(); - semaphore_wait( gJobsSemaphore ); + semaphore_wait( gJobsSemaphore ); - IOTakeLock( gJobsLock ); - job = (_IOServiceJob *) gJobs->getFirstObject(); - job->retain(); - gJobs->removeObject(job); - if( job) { - gOutstandingJobs--; + IOTakeLock( gJobsLock ); + job = (_IOServiceJob *) gJobs->getFirstObject(); + job->retain(); + gJobs->removeObject(job); + if (job) { + gOutstandingJobs--; // gNumConfigThreads--; // we're out of service - gNumWaitingThreads--; // we're out of service - } - IOUnlock( gJobsLock ); - - if( job) { - - nub = job->nub; + gNumWaitingThreads--; // we're out of service + } + IOUnlock( gJobsLock ); - if( gIOKitDebug & kIOLogConfig) - LOG("config(%p): starting on %s, %d\n", - IOSERVICE_OBFUSCATE(IOThreadSelf()), job->nub->getName(), job->type); + if (job) { + nub = job->nub; - switch( job->type) { + if (gIOKitDebug & kIOLogConfig) { + LOG("config(%p): starting on %s, %d\n", + IOSERVICE_OBFUSCATE(IOThreadSelf()), job->nub->getName(), job->type); + } - case kMatchNubJob: - nub->doServiceMatch( job->options ); - break; + switch (job->type) { + case kMatchNubJob: + nub->doServiceMatch( job->options ); + break; - default: - LOG("config(%p): strange type (%d)\n", - IOSERVICE_OBFUSCATE(IOThreadSelf()), job->type ); - break; - } + default: + LOG("config(%p): strange type (%d)\n", + IOSERVICE_OBFUSCATE(IOThreadSelf()), job->type ); + break; + } - nub->release(); - job->release(); + nub->release(); + job->release(); - IOTakeLock( gJobsLock ); - alive = (gOutstandingJobs > gNumWaitingThreads); - if( alive) - gNumWaitingThreads++; // back in service + IOTakeLock( gJobsLock ); + alive = 
(gOutstandingJobs > gNumWaitingThreads); + if (alive) { + gNumWaitingThreads++; // back in service + } // gNumConfigThreads++; - else { - if( 0 == --gNumConfigThreads) { + else { + if (0 == --gNumConfigThreads) { // IOLog("MATCH IDLE\n"); - IOLockWakeup( gJobsLock, (event_t) &gNumConfigThreads, /* one-thread */ false ); - } - } - IOUnlock( gJobsLock ); - } - - } while( alive ); - - if( gIOKitDebug & kIOLogConfig) - LOG("config(%p): terminating\n", IOSERVICE_OBFUSCATE(IOThreadSelf()) ); - - self->release(); -} - -IOReturn IOService::waitMatchIdle( UInt32 msToWait ) -{ - bool wait; - int waitResult = THREAD_AWAKENED; - bool computeDeadline = true; - AbsoluteTime deadline; - - IOLockLock( gJobsLock ); - do { - wait = (0 != gNumConfigThreads); - if( wait) { - if( msToWait) { - if( computeDeadline ) { - clock_interval_to_deadline( - msToWait, kMillisecondScale, &deadline ); - computeDeadline = false; - } - waitResult = IOLockSleepDeadline( gJobsLock, &gNumConfigThreads, - deadline, THREAD_UNINT ); - } else { - waitResult = IOLockSleep( gJobsLock, &gNumConfigThreads, - THREAD_UNINT ); - } - } - } while( wait && (waitResult != THREAD_TIMED_OUT)); + IOLockWakeup( gJobsLock, (event_t) &gNumConfigThreads, /* one-thread */ false ); + } + } + IOUnlock( gJobsLock ); + } + } while (alive); + + if (gIOKitDebug & kIOLogConfig) { + LOG("config(%p): terminating\n", IOSERVICE_OBFUSCATE(IOThreadSelf())); + } + + self->release(); +} + +IOReturn +IOService::waitMatchIdle( UInt32 msToWait ) +{ + bool wait; + int waitResult = THREAD_AWAKENED; + bool computeDeadline = true; + AbsoluteTime deadline; + + IOLockLock( gJobsLock ); + do { + wait = (0 != gNumConfigThreads); + if (wait) { + if (msToWait) { + if (computeDeadline) { + clock_interval_to_deadline( + msToWait, kMillisecondScale, &deadline ); + computeDeadline = false; + } + waitResult = IOLockSleepDeadline( gJobsLock, &gNumConfigThreads, + deadline, THREAD_UNINT ); + } else { + waitResult = IOLockSleep( gJobsLock, &gNumConfigThreads, + THREAD_UNINT ); + } + } + } while (wait && (waitResult != THREAD_TIMED_OUT)); IOLockUnlock( gJobsLock ); - if( waitResult == THREAD_TIMED_OUT) - return( kIOReturnTimeout ); - else - return( kIOReturnSuccess ); + if (waitResult == THREAD_TIMED_OUT) { + return kIOReturnTimeout; + } else { + return kIOReturnSuccess; + } } -void IOService::cpusRunning(void) +void +IOService::cpusRunning(void) { - gCPUsRunning = true; + gCPUsRunning = true; } -void _IOServiceJob::pingConfig( _IOServiceJob * job ) +void +_IOServiceJob::pingConfig( _IOServiceJob * job ) { - int count; - bool create; + int count; + bool create; - assert( job ); + assert( job ); - IOTakeLock( gJobsLock ); + IOTakeLock( gJobsLock ); - gOutstandingJobs++; - gJobs->setLastObject( job ); + gOutstandingJobs++; + gJobs->setLastObject( job ); - count = gNumWaitingThreads; + count = gNumWaitingThreads; // if( gNumConfigThreads) count++;// assume we're called from a config thread - create = ( (gOutstandingJobs > count) - && ((gNumConfigThreads < kMaxConfigThreads) - || (job->nub == gIOResources) - || !gCPUsRunning)); - if( create) { - gNumConfigThreads++; - gNumWaitingThreads++; - } + create = ((gOutstandingJobs > count) + && ((gNumConfigThreads < kMaxConfigThreads) + || (job->nub == gIOResources) + || !gCPUsRunning)); + if (create) { + gNumConfigThreads++; + gNumWaitingThreads++; + } - IOUnlock( gJobsLock ); + IOUnlock( gJobsLock ); - job->release(); + job->release(); - if( create) { - if( gIOKitDebug & kIOLogConfig) - LOG("config(%d): creating\n", gNumConfigThreads - 1); - 
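// The gNumConfigThreads accounting above is what waitMatchIdle() sleeps
// on: it returns once no config threads remain in flight, or when the
// millisecond wait expires. Sketch:

if (kIOReturnTimeout == IOService::waitMatchIdle(5000 /* ms */)) {
    IOLog("driver matching still in progress after 5s\n");
}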
_IOConfigThread::configThread(); - } + if (create) { + if (gIOKitDebug & kIOLogConfig) { + LOG("config(%d): creating\n", gNumConfigThreads - 1); + } + _IOConfigThread::configThread(); + } - semaphore_signal( gJobsSemaphore ); + semaphore_signal( gJobsSemaphore ); } -struct IOServiceMatchContext -{ - OSDictionary * table; - OSObject * result; - uint32_t options; - uint32_t state; - uint32_t count; - uint32_t done; +struct IOServiceMatchContext { + OSDictionary * table; + OSObject * result; + uint32_t options; + uint32_t state; + uint32_t count; + uint32_t done; }; -bool IOService::instanceMatch(const OSObject * entry, void * context) -{ - IOServiceMatchContext * ctx = (typeof(ctx)) context; - IOService * service = (typeof(service)) entry; - OSDictionary * table = ctx->table; - uint32_t options = ctx->options; - uint32_t state = ctx->state; - uint32_t done; - bool match; - - done = 0; - do - { - match = ((state == (state & service->__state[0])) - && (0 == (service->__state[0] & kIOServiceInactiveState))); - if (!match) break; - ctx->count += table->getCount(); - match = service->matchInternal(table, options, &done); - ctx->done += done; - } - while (false); - if (!match) - return (false); - - if ((kIONotifyOnce & options) && (ctx->done == ctx->count)) - { - service->retain(); - ctx->result = service; - return (true); - } - else if (!ctx->result) - { - ctx->result = OSSet::withObjects((const OSObject **) &service, 1, 1); - } - else - { - ((OSSet *)ctx->result)->setObject(service); - } - return (false); -} - -// internal - call with gNotificationLock -OSObject * IOService::copyExistingServices( OSDictionary * matching, - IOOptionBits inState, IOOptionBits options ) +bool +IOService::instanceMatch(const OSObject * entry, void * context) { - OSObject * current = 0; - OSIterator * iter; - IOService * service; - OSObject * obj; - OSString * str; - - if( !matching) - return( 0 ); + IOServiceMatchContext * ctx = (typeof(ctx))context; + IOService * service = (typeof(service))entry; + OSDictionary * table = ctx->table; + uint32_t options = ctx->options; + uint32_t state = ctx->state; + uint32_t done; + bool match; -#if MATCH_DEBUG - OSSerialize * s = OSSerialize::withCapacity(128); - matching->serialize(s); -#endif + done = 0; + do{ + match = ((state == (state & service->__state[0])) + && (0 == (service->__state[0] & kIOServiceInactiveState))); + if (!match) { + break; + } + ctx->count += table->getCount(); + match = service->matchInternal(table, options, &done); + ctx->done += done; + }while (false); + if (!match) { + return false; + } - if((obj = matching->getObject(gIOProviderClassKey)) - && gIOResourcesKey - && gIOResourcesKey->isEqualTo(obj) - && (service = gIOResources)) - { - if( (inState == (service->__state[0] & inState)) - && (0 == (service->__state[0] & kIOServiceInactiveState)) - && service->matchPassive(matching, options)) - { - if( options & kIONotifyOnce) - { + if ((kIONotifyOnce & options) && (ctx->done == ctx->count)) { service->retain(); - current = service; - } - else - current = OSSet::withObjects((const OSObject **) &service, 1, 1 ); - } - } - else - { - IOServiceMatchContext ctx; - ctx.table = matching; - ctx.state = inState; - ctx.count = 0; - ctx.done = 0; - ctx.options = options; - ctx.result = 0; - - if ((str = OSDynamicCast(OSString, obj))) - { - const OSSymbol * sym = OSSymbol::withString(str); - OSMetaClass::applyToInstancesOfClassName(sym, instanceMatch, &ctx); - sym->release(); + ctx->result = service; + return true; + } else if (!ctx->result) { + ctx->result = 
OSSet::withObjects((const OSObject **) &service, 1, 1); + } else { + ((OSSet *)ctx->result)->setObject(service); } - else - { - IOService::gMetaClass.applyToInstances(instanceMatch, &ctx); + return false; +} + +// internal - call with gNotificationLock +OSObject * +IOService::copyExistingServices( OSDictionary * matching, + IOOptionBits inState, IOOptionBits options ) +{ + OSObject * current = 0; + OSIterator * iter; + IOService * service; + OSObject * obj; + OSString * str; + + if (!matching) { + return 0; } +#if MATCH_DEBUG + OSSerialize * s = OSSerialize::withCapacity(128); + matching->serialize(s); +#endif - current = ctx.result; + if ((obj = matching->getObject(gIOProviderClassKey)) + && gIOResourcesKey + && gIOResourcesKey->isEqualTo(obj) + && (service = gIOResources)) { + if ((inState == (service->__state[0] & inState)) + && (0 == (service->__state[0] & kIOServiceInactiveState)) + && service->matchPassive(matching, options)) { + if (options & kIONotifyOnce) { + service->retain(); + current = service; + } else { + current = OSSet::withObjects((const OSObject **) &service, 1, 1 ); + } + } + } else { + IOServiceMatchContext ctx; + ctx.table = matching; + ctx.state = inState; + ctx.count = 0; + ctx.done = 0; + ctx.options = options; + ctx.result = 0; + + if ((str = OSDynamicCast(OSString, obj))) { + const OSSymbol * sym = OSSymbol::withString(str); + OSMetaClass::applyToInstancesOfClassName(sym, instanceMatch, &ctx); + sym->release(); + } else { + IOService::gMetaClass.applyToInstances(instanceMatch, &ctx); + } - options |= kIOServiceInternalDone | kIOServiceClassDone; - if (current && (ctx.done != ctx.count)) - { - OSSet * - source = OSDynamicCast(OSSet, current); - current = 0; - while ((service = (IOService *) source->getAnyObject())) - { - if (service->matchPassive(matching, options)) - { - if( options & kIONotifyOnce) - { - service->retain(); - current = service; - break; - } - if( current) - { - ((OSSet *)current)->setObject( service ); - } - else - { - current = OSSet::withObjects( - (const OSObject **) &service, 1, 1 ); - } - } - source->removeObject(service); - } - source->release(); - } - } -#if MATCH_DEBUG - { - OSObject * _current = 0; - - iter = IORegistryIterator::iterateOver( gIOServicePlane, - kIORegistryIterateRecursively ); - if( iter) { - do { - iter->reset(); - while( (service = (IOService *) iter->getNextObject())) { - if( (inState == (service->__state[0] & inState)) - && (0 == (service->__state[0] & kIOServiceInactiveState)) - && service->matchPassive(matching, 0)) { - - if( options & kIONotifyOnce) { - service->retain(); - _current = service; - break; + current = ctx.result; + + options |= kIOServiceInternalDone | kIOServiceClassDone; + if (current && (ctx.done != ctx.count)) { + OSSet * + source = OSDynamicCast(OSSet, current); + current = 0; + while ((service = (IOService *) source->getAnyObject())) { + if (service->matchPassive(matching, options)) { + if (options & kIONotifyOnce) { + service->retain(); + current = service; + break; + } + if (current) { + ((OSSet *)current)->setObject( service ); + } else { + current = OSSet::withObjects( + (const OSObject **) &service, 1, 1 ); + } + } + source->removeObject(service); } - if( _current) - ((OSSet *)_current)->setObject( service ); - else - _current = OSSet::withObjects( - (const OSObject **) &service, 1, 1 ); - } + source->release(); } - } while( !service && !iter->isValid()); - iter->release(); } - - if ( ((current != 0) != (_current != 0)) - || (current && _current && !current->isEqualTo(_current))) +#if 
MATCH_DEBUG { - OSSerialize * s1 = OSSerialize::withCapacity(128); - OSSerialize * s2 = OSSerialize::withCapacity(128); - current->serialize(s1); - _current->serialize(s2); - kprintf("**mismatch** %p %p\n%s\n%s\n%s\n", IOSERVICE_OBFUSCATE(current), - IOSERVICE_OBFUSCATE(_current), s->text(), s1->text(), s2->text()); - s1->release(); - s2->release(); - } + OSObject * _current = 0; + + iter = IORegistryIterator::iterateOver( gIOServicePlane, + kIORegistryIterateRecursively ); + if (iter) { + do { + iter->reset(); + while ((service = (IOService *) iter->getNextObject())) { + if ((inState == (service->__state[0] & inState)) + && (0 == (service->__state[0] & kIOServiceInactiveState)) + && service->matchPassive(matching, 0)) { + if (options & kIONotifyOnce) { + service->retain(); + _current = service; + break; + } + if (_current) { + ((OSSet *)_current)->setObject( service ); + } else { + _current = OSSet::withObjects( + (const OSObject **) &service, 1, 1 ); + } + } + } + } while (!service && !iter->isValid()); + iter->release(); + } + + + if (((current != 0) != (_current != 0)) + || (current && _current && !current->isEqualTo(_current))) { + OSSerialize * s1 = OSSerialize::withCapacity(128); + OSSerialize * s2 = OSSerialize::withCapacity(128); + current->serialize(s1); + _current->serialize(s2); + kprintf("**mismatch** %p %p\n%s\n%s\n%s\n", IOSERVICE_OBFUSCATE(current), + IOSERVICE_OBFUSCATE(_current), s->text(), s1->text(), s2->text()); + s1->release(); + s2->release(); + } - if (_current) _current->release(); - } + if (_current) { + _current->release(); + } + } - s->release(); + s->release(); #endif - if( current && (0 == (options & (kIONotifyOnce | kIOServiceExistingSet)))) { - iter = OSCollectionIterator::withCollection( (OSSet *)current ); - current->release(); - current = iter; - } + if (current && (0 == (options & (kIONotifyOnce | kIOServiceExistingSet)))) { + iter = OSCollectionIterator::withCollection((OSSet *)current ); + current->release(); + current = iter; + } - return( current ); + return current; } // public version -OSIterator * IOService::getMatchingServices( OSDictionary * matching ) +OSIterator * +IOService::getMatchingServices( OSDictionary * matching ) { - OSIterator * iter; + OSIterator * iter; + + // is a lock even needed? + LOCKWRITENOTIFY(); - // is a lock even needed? - LOCKWRITENOTIFY(); + iter = (OSIterator *) copyExistingServices( matching, + kIOServiceMatchedState ); - iter = (OSIterator *) copyExistingServices( matching, - kIOServiceMatchedState ); - - UNLOCKNOTIFY(); + UNLOCKNOTIFY(); - return( iter ); + return iter; } -IOService * IOService::copyMatchingService( OSDictionary * matching ) +IOService * +IOService::copyMatchingService( OSDictionary * matching ) { - IOService * service; + IOService * service; - // is a lock even needed? - LOCKWRITENOTIFY(); + // is a lock even needed? 
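// A typical client of the lookup machinery above (sketch): build a
// matching dictionary, snapshot the currently matched services, and
// release both the iterator and the dictionary (neither call consumes
// the matching table). "IONetworkInterface" is just an example class:

OSDictionary * matching = IOService::serviceMatching("IONetworkInterface");
if (matching) {
    OSIterator * iter = IOService::getMatchingServices(matching);
    if (iter) {
        IOService * svc;
        while ((svc = OSDynamicCast(IOService, iter->getNextObject()))) {
            IOLog("matched: %s\n", svc->getName());
        }
        iter->release();
    }
    matching->release();
}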
+ LOCKWRITENOTIFY(); - service = (IOService *) copyExistingServices( matching, - kIOServiceMatchedState, kIONotifyOnce ); - - UNLOCKNOTIFY(); + service = (IOService *) copyExistingServices( matching, + kIOServiceMatchedState, kIONotifyOnce ); - return( service ); + UNLOCKNOTIFY(); + + return service; } -struct _IOServiceMatchingNotificationHandlerRef -{ - IOServiceNotificationHandler handler; - void * ref; +struct _IOServiceMatchingNotificationHandlerRef { + IOServiceNotificationHandler handler; + void * ref; }; -static bool _IOServiceMatchingNotificationHandler( void * target, void * refCon, - IOService * newService, - IONotifier * notifier ) +static bool +_IOServiceMatchingNotificationHandler( void * target, void * refCon, + IOService * newService, + IONotifier * notifier ) { - return ((*((_IOServiceNotifier *) notifier)->compatHandler)(target, refCon, newService)); + return (*((_IOServiceNotifier *) notifier)->compatHandler)(target, refCon, newService); } // internal - call with gNotificationLock -IONotifier * IOService::setNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceMatchingNotificationHandler handler, void * target, void * ref, - SInt32 priority ) +IONotifier * +IOService::setNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceMatchingNotificationHandler handler, void * target, void * ref, + SInt32 priority ) { - _IOServiceNotifier * notify = 0; - OSOrderedSet * set; - - if( !matching) - return( 0 ); + _IOServiceNotifier * notify = 0; + OSOrderedSet * set; - notify = new _IOServiceNotifier; - if( notify && !notify->init()) { - notify->release(); - notify = 0; - } - - if( notify) { - notify->handler = handler; - notify->target = target; - notify->type = type; - notify->matching = matching; - matching->retain(); - if (handler == &_IOServiceMatchingNotificationHandler) - { - notify->compatHandler = ((_IOServiceMatchingNotificationHandlerRef *)ref)->handler; - notify->ref = ((_IOServiceMatchingNotificationHandlerRef *)ref)->ref; + if (!matching) { + return 0; } - else - notify->ref = ref; - notify->priority = priority; - notify->state = kIOServiceNotifyEnable; - queue_init( ¬ify->handlerInvocations ); - ////// queue + notify = new _IOServiceNotifier; + if (notify && !notify->init()) { + notify->release(); + notify = 0; + } - if( 0 == (set = (OSOrderedSet *) gNotifications->getObject( type ))) { - set = OSOrderedSet::withCapacity( 1, - IONotifyOrdering, 0 ); - if( set) { - gNotifications->setObject( type, set ); - set->release(); - } - } - notify->whence = set; - if( set) - set->setObject( notify ); - } + if (notify) { + notify->handler = handler; + notify->target = target; + notify->type = type; + notify->matching = matching; + matching->retain(); + if (handler == &_IOServiceMatchingNotificationHandler) { + notify->compatHandler = ((_IOServiceMatchingNotificationHandlerRef *)ref)->handler; + notify->ref = ((_IOServiceMatchingNotificationHandlerRef *)ref)->ref; + } else { + notify->ref = ref; + } + notify->priority = priority; + notify->state = kIOServiceNotifyEnable; + queue_init( ¬ify->handlerInvocations ); + + ////// queue + + if (0 == (set = (OSOrderedSet *) gNotifications->getObject( type ))) { + set = OSOrderedSet::withCapacity( 1, + IONotifyOrdering, 0 ); + if (set) { + gNotifications->setObject( type, set ); + set->release(); + } + } + notify->whence = set; + if (set) { + set->setObject( notify ); + } + } - return( notify ); + return notify; } // internal - call with gNotificationLock -IONotifier * 
IOService::doInstallNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceMatchingNotificationHandler handler, - void * target, void * ref, - SInt32 priority, OSIterator ** existing ) -{ - OSIterator * exist; - IONotifier * notify; - IOOptionBits inState; - - if( !matching) - return( 0 ); - - if( type == gIOPublishNotification) - inState = kIOServiceRegisteredState; - - else if( type == gIOFirstPublishNotification) - inState = kIOServiceFirstPublishState; - - else if (type == gIOMatchedNotification) - inState = kIOServiceMatchedState; +IONotifier * +IOService::doInstallNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceMatchingNotificationHandler handler, + void * target, void * ref, + SInt32 priority, OSIterator ** existing ) +{ + OSIterator * exist; + IONotifier * notify; + IOOptionBits inState; + + if (!matching) { + return 0; + } - else if (type == gIOFirstMatchNotification) - inState = kIOServiceFirstMatchState; + if (type == gIOPublishNotification) { + inState = kIOServiceRegisteredState; + } else if (type == gIOFirstPublishNotification) { + inState = kIOServiceFirstPublishState; + } else if (type == gIOMatchedNotification) { + inState = kIOServiceMatchedState; + } else if (type == gIOFirstMatchNotification) { + inState = kIOServiceFirstMatchState; + } else if ((type == gIOTerminatedNotification) || (type == gIOWillTerminateNotification)) { + inState = 0; + } else { + return 0; + } - else if ((type == gIOTerminatedNotification) || (type == gIOWillTerminateNotification)) - inState = 0; - else - return( 0 ); + notify = setNotification( type, matching, handler, target, ref, priority ); - notify = setNotification( type, matching, handler, target, ref, priority ); - - if( inState) - // get the current set - exist = (OSIterator *) copyExistingServices( matching, inState ); - else - exist = 0; + if (inState) { + // get the current set + exist = (OSIterator *) copyExistingServices( matching, inState ); + } else { + exist = 0; + } - *existing = exist; + *existing = exist; - return( notify ); + return notify; } #if !defined(__LP64__) -IONotifier * IOService::installNotification(const OSSymbol * type, OSDictionary * matching, - IOServiceNotificationHandler handler, - void * target, void * refCon, - SInt32 priority, OSIterator ** existing ) -{ - IONotifier * result; - _IOServiceMatchingNotificationHandlerRef ref; - ref.handler = handler; - ref.ref = refCon; - - result = (_IOServiceNotifier *) installNotification( type, matching, - &_IOServiceMatchingNotificationHandler, - target, &ref, priority, existing ); - if (result) - matching->release(); +IONotifier * +IOService::installNotification(const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * refCon, + SInt32 priority, OSIterator ** existing ) +{ + IONotifier * result; + _IOServiceMatchingNotificationHandlerRef ref; + ref.handler = handler; + ref.ref = refCon; + + result = (_IOServiceNotifier *) installNotification( type, matching, + &_IOServiceMatchingNotificationHandler, + target, &ref, priority, existing ); + if (result) { + matching->release(); + } - return (result); + return result; } #endif /* !defined(__LP64__) */ -IONotifier * IOService::installNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceMatchingNotificationHandler handler, - void * target, void * ref, - SInt32 priority, OSIterator ** existing ) +IONotifier * +IOService::installNotification( + const OSSymbol * type, OSDictionary * matching, + 
IOServiceMatchingNotificationHandler handler, + void * target, void * ref, + SInt32 priority, OSIterator ** existing ) { - IONotifier * notify; + IONotifier * notify; - LOCKWRITENOTIFY(); + LOCKWRITENOTIFY(); - notify = doInstallNotification( type, matching, handler, target, ref, - priority, existing ); + notify = doInstallNotification( type, matching, handler, target, ref, + priority, existing ); - // in case handler remove()s - if (notify) notify->retain(); + // in case handler remove()s + if (notify) { + notify->retain(); + } - UNLOCKNOTIFY(); + UNLOCKNOTIFY(); - return( notify ); + return notify; } -IONotifier * IOService::addNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceNotificationHandler handler, - void * target, void * refCon, - SInt32 priority ) +IONotifier * +IOService::addNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceNotificationHandler handler, + void * target, void * refCon, + SInt32 priority ) { - IONotifier * result; - _IOServiceMatchingNotificationHandlerRef ref; - - ref.handler = handler; - ref.ref = refCon; - - result = addMatchingNotification(type, matching, &_IOServiceMatchingNotificationHandler, - target, &ref, priority); + IONotifier * result; + _IOServiceMatchingNotificationHandlerRef ref; - if (result) - matching->release(); + ref.handler = handler; + ref.ref = refCon; + + result = addMatchingNotification(type, matching, &_IOServiceMatchingNotificationHandler, + target, &ref, priority); + + if (result) { + matching->release(); + } - return (result); + return result; } -IONotifier * IOService::addMatchingNotification( - const OSSymbol * type, OSDictionary * matching, - IOServiceMatchingNotificationHandler handler, - void * target, void * ref, - SInt32 priority ) +IONotifier * +IOService::addMatchingNotification( + const OSSymbol * type, OSDictionary * matching, + IOServiceMatchingNotificationHandler handler, + void * target, void * ref, + SInt32 priority ) { - OSIterator * existing = NULL; - IONotifier * ret; - _IOServiceNotifier * notify; - IOService * next; + OSIterator * existing = NULL; + IONotifier * ret; + _IOServiceNotifier * notify; + IOService * next; - ret = notify = (_IOServiceNotifier *) installNotification( type, matching, - handler, target, ref, priority, &existing ); - if (!ret) return (0); + ret = notify = (_IOServiceNotifier *) installNotification( type, matching, + handler, target, ref, priority, &existing ); + if (!ret) { + return 0; + } - // send notifications for existing set - if (existing) - { - while( (next = (IOService *) existing->getNextObject())) - { - if( 0 == (next->__state[0] & kIOServiceInactiveState)) - { - next->invokeNotifier( notify ); - } + // send notifications for existing set + if (existing) { + while ((next = (IOService *) existing->getNextObject())) { + if (0 == (next->__state[0] & kIOServiceInactiveState)) { + next->invokeNotifier( notify ); + } + } + existing->release(); } - existing->release(); - } - LOCKWRITENOTIFY(); - bool removed = (0 == notify->whence); - notify->release(); - if (removed) ret = gIOServiceNullNotifier; - UNLOCKNOTIFY(); + LOCKWRITENOTIFY(); + bool removed = (0 == notify->whence); + notify->release(); + if (removed) { + ret = gIOServiceNullNotifier; + } + UNLOCKNOTIFY(); - return( ret ); + return ret; } static bool IOServiceMatchingNotificationHandlerToBlock( void * target __unused, void * refCon, - IOService * newService, - IONotifier * notifier ) + IOService * newService, + IONotifier * notifier ) { - return 
((IOServiceMatchingNotificationHandlerBlock) refCon)(newService, notifier); + return ((IOServiceMatchingNotificationHandlerBlock) refCon)(newService, notifier); } -IONotifier * IOService::addMatchingNotification( - const OSSymbol * type, OSDictionary * matching, - SInt32 priority, - IOServiceMatchingNotificationHandlerBlock handler) +IONotifier * +IOService::addMatchingNotification( + const OSSymbol * type, OSDictionary * matching, + SInt32 priority, + IOServiceMatchingNotificationHandlerBlock handler) { - IONotifier * notify; - void * block; + IONotifier * notify; + void * block; - block = Block_copy(handler); - if (!block) return (NULL); + block = Block_copy(handler); + if (!block) { + return NULL; + } - notify = addMatchingNotification(type, matching, - &IOServiceMatchingNotificationHandlerToBlock, NULL, block, priority); + notify = addMatchingNotification(type, matching, + &IOServiceMatchingNotificationHandlerToBlock, NULL, block, priority); - if (!notify) Block_release(block); + if (!notify) { + Block_release(block); + } - return (notify); + return notify; } -bool IOService::syncNotificationHandler( - void * /* target */, void * ref, - IOService * newService, - IONotifier * notifier ) +bool +IOService::syncNotificationHandler( + void * /* target */, void * ref, + IOService * newService, + IONotifier * notifier ) { + LOCKWRITENOTIFY(); + if (!*((IOService **) ref)) { + newService->retain(); + (*(IOService **) ref) = newService; + WAKEUPNOTIFY(ref); + } + UNLOCKNOTIFY(); - LOCKWRITENOTIFY(); - if (!*((IOService **) ref)) - { - newService->retain(); - (*(IOService **) ref) = newService; - WAKEUPNOTIFY(ref); - } - UNLOCKNOTIFY(); - - return( false ); + return false; } -IOService * IOService::waitForMatchingService( OSDictionary * matching, - uint64_t timeout) +IOService * +IOService::waitForMatchingService( OSDictionary * matching, + uint64_t timeout) { - IONotifier * notify = 0; - // priority doesn't help us much since we need a thread wakeup - SInt32 priority = 0; - IOService * result; - - if (!matching) - return( 0 ); - - result = NULL; + IONotifier * notify = 0; + // priority doesn't help us much since we need a thread wakeup + SInt32 priority = 0; + IOService * result; - LOCKWRITENOTIFY(); - do - { - result = (IOService *) copyExistingServices( matching, - kIOServiceMatchedState, kIONotifyOnce ); - if (result) - break; - notify = IOService::setNotification( gIOMatchedNotification, matching, - &IOService::syncNotificationHandler, (void *) 0, - &result, priority ); - if (!notify) - break; - if (UINT64_MAX != timeout) - { - AbsoluteTime deadline; - nanoseconds_to_absolutetime(timeout, &deadline); - clock_absolutetime_interval_to_deadline(deadline, &deadline); - SLEEPNOTIFYTO(&result, deadline); - } - else - { - SLEEPNOTIFY(&result); + if (!matching) { + return 0; } - } - while( false ); - UNLOCKNOTIFY(); + result = NULL; - if (notify) - notify->remove(); // dequeues + LOCKWRITENOTIFY(); + do{ + result = (IOService *) copyExistingServices( matching, + kIOServiceMatchedState, kIONotifyOnce ); + if (result) { + break; + } + notify = IOService::setNotification( gIOMatchedNotification, matching, + &IOService::syncNotificationHandler, (void *) 0, + &result, priority ); + if (!notify) { + break; + } + if (UINT64_MAX != timeout) { + AbsoluteTime deadline; + nanoseconds_to_absolutetime(timeout, &deadline); + clock_absolutetime_interval_to_deadline(deadline, &deadline); + SLEEPNOTIFYTO(&result, deadline); + } else { + SLEEPNOTIFY(&result); + } + }while (false); + + UNLOCKNOTIFY(); - return( 
result ); + if (notify) { + notify->remove(); // dequeues + } + return result; } -IOService * IOService::waitForService( OSDictionary * matching, - mach_timespec_t * timeout ) +IOService * +IOService::waitForService( OSDictionary * matching, + mach_timespec_t * timeout ) { - IOService * result; - uint64_t timeoutNS; + IOService * result; + uint64_t timeoutNS; - if (timeout) - { - timeoutNS = timeout->tv_sec; - timeoutNS *= kSecondScale; - timeoutNS += timeout->tv_nsec; - } - else - timeoutNS = UINT64_MAX; + if (timeout) { + timeoutNS = timeout->tv_sec; + timeoutNS *= kSecondScale; + timeoutNS += timeout->tv_nsec; + } else { + timeoutNS = UINT64_MAX; + } - result = waitForMatchingService(matching, timeoutNS); + result = waitForMatchingService(matching, timeoutNS); - matching->release(); - if (result) - result->release(); + matching->release(); + if (result) { + result->release(); + } - return (result); + return result; } -void IOService::deliverNotification( const OSSymbol * type, - IOOptionBits orNewState, IOOptionBits andNewState ) +void +IOService::deliverNotification( const OSSymbol * type, + IOOptionBits orNewState, IOOptionBits andNewState ) { - panic("deliverNotification"); + panic("deliverNotification"); } -OSArray * IOService::copyNotifiers(const OSSymbol * type, - IOOptionBits orNewState, IOOptionBits andNewState ) +OSArray * +IOService::copyNotifiers(const OSSymbol * type, + IOOptionBits orNewState, IOOptionBits andNewState ) { - _IOServiceNotifier * notify; - OSIterator * iter; - OSArray * willSend = 0; - - lockForArbitration(); - - if( (0 == (__state[0] & kIOServiceInactiveState)) - || (type == gIOTerminatedNotification) - || (type == gIOWillTerminateNotification)) { - - LOCKREADNOTIFY(); + _IOServiceNotifier * notify; + OSIterator * iter; + OSArray * willSend = 0; - iter = OSCollectionIterator::withCollection( (OSOrderedSet *) - gNotifications->getObject( type ) ); - - if( iter) { - while( (notify = (_IOServiceNotifier *) iter->getNextObject())) { - - if( matchPassive(notify->matching, 0) - && (kIOServiceNotifyEnable & notify->state)) { - if( 0 == willSend) - willSend = OSArray::withCapacity(8); - if( willSend) - willSend->setObject( notify ); - } - } - iter->release(); - } - __state[0] = (__state[0] | orNewState) & andNewState; - UNLOCKNOTIFY(); - } + lockForArbitration(); - unlockForArbitration(); + if ((0 == (__state[0] & kIOServiceInactiveState)) + || (type == gIOTerminatedNotification) + || (type == gIOWillTerminateNotification)) { + LOCKREADNOTIFY(); + + iter = OSCollectionIterator::withCollection((OSOrderedSet *) + gNotifications->getObject( type )); + + if (iter) { + while ((notify = (_IOServiceNotifier *) iter->getNextObject())) { + if (matchPassive(notify->matching, 0) + && (kIOServiceNotifyEnable & notify->state)) { + if (0 == willSend) { + willSend = OSArray::withCapacity(8); + } + if (willSend) { + willSend->setObject( notify ); + } + } + } + iter->release(); + } + __state[0] = (__state[0] | orNewState) & andNewState; + UNLOCKNOTIFY(); + } - return (willSend); + unlockForArbitration(); + return willSend; } -IOOptionBits IOService::getState( void ) const +IOOptionBits +IOService::getState( void ) const { - return( __state[0] ); + return __state[0]; } /* * Helpers to make matching objects for simple cases */ -OSDictionary * IOService::serviceMatching( const OSString * name, - OSDictionary * table ) +OSDictionary * +IOService::serviceMatching( const OSString * name, + OSDictionary * table ) { + const OSString * str; - const OSString * str; - - str = 
OSSymbol::withString(name); - if( !str) - return( 0 ); + str = OSSymbol::withString(name); + if (!str) { + return 0; + } - if( !table) - table = OSDictionary::withCapacity( 2 ); - if( table) - table->setObject(gIOProviderClassKey, (OSObject *)str ); - str->release(); + if (!table) { + table = OSDictionary::withCapacity( 2 ); + } + if (table) { + table->setObject(gIOProviderClassKey, (OSObject *)str ); + } + str->release(); - return( table ); + return table; } -OSDictionary * IOService::serviceMatching( const char * name, - OSDictionary * table ) +OSDictionary * +IOService::serviceMatching( const char * name, + OSDictionary * table ) { - const OSString * str; + const OSString * str; - str = OSSymbol::withCString( name ); - if( !str) - return( 0 ); + str = OSSymbol::withCString( name ); + if (!str) { + return 0; + } - table = serviceMatching( str, table ); - str->release(); - return( table ); + table = serviceMatching( str, table ); + str->release(); + return table; } -OSDictionary * IOService::nameMatching( const OSString * name, - OSDictionary * table ) +OSDictionary * +IOService::nameMatching( const OSString * name, + OSDictionary * table ) { - if( !table) - table = OSDictionary::withCapacity( 2 ); - if( table) - table->setObject( gIONameMatchKey, (OSObject *)name ); + if (!table) { + table = OSDictionary::withCapacity( 2 ); + } + if (table) { + table->setObject( gIONameMatchKey, (OSObject *)name ); + } - return( table ); + return table; } -OSDictionary * IOService::nameMatching( const char * name, - OSDictionary * table ) +OSDictionary * +IOService::nameMatching( const char * name, + OSDictionary * table ) { - const OSString * str; + const OSString * str; - str = OSSymbol::withCString( name ); - if( !str) - return( 0 ); + str = OSSymbol::withCString( name ); + if (!str) { + return 0; + } - table = nameMatching( str, table ); - str->release(); - return( table ); + table = nameMatching( str, table ); + str->release(); + return table; } -OSDictionary * IOService::resourceMatching( const OSString * str, - OSDictionary * table ) +OSDictionary * +IOService::resourceMatching( const OSString * str, + OSDictionary * table ) { - table = serviceMatching( gIOResourcesKey, table ); - if( table) - table->setObject( gIOResourceMatchKey, (OSObject *) str ); + table = serviceMatching( gIOResourcesKey, table ); + if (table) { + table->setObject( gIOResourceMatchKey, (OSObject *) str ); + } - return( table ); + return table; } -OSDictionary * IOService::resourceMatching( const char * name, - OSDictionary * table ) +OSDictionary * +IOService::resourceMatching( const char * name, + OSDictionary * table ) { - const OSSymbol * str; + const OSSymbol * str; - str = OSSymbol::withCString( name ); - if( !str) - return( 0 ); + str = OSSymbol::withCString( name ); + if (!str) { + return 0; + } - table = resourceMatching( str, table ); - str->release(); + table = resourceMatching( str, table ); + str->release(); - return( table ); + return table; } -OSDictionary * IOService::propertyMatching( const OSSymbol * key, const OSObject * value, - OSDictionary * table ) +OSDictionary * +IOService::propertyMatching( const OSSymbol * key, const OSObject * value, + OSDictionary * table ) { - OSDictionary * properties; + OSDictionary * properties; - properties = OSDictionary::withCapacity( 2 ); - if( !properties) - return( 0 ); - properties->setObject( key, value ); + properties = OSDictionary::withCapacity( 2 ); + if (!properties) { + return 0; + } + properties->setObject( key, value ); - if( !table) - table = 
OSDictionary::withCapacity( 2 ); - if( table) - table->setObject( gIOPropertyMatchKey, properties ); + if (!table) { + table = OSDictionary::withCapacity( 2 ); + } + if (table) { + table->setObject( gIOPropertyMatchKey, properties ); + } - properties->release(); + properties->release(); - return( table ); + return table; } -OSDictionary * IOService::registryEntryIDMatching( uint64_t entryID, - OSDictionary * table ) +OSDictionary * +IOService::registryEntryIDMatching( uint64_t entryID, + OSDictionary * table ) { - OSNumber * num; + OSNumber * num; - num = OSNumber::withNumber( entryID, 64 ); - if( !num) - return( 0 ); + num = OSNumber::withNumber( entryID, 64 ); + if (!num) { + return 0; + } - if( !table) - table = OSDictionary::withCapacity( 2 ); - if( table) - table->setObject( gIORegistryEntryIDKey, num ); - - if (num) - num->release(); + if (!table) { + table = OSDictionary::withCapacity( 2 ); + } + if (table) { + table->setObject( gIORegistryEntryIDKey, num ); + } - return( table ); + if (num) { + num->release(); + } + + return table; } @@ -5020,83 +5239,91 @@ OSDictionary * IOService::registryEntryIDMatching( uint64_t entryID, // wait for all threads, other than the current one, // to exit the handler -void _IOServiceNotifier::wait() +void +_IOServiceNotifier::wait() { - _IOServiceNotifierInvocation * next; - bool doWait; - - do { - doWait = false; - queue_iterate( &handlerInvocations, next, - _IOServiceNotifierInvocation *, link) { - if( next->thread != current_thread() ) { - doWait = true; - break; - } - } - if( doWait) { - state |= kIOServiceNotifyWaiter; - SLEEPNOTIFY(this); - } + _IOServiceNotifierInvocation * next; + bool doWait; - } while( doWait ); + do { + doWait = false; + queue_iterate( &handlerInvocations, next, + _IOServiceNotifierInvocation *, link) { + if (next->thread != current_thread()) { + doWait = true; + break; + } + } + if (doWait) { + state |= kIOServiceNotifyWaiter; + SLEEPNOTIFY(this); + } + } while (doWait); } -void _IOServiceNotifier::free() +void +_IOServiceNotifier::free() { - assert( queue_empty( &handlerInvocations )); + assert( queue_empty( &handlerInvocations )); - if (handler == &IOServiceMatchingNotificationHandlerToBlock) Block_release(ref); + if (handler == &IOServiceMatchingNotificationHandlerToBlock) { + Block_release(ref); + } - OSObject::free(); + OSObject::free(); } -void _IOServiceNotifier::remove() +void +_IOServiceNotifier::remove() { - LOCKWRITENOTIFY(); + LOCKWRITENOTIFY(); + + if (whence) { + whence->removeObject((OSObject *) this ); + whence = 0; + } + if (matching) { + matching->release(); + matching = 0; + } - if( whence) { - whence->removeObject( (OSObject *) this ); - whence = 0; - } - if( matching) { - matching->release(); - matching = 0; - } + state &= ~kIOServiceNotifyEnable; - state &= ~kIOServiceNotifyEnable; + wait(); - wait(); + UNLOCKNOTIFY(); - UNLOCKNOTIFY(); - - release(); + release(); } -bool _IOServiceNotifier::disable() +bool +_IOServiceNotifier::disable() { - bool ret; + bool ret; - LOCKWRITENOTIFY(); + LOCKWRITENOTIFY(); - ret = (0 != (kIOServiceNotifyEnable & state)); - state &= ~kIOServiceNotifyEnable; - if( ret) - wait(); + ret = (0 != (kIOServiceNotifyEnable & state)); + state &= ~kIOServiceNotifyEnable; + if (ret) { + wait(); + } - UNLOCKNOTIFY(); + UNLOCKNOTIFY(); - return( ret ); + return ret; } -void _IOServiceNotifier::enable( bool was ) +void +_IOServiceNotifier::enable( bool was ) { - LOCKWRITENOTIFY(); - if( was) - state |= kIOServiceNotifyEnable; - else - state &= ~kIOServiceNotifyEnable; - 
UNLOCKNOTIFY(); + LOCKWRITENOTIFY(); + if (was) { + state |= kIOServiceNotifyEnable; + } else { + state &= ~kIOServiceNotifyEnable; + } + UNLOCKNOTIFY(); } @@ -5104,301 +5331,324 @@ void _IOServiceNotifier::enable( bool was ) * _IOServiceNullNotifier */ -void _IOServiceNullNotifier::taggedRetain(const void *tag) const {} -void _IOServiceNullNotifier::taggedRelease(const void *tag, const int when) const {} -void _IOServiceNullNotifier::free() {} -void _IOServiceNullNotifier::wait() {} -void _IOServiceNullNotifier::remove() {} -void _IOServiceNullNotifier::enable(bool was) {} -bool _IOServiceNullNotifier::disable() { return(false); } +void +_IOServiceNullNotifier::taggedRetain(const void *tag) const +{ +} +void +_IOServiceNullNotifier::taggedRelease(const void *tag, const int when) const +{ +} +void +_IOServiceNullNotifier::free() +{ +} +void +_IOServiceNullNotifier::wait() +{ +} +void +_IOServiceNullNotifier::remove() +{ +} +void +_IOServiceNullNotifier::enable(bool was) +{ +} +bool +_IOServiceNullNotifier::disable() +{ + return false; +} /* * IOResources */ -IOService * IOResources::resources( void ) +IOService * +IOResources::resources( void ) { - IOResources * inst; + IOResources * inst; - inst = new IOResources; - if( inst && !inst->init()) { - inst->release(); - inst = 0; - } + inst = new IOResources; + if (inst && !inst->init()) { + inst->release(); + inst = 0; + } - return( inst ); + return inst; } -bool IOResources::init( OSDictionary * dictionary ) +bool +IOResources::init( OSDictionary * dictionary ) { - // Do super init first - if ( !IOService::init() ) - return false; + // Do super init first + if (!IOService::init()) { + return false; + } - // Allow PAL layer to publish a value - const char *property_name; - int property_value; + // Allow PAL layer to publish a value + const char *property_name; + int property_value; - pal_get_resource_property( &property_name, &property_value ); + pal_get_resource_property( &property_name, &property_value ); - if( property_name ) { - OSNumber *num; - const OSSymbol * sym; + if (property_name) { + OSNumber *num; + const OSSymbol * sym; - if( (num = OSNumber::withNumber(property_value, 32)) != 0 ) { - if( (sym = OSSymbol::withCString( property_name)) != 0 ) { - this->setProperty( sym, num ); - sym->release(); - } - num->release(); + if ((num = OSNumber::withNumber(property_value, 32)) != 0) { + if ((sym = OSSymbol::withCString( property_name)) != 0) { + this->setProperty( sym, num ); + sym->release(); + } + num->release(); + } } - } - return true; + return true; } -IOReturn IOResources::newUserClient(task_t owningTask, void * securityID, - UInt32 type, OSDictionary * properties, - IOUserClient ** handler) +IOReturn +IOResources::newUserClient(task_t owningTask, void * securityID, + UInt32 type, OSDictionary * properties, + IOUserClient ** handler) { - return( kIOReturnUnsupported ); + return kIOReturnUnsupported; } -IOWorkLoop * IOResources::getWorkLoop() const +IOWorkLoop * +IOResources::getWorkLoop() const { - // If we are the resource root - // then use the platform's workloop - if (this == (IOResources *) gIOResources) - return getPlatform()->getWorkLoop(); - else - return IOService::getWorkLoop(); + // If we are the resource root + // then use the platform's workloop + if (this == (IOResources *) gIOResources) { + return getPlatform()->getWorkLoop(); + } else { + return IOService::getWorkLoop(); + } } -bool IOResources::matchPropertyTable( OSDictionary * table ) +bool +IOResources::matchPropertyTable( OSDictionary * table ) { - OSObject * 
prop; - OSString * str; - OSSet * set; - OSIterator * iter; - OSObject * obj; - OSArray * keys; - bool ok = true; - - prop = table->getObject( gIOResourceMatchKey ); - str = OSDynamicCast( OSString, prop ); - if( str) - ok = (0 != getProperty( str )); + OSObject * prop; + OSString * str; + OSSet * set; + OSIterator * iter; + OSObject * obj; + OSArray * keys; + bool ok = true; - else if( (set = OSDynamicCast( OSSet, prop))) { - - iter = OSCollectionIterator::withCollection( set ); - ok = (iter != 0); - while( ok && (str = OSDynamicCast( OSString, iter->getNextObject()) )) - ok = (0 != getProperty( str )); + prop = table->getObject( gIOResourceMatchKey ); + str = OSDynamicCast( OSString, prop ); + if (str) { + ok = (0 != getProperty( str )); + } else if ((set = OSDynamicCast( OSSet, prop))) { + iter = OSCollectionIterator::withCollection( set ); + ok = (iter != 0); + while (ok && (str = OSDynamicCast( OSString, iter->getNextObject()))) { + ok = (0 != getProperty( str )); + } - if( iter) - iter->release(); - } - else if ((prop = table->getObject(gIOResourceMatchedKey))) - { - obj = copyProperty(gIOResourceMatchedKey); - keys = OSDynamicCast(OSArray, obj); - ok = false; - if (keys) - { - // assuming OSSymbol - ok = ((-1U) != keys->getNextIndexOfObject(prop, 0)); - } - OSSafeReleaseNULL(obj); - } + if (iter) { + iter->release(); + } + } else if ((prop = table->getObject(gIOResourceMatchedKey))) { + obj = copyProperty(gIOResourceMatchedKey); + keys = OSDynamicCast(OSArray, obj); + ok = false; + if (keys) { + // assuming OSSymbol + ok = ((-1U) != keys->getNextIndexOfObject(prop, 0)); + } + OSSafeReleaseNULL(obj); + } - return( ok ); + return ok; } -void IOService::consoleLockTimer(thread_call_param_t p0, thread_call_param_t p1) +void +IOService::consoleLockTimer(thread_call_param_t p0, thread_call_param_t p1) { - IOService::updateConsoleUsers(NULL, 0); + IOService::updateConsoleUsers(NULL, 0); } -void IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage) +void +IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessage) { - IORegistryEntry * regEntry; - OSObject * locked = kOSBooleanFalse; - uint32_t idx; - bool publish; - OSDictionary * user; - static IOMessage sSystemPower; - clock_sec_t now = 0; - clock_usec_t microsecs; + IORegistryEntry * regEntry; + OSObject * locked = kOSBooleanFalse; + uint32_t idx; + bool publish; + OSDictionary * user; + static IOMessage sSystemPower; + clock_sec_t now = 0; + clock_usec_t microsecs; - regEntry = IORegistryEntry::getRegistryRoot(); + regEntry = IORegistryEntry::getRegistryRoot(); - if (!gIOChosenEntry) - gIOChosenEntry = IORegistryEntry::fromPath("/chosen", gIODTPlane); + if (!gIOChosenEntry) { + gIOChosenEntry = IORegistryEntry::fromPath("/chosen", gIODTPlane); + } - IOLockLock(gIOConsoleUsersLock); + IOLockLock(gIOConsoleUsersLock); - if (systemMessage) - { - sSystemPower = systemMessage; + if (systemMessage) { + sSystemPower = systemMessage; #if HIBERNATION - if (kIOMessageSystemHasPoweredOn == systemMessage) - { - uint32_t lockState = IOHibernateWasScreenLocked(); - switch (lockState) - { - case 0: - break; - case kIOScreenLockLocked: - case kIOScreenLockFileVaultDialog: - gIOConsoleBooterLockState = kOSBooleanTrue; - break; - case kIOScreenLockNoLock: - gIOConsoleBooterLockState = 0; - break; - case kIOScreenLockUnlocked: - default: - gIOConsoleBooterLockState = kOSBooleanFalse; - break; - } - } + if (kIOMessageSystemHasPoweredOn == systemMessage) { + uint32_t lockState = IOHibernateWasScreenLocked(); + 
switch (lockState) { + case 0: + break; + case kIOScreenLockLocked: + case kIOScreenLockFileVaultDialog: + gIOConsoleBooterLockState = kOSBooleanTrue; + break; + case kIOScreenLockNoLock: + gIOConsoleBooterLockState = 0; + break; + case kIOScreenLockUnlocked: + default: + gIOConsoleBooterLockState = kOSBooleanFalse; + break; + } + } #endif /* HIBERNATION */ - } + } - if (consoleUsers) - { - OSNumber * num = 0; - bool loginLocked = true; + if (consoleUsers) { + OSNumber * num = 0; + bool loginLocked = true; - gIOConsoleLoggedIn = false; - for (idx = 0; - (user = OSDynamicCast(OSDictionary, consoleUsers->getObject(idx))); - idx++) - { - gIOConsoleLoggedIn |= ((kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) - && (kOSBooleanTrue == user->getObject(gIOConsoleSessionLoginDoneKey))); + gIOConsoleLoggedIn = false; + for (idx = 0; + (user = OSDynamicCast(OSDictionary, consoleUsers->getObject(idx))); + idx++) { + gIOConsoleLoggedIn |= ((kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) + && (kOSBooleanTrue == user->getObject(gIOConsoleSessionLoginDoneKey))); - loginLocked &= (kOSBooleanTrue == user->getObject(gIOConsoleSessionScreenIsLockedKey)); - if (!num) - { - num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionScreenLockedTimeKey)); - } - } + loginLocked &= (kOSBooleanTrue == user->getObject(gIOConsoleSessionScreenIsLockedKey)); + if (!num) { + num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionScreenLockedTimeKey)); + } + } #if HIBERNATION - if (!loginLocked) gIOConsoleBooterLockState = 0; - IOLog("IOConsoleUsers: time(%d) %ld->%d, lin %d, llk %d, \n", - (num != 0), gIOConsoleLockTime, (num ? num->unsigned32BitValue() : 0), - gIOConsoleLoggedIn, loginLocked); + if (!loginLocked) { + gIOConsoleBooterLockState = 0; + } + IOLog("IOConsoleUsers: time(%d) %ld->%d, lin %d, llk %d, \n", + (num != 0), gIOConsoleLockTime, (num ? num->unsigned32BitValue() : 0), + gIOConsoleLoggedIn, loginLocked); #endif /* HIBERNATION */ - gIOConsoleLockTime = num ? num->unsigned32BitValue() : 0; - } - - if (!gIOConsoleLoggedIn - || (kIOMessageSystemWillSleep == sSystemPower) - || (kIOMessageSystemPagingOff == sSystemPower)) - { - locked = kOSBooleanTrue; - } + gIOConsoleLockTime = num ? 
num->unsigned32BitValue() : 0; + } + + if (!gIOConsoleLoggedIn + || (kIOMessageSystemWillSleep == sSystemPower) + || (kIOMessageSystemPagingOff == sSystemPower)) { + locked = kOSBooleanTrue; + } #if HIBERNATION - else if (gIOConsoleBooterLockState) - { - locked = gIOConsoleBooterLockState; - } -#endif /* HIBERNATION */ - else if (gIOConsoleLockTime) - { - clock_get_calendar_microtime(&now, µsecs); - if (gIOConsoleLockTime > now) - { - AbsoluteTime deadline; - clock_interval_to_deadline(gIOConsoleLockTime - now, kSecondScale, &deadline); - thread_call_enter_delayed(gIOConsoleLockCallout, deadline); + else if (gIOConsoleBooterLockState) { + locked = gIOConsoleBooterLockState; } - else - { - locked = kOSBooleanTrue; +#endif /* HIBERNATION */ + else if (gIOConsoleLockTime) { + clock_get_calendar_microtime(&now, µsecs); + if (gIOConsoleLockTime > now) { + AbsoluteTime deadline; + clock_interval_to_deadline(gIOConsoleLockTime - now, kSecondScale, &deadline); + thread_call_enter_delayed(gIOConsoleLockCallout, deadline); + } else { + locked = kOSBooleanTrue; + } } - } - publish = (consoleUsers || (locked != regEntry->getProperty(gIOConsoleLockedKey))); - if (publish) - { - regEntry->setProperty(gIOConsoleLockedKey, locked); - if (consoleUsers) - { - regEntry->setProperty(gIOConsoleUsersKey, consoleUsers); + publish = (consoleUsers || (locked != regEntry->getProperty(gIOConsoleLockedKey))); + if (publish) { + regEntry->setProperty(gIOConsoleLockedKey, locked); + if (consoleUsers) { + regEntry->setProperty(gIOConsoleUsersKey, consoleUsers); + } + OSIncrementAtomic( &gIOConsoleUsersSeed ); } - OSIncrementAtomic( &gIOConsoleUsersSeed ); - } #if HIBERNATION - if (gIOChosenEntry) - { - if (locked == kOSBooleanTrue) gIOScreenLockState = kIOScreenLockLocked; - else if (gIOConsoleLockTime) gIOScreenLockState = kIOScreenLockUnlocked; - else gIOScreenLockState = kIOScreenLockNoLock; - gIOChosenEntry->setProperty(kIOScreenLockStateKey, &gIOScreenLockState, sizeof(gIOScreenLockState)); - - IOLog("IOConsoleUsers: gIOScreenLockState %d, hs %d, bs %d, now %ld, sm 0x%x\n", - gIOScreenLockState, gIOHibernateState, (gIOConsoleBooterLockState != 0), now, systemMessage); - } + if (gIOChosenEntry) { + if (locked == kOSBooleanTrue) { + gIOScreenLockState = kIOScreenLockLocked; + } else if (gIOConsoleLockTime) { + gIOScreenLockState = kIOScreenLockUnlocked; + } else { + gIOScreenLockState = kIOScreenLockNoLock; + } + gIOChosenEntry->setProperty(kIOScreenLockStateKey, &gIOScreenLockState, sizeof(gIOScreenLockState)); + + IOLog("IOConsoleUsers: gIOScreenLockState %d, hs %d, bs %d, now %ld, sm 0x%x\n", + gIOScreenLockState, gIOHibernateState, (gIOConsoleBooterLockState != 0), now, systemMessage); + } #endif /* HIBERNATION */ - IOLockUnlock(gIOConsoleUsersLock); + IOLockUnlock(gIOConsoleUsersLock); + + if (publish) { + publishResource( gIOConsoleUsersSeedKey, gIOConsoleUsersSeedValue ); - if (publish) - { - publishResource( gIOConsoleUsersSeedKey, gIOConsoleUsersSeedValue ); + MessageClientsContext context; - MessageClientsContext context; - - context.service = getServiceRoot(); - context.type = kIOMessageConsoleSecurityChange; - context.argument = (void *) regEntry; - context.argSize = 0; - - applyToInterestNotifiers(getServiceRoot(), gIOConsoleSecurityInterest, - &messageClientsApplier, &context ); - } + context.service = getServiceRoot(); + context.type = kIOMessageConsoleSecurityChange; + context.argument = (void *) regEntry; + context.argSize = 0; + + applyToInterestNotifiers(getServiceRoot(), 
gIOConsoleSecurityInterest, + &messageClientsApplier, &context ); + } } -IOReturn IOResources::setProperties( OSObject * properties ) +IOReturn +IOResources::setProperties( OSObject * properties ) { - IOReturn err; - const OSSymbol * key; - OSDictionary * dict; - OSCollectionIterator * iter; + IOReturn err; + const OSSymbol * key; + OSDictionary * dict; + OSCollectionIterator * iter; - err = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); - if ( kIOReturnSuccess != err) - return( err ); - - dict = OSDynamicCast(OSDictionary, properties); - if( 0 == dict) - return( kIOReturnBadArgument); + err = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator); + if (kIOReturnSuccess != err) { + return err; + } - iter = OSCollectionIterator::withCollection( dict); - if( 0 == iter) - return( kIOReturnBadArgument); + dict = OSDynamicCast(OSDictionary, properties); + if (0 == dict) { + return kIOReturnBadArgument; + } - while( (key = OSDynamicCast(OSSymbol, iter->getNextObject()))) - { - if (gIOConsoleUsersKey == key) do - { - OSArray * consoleUsers; - consoleUsers = OSDynamicCast(OSArray, dict->getObject(key)); - if (!consoleUsers) - continue; - IOService::updateConsoleUsers(consoleUsers, 0); + iter = OSCollectionIterator::withCollection( dict); + if (0 == iter) { + return kIOReturnBadArgument; } - while (false); - publishResource( key, dict->getObject(key) ); - } + while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) { + if (gIOConsoleUsersKey == key) { + do{ + OSArray * consoleUsers; + consoleUsers = OSDynamicCast(OSArray, dict->getObject(key)); + if (!consoleUsers) { + continue; + } + IOService::updateConsoleUsers(consoleUsers, 0); + }while (false); + } + + publishResource( key, dict->getObject(key)); + } - iter->release(); + iter->release(); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } /* @@ -5407,747 +5657,800 @@ IOReturn IOResources::setProperties( OSObject * properties ) * Keys may be a string or OSCollection of IOStrings */ -bool IOService::compareProperty( OSDictionary * matching, - const char * key ) +bool +IOService::compareProperty( OSDictionary * matching, + const char * key ) { - OSObject * value; - OSObject * prop; - bool ok; + OSObject * value; + OSObject * prop; + bool ok; - value = matching->getObject( key ); - if( value) - { - prop = copyProperty(key); - ok = value->isEqualTo(prop); - if (prop) prop->release(); - } - else - ok = true; + value = matching->getObject( key ); + if (value) { + prop = copyProperty(key); + ok = value->isEqualTo(prop); + if (prop) { + prop->release(); + } + } else { + ok = true; + } - return( ok ); + return ok; } -bool IOService::compareProperty( OSDictionary * matching, - const OSString * key ) +bool +IOService::compareProperty( OSDictionary * matching, + const OSString * key ) { - OSObject * value; - OSObject * prop; - bool ok; + OSObject * value; + OSObject * prop; + bool ok; - value = matching->getObject( key ); - if( value) - { - prop = copyProperty(key); - ok = value->isEqualTo(prop); - if (prop) prop->release(); - } - else - ok = true; + value = matching->getObject( key ); + if (value) { + prop = copyProperty(key); + ok = value->isEqualTo(prop); + if (prop) { + prop->release(); + } + } else { + ok = true; + } - return( ok ); + return ok; } -bool IOService::compareProperties( OSDictionary * matching, - OSCollection * keys ) +bool +IOService::compareProperties( OSDictionary * matching, + OSCollection * keys ) { - OSCollectionIterator * iter; - const OSString * key; - 
bool ok = true; + OSCollectionIterator * iter; + const OSString * key; + bool ok = true; - if( !matching || !keys) - return( false ); + if (!matching || !keys) { + return false; + } - iter = OSCollectionIterator::withCollection( keys ); + iter = OSCollectionIterator::withCollection( keys ); - if( iter) { - while( ok && (key = OSDynamicCast( OSString, iter->getNextObject()))) - ok = compareProperty( matching, key ); + if (iter) { + while (ok && (key = OSDynamicCast( OSString, iter->getNextObject()))) { + ok = compareProperty( matching, key ); + } - iter->release(); - } - keys->release(); // !! consume a ref !! + iter->release(); + } + keys->release(); // !! consume a ref !! - return( ok ); + return ok; } /* Helper to add a location matching dict to the table */ -OSDictionary * IOService::addLocation( OSDictionary * table ) +OSDictionary * +IOService::addLocation( OSDictionary * table ) { - OSDictionary * dict; + OSDictionary * dict; - if( !table) - return( 0 ); + if (!table) { + return 0; + } - dict = OSDictionary::withCapacity( 1 ); - if( dict) { - table->setObject( gIOLocationMatchKey, dict ); - dict->release(); - } + dict = OSDictionary::withCapacity( 1 ); + if (dict) { + table->setObject( gIOLocationMatchKey, dict ); + dict->release(); + } - return( dict ); + return dict; } /* * Go looking for a provider to match a location dict. */ -IOService * IOService::matchLocation( IOService * /* client */ ) +IOService * +IOService::matchLocation( IOService * /* client */ ) { - IOService * parent; + IOService * parent; - parent = getProvider(); + parent = getProvider(); - if( parent) - parent = parent->matchLocation( this ); + if (parent) { + parent = parent->matchLocation( this ); + } - return( parent ); + return parent; } -bool IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) +bool +IOService::matchInternal(OSDictionary * table, uint32_t options, uint32_t * did) { - OSString * matched; - OSObject * obj; - OSString * str; - IORegistryEntry * entry; - OSNumber * num; - bool match = true; - bool changesOK = (0 != (kIOServiceChangesOK & options)); - uint32_t count; - uint32_t done; + OSString * matched; + OSObject * obj; + OSString * str; + IORegistryEntry * entry; + OSNumber * num; + bool match = true; + bool changesOK = (0 != (kIOServiceChangesOK & options)); + uint32_t count; + uint32_t done; - do - { - count = table->getCount(); - done = 0; + do{ + count = table->getCount(); + done = 0; - str = OSDynamicCast(OSString, table->getObject(gIOProviderClassKey)); - if (str) { - done++; - match = ((kIOServiceClassDone & options) || (0 != metaCast(str))); + str = OSDynamicCast(OSString, table->getObject(gIOProviderClassKey)); + if (str) { + done++; + match = ((kIOServiceClassDone & options) || (0 != metaCast(str))); #if MATCH_DEBUG - match = (0 != metaCast( str )); - if ((kIOServiceClassDone & options) && !match) panic("classDone"); + match = (0 != metaCast( str )); + if ((kIOServiceClassDone & options) && !match) { + panic("classDone"); + } #endif - if ((!match) || (done == count)) break; - } + if ((!match) || (done == count)) { + break; + } + } - obj = table->getObject( gIONameMatchKey ); - if( obj) { - done++; - match = compareNames( obj, changesOK ? 
&matched : 0 ); - if (!match) break; - if( changesOK && matched) { - // leave a hint as to which name matched - table->setObject( gIONameMatchedKey, matched ); - matched->release(); - } - if (done == count) break; - } + obj = table->getObject( gIONameMatchKey ); + if (obj) { + done++; + match = compareNames( obj, changesOK ? &matched : 0 ); + if (!match) { + break; + } + if (changesOK && matched) { + // leave a hint as to which name matched + table->setObject( gIONameMatchedKey, matched ); + matched->release(); + } + if (done == count) { + break; + } + } - str = OSDynamicCast( OSString, table->getObject( gIOLocationMatchKey )); - if (str) - { - const OSSymbol * sym; - done++; - match = false; - sym = copyLocation(); - if (sym) { - match = sym->isEqualTo( str ); - sym->release(); - } - if ((!match) || (done == count)) break; - } + str = OSDynamicCast( OSString, table->getObject( gIOLocationMatchKey )); + if (str) { + const OSSymbol * sym; + done++; + match = false; + sym = copyLocation(); + if (sym) { + match = sym->isEqualTo( str ); + sym->release(); + } + if ((!match) || (done == count)) { + break; + } + } - obj = table->getObject( gIOPropertyMatchKey ); - if( obj) - { - OSDictionary * dict; - OSDictionary * nextDict; - OSIterator * iter; - done++; - match = false; - dict = dictionaryWithProperties(); - if( dict) { - nextDict = OSDynamicCast( OSDictionary, obj); - if( nextDict) - iter = 0; - else - iter = OSCollectionIterator::withCollection( - OSDynamicCast(OSCollection, obj)); - - while( nextDict - || (iter && (0 != (nextDict = OSDynamicCast(OSDictionary, - iter->getNextObject()))))) { - match = dict->isEqualTo( nextDict, nextDict); - if( match) - break; - nextDict = 0; + obj = table->getObject( gIOPropertyMatchKey ); + if (obj) { + OSDictionary * dict; + OSDictionary * nextDict; + OSIterator * iter; + done++; + match = false; + dict = dictionaryWithProperties(); + if (dict) { + nextDict = OSDynamicCast( OSDictionary, obj); + if (nextDict) { + iter = 0; + } else { + iter = OSCollectionIterator::withCollection( + OSDynamicCast(OSCollection, obj)); + } + + while (nextDict + || (iter && (0 != (nextDict = OSDynamicCast(OSDictionary, + iter->getNextObject()))))) { + match = dict->isEqualTo( nextDict, nextDict); + if (match) { + break; + } + nextDict = 0; + } + dict->release(); + if (iter) { + iter->release(); + } + } + if ((!match) || (done == count)) { + break; + } } - dict->release(); - if( iter) - iter->release(); - } - if ((!match) || (done == count)) break; - } - obj = table->getObject( gIOPropertyExistsMatchKey ); - if( obj) - { - OSDictionary * dict; - OSString * nextKey; - OSIterator * iter; - done++; - match = false; - dict = dictionaryWithProperties(); - if( dict) { - nextKey = OSDynamicCast( OSString, obj); - if( nextKey) - iter = 0; - else - iter = OSCollectionIterator::withCollection( - OSDynamicCast(OSCollection, obj)); - - while( nextKey - || (iter && (0 != (nextKey = OSDynamicCast(OSString, - iter->getNextObject()))))) { - match = (0 != dict->getObject(nextKey)); - if( match) - break; - nextKey = 0; + obj = table->getObject( gIOPropertyExistsMatchKey ); + if (obj) { + OSDictionary * dict; + OSString * nextKey; + OSIterator * iter; + done++; + match = false; + dict = dictionaryWithProperties(); + if (dict) { + nextKey = OSDynamicCast( OSString, obj); + if (nextKey) { + iter = 0; + } else { + iter = OSCollectionIterator::withCollection( + OSDynamicCast(OSCollection, obj)); + } + + while (nextKey + || (iter && (0 != (nextKey = OSDynamicCast(OSString, + iter->getNextObject()))))) 
{ + match = (0 != dict->getObject(nextKey)); + if (match) { + break; + } + nextKey = 0; + } + dict->release(); + if (iter) { + iter->release(); + } + } + if ((!match) || (done == count)) { + break; + } } - dict->release(); - if( iter) - iter->release(); - } - if ((!match) || (done == count)) break; - } - str = OSDynamicCast( OSString, table->getObject( gIOPathMatchKey )); - if( str) { - done++; - entry = IORegistryEntry::fromPath( str->getCStringNoCopy() ); - match = (this == entry); - if( entry) - entry->release(); - if ((!match) || (done == count)) break; - } + str = OSDynamicCast( OSString, table->getObject( gIOPathMatchKey )); + if (str) { + done++; + entry = IORegistryEntry::fromPath( str->getCStringNoCopy()); + match = (this == entry); + if (entry) { + entry->release(); + } + if ((!match) || (done == count)) { + break; + } + } - num = OSDynamicCast( OSNumber, table->getObject( gIORegistryEntryIDKey )); - if (num) { - done++; - match = (getRegistryEntryID() == num->unsigned64BitValue()); - if ((!match) || (done == count)) break; - } + num = OSDynamicCast( OSNumber, table->getObject( gIORegistryEntryIDKey )); + if (num) { + done++; + match = (getRegistryEntryID() == num->unsigned64BitValue()); + if ((!match) || (done == count)) { + break; + } + } - num = OSDynamicCast( OSNumber, table->getObject( gIOMatchedServiceCountKey )); - if( num) - { - OSIterator * iter; - IOService * service = 0; - UInt32 serviceCount = 0; - - done++; - iter = getClientIterator(); - if( iter) { - while( (service = (IOService *) iter->getNextObject())) { - if( kIOServiceInactiveState & service->__state[0]) - continue; - if( 0 == service->getProperty( gIOMatchCategoryKey )) - continue; - ++serviceCount; + num = OSDynamicCast( OSNumber, table->getObject( gIOMatchedServiceCountKey )); + if (num) { + OSIterator * iter; + IOService * service = 0; + UInt32 serviceCount = 0; + + done++; + iter = getClientIterator(); + if (iter) { + while ((service = (IOService *) iter->getNextObject())) { + if (kIOServiceInactiveState & service->__state[0]) { + continue; + } + if (0 == service->getProperty( gIOMatchCategoryKey )) { + continue; + } + ++serviceCount; + } + iter->release(); + } + match = (serviceCount == num->unsigned32BitValue()); + if ((!match) || (done == count)) { + break; + } } - iter->release(); - } - match = (serviceCount == num->unsigned32BitValue()); - if ((!match) || (done == count)) break; - } - -#define propMatch(key) \ - obj = table->getObject(key); \ - if (obj) \ - { \ - OSObject * prop; \ - done++; \ - prop = copyProperty(key); \ - match = obj->isEqualTo(prop); \ - if (prop) prop->release(); \ - if ((!match) || (done == count)) break; \ - } - propMatch(gIOBSDNameKey) - propMatch(gIOBSDMajorKey) - propMatch(gIOBSDMinorKey) - propMatch(gIOBSDUnitKey) + +#define propMatch(key) \ + obj = table->getObject(key); \ + if (obj) \ + { \ + OSObject * prop; \ + done++; \ + prop = copyProperty(key); \ + match = obj->isEqualTo(prop); \ + if (prop) prop->release(); \ + if ((!match) || (done == count)) break; \ + } + propMatch(gIOBSDNameKey) + propMatch(gIOBSDMajorKey) + propMatch(gIOBSDMinorKey) + propMatch(gIOBSDUnitKey) #undef propMatch - } - while (false); + }while (false); - if (did) *did = done; - return (match); + if (did) { + *did = done; + } + return match; } -bool IOService::passiveMatch( OSDictionary * table, bool changesOK ) +bool +IOService::passiveMatch( OSDictionary * table, bool changesOK ) { - return (matchPassive(table, changesOK ? kIOServiceChangesOK : 0)); + return matchPassive(table, changesOK ? 
kIOServiceChangesOK : 0); } -bool IOService::matchPassive(OSDictionary * table, uint32_t options) +bool +IOService::matchPassive(OSDictionary * table, uint32_t options) { - IOService * where; - OSDictionary * nextTable; - SInt32 score; - OSNumber * newPri; - bool match = true; - bool matchParent = false; - uint32_t count; - uint32_t done; + IOService * where; + OSDictionary * nextTable; + SInt32 score; + OSNumber * newPri; + bool match = true; + bool matchParent = false; + uint32_t count; + uint32_t done; - assert( table ); + assert( table ); #if !CONFIG_EMBEDDED - OSArray* aliasServiceRegIds = NULL; - IOService* foundAlternateService = NULL; + OSArray* aliasServiceRegIds = NULL; + IOService* foundAlternateService = NULL; #endif #if MATCH_DEBUG - OSDictionary * root = table; + OSDictionary * root = table; #endif - where = this; - do - { - do - { - count = table->getCount(); - if (!(kIOServiceInternalDone & options)) - { - match = where->matchInternal(table, options, &done); - // don't call family if we've done all the entries in the table - if ((!match) || (done == count)) break; - } - - // pass in score from property table - score = IOServiceObjectOrder( table, (void *) gIOProbeScoreKey); - - // do family specific matching - match = where->matchPropertyTable( table, &score ); - - if( !match) { + where = this; + do{ + do{ + count = table->getCount(); + if (!(kIOServiceInternalDone & options)) { + match = where->matchInternal(table, options, &done); + // don't call family if we've done all the entries in the table + if ((!match) || (done == count)) { + break; + } + } + + // pass in score from property table + score = IOServiceObjectOrder( table, (void *) gIOProbeScoreKey); + + // do family specific matching + match = where->matchPropertyTable( table, &score ); + + if (!match) { #if IOMATCHDEBUG - if( kIOLogMatch & getDebugFlags( table )) - LOG("%s: family specific matching fails\n", where->getName()); + if (kIOLogMatch & getDebugFlags( table )) { + LOG("%s: family specific matching fails\n", where->getName()); + } #endif - break; - } - - if (kIOServiceChangesOK & options) { - // save the score - newPri = OSNumber::withNumber( score, 32 ); - if( newPri) { - table->setObject( gIOProbeScoreKey, newPri ); - newPri->release(); - } - } - - options = 0; - matchParent = false; - - nextTable = OSDynamicCast(OSDictionary, - table->getObject( gIOParentMatchKey )); - if( nextTable) { - // look for a matching entry anywhere up to root - match = false; - matchParent = true; - table = nextTable; - break; - } - - table = OSDynamicCast(OSDictionary, - table->getObject( gIOLocationMatchKey )); - if (table) { - // look for a matching entry at matchLocation() - match = false; - where = where->getProvider(); - if (where && (where = where->matchLocation(where))) continue; - } - break; - } - while (true); - - if(match == true) { - break; - } - - if(matchParent == true) { + break; + } + + if (kIOServiceChangesOK & options) { + // save the score + newPri = OSNumber::withNumber( score, 32 ); + if (newPri) { + table->setObject( gIOProbeScoreKey, newPri ); + newPri->release(); + } + } + + options = 0; + matchParent = false; + + nextTable = OSDynamicCast(OSDictionary, + table->getObject( gIOParentMatchKey )); + if (nextTable) { + // look for a matching entry anywhere up to root + match = false; + matchParent = true; + table = nextTable; + break; + } + + table = OSDynamicCast(OSDictionary, + table->getObject( gIOLocationMatchKey )); + if (table) { + // look for a matching entry at matchLocation() + match = false; + 
where = where->getProvider(); + if (where && (where = where->matchLocation(where))) { + continue; + } + } + break; + }while (true); + + if (match == true) { + break; + } + + if (matchParent == true) { #if !CONFIG_EMBEDDED - // check if service has an alias to search its other "parents" if a parent match isn't found - OSObject * prop = where->copyProperty(gIOServiceLegacyMatchingRegistryIDKey); - OSNumber * alternateRegistryID = OSDynamicCast(OSNumber, prop); - if(alternateRegistryID != NULL) { - if(aliasServiceRegIds == NULL) - { - aliasServiceRegIds = OSArray::withCapacity(sizeof(alternateRegistryID)); - } - aliasServiceRegIds->setObject(alternateRegistryID); - } - OSSafeReleaseNULL(prop); + // check if service has an alias to search its other "parents" if a parent match isn't found + OSObject * prop = where->copyProperty(gIOServiceLegacyMatchingRegistryIDKey); + OSNumber * alternateRegistryID = OSDynamicCast(OSNumber, prop); + if (alternateRegistryID != NULL) { + if (aliasServiceRegIds == NULL) { + aliasServiceRegIds = OSArray::withCapacity(sizeof(alternateRegistryID)); + } + aliasServiceRegIds->setObject(alternateRegistryID); + } + OSSafeReleaseNULL(prop); #endif - } - else { - break; - } + } else { + break; + } - where = where->getProvider(); + where = where->getProvider(); #if !CONFIG_EMBEDDED - if(where == NULL) { - // there were no matching parent services, check to see if there are aliased services that have a matching parent - if(aliasServiceRegIds != NULL) { - unsigned int numAliasedServices = aliasServiceRegIds->getCount(); - if(numAliasedServices != 0) { - OSNumber* alternateRegistryID = OSDynamicCast(OSNumber, aliasServiceRegIds->getObject(numAliasedServices - 1)); - if(alternateRegistryID != NULL) { - OSDictionary* alternateMatchingDict = IOService::registryEntryIDMatching(alternateRegistryID->unsigned64BitValue()); - aliasServiceRegIds->removeObject(numAliasedServices - 1); - if(alternateMatchingDict != NULL) { - OSSafeReleaseNULL(foundAlternateService); - foundAlternateService = IOService::copyMatchingService(alternateMatchingDict); - alternateMatchingDict->release(); - if(foundAlternateService != NULL) { - where = foundAlternateService; - } - } - } - } - } - } + if (where == NULL) { + // there were no matching parent services, check to see if there are aliased services that have a matching parent + if (aliasServiceRegIds != NULL) { + unsigned int numAliasedServices = aliasServiceRegIds->getCount(); + if (numAliasedServices != 0) { + OSNumber* alternateRegistryID = OSDynamicCast(OSNumber, aliasServiceRegIds->getObject(numAliasedServices - 1)); + if (alternateRegistryID != NULL) { + OSDictionary* alternateMatchingDict = IOService::registryEntryIDMatching(alternateRegistryID->unsigned64BitValue()); + aliasServiceRegIds->removeObject(numAliasedServices - 1); + if (alternateMatchingDict != NULL) { + OSSafeReleaseNULL(foundAlternateService); + foundAlternateService = IOService::copyMatchingService(alternateMatchingDict); + alternateMatchingDict->release(); + if (foundAlternateService != NULL) { + where = foundAlternateService; + } + } + } + } + } + } #endif - } - while( where != NULL ); + }while (where != NULL); #if !CONFIG_EMBEDDED - OSSafeReleaseNULL(foundAlternateService); - OSSafeReleaseNULL(aliasServiceRegIds); + OSSafeReleaseNULL(foundAlternateService); + OSSafeReleaseNULL(aliasServiceRegIds); #endif #if MATCH_DEBUG - if (where != this) - { - OSSerialize * s = OSSerialize::withCapacity(128); - root->serialize(s); - kprintf("parent match 0x%llx, %d,\n%s\n", 
getRegistryEntryID(), match, s->text()); - s->release(); - } + if (where != this) { + OSSerialize * s = OSSerialize::withCapacity(128); + root->serialize(s); + kprintf("parent match 0x%llx, %d,\n%s\n", getRegistryEntryID(), match, s->text()); + s->release(); + } #endif - return( match ); + return match; } -IOReturn IOService::newUserClient( task_t owningTask, void * securityID, - UInt32 type, OSDictionary * properties, - IOUserClient ** handler ) +IOReturn +IOService::newUserClient( task_t owningTask, void * securityID, + UInt32 type, OSDictionary * properties, + IOUserClient ** handler ) { - const OSSymbol *userClientClass = 0; - IOUserClient *client; - OSObject *prop; - OSObject *temp; + const OSSymbol *userClientClass = 0; + IOUserClient *client; + OSObject *prop; + OSObject *temp; - if (kIOReturnSuccess == newUserClient( owningTask, securityID, type, handler )) - return kIOReturnSuccess; + if (kIOReturnSuccess == newUserClient( owningTask, securityID, type, handler )) { + return kIOReturnSuccess; + } - // First try my own properties for a user client class name - prop = copyProperty(gIOUserClientClassKey); - if (prop) { - if (OSDynamicCast(OSSymbol, prop)) - userClientClass = (const OSSymbol *) prop; - else if (OSDynamicCast(OSString, prop)) { - userClientClass = OSSymbol::withString((OSString *) prop); - if (userClientClass) - setProperty(gIOUserClientClassKey, - (OSObject *) userClientClass); + // First try my own properties for a user client class name + prop = copyProperty(gIOUserClientClassKey); + if (prop) { + if (OSDynamicCast(OSSymbol, prop)) { + userClientClass = (const OSSymbol *) prop; + } else if (OSDynamicCast(OSString, prop)) { + userClientClass = OSSymbol::withString((OSString *) prop); + if (userClientClass) { + setProperty(gIOUserClientClassKey, + (OSObject *) userClientClass); + } + } } - } - // Didn't find one so lets just bomb out now without further ado. - if (!userClientClass) - { - OSSafeReleaseNULL(prop); - return kIOReturnUnsupported; - } + // Didn't find one so lets just bomb out now without further ado. 
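
For context on the surrounding newUserClient() implementation: this default path lets a driver vend a user client without overriding newUserClient() at all, simply by publishing an "IOUserClientClass" property naming an IOUserClient subclass; the code here then allocates, inits, attaches, and starts that class on the caller's behalf. A minimal user-space sketch of reaching this path, assuming a hypothetical driver class com_example_Driver (IOKitLib calls only, error handling trimmed):

    #include <IOKit/IOKitLib.h>

    io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault,
        IOServiceMatching("com_example_Driver"));   // matching dict is consumed
    io_connect_t connect = IO_OBJECT_NULL;
    if (service != IO_OBJECT_NULL) {
        // IOServiceOpen() is what drives IOService::newUserClient() in the
        // kernel; the third argument arrives there as 'type'.
        if (kIOReturnSuccess == IOServiceOpen(service, mach_task_self(), 0,
            &connect)) {
            // ... IOConnectCallMethod() traffic with the user client ...
            IOServiceClose(connect);
        }
        IOObjectRelease(service);
    }
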
+ if (!userClientClass) { + OSSafeReleaseNULL(prop); + return kIOReturnUnsupported; + } - // This reference is consumed by the IOServiceOpen call - temp = OSMetaClass::allocClassWithName(userClientClass); - OSSafeReleaseNULL(prop); - if (!temp) - return kIOReturnNoMemory; + // This reference is consumed by the IOServiceOpen call + temp = OSMetaClass::allocClassWithName(userClientClass); + OSSafeReleaseNULL(prop); + if (!temp) { + return kIOReturnNoMemory; + } - if (OSDynamicCast(IOUserClient, temp)) - client = (IOUserClient *) temp; - else { - temp->release(); - return kIOReturnUnsupported; - } + if (OSDynamicCast(IOUserClient, temp)) { + client = (IOUserClient *) temp; + } else { + temp->release(); + return kIOReturnUnsupported; + } - if ( !client->initWithTask(owningTask, securityID, type, properties) ) { - client->release(); - return kIOReturnBadArgument; - } + if (!client->initWithTask(owningTask, securityID, type, properties)) { + client->release(); + return kIOReturnBadArgument; + } - if ( !client->attach(this) ) { - client->release(); - return kIOReturnUnsupported; - } + if (!client->attach(this)) { + client->release(); + return kIOReturnUnsupported; + } - if ( !client->start(this) ) { - client->detach(this); - client->release(); - return kIOReturnUnsupported; - } + if (!client->start(this)) { + client->detach(this); + client->release(); + return kIOReturnUnsupported; + } - *handler = client; - return kIOReturnSuccess; + *handler = client; + return kIOReturnSuccess; } -IOReturn IOService::newUserClient( task_t owningTask, void * securityID, - UInt32 type, IOUserClient ** handler ) +IOReturn +IOService::newUserClient( task_t owningTask, void * securityID, + UInt32 type, IOUserClient ** handler ) { - return( kIOReturnUnsupported ); + return kIOReturnUnsupported; } -IOReturn IOService::requestProbe( IOOptionBits options ) +IOReturn +IOService::requestProbe( IOOptionBits options ) { - return( kIOReturnUnsupported); + return kIOReturnUnsupported; } /* * Convert an IOReturn to text. Subclasses which add additional - * IOReturn's should override this method and call + * IOReturn's should override this method and call * super::stringFromReturn if the desired value is not found. 
*/ -const char * IOService::stringFromReturn( IOReturn rtn ) -{ - static const IONamedValue IOReturn_values[] = { - {kIOReturnSuccess, "success" }, - {kIOReturnError, "general error" }, - {kIOReturnNoMemory, "memory allocation error" }, - {kIOReturnNoResources, "resource shortage" }, - {kIOReturnIPCError, "Mach IPC failure" }, - {kIOReturnNoDevice, "no such device" }, - {kIOReturnNotPrivileged, "privilege violation" }, - {kIOReturnBadArgument, "invalid argument" }, - {kIOReturnLockedRead, "device is read locked" }, - {kIOReturnLockedWrite, "device is write locked" }, - {kIOReturnExclusiveAccess, "device is exclusive access" }, - {kIOReturnBadMessageID, "bad IPC message ID" }, - {kIOReturnUnsupported, "unsupported function" }, - {kIOReturnVMError, "virtual memory error" }, - {kIOReturnInternalError, "internal driver error" }, - {kIOReturnIOError, "I/O error" }, - {kIOReturnCannotLock, "cannot acquire lock" }, - {kIOReturnNotOpen, "device is not open" }, - {kIOReturnNotReadable, "device is not readable" }, - {kIOReturnNotWritable, "device is not writeable" }, - {kIOReturnNotAligned, "alignment error" }, - {kIOReturnBadMedia, "media error" }, - {kIOReturnStillOpen, "device is still open" }, - {kIOReturnRLDError, "rld failure" }, - {kIOReturnDMAError, "DMA failure" }, - {kIOReturnBusy, "device is busy" }, - {kIOReturnTimeout, "I/O timeout" }, - {kIOReturnOffline, "device is offline" }, - {kIOReturnNotReady, "device is not ready" }, - {kIOReturnNotAttached, "device/channel is not attached" }, - {kIOReturnNoChannels, "no DMA channels available" }, - {kIOReturnNoSpace, "no space for data" }, - {kIOReturnPortExists, "device port already exists" }, - {kIOReturnCannotWire, "cannot wire physical memory" }, - {kIOReturnNoInterrupt, "no interrupt attached" }, - {kIOReturnNoFrames, "no DMA frames enqueued" }, - {kIOReturnMessageTooLarge, "message is too large" }, - {kIOReturnNotPermitted, "operation is not permitted" }, - {kIOReturnNoPower, "device is without power" }, - {kIOReturnNoMedia, "media is not present" }, - {kIOReturnUnformattedMedia, "media is not formatted" }, - {kIOReturnUnsupportedMode, "unsupported mode" }, - {kIOReturnUnderrun, "data underrun" }, - {kIOReturnOverrun, "data overrun" }, - {kIOReturnDeviceError, "device error" }, - {kIOReturnNoCompletion, "no completion routine" }, - {kIOReturnAborted, "operation was aborted" }, - {kIOReturnNoBandwidth, "bus bandwidth would be exceeded" }, - {kIOReturnNotResponding, "device is not responding" }, - {kIOReturnInvalid, "unanticipated driver error" }, - {0, NULL } - }; - - return IOFindNameForValue(rtn, IOReturn_values); +const char * +IOService::stringFromReturn( IOReturn rtn ) +{ + static const IONamedValue IOReturn_values[] = { + {kIOReturnSuccess, "success" }, + {kIOReturnError, "general error" }, + {kIOReturnNoMemory, "memory allocation error" }, + {kIOReturnNoResources, "resource shortage" }, + {kIOReturnIPCError, "Mach IPC failure" }, + {kIOReturnNoDevice, "no such device" }, + {kIOReturnNotPrivileged, "privilege violation" }, + {kIOReturnBadArgument, "invalid argument" }, + {kIOReturnLockedRead, "device is read locked" }, + {kIOReturnLockedWrite, "device is write locked" }, + {kIOReturnExclusiveAccess, "device is exclusive access" }, + {kIOReturnBadMessageID, "bad IPC message ID" }, + {kIOReturnUnsupported, "unsupported function" }, + {kIOReturnVMError, "virtual memory error" }, + {kIOReturnInternalError, "internal driver error" }, + {kIOReturnIOError, "I/O error" }, + {kIOReturnCannotLock, "cannot acquire lock" }, + {kIOReturnNotOpen, 
"device is not open" }, + {kIOReturnNotReadable, "device is not readable" }, + {kIOReturnNotWritable, "device is not writeable" }, + {kIOReturnNotAligned, "alignment error" }, + {kIOReturnBadMedia, "media error" }, + {kIOReturnStillOpen, "device is still open" }, + {kIOReturnRLDError, "rld failure" }, + {kIOReturnDMAError, "DMA failure" }, + {kIOReturnBusy, "device is busy" }, + {kIOReturnTimeout, "I/O timeout" }, + {kIOReturnOffline, "device is offline" }, + {kIOReturnNotReady, "device is not ready" }, + {kIOReturnNotAttached, "device/channel is not attached" }, + {kIOReturnNoChannels, "no DMA channels available" }, + {kIOReturnNoSpace, "no space for data" }, + {kIOReturnPortExists, "device port already exists" }, + {kIOReturnCannotWire, "cannot wire physical memory" }, + {kIOReturnNoInterrupt, "no interrupt attached" }, + {kIOReturnNoFrames, "no DMA frames enqueued" }, + {kIOReturnMessageTooLarge, "message is too large" }, + {kIOReturnNotPermitted, "operation is not permitted" }, + {kIOReturnNoPower, "device is without power" }, + {kIOReturnNoMedia, "media is not present" }, + {kIOReturnUnformattedMedia, "media is not formatted" }, + {kIOReturnUnsupportedMode, "unsupported mode" }, + {kIOReturnUnderrun, "data underrun" }, + {kIOReturnOverrun, "data overrun" }, + {kIOReturnDeviceError, "device error" }, + {kIOReturnNoCompletion, "no completion routine" }, + {kIOReturnAborted, "operation was aborted" }, + {kIOReturnNoBandwidth, "bus bandwidth would be exceeded" }, + {kIOReturnNotResponding, "device is not responding" }, + {kIOReturnInvalid, "unanticipated driver error" }, + {0, NULL } + }; + + return IOFindNameForValue(rtn, IOReturn_values); } /* * Convert an IOReturn to an errno. */ -int IOService::errnoFromReturn( IOReturn rtn ) -{ - if (unix_err(err_get_code(rtn)) == rtn) - return err_get_code(rtn); - - switch(rtn) { - // (obvious match) - case kIOReturnSuccess: - return(0); - case kIOReturnNoMemory: - return(ENOMEM); - case kIOReturnNoDevice: - return(ENXIO); - case kIOReturnVMError: - return(EFAULT); - case kIOReturnNotPermitted: - return(EPERM); - case kIOReturnNotPrivileged: - return(EACCES); - case kIOReturnIOError: - return(EIO); - case kIOReturnNotWritable: - return(EROFS); - case kIOReturnBadArgument: - return(EINVAL); - case kIOReturnUnsupported: - return(ENOTSUP); - case kIOReturnBusy: - return(EBUSY); - case kIOReturnNoPower: - return(EPWROFF); - case kIOReturnDeviceError: - return(EDEVERR); - case kIOReturnTimeout: - return(ETIMEDOUT); - case kIOReturnMessageTooLarge: - return(EMSGSIZE); - case kIOReturnNoSpace: - return(ENOSPC); - case kIOReturnCannotLock: - return(ENOLCK); - - // (best match) - case kIOReturnBadMessageID: - case kIOReturnNoCompletion: - case kIOReturnNotAligned: - return(EINVAL); - case kIOReturnNotReady: - return(EBUSY); - case kIOReturnRLDError: - return(EBADMACHO); - case kIOReturnPortExists: - case kIOReturnStillOpen: - return(EEXIST); - case kIOReturnExclusiveAccess: - case kIOReturnLockedRead: - case kIOReturnLockedWrite: - case kIOReturnNotOpen: - case kIOReturnNotReadable: - return(EACCES); - case kIOReturnCannotWire: - case kIOReturnNoResources: - return(ENOMEM); - case kIOReturnAborted: - case kIOReturnOffline: - case kIOReturnNotResponding: - return(EBUSY); - case kIOReturnBadMedia: - case kIOReturnNoMedia: - case kIOReturnNotAttached: - case kIOReturnUnformattedMedia: - return(ENXIO); // (media error) - case kIOReturnDMAError: - case kIOReturnOverrun: - case kIOReturnUnderrun: - return(EIO); // (transfer error) - case kIOReturnNoBandwidth: - 
case kIOReturnNoChannels: - case kIOReturnNoFrames: - case kIOReturnNoInterrupt: - return(EIO); // (hardware error) - case kIOReturnError: - case kIOReturnInternalError: - case kIOReturnInvalid: - return(EIO); // (generic error) - case kIOReturnIPCError: - return(EIO); // (ipc error) - default: - return(EIO); // (all other errors) - } -} - -IOReturn IOService::message( UInt32 type, IOService * provider, - void * argument ) -{ - /* - * Generic entry point for calls from the provider. A return value of - * kIOReturnSuccess indicates that the message was received, and where - * applicable, that it was successful. - */ - - return kIOReturnUnsupported; +int +IOService::errnoFromReturn( IOReturn rtn ) +{ + if (unix_err(err_get_code(rtn)) == rtn) { + return err_get_code(rtn); + } + + switch (rtn) { + // (obvious match) + case kIOReturnSuccess: + return 0; + case kIOReturnNoMemory: + return ENOMEM; + case kIOReturnNoDevice: + return ENXIO; + case kIOReturnVMError: + return EFAULT; + case kIOReturnNotPermitted: + return EPERM; + case kIOReturnNotPrivileged: + return EACCES; + case kIOReturnIOError: + return EIO; + case kIOReturnNotWritable: + return EROFS; + case kIOReturnBadArgument: + return EINVAL; + case kIOReturnUnsupported: + return ENOTSUP; + case kIOReturnBusy: + return EBUSY; + case kIOReturnNoPower: + return EPWROFF; + case kIOReturnDeviceError: + return EDEVERR; + case kIOReturnTimeout: + return ETIMEDOUT; + case kIOReturnMessageTooLarge: + return EMSGSIZE; + case kIOReturnNoSpace: + return ENOSPC; + case kIOReturnCannotLock: + return ENOLCK; + + // (best match) + case kIOReturnBadMessageID: + case kIOReturnNoCompletion: + case kIOReturnNotAligned: + return EINVAL; + case kIOReturnNotReady: + return EBUSY; + case kIOReturnRLDError: + return EBADMACHO; + case kIOReturnPortExists: + case kIOReturnStillOpen: + return EEXIST; + case kIOReturnExclusiveAccess: + case kIOReturnLockedRead: + case kIOReturnLockedWrite: + case kIOReturnNotOpen: + case kIOReturnNotReadable: + return EACCES; + case kIOReturnCannotWire: + case kIOReturnNoResources: + return ENOMEM; + case kIOReturnAborted: + case kIOReturnOffline: + case kIOReturnNotResponding: + return EBUSY; + case kIOReturnBadMedia: + case kIOReturnNoMedia: + case kIOReturnNotAttached: + case kIOReturnUnformattedMedia: + return ENXIO; // (media error) + case kIOReturnDMAError: + case kIOReturnOverrun: + case kIOReturnUnderrun: + return EIO; // (transfer error) + case kIOReturnNoBandwidth: + case kIOReturnNoChannels: + case kIOReturnNoFrames: + case kIOReturnNoInterrupt: + return EIO; // (hardware error) + case kIOReturnError: + case kIOReturnInternalError: + case kIOReturnInvalid: + return EIO; // (generic error) + case kIOReturnIPCError: + return EIO; // (ipc error) + default: + return EIO; // (all other errors) + } +} + +IOReturn +IOService::message( UInt32 type, IOService * provider, + void * argument ) +{ + /* + * Generic entry point for calls from the provider. A return value of + * kIOReturnSuccess indicates that the message was received, and where + * applicable, that it was successful. 
+ */ + + return kIOReturnUnsupported; } /* * Device memory */ -IOItemCount IOService::getDeviceMemoryCount( void ) +IOItemCount +IOService::getDeviceMemoryCount( void ) { - OSArray * array; - IOItemCount count; + OSArray * array; + IOItemCount count; - array = OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey)); - if( array) - count = array->getCount(); - else - count = 0; + array = OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey)); + if (array) { + count = array->getCount(); + } else { + count = 0; + } - return( count); + return count; } -IODeviceMemory * IOService::getDeviceMemoryWithIndex( unsigned int index ) +IODeviceMemory * +IOService::getDeviceMemoryWithIndex( unsigned int index ) { - OSArray * array; - IODeviceMemory * range; + OSArray * array; + IODeviceMemory * range; - array = OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey)); - if( array) - range = (IODeviceMemory *) array->getObject( index ); - else - range = 0; + array = OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey)); + if (array) { + range = (IODeviceMemory *) array->getObject( index ); + } else { + range = 0; + } - return( range); + return range; } -IOMemoryMap * IOService::mapDeviceMemoryWithIndex( unsigned int index, - IOOptionBits options ) +IOMemoryMap * +IOService::mapDeviceMemoryWithIndex( unsigned int index, + IOOptionBits options ) { - IODeviceMemory * range; - IOMemoryMap * map; + IODeviceMemory * range; + IOMemoryMap * map; - range = getDeviceMemoryWithIndex( index ); - if( range) - map = range->map( options ); - else - map = 0; + range = getDeviceMemoryWithIndex( index ); + if (range) { + map = range->map( options ); + } else { + map = 0; + } - return( map ); + return map; } -OSArray * IOService::getDeviceMemory( void ) +OSArray * +IOService::getDeviceMemory( void ) { - return( OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey))); + return OSDynamicCast( OSArray, getProperty( gIODeviceMemoryKey)); } -void IOService::setDeviceMemory( OSArray * array ) +void +IOService::setDeviceMemory( OSArray * array ) { - setProperty( gIODeviceMemoryKey, array); + setProperty( gIODeviceMemoryKey, array); } /* @@ -6155,21 +6458,23 @@ void IOService::setDeviceMemory( OSArray * array ) * the CPU is in an idle mode, These APIs allow a driver to specify * the maximum bus stall that they can handle. 0 indicates no limit. */ -void IOService:: +void +IOService:: setCPUSnoopDelay(UInt32 __unused ns) { #if defined(__i386__) || defined(__x86_64__) - ml_set_maxsnoop(ns); + ml_set_maxsnoop(ns); #endif /* defined(__i386__) || defined(__x86_64__) */ } -UInt32 IOService:: +UInt32 +IOService:: getCPUSnoopDelay() { #if defined(__i386__) || defined(__x86_64__) - return ml_get_maxsnoop(); + return ml_get_maxsnoop(); #else - return 0; + return 0; #endif /* defined(__i386__) || defined(__x86_64__) */ } @@ -6177,176 +6482,170 @@ getCPUSnoopDelay() static void requireMaxCpuDelay(IOService * service, UInt32 ns, UInt32 delayType) { - static const UInt kNoReplace = -1U; // Must be an illegal index - UInt replace = kNoReplace; - bool setCpuDelay = false; - - IORecursiveLockLock(sCpuDelayLock); - - UInt count = sCpuDelayData->getLength() / sizeof(CpuDelayEntry); - CpuDelayEntry *entries = (CpuDelayEntry *) sCpuDelayData->getBytesNoCopy(); - IOService * holder = NULL; - - if (ns) { - const CpuDelayEntry ne = {service, ns, delayType}; - holder = service; - // Set maximum delay. 
- for (UInt i = 0; i < count; i++) { - IOService *thisService = entries[i].fService; - bool sameType = (delayType == entries[i].fDelayType); - if ((service == thisService) && sameType) - replace = i; - else if (!thisService) { - if (kNoReplace == replace) - replace = i; - } - else if (sameType) { - const UInt32 thisMax = entries[i].fMaxDelay; - if (thisMax < ns) - { - ns = thisMax; - holder = thisService; - } - } - } - - setCpuDelay = true; - if (kNoReplace == replace) - sCpuDelayData->appendBytes(&ne, sizeof(ne)); - else - entries[replace] = ne; - } - else { - ns = -1U; // Set to max unsigned, i.e. no restriction - - for (UInt i = 0; i < count; i++) { - // Clear a maximum delay. - IOService *thisService = entries[i].fService; - if (thisService && (delayType == entries[i].fDelayType)) { - UInt32 thisMax = entries[i].fMaxDelay; - if (service == thisService) - replace = i; - else if (thisMax < ns) { - ns = thisMax; - holder = thisService; - } - } - } - - // Check if entry found - if (kNoReplace != replace) { - entries[replace].fService = 0; // Null the entry - setCpuDelay = true; - } - } - - if (setCpuDelay) - { - if (holder && debug_boot_arg) { - strlcpy(sCPULatencyHolderName[delayType], holder->getName(), sizeof(sCPULatencyHolderName[delayType])); - } - - // Must be safe to call from locked context - if (delayType == kCpuDelayBusStall) - { - ml_set_maxbusdelay(ns); - } - else if (delayType == kCpuDelayInterrupt) - { - ml_set_maxintdelay(ns); - } - sCPULatencyHolder[delayType]->setValue(holder ? holder->getRegistryEntryID() : 0); - sCPULatencySet [delayType]->setValue(ns); - - OSArray * handlers = sCpuLatencyHandlers[delayType]; - IOService * target; - if (handlers) for (unsigned int idx = 0; + static const UInt kNoReplace = -1U; // Must be an illegal index + UInt replace = kNoReplace; + bool setCpuDelay = false; + + IORecursiveLockLock(sCpuDelayLock); + + UInt count = sCpuDelayData->getLength() / sizeof(CpuDelayEntry); + CpuDelayEntry *entries = (CpuDelayEntry *) sCpuDelayData->getBytesNoCopy(); + IOService * holder = NULL; + + if (ns) { + const CpuDelayEntry ne = {service, ns, delayType}; + holder = service; + // Set maximum delay. + for (UInt i = 0; i < count; i++) { + IOService *thisService = entries[i].fService; + bool sameType = (delayType == entries[i].fDelayType); + if ((service == thisService) && sameType) { + replace = i; + } else if (!thisService) { + if (kNoReplace == replace) { + replace = i; + } + } else if (sameType) { + const UInt32 thisMax = entries[i].fMaxDelay; + if (thisMax < ns) { + ns = thisMax; + holder = thisService; + } + } + } + + setCpuDelay = true; + if (kNoReplace == replace) { + sCpuDelayData->appendBytes(&ne, sizeof(ne)); + } else { + entries[replace] = ne; + } + } else { + ns = -1U; // Set to max unsigned, i.e. no restriction + + for (UInt i = 0; i < count; i++) { + // Clear a maximum delay. 
+ IOService *thisService = entries[i].fService; + if (thisService && (delayType == entries[i].fDelayType)) { + UInt32 thisMax = entries[i].fMaxDelay; + if (service == thisService) { + replace = i; + } else if (thisMax < ns) { + ns = thisMax; + holder = thisService; + } + } + } + + // Check if entry found + if (kNoReplace != replace) { + entries[replace].fService = 0; // Null the entry + setCpuDelay = true; + } + } + + if (setCpuDelay) { + if (holder && debug_boot_arg) { + strlcpy(sCPULatencyHolderName[delayType], holder->getName(), sizeof(sCPULatencyHolderName[delayType])); + } + + // Must be safe to call from locked context + if (delayType == kCpuDelayBusStall) { + ml_set_maxbusdelay(ns); + } else if (delayType == kCpuDelayInterrupt) { + ml_set_maxintdelay(ns); + } + sCPULatencyHolder[delayType]->setValue(holder ? holder->getRegistryEntryID() : 0); + sCPULatencySet[delayType]->setValue(ns); + + OSArray * handlers = sCpuLatencyHandlers[delayType]; + IOService * target; + if (handlers) { + for (unsigned int idx = 0; (target = (IOService *) handlers->getObject(idx)); - idx++) - { - target->callPlatformFunction(sCPULatencyFunctionName[delayType], false, - (void *) (uintptr_t) ns, holder, - NULL, NULL); + idx++) { + target->callPlatformFunction(sCPULatencyFunctionName[delayType], false, + (void *) (uintptr_t) ns, holder, + NULL, NULL); + } + } } - } - IORecursiveLockUnlock(sCpuDelayLock); + IORecursiveLockUnlock(sCpuDelayLock); } static IOReturn setLatencyHandler(UInt32 delayType, IOService * target, bool enable) { - IOReturn result = kIOReturnNotFound; - OSArray * array; - unsigned int idx; - - IORecursiveLockLock(sCpuDelayLock); - - do - { - if (enable && !sCpuLatencyHandlers[delayType]) - sCpuLatencyHandlers[delayType] = OSArray::withCapacity(4); - array = sCpuLatencyHandlers[delayType]; - if (!array) - break; - idx = array->getNextIndexOfObject(target, 0); - if (!enable) - { - if (-1U != idx) - { - array->removeObject(idx); - result = kIOReturnSuccess; - } - } - else - { - if (-1U != idx) { - result = kIOReturnExclusiveAccess; - break; - } - array->setObject(target); - - UInt count = sCpuDelayData->getLength() / sizeof(CpuDelayEntry); - CpuDelayEntry *entries = (CpuDelayEntry *) sCpuDelayData->getBytesNoCopy(); - UInt32 ns = -1U; // Set to max unsigned, i.e. 
no restriction - IOService * holder = NULL; - - for (UInt i = 0; i < count; i++) { - if (entries[i].fService - && (delayType == entries[i].fDelayType) - && (entries[i].fMaxDelay < ns)) { - ns = entries[i].fMaxDelay; - holder = entries[i].fService; - } - } - target->callPlatformFunction(sCPULatencyFunctionName[delayType], false, - (void *) (uintptr_t) ns, holder, - NULL, NULL); - result = kIOReturnSuccess; - } - } - while (false); - - IORecursiveLockUnlock(sCpuDelayLock); - - return (result); + IOReturn result = kIOReturnNotFound; + OSArray * array; + unsigned int idx; + + IORecursiveLockLock(sCpuDelayLock); + + do{ + if (enable && !sCpuLatencyHandlers[delayType]) { + sCpuLatencyHandlers[delayType] = OSArray::withCapacity(4); + } + array = sCpuLatencyHandlers[delayType]; + if (!array) { + break; + } + idx = array->getNextIndexOfObject(target, 0); + if (!enable) { + if (-1U != idx) { + array->removeObject(idx); + result = kIOReturnSuccess; + } + } else { + if (-1U != idx) { + result = kIOReturnExclusiveAccess; + break; + } + array->setObject(target); + + UInt count = sCpuDelayData->getLength() / sizeof(CpuDelayEntry); + CpuDelayEntry *entries = (CpuDelayEntry *) sCpuDelayData->getBytesNoCopy(); + UInt32 ns = -1U; // Set to max unsigned, i.e. no restriction + IOService * holder = NULL; + + for (UInt i = 0; i < count; i++) { + if (entries[i].fService + && (delayType == entries[i].fDelayType) + && (entries[i].fMaxDelay < ns)) { + ns = entries[i].fMaxDelay; + holder = entries[i].fService; + } + } + target->callPlatformFunction(sCPULatencyFunctionName[delayType], false, + (void *) (uintptr_t) ns, holder, + NULL, NULL); + result = kIOReturnSuccess; + } + }while (false); + + IORecursiveLockUnlock(sCpuDelayLock); + + return result; } #endif /* defined(__i386__) || defined(__x86_64__) */ -void IOService:: +void +IOService:: requireMaxBusStall(UInt32 __unused ns) { #if defined(__i386__) || defined(__x86_64__) - requireMaxCpuDelay(this, ns, kCpuDelayBusStall); + requireMaxCpuDelay(this, ns, kCpuDelayBusStall); #endif } -void IOService:: +void +IOService:: requireMaxInterruptDelay(uint32_t __unused ns) { #if defined(__i386__) || defined(__x86_64__) - requireMaxCpuDelay(this, ns, kCpuDelayInterrupt); + requireMaxCpuDelay(this, ns, kCpuDelayInterrupt); #endif } @@ -6354,503 +6653,560 @@ requireMaxInterruptDelay(uint32_t __unused ns) * Device interrupts */ -IOReturn IOService::resolveInterrupt(IOService *nub, int source) -{ - IOInterruptController *interruptController; - OSArray *array; - OSData *data; - OSSymbol *interruptControllerName; - long numSources; - IOInterruptSource *interruptSources; - - // Get the parents list from the nub. - array = OSDynamicCast(OSArray, nub->getProperty(gIOInterruptControllersKey)); - if (array == 0) return kIOReturnNoResources; - - // Allocate space for the IOInterruptSources if needed... then return early. 
- if (nub->_interruptSources == 0) { - numSources = array->getCount(); - interruptSources = (IOInterruptSource *)IOMalloc( - numSources * sizeofAllIOInterruptSource); - if (interruptSources == 0) return kIOReturnNoMemory; - - bzero(interruptSources, numSources * sizeofAllIOInterruptSource); - - nub->_numInterruptSources = numSources; - nub->_interruptSources = interruptSources; - return kIOReturnSuccess; - } - - interruptControllerName = OSDynamicCast(OSSymbol,array->getObject(source)); - if (interruptControllerName == 0) return kIOReturnNoResources; - - interruptController = getPlatform()->lookUpInterruptController(interruptControllerName); - if (interruptController == 0) return kIOReturnNoResources; - - // Get the interrupt numbers from the nub. - array = OSDynamicCast(OSArray, nub->getProperty(gIOInterruptSpecifiersKey)); - if (array == 0) return kIOReturnNoResources; - data = OSDynamicCast(OSData, array->getObject(source)); - if (data == 0) return kIOReturnNoResources; - - // Set the interruptController and interruptSource in the nub's table. - interruptSources = nub->_interruptSources; - interruptSources[source].interruptController = interruptController; - interruptSources[source].vectorData = data; - - return kIOReturnSuccess; -} - -IOReturn IOService::lookupInterrupt(int source, bool resolve, IOInterruptController **interruptController) -{ - IOReturn ret; - - /* Make sure the _interruptSources are set */ - if (_interruptSources == 0) { - ret = resolveInterrupt(this, source); - if (ret != kIOReturnSuccess) return ret; - } - - /* Make sure the local source number is valid */ - if ((source < 0) || (source >= _numInterruptSources)) - return kIOReturnNoInterrupt; - - /* Look up the contoller for the local source */ - *interruptController = _interruptSources[source].interruptController; - - if (*interruptController == NULL) { - if (!resolve) return kIOReturnNoInterrupt; - - /* Try to resolve the interrupt */ - ret = resolveInterrupt(this, source); - if (ret != kIOReturnSuccess) return ret; - - *interruptController = _interruptSources[source].interruptController; - } - - return kIOReturnSuccess; -} - -IOReturn IOService::registerInterrupt(int source, OSObject *target, - IOInterruptAction handler, - void *refCon) -{ - IOInterruptController *interruptController; - IOReturn ret; - - ret = lookupInterrupt(source, true, &interruptController); - if (ret != kIOReturnSuccess) return ret; - - /* Register the source */ - return interruptController->registerInterrupt(this, source, target, - (IOInterruptHandler)handler, - refCon); -} - -static void IOServiceInterruptActionToBlock( OSObject * target, void * refCon, - IOService * nub, int source ) -{ - ((IOInterruptActionBlock)(refCon))(nub, source); -} - -IOReturn IOService::registerInterruptBlock(int source, OSObject *target, - IOInterruptActionBlock handler) -{ - IOReturn ret; - void * block; - - block = Block_copy(handler); - if (!block) return (kIOReturnNoMemory); - - ret = registerInterrupt(source, target, &IOServiceInterruptActionToBlock, block); - if (kIOReturnSuccess != ret) { - Block_release(block); - return (ret); - } - _interruptSourcesPrivate(this)[source].vectorBlock = block; - - return (ret); -} - -IOReturn IOService::unregisterInterrupt(int source) -{ - IOReturn ret; - IOInterruptController *interruptController; - void *block; - - ret = lookupInterrupt(source, false, &interruptController); - if (ret != kIOReturnSuccess) return ret; - - /* Unregister the source */ - block = _interruptSourcesPrivate(this)[source].vectorBlock; - ret = 
interruptController->unregisterInterrupt(this, source); - if ((kIOReturnSuccess == ret) && (block = _interruptSourcesPrivate(this)[source].vectorBlock)) { - _interruptSourcesPrivate(this)[source].vectorBlock = NULL; - Block_release(block); - } - - return ret; -} - -IOReturn IOService::addInterruptStatistics(IOInterruptAccountingData * statistics, int source) -{ - IOReportLegend * legend = NULL; - IOInterruptAccountingData * oldValue = NULL; - IOInterruptAccountingReporter * newArray = NULL; - char subgroupName[64]; - int newArraySize = 0; - int i = 0; - - if (source < 0) { - return kIOReturnBadArgument; - } - - /* - * We support statistics on a maximum of 256 interrupts per nub; if a nub - * has more than 256 interrupt specifiers associated with it, and tries - * to register a high interrupt index with interrupt accounting, panic. - * Having more than 256 interrupts associated with a single nub is - * probably a sign that something fishy is going on. - */ - if (source > IA_INDEX_MAX) { - panic("addInterruptStatistics called for an excessively large index (%d)", source); - } - - /* - * TODO: This is ugly (wrapping a lock around an allocation). I'm only - * leaving it as is because the likelihood of contention where we are - * actually growing the array is minimal (we would realistically need - * to be starting a driver for the first time, with an IOReporting - * client already in place). Nonetheless, cleanup that can be done - * to adhere to best practices; it'll make the code more complicated, - * unfortunately. - */ - IOLockLock(reserved->interruptStatisticsLock); - - /* - * Lazily allocate the statistics array. - */ - if (!reserved->interruptStatisticsArray) { - reserved->interruptStatisticsArray = IONew(IOInterruptAccountingReporter, 1); - assert(reserved->interruptStatisticsArray); - reserved->interruptStatisticsArrayCount = 1; - bzero(reserved->interruptStatisticsArray, sizeof(*reserved->interruptStatisticsArray)); - } - - if (source >= reserved->interruptStatisticsArrayCount) { - /* - * We're still within the range of supported indices, but we are out - * of space in the current array. Do a nasty realloc (because - * IORealloc isn't a thing) here. We'll double the size with each - * reallocation. - * - * Yes, the "next power of 2" could be more efficient; but this will - * be invoked incredibly rarely. Who cares. - */ - newArraySize = (reserved->interruptStatisticsArrayCount << 1); - - while (newArraySize <= source) - newArraySize = (newArraySize << 1); - newArray = IONew(IOInterruptAccountingReporter, newArraySize); - - assert(newArray); - - /* - * TODO: This even zeroes the memory it is about to overwrite. - * Shameful; fix it. Not particularly high impact, however. - */ - bzero(newArray, newArraySize * sizeof(*newArray)); - memcpy(newArray, reserved->interruptStatisticsArray, reserved->interruptStatisticsArrayCount * sizeof(*newArray)); - IODelete(reserved->interruptStatisticsArray, IOInterruptAccountingReporter, reserved->interruptStatisticsArrayCount); - reserved->interruptStatisticsArray = newArray; - reserved->interruptStatisticsArrayCount = newArraySize; - } - - if (!reserved->interruptStatisticsArray[source].reporter) { - /* - * We don't have a reporter associated with this index yet, so we - * need to create one. - */ - /* - * TODO: Some statistics do in fact have common units (time); should this be - * split into separate reporters to communicate this? 
- */ - reserved->interruptStatisticsArray[source].reporter = IOSimpleReporter::with(this, kIOReportCategoryPower, kIOReportUnitNone); - - /* - * Each statistic is given an identifier based on the interrupt index (which - * should be unique relative to any single nub) and the statistic involved. - * We should now have a sane (small and positive) index, so start - * constructing the channels for statistics. - */ - for (i = 0; i < IA_NUM_INTERRUPT_ACCOUNTING_STATISTICS; i++) { - /* - * TODO: Currently, this does not add channels for disabled statistics. - * Will this be confusing for clients? If so, we should just add the - * channels; we can avoid updating the channels even if they exist. - */ - if (IA_GET_STATISTIC_ENABLED(i)) - reserved->interruptStatisticsArray[source].reporter->addChannel(IA_GET_CHANNEL_ID(source, i), kInterruptAccountingStatisticNameArray[i]); - } - - /* - * We now need to add the legend for this reporter to the registry. - */ - OSObject * prop = copyProperty(kIOReportLegendKey); - legend = IOReportLegend::with(OSDynamicCast(OSArray, prop)); - OSSafeReleaseNULL(prop); - - /* - * Note that while we compose the subgroup name, we do not need to - * manage its lifecycle (the reporter will handle this). - */ - snprintf(subgroupName, sizeof(subgroupName), "%s %d", getName(), source); - subgroupName[sizeof(subgroupName) - 1] = 0; - legend->addReporterLegend(reserved->interruptStatisticsArray[source].reporter, kInterruptAccountingGroupName, subgroupName); - setProperty(kIOReportLegendKey, legend->getLegend()); - legend->release(); - - /* - * TODO: Is this a good idea? Probably not; my assumption is it opts - * all entities who register interrupts into public disclosure of all - * IOReporting channels. Unfortunately, this appears to be as fine - * grain as it gets. - */ - setProperty(kIOReportLegendPublicKey, true); - } - - /* - * Don't stomp existing entries. If we are about to, panic; this - * probably means we failed to tear down our old interrupt source - * correctly. - */ - oldValue = reserved->interruptStatisticsArray[source].statistics; - - if (oldValue) { - panic("addInterruptStatistics call for index %d would have clobbered existing statistics", source); - } - - reserved->interruptStatisticsArray[source].statistics = statistics; - - /* - * Inherit the reporter values for each statistic. The target may - * be torn down as part of the runtime of the service (especially - * for sleep/wake), so we inherit in order to avoid having values - * reset for no apparent reason. Our statistics are ultimately - * tied to the index and the sevice, not to an individual target, - * so we should maintain them accordingly. - */ - interruptAccountingDataInheritChannels(reserved->interruptStatisticsArray[source].statistics, reserved->interruptStatisticsArray[source].reporter); - - IOLockUnlock(reserved->interruptStatisticsLock); - - return kIOReturnSuccess; -} - -IOReturn IOService::removeInterruptStatistics(int source) -{ - IOInterruptAccountingData * value = NULL; - - if (source < 0) { - return kIOReturnBadArgument; - } - - IOLockLock(reserved->interruptStatisticsLock); - - /* - * We dynamically grow the statistics array, so an excessively - * large index value has NEVER been registered. This either - * means our cap on the array size is too small (unlikely), or - * that we have been passed a corrupt index (this must be passed - * the plain index into the interrupt specifier list). 
- */ - if (source >= reserved->interruptStatisticsArrayCount) { - panic("removeInterruptStatistics called for index %d, which was never registered", source); - } - - assert(reserved->interruptStatisticsArray); - - /* - * If there is no existing entry, we are most likely trying to - * free an interrupt owner twice, or we have corrupted the - * index value. - */ - value = reserved->interruptStatisticsArray[source].statistics; - - if (!value) { - panic("removeInterruptStatistics called for empty index %d", source); - } - - /* - * We update the statistics, so that any delta with the reporter - * state is not lost. - */ - interruptAccountingDataUpdateChannels(reserved->interruptStatisticsArray[source].statistics, reserved->interruptStatisticsArray[source].reporter); - reserved->interruptStatisticsArray[source].statistics = NULL; - IOLockUnlock(reserved->interruptStatisticsLock); - - return kIOReturnSuccess; -} - -IOReturn IOService::getInterruptType(int source, int *interruptType) -{ - IOInterruptController *interruptController; - IOReturn ret; - - ret = lookupInterrupt(source, true, &interruptController); - if (ret != kIOReturnSuccess) return ret; - - /* Return the type */ - return interruptController->getInterruptType(this, source, interruptType); -} - -IOReturn IOService::enableInterrupt(int source) -{ - IOInterruptController *interruptController; - IOReturn ret; - - ret = lookupInterrupt(source, false, &interruptController); - if (ret != kIOReturnSuccess) return ret; - - /* Enable the source */ - return interruptController->enableInterrupt(this, source); -} - -IOReturn IOService::disableInterrupt(int source) -{ - IOInterruptController *interruptController; - IOReturn ret; - - ret = lookupInterrupt(source, false, &interruptController); - if (ret != kIOReturnSuccess) return ret; - - /* Disable the source */ - return interruptController->disableInterrupt(this, source); +IOReturn +IOService::resolveInterrupt(IOService *nub, int source) +{ + IOInterruptController *interruptController; + OSArray *array; + OSData *data; + OSSymbol *interruptControllerName; + long numSources; + IOInterruptSource *interruptSources; + + // Get the parents list from the nub. + array = OSDynamicCast(OSArray, nub->getProperty(gIOInterruptControllersKey)); + if (array == 0) { + return kIOReturnNoResources; + } + + // Allocate space for the IOInterruptSources if needed... then return early. + if (nub->_interruptSources == 0) { + numSources = array->getCount(); + interruptSources = (IOInterruptSource *)IOMalloc( + numSources * sizeofAllIOInterruptSource); + if (interruptSources == 0) { + return kIOReturnNoMemory; + } + + bzero(interruptSources, numSources * sizeofAllIOInterruptSource); + + nub->_numInterruptSources = numSources; + nub->_interruptSources = interruptSources; + return kIOReturnSuccess; + } + + interruptControllerName = OSDynamicCast(OSSymbol, array->getObject(source)); + if (interruptControllerName == 0) { + return kIOReturnNoResources; + } + + interruptController = getPlatform()->lookUpInterruptController(interruptControllerName); + if (interruptController == 0) { + return kIOReturnNoResources; + } + + // Get the interrupt numbers from the nub. + array = OSDynamicCast(OSArray, nub->getProperty(gIOInterruptSpecifiersKey)); + if (array == 0) { + return kIOReturnNoResources; + } + data = OSDynamicCast(OSData, array->getObject(source)); + if (data == 0) { + return kIOReturnNoResources; + } + + // Set the interruptController and interruptSource in the nub's table. 
+ interruptSources = nub->_interruptSources; + interruptSources[source].interruptController = interruptController; + interruptSources[source].vectorData = data; + + return kIOReturnSuccess; } -IOReturn IOService::causeInterrupt(int source) +IOReturn +IOService::lookupInterrupt(int source, bool resolve, IOInterruptController **interruptController) { - IOInterruptController *interruptController; - IOReturn ret; - - ret = lookupInterrupt(source, false, &interruptController); - if (ret != kIOReturnSuccess) return ret; - - /* Cause an interrupt for the source */ - return interruptController->causeInterrupt(this, source); + IOReturn ret; + + /* Make sure the _interruptSources are set */ + if (_interruptSources == 0) { + ret = resolveInterrupt(this, source); + if (ret != kIOReturnSuccess) { + return ret; + } + } + + /* Make sure the local source number is valid */ + if ((source < 0) || (source >= _numInterruptSources)) { + return kIOReturnNoInterrupt; + } + + /* Look up the contoller for the local source */ + *interruptController = _interruptSources[source].interruptController; + + if (*interruptController == NULL) { + if (!resolve) { + return kIOReturnNoInterrupt; + } + + /* Try to resolve the interrupt */ + ret = resolveInterrupt(this, source); + if (ret != kIOReturnSuccess) { + return ret; + } + + *interruptController = _interruptSources[source].interruptController; + } + + return kIOReturnSuccess; +} + +IOReturn +IOService::registerInterrupt(int source, OSObject *target, + IOInterruptAction handler, + void *refCon) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = lookupInterrupt(source, true, &interruptController); + if (ret != kIOReturnSuccess) { + return ret; + } + + /* Register the source */ + return interruptController->registerInterrupt(this, source, target, + (IOInterruptHandler)handler, + refCon); } -IOReturn IOService::configureReport(IOReportChannelList *channelList, - IOReportConfigureAction action, - void *result, - void *destination) +static void +IOServiceInterruptActionToBlock( OSObject * target, void * refCon, + IOService * nub, int source ) { - unsigned cnt; + ((IOInterruptActionBlock)(refCon))(nub, source); +} + +IOReturn +IOService::registerInterruptBlock(int source, OSObject *target, + IOInterruptActionBlock handler) +{ + IOReturn ret; + void * block; + + block = Block_copy(handler); + if (!block) { + return kIOReturnNoMemory; + } + + ret = registerInterrupt(source, target, &IOServiceInterruptActionToBlock, block); + if (kIOReturnSuccess != ret) { + Block_release(block); + return ret; + } + _interruptSourcesPrivate(this)[source].vectorBlock = block; - for (cnt = 0; cnt < channelList->nchannels; cnt++) { - if ( channelList->channels[cnt].channel_id == kPMPowerStatesChID ) { - if (pwrMgt) configurePowerStatesReport(action, result); - else return kIOReturnUnsupported; - } - else if ( channelList->channels[cnt].channel_id == kPMCurrStateChID ) { - if (pwrMgt) configureSimplePowerReport(action, result); - else return kIOReturnUnsupported; - } - } - - IOLockLock(reserved->interruptStatisticsLock); - - /* The array count is signed (because the interrupt indices are signed), hence the cast */ - for (cnt = 0; cnt < (unsigned) reserved->interruptStatisticsArrayCount; cnt++) { - if (reserved->interruptStatisticsArray[cnt].reporter) { - /* - * If the reporter is currently associated with the statistics - * for an event source, we may need to update the reporter. 
- */ - if (reserved->interruptStatisticsArray[cnt].statistics) - interruptAccountingDataUpdateChannels(reserved->interruptStatisticsArray[cnt].statistics, reserved->interruptStatisticsArray[cnt].reporter); + return ret; +} - reserved->interruptStatisticsArray[cnt].reporter->configureReport(channelList, action, result, destination); - } - } +IOReturn +IOService::unregisterInterrupt(int source) +{ + IOReturn ret; + IOInterruptController *interruptController; + void *block; + + ret = lookupInterrupt(source, false, &interruptController); + if (ret != kIOReturnSuccess) { + return ret; + } - IOLockUnlock(reserved->interruptStatisticsLock); + /* Unregister the source */ + block = _interruptSourcesPrivate(this)[source].vectorBlock; + ret = interruptController->unregisterInterrupt(this, source); + if ((kIOReturnSuccess == ret) && (block = _interruptSourcesPrivate(this)[source].vectorBlock)) { + _interruptSourcesPrivate(this)[source].vectorBlock = NULL; + Block_release(block); + } - return kIOReturnSuccess; + return ret; } -IOReturn IOService::updateReport(IOReportChannelList *channelList, - IOReportUpdateAction action, - void *result, - void *destination) +IOReturn +IOService::addInterruptStatistics(IOInterruptAccountingData * statistics, int source) { - unsigned cnt; + IOReportLegend * legend = NULL; + IOInterruptAccountingData * oldValue = NULL; + IOInterruptAccountingReporter * newArray = NULL; + char subgroupName[64]; + int newArraySize = 0; + int i = 0; + + if (source < 0) { + return kIOReturnBadArgument; + } + + /* + * We support statistics on a maximum of 256 interrupts per nub; if a nub + * has more than 256 interrupt specifiers associated with it, and tries + * to register a high interrupt index with interrupt accounting, panic. + * Having more than 256 interrupts associated with a single nub is + * probably a sign that something fishy is going on. + */ + if (source > IA_INDEX_MAX) { + panic("addInterruptStatistics called for an excessively large index (%d)", source); + } + + /* + * TODO: This is ugly (wrapping a lock around an allocation). I'm only + * leaving it as is because the likelihood of contention where we are + * actually growing the array is minimal (we would realistically need + * to be starting a driver for the first time, with an IOReporting + * client already in place). Nonetheless, cleanup that can be done + * to adhere to best practices; it'll make the code more complicated, + * unfortunately. + */ + IOLockLock(reserved->interruptStatisticsLock); + + /* + * Lazily allocate the statistics array. + */ + if (!reserved->interruptStatisticsArray) { + reserved->interruptStatisticsArray = IONew(IOInterruptAccountingReporter, 1); + assert(reserved->interruptStatisticsArray); + reserved->interruptStatisticsArrayCount = 1; + bzero(reserved->interruptStatisticsArray, sizeof(*reserved->interruptStatisticsArray)); + } - for (cnt = 0; cnt < channelList->nchannels; cnt++) { - if ( channelList->channels[cnt].channel_id == kPMPowerStatesChID ) { - if (pwrMgt) updatePowerStatesReport(action, result, destination); - else return kIOReturnUnsupported; - } - else if ( channelList->channels[cnt].channel_id == kPMCurrStateChID ) { - if (pwrMgt) updateSimplePowerReport(action, result, destination); - else return kIOReturnUnsupported; - } - } + if (source >= reserved->interruptStatisticsArrayCount) { + /* + * We're still within the range of supported indices, but we are out + * of space in the current array. Do a nasty realloc (because + * IORealloc isn't a thing) here. 
We'll double the size with each + * reallocation. + * + * Yes, the "next power of 2" could be more efficient; but this will + * be invoked incredibly rarely. Who cares. + */ + newArraySize = (reserved->interruptStatisticsArrayCount << 1); + + while (newArraySize <= source) { + newArraySize = (newArraySize << 1); + } + newArray = IONew(IOInterruptAccountingReporter, newArraySize); + + assert(newArray); + + /* + * TODO: This even zeroes the memory it is about to overwrite. + * Shameful; fix it. Not particularly high impact, however. + */ + bzero(newArray, newArraySize * sizeof(*newArray)); + memcpy(newArray, reserved->interruptStatisticsArray, reserved->interruptStatisticsArrayCount * sizeof(*newArray)); + IODelete(reserved->interruptStatisticsArray, IOInterruptAccountingReporter, reserved->interruptStatisticsArrayCount); + reserved->interruptStatisticsArray = newArray; + reserved->interruptStatisticsArrayCount = newArraySize; + } + + if (!reserved->interruptStatisticsArray[source].reporter) { + /* + * We don't have a reporter associated with this index yet, so we + * need to create one. + */ + /* + * TODO: Some statistics do in fact have common units (time); should this be + * split into separate reporters to communicate this? + */ + reserved->interruptStatisticsArray[source].reporter = IOSimpleReporter::with(this, kIOReportCategoryPower, kIOReportUnitNone); + + /* + * Each statistic is given an identifier based on the interrupt index (which + * should be unique relative to any single nub) and the statistic involved. + * We should now have a sane (small and positive) index, so start + * constructing the channels for statistics. + */ + for (i = 0; i < IA_NUM_INTERRUPT_ACCOUNTING_STATISTICS; i++) { + /* + * TODO: Currently, this does not add channels for disabled statistics. + * Will this be confusing for clients? If so, we should just add the + * channels; we can avoid updating the channels even if they exist. + */ + if (IA_GET_STATISTIC_ENABLED(i)) { + reserved->interruptStatisticsArray[source].reporter->addChannel(IA_GET_CHANNEL_ID(source, i), kInterruptAccountingStatisticNameArray[i]); + } + } + + /* + * We now need to add the legend for this reporter to the registry. + */ + OSObject * prop = copyProperty(kIOReportLegendKey); + legend = IOReportLegend::with(OSDynamicCast(OSArray, prop)); + OSSafeReleaseNULL(prop); + + /* + * Note that while we compose the subgroup name, we do not need to + * manage its lifecycle (the reporter will handle this). + */ + snprintf(subgroupName, sizeof(subgroupName), "%s %d", getName(), source); + subgroupName[sizeof(subgroupName) - 1] = 0; + legend->addReporterLegend(reserved->interruptStatisticsArray[source].reporter, kInterruptAccountingGroupName, subgroupName); + setProperty(kIOReportLegendKey, legend->getLegend()); + legend->release(); + + /* + * TODO: Is this a good idea? Probably not; my assumption is it opts + * all entities who register interrupts into public disclosure of all + * IOReporting channels. Unfortunately, this appears to be as fine + * grain as it gets. + */ + setProperty(kIOReportLegendPublicKey, true); + } - IOLockLock(reserved->interruptStatisticsLock); + /* + * Don't stomp existing entries. If we are about to, panic; this + * probably means we failed to tear down our old interrupt source + * correctly. 
+ */ + oldValue = reserved->interruptStatisticsArray[source].statistics; - /* The array count is signed (because the interrupt indices are signed), hence the cast */ - for (cnt = 0; cnt < (unsigned) reserved->interruptStatisticsArrayCount; cnt++) { - if (reserved->interruptStatisticsArray[cnt].reporter) { - /* - * If the reporter is currently associated with the statistics - * for an event source, we need to update the reporter. - */ - if (reserved->interruptStatisticsArray[cnt].statistics) - interruptAccountingDataUpdateChannels(reserved->interruptStatisticsArray[cnt].statistics, reserved->interruptStatisticsArray[cnt].reporter); + if (oldValue) { + panic("addInterruptStatistics call for index %d would have clobbered existing statistics", source); + } + + reserved->interruptStatisticsArray[source].statistics = statistics; - reserved->interruptStatisticsArray[cnt].reporter->updateReport(channelList, action, result, destination); - } - } + /* + * Inherit the reporter values for each statistic. The target may + * be torn down as part of the runtime of the service (especially + * for sleep/wake), so we inherit in order to avoid having values + * reset for no apparent reason. Our statistics are ultimately + * tied to the index and the sevice, not to an individual target, + * so we should maintain them accordingly. + */ + interruptAccountingDataInheritChannels(reserved->interruptStatisticsArray[source].statistics, reserved->interruptStatisticsArray[source].reporter); - IOLockUnlock(reserved->interruptStatisticsLock); + IOLockUnlock(reserved->interruptStatisticsLock); - return kIOReturnSuccess; + return kIOReturnSuccess; } -uint64_t IOService::getAuthorizationID( void ) -{ - return reserved->authorizationID; +IOReturn +IOService::removeInterruptStatistics(int source) +{ + IOInterruptAccountingData * value = NULL; + + if (source < 0) { + return kIOReturnBadArgument; + } + + IOLockLock(reserved->interruptStatisticsLock); + + /* + * We dynamically grow the statistics array, so an excessively + * large index value has NEVER been registered. This either + * means our cap on the array size is too small (unlikely), or + * that we have been passed a corrupt index (this must be passed + * the plain index into the interrupt specifier list). + */ + if (source >= reserved->interruptStatisticsArrayCount) { + panic("removeInterruptStatistics called for index %d, which was never registered", source); + } + + assert(reserved->interruptStatisticsArray); + + /* + * If there is no existing entry, we are most likely trying to + * free an interrupt owner twice, or we have corrupted the + * index value. + */ + value = reserved->interruptStatisticsArray[source].statistics; + + if (!value) { + panic("removeInterruptStatistics called for empty index %d", source); + } + + /* + * We update the statistics, so that any delta with the reporter + * state is not lost. 
+ */ + interruptAccountingDataUpdateChannels(reserved->interruptStatisticsArray[source].statistics, reserved->interruptStatisticsArray[source].reporter); + reserved->interruptStatisticsArray[source].statistics = NULL; + IOLockUnlock(reserved->interruptStatisticsLock); + + return kIOReturnSuccess; } -IOReturn IOService::setAuthorizationID( uint64_t authorizationID ) +IOReturn +IOService::getInterruptType(int source, int *interruptType) { - OSObject * entitlement; - IOReturn status; + IOInterruptController *interruptController; + IOReturn ret; - entitlement = IOUserClient::copyClientEntitlement( current_task( ), "com.apple.private.iokit.IOServiceSetAuthorizationID" ); + ret = lookupInterrupt(source, true, &interruptController); + if (ret != kIOReturnSuccess) { + return ret; + } - if ( entitlement ) - { - if ( entitlement == kOSBooleanTrue ) - { - reserved->authorizationID = authorizationID; + /* Return the type */ + return interruptController->getInterruptType(this, source, interruptType); +} - status = kIOReturnSuccess; - } - else - { - status = kIOReturnNotPrivileged; - } +IOReturn +IOService::enableInterrupt(int source) +{ + IOInterruptController *interruptController; + IOReturn ret; - entitlement->release( ); - } - else - { - status = kIOReturnNotPrivileged; - } + ret = lookupInterrupt(source, false, &interruptController); + if (ret != kIOReturnSuccess) { + return ret; + } + + /* Enable the source */ + return interruptController->enableInterrupt(this, source); +} + +IOReturn +IOService::disableInterrupt(int source) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = lookupInterrupt(source, false, &interruptController); + if (ret != kIOReturnSuccess) { + return ret; + } + + /* Disable the source */ + return interruptController->disableInterrupt(this, source); +} + +IOReturn +IOService::causeInterrupt(int source) +{ + IOInterruptController *interruptController; + IOReturn ret; + + ret = lookupInterrupt(source, false, &interruptController); + if (ret != kIOReturnSuccess) { + return ret; + } + + /* Cause an interrupt for the source */ + return interruptController->causeInterrupt(this, source); +} + +IOReturn +IOService::configureReport(IOReportChannelList *channelList, + IOReportConfigureAction action, + void *result, + void *destination) +{ + unsigned cnt; + + for (cnt = 0; cnt < channelList->nchannels; cnt++) { + if (channelList->channels[cnt].channel_id == kPMPowerStatesChID) { + if (pwrMgt) { + configurePowerStatesReport(action, result); + } else { + return kIOReturnUnsupported; + } + } else if (channelList->channels[cnt].channel_id == kPMCurrStateChID) { + if (pwrMgt) { + configureSimplePowerReport(action, result); + } else { + return kIOReturnUnsupported; + } + } + } + + IOLockLock(reserved->interruptStatisticsLock); + + /* The array count is signed (because the interrupt indices are signed), hence the cast */ + for (cnt = 0; cnt < (unsigned) reserved->interruptStatisticsArrayCount; cnt++) { + if (reserved->interruptStatisticsArray[cnt].reporter) { + /* + * If the reporter is currently associated with the statistics + * for an event source, we may need to update the reporter. 
+ */ + if (reserved->interruptStatisticsArray[cnt].statistics) { + interruptAccountingDataUpdateChannels(reserved->interruptStatisticsArray[cnt].statistics, reserved->interruptStatisticsArray[cnt].reporter); + } + + reserved->interruptStatisticsArray[cnt].reporter->configureReport(channelList, action, result, destination); + } + } + + IOLockUnlock(reserved->interruptStatisticsLock); + + return kIOReturnSuccess; +} + +IOReturn +IOService::updateReport(IOReportChannelList *channelList, + IOReportUpdateAction action, + void *result, + void *destination) +{ + unsigned cnt; + + for (cnt = 0; cnt < channelList->nchannels; cnt++) { + if (channelList->channels[cnt].channel_id == kPMPowerStatesChID) { + if (pwrMgt) { + updatePowerStatesReport(action, result, destination); + } else { + return kIOReturnUnsupported; + } + } else if (channelList->channels[cnt].channel_id == kPMCurrStateChID) { + if (pwrMgt) { + updateSimplePowerReport(action, result, destination); + } else { + return kIOReturnUnsupported; + } + } + } + + IOLockLock(reserved->interruptStatisticsLock); + + /* The array count is signed (because the interrupt indices are signed), hence the cast */ + for (cnt = 0; cnt < (unsigned) reserved->interruptStatisticsArrayCount; cnt++) { + if (reserved->interruptStatisticsArray[cnt].reporter) { + /* + * If the reporter is currently associated with the statistics + * for an event source, we need to update the reporter. + */ + if (reserved->interruptStatisticsArray[cnt].statistics) { + interruptAccountingDataUpdateChannels(reserved->interruptStatisticsArray[cnt].statistics, reserved->interruptStatisticsArray[cnt].reporter); + } + + reserved->interruptStatisticsArray[cnt].reporter->updateReport(channelList, action, result, destination); + } + } + + IOLockUnlock(reserved->interruptStatisticsLock); + + return kIOReturnSuccess; +} + +uint64_t +IOService::getAuthorizationID( void ) +{ + return reserved->authorizationID; +} + +IOReturn +IOService::setAuthorizationID( uint64_t authorizationID ) +{ + OSObject * entitlement; + IOReturn status; + + entitlement = IOUserClient::copyClientEntitlement( current_task(), "com.apple.private.iokit.IOServiceSetAuthorizationID" ); + + if (entitlement) { + if (entitlement == kOSBooleanTrue) { + reserved->authorizationID = authorizationID; + + status = kIOReturnSuccess; + } else { + status = kIOReturnNotPrivileged; + } + + entitlement->release(); + } else { + status = kIOReturnNotPrivileged; + } - return status; + return status; } #if __LP64__ diff --git a/iokit/Kernel/IOServicePM.cpp b/iokit/Kernel/IOServicePM.cpp index e6bf85a01..c4d1c96fd 100644 --- a/iokit/Kernel/IOServicePM.cpp +++ b/iokit/Kernel/IOServicePM.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -62,15 +62,16 @@ static void idle_timer_expired(thread_call_param_t, thread_call_param_t); static void tellKernelClientApplier(OSObject * object, void * arg); static void tellAppClientApplier(OSObject * object, void * arg); -static uint64_t computeTimeDeltaNS( const AbsoluteTime * start ) +static uint64_t +computeTimeDeltaNS( const AbsoluteTime * start ) { - AbsoluteTime now; - uint64_t nsec; + AbsoluteTime now; + uint64_t nsec; - clock_get_uptime(&now); - SUB_ABSOLUTETIME(&now, start); - absolutetime_to_nanoseconds(now, &nsec); - return nsec; + clock_get_uptime(&now); + SUB_ABSOLUTETIME(&now, start); + absolutetime_to_nanoseconds(now, &nsec); + return nsec; } #if PM_VARS_SUPPORT @@ -99,14 +100,14 @@ static char gIOSpinDumpDelayType[16]; static uint32_t gIOSpinDumpDelayDuration = 0; static SYSCTL_STRING(_debug, OID_AUTO, swd_kext_name, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &gIOSpinDumpKextName, sizeof(gIOSpinDumpKextName), ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &gIOSpinDumpKextName, sizeof(gIOSpinDumpKextName), ""); static SYSCTL_STRING(_debug, OID_AUTO, swd_delay_type, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &gIOSpinDumpDelayType, sizeof(gIOSpinDumpDelayType), ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &gIOSpinDumpDelayType, sizeof(gIOSpinDumpDelayType), ""); static SYSCTL_INT(_debug, OID_AUTO, swd_delay_duration, - CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, - &gIOSpinDumpDelayDuration, 0, ""); + CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, + &gIOSpinDumpDelayDuration, 0, ""); const OSSymbol * gIOPMPowerClientDevice = 0; const OSSymbol * gIOPMPowerClientDriver = 0; @@ -119,23 +120,25 @@ static bool gIOPMAdvisoryTickleEnabled = true; static thread_t gIOPMWatchDogThread = NULL; uint32_t gCanSleepTimeout = 0; -static uint32_t getPMRequestType( void ) +static uint32_t +getPMRequestType( void ) { - uint32_t type = kIOPMRequestTypeInvalid; - if (gIOPMRequest) - type = gIOPMRequest->getType(); - return type; + uint32_t type = kIOPMRequestTypeInvalid; + if (gIOPMRequest) { + type = gIOPMRequest->getType(); + } + return type; } -static IOPMRequestTag getPMRequestTag( void ) +static IOPMRequestTag +getPMRequestTag( void ) { - IOPMRequestTag tag = 0; - if (gIOPMRequest && - (gIOPMRequest->getType() == kIOPMRequestTypeRequestPowerStateOverride)) - { - tag = gIOPMRequest->fRequestTag; - } - return tag; + IOPMRequestTag tag = 0; + if (gIOPMRequest && + (gIOPMRequest->getType() == kIOPMRequestTypeRequestPowerStateOverride)) { + tag = gIOPMRequest->fRequestTag; + } + return tag; } SYSCTL_UINT(_kern, OID_AUTO, pmtimeout, CTLFLAG_RW | CTLFLAG_LOCKED, &gCanSleepTimeout, 0, "Power Management Timeout"); @@ -145,16 +148,16 @@ SYSCTL_UINT(_kern, OID_AUTO, pmtimeout, CTLFLAG_RW | CTLFLAG_LOCKED, &gCanSleepT //****************************************************************************** #define PM_ERROR(x...) do { kprintf(x);IOLog(x); \ - } while (false) + } while (false) #define PM_LOG(x...) do { kprintf(x); } while (false) #define PM_LOG1(x...) 
do { \ - if (kIOLogDebugPower & gIOKitDebug) \ - kprintf(x); } while (false) + if (kIOLogDebugPower & gIOKitDebug) \ + kprintf(x); } while (false) #define PM_LOG2(x...) do { \ - if (kIOLogDebugPower & gIOKitDebug) \ - kprintf(x); } while (false) + if (kIOLogDebugPower & gIOKitDebug) \ + kprintf(x); } while (false) #if 0 #define PM_LOG3(x...) do { kprintf(x); } while (false) @@ -163,10 +166,10 @@ SYSCTL_UINT(_kern, OID_AUTO, pmtimeout, CTLFLAG_RW | CTLFLAG_LOCKED, &gCanSleepT #endif #define RD_LOG(x...) do { \ - if ((kIOLogPMRootDomain & gIOKitDebug) && \ - (getPMRootDomain() == this)) { \ - kprintf("PMRD: " x); \ - }} while (false) + if ((kIOLogPMRootDomain & gIOKitDebug) && \ + (getPMRootDomain() == this)) { \ + kprintf("PMRD: " x); \ + }} while (false) #define PM_ASSERT_IN_GATE(x) \ do { \ assert(gIOPMWorkLoop->inGate()); \ @@ -194,28 +197,28 @@ do { \ #define OUR_PMLog(t, a, b) do { \ if (pwrMgt) { \ - if (gIOKitDebug & kIOLogPower) \ - pwrMgt->pmPrint(t, a, b); \ - if (gIOKitTrace & kIOTracePowerMgmt) \ - pwrMgt->pmTrace(t, DBG_FUNC_NONE, a, b); \ + if (gIOKitDebug & kIOLogPower) \ + pwrMgt->pmPrint(t, a, b); \ + if (gIOKitTrace & kIOTracePowerMgmt) \ + pwrMgt->pmTrace(t, DBG_FUNC_NONE, a, b); \ } \ } while(0) #define OUR_PMLogFuncStart(t, a, b) do { \ if (pwrMgt) { \ - if (gIOKitDebug & kIOLogPower) \ - pwrMgt->pmPrint(t, a, b); \ - if (gIOKitTrace & kIOTracePowerMgmt) \ - pwrMgt->pmTrace(t, DBG_FUNC_START, a, b); \ + if (gIOKitDebug & kIOLogPower) \ + pwrMgt->pmPrint(t, a, b); \ + if (gIOKitTrace & kIOTracePowerMgmt) \ + pwrMgt->pmTrace(t, DBG_FUNC_START, a, b); \ } \ } while(0) #define OUR_PMLogFuncEnd(t, a, b) do { \ if (pwrMgt) { \ - if (gIOKitDebug & kIOLogPower) \ - pwrMgt->pmPrint(-t, a, b); \ - if (gIOKitTrace & kIOTracePowerMgmt) \ - pwrMgt->pmTrace(t, DBG_FUNC_END, a, b); \ + if (gIOKitDebug & kIOLogPower) \ + pwrMgt->pmPrint(-t, a, b); \ + if (gIOKitTrace & kIOTracePowerMgmt) \ + pwrMgt->pmTrace(t, DBG_FUNC_END, a, b); \ } \ } while(0) @@ -249,43 +252,43 @@ do { \ #define LOG_KEXT_RESPONSE_TIMES (100ULL * 1000ULL * 1000ULL) enum { - kReserveDomainPower = 1 + kReserveDomainPower = 1 }; #define MS_PUSH(n) \ do { assert(kIOPM_BadMachineState == fSavedMachineState); \ - assert(kIOPM_BadMachineState != n); \ - fSavedMachineState = n; } while (false) + assert(kIOPM_BadMachineState != n); \ + fSavedMachineState = n; } while (false) #define MS_POP() \ do { assert(kIOPM_BadMachineState != fSavedMachineState); \ - fMachineState = fSavedMachineState; \ - fSavedMachineState = kIOPM_BadMachineState; } while (false) + fMachineState = fSavedMachineState; \ + fSavedMachineState = kIOPM_BadMachineState; } while (false) #define PM_ACTION_0(a) \ do { if (fPMActions.a) { \ - (fPMActions.a)(fPMActions.target, this, &fPMActions); } \ - } while (false) + (fPMActions.a)(fPMActions.target, this, &fPMActions); } \ + } while (false) #define PM_ACTION_2(a, x, y) \ do { if (fPMActions.a) { \ - (fPMActions.a)(fPMActions.target, this, &fPMActions, x, y, \ - getPMRequestTag()); } \ - } while (false) + (fPMActions.a)(fPMActions.target, this, &fPMActions, x, y, \ + getPMRequestTag()); } \ + } while (false) #define PM_ACTION_3(a, x, y, z) \ do { if (fPMActions.a) { \ - (fPMActions.a)(fPMActions.target, this, &fPMActions, x, y, z); } \ - } while (false) + (fPMActions.a)(fPMActions.target, this, &fPMActions, x, y, z); } \ + } while (false) static OSNumber * copyClientIDForNotification( - OSObject *object, - IOPMInterestContext *context); + OSObject *object, + IOPMInterestContext *context); static void 
logClientIDForNotification( - OSObject *object, - IOPMInterestContext *context, - const char *logString); + OSObject *object, + IOPMInterestContext *context, + const char *logString); //********************************************************************************* // PM machine states @@ -294,39 +297,39 @@ static void logClientIDForNotification( //********************************************************************************* enum { - kIOPM_Finished = 0, - - kIOPM_OurChangeTellClientsPowerDown = 1, - kIOPM_OurChangeTellUserPMPolicyPowerDown = 2, - kIOPM_OurChangeTellPriorityClientsPowerDown = 3, - kIOPM_OurChangeNotifyInterestedDriversWillChange = 4, - kIOPM_OurChangeSetPowerState = 5, - kIOPM_OurChangeWaitForPowerSettle = 6, - kIOPM_OurChangeNotifyInterestedDriversDidChange = 7, - kIOPM_OurChangeTellCapabilityDidChange = 8, - kIOPM_OurChangeFinish = 9, - - kIOPM_ParentChangeTellPriorityClientsPowerDown = 10, - kIOPM_ParentChangeNotifyInterestedDriversWillChange = 11, - kIOPM_ParentChangeSetPowerState = 12, - kIOPM_ParentChangeWaitForPowerSettle = 13, - kIOPM_ParentChangeNotifyInterestedDriversDidChange = 14, - kIOPM_ParentChangeTellCapabilityDidChange = 15, - kIOPM_ParentChangeAcknowledgePowerChange = 16, - - kIOPM_NotifyChildrenStart = 17, - kIOPM_NotifyChildrenOrdered = 18, - kIOPM_NotifyChildrenDelayed = 19, - kIOPM_SyncTellClientsPowerDown = 20, - kIOPM_SyncTellPriorityClientsPowerDown = 21, - kIOPM_SyncNotifyWillChange = 22, - kIOPM_SyncNotifyDidChange = 23, - kIOPM_SyncTellCapabilityDidChange = 24, - kIOPM_SyncFinish = 25, - kIOPM_TellCapabilityChangeDone = 26, - kIOPM_DriverThreadCallDone = 27, - - kIOPM_BadMachineState = 0xFFFFFFFF + kIOPM_Finished = 0, + + kIOPM_OurChangeTellClientsPowerDown = 1, + kIOPM_OurChangeTellUserPMPolicyPowerDown = 2, + kIOPM_OurChangeTellPriorityClientsPowerDown = 3, + kIOPM_OurChangeNotifyInterestedDriversWillChange = 4, + kIOPM_OurChangeSetPowerState = 5, + kIOPM_OurChangeWaitForPowerSettle = 6, + kIOPM_OurChangeNotifyInterestedDriversDidChange = 7, + kIOPM_OurChangeTellCapabilityDidChange = 8, + kIOPM_OurChangeFinish = 9, + + kIOPM_ParentChangeTellPriorityClientsPowerDown = 10, + kIOPM_ParentChangeNotifyInterestedDriversWillChange = 11, + kIOPM_ParentChangeSetPowerState = 12, + kIOPM_ParentChangeWaitForPowerSettle = 13, + kIOPM_ParentChangeNotifyInterestedDriversDidChange = 14, + kIOPM_ParentChangeTellCapabilityDidChange = 15, + kIOPM_ParentChangeAcknowledgePowerChange = 16, + + kIOPM_NotifyChildrenStart = 17, + kIOPM_NotifyChildrenOrdered = 18, + kIOPM_NotifyChildrenDelayed = 19, + kIOPM_SyncTellClientsPowerDown = 20, + kIOPM_SyncTellPriorityClientsPowerDown = 21, + kIOPM_SyncNotifyWillChange = 22, + kIOPM_SyncNotifyDidChange = 23, + kIOPM_SyncTellCapabilityDidChange = 24, + kIOPM_SyncFinish = 25, + kIOPM_TellCapabilityChangeDone = 26, + kIOPM_DriverThreadCallDone = 27, + + kIOPM_BadMachineState = 0xFFFFFFFF }; //********************************************************************************* @@ -335,190 +338,179 @@ enum { // Initialize power management. 
//********************************************************************************* -void IOService::PMinit( void ) -{ - if ( !initialized ) - { - if ( !gIOPMInitialized ) - { - gPlatform = getPlatform(); - gIOPMWorkLoop = IOWorkLoop::workLoop(); - if (gIOPMWorkLoop) - { - gIOPMRequestQueue = IOPMRequestQueue::create( - this, OSMemberFunctionCast(IOPMRequestQueue::Action, - this, &IOService::actionPMRequestQueue)); - - gIOPMReplyQueue = IOPMRequestQueue::create( - this, OSMemberFunctionCast(IOPMRequestQueue::Action, - this, &IOService::actionPMReplyQueue)); - - gIOPMWorkQueue = IOPMWorkQueue::create(this, - OSMemberFunctionCast(IOPMWorkQueue::Action, this, - &IOService::actionPMWorkQueueInvoke), - OSMemberFunctionCast(IOPMWorkQueue::Action, this, - &IOService::actionPMWorkQueueRetire)); - - gIOPMCompletionQueue = IOPMCompletionQueue::create( - this, OSMemberFunctionCast(IOPMCompletionQueue::Action, - this, &IOService::actionPMCompletionQueue)); - - if (gIOPMWorkLoop->addEventSource(gIOPMRequestQueue) != - kIOReturnSuccess) - { - gIOPMRequestQueue->release(); - gIOPMRequestQueue = 0; - } - - if (gIOPMWorkLoop->addEventSource(gIOPMReplyQueue) != - kIOReturnSuccess) - { - gIOPMReplyQueue->release(); - gIOPMReplyQueue = 0; - } - - if (gIOPMWorkLoop->addEventSource(gIOPMWorkQueue) != - kIOReturnSuccess) - { - gIOPMWorkQueue->release(); - gIOPMWorkQueue = 0; - } - - // Must be added after the work queue, which pushes request - // to the completion queue without signaling the work loop. - if (gIOPMWorkLoop->addEventSource(gIOPMCompletionQueue) != - kIOReturnSuccess) - { - gIOPMCompletionQueue->release(); - gIOPMCompletionQueue = 0; - } - - gIOPMPowerClientDevice = - OSSymbol::withCStringNoCopy( "DevicePowerState" ); - - gIOPMPowerClientDriver = - OSSymbol::withCStringNoCopy( "DriverPowerState" ); - - gIOPMPowerClientChildProxy = - OSSymbol::withCStringNoCopy( "ChildProxyPowerState" ); - - gIOPMPowerClientChildren = - OSSymbol::withCStringNoCopy( "ChildrenPowerState" ); - - gIOPMPowerClientAdvisoryTickle = - OSSymbol::withCStringNoCopy( "AdvisoryTicklePowerState" ); - - gIOPMPowerClientRootDomain = - OSSymbol::withCStringNoCopy( "RootDomainPower" ); - - gIOSpinDumpKextName[0] = '\0'; - gIOSpinDumpDelayType[0] = '\0'; - } - - if (gIOPMRequestQueue && gIOPMReplyQueue && gIOPMCompletionQueue) - gIOPMInitialized = true; - } - if (!gIOPMInitialized) - return; - - pwrMgt = new IOServicePM; - pwrMgt->init(); - setProperty(kPwrMgtKey, pwrMgt); - - queue_init(&pwrMgt->WorkChain); - queue_init(&pwrMgt->RequestHead); - queue_init(&pwrMgt->PMDriverCallQueue); - - fOwner = this; - fPMLock = IOLockAlloc(); - fInterestedDrivers = new IOPMinformeeList; - fInterestedDrivers->initialize(); - fDesiredPowerState = kPowerStateZero; - fDeviceDesire = kPowerStateZero; - fInitialPowerChange = true; - fInitialSetPowerState = true; - fPreviousRequestPowerFlags = 0; - fDeviceOverrideEnabled = false; - fMachineState = kIOPM_Finished; - fSavedMachineState = kIOPM_BadMachineState; - fIdleTimerMinPowerState = kPowerStateZero; - fActivityLock = IOLockAlloc(); - fStrictTreeOrder = false; - fActivityTicklePowerState = kInvalidTicklePowerState; - fAdvisoryTicklePowerState = kInvalidTicklePowerState; - fControllingDriver = NULL; - fPowerStates = NULL; - fNumberOfPowerStates = 0; - fCurrentPowerState = kPowerStateZero; - fParentsCurrentPowerFlags = 0; - fMaxPowerState = kPowerStateZero; - fName = getName(); - fParentsKnowState = false; - fSerialNumber = 0; - fResponseArray = NULL; - fNotifyClientArray = NULL; - 
fCurrentPowerConsumption = kIOPMUnknown; - fOverrideMaxPowerState = kIOPMPowerStateMax; - - if (!gIOPMRootNode && (getParentEntry(gIOPowerPlane) == getRegistryRoot())) - { - gIOPMRootNode = this; - fParentsKnowState = true; - } - else if (getProperty(kIOPMResetPowerStateOnWakeKey) == kOSBooleanTrue) - { - fResetPowerStateOnWake = true; - } - - if (IS_ROOT_DOMAIN) - { - fWatchdogTimer = thread_call_allocate( - &IOService::watchdog_timer_expired, (thread_call_param_t)this); - fWatchdogLock = IOLockAlloc(); - - fBlockedArray = OSArray::withCapacity(4); - } - - fAckTimer = thread_call_allocate( - &IOService::ack_timer_expired, (thread_call_param_t)this); - fSettleTimer = thread_call_allocate( - &settle_timer_expired, (thread_call_param_t)this); - fIdleTimer = thread_call_allocate( - &idle_timer_expired, (thread_call_param_t)this); - fDriverCallEntry = thread_call_allocate( - (thread_call_func_t) &IOService::pmDriverCallout, this); - assert(fDriverCallEntry); - if (kIOKextSpinDump & gIOKitDebug) - { - fSpinDumpTimer = thread_call_allocate( - &IOService::spindump_timer_expired, (thread_call_param_t)this); - } - - // Check for powerChangeDone override. - if (OSMemberFunctionCast(void (*)(void), - getResourceService(), &IOService::powerChangeDone) != - OSMemberFunctionCast(void (*)(void), - this, &IOService::powerChangeDone)) - { - fPCDFunctionOverride = true; - } +void +IOService::PMinit( void ) +{ + if (!initialized) { + if (!gIOPMInitialized) { + gPlatform = getPlatform(); + gIOPMWorkLoop = IOWorkLoop::workLoop(); + if (gIOPMWorkLoop) { + gIOPMRequestQueue = IOPMRequestQueue::create( + this, OSMemberFunctionCast(IOPMRequestQueue::Action, + this, &IOService::actionPMRequestQueue)); + + gIOPMReplyQueue = IOPMRequestQueue::create( + this, OSMemberFunctionCast(IOPMRequestQueue::Action, + this, &IOService::actionPMReplyQueue)); + + gIOPMWorkQueue = IOPMWorkQueue::create(this, + OSMemberFunctionCast(IOPMWorkQueue::Action, this, + &IOService::actionPMWorkQueueInvoke), + OSMemberFunctionCast(IOPMWorkQueue::Action, this, + &IOService::actionPMWorkQueueRetire)); + + gIOPMCompletionQueue = IOPMCompletionQueue::create( + this, OSMemberFunctionCast(IOPMCompletionQueue::Action, + this, &IOService::actionPMCompletionQueue)); + + if (gIOPMWorkLoop->addEventSource(gIOPMRequestQueue) != + kIOReturnSuccess) { + gIOPMRequestQueue->release(); + gIOPMRequestQueue = 0; + } + + if (gIOPMWorkLoop->addEventSource(gIOPMReplyQueue) != + kIOReturnSuccess) { + gIOPMReplyQueue->release(); + gIOPMReplyQueue = 0; + } + + if (gIOPMWorkLoop->addEventSource(gIOPMWorkQueue) != + kIOReturnSuccess) { + gIOPMWorkQueue->release(); + gIOPMWorkQueue = 0; + } + + // Must be added after the work queue, which pushes request + // to the completion queue without signaling the work loop. 
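A user-space model, not part of the patch, of the ordering constraint stated in the comment above: retiring a request on the work queue appends it to the completion queue without posting a fresh wakeup, so a single pass of the loop must visit the work queue before the completion queue.

#include <deque>
#include <iostream>

int main() {
    std::deque<int> work = {1, 2};   // illustrative request IDs
    std::deque<int> completion;

    // One wakeup of the modeled work loop: servicing the work queue feeds
    // the completion queue without signaling again...
    while (!work.empty()) {
        completion.push_back(work.front());
        work.pop_front();
    }
    // ...so the completion queue must be checked later in the same pass,
    // which is why it is added to the work loop after the work queue.
    while (!completion.empty()) {
        std::cout << "retired request " << completion.front() << "\n";
        completion.pop_front();
    }
    return 0;
}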
+ if (gIOPMWorkLoop->addEventSource(gIOPMCompletionQueue) != + kIOReturnSuccess) { + gIOPMCompletionQueue->release(); + gIOPMCompletionQueue = 0; + } + + gIOPMPowerClientDevice = + OSSymbol::withCStringNoCopy( "DevicePowerState" ); + + gIOPMPowerClientDriver = + OSSymbol::withCStringNoCopy( "DriverPowerState" ); + + gIOPMPowerClientChildProxy = + OSSymbol::withCStringNoCopy( "ChildProxyPowerState" ); + + gIOPMPowerClientChildren = + OSSymbol::withCStringNoCopy( "ChildrenPowerState" ); + + gIOPMPowerClientAdvisoryTickle = + OSSymbol::withCStringNoCopy( "AdvisoryTicklePowerState" ); + + gIOPMPowerClientRootDomain = + OSSymbol::withCStringNoCopy( "RootDomainPower" ); + + gIOSpinDumpKextName[0] = '\0'; + gIOSpinDumpDelayType[0] = '\0'; + } + + if (gIOPMRequestQueue && gIOPMReplyQueue && gIOPMCompletionQueue) { + gIOPMInitialized = true; + } + } + if (!gIOPMInitialized) { + return; + } + + pwrMgt = new IOServicePM; + pwrMgt->init(); + setProperty(kPwrMgtKey, pwrMgt); + + queue_init(&pwrMgt->WorkChain); + queue_init(&pwrMgt->RequestHead); + queue_init(&pwrMgt->PMDriverCallQueue); + + fOwner = this; + fPMLock = IOLockAlloc(); + fInterestedDrivers = new IOPMinformeeList; + fInterestedDrivers->initialize(); + fDesiredPowerState = kPowerStateZero; + fDeviceDesire = kPowerStateZero; + fInitialPowerChange = true; + fInitialSetPowerState = true; + fPreviousRequestPowerFlags = 0; + fDeviceOverrideEnabled = false; + fMachineState = kIOPM_Finished; + fSavedMachineState = kIOPM_BadMachineState; + fIdleTimerMinPowerState = kPowerStateZero; + fActivityLock = IOLockAlloc(); + fStrictTreeOrder = false; + fActivityTicklePowerState = kInvalidTicklePowerState; + fAdvisoryTicklePowerState = kInvalidTicklePowerState; + fControllingDriver = NULL; + fPowerStates = NULL; + fNumberOfPowerStates = 0; + fCurrentPowerState = kPowerStateZero; + fParentsCurrentPowerFlags = 0; + fMaxPowerState = kPowerStateZero; + fName = getName(); + fParentsKnowState = false; + fSerialNumber = 0; + fResponseArray = NULL; + fNotifyClientArray = NULL; + fCurrentPowerConsumption = kIOPMUnknown; + fOverrideMaxPowerState = kIOPMPowerStateMax; + + if (!gIOPMRootNode && (getParentEntry(gIOPowerPlane) == getRegistryRoot())) { + gIOPMRootNode = this; + fParentsKnowState = true; + } else if (getProperty(kIOPMResetPowerStateOnWakeKey) == kOSBooleanTrue) { + fResetPowerStateOnWake = true; + } + + if (IS_ROOT_DOMAIN) { + fWatchdogTimer = thread_call_allocate( + &IOService::watchdog_timer_expired, (thread_call_param_t)this); + fWatchdogLock = IOLockAlloc(); + + fBlockedArray = OSArray::withCapacity(4); + } + + fAckTimer = thread_call_allocate( + &IOService::ack_timer_expired, (thread_call_param_t)this); + fSettleTimer = thread_call_allocate( + &settle_timer_expired, (thread_call_param_t)this); + fIdleTimer = thread_call_allocate( + &idle_timer_expired, (thread_call_param_t)this); + fDriverCallEntry = thread_call_allocate( + (thread_call_func_t) &IOService::pmDriverCallout, this); + assert(fDriverCallEntry); + if (kIOKextSpinDump & gIOKitDebug) { + fSpinDumpTimer = thread_call_allocate( + &IOService::spindump_timer_expired, (thread_call_param_t)this); + } + + // Check for powerChangeDone override. 
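The five timers allocated just above are paired with cancel-then-free teardown in PMfree(). A condensed sketch of that thread_call lifecycle; example_timer_expired and the two helpers are illustrative names, not kernel APIs.

// Illustrative sketch only; mirrors the allocate/cancel/free pattern above.
static void
example_timer_expired(thread_call_param_t owner, thread_call_param_t)
{
    // Fires later on a kernel callout thread, with 'owner' passed through.
}

static thread_call_t
allocate_example_timer(void * owner)
{
    return thread_call_allocate(&example_timer_expired,
        (thread_call_param_t) owner);
}

static void
free_example_timer(thread_call_t call)
{
    thread_call_cancel(call);   // dequeue any pending callout first...
    thread_call_free(call);     // ...so it cannot fire against freed state
}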
+ if (OSMemberFunctionCast(void (*)(void), + getResourceService(), &IOService::powerChangeDone) != + OSMemberFunctionCast(void (*)(void), + this, &IOService::powerChangeDone)) { + fPCDFunctionOverride = true; + } #if PM_VARS_SUPPORT - IOPMprot * prot = new IOPMprot; - if (prot) - { - prot->init(); - prot->ourName = fName; - prot->thePlatform = gPlatform; - fPMVars = prot; - pm_vars = prot; - } + IOPMprot * prot = new IOPMprot; + if (prot) { + prot->init(); + prot->ourName = fName; + prot->thePlatform = gPlatform; + fPMVars = prot; + pm_vars = prot; + } #else - pm_vars = (void *) (uintptr_t) true; + pm_vars = (void *) (uintptr_t) true; #endif - initialized = true; - } + initialized = true; + } } //********************************************************************************* @@ -527,111 +519,111 @@ void IOService::PMinit( void ) // Free the data created by PMinit. Only called from IOService::free(). //********************************************************************************* -void IOService::PMfree( void ) -{ - initialized = false; - pm_vars = 0; - - if ( pwrMgt ) - { - assert(fMachineState == kIOPM_Finished); - assert(fInsertInterestSet == NULL); - assert(fRemoveInterestSet == NULL); - assert(fNotifyChildArray == NULL); - assert(queue_empty(&pwrMgt->RequestHead)); - assert(queue_empty(&fPMDriverCallQueue)); - - if (fWatchdogTimer) { - thread_call_cancel(fWatchdogTimer); - thread_call_free(fWatchdogTimer); - fWatchdogTimer = NULL; - } - - if (fWatchdogLock) { - IOLockFree(fWatchdogLock); - fWatchdogLock = NULL; - } - - if (fBlockedArray) { - fBlockedArray->release(); - fBlockedArray = NULL; - } - - if ( fSettleTimer ) { - thread_call_cancel(fSettleTimer); - thread_call_free(fSettleTimer); - fSettleTimer = NULL; - } - if ( fAckTimer ) { - thread_call_cancel(fAckTimer); - thread_call_free(fAckTimer); - fAckTimer = NULL; - } - if ( fIdleTimer ) { - thread_call_cancel(fIdleTimer); - thread_call_free(fIdleTimer); - fIdleTimer = NULL; - } - if ( fDriverCallEntry ) { - thread_call_free(fDriverCallEntry); - fDriverCallEntry = NULL; - } - if ( fSpinDumpTimer ) { - thread_call_cancel(fSpinDumpTimer); - thread_call_free(fSpinDumpTimer); - fSpinDumpTimer = NULL; - } - if ( fPMLock ) { - IOLockFree(fPMLock); - fPMLock = NULL; - } - if ( fActivityLock ) { - IOLockFree(fActivityLock); - fActivityLock = NULL; - } - if ( fInterestedDrivers ) { - fInterestedDrivers->release(); - fInterestedDrivers = NULL; - } - if (fDriverCallParamSlots && fDriverCallParamPtr) { - IODelete(fDriverCallParamPtr, DriverCallParam, fDriverCallParamSlots); - fDriverCallParamPtr = 0; - fDriverCallParamSlots = 0; - } - if ( fResponseArray ) { - fResponseArray->release(); - fResponseArray = NULL; - } - if ( fNotifyClientArray ) { - fNotifyClientArray->release(); - fNotifyClientArray = NULL; - } - if (fPowerStates && fNumberOfPowerStates) { - IODelete(fPowerStates, IOPMPSEntry, fNumberOfPowerStates); - fNumberOfPowerStates = 0; - fPowerStates = NULL; - } - if (fPowerClients) { - fPowerClients->release(); - fPowerClients = 0; - } +void +IOService::PMfree( void ) +{ + initialized = false; + pm_vars = 0; + + if (pwrMgt) { + assert(fMachineState == kIOPM_Finished); + assert(fInsertInterestSet == NULL); + assert(fRemoveInterestSet == NULL); + assert(fNotifyChildArray == NULL); + assert(queue_empty(&pwrMgt->RequestHead)); + assert(queue_empty(&fPMDriverCallQueue)); + + if (fWatchdogTimer) { + thread_call_cancel(fWatchdogTimer); + thread_call_free(fWatchdogTimer); + fWatchdogTimer = NULL; + } + + if (fWatchdogLock) { + 
IOLockFree(fWatchdogLock); + fWatchdogLock = NULL; + } + + if (fBlockedArray) { + fBlockedArray->release(); + fBlockedArray = NULL; + } + + if (fSettleTimer) { + thread_call_cancel(fSettleTimer); + thread_call_free(fSettleTimer); + fSettleTimer = NULL; + } + if (fAckTimer) { + thread_call_cancel(fAckTimer); + thread_call_free(fAckTimer); + fAckTimer = NULL; + } + if (fIdleTimer) { + thread_call_cancel(fIdleTimer); + thread_call_free(fIdleTimer); + fIdleTimer = NULL; + } + if (fDriverCallEntry) { + thread_call_free(fDriverCallEntry); + fDriverCallEntry = NULL; + } + if (fSpinDumpTimer) { + thread_call_cancel(fSpinDumpTimer); + thread_call_free(fSpinDumpTimer); + fSpinDumpTimer = NULL; + } + if (fPMLock) { + IOLockFree(fPMLock); + fPMLock = NULL; + } + if (fActivityLock) { + IOLockFree(fActivityLock); + fActivityLock = NULL; + } + if (fInterestedDrivers) { + fInterestedDrivers->release(); + fInterestedDrivers = NULL; + } + if (fDriverCallParamSlots && fDriverCallParamPtr) { + IODelete(fDriverCallParamPtr, DriverCallParam, fDriverCallParamSlots); + fDriverCallParamPtr = 0; + fDriverCallParamSlots = 0; + } + if (fResponseArray) { + fResponseArray->release(); + fResponseArray = NULL; + } + if (fNotifyClientArray) { + fNotifyClientArray->release(); + fNotifyClientArray = NULL; + } + if (fPowerStates && fNumberOfPowerStates) { + IODelete(fPowerStates, IOPMPSEntry, fNumberOfPowerStates); + fNumberOfPowerStates = 0; + fPowerStates = NULL; + } + if (fPowerClients) { + fPowerClients->release(); + fPowerClients = 0; + } #if PM_VARS_SUPPORT - if (fPMVars) - { - fPMVars->release(); - fPMVars = 0; - } + if (fPMVars) { + fPMVars->release(); + fPMVars = 0; + } #endif - pwrMgt->release(); - pwrMgt = 0; - } + pwrMgt->release(); + pwrMgt = 0; + } } -void IOService::PMDebug( uint32_t event, uintptr_t param1, uintptr_t param2 ) +void +IOService::PMDebug( uint32_t event, uintptr_t param1, uintptr_t param2 ) { - OUR_PMLog(event, param1, param2); + OUR_PMLog(event, param1, param2); } //********************************************************************************* @@ -647,13 +639,14 @@ void IOService::PMDebug( uint32_t event, uintptr_t param1, uintptr_t param2 ) // meaning it may not be initialized for power management. //********************************************************************************* -void IOService::joinPMtree( IOService * driver ) +void +IOService::joinPMtree( IOService * driver ) { - IOPlatformExpert * platform; + IOPlatformExpert * platform; - platform = getPlatform(); - assert(platform != 0); - platform->PMRegisterDevice(this, driver); + platform = getPlatform(); + assert(platform != 0); + platform->PMRegisterDevice(this, driver); } #ifndef __LP64__ @@ -663,9 +656,10 @@ void IOService::joinPMtree( IOService * driver ) // Power Managment is informing us that we are the root power domain. //********************************************************************************* -IOReturn IOService::youAreRoot( void ) +IOReturn +IOService::youAreRoot( void ) { - return IOPMNoErr; + return IOPMNoErr; } #endif /* !__LP64__ */ @@ -676,37 +670,37 @@ IOReturn IOService::youAreRoot( void ) // from power plane. 
//********************************************************************************* -void IOService::PMstop( void ) +void +IOService::PMstop( void ) { - IOPMRequest * request; + IOPMRequest * request; - if (!initialized) - return; + if (!initialized) { + return; + } - PM_LOCK(); + PM_LOCK(); - if (fLockedFlags.PMStop) - { - PM_LOG2("%s: PMstop() already stopped\n", fName); - PM_UNLOCK(); - return; - } + if (fLockedFlags.PMStop) { + PM_LOG2("%s: PMstop() already stopped\n", fName); + PM_UNLOCK(); + return; + } - // Inhibit future driver calls. - fLockedFlags.PMStop = true; + // Inhibit future driver calls. + fLockedFlags.PMStop = true; - // Wait for all prior driver calls to finish. - waitForPMDriverCall(); + // Wait for all prior driver calls to finish. + waitForPMDriverCall(); - PM_UNLOCK(); + PM_UNLOCK(); - // The rest of the work is performed async. - request = acquirePMRequest( this, kIOPMRequestTypePMStop ); - if (request) - { - PM_LOG2("%s: %p PMstop\n", getName(), OBFUSCATE(this)); - submitPMRequest( request ); - } + // The rest of the work is performed async. + request = acquirePMRequest( this, kIOPMRequestTypePMStop ); + if (request) { + PM_LOG2("%s: %p PMstop\n", getName(), OBFUSCATE(this)); + submitPMRequest( request ); + } } //********************************************************************************* @@ -715,100 +709,92 @@ void IOService::PMstop( void ) // Disconnect the node from all parents and children in the power plane. //********************************************************************************* -void IOService::handlePMstop( IOPMRequest * request ) -{ - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - IOService * theChild; - IOService * theParent; - - PM_ASSERT_IN_GATE(); - PM_LOG2("%s: %p %s start\n", getName(), OBFUSCATE(this), __FUNCTION__); - - // remove driver from prevent system sleep lists - getPMRootDomain()->updatePreventIdleSleepList(this, false); - getPMRootDomain()->updatePreventSystemSleepList(this, false); - - // remove the property - removeProperty(kPwrMgtKey); - - // detach parents - iter = getParentIterator(gIOPowerPlane); - if ( iter ) - { - while ( (next = iter->getNextObject()) ) - { - if ( (connection = OSDynamicCast(IOPowerConnection, next)) ) - { - theParent = (IOService *)connection->copyParentEntry(gIOPowerPlane); - if ( theParent ) - { - theParent->removePowerChild(connection); - theParent->release(); - } - } - } - iter->release(); - } - - // detach IOConnections - detachAbove( gIOPowerPlane ); - - // no more power state changes - fParentsKnowState = false; - - // detach children - iter = getChildIterator(gIOPowerPlane); - if ( iter ) - { - while ( (next = iter->getNextObject()) ) - { - if ( (connection = OSDynamicCast(IOPowerConnection, next)) ) - { - theChild = ((IOService *)(connection->copyChildEntry(gIOPowerPlane))); - if ( theChild ) - { - // detach nub from child - connection->detachFromChild(theChild, gIOPowerPlane); - theChild->release(); - } - // detach us from nub - detachFromChild(connection, gIOPowerPlane); - } - } - iter->release(); - } - - // Remove all interested drivers from the list, including the power - // controlling driver. - // - // Usually, the controlling driver and the policy-maker functionality - // are implemented by the same object, and without the deregistration, - // the object will be holding an extra retain on itself, and cannot - // be freed. 
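A user-space model of the self-retain described in the comment above, with std::shared_ptr standing in for retain/release and all names illustrative: a driver that registers itself as an interested party holds a reference to itself through the informee list until the entry is removed.

#include <iostream>
#include <memory>
#include <vector>

struct Driver : std::enable_shared_from_this<Driver> {
    // Models the IOPMinformeeList entry holding a retain on the driver.
    std::vector<std::shared_ptr<Driver>> interested;
    void registerSelf()   { interested.push_back(shared_from_this()); }
    void deregisterSelf() { interested.clear(); }   // breaks the cycle
};

int main() {
    auto d = std::make_shared<Driver>();
    d->registerSelf();
    std::cout << d.use_count() << "\n";   // 2: the self-reference pins the object
    d->deregisterSelf();
    std::cout << d.use_count() << "\n";   // 1: it can be freed again
    return 0;
}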
- - if ( fInterestedDrivers ) - { - IOPMinformeeList * list = fInterestedDrivers; - IOPMinformee * item; - - PM_LOCK(); - while ((item = list->firstInList())) - { - list->removeFromList(item->whatObject); - } - PM_UNLOCK(); - } - - // Clear idle period to prevent idleTimerExpired() from servicing - // idle timer expirations. - - fIdleTimerPeriod = 0; - if (fIdleTimer && thread_call_cancel(fIdleTimer)) - release(); - - PM_LOG2("%s: %p %s done\n", getName(), OBFUSCATE(this), __FUNCTION__); +void +IOService::handlePMstop( IOPMRequest * request ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + IOService * theChild; + IOService * theParent; + + PM_ASSERT_IN_GATE(); + PM_LOG2("%s: %p %s start\n", getName(), OBFUSCATE(this), __FUNCTION__); + + // remove driver from prevent system sleep lists + getPMRootDomain()->updatePreventIdleSleepList(this, false); + getPMRootDomain()->updatePreventSystemSleepList(this, false); + + // remove the property + removeProperty(kPwrMgtKey); + + // detach parents + iter = getParentIterator(gIOPowerPlane); + if (iter) { + while ((next = iter->getNextObject())) { + if ((connection = OSDynamicCast(IOPowerConnection, next))) { + theParent = (IOService *)connection->copyParentEntry(gIOPowerPlane); + if (theParent) { + theParent->removePowerChild(connection); + theParent->release(); + } + } + } + iter->release(); + } + + // detach IOConnections + detachAbove( gIOPowerPlane ); + + // no more power state changes + fParentsKnowState = false; + + // detach children + iter = getChildIterator(gIOPowerPlane); + if (iter) { + while ((next = iter->getNextObject())) { + if ((connection = OSDynamicCast(IOPowerConnection, next))) { + theChild = ((IOService *)(connection->copyChildEntry(gIOPowerPlane))); + if (theChild) { + // detach nub from child + connection->detachFromChild(theChild, gIOPowerPlane); + theChild->release(); + } + // detach us from nub + detachFromChild(connection, gIOPowerPlane); + } + } + iter->release(); + } + + // Remove all interested drivers from the list, including the power + // controlling driver. + // + // Usually, the controlling driver and the policy-maker functionality + // are implemented by the same object, and without the deregistration, + // the object will be holding an extra retain on itself, and cannot + // be freed. + + if (fInterestedDrivers) { + IOPMinformeeList * list = fInterestedDrivers; + IOPMinformee * item; + + PM_LOCK(); + while ((item = list->firstInList())) { + list->removeFromList(item->whatObject); + } + PM_UNLOCK(); + } + + // Clear idle period to prevent idleTimerExpired() from servicing + // idle timer expirations. + + fIdleTimerPeriod = 0; + if (fIdleTimer && thread_call_cancel(fIdleTimer)) { + release(); + } + + PM_LOG2("%s: %p %s done\n", getName(), OBFUSCATE(this), __FUNCTION__); } //********************************************************************************* @@ -817,106 +803,114 @@ void IOService::handlePMstop( IOPMRequest * request ) // Power Management is informing us who our children are. //********************************************************************************* -IOReturn IOService::addPowerChild( IOService * child ) -{ - IOPowerConnection * connection = 0; - IOPMRequest * requests[3] = {0, 0, 0}; - OSIterator * iter; - bool ok = true; - - if (!child) - return kIOReturnBadArgument; - - if (!initialized || !child->initialized) - return IOPMNotYetInitialized; - - OUR_PMLog( kPMLogAddChild, (uintptr_t) child, 0 ); - - do { - // Is this child already one of our children? 
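addPowerChild() wraps its setup in the do { ... } while (false) idiom seen here, so each failing step can break to one shared cleanup path instead of using goto. A minimal, self-contained model of the idiom; names and resources are illustrative.

#include <cstdlib>

static bool
with_two_buffers(void)
{
    char * a = NULL;
    char * b = NULL;
    bool ok = false;

    do {
        a = (char *) malloc(16);
        if (!a) break;                 // break == jump to shared cleanup
        b = (char *) malloc(16);
        if (!b) break;
        /* ... use a and b ... */
        ok = true;
    } while (false);

    free(b);                           // free(NULL) is a harmless no-op
    free(a);
    return ok;
}

int main() { return with_two_buffers() ? 0 : 1; }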
- - iter = child->getParentIterator( gIOPowerPlane ); - if ( iter ) - { - IORegistryEntry * entry; - OSObject * next; - - while ((next = iter->getNextObject())) - { - if ((entry = OSDynamicCast(IORegistryEntry, next)) && - isChild(entry, gIOPowerPlane)) - { - ok = false; - break; - } - } - iter->release(); - } - if (!ok) - { - PM_LOG("%s: %s (%p) is already a child\n", - getName(), child->getName(), OBFUSCATE(child)); - break; - } +IOReturn +IOService::addPowerChild( IOService * child ) +{ + IOPowerConnection * connection = 0; + IOPMRequest * requests[3] = {0, 0, 0}; + OSIterator * iter; + bool ok = true; + + if (!child) { + return kIOReturnBadArgument; + } + + if (!initialized || !child->initialized) { + return IOPMNotYetInitialized; + } + + OUR_PMLog( kPMLogAddChild, (uintptr_t) child, 0 ); + + do { + // Is this child already one of our children? + + iter = child->getParentIterator( gIOPowerPlane ); + if (iter) { + IORegistryEntry * entry; + OSObject * next; + + while ((next = iter->getNextObject())) { + if ((entry = OSDynamicCast(IORegistryEntry, next)) && + isChild(entry, gIOPowerPlane)) { + ok = false; + break; + } + } + iter->release(); + } + if (!ok) { + PM_LOG("%s: %s (%p) is already a child\n", + getName(), child->getName(), OBFUSCATE(child)); + break; + } - // Add the child to the power plane immediately, but the - // joining connection is marked as not ready. - // We want the child to appear in the power plane before - // returning to the caller, but don't want the caller to - // block on the PM work loop. + // Add the child to the power plane immediately, but the + // joining connection is marked as not ready. + // We want the child to appear in the power plane before + // returning to the caller, but don't want the caller to + // block on the PM work loop. - connection = new IOPowerConnection; - if (!connection) - break; + connection = new IOPowerConnection; + if (!connection) { + break; + } - // Create a chain of PM requests to perform the bottom-half - // work from the PM work loop. + // Create a chain of PM requests to perform the bottom-half + // work from the PM work loop. 
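A toy model, not kernel code, of the bottom-half chain described above: three requests linked so that each runs only after its predecessor retires, alternating between parent and child targets just as the AddPowerChild1/2/3 stages do.

#include <functional>
#include <iostream>
#include <vector>

int main() {
    // Models requests[0..2] with attachNextRequest(): strictly ordered stages.
    std::vector<std::function<void()>> chain = {
        [] { std::cout << "parent: addPowerChild1\n"; },
        [] { std::cout << "child:  addPowerChild2\n"; },
        [] { std::cout << "parent: addPowerChild3\n"; },
    };
    for (auto & stage : chain) {
        stage();   // a stage runs only once the previous one has retired
    }
    return 0;
}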
- requests[0] = acquirePMRequest( - /* target */ this, - /* type */ kIOPMRequestTypeAddPowerChild1 ); + requests[0] = acquirePMRequest( + /* target */ this, + /* type */ kIOPMRequestTypeAddPowerChild1 ); - requests[1] = acquirePMRequest( - /* target */ child, - /* type */ kIOPMRequestTypeAddPowerChild2 ); + requests[1] = acquirePMRequest( + /* target */ child, + /* type */ kIOPMRequestTypeAddPowerChild2 ); - requests[2] = acquirePMRequest( - /* target */ this, - /* type */ kIOPMRequestTypeAddPowerChild3 ); + requests[2] = acquirePMRequest( + /* target */ this, + /* type */ kIOPMRequestTypeAddPowerChild3 ); - if (!requests[0] || !requests[1] || !requests[2]) - break; + if (!requests[0] || !requests[1] || !requests[2]) { + break; + } - requests[0]->attachNextRequest( requests[1] ); - requests[1]->attachNextRequest( requests[2] ); + requests[0]->attachNextRequest( requests[1] ); + requests[1]->attachNextRequest( requests[2] ); - connection->init(); - connection->start(this); - connection->setAwaitingAck(false); - connection->setReadyFlag(false); + connection->init(); + connection->start(this); + connection->setAwaitingAck(false); + connection->setReadyFlag(false); - attachToChild( connection, gIOPowerPlane ); - connection->attachToChild( child, gIOPowerPlane ); + attachToChild( connection, gIOPowerPlane ); + connection->attachToChild( child, gIOPowerPlane ); - // connection needs to be released - requests[0]->fArg0 = connection; - requests[1]->fArg0 = connection; - requests[2]->fArg0 = connection; + // connection needs to be released + requests[0]->fArg0 = connection; + requests[1]->fArg0 = connection; + requests[2]->fArg0 = connection; - submitPMRequests( requests, 3 ); - return kIOReturnSuccess; - } - while (false); + submitPMRequests( requests, 3 ); + return kIOReturnSuccess; + }while (false); - if (connection) connection->release(); - if (requests[0]) releasePMRequest(requests[0]); - if (requests[1]) releasePMRequest(requests[1]); - if (requests[2]) releasePMRequest(requests[2]); + if (connection) { + connection->release(); + } + if (requests[0]) { + releasePMRequest(requests[0]); + } + if (requests[1]) { + releasePMRequest(requests[1]); + } + if (requests[2]) { + releasePMRequest(requests[2]); + } - // Silent failure, to prevent platform drivers from adding the child - // to the root domain. + // Silent failure, to prevent platform drivers from adding the child + // to the root domain. - return kIOReturnSuccess; + return kIOReturnSuccess; } //********************************************************************************* @@ -925,25 +919,24 @@ IOReturn IOService::addPowerChild( IOService * child ) // Step 1/3 of adding a power child. Called on the power parent. //********************************************************************************* -void IOService::addPowerChild1( IOPMRequest * request ) +void +IOService::addPowerChild1( IOPMRequest * request ) { - IOPMPowerStateIndex tempDesire = kPowerStateZero; + IOPMPowerStateIndex tempDesire = kPowerStateZero; - // Make us temporary usable before adding the child. + // Make us temporary usable before adding the child. 
- PM_ASSERT_IN_GATE(); - OUR_PMLog( kPMLogMakeUsable, kPMLogMakeUsable, 0 ); + PM_ASSERT_IN_GATE(); + OUR_PMLog( kPMLogMakeUsable, kPMLogMakeUsable, 0 ); - if (fControllingDriver && inPlane(gIOPowerPlane) && fParentsKnowState) - { - tempDesire = fHighestPowerState; - } + if (fControllingDriver && inPlane(gIOPowerPlane) && fParentsKnowState) { + tempDesire = fHighestPowerState; + } - if ((tempDesire != kPowerStateZero) && - (IS_PM_ROOT || (StateOrder(fMaxPowerState) >= StateOrder(tempDesire)))) - { - adjustPowerState(tempDesire); - } + if ((tempDesire != kPowerStateZero) && + (IS_PM_ROOT || (StateOrder(fMaxPowerState) >= StateOrder(tempDesire)))) { + adjustPowerState(tempDesire); + } } //********************************************************************************* @@ -953,53 +946,53 @@ void IOService::addPowerChild1( IOPMRequest * request ) // Execution blocked behind addPowerChild1. //********************************************************************************* -void IOService::addPowerChild2( IOPMRequest * request ) +void +IOService::addPowerChild2( IOPMRequest * request ) { - IOPowerConnection * connection = (IOPowerConnection *) request->fArg0; - IOService * parent; - IOPMPowerFlags powerFlags; - bool knowsState; - unsigned long powerState; - unsigned long tempDesire; + IOPowerConnection * connection = (IOPowerConnection *) request->fArg0; + IOService * parent; + IOPMPowerFlags powerFlags; + bool knowsState; + unsigned long powerState; + unsigned long tempDesire; - PM_ASSERT_IN_GATE(); - parent = (IOService *) connection->getParentEntry(gIOPowerPlane); + PM_ASSERT_IN_GATE(); + parent = (IOService *) connection->getParentEntry(gIOPowerPlane); - if (!parent || !inPlane(gIOPowerPlane)) - { - PM_LOG("%s: addPowerChild2 not in power plane\n", getName()); - return; - } + if (!parent || !inPlane(gIOPowerPlane)) { + PM_LOG("%s: addPowerChild2 not in power plane\n", getName()); + return; + } - // Parent will be waiting for us to complete this stage. - // It is safe to directly access parent's vars. + // Parent will be waiting for us to complete this stage. + // It is safe to directly access parent's vars. - knowsState = (parent->fPowerStates) && (parent->fParentsKnowState); - powerState = parent->fCurrentPowerState; + knowsState = (parent->fPowerStates) && (parent->fParentsKnowState); + powerState = parent->fCurrentPowerState; - if (knowsState) - powerFlags = parent->fPowerStates[powerState].outputPowerFlags; - else - powerFlags = 0; + if (knowsState) { + powerFlags = parent->fPowerStates[powerState].outputPowerFlags; + } else { + powerFlags = 0; + } - // Set our power parent. + // Set our power parent. 
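addPowerChild2() below derives the child's maximum power state from the parent's output flags via maxCapabilityForDomainState(). A user-space model of one plausible reading of that mapping, the classic default of returning the highest state whose input requirements are all satisfied by the domain; the types and flag values here are illustrative.

#include <cstddef>
#include <cstdint>
#include <iostream>

struct PSEntry { uint32_t inputPowerFlags; };

static std::size_t
maxCapability(const PSEntry * states, std::size_t n, uint32_t domainFlags)
{
    std::size_t best = 0;
    for (std::size_t i = 0; i < n; i++) {
        // A state is reachable if every input flag it needs is present
        // in the power flags supplied by the parent domain.
        if ((states[i].inputPowerFlags & domainFlags) == states[i].inputPowerFlags) {
            best = i;
        }
    }
    return best;
}

int main() {
    PSEntry states[] = { { 0 }, { 0x2 }, { 0x2 | 0x4 } };
    std::cout << maxCapability(states, 3, 0x2) << "\n";   // prints 1
    return 0;
}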
- OUR_PMLog(kPMLogSetParent, knowsState, powerFlags); + OUR_PMLog(kPMLogSetParent, knowsState, powerFlags); - setParentInfo( powerFlags, connection, knowsState ); + setParentInfo( powerFlags, connection, knowsState ); - connection->setReadyFlag(true); + connection->setReadyFlag(true); - if ( fControllingDriver && fParentsKnowState ) - { - fMaxPowerState = fControllingDriver->maxCapabilityForDomainState(fParentsCurrentPowerFlags); - // initially change into the state we are already in - tempDesire = fControllingDriver->initialPowerStateForDomainState(fParentsCurrentPowerFlags); - fPreviousRequestPowerFlags = (IOPMPowerFlags)(-1); - adjustPowerState(tempDesire); - } + if (fControllingDriver && fParentsKnowState) { + fMaxPowerState = fControllingDriver->maxCapabilityForDomainState(fParentsCurrentPowerFlags); + // initially change into the state we are already in + tempDesire = fControllingDriver->initialPowerStateForDomainState(fParentsCurrentPowerFlags); + fPreviousRequestPowerFlags = (IOPMPowerFlags)(-1); + adjustPowerState(tempDesire); + } - getPMRootDomain()->tagPowerPlaneService(this, &fPMActions); + getPMRootDomain()->tagPowerPlaneService(this, &fPMActions); } //********************************************************************************* @@ -1009,32 +1002,30 @@ void IOService::addPowerChild2( IOPMRequest * request ) // Execution blocked behind addPowerChild2. //********************************************************************************* -void IOService::addPowerChild3( IOPMRequest * request ) +void +IOService::addPowerChild3( IOPMRequest * request ) { - IOPowerConnection * connection = (IOPowerConnection *) request->fArg0; - IOService * child; - IOPMrootDomain * rootDomain = getPMRootDomain(); + IOPowerConnection * connection = (IOPowerConnection *) request->fArg0; + IOService * child; + IOPMrootDomain * rootDomain = getPMRootDomain(); - PM_ASSERT_IN_GATE(); - child = (IOService *) connection->getChildEntry(gIOPowerPlane); + PM_ASSERT_IN_GATE(); + child = (IOService *) connection->getChildEntry(gIOPowerPlane); - if (child && inPlane(gIOPowerPlane)) - { - if ((this != rootDomain) && child->getProperty("IOPMStrictTreeOrder")) - { - PM_LOG1("%s: strict PM order enforced\n", getName()); - fStrictTreeOrder = true; - } + if (child && inPlane(gIOPowerPlane)) { + if ((this != rootDomain) && child->getProperty("IOPMStrictTreeOrder")) { + PM_LOG1("%s: strict PM order enforced\n", getName()); + fStrictTreeOrder = true; + } - if (rootDomain) - rootDomain->joinAggressiveness( child ); - } - else - { - PM_LOG("%s: addPowerChild3 not in power plane\n", getName()); - } + if (rootDomain) { + rootDomain->joinAggressiveness( child ); + } + } else { + PM_LOG("%s: addPowerChild3 not in power plane\n", getName()); + } - connection->release(); + connection->release(); } #ifndef __LP64__ @@ -1047,10 +1038,11 @@ void IOService::addPowerChild3( IOPMRequest * request ) // to assume that state. //********************************************************************************* -IOReturn IOService::setPowerParent( - IOPowerConnection * theParent, bool stateKnown, IOPMPowerFlags powerFlags ) +IOReturn +IOService::setPowerParent( + IOPowerConnection * theParent, bool stateKnown, IOPMPowerFlags powerFlags ) { - return kIOReturnUnsupported; + return kIOReturnUnsupported; } #endif /* !__LP64__ */ @@ -1060,284 +1052,265 @@ IOReturn IOService::setPowerParent( // Called on a parent whose child is being removed by PMstop(). 
//********************************************************************************* -IOReturn IOService::removePowerChild( IOPowerConnection * theNub ) -{ - IORegistryEntry * theChild; +IOReturn +IOService::removePowerChild( IOPowerConnection * theNub ) +{ + IORegistryEntry * theChild; + + PM_ASSERT_IN_GATE(); + OUR_PMLog( kPMLogRemoveChild, 0, 0 ); + + theNub->retain(); + + // detach nub from child + theChild = theNub->copyChildEntry(gIOPowerPlane); + if (theChild) { + theNub->detachFromChild(theChild, gIOPowerPlane); + theChild->release(); + } + // detach from the nub + detachFromChild(theNub, gIOPowerPlane); + + // Are we awaiting an ack from this child? + if (theNub->getAwaitingAck()) { + // yes, pretend we got one + theNub->setAwaitingAck(false); + if (fHeadNotePendingAcks != 0) { + // that's one fewer ack to worry about + fHeadNotePendingAcks--; + + // is that the last? + if (fHeadNotePendingAcks == 0) { + stop_ack_timer(); + getPMRootDomain()->reset_watchdog_timer(this, 0); + + // This parent may have a request in the work queue that is + // blocked on fHeadNotePendingAcks=0. And removePowerChild() + // is called while executing the child's PMstop request so they + // can occur simultaneously. IOPMWorkQueue::checkForWork() must + // restart and check all request queues again. + + gIOPMWorkQueue->incrementProducerCount(); + } + } + } - PM_ASSERT_IN_GATE(); - OUR_PMLog( kPMLogRemoveChild, 0, 0 ); + theNub->release(); - theNub->retain(); + // A child has gone away, re-scan children desires and clamp bits. + // The fPendingAdjustPowerRequest helps to reduce redundant parent work. + + if (!fAdjustPowerScheduled) { + IOPMRequest * request; + request = acquirePMRequest( this, kIOPMRequestTypeAdjustPowerState ); + if (request) { + submitPMRequest( request ); + fAdjustPowerScheduled = true; + } + } - // detach nub from child - theChild = theNub->copyChildEntry(gIOPowerPlane); - if ( theChild ) - { - theNub->detachFromChild(theChild, gIOPowerPlane); - theChild->release(); - } - // detach from the nub - detachFromChild(theNub, gIOPowerPlane); + return IOPMNoErr; +} - // Are we awaiting an ack from this child? - if ( theNub->getAwaitingAck() ) - { - // yes, pretend we got one - theNub->setAwaitingAck(false); - if (fHeadNotePendingAcks != 0 ) - { - // that's one fewer ack to worry about - fHeadNotePendingAcks--; +//********************************************************************************* +// [public] registerPowerDriver +// +// A driver has called us volunteering to control power to our device. +//********************************************************************************* - // is that the last? 
- if ( fHeadNotePendingAcks == 0 ) - { - stop_ack_timer(); - getPMRootDomain()->reset_watchdog_timer(this, 0); +IOReturn +IOService::registerPowerDriver( + IOService * powerDriver, + IOPMPowerState * powerStates, + unsigned long numberOfStates ) +{ + IOPMRequest * request; + IOPMPSEntry * powerStatesCopy = 0; + IOPMPowerStateIndex stateOrder; + IOReturn error = kIOReturnSuccess; + + if (!initialized) { + return IOPMNotYetInitialized; + } + + if (!powerStates || (numberOfStates < 2)) { + OUR_PMLog(kPMLogControllingDriverErr5, numberOfStates, 0); + return kIOReturnBadArgument; + } + + if (!powerDriver || !powerDriver->initialized) { + OUR_PMLog(kPMLogControllingDriverErr4, 0, 0); + return kIOReturnBadArgument; + } + + if (powerStates[0].version > kIOPMPowerStateVersion2) { + OUR_PMLog(kPMLogControllingDriverErr1, powerStates[0].version, 0); + return kIOReturnBadArgument; + } + + do { + // Make a copy of the supplied power state array. + powerStatesCopy = IONew(IOPMPSEntry, numberOfStates); + if (!powerStatesCopy) { + error = kIOReturnNoMemory; + break; + } - // This parent may have a request in the work queue that is - // blocked on fHeadNotePendingAcks=0. And removePowerChild() - // is called while executing the child's PMstop request so they - // can occur simultaneously. IOPMWorkQueue::checkForWork() must - // restart and check all request queues again. + // Initialize to bogus values + for (IOPMPowerStateIndex i = 0; i < numberOfStates; i++) { + powerStatesCopy[i].stateOrderToIndex = kIOPMPowerStateMax; + } - gIOPMWorkQueue->incrementProducerCount(); - } - } - } + for (uint32_t i = 0; i < numberOfStates; i++) { + powerStatesCopy[i].capabilityFlags = powerStates[i].capabilityFlags; + powerStatesCopy[i].outputPowerFlags = powerStates[i].outputPowerCharacter; + powerStatesCopy[i].inputPowerFlags = powerStates[i].inputPowerRequirement; + powerStatesCopy[i].staticPower = powerStates[i].staticPower; + powerStatesCopy[i].settleUpTime = powerStates[i].settleUpTime; + powerStatesCopy[i].settleDownTime = powerStates[i].settleDownTime; + if (powerStates[i].version >= kIOPMPowerStateVersion2) { + stateOrder = powerStates[i].stateOrder; + } else { + stateOrder = i; + } + + if (stateOrder < numberOfStates) { + powerStatesCopy[i].stateOrder = stateOrder; + powerStatesCopy[stateOrder].stateOrderToIndex = i; + } + } - theNub->release(); + for (IOPMPowerStateIndex i = 0; i < numberOfStates; i++) { + if (powerStatesCopy[i].stateOrderToIndex == kIOPMPowerStateMax) { + // power state order missing + error = kIOReturnBadArgument; + break; + } + } + if (kIOReturnSuccess != error) { + break; + } - // A child has gone away, re-scan children desires and clamp bits. - // The fPendingAdjustPowerRequest helps to reduce redundant parent work. 
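The fAdjustPowerScheduled flag noted above is a debounce: any number of triggers while a re-evaluation is pending submit at most one kIOPMRequestTypeAdjustPowerState request. A minimal model with illustrative names.

#include <iostream>

struct Adjuster {
    bool scheduled = false;   // models fAdjustPowerScheduled
    int  queued    = 0;       // requests actually submitted

    void trigger() {
        if (!scheduled) {     // only the first trigger submits a request
            scheduled = true;
            queued++;
        }
    }
    void service() {          // the work loop re-scans desires, then re-arms
        scheduled = false;
    }
};

int main() {
    Adjuster a;
    a.trigger();
    a.trigger();
    a.trigger();              // e.g. three children removed back to back
    std::cout << a.queued << "\n";   // prints 1
    return 0;
}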
+ request = acquirePMRequest( this, kIOPMRequestTypeRegisterPowerDriver ); + if (!request) { + error = kIOReturnNoMemory; + break; + } - if (!fAdjustPowerScheduled) - { - IOPMRequest * request; - request = acquirePMRequest( this, kIOPMRequestTypeAdjustPowerState ); - if (request) - { - submitPMRequest( request ); - fAdjustPowerScheduled = true; - } - } + powerDriver->retain(); + request->fArg0 = (void *) powerDriver; + request->fArg1 = (void *) powerStatesCopy; + request->fArg2 = (void *) numberOfStates; - return IOPMNoErr; -} + submitPMRequest( request ); + return kIOReturnSuccess; + }while (false); -//********************************************************************************* -// [public] registerPowerDriver -// -// A driver has called us volunteering to control power to our device. -//********************************************************************************* + if (powerStatesCopy) { + IODelete(powerStatesCopy, IOPMPSEntry, numberOfStates); + } -IOReturn IOService::registerPowerDriver( - IOService * powerDriver, - IOPMPowerState * powerStates, - unsigned long numberOfStates ) -{ - IOPMRequest * request; - IOPMPSEntry * powerStatesCopy = 0; - IOPMPowerStateIndex stateOrder; - IOReturn error = kIOReturnSuccess; - - if (!initialized) - return IOPMNotYetInitialized; - - if (!powerStates || (numberOfStates < 2)) - { - OUR_PMLog(kPMLogControllingDriverErr5, numberOfStates, 0); - return kIOReturnBadArgument; - } - - if (!powerDriver || !powerDriver->initialized) - { - OUR_PMLog(kPMLogControllingDriverErr4, 0, 0); - return kIOReturnBadArgument; - } - - if (powerStates[0].version > kIOPMPowerStateVersion2) - { - OUR_PMLog(kPMLogControllingDriverErr1, powerStates[0].version, 0); - return kIOReturnBadArgument; - } - - do { - // Make a copy of the supplied power state array. 
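The copy loop that follows validates stateOrder by pre-filling stateOrderToIndex with a sentinel and then checking that every slot was written, which rejects duplicate or out-of-range orders. A self-contained model of that permutation check; the array values are illustrative.

#include <cstdint>
#include <iostream>

int main() {
    const uint32_t kSentinel = UINT32_MAX;    // models kIOPMPowerStateMax
    uint32_t order[3]        = { 0, 2, 1 };   // per-state stateOrder values
    uint32_t orderToIndex[3] = { kSentinel, kSentinel, kSentinel };

    for (uint32_t i = 0; i < 3; i++) {
        if (order[i] < 3) {                   // ignore out-of-range orders
            orderToIndex[order[i]] = i;
        }
    }

    bool valid = true;
    for (uint32_t i = 0; i < 3; i++) {
        if (orderToIndex[i] == kSentinel) {   // a hole means not a permutation
            valid = false;
        }
    }

    std::cout << (valid ? "ok" : "bad") << "\n";   // prints ok
    return 0;
}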
- powerStatesCopy = IONew(IOPMPSEntry, numberOfStates); - if (!powerStatesCopy) - { - error = kIOReturnNoMemory; - break; - } - - // Initialize to bogus values - for (IOPMPowerStateIndex i = 0; i < numberOfStates; i++) - powerStatesCopy[i].stateOrderToIndex = kIOPMPowerStateMax; - - for (uint32_t i = 0; i < numberOfStates; i++) - { - powerStatesCopy[i].capabilityFlags = powerStates[i].capabilityFlags; - powerStatesCopy[i].outputPowerFlags = powerStates[i].outputPowerCharacter; - powerStatesCopy[i].inputPowerFlags = powerStates[i].inputPowerRequirement; - powerStatesCopy[i].staticPower = powerStates[i].staticPower; - powerStatesCopy[i].settleUpTime = powerStates[i].settleUpTime; - powerStatesCopy[i].settleDownTime = powerStates[i].settleDownTime; - if (powerStates[i].version >= kIOPMPowerStateVersion2) - stateOrder = powerStates[i].stateOrder; - else - stateOrder = i; - - if (stateOrder < numberOfStates) - { - powerStatesCopy[i].stateOrder = stateOrder; - powerStatesCopy[stateOrder].stateOrderToIndex = i; - } - } - - for (IOPMPowerStateIndex i = 0; i < numberOfStates; i++) - { - if (powerStatesCopy[i].stateOrderToIndex == kIOPMPowerStateMax) - { - // power state order missing - error = kIOReturnBadArgument; - break; - } - } - if (kIOReturnSuccess != error) - break; - - request = acquirePMRequest( this, kIOPMRequestTypeRegisterPowerDriver ); - if (!request) - { - error = kIOReturnNoMemory; - break; - } - - powerDriver->retain(); - request->fArg0 = (void *) powerDriver; - request->fArg1 = (void *) powerStatesCopy; - request->fArg2 = (void *) numberOfStates; - - submitPMRequest( request ); - return kIOReturnSuccess; - } - while (false); - - if (powerStatesCopy) - IODelete(powerStatesCopy, IOPMPSEntry, numberOfStates); - - return error; + return error; } //********************************************************************************* // [private] handleRegisterPowerDriver //********************************************************************************* -void IOService::handleRegisterPowerDriver( IOPMRequest * request ) -{ - IOService * powerDriver = (IOService *) request->fArg0; - IOPMPSEntry * powerStates = (IOPMPSEntry *) request->fArg1; - unsigned long numberOfStates = (unsigned long) request->fArg2; - unsigned long i, stateIndex; - unsigned long lowestPowerState; - IOService * root; - OSIterator * iter; - - PM_ASSERT_IN_GATE(); - assert(powerStates); - assert(powerDriver); - assert(numberOfStates > 1); - - if ( !fNumberOfPowerStates ) - { - OUR_PMLog(kPMLogControllingDriver, - (unsigned long) numberOfStates, - (unsigned long) kIOPMPowerStateVersion1); - - fPowerStates = powerStates; - fNumberOfPowerStates = numberOfStates; - fControllingDriver = powerDriver; - fCurrentCapabilityFlags = fPowerStates[0].capabilityFlags; - - lowestPowerState = fPowerStates[0].stateOrderToIndex; - fHighestPowerState = fPowerStates[numberOfStates - 1].stateOrderToIndex; - - // OR'in all the output power flags - fMergedOutputPowerFlags = 0; - fDeviceUsablePowerState = lowestPowerState; - for ( i = 0; i < numberOfStates; i++ ) - { - fMergedOutputPowerFlags |= fPowerStates[i].outputPowerFlags; - - stateIndex = fPowerStates[i].stateOrderToIndex; - assert(stateIndex < numberOfStates); - if ((fDeviceUsablePowerState == lowestPowerState) && - (fPowerStates[stateIndex].capabilityFlags & IOPMDeviceUsable)) - { - // The minimum power state that the device is usable - fDeviceUsablePowerState = stateIndex; - } - } - - // Register powerDriver as interested, unless already done. 
- // We don't want to register the default implementation since - // it does nothing. One ramification of not always registering - // is the one fewer retain count held. - - root = getPlatform()->getProvider(); - assert(root); - if (!root || - ((OSMemberFunctionCast(void (*)(void), - root, &IOService::powerStateDidChangeTo)) != - ((OSMemberFunctionCast(void (*)(void), - this, &IOService::powerStateDidChangeTo)))) || - ((OSMemberFunctionCast(void (*)(void), - root, &IOService::powerStateWillChangeTo)) != - ((OSMemberFunctionCast(void (*)(void), - this, &IOService::powerStateWillChangeTo))))) - { - if (fInterestedDrivers->findItem(powerDriver) == NULL) - { - PM_LOCK(); - fInterestedDrivers->appendNewInformee(powerDriver); - PM_UNLOCK(); - } - } - - // Examine all existing power clients and perform limit check. - - if (fPowerClients && - (iter = OSCollectionIterator::withCollection(fPowerClients))) - { - const OSSymbol * client; - while ((client = (const OSSymbol *) iter->getNextObject())) - { - IOPMPowerStateIndex powerState = getPowerStateForClient(client); - if (powerState >= numberOfStates) - { - updatePowerClient(client, fHighestPowerState); - } - } - iter->release(); - } - - if ( inPlane(gIOPowerPlane) && fParentsKnowState ) - { - IOPMPowerStateIndex tempDesire; - fMaxPowerState = fControllingDriver->maxCapabilityForDomainState(fParentsCurrentPowerFlags); - // initially change into the state we are already in - tempDesire = fControllingDriver->initialPowerStateForDomainState(fParentsCurrentPowerFlags); - adjustPowerState(tempDesire); - } - } - else - { - OUR_PMLog(kPMLogControllingDriverErr2, numberOfStates, 0); - IODelete(powerStates, IOPMPSEntry, numberOfStates); - } - - powerDriver->release(); +void +IOService::handleRegisterPowerDriver( IOPMRequest * request ) +{ + IOService * powerDriver = (IOService *) request->fArg0; + IOPMPSEntry * powerStates = (IOPMPSEntry *) request->fArg1; + unsigned long numberOfStates = (unsigned long) request->fArg2; + unsigned long i, stateIndex; + unsigned long lowestPowerState; + IOService * root; + OSIterator * iter; + + PM_ASSERT_IN_GATE(); + assert(powerStates); + assert(powerDriver); + assert(numberOfStates > 1); + + if (!fNumberOfPowerStates) { + OUR_PMLog(kPMLogControllingDriver, + (unsigned long) numberOfStates, + (unsigned long) kIOPMPowerStateVersion1); + + fPowerStates = powerStates; + fNumberOfPowerStates = numberOfStates; + fControllingDriver = powerDriver; + fCurrentCapabilityFlags = fPowerStates[0].capabilityFlags; + + lowestPowerState = fPowerStates[0].stateOrderToIndex; + fHighestPowerState = fPowerStates[numberOfStates - 1].stateOrderToIndex; + + // OR'in all the output power flags + fMergedOutputPowerFlags = 0; + fDeviceUsablePowerState = lowestPowerState; + for (i = 0; i < numberOfStates; i++) { + fMergedOutputPowerFlags |= fPowerStates[i].outputPowerFlags; + + stateIndex = fPowerStates[i].stateOrderToIndex; + assert(stateIndex < numberOfStates); + if ((fDeviceUsablePowerState == lowestPowerState) && + (fPowerStates[stateIndex].capabilityFlags & IOPMDeviceUsable)) { + // The minimum power state that the device is usable + fDeviceUsablePowerState = stateIndex; + } + } + + // Register powerDriver as interested, unless already done. + // We don't want to register the default implementation since + // it does nothing. One ramification of not always registering + // is the one fewer retain count held. 
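The comparison below relies on OSMemberFunctionCast() resolving a virtual method against a concrete object into an ordinary function pointer, so unequal results for two objects mean one of them overrides the base implementation. A condensed sketch of the idiom; objA and objB are illustrative stand-ins for the objects being compared.

// Sketch only: same idiom as the root/this comparison below.
typedef void (*PlainFn)(void);

bool overridesDidChangeTo =
    OSMemberFunctionCast(PlainFn, objA, &IOService::powerStateDidChangeTo) !=
    OSMemberFunctionCast(PlainFn, objB, &IOService::powerStateDidChangeTo);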
+ + root = getPlatform()->getProvider(); + assert(root); + if (!root || + ((OSMemberFunctionCast(void (*)(void), + root, &IOService::powerStateDidChangeTo)) != + ((OSMemberFunctionCast(void (*)(void), + this, &IOService::powerStateDidChangeTo)))) || + ((OSMemberFunctionCast(void (*)(void), + root, &IOService::powerStateWillChangeTo)) != + ((OSMemberFunctionCast(void (*)(void), + this, &IOService::powerStateWillChangeTo))))) { + if (fInterestedDrivers->findItem(powerDriver) == NULL) { + PM_LOCK(); + fInterestedDrivers->appendNewInformee(powerDriver); + PM_UNLOCK(); + } + } + + // Examine all existing power clients and perform limit check. + + if (fPowerClients && + (iter = OSCollectionIterator::withCollection(fPowerClients))) { + const OSSymbol * client; + while ((client = (const OSSymbol *) iter->getNextObject())) { + IOPMPowerStateIndex powerState = getPowerStateForClient(client); + if (powerState >= numberOfStates) { + updatePowerClient(client, fHighestPowerState); + } + } + iter->release(); + } + + if (inPlane(gIOPowerPlane) && fParentsKnowState) { + IOPMPowerStateIndex tempDesire; + fMaxPowerState = fControllingDriver->maxCapabilityForDomainState(fParentsCurrentPowerFlags); + // initially change into the state we are already in + tempDesire = fControllingDriver->initialPowerStateForDomainState(fParentsCurrentPowerFlags); + adjustPowerState(tempDesire); + } + } else { + OUR_PMLog(kPMLogControllingDriverErr2, numberOfStates, 0); + IODelete(powerStates, IOPMPSEntry, numberOfStates); + } + + powerDriver->release(); } //********************************************************************************* @@ -1349,90 +1322,93 @@ void IOService::handleRegisterPowerDriver( IOPMRequest * request ) // out what the current power state of the device is. //********************************************************************************* -IOPMPowerFlags IOService::registerInterestedDriver( IOService * driver ) +IOPMPowerFlags +IOService::registerInterestedDriver( IOService * driver ) { - IOPMRequest * request; - bool signal; + IOPMRequest * request; + bool signal; - if (!driver || !initialized || !fInterestedDrivers) - return 0; + if (!driver || !initialized || !fInterestedDrivers) { + return 0; + } - PM_LOCK(); - signal = (!fInsertInterestSet && !fRemoveInterestSet); - if (fInsertInterestSet == NULL) - fInsertInterestSet = OSSet::withCapacity(4); - if (fInsertInterestSet) - { - fInsertInterestSet->setObject(driver); - if (fRemoveInterestSet) - fRemoveInterestSet->removeObject(driver); - } - PM_UNLOCK(); + PM_LOCK(); + signal = (!fInsertInterestSet && !fRemoveInterestSet); + if (fInsertInterestSet == NULL) { + fInsertInterestSet = OSSet::withCapacity(4); + } + if (fInsertInterestSet) { + fInsertInterestSet->setObject(driver); + if (fRemoveInterestSet) { + fRemoveInterestSet->removeObject(driver); + } + } + PM_UNLOCK(); - if (signal) - { - request = acquirePMRequest( this, kIOPMRequestTypeInterestChanged ); - if (request) - submitPMRequest( request ); - } + if (signal) { + request = acquirePMRequest( this, kIOPMRequestTypeInterestChanged ); + if (request) { + submitPMRequest( request ); + } + } - // This return value cannot be trusted, but return a value - // for those clients that care. + // This return value cannot be trusted, but return a value + // for those clients that care. 
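registerInterestedDriver() and deRegisterInterestedDriver() here stage changes in fInsertInterestSet/fRemoveInterestSet under the PM lock and submit a single kIOPMRequestTypeInterestChanged request only when the first change makes the staging sets go non-empty. A user-space model of that batching; names are illustrative.

#include <iostream>
#include <set>
#include <string>

struct InterestStaging {
    std::set<std::string> inserts;   // models fInsertInterestSet
    std::set<std::string> removes;   // models fRemoveInterestSet
    int wakeups = 0;                 // interest-changed requests submitted

    void add(const std::string & driver) {
        bool signal = inserts.empty() && removes.empty();
        inserts.insert(driver);
        removes.erase(driver);       // an add cancels a staged remove
        if (signal) {
            wakeups++;               // one request will drain both sets
        }
    }
};

int main() {
    InterestStaging s;
    s.add("driverA");
    s.add("driverB");
    s.add("driverC");
    std::cout << s.wakeups << "\n";   // prints 1
    return 0;
}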
- OUR_PMLog(kPMLogInterestedDriver, kIOPMDeviceUsable, 2); - return kIOPMDeviceUsable; + OUR_PMLog(kPMLogInterestedDriver, kIOPMDeviceUsable, 2); + return kIOPMDeviceUsable; } //********************************************************************************* // [public] deRegisterInterestedDriver //********************************************************************************* -IOReturn IOService::deRegisterInterestedDriver( IOService * driver ) -{ - IOPMinformee * item; - IOPMRequest * request; - bool signal; - - if (!driver) - return kIOReturnBadArgument; - if (!initialized || !fInterestedDrivers) - return IOPMNotPowerManaged; - - PM_LOCK(); - if (fInsertInterestSet) - { - fInsertInterestSet->removeObject(driver); - } - - item = fInterestedDrivers->findItem(driver); - if (!item) - { - PM_UNLOCK(); - return kIOReturnNotFound; - } - - signal = (!fRemoveInterestSet && !fInsertInterestSet); - if (fRemoveInterestSet == NULL) - fRemoveInterestSet = OSSet::withCapacity(4); - if (fRemoveInterestSet) - { - fRemoveInterestSet->setObject(driver); - if (item->active) - { - item->active = false; - waitForPMDriverCall( driver ); - } - } - PM_UNLOCK(); +IOReturn +IOService::deRegisterInterestedDriver( IOService * driver ) +{ + IOPMinformee * item; + IOPMRequest * request; + bool signal; + + if (!driver) { + return kIOReturnBadArgument; + } + if (!initialized || !fInterestedDrivers) { + return IOPMNotPowerManaged; + } + + PM_LOCK(); + if (fInsertInterestSet) { + fInsertInterestSet->removeObject(driver); + } + + item = fInterestedDrivers->findItem(driver); + if (!item) { + PM_UNLOCK(); + return kIOReturnNotFound; + } + + signal = (!fRemoveInterestSet && !fInsertInterestSet); + if (fRemoveInterestSet == NULL) { + fRemoveInterestSet = OSSet::withCapacity(4); + } + if (fRemoveInterestSet) { + fRemoveInterestSet->setObject(driver); + if (item->active) { + item->active = false; + waitForPMDriverCall( driver ); + } + } + PM_UNLOCK(); - if (signal) - { - request = acquirePMRequest( this, kIOPMRequestTypeInterestChanged ); - if (request) - submitPMRequest( request ); - } + if (signal) { + request = acquirePMRequest( this, kIOPMRequestTypeInterestChanged ); + if (request) { + submitPMRequest( request ); + } + } - return IOPMNoErr; + return IOPMNoErr; } //********************************************************************************* @@ -1441,50 +1417,44 @@ IOReturn IOService::deRegisterInterestedDriver( IOService * driver ) // Handle interest added or removed. 
//********************************************************************************* -void IOService::handleInterestChanged( IOPMRequest * request ) -{ - IOService * driver; - IOPMinformee * informee; - IOPMinformeeList * list = fInterestedDrivers; - - PM_LOCK(); - - if (fInsertInterestSet) - { - while ((driver = (IOService *) fInsertInterestSet->getAnyObject())) - { - if (list->findItem(driver) == NULL) - { - informee = list->appendNewInformee(driver); - } - fInsertInterestSet->removeObject(driver); - } - fInsertInterestSet->release(); - fInsertInterestSet = 0; - } - - if (fRemoveInterestSet) - { - while ((driver = (IOService *) fRemoveInterestSet->getAnyObject())) - { - informee = list->findItem(driver); - if (informee) - { - // Clean-up async interest acknowledgement - if (fHeadNotePendingAcks && informee->timer) - { - informee->timer = 0; - fHeadNotePendingAcks--; - } - list->removeFromList(driver); - } - fRemoveInterestSet->removeObject(driver); - } - fRemoveInterestSet->release(); - fRemoveInterestSet = 0; - } - - PM_UNLOCK(); +void +IOService::handleInterestChanged( IOPMRequest * request ) +{ + IOService * driver; + IOPMinformee * informee; + IOPMinformeeList * list = fInterestedDrivers; + + PM_LOCK(); + + if (fInsertInterestSet) { + while ((driver = (IOService *) fInsertInterestSet->getAnyObject())) { + if (list->findItem(driver) == NULL) { + informee = list->appendNewInformee(driver); + } + fInsertInterestSet->removeObject(driver); + } + fInsertInterestSet->release(); + fInsertInterestSet = 0; + } + + if (fRemoveInterestSet) { + while ((driver = (IOService *) fRemoveInterestSet->getAnyObject())) { + informee = list->findItem(driver); + if (informee) { + // Clean-up async interest acknowledgement + if (fHeadNotePendingAcks && informee->timer) { + informee->timer = 0; + fHeadNotePendingAcks--; + } + list->removeFromList(driver); + } + fRemoveInterestSet->removeObject(driver); + } + fRemoveInterestSet->release(); + fRemoveInterestSet = 0; + } + + PM_UNLOCK(); } //********************************************************************************* @@ -1501,130 +1471,125 @@ void IOService::handleInterestChanged( IOPMRequest * request ) // of a "current change note".) 
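Editorial sketch: the removal branch of handleInterestChanged above must also retire any acknowledgement still owed by a driver being deregistered mid-notification (informee->timer armed), otherwise the current change note would stall on fHeadNotePendingAcks forever. A heavily simplified standalone model of that bookkeeping, plain C++17, hypothetical types:

#include <map>
#include <string>

struct Informee {
    int timer = 0;                 // nonzero while an async ack is outstanding
};

struct ChangeNote {
    std::map<std::string, Informee> informees;
    unsigned pendingAcks = 0;      // acks the current change note still waits on

    void removeDriver(const std::string &name) {
        auto it = informees.find(name);
        if (it == informees.end()) {
            return;
        }
        if (pendingAcks && it->second.timer) {
            it->second.timer = 0;  // clean up the async acknowledgement
            --pendingAcks;         // one fewer ack to wait for
        }
        informees.erase(it);
    }
};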
//********************************************************************************* -IOReturn IOService::acknowledgePowerChange( IOService * whichObject ) +IOReturn +IOService::acknowledgePowerChange( IOService * whichObject ) { - IOPMRequest * request; + IOPMRequest * request; - if (!initialized) - return IOPMNotYetInitialized; - if (!whichObject) - return kIOReturnBadArgument; + if (!initialized) { + return IOPMNotYetInitialized; + } + if (!whichObject) { + return kIOReturnBadArgument; + } - request = acquirePMRequest( this, kIOPMRequestTypeAckPowerChange ); - if (!request) - return kIOReturnNoMemory; + request = acquirePMRequest( this, kIOPMRequestTypeAckPowerChange ); + if (!request) { + return kIOReturnNoMemory; + } - whichObject->retain(); - request->fArg0 = whichObject; + whichObject->retain(); + request->fArg0 = whichObject; - submitPMRequest( request ); - return IOPMNoErr; + submitPMRequest( request ); + return IOPMNoErr; } //********************************************************************************* // [private] handleAcknowledgePowerChange //********************************************************************************* -bool IOService::handleAcknowledgePowerChange( IOPMRequest * request ) -{ - IOPMinformee * informee; - unsigned long childPower = kIOPMUnknown; - IOService * theChild; - IOService * whichObject; - bool all_acked = false; - - PM_ASSERT_IN_GATE(); - whichObject = (IOService *) request->fArg0; - assert(whichObject); - - // one of our interested drivers? - informee = fInterestedDrivers->findItem( whichObject ); - if ( informee == NULL ) - { - if ( !isChild(whichObject, gIOPowerPlane) ) - { - OUR_PMLog(kPMLogAcknowledgeErr1, 0, 0); - goto no_err; - } else { - OUR_PMLog(kPMLogChildAcknowledge, fHeadNotePendingAcks, 0); - } - } else { - OUR_PMLog(kPMLogDriverAcknowledge, fHeadNotePendingAcks, 0); - } - - if ( fHeadNotePendingAcks != 0 ) - { - assert(fPowerStates != NULL); - - // yes, make sure we're expecting acks - if ( informee != NULL ) - { - // it's an interested driver - // make sure we're expecting this ack - if ( informee->timer != 0 ) - { - - if (informee->timer > 0) - { - uint64_t nsec = computeTimeDeltaNS(&informee->startTime); - if (nsec > LOG_SETPOWER_TIMES) { - getPMRootDomain()->pmStatsRecordApplicationResponse( - gIOPMStatsDriverPSChangeSlow, informee->whatObject->getName(), - fDriverCallReason, NS_TO_MS(nsec), informee->whatObject->getRegistryEntryID(), - NULL, fHeadNotePowerState); - } - } - - // mark it acked - informee->timer = 0; - // that's one fewer to worry about - fHeadNotePendingAcks--; - } else { - // this driver has already acked - OUR_PMLog(kPMLogAcknowledgeErr2, 0, 0); - } - } else { - // it's a child - // make sure we're expecting this ack - if ( ((IOPowerConnection *)whichObject)->getAwaitingAck() ) - { - // that's one fewer to worry about - fHeadNotePendingAcks--; - ((IOPowerConnection *)whichObject)->setAwaitingAck(false); - theChild = (IOService *)whichObject->copyChildEntry(gIOPowerPlane); - if ( theChild ) - { - childPower = theChild->currentPowerConsumption(); - theChild->release(); - } - if ( childPower == kIOPMUnknown ) - { - fHeadNotePowerArrayEntry->staticPower = kIOPMUnknown; - } else { - if (fHeadNotePowerArrayEntry->staticPower != kIOPMUnknown) - { - fHeadNotePowerArrayEntry->staticPower += childPower; - } - } - } - } - - if ( fHeadNotePendingAcks == 0 ) { - // yes, stop the timer - stop_ack_timer(); - // and now we can continue - all_acked = true; - getPMRootDomain()->reset_watchdog_timer(this, 0); - } - } else { - 
OUR_PMLog(kPMLogAcknowledgeErr3, 0, 0); // not expecting anybody to ack - } +bool +IOService::handleAcknowledgePowerChange( IOPMRequest * request ) +{ + IOPMinformee * informee; + unsigned long childPower = kIOPMUnknown; + IOService * theChild; + IOService * whichObject; + bool all_acked = false; + + PM_ASSERT_IN_GATE(); + whichObject = (IOService *) request->fArg0; + assert(whichObject); + + // one of our interested drivers? + informee = fInterestedDrivers->findItem( whichObject ); + if (informee == NULL) { + if (!isChild(whichObject, gIOPowerPlane)) { + OUR_PMLog(kPMLogAcknowledgeErr1, 0, 0); + goto no_err; + } else { + OUR_PMLog(kPMLogChildAcknowledge, fHeadNotePendingAcks, 0); + } + } else { + OUR_PMLog(kPMLogDriverAcknowledge, fHeadNotePendingAcks, 0); + } + + if (fHeadNotePendingAcks != 0) { + assert(fPowerStates != NULL); + + // yes, make sure we're expecting acks + if (informee != NULL) { + // it's an interested driver + // make sure we're expecting this ack + if (informee->timer != 0) { + if (informee->timer > 0) { + uint64_t nsec = computeTimeDeltaNS(&informee->startTime); + if (nsec > LOG_SETPOWER_TIMES) { + getPMRootDomain()->pmStatsRecordApplicationResponse( + gIOPMStatsDriverPSChangeSlow, informee->whatObject->getName(), + fDriverCallReason, NS_TO_MS(nsec), informee->whatObject->getRegistryEntryID(), + NULL, fHeadNotePowerState); + } + } + + // mark it acked + informee->timer = 0; + // that's one fewer to worry about + fHeadNotePendingAcks--; + } else { + // this driver has already acked + OUR_PMLog(kPMLogAcknowledgeErr2, 0, 0); + } + } else { + // it's a child + // make sure we're expecting this ack + if (((IOPowerConnection *)whichObject)->getAwaitingAck()) { + // that's one fewer to worry about + fHeadNotePendingAcks--; + ((IOPowerConnection *)whichObject)->setAwaitingAck(false); + theChild = (IOService *)whichObject->copyChildEntry(gIOPowerPlane); + if (theChild) { + childPower = theChild->currentPowerConsumption(); + theChild->release(); + } + if (childPower == kIOPMUnknown) { + fHeadNotePowerArrayEntry->staticPower = kIOPMUnknown; + } else { + if (fHeadNotePowerArrayEntry->staticPower != kIOPMUnknown) { + fHeadNotePowerArrayEntry->staticPower += childPower; + } + } + } + } + + if (fHeadNotePendingAcks == 0) { + // yes, stop the timer + stop_ack_timer(); + // and now we can continue + all_acked = true; + getPMRootDomain()->reset_watchdog_timer(this, 0); + } + } else { + OUR_PMLog(kPMLogAcknowledgeErr3, 0, 0); // not expecting anybody to ack + } no_err: - if (whichObject) - whichObject->release(); + if (whichObject) { + whichObject->release(); + } - return all_acked; + return all_acked; } //********************************************************************************* @@ -1635,121 +1600,136 @@ no_err: // We continue to process the power state change. 
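Editorial sketch: the slow-responder check in handleAcknowledgePowerChange above works off the informee's armed timer: the notification start time is recorded, and an ack arriving after the LOG_SETPOWER_TIMES threshold is reported with its latency in milliseconds. A standalone analog in plain C++17, using std::chrono in place of the kernel time helpers; the 50 ms threshold is an assumption for illustration, not the kernel's actual value:

#include <chrono>
#include <cstdint>
#include <cstdio>

using Clock = std::chrono::steady_clock;

// Assumed threshold for this sketch; LOG_SETPOWER_TIMES is defined elsewhere.
constexpr uint64_t kLogThresholdNs = 50ULL * 1000 * 1000;

void ackArrived(Clock::time_point startTime, const char *driverName) {
    const uint64_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(
        Clock::now() - startTime).count();
    if (nsec > kLogThresholdNs) {
        // Analog of pmStatsRecordApplicationResponse(... NS_TO_MS(nsec) ...).
        std::printf("%s acked power change after %llu ms\n",
            driverName, (unsigned long long)(nsec / 1000000));
    }
}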
//********************************************************************************* -IOReturn IOService::acknowledgeSetPowerState( void ) +IOReturn +IOService::acknowledgeSetPowerState( void ) { - IOPMRequest * request; + IOPMRequest * request; - if (!initialized) - return IOPMNotYetInitialized; + if (!initialized) { + return IOPMNotYetInitialized; + } - request = acquirePMRequest( this, kIOPMRequestTypeAckSetPowerState ); - if (!request) - return kIOReturnNoMemory; + request = acquirePMRequest( this, kIOPMRequestTypeAckSetPowerState ); + if (!request) { + return kIOReturnNoMemory; + } - submitPMRequest( request ); - return kIOReturnSuccess; + submitPMRequest( request ); + return kIOReturnSuccess; } //********************************************************************************* // [private] adjustPowerState //********************************************************************************* -void IOService::adjustPowerState( uint32_t clamp ) +void +IOService::adjustPowerState( uint32_t clamp ) { - PM_ASSERT_IN_GATE(); - computeDesiredState(clamp, false); - if (fControllingDriver && fParentsKnowState && inPlane(gIOPowerPlane)) - { - IOPMPowerChangeFlags changeFlags = kIOPMSelfInitiated; + PM_ASSERT_IN_GATE(); + computeDesiredState(clamp, false); + if (fControllingDriver && fParentsKnowState && inPlane(gIOPowerPlane)) { + IOPMPowerChangeFlags changeFlags = kIOPMSelfInitiated; - // Indicate that children desires must be ignored, and do not ask - // apps for permission to drop power. This is used by root domain - // for demand sleep. + // Indicate that children desires must be ignored, and do not ask + // apps for permission to drop power. This is used by root domain + // for demand sleep. - if (getPMRequestType() == kIOPMRequestTypeRequestPowerStateOverride) - changeFlags |= (kIOPMIgnoreChildren | kIOPMSkipAskPowerDown); + if (getPMRequestType() == kIOPMRequestTypeRequestPowerStateOverride) { + changeFlags |= (kIOPMIgnoreChildren | kIOPMSkipAskPowerDown); + } - startPowerChange( - /* flags */ changeFlags, - /* power state */ fDesiredPowerState, - /* domain flags */ 0, - /* connection */ 0, - /* parent flags */ 0); - } + startPowerChange( + /* flags */ changeFlags, + /* power state */ fDesiredPowerState, + /* domain flags */ 0, + /* connection */ 0, + /* parent flags */ 0); + } } //********************************************************************************* // [public] synchronizePowerTree //********************************************************************************* -IOReturn IOService::synchronizePowerTree( - IOOptionBits options, - IOService * notifyRoot ) +IOReturn +IOService::synchronizePowerTree( + IOOptionBits options, + IOService * notifyRoot ) { - IOPMRequest * request_c = 0; - IOPMRequest * request_s; + IOPMRequest * request_c = 0; + IOPMRequest * request_s; - if (this != getPMRootDomain()) - return kIOReturnBadArgument; - if (!initialized) - return kIOPMNotYetInitialized; + if (this != getPMRootDomain()) { + return kIOReturnBadArgument; + } + if (!initialized) { + return kIOPMNotYetInitialized; + } - OUR_PMLog(kPMLogCSynchronizePowerTree, options, (notifyRoot != 0)); + OUR_PMLog(kPMLogCSynchronizePowerTree, options, (notifyRoot != 0)); - if (notifyRoot) - { - IOPMRequest * nr; + if (notifyRoot) { + IOPMRequest * nr; - // Cancels don't need to be synchronized. 
- nr = acquirePMRequest(notifyRoot, kIOPMRequestTypeChildNotifyDelayCancel); - if (nr) submitPMRequest(nr); - nr = acquirePMRequest(getPMRootDomain(), kIOPMRequestTypeChildNotifyDelayCancel); - if (nr) submitPMRequest(nr); - } + // Cancels don't need to be synchronized. + nr = acquirePMRequest(notifyRoot, kIOPMRequestTypeChildNotifyDelayCancel); + if (nr) { + submitPMRequest(nr); + } + nr = acquirePMRequest(getPMRootDomain(), kIOPMRequestTypeChildNotifyDelayCancel); + if (nr) { + submitPMRequest(nr); + } + } - request_s = acquirePMRequest( this, kIOPMRequestTypeSynchronizePowerTree ); - if (!request_s) - goto error_no_memory; + request_s = acquirePMRequest( this, kIOPMRequestTypeSynchronizePowerTree ); + if (!request_s) { + goto error_no_memory; + } - if (options & kIOPMSyncCancelPowerDown) - request_c = acquirePMRequest( this, kIOPMRequestTypeIdleCancel ); - if (request_c) - { - request_c->attachNextRequest( request_s ); - submitPMRequest(request_c); - } + if (options & kIOPMSyncCancelPowerDown) { + request_c = acquirePMRequest( this, kIOPMRequestTypeIdleCancel ); + } + if (request_c) { + request_c->attachNextRequest( request_s ); + submitPMRequest(request_c); + } - request_s->fArg0 = (void *)(uintptr_t) options; - submitPMRequest(request_s); + request_s->fArg0 = (void *)(uintptr_t) options; + submitPMRequest(request_s); - return kIOReturnSuccess; + return kIOReturnSuccess; error_no_memory: - if (request_c) releasePMRequest(request_c); - if (request_s) releasePMRequest(request_s); - return kIOReturnNoMemory; + if (request_c) { + releasePMRequest(request_c); + } + if (request_s) { + releasePMRequest(request_s); + } + return kIOReturnNoMemory; } //********************************************************************************* // [private] handleSynchronizePowerTree //********************************************************************************* -void IOService::handleSynchronizePowerTree( IOPMRequest * request ) +void +IOService::handleSynchronizePowerTree( IOPMRequest * request ) { - PM_ASSERT_IN_GATE(); - if (fControllingDriver && fParentsKnowState && inPlane(gIOPowerPlane) && - (fCurrentPowerState == fHighestPowerState)) - { - IOOptionBits options = (uintptr_t) request->fArg0; + PM_ASSERT_IN_GATE(); + if (fControllingDriver && fParentsKnowState && inPlane(gIOPowerPlane) && + (fCurrentPowerState == fHighestPowerState)) { + IOOptionBits options = (uintptr_t) request->fArg0; - startPowerChange( - /* flags */ kIOPMSelfInitiated | kIOPMSynchronize | - (options & kIOPMSyncNoChildNotify), - /* power state */ fCurrentPowerState, - /* domain flags */ 0, - /* connection */ 0, - /* parent flags */ 0); - } + startPowerChange( + /* flags */ kIOPMSelfInitiated | kIOPMSynchronize | + (options & kIOPMSyncNoChildNotify), + /* power state */ fCurrentPowerState, + /* domain flags */ 0, + /* connection */ 0, + /* parent flags */ 0); + } } #ifndef __LP64__ @@ -1763,12 +1743,13 @@ void IOService::handleSynchronizePowerTree( IOPMRequest * request ) // kind of change is occuring in the domain. 
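Editorial sketch: when kIOPMSyncCancelPowerDown is passed, synchronizePowerTree above attaches the idle-cancel request ahead of the synchronize request (request_c->attachNextRequest(request_s)), so the two are serviced back to back in that order. A minimal standalone model of that ordering contract, plain C++17 with hypothetical types:

#include <memory>
#include <vector>

struct Request {
    const char *type;
    std::shared_ptr<Request> next;   // analog of attachNextRequest()
};

// Work queue standing in for the PM request queue.
static std::vector<std::shared_ptr<Request>> gWorkQueue;

void submitSynchronize(bool cancelPowerDown) {
    auto sync = std::make_shared<Request>(Request{"synchronize", nullptr});
    if (cancelPowerDown) {
        // The cancel is chained ahead of the sync, so it is serviced first.
        auto cancel = std::make_shared<Request>(Request{"idle-cancel", sync});
        gWorkQueue.push_back(cancel);
    }
    gWorkQueue.push_back(sync);
}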
//********************************************************************************* -IOReturn IOService::powerDomainWillChangeTo( - IOPMPowerFlags newPowerFlags, - IOPowerConnection * whichParent ) +IOReturn +IOService::powerDomainWillChangeTo( + IOPMPowerFlags newPowerFlags, + IOPowerConnection * whichParent ) { - assert(false); - return kIOReturnUnsupported; + assert(false); + return kIOReturnUnsupported; } #endif /* !__LP64__ */ @@ -1776,110 +1757,105 @@ IOReturn IOService::powerDomainWillChangeTo( // [private] handlePowerDomainWillChangeTo //********************************************************************************* -void IOService::handlePowerDomainWillChangeTo( IOPMRequest * request ) -{ - IOPMPowerFlags parentPowerFlags = (IOPMPowerFlags) request->fArg0; - IOPowerConnection * whichParent = (IOPowerConnection *) request->fArg1; - IOPMPowerChangeFlags parentChangeFlags = (IOPMPowerChangeFlags)(uintptr_t) request->fArg2; - IOPMPowerChangeFlags myChangeFlags; - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - IOPMPowerStateIndex maxPowerState; - IOPMPowerFlags combinedPowerFlags; - bool savedParentsKnowState; - IOReturn result = IOPMAckImplied; - - PM_ASSERT_IN_GATE(); - OUR_PMLog(kPMLogWillChange, parentPowerFlags, 0); - - if (!inPlane(gIOPowerPlane) || !whichParent || !whichParent->getAwaitingAck()) - { - PM_LOG("%s::%s not in power tree\n", getName(), __FUNCTION__); - goto exit_no_ack; - } - - savedParentsKnowState = fParentsKnowState; - - // Combine parents' output power flags. - - combinedPowerFlags = 0; - - iter = getParentIterator(gIOPowerPlane); - if ( iter ) - { - while ( (next = iter->getNextObject()) ) - { - if ( (connection = OSDynamicCast(IOPowerConnection, next)) ) - { - if ( connection == whichParent ) - combinedPowerFlags |= parentPowerFlags; - else - combinedPowerFlags |= connection->parentCurrentPowerFlags(); - } - } - iter->release(); - } - - // If our initial change has yet to occur, then defer the power change - // until after the power domain has completed its power transition. - - if ( fControllingDriver && !fInitialPowerChange ) - { - maxPowerState = fControllingDriver->maxCapabilityForDomainState( - combinedPowerFlags); - - if (parentChangeFlags & kIOPMDomainPowerDrop) - { - // fMaxPowerState set a limit on self-initiated power changes. - // Update it before a parent power drop. - fMaxPowerState = maxPowerState; - } - - // Use kIOPMSynchronize below instead of kIOPMRootBroadcastFlags - // to avoid propagating the root change flags if any service must - // change power state due to root's will-change notification. - // Root does not change power state for kIOPMSynchronize. - - myChangeFlags = kIOPMParentInitiated | kIOPMDomainWillChange | - (parentChangeFlags & kIOPMSynchronize); - - result = startPowerChange( - /* flags */ myChangeFlags, - /* power state */ maxPowerState, - /* domain flags */ combinedPowerFlags, - /* connection */ whichParent, - /* parent flags */ parentPowerFlags); - } - - // If parent is dropping power, immediately update the parent's - // capability flags. Any future merging of parent(s) combined - // power flags should account for this power drop. - - if (parentChangeFlags & kIOPMDomainPowerDrop) - { - setParentInfo(parentPowerFlags, whichParent, true); - } - - // Parent is expecting an ACK from us. If we did not embark on a state - // transition, i.e. startPowerChange() returned IOPMAckImplied. We are - // still required to issue an ACK to our parent. 
- - if (IOPMAckImplied == result) - { - IOService * parent; - parent = (IOService *) whichParent->copyParentEntry(gIOPowerPlane); - assert(parent); - if ( parent ) - { - parent->acknowledgePowerChange( whichParent ); - parent->release(); - } - } +void +IOService::handlePowerDomainWillChangeTo( IOPMRequest * request ) +{ + IOPMPowerFlags parentPowerFlags = (IOPMPowerFlags) request->fArg0; + IOPowerConnection * whichParent = (IOPowerConnection *) request->fArg1; + IOPMPowerChangeFlags parentChangeFlags = (IOPMPowerChangeFlags)(uintptr_t) request->fArg2; + IOPMPowerChangeFlags myChangeFlags; + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + IOPMPowerStateIndex maxPowerState; + IOPMPowerFlags combinedPowerFlags; + bool savedParentsKnowState; + IOReturn result = IOPMAckImplied; + + PM_ASSERT_IN_GATE(); + OUR_PMLog(kPMLogWillChange, parentPowerFlags, 0); + + if (!inPlane(gIOPowerPlane) || !whichParent || !whichParent->getAwaitingAck()) { + PM_LOG("%s::%s not in power tree\n", getName(), __FUNCTION__); + goto exit_no_ack; + } + + savedParentsKnowState = fParentsKnowState; + + // Combine parents' output power flags. + + combinedPowerFlags = 0; + + iter = getParentIterator(gIOPowerPlane); + if (iter) { + while ((next = iter->getNextObject())) { + if ((connection = OSDynamicCast(IOPowerConnection, next))) { + if (connection == whichParent) { + combinedPowerFlags |= parentPowerFlags; + } else { + combinedPowerFlags |= connection->parentCurrentPowerFlags(); + } + } + } + iter->release(); + } + + // If our initial change has yet to occur, then defer the power change + // until after the power domain has completed its power transition. + + if (fControllingDriver && !fInitialPowerChange) { + maxPowerState = fControllingDriver->maxCapabilityForDomainState( + combinedPowerFlags); + + if (parentChangeFlags & kIOPMDomainPowerDrop) { + // fMaxPowerState set a limit on self-initiated power changes. + // Update it before a parent power drop. + fMaxPowerState = maxPowerState; + } + + // Use kIOPMSynchronize below instead of kIOPMRootBroadcastFlags + // to avoid propagating the root change flags if any service must + // change power state due to root's will-change notification. + // Root does not change power state for kIOPMSynchronize. + + myChangeFlags = kIOPMParentInitiated | kIOPMDomainWillChange | + (parentChangeFlags & kIOPMSynchronize); + + result = startPowerChange( + /* flags */ myChangeFlags, + /* power state */ maxPowerState, + /* domain flags */ combinedPowerFlags, + /* connection */ whichParent, + /* parent flags */ parentPowerFlags); + } + + // If parent is dropping power, immediately update the parent's + // capability flags. Any future merging of parent(s) combined + // power flags should account for this power drop. + + if (parentChangeFlags & kIOPMDomainPowerDrop) { + setParentInfo(parentPowerFlags, whichParent, true); + } + + // Parent is expecting an ACK from us. If we did not embark on a state + // transition, i.e. startPowerChange() returned IOPMAckImplied. We are + // still required to issue an ACK to our parent. + + if (IOPMAckImplied == result) { + IOService * parent; + parent = (IOService *) whichParent->copyParentEntry(gIOPowerPlane); + assert(parent); + if (parent) { + parent->acknowledgePowerChange( whichParent ); + parent->release(); + } + } exit_no_ack: - // Drop the retain from notifyChild(). - if (whichParent) whichParent->release(); + // Drop the retain from notifyChild(). 
+ if (whichParent) { + whichParent->release(); + } } #ifndef __LP64__ @@ -1893,12 +1869,13 @@ exit_no_ack: // kind of change is occuring in the domain. //********************************************************************************* -IOReturn IOService::powerDomainDidChangeTo( - IOPMPowerFlags newPowerFlags, - IOPowerConnection * whichParent ) +IOReturn +IOService::powerDomainDidChangeTo( + IOPMPowerFlags newPowerFlags, + IOPowerConnection * whichParent ) { - assert(false); - return kIOReturnUnsupported; + assert(false); + return kIOReturnUnsupported; } #endif /* !__LP64__ */ @@ -1906,134 +1883,126 @@ IOReturn IOService::powerDomainDidChangeTo( // [private] handlePowerDomainDidChangeTo //********************************************************************************* -void IOService::handlePowerDomainDidChangeTo( IOPMRequest * request ) -{ - IOPMPowerFlags parentPowerFlags = (IOPMPowerFlags) request->fArg0; - IOPowerConnection * whichParent = (IOPowerConnection *) request->fArg1; - IOPMPowerChangeFlags parentChangeFlags = (IOPMPowerChangeFlags)(uintptr_t) request->fArg2; - IOPMPowerChangeFlags myChangeFlags; - IOPMPowerStateIndex maxPowerState; - IOPMPowerStateIndex initialDesire = kPowerStateZero; - bool computeDesire = false; - bool desireChanged = false; - bool savedParentsKnowState; - IOReturn result = IOPMAckImplied; - - PM_ASSERT_IN_GATE(); - OUR_PMLog(kPMLogDidChange, parentPowerFlags, 0); - - if (!inPlane(gIOPowerPlane) || !whichParent || !whichParent->getAwaitingAck()) - { - PM_LOG("%s::%s not in power tree\n", getName(), __FUNCTION__); - goto exit_no_ack; - } - - savedParentsKnowState = fParentsKnowState; - - setParentInfo(parentPowerFlags, whichParent, true); - - if ( fControllingDriver ) - { - maxPowerState = fControllingDriver->maxCapabilityForDomainState( - fParentsCurrentPowerFlags); - - if ((parentChangeFlags & kIOPMDomainPowerDrop) == 0) - { - // fMaxPowerState set a limit on self-initiated power changes. - // Update it after a parent power rise. - fMaxPowerState = maxPowerState; - } - - if (fInitialPowerChange) - { - computeDesire = true; - initialDesire = fControllingDriver->initialPowerStateForDomainState( - fParentsCurrentPowerFlags); - } - else if (parentChangeFlags & kIOPMRootChangeUp) - { - if (fAdvisoryTickleUsed) - { - // On system wake, re-compute the desired power state since - // gIOPMAdvisoryTickleEnabled will change for a full wake, - // which is an input to computeDesiredState(). This is not - // necessary for a dark wake because powerChangeDone() will - // handle the dark to full wake case, but it does no harm. - - desireChanged = true; - } - - if (fResetPowerStateOnWake) - { - // Query the driver for the desired power state on system wake. - // Default implementation returns the lowest power state. - - IOPMPowerStateIndex wakePowerState = - fControllingDriver->initialPowerStateForDomainState( - kIOPMRootDomainState | kIOPMPowerOn ); - - // fDesiredPowerState was adjusted before going to sleep - // with fDeviceDesire at min. - - if (StateOrder(wakePowerState) > StateOrder(fDesiredPowerState)) - { - // Must schedule a power adjustment if we changed the - // device desire. That will update the desired domain - // power on the parent power connection and ping the - // power parent if necessary. 
- - updatePowerClient(gIOPMPowerClientDevice, wakePowerState); - desireChanged = true; - } - } - } - - if (computeDesire || desireChanged) - computeDesiredState(initialDesire, false); - - // Absorb and propagate parent's broadcast flags - myChangeFlags = kIOPMParentInitiated | kIOPMDomainDidChange | - (parentChangeFlags & kIOPMRootBroadcastFlags); - - result = startPowerChange( - /* flags */ myChangeFlags, - /* power state */ maxPowerState, - /* domain flags */ fParentsCurrentPowerFlags, - /* connection */ whichParent, - /* parent flags */ 0); - } - - // Parent is expecting an ACK from us. If we did not embark on a state - // transition, i.e. startPowerChange() returned IOPMAckImplied. We are - // still required to issue an ACK to our parent. - - if (IOPMAckImplied == result) - { - IOService * parent; - parent = (IOService *) whichParent->copyParentEntry(gIOPowerPlane); - assert(parent); - if ( parent ) - { - parent->acknowledgePowerChange( whichParent ); - parent->release(); - } - } - - // If the parent registers its power driver late, then this is the - // first opportunity to tell our parent about our desire. Or if the - // child's desire changed during a parent change notify. - - if (fControllingDriver && - ((!savedParentsKnowState && fParentsKnowState) || desireChanged)) - { - PM_LOG1("%s::powerDomainDidChangeTo parentsKnowState %d\n", - getName(), fParentsKnowState); - requestDomainPower( fDesiredPowerState ); - } +void +IOService::handlePowerDomainDidChangeTo( IOPMRequest * request ) +{ + IOPMPowerFlags parentPowerFlags = (IOPMPowerFlags) request->fArg0; + IOPowerConnection * whichParent = (IOPowerConnection *) request->fArg1; + IOPMPowerChangeFlags parentChangeFlags = (IOPMPowerChangeFlags)(uintptr_t) request->fArg2; + IOPMPowerChangeFlags myChangeFlags; + IOPMPowerStateIndex maxPowerState; + IOPMPowerStateIndex initialDesire = kPowerStateZero; + bool computeDesire = false; + bool desireChanged = false; + bool savedParentsKnowState; + IOReturn result = IOPMAckImplied; + + PM_ASSERT_IN_GATE(); + OUR_PMLog(kPMLogDidChange, parentPowerFlags, 0); + + if (!inPlane(gIOPowerPlane) || !whichParent || !whichParent->getAwaitingAck()) { + PM_LOG("%s::%s not in power tree\n", getName(), __FUNCTION__); + goto exit_no_ack; + } + + savedParentsKnowState = fParentsKnowState; + + setParentInfo(parentPowerFlags, whichParent, true); + + if (fControllingDriver) { + maxPowerState = fControllingDriver->maxCapabilityForDomainState( + fParentsCurrentPowerFlags); + + if ((parentChangeFlags & kIOPMDomainPowerDrop) == 0) { + // fMaxPowerState set a limit on self-initiated power changes. + // Update it after a parent power rise. + fMaxPowerState = maxPowerState; + } + + if (fInitialPowerChange) { + computeDesire = true; + initialDesire = fControllingDriver->initialPowerStateForDomainState( + fParentsCurrentPowerFlags); + } else if (parentChangeFlags & kIOPMRootChangeUp) { + if (fAdvisoryTickleUsed) { + // On system wake, re-compute the desired power state since + // gIOPMAdvisoryTickleEnabled will change for a full wake, + // which is an input to computeDesiredState(). This is not + // necessary for a dark wake because powerChangeDone() will + // handle the dark to full wake case, but it does no harm. + + desireChanged = true; + } + + if (fResetPowerStateOnWake) { + // Query the driver for the desired power state on system wake. + // Default implementation returns the lowest power state. 
+ + IOPMPowerStateIndex wakePowerState = + fControllingDriver->initialPowerStateForDomainState( + kIOPMRootDomainState | kIOPMPowerOn ); + + // fDesiredPowerState was adjusted before going to sleep + // with fDeviceDesire at min. + + if (StateOrder(wakePowerState) > StateOrder(fDesiredPowerState)) { + // Must schedule a power adjustment if we changed the + // device desire. That will update the desired domain + // power on the parent power connection and ping the + // power parent if necessary. + + updatePowerClient(gIOPMPowerClientDevice, wakePowerState); + desireChanged = true; + } + } + } + + if (computeDesire || desireChanged) { + computeDesiredState(initialDesire, false); + } + + // Absorb and propagate parent's broadcast flags + myChangeFlags = kIOPMParentInitiated | kIOPMDomainDidChange | + (parentChangeFlags & kIOPMRootBroadcastFlags); + + result = startPowerChange( + /* flags */ myChangeFlags, + /* power state */ maxPowerState, + /* domain flags */ fParentsCurrentPowerFlags, + /* connection */ whichParent, + /* parent flags */ 0); + } + + // Parent is expecting an ACK from us. If we did not embark on a state + // transition, i.e. startPowerChange() returned IOPMAckImplied. We are + // still required to issue an ACK to our parent. + + if (IOPMAckImplied == result) { + IOService * parent; + parent = (IOService *) whichParent->copyParentEntry(gIOPowerPlane); + assert(parent); + if (parent) { + parent->acknowledgePowerChange( whichParent ); + parent->release(); + } + } + + // If the parent registers its power driver late, then this is the + // first opportunity to tell our parent about our desire. Or if the + // child's desire changed during a parent change notify. + + if (fControllingDriver && + ((!savedParentsKnowState && fParentsKnowState) || desireChanged)) { + PM_LOG1("%s::powerDomainDidChangeTo parentsKnowState %d\n", + getName(), fParentsKnowState); + requestDomainPower( fDesiredPowerState ); + } exit_no_ack: - // Drop the retain from notifyChild(). - if (whichParent) whichParent->release(); + // Drop the retain from notifyChild(). + if (whichParent) { + whichParent->release(); + } } //********************************************************************************* @@ -2043,85 +2012,82 @@ exit_no_ack: // data together. 
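Editorial sketch: the two domain-change handlers above share two mechanics. First, the parents' output flags are OR-folded across every power connection, substituting the announced flags for the parent that is changing. Second, the implied-ack rule: if startPowerChange() returned IOPMAckImplied (no asynchronous transition started), the child must ack its parent on the spot; otherwise the ack is deferred until the transition completes. A compact standalone model of both, plain C++17, illustrative names only:

#include <cstdint>
#include <vector>

using PowerFlags = uint64_t;
struct ParentConnection { PowerFlags currentFlags = 0; };

// OR-fold every parent's output flags, substituting the incoming flags
// for the one parent announcing a change (the will-change merge above).
PowerFlags combineParentFlags(const std::vector<ParentConnection *> &parents,
    const ParentConnection *changingParent, PowerFlags newFlags)
{
    PowerFlags combined = 0;
    for (const ParentConnection *p : parents) {
        combined |= (p == changingParent) ? newFlags : p->currentFlags;
    }
    return combined;
}

enum class StartResult { AckImplied, WillAckLater };
struct Parent { void acknowledgePowerChange() { /* unblocks the parent */ } };

// If no asynchronous power change was started, the parent is still owed
// an ack and gets it immediately; otherwise the transition machinery acks
// when it finishes.
void finishDomainNotification(Parent &parent, StartResult result)
{
    if (result == StartResult::AckImplied) {
        parent.acknowledgePowerChange();
    }
}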
//********************************************************************************* -void IOService::setParentInfo( - IOPMPowerFlags newPowerFlags, - IOPowerConnection * whichParent, - bool knowsState ) -{ - OSIterator * iter; - OSObject * next; - IOPowerConnection * conn; - - PM_ASSERT_IN_GATE(); - - // set our connection data - whichParent->setParentCurrentPowerFlags(newPowerFlags); - whichParent->setParentKnowsState(knowsState); - - // recompute our parent info - fParentsCurrentPowerFlags = 0; - fParentsKnowState = true; - - iter = getParentIterator(gIOPowerPlane); - if ( iter ) - { - while ( (next = iter->getNextObject()) ) - { - if ( (conn = OSDynamicCast(IOPowerConnection, next)) ) - { - fParentsKnowState &= conn->parentKnowsState(); - fParentsCurrentPowerFlags |= conn->parentCurrentPowerFlags(); - } - } - iter->release(); - } +void +IOService::setParentInfo( + IOPMPowerFlags newPowerFlags, + IOPowerConnection * whichParent, + bool knowsState ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * conn; + + PM_ASSERT_IN_GATE(); + + // set our connection data + whichParent->setParentCurrentPowerFlags(newPowerFlags); + whichParent->setParentKnowsState(knowsState); + + // recompute our parent info + fParentsCurrentPowerFlags = 0; + fParentsKnowState = true; + + iter = getParentIterator(gIOPowerPlane); + if (iter) { + while ((next = iter->getNextObject())) { + if ((conn = OSDynamicCast(IOPowerConnection, next))) { + fParentsKnowState &= conn->parentKnowsState(); + fParentsCurrentPowerFlags |= conn->parentCurrentPowerFlags(); + } + } + iter->release(); + } } //****************************************************************************** // [private] trackSystemSleepPreventers //****************************************************************************** -void IOService::trackSystemSleepPreventers( - IOPMPowerStateIndex oldPowerState, - IOPMPowerStateIndex newPowerState, - IOPMPowerChangeFlags changeFlags __unused ) -{ - IOPMPowerFlags oldCapability, newCapability; - - oldCapability = fPowerStates[oldPowerState].capabilityFlags & - (kIOPMPreventIdleSleep | kIOPMPreventSystemSleep); - newCapability = fPowerStates[newPowerState].capabilityFlags & - (kIOPMPreventIdleSleep | kIOPMPreventSystemSleep); - - if (fHeadNoteChangeFlags & kIOPMInitialPowerChange) - oldCapability = 0; - if (oldCapability == newCapability) - return; - - if ((oldCapability ^ newCapability) & kIOPMPreventIdleSleep) - { - bool enablePrevention = ((oldCapability & kIOPMPreventIdleSleep) == 0); - bool idleCancelAllowed = getPMRootDomain()->updatePreventIdleSleepList( - this, enablePrevention); +void +IOService::trackSystemSleepPreventers( + IOPMPowerStateIndex oldPowerState, + IOPMPowerStateIndex newPowerState, + IOPMPowerChangeFlags changeFlags __unused ) +{ + IOPMPowerFlags oldCapability, newCapability; + + oldCapability = fPowerStates[oldPowerState].capabilityFlags & + (kIOPMPreventIdleSleep | kIOPMPreventSystemSleep); + newCapability = fPowerStates[newPowerState].capabilityFlags & + (kIOPMPreventIdleSleep | kIOPMPreventSystemSleep); + + if (fHeadNoteChangeFlags & kIOPMInitialPowerChange) { + oldCapability = 0; + } + if (oldCapability == newCapability) { + return; + } + + if ((oldCapability ^ newCapability) & kIOPMPreventIdleSleep) { + bool enablePrevention = ((oldCapability & kIOPMPreventIdleSleep) == 0); + bool idleCancelAllowed = getPMRootDomain()->updatePreventIdleSleepList( + this, enablePrevention); #if SUPPORT_IDLE_CANCEL - if (idleCancelAllowed && enablePrevention) - { - IOPMRequest * cancelRequest; 
- - cancelRequest = acquirePMRequest( getPMRootDomain(), kIOPMRequestTypeIdleCancel ); - if (cancelRequest) - { - submitPMRequest( cancelRequest ); - } - } + if (idleCancelAllowed && enablePrevention) { + IOPMRequest * cancelRequest; + + cancelRequest = acquirePMRequest( getPMRootDomain(), kIOPMRequestTypeIdleCancel ); + if (cancelRequest) { + submitPMRequest( cancelRequest ); + } + } #endif - } + } - if ((oldCapability ^ newCapability) & kIOPMPreventSystemSleep) - { - getPMRootDomain()->updatePreventSystemSleepList(this, - ((oldCapability & kIOPMPreventSystemSleep) == 0)); - } + if ((oldCapability ^ newCapability) & kIOPMPreventSystemSleep) { + getPMRootDomain()->updatePreventSystemSleepList(this, + ((oldCapability & kIOPMPreventSystemSleep) == 0)); + } } //********************************************************************************* @@ -2130,146 +2096,139 @@ void IOService::trackSystemSleepPreventers( // Called on a power parent when a child's power requirement changes. //********************************************************************************* -IOReturn IOService::requestPowerDomainState( - IOPMPowerFlags childRequestPowerFlags, - IOPowerConnection * childConnection, - unsigned long specification ) -{ - IOPMPowerStateIndex order, powerState; - IOPMPowerFlags outputPowerFlags; - IOService * child; - IOPMRequest * subRequest; - bool adjustPower = false; - - if (!initialized) - return IOPMNotYetInitialized; - - if (gIOPMWorkLoop->onThread() == false) - { - PM_LOG("%s::requestPowerDomainState\n", getName()); - return kIOReturnSuccess; - } - - OUR_PMLog(kPMLogRequestDomain, childRequestPowerFlags, specification); - - if (!isChild(childConnection, gIOPowerPlane)) - return kIOReturnNotAttached; - - if (!fControllingDriver || !fNumberOfPowerStates) - return kIOReturnNotReady; - - child = (IOService *) childConnection->getChildEntry(gIOPowerPlane); - assert(child); - - // Remove flags from child request which we can't possibly supply - childRequestPowerFlags &= fMergedOutputPowerFlags; - - // Merge in the power flags contributed by this power parent - // at its current or impending power state. - - outputPowerFlags = fPowerStates[fCurrentPowerState].outputPowerFlags; - if (fMachineState != kIOPM_Finished) - { - if (IS_POWER_DROP && !IS_ROOT_DOMAIN) - { - // Use the lower power state when dropping power. - // Must be careful since a power drop can be cancelled - // from the following states: - // - kIOPM_OurChangeTellClientsPowerDown - // - kIOPM_OurChangeTellPriorityClientsPowerDown - // - // The child must not wait for this parent to raise power - // if the power drop was cancelled. The solution is to cancel - // the power drop if possible, then schedule an adjustment to - // re-evaluate the parent's power state. - // - // Root domain is excluded to avoid idle sleep issues. And allow - // root domain children to pop up when system is going to sleep. - - if ((fMachineState == kIOPM_OurChangeTellClientsPowerDown) || - (fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown)) - { - fDoNotPowerDown = true; // cancel power drop - adjustPower = true; // schedule an adjustment - PM_LOG1("%s: power drop cancelled in state %u by %s\n", - getName(), fMachineState, child->getName()); - } - else - { - // Beyond cancellation point, report the impending state. - outputPowerFlags = - fPowerStates[fHeadNotePowerState].outputPowerFlags; - } - } - else if (IS_POWER_RISE) - { - // When raising power, must report the output power flags from - // child's perspective. 
A child power request may arrive while - // parent is transitioning upwards. If a request arrives after - // setParentInfo() has already recorded the output power flags - // for the next power state, then using the power supplied by - // fCurrentPowerState is incorrect, and might cause the child - // to wait when it should not. - - outputPowerFlags = childConnection->parentCurrentPowerFlags(); - } - } - child->fHeadNoteDomainTargetFlags |= outputPowerFlags; - - // Map child's requested power flags to one of our power state. - - for (order = 0; order < fNumberOfPowerStates; order++) - { - powerState = fPowerStates[order].stateOrderToIndex; - if ((fPowerStates[powerState].outputPowerFlags & childRequestPowerFlags) - == childRequestPowerFlags) - break; - } - if (order >= fNumberOfPowerStates) - { - powerState = kPowerStateZero; - } - - // Conditions that warrants a power adjustment on this parent. - // Adjust power will also propagate any changes to the child's - // prevent idle/sleep flags towards the root domain. - - if (!childConnection->childHasRequestedPower() || - (powerState != childConnection->getDesiredDomainState())) - adjustPower = true; +IOReturn +IOService::requestPowerDomainState( + IOPMPowerFlags childRequestPowerFlags, + IOPowerConnection * childConnection, + unsigned long specification ) +{ + IOPMPowerStateIndex order, powerState; + IOPMPowerFlags outputPowerFlags; + IOService * child; + IOPMRequest * subRequest; + bool adjustPower = false; + + if (!initialized) { + return IOPMNotYetInitialized; + } + + if (gIOPMWorkLoop->onThread() == false) { + PM_LOG("%s::requestPowerDomainState\n", getName()); + return kIOReturnSuccess; + } + + OUR_PMLog(kPMLogRequestDomain, childRequestPowerFlags, specification); + + if (!isChild(childConnection, gIOPowerPlane)) { + return kIOReturnNotAttached; + } + + if (!fControllingDriver || !fNumberOfPowerStates) { + return kIOReturnNotReady; + } + + child = (IOService *) childConnection->getChildEntry(gIOPowerPlane); + assert(child); + + // Remove flags from child request which we can't possibly supply + childRequestPowerFlags &= fMergedOutputPowerFlags; + + // Merge in the power flags contributed by this power parent + // at its current or impending power state. + + outputPowerFlags = fPowerStates[fCurrentPowerState].outputPowerFlags; + if (fMachineState != kIOPM_Finished) { + if (IS_POWER_DROP && !IS_ROOT_DOMAIN) { + // Use the lower power state when dropping power. + // Must be careful since a power drop can be cancelled + // from the following states: + // - kIOPM_OurChangeTellClientsPowerDown + // - kIOPM_OurChangeTellPriorityClientsPowerDown + // + // The child must not wait for this parent to raise power + // if the power drop was cancelled. The solution is to cancel + // the power drop if possible, then schedule an adjustment to + // re-evaluate the parent's power state. + // + // Root domain is excluded to avoid idle sleep issues. And allow + // root domain children to pop up when system is going to sleep. + + if ((fMachineState == kIOPM_OurChangeTellClientsPowerDown) || + (fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown)) { + fDoNotPowerDown = true; // cancel power drop + adjustPower = true;// schedule an adjustment + PM_LOG1("%s: power drop cancelled in state %u by %s\n", + getName(), fMachineState, child->getName()); + } else { + // Beyond cancellation point, report the impending state. 
+ outputPowerFlags = + fPowerStates[fHeadNotePowerState].outputPowerFlags; + } + } else if (IS_POWER_RISE) { + // When raising power, must report the output power flags from + // child's perspective. A child power request may arrive while + // parent is transitioning upwards. If a request arrives after + // setParentInfo() has already recorded the output power flags + // for the next power state, then using the power supplied by + // fCurrentPowerState is incorrect, and might cause the child + // to wait when it should not. + + outputPowerFlags = childConnection->parentCurrentPowerFlags(); + } + } + child->fHeadNoteDomainTargetFlags |= outputPowerFlags; + + // Map child's requested power flags to one of our power state. -#if ENABLE_DEBUG_LOGS - if (adjustPower) - { - PM_LOG("requestPowerDomainState[%s]: %s, init %d, %u->%u\n", - getName(), child->getName(), - !childConnection->childHasRequestedPower(), - (uint32_t) childConnection->getDesiredDomainState(), - (uint32_t) powerState); - } -#endif + for (order = 0; order < fNumberOfPowerStates; order++) { + powerState = fPowerStates[order].stateOrderToIndex; + if ((fPowerStates[powerState].outputPowerFlags & childRequestPowerFlags) + == childRequestPowerFlags) { + break; + } + } + if (order >= fNumberOfPowerStates) { + powerState = kPowerStateZero; + } - // Record the child's desires on the connection. - childConnection->setChildHasRequestedPower(); - childConnection->setDesiredDomainState( powerState ); + // Conditions that warrants a power adjustment on this parent. + // Adjust power will also propagate any changes to the child's + // prevent idle/sleep flags towards the root domain. - // Schedule a request to re-evaluate all children desires and - // adjust power state. Submit a request if one wasn't pending, - // or if the current request is part of a call tree. + if (!childConnection->childHasRequestedPower() || + (powerState != childConnection->getDesiredDomainState())) { + adjustPower = true; + } - if (adjustPower && !fDeviceOverrideEnabled && - (!fAdjustPowerScheduled || gIOPMRequest->getRootRequest())) - { - subRequest = acquirePMRequest( - this, kIOPMRequestTypeAdjustPowerState, gIOPMRequest ); - if (subRequest) - { - submitPMRequest( subRequest ); - fAdjustPowerScheduled = true; - } - } +#if ENABLE_DEBUG_LOGS + if (adjustPower) { + PM_LOG("requestPowerDomainState[%s]: %s, init %d, %u->%u\n", + getName(), child->getName(), + !childConnection->childHasRequestedPower(), + (uint32_t) childConnection->getDesiredDomainState(), + (uint32_t) powerState); + } +#endif + + // Record the child's desires on the connection. + childConnection->setChildHasRequestedPower(); + childConnection->setDesiredDomainState( powerState ); + + // Schedule a request to re-evaluate all children desires and + // adjust power state. Submit a request if one wasn't pending, + // or if the current request is part of a call tree. + + if (adjustPower && !fDeviceOverrideEnabled && + (!fAdjustPowerScheduled || gIOPMRequest->getRootRequest())) { + subRequest = acquirePMRequest( + this, kIOPMRequestTypeAdjustPowerState, gIOPMRequest ); + if (subRequest) { + submitPMRequest( subRequest ); + fAdjustPowerScheduled = true; + } + } - return kIOReturnSuccess; + return kIOReturnSuccess; } //********************************************************************************* @@ -2281,9 +2240,10 @@ IOReturn IOService::requestPowerDomainState( // We enter the highest state until addPowerChild is called. 
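Editorial sketch: the mapping loop in requestPowerDomainState above walks the power states in ascending power order (via stateOrderToIndex) and selects the lowest state whose output flags are a superset of what the child requested, falling back to state zero when nothing fits. A standalone model of that selection, plain C++17:

#include <cstddef>
#include <cstdint>
#include <vector>

using PowerFlags = uint64_t;
struct PowerState { PowerFlags outputFlags; };

// `statesInOrder` is assumed already sorted lowest-to-highest power,
// mirroring the stateOrderToIndex walk above.
std::size_t lowestAdequateState(const std::vector<PowerState> &statesInOrder,
                                PowerFlags requested)
{
    for (std::size_t i = 0; i < statesInOrder.size(); ++i) {
        if ((statesInOrder[i].outputFlags & requested) == requested) {
            return i;   // first state that supplies every requested flag
        }
    }
    return 0;           // nothing fits: park at the lowest state
}

Picking the lowest adequate state rather than the highest is what keeps a parent from being dragged to full power by a child that only needs a subset of its output flags.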
//********************************************************************************* -IOReturn IOService::temporaryPowerClampOn( void ) +IOReturn +IOService::temporaryPowerClampOn( void ) { - return requestPowerState( gIOPMPowerClientChildProxy, kIOPMPowerStateMax ); + return requestPowerState( gIOPMPowerClientChildProxy, kIOPMPowerStateMax ); } //********************************************************************************* @@ -2298,22 +2258,25 @@ IOReturn IOService::temporaryPowerClampOn( void ) // highest power state. //********************************************************************************* -IOReturn IOService::makeUsable( void ) +IOReturn +IOService::makeUsable( void ) { - OUR_PMLog(kPMLogMakeUsable, 0, 0); - return requestPowerState( gIOPMPowerClientDevice, kIOPMPowerStateMax ); + OUR_PMLog(kPMLogMakeUsable, 0, 0); + return requestPowerState( gIOPMPowerClientDevice, kIOPMPowerStateMax ); } //********************************************************************************* // [public] currentCapability //********************************************************************************* -IOPMPowerFlags IOService::currentCapability( void ) +IOPMPowerFlags +IOService::currentCapability( void ) { - if (!initialized) - return IOPMNotPowerManaged; + if (!initialized) { + return IOPMNotPowerManaged; + } - return fCurrentCapabilityFlags; + return fCurrentCapabilityFlags; } //********************************************************************************* @@ -2324,10 +2287,11 @@ IOPMPowerFlags IOService::currentCapability( void ) // power states differ, then a power state change is initiated. //********************************************************************************* -IOReturn IOService::changePowerStateTo( unsigned long ordinal ) +IOReturn +IOService::changePowerStateTo( unsigned long ordinal ) { - OUR_PMLog(kPMLogChangeStateTo, ordinal, 0); - return requestPowerState( gIOPMPowerClientDriver, ordinal ); + OUR_PMLog(kPMLogChangeStateTo, ordinal, 0); + return requestPowerState( gIOPMPowerClientDriver, ordinal ); } //********************************************************************************* @@ -2338,10 +2302,11 @@ IOReturn IOService::changePowerStateTo( unsigned long ordinal ) // power states differ, then a power state change is initiated. //********************************************************************************* -IOReturn IOService::changePowerStateToPriv( unsigned long ordinal ) +IOReturn +IOService::changePowerStateToPriv( unsigned long ordinal ) { - OUR_PMLog(kPMLogChangeStateToPriv, ordinal, 0); - return requestPowerState( gIOPMPowerClientDevice, ordinal ); + OUR_PMLog(kPMLogChangeStateToPriv, ordinal, 0); + return requestPowerState( gIOPMPowerClientDevice, ordinal ); } //********************************************************************************* @@ -2353,43 +2318,46 @@ IOReturn IOService::changePowerStateToPriv( unsigned long ordinal ) // Override enforced - Children and Driver desires are ignored. 
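Editorial sketch: the entry points above are thin wrappers that each record one desire under a distinct client symbol via requestPowerState(): changePowerStateTo() under gIOPMPowerClientDriver, changePowerStateToPriv() and makeUsable() under gIOPMPowerClientDevice, temporaryPowerClampOn() under gIOPMPowerClientChildProxy. The effective desire is the maximum across clients. A standalone model of that per-client aggregation, plain C++17 with a std::map standing in for the client dictionary:

#include <algorithm>
#include <cstdint>
#include <map>
#include <string>

using StateIndex = uint32_t;

struct PowerClients {
    std::map<std::string, StateIndex> desires;

    // Analog of requestPowerState(): one desire slot per client symbol.
    void request(const std::string &client, StateIndex state) {
        desires[client] = state;
    }

    // The highest desire across all clients wins.
    StateIndex effectiveDesire() const {
        StateIndex result = 0;
        for (const auto &kv : desires) {
            result = std::max(result, kv.second);
        }
        return result;
    }
};

This is the aggregate that computeDesiredState() later evaluates, subject to the override and clamp limits handled elsewhere in this file.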
//********************************************************************************* -IOReturn IOService::changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, - IOPMRequestTag tag ) +IOReturn +IOService::changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, + IOPMRequestTag tag ) { - IOPMRequest * request; + IOPMRequest * request; - if (!initialized) - return kIOPMNotYetInitialized; + if (!initialized) { + return kIOPMNotYetInitialized; + } - OUR_PMLog(kPMLogChangeStateToPriv, ordinal, 0); + OUR_PMLog(kPMLogChangeStateToPriv, ordinal, 0); - request = acquirePMRequest( this, kIOPMRequestTypeRequestPowerStateOverride ); - if (!request) - return kIOReturnNoMemory; + request = acquirePMRequest( this, kIOPMRequestTypeRequestPowerStateOverride ); + if (!request) { + return kIOReturnNoMemory; + } - gIOPMPowerClientDevice->retain(); - request->fRequestTag = tag; - request->fArg0 = (void *) ordinal; - request->fArg1 = (void *) gIOPMPowerClientDevice; - request->fArg2 = 0; + gIOPMPowerClientDevice->retain(); + request->fRequestTag = tag; + request->fArg0 = (void *) ordinal; + request->fArg1 = (void *) gIOPMPowerClientDevice; + request->fArg2 = 0; #if NOT_READY - if (action) - request->installCompletionAction( action, target, param ); + if (action) { + request->installCompletionAction( action, target, param ); + } #endif - // Prevent needless downwards power transitions by clamping power - // until the scheduled request is executed. + // Prevent needless downwards power transitions by clamping power + // until the scheduled request is executed. - if (gIOPMWorkLoop->inGate() && (ordinal < fNumberOfPowerStates)) - { - fTempClampPowerState = StateMax(fTempClampPowerState, ordinal); - fTempClampCount++; - fOverrideMaxPowerState = ordinal; - request->fArg2 = (void *) (uintptr_t) true; - } + if (gIOPMWorkLoop->inGate() && (ordinal < fNumberOfPowerStates)) { + fTempClampPowerState = StateMax(fTempClampPowerState, ordinal); + fTempClampCount++; + fOverrideMaxPowerState = ordinal; + request->fArg2 = (void *) (uintptr_t) true; + } - submitPMRequest( request ); - return IOPMNoErr; + submitPMRequest( request ); + return IOPMNoErr; } //********************************************************************************* @@ -2398,10 +2366,11 @@ IOReturn IOService::changePowerStateWithOverrideTo( IOPMPowerStateIndex ordinal, // Adjust the root domain's power desire on the target //********************************************************************************* -IOReturn IOService::changePowerStateForRootDomain( IOPMPowerStateIndex ordinal ) +IOReturn +IOService::changePowerStateForRootDomain( IOPMPowerStateIndex ordinal ) { - OUR_PMLog(kPMLogChangeStateForRootDomain, ordinal, 0); - return requestPowerState( gIOPMPowerClientRootDomain, ordinal ); + OUR_PMLog(kPMLogChangeStateForRootDomain, ordinal, 0); + return requestPowerState( gIOPMPowerClientRootDomain, ordinal ); } //********************************************************************************* @@ -2411,369 +2380,385 @@ IOReturn IOService::changePowerStateForRootDomain( IOPMPowerStateIndex ordinal ) // Supplied callback invoked upon completion. 
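Editorial sketch: the clamp logic in changePowerStateWithOverrideTo() above (and its twin in requestPowerState) pins the service at no less than the requested state while the request sits in the queue, with fArg2 flagging that the matching handler must rebalance the count. A standalone model of that arm/disarm accounting, plain C++17:

#include <algorithm>
#include <cstdint>

struct TempClamp {
    uint32_t clampState = 0;   // analog of fTempClampPowerState
    uint32_t clampCount = 0;   // analog of fTempClampCount

    // At submit time: raise the floor and count one outstanding request.
    void arm(uint32_t requestedState) {
        clampState = std::max(clampState, requestedState);
        ++clampCount;
    }

    // At handler time: the last outstanding request drops the clamp.
    void disarm() {
        if (clampCount && --clampCount == 0) {
            clampState = 0;    // analog of kPowerStateZero
        }
    }
};

The clamp exists purely to prevent a needless downward transition in the window between queuing the request and executing it.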
//********************************************************************************* -IOReturn IOService::quiescePowerTree( - void * target, IOPMCompletionAction action, void * param ) +IOReturn +IOService::quiescePowerTree( + void * target, IOPMCompletionAction action, void * param ) { - IOPMRequest * request; + IOPMRequest * request; - if (!initialized) - return kIOPMNotYetInitialized; - if (!target || !action) - return kIOReturnBadArgument; + if (!initialized) { + return kIOPMNotYetInitialized; + } + if (!target || !action) { + return kIOReturnBadArgument; + } - OUR_PMLog(kPMLogQuiescePowerTree, 0, 0); + OUR_PMLog(kPMLogQuiescePowerTree, 0, 0); - // Target the root node instead of root domain. This is to avoid blocking - // the quiesce request behind an existing root domain request in the work - // queue. Root parent and root domain requests in the work queue must not - // block the completion of the quiesce request. + // Target the root node instead of root domain. This is to avoid blocking + // the quiesce request behind an existing root domain request in the work + // queue. Root parent and root domain requests in the work queue must not + // block the completion of the quiesce request. - request = acquirePMRequest(gIOPMRootNode, kIOPMRequestTypeQuiescePowerTree); - if (!request) - return kIOReturnNoMemory; + request = acquirePMRequest(gIOPMRootNode, kIOPMRequestTypeQuiescePowerTree); + if (!request) { + return kIOReturnNoMemory; + } - request->installCompletionAction(target, action, param); + request->installCompletionAction(target, action, param); - // Submit through the normal request flow. This will make sure any request - // already in the request queue will get pushed over to the work queue for - // execution. Any request submitted after this request may not be serviced. + // Submit through the normal request flow. This will make sure any request + // already in the request queue will get pushed over to the work queue for + // execution. Any request submitted after this request may not be serviced. 
- submitPMRequest( request ); - return kIOReturnSuccess; + submitPMRequest( request ); + return kIOReturnSuccess; } //********************************************************************************* // [private] requestPowerState //********************************************************************************* -IOReturn IOService::requestPowerState( - const OSSymbol * client, - uint32_t state ) -{ - IOPMRequest * request; - - if (!client) - return kIOReturnBadArgument; - if (!initialized) - return kIOPMNotYetInitialized; - - request = acquirePMRequest( this, kIOPMRequestTypeRequestPowerState ); - if (!request) - return kIOReturnNoMemory; - - client->retain(); - request->fArg0 = (void *)(uintptr_t) state; - request->fArg1 = (void *) client; - request->fArg2 = 0; +IOReturn +IOService::requestPowerState( + const OSSymbol * client, + uint32_t state ) +{ + IOPMRequest * request; + + if (!client) { + return kIOReturnBadArgument; + } + if (!initialized) { + return kIOPMNotYetInitialized; + } + + request = acquirePMRequest( this, kIOPMRequestTypeRequestPowerState ); + if (!request) { + return kIOReturnNoMemory; + } + + client->retain(); + request->fArg0 = (void *)(uintptr_t) state; + request->fArg1 = (void *) client; + request->fArg2 = 0; #if NOT_READY - if (action) - request->installCompletionAction( action, target, param ); + if (action) { + request->installCompletionAction( action, target, param ); + } #endif - // Prevent needless downwards power transitions by clamping power - // until the scheduled request is executed. + // Prevent needless downwards power transitions by clamping power + // until the scheduled request is executed. - if (gIOPMWorkLoop->inGate() && (state < fNumberOfPowerStates)) - { - fTempClampPowerState = StateMax(fTempClampPowerState, state); - fTempClampCount++; - request->fArg2 = (void *) (uintptr_t) true; - } + if (gIOPMWorkLoop->inGate() && (state < fNumberOfPowerStates)) { + fTempClampPowerState = StateMax(fTempClampPowerState, state); + fTempClampCount++; + request->fArg2 = (void *) (uintptr_t) true; + } - submitPMRequest( request ); - return IOPMNoErr; + submitPMRequest( request ); + return IOPMNoErr; } //********************************************************************************* // [private] handleRequestPowerState //********************************************************************************* -void IOService::handleRequestPowerState( IOPMRequest * request ) +void +IOService::handleRequestPowerState( IOPMRequest * request ) { - const OSSymbol * client = (const OSSymbol *) request->fArg1; - uint32_t state = (uint32_t)(uintptr_t) request->fArg0; + const OSSymbol * client = (const OSSymbol *) request->fArg1; + uint32_t state = (uint32_t)(uintptr_t) request->fArg0; - PM_ASSERT_IN_GATE(); - if (request->fArg2) - { - assert(fTempClampCount != 0); - if (fTempClampCount) fTempClampCount--; - if (!fTempClampCount) fTempClampPowerState = kPowerStateZero; - } + PM_ASSERT_IN_GATE(); + if (request->fArg2) { + assert(fTempClampCount != 0); + if (fTempClampCount) { + fTempClampCount--; + } + if (!fTempClampCount) { + fTempClampPowerState = kPowerStateZero; + } + } - if (fNumberOfPowerStates && (state >= fNumberOfPowerStates)) - state = fHighestPowerState; + if (fNumberOfPowerStates && (state >= fNumberOfPowerStates)) { + state = fHighestPowerState; + } - // The power suppression due to changePowerStateWithOverrideTo() expires - // upon the next "device" power request - changePowerStateToPriv(). 
+ // The power suppression due to changePowerStateWithOverrideTo() expires + // upon the next "device" power request - changePowerStateToPriv(). - if ((getPMRequestType() != kIOPMRequestTypeRequestPowerStateOverride) && - (client == gIOPMPowerClientDevice)) - fOverrideMaxPowerState = kIOPMPowerStateMax; + if ((getPMRequestType() != kIOPMRequestTypeRequestPowerStateOverride) && + (client == gIOPMPowerClientDevice)) { + fOverrideMaxPowerState = kIOPMPowerStateMax; + } - if ((state == kPowerStateZero) && - (client != gIOPMPowerClientDevice) && - (client != gIOPMPowerClientDriver) && - (client != gIOPMPowerClientChildProxy)) - removePowerClient(client); - else - updatePowerClient(client, state); + if ((state == kPowerStateZero) && + (client != gIOPMPowerClientDevice) && + (client != gIOPMPowerClientDriver) && + (client != gIOPMPowerClientChildProxy)) { + removePowerClient(client); + } else { + updatePowerClient(client, state); + } - adjustPowerState(); - client->release(); + adjustPowerState(); + client->release(); } //********************************************************************************* // [private] Helper functions to update/remove power clients. //********************************************************************************* -void IOService::updatePowerClient( const OSSymbol * client, uint32_t powerState ) -{ - IOPMPowerStateIndex oldPowerState = kPowerStateZero; - - if (!fPowerClients) - fPowerClients = OSDictionary::withCapacity(4); - if (fPowerClients && client) - { - OSNumber * num = (OSNumber *) fPowerClients->getObject(client); - if (num) - { - oldPowerState = num->unsigned32BitValue(); - num->setValue(powerState); - } - else - { - num = OSNumber::withNumber(powerState, 32); - if (num) - { - fPowerClients->setObject(client, num); - num->release(); - } - } +void +IOService::updatePowerClient( const OSSymbol * client, uint32_t powerState ) +{ + IOPMPowerStateIndex oldPowerState = kPowerStateZero; + + if (!fPowerClients) { + fPowerClients = OSDictionary::withCapacity(4); + } + if (fPowerClients && client) { + OSNumber * num = (OSNumber *) fPowerClients->getObject(client); + if (num) { + oldPowerState = num->unsigned32BitValue(); + num->setValue(powerState); + } else { + num = OSNumber::withNumber(powerState, 32); + if (num) { + fPowerClients->setObject(client, num); + num->release(); + } + } - PM_ACTION_3(actionUpdatePowerClient, client, oldPowerState, powerState); - } + PM_ACTION_3(actionUpdatePowerClient, client, oldPowerState, powerState); + } } -void IOService::removePowerClient( const OSSymbol * client ) +void +IOService::removePowerClient( const OSSymbol * client ) { - if (fPowerClients && client) - fPowerClients->removeObject(client); + if (fPowerClients && client) { + fPowerClients->removeObject(client); + } } -uint32_t IOService::getPowerStateForClient( const OSSymbol * client ) +uint32_t +IOService::getPowerStateForClient( const OSSymbol * client ) { - uint32_t powerState = kPowerStateZero; + uint32_t powerState = kPowerStateZero; - if (fPowerClients && client) - { - OSNumber * num = (OSNumber *) fPowerClients->getObject(client); - if (num) powerState = num->unsigned32BitValue(); - } - return powerState; + if (fPowerClients && client) { + OSNumber * num = (OSNumber *) fPowerClients->getObject(client); + if (num) { + powerState = num->unsigned32BitValue(); + } + } + return powerState; } //********************************************************************************* // [protected] powerOverrideOnPriv 
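Editorial sketch: the helper trio above lazily creates the client table on first use, updates an existing entry in place or inserts a new one, and reads back kPowerStateZero for unknown clients. A standalone model, plain C++17 with std::map/std::unique_ptr standing in for OSDictionary/OSNumber and their retain counts:

#include <cstdint>
#include <map>
#include <memory>
#include <string>

using StateIndex = uint32_t;
using ClientTable = std::map<std::string, StateIndex>;

struct Service {
    std::unique_ptr<ClientTable> clients;   // created on first update

    void updatePowerClient(const std::string &client, StateIndex state) {
        if (!clients) {
            clients.reset(new ClientTable);  // lazy creation, as above
        }
        (*clients)[client] = state;          // update-or-insert
    }

    void removePowerClient(const std::string &client) {
        if (clients) {
            clients->erase(client);
        }
    }

    StateIndex getPowerStateForClient(const std::string &client) const {
        if (!clients) {
            return 0;                        // kPowerStateZero analog
        }
        auto it = clients->find(client);
        return (it == clients->end()) ? 0 : it->second;
    }
};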
//********************************************************************************* -IOReturn IOService::powerOverrideOnPriv( void ) +IOReturn +IOService::powerOverrideOnPriv( void ) { - IOPMRequest * request; + IOPMRequest * request; - if (!initialized) - return IOPMNotYetInitialized; + if (!initialized) { + return IOPMNotYetInitialized; + } - if (gIOPMWorkLoop->inGate()) - { - fDeviceOverrideEnabled = true; - return IOPMNoErr; - } + if (gIOPMWorkLoop->inGate()) { + fDeviceOverrideEnabled = true; + return IOPMNoErr; + } - request = acquirePMRequest( this, kIOPMRequestTypePowerOverrideOnPriv ); - if (!request) - return kIOReturnNoMemory; + request = acquirePMRequest( this, kIOPMRequestTypePowerOverrideOnPriv ); + if (!request) { + return kIOReturnNoMemory; + } - submitPMRequest( request ); - return IOPMNoErr; + submitPMRequest( request ); + return IOPMNoErr; } //********************************************************************************* // [protected] powerOverrideOffPriv //********************************************************************************* -IOReturn IOService::powerOverrideOffPriv( void ) +IOReturn +IOService::powerOverrideOffPriv( void ) { - IOPMRequest * request; + IOPMRequest * request; - if (!initialized) - return IOPMNotYetInitialized; + if (!initialized) { + return IOPMNotYetInitialized; + } - if (gIOPMWorkLoop->inGate()) - { - fDeviceOverrideEnabled = false; - return IOPMNoErr; - } + if (gIOPMWorkLoop->inGate()) { + fDeviceOverrideEnabled = false; + return IOPMNoErr; + } - request = acquirePMRequest( this, kIOPMRequestTypePowerOverrideOffPriv ); - if (!request) - return kIOReturnNoMemory; + request = acquirePMRequest( this, kIOPMRequestTypePowerOverrideOffPriv ); + if (!request) { + return kIOReturnNoMemory; + } - submitPMRequest( request ); - return IOPMNoErr; + submitPMRequest( request ); + return IOPMNoErr; } //********************************************************************************* // [private] handlePowerOverrideChanged //********************************************************************************* -void IOService::handlePowerOverrideChanged( IOPMRequest * request ) +void +IOService::handlePowerOverrideChanged( IOPMRequest * request ) { - PM_ASSERT_IN_GATE(); - if (request->getType() == kIOPMRequestTypePowerOverrideOnPriv) - { - OUR_PMLog(kPMLogOverrideOn, 0, 0); - fDeviceOverrideEnabled = true; - } - else - { - OUR_PMLog(kPMLogOverrideOff, 0, 0); - fDeviceOverrideEnabled = false; - } + PM_ASSERT_IN_GATE(); + if (request->getType() == kIOPMRequestTypePowerOverrideOnPriv) { + OUR_PMLog(kPMLogOverrideOn, 0, 0); + fDeviceOverrideEnabled = true; + } else { + OUR_PMLog(kPMLogOverrideOff, 0, 0); + fDeviceOverrideEnabled = false; + } - adjustPowerState(); + adjustPowerState(); } //********************************************************************************* // [private] computeDesiredState //********************************************************************************* -void IOService::computeDesiredState( unsigned long localClamp, bool computeOnly ) -{ - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - uint32_t desiredState = kPowerStateZero; - uint32_t newPowerState = kPowerStateZero; - bool hasChildren = false; - - // Desired power state is always 0 without a controlling driver. - - if (!fNumberOfPowerStates) - { - fDesiredPowerState = kPowerStateZero; - return; - } - - // Examine the children's desired power state. 
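powerOverrideOnPriv() and powerOverrideOffPriv() above both follow the same pattern: callers already inside the PM work-loop gate mutate state directly, while all other callers enqueue a request for the work loop. A standalone sketch of that fast-path/slow-path split, under the assumption that a simple queue stands in for the IOPMRequest machinery:

#include <functional>
#include <queue>

// Illustrative model of the "in-gate fast path": work already serialized by
// the gate runs inline; everything else is deferred to the work loop.
struct WorkLoopModel {
    bool inGate = false;                        // true while the loop runs work
    std::queue<std::function<void()>> pending;  // stands in for submitPMRequest()

    void runOrQueue(std::function<void()> fn) {
        if (inGate) {
            fn();                               // safe: serialized by the gate
        } else {
            pending.push(std::move(fn));        // deferred to the work loop
        }
    }
};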
- - iter = getChildIterator(gIOPowerPlane); - if (iter) - { - while ((next = iter->getNextObject())) - { - if ((connection = OSDynamicCast(IOPowerConnection, next))) - { - if (connection->getReadyFlag() == false) - { - PM_LOG3("[%s] %s: connection not ready\n", - getName(), __FUNCTION__); - continue; - } - if (connection->childHasRequestedPower()) - hasChildren = true; - desiredState = StateMax(connection->getDesiredDomainState(), desiredState); - } - } - iter->release(); - } - if (hasChildren) - updatePowerClient(gIOPMPowerClientChildren, desiredState); - else - removePowerClient(gIOPMPowerClientChildren); - - // Iterate through all power clients to determine the min power state. - - iter = OSCollectionIterator::withCollection(fPowerClients); - if (iter) - { - const OSSymbol * client; - while ((client = (const OSSymbol *) iter->getNextObject())) - { - // Ignore child and driver when override is in effect. - if ((fDeviceOverrideEnabled || - (getPMRequestType() == kIOPMRequestTypeRequestPowerStateOverride)) && - ((client == gIOPMPowerClientChildren) || - (client == gIOPMPowerClientDriver))) - continue; - - // Ignore child proxy when children are present. - if (hasChildren && (client == gIOPMPowerClientChildProxy)) - continue; - - // Advisory tickles are irrelevant unless system is in full wake - if (client == gIOPMPowerClientAdvisoryTickle && - !gIOPMAdvisoryTickleEnabled) - continue; - - desiredState = getPowerStateForClient(client); - assert(desiredState < fNumberOfPowerStates); - PM_LOG1(" %u %s\n", - desiredState, client->getCStringNoCopy()); - - newPowerState = StateMax(newPowerState, desiredState); - - if (client == gIOPMPowerClientDevice) - fDeviceDesire = desiredState; - } - iter->release(); - } - - // Factor in the temporary power desires. - - newPowerState = StateMax(newPowerState, localClamp); - newPowerState = StateMax(newPowerState, fTempClampPowerState); - - // Limit check against max power override. - - newPowerState = StateMin(newPowerState, fOverrideMaxPowerState); - - // Limit check against number of power states. - - if (newPowerState >= fNumberOfPowerStates) - newPowerState = fHighestPowerState; - - fDesiredPowerState = newPowerState; - - PM_LOG1(" temp %u, clamp %u, current %u, new %u\n", - (uint32_t) localClamp, (uint32_t) fTempClampPowerState, - (uint32_t) fCurrentPowerState, newPowerState); - - if (!computeOnly) - { - // Restart idle timer if possible when device desire has increased. - // Or if an advisory desire exists. - - if (fIdleTimerPeriod && fIdleTimerStopped) - { - restartIdleTimer(); - } - - // Invalidate cached tickle power state when desires change, and not - // due to a tickle request. In case the driver has requested a lower - // power state, but the tickle is caching a higher power state which - // will drop future tickles until the cached value is lowered or in- - // validated. The invalidation must occur before the power transition - // to avoid dropping a necessary tickle. 
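The computeDesiredState() body being reformatted in this hunk arbitrates by taking the maximum over all power-client desires, then clamping. A compact standalone model of that arbitration; client names are illustrative strings, and it simplifies by treating the highest valid state as numStates - 1 (the kernel maps through its state ordering):

#include <algorithm>
#include <cstdint>
#include <map>
#include <string>

// Standalone model of the desired-state arbitration: max over client
// desires, with override handling and final clamps.
uint32_t computeDesired(const std::map<std::string, uint32_t>& clients,
                        bool overrideEnabled,
                        uint32_t localClamp,
                        uint32_t tempClamp,
                        uint32_t overrideMax,
                        uint32_t numStates)
{
    uint32_t desired = 0;
    for (const auto& [name, state] : clients) {
        // Child and driver desires are ignored while an override is active.
        if (overrideEnabled && (name == "driver" || name == "children")) {
            continue;
        }
        desired = std::max(desired, state);
    }
    desired = std::max({desired, localClamp, tempClamp}); // temporary desires
    desired = std::min(desired, overrideMax);             // override ceiling
    if (numStates && desired >= numStates) {
        desired = numStates - 1;                          // highest valid state
    }
    return desired;
}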
- - if ((getPMRequestType() != kIOPMRequestTypeActivityTickle) && - (fActivityTicklePowerState != kInvalidTicklePowerState)) - { - IOLockLock(fActivityLock); - fActivityTicklePowerState = kInvalidTicklePowerState; - IOLockUnlock(fActivityLock); - } - } +void +IOService::computeDesiredState( unsigned long localClamp, bool computeOnly ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + uint32_t desiredState = kPowerStateZero; + uint32_t newPowerState = kPowerStateZero; + bool hasChildren = false; + + // Desired power state is always 0 without a controlling driver. + + if (!fNumberOfPowerStates) { + fDesiredPowerState = kPowerStateZero; + return; + } + + // Examine the children's desired power state. + + iter = getChildIterator(gIOPowerPlane); + if (iter) { + while ((next = iter->getNextObject())) { + if ((connection = OSDynamicCast(IOPowerConnection, next))) { + if (connection->getReadyFlag() == false) { + PM_LOG3("[%s] %s: connection not ready\n", + getName(), __FUNCTION__); + continue; + } + if (connection->childHasRequestedPower()) { + hasChildren = true; + } + desiredState = StateMax(connection->getDesiredDomainState(), desiredState); + } + } + iter->release(); + } + if (hasChildren) { + updatePowerClient(gIOPMPowerClientChildren, desiredState); + } else { + removePowerClient(gIOPMPowerClientChildren); + } + + // Iterate through all power clients to determine the min power state. + + iter = OSCollectionIterator::withCollection(fPowerClients); + if (iter) { + const OSSymbol * client; + while ((client = (const OSSymbol *) iter->getNextObject())) { + // Ignore child and driver when override is in effect. + if ((fDeviceOverrideEnabled || + (getPMRequestType() == kIOPMRequestTypeRequestPowerStateOverride)) && + ((client == gIOPMPowerClientChildren) || + (client == gIOPMPowerClientDriver))) { + continue; + } + + // Ignore child proxy when children are present. + if (hasChildren && (client == gIOPMPowerClientChildProxy)) { + continue; + } + + // Advisory tickles are irrelevant unless system is in full wake + if (client == gIOPMPowerClientAdvisoryTickle && + !gIOPMAdvisoryTickleEnabled) { + continue; + } + + desiredState = getPowerStateForClient(client); + assert(desiredState < fNumberOfPowerStates); + PM_LOG1(" %u %s\n", + desiredState, client->getCStringNoCopy()); + + newPowerState = StateMax(newPowerState, desiredState); + + if (client == gIOPMPowerClientDevice) { + fDeviceDesire = desiredState; + } + } + iter->release(); + } + + // Factor in the temporary power desires. + + newPowerState = StateMax(newPowerState, localClamp); + newPowerState = StateMax(newPowerState, fTempClampPowerState); + + // Limit check against max power override. + + newPowerState = StateMin(newPowerState, fOverrideMaxPowerState); + + // Limit check against number of power states. + + if (newPowerState >= fNumberOfPowerStates) { + newPowerState = fHighestPowerState; + } + + fDesiredPowerState = newPowerState; + + PM_LOG1(" temp %u, clamp %u, current %u, new %u\n", + (uint32_t) localClamp, (uint32_t) fTempClampPowerState, + (uint32_t) fCurrentPowerState, newPowerState); + + if (!computeOnly) { + // Restart idle timer if possible when device desire has increased. + // Or if an advisory desire exists. + + if (fIdleTimerPeriod && fIdleTimerStopped) { + restartIdleTimer(); + } + + // Invalidate cached tickle power state when desires change, and not + // due to a tickle request. 
In case the driver has requested a lower + // power state, but the tickle is caching a higher power state which + // will drop future tickles until the cached value is lowered or in- + // validated. The invalidation must occur before the power transition + // to avoid dropping a necessary tickle. + + if ((getPMRequestType() != kIOPMRequestTypeActivityTickle) && + (fActivityTicklePowerState != kInvalidTicklePowerState)) { + IOLockLock(fActivityLock); + fActivityTicklePowerState = kInvalidTicklePowerState; + IOLockUnlock(fActivityLock); + } + } } //********************************************************************************* @@ -2781,12 +2766,14 @@ void IOService::computeDesiredState( unsigned long localClamp, bool computeOnly // //********************************************************************************* -unsigned long IOService::currentPowerConsumption( void ) +unsigned long +IOService::currentPowerConsumption( void ) { - if (!initialized) - return kIOPMUnknown; + if (!initialized) { + return kIOPMUnknown; + } - return fCurrentPowerConsumption; + return fCurrentPowerConsumption; } //********************************************************************************* @@ -2794,9 +2781,10 @@ unsigned long IOService::currentPowerConsumption( void ) //********************************************************************************* #ifndef __LP64__ -IOWorkLoop * IOService::getPMworkloop( void ) +IOWorkLoop * +IOService::getPMworkloop( void ) { - return gIOPMWorkLoop; + return gIOPMWorkLoop; } #endif @@ -2808,71 +2796,63 @@ IOWorkLoop * IOService::getPMworkloop( void ) static void applyToPowerChildren( - IOService * service, - IOServiceApplierFunction applier, - void * context, - IOOptionBits options ) -{ - PM_ASSERT_IN_GATE(); - - IORegistryEntry * entry; - IORegistryIterator * iter; - IOPowerConnection * connection; - IOService * child; - - iter = IORegistryIterator::iterateOver(service, gIOPowerPlane, options); - if (iter) - { - while ((entry = iter->getNextObject())) - { - // Get child of IOPowerConnection objects - if ((connection = OSDynamicCast(IOPowerConnection, entry))) - { - child = (IOService *) connection->copyChildEntry(gIOPowerPlane); - if (child) - { - (*applier)(child, context); - child->release(); - } - } - } - iter->release(); - } + IOService * service, + IOServiceApplierFunction applier, + void * context, + IOOptionBits options ) +{ + PM_ASSERT_IN_GATE(); + + IORegistryEntry * entry; + IORegistryIterator * iter; + IOPowerConnection * connection; + IOService * child; + + iter = IORegistryIterator::iterateOver(service, gIOPowerPlane, options); + if (iter) { + while ((entry = iter->getNextObject())) { + // Get child of IOPowerConnection objects + if ((connection = OSDynamicCast(IOPowerConnection, entry))) { + child = (IOService *) connection->copyChildEntry(gIOPowerPlane); + if (child) { + (*applier)(child, context); + child->release(); + } + } + } + iter->release(); + } } static void applyToPowerParent( - IOService * service, - IOServiceApplierFunction applier, - void * context, - IOOptionBits options ) -{ - PM_ASSERT_IN_GATE(); - - IORegistryEntry * entry; - IORegistryIterator * iter; - IOPowerConnection * connection; - IOService * parent; - - iter = IORegistryIterator::iterateOver(service, gIOPowerPlane, - options | kIORegistryIterateParents); - if (iter) - { - while ((entry = iter->getNextObject())) - { - // Get child of IOPowerConnection objects - if ((connection = OSDynamicCast(IOPowerConnection, entry))) - { - parent = (IOService *) 
connection->copyParentEntry(gIOPowerPlane); - if (parent) - { - (*applier)(parent, context); - parent->release(); - } - } - } - iter->release(); - } + IOService * service, + IOServiceApplierFunction applier, + void * context, + IOOptionBits options ) +{ + PM_ASSERT_IN_GATE(); + + IORegistryEntry * entry; + IORegistryIterator * iter; + IOPowerConnection * connection; + IOService * parent; + + iter = IORegistryIterator::iterateOver(service, gIOPowerPlane, + options | kIORegistryIterateParents); + if (iter) { + while ((entry = iter->getNextObject())) { + // Get child of IOPowerConnection objects + if ((connection = OSDynamicCast(IOPowerConnection, entry))) { + parent = (IOService *) connection->copyParentEntry(gIOPowerPlane); + if (parent) { + (*applier)(parent, context); + parent->release(); + } + } + } + iter->release(); + } } #endif /* NOT_YET */ @@ -2880,9 +2860,10 @@ applyToPowerParent( // MARK: - // MARK: Activity Tickle & Idle Timer -void IOService::setAdvisoryTickleEnable( bool enable ) +void +IOService::setAdvisoryTickleEnable( bool enable ) { - gIOPMAdvisoryTickleEnabled = enable; + gIOPMAdvisoryTickleEnabled = enable; } //********************************************************************************* @@ -2895,172 +2876,150 @@ void IOService::setAdvisoryTickleEnable( bool enable ) // should be intercepted by a subclass. //********************************************************************************* -bool IOService::activityTickle( unsigned long type, unsigned long stateNumber ) +bool +IOService::activityTickle( unsigned long type, unsigned long stateNumber ) { - IOPMRequest * request; - bool noPowerChange = true; - uint32_t tickleFlags; - - if (!initialized) - return true; // no power change - - if ((type == kIOPMSuperclassPolicy1) && StateOrder(stateNumber)) - { - IOLockLock(fActivityLock); - - // Record device activity for the idle timer handler. - - fDeviceWasActive = true; - fActivityTickleCount++; - clock_get_uptime(&fDeviceActiveTimestamp); - - PM_ACTION_0(actionActivityTickle); + IOPMRequest * request; + bool noPowerChange = true; + uint32_t tickleFlags; - // Record the last tickle power state. - // This helps to filter out redundant tickles as - // this function may be called from the data path. + if (!initialized) { + return true; // no power change + } + if ((type == kIOPMSuperclassPolicy1) && StateOrder(stateNumber)) { + IOLockLock(fActivityLock); - if ((fActivityTicklePowerState == kInvalidTicklePowerState) - || StateOrder(fActivityTicklePowerState) < StateOrder(stateNumber)) - { - fActivityTicklePowerState = stateNumber; - noPowerChange = false; + // Record device activity for the idle timer handler. - tickleFlags = kTickleTypeActivity | kTickleTypePowerRise; - request = acquirePMRequest( this, kIOPMRequestTypeActivityTickle ); - if (request) - { - request->fArg0 = (void *) stateNumber; - request->fArg1 = (void *)(uintptr_t) tickleFlags; - request->fArg2 = (void *)(uintptr_t) gIOPMTickleGeneration; - submitPMRequest(request); - } - } + fDeviceWasActive = true; + fActivityTickleCount++; + clock_get_uptime(&fDeviceActiveTimestamp); - IOLockUnlock(fActivityLock); - } + PM_ACTION_0(actionActivityTickle); - else if ((type == kIOPMActivityTickleTypeAdvisory) && - ((stateNumber = fDeviceUsablePowerState) != kPowerStateZero)) - { - IOLockLock(fActivityLock); + // Record the last tickle power state. + // This helps to filter out redundant tickles as + // this function may be called from the data path. 
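The comment above explains why activityTickle() caches the last tickle power state: the call can arrive from the data path at high frequency, so redundant tickles must be filtered before a PM request is ever built. A standalone sketch of that filter, simplified to compare raw state values where the kernel compares StateOrder():

#include <cstdint>

// Model of the activityTickle() fast-path filter: only a first or rising
// tickle results in a submitted PM request.
constexpr uint32_t kInvalidTickle = ~0u;

struct TickleFilter {
    uint32_t cached = kInvalidTickle;  // mirrors fActivityTicklePowerState

    // Returns true when a PM request must actually be submitted.
    bool tickle(uint32_t state) {
        if (cached == kInvalidTickle || cached < state) {
            cached = state;     // record highest level requested so far
            return true;        // first or rising tickle -> submit request
        }
        return false;           // redundant tickle, filtered out
    }
    void invalidate() { cached = kInvalidTickle; } // e.g. after a power drop
};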
- fAdvisoryTickled = true; + if ((fActivityTicklePowerState == kInvalidTicklePowerState) + || StateOrder(fActivityTicklePowerState) < StateOrder(stateNumber)) { + fActivityTicklePowerState = stateNumber; + noPowerChange = false; - if (fAdvisoryTicklePowerState != stateNumber) - { - fAdvisoryTicklePowerState = stateNumber; - noPowerChange = false; + tickleFlags = kTickleTypeActivity | kTickleTypePowerRise; + request = acquirePMRequest( this, kIOPMRequestTypeActivityTickle ); + if (request) { + request->fArg0 = (void *) stateNumber; + request->fArg1 = (void *)(uintptr_t) tickleFlags; + request->fArg2 = (void *)(uintptr_t) gIOPMTickleGeneration; + submitPMRequest(request); + } + } - tickleFlags = kTickleTypeAdvisory | kTickleTypePowerRise; - request = acquirePMRequest( this, kIOPMRequestTypeActivityTickle ); - if (request) - { - request->fArg0 = (void *) stateNumber; - request->fArg1 = (void *)(uintptr_t) tickleFlags; - request->fArg2 = (void *)(uintptr_t) gIOPMTickleGeneration; - submitPMRequest(request); - } - } + IOLockUnlock(fActivityLock); + } else if ((type == kIOPMActivityTickleTypeAdvisory) && + ((stateNumber = fDeviceUsablePowerState) != kPowerStateZero)) { + IOLockLock(fActivityLock); + + fAdvisoryTickled = true; + + if (fAdvisoryTicklePowerState != stateNumber) { + fAdvisoryTicklePowerState = stateNumber; + noPowerChange = false; + + tickleFlags = kTickleTypeAdvisory | kTickleTypePowerRise; + request = acquirePMRequest( this, kIOPMRequestTypeActivityTickle ); + if (request) { + request->fArg0 = (void *) stateNumber; + request->fArg1 = (void *)(uintptr_t) tickleFlags; + request->fArg2 = (void *)(uintptr_t) gIOPMTickleGeneration; + submitPMRequest(request); + } + } - IOLockUnlock(fActivityLock); - } + IOLockUnlock(fActivityLock); + } - // Returns false if the activityTickle might cause a transition to a - // higher powered state, true otherwise. + // Returns false if the activityTickle might cause a transition to a + // higher powered state, true otherwise. - return noPowerChange; + return noPowerChange; } //********************************************************************************* // [private] handleActivityTickle //********************************************************************************* -void IOService::handleActivityTickle( IOPMRequest * request ) -{ - uint32_t ticklePowerState = (uint32_t)(uintptr_t) request->fArg0; - uint32_t tickleFlags = (uint32_t)(uintptr_t) request->fArg1; - uint32_t tickleGeneration = (uint32_t)(uintptr_t) request->fArg2; - bool adjustPower = false; - - PM_ASSERT_IN_GATE(); - if (fResetPowerStateOnWake && (tickleGeneration != gIOPMTickleGeneration)) - { - // Drivers that don't want power restored on wake will drop any - // tickles that pre-dates the current system wake. The model is - // that each wake is a fresh start, with power state depressed - // until a new tickle or an explicit power up request from the - // driver. It is possible for the PM work loop to enter the - // system sleep path with tickle requests queued. 
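handleActivityTickle() below stamps every tickle request with gIOPMTickleGeneration and drops stale ones for drivers that opted out of power restoration on wake. A standalone model of that generation gate, with illustrative names:

#include <cstdint>

// Model of the tickle-generation check: each system wake bumps a global
// generation, and a tickle stamped with an older generation pre-dates the
// current wake, so it must not raise the power state now.
struct TickleGenerationGate {
    uint32_t systemGeneration = 0;    // bumped on every system wake
    bool resetPowerStateOnWake = true;

    void systemWoke() { systemGeneration++; }

    bool shouldProcess(uint32_t tickleGeneration) const {
        // Each wake is a fresh start: power stays depressed until a new
        // tickle or an explicit power-up request arrives.
        return !resetPowerStateOnWake || tickleGeneration == systemGeneration;
    }
};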
- - return; - } - - if (tickleFlags & kTickleTypeActivity) - { - IOPMPowerStateIndex deviceDesireOrder = StateOrder(fDeviceDesire); - uint32_t idleTimerGeneration = ticklePowerState; // kTickleTypePowerDrop - - if (tickleFlags & kTickleTypePowerRise) - { - if ((StateOrder(ticklePowerState) > deviceDesireOrder) && - (ticklePowerState < fNumberOfPowerStates)) - { - fIdleTimerMinPowerState = ticklePowerState; - updatePowerClient(gIOPMPowerClientDevice, ticklePowerState); - adjustPower = true; - } - } - else if ((deviceDesireOrder > StateOrder(fIdleTimerMinPowerState)) && - (idleTimerGeneration == fIdleTimerGeneration)) - { - // Power drop due to idle timer expiration. - // Do not allow idle timer to reduce power below tickle power. - // This prevents the idle timer from decreasing the device desire - // to zero and cancelling the effect of a pre-sleep tickle when - // system wakes up to doze state, while the device is unable to - // raise its power state to satisfy the tickle. - - deviceDesireOrder--; - if (deviceDesireOrder < fNumberOfPowerStates) - { - ticklePowerState = fPowerStates[deviceDesireOrder].stateOrderToIndex; - updatePowerClient(gIOPMPowerClientDevice, ticklePowerState); - adjustPower = true; - } - } - } - else // advisory tickle - { - if (tickleFlags & kTickleTypePowerRise) - { - if ((ticklePowerState == fDeviceUsablePowerState) && - (ticklePowerState < fNumberOfPowerStates)) - { - updatePowerClient(gIOPMPowerClientAdvisoryTickle, ticklePowerState); - fHasAdvisoryDesire = true; - fAdvisoryTickleUsed = true; - adjustPower = true; - } - else - { - IOLockLock(fActivityLock); - fAdvisoryTicklePowerState = kInvalidTicklePowerState; - IOLockUnlock(fActivityLock); - } - } - else if (fHasAdvisoryDesire) - { - removePowerClient(gIOPMPowerClientAdvisoryTickle); - fHasAdvisoryDesire = false; - adjustPower = true; - } - } - - if (adjustPower) - { - adjustPowerState(); - } +void +IOService::handleActivityTickle( IOPMRequest * request ) +{ + uint32_t ticklePowerState = (uint32_t)(uintptr_t) request->fArg0; + uint32_t tickleFlags = (uint32_t)(uintptr_t) request->fArg1; + uint32_t tickleGeneration = (uint32_t)(uintptr_t) request->fArg2; + bool adjustPower = false; + + PM_ASSERT_IN_GATE(); + if (fResetPowerStateOnWake && (tickleGeneration != gIOPMTickleGeneration)) { + // Drivers that don't want power restored on wake will drop any + // tickles that pre-dates the current system wake. The model is + // that each wake is a fresh start, with power state depressed + // until a new tickle or an explicit power up request from the + // driver. It is possible for the PM work loop to enter the + // system sleep path with tickle requests queued. + + return; + } + + if (tickleFlags & kTickleTypeActivity) { + IOPMPowerStateIndex deviceDesireOrder = StateOrder(fDeviceDesire); + uint32_t idleTimerGeneration = ticklePowerState; // kTickleTypePowerDrop + + if (tickleFlags & kTickleTypePowerRise) { + if ((StateOrder(ticklePowerState) > deviceDesireOrder) && + (ticklePowerState < fNumberOfPowerStates)) { + fIdleTimerMinPowerState = ticklePowerState; + updatePowerClient(gIOPMPowerClientDevice, ticklePowerState); + adjustPower = true; + } + } else if ((deviceDesireOrder > StateOrder(fIdleTimerMinPowerState)) && + (idleTimerGeneration == fIdleTimerGeneration)) { + // Power drop due to idle timer expiration. + // Do not allow idle timer to reduce power below tickle power. 
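The power-drop branch here steps the device desire down one level per idle-timer expiration, but never below the state pinned by the last activity tickle. A standalone sketch of that floor, simplified so state order equals state index (the kernel converts via stateOrderToIndex):

#include <cstdint>

// Model of the idle-timer power drop: one level per expiration, floored by
// the minimum state the last tickle demanded (fIdleTimerMinPowerState).
struct IdleDropModel {
    uint32_t deviceDesire = 0;      // current device-client desire
    uint32_t tickleFloor  = 0;      // min state demanded by last tickle

    void idleTimerExpired() {
        if (deviceDesire > tickleFloor) {
            deviceDesire--;         // drop one level per idle period
        }
        // else: hold power; a pre-sleep tickle must keep its effect even
        // while the device cannot raise its state to satisfy it
    }
};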
+ // This prevents the idle timer from decreasing the device desire + // to zero and cancelling the effect of a pre-sleep tickle when + // system wakes up to doze state, while the device is unable to + // raise its power state to satisfy the tickle. + + deviceDesireOrder--; + if (deviceDesireOrder < fNumberOfPowerStates) { + ticklePowerState = fPowerStates[deviceDesireOrder].stateOrderToIndex; + updatePowerClient(gIOPMPowerClientDevice, ticklePowerState); + adjustPower = true; + } + } + } else { // advisory tickle + if (tickleFlags & kTickleTypePowerRise) { + if ((ticklePowerState == fDeviceUsablePowerState) && + (ticklePowerState < fNumberOfPowerStates)) { + updatePowerClient(gIOPMPowerClientAdvisoryTickle, ticklePowerState); + fHasAdvisoryDesire = true; + fAdvisoryTickleUsed = true; + adjustPower = true; + } else { + IOLockLock(fActivityLock); + fAdvisoryTicklePowerState = kInvalidTicklePowerState; + IOLockUnlock(fActivityLock); + } + } else if (fHasAdvisoryDesire) { + removePowerClient(gIOPMPowerClientAdvisoryTickle); + fHasAdvisoryDesire = false; + adjustPower = true; + } + } + + if (adjustPower) { + adjustPowerState(); + } } //****************************************************************************** @@ -3070,40 +3029,46 @@ void IOService::handleActivityTickle( IOPMRequest * request ) // Start the idle timer. Period is in seconds. //****************************************************************************** -IOReturn IOService::setIdleTimerPeriod( unsigned long period ) +IOReturn +IOService::setIdleTimerPeriod( unsigned long period ) { - if (!initialized) - return IOPMNotYetInitialized; + if (!initialized) { + return IOPMNotYetInitialized; + } - OUR_PMLog(kPMLogSetIdleTimerPeriod, period, fIdleTimerPeriod); + OUR_PMLog(kPMLogSetIdleTimerPeriod, period, fIdleTimerPeriod); - IOPMRequest * request = - acquirePMRequest( this, kIOPMRequestTypeSetIdleTimerPeriod ); - if (!request) - return kIOReturnNoMemory; + IOPMRequest * request = + acquirePMRequest( this, kIOPMRequestTypeSetIdleTimerPeriod ); + if (!request) { + return kIOReturnNoMemory; + } - request->fArg0 = (void *) period; - submitPMRequest( request ); + request->fArg0 = (void *) period; + submitPMRequest( request ); - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOService::setIgnoreIdleTimer( bool ignore ) +IOReturn +IOService::setIgnoreIdleTimer( bool ignore ) { - if (!initialized) - return IOPMNotYetInitialized; + if (!initialized) { + return IOPMNotYetInitialized; + } - OUR_PMLog(kIOPMRequestTypeIgnoreIdleTimer, ignore, 0); + OUR_PMLog(kIOPMRequestTypeIgnoreIdleTimer, ignore, 0); - IOPMRequest * request = - acquirePMRequest( this, kIOPMRequestTypeIgnoreIdleTimer ); - if (!request) - return kIOReturnNoMemory; + IOPMRequest * request = + acquirePMRequest( this, kIOPMRequestTypeIgnoreIdleTimer ); + if (!request) { + return kIOReturnNoMemory; + } - request->fArg0 = (void *) ignore; - submitPMRequest( request ); + request->fArg0 = (void *) ignore; + submitPMRequest( request ); - return kIOReturnSuccess; + return kIOReturnSuccess; } //****************************************************************************** @@ -3113,99 +3078,100 @@ IOReturn IOService::setIgnoreIdleTimer( bool ignore ) // next lowest power state. 
//****************************************************************************** -SInt32 IOService::nextIdleTimeout( - AbsoluteTime currentTime, - AbsoluteTime lastActivity, - unsigned int powerState) +SInt32 +IOService::nextIdleTimeout( + AbsoluteTime currentTime, + AbsoluteTime lastActivity, + unsigned int powerState) { - AbsoluteTime delta; - UInt64 delta_ns; - SInt32 delta_secs; - SInt32 delay_secs; + AbsoluteTime delta; + UInt64 delta_ns; + SInt32 delta_secs; + SInt32 delay_secs; - // Calculate time difference using funky macro from clock.h. - delta = currentTime; - SUB_ABSOLUTETIME(&delta, &lastActivity); + // Calculate time difference using funky macro from clock.h. + delta = currentTime; + SUB_ABSOLUTETIME(&delta, &lastActivity); - // Figure it in seconds. - absolutetime_to_nanoseconds(delta, &delta_ns); - delta_secs = (SInt32)(delta_ns / NSEC_PER_SEC); + // Figure it in seconds. + absolutetime_to_nanoseconds(delta, &delta_ns); + delta_secs = (SInt32)(delta_ns / NSEC_PER_SEC); - // Be paranoid about delta somehow exceeding timer period. - if (delta_secs < (int) fIdleTimerPeriod) - delay_secs = (int) fIdleTimerPeriod - delta_secs; - else - delay_secs = (int) fIdleTimerPeriod; + // Be paranoid about delta somehow exceeding timer period. + if (delta_secs < (int) fIdleTimerPeriod) { + delay_secs = (int) fIdleTimerPeriod - delta_secs; + } else { + delay_secs = (int) fIdleTimerPeriod; + } - return (SInt32)delay_secs; + return (SInt32)delay_secs; } //********************************************************************************* // [public] start_PM_idle_timer //********************************************************************************* -void IOService::start_PM_idle_timer( void ) +void +IOService::start_PM_idle_timer( void ) { - static const int maxTimeout = 100000; - static const int minTimeout = 1; - AbsoluteTime uptime, deadline; - SInt32 idle_in = 0; - boolean_t pending; + static const int maxTimeout = 100000; + static const int minTimeout = 1; + AbsoluteTime uptime, deadline; + SInt32 idle_in = 0; + boolean_t pending; - if (!initialized || !fIdleTimerPeriod) - return; + if (!initialized || !fIdleTimerPeriod) { + return; + } - IOLockLock(fActivityLock); + IOLockLock(fActivityLock); - clock_get_uptime(&uptime); + clock_get_uptime(&uptime); - // Subclasses may modify idle sleep algorithm - idle_in = nextIdleTimeout(uptime, fDeviceActiveTimestamp, fCurrentPowerState); + // Subclasses may modify idle sleep algorithm + idle_in = nextIdleTimeout(uptime, fDeviceActiveTimestamp, fCurrentPowerState); - // Check for out-of range responses - if (idle_in > maxTimeout) - { - // use standard implementation - idle_in = IOService::nextIdleTimeout(uptime, - fDeviceActiveTimestamp, - fCurrentPowerState); - } else if (idle_in < minTimeout) { - idle_in = fIdleTimerPeriod; - } + // Check for out-of range responses + if (idle_in > maxTimeout) { + // use standard implementation + idle_in = IOService::nextIdleTimeout(uptime, + fDeviceActiveTimestamp, + fCurrentPowerState); + } else if (idle_in < minTimeout) { + idle_in = fIdleTimerPeriod; + } - IOLockUnlock(fActivityLock); + IOLockUnlock(fActivityLock); - fNextIdleTimerPeriod = idle_in; - fIdleTimerStartTime = uptime; + fNextIdleTimerPeriod = idle_in; + fIdleTimerStartTime = uptime; - retain(); - clock_interval_to_absolutetime_interval(idle_in, kSecondScale, &deadline); - ADD_ABSOLUTETIME(&deadline, &uptime); - pending = thread_call_enter_delayed(fIdleTimer, deadline); - if (pending) release(); + retain(); + 
clock_interval_to_absolutetime_interval(idle_in, kSecondScale, &deadline); + ADD_ABSOLUTETIME(&deadline, &uptime); + pending = thread_call_enter_delayed(fIdleTimer, deadline); + if (pending) { + release(); + } } //********************************************************************************* // [private] restartIdleTimer //********************************************************************************* -void IOService::restartIdleTimer( void ) +void +IOService::restartIdleTimer( void ) { - if (fDeviceDesire != kPowerStateZero) - { - fIdleTimerStopped = false; - fActivityTickleCount = 0; - start_PM_idle_timer(); - } - else if (fHasAdvisoryDesire) - { - fIdleTimerStopped = false; - start_PM_idle_timer(); - } - else - { - fIdleTimerStopped = true; - } + if (fDeviceDesire != kPowerStateZero) { + fIdleTimerStopped = false; + fActivityTickleCount = 0; + start_PM_idle_timer(); + } else if (fHasAdvisoryDesire) { + fIdleTimerStopped = false; + start_PM_idle_timer(); + } else { + fIdleTimerStopped = true; + } } //********************************************************************************* @@ -3214,17 +3180,18 @@ void IOService::restartIdleTimer( void ) static void idle_timer_expired( - thread_call_param_t arg0, thread_call_param_t arg1 ) + thread_call_param_t arg0, thread_call_param_t arg1 ) { - IOService * me = (IOService *) arg0; + IOService * me = (IOService *) arg0; - if (gIOPMWorkLoop) - gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, me, - &IOService::idleTimerExpired), - me); + if (gIOPMWorkLoop) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, me, + &IOService::idleTimerExpired), + me); + } - me->release(); + me->release(); } //********************************************************************************* @@ -3235,83 +3202,79 @@ idle_timer_expired( // activity, switch to the next lower power state and restart the timer. //********************************************************************************* -void IOService::idleTimerExpired( void ) +void +IOService::idleTimerExpired( void ) { - IOPMRequest * request; - bool restartTimer = true; - uint32_t tickleFlags; - - if ( !initialized || !fIdleTimerPeriod || fIdleTimerStopped || - fLockedFlags.PMStop ) - return; + IOPMRequest * request; + bool restartTimer = true; + uint32_t tickleFlags; - fIdleTimerStartTime = 0; + if (!initialized || !fIdleTimerPeriod || fIdleTimerStopped || + fLockedFlags.PMStop) { + return; + } - IOLockLock(fActivityLock); + fIdleTimerStartTime = 0; - // Check for device activity (tickles) over last timer period. + IOLockLock(fActivityLock); - if (fDeviceWasActive) - { - // Device was active - do not drop power, restart timer. - fDeviceWasActive = false; - } - else if (!fIdleTimerIgnored) - { - // No device activity - drop power state by one level. - // Decrement the cached tickle power state when possible. - // This value may be kInvalidTicklePowerState before activityTickle() - // is called, but the power drop request must be issued regardless. + // Check for device activity (tickles) over last timer period. - if ((fActivityTicklePowerState != kInvalidTicklePowerState) && - (fActivityTicklePowerState != kPowerStateZero)) - fActivityTicklePowerState--; + if (fDeviceWasActive) { + // Device was active - do not drop power, restart timer. + fDeviceWasActive = false; + } else if (!fIdleTimerIgnored) { + // No device activity - drop power state by one level. + // Decrement the cached tickle power state when possible. 
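start_PM_idle_timer() above takes a retain() for the armed callout and releases it immediately when thread_call_enter_delayed() reports the timer was already pending; idle_timer_expired() performs the matching release when the callout fires. A standalone model of that reference-count handshake, with illustrative names:

#include <cstdint>

// Model of the refcount dance around thread_call_enter_delayed(): each
// armed timer owns exactly one reference, released by the expiration
// handler. Re-arming a still-pending timer must not leak a second one.
struct TimerRefModel {
    int refs = 1;                       // the object's own reference
    bool timerPending = false;

    void armTimer() {
        refs++;                         // reference owned by the callout
        bool wasPending = timerPending; // thread_call_enter_delayed() result
        timerPending = true;
        if (wasPending) {
            refs--;                     // prior arming already holds a ref
        }
    }
    void timerFired() {                 // idle_timer_expired()
        timerPending = false;
        refs--;                         // drop the callout's reference
    }
};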
+ // This value may be kInvalidTicklePowerState before activityTickle() + // is called, but the power drop request must be issued regardless. - tickleFlags = kTickleTypeActivity | kTickleTypePowerDrop; - request = acquirePMRequest( this, kIOPMRequestTypeActivityTickle ); - if (request) - { - request->fArg0 = (void *)(uintptr_t) fIdleTimerGeneration; - request->fArg1 = (void *)(uintptr_t) tickleFlags; - request->fArg2 = (void *)(uintptr_t) gIOPMTickleGeneration; - submitPMRequest( request ); - - // Do not restart timer until after the tickle request has been - // processed. - - restartTimer = false; - } - } - - if (fAdvisoryTickled) - { - fAdvisoryTickled = false; - } - else if (fHasAdvisoryDesire) - { - // Want new tickles to turn into pm request after we drop the lock - fAdvisoryTicklePowerState = kInvalidTicklePowerState; + if ((fActivityTicklePowerState != kInvalidTicklePowerState) && + (fActivityTicklePowerState != kPowerStateZero)) { + fActivityTicklePowerState--; + } - tickleFlags = kTickleTypeAdvisory | kTickleTypePowerDrop; - request = acquirePMRequest( this, kIOPMRequestTypeActivityTickle ); - if (request) - { - request->fArg0 = (void *)(uintptr_t) fIdleTimerGeneration; - request->fArg1 = (void *)(uintptr_t) tickleFlags; - request->fArg2 = (void *)(uintptr_t) gIOPMTickleGeneration; - submitPMRequest( request ); + tickleFlags = kTickleTypeActivity | kTickleTypePowerDrop; + request = acquirePMRequest( this, kIOPMRequestTypeActivityTickle ); + if (request) { + request->fArg0 = (void *)(uintptr_t) fIdleTimerGeneration; + request->fArg1 = (void *)(uintptr_t) tickleFlags; + request->fArg2 = (void *)(uintptr_t) gIOPMTickleGeneration; + submitPMRequest( request ); - // Do not restart timer until after the tickle request has been - // processed. + // Do not restart timer until after the tickle request has been + // processed. - restartTimer = false; - } - } + restartTimer = false; + } + } + + if (fAdvisoryTickled) { + fAdvisoryTickled = false; + } else if (fHasAdvisoryDesire) { + // Want new tickles to turn into pm request after we drop the lock + fAdvisoryTicklePowerState = kInvalidTicklePowerState; + + tickleFlags = kTickleTypeAdvisory | kTickleTypePowerDrop; + request = acquirePMRequest( this, kIOPMRequestTypeActivityTickle ); + if (request) { + request->fArg0 = (void *)(uintptr_t) fIdleTimerGeneration; + request->fArg1 = (void *)(uintptr_t) tickleFlags; + request->fArg2 = (void *)(uintptr_t) gIOPMTickleGeneration; + submitPMRequest( request ); + + // Do not restart timer until after the tickle request has been + // processed. 
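idleTimerExpired() restarts the timer directly only when activity was seen; otherwise it queues a power-drop tickle and leaves the restart to the request path, as the comments here note. A standalone sketch of that flow together with the nextIdleTimeout() delay arithmetic from earlier in this hunk; kIdlePeriodSecs is an illustrative stand-in for fIdleTimerPeriod:

#include <cstdint>

constexpr int32_t kIdlePeriodSecs = 30;   // illustrative fIdleTimerPeriod

// nextIdleTimeout(): fire one idle period after the last activity, with a
// paranoid clamp in case the elapsed time somehow exceeds the period.
int32_t nextTimeoutSecs(uint64_t nowNs, uint64_t lastActivityNs)
{
    int32_t deltaSecs = (int32_t)((nowNs - lastActivityNs) / 1000000000ull);
    return (deltaSecs < kIdlePeriodSecs) ? (kIdlePeriodSecs - deltaSecs)
                                         : kIdlePeriodSecs;
}

// Model of the expiration decision in idleTimerExpired().
struct IdleTimerModel {
    bool deviceWasActive = false;

    bool onExpire() {               // returns true if the timer rearms now
        if (deviceWasActive) {
            deviceWasActive = false;
            return true;            // activity seen: hold power, rearm
        }
        // queue a kTickleTypePowerDrop request; rearm only after it has
        // been processed, so the drop cannot race the next expiration
        return false;
    }
};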
+ + restartTimer = false; + } + } - IOLockUnlock(fActivityLock); + IOLockUnlock(fActivityLock); - if (restartTimer) - start_PM_idle_timer(); + if (restartTimer) { + start_PM_idle_timer(); + } } #ifndef __LP64__ @@ -3319,7 +3282,8 @@ void IOService::idleTimerExpired( void ) // [deprecated] PM_idle_timer_expiration //********************************************************************************* -void IOService::PM_idle_timer_expiration( void ) +void +IOService::PM_idle_timer_expiration( void ) { } @@ -3327,7 +3291,8 @@ void IOService::PM_idle_timer_expiration( void ) // [deprecated] command_received //********************************************************************************* -void IOService::command_received( void *statePtr , void *, void * , void * ) +void +IOService::command_received( void *statePtr, void *, void *, void * ) { } #endif /* !__LP64__ */ @@ -3339,9 +3304,10 @@ void IOService::command_received( void *statePtr , void *, void * , void * ) // power domains will pass it on to their children, etc. //********************************************************************************* -IOReturn IOService::setAggressiveness( unsigned long type, unsigned long newLevel ) +IOReturn +IOService::setAggressiveness( unsigned long type, unsigned long newLevel ) { - return kIOReturnSuccess; + return kIOReturnSuccess; } //********************************************************************************* @@ -3350,14 +3316,16 @@ IOReturn IOService::setAggressiveness( unsigned long type, unsigned long newLeve // Called by the user client. //********************************************************************************* -IOReturn IOService::getAggressiveness( unsigned long type, unsigned long * currentLevel ) +IOReturn +IOService::getAggressiveness( unsigned long type, unsigned long * currentLevel ) { - IOPMrootDomain * rootDomain = getPMRootDomain(); + IOPMrootDomain * rootDomain = getPMRootDomain(); - if (!rootDomain) - return kIOReturnNotReady; + if (!rootDomain) { + return kIOReturnNotReady; + } - return rootDomain->getAggressiveness( type, currentLevel ); + return rootDomain->getAggressiveness( type, currentLevel ); } //********************************************************************************* @@ -3365,12 +3333,14 @@ IOReturn IOService::getAggressiveness( unsigned long type, unsigned long * curre // //********************************************************************************* -UInt32 IOService::getPowerState( void ) +UInt32 +IOService::getPowerState( void ) { - if (!initialized) - return kPowerStateZero; + if (!initialized) { + return kPowerStateZero; + } - return fCurrentPowerState; + return fCurrentPowerState; } #ifndef __LP64__ @@ -3381,75 +3351,67 @@ UInt32 IOService::getPowerState( void ) // power domains will pass it on to their children, etc. 
//********************************************************************************* -IOReturn IOService::systemWake( void ) -{ - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - IOService * theChild; - - iter = getChildIterator(gIOPowerPlane); - if ( iter ) - { - while ( (next = iter->getNextObject()) ) - { - if ( (connection = OSDynamicCast(IOPowerConnection, next)) ) - { - if (connection->getReadyFlag() == false) - { - PM_LOG3("[%s] %s: connection not ready\n", - getName(), __FUNCTION__); - continue; - } - - theChild = (IOService *)connection->copyChildEntry(gIOPowerPlane); - if ( theChild ) - { - theChild->systemWake(); - theChild->release(); - } - } - } - iter->release(); - } - - if ( fControllingDriver != NULL ) - { - if ( fControllingDriver->didYouWakeSystem() ) - { - makeUsable(); - } - } - - return IOPMNoErr; +IOReturn +IOService::systemWake( void ) +{ + OSIterator * iter; + OSObject * next; + IOPowerConnection * connection; + IOService * theChild; + + iter = getChildIterator(gIOPowerPlane); + if (iter) { + while ((next = iter->getNextObject())) { + if ((connection = OSDynamicCast(IOPowerConnection, next))) { + if (connection->getReadyFlag() == false) { + PM_LOG3("[%s] %s: connection not ready\n", + getName(), __FUNCTION__); + continue; + } + + theChild = (IOService *)connection->copyChildEntry(gIOPowerPlane); + if (theChild) { + theChild->systemWake(); + theChild->release(); + } + } + } + iter->release(); + } + + if (fControllingDriver != NULL) { + if (fControllingDriver->didYouWakeSystem()) { + makeUsable(); + } + } + + return IOPMNoErr; } //********************************************************************************* // [deprecated] temperatureCriticalForZone //********************************************************************************* -IOReturn IOService::temperatureCriticalForZone( IOService * whichZone ) -{ - IOService * theParent; - IOService * theNub; - - OUR_PMLog(kPMLogCriticalTemp, 0, 0); - - if ( inPlane(gIOPowerPlane) && !IS_PM_ROOT ) - { - theNub = (IOService *)copyParentEntry(gIOPowerPlane); - if ( theNub ) - { - theParent = (IOService *)theNub->copyParentEntry(gIOPowerPlane); - theNub->release(); - if ( theParent ) - { - theParent->temperatureCriticalForZone(whichZone); - theParent->release(); - } - } - } - return IOPMNoErr; +IOReturn +IOService::temperatureCriticalForZone( IOService * whichZone ) +{ + IOService * theParent; + IOService * theNub; + + OUR_PMLog(kPMLogCriticalTemp, 0, 0); + + if (inPlane(gIOPowerPlane) && !IS_PM_ROOT) { + theNub = (IOService *)copyParentEntry(gIOPowerPlane); + if (theNub) { + theParent = (IOService *)theNub->copyParentEntry(gIOPowerPlane); + theNub->release(); + if (theParent) { + theParent->temperatureCriticalForZone(whichZone); + theParent->release(); + } + } + } + return IOPMNoErr; } #endif /* !__LP64__ */ @@ -3462,476 +3424,460 @@ IOReturn IOService::temperatureCriticalForZone( IOService * whichZone ) // All power state changes starts here. 
//********************************************************************************* -IOReturn IOService::startPowerChange( - IOPMPowerChangeFlags changeFlags, - IOPMPowerStateIndex powerState, - IOPMPowerFlags domainFlags, - IOPowerConnection * parentConnection, - IOPMPowerFlags parentFlags ) -{ - PM_ASSERT_IN_GATE(); - assert( fMachineState == kIOPM_Finished ); - assert( powerState < fNumberOfPowerStates ); - - if (powerState >= fNumberOfPowerStates) - return IOPMAckImplied; - - fIsPreChange = true; - PM_ACTION_2(actionPowerChangeOverride, &powerState, &changeFlags); - - if (changeFlags & kIOPMExpireIdleTimer) - { - // Root domain requested removal of tickle influence - if (StateOrder(fDeviceDesire) > StateOrder(powerState)) - { - // Reset device desire down to the clamped power state - updatePowerClient(gIOPMPowerClientDevice, powerState); - computeDesiredState(kPowerStateZero, true); - - // Invalidate tickle cache so the next tickle will issue a request - IOLockLock(fActivityLock); - fDeviceWasActive = false; - fActivityTicklePowerState = kInvalidTicklePowerState; - IOLockUnlock(fActivityLock); - - fIdleTimerMinPowerState = kPowerStateZero; - } - } - - // Root domain's override handler may cancel the power change by - // setting the kIOPMNotDone flag. - - if (changeFlags & kIOPMNotDone) - return IOPMAckImplied; - - // Forks to either Driver or Parent initiated power change paths. - - fHeadNoteChangeFlags = changeFlags; - fHeadNotePowerState = powerState; - fHeadNotePowerArrayEntry = &fPowerStates[ powerState ]; - fHeadNoteParentConnection = NULL; - - if (changeFlags & kIOPMSelfInitiated) - { - if (changeFlags & kIOPMSynchronize) - OurSyncStart(); - else - OurChangeStart(); - return 0; - } - else - { - assert(changeFlags & kIOPMParentInitiated); - fHeadNoteDomainFlags = domainFlags; - fHeadNoteParentFlags = parentFlags; - fHeadNoteParentConnection = parentConnection; - return ParentChangeStart(); - } +IOReturn +IOService::startPowerChange( + IOPMPowerChangeFlags changeFlags, + IOPMPowerStateIndex powerState, + IOPMPowerFlags domainFlags, + IOPowerConnection * parentConnection, + IOPMPowerFlags parentFlags ) +{ + PM_ASSERT_IN_GATE(); + assert( fMachineState == kIOPM_Finished ); + assert( powerState < fNumberOfPowerStates ); + + if (powerState >= fNumberOfPowerStates) { + return IOPMAckImplied; + } + + fIsPreChange = true; + PM_ACTION_2(actionPowerChangeOverride, &powerState, &changeFlags); + + if (changeFlags & kIOPMExpireIdleTimer) { + // Root domain requested removal of tickle influence + if (StateOrder(fDeviceDesire) > StateOrder(powerState)) { + // Reset device desire down to the clamped power state + updatePowerClient(gIOPMPowerClientDevice, powerState); + computeDesiredState(kPowerStateZero, true); + + // Invalidate tickle cache so the next tickle will issue a request + IOLockLock(fActivityLock); + fDeviceWasActive = false; + fActivityTicklePowerState = kInvalidTicklePowerState; + IOLockUnlock(fActivityLock); + + fIdleTimerMinPowerState = kPowerStateZero; + } + } + + // Root domain's override handler may cancel the power change by + // setting the kIOPMNotDone flag. + + if (changeFlags & kIOPMNotDone) { + return IOPMAckImplied; + } + + // Forks to either Driver or Parent initiated power change paths. 
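The tail of startPowerChange() forks on the change flags: self-initiated changes take the "our change" or "our sync" path, while parent-initiated changes carry the domain and parent flags into ParentChangeStart(). A standalone sketch of that fork; the flag bit values here are assumptions for illustration only:

#include <cstdint>

enum ChangeFlags : uint32_t {
    kSelfInitiated   = 1u << 0,   // illustrative bit assignments
    kParentInitiated = 1u << 1,
    kSynchronize     = 1u << 2,
};

enum class ChangePath { OurChange, OurSync, ParentChange };

// Model of the fork at the bottom of startPowerChange().
ChangePath selectPath(uint32_t flags)
{
    if (flags & kSelfInitiated) {
        return (flags & kSynchronize) ? ChangePath::OurSync
                                      : ChangePath::OurChange;
    }
    // the real code asserts (flags & kIOPMParentInitiated) here
    return ChangePath::ParentChange;
}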
+ + fHeadNoteChangeFlags = changeFlags; + fHeadNotePowerState = powerState; + fHeadNotePowerArrayEntry = &fPowerStates[powerState]; + fHeadNoteParentConnection = NULL; + + if (changeFlags & kIOPMSelfInitiated) { + if (changeFlags & kIOPMSynchronize) { + OurSyncStart(); + } else { + OurChangeStart(); + } + return 0; + } else { + assert(changeFlags & kIOPMParentInitiated); + fHeadNoteDomainFlags = domainFlags; + fHeadNoteParentFlags = parentFlags; + fHeadNoteParentConnection = parentConnection; + return ParentChangeStart(); + } } //********************************************************************************* // [private] notifyInterestedDrivers //********************************************************************************* -bool IOService::notifyInterestedDrivers( void ) -{ - IOPMinformee * informee; - IOPMinformeeList * list = fInterestedDrivers; - DriverCallParam * param; - IOItemCount count; - IOItemCount skipCnt = 0; - - PM_ASSERT_IN_GATE(); - assert( fDriverCallParamCount == 0 ); - assert( fHeadNotePendingAcks == 0 ); - - fHeadNotePendingAcks = 0; - - count = list->numberOfItems(); - if (!count) - goto done; // no interested drivers - - // Allocate an array of interested drivers and their return values - // for the callout thread. Everything else is still "owned" by the - // PM work loop, which can run to process acknowledgePowerChange() - // responses. - - param = (DriverCallParam *) fDriverCallParamPtr; - if (count > fDriverCallParamSlots) - { - if (fDriverCallParamSlots) - { - assert(fDriverCallParamPtr); - IODelete(fDriverCallParamPtr, DriverCallParam, fDriverCallParamSlots); - fDriverCallParamPtr = 0; - fDriverCallParamSlots = 0; - } - - param = IONew(DriverCallParam, count); - if (!param) - goto done; // no memory - - fDriverCallParamPtr = (void *) param; - fDriverCallParamSlots = count; - } - - informee = list->firstInList(); - assert(informee); - for (IOItemCount i = 0; i < count; i++) - { - if (fInitialSetPowerState || (fHeadNoteChangeFlags & kIOPMInitialPowerChange)) { - // Skip notifying self, if 'kIOPMInitialDeviceState' is set and - // this is the initial power state change - if ((this == informee->whatObject) && - (fHeadNotePowerArrayEntry->capabilityFlags & kIOPMInitialDeviceState)) { - skipCnt++; - continue; - } - } - informee->timer = -1; - param[i].Target = informee; - informee->retain(); - informee = list->nextInList( informee ); - } - - count -= skipCnt; - if (!count) { - goto done; - } - fDriverCallParamCount = count; - fHeadNotePendingAcks = count; - - // Block state machine and wait for callout completion. - assert(!fDriverCallBusy); - fDriverCallBusy = true; - thread_call_enter( fDriverCallEntry ); - return true; +bool +IOService::notifyInterestedDrivers( void ) +{ + IOPMinformee * informee; + IOPMinformeeList * list = fInterestedDrivers; + DriverCallParam * param; + IOItemCount count; + IOItemCount skipCnt = 0; + + PM_ASSERT_IN_GATE(); + assert( fDriverCallParamCount == 0 ); + assert( fHeadNotePendingAcks == 0 ); + + fHeadNotePendingAcks = 0; + + count = list->numberOfItems(); + if (!count) { + goto done; // no interested drivers + } + // Allocate an array of interested drivers and their return values + // for the callout thread. Everything else is still "owned" by the + // PM work loop, which can run to process acknowledgePowerChange() + // responses. 
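notifyInterestedDrivers() keeps a per-driver parameter array alive across notifications and reallocates it only when the interested-driver count outgrows the previous allocation, handing the array to the callout thread while the PM work loop keeps running. A standalone model of that buffer reuse, with std::vector standing in for the IONew/IODelete management:

#include <cstddef>
#include <vector>

struct Param { void* target = nullptr; long result = 0; };

// Model of the DriverCallParam buffer: reuse when large enough, grow
// (free + reallocate) only when the driver count exceeds the slots.
struct CalloutBuffer {
    std::vector<Param> slots;

    Param* prepare(size_t count) {
        if (count > slots.size()) {
            slots.assign(count, Param{});   // grow: old storage replaced
        }
        return slots.data();                // handed to the callout thread
    }
};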
+ + param = (DriverCallParam *) fDriverCallParamPtr; + if (count > fDriverCallParamSlots) { + if (fDriverCallParamSlots) { + assert(fDriverCallParamPtr); + IODelete(fDriverCallParamPtr, DriverCallParam, fDriverCallParamSlots); + fDriverCallParamPtr = 0; + fDriverCallParamSlots = 0; + } + + param = IONew(DriverCallParam, count); + if (!param) { + goto done; // no memory + } + fDriverCallParamPtr = (void *) param; + fDriverCallParamSlots = count; + } + + informee = list->firstInList(); + assert(informee); + for (IOItemCount i = 0; i < count; i++) { + if (fInitialSetPowerState || (fHeadNoteChangeFlags & kIOPMInitialPowerChange)) { + // Skip notifying self, if 'kIOPMInitialDeviceState' is set and + // this is the initial power state change + if ((this == informee->whatObject) && + (fHeadNotePowerArrayEntry->capabilityFlags & kIOPMInitialDeviceState)) { + skipCnt++; + continue; + } + } + informee->timer = -1; + param[i].Target = informee; + informee->retain(); + informee = list->nextInList( informee ); + } + + count -= skipCnt; + if (!count) { + goto done; + } + fDriverCallParamCount = count; + fHeadNotePendingAcks = count; + + // Block state machine and wait for callout completion. + assert(!fDriverCallBusy); + fDriverCallBusy = true; + thread_call_enter( fDriverCallEntry ); + return true; done: - // Return false if there are no interested drivers or could not schedule - // callout thread due to error. - return false; + // Return false if there are no interested drivers or could not schedule + // callout thread due to error. + return false; } //********************************************************************************* // [private] notifyInterestedDriversDone //********************************************************************************* -void IOService::notifyInterestedDriversDone( void ) -{ - IOPMinformee * informee; - IOItemCount count; - DriverCallParam * param; - IOReturn result; - int maxTimeout = 0; - - PM_ASSERT_IN_GATE(); - assert( fDriverCallBusy == false ); - assert( fMachineState == kIOPM_DriverThreadCallDone ); - - param = (DriverCallParam *) fDriverCallParamPtr; - count = fDriverCallParamCount; - - if (param && count) - { - for (IOItemCount i = 0; i < count; i++, param++) - { - informee = (IOPMinformee *) param->Target; - result = param->Result; - - if ((result == IOPMAckImplied) || (result < 0)) - { - // Interested driver return IOPMAckImplied. - // If informee timer is zero, it must have de-registered - // interest during the thread callout. That also drops - // the pending ack count. - - if (fHeadNotePendingAcks && informee->timer) - fHeadNotePendingAcks--; - - informee->timer = 0; - } - else if (informee->timer) - { - assert(informee->timer == -1); - - // Driver has not acked, and has returned a positive result. - // Enforce a minimum permissible timeout value. - // Make the min value large enough so timeout is less likely - // to occur if a driver misinterpreted that the return value - // should be in microsecond units. And make it large enough - // to be noticeable if a driver neglects to ack. - - if (result < kMinAckTimeoutTicks) - result = kMinAckTimeoutTicks; - - informee->timer = (result / (ACK_TIMER_PERIOD / ns_per_us)) + 1; - if (result > maxTimeout) { - maxTimeout = result; - } - } - // else, child has already acked or driver has removed interest, - // and head_note_pendingAcks decremented. - // informee may have been removed from the interested drivers list, - // thus the informee must be retained across the callout. 
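The ack handling above floors a driver's positive return value at a minimum timeout, so a driver that misreads the units (or forgets to ack) still produces a noticeable, bounded wait, and converts the result into ack-timer periods. A standalone sketch of that conversion; both constants are illustrative stand-ins for kMinAckTimeoutTicks and ACK_TIMER_PERIOD / ns_per_us:

#include <cstdint>

constexpr int64_t kAckTimerPeriodUs = 100000; // assumed timer period, us
constexpr int64_t kMinAckTimeoutUs  = 250000; // assumed minimum timeout, us

// Model of the ack-timeout bookkeeping in notifyInterestedDriversDone().
int64_t ackTimerTicks(int64_t driverReturnUs)
{
    if (driverReturnUs < kMinAckTimeoutUs) {
        driverReturnUs = kMinAckTimeoutUs;  // enforce the minimum timeout
    }
    return driverReturnUs / kAckTimerPeriodUs + 1; // whole timer periods
}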
- - informee->release(); - } - - fDriverCallParamCount = 0; - - if ( fHeadNotePendingAcks ) - { - OUR_PMLog(kPMLogStartAckTimer, 0, 0); - start_ack_timer(); - getPMRootDomain()->reset_watchdog_timer(this, maxTimeout/USEC_PER_SEC+1); - } - } - - MS_POP(); // pop the machine state passed to notifyAll() - - // If interest acks are outstanding, block the state machine until - // fHeadNotePendingAcks drops to zero before notifying root domain. - // Otherwise notify root domain directly. - - if (!fHeadNotePendingAcks) - { - notifyRootDomain(); - } - else - { - MS_PUSH(fMachineState); - fMachineState = kIOPM_NotifyChildrenStart; - } +void +IOService::notifyInterestedDriversDone( void ) +{ + IOPMinformee * informee; + IOItemCount count; + DriverCallParam * param; + IOReturn result; + int maxTimeout = 0; + + PM_ASSERT_IN_GATE(); + assert( fDriverCallBusy == false ); + assert( fMachineState == kIOPM_DriverThreadCallDone ); + + param = (DriverCallParam *) fDriverCallParamPtr; + count = fDriverCallParamCount; + + if (param && count) { + for (IOItemCount i = 0; i < count; i++, param++) { + informee = (IOPMinformee *) param->Target; + result = param->Result; + + if ((result == IOPMAckImplied) || (result < 0)) { + // Interested driver return IOPMAckImplied. + // If informee timer is zero, it must have de-registered + // interest during the thread callout. That also drops + // the pending ack count. + + if (fHeadNotePendingAcks && informee->timer) { + fHeadNotePendingAcks--; + } + + informee->timer = 0; + } else if (informee->timer) { + assert(informee->timer == -1); + + // Driver has not acked, and has returned a positive result. + // Enforce a minimum permissible timeout value. + // Make the min value large enough so timeout is less likely + // to occur if a driver misinterpreted that the return value + // should be in microsecond units. And make it large enough + // to be noticeable if a driver neglects to ack. + + if (result < kMinAckTimeoutTicks) { + result = kMinAckTimeoutTicks; + } + + informee->timer = (result / (ACK_TIMER_PERIOD / ns_per_us)) + 1; + if (result > maxTimeout) { + maxTimeout = result; + } + } + // else, child has already acked or driver has removed interest, + // and head_note_pendingAcks decremented. + // informee may have been removed from the interested drivers list, + // thus the informee must be retained across the callout. + + informee->release(); + } + + fDriverCallParamCount = 0; + + if (fHeadNotePendingAcks) { + OUR_PMLog(kPMLogStartAckTimer, 0, 0); + start_ack_timer(); + getPMRootDomain()->reset_watchdog_timer(this, maxTimeout / USEC_PER_SEC + 1); + } + } + + MS_POP(); // pop the machine state passed to notifyAll() + + // If interest acks are outstanding, block the state machine until + // fHeadNotePendingAcks drops to zero before notifying root domain. + // Otherwise notify root domain directly. 
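notifyRootDomain() just below avoids calling the root domain's pre-change hook while holding the PM work-loop gate: it parks the state machine and hands the call to the driver callout thread, resuming when the callout completes. A standalone model of that deferral, with a queue standing in for thread_call_enter():

#include <functional>
#include <queue>

// Model of the deadlock-avoiding deferral: block the state machine, run
// the hook on a separate callout, then resume where we left off.
struct DeferredCall {
    std::queue<std::function<void()>> calloutThread; // stands in for thread_call
    bool stateMachineBlocked = false;

    void notify(std::function<void()> hook) {
        stateMachineBlocked = true;          // fMachineState = ...ThreadCallDone
        calloutThread.push(std::move(hook)); // thread_call_enter(fDriverCallEntry)
    }
    void calloutCompleted() {                // notifyRootDomainDone()
        stateMachineBlocked = false;         // MS_POP(); continue with children
    }
};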
+ + if (!fHeadNotePendingAcks) { + notifyRootDomain(); + } else { + MS_PUSH(fMachineState); + fMachineState = kIOPM_NotifyChildrenStart; + } } //********************************************************************************* // [private] notifyRootDomain //********************************************************************************* -void IOService::notifyRootDomain( void ) +void +IOService::notifyRootDomain( void ) { - assert( fDriverCallBusy == false ); + assert( fDriverCallBusy == false ); - // Only for root domain in the will-change phase - if (!IS_ROOT_DOMAIN || (fMachineState != kIOPM_OurChangeSetPowerState)) - { - notifyChildren(); - return; - } + // Only for root domain in the will-change phase + if (!IS_ROOT_DOMAIN || (fMachineState != kIOPM_OurChangeSetPowerState)) { + notifyChildren(); + return; + } - MS_PUSH(fMachineState); // push notifyAll() machine state - fMachineState = kIOPM_DriverThreadCallDone; + MS_PUSH(fMachineState); // push notifyAll() machine state + fMachineState = kIOPM_DriverThreadCallDone; - // Call IOPMrootDomain::willNotifyPowerChildren() on a thread call - // to avoid a deadlock. - fDriverCallReason = kRootDomainInformPreChange; - fDriverCallBusy = true; - thread_call_enter( fDriverCallEntry ); + // Call IOPMrootDomain::willNotifyPowerChildren() on a thread call + // to avoid a deadlock. + fDriverCallReason = kRootDomainInformPreChange; + fDriverCallBusy = true; + thread_call_enter( fDriverCallEntry ); } -void IOService::notifyRootDomainDone( void ) +void +IOService::notifyRootDomainDone( void ) { - assert( fDriverCallBusy == false ); - assert( fMachineState == kIOPM_DriverThreadCallDone ); + assert( fDriverCallBusy == false ); + assert( fMachineState == kIOPM_DriverThreadCallDone ); - MS_POP(); // pop notifyAll() machine state - notifyChildren(); + MS_POP(); // pop notifyAll() machine state + notifyChildren(); } //********************************************************************************* // [private] notifyChildren //********************************************************************************* -void IOService::notifyChildren( void ) -{ - OSIterator * iter; - OSObject * next; - IOPowerConnection * connection; - OSArray * children = 0; - IOPMrootDomain * rootDomain; - bool delayNotify = false; - - if ((fHeadNotePowerState != fCurrentPowerState) && - (IS_POWER_DROP == fIsPreChange) && - ((rootDomain = getPMRootDomain()) == this)) - { - rootDomain->tracePoint( IS_POWER_DROP ? - kIOPMTracePointSleepPowerPlaneDrivers : - kIOPMTracePointWakePowerPlaneDrivers ); - } - - if (fStrictTreeOrder) - children = OSArray::withCapacity(8); - - // Sum child power consumption in notifyChild() - fHeadNotePowerArrayEntry->staticPower = 0; - - iter = getChildIterator(gIOPowerPlane); - if ( iter ) - { - while ((next = iter->getNextObject())) - { - if ((connection = OSDynamicCast(IOPowerConnection, next))) - { - if (connection->getReadyFlag() == false) - { - PM_LOG3("[%s] %s: connection not ready\n", - getName(), __FUNCTION__); - continue; - } - - // Mechanism to postpone the did-change notification to - // certain power children to order those children last. - // Cannot be used together with strict tree ordering. 
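The delay mechanism described here parks selected power children (e.g. graphics on wake) in an array and notifies them only after every other child has acknowledged. A standalone sketch of that partitioning; the Child fields are illustrative:

#include <vector>

struct Child { bool delayNotification = false; bool notified = false; };

// Model of notifyChildren(): on a post-change (did-change) pass, children
// flagged for delay are batched for later; everyone else is told now.
void notifyChildrenModel(std::vector<Child>& children, bool postChange,
                         std::vector<Child*>& delayed)
{
    for (auto& child : children) {
        if (postChange && child.delayNotification) {
            delayed.push_back(&child);  // notified later, in one batch
            continue;
        }
        child.notified = true;          // immediate notification
    }
}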
-
-                if (!fIsPreChange &&
-                    connection->delayChildNotification &&
-                    getPMRootDomain()->shouldDelayChildNotification(this))
-                {
-                    if (!children)
-                    {
-                        children = OSArray::withCapacity(8);
-                        if (children)
-                            delayNotify = true;
-                    }
-                    if (delayNotify)
-                    {
-                        children->setObject( connection );
-                        continue;
-                    }
-                }
-
-                if (!delayNotify && children)
-                    children->setObject( connection );
-                else
-                    notifyChild( connection );
-            }
-        }
-        iter->release();
-    }
-
-    if (children && (children->getCount() == 0))
-    {
-        children->release();
-        children = 0;
-    }
-    if (children)
-    {
-        assert(fNotifyChildArray == 0);
-        fNotifyChildArray = children;
-        MS_PUSH(fMachineState);
-
-        if (delayNotify)
-        {
-            // Block until all non-delayed children have acked their
-            // notification. Then notify the remaining delayed child
-            // in the array. This is used to hold off graphics child
-            // notification while the rest of the system powers up.
-            // If a hid tickle arrives during this time, the delayed
-            // children are immediately notified and root domain will
-            // not clamp power for dark wake.
-
-            fMachineState = kIOPM_NotifyChildrenDelayed;
-            PM_LOG2("%s: %d children in delayed array\n",
-                getName(), children->getCount());
-        }
-        else
-        {
-            // Child array created to support strict notification order.
-            // Notify children in the array one at a time.
-
-            fMachineState = kIOPM_NotifyChildrenOrdered;
-        }
-    }
+void
+IOService::notifyChildren( void )
+{
+	OSIterator * iter;
+	OSObject * next;
+	IOPowerConnection * connection;
+	OSArray * children = 0;
+	IOPMrootDomain * rootDomain;
+	bool delayNotify = false;
+
+	if ((fHeadNotePowerState != fCurrentPowerState) &&
+	    (IS_POWER_DROP == fIsPreChange) &&
+	    ((rootDomain = getPMRootDomain()) == this)) {
+		rootDomain->tracePoint( IS_POWER_DROP ?
+		    kIOPMTracePointSleepPowerPlaneDrivers :
+		    kIOPMTracePointWakePowerPlaneDrivers );
+	}
+
+	if (fStrictTreeOrder) {
+		children = OSArray::withCapacity(8);
+	}
+
+	// Sum child power consumption in notifyChild()
+	fHeadNotePowerArrayEntry->staticPower = 0;
+
+	iter = getChildIterator(gIOPowerPlane);
+	if (iter) {
+		while ((next = iter->getNextObject())) {
+			if ((connection = OSDynamicCast(IOPowerConnection, next))) {
+				if (connection->getReadyFlag() == false) {
+					PM_LOG3("[%s] %s: connection not ready\n",
+					    getName(), __FUNCTION__);
+					continue;
+				}
+
+				// Mechanism to postpone the did-change notification to
+				// certain power children to order those children last.
+				// Cannot be used together with strict tree ordering.
+
+				if (!fIsPreChange &&
+				    connection->delayChildNotification &&
+				    getPMRootDomain()->shouldDelayChildNotification(this)) {
+					if (!children) {
+						children = OSArray::withCapacity(8);
+						if (children) {
+							delayNotify = true;
+						}
+					}
+					if (delayNotify) {
+						children->setObject( connection );
+						continue;
+					}
+				}
+
+				if (!delayNotify && children) {
+					children->setObject( connection );
+				} else {
+					notifyChild( connection );
+				}
+			}
+		}
+		iter->release();
+	}
+
+	if (children && (children->getCount() == 0)) {
+		children->release();
+		children = 0;
+	}
+	if (children) {
+		assert(fNotifyChildArray == 0);
+		fNotifyChildArray = children;
+		MS_PUSH(fMachineState);
+
+		if (delayNotify) {
+			// Block until all non-delayed children have acked their
+			// notification. Then notify the remaining delayed child
+			// in the array. This is used to hold off graphics child
+			// notification while the rest of the system powers up.
+			// If a HID tickle arrives during this time, the delayed
+			// children are immediately notified and root domain will
+			// not clamp power for dark wake.
+
+			fMachineState = kIOPM_NotifyChildrenDelayed;
+			PM_LOG2("%s: %d children in delayed array\n",
+			    getName(), children->getCount());
+		} else {
+			// Child array created to support strict notification order.
+			// Notify children in the array one at a time.
+
+			fMachineState = kIOPM_NotifyChildrenOrdered;
+		}
+	}
 }
 
 //*********************************************************************************
 // [private] notifyChildrenOrdered
 //*********************************************************************************
 
-void IOService::notifyChildrenOrdered( void )
+void
+IOService::notifyChildrenOrdered( void )
 {
-    PM_ASSERT_IN_GATE();
-    assert(fNotifyChildArray);
-    assert(fMachineState == kIOPM_NotifyChildrenOrdered);
+	PM_ASSERT_IN_GATE();
+	assert(fNotifyChildArray);
+	assert(fMachineState == kIOPM_NotifyChildrenOrdered);
 
-    // Notify one child, wait for it to ack, then repeat for next child.
-    // This is a workaround for some drivers with multiple instances at
-    // the same branch in the power tree, but the driver is slow to power
-    // up unless the tree ordering is observed. Problem observed only on
-    // system wake, not on system sleep.
-    //
-    // We have the ability to power off in reverse child index order.
-    // That works nicely on some machines, but not on all HW configs.
+	// Notify one child, wait for it to ack, then repeat for next child.
+	// This is a workaround for some drivers with multiple instances at
+	// the same branch in the power tree, but the driver is slow to power
+	// up unless the tree ordering is observed. Problem observed only on
+	// system wake, not on system sleep.
+	//
+	// We have the ability to power off in reverse child index order.
+	// That works nicely on some machines, but not on all HW configs.
 
-    if (fNotifyChildArray->getCount())
-    {
-        IOPowerConnection * connection;
-        connection = (IOPowerConnection *) fNotifyChildArray->getObject(0);
-        notifyChild( connection );
-        fNotifyChildArray->removeObject(0);
-    }
-    else
-    {
-        fNotifyChildArray->release();
-        fNotifyChildArray = 0;
+	if (fNotifyChildArray->getCount()) {
+		IOPowerConnection * connection;
+		connection = (IOPowerConnection *) fNotifyChildArray->getObject(0);
+		notifyChild( connection );
+		fNotifyChildArray->removeObject(0);
+	} else {
+		fNotifyChildArray->release();
+		fNotifyChildArray = 0;
 
-        MS_POP(); // pushed by notifyChildren()
-    }
+		MS_POP(); // pushed by notifyChildren()
+	}
 }
 
 //*********************************************************************************
 // [private] notifyChildrenDelayed
 //*********************************************************************************
 
-void IOService::notifyChildrenDelayed( void )
+void
+IOService::notifyChildrenDelayed( void )
 {
-    IOPowerConnection * connection;
+	IOPowerConnection * connection;
 
-    PM_ASSERT_IN_GATE();
-    assert(fNotifyChildArray);
-    assert(fMachineState == kIOPM_NotifyChildrenDelayed);
+	PM_ASSERT_IN_GATE();
+	assert(fNotifyChildArray);
+	assert(fMachineState == kIOPM_NotifyChildrenDelayed);
 
-    // Wait after all non-delayed children and interested drivers have ack'ed,
-    // then notify all delayed children. If notify delay is canceled, child
-    // acks may be outstanding with PM blocked on fHeadNotePendingAcks != 0.
-    // But the handling for either case is identical.
+	// Wait after all non-delayed children and interested drivers have ack'ed,
+	// then notify all delayed children. If notify delay is canceled, child
+	// acks may be outstanding with PM blocked on fHeadNotePendingAcks != 0.
+	// But the handling for either case is identical.
 
-    for (int i = 0; ; i++)
-    {
-        connection = (IOPowerConnection *) fNotifyChildArray->getObject(i);
-        if (!connection)
-            break;
+	for (int i = 0;; i++) {
+		connection = (IOPowerConnection *) fNotifyChildArray->getObject(i);
+		if (!connection) {
+			break;
+		}
 
-        notifyChild( connection );
-    }
+		notifyChild( connection );
+	}
 
-    PM_LOG2("%s: notified delayed children\n", getName());
-    fNotifyChildArray->release();
-    fNotifyChildArray = 0;
+	PM_LOG2("%s: notified delayed children\n", getName());
+	fNotifyChildArray->release();
+	fNotifyChildArray = 0;
 
-    MS_POP(); // pushed by notifyChildren()
+	MS_POP(); // pushed by notifyChildren()
 }
 
 //*********************************************************************************
 // [private] notifyAll
 //*********************************************************************************
 
-IOReturn IOService::notifyAll( uint32_t nextMS )
+IOReturn
+IOService::notifyAll( uint32_t nextMS )
 {
-    // Save the machine state to be restored by notifyInterestedDriversDone()
+	// Save the machine state to be restored by notifyInterestedDriversDone()
 
-    PM_ASSERT_IN_GATE();
-    MS_PUSH(nextMS);
-    fMachineState = kIOPM_DriverThreadCallDone;
-    fDriverCallReason = fIsPreChange ?
-        kDriverCallInformPreChange : kDriverCallInformPostChange;
+	PM_ASSERT_IN_GATE();
+	MS_PUSH(nextMS);
+	fMachineState = kIOPM_DriverThreadCallDone;
+	fDriverCallReason = fIsPreChange ?
+	    kDriverCallInformPreChange : kDriverCallInformPostChange;
 
-    if (!notifyInterestedDrivers())
-        notifyInterestedDriversDone();
+	if (!notifyInterestedDrivers()) {
+		notifyInterestedDriversDone();
+	}
 
-    return IOPMWillAckLater;
+	return IOPMWillAckLater;
 }
 
 //*********************************************************************************
@@ -3940,48 +3886,49 @@ IOReturn IOService::notifyAll( uint32_t nextMS )
 // Thread call context
 //*********************************************************************************
 
-IOReturn IOService::actionDriverCalloutDone(
-    OSObject * target,
-    void * arg0, void * arg1,
-    void * arg2, void * arg3 )
+IOReturn
+IOService::actionDriverCalloutDone(
+	OSObject * target,
+	void * arg0, void * arg1,
+	void * arg2, void * arg3 )
 {
-    IOServicePM * pwrMgt = (IOServicePM *) arg0;
+	IOServicePM * pwrMgt = (IOServicePM *) arg0;
 
-    assert( fDriverCallBusy );
-    fDriverCallBusy = false;
+	assert( fDriverCallBusy );
+	fDriverCallBusy = false;
 
-    assert(gIOPMWorkQueue);
-    gIOPMWorkQueue->signalWorkAvailable();
+	assert(gIOPMWorkQueue);
+	gIOPMWorkQueue->signalWorkAvailable();
 
-    return kIOReturnSuccess;
+	return kIOReturnSuccess;
 }
 
-void IOService::pmDriverCallout( IOService * from )
+void
+IOService::pmDriverCallout( IOService * from )
 {
-    assert(from);
-    switch (from->fDriverCallReason)
-    {
-    case kDriverCallSetPowerState:
-        from->driverSetPowerState();
-        break;
+	assert(from);
+	switch (from->fDriverCallReason) {
+	case kDriverCallSetPowerState:
+		from->driverSetPowerState();
+		break;
 
-    case kDriverCallInformPreChange:
-    case kDriverCallInformPostChange:
-        from->driverInformPowerChange();
-        break;
+	case kDriverCallInformPreChange:
+	case kDriverCallInformPostChange:
+		from->driverInformPowerChange();
+		break;
 
-    case kRootDomainInformPreChange:
-        getPMRootDomain()->willNotifyPowerChildren(from->fHeadNotePowerState);
-        break;
+	case kRootDomainInformPreChange:
+		getPMRootDomain()->willNotifyPowerChildren(from->fHeadNotePowerState);
+		break;
 
-    default:
-        panic("IOService::pmDriverCallout bad machine state %x",
-            from->fDriverCallReason);
-    }
+	default:
+		panic("IOService::pmDriverCallout bad machine state %x",
+		    from->fDriverCallReason);
+	}
 
-    gIOPMWorkLoop->runAction(actionDriverCalloutDone,
-        /* target */ from,
-        /* arg0 */ (void *) from->pwrMgt );
+	gIOPMWorkLoop->runAction(actionDriverCalloutDone,
+	    /* target */ from,
+	    /* arg0 */ (void *) from->pwrMgt );
 }
 
 //*********************************************************************************
@@ -3990,68 +3937,66 @@ void IOService::pmDriverCallout( IOService * from )
 // Thread call context
 //*********************************************************************************
 
-void IOService::driverSetPowerState( void )
-{
-    IOPMPowerStateIndex powerState;
-    DriverCallParam * param;
-    IOPMDriverCallEntry callEntry;
-    AbsoluteTime end;
-    IOReturn result;
-    uint32_t oldPowerState = getPowerState();
-
-    assert( fDriverCallBusy );
-    assert( fDriverCallParamPtr );
-    assert( fDriverCallParamCount == 1 );
-
-    param = (DriverCallParam *) fDriverCallParamPtr;
-    powerState = fHeadNotePowerState;
-
-    callEntry.callMethod = OSMemberFunctionCast(const void *, fControllingDriver, &IOService::setPowerState);
-    if (assertPMDriverCall(&callEntry))
-    {
-        OUR_PMLogFuncStart(kPMLogProgramHardware, (uintptr_t) this, powerState);
-        start_spindump_timer("SetState");
-        clock_get_uptime(&fDriverCallStartTime);
-        result = fControllingDriver->setPowerState( powerState, this );
-        clock_get_uptime(&end);
-        stop_spindump_timer();
-        OUR_PMLogFuncEnd(kPMLogProgramHardware, (uintptr_t) this, (UInt32) result);
-
-        deassertPMDriverCall(&callEntry);
-
-        // Record the most recent max power state residency timings.
-        // Use with DeviceActiveTimestamp to diagnose tickle issues.
-        if (powerState == fHighestPowerState)
-            fMaxPowerStateEntryTime = end;
-        else if (oldPowerState == fHighestPowerState)
-            fMaxPowerStateExitTime = end;
-
-        if (result < 0)
-        {
-            PM_LOG("%s::setPowerState(%p, %lu -> %lu) returned 0x%x\n",
-                fName, OBFUSCATE(this), fCurrentPowerState, powerState, result);
-        }
+void
+IOService::driverSetPowerState( void )
+{
+	IOPMPowerStateIndex powerState;
+	DriverCallParam * param;
+	IOPMDriverCallEntry callEntry;
+	AbsoluteTime end;
+	IOReturn result;
+	uint32_t oldPowerState = getPowerState();
+
+	assert( fDriverCallBusy );
+	assert( fDriverCallParamPtr );
+	assert( fDriverCallParamCount == 1 );
+
+	param = (DriverCallParam *) fDriverCallParamPtr;
+	powerState = fHeadNotePowerState;
+
+	callEntry.callMethod = OSMemberFunctionCast(const void *, fControllingDriver, &IOService::setPowerState);
+	if (assertPMDriverCall(&callEntry)) {
+		OUR_PMLogFuncStart(kPMLogProgramHardware, (uintptr_t) this, powerState);
+		start_spindump_timer("SetState");
+		clock_get_uptime(&fDriverCallStartTime);
+		result = fControllingDriver->setPowerState( powerState, this );
+		clock_get_uptime(&end);
+		stop_spindump_timer();
+		OUR_PMLogFuncEnd(kPMLogProgramHardware, (uintptr_t) this, (UInt32) result);
+
+		deassertPMDriverCall(&callEntry);
+
+		// Record the most recent max power state residency timings.
+		// Use with DeviceActiveTimestamp to diagnose tickle issues.
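+		// fMaxPowerStateEntryTime marks when the device last reached its
+		// highest power state and fMaxPowerStateExitTime when it last left
+		// it; comparing these with DeviceActiveTimestamp is assumed to show
+		// whether activity tickles kept the device at max power.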
+		if (powerState == fHighestPowerState) {
+			fMaxPowerStateEntryTime = end;
+		} else if (oldPowerState == fHighestPowerState) {
+			fMaxPowerStateExitTime = end;
+		}
+		if (result < 0) {
+			PM_LOG("%s::setPowerState(%p, %lu -> %lu) returned 0x%x\n",
+			    fName, OBFUSCATE(this), fCurrentPowerState, powerState, result);
+		}
 
-        if ((result == IOPMAckImplied) || (result < 0))
-        {
-            uint64_t nsec;
-            SUB_ABSOLUTETIME(&end, &fDriverCallStartTime);
-            absolutetime_to_nanoseconds(end, &nsec);
-            if (nsec > LOG_SETPOWER_TIMES) {
-                getPMRootDomain()->pmStatsRecordApplicationResponse(
-                    gIOPMStatsDriverPSChangeSlow,
-                    fName, kDriverCallSetPowerState, NS_TO_MS(nsec), getRegistryEntryID(),
-                    NULL, powerState);
-            }
-        }
+		if ((result == IOPMAckImplied) || (result < 0)) {
+			uint64_t nsec;
 
-    }
-    else
-        result = kIOPMAckImplied;
+			SUB_ABSOLUTETIME(&end, &fDriverCallStartTime);
+			absolutetime_to_nanoseconds(end, &nsec);
+			if (nsec > LOG_SETPOWER_TIMES) {
+				getPMRootDomain()->pmStatsRecordApplicationResponse(
+					gIOPMStatsDriverPSChangeSlow,
+					fName, kDriverCallSetPowerState, NS_TO_MS(nsec), getRegistryEntryID(),
+					NULL, powerState);
+			}
+		}
+	} else {
+		result = kIOPMAckImplied;
+	}
 
-    param->Result = result;
+	param->Result = result;
 }
 
 //*********************************************************************************
@@ -4060,86 +4005,79 @@ void IOService::driverSetPowerState( void )
 // Thread call context
 //*********************************************************************************
 
-void IOService::driverInformPowerChange( void )
-{
-    IOPMinformee * informee;
-    IOService * driver;
-    DriverCallParam * param;
-    IOPMDriverCallEntry callEntry;
-    IOPMPowerFlags powerFlags;
-    IOPMPowerStateIndex powerState;
-    AbsoluteTime end;
-    IOReturn result;
-    IOItemCount count;
-
-    assert( fDriverCallBusy );
-    assert( fDriverCallParamPtr );
-    assert( fDriverCallParamCount );
-
-    param = (DriverCallParam *) fDriverCallParamPtr;
-    count = fDriverCallParamCount;
-
-    powerFlags = fHeadNotePowerArrayEntry->capabilityFlags;
-    powerState = fHeadNotePowerState;
-
-    for (IOItemCount i = 0; i < count; i++)
-    {
-        informee = (IOPMinformee *) param->Target;
-        driver = informee->whatObject;
-
-        if (fDriverCallReason == kDriverCallInformPreChange) {
-            callEntry.callMethod = OSMemberFunctionCast(const void *, driver, &IOService::powerStateWillChangeTo);
-        }
-        else {
-            callEntry.callMethod = OSMemberFunctionCast(const void *, driver, &IOService::powerStateDidChangeTo);
-        }
-        if (assertPMDriverCall(&callEntry, 0, informee))
-        {
-            if (fDriverCallReason == kDriverCallInformPreChange)
-            {
-                OUR_PMLogFuncStart(kPMLogInformDriverPreChange, (uintptr_t) this, powerState);
-                start_spindump_timer("WillChange");
-                clock_get_uptime(&informee->startTime);
-                result = driver->powerStateWillChangeTo(powerFlags, powerState, this);
-                clock_get_uptime(&end);
-                stop_spindump_timer();
-                OUR_PMLogFuncEnd(kPMLogInformDriverPreChange, (uintptr_t) this, result);
-            }
-            else
-            {
-                OUR_PMLogFuncStart(kPMLogInformDriverPostChange, (uintptr_t) this, powerState);
-                start_spindump_timer("DidChange");
-                clock_get_uptime(&informee->startTime);
-                result = driver->powerStateDidChangeTo(powerFlags, powerState, this);
-                clock_get_uptime(&end);
-                stop_spindump_timer();
-                OUR_PMLogFuncEnd(kPMLogInformDriverPostChange, (uintptr_t) this, result);
-            }
-
-            deassertPMDriverCall(&callEntry);
-
-
-            if ((result == IOPMAckImplied) || (result < 0))
-            {
-                uint64_t nsec;
-
-                SUB_ABSOLUTETIME(&end, &informee->startTime);
-                absolutetime_to_nanoseconds(end, &nsec);
-                if (nsec > LOG_SETPOWER_TIMES) {
-                    getPMRootDomain()->pmStatsRecordApplicationResponse(
-                        gIOPMStatsDriverPSChangeSlow, driver->getName(),
-                        fDriverCallReason, NS_TO_MS(nsec), driver->getRegistryEntryID(),
-                        NULL, powerState);
-                }
-            }
-
-        }
-        else
-            result = kIOPMAckImplied;
-
-        param->Result = result;
-        param++;
-    }
+void
+IOService::driverInformPowerChange( void )
+{
+	IOPMinformee * informee;
+	IOService * driver;
+	DriverCallParam * param;
+	IOPMDriverCallEntry callEntry;
+	IOPMPowerFlags powerFlags;
+	IOPMPowerStateIndex powerState;
+	AbsoluteTime end;
+	IOReturn result;
+	IOItemCount count;
+
+	assert( fDriverCallBusy );
+	assert( fDriverCallParamPtr );
+	assert( fDriverCallParamCount );
+
+	param = (DriverCallParam *) fDriverCallParamPtr;
+	count = fDriverCallParamCount;
+
+	powerFlags = fHeadNotePowerArrayEntry->capabilityFlags;
+	powerState = fHeadNotePowerState;
+
+	for (IOItemCount i = 0; i < count; i++) {
+		informee = (IOPMinformee *) param->Target;
+		driver = informee->whatObject;
+
+		if (fDriverCallReason == kDriverCallInformPreChange) {
+			callEntry.callMethod = OSMemberFunctionCast(const void *, driver, &IOService::powerStateWillChangeTo);
+		} else {
+			callEntry.callMethod = OSMemberFunctionCast(const void *, driver, &IOService::powerStateDidChangeTo);
+		}
+		if (assertPMDriverCall(&callEntry, 0, informee)) {
+			if (fDriverCallReason == kDriverCallInformPreChange) {
+				OUR_PMLogFuncStart(kPMLogInformDriverPreChange, (uintptr_t) this, powerState);
+				start_spindump_timer("WillChange");
+				clock_get_uptime(&informee->startTime);
+				result = driver->powerStateWillChangeTo(powerFlags, powerState, this);
+				clock_get_uptime(&end);
+				stop_spindump_timer();
+				OUR_PMLogFuncEnd(kPMLogInformDriverPreChange, (uintptr_t) this, result);
+			} else {
+				OUR_PMLogFuncStart(kPMLogInformDriverPostChange, (uintptr_t) this, powerState);
+				start_spindump_timer("DidChange");
+				clock_get_uptime(&informee->startTime);
+				result = driver->powerStateDidChangeTo(powerFlags, powerState, this);
+				clock_get_uptime(&end);
+				stop_spindump_timer();
+				OUR_PMLogFuncEnd(kPMLogInformDriverPostChange, (uintptr_t) this, result);
+			}
+
+			deassertPMDriverCall(&callEntry);
+
+
+			if ((result == IOPMAckImplied) || (result < 0)) {
+				uint64_t nsec;
+
+				SUB_ABSOLUTETIME(&end, &informee->startTime);
+				absolutetime_to_nanoseconds(end, &nsec);
+				if (nsec > LOG_SETPOWER_TIMES) {
+					getPMRootDomain()->pmStatsRecordApplicationResponse(
+						gIOPMStatsDriverPSChangeSlow, driver->getName(),
+						fDriverCallReason, NS_TO_MS(nsec), driver->getRegistryEntryID(),
+						NULL, powerState);
+				}
+			}
+		} else {
+			result = kIOPMAckImplied;
+		}
+
+		param->Result = result;
+		param++;
+	}
 }
 
 //*********************************************************************************
@@ -4149,167 +4087,160 @@ void IOService::driverInformPowerChange( void )
 // If the object acknowledges the current change, we return TRUE.
 //*********************************************************************************
 
-bool IOService::notifyChild( IOPowerConnection * theNub )
-{
-    IOReturn ret = IOPMAckImplied;
-    unsigned long childPower;
-    IOService * theChild;
-    IOPMRequest * childRequest;
-    IOPMPowerChangeFlags requestArg2;
-    int requestType;
-
-    PM_ASSERT_IN_GATE();
-    theChild = (IOService *)(theNub->copyChildEntry(gIOPowerPlane));
-    if (!theChild)
-    {
-        return true;
-    }
-
-    // Unless the child handles the notification immediately and returns
-    // kIOPMAckImplied, we'll be awaiting their acknowledgement later.
-    fHeadNotePendingAcks++;
-    theNub->setAwaitingAck(true);
-
-    requestArg2 = fHeadNoteChangeFlags;
-    if (StateOrder(fHeadNotePowerState) < StateOrder(fCurrentPowerState))
-        requestArg2 |= kIOPMDomainPowerDrop;
-
-    requestType = fIsPreChange ?
-        kIOPMRequestTypePowerDomainWillChange :
-        kIOPMRequestTypePowerDomainDidChange;
-
-    childRequest = acquirePMRequest( theChild, requestType );
-    if (childRequest)
-    {
-        theNub->retain();
-        childRequest->fArg0 = (void *) fHeadNotePowerArrayEntry->outputPowerFlags;
-        childRequest->fArg1 = (void *) theNub;
-        childRequest->fArg2 = (void *)(uintptr_t) requestArg2;
-        theChild->submitPMRequest( childRequest );
-        ret = IOPMWillAckLater;
-    }
-    else
-    {
-        ret = IOPMAckImplied;
-        fHeadNotePendingAcks--;
-        theNub->setAwaitingAck(false);
-        childPower = theChild->currentPowerConsumption();
-        if ( childPower == kIOPMUnknown )
-        {
-            fHeadNotePowerArrayEntry->staticPower = kIOPMUnknown;
-        } else {
-            if (fHeadNotePowerArrayEntry->staticPower != kIOPMUnknown )
-                fHeadNotePowerArrayEntry->staticPower += childPower;
-        }
-    }
-
-    theChild->release();
-    return (IOPMAckImplied == ret);
+bool
+IOService::notifyChild( IOPowerConnection * theNub )
+{
+	IOReturn ret = IOPMAckImplied;
+	unsigned long childPower;
+	IOService * theChild;
+	IOPMRequest * childRequest;
+	IOPMPowerChangeFlags requestArg2;
+	int requestType;
+
+	PM_ASSERT_IN_GATE();
+	theChild = (IOService *)(theNub->copyChildEntry(gIOPowerPlane));
+	if (!theChild) {
+		return true;
+	}
+
+	// Unless the child handles the notification immediately and returns
+	// kIOPMAckImplied, we'll be awaiting their acknowledgement later.
+	fHeadNotePendingAcks++;
+	theNub->setAwaitingAck(true);
+
+	requestArg2 = fHeadNoteChangeFlags;
+	if (StateOrder(fHeadNotePowerState) < StateOrder(fCurrentPowerState)) {
+		requestArg2 |= kIOPMDomainPowerDrop;
+	}
+
+	requestType = fIsPreChange ?
+	    kIOPMRequestTypePowerDomainWillChange :
+	    kIOPMRequestTypePowerDomainDidChange;
+
+	childRequest = acquirePMRequest( theChild, requestType );
+	if (childRequest) {
+		theNub->retain();
+		childRequest->fArg0 = (void *) fHeadNotePowerArrayEntry->outputPowerFlags;
+		childRequest->fArg1 = (void *) theNub;
+		childRequest->fArg2 = (void *)(uintptr_t) requestArg2;
+		theChild->submitPMRequest( childRequest );
+		ret = IOPMWillAckLater;
+	} else {
+		ret = IOPMAckImplied;
+		fHeadNotePendingAcks--;
+		theNub->setAwaitingAck(false);
+		childPower = theChild->currentPowerConsumption();
+		if (childPower == kIOPMUnknown) {
+			fHeadNotePowerArrayEntry->staticPower = kIOPMUnknown;
+		} else {
+			if (fHeadNotePowerArrayEntry->staticPower != kIOPMUnknown) {
+				fHeadNotePowerArrayEntry->staticPower += childPower;
+			}
+		}
+	}
+
+	theChild->release();
+	return IOPMAckImplied == ret;
 }
 
 //*********************************************************************************
 // [private] notifyControllingDriver
 //*********************************************************************************
 
-bool IOService::notifyControllingDriver( void )
+bool
+IOService::notifyControllingDriver( void )
 {
-    DriverCallParam * param;
-
-    PM_ASSERT_IN_GATE();
-    assert( fDriverCallParamCount == 0 );
-    assert( fControllingDriver );
+	DriverCallParam * param;
 
-    if (fInitialSetPowerState)
-    {
-        fInitialSetPowerState = false;
-        fHeadNoteChangeFlags |= kIOPMInitialPowerChange;
+	PM_ASSERT_IN_GATE();
+	assert( fDriverCallParamCount == 0 );
+	assert( fControllingDriver );
 
-        // Driver specified flag to skip the inital setPowerState()
-        if (fHeadNotePowerArrayEntry->capabilityFlags & kIOPMInitialDeviceState)
-        {
-            return false;
-        }
-    }
+	if (fInitialSetPowerState) {
+		fInitialSetPowerState = false;
+		fHeadNoteChangeFlags |= kIOPMInitialPowerChange;
 
-    param = (DriverCallParam *) fDriverCallParamPtr;
-    if (!param)
-    {
-        param = IONew(DriverCallParam, 1);
-        if (!param)
-            return false; // no memory
+		// Driver specified flag to skip the initial setPowerState()
+		if (fHeadNotePowerArrayEntry->capabilityFlags & kIOPMInitialDeviceState) {
+			return false;
+		}
+	}
 
-        fDriverCallParamPtr = (void *) param;
-        fDriverCallParamSlots = 1;
-    }
+	param = (DriverCallParam *) fDriverCallParamPtr;
+	if (!param) {
+		param = IONew(DriverCallParam, 1);
+		if (!param) {
+			return false; // no memory
+		}
+		fDriverCallParamPtr = (void *) param;
+		fDriverCallParamSlots = 1;
+	}
 
-    param->Target = fControllingDriver;
-    fDriverCallParamCount = 1;
-    fDriverTimer = -1;
+	param->Target = fControllingDriver;
+	fDriverCallParamCount = 1;
+	fDriverTimer = -1;
 
-    // Block state machine and wait for callout completion.
-    assert(!fDriverCallBusy);
-    fDriverCallBusy = true;
-    thread_call_enter( fDriverCallEntry );
+	// Block state machine and wait for callout completion.
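+	// fDriverCallBusy stays set until pmDriverCallout() finishes and
+	// actionDriverCalloutDone() clears it on the PM work loop, which then
+	// signals gIOPMWorkQueue so this state machine can resume.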
+	assert(!fDriverCallBusy);
+	fDriverCallBusy = true;
+	thread_call_enter( fDriverCallEntry );
 
-    return true;
+	return true;
 }
 
 //*********************************************************************************
 // [private] notifyControllingDriverDone
 //*********************************************************************************
 
-void IOService::notifyControllingDriverDone( void )
+void
+IOService::notifyControllingDriverDone( void )
 {
-    DriverCallParam * param;
-    IOReturn result;
+	DriverCallParam * param;
+	IOReturn result;
 
-    PM_ASSERT_IN_GATE();
-    param = (DriverCallParam *) fDriverCallParamPtr;
+	PM_ASSERT_IN_GATE();
+	param = (DriverCallParam *) fDriverCallParamPtr;
 
-    assert( fDriverCallBusy == false );
-    assert( fMachineState == kIOPM_DriverThreadCallDone );
+	assert( fDriverCallBusy == false );
+	assert( fMachineState == kIOPM_DriverThreadCallDone );
 
-    if (param && fDriverCallParamCount)
-    {
-        assert(fDriverCallParamCount == 1);
+	if (param && fDriverCallParamCount) {
+		assert(fDriverCallParamCount == 1);
 
-        // the return value from setPowerState()
-        result = param->Result;
+		// the return value from setPowerState()
+		result = param->Result;
 
-        if ((result == IOPMAckImplied) || (result < 0))
-        {
-            fDriverTimer = 0;
-        }
-        else if (fDriverTimer)
-        {
-            assert(fDriverTimer == -1);
+		if ((result == IOPMAckImplied) || (result < 0)) {
+			fDriverTimer = 0;
+		} else if (fDriverTimer) {
+			assert(fDriverTimer == -1);
 
-            // Driver has not acked, and has returned a positive result.
-            // Enforce a minimum permissible timeout value.
-            // Make the min value large enough so timeout is less likely
-            // to occur if a driver misinterpreted that the return value
-            // should be in microsecond units. And make it large enough
-            // to be noticeable if a driver neglects to ack.
+			// Driver has not acked, and has returned a positive result.
+			// Enforce a minimum permissible timeout value.
+			// Make the min value large enough so timeout is less likely
+			// to occur if a driver misinterpreted that the return value
+			// should be in microsecond units. And make it large enough
+			// to be noticeable if a driver neglects to ack.
 
-            if (result < kMinAckTimeoutTicks)
-                result = kMinAckTimeoutTicks;
+			if (result < kMinAckTimeoutTicks) {
+				result = kMinAckTimeoutTicks;
+			}
 
-            fDriverTimer = (result / (ACK_TIMER_PERIOD / ns_per_us)) + 1;
-        }
-        // else, child has already acked and driver_timer reset to 0.
+			fDriverTimer = (result / (ACK_TIMER_PERIOD / ns_per_us)) + 1;
+		}
+		// else, child has already acked and fDriverTimer reset to 0.
 
-        fDriverCallParamCount = 0;
+		fDriverCallParamCount = 0;
 
-        if ( fDriverTimer )
-        {
-            OUR_PMLog(kPMLogStartAckTimer, 0, 0);
-            start_ack_timer();
-            getPMRootDomain()->reset_watchdog_timer(this, result/USEC_PER_SEC+1);
-        }
-    }
+		if (fDriverTimer) {
+			OUR_PMLog(kPMLogStartAckTimer, 0, 0);
+			start_ack_timer();
+			getPMRootDomain()->reset_watchdog_timer(this, result / USEC_PER_SEC + 1);
+		}
+	}
 
-    MS_POP(); // pushed by OurChangeSetPowerState()
-    fIsPreChange = false;
+	MS_POP(); // pushed by OurChangeSetPowerState()
+	fIsPreChange = false;
 }
 
 //*********************************************************************************
@@ -4318,182 +4249,167 @@ void IOService::notifyControllingDriverDone( void )
 // A power change is done.
 //*********************************************************************************
 
-void IOService::all_done( void )
-{
-    IOPMPowerStateIndex prevPowerState;
-    const IOPMPSEntry * powerStatePtr;
-    IOPMDriverCallEntry callEntry;
-    uint32_t prevMachineState = fMachineState;
-    bool actionCalled = false;
-    uint64_t ts;
-
-    fMachineState = kIOPM_Finished;
-
-    if ((fHeadNoteChangeFlags & kIOPMSynchronize) &&
-        ((prevMachineState == kIOPM_Finished) ||
-         (prevMachineState == kIOPM_SyncFinish)))
-    {
-        // Sync operation and no power change occurred.
-        // Do not inform driver and clients about this request completion,
-        // except for the originator (root domain).
-
-        PM_ACTION_2(actionPowerChangeDone,
-            fHeadNotePowerState, fHeadNoteChangeFlags);
-
-        if (getPMRequestType() == kIOPMRequestTypeSynchronizePowerTree)
-        {
-            powerChangeDone(fCurrentPowerState);
-        }
-        else if (fAdvisoryTickleUsed)
-        {
-            // Not root domain and advisory tickle target.
-            // Re-adjust power after power tree sync at the 'did' pass
-            // to recompute desire and adjust power state between dark
-            // and full wake transitions. Root domain is responsible
-            // for calling setAdvisoryTickleEnable() before starting
-            // the kIOPMSynchronize power change.
-
-            if (!fAdjustPowerScheduled &&
-                (fHeadNoteChangeFlags & kIOPMDomainDidChange))
-            {
-                IOPMRequest * request;
-                request = acquirePMRequest( this, kIOPMRequestTypeAdjustPowerState );
-                if (request)
-                {
-                    submitPMRequest( request );
-                    fAdjustPowerScheduled = true;
-                }
-            }
-        }
-
-        return;
-    }
-
-    // our power change
-    if (fHeadNoteChangeFlags & kIOPMSelfInitiated)
-    {
-        // power state changed
-        if ((fHeadNoteChangeFlags & kIOPMNotDone) == 0)
-        {
-            trackSystemSleepPreventers(
-                fCurrentPowerState, fHeadNotePowerState, fHeadNoteChangeFlags);
-
-            // we changed, tell our parent
-            requestDomainPower(fHeadNotePowerState);
-
-            // yes, did power raise?
-            if ( StateOrder(fCurrentPowerState) < StateOrder(fHeadNotePowerState) )
-            {
-                // yes, inform clients and apps
-                tellChangeUp (fHeadNotePowerState);
-            }
-            prevPowerState = fCurrentPowerState;
-            // either way
-            fCurrentPowerState = fHeadNotePowerState;
-            PM_LOCK();
-            if (fReportBuf) {
-                ts = mach_absolute_time();
-                STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts);
-            }
-            PM_UNLOCK();
+void
+IOService::all_done( void )
+{
+	IOPMPowerStateIndex prevPowerState;
+	const IOPMPSEntry * powerStatePtr;
+	IOPMDriverCallEntry callEntry;
+	uint32_t prevMachineState = fMachineState;
+	bool actionCalled = false;
+	uint64_t ts;
+
+	fMachineState = kIOPM_Finished;
+
+	if ((fHeadNoteChangeFlags & kIOPMSynchronize) &&
+	    ((prevMachineState == kIOPM_Finished) ||
+	    (prevMachineState == kIOPM_SyncFinish))) {
+		// Sync operation and no power change occurred.
+		// Do not inform driver and clients about this request completion,
+		// except for the originator (root domain).
+
+		PM_ACTION_2(actionPowerChangeDone,
+		    fHeadNotePowerState, fHeadNoteChangeFlags);
+
+		if (getPMRequestType() == kIOPMRequestTypeSynchronizePowerTree) {
+			powerChangeDone(fCurrentPowerState);
+		} else if (fAdvisoryTickleUsed) {
+			// Not root domain and advisory tickle target.
+			// Re-adjust power after power tree sync at the 'did' pass
+			// to recompute desire and adjust power state between dark
+			// and full wake transitions. Root domain is responsible
+			// for calling setAdvisoryTickleEnable() before starting
+			// the kIOPMSynchronize power change.
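+
+			// The adjust request queued below is assumed to be deduplicated
+			// by fAdjustPowerScheduled, which stays set until the queued
+			// kIOPMRequestTypeAdjustPowerState request is serviced.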
+
+			if (!fAdjustPowerScheduled &&
+			    (fHeadNoteChangeFlags & kIOPMDomainDidChange)) {
+				IOPMRequest * request;
+				request = acquirePMRequest( this, kIOPMRequestTypeAdjustPowerState );
+				if (request) {
+					submitPMRequest( request );
+					fAdjustPowerScheduled = true;
+				}
+			}
+		}
+
+		return;
+	}
+
+	// our power change
+	if (fHeadNoteChangeFlags & kIOPMSelfInitiated) {
+		// power state changed
+		if ((fHeadNoteChangeFlags & kIOPMNotDone) == 0) {
+			trackSystemSleepPreventers(
+				fCurrentPowerState, fHeadNotePowerState, fHeadNoteChangeFlags);
+
+			// we changed, tell our parent
+			requestDomainPower(fHeadNotePowerState);
+
+			// yes, did power raise?
+			if (StateOrder(fCurrentPowerState) < StateOrder(fHeadNotePowerState)) {
+				// yes, inform clients and apps
+				tellChangeUp(fHeadNotePowerState);
+			}
+			prevPowerState = fCurrentPowerState;
+			// either way
+			fCurrentPowerState = fHeadNotePowerState;
+			PM_LOCK();
+			if (fReportBuf) {
+				ts = mach_absolute_time();
+				STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts);
+			}
+			PM_UNLOCK();
 #if PM_VARS_SUPPORT
-            fPMVars->myCurrentState = fCurrentPowerState;
+			fPMVars->myCurrentState = fCurrentPowerState;
 #endif
-            OUR_PMLog(kPMLogChangeDone, fCurrentPowerState, prevPowerState);
-            PM_ACTION_2(actionPowerChangeDone,
-                fHeadNotePowerState, fHeadNoteChangeFlags);
-            actionCalled = true;
-
-            powerStatePtr = &fPowerStates[fCurrentPowerState];
-            fCurrentCapabilityFlags = powerStatePtr->capabilityFlags;
-            if (fCurrentCapabilityFlags & kIOPMStaticPowerValid)
-                fCurrentPowerConsumption = powerStatePtr->staticPower;
-
-            if (fHeadNoteChangeFlags & kIOPMRootChangeDown)
-            {
-                // Bump tickle generation count once the entire tree is down
-                gIOPMTickleGeneration++;
-            }
-
-            // inform subclass policy-maker
-            if (fPCDFunctionOverride && fParentsKnowState &&
-                assertPMDriverCall(&callEntry, kIOPMADC_NoInactiveCheck))
-            {
-                powerChangeDone(prevPowerState);
-                deassertPMDriverCall(&callEntry);
-            }
-        }
-        else if (getPMRequestType() == kIOPMRequestTypeRequestPowerStateOverride)
-        {
-            // changePowerStateWithOverrideTo() was cancelled
-            fOverrideMaxPowerState = kIOPMPowerStateMax;
-        }
-    }
-
-    // parent-initiated power change
-    if (fHeadNoteChangeFlags & kIOPMParentInitiated)
-    {
-        if (fHeadNoteChangeFlags & kIOPMRootChangeDown)
-            ParentChangeRootChangeDown();
-
-        // power state changed
-        if ((fHeadNoteChangeFlags & kIOPMNotDone) == 0)
-        {
-            trackSystemSleepPreventers(
-                fCurrentPowerState, fHeadNotePowerState, fHeadNoteChangeFlags);
-
-            // did power raise?
-            if ( StateOrder(fCurrentPowerState) < StateOrder(fHeadNotePowerState) )
-            {
-                // yes, inform clients and apps
-                tellChangeUp (fHeadNotePowerState);
-            }
-            // either way
-            prevPowerState = fCurrentPowerState;
-            fCurrentPowerState = fHeadNotePowerState;
-            PM_LOCK();
-            if (fReportBuf) {
-                ts = mach_absolute_time();
-                STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts);
-            }
-            PM_UNLOCK();
+			OUR_PMLog(kPMLogChangeDone, fCurrentPowerState, prevPowerState);
+			PM_ACTION_2(actionPowerChangeDone,
+			    fHeadNotePowerState, fHeadNoteChangeFlags);
+			actionCalled = true;
+
+			powerStatePtr = &fPowerStates[fCurrentPowerState];
+			fCurrentCapabilityFlags = powerStatePtr->capabilityFlags;
+			if (fCurrentCapabilityFlags & kIOPMStaticPowerValid) {
+				fCurrentPowerConsumption = powerStatePtr->staticPower;
+			}
+
+			if (fHeadNoteChangeFlags & kIOPMRootChangeDown) {
+				// Bump tickle generation count once the entire tree is down
+				gIOPMTickleGeneration++;
+			}
+
+			// inform subclass policy-maker
+			if (fPCDFunctionOverride && fParentsKnowState &&
+			    assertPMDriverCall(&callEntry, kIOPMADC_NoInactiveCheck)) {
+				powerChangeDone(prevPowerState);
+				deassertPMDriverCall(&callEntry);
+			}
+		} else if (getPMRequestType() == kIOPMRequestTypeRequestPowerStateOverride) {
+			// changePowerStateWithOverrideTo() was cancelled
+			fOverrideMaxPowerState = kIOPMPowerStateMax;
+		}
+	}
+
+	// parent-initiated power change
+	if (fHeadNoteChangeFlags & kIOPMParentInitiated) {
+		if (fHeadNoteChangeFlags & kIOPMRootChangeDown) {
+			ParentChangeRootChangeDown();
+		}
+
+		// power state changed
+		if ((fHeadNoteChangeFlags & kIOPMNotDone) == 0) {
+			trackSystemSleepPreventers(
+				fCurrentPowerState, fHeadNotePowerState, fHeadNoteChangeFlags);
+
+			// did power raise?
+			if (StateOrder(fCurrentPowerState) < StateOrder(fHeadNotePowerState)) {
+				// yes, inform clients and apps
+				tellChangeUp(fHeadNotePowerState);
+			}
+			// either way
+			prevPowerState = fCurrentPowerState;
+			fCurrentPowerState = fHeadNotePowerState;
+			PM_LOCK();
+			if (fReportBuf) {
+				ts = mach_absolute_time();
+				STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts);
+			}
+			PM_UNLOCK();
 #if PM_VARS_SUPPORT
-            fPMVars->myCurrentState = fCurrentPowerState;
+			fPMVars->myCurrentState = fCurrentPowerState;
 #endif
-            OUR_PMLog(kPMLogChangeDone, fCurrentPowerState, prevPowerState);
-            PM_ACTION_2(actionPowerChangeDone,
-                fHeadNotePowerState, fHeadNoteChangeFlags);
-            actionCalled = true;
-
-            powerStatePtr = &fPowerStates[fCurrentPowerState];
-            fCurrentCapabilityFlags = powerStatePtr->capabilityFlags;
-            if (fCurrentCapabilityFlags & kIOPMStaticPowerValid)
-                fCurrentPowerConsumption = powerStatePtr->staticPower;
-
-            // inform subclass policy-maker
-            if (fPCDFunctionOverride && fParentsKnowState &&
-                assertPMDriverCall(&callEntry, kIOPMADC_NoInactiveCheck))
-            {
-                powerChangeDone(prevPowerState);
-                deassertPMDriverCall(&callEntry);
-            }
-        }
-    }
-
-    // When power rises enough to satisfy the tickle's desire for more power,
-    // the condition preventing idle-timer from dropping power is removed.
-
-    if (StateOrder(fCurrentPowerState) >= StateOrder(fIdleTimerMinPowerState))
-    {
-        fIdleTimerMinPowerState = kPowerStateZero;
-    }
-
-    if (!actionCalled)
-    {
-        PM_ACTION_2(actionPowerChangeDone,
-            fHeadNotePowerState, fHeadNoteChangeFlags);
-    }
+			OUR_PMLog(kPMLogChangeDone, fCurrentPowerState, prevPowerState);
+			PM_ACTION_2(actionPowerChangeDone,
+			    fHeadNotePowerState, fHeadNoteChangeFlags);
+			actionCalled = true;
+
+			powerStatePtr = &fPowerStates[fCurrentPowerState];
+			fCurrentCapabilityFlags = powerStatePtr->capabilityFlags;
+			if (fCurrentCapabilityFlags & kIOPMStaticPowerValid) {
+				fCurrentPowerConsumption = powerStatePtr->staticPower;
+			}
+
+			// inform subclass policy-maker
+			if (fPCDFunctionOverride && fParentsKnowState &&
+			    assertPMDriverCall(&callEntry, kIOPMADC_NoInactiveCheck)) {
+				powerChangeDone(prevPowerState);
+				deassertPMDriverCall(&callEntry);
+			}
+		}
+	}
+
+	// When power rises enough to satisfy the tickle's desire for more power,
+	// the condition preventing idle-timer from dropping power is removed.
+
+	if (StateOrder(fCurrentPowerState) >= StateOrder(fIdleTimerMinPowerState)) {
+		fIdleTimerMinPowerState = kPowerStateZero;
+	}
+
+	if (!actionCalled) {
+		PM_ACTION_2(actionPowerChangeDone,
+		    fHeadNotePowerState, fHeadNoteChangeFlags);
+	}
 }
 
 // MARK: -
@@ -4505,77 +4421,72 @@ void IOService::all_done( void )
 // Begin the processing of a power change initiated by us.
 //*********************************************************************************
 
-void IOService::OurChangeStart( void )
-{
-    PM_ASSERT_IN_GATE();
-    OUR_PMLog( kPMLogStartDeviceChange, fHeadNotePowerState, fCurrentPowerState );
-
-    // fMaxPowerState is our maximum possible power state based on the current
-    // power state of our parents. If we are trying to raise power beyond the
-    // maximum, send an async request for more power to all parents.
-
-    if (!IS_PM_ROOT && (StateOrder(fMaxPowerState) < StateOrder(fHeadNotePowerState)))
-    {
-        fHeadNoteChangeFlags |= kIOPMNotDone;
-        requestDomainPower(fHeadNotePowerState);
-        OurChangeFinish();
-        return;
-    }
-
-    // Redundant power changes skips to the end of the state machine.
-
-    if (!fInitialPowerChange && (fHeadNotePowerState == fCurrentPowerState))
-    {
-        OurChangeFinish();
-        return;
-    }
-    fInitialPowerChange = false;
-
-    // Change started, but may not complete...
-    // Can be canceled (power drop) or deferred (power rise).
-
-    PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags);
-
-    // Two separate paths, depending if power is being raised or lowered.
-    // Lowering power is subject to approval by clients of this service.
-
-    if (IS_POWER_DROP)
-    {
-        fDoNotPowerDown = false;
-
-        // Ask for persmission to drop power state
-        fMachineState = kIOPM_OurChangeTellClientsPowerDown;
-        fOutOfBandParameter = kNotifyApps;
-        askChangeDown(fHeadNotePowerState);
-    }
-    else
-    {
-        // This service is raising power and parents are able to support the
-        // new power state. However a parent may have already committed to
-        // drop power, which might force this object to temporarily drop power.
-        // This results in "oscillations" before the state machines converge
-        // to a steady state.
-        //
-        // To prevent this, a child must make a power reservation against all
-        // parents before raising power. If the reservation fails, indicating
-        // that the child will be unable to sustain the higher power state,
-        // then the child will signal the parent to adjust power, and the child
-        // will defer its power change.
-
-        IOReturn ret;
-
-        // Reserve parent power necessary to achieve fHeadNotePowerState.
-        ret = requestDomainPower( fHeadNotePowerState, kReserveDomainPower );
-        if (ret != kIOReturnSuccess)
-        {
-            // Reservation failed, defer power rise.
-            fHeadNoteChangeFlags |= kIOPMNotDone;
-            OurChangeFinish();
-            return;
-        }
-
-        OurChangeTellCapabilityWillChange();
+void
+IOService::OurChangeStart( void )
+{
+	PM_ASSERT_IN_GATE();
+	OUR_PMLog( kPMLogStartDeviceChange, fHeadNotePowerState, fCurrentPowerState );
+
+	// fMaxPowerState is our maximum possible power state based on the current
+	// power state of our parents. If we are trying to raise power beyond the
+	// maximum, send an async request for more power to all parents.
+
+	if (!IS_PM_ROOT && (StateOrder(fMaxPowerState) < StateOrder(fHeadNotePowerState))) {
+		fHeadNoteChangeFlags |= kIOPMNotDone;
+		requestDomainPower(fHeadNotePowerState);
+		OurChangeFinish();
+		return;
+	}
+
+	// Redundant power changes skip to the end of the state machine.
+
+	if (!fInitialPowerChange && (fHeadNotePowerState == fCurrentPowerState)) {
+		OurChangeFinish();
+		return;
+	}
+	fInitialPowerChange = false;
+
+	// Change started, but may not complete...
+	// Can be canceled (power drop) or deferred (power rise).
+
+	PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags);
+
+	// Two separate paths, depending on whether power is being raised or lowered.
+	// Lowering power is subject to approval by clients of this service.
+
+	if (IS_POWER_DROP) {
+		fDoNotPowerDown = false;
+
+		// Ask for permission to drop power state
+		fMachineState = kIOPM_OurChangeTellClientsPowerDown;
+		fOutOfBandParameter = kNotifyApps;
+		askChangeDown(fHeadNotePowerState);
+	} else {
+		// This service is raising power and parents are able to support the
+		// new power state. However a parent may have already committed to
+		// drop power, which might force this object to temporarily drop power.
+		// This results in "oscillations" before the state machines converge
+		// to a steady state.
+		//
+		// To prevent this, a child must make a power reservation against all
+		// parents before raising power. If the reservation fails, indicating
+		// that the child will be unable to sustain the higher power state,
+		// then the child will signal the parent to adjust power, and the child
+		// will defer its power change.
+
+		IOReturn ret;
+
+		// Reserve parent power necessary to achieve fHeadNotePowerState.
+		ret = requestDomainPower( fHeadNotePowerState, kReserveDomainPower );
+		if (ret != kIOReturnSuccess) {
+			// Reservation failed, defer power rise.
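+			// requestDomainPowerApplier has already signaled each parent to
+			// adjust power, so this deferred change is expected to be retried
+			// once a parent raises the domain.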
+			fHeadNoteChangeFlags |= kIOPMNotDone;
+			OurChangeFinish();
+			return;
+		}
+
+		OurChangeTellCapabilityWillChange();
+	}
 }
 
 //*********************************************************************************
@@ -4585,37 +4496,38 @@ void IOService::OurChangeStart( void )
 //*********************************************************************************
 
 struct IOPMRequestDomainPowerContext {
-    IOService * child; // the requesting child
-    IOPMPowerFlags requestPowerFlags; // power flags requested by child
+	IOService * child; // the requesting child
+	IOPMPowerFlags requestPowerFlags; // power flags requested by child
 };
 
 static void
 requestDomainPowerApplier(
-    IORegistryEntry * entry,
-    void * inContext )
+	IORegistryEntry * entry,
+	void * inContext )
 {
-    IOPowerConnection * connection;
-    IOService * parent;
-    IOPMRequestDomainPowerContext * context;
+	IOPowerConnection * connection;
+	IOService * parent;
+	IOPMRequestDomainPowerContext * context;
 
-    if ((connection = OSDynamicCast(IOPowerConnection, entry)) == 0)
-        return;
-    parent = (IOService *) connection->copyParentEntry(gIOPowerPlane);
-    if (!parent)
-        return;
+	if ((connection = OSDynamicCast(IOPowerConnection, entry)) == 0) {
+		return;
+	}
+	parent = (IOService *) connection->copyParentEntry(gIOPowerPlane);
+	if (!parent) {
+		return;
+	}
 
-    assert(inContext);
-    context = (IOPMRequestDomainPowerContext *) inContext;
+	assert(inContext);
+	context = (IOPMRequestDomainPowerContext *) inContext;
 
-    if (connection->parentKnowsState() && connection->getReadyFlag())
-    {
-        parent->requestPowerDomainState(
-            context->requestPowerFlags,
-            connection,
-            IOPMLowestState);
-    }
+	if (connection->parentKnowsState() && connection->getReadyFlag()) {
+		parent->requestPowerDomainState(
+			context->requestPowerFlags,
+			connection,
+			IOPMLowestState);
+	}
 
-    parent->release();
+	parent->release();
 }
 
 //*********************************************************************************
@@ -4626,94 +4538,92 @@ requestDomainPowerApplier(
 // allow its parents to adjust power state.
 //*********************************************************************************
 
-IOReturn IOService::requestDomainPower(
-    IOPMPowerStateIndex ourPowerState,
-    IOOptionBits options )
-{
-    IOPMPowerFlags requestPowerFlags;
-    IOPMPowerStateIndex maxPowerState;
-    IOPMRequestDomainPowerContext context;
-
-    PM_ASSERT_IN_GATE();
-    assert(ourPowerState < fNumberOfPowerStates);
-    if (ourPowerState >= fNumberOfPowerStates)
-        return kIOReturnBadArgument;
-    if (IS_PM_ROOT)
-        return kIOReturnSuccess;
-
-    // Fetch our input power flags for the requested power state.
-    // Parent request is stated in terms of required power flags.
-
-    requestPowerFlags = fPowerStates[ourPowerState].inputPowerFlags;
-
-    // Disregard the "previous request" for power reservation.
-
-    if (((options & kReserveDomainPower) == 0) &&
-        (fPreviousRequestPowerFlags == requestPowerFlags))
-    {
-        // skip if domain already knows our requirements
-        goto done;
-    }
-    fPreviousRequestPowerFlags = requestPowerFlags;
-
-    // The results will be collected by fHeadNoteDomainTargetFlags
-    context.child = this;
-    context.requestPowerFlags = requestPowerFlags;
-    fHeadNoteDomainTargetFlags = 0;
-    applyToParents(requestDomainPowerApplier, &context, gIOPowerPlane);
-
-    if (options & kReserveDomainPower)
-    {
-        maxPowerState = fControllingDriver->maxCapabilityForDomainState(
-            fHeadNoteDomainTargetFlags );
-
-        if (StateOrder(maxPowerState) < StateOrder(ourPowerState))
-        {
-            PM_LOG1("%s: power desired %u:0x%x got %u:0x%x\n",
-                getName(),
-                (uint32_t) ourPowerState, (uint32_t) requestPowerFlags,
-                (uint32_t) maxPowerState, (uint32_t) fHeadNoteDomainTargetFlags);
-            return kIOReturnNoPower;
-        }
-    }
+IOReturn
+IOService::requestDomainPower(
+	IOPMPowerStateIndex ourPowerState,
+	IOOptionBits options )
+{
+	IOPMPowerFlags requestPowerFlags;
+	IOPMPowerStateIndex maxPowerState;
+	IOPMRequestDomainPowerContext context;
+
+	PM_ASSERT_IN_GATE();
+	assert(ourPowerState < fNumberOfPowerStates);
+	if (ourPowerState >= fNumberOfPowerStates) {
+		return kIOReturnBadArgument;
+	}
+	if (IS_PM_ROOT) {
+		return kIOReturnSuccess;
+	}
+
+	// Fetch our input power flags for the requested power state.
+	// Parent request is stated in terms of required power flags.
+
+	requestPowerFlags = fPowerStates[ourPowerState].inputPowerFlags;
+
+	// Disregard the "previous request" for power reservation.
+
+	if (((options & kReserveDomainPower) == 0) &&
+	    (fPreviousRequestPowerFlags == requestPowerFlags)) {
+		// skip if domain already knows our requirements
+		goto done;
+	}
+	fPreviousRequestPowerFlags = requestPowerFlags;
+
+	// The results will be collected by fHeadNoteDomainTargetFlags
+	context.child = this;
+	context.requestPowerFlags = requestPowerFlags;
+	fHeadNoteDomainTargetFlags = 0;
+	applyToParents(requestDomainPowerApplier, &context, gIOPowerPlane);
+
+	if (options & kReserveDomainPower) {
+		maxPowerState = fControllingDriver->maxCapabilityForDomainState(
+			fHeadNoteDomainTargetFlags );
+
+		if (StateOrder(maxPowerState) < StateOrder(ourPowerState)) {
+			PM_LOG1("%s: power desired %u:0x%x got %u:0x%x\n",
+			    getName(),
+			    (uint32_t) ourPowerState, (uint32_t) requestPowerFlags,
+			    (uint32_t) maxPowerState, (uint32_t) fHeadNoteDomainTargetFlags);
+			return kIOReturnNoPower;
+		}
+	}
 
 done:
-    return kIOReturnSuccess;
+	return kIOReturnSuccess;
 }
 
 //*********************************************************************************
 // [private] OurSyncStart
 //*********************************************************************************
 
-void IOService::OurSyncStart( void )
+void
+IOService::OurSyncStart( void )
 {
-    PM_ASSERT_IN_GATE();
+	PM_ASSERT_IN_GATE();
 
-    if (fInitialPowerChange)
-        return;
+	if (fInitialPowerChange) {
+		return;
+	}
 
-    PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags);
+	PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags);
 
-    if (fHeadNoteChangeFlags & kIOPMNotDone)
-    {
-        OurChangeFinish();
-        return;
-    }
+	if (fHeadNoteChangeFlags & kIOPMNotDone) {
+		OurChangeFinish();
+		return;
+	}
 
-    if (fHeadNoteChangeFlags & kIOPMSyncTellPowerDown)
-    {
-        fDoNotPowerDown = false;
+	if (fHeadNoteChangeFlags & kIOPMSyncTellPowerDown) {
+		fDoNotPowerDown = false;
 
-        // Ask for permission to drop power state
-        fMachineState = kIOPM_SyncTellClientsPowerDown;
-        fOutOfBandParameter = kNotifyApps;
-        askChangeDown(fHeadNotePowerState);
-    }
-    else
-    {
-        // Only inform capability app and clients.
-        tellSystemCapabilityChange( kIOPM_SyncNotifyWillChange );
-    }
+		// Ask for permission to drop power state
+		fMachineState = kIOPM_SyncTellClientsPowerDown;
+		fOutOfBandParameter = kNotifyApps;
+		askChangeDown(fHeadNotePowerState);
+	} else {
+		// Only inform capability app and clients.
+		tellSystemCapabilityChange( kIOPM_SyncNotifyWillChange );
	}
 }
 
 //*********************************************************************************
@@ -4723,15 +4633,15 @@ void IOService::OurSyncStart( void )
 // power. Here we notify them that we will lower the power and wait for acks.
 //*********************************************************************************
 
-void IOService::OurChangeTellClientsPowerDown( void )
+void
+IOService::OurChangeTellClientsPowerDown( void )
 {
-    if(!IS_ROOT_DOMAIN)
-        fMachineState = kIOPM_OurChangeTellPriorityClientsPowerDown;
-    else
-    {
-        fMachineState = kIOPM_OurChangeTellUserPMPolicyPowerDown;
-    }
-    tellChangeDown1(fHeadNotePowerState);
+	if (!IS_ROOT_DOMAIN) {
+		fMachineState = kIOPM_OurChangeTellPriorityClientsPowerDown;
+	} else {
+		fMachineState = kIOPM_OurChangeTellUserPMPolicyPowerDown;
+	}
+	tellChangeDown1(fHeadNotePowerState);
 }
 
 //*********************************************************************************
@@ -4741,12 +4651,13 @@ void IOService::OurChangeTellClientsPowerDown( void )
 // power. Here we notify power management policy in user-space and wait for acks
 // one last time before we lower power
 //*********************************************************************************
 
-void IOService::OurChangeTellUserPMPolicyPowerDown ( void )
+void
+IOService::OurChangeTellUserPMPolicyPowerDown( void )
 {
-    fMachineState = kIOPM_OurChangeTellPriorityClientsPowerDown;
-    fOutOfBandParameter = kNotifyApps;
+	fMachineState = kIOPM_OurChangeTellPriorityClientsPowerDown;
+	fOutOfBandParameter = kNotifyApps;
 
-    tellClientsWithResponse(kIOPMMessageLastCallBeforeSleep);
+	tellClientsWithResponse(kIOPMMessageLastCallBeforeSleep);
 }
 
 //*********************************************************************************
@@ -4756,10 +4667,11 @@ void IOService::OurChangeTellUserPMPolicyPowerDown ( void )
 // power. Here we notify "priority" clients that we are lowering power.
 //*********************************************************************************
 
-void IOService::OurChangeTellPriorityClientsPowerDown( void )
+void
+IOService::OurChangeTellPriorityClientsPowerDown( void )
 {
-    fMachineState = kIOPM_OurChangeNotifyInterestedDriversWillChange;
-    tellChangeDown2(fHeadNotePowerState);
+	fMachineState = kIOPM_OurChangeNotifyInterestedDriversWillChange;
+	tellChangeDown2(fHeadNotePowerState);
 }
 
 //*********************************************************************************
@@ -4769,12 +4681,14 @@ void IOService::OurChangeTellPriorityClientsPowerDown( void )
 // system capability change when raising power state.
 //*********************************************************************************
 
-void IOService::OurChangeTellCapabilityWillChange( void )
+void
+IOService::OurChangeTellCapabilityWillChange( void )
 {
-    if (!IS_ROOT_DOMAIN)
-        return OurChangeNotifyInterestedDriversWillChange();
+	if (!IS_ROOT_DOMAIN) {
+		return OurChangeNotifyInterestedDriversWillChange();
+	}
 
-    tellSystemCapabilityChange( kIOPM_OurChangeNotifyInterestedDriversWillChange );
+	tellSystemCapabilityChange( kIOPM_OurChangeNotifyInterestedDriversWillChange );
 }
 
 //*********************************************************************************
@@ -4784,20 +4698,19 @@ void IOService::OurChangeTellCapabilityWillChange( void )
 // Here we notify interested drivers pre-change.
 //*********************************************************************************
 
-void IOService::OurChangeNotifyInterestedDriversWillChange( void )
-{
-    IOPMrootDomain * rootDomain;
-    if ((rootDomain = getPMRootDomain()) == this)
-    {
-        if (IS_POWER_DROP)
-        {
-            rootDomain->tracePoint( kIOPMTracePointSleepWillChangeInterests );
-        }
-        else
-            rootDomain->tracePoint( kIOPMTracePointWakeWillChangeInterests );
-    }
+void
+IOService::OurChangeNotifyInterestedDriversWillChange( void )
+{
+	IOPMrootDomain * rootDomain;
+	if ((rootDomain = getPMRootDomain()) == this) {
+		if (IS_POWER_DROP) {
+			rootDomain->tracePoint( kIOPMTracePointSleepWillChangeInterests );
+		} else {
+			rootDomain->tracePoint( kIOPMTracePointWakeWillChangeInterests );
+		}
+	}
 
-    notifyAll( kIOPM_OurChangeSetPowerState );
+	notifyAll( kIOPM_OurChangeSetPowerState );
 }
 
 //*********************************************************************************
@@ -4807,14 +4720,16 @@ void IOService::OurChangeNotifyInterestedDriversWillChange( void )
 // change. Wait for async completions.
 //*********************************************************************************
 
-void IOService::OurChangeSetPowerState( void )
+void
+IOService::OurChangeSetPowerState( void )
 {
-    MS_PUSH( kIOPM_OurChangeWaitForPowerSettle );
-    fMachineState = kIOPM_DriverThreadCallDone;
-    fDriverCallReason = kDriverCallSetPowerState;
+	MS_PUSH( kIOPM_OurChangeWaitForPowerSettle );
+	fMachineState = kIOPM_DriverThreadCallDone;
+	fDriverCallReason = kDriverCallSetPowerState;
 
-    if (notifyControllingDriver() == false)
-        notifyControllingDriverDone();
+	if (notifyControllingDriver() == false) {
+		notifyControllingDriverDone();
+	}
 }
 
 //*********************************************************************************
@@ -4824,10 +4739,11 @@ void IOService::OurChangeSetPowerState( void )
 // Wait for the driver specified settle time to expire.
 //*********************************************************************************
 
-void IOService::OurChangeWaitForPowerSettle( void )
+void
+IOService::OurChangeWaitForPowerSettle( void )
 {
-    fMachineState = kIOPM_OurChangeNotifyInterestedDriversDidChange;
-    startSettleTimer();
+	fMachineState = kIOPM_OurChangeNotifyInterestedDriversDidChange;
+	startSettleTimer();
 }
 
 //*********************************************************************************
@@ -4837,17 +4753,17 @@ void IOService::OurChangeWaitForPowerSettle( void )
 // all our interested drivers post-change.
 //*********************************************************************************
 
-void IOService::OurChangeNotifyInterestedDriversDidChange( void )
+void
+IOService::OurChangeNotifyInterestedDriversDidChange( void )
 {
-    IOPMrootDomain * rootDomain;
-    if ((rootDomain = getPMRootDomain()) == this)
-    {
-        rootDomain->tracePoint( IS_POWER_DROP ?
-            kIOPMTracePointSleepDidChangeInterests :
-            kIOPMTracePointWakeDidChangeInterests );
-    }
+	IOPMrootDomain * rootDomain;
+	if ((rootDomain = getPMRootDomain()) == this) {
+		rootDomain->tracePoint( IS_POWER_DROP ?
+		    kIOPMTracePointSleepDidChangeInterests :
+		    kIOPMTracePointWakeDidChangeInterests );
+	}
 
-    notifyAll( kIOPM_OurChangeTellCapabilityDidChange );
+	notifyAll( kIOPM_OurChangeTellCapabilityDidChange );
 }
 
 //*********************************************************************************
@@ -4856,16 +4772,18 @@ void IOService::OurChangeNotifyInterestedDriversDidChange( void )
 // For root domain to notify capability power-change.
 //*********************************************************************************
 
-void IOService::OurChangeTellCapabilityDidChange( void )
+void
+IOService::OurChangeTellCapabilityDidChange( void )
 {
-    if (!IS_ROOT_DOMAIN)
-        return OurChangeFinish();
+	if (!IS_ROOT_DOMAIN) {
+		return OurChangeFinish();
+	}
 
-    getPMRootDomain()->tracePoint( IS_POWER_DROP ?
-        kIOPMTracePointSleepCapabilityClients :
-        kIOPMTracePointWakeCapabilityClients );
+	getPMRootDomain()->tracePoint( IS_POWER_DROP ?
+	    kIOPMTracePointSleepCapabilityClients :
+	    kIOPMTracePointWakeCapabilityClients );
 
-    tellSystemCapabilityChange( kIOPM_OurChangeFinish );
+	tellSystemCapabilityChange( kIOPM_OurChangeFinish );
 }
 
 //*********************************************************************************
@@ -4874,9 +4792,10 @@ void IOService::OurChangeTellCapabilityDidChange( void )
 // Done with this self-induced power state change.
 //*********************************************************************************
 
-void IOService::OurChangeFinish( void )
+void
+IOService::OurChangeFinish( void )
 {
-    all_done();
+	all_done();
 }
 
 // MARK: -
@@ -4888,83 +4807,74 @@ void IOService::OurChangeFinish( void )
 // Here we begin the processing of a power change initiated by our parent.
//********************************************************************************* -IOReturn IOService::ParentChangeStart( void ) -{ - PM_ASSERT_IN_GATE(); - OUR_PMLog( kPMLogStartParentChange, fHeadNotePowerState, fCurrentPowerState ); - - // Root power domain has transitioned to its max power state - if ((fHeadNoteChangeFlags & (kIOPMDomainDidChange | kIOPMRootChangeUp)) == - (kIOPMDomainDidChange | kIOPMRootChangeUp)) - { - // Restart the idle timer stopped by ParentChangeRootChangeDown() - if (fIdleTimerPeriod && fIdleTimerStopped) - { - restartIdleTimer(); - } - } - - // Power domain is forcing us to lower power - if ( StateOrder(fHeadNotePowerState) < StateOrder(fCurrentPowerState) ) - { - PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); - - // Tell apps and kernel clients - fInitialPowerChange = false; - fMachineState = kIOPM_ParentChangeTellPriorityClientsPowerDown; - tellChangeDown1(fHeadNotePowerState); - return IOPMWillAckLater; - } - - // Power domain is allowing us to raise power up to fHeadNotePowerState - if ( StateOrder(fHeadNotePowerState) > StateOrder(fCurrentPowerState) ) - { - if ( StateOrder(fDesiredPowerState) > StateOrder(fCurrentPowerState) ) - { - if ( StateOrder(fDesiredPowerState) < StateOrder(fHeadNotePowerState) ) - { - // We power up, but not all the way - fHeadNotePowerState = fDesiredPowerState; - fHeadNotePowerArrayEntry = &fPowerStates[fDesiredPowerState]; - OUR_PMLog(kPMLogAmendParentChange, fHeadNotePowerState, 0); - } - } else { - // We don't need to change - fHeadNotePowerState = fCurrentPowerState; - fHeadNotePowerArrayEntry = &fPowerStates[fCurrentPowerState]; - OUR_PMLog(kPMLogAmendParentChange, fHeadNotePowerState, 0); - } - } - - if ( fHeadNoteChangeFlags & kIOPMDomainDidChange ) - { - if ( StateOrder(fHeadNotePowerState) > StateOrder(fCurrentPowerState) ) - { - PM_ACTION_2(actionPowerChangeStart, - fHeadNotePowerState, &fHeadNoteChangeFlags); - - // Parent did change up - start our change up - fInitialPowerChange = false; - ParentChangeTellCapabilityWillChange(); - return IOPMWillAckLater; - } - else if (fHeadNoteChangeFlags & kIOPMRootBroadcastFlags) - { - // No need to change power state, but broadcast change - // to our children. 
- fMachineState = kIOPM_SyncNotifyDidChange; - fDriverCallReason = kDriverCallInformPreChange; - fHeadNoteChangeFlags |= kIOPMNotDone; - notifyChildren(); - return IOPMWillAckLater; - } - } - - // No power state change necessary - fHeadNoteChangeFlags |= kIOPMNotDone; - - all_done(); - return IOPMAckImplied; +IOReturn +IOService::ParentChangeStart( void ) +{ + PM_ASSERT_IN_GATE(); + OUR_PMLog( kPMLogStartParentChange, fHeadNotePowerState, fCurrentPowerState ); + + // Root power domain has transitioned to its max power state + if ((fHeadNoteChangeFlags & (kIOPMDomainDidChange | kIOPMRootChangeUp)) == + (kIOPMDomainDidChange | kIOPMRootChangeUp)) { + // Restart the idle timer stopped by ParentChangeRootChangeDown() + if (fIdleTimerPeriod && fIdleTimerStopped) { + restartIdleTimer(); + } + } + + // Power domain is forcing us to lower power + if (StateOrder(fHeadNotePowerState) < StateOrder(fCurrentPowerState)) { + PM_ACTION_2(actionPowerChangeStart, fHeadNotePowerState, &fHeadNoteChangeFlags); + + // Tell apps and kernel clients + fInitialPowerChange = false; + fMachineState = kIOPM_ParentChangeTellPriorityClientsPowerDown; + tellChangeDown1(fHeadNotePowerState); + return IOPMWillAckLater; + } + + // Power domain is allowing us to raise power up to fHeadNotePowerState + if (StateOrder(fHeadNotePowerState) > StateOrder(fCurrentPowerState)) { + if (StateOrder(fDesiredPowerState) > StateOrder(fCurrentPowerState)) { + if (StateOrder(fDesiredPowerState) < StateOrder(fHeadNotePowerState)) { + // We power up, but not all the way + fHeadNotePowerState = fDesiredPowerState; + fHeadNotePowerArrayEntry = &fPowerStates[fDesiredPowerState]; + OUR_PMLog(kPMLogAmendParentChange, fHeadNotePowerState, 0); + } + } else { + // We don't need to change + fHeadNotePowerState = fCurrentPowerState; + fHeadNotePowerArrayEntry = &fPowerStates[fCurrentPowerState]; + OUR_PMLog(kPMLogAmendParentChange, fHeadNotePowerState, 0); + } + } + + if (fHeadNoteChangeFlags & kIOPMDomainDidChange) { + if (StateOrder(fHeadNotePowerState) > StateOrder(fCurrentPowerState)) { + PM_ACTION_2(actionPowerChangeStart, + fHeadNotePowerState, &fHeadNoteChangeFlags); + + // Parent did change up - start our change up + fInitialPowerChange = false; + ParentChangeTellCapabilityWillChange(); + return IOPMWillAckLater; + } else if (fHeadNoteChangeFlags & kIOPMRootBroadcastFlags) { + // No need to change power state, but broadcast change + // to our children. + fMachineState = kIOPM_SyncNotifyDidChange; + fDriverCallReason = kDriverCallInformPreChange; + fHeadNoteChangeFlags |= kIOPMNotDone; + notifyChildren(); + return IOPMWillAckLater; + } + } + + // No power state change necessary + fHeadNoteChangeFlags |= kIOPMNotDone; + + all_done(); + return IOPMAckImplied; } //****************************************************************************** @@ -4976,93 +4886,83 @@ IOReturn IOService::ParentChangeStart( void ) // automatically restored on wake. //****************************************************************************** -void IOService::ParentChangeRootChangeDown( void ) -{ - // Always stop the idle timer before root power down - if (fIdleTimerPeriod && !fIdleTimerStopped) - { - fIdleTimerStopped = true; - if (fIdleTimer && thread_call_cancel(fIdleTimer)) - release(); - } - - if (fResetPowerStateOnWake) - { - // Reset device desire down to the lowest power state. - // Advisory tickle desire is intentionally untouched since - // it has no effect until system is promoted to full wake. 
- - if (fDeviceDesire != kPowerStateZero) - { - updatePowerClient(gIOPMPowerClientDevice, kPowerStateZero); - computeDesiredState(kPowerStateZero, true); - requestDomainPower( fDesiredPowerState ); - PM_LOG1("%s: tickle desire removed\n", fName); - } - - // Invalidate tickle cache so the next tickle will issue a request - IOLockLock(fActivityLock); - fDeviceWasActive = false; - fActivityTicklePowerState = kInvalidTicklePowerState; - IOLockUnlock(fActivityLock); - - fIdleTimerMinPowerState = kPowerStateZero; - } - else if (fAdvisoryTickleUsed) - { - // Less aggressive mechanism to accelerate idle timer expiration - // before system sleep. May not always allow the driver to wake - // up from system sleep in the min power state. - - AbsoluteTime now; - uint64_t nsec; - bool dropTickleDesire = false; - - if (fIdleTimerPeriod && !fIdleTimerIgnored && - (fIdleTimerMinPowerState == kPowerStateZero) && - (fDeviceDesire != kPowerStateZero)) - { - IOLockLock(fActivityLock); - - if (!fDeviceWasActive) - { - // No tickles since the last idle timer expiration. - // Safe to drop the device desire to zero. - dropTickleDesire = true; - } - else - { - // Was tickled since the last idle timer expiration, - // but not in the last minute. - clock_get_uptime(&now); - SUB_ABSOLUTETIME(&now, &fDeviceActiveTimestamp); - absolutetime_to_nanoseconds(now, &nsec); - if (nsec >= kNoTickleCancelWindow) - { - dropTickleDesire = true; - } - } - - if (dropTickleDesire) - { - // Force the next tickle to raise power state - fDeviceWasActive = false; - fActivityTicklePowerState = kInvalidTicklePowerState; - } - - IOLockUnlock(fActivityLock); - } - - if (dropTickleDesire) - { - // Advisory tickle desire is intentionally untouched since - // it has no effect until system is promoted to full wake. - - updatePowerClient(gIOPMPowerClientDevice, kPowerStateZero); - computeDesiredState(kPowerStateZero, true); - PM_LOG1("%s: tickle desire dropped\n", fName); - } - } +void +IOService::ParentChangeRootChangeDown( void ) +{ + // Always stop the idle timer before root power down + if (fIdleTimerPeriod && !fIdleTimerStopped) { + fIdleTimerStopped = true; + if (fIdleTimer && thread_call_cancel(fIdleTimer)) { + release(); + } + } + + if (fResetPowerStateOnWake) { + // Reset device desire down to the lowest power state. + // Advisory tickle desire is intentionally untouched since + // it has no effect until system is promoted to full wake. + + if (fDeviceDesire != kPowerStateZero) { + updatePowerClient(gIOPMPowerClientDevice, kPowerStateZero); + computeDesiredState(kPowerStateZero, true); + requestDomainPower( fDesiredPowerState ); + PM_LOG1("%s: tickle desire removed\n", fName); + } + + // Invalidate tickle cache so the next tickle will issue a request + IOLockLock(fActivityLock); + fDeviceWasActive = false; + fActivityTicklePowerState = kInvalidTicklePowerState; + IOLockUnlock(fActivityLock); + + fIdleTimerMinPowerState = kPowerStateZero; + } else if (fAdvisoryTickleUsed) { + // Less aggressive mechanism to accelerate idle timer expiration + // before system sleep. May not always allow the driver to wake + // up from system sleep in the min power state. + + AbsoluteTime now; + uint64_t nsec; + bool dropTickleDesire = false; + + if (fIdleTimerPeriod && !fIdleTimerIgnored && + (fIdleTimerMinPowerState == kPowerStateZero) && + (fDeviceDesire != kPowerStateZero)) { + IOLockLock(fActivityLock); + + if (!fDeviceWasActive) { + // No tickles since the last idle timer expiration. + // Safe to drop the device desire to zero. 
+ dropTickleDesire = true; + } else { + // Was tickled since the last idle timer expiration, + // but not in the last minute. + clock_get_uptime(&now); + SUB_ABSOLUTETIME(&now, &fDeviceActiveTimestamp); + absolutetime_to_nanoseconds(now, &nsec); + if (nsec >= kNoTickleCancelWindow) { + dropTickleDesire = true; + } + } + + if (dropTickleDesire) { + // Force the next tickle to raise power state + fDeviceWasActive = false; + fActivityTicklePowerState = kInvalidTicklePowerState; + } + + IOLockUnlock(fActivityLock); + } + + if (dropTickleDesire) { + // Advisory tickle desire is intentionally untouched since + // it has no effect until system is promoted to full wake. + + updatePowerClient(gIOPMPowerClientDevice, kPowerStateZero); + computeDesiredState(kPowerStateZero, true); + PM_LOG1("%s: tickle desire dropped\n", fName); + } + } } //********************************************************************************* @@ -5072,10 +4972,11 @@ void IOService::ParentChangeRootChangeDown( void ) // power. Here we notify "priority" clients that we are lowering power. //********************************************************************************* -void IOService::ParentChangeTellPriorityClientsPowerDown( void ) +void +IOService::ParentChangeTellPriorityClientsPowerDown( void ) { - fMachineState = kIOPM_ParentChangeNotifyInterestedDriversWillChange; - tellChangeDown2(fHeadNotePowerState); + fMachineState = kIOPM_ParentChangeNotifyInterestedDriversWillChange; + tellChangeDown2(fHeadNotePowerState); } //********************************************************************************* @@ -5085,12 +4986,14 @@ void IOService::ParentChangeTellPriorityClientsPowerDown( void ) // root domain to notify apps and drivers about the system capability change. //********************************************************************************* -void IOService::ParentChangeTellCapabilityWillChange( void ) +void +IOService::ParentChangeTellCapabilityWillChange( void ) { - if (!IS_ROOT_DOMAIN) - return ParentChangeNotifyInterestedDriversWillChange(); + if (!IS_ROOT_DOMAIN) { + return ParentChangeNotifyInterestedDriversWillChange(); + } - tellSystemCapabilityChange( kIOPM_ParentChangeNotifyInterestedDriversWillChange ); + tellSystemCapabilityChange( kIOPM_ParentChangeNotifyInterestedDriversWillChange ); } //********************************************************************************* @@ -5100,9 +5003,10 @@ void IOService::ParentChangeTellCapabilityWillChange( void ) // Here we notify interested drivers pre-change. //********************************************************************************* -void IOService::ParentChangeNotifyInterestedDriversWillChange( void ) +void +IOService::ParentChangeNotifyInterestedDriversWillChange( void ) { - notifyAll( kIOPM_ParentChangeSetPowerState ); + notifyAll( kIOPM_ParentChangeSetPowerState ); } //********************************************************************************* @@ -5112,14 +5016,16 @@ void IOService::ParentChangeNotifyInterestedDriversWillChange( void ) // change. Wait for async completions. 
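//*********************************************************************************

//*********************************************************************************
// What notifyControllingDriver() ultimately waits on is the controlling
// driver's setPowerState() return value. A minimal sketch of that side of
// the handshake (MyDriver, kMyOnState and the two hardware helpers are
// hypothetical): returning a time limit in microseconds rather than
// IOPMAckImplied is what arms the driver timer that ackTimerTick(), further
// below, decrements.
//*********************************************************************************

IOReturn
MyDriver::setPowerState( unsigned long powerStateOrdinal, IOService * whatDevice )
{
    if (powerStateOrdinal == kMyOnState) {
        beginHardwarePowerUp();         // hypothetical asynchronous bring-up
        return 10 * 1000 * 1000;        // ack within ten seconds, or be tardy
    }
    powerDownHardwareNow();             // hypothetical synchronous path
    return IOPMAckImplied;
}

void
MyDriver::hardwarePowerUpDone( void )
{
    acknowledgeSetPowerState();         // completes the pending state change
}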
//********************************************************************************* -void IOService::ParentChangeSetPowerState( void ) +void +IOService::ParentChangeSetPowerState( void ) { - MS_PUSH( kIOPM_ParentChangeWaitForPowerSettle ); - fMachineState = kIOPM_DriverThreadCallDone; - fDriverCallReason = kDriverCallSetPowerState; + MS_PUSH( kIOPM_ParentChangeWaitForPowerSettle ); + fMachineState = kIOPM_DriverThreadCallDone; + fDriverCallReason = kDriverCallSetPowerState; - if (notifyControllingDriver() == false) - notifyControllingDriverDone(); + if (notifyControllingDriver() == false) { + notifyControllingDriverDone(); + } } //********************************************************************************* @@ -5129,10 +5035,11 @@ void IOService::ParentChangeSetPowerState( void ) // parent. Wait for the driver specified settle time to expire. //********************************************************************************* -void IOService::ParentChangeWaitForPowerSettle( void ) +void +IOService::ParentChangeWaitForPowerSettle( void ) { - fMachineState = kIOPM_ParentChangeNotifyInterestedDriversDidChange; - startSettleTimer(); + fMachineState = kIOPM_ParentChangeNotifyInterestedDriversDidChange; + startSettleTimer(); } //********************************************************************************* @@ -5142,9 +5049,10 @@ void IOService::ParentChangeWaitForPowerSettle( void ) // all our interested drivers post-change. //********************************************************************************* -void IOService::ParentChangeNotifyInterestedDriversDidChange( void ) +void +IOService::ParentChangeNotifyInterestedDriversDidChange( void ) { - notifyAll( kIOPM_ParentChangeTellCapabilityDidChange ); + notifyAll( kIOPM_ParentChangeTellCapabilityDidChange ); } //********************************************************************************* @@ -5153,12 +5061,14 @@ void IOService::ParentChangeNotifyInterestedDriversDidChange( void ) // For root domain to notify capability power-change. //********************************************************************************* -void IOService::ParentChangeTellCapabilityDidChange( void ) +void +IOService::ParentChangeTellCapabilityDidChange( void ) { - if (!IS_ROOT_DOMAIN) - return ParentChangeAcknowledgePowerChange(); + if (!IS_ROOT_DOMAIN) { + return ParentChangeAcknowledgePowerChange(); + } - tellSystemCapabilityChange( kIOPM_ParentChangeAcknowledgePowerChange ); + tellSystemCapabilityChange( kIOPM_ParentChangeAcknowledgePowerChange ); } //********************************************************************************* @@ -5167,21 +5077,21 @@ void IOService::ParentChangeTellCapabilityDidChange( void ) // Acknowledge our power parent that our power change is done. 
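//*********************************************************************************

//*********************************************************************************
// Taken together, the fMachineState hand-offs above trace (roughly; the drop
// and raise paths branch inside ParentChangeStart) the parent-initiated
// sequence that ends in the function below:
//
//   ParentChangeStart
//     -> ParentChangeTellPriorityClientsPowerDown       (drop path only)
//     -> ParentChangeTellCapabilityWillChange           (raise path, root domain)
//     -> ParentChangeNotifyInterestedDriversWillChange
//     -> ParentChangeSetPowerState
//     -> ParentChangeWaitForPowerSettle
//     -> ParentChangeNotifyInterestedDriversDidChange
//     -> ParentChangeTellCapabilityDidChange            (root domain only)
//     -> ParentChangeAcknowledgePowerChange
//*********************************************************************************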
//********************************************************************************* -void IOService::ParentChangeAcknowledgePowerChange( void ) +void +IOService::ParentChangeAcknowledgePowerChange( void ) { - IORegistryEntry * nub; - IOService * parent; + IORegistryEntry * nub; + IOService * parent; - nub = fHeadNoteParentConnection; - nub->retain(); - all_done(); - parent = (IOService *)nub->copyParentEntry(gIOPowerPlane); - if ( parent ) - { - parent->acknowledgePowerChange((IOService *)nub); - parent->release(); - } - nub->release(); + nub = fHeadNoteParentConnection; + nub->retain(); + all_done(); + parent = (IOService *)nub->copyParentEntry(gIOPowerPlane); + if (parent) { + parent->acknowledgePowerChange((IOService *)nub); + parent->release(); + } + nub->release(); } // MARK: - @@ -5194,10 +5104,11 @@ void IOService::ParentChangeAcknowledgePowerChange( void ) // there is a new power state. //********************************************************************************* -void IOService::settleTimerExpired( void ) +void +IOService::settleTimerExpired( void ) { - fSettleTimeUS = 0; - gIOPMWorkQueue->signalWorkAvailable(); + fSettleTimeUS = 0; + gIOPMWorkQueue->signalWorkAvailable(); } //********************************************************************************* @@ -5209,15 +5120,14 @@ void IOService::settleTimerExpired( void ) static void settle_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ) { - IOService * me = (IOService *) arg0; + IOService * me = (IOService *) arg0; - if (gIOPMWorkLoop && gIOPMWorkQueue) - { - gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, me, &IOService::settleTimerExpired), - me); - } - me->release(); + if (gIOPMWorkLoop && gIOPMWorkQueue) { + gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, me, &IOService::settleTimerExpired), + me); + } + me->release(); } //********************************************************************************* @@ -5226,56 +5136,54 @@ settle_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ) // Calculate a power-settling delay in microseconds and start a timer. //********************************************************************************* -void IOService::startSettleTimer( void ) +void +IOService::startSettleTimer( void ) { #if NOT_USEFUL - // This function is broken and serves no useful purpose since it never - // updates fSettleTimeUS to a non-zero value to stall the state machine, - // yet it starts a delay timer. It appears no driver relies on a delay - // from settleUpTime and settleDownTime in the power state table. 
- - AbsoluteTime deadline; - IOPMPowerStateIndex stateIndex; - IOPMPowerStateIndex currentOrder, newOrder, i; - uint32_t settleTime = 0; - boolean_t pending; - - PM_ASSERT_IN_GATE(); - - currentOrder = StateOrder(fCurrentPowerState); - newOrder = StateOrder(fHeadNotePowerState); - - i = currentOrder; - - // lowering power - if ( newOrder < currentOrder ) - { - while ( i > newOrder ) - { - stateIndex = fPowerStates[i].stateOrderToIndex; - settleTime += (uint32_t) fPowerStates[stateIndex].settleDownTime; - i--; - } - } - - // raising power - if ( newOrder > currentOrder ) - { - while ( i < newOrder ) - { - stateIndex = fPowerStates[i+1].stateOrderToIndex; - settleTime += (uint32_t) fPowerStates[stateIndex].settleUpTime; - i++; - } - } - - if (settleTime) - { - retain(); - clock_interval_to_deadline(settleTime, kMicrosecondScale, &deadline); - pending = thread_call_enter_delayed(fSettleTimer, deadline); - if (pending) release(); - } + // This function is broken and serves no useful purpose since it never + // updates fSettleTimeUS to a non-zero value to stall the state machine, + // yet it starts a delay timer. It appears no driver relies on a delay + // from settleUpTime and settleDownTime in the power state table. + + AbsoluteTime deadline; + IOPMPowerStateIndex stateIndex; + IOPMPowerStateIndex currentOrder, newOrder, i; + uint32_t settleTime = 0; + boolean_t pending; + + PM_ASSERT_IN_GATE(); + + currentOrder = StateOrder(fCurrentPowerState); + newOrder = StateOrder(fHeadNotePowerState); + + i = currentOrder; + + // lowering power + if (newOrder < currentOrder) { + while (i > newOrder) { + stateIndex = fPowerStates[i].stateOrderToIndex; + settleTime += (uint32_t) fPowerStates[stateIndex].settleDownTime; + i--; + } + } + + // raising power + if (newOrder > currentOrder) { + while (i < newOrder) { + stateIndex = fPowerStates[i + 1].stateOrderToIndex; + settleTime += (uint32_t) fPowerStates[stateIndex].settleUpTime; + i++; + } + } + + if (settleTime) { + retain(); + clock_interval_to_deadline(settleTime, kMicrosecondScale, &deadline); + pending = thread_call_enter_delayed(fSettleTimer, deadline); + if (pending) { + release(); + } + } #endif } @@ -5295,250 +5203,245 @@ void IOService::startSettleTimer( void ) //********************************************************************************* #ifndef __LP64__ -void IOService::ack_timer_ticked ( void ) +void +IOService::ack_timer_ticked( void ) { - assert(false); + assert(false); } #endif /* !__LP64__ */ -bool IOService::ackTimerTick( void ) -{ - IOPMinformee * nextObject; - bool done = false; - - PM_ASSERT_IN_GATE(); - switch (fMachineState) { - case kIOPM_OurChangeWaitForPowerSettle: - case kIOPM_ParentChangeWaitForPowerSettle: - // are we waiting for controlling driver to acknowledge? - if ( fDriverTimer > 0 ) - { - // yes, decrement timer tick - fDriverTimer--; - if ( fDriverTimer == 0 ) - { - // controlling driver is tardy - uint64_t nsec = computeTimeDeltaNS(&fDriverCallStartTime); - OUR_PMLog(kPMLogCtrlDriverTardy, 0, 0); - setProperty(kIOPMTardyAckSPSKey, kOSBooleanTrue); - PM_ERROR("%s::setPowerState(%p, %lu -> %lu) timed out after %d ms\n", - fName, OBFUSCATE(this), fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); +bool +IOService::ackTimerTick( void ) +{ + IOPMinformee * nextObject; + bool done = false; + + PM_ASSERT_IN_GATE(); + switch (fMachineState) { + case kIOPM_OurChangeWaitForPowerSettle: + case kIOPM_ParentChangeWaitForPowerSettle: + // are we waiting for controlling driver to acknowledge? 
+ if (fDriverTimer > 0) { + // yes, decrement timer tick + fDriverTimer--; + if (fDriverTimer == 0) { + // controlling driver is tardy + uint64_t nsec = computeTimeDeltaNS(&fDriverCallStartTime); + OUR_PMLog(kPMLogCtrlDriverTardy, 0, 0); + setProperty(kIOPMTardyAckSPSKey, kOSBooleanTrue); + PM_ERROR("%s::setPowerState(%p, %lu -> %lu) timed out after %d ms\n", + fName, OBFUSCATE(this), fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); #if DEBUG && CONFIG_EMBEDDED - panic("%s::setPowerState(%p, %lu -> %lu) timed out after %d ms", - fName, this, fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); + panic("%s::setPowerState(%p, %lu -> %lu) timed out after %d ms", + fName, this, fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); #else - if (gIOKitDebug & kIOLogDebugPower) - { - panic("%s::setPowerState(%p, %lu -> %lu) timed out after %d ms", - fName, this, fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); - } - else - { - // Unblock state machine and pretend driver has acked. - done = true; - } + if (gIOKitDebug & kIOLogDebugPower) { + panic("%s::setPowerState(%p, %lu -> %lu) timed out after %d ms", + fName, this, fCurrentPowerState, fHeadNotePowerState, NS_TO_MS(nsec)); + } else { + // Unblock state machine and pretend driver has acked. + done = true; + } #endif - getPMRootDomain()->reset_watchdog_timer(this, 0); - } else { - // still waiting, set timer again - start_ack_timer(); - } - } - break; - - case kIOPM_NotifyChildrenStart: - // are we waiting for interested parties to acknowledge? - if ( fHeadNotePendingAcks != 0 ) - { - // yes, go through the list of interested drivers - nextObject = fInterestedDrivers->firstInList(); - // and check each one - while ( nextObject != NULL ) - { - if ( nextObject->timer > 0 ) - { - nextObject->timer--; - // this one should have acked by now - if ( nextObject->timer == 0 ) - { - uint64_t nsec = computeTimeDeltaNS(&nextObject->startTime); - OUR_PMLog(kPMLogIntDriverTardy, 0, 0); - nextObject->whatObject->setProperty(kIOPMTardyAckPSCKey, kOSBooleanTrue); - PM_ERROR("%s::powerState%sChangeTo(%p, %s, %lu -> %lu) timed out after %d ms\n", - nextObject->whatObject->getName(), - (fDriverCallReason == kDriverCallInformPreChange) ? "Will" : "Did", - OBFUSCATE(nextObject->whatObject), fName, fCurrentPowerState, fHeadNotePowerState, - NS_TO_MS(nsec)); - - // Pretend driver has acked. - fHeadNotePendingAcks--; - } - } - nextObject = fInterestedDrivers->nextInList(nextObject); - } - - // is that the last? 
- if ( fHeadNotePendingAcks == 0 ) - { - // yes, we can continue - done = true; - } else { - // no, set timer again - start_ack_timer(); - } - } - break; - - // TODO: aggreggate this - case kIOPM_OurChangeTellClientsPowerDown: - case kIOPM_OurChangeTellUserPMPolicyPowerDown: - case kIOPM_OurChangeTellPriorityClientsPowerDown: - case kIOPM_OurChangeNotifyInterestedDriversWillChange: - case kIOPM_ParentChangeTellPriorityClientsPowerDown: - case kIOPM_ParentChangeNotifyInterestedDriversWillChange: - case kIOPM_SyncTellClientsPowerDown: - case kIOPM_SyncTellPriorityClientsPowerDown: - case kIOPM_SyncNotifyWillChange: - case kIOPM_TellCapabilityChangeDone: - // apps didn't respond in time - cleanClientResponses(true); - OUR_PMLog(kPMLogClientTardy, 0, 1); - // tardy equates to approval - done = true; - break; - - default: - PM_LOG1("%s: unexpected ack timer tick (state = %d)\n", - getName(), fMachineState); - break; - } - return done; + getPMRootDomain()->reset_watchdog_timer(this, 0); + } else { + // still waiting, set timer again + start_ack_timer(); + } + } + break; + + case kIOPM_NotifyChildrenStart: + // are we waiting for interested parties to acknowledge? + if (fHeadNotePendingAcks != 0) { + // yes, go through the list of interested drivers + nextObject = fInterestedDrivers->firstInList(); + // and check each one + while (nextObject != NULL) { + if (nextObject->timer > 0) { + nextObject->timer--; + // this one should have acked by now + if (nextObject->timer == 0) { + uint64_t nsec = computeTimeDeltaNS(&nextObject->startTime); + OUR_PMLog(kPMLogIntDriverTardy, 0, 0); + nextObject->whatObject->setProperty(kIOPMTardyAckPSCKey, kOSBooleanTrue); + PM_ERROR("%s::powerState%sChangeTo(%p, %s, %lu -> %lu) timed out after %d ms\n", + nextObject->whatObject->getName(), + (fDriverCallReason == kDriverCallInformPreChange) ? "Will" : "Did", + OBFUSCATE(nextObject->whatObject), fName, fCurrentPowerState, fHeadNotePowerState, + NS_TO_MS(nsec)); + + // Pretend driver has acked. + fHeadNotePendingAcks--; + } + } + nextObject = fInterestedDrivers->nextInList(nextObject); + } + + // is that the last? 
+ if (fHeadNotePendingAcks == 0) { + // yes, we can continue + done = true; + } else { + // no, set timer again + start_ack_timer(); + } + } + break; + + // TODO: aggreggate this + case kIOPM_OurChangeTellClientsPowerDown: + case kIOPM_OurChangeTellUserPMPolicyPowerDown: + case kIOPM_OurChangeTellPriorityClientsPowerDown: + case kIOPM_OurChangeNotifyInterestedDriversWillChange: + case kIOPM_ParentChangeTellPriorityClientsPowerDown: + case kIOPM_ParentChangeNotifyInterestedDriversWillChange: + case kIOPM_SyncTellClientsPowerDown: + case kIOPM_SyncTellPriorityClientsPowerDown: + case kIOPM_SyncNotifyWillChange: + case kIOPM_TellCapabilityChangeDone: + // apps didn't respond in time + cleanClientResponses(true); + OUR_PMLog(kPMLogClientTardy, 0, 1); + // tardy equates to approval + done = true; + break; + + default: + PM_LOG1("%s: unexpected ack timer tick (state = %d)\n", + getName(), fMachineState); + break; + } + return done; } //********************************************************************************* // [private] start_watchdog_timer //********************************************************************************* -void IOService::start_watchdog_timer( void ) +void +IOService::start_watchdog_timer( void ) { - int timeout; - uint64_t deadline; + int timeout; + uint64_t deadline; - if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) - return; + if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) { + return; + } - IOLockLock(fWatchdogLock); + IOLockLock(fWatchdogLock); - timeout = getPMRootDomain()->getWatchdogTimeout(); - clock_interval_to_deadline(timeout, kSecondScale, &deadline); - fWatchdogDeadline = deadline; - start_watchdog_timer(deadline); - IOLockUnlock(fWatchdogLock); + timeout = getPMRootDomain()->getWatchdogTimeout(); + clock_interval_to_deadline(timeout, kSecondScale, &deadline); + fWatchdogDeadline = deadline; + start_watchdog_timer(deadline); + IOLockUnlock(fWatchdogLock); } -void IOService::start_watchdog_timer(uint64_t deadline) +void +IOService::start_watchdog_timer(uint64_t deadline) { + IOLockAssert(fWatchdogLock, kIOLockAssertOwned); - IOLockAssert(fWatchdogLock, kIOLockAssertOwned); - - if (!thread_call_isactive(fWatchdogTimer)) { - thread_call_enter_delayed(fWatchdogTimer, deadline); - } - + if (!thread_call_isactive(fWatchdogTimer)) { + thread_call_enter_delayed(fWatchdogTimer, deadline); + } } //********************************************************************************* // [private] stop_watchdog_timer //********************************************************************************* -void IOService::stop_watchdog_timer( void ) +void +IOService::stop_watchdog_timer( void ) { - if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) - return; + if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) { + return; + } - IOLockLock(fWatchdogLock); + IOLockLock(fWatchdogLock); - thread_call_cancel(fWatchdogTimer); - fWatchdogDeadline = 0; + thread_call_cancel(fWatchdogTimer); + fWatchdogDeadline = 0; - while (fBlockedArray->getCount()) { - IOService *obj = OSDynamicCast(IOService, fBlockedArray->getObject(0)); - if (obj) { - PM_ERROR("WDOG:Object %s unexpected in blocked array\n", obj->fName); - fBlockedArray->removeObject(0); - } - } + while (fBlockedArray->getCount()) { + IOService *obj = OSDynamicCast(IOService, fBlockedArray->getObject(0)); + if (obj) { + PM_ERROR("WDOG:Object %s unexpected in blocked array\n", obj->fName); + fBlockedArray->removeObject(0); + } + } - IOLockUnlock(fWatchdogLock); + IOLockUnlock(fWatchdogLock); 
} //********************************************************************************* // reset_watchdog_timer //********************************************************************************* -void IOService::reset_watchdog_timer(IOService *blockedObject, int pendingResponseTimeout) -{ - unsigned int i; - uint64_t deadline; - IOService *obj; - - if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) - return; - - - IOLockLock(fWatchdogLock); - if (!fWatchdogDeadline) { - goto exit; - } - - i = fBlockedArray->getNextIndexOfObject(blockedObject, 0); - if (pendingResponseTimeout == 0) { - blockedObject->fPendingResponseDeadline = 0; - if (i == (unsigned int)-1) { - goto exit; - } - fBlockedArray->removeObject(i); - } - else { - // Set deadline 2secs after the expected response timeout to allow - // ack timer to handle the timeout. - clock_interval_to_deadline(pendingResponseTimeout+2, kSecondScale, &deadline); - - if (i != (unsigned int)-1) { - PM_ERROR("WDOG:Object %s is already blocked for responses. Ignoring timeout %d\n", - fName, pendingResponseTimeout); - goto exit; - } - - - for (i = 0; i < fBlockedArray->getCount(); i++) { - obj = OSDynamicCast(IOService, fBlockedArray->getObject(i)); - if (obj && (obj->fPendingResponseDeadline < deadline)) { - blockedObject->fPendingResponseDeadline = deadline; - fBlockedArray->setObject(i, blockedObject); - break; - } - } - if (i == fBlockedArray->getCount()) { - blockedObject->fPendingResponseDeadline = deadline; - fBlockedArray->setObject(blockedObject); - } - } - - obj = OSDynamicCast(IOService, fBlockedArray->getObject(0)); - if (!obj) { - int timeout = getPMRootDomain()->getWatchdogTimeout(); - clock_interval_to_deadline(timeout, kSecondScale, &deadline); - } - else { - deadline = obj->fPendingResponseDeadline; - } - - thread_call_cancel(fWatchdogTimer); - start_watchdog_timer(deadline); +void +IOService::reset_watchdog_timer(IOService *blockedObject, int pendingResponseTimeout) +{ + unsigned int i; + uint64_t deadline; + IOService *obj; + + if (!fWatchdogTimer || (kIOSleepWakeWdogOff & gIOKitDebug)) { + return; + } + + + IOLockLock(fWatchdogLock); + if (!fWatchdogDeadline) { + goto exit; + } + + i = fBlockedArray->getNextIndexOfObject(blockedObject, 0); + if (pendingResponseTimeout == 0) { + blockedObject->fPendingResponseDeadline = 0; + if (i == (unsigned int)-1) { + goto exit; + } + fBlockedArray->removeObject(i); + } else { + // Set deadline 2secs after the expected response timeout to allow + // ack timer to handle the timeout. + clock_interval_to_deadline(pendingResponseTimeout + 2, kSecondScale, &deadline); + + if (i != (unsigned int)-1) { + PM_ERROR("WDOG:Object %s is already blocked for responses. 
Ignoring timeout %d\n", + fName, pendingResponseTimeout); + goto exit; + } + + + for (i = 0; i < fBlockedArray->getCount(); i++) { + obj = OSDynamicCast(IOService, fBlockedArray->getObject(i)); + if (obj && (obj->fPendingResponseDeadline < deadline)) { + blockedObject->fPendingResponseDeadline = deadline; + fBlockedArray->setObject(i, blockedObject); + break; + } + } + if (i == fBlockedArray->getCount()) { + blockedObject->fPendingResponseDeadline = deadline; + fBlockedArray->setObject(blockedObject); + } + } + + obj = OSDynamicCast(IOService, fBlockedArray->getObject(0)); + if (!obj) { + int timeout = getPMRootDomain()->getWatchdogTimeout(); + clock_interval_to_deadline(timeout, kSecondScale, &deadline); + } else { + deadline = obj->fPendingResponseDeadline; + } + + thread_call_cancel(fWatchdogTimer); + start_watchdog_timer(deadline); exit: - IOLockUnlock(fWatchdogLock); + IOLockUnlock(fWatchdogLock); } @@ -5551,22 +5454,23 @@ exit: void IOService::watchdog_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ) { - IOService * me = (IOService *) arg0; + IOService * me = (IOService *) arg0; - gIOPMWatchDogThread = current_thread(); - getPMRootDomain()->sleepWakeDebugTrig(true); - gIOPMWatchDogThread = 0; - thread_call_free(me->fWatchdogTimer); - me->fWatchdogTimer = 0; + gIOPMWatchDogThread = current_thread(); + getPMRootDomain()->sleepWakeDebugTrig(true); + gIOPMWatchDogThread = 0; + thread_call_free(me->fWatchdogTimer); + me->fWatchdogTimer = 0; - return ; + return; } -IOWorkLoop * IOService::getIOPMWorkloop( void ) +IOWorkLoop * +IOService::getIOPMWorkloop( void ) { - return gIOPMWorkLoop; + return gIOPMWorkLoop; } @@ -5575,34 +5479,40 @@ IOWorkLoop * IOService::getIOPMWorkloop( void ) // [private] start_ack_timer //********************************************************************************* -void IOService::start_ack_timer( void ) +void +IOService::start_ack_timer( void ) { - start_ack_timer( ACK_TIMER_PERIOD, kNanosecondScale ); + start_ack_timer( ACK_TIMER_PERIOD, kNanosecondScale ); } -void IOService::start_ack_timer ( UInt32 interval, UInt32 scale ) +void +IOService::start_ack_timer( UInt32 interval, UInt32 scale ) { - AbsoluteTime deadline; - boolean_t pending; - - clock_interval_to_deadline(interval, scale, &deadline); + AbsoluteTime deadline; + boolean_t pending; - retain(); - pending = thread_call_enter_delayed(fAckTimer, deadline); - if (pending) release(); + clock_interval_to_deadline(interval, scale, &deadline); + retain(); + pending = thread_call_enter_delayed(fAckTimer, deadline); + if (pending) { + release(); + } } //********************************************************************************* // [private] stop_ack_timer //********************************************************************************* -void IOService::stop_ack_timer( void ) +void +IOService::stop_ack_timer( void ) { - boolean_t pending; + boolean_t pending; - pending = thread_call_cancel(fAckTimer); - if (pending) release(); + pending = thread_call_cancel(fAckTimer); + if (pending) { + release(); + } } //********************************************************************************* @@ -5613,23 +5523,22 @@ void IOService::stop_ack_timer( void ) IOReturn IOService::actionAckTimerExpired( - OSObject * target, - void * arg0, void * arg1, - void * arg2, void * arg3 ) + OSObject * target, + void * arg0, void * arg1, + void * arg2, void * arg3 ) { - IOService * me = (IOService *) target; - bool done; + IOService * me = (IOService *) target; + bool done; - // done will be true if the 
timer tick unblocks the machine state, - // otherwise no need to signal the work loop. + // done will be true if the timer tick unblocks the machine state, + // otherwise no need to signal the work loop. - done = me->ackTimerTick(); - if (done && gIOPMWorkQueue) - { - gIOPMWorkQueue->signalWorkAvailable(); - } + done = me->ackTimerTick(); + if (done && gIOPMWorkQueue) { + gIOPMWorkQueue->signalWorkAvailable(); + } - return kIOReturnSuccess; + return kIOReturnSuccess; } //********************************************************************************* @@ -5641,74 +5550,80 @@ IOService::actionAckTimerExpired( void IOService::ack_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ) { - IOService * me = (IOService *) arg0; + IOService * me = (IOService *) arg0; - if (gIOPMWorkLoop) - { - gIOPMWorkLoop->runAction(&actionAckTimerExpired, me); - } - me->release(); + if (gIOPMWorkLoop) { + gIOPMWorkLoop->runAction(&actionAckTimerExpired, me); + } + me->release(); } //********************************************************************************* // [private] start_spindump_timer //********************************************************************************* -void IOService::start_spindump_timer( const char * delay_type ) +void +IOService::start_spindump_timer( const char * delay_type ) { - AbsoluteTime deadline; - boolean_t pending; + AbsoluteTime deadline; + boolean_t pending; - if (!fSpinDumpTimer || !(kIOKextSpinDump & gIOKitDebug)) - return; + if (!fSpinDumpTimer || !(kIOKextSpinDump & gIOKitDebug)) { + return; + } - if (gIOSpinDumpKextName[0] == '\0' && - !(PE_parse_boot_argn("swd_kext_name", &gIOSpinDumpKextName, - sizeof(gIOSpinDumpKextName)))) - { - return; - } + if (gIOSpinDumpKextName[0] == '\0' && + !(PE_parse_boot_argn("swd_kext_name", &gIOSpinDumpKextName, + sizeof(gIOSpinDumpKextName)))) { + return; + } - if (strncmp(gIOSpinDumpKextName, fName, sizeof(gIOSpinDumpKextName)) != 0) - return; + if (strncmp(gIOSpinDumpKextName, fName, sizeof(gIOSpinDumpKextName)) != 0) { + return; + } - if (gIOSpinDumpDelayType[0] == '\0' && - !(PE_parse_boot_argn("swd_delay_type", &gIOSpinDumpDelayType, - sizeof(gIOSpinDumpDelayType)))) - { - strncpy(gIOSpinDumpDelayType, "SetState", sizeof(gIOSpinDumpDelayType)); - } + if (gIOSpinDumpDelayType[0] == '\0' && + !(PE_parse_boot_argn("swd_delay_type", &gIOSpinDumpDelayType, + sizeof(gIOSpinDumpDelayType)))) { + strncpy(gIOSpinDumpDelayType, "SetState", sizeof(gIOSpinDumpDelayType)); + } - if (strncmp(delay_type, gIOSpinDumpDelayType, sizeof(gIOSpinDumpDelayType)) != 0) - return; + if (strncmp(delay_type, gIOSpinDumpDelayType, sizeof(gIOSpinDumpDelayType)) != 0) { + return; + } - if (gIOSpinDumpDelayDuration == 0 && - !(PE_parse_boot_argn("swd_delay_duration", &gIOSpinDumpDelayDuration, - sizeof(gIOSpinDumpDelayDuration)))) - { - gIOSpinDumpDelayDuration = 300; - } + if (gIOSpinDumpDelayDuration == 0 && + !(PE_parse_boot_argn("swd_delay_duration", &gIOSpinDumpDelayDuration, + sizeof(gIOSpinDumpDelayDuration)))) { + gIOSpinDumpDelayDuration = 300; + } - clock_interval_to_deadline(gIOSpinDumpDelayDuration, kMillisecondScale, &deadline); + clock_interval_to_deadline(gIOSpinDumpDelayDuration, kMillisecondScale, &deadline); - retain(); - pending = thread_call_enter_delayed(fSpinDumpTimer, deadline); - if (pending) release(); + retain(); + pending = thread_call_enter_delayed(fSpinDumpTimer, deadline); + if (pending) { + release(); + } } //********************************************************************************* // [private] 
stop_spindump_timer //********************************************************************************* -void IOService::stop_spindump_timer( void ) +void +IOService::stop_spindump_timer( void ) { - boolean_t pending; + boolean_t pending; - if (!fSpinDumpTimer || !(kIOKextSpinDump & gIOKitDebug)) - return; + if (!fSpinDumpTimer || !(kIOKextSpinDump & gIOKitDebug)) { + return; + } - pending = thread_call_cancel(fSpinDumpTimer); - if (pending) release(); + pending = thread_call_cancel(fSpinDumpTimer); + if (pending) { + release(); + } } @@ -5720,13 +5635,13 @@ void IOService::stop_spindump_timer( void ) IOReturn IOService::actionSpinDumpTimerExpired( - OSObject * target, - void * arg0, void * arg1, - void * arg2, void * arg3 ) + OSObject * target, + void * arg0, void * arg1, + void * arg2, void * arg3 ) { - getPMRootDomain()->takeStackshot(false, false, true); + getPMRootDomain()->takeStackshot(false, false, true); - return kIOReturnSuccess; + return kIOReturnSuccess; } //********************************************************************************* @@ -5738,13 +5653,12 @@ IOService::actionSpinDumpTimerExpired( void IOService::spindump_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 ) { - IOService * me = (IOService *) arg0; + IOService * me = (IOService *) arg0; - if (gIOPMWorkLoop) - { - gIOPMWorkLoop->runAction(&actionSpinDumpTimerExpired, me); - } - me->release(); + if (gIOPMWorkLoop) { + gIOPMWorkLoop->runAction(&actionSpinDumpTimerExpired, me); + } + me->release(); } // MARK: - @@ -5754,24 +5668,22 @@ IOService::spindump_timer_expired( thread_call_param_t arg0, thread_call_param_t // [private] tellSystemCapabilityChange //********************************************************************************* -void IOService::tellSystemCapabilityChange( uint32_t nextMS ) +void +IOService::tellSystemCapabilityChange( uint32_t nextMS ) { - MS_PUSH( nextMS ); - fMachineState = kIOPM_TellCapabilityChangeDone; - fOutOfBandMessage = kIOMessageSystemCapabilityChange; + MS_PUSH( nextMS ); + fMachineState = kIOPM_TellCapabilityChangeDone; + fOutOfBandMessage = kIOMessageSystemCapabilityChange; - if (fIsPreChange) - { - // Notify app first on pre-change. - fOutOfBandParameter = kNotifyCapabilityChangeApps; - } - else - { - // Notify kernel clients first on post-change. - fOutOfBandParameter = kNotifyCapabilityChangePriority; - } + if (fIsPreChange) { + // Notify app first on pre-change. + fOutOfBandParameter = kNotifyCapabilityChangeApps; + } else { + // Notify kernel clients first on post-change. 
+ fOutOfBandParameter = kNotifyCapabilityChangePriority; + } - tellClientsWithResponse( fOutOfBandMessage ); + tellClientsWithResponse( fOutOfBandMessage ); } //********************************************************************************* @@ -5786,9 +5698,10 @@ void IOService::tellSystemCapabilityChange( uint32_t nextMS ) // Return true if we don't have to wait for acknowledgements //********************************************************************************* -bool IOService::askChangeDown( unsigned long stateNum ) +bool +IOService::askChangeDown( unsigned long stateNum ) { - return tellClientsWithResponse( kIOMessageCanDevicePowerOff ); + return tellClientsWithResponse( kIOMessageCanDevicePowerOff ); } //********************************************************************************* @@ -5800,10 +5713,11 @@ bool IOService::askChangeDown( unsigned long stateNum ) // Return true if we don't have to wait for acknowledgements //********************************************************************************* -bool IOService::tellChangeDown1( unsigned long stateNum ) +bool +IOService::tellChangeDown1( unsigned long stateNum ) { - fOutOfBandParameter = kNotifyApps; - return tellChangeDown(stateNum); + fOutOfBandParameter = kNotifyApps; + return tellChangeDown(stateNum); } //********************************************************************************* @@ -5814,10 +5728,11 @@ bool IOService::tellChangeDown1( unsigned long stateNum ) // Return true if we don't have to wait for acknowledgements //********************************************************************************* -bool IOService::tellChangeDown2( unsigned long stateNum ) +bool +IOService::tellChangeDown2( unsigned long stateNum ) { - fOutOfBandParameter = kNotifyPriority; - return tellChangeDown(stateNum); + fOutOfBandParameter = kNotifyPriority; + return tellChangeDown(stateNum); } //********************************************************************************* @@ -5832,9 +5747,10 @@ bool IOService::tellChangeDown2( unsigned long stateNum ) // Return true if we don't have to wait for acknowledgements //********************************************************************************* -bool IOService::tellChangeDown( unsigned long stateNum ) +bool +IOService::tellChangeDown( unsigned long stateNum ) { - return tellClientsWithResponse( kIOMessageDeviceWillPowerOff ); + return tellClientsWithResponse( kIOMessageDeviceWillPowerOff ); } //********************************************************************************* @@ -5842,100 +5758,94 @@ bool IOService::tellChangeDown( unsigned long stateNum ) // //********************************************************************************* -static void logAppTimeouts( OSObject * object, void * arg ) -{ - IOPMInterestContext * context = (IOPMInterestContext *) arg; - OSObject * flag; - unsigned int clientIndex; - int pid = 0; - char name[128]; - - if (OSDynamicCast(_IOServiceInterestNotifier, object)) - { - // Discover the 'counter' value or index assigned to this client - // when it was notified, by searching for the array index of the - // client in an array holding the cached interested clients. 
- - clientIndex = context->notifyClients->getNextIndexOfObject(object, 0); - - if ((clientIndex != (unsigned int) -1) && - (flag = context->responseArray->getObject(clientIndex)) && - (flag != kOSBooleanTrue)) - { - OSNumber *clientID = copyClientIDForNotification(object, context); - - name[0] = '\0'; - if (clientID) { - pid = clientID->unsigned32BitValue(); - proc_name(pid, name, sizeof(name)); - clientID->release(); - } - - PM_ERROR(context->errorLog, pid, name); - - // TODO: record message type if possible - IOService::getPMRootDomain()->pmStatsRecordApplicationResponse( - gIOPMStatsResponseTimedOut, - name, 0, (30*1000), pid, object); - - } - } -} - -void IOService::cleanClientResponses( bool logErrors ) -{ - if (logErrors && fResponseArray) - { - switch ( fOutOfBandParameter ) { - case kNotifyApps: - case kNotifyCapabilityChangeApps: - if (fNotifyClientArray) - { - IOPMInterestContext context; - - context.responseArray = fResponseArray; - context.notifyClients = fNotifyClientArray; - context.serialNumber = fSerialNumber; - context.messageType = kIOMessageCopyClientID; - context.notifyType = kNotifyApps; - context.isPreChange = fIsPreChange; - context.enableTracing = false; - context.us = this; - context.maxTimeRequested = 0; - context.stateNumber = fHeadNotePowerState; - context.stateFlags = fHeadNotePowerArrayEntry->capabilityFlags; - context.changeFlags = fHeadNoteChangeFlags; - context.errorLog = "PM notification timeout (pid %d, %s)\n"; - - applyToInterested(gIOAppPowerStateInterest, logAppTimeouts, (void *) &context); - } - break; - - default: - // kNotifyPriority, kNotifyCapabilityChangePriority - // TODO: identify the priority client that has not acked - PM_ERROR("PM priority notification timeout\n"); - if (gIOKitDebug & kIOLogDebugPower) - { - panic("PM priority notification timeout"); - } - break; - } - } - - if (IS_ROOT_DOMAIN) { - getPMRootDomain()->reset_watchdog_timer(this, 0); - } - if (fResponseArray) - { - fResponseArray->release(); - fResponseArray = NULL; - } - if (fNotifyClientArray) - { - fNotifyClientArray->release(); - fNotifyClientArray = NULL; - } +static void +logAppTimeouts( OSObject * object, void * arg ) +{ + IOPMInterestContext * context = (IOPMInterestContext *) arg; + OSObject * flag; + unsigned int clientIndex; + int pid = 0; + char name[128]; + + if (OSDynamicCast(_IOServiceInterestNotifier, object)) { + // Discover the 'counter' value or index assigned to this client + // when it was notified, by searching for the array index of the + // client in an array holding the cached interested clients. 
+ + clientIndex = context->notifyClients->getNextIndexOfObject(object, 0); + + if ((clientIndex != (unsigned int) -1) && + (flag = context->responseArray->getObject(clientIndex)) && + (flag != kOSBooleanTrue)) { + OSNumber *clientID = copyClientIDForNotification(object, context); + + name[0] = '\0'; + if (clientID) { + pid = clientID->unsigned32BitValue(); + proc_name(pid, name, sizeof(name)); + clientID->release(); + } + + PM_ERROR(context->errorLog, pid, name); + + // TODO: record message type if possible + IOService::getPMRootDomain()->pmStatsRecordApplicationResponse( + gIOPMStatsResponseTimedOut, + name, 0, (30 * 1000), pid, object); + } + } +} + +void +IOService::cleanClientResponses( bool logErrors ) +{ + if (logErrors && fResponseArray) { + switch (fOutOfBandParameter) { + case kNotifyApps: + case kNotifyCapabilityChangeApps: + if (fNotifyClientArray) { + IOPMInterestContext context; + + context.responseArray = fResponseArray; + context.notifyClients = fNotifyClientArray; + context.serialNumber = fSerialNumber; + context.messageType = kIOMessageCopyClientID; + context.notifyType = kNotifyApps; + context.isPreChange = fIsPreChange; + context.enableTracing = false; + context.us = this; + context.maxTimeRequested = 0; + context.stateNumber = fHeadNotePowerState; + context.stateFlags = fHeadNotePowerArrayEntry->capabilityFlags; + context.changeFlags = fHeadNoteChangeFlags; + context.errorLog = "PM notification timeout (pid %d, %s)\n"; + + applyToInterested(gIOAppPowerStateInterest, logAppTimeouts, (void *) &context); + } + break; + + default: + // kNotifyPriority, kNotifyCapabilityChangePriority + // TODO: identify the priority client that has not acked + PM_ERROR("PM priority notification timeout\n"); + if (gIOKitDebug & kIOLogDebugPower) { + panic("PM priority notification timeout"); + } + break; + } + } + + if (IS_ROOT_DOMAIN) { + getPMRootDomain()->reset_watchdog_timer(this, 0); + } + if (fResponseArray) { + fResponseArray->release(); + fResponseArray = NULL; + } + if (fNotifyClientArray) { + fNotifyClientArray->release(); + fNotifyClientArray = NULL; + } } //********************************************************************************* @@ -5947,146 +5857,141 @@ void IOService::cleanClientResponses( bool logErrors ) // Return true if we don't have to wait for acknowledgements //********************************************************************************* -bool IOService::tellClientsWithResponse( int messageType ) -{ - IOPMInterestContext context; - bool isRootDomain = IS_ROOT_DOMAIN; - uint32_t maxTimeOut = kMaxTimeRequested; - - PM_ASSERT_IN_GATE(); - assert( fResponseArray == NULL ); - assert( fNotifyClientArray == NULL ); - - if(messageType == (int)kIOPMMessageLastCallBeforeSleep) - RD_LOG("tellClientsWithResponse( kIOPMMessageLastCallBeforeSleep, %d )\n", - fOutOfBandParameter); - else - RD_LOG("tellClientsWithResponse( %s, %d )\n", - getIOMessageString(messageType), fOutOfBandParameter); - - fResponseArray = OSArray::withCapacity( 1 ); - if (!fResponseArray) - goto exit; - - fResponseArray->setCapacityIncrement(8); - if (++fSerialNumber == 0) - fSerialNumber++; - - context.responseArray = fResponseArray; - context.notifyClients = 0; - context.serialNumber = fSerialNumber; - context.messageType = messageType; - context.notifyType = fOutOfBandParameter; - context.isPreChange = fIsPreChange; - context.enableTracing = false; - context.us = this; - context.maxTimeRequested = 0; - context.stateNumber = fHeadNotePowerState; - context.stateFlags = 
fHeadNotePowerArrayEntry->capabilityFlags; - context.changeFlags = fHeadNoteChangeFlags; - context.messageFilter = (isRootDomain) ? - OSMemberFunctionCast( - IOPMMessageFilter, - this, - &IOPMrootDomain::systemMessageFilter) : 0; - - switch ( fOutOfBandParameter ) { - case kNotifyApps: - applyToInterested( gIOAppPowerStateInterest, - pmTellAppWithResponse, (void *) &context ); - - if (isRootDomain && - (fMachineState != kIOPM_OurChangeTellClientsPowerDown) && - (fMachineState != kIOPM_SyncTellClientsPowerDown) && - (context.messageType != kIOPMMessageLastCallBeforeSleep)) - { - // Notify capability app for tellChangeDown1() - // but not for askChangeDown(). - context.notifyType = kNotifyCapabilityChangeApps; - context.messageType = kIOMessageSystemCapabilityChange; - applyToInterested( gIOAppPowerStateInterest, - pmTellCapabilityAppWithResponse, (void *) &context ); - context.notifyType = fOutOfBandParameter; - context.messageType = messageType; - } - if(context.messageType == kIOMessageCanSystemSleep) - { - maxTimeOut = kCanSleepMaxTimeReq; - if(gCanSleepTimeout) - { - maxTimeOut = (gCanSleepTimeout*us_per_s); - } - } - context.maxTimeRequested = maxTimeOut; - context.enableTracing = isRootDomain; - applyToInterested( gIOGeneralInterest, - pmTellClientWithResponse, (void *) &context ); - - break; - - case kNotifyPriority: - context.enableTracing = isRootDomain; - applyToInterested( gIOPriorityPowerStateInterest, - pmTellClientWithResponse, (void *) &context ); - - if (isRootDomain) - { - // Notify capability clients for tellChangeDown2(). - context.notifyType = kNotifyCapabilityChangePriority; - context.messageType = kIOMessageSystemCapabilityChange; - applyToInterested( gIOPriorityPowerStateInterest, - pmTellCapabilityClientWithResponse, (void *) &context ); - } - break; - - case kNotifyCapabilityChangeApps: - applyToInterested( gIOAppPowerStateInterest, - pmTellCapabilityAppWithResponse, (void *) &context ); - if(context.messageType == kIOMessageCanSystemSleep) - { - maxTimeOut = kCanSleepMaxTimeReq; - if(gCanSleepTimeout) - { - maxTimeOut = (gCanSleepTimeout*us_per_s); - } - } - context.maxTimeRequested = maxTimeOut; - break; - - case kNotifyCapabilityChangePriority: - context.enableTracing = isRootDomain; - applyToInterested( gIOPriorityPowerStateInterest, - pmTellCapabilityClientWithResponse, (void *) &context ); - break; - } - fNotifyClientArray = context.notifyClients; - - // do we have to wait for somebody? 
- if ( !checkForDone() ) - { - OUR_PMLog(kPMLogStartAckTimer, context.maxTimeRequested, 0); - if (context.enableTracing) { - getPMRootDomain()->traceDetail(context.messageType, 0, context.maxTimeRequested / 1000); - getPMRootDomain()->reset_watchdog_timer(this, context.maxTimeRequested/USEC_PER_SEC+1); - } - start_ack_timer( context.maxTimeRequested / 1000, kMillisecondScale ); - return false; - } +bool +IOService::tellClientsWithResponse( int messageType ) +{ + IOPMInterestContext context; + bool isRootDomain = IS_ROOT_DOMAIN; + uint32_t maxTimeOut = kMaxTimeRequested; + + PM_ASSERT_IN_GATE(); + assert( fResponseArray == NULL ); + assert( fNotifyClientArray == NULL ); + + if (messageType == (int)kIOPMMessageLastCallBeforeSleep) { + RD_LOG("tellClientsWithResponse( kIOPMMessageLastCallBeforeSleep, %d )\n", + fOutOfBandParameter); + } else { + RD_LOG("tellClientsWithResponse( %s, %d )\n", + getIOMessageString(messageType), fOutOfBandParameter); + } + + fResponseArray = OSArray::withCapacity( 1 ); + if (!fResponseArray) { + goto exit; + } + + fResponseArray->setCapacityIncrement(8); + if (++fSerialNumber == 0) { + fSerialNumber++; + } + + context.responseArray = fResponseArray; + context.notifyClients = 0; + context.serialNumber = fSerialNumber; + context.messageType = messageType; + context.notifyType = fOutOfBandParameter; + context.isPreChange = fIsPreChange; + context.enableTracing = false; + context.us = this; + context.maxTimeRequested = 0; + context.stateNumber = fHeadNotePowerState; + context.stateFlags = fHeadNotePowerArrayEntry->capabilityFlags; + context.changeFlags = fHeadNoteChangeFlags; + context.messageFilter = (isRootDomain) ? + OSMemberFunctionCast( + IOPMMessageFilter, + this, + &IOPMrootDomain::systemMessageFilter) : 0; + + switch (fOutOfBandParameter) { + case kNotifyApps: + applyToInterested( gIOAppPowerStateInterest, + pmTellAppWithResponse, (void *) &context ); + + if (isRootDomain && + (fMachineState != kIOPM_OurChangeTellClientsPowerDown) && + (fMachineState != kIOPM_SyncTellClientsPowerDown) && + (context.messageType != kIOPMMessageLastCallBeforeSleep)) { + // Notify capability app for tellChangeDown1() + // but not for askChangeDown(). + context.notifyType = kNotifyCapabilityChangeApps; + context.messageType = kIOMessageSystemCapabilityChange; + applyToInterested( gIOAppPowerStateInterest, + pmTellCapabilityAppWithResponse, (void *) &context ); + context.notifyType = fOutOfBandParameter; + context.messageType = messageType; + } + if (context.messageType == kIOMessageCanSystemSleep) { + maxTimeOut = kCanSleepMaxTimeReq; + if (gCanSleepTimeout) { + maxTimeOut = (gCanSleepTimeout * us_per_s); + } + } + context.maxTimeRequested = maxTimeOut; + context.enableTracing = isRootDomain; + applyToInterested( gIOGeneralInterest, + pmTellClientWithResponse, (void *) &context ); + + break; + + case kNotifyPriority: + context.enableTracing = isRootDomain; + applyToInterested( gIOPriorityPowerStateInterest, + pmTellClientWithResponse, (void *) &context ); + + if (isRootDomain) { + // Notify capability clients for tellChangeDown2(). 
+ context.notifyType = kNotifyCapabilityChangePriority; + context.messageType = kIOMessageSystemCapabilityChange; + applyToInterested( gIOPriorityPowerStateInterest, + pmTellCapabilityClientWithResponse, (void *) &context ); + } + break; + + case kNotifyCapabilityChangeApps: + applyToInterested( gIOAppPowerStateInterest, + pmTellCapabilityAppWithResponse, (void *) &context ); + if (context.messageType == kIOMessageCanSystemSleep) { + maxTimeOut = kCanSleepMaxTimeReq; + if (gCanSleepTimeout) { + maxTimeOut = (gCanSleepTimeout * us_per_s); + } + } + context.maxTimeRequested = maxTimeOut; + break; + + case kNotifyCapabilityChangePriority: + context.enableTracing = isRootDomain; + applyToInterested( gIOPriorityPowerStateInterest, + pmTellCapabilityClientWithResponse, (void *) &context ); + break; + } + fNotifyClientArray = context.notifyClients; + + // do we have to wait for somebody? + if (!checkForDone()) { + OUR_PMLog(kPMLogStartAckTimer, context.maxTimeRequested, 0); + if (context.enableTracing) { + getPMRootDomain()->traceDetail(context.messageType, 0, context.maxTimeRequested / 1000); + getPMRootDomain()->reset_watchdog_timer(this, context.maxTimeRequested / USEC_PER_SEC + 1); + } + start_ack_timer( context.maxTimeRequested / 1000, kMillisecondScale ); + return false; + } exit: - // everybody responded - if (fResponseArray) - { - fResponseArray->release(); - fResponseArray = NULL; - } - if (fNotifyClientArray) - { - fNotifyClientArray->release(); - fNotifyClientArray = NULL; - } + // everybody responded + if (fResponseArray) { + fResponseArray->release(); + fResponseArray = NULL; + } + if (fNotifyClientArray) { + fNotifyClientArray->release(); + fNotifyClientArray = NULL; + } - return true; + return true; } //********************************************************************************* @@ -6096,97 +6001,87 @@ exit: // cookie we can identify the response with. 
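//*********************************************************************************

//*********************************************************************************
// The msgRef cookie computed below packs the change serial number and the
// per-client response index into one 32-bit value; the '+' in the source is
// equivalent to '|' because the two fields are masked to disjoint halves.
// A sketch of the layout, with a hypothetical decode helper for clarity:
//*********************************************************************************

static inline uint32_t
pmEncodeMsgRef( uint16_t serialNumber, uint16_t msgIndex )
{
    return ((uint32_t)(serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF);
}

static inline void
pmDecodeMsgRef( uint32_t msgRef, uint16_t * serialNumber, uint16_t * msgIndex )
{
    *serialNumber = (msgRef >> 16) & 0xFFFF;    // which power change this was
    *msgIndex = msgRef & 0xFFFF;                // slot in fResponseArray
}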
//********************************************************************************* -void IOService::pmTellAppWithResponse( OSObject * object, void * arg ) -{ - IOPMInterestContext * context = (IOPMInterestContext *) arg; - IOServicePM * pwrMgt = context->us->pwrMgt; - uint32_t msgIndex, msgRef, msgType; - OSNumber *clientID = NULL; - proc_t proc = NULL; - boolean_t proc_suspended = FALSE; - OSObject * waitForReply = kOSBooleanTrue; +void +IOService::pmTellAppWithResponse( OSObject * object, void * arg ) +{ + IOPMInterestContext * context = (IOPMInterestContext *) arg; + IOServicePM * pwrMgt = context->us->pwrMgt; + uint32_t msgIndex, msgRef, msgType; + OSNumber *clientID = NULL; + proc_t proc = NULL; + boolean_t proc_suspended = FALSE; + OSObject * waitForReply = kOSBooleanTrue; #if LOG_APP_RESPONSE_TIMES - AbsoluteTime now; + AbsoluteTime now; #endif - if (!OSDynamicCast(_IOServiceInterestNotifier, object)) - return; - - if (context->us == getPMRootDomain()) - { - if ((clientID = copyClientIDForNotification(object, context))) - { - uint32_t clientPID = clientID->unsigned32BitValue(); - clientID->release(); - proc = proc_find(clientPID); - - if (proc) - { - proc_suspended = get_task_pidsuspended((task_t) proc->task); - proc_rele(proc); - - if (proc_suspended) - { - logClientIDForNotification(object, context, "PMTellAppWithResponse - Suspended"); - return; - } - } - } - } - - if (context->messageFilter && - !context->messageFilter(context->us, object, context, 0, &waitForReply)) - { - if (kIOLogDebugPower & gIOKitDebug) - { - logClientIDForNotification(object, context, "DROP App"); - } - return; - } - - // Create client array (for tracking purposes) only if the service - // has app clients. Usually only root domain does. - if (0 == context->notifyClients) - context->notifyClients = OSArray::withCapacity( 32 ); - - msgType = context->messageType; - msgIndex = context->responseArray->getCount(); - msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF); - - OUR_PMLog(kPMLogAppNotify, msgType, msgRef); - if (kIOLogDebugPower & gIOKitDebug) - { - logClientIDForNotification(object, context, "MESG App"); - } - - if (waitForReply == kOSBooleanTrue) - { - - OSNumber * num; - clock_get_uptime(&now); - num = OSNumber::withNumber(AbsoluteTime_to_scalar(&now), sizeof(uint64_t) * 8); - if (num) - { - context->responseArray->setObject(msgIndex, num); - num->release(); - } - else { - context->responseArray->setObject(msgIndex, kOSBooleanFalse); - } - } - else - { - context->responseArray->setObject(msgIndex, kOSBooleanTrue); - if (kIOLogDebugPower & gIOKitDebug) - { - logClientIDForNotification(object, context, "App response ignored"); - } - } - - if (context->notifyClients) - context->notifyClients->setObject(msgIndex, object); - - context->us->messageClient(msgType, object, (void *)(uintptr_t) msgRef); + if (!OSDynamicCast(_IOServiceInterestNotifier, object)) { + return; + } + + if (context->us == getPMRootDomain()) { + if ((clientID = copyClientIDForNotification(object, context))) { + uint32_t clientPID = clientID->unsigned32BitValue(); + clientID->release(); + proc = proc_find(clientPID); + + if (proc) { + proc_suspended = get_task_pidsuspended((task_t) proc->task); + proc_rele(proc); + + if (proc_suspended) { + logClientIDForNotification(object, context, "PMTellAppWithResponse - Suspended"); + return; + } + } + } + } + + if (context->messageFilter && + !context->messageFilter(context->us, object, context, 0, &waitForReply)) { + if (kIOLogDebugPower & gIOKitDebug) { + 
logClientIDForNotification(object, context, "DROP App"); + } + return; + } + + // Create client array (for tracking purposes) only if the service + // has app clients. Usually only root domain does. + if (0 == context->notifyClients) { + context->notifyClients = OSArray::withCapacity( 32 ); + } + + msgType = context->messageType; + msgIndex = context->responseArray->getCount(); + msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF); + + OUR_PMLog(kPMLogAppNotify, msgType, msgRef); + if (kIOLogDebugPower & gIOKitDebug) { + logClientIDForNotification(object, context, "MESG App"); + } + + if (waitForReply == kOSBooleanTrue) { + OSNumber * num; + clock_get_uptime(&now); + num = OSNumber::withNumber(AbsoluteTime_to_scalar(&now), sizeof(uint64_t) * 8); + if (num) { + context->responseArray->setObject(msgIndex, num); + num->release(); + } else { + context->responseArray->setObject(msgIndex, kOSBooleanFalse); + } + } else { + context->responseArray->setObject(msgIndex, kOSBooleanTrue); + if (kIOLogDebugPower & gIOKitDebug) { + logClientIDForNotification(object, context, "App response ignored"); + } + } + + if (context->notifyClients) { + context->notifyClients->setObject(msgIndex, object); + } + + context->us->messageClient(msgType, object, (void *)(uintptr_t) msgRef); } //********************************************************************************* @@ -6196,356 +6091,332 @@ void IOService::pmTellAppWithResponse( OSObject * object, void * arg ) // so we compute a cookie we can identify the response with. //********************************************************************************* -void IOService::pmTellClientWithResponse( OSObject * object, void * arg ) -{ - IOPowerStateChangeNotification notify; - IOPMInterestContext * context = (IOPMInterestContext *) arg; - OSObject * replied = kOSBooleanTrue; - _IOServiceInterestNotifier * notifier; - uint32_t msgIndex, msgRef, msgType; - IOReturn retCode; - AbsoluteTime start, end; - uint64_t nsec; - - if (context->messageFilter && - !context->messageFilter(context->us, object, context, 0, 0)) - { - if ((kIOLogDebugPower & gIOKitDebug) && - (OSDynamicCast(_IOServiceInterestNotifier, object))) - { - _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; - PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", - context->us->getName(), - getIOMessageString(context->messageType), - OBFUSCATE(object), OBFUSCATE(n->handler)); - } - return; - } - - notifier = OSDynamicCast(_IOServiceInterestNotifier, object); - msgType = context->messageType; - msgIndex = context->responseArray->getCount(); - msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF); - - IOServicePM * pwrMgt = context->us->pwrMgt; - if (gIOKitDebug & kIOLogPower) { - OUR_PMLog(kPMLogClientNotify, msgRef, msgType); - if (OSDynamicCast(IOService, object)) { - const char *who = ((IOService *) object)->getName(); - gPlatform->PMLog(who, kPMLogClientNotify, (uintptr_t) object, 0); - } - else if (notifier) { - OUR_PMLog(kPMLogClientNotify, (uintptr_t) notifier->handler, 0); - } - } - if ((kIOLogDebugPower & gIOKitDebug) && notifier) - { - PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", - context->us->getName(), - getIOMessageString(msgType), - OBFUSCATE(object), OBFUSCATE(notifier->handler)); - } - - if (0 == context->notifyClients) - context->notifyClients = OSArray::withCapacity( 32 ); - - notify.powerRef = (void *)(uintptr_t) msgRef; - notify.returnValue = 0; - notify.stateNumber = context->stateNumber; - notify.stateFlags = 
context->stateFlags; - - if (context->enableTracing && (notifier != 0)) - { - getPMRootDomain()->traceDetail(notifier, true); - } - - clock_get_uptime(&start); - retCode = context->us->messageClient(msgType, object, (void *) ¬ify, sizeof(notify)); - clock_get_uptime(&end); - - if (context->enableTracing && (notifier != NULL)) - { - getPMRootDomain()->traceDetail(notifier, false); - } - - - if (kIOReturnSuccess == retCode) - { - if (0 == notify.returnValue) { - OUR_PMLog(kPMLogClientAcknowledge, msgRef, (uintptr_t) object); - context->responseArray->setObject(msgIndex, replied); - } else { - replied = kOSBooleanFalse; - if ( notify.returnValue > context->maxTimeRequested ) - { - if (notify.returnValue > kPriorityClientMaxWait) - { - context->maxTimeRequested = kPriorityClientMaxWait; - PM_ERROR("%s: client %p returned %llu for %s\n", - context->us->getName(), - notifier ? (void *) OBFUSCATE(notifier->handler) : OBFUSCATE(object), - (uint64_t) notify.returnValue, - getIOMessageString(msgType)); - } - else - context->maxTimeRequested = notify.returnValue; - } - // - // Track time taken to ack, by storing the timestamp of - // callback completion - OSNumber * num; - num = OSNumber::withNumber(AbsoluteTime_to_scalar(&end), sizeof(uint64_t) * 8); - if (num) { - context->responseArray->setObject(msgIndex, num); - num->release(); - } - else { - context->responseArray->setObject(msgIndex, replied); - } - } - - if (context->enableTracing) { - SUB_ABSOLUTETIME(&end, &start); - absolutetime_to_nanoseconds(end, &nsec); - - if ((nsec > LOG_KEXT_RESPONSE_TIMES) || (notify.returnValue != 0)) { - getPMRootDomain()->traceAckDelay(notifier, notify.returnValue/1000, NS_TO_MS(nsec)); - } - } - } - else { - // not a client of ours - // so we won't be waiting for response - OUR_PMLog(kPMLogClientAcknowledge, msgRef, 0); - context->responseArray->setObject(msgIndex, replied); - } - if (context->notifyClients) { - context->notifyClients->setObject(msgIndex, object); - } +void +IOService::pmTellClientWithResponse( OSObject * object, void * arg ) +{ + IOPowerStateChangeNotification notify; + IOPMInterestContext * context = (IOPMInterestContext *) arg; + OSObject * replied = kOSBooleanTrue; + _IOServiceInterestNotifier * notifier; + uint32_t msgIndex, msgRef, msgType; + IOReturn retCode; + AbsoluteTime start, end; + uint64_t nsec; + + if (context->messageFilter && + !context->messageFilter(context->us, object, context, 0, 0)) { + if ((kIOLogDebugPower & gIOKitDebug) && + (OSDynamicCast(_IOServiceInterestNotifier, object))) { + _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; + PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", + context->us->getName(), + getIOMessageString(context->messageType), + OBFUSCATE(object), OBFUSCATE(n->handler)); + } + return; + } + + notifier = OSDynamicCast(_IOServiceInterestNotifier, object); + msgType = context->messageType; + msgIndex = context->responseArray->getCount(); + msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF); + + IOServicePM * pwrMgt = context->us->pwrMgt; + if (gIOKitDebug & kIOLogPower) { + OUR_PMLog(kPMLogClientNotify, msgRef, msgType); + if (OSDynamicCast(IOService, object)) { + const char *who = ((IOService *) object)->getName(); + gPlatform->PMLog(who, kPMLogClientNotify, (uintptr_t) object, 0); + } else if (notifier) { + OUR_PMLog(kPMLogClientNotify, (uintptr_t) notifier->handler, 0); + } + } + if ((kIOLogDebugPower & gIOKitDebug) && notifier) { + PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", + 
context->us->getName(), + getIOMessageString(msgType), + OBFUSCATE(object), OBFUSCATE(notifier->handler)); + } + + if (0 == context->notifyClients) { + context->notifyClients = OSArray::withCapacity( 32 ); + } + + notify.powerRef = (void *)(uintptr_t) msgRef; + notify.returnValue = 0; + notify.stateNumber = context->stateNumber; + notify.stateFlags = context->stateFlags; + + if (context->enableTracing && (notifier != 0)) { + getPMRootDomain()->traceDetail(notifier, true); + } + + clock_get_uptime(&start); + retCode = context->us->messageClient(msgType, object, (void *) ¬ify, sizeof(notify)); + clock_get_uptime(&end); + + if (context->enableTracing && (notifier != NULL)) { + getPMRootDomain()->traceDetail(notifier, false); + } + + + if (kIOReturnSuccess == retCode) { + if (0 == notify.returnValue) { + OUR_PMLog(kPMLogClientAcknowledge, msgRef, (uintptr_t) object); + context->responseArray->setObject(msgIndex, replied); + } else { + replied = kOSBooleanFalse; + if (notify.returnValue > context->maxTimeRequested) { + if (notify.returnValue > kPriorityClientMaxWait) { + context->maxTimeRequested = kPriorityClientMaxWait; + PM_ERROR("%s: client %p returned %llu for %s\n", + context->us->getName(), + notifier ? (void *) OBFUSCATE(notifier->handler) : OBFUSCATE(object), + (uint64_t) notify.returnValue, + getIOMessageString(msgType)); + } else { + context->maxTimeRequested = notify.returnValue; + } + } + // + // Track time taken to ack, by storing the timestamp of + // callback completion + OSNumber * num; + num = OSNumber::withNumber(AbsoluteTime_to_scalar(&end), sizeof(uint64_t) * 8); + if (num) { + context->responseArray->setObject(msgIndex, num); + num->release(); + } else { + context->responseArray->setObject(msgIndex, replied); + } + } + + if (context->enableTracing) { + SUB_ABSOLUTETIME(&end, &start); + absolutetime_to_nanoseconds(end, &nsec); + if ((nsec > LOG_KEXT_RESPONSE_TIMES) || (notify.returnValue != 0)) { + getPMRootDomain()->traceAckDelay(notifier, notify.returnValue / 1000, NS_TO_MS(nsec)); + } + } + } else { + // not a client of ours + // so we won't be waiting for response + OUR_PMLog(kPMLogClientAcknowledge, msgRef, 0); + context->responseArray->setObject(msgIndex, replied); + } + if (context->notifyClients) { + context->notifyClients->setObject(msgIndex, object); + } } //********************************************************************************* // [static private] pmTellCapabilityAppWithResponse //********************************************************************************* -void IOService::pmTellCapabilityAppWithResponse( OSObject * object, void * arg ) +void +IOService::pmTellCapabilityAppWithResponse( OSObject * object, void * arg ) { - IOPMSystemCapabilityChangeParameters msgArg; - IOPMInterestContext * context = (IOPMInterestContext *) arg; - OSObject * replied = kOSBooleanTrue; - IOServicePM * pwrMgt = context->us->pwrMgt; - uint32_t msgIndex, msgRef, msgType; + IOPMSystemCapabilityChangeParameters msgArg; + IOPMInterestContext * context = (IOPMInterestContext *) arg; + OSObject * replied = kOSBooleanTrue; + IOServicePM * pwrMgt = context->us->pwrMgt; + uint32_t msgIndex, msgRef, msgType; #if LOG_APP_RESPONSE_TIMES - AbsoluteTime now; + AbsoluteTime now; #endif - if (!OSDynamicCast(_IOServiceInterestNotifier, object)) - return; - - memset(&msgArg, 0, sizeof(msgArg)); - if (context->messageFilter && - !context->messageFilter(context->us, object, context, &msgArg, &replied)) - { - return; - } - - // Create client array (for tracking purposes) only if the service 
-    // has app clients. Usually only root domain does.
-    if (0 == context->notifyClients)
-        context->notifyClients = OSArray::withCapacity( 32 );
-
-    msgType = context->messageType;
-    msgIndex = context->responseArray->getCount();
-    msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF);
-
-    OUR_PMLog(kPMLogAppNotify, msgType, msgRef);
-    if (kIOLogDebugPower & gIOKitDebug)
-    {
-        // Log client pid/name and client array index.
-        OSNumber * clientID = NULL;
-        OSString * clientIDString = NULL;;
-        context->us->messageClient(kIOMessageCopyClientID, object, &clientID);
-        if (clientID) {
-            clientIDString = IOCopyLogNameForPID(clientID->unsigned32BitValue());
-        }
-
-        PM_LOG("%s MESG App(%u) %s, wait %u, %s\n",
-            context->us->getName(),
-            msgIndex, getIOMessageString(msgType),
-            (replied != kOSBooleanTrue),
-            clientIDString ? clientIDString->getCStringNoCopy() : "");
-        if (clientID) clientID->release();
-        if (clientIDString) clientIDString->release();
-    }
-
-    msgArg.notifyRef = msgRef;
-    msgArg.maxWaitForReply = 0;
-
-    if (replied == kOSBooleanTrue)
-    {
-        msgArg.notifyRef = 0;
-        context->responseArray->setObject(msgIndex, kOSBooleanTrue);
-        if (context->notifyClients)
-            context->notifyClients->setObject(msgIndex, kOSBooleanTrue);
-    }
-    else
-    {
-
-        OSNumber * num;
-        clock_get_uptime(&now);
-        num = OSNumber::withNumber(AbsoluteTime_to_scalar(&now), sizeof(uint64_t) * 8);
-        if (num)
-        {
-            context->responseArray->setObject(msgIndex, num);
-            num->release();
-        }
-        else {
-            context->responseArray->setObject(msgIndex, kOSBooleanFalse);
-        }
-
-        if (context->notifyClients)
-            context->notifyClients->setObject(msgIndex, object);
-    }
-
-    context->us->messageClient(msgType, object, (void *) &msgArg, sizeof(msgArg));
+    if (!OSDynamicCast(_IOServiceInterestNotifier, object)) {
+        return;
+    }
+
+    memset(&msgArg, 0, sizeof(msgArg));
+    if (context->messageFilter &&
+        !context->messageFilter(context->us, object, context, &msgArg, &replied)) {
+        return;
+    }
+
+    // Create client array (for tracking purposes) only if the service
+    // has app clients. Usually only root domain does.
+    if (0 == context->notifyClients) {
+        context->notifyClients = OSArray::withCapacity( 32 );
+    }
+
+    msgType = context->messageType;
+    msgIndex = context->responseArray->getCount();
+    msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF);
+
+    OUR_PMLog(kPMLogAppNotify, msgType, msgRef);
+    if (kIOLogDebugPower & gIOKitDebug) {
+        // Log client pid/name and client array index.
+        OSNumber * clientID = NULL;
+        OSString * clientIDString = NULL;
+        context->us->messageClient(kIOMessageCopyClientID, object, &clientID);
+        if (clientID) {
+            clientIDString = IOCopyLogNameForPID(clientID->unsigned32BitValue());
+        }
+
+        PM_LOG("%s MESG App(%u) %s, wait %u, %s\n",
+            context->us->getName(),
+            msgIndex, getIOMessageString(msgType),
+            (replied != kOSBooleanTrue),
+            clientIDString ?
clientIDString->getCStringNoCopy() : ""); + if (clientID) { + clientID->release(); + } + if (clientIDString) { + clientIDString->release(); + } + } + + msgArg.notifyRef = msgRef; + msgArg.maxWaitForReply = 0; + + if (replied == kOSBooleanTrue) { + msgArg.notifyRef = 0; + context->responseArray->setObject(msgIndex, kOSBooleanTrue); + if (context->notifyClients) { + context->notifyClients->setObject(msgIndex, kOSBooleanTrue); + } + } else { + OSNumber * num; + clock_get_uptime(&now); + num = OSNumber::withNumber(AbsoluteTime_to_scalar(&now), sizeof(uint64_t) * 8); + if (num) { + context->responseArray->setObject(msgIndex, num); + num->release(); + } else { + context->responseArray->setObject(msgIndex, kOSBooleanFalse); + } + + if (context->notifyClients) { + context->notifyClients->setObject(msgIndex, object); + } + } + + context->us->messageClient(msgType, object, (void *) &msgArg, sizeof(msgArg)); } //********************************************************************************* // [static private] pmTellCapabilityClientWithResponse //********************************************************************************* -void IOService::pmTellCapabilityClientWithResponse( - OSObject * object, void * arg ) -{ - IOPMSystemCapabilityChangeParameters msgArg; - IOPMInterestContext * context = (IOPMInterestContext *) arg; - OSObject * replied = kOSBooleanTrue; - _IOServiceInterestNotifier * notifier; - uint32_t msgIndex, msgRef, msgType; - IOReturn retCode; - AbsoluteTime start, end; - uint64_t nsec; - - memset(&msgArg, 0, sizeof(msgArg)); - if (context->messageFilter && - !context->messageFilter(context->us, object, context, &msgArg, 0)) - { - if ((kIOLogDebugPower & gIOKitDebug) && - (OSDynamicCast(_IOServiceInterestNotifier, object))) - { - _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; - PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", - context->us->getName(), - getIOMessageString(context->messageType), - OBFUSCATE(object), OBFUSCATE(n->handler)); - } - return; - } - - if (0 == context->notifyClients) { - context->notifyClients = OSArray::withCapacity( 32 ); - } - notifier = OSDynamicCast(_IOServiceInterestNotifier, object); - msgType = context->messageType; - msgIndex = context->responseArray->getCount(); - msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF); - - IOServicePM * pwrMgt = context->us->pwrMgt; - if (gIOKitDebug & kIOLogPower) { - OUR_PMLog(kPMLogClientNotify, msgRef, msgType); - if (OSDynamicCast(IOService, object)) { - const char *who = ((IOService *) object)->getName(); - gPlatform->PMLog(who, kPMLogClientNotify, (uintptr_t) object, 0); - } - else if (notifier) { - OUR_PMLog(kPMLogClientNotify, (uintptr_t) notifier->handler, 0); - } - } - if ((kIOLogDebugPower & gIOKitDebug) && notifier) - { - PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", - context->us->getName(), - getIOMessageString(msgType), - OBFUSCATE(object), OBFUSCATE(notifier->handler)); - } - - msgArg.notifyRef = msgRef; - msgArg.maxWaitForReply = 0; - - if (context->enableTracing && (notifier != 0)) - { - getPMRootDomain()->traceDetail(notifier, true); - } - - clock_get_uptime(&start); - retCode = context->us->messageClient( - msgType, object, (void *) &msgArg, sizeof(msgArg)); - clock_get_uptime(&end); - if (context->enableTracing && (notifier != NULL)) - { - getPMRootDomain()->traceDetail(notifier, false); - } - - if ( kIOReturnSuccess == retCode ) - { - if ( 0 == msgArg.maxWaitForReply ) - { - // client doesn't want time to respond - 
OUR_PMLog(kPMLogClientAcknowledge, msgRef, (uintptr_t) object); - context->responseArray->setObject(msgIndex, replied); - } - else - { - replied = kOSBooleanFalse; - if ( msgArg.maxWaitForReply > context->maxTimeRequested ) - { - if (msgArg.maxWaitForReply > kCapabilityClientMaxWait) - { - context->maxTimeRequested = kCapabilityClientMaxWait; - PM_ERROR("%s: client %p returned %u for %s\n", - context->us->getName(), - notifier ? (void *) OBFUSCATE(notifier->handler) : OBFUSCATE(object), - msgArg.maxWaitForReply, - getIOMessageString(msgType)); - } - else - context->maxTimeRequested = msgArg.maxWaitForReply; - } - - // Track time taken to ack, by storing the timestamp of - // callback completion - OSNumber * num; - num = OSNumber::withNumber(AbsoluteTime_to_scalar(&end), sizeof(uint64_t) * 8); - if (num) { - context->responseArray->setObject(msgIndex, num); - num->release(); - } - else { - context->responseArray->setObject(msgIndex, replied); - } - } - - if (context->enableTracing) { - SUB_ABSOLUTETIME(&end, &start); - absolutetime_to_nanoseconds(end, &nsec); - - if ((nsec > LOG_KEXT_RESPONSE_TIMES) || (msgArg.maxWaitForReply != 0)) { - getPMRootDomain()->traceAckDelay(notifier, msgArg.maxWaitForReply/1000, NS_TO_MS(nsec)); - } - } - } - else - { - // not a client of ours - // so we won't be waiting for response - OUR_PMLog(kPMLogClientAcknowledge, msgRef, 0); - context->responseArray->setObject(msgIndex, replied); - } - if (context->notifyClients) { - context->notifyClients->setObject(msgIndex, object); - } +void +IOService::pmTellCapabilityClientWithResponse( + OSObject * object, void * arg ) +{ + IOPMSystemCapabilityChangeParameters msgArg; + IOPMInterestContext * context = (IOPMInterestContext *) arg; + OSObject * replied = kOSBooleanTrue; + _IOServiceInterestNotifier * notifier; + uint32_t msgIndex, msgRef, msgType; + IOReturn retCode; + AbsoluteTime start, end; + uint64_t nsec; + + memset(&msgArg, 0, sizeof(msgArg)); + if (context->messageFilter && + !context->messageFilter(context->us, object, context, &msgArg, 0)) { + if ((kIOLogDebugPower & gIOKitDebug) && + (OSDynamicCast(_IOServiceInterestNotifier, object))) { + _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; + PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", + context->us->getName(), + getIOMessageString(context->messageType), + OBFUSCATE(object), OBFUSCATE(n->handler)); + } + return; + } + + if (0 == context->notifyClients) { + context->notifyClients = OSArray::withCapacity( 32 ); + } + notifier = OSDynamicCast(_IOServiceInterestNotifier, object); + msgType = context->messageType; + msgIndex = context->responseArray->getCount(); + msgRef = ((context->serialNumber & 0xFFFF) << 16) + (msgIndex & 0xFFFF); + + IOServicePM * pwrMgt = context->us->pwrMgt; + if (gIOKitDebug & kIOLogPower) { + OUR_PMLog(kPMLogClientNotify, msgRef, msgType); + if (OSDynamicCast(IOService, object)) { + const char *who = ((IOService *) object)->getName(); + gPlatform->PMLog(who, kPMLogClientNotify, (uintptr_t) object, 0); + } else if (notifier) { + OUR_PMLog(kPMLogClientNotify, (uintptr_t) notifier->handler, 0); + } + } + if ((kIOLogDebugPower & gIOKitDebug) && notifier) { + PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", + context->us->getName(), + getIOMessageString(msgType), + OBFUSCATE(object), OBFUSCATE(notifier->handler)); + } + + msgArg.notifyRef = msgRef; + msgArg.maxWaitForReply = 0; + + if (context->enableTracing && (notifier != 0)) { + getPMRootDomain()->traceDetail(notifier, true); + } + + 
clock_get_uptime(&start); + retCode = context->us->messageClient( + msgType, object, (void *) &msgArg, sizeof(msgArg)); + clock_get_uptime(&end); + if (context->enableTracing && (notifier != NULL)) { + getPMRootDomain()->traceDetail(notifier, false); + } + + if (kIOReturnSuccess == retCode) { + if (0 == msgArg.maxWaitForReply) { + // client doesn't want time to respond + OUR_PMLog(kPMLogClientAcknowledge, msgRef, (uintptr_t) object); + context->responseArray->setObject(msgIndex, replied); + } else { + replied = kOSBooleanFalse; + if (msgArg.maxWaitForReply > context->maxTimeRequested) { + if (msgArg.maxWaitForReply > kCapabilityClientMaxWait) { + context->maxTimeRequested = kCapabilityClientMaxWait; + PM_ERROR("%s: client %p returned %u for %s\n", + context->us->getName(), + notifier ? (void *) OBFUSCATE(notifier->handler) : OBFUSCATE(object), + msgArg.maxWaitForReply, + getIOMessageString(msgType)); + } else { + context->maxTimeRequested = msgArg.maxWaitForReply; + } + } + + // Track time taken to ack, by storing the timestamp of + // callback completion + OSNumber * num; + num = OSNumber::withNumber(AbsoluteTime_to_scalar(&end), sizeof(uint64_t) * 8); + if (num) { + context->responseArray->setObject(msgIndex, num); + num->release(); + } else { + context->responseArray->setObject(msgIndex, replied); + } + } + + if (context->enableTracing) { + SUB_ABSOLUTETIME(&end, &start); + absolutetime_to_nanoseconds(end, &nsec); + if ((nsec > LOG_KEXT_RESPONSE_TIMES) || (msgArg.maxWaitForReply != 0)) { + getPMRootDomain()->traceAckDelay(notifier, msgArg.maxWaitForReply / 1000, NS_TO_MS(nsec)); + } + } + } else { + // not a client of ours + // so we won't be waiting for response + OUR_PMLog(kPMLogClientAcknowledge, msgRef, 0); + context->responseArray->setObject(msgIndex, replied); + } + if (context->notifyClients) { + context->notifyClients->setObject(msgIndex, object); + } } //********************************************************************************* @@ -6558,9 +6429,10 @@ void IOService::pmTellCapabilityClientWithResponse( // the aborted destination state number. //********************************************************************************* -void IOService::tellNoChangeDown( unsigned long ) +void +IOService::tellNoChangeDown( unsigned long ) { - return tellClients( kIOMessageDeviceWillNotPowerOff ); + return tellClients( kIOMessageDeviceWillNotPowerOff ); } //********************************************************************************* @@ -6572,9 +6444,10 @@ void IOService::tellNoChangeDown( unsigned long ) // the aborted destination state number. //********************************************************************************* -void IOService::tellChangeUp( unsigned long ) +void +IOService::tellChangeUp( unsigned long ) { - return tellClients( kIOMessageDeviceHasPoweredOn ); + return tellClients( kIOMessageDeviceHasPoweredOn ); } //********************************************************************************* @@ -6583,36 +6456,37 @@ void IOService::tellChangeUp( unsigned long ) // Notify registered applications and kernel clients of something. 
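
pmTellClientWithResponse() and pmTellCapabilityClientWithResponse() above fold each client's requested wait into context->maxTimeRequested with the same clamp-and-log pattern; only the ceiling differs (kPriorityClientMaxWait versus kCapabilityClientMaxWait). A condensed sketch of that merge, with hypothetical names:

#include <stdint.h>

// Fold one client's requested wait (microseconds) into the running
// maximum, clamping runaway requests at `ceiling`.
static uint32_t
mergeClientWait(uint32_t current, uint32_t requested, uint32_t ceiling)
{
    if (requested <= current) {
        return current;      // keep the largest request seen so far
    }
    if (requested > ceiling) {
        return ceiling;      // misbehaving client: clamp (the real code also logs)
    }
    return requested;
}
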
//********************************************************************************* -void IOService::tellClients( int messageType ) +void +IOService::tellClients( int messageType ) { - IOPMInterestContext context; + IOPMInterestContext context; - RD_LOG("tellClients( %s )\n", getIOMessageString(messageType)); + RD_LOG("tellClients( %s )\n", getIOMessageString(messageType)); - memset(&context, 0, sizeof(context)); - context.messageType = messageType; - context.isPreChange = fIsPreChange; - context.us = this; - context.stateNumber = fHeadNotePowerState; - context.stateFlags = fHeadNotePowerArrayEntry->capabilityFlags; - context.changeFlags = fHeadNoteChangeFlags; - context.enableTracing = IS_ROOT_DOMAIN; - context.messageFilter = (IS_ROOT_DOMAIN) ? - OSMemberFunctionCast( - IOPMMessageFilter, - this, - &IOPMrootDomain::systemMessageFilter) : 0; + memset(&context, 0, sizeof(context)); + context.messageType = messageType; + context.isPreChange = fIsPreChange; + context.us = this; + context.stateNumber = fHeadNotePowerState; + context.stateFlags = fHeadNotePowerArrayEntry->capabilityFlags; + context.changeFlags = fHeadNoteChangeFlags; + context.enableTracing = IS_ROOT_DOMAIN; + context.messageFilter = (IS_ROOT_DOMAIN) ? + OSMemberFunctionCast( + IOPMMessageFilter, + this, + &IOPMrootDomain::systemMessageFilter) : 0; - context.notifyType = kNotifyPriority; - applyToInterested( gIOPriorityPowerStateInterest, - tellKernelClientApplier, (void *) &context ); + context.notifyType = kNotifyPriority; + applyToInterested( gIOPriorityPowerStateInterest, + tellKernelClientApplier, (void *) &context ); - context.notifyType = kNotifyApps; - applyToInterested( gIOAppPowerStateInterest, - tellAppClientApplier, (void *) &context ); + context.notifyType = kNotifyApps; + applyToInterested( gIOAppPowerStateInterest, + tellAppClientApplier, (void *) &context ); - applyToInterested( gIOGeneralInterest, - tellKernelClientApplier, (void *) &context ); + applyToInterested( gIOGeneralInterest, + tellKernelClientApplier, (void *) &context ); } //********************************************************************************* @@ -6621,263 +6495,246 @@ void IOService::tellClients( int messageType ) // Message a kernel client. 
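
The kernel-client paths above (pmTellClientWithResponse, pmTellCapabilityClientWithResponse) deliver an IOPowerStateChangeNotification and honor a returnValue handshake; tellKernelClientApplier() below sends the same structure but expects no response. A sketch of a handler on the response-based path, under the semantics visible in those appliers; the handler name and the 10 ms figure are illustrative, not part of this patch:

// Hypothetical in-kernel interest handler cooperating with the appliers.
static IOReturn
myPMInterestHandler(void * target, void * refCon, UInt32 messageType,
    IOService * provider, void * messageArgument, vm_size_t argSize)
{
    IOPowerStateChangeNotification * notify =
        (IOPowerStateChangeNotification *) messageArgument;

    if ((messageType == kIOMessageDeviceWillPowerOff) && notify) {
        // Ask for up to 10 ms (in microseconds); PM folds this into
        // maxTimeRequested and arms the ack timer accordingly.
        notify->returnValue = 10 * 1000;
        // Once ready, acknowledge with the cookie we were handed:
        //   provider->allowPowerChange((unsigned long) notify->powerRef);
    }
    return kIOReturnSuccess;   // returnValue left at 0 is an implicit ack
}
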
//********************************************************************************* -static void tellKernelClientApplier( OSObject * object, void * arg ) -{ - IOPowerStateChangeNotification notify; - IOPMInterestContext * context = (IOPMInterestContext *) arg; - - if (context->messageFilter && - !context->messageFilter(context->us, object, context, 0, 0)) - { - if ((kIOLogDebugPower & gIOKitDebug) && - (OSDynamicCast(_IOServiceInterestNotifier, object))) - { - _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; - PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", - context->us->getName(), - IOService::getIOMessageString(context->messageType), - OBFUSCATE(object), OBFUSCATE(n->handler)); - } - return; - } - - notify.powerRef = (void *) 0; - notify.returnValue = 0; - notify.stateNumber = context->stateNumber; - notify.stateFlags = context->stateFlags; - - if (context->enableTracing && object) - { - IOService::getPMRootDomain()->traceDetail(object, true); - } - context->us->messageClient(context->messageType, object, ¬ify, sizeof(notify)); - if (context->enableTracing && object) - { - IOService::getPMRootDomain()->traceDetail(object, false); - } - - - - if ((kIOLogDebugPower & gIOKitDebug) && - (OSDynamicCast(_IOServiceInterestNotifier, object))) - { - _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; - PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", - context->us->getName(), - IOService::getIOMessageString(context->messageType), - OBFUSCATE(object), OBFUSCATE(n->handler)); - } +static void +tellKernelClientApplier( OSObject * object, void * arg ) +{ + IOPowerStateChangeNotification notify; + IOPMInterestContext * context = (IOPMInterestContext *) arg; + + if (context->messageFilter && + !context->messageFilter(context->us, object, context, 0, 0)) { + if ((kIOLogDebugPower & gIOKitDebug) && + (OSDynamicCast(_IOServiceInterestNotifier, object))) { + _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; + PM_LOG("%s DROP Client %s, notifier %p, handler %p\n", + context->us->getName(), + IOService::getIOMessageString(context->messageType), + OBFUSCATE(object), OBFUSCATE(n->handler)); + } + return; + } + + notify.powerRef = (void *) 0; + notify.returnValue = 0; + notify.stateNumber = context->stateNumber; + notify.stateFlags = context->stateFlags; + + if (context->enableTracing && object) { + IOService::getPMRootDomain()->traceDetail(object, true); + } + context->us->messageClient(context->messageType, object, ¬ify, sizeof(notify)); + if (context->enableTracing && object) { + IOService::getPMRootDomain()->traceDetail(object, false); + } + + + + if ((kIOLogDebugPower & gIOKitDebug) && + (OSDynamicCast(_IOServiceInterestNotifier, object))) { + _IOServiceInterestNotifier *n = (_IOServiceInterestNotifier *) object; + PM_LOG("%s MESG Client %s, notifier %p, handler %p\n", + context->us->getName(), + IOService::getIOMessageString(context->messageType), + OBFUSCATE(object), OBFUSCATE(n->handler)); + } } -static OSNumber * copyClientIDForNotification( - OSObject *object, - IOPMInterestContext *context) +static OSNumber * +copyClientIDForNotification( + OSObject *object, + IOPMInterestContext *context) { - OSNumber *clientID = NULL; - context->us->messageClient(kIOMessageCopyClientID, object, &clientID); - return clientID; + OSNumber *clientID = NULL; + context->us->messageClient(kIOMessageCopyClientID, object, &clientID); + return clientID; } -static void logClientIDForNotification( - OSObject *object, - IOPMInterestContext *context, - 
const char *logString) +static void +logClientIDForNotification( + OSObject *object, + IOPMInterestContext *context, + const char *logString) { - OSString *logClientID = NULL; - OSNumber *clientID = copyClientIDForNotification(object, context); + OSString *logClientID = NULL; + OSNumber *clientID = copyClientIDForNotification(object, context); - if (logString) - { - if (clientID) - logClientID = IOCopyLogNameForPID(clientID->unsigned32BitValue()); + if (logString) { + if (clientID) { + logClientID = IOCopyLogNameForPID(clientID->unsigned32BitValue()); + } - PM_LOG("%s %s %s, %s\n", - context->us->getName(), logString, - IOService::getIOMessageString(context->messageType), - logClientID ? logClientID->getCStringNoCopy() : ""); + PM_LOG("%s %s %s, %s\n", + context->us->getName(), logString, + IOService::getIOMessageString(context->messageType), + logClientID ? logClientID->getCStringNoCopy() : ""); - if (logClientID) - logClientID->release(); - } + if (logClientID) { + logClientID->release(); + } + } - if (clientID) - clientID->release(); + if (clientID) { + clientID->release(); + } - return; + return; } -static void tellAppClientApplier( OSObject * object, void * arg ) -{ - IOPMInterestContext * context = (IOPMInterestContext *) arg; - OSNumber * clientID = NULL; - proc_t proc = NULL; - boolean_t proc_suspended = FALSE; - - if (context->us == IOService::getPMRootDomain()) - { - if ((clientID = copyClientIDForNotification(object, context))) - { - uint32_t clientPID = clientID->unsigned32BitValue(); - clientID->release(); - proc = proc_find(clientPID); - - if (proc) - { - proc_suspended = get_task_pidsuspended((task_t) proc->task); - proc_rele(proc); - - if (proc_suspended) - { - logClientIDForNotification(object, context, "tellAppClientApplier - Suspended"); - return; - } - } - } - } +static void +tellAppClientApplier( OSObject * object, void * arg ) +{ + IOPMInterestContext * context = (IOPMInterestContext *) arg; + OSNumber * clientID = NULL; + proc_t proc = NULL; + boolean_t proc_suspended = FALSE; + + if (context->us == IOService::getPMRootDomain()) { + if ((clientID = copyClientIDForNotification(object, context))) { + uint32_t clientPID = clientID->unsigned32BitValue(); + clientID->release(); + proc = proc_find(clientPID); + + if (proc) { + proc_suspended = get_task_pidsuspended((task_t) proc->task); + proc_rele(proc); + + if (proc_suspended) { + logClientIDForNotification(object, context, "tellAppClientApplier - Suspended"); + return; + } + } + } + } - if (context->messageFilter && - !context->messageFilter(context->us, object, context, 0, 0)) - { - if (kIOLogDebugPower & gIOKitDebug) - { - logClientIDForNotification(object, context, "DROP App"); - } - return; - } + if (context->messageFilter && + !context->messageFilter(context->us, object, context, 0, 0)) { + if (kIOLogDebugPower & gIOKitDebug) { + logClientIDForNotification(object, context, "DROP App"); + } + return; + } - if (kIOLogDebugPower & gIOKitDebug) - { - logClientIDForNotification(object, context, "MESG App"); - } + if (kIOLogDebugPower & gIOKitDebug) { + logClientIDForNotification(object, context, "MESG App"); + } - context->us->messageClient(context->messageType, object, 0); + context->us->messageClient(context->messageType, object, 0); } //********************************************************************************* // [private] checkForDone //********************************************************************************* -bool IOService::checkForDone( void ) +bool +IOService::checkForDone( void ) { - int i = 0; - 
OSObject * theFlag; + int i = 0; + OSObject * theFlag; - if (fResponseArray == NULL) { - return true; - } + if (fResponseArray == NULL) { + return true; + } - for (i = 0; ; i++) { - theFlag = fResponseArray->getObject(i); + for (i = 0;; i++) { + theFlag = fResponseArray->getObject(i); - if (NULL == theFlag) { - break; - } + if (NULL == theFlag) { + break; + } - if (kOSBooleanTrue != theFlag) { - return false; - } - } - return true; + if (kOSBooleanTrue != theFlag) { + return false; + } + } + return true; } //********************************************************************************* // [public] responseValid //********************************************************************************* -bool IOService::responseValid( uint32_t refcon, int pid ) -{ - UInt16 serialComponent; - UInt16 ordinalComponent; - OSObject * theFlag; - OSObject *object = 0; - - serialComponent = (refcon >> 16) & 0xFFFF; - ordinalComponent = (refcon & 0xFFFF); - - if ( serialComponent != fSerialNumber ) - { - return false; - } - - if ( fResponseArray == NULL ) - { - return false; - } - - theFlag = fResponseArray->getObject(ordinalComponent); - - if ( theFlag == 0 ) - { - return false; - } - - if (fNotifyClientArray) - object = fNotifyClientArray->getObject(ordinalComponent); - - OSNumber * num; - if ((num = OSDynamicCast(OSNumber, theFlag))) - { - - AbsoluteTime now; - AbsoluteTime start; - uint64_t nsec; - char name[128]; - - clock_get_uptime(&now); - AbsoluteTime_to_scalar(&start) = num->unsigned64BitValue(); - SUB_ABSOLUTETIME(&now, &start); - absolutetime_to_nanoseconds(now, &nsec); - - if (pid != 0) { - name[0] = '\0'; - proc_name(pid, name, sizeof(name)); - - if (nsec > LOG_APP_RESPONSE_TIMES) - { - IOLog("PM response took %d ms (%d, %s)\n", NS_TO_MS(nsec), - pid, name); - } - - - if (nsec > LOG_APP_RESPONSE_MSG_TRACER) - { - // TODO: populate the messageType argument - getPMRootDomain()->pmStatsRecordApplicationResponse( - gIOPMStatsResponseSlow, - name, 0, NS_TO_MS(nsec), pid, object); - } - else - { - getPMRootDomain()->pmStatsRecordApplicationResponse( - gIOPMStatsResponsePrompt, - name, 0, NS_TO_MS(nsec), pid, object); - } - } - else { - getPMRootDomain()->traceAckDelay(object, 0, NS_TO_MS(nsec)); - } - - if (kIOLogDebugPower & gIOKitDebug) - { - PM_LOG("Ack(%u) %u ms\n", - (uint32_t) ordinalComponent, - NS_TO_MS(nsec)); - } - theFlag = kOSBooleanFalse; - } - else if (object) { - getPMRootDomain()->pmStatsRecordApplicationResponse( - gIOPMStatsResponsePrompt, - 0, 0, 0, pid, object); - - } - - if ( kOSBooleanFalse == theFlag ) - { - fResponseArray->replaceObject(ordinalComponent, kOSBooleanTrue); - } - - return true; +bool +IOService::responseValid( uint32_t refcon, int pid ) +{ + UInt16 serialComponent; + UInt16 ordinalComponent; + OSObject * theFlag; + OSObject *object = 0; + + serialComponent = (refcon >> 16) & 0xFFFF; + ordinalComponent = (refcon & 0xFFFF); + + if (serialComponent != fSerialNumber) { + return false; + } + + if (fResponseArray == NULL) { + return false; + } + + theFlag = fResponseArray->getObject(ordinalComponent); + + if (theFlag == 0) { + return false; + } + + if (fNotifyClientArray) { + object = fNotifyClientArray->getObject(ordinalComponent); + } + + OSNumber * num; + if ((num = OSDynamicCast(OSNumber, theFlag))) { + AbsoluteTime now; + AbsoluteTime start; + uint64_t nsec; + char name[128]; + + clock_get_uptime(&now); + AbsoluteTime_to_scalar(&start) = num->unsigned64BitValue(); + SUB_ABSOLUTETIME(&now, &start); + absolutetime_to_nanoseconds(now, &nsec); + + if (pid != 0) { + 
name[0] = '\0'; + proc_name(pid, name, sizeof(name)); + + if (nsec > LOG_APP_RESPONSE_TIMES) { + IOLog("PM response took %d ms (%d, %s)\n", NS_TO_MS(nsec), + pid, name); + } + + + if (nsec > LOG_APP_RESPONSE_MSG_TRACER) { + // TODO: populate the messageType argument + getPMRootDomain()->pmStatsRecordApplicationResponse( + gIOPMStatsResponseSlow, + name, 0, NS_TO_MS(nsec), pid, object); + } else { + getPMRootDomain()->pmStatsRecordApplicationResponse( + gIOPMStatsResponsePrompt, + name, 0, NS_TO_MS(nsec), pid, object); + } + } else { + getPMRootDomain()->traceAckDelay(object, 0, NS_TO_MS(nsec)); + } + + if (kIOLogDebugPower & gIOKitDebug) { + PM_LOG("Ack(%u) %u ms\n", + (uint32_t) ordinalComponent, + NS_TO_MS(nsec)); + } + theFlag = kOSBooleanFalse; + } else if (object) { + getPMRootDomain()->pmStatsRecordApplicationResponse( + gIOPMStatsResponsePrompt, + 0, 0, 0, pid, object); + } + + if (kOSBooleanFalse == theFlag) { + fResponseArray->replaceObject(ordinalComponent, kOSBooleanTrue); + } + + return true; } //********************************************************************************* @@ -6888,33 +6745,35 @@ bool IOService::responseValid( uint32_t refcon, int pid ) // so, and all acknowledgements are positive, we continue with the power change. //********************************************************************************* -IOReturn IOService::allowPowerChange( unsigned long refcon ) +IOReturn +IOService::allowPowerChange( unsigned long refcon ) { - IOPMRequest * request; + IOPMRequest * request; - if ( !initialized ) - { - // we're unloading - return kIOReturnSuccess; - } + if (!initialized) { + // we're unloading + return kIOReturnSuccess; + } - request = acquirePMRequest( this, kIOPMRequestTypeAllowPowerChange ); - if (!request) - return kIOReturnNoMemory; + request = acquirePMRequest( this, kIOPMRequestTypeAllowPowerChange ); + if (!request) { + return kIOReturnNoMemory; + } - request->fArg0 = (void *) refcon; - request->fArg1 = (void *)(uintptr_t) proc_selfpid(); - request->fArg2 = (void *) 0; - submitPMRequest( request ); + request->fArg0 = (void *) refcon; + request->fArg1 = (void *)(uintptr_t) proc_selfpid(); + request->fArg2 = (void *) 0; + submitPMRequest( request ); - return kIOReturnSuccess; + return kIOReturnSuccess; } #ifndef __LP64__ -IOReturn IOService::serializedAllowPowerChange2( unsigned long refcon ) +IOReturn +IOService::serializedAllowPowerChange2( unsigned long refcon ) { - // [deprecated] public - return kIOReturnUnsupported; + // [deprecated] public + return kIOReturnUnsupported; } #endif /* !__LP64__ */ @@ -6926,41 +6785,41 @@ IOReturn IOService::serializedAllowPowerChange2( unsigned long refcon ) // client to respond, we abandon the power change. 
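
responseValid() above undoes the packing done by the pmTell*WithResponse() appliers: the high 16 bits of the refcon carry fSerialNumber, so a late acknowledgement from an earlier notification round is rejected, and the low 16 bits index fResponseArray. The round trip as a standalone sketch:

#include <stdbool.h>
#include <stdint.h>

// msgRef cookie layout shared by the notify and ack paths.
static inline uint32_t
makeMsgRef(uint16_t serial, uint16_t msgIndex)
{
    return ((uint32_t) serial << 16) | msgIndex;
}

static inline bool
splitMsgRef(uint32_t refcon, uint16_t expectedSerial, uint16_t * outIndex)
{
    if ((uint16_t)(refcon >> 16) != expectedSerial) {
        return false;   // stale response from an older round: ignore it
    }
    *outIndex = (uint16_t)(refcon & 0xFFFF);
    return true;
}
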
//********************************************************************************* -IOReturn IOService::cancelPowerChange( unsigned long refcon ) +IOReturn +IOService::cancelPowerChange( unsigned long refcon ) { - IOPMRequest * request; - char name[128]; - pid_t pid = proc_selfpid(); + IOPMRequest * request; + char name[128]; + pid_t pid = proc_selfpid(); - if ( !initialized ) - { - // we're unloading - return kIOReturnSuccess; - } + if (!initialized) { + // we're unloading + return kIOReturnSuccess; + } - name[0] = '\0'; - proc_name(pid, name, sizeof(name)); - PM_ERROR("PM notification cancel (pid %d, %s)\n", pid, name); + name[0] = '\0'; + proc_name(pid, name, sizeof(name)); + PM_ERROR("PM notification cancel (pid %d, %s)\n", pid, name); - request = acquirePMRequest( this, kIOPMRequestTypeCancelPowerChange ); - if (!request) - { - return kIOReturnNoMemory; - } + request = acquirePMRequest( this, kIOPMRequestTypeCancelPowerChange ); + if (!request) { + return kIOReturnNoMemory; + } - request->fArg0 = (void *) refcon; - request->fArg1 = (void *)(uintptr_t) proc_selfpid(); - request->fArg2 = (void *) OSString::withCString(name); - submitPMRequest( request ); + request->fArg0 = (void *) refcon; + request->fArg1 = (void *)(uintptr_t) proc_selfpid(); + request->fArg2 = (void *) OSString::withCString(name); + submitPMRequest( request ); - return kIOReturnSuccess; + return kIOReturnSuccess; } #ifndef __LP64__ -IOReturn IOService::serializedCancelPowerChange2( unsigned long refcon ) +IOReturn +IOService::serializedCancelPowerChange2( unsigned long refcon ) { - // [deprecated] public - return kIOReturnUnsupported; + // [deprecated] public + return kIOReturnUnsupported; } //********************************************************************************* @@ -6969,7 +6828,8 @@ IOReturn IOService::serializedCancelPowerChange2( unsigned long refcon ) // called when clamp timer expires...set power state to 0. 
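
cancelPowerChange() above logs the calling pid and process name because these entry points are, in practice, driven by user processes answering sleep notifications: the kernel refcon travels to user space as a notification ID and comes back through IOKit.framework. A sketch of the classic user-space side, with error handling elided and registration via IORegisterForSystemPower() assumed:

#include <IOKit/IOMessage.h>
#include <IOKit/pwr_mgt/IOPMLib.h>

static io_connect_t gRootPort;   // returned by IORegisterForSystemPower()

static void
sleepCallback(void * refCon, io_service_t service,
    natural_t messageType, void * messageArgument)
{
    switch (messageType) {
    case kIOMessageCanSystemSleep:
        // Either veto: IOCancelPowerChange(gRootPort, (long) messageArgument);
        // or permit:
        IOAllowPowerChange(gRootPort, (long) messageArgument);
        break;
    case kIOMessageSystemWillSleep:
        // Not cancellable here; acknowledge promptly or the ack timer
        // above expires and the change proceeds anyway.
        IOAllowPowerChange(gRootPort, (long) messageArgument);
        break;
    }
}
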
//********************************************************************************* -void IOService::PM_Clamp_Timer_Expired( void ) +void +IOService::PM_Clamp_Timer_Expired( void ) { } @@ -6979,7 +6839,8 @@ void IOService::PM_Clamp_Timer_Expired( void ) // Set to highest available power state for a minimum of duration milliseconds //********************************************************************************* -void IOService::clampPowerOn( unsigned long duration ) +void +IOService::clampPowerOn( unsigned long duration ) { } #endif /* !__LP64__ */ @@ -6989,79 +6850,82 @@ void IOService::clampPowerOn( unsigned long duration ) // // Configures the IOStateReport for kPMPowerStateChannel //********************************************************************************* -IOReturn IOService::configurePowerStatesReport( IOReportConfigureAction action, void *result ) -{ - - IOReturn rc = kIOReturnSuccess; - size_t reportSize; - unsigned long i; - uint64_t ts; - - if (!pwrMgt) - return kIOReturnUnsupported; - - if (!fNumberOfPowerStates) - return kIOReturnSuccess; // For drivers which are in power plane, but haven't called registerPowerDriver() - PM_LOCK(); - - switch (action) - { - case kIOReportEnable: - if (fReportBuf) - { - fReportClientCnt++; - break; - } - reportSize = STATEREPORT_BUFSIZE(fNumberOfPowerStates); - fReportBuf = IOMalloc(reportSize); - if (!fReportBuf) { - rc = kIOReturnNoMemory; - break; - } - memset(fReportBuf, 0, reportSize); - - STATEREPORT_INIT(fNumberOfPowerStates, fReportBuf, reportSize, - getRegistryEntryID(), kPMPowerStatesChID, kIOReportCategoryPower); - - for (i = 0; i < fNumberOfPowerStates; i++) { - unsigned bits = 0; - - if (fPowerStates[i].capabilityFlags & kIOPMPowerOn) - bits |= kPMReportPowerOn; - if (fPowerStates[i].capabilityFlags & kIOPMDeviceUsable) - bits |= kPMReportDeviceUsable; - if (fPowerStates[i].capabilityFlags & kIOPMLowPower) - bits |= kPMReportLowPower; - - STATEREPORT_SETSTATEID(fReportBuf, i, ((bits & 0xff) << 8) | - ((StateOrder(fMaxPowerState) & 0xf) << 4) | (StateOrder(i) & 0xf)); - } - ts = mach_absolute_time(); - STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts); - break; - - case kIOReportDisable: - if (fReportClientCnt == 0) { - rc = kIOReturnBadArgument; - break; - } - if (fReportClientCnt == 1) - { - IOFree(fReportBuf, STATEREPORT_BUFSIZE(fNumberOfPowerStates)); - fReportBuf = NULL; - } - fReportClientCnt--; - break; - - case kIOReportGetDimensions: - if (fReportBuf) - STATEREPORT_UPDATERES(fReportBuf, kIOReportGetDimensions, result); - break; - } - - PM_UNLOCK(); - - return rc; +IOReturn +IOService::configurePowerStatesReport( IOReportConfigureAction action, void *result ) +{ + IOReturn rc = kIOReturnSuccess; + size_t reportSize; + unsigned long i; + uint64_t ts; + + if (!pwrMgt) { + return kIOReturnUnsupported; + } + + if (!fNumberOfPowerStates) { + return kIOReturnSuccess; // For drivers which are in power plane, but haven't called registerPowerDriver() + } + PM_LOCK(); + + switch (action) { + case kIOReportEnable: + if (fReportBuf) { + fReportClientCnt++; + break; + } + reportSize = STATEREPORT_BUFSIZE(fNumberOfPowerStates); + fReportBuf = IOMalloc(reportSize); + if (!fReportBuf) { + rc = kIOReturnNoMemory; + break; + } + memset(fReportBuf, 0, reportSize); + + STATEREPORT_INIT(fNumberOfPowerStates, fReportBuf, reportSize, + getRegistryEntryID(), kPMPowerStatesChID, kIOReportCategoryPower); + + for (i = 0; i < fNumberOfPowerStates; i++) { + unsigned bits = 0; + + if (fPowerStates[i].capabilityFlags & kIOPMPowerOn) { + 
bits |= kPMReportPowerOn; + } + if (fPowerStates[i].capabilityFlags & kIOPMDeviceUsable) { + bits |= kPMReportDeviceUsable; + } + if (fPowerStates[i].capabilityFlags & kIOPMLowPower) { + bits |= kPMReportLowPower; + } + + STATEREPORT_SETSTATEID(fReportBuf, i, ((bits & 0xff) << 8) | + ((StateOrder(fMaxPowerState) & 0xf) << 4) | (StateOrder(i) & 0xf)); + } + ts = mach_absolute_time(); + STATEREPORT_SETSTATE(fReportBuf, fCurrentPowerState, ts); + break; + + case kIOReportDisable: + if (fReportClientCnt == 0) { + rc = kIOReturnBadArgument; + break; + } + if (fReportClientCnt == 1) { + IOFree(fReportBuf, STATEREPORT_BUFSIZE(fNumberOfPowerStates)); + fReportBuf = NULL; + } + fReportClientCnt--; + break; + + case kIOReportGetDimensions: + if (fReportBuf) { + STATEREPORT_UPDATERES(fReportBuf, kIOReportGetDimensions, result); + } + break; + } + + PM_UNLOCK(); + + return rc; } //********************************************************************************* @@ -7069,50 +6933,53 @@ IOReturn IOService::configurePowerStatesReport( IOReportConfigureAction action, // // Updates the IOStateReport for kPMPowerStateChannel //********************************************************************************* -IOReturn IOService::updatePowerStatesReport( IOReportConfigureAction action, void *result, void *destination ) -{ - uint32_t size2cpy; - void *data2cpy; - uint64_t ts; - IOReturn rc = kIOReturnSuccess; - IOBufferMemoryDescriptor *dest = OSDynamicCast(IOBufferMemoryDescriptor, (OSObject *)destination); - - - if (!pwrMgt) - return kIOReturnUnsupported; - if (!fNumberOfPowerStates) - return kIOReturnSuccess; - - if ( !result || !dest ) return kIOReturnBadArgument; - PM_LOCK(); - - switch (action) { - case kIOReportCopyChannelData: - if ( !fReportBuf ) { - rc = kIOReturnNotOpen; - break; - } - - ts = mach_absolute_time(); - STATEREPORT_UPDATEPREP(fReportBuf, ts, data2cpy, size2cpy); - if (size2cpy > (dest->getCapacity() - dest->getLength()) ) { - rc = kIOReturnOverrun; - break; - } - - STATEREPORT_UPDATERES(fReportBuf, kIOReportCopyChannelData, result); - dest->appendBytes(data2cpy, size2cpy); - break; +IOReturn +IOService::updatePowerStatesReport( IOReportConfigureAction action, void *result, void *destination ) +{ + uint32_t size2cpy; + void *data2cpy; + uint64_t ts; + IOReturn rc = kIOReturnSuccess; + IOBufferMemoryDescriptor *dest = OSDynamicCast(IOBufferMemoryDescriptor, (OSObject *)destination); + + + if (!pwrMgt) { + return kIOReturnUnsupported; + } + if (!fNumberOfPowerStates) { + return kIOReturnSuccess; + } + + if (!result || !dest) { + return kIOReturnBadArgument; + } + PM_LOCK(); + + switch (action) { + case kIOReportCopyChannelData: + if (!fReportBuf) { + rc = kIOReturnNotOpen; + break; + } - default: - break; + ts = mach_absolute_time(); + STATEREPORT_UPDATEPREP(fReportBuf, ts, data2cpy, size2cpy); + if (size2cpy > (dest->getCapacity() - dest->getLength())) { + rc = kIOReturnOverrun; + break; + } - } + STATEREPORT_UPDATERES(fReportBuf, kIOReportCopyChannelData, result); + dest->appendBytes(data2cpy, size2cpy); + break; - PM_UNLOCK(); + default: + break; + } - return rc; + PM_UNLOCK(); + return rc; } //********************************************************************************* @@ -7120,30 +6987,31 @@ IOReturn IOService::updatePowerStatesReport( IOReportConfigureAction action, voi // // Configures the IOSimpleReport for given channel id //********************************************************************************* -IOReturn IOService::configureSimplePowerReport(IOReportConfigureAction 
action, void *result )
+IOReturn
+IOService::configureSimplePowerReport(IOReportConfigureAction action, void *result )
 {
+    IOReturn rc = kIOReturnSuccess;
-    IOReturn rc = kIOReturnSuccess;
-
-    if ( !pwrMgt )
-        return kIOReturnUnsupported;
+    if (!pwrMgt) {
+        return kIOReturnUnsupported;
+    }
-    if ( !fNumberOfPowerStates )
-        return rc;
+    if (!fNumberOfPowerStates) {
+        return rc;
+    }
-    switch (action)
-    {
-        case kIOReportEnable:
-        case kIOReportDisable:
-            break;
+    switch (action) {
+    case kIOReportEnable:
+    case kIOReportDisable:
+        break;
-        case kIOReportGetDimensions:
-            SIMPLEREPORT_UPDATERES(kIOReportGetDimensions, result);
-            break;
-    }
+    case kIOReportGetDimensions:
+        SIMPLEREPORT_UPDATERES(kIOReportGetDimensions, result);
+        break;
+    }
-    return rc;
+    return rc;
 }

 //*********************************************************************************
@@ -7151,59 +7019,65 @@ IOReturn IOService::configureSimplePowerReport(IOReportConfigureAction action, v
 //
 // Updates the IOSimpleReport for the given channel id
 //*********************************************************************************
-IOReturn IOService::updateSimplePowerReport( IOReportConfigureAction action, void *result, void *destination )
+IOReturn
+IOService::updateSimplePowerReport( IOReportConfigureAction action, void *result, void *destination )
 {
-    uint32_t size2cpy;
-    void *data2cpy;
-    uint64_t buf[SIMPLEREPORT_BUFSIZE/sizeof(uint64_t)+1]; // Force a 8-byte alignment
-    IOBufferMemoryDescriptor *dest = OSDynamicCast(IOBufferMemoryDescriptor, (OSObject *)destination);
-    IOReturn rc = kIOReturnSuccess;
-    unsigned bits = 0;
-
-
-    if ( !pwrMgt )
-        return kIOReturnUnsupported;
-    if ( !result || !dest ) return kIOReturnBadArgument;
+    uint32_t size2cpy;
+    void *data2cpy;
+    uint64_t buf[SIMPLEREPORT_BUFSIZE / sizeof(uint64_t) + 1]; // Force an 8-byte alignment
+    IOBufferMemoryDescriptor *dest = OSDynamicCast(IOBufferMemoryDescriptor, (OSObject *)destination);
+    IOReturn rc = kIOReturnSuccess;
+    unsigned bits = 0;
-    if ( !fNumberOfPowerStates )
-        return rc;
-    PM_LOCK();
-    switch (action) {
-        case kIOReportCopyChannelData:
+    if (!pwrMgt) {
+        return kIOReturnUnsupported;
+    }
+    if (!result || !dest) {
+        return kIOReturnBadArgument;
+    }
-            SIMPLEREPORT_INIT(buf, sizeof(buf), getRegistryEntryID(), kPMCurrStateChID, kIOReportCategoryPower);
+    if (!fNumberOfPowerStates) {
+        return rc;
+    }
+    PM_LOCK();
-            if (fPowerStates[fCurrentPowerState].capabilityFlags & kIOPMPowerOn)
-                bits |= kPMReportPowerOn;
-            if (fPowerStates[fCurrentPowerState].capabilityFlags & kIOPMDeviceUsable)
-                bits |= kPMReportDeviceUsable;
-            if (fPowerStates[fCurrentPowerState].capabilityFlags & kIOPMLowPower)
-                bits |= kPMReportLowPower;
+    switch (action) {
+    case kIOReportCopyChannelData:
+        SIMPLEREPORT_INIT(buf, sizeof(buf), getRegistryEntryID(), kPMCurrStateChID, kIOReportCategoryPower);
-            SIMPLEREPORT_SETVALUE(buf, ((bits & 0xff) << 8) | ((StateOrder(fMaxPowerState) & 0xf) << 4) |
-                (StateOrder(fCurrentPowerState) & 0xf));
+        if (fPowerStates[fCurrentPowerState].capabilityFlags & kIOPMPowerOn) {
+            bits |= kPMReportPowerOn;
+        }
+        if (fPowerStates[fCurrentPowerState].capabilityFlags & kIOPMDeviceUsable) {
+            bits |= kPMReportDeviceUsable;
+        }
+        if (fPowerStates[fCurrentPowerState].capabilityFlags & kIOPMLowPower) {
+            bits |= kPMReportLowPower;
+        }
-            SIMPLEREPORT_UPDATEPREP(buf, data2cpy, size2cpy);
-            if (size2cpy > (dest->getCapacity() - dest->getLength())) {
-                rc = kIOReturnOverrun;
-                break;
-            }
-            SIMPLEREPORT_UPDATERES(kIOReportCopyChannelData, result);
-
dest->appendBytes(data2cpy, size2cpy); - break; + SIMPLEREPORT_SETVALUE(buf, ((bits & 0xff) << 8) | ((StateOrder(fMaxPowerState) & 0xf) << 4) | + (StateOrder(fCurrentPowerState) & 0xf)); - default: - break; + SIMPLEREPORT_UPDATEPREP(buf, data2cpy, size2cpy); + if (size2cpy > (dest->getCapacity() - dest->getLength())) { + rc = kIOReturnOverrun; + break; + } - } + SIMPLEREPORT_UPDATERES(kIOReportCopyChannelData, result); + dest->appendBytes(data2cpy, size2cpy); + break; - PM_UNLOCK(); + default: + break; + } - return kIOReturnSuccess; + PM_UNLOCK(); + return kIOReturnSuccess; } @@ -7217,10 +7091,11 @@ IOReturn IOService::updateSimplePowerReport( IOReportConfigureAction action, voi // Does nothing here. This should be implemented in a subclass driver. //********************************************************************************* -IOReturn IOService::setPowerState( - unsigned long powerStateOrdinal, IOService * whatDevice ) +IOReturn +IOService::setPowerState( + unsigned long powerStateOrdinal, IOService * whatDevice ) { - return IOPMNoErr; + return IOPMNoErr; } //********************************************************************************* @@ -7231,29 +7106,30 @@ IOReturn IOService::setPowerState( // possible, override this in the subclassed driver. //********************************************************************************* -IOPMPowerStateIndex IOService::getPowerStateForDomainFlags( IOPMPowerFlags flags ) +IOPMPowerStateIndex +IOService::getPowerStateForDomainFlags( IOPMPowerFlags flags ) { - IOPMPowerStateIndex stateIndex; + IOPMPowerStateIndex stateIndex; - if (!fNumberOfPowerStates) - return kPowerStateZero; + if (!fNumberOfPowerStates) { + return kPowerStateZero; + } - for ( int order = fNumberOfPowerStates - 1; order >= 0; order-- ) - { - stateIndex = fPowerStates[order].stateOrderToIndex; + for (int order = fNumberOfPowerStates - 1; order >= 0; order--) { + stateIndex = fPowerStates[order].stateOrderToIndex; - if ( (flags & fPowerStates[stateIndex].inputPowerFlags) == - fPowerStates[stateIndex].inputPowerFlags ) - { - return stateIndex; - } - } - return kPowerStateZero; + if ((flags & fPowerStates[stateIndex].inputPowerFlags) == + fPowerStates[stateIndex].inputPowerFlags) { + return stateIndex; + } + } + return kPowerStateZero; } -unsigned long IOService::maxCapabilityForDomainState( IOPMPowerFlags domainState ) +unsigned long +IOService::maxCapabilityForDomainState( IOPMPowerFlags domainState ) { - return getPowerStateForDomainFlags(domainState); + return getPowerStateForDomainFlags(domainState); } //********************************************************************************* @@ -7262,15 +7138,15 @@ unsigned long IOService::maxCapabilityForDomainState( IOPMPowerFlags domainState // Called to query the power state for the initial power transition. 
//********************************************************************************* -unsigned long IOService::initialPowerStateForDomainState( IOPMPowerFlags domainState ) +unsigned long +IOService::initialPowerStateForDomainState( IOPMPowerFlags domainState ) { - if (fResetPowerStateOnWake && (domainState & kIOPMRootDomainState)) - { - // Return lowest power state for any root power domain changes - return kPowerStateZero; - } + if (fResetPowerStateOnWake && (domainState & kIOPMRootDomainState)) { + // Return lowest power state for any root power domain changes + return kPowerStateZero; + } - return getPowerStateForDomainFlags(domainState); + return getPowerStateForDomainFlags(domainState); } //********************************************************************************* @@ -7279,9 +7155,10 @@ unsigned long IOService::initialPowerStateForDomainState( IOPMPowerFlags domainS // This method is not called from PM. //********************************************************************************* -unsigned long IOService::powerStateForDomainState( IOPMPowerFlags domainState ) +unsigned long +IOService::powerStateForDomainState( IOPMPowerFlags domainState ) { - return getPowerStateForDomainFlags(domainState); + return getPowerStateForDomainFlags(domainState); } #ifndef __LP64__ @@ -7291,9 +7168,10 @@ unsigned long IOService::powerStateForDomainState( IOPMPowerFlags domainState ) // Does nothing here. This should be implemented in a subclass driver. //********************************************************************************* -bool IOService::didYouWakeSystem( void ) +bool +IOService::didYouWakeSystem( void ) { - return false; + return false; } #endif /* !__LP64__ */ @@ -7303,9 +7181,10 @@ bool IOService::didYouWakeSystem( void ) // Does nothing here. This should be implemented in a subclass driver. //********************************************************************************* -IOReturn IOService::powerStateWillChangeTo( IOPMPowerFlags, unsigned long, IOService * ) +IOReturn +IOService::powerStateWillChangeTo( IOPMPowerFlags, unsigned long, IOService * ) { - return kIOPMAckImplied; + return kIOPMAckImplied; } //********************************************************************************* @@ -7314,9 +7193,10 @@ IOReturn IOService::powerStateWillChangeTo( IOPMPowerFlags, unsigned long, IOSer // Does nothing here. This should be implemented in a subclass driver. //********************************************************************************* -IOReturn IOService::powerStateDidChangeTo( IOPMPowerFlags, unsigned long, IOService * ) +IOReturn +IOService::powerStateDidChangeTo( IOPMPowerFlags, unsigned long, IOService * ) { - return kIOPMAckImplied; + return kIOPMAckImplied; } //********************************************************************************* @@ -7326,7 +7206,8 @@ IOReturn IOService::powerStateDidChangeTo( IOPMPowerFlags, unsigned long, IOServ // Does nothing here. This should be implemented in a subclass policy-maker. //********************************************************************************* -void IOService::powerChangeDone( unsigned long ) +void +IOService::powerChangeDone( unsigned long ) { } @@ -7337,9 +7218,10 @@ void IOService::powerChangeDone( unsigned long ) // Does nothing here. This should be implemented in a subclass driver. 
//********************************************************************************* -IOReturn IOService::newTemperature( long currentTemp, IOService * whichZone ) +IOReturn +IOService::newTemperature( long currentTemp, IOService * whichZone ) { - return IOPMNoErr; + return IOPMNoErr; } #endif /* !__LP64__ */ @@ -7349,11 +7231,13 @@ IOReturn IOService::newTemperature( long currentTemp, IOService * whichZone ) // System shutdown and restart notification. //********************************************************************************* -void IOService::systemWillShutdown( IOOptionBits specifier ) +void +IOService::systemWillShutdown( IOOptionBits specifier ) { - IOPMrootDomain * rootDomain = IOService::getPMRootDomain(); - if (rootDomain) - rootDomain->acknowledgeSystemWillShutdown( this ); + IOPMrootDomain * rootDomain = IOService::getPMRootDomain(); + if (rootDomain) { + rootDomain->acknowledgeSystemWillShutdown( this ); + } } // MARK: - @@ -7365,82 +7249,82 @@ void IOService::systemWillShutdown( IOOptionBits specifier ) IOPMRequest * IOService::acquirePMRequest( IOService * target, IOOptionBits requestType, - IOPMRequest * active ) + IOPMRequest * active ) { - IOPMRequest * request; + IOPMRequest * request; - assert(target); + assert(target); - request = IOPMRequest::create(); - if (request) - { - request->init( target, requestType ); - if (active) - { - IOPMRequest * root = active->getRootRequest(); - if (root) request->attachRootRequest(root); - } - } - else - { - PM_ERROR("%s: No memory for PM request type 0x%x\n", - target->getName(), (uint32_t) requestType); - } - return request; + request = IOPMRequest::create(); + if (request) { + request->init( target, requestType ); + if (active) { + IOPMRequest * root = active->getRootRequest(); + if (root) { + request->attachRootRequest(root); + } + } + } else { + PM_ERROR("%s: No memory for PM request type 0x%x\n", + target->getName(), (uint32_t) requestType); + } + return request; } //********************************************************************************* // [private static] releasePMRequest //********************************************************************************* -void IOService::releasePMRequest( IOPMRequest * request ) +void +IOService::releasePMRequest( IOPMRequest * request ) { - if (request) - { - request->reset(); - request->release(); - } + if (request) { + request->reset(); + request->release(); + } } //********************************************************************************* // [private static] submitPMRequest //********************************************************************************* -void IOService::submitPMRequest( IOPMRequest * request ) +void +IOService::submitPMRequest( IOPMRequest * request ) { - assert( request ); - assert( gIOPMReplyQueue ); - assert( gIOPMRequestQueue ); + assert( request ); + assert( gIOPMReplyQueue ); + assert( gIOPMRequestQueue ); - PM_LOG1("[+ %02lx] %p [%p %s] %p %p %p\n", - (long)request->getType(), OBFUSCATE(request), - OBFUSCATE(request->getTarget()), request->getTarget()->getName(), - OBFUSCATE(request->fArg0), - OBFUSCATE(request->fArg1), OBFUSCATE(request->fArg2)); + PM_LOG1("[+ %02lx] %p [%p %s] %p %p %p\n", + (long)request->getType(), OBFUSCATE(request), + OBFUSCATE(request->getTarget()), request->getTarget()->getName(), + OBFUSCATE(request->fArg0), + OBFUSCATE(request->fArg1), OBFUSCATE(request->fArg2)); - if (request->isReplyType()) - gIOPMReplyQueue->queuePMRequest( request ); - else - gIOPMRequestQueue->queuePMRequest( request ); + if 
(request->isReplyType()) { + gIOPMReplyQueue->queuePMRequest( request ); + } else { + gIOPMRequestQueue->queuePMRequest( request ); + } } -void IOService::submitPMRequests( IOPMRequest ** requests, IOItemCount count ) +void +IOService::submitPMRequests( IOPMRequest ** requests, IOItemCount count ) { - assert( requests ); - assert( count > 0 ); - assert( gIOPMRequestQueue ); + assert( requests ); + assert( count > 0 ); + assert( gIOPMRequestQueue ); - for (IOItemCount i = 0; i < count; i++) - { - IOPMRequest * req = requests[i]; - PM_LOG1("[+ %02lx] %p [%p %s] %p %p %p\n", - (long)req->getType(), OBFUSCATE(req), - OBFUSCATE(req->getTarget()), req->getTarget()->getName(), - OBFUSCATE(req->fArg0), - OBFUSCATE(req->fArg1), OBFUSCATE(req->fArg2)); - } + for (IOItemCount i = 0; i < count; i++) { + IOPMRequest * req = requests[i]; + PM_LOG1("[+ %02lx] %p [%p %s] %p %p %p\n", + (long)req->getType(), OBFUSCATE(req), + OBFUSCATE(req->getTarget()), req->getTarget()->getName(), + OBFUSCATE(req->fArg0), + OBFUSCATE(req->fArg1), OBFUSCATE(req->fArg2)); + } - gIOPMRequestQueue->queuePMRequestChain( requests, count ); + gIOPMRequestQueue->queuePMRequestChain( requests, count ); } //********************************************************************************* @@ -7449,34 +7333,34 @@ void IOService::submitPMRequests( IOPMRequest ** requests, IOItemCount count ) // IOPMRequestQueue::checkForWork() passing a new request to the request target. //********************************************************************************* -bool IOService::actionPMRequestQueue( - IOPMRequest * request, - IOPMRequestQueue * queue ) +bool +IOService::actionPMRequestQueue( + IOPMRequest * request, + IOPMRequestQueue * queue ) { - bool more; + bool more; - if (initialized) - { - // Work queue will immediately execute the request if the per-service - // request queue is empty. Note pwrMgt is the target's IOServicePM. + if (initialized) { + // Work queue will immediately execute the request if the per-service + // request queue is empty. Note pwrMgt is the target's IOServicePM. - more = gIOPMWorkQueue->queuePMRequest(request, pwrMgt); - } - else - { - // Calling PM without PMinit() is not allowed, fail the request. - // Need to signal more when completing attached requests. + more = gIOPMWorkQueue->queuePMRequest(request, pwrMgt); + } else { + // Calling PM without PMinit() is not allowed, fail the request. + // Need to signal more when completing attached requests. - PM_LOG("%s: PM not initialized\n", getName()); - PM_LOG1("[- %02x] %p [%p %s] !initialized\n", - request->getType(), OBFUSCATE(request), - OBFUSCATE(this), getName()); + PM_LOG("%s: PM not initialized\n", getName()); + PM_LOG1("[- %02x] %p [%p %s] !initialized\n", + request->getType(), OBFUSCATE(request), + OBFUSCATE(this), getName()); - more = gIOPMCompletionQueue->queuePMRequest(request); - if (more) gIOPMWorkQueue->incrementProducerCount(); - } + more = gIOPMCompletionQueue->queuePMRequest(request); + if (more) { + gIOPMWorkQueue->incrementProducerCount(); + } + } - return more; + return more; } //********************************************************************************* @@ -7486,20 +7370,23 @@ bool IOService::actionPMRequestQueue( // request target. 
//********************************************************************************* -bool IOService::actionPMCompletionQueue( - IOPMRequest * request, - IOPMCompletionQueue * queue ) +bool +IOService::actionPMCompletionQueue( + IOPMRequest * request, + IOPMCompletionQueue * queue ) { - bool more = (request->getNextRequest() != 0); - IOPMRequest * root = request->getRootRequest(); + bool more = (request->getNextRequest() != 0); + IOPMRequest * root = request->getRootRequest(); - if (root && (root != request)) - more = true; - if (more) - gIOPMWorkQueue->incrementProducerCount(); + if (root && (root != request)) { + more = true; + } + if (more) { + gIOPMWorkQueue->incrementProducerCount(); + } - releasePMRequest( request ); - return more; + releasePMRequest( request ); + return more; } //********************************************************************************* @@ -7508,36 +7395,33 @@ bool IOService::actionPMCompletionQueue( // IOPMWorkQueue::checkForWork() passing a retired request to the request target. //********************************************************************************* -bool IOService::actionPMWorkQueueRetire( IOPMRequest * request, IOPMWorkQueue * queue ) +bool +IOService::actionPMWorkQueueRetire( IOPMRequest * request, IOPMWorkQueue * queue ) { - assert(request && queue); + assert(request && queue); + + PM_LOG1("[- %02x] %p [%p %s] state %d, busy %d\n", + request->getType(), OBFUSCATE(request), + OBFUSCATE(this), getName(), + fMachineState, gIOPMBusyRequestCount); - PM_LOG1("[- %02x] %p [%p %s] state %d, busy %d\n", - request->getType(), OBFUSCATE(request), - OBFUSCATE(this), getName(), - fMachineState, gIOPMBusyRequestCount); + // Catch requests created by idleTimerExpired() + if (request->getType() == kIOPMRequestTypeActivityTickle) { + uint32_t tickleFlags = (uint32_t)(uintptr_t) request->fArg1; - // Catch requests created by idleTimerExpired() - if (request->getType() == kIOPMRequestTypeActivityTickle) - { - uint32_t tickleFlags = (uint32_t)(uintptr_t) request->fArg1; + if ((tickleFlags & kTickleTypePowerDrop) && fIdleTimerPeriod) { + restartIdleTimer(); + } else if (tickleFlags == (kTickleTypeActivity | kTickleTypePowerRise)) { + // Invalidate any idle power drop that got queued while + // processing this request. + fIdleTimerGeneration++; + } + } - if ((tickleFlags & kTickleTypePowerDrop) && fIdleTimerPeriod) - { - restartIdleTimer(); - } - else if (tickleFlags == (kTickleTypeActivity | kTickleTypePowerRise)) - { - // Invalidate any idle power drop that got queued while - // processing this request. - fIdleTimerGeneration++; - } - } - - // When the completed request is linked, tell work queue there is - // more work pending. + // When the completed request is linked, tell work queue there is + // more work pending. - return (gIOPMCompletionQueue->queuePMRequest( request )); + return gIOPMCompletionQueue->queuePMRequest( request ); } //********************************************************************************* @@ -7546,66 +7430,62 @@ bool IOService::actionPMWorkQueueRetire( IOPMRequest * request, IOPMWorkQueue * // Check if machine state transition is blocked. 
//********************************************************************************* -bool IOService::isPMBlocked( IOPMRequest * request, int count ) +bool +IOService::isPMBlocked( IOPMRequest * request, int count ) { - int reason = 0; + int reason = 0; - do { - if (kIOPM_Finished == fMachineState) - break; + do { + if (kIOPM_Finished == fMachineState) { + break; + } - if (kIOPM_DriverThreadCallDone == fMachineState) - { - // 5 = kDriverCallInformPreChange - // 6 = kDriverCallInformPostChange - // 7 = kDriverCallSetPowerState - // 8 = kRootDomainInformPreChange - if (fDriverCallBusy) - reason = 5 + fDriverCallReason; - break; - } + if (kIOPM_DriverThreadCallDone == fMachineState) { + // 5 = kDriverCallInformPreChange + // 6 = kDriverCallInformPostChange + // 7 = kDriverCallSetPowerState + // 8 = kRootDomainInformPreChange + if (fDriverCallBusy) { + reason = 5 + fDriverCallReason; + } + break; + } - // Waiting on driver's setPowerState() timeout. - if (fDriverTimer) - { - reason = 1; break; - } + // Waiting on driver's setPowerState() timeout. + if (fDriverTimer) { + reason = 1; break; + } - // Child or interested driver acks pending. - if (fHeadNotePendingAcks) - { - reason = 2; break; - } + // Child or interested driver acks pending. + if (fHeadNotePendingAcks) { + reason = 2; break; + } - // Waiting on apps or priority power interest clients. - if (fResponseArray) - { - reason = 3; break; - } + // Waiting on apps or priority power interest clients. + if (fResponseArray) { + reason = 3; break; + } - // Waiting on settle timer expiration. - if (fSettleTimeUS) - { - reason = 4; break; - } - } while (false); + // Waiting on settle timer expiration. + if (fSettleTimeUS) { + reason = 4; break; + } + } while (false); - fWaitReason = reason; + fWaitReason = reason; - if (reason) - { - if (count) - { - PM_LOG1("[B %02x] %p [%p %s] state %d, reason %d\n", - request->getType(), OBFUSCATE(request), - OBFUSCATE(this), getName(), - fMachineState, reason); - } + if (reason) { + if (count) { + PM_LOG1("[B %02x] %p [%p %s] state %d, reason %d\n", + request->getType(), OBFUSCATE(request), + OBFUSCATE(this), getName(), + fMachineState, reason); + } - return true; - } + return true; + } - return false; + return false; } //********************************************************************************* @@ -7615,399 +7495,372 @@ bool IOService::isPMBlocked( IOPMRequest * request, int count ) // request target for execution. //********************************************************************************* -bool IOService::actionPMWorkQueueInvoke( IOPMRequest * request, IOPMWorkQueue * queue ) -{ - bool done = false; - int loop = 0; - - assert(request && queue); - - while (isPMBlocked(request, loop++) == false) - { - PM_LOG1("[W %02x] %p [%p %s] state %d\n", - request->getType(), OBFUSCATE(request), - OBFUSCATE(this), getName(), fMachineState); - - gIOPMRequest = request; - gIOPMWorkInvokeCount++; - - // Every PM machine states must be handled in one of the cases below. - - switch ( fMachineState ) - { - case kIOPM_Finished: - start_watchdog_timer(); - - executePMRequest( request ); - break; - - case kIOPM_OurChangeTellClientsPowerDown: - // Root domain might self cancel due to assertions. - if (IS_ROOT_DOMAIN) - { - bool cancel = (bool) fDoNotPowerDown; - getPMRootDomain()->askChangeDownDone( - &fHeadNoteChangeFlags, &cancel); - fDoNotPowerDown = cancel; - } - - // askChangeDown() done, was it vetoed? 
- if (!fDoNotPowerDown) - { - // no, we can continue - OurChangeTellClientsPowerDown(); - } - else - { - OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); - PM_ERROR("%s: idle cancel, state %u\n", fName, fMachineState); - if (IS_ROOT_DOMAIN) { - // RootDomain already sent "WillSleep" to its clients - tellChangeUp(fCurrentPowerState); - } - else { - tellNoChangeDown(fHeadNotePowerState); - } - // mark the change note un-actioned - fHeadNoteChangeFlags |= kIOPMNotDone; - // and we're done - OurChangeFinish(); - } - break; - - case kIOPM_OurChangeTellUserPMPolicyPowerDown: - // PMRD: tellChangeDown/kNotifyApps done, was it cancelled? - if (fDoNotPowerDown) - { - OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); - PM_ERROR("%s: idle cancel, state %u\n", fName, fMachineState); - if (IS_ROOT_DOMAIN) { - // RootDomain already sent "WillSleep" to its clients - tellChangeUp(fCurrentPowerState); - } - else { - tellNoChangeDown(fHeadNotePowerState); - } - // mark the change note un-actioned - fHeadNoteChangeFlags |= kIOPMNotDone; - // and we're done - OurChangeFinish(); - } - else - OurChangeTellUserPMPolicyPowerDown(); - break; - - case kIOPM_OurChangeTellPriorityClientsPowerDown: - // PMRD: LastCallBeforeSleep notify done - // Non-PMRD: tellChangeDown/kNotifyApps done - if (fDoNotPowerDown) - { - OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); - PM_ERROR("%s: idle revert, state %u\n", fName, fMachineState); - // no, tell clients we're back in the old state - tellChangeUp(fCurrentPowerState); - // mark the change note un-actioned - fHeadNoteChangeFlags |= kIOPMNotDone; - // and we're done - OurChangeFinish(); - } - else - { - // yes, we can continue - OurChangeTellPriorityClientsPowerDown(); - } - break; - - case kIOPM_OurChangeNotifyInterestedDriversWillChange: - OurChangeNotifyInterestedDriversWillChange(); - break; - - case kIOPM_OurChangeSetPowerState: - OurChangeSetPowerState(); - break; - - case kIOPM_OurChangeWaitForPowerSettle: - OurChangeWaitForPowerSettle(); - break; - - case kIOPM_OurChangeNotifyInterestedDriversDidChange: - OurChangeNotifyInterestedDriversDidChange(); - break; - - case kIOPM_OurChangeTellCapabilityDidChange: - OurChangeTellCapabilityDidChange(); - break; - - case kIOPM_OurChangeFinish: - OurChangeFinish(); - break; - - case kIOPM_ParentChangeTellPriorityClientsPowerDown: - ParentChangeTellPriorityClientsPowerDown(); - break; - - case kIOPM_ParentChangeNotifyInterestedDriversWillChange: - ParentChangeNotifyInterestedDriversWillChange(); - break; - - case kIOPM_ParentChangeSetPowerState: - ParentChangeSetPowerState(); - break; - - case kIOPM_ParentChangeWaitForPowerSettle: - ParentChangeWaitForPowerSettle(); - break; - - case kIOPM_ParentChangeNotifyInterestedDriversDidChange: - ParentChangeNotifyInterestedDriversDidChange(); - break; - - case kIOPM_ParentChangeTellCapabilityDidChange: - ParentChangeTellCapabilityDidChange(); - break; - - case kIOPM_ParentChangeAcknowledgePowerChange: - ParentChangeAcknowledgePowerChange(); - break; - - case kIOPM_DriverThreadCallDone: - switch (fDriverCallReason) - { - case kDriverCallInformPreChange: - case kDriverCallInformPostChange: - notifyInterestedDriversDone(); - break; - case kDriverCallSetPowerState: - notifyControllingDriverDone(); - break; - case kRootDomainInformPreChange: - notifyRootDomainDone(); - break; - default: - panic("%s: bad call reason %x", - getName(), fDriverCallReason); - } - break; - - case kIOPM_NotifyChildrenOrdered: - notifyChildrenOrdered(); - break; - - case 
kIOPM_NotifyChildrenDelayed:
- notifyChildrenDelayed();
- break;
-
- case kIOPM_NotifyChildrenStart:
- // pop notifyAll() state saved by notifyInterestedDriversDone()
- MS_POP();
- notifyRootDomain();
- break;
-
- case kIOPM_SyncTellClientsPowerDown:
- // Root domain might self cancel due to assertions.
- if (IS_ROOT_DOMAIN)
- {
- bool cancel = (bool) fDoNotPowerDown;
- getPMRootDomain()->askChangeDownDone(
- &fHeadNoteChangeFlags, &cancel);
- fDoNotPowerDown = cancel;
- }
- if (!fDoNotPowerDown)
- {
- fMachineState = kIOPM_SyncTellPriorityClientsPowerDown;
- fOutOfBandParameter = kNotifyApps;
- tellChangeDown(fHeadNotePowerState);
- }
- else
- {
- // Cancelled by IOPMrootDomain::askChangeDownDone() or
- // askChangeDown/kNotifyApps
- OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState);
- PM_ERROR("%s: idle cancel, state %u\n", fName, fMachineState);
- tellNoChangeDown(fHeadNotePowerState);
- fHeadNoteChangeFlags |= kIOPMNotDone;
- OurChangeFinish();
- }
- break;
-
- case kIOPM_SyncTellPriorityClientsPowerDown:
- // PMRD: tellChangeDown/kNotifyApps done, was it cancelled?
- if (!fDoNotPowerDown)
- {
- fMachineState = kIOPM_SyncNotifyWillChange;
- fOutOfBandParameter = kNotifyPriority;
- tellChangeDown(fHeadNotePowerState);
- }
- else
- {
- OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState);
- PM_ERROR("%s: idle revert, state %u\n", fName, fMachineState);
- tellChangeUp(fCurrentPowerState);
- fHeadNoteChangeFlags |= kIOPMNotDone;
- OurChangeFinish();
- }
- break;
-
- case kIOPM_SyncNotifyWillChange:
- if (kIOPMSyncNoChildNotify & fHeadNoteChangeFlags)
- {
- fMachineState = kIOPM_SyncFinish;
- continue;
- }
- fMachineState = kIOPM_SyncNotifyDidChange;
- fDriverCallReason = kDriverCallInformPreChange;
- notifyChildren();
- break;
-
- case kIOPM_SyncNotifyDidChange:
- fIsPreChange = false;
-
- if (fHeadNoteChangeFlags & kIOPMParentInitiated)
- {
- fMachineState = kIOPM_SyncFinish;
- }
- else
- {
- assert(IS_ROOT_DOMAIN);
- fMachineState = kIOPM_SyncTellCapabilityDidChange;
- }
-
- fDriverCallReason = kDriverCallInformPostChange;
- notifyChildren();
- break;
-
- case kIOPM_SyncTellCapabilityDidChange:
- tellSystemCapabilityChange( kIOPM_SyncFinish );
- break;
-
- case kIOPM_SyncFinish:
- if (fHeadNoteChangeFlags & kIOPMParentInitiated)
- ParentChangeAcknowledgePowerChange();
- else
- OurChangeFinish();
- break;
-
- case kIOPM_TellCapabilityChangeDone:
- if (fIsPreChange)
- {
- if (fOutOfBandParameter == kNotifyCapabilityChangePriority)
- {
- MS_POP(); // tellSystemCapabilityChange()
- continue;
- }
- fOutOfBandParameter = kNotifyCapabilityChangePriority;
- }
- else
- {
- if (fOutOfBandParameter == kNotifyCapabilityChangeApps)
- {
- MS_POP(); // tellSystemCapabilityChange()
- continue;
- }
- fOutOfBandParameter = kNotifyCapabilityChangeApps;
- }
- tellClientsWithResponse( fOutOfBandMessage );
- break;
-
- default:
- panic("PMWorkQueueInvoke: unknown machine state %x",
- fMachineState);
- }
-
- gIOPMRequest = 0;
-
- if (fMachineState == kIOPM_Finished)
- {
- stop_watchdog_timer();
- done = true;
- break;
- }
- }
-
- return done;
+bool
+IOService::actionPMWorkQueueInvoke( IOPMRequest * request, IOPMWorkQueue * queue )
+{
+ bool done = false;
+ int loop = 0;
+
+ assert(request && queue);
+
+ while (isPMBlocked(request, loop++) == false) {
+ PM_LOG1("[W %02x] %p [%p %s] state %d\n",
+ request->getType(), OBFUSCATE(request),
+ OBFUSCATE(this), getName(), fMachineState);
+
+ gIOPMRequest = request;
+ gIOPMWorkInvokeCount++;
+
+ // Every PM machine state must be handled in one of
the cases below. + + switch (fMachineState) { + case kIOPM_Finished: + start_watchdog_timer(); + + executePMRequest( request ); + break; + + case kIOPM_OurChangeTellClientsPowerDown: + // Root domain might self cancel due to assertions. + if (IS_ROOT_DOMAIN) { + bool cancel = (bool) fDoNotPowerDown; + getPMRootDomain()->askChangeDownDone( + &fHeadNoteChangeFlags, &cancel); + fDoNotPowerDown = cancel; + } + + // askChangeDown() done, was it vetoed? + if (!fDoNotPowerDown) { + // no, we can continue + OurChangeTellClientsPowerDown(); + } else { + OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); + PM_ERROR("%s: idle cancel, state %u\n", fName, fMachineState); + if (IS_ROOT_DOMAIN) { + // RootDomain already sent "WillSleep" to its clients + tellChangeUp(fCurrentPowerState); + } else { + tellNoChangeDown(fHeadNotePowerState); + } + // mark the change note un-actioned + fHeadNoteChangeFlags |= kIOPMNotDone; + // and we're done + OurChangeFinish(); + } + break; + + case kIOPM_OurChangeTellUserPMPolicyPowerDown: + // PMRD: tellChangeDown/kNotifyApps done, was it cancelled? + if (fDoNotPowerDown) { + OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); + PM_ERROR("%s: idle cancel, state %u\n", fName, fMachineState); + if (IS_ROOT_DOMAIN) { + // RootDomain already sent "WillSleep" to its clients + tellChangeUp(fCurrentPowerState); + } else { + tellNoChangeDown(fHeadNotePowerState); + } + // mark the change note un-actioned + fHeadNoteChangeFlags |= kIOPMNotDone; + // and we're done + OurChangeFinish(); + } else { + OurChangeTellUserPMPolicyPowerDown(); + } + break; + + case kIOPM_OurChangeTellPriorityClientsPowerDown: + // PMRD: LastCallBeforeSleep notify done + // Non-PMRD: tellChangeDown/kNotifyApps done + if (fDoNotPowerDown) { + OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); + PM_ERROR("%s: idle revert, state %u\n", fName, fMachineState); + // no, tell clients we're back in the old state + tellChangeUp(fCurrentPowerState); + // mark the change note un-actioned + fHeadNoteChangeFlags |= kIOPMNotDone; + // and we're done + OurChangeFinish(); + } else { + // yes, we can continue + OurChangeTellPriorityClientsPowerDown(); + } + break; + + case kIOPM_OurChangeNotifyInterestedDriversWillChange: + OurChangeNotifyInterestedDriversWillChange(); + break; + + case kIOPM_OurChangeSetPowerState: + OurChangeSetPowerState(); + break; + + case kIOPM_OurChangeWaitForPowerSettle: + OurChangeWaitForPowerSettle(); + break; + + case kIOPM_OurChangeNotifyInterestedDriversDidChange: + OurChangeNotifyInterestedDriversDidChange(); + break; + + case kIOPM_OurChangeTellCapabilityDidChange: + OurChangeTellCapabilityDidChange(); + break; + + case kIOPM_OurChangeFinish: + OurChangeFinish(); + break; + + case kIOPM_ParentChangeTellPriorityClientsPowerDown: + ParentChangeTellPriorityClientsPowerDown(); + break; + + case kIOPM_ParentChangeNotifyInterestedDriversWillChange: + ParentChangeNotifyInterestedDriversWillChange(); + break; + + case kIOPM_ParentChangeSetPowerState: + ParentChangeSetPowerState(); + break; + + case kIOPM_ParentChangeWaitForPowerSettle: + ParentChangeWaitForPowerSettle(); + break; + + case kIOPM_ParentChangeNotifyInterestedDriversDidChange: + ParentChangeNotifyInterestedDriversDidChange(); + break; + + case kIOPM_ParentChangeTellCapabilityDidChange: + ParentChangeTellCapabilityDidChange(); + break; + + case kIOPM_ParentChangeAcknowledgePowerChange: + ParentChangeAcknowledgePowerChange(); + break; + + case kIOPM_DriverThreadCallDone: + switch (fDriverCallReason) { + case 
kDriverCallInformPreChange: + case kDriverCallInformPostChange: + notifyInterestedDriversDone(); + break; + case kDriverCallSetPowerState: + notifyControllingDriverDone(); + break; + case kRootDomainInformPreChange: + notifyRootDomainDone(); + break; + default: + panic("%s: bad call reason %x", + getName(), fDriverCallReason); + } + break; + + case kIOPM_NotifyChildrenOrdered: + notifyChildrenOrdered(); + break; + + case kIOPM_NotifyChildrenDelayed: + notifyChildrenDelayed(); + break; + + case kIOPM_NotifyChildrenStart: + // pop notifyAll() state saved by notifyInterestedDriversDone() + MS_POP(); + notifyRootDomain(); + break; + + case kIOPM_SyncTellClientsPowerDown: + // Root domain might self cancel due to assertions. + if (IS_ROOT_DOMAIN) { + bool cancel = (bool) fDoNotPowerDown; + getPMRootDomain()->askChangeDownDone( + &fHeadNoteChangeFlags, &cancel); + fDoNotPowerDown = cancel; + } + if (!fDoNotPowerDown) { + fMachineState = kIOPM_SyncTellPriorityClientsPowerDown; + fOutOfBandParameter = kNotifyApps; + tellChangeDown(fHeadNotePowerState); + } else { + // Cancelled by IOPMrootDomain::askChangeDownDone() or + // askChangeDown/kNotifyApps + OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); + PM_ERROR("%s: idle cancel, state %u\n", fName, fMachineState); + tellNoChangeDown(fHeadNotePowerState); + fHeadNoteChangeFlags |= kIOPMNotDone; + OurChangeFinish(); + } + break; + + case kIOPM_SyncTellPriorityClientsPowerDown: + // PMRD: tellChangeDown/kNotifyApps done, was it cancelled? + if (!fDoNotPowerDown) { + fMachineState = kIOPM_SyncNotifyWillChange; + fOutOfBandParameter = kNotifyPriority; + tellChangeDown(fHeadNotePowerState); + } else { + OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); + PM_ERROR("%s: idle revert, state %u\n", fName, fMachineState); + tellChangeUp(fCurrentPowerState); + fHeadNoteChangeFlags |= kIOPMNotDone; + OurChangeFinish(); + } + break; + + case kIOPM_SyncNotifyWillChange: + if (kIOPMSyncNoChildNotify & fHeadNoteChangeFlags) { + fMachineState = kIOPM_SyncFinish; + continue; + } + fMachineState = kIOPM_SyncNotifyDidChange; + fDriverCallReason = kDriverCallInformPreChange; + notifyChildren(); + break; + + case kIOPM_SyncNotifyDidChange: + fIsPreChange = false; + + if (fHeadNoteChangeFlags & kIOPMParentInitiated) { + fMachineState = kIOPM_SyncFinish; + } else { + assert(IS_ROOT_DOMAIN); + fMachineState = kIOPM_SyncTellCapabilityDidChange; + } + + fDriverCallReason = kDriverCallInformPostChange; + notifyChildren(); + break; + + case kIOPM_SyncTellCapabilityDidChange: + tellSystemCapabilityChange( kIOPM_SyncFinish ); + break; + + case kIOPM_SyncFinish: + if (fHeadNoteChangeFlags & kIOPMParentInitiated) { + ParentChangeAcknowledgePowerChange(); + } else { + OurChangeFinish(); + } + break; + + case kIOPM_TellCapabilityChangeDone: + if (fIsPreChange) { + if (fOutOfBandParameter == kNotifyCapabilityChangePriority) { + MS_POP(); // tellSystemCapabilityChange() + continue; + } + fOutOfBandParameter = kNotifyCapabilityChangePriority; + } else { + if (fOutOfBandParameter == kNotifyCapabilityChangeApps) { + MS_POP(); // tellSystemCapabilityChange() + continue; + } + fOutOfBandParameter = kNotifyCapabilityChangeApps; + } + tellClientsWithResponse( fOutOfBandMessage ); + break; + + default: + panic("PMWorkQueueInvoke: unknown machine state %x", + fMachineState); + } + + gIOPMRequest = 0; + + if (fMachineState == kIOPM_Finished) { + stop_watchdog_timer(); + done = true; + break; + } + } + + return done; } 
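The machine-state loop above only runs once a request reaches the head of its service's queue; the requests themselves are produced by public IOService power-management calls that return quickly on the caller's thread. A minimal sketch of that producer side, assuming a hypothetical MyDriver subclass; gMyPowerStates, kMyNumPowerStates, and kMyStateOn are illustrative names, and the call-to-request-type mapping is inferred from the handlers in executePMRequest() below:

bool
MyDriver::initPowerManagement( IOService * provider )
{
    PMinit();                       // allocate pwrMgt (the IOServicePM state)
    provider->joinPMtree( this );   // attach this service to the power plane

    // Queues kIOPMRequestTypeRegisterPowerDriver for the PM work loop:
    registerPowerDriver( this, gMyPowerStates, kMyNumPowerStates );

    // Queues kIOPMRequestTypeSetIdleTimerPeriod (30 second idle timer):
    setIdleTimerPeriod( 30 );

    // Queues kIOPMRequestTypeRequestPowerState:
    changePowerStateTo( kMyStateOn );

    return true;
}

Each call enqueues an IOPMRequest and returns; the actual work happens later on the PM work loop, when actionPMWorkQueueInvoke() finds the service unblocked and executePMRequest() dispatches the request type.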
//********************************************************************************* // [private] executePMRequest //********************************************************************************* -void IOService::executePMRequest( IOPMRequest * request ) -{ - assert( kIOPM_Finished == fMachineState ); - - switch (request->getType()) - { - case kIOPMRequestTypePMStop: - handlePMstop( request ); - break; - - case kIOPMRequestTypeAddPowerChild1: - addPowerChild1( request ); - break; - - case kIOPMRequestTypeAddPowerChild2: - addPowerChild2( request ); - break; - - case kIOPMRequestTypeAddPowerChild3: - addPowerChild3( request ); - break; - - case kIOPMRequestTypeRegisterPowerDriver: - handleRegisterPowerDriver( request ); - break; - - case kIOPMRequestTypeAdjustPowerState: - fAdjustPowerScheduled = false; - adjustPowerState(); - break; - - case kIOPMRequestTypePowerDomainWillChange: - handlePowerDomainWillChangeTo( request ); - break; - - case kIOPMRequestTypePowerDomainDidChange: - handlePowerDomainDidChangeTo( request ); - break; - - case kIOPMRequestTypeRequestPowerState: - case kIOPMRequestTypeRequestPowerStateOverride: - handleRequestPowerState( request ); - break; - - case kIOPMRequestTypePowerOverrideOnPriv: - case kIOPMRequestTypePowerOverrideOffPriv: - handlePowerOverrideChanged( request ); - break; - - case kIOPMRequestTypeActivityTickle: - handleActivityTickle( request ); - break; - - case kIOPMRequestTypeSynchronizePowerTree: - handleSynchronizePowerTree( request ); - break; - - case kIOPMRequestTypeSetIdleTimerPeriod: - { - fIdleTimerPeriod = (uintptr_t) request->fArg0; - fNextIdleTimerPeriod = fIdleTimerPeriod; - if ((false == fLockedFlags.PMStop) && (fIdleTimerPeriod > 0)) - restartIdleTimer(); - } - break; +void +IOService::executePMRequest( IOPMRequest * request ) +{ + assert( kIOPM_Finished == fMachineState ); + + switch (request->getType()) { + case kIOPMRequestTypePMStop: + handlePMstop( request ); + break; + + case kIOPMRequestTypeAddPowerChild1: + addPowerChild1( request ); + break; + + case kIOPMRequestTypeAddPowerChild2: + addPowerChild2( request ); + break; + + case kIOPMRequestTypeAddPowerChild3: + addPowerChild3( request ); + break; + + case kIOPMRequestTypeRegisterPowerDriver: + handleRegisterPowerDriver( request ); + break; + + case kIOPMRequestTypeAdjustPowerState: + fAdjustPowerScheduled = false; + adjustPowerState(); + break; + + case kIOPMRequestTypePowerDomainWillChange: + handlePowerDomainWillChangeTo( request ); + break; + + case kIOPMRequestTypePowerDomainDidChange: + handlePowerDomainDidChangeTo( request ); + break; + + case kIOPMRequestTypeRequestPowerState: + case kIOPMRequestTypeRequestPowerStateOverride: + handleRequestPowerState( request ); + break; + + case kIOPMRequestTypePowerOverrideOnPriv: + case kIOPMRequestTypePowerOverrideOffPriv: + handlePowerOverrideChanged( request ); + break; + + case kIOPMRequestTypeActivityTickle: + handleActivityTickle( request ); + break; + + case kIOPMRequestTypeSynchronizePowerTree: + handleSynchronizePowerTree( request ); + break; + + case kIOPMRequestTypeSetIdleTimerPeriod: + { + fIdleTimerPeriod = (uintptr_t) request->fArg0; + fNextIdleTimerPeriod = fIdleTimerPeriod; + if ((false == fLockedFlags.PMStop) && (fIdleTimerPeriod > 0)) { + restartIdleTimer(); + } + } + break; - case kIOPMRequestTypeIgnoreIdleTimer: - fIdleTimerIgnored = request->fArg0 ? 1 : 0; - break; + case kIOPMRequestTypeIgnoreIdleTimer: + fIdleTimerIgnored = request->fArg0 ? 
1 : 0; + break; - case kIOPMRequestTypeQuiescePowerTree: - gIOPMWorkQueue->finishQuiesceRequest(request); - break; + case kIOPMRequestTypeQuiescePowerTree: + gIOPMWorkQueue->finishQuiesceRequest(request); + break; - default: - panic("executePMRequest: unknown request type %x", request->getType()); - } + default: + panic("executePMRequest: unknown request type %x", request->getType()); + } } //********************************************************************************* @@ -8017,321 +7870,311 @@ void IOService::executePMRequest( IOPMRequest * request ) // request target. //********************************************************************************* -bool IOService::actionPMReplyQueue( IOPMRequest * request, IOPMRequestQueue * queue ) -{ - bool more = false; - - assert( request && queue ); - assert( request->isReplyType() ); - - PM_LOG1("[A %02x] %p [%p %s] state %d\n", - request->getType(), OBFUSCATE(request), - OBFUSCATE(this), getName(), fMachineState); - - switch ( request->getType() ) - { - case kIOPMRequestTypeAllowPowerChange: - case kIOPMRequestTypeCancelPowerChange: - // Check if we are expecting this response. - if (responseValid((uint32_t)(uintptr_t) request->fArg0, - (int)(uintptr_t) request->fArg1)) - { - if (kIOPMRequestTypeCancelPowerChange == request->getType()) - { - // Clients are not allowed to cancel when kIOPMSkipAskPowerDown - // flag is set. Only root domain will set this flag. - // However, there is one exception to this rule. User-space PM - // policy may choose to cancel sleep even after all clients have - // been notified that we will lower power. - - if ((fMachineState == kIOPM_OurChangeTellUserPMPolicyPowerDown) - || (fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown) - || ((fHeadNoteChangeFlags & kIOPMSkipAskPowerDown) == 0)) - { - fDoNotPowerDown = true; - - OSString * name = (OSString *) request->fArg2; - getPMRootDomain()->pmStatsRecordApplicationResponse( - gIOPMStatsResponseCancel, - name ? name->getCStringNoCopy() : "", 0, - 0, (int)(uintptr_t) request->fArg1, 0); - } - } - - if (checkForDone()) - { - stop_ack_timer(); - cleanClientResponses(false); - more = true; - } - } - // OSString containing app name in Arg2 must be released. - if (request->getType() == kIOPMRequestTypeCancelPowerChange) - { - OSObject * obj = (OSObject *) request->fArg2; - if (obj) obj->release(); - } - break; - - case kIOPMRequestTypeAckPowerChange: - more = handleAcknowledgePowerChange( request ); - break; - - case kIOPMRequestTypeAckSetPowerState: - if (fDriverTimer == -1) - { - // driver acked while setPowerState() call is in-flight. - // take this ack, return value from setPowerState() is irrelevant. 
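The Allow/Cancel replies dispatched in the function that follows typically originate in user space, from a process registered for sleep notifications; each IOAllowPowerChange() or IOCancelPowerChange() call arrives here as a kIOPMRequestTypeAllowPowerChange or kIOPMRequestTypeCancelPowerChange request. A minimal user-space sketch of that producer, following the documented IOKit registration pattern (error handling omitted):

#include <IOKit/pwr_mgt/IOPMLib.h>
#include <IOKit/IOMessage.h>

static io_connect_t gRootPort;   // returned by IORegisterForSystemPower()

static void
SleepCallback( void * refcon, io_service_t service,
    natural_t messageType, void * messageArgument )
{
    switch (messageType) {
    case kIOMessageCanSystemSleep:
        // Idle sleep may be vetoed with IOCancelPowerChange(); here we allow it.
        IOAllowPowerChange( gRootPort, (long) messageArgument );
        break;

    case kIOMessageSystemWillSleep:
        // Forced sleep cannot be cancelled; acknowledge before the timeout.
        IOAllowPowerChange( gRootPort, (long) messageArgument );
        break;
    }
}

// Registration, done once on a thread with a CFRunLoop:
//   IONotificationPortRef notifyPort;
//   io_object_t           notifier;
//   gRootPort = IORegisterForSystemPower( NULL, &notifyPort, SleepCallback, &notifier );
//   CFRunLoopAddSource( CFRunLoopGetCurrent(),
//       IONotificationPortGetRunLoopSource( notifyPort ), kCFRunLoopCommonModes );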
- OUR_PMLog(kPMLogDriverAcknowledgeSet, - (uintptr_t) this, fDriverTimer); - fDriverTimer = 0; - } - else if (fDriverTimer > 0) - { - // expected ack, stop the timer - stop_ack_timer(); - - getPMRootDomain()->reset_watchdog_timer(this, 0); - - uint64_t nsec = computeTimeDeltaNS(&fDriverCallStartTime); - if (nsec > LOG_SETPOWER_TIMES) { - getPMRootDomain()->pmStatsRecordApplicationResponse( - gIOPMStatsDriverPSChangeSlow, - fName, kDriverCallSetPowerState, NS_TO_MS(nsec), getRegistryEntryID(), - NULL, fHeadNotePowerState); - } - - OUR_PMLog(kPMLogDriverAcknowledgeSet, (uintptr_t) this, fDriverTimer); - fDriverTimer = 0; - more = true; - } - else - { - // unexpected ack - OUR_PMLog(kPMLogAcknowledgeErr4, (uintptr_t) this, 0); - } - break; - - case kIOPMRequestTypeInterestChanged: - handleInterestChanged( request ); - more = true; - break; - - case kIOPMRequestTypeIdleCancel: - if ((fMachineState == kIOPM_OurChangeTellClientsPowerDown) - || (fMachineState == kIOPM_OurChangeTellUserPMPolicyPowerDown) - || (fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown) - || (fMachineState == kIOPM_SyncTellClientsPowerDown) - || (fMachineState == kIOPM_SyncTellPriorityClientsPowerDown)) - { - OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState); - PM_LOG2("%s: cancel from machine state %d\n", - getName(), fMachineState); - fDoNotPowerDown = true; - // Stop waiting for app replys. - if ((fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown) || - (fMachineState == kIOPM_OurChangeTellUserPMPolicyPowerDown) || - (fMachineState == kIOPM_SyncTellPriorityClientsPowerDown) || - (fMachineState == kIOPM_SyncTellClientsPowerDown) ) - cleanClientResponses(false); - more = true; - } - break; - - case kIOPMRequestTypeChildNotifyDelayCancel: - if (fMachineState == kIOPM_NotifyChildrenDelayed) - { - PM_LOG2("%s: delay notify cancelled\n", getName()); - notifyChildrenDelayed(); - } - break; - - default: - panic("PMReplyQueue: unknown reply type %x", request->getType()); - } - - more |= gIOPMCompletionQueue->queuePMRequest(request); - if (more) - gIOPMWorkQueue->incrementProducerCount(); - - return more; +bool +IOService::actionPMReplyQueue( IOPMRequest * request, IOPMRequestQueue * queue ) +{ + bool more = false; + + assert( request && queue ); + assert( request->isReplyType()); + + PM_LOG1("[A %02x] %p [%p %s] state %d\n", + request->getType(), OBFUSCATE(request), + OBFUSCATE(this), getName(), fMachineState); + + switch (request->getType()) { + case kIOPMRequestTypeAllowPowerChange: + case kIOPMRequestTypeCancelPowerChange: + // Check if we are expecting this response. + if (responseValid((uint32_t)(uintptr_t) request->fArg0, + (int)(uintptr_t) request->fArg1)) { + if (kIOPMRequestTypeCancelPowerChange == request->getType()) { + // Clients are not allowed to cancel when kIOPMSkipAskPowerDown + // flag is set. Only root domain will set this flag. + // However, there is one exception to this rule. User-space PM + // policy may choose to cancel sleep even after all clients have + // been notified that we will lower power. + + if ((fMachineState == kIOPM_OurChangeTellUserPMPolicyPowerDown) + || (fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown) + || ((fHeadNoteChangeFlags & kIOPMSkipAskPowerDown) == 0)) { + fDoNotPowerDown = true; + + OSString * name = (OSString *) request->fArg2; + getPMRootDomain()->pmStatsRecordApplicationResponse( + gIOPMStatsResponseCancel, + name ? 
name->getCStringNoCopy() : "", 0,
+ 0, (int)(uintptr_t) request->fArg1, 0);
+ }
+ }
+
+ if (checkForDone()) {
+ stop_ack_timer();
+ cleanClientResponses(false);
+ more = true;
+ }
+ }
+ // OSString containing app name in Arg2 must be released.
+ if (request->getType() == kIOPMRequestTypeCancelPowerChange) {
+ OSObject * obj = (OSObject *) request->fArg2;
+ if (obj) {
+ obj->release();
+ }
+ }
+ break;
+
+ case kIOPMRequestTypeAckPowerChange:
+ more = handleAcknowledgePowerChange( request );
+ break;
+
+ case kIOPMRequestTypeAckSetPowerState:
+ if (fDriverTimer == -1) {
+ // driver acked while setPowerState() call is in-flight.
+ // take this ack, return value from setPowerState() is irrelevant.
+ OUR_PMLog(kPMLogDriverAcknowledgeSet,
+ (uintptr_t) this, fDriverTimer);
+ fDriverTimer = 0;
+ } else if (fDriverTimer > 0) {
+ // expected ack, stop the timer
+ stop_ack_timer();
+
+ getPMRootDomain()->reset_watchdog_timer(this, 0);
+
+ uint64_t nsec = computeTimeDeltaNS(&fDriverCallStartTime);
+ if (nsec > LOG_SETPOWER_TIMES) {
+ getPMRootDomain()->pmStatsRecordApplicationResponse(
+ gIOPMStatsDriverPSChangeSlow,
+ fName, kDriverCallSetPowerState, NS_TO_MS(nsec), getRegistryEntryID(),
+ NULL, fHeadNotePowerState);
+ }
+
+ OUR_PMLog(kPMLogDriverAcknowledgeSet, (uintptr_t) this, fDriverTimer);
+ fDriverTimer = 0;
+ more = true;
+ } else {
+ // unexpected ack
+ OUR_PMLog(kPMLogAcknowledgeErr4, (uintptr_t) this, 0);
+ }
+ break;
+
+ case kIOPMRequestTypeInterestChanged:
+ handleInterestChanged( request );
+ more = true;
+ break;
+
+ case kIOPMRequestTypeIdleCancel:
+ if ((fMachineState == kIOPM_OurChangeTellClientsPowerDown)
+ || (fMachineState == kIOPM_OurChangeTellUserPMPolicyPowerDown)
+ || (fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown)
+ || (fMachineState == kIOPM_SyncTellClientsPowerDown)
+ || (fMachineState == kIOPM_SyncTellPriorityClientsPowerDown)) {
+ OUR_PMLog(kPMLogIdleCancel, (uintptr_t) this, fMachineState);
+ PM_LOG2("%s: cancel from machine state %d\n",
+ getName(), fMachineState);
+ fDoNotPowerDown = true;
+ // Stop waiting for app replies.
+ if ((fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown) || + (fMachineState == kIOPM_OurChangeTellUserPMPolicyPowerDown) || + (fMachineState == kIOPM_SyncTellPriorityClientsPowerDown) || + (fMachineState == kIOPM_SyncTellClientsPowerDown)) { + cleanClientResponses(false); + } + more = true; + } + break; + + case kIOPMRequestTypeChildNotifyDelayCancel: + if (fMachineState == kIOPM_NotifyChildrenDelayed) { + PM_LOG2("%s: delay notify cancelled\n", getName()); + notifyChildrenDelayed(); + } + break; + + default: + panic("PMReplyQueue: unknown reply type %x", request->getType()); + } + + more |= gIOPMCompletionQueue->queuePMRequest(request); + if (more) { + gIOPMWorkQueue->incrementProducerCount(); + } + + return more; } //********************************************************************************* // [private] assertPMDriverCall / deassertPMDriverCall //********************************************************************************* -bool IOService::assertPMDriverCall( - IOPMDriverCallEntry * entry, - IOOptionBits options, - IOPMinformee * inform ) +bool +IOService::assertPMDriverCall( + IOPMDriverCallEntry * entry, + IOOptionBits options, + IOPMinformee * inform ) { - IOService * target = 0; - bool ok = false; + IOService * target = 0; + bool ok = false; - if (!initialized) - return false; + if (!initialized) { + return false; + } - PM_LOCK(); + PM_LOCK(); - if (fLockedFlags.PMStop) - { - goto fail; - } + if (fLockedFlags.PMStop) { + goto fail; + } - if (((options & kIOPMADC_NoInactiveCheck) == 0) && isInactive()) - { - goto fail; - } + if (((options & kIOPMADC_NoInactiveCheck) == 0) && isInactive()) { + goto fail; + } - if (inform) - { - if (!inform->active) - { - goto fail; - } - target = inform->whatObject; - if (target->isInactive()) - { - goto fail; - } - } + if (inform) { + if (!inform->active) { + goto fail; + } + target = inform->whatObject; + if (target->isInactive()) { + goto fail; + } + } - entry->thread = current_thread(); - entry->target = target; - queue_enter(&fPMDriverCallQueue, entry, IOPMDriverCallEntry *, link); - ok = true; + entry->thread = current_thread(); + entry->target = target; + queue_enter(&fPMDriverCallQueue, entry, IOPMDriverCallEntry *, link); + ok = true; fail: - PM_UNLOCK(); + PM_UNLOCK(); - return ok; + return ok; } -void IOService::deassertPMDriverCall( IOPMDriverCallEntry * entry ) +void +IOService::deassertPMDriverCall( IOPMDriverCallEntry * entry ) { - bool wakeup = false; + bool wakeup = false; - PM_LOCK(); + PM_LOCK(); - assert( !queue_empty(&fPMDriverCallQueue) ); - queue_remove(&fPMDriverCallQueue, entry, IOPMDriverCallEntry *, link); - if (fLockedFlags.PMDriverCallWait) - { - wakeup = true; - } + assert( !queue_empty(&fPMDriverCallQueue)); + queue_remove(&fPMDriverCallQueue, entry, IOPMDriverCallEntry *, link); + if (fLockedFlags.PMDriverCallWait) { + wakeup = true; + } - PM_UNLOCK(); + PM_UNLOCK(); - if (wakeup) - PM_LOCK_WAKEUP(&fPMDriverCallQueue); + if (wakeup) { + PM_LOCK_WAKEUP(&fPMDriverCallQueue); + } } -bool IOService::getBlockingDriverCall(thread_t *thread, const void **callMethod) +bool +IOService::getBlockingDriverCall(thread_t *thread, const void **callMethod) { - const IOPMDriverCallEntry * entry = NULL; - bool blocked = false; + const IOPMDriverCallEntry * entry = NULL; + bool blocked = false; - if (!initialized) { - return false; - } + if (!initialized) { + return false; + } - if (current_thread() != gIOPMWatchDogThread) { - // Meant to be accessed only from watchdog thread - return false; - } + if 
(current_thread() != gIOPMWatchDogThread) { + // Meant to be accessed only from watchdog thread + return false; + } - PM_LOCK(); - entry = qe_queue_first(&fPMDriverCallQueue, IOPMDriverCallEntry, link); - if (entry) { - *thread = entry->thread; - *callMethod = entry->callMethod; - blocked = true; - } - PM_UNLOCK(); + PM_LOCK(); + entry = qe_queue_first(&fPMDriverCallQueue, IOPMDriverCallEntry, link); + if (entry) { + *thread = entry->thread; + *callMethod = entry->callMethod; + blocked = true; + } + PM_UNLOCK(); - return blocked; + return blocked; } -void IOService::waitForPMDriverCall( IOService * target ) -{ - const IOPMDriverCallEntry * entry; - thread_t thread = current_thread(); - AbsoluteTime deadline; - int waitResult; - bool log = true; - bool wait; - - do { - wait = false; - queue_iterate(&fPMDriverCallQueue, entry, const IOPMDriverCallEntry *, link) - { - // Target of interested driver call - if (target && (target != entry->target)) - continue; - - if (entry->thread == thread) - { - if (log) - { - PM_LOG("%s: %s(%s) on PM thread\n", - fName, __FUNCTION__, target ? target->getName() : ""); - OSReportWithBacktrace("%s: %s(%s) on PM thread\n", - fName, __FUNCTION__, target ? target->getName() : ""); - log = false; - } - continue; - } - - wait = true; - break; - } +void +IOService::waitForPMDriverCall( IOService * target ) +{ + const IOPMDriverCallEntry * entry; + thread_t thread = current_thread(); + AbsoluteTime deadline; + int waitResult; + bool log = true; + bool wait; + + do { + wait = false; + queue_iterate(&fPMDriverCallQueue, entry, const IOPMDriverCallEntry *, link) + { + // Target of interested driver call + if (target && (target != entry->target)) { + continue; + } + + if (entry->thread == thread) { + if (log) { + PM_LOG("%s: %s(%s) on PM thread\n", + fName, __FUNCTION__, target ? target->getName() : ""); + OSReportWithBacktrace("%s: %s(%s) on PM thread\n", + fName, __FUNCTION__, target ? 
target->getName() : ""); + log = false; + } + continue; + } + + wait = true; + break; + } - if (wait) - { - fLockedFlags.PMDriverCallWait = true; - clock_interval_to_deadline(15, kSecondScale, &deadline); - waitResult = PM_LOCK_SLEEP(&fPMDriverCallQueue, deadline); - fLockedFlags.PMDriverCallWait = false; - if (THREAD_TIMED_OUT == waitResult) - { - PM_ERROR("%s: waitForPMDriverCall timeout\n", fName); - wait = false; - } - } - } while (wait); + if (wait) { + fLockedFlags.PMDriverCallWait = true; + clock_interval_to_deadline(15, kSecondScale, &deadline); + waitResult = PM_LOCK_SLEEP(&fPMDriverCallQueue, deadline); + fLockedFlags.PMDriverCallWait = false; + if (THREAD_TIMED_OUT == waitResult) { + PM_ERROR("%s: waitForPMDriverCall timeout\n", fName); + wait = false; + } + } + } while (wait); } //********************************************************************************* // [private] Debug helpers //********************************************************************************* -const char * IOService::getIOMessageString( uint32_t msg ) +const char * +IOService::getIOMessageString( uint32_t msg ) { #define MSG_ENTRY(x) {(int) x, #x} - static const IONamedValue msgNames[] = { - MSG_ENTRY( kIOMessageCanDevicePowerOff ), - MSG_ENTRY( kIOMessageDeviceWillPowerOff ), - MSG_ENTRY( kIOMessageDeviceWillNotPowerOff ), - MSG_ENTRY( kIOMessageDeviceHasPoweredOn ), - MSG_ENTRY( kIOMessageCanSystemPowerOff ), - MSG_ENTRY( kIOMessageSystemWillPowerOff ), - MSG_ENTRY( kIOMessageSystemWillNotPowerOff ), - MSG_ENTRY( kIOMessageCanSystemSleep ), - MSG_ENTRY( kIOMessageSystemWillSleep ), - MSG_ENTRY( kIOMessageSystemWillNotSleep ), - MSG_ENTRY( kIOMessageSystemHasPoweredOn ), - MSG_ENTRY( kIOMessageSystemWillRestart ), - MSG_ENTRY( kIOMessageSystemWillPowerOn ), - MSG_ENTRY( kIOMessageSystemCapabilityChange ), - MSG_ENTRY( kIOPMMessageLastCallBeforeSleep ), - MSG_ENTRY( kIOMessageSystemPagingOff ), - { 0, NULL } - }; - - return IOFindNameForValue(msg, msgNames); + static const IONamedValue msgNames[] = { + MSG_ENTRY( kIOMessageCanDevicePowerOff ), + MSG_ENTRY( kIOMessageDeviceWillPowerOff ), + MSG_ENTRY( kIOMessageDeviceWillNotPowerOff ), + MSG_ENTRY( kIOMessageDeviceHasPoweredOn ), + MSG_ENTRY( kIOMessageCanSystemPowerOff ), + MSG_ENTRY( kIOMessageSystemWillPowerOff ), + MSG_ENTRY( kIOMessageSystemWillNotPowerOff ), + MSG_ENTRY( kIOMessageCanSystemSleep ), + MSG_ENTRY( kIOMessageSystemWillSleep ), + MSG_ENTRY( kIOMessageSystemWillNotSleep ), + MSG_ENTRY( kIOMessageSystemHasPoweredOn ), + MSG_ENTRY( kIOMessageSystemWillRestart ), + MSG_ENTRY( kIOMessageSystemWillPowerOn ), + MSG_ENTRY( kIOMessageSystemCapabilityChange ), + MSG_ENTRY( kIOPMMessageLastCallBeforeSleep ), + MSG_ENTRY( kIOMessageSystemPagingOff ), + { 0, NULL } + }; + + return IOFindNameForValue(msg, msgNames); } @@ -8346,147 +8189,151 @@ const char * IOService::getIOMessageString( uint32_t msg ) OSDefineMetaClassAndStructors( IOPMRequest, IOCommand ); -IOPMRequest * IOPMRequest::create( void ) +IOPMRequest * +IOPMRequest::create( void ) { - IOPMRequest * me = OSTypeAlloc(IOPMRequest); - if (me && !me->init(0, kIOPMRequestTypeInvalid)) - { - me->release(); - me = 0; - } - return me; + IOPMRequest * me = OSTypeAlloc(IOPMRequest); + if (me && !me->init(0, kIOPMRequestTypeInvalid)) { + me->release(); + me = 0; + } + return me; } -bool IOPMRequest::init( IOService * target, IOOptionBits type ) +bool +IOPMRequest::init( IOService * target, IOOptionBits type ) { - if (!IOCommand::init()) - return false; + if (!IOCommand::init()) { + return false; + 
}
- fRequestType = type;
- fTarget = target;
+ fRequestType = type;
+ fTarget = target;
- if (fTarget)
- fTarget->retain();
+ if (fTarget) {
+ fTarget->retain();
+ }
- // Root node and root domain requests does not prevent the power tree from
- // becoming quiescent.
+ // Root node and root domain requests do not prevent the power tree from
+ // becoming quiescent.
- fIsQuiesceBlocker = ((fTarget != gIOPMRootNode) &&
- (fTarget != IOService::getPMRootDomain()));
+ fIsQuiesceBlocker = ((fTarget != gIOPMRootNode) &&
+ (fTarget != IOService::getPMRootDomain()));
- return true;
+ return true;
}
-void IOPMRequest::reset( void )
+void
+IOPMRequest::reset( void )
{
- assert( fWorkWaitCount == 0 );
- assert( fFreeWaitCount == 0 );
+ assert( fWorkWaitCount == 0 );
+ assert( fFreeWaitCount == 0 );
- detachNextRequest();
- detachRootRequest();
+ detachNextRequest();
+ detachRootRequest();
- if (fCompletionAction && (fRequestType == kIOPMRequestTypeQuiescePowerTree))
- {
- // Call the completion on PM work loop context
- fCompletionAction(fCompletionTarget, fCompletionParam);
- fCompletionAction = 0;
- }
+ if (fCompletionAction && (fRequestType == kIOPMRequestTypeQuiescePowerTree)) {
+ // Call the completion on PM work loop context
+ fCompletionAction(fCompletionTarget, fCompletionParam);
+ fCompletionAction = 0;
+ }
- fRequestType = kIOPMRequestTypeInvalid;
+ fRequestType = kIOPMRequestTypeInvalid;
- if (fTarget)
- {
- fTarget->release();
- fTarget = 0;
- }
+ if (fTarget) {
+ fTarget->release();
+ fTarget = 0;
+ }
}
-bool IOPMRequest::attachNextRequest( IOPMRequest * next )
+bool
+IOPMRequest::attachNextRequest( IOPMRequest * next )
{
- bool ok = false;
+ bool ok = false;
- if (!fRequestNext)
- {
- // Postpone the execution of the next request after
- // this request.
- fRequestNext = next;
- fRequestNext->fWorkWaitCount++;
+ if (!fRequestNext) {
+ // Postpone the execution of the next request after
+ // this request.
+ fRequestNext = next; + fRequestNext->fWorkWaitCount++; #if LOG_REQUEST_ATTACH - PM_LOG("Attached next: %p [0x%x] -> %p [0x%x, %u] %s\n", - OBFUSCATE(this), fRequestType, OBFUSCATE(fRequestNext), - fRequestNext->fRequestType, - (uint32_t) fRequestNext->fWorkWaitCount, - fTarget->getName()); + PM_LOG("Attached next: %p [0x%x] -> %p [0x%x, %u] %s\n", + OBFUSCATE(this), fRequestType, OBFUSCATE(fRequestNext), + fRequestNext->fRequestType, + (uint32_t) fRequestNext->fWorkWaitCount, + fTarget->getName()); #endif - ok = true; - } - return ok; + ok = true; + } + return ok; } -bool IOPMRequest::detachNextRequest( void ) +bool +IOPMRequest::detachNextRequest( void ) { - bool ok = false; + bool ok = false; - if (fRequestNext) - { - assert(fRequestNext->fWorkWaitCount); - if (fRequestNext->fWorkWaitCount) - fRequestNext->fWorkWaitCount--; + if (fRequestNext) { + assert(fRequestNext->fWorkWaitCount); + if (fRequestNext->fWorkWaitCount) { + fRequestNext->fWorkWaitCount--; + } #if LOG_REQUEST_ATTACH - PM_LOG("Detached next: %p [0x%x] -> %p [0x%x, %u] %s\n", - OBFUSCATE(this), fRequestType, OBFUSCATE(fRequestNext), - fRequestNext->fRequestType, - (uint32_t) fRequestNext->fWorkWaitCount, - fTarget->getName()); + PM_LOG("Detached next: %p [0x%x] -> %p [0x%x, %u] %s\n", + OBFUSCATE(this), fRequestType, OBFUSCATE(fRequestNext), + fRequestNext->fRequestType, + (uint32_t) fRequestNext->fWorkWaitCount, + fTarget->getName()); #endif - fRequestNext = 0; - ok = true; - } - return ok; + fRequestNext = 0; + ok = true; + } + return ok; } -bool IOPMRequest::attachRootRequest( IOPMRequest * root ) +bool +IOPMRequest::attachRootRequest( IOPMRequest * root ) { - bool ok = false; + bool ok = false; - if (!fRequestRoot) - { - // Delay the completion of the root request after - // this request. - fRequestRoot = root; - fRequestRoot->fFreeWaitCount++; + if (!fRequestRoot) { + // Delay the completion of the root request after + // this request. 
+ fRequestRoot = root; + fRequestRoot->fFreeWaitCount++; #if LOG_REQUEST_ATTACH - PM_LOG("Attached root: %p [0x%x] -> %p [0x%x, %u] %s\n", - OBFUSCATE(this), (uint32_t) fType, OBFUSCATE(fRequestRoot), - (uint32_t) fRequestRoot->fType, - (uint32_t) fRequestRoot->fFreeWaitCount, - fTarget->getName()); + PM_LOG("Attached root: %p [0x%x] -> %p [0x%x, %u] %s\n", + OBFUSCATE(this), (uint32_t) fType, OBFUSCATE(fRequestRoot), + (uint32_t) fRequestRoot->fType, + (uint32_t) fRequestRoot->fFreeWaitCount, + fTarget->getName()); #endif - ok = true; - } - return ok; + ok = true; + } + return ok; } -bool IOPMRequest::detachRootRequest( void ) +bool +IOPMRequest::detachRootRequest( void ) { - bool ok = false; + bool ok = false; - if (fRequestRoot) - { - assert(fRequestRoot->fFreeWaitCount); - if (fRequestRoot->fFreeWaitCount) - fRequestRoot->fFreeWaitCount--; + if (fRequestRoot) { + assert(fRequestRoot->fFreeWaitCount); + if (fRequestRoot->fFreeWaitCount) { + fRequestRoot->fFreeWaitCount--; + } #if LOG_REQUEST_ATTACH - PM_LOG("Detached root: %p [0x%x] -> %p [0x%x, %u] %s\n", - OBFUSCATE(this), (uint32_t) fType, OBFUSCATE(fRequestRoot), - (uint32_t) fRequestRoot->fType, - (uint32_t) fRequestRoot->fFreeWaitCount, - fTarget->getName()); + PM_LOG("Detached root: %p [0x%x] -> %p [0x%x, %u] %s\n", + OBFUSCATE(this), (uint32_t) fType, OBFUSCATE(fRequestRoot), + (uint32_t) fRequestRoot->fType, + (uint32_t) fRequestRoot->fFreeWaitCount, + fTarget->getName()); #endif - fRequestRoot = 0; - ok = true; - } - return ok; + fRequestRoot = 0; + ok = true; + } + return ok; } // MARK: - @@ -8500,92 +8347,97 @@ bool IOPMRequest::detachRootRequest( void ) OSDefineMetaClassAndStructors( IOPMRequestQueue, IOEventSource ); -IOPMRequestQueue * IOPMRequestQueue::create( IOService * inOwner, Action inAction ) +IOPMRequestQueue * +IOPMRequestQueue::create( IOService * inOwner, Action inAction ) { - IOPMRequestQueue * me = OSTypeAlloc(IOPMRequestQueue); - if (me && !me->init(inOwner, inAction)) - { - me->release(); - me = 0; - } - return me; + IOPMRequestQueue * me = OSTypeAlloc(IOPMRequestQueue); + if (me && !me->init(inOwner, inAction)) { + me->release(); + me = 0; + } + return me; } -bool IOPMRequestQueue::init( IOService * inOwner, Action inAction ) +bool +IOPMRequestQueue::init( IOService * inOwner, Action inAction ) { - if (!inAction || !IOEventSource::init(inOwner, (IOEventSourceAction)inAction)) - return false; + if (!inAction || !IOEventSource::init(inOwner, (IOEventSourceAction)inAction)) { + return false; + } - queue_init(&fQueue); - fLock = IOLockAlloc(); - return (fLock != 0); + queue_init(&fQueue); + fLock = IOLockAlloc(); + return fLock != 0; } -void IOPMRequestQueue::free( void ) +void +IOPMRequestQueue::free( void ) { - if (fLock) - { - IOLockFree(fLock); - fLock = 0; - } - return IOEventSource::free(); + if (fLock) { + IOLockFree(fLock); + fLock = 0; + } + return IOEventSource::free(); } -void IOPMRequestQueue::queuePMRequest( IOPMRequest * request ) +void +IOPMRequestQueue::queuePMRequest( IOPMRequest * request ) { - assert(request); - IOLockLock(fLock); - queue_enter(&fQueue, request, typeof(request), fCommandChain); - IOLockUnlock(fLock); - if (workLoop) signalWorkAvailable(); + assert(request); + IOLockLock(fLock); + queue_enter(&fQueue, request, typeof(request), fCommandChain); + IOLockUnlock(fLock); + if (workLoop) { + signalWorkAvailable(); + } } void IOPMRequestQueue::queuePMRequestChain( IOPMRequest ** requests, IOItemCount count ) { - IOPMRequest * next; - - assert(requests && count); - IOLockLock(fLock); 
- while (count--) - { - next = *requests; - requests++; - queue_enter(&fQueue, next, typeof(next), fCommandChain); - } - IOLockUnlock(fLock); - if (workLoop) signalWorkAvailable(); -} - -bool IOPMRequestQueue::checkForWork( void ) -{ - Action dqAction = (Action) action; - IOPMRequest * request; - IOService * target; - int dequeueCount = 0; - bool more = false; - - IOLockLock( fLock ); - - while (!queue_empty(&fQueue)) - { - if (dequeueCount++ >= kMaxDequeueCount) - { - // Allow other queues a chance to work - more = true; - break; - } - - queue_remove_first(&fQueue, request, typeof(request), fCommandChain); - IOLockUnlock(fLock); - target = request->getTarget(); - assert(target); - more |= (*dqAction)( target, request, this ); - IOLockLock( fLock ); - } - - IOLockUnlock( fLock ); - return more; + IOPMRequest * next; + + assert(requests && count); + IOLockLock(fLock); + while (count--) { + next = *requests; + requests++; + queue_enter(&fQueue, next, typeof(next), fCommandChain); + } + IOLockUnlock(fLock); + if (workLoop) { + signalWorkAvailable(); + } +} + +bool +IOPMRequestQueue::checkForWork( void ) +{ + Action dqAction = (Action) action; + IOPMRequest * request; + IOService * target; + int dequeueCount = 0; + bool more = false; + + IOLockLock( fLock ); + + while (!queue_empty(&fQueue)) { + if (dequeueCount++ >= kMaxDequeueCount) { + // Allow other queues a chance to work + more = true; + break; + } + + queue_remove_first(&fQueue, request, typeof(request), fCommandChain); + IOLockUnlock(fLock); + target = request->getTarget(); + assert(target); + more |= (*dqAction)( target, request, this ); + IOLockLock( fLock ); + } + + IOLockUnlock( fLock ); + return more; } // MARK: - @@ -8603,245 +8455,239 @@ OSDefineMetaClassAndStructors( IOPMWorkQueue, IOEventSource ); IOPMWorkQueue * IOPMWorkQueue::create( IOService * inOwner, Action invoke, Action retire ) { - IOPMWorkQueue * me = OSTypeAlloc(IOPMWorkQueue); - if (me && !me->init(inOwner, invoke, retire)) - { - me->release(); - me = 0; - } - return me; -} - -bool IOPMWorkQueue::init( IOService * inOwner, Action invoke, Action retire ) -{ - if (!invoke || !retire || - !IOEventSource::init(inOwner, (IOEventSourceAction)0)) - return false; - - queue_init(&fWorkQueue); - - fInvokeAction = invoke; - fRetireAction = retire; - fConsumerCount = fProducerCount = 0; - - return true; -} - -bool IOPMWorkQueue::queuePMRequest( IOPMRequest * request, IOServicePM * pwrMgt ) -{ - queue_head_t * requestQueue; - bool more = false; - bool empty; - - assert( request ); - assert( pwrMgt ); - assert( onThread() ); - assert( queue_next(&request->fCommandChain) == - queue_prev(&request->fCommandChain) ); - - gIOPMBusyRequestCount++; - - if (request->isQuiesceType()) - { - if ((request->getTarget() == gIOPMRootNode) && !fQuiesceStartTime) - { - // Attach new quiesce request to all quiesce blockers in the queue - fQuiesceStartTime = mach_absolute_time(); - attachQuiesceRequest(request); - fQuiesceRequest = request; - } - } - else if (fQuiesceRequest && request->isQuiesceBlocker()) - { - // Attach the new quiesce blocker to the blocked quiesce request - request->attachNextRequest(fQuiesceRequest); - } - - // Add new request to the tail of the per-service request queue. - // Then immediately check the request queue to minimize latency - // if the queue was empty. 
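/*
 * The IOPMRequestQueue::checkForWork() reformatted in this hunk drains at
 * most kMaxDequeueCount entries per pass and drops its lock around each
 * action callback, so the callback may itself enqueue more work. Below is a
 * minimal userspace sketch of that pattern; std::mutex and std::function
 * stand in for IOLock and the Action typedef, and every name in the sketch
 * is illustrative only -- none of it is part of the xnu sources.
 */
#include <deque>
#include <functional>
#include <mutex>

struct BatchedQueue {
    static constexpr int kMaxDequeue = 256;      // mirrors kMaxDequeueCount
    std::mutex lock;
    std::deque<std::function<bool()>> queue;

    // Returns true if entries remain, i.e. another pass should be scheduled.
    bool checkForWork() {
        int dequeued = 0;
        bool more = false;
        std::unique_lock<std::mutex> guard(lock);
        while (!queue.empty()) {
            if (dequeued++ >= kMaxDequeue) {     // give other sources a turn
                more = true;
                break;
            }
            auto work = std::move(queue.front());
            queue.pop_front();
            guard.unlock();                      // never call out while locked
            more |= work();
            guard.lock();
        }
        return more;
    }
};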
- - requestQueue = &pwrMgt->RequestHead; - empty = queue_empty(requestQueue); - queue_enter(requestQueue, request, typeof(request), fCommandChain); - if (empty) - { - more = checkRequestQueue(requestQueue, &empty); - if (!empty) - { - // Request just added is blocked, add its target IOServicePM - // to the work queue. - assert( queue_next(&pwrMgt->WorkChain) == - queue_prev(&pwrMgt->WorkChain) ); - - queue_enter(&fWorkQueue, pwrMgt, typeof(pwrMgt), WorkChain); - fQueueLength++; - PM_LOG3("IOPMWorkQueue: [%u] added %s@%p to queue\n", - fQueueLength, pwrMgt->Name, OBFUSCATE(pwrMgt)); - } - } - - return more; -} - -bool IOPMWorkQueue::checkRequestQueue( queue_head_t * requestQueue, bool * empty ) -{ - IOPMRequest * request; - IOService * target; - bool more = false; - bool done = false; - - assert(!queue_empty(requestQueue)); - do { - request = (typeof(request)) queue_first(requestQueue); - if (request->isWorkBlocked()) - break; // request dispatch blocked on attached request - - target = request->getTarget(); - if (fInvokeAction) - { - done = (*fInvokeAction)( target, request, this ); - } - else - { - PM_LOG("PM request 0x%x dropped\n", request->getType()); - done = true; - } - if (!done) - break; // PM state machine blocked - - assert(gIOPMBusyRequestCount > 0); - if (gIOPMBusyRequestCount) - gIOPMBusyRequestCount--; - - if (request == fQuiesceRequest) - { - fQuiesceRequest = 0; - } - - queue_remove_first(requestQueue, request, typeof(request), fCommandChain); - more |= (*fRetireAction)( target, request, this ); - done = queue_empty(requestQueue); - } while (!done); - - *empty = done; - - if (more) - { - // Retired a request that may unblock a previously visited request - // that is still waiting on the work queue. Must trigger another - // queue check. - fProducerCount++; - } - - return more; -} - -bool IOPMWorkQueue::checkForWork( void ) -{ - IOServicePM * entry; - IOServicePM * next; - bool more = false; - bool empty; + IOPMWorkQueue * me = OSTypeAlloc(IOPMWorkQueue); + if (me && !me->init(inOwner, invoke, retire)) { + me->release(); + me = 0; + } + return me; +} + +bool +IOPMWorkQueue::init( IOService * inOwner, Action invoke, Action retire ) +{ + if (!invoke || !retire || + !IOEventSource::init(inOwner, (IOEventSourceAction)0)) { + return false; + } + + queue_init(&fWorkQueue); + + fInvokeAction = invoke; + fRetireAction = retire; + fConsumerCount = fProducerCount = 0; + + return true; +} + +bool +IOPMWorkQueue::queuePMRequest( IOPMRequest * request, IOServicePM * pwrMgt ) +{ + queue_head_t * requestQueue; + bool more = false; + bool empty; + + assert( request ); + assert( pwrMgt ); + assert( onThread()); + assert( queue_next(&request->fCommandChain) == + queue_prev(&request->fCommandChain)); + + gIOPMBusyRequestCount++; + + if (request->isQuiesceType()) { + if ((request->getTarget() == gIOPMRootNode) && !fQuiesceStartTime) { + // Attach new quiesce request to all quiesce blockers in the queue + fQuiesceStartTime = mach_absolute_time(); + attachQuiesceRequest(request); + fQuiesceRequest = request; + } + } else if (fQuiesceRequest && request->isQuiesceBlocker()) { + // Attach the new quiesce blocker to the blocked quiesce request + request->attachNextRequest(fQuiesceRequest); + } + + // Add new request to the tail of the per-service request queue. + // Then immediately check the request queue to minimize latency + // if the queue was empty. 
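/*
 * The per-service request queue managed by queuePMRequest() and
 * checkRequestQueue() below dispatches strictly in FIFO order: a blocked
 * request at the head stalls everything queued behind it, and the whole
 * IOServicePM entry stays parked on the work queue until the head clears.
 * A compressed userspace model of that head-of-line blocking, with
 * illustrative types (not xnu APIs):
 */
#include <cstdio>
#include <deque>

struct Request { bool blocked; int id; };

// Returns true when the per-service queue was fully drained.
bool checkRequestQueue(std::deque<Request>& q) {
    while (!q.empty()) {
        Request& head = q.front();
        if (head.blocked) {          // head-of-line blocking
            return false;            // service stays on the work queue
        }
        std::printf("dispatch request %d\n", head.id);
        q.pop_front();
    }
    return true;
}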
+ + requestQueue = &pwrMgt->RequestHead; + empty = queue_empty(requestQueue); + queue_enter(requestQueue, request, typeof(request), fCommandChain); + if (empty) { + more = checkRequestQueue(requestQueue, &empty); + if (!empty) { + // Request just added is blocked, add its target IOServicePM + // to the work queue. + assert( queue_next(&pwrMgt->WorkChain) == + queue_prev(&pwrMgt->WorkChain)); + + queue_enter(&fWorkQueue, pwrMgt, typeof(pwrMgt), WorkChain); + fQueueLength++; + PM_LOG3("IOPMWorkQueue: [%u] added %s@%p to queue\n", + fQueueLength, pwrMgt->Name, OBFUSCATE(pwrMgt)); + } + } + + return more; +} + +bool +IOPMWorkQueue::checkRequestQueue( queue_head_t * requestQueue, bool * empty ) +{ + IOPMRequest * request; + IOService * target; + bool more = false; + bool done = false; + + assert(!queue_empty(requestQueue)); + do { + request = (typeof(request))queue_first(requestQueue); + if (request->isWorkBlocked()) { + break; // request dispatch blocked on attached request + } + target = request->getTarget(); + if (fInvokeAction) { + done = (*fInvokeAction)( target, request, this ); + } else { + PM_LOG("PM request 0x%x dropped\n", request->getType()); + done = true; + } + if (!done) { + break; // PM state machine blocked + } + assert(gIOPMBusyRequestCount > 0); + if (gIOPMBusyRequestCount) { + gIOPMBusyRequestCount--; + } + + if (request == fQuiesceRequest) { + fQuiesceRequest = 0; + } + + queue_remove_first(requestQueue, request, typeof(request), fCommandChain); + more |= (*fRetireAction)( target, request, this ); + done = queue_empty(requestQueue); + } while (!done); + + *empty = done; + + if (more) { + // Retired a request that may unblock a previously visited request + // that is still waiting on the work queue. Must trigger another + // queue check. + fProducerCount++; + } + + return more; +} + +bool +IOPMWorkQueue::checkForWork( void ) +{ + IOServicePM * entry; + IOServicePM * next; + bool more = false; + bool empty; #if WORK_QUEUE_STATS - fStatCheckForWork++; + fStatCheckForWork++; #endif - // Iterate over all IOServicePM entries in the work queue, - // and check each entry's request queue. + // Iterate over all IOServicePM entries in the work queue, + // and check each entry's request queue. - while (fConsumerCount != fProducerCount) - { - PM_LOG3("IOPMWorkQueue: checkForWork %u %u\n", - fProducerCount, fConsumerCount); + while (fConsumerCount != fProducerCount) { + PM_LOG3("IOPMWorkQueue: checkForWork %u %u\n", + fProducerCount, fConsumerCount); - fConsumerCount = fProducerCount; + fConsumerCount = fProducerCount; #if WORK_QUEUE_STATS - if (queue_empty(&fWorkQueue)) - { - fStatQueueEmpty++; - break; - } - fStatScanEntries++; - uint32_t cachedWorkCount = gIOPMWorkInvokeCount; + if (queue_empty(&fWorkQueue)) { + fStatQueueEmpty++; + break; + } + fStatScanEntries++; + uint32_t cachedWorkCount = gIOPMWorkInvokeCount; #endif - __IGNORE_WCASTALIGN(entry = (typeof(entry)) queue_first(&fWorkQueue)); - while (!queue_end(&fWorkQueue, (queue_entry_t) entry)) - { - more |= checkRequestQueue(&entry->RequestHead, &empty); - - // Get next entry, points to head if current entry is last. - __IGNORE_WCASTALIGN(next = (typeof(next)) queue_next(&entry->WorkChain)); - - // if request queue is empty, remove IOServicePM from work queue. 
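/*
 * IOPMWorkQueue::checkForWork() in this hunk uses a generation handshake:
 * every signalWorkAvailable() or retired request that may unblock another
 * bumps fProducerCount, and the scan loop repeats until a full pass ends
 * with no new production. A small userspace model of that counter loop
 * follows; names and std::atomic are stand-ins chosen for illustration.
 */
#include <atomic>
#include <cstdint>

struct GenerationScan {
    std::atomic<uint32_t> producer{0};   // models fProducerCount
    uint32_t consumer = 0;               // models fConsumerCount

    void produce() { producer.fetch_add(1, std::memory_order_relaxed); }

    template <typename ScanFn>
    void drain(ScanFn scan) {
        while (consumer != producer.load(std::memory_order_relaxed)) {
            consumer = producer.load(std::memory_order_relaxed);
            scan();   // may call produce() and force another full pass
        }
    }
};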
- if (empty) - { - assert(fQueueLength); - if (fQueueLength) fQueueLength--; - PM_LOG3("IOPMWorkQueue: [%u] removed %s@%p from queue\n", - fQueueLength, entry->Name, OBFUSCATE(entry)); - queue_remove(&fWorkQueue, entry, typeof(entry), WorkChain); - } - entry = next; - } + __IGNORE_WCASTALIGN(entry = (typeof(entry))queue_first(&fWorkQueue)); + while (!queue_end(&fWorkQueue, (queue_entry_t) entry)) { + more |= checkRequestQueue(&entry->RequestHead, &empty); + + // Get next entry, points to head if current entry is last. + __IGNORE_WCASTALIGN(next = (typeof(next))queue_next(&entry->WorkChain)); + + // if request queue is empty, remove IOServicePM from work queue. + if (empty) { + assert(fQueueLength); + if (fQueueLength) { + fQueueLength--; + } + PM_LOG3("IOPMWorkQueue: [%u] removed %s@%p from queue\n", + fQueueLength, entry->Name, OBFUSCATE(entry)); + queue_remove(&fWorkQueue, entry, typeof(entry), WorkChain); + } + entry = next; + } #if WORK_QUEUE_STATS - if (cachedWorkCount == gIOPMWorkInvokeCount) - fStatNoWorkDone++; + if (cachedWorkCount == gIOPMWorkInvokeCount) { + fStatNoWorkDone++; + } #endif - } + } - return more; + return more; } -void IOPMWorkQueue::signalWorkAvailable( void ) +void +IOPMWorkQueue::signalWorkAvailable( void ) { - fProducerCount++; - IOEventSource::signalWorkAvailable(); + fProducerCount++; + IOEventSource::signalWorkAvailable(); } -void IOPMWorkQueue::incrementProducerCount( void ) +void +IOPMWorkQueue::incrementProducerCount( void ) { - fProducerCount++; + fProducerCount++; } -void IOPMWorkQueue::attachQuiesceRequest( IOPMRequest * quiesceRequest ) +void +IOPMWorkQueue::attachQuiesceRequest( IOPMRequest * quiesceRequest ) { - IOServicePM * entry; - IOPMRequest * request; + IOServicePM * entry; + IOPMRequest * request; + + if (queue_empty(&fWorkQueue)) { + return; + } - if (queue_empty(&fWorkQueue)) - { - return; - } + queue_iterate(&fWorkQueue, entry, typeof(entry), WorkChain) + { + queue_iterate(&entry->RequestHead, request, typeof(request), fCommandChain) + { + // Attach the quiesce request to any request in the queue that + // is not linked to a next request. These requests will block + // the quiesce request. - queue_iterate(&fWorkQueue, entry, typeof(entry), WorkChain) - { - queue_iterate(&entry->RequestHead, request, typeof(request), fCommandChain) - { - // Attach the quiesce request to any request in the queue that - // is not linked to a next request. These requests will block - // the quiesce request. 
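/*
 * attachQuiesceRequest() in this hunk sweeps every pending request and makes
 * each "quiesce blocker" a dependency of the quiesce request, so the quiesce
 * request only runs after all of them retire. A userspace sketch of that
 * dependency edge, with illustrative field names modeled on fRequestNext and
 * fWorkWaitCount (not the actual xnu declarations):
 */
#include <cstddef>
#include <vector>

struct Req {
    bool quiesceBlocker = false;
    Req* next = nullptr;       // models fRequestNext
    std::size_t workWait = 0;  // models fWorkWaitCount
};

void attachQuiesce(std::vector<Req*>& pending, Req* quiesce) {
    for (Req* r : pending) {
        if (r->quiesceBlocker) {
            r->next = quiesce;     // models attachNextRequest()
            quiesce->workWait++;   // quiesce blocked until r retires
        }
    }
}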
- - if (request->isQuiesceBlocker()) - { - request->attachNextRequest(quiesceRequest); - } - } - } + if (request->isQuiesceBlocker()) { + request->attachNextRequest(quiesceRequest); + } + } + } } -void IOPMWorkQueue::finishQuiesceRequest( IOPMRequest * quiesceRequest ) +void +IOPMWorkQueue::finishQuiesceRequest( IOPMRequest * quiesceRequest ) { - if (fQuiesceRequest && (quiesceRequest == fQuiesceRequest) && - (fQuiesceStartTime != 0)) - { - fInvokeAction = 0; - fQuiesceFinishTime = mach_absolute_time(); - } + if (fQuiesceRequest && (quiesceRequest == fQuiesceRequest) && + (fQuiesceStartTime != 0)) { + fInvokeAction = 0; + fQuiesceFinishTime = mach_absolute_time(); + } } // MARK: - @@ -8856,58 +8702,59 @@ OSDefineMetaClassAndStructors( IOPMCompletionQueue, IOEventSource ); IOPMCompletionQueue * IOPMCompletionQueue::create( IOService * inOwner, Action inAction ) { - IOPMCompletionQueue * me = OSTypeAlloc(IOPMCompletionQueue); - if (me && !me->init(inOwner, inAction)) - { - me->release(); - me = 0; - } - return me; + IOPMCompletionQueue * me = OSTypeAlloc(IOPMCompletionQueue); + if (me && !me->init(inOwner, inAction)) { + me->release(); + me = 0; + } + return me; } -bool IOPMCompletionQueue::init( IOService * inOwner, Action inAction ) +bool +IOPMCompletionQueue::init( IOService * inOwner, Action inAction ) { - if (!inAction || !IOEventSource::init(inOwner, (IOEventSourceAction)inAction)) - return false; + if (!inAction || !IOEventSource::init(inOwner, (IOEventSourceAction)inAction)) { + return false; + } - queue_init(&fQueue); - return true; + queue_init(&fQueue); + return true; } -bool IOPMCompletionQueue::queuePMRequest( IOPMRequest * request ) +bool +IOPMCompletionQueue::queuePMRequest( IOPMRequest * request ) { - bool more; + bool more; - assert(request); - // unblock dependent request - more = request->detachNextRequest(); - queue_enter(&fQueue, request, typeof(request), fCommandChain); - return more; + assert(request); + // unblock dependent request + more = request->detachNextRequest(); + queue_enter(&fQueue, request, typeof(request), fCommandChain); + return more; } -bool IOPMCompletionQueue::checkForWork( void ) +bool +IOPMCompletionQueue::checkForWork( void ) { - Action dqAction = (Action) action; - IOPMRequest * request; - IOPMRequest * next; - IOService * target; - bool more = false; + Action dqAction = (Action) action; + IOPMRequest * request; + IOPMRequest * next; + IOService * target; + bool more = false; - request = (typeof(request)) queue_first(&fQueue); - while (!queue_end(&fQueue, (queue_entry_t) request)) - { - next = (typeof(next)) queue_next(&request->fCommandChain); - if (!request->isFreeBlocked()) - { - queue_remove(&fQueue, request, typeof(request), fCommandChain); - target = request->getTarget(); - assert(target); - more |= (*dqAction)( target, request, this ); - } - request = next; - } + request = (typeof(request))queue_first(&fQueue); + while (!queue_end(&fQueue, (queue_entry_t) request)) { + next = (typeof(next))queue_next(&request->fCommandChain); + if (!request->isFreeBlocked()) { + queue_remove(&fQueue, request, typeof(request), fCommandChain); + target = request->getTarget(); + assert(target); + more |= (*dqAction)( target, request, this ); + } + request = next; + } - return more; + return more; } // MARK: - @@ -8924,162 +8771,163 @@ OSDefineMetaClassAndStructors(IOServicePM, OSObject) static void setPMProperty( OSDictionary * dict, const char * key, uint64_t value ) { - OSNumber * num = OSNumber::withNumber(value, sizeof(value) * 8); - if (num) - { - 
dict->setObject(key, num); - num->release(); - } + OSNumber * num = OSNumber::withNumber(value, sizeof(value) * 8); + if (num) { + dict->setObject(key, num); + num->release(); + } } -IOReturn IOServicePM::gatedSerialize( OSSerialize * s ) const -{ - OSDictionary * dict; - bool ok = false; - int powerClamp = -1; - int dictSize = 6; - - if (IdleTimerPeriod) - dictSize += 4; - - if (PMActions.parameter & kPMActionsFlagLimitPower) - { - dictSize += 1; - powerClamp = 0; - if (PMActions.parameter & - (kPMActionsFlagIsDisplayWrangler | kPMActionsFlagIsGraphicsDevice)) - powerClamp++; - } +IOReturn +IOServicePM::gatedSerialize( OSSerialize * s ) const +{ + OSDictionary * dict; + bool ok = false; + int powerClamp = -1; + int dictSize = 6; + + if (IdleTimerPeriod) { + dictSize += 4; + } + + if (PMActions.parameter & kPMActionsFlagLimitPower) { + dictSize += 1; + powerClamp = 0; + if (PMActions.parameter & + (kPMActionsFlagIsDisplayWrangler | kPMActionsFlagIsGraphicsDevice)) { + powerClamp++; + } + } #if WORK_QUEUE_STATS - if (gIOPMRootNode == ControllingDriver) - dictSize += 4; + if (gIOPMRootNode == ControllingDriver) { + dictSize += 4; + } #endif - if (PowerClients) - dict = OSDictionary::withDictionary( - PowerClients, PowerClients->getCount() + dictSize); - else - dict = OSDictionary::withCapacity(dictSize); - - if (dict) - { - setPMProperty(dict, "CurrentPowerState", CurrentPowerState); - setPMProperty(dict, "CapabilityFlags", CurrentCapabilityFlags); - if (NumberOfPowerStates) - setPMProperty(dict, "MaxPowerState", NumberOfPowerStates-1); - if (DesiredPowerState != CurrentPowerState) - setPMProperty(dict, "DesiredPowerState", DesiredPowerState); - if (kIOPM_Finished != MachineState) - setPMProperty(dict, "MachineState", MachineState); - if (DeviceOverrideEnabled) - dict->setObject("PowerOverrideOn", kOSBooleanTrue); - if (powerClamp >= 0) - setPMProperty(dict, "PowerClamp", powerClamp); - - if (IdleTimerPeriod) - { - AbsoluteTime now; - AbsoluteTime delta; - uint64_t nsecs; - - clock_get_uptime(&now); - - // The idle timer period in milliseconds - setPMProperty(dict, "IdleTimerPeriod", NextIdleTimerPeriod * 1000ULL); - - // Number of tickles since the last idle timer expiration - setPMProperty(dict, "ActivityTickles", ActivityTickleCount); - - if (AbsoluteTime_to_scalar(&DeviceActiveTimestamp)) - { - // Milliseconds since the last activity tickle - delta = now; - SUB_ABSOLUTETIME(&delta, &DeviceActiveTimestamp); - absolutetime_to_nanoseconds(delta, &nsecs); - setPMProperty(dict, "TimeSinceLastTickle", NS_TO_MS(nsecs)); - } - - if (!IdleTimerStopped && AbsoluteTime_to_scalar(&IdleTimerStartTime)) - { - // Idle timer elapsed time in milliseconds - delta = now; - SUB_ABSOLUTETIME(&delta, &IdleTimerStartTime); - absolutetime_to_nanoseconds(delta, &nsecs); - setPMProperty(dict, "IdleTimerElapsedTime", NS_TO_MS(nsecs)); - } - } + if (PowerClients) { + dict = OSDictionary::withDictionary( + PowerClients, PowerClients->getCount() + dictSize); + } else { + dict = OSDictionary::withCapacity(dictSize); + } + + if (dict) { + setPMProperty(dict, "CurrentPowerState", CurrentPowerState); + setPMProperty(dict, "CapabilityFlags", CurrentCapabilityFlags); + if (NumberOfPowerStates) { + setPMProperty(dict, "MaxPowerState", NumberOfPowerStates - 1); + } + if (DesiredPowerState != CurrentPowerState) { + setPMProperty(dict, "DesiredPowerState", DesiredPowerState); + } + if (kIOPM_Finished != MachineState) { + setPMProperty(dict, "MachineState", MachineState); + } + if (DeviceOverrideEnabled) { + 
dict->setObject("PowerOverrideOn", kOSBooleanTrue); + } + if (powerClamp >= 0) { + setPMProperty(dict, "PowerClamp", powerClamp); + } + + if (IdleTimerPeriod) { + AbsoluteTime now; + AbsoluteTime delta; + uint64_t nsecs; + + clock_get_uptime(&now); + + // The idle timer period in milliseconds + setPMProperty(dict, "IdleTimerPeriod", NextIdleTimerPeriod * 1000ULL); + + // Number of tickles since the last idle timer expiration + setPMProperty(dict, "ActivityTickles", ActivityTickleCount); + + if (AbsoluteTime_to_scalar(&DeviceActiveTimestamp)) { + // Milliseconds since the last activity tickle + delta = now; + SUB_ABSOLUTETIME(&delta, &DeviceActiveTimestamp); + absolutetime_to_nanoseconds(delta, &nsecs); + setPMProperty(dict, "TimeSinceLastTickle", NS_TO_MS(nsecs)); + } + + if (!IdleTimerStopped && AbsoluteTime_to_scalar(&IdleTimerStartTime)) { + // Idle timer elapsed time in milliseconds + delta = now; + SUB_ABSOLUTETIME(&delta, &IdleTimerStartTime); + absolutetime_to_nanoseconds(delta, &nsecs); + setPMProperty(dict, "IdleTimerElapsedTime", NS_TO_MS(nsecs)); + } + } #if WORK_QUEUE_STATS - if (gIOPMRootNode == Owner) - { - setPMProperty(dict, "WQ-CheckForWork", - gIOPMWorkQueue->fStatCheckForWork); - setPMProperty(dict, "WQ-ScanEntries", - gIOPMWorkQueue->fStatScanEntries); - setPMProperty(dict, "WQ-QueueEmpty", - gIOPMWorkQueue->fStatQueueEmpty); - setPMProperty(dict, "WQ-NoWorkDone", - gIOPMWorkQueue->fStatNoWorkDone); - } + if (gIOPMRootNode == Owner) { + setPMProperty(dict, "WQ-CheckForWork", + gIOPMWorkQueue->fStatCheckForWork); + setPMProperty(dict, "WQ-ScanEntries", + gIOPMWorkQueue->fStatScanEntries); + setPMProperty(dict, "WQ-QueueEmpty", + gIOPMWorkQueue->fStatQueueEmpty); + setPMProperty(dict, "WQ-NoWorkDone", + gIOPMWorkQueue->fStatNoWorkDone); + } #endif - if (HasAdvisoryDesire && !gIOPMAdvisoryTickleEnabled) - { - // Don't report advisory tickle when it has no influence - dict->removeObject(gIOPMPowerClientAdvisoryTickle); - } + if (HasAdvisoryDesire && !gIOPMAdvisoryTickleEnabled) { + // Don't report advisory tickle when it has no influence + dict->removeObject(gIOPMPowerClientAdvisoryTickle); + } - ok = dict->serialize(s); - dict->release(); - } + ok = dict->serialize(s); + dict->release(); + } - return (ok ? kIOReturnSuccess : kIOReturnNoMemory); + return ok ? kIOReturnSuccess : kIOReturnNoMemory; } -bool IOServicePM::serialize( OSSerialize * s ) const +bool +IOServicePM::serialize( OSSerialize * s ) const { - IOReturn ret = kIOReturnNotReady; + IOReturn ret = kIOReturnNotReady; - if (gIOPMWatchDogThread == current_thread()) - { - // Calling without lock as this data is collected for debug purpose, before reboot. - // The workloop is probably already hung in state machine. - ret = gatedSerialize(s); - } - else if (gIOPMWorkLoop) - { - ret = gIOPMWorkLoop->runAction( - OSMemberFunctionCast(IOWorkLoop::Action, this, &IOServicePM::gatedSerialize), - (OSObject *) this, (void *) s); - } + if (gIOPMWatchDogThread == current_thread()) { + // Calling without lock as this data is collected for debug purpose, before reboot. + // The workloop is probably already hung in state machine. 
+ ret = gatedSerialize(s); + } else if (gIOPMWorkLoop) { + ret = gIOPMWorkLoop->runAction( + OSMemberFunctionCast(IOWorkLoop::Action, this, &IOServicePM::gatedSerialize), + (OSObject *) this, (void *) s); + } - return (kIOReturnSuccess == ret); + return kIOReturnSuccess == ret; } -void IOServicePM::pmPrint( - uint32_t event, - uintptr_t param1, - uintptr_t param2 ) const +void +IOServicePM::pmPrint( + uint32_t event, + uintptr_t param1, + uintptr_t param2 ) const { - gPlatform->PMLog(Name, event, param1, param2); + gPlatform->PMLog(Name, event, param1, param2); } -void IOServicePM::pmTrace( - uint32_t event, - uint32_t eventFunc, - uintptr_t param1, - uintptr_t param2 ) const +void +IOServicePM::pmTrace( + uint32_t event, + uint32_t eventFunc, + uintptr_t param1, + uintptr_t param2 ) const { - uintptr_t nameAsArg = 0; + uintptr_t nameAsArg = 0; - assert(event < KDBG_CODE_MAX); - assert((eventFunc & ~KDBG_FUNC_MASK) == 0); + assert(event < KDBG_CODE_MAX); + assert((eventFunc & ~KDBG_FUNC_MASK) == 0); - // Copy the first characters of the name into an uintptr_t. - // NULL termination is not required. - strncpy((char*)&nameAsArg, Name, sizeof(nameAsArg)); + // Copy the first characters of the name into an uintptr_t. + // NULL termination is not required. + strncpy((char*)&nameAsArg, Name, sizeof(nameAsArg)); - IOTimeStampConstant(IODBG_POWER(event) | eventFunc, nameAsArg, (uintptr_t)Owner->getRegistryEntryID(), (uintptr_t)(OBFUSCATE(param1)), (uintptr_t)(OBFUSCATE(param2))); + IOTimeStampConstant(IODBG_POWER(event) | eventFunc, nameAsArg, (uintptr_t)Owner->getRegistryEntryID(), (uintptr_t)(OBFUSCATE(param1)), (uintptr_t)(OBFUSCATE(param2))); } - diff --git a/iokit/Kernel/IOServicePMPrivate.h b/iokit/Kernel/IOServicePMPrivate.h index 26bfbee7f..9c4f3bb7f 100644 --- a/iokit/Kernel/IOServicePMPrivate.h +++ b/iokit/Kernel/IOServicePMPrivate.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,35 +37,35 @@ //****************************************************************************** enum { - /* Command Types */ - kIOPMRequestTypeInvalid = 0x00, - kIOPMRequestTypePMStop = 0x01, - kIOPMRequestTypeAddPowerChild1 = 0x02, - kIOPMRequestTypeAddPowerChild2 = 0x03, - kIOPMRequestTypeAddPowerChild3 = 0x04, - kIOPMRequestTypeRegisterPowerDriver = 0x05, - kIOPMRequestTypeAdjustPowerState = 0x06, - kIOPMRequestTypePowerDomainWillChange = 0x07, - kIOPMRequestTypePowerDomainDidChange = 0x08, - kIOPMRequestTypePowerOverrideOnPriv = 0x09, - kIOPMRequestTypePowerOverrideOffPriv = 0x0A, - kIOPMRequestTypeActivityTickle = 0x0B, - kIOPMRequestTypeRequestPowerState = 0x0C, - kIOPMRequestTypeSynchronizePowerTree = 0x0D, - kIOPMRequestTypeRequestPowerStateOverride = 0x0E, - kIOPMRequestTypeSetIdleTimerPeriod = 0x0F, - kIOPMRequestTypeIgnoreIdleTimer = 0x10, - kIOPMRequestTypeQuiescePowerTree = 0x11, - - /* Reply Types */ - kIOPMRequestTypeReplyStart = 0x80, - kIOPMRequestTypeAckPowerChange = 0x81, - kIOPMRequestTypeAckSetPowerState = 0x82, - kIOPMRequestTypeAllowPowerChange = 0x83, - kIOPMRequestTypeCancelPowerChange = 0x84, - kIOPMRequestTypeInterestChanged = 0x85, - kIOPMRequestTypeIdleCancel = 0x86, - kIOPMRequestTypeChildNotifyDelayCancel = 0x87 + /* Command Types */ + kIOPMRequestTypeInvalid = 0x00, + kIOPMRequestTypePMStop = 0x01, + kIOPMRequestTypeAddPowerChild1 = 0x02, + kIOPMRequestTypeAddPowerChild2 = 0x03, + kIOPMRequestTypeAddPowerChild3 = 0x04, + kIOPMRequestTypeRegisterPowerDriver = 0x05, + kIOPMRequestTypeAdjustPowerState = 0x06, + kIOPMRequestTypePowerDomainWillChange = 0x07, + kIOPMRequestTypePowerDomainDidChange = 0x08, + kIOPMRequestTypePowerOverrideOnPriv = 0x09, + kIOPMRequestTypePowerOverrideOffPriv = 0x0A, + kIOPMRequestTypeActivityTickle = 0x0B, + kIOPMRequestTypeRequestPowerState = 0x0C, + kIOPMRequestTypeSynchronizePowerTree = 0x0D, + kIOPMRequestTypeRequestPowerStateOverride = 0x0E, + kIOPMRequestTypeSetIdleTimerPeriod = 0x0F, + kIOPMRequestTypeIgnoreIdleTimer = 0x10, + kIOPMRequestTypeQuiescePowerTree = 0x11, + + /* Reply Types */ + kIOPMRequestTypeReplyStart = 0x80, + kIOPMRequestTypeAckPowerChange = 0x81, + kIOPMRequestTypeAckSetPowerState = 0x82, + kIOPMRequestTypeAllowPowerChange = 0x83, + kIOPMRequestTypeCancelPowerChange = 0x84, + kIOPMRequestTypeInterestChanged = 0x85, + kIOPMRequestTypeIdleCancel = 0x86, + kIOPMRequestTypeChildNotifyDelayCancel = 0x87 }; //****************************************************************************** @@ -76,78 +76,77 @@ struct IOPMActions; typedef void (*IOPMActionPowerChangeStart)( - void * target, - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags * changeFlags, - IOPMRequestTag requestTag ); + void * target, + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags * changeFlags, + IOPMRequestTag requestTag ); typedef void (*IOPMActionPowerChangeDone)( - void * target, - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex powerState, - IOPMPowerChangeFlags changeFlags, - IOPMRequestTag requestTag ); + void * target, + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex powerState, + IOPMPowerChangeFlags changeFlags, + IOPMRequestTag requestTag ); typedef void (*IOPMActionPowerChangeOverride)( - void * target, - IOService * service, - IOPMActions * actions, - IOPMPowerStateIndex * powerState, - IOPMPowerChangeFlags * changeFlags, - 
IOPMRequestTag requestTag ); + void * target, + IOService * service, + IOPMActions * actions, + IOPMPowerStateIndex * powerState, + IOPMPowerChangeFlags * changeFlags, + IOPMRequestTag requestTag ); typedef void (*IOPMActionActivityTickle)( - void * target, - IOService * service, - IOPMActions * actions ); + void * target, + IOService * service, + IOPMActions * actions ); typedef void (*IOPMActionUpdatePowerClient)( - void * target, - IOService * service, - IOPMActions * actions, - const OSSymbol * powerClient, - IOPMPowerStateIndex oldPowerState, - IOPMPowerStateIndex newPowerState -); + void * target, + IOService * service, + IOPMActions * actions, + const OSSymbol * powerClient, + IOPMPowerStateIndex oldPowerState, + IOPMPowerStateIndex newPowerState + ); struct IOPMActions { - void * target; - uint32_t parameter; - IOPMActionPowerChangeStart actionPowerChangeStart; - IOPMActionPowerChangeDone actionPowerChangeDone; - IOPMActionPowerChangeOverride actionPowerChangeOverride; - IOPMActionActivityTickle actionActivityTickle; - IOPMActionUpdatePowerClient actionUpdatePowerClient; + void * target; + uint32_t parameter; + IOPMActionPowerChangeStart actionPowerChangeStart; + IOPMActionPowerChangeDone actionPowerChangeDone; + IOPMActionPowerChangeOverride actionPowerChangeOverride; + IOPMActionActivityTickle actionActivityTickle; + IOPMActionUpdatePowerClient actionUpdatePowerClient; }; // IOPMActions parameter flags enum { - kPMActionsFlagIsDisplayWrangler = 0x00000100, - kPMActionsFlagIsGraphicsDevice = 0x00000200, - kPMActionsFlagIsAudioDevice = 0x00000400, - kPMActionsFlagLimitPower = 0x00000800, - kPMActionsPCIBitNumberMask = 0x000000ff + kPMActionsFlagIsDisplayWrangler = 0x00000100, + kPMActionsFlagIsGraphicsDevice = 0x00000200, + kPMActionsFlagIsAudioDevice = 0x00000400, + kPMActionsFlagLimitPower = 0x00000800, + kPMActionsPCIBitNumberMask = 0x000000ff }; //****************************************************************************** // Internal concise representation of IOPMPowerState -struct IOPMPSEntry -{ - IOPMPowerFlags capabilityFlags; - IOPMPowerFlags outputPowerFlags; - IOPMPowerFlags inputPowerFlags; - uint32_t staticPower; - uint32_t settleUpTime; - uint32_t settleDownTime; - IOPMPowerStateIndex stateOrder; - IOPMPowerStateIndex stateOrderToIndex; +struct IOPMPSEntry { + IOPMPowerFlags capabilityFlags; + IOPMPowerFlags outputPowerFlags; + IOPMPowerFlags inputPowerFlags; + uint32_t staticPower; + uint32_t settleUpTime; + uint32_t settleDownTime; + IOPMPowerStateIndex stateOrder; + IOPMPowerStateIndex stateOrderToIndex; }; //****************************************************************************** @@ -156,205 +155,205 @@ struct IOPMPSEntry class IOServicePM : public OSObject { - friend class IOService; - friend class IOPMWorkQueue; + friend class IOService; + friend class IOPMWorkQueue; - OSDeclareDefaultStructors( IOServicePM ) + OSDeclareDefaultStructors( IOServicePM ) private: - // Link IOServicePM objects on IOPMWorkQueue. - queue_chain_t WorkChain; +// Link IOServicePM objects on IOPMWorkQueue. + queue_chain_t WorkChain; - // Queue of IOPMRequest objects. - queue_head_t RequestHead; +// Queue of IOPMRequest objects. + queue_head_t RequestHead; - // IOService creator and owner. - IOService * Owner; +// IOService creator and owner. + IOService * Owner; - // List of interested drivers (protected by PMLock). - IOPMinformeeList * InterestedDrivers; +// List of interested drivers (protected by PMLock). 
+ IOPMinformeeList * InterestedDrivers; - // How long to wait for controlling driver to acknowledge. - IOReturn DriverTimer; +// How long to wait for controlling driver to acknowledge. + IOReturn DriverTimer; - // Current power management machine state. - uint32_t MachineState; +// Current power management machine state. + uint32_t MachineState; - thread_call_t AckTimer; - thread_call_t SettleTimer; - thread_call_t IdleTimer; - thread_call_t WatchdogTimer; - thread_call_t SpinDumpTimer; + thread_call_t AckTimer; + thread_call_t SettleTimer; + thread_call_t IdleTimer; + thread_call_t WatchdogTimer; + thread_call_t SpinDumpTimer; - IOLock * WatchdogLock; - OSArray * BlockedArray; - uint64_t PendingResponseDeadline; - uint64_t WatchdogDeadline; + IOLock * WatchdogLock; + OSArray * BlockedArray; + uint64_t PendingResponseDeadline; + uint64_t WatchdogDeadline; - // Settle time after changing power state. - uint32_t SettleTimeUS; - uint32_t IdleTimerGeneration; +// Settle time after changing power state. + uint32_t SettleTimeUS; + uint32_t IdleTimerGeneration; - // The flags describing current change note. - IOPMPowerChangeFlags HeadNoteChangeFlags; +// The flags describing current change note. + IOPMPowerChangeFlags HeadNoteChangeFlags; - // The new power state number being changed to. - IOPMPowerStateIndex HeadNotePowerState; +// The new power state number being changed to. + IOPMPowerStateIndex HeadNotePowerState; - // Points to the entry in the power state array. - IOPMPSEntry * HeadNotePowerArrayEntry; +// Points to the entry in the power state array. + IOPMPSEntry * HeadNotePowerArrayEntry; - // Power flags supplied by all parents (domain). - IOPMPowerFlags HeadNoteDomainFlags; +// Power flags supplied by all parents (domain). + IOPMPowerFlags HeadNoteDomainFlags; - // Power flags supplied by domain accounting for parent changes. - IOPMPowerFlags HeadNoteDomainTargetFlags; +// Power flags supplied by domain accounting for parent changes. + IOPMPowerFlags HeadNoteDomainTargetFlags; - // Connection attached to the changing parent. - IOPowerConnection * HeadNoteParentConnection; +// Connection attached to the changing parent. + IOPowerConnection * HeadNoteParentConnection; - // Power flags supplied by the changing parent. - IOPMPowerFlags HeadNoteParentFlags; +// Power flags supplied by the changing parent. + IOPMPowerFlags HeadNoteParentFlags; - // Number of acks still outstanding. - uint32_t HeadNotePendingAcks; +// Number of acks still outstanding. + uint32_t HeadNotePendingAcks; - // PM state lock. - IOLock * PMLock; - - unsigned int InitialPowerChange :1; - unsigned int InitialSetPowerState :1; - unsigned int DeviceOverrideEnabled :1; - unsigned int DoNotPowerDown :1; - unsigned int ParentsKnowState :1; - unsigned int StrictTreeOrder :1; - unsigned int IdleTimerStopped :1; - unsigned int AdjustPowerScheduled :1; - - unsigned int IsPreChange :1; - unsigned int DriverCallBusy :1; - unsigned int PCDFunctionOverride :1; - unsigned int IdleTimerIgnored :1; - unsigned int HasAdvisoryDesire :1; - unsigned int AdvisoryTickleUsed :1; - unsigned int ResetPowerStateOnWake :1; - - // Time of last device activity. - AbsoluteTime DeviceActiveTimestamp; - AbsoluteTime MaxPowerStateEntryTime; - AbsoluteTime MaxPowerStateExitTime; - - // Used to protect activity flag. - IOLock * ActivityLock; - - // Idle timer's period in seconds. 
- unsigned long IdleTimerPeriod; - unsigned long IdleTimerMinPowerState; - unsigned long NextIdleTimerPeriod; - AbsoluteTime IdleTimerStartTime; - - // Power state desired by a subclassed device object. - IOPMPowerStateIndex DeviceDesire; - - // This is the power state we desire currently. - IOPMPowerStateIndex DesiredPowerState; - - // This is what our parent thinks our need is. - IOPMPowerFlags PreviousRequestPowerFlags; - - // Cache result from getName(), used in logging. - const char * Name; +// PM state lock. + IOLock * PMLock; + + unsigned int InitialPowerChange :1; + unsigned int InitialSetPowerState :1; + unsigned int DeviceOverrideEnabled :1; + unsigned int DoNotPowerDown :1; + unsigned int ParentsKnowState :1; + unsigned int StrictTreeOrder :1; + unsigned int IdleTimerStopped :1; + unsigned int AdjustPowerScheduled :1; + + unsigned int IsPreChange :1; + unsigned int DriverCallBusy :1; + unsigned int PCDFunctionOverride :1; + unsigned int IdleTimerIgnored :1; + unsigned int HasAdvisoryDesire :1; + unsigned int AdvisoryTickleUsed :1; + unsigned int ResetPowerStateOnWake :1; + +// Time of last device activity. + AbsoluteTime DeviceActiveTimestamp; + AbsoluteTime MaxPowerStateEntryTime; + AbsoluteTime MaxPowerStateExitTime; + +// Used to protect activity flag. + IOLock * ActivityLock; + +// Idle timer's period in seconds. + unsigned long IdleTimerPeriod; + unsigned long IdleTimerMinPowerState; + unsigned long NextIdleTimerPeriod; + AbsoluteTime IdleTimerStartTime; + +// Power state desired by a subclassed device object. + IOPMPowerStateIndex DeviceDesire; + +// This is the power state we desire currently. + IOPMPowerStateIndex DesiredPowerState; + +// This is what our parent thinks our need is. + IOPMPowerFlags PreviousRequestPowerFlags; + +// Cache result from getName(), used in logging. + const char * Name; - // Number of power states in the power array. - IOPMPowerStateIndex NumberOfPowerStates; +// Number of power states in the power array. + IOPMPowerStateIndex NumberOfPowerStates; - // Ordered highest power state in the power array. - IOPMPowerStateIndex HighestPowerState; - - // Power state array. - IOPMPSEntry * PowerStates; - - // The controlling driver. - IOService * ControllingDriver; - - // Our current power state. - IOPMPowerStateIndex CurrentPowerState; - - // Logical OR of power flags for each power domain parent. - IOPMPowerFlags ParentsCurrentPowerFlags; - - // The highest power state we can achieve in current power domain. - IOPMPowerStateIndex MaxPowerState; - - // Logical OR of all output power flags in the power state array. - IOPMPowerFlags MergedOutputPowerFlags; - - // OSArray which manages responses from notified apps and clients. - OSArray * ResponseArray; - OSArray * NotifyClientArray; - - // Used to uniquely identify power management notification to apps and clients. - UInt16 SerialNumber; - - // Used to communicate desired function to tellClientsWithResponse(). - // This is used because it avoids changing the signatures of the affected virtual methods. 
- int OutOfBandParameter; - - AbsoluteTime DriverCallStartTime; - IOPMPowerFlags CurrentCapabilityFlags; - unsigned long CurrentPowerConsumption; - IOPMPowerStateIndex TempClampPowerState; - OSArray * NotifyChildArray; - OSDictionary * PowerClients; - thread_call_t DriverCallEntry; - void * DriverCallParamPtr; - IOItemCount DriverCallParamCount; - IOItemCount DriverCallParamSlots; - uint32_t DriverCallReason; - uint32_t OutOfBandMessage; - uint32_t TempClampCount; - uint32_t OverrideMaxPowerState; - uint32_t DeviceUsablePowerState; - - // Protected by ActivityLock - BEGIN - IOPMPowerStateIndex ActivityTicklePowerState; - IOPMPowerStateIndex AdvisoryTicklePowerState; - uint32_t ActivityTickleCount; - uint32_t DeviceWasActive : 1; - uint32_t AdvisoryTickled : 1; - // Protected by ActivityLock - END - - uint32_t WaitReason; - uint32_t SavedMachineState; - - // Protected by PMLock - BEGIN - struct { - uint32_t PMStop : 1; - uint32_t PMDriverCallWait : 1; - } LockedFlags; - - queue_head_t PMDriverCallQueue; - OSSet * InsertInterestSet; - OSSet * RemoveInterestSet; - - // IOReporter Data - uint32_t ReportClientCnt; - void * ReportBuf; - // Protected by PMLock - END +// Ordered highest power state in the power array. + IOPMPowerStateIndex HighestPowerState; + +// Power state array. + IOPMPSEntry * PowerStates; + +// The controlling driver. + IOService * ControllingDriver; + +// Our current power state. + IOPMPowerStateIndex CurrentPowerState; + +// Logical OR of power flags for each power domain parent. + IOPMPowerFlags ParentsCurrentPowerFlags; + +// The highest power state we can achieve in current power domain. + IOPMPowerStateIndex MaxPowerState; + +// Logical OR of all output power flags in the power state array. + IOPMPowerFlags MergedOutputPowerFlags; + +// OSArray which manages responses from notified apps and clients. + OSArray * ResponseArray; + OSArray * NotifyClientArray; + +// Used to uniquely identify power management notification to apps and clients. + UInt16 SerialNumber; + +// Used to communicate desired function to tellClientsWithResponse(). +// This is used because it avoids changing the signatures of the affected virtual methods. + int OutOfBandParameter; + + AbsoluteTime DriverCallStartTime; + IOPMPowerFlags CurrentCapabilityFlags; + unsigned long CurrentPowerConsumption; + IOPMPowerStateIndex TempClampPowerState; + OSArray * NotifyChildArray; + OSDictionary * PowerClients; + thread_call_t DriverCallEntry; + void * DriverCallParamPtr; + IOItemCount DriverCallParamCount; + IOItemCount DriverCallParamSlots; + uint32_t DriverCallReason; + uint32_t OutOfBandMessage; + uint32_t TempClampCount; + uint32_t OverrideMaxPowerState; + uint32_t DeviceUsablePowerState; + +// Protected by ActivityLock - BEGIN + IOPMPowerStateIndex ActivityTicklePowerState; + IOPMPowerStateIndex AdvisoryTicklePowerState; + uint32_t ActivityTickleCount; + uint32_t DeviceWasActive : 1; + uint32_t AdvisoryTickled : 1; +// Protected by ActivityLock - END + + uint32_t WaitReason; + uint32_t SavedMachineState; + +// Protected by PMLock - BEGIN + struct { + uint32_t PMStop : 1; + uint32_t PMDriverCallWait : 1; + } LockedFlags; + + queue_head_t PMDriverCallQueue; + OSSet * InsertInterestSet; + OSSet * RemoveInterestSet; + +// IOReporter Data + uint32_t ReportClientCnt; + void * ReportBuf; +// Protected by PMLock - END #if PM_VARS_SUPPORT - IOPMprot * PMVars; + IOPMprot * PMVars; #endif - IOPMActions PMActions; + IOPMActions PMActions; - // Serialize IOServicePM state for debug output. 
- IOReturn gatedSerialize( OSSerialize * s ) const; - virtual bool serialize( OSSerialize * s ) const APPLE_KEXT_OVERRIDE; +// Serialize IOServicePM state for debug output. + IOReturn gatedSerialize( OSSerialize * s ) const; + virtual bool serialize( OSSerialize * s ) const APPLE_KEXT_OVERRIDE; - // PM log and trace - void pmPrint( uint32_t event, uintptr_t param1, uintptr_t param2 ) const; - void pmTrace( uint32_t event, uint32_t eventFunc, uintptr_t param1, uintptr_t param2 ) const; +// PM log and trace + void pmPrint( uint32_t event, uintptr_t param1, uintptr_t param2 ) const; + void pmTrace( uint32_t event, uint32_t eventFunc, uintptr_t param1, uintptr_t param2 ) const; }; #define fOwner pwrMgt->Owner @@ -452,19 +451,19 @@ private: #define fPMActions pwrMgt->PMActions #define StateOrder(state) (((state) < fNumberOfPowerStates) \ - ? pwrMgt->PowerStates[(state)].stateOrder \ - : (state)) -#define StateMax(a,b) (StateOrder((a)) < StateOrder((b)) ? (b) : (a)) -#define StateMin(a,b) (StateOrder((a)) < StateOrder((b)) ? (a) : (b)) + ? pwrMgt->PowerStates[(state)].stateOrder \ + : (state)) +#define StateMax(a, b) (StateOrder((a)) < StateOrder((b)) ? (b) : (a)) +#define StateMin(a, b) (StateOrder((a)) < StateOrder((b)) ? (a) : (b)) #define kPowerStateZero (0) /* -When an IOService is waiting for acknowledgement to a power change -notification from an interested driver or the controlling driver, -the ack timer is ticking every tenth of a second. -(100000000 nanoseconds are one tenth of a second). -*/ + * When an IOService is waiting for acknowledgement to a power change + * notification from an interested driver or the controlling driver, + * the ack timer is ticking every tenth of a second. + * (100000000 nanoseconds are one tenth of a second). + */ #define ACK_TIMER_PERIOD 100000000 #if defined(__i386__) || defined(__x86_64__) @@ -502,7 +501,7 @@ the ack timer is ticking every tenth of a second. #define kIOPMExpireIdleTimer 0x8000 // Accelerate idle timer expiration #define kIOPMRootBroadcastFlags (kIOPMSynchronize | \ - kIOPMRootChangeUp | kIOPMRootChangeDown) + kIOPMRootChangeUp | kIOPMRootChangeDown) // Activity tickle request flags #define kTickleTypePowerDrop 0x01 @@ -511,49 +510,49 @@ the ack timer is ticking every tenth of a second. 
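/*
 * The reflowed comment above documents ACK_TIMER_PERIOD as 100000000 ns,
 * i.e. the ack timer ticks every tenth of a second. A quick compile-time
 * check of that arithmetic in portable C++ (illustrative, not xnu code):
 */
#include <chrono>
#include <cstdint>

constexpr std::uint64_t kAckTimerPeriodNS = 100000000ULL;
static_assert(std::chrono::nanoseconds(kAckTimerPeriodNS) ==
              std::chrono::milliseconds(100),
              "ack timer ticks every 100 ms");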
#define kTickleTypeAdvisory 0x08 enum { - kDriverCallInformPreChange, - kDriverCallInformPostChange, - kDriverCallSetPowerState, - kRootDomainInformPreChange + kDriverCallInformPreChange, + kDriverCallInformPostChange, + kDriverCallSetPowerState, + kRootDomainInformPreChange }; struct DriverCallParam { - OSObject * Target; - IOReturn Result; + OSObject * Target; + IOReturn Result; }; // values of OutOfBandParameter enum { - kNotifyApps, - kNotifyPriority, - kNotifyCapabilityChangeApps, - kNotifyCapabilityChangePriority + kNotifyApps, + kNotifyPriority, + kNotifyCapabilityChangeApps, + kNotifyCapabilityChangePriority }; typedef bool (*IOPMMessageFilter)( - void * target, void * object, void * arg1, void * arg2, void * arg3 ); + void * target, void * object, void * arg1, void * arg2, void * arg3 ); // used for applyToInterested struct IOPMInterestContext { - OSArray * responseArray; - OSArray * notifyClients; - uint16_t serialNumber; - uint8_t isPreChange; - uint8_t enableTracing; - uint32_t maxTimeRequested; - uint32_t messageType; - uint32_t notifyType; - IOService * us; - IOPMPowerStateIndex stateNumber; - IOPMPowerFlags stateFlags; - IOPMPowerChangeFlags changeFlags; - const char * errorLog; - IOPMMessageFilter messageFilter; + OSArray * responseArray; + OSArray * notifyClients; + uint16_t serialNumber; + uint8_t isPreChange; + uint8_t enableTracing; + uint32_t maxTimeRequested; + uint32_t messageType; + uint32_t notifyType; + IOService * us; + IOPMPowerStateIndex stateNumber; + IOPMPowerFlags stateFlags; + IOPMPowerChangeFlags changeFlags; + const char * errorLog; + IOPMMessageFilter messageFilter; }; // assertPMDriverCall() options enum { - kIOPMADC_NoInactiveCheck = 1 + kIOPMADC_NoInactiveCheck = 1 }; //****************************************************************************** @@ -572,94 +571,108 @@ extern const OSSymbol *gIOPMStatsDriverPSChangeSlow; class IOPMRequest : public IOCommand { - OSDeclareDefaultStructors( IOPMRequest ) + OSDeclareDefaultStructors( IOPMRequest ) protected: - IOService * fTarget; // request target - IOPMRequest * fRequestNext; // the next request in the chain - IOPMRequest * fRequestRoot; // the root request in the call tree - IOItemCount fWorkWaitCount; // execution blocked if non-zero - IOItemCount fFreeWaitCount; // completion blocked if non-zero - uint32_t fRequestType; // request type - bool fIsQuiesceBlocker; - - IOPMCompletionAction fCompletionAction; - void * fCompletionTarget; - void * fCompletionParam; + IOService * fTarget; // request target + IOPMRequest * fRequestNext; // the next request in the chain + IOPMRequest * fRequestRoot; // the root request in the call tree + IOItemCount fWorkWaitCount;// execution blocked if non-zero + IOItemCount fFreeWaitCount;// completion blocked if non-zero + uint32_t fRequestType; // request type + bool fIsQuiesceBlocker; + + IOPMCompletionAction fCompletionAction; + void * fCompletionTarget; + void * fCompletionParam; public: - uint32_t fRequestTag; - void * fArg0; - void * fArg1; - void * fArg2; - - inline bool isWorkBlocked( void ) const - { - return (fWorkWaitCount != 0); - } - - inline bool isFreeBlocked( void ) const - { - return (fFreeWaitCount != 0); - } - - inline IOPMRequest * getNextRequest( void ) const - { - return fRequestNext; - } - - inline IOPMRequest * getRootRequest( void ) const - { - if (fRequestRoot) return fRequestRoot; + uint32_t fRequestTag; + void * fArg0; + void * fArg1; + void * fArg2; + + inline bool + isWorkBlocked( void ) const + { + return fWorkWaitCount != 0; + } + + inline 
bool + isFreeBlocked( void ) const + { + return fFreeWaitCount != 0; + } + + inline IOPMRequest * + getNextRequest( void ) const + { + return fRequestNext; + } + + inline IOPMRequest * + getRootRequest( void ) const + { + if (fRequestRoot) { + return fRequestRoot; + } #if NOT_READY - if (fCompletionAction) return (IOPMRequest *) this; + if (fCompletionAction) { + return (IOPMRequest *) this; + } #endif - return 0; - } - - inline uint32_t getType( void ) const - { - return fRequestType; - } - - inline bool isReplyType( void ) const - { - return (fRequestType > kIOPMRequestTypeReplyStart); - } - - inline IOService * getTarget( void ) const - { - return fTarget; - } - - inline bool isQuiesceBlocker( void ) const - { - return fIsQuiesceBlocker; - } - - inline bool isQuiesceType( void ) const - { - return ((kIOPMRequestTypeQuiescePowerTree == fRequestType) && - (fCompletionAction != 0) && (fCompletionTarget != 0)); - } - - inline void installCompletionAction( - void * target, - IOPMCompletionAction action, - void * param ) - { - fCompletionTarget = target; - fCompletionAction = action; - fCompletionParam = param; - } - - static IOPMRequest * create( void ); - bool init( IOService * owner, IOOptionBits type ); - void reset( void ); - bool attachNextRequest( IOPMRequest * next ); - bool detachNextRequest( void ); - bool attachRootRequest( IOPMRequest * root ); - bool detachRootRequest( void ); + return 0; + } + + inline uint32_t + getType( void ) const + { + return fRequestType; + } + + inline bool + isReplyType( void ) const + { + return fRequestType > kIOPMRequestTypeReplyStart; + } + + inline IOService * + getTarget( void ) const + { + return fTarget; + } + + inline bool + isQuiesceBlocker( void ) const + { + return fIsQuiesceBlocker; + } + + inline bool + isQuiesceType( void ) const + { + return (kIOPMRequestTypeQuiescePowerTree == fRequestType) && + (fCompletionAction != 0) && (fCompletionTarget != 0); + } + + inline void + installCompletionAction( + void * target, + IOPMCompletionAction action, + void * param ) + { + fCompletionTarget = target; + fCompletionAction = action; + fCompletionParam = param; + } + + static IOPMRequest * create( void ); + bool init( IOService * owner, IOOptionBits type ); + void reset( void ); + bool attachNextRequest( IOPMRequest * next ); + bool detachNextRequest( void ); + bool attachRootRequest( IOPMRequest * root ); + bool detachRootRequest( void ); }; //****************************************************************************** @@ -668,25 +681,25 @@ public: class IOPMRequestQueue : public IOEventSource { - OSDeclareDefaultStructors( IOPMRequestQueue ) + OSDeclareDefaultStructors( IOPMRequestQueue ) public: - typedef bool (*Action)( IOService *, IOPMRequest *, IOPMRequestQueue * ); + typedef bool (*Action)( IOService *, IOPMRequest *, IOPMRequestQueue * ); protected: - queue_head_t fQueue; - IOLock * fLock; + queue_head_t fQueue; + IOLock * fLock; - enum { kMaxDequeueCount = 256 }; + enum { kMaxDequeueCount = 256 }; - virtual bool checkForWork( void ) APPLE_KEXT_OVERRIDE; - virtual void free( void ) APPLE_KEXT_OVERRIDE; - virtual bool init( IOService * inOwner, Action inAction ); + virtual bool checkForWork( void ) APPLE_KEXT_OVERRIDE; + virtual void free( void ) APPLE_KEXT_OVERRIDE; + virtual bool init( IOService * inOwner, Action inAction ); public: - static IOPMRequestQueue * create( IOService * inOwner, Action inAction ); - void queuePMRequest( IOPMRequest * request ); - void queuePMRequestChain( IOPMRequest ** requests, IOItemCount count ); + static 
IOPMRequestQueue * create( IOService * inOwner, Action inAction ); + void queuePMRequest( IOPMRequest * request ); + void queuePMRequestChain( IOPMRequest ** requests, IOItemCount count ); }; //****************************************************************************** @@ -697,40 +710,40 @@ public: class IOPMWorkQueue : public IOEventSource { - OSDeclareDefaultStructors( IOPMWorkQueue ) + OSDeclareDefaultStructors( IOPMWorkQueue ) public: - typedef bool (*Action)( IOService *, IOPMRequest *, IOPMWorkQueue * ); + typedef bool (*Action)( IOService *, IOPMRequest *, IOPMWorkQueue * ); #if WORK_QUEUE_STATS - uint64_t fStatCheckForWork; - uint64_t fStatScanEntries; - uint64_t fStatQueueEmpty; - uint64_t fStatNoWorkDone; + uint64_t fStatCheckForWork; + uint64_t fStatScanEntries; + uint64_t fStatQueueEmpty; + uint64_t fStatNoWorkDone; #endif protected: - queue_head_t fWorkQueue; - Action fInvokeAction; - Action fRetireAction; - uint32_t fQueueLength; - uint32_t fConsumerCount; - volatile uint32_t fProducerCount; - IOPMRequest * fQuiesceRequest; - AbsoluteTime fQuiesceStartTime; - AbsoluteTime fQuiesceFinishTime; - - virtual bool checkForWork( void ) APPLE_KEXT_OVERRIDE; - virtual bool init( IOService * inOwner, Action invoke, Action retire ); - bool checkRequestQueue( queue_head_t * queue, bool * empty ); + queue_head_t fWorkQueue; + Action fInvokeAction; + Action fRetireAction; + uint32_t fQueueLength; + uint32_t fConsumerCount; + volatile uint32_t fProducerCount; + IOPMRequest * fQuiesceRequest; + AbsoluteTime fQuiesceStartTime; + AbsoluteTime fQuiesceFinishTime; + + virtual bool checkForWork( void ) APPLE_KEXT_OVERRIDE; + virtual bool init( IOService * inOwner, Action invoke, Action retire ); + bool checkRequestQueue( queue_head_t * queue, bool * empty ); public: - static IOPMWorkQueue * create( IOService * inOwner, Action invoke, Action retire ); - bool queuePMRequest( IOPMRequest * request, IOServicePM * pwrMgt ); - void signalWorkAvailable( void ); - void incrementProducerCount( void ); - void attachQuiesceRequest( IOPMRequest * quiesceRequest ); - void finishQuiesceRequest( IOPMRequest * quiesceRequest ); + static IOPMWorkQueue * create( IOService * inOwner, Action invoke, Action retire ); + bool queuePMRequest( IOPMRequest * request, IOServicePM * pwrMgt ); + void signalWorkAvailable( void ); + void incrementProducerCount( void ); + void attachQuiesceRequest( IOPMRequest * quiesceRequest ); + void finishQuiesceRequest( IOPMRequest * quiesceRequest ); }; //****************************************************************************** @@ -739,20 +752,20 @@ public: class IOPMCompletionQueue : public IOEventSource { - OSDeclareDefaultStructors( IOPMCompletionQueue ) + OSDeclareDefaultStructors( IOPMCompletionQueue ) public: - typedef bool (*Action)( IOService *, IOPMRequest *, IOPMCompletionQueue * ); + typedef bool (*Action)( IOService *, IOPMRequest *, IOPMCompletionQueue * ); protected: - queue_head_t fQueue; + queue_head_t fQueue; - virtual bool checkForWork( void ) APPLE_KEXT_OVERRIDE; - virtual bool init( IOService * inOwner, Action inAction ); + virtual bool checkForWork( void ) APPLE_KEXT_OVERRIDE; + virtual bool init( IOService * inOwner, Action inAction ); public: - static IOPMCompletionQueue * create( IOService * inOwner, Action inAction ); - bool queuePMRequest( IOPMRequest * request ); + static IOPMCompletionQueue * create( IOService * inOwner, Action inAction ); + bool queuePMRequest( IOPMRequest * request ); }; #endif /* !_IOKIT_IOSERVICEPMPRIVATE_H */ diff --git 
a/iokit/Kernel/IOServicePrivate.h b/iokit/Kernel/IOServicePrivate.h index 5b420452e..91bc2a1d1 100644 --- a/iokit/Kernel/IOServicePrivate.h +++ b/iokit/Kernel/IOServicePrivate.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * HISTORY * @@ -38,191 +38,189 @@ // options for getExistingServices() enum { - kIONotifyOnce = 0x00000001, - kIOServiceExistingSet = 0x00000002, - kIOServiceChangesOK = 0x00000004, - kIOServiceInternalDone = 0x00000008, - kIOServiceClassDone = 0x00000010, + kIONotifyOnce = 0x00000001, + kIOServiceExistingSet = 0x00000002, + kIOServiceChangesOK = 0x00000004, + kIOServiceInternalDone = 0x00000008, + kIOServiceClassDone = 0x00000010, }; // masks for __state[1] enum { - kIOServiceBusyStateMask = 0x000003ff, - kIOServiceBusyMax = 1023, - kIOServiceNeedConfigState = 0x80000000, - kIOServiceSynchronousState = 0x40000000, - kIOServiceModuleStallState = 0x20000000, - kIOServiceBusyWaiterState = 0x10000000, - - kIOServiceSyncPubState = 0x08000000, - kIOServiceConfigState = 0x04000000, - kIOServiceStartState = 0x02000000, - kIOServiceTermPhase2State = 0x01000000, - kIOServiceTermPhase3State = 0x00800000, - kIOServiceTermPhase1State = 0x00400000, - kIOServiceTerm1WaiterState = 0x00200000, - kIOServiceRecursing = 0x00100000, - kIOServiceNeedWillTerminate = 0x00080000, - kIOServiceWaitDetachState = 0x00040000, - kIOServiceConfigRunning = 0x00020000, - kIOServiceFinalized = 0x00010000, + kIOServiceBusyStateMask = 0x000003ff, + kIOServiceBusyMax = 1023, + kIOServiceNeedConfigState = 0x80000000, + kIOServiceSynchronousState = 0x40000000, + kIOServiceModuleStallState = 0x20000000, + kIOServiceBusyWaiterState = 0x10000000, + + kIOServiceSyncPubState = 0x08000000, + kIOServiceConfigState = 0x04000000, + kIOServiceStartState = 0x02000000, + kIOServiceTermPhase2State = 0x01000000, + kIOServiceTermPhase3State = 0x00800000, + kIOServiceTermPhase1State = 0x00400000, + kIOServiceTerm1WaiterState = 0x00200000, + kIOServiceRecursing = 0x00100000, + kIOServiceNeedWillTerminate = 0x00080000, + kIOServiceWaitDetachState = 0x00040000, + kIOServiceConfigRunning = 0x00020000, + kIOServiceFinalized = 0x00010000, }; // notify state enum { - kIOServiceNotifyEnable = 0x00000001, - kIOServiceNotifyWaiter = 0x00000002, - kIOServiceNotifyBlock = 0x00000004 + 
kIOServiceNotifyEnable = 0x00000001, + kIOServiceNotifyWaiter = 0x00000002, + kIOServiceNotifyBlock = 0x00000004 }; -struct _IOServiceNotifierInvocation -{ - IOThread thread; - queue_chain_t link; +struct _IOServiceNotifierInvocation { + IOThread thread; + queue_chain_t link; }; class _IOServiceNotifier : public IONotifier { - friend class IOService; + friend class IOService; - OSDeclareDefaultStructors(_IOServiceNotifier) + OSDeclareDefaultStructors(_IOServiceNotifier) public: - OSOrderedSet * whence; - - OSDictionary * matching; - const OSSymbol * type; - IOServiceMatchingNotificationHandler handler; - IOServiceNotificationHandler compatHandler; - void * target; - void * ref; - SInt32 priority; - queue_head_t handlerInvocations; - IOOptionBits state; - - virtual void free() APPLE_KEXT_OVERRIDE; - virtual void remove() APPLE_KEXT_OVERRIDE; - virtual bool disable() APPLE_KEXT_OVERRIDE; - virtual void enable( bool was ) APPLE_KEXT_OVERRIDE; - virtual void wait(); + OSOrderedSet * whence; + + OSDictionary * matching; + const OSSymbol * type; + IOServiceMatchingNotificationHandler handler; + IOServiceNotificationHandler compatHandler; + void * target; + void * ref; + SInt32 priority; + queue_head_t handlerInvocations; + IOOptionBits state; + + virtual void free() APPLE_KEXT_OVERRIDE; + virtual void remove() APPLE_KEXT_OVERRIDE; + virtual bool disable() APPLE_KEXT_OVERRIDE; + virtual void enable( bool was ) APPLE_KEXT_OVERRIDE; + virtual void wait(); }; class _IOServiceInterestNotifier : public IONotifier { - friend class IOService; + friend class IOService; - OSDeclareDefaultStructors(_IOServiceInterestNotifier) + OSDeclareDefaultStructors(_IOServiceInterestNotifier) public: - queue_chain_t chain; - - IOServiceInterestHandler handler; - void * target; - void * ref; - queue_head_t handlerInvocations; - IOOptionBits state; - - virtual void free() APPLE_KEXT_OVERRIDE; - virtual void remove() APPLE_KEXT_OVERRIDE; - virtual bool disable() APPLE_KEXT_OVERRIDE; - virtual void enable( bool was ) APPLE_KEXT_OVERRIDE; - virtual void wait(); - virtual bool init() APPLE_KEXT_OVERRIDE; + queue_chain_t chain; + + IOServiceInterestHandler handler; + void * target; + void * ref; + queue_head_t handlerInvocations; + IOOptionBits state; + + virtual void free() APPLE_KEXT_OVERRIDE; + virtual void remove() APPLE_KEXT_OVERRIDE; + virtual bool disable() APPLE_KEXT_OVERRIDE; + virtual void enable( bool was ) APPLE_KEXT_OVERRIDE; + virtual void wait(); + virtual bool init() APPLE_KEXT_OVERRIDE; }; class _IOServiceNullNotifier : public IONotifier { - OSDeclareDefaultStructors(_IOServiceNullNotifier) + OSDeclareDefaultStructors(_IOServiceNullNotifier) public: - virtual void taggedRetain(const void *tag) const APPLE_KEXT_OVERRIDE; - virtual void taggedRelease(const void *tag, const int when) const APPLE_KEXT_OVERRIDE; - virtual void free() APPLE_KEXT_OVERRIDE; - virtual void remove() APPLE_KEXT_OVERRIDE; - virtual bool disable() APPLE_KEXT_OVERRIDE; - virtual void enable( bool was ) APPLE_KEXT_OVERRIDE; - virtual void wait(); + virtual void taggedRetain(const void *tag) const APPLE_KEXT_OVERRIDE; + virtual void taggedRelease(const void *tag, const int when) const APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; + virtual void remove() APPLE_KEXT_OVERRIDE; + virtual bool disable() APPLE_KEXT_OVERRIDE; + virtual void enable( bool was ) APPLE_KEXT_OVERRIDE; + virtual void wait(); }; class _IOConfigThread : public OSObject { - friend class IOService; + friend class IOService; - 
OSDeclareDefaultStructors(_IOConfigThread) + OSDeclareDefaultStructors(_IOConfigThread) public: - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; - static void configThread( void ); - static void main( void * arg, wait_result_t result ); + static void configThread( void ); + static void main( void * arg, wait_result_t result ); }; enum { - kMaxConfigThreads = CONFIG_MAX_THREADS, + kMaxConfigThreads = CONFIG_MAX_THREADS, }; enum { - kMatchNubJob = 10, + kMatchNubJob = 10, }; class _IOServiceJob : public OSObject { - friend class IOService; + friend class IOService; - OSDeclareDefaultStructors(_IOServiceJob) + OSDeclareDefaultStructors(_IOServiceJob) public: - int type; - IOService * nub; - IOOptionBits options; - - static _IOServiceJob * startJob( IOService * nub, int type, - IOOptionBits options = 0 ); - static void pingConfig( class _IOServiceJob * job ); + int type; + IOService * nub; + IOOptionBits options; + static _IOServiceJob * startJob( IOService * nub, int type, + IOOptionBits options = 0 ); + static void pingConfig( class _IOServiceJob * job ); }; class IOResources : public IOService { - friend class IOService; + friend class IOService; - OSDeclareDefaultStructors(IOResources) + OSDeclareDefaultStructors(IOResources) public: - static IOService * resources( void ); - virtual bool init( OSDictionary * dictionary = 0 ) APPLE_KEXT_OVERRIDE; - virtual IOReturn newUserClient(task_t owningTask, void * securityID, - UInt32 type, OSDictionary * properties, - IOUserClient ** handler) APPLE_KEXT_OVERRIDE; - virtual IOWorkLoop * getWorkLoop( ) const APPLE_KEXT_OVERRIDE; - virtual bool matchPropertyTable( OSDictionary * table ) APPLE_KEXT_OVERRIDE; - virtual IOReturn setProperties( OSObject * properties ) APPLE_KEXT_OVERRIDE; + static IOService * resources( void ); + virtual bool init( OSDictionary * dictionary = 0 ) APPLE_KEXT_OVERRIDE; + virtual IOReturn newUserClient(task_t owningTask, void * securityID, + UInt32 type, OSDictionary * properties, + IOUserClient ** handler) APPLE_KEXT_OVERRIDE; + virtual IOWorkLoop * getWorkLoop() const APPLE_KEXT_OVERRIDE; + virtual bool matchPropertyTable( OSDictionary * table ) APPLE_KEXT_OVERRIDE; + virtual IOReturn setProperties( OSObject * properties ) APPLE_KEXT_OVERRIDE; }; class _IOOpenServiceIterator : public OSIterator { - friend class IOService; + friend class IOService; - OSDeclareDefaultStructors(_IOOpenServiceIterator) + OSDeclareDefaultStructors(_IOOpenServiceIterator) - OSIterator * iter; - const IOService * client; - const IOService * provider; - IOService * last; + OSIterator * iter; + const IOService * client; + const IOService * provider; + IOService * last; public: - static OSIterator * iterator( OSIterator * _iter, - const IOService * client, - const IOService * provider ); - virtual void free() APPLE_KEXT_OVERRIDE; - virtual void reset() APPLE_KEXT_OVERRIDE; - virtual bool isValid() APPLE_KEXT_OVERRIDE; - virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; + static OSIterator * iterator(LIBKERN_CONSUMED OSIterator * _iter, + const IOService * client, + const IOService * provider ); + virtual void free() APPLE_KEXT_OVERRIDE; + virtual void reset() APPLE_KEXT_OVERRIDE; + virtual bool isValid() APPLE_KEXT_OVERRIDE; + virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; }; extern const OSSymbol * gIOConsoleUsersKey; extern const OSSymbol * gIOConsoleSessionUIDKey; -extern const OSSymbol * gIOConsoleSessionAuditIDKey; +extern const OSSymbol * gIOConsoleSessionAuditIDKey; extern const OSSymbol * 
gIOConsoleSessionOnConsoleKey; extern const OSSymbol * gIOConsoleSessionSecureInputPIDKey; @@ -234,4 +232,3 @@ extern const OSSymbol * gIOConsoleSessionSecureInputPIDKey; (sizeof(IOInterruptSourcePrivate) + sizeof(IOInterruptSource)) #endif /* ! _IOKIT_IOSERVICEPRIVATE_H */ - diff --git a/iokit/Kernel/IOSharedDataQueue.cpp b/iokit/Kernel/IOSharedDataQueue.cpp index 385393f65..797583bf8 100644 --- a/iokit/Kernel/IOSharedDataQueue.cpp +++ b/iokit/Kernel/IOSharedDataQueue.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -45,245 +45,240 @@ OSDefineMetaClassAndStructors(IOSharedDataQueue, IODataQueue) IOSharedDataQueue *IOSharedDataQueue::withCapacity(UInt32 size) { - IOSharedDataQueue *dataQueue = new IOSharedDataQueue; + IOSharedDataQueue *dataQueue = new IOSharedDataQueue; - if (dataQueue) { - if (!dataQueue->initWithCapacity(size)) { - dataQueue->release(); - dataQueue = 0; - } - } + if (dataQueue) { + if (!dataQueue->initWithCapacity(size)) { + dataQueue->release(); + dataQueue = 0; + } + } - return dataQueue; + return dataQueue; } -IOSharedDataQueue *IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize) +IOSharedDataQueue * +IOSharedDataQueue::withEntries(UInt32 numEntries, UInt32 entrySize) { - IOSharedDataQueue *dataQueue = new IOSharedDataQueue; + IOSharedDataQueue *dataQueue = new IOSharedDataQueue; - if (dataQueue) { - if (!dataQueue->initWithEntries(numEntries, entrySize)) { - dataQueue->release(); - dataQueue = 0; - } - } + if (dataQueue) { + if (!dataQueue->initWithEntries(numEntries, entrySize)) { + dataQueue->release(); + dataQueue = 0; + } + } - return dataQueue; + return dataQueue; } -Boolean IOSharedDataQueue::initWithCapacity(UInt32 size) +Boolean +IOSharedDataQueue::initWithCapacity(UInt32 size) { - IODataQueueAppendix * appendix; - vm_size_t allocSize; - - if (!super::init()) { - return false; - } - - _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData)); - if (!_reserved) { - return false; - } - - if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) { - return false; - } - - allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE); - - if (allocSize < size) { - return false; - } - - dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE); - if (dataQueue == 0) { - return false; - } - bzero(dataQueue, allocSize); - - dataQueue->queueSize = size; + 
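// [Editorial sketch, not part of the original xnu patch.] The size
// arithmetic in initWithCapacity() implies a single page-rounded shared
// allocation laid out as:
//
//     [IODataQueueMemory header][queue data: `size` bytes][IODataQueueAppendix]
//
// and the guard on `size` is a 32-bit overflow check. A minimal
// user-space analogue of that guard (function name hypothetical):
//
//     static bool queueAllocSizeOK(uint32_t size) {
//         // reject sizes whose header + data + appendix sum would wrap UInt32
//         return size <= UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE
//                               - DATA_QUEUE_MEMORY_APPENDIX_SIZE;
//     }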
IODataQueueAppendix * appendix; + vm_size_t allocSize; + + if (!super::init()) { + return false; + } + + _reserved = (ExpansionData *)IOMalloc(sizeof(struct ExpansionData)); + if (!_reserved) { + return false; + } + + if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE - DATA_QUEUE_MEMORY_APPENDIX_SIZE) { + return false; + } + + allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE); + + if (allocSize < size) { + return false; + } + + dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE); + if (dataQueue == 0) { + return false; + } + bzero(dataQueue, allocSize); + + dataQueue->queueSize = size; // dataQueue->head = 0; // dataQueue->tail = 0; - if (!setQueueSize(size)) { - return false; - } - - appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE); - appendix->version = 0; + if (!setQueueSize(size)) { + return false; + } + + appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE); + appendix->version = 0; - if (!notifyMsg) { - notifyMsg = IOMalloc(sizeof(mach_msg_header_t)); - if (!notifyMsg) - return false; - } - bzero(notifyMsg, sizeof(mach_msg_header_t)); + if (!notifyMsg) { + notifyMsg = IOMalloc(sizeof(mach_msg_header_t)); + if (!notifyMsg) { + return false; + } + } + bzero(notifyMsg, sizeof(mach_msg_header_t)); - setNotificationPort(MACH_PORT_NULL); + setNotificationPort(MACH_PORT_NULL); - return true; + return true; } -void IOSharedDataQueue::free() +void +IOSharedDataQueue::free() { - if (dataQueue) { - IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE)); - dataQueue = NULL; - if (notifyMsg) { - IOFree(notifyMsg, sizeof(mach_msg_header_t)); - notifyMsg = NULL; - } - } - - if (_reserved) { - IOFree (_reserved, sizeof(struct ExpansionData)); - _reserved = NULL; - } - - super::free(); + if (dataQueue) { + IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE)); + dataQueue = NULL; + if (notifyMsg) { + IOFree(notifyMsg, sizeof(mach_msg_header_t)); + notifyMsg = NULL; + } + } + + if (_reserved) { + IOFree(_reserved, sizeof(struct ExpansionData)); + _reserved = NULL; + } + + super::free(); } -IOMemoryDescriptor *IOSharedDataQueue::getMemoryDescriptor() +IOMemoryDescriptor * +IOSharedDataQueue::getMemoryDescriptor() { - IOMemoryDescriptor *descriptor = 0; + IOMemoryDescriptor *descriptor = 0; - if (dataQueue != 0) { - descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn); - } + if (dataQueue != 0) { + descriptor = IOMemoryDescriptor::withAddress(dataQueue, getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE, kIODirectionOutIn); + } - return descriptor; + return descriptor; } -IODataQueueEntry * IOSharedDataQueue::peek() +IODataQueueEntry * +IOSharedDataQueue::peek() { - IODataQueueEntry *entry = 0; - UInt32 headOffset; - UInt32 tailOffset; - - if (!dataQueue) { - return NULL; - } - - // Read head and tail with acquire barrier - // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers - headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); - tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE); - - if (headOffset != tailOffset) { - IODataQueueEntry * head = 0; - UInt32 headSize = 0; - UInt32 headOffset = 
dataQueue->head; - UInt32 queueSize = getQueueSize(); - - if (headOffset >= queueSize) { - return NULL; - } - - head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset); - headSize = head->size; - - // Check if there's enough room before the end of the queue for a header. - // If there is room, check if there's enough room to hold the header and - // the data. - - if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || - (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) || - (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) || - (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) { - // No room for the header or the data, wrap to the beginning of the queue. - // Note: wrapping even with the UINT32_MAX checks, as we have to support - // queueSize of UINT32_MAX - entry = dataQueue->queue; - } else { - entry = head; - } - } - - return entry; + IODataQueueEntry *entry = 0; + UInt32 headOffset; + UInt32 tailOffset; + + if (!dataQueue) { + return NULL; + } + + // Read head and tail with acquire barrier + // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers + headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); + tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE); + + if (headOffset != tailOffset) { + volatile IODataQueueEntry * head = 0; + UInt32 headSize = 0; + UInt32 headOffset = dataQueue->head; + UInt32 queueSize = getQueueSize(); + + if (headOffset >= queueSize) { + return NULL; + } + + head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset); + headSize = head->size; + + // Check if there's enough room before the end of the queue for a header. + // If there is room, check if there's enough room to hold the header and + // the data. + + if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || + (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) || + (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) || + (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) { + // No room for the header or the data, wrap to the beginning of the queue. + // Note: wrapping even with the UINT32_MAX checks, as we have to support + // queueSize of UINT32_MAX + entry = dataQueue->queue; + } else { + entry = (IODataQueueEntry *)head; + } + } + + return entry; } -Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize) +Boolean +IOSharedDataQueue::enqueue(void * data, UInt32 dataSize) { - UInt32 head; - UInt32 tail; - UInt32 newTail; - const UInt32 entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE; - IODataQueueEntry * entry; - - // Force a single read of head and tail - // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers - tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED); - head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE); - - // Check for overflow of entrySize - if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) { - return false; - } - // Check for underflow of (getQueueSize() - tail) - if (getQueueSize() < tail || getQueueSize() < head) { - return false; - } - - if ( tail >= head ) - { - // Is there enough room at the end for the entry? 
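// [Editorial sketch, not part of the original xnu patch.] peek() and
// enqueue() above pair a relaxed load of the offset each side owns with
// an acquire load of the offset the other side publishes, and enqueue()
// finishes with a release store of `tail`. That is the standard
// single-producer/single-consumer ring-buffer protocol: the acquire
// load synchronizes-with the release store, so entry bytes written
// before the store are visible after the load. Portable C11 analogue
// (names hypothetical):
//
//     /* producer */
//     fill_entry(q, t, data, len);                      /* bytes first */
//     atomic_store_explicit(&q->tail, newTail, memory_order_release);
//
//     /* consumer */
//     uint32_t h = atomic_load_explicit(&q->head, memory_order_relaxed);
//     uint32_t t = atomic_load_explicit(&q->tail, memory_order_acquire);
//     if (h != t) { /* the entry at offset h is fully visible here */ }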
- if ((entrySize <= UINT32_MAX - tail) && - ((tail + entrySize) <= getQueueSize()) ) - { - entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); - - entry->size = dataSize; - memcpy(&entry->data, data, dataSize); - - // The tail can be out of bound when the size of the new entry - // exactly matches the available space at the end of the queue. - // The tail can range from 0 to dataQueue->queueSize inclusive. - - newTail = tail + entrySize; - } - else if ( head > entrySize ) // Is there enough room at the beginning? - { - // Wrap around to the beginning, but do not allow the tail to catch - // up to the head. - - dataQueue->queue->size = dataSize; - - // We need to make sure that there is enough room to set the size before - // doing this. The user client checks for this and will look for the size - // at the beginning if there isn't room for it at the end. - - if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE ) - { - ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize; - } - - memcpy(&dataQueue->queue->data, data, dataSize); - newTail = entrySize; - } - else - { - return false; // queue is full - } - } - else - { - // Do not allow the tail to catch up to the head when the queue is full. - // That's why the comparison uses a '>' rather than '>='. - - if ( (head - tail) > entrySize ) - { - entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); - - entry->size = dataSize; - memcpy(&entry->data, data, dataSize); - newTail = tail + entrySize; - } - else - { - return false; // queue is full - } - } + UInt32 head; + UInt32 tail; + UInt32 newTail; + const UInt32 entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE; + IODataQueueEntry * entry; + + // Force a single read of head and tail + // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers + tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED); + head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE); + + // Check for overflow of entrySize + if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) { + return false; + } + // Check for underflow of (getQueueSize() - tail) + if (getQueueSize() < tail || getQueueSize() < head) { + return false; + } + + if (tail >= head) { + // Is there enough room at the end for the entry? + if ((entrySize <= UINT32_MAX - tail) && + ((tail + entrySize) <= getQueueSize())) { + entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); + + entry->size = dataSize; + memcpy(&entry->data, data, dataSize); + + // The tail can be out of bound when the size of the new entry + // exactly matches the available space at the end of the queue. + // The tail can range from 0 to dataQueue->queueSize inclusive. + + newTail = tail + entrySize; + } else if (head > entrySize) { // Is there enough room at the beginning? + // Wrap around to the beginning, but do not allow the tail to catch + // up to the head. + + dataQueue->queue->size = dataSize; + + // We need to make sure that there is enough room to set the size before + // doing this. The user client checks for this and will look for the size + // at the beginning if there isn't room for it at the end. 
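// [Editorial sketch, not part of the original xnu patch.] The
// wrap-around branch described above is a handshake with the reader:
// when the entry does not fit contiguously before the end of the
// buffer, the writer stores the entry at offset 0 and, if at least an
// entry header fits at the old tail, also writes the size there so a
// reader landing on the old tail can tell the real entry wrapped. In
// outline (helper names hypothetical):
//
//     if (entrySize <= UINT32_MAX - tail && tail + entrySize <= queueSize) {
//         writeEntryAt(tail, data, dataSize);       // contiguous case
//         newTail = tail + entrySize;
//     } else if (head > entrySize) {                // strict '>' keeps the
//         if (queueSize - tail >= ENTRY_HEADER)     // tail from catching
//             writeSizeMarkerAt(tail, dataSize);    // up with the head
//         writeEntryAt(0, data, dataSize);          // wrapped case
//         newTail = entrySize;
//     } else {
//         return false;                             // queue is full
//     }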
+ + if ((getQueueSize() - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) { + ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize; + } + + memcpy(&dataQueue->queue->data, data, dataSize); + newTail = entrySize; + } else { + return false; // queue is full + } + } else { + // Do not allow the tail to catch up to the head when the queue is full. + // That's why the comparison uses a '>' rather than '>='. + + if ((head - tail) > entrySize) { + entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail); + + entry->size = dataSize; + memcpy(&entry->data, data, dataSize); + newTail = tail + entrySize; + } else { + return false; // queue is full + } + } // Publish the data we just enqueued __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE); @@ -309,63 +304,64 @@ Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize) return true; } -Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize) +Boolean +IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize) { - Boolean retVal = TRUE; - IODataQueueEntry * entry = 0; - UInt32 entrySize = 0; - UInt32 headOffset = 0; - UInt32 tailOffset = 0; - UInt32 newHeadOffset = 0; + Boolean retVal = TRUE; + volatile IODataQueueEntry * entry = 0; + UInt32 entrySize = 0; + UInt32 headOffset = 0; + UInt32 tailOffset = 0; + UInt32 newHeadOffset = 0; if (!dataQueue || (data && !dataSize)) { - return false; - } - - // Read head and tail with acquire barrier - // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers - headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); - tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE); - - if (headOffset != tailOffset) { - IODataQueueEntry * head = 0; - UInt32 headSize = 0; - UInt32 queueSize = getQueueSize(); - - if (headOffset > queueSize) { - return false; - } - - head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset); - headSize = head->size; - - // we wrapped around to beginning, so read from there - // either there was not even room for the header - if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || - (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) || - // or there was room for the header, but not for the data - (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) || - (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) { - // Note: we have to wrap to the beginning even with the UINT32_MAX checks - // because we have to support a queueSize of UINT32_MAX. 
- entry = dataQueue->queue; - entrySize = entry->size; - if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || - (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) { - return false; - } - newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE; - // else it is at the end - } else { - entry = head; - entrySize = entry->size; - if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || - (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) || - (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) { - return false; - } - newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE; - } + return false; + } + + // Read head and tail with acquire barrier + // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers + headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED); + tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE); + + if (headOffset != tailOffset) { + volatile IODataQueueEntry * head = 0; + UInt32 headSize = 0; + UInt32 queueSize = getQueueSize(); + + if (headOffset > queueSize) { + return false; + } + + head = (IODataQueueEntry *)((char *)dataQueue->queue + headOffset); + headSize = head->size; + + // we wrapped around to beginning, so read from there + // either there was not even room for the header + if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || + (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) || + // or there was room for the header, but not for the data + (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) || + (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) { + // Note: we have to wrap to the beginning even with the UINT32_MAX checks + // because we have to support a queueSize of UINT32_MAX. 
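// [Editorial note, not part of the original xnu patch.] Every bounds
// test in peek()/dequeue() is split into an overflow guard followed by
// the real comparison because all offsets are UInt32 and queueSize may
// be as large as UINT32_MAX. Testing
//     headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize
// alone would be unsafe: for headOffset near UINT32_MAX the 32-bit sum
// wraps past zero and compares as small, falsely passing. The preceding
//     headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE
// guard rejects exactly the inputs that would make that sum wrap.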
+ entry = dataQueue->queue; + entrySize = entry->size; + if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || + (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) { + return false; + } + newHeadOffset = entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE; + // else it is at the end + } else { + entry = head; + entrySize = entry->size; + if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) || + (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) || + (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) { + return false; + } + newHeadOffset = headOffset + entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE; + } } else { // empty queue return false; @@ -376,7 +372,7 @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize) // not enough space return false; } - memcpy(data, &(entry->data), entrySize); + memcpy(data, (void *)entry->data, entrySize); *dataSize = entrySize; } @@ -390,25 +386,27 @@ Boolean IOSharedDataQueue::dequeue(void *data, UInt32 *dataSize) // __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); } - - return retVal; + + return retVal; } -UInt32 IOSharedDataQueue::getQueueSize() +UInt32 +IOSharedDataQueue::getQueueSize() { - if (!_reserved) { - return 0; - } - return _reserved->queueSize; + if (!_reserved) { + return 0; + } + return _reserved->queueSize; } -Boolean IOSharedDataQueue::setQueueSize(UInt32 size) +Boolean +IOSharedDataQueue::setQueueSize(UInt32 size) { - if (!_reserved) { - return false; - } - _reserved->queueSize = size; - return true; + if (!_reserved) { + return false; + } + _reserved->queueSize = size; + return true; } OSMetaClassDefineReservedUnused(IOSharedDataQueue, 0); diff --git a/iokit/Kernel/IOSimpleReporter.cpp b/iokit/Kernel/IOSimpleReporter.cpp index de430f566..7807763a9 100644 --- a/iokit/Kernel/IOSimpleReporter.cpp +++ b/iokit/Kernel/IOSimpleReporter.cpp @@ -1,8 +1,8 @@ /* * Copyright (c) 2012-2013 Apple Computer, Inc. All Rights Reserved. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -36,126 +36,127 @@ OSDefineMetaClassAndStructors(IOSimpleReporter, IOReporter); /* static */ IOSimpleReporter* IOSimpleReporter::with(IOService *reportingService, - IOReportCategories categories, - IOReportUnit unit) + IOReportCategories categories, + IOReportUnit unit) { - IOSimpleReporter *reporter, *rval = NULL; + IOSimpleReporter *reporter, *rval = NULL; - // kprintf("%s\n", __func__); // can't IORLOG() from static + // kprintf("%s\n", __func__); // can't IORLOG() from static - reporter = new IOSimpleReporter; - if (!reporter) goto finish; + reporter = new IOSimpleReporter; + if (!reporter) { + goto finish; + } - if (!reporter->initWith(reportingService, categories, unit)) { - goto finish; - } - - // success - rval = reporter; - + if (!reporter->initWith(reportingService, categories, unit)) { + goto finish; + } + + // success + rval = reporter; + finish: - if (!rval) { - OSSafeReleaseNULL(reporter); - } - - return rval; + if (!rval) { + OSSafeReleaseNULL(reporter); + } + + return rval; } bool IOSimpleReporter::initWith(IOService *reportingService, - IOReportCategories categories, - IOReportUnit unit) + IOReportCategories categories, + IOReportUnit unit) { - // fully specify the channel type for the superclass - IOReportChannelType channelType = { - .categories = categories, - .report_format = kIOReportFormatSimple, - .nelements = 1, - .element_idx = 0 - }; - - return super::init(reportingService, channelType, unit); + // fully specify the channel type for the superclass + IOReportChannelType channelType = { + .categories = categories, + .report_format = kIOReportFormatSimple, + .nelements = 1, + .element_idx = 0 + }; + + return super::init(reportingService, channelType, unit); } IOReturn IOSimpleReporter::setValue(uint64_t channel_id, - int64_t value) + int64_t value) { - IOReturn res = kIOReturnError; - IOSimpleReportValues simple_values; - int element_index = 0; - - lockReporter(); - - if (getFirstElementIndex(channel_id, &element_index) != kIOReturnSuccess) { - res = kIOReturnBadArgument; - goto finish; - } - - - if (copyElementValues(element_index, (IOReportElementValues *)&simple_values) != kIOReturnSuccess) { - res = kIOReturnBadArgument; - goto finish; - } - - simple_values.simple_value = value; - res = setElementValues(element_index, (IOReportElementValues *)&simple_values); - + IOReturn res = kIOReturnError; + IOSimpleReportValues simple_values; + int element_index = 0; + + lockReporter(); + + if (getFirstElementIndex(channel_id, &element_index) != kIOReturnSuccess) { + res = kIOReturnBadArgument; + goto finish; + } + + + if (copyElementValues(element_index, (IOReportElementValues *)&simple_values) != kIOReturnSuccess) { + res = kIOReturnBadArgument; + goto finish; + } + + simple_values.simple_value = value; + res = setElementValues(element_index, (IOReportElementValues *)&simple_values); + finish: - unlockReporter(); - return res; + unlockReporter(); + return res; } IOReturn IOSimpleReporter::incrementValue(uint64_t channel_id, - int64_t increment) + int64_t increment) { - IOReturn res = kIOReturnError; - IOSimpleReportValues simple_values; - int element_index = 0; - - lockReporter(); - - if (getFirstElementIndex(channel_id, &element_index) != kIOReturnSuccess) { - res = kIOReturnBadArgument; - goto finish; - } - - if (copyElementValues(element_index, (IOReportElementValues *)&simple_values) != kIOReturnSuccess){ - res = kIOReturnBadArgument; - goto finish; - } - - simple_values.simple_value += increment; - - res = 
setElementValues(element_index, (IOReportElementValues *)&simple_values); - + IOReturn res = kIOReturnError; + IOSimpleReportValues simple_values; + int element_index = 0; + + lockReporter(); + + if (getFirstElementIndex(channel_id, &element_index) != kIOReturnSuccess) { + res = kIOReturnBadArgument; + goto finish; + } + + if (copyElementValues(element_index, (IOReportElementValues *)&simple_values) != kIOReturnSuccess) { + res = kIOReturnBadArgument; + goto finish; + } + + simple_values.simple_value += increment; + + res = setElementValues(element_index, (IOReportElementValues *)&simple_values); + finish: - unlockReporter(); - return res; + unlockReporter(); + return res; } int64_t IOSimpleReporter::getValue(uint64_t channel_id) { - IOSimpleReportValues *values = NULL; - int64_t simple_value = (int64_t)kIOReportInvalidValue; - int index = 0; - - lockReporter(); - - if (getFirstElementIndex(channel_id, &index) == kIOReturnSuccess) { - - values = (IOSimpleReportValues *)getElementValues(index); - - if (values != NULL) - simple_value = values->simple_value; - } - - unlockReporter(); - return simple_value; -} + IOSimpleReportValues *values = NULL; + int64_t simple_value = (int64_t)kIOReportInvalidValue; + int index = 0; + + lockReporter(); + + if (getFirstElementIndex(channel_id, &index) == kIOReturnSuccess) { + values = (IOSimpleReportValues *)getElementValues(index); + if (values != NULL) { + simple_value = values->simple_value; + } + } + + unlockReporter(); + return simple_value; +} diff --git a/iokit/Kernel/IOStartIOKit.cpp b/iokit/Kernel/IOStartIOKit.cpp index 6eb48b713..196e91086 100644 --- a/iokit/Kernel/IOStartIOKit.cpp +++ b/iokit/Kernel/IOStartIOKit.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -52,21 +52,21 @@ const OSSymbol * gIOProgressBackbufferKey; OSSet * gIORemoveOnReadProperties; extern "C" { - void StartIOKit( void * p1, void * p2, void * p3, void * p4 ); void IORegistrySetOSBuildVersion(char * build_version); void IORecordProgressBackbuffer(void * buffer, size_t size, uint32_t theme); -extern void OSlibkernInit (void); +extern void OSlibkernInit(void); void iokit_post_constructor_init(void); #include #include -void IOKitInitializeTime( void ) +void +IOKitInitializeTime( void ) { - mach_timespec_t t; + mach_timespec_t t; t.tv_sec = 30; t.tv_nsec = 0; @@ -80,36 +80,37 @@ void IOKitInitializeTime( void ) clock_initialize_calendar(); } -void iokit_post_constructor_init(void) +void +iokit_post_constructor_init(void) { - IORegistryEntry * root; - OSObject * obj; - - IOCPUInitialize(); - root = IORegistryEntry::initialize(); - assert( root ); - IOService::initialize(); - IOCatalogue::initialize(); - IOStatistics::initialize(); - OSKext::initialize(); - IOUserClient::initialize(); - IOMemoryDescriptor::initialize(); - IORootParent::initialize(); - - // Initializes IOPMinformeeList class-wide shared lock - IOPMinformeeList::getSharedRecursiveLock(); - - obj = OSString::withCString( version ); - assert( obj ); - if( obj ) { - root->setProperty( kIOKitBuildVersionKey, obj ); - obj->release(); - } - obj = IOKitDiagnostics::diagnostics(); - if( obj ) { - root->setProperty( kIOKitDiagnosticsKey, obj ); - obj->release(); - } + IORegistryEntry * root; + OSObject * obj; + + IOCPUInitialize(); + root = IORegistryEntry::initialize(); + assert( root ); + IOService::initialize(); + IOCatalogue::initialize(); + IOStatistics::initialize(); + OSKext::initialize(); + IOUserClient::initialize(); + IOMemoryDescriptor::initialize(); + IORootParent::initialize(); + + // Initializes IOPMinformeeList class-wide shared lock + IOPMinformeeList::getSharedRecursiveLock(); + + obj = OSString::withCString( version ); + assert( obj ); + if (obj) { + root->setProperty( kIOKitBuildVersionKey, obj ); + obj->release(); + } + obj = IOKitDiagnostics::diagnostics(); + if (obj) { + root->setProperty( kIOKitDiagnosticsKey, obj ); + obj->release(); + } } /***** @@ -117,93 +118,97 @@ void iokit_post_constructor_init(void) */ void (*record_startup_extensions_function)(void) = 0; -void StartIOKit( void * p1, void * p2, void * p3, void * p4 ) +void +StartIOKit( void * p1, void * p2, void * p3, void * p4 ) { - IOPlatformExpertDevice * rootNub; - int debugFlags; + IOPlatformExpertDevice * rootNub; + int debugFlags; - if( PE_parse_boot_argn( "io", &debugFlags, sizeof (debugFlags) )) - gIOKitDebug = debugFlags; + if (PE_parse_boot_argn( "io", &debugFlags, sizeof(debugFlags))) { + gIOKitDebug = debugFlags; + } #if DEVELOPMENT || DEBUG - else gIOKitDebug |= kIOWaitQuietPanics; + else { + gIOKitDebug |= kIOWaitQuietPanics; + } #endif /* DEVELOPMENT || DEBUG */ - - if( PE_parse_boot_argn( "iotrace", &debugFlags, sizeof (debugFlags) )) - gIOKitTrace = debugFlags; - - // Compat for boot-args - gIOKitTrace |= (gIOKitDebug & kIOTraceCompatBootArgs); - - if( PE_parse_boot_argn( "pmtimeout", &debugFlags, sizeof (debugFlags) )) - gCanSleepTimeout = debugFlags; - // - // Have to start IOKit environment before we attempt to start - // the C++ runtime environment. At some stage we have to clean up - // the initialisation path so that OS C++ can initialise independantly - // of iokit basic service initialisation, or better we have IOLib stuff - // initialise as basic OS services. 
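// [Editorial note, not part of the original xnu patch.] The three
// PE_parse_boot_argn() calls in StartIOKit() wire debug knobs to NVRAM
// boot-args; for example (values purely illustrative):
//
//     nvram boot-args="io=0x10 iotrace=0x1 pmtimeout=30"
//
// "io" seeds gIOKitDebug, "iotrace" seeds gIOKitTrace (then ORed with
// any compat bits carried in gIOKitDebug), and "pmtimeout" seeds
// gCanSleepTimeout.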
- // - IOLibInit(); - OSlibkernInit(); - devsw_init(); - - gIOProgressBackbufferKey = OSSymbol::withCStringNoCopy(kIOProgressBackbufferKey); - gIORemoveOnReadProperties = OSSet::withObjects((const OSObject **) &gIOProgressBackbufferKey, 1); - - interruptAccountingInit(); - - rootNub = new IOPlatformExpertDevice; - - if( rootNub && rootNub->initWithArgs( p1, p2, p3, p4)) { - rootNub->attach( 0 ); - - /* If the bootstrap segment set up a function to record startup - * extensions, call it now. - */ - if (record_startup_extensions_function) { - record_startup_extensions_function(); - } - - rootNub->registerService(); + + if (PE_parse_boot_argn( "iotrace", &debugFlags, sizeof(debugFlags))) { + gIOKitTrace = debugFlags; + } + + // Compat for boot-args + gIOKitTrace |= (gIOKitDebug & kIOTraceCompatBootArgs); + + if (PE_parse_boot_argn( "pmtimeout", &debugFlags, sizeof(debugFlags))) { + gCanSleepTimeout = debugFlags; + } + // + // Have to start IOKit environment before we attempt to start + // the C++ runtime environment. At some stage we have to clean up + // the initialisation path so that OS C++ can initialise independently + // of iokit basic service initialisation, or better we have IOLib stuff + // initialise as basic OS services. + // + IOLibInit(); + OSlibkernInit(); + devsw_init(); + + gIOProgressBackbufferKey = OSSymbol::withCStringNoCopy(kIOProgressBackbufferKey); + gIORemoveOnReadProperties = OSSet::withObjects((const OSObject **) &gIOProgressBackbufferKey, 1); + + interruptAccountingInit(); + + rootNub = new IOPlatformExpertDevice; + + if (rootNub && rootNub->initWithArgs( p1, p2, p3, p4)) { + rootNub->attach( 0 ); + + /* If the bootstrap segment set up a function to record startup + * extensions, call it now. + */ + if (record_startup_extensions_function) { + record_startup_extensions_function(); + } + + rootNub->registerService(); #if !NO_KEXTD - /* Add a busy count to keep the registry busy until kextd has - * completely finished launching. This is decremented when kextd - * messages the kernel after the in-kernel linker has been - * removed and personalities have been sent. - */ - IOService::getServiceRoot()->adjustBusy(1); + /* Add a busy count to keep the registry busy until kextd has + * completely finished launching. This is decremented when kextd + * messages the kernel after the in-kernel linker has been + * removed and personalities have been sent. 
+ */ + IOService::getServiceRoot()->adjustBusy(1); #endif - } + } } void IORegistrySetOSBuildVersion(char * build_version) { - IORegistryEntry * root = IORegistryEntry::getRegistryRoot(); + IORegistryEntry * root = IORegistryEntry::getRegistryRoot(); - if (root) { - if (build_version) { - root->setProperty(kOSBuildVersionKey, build_version); - } else { - root->removeProperty(kOSBuildVersionKey); - } - } + if (root) { + if (build_version) { + root->setProperty(kOSBuildVersionKey, build_version); + } else { + root->removeProperty(kOSBuildVersionKey); + } + } - return; + return; } void IORecordProgressBackbuffer(void * buffer, size_t size, uint32_t theme) { - IORegistryEntry * chosen; - if ((chosen = IORegistryEntry::fromPath(kIODeviceTreePlane ":/chosen"))) - { - chosen->setProperty(kIOProgressBackbufferKey, buffer, size); - chosen->setProperty(kIOProgressColorThemeKey, theme, 32); - - chosen->release(); - } -} + IORegistryEntry * chosen; + if ((chosen = IORegistryEntry::fromPath(kIODeviceTreePlane ":/chosen"))) { + chosen->setProperty(kIOProgressBackbufferKey, buffer, size); + chosen->setProperty(kIOProgressColorThemeKey, theme, 32); + chosen->release(); + } +} }; /* extern "C" */ diff --git a/iokit/Kernel/IOStateReporter.cpp b/iokit/Kernel/IOStateReporter.cpp index e1214dc55..4380c6cf2 100644 --- a/iokit/Kernel/IOStateReporter.cpp +++ b/iokit/Kernel/IOStateReporter.cpp @@ -1,8 +1,8 @@ /* * Copyright (c) 2012-2013 Apple Computer, Inc. All Rights Reserved. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,852 +38,840 @@ OSDefineMetaClassAndStructors(IOStateReporter, IOReporter); /* static */ IOStateReporter* IOStateReporter::with(IOService *reportingService, - IOReportCategories categories, - int nstates, - IOReportUnit unit/* = kIOReportUnitHWTicks*/) + IOReportCategories categories, + int nstates, + IOReportUnit unit /* = kIOReportUnitHWTicks*/) { - IOStateReporter *reporter, *rval = NULL; - - // kprintf("%s\n", __func__); // can't IORLOG() from static - - reporter = new IOStateReporter; - if (!reporter) goto finish; - - if (!reporter->initWith(reportingService, categories, nstates, unit)) { - goto finish; - } - - // success - rval = reporter; - + IOStateReporter *reporter, *rval = NULL; + + // kprintf("%s\n", __func__); // can't IORLOG() from static + + reporter = new IOStateReporter; + if (!reporter) { + goto finish; + } + + if (!reporter->initWith(reportingService, categories, nstates, unit)) { + goto finish; + } + + // success + rval = reporter; + finish: - if (!rval) { - OSSafeReleaseNULL(reporter); - } - - return rval; + if (!rval) { + OSSafeReleaseNULL(reporter); + } + + return rval; } bool IOStateReporter::initWith(IOService *reportingService, - IOReportCategories categories, - int16_t nstates, - IOReportUnit unit) + IOReportCategories categories, + int16_t nstates, + IOReportUnit unit) { - bool success = false; - - IOReportChannelType channelType = { - .categories = categories, - .report_format = kIOReportFormatState, - .nelements = static_cast(nstates), - .element_idx = 0 - }; - - if(super::init(reportingService, channelType, unit) != true) { - IORLOG("ERROR super::initWith failed"); - success = false; - goto finish; - } - - _currentStates = NULL; - _lastUpdateTimes = NULL; - - success = true; - -finish: - return success; + bool success = false; + + IOReportChannelType channelType = { + .categories = categories, + .report_format = kIOReportFormatState, + .nelements = static_cast(nstates), + .element_idx = 0 + }; + + if (super::init(reportingService, channelType, unit) != true) { + IORLOG("ERROR super::initWith failed"); + success = false; + goto finish; + } + + _currentStates = NULL; + _lastUpdateTimes = NULL; + + success = true; + +finish: + return success; } void IOStateReporter::free(void) { - if (_currentStates) { - PREFL_MEMOP_PANIC(_nChannels, int); - IOFree(_currentStates, (size_t)_nChannels * sizeof(int)); - } - if (_lastUpdateTimes) { - PREFL_MEMOP_PANIC(_nChannels, uint64_t); - IOFree(_lastUpdateTimes, (size_t)_nChannels * sizeof(uint64_t)); - } - - super::free(); + if (_currentStates) { + PREFL_MEMOP_PANIC(_nChannels, int); + IOFree(_currentStates, (size_t)_nChannels * sizeof(int)); + } + if (_lastUpdateTimes) { + PREFL_MEMOP_PANIC(_nChannels, uint64_t); + IOFree(_lastUpdateTimes, (size_t)_nChannels * sizeof(uint64_t)); + } + + super::free(); } IOReturn IOStateReporter::handleSwapPrepare(int newNChannels) { - IOReturn res = kIOReturnError; - size_t newCurStatesSize, newTSSize; - - //IORLOG("handleSwapPrepare (state) _nChannels before = %u", _nChannels); - - IOREPORTER_CHECK_CONFIG_LOCK(); - - if (_swapCurrentStates || _swapLastUpdateTimes) { - panic("IOStateReporter::_swap* already in use"); - } - - // new currentStates buffer - PREFL_MEMOP_FAIL(newNChannels, int); - newCurStatesSize = (size_t)newNChannels * sizeof(int); - _swapCurrentStates = (int*)IOMalloc(newCurStatesSize); - if (_swapCurrentStates == NULL) { - res = kIOReturnNoMemory; goto finish; - } - memset(_swapCurrentStates, -1, newCurStatesSize); // 
init w/"no state" - - // new timestamps buffer - PREFL_MEMOP_FAIL(newNChannels, uint64_t); - newTSSize = (size_t)newNChannels * sizeof(uint64_t); - _swapLastUpdateTimes = (uint64_t *)IOMalloc(newTSSize); - if (_swapLastUpdateTimes == NULL) { - res = kIOReturnNoMemory; goto finish; - } - memset(_swapLastUpdateTimes, 0, newTSSize); - - res = super::handleSwapPrepare(newNChannels); - + IOReturn res = kIOReturnError; + size_t newCurStatesSize, newTSSize; + + //IORLOG("handleSwapPrepare (state) _nChannels before = %u", _nChannels); + + IOREPORTER_CHECK_CONFIG_LOCK(); + + if (_swapCurrentStates || _swapLastUpdateTimes) { + panic("IOStateReporter::_swap* already in use"); + } + + // new currentStates buffer + PREFL_MEMOP_FAIL(newNChannels, int); + newCurStatesSize = (size_t)newNChannels * sizeof(int); + _swapCurrentStates = (int*)IOMalloc(newCurStatesSize); + if (_swapCurrentStates == NULL) { + res = kIOReturnNoMemory; goto finish; + } + memset(_swapCurrentStates, -1, newCurStatesSize); // init w/"no state" + + // new timestamps buffer + PREFL_MEMOP_FAIL(newNChannels, uint64_t); + newTSSize = (size_t)newNChannels * sizeof(uint64_t); + _swapLastUpdateTimes = (uint64_t *)IOMalloc(newTSSize); + if (_swapLastUpdateTimes == NULL) { + res = kIOReturnNoMemory; goto finish; + } + memset(_swapLastUpdateTimes, 0, newTSSize); + + res = super::handleSwapPrepare(newNChannels); + finish: - if (res) { - if (_swapCurrentStates) { - IOFree(_swapCurrentStates, newCurStatesSize); - _swapCurrentStates = NULL; - } - if (_swapLastUpdateTimes) { - IOFree(_swapLastUpdateTimes, newTSSize); - _swapLastUpdateTimes = NULL; - } - } - - return res; + if (res) { + if (_swapCurrentStates) { + IOFree(_swapCurrentStates, newCurStatesSize); + _swapCurrentStates = NULL; + } + if (_swapLastUpdateTimes) { + IOFree(_swapLastUpdateTimes, newTSSize); + _swapLastUpdateTimes = NULL; + } + } + + return res; } IOReturn IOStateReporter::handleAddChannelSwap(uint64_t channelID, - const OSSymbol *symChannelName) + const OSSymbol *symChannelName) { - IOReturn res = kIOReturnError; - int cnt; - int *tmpCurStates; - uint64_t *tmpTimestamps; - bool swapComplete = false; - - //IORLOG("IOStateReporter::handleSwap"); - - if (!_swapCurrentStates || !_swapLastUpdateTimes) { - IORLOG("IOReporter::handleSwap ERROR swap variables uninitialized!"); - goto finish; - } - - IOREPORTER_CHECK_CONFIG_LOCK(); - IOREPORTER_CHECK_LOCK(); - - // Copy any existing buffers - if (_currentStates) { - PREFL_MEMOP_FAIL(_nChannels, int); - memcpy(_swapCurrentStates, _currentStates, - (size_t)_nChannels * sizeof(int)); - - if (!_lastUpdateTimes) { - panic("IOStateReporter::handleAddChannelSwap _lastUpdateTimes unset despite non-NULL _currentStates"); - } - PREFL_MEMOP_FAIL(_nChannels, uint64_t); - memcpy(_swapLastUpdateTimes, _lastUpdateTimes, - (size_t)_nChannels * sizeof(uint64_t)); - } - - // Update principal instance variables, keep old values in _swap* for cleanup - tmpCurStates = _currentStates; - _currentStates = _swapCurrentStates; - _swapCurrentStates = tmpCurStates; - - tmpTimestamps = _lastUpdateTimes; - _lastUpdateTimes = _swapLastUpdateTimes; - _swapLastUpdateTimes = tmpTimestamps; - - swapComplete = true; - - // subclass success - - // invoke superclass(es): base class updates _nChannels & _nElements - res = super::handleAddChannelSwap(channelID, symChannelName); - if (res) { - IORLOG("handleSwap(state) ERROR super::handleSwap failed!"); - goto finish; - } - - // Channel added successfully, initialize the new channel's state_ids to 0..nStates-1 - for (cnt = 0; 
cnt < _channelDimension; cnt++) { - handleSetStateID(channelID, cnt, (uint64_t)cnt); - } - + IOReturn res = kIOReturnError; + int cnt; + int *tmpCurStates; + uint64_t *tmpTimestamps; + bool swapComplete = false; + + //IORLOG("IOStateReporter::handleSwap"); + + if (!_swapCurrentStates || !_swapLastUpdateTimes) { + IORLOG("IOReporter::handleSwap ERROR swap variables uninitialized!"); + goto finish; + } + + IOREPORTER_CHECK_CONFIG_LOCK(); + IOREPORTER_CHECK_LOCK(); + + // Copy any existing buffers + if (_currentStates) { + PREFL_MEMOP_FAIL(_nChannels, int); + memcpy(_swapCurrentStates, _currentStates, + (size_t)_nChannels * sizeof(int)); + + if (!_lastUpdateTimes) { + panic("IOStateReporter::handleAddChannelSwap _lastUpdateTimes unset despite non-NULL _currentStates"); + } + PREFL_MEMOP_FAIL(_nChannels, uint64_t); + memcpy(_swapLastUpdateTimes, _lastUpdateTimes, + (size_t)_nChannels * sizeof(uint64_t)); + } + + // Update principal instance variables, keep old values in _swap* for cleanup + tmpCurStates = _currentStates; + _currentStates = _swapCurrentStates; + _swapCurrentStates = tmpCurStates; + + tmpTimestamps = _lastUpdateTimes; + _lastUpdateTimes = _swapLastUpdateTimes; + _swapLastUpdateTimes = tmpTimestamps; + + swapComplete = true; + + // subclass success + + // invoke superclass(es): base class updates _nChannels & _nElements + res = super::handleAddChannelSwap(channelID, symChannelName); + if (res) { + IORLOG("handleSwap(state) ERROR super::handleSwap failed!"); + goto finish; + } + + // Channel added successfully, initialize the new channel's state_ids to 0..nStates-1 + for (cnt = 0; cnt < _channelDimension; cnt++) { + handleSetStateID(channelID, cnt, (uint64_t)cnt); + } + finish: - if (res && swapComplete) { - // unswap so the unused buffers get cleaned up - tmpCurStates = _currentStates; - _currentStates = _swapCurrentStates; - _swapCurrentStates = tmpCurStates; - - tmpTimestamps = _lastUpdateTimes; - _lastUpdateTimes = _swapLastUpdateTimes; - _swapLastUpdateTimes = tmpTimestamps; - } - - return res; + if (res && swapComplete) { + // unswap so the unused buffers get cleaned up + tmpCurStates = _currentStates; + _currentStates = _swapCurrentStates; + _swapCurrentStates = tmpCurStates; + + tmpTimestamps = _lastUpdateTimes; + _lastUpdateTimes = _swapLastUpdateTimes; + _swapLastUpdateTimes = tmpTimestamps; + } + + return res; } void IOStateReporter::handleSwapCleanup(int swapNChannels) { - IOREPORTER_CHECK_CONFIG_LOCK(); - - super::handleSwapCleanup(swapNChannels); - - if (_swapCurrentStates) { - PREFL_MEMOP_PANIC(swapNChannels, int); - IOFree(_swapCurrentStates, (size_t)swapNChannels * sizeof(int)); - _swapCurrentStates = NULL; - } - if (_swapLastUpdateTimes) { - PREFL_MEMOP_PANIC(swapNChannels, uint64_t); - IOFree(_swapLastUpdateTimes, (size_t)swapNChannels * sizeof(uint64_t)); - _swapLastUpdateTimes = NULL; - } + IOREPORTER_CHECK_CONFIG_LOCK(); + + super::handleSwapCleanup(swapNChannels); + + if (_swapCurrentStates) { + PREFL_MEMOP_PANIC(swapNChannels, int); + IOFree(_swapCurrentStates, (size_t)swapNChannels * sizeof(int)); + _swapCurrentStates = NULL; + } + if (_swapLastUpdateTimes) { + PREFL_MEMOP_PANIC(swapNChannels, uint64_t); + IOFree(_swapLastUpdateTimes, (size_t)swapNChannels * sizeof(uint64_t)); + _swapLastUpdateTimes = NULL; + } } IOReturn IOStateReporter::_getStateIndices(uint64_t channel_id, - uint64_t state_id, - int *channel_index, - int *state_index) + uint64_t state_id, + int *channel_index, + int *state_index) { - IOReturn res = kIOReturnError; - int cnt; - 
IOStateReportValues *values; - int element_index = 0; - - IOREPORTER_CHECK_LOCK(); - - if (getChannelIndices(channel_id, - channel_index, - &element_index) != kIOReturnSuccess) { - res = kIOReturnBadArgument; - - goto finish; - } - - for (cnt = 0; cnt < _channelDimension; cnt++) { - - values = (IOStateReportValues *)getElementValues(element_index + cnt); - - if (values == NULL) { - - res = kIOReturnError; - goto finish; - } - - if (values->state_id == state_id) { - *state_index = cnt; - res = kIOReturnSuccess; - goto finish; - } - } - - res = kIOReturnBadArgument; - + IOReturn res = kIOReturnError; + int cnt; + IOStateReportValues *values; + int element_index = 0; + + IOREPORTER_CHECK_LOCK(); + + if (getChannelIndices(channel_id, + channel_index, + &element_index) != kIOReturnSuccess) { + res = kIOReturnBadArgument; + + goto finish; + } + + for (cnt = 0; cnt < _channelDimension; cnt++) { + values = (IOStateReportValues *)getElementValues(element_index + cnt); + + if (values == NULL) { + res = kIOReturnError; + goto finish; + } + + if (values->state_id == state_id) { + *state_index = cnt; + res = kIOReturnSuccess; + goto finish; + } + } + + res = kIOReturnBadArgument; + finish: - return res; + return res; } IOReturn IOStateReporter::setChannelState(uint64_t channel_id, - uint64_t new_state_id) + uint64_t new_state_id) { - IOReturn res = kIOReturnError; - int channel_index, new_state_index; - uint64_t last_intransition = 0; - uint64_t prev_state_residency = 0; - - lockReporter(); - - if (_getStateIndices(channel_id, new_state_id, &channel_index, &new_state_index) == kIOReturnSuccess) { - res = handleSetStateByIndices(channel_index, new_state_index, - last_intransition, - prev_state_residency); - goto finish; - } - - res = kIOReturnBadArgument; - + IOReturn res = kIOReturnError; + int channel_index, new_state_index; + uint64_t last_intransition = 0; + uint64_t prev_state_residency = 0; + + lockReporter(); + + if (_getStateIndices(channel_id, new_state_id, &channel_index, &new_state_index) == kIOReturnSuccess) { + res = handleSetStateByIndices(channel_index, new_state_index, + last_intransition, + prev_state_residency); + goto finish; + } + + res = kIOReturnBadArgument; + finish: - unlockReporter(); - return res; + unlockReporter(); + return res; } IOReturn IOStateReporter::setChannelState(uint64_t channel_id, - uint64_t new_state_id, - uint64_t last_intransition, - uint64_t prev_state_residency) + uint64_t new_state_id, + uint64_t last_intransition, + uint64_t prev_state_residency) { - return setChannelState(channel_id, new_state_id); + return setChannelState(channel_id, new_state_id); } IOReturn IOStateReporter::overrideChannelState(uint64_t channel_id, - uint64_t state_id, - uint64_t time_in_state, - uint64_t intransitions, - uint64_t last_intransition /*=0*/) + uint64_t state_id, + uint64_t time_in_state, + uint64_t intransitions, + uint64_t last_intransition /*=0*/) { - IOReturn res = kIOReturnError; - int channel_index, state_index; - - lockReporter(); - - if (_getStateIndices(channel_id, state_id, &channel_index, &state_index) == kIOReturnSuccess) { - - if (_lastUpdateTimes[channel_index]) { - panic("overrideChannelState() cannot be used after setChannelState()!\n"); - } - - res = handleOverrideChannelStateByIndices(channel_index, state_index, - time_in_state, intransitions, - last_intransition); - goto finish; - } - - res = kIOReturnBadArgument; - + IOReturn res = kIOReturnError; + int channel_index, state_index; + + lockReporter(); + + if (_getStateIndices(channel_id, state_id, 
&channel_index, &state_index) == kIOReturnSuccess) { + if (_lastUpdateTimes[channel_index]) { + panic("overrideChannelState() cannot be used after setChannelState()!\n"); + } + + res = handleOverrideChannelStateByIndices(channel_index, state_index, + time_in_state, intransitions, + last_intransition); + goto finish; + } + + res = kIOReturnBadArgument; + finish: - unlockReporter(); - return res; + unlockReporter(); + return res; } IOReturn IOStateReporter::handleOverrideChannelStateByIndices(int channel_index, - int state_index, - uint64_t time_in_state, - uint64_t intransitions, - uint64_t last_intransition /*=0*/) + int state_index, + uint64_t time_in_state, + uint64_t intransitions, + uint64_t last_intransition /*=0*/) { - IOReturn kerr, result = kIOReturnError; - IOStateReportValues state_values; - int element_index; - - if (channel_index < 0 || channel_index >= _nChannels) { - result = kIOReturnBadArgument; goto finish; - } - - if (channel_index < 0 || channel_index > (_nElements - state_index) - / _channelDimension) { - result = kIOReturnOverrun; goto finish; - } - element_index = channel_index * _channelDimension + state_index; - - kerr = copyElementValues(element_index,(IOReportElementValues*)&state_values); - if (kerr) { - result = kerr; goto finish; - } - - // last_intransition = 0 -> no current state ("residency summary only") - state_values.last_intransition = last_intransition; - state_values.intransitions = intransitions; - state_values.upticks = time_in_state; - - // determines current time for metadata - kerr = setElementValues(element_index, (IOReportElementValues *)&state_values); - if (kerr) { - result = kerr; goto finish; - } - - // success - result = kIOReturnSuccess; - + IOReturn kerr, result = kIOReturnError; + IOStateReportValues state_values; + int element_index; + + if (channel_index < 0 || channel_index >= _nChannels) { + result = kIOReturnBadArgument; goto finish; + } + + if (channel_index < 0 || channel_index > (_nElements - state_index) + / _channelDimension) { + result = kIOReturnOverrun; goto finish; + } + element_index = channel_index * _channelDimension + state_index; + + kerr = copyElementValues(element_index, (IOReportElementValues*)&state_values); + if (kerr) { + result = kerr; goto finish; + } + + // last_intransition = 0 -> no current state ("residency summary only") + state_values.last_intransition = last_intransition; + state_values.intransitions = intransitions; + state_values.upticks = time_in_state; + + // determines current time for metadata + kerr = setElementValues(element_index, (IOReportElementValues *)&state_values); + if (kerr) { + result = kerr; goto finish; + } + + // success + result = kIOReturnSuccess; + finish: - return result; + return result; } IOReturn IOStateReporter::incrementChannelState(uint64_t channel_id, - uint64_t state_id, - uint64_t time_in_state, - uint64_t intransitions, - uint64_t last_intransition /*=0*/) + uint64_t state_id, + uint64_t time_in_state, + uint64_t intransitions, + uint64_t last_intransition /*=0*/) { - IOReturn res = kIOReturnError; - int channel_index, state_index; - - lockReporter(); - - if (_getStateIndices(channel_id, state_id, &channel_index, &state_index) == kIOReturnSuccess) { - - if (_lastUpdateTimes[channel_index]) { - panic("incrementChannelState() cannot be used after setChannelState()!\n"); - } - - res = handleIncrementChannelStateByIndices(channel_index, state_index, - time_in_state, intransitions, - last_intransition); - goto finish; - } - - res = kIOReturnBadArgument; - -finish: - 
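[Reviewer note, not part of the patch: overrideChannelState()/incrementChannelState() serve channels whose residency is computed outside the reporter, and the panic above enforces that they are never mixed with setChannelState() on the same channel. A hedged sketch of that usage; the channel ID, state ID, and tick counts are hypothetical:]

    // Feeding externally computed residency into a channel.
    #include <IOKit/IOKernelReporters.h>

    static void
    publishResidency(IOStateReporter *reporter, uint64_t kMyChannel,
                     uint64_t ticksIdle, uint64_t deltaTicks)
    {
        // Seed totals; last_intransition defaults to 0, meaning "no
        // current state", so automatic time-in-state updates stay off.
        reporter->overrideChannelState(kMyChannel, /* state_id */ 1,
                                       /* time_in_state */ ticksIdle,
                                       /* intransitions */ 42);
        // Fold in later deltas without rewriting the totals:
        reporter->incrementChannelState(kMyChannel, /* state_id */ 1,
                                        /* time_in_state */ deltaTicks,
                                        /* intransitions */ 1);
    }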
unlockReporter(); - return res; + IOReturn res = kIOReturnError; + int channel_index, state_index; + + lockReporter(); + + if (_getStateIndices(channel_id, state_id, &channel_index, &state_index) == kIOReturnSuccess) { + if (_lastUpdateTimes[channel_index]) { + panic("incrementChannelState() cannot be used after setChannelState()!\n"); + } + res = handleIncrementChannelStateByIndices(channel_index, state_index, + time_in_state, intransitions, + last_intransition); + goto finish; + } + + res = kIOReturnBadArgument; + +finish: + unlockReporter(); + return res; } IOReturn IOStateReporter::handleIncrementChannelStateByIndices(int channel_index, - int state_index, - uint64_t time_in_state, - uint64_t intransitions, - uint64_t last_intransition /*=0*/) + int state_index, + uint64_t time_in_state, + uint64_t intransitions, + uint64_t last_intransition /*=0*/) { - IOReturn kerr, result = kIOReturnError; - IOStateReportValues state_values; - int element_index; - - if (channel_index < 0 || channel_index >= _nChannels) { - result = kIOReturnBadArgument; goto finish; - } - - if (channel_index < 0 || channel_index > (_nElements - state_index) - / _channelDimension) { - result = kIOReturnOverrun; goto finish; - } - element_index = channel_index * _channelDimension + state_index; - - kerr = copyElementValues(element_index,(IOReportElementValues*)&state_values); - if (kerr) { - result = kerr; - goto finish; - } - - state_values.last_intransition = last_intransition; - state_values.intransitions += intransitions; - state_values.upticks += time_in_state; - - // determines current time for metadata - kerr = setElementValues(element_index, (IOReportElementValues *)&state_values); - if (kerr) { - result = kerr; - goto finish; - } - - // success - result = kIOReturnSuccess; - + IOReturn kerr, result = kIOReturnError; + IOStateReportValues state_values; + int element_index; + + if (channel_index < 0 || channel_index >= _nChannels) { + result = kIOReturnBadArgument; goto finish; + } + + if (channel_index < 0 || channel_index > (_nElements - state_index) + / _channelDimension) { + result = kIOReturnOverrun; goto finish; + } + element_index = channel_index * _channelDimension + state_index; + + kerr = copyElementValues(element_index, (IOReportElementValues*)&state_values); + if (kerr) { + result = kerr; + goto finish; + } + + state_values.last_intransition = last_intransition; + state_values.intransitions += intransitions; + state_values.upticks += time_in_state; + + // determines current time for metadata + kerr = setElementValues(element_index, (IOReportElementValues *)&state_values); + if (kerr) { + result = kerr; + goto finish; + } + + // success + result = kIOReturnSuccess; + finish: - return result; + return result; } IOReturn IOStateReporter::setState(uint64_t new_state_id) { - uint64_t last_intransition = 0; - uint64_t prev_state_residency = 0; - IOReturn res = kIOReturnError; - IOStateReportValues *values; - int channel_index = 0, element_index = 0, new_state_index = 0; - int cnt; - - lockReporter(); - - if (_nChannels == 1) { - - for (cnt = 0; cnt < _channelDimension; cnt++) { - - new_state_index = element_index + cnt; - - values = (IOStateReportValues *)getElementValues(new_state_index); - - if (values == NULL) { - res = kIOReturnError; - goto finish; - } - - if (values->state_id == new_state_id) { - - res = handleSetStateByIndices(channel_index, new_state_index, - last_intransition, - prev_state_residency); - goto finish; - } - } - } - - res = kIOReturnBadArgument; + uint64_t last_intransition = 0; + 
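[Reviewer note, not part of the patch: for orientation, a hedged sketch of how a driver typically wires these APIs together, assuming the IOKernelReporters.h signatures (IOStateReporter::with, addChannel, setChannelState, setState); the channel and state IDs here are hypothetical. setState() is the convenience form for reporters with exactly one channel, as its _nChannels == 1 check above shows:]

    // Typical driver usage (IDs hypothetical).
    #include <IOKit/IOKernelReporters.h>

    enum { kPowerChannel = 1 };
    enum { kOff = 0, kIdle = 1, kActive = 2 };

    static IOStateReporter *
    makePowerReporter(IOService *service)
    {
        IOStateReporter *r = IOStateReporter::with(
            service, kIOReportCategoryPower, /* nstates */ 3);
        if (r && r->addChannel(kPowerChannel, "power_state") ==
            kIOReturnSuccess) {
            // handleAddChannelSwap() initialized state_ids to 0..2,
            // so either call works without setStateID():
            r->setChannelState(kPowerChannel, kActive);
            r->setState(kIdle);  // ok: reporter has exactly one channel
        }
        return r;
    }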
uint64_t prev_state_residency = 0; + IOReturn res = kIOReturnError; + IOStateReportValues *values; + int channel_index = 0, element_index = 0, new_state_index = 0; + int cnt; + + lockReporter(); + + if (_nChannels == 1) { + for (cnt = 0; cnt < _channelDimension; cnt++) { + new_state_index = element_index + cnt; + + values = (IOStateReportValues *)getElementValues(new_state_index); + + if (values == NULL) { + res = kIOReturnError; + goto finish; + } + + if (values->state_id == new_state_id) { + res = handleSetStateByIndices(channel_index, new_state_index, + last_intransition, + prev_state_residency); + goto finish; + } + } + } + + res = kIOReturnBadArgument; finish: - unlockReporter(); - return res; + unlockReporter(); + return res; } IOReturn IOStateReporter::setState(uint64_t new_state_id, - uint64_t last_intransition, - uint64_t prev_state_residency) + uint64_t last_intransition, + uint64_t prev_state_residency) { - return setState(new_state_id); + return setState(new_state_id); } IOReturn IOStateReporter::setStateID(uint64_t channel_id, - int state_index, - uint64_t state_id) + int state_index, + uint64_t state_id) { - IOReturn res = kIOReturnError; - - lockReporter(); - - res = handleSetStateID(channel_id, state_index, state_id); - - unlockReporter(); - - return res; + IOReturn res = kIOReturnError; + + lockReporter(); + + res = handleSetStateID(channel_id, state_index, state_id); + + unlockReporter(); + + return res; } IOReturn IOStateReporter::handleSetStateID(uint64_t channel_id, - int state_index, - uint64_t state_id) + int state_index, + uint64_t state_id) { - IOReturn res = kIOReturnError; - IOStateReportValues state_values; - int element_index = 0; - - IOREPORTER_CHECK_LOCK(); - - if (getFirstElementIndex(channel_id, &element_index) == kIOReturnSuccess) { - - if (state_index >= _channelDimension) { - res = kIOReturnBadArgument; goto finish; - } - if (_nElements - state_index <= element_index) { - res = kIOReturnOverrun; goto finish; - } - element_index += state_index; - - if (copyElementValues(element_index, (IOReportElementValues *)&state_values) != kIOReturnSuccess) { - res = kIOReturnBadArgument; - goto finish; - } - - state_values.state_id = state_id; - - res = setElementValues(element_index, (IOReportElementValues *)&state_values); - } - - // FIXME: set a bit somewhere (reporter-wide?) that state_ids can no longer be - // assumed to be contiguous + IOReturn res = kIOReturnError; + IOStateReportValues state_values; + int element_index = 0; + + IOREPORTER_CHECK_LOCK(); + + if (getFirstElementIndex(channel_id, &element_index) == kIOReturnSuccess) { + if (state_index >= _channelDimension) { + res = kIOReturnBadArgument; goto finish; + } + if (_nElements - state_index <= element_index) { + res = kIOReturnOverrun; goto finish; + } + element_index += state_index; + + if (copyElementValues(element_index, (IOReportElementValues *)&state_values) != kIOReturnSuccess) { + res = kIOReturnBadArgument; + goto finish; + } + + state_values.state_id = state_id; + + res = setElementValues(element_index, (IOReportElementValues *)&state_values); + } + + // FIXME: set a bit somewhere (reporter-wide?) 
that state_ids can no longer be + // assumed to be contiguous finish: - return res; + return res; } IOReturn IOStateReporter::setStateByIndices(int channel_index, - int new_state_index) + int new_state_index) { - IOReturn res = kIOReturnError; - uint64_t last_intransition = 0; - uint64_t prev_state_residency = 0; - - lockReporter(); - - res = handleSetStateByIndices(channel_index, new_state_index, - last_intransition, prev_state_residency); - - unlockReporter(); - - return res; + IOReturn res = kIOReturnError; + uint64_t last_intransition = 0; + uint64_t prev_state_residency = 0; + + lockReporter(); + + res = handleSetStateByIndices(channel_index, new_state_index, + last_intransition, prev_state_residency); + + unlockReporter(); + + return res; } IOReturn IOStateReporter::setStateByIndices(int channel_index, - int new_state_index, - uint64_t last_intransition, - uint64_t prev_state_residency) + int new_state_index, + uint64_t last_intransition, + uint64_t prev_state_residency) { - return setStateByIndices(channel_index, new_state_index); + return setStateByIndices(channel_index, new_state_index); } IOReturn IOStateReporter::handleSetStateByIndices(int channel_index, - int new_state_index, - uint64_t last_intransition, - uint64_t prev_state_residency) + int new_state_index, + uint64_t last_intransition, + uint64_t prev_state_residency) { - IOReturn res = kIOReturnError; - - IOStateReportValues curr_state_values, new_state_values; - int curr_state_index = 0; - int curr_element_index, new_element_index; - uint64_t last_ch_update_time = 0; - uint64_t recordTime = mach_absolute_time(); - - IOREPORTER_CHECK_LOCK(); - - if (channel_index < 0 || channel_index >= _nChannels) { - res = kIOReturnBadArgument; goto finish; - } - - // if no timestamp provided, last_intransition = time of recording (now) - if (last_intransition == 0) { - last_intransition = recordTime; - } - - // First update target state if different than the current state - // _currentStates[] initialized to -1 to detect first state transition - curr_state_index = _currentStates[channel_index]; - if (new_state_index != curr_state_index) { - // fetch element data - if (channel_index < 0 || channel_index > (_nElements-new_state_index) - / _channelDimension) { - res = kIOReturnOverrun; goto finish; - } - new_element_index = channel_index*_channelDimension + new_state_index; - if (copyElementValues(new_element_index, - (IOReportElementValues *)&new_state_values)) { - res = kIOReturnBadArgument; - goto finish; - } - - // Update new state's transition info - new_state_values.intransitions += 1; - new_state_values.last_intransition = last_intransition; - - // and store the values - res = setElementValues(new_element_index, - (IOReportElementValues *)&new_state_values, - recordTime); - - if (res != kIOReturnSuccess) { - goto finish; - } - - _currentStates[channel_index] = new_state_index; - } - - /* Now update time spent in any previous state - If new_state_index = curr_state_index, this updates time in the - current state. If this is the channel's first state transition, - the last update time will be zero. - - Note: While setState() should never be called on a channel being - updated with increment/overrideChannelState(), that's another way - that the last update time might not exist. Regardless, if there - is no basis for determining time spent in previous state, there's - nothing to update! 
- */ - last_ch_update_time = _lastUpdateTimes[channel_index]; - if (last_ch_update_time != 0) { - if (channel_index < 0 || channel_index > (_nElements-curr_state_index) - / _channelDimension) { - res = kIOReturnOverrun; goto finish; - } - curr_element_index = channel_index*_channelDimension + curr_state_index; - if (copyElementValues(curr_element_index, - (IOReportElementValues *)&curr_state_values)) { - res = kIOReturnBadArgument; - goto finish; - } - // compute the time spent in previous state, unless provided - if (prev_state_residency == 0) { - prev_state_residency = last_intransition - last_ch_update_time; - } - - curr_state_values.upticks += prev_state_residency; - - res = setElementValues(curr_element_index, - (IOReportElementValues*)&curr_state_values, - recordTime); - - if (res != kIOReturnSuccess) { - goto finish; - } - } - - // record basis for next "time in prior state" calculation - // (also arms a panic in override/incrementChannelState()) - _lastUpdateTimes[channel_index] = last_intransition; - + IOReturn res = kIOReturnError; + + IOStateReportValues curr_state_values, new_state_values; + int curr_state_index = 0; + int curr_element_index, new_element_index; + uint64_t last_ch_update_time = 0; + uint64_t recordTime = mach_absolute_time(); + + IOREPORTER_CHECK_LOCK(); + + if (channel_index < 0 || channel_index >= _nChannels) { + res = kIOReturnBadArgument; goto finish; + } + + // if no timestamp provided, last_intransition = time of recording (now) + if (last_intransition == 0) { + last_intransition = recordTime; + } + + // First update target state if different than the current state + // _currentStates[] initialized to -1 to detect first state transition + curr_state_index = _currentStates[channel_index]; + if (new_state_index != curr_state_index) { + // fetch element data + if (channel_index < 0 || channel_index > (_nElements - new_state_index) + / _channelDimension) { + res = kIOReturnOverrun; goto finish; + } + new_element_index = channel_index * _channelDimension + new_state_index; + if (copyElementValues(new_element_index, + (IOReportElementValues *)&new_state_values)) { + res = kIOReturnBadArgument; + goto finish; + } + + // Update new state's transition info + new_state_values.intransitions += 1; + new_state_values.last_intransition = last_intransition; + + // and store the values + res = setElementValues(new_element_index, + (IOReportElementValues *)&new_state_values, + recordTime); + + if (res != kIOReturnSuccess) { + goto finish; + } + + _currentStates[channel_index] = new_state_index; + } + + /* Now update time spent in any previous state + * If new_state_index = curr_state_index, this updates time in the + * current state. If this is the channel's first state transition, + * the last update time will be zero. + * + * Note: While setState() should never be called on a channel being + * updated with increment/overrideChannelState(), that's another way + * that the last update time might not exist. Regardless, if there + * is no basis for determining time spent in previous state, there's + * nothing to update! 
+ */ + last_ch_update_time = _lastUpdateTimes[channel_index]; + if (last_ch_update_time != 0) { + if (channel_index < 0 || channel_index > (_nElements - curr_state_index) + / _channelDimension) { + res = kIOReturnOverrun; goto finish; + } + curr_element_index = channel_index * _channelDimension + curr_state_index; + if (copyElementValues(curr_element_index, + (IOReportElementValues *)&curr_state_values)) { + res = kIOReturnBadArgument; + goto finish; + } + // compute the time spent in previous state, unless provided + if (prev_state_residency == 0) { + prev_state_residency = last_intransition - last_ch_update_time; + } + + curr_state_values.upticks += prev_state_residency; + + res = setElementValues(curr_element_index, + (IOReportElementValues*)&curr_state_values, + recordTime); + + if (res != kIOReturnSuccess) { + goto finish; + } + } + + // record basis for next "time in prior state" calculation + // (also arms a panic in override/incrementChannelState()) + _lastUpdateTimes[channel_index] = last_intransition; + finish: - return res; + return res; } // blocks might make this slightly easier? uint64_t IOStateReporter::getStateInTransitions(uint64_t channel_id, - uint64_t state_id) + uint64_t state_id) { - return _getStateValue(channel_id, state_id, kInTransitions); + return _getStateValue(channel_id, state_id, kInTransitions); } uint64_t IOStateReporter::getStateResidencyTime(uint64_t channel_id, - uint64_t state_id) + uint64_t state_id) { - return _getStateValue(channel_id, state_id, kResidencyTime); + return _getStateValue(channel_id, state_id, kResidencyTime); } uint64_t IOStateReporter::getStateLastTransitionTime(uint64_t channel_id, - uint64_t state_id) + uint64_t state_id) { - return _getStateValue(channel_id, state_id, kLastTransitionTime); + return _getStateValue(channel_id, state_id, kLastTransitionTime); } uint64_t IOStateReporter::_getStateValue(uint64_t channel_id, - uint64_t state_id, - enum valueSelector value) + uint64_t state_id, + enum valueSelector value) { - int channel_index = 0, element_index = 0, cnt; - IOStateReportValues *values = NULL; - uint64_t result = kIOReportInvalidValue; - - lockReporter(); - - if (getChannelIndices(channel_id, &channel_index, &element_index) == kIOReturnSuccess) { - - if (updateChannelValues(channel_index) == kIOReturnSuccess) { - - for (cnt = 0; cnt < _channelDimension; cnt++) { - - values = (IOStateReportValues *)getElementValues(element_index); - - if (state_id == values->state_id) { - - switch (value) { - case kInTransitions: - result = values->intransitions; - break; - case kResidencyTime: - result = values->upticks; - break; - case kLastTransitionTime: - result = values->last_intransition; - break; - default: - break; - } - - break; - } - - element_index++; - } - } - } - - unlockReporter(); - return result; + int channel_index = 0, element_index = 0, cnt; + IOStateReportValues *values = NULL; + uint64_t result = kIOReportInvalidValue; + + lockReporter(); + + if (getChannelIndices(channel_id, &channel_index, &element_index) == kIOReturnSuccess) { + if (updateChannelValues(channel_index) == kIOReturnSuccess) { + for (cnt = 0; cnt < _channelDimension; cnt++) { + values = (IOStateReportValues *)getElementValues(element_index); + + if (state_id == values->state_id) { + switch (value) { + case kInTransitions: + result = values->intransitions; + break; + case kResidencyTime: + result = values->upticks; + break; + case kLastTransitionTime: + result = values->last_intransition; + break; + default: + break; + } + + break; + } + + 
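[Reviewer note, not part of the patch: the arithmetic in handleSetStateByIndices() reduces to a small invariant. On a transition at time T, the new state's intransitions is bumped; the previous state's residency grows by T minus the channel's last update time (when that clock is armed); and the last update time becomes T. A minimal model of that bookkeeping for a single hypothetical 3-state channel, using the default "now" timestamps:]

    #include <cstdint>

    struct StateBook {
        uint64_t upticks[3] = {};       // residency per state
        uint64_t intransitions[3] = {}; // entries per state
        int      current = -1;          // -1 until first transition
        uint64_t lastUpdate = 0;        // 0 = residency clock unarmed

        void setState(int s, uint64_t now) {
            if (s != current) {
                intransitions[s]++;     // count the entry into s
            }
            if (lastUpdate != 0) {      // armed only after a transition
                upticks[current] += now - lastUpdate; // time in prior state
            }
            current = s;
            lastUpdate = now;           // basis for the next interval
        }
    };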
element_index++; + } + } + } + + unlockReporter(); + return result; } uint64_t IOStateReporter::getStateLastChannelUpdateTime(uint64_t channel_id) { - int channel_index; - uint64_t result = kIOReportInvalidValue; - - lockReporter(); - - if (getChannelIndex(channel_id, &channel_index) == kIOReturnSuccess) { - - result = _lastUpdateTimes[channel_index]; - } - - unlockReporter(); - - return result; + int channel_index; + uint64_t result = kIOReportInvalidValue; + + lockReporter(); + + if (getChannelIndex(channel_id, &channel_index) == kIOReturnSuccess) { + result = _lastUpdateTimes[channel_index]; + } + + unlockReporter(); + + return result; } /* updateChannelValues() is called to refresh state before being - reported outside the reporter. In the case of IOStateReporter, - this is primarily an update to the "time in state" data. -*/ + * reported outside the reporter. In the case of IOStateReporter, + * this is primarily an update to the "time in state" data. + */ IOReturn IOStateReporter::updateChannelValues(int channel_index) { - IOReturn kerr, result = kIOReturnError; - - int state_index, element_idx; - uint64_t currentTime; - uint64_t last_ch_update_time; - uint64_t time_in_state; - IOStateReportValues state_values; - - IOREPORTER_CHECK_LOCK(); - - if (channel_index < 0 || channel_index >= _nChannels) { - result = kIOReturnBadArgument; goto finish; - } - - /* First check to see whether this channel has begun self- - calculation of time in state. It's possible this channel - has yet to be initialized or that the driver is updating - the channel with override/incrementChannelState() which - never enable automatic time-in-state updates. In that case, - there is nothing to update and we return success. - */ - last_ch_update_time = _lastUpdateTimes[channel_index]; - if (last_ch_update_time == 0) { - result = kIOReturnSuccess; goto finish; - } - - // figure out the current state (if any) - state_index = _currentStates[channel_index]; - - // e.g. given 4 4-state channels, the boundary is ch[3].st[3] <- _elems[15] - if (channel_index < 0 || channel_index > (_nElements - state_index) - / _channelDimension) { - result = kIOReturnOverrun; goto finish; - } - element_idx = channel_index * _channelDimension + state_index; - - // get the current values - kerr = copyElementValues(element_idx,(IOReportElementValues*)&state_values); - if (kerr) { - result = kerr; goto finish; - } - - // calculate time in state - currentTime = mach_absolute_time(); - time_in_state = currentTime - last_ch_update_time; - state_values.upticks += time_in_state; - - // and store the values - kerr = setElementValues(element_idx, - (IOReportElementValues *)&state_values, - currentTime); - if (kerr) { - result = kerr; goto finish; - } - - // Record basis for next "prior time" calculation - _lastUpdateTimes[channel_index] = currentTime; - - - // success - result = kIOReturnSuccess; - + IOReturn kerr, result = kIOReturnError; + + int state_index, element_idx; + uint64_t currentTime; + uint64_t last_ch_update_time; + uint64_t time_in_state; + IOStateReportValues state_values; + + IOREPORTER_CHECK_LOCK(); + + if (channel_index < 0 || channel_index >= _nChannels) { + result = kIOReturnBadArgument; goto finish; + } + + /* First check to see whether this channel has begun self- + * calculation of time in state. It's possible this channel + * has yet to be initialized or that the driver is updating + * the channel with override/incrementChannelState() which + * never enable automatic time-in-state updates. 
In that case, + * there is nothing to update and we return success. + */ + last_ch_update_time = _lastUpdateTimes[channel_index]; + if (last_ch_update_time == 0) { + result = kIOReturnSuccess; goto finish; + } + + // figure out the current state (if any) + state_index = _currentStates[channel_index]; + + // e.g. given 4 4-state channels, the boundary is ch[3].st[3] <- _elems[15] + if (channel_index < 0 || channel_index > (_nElements - state_index) + / _channelDimension) { + result = kIOReturnOverrun; goto finish; + } + element_idx = channel_index * _channelDimension + state_index; + + // get the current values + kerr = copyElementValues(element_idx, (IOReportElementValues*)&state_values); + if (kerr) { + result = kerr; goto finish; + } + + // calculate time in state + currentTime = mach_absolute_time(); + time_in_state = currentTime - last_ch_update_time; + state_values.upticks += time_in_state; + + // and store the values + kerr = setElementValues(element_idx, + (IOReportElementValues *)&state_values, + currentTime); + if (kerr) { + result = kerr; goto finish; + } + + // Record basis for next "prior time" calculation + _lastUpdateTimes[channel_index] = currentTime; + + + // success + result = kIOReturnSuccess; + finish: - return result; + return result; } diff --git a/iokit/Kernel/IOStatistics.cpp b/iokit/Kernel/IOStatistics.cpp index 141eecabf..54338fb77 100644 --- a/iokit/Kernel/IOStatistics.cpp +++ b/iokit/Kernel/IOStatistics.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -64,7 +64,7 @@ IOWorkLoopDependency *IOStatistics::nextWorkLoopDependency = NULL; #define LOG(level, format, ...) 
\ do { \ if (level <= LOG_LEVEL) \ - printf(format, ##__VA_ARGS__); \ + printf(format, ##__VA_ARGS__); \ } while (0) /* Locks */ @@ -77,14 +77,16 @@ KextNode *IOStatistics::kextHint = NULL; IOStatistics::KextTreeHead IOStatistics::kextHead = RB_INITIALIZER(&IOStatistics::kextHead); -int IOStatistics::kextNodeCompare(KextNode *e1, KextNode *e2) +int +IOStatistics::kextNodeCompare(KextNode *e1, KextNode *e2) { - if (e1->kext < e2->kext) - return -1; - else if (e1->kext > e2->kext) - return 1; - else - return 0; + if (e1->kext < e2->kext) { + return -1; + } else if (e1->kext > e2->kext) { + return 1; + } else { + return 0; + } } RB_GENERATE(IOStatistics::KextTree, KextNode, link, kextNodeCompare); @@ -93,14 +95,16 @@ RB_GENERATE(IOStatistics::KextTree, KextNode, link, kextNodeCompare); IOStatistics::KextAddressTreeHead IOStatistics::kextAddressHead = RB_INITIALIZER(&IOStatistics::kextAddressHead); -int IOStatistics::kextAddressNodeCompare(KextNode *e1, KextNode *e2) +int +IOStatistics::kextAddressNodeCompare(KextNode *e1, KextNode *e2) { - if (e1->address < e2->address) - return -1; - else if (e1->address > e2->address) - return 1; - else - return 0; + if (e1->address < e2->address) { + return -1; + } else if (e1->address > e2->address) { + return 1; + } else { + return 0; + } } RB_GENERATE(IOStatistics::KextAddressTree, KextNode, addressLink, kextAddressNodeCompare); @@ -109,71 +113,77 @@ RB_GENERATE(IOStatistics::KextAddressTree, KextNode, addressLink, kextAddressNod IOStatistics::ClassTreeHead IOStatistics::classHead = RB_INITIALIZER(&IOStatistics::classHead); -int IOStatistics::classNodeCompare(ClassNode *e1, ClassNode *e2) { - if (e1->metaClass < e2->metaClass) - return -1; - else if (e1->metaClass > e2->metaClass) - return 1; - else - return 0; +int +IOStatistics::classNodeCompare(ClassNode *e1, ClassNode *e2) +{ + if (e1->metaClass < e2->metaClass) { + return -1; + } else if (e1->metaClass > e2->metaClass) { + return 1; + } else { + return 0; + } } RB_GENERATE(IOStatistics::ClassTree, ClassNode, tLink, classNodeCompare); /* Workloop dependencies */ -int IOWorkLoopCounter::loadTagCompare(IOWorkLoopDependency *e1, IOWorkLoopDependency *e2) { - if (e1->loadTag < e2->loadTag) - return -1; - else if (e1->loadTag > e2->loadTag) - return 1; - else - return 0; +int +IOWorkLoopCounter::loadTagCompare(IOWorkLoopDependency *e1, IOWorkLoopDependency *e2) +{ + if (e1->loadTag < e2->loadTag) { + return -1; + } else if (e1->loadTag > e2->loadTag) { + return 1; + } else { + return 0; + } } RB_GENERATE(IOWorkLoopCounter::DependencyTree, IOWorkLoopDependency, link, IOWorkLoopCounter::loadTagCompare); /* sysctl stuff */ -static int +static int oid_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req) { int error = EINVAL; uint32_t request = arg2; - switch (request) - { - case kIOStatisticsGeneral: - error = IOStatistics::getStatistics(req); - break; - case kIOStatisticsWorkLoop: - error = IOStatistics::getWorkLoopStatistics(req); - break; - case kIOStatisticsUserClient: - error = IOStatistics::getUserClientStatistics(req); - break; - default: - break; + switch (request) { + case kIOStatisticsGeneral: + error = IOStatistics::getStatistics(req); + break; + case kIOStatisticsWorkLoop: + error = IOStatistics::getWorkLoopStatistics(req); + break; + case kIOStatisticsUserClient: + error = IOStatistics::getUserClientStatistics(req); + break; + default: + break; } return error; } - + SYSCTL_NODE(_debug, OID_AUTO, iokit_statistics, CTLFLAG_RW | CTLFLAG_LOCKED, 0, 
"IOStatistics"); static SYSCTL_PROC(_debug_iokit_statistics, OID_AUTO, general, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, kIOStatisticsGeneral, oid_sysctl, "S", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, kIOStatisticsGeneral, oid_sysctl, "S", ""); static SYSCTL_PROC(_debug_iokit_statistics, OID_AUTO, workloop, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, kIOStatisticsWorkLoop, oid_sysctl, "S", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, kIOStatisticsWorkLoop, oid_sysctl, "S", ""); static SYSCTL_PROC(_debug_iokit_statistics, OID_AUTO, userclient, - CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, kIOStatisticsUserClient, oid_sysctl, "S", ""); + CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, kIOStatisticsUserClient, oid_sysctl, "S", ""); -void IOStatistics::initialize() +void +IOStatistics::initialize() { if (enabled) { return; @@ -183,25 +193,26 @@ void IOStatistics::initialize() if (!(kIOStatistics & gIOKitDebug)) { return; } - + sysctl_register_oid(&sysctl__debug_iokit_statistics_general); sysctl_register_oid(&sysctl__debug_iokit_statistics_workloop); sysctl_register_oid(&sysctl__debug_iokit_statistics_userclient); - + lock = IORWLockAlloc(); if (!lock) { return; } - + nextWorkLoopDependency = (IOWorkLoopDependency*)kalloc(sizeof(IOWorkLoopDependency)); if (!nextWorkLoopDependency) { return; } - + enabled = true; } -void IOStatistics::onKextLoad(OSKext *kext, kmod_info_t *kmod_info) +void +IOStatistics::onKextLoad(OSKext *kext, kmod_info_t *kmod_info) { KextNode *ke; @@ -212,7 +223,7 @@ void IOStatistics::onKextLoad(OSKext *kext, kmod_info_t *kmod_info) } LOG(1, "IOStatistics::onKextLoad: %s, tag %d, address 0x%llx, address end 0x%llx\n", - kext->getIdentifierCString(), kmod_info->id, (uint64_t)kmod_info->address, (uint64_t)(kmod_info->address + kmod_info->size)); + kext->getIdentifierCString(), kmod_info->id, (uint64_t)kmod_info->address, (uint64_t)(kmod_info->address + kmod_info->size)); ke = (KextNode *)kalloc(sizeof(KextNode)); if (!ke) { @@ -220,7 +231,7 @@ void IOStatistics::onKextLoad(OSKext *kext, kmod_info_t *kmod_info) } memset(ke, 0, sizeof(KextNode)); - + ke->kext = kext; ke->loadTag = kmod_info->id; ke->address = kmod_info->address; @@ -233,26 +244,27 @@ void IOStatistics::onKextLoad(OSKext *kext, kmod_info_t *kmod_info) RB_INSERT(KextTree, &kextHead, ke); RB_INSERT(KextAddressTree, &kextAddressHead, ke); - + sequenceID++; loadedKexts++; lastKextIndex++; - + IORWLockUnlock(lock); } -void IOStatistics::onKextUnload(OSKext *kext) +void +IOStatistics::onKextUnload(OSKext *kext) { KextNode sought, *found; - + assert(kext); - + if (!enabled) { return; } LOG(1, "IOStatistics::onKextUnload: %s\n", kext->getIdentifierCString()); - + IORWLockWrite(lock); sought.kext = kext; @@ -284,21 +296,21 @@ void IOStatistics::onKextUnload(OSKext *kext) if (found == kextHint) { kextHint = NULL; } - + /* Finally, free the class node */ kfree(found, sizeof(KextNode)); - + sequenceID++; loadedKexts--; - } - else { + } else { panic("IOStatistics::onKextUnload: cannot find kext: %s", kext->getIdentifierCString()); } IORWLockUnlock(lock); } -void IOStatistics::onClassAdded(OSKext *parentKext, OSMetaClass *metaClass) +void +IOStatistics::onClassAdded(OSKext *parentKext, OSMetaClass *metaClass) { ClassNode *ce; KextNode soughtKext, *foundKext = NULL; @@ -313,7 
+325,7 @@ void IOStatistics::onClassAdded(OSKext *parentKext, OSMetaClass *metaClass) ce = (ClassNode *)kalloc(sizeof(ClassNode)); if (!ce) { - return; + return; } memset(ce, 0, sizeof(ClassNode)); @@ -323,8 +335,7 @@ void IOStatistics::onClassAdded(OSKext *parentKext, OSMetaClass *metaClass) /* Hinted? */ if (kextHint && kextHint->kext == parentKext) { foundKext = kextHint; - } - else { + } else { soughtKext.kext = parentKext; foundKext = RB_FIND(KextTree, &kextHead, &soughtKext); } @@ -336,9 +347,9 @@ void IOStatistics::onClassAdded(OSKext *parentKext, OSMetaClass *metaClass) ce->metaClass = metaClass; ce->classID = lastClassIndex++; ce->parentKext = foundKext; - + /* Has superclass? */ - superClass = ce->metaClass->getSuperClass(); + superClass = ce->metaClass->getSuperClass(); if (superClass) { soughtClass.metaClass = superClass; foundClass = RB_FIND(ClassTree, &classHead, &soughtClass); @@ -347,25 +358,25 @@ void IOStatistics::onClassAdded(OSKext *parentKext, OSMetaClass *metaClass) SLIST_INIT(&ce->counterList); SLIST_INIT(&ce->userClientList); - + RB_INSERT(ClassTree, &classHead, ce); SLIST_INSERT_HEAD(&foundKext->classList, ce, lLink); - + foundKext->classes++; - + kextHint = foundKext; - - sequenceID++; + + sequenceID++; registeredClasses++; - } - else { + } else { panic("IOStatistics::onClassAdded: cannot find parent kext: %s", parentKext->getIdentifierCString()); } - + IORWLockUnlock(lock); } -void IOStatistics::onClassRemoved(OSKext *parentKext, OSMetaClass *metaClass) +void +IOStatistics::onClassRemoved(OSKext *parentKext, OSMetaClass *metaClass) { ClassNode sought, *found; @@ -384,7 +395,7 @@ void IOStatistics::onClassRemoved(OSKext *parentKext, OSMetaClass *metaClass) if (found) { IOEventSourceCounter *esc; IOUserClientCounter *ucc; - + /* Free up the list of counters */ while ((esc = SLIST_FIRST(&found->counterList))) { SLIST_REMOVE_HEAD(&found->counterList, link); @@ -399,29 +410,29 @@ void IOStatistics::onClassRemoved(OSKext *parentKext, OSMetaClass *metaClass) /* Remove from class tree */ RB_REMOVE(ClassTree, &classHead, found); - + /* Remove from parent */ SLIST_REMOVE(&found->parentKext->classList, found, ClassNode, lLink); - + /* Finally, free the class node */ kfree(found, sizeof(ClassNode)); - + sequenceID++; registeredClasses--; - } - else { + } else { panic("IOStatistics::onClassRemoved: cannot find class: %s", metaClass->getClassName()); } IORWLockUnlock(lock); } -IOEventSourceCounter *IOStatistics::registerEventSource(OSObject *inOwner) +IOEventSourceCounter * +IOStatistics::registerEventSource(OSObject *inOwner) { IOEventSourceCounter *counter = NULL; ClassNode sought, *found = NULL; boolean_t createDummyCounter = FALSE; - + assert(inOwner); if (!enabled) { @@ -432,7 +443,7 @@ IOEventSourceCounter *IOStatistics::registerEventSource(OSObject *inOwner) if (!counter) { return NULL; } - + memset(counter, 0, sizeof(IOEventSourceCounter)); IORWLockWrite(lock); @@ -443,8 +454,7 @@ IOEventSourceCounter *IOStatistics::registerEventSource(OSObject *inOwner) if (inOwner->retainCount > 0xFFFFFF) { kprintf("IOStatistics::registerEventSource - bad metaclass %p\n", inOwner); createDummyCounter = TRUE; - } - else { + } else { sought.metaClass = inOwner->getMetaClass(); found = RB_FIND(ClassTree, &classHead, &sought); } @@ -458,13 +468,14 @@ IOEventSourceCounter *IOStatistics::registerEventSource(OSObject *inOwner) if (!(createDummyCounter || found)) { panic("IOStatistics::registerEventSource: cannot find parent class: %s", inOwner->getMetaClass()->getClassName()); } - + 
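[Reviewer note, not part of the patch: every comparator reformatted in this file (kextNodeCompare, kextAddressNodeCompare, classNodeCompare, loadTagCompare) follows the same three-way shape that the BSD <sys/tree.h> red-black macros require. The full pattern in miniature, with hypothetical names:]

    #include <sys/tree.h>
    #include <stdint.h>

    struct Node {
        uint32_t key;
        RB_ENTRY(Node) link;    // tree linkage embedded in the node
    };

    static int
    node_cmp(struct Node *e1, struct Node *e2)
    {
        if (e1->key < e2->key) {
            return -1;
        } else if (e1->key > e2->key) {
            return 1;
        } else {
            return 0;
        }
    }

    RB_HEAD(NodeTree, Node);
    RB_GENERATE(NodeTree, Node, link, node_cmp)

    // RB_INSERT(NodeTree, &head, n) returns NULL on success (the
    // existing equal node otherwise); RB_FIND looks up via a stack
    // "sought" node carrying just the key, as the code above does.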
IORWLockUnlock(lock); - + return counter; } -void IOStatistics::unregisterEventSource(IOEventSourceCounter *counter) +void +IOStatistics::unregisterEventSource(IOEventSourceCounter *counter) { if (!counter) { return; @@ -477,11 +488,12 @@ void IOStatistics::unregisterEventSource(IOEventSourceCounter *counter) registeredCounters--; } kfree(counter, sizeof(IOEventSourceCounter)); - + IORWLockUnlock(lock); } -IOWorkLoopCounter* IOStatistics::registerWorkLoop(IOWorkLoop *workLoop) +IOWorkLoopCounter* +IOStatistics::registerWorkLoop(IOWorkLoop *workLoop) { IOWorkLoopCounter *counter = NULL; KextNode *found; @@ -496,7 +508,7 @@ IOWorkLoopCounter* IOStatistics::registerWorkLoop(IOWorkLoop *workLoop) if (!counter) { return NULL; } - + memset(counter, 0, sizeof(IOWorkLoopCounter)); found = getKextNodeFromBacktrace(TRUE); @@ -515,23 +527,25 @@ IOWorkLoopCounter* IOStatistics::registerWorkLoop(IOWorkLoop *workLoop) return counter; } -void IOStatistics::unregisterWorkLoop(IOWorkLoopCounter *counter) +void +IOStatistics::unregisterWorkLoop(IOWorkLoopCounter *counter) { if (!counter) { return; } - + IORWLockWrite(lock); if (counter->parentKext) { SLIST_REMOVE(&counter->parentKext->workLoopList, counter, IOWorkLoopCounter, link); } kfree(counter, sizeof(IOWorkLoopCounter)); registeredWorkloops--; - + IORWLockUnlock(lock); } -IOUserClientCounter *IOStatistics::registerUserClient(IOUserClient *userClient) +IOUserClientCounter * +IOStatistics::registerUserClient(IOUserClient *userClient) { ClassNode sought, *found; IOUserClientCounter *counter = NULL; @@ -546,7 +560,7 @@ IOUserClientCounter *IOStatistics::registerUserClient(IOUserClient *userClient) if (!counter) { return NULL; } - + memset(counter, 0, sizeof(IOUserClientCounter)); IORWLockWrite(lock); @@ -557,8 +571,7 @@ IOUserClientCounter *IOStatistics::registerUserClient(IOUserClient *userClient) if (found) { counter->parentClass = found; SLIST_INSERT_HEAD(&found->userClientList, counter, link); - } - else { + } else { panic("IOStatistics::registerUserClient: cannot find parent class: %s", sought.metaClass->getClassName()); } @@ -567,57 +580,60 @@ IOUserClientCounter *IOStatistics::registerUserClient(IOUserClient *userClient) return counter; } -void IOStatistics::unregisterUserClient(IOUserClientCounter *counter) +void +IOStatistics::unregisterUserClient(IOUserClientCounter *counter) { if (!counter) { return; } - + IORWLockWrite(lock); - + SLIST_REMOVE(&counter->parentClass->userClientList, counter, IOUserClientCounter, link); kfree(counter, sizeof(IOUserClientCounter)); IORWLockUnlock(lock); } -void IOStatistics::attachWorkLoopEventSource(IOWorkLoopCounter *wlc, IOEventSourceCounter *esc) +void +IOStatistics::attachWorkLoopEventSource(IOWorkLoopCounter *wlc, IOEventSourceCounter *esc) { if (!wlc) { - return; + return; } - + IORWLockWrite(lock); - + if (!nextWorkLoopDependency) { return; } - + attachedEventSources++; wlc->attachedEventSources++; - + /* Track the kext dependency */ nextWorkLoopDependency->loadTag = esc->parentClass->parentKext->loadTag; if (NULL == RB_INSERT(IOWorkLoopCounter::DependencyTree, &wlc->dependencyHead, nextWorkLoopDependency)) { nextWorkLoopDependency = (IOWorkLoopDependency*)kalloc(sizeof(IOWorkLoopDependency)); } - + IORWLockUnlock(lock); } -void IOStatistics::detachWorkLoopEventSource(IOWorkLoopCounter *wlc, IOEventSourceCounter *esc) +void +IOStatistics::detachWorkLoopEventSource(IOWorkLoopCounter *wlc, IOEventSourceCounter *esc) { IOWorkLoopDependency sought, *found; - + if (!wlc) { return; } - + IORWLockWrite(lock); 
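[Reviewer note, not part of the patch: the register*/unregister* paths above share one shape: allocate and zero outside the lock, then take IORWLockWrite() only for the list and counter updates, leaving the sysctl readers on IORWLockRead(). Schematically, with hypothetical types and assuming the same kernel primitives the file already uses:]

    #include <kern/kalloc.h>
    #include <IOKit/IOLocks.h>
    #include <sys/queue.h>
    #include <string.h>

    struct Counter { SLIST_ENTRY(Counter) link; };
    SLIST_HEAD(CounterList, Counter);

    static Counter *
    registerCounter(IORWLock *lock, CounterList *list, int *count)
    {
        Counter *c = (Counter *)kalloc(sizeof(Counter));
        if (!c) {
            return NULL;
        }
        memset(c, 0, sizeof(Counter));  // zero before publishing

        IORWLockWrite(lock);            // excludes the sysctl readers
        SLIST_INSERT_HEAD(list, c, link);
        (*count)++;
        IORWLockUnlock(lock);
        return c;
    }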
attachedEventSources--; wlc->attachedEventSources--; - + sought.loadTag = esc->parentClass->parentKext->loadTag; found = RB_FIND(IOWorkLoopCounter::DependencyTree, &wlc->dependencyHead, &sought); @@ -629,7 +645,8 @@ void IOStatistics::detachWorkLoopEventSource(IOWorkLoopCounter *wlc, IOEventSour IORWLockUnlock(lock); } -int IOStatistics::getStatistics(sysctl_req *req) +int +IOStatistics::getStatistics(sysctl_req *req) { int error; uint32_t calculatedSize, size; @@ -637,25 +654,25 @@ int IOStatistics::getStatistics(sysctl_req *req) IOStatisticsHeader *header; assert(IOStatistics::enabled && req); - + IORWLockRead(IOStatistics::lock); /* Work out how much we need to allocate. IOStatisticsKext is of variable size. */ - calculatedSize = sizeof(IOStatisticsHeader) + - sizeof(IOStatisticsGlobal) + - (sizeof(IOStatisticsKext) * loadedKexts) + (sizeof(uint32_t) * registeredClasses) + - (sizeof(IOStatisticsMemory) * loadedKexts) + - (sizeof(IOStatisticsClass) * registeredClasses) + - (sizeof(IOStatisticsCounter) * registeredClasses) + - (sizeof(IOStatisticsKextIdentifier) * loadedKexts) + - (sizeof(IOStatisticsClassName) * registeredClasses); + calculatedSize = sizeof(IOStatisticsHeader) + + sizeof(IOStatisticsGlobal) + + (sizeof(IOStatisticsKext) * loadedKexts) + (sizeof(uint32_t) * registeredClasses) + + (sizeof(IOStatisticsMemory) * loadedKexts) + + (sizeof(IOStatisticsClass) * registeredClasses) + + (sizeof(IOStatisticsCounter) * registeredClasses) + + (sizeof(IOStatisticsKextIdentifier) * loadedKexts) + + (sizeof(IOStatisticsClassName) * registeredClasses); /* Size request? */ if (req->oldptr == USER_ADDR_NULL) { error = SYSCTL_OUT(req, NULL, calculatedSize); goto exit; } - + /* Read only */ if (req->newptr != USER_ADDR_NULL) { error = EPERM; @@ -669,9 +686,9 @@ int IOStatistics::getStatistics(sysctl_req *req) } memset(buffer, 0, calculatedSize); - + ptr = buffer; - + header = (IOStatisticsHeader*)((void*)ptr); header->sig = IOSTATISTICS_SIG; @@ -695,17 +712,17 @@ int IOStatistics::getStatistics(sysctl_req *req) header->memoryStatsOffset = header->kextStatsOffset + size; size = copyMemoryStatistics((IOStatisticsMemory*)((void*)ptr)); ptr += size; - + /* Class statistics */ header->classStatsOffset = header->memoryStatsOffset + size; size = copyClassStatistics((IOStatisticsClass*)((void*)ptr)); ptr += size; - + /* Dynamic class counter data */ header->counterStatsOffset = header->classStatsOffset + size; size = copyCounterStatistics((IOStatisticsCounter*)((void*)ptr)); ptr += size; - + /* Kext identifiers */ header->kextIdentifiersOffset = header->counterStatsOffset + size; size = copyKextIdentifiers((IOStatisticsKextIdentifier*)((void*)ptr)); @@ -715,11 +732,11 @@ int IOStatistics::getStatistics(sysctl_req *req) header->classNamesOffset = header->kextIdentifiersOffset + size; size = copyClassNames((IOStatisticsClassName*)ptr); ptr += size; - + LOG(2, "IOStatistics::getStatistics - calculatedSize 0x%x, kexts 0x%x, classes 0x%x.\n", - calculatedSize, loadedKexts, registeredClasses); + calculatedSize, loadedKexts, registeredClasses); - assert( (uint32_t)(ptr - buffer) == calculatedSize ); + assert((uint32_t)(ptr - buffer) == calculatedSize ); error = SYSCTL_OUT(req, buffer, calculatedSize); @@ -730,7 +747,8 @@ exit: return error; } -int IOStatistics::getWorkLoopStatistics(sysctl_req *req) +int +IOStatistics::getWorkLoopStatistics(sysctl_req *req) { int error; uint32_t calculatedSize, size; @@ -743,14 +761,14 @@ int IOStatistics::getWorkLoopStatistics(sysctl_req *req) /* Approximate how much we 
need to allocate (worse case estimate) */ calculatedSize = sizeof(IOStatisticsWorkLoop) * registeredWorkloops + - sizeof(uint32_t) * attachedEventSources; + sizeof(uint32_t) * attachedEventSources; /* Size request? */ if (req->oldptr == USER_ADDR_NULL) { error = SYSCTL_OUT(req, NULL, calculatedSize); goto exit; } - + /* Read only */ if (req->newptr != USER_ADDR_NULL) { error = EPERM; @@ -764,12 +782,12 @@ int IOStatistics::getWorkLoopStatistics(sysctl_req *req) } memset(buffer, 0, calculatedSize); header = (IOStatisticsWorkLoopHeader*)((void*)buffer); - + header->sig = IOSTATISTICS_SIG_WORKLOOP; header->ver = IOSTATISTICS_VER; header->seq = sequenceID; - + header->workloopCount = registeredWorkloops; size = copyWorkLoopStatistics(&header->workLoopStats); @@ -787,8 +805,9 @@ exit: return error; } -int IOStatistics::getUserClientStatistics(sysctl_req *req) -{ +int +IOStatistics::getUserClientStatistics(sysctl_req *req) +{ int error; uint32_t calculatedSize, size; char *buffer; @@ -800,9 +819,9 @@ int IOStatistics::getUserClientStatistics(sysctl_req *req) IORWLockRead(IOStatistics::lock); /* Work out how much we need to allocate */ - calculatedSize = sizeof(IOStatisticsUserClientHeader) + - sizeof(IOStatisticsUserClientCall) * IOKIT_STATISTICS_RECORDED_USERCLIENT_PROCS * loadedKexts; - + calculatedSize = sizeof(IOStatisticsUserClientHeader) + + sizeof(IOStatisticsUserClientCall) * IOKIT_STATISTICS_RECORDED_USERCLIENT_PROCS * loadedKexts; + /* Size request? */ if (req->oldptr == USER_ADDR_NULL) { error = SYSCTL_OUT(req, NULL, calculatedSize); @@ -832,19 +851,18 @@ int IOStatistics::getUserClientStatistics(sysctl_req *req) header->sig = IOSTATISTICS_SIG_USERCLIENT; header->ver = IOSTATISTICS_VER; - + header->seq = sequenceID; header->processes = 0; size = copyUserClientStatistics(header, requestedLoadTag); - + assert((sizeof(IOStatisticsUserClientHeader) + size) <= calculatedSize); - + if (size) { error = SYSCTL_OUT(req, buffer, sizeof(IOStatisticsUserClientHeader) + size); - } - else { + } else { error = EINVAL; } @@ -855,16 +873,18 @@ exit: return error; } -uint32_t IOStatistics::copyGlobalStatistics(IOStatisticsGlobal *stats) +uint32_t +IOStatistics::copyGlobalStatistics(IOStatisticsGlobal *stats) { stats->kextCount = loadedKexts; stats->classCount = registeredClasses; stats->workloops = registeredWorkloops; - + return sizeof(IOStatisticsGlobal); } -uint32_t IOStatistics::copyKextStatistics(IOStatisticsKext *stats) +uint32_t +IOStatistics::copyKextStatistics(IOStatisticsKext *stats) { KextNode *ke; ClassNode *ce; @@ -880,20 +900,21 @@ uint32_t IOStatistics::copyKextStatistics(IOStatisticsKext *stats) SLIST_FOREACH(ce, &ke->classList, lLink) { stats->classIndexes[index++] = ce->classID; } - + stats = (IOStatisticsKext *)((void*)((char*)stats + sizeof(IOStatisticsKext) + (ke->classes * sizeof(uint32_t)))); } - return (sizeof(IOStatisticsKext) * loadedKexts + sizeof(uint32_t) * registeredClasses); + return sizeof(IOStatisticsKext) * loadedKexts + sizeof(uint32_t) * registeredClasses; } -uint32_t IOStatistics::copyMemoryStatistics(IOStatisticsMemory *stats) +uint32_t +IOStatistics::copyMemoryStatistics(IOStatisticsMemory *stats) { KextNode *ke; RB_FOREACH(ke, KextTree, &kextHead) { stats->allocatedSize = ke->memoryCounters[kIOStatisticsMalloc]; - stats->freedSize = ke->memoryCounters[kIOStatisticsFree]; + stats->freedSize = ke->memoryCounters[kIOStatisticsFree]; stats->allocatedAlignedSize = ke->memoryCounters[kIOStatisticsMallocAligned]; stats->freedAlignedSize = 
ke->memoryCounters[kIOStatisticsFreeAligned]; stats->allocatedContiguousSize = ke->memoryCounters[kIOStatisticsMallocContiguous]; @@ -902,11 +923,12 @@ uint32_t IOStatistics::copyMemoryStatistics(IOStatisticsMemory *stats) stats->freedPageableSize = ke->memoryCounters[kIOStatisticsFreePageable]; stats++; } - - return (sizeof(IOStatisticsMemory) * loadedKexts); + + return sizeof(IOStatisticsMemory) * loadedKexts; } -uint32_t IOStatistics::copyClassStatistics(IOStatisticsClass *stats) +uint32_t +IOStatistics::copyClassStatistics(IOStatisticsClass *stats) { KextNode *ke; ClassNode *ce; @@ -914,7 +936,7 @@ uint32_t IOStatistics::copyClassStatistics(IOStatisticsClass *stats) RB_FOREACH(ke, KextTree, &kextHead) { SLIST_FOREACH(ce, &ke->classList, lLink) { stats->classID = ce->classID; - stats->superClassID = ce->superClassID; + stats->superClassID = ce->superClassID; stats->classSize = ce->metaClass->getClassSize(); stats++; @@ -924,7 +946,8 @@ uint32_t IOStatistics::copyClassStatistics(IOStatisticsClass *stats) return sizeof(IOStatisticsClass) * registeredClasses; } -uint32_t IOStatistics::copyCounterStatistics(IOStatisticsCounter *stats) +uint32_t +IOStatistics::copyCounterStatistics(IOStatisticsCounter *stats) { KextNode *ke; ClassNode *ce; @@ -954,45 +977,45 @@ uint32_t IOStatistics::copyCounterStatistics(IOStatisticsCounter *stats) /* Event source counters */ SLIST_FOREACH(counter, &ce->counterList, link) { - switch (counter->type) { - case kIOStatisticsInterruptEventSourceCounter: - iec->created++; - iec->produced += counter->u.interrupt.produced; - iec->checksForWork += counter->u.interrupt.checksForWork; - break; - case kIOStatisticsFilterInterruptEventSourceCounter: - fiec->created++; - fiec->produced += counter->u.filter.produced; - fiec->checksForWork += counter->u.filter.checksForWork; - break; - case kIOStatisticsTimerEventSourceCounter: - tec->created++; - tec->timeouts += counter->u.timer.timeouts; - tec->checksForWork += counter->u.timer.checksForWork; - tec->timeOnGate += counter->timeOnGate; - tec->closeGateCalls += counter->closeGateCalls; - tec->openGateCalls += counter->openGateCalls; - break; - case kIOStatisticsCommandGateCounter: - cgc->created++; - cgc->timeOnGate += counter->timeOnGate; - cgc->actionCalls += counter->u.commandGate.actionCalls; - break; - case kIOStatisticsCommandQueueCounter: - cqc->created++; - cqc->actionCalls += counter->u.commandQueue.actionCalls; - break; - case kIOStatisticsDerivedEventSourceCounter: - dec->created++; - dec->timeOnGate += counter->timeOnGate; - dec->closeGateCalls += counter->closeGateCalls; - dec->openGateCalls += counter->openGateCalls; - break; - default: - break; + switch (counter->type) { + case kIOStatisticsInterruptEventSourceCounter: + iec->created++; + iec->produced += counter->u.interrupt.produced; + iec->checksForWork += counter->u.interrupt.checksForWork; + break; + case kIOStatisticsFilterInterruptEventSourceCounter: + fiec->created++; + fiec->produced += counter->u.filter.produced; + fiec->checksForWork += counter->u.filter.checksForWork; + break; + case kIOStatisticsTimerEventSourceCounter: + tec->created++; + tec->timeouts += counter->u.timer.timeouts; + tec->checksForWork += counter->u.timer.checksForWork; + tec->timeOnGate += counter->timeOnGate; + tec->closeGateCalls += counter->closeGateCalls; + tec->openGateCalls += counter->openGateCalls; + break; + case kIOStatisticsCommandGateCounter: + cgc->created++; + cgc->timeOnGate += counter->timeOnGate; + cgc->actionCalls += counter->u.commandGate.actionCalls; + 
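[Reviewer note, not part of the patch: getStatistics() earlier in this hunk sizes one variable-length blob up front, appends each section back to back, and records each section's start in IOStatisticsHeader so userspace can navigate the blob without knowing per-kext or per-class sizes. The offset bookkeeping in miniature, with two hypothetical fixed-size sections:]

    #include <cstdint>
    #include <cstring>

    struct Header { uint32_t aOffset, bOffset; };

    static uint32_t copyA(char *dst) { memset(dst, 0, 16); return 16; }
    static uint32_t copyB(char *dst) { memset(dst, 0, 8);  return 8;  }

    // buffer must hold sizeof(Header) + 16 + 8 bytes
    static void
    pack(char *buffer)
    {
        char *ptr = buffer;
        Header *header = (Header *)(void *)ptr;
        ptr += sizeof(Header);

        header->aOffset = (uint32_t)(ptr - buffer); // section A start
        ptr += copyA(ptr);

        header->bOffset = (uint32_t)(ptr - buffer); // section B start
        ptr += copyB(ptr);
    }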
break; + case kIOStatisticsCommandQueueCounter: + cqc->created++; + cqc->actionCalls += counter->u.commandQueue.actionCalls; + break; + case kIOStatisticsDerivedEventSourceCounter: + dec->created++; + dec->timeOnGate += counter->timeOnGate; + dec->closeGateCalls += counter->closeGateCalls; + dec->openGateCalls += counter->openGateCalls; + break; + default: + break; } } - + stats++; } } @@ -1000,7 +1023,8 @@ uint32_t IOStatistics::copyCounterStatistics(IOStatisticsCounter *stats) return sizeof(IOStatisticsCounter) * registeredClasses; } -uint32_t IOStatistics::copyKextIdentifiers(IOStatisticsKextIdentifier *kextIDs) +uint32_t +IOStatistics::copyKextIdentifiers(IOStatisticsKextIdentifier *kextIDs) { KextNode *ke; @@ -1009,10 +1033,11 @@ uint32_t IOStatistics::copyKextIdentifiers(IOStatisticsKextIdentifier *kextIDs) kextIDs++; } - return (sizeof(IOStatisticsKextIdentifier) * loadedKexts); + return sizeof(IOStatisticsKextIdentifier) * loadedKexts; } -uint32_t IOStatistics::copyClassNames(IOStatisticsClassName *classNames) +uint32_t +IOStatistics::copyClassNames(IOStatisticsClassName *classNames) { KextNode *ke; ClassNode *ce; @@ -1023,11 +1048,12 @@ uint32_t IOStatistics::copyClassNames(IOStatisticsClassName *classNames) classNames++; } } - - return (sizeof(IOStatisticsClassName) * registeredClasses); + + return sizeof(IOStatisticsClassName) * registeredClasses; } -uint32_t IOStatistics::copyWorkLoopStatistics(IOStatisticsWorkLoop *stats) +uint32_t +IOStatistics::copyWorkLoopStatistics(IOStatisticsWorkLoop *stats) { KextNode *ke; IOWorkLoopCounter *wlc; @@ -1044,9 +1070,9 @@ uint32_t IOStatistics::copyWorkLoopStatistics(IOStatisticsWorkLoop *stats) stats->dependentKextLoadTags[stats->dependentKexts] = dependentNode->loadTag; stats->dependentKexts++; } - + size = sizeof(IOStatisticsWorkLoop) + (sizeof(uint32_t) * stats->dependentKexts); - + accumulatedSize += size; stats = (IOStatisticsWorkLoop*)((void*)((char*)stats + size)); } @@ -1055,7 +1081,8 @@ uint32_t IOStatistics::copyWorkLoopStatistics(IOStatisticsWorkLoop *stats) return accumulatedSize; } -uint32_t IOStatistics::copyUserClientStatistics(IOStatisticsUserClientHeader *stats, uint32_t loadTag) +uint32_t +IOStatistics::copyUserClientStatistics(IOStatisticsUserClientHeader *stats, uint32_t loadTag) { KextNode *sought, *found = NULL; uint32_t procs = 0; @@ -1067,7 +1094,7 @@ uint32_t IOStatistics::copyUserClientStatistics(IOStatisticsUserClientHeader *st break; } } - + if (!found) { return 0; } @@ -1083,8 +1110,9 @@ uint32_t IOStatistics::copyUserClientStatistics(IOStatisticsUserClientHeader *st return sizeof(IOStatisticsUserClientCall) * stats->processes; } -void IOStatistics::storeUserClientCallInfo(IOUserClient *userClient, IOUserClientCounter *counter) -{ +void +IOStatistics::storeUserClientCallInfo(IOUserClient *userClient, IOUserClientCounter *counter) +{ OSString *ossUserClientCreator = NULL; int32_t pid = -1; KextNode *parentKext; @@ -1092,49 +1120,51 @@ void IOStatistics::storeUserClientCallInfo(IOUserClient *userClient, IOUserClien uint32_t count = 0; const char *ptr = NULL; OSObject *obj; - + /* TODO: see if this can be more efficient */ obj = userClient->copyProperty("IOUserClientCreator", - gIOServicePlane, - kIORegistryIterateRecursively | kIORegistryIterateParents); + gIOServicePlane, + kIORegistryIterateRecursively | kIORegistryIterateParents); - if (!obj) + if (!obj) { goto err_nounlock; + } ossUserClientCreator = OSDynamicCast(OSString, obj); if (ossUserClientCreator) { - uint32_t len, lenIter = 0; - + uint32_t len, 
lenIter = 0; + ptr = ossUserClientCreator->getCStringNoCopy(); len = ossUserClientCreator->getLength(); - + while ((*ptr != ' ') && (lenIter < len)) { ptr++; lenIter++; } - + if (lenIter < len) { ptr++; // Skip the space lenIter++; pid = 0; - while ( (*ptr != ',') && (lenIter < len)) { - pid = pid*10 + (*ptr - '0'); + while ((*ptr != ',') && (lenIter < len)) { + pid = pid * 10 + (*ptr - '0'); ptr++; lenIter++; } - - if(lenIter == len) { + + if (lenIter == len) { pid = -1; } else { ptr += 2; } } } - - if (-1 == pid) + + if (-1 == pid) { goto err_nounlock; - + } + IORWLockWrite(lock); parentKext = counter->parentClass->parentKext; @@ -1146,13 +1176,12 @@ void IOStatistics::storeUserClientCallInfo(IOUserClient *userClient, IOUserClien if (count) { TAILQ_REMOVE(&parentKext->userClientCallList, entry, link); break; - } - else { + } else { /* At the head already, so increment and return */ goto err_unlock; } } - + count++; } @@ -1161,12 +1190,11 @@ void IOStatistics::storeUserClientCallInfo(IOUserClient *userClient, IOUserClien /* Max elements hit, so reuse the last */ entry = TAILQ_LAST(&parentKext->userClientCallList, ProcessEntryList); TAILQ_REMOVE(&parentKext->userClientCallList, entry, link); - } - else { + } else { /* Otherwise, allocate a new entry */ entry = (IOUserClientProcessEntry*)kalloc(sizeof(IOUserClientProcessEntry)); if (!entry) { - IORWLockUnlock(lock); + IORWLockUnlock(lock); return; } } @@ -1175,43 +1203,50 @@ void IOStatistics::storeUserClientCallInfo(IOUserClient *userClient, IOUserClien entry->pid = pid; entry->calls = 1; } - + TAILQ_FOREACH(nextEntry, &parentKext->userClientCallList, link) { - if (nextEntry->calls <= entry->calls) + if (nextEntry->calls <= entry->calls) { break; - + } + prevEntry = nextEntry; } - - if (!prevEntry) + + if (!prevEntry) { TAILQ_INSERT_HEAD(&parentKext->userClientCallList, entry, link); - else + } else { TAILQ_INSERT_AFTER(&parentKext->userClientCallList, prevEntry, entry, link); - + } + err_unlock: IORWLockUnlock(lock); - + err_nounlock: - if (obj) + if (obj) { obj->release(); + } } -void IOStatistics::countUserClientCall(IOUserClient *client) { +void +IOStatistics::countUserClientCall(IOUserClient *client) +{ IOUserClient::ExpansionData *data; IOUserClientCounter *counter; - + /* Guard against an uninitialized client object - */ if (!(data = client->reserved)) { return; } - + if ((counter = data->counter)) { storeUserClientCallInfo(client, counter); OSIncrementAtomic(&counter->clientCalls); } } -KextNode *IOStatistics::getKextNodeFromBacktrace(boolean_t write) { +KextNode * +IOStatistics::getKextNodeFromBacktrace(boolean_t write) +{ const uint32_t btMin = 3; void *bt[16]; @@ -1241,11 +1276,10 @@ KextNode *IOStatistics::getKextNodeFromBacktrace(boolean_t write) { while (ke) { if (*scanAddr < ke->address) { ke = RB_LEFT(ke, addressLink); - } - else { + } else { if ((*scanAddr < ke->address_end) && (*scanAddr >= ke->address)) { - if (!ke->kext->isKernelComponent()) { - return ke; + if (!ke->kext->isKernelComponent()) { + return ke; } else { found = ke; } @@ -1258,19 +1292,23 @@ KextNode *IOStatistics::getKextNodeFromBacktrace(boolean_t write) { if (!found) { IORWLockUnlock(lock); } - + return found; } - -void IOStatistics::releaseKextNode(KextNode *node) { + +void +IOStatistics::releaseKextNode(KextNode *node) +{ #pragma unused(node) IORWLockUnlock(lock); } /* IOLib allocations */ -void IOStatistics::countAlloc(uint32_t index, vm_size_t size) { +void +IOStatistics::countAlloc(uint32_t index, vm_size_t size) +{ KextNode *ke; - + if (!enabled) { 
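[Reviewer note, not part of the patch: storeUserClientCallInfo() above derives the caller pid by hand-parsing the "IOUserClientCreator" property, which has the shape "pid <digits>, <name>". The same logic in isolation, as a hypothetical helper; like the original, it returns -1 when the space or comma is missing and trusts that only digits appear between them:]

    #include <stdint.h>

    static int32_t
    parse_creator_pid(const char *s, uint32_t len)
    {
        uint32_t i = 0;
        while (i < len && s[i] != ' ') {    // skip the "pid" token
            i++;
        }
        if (++i >= len) {
            return -1;                      // no space, or nothing after it
        }
        int32_t pid = 0;
        while (i < len && s[i] != ',') {
            pid = pid * 10 + (s[i] - '0');  // accumulate decimal digits
            i++;
        }
        return (i == len) ? -1 : pid;       // -1 when the ',' is missing
    }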
return; } diff --git a/iokit/Kernel/IOStringFuncs.c b/iokit/Kernel/IOStringFuncs.c index 6890305d6..fc3a8f825 100644 --- a/iokit/Kernel/IOStringFuncs.c +++ b/iokit/Kernel/IOStringFuncs.c @@ -2,7 +2,7 @@ * Copyright (c) 1998-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Copyright (c) 1995 NeXT Computer, Inc. All rights reserved. * * strol.c - The functions strtol() & strtoul() are exported as public API @@ -74,10 +74,10 @@ */ /* -#include -#include -#include -*/ + #include + #include + #include + */ #include #include @@ -94,26 +94,26 @@ typedef int BOOL; static inline BOOL isupper(char c) { - return (c >= 'A' && c <= 'Z'); + return c >= 'A' && c <= 'Z'; } static inline BOOL isalpha(char c) { - return ((c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')); + return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z'); } static inline BOOL isspace(char c) { - return (c == ' ' || c == '\t' || c == '\n' || c == '\12'); + return c == ' ' || c == '\t' || c == '\n' || c == '\12'; } static inline BOOL isdigit(char c) { - return (c >= '0' && c <= '9'); + return c >= '0' && c <= '9'; } /* @@ -142,8 +142,9 @@ strtol(const char *nptr, char **endptr, int base) if (c == '-') { neg = 1; c = *s++; - } else if (c == '+') + } else if (c == '+') { c = *s++; + } if ((base == 0 || base == 16) && c == '0' && (*s == 'x' || *s == 'X')) { c = s[1]; @@ -155,8 +156,9 @@ strtol(const char *nptr, char **endptr, int base) s += 2; base = 2; } - if (base == 0) + if (base == 0) { base = c == '0' ? 8 : 10; + } /* * Compute the cutoff value between legal numbers and illegal @@ -179,17 +181,19 @@ strtol(const char *nptr, char **endptr, int base) cutlim = cutoff % (unsigned long)base; cutoff /= (unsigned long)base; for (acc = 0, any = 0;; c = *s++) { - if (isdigit(c)) + if (isdigit(c)) { c -= '0'; - else if (isalpha(c)) + } else if (isalpha(c)) { c -= isupper(c) ? 'A' - 10 : 'a' - 10; - else + } else { break; - if (c >= base) + } + if (c >= base) { break; - if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim) ) + } + if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) { any = -1; - else { + } else { any = 1; acc *= base; acc += c; @@ -198,20 +202,17 @@ strtol(const char *nptr, char **endptr, int base) if (any < 0) { acc = neg ? 
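/*
 * Editor's illustration (not part of the patch): the cutoff/cutlim pair in
 * strtol above is the standard BSD overflow guard. For the positive case in
 * base 10 on LP64 (LONG_MAX = 9223372036854775807):
 *     cutoff = LONG_MAX / 10 = 922337203685477580
 *     cutlim = LONG_MAX % 10 = 7
 * A digit d may be appended only while acc < cutoff, or acc == cutoff and
 * d <= cutlim; anything else would make acc * 10 + d exceed LONG_MAX. A
 * standalone check of that condition:
 */
#include <cassert>
#include <climits>

static bool wouldOverflowBase10(unsigned long acc, int digit)
{
    const unsigned long cutoff = (unsigned long)LONG_MAX / 10;  // 922337203685477580 on LP64
    const int cutlim = (int)((unsigned long)LONG_MAX % 10);     // 7 on LP64
    return acc > cutoff || (acc == cutoff && digit > cutlim);
}

int main(void)
{
    assert(!wouldOverflowBase10(922337203685477580UL, 7)); // exactly LONG_MAX: accepted
    assert( wouldOverflowBase10(922337203685477580UL, 8)); // LONG_MAX + 1: rejected
    return 0;
}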
LONG_MIN : LONG_MAX; // errno = ERANGE; - } else if (neg) + } else if (neg) { acc = -acc; - if (endptr != 0) - { - if(any) - { + } + if (endptr != 0) { + if (any) { *endptr = __CAST_AWAY_QUALIFIER(s - 1, const, char *); - } - else - { + } else { *endptr = __CAST_AWAY_QUALIFIER(nptr, const, char *); } } - return (acc); + return acc; } unsigned long @@ -232,8 +233,9 @@ strtoul(const char *nptr, char **endptr, int base) if (c == '-') { neg = 1; c = *s++; - } else if (c == '+') + } else if (c == '+') { c = *s++; + } if ((base == 0 || base == 16) && c == '0' && (*s == 'x' || *s == 'X')) { c = s[1]; @@ -245,22 +247,25 @@ strtoul(const char *nptr, char **endptr, int base) s += 2; base = 2; } - if (base == 0) + if (base == 0) { base = c == '0' ? 8 : 10; + } cutoff = (unsigned long)ULONG_MAX / (unsigned long)base; cutlim = (unsigned long)ULONG_MAX % (unsigned long)base; for (acc = 0, any = 0;; c = *s++) { - if (isdigit(c)) + if (isdigit(c)) { c -= '0'; - else if (isalpha(c)) + } else if (isalpha(c)) { c -= isupper(c) ? 'A' - 10 : 'a' - 10; - else + } else { break; - if (c >= base) + } + if (c >= base) { break; - if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim) ) + } + if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) { any = -1; - else { + } else { any = 1; acc *= base; acc += c; @@ -269,21 +274,18 @@ strtoul(const char *nptr, char **endptr, int base) if (any < 0) { acc = ULONG_MAX; // errno = ERANGE; - } else if (neg) + } else if (neg) { acc = -acc; - if (endptr != 0) - { - if(any) - { + } + if (endptr != 0) { + if (any) { *endptr = __CAST_AWAY_QUALIFIER(s - 1, const, char *); - } - else - { + } else { *endptr = __CAST_AWAY_QUALIFIER(nptr, const, char *); } } - return (acc); + return acc; } /* @@ -315,8 +317,9 @@ strtoq(const char *nptr, char **endptr, int base) c = *s++; } else { neg = 0; - if (c == '+') + if (c == '+') { c = *s++; + } } if ((base == 0 || base == 16) && c == '0' && (*s == 'x' || *s == 'X')) { @@ -324,8 +327,9 @@ strtoq(const char *nptr, char **endptr, int base) s += 2; base = 16; } - if (base == 0) + if (base == 0) { base = c == '0' ? 8 : 10; + } /* * Compute the cutoff value between legal numbers and illegal @@ -350,17 +354,19 @@ strtoq(const char *nptr, char **endptr, int base) cutlim = cutoff % qbase; cutoff /= qbase; for (acc = 0, any = 0;; c = *s++) { - if (isdigit(c)) + if (isdigit(c)) { c -= '0'; - else if (isalpha(c)) + } else if (isalpha(c)) { c -= isupper(c) ? 'A' - 10 : 'a' - 10; - else + } else { break; - if (c >= base) + } + if (c >= base) { break; - if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) + } + if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) { any = -1; - else { + } else { any = 1; acc *= qbase; acc += c; @@ -369,21 +375,18 @@ strtoq(const char *nptr, char **endptr, int base) if (any < 0) { acc = neg ? 
QUAD_MIN : QUAD_MAX; // errno = ERANGE; - } else if (neg) + } else if (neg) { acc = -acc; - if (endptr != 0) - { - if(any) - { + } + if (endptr != 0) { + if (any) { *endptr = __CAST_AWAY_QUALIFIER(s - 1, const, char *); - } - else - { + } else { *endptr = __CAST_AWAY_QUALIFIER(nptr, const, char *); } } - return (acc); + return acc; } @@ -395,8 +398,8 @@ strtoq(const char *nptr, char **endptr, int base) */ u_quad_t strtouq(const char *nptr, - char **endptr, - int base) + char **endptr, + int base) { const char *s = nptr; u_quad_t acc; @@ -414,10 +417,11 @@ strtouq(const char *nptr, if (c == '-') { neg = 1; c = *s++; - } else { + } else { neg = 0; - if (c == '+') + if (c == '+') { c = *s++; + } } if ((base == 0 || base == 16) && c == '0' && (*s == 'x' || *s == 'X')) { @@ -425,23 +429,26 @@ strtouq(const char *nptr, s += 2; base = 16; } - if (base == 0) + if (base == 0) { base = c == '0' ? 8 : 10; + } qbase = (unsigned)base; cutoff = (u_quad_t)UQUAD_MAX / qbase; cutlim = (u_quad_t)UQUAD_MAX % qbase; for (acc = 0, any = 0;; c = *s++) { - if (isdigit(c)) + if (isdigit(c)) { c -= '0'; - else if (isalpha(c)) + } else if (isalpha(c)) { c -= isupper(c) ? 'A' - 10 : 'a' - 10; - else + } else { break; - if (c >= base) + } + if (c >= base) { break; - if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) + } + if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim)) { any = -1; - else { + } else { any = 1; acc *= qbase; acc += c; @@ -450,21 +457,18 @@ strtouq(const char *nptr, if (any < 0) { acc = UQUAD_MAX; // errno = ERANGE; - } else if (neg) + } else if (neg) { acc = -acc; - if (endptr != 0) - { - if(any) - { + } + if (endptr != 0) { + if (any) { *endptr = __CAST_AWAY_QUALIFIER(s - 1, const, char *); - } - else - { + } else { *endptr = __CAST_AWAY_QUALIFIER(nptr, const, char *); } } - return (acc); + return acc; } @@ -479,14 +483,16 @@ strncat(char *s1, const char *s2, unsigned long n) char *d = s1; const char *s = s2; - while (*d != 0) + while (*d != 0) { d++; + } do { - if ((*d = *s++) == '\0') + if ((*d = *s++) == '\0') { break; + } d++; } while (--n != 0); *d = '\0'; } - return (s1); + return s1; } diff --git a/iokit/Kernel/IOSubMemoryDescriptor.cpp b/iokit/Kernel/IOSubMemoryDescriptor.cpp index 5b377141a..8d0a472b1 100644 --- a/iokit/Kernel/IOSubMemoryDescriptor.cpp +++ b/iokit/Kernel/IOSubMemoryDescriptor.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,182 +34,193 @@ OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor) -IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) +IOReturn +IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) { #ifdef __LP64__ - super::redirect( safeTask, doRedirect ); + super::redirect( safeTask, doRedirect ); #endif /* __LP64__ */ - return( _parent->redirect( safeTask, doRedirect )); + return _parent->redirect( safeTask, doRedirect ); } IOSubMemoryDescriptor * -IOSubMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, - IOByteCount offset, - IOByteCount length, - IOOptionBits options) +IOSubMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, + IOByteCount offset, + IOByteCount length, + IOOptionBits options) { - IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor; + IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor; - if (self && !self->initSubRange(of, offset, length, (IODirection) options)) { - self->release(); - self = 0; - } - return self; + if (self && !self->initSubRange(of, offset, length, (IODirection) options)) { + self->release(); + self = 0; + } + return self; } -bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent, - IOByteCount offset, IOByteCount length, - IODirection direction ) +bool +IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent, + IOByteCount offset, IOByteCount length, + IODirection direction ) { - if( parent && ((offset + length) > parent->getLength())) - return( false); - - /* - * We can check the _parent instance variable before having ever set it - * to an initial value because I/O Kit guarantees that all our instance - * variables are zeroed on an object's allocation. - */ - - if( !_parent) { - if( !super::init()) - return( false ); - } else { + if (parent && ((offset + length) > parent->getLength())) { + return false; + } + /* - * An existing memory descriptor is being retargeted to - * point to somewhere else. Clean up our present state. + * We can check the _parent instance variable before having ever set it + * to an initial value because I/O Kit guarantees that all our instance + * variables are zeroed on an object's allocation. */ - _parent->release(); - } - - if (parent) { - parent->retain(); - _tag = parent->getTag(); - } - else { - _tag = 0; - } - _parent = parent; - _start = offset; - _length = length; - _flags = direction; - _flags |= kIOMemoryThreadSafe; + if (!_parent) { + if (!super::init()) { + return false; + } + } else { + /* + * An existing memory descriptor is being retargeted to + * point to somewhere else. Clean up our present state. 
+ */ + + _parent->release(); + } + + if (parent) { + parent->retain(); + _tag = parent->getTag(); + } else { + _tag = 0; + } + _parent = parent; + _start = offset; + _length = length; + _flags = direction; + _flags |= kIOMemoryThreadSafe; #ifndef __LP64__ - _direction = (IODirection) (_flags & kIOMemoryDirectionMask); + _direction = (IODirection) (_flags & kIOMemoryDirectionMask); #endif /* !__LP64__ */ - return( true ); + return true; } -void IOSubMemoryDescriptor::free( void ) +void +IOSubMemoryDescriptor::free( void ) { - if( _parent) - _parent->release(); + if (_parent) { + _parent->release(); + } - super::free(); + super::free(); } addr64_t IOSubMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount * length, IOOptionBits options) { - addr64_t address; - IOByteCount actualLength; + addr64_t address; + IOByteCount actualLength; - assert(offset <= _length); + assert(offset <= _length); - if( length) - *length = 0; + if (length) { + *length = 0; + } - if( offset >= _length) - return( 0 ); + if (offset >= _length) { + return 0; + } - address = _parent->getPhysicalSegment( offset + _start, &actualLength, options ); + address = _parent->getPhysicalSegment( offset + _start, &actualLength, options ); - if( address && length) - *length = min( _length - offset, actualLength ); + if (address && length) { + *length = min( _length - offset, actualLength ); + } - return( address ); + return address; } -IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState, - IOOptionBits * oldState ) +IOReturn +IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState, + IOOptionBits * oldState ) { - IOReturn err; + IOReturn err; - err = _parent->setPurgeable( newState, oldState ); + err = _parent->setPurgeable( newState, oldState ); - return( err ); + return err; } -IOReturn IOSubMemoryDescriptor::prepare( - IODirection forDirection) +IOReturn +IOSubMemoryDescriptor::prepare( + IODirection forDirection) { - IOReturn err; + IOReturn err; - err = _parent->prepare( forDirection); + err = _parent->prepare( forDirection); - return( err ); + return err; } -IOReturn IOSubMemoryDescriptor::complete( - IODirection forDirection) +IOReturn +IOSubMemoryDescriptor::complete( + IODirection forDirection) { - IOReturn err; + IOReturn err; - err = _parent->complete( forDirection); + err = _parent->complete( forDirection); - return( err ); + return err; } -IOMemoryMap * IOSubMemoryDescriptor::makeMapping( - IOMemoryDescriptor * owner, - task_t intoTask, - IOVirtualAddress address, - IOOptionBits options, - IOByteCount offset, - IOByteCount length ) +IOMemoryMap * +IOSubMemoryDescriptor::makeMapping( + IOMemoryDescriptor * owner, + task_t intoTask, + IOVirtualAddress address, + IOOptionBits options, + IOByteCount offset, + IOByteCount length ) { - IOMemoryMap * mapping = 0; + IOMemoryMap * mapping = 0; #ifndef __LP64__ - if (!(kIOMap64Bit & options)) - { - panic("IOSubMemoryDescriptor::makeMapping !64bit"); - } + if (!(kIOMap64Bit & options)) { + panic("IOSubMemoryDescriptor::makeMapping !64bit"); + } #endif /* !__LP64__ */ - mapping = (IOMemoryMap *) _parent->makeMapping( - owner, - intoTask, - address, - options, _start + offset, length ); + mapping = (IOMemoryMap *) _parent->makeMapping( + owner, + intoTask, + address, + options, _start + offset, length ); - return( mapping ); + return mapping; } uint64_t IOSubMemoryDescriptor::getPreparationID( void ) { - uint64_t pID; + uint64_t pID; - if (!super::getKernelReserved()) - return (kIOPreparationIDUnsupported); + if (!super::getKernelReserved()) { + 
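/*
 * Editor's sketch (not part of the patch): getPhysicalSegment above translates
 * a sub-descriptor offset into the parent by adding _start, then clamps the
 * returned run so it never extends past the sub-range. A minimal userspace
 * model of that clamping; the parent run length and base address here are
 * hypothetical, chosen only to make the arithmetic visible:
 */
#include <algorithm>
#include <cassert>
#include <cstdint>

struct SubRange {
    uint64_t start;   // offset of this view inside the parent (_start)
    uint64_t length;  // length of the view (_length)

    // Mirrors the offset/length handling of IOSubMemoryDescriptor::getPhysicalSegment.
    uint64_t segment(uint64_t offset, uint64_t *len) const
    {
        if (len) *len = 0;
        if (offset >= length) return 0;
        const uint64_t parentLen = 0x1000;                  // assumed run from the parent
        const uint64_t addr = 0x80000000ULL + start + offset; // hypothetical parent mapping
        if (len) *len = std::min(length - offset, parentLen); // clamp to the view
        return addr;
    }
};

int main(void)
{
    SubRange sr{ /*start*/ 0x200, /*length*/ 0x100 };
    uint64_t len = 0;
    assert(sr.segment(0x80, &len) == 0x80000280ULL);
    assert(len == 0x80);  // clamped to the 0x80 bytes left in the view, not 0x1000
    return 0;
}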
return kIOPreparationIDUnsupported; + } - pID = _parent->getPreparationID(); - if (reserved->kernReserved[0] != pID) - { - reserved->kernReserved[0] = pID; - reserved->preparationID = kIOPreparationIDUnprepared; - super::setPreparationID(); - } + pID = _parent->getPreparationID(); + if (reserved->kernReserved[0] != pID) { + reserved->kernReserved[0] = pID; + reserved->preparationID = kIOPreparationIDUnprepared; + super::setPreparationID(); + } - return (super::getPreparationID()); + return super::getPreparationID(); } IOReturn IOSubMemoryDescriptor::getPageCounts(IOByteCount * residentPageCount, - IOByteCount * dirtyPageCount) + IOByteCount * dirtyPageCount) { - return (_parent->getPageCounts(residentPageCount, dirtyPageCount)); + return _parent->getPageCounts(residentPageCount, dirtyPageCount); } diff --git a/iokit/Kernel/IOSyncer.cpp b/iokit/Kernel/IOSyncer.cpp index 8379cc7ca..32449d6f6 100644 --- a/iokit/Kernel/IOSyncer.cpp +++ b/iokit/Kernel/IOSyncer.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOSyncer.cpp created by wgulland on 2000-02-02 */ @@ -34,88 +34,100 @@ OSDefineMetaClassAndStructors(IOSyncer, OSObject) IOSyncer * IOSyncer::create(bool twoRetains) { - IOSyncer * me = new IOSyncer; + IOSyncer * me = new IOSyncer; - if (me && !me->init(twoRetains)) { - me->release(); - return 0; - } + if (me && !me->init(twoRetains)) { + me->release(); + return 0; + } - return me; + return me; } -bool IOSyncer::init(bool twoRetains) +bool +IOSyncer::init(bool twoRetains) { - if (!OSObject::init()) - return false; + if (!OSObject::init()) { + return false; + } + + if (!(guardLock = IOSimpleLockAlloc())) { + return false; + } - if (!(guardLock = IOSimpleLockAlloc()) ) - return false; - - IOSimpleLockInit(guardLock); + IOSimpleLockInit(guardLock); - if(twoRetains) - retain(); + if (twoRetains) { + retain(); + } - fResult = kIOReturnSuccess; + fResult = kIOReturnSuccess; - reinit(); + reinit(); - return true; + return true; } -void IOSyncer::reinit() +void +IOSyncer::reinit() { - IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); - threadMustStop = true; - IOSimpleLockUnlockEnableInterrupt(guardLock, is); + IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); + threadMustStop = true; + IOSimpleLockUnlockEnableInterrupt(guardLock, is); } -void IOSyncer::free() +void +IOSyncer::free() { - // just in case a thread is blocked here: - privateSignal(); + // just in case a thread is blocked here: + privateSignal(); - if (guardLock != NULL) - IOSimpleLockFree(guardLock); + if (guardLock != NULL) { + IOSimpleLockFree(guardLock); + } - OSObject::free(); + OSObject::free(); } -IOReturn IOSyncer::wait(bool autoRelease) +IOReturn +IOSyncer::wait(bool autoRelease) { - IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); + IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); - if (threadMustStop) { - assert_wait((void *) &threadMustStop, false); - IOSimpleLockUnlockEnableInterrupt(guardLock, is); - thread_block(THREAD_CONTINUE_NULL); - } - else - IOSimpleLockUnlockEnableInterrupt(guardLock, is); + if (threadMustStop) { + assert_wait((void *) &threadMustStop, false); + IOSimpleLockUnlockEnableInterrupt(guardLock, is); + thread_block(THREAD_CONTINUE_NULL); + } else { + IOSimpleLockUnlockEnableInterrupt(guardLock, is); + } - IOReturn result = fResult; // Pick up before auto deleting! + IOReturn result = fResult; // Pick up before auto deleting! 
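/*
 * Editor's sketch (not part of the patch): IOSyncer is a one-shot rendezvous --
 * wait() blocks while threadMustStop is set, signal() clears it, wakes the
 * waiter, and hands over fResult. The kernel version uses assert_wait()/
 * thread_block() under a spinlock with interrupts disabled; a userspace
 * analogue with the same shape:
 */
#include <cassert>
#include <condition_variable>
#include <mutex>
#include <thread>

struct MiniSyncer {
    std::mutex m;
    std::condition_variable cv;
    bool mustStop = true;   // matches threadMustStop (set by reinit)
    int result = 0;         // matches fResult

    int wait()
    {
        std::unique_lock<std::mutex> lk(m);
        cv.wait(lk, [this]{ return !mustStop; });
        return result;      // "pick up before auto deleting"
    }
    void signal(int res)
    {
        { std::lock_guard<std::mutex> lk(m); result = res; mustStop = false; }
        cv.notify_one();    // matches thread_wakeup_one
    }
};

int main(void)
{
    MiniSyncer s;
    std::thread t([&]{ s.signal(42); });
    assert(s.wait() == 42);
    t.join();
    return 0;
}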
- if(autoRelease) - release(); + if (autoRelease) { + release(); + } - return result; + return result; } -void IOSyncer::signal(IOReturn res, bool autoRelease) +void +IOSyncer::signal(IOReturn res, bool autoRelease) { - fResult = res; - privateSignal(); - if(autoRelease) - release(); + fResult = res; + privateSignal(); + if (autoRelease) { + release(); + } } -void IOSyncer::privateSignal() +void +IOSyncer::privateSignal() { - if (threadMustStop) { - IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); - threadMustStop = false; - thread_wakeup_one((void *) &threadMustStop); - IOSimpleLockUnlockEnableInterrupt(guardLock, is); - } + if (threadMustStop) { + IOInterruptState is = IOSimpleLockLockDisableInterrupt(guardLock); + threadMustStop = false; + thread_wakeup_one((void *) &threadMustStop); + IOSimpleLockUnlockEnableInterrupt(guardLock, is); + } } diff --git a/iokit/Kernel/IOTimerEventSource.cpp b/iokit/Kernel/IOTimerEventSource.cpp index 22987d8b8..ad6b75455 100644 --- a/iokit/Kernel/IOTimerEventSource.cpp +++ b/iokit/Kernel/IOTimerEventSource.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000, 2009-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -90,9 +90,9 @@ do { \ #endif /* IOKITSTATS */ -// +// // reserved != 0 means IOTimerEventSource::timeoutAndRelease is being used, -// not a subclassed implementation. +// not a subclassed implementation. // // Timeout handler function. This function is called by the kernel when @@ -101,220 +101,240 @@ do { \ __inline__ void IOTimerEventSource::invokeAction(IOTimerEventSource::Action action, IOTimerEventSource * ts, - OSObject * owner, IOWorkLoop * workLoop) + OSObject * owner, IOWorkLoop * workLoop) { - bool trace = (gIOKitTrace & kIOTraceTimers) ? true : false; + bool trace = (gIOKitTrace & kIOTraceTimers) ? 
true : false; - if (trace) - IOTimeStampStartConstant(IODBG_TIMES(IOTIMES_ACTION), - VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner)); + if (trace) { + IOTimeStampStartConstant(IODBG_TIMES(IOTIMES_ACTION), + VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner)); + } - if (kActionBlock & flags) ((IOTimerEventSource::ActionBlock) actionBlock)(ts); - else (*action)(owner, ts); + if (kActionBlock & flags) { + ((IOTimerEventSource::ActionBlock) actionBlock)(ts); + } else { + (*action)(owner, ts); + } #if CONFIG_DTRACE - DTRACE_TMR3(iotescallout__expire, Action, action, OSObject, owner, void, workLoop); + DTRACE_TMR3(iotescallout__expire, Action, action, OSObject, owner, void, workLoop); #endif - if (trace) - IOTimeStampEndConstant(IODBG_TIMES(IOTIMES_ACTION), - VM_KERNEL_UNSLIDE(action), VM_KERNEL_ADDRHIDE(owner)); + if (trace) { + IOTimeStampEndConstant(IODBG_TIMES(IOTIMES_ACTION), + VM_KERNEL_UNSLIDE(action), VM_KERNEL_ADDRHIDE(owner)); + } } -void IOTimerEventSource::timeout(void *self) +void +IOTimerEventSource::timeout(void *self) { - IOTimerEventSource *me = (IOTimerEventSource *) self; - - IOStatisticsTimeout(); - - if (me->enabled && me->action) - { - IOWorkLoop * - wl = me->workLoop; - if (wl) - { - Action doit; - wl->closeGate(); - IOStatisticsCloseGate(); - doit = (Action) me->action; - if (doit && me->enabled && AbsoluteTime_to_scalar(&me->abstime)) - { - me->invokeAction(doit, me, me->owner, me->workLoop); - } - IOStatisticsOpenGate(); - wl->openGate(); - } - } + IOTimerEventSource *me = (IOTimerEventSource *) self; + + IOStatisticsTimeout(); + + if (me->enabled && me->action) { + IOWorkLoop * + wl = me->workLoop; + if (wl) { + Action doit; + wl->closeGate(); + IOStatisticsCloseGate(); + doit = (Action) me->action; + if (doit && me->enabled && AbsoluteTime_to_scalar(&me->abstime)) { + me->invokeAction(doit, me, me->owner, me->workLoop); + } + IOStatisticsOpenGate(); + wl->openGate(); + } + } } -void IOTimerEventSource::timeoutAndRelease(void * self, void * c) +void +IOTimerEventSource::timeoutAndRelease(void * self, void * c) { - IOTimerEventSource *me = (IOTimerEventSource *) self; + IOTimerEventSource *me = (IOTimerEventSource *) self; /* The second parameter (a pointer) gets abused to carry an SInt32, so on LP64, "count" - must be cast to "long" before, in order to tell GCC we're not truncating a pointer. */ + * must be cast to "long" before, in order to tell GCC we're not truncating a pointer. 
*/ SInt32 count = (SInt32) (long) c; - IOStatisticsTimeout(); - - if (me->enabled && me->action) - { - IOWorkLoop * - wl = me->reserved->workLoop; - if (wl) - { - Action doit; - wl->closeGate(); - IOStatisticsCloseGate(); - doit = (Action) me->action; - if (doit && (me->reserved->calloutGeneration == count)) - { - me->invokeAction(doit, me, me->owner, me->workLoop); - } - IOStatisticsOpenGate(); - wl->openGate(); - } - } - - me->reserved->workLoop->release(); - me->release(); + IOStatisticsTimeout(); + + if (me->enabled && me->action) { + IOWorkLoop * + wl = me->reserved->workLoop; + if (wl) { + Action doit; + wl->closeGate(); + IOStatisticsCloseGate(); + doit = (Action) me->action; + if (doit && (me->reserved->calloutGeneration == count)) { + me->invokeAction(doit, me, me->owner, me->workLoop); + } + IOStatisticsOpenGate(); + wl->openGate(); + } + } + + me->reserved->workLoop->release(); + me->release(); } // -- work loop delivery -bool IOTimerEventSource::checkForWork() +bool +IOTimerEventSource::checkForWork() { - Action doit; + Action doit; - if (reserved - && (reserved->calloutGenerationSignaled == reserved->calloutGeneration) - && enabled && (doit = (Action) action)) - { - reserved->calloutGenerationSignaled = ~reserved->calloutGeneration; - invokeAction(doit, this, owner, workLoop); - } + if (reserved + && (reserved->calloutGenerationSignaled == reserved->calloutGeneration) + && enabled && (doit = (Action) action)) { + reserved->calloutGenerationSignaled = ~reserved->calloutGeneration; + invokeAction(doit, this, owner, workLoop); + } - return false; + return false; } -void IOTimerEventSource::timeoutSignaled(void * self, void * c) +void +IOTimerEventSource::timeoutSignaled(void * self, void * c) { - IOTimerEventSource *me = (IOTimerEventSource *) self; + IOTimerEventSource *me = (IOTimerEventSource *) self; - me->reserved->calloutGenerationSignaled = (SInt32)(long) c; - if (me->enabled) me->signalWorkAvailable(); + me->reserved->calloutGenerationSignaled = (SInt32)(long) c; + if (me->enabled) { + me->signalWorkAvailable(); + } } // -- -void IOTimerEventSource::setTimeoutFunc() +void +IOTimerEventSource::setTimeoutFunc() { - thread_call_priority_t pri; - uint32_t options; - - if (reserved) panic("setTimeoutFunc already %p, %p", this, reserved); - - // reserved != 0 means IOTimerEventSource::timeoutAndRelease is being used, - // not a subclassed implementation - reserved = IONew(ExpansionData, 1); - reserved->calloutGenerationSignaled = ~reserved->calloutGeneration; - options = abstime; - abstime = 0; - - thread_call_options_t tcoptions = 0; - thread_call_func_t func = NULL; - - switch (kIOTimerEventSourceOptionsPriorityMask & options) - { - case kIOTimerEventSourceOptionsPriorityHigh: - pri = THREAD_CALL_PRIORITY_HIGH; - func = &IOTimerEventSource::timeoutAndRelease; - break; - - case kIOTimerEventSourceOptionsPriorityKernel: - pri = THREAD_CALL_PRIORITY_KERNEL; - func = &IOTimerEventSource::timeoutAndRelease; - break; - - case kIOTimerEventSourceOptionsPriorityKernelHigh: - pri = THREAD_CALL_PRIORITY_KERNEL_HIGH; - func = &IOTimerEventSource::timeoutAndRelease; - break; - - case kIOTimerEventSourceOptionsPriorityUser: - pri = THREAD_CALL_PRIORITY_USER; - func = &IOTimerEventSource::timeoutAndRelease; - break; - - case kIOTimerEventSourceOptionsPriorityLow: - pri = THREAD_CALL_PRIORITY_LOW; - func = &IOTimerEventSource::timeoutAndRelease; - break; - - case kIOTimerEventSourceOptionsPriorityWorkLoop: - pri = THREAD_CALL_PRIORITY_KERNEL; - tcoptions |= THREAD_CALL_OPTIONS_SIGNAL; - if 
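/*
 * Editor's sketch (not part of the patch): the calloutGeneration counter used
 * by timeoutAndRelease/checkForWork above is a stale-callout guard. Arming a
 * timer captures the current generation; cancelTimeout/disable bump it; an
 * expiration only runs the action if its captured generation is still current,
 * so a callout that was already in flight when the timer was cancelled or
 * re-armed is quietly dropped. A minimal model of that handshake:
 */
#include <atomic>
#include <cassert>

struct GenTimer {
    std::atomic<int> generation{0};
    int armedGen = 0;
    bool fired = false;

    void arm()    { armedGen = generation.load(); } // capture, as wakeAtTime does
    void cancel() { generation++; }                 // bump, as cancelTimeout does
    void fire()   { if (armedGen == generation.load()) fired = true; }
};

int main(void)
{
    GenTimer t;
    t.arm();
    t.cancel();   // the pending expiration is now stale
    t.fire();     // ignored, like timeoutAndRelease's calloutGeneration check
    assert(!t.fired);
    t.arm();
    t.fire();     // generation matches: the action runs
    assert(t.fired);
    return 0;
}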
(kIOTimerEventSourceOptionsAllowReenter & options) break; - func = &IOTimerEventSource::timeoutSignaled; - break; - - default: - break; - } - - assertf(func, "IOTimerEventSource options 0x%x", options); - if (!func) return; // init will fail - - if (THREAD_CALL_OPTIONS_SIGNAL & tcoptions) flags |= kActive; - else flags |= kPassive; - - if (!(kIOTimerEventSourceOptionsAllowReenter & options)) tcoptions |= THREAD_CALL_OPTIONS_ONCE; - - calloutEntry = (void *) thread_call_allocate_with_options(func, - (thread_call_param_t) this, pri, tcoptions); - assert(calloutEntry); + thread_call_priority_t pri; + uint32_t options; + + if (reserved) { + panic("setTimeoutFunc already %p, %p", this, reserved); + } + + // reserved != 0 means IOTimerEventSource::timeoutAndRelease is being used, + // not a subclassed implementation + reserved = IONew(ExpansionData, 1); + reserved->calloutGenerationSignaled = ~reserved->calloutGeneration; + options = abstime; + abstime = 0; + + thread_call_options_t tcoptions = 0; + thread_call_func_t func = NULL; + + switch (kIOTimerEventSourceOptionsPriorityMask & options) { + case kIOTimerEventSourceOptionsPriorityHigh: + pri = THREAD_CALL_PRIORITY_HIGH; + func = &IOTimerEventSource::timeoutAndRelease; + break; + + case kIOTimerEventSourceOptionsPriorityKernel: + pri = THREAD_CALL_PRIORITY_KERNEL; + func = &IOTimerEventSource::timeoutAndRelease; + break; + + case kIOTimerEventSourceOptionsPriorityKernelHigh: + pri = THREAD_CALL_PRIORITY_KERNEL_HIGH; + func = &IOTimerEventSource::timeoutAndRelease; + break; + + case kIOTimerEventSourceOptionsPriorityUser: + pri = THREAD_CALL_PRIORITY_USER; + func = &IOTimerEventSource::timeoutAndRelease; + break; + + case kIOTimerEventSourceOptionsPriorityLow: + pri = THREAD_CALL_PRIORITY_LOW; + func = &IOTimerEventSource::timeoutAndRelease; + break; + + case kIOTimerEventSourceOptionsPriorityWorkLoop: + pri = THREAD_CALL_PRIORITY_KERNEL; + tcoptions |= THREAD_CALL_OPTIONS_SIGNAL; + if (kIOTimerEventSourceOptionsAllowReenter & options) { + break; + } + func = &IOTimerEventSource::timeoutSignaled; + break; + + default: + break; + } + + assertf(func, "IOTimerEventSource options 0x%x", options); + if (!func) { + return; // init will fail + } + if (THREAD_CALL_OPTIONS_SIGNAL & tcoptions) { + flags |= kActive; + } else { + flags |= kPassive; + } + + if (!(kIOTimerEventSourceOptionsAllowReenter & options)) { + tcoptions |= THREAD_CALL_OPTIONS_ONCE; + } + + calloutEntry = (void *) thread_call_allocate_with_options(func, + (thread_call_param_t) this, pri, tcoptions); + assert(calloutEntry); } -bool IOTimerEventSource::init(OSObject *inOwner, Action inAction) +bool +IOTimerEventSource::init(OSObject *inOwner, Action inAction) { - if (!super::init(inOwner, (IOEventSource::Action) inAction) ) - return false; + if (!super::init(inOwner, (IOEventSource::Action) inAction)) { + return false; + } - setTimeoutFunc(); - if (!calloutEntry) - return false; + setTimeoutFunc(); + if (!calloutEntry) { + return false; + } - IOStatisticsInitializeCounter(); + IOStatisticsInitializeCounter(); - return true; + return true; } -bool IOTimerEventSource::init(uint32_t options, OSObject *inOwner, Action inAction) +bool +IOTimerEventSource::init(uint32_t options, OSObject *inOwner, Action inAction) { - abstime = options; - return (init(inOwner, inAction)); + abstime = options; + return init(inOwner, inAction); } IOTimerEventSource * IOTimerEventSource::timerEventSource(uint32_t inOptions, OSObject *inOwner, Action inAction) { - IOTimerEventSource *me = new IOTimerEventSource; 
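/*
 * Editor's note (not part of the patch): the factory below follows the usual
 * libkern two-phase construction idiom -- allocate, call init(), and on init
 * failure release the half-built object and return 0. It recurs throughout
 * this patch (IOSyncer::create, IOUserIterator::withIterator, and here). A
 * freestanding sketch of the idiom with a hypothetical class; in the kernel,
 * operator new can also return 0, which the leading null check covers:
 */
struct Widget {
    bool ready = false;
    bool init(int options) { ready = (options >= 0); return ready; } // may fail
    void release() { delete this; }   // stand-in for OSObject::release

    static Widget *create(int options)
    {
        Widget *me = new Widget;
        if (me && !me->init(options)) {
            me->release();            // tear down the failed instance
            me = nullptr;             // callers receive 0 on failure
        }
        return me;
    }
};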
+ IOTimerEventSource *me = new IOTimerEventSource; - if (me && !me->init(inOptions, inOwner, inAction)) { - me->release(); - return 0; - } + if (me && !me->init(inOptions, inOwner, inAction)) { + me->release(); + return 0; + } - return me; + return me; } IOTimerEventSource * IOTimerEventSource::timerEventSource(uint32_t options, OSObject *inOwner, ActionBlock action) { - IOTimerEventSource * tes; - tes = IOTimerEventSource::timerEventSource(options, inOwner, (Action) NULL); - if (tes) tes->setActionBlock((IOEventSource::ActionBlock) action); + IOTimerEventSource * tes; + tes = IOTimerEventSource::timerEventSource(options, inOwner, (Action) NULL); + if (tes) { + tes->setActionBlock((IOEventSource::ActionBlock) action); + } - return tes; + return tes; } #define _thread_call_cancel(tc) ((kActive & flags) ? thread_call_cancel_wait((tc)) : thread_call_cancel((tc))) @@ -322,204 +342,226 @@ IOTimerEventSource::timerEventSource(uint32_t options, OSObject *inOwner, Action IOTimerEventSource * IOTimerEventSource::timerEventSource(OSObject *inOwner, Action inAction) { - return (IOTimerEventSource::timerEventSource( - kIOTimerEventSourceOptionsPriorityKernelHigh, - inOwner, inAction)); + return IOTimerEventSource::timerEventSource( + kIOTimerEventSourceOptionsPriorityKernelHigh, + inOwner, inAction); } -void IOTimerEventSource::free() +void +IOTimerEventSource::free() { - if (calloutEntry) { - __assert_only bool freed; + if (calloutEntry) { + __assert_only bool freed; - cancelTimeout(); + cancelTimeout(); - freed = thread_call_free((thread_call_t) calloutEntry); - assert(freed); - } + freed = thread_call_free((thread_call_t) calloutEntry); + assert(freed); + } - if (reserved) - IODelete(reserved, ExpansionData, 1); + if (reserved) { + IODelete(reserved, ExpansionData, 1); + } - super::free(); + super::free(); } -void IOTimerEventSource::cancelTimeout() +void +IOTimerEventSource::cancelTimeout() { - if (reserved) - reserved->calloutGeneration++; - bool active = _thread_call_cancel((thread_call_t) calloutEntry); - AbsoluteTime_to_scalar(&abstime) = 0; - if (active && reserved && (kPassive & flags)) - { - release(); - workLoop->release(); - } + if (reserved) { + reserved->calloutGeneration++; + } + bool active = _thread_call_cancel((thread_call_t) calloutEntry); + AbsoluteTime_to_scalar(&abstime) = 0; + if (active && reserved && (kPassive & flags)) { + release(); + workLoop->release(); + } } -void IOTimerEventSource::enable() +void +IOTimerEventSource::enable() { - super::enable(); - if (kIOReturnSuccess != wakeAtTime(abstime)) - super::disable(); // Problem re-scheduling timeout ignore enable + super::enable(); + if (kIOReturnSuccess != wakeAtTime(abstime)) { + super::disable(); // Problem re-scheduling timeout ignore enable + } } -void IOTimerEventSource::disable() +void +IOTimerEventSource::disable() { - if (reserved) - reserved->calloutGeneration++; - bool active = _thread_call_cancel((thread_call_t) calloutEntry); - super::disable(); - if (active && reserved && (kPassive & flags)) - { - release(); - workLoop->release(); - } + if (reserved) { + reserved->calloutGeneration++; + } + bool active = _thread_call_cancel((thread_call_t) calloutEntry); + super::disable(); + if (active && reserved && (kPassive & flags)) { + release(); + workLoop->release(); + } } -IOReturn IOTimerEventSource::setTimeoutTicks(UInt32 ticks) +IOReturn +IOTimerEventSource::setTimeoutTicks(UInt32 ticks) { - return setTimeout(ticks, kTickScale); + return setTimeout(ticks, kTickScale); } -IOReturn 
IOTimerEventSource::setTimeoutMS(UInt32 ms) +IOReturn +IOTimerEventSource::setTimeoutMS(UInt32 ms) { - return setTimeout(ms, kMillisecondScale); + return setTimeout(ms, kMillisecondScale); } -IOReturn IOTimerEventSource::setTimeoutUS(UInt32 us) +IOReturn +IOTimerEventSource::setTimeoutUS(UInt32 us) { - return setTimeout(us, kMicrosecondScale); + return setTimeout(us, kMicrosecondScale); } -IOReturn IOTimerEventSource::setTimeout(UInt32 interval, UInt32 scale_factor) +IOReturn +IOTimerEventSource::setTimeout(UInt32 interval, UInt32 scale_factor) { - AbsoluteTime end; + AbsoluteTime end; - clock_interval_to_deadline(interval, scale_factor, &end); - return wakeAtTime(end); + clock_interval_to_deadline(interval, scale_factor, &end); + return wakeAtTime(end); } #if !defined(__LP64__) -IOReturn IOTimerEventSource::setTimeout(mach_timespec_t interval) +IOReturn +IOTimerEventSource::setTimeout(mach_timespec_t interval) { - AbsoluteTime end, nsecs; + AbsoluteTime end, nsecs; - clock_interval_to_absolutetime_interval - (interval.tv_nsec, kNanosecondScale, &nsecs); - clock_interval_to_deadline - (interval.tv_sec, NSEC_PER_SEC, &end); - ADD_ABSOLUTETIME(&end, &nsecs); + clock_interval_to_absolutetime_interval + (interval.tv_nsec, kNanosecondScale, &nsecs); + clock_interval_to_deadline + (interval.tv_sec, NSEC_PER_SEC, &end); + ADD_ABSOLUTETIME(&end, &nsecs); - return wakeAtTime(end); + return wakeAtTime(end); } #endif -IOReturn IOTimerEventSource::setTimeout(AbsoluteTime interval) +IOReturn +IOTimerEventSource::setTimeout(AbsoluteTime interval) { - AbsoluteTime end; - clock_absolutetime_interval_to_deadline(interval, &end); - return wakeAtTime(end); + AbsoluteTime end; + clock_absolutetime_interval_to_deadline(interval, &end); + return wakeAtTime(end); } -IOReturn IOTimerEventSource::setTimeout(uint32_t options, - AbsoluteTime abstime, AbsoluteTime leeway) +IOReturn +IOTimerEventSource::setTimeout(uint32_t options, + AbsoluteTime abstime, AbsoluteTime leeway) { - AbsoluteTime end; - if (options & kIOTimeOptionsContinuous) - clock_continuoustime_interval_to_deadline(abstime, &end); - else - clock_absolutetime_interval_to_deadline(abstime, &end); - - return wakeAtTime(options, end, leeway); + AbsoluteTime end; + if (options & kIOTimeOptionsContinuous) { + clock_continuoustime_interval_to_deadline(abstime, &end); + } else { + clock_absolutetime_interval_to_deadline(abstime, &end); + } + + return wakeAtTime(options, end, leeway); } -IOReturn IOTimerEventSource::wakeAtTimeTicks(UInt32 ticks) +IOReturn +IOTimerEventSource::wakeAtTimeTicks(UInt32 ticks) { - return wakeAtTime(ticks, kTickScale); + return wakeAtTime(ticks, kTickScale); } -IOReturn IOTimerEventSource::wakeAtTimeMS(UInt32 ms) +IOReturn +IOTimerEventSource::wakeAtTimeMS(UInt32 ms) { - return wakeAtTime(ms, kMillisecondScale); + return wakeAtTime(ms, kMillisecondScale); } -IOReturn IOTimerEventSource::wakeAtTimeUS(UInt32 us) +IOReturn +IOTimerEventSource::wakeAtTimeUS(UInt32 us) { - return wakeAtTime(us, kMicrosecondScale); + return wakeAtTime(us, kMicrosecondScale); } -IOReturn IOTimerEventSource::wakeAtTime(UInt32 inAbstime, UInt32 scale_factor) +IOReturn +IOTimerEventSource::wakeAtTime(UInt32 inAbstime, UInt32 scale_factor) { - AbsoluteTime end; - clock_interval_to_absolutetime_interval(inAbstime, scale_factor, &end); + AbsoluteTime end; + clock_interval_to_absolutetime_interval(inAbstime, scale_factor, &end); - return wakeAtTime(end); + return wakeAtTime(end); } #if !defined(__LP64__) -IOReturn IOTimerEventSource::wakeAtTime(mach_timespec_t 
inAbstime) +IOReturn +IOTimerEventSource::wakeAtTime(mach_timespec_t inAbstime) { - AbsoluteTime end, nsecs; + AbsoluteTime end, nsecs; - clock_interval_to_absolutetime_interval - (inAbstime.tv_nsec, kNanosecondScale, &nsecs); - clock_interval_to_absolutetime_interval - (inAbstime.tv_sec, kSecondScale, &end); - ADD_ABSOLUTETIME(&end, &nsecs); + clock_interval_to_absolutetime_interval + (inAbstime.tv_nsec, kNanosecondScale, &nsecs); + clock_interval_to_absolutetime_interval + (inAbstime.tv_sec, kSecondScale, &end); + ADD_ABSOLUTETIME(&end, &nsecs); - return wakeAtTime(end); + return wakeAtTime(end); } #endif -void IOTimerEventSource::setWorkLoop(IOWorkLoop *inWorkLoop) +void +IOTimerEventSource::setWorkLoop(IOWorkLoop *inWorkLoop) { - super::setWorkLoop(inWorkLoop); - if ( enabled && AbsoluteTime_to_scalar(&abstime) && workLoop ) - wakeAtTime(abstime); + super::setWorkLoop(inWorkLoop); + if (enabled && AbsoluteTime_to_scalar(&abstime) && workLoop) { + wakeAtTime(abstime); + } } -IOReturn IOTimerEventSource::wakeAtTime(AbsoluteTime inAbstime) +IOReturn +IOTimerEventSource::wakeAtTime(AbsoluteTime inAbstime) { - return wakeAtTime(0, inAbstime, 0); + return wakeAtTime(0, inAbstime, 0); } -IOReturn IOTimerEventSource::wakeAtTime(uint32_t options, AbsoluteTime inAbstime, AbsoluteTime leeway) +IOReturn +IOTimerEventSource::wakeAtTime(uint32_t options, AbsoluteTime inAbstime, AbsoluteTime leeway) { - if (!action) - return kIOReturnNoResources; - - abstime = inAbstime; - if ( enabled && AbsoluteTime_to_scalar(&inAbstime) && AbsoluteTime_to_scalar(&abstime) && workLoop ) - { - uint32_t tcoptions = 0; - - if (kIOTimeOptionsWithLeeway & options) tcoptions |= THREAD_CALL_DELAY_LEEWAY; - if (kIOTimeOptionsContinuous & options) tcoptions |= THREAD_CALL_CONTINUOUS; - - if (reserved) - { - if (kPassive & flags) - { - retain(); - workLoop->retain(); - } - reserved->workLoop = workLoop; - reserved->calloutGeneration++; - if (thread_call_enter_delayed_with_leeway((thread_call_t) calloutEntry, - (void *)(uintptr_t) reserved->calloutGeneration, inAbstime, leeway, tcoptions) - && (kPassive & flags)) - { - release(); - workLoop->release(); - } - } - else - { - thread_call_enter_delayed_with_leeway((thread_call_t) calloutEntry, - NULL, inAbstime, leeway, tcoptions); - } - } - - return kIOReturnSuccess; + if (!action) { + return kIOReturnNoResources; + } + + abstime = inAbstime; + if (enabled && AbsoluteTime_to_scalar(&inAbstime) && AbsoluteTime_to_scalar(&abstime) && workLoop) { + uint32_t tcoptions = 0; + + if (kIOTimeOptionsWithLeeway & options) { + tcoptions |= THREAD_CALL_DELAY_LEEWAY; + } + if (kIOTimeOptionsContinuous & options) { + tcoptions |= THREAD_CALL_CONTINUOUS; + } + + if (reserved) { + if (kPassive & flags) { + retain(); + workLoop->retain(); + } + reserved->workLoop = workLoop; + reserved->calloutGeneration++; + if (thread_call_enter_delayed_with_leeway((thread_call_t) calloutEntry, + (void *)(uintptr_t) reserved->calloutGeneration, inAbstime, leeway, tcoptions) + && (kPassive & flags)) { + release(); + workLoop->release(); + } + } else { + thread_call_enter_delayed_with_leeway((thread_call_t) calloutEntry, + NULL, inAbstime, leeway, tcoptions); + } + } + + return kIOReturnSuccess; } diff --git a/iokit/Kernel/IOUserClient.cpp b/iokit/Kernel/IOUserClient.cpp index b99f027dd..12ae32416 100644 --- a/iokit/Kernel/IOUserClient.cpp +++ b/iokit/Kernel/IOUserClient.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2014 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -71,11 +71,10 @@ extern "C" { #define REF64(x) ((io_user_reference_t)((UInt64)(x))) #define REF32(x) ((int)(x)) -enum -{ - kIOUCAsync0Flags = 3ULL, - kIOUCAsync64Flag = 1ULL, - kIOUCAsyncErrorLoggedFlag = 2ULL +enum{ + kIOUCAsync0Flags = 3ULL, + kIOUCAsync64Flag = 1ULL, + kIOUCAsyncErrorLoggedFlag = 2ULL }; #if IOKITSTATS @@ -88,7 +87,7 @@ do { \ #define IOStatisticsUnregisterCounter() \ do { \ if (reserved) \ - IOStatistics::unregisterUserClient(reserved->counter); \ + IOStatistics::unregisterUserClient(reserved->counter); \ } while (0) #define IOStatisticsClientCall() \ @@ -107,14 +106,14 @@ do { \ #if DEVELOPMENT || DEBUG #define FAKE_STACK_FRAME(a) \ - const void ** __frameptr; \ - const void * __retaddr; \ - __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \ - __retaddr = __frameptr[1]; \ - __frameptr[1] = (a); + const void ** __frameptr; \ + const void * __retaddr; \ + __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \ + __retaddr = __frameptr[1]; \ + __frameptr[1] = (a); #define FAKE_STACK_FRAME_END() \ - __frameptr[1] = __retaddr; + __frameptr[1] = __retaddr; #else /* DEVELOPMENT || DEBUG */ @@ -123,13 +122,14 @@ do { \ #endif /* DEVELOPMENT || DEBUG */ +#define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t)) +#define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t)) + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ extern "C" { - #include #include - } /* extern "C" */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -138,270 +138,274 @@ extern "C" { class IOMachPort : public OSObject { - OSDeclareDefaultStructors(IOMachPort) + OSDeclareDefaultStructors(IOMachPort) public: - OSObject * object; - ipc_port_t port; - UInt32 mscount; - UInt8 holdDestroy; + OSObject * object; + ipc_port_t port; + UInt32 mscount; + UInt8 holdDestroy; - static IOMachPort * portForObject( OSObject * obj, - ipc_kobject_type_t type ); - static bool noMoreSendersForObject( OSObject * obj, - ipc_kobject_type_t type, mach_port_mscount_t * mscount ); - static void releasePortForObject( OSObject * obj, - ipc_kobject_type_t type ); - static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type ); + static IOMachPort * portForObject( OSObject * obj, + ipc_kobject_type_t type ); + static bool noMoreSendersForObject( OSObject * obj, + ipc_kobject_type_t type, mach_port_mscount_t * mscount ); + static void 
releasePortForObject( OSObject * obj, + ipc_kobject_type_t type ); + static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type ); - static OSDictionary * dictForType( ipc_kobject_type_t type ); + static OSDictionary * dictForType( ipc_kobject_type_t type ); - static mach_port_name_t makeSendRightForTask( task_t task, - io_object_t obj, ipc_kobject_type_t type ); + static mach_port_name_t makeSendRightForTask( task_t task, + io_object_t obj, ipc_kobject_type_t type ); - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; }; #define super OSObject OSDefineMetaClassAndStructors(IOMachPort, OSObject) -static IOLock * gIOObjectPortLock; +static IOLock * gIOObjectPortLock; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ // not in dictForType() for debugging ease -static OSDictionary * gIOObjectPorts; -static OSDictionary * gIOConnectPorts; -static OSDictionary * gIOIdentifierPorts; +static OSDictionary * gIOObjectPorts; +static OSDictionary * gIOConnectPorts; +static OSDictionary * gIOIdentifierPorts; -OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type ) +OSDictionary * +IOMachPort::dictForType( ipc_kobject_type_t type ) { - OSDictionary ** dict; + OSDictionary ** dict; - switch (type) - { + switch (type) { case IKOT_IOKIT_OBJECT: - dict = &gIOObjectPorts; - break; + dict = &gIOObjectPorts; + break; case IKOT_IOKIT_CONNECT: - dict = &gIOConnectPorts; - break; + dict = &gIOConnectPorts; + break; case IKOT_IOKIT_IDENT: - dict = &gIOIdentifierPorts; - break; + dict = &gIOIdentifierPorts; + break; default: - panic("dictForType %d", type); - dict = NULL; - break; - } + panic("dictForType %d", type); + dict = NULL; + break; + } - if( 0 == *dict) - *dict = OSDictionary::withCapacity( 1 ); + if (0 == *dict) { + *dict = OSDictionary::withCapacity( 1 ); + } - return( *dict ); + return *dict; } -IOMachPort * IOMachPort::portForObject ( OSObject * obj, - ipc_kobject_type_t type ) +IOMachPort * +IOMachPort::portForObject( OSObject * obj, + ipc_kobject_type_t type ) { - IOMachPort * inst = 0; - OSDictionary * dict; - - IOTakeLock( gIOObjectPortLock); - - do { + IOMachPort * inst = 0; + OSDictionary * dict; - dict = dictForType( type ); - if( !dict) - continue; + IOTakeLock( gIOObjectPortLock); - if( (inst = (IOMachPort *) - dict->getObject( (const OSSymbol *) obj ))) { - inst->mscount++; - inst->retain(); - continue; - } - - inst = new IOMachPort; - if( inst && !inst->init()) { - inst = 0; - continue; - } + do { + dict = dictForType( type ); + if (!dict) { + continue; + } - inst->port = iokit_alloc_object_port( obj, type ); - if( inst->port) { - // retains obj - dict->setObject( (const OSSymbol *) obj, inst ); - inst->mscount++; + if ((inst = (IOMachPort *) + dict->getObject((const OSSymbol *) obj ))) { + inst->mscount++; + inst->retain(); + continue; + } - } else { - inst->release(); - inst = 0; - } + inst = new IOMachPort; + if (inst && !inst->init()) { + inst = 0; + continue; + } - } while( false ); + inst->port = iokit_alloc_object_port( obj, type ); + if (inst->port) { + // retains obj + dict->setObject((const OSSymbol *) obj, inst ); + inst->mscount++; + } else { + inst->release(); + inst = 0; + } + } while (false); - IOUnlock( gIOObjectPortLock); + IOUnlock( gIOObjectPortLock); - return( inst ); + return inst; } -bool IOMachPort::noMoreSendersForObject( OSObject * obj, - ipc_kobject_type_t type, mach_port_mscount_t * mscount ) -{ - OSDictionary * dict; - IOMachPort * machPort; - IOUserClient * uc; - bool destroyed = 
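/*
 * Editor's sketch (not part of the patch): portForObject above keeps one
 * IOMachPort per object per port type in a lazily created dictionary, keyed by
 * the object pointer itself (the OSSymbol* cast is only a keying trick), with
 * every lookup done under gIOObjectPortLock. A userspace analogue of the
 * lookup-or-create-under-lock shape, using std::map in place of OSDictionary:
 */
#include <map>
#include <mutex>

struct Port { unsigned mscount = 0; };

static std::mutex gLock;                     // plays the role of gIOObjectPortLock
static std::map<const void *, Port> gPorts;  // plays the role of the per-type dictionary

static Port *portForObject(const void *obj)
{
    std::lock_guard<std::mutex> lk(gLock);
    Port &p = gPorts[obj];   // existing entry on hit, default-constructed on miss
    p.mscount++;             // matches inst->mscount++ in both branches above
    return &p;
}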
true; - - IOTakeLock( gIOObjectPortLock); - - if( (dict = dictForType( type ))) { - obj->retain(); - - machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj ); - if( machPort) { - destroyed = (machPort->mscount <= *mscount); - if (!destroyed) *mscount = machPort->mscount; - else - { - if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) - { - uc->noMoreSenders(); +bool +IOMachPort::noMoreSendersForObject( OSObject * obj, + ipc_kobject_type_t type, mach_port_mscount_t * mscount ) +{ + OSDictionary * dict; + IOMachPort * machPort; + IOUserClient * uc; + bool destroyed = true; + + IOTakeLock( gIOObjectPortLock); + + if ((dict = dictForType( type ))) { + obj->retain(); + + machPort = (IOMachPort *) dict->getObject((const OSSymbol *) obj ); + if (machPort) { + destroyed = (machPort->mscount <= *mscount); + if (!destroyed) { + *mscount = machPort->mscount; + } else { + if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) { + uc->noMoreSenders(); + } + dict->removeObject((const OSSymbol *) obj ); + } } - dict->removeObject( (const OSSymbol *) obj ); - } - } - obj->release(); - } + obj->release(); + } - IOUnlock( gIOObjectPortLock); + IOUnlock( gIOObjectPortLock); - return( destroyed ); + return destroyed; } -void IOMachPort::releasePortForObject( OSObject * obj, - ipc_kobject_type_t type ) +void +IOMachPort::releasePortForObject( OSObject * obj, + ipc_kobject_type_t type ) { - OSDictionary * dict; - IOMachPort * machPort; + OSDictionary * dict; + IOMachPort * machPort; - assert(IKOT_IOKIT_CONNECT != type); + assert(IKOT_IOKIT_CONNECT != type); - IOTakeLock( gIOObjectPortLock); + IOTakeLock( gIOObjectPortLock); - if( (dict = dictForType( type ))) { - obj->retain(); - machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj ); - if( machPort && !machPort->holdDestroy) - dict->removeObject( (const OSSymbol *) obj ); - obj->release(); - } + if ((dict = dictForType( type ))) { + obj->retain(); + machPort = (IOMachPort *) dict->getObject((const OSSymbol *) obj ); + if (machPort && !machPort->holdDestroy) { + dict->removeObject((const OSSymbol *) obj ); + } + obj->release(); + } - IOUnlock( gIOObjectPortLock); + IOUnlock( gIOObjectPortLock); } -void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type ) +void +IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type ) { - OSDictionary * dict; - IOMachPort * machPort; + OSDictionary * dict; + IOMachPort * machPort; - IOLockLock( gIOObjectPortLock ); + IOLockLock( gIOObjectPortLock ); - if( (dict = dictForType( type ))) { - machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj ); - if( machPort) - machPort->holdDestroy = true; - } + if ((dict = dictForType( type ))) { + machPort = (IOMachPort *) dict->getObject((const OSSymbol *) obj ); + if (machPort) { + machPort->holdDestroy = true; + } + } - IOLockUnlock( gIOObjectPortLock ); + IOLockUnlock( gIOObjectPortLock ); } -void IOUserClient::destroyUserReferences( OSObject * obj ) -{ - IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT ); - - // panther, 3160200 - // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT ); - - OSDictionary * dict; - - IOTakeLock( gIOObjectPortLock); - obj->retain(); - - if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT ))) - { - IOMachPort * port; - port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj ); - if (port) - { - IOUserClient * uc; - if ((uc = OSDynamicCast(IOUserClient, obj))) - { - uc->noMoreSenders(); - if (uc->mappings) - { - 
dict->setObject((const OSSymbol *) uc->mappings, port); - iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT); - - uc->mappings->release(); - uc->mappings = 0; - } - } - dict->removeObject( (const OSSymbol *) obj ); +void +IOUserClient::destroyUserReferences( OSObject * obj ) +{ + IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT ); + + // panther, 3160200 + // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT ); + + OSDictionary * dict; + + IOTakeLock( gIOObjectPortLock); + obj->retain(); + + if ((dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT ))) { + IOMachPort * port; + port = (IOMachPort *) dict->getObject((const OSSymbol *) obj ); + if (port) { + IOUserClient * uc; + if ((uc = OSDynamicCast(IOUserClient, obj))) { + uc->noMoreSenders(); + if (uc->mappings) { + dict->setObject((const OSSymbol *) uc->mappings, port); + iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT); + + uc->mappings->release(); + uc->mappings = 0; + } + } + dict->removeObject((const OSSymbol *) obj ); + } } - } - obj->release(); - IOUnlock( gIOObjectPortLock); + obj->release(); + IOUnlock( gIOObjectPortLock); } -mach_port_name_t IOMachPort::makeSendRightForTask( task_t task, - io_object_t obj, ipc_kobject_type_t type ) +mach_port_name_t +IOMachPort::makeSendRightForTask( task_t task, + io_object_t obj, ipc_kobject_type_t type ) { - return( iokit_make_send_right( task, obj, type )); + return iokit_make_send_right( task, obj, type ); } -void IOMachPort::free( void ) +void +IOMachPort::free( void ) { - if( port) - iokit_destroy_object_port( port ); - super::free(); + if (port) { + iokit_destroy_object_port( port ); + } + super::free(); } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ class IOUserIterator : public OSIterator { - OSDeclareDefaultStructors(IOUserIterator) + OSDeclareDefaultStructors(IOUserIterator) public: - OSObject * userIteratorObject; - IOLock * lock; + OSObject * userIteratorObject; + IOLock * lock; - static IOUserIterator * withIterator(OSIterator * iter); - virtual bool init( void ) APPLE_KEXT_OVERRIDE; - virtual void free() APPLE_KEXT_OVERRIDE; + static IOUserIterator * withIterator(LIBKERN_CONSUMED OSIterator * iter); + virtual bool init( void ) APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; - virtual void reset() APPLE_KEXT_OVERRIDE; - virtual bool isValid() APPLE_KEXT_OVERRIDE; - virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; - virtual OSObject * copyNextObject(); + virtual void reset() APPLE_KEXT_OVERRIDE; + virtual bool isValid() APPLE_KEXT_OVERRIDE; + virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; + virtual OSObject * copyNextObject(); }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ class IOUserNotification : public IOUserIterator { - OSDeclareDefaultStructors(IOUserNotification) + OSDeclareDefaultStructors(IOUserNotification) -#define holdNotify userIteratorObject +#define holdNotify userIteratorObject public: - virtual void free() APPLE_KEXT_OVERRIDE; + virtual void free() APPLE_KEXT_OVERRIDE; - virtual void setNotification( IONotifier * obj ); + virtual void setNotification( IONotifier * obj ); - virtual void reset() APPLE_KEXT_OVERRIDE; - virtual bool isValid() APPLE_KEXT_OVERRIDE; + virtual void reset() APPLE_KEXT_OVERRIDE; + virtual bool isValid() APPLE_KEXT_OVERRIDE; }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -411,276 +415,291 @@ OSDefineMetaClassAndStructors( IOUserIterator, OSIterator ) 
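/*
 * Editor's sketch (not part of the patch): IOUserIterator serializes all access
 * to the wrapped OSIterator with its own lock, and copyNextObject() hands back
 * a retained object so the caller owns a reference it can use after the lock is
 * dropped. The same shape in userspace, with shared_ptr standing in for
 * retain/release:
 */
#include <memory>
#include <mutex>
#include <vector>

template <typename T>
class LockedIterator {
    std::mutex lock;
    std::vector<std::shared_ptr<T>> items;
    size_t pos = 0;
public:
    explicit LockedIterator(std::vector<std::shared_ptr<T>> v) : items(std::move(v)) {}

    void reset()                       // like IOUserIterator::reset, under the lock
    {
        std::lock_guard<std::mutex> lk(lock);
        pos = 0;
    }

    // Returns an owning reference (the retain in copyNextObject), or null at end.
    std::shared_ptr<T> copyNext()
    {
        std::lock_guard<std::mutex> lk(lock);
        return pos < items.size() ? items[pos++] : nullptr;
    }
};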
IOUserIterator * IOUserIterator::withIterator(OSIterator * iter) { - IOUserIterator * me; + IOUserIterator * me; - if (!iter) return (0); + if (!iter) { + return 0; + } - me = new IOUserIterator; - if (me && !me->init()) - { - me->release(); - me = 0; - } - if (!me) return me; - me->userIteratorObject = iter; + me = new IOUserIterator; + if (me && !me->init()) { + me->release(); + me = 0; + } + if (!me) { + return me; + } + me->userIteratorObject = iter; - return (me); + return me; } bool IOUserIterator::init( void ) { - if (!OSObject::init()) return (false); + if (!OSObject::init()) { + return false; + } - lock = IOLockAlloc(); - if( !lock) - return( false ); + lock = IOLockAlloc(); + if (!lock) { + return false; + } - return (true); + return true; } void IOUserIterator::free() { - if (userIteratorObject) userIteratorObject->release(); - if (lock) IOLockFree(lock); - OSObject::free(); + if (userIteratorObject) { + userIteratorObject->release(); + } + if (lock) { + IOLockFree(lock); + } + OSObject::free(); } void IOUserIterator::reset() { - IOLockLock(lock); - assert(OSDynamicCast(OSIterator, userIteratorObject)); - ((OSIterator *)userIteratorObject)->reset(); - IOLockUnlock(lock); + IOLockLock(lock); + assert(OSDynamicCast(OSIterator, userIteratorObject)); + ((OSIterator *)userIteratorObject)->reset(); + IOLockUnlock(lock); } bool IOUserIterator::isValid() { - bool ret; + bool ret; - IOLockLock(lock); - assert(OSDynamicCast(OSIterator, userIteratorObject)); - ret = ((OSIterator *)userIteratorObject)->isValid(); - IOLockUnlock(lock); + IOLockLock(lock); + assert(OSDynamicCast(OSIterator, userIteratorObject)); + ret = ((OSIterator *)userIteratorObject)->isValid(); + IOLockUnlock(lock); - return (ret); + return ret; } OSObject * IOUserIterator::getNextObject() { - assert(false); - return (NULL); + assert(false); + return NULL; } OSObject * IOUserIterator::copyNextObject() { - OSObject * ret = NULL; + OSObject * ret = NULL; - IOLockLock(lock); - if (userIteratorObject) { - ret = ((OSIterator *)userIteratorObject)->getNextObject(); - if (ret) ret->retain(); - } - IOLockUnlock(lock); + IOLockLock(lock); + if (userIteratorObject) { + ret = ((OSIterator *)userIteratorObject)->getNextObject(); + if (ret) { + ret->retain(); + } + } + IOLockUnlock(lock); - return (ret); + return ret; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ extern "C" { - // functions called from osfmk/device/iokit_rpc.c void iokit_add_reference( io_object_t obj, ipc_kobject_type_t type ) { - IOUserClient * uc; + IOUserClient * uc; - if (!obj) return; + if (!obj) { + return; + } - if ((IKOT_IOKIT_CONNECT == type) - && (uc = OSDynamicCast(IOUserClient, obj))) - { - OSIncrementAtomic(&uc->__ipc); - } + if ((IKOT_IOKIT_CONNECT == type) + && (uc = OSDynamicCast(IOUserClient, obj))) { + OSIncrementAtomic(&uc->__ipc); + } - obj->retain(); + obj->retain(); } void iokit_remove_reference( io_object_t obj ) { - if( obj) - obj->release(); + if (obj) { + obj->release(); + } } void iokit_remove_connect_reference( io_object_t obj ) { - IOUserClient * uc; - bool finalize = false; + IOUserClient * uc; + bool finalize = false; - if (!obj) return; + if (!obj) { + return; + } - if ((uc = OSDynamicCast(IOUserClient, obj))) - { - if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) - { - IOLockLock(gIOObjectPortLock); - if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false; - IOLockUnlock(gIOObjectPortLock); + if ((uc = OSDynamicCast(IOUserClient, obj))) { + if (1 == OSDecrementAtomic(&uc->__ipc) && 
uc->isInactive()) { + IOLockLock(gIOObjectPortLock); + if ((finalize = uc->__ipcFinal)) { + uc->__ipcFinal = false; + } + IOLockUnlock(gIOObjectPortLock); + } + if (finalize) { + uc->scheduleFinalize(true); + } } - if (finalize) uc->scheduleFinalize(true); - } - obj->release(); + obj->release(); } bool IOUserClient::finalizeUserReferences(OSObject * obj) { - IOUserClient * uc; - bool ok = true; + IOUserClient * uc; + bool ok = true; - if ((uc = OSDynamicCast(IOUserClient, obj))) - { - IOLockLock(gIOObjectPortLock); - if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false; - IOLockUnlock(gIOObjectPortLock); - } - return (ok); + if ((uc = OSDynamicCast(IOUserClient, obj))) { + IOLockLock(gIOObjectPortLock); + if ((uc->__ipcFinal = (0 != uc->__ipc))) { + ok = false; + } + IOLockUnlock(gIOObjectPortLock); + } + return ok; } ipc_port_t iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type ) { - IOMachPort * machPort; - ipc_port_t port; - - if( (machPort = IOMachPort::portForObject( obj, type ))) { + IOMachPort * machPort; + ipc_port_t port; - port = machPort->port; - if( port) - iokit_retain_port( port ); - - machPort->release(); + if ((machPort = IOMachPort::portForObject( obj, type ))) { + port = machPort->port; + if (port) { + iokit_retain_port( port ); + } - } else - port = NULL; + machPort->release(); + } else { + port = NULL; + } - return( port ); + return port; } kern_return_t iokit_client_died( io_object_t obj, ipc_port_t /* port */, - ipc_kobject_type_t type, mach_port_mscount_t * mscount ) -{ - IOUserClient * client; - IOMemoryMap * map; - IOUserNotification * notify; - - if( !IOMachPort::noMoreSendersForObject( obj, type, mscount )) - return( kIOReturnNotReady ); - - if( IKOT_IOKIT_CONNECT == type) - { - if( (client = OSDynamicCast( IOUserClient, obj ))) - { - IOStatisticsClientCall(); - IOLockLock(client->lock); - client->clientDied(); - IOLockUnlock(client->lock); - } - } - else if( IKOT_IOKIT_OBJECT == type) - { - if( (map = OSDynamicCast( IOMemoryMap, obj ))) - map->taskDied(); - else if( (notify = OSDynamicCast( IOUserNotification, obj ))) - notify->setNotification( 0 ); - } - - return( kIOReturnSuccess ); -} - -}; /* extern "C" */ + ipc_kobject_type_t type, mach_port_mscount_t * mscount ) +{ + IOUserClient * client; + IOMemoryMap * map; + IOUserNotification * notify; + + if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) { + return kIOReturnNotReady; + } + + if (IKOT_IOKIT_CONNECT == type) { + if ((client = OSDynamicCast( IOUserClient, obj ))) { + IOStatisticsClientCall(); + IOLockLock(client->lock); + client->clientDied(); + IOLockUnlock(client->lock); + } + } else if (IKOT_IOKIT_OBJECT == type) { + if ((map = OSDynamicCast( IOMemoryMap, obj ))) { + map->taskDied(); + } else if ((notify = OSDynamicCast( IOUserNotification, obj ))) { + notify->setNotification( 0 ); + } + } + + return kIOReturnSuccess; +} +}; /* extern "C" */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ class IOServiceUserNotification : public IOUserNotification { - OSDeclareDefaultStructors(IOServiceUserNotification) + OSDeclareDefaultStructors(IOServiceUserNotification) - struct PingMsg { - mach_msg_header_t msgHdr; - OSNotificationHeader64 notifyHeader; - }; + struct PingMsg { + mach_msg_header_t msgHdr; + OSNotificationHeader64 notifyHeader; + }; - enum { kMaxOutstanding = 1024 }; + enum { kMaxOutstanding = 1024 }; - PingMsg * pingMsg; - vm_size_t msgSize; - OSArray * newSet; - bool armed; - bool ipcLogged; + PingMsg * pingMsg; + vm_size_t msgSize; + OSArray 
* newSet; + bool armed; + bool ipcLogged; public: - virtual bool init( mach_port_t port, natural_t type, - void * reference, vm_size_t referenceSize, - bool clientIs64 ); - virtual void free() APPLE_KEXT_OVERRIDE; - void invalidatePort(void); + virtual bool init( mach_port_t port, natural_t type, + void * reference, vm_size_t referenceSize, + bool clientIs64 ); + virtual void free() APPLE_KEXT_OVERRIDE; + void invalidatePort(void); - static bool _handler( void * target, - void * ref, IOService * newService, IONotifier * notifier ); - virtual bool handler( void * ref, IOService * newService ); + static bool _handler( void * target, + void * ref, IOService * newService, IONotifier * notifier ); + virtual bool handler( void * ref, IOService * newService ); - virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; - virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE; + virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; + virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE; }; class IOServiceMessageUserNotification : public IOUserNotification { - OSDeclareDefaultStructors(IOServiceMessageUserNotification) + OSDeclareDefaultStructors(IOServiceMessageUserNotification) - struct PingMsg { - mach_msg_header_t msgHdr; - mach_msg_body_t msgBody; - mach_msg_port_descriptor_t ports[1]; - OSNotificationHeader64 notifyHeader __attribute__ ((packed)); - }; + struct PingMsg { + mach_msg_header_t msgHdr; + mach_msg_body_t msgBody; + mach_msg_port_descriptor_t ports[1]; + OSNotificationHeader64 notifyHeader __attribute__ ((packed)); + }; - PingMsg * pingMsg; - vm_size_t msgSize; - uint8_t clientIs64; - int owningPID; - bool ipcLogged; + PingMsg * pingMsg; + vm_size_t msgSize; + uint8_t clientIs64; + int owningPID; + bool ipcLogged; public: - virtual bool init( mach_port_t port, natural_t type, - void * reference, vm_size_t referenceSize, - vm_size_t extraSize, - bool clientIs64 ); - - virtual void free() APPLE_KEXT_OVERRIDE; - void invalidatePort(void); - - static IOReturn _handler( void * target, void * ref, - UInt32 messageType, IOService * provider, - void * messageArgument, vm_size_t argSize ); - virtual IOReturn handler( void * ref, - UInt32 messageType, IOService * provider, - void * messageArgument, vm_size_t argSize ); - - virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; - virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE; + virtual bool init( mach_port_t port, natural_t type, + void * reference, vm_size_t referenceSize, + vm_size_t extraSize, + bool clientIs64 ); + + virtual void free() APPLE_KEXT_OVERRIDE; + void invalidatePort(void); + + static IOReturn _handler( void * target, void * ref, + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ); + virtual IOReturn handler( void * ref, + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ); + + virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; + virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE; }; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -692,46 +711,48 @@ OSDefineAbstractStructors( IOUserNotification, IOUserIterator ) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -void IOUserNotification::free( void ) +void +IOUserNotification::free( void ) { - if (holdNotify) - { - assert(OSDynamicCast(IONotifier, holdNotify)); - ((IONotifier *)holdNotify)->remove(); - holdNotify = 0; - } - // can't be in handler now + if (holdNotify) { + assert(OSDynamicCast(IONotifier, holdNotify)); + 
((IONotifier *)holdNotify)->remove(); + holdNotify = 0; + } + // can't be in handler now - super::free(); + super::free(); } -void IOUserNotification::setNotification( IONotifier * notify ) +void +IOUserNotification::setNotification( IONotifier * notify ) { - OSObject * previousNotify; + OSObject * previousNotify; - IOLockLock( gIOObjectPortLock); + IOLockLock( gIOObjectPortLock); - previousNotify = holdNotify; - holdNotify = notify; + previousNotify = holdNotify; + holdNotify = notify; - IOLockUnlock( gIOObjectPortLock); + IOLockUnlock( gIOObjectPortLock); - if( previousNotify) - { - assert(OSDynamicCast(IONotifier, previousNotify)); - ((IONotifier *)previousNotify)->remove(); - } + if (previousNotify) { + assert(OSDynamicCast(IONotifier, previousNotify)); + ((IONotifier *)previousNotify)->remove(); + } } -void IOUserNotification::reset() +void +IOUserNotification::reset() { - // ? + // ? } -bool IOUserNotification::isValid() +bool +IOUserNotification::isValid() { - return( true ); + return true; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -742,146 +763,162 @@ OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IOServiceUserNotification::init( mach_port_t port, natural_t type, - void * reference, vm_size_t referenceSize, - bool clientIs64 ) +bool +IOServiceUserNotification::init( mach_port_t port, natural_t type, + void * reference, vm_size_t referenceSize, + bool clientIs64 ) { - if( !super::init()) - return( false ); + if (!super::init()) { + return false; + } - newSet = OSArray::withCapacity( 1 ); - if( !newSet) - return( false ); + newSet = OSArray::withCapacity( 1 ); + if (!newSet) { + return false; + } - if (referenceSize > sizeof(OSAsyncReference64)) - return( false ); + if (referenceSize > sizeof(OSAsyncReference64)) { + return false; + } - msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize; - pingMsg = (PingMsg *) IOMalloc( msgSize); - if( !pingMsg) - return( false ); + msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize; + pingMsg = (PingMsg *) IOMalloc( msgSize); + if (!pingMsg) { + return false; + } - bzero( pingMsg, msgSize); + bzero( pingMsg, msgSize); - pingMsg->msgHdr.msgh_remote_port = port; - pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS( - MACH_MSG_TYPE_COPY_SEND /*remote*/, - MACH_MSG_TYPE_MAKE_SEND /*local*/); - pingMsg->msgHdr.msgh_size = msgSize; - pingMsg->msgHdr.msgh_id = kOSNotificationMessageID; + pingMsg->msgHdr.msgh_remote_port = port; + pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS( + MACH_MSG_TYPE_COPY_SEND /*remote*/, + MACH_MSG_TYPE_MAKE_SEND /*local*/); + pingMsg->msgHdr.msgh_size = msgSize; + pingMsg->msgHdr.msgh_id = kOSNotificationMessageID; - pingMsg->notifyHeader.size = 0; - pingMsg->notifyHeader.type = type; - bcopy( reference, pingMsg->notifyHeader.reference, referenceSize ); + pingMsg->notifyHeader.size = 0; + pingMsg->notifyHeader.type = type; + bcopy( reference, pingMsg->notifyHeader.reference, referenceSize ); - return( true ); + return true; } -void IOServiceUserNotification::invalidatePort(void) +void +IOServiceUserNotification::invalidatePort(void) { - if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL; + if (pingMsg) { + pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL; + } } -void IOServiceUserNotification::free( void ) +void +IOServiceUserNotification::free( void ) { - PingMsg * _pingMsg; - vm_size_t _msgSize; - OSArray * _newSet; + PingMsg * _pingMsg; 
+ vm_size_t _msgSize; + OSArray * _newSet; - _pingMsg = pingMsg; - _msgSize = msgSize; - _newSet = newSet; + _pingMsg = pingMsg; + _msgSize = msgSize; + _newSet = newSet; - super::free(); + super::free(); - if( _pingMsg && _msgSize) { + if (_pingMsg && _msgSize) { if (_pingMsg->msgHdr.msgh_remote_port) { iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port); } - IOFree(_pingMsg, _msgSize); + IOFree(_pingMsg, _msgSize); } - if( _newSet) - _newSet->release(); + if (_newSet) { + _newSet->release(); + } } -bool IOServiceUserNotification::_handler( void * target, - void * ref, IOService * newService, IONotifier * notifier ) +bool +IOServiceUserNotification::_handler( void * target, + void * ref, IOService * newService, IONotifier * notifier ) { - return( ((IOServiceUserNotification *) target)->handler( ref, newService )); + return ((IOServiceUserNotification *) target)->handler( ref, newService ); } -bool IOServiceUserNotification::handler( void * ref, - IOService * newService ) +bool +IOServiceUserNotification::handler( void * ref, + IOService * newService ) { - unsigned int count; - kern_return_t kr; - ipc_port_t port = NULL; - bool sendPing = false; + unsigned int count; + kern_return_t kr; + ipc_port_t port = NULL; + bool sendPing = false; - IOTakeLock( lock ); + IOTakeLock( lock ); - count = newSet->getCount(); - if( count < kMaxOutstanding) { - - newSet->setObject( newService ); - if( (sendPing = (armed && (0 == count)))) - armed = false; - } + count = newSet->getCount(); + if (count < kMaxOutstanding) { + newSet->setObject( newService ); + if ((sendPing = (armed && (0 == count)))) { + armed = false; + } + } - IOUnlock( lock ); + IOUnlock( lock ); - if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) - IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT ); + if (kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) { + IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT ); + } - if( sendPing) { - if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) )) - pingMsg->msgHdr.msgh_local_port = port; - else - pingMsg->msgHdr.msgh_local_port = NULL; + if (sendPing) { + if ((port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ))) { + pingMsg->msgHdr.msgh_local_port = port; + } else { + pingMsg->msgHdr.msgh_local_port = NULL; + } - kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr, - pingMsg->msgHdr.msgh_size, - (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE), - 0); - if( port) - iokit_release_port( port ); + kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr, + pingMsg->msgHdr.msgh_size, + (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE), + 0); + if (port) { + iokit_release_port( port ); + } - if( (KERN_SUCCESS != kr) && !ipcLogged) - { - ipcLogged = true; - IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr ); - } - } + if ((KERN_SUCCESS != kr) && !ipcLogged) { + ipcLogged = true; + IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr ); + } + } - return( true ); + return true; } -OSObject * IOServiceUserNotification::getNextObject() +OSObject * +IOServiceUserNotification::getNextObject() { - assert(false); - return (NULL); + assert(false); + return NULL; } -OSObject * IOServiceUserNotification::copyNextObject() +OSObject * +IOServiceUserNotification::copyNextObject() { - unsigned int count; - OSObject * result; + unsigned int count; + OSObject * result; - IOLockLock(lock); + IOLockLock(lock); - count = newSet->getCount(); - if( count ) { - 
result = newSet->getObject( count - 1 ); - result->retain(); - newSet->removeObject( count - 1); - } else { - result = 0; - armed = true; - } + count = newSet->getCount(); + if (count) { + result = newSet->getObject( count - 1 ); + result->retain(); + newSet->removeObject( count - 1); + } else { + result = 0; + armed = true; + } - IOLockUnlock(lock); + IOLockUnlock(lock); - return( result ); + return result; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -890,193 +927,202 @@ OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotificati /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type, - void * reference, vm_size_t referenceSize, vm_size_t extraSize, - bool client64 ) +bool +IOServiceMessageUserNotification::init( mach_port_t port, natural_t type, + void * reference, vm_size_t referenceSize, vm_size_t extraSize, + bool client64 ) { - if( !super::init()) - return( false ); + if (!super::init()) { + return false; + } - if (referenceSize > sizeof(OSAsyncReference64)) - return( false ); + if (referenceSize > sizeof(OSAsyncReference64)) { + return false; + } - clientIs64 = client64; + clientIs64 = client64; - owningPID = proc_selfpid(); + owningPID = proc_selfpid(); - extraSize += sizeof(IOServiceInterestContent64); - msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize; - pingMsg = (PingMsg *) IOMalloc( msgSize); - if( !pingMsg) - return( false ); + extraSize += sizeof(IOServiceInterestContent64); + msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize; + pingMsg = (PingMsg *) IOMalloc( msgSize); + if (!pingMsg) { + return false; + } - bzero( pingMsg, msgSize); + bzero( pingMsg, msgSize); - pingMsg->msgHdr.msgh_remote_port = port; - pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX - | MACH_MSGH_BITS( - MACH_MSG_TYPE_COPY_SEND /*remote*/, - MACH_MSG_TYPE_MAKE_SEND /*local*/); - pingMsg->msgHdr.msgh_size = msgSize; - pingMsg->msgHdr.msgh_id = kOSNotificationMessageID; + pingMsg->msgHdr.msgh_remote_port = port; + pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX + | MACH_MSGH_BITS( + MACH_MSG_TYPE_COPY_SEND /*remote*/, + MACH_MSG_TYPE_MAKE_SEND /*local*/); + pingMsg->msgHdr.msgh_size = msgSize; + pingMsg->msgHdr.msgh_id = kOSNotificationMessageID; - pingMsg->msgBody.msgh_descriptor_count = 1; + pingMsg->msgBody.msgh_descriptor_count = 1; - pingMsg->ports[0].name = 0; - pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND; - pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR; + pingMsg->ports[0].name = 0; + pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND; + pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR; - pingMsg->notifyHeader.size = extraSize; - pingMsg->notifyHeader.type = type; - bcopy( reference, pingMsg->notifyHeader.reference, referenceSize ); + pingMsg->notifyHeader.size = extraSize; + pingMsg->notifyHeader.type = type; + bcopy( reference, pingMsg->notifyHeader.reference, referenceSize ); - return( true ); + return true; } -void IOServiceMessageUserNotification::invalidatePort(void) +void +IOServiceMessageUserNotification::invalidatePort(void) { - if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL; + if (pingMsg) { + pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL; + } } -void IOServiceMessageUserNotification::free( void ) +void +IOServiceMessageUserNotification::free( void ) { - PingMsg * _pingMsg; - vm_size_t _msgSize; + PingMsg * _pingMsg; + vm_size_t _msgSize; - 
_pingMsg = pingMsg; - _msgSize = msgSize; + _pingMsg = pingMsg; + _msgSize = msgSize; - super::free(); + super::free(); - if( _pingMsg && _msgSize) { + if (_pingMsg && _msgSize) { if (_pingMsg->msgHdr.msgh_remote_port) { iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port); } - IOFree( _pingMsg, _msgSize); - } -} - -IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref, - UInt32 messageType, IOService * provider, - void * argument, vm_size_t argSize ) -{ - return( ((IOServiceMessageUserNotification *) target)->handler( - ref, messageType, provider, argument, argSize)); -} - -IOReturn IOServiceMessageUserNotification::handler( void * ref, - UInt32 messageType, IOService * provider, - void * messageArgument, vm_size_t callerArgSize ) -{ - enum { kLocalMsgSize = 0x100 }; - uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)]; - void * allocMsg; - kern_return_t kr; - vm_size_t argSize; - vm_size_t thisMsgSize; - ipc_port_t thisPort, providerPort; - struct PingMsg * thisMsg; - IOServiceInterestContent64 * data; - - if (kIOMessageCopyClientID == messageType) - { - *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32); - return (kIOReturnSuccess); - } - - if (callerArgSize == 0) - { - if (clientIs64) argSize = sizeof(data->messageArgument[0]); - else argSize = sizeof(uint32_t); - } - else - { - if( callerArgSize > kIOUserNotifyMaxMessageSize) - callerArgSize = kIOUserNotifyMaxMessageSize; - argSize = callerArgSize; - } - - // adjust message size for ipc restrictions - natural_t type; - type = pingMsg->notifyHeader.type; - type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift); - type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift); - argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask; - - thisMsgSize = msgSize - + sizeof( IOServiceInterestContent64 ) - - sizeof( data->messageArgument) - + argSize; - - if (thisMsgSize > sizeof(stackMsg)) - { - allocMsg = IOMalloc(thisMsgSize); - if (!allocMsg) return (kIOReturnNoMemory); - thisMsg = (typeof(thisMsg)) allocMsg; - } - else - { - allocMsg = 0; - thisMsg = (typeof(thisMsg)) stackMsg; - } - - bcopy(pingMsg, thisMsg, msgSize); - thisMsg->notifyHeader.type = type; - data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize); + IOFree( _pingMsg, _msgSize); + } +} + +IOReturn +IOServiceMessageUserNotification::_handler( void * target, void * ref, + UInt32 messageType, IOService * provider, + void * argument, vm_size_t argSize ) +{ + return ((IOServiceMessageUserNotification *) target)->handler( + ref, messageType, provider, argument, argSize); +} + +IOReturn +IOServiceMessageUserNotification::handler( void * ref, + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t callerArgSize ) +{ + enum { kLocalMsgSize = 0x100 }; + uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)]; + void * allocMsg; + kern_return_t kr; + vm_size_t argSize; + vm_size_t thisMsgSize; + ipc_port_t thisPort, providerPort; + struct PingMsg * thisMsg; + IOServiceInterestContent64 * data; + + if (kIOMessageCopyClientID == messageType) { + *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32); + return kIOReturnSuccess; + } + + if (callerArgSize == 0) { + if (clientIs64) { + argSize = sizeof(data->messageArgument[0]); + } else { + argSize = sizeof(uint32_t); + } + } else { + if (callerArgSize > kIOUserNotifyMaxMessageSize) { + callerArgSize = kIOUserNotifyMaxMessageSize; + } + argSize = callerArgSize; + } + + // 
adjust message size for ipc restrictions + natural_t type; + type = pingMsg->notifyHeader.type; + type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift); + type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift); + argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask; + + thisMsgSize = msgSize + + sizeof(IOServiceInterestContent64) + - sizeof(data->messageArgument) + + argSize; + + if (thisMsgSize > sizeof(stackMsg)) { + allocMsg = IOMalloc(thisMsgSize); + if (!allocMsg) { + return kIOReturnNoMemory; + } + thisMsg = (typeof(thisMsg))allocMsg; + } else { + allocMsg = 0; + thisMsg = (typeof(thisMsg))stackMsg; + } + + bcopy(pingMsg, thisMsg, msgSize); + thisMsg->notifyHeader.type = type; + data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize); // == pingMsg->notifyHeader.content; - data->messageType = messageType; + data->messageType = messageType; - if (callerArgSize == 0) - { - data->messageArgument[0] = (io_user_reference_t) messageArgument; - if (!clientIs64) - { - data->messageArgument[0] |= (data->messageArgument[0] << 32); + if (callerArgSize == 0) { + data->messageArgument[0] = (io_user_reference_t) messageArgument; + if (!clientIs64) { + data->messageArgument[0] |= (data->messageArgument[0] << 32); + } + } else { + bcopy( messageArgument, data->messageArgument, callerArgSize ); + bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize); } - } - else - { - bcopy( messageArgument, data->messageArgument, callerArgSize ); - bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize); - } - thisMsg->notifyHeader.type = type; - thisMsg->msgHdr.msgh_size = thisMsgSize; + thisMsg->notifyHeader.type = type; + thisMsg->msgHdr.msgh_size = thisMsgSize; - providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT ); - thisMsg->ports[0].name = providerPort; - thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ); - thisMsg->msgHdr.msgh_local_port = thisPort; + providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT ); + thisMsg->ports[0].name = providerPort; + thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ); + thisMsg->msgHdr.msgh_local_port = thisPort; - kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr, - thisMsg->msgHdr.msgh_size, - (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE), - 0); - if( thisPort) - iokit_release_port( thisPort ); - if( providerPort) - iokit_release_port( providerPort ); + kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr, + thisMsg->msgHdr.msgh_size, + (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE), + 0); + if (thisPort) { + iokit_release_port( thisPort ); + } + if (providerPort) { + iokit_release_port( providerPort ); + } - if (allocMsg) - IOFree(allocMsg, thisMsgSize); + if (allocMsg) { + IOFree(allocMsg, thisMsgSize); + } - if((KERN_SUCCESS != kr) && !ipcLogged) - { - ipcLogged = true; - IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr ); - } + if ((KERN_SUCCESS != kr) && !ipcLogged) { + ipcLogged = true; + IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr ); + } - return( kIOReturnSuccess ); + return kIOReturnSuccess; } -OSObject * IOServiceMessageUserNotification::getNextObject() +OSObject * +IOServiceMessageUserNotification::getNextObject() { - return( 0 ); + return 0; } -OSObject * IOServiceMessageUserNotification::copyNextObject() +OSObject * 
+IOServiceMessageUserNotification::copyNextObject() { - return( NULL ); + return NULL; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -1087,210 +1133,213 @@ OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService ) IOLock * gIOUserClientOwnersLock; -void IOUserClient::initialize( void ) +void +IOUserClient::initialize( void ) +{ + gIOObjectPortLock = IOLockAlloc(); + gIOUserClientOwnersLock = IOLockAlloc(); + assert(gIOObjectPortLock && gIOUserClientOwnersLock); +} + +void +IOUserClient::setAsyncReference(OSAsyncReference asyncRef, + mach_port_t wakePort, + void *callback, void *refcon) { - gIOObjectPortLock = IOLockAlloc(); - gIOUserClientOwnersLock = IOLockAlloc(); - assert(gIOObjectPortLock && gIOUserClientOwnersLock); + asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort) + | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]); + asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback; + asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon; } -void IOUserClient::setAsyncReference(OSAsyncReference asyncRef, - mach_port_t wakePort, - void *callback, void *refcon) +void +IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef, + mach_port_t wakePort, + mach_vm_address_t callback, io_user_reference_t refcon) { - asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort) - | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]); - asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback; - asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon; + asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort) + | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]); + asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback; + asyncRef[kIOAsyncCalloutRefconIndex] = refcon; } -void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef, - mach_port_t wakePort, - mach_vm_address_t callback, io_user_reference_t refcon) +void +IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef, + mach_port_t wakePort, + mach_vm_address_t callback, io_user_reference_t refcon, task_t task) { - asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort) - | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]); - asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback; - asyncRef[kIOAsyncCalloutRefconIndex] = refcon; + setAsyncReference64(asyncRef, wakePort, callback, refcon); + if (vm_map_is_64bit(get_task_map(task))) { + asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag; + } } -void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef, - mach_port_t wakePort, - mach_vm_address_t callback, io_user_reference_t refcon, task_t task) +static OSDictionary * +CopyConsoleUser(UInt32 uid) { - setAsyncReference64(asyncRef, wakePort, callback, refcon); - if (vm_map_is_64bit(get_task_map(task))) { - asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag; - } + OSArray * array; + OSDictionary * user = 0; + + if ((array = OSDynamicCast(OSArray, + IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) { + for (unsigned int idx = 0; + (user = OSDynamicCast(OSDictionary, array->getObject(idx))); + idx++) { + OSNumber * num; + + if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey))) + && (uid == num->unsigned32BitValue())) { + user->retain(); + break; + } + } + array->release(); + } + return user; } -static OSDictionary * CopyConsoleUser(UInt32 uid) +static OSDictionary * +CopyUserOnConsole(void) { OSArray * array; - OSDictionary * user = 0; + OSDictionary * user = 0; if 
((array = OSDynamicCast(OSArray, - IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) - { - for (unsigned int idx = 0; + IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) { + for (unsigned int idx = 0; (user = OSDynamicCast(OSDictionary, array->getObject(idx))); idx++) { - OSNumber * num; - - if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey))) - && (uid == num->unsigned32BitValue())) { - user->retain(); - break; - } - } - array->release(); - } - return user; -} - -static OSDictionary * CopyUserOnConsole(void) -{ - OSArray * array; - OSDictionary * user = 0; - - if ((array = OSDynamicCast(OSArray, - IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) - { - for (unsigned int idx = 0; - (user = OSDynamicCast(OSDictionary, array->getObject(idx))); - idx++) - { - if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) - { - user->retain(); - break; - } - } - array->release(); - } - return (user); -} - -IOReturn IOUserClient::clientHasAuthorization( task_t task, - IOService * service ) -{ - proc_t p; - - p = (proc_t) get_bsdtask_info(task); - if (p) - { - uint64_t authorizationID; - - authorizationID = proc_uniqueid(p); - if (authorizationID) - { - if (service->getAuthorizationID() == authorizationID) - { - return (kIOReturnSuccess); - } - } - } - - return (kIOReturnNotPermitted); -} - -IOReturn IOUserClient::clientHasPrivilege( void * securityToken, - const char * privilegeName ) -{ - kern_return_t kr; - security_token_t token; - mach_msg_type_number_t count; - task_t task; - OSDictionary * user; - bool secureConsole; - - - if (!strncmp(privilegeName, kIOClientPrivilegeForeground, - sizeof(kIOClientPrivilegeForeground))) - { - if (task_is_gpu_denied(current_task())) - return (kIOReturnNotPrivileged); - else - return (kIOReturnSuccess); - } - - if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession, - sizeof(kIOClientPrivilegeConsoleSession))) - { - kauth_cred_t cred; - proc_t p; - - task = (task_t) securityToken; - if (!task) - task = current_task(); + if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) { + user->retain(); + break; + } + } + array->release(); + } + return user; +} + +IOReturn +IOUserClient::clientHasAuthorization( task_t task, + IOService * service ) +{ + proc_t p; + p = (proc_t) get_bsdtask_info(task); - kr = kIOReturnNotPrivileged; - - if (p && (cred = kauth_cred_proc_ref(p))) - { - user = CopyUserOnConsole(); - if (user) - { - OSNumber * num; - if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey))) - && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) - { - kr = kIOReturnSuccess; - } - user->release(); - } - kauth_cred_unref(&cred); - } - return (kr); - } - - if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess, - sizeof(kIOClientPrivilegeSecureConsoleProcess)))) - task = (task_t)((IOUCProcessToken *)securityToken)->token; - else - task = (task_t)securityToken; - - count = TASK_SECURITY_TOKEN_COUNT; - kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count ); - - if (KERN_SUCCESS != kr) - {} - else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator, - sizeof(kIOClientPrivilegeAdministrator))) { - if (0 != token.val[0]) - kr = kIOReturnNotPrivileged; - } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser, - sizeof(kIOClientPrivilegeLocalUser))) { - user = CopyConsoleUser(token.val[0]); - if ( user ) - user->release(); - else - kr = 
kIOReturnNotPrivileged; - } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser, - sizeof(kIOClientPrivilegeConsoleUser))) { - user = CopyConsoleUser(token.val[0]); - if ( user ) { - if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) - kr = kIOReturnNotPrivileged; - else if ( secureConsole ) { - OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey)); - if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) - kr = kIOReturnNotPrivileged; - } - user->release(); - } - else - kr = kIOReturnNotPrivileged; - } else - kr = kIOReturnUnsupported; - - return (kr); -} - -OSObject * IOUserClient::copyClientEntitlement( task_t task, - const char * entitlement ) -{ -#define MAX_ENTITLEMENTS_LEN (128 * 1024) + if (p) { + uint64_t authorizationID; + + authorizationID = proc_uniqueid(p); + if (authorizationID) { + if (service->getAuthorizationID() == authorizationID) { + return kIOReturnSuccess; + } + } + } + + return kIOReturnNotPermitted; +} + +IOReturn +IOUserClient::clientHasPrivilege( void * securityToken, + const char * privilegeName ) +{ + kern_return_t kr; + security_token_t token; + mach_msg_type_number_t count; + task_t task; + OSDictionary * user; + bool secureConsole; + + + if (!strncmp(privilegeName, kIOClientPrivilegeForeground, + sizeof(kIOClientPrivilegeForeground))) { + if (task_is_gpu_denied(current_task())) { + return kIOReturnNotPrivileged; + } else { + return kIOReturnSuccess; + } + } + + if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession, + sizeof(kIOClientPrivilegeConsoleSession))) { + kauth_cred_t cred; + proc_t p; + + task = (task_t) securityToken; + if (!task) { + task = current_task(); + } + p = (proc_t) get_bsdtask_info(task); + kr = kIOReturnNotPrivileged; + + if (p && (cred = kauth_cred_proc_ref(p))) { + user = CopyUserOnConsole(); + if (user) { + OSNumber * num; + if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey))) + && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) { + kr = kIOReturnSuccess; + } + user->release(); + } + kauth_cred_unref(&cred); + } + return kr; + } + + if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess, + sizeof(kIOClientPrivilegeSecureConsoleProcess)))) { + task = (task_t)((IOUCProcessToken *)securityToken)->token; + } else { + task = (task_t)securityToken; + } + + count = TASK_SECURITY_TOKEN_COUNT; + kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count ); + + if (KERN_SUCCESS != kr) { + } else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator, + sizeof(kIOClientPrivilegeAdministrator))) { + if (0 != token.val[0]) { + kr = kIOReturnNotPrivileged; + } + } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser, + sizeof(kIOClientPrivilegeLocalUser))) { + user = CopyConsoleUser(token.val[0]); + if (user) { + user->release(); + } else { + kr = kIOReturnNotPrivileged; + } + } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser, + sizeof(kIOClientPrivilegeConsoleUser))) { + user = CopyConsoleUser(token.val[0]); + if (user) { + if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) { + kr = kIOReturnNotPrivileged; + } else if (secureConsole) { + OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey)); + if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) { + kr = kIOReturnNotPrivileged; + } + } + 
user->release(); + } else { + kr = kIOReturnNotPrivileged; + } + } else { + kr = kIOReturnUnsupported; + } + + return kr; +} + +OSObject * +IOUserClient::copyClientEntitlement( task_t task, + const char * entitlement ) +{ +#define MAX_ENTITLEMENTS_LEN (128 * 1024) proc_t p = NULL; pid_t pid = 0; @@ -1304,16 +1353,19 @@ OSObject * IOUserClient::copyClientEntitlement( task_t task, OSObject *value = NULL; p = (proc_t)get_bsdtask_info(task); - if (p == NULL) + if (p == NULL) { goto fail; + } pid = proc_pid(p); proc_name(pid, procname, (int)sizeof(procname)); - if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) + if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) { goto fail; + } - if (len <= offsetof(CS_GenericBlob, data)) + if (len <= offsetof(CS_GenericBlob, data)) { goto fail; + } /* * Per , enforce a limit on the amount of XML @@ -1331,8 +1383,9 @@ OSObject * IOUserClient::copyClientEntitlement( task_t task, * terminate it. */ entitlements_data = (char *)IOMalloc(len + 1); - if (entitlements_data == NULL) + if (entitlements_data == NULL) { goto fail; + } memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len); entitlements_data[len] = '\0'; @@ -1341,360 +1394,395 @@ OSObject * IOUserClient::copyClientEntitlement( task_t task, IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy()); goto fail; } - if (entitlements_obj == NULL) + if (entitlements_obj == NULL) { goto fail; + } entitlements = OSDynamicCast(OSDictionary, entitlements_obj); - if (entitlements == NULL) + if (entitlements == NULL) { goto fail; + } /* Fetch the entitlement value from the dictionary. */ value = entitlements->getObject(entitlement); - if (value != NULL) + if (value != NULL) { value->retain(); + } fail: - if (entitlements_data != NULL) + if (entitlements_data != NULL) { IOFree(entitlements_data, len + 1); - if (entitlements_obj != NULL) + } + if (entitlements_obj != NULL) { entitlements_obj->release(); - if (errorString != NULL) + } + if (errorString != NULL) { errorString->release(); + } return value; } -bool IOUserClient::init() +bool +IOUserClient::init() { - if (getPropertyTable() || super::init()) + if (getPropertyTable() || super::init()) { return reserve(); - + } + return false; } -bool IOUserClient::init(OSDictionary * dictionary) +bool +IOUserClient::init(OSDictionary * dictionary) { - if (getPropertyTable() || super::init(dictionary)) + if (getPropertyTable() || super::init(dictionary)) { return reserve(); - + } + return false; } -bool IOUserClient::initWithTask(task_t owningTask, - void * securityID, - UInt32 type ) -{ - if (getPropertyTable() || super::init()) +bool +IOUserClient::initWithTask(task_t owningTask, + void * securityID, + UInt32 type ) +{ + if (getPropertyTable() || super::init()) { return reserve(); - + } + return false; } -bool IOUserClient::initWithTask(task_t owningTask, - void * securityID, - UInt32 type, - OSDictionary * properties ) +bool +IOUserClient::initWithTask(task_t owningTask, + void * securityID, + UInt32 type, + OSDictionary * properties ) { - bool ok; + bool ok; - ok = super::init( properties ); - ok &= initWithTask( owningTask, securityID, type ); + ok = super::init( properties ); + ok &= initWithTask( owningTask, securityID, type ); - return( ok ); + return ok; } -bool IOUserClient::reserve() -{ - if(!reserved) { - reserved = IONew(ExpansionData, 1); +bool +IOUserClient::reserve() +{ if (!reserved) { - return false; + reserved = IONew(ExpansionData, 1); + if (!reserved) { + return 
false; + } } - } - setTerminateDefer(NULL, true); - IOStatisticsRegisterCounter(); - - return true; + setTerminateDefer(NULL, true); + IOStatisticsRegisterCounter(); + + return true; } -struct IOUserClientOwner -{ - task_t task; - queue_chain_t taskLink; - IOUserClient * uc; - queue_chain_t ucLink; +struct IOUserClientOwner { + task_t task; + queue_chain_t taskLink; + IOUserClient * uc; + queue_chain_t ucLink; }; IOReturn IOUserClient::registerOwner(task_t task) { - IOUserClientOwner * owner; - IOReturn ret; - bool newOwner; - - IOLockLock(gIOUserClientOwnersLock); - - newOwner = true; - ret = kIOReturnSuccess; - - if (!owners.next) queue_init(&owners); - else - { - queue_iterate(&owners, owner, IOUserClientOwner *, ucLink) - { - if (task != owner->task) continue; - newOwner = false; - break; - } - } - if (newOwner) - { - owner = IONew(IOUserClientOwner, 1); - if (!owner) ret = kIOReturnNoMemory; - else - { - owner->task = task; - owner->uc = this; - queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink); - queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink); - } - } - - IOLockUnlock(gIOUserClientOwnersLock); - - return (ret); + IOUserClientOwner * owner; + IOReturn ret; + bool newOwner; + + IOLockLock(gIOUserClientOwnersLock); + + newOwner = true; + ret = kIOReturnSuccess; + + if (!owners.next) { + queue_init(&owners); + } else { + queue_iterate(&owners, owner, IOUserClientOwner *, ucLink) + { + if (task != owner->task) { + continue; + } + newOwner = false; + break; + } + } + if (newOwner) { + owner = IONew(IOUserClientOwner, 1); + if (!owner) { + ret = kIOReturnNoMemory; + } else { + owner->task = task; + owner->uc = this; + queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink); + queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink); + } + } + + IOLockUnlock(gIOUserClientOwnersLock); + + return ret; } void IOUserClient::noMoreSenders(void) { - IOUserClientOwner * owner; + IOUserClientOwner * owner; - IOLockLock(gIOUserClientOwnersLock); + IOLockLock(gIOUserClientOwnersLock); - if (owners.next) - { - while (!queue_empty(&owners)) - { - owner = (IOUserClientOwner *)(void *) queue_first(&owners); - queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink); - queue_remove(&owners, owner, IOUserClientOwner *, ucLink); - IODelete(owner, IOUserClientOwner, 1); - } - owners.next = owners.prev = NULL; - } + if (owners.next) { + while (!queue_empty(&owners)) { + owner = (IOUserClientOwner *)(void *) queue_first(&owners); + queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink); + queue_remove(&owners, owner, IOUserClientOwner *, ucLink); + IODelete(owner, IOUserClientOwner, 1); + } + owners.next = owners.prev = NULL; + } - IOLockUnlock(gIOUserClientOwnersLock); + IOLockUnlock(gIOUserClientOwnersLock); } extern "C" kern_return_t iokit_task_terminate(task_t task) { - IOUserClientOwner * owner; - IOUserClient * dead; - IOUserClient * uc; - queue_head_t * taskque; - - IOLockLock(gIOUserClientOwnersLock); - - taskque = task_io_user_clients(task); - dead = NULL; - while (!queue_empty(taskque)) - { - owner = (IOUserClientOwner *)(void *) queue_first(taskque); - uc = owner->uc; - queue_remove(taskque, owner, IOUserClientOwner *, taskLink); - queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink); - if (queue_empty(&uc->owners)) - { - uc->retain(); - IOLog("destroying out of band connect for %s\n", uc->getName()); - // now using the uc queue head as a singly 
linked queue, - // leaving .next as NULL to mark it empty - uc->owners.next = NULL; - uc->owners.prev = (queue_entry_t) dead; - dead = uc; - } - IODelete(owner, IOUserClientOwner, 1); - } + IOUserClientOwner * owner; + IOUserClient * dead; + IOUserClient * uc; + queue_head_t * taskque; + + IOLockLock(gIOUserClientOwnersLock); + + taskque = task_io_user_clients(task); + dead = NULL; + while (!queue_empty(taskque)) { + owner = (IOUserClientOwner *)(void *) queue_first(taskque); + uc = owner->uc; + queue_remove(taskque, owner, IOUserClientOwner *, taskLink); + queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink); + if (queue_empty(&uc->owners)) { + uc->retain(); + IOLog("destroying out of band connect for %s\n", uc->getName()); + // now using the uc queue head as a singly linked queue, + // leaving .next as NULL to mark it empty + uc->owners.next = NULL; + uc->owners.prev = (queue_entry_t) dead; + dead = uc; + } + IODelete(owner, IOUserClientOwner, 1); + } - IOLockUnlock(gIOUserClientOwnersLock); + IOLockUnlock(gIOUserClientOwnersLock); - while (dead) - { - uc = dead; - dead = (IOUserClient *)(void *) dead->owners.prev; - uc->owners.prev = NULL; - if (uc->sharedInstance || !uc->closed) uc->clientDied(); - uc->release(); - } + while (dead) { + uc = dead; + dead = (IOUserClient *)(void *) dead->owners.prev; + uc->owners.prev = NULL; + if (uc->sharedInstance || !uc->closed) { + uc->clientDied(); + } + uc->release(); + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } -void IOUserClient::free() +void +IOUserClient::free() { - if( mappings) mappings->release(); - if (lock) IOLockFree(lock); - - IOStatisticsUnregisterCounter(); - - assert(!owners.next); - assert(!owners.prev); + if (mappings) { + mappings->release(); + } + if (lock) { + IOLockFree(lock); + } + + IOStatisticsUnregisterCounter(); + + assert(!owners.next); + assert(!owners.prev); - if (reserved) IODelete(reserved, ExpansionData, 1); - - super::free(); + if (reserved) { + IODelete(reserved, ExpansionData, 1); + } + + super::free(); } -IOReturn IOUserClient::clientDied( void ) +IOReturn +IOUserClient::clientDied( void ) { - IOReturn ret = kIOReturnNotReady; + IOReturn ret = kIOReturnNotReady; - if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) - { - ret = clientClose(); - } + if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) { + ret = clientClose(); + } - return (ret); + return ret; } -IOReturn IOUserClient::clientClose( void ) +IOReturn +IOUserClient::clientClose( void ) { - return( kIOReturnUnsupported ); + return kIOReturnUnsupported; } -IOService * IOUserClient::getService( void ) +IOService * +IOUserClient::getService( void ) { - return( 0 ); + return 0; } -IOReturn IOUserClient::registerNotificationPort( - mach_port_t /* port */, - UInt32 /* type */, - UInt32 /* refCon */) +IOReturn +IOUserClient::registerNotificationPort( + mach_port_t /* port */, + UInt32 /* type */, + UInt32 /* refCon */) { - return( kIOReturnUnsupported); + return kIOReturnUnsupported; } -IOReturn IOUserClient::registerNotificationPort( - mach_port_t port, - UInt32 type, - io_user_reference_t refCon) +IOReturn +IOUserClient::registerNotificationPort( + mach_port_t port, + UInt32 type, + io_user_reference_t refCon) { - return (registerNotificationPort(port, type, (UInt32) refCon)); + return registerNotificationPort(port, type, (UInt32) refCon); } -IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type, - semaphore_t * semaphore ) +IOReturn +IOUserClient::getNotificationSemaphore( UInt32 notification_type, + semaphore_t 
* semaphore ) { - return( kIOReturnUnsupported); + return kIOReturnUnsupported; } -IOReturn IOUserClient::connectClient( IOUserClient * /* client */ ) +IOReturn +IOUserClient::connectClient( IOUserClient * /* client */ ) { - return( kIOReturnUnsupported); + return kIOReturnUnsupported; } -IOReturn IOUserClient::clientMemoryForType( UInt32 type, - IOOptionBits * options, - IOMemoryDescriptor ** memory ) +IOReturn +IOUserClient::clientMemoryForType( UInt32 type, + IOOptionBits * options, + IOMemoryDescriptor ** memory ) { - return( kIOReturnUnsupported); + return kIOReturnUnsupported; } #if !__LP64__ -IOMemoryMap * IOUserClient::mapClientMemory( - IOOptionBits type, - task_t task, - IOOptionBits mapFlags, - IOVirtualAddress atAddress ) +IOMemoryMap * +IOUserClient::mapClientMemory( + IOOptionBits type, + task_t task, + IOOptionBits mapFlags, + IOVirtualAddress atAddress ) { - return (NULL); + return NULL; } #endif -IOMemoryMap * IOUserClient::mapClientMemory64( - IOOptionBits type, - task_t task, - IOOptionBits mapFlags, - mach_vm_address_t atAddress ) +IOMemoryMap * +IOUserClient::mapClientMemory64( + IOOptionBits type, + task_t task, + IOOptionBits mapFlags, + mach_vm_address_t atAddress ) { - IOReturn err; - IOOptionBits options = 0; - IOMemoryDescriptor * memory = 0; - IOMemoryMap * map = 0; + IOReturn err; + IOOptionBits options = 0; + IOMemoryDescriptor * memory = 0; + IOMemoryMap * map = 0; - err = clientMemoryForType( (UInt32) type, &options, &memory ); + err = clientMemoryForType((UInt32) type, &options, &memory ); - if( memory && (kIOReturnSuccess == err)) { + if (memory && (kIOReturnSuccess == err)) { + FAKE_STACK_FRAME(getMetaClass()); - FAKE_STACK_FRAME(getMetaClass()); + options = (options & ~kIOMapUserOptionsMask) + | (mapFlags & kIOMapUserOptionsMask); + map = memory->createMappingInTask( task, atAddress, options ); + memory->release(); - options = (options & ~kIOMapUserOptionsMask) - | (mapFlags & kIOMapUserOptionsMask); - map = memory->createMappingInTask( task, atAddress, options ); - memory->release(); - - FAKE_STACK_FRAME_END(); - } + FAKE_STACK_FRAME_END(); + } - return( map ); + return map; } -IOReturn IOUserClient::exportObjectToClient(task_t task, - OSObject *obj, io_object_t *clientObj) +IOReturn +IOUserClient::exportObjectToClient(task_t task, + OSObject *obj, io_object_t *clientObj) { - mach_port_name_t name; + mach_port_name_t name; - name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT ); + name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT ); - *(mach_port_name_t *)clientObj = name; + *clientObj = (io_object_t)(uintptr_t) name; - if (obj) obj->release(); + if (obj) { + obj->release(); + } - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOUserClient::copyPortNameForObjectInTask(task_t task, - OSObject *obj, mach_port_name_t * port_name) +IOReturn +IOUserClient::copyPortNameForObjectInTask(task_t task, + OSObject *obj, mach_port_name_t * port_name) { - mach_port_name_t name; + mach_port_name_t name; - name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT ); + name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT ); - *(mach_port_name_t *) port_name = name; + *(mach_port_name_t *) port_name = name; - return kIOReturnSuccess; + return kIOReturnSuccess; } -IOReturn IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name, - OSObject **obj) +IOReturn +IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name, + OSObject **obj) { - 
-	OSObject * object;
+	OSObject * object;

-	object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
+	object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);

-	*obj = object;
+	*obj = object;

-	return (object ? kIOReturnSuccess : kIOReturnIPCError);
+	return object ? kIOReturnSuccess : kIOReturnIPCError;
 }

-IOReturn IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
+IOReturn
+IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
 {
-	return (iokit_mod_send_right(task, port_name, delta));
+	return iokit_mod_send_right(task, port_name, delta);
 }

-IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
+IOExternalMethod *
+IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
 {
-	return( 0 );
+	return 0;
 }

-IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
+IOExternalAsyncMethod *
+IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
 {
-	return( 0 );
+	return 0;
 }

-IOExternalTrap * IOUserClient::
+IOExternalTrap *
+IOUserClient::
 getExternalTrapForIndex(UInt32 index)
 {
 	return NULL;
@@ -1705,198 +1793,210 @@ getExternalTrapForIndex(UInt32 index)
 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
 // functions can break clients of kexts implementing getExternalMethodForIndex()
-IOExternalMethod * IOUserClient::
+IOExternalMethod *
+IOUserClient::
 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
 {
-	IOExternalMethod *method = getExternalMethodForIndex(index);
+	IOExternalMethod *method = getExternalMethodForIndex(index);

-	if (method)
-		*targetP = (IOService *) method->object;
+	if (method) {
+		*targetP = (IOService *) method->object;
+	}

-	return method;
+	return method;
 }

-IOExternalAsyncMethod * IOUserClient::
+IOExternalAsyncMethod *
+IOUserClient::
 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
 {
-	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
+	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);

-	if (method)
-		*targetP = (IOService *) method->object;
+	if (method) {
+		*targetP = (IOService *) method->object;
+	}

-	return method;
+	return method;
 }

-IOExternalTrap * IOUserClient::
+IOExternalTrap *
+IOUserClient::
 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
 {
-	IOExternalTrap *trap = getExternalTrapForIndex(index);
+	IOExternalTrap *trap = getExternalTrapForIndex(index);

-	if (trap) {
-		*targetP = trap->object;
-	}
+	if (trap) {
+		*targetP = trap->object;
+	}

-	return trap;
+	return trap;
 }
 #pragma clang diagnostic pop

-IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
+IOReturn
+IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
 {
-	mach_port_t port;
-	port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
+	mach_port_t port;
+	port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);

-	if (MACH_PORT_NULL != port)
-		iokit_release_port_send(port);
+	if (MACH_PORT_NULL != port) {
+		iokit_release_port_send(port);
+	}

-	return (kIOReturnSuccess);
+	return kIOReturnSuccess;
 }

-IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
+IOReturn
+IOUserClient::releaseNotificationPort(mach_port_t port)
 {
-	if (MACH_PORT_NULL != port)
-		iokit_release_port_send(port);
+	if (MACH_PORT_NULL != port) {
+		iokit_release_port_send(port);
+	}

-	return (kIOReturnSuccess);
+	return kIOReturnSuccess;
 }

-IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
-    IOReturn result, void *args[], UInt32 numArgs)
+IOReturn
+IOUserClient::sendAsyncResult(OSAsyncReference reference,
+    IOReturn result, void *args[], UInt32 numArgs)
 {
-	OSAsyncReference64 reference64;
-	io_user_reference_t args64[kMaxAsyncArgs];
-	unsigned int idx;
+	OSAsyncReference64 reference64;
+	io_user_reference_t args64[kMaxAsyncArgs];
+	unsigned int idx;

-	if (numArgs > kMaxAsyncArgs)
-		return kIOReturnMessageTooLarge;
+	if (numArgs > kMaxAsyncArgs) {
+		return kIOReturnMessageTooLarge;
+	}

-	for (idx = 0; idx < kOSAsyncRef64Count; idx++)
-		reference64[idx] = REF64(reference[idx]);
+	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
+		reference64[idx] = REF64(reference[idx]);
+	}

-	for (idx = 0; idx < numArgs; idx++)
-		args64[idx] = REF64(args[idx]);
+	for (idx = 0; idx < numArgs; idx++) {
+		args64[idx] = REF64(args[idx]);
+	}

-	return (sendAsyncResult64(reference64, result, args64, numArgs));
+	return sendAsyncResult64(reference64, result, args64, numArgs);
 }

-IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
-    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
+IOReturn
+IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
+    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
 {
 	return _sendAsyncResult64(reference, result, args, numArgs, options);
 }

-IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
-    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
-{
-	return _sendAsyncResult64(reference, result, args, numArgs, 0);
-}
-
-IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
-    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
-{
-	struct ReplyMsg
-	{
-		mach_msg_header_t msgHdr;
-		union
-		{
-			struct
-			{
-				OSNotificationHeader notifyHdr;
-				IOAsyncCompletionContent asyncContent;
-				uint32_t args[kMaxAsyncArgs];
-			} msg32;
-			struct
-			{
-				OSNotificationHeader64 notifyHdr;
-				IOAsyncCompletionContent asyncContent;
-				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
-			} msg64;
-		} m;
-	};
-	ReplyMsg replyMsg;
-	mach_port_t replyPort;
-	kern_return_t kr;
-
-	// If no reply port, do nothing.
-	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
-	if (replyPort == MACH_PORT_NULL)
-		return kIOReturnSuccess;
-
-	if (numArgs > kMaxAsyncArgs)
-		return kIOReturnMessageTooLarge;
-
-	bzero(&replyMsg, sizeof(replyMsg));
-	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
-	    0 /*local*/);
-	replyMsg.msgHdr.msgh_remote_port = replyPort;
-	replyMsg.msgHdr.msgh_local_port = 0;
-	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
-	if (kIOUCAsync64Flag & reference[0])
-	{
-		replyMsg.msgHdr.msgh_size =
-		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
-		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
-		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
-		    + numArgs * sizeof(io_user_reference_t);
-		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
-		bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
-
-		replyMsg.m.msg64.asyncContent.result = result;
-		if (numArgs)
-			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
-	}
-	else
-	{
-		unsigned int idx;
-
-		replyMsg.msgHdr.msgh_size =
-		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
-		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
-
-		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
-		    + numArgs * sizeof(uint32_t);
-		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
-
-		for (idx = 0; idx < kOSAsyncRefCount; idx++)
-			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
-
-		replyMsg.m.msg32.asyncContent.result = result;
-
-		for (idx = 0; idx < numArgs; idx++)
-			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
-	}
-
-	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
+IOReturn
+IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
+    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
+{
+	return _sendAsyncResult64(reference, result, args, numArgs, 0);
+}
+
+IOReturn
+IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
+    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
+{
+	struct ReplyMsg {
+		mach_msg_header_t msgHdr;
+		union{
+			struct{
+				OSNotificationHeader notifyHdr;
+				IOAsyncCompletionContent asyncContent;
+				uint32_t args[kMaxAsyncArgs];
+			} msg32;
+			struct{
+				OSNotificationHeader64 notifyHdr;
+				IOAsyncCompletionContent asyncContent;
+				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
+			} msg64;
+		} m;
+	};
+	ReplyMsg replyMsg;
+	mach_port_t replyPort;
+	kern_return_t kr;
+
+	// If no reply port, do nothing.
+	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
+	if (replyPort == MACH_PORT_NULL) {
+		return kIOReturnSuccess;
+	}
+
+	if (numArgs > kMaxAsyncArgs) {
+		return kIOReturnMessageTooLarge;
+	}
+
+	bzero(&replyMsg, sizeof(replyMsg));
+	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
+	    0 /*local*/);
+	replyMsg.msgHdr.msgh_remote_port = replyPort;
+	replyMsg.msgHdr.msgh_local_port = 0;
+	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
+	if (kIOUCAsync64Flag & reference[0]) {
+		replyMsg.msgHdr.msgh_size =
+		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
+		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
+		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
+		    + numArgs * sizeof(io_user_reference_t);
+		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
+		bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
+
+		replyMsg.m.msg64.asyncContent.result = result;
+		if (numArgs) {
+			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
+		}
+	} else {
+		unsigned int idx;
+
+		replyMsg.msgHdr.msgh_size =
+		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
+		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
+
+		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
+		    + numArgs * sizeof(uint32_t);
+		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
+
+		for (idx = 0; idx < kOSAsyncRefCount; idx++) {
+			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
+		}
+
+		replyMsg.m.msg32.asyncContent.result = result;
+
+		for (idx = 0; idx < numArgs; idx++) {
+			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
+		}
+	}
+
+	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
 		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
-		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
+		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
 	} else {
 		/* Fail on full queue. */
 		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
-		    replyMsg.msgHdr.msgh_size);
+		    replyMsg.msgHdr.msgh_size);
+	}
+	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
+		reference[0] |= kIOUCAsyncErrorLoggedFlag;
+		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
 	}
-	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
-	{
-		reference[0] |= kIOUCAsyncErrorLoggedFlag;
-		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
-	}
-	return kr;
+	return kr;
 }
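For orientation on the hunk above: _sendAsyncResult64() packs the completion into one of two wire layouts, selected by a flag bit that rides in reference[0] alongside the reply port name. A minimal sketch of how those bits are consumed, using only constants that appear in this file (the helper itself is hypothetical, not part of the patch):

static void
describeAsyncReference(OSAsyncReference64 reference)
{
	// Sketch, not from this patch: reference[0] multiplexes the reply port
	// with flag bits. kIOUCAsync0Flags masks the flags off the port name,
	// kIOUCAsync64Flag selects the msg64 layout built above, and
	// kIOUCAsyncErrorLoggedFlag rate-limits the IOLog to once per reference.
	mach_port_t replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	bool        wants64   = (kIOUCAsync64Flag & reference[0]) != 0;
	bool        logged    = (kIOUCAsyncErrorLoggedFlag & reference[0]) != 0;
	(void)replyPort; (void)wants64; (void)logged;
}

The msgh_size arithmetic serves the same goal in both branches: start from the fully populated fixed-capacity message struct, then subtract the unused tail of the args array so the message on the wire carries only numArgs entries.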

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

 extern "C" {
-
-#define CHECK(cls,obj,out)			\
-	cls * out;				\
-	if( !(out = OSDynamicCast( cls, obj)))	\
+#define CHECK(cls, obj, out)			\
+	cls * out;				\
+	if( !(out = OSDynamicCast( cls, obj)))	\
 	return( kIOReturnBadArgument )

-#define CHECKLOCKED(cls,obj,out)		\
-	IOUserIterator * oIter;			\
-	cls * out;				\
-	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))	\
-	return (kIOReturnBadArgument);		\
-	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))	\
+#define CHECKLOCKED(cls, obj, out)		\
+	IOUserIterator * oIter;			\
+	cls * out;				\
+	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))	\
+	return (kIOReturnBadArgument);		\
+	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))	\
 	return (kIOReturnBadArgument)
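Reading aid for the macros above: CHECK(cls, obj, out) both declares the typed local and validates the inbound MIG object in one step, so every is_io_* routine below starts with a type-safe downcast. For example, CHECK( IOService, _service, service ) expands to roughly:

	IOService * service;
	if (!(service = OSDynamicCast(IOService, _service))) {
		return kIOReturnBadArgument;
	}

CHECKLOCKED additionally unwraps the IOUserIterator holder into oIter, so the caller can take oIter->lock around iterator mutations, as io_registry_iterator_enter_entry() does further down.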

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

@@ -1904,434 +2004,491 @@ extern "C" {
 // Create a vm_map_copy_t or kalloc'ed data for memory
 // to be copied out. ipc will free after the copyout.

-static kern_return_t copyoutkdata( const void * data, vm_size_t len,
-    io_buf_ptr_t * buf )
+static kern_return_t
+copyoutkdata( const void * data, vm_size_t len,
+    io_buf_ptr_t * buf )
 {
-	kern_return_t err;
-	vm_map_copy_t copy;
+	kern_return_t err;
+	vm_map_copy_t copy;

-	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
-	    false /* src_destroy */, &copy);
+	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
+	    false /* src_destroy */, &copy);

-	assert( err == KERN_SUCCESS );
-	if( err == KERN_SUCCESS )
-		*buf = (char *) copy;
+	assert( err == KERN_SUCCESS );
+	if (err == KERN_SUCCESS) {
+		*buf = (char *) copy;
+	}

-	return( err );
+	return err;
 }
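The comment above copyoutkdata() is the whole ownership contract: on success the vm_map_copy_t is handed to MIG, which frees it after the copyout, so callers release only their own serialization buffer. The property getters later in this file all follow the same shape; a condensed sketch of that calling pattern (names as used below):

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		return kIOReturnNoMemory;
	}
	if (obj->serialize(s)) {
		*propertiesCnt = s->getLength();
		// On success, ipc now owns the copy object; on failure nothing
		// was handed off, so there is nothing extra to clean up.
		err = copyoutkdata(s->text(), s->getLength(), properties);
	} else {
		err = kIOReturnUnsupported;
	}
	s->release();   // always safe: only our buffer, never the vm_map_copy_t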

 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

 /* Routine io_server_version */
-kern_return_t is_io_server_version(
+kern_return_t
+is_io_server_version(
 	mach_port_t master_port,
 	uint64_t *version)
 {
-	*version = IOKIT_SERVER_VERSION;
-	return (kIOReturnSuccess);
+	*version = IOKIT_SERVER_VERSION;
+	return kIOReturnSuccess;
 }

 /* Routine io_object_get_class */
-kern_return_t is_io_object_get_class(
-	io_object_t object,
-	io_name_t className )
+kern_return_t
+is_io_object_get_class(
+	io_object_t object,
+	io_name_t className )
 {
-	const OSMetaClass* my_obj = NULL;
-
-	if( !object)
-		return( kIOReturnBadArgument );
-
-	my_obj = object->getMetaClass();
-	if (!my_obj) {
-		return (kIOReturnNotFound);
-	}
+	const OSMetaClass* my_obj = NULL;
+
+	if (!object) {
+		return kIOReturnBadArgument;
+	}

-	strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
+	my_obj = object->getMetaClass();
+	if (!my_obj) {
+		return kIOReturnNotFound;
+	}
+
+	strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_object_get_superclass */
-kern_return_t is_io_object_get_superclass(
+kern_return_t
+is_io_object_get_superclass(
 	mach_port_t master_port,
-	io_name_t obj_name,
+	io_name_t obj_name,
 	io_name_t class_name)
 {
-	IOReturn ret;
-	const OSMetaClass * meta;
-	const OSMetaClass * super;
-	const OSSymbol * name;
-	const char * cstr;
-
-	if (!obj_name || !class_name) return (kIOReturnBadArgument);
-	if (master_port != master_device_port) return( kIOReturnNotPrivileged);
-
-	ret = kIOReturnNotFound;
-	meta = 0;
-	do
-	{
-		name = OSSymbol::withCString(obj_name);
-		if (!name) break;
-		meta = OSMetaClass::copyMetaClassWithName(name);
-		if (!meta) break;
-		super = meta->getSuperClass();
-		if (!super) break;
-		cstr = super->getClassName();
-		if (!cstr) break;
-		strlcpy(class_name, cstr, sizeof(io_name_t));
-		ret = kIOReturnSuccess;
-	}
-	while (false);
-
-	OSSafeReleaseNULL(name);
-	if (meta) meta->releaseMetaClass();
-
-	return (ret);
+	IOReturn ret;
+	const OSMetaClass * meta;
+	const OSMetaClass * super;
+	const OSSymbol * name;
+	const char * cstr;
+
+	if (!obj_name || !class_name) {
+		return kIOReturnBadArgument;
+	}
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}
+
+	ret = kIOReturnNotFound;
+	meta = 0;
+	do{
+		name = OSSymbol::withCString(obj_name);
+		if (!name) {
+			break;
+		}
+		meta = OSMetaClass::copyMetaClassWithName(name);
+		if (!meta) {
+			break;
+		}
+		super = meta->getSuperClass();
+		if (!super) {
+			break;
+		}
+		cstr = super->getClassName();
+		if (!cstr) {
+			break;
+		}
+		strlcpy(class_name, cstr, sizeof(io_name_t));
+		ret = kIOReturnSuccess;
+	}while (false);
+
+	OSSafeReleaseNULL(name);
+	if (meta) {
+		meta->releaseMetaClass();
+	}
+
+	return ret;
 }

 /* Routine io_object_get_bundle_identifier */
-kern_return_t is_io_object_get_bundle_identifier(
+kern_return_t
+is_io_object_get_bundle_identifier(
 	mach_port_t master_port,
-	io_name_t obj_name,
+	io_name_t obj_name,
 	io_name_t bundle_name)
 {
-	IOReturn ret;
-	const OSMetaClass * meta;
-	const OSSymbol * name;
-	const OSSymbol * identifier;
-	const char * cstr;
-
-	if (!obj_name || !bundle_name) return (kIOReturnBadArgument);
-	if (master_port != master_device_port) return( kIOReturnNotPrivileged);
-
-	ret = kIOReturnNotFound;
-	meta = 0;
-	do
-	{
-		name = OSSymbol::withCString(obj_name);
-		if (!name) break;
-		meta = OSMetaClass::copyMetaClassWithName(name);
-		if (!meta) break;
-		identifier = meta->getKmodName();
-		if (!identifier) break;
-		cstr = identifier->getCStringNoCopy();
-		if (!cstr) break;
-		strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
-		ret = kIOReturnSuccess;
-	}
-	while (false);
-
-	OSSafeReleaseNULL(name);
-	if (meta) meta->releaseMetaClass();
-
-	return (ret);
+	IOReturn ret;
+	const OSMetaClass * meta;
+	const OSSymbol * name;
+	const OSSymbol * identifier;
+	const char * cstr;
+
+	if (!obj_name || !bundle_name) {
+		return kIOReturnBadArgument;
+	}
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}
+
+	ret = kIOReturnNotFound;
+	meta = 0;
+	do{
+		name = OSSymbol::withCString(obj_name);
+		if (!name) {
+			break;
+		}
+		meta = OSMetaClass::copyMetaClassWithName(name);
+		if (!meta) {
+			break;
+		}
+		identifier = meta->getKmodName();
+		if (!identifier) {
+			break;
+		}
+		cstr = identifier->getCStringNoCopy();
+		if (!cstr) {
+			break;
+		}
+		strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
+		ret = kIOReturnSuccess;
+	}while (false);
+
+	OSSafeReleaseNULL(name);
+	if (meta) {
+		meta->releaseMetaClass();
+	}
+
+	return ret;
 }
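Both lookups above use the same do { ... } while (false) construct: each failed step breaks out to a single shared cleanup block instead of duplicating the release calls on every early return. Reduced to its skeleton (intermediate steps abbreviated):

	ret = kIOReturnNotFound;
	meta = 0;
	do{
		name = OSSymbol::withCString(obj_name);
		if (!name) {
			break;                  // cleanup below still runs
		}
		meta = OSMetaClass::copyMetaClassWithName(name);
		if (!meta) {
			break;
		}
		/* ... remaining lookup steps ... */
		ret = kIOReturnSuccess;
	}while (false);

	OSSafeReleaseNULL(name);                // tolerates name == NULL
	if (meta) {
		meta->releaseMetaClass();
	}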

 /* Routine io_object_conforms_to */
-kern_return_t is_io_object_conforms_to(
+kern_return_t
+is_io_object_conforms_to(
 	io_object_t object,
 	io_name_t className,
 	boolean_t *conforms )
 {
-	if( !object)
-		return( kIOReturnBadArgument );
+	if (!object) {
+		return kIOReturnBadArgument;
+	}

-	*conforms = (0 != object->metaCast( className ));
+	*conforms = (0 != object->metaCast( className ));

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_object_get_retain_count */
-kern_return_t is_io_object_get_retain_count(
+kern_return_t
+is_io_object_get_retain_count(
 	io_object_t object,
 	uint32_t *retainCount )
 {
-	if( !object)
-		return( kIOReturnBadArgument );
+	if (!object) {
+		return kIOReturnBadArgument;
+	}

-	*retainCount = object->getRetainCount();
-	return( kIOReturnSuccess );
+	*retainCount = object->getRetainCount();
+	return kIOReturnSuccess;
 }

 /* Routine io_iterator_next */
-kern_return_t is_io_iterator_next(
+kern_return_t
+is_io_iterator_next(
 	io_object_t iterator,
 	io_object_t *object )
 {
-	IOReturn ret;
-	OSObject * obj;
-	OSIterator * iter;
-	IOUserIterator * uiter;
+	IOReturn ret;
+	OSObject * obj;
+	OSIterator * iter;
+	IOUserIterator * uiter;

-	if ((uiter = OSDynamicCast(IOUserIterator, iterator)))
-	{
+	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
 		obj = uiter->copyNextObject();
-	}
-	else if ((iter = OSDynamicCast(OSIterator, iterator)))
-	{
+	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
 		obj = iter->getNextObject();
-		if (obj) obj->retain();
-	}
-	else
-	{
-		return( kIOReturnBadArgument );
+		if (obj) {
+			obj->retain();
+		}
+	} else {
+		return kIOReturnBadArgument;
 	}

-	if( obj) {
-		*object = obj;
-		ret = kIOReturnSuccess;
-	} else
-		ret = kIOReturnNoDevice;
+	if (obj) {
+		*object = obj;
+		ret = kIOReturnSuccess;
+	} else {
+		ret = kIOReturnNoDevice;
+	}

-	return (ret);
+	return ret;
 }

 /* Routine io_iterator_reset */
-kern_return_t is_io_iterator_reset(
+kern_return_t
+is_io_iterator_reset(
 	io_object_t iterator )
 {
-	CHECK( OSIterator, iterator, iter );
+	CHECK( OSIterator, iterator, iter );

-	iter->reset();
+	iter->reset();

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_iterator_is_valid */
-kern_return_t is_io_iterator_is_valid(
+kern_return_t
+is_io_iterator_is_valid(
 	io_object_t iterator,
 	boolean_t *is_valid )
 {
-	CHECK( OSIterator, iterator, iter );
+	CHECK( OSIterator, iterator, iter );

-	*is_valid = iter->isValid();
+	*is_valid = iter->isValid();

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

-static kern_return_t internal_io_service_match_property_table(
+static kern_return_t
+internal_io_service_match_property_table(
 	io_service_t _service,
 	const char * matching,
 	mach_msg_type_number_t matching_size,
 	boolean_t *matches)
 {
-	CHECK( IOService, _service, service );
+	CHECK( IOService, _service, service );

-	kern_return_t kr;
-	OSObject * obj;
-	OSDictionary * dict;
+	kern_return_t kr;
+	OSObject * obj;
+	OSDictionary * dict;

-	assert(matching_size);
-	obj = OSUnserializeXML(matching, matching_size);
+	assert(matching_size);
+	obj = OSUnserializeXML(matching, matching_size);

-	if( (dict = OSDynamicCast( OSDictionary, obj))) {
-		*matches = service->passiveMatch( dict );
-		kr = kIOReturnSuccess;
-	} else
-		kr = kIOReturnBadArgument;
+	if ((dict = OSDynamicCast( OSDictionary, obj))) {
+		*matches = service->passiveMatch( dict );
+		kr = kIOReturnSuccess;
+	} else {
+		kr = kIOReturnBadArgument;
+	}

-	if( obj)
-		obj->release();
+	if (obj) {
+		obj->release();
+	}

-	return( kr );
+	return kr;
 }

 /* Routine io_service_match_property_table */
-kern_return_t is_io_service_match_property_table(
+kern_return_t
+is_io_service_match_property_table(
 	io_service_t service,
 	io_string_t matching,
 	boolean_t *matches )
 {
-	return (kIOReturnUnsupported);
+	return kIOReturnUnsupported;
 }

 /* Routine io_service_match_property_table_ool */
-kern_return_t is_io_service_match_property_table_ool(
+kern_return_t
+is_io_service_match_property_table_ool(
 	io_object_t service,
 	io_buf_ptr_t matching,
 	mach_msg_type_number_t matchingCnt,
 	kern_return_t *result,
 	boolean_t *matches )
 {
-	kern_return_t kr;
-	vm_offset_t data;
-	vm_map_offset_t map_data;
+	kern_return_t kr;
+	vm_offset_t data;
+	vm_map_offset_t map_data;

-	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
-	data = CAST_DOWN(vm_offset_t, map_data);
+	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
+	data = CAST_DOWN(vm_offset_t, map_data);

-	if( KERN_SUCCESS == kr) {
-		// must return success after vm_map_copyout() succeeds
-		*result = internal_io_service_match_property_table(service,
-		    (const char *)data, matchingCnt, matches );
-		vm_deallocate( kernel_map, data, matchingCnt );
-	}
+	if (KERN_SUCCESS == kr) {
+		// must return success after vm_map_copyout() succeeds
+		*result = internal_io_service_match_property_table(service,
+		    (const char *)data, matchingCnt, matches );
+		vm_deallocate( kernel_map, data, matchingCnt );
+	}

-	return( kr );
+	return kr;
 }

 /* Routine io_service_match_property_table_bin */
-kern_return_t is_io_service_match_property_table_bin(
+kern_return_t
+is_io_service_match_property_table_bin(
 	io_object_t service,
 	io_struct_inband_t matching,
 	mach_msg_type_number_t matchingCnt,
 	boolean_t *matches)
 {
-	return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
+	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
 }

-static kern_return_t internal_io_service_get_matching_services(
+static kern_return_t
+internal_io_service_get_matching_services(
 	mach_port_t master_port,
 	const char * matching,
 	mach_msg_type_number_t matching_size,
 	io_iterator_t *existing )
 {
-	kern_return_t kr;
-	OSObject * obj;
-	OSDictionary * dict;
+	kern_return_t kr;
+	OSObject * obj;
+	OSDictionary * dict;

-	if( master_port != master_device_port)
-		return( kIOReturnNotPrivileged);
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}

-	assert(matching_size);
-	obj = OSUnserializeXML(matching, matching_size);
+	assert(matching_size);
+	obj = OSUnserializeXML(matching, matching_size);

-	if( (dict = OSDynamicCast( OSDictionary, obj))) {
-		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
-		kr = kIOReturnSuccess;
-	} else
-		kr = kIOReturnBadArgument;
+	if ((dict = OSDynamicCast( OSDictionary, obj))) {
+		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
+		kr = kIOReturnSuccess;
+	} else {
+		kr = kIOReturnBadArgument;
+	}

-	if( obj)
-		obj->release();
+	if (obj) {
+		obj->release();
+	}

-	return( kr );
+	return kr;
 }

 /* Routine io_service_get_matching_services */
-kern_return_t is_io_service_get_matching_services(
+kern_return_t
+is_io_service_get_matching_services(
 	mach_port_t master_port,
 	io_string_t matching,
 	io_iterator_t *existing )
 {
-	return (kIOReturnUnsupported);
+	return kIOReturnUnsupported;
 }

 /* Routine io_service_get_matching_services_ool */
-kern_return_t is_io_service_get_matching_services_ool(
+kern_return_t
+is_io_service_get_matching_services_ool(
 	mach_port_t master_port,
 	io_buf_ptr_t matching,
 	mach_msg_type_number_t matchingCnt,
 	kern_return_t *result,
 	io_object_t *existing )
 {
-	kern_return_t kr;
-	vm_offset_t data;
-	vm_map_offset_t map_data;
+	kern_return_t kr;
+	vm_offset_t data;
+	vm_map_offset_t map_data;

-	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
-	data = CAST_DOWN(vm_offset_t, map_data);
+	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
+	data = CAST_DOWN(vm_offset_t, map_data);

-	if( KERN_SUCCESS == kr) {
-		// must return success after vm_map_copyout() succeeds
-		// and mig will copy out objects on success
-		*existing = 0;
-		*result = internal_io_service_get_matching_services(master_port,
-		    (const char *) data, matchingCnt, existing);
-		vm_deallocate( kernel_map, data, matchingCnt );
-	}
+	if (KERN_SUCCESS == kr) {
+		// must return success after vm_map_copyout() succeeds
+		// and mig will copy out objects on success
+		*existing = 0;
+		*result = internal_io_service_get_matching_services(master_port,
+		    (const char *) data, matchingCnt, existing);
+		vm_deallocate( kernel_map, data, matchingCnt );
+	}

-	return( kr );
+	return kr;
 }

 /* Routine io_service_get_matching_services_bin */
-kern_return_t is_io_service_get_matching_services_bin(
+kern_return_t
+is_io_service_get_matching_services_bin(
 	mach_port_t master_port,
 	io_struct_inband_t matching,
 	mach_msg_type_number_t matchingCnt,
 	io_object_t *existing)
 {
-	return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
+	return internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing);
 }
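All of the _ool variants in this stretch share one subtlety, spelled out in the repeated comment: once vm_map_copyout() succeeds, the routine must report KERN_SUCCESS back to MIG (which will then copy out objects and not re-free the consumed message), so the real status travels out-of-band through *result. The shared skeleton, with the handler name left hypothetical:

	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) matching);
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		*result = internal_handler((const char *) data, matchingCnt);  // hypothetical
		vm_deallocate(kernel_map, data, matchingCnt);                  // unmap locally
	}
	return kr;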

-static kern_return_t internal_io_service_get_matching_service(
+static kern_return_t
+internal_io_service_get_matching_service(
 	mach_port_t master_port,
 	const char * matching,
 	mach_msg_type_number_t matching_size,
 	io_service_t *service )
 {
-	kern_return_t kr;
-	OSObject * obj;
-	OSDictionary * dict;
+	kern_return_t kr;
+	OSObject * obj;
+	OSDictionary * dict;

-	if( master_port != master_device_port)
-		return( kIOReturnNotPrivileged);
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}

-	assert(matching_size);
-	obj = OSUnserializeXML(matching, matching_size);
+	assert(matching_size);
+	obj = OSUnserializeXML(matching, matching_size);

-	if( (dict = OSDynamicCast( OSDictionary, obj))) {
-		*service = IOService::copyMatchingService( dict );
-		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
-	} else
-		kr = kIOReturnBadArgument;
+	if ((dict = OSDynamicCast( OSDictionary, obj))) {
+		*service = IOService::copyMatchingService( dict );
+		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
+	} else {
+		kr = kIOReturnBadArgument;
+	}

-	if( obj)
-		obj->release();
+	if (obj) {
+		obj->release();
+	}

-	return( kr );
+	return kr;
 }

 /* Routine io_service_get_matching_service */
-kern_return_t is_io_service_get_matching_service(
+kern_return_t
+is_io_service_get_matching_service(
 	mach_port_t master_port,
 	io_string_t matching,
 	io_service_t *service )
 {
-	return (kIOReturnUnsupported);
+	return kIOReturnUnsupported;
 }

 /* Routine io_service_get_matching_services_ool */
-kern_return_t is_io_service_get_matching_service_ool(
+kern_return_t
+is_io_service_get_matching_service_ool(
 	mach_port_t master_port,
 	io_buf_ptr_t matching,
 	mach_msg_type_number_t matchingCnt,
 	kern_return_t *result,
 	io_object_t *service )
 {
-	kern_return_t kr;
-	vm_offset_t data;
-	vm_map_offset_t map_data;
+	kern_return_t kr;
+	vm_offset_t data;
+	vm_map_offset_t map_data;

-	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
-	data = CAST_DOWN(vm_offset_t, map_data);
+	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
+	data = CAST_DOWN(vm_offset_t, map_data);

-	if( KERN_SUCCESS == kr) {
-		// must return success after vm_map_copyout() succeeds
-		// and mig will copy out objects on success
-		*service = 0;
-		*result = internal_io_service_get_matching_service(master_port,
-		    (const char *) data, matchingCnt, service );
-		vm_deallocate( kernel_map, data, matchingCnt );
-	}
+	if (KERN_SUCCESS == kr) {
+		// must return success after vm_map_copyout() succeeds
+		// and mig will copy out objects on success
+		*service = 0;
+		*result = internal_io_service_get_matching_service(master_port,
+		    (const char *) data, matchingCnt, service );
+		vm_deallocate( kernel_map, data, matchingCnt );
+	}

-	return( kr );
+	return kr;
 }

 /* Routine io_service_get_matching_service_bin */
-kern_return_t is_io_service_get_matching_service_bin(
+kern_return_t
+is_io_service_get_matching_service_bin(
 	mach_port_t master_port,
 	io_struct_inband_t matching,
 	mach_msg_type_number_t matchingCnt,
 	io_object_t *service)
 {
-	return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
+	return internal_io_service_get_matching_service(master_port, matching, matchingCnt, service);
 }

-static kern_return_t internal_io_service_add_notification(
+static kern_return_t
+internal_io_service_add_notification(
 	mach_port_t master_port,
 	io_name_t notification_type,
 	const char * matching,
@@ -2342,82 +2499,90 @@ static kern_return_t internal_io_service_add_notification(
 	bool client64,
 	io_object_t * notification )
 {
-	IOServiceUserNotification * userNotify = 0;
-	IONotifier * notify = 0;
-	const OSSymbol * sym;
-	OSDictionary * dict;
-	IOReturn err;
-	unsigned long int userMsgType;
+	IOServiceUserNotification * userNotify = 0;
+	IONotifier * notify = 0;
+	const OSSymbol * sym;
+	OSDictionary * dict;
+	IOReturn err;
+	unsigned long int userMsgType;
+
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}
+
+	do {
+		err = kIOReturnNoResources;

-	if( master_port != master_device_port)
-		return( kIOReturnNotPrivileged);
+		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
+			return kIOReturnMessageTooLarge;
+		}
+
+		if (!(sym = OSSymbol::withCString( notification_type ))) {
+			err = kIOReturnNoResources;
+		}

-	do {
-		err = kIOReturnNoResources;
+		assert(matching_size);
+		dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
+		if (!dict) {
+			err = kIOReturnBadArgument;
+			continue;
+		}

-		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
+		if ((sym == gIOPublishNotification)
+		    || (sym == gIOFirstPublishNotification)) {
+			userMsgType = kIOServicePublishNotificationType;
+		} else if ((sym == gIOMatchedNotification)
+		    || (sym == gIOFirstMatchNotification)) {
+			userMsgType = kIOServiceMatchedNotificationType;
+		} else if ((sym == gIOTerminatedNotification)
+		    || (sym == gIOWillTerminateNotification)) {
+			userMsgType = kIOServiceTerminatedNotificationType;
+		} else {
+			userMsgType = kLastIOKitNotificationType;
+		}

-		if( !(sym = OSSymbol::withCString( notification_type )))
-			err = kIOReturnNoResources;
+		userNotify = new IOServiceUserNotification;

-		assert(matching_size);
-		dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
-		if (!dict) {
-			err = kIOReturnBadArgument;
-			continue;
-		}
-
-		if( (sym == gIOPublishNotification)
-		 || (sym == gIOFirstPublishNotification))
-			userMsgType = kIOServicePublishNotificationType;
-		else if( (sym == gIOMatchedNotification)
-		 || (sym == gIOFirstMatchNotification))
-			userMsgType = kIOServiceMatchedNotificationType;
-		else if ((sym == gIOTerminatedNotification)
-		 || (sym == gIOWillTerminateNotification))
-			userMsgType = kIOServiceTerminatedNotificationType;
-		else
-			userMsgType = kLastIOKitNotificationType;
-
-		userNotify = new IOServiceUserNotification;
-
-		if( userNotify && !userNotify->init( port, userMsgType,
-		    reference, referenceSize, client64)) {
-			userNotify->release();
-			userNotify = 0;
-		}
-		if( !userNotify)
-			continue;
-
-		notify = IOService::addMatchingNotification( sym, dict,
-		    &userNotify->_handler, userNotify );
-		if( notify) {
-			*notification = userNotify;
-			userNotify->setNotification( notify );
-			err = kIOReturnSuccess;
-		} else
-			err = kIOReturnUnsupported;
-
-	} while( false );
-
-	if ((kIOReturnSuccess != err) && userNotify)
-	{
-		userNotify->invalidatePort();
-		userNotify->release();
-		userNotify = 0;
-	}
-
-	if( sym)
-		sym->release();
-	if( dict)
-		dict->release();
+		if (userNotify && !userNotify->init( port, userMsgType,
+		    reference, referenceSize, client64)) {
+			userNotify->release();
+			userNotify = 0;
+		}
+		if (!userNotify) {
+			continue;
+		}
+
+		notify = IOService::addMatchingNotification( sym, dict,
+		    &userNotify->_handler, userNotify );
+		if (notify) {
+			*notification = userNotify;
+			userNotify->setNotification( notify );
+			err = kIOReturnSuccess;
+		} else {
+			err = kIOReturnUnsupported;
+		}
+	} while (false);
+
+	if ((kIOReturnSuccess != err) && userNotify) {
+		userNotify->invalidatePort();
+		userNotify->release();
+		userNotify = 0;
+	}
+
+	if (sym) {
+		sym->release();
+	}
+	if (dict) {
+		dict->release();
+	}

-	return( err );
+	return err;
 }

 /* Routine io_service_add_notification */
-kern_return_t is_io_service_add_notification(
+kern_return_t
+is_io_service_add_notification(
 	mach_port_t master_port,
 	io_name_t notification_type,
 	io_string_t matching,
@@ -2426,11 +2591,12 @@ kern_return_t is_io_service_add_notification(
 	mach_msg_type_number_t referenceCnt,
 	io_object_t * notification )
 {
-	return (kIOReturnUnsupported);
+	return kIOReturnUnsupported;
 }

 /* Routine io_service_add_notification_64 */
-kern_return_t is_io_service_add_notification_64(
+kern_return_t
+is_io_service_add_notification_64(
 	mach_port_t master_port,
 	io_name_t notification_type,
 	io_string_t matching,
@@ -2439,11 +2605,12 @@ kern_return_t is_io_service_add_notification_64(
 	mach_msg_type_number_t referenceCnt,
 	io_object_t *notification )
 {
-	return (kIOReturnUnsupported);
+	return kIOReturnUnsupported;
 }

 /* Routine io_service_add_notification_bin */
-kern_return_t is_io_service_add_notification_bin
+kern_return_t
+is_io_service_add_notification_bin
 (
 	mach_port_t master_port,
 	io_name_t notification_type,
@@ -2454,13 +2621,22 @@ kern_return_t is_io_service_add_notification_bin
 	mach_msg_type_number_t referenceCnt,
 	io_object_t *notification)
 {
-	return (internal_io_service_add_notification(master_port, notification_type,
-	    matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
-	    false, notification));
+	io_async_ref_t zreference;
+
+	if (referenceCnt > ASYNC_REF_COUNT) {
+		return kIOReturnBadArgument;
+	}
+	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+	return internal_io_service_add_notification(master_port, notification_type,
+	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
+	    false, notification);
 }

 /* Routine io_service_add_notification_bin_64 */
-kern_return_t is_io_service_add_notification_bin_64
+kern_return_t
+is_io_service_add_notification_bin_64
 (
 	mach_port_t master_port,
 	io_name_t notification_type,
@@ -2471,12 +2647,21 @@ kern_return_t is_io_service_add_notification_bin_64
 	mach_msg_type_number_t referenceCnt,
 	io_object_t *notification)
 {
-	return (internal_io_service_add_notification(master_port, notification_type,
-	    matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
-	    true, notification));
+	io_async_ref64_t zreference;
+
+	if (referenceCnt > ASYNC_REF64_COUNT) {
+		return kIOReturnBadArgument;
+	}
+	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+	return internal_io_service_add_notification(master_port, notification_type,
+	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
+	    true, notification);
 }
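The zreference pattern introduced in these two routines (and repeated for the _ool and interest variants below) is the substantive part of this hunk: referenceCnt arrives in the MIG message and was previously passed through untrusted, while the internal routine always reads a full sizeof(io_async_ref_t) or sizeof(io_async_ref64_t). The stack copy bounds-checks the count, copies only the supplied entries, and zero-fills the tail, so the internal routine only ever sees a fully initialized array:

	io_async_ref64_t zreference;

	if (referenceCnt > ASYNC_REF64_COUNT) {
		return kIOReturnBadArgument;    // reject oversized counts up front
	}
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt],
	    (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
	// zreference is now safe to hand to internal_io_service_add_notification()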

-static kern_return_t internal_io_service_add_notification_ool(
+static kern_return_t
+internal_io_service_add_notification_ool(
 	mach_port_t master_port,
 	io_name_t notification_type,
 	io_buf_ptr_t matching,
@@ -2488,27 +2673,28 @@ static kern_return_t internal_io_service_add_notification_ool(
 	kern_return_t *result,
 	io_object_t *notification )
 {
-	kern_return_t kr;
-	vm_offset_t data;
-	vm_map_offset_t map_data;
+	kern_return_t kr;
+	vm_offset_t data;
+	vm_map_offset_t map_data;

-	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
-	data = CAST_DOWN(vm_offset_t, map_data);
+	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
+	data = CAST_DOWN(vm_offset_t, map_data);

-	if( KERN_SUCCESS == kr) {
-		// must return success after vm_map_copyout() succeeds
-		// and mig will copy out objects on success
-		*notification = 0;
-		*result = internal_io_service_add_notification( master_port, notification_type,
-		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
-		vm_deallocate( kernel_map, data, matchingCnt );
-	}
+	if (KERN_SUCCESS == kr) {
+		// must return success after vm_map_copyout() succeeds
+		// and mig will copy out objects on success
+		*notification = 0;
+		*result = internal_io_service_add_notification( master_port, notification_type,
+		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
+		vm_deallocate( kernel_map, data, matchingCnt );
+	}

-	return( kr );
+	return kr;
 }

 /* Routine io_service_add_notification_ool */
-kern_return_t is_io_service_add_notification_ool(
+kern_return_t
+is_io_service_add_notification_ool(
 	mach_port_t master_port,
 	io_name_t notification_type,
 	io_buf_ptr_t matching,
@@ -2519,13 +2705,22 @@ kern_return_t is_io_service_add_notification_ool(
 	kern_return_t *result,
 	io_object_t *notification )
 {
-	return (internal_io_service_add_notification_ool(master_port, notification_type,
-	    matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
-	    false, result, notification));
+	io_async_ref_t zreference;
+
+	if (referenceCnt > ASYNC_REF_COUNT) {
+		return kIOReturnBadArgument;
+	}
+	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+	return internal_io_service_add_notification_ool(master_port, notification_type,
+	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
+	    false, result, notification);
 }

 /* Routine io_service_add_notification_ool_64 */
-kern_return_t is_io_service_add_notification_ool_64(
+kern_return_t
+is_io_service_add_notification_ool_64(
 	mach_port_t master_port,
 	io_name_t notification_type,
 	io_buf_ptr_t matching,
@@ -2536,13 +2731,22 @@ kern_return_t is_io_service_add_notification_ool_64(
 	kern_return_t *result,
 	io_object_t *notification )
 {
-	return (internal_io_service_add_notification_ool(master_port, notification_type,
-	    matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
-	    true, result, notification));
+	io_async_ref64_t zreference;
+
+	if (referenceCnt > ASYNC_REF64_COUNT) {
+		return kIOReturnBadArgument;
+	}
+	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+	return internal_io_service_add_notification_ool(master_port, notification_type,
+	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
+	    true, result, notification);
 }

 /* Routine io_service_add_notification_old */
-kern_return_t is_io_service_add_notification_old(
+kern_return_t
+is_io_service_add_notification_old(
 	mach_port_t master_port,
 	io_name_t notification_type,
 	io_string_t matching,
@@ -2551,81 +2755,92 @@ kern_return_t is_io_service_add_notification_old(
 	natural_t ref,
 	io_object_t * notification )
 {
-	return( is_io_service_add_notification( master_port, notification_type,
-	    matching, port, &ref, 1, notification ));
+	return is_io_service_add_notification( master_port, notification_type,
+	    matching, port, &ref, 1, notification );
 }

-static kern_return_t internal_io_service_add_interest_notification(
-	io_object_t _service,
-	io_name_t type_of_interest,
-	mach_port_t port,
+static kern_return_t
+internal_io_service_add_interest_notification(
+	io_object_t _service,
+	io_name_t type_of_interest,
+	mach_port_t port,
 	void * reference,
 	vm_size_t referenceSize,
 	bool client64,
-	io_object_t * notification )
+	io_object_t * notification )
 {
+	IOServiceMessageUserNotification * userNotify = 0;
+	IONotifier * notify = 0;
+	const OSSymbol * sym;
+	IOReturn err;
+
+	CHECK( IOService, _service, service );
+
+	err = kIOReturnNoResources;
+	if ((sym = OSSymbol::withCString( type_of_interest ))) {
+		do {
+			userNotify = new IOServiceMessageUserNotification;
+
+			if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
+			    reference, referenceSize,
+			    kIOUserNotifyMaxMessageSize,
+			    client64 )) {
+				userNotify->release();
+				userNotify = 0;
+			}
+			if (!userNotify) {
+				continue;
+			}
+
+			notify = service->registerInterest( sym,
+			    &userNotify->_handler, userNotify );
+			if (notify) {
+				*notification = userNotify;
+				userNotify->setNotification( notify );
+				err = kIOReturnSuccess;
+			} else {
+				err = kIOReturnUnsupported;
+			}

+			sym->release();
+		} while (false);
+	}

-	IOServiceMessageUserNotification * userNotify = 0;
-	IONotifier * notify = 0;
-	const OSSymbol * sym;
-	IOReturn err;
-
-	CHECK( IOService, _service, service );
-
-	err = kIOReturnNoResources;
-	if( (sym = OSSymbol::withCString( type_of_interest ))) do {
-
-		userNotify = new IOServiceMessageUserNotification;
-
-		if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
-		    reference, referenceSize,
-		    kIOUserNotifyMaxMessageSize,
-		    client64 )) {
-			userNotify->release();
-			userNotify = 0;
-		}
-		if( !userNotify)
-			continue;
-
-		notify = service->registerInterest( sym,
-		    &userNotify->_handler, userNotify );
-		if( notify) {
-			*notification = userNotify;
-			userNotify->setNotification( notify );
-			err = kIOReturnSuccess;
-		} else
-			err = kIOReturnUnsupported;
-
-		sym->release();
-
-	} while( false );
-
-	if ((kIOReturnSuccess != err) && userNotify)
-	{
-		userNotify->invalidatePort();
-		userNotify->release();
-		userNotify = 0;
-	}
+	if ((kIOReturnSuccess != err) && userNotify) {
+		userNotify->invalidatePort();
+		userNotify->release();
+		userNotify = 0;
+	}

-	return( err );
+	return err;
 }

 /* Routine io_service_add_message_notification */
-kern_return_t is_io_service_add_interest_notification(
-	io_object_t service,
-	io_name_t type_of_interest,
-	mach_port_t port,
+kern_return_t
+is_io_service_add_interest_notification(
+	io_object_t service,
+	io_name_t type_of_interest,
+	mach_port_t port,
 	io_async_ref_t reference,
 	mach_msg_type_number_t referenceCnt,
-	io_object_t * notification )
+	io_object_t * notification )
 {
-	return (internal_io_service_add_interest_notification(service, type_of_interest,
-	    port, &reference[0], sizeof(io_async_ref_t), false, notification));
+	io_async_ref_t zreference;
+
+	if (referenceCnt > ASYNC_REF_COUNT) {
+		return kIOReturnBadArgument;
+	}
+	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+	return internal_io_service_add_interest_notification(service, type_of_interest,
+	    port, &zreference[0], sizeof(io_async_ref_t), false, notification);
 }

 /* Routine io_service_add_interest_notification_64 */
-kern_return_t is_io_service_add_interest_notification_64(
+kern_return_t
+is_io_service_add_interest_notification_64(
 	io_object_t service,
 	io_name_t type_of_interest,
 	mach_port_t wake_port,
@@ -2633,137 +2848,157 @@ kern_return_t is_io_service_add_interest_notification_64(
 	mach_msg_type_number_t referenceCnt,
 	io_object_t *notification )
 {
-	return (internal_io_service_add_interest_notification(service, type_of_interest,
-	    wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
+	io_async_ref64_t zreference;
+
+	if (referenceCnt > ASYNC_REF64_COUNT) {
+		return kIOReturnBadArgument;
+	}
+	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+	return internal_io_service_add_interest_notification(service, type_of_interest,
+	    wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
 }

 /* Routine io_service_acknowledge_notification */
-kern_return_t is_io_service_acknowledge_notification(
+kern_return_t
+is_io_service_acknowledge_notification(
 	io_object_t _service,
 	natural_t notify_ref,
 	natural_t response )
 {
-	CHECK( IOService, _service, service );
+	CHECK( IOService, _service, service );

-	return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
-	    (IOOptionBits) response ));
-
+	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
+	    (IOOptionBits) response );
 }

 /* Routine io_connect_get_semaphore */
-kern_return_t is_io_connect_get_notification_semaphore(
+kern_return_t
+is_io_connect_get_notification_semaphore(
 	io_connect_t connection,
 	natural_t notification_type,
 	semaphore_t *semaphore )
 {
-	CHECK( IOUserClient, connection, client );
+	CHECK( IOUserClient, connection, client );

-	IOStatisticsClientCall();
-	return( client->getNotificationSemaphore( (UInt32) notification_type,
-	    semaphore ));
+	IOStatisticsClientCall();
+	return client->getNotificationSemaphore((UInt32) notification_type,
+	    semaphore );
 }

 /* Routine io_registry_get_root_entry */
-kern_return_t is_io_registry_get_root_entry(
+kern_return_t
+is_io_registry_get_root_entry(
 	mach_port_t master_port,
 	io_object_t *root )
 {
-	IORegistryEntry * entry;
+	IORegistryEntry * entry;

-	if( master_port != master_device_port)
-		return( kIOReturnNotPrivileged);
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}

-	entry = IORegistryEntry::getRegistryRoot();
-	if( entry)
-		entry->retain();
-	*root = entry;
+	entry = IORegistryEntry::getRegistryRoot();
+	if (entry) {
+		entry->retain();
+	}
+	*root = entry;

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_registry_create_iterator */
-kern_return_t is_io_registry_create_iterator(
+kern_return_t
+is_io_registry_create_iterator(
 	mach_port_t master_port,
 	io_name_t plane,
 	uint32_t options,
 	io_object_t *iterator )
 {
-	if( master_port != master_device_port)
-		return( kIOReturnNotPrivileged);
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}

-	*iterator = IOUserIterator::withIterator(
-		IORegistryIterator::iterateOver(
-		IORegistryEntry::getPlane( plane ), options ));
+	*iterator = IOUserIterator::withIterator(
+		IORegistryIterator::iterateOver(
+			IORegistryEntry::getPlane( plane ), options ));

-	return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
+	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
 }

 /* Routine io_registry_entry_create_iterator */
-kern_return_t is_io_registry_entry_create_iterator(
+kern_return_t
+is_io_registry_entry_create_iterator(
 	io_object_t registry_entry,
 	io_name_t plane,
 	uint32_t options,
 	io_object_t *iterator )
 {
-	CHECK( IORegistryEntry, registry_entry, entry );
+	CHECK( IORegistryEntry, registry_entry, entry );

-	*iterator = IOUserIterator::withIterator(
-		IORegistryIterator::iterateOver( entry,
+	*iterator = IOUserIterator::withIterator(
+		IORegistryIterator::iterateOver( entry,
 			IORegistryEntry::getPlane( plane ), options ));

-	return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
+	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
 }

 /* Routine io_registry_iterator_enter */
-kern_return_t is_io_registry_iterator_enter_entry(
+kern_return_t
+is_io_registry_iterator_enter_entry(
 	io_object_t iterator )
 {
-	CHECKLOCKED( IORegistryIterator, iterator, iter );
+	CHECKLOCKED( IORegistryIterator, iterator, iter );

-	IOLockLock(oIter->lock);
-	iter->enterEntry();
-	IOLockUnlock(oIter->lock);
+	IOLockLock(oIter->lock);
+	iter->enterEntry();
+	IOLockUnlock(oIter->lock);

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_registry_iterator_exit */
-kern_return_t is_io_registry_iterator_exit_entry(
+kern_return_t
+is_io_registry_iterator_exit_entry(
 	io_object_t iterator )
 {
-	bool didIt;
+	bool didIt;

-	CHECKLOCKED( IORegistryIterator, iterator, iter );
+	CHECKLOCKED( IORegistryIterator, iterator, iter );

-	IOLockLock(oIter->lock);
-	didIt = iter->exitEntry();
-	IOLockUnlock(oIter->lock);
+	IOLockLock(oIter->lock);
+	didIt = iter->exitEntry();
+	IOLockUnlock(oIter->lock);

-	return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
+	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
 }

 /* Routine io_registry_entry_from_path */
-kern_return_t is_io_registry_entry_from_path(
+kern_return_t
+is_io_registry_entry_from_path(
 	mach_port_t master_port,
 	io_string_t path,
 	io_object_t *registry_entry )
 {
-	IORegistryEntry * entry;
+	IORegistryEntry * entry;

-	if( master_port != master_device_port)
-		return( kIOReturnNotPrivileged);
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}

-	entry = IORegistryEntry::fromPath( path );
+	entry = IORegistryEntry::fromPath( path );

-	*registry_entry = entry;
+	*registry_entry = entry;

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_registry_entry_from_path */
-kern_return_t is_io_registry_entry_from_path_ool(
+kern_return_t
+is_io_registry_entry_from_path_ool(
 	mach_port_t master_port,
 	io_string_inband_t path,
 	io_buf_ptr_t path_ool,
@@ -2771,423 +3006,463 @@ kern_return_t is_io_registry_entry_from_path_ool(
 	kern_return_t *result,
 	io_object_t *registry_entry)
 {
-	IORegistryEntry * entry;
-	vm_map_offset_t map_data;
-	const char * cpath;
-	IOReturn res;
-	kern_return_t err;
+	IORegistryEntry * entry;
+	vm_map_offset_t map_data;
+	const char * cpath;
+	IOReturn res;
+	kern_return_t err;

-	if (master_port != master_device_port) return(kIOReturnNotPrivileged);
+	if (master_port != master_device_port) {
+		return kIOReturnNotPrivileged;
+	}

-	map_data = 0;
-	entry = 0;
-	res = err = KERN_SUCCESS;
-	if (path[0]) cpath = path;
-	else
-	{
-		if (!path_oolCnt) return(kIOReturnBadArgument);
-		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
+	map_data = 0;
+	entry = 0;
+	res = err = KERN_SUCCESS;
+	if (path[0]) {
+		cpath = path;
+	} else {
+		if (!path_oolCnt) {
+			return kIOReturnBadArgument;
+		}
+		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
+			return kIOReturnMessageTooLarge;
+		}

-		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
-		if (KERN_SUCCESS == err)
-		{
-			// must return success to mig after vm_map_copyout() succeeds, so result is actual
-			cpath = CAST_DOWN(const char *, map_data);
-			if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
+		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
+		if (KERN_SUCCESS == err) {
+			// must return success to mig after vm_map_copyout() succeeds, so result is actual
+			cpath = CAST_DOWN(const char *, map_data);
+			if (cpath[path_oolCnt - 1]) {
+				res = kIOReturnBadArgument;
+			}
+		}
 	}
-	}

-	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
-	{
-		entry = IORegistryEntry::fromPath(cpath);
-		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
-	}
+	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
+		entry = IORegistryEntry::fromPath(cpath);
+		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
+	}

-	if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
+	if (map_data) {
+		vm_deallocate(kernel_map, map_data, path_oolCnt);
+	}

-	if (KERN_SUCCESS != err) res = err;
-	*registry_entry = entry;
-	*result = res;
+	if (KERN_SUCCESS != err) {
+		res = err;
+	}
+	*registry_entry = entry;
+	*result = res;

-	return (err);
+	return err;
 }

 /* Routine io_registry_entry_in_plane */
-kern_return_t is_io_registry_entry_in_plane(
+kern_return_t
+is_io_registry_entry_in_plane(
 	io_object_t registry_entry,
 	io_name_t plane,
 	boolean_t *inPlane )
 {
-	CHECK( IORegistryEntry, registry_entry, entry );
+	CHECK( IORegistryEntry, registry_entry, entry );

-	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
+	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_registry_entry_get_path */
-kern_return_t is_io_registry_entry_get_path(
+kern_return_t
+is_io_registry_entry_get_path(
 	io_object_t registry_entry,
 	io_name_t plane,
 	io_string_t path )
 {
-	int length;
-	CHECK( IORegistryEntry, registry_entry, entry );
+	int length;
+	CHECK( IORegistryEntry, registry_entry, entry );

-	length = sizeof( io_string_t);
-	if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
-		return( kIOReturnSuccess );
-	else
-		return( kIOReturnBadArgument );
+	length = sizeof(io_string_t);
+	if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
+		return kIOReturnSuccess;
+	} else {
+		return kIOReturnBadArgument;
+	}
 }

 /* Routine io_registry_entry_get_path */
-kern_return_t is_io_registry_entry_get_path_ool(
+kern_return_t
+is_io_registry_entry_get_path_ool(
 	io_object_t registry_entry,
 	io_name_t plane,
 	io_string_inband_t path,
 	io_buf_ptr_t *path_ool,
 	mach_msg_type_number_t *path_oolCnt)
 {
-	enum { kMaxPath = 16384 };
-	IOReturn err;
-	int length;
-	char * buf;
+	enum { kMaxPath = 16384 };
+	IOReturn err;
+	int length;
+	char * buf;

-	CHECK( IORegistryEntry, registry_entry, entry );
+	CHECK( IORegistryEntry, registry_entry, entry );

-	*path_ool = NULL;
-	*path_oolCnt = 0;
-	length = sizeof(io_string_inband_t);
-	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
-	else
-	{
-		length = kMaxPath;
-		buf = IONew(char, length);
-		if (!buf) err = kIOReturnNoMemory;
-		else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
-		else
-		{
-			*path_oolCnt = length;
-			err = copyoutkdata(buf, length, path_ool);
+	*path_ool = NULL;
+	*path_oolCnt = 0;
+	length = sizeof(io_string_inband_t);
+	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
+		err = kIOReturnSuccess;
+	} else {
+		length = kMaxPath;
+		buf = IONew(char, length);
+		if (!buf) {
+			err = kIOReturnNoMemory;
+		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
+			err = kIOReturnError;
+		} else {
+			*path_oolCnt = length;
+			err = copyoutkdata(buf, length, path_ool);
+		}
+		if (buf) {
+			IODelete(buf, char, kMaxPath);
+		}
 	}
-		if (buf) IODelete(buf, char, kMaxPath);
-	}
-	return (err);
+	return err;
 }

 /* Routine io_registry_entry_get_name */
-kern_return_t is_io_registry_entry_get_name(
+kern_return_t
+is_io_registry_entry_get_name(
 	io_object_t registry_entry,
 	io_name_t name )
 {
-	CHECK( IORegistryEntry, registry_entry, entry );
+	CHECK( IORegistryEntry, registry_entry, entry );

-	strncpy( name, entry->getName(), sizeof( io_name_t));
+	strncpy( name, entry->getName(), sizeof(io_name_t));

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_registry_entry_get_name_in_plane */
-kern_return_t is_io_registry_entry_get_name_in_plane(
+kern_return_t
+is_io_registry_entry_get_name_in_plane(
 	io_object_t registry_entry,
 	io_name_t planeName,
 	io_name_t name )
 {
-	const IORegistryPlane * plane;
-	CHECK( IORegistryEntry, registry_entry, entry );
+	const IORegistryPlane * plane;
+	CHECK( IORegistryEntry, registry_entry, entry );

-	if( planeName[0])
-		plane = IORegistryEntry::getPlane( planeName );
-	else
-		plane = 0;
+	if (planeName[0]) {
+		plane = IORegistryEntry::getPlane( planeName );
+	} else {
+		plane = 0;
+	}

-	strncpy( name, entry->getName( plane), sizeof( io_name_t));
+	strncpy( name, entry->getName( plane), sizeof(io_name_t));

-	return( kIOReturnSuccess );
+	return kIOReturnSuccess;
 }

 /* Routine io_registry_entry_get_location_in_plane */
-kern_return_t is_io_registry_entry_get_location_in_plane(
+kern_return_t
+is_io_registry_entry_get_location_in_plane(
 	io_object_t registry_entry,
 	io_name_t planeName,
 	io_name_t location )
 {
-	const IORegistryPlane * plane;
-	CHECK( IORegistryEntry, registry_entry, entry );
+	const IORegistryPlane * plane;
+	CHECK( IORegistryEntry, registry_entry, entry );

-	if( planeName[0])
-		plane = IORegistryEntry::getPlane( planeName );
-	else
-		plane = 0;
+	if (planeName[0]) {
+		plane = IORegistryEntry::getPlane( planeName );
+	} else {
+		plane = 0;
+	}

-	const char * cstr = entry->getLocation( plane );
+	const char * cstr = entry->getLocation( plane );

-	if( cstr) {
-		strncpy( location, cstr, sizeof( io_name_t));
-		return( kIOReturnSuccess );
-	} else
-		return( kIOReturnNotFound );
+	if (cstr) {
+		strncpy( location, cstr, sizeof(io_name_t));
+		return kIOReturnSuccess;
+	} else {
+		return kIOReturnNotFound;
+	}
 }

 /* Routine io_registry_entry_get_registry_entry_id */
-kern_return_t is_io_registry_entry_get_registry_entry_id(
+kern_return_t
+is_io_registry_entry_get_registry_entry_id(
 	io_object_t registry_entry,
 	uint64_t *entry_id )
 {
-	CHECK( IORegistryEntry, registry_entry, entry );
+	CHECK( IORegistryEntry, registry_entry, entry );

-	*entry_id = entry->getRegistryEntryID();
+	*entry_id = entry->getRegistryEntryID();

-	return (kIOReturnSuccess);
+	return kIOReturnSuccess;
 }

 /* Routine io_registry_entry_get_property */
-kern_return_t is_io_registry_entry_get_property_bytes(
+kern_return_t
+is_io_registry_entry_get_property_bytes(
 	io_object_t registry_entry,
 	io_name_t property_name,
 	io_struct_inband_t buf,
 	mach_msg_type_number_t *dataCnt )
 {
-	OSObject * obj;
-	OSData * data;
-	OSString * str;
-	OSBoolean * boo;
-	OSNumber * off;
-	UInt64 offsetBytes;
-	unsigned int len = 0;
-	const void * bytes = 0;
-	IOReturn ret = kIOReturnSuccess;
+	OSObject * obj;
+	OSData * data;
+	OSString * str;
+	OSBoolean * boo;
+	OSNumber * off;
+	UInt64 offsetBytes;
+	unsigned int len = 0;
+	const void * bytes = 0;
+	IOReturn ret = kIOReturnSuccess;

-	CHECK( IORegistryEntry, registry_entry, entry );
+	CHECK( IORegistryEntry, registry_entry, entry );

 #if CONFIG_MACF
-	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
-		return kIOReturnNotPermitted;
+	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
+		return kIOReturnNotPermitted;
+	}
 #endif

-	obj = entry->copyProperty(property_name);
-	if( !obj)
-		return( kIOReturnNoResources );
-
-	// One day OSData will be a common container base class
-	// until then...
-	if( (data = OSDynamicCast( OSData, obj ))) {
-		len = data->getLength();
-		bytes = data->getBytesNoCopy();
-		if (!data->isSerializable()) len = 0;
-
-	} else if( (str = OSDynamicCast( OSString, obj ))) {
-		len = str->getLength() + 1;
-		bytes = str->getCStringNoCopy();
-
-	} else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
-		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
-		bytes = boo->isTrue() ? "Yes" : "No";
-
-	} else if( (off = OSDynamicCast( OSNumber, obj ))) {
-		offsetBytes = off->unsigned64BitValue();
-		len = off->numberOfBytes();
-		if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
-		bytes = &offsetBytes;
+	obj = entry->copyProperty(property_name);
+	if (!obj) {
+		return kIOReturnNoResources;
+	}
+
+	// One day OSData will be a common container base class
+	// until then...
+	if ((data = OSDynamicCast( OSData, obj ))) {
+		len = data->getLength();
+		bytes = data->getBytesNoCopy();
+		if (!data->isSerializable()) {
+			len = 0;
+		}
+	} else if ((str = OSDynamicCast( OSString, obj ))) {
+		len = str->getLength() + 1;
+		bytes = str->getCStringNoCopy();
+	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
+		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
+		bytes = boo->isTrue() ? "Yes" : "No";
+	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
+		offsetBytes = off->unsigned64BitValue();
+		len = off->numberOfBytes();
+		if (len > sizeof(offsetBytes)) {
+			len = sizeof(offsetBytes);
+		}
+		bytes = &offsetBytes;
 #ifdef __BIG_ENDIAN__
-		bytes = (const void *)
-		    (((UInt32) bytes) + (sizeof( UInt64) - len));
+		bytes = (const void *)
+		    (((UInt32) bytes) + (sizeof(UInt64) - len));
 #endif
+	} else {
+		ret = kIOReturnBadArgument;
+	}

-	} else
-		ret = kIOReturnBadArgument;
-
-	if( bytes) {
-		if( *dataCnt < len)
-			ret = kIOReturnIPCError;
-		else {
-			*dataCnt = len;
-			bcopy( bytes, buf, len );
+	if (bytes) {
+		if (*dataCnt < len) {
+			ret = kIOReturnIPCError;
+		} else {
+			*dataCnt = len;
+			bcopy( bytes, buf, len );
+		}
 	}
-	}
-	obj->release();
+	obj->release();

-	return( ret );
+	return ret;
 }
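For context on the marshaling above: a caller receives at most sizeof(io_struct_inband_t) bytes, formatted by the property's dynamic type — raw OSData bytes, a NUL-terminated OSString, the literal "Yes"/"No" for OSBoolean, or up to eight OSNumber bytes. A hedged user-space sketch of driving this routine through what I believe is its IOKit.framework wrapper (the wrapper name and the registry path are my assumptions, not taken from this patch):

#include <IOKit/IOKitLib.h>
#include <stdio.h>

int
main(void)
{
	// Assumed to be the user-space entry point that lands in
	// is_io_registry_entry_get_property_bytes(); "IOService:/" is
	// just an illustrative registry path.
	io_registry_entry_t entry =
	    IORegistryEntryFromPath(kIOMasterPortDefault, "IOService:/");
	if (entry == MACH_PORT_NULL) {
		return 1;
	}
	io_struct_inband_t buf;
	uint32_t size = sizeof(buf);
	if (IORegistryEntryGetProperty(entry, "IOClass", buf, &size) == KERN_SUCCESS) {
		// For an OSString-typed property the buffer is NUL-terminated.
		printf("IOClass = %.*s\n", (int) size, buf);
	}
	IOObjectRelease(entry);
	return 0;
}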

 /* Routine io_registry_entry_get_property */
-kern_return_t is_io_registry_entry_get_property(
+kern_return_t
+is_io_registry_entry_get_property(
 	io_object_t registry_entry,
 	io_name_t property_name,
 	io_buf_ptr_t *properties,
 	mach_msg_type_number_t *propertiesCnt )
 {
-	kern_return_t err;
-	vm_size_t len;
-	OSObject * obj;
+	kern_return_t err;
+	vm_size_t len;
+	OSObject * obj;

-	CHECK( IORegistryEntry, registry_entry, entry );
+	CHECK( IORegistryEntry, registry_entry, entry );

 #if CONFIG_MACF
-	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
-		return kIOReturnNotPermitted;
+	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
+		return kIOReturnNotPermitted;
+	}
 #endif

-	obj = entry->copyProperty(property_name);
-	if( !obj)
-		return( kIOReturnNotFound );
-
-	OSSerialize * s = OSSerialize::withCapacity(4096);
-	if( !s) {
-		obj->release();
-		return( kIOReturnNoMemory );
-	}
+	obj = entry->copyProperty(property_name);
+	if (!obj) {
+		return kIOReturnNotFound;
+	}

-	if( obj->serialize( s )) {
-		len = s->getLength();
-		*propertiesCnt = len;
-		err = copyoutkdata( s->text(), len, properties );
+	OSSerialize * s = OSSerialize::withCapacity(4096);
+	if (!s) {
+		obj->release();
+		return kIOReturnNoMemory;
+	}

-	} else
-		err = kIOReturnUnsupported;
+	if (obj->serialize( s )) {
+		len = s->getLength();
+		*propertiesCnt = len;
+		err = copyoutkdata( s->text(), len, properties );
+	} else {
+		err = kIOReturnUnsupported;
+	}

-	s->release();
-	obj->release();
+	s->release();
+	obj->release();

-	return( err );
+	return err;
 }

 /* Routine io_registry_entry_get_property_recursively */
-kern_return_t is_io_registry_entry_get_property_recursively(
+kern_return_t
+is_io_registry_entry_get_property_recursively(
 	io_object_t registry_entry,
 	io_name_t plane,
 	io_name_t property_name,
-	uint32_t options,
+	uint32_t options,
 	io_buf_ptr_t *properties,
 	mach_msg_type_number_t *propertiesCnt )
 {
-	kern_return_t err;
-	vm_size_t len;
-	OSObject * obj;
+	kern_return_t err;
+	vm_size_t len;
+	OSObject * obj;

-	CHECK( IORegistryEntry, registry_entry, entry );
+	CHECK( IORegistryEntry, registry_entry, entry );

 #if CONFIG_MACF
-	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
-		return kIOReturnNotPermitted;
+	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
+		return kIOReturnNotPermitted;
+	}
 #endif

-	obj = entry->copyProperty( property_name,
-	    IORegistryEntry::getPlane( plane ), options );
-	if( !obj)
-		return( kIOReturnNotFound );
-
-	OSSerialize * s = OSSerialize::withCapacity(4096);
-	if( !s) {
-		obj->release();
-		return( kIOReturnNoMemory );
-	}
+	obj = entry->copyProperty( property_name,
+	    IORegistryEntry::getPlane( plane ), options );
+	if (!obj) {
+		return kIOReturnNotFound;
+	}

-	if( obj->serialize( s )) {
-		len = s->getLength();
-		*propertiesCnt = len;
-		err = copyoutkdata( s->text(), len, properties );
+	OSSerialize * s = OSSerialize::withCapacity(4096);
+	if (!s) {
+		obj->release();
+		return kIOReturnNoMemory;
+	}

-	} else
-		err = kIOReturnUnsupported;
+	if (obj->serialize( s )) {
+		len = s->getLength();
+		*propertiesCnt = len;
+		err = copyoutkdata( s->text(), len, properties );
+	} else {
+		err = kIOReturnUnsupported;
+	}

-	s->release();
-	obj->release();
+	s->release();
+	obj->release();

-	return( err );
+	return err;
 }

 /* Routine io_registry_entry_get_properties */
-kern_return_t is_io_registry_entry_get_properties(
+kern_return_t
+is_io_registry_entry_get_properties(
 	io_object_t registry_entry,
 	io_buf_ptr_t *properties,
 	mach_msg_type_number_t *propertiesCnt )
 {
-	return (kIOReturnUnsupported);
+	return kIOReturnUnsupported;
 }

 #if CONFIG_MACF

-struct GetPropertiesEditorRef
-{
-	kauth_cred_t cred;
-	IORegistryEntry * entry;
-	OSCollection * root;
+struct GetPropertiesEditorRef {
+	kauth_cred_t cred;
+	IORegistryEntry * entry;
+	OSCollection * root;
 };

 static const OSMetaClassBase *
 GetPropertiesEditor(void * reference,
-    OSSerialize * s,
-    OSCollection * container,
-    const OSSymbol * name,
-    const OSMetaClassBase * value)
+    OSSerialize * s,
+    OSCollection * container,
+    const OSSymbol * name,
+    const OSMetaClassBase * value)
 {
-	GetPropertiesEditorRef * ref = (typeof(ref)) reference;
+	GetPropertiesEditorRef * ref = (typeof(ref))reference;

-	if (!ref->root) ref->root = container;
-	if (ref->root == container)
-	{
-		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
-		{
-			value = 0;
-		}
-	}
-	if (value) value->retain();
-	return (value);
+	if (!ref->root) {
+		ref->root = container;
+	}
+	if (ref->root == container) {
+		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
+			value = 0;
+		}
+	}
+	if (value) {
+		value->retain();
+	}
+	return value;
 }

 #endif /* CONFIG_MACF */
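The editor hook above is how the MACF filtering stays out of the serializer itself: OSSerialize consults the editor for each name/value pair it is about to emit, and returning NULL drops the entry while a retained value passes through (the real callback only filters keys of the root collection, captured on the first call). A reduced sketch of an editor with the same contract, with the policy predicate left hypothetical:

static const OSMetaClassBase *
ExampleEditor(void * reference, OSSerialize * s,
    OSCollection * container, const OSSymbol * name,
    const OSMetaClassBase * value)
{
	// Hypothetical predicate standing in for mac_iokit_check_get_property().
	if (policyHidesKey(name->getCStringNoCopy())) {
		return NULL;            // entry is omitted from the output
	}
	value->retain();                // serializer consumes one reference
	return value;
}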
io_registry_entry_get_properties */ -kern_return_t is_io_registry_entry_get_properties_bin( +kern_return_t +is_io_registry_entry_get_properties_bin( io_object_t registry_entry, io_buf_ptr_t *properties, mach_msg_type_number_t *propertiesCnt) { - kern_return_t err = kIOReturnSuccess; - vm_size_t len; - OSSerialize * s; - OSSerialize::Editor editor = 0; - void * editRef = 0; + kern_return_t err = kIOReturnSuccess; + vm_size_t len; + OSSerialize * s; + OSSerialize::Editor editor = 0; + void * editRef = 0; - CHECK(IORegistryEntry, registry_entry, entry); + CHECK(IORegistryEntry, registry_entry, entry); #if CONFIG_MACF - GetPropertiesEditorRef ref; - if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) - { - editor = &GetPropertiesEditor; - editRef = &ref; - ref.cred = kauth_cred_get(); - ref.entry = entry; - ref.root = 0; - } + GetPropertiesEditorRef ref; + if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) { + editor = &GetPropertiesEditor; + editRef = &ref; + ref.cred = kauth_cred_get(); + ref.entry = entry; + ref.root = 0; + } #endif - s = OSSerialize::binaryWithCapacity(4096, editor, editRef); - if (!s) return (kIOReturnNoMemory); + s = OSSerialize::binaryWithCapacity(4096, editor, editRef); + if (!s) { + return kIOReturnNoMemory; + } - if (!entry->serializeProperties(s)) err = kIOReturnUnsupported; + if (!entry->serializeProperties(s)) { + err = kIOReturnUnsupported; + } - if (kIOReturnSuccess == err) - { - len = s->getLength(); - *propertiesCnt = len; - err = copyoutkdata(s->text(), len, properties); - } - s->release(); + if (kIOReturnSuccess == err) { + len = s->getLength(); + *propertiesCnt = len; + err = copyoutkdata(s->text(), len, properties); + } + s->release(); - return (err); + return err; } /* Routine io_registry_entry_get_property_bin */ -kern_return_t is_io_registry_entry_get_property_bin( +kern_return_t +is_io_registry_entry_get_property_bin( io_object_t registry_entry, io_name_t plane, io_name_t property_name, @@ -3195,432 +3470,457 @@ kern_return_t is_io_registry_entry_get_property_bin( io_buf_ptr_t *properties, mach_msg_type_number_t *propertiesCnt ) { - kern_return_t err; - vm_size_t len; - OSObject * obj; - const OSSymbol * sym; + kern_return_t err; + vm_size_t len; + OSObject * obj; + const OSSymbol * sym; - CHECK( IORegistryEntry, registry_entry, entry ); + CHECK( IORegistryEntry, registry_entry, entry ); #if CONFIG_MACF - if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) - return kIOReturnNotPermitted; + if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) { + return kIOReturnNotPermitted; + } #endif - sym = OSSymbol::withCString(property_name); - if (!sym) return (kIOReturnNoMemory); - - if (gIORegistryEntryPropertyKeysKey == sym) - { - obj = entry->copyPropertyKeys(); - } - else - { - if ((kIORegistryIterateRecursively & options) && plane[0]) - { - obj = entry->copyProperty(property_name, - IORegistryEntry::getPlane(plane), options ); - } - else - { - obj = entry->copyProperty(property_name); - } - if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym); - } + sym = OSSymbol::withCString(property_name); + if (!sym) { + return kIOReturnNoMemory; + } - sym->release(); - if (!obj) return (kIOReturnNotFound); + if (gIORegistryEntryPropertyKeysKey == sym) { + obj = entry->copyPropertyKeys(); + } else { + if ((kIORegistryIterateRecursively & options) && plane[0]) { + obj = entry->copyProperty(property_name, + IORegistryEntry::getPlane(plane), options ); + } else { 
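/*
 * Illustrative aside at this step (a sketch, not code from this patch): the
 * recursive branch above, gated on kIORegistryIterateRecursively, is the
 * kernel side of the user-space IORegistryEntrySearchCFProperty() call, while
 * the plain copyProperty() fallback below serves
 * IORegistryEntryCreateCFProperty(). Assuming `entry` is an
 * io_registry_entry_t already obtained, e.g. from
 * IOServiceGetMatchingService(), the property key here being a placeholder:
 *
 *   CFTypeRef v = IORegistryEntrySearchCFProperty(entry, kIOServicePlane,
 *       CFSTR("IOClass"), kCFAllocatorDefault,
 *       kIORegistryIterateRecursively);
 *   if (v) CFRelease(v);
 */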
+ obj = entry->copyProperty(property_name); + } + if (obj && gIORemoveOnReadProperties->containsObject(sym)) { + entry->removeProperty(sym); + } + } - OSSerialize * s = OSSerialize::binaryWithCapacity(4096); - if( !s) { - obj->release(); - return( kIOReturnNoMemory ); - } + sym->release(); + if (!obj) { + return kIOReturnNotFound; + } - if( obj->serialize( s )) { - len = s->getLength(); - *propertiesCnt = len; - err = copyoutkdata( s->text(), len, properties ); + OSSerialize * s = OSSerialize::binaryWithCapacity(4096); + if (!s) { + obj->release(); + return kIOReturnNoMemory; + } - } else err = kIOReturnUnsupported; + if (obj->serialize( s )) { + len = s->getLength(); + *propertiesCnt = len; + err = copyoutkdata( s->text(), len, properties ); + } else { + err = kIOReturnUnsupported; + } - s->release(); - obj->release(); + s->release(); + obj->release(); - return( err ); + return err; } /* Routine io_registry_entry_set_properties */ -kern_return_t is_io_registry_entry_set_properties +kern_return_t +is_io_registry_entry_set_properties ( io_object_t registry_entry, io_buf_ptr_t properties, mach_msg_type_number_t propertiesCnt, - kern_return_t * result) + kern_return_t * result) { - OSObject * obj; - kern_return_t err; - IOReturn res; - vm_offset_t data; - vm_map_offset_t map_data; - - CHECK( IORegistryEntry, registry_entry, entry ); + OSObject * obj; + kern_return_t err; + IOReturn res; + vm_offset_t data; + vm_map_offset_t map_data; - if( propertiesCnt > sizeof(io_struct_inband_t) * 1024) - return( kIOReturnMessageTooLarge); + CHECK( IORegistryEntry, registry_entry, entry ); - err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); - data = CAST_DOWN(vm_offset_t, map_data); + if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) { + return kIOReturnMessageTooLarge; + } - if( KERN_SUCCESS == err) { + err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); + data = CAST_DOWN(vm_offset_t, map_data); - FAKE_STACK_FRAME(entry->getMetaClass()); + if (KERN_SUCCESS == err) { + FAKE_STACK_FRAME(entry->getMetaClass()); - // must return success after vm_map_copyout() succeeds - obj = OSUnserializeXML( (const char *) data, propertiesCnt ); - vm_deallocate( kernel_map, data, propertiesCnt ); + // must return success after vm_map_copyout() succeeds + obj = OSUnserializeXML((const char *) data, propertiesCnt ); + vm_deallocate( kernel_map, data, propertiesCnt ); - if (!obj) - res = kIOReturnBadArgument; + if (!obj) { + res = kIOReturnBadArgument; + } #if CONFIG_MACF - else if (0 != mac_iokit_check_set_properties(kauth_cred_get(), - registry_entry, obj)) - { - res = kIOReturnNotPermitted; - } + else if (0 != mac_iokit_check_set_properties(kauth_cred_get(), + registry_entry, obj)) { + res = kIOReturnNotPermitted; + } #endif - else - { - res = entry->setProperties( obj ); - } - - if (obj) - obj->release(); + else { + res = entry->setProperties( obj ); + } - FAKE_STACK_FRAME_END(); + if (obj) { + obj->release(); + } - } else - res = err; + FAKE_STACK_FRAME_END(); + } else { + res = err; + } - *result = res; - return( err ); + *result = res; + return err; } /* Routine io_registry_entry_get_child_iterator */ -kern_return_t is_io_registry_entry_get_child_iterator( +kern_return_t +is_io_registry_entry_get_child_iterator( io_object_t registry_entry, io_name_t plane, io_object_t *iterator ) { - CHECK( IORegistryEntry, registry_entry, entry ); + CHECK( IORegistryEntry, registry_entry, entry ); - *iterator = IOUserIterator::withIterator(entry->getChildIterator( - 
IORegistryEntry::getPlane( plane ))); + *iterator = IOUserIterator::withIterator(entry->getChildIterator( + IORegistryEntry::getPlane( plane ))); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } /* Routine io_registry_entry_get_parent_iterator */ -kern_return_t is_io_registry_entry_get_parent_iterator( +kern_return_t +is_io_registry_entry_get_parent_iterator( io_object_t registry_entry, io_name_t plane, io_object_t *iterator) { - CHECK( IORegistryEntry, registry_entry, entry ); + CHECK( IORegistryEntry, registry_entry, entry ); - *iterator = IOUserIterator::withIterator(entry->getParentIterator( - IORegistryEntry::getPlane( plane ))); + *iterator = IOUserIterator::withIterator(entry->getParentIterator( + IORegistryEntry::getPlane( plane ))); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } /* Routine io_service_get_busy_state */ -kern_return_t is_io_service_get_busy_state( +kern_return_t +is_io_service_get_busy_state( io_object_t _service, uint32_t *busyState ) { - CHECK( IOService, _service, service ); + CHECK( IOService, _service, service ); - *busyState = service->getBusyState(); + *busyState = service->getBusyState(); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } /* Routine io_service_get_state */ -kern_return_t is_io_service_get_state( +kern_return_t +is_io_service_get_state( io_object_t _service, uint64_t *state, uint32_t *busy_state, uint64_t *accumulated_busy_time ) { - CHECK( IOService, _service, service ); + CHECK( IOService, _service, service ); - *state = service->getState(); - *busy_state = service->getBusyState(); - *accumulated_busy_time = service->getAccumulatedBusyTime(); + *state = service->getState(); + *busy_state = service->getBusyState(); + *accumulated_busy_time = service->getAccumulatedBusyTime(); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } /* Routine io_service_wait_quiet */ -kern_return_t is_io_service_wait_quiet( +kern_return_t +is_io_service_wait_quiet( io_object_t _service, mach_timespec_t wait_time ) { - uint64_t timeoutNS; - - CHECK( IOService, _service, service ); + uint64_t timeoutNS; - timeoutNS = wait_time.tv_sec; - timeoutNS *= kSecondScale; - timeoutNS += wait_time.tv_nsec; - - return( service->waitQuiet(timeoutNS) ); + CHECK( IOService, _service, service ); + + timeoutNS = wait_time.tv_sec; + timeoutNS *= kSecondScale; + timeoutNS += wait_time.tv_nsec; + + return service->waitQuiet(timeoutNS); } /* Routine io_service_request_probe */ -kern_return_t is_io_service_request_probe( +kern_return_t +is_io_service_request_probe( io_object_t _service, uint32_t options ) { - CHECK( IOService, _service, service ); + CHECK( IOService, _service, service ); - return( service->requestProbe( options )); + return service->requestProbe( options ); } /* Routine io_service_get_authorization_id */ -kern_return_t is_io_service_get_authorization_id( +kern_return_t +is_io_service_get_authorization_id( io_object_t _service, uint64_t *authorization_id ) { - kern_return_t kr; + kern_return_t kr; - CHECK( IOService, _service, service ); + CHECK( IOService, _service, service ); - kr = IOUserClient::clientHasPrivilege( (void *) current_task(), - kIOClientPrivilegeAdministrator ); - if( kIOReturnSuccess != kr) - return( kr ); + kr = IOUserClient::clientHasPrivilege((void *) current_task(), + kIOClientPrivilegeAdministrator ); + if (kIOReturnSuccess != kr) { + return kr; + } - *authorization_id = service->getAuthorizationID(); + *authorization_id = service->getAuthorizationID(); - return( kr ); + return kr; } /* Routine 
io_service_set_authorization_id */ -kern_return_t is_io_service_set_authorization_id( +kern_return_t +is_io_service_set_authorization_id( io_object_t _service, uint64_t authorization_id ) { - CHECK( IOService, _service, service ); + CHECK( IOService, _service, service ); - return( service->setAuthorizationID( authorization_id ) ); + return service->setAuthorizationID( authorization_id ); } /* Routine io_service_open_ndr */ -kern_return_t is_io_service_open_extended( +kern_return_t +is_io_service_open_extended( io_object_t _service, task_t owningTask, uint32_t connect_type, NDR_record_t ndr, io_buf_ptr_t properties, mach_msg_type_number_t propertiesCnt, - kern_return_t * result, + kern_return_t * result, io_object_t *connection ) { - IOUserClient * client = 0; - kern_return_t err = KERN_SUCCESS; - IOReturn res = kIOReturnSuccess; - OSDictionary * propertiesDict = 0; - bool crossEndian; - bool disallowAccess; + IOUserClient * client = 0; + kern_return_t err = KERN_SUCCESS; + IOReturn res = kIOReturnSuccess; + OSDictionary * propertiesDict = 0; + bool crossEndian; + bool disallowAccess; - CHECK( IOService, _service, service ); + CHECK( IOService, _service, service ); - if (!owningTask) return (kIOReturnBadArgument); - assert(owningTask == current_task()); - if (owningTask != current_task()) return (kIOReturnBadArgument); + if (!owningTask) { + return kIOReturnBadArgument; + } + assert(owningTask == current_task()); + if (owningTask != current_task()) { + return kIOReturnBadArgument; + } - do - { - if (properties) return (kIOReturnUnsupported); + do{ + if (properties) { + return kIOReturnUnsupported; + } #if 0 - { - OSObject * obj; - vm_offset_t data; - vm_map_offset_t map_data; - - if( propertiesCnt > sizeof(io_struct_inband_t)) - return( kIOReturnMessageTooLarge); - - err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); - res = err; - data = CAST_DOWN(vm_offset_t, map_data); - if (KERN_SUCCESS == err) - { - // must return success after vm_map_copyout() succeeds - obj = OSUnserializeXML( (const char *) data, propertiesCnt ); - vm_deallocate( kernel_map, data, propertiesCnt ); - propertiesDict = OSDynamicCast(OSDictionary, obj); - if (!propertiesDict) { - res = kIOReturnBadArgument; - if (obj) - obj->release(); + OSObject * obj; + vm_offset_t data; + vm_map_offset_t map_data; + + if (propertiesCnt > sizeof(io_struct_inband_t)) { + return kIOReturnMessageTooLarge; + } + + err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); + res = err; + data = CAST_DOWN(vm_offset_t, map_data); + if (KERN_SUCCESS == err) { + // must return success after vm_map_copyout() succeeds + obj = OSUnserializeXML((const char *) data, propertiesCnt ); + vm_deallocate( kernel_map, data, propertiesCnt ); + propertiesDict = OSDynamicCast(OSDictionary, obj); + if (!propertiesDict) { + res = kIOReturnBadArgument; + if (obj) { + obj->release(); + } + } + } + if (kIOReturnSuccess != res) { + break; + } } - } - if (kIOReturnSuccess != res) - break; - } #endif - crossEndian = (ndr.int_rep != NDR_record.int_rep); - if (crossEndian) - { - if (!propertiesDict) - propertiesDict = OSDictionary::withCapacity(4); - OSData * data = OSData::withBytes(&ndr, sizeof(ndr)); - if (data) - { - if (propertiesDict) - propertiesDict->setObject(kIOUserClientCrossEndianKey, data); - data->release(); - } - } - - res = service->newUserClient( owningTask, (void *) owningTask, + crossEndian = (ndr.int_rep != NDR_record.int_rep); + if (crossEndian) { + if (!propertiesDict) { + propertiesDict = 
OSDictionary::withCapacity(4); + } + OSData * data = OSData::withBytes(&ndr, sizeof(ndr)); + if (data) { + if (propertiesDict) { + propertiesDict->setObject(kIOUserClientCrossEndianKey, data); + } + data->release(); + } + } + + res = service->newUserClient( owningTask, (void *) owningTask, connect_type, propertiesDict, &client ); - if (propertiesDict) - propertiesDict->release(); + if (propertiesDict) { + propertiesDict->release(); + } - if (res == kIOReturnSuccess) - { - assert( OSDynamicCast(IOUserClient, client) ); + if (res == kIOReturnSuccess) { + assert( OSDynamicCast(IOUserClient, client)); - client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey)); - client->closed = false; - client->lock = IOLockAlloc(); + client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey)); + client->closed = false; + client->lock = IOLockAlloc(); - disallowAccess = (crossEndian - && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey)) - && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey))); - if (disallowAccess) res = kIOReturnUnsupported; + disallowAccess = (crossEndian + && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey)) + && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey))); + if (disallowAccess) { + res = kIOReturnUnsupported; + } #if CONFIG_MACF - else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) - res = kIOReturnNotPermitted; + else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) { + res = kIOReturnNotPermitted; + } #endif - if (kIOReturnSuccess == res) res = client->registerOwner(owningTask); - - if (kIOReturnSuccess != res) - { - IOStatisticsClientCall(); - client->clientClose(); - client->release(); - client = 0; - break; - } - OSString * creatorName = IOCopyLogNameForPID(proc_selfpid()); - if (creatorName) - { - client->setProperty(kIOUserClientCreatorKey, creatorName); - creatorName->release(); - } - client->setTerminateDefer(service, false); - } - } - while (false); + if (kIOReturnSuccess == res) { + res = client->registerOwner(owningTask); + } + + if (kIOReturnSuccess != res) { + IOStatisticsClientCall(); + client->clientClose(); + client->release(); + client = 0; + break; + } + OSString * creatorName = IOCopyLogNameForPID(proc_selfpid()); + if (creatorName) { + client->setProperty(kIOUserClientCreatorKey, creatorName); + creatorName->release(); + } + client->setTerminateDefer(service, false); + } + }while (false); - *connection = client; - *result = res; + *connection = client; + *result = res; - return (err); + return err; } /* Routine io_service_close */ -kern_return_t is_io_service_close( +kern_return_t +is_io_service_close( io_object_t connection ) { - OSSet * mappings; - if ((mappings = OSDynamicCast(OSSet, connection))) - return( kIOReturnSuccess ); + OSSet * mappings; + if ((mappings = OSDynamicCast(OSSet, connection))) { + return kIOReturnSuccess; + } - CHECK( IOUserClient, connection, client ); + CHECK( IOUserClient, connection, client ); - IOStatisticsClientCall(); + IOStatisticsClientCall(); - if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) - { - IOLockLock(client->lock); - client->clientClose(); - IOLockUnlock(client->lock); - } - else - { - IOLog("ignored is_io_service_close(0x%qx,%s)\n", - client->getRegistryEntryID(), client->getName()); - } + if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) { + 
IOLockLock(client->lock); + client->clientClose(); + IOLockUnlock(client->lock); + } else { + IOLog("ignored is_io_service_close(0x%qx,%s)\n", + client->getRegistryEntryID(), client->getName()); + } - return( kIOReturnSuccess ); + return kIOReturnSuccess; } /* Routine io_connect_get_service */ -kern_return_t is_io_connect_get_service( +kern_return_t +is_io_connect_get_service( io_object_t connection, io_object_t *service ) { - IOService * theService; + IOService * theService; - CHECK( IOUserClient, connection, client ); + CHECK( IOUserClient, connection, client ); - theService = client->getService(); - if( theService) - theService->retain(); + theService = client->getService(); + if (theService) { + theService->retain(); + } - *service = theService; + *service = theService; - return( theService ? kIOReturnSuccess : kIOReturnUnsupported ); + return theService ? kIOReturnSuccess : kIOReturnUnsupported; } /* Routine io_connect_set_notification_port */ -kern_return_t is_io_connect_set_notification_port( +kern_return_t +is_io_connect_set_notification_port( io_object_t connection, uint32_t notification_type, mach_port_t port, uint32_t reference) { - kern_return_t ret; - CHECK( IOUserClient, connection, client ); + kern_return_t ret; + CHECK( IOUserClient, connection, client ); - IOStatisticsClientCall(); - IOLockLock(client->lock); - ret = client->registerNotificationPort( port, notification_type, - (io_user_reference_t) reference ); - IOLockUnlock(client->lock); - return (ret); + IOStatisticsClientCall(); + IOLockLock(client->lock); + ret = client->registerNotificationPort( port, notification_type, + (io_user_reference_t) reference ); + IOLockUnlock(client->lock); + return ret; } /* Routine io_connect_set_notification_port */ -kern_return_t is_io_connect_set_notification_port_64( +kern_return_t +is_io_connect_set_notification_port_64( io_object_t connection, uint32_t notification_type, mach_port_t port, io_user_reference_t reference) { - kern_return_t ret; - CHECK( IOUserClient, connection, client ); + kern_return_t ret; + CHECK( IOUserClient, connection, client ); - IOStatisticsClientCall(); - IOLockLock(client->lock); - ret = client->registerNotificationPort( port, notification_type, - reference ); - IOLockUnlock(client->lock); - return (ret); + IOStatisticsClientCall(); + IOLockLock(client->lock); + ret = client->registerNotificationPort( port, notification_type, + reference ); + IOLockUnlock(client->lock); + return ret; } /* Routine io_connect_map_memory_into_task */ -kern_return_t is_io_connect_map_memory_into_task +kern_return_t +is_io_connect_map_memory_into_task ( io_connect_t connection, uint32_t memory_type, @@ -3630,204 +3930,210 @@ kern_return_t is_io_connect_map_memory_into_task uint32_t flags ) { - IOReturn err; - IOMemoryMap * map; - - CHECK( IOUserClient, connection, client ); - - if (!into_task) return (kIOReturnBadArgument); - - IOStatisticsClientCall(); - map = client->mapClientMemory64( memory_type, into_task, flags, *address ); - - if( map) { - *address = map->getAddress(); - if( size) - *size = map->getSize(); - - if( client->sharedInstance - || (into_task != current_task())) { - // push a name out to the task owning the map, - // so we can clean up maps - mach_port_name_t name __unused = - IOMachPort::makeSendRightForTask( - into_task, map, IKOT_IOKIT_OBJECT ); - map->release(); - - } else { - // keep it with the user client - IOLockLock( gIOObjectPortLock); - if( 0 == client->mappings) - client->mappings = OSSet::withCapacity(2); - if( client->mappings) - 
client->mappings->setObject( map); - IOLockUnlock( gIOObjectPortLock); - map->release(); - } - err = kIOReturnSuccess; - - } else - err = kIOReturnBadArgument; + IOReturn err; + IOMemoryMap * map; + + CHECK( IOUserClient, connection, client ); + + if (!into_task) { + return kIOReturnBadArgument; + } - return( err ); + IOStatisticsClientCall(); + map = client->mapClientMemory64( memory_type, into_task, flags, *address ); + + if (map) { + *address = map->getAddress(); + if (size) { + *size = map->getSize(); + } + + if (client->sharedInstance + || (into_task != current_task())) { + // push a name out to the task owning the map, + // so we can clean up maps + mach_port_name_t name __unused = + IOMachPort::makeSendRightForTask( + into_task, map, IKOT_IOKIT_OBJECT ); + map->release(); + } else { + // keep it with the user client + IOLockLock( gIOObjectPortLock); + if (0 == client->mappings) { + client->mappings = OSSet::withCapacity(2); + } + if (client->mappings) { + client->mappings->setObject( map); + } + IOLockUnlock( gIOObjectPortLock); + map->release(); + } + err = kIOReturnSuccess; + } else { + err = kIOReturnBadArgument; + } + + return err; } /* Routine is_io_connect_map_memory */ -kern_return_t is_io_connect_map_memory( +kern_return_t +is_io_connect_map_memory( io_object_t connect, - uint32_t type, - task_t task, - uint32_t * mapAddr, - uint32_t * mapSize, - uint32_t flags ) + uint32_t type, + task_t task, + uint32_t * mapAddr, + uint32_t * mapSize, + uint32_t flags ) { - IOReturn err; - mach_vm_address_t address; - mach_vm_size_t size; + IOReturn err; + mach_vm_address_t address; + mach_vm_size_t size; - address = SCALAR64(*mapAddr); - size = SCALAR64(*mapSize); + address = SCALAR64(*mapAddr); + size = SCALAR64(*mapSize); - err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags); + err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags); - *mapAddr = SCALAR32(address); - *mapSize = SCALAR32(size); + *mapAddr = SCALAR32(address); + *mapSize = SCALAR32(size); - return (err); + return err; } - } /* extern "C" */ -IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem) +IOMemoryMap * +IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem) { - OSIterator * iter; - IOMemoryMap * map = 0; + OSIterator * iter; + IOMemoryMap * map = 0; - IOLockLock(gIOObjectPortLock); + IOLockLock(gIOObjectPortLock); - iter = OSCollectionIterator::withCollection(mappings); - if(iter) - { - while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) - { - if(mem == map->getMemoryDescriptor()) - { - map->retain(); - mappings->removeObject(map); - break; - } - } - iter->release(); - } + iter = OSCollectionIterator::withCollection(mappings); + if (iter) { + while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) { + if (mem == map->getMemoryDescriptor()) { + map->retain(); + mappings->removeObject(map); + break; + } + } + iter->release(); + } - IOLockUnlock(gIOObjectPortLock); + IOLockUnlock(gIOObjectPortLock); - return (map); + return map; } extern "C" { - /* Routine io_connect_unmap_memory_from_task */ -kern_return_t is_io_connect_unmap_memory_from_task +kern_return_t +is_io_connect_unmap_memory_from_task ( io_connect_t connection, uint32_t memory_type, task_t from_task, mach_vm_address_t address) { - IOReturn err; - IOOptionBits options = 0; - IOMemoryDescriptor * memory = 0; - IOMemoryMap * map; - - CHECK( IOUserClient, connection, client ); - - if (!from_task) return (kIOReturnBadArgument); - - 
IOStatisticsClientCall(); - err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory ); + IOReturn err; + IOOptionBits options = 0; + IOMemoryDescriptor * memory = 0; + IOMemoryMap * map; - if( memory && (kIOReturnSuccess == err)) { + CHECK( IOUserClient, connection, client ); - options = (options & ~kIOMapUserOptionsMask) - | kIOMapAnywhere | kIOMapReference; - - map = memory->createMappingInTask( from_task, address, options ); - memory->release(); - if( map) - { - IOLockLock( gIOObjectPortLock); - if( client->mappings) - client->mappings->removeObject( map); - IOLockUnlock( gIOObjectPortLock); - - mach_port_name_t name = 0; - if (from_task != current_task()) - { - name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT ); - map->release(); - } + if (!from_task) { + return kIOReturnBadArgument; + } - if (name) - { - map->userClientUnmap(); - err = iokit_mod_send_right( from_task, name, -2 ); - err = kIOReturnSuccess; - } - else - IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT ); - if (from_task == current_task()) - map->release(); - } - else - err = kIOReturnBadArgument; - } + IOStatisticsClientCall(); + err = client->clientMemoryForType((UInt32) memory_type, &options, &memory ); + + if (memory && (kIOReturnSuccess == err)) { + options = (options & ~kIOMapUserOptionsMask) + | kIOMapAnywhere | kIOMapReference; + + map = memory->createMappingInTask( from_task, address, options ); + memory->release(); + if (map) { + IOLockLock( gIOObjectPortLock); + if (client->mappings) { + client->mappings->removeObject( map); + } + IOLockUnlock( gIOObjectPortLock); + + mach_port_name_t name = 0; + if (from_task != current_task()) { + name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT ); + map->release(); + } + + if (name) { + map->userClientUnmap(); + err = iokit_mod_send_right( from_task, name, -2 ); + err = kIOReturnSuccess; + } else { + IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT ); + } + if (from_task == current_task()) { + map->release(); + } + } else { + err = kIOReturnBadArgument; + } + } - return( err ); + return err; } -kern_return_t is_io_connect_unmap_memory( +kern_return_t +is_io_connect_unmap_memory( io_object_t connect, - uint32_t type, - task_t task, - uint32_t mapAddr ) + uint32_t type, + task_t task, + uint32_t mapAddr ) { - IOReturn err; - mach_vm_address_t address; - - address = SCALAR64(mapAddr); - - err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr); + IOReturn err; + mach_vm_address_t address; - return (err); + address = SCALAR64(mapAddr); + + err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr); + + return err; } /* Routine io_connect_add_client */ -kern_return_t is_io_connect_add_client( +kern_return_t +is_io_connect_add_client( io_object_t connection, io_object_t connect_to) { - CHECK( IOUserClient, connection, client ); - CHECK( IOUserClient, connect_to, to ); + CHECK( IOUserClient, connection, client ); + CHECK( IOUserClient, connect_to, to ); - IOStatisticsClientCall(); - return( client->connectClient( to ) ); + IOStatisticsClientCall(); + return client->connectClient( to ); } /* Routine io_connect_set_properties */ -kern_return_t is_io_connect_set_properties( +kern_return_t +is_io_connect_set_properties( io_object_t connection, io_buf_ptr_t properties, mach_msg_type_number_t propertiesCnt, - kern_return_t * result) + kern_return_t * result) { - return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result )); + return 
is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ); } /* Routine io_user_client_method */ -kern_return_t is_io_connect_method_var_output +kern_return_t +is_io_connect_method_var_output ( io_connect_t connection, uint32_t selector, @@ -3845,86 +4151,86 @@ kern_return_t is_io_connect_method_var_output mach_msg_type_number_t *var_outputCnt ) { - CHECK( IOUserClient, connection, client ); - - IOExternalMethodArguments args; - IOReturn ret; - IOMemoryDescriptor * inputMD = 0; - OSObject * structureVariableOutputData = 0; - - bzero(&args.__reserved[0], sizeof(args.__reserved)); - args.__reservedA = 0; - args.version = kIOExternalMethodArgumentsCurrentVersion; + CHECK( IOUserClient, connection, client ); - args.selector = selector; + IOExternalMethodArguments args; + IOReturn ret; + IOMemoryDescriptor * inputMD = 0; + OSObject * structureVariableOutputData = 0; - args.asyncWakePort = MACH_PORT_NULL; - args.asyncReference = 0; - args.asyncReferenceCount = 0; - args.structureVariableOutputData = &structureVariableOutputData; + bzero(&args.__reserved[0], sizeof(args.__reserved)); + args.__reservedA = 0; + args.version = kIOExternalMethodArgumentsCurrentVersion; - args.scalarInput = scalar_input; - args.scalarInputCount = scalar_inputCnt; - args.structureInput = inband_input; - args.structureInputSize = inband_inputCnt; + args.selector = selector; - if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError); + args.asyncWakePort = MACH_PORT_NULL; + args.asyncReference = 0; + args.asyncReferenceCount = 0; + args.structureVariableOutputData = &structureVariableOutputData; - if (ool_input) - inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, - kIODirectionOut | kIOMemoryMapCopyOnWrite, - current_task()); + args.scalarInput = scalar_input; + args.scalarInputCount = scalar_inputCnt; + args.structureInput = inband_input; + args.structureInputSize = inband_inputCnt; - args.structureInputDescriptor = inputMD; - - args.scalarOutput = scalar_output; - args.scalarOutputCount = *scalar_outputCnt; - bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0])); - args.structureOutput = inband_output; - args.structureOutputSize = *inband_outputCnt; - args.structureOutputDescriptor = NULL; - args.structureOutputDescriptorSize = 0; - - IOStatisticsClientCall(); - ret = client->externalMethod( selector, &args ); - - *scalar_outputCnt = args.scalarOutputCount; - *inband_outputCnt = args.structureOutputSize; + if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) { + return kIOReturnIPCError; + } - if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) - { - OSSerialize * serialize; - OSData * data; - vm_size_t len; + if (ool_input) { + inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, + kIODirectionOut | kIOMemoryMapCopyOnWrite, + current_task()); + } - if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) - { - len = serialize->getLength(); - *var_outputCnt = len; - ret = copyoutkdata(serialize->text(), len, var_output); + args.structureInputDescriptor = inputMD; + + args.scalarOutput = scalar_output; + args.scalarOutputCount = *scalar_outputCnt; + bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0])); + args.structureOutput = inband_output; + args.structureOutputSize = *inband_outputCnt; + args.structureOutputDescriptor = NULL; + args.structureOutputDescriptorSize = 0; + + IOStatisticsClientCall(); + ret = client->externalMethod( selector, 
&args ); + + *scalar_outputCnt = args.scalarOutputCount; + *inband_outputCnt = args.structureOutputSize; + + if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) { + OSSerialize * serialize; + OSData * data; + vm_size_t len; + + if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) { + len = serialize->getLength(); + *var_outputCnt = len; + ret = copyoutkdata(serialize->text(), len, var_output); + } else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) { + len = data->getLength(); + *var_outputCnt = len; + ret = copyoutkdata(data->getBytesNoCopy(), len, var_output); + } else { + ret = kIOReturnUnderrun; + } } - else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) - { - len = data->getLength(); - *var_outputCnt = len; - ret = copyoutkdata(data->getBytesNoCopy(), len, var_output); + + if (inputMD) { + inputMD->release(); } - else - { - ret = kIOReturnUnderrun; + if (structureVariableOutputData) { + structureVariableOutputData->release(); } - } - - if (inputMD) - inputMD->release(); - if (structureVariableOutputData) - structureVariableOutputData->release(); - return (ret); + return ret; } /* Routine io_user_client_method */ -kern_return_t is_io_connect_method +kern_return_t +is_io_connect_method ( io_connect_t connection, uint32_t selector, @@ -3942,71 +4248,78 @@ kern_return_t is_io_connect_method mach_vm_size_t *ool_output_size ) { - CHECK( IOUserClient, connection, client ); + CHECK( IOUserClient, connection, client ); - IOExternalMethodArguments args; - IOReturn ret; - IOMemoryDescriptor * inputMD = 0; - IOMemoryDescriptor * outputMD = 0; + IOExternalMethodArguments args; + IOReturn ret; + IOMemoryDescriptor * inputMD = 0; + IOMemoryDescriptor * outputMD = 0; - bzero(&args.__reserved[0], sizeof(args.__reserved)); - args.__reservedA = 0; - args.version = kIOExternalMethodArgumentsCurrentVersion; + bzero(&args.__reserved[0], sizeof(args.__reserved)); + args.__reservedA = 0; + args.version = kIOExternalMethodArgumentsCurrentVersion; - args.selector = selector; + args.selector = selector; - args.asyncWakePort = MACH_PORT_NULL; - args.asyncReference = 0; - args.asyncReferenceCount = 0; - args.structureVariableOutputData = 0; + args.asyncWakePort = MACH_PORT_NULL; + args.asyncReference = 0; + args.asyncReferenceCount = 0; + args.structureVariableOutputData = 0; - args.scalarInput = scalar_input; - args.scalarInputCount = scalar_inputCnt; - args.structureInput = inband_input; - args.structureInputSize = inband_inputCnt; + args.scalarInput = scalar_input; + args.scalarInputCount = scalar_inputCnt; + args.structureInput = inband_input; + args.structureInputSize = inband_inputCnt; - if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError); - if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError); + if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) { + return kIOReturnIPCError; + } + if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) { + return kIOReturnIPCError; + } - if (ool_input) - inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, - kIODirectionOut | kIOMemoryMapCopyOnWrite, - current_task()); + if (ool_input) { + inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, + kIODirectionOut | kIOMemoryMapCopyOnWrite, + current_task()); + } - args.structureInputDescriptor = inputMD; + args.structureInputDescriptor = inputMD; - args.scalarOutput = scalar_output; - args.scalarOutputCount = 
*scalar_outputCnt; - bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0])); - args.structureOutput = inband_output; - args.structureOutputSize = *inband_outputCnt; + args.scalarOutput = scalar_output; + args.scalarOutputCount = *scalar_outputCnt; + bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0])); + args.structureOutput = inband_output; + args.structureOutputSize = *inband_outputCnt; - if (ool_output && ool_output_size) - { - outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, - kIODirectionIn, current_task()); - } + if (ool_output && ool_output_size) { + outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, + kIODirectionIn, current_task()); + } - args.structureOutputDescriptor = outputMD; - args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0; + args.structureOutputDescriptor = outputMD; + args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0; - IOStatisticsClientCall(); - ret = client->externalMethod( selector, &args ); + IOStatisticsClientCall(); + ret = client->externalMethod( selector, &args ); - *scalar_outputCnt = args.scalarOutputCount; - *inband_outputCnt = args.structureOutputSize; - *ool_output_size = args.structureOutputDescriptorSize; + *scalar_outputCnt = args.scalarOutputCount; + *inband_outputCnt = args.structureOutputSize; + *ool_output_size = args.structureOutputDescriptorSize; - if (inputMD) - inputMD->release(); - if (outputMD) - outputMD->release(); + if (inputMD) { + inputMD->release(); + } + if (outputMD) { + outputMD->release(); + } - return (ret); + return ret; } /* Routine io_async_user_client_method */ -kern_return_t is_io_connect_async_method +kern_return_t +is_io_connect_async_method ( io_connect_t connection, mach_port_t wake_port, @@ -4027,912 +4340,917 @@ kern_return_t is_io_connect_async_method mach_vm_size_t * ool_output_size ) { - CHECK( IOUserClient, connection, client ); + CHECK( IOUserClient, connection, client ); - IOExternalMethodArguments args; - IOReturn ret; - IOMemoryDescriptor * inputMD = 0; - IOMemoryDescriptor * outputMD = 0; + IOExternalMethodArguments args; + IOReturn ret; + IOMemoryDescriptor * inputMD = 0; + IOMemoryDescriptor * outputMD = 0; - bzero(&args.__reserved[0], sizeof(args.__reserved)); - args.__reservedA = 0; - args.version = kIOExternalMethodArgumentsCurrentVersion; + bzero(&args.__reserved[0], sizeof(args.__reserved)); + args.__reservedA = 0; + args.version = kIOExternalMethodArgumentsCurrentVersion; - reference[0] = (io_user_reference_t) wake_port; - if (vm_map_is_64bit(get_task_map(current_task()))) - reference[0] |= kIOUCAsync64Flag; + reference[0] = (io_user_reference_t) wake_port; + if (vm_map_is_64bit(get_task_map(current_task()))) { + reference[0] |= kIOUCAsync64Flag; + } - args.selector = selector; + args.selector = selector; - args.asyncWakePort = wake_port; - args.asyncReference = reference; - args.asyncReferenceCount = referenceCnt; + args.asyncWakePort = wake_port; + args.asyncReference = reference; + args.asyncReferenceCount = referenceCnt; - args.structureVariableOutputData = 0; + args.structureVariableOutputData = 0; - args.scalarInput = scalar_input; - args.scalarInputCount = scalar_inputCnt; - args.structureInput = inband_input; - args.structureInputSize = inband_inputCnt; + args.scalarInput = scalar_input; + args.scalarInputCount = scalar_inputCnt; + args.structureInput = inband_input; + args.structureInputSize = inband_inputCnt; - if (ool_input && (ool_input_size <= 
sizeof(io_struct_inband_t))) return (kIOReturnIPCError); - if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError); + if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) { + return kIOReturnIPCError; + } + if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) { + return kIOReturnIPCError; + } - if (ool_input) - inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, - kIODirectionOut | kIOMemoryMapCopyOnWrite, - current_task()); + if (ool_input) { + inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, + kIODirectionOut | kIOMemoryMapCopyOnWrite, + current_task()); + } - args.structureInputDescriptor = inputMD; + args.structureInputDescriptor = inputMD; - args.scalarOutput = scalar_output; - args.scalarOutputCount = *scalar_outputCnt; - bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0])); - args.structureOutput = inband_output; - args.structureOutputSize = *inband_outputCnt; + args.scalarOutput = scalar_output; + args.scalarOutputCount = *scalar_outputCnt; + bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0])); + args.structureOutput = inband_output; + args.structureOutputSize = *inband_outputCnt; - if (ool_output) - { - outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, - kIODirectionIn, current_task()); - } + if (ool_output) { + outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, + kIODirectionIn, current_task()); + } - args.structureOutputDescriptor = outputMD; - args.structureOutputDescriptorSize = *ool_output_size; + args.structureOutputDescriptor = outputMD; + args.structureOutputDescriptorSize = *ool_output_size; - IOStatisticsClientCall(); - ret = client->externalMethod( selector, &args ); + IOStatisticsClientCall(); + ret = client->externalMethod( selector, &args ); - *inband_outputCnt = args.structureOutputSize; - *ool_output_size = args.structureOutputDescriptorSize; + *inband_outputCnt = args.structureOutputSize; + *ool_output_size = args.structureOutputDescriptorSize; - if (inputMD) - inputMD->release(); - if (outputMD) - outputMD->release(); + if (inputMD) { + inputMD->release(); + } + if (outputMD) { + outputMD->release(); + } - return (ret); + return ret; } /* Routine io_connect_method_scalarI_scalarO */ -kern_return_t is_io_connect_method_scalarI_scalarO( - io_object_t connect, - uint32_t index, - io_scalar_inband_t input, - mach_msg_type_number_t inputCount, - io_scalar_inband_t output, - mach_msg_type_number_t * outputCount ) -{ - IOReturn err; - uint32_t i; - io_scalar_inband64_t _input; - io_scalar_inband64_t _output; - - mach_msg_type_number_t struct_outputCnt = 0; - mach_vm_size_t ool_output_size = 0; - - bzero(&_output[0], sizeof(_output)); - for (i = 0; i < inputCount; i++) - _input[i] = SCALAR64(input[i]); - - err = is_io_connect_method(connect, index, - _input, inputCount, - NULL, 0, - 0, 0, - NULL, &struct_outputCnt, - _output, outputCount, - 0, &ool_output_size); - - for (i = 0; i < *outputCount; i++) - output[i] = SCALAR32(_output[i]); - - return (err); -} - -kern_return_t shim_io_connect_method_scalarI_scalarO( - IOExternalMethod * method, - IOService * object, - const io_user_scalar_t * input, - mach_msg_type_number_t inputCount, - io_user_scalar_t * output, - mach_msg_type_number_t * outputCount ) -{ - IOMethod func; - io_scalar_inband_t _output; - IOReturn err; - err = kIOReturnBadArgument; - - bzero(&_output[0], sizeof(_output)); - do { - - if( inputCount != 
method->count0) - { - IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); - continue; - } - if( *outputCount != method->count1) - { - IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); - continue; - } - - func = method->func; - - switch( inputCount) { - - case 6: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) ); - break; - case 5: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), ARG32(input[4]), - &_output[0] ); - break; - case 4: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), - &_output[0], &_output[1] ); - break; - case 3: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - &_output[0], &_output[1], &_output[2] ); - break; - case 2: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), - &_output[0], &_output[1], &_output[2], - &_output[3] ); - break; - case 1: - err = (object->*func)( ARG32(input[0]), - &_output[0], &_output[1], &_output[2], - &_output[3], &_output[4] ); - break; - case 0: - err = (object->*func)( &_output[0], &_output[1], &_output[2], - &_output[3], &_output[4], &_output[5] ); - break; +kern_return_t +is_io_connect_method_scalarI_scalarO( + io_object_t connect, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_scalar_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + IOReturn err; + uint32_t i; + io_scalar_inband64_t _input; + io_scalar_inband64_t _output; + + mach_msg_type_number_t struct_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + bzero(&_output[0], sizeof(_output)); + for (i = 0; i < inputCount; i++) { + _input[i] = SCALAR64(input[i]); + } + + err = is_io_connect_method(connect, index, + _input, inputCount, + NULL, 0, + 0, 0, + NULL, &struct_outputCnt, + _output, outputCount, + 0, &ool_output_size); - default: - IOLog("%s: Bad method table\n", object->getName()); + for (i = 0; i < *outputCount; i++) { + output[i] = SCALAR32(_output[i]); } - } - while( false); - uint32_t i; - for (i = 0; i < *outputCount; i++) - output[i] = SCALAR32(_output[i]); + return err; +} + +kern_return_t +shim_io_connect_method_scalarI_scalarO( + IOExternalMethod * method, + IOService * object, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_user_scalar_t * output, + mach_msg_type_number_t * outputCount ) +{ + IOMethod func; + io_scalar_inband_t _output; + IOReturn err; + err = kIOReturnBadArgument; + + bzero(&_output[0], sizeof(_output)); + do { + if (inputCount != method->count0) { + IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); + continue; + } + if (*outputCount != method->count1) { + IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, 
(uint64_t)method->count1); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); + continue; + } - return( err); + func = method->func; + + switch (inputCount) { + case 6: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), ARG32(input[5])); + break; + case 5: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + &_output[0] ); + break; + case 4: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), + &_output[0], &_output[1] ); + break; + case 3: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + &_output[0], &_output[1], &_output[2] ); + break; + case 2: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), + &_output[0], &_output[1], &_output[2], + &_output[3] ); + break; + case 1: + err = (object->*func)( ARG32(input[0]), + &_output[0], &_output[1], &_output[2], + &_output[3], &_output[4] ); + break; + case 0: + err = (object->*func)( &_output[0], &_output[1], &_output[2], + &_output[3], &_output[4], &_output[5] ); + break; + + default: + IOLog("%s: Bad method table\n", object->getName()); + } + }while (false); + + uint32_t i; + for (i = 0; i < *outputCount; i++) { + output[i] = SCALAR32(_output[i]); + } + + return err; } /* Routine io_async_method_scalarI_scalarO */ -kern_return_t is_io_async_method_scalarI_scalarO( - io_object_t connect, +kern_return_t +is_io_async_method_scalarI_scalarO( + io_object_t connect, mach_port_t wake_port, io_async_ref_t reference, mach_msg_type_number_t referenceCnt, - uint32_t index, - io_scalar_inband_t input, - mach_msg_type_number_t inputCount, - io_scalar_inband_t output, - mach_msg_type_number_t * outputCount ) -{ - IOReturn err; - uint32_t i; - io_scalar_inband64_t _input; - io_scalar_inband64_t _output; - io_async_ref64_t _reference; - - bzero(&_output[0], sizeof(_output)); - for (i = 0; i < referenceCnt; i++) - _reference[i] = REF64(reference[i]); - - mach_msg_type_number_t struct_outputCnt = 0; - mach_vm_size_t ool_output_size = 0; - - for (i = 0; i < inputCount; i++) - _input[i] = SCALAR64(input[i]); - - err = is_io_connect_async_method(connect, - wake_port, _reference, referenceCnt, - index, - _input, inputCount, - NULL, 0, - 0, 0, - NULL, &struct_outputCnt, - _output, outputCount, - 0, &ool_output_size); - - for (i = 0; i < *outputCount; i++) - output[i] = SCALAR32(_output[i]); - - return (err); + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_scalar_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + IOReturn err; + uint32_t i; + io_scalar_inband64_t _input; + io_scalar_inband64_t _output; + io_async_ref64_t _reference; + + bzero(&_output[0], sizeof(_output)); + for (i = 0; i < referenceCnt; i++) { + _reference[i] = REF64(reference[i]); + } + + mach_msg_type_number_t struct_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) { + _input[i] = SCALAR64(input[i]); + } + + err = is_io_connect_async_method(connect, + wake_port, _reference, referenceCnt, + index, + _input, inputCount, + NULL, 0, + 0, 0, + NULL, &struct_outputCnt, + _output, outputCount, + 0, &ool_output_size); + + for (i = 0; i < *outputCount; i++) { + output[i] = SCALAR32(_output[i]); + } + + return err; } /* Routine io_async_method_scalarI_structureO */ -kern_return_t is_io_async_method_scalarI_structureO( - io_object_t connect, +kern_return_t 
+is_io_async_method_scalarI_structureO( + io_object_t connect, mach_port_t wake_port, io_async_ref_t reference, mach_msg_type_number_t referenceCnt, - uint32_t index, - io_scalar_inband_t input, - mach_msg_type_number_t inputCount, - io_struct_inband_t output, - mach_msg_type_number_t * outputCount ) -{ - uint32_t i; - io_scalar_inband64_t _input; - io_async_ref64_t _reference; - - for (i = 0; i < referenceCnt; i++) - _reference[i] = REF64(reference[i]); - - mach_msg_type_number_t scalar_outputCnt = 0; - mach_vm_size_t ool_output_size = 0; - - for (i = 0; i < inputCount; i++) - _input[i] = SCALAR64(input[i]); - - return (is_io_connect_async_method(connect, - wake_port, _reference, referenceCnt, - index, - _input, inputCount, - NULL, 0, - 0, 0, - output, outputCount, - NULL, &scalar_outputCnt, - 0, &ool_output_size)); + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + uint32_t i; + io_scalar_inband64_t _input; + io_async_ref64_t _reference; + + for (i = 0; i < referenceCnt; i++) { + _reference[i] = REF64(reference[i]); + } + + mach_msg_type_number_t scalar_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) { + _input[i] = SCALAR64(input[i]); + } + + return is_io_connect_async_method(connect, + wake_port, _reference, referenceCnt, + index, + _input, inputCount, + NULL, 0, + 0, 0, + output, outputCount, + NULL, &scalar_outputCnt, + 0, &ool_output_size); } /* Routine io_async_method_scalarI_structureI */ -kern_return_t is_io_async_method_scalarI_structureI( - io_connect_t connect, +kern_return_t +is_io_async_method_scalarI_structureI( + io_connect_t connect, mach_port_t wake_port, io_async_ref_t reference, mach_msg_type_number_t referenceCnt, - uint32_t index, - io_scalar_inband_t input, - mach_msg_type_number_t inputCount, - io_struct_inband_t inputStruct, - mach_msg_type_number_t inputStructCount ) -{ - uint32_t i; - io_scalar_inband64_t _input; - io_async_ref64_t _reference; - - for (i = 0; i < referenceCnt; i++) - _reference[i] = REF64(reference[i]); - - mach_msg_type_number_t scalar_outputCnt = 0; - mach_msg_type_number_t inband_outputCnt = 0; - mach_vm_size_t ool_output_size = 0; - - for (i = 0; i < inputCount; i++) - _input[i] = SCALAR64(input[i]); - - return (is_io_connect_async_method(connect, - wake_port, _reference, referenceCnt, - index, - _input, inputCount, - inputStruct, inputStructCount, - 0, 0, - NULL, &inband_outputCnt, - NULL, &scalar_outputCnt, - 0, &ool_output_size)); + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t inputStruct, + mach_msg_type_number_t inputStructCount ) +{ + uint32_t i; + io_scalar_inband64_t _input; + io_async_ref64_t _reference; + + for (i = 0; i < referenceCnt; i++) { + _reference[i] = REF64(reference[i]); + } + + mach_msg_type_number_t scalar_outputCnt = 0; + mach_msg_type_number_t inband_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) { + _input[i] = SCALAR64(input[i]); + } + + return is_io_connect_async_method(connect, + wake_port, _reference, referenceCnt, + index, + _input, inputCount, + inputStruct, inputStructCount, + 0, 0, + NULL, &inband_outputCnt, + NULL, &scalar_outputCnt, + 0, &ool_output_size); } /* Routine io_async_method_structureI_structureO */ -kern_return_t is_io_async_method_structureI_structureO( - io_object_t connect, +kern_return_t +is_io_async_method_structureI_structureO( + io_object_t 
connect, mach_port_t wake_port, io_async_ref_t reference, mach_msg_type_number_t referenceCnt, - uint32_t index, - io_struct_inband_t input, - mach_msg_type_number_t inputCount, - io_struct_inband_t output, - mach_msg_type_number_t * outputCount ) -{ - uint32_t i; - mach_msg_type_number_t scalar_outputCnt = 0; - mach_vm_size_t ool_output_size = 0; - io_async_ref64_t _reference; - - for (i = 0; i < referenceCnt; i++) - _reference[i] = REF64(reference[i]); - - return (is_io_connect_async_method(connect, - wake_port, _reference, referenceCnt, - index, - NULL, 0, - input, inputCount, - 0, 0, - output, outputCount, - NULL, &scalar_outputCnt, - 0, &ool_output_size)); -} - - -kern_return_t shim_io_async_method_scalarI_scalarO( - IOExternalAsyncMethod * method, - IOService * object, + uint32_t index, + io_struct_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + uint32_t i; + mach_msg_type_number_t scalar_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + io_async_ref64_t _reference; + + for (i = 0; i < referenceCnt; i++) { + _reference[i] = REF64(reference[i]); + } + + return is_io_connect_async_method(connect, + wake_port, _reference, referenceCnt, + index, + NULL, 0, + input, inputCount, + 0, 0, + output, outputCount, + NULL, &scalar_outputCnt, + 0, &ool_output_size); +} + + +kern_return_t +shim_io_async_method_scalarI_scalarO( + IOExternalAsyncMethod * method, + IOService * object, mach_port_t asyncWakePort, io_user_reference_t * asyncReference, uint32_t asyncReferenceCount, - const io_user_scalar_t * input, - mach_msg_type_number_t inputCount, - io_user_scalar_t * output, - mach_msg_type_number_t * outputCount ) -{ - IOAsyncMethod func; - uint32_t i; - io_scalar_inband_t _output; - IOReturn err; - io_async_ref_t reference; - - bzero(&_output[0], sizeof(_output)); - for (i = 0; i < asyncReferenceCount; i++) - reference[i] = REF32(asyncReference[i]); - - err = kIOReturnBadArgument; - - do { - - if( inputCount != method->count0) - { - IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); - continue; - } - if( *outputCount != method->count1) - { - IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); - continue; - } - - func = method->func; - - switch( inputCount) { - - case 6: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) ); - break; - case 5: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), ARG32(input[4]), - &_output[0] ); - break; - case 4: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), - &_output[0], &_output[1] ); - break; - case 3: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - &_output[0], &_output[1], &_output[2] ); - break; - case 2: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), - &_output[0], &_output[1], &_output[2], - &_output[3] ); - break; - case 1: - err = (object->*func)( reference, - 
ARG32(input[0]), - &_output[0], &_output[1], &_output[2], - &_output[3], &_output[4] ); - break; - case 0: - err = (object->*func)( reference, - &_output[0], &_output[1], &_output[2], - &_output[3], &_output[4], &_output[5] ); - break; - - default: - IOLog("%s: Bad method table\n", object->getName()); - } - } - while( false); - - for (i = 0; i < *outputCount; i++) - output[i] = SCALAR32(_output[i]); - - return( err); + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_user_scalar_t * output, + mach_msg_type_number_t * outputCount ) +{ + IOAsyncMethod func; + uint32_t i; + io_scalar_inband_t _output; + IOReturn err; + io_async_ref_t reference; + + bzero(&_output[0], sizeof(_output)); + for (i = 0; i < asyncReferenceCount; i++) { + reference[i] = REF32(asyncReference[i]); + } + + err = kIOReturnBadArgument; + + do { + if (inputCount != method->count0) { + IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); + continue; + } + if (*outputCount != method->count1) { + IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); + continue; + } + + func = method->func; + + switch (inputCount) { + case 6: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), ARG32(input[5])); + break; + case 5: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + &_output[0] ); + break; + case 4: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), + &_output[0], &_output[1] ); + break; + case 3: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + &_output[0], &_output[1], &_output[2] ); + break; + case 2: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), + &_output[0], &_output[1], &_output[2], + &_output[3] ); + break; + case 1: + err = (object->*func)( reference, + ARG32(input[0]), + &_output[0], &_output[1], &_output[2], + &_output[3], &_output[4] ); + break; + case 0: + err = (object->*func)( reference, + &_output[0], &_output[1], &_output[2], + &_output[3], &_output[4], &_output[5] ); + break; + + default: + IOLog("%s: Bad method table\n", object->getName()); + } + }while (false); + + for (i = 0; i < *outputCount; i++) { + output[i] = SCALAR32(_output[i]); + } + + return err; } /* Routine io_connect_method_scalarI_structureO */ -kern_return_t is_io_connect_method_scalarI_structureO( - io_object_t connect, - uint32_t index, - io_scalar_inband_t input, - mach_msg_type_number_t inputCount, - io_struct_inband_t output, - mach_msg_type_number_t * outputCount ) -{ - uint32_t i; - io_scalar_inband64_t _input; - - mach_msg_type_number_t scalar_outputCnt = 0; - mach_vm_size_t ool_output_size = 0; - - for (i = 0; i < inputCount; i++) - _input[i] = SCALAR64(input[i]); - - return (is_io_connect_method(connect, index, - _input, inputCount, - NULL, 0, - 0, 0, - output, outputCount, - NULL, &scalar_outputCnt, - 0, &ool_output_size)); -} - -kern_return_t shim_io_connect_method_scalarI_structureO( - - IOExternalMethod * 
method, - IOService * object, - const io_user_scalar_t * input, - mach_msg_type_number_t inputCount, - io_struct_inband_t output, - IOByteCount * outputCount ) -{ - IOMethod func; - IOReturn err; - - err = kIOReturnBadArgument; - - do { - if( inputCount != method->count0) - { - IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); - continue; - } - if( (kIOUCVariableStructureSize != method->count1) - && (*outputCount != method->count1)) - { - IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); - continue; - } - - func = method->func; - - switch( inputCount) { - - case 5: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), ARG32(input[4]), - output ); - break; - case 4: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), - output, (void *)outputCount ); - break; - case 3: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - output, (void *)outputCount, 0 ); - break; - case 2: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), - output, (void *)outputCount, 0, 0 ); - break; - case 1: - err = (object->*func)( ARG32(input[0]), - output, (void *)outputCount, 0, 0, 0 ); - break; - case 0: - err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 ); - break; +kern_return_t +is_io_connect_method_scalarI_structureO( + io_object_t connect, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + uint32_t i; + io_scalar_inband64_t _input; + + mach_msg_type_number_t scalar_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; - default: - IOLog("%s: Bad method table\n", object->getName()); + for (i = 0; i < inputCount; i++) { + _input[i] = SCALAR64(input[i]); } - } - while( false); - return( err); + return is_io_connect_method(connect, index, + _input, inputCount, + NULL, 0, + 0, 0, + output, outputCount, + NULL, &scalar_outputCnt, + 0, &ool_output_size); } +kern_return_t +shim_io_connect_method_scalarI_structureO( + + IOExternalMethod * method, + IOService * object, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + IOByteCount * outputCount ) +{ + IOMethod func; + IOReturn err; -kern_return_t shim_io_async_method_scalarI_structureO( - IOExternalAsyncMethod * method, - IOService * object, + err = kIOReturnBadArgument; + + do { + if (inputCount != method->count0) { + IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); + continue; + } + if ((kIOUCVariableStructureSize != method->count1) + && (*outputCount != method->count1)) { + IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); + 
DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); + continue; + } + + func = method->func; + + switch (inputCount) { + case 5: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + output ); + break; + case 4: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), + output, (void *)outputCount ); + break; + case 3: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + output, (void *)outputCount, 0 ); + break; + case 2: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), + output, (void *)outputCount, 0, 0 ); + break; + case 1: + err = (object->*func)( ARG32(input[0]), + output, (void *)outputCount, 0, 0, 0 ); + break; + case 0: + err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", object->getName()); + } + }while (false); + + return err; +} + + +kern_return_t +shim_io_async_method_scalarI_structureO( + IOExternalAsyncMethod * method, + IOService * object, mach_port_t asyncWakePort, io_user_reference_t * asyncReference, uint32_t asyncReferenceCount, - const io_user_scalar_t * input, - mach_msg_type_number_t inputCount, - io_struct_inband_t output, - mach_msg_type_number_t * outputCount ) -{ - IOAsyncMethod func; - uint32_t i; - IOReturn err; - io_async_ref_t reference; - - for (i = 0; i < asyncReferenceCount; i++) - reference[i] = REF32(asyncReference[i]); - - err = kIOReturnBadArgument; - do { - if( inputCount != method->count0) - { - IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); - continue; - } - if( (kIOUCVariableStructureSize != method->count1) - && (*outputCount != method->count1)) - { - IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); - continue; - } - - func = method->func; - - switch( inputCount) { - - case 5: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), ARG32(input[4]), - output ); - break; - case 4: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), - output, (void *)outputCount ); - break; - case 3: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - output, (void *)outputCount, 0 ); - break; - case 2: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), - output, (void *)outputCount, 0, 0 ); - break; - case 1: - err = (object->*func)( reference, - ARG32(input[0]), - output, (void *)outputCount, 0, 0, 0 ); - break; - case 0: - err = (object->*func)( reference, - output, (void *)outputCount, 0, 0, 0, 0 ); - break; - - default: - IOLog("%s: Bad method table\n", object->getName()); - } - } - while( false); - - return( err); + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + IOAsyncMethod func; + uint32_t i; + IOReturn err; + io_async_ref_t reference; + + for (i = 0; i < 
asyncReferenceCount; i++) { + reference[i] = REF32(asyncReference[i]); + } + + err = kIOReturnBadArgument; + do { + if (inputCount != method->count0) { + IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); + continue; + } + if ((kIOUCVariableStructureSize != method->count1) + && (*outputCount != method->count1)) { + IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); + continue; + } + + func = method->func; + + switch (inputCount) { + case 5: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + output ); + break; + case 4: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), + output, (void *)outputCount ); + break; + case 3: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + output, (void *)outputCount, 0 ); + break; + case 2: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), + output, (void *)outputCount, 0, 0 ); + break; + case 1: + err = (object->*func)( reference, + ARG32(input[0]), + output, (void *)outputCount, 0, 0, 0 ); + break; + case 0: + err = (object->*func)( reference, + output, (void *)outputCount, 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", object->getName()); + } + }while (false); + + return err; } /* Routine io_connect_method_scalarI_structureI */ -kern_return_t is_io_connect_method_scalarI_structureI( - io_connect_t connect, - uint32_t index, - io_scalar_inband_t input, - mach_msg_type_number_t inputCount, - io_struct_inband_t inputStruct, - mach_msg_type_number_t inputStructCount ) -{ - uint32_t i; - io_scalar_inband64_t _input; - - mach_msg_type_number_t scalar_outputCnt = 0; - mach_msg_type_number_t inband_outputCnt = 0; - mach_vm_size_t ool_output_size = 0; - - for (i = 0; i < inputCount; i++) - _input[i] = SCALAR64(input[i]); - - return (is_io_connect_method(connect, index, - _input, inputCount, - inputStruct, inputStructCount, - 0, 0, - NULL, &inband_outputCnt, - NULL, &scalar_outputCnt, - 0, &ool_output_size)); -} - -kern_return_t shim_io_connect_method_scalarI_structureI( - IOExternalMethod * method, - IOService * object, - const io_user_scalar_t * input, - mach_msg_type_number_t inputCount, - io_struct_inband_t inputStruct, - mach_msg_type_number_t inputStructCount ) -{ - IOMethod func; - IOReturn err = kIOReturnBadArgument; - - do - { - if (inputCount != method->count0) - { - IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); - continue; - } - if( (kIOUCVariableStructureSize != method->count1) - && (inputStructCount != method->count1)) - { - IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); - 
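/*
 * The do { ... } while (false) blocks in these shims are not loops: the body
 * runs exactly once, and each failed count check uses `continue` as a
 * structured jump to the single exit, leaving err at kIOReturnBadArgument.
 * The idiom in isolation (hypothetical standalone form):
 *
 *     static int checked_call(unsigned got, unsigned expected) {
 *         int err = -1;                       // assume failure
 *         do {
 *             if (got != expected) {
 *                 continue;                   // early exit, not a retry
 *             }
 *             err = 0;                        // checks passed: real work here
 *         } while (false);                    // body executes exactly once
 *         return err;                         // single return path
 *     }
 */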
DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1); - continue; - } - - func = method->func; - - switch( inputCount) { - - case 5: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), ARG32(input[4]), - inputStruct ); - break; - case 4: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2], - ARG32(input[3]), - inputStruct, (void *)(uintptr_t)inputStructCount ); - break; - case 3: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - inputStruct, (void *)(uintptr_t)inputStructCount, - 0 ); - break; - case 2: - err = (object->*func)( ARG32(input[0]), ARG32(input[1]), - inputStruct, (void *)(uintptr_t)inputStructCount, - 0, 0 ); - break; - case 1: - err = (object->*func)( ARG32(input[0]), - inputStruct, (void *)(uintptr_t)inputStructCount, - 0, 0, 0 ); - break; - case 0: - err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount, - 0, 0, 0, 0 ); - break; - - default: - IOLog("%s: Bad method table\n", object->getName()); +kern_return_t +is_io_connect_method_scalarI_structureI( + io_connect_t connect, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t inputStruct, + mach_msg_type_number_t inputStructCount ) +{ + uint32_t i; + io_scalar_inband64_t _input; + + mach_msg_type_number_t scalar_outputCnt = 0; + mach_msg_type_number_t inband_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) { + _input[i] = SCALAR64(input[i]); } - } - while (false); - return( err); + return is_io_connect_method(connect, index, + _input, inputCount, + inputStruct, inputStructCount, + 0, 0, + NULL, &inband_outputCnt, + NULL, &scalar_outputCnt, + 0, &ool_output_size); } -kern_return_t shim_io_async_method_scalarI_structureI( - IOExternalAsyncMethod * method, - IOService * object, +kern_return_t +shim_io_connect_method_scalarI_structureI( + IOExternalMethod * method, + IOService * object, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_struct_inband_t inputStruct, + mach_msg_type_number_t inputStructCount ) +{ + IOMethod func; + IOReturn err = kIOReturnBadArgument; + + do{ + if (inputCount != method->count0) { + IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); + continue; + } + if ((kIOUCVariableStructureSize != method->count1) + && (inputStructCount != method->count1)) { + IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1); + continue; + } + + func = method->func; + + switch (inputCount) { + case 5: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + inputStruct ); + break; + case 4: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2], + ARG32(input[3]), + inputStruct, (void *)(uintptr_t)inputStructCount ); + break; + case 3: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + inputStruct, (void *)(uintptr_t)inputStructCount, + 0 ); + break; + case 
2: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), + inputStruct, (void *)(uintptr_t)inputStructCount, + 0, 0 ); + break; + case 1: + err = (object->*func)( ARG32(input[0]), + inputStruct, (void *)(uintptr_t)inputStructCount, + 0, 0, 0 ); + break; + case 0: + err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount, + 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", object->getName()); + } + }while (false); + + return err; +} + +kern_return_t +shim_io_async_method_scalarI_structureI( + IOExternalAsyncMethod * method, + IOService * object, mach_port_t asyncWakePort, io_user_reference_t * asyncReference, uint32_t asyncReferenceCount, - const io_user_scalar_t * input, - mach_msg_type_number_t inputCount, - io_struct_inband_t inputStruct, - mach_msg_type_number_t inputStructCount ) -{ - IOAsyncMethod func; - uint32_t i; - IOReturn err = kIOReturnBadArgument; - io_async_ref_t reference; - - for (i = 0; i < asyncReferenceCount; i++) - reference[i] = REF32(asyncReference[i]); - - do - { - if (inputCount != method->count0) - { - IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); - continue; - } - if( (kIOUCVariableStructureSize != method->count1) - && (inputStructCount != method->count1)) - { - IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1); - continue; - } - - func = method->func; - - switch( inputCount) { - - case 5: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), ARG32(input[4]), - inputStruct ); - break; - case 4: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - ARG32(input[3]), - inputStruct, (void *)(uintptr_t)inputStructCount ); - break; - case 3: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), - inputStruct, (void *)(uintptr_t)inputStructCount, - 0 ); - break; - case 2: - err = (object->*func)( reference, - ARG32(input[0]), ARG32(input[1]), - inputStruct, (void *)(uintptr_t)inputStructCount, - 0, 0 ); - break; - case 1: - err = (object->*func)( reference, - ARG32(input[0]), - inputStruct, (void *)(uintptr_t)inputStructCount, - 0, 0, 0 ); - break; - case 0: - err = (object->*func)( reference, - inputStruct, (void *)(uintptr_t)inputStructCount, - 0, 0, 0, 0 ); - break; - - default: - IOLog("%s: Bad method table\n", object->getName()); - } - } - while (false); - - return( err); + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_struct_inband_t inputStruct, + mach_msg_type_number_t inputStructCount ) +{ + IOAsyncMethod func; + uint32_t i; + IOReturn err = kIOReturnBadArgument; + io_async_ref_t reference; + + for (i = 0; i < asyncReferenceCount; i++) { + reference[i] = REF32(asyncReference[i]); + } + + do{ + if (inputCount != method->count0) { + IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, 
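/*
 * kIOUCVariableStructureSize acts as a wildcard in these checks: a method
 * table entry declaring it skips the exact-size comparison and leaves length
 * validation to the handler itself. The predicate, in sketch form (SIZE_ANY
 * and its value are illustrative stand-ins, not the real constant):
 *
 *     #include <stdint.h>
 *     static const uint32_t SIZE_ANY = 0xffffffffu;
 *
 *     static bool countOK(uint32_t declared, uint32_t actual) {
 *         // Fixed-size entries must match exactly; wildcard entries accept
 *         // any length and re-check it inside the method body.
 *         return declared == SIZE_ANY || declared == actual;
 *     }
 */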
(uint64_t)method->count0); + continue; + } + if ((kIOUCVariableStructureSize != method->count1) + && (inputStructCount != method->count1)) { + IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1); + continue; + } + + func = method->func; + + switch (inputCount) { + case 5: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + inputStruct ); + break; + case 4: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), + inputStruct, (void *)(uintptr_t)inputStructCount ); + break; + case 3: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + inputStruct, (void *)(uintptr_t)inputStructCount, + 0 ); + break; + case 2: + err = (object->*func)( reference, + ARG32(input[0]), ARG32(input[1]), + inputStruct, (void *)(uintptr_t)inputStructCount, + 0, 0 ); + break; + case 1: + err = (object->*func)( reference, + ARG32(input[0]), + inputStruct, (void *)(uintptr_t)inputStructCount, + 0, 0, 0 ); + break; + case 0: + err = (object->*func)( reference, + inputStruct, (void *)(uintptr_t)inputStructCount, + 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", object->getName()); + } + }while (false); + + return err; } /* Routine io_connect_method_structureI_structureO */ -kern_return_t is_io_connect_method_structureI_structureO( - io_object_t connect, - uint32_t index, - io_struct_inband_t input, - mach_msg_type_number_t inputCount, - io_struct_inband_t output, - mach_msg_type_number_t * outputCount ) -{ - mach_msg_type_number_t scalar_outputCnt = 0; - mach_vm_size_t ool_output_size = 0; - - return (is_io_connect_method(connect, index, - NULL, 0, - input, inputCount, - 0, 0, - output, outputCount, - NULL, &scalar_outputCnt, - 0, &ool_output_size)); -} - -kern_return_t shim_io_connect_method_structureI_structureO( - IOExternalMethod * method, - IOService * object, - io_struct_inband_t input, - mach_msg_type_number_t inputCount, - io_struct_inband_t output, - IOByteCount * outputCount ) -{ - IOMethod func; - IOReturn err = kIOReturnBadArgument; - - do - { - if( (kIOUCVariableStructureSize != method->count0) - && (inputCount != method->count0)) - { - IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); - continue; - } - if( (kIOUCVariableStructureSize != method->count1) - && (*outputCount != method->count1)) - { - IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); - continue; - } - - func = method->func; - - if( method->count1) { - if( method->count0) { - err = (object->*func)( input, output, - (void *)(uintptr_t)inputCount, outputCount, 0, 0 ); - } else { - err = (object->*func)( output, outputCount, 0, 0, 0, 0 ); - } - } else { - err = 
(object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 ); - } - } - while( false); +kern_return_t +is_io_connect_method_structureI_structureO( + io_object_t connect, + uint32_t index, + io_struct_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + mach_msg_type_number_t scalar_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + return is_io_connect_method(connect, index, + NULL, 0, + input, inputCount, + 0, 0, + output, outputCount, + NULL, &scalar_outputCnt, + 0, &ool_output_size); +} + +kern_return_t +shim_io_connect_method_structureI_structureO( + IOExternalMethod * method, + IOService * object, + io_struct_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + IOByteCount * outputCount ) +{ + IOMethod func; + IOReturn err = kIOReturnBadArgument; + + do{ + if ((kIOUCVariableStructureSize != method->count0) + && (inputCount != method->count0)) { + IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); + continue; + } + if ((kIOUCVariableStructureSize != method->count1) + && (*outputCount != method->count1)) { + IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); + continue; + } + func = method->func; + + if (method->count1) { + if (method->count0) { + err = (object->*func)( input, output, + (void *)(uintptr_t)inputCount, outputCount, 0, 0 ); + } else { + err = (object->*func)( output, outputCount, 0, 0, 0, 0 ); + } + } else { + err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 ); + } + }while (false); - return( err); + + return err; } -kern_return_t shim_io_async_method_structureI_structureO( - IOExternalAsyncMethod * method, - IOService * object, +kern_return_t +shim_io_async_method_structureI_structureO( + IOExternalAsyncMethod * method, + IOService * object, mach_port_t asyncWakePort, io_user_reference_t * asyncReference, uint32_t asyncReferenceCount, - io_struct_inband_t input, - mach_msg_type_number_t inputCount, - io_struct_inband_t output, - mach_msg_type_number_t * outputCount ) -{ - IOAsyncMethod func; - uint32_t i; - IOReturn err; - io_async_ref_t reference; - - for (i = 0; i < asyncReferenceCount; i++) - reference[i] = REF32(asyncReference[i]); - - err = kIOReturnBadArgument; - do - { - if( (kIOUCVariableStructureSize != method->count0) - && (inputCount != method->count0)) - { - IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize); - DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); - continue; - } - if( (kIOUCVariableStructureSize != method->count1) - && (*outputCount != method->count1)) - { - IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); - 
DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); - continue; - } - - func = method->func; - - if( method->count1) { - if( method->count0) { - err = (object->*func)( reference, - input, output, - (void *)(uintptr_t)inputCount, outputCount, 0, 0 ); - } else { - err = (object->*func)( reference, - output, outputCount, 0, 0, 0, 0 ); - } - } else { - err = (object->*func)( reference, - input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 ); - } - } - while( false); - - return( err); + io_struct_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + IOAsyncMethod func; + uint32_t i; + IOReturn err; + io_async_ref_t reference; + + for (i = 0; i < asyncReferenceCount; i++) { + reference[i] = REF32(asyncReference[i]); + } + + err = kIOReturnBadArgument; + do{ + if ((kIOUCVariableStructureSize != method->count0) + && (inputCount != method->count0)) { + IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0); + continue; + } + if ((kIOUCVariableStructureSize != method->count1) + && (*outputCount != method->count1)) { + IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize); + DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1); + continue; + } + + func = method->func; + + if (method->count1) { + if (method->count0) { + err = (object->*func)( reference, + input, output, + (void *)(uintptr_t)inputCount, outputCount, 0, 0 ); + } else { + err = (object->*func)( reference, + output, outputCount, 0, 0, 0, 0 ); + } + } else { + err = (object->*func)( reference, + input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 ); + } + }while (false); + + return err; } #if !NO_KEXTD @@ -4940,372 +5258,390 @@ bool gIOKextdClearedBusy = false; #endif /* Routine io_catalog_send_data */ -kern_return_t is_io_catalog_send_data( - mach_port_t master_port, - uint32_t flag, - io_buf_ptr_t inData, - mach_msg_type_number_t inDataCount, - kern_return_t * result) +kern_return_t +is_io_catalog_send_data( + mach_port_t master_port, + uint32_t flag, + io_buf_ptr_t inData, + mach_msg_type_number_t inDataCount, + kern_return_t * result) { #if NO_KEXTD - return kIOReturnNotPrivileged; + return kIOReturnNotPrivileged; #else /* NO_KEXTD */ - OSObject * obj = 0; - vm_offset_t data; - kern_return_t kr = kIOReturnError; - - //printf("io_catalog_send_data called. flag: %d\n", flag); - - if( master_port != master_device_port) - return kIOReturnNotPrivileged; - - if( (flag != kIOCatalogRemoveKernelLinker && - flag != kIOCatalogKextdActive && - flag != kIOCatalogKextdFinishedLaunching) && - ( !inData || !inDataCount) ) - { - return kIOReturnBadArgument; - } - - if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-secure-management")) - { - OSString * taskName = IOCopyLogNameForPID(proc_selfpid()); - IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : ""); - OSSafeReleaseNULL(taskName); - // For now, fake success to not break applications relying on this function succeeding. - // See for more details. 
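/*
 * The entitlement gate in is_io_catalog_send_data() soft-fails on purpose:
 * a caller without com.apple.rootless.kext-secure-management is logged and
 * receives kIOReturnSuccess while the request is silently dropped, since a
 * hard error here would break existing clients that expect this call to
 * succeed. The shape of the pattern, with hypothetical helpers:
 *
 *     if (!entitled(current_task())) {
 *         log_not_entitled();
 *         return kIOReturnSuccess;            // deliberate no-op, not an error
 *     }
 */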
- return kIOReturnSuccess; - } - - if (inData) { - vm_map_offset_t map_data; - - if( inDataCount > sizeof(io_struct_inband_t) * 1024) - return( kIOReturnMessageTooLarge); - - kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData); + OSObject * obj = 0; + vm_offset_t data; + kern_return_t kr = kIOReturnError; + + //printf("io_catalog_send_data called. flag: %d\n", flag); + + if (master_port != master_device_port) { + return kIOReturnNotPrivileged; + } + + if ((flag != kIOCatalogRemoveKernelLinker && + flag != kIOCatalogKextdActive && + flag != kIOCatalogKextdFinishedLaunching) && + (!inData || !inDataCount)) { + return kIOReturnBadArgument; + } + + if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-secure-management")) { + OSString * taskName = IOCopyLogNameForPID(proc_selfpid()); + IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : ""); + OSSafeReleaseNULL(taskName); + // For now, fake success to not break applications relying on this function succeeding. + // See for more details. + return kIOReturnSuccess; + } + + if (inData) { + vm_map_offset_t map_data; + + if (inDataCount > sizeof(io_struct_inband_t) * 1024) { + return kIOReturnMessageTooLarge; + } + + kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData); data = CAST_DOWN(vm_offset_t, map_data); - if( kr != KERN_SUCCESS) - return kr; - - // must return success after vm_map_copyout() succeeds - - if( inDataCount ) { - obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount); - vm_deallocate( kernel_map, data, inDataCount ); - if( !obj) { - *result = kIOReturnNoMemory; - return( KERN_SUCCESS); - } - } - } - - switch ( flag ) { - case kIOCatalogResetDrivers: - case kIOCatalogResetDriversNoMatch: { - OSArray * array; - - array = OSDynamicCast(OSArray, obj); - if (array) { - if ( !gIOCatalogue->resetAndAddDrivers(array, - flag == kIOCatalogResetDrivers) ) { - - kr = kIOReturnError; - } - } else { - kr = kIOReturnBadArgument; - } - } - break; - - case kIOCatalogAddDrivers: - case kIOCatalogAddDriversNoMatch: { - OSArray * array; - - array = OSDynamicCast(OSArray, obj); - if ( array ) { - if ( !gIOCatalogue->addDrivers( array , - flag == kIOCatalogAddDrivers) ) { - kr = kIOReturnError; - } - } - else { - kr = kIOReturnBadArgument; - } - } - break; - - case kIOCatalogRemoveDrivers: - case kIOCatalogRemoveDriversNoMatch: { - OSDictionary * dict; - - dict = OSDynamicCast(OSDictionary, obj); - if ( dict ) { - if ( !gIOCatalogue->removeDrivers( dict, - flag == kIOCatalogRemoveDrivers ) ) { - kr = kIOReturnError; - } - } - else { - kr = kIOReturnBadArgument; - } - } - break; - - case kIOCatalogStartMatching: { - OSDictionary * dict; - - dict = OSDynamicCast(OSDictionary, obj); - if ( dict ) { - if ( !gIOCatalogue->startMatching( dict ) ) { - kr = kIOReturnError; - } - } - else { - kr = kIOReturnBadArgument; - } - } - break; - - case kIOCatalogRemoveKernelLinker: - kr = KERN_NOT_SUPPORTED; - break; - - case kIOCatalogKextdActive: + if (kr != KERN_SUCCESS) { + return kr; + } + + // must return success after vm_map_copyout() succeeds + + if (inDataCount) { + obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount); + vm_deallocate( kernel_map, data, inDataCount ); + if (!obj) { + *result = kIOReturnNoMemory; + return KERN_SUCCESS; + } + } + } + + switch (flag) { + case kIOCatalogResetDrivers: + case kIOCatalogResetDriversNoMatch: { + OSArray * array; + + array = OSDynamicCast(OSArray, obj); + if (array) { + if 
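/*
 * The "must return success after vm_map_copyout() succeeds" comment reflects
 * Mach copy-object ownership: a successful vm_map_copyout() consumes the
 * vm_map_copy_t, so a MIG-level failure afterwards would make the sender
 * dispose of memory this routine already took. Later errors therefore travel
 * out of band through *result while the routine itself reports KERN_SUCCESS,
 * exactly as the unserialize failure path above does:
 *
 *     if (!obj) {                             // parse failed after copyout
 *         *result = kIOReturnNoMemory;        // error rides in the reply arg
 *         return KERN_SUCCESS;                // MIG-level success regardless
 *     }
 */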
(!gIOCatalogue->resetAndAddDrivers(array, + flag == kIOCatalogResetDrivers)) { + kr = kIOReturnError; + } + } else { + kr = kIOReturnBadArgument; + } + } + break; + + case kIOCatalogAddDrivers: + case kIOCatalogAddDriversNoMatch: { + OSArray * array; + + array = OSDynamicCast(OSArray, obj); + if (array) { + if (!gIOCatalogue->addDrivers( array, + flag == kIOCatalogAddDrivers)) { + kr = kIOReturnError; + } + } else { + kr = kIOReturnBadArgument; + } + } + break; + + case kIOCatalogRemoveDrivers: + case kIOCatalogRemoveDriversNoMatch: { + OSDictionary * dict; + + dict = OSDynamicCast(OSDictionary, obj); + if (dict) { + if (!gIOCatalogue->removeDrivers( dict, + flag == kIOCatalogRemoveDrivers )) { + kr = kIOReturnError; + } + } else { + kr = kIOReturnBadArgument; + } + } + break; + + case kIOCatalogStartMatching: { + OSDictionary * dict; + + dict = OSDynamicCast(OSDictionary, obj); + if (dict) { + if (!gIOCatalogue->startMatching( dict )) { + kr = kIOReturnError; + } + } else { + kr = kIOReturnBadArgument; + } + } + break; + + case kIOCatalogRemoveKernelLinker: + kr = KERN_NOT_SUPPORTED; + break; + + case kIOCatalogKextdActive: #if !NO_KEXTD - IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0); - OSKext::setKextdActive(); + IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0); + OSKext::setKextdActive(); - /* Dump all nonloaded startup extensions; kextd will now send them - * down on request. - */ - OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false); + /* Dump all nonloaded startup extensions; kextd will now send them + * down on request. + */ + OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false); #endif - kr = kIOReturnSuccess; - break; + kr = kIOReturnSuccess; + break; - case kIOCatalogKextdFinishedLaunching: { + case kIOCatalogKextdFinishedLaunching: { #if !NO_KEXTD - if (!gIOKextdClearedBusy) { - IOService * serviceRoot = IOService::getServiceRoot(); - if (serviceRoot) { - IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0); - serviceRoot->adjustBusy(-1); - gIOKextdClearedBusy = true; - } - } + if (!gIOKextdClearedBusy) { + IOService * serviceRoot = IOService::getServiceRoot(); + if (serviceRoot) { + IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0); + serviceRoot->adjustBusy(-1); + gIOKextdClearedBusy = true; + } + } #endif - kr = kIOReturnSuccess; - } - break; + kr = kIOReturnSuccess; + } + break; - default: - kr = kIOReturnBadArgument; - break; - } + default: + kr = kIOReturnBadArgument; + break; + } - if (obj) obj->release(); + if (obj) { + obj->release(); + } - *result = kr; - return( KERN_SUCCESS); + *result = kr; + return KERN_SUCCESS; #endif /* NO_KEXTD */ } /* Routine io_catalog_terminate */ -kern_return_t is_io_catalog_terminate( +kern_return_t +is_io_catalog_terminate( mach_port_t master_port, uint32_t flag, io_name_t name ) { - kern_return_t kr; + kern_return_t kr; - if( master_port != master_device_port ) - return kIOReturnNotPrivileged; + if (master_port != master_device_port) { + return kIOReturnNotPrivileged; + } - kr = IOUserClient::clientHasPrivilege( (void *) current_task(), - kIOClientPrivilegeAdministrator ); - if( kIOReturnSuccess != kr) - return( kr ); + kr = IOUserClient::clientHasPrivilege((void *) current_task(), + kIOClientPrivilegeAdministrator ); + if (kIOReturnSuccess != kr) { + return kr; + } - switch ( flag ) { + switch (flag) { #if !defined(SECURE_KERNEL) - case kIOCatalogServiceTerminate: - OSIterator * iter; - IOService * service; - - iter = IORegistryIterator::iterateOver(gIOServicePlane, - kIORegistryIterateRecursively); - if ( !iter ) - 
return kIOReturnNoMemory;
-
-        do {
-            iter->reset();
-            while( (service = (IOService *)iter->getNextObject()) ) {
-                if( service->metaCast(name)) {
-                    if ( !service->terminate( kIOServiceRequired
-                                        | kIOServiceSynchronous) ) {
-                        kr = kIOReturnUnsupported;
-                        break;
-                    }
-                }
-            }
-        } while( !service && !iter->isValid());
-        iter->release();
-        break;
-
-    case kIOCatalogModuleUnload:
-    case kIOCatalogModuleTerminate:
-        kr = gIOCatalogue->terminateDriversForModule(name,
-                flag == kIOCatalogModuleUnload);
-        break;
+    case kIOCatalogServiceTerminate:
+        OSIterator * iter;
+        IOService * service;
+
+        iter = IORegistryIterator::iterateOver(gIOServicePlane,
+            kIORegistryIterateRecursively);
+        if (!iter) {
+            return kIOReturnNoMemory;
+        }
+
+        do {
+            iter->reset();
+            while ((service = (IOService *)iter->getNextObject())) {
+                if (service->metaCast(name)) {
+                    if (!service->terminate( kIOServiceRequired
+                        | kIOServiceSynchronous)) {
+                        kr = kIOReturnUnsupported;
+                        break;
+                    }
+                }
+            }
+        } while (!service && !iter->isValid());
+        iter->release();
+        break;
+
+    case kIOCatalogModuleUnload:
+    case kIOCatalogModuleTerminate:
+        kr = gIOCatalogue->terminateDriversForModule(name,
+            flag == kIOCatalogModuleUnload);
+        break;
 #endif
-    default:
-        kr = kIOReturnBadArgument;
-        break;
-    }
+    default:
+        kr = kIOReturnBadArgument;
+        break;
+    }

-    return( kr );
+    return kr;
 }

 /* Routine io_catalog_get_data */
-kern_return_t is_io_catalog_get_data(
-    mach_port_t     master_port,
-    uint32_t        flag,
-    io_buf_ptr_t    *outData,
-    mach_msg_type_number_t  *outDataCount)
-{
-    kern_return_t kr = kIOReturnSuccess;
-    OSSerialize * s;
-
-    if( master_port != master_device_port)
-        return kIOReturnNotPrivileged;
-
-    //printf("io_catalog_get_data called. flag: %d\n", flag);
-
-    s = OSSerialize::withCapacity(4096);
-    if ( !s )
-        return kIOReturnNoMemory;
-
-    kr = gIOCatalogue->serializeData(flag, s);
-
-    if ( kr == kIOReturnSuccess ) {
-        vm_offset_t data;
-        vm_map_copy_t copy;
-        vm_size_t size;
-
-        size = s->getLength();
-        kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
-        if ( kr == kIOReturnSuccess ) {
-            bcopy(s->text(), (void *)data, size);
-            kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
-                (vm_map_size_t)size, true, &copy);
-            *outData = (char *)copy;
-            *outDataCount = size;
-        }
-    }
-
-    s->release();
-
-    return kr;
+kern_return_t
+is_io_catalog_get_data(
+    mach_port_t             master_port,
+    uint32_t                flag,
+    io_buf_ptr_t            *outData,
+    mach_msg_type_number_t  *outDataCount)
+{
+    kern_return_t kr = kIOReturnSuccess;
+    OSSerialize * s;
+
+    if (master_port != master_device_port) {
+        return kIOReturnNotPrivileged;
+    }
+
+    //printf("io_catalog_get_data called. 
flag: %d\n", flag); + + s = OSSerialize::withCapacity(4096); + if (!s) { + return kIOReturnNoMemory; + } + + kr = gIOCatalogue->serializeData(flag, s); + + if (kr == kIOReturnSuccess) { + vm_offset_t data; + vm_map_copy_t copy; + vm_size_t size; + + size = s->getLength(); + kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT); + if (kr == kIOReturnSuccess) { + bcopy(s->text(), (void *)data, size); + kr = vm_map_copyin(kernel_map, (vm_map_address_t)data, + (vm_map_size_t)size, true, ©); + *outData = (char *)copy; + *outDataCount = size; + } + } + + s->release(); + + return kr; } /* Routine io_catalog_get_gen_count */ -kern_return_t is_io_catalog_get_gen_count( - mach_port_t master_port, - uint32_t *genCount) +kern_return_t +is_io_catalog_get_gen_count( + mach_port_t master_port, + uint32_t *genCount) { - if( master_port != master_device_port) - return kIOReturnNotPrivileged; + if (master_port != master_device_port) { + return kIOReturnNotPrivileged; + } - //printf("io_catalog_get_gen_count called.\n"); + //printf("io_catalog_get_gen_count called.\n"); + + if (!genCount) { + return kIOReturnBadArgument; + } - if ( !genCount ) - return kIOReturnBadArgument; + *genCount = gIOCatalogue->getGenerationCount(); - *genCount = gIOCatalogue->getGenerationCount(); - - return kIOReturnSuccess; + return kIOReturnSuccess; } /* Routine io_catalog_module_loaded. * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used. */ -kern_return_t is_io_catalog_module_loaded( - mach_port_t master_port, - io_name_t name) +kern_return_t +is_io_catalog_module_loaded( + mach_port_t master_port, + io_name_t name) { - if( master_port != master_device_port) - return kIOReturnNotPrivileged; + if (master_port != master_device_port) { + return kIOReturnNotPrivileged; + } - //printf("io_catalog_module_loaded called. name %s\n", name); - - if ( !name ) - return kIOReturnBadArgument; - - gIOCatalogue->moduleHasLoaded(name); - - return kIOReturnSuccess; + //printf("io_catalog_module_loaded called. 
name %s\n", name); + + if (!name) { + return kIOReturnBadArgument; + } + + gIOCatalogue->moduleHasLoaded(name); + + return kIOReturnSuccess; } -kern_return_t is_io_catalog_reset( - mach_port_t master_port, - uint32_t flag) +kern_return_t +is_io_catalog_reset( + mach_port_t master_port, + uint32_t flag) { - if( master_port != master_device_port) - return kIOReturnNotPrivileged; + if (master_port != master_device_port) { + return kIOReturnNotPrivileged; + } - switch ( flag ) { - case kIOCatalogResetDefault: - gIOCatalogue->reset(); - break; + switch (flag) { + case kIOCatalogResetDefault: + gIOCatalogue->reset(); + break; + + default: + return kIOReturnBadArgument; + } - default: - return kIOReturnBadArgument; - } - - return kIOReturnSuccess; + return kIOReturnSuccess; } -kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args) +kern_return_t +iokit_user_client_trap(struct iokit_user_client_trap_args *args) { - kern_return_t result = kIOReturnBadArgument; - IOUserClient *userClient; + kern_return_t result = kIOReturnBadArgument; + IOUserClient *userClient; - if ((userClient = OSDynamicCast(IOUserClient, - iokit_lookup_connect_ref_current_task((mach_port_name_t)(uintptr_t)args->userClientRef)))) { - IOExternalTrap *trap; - IOService *target = NULL; + if ((userClient = OSDynamicCast(IOUserClient, + iokit_lookup_connect_ref_current_task((mach_port_name_t)(uintptr_t)args->userClientRef)))) { + IOExternalTrap *trap; + IOService *target = NULL; - trap = userClient->getTargetAndTrapForIndex(&target, args->index); + trap = userClient->getTargetAndTrapForIndex(&target, args->index); - if (trap && target) { - IOTrap func; + if (trap && target) { + IOTrap func; - func = trap->func; + func = trap->func; - if (func) { - result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6); - } - } + if (func) { + result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6); + } + } - iokit_remove_connect_reference(userClient); - } + iokit_remove_connect_reference(userClient); + } - return result; + return result; } /* Routine io_device_tree_entry_exists_with_name */ -kern_return_t is_io_device_tree_entry_exists_with_name( +kern_return_t +is_io_device_tree_entry_exists_with_name( mach_port_t master_port, io_name_t name, boolean_t *exists ) { OSCollectionIterator *iter; - if (master_port != master_device_port) - return (kIOReturnNotPrivileged); + if (master_port != master_device_port) { + return kIOReturnNotPrivileged; + } iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name); *exists = iter && iter->getNextObject(); @@ -5313,163 +5649,154 @@ kern_return_t is_io_device_tree_entry_exists_with_name( return kIOReturnSuccess; } - } /* extern "C" */ -IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args, - IOExternalMethodDispatch * dispatch, OSObject * target, void * reference ) -{ - IOReturn err; - IOService * object; - IOByteCount structureOutputSize; - - if (dispatch) - { - uint32_t count; - count = dispatch->checkScalarInputCount; - if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) - { - return (kIOReturnBadArgument); - } - - count = dispatch->checkStructureInputSize; - if ((kIOUCVariableStructureSize != count) - && (count != ((args->structureInputDescriptor) - ? 
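/*
 * When a dispatch table entry is supplied, externalMethod() validates all
 * four declared counts/sizes against the incoming arguments before calling
 * the handler, so per-selector argument checking lives in data rather than
 * in every method. A minimal sketch of such a table-driven check (Dispatch
 * and ANY are hypothetical, not the IOKit definitions):
 *
 *     #include <stdint.h>
 *     static const uint32_t ANY = 0xffffffffu;    // variable-size wildcard
 *
 *     struct Dispatch {
 *         int      (*fn)(void *target, void *args);
 *         uint32_t scalarIn, structIn, scalarOut, structOut;
 *     };
 *
 *     static bool argsOK(const Dispatch &d, uint32_t si, uint32_t sti,
 *         uint32_t so, uint32_t sto) {
 *         return (d.scalarIn  == ANY || d.scalarIn  == si)  &&
 *                (d.structIn  == ANY || d.structIn  == sti) &&
 *                (d.scalarOut == ANY || d.scalarOut == so)  &&
 *                (d.structOut == ANY || d.structOut == sto);
 *     }
 */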
args->structureInputDescriptor->getLength() : args->structureInputSize))) - { - return (kIOReturnBadArgument); - } - - count = dispatch->checkScalarOutputCount; - if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) - { - return (kIOReturnBadArgument); - } - - count = dispatch->checkStructureOutputSize; - if ((kIOUCVariableStructureSize != count) - && (count != ((args->structureOutputDescriptor) - ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) - { - return (kIOReturnBadArgument); - } - - if (dispatch->function) - err = (*dispatch->function)(target, reference, args); - else - err = kIOReturnNoCompletion; /* implementator can dispatch */ - - return (err); - } - - - // pre-Leopard API's don't do ool structs - if (args->structureInputDescriptor || args->structureOutputDescriptor) - { - err = kIOReturnIPCError; - return (err); - } - - structureOutputSize = args->structureOutputSize; - - if (args->asyncWakePort) - { - IOExternalAsyncMethod * method; - object = 0; - if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object ) - return (kIOReturnUnsupported); - - if (kIOUCForegroundOnly & method->flags) - { - if (task_is_gpu_denied(current_task())) - return (kIOReturnNotPermitted); - } - - switch (method->flags & kIOUCTypeMask) - { - case kIOUCScalarIStructI: - err = shim_io_async_method_scalarI_structureI( method, object, - args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, - args->scalarInput, args->scalarInputCount, - (char *)args->structureInput, args->structureInputSize ); - break; +IOReturn +IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args, + IOExternalMethodDispatch * dispatch, OSObject * target, void * reference ) +{ + IOReturn err; + IOService * object; + IOByteCount structureOutputSize; + + if (dispatch) { + uint32_t count; + count = dispatch->checkScalarInputCount; + if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) { + return kIOReturnBadArgument; + } - case kIOUCScalarIScalarO: - err = shim_io_async_method_scalarI_scalarO( method, object, - args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, - args->scalarInput, args->scalarInputCount, - args->scalarOutput, &args->scalarOutputCount ); - break; + count = dispatch->checkStructureInputSize; + if ((kIOUCVariableStructureSize != count) + && (count != ((args->structureInputDescriptor) + ? args->structureInputDescriptor->getLength() : args->structureInputSize))) { + return kIOReturnBadArgument; + } - case kIOUCScalarIStructO: - err = shim_io_async_method_scalarI_structureO( method, object, - args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, - args->scalarInput, args->scalarInputCount, - (char *) args->structureOutput, &args->structureOutputSize ); - break; + count = dispatch->checkScalarOutputCount; + if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) { + return kIOReturnBadArgument; + } + count = dispatch->checkStructureOutputSize; + if ((kIOUCVariableStructureSize != count) + && (count != ((args->structureOutputDescriptor) + ? 
args->structureOutputDescriptor->getLength() : args->structureOutputSize))) { + return kIOReturnBadArgument; + } - case kIOUCStructIStructO: - err = shim_io_async_method_structureI_structureO( method, object, - args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, - (char *)args->structureInput, args->structureInputSize, - (char *) args->structureOutput, &args->structureOutputSize ); - break; + if (dispatch->function) { + err = (*dispatch->function)(target, reference, args); + } else { + err = kIOReturnNoCompletion; /* implementator can dispatch */ + } + return err; + } - default: - err = kIOReturnBadArgument; - break; + + // pre-Leopard API's don't do ool structs + if (args->structureInputDescriptor || args->structureOutputDescriptor) { + err = kIOReturnIPCError; + return err; } - } - else - { - IOExternalMethod * method; - object = 0; - if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object ) - return (kIOReturnUnsupported); - - if (kIOUCForegroundOnly & method->flags) - { - if (task_is_gpu_denied(current_task())) - return (kIOReturnNotPermitted); - } - - switch (method->flags & kIOUCTypeMask) - { - case kIOUCScalarIStructI: - err = shim_io_connect_method_scalarI_structureI( method, object, - args->scalarInput, args->scalarInputCount, - (char *) args->structureInput, args->structureInputSize ); - break; - case kIOUCScalarIScalarO: - err = shim_io_connect_method_scalarI_scalarO( method, object, - args->scalarInput, args->scalarInputCount, - args->scalarOutput, &args->scalarOutputCount ); - break; + structureOutputSize = args->structureOutputSize; - case kIOUCScalarIStructO: - err = shim_io_connect_method_scalarI_structureO( method, object, - args->scalarInput, args->scalarInputCount, - (char *) args->structureOutput, &structureOutputSize ); - break; + if (args->asyncWakePort) { + IOExternalAsyncMethod * method; + object = 0; + if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) { + return kIOReturnUnsupported; + } + if (kIOUCForegroundOnly & method->flags) { + if (task_is_gpu_denied(current_task())) { + return kIOReturnNotPermitted; + } + } - case kIOUCStructIStructO: - err = shim_io_connect_method_structureI_structureO( method, object, - (char *) args->structureInput, args->structureInputSize, - (char *) args->structureOutput, &structureOutputSize ); - break; + switch (method->flags & kIOUCTypeMask) { + case kIOUCScalarIStructI: + err = shim_io_async_method_scalarI_structureI( method, object, + args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, + args->scalarInput, args->scalarInputCount, + (char *)args->structureInput, args->structureInputSize ); + break; + + case kIOUCScalarIScalarO: + err = shim_io_async_method_scalarI_scalarO( method, object, + args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, + args->scalarInput, args->scalarInputCount, + args->scalarOutput, &args->scalarOutputCount ); + break; + + case kIOUCScalarIStructO: + err = shim_io_async_method_scalarI_structureO( method, object, + args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, + args->scalarInput, args->scalarInputCount, + (char *) args->structureOutput, &args->structureOutputSize ); + break; + + + case kIOUCStructIStructO: + err = shim_io_async_method_structureI_structureO( method, object, + args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, + (char *)args->structureInput, args->structureInputSize, + (char *) args->structureOutput, &args->structureOutputSize ); + break; + + default: 
+ err = kIOReturnBadArgument; + break; + } + } else { + IOExternalMethod * method; + object = 0; + if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) { + return kIOReturnUnsupported; + } - default: - err = kIOReturnBadArgument; - break; + if (kIOUCForegroundOnly & method->flags) { + if (task_is_gpu_denied(current_task())) { + return kIOReturnNotPermitted; + } + } + + switch (method->flags & kIOUCTypeMask) { + case kIOUCScalarIStructI: + err = shim_io_connect_method_scalarI_structureI( method, object, + args->scalarInput, args->scalarInputCount, + (char *) args->structureInput, args->structureInputSize ); + break; + + case kIOUCScalarIScalarO: + err = shim_io_connect_method_scalarI_scalarO( method, object, + args->scalarInput, args->scalarInputCount, + args->scalarOutput, &args->scalarOutputCount ); + break; + + case kIOUCScalarIStructO: + err = shim_io_connect_method_scalarI_structureO( method, object, + args->scalarInput, args->scalarInputCount, + (char *) args->structureOutput, &structureOutputSize ); + break; + + + case kIOUCStructIStructO: + err = shim_io_connect_method_structureI_structureO( method, object, + (char *) args->structureInput, args->structureInputSize, + (char *) args->structureOutput, &structureOutputSize ); + break; + + default: + err = kIOReturnBadArgument; + break; + } } - } - args->structureOutputSize = structureOutputSize; + args->structureOutputSize = structureOutputSize; - return (err); + return err; } #if __LP64__ @@ -5493,4 +5820,3 @@ OSMetaClassDefineReservedUnused(IOUserClient, 12); OSMetaClassDefineReservedUnused(IOUserClient, 13); OSMetaClassDefineReservedUnused(IOUserClient, 14); OSMetaClassDefineReservedUnused(IOUserClient, 15); - diff --git a/iokit/Kernel/IOWorkLoop.cpp b/iokit/Kernel/IOWorkLoop.cpp index 74efbd0cd..a9aff9f93 100644 --- a/iokit/Kernel/IOWorkLoop.cpp +++ b/iokit/Kernel/IOWorkLoop.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -58,16 +58,25 @@ OSMetaClassDefineReservedUnused(IOWorkLoop, 6); OSMetaClassDefineReservedUnused(IOWorkLoop, 7); enum IOWorkLoopState { kLoopRestart = 0x1, kLoopTerminate = 0x2 }; -static inline void SETP(void *addr, unsigned int flag) - { unsigned char *num = (unsigned char *) addr; *num |= flag; } -static inline void CLRP(void *addr, unsigned int flag) - { unsigned char *num = (unsigned char *) addr; *num &= ~flag; } -static inline bool ISSETP(void *addr, unsigned int flag) - { unsigned char *num = (unsigned char *) addr; return (*num & flag) != 0; } +static inline void +SETP(void *addr, unsigned int flag) +{ + unsigned char *num = (unsigned char *) addr; *num |= flag; +} +static inline void +CLRP(void *addr, unsigned int flag) +{ + unsigned char *num = (unsigned char *) addr; *num &= ~flag; +} +static inline bool +ISSETP(void *addr, unsigned int flag) +{ + unsigned char *num = (unsigned char *) addr; return (*num & flag) != 0; +} #define fFlags loopRestart -#define passiveEventChain reserved->passiveEventChain +#define passiveEventChain reserved->passiveEventChain #if IOKITSTATS @@ -79,18 +88,18 @@ do { \ #define IOStatisticsUnregisterCounter() \ do { \ if (reserved) \ - IOStatistics::unregisterWorkLoop(reserved->counter); \ + IOStatistics::unregisterWorkLoop(reserved->counter); \ } while(0) #define IOStatisticsOpenGate() \ do { \ IOStatistics::countWorkLoopOpenGate(reserved->counter); \ - if (reserved->lockInterval) lockTime(); \ + if (reserved->lockInterval) lockTime(); \ } while(0) #define IOStatisticsCloseGate() \ do { \ IOStatistics::countWorkLoopCloseGate(reserved->counter); \ - if (reserved->lockInterval) reserved->lockTime = mach_absolute_time(); \ + if (reserved->lockInterval) reserved->lockTime = mach_absolute_time(); \ } while(0) #define IOStatisticsAttachEventSource() \ @@ -114,93 +123,100 @@ do { \ #endif /* IOKITSTATS */ -bool IOWorkLoop::init() +bool +IOWorkLoop::init() { - // The super init and gateLock allocation MUST be done first. - if ( !super::init() ) - return false; - + // The super init and gateLock allocation MUST be done first. + if (!super::init()) { + return false; + } + // Allocate our ExpansionData if it hasn't been allocated already. - if ( !reserved ) - { - reserved = IONew(ExpansionData,1); - if ( !reserved ) + if (!reserved) { + reserved = IONew(ExpansionData, 1); + if (!reserved) { + return false; + } + + bzero(reserved, sizeof(ExpansionData)); + } + + if (gateLock == NULL) { + if (!(gateLock = IORecursiveLockAlloc())) { return false; - - bzero(reserved,sizeof(ExpansionData)); + } + } + + if (workToDoLock == NULL) { + if (!(workToDoLock = IOSimpleLockAlloc())) { + return false; + } + IOSimpleLockInit(workToDoLock); + workToDo = false; + } + + IOStatisticsRegisterCounter(); + + if (controlG == NULL) { + controlG = IOCommandGate::commandGate( + this, + OSMemberFunctionCast( + IOCommandGate::Action, + this, + &IOWorkLoop::_maintRequest)); + + if (!controlG) { + return false; + } + // Point the controlGate at the workLoop. Usually addEventSource + // does this automatically. The problem is in this case addEventSource + // uses the control gate and it has to be bootstrapped. 
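/*
 * This is a bootstrap inversion: addEventSource() normally wires an event
 * source to the loop itself, but it does so by running a command through the
 * control gate, and the control gate is the very event source being added.
 * Hence the manual wiring first; the ordering, in sketch form:
 *
 *     controlG->setWorkLoop(this);            // gate usable before it's added
 *     addEventSource(controlG);               // safe: runCommand has a loop
 */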
+ controlG->setWorkLoop(this); + if (addEventSource(controlG) != kIOReturnSuccess) { + return false; + } + } + + if (workThread == NULL) { + thread_continue_t cptr = OSMemberFunctionCast( + thread_continue_t, + this, + &IOWorkLoop::threadMain); + if (KERN_SUCCESS != kernel_thread_start(cptr, this, &workThread)) { + return false; + } } - if ( gateLock == NULL ) { - if ( !( gateLock = IORecursiveLockAlloc()) ) - return false; - } - - if ( workToDoLock == NULL ) { - if ( !(workToDoLock = IOSimpleLockAlloc()) ) - return false; - IOSimpleLockInit(workToDoLock); - workToDo = false; - } - - IOStatisticsRegisterCounter(); - - if ( controlG == NULL ) { - controlG = IOCommandGate::commandGate( - this, - OSMemberFunctionCast( - IOCommandGate::Action, - this, - &IOWorkLoop::_maintRequest)); - - if ( !controlG ) - return false; - // Point the controlGate at the workLoop. Usually addEventSource - // does this automatically. The problem is in this case addEventSource - // uses the control gate and it has to be bootstrapped. - controlG->setWorkLoop(this); - if (addEventSource(controlG) != kIOReturnSuccess) - return false; - } - - if ( workThread == NULL ) { - thread_continue_t cptr = OSMemberFunctionCast( - thread_continue_t, - this, - &IOWorkLoop::threadMain); - if (KERN_SUCCESS != kernel_thread_start(cptr, this, &workThread)) - return false; - } - - (void) thread_set_tag(workThread, THREAD_TAG_IOWORKLOOP); - return true; + (void) thread_set_tag(workThread, THREAD_TAG_IOWORKLOOP); + return true; } IOWorkLoop * IOWorkLoop::workLoop() { - return IOWorkLoop::workLoopWithOptions(0); + return IOWorkLoop::workLoopWithOptions(0); } IOWorkLoop * IOWorkLoop::workLoopWithOptions(IOOptionBits options) { IOWorkLoop *me = new IOWorkLoop; - + if (me && options) { - me->reserved = IONew(ExpansionData,1); + me->reserved = IONew(ExpansionData, 1); if (!me->reserved) { me->release(); return 0; } - bzero(me->reserved,sizeof(ExpansionData)); + bzero(me->reserved, sizeof(ExpansionData)); me->reserved->options = options; } - + if (me && !me->init()) { me->release(); return 0; } - + return me; } @@ -208,152 +224,170 @@ IOWorkLoop::workLoopWithOptions(IOOptionBits options) // First when the atomic retainCount transitions from 1 -> 0 // Secondly when the work loop itself is commiting hari kari // Hence the each leg of the free must be single threaded. -void IOWorkLoop::free() +void +IOWorkLoop::free() { - if (workThread) { - IOInterruptState is; + if (workThread) { + IOInterruptState is; - // If we are here then we must be trying to shut down this work loop - // in this case disable all of the event source, mark the loop - // as terminating and wakeup the work thread itself and return - // Note: we hold the gate across the entire operation mainly for the - // benefit of our event sources so we can disable them cleanly. - closeGate(); + // If we are here then we must be trying to shut down this work loop + // in this case disable all of the event source, mark the loop + // as terminating and wakeup the work thread itself and return + // Note: we hold the gate across the entire operation mainly for the + // benefit of our event sources so we can disable them cleanly. 
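/*
 * As the comment above free() explains, free() runs on two distinct legs:
 * once when the retain count drops to zero (workThread still set), which
 * only signals termination, and once when the work thread finishes tearing
 * itself down (workThread cleared), which releases the resources. The
 * two-leg shape, with hypothetical helper names:
 *
 *     void Loop::free() {
 *         if (workThread) {                   // leg 1: just wake it to die
 *             signalTerminate();
 *             return;                         // thread re-enters free() later
 *         }
 *         releaseResources();                 // leg 2: actual teardown
 *     }
 */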
+ closeGate(); - disableAllEventSources(); + disableAllEventSources(); - is = IOSimpleLockLockDisableInterrupt(workToDoLock); - SETP(&fFlags, kLoopTerminate); - thread_wakeup_thread((void *) &workToDo, workThread); - IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); + is = IOSimpleLockLockDisableInterrupt(workToDoLock); + SETP(&fFlags, kLoopTerminate); + thread_wakeup_thread((void *) &workToDo, workThread); + IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); - openGate(); - } - else /* !workThread */ { - IOEventSource *event, *next; - - for (event = eventChain; event; event = next) { - next = event->getNext(); - event->setWorkLoop(0); - event->setNext(0); - event->release(); - } - eventChain = 0; - - for (event = passiveEventChain; event; event = next) { - next = event->getNext(); - event->setWorkLoop(0); - event->setNext(0); - event->release(); - } - passiveEventChain = 0; - - // Either we have a partial initialization to clean up - // or the workThread itself is performing hari-kari. - // Either way clean up all of our resources and return. - - if (controlG) { - controlG->workLoop = 0; - controlG->release(); - controlG = 0; - } + openGate(); + } else { /* !workThread */ + IOEventSource *event, *next; - if (workToDoLock) { - IOSimpleLockFree(workToDoLock); - workToDoLock = 0; - } + for (event = eventChain; event; event = next) { + next = event->getNext(); + event->setWorkLoop(0); + event->setNext(0); + event->release(); + } + eventChain = 0; - if (gateLock) { - IORecursiveLockFree(gateLock); - gateLock = 0; - } - - IOStatisticsUnregisterCounter(); - - if (reserved) { - IODelete(reserved, ExpansionData, 1); - reserved = 0; - } + for (event = passiveEventChain; event; event = next) { + next = event->getNext(); + event->setWorkLoop(0); + event->setNext(0); + event->release(); + } + passiveEventChain = 0; + + // Either we have a partial initialization to clean up + // or the workThread itself is performing hari-kari. + // Either way clean up all of our resources and return. 
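/*
 * [Editor's sketch — not part of this patch; fWorkLoop and fGate are
 * hypothetical driver members.] The second leg below releases everything the
 * loop still owns. A driver's own teardown mirrors it from the outside:
 * remove and release its event sources first, then drop the loop, whose
 * free() performs the kLoopTerminate handshake described above.
 */
#include <IOKit/IOWorkLoop.h>
#include <IOKit/IOCommandGate.h>

static void
exampleTeardown(IOWorkLoop *&fWorkLoop, IOCommandGate *&fGate)
{
	if (fGate) {
		fWorkLoop->removeEventSource(fGate);   // detach before releasing
		fGate->release();
		fGate = NULL;
	}
	if (fWorkLoop) {
		fWorkLoop->release();                  // last retain triggers IOWorkLoop::free()
		fWorkLoop = NULL;
	}
}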
+ + if (controlG) { + controlG->workLoop = 0; + controlG->release(); + controlG = 0; + } + + if (workToDoLock) { + IOSimpleLockFree(workToDoLock); + workToDoLock = 0; + } + + if (gateLock) { + IORecursiveLockFree(gateLock); + gateLock = 0; + } - super::free(); - } + IOStatisticsUnregisterCounter(); + + if (reserved) { + IODelete(reserved, ExpansionData, 1); + reserved = 0; + } + + super::free(); + } } -IOReturn IOWorkLoop::addEventSource(IOEventSource *newEvent) +IOReturn +IOWorkLoop::addEventSource(IOEventSource *newEvent) { - if ((workThread) - && !thread_has_thread_name(workThread) - && (newEvent->owner) - && !OSDynamicCast(IOCommandPool, newEvent->owner)) { - thread_set_thread_name(workThread, newEvent->owner->getMetaClass()->getClassName()); - } + if ((workThread) + && !thread_has_thread_name(workThread) + && (newEvent->owner) + && !OSDynamicCast(IOCommandPool, newEvent->owner)) { + thread_set_thread_name(workThread, newEvent->owner->getMetaClass()->getClassName()); + } - return controlG->runCommand((void *) mAddEvent, (void *) newEvent); + return controlG->runCommand((void *) mAddEvent, (void *) newEvent); } - -IOReturn IOWorkLoop::removeEventSource(IOEventSource *toRemove) + +IOReturn +IOWorkLoop::removeEventSource(IOEventSource *toRemove) { - return controlG->runCommand((void *) mRemoveEvent, (void *) toRemove); + return controlG->runCommand((void *) mRemoveEvent, (void *) toRemove); } -void IOWorkLoop::enableAllEventSources() const +void +IOWorkLoop::enableAllEventSources() const { - IOEventSource *event; + IOEventSource *event; - for (event = eventChain; event; event = event->getNext()) - event->enable(); + for (event = eventChain; event; event = event->getNext()) { + event->enable(); + } - for (event = passiveEventChain; event; event = event->getNext()) - event->enable(); + for (event = passiveEventChain; event; event = event->getNext()) { + event->enable(); + } } -void IOWorkLoop::disableAllEventSources() const +void +IOWorkLoop::disableAllEventSources() const { - IOEventSource *event; + IOEventSource *event; - for (event = eventChain; event; event = event->getNext()) + for (event = eventChain; event; event = event->getNext()) { event->disable(); - + } + /* NOTE: controlG is in passiveEventChain since it's an IOCommandGate */ - for (event = passiveEventChain; event; event = event->getNext()) - if (event != controlG) // Don't disable the control gate - event->disable(); + for (event = passiveEventChain; event; event = event->getNext()) { + if (event != controlG) { // Don't disable the control gate + event->disable(); + } + } } -void IOWorkLoop::enableAllInterrupts() const +void +IOWorkLoop::enableAllInterrupts() const { - IOEventSource *event; - - for (event = eventChain; event; event = event->getNext()) - if (OSDynamicCast(IOInterruptEventSource, event)) - event->enable(); + IOEventSource *event; + + for (event = eventChain; event; event = event->getNext()) { + if (OSDynamicCast(IOInterruptEventSource, event)) { + event->enable(); + } + } } -void IOWorkLoop::disableAllInterrupts() const +void +IOWorkLoop::disableAllInterrupts() const { - IOEventSource *event; - - for (event = eventChain; event; event = event->getNext()) - if (OSDynamicCast(IOInterruptEventSource, event)) - event->disable(); + IOEventSource *event; + + for (event = eventChain; event; event = event->getNext()) { + if (OSDynamicCast(IOInterruptEventSource, event)) { + event->disable(); + } + } } -/* virtual */ bool IOWorkLoop::runEventSources() +/* virtual */ bool +IOWorkLoop::runEventSources() { - bool res = 
false; - bool traceWL = (gIOKitTrace & kIOTraceWorkLoops) ? true : false; - bool traceES = (gIOKitTrace & kIOTraceEventSources) ? true : false; - - closeGate(); - if (ISSETP(&fFlags, kLoopTerminate)) + bool res = false; + bool traceWL = (gIOKitTrace & kIOTraceWorkLoops) ? true : false; + bool traceES = (gIOKitTrace & kIOTraceEventSources) ? true : false; + + closeGate(); + if (ISSETP(&fFlags, kLoopTerminate)) { goto abort; - - if (traceWL) + } + + if (traceWL) { IOTimeStampStartConstant(IODBG_WORKLOOP(IOWL_WORK), VM_KERNEL_ADDRHIDE(this)); - - bool more; - do { + } + + bool more; + do { CLRP(&fFlags, kLoopRestart); more = false; IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock); @@ -361,274 +395,292 @@ void IOWorkLoop::disableAllInterrupts() const IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); /* NOTE: only loop over event sources in eventChain. Bypass "passive" event sources for performance */ for (IOEventSource *evnt = eventChain; evnt; evnt = evnt->getNext()) { - - if (traceES) + if (traceES) { IOTimeStampStartConstant(IODBG_WORKLOOP(IOWL_CLIENT), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(evnt)); - + } + more |= evnt->checkForWork(); - - if (traceES) + + if (traceES) { IOTimeStampEndConstant(IODBG_WORKLOOP(IOWL_CLIENT), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(evnt)); - - if (ISSETP(&fFlags, kLoopTerminate)) + } + + if (ISSETP(&fFlags, kLoopTerminate)) { goto abort; - else if (fFlags & kLoopRestart) { + } else if (fFlags & kLoopRestart) { more = true; break; } } - } while (more); - - res = true; - - if (traceWL) + } while (more); + + res = true; + + if (traceWL) { IOTimeStampEndConstant(IODBG_WORKLOOP(IOWL_WORK), VM_KERNEL_ADDRHIDE(this)); - + } + abort: - openGate(); - return res; + openGate(); + return res; } -/* virtual */ void IOWorkLoop::threadMain() +/* virtual */ void +IOWorkLoop::threadMain() { restartThread: - do { - if ( !runEventSources() ) - goto exitThread; - - IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock); - if ( !ISSETP(&fFlags, kLoopTerminate) && !workToDo) { - assert_wait((void *) &workToDo, false); - IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); - thread_continue_t cptr = NULL; - if (!reserved || !(kPreciousStack & reserved->options)) - cptr = OSMemberFunctionCast( - thread_continue_t, this, &IOWorkLoop::threadMain); - thread_block_parameter(cptr, this); - goto restartThread; - /* NOTREACHED */ - } + do { + if (!runEventSources()) { + goto exitThread; + } + + IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock); + if (!ISSETP(&fFlags, kLoopTerminate) && !workToDo) { + assert_wait((void *) &workToDo, false); + IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); + thread_continue_t cptr = NULL; + if (!reserved || !(kPreciousStack & reserved->options)) { + cptr = OSMemberFunctionCast( + thread_continue_t, this, &IOWorkLoop::threadMain); + } + thread_block_parameter(cptr, this); + goto restartThread; + /* NOTREACHED */ + } - // At this point we either have work to do or we need - // to commit suicide. But no matter - // Clear the simple lock and retore the interrupt state - IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); - } while(workToDo); + // At this point we either have work to do or we need + // to commit suicide. 
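/*
 * [Editor's sketch — not part of this patch.] The continuation choice just
 * above (thread_block_parameter with or without a continuation) is what
 * kPreciousStack controls: with a continuation the idle thread's stack can be
 * discarded while blocked; without one it is preserved across wakeups. Opting
 * in at creation time, assuming the Options enum is public as declared in
 * IOWorkLoop.h:
 */
#include <IOKit/IOWorkLoop.h>

IOWorkLoop *preciousLoop = IOWorkLoop::workLoopWithOptions(IOWorkLoop::kPreciousStack);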
But no matter + // Clear the simple lock and retore the interrupt state + IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); + } while (workToDo); exitThread: - closeGate(); - thread_t thread = workThread; - workThread = 0; // Say we don't have a loop and free ourselves - openGate(); + closeGate(); + thread_t thread = workThread; + workThread = 0; // Say we don't have a loop and free ourselves + openGate(); - free(); + free(); - thread_deallocate(thread); - (void) thread_terminate(thread); + thread_deallocate(thread); + (void) thread_terminate(thread); } -IOThread IOWorkLoop::getThread() const +IOThread +IOWorkLoop::getThread() const { - return workThread; + return workThread; } -bool IOWorkLoop::onThread() const +bool +IOWorkLoop::onThread() const { - return (IOThreadSelf() == workThread); + return IOThreadSelf() == workThread; } -bool IOWorkLoop::inGate() const +bool +IOWorkLoop::inGate() const { - return IORecursiveLockHaveLock(gateLock); + return IORecursiveLockHaveLock(gateLock); } // Internal APIs used by event sources to control the thread -void IOWorkLoop::signalWorkAvailable() +void +IOWorkLoop::signalWorkAvailable() { - if (workToDoLock) { - IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock); - workToDo = true; - thread_wakeup_thread((void *) &workToDo, workThread); - IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); - } + if (workToDoLock) { + IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock); + workToDo = true; + thread_wakeup_thread((void *) &workToDo, workThread); + IOSimpleLockUnlockEnableInterrupt(workToDoLock, is); + } } -void IOWorkLoop::openGate() +void +IOWorkLoop::openGate() { - IOStatisticsOpenGate(); - IORecursiveLockUnlock(gateLock); + IOStatisticsOpenGate(); + IORecursiveLockUnlock(gateLock); } -void IOWorkLoop::closeGate() +void +IOWorkLoop::closeGate() { - IORecursiveLockLock(gateLock); - IOStatisticsCloseGate(); + IORecursiveLockLock(gateLock); + IOStatisticsCloseGate(); } -bool IOWorkLoop::tryCloseGate() +bool +IOWorkLoop::tryCloseGate() { - bool res = (IORecursiveLockTryLock(gateLock) != 0); - if (res) { - IOStatisticsCloseGate(); - } - return res; + bool res = (IORecursiveLockTryLock(gateLock) != 0); + if (res) { + IOStatisticsCloseGate(); + } + return res; } -int IOWorkLoop::sleepGate(void *event, UInt32 interuptibleType) +int +IOWorkLoop::sleepGate(void *event, UInt32 interuptibleType) { - int res; - IOStatisticsOpenGate(); - res = IORecursiveLockSleep(gateLock, event, interuptibleType); - IOStatisticsCloseGate(); - return res; + int res; + IOStatisticsOpenGate(); + res = IORecursiveLockSleep(gateLock, event, interuptibleType); + IOStatisticsCloseGate(); + return res; } -int IOWorkLoop::sleepGate(void *event, AbsoluteTime deadline, UInt32 interuptibleType) +int +IOWorkLoop::sleepGate(void *event, AbsoluteTime deadline, UInt32 interuptibleType) { - int res; - IOStatisticsOpenGate(); - res = IORecursiveLockSleepDeadline(gateLock, event, deadline, interuptibleType); - IOStatisticsCloseGate(); - return res; + int res; + IOStatisticsOpenGate(); + res = IORecursiveLockSleepDeadline(gateLock, event, deadline, interuptibleType); + IOStatisticsCloseGate(); + return res; } -void IOWorkLoop::wakeupGate(void *event, bool oneThread) +void +IOWorkLoop::wakeupGate(void *event, bool oneThread) { - IORecursiveLockWakeup(gateLock, event, oneThread); + IORecursiveLockWakeup(gateLock, event, oneThread); } -static IOReturn IOWorkLoopActionToBlock(OSObject *owner, - void *arg0, void *arg1, - void *arg2, void *arg3) +static IOReturn 
+IOWorkLoopActionToBlock(OSObject *owner, + void *arg0, void *arg1, + void *arg2, void *arg3) { - return ((IOWorkLoop::ActionBlock) arg0)(); + return ((IOWorkLoop::ActionBlock) arg0)(); } -IOReturn IOWorkLoop::runActionBlock(ActionBlock action) +IOReturn +IOWorkLoop::runActionBlock(ActionBlock action) { - return (runAction(&IOWorkLoopActionToBlock, this, action)); + return runAction(&IOWorkLoopActionToBlock, this, action); } -IOReturn IOWorkLoop::runAction(Action inAction, OSObject *target, - void *arg0, void *arg1, - void *arg2, void *arg3) +IOReturn +IOWorkLoop::runAction(Action inAction, OSObject *target, + void *arg0, void *arg1, + void *arg2, void *arg3) { - IOReturn res; + IOReturn res; - // closeGate is recursive so don't worry if we already hold the lock. - closeGate(); - res = (*inAction)(target, arg0, arg1, arg2, arg3); - openGate(); + // closeGate is recursive so don't worry if we already hold the lock. + closeGate(); + res = (*inAction)(target, arg0, arg1, arg2, arg3); + openGate(); - return res; + return res; } -IOReturn IOWorkLoop::_maintRequest(void *inC, void *inD, void *, void *) +IOReturn +IOWorkLoop::_maintRequest(void *inC, void *inD, void *, void *) { - maintCommandEnum command = (maintCommandEnum) (uintptr_t) inC; - IOEventSource *inEvent = (IOEventSource *) inD; - IOReturn res = kIOReturnSuccess; - - switch (command) - { - case mAddEvent: - if (!inEvent->getWorkLoop()) { - SETP(&fFlags, kLoopRestart); - - inEvent->retain(); - inEvent->setWorkLoop(this); - inEvent->setNext(0); + maintCommandEnum command = (maintCommandEnum) (uintptr_t) inC; + IOEventSource *inEvent = (IOEventSource *) inD; + IOReturn res = kIOReturnSuccess; + + switch (command) { + case mAddEvent: + if (!inEvent->getWorkLoop()) { + SETP(&fFlags, kLoopRestart); + + inEvent->retain(); + inEvent->setWorkLoop(this); + inEvent->setNext(0); + + /* Check if this is a passive or active event source being added */ + if (eventSourcePerformsWork(inEvent)) { + if (!eventChain) { + eventChain = inEvent; + } else { + IOEventSource *event, *next; + + for (event = eventChain; (next = event->getNext()); event = next) { + ; + } + event->setNext(inEvent); + } + } else { + if (!passiveEventChain) { + passiveEventChain = inEvent; + } else { + IOEventSource *event, *next; + + for (event = passiveEventChain; (next = event->getNext()); event = next) { + ; + } + event->setNext(inEvent); + } + } + IOStatisticsAttachEventSource(); + } + break; - /* Check if this is a passive or active event source being added */ - if (eventSourcePerformsWork(inEvent)) { - - if (!eventChain) - eventChain = inEvent; - else { - IOEventSource *event, *next; - - for (event = eventChain; (next = event->getNext()); event = next) - ; - event->setNext(inEvent); - - } - - } - else { - - if (!passiveEventChain) - passiveEventChain = inEvent; - else { - IOEventSource *event, *next; - - for (event = passiveEventChain; (next = event->getNext()); event = next) - ; - event->setNext(inEvent); - - } - - } - IOStatisticsAttachEventSource(); - } - break; + case mRemoveEvent: + if (inEvent->getWorkLoop()) { + IOStatisticsDetachEventSource(); - case mRemoveEvent: - if (inEvent->getWorkLoop()) { - IOStatisticsDetachEventSource(); - - if (eventSourcePerformsWork(inEvent)) { - if (eventChain == inEvent) + if (eventSourcePerformsWork(inEvent)) { + if (eventChain == inEvent) { eventChain = inEvent->getNext(); - else { + } else { IOEventSource *event, *next = 0; - + event = eventChain; - if (event) while ((next = event->getNext()) && (next != inEvent)) - event = next; - + 
if (event) { + while ((next = event->getNext()) && (next != inEvent)) { + event = next; + } + } + if (!next) { res = kIOReturnBadArgument; break; } event->setNext(inEvent->getNext()); } - } - else { - if (passiveEventChain == inEvent) + } else { + if (passiveEventChain == inEvent) { passiveEventChain = inEvent->getNext(); - else { + } else { IOEventSource *event, *next = 0; - + event = passiveEventChain; - if (event) while ((next = event->getNext()) && (next != inEvent)) - event = next; - + if (event) { + while ((next = event->getNext()) && (next != inEvent)) { + event = next; + } + } + if (!next) { res = kIOReturnBadArgument; break; } event->setNext(inEvent->getNext()); } - } - - inEvent->setWorkLoop(0); - inEvent->setNext(0); - inEvent->release(); - SETP(&fFlags, kLoopRestart); - } - break; + } + + inEvent->setWorkLoop(0); + inEvent->setNext(0); + inEvent->release(); + SETP(&fFlags, kLoopRestart); + } + break; - default: - return kIOReturnUnsupported; - } + default: + return kIOReturnUnsupported; + } - return res; + return res; } bool IOWorkLoop::eventSourcePerformsWork(IOEventSource *inEventSource) { - bool result = true; + bool result = true; /* * The idea here is to see if the subclass of IOEventSource has overridden checkForWork(). @@ -638,51 +690,56 @@ IOWorkLoop::eventSourcePerformsWork(IOEventSource *inEventSource) * We picked a known quantity controlG that does not override * IOEventSource::checkForWork(), namely the IOCommandGate associated with * the workloop to which this event source is getting attached. - * + * * We do a pointer comparison on the offset in the vtable for inNewEvent against * the offset in the vtable for inReferenceEvent. This works because * IOCommandGate's slot for checkForWork() has the address of * IOEventSource::checkForWork() in it. - * + * * Think of OSMemberFunctionCast yielding the value at the vtable offset for * checkForWork() here. We're just testing to see if it's the same or not. 
* */ - if (IOEventSource::kPassive & inEventSource->flags) result = false; - else if (IOEventSource::kActive & inEventSource->flags) result = true; - else if (controlG) { - void * ptr1; - void * ptr2; - + if (IOEventSource::kPassive & inEventSource->flags) { + result = false; + } else if (IOEventSource::kActive & inEventSource->flags) { + result = true; + } else if (controlG) { + void * ptr1; + void * ptr2; + ptr1 = OSMemberFunctionCast(void*, inEventSource, &IOEventSource::checkForWork); ptr2 = OSMemberFunctionCast(void*, controlG, &IOEventSource::checkForWork); - - if (ptr1 == ptr2) + + if (ptr1 == ptr2) { result = false; + } } - - return result; + + return result; } void IOWorkLoop::lockTime(void) { - uint64_t time; - time = mach_absolute_time() - reserved->lockTime; - if (time > reserved->lockInterval) - { - absolutetime_to_nanoseconds(time, &time); - if (kTimeLockPanics & reserved->options) panic("IOWorkLoop %p lock time %qd us", this, time / 1000ULL); - else OSReportWithBacktrace("IOWorkLoop %p lock time %qd us", this, time / 1000ULL); - } + uint64_t time; + time = mach_absolute_time() - reserved->lockTime; + if (time > reserved->lockInterval) { + absolutetime_to_nanoseconds(time, &time); + if (kTimeLockPanics & reserved->options) { + panic("IOWorkLoop %p lock time %qd us", this, time / 1000ULL); + } else { + OSReportWithBacktrace("IOWorkLoop %p lock time %qd us", this, time / 1000ULL); + } + } } void IOWorkLoop::setMaximumLockTime(uint64_t interval, uint32_t options) { - IORecursiveLockLock(gateLock); - reserved->lockInterval = interval; - reserved->options = (reserved->options & ~kTimeLockPanics) | (options & kTimeLockPanics); - IORecursiveLockUnlock(gateLock); + IORecursiveLockLock(gateLock); + reserved->lockInterval = interval; + reserved->options = (reserved->options & ~kTimeLockPanics) | (options & kTimeLockPanics); + IORecursiveLockUnlock(gateLock); } diff --git a/iokit/Kernel/RootDomainUserClient.cpp b/iokit/Kernel/RootDomainUserClient.cpp index 7a909d998..646ccec4e 100644 --- a/iokit/Kernel/RootDomainUserClient.cpp +++ b/iokit/Kernel/RootDomainUserClient.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
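/*
 * [Editor's sketch — not part of this patch.] The eventSourcePerformsWork()
 * test above, in isolation: OSMemberFunctionCast resolves the virtual call
 * through each object's actual vtable, so the two raw pointers are equal
 * exactly when the candidate still inherits IOEventSource::checkForWork().
 * Shown as a hypothetical IOWorkLoop helper, since checkForWork() is
 * protected and IOWorkLoop is a friend of IOEventSource:
 */
bool
IOWorkLoop::exampleOverridesCheckForWork(IOEventSource *candidate)  // hypothetical helper
{
	void *p1 = OSMemberFunctionCast(void *, candidate, &IOEventSource::checkForWork);
	void *p2 = OSMemberFunctionCast(void *, controlG,  &IOEventSource::checkForWork);
	return p1 != p2;    // different slot contents => candidate overrides it
}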
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -47,330 +47,333 @@ OSDefineMetaClassAndStructors(RootDomainUserClient, IOUserClient) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -bool RootDomainUserClient::initWithTask(task_t owningTask, void *security_id, - UInt32 type, OSDictionary * properties) +bool +RootDomainUserClient::initWithTask(task_t owningTask, void *security_id, + UInt32 type, OSDictionary * properties) { - if (properties) - properties->setObject(kIOUserClientCrossEndianCompatibleKey, kOSBooleanTrue); + if (properties) { + properties->setObject(kIOUserClientCrossEndianCompatibleKey, kOSBooleanTrue); + } - if (!super::initWithTask(owningTask, security_id, type, properties)) - return false; + if (!super::initWithTask(owningTask, security_id, type, properties)) { + return false; + } - fOwningTask = owningTask; - task_reference (fOwningTask); - return true; + fOwningTask = owningTask; + task_reference(fOwningTask); + return true; } -bool RootDomainUserClient::start( IOService * provider ) +bool +RootDomainUserClient::start( IOService * provider ) { - assert(OSDynamicCast(IOPMrootDomain, provider)); - if(!super::start(provider)) - return false; - fOwner = (IOPMrootDomain *)provider; + assert(OSDynamicCast(IOPMrootDomain, provider)); + if (!super::start(provider)) { + return false; + } + fOwner = (IOPMrootDomain *)provider; - return true; + return true; } -IOReturn RootDomainUserClient::secureSleepSystem( uint32_t *return_code ) +IOReturn +RootDomainUserClient::secureSleepSystem( uint32_t *return_code ) { - return secureSleepSystemOptions(NULL, 0, return_code); + return secureSleepSystemOptions(NULL, 0, return_code); } -IOReturn RootDomainUserClient::secureSleepSystemOptions( - const void *inOptions, - IOByteCount inOptionsSize, - uint32_t *returnCode) +IOReturn +RootDomainUserClient::secureSleepSystemOptions( + const void *inOptions, + IOByteCount inOptionsSize, + uint32_t *returnCode) { - - int local_priv = 0; - int admin_priv = 0; - IOReturn ret = kIOReturnNotPrivileged; - OSDictionary *unserializedOptions = NULL; - OSString *unserializeErrorString = NULL; - - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeLocalUser); - local_priv = (kIOReturnSuccess == ret); - - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); - admin_priv = (kIOReturnSuccess == ret); - - - if (inOptions) - { - unserializedOptions = OSDynamicCast( OSDictionary, - OSUnserializeXML((const char *)inOptions, inOptionsSize, &unserializeErrorString)); - - if (!unserializedOptions) { - IOLog("IOPMRootDomain SleepSystem unserialization failure: %s\n", - unserializeErrorString ? 
unserializeErrorString->getCStringNoCopy() : "Unknown"); - } - } - - if ( (local_priv || admin_priv) && fOwner ) - { - proc_t p; - p = (proc_t)get_bsdtask_info(fOwningTask); - if (p) { - fOwner->setProperty("SleepRequestedByPID", proc_pid(p), 32); - } - - if (unserializedOptions) - { - // Publish Sleep Options in registry under root_domain - fOwner->setProperty( kRootDomainSleepOptionsKey, unserializedOptions); - - *returnCode = fOwner->sleepSystemOptions( unserializedOptions ); - - unserializedOptions->release(); - } else { - // No options - // Clear any pre-existing options - fOwner->removeProperty( kRootDomainSleepOptionsKey ); - - *returnCode = fOwner->sleepSystemOptions( NULL ); - } - - } else { - *returnCode = kIOReturnNotPrivileged; - } - - return kIOReturnSuccess; + int local_priv = 0; + int admin_priv = 0; + IOReturn ret = kIOReturnNotPrivileged; + OSDictionary *unserializedOptions = NULL; + OSString *unserializeErrorString = NULL; + + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeLocalUser); + local_priv = (kIOReturnSuccess == ret); + + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); + admin_priv = (kIOReturnSuccess == ret); + + + if (inOptions) { + unserializedOptions = OSDynamicCast( OSDictionary, + OSUnserializeXML((const char *)inOptions, inOptionsSize, &unserializeErrorString)); + + if (!unserializedOptions) { + IOLog("IOPMRootDomain SleepSystem unserialization failure: %s\n", + unserializeErrorString ? unserializeErrorString->getCStringNoCopy() : "Unknown"); + } + } + + if ((local_priv || admin_priv) && fOwner) { + proc_t p; + p = (proc_t)get_bsdtask_info(fOwningTask); + if (p) { + fOwner->setProperty("SleepRequestedByPID", proc_pid(p), 32); + } + + if (unserializedOptions) { + // Publish Sleep Options in registry under root_domain + fOwner->setProperty( kRootDomainSleepOptionsKey, unserializedOptions); + + *returnCode = fOwner->sleepSystemOptions( unserializedOptions ); + + unserializedOptions->release(); + } else { + // No options + // Clear any pre-existing options + fOwner->removeProperty( kRootDomainSleepOptionsKey ); + + *returnCode = fOwner->sleepSystemOptions( NULL ); + } + } else { + *returnCode = kIOReturnNotPrivileged; + } + + return kIOReturnSuccess; } -IOReturn RootDomainUserClient::secureSetAggressiveness( - unsigned long type, - unsigned long newLevel, - int *return_code ) +IOReturn +RootDomainUserClient::secureSetAggressiveness( + unsigned long type, + unsigned long newLevel, + int *return_code ) { - int local_priv = 0; - int admin_priv = 0; - IOReturn ret = kIOReturnNotPrivileged; - - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeLocalUser); - local_priv = (kIOReturnSuccess == ret); - - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); - admin_priv = (kIOReturnSuccess == ret); - - if((local_priv || admin_priv) && fOwner) { - *return_code = fOwner->setAggressiveness(type, newLevel); - } else { - *return_code = kIOReturnNotPrivileged; - } - return kIOReturnSuccess; + int local_priv = 0; + int admin_priv = 0; + IOReturn ret = kIOReturnNotPrivileged; + + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeLocalUser); + local_priv = (kIOReturnSuccess == ret); + + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); + admin_priv = (kIOReturnSuccess == ret); + + if ((local_priv || admin_priv) && fOwner) { + *return_code = fOwner->setAggressiveness(type, newLevel); + } else { + *return_code = kIOReturnNotPrivileged; + } + return kIOReturnSuccess; } -IOReturn 
RootDomainUserClient::secureSetMaintenanceWakeCalendar( - IOPMCalendarStruct *inCalendar, - uint32_t *returnCode) +IOReturn +RootDomainUserClient::secureSetMaintenanceWakeCalendar( + IOPMCalendarStruct *inCalendar, + uint32_t *returnCode) { - int admin_priv = 0; - IOReturn ret = kIOReturnNotPrivileged; - - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); - admin_priv = (kIOReturnSuccess == ret); - - if (admin_priv && fOwner) { - *returnCode = fOwner->setMaintenanceWakeCalendar(inCalendar); - } else { - *returnCode = kIOReturnNotPrivileged; - } - return kIOReturnSuccess; + int admin_priv = 0; + IOReturn ret = kIOReturnNotPrivileged; + + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); + admin_priv = (kIOReturnSuccess == ret); + + if (admin_priv && fOwner) { + *returnCode = fOwner->setMaintenanceWakeCalendar(inCalendar); + } else { + *returnCode = kIOReturnNotPrivileged; + } + return kIOReturnSuccess; } -IOReturn RootDomainUserClient::secureSetUserAssertionLevels( - uint32_t assertionBitfield) +IOReturn +RootDomainUserClient::secureSetUserAssertionLevels( + uint32_t assertionBitfield) { - int admin_priv = 0; - IOReturn ret = kIOReturnNotPrivileged; - - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); - admin_priv = (kIOReturnSuccess == ret); - - if (admin_priv && fOwner) { - ret = fOwner->setPMAssertionUserLevels(assertionBitfield); - } else { - ret = kIOReturnNotPrivileged; - } - return kIOReturnSuccess; + int admin_priv = 0; + IOReturn ret = kIOReturnNotPrivileged; + + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); + admin_priv = (kIOReturnSuccess == ret); + + if (admin_priv && fOwner) { + ret = fOwner->setPMAssertionUserLevels(assertionBitfield); + } else { + ret = kIOReturnNotPrivileged; + } + return kIOReturnSuccess; } -IOReturn RootDomainUserClient::secureGetSystemSleepType( - uint32_t *outSleepType, uint32_t *sleepTimer) +IOReturn +RootDomainUserClient::secureGetSystemSleepType( + uint32_t *outSleepType, uint32_t *sleepTimer) { - int admin_priv = 0; - IOReturn ret; - - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); - admin_priv = (kIOReturnSuccess == ret); - - if (admin_priv && fOwner) { - ret = fOwner->getSystemSleepType(outSleepType, sleepTimer); - } else { - ret = kIOReturnNotPrivileged; - } - return ret; + int admin_priv = 0; + IOReturn ret; + + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); + admin_priv = (kIOReturnSuccess == ret); + + if (admin_priv && fOwner) { + ret = fOwner->getSystemSleepType(outSleepType, sleepTimer); + } else { + ret = kIOReturnNotPrivileged; + } + return ret; } -IOReturn RootDomainUserClient::clientClose( void ) +IOReturn +RootDomainUserClient::clientClose( void ) { - terminate(); + terminate(); - return kIOReturnSuccess; + return kIOReturnSuccess; } -void RootDomainUserClient::stop( IOService *provider) +void +RootDomainUserClient::stop( IOService *provider) { - if(fOwningTask) { - task_deallocate(fOwningTask); - fOwningTask = 0; - } + if (fOwningTask) { + task_deallocate(fOwningTask); + fOwningTask = 0; + } - super::stop(provider); + super::stop(provider); } -IOReturn RootDomainUserClient::externalMethod( - uint32_t selector, - IOExternalMethodArguments * arguments, - IOExternalMethodDispatch * dispatch __unused, - OSObject * target __unused, - void * reference __unused ) +IOReturn +RootDomainUserClient::externalMethod( + uint32_t selector, + IOExternalMethodArguments * arguments, + IOExternalMethodDispatch * 
dispatch __unused, + OSObject * target __unused, + void * reference __unused ) { - IOReturn ret = kIOReturnBadArgument; - - switch (selector) - { - case kPMSetAggressiveness: - if ((2 == arguments->scalarInputCount) - && (1 == arguments->scalarOutputCount)) - { - ret = this->secureSetAggressiveness( - (unsigned long)arguments->scalarInput[0], - (unsigned long)arguments->scalarInput[1], - (int *)&arguments->scalarOutput[0]); - } - break; - - case kPMGetAggressiveness: - if ((1 == arguments->scalarInputCount) - && (1 == arguments->scalarOutputCount)) - { - ret = fOwner->getAggressiveness( - (unsigned long)arguments->scalarInput[0], - (unsigned long *)&arguments->scalarOutput[0]); - } - break; - - case kPMSleepSystem: - if (1 == arguments->scalarOutputCount) - { - ret = this->secureSleepSystem( - (uint32_t *)&arguments->scalarOutput[0]); - } - break; - - case kPMAllowPowerChange: - if (1 == arguments->scalarInputCount) - { - ret = fOwner->allowPowerChange( - arguments->scalarInput[0]); - } - break; - - case kPMCancelPowerChange: - if (1 == arguments->scalarInputCount) - { - ret = fOwner->cancelPowerChange( - arguments->scalarInput[0]); - } - break; - - case kPMShutdownSystem: - // deperecated interface - ret = kIOReturnUnsupported; - break; - - case kPMRestartSystem: - // deperecated interface - ret = kIOReturnUnsupported; - break; - - case kPMSleepSystemOptions: - ret = this->secureSleepSystemOptions( - arguments->structureInput, - arguments->structureInputSize, - (uint32_t *)&arguments->scalarOutput[0]); - break; - case kPMSetMaintenanceWakeCalendar: - if ((arguments->structureInputSize >= sizeof(IOPMCalendarStruct)) && - (arguments->structureOutputSize >= sizeof(uint32_t) )) { - ret = this->secureSetMaintenanceWakeCalendar( - (IOPMCalendarStruct *)arguments->structureInput, - (uint32_t *)&arguments->structureOutput); - arguments->structureOutputSize = sizeof(uint32_t); - } - break; - - case kPMSetUserAssertionLevels: - ret = this->secureSetUserAssertionLevels( - (uint32_t)arguments->scalarInput[0]); - break; - - case kPMActivityTickle: - if ( fOwner->checkSystemCanSustainFullWake() ) - { - fOwner->reportUserInput( ); - fOwner->setProperty(kIOPMRootDomainWakeTypeKey, "UserActivity Assertion"); - } - ret = kIOReturnSuccess; - break; - - case kPMSetClamshellSleepState: - fOwner->setDisableClamShellSleep(arguments->scalarInput[0] ? 
true : false); - ret = kIOReturnSuccess; - break; - - case kPMGetSystemSleepType: - if (2 == arguments->scalarOutputCount) - { - ret = this->secureGetSystemSleepType( - (uint32_t *) &arguments->scalarOutput[0], - (uint32_t *) &arguments->scalarOutput[1]); - } - break; + IOReturn ret = kIOReturnBadArgument; + + switch (selector) { + case kPMSetAggressiveness: + if ((2 == arguments->scalarInputCount) + && (1 == arguments->scalarOutputCount)) { + ret = this->secureSetAggressiveness( + (unsigned long)arguments->scalarInput[0], + (unsigned long)arguments->scalarInput[1], + (int *)&arguments->scalarOutput[0]); + } + break; + + case kPMGetAggressiveness: + if ((1 == arguments->scalarInputCount) + && (1 == arguments->scalarOutputCount)) { + ret = fOwner->getAggressiveness( + (unsigned long)arguments->scalarInput[0], + (unsigned long *)&arguments->scalarOutput[0]); + } + break; + + case kPMSleepSystem: + if (1 == arguments->scalarOutputCount) { + ret = this->secureSleepSystem( + (uint32_t *)&arguments->scalarOutput[0]); + } + break; + + case kPMAllowPowerChange: + if (1 == arguments->scalarInputCount) { + ret = fOwner->allowPowerChange( + arguments->scalarInput[0]); + } + break; + + case kPMCancelPowerChange: + if (1 == arguments->scalarInputCount) { + ret = fOwner->cancelPowerChange( + arguments->scalarInput[0]); + } + break; + + case kPMShutdownSystem: + // deperecated interface + ret = kIOReturnUnsupported; + break; + + case kPMRestartSystem: + // deperecated interface + ret = kIOReturnUnsupported; + break; + + case kPMSleepSystemOptions: + ret = this->secureSleepSystemOptions( + arguments->structureInput, + arguments->structureInputSize, + (uint32_t *)&arguments->scalarOutput[0]); + break; + case kPMSetMaintenanceWakeCalendar: + if ((arguments->structureInputSize >= sizeof(IOPMCalendarStruct)) && + (arguments->structureOutputSize >= sizeof(uint32_t))) { + ret = this->secureSetMaintenanceWakeCalendar( + (IOPMCalendarStruct *)arguments->structureInput, + (uint32_t *)&arguments->structureOutput); + arguments->structureOutputSize = sizeof(uint32_t); + } + break; + + case kPMSetUserAssertionLevels: + ret = this->secureSetUserAssertionLevels( + (uint32_t)arguments->scalarInput[0]); + break; + + case kPMActivityTickle: + if (fOwner->checkSystemCanSustainFullWake()) { + fOwner->reportUserInput(); + fOwner->setProperty(kIOPMRootDomainWakeTypeKey, "UserActivity Assertion"); + } + ret = kIOReturnSuccess; + break; + + case kPMSetClamshellSleepState: + fOwner->setDisableClamShellSleep(arguments->scalarInput[0] ? 
true : false); + ret = kIOReturnSuccess; + break; + + case kPMGetSystemSleepType: + if (2 == arguments->scalarOutputCount) { + ret = this->secureGetSystemSleepType( + (uint32_t *) &arguments->scalarOutput[0], + (uint32_t *) &arguments->scalarOutput[1]); + } + break; #if defined(__i386__) || defined(__x86_64__) - case kPMSleepWakeWatchdogEnable: - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); - if (ret == kIOReturnSuccess) - fOwner->sleepWakeDebugEnableWdog(); - break; - - - case kPMSleepWakeDebugTrig: - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); - if (ret == kIOReturnSuccess) - fOwner->sleepWakeDebugTrig(false); - break; + case kPMSleepWakeWatchdogEnable: + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); + if (ret == kIOReturnSuccess) { + fOwner->sleepWakeDebugEnableWdog(); + } + break; + + + case kPMSleepWakeDebugTrig: + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); + if (ret == kIOReturnSuccess) { + fOwner->sleepWakeDebugTrig(false); + } + break; #endif - case kPMSetDisplayPowerOn: - if (1 == arguments->scalarInputCount) - { - ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); - if (ret == kIOReturnSuccess) - fOwner->setDisplayPowerOn((uint32_t)arguments->scalarInput[0]); - } - break; - - default: - // bad selector - return kIOReturnBadArgument; - } - - return ret; + case kPMSetDisplayPowerOn: + if (1 == arguments->scalarInputCount) { + ret = clientHasPrivilege(fOwningTask, kIOClientPrivilegeAdministrator); + if (ret == kIOReturnSuccess) { + fOwner->setDisplayPowerOn((uint32_t)arguments->scalarInput[0]); + } + } + break; + + default: + // bad selector + return kIOReturnBadArgument; + } + + return ret; } /* getTargetAndMethodForIndex @@ -378,11 +381,12 @@ IOReturn RootDomainUserClient::externalMethod( * We maintain getTargetAndExternalMethod since it's an exported symbol, * and only for that reason. */ -IOExternalMethod * RootDomainUserClient::getTargetAndMethodForIndex( - IOService ** targetP, UInt32 index ) +IOExternalMethod * +RootDomainUserClient::getTargetAndMethodForIndex( + IOService ** targetP, UInt32 index ) { - // DO NOT EDIT - return super::getTargetAndMethodForIndex(targetP, index); + // DO NOT EDIT + return super::getTargetAndMethodForIndex(targetP, index); } /* setPreventative @@ -390,4 +394,6 @@ IOExternalMethod * RootDomainUserClient::getTargetAndMethodForIndex( */ void RootDomainUserClient::setPreventative(UInt32 on_off, UInt32 types_of_sleep) -{ return; } // DO NOT EDIT +{ + return; +} // DO NOT EDIT diff --git a/iokit/Kernel/RootDomainUserClient.h b/iokit/Kernel/RootDomainUserClient.h index f4b815bce..aea9ca375 100644 --- a/iokit/Kernel/RootDomainUserClient.h +++ b/iokit/Kernel/RootDomainUserClient.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * HISTORY * @@ -44,54 +44,52 @@ class RootDomainUserClient : public IOUserClient { - OSDeclareDefaultStructors(RootDomainUserClient) + OSDeclareDefaultStructors(RootDomainUserClient) - friend class IOPMrootDomain; + friend class IOPMrootDomain; private: - IOPMrootDomain * fOwner; - task_t fOwningTask; + IOPMrootDomain * fOwner; + task_t fOwningTask; - IOReturn secureSleepSystem( uint32_t *return_code ); + IOReturn secureSleepSystem( uint32_t *return_code ); - IOReturn secureSleepSystemOptions( const void *inOptions, - IOByteCount inOptionsSize, - uint32_t *returnCode); + IOReturn secureSleepSystemOptions( const void *inOptions, + IOByteCount inOptionsSize, + uint32_t *returnCode); - IOReturn secureSetAggressiveness( unsigned long type, - unsigned long newLevel, - int *return_code ); + IOReturn secureSetAggressiveness( unsigned long type, + unsigned long newLevel, + int *return_code ); - IOReturn secureSetMaintenanceWakeCalendar( - IOPMCalendarStruct *inCalendar, - uint32_t *returnCode); + IOReturn secureSetMaintenanceWakeCalendar( + IOPMCalendarStruct *inCalendar, + uint32_t *returnCode); - IOReturn secureSetUserAssertionLevels(uint32_t assertionBitfield); + IOReturn secureSetUserAssertionLevels(uint32_t assertionBitfield); - IOReturn secureGetSystemSleepType( uint32_t *sleepType, uint32_t *sleepTimer); + IOReturn secureGetSystemSleepType( uint32_t *sleepType, uint32_t *sleepTimer); public: - virtual IOReturn clientClose( void ) APPLE_KEXT_OVERRIDE; - - virtual IOReturn externalMethod( uint32_t selector, - IOExternalMethodArguments * arguments, - IOExternalMethodDispatch * dispatch, - OSObject * target, - void * reference ) APPLE_KEXT_OVERRIDE; + virtual IOReturn clientClose( void ) APPLE_KEXT_OVERRIDE; - virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE; + virtual IOReturn externalMethod( uint32_t selector, + IOExternalMethodArguments * arguments, + IOExternalMethodDispatch * dispatch, + OSObject * target, + void * reference ) APPLE_KEXT_OVERRIDE; - virtual bool initWithTask(task_t owningTask, void *security_id, - UInt32 type, OSDictionary * properties) APPLE_KEXT_OVERRIDE; + virtual bool start( IOService * provider ) APPLE_KEXT_OVERRIDE; - // Unused - retained for symbol compatibility - void setPreventative(UInt32 on_off, UInt32 types_of_sleep); + virtual bool initWithTask(task_t owningTask, void *security_id, + UInt32 type, OSDictionary * properties) APPLE_KEXT_OVERRIDE; - // Unused - retained for symbol compatibility - virtual IOExternalMethod * getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ) APPLE_KEXT_OVERRIDE; - virtual void stop( IOService *provider) APPLE_KEXT_OVERRIDE; +// Unused - retained for symbol compatibility + void setPreventative(UInt32 on_off, UInt32 types_of_sleep); +// Unused - retained for symbol compatibility + virtual IOExternalMethod * getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ) APPLE_KEXT_OVERRIDE; + virtual void stop( 
IOService *provider) APPLE_KEXT_OVERRIDE; }; #endif /* ! _IOKIT_ROOTDOMAINUSERCLIENT_H */ - diff --git a/iokit/Kernel/i386/IOKeyStoreHelper.cpp b/iokit/Kernel/i386/IOKeyStoreHelper.cpp index 17ebea802..88b077a25 100644 --- a/iokit/Kernel/i386/IOKeyStoreHelper.cpp +++ b/iokit/Kernel/i386/IOKeyStoreHelper.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -57,57 +57,58 @@ void IOSetAPFSKeyStoreData(IOMemoryDescriptor* data); __END_DECLS #if 1 -#define DEBG(fmt, args...) { kprintf(fmt, ## args); } +#define DEBG(fmt, args...) { kprintf(fmt, ## args); } #else -#define DEBG(fmt, args...) {} +#define DEBG(fmt, args...) {} #endif void IOSetKeyStoreData(IOMemoryDescriptor * data) { - newData = data; - alreadyFetched = 0; + newData = data; + alreadyFetched = 0; } IOMemoryDescriptor * IOGetBootKeyStoreData(void) { - IOMemoryDescriptor *memoryDescriptor; - boot_args *args = (boot_args *)PE_state.bootArgs; - IOOptionBits options; - IOAddressRange ranges; - - if (!OSCompareAndSwap(0, 1, &alreadyFetched)) - return (NULL); - - if (newData) - { - IOMemoryDescriptor * data = newData; - newData = NULL; - return (data); - } - - DEBG("%s: data at address %u size %u\n", __func__, - args->keyStoreDataStart, - args->keyStoreDataSize); - - if (args->keyStoreDataStart == 0) - return (NULL); - - ranges.address = args->keyStoreDataStart; - ranges.length = args->keyStoreDataSize; - - options = kIODirectionInOut | kIOMemoryTypePhysical64 | kIOMemoryMapperNone; - - memoryDescriptor = IOMemoryDescriptor::withOptions(&ranges, - 1, - 0, - NULL, - options); - - DEBG("%s: memory descriptor %p\n", __func__, memoryDescriptor); - - return memoryDescriptor; + IOMemoryDescriptor *memoryDescriptor; + boot_args *args = (boot_args *)PE_state.bootArgs; + IOOptionBits options; + IOAddressRange ranges; + + if (!OSCompareAndSwap(0, 1, &alreadyFetched)) { + return NULL; + } + + if (newData) { + IOMemoryDescriptor * data = newData; + newData = NULL; + return data; + } + + DEBG("%s: data at address %u size %u\n", __func__, + args->keyStoreDataStart, + args->keyStoreDataSize); + + if (args->keyStoreDataStart == 0) { + return NULL; + } + + ranges.address = args->keyStoreDataStart; + ranges.length = args->keyStoreDataSize; + + options = kIODirectionInOut | kIOMemoryTypePhysical64 | kIOMemoryMapperNone; + + memoryDescriptor = IOMemoryDescriptor::withOptions(&ranges, + 1, + 0, + NULL, + options); + + DEBG("%s: memory descriptor %p\n", __func__, 
memoryDescriptor); + + return memoryDescriptor; } // APFS volume key fetcher @@ -116,45 +117,45 @@ IOGetBootKeyStoreData(void) void IOSetAPFSKeyStoreData(IOMemoryDescriptor* data) { - // Do not allow re-fetching of the boot_args key by passing NULL here. - if (data != NULL) - { - apfsKeyData = data; - apfsKeyFetched = 0; - } + // Do not allow re-fetching of the boot_args key by passing NULL here. + if (data != NULL) { + apfsKeyData = data; + apfsKeyFetched = 0; + } } // Retrieve any key we may have (stored in boot_args or by Hibernate) IOMemoryDescriptor* IOGetAPFSKeyStoreData() { - // Check if someone got the key before us - if (!OSCompareAndSwap(0, 1, &apfsKeyFetched)) - return NULL; - - // Do we have in-memory key? - if (apfsKeyData) - { - IOMemoryDescriptor* data = apfsKeyData; - apfsKeyData = NULL; - return data; - } - - // Looks like there was no in-memory key and it's the first call - try boot_args - boot_args* args = (boot_args*)PE_state.bootArgs; - - DEBG("%s: data at address %u size %u\n", __func__, args->apfsDataStart, args->apfsDataSize); - if (args->apfsDataStart == 0) - return NULL; - - // We have the key in the boot_args, create IOMemoryDescriptor for the blob - IOAddressRange ranges; - ranges.address = args->apfsDataStart; - ranges.length = args->apfsDataSize; - - const IOOptionBits options = kIODirectionInOut | kIOMemoryTypePhysical64 | kIOMemoryMapperNone; - - IOMemoryDescriptor* memoryDescriptor = IOMemoryDescriptor::withOptions(&ranges, 1, 0, NULL, options); - DEBG("%s: memory descriptor %p\n", __func__, memoryDescriptor); - return memoryDescriptor; + // Check if someone got the key before us + if (!OSCompareAndSwap(0, 1, &apfsKeyFetched)) { + return NULL; + } + + // Do we have in-memory key? + if (apfsKeyData) { + IOMemoryDescriptor* data = apfsKeyData; + apfsKeyData = NULL; + return data; + } + + // Looks like there was no in-memory key and it's the first call - try boot_args + boot_args* args = (boot_args*)PE_state.bootArgs; + + DEBG("%s: data at address %u size %u\n", __func__, args->apfsDataStart, args->apfsDataSize); + if (args->apfsDataStart == 0) { + return NULL; + } + + // We have the key in the boot_args, create IOMemoryDescriptor for the blob + IOAddressRange ranges; + ranges.address = args->apfsDataStart; + ranges.length = args->apfsDataSize; + + const IOOptionBits options = kIODirectionInOut | kIOMemoryTypePhysical64 | kIOMemoryMapperNone; + + IOMemoryDescriptor* memoryDescriptor = IOMemoryDescriptor::withOptions(&ranges, 1, 0, NULL, options); + DEBG("%s: memory descriptor %p\n", __func__, memoryDescriptor); + return memoryDescriptor; } diff --git a/iokit/KernelConfigTables.cpp b/iokit/KernelConfigTables.cpp index de08bbf4b..9dda5e80d 100644 --- a/iokit/KernelConfigTables.cpp +++ b/iokit/KernelConfigTables.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,13 +33,13 @@ */ const char * gIOKernelConfigTables = -"(" -" {" -" 'IOClass' = IOPanicPlatform;" -" 'IOProviderClass' = IOPlatformExpertDevice;" -" 'IOProbeScore' = 0:32;" -" }" -")"; + "(" + " {" + " 'IOClass' = IOPanicPlatform;" + " 'IOProviderClass' = IOPlatformExpertDevice;" + " 'IOProbeScore' = 0:32;" + " }" + ")"; /* This stuff is no longer used at all but was exported in prior * releases, so we'll keep them around for PPC/i386 only. diff --git a/iokit/Tests/TestCollections.cpp b/iokit/Tests/TestCollections.cpp index 02813d3c1..16c4354e7 100644 --- a/iokit/Tests/TestCollections.cpp +++ b/iokit/Tests/TestCollections.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if DEBUG @@ -35,913 +35,966 @@ #include #include -void testArray() +void +testArray() { - bool res = true; - void *spaceCheck, *spaceCheck2 , *spaceCheck3; - int i, j, count, count2; - OSObject *cache[numStrCache], *str, *sym; - OSArray *array1, *array2; - - // Do first test without memory leak tests to initialise the metaclass - array1 = OSArray::withCapacity(1); - TEST_ASSERT('A', "0a", array1); - if (array1) - array1->release(); - - // Grow the symbol pool to maximum - for (i = 0; i < numStrCache; i++) - cache[i] = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); - for (i = 0; i < numStrCache; i++) - cache[i]->release(); - - // Create and destroy an array - spaceCheck = checkPointSpace(); - array1 = OSArray::withCapacity(1); - TEST_ASSERT('A', "1a", array1); - if (array1) { - TEST_ASSERT('A', "1b", !array1->getCount()); - TEST_ASSERT('A', "1c", 1 == array1->getCapacity()); - TEST_ASSERT('A', "1d", 1 == array1->getCapacityIncrement()); - TEST_ASSERT('A', "1e", 4 == array1->setCapacityIncrement(4)); - TEST_ASSERT('A', "1f", 4 == array1->getCapacityIncrement()); - TEST_ASSERT('A', "1g", 8 == array1->ensureCapacity(5)); - - spaceCheck2 = checkPointSpace(); - cache[0] = IOString::withCStringNoCopy(strCache[0]); - - spaceCheck3 = checkPointSpace(); - TEST_ASSERT('A', "1h", array1->setObject(cache[0])); - TEST_ASSERT('A', "1i", cache[0] == array1->getObject(0)); - cache[0]->release(); - res = res && checkSpace("(A)1j", spaceCheck3, 0); - - TEST_ASSERT('A', "1k", 1 == array1->getCount()); - array1->flushCollection(); - TEST_ASSERT('A', "1l", !array1->getCount()); - res = res && checkSpace("(A)1m", spaceCheck2, 0); - - array1->release(); - } - res = res && checkSpace("(A)1", spaceCheck, 0); - - // Check the creation of a sizable OSArray from an array of IOObjects - // Also check indexing into the array. - spaceCheck = checkPointSpace(); - for (i = 0; i < numStrCache; i++) - cache[i] = OSString::withCStringNoCopy(strCache[i]); - array1 = OSArray::withObjects(cache, numStrCache, numStrCache); - TEST_ASSERT('A', "2a", array1); - for (i = 0; i < numStrCache; i++) - cache[i]->release(); - if (array1) { - TEST_ASSERT('A', "2b", numStrCache == (int) array1->getCount()); - TEST_ASSERT('A', "2c", numStrCache == (int) array1->getCapacity()); - TEST_ASSERT('A', "2d", - numStrCache == (int) array1->getCapacityIncrement()); - - for (i = 0; (str = array1->getObject(i)); i++) { - if (str != cache[i]) { - verPrintf(("testArray(A) test 2e%d failed\n", i)); - res = false; - } - } - TEST_ASSERT('A', "2f", numStrCache == i); - array1->release(); - } - res = res && checkSpace("(A)2", spaceCheck, 0); - - // Test array creation from another array by both the setObject method - // and the withArray factory. 
And test __takeObject code first - // with tail removal then with head removal - spaceCheck = checkPointSpace(); - for (i = 0; i < numStrCache; i++) - cache[i] = OSString::withCStringNoCopy(strCache[i]); - array1 = OSArray::withObjects(cache, numStrCache, numStrCache); - TEST_ASSERT('A', "3a", array1); - for (i = 0; i < numStrCache; i++) - cache[i]->release(); - array2 = 0; - if (array1) { - array2 = OSArray::withCapacity(1); - TEST_ASSERT('A', "3b", array2); - TEST_ASSERT('A', "3c", !array2->getCount()); - TEST_ASSERT('A', "3d", array2->setObject(array1)); - TEST_ASSERT('A', "3e", array1->getCount() == array2->getCount()); - } - if (array2) { - count = 0; - TEST_ASSERT('A', "3f", numStrCache == (int) array2->getCount()); - for (i = array2->getCount(); (str = array2->__takeObject(--i)); ) { - if (str != cache[i]) { - verPrintf(("testArray(A) test 3g%d failed\n", i)); - res = false; - } - count += ((int) array2->getCount() == i); - str->release(); - } - TEST_ASSERT('A', "3h", count == numStrCache); - TEST_ASSERT('A', "3i", -1 == i); - TEST_ASSERT('A', "3j", !array2->getCount()); - - spaceCheck2 = checkPointSpace(); - array2->flushCollection(); - res = res && checkSpace("(A)3k", spaceCheck2, 0); - - array2->release(); - array2 = 0; - } - if (array1) { - array2 = OSArray::withArray(array1, numStrCache - 1); - TEST_ASSERT('A', "3l", !array2); - array2 = OSArray::withArray(array1, array1->getCount()); - TEST_ASSERT('A', "3m", array2); - array1->release(); - } - if (array2) { - count = 0; - TEST_ASSERT('A', "3o", numStrCache == (int) array2->getCount()); - for (i = 0; (str = array2->__takeObject(0)); i++) { - count += (str == cache[i]); - str->release(); - } - TEST_ASSERT('A', "3p", count == numStrCache); - TEST_ASSERT('A', "3q", !array2->getCount()); - array2->release(); - array2 = 0; - } - res = res && checkSpace("(A)3", spaceCheck, 0); - - // Test object replacement from one array to another - spaceCheck = checkPointSpace(); - array1 = OSArray::withCapacity(numStrCache); - TEST_ASSERT('A', "4a", array1); - if (array1) { - count = count2 = 0; - for (i = 0; i < numStrCache; i++) { - str = OSString::withCStringNoCopy(strCache[i]); - count += array1->setObject(str); - count2 += (str == array1->lastObject()); - str->release(); - } - TEST_ASSERT('A', "4b", numStrCache == (int) array1->getCount()); - TEST_ASSERT('A', "4c", count == numStrCache); - TEST_ASSERT('A', "4d", count2 == numStrCache); - } - array2 = OSArray::withCapacity(1); - TEST_ASSERT('A', "4e", array2); - if (array2) { - count = count2 = 0; - str = (OSObject *) OSSymbol::withCStringNoCopy(strCache[0]); - for (i = 0; i < numStrCache; i++) { - sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); - count += array2->setObject(sym, 0); - count2 += (str == array2->lastObject()); - sym->release(); - } - str->release(); - TEST_ASSERT('A', "4f", numStrCache == (int) array2->getCount()); - TEST_ASSERT('A', "4g", count == numStrCache); - TEST_ASSERT('A', "4h", count2 == numStrCache); - } - if (array1 && array2) { - - count = count2 = 0; - for (i = array1->getCount() - 1; (sym = array2->__takeObject(0)); i--) { - str = array1->replaceObject(sym, i); - count += (str != 0); - count2 += (sym != str); - if (str) - str->release(); - if (sym) - sym->release(); - } - TEST_ASSERT('A', "4k", numStrCache == (int) array1->getCount()); - TEST_ASSERT('A', "4l", count == numStrCache); - TEST_ASSERT('A', "4m", count2 == numStrCache); - array1->release(); - array2->release(); - } - else { - if (array1) array1->release(); - if (array2) array2->release(); - 
} - res = res && checkSpace("(A)4", spaceCheck, 0); - - // Test array duplicate removal - spaceCheck = checkPointSpace(); - array1 = OSArray::withCapacity(numStrCache); - TEST_ASSERT('A', "5a", array1); - if (array1) { - for (i = 0; i < numStrCache; i++) { - sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); - count += array1->setObject(sym); - sym->release(); - } - TEST_ASSERT('A', "5b", numStrCache == (int) array1->getCount()); - - // remove duplicates - for (i = 0; (sym = array1->getObject(i)); ) - if (sym->getRetainCount() == 1) - i++; - else { - //sym = array1->__takeObject(i); - //sym->release(); - array1->removeObject(i); - } - TEST_ASSERT('A', "5c", numStrCache != (int) array1->getCount()); - - // check to see that all symbols are really there - for (count = 0, i = 0; i < numStrCache; i++) { - sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); - for (count2 = false, j = 0; (str = array1->getObject(j)); j++) - if (str == sym) { - count2 = true; - break; - } - count += count2; - sym->release(); - } - TEST_ASSERT('A', "5c", count == numStrCache); - array1->release(); - } - res = res && checkSpace("(S)5", spaceCheck, 0); - - if (res) - verPrintf(("testArray: All OSArray Tests passed\n")); - else - logPrintf(("testArray: Some OSArray Tests failed\n")); + bool res = true; + void *spaceCheck, *spaceCheck2, *spaceCheck3; + int i, j, count, count2; + OSObject *cache[numStrCache], *str, *sym; + OSArray *array1, *array2; + + // Do first test without memory leak tests to initialise the metaclass + array1 = OSArray::withCapacity(1); + TEST_ASSERT('A', "0a", array1); + if (array1) { + array1->release(); + } + + // Grow the symbol pool to maximum + for (i = 0; i < numStrCache; i++) { + cache[i] = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + } + for (i = 0; i < numStrCache; i++) { + cache[i]->release(); + } + + // Create and destroy an array + spaceCheck = checkPointSpace(); + array1 = OSArray::withCapacity(1); + TEST_ASSERT('A', "1a", array1); + if (array1) { + TEST_ASSERT('A', "1b", !array1->getCount()); + TEST_ASSERT('A', "1c", 1 == array1->getCapacity()); + TEST_ASSERT('A', "1d", 1 == array1->getCapacityIncrement()); + TEST_ASSERT('A', "1e", 4 == array1->setCapacityIncrement(4)); + TEST_ASSERT('A', "1f", 4 == array1->getCapacityIncrement()); + TEST_ASSERT('A', "1g", 8 == array1->ensureCapacity(5)); + + spaceCheck2 = checkPointSpace(); + cache[0] = IOString::withCStringNoCopy(strCache[0]); + + spaceCheck3 = checkPointSpace(); + TEST_ASSERT('A', "1h", array1->setObject(cache[0])); + TEST_ASSERT('A', "1i", cache[0] == array1->getObject(0)); + cache[0]->release(); + res = res && checkSpace("(A)1j", spaceCheck3, 0); + + TEST_ASSERT('A', "1k", 1 == array1->getCount()); + array1->flushCollection(); + TEST_ASSERT('A', "1l", !array1->getCount()); + res = res && checkSpace("(A)1m", spaceCheck2, 0); + + array1->release(); + } + res = res && checkSpace("(A)1", spaceCheck, 0); + + // Check the creation of a sizable OSArray from an array of IOObjects + // Also check indexing into the array. 
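The check announced by the two comment lines above builds one OSArray from the entire string cache via the withObjects() factory and then walks it by index until getObject() returns NULL; its body follows below. As a hedged aside, a minimal sketch of the same create-in-bulk-then-index pattern, assuming the factory signature as used in this file (demoArrayWalk and its three strings are illustrative, not part of the patch):

    #include <libkern/c++/OSArray.h>
    #include <libkern/c++/OSString.h>

    // Sketch only: build an array from a C array of objects, then
    // walk it by index until getObject() returns NULL.
    static bool
    demoArrayWalk(void)
    {
        const char *texts[3] = { "alpha", "beta", "gamma" };
        OSObject *objs[3];
        unsigned int i;

        for (i = 0; i < 3; i++) {
            objs[i] = OSString::withCStringNoCopy(texts[i]);
        }

        // withObjects() retains every element, so the local
        // references can be dropped once the array exists.
        OSArray *array = OSArray::withObjects(objs, 3, 3);
        for (i = 0; i < 3; i++) {
            objs[i]->release();
        }
        if (!array) {
            return false;
        }

        // getObject() returns NULL one index past the end, which is
        // what terminates this walk and the one in the test below.
        OSObject *obj;
        for (i = 0; (obj = array->getObject(i)); i++) {
            // obj is a borrowed reference; no retain is needed to look.
        }

        bool ok = (i == array->getCount());
        array->release();
        return ok;
    }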
+ spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) { + cache[i] = OSString::withCStringNoCopy(strCache[i]); + } + array1 = OSArray::withObjects(cache, numStrCache, numStrCache); + TEST_ASSERT('A', "2a", array1); + for (i = 0; i < numStrCache; i++) { + cache[i]->release(); + } + if (array1) { + TEST_ASSERT('A', "2b", numStrCache == (int) array1->getCount()); + TEST_ASSERT('A', "2c", numStrCache == (int) array1->getCapacity()); + TEST_ASSERT('A', "2d", + numStrCache == (int) array1->getCapacityIncrement()); + + for (i = 0; (str = array1->getObject(i)); i++) { + if (str != cache[i]) { + verPrintf(("testArray(A) test 2e%d failed\n", i)); + res = false; + } + } + TEST_ASSERT('A', "2f", numStrCache == i); + array1->release(); + } + res = res && checkSpace("(A)2", spaceCheck, 0); + + // Test array creation from another array by both the setObject method + // and the withArray factory. And test __takeObject code first + // with tail removal then with head removal + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) { + cache[i] = OSString::withCStringNoCopy(strCache[i]); + } + array1 = OSArray::withObjects(cache, numStrCache, numStrCache); + TEST_ASSERT('A', "3a", array1); + for (i = 0; i < numStrCache; i++) { + cache[i]->release(); + } + array2 = 0; + if (array1) { + array2 = OSArray::withCapacity(1); + TEST_ASSERT('A', "3b", array2); + TEST_ASSERT('A', "3c", !array2->getCount()); + TEST_ASSERT('A', "3d", array2->setObject(array1)); + TEST_ASSERT('A', "3e", array1->getCount() == array2->getCount()); + } + if (array2) { + count = 0; + TEST_ASSERT('A', "3f", numStrCache == (int) array2->getCount()); + for (i = array2->getCount(); (str = array2->__takeObject(--i));) { + if (str != cache[i]) { + verPrintf(("testArray(A) test 3g%d failed\n", i)); + res = false; + } + count += ((int) array2->getCount() == i); + str->release(); + } + TEST_ASSERT('A', "3h", count == numStrCache); + TEST_ASSERT('A', "3i", -1 == i); + TEST_ASSERT('A', "3j", !array2->getCount()); + + spaceCheck2 = checkPointSpace(); + array2->flushCollection(); + res = res && checkSpace("(A)3k", spaceCheck2, 0); + + array2->release(); + array2 = 0; + } + if (array1) { + array2 = OSArray::withArray(array1, numStrCache - 1); + TEST_ASSERT('A', "3l", !array2); + array2 = OSArray::withArray(array1, array1->getCount()); + TEST_ASSERT('A', "3m", array2); + array1->release(); + } + if (array2) { + count = 0; + TEST_ASSERT('A', "3o", numStrCache == (int) array2->getCount()); + for (i = 0; (str = array2->__takeObject(0)); i++) { + count += (str == cache[i]); + str->release(); + } + TEST_ASSERT('A', "3p", count == numStrCache); + TEST_ASSERT('A', "3q", !array2->getCount()); + array2->release(); + array2 = 0; + } + res = res && checkSpace("(A)3", spaceCheck, 0); + + // Test object replacement from one array to another + spaceCheck = checkPointSpace(); + array1 = OSArray::withCapacity(numStrCache); + TEST_ASSERT('A', "4a", array1); + if (array1) { + count = count2 = 0; + for (i = 0; i < numStrCache; i++) { + str = OSString::withCStringNoCopy(strCache[i]); + count += array1->setObject(str); + count2 += (str == array1->lastObject()); + str->release(); + } + TEST_ASSERT('A', "4b", numStrCache == (int) array1->getCount()); + TEST_ASSERT('A', "4c", count == numStrCache); + TEST_ASSERT('A', "4d", count2 == numStrCache); + } + array2 = OSArray::withCapacity(1); + TEST_ASSERT('A', "4e", array2); + if (array2) { + count = count2 = 0; + str = (OSObject *) OSSymbol::withCStringNoCopy(strCache[0]); + for (i = 0; i < numStrCache; 
i++) {
+			sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]);
+			count += array2->setObject(sym, 0);
+			count2 += (str == array2->lastObject());
+			sym->release();
+		}
+		str->release();
+		TEST_ASSERT('A', "4f", numStrCache == (int) array2->getCount());
+		TEST_ASSERT('A', "4g", count == numStrCache);
+		TEST_ASSERT('A', "4h", count2 == numStrCache);
+	}
+	if (array1 && array2) {
+		count = count2 = 0;
+		for (i = array1->getCount() - 1; (sym = array2->__takeObject(0)); i--) {
+			str = array1->replaceObject(sym, i);
+			count += (str != 0);
+			count2 += (sym != str);
+			if (str) {
+				str->release();
+			}
+			if (sym) {
+				sym->release();
+			}
+		}
+		TEST_ASSERT('A', "4k", numStrCache == (int) array1->getCount());
+		TEST_ASSERT('A', "4l", count == numStrCache);
+		TEST_ASSERT('A', "4m", count2 == numStrCache);
+		array1->release();
+		array2->release();
+	} else {
+		if (array1) {
+			array1->release();
+		}
+		if (array2) {
+			array2->release();
+		}
+	}
+	res = res && checkSpace("(A)4", spaceCheck, 0);
+
+	// Test array duplicate removal
+	spaceCheck = checkPointSpace();
+	array1 = OSArray::withCapacity(numStrCache);
+	TEST_ASSERT('A', "5a", array1);
+	if (array1) {
+		for (i = 0; i < numStrCache; i++) {
+			sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]);
+			count += array1->setObject(sym);
+			sym->release();
+		}
+		TEST_ASSERT('A', "5b", numStrCache == (int) array1->getCount());
+
+		// remove duplicates
+		for (i = 0; (sym = array1->getObject(i));) {
+			if (sym->getRetainCount() == 1) {
+				i++;
+			} else {
+				//sym = array1->__takeObject(i);
+				//sym->release();
+				array1->removeObject(i);
+			}
+		}
+		TEST_ASSERT('A', "5c", numStrCache != (int) array1->getCount());
+
+		// check to see that all symbols are really there
+		for (count = 0, i = 0; i < numStrCache; i++) {
+			sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]);
+			for (count2 = false, j = 0; (str = array1->getObject(j)); j++) {
+				if (str == sym) {
+					count2 = true;
+					break;
+				}
+			}
+			count += count2;
+			sym->release();
+		}
+		TEST_ASSERT('A', "5d", count == numStrCache);
+		array1->release();
+	}
+	res = res && checkSpace("(A)5", spaceCheck, 0);
+
+	if (res) {
+		verPrintf(("testArray: All OSArray Tests passed\n"));
+	} else {
+		logPrintf(("testArray: Some OSArray Tests failed\n"));
+	}
 }

-void testSet()
+void
+testSet()
 {
-    bool res = true;
-    void *spaceCheck, *spaceCheck2 , *spaceCheck3;
-    int i, count, count2;
-    OSObject *cache[numStrCache], *str, *sym;
-    OSSet *set1, *set2;
-    OSArray *array;
-
-    // Do first test without memory leak tests to initialise the metaclass
-    set1 = OSSet::withCapacity(1);
-    TEST_ASSERT('S', "0a", set1);
-    if (set1)
-        set1->release();
-
-    // Grow the symbol pool to maximum
-    for (i = 0; i < numStrCache; i++)
-        cache[i] = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]);
-    for (i = 0; i < numStrCache; i++)
-        cache[i]->release();
-
-    // Create and destroy an set
-    spaceCheck = checkPointSpace();
-    set1 = OSSet::withCapacity(1);
-    TEST_ASSERT('S', "1a", set1);
-    if (set1) {
-        TEST_ASSERT('S', "1b", !set1->getCount());
-        TEST_ASSERT('S', "1c", 1 == set1->getCapacity());
-        TEST_ASSERT('S', "1d", 1 == set1->getCapacityIncrement());
-        TEST_ASSERT('S', "1e", 4 == set1->setCapacityIncrement(4));
-        TEST_ASSERT('S', "1f", 4 == set1->getCapacityIncrement());
-        TEST_ASSERT('S', "1g", 8 == set1->ensureCapacity(5));
-
-        spaceCheck2 = checkPointSpace();
-        cache[0] = IOString::withCStringNoCopy(strCache[0]);
-
-        spaceCheck3 = checkPointSpace();
-        TEST_ASSERT('S', "1h", set1->setObject(cache[0]));
-        TEST_ASSERT('S', "1i",
set1->containsObject(cache[0])); - TEST_ASSERT('S', "1j", cache[0] == set1->getAnyObject()); - cache[0]->release(); - res = res && checkSpace("(S)1k", spaceCheck3, 0); - - TEST_ASSERT('S', "1l", 1 == set1->getCount()); - set1->flushCollection(); - TEST_ASSERT('S', "1m", !set1->getCount()); - res = res && checkSpace("(S)1n", spaceCheck2, 0); - - set1->release(); - } - res = res && checkSpace("(S)1", spaceCheck, 0); - - // Check the creation of a sizable OSSet from an set of IOObjects - // Also check member test of set. - spaceCheck = checkPointSpace(); - for (i = 0; i < numStrCache; i++) - cache[i] = OSString::withCStringNoCopy(strCache[i]); - set1 = OSSet::withObjects(cache, numStrCache, numStrCache); - TEST_ASSERT('S', "2a", set1); - for (i = 0; i < numStrCache; i++) - cache[i]->release(); - if (set1) { - TEST_ASSERT('S', "2b", numStrCache == (int) set1->getCount()); - TEST_ASSERT('S', "2c", numStrCache == (int) set1->getCapacity()); - TEST_ASSERT('S', "2d", - numStrCache == (int) set1->getCapacityIncrement()); - - count = 0; - for (i = set1->getCount(); --i >= 0; ) - count += set1->member(cache[i]); - - TEST_ASSERT('S', "2e", numStrCache == count); - set1->release(); - } - res = res && checkSpace("(S)2", spaceCheck, 0); - - // Test set creation from another set by both the setObject method - // and the withArray factory. And test __takeObject code first - // with tail removal then with head removal - spaceCheck = checkPointSpace(); - for (i = 0; i < numStrCache; i++) - cache[i] = OSString::withCStringNoCopy(strCache[i]); - set1 = OSSet::withObjects(cache, numStrCache, numStrCache); - TEST_ASSERT('S', "3a", set1); - for (i = 0; i < numStrCache; i++) - cache[i]->release(); - set2 = 0; - if (set1) { - set2 = OSSet::withCapacity(set1->getCount()); - TEST_ASSERT('S', "3b", set2); - TEST_ASSERT('S', "3c", !set2->getCount()); - TEST_ASSERT('S', "3d", set2->setObject(set1)); - TEST_ASSERT('S', "3e", set1->getCount() == set2->getCount()); - } - if (set2) { - TEST_ASSERT('S', "3f", numStrCache == (int) set2->getCount()); - count = count2 = 0; - while ( (str = set2->getAnyObject()) ) { - count += set2->__takeObject(str); - count2 += set1->member(str); - str->release(); - } - TEST_ASSERT('S', "3g", !set2->getCount()); - TEST_ASSERT('S', "3h", numStrCache == count); - TEST_ASSERT('S', "3i", numStrCache == count2); - - spaceCheck2 = checkPointSpace(); - set2->flushCollection(); - res = res && checkSpace("(S)3j", spaceCheck2, 0); - - set2->release(); - set2 = 0; - } - if (set1) { - set2 = OSSet::withSet(set1, numStrCache - 1); - TEST_ASSERT('S', "3k", !set2); - set2 = OSSet::withSet(set1, set1->getCount()); - TEST_ASSERT('S', "3l", set2); - set1->release(); - } - if (set2) { - TEST_ASSERT('S', "3m", numStrCache == (int) set2->getCount()); - i = count = count2 = 0; - while ( (str = set2->getAnyObject()) ) { - count += set2->__takeObject(str); - count2 += (cache[i++] == str); - str->release(); - } - TEST_ASSERT('S', "3n", !set2->getCount()); - TEST_ASSERT('S', "3o", numStrCache == count); - TEST_ASSERT('S', "3p", numStrCache == count2); - - set2->release(); - set2 = 0; - } - res = res && checkSpace("(S)3", spaceCheck, 0); - - // Test duplicate removal - spaceCheck = checkPointSpace(); - set2 = 0; - set1 = OSSet::withCapacity(numStrCache); - TEST_ASSERT('S', "4a", set1); - if (set1) { - count = 0; - for (i = 0; i < numStrCache; i++) { - sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); - count += set1->setObject(sym); - sym->release(); - } - TEST_ASSERT('S', "4b", numStrCache != (int) 
set1->getCount());
-    TEST_ASSERT('S', "4c", count == (int) set1->getCount());
-
-    count = count2 = 0;
-    for (i = 0; i < numStrCache; i++) {
-        sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]);
-        count += set1->member(sym);
-        count2 += sym->getRetainCount();
-        sym->release();
-    }
-    TEST_ASSERT('S', "4d", count == numStrCache);
-    TEST_ASSERT('S', "4e", count2 == numStrCache * 2);
-
-        set2 = OSSet::withSet(set1, 2 * set1->getCount());
-    }
-    TEST_ASSERT('S', "4f", set2);
-    if (set2) {
-        set2->setObject(set1);
-        TEST_ASSERT('S', "4g", set1->getCount() == set2->getCount());
-        set1->release();
-        set2->release();
-    }
-    res = res && checkSpace("(S)4", spaceCheck, 0);
-
-    // Test array duplicate removal
-    spaceCheck = checkPointSpace();
-    array = OSArray::withCapacity(numStrCache);
-    for (i = 0; i < numStrCache; i++) {
-        sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]);
-        count += array->setObject(sym);
-        sym->release();
-    }
-    set1 = OSSet::withArray(array, numStrCache);
-    TEST_ASSERT('S', "5a", set1);
-    if (set1) {
-        TEST_ASSERT('S', "5b", array->getCount() != set1->getCount());
-        array->release();
-
-        count = count2 = set1->getCount();
-        while ( (sym = set1->getAnyObject()) ) {
-            count -= set1->__takeObject(sym);
-            count2 -= sym->getRetainCount();
-            sym->release();
-        }
-        TEST_ASSERT('S', "5c", !count);
-        TEST_ASSERT('S', "5d", !count2);
-        set1->release();
-    }
-    res = res && checkSpace("(S)5", spaceCheck, 0);
-
-    if (res)
-        verPrintf(("testSet: All OSSet Tests passed\n"));
-    else
-        logPrintf(("testSet: Some OSSet Tests failed\n"));
+	bool res = true;
+	void *spaceCheck, *spaceCheck2, *spaceCheck3;
+	int i, count, count2;
+	OSObject *cache[numStrCache], *str, *sym;
+	OSSet *set1, *set2;
+	OSArray *array;
+
+	// Do first test without memory leak tests to initialise the metaclass
+	set1 = OSSet::withCapacity(1);
+	TEST_ASSERT('S', "0a", set1);
+	if (set1) {
+		set1->release();
+	}
+
+	// Grow the symbol pool to maximum
+	for (i = 0; i < numStrCache; i++) {
+		cache[i] = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]);
+	}
+	for (i = 0; i < numStrCache; i++) {
+		cache[i]->release();
+	}
+
+	// Create and destroy a set
+	spaceCheck = checkPointSpace();
+	set1 = OSSet::withCapacity(1);
+	TEST_ASSERT('S', "1a", set1);
+	if (set1) {
+		TEST_ASSERT('S', "1b", !set1->getCount());
+		TEST_ASSERT('S', "1c", 1 == set1->getCapacity());
+		TEST_ASSERT('S', "1d", 1 == set1->getCapacityIncrement());
+		TEST_ASSERT('S', "1e", 4 == set1->setCapacityIncrement(4));
+		TEST_ASSERT('S', "1f", 4 == set1->getCapacityIncrement());
+		TEST_ASSERT('S', "1g", 8 == set1->ensureCapacity(5));
+
+		spaceCheck2 = checkPointSpace();
+		cache[0] = IOString::withCStringNoCopy(strCache[0]);
+
+		spaceCheck3 = checkPointSpace();
+		TEST_ASSERT('S', "1h", set1->setObject(cache[0]));
+		TEST_ASSERT('S', "1i", set1->containsObject(cache[0]));
+		TEST_ASSERT('S', "1j", cache[0] == set1->getAnyObject());
+		cache[0]->release();
+		res = res && checkSpace("(S)1k", spaceCheck3, 0);
+
+		TEST_ASSERT('S', "1l", 1 == set1->getCount());
+		set1->flushCollection();
+		TEST_ASSERT('S', "1m", !set1->getCount());
+		res = res && checkSpace("(S)1n", spaceCheck2, 0);
+
+		set1->release();
+	}
+	res = res && checkSpace("(S)1", spaceCheck, 0);
+
+	// Check the creation of a sizable OSSet from an array of IOObjects
+	// Also check member test of set.
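The membership check introduced above mirrors the array case: populate the set in bulk, then confirm each original pointer answers member(). Before its body below, a minimal sketch of that pattern under the same API assumptions (demoSetMembership and its two symbols are illustrative, not part of the patch):

    #include <libkern/c++/OSSet.h>
    #include <libkern/c++/OSSymbol.h>

    // Sketch only: bulk-create a set, then probe membership.
    static bool
    demoSetMembership(void)
    {
        OSObject *objs[2];
        objs[0] = (OSObject *) OSSymbol::withCStringNoCopy("left");
        objs[1] = (OSObject *) OSSymbol::withCStringNoCopy("right");

        // As with OSArray, the withObjects() factory retains each member.
        OSSet *set = OSSet::withObjects(objs, 2, 2);
        bool ok = set
            && set->member(objs[0])
            && set->member(objs[1])
            && set->getCount() == 2;

        objs[0]->release();
        objs[1]->release();
        if (set) {
            set->release();
        }
        return ok;
    }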
+ spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) { + cache[i] = OSString::withCStringNoCopy(strCache[i]); + } + set1 = OSSet::withObjects(cache, numStrCache, numStrCache); + TEST_ASSERT('S', "2a", set1); + for (i = 0; i < numStrCache; i++) { + cache[i]->release(); + } + if (set1) { + TEST_ASSERT('S', "2b", numStrCache == (int) set1->getCount()); + TEST_ASSERT('S', "2c", numStrCache == (int) set1->getCapacity()); + TEST_ASSERT('S', "2d", + numStrCache == (int) set1->getCapacityIncrement()); + + count = 0; + for (i = set1->getCount(); --i >= 0;) { + count += set1->member(cache[i]); + } + + TEST_ASSERT('S', "2e", numStrCache == count); + set1->release(); + } + res = res && checkSpace("(S)2", spaceCheck, 0); + + // Test set creation from another set by both the setObject method + // and the withArray factory. And test __takeObject code first + // with tail removal then with head removal + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) { + cache[i] = OSString::withCStringNoCopy(strCache[i]); + } + set1 = OSSet::withObjects(cache, numStrCache, numStrCache); + TEST_ASSERT('S', "3a", set1); + for (i = 0; i < numStrCache; i++) { + cache[i]->release(); + } + set2 = 0; + if (set1) { + set2 = OSSet::withCapacity(set1->getCount()); + TEST_ASSERT('S', "3b", set2); + TEST_ASSERT('S', "3c", !set2->getCount()); + TEST_ASSERT('S', "3d", set2->setObject(set1)); + TEST_ASSERT('S', "3e", set1->getCount() == set2->getCount()); + } + if (set2) { + TEST_ASSERT('S', "3f", numStrCache == (int) set2->getCount()); + count = count2 = 0; + while ((str = set2->getAnyObject())) { + count += set2->__takeObject(str); + count2 += set1->member(str); + str->release(); + } + TEST_ASSERT('S', "3g", !set2->getCount()); + TEST_ASSERT('S', "3h", numStrCache == count); + TEST_ASSERT('S', "3i", numStrCache == count2); + + spaceCheck2 = checkPointSpace(); + set2->flushCollection(); + res = res && checkSpace("(S)3j", spaceCheck2, 0); + + set2->release(); + set2 = 0; + } + if (set1) { + set2 = OSSet::withSet(set1, numStrCache - 1); + TEST_ASSERT('S', "3k", !set2); + set2 = OSSet::withSet(set1, set1->getCount()); + TEST_ASSERT('S', "3l", set2); + set1->release(); + } + if (set2) { + TEST_ASSERT('S', "3m", numStrCache == (int) set2->getCount()); + i = count = count2 = 0; + while ((str = set2->getAnyObject())) { + count += set2->__takeObject(str); + count2 += (cache[i++] == str); + str->release(); + } + TEST_ASSERT('S', "3n", !set2->getCount()); + TEST_ASSERT('S', "3o", numStrCache == count); + TEST_ASSERT('S', "3p", numStrCache == count2); + + set2->release(); + set2 = 0; + } + res = res && checkSpace("(S)3", spaceCheck, 0); + + // Test duplicate removal + spaceCheck = checkPointSpace(); + set2 = 0; + set1 = OSSet::withCapacity(numStrCache); + TEST_ASSERT('S', "4a", set1); + if (set1) { + count = 0; + for (i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + count += set1->setObject(sym); + sym->release(); + } + TEST_ASSERT('S', "4b", numStrCache != (int) set1->getCount()); + TEST_ASSERT('S', "4c", count == (int) set1->getCount()); + + count = count2 = 0; + for (i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + count += set1->member(sym); + count2 += sym->getRetainCount(); + sym->release(); + } + TEST_ASSERT('S', "4d", count == numStrCache); + TEST_ASSERT('S', "4e", count2 == numStrCache * 2); + + set2 = OSSet::withSet(set1, 2 * set1->getCount()); + } + TEST_ASSERT('S', "4f", set2); + if (set2) { + 
set2->setObject(set1); + TEST_ASSERT('S', "4g", set1->getCount() == set2->getCount()); + set1->release(); + set2->release(); + } + res = res && checkSpace("(S)4", spaceCheck, 0); + + // Test array duplicate removal + spaceCheck = checkPointSpace(); + array = OSArray::withCapacity(numStrCache); + for (i = 0; i < numStrCache; i++) { + sym = (OSObject *) OSSymbol::withCStringNoCopy(strCache[i]); + count += array->setObject(sym); + sym->release(); + } + set1 = OSSet::withArray(array, numStrCache); + TEST_ASSERT('S', "5a", set1); + if (set1) { + TEST_ASSERT('S', "5b", array->getCount() != set1->getCount()); + array->release(); + + count = count2 = set1->getCount(); + while ((sym = set1->getAnyObject())) { + count -= set1->__takeObject(sym); + count2 -= sym->getRetainCount(); + sym->release(); + } + TEST_ASSERT('S', "5c", !count); + TEST_ASSERT('S', "5d", !count2); + set1->release(); + } + res = res && checkSpace("(S)5", spaceCheck, 0); + + if (res) { + verPrintf(("testSet: All OSSet Tests passed\n")); + } else { + logPrintf(("testSet: Some OSSet Tests failed\n")); + } } -void testDictionary() +void +testDictionary() { - bool res = true; - void *spaceCheck, *spaceCheck2, *spaceCheck3; - OSObject *cache[numStrCache]; - OSString *str; - const OSSymbol *symCache[numStrCache], *sym; - OSDictionary *dict1, *dict2; - int i, numSymbols, count1, count2; - - // Do first test without memory leak tests to initialise the metaclass - dict1 = OSDictionary::withCapacity(1); - TEST_ASSERT('D', "0a", dict1); - if (dict1) - dict1->release(); - - // Grow the symbol pool to maximum - for (i = 0; i < numStrCache; i++) - symCache[i] = OSSymbol::withCStringNoCopy(strCache[i]); - for (i = 0; i < numStrCache; i++) - symCache[i]->release(); - - // Create and destroy a dictionary - spaceCheck = checkPointSpace(); - dict1 = OSDictionary::withCapacity(1); - TEST_ASSERT('D', "1a", dict1); - if (dict1) { - TEST_ASSERT('D', "1b", !dict1->getCount()); - TEST_ASSERT('D', "1c", 1 == dict1->getCapacity()); - TEST_ASSERT('D', "1d", 1 == dict1->getCapacityIncrement()); - TEST_ASSERT('D', "1e", 4 == dict1->setCapacityIncrement(4)); - TEST_ASSERT('D', "1f", 4 == dict1->getCapacityIncrement()); - TEST_ASSERT('D', "1g", 8 == dict1->ensureCapacity(5)); - - spaceCheck2 = checkPointSpace(); - sym = OSSymbol::withCStringNoCopy(strCache[0]); - - spaceCheck3 = checkPointSpace(); - TEST_ASSERT('D', "1h", dict1->setObject((OSObject *) sym, sym)); - TEST_ASSERT('D', "1i", (OSObject *) sym == dict1->getObject(sym)); - sym->release(); - TEST_ASSERT('D', "1i", 2 == sym->getRetainCount()); - res = res && checkSpace("(D)1j", spaceCheck3, 0); - - TEST_ASSERT('D', "1k", 1 == dict1->getCount()); - dict1->flushCollection(); - TEST_ASSERT('D', "1l", !dict1->getCount()); - res = res && checkSpace("(D)1m", spaceCheck2, 0); - - dict1->release(); - } - res = res && checkSpace("(D)1", spaceCheck, 0); - - // Check the creation of a sizable OSDictionary from an array of IOObjects - // Also check indexing into the array. 
- spaceCheck = checkPointSpace(); - for (i = 0, numSymbols = 0; i < numStrCache; i++) { - sym = OSSymbol::withCStringNoCopy(strCache[i]); - if (1 == sym->getRetainCount()) - symCache[numSymbols++] = sym; - else - sym->release(); - } - dict1 = OSDictionary::withObjects( - (OSObject **) symCache, symCache, numSymbols, numSymbols); - TEST_ASSERT('D', "2a", dict1); - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) - count1 += (symCache[i]->getRetainCount() == 3); - TEST_ASSERT('D', "2b", count1 == numSymbols); - if (dict1) { - TEST_ASSERT('D', "2c", numSymbols == (int) dict1->getCount()); - TEST_ASSERT('D', "2d", numSymbols == (int) dict1->getCapacity()); - TEST_ASSERT('D', "2e", - numSymbols == (int) dict1->getCapacityIncrement()); - - for (i = dict1->getCount(); --i >= 0; ) { - str = (OSString *) dict1->getObject(symCache[i]); - if (str != (OSString *) symCache[i]) { - verPrintf(("testDictionary(D) test 2f%d failed\n", i)); - res = false; - } - } - dict1->release(); - } - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) { - count1 += (symCache[i]->getRetainCount() == 1); - symCache[i]->release(); - } - TEST_ASSERT('D', "2g", count1 == numSymbols); - res = res && checkSpace("(D)2", spaceCheck, 0); - - // Check the creation of a sizable Dictionary from an array of IOStrings - // Also check searching dictionary use OSString for a key. - spaceCheck = checkPointSpace(); - for (i = 0, numSymbols = 0; i < numStrCache; i++) { - sym = OSSymbol::withCStringNoCopy(strCache[i]); - if (1 == sym->getRetainCount()) { - cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); - symCache[numSymbols] = sym; - numSymbols++; - } - else - sym->release(); - } - dict1 = OSDictionary::withObjects((OSObject **) symCache, - (OSString **) cache, - numSymbols, numSymbols); - TEST_ASSERT('D', "3a", dict1); - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) { - count1 += (symCache[i]->getRetainCount() == 3); - count2 += (cache[i]->getRetainCount() == 1); - } - TEST_ASSERT('D', "3b", count1 == numSymbols); - TEST_ASSERT('D', "3c", count2 == numSymbols); - if (dict1) { - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) { - str = (OSString *) cache[i]; - count1 += (symCache[i] == (const OSSymbol *) dict1->getObject(str)); - count2 += (symCache[i]->getRetainCount() == 3); - } - TEST_ASSERT('D', "3d", count1 == numSymbols); - TEST_ASSERT('D', "3e", count2 == numSymbols); - - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) { - const char *cStr = ((OSString *) cache[i])->getCStringNoCopy(); - - count1 += (symCache[i] == (const OSSymbol *) dict1->getObject(cStr)); - count2 += (symCache[i]->getRetainCount() == 3); - } - TEST_ASSERT('D', "3f", count1 == numSymbols); - TEST_ASSERT('D', "3g", count2 == numSymbols); - - dict1->release(); - } - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) { - count1 += (symCache[i]->getRetainCount() == 1); - count2 += (cache[i]->getRetainCount() == 1); - symCache[i]->release(); - cache[i]->release(); - } - TEST_ASSERT('D', "3h", count1 == numSymbols); - res = res && checkSpace("(D)3", spaceCheck, 0); - - // Check the creation of a small dictionary then grow it one item at a time - // Create a new dictionary from the old dictionary. - // Finally remove each item permanently. 
- spaceCheck = checkPointSpace(); - for (i = 0, numSymbols = 0; i < numStrCache; i++) { - sym = OSSymbol::withCStringNoCopy(strCache[i]); - if (1 == sym->getRetainCount()) { - cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); - symCache[numSymbols] = sym; - numSymbols++; - } - else - sym->release(); - } - dict2 = 0; - dict1 = OSDictionary::withCapacity(1); - TEST_ASSERT('D', "4a", dict1); - if (dict1) { - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) { - sym = symCache[i]; - count1 += ((OSObject *) sym == dict1->setObject((OSObject *) sym, - sym->getCStringNoCopy())); - count2 += (sym->getRetainCount() == 3); - } - TEST_ASSERT('D', "4b", numSymbols == (int) dict1->getCount()); - TEST_ASSERT('D', "4c", numSymbols == count1); - TEST_ASSERT('D', "4d", numSymbols == count2); - - dict2 = OSDictionary::withDictionary(dict1, numSymbols-1); - TEST_ASSERT('D', "4b", !dict2); - dict2 = OSDictionary::withDictionary(dict1, numSymbols); - } - TEST_ASSERT('D', "4e", dict2); - if (dict2) { - dict1->release(); dict1 = 0; - - TEST_ASSERT('D', "4f", numSymbols == (int) dict2->getCount()); - - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) { - OSObject *replacedObject; - - sym = symCache[i]; - str = (OSString *) cache[i]; - replacedObject = dict2->setObject(str, str); - count1 += ((OSString *) sym == replacedObject); - replacedObject->release(); - count2 += (sym->getRetainCount() == 2); - str->release(); - } - TEST_ASSERT('D', "4g", numSymbols == count1); - TEST_ASSERT('D', "4h", numSymbols == count2); - - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) { - sym = symCache[i]; - str = (OSString *) cache[i]; - count1 += (str == dict2->__takeObject(sym)); - str->release(); - count2 += (sym->getRetainCount() == 1); - sym->release(); - } - TEST_ASSERT('D', "4i", numSymbols == count1); - TEST_ASSERT('D', "4j", numSymbols == count2); - TEST_ASSERT('D', "4k", !dict2->getCount()); - dict2->release(); dict2 = 0; - } - else if (dict1) - dict1->release(); - res = res && checkSpace("(D)4", spaceCheck, 0); - - if (res) - verPrintf(("testDictionary: All OSDictionary Tests passed\n")); - else - logPrintf(("testDictionary: Some OSDictionary Tests failed\n")); + bool res = true; + void *spaceCheck, *spaceCheck2, *spaceCheck3; + OSObject *cache[numStrCache]; + OSString *str; + const OSSymbol *symCache[numStrCache], *sym; + OSDictionary *dict1, *dict2; + int i, numSymbols, count1, count2; + + // Do first test without memory leak tests to initialise the metaclass + dict1 = OSDictionary::withCapacity(1); + TEST_ASSERT('D', "0a", dict1); + if (dict1) { + dict1->release(); + } + + // Grow the symbol pool to maximum + for (i = 0; i < numStrCache; i++) { + symCache[i] = OSSymbol::withCStringNoCopy(strCache[i]); + } + for (i = 0; i < numStrCache; i++) { + symCache[i]->release(); + } + + // Create and destroy a dictionary + spaceCheck = checkPointSpace(); + dict1 = OSDictionary::withCapacity(1); + TEST_ASSERT('D', "1a", dict1); + if (dict1) { + TEST_ASSERT('D', "1b", !dict1->getCount()); + TEST_ASSERT('D', "1c", 1 == dict1->getCapacity()); + TEST_ASSERT('D', "1d", 1 == dict1->getCapacityIncrement()); + TEST_ASSERT('D', "1e", 4 == dict1->setCapacityIncrement(4)); + TEST_ASSERT('D', "1f", 4 == dict1->getCapacityIncrement()); + TEST_ASSERT('D', "1g", 8 == dict1->ensureCapacity(5)); + + spaceCheck2 = checkPointSpace(); + sym = OSSymbol::withCStringNoCopy(strCache[0]); + + spaceCheck3 = checkPointSpace(); + TEST_ASSERT('D', "1h", dict1->setObject((OSObject *) sym, sym)); + TEST_ASSERT('D', "1i", 
(OSObject *) sym == dict1->getObject(sym));
+		sym->release();
+		TEST_ASSERT('D', "1i", 2 == sym->getRetainCount());
+		res = res && checkSpace("(D)1j", spaceCheck3, 0);
+
+		TEST_ASSERT('D', "1k", 1 == dict1->getCount());
+		dict1->flushCollection();
+		TEST_ASSERT('D', "1l", !dict1->getCount());
+		res = res && checkSpace("(D)1m", spaceCheck2, 0);
+
+		dict1->release();
+	}
+	res = res && checkSpace("(D)1", spaceCheck, 0);
+
+	// Check the creation of a sizable OSDictionary from an array of IOObjects
+	// Also check indexing into the array.
+	spaceCheck = checkPointSpace();
+	for (i = 0, numSymbols = 0; i < numStrCache; i++) {
+		sym = OSSymbol::withCStringNoCopy(strCache[i]);
+		if (1 == sym->getRetainCount()) {
+			symCache[numSymbols++] = sym;
+		} else {
+			sym->release();
+		}
+	}
+	dict1 = OSDictionary::withObjects(
+		(OSObject **) symCache, symCache, numSymbols, numSymbols);
+	TEST_ASSERT('D', "2a", dict1);
+	count1 = count2 = 0;
+	for (i = 0; i < numSymbols; i++) {
+		count1 += (symCache[i]->getRetainCount() == 3);
+	}
+	TEST_ASSERT('D', "2b", count1 == numSymbols);
+	if (dict1) {
+		TEST_ASSERT('D', "2c", numSymbols == (int) dict1->getCount());
+		TEST_ASSERT('D', "2d", numSymbols == (int) dict1->getCapacity());
+		TEST_ASSERT('D', "2e",
+		    numSymbols == (int) dict1->getCapacityIncrement());
+
+		for (i = dict1->getCount(); --i >= 0;) {
+			str = (OSString *) dict1->getObject(symCache[i]);
+			if (str != (OSString *) symCache[i]) {
+				verPrintf(("testDictionary(D) test 2f%d failed\n", i));
+				res = false;
+			}
+		}
+		dict1->release();
+	}
+	count1 = count2 = 0;
+	for (i = 0; i < numSymbols; i++) {
+		count1 += (symCache[i]->getRetainCount() == 1);
+		symCache[i]->release();
+	}
+	TEST_ASSERT('D', "2g", count1 == numSymbols);
+	res = res && checkSpace("(D)2", spaceCheck, 0);
+
+	// Check the creation of a sizable Dictionary from an array of IOStrings
+	// Also check searching the dictionary using an OSString as a key.
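The hunk that follows verifies that a dictionary populated under OSSymbol keys can be searched with an equivalent OSString, or with a raw C string, since every key form is canonicalised to the same unique OSSymbol. A hedged sketch of that lookup pattern, using the object-first setObject(object, key) signature this file exercises (demoDictLookup and its literal key are illustrative, not part of the patch):

    #include <libkern/c++/OSDictionary.h>
    #include <libkern/c++/OSString.h>

    // Sketch only: store under a C-string key, then search with both
    // a C string and an OSString carrying the same text.
    static bool
    demoDictLookup(void)
    {
        bool ok = false;
        OSDictionary *dict = OSDictionary::withCapacity(1);
        OSString *val = OSString::withCStringNoCopy("value");

        if (dict && val) {
            dict->setObject((OSObject *) val, "answer");

            OSString *key = OSString::withCStringNoCopy("answer");
            ok = key
                && dict->getObject("answer") == (OSObject *) val
                && dict->getObject(key) == (OSObject *) val;
            if (key) {
                key->release();
            }
        }
        if (val) {
            val->release();
        }
        if (dict) {
            dict->release();
        }
        return ok;
    }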
+ spaceCheck = checkPointSpace(); + for (i = 0, numSymbols = 0; i < numStrCache; i++) { + sym = OSSymbol::withCStringNoCopy(strCache[i]); + if (1 == sym->getRetainCount()) { + cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); + symCache[numSymbols] = sym; + numSymbols++; + } else { + sym->release(); + } + } + dict1 = OSDictionary::withObjects((OSObject **) symCache, + (OSString **) cache, + numSymbols, numSymbols); + TEST_ASSERT('D', "3a", dict1); + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += (symCache[i]->getRetainCount() == 3); + count2 += (cache[i]->getRetainCount() == 1); + } + TEST_ASSERT('D', "3b", count1 == numSymbols); + TEST_ASSERT('D', "3c", count2 == numSymbols); + if (dict1) { + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + str = (OSString *) cache[i]; + count1 += (symCache[i] == (const OSSymbol *) dict1->getObject(str)); + count2 += (symCache[i]->getRetainCount() == 3); + } + TEST_ASSERT('D', "3d", count1 == numSymbols); + TEST_ASSERT('D', "3e", count2 == numSymbols); + + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + const char *cStr = ((OSString *) cache[i])->getCStringNoCopy(); + + count1 += (symCache[i] == (const OSSymbol *) dict1->getObject(cStr)); + count2 += (symCache[i]->getRetainCount() == 3); + } + TEST_ASSERT('D', "3f", count1 == numSymbols); + TEST_ASSERT('D', "3g", count2 == numSymbols); + + dict1->release(); + } + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += (symCache[i]->getRetainCount() == 1); + count2 += (cache[i]->getRetainCount() == 1); + symCache[i]->release(); + cache[i]->release(); + } + TEST_ASSERT('D', "3h", count1 == numSymbols); + res = res && checkSpace("(D)3", spaceCheck, 0); + + // Check the creation of a small dictionary then grow it one item at a time + // Create a new dictionary from the old dictionary. + // Finally remove each item permanently. 
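Before the incremental-growth hunk below, note the copy semantics its 4b and 4e assertions rely on: OSDictionary::withDictionary() fails when asked for less capacity than the source already holds, while an exact-capacity copy retains the source's keys and values. A minimal sketch under those assumptions (demoDictCopy is illustrative, not part of the patch):

    #include <libkern/c++/OSDictionary.h>

    // Sketch only: probe the too-small-capacity failure, then take a
    // real exact-capacity copy.
    static OSDictionary *
    demoDictCopy(OSDictionary *src)
    {
        if (!src || !src->getCount()) {
            return 0;
        }

        // Requesting less capacity than the source count is expected
        // to fail and return 0, exactly what assertion 4b probes.
        OSDictionary *copy = OSDictionary::withDictionary(src, src->getCount() - 1);
        if (copy) {
            copy->release(); // unexpected in this API; don't leak it
            return 0;
        }

        // An exact-capacity copy shares (retains) keys and values.
        return OSDictionary::withDictionary(src, src->getCount());
    }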
+ spaceCheck = checkPointSpace(); + for (i = 0, numSymbols = 0; i < numStrCache; i++) { + sym = OSSymbol::withCStringNoCopy(strCache[i]); + if (1 == sym->getRetainCount()) { + cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); + symCache[numSymbols] = sym; + numSymbols++; + } else { + sym->release(); + } + } + dict2 = 0; + dict1 = OSDictionary::withCapacity(1); + TEST_ASSERT('D', "4a", dict1); + if (dict1) { + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + sym = symCache[i]; + count1 += ((OSObject *) sym == dict1->setObject((OSObject *) sym, + sym->getCStringNoCopy())); + count2 += (sym->getRetainCount() == 3); + } + TEST_ASSERT('D', "4b", numSymbols == (int) dict1->getCount()); + TEST_ASSERT('D', "4c", numSymbols == count1); + TEST_ASSERT('D', "4d", numSymbols == count2); + + dict2 = OSDictionary::withDictionary(dict1, numSymbols - 1); + TEST_ASSERT('D', "4b", !dict2); + dict2 = OSDictionary::withDictionary(dict1, numSymbols); + } + TEST_ASSERT('D', "4e", dict2); + if (dict2) { + dict1->release(); dict1 = 0; + + TEST_ASSERT('D', "4f", numSymbols == (int) dict2->getCount()); + + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + OSObject *replacedObject; + + sym = symCache[i]; + str = (OSString *) cache[i]; + replacedObject = dict2->setObject(str, str); + count1 += ((OSString *) sym == replacedObject); + replacedObject->release(); + count2 += (sym->getRetainCount() == 2); + str->release(); + } + TEST_ASSERT('D', "4g", numSymbols == count1); + TEST_ASSERT('D', "4h", numSymbols == count2); + + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + sym = symCache[i]; + str = (OSString *) cache[i]; + count1 += (str == dict2->__takeObject(sym)); + str->release(); + count2 += (sym->getRetainCount() == 1); + sym->release(); + } + TEST_ASSERT('D', "4i", numSymbols == count1); + TEST_ASSERT('D', "4j", numSymbols == count2); + TEST_ASSERT('D', "4k", !dict2->getCount()); + dict2->release(); dict2 = 0; + } else if (dict1) { + dict1->release(); + } + res = res && checkSpace("(D)4", spaceCheck, 0); + + if (res) { + verPrintf(("testDictionary: All OSDictionary Tests passed\n")); + } else { + logPrintf(("testDictionary: Some OSDictionary Tests failed\n")); + } } -void testIterator() +void +testIterator() { - bool res = true; - void *spaceCheck; - OSObject *cache[numStrCache]; - OSString *str = 0; - const OSSymbol *symCache[numStrCache], *sym; - OSDictionary *dict; - OSSet *set; - OSArray *array, *bigReturn; - OSCollectionIterator *iter1, *iter2; - int i, numSymbols, count1, count2, count3; - - // Setup symbol and string pools - for (i = 0, numSymbols = 0; i < numStrCache; i++) { - sym = OSSymbol::withCStringNoCopy(strCache[i]); - if (1 == sym->getRetainCount()) { - cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); - symCache[numSymbols] = sym; - numSymbols++; - } - else - sym->release(); - } - - // Test the array iterator - spaceCheck = checkPointSpace(); - iter1 = iter2 = 0; - array = OSArray::withCapacity(numSymbols); - TEST_ASSERT('I', "1a", array); - if (array) { - count1 = count2 = 0; - for (i = numSymbols; --i >= 0; ) - count1 += array->setObject(cache[i], 0); - TEST_ASSERT('I', "1b", count1 == numSymbols); - - iter1 = OSCollectionIterator::withCollection(array); - iter2 = OSCollectionIterator::withCollection(array); - } - TEST_ASSERT('I', "1c", iter1); - TEST_ASSERT('I', "1d", iter2); - if (iter1 && iter2) { - count1 = count2 = count3 = 0; - for (i = 0; (str = (IOString *) iter1->getNextObject()); i++) { - bigReturn = iter2->nextEntries(); - count1 
+= (bigReturn->getCount() == 1); - count2 += (cache[i] == bigReturn->getObject(0)); - count3 += (cache[i] == str); - } - TEST_ASSERT('I', "1e", count1 == numSymbols); - TEST_ASSERT('I', "1f", count2 == numSymbols); - TEST_ASSERT('I', "1g", count3 == numSymbols); - TEST_ASSERT('I', "1h", iter1->valid()); - TEST_ASSERT('I', "1i", iter2->valid()); - - iter1->reset(); - str = (OSString *) array->__takeObject(0); - array->setObject(str, 0); - str->release(); - TEST_ASSERT('I', "1j", !iter1->getNextObject()); - TEST_ASSERT('I', "1k", !iter1->valid()); - - iter1->reset(); - count1 = count2 = count3 = 0; - for (i = 0; ; i++) { - if (i & 1) - str = (OSString *) iter1->getNextObject(); - else if ( (bigReturn = iter1->nextEntries()) ) - str = (OSString *) bigReturn->getObject(0); - else - str = 0; - - if (!str) - break; - count1 += (cache[i] == str); - } - TEST_ASSERT('I', "1l", count1 == numSymbols); - TEST_ASSERT('I', "1m", i == numSymbols); - TEST_ASSERT('I', "1n", iter1->valid()); - - TEST_ASSERT('I', "1o", 3 == array->getRetainCount()); - array->release(); - } - - if (iter1) iter1->release(); - if (iter2) iter2->release(); - res = res && checkSpace("(I)1", spaceCheck, 0); - - // Test the set iterator - spaceCheck = checkPointSpace(); - iter1 = 0; - set = OSSet::withCapacity(numSymbols); - TEST_ASSERT('I', "2a", set); - if (set) { - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) - count1 += set->setObject(cache[i]); - TEST_ASSERT('I', "2b", count1 == numSymbols); - - iter1 = OSCollectionIterator::withCollection(set); - iter2 = OSCollectionIterator::withCollection(set); - } - TEST_ASSERT('I', "2c", iter1); - TEST_ASSERT('I', "2d", iter2); - if (iter1 && iter2) { - count1 = count2 = count3 = 0; - for (i = 0; (str = (IOString *) iter1->getNextObject()); i++) { - bigReturn = iter2->nextEntries(); - count1 += (bigReturn->getCount() == 1); - count2 += (cache[i] == bigReturn->getObject(0)); - count3 += (cache[i] == str); - } - TEST_ASSERT('I', "2e", count1 == numSymbols); - TEST_ASSERT('I', "2f", count2 == numSymbols); - TEST_ASSERT('I', "2g", count3 == numSymbols); - TEST_ASSERT('I', "2h", iter1->valid()); - TEST_ASSERT('I', "2i", iter2->valid()); - - iter1->reset(); - count1 = count2 = count3 = 0; - for (i = 0; ; i++) { - if (i & 1) - str = (OSString *) iter1->getNextObject(); - else if ( (bigReturn = iter1->nextEntries()) ) - str = (OSString *) bigReturn->getObject(0); - else - str = 0; - - if (!str) - break; - count1 += (cache[i] == str); - } - TEST_ASSERT('I', "2l", count1 == numSymbols); - TEST_ASSERT('I', "2m", i == numSymbols); - TEST_ASSERT('I', "2n", iter1->valid()); - - iter1->reset(); - str = (OSString *) set->getAnyObject(); - (void) set->__takeObject(str); - set->setObject(str); - str->release(); - TEST_ASSERT('I', "2j", !iter1->getNextObject()); - TEST_ASSERT('I', "2k", !iter1->valid()); - - TEST_ASSERT('I', "2o", 3 == set->getRetainCount()); - set->release(); - } - - if (iter1) iter1->release(); - if (iter2) iter2->release(); - res = res && checkSpace("(I)2", spaceCheck, 0); - - // Test the dictionary iterator - spaceCheck = checkPointSpace(); - iter1 = 0; - dict = OSDictionary::withCapacity(numSymbols); - TEST_ASSERT('I', "3a", dict); - if (dict) { - count1 = count2 = 0; - for (i = 0; i < numSymbols; i++) - count1 += (0 != dict->setObject(cache[i], symCache[i])); - TEST_ASSERT('I', "3b", count1 == numSymbols); - - iter1 = OSCollectionIterator::withCollection(dict); - iter2 = OSCollectionIterator::withCollection(dict); - } - TEST_ASSERT('I', "3c", iter1); - TEST_ASSERT('I', 
"3d", iter2); - if (iter1 && iter2) { - count1 = count2 = count3 = 0; - for (i = 0; (sym = (const IOSymbol *) iter1->getNextObject()); i++) { - bigReturn = iter2->nextEntries(); - count1 += (bigReturn->getCount() == 2); - count2 += (cache[i] == bigReturn->getObject(1)); - count3 += (symCache[i] == sym); - } - TEST_ASSERT('I', "3e", count1 == numSymbols); - TEST_ASSERT('I', "3f", count2 == numSymbols); - TEST_ASSERT('I', "3g", count3 == numSymbols); - TEST_ASSERT('I', "3h", iter1->valid()); - TEST_ASSERT('I', "3i", iter2->valid()); - - iter1->reset(); - count1 = count2 = count3 = 0; - i = 0; - for (i = 0; ; i++) { - if (i & 1) { - sym = (const OSSymbol *) iter1->getNextObject(); - str = 0; - } - else if ( (bigReturn = iter1->nextEntries()) ) { - sym = (const OSSymbol *) bigReturn->getObject(0); - str = (OSString *) bigReturn->getObject(1); - } - else - sym = 0; - - if (!sym) - break; - - count1 += (symCache[i] == sym); - count2 += (!str || cache[i] == str); - } - TEST_ASSERT('I', "3l", count1 == numSymbols); - TEST_ASSERT('I', "3m", count2 == numSymbols); - TEST_ASSERT('I', "3n", i == numSymbols); - TEST_ASSERT('I', "3o", iter1->valid()); - - iter1->reset(); - str = (OSString *) dict->__takeObject(symCache[numSymbols-1]); - dict->setObject(str, symCache[numSymbols-1]); - str->release(); - TEST_ASSERT('I', "3j", !iter1->getNextObject()); - TEST_ASSERT('I', "3k", !iter1->valid()); - - TEST_ASSERT('I', "3p", 3 == dict->getRetainCount()); - dict->release(); - } - - if (iter1) iter1->release(); - if (iter2) iter2->release(); - res = res && checkSpace("(I)3", spaceCheck, 0); - - count1 = count2 = count3 = 0; - for (i = 0; i < numSymbols; i++) { - count1 += (1 == cache[i]->getRetainCount()); - count2 += (1 == symCache[i]->getRetainCount()); - cache[i]->release(); - symCache[i]->release(); - } - TEST_ASSERT('I', "4a", count1 == numSymbols); - TEST_ASSERT('I', "4b", count2 == numSymbols); - - if (res) - verPrintf(("testIterator: All OSCollectionIterator Tests passed\n")); - else - logPrintf(("testIterator: Some OSCollectionIterator Tests failed\n")); + bool res = true; + void *spaceCheck; + OSObject *cache[numStrCache]; + OSString *str = 0; + const OSSymbol *symCache[numStrCache], *sym; + OSDictionary *dict; + OSSet *set; + OSArray *array, *bigReturn; + OSCollectionIterator *iter1, *iter2; + int i, numSymbols, count1, count2, count3; + + // Setup symbol and string pools + for (i = 0, numSymbols = 0; i < numStrCache; i++) { + sym = OSSymbol::withCStringNoCopy(strCache[i]); + if (1 == sym->getRetainCount()) { + cache[numSymbols] = OSString::withCStringNoCopy(strCache[i]); + symCache[numSymbols] = sym; + numSymbols++; + } else { + sym->release(); + } + } + + // Test the array iterator + spaceCheck = checkPointSpace(); + iter1 = iter2 = 0; + array = OSArray::withCapacity(numSymbols); + TEST_ASSERT('I', "1a", array); + if (array) { + count1 = count2 = 0; + for (i = numSymbols; --i >= 0;) { + count1 += array->setObject(cache[i], 0); + } + TEST_ASSERT('I', "1b", count1 == numSymbols); + + iter1 = OSCollectionIterator::withCollection(array); + iter2 = OSCollectionIterator::withCollection(array); + } + TEST_ASSERT('I', "1c", iter1); + TEST_ASSERT('I', "1d", iter2); + if (iter1 && iter2) { + count1 = count2 = count3 = 0; + for (i = 0; (str = (IOString *) iter1->getNextObject()); i++) { + bigReturn = iter2->nextEntries(); + count1 += (bigReturn->getCount() == 1); + count2 += (cache[i] == bigReturn->getObject(0)); + count3 += (cache[i] == str); + } + TEST_ASSERT('I', "1e", count1 == numSymbols); + 
TEST_ASSERT('I', "1f", count2 == numSymbols); + TEST_ASSERT('I', "1g", count3 == numSymbols); + TEST_ASSERT('I', "1h", iter1->valid()); + TEST_ASSERT('I', "1i", iter2->valid()); + + iter1->reset(); + str = (OSString *) array->__takeObject(0); + array->setObject(str, 0); + str->release(); + TEST_ASSERT('I', "1j", !iter1->getNextObject()); + TEST_ASSERT('I', "1k", !iter1->valid()); + + iter1->reset(); + count1 = count2 = count3 = 0; + for (i = 0;; i++) { + if (i & 1) { + str = (OSString *) iter1->getNextObject(); + } else if ((bigReturn = iter1->nextEntries())) { + str = (OSString *) bigReturn->getObject(0); + } else { + str = 0; + } + + if (!str) { + break; + } + count1 += (cache[i] == str); + } + TEST_ASSERT('I', "1l", count1 == numSymbols); + TEST_ASSERT('I', "1m", i == numSymbols); + TEST_ASSERT('I', "1n", iter1->valid()); + + TEST_ASSERT('I', "1o", 3 == array->getRetainCount()); + array->release(); + } + + if (iter1) { + iter1->release(); + } + if (iter2) { + iter2->release(); + } + res = res && checkSpace("(I)1", spaceCheck, 0); + + // Test the set iterator + spaceCheck = checkPointSpace(); + iter1 = 0; + set = OSSet::withCapacity(numSymbols); + TEST_ASSERT('I', "2a", set); + if (set) { + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += set->setObject(cache[i]); + } + TEST_ASSERT('I', "2b", count1 == numSymbols); + + iter1 = OSCollectionIterator::withCollection(set); + iter2 = OSCollectionIterator::withCollection(set); + } + TEST_ASSERT('I', "2c", iter1); + TEST_ASSERT('I', "2d", iter2); + if (iter1 && iter2) { + count1 = count2 = count3 = 0; + for (i = 0; (str = (IOString *) iter1->getNextObject()); i++) { + bigReturn = iter2->nextEntries(); + count1 += (bigReturn->getCount() == 1); + count2 += (cache[i] == bigReturn->getObject(0)); + count3 += (cache[i] == str); + } + TEST_ASSERT('I', "2e", count1 == numSymbols); + TEST_ASSERT('I', "2f", count2 == numSymbols); + TEST_ASSERT('I', "2g", count3 == numSymbols); + TEST_ASSERT('I', "2h", iter1->valid()); + TEST_ASSERT('I', "2i", iter2->valid()); + + iter1->reset(); + count1 = count2 = count3 = 0; + for (i = 0;; i++) { + if (i & 1) { + str = (OSString *) iter1->getNextObject(); + } else if ((bigReturn = iter1->nextEntries())) { + str = (OSString *) bigReturn->getObject(0); + } else { + str = 0; + } + + if (!str) { + break; + } + count1 += (cache[i] == str); + } + TEST_ASSERT('I', "2l", count1 == numSymbols); + TEST_ASSERT('I', "2m", i == numSymbols); + TEST_ASSERT('I', "2n", iter1->valid()); + + iter1->reset(); + str = (OSString *) set->getAnyObject(); + (void) set->__takeObject(str); + set->setObject(str); + str->release(); + TEST_ASSERT('I', "2j", !iter1->getNextObject()); + TEST_ASSERT('I', "2k", !iter1->valid()); + + TEST_ASSERT('I', "2o", 3 == set->getRetainCount()); + set->release(); + } + + if (iter1) { + iter1->release(); + } + if (iter2) { + iter2->release(); + } + res = res && checkSpace("(I)2", spaceCheck, 0); + + // Test the dictionary iterator + spaceCheck = checkPointSpace(); + iter1 = 0; + dict = OSDictionary::withCapacity(numSymbols); + TEST_ASSERT('I', "3a", dict); + if (dict) { + count1 = count2 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += (0 != dict->setObject(cache[i], symCache[i])); + } + TEST_ASSERT('I', "3b", count1 == numSymbols); + + iter1 = OSCollectionIterator::withCollection(dict); + iter2 = OSCollectionIterator::withCollection(dict); + } + TEST_ASSERT('I', "3c", iter1); + TEST_ASSERT('I', "3d", iter2); + if (iter1 && iter2) { + count1 = count2 = count3 = 0; + for (i = 0; (sym = 
(const IOSymbol *) iter1->getNextObject()); i++) { + bigReturn = iter2->nextEntries(); + count1 += (bigReturn->getCount() == 2); + count2 += (cache[i] == bigReturn->getObject(1)); + count3 += (symCache[i] == sym); + } + TEST_ASSERT('I', "3e", count1 == numSymbols); + TEST_ASSERT('I', "3f", count2 == numSymbols); + TEST_ASSERT('I', "3g", count3 == numSymbols); + TEST_ASSERT('I', "3h", iter1->valid()); + TEST_ASSERT('I', "3i", iter2->valid()); + + iter1->reset(); + count1 = count2 = count3 = 0; + i = 0; + for (i = 0;; i++) { + if (i & 1) { + sym = (const OSSymbol *) iter1->getNextObject(); + str = 0; + } else if ((bigReturn = iter1->nextEntries())) { + sym = (const OSSymbol *) bigReturn->getObject(0); + str = (OSString *) bigReturn->getObject(1); + } else { + sym = 0; + } + + if (!sym) { + break; + } + + count1 += (symCache[i] == sym); + count2 += (!str || cache[i] == str); + } + TEST_ASSERT('I', "3l", count1 == numSymbols); + TEST_ASSERT('I', "3m", count2 == numSymbols); + TEST_ASSERT('I', "3n", i == numSymbols); + TEST_ASSERT('I', "3o", iter1->valid()); + + iter1->reset(); + str = (OSString *) dict->__takeObject(symCache[numSymbols - 1]); + dict->setObject(str, symCache[numSymbols - 1]); + str->release(); + TEST_ASSERT('I', "3j", !iter1->getNextObject()); + TEST_ASSERT('I', "3k", !iter1->valid()); + + TEST_ASSERT('I', "3p", 3 == dict->getRetainCount()); + dict->release(); + } + + if (iter1) { + iter1->release(); + } + if (iter2) { + iter2->release(); + } + res = res && checkSpace("(I)3", spaceCheck, 0); + + count1 = count2 = count3 = 0; + for (i = 0; i < numSymbols; i++) { + count1 += (1 == cache[i]->getRetainCount()); + count2 += (1 == symCache[i]->getRetainCount()); + cache[i]->release(); + symCache[i]->release(); + } + TEST_ASSERT('I', "4a", count1 == numSymbols); + TEST_ASSERT('I', "4b", count2 == numSymbols); + + if (res) { + verPrintf(("testIterator: All OSCollectionIterator Tests passed\n")); + } else { + logPrintf(("testIterator: Some OSCollectionIterator Tests failed\n")); + } } #endif /* DEBUG */ diff --git a/iokit/Tests/TestContainers.cpp b/iokit/Tests/TestContainers.cpp index f4c86b8d9..3910aa76d 100644 --- a/iokit/Tests/TestContainers.cpp +++ b/iokit/Tests/TestContainers.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if DEBUG @@ -46,7 +46,7 @@ static const char testC10[] = "the "; static const char testC11[] = "lazy "; static const char testC12[] = "dog. 
\n"; static const char testC13[] = "Now is the time for all good " - "men to come to the aid of the party \n"; + "men to come to the aid of the party \n"; static const char testC14[] = "Now is the time for "; static const char testC15[] = "all good men to come "; static const char testC16[] = "to the aid of the party \n"; @@ -74,8 +74,8 @@ static const char testC37[] = "Burns "; static const char testC38[] = "eats "; static const char testC39[] = "worms. \n"; static const char testC40[] = "Tired eyes? Stiff neck? Tight shoulders? " - "Aching back? The right moves can help " - "prevent these kinds of problem. "; + "Aching back? The right moves can help " + "prevent these kinds of problem. "; static const char testC41[] = "Tired eyes? Stiff neck? "; static const char testC42[] = "Tight shoulders? Aching back? "; static const char testC43[] = "The right moves can help prevent "; @@ -100,377 +100,410 @@ static const char testC61[] = "of "; static const char testC62[] = "problem. "; const char *strCache[] = { - testC00, testC01, testC02, testC03, testC04, testC05, testC06, testC07, - testC08, testC09, testC10, testC11, testC12, testC13, testC14, testC15, - testC16, testC17, testC18, testC19, testC20, testC21, testC22, testC23, - testC24, testC25, testC26, testC27, testC28, testC29, testC30, testC31, - testC32, testC33, testC34, testC35, testC36, testC37, testC38, testC39, - testC40, testC41, testC42, testC43, testC44, testC45, testC46, testC47, - testC48, testC49, testC50, testC51, testC52, testC53, testC54, testC55, - testC56, testC57, testC58, testC59, testC60, testC61, testC62, + testC00, testC01, testC02, testC03, testC04, testC05, testC06, testC07, + testC08, testC09, testC10, testC11, testC12, testC13, testC14, testC15, + testC16, testC17, testC18, testC19, testC20, testC21, testC22, testC23, + testC24, testC25, testC26, testC27, testC28, testC29, testC30, testC31, + testC32, testC33, testC34, testC35, testC36, testC37, testC38, testC39, + testC40, testC41, testC42, testC43, testC44, testC45, testC46, testC47, + testC48, testC49, testC50, testC51, testC52, testC53, testC54, testC55, + testC56, testC57, testC58, testC59, testC60, testC61, testC62, }; -const int numStrCache = ((int) (sizeof(strCache)/sizeof(strCache[0]))); +const int numStrCache = ((int) (sizeof(strCache) / sizeof(strCache[0]))); -void testData() +void +testData() { -#define DATA_SIZE_1 256 -#define DATA_SIZE_2 512 -#define DATA_SIZE_3 1024 -#define DATA_SIZE_4 8192 - - OSData *test1, *test2, *test3; - void *spaceCheck; - unsigned int len; - unsigned int i; - bool res = true; - unsigned short testData[DATA_SIZE_4/sizeof(short)], *cp; - - // very first test initialises the OSMetaClass cache. 
- test1 = OSData::withCapacity(DATA_SIZE_1); - TEST_ASSERT('d', "0a", test1); - if (test1) - test1->release(); - - for (i = 0; i < sizeof(testData)/sizeof(short); i++) - testData[i] = (unsigned short) i; - - // Check empty data allocation - spaceCheck = checkPointSpace(); - test1 = OSData::withCapacity(DATA_SIZE_1); - TEST_ASSERT('d', "1a", test1); - if (test1) { - TEST_ASSERT('d', "1b", !test1->getLength()); - TEST_ASSERT('d', "1c", test1->getCapacity() == DATA_SIZE_1); - TEST_ASSERT('d', "1d", !test1->getBytesNoCopy()); - TEST_ASSERT('d', "1e", !test1->getBytesNoCopy(10, DATA_SIZE_1 - 10)); - TEST_ASSERT('d', "1f", test1->appendBytes(spaceCheck, 0)); - TEST_ASSERT('d', "1g", !test1->getLength()); - TEST_ASSERT('d', "1h", test1->getCapacity() == DATA_SIZE_1); - TEST_ASSERT('d', "1i", !test1->getBytesNoCopy()); - test1->release(); - } - res = res && checkSpace("(d)1", spaceCheck, 0); - - // Check appending to empty data allocation - spaceCheck = checkPointSpace(); - test1 = OSData::withCapacity(DATA_SIZE_1); - TEST_ASSERT('d', "2a", test1); - if (test1) { - TEST_ASSERT('d', "2b", !test1->getLength()); - TEST_ASSERT('d', "2c", !test1->getBytesNoCopy()); - TEST_ASSERT('d', "2d", test1->appendBytes(testData, DATA_SIZE_1)); - TEST_ASSERT('d', "2e", test1->getLength() == DATA_SIZE_1); - TEST_ASSERT('d', "2f", test1->getBytesNoCopy()); - cp = (unsigned short *) test1->getBytesNoCopy(); - for (i = 0; cp && i < (DATA_SIZE_1/sizeof(short)); i++) { - TEST_ASSERT('d', "2g", *cp++ == testData[i]); - if (*cp != testData[i]) - break; - } - TEST_ASSERT('d', "2h", test1->getBytesNoCopy(10, DATA_SIZE_1-10)); - cp = (unsigned short *) test1->getBytesNoCopy(10, DATA_SIZE_1 - 10); - for (i = 5; cp && i < (DATA_SIZE_1/sizeof(short)) - 5; i++) { - TEST_ASSERT('d', "2i", *cp++ == testData[i]); - if (*cp != testData[i]) - break; - } - TEST_ASSERT('d', "2j", test1->isEqualTo(testData, DATA_SIZE_1)); - test1->release(); - } - res = res && checkSpace("(d)2", spaceCheck, 0); - - // Check data allocation from some constant data - spaceCheck = checkPointSpace(); - test1 = OSData::withBytes(testData, sizeof(testData)); - TEST_ASSERT('d', "3a", test1); - if (test1) { - TEST_ASSERT('d', "3b", test1->getLength() == sizeof(testData)); - TEST_ASSERT('d', "3c", test1->getCapacity() == sizeof(testData)); - TEST_ASSERT('d', "3d", test1->getBytesNoCopy()); - TEST_ASSERT('d', "3e", test1->getBytesNoCopy(10, sizeof(testData)-10)); - TEST_ASSERT('d', "3f", test1->appendBytes(spaceCheck, 0)); - TEST_ASSERT('d', "3g", test1->getLength() == sizeof(testData)); - TEST_ASSERT('d', "3h", test1->getCapacity() == sizeof(testData)); - TEST_ASSERT('d', "3i", test1->getBytesNoCopy()); - TEST_ASSERT('d', "3j", test1->getBytesNoCopy(10, sizeof(testData)-10)); - TEST_ASSERT('d', "3k", !test1->appendBytes(testData, 10)); - test1->release(); - } - res = res && checkSpace("(d)3", spaceCheck, 0); - - // Check and continious addition of more data - spaceCheck = checkPointSpace(); - test1 = OSData::withCapacity(DATA_SIZE_4); - test2 = OSData::withBytesNoCopy(testData, DATA_SIZE_3); - len = DATA_SIZE_3; - TEST_ASSERT('d', "4a", (test1 && test2)); - if (test1 && test2) { - TEST_ASSERT('d', "4b", !test1->getLength()); - for (i = 0; i < DATA_SIZE_4; i += DATA_SIZE_3) - TEST_ASSERT('d', "4c", test1->appendBytes(test2)); - TEST_ASSERT('d', "4d", !test1->appendBytes(test2)); - for (i = 0; i < DATA_SIZE_4; i += DATA_SIZE_3) { - - TEST_ASSERT('d', "4e", test2->isEqualTo( - test1->getBytesNoCopy(i, DATA_SIZE_3), - DATA_SIZE_3)); - - test3 = 
OSData::withData(test1, i, DATA_SIZE_3); - TEST_ASSERT('d', "4f", test3); - if (test3) { - TEST_ASSERT('d', "4g", test2->isEqualTo(test3)); - test3->release(); - } - - test3 = OSData::withData(test1, i, len); - TEST_ASSERT('d', "4i", test3); - if (test3) { - TEST_ASSERT('d', "4j", test2->isEqualTo(test3)); - test3->release(); - } - } - test1->release(); - test2->release(); - } - res = res && checkSpace("(d)3", spaceCheck, 0); - - if (res) - verPrintf(("testData: All OSData Tests passed\n")); - else - logPrintf(("testData: Some OSData Tests failed\n")); +#define DATA_SIZE_1 256 +#define DATA_SIZE_2 512 +#define DATA_SIZE_3 1024 +#define DATA_SIZE_4 8192 + + OSData *test1, *test2, *test3; + void *spaceCheck; + unsigned int len; + unsigned int i; + bool res = true; + unsigned short testData[DATA_SIZE_4 / sizeof(short)], *cp; + + // very first test initialises the OSMetaClass cache. + test1 = OSData::withCapacity(DATA_SIZE_1); + TEST_ASSERT('d', "0a", test1); + if (test1) { + test1->release(); + } + + for (i = 0; i < sizeof(testData) / sizeof(short); i++) { + testData[i] = (unsigned short) i; + } + + // Check empty data allocation + spaceCheck = checkPointSpace(); + test1 = OSData::withCapacity(DATA_SIZE_1); + TEST_ASSERT('d', "1a", test1); + if (test1) { + TEST_ASSERT('d', "1b", !test1->getLength()); + TEST_ASSERT('d', "1c", test1->getCapacity() == DATA_SIZE_1); + TEST_ASSERT('d', "1d", !test1->getBytesNoCopy()); + TEST_ASSERT('d', "1e", !test1->getBytesNoCopy(10, DATA_SIZE_1 - 10)); + TEST_ASSERT('d', "1f", test1->appendBytes(spaceCheck, 0)); + TEST_ASSERT('d', "1g", !test1->getLength()); + TEST_ASSERT('d', "1h", test1->getCapacity() == DATA_SIZE_1); + TEST_ASSERT('d', "1i", !test1->getBytesNoCopy()); + test1->release(); + } + res = res && checkSpace("(d)1", spaceCheck, 0); + + // Check appending to empty data allocation + spaceCheck = checkPointSpace(); + test1 = OSData::withCapacity(DATA_SIZE_1); + TEST_ASSERT('d', "2a", test1); + if (test1) { + TEST_ASSERT('d', "2b", !test1->getLength()); + TEST_ASSERT('d', "2c", !test1->getBytesNoCopy()); + TEST_ASSERT('d', "2d", test1->appendBytes(testData, DATA_SIZE_1)); + TEST_ASSERT('d', "2e", test1->getLength() == DATA_SIZE_1); + TEST_ASSERT('d', "2f", test1->getBytesNoCopy()); + cp = (unsigned short *) test1->getBytesNoCopy(); + for (i = 0; cp && i < (DATA_SIZE_1 / sizeof(short)); i++) { + TEST_ASSERT('d', "2g", *cp++ == testData[i]); + if (*cp != testData[i]) { + break; + } + } + TEST_ASSERT('d', "2h", test1->getBytesNoCopy(10, DATA_SIZE_1 - 10)); + cp = (unsigned short *) test1->getBytesNoCopy(10, DATA_SIZE_1 - 10); + for (i = 5; cp && i < (DATA_SIZE_1 / sizeof(short)) - 5; i++) { + TEST_ASSERT('d', "2i", *cp++ == testData[i]); + if (*cp != testData[i]) { + break; + } + } + TEST_ASSERT('d', "2j", test1->isEqualTo(testData, DATA_SIZE_1)); + test1->release(); + } + res = res && checkSpace("(d)2", spaceCheck, 0); + + // Check data allocation from some constant data + spaceCheck = checkPointSpace(); + test1 = OSData::withBytes(testData, sizeof(testData)); + TEST_ASSERT('d', "3a", test1); + if (test1) { + TEST_ASSERT('d', "3b", test1->getLength() == sizeof(testData)); + TEST_ASSERT('d', "3c", test1->getCapacity() == sizeof(testData)); + TEST_ASSERT('d', "3d", test1->getBytesNoCopy()); + TEST_ASSERT('d', "3e", test1->getBytesNoCopy(10, sizeof(testData) - 10)); + TEST_ASSERT('d', "3f", test1->appendBytes(spaceCheck, 0)); + TEST_ASSERT('d', "3g", test1->getLength() == sizeof(testData)); + TEST_ASSERT('d', "3h", test1->getCapacity() == sizeof(testData)); 
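+ // Tests 3b/3c above establish that withBytes() allocates capacity equal + // to length, so the append in test 3k below is expected to fail rather + // than grow the buffer.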
+ TEST_ASSERT('d', "3i", test1->getBytesNoCopy()); + TEST_ASSERT('d', "3j", test1->getBytesNoCopy(10, sizeof(testData) - 10)); + TEST_ASSERT('d', "3k", !test1->appendBytes(testData, 10)); + test1->release(); + } + res = res && checkSpace("(d)3", spaceCheck, 0); + + // Check and continious addition of more data + spaceCheck = checkPointSpace(); + test1 = OSData::withCapacity(DATA_SIZE_4); + test2 = OSData::withBytesNoCopy(testData, DATA_SIZE_3); + len = DATA_SIZE_3; + TEST_ASSERT('d', "4a", (test1 && test2)); + if (test1 && test2) { + TEST_ASSERT('d', "4b", !test1->getLength()); + for (i = 0; i < DATA_SIZE_4; i += DATA_SIZE_3) { + TEST_ASSERT('d', "4c", test1->appendBytes(test2)); + } + TEST_ASSERT('d', "4d", !test1->appendBytes(test2)); + for (i = 0; i < DATA_SIZE_4; i += DATA_SIZE_3) { + TEST_ASSERT('d', "4e", test2->isEqualTo( + test1->getBytesNoCopy(i, DATA_SIZE_3), + DATA_SIZE_3)); + + test3 = OSData::withData(test1, i, DATA_SIZE_3); + TEST_ASSERT('d', "4f", test3); + if (test3) { + TEST_ASSERT('d', "4g", test2->isEqualTo(test3)); + test3->release(); + } + + test3 = OSData::withData(test1, i, len); + TEST_ASSERT('d', "4i", test3); + if (test3) { + TEST_ASSERT('d', "4j", test2->isEqualTo(test3)); + test3->release(); + } + } + test1->release(); + test2->release(); + } + res = res && checkSpace("(d)3", spaceCheck, 0); + + if (res) { + verPrintf(("testData: All OSData Tests passed\n")); + } else { + logPrintf(("testData: Some OSData Tests failed\n")); + } #undef DATA_SIZE_4 #undef DATA_SIZE_3 #undef DATA_SIZE_2 #undef DATA_SIZE_1 } -void testString() +void +testString() { - OSString *test1, *test2; - void *spaceCheck; - int i; - char c; - bool res = true; - - // very first test initialises the OSMetaClass cache. - test1 = OSString::withCStringNoCopy(testC00); - TEST_ASSERT('s', "0a", test1); - if (test1) - test1->release(); - - // Check c string allocation - spaceCheck = checkPointSpace(); - test1 = OSString::withCString(testC00); - TEST_ASSERT('s', "1a", test1); - TEST_ASSERT('s', "1b", testC00 != test1->getCStringNoCopy()); - TEST_ASSERT('s', "1c", strcmp(testC00, test1->getCStringNoCopy()) == 0); - TEST_ASSERT('s', "1d", strlen(testC00) == test1->getLength()); - TEST_ASSERT('s', "1e", test1->isEqualTo(testC00)); - TEST_ASSERT('s', "1f", !test1->isEqualTo(testC01)); - if (test1) test1->release(); - res = res && checkSpace("(s)1", spaceCheck, 0); - - // Check c string no allocation - spaceCheck = checkPointSpace(); - test1 = OSString::withCStringNoCopy(testC00); - TEST_ASSERT('s', "2a", test1); - TEST_ASSERT('s', "2b", testC00 == test1->getCStringNoCopy()); - if (test1) test1->release(); - res = res && checkSpace("(s)2", spaceCheck, 0); - - // Check string from other string generation - spaceCheck = checkPointSpace(); - test1 = OSString::withCStringNoCopy(testC00); - TEST_ASSERT('s', "3a", test1); - test2 = OSString::withString(test1); - TEST_ASSERT('s', "3b", test2); - TEST_ASSERT('s', "3c", test1 != test2); - TEST_ASSERT('s', "3d", test1->isEqualTo(test2)); - if (test1) test1->release(); - if (test2) test2->release(); - res = res && checkSpace("(s)3", spaceCheck, 0); - - // Check string comparison functionality no copy - spaceCheck = checkPointSpace(); - test1 = OSString::withCStringNoCopy(testC00); - test2 = OSString::withCStringNoCopy(testC01); - TEST_ASSERT('s', "4a", test1 && test2); - TEST_ASSERT('s', "4b", !test1->isEqualTo(test2)); - TEST_ASSERT('s', "4c", !test1->isEqualTo(testC01)); - TEST_ASSERT('s', "4d", test1->isEqualTo(testC00)); - if (test1) test1->release(); - if (test2) 
test2->release(); - res = res && checkSpace("(s)4", spaceCheck, 0); - - // Check string comparison functionality with copy - spaceCheck = checkPointSpace(); - test1 = OSString::withCString(testC00); - test2 = OSString::withCString(testC01); - TEST_ASSERT('s', "5a", test1 && test2); - TEST_ASSERT('s', "5b", !test1->isEqualTo(test2)); - TEST_ASSERT('s', "5c", !test1->isEqualTo(testC01)); - TEST_ASSERT('s', "5d", test1->isEqualTo(testC00)); - if (test1) test1->release(); - if (test2) test2->release(); - res = res && checkSpace("(s)5", spaceCheck, 0); - - // Check string inplace modifications - spaceCheck = checkPointSpace(); - test1 = OSString::withCString(testC00); - TEST_ASSERT('s', "6a", test1); - for (i = 0; (c = test1->getChar(i)); i++) - if (c != testC00[i]) { - verPrintf(("testString(s) test 6b failed\n")); res = false; - break; - } - TEST_ASSERT('s', "6c", !c); - TEST_ASSERT('s', "6d", test1->setChar(' ', 0)); - TEST_ASSERT('s', "6e", !test1->isEqualTo(testC00)); - TEST_ASSERT('s', "6f", test1->setChar('T', 0)); - TEST_ASSERT('s', "6g", !test1->setChar(' ', sizeof(testC00))); - TEST_ASSERT('s', "6h", test1->isEqualTo(testC00)); - if (test1) test1->release(); - res = res && checkSpace("(s)6", spaceCheck, 0); - - // Check const string fail inplace modifications - spaceCheck = checkPointSpace(); - test1 = OSString::withCStringNoCopy(testC00); - TEST_ASSERT('s', "7a", test1); - for (i = 0; (c = test1->getChar(i)); i++) - if (c != testC00[i]) { - verPrintf(("testString(s) test 7b failed\n")); res = false; - break; - } - TEST_ASSERT('s', "7c", !c); - TEST_ASSERT('s', "7d", !test1->setChar(' ', 0)); - TEST_ASSERT('s', "7e", test1->isEqualTo(testC00)); - TEST_ASSERT('s', "7f", !test1->setChar(' ', sizeof(testC00))); - TEST_ASSERT('s', "7g", test1->isEqualTo(testC00)); - if (test1) test1->release(); - res = res && checkSpace("(s)7", spaceCheck, 0); - - if (res) - verPrintf(("testString: All OSString Tests passed\n")); - else - logPrintf(("testString: Some OSString Tests failed\n")); + OSString *test1, *test2; + void *spaceCheck; + int i; + char c; + bool res = true; + + // very first test initialises the OSMetaClass cache. 
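+ // The object is created and released before the first checkPointSpace() + // below, so any one-time metaclass allocation stays out of the later + // checkSpace() leak accounting.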
+ test1 = OSString::withCStringNoCopy(testC00); + TEST_ASSERT('s', "0a", test1); + if (test1) { + test1->release(); + } + + // Check c string allocation + spaceCheck = checkPointSpace(); + test1 = OSString::withCString(testC00); + TEST_ASSERT('s', "1a", test1); + TEST_ASSERT('s', "1b", testC00 != test1->getCStringNoCopy()); + TEST_ASSERT('s', "1c", strcmp(testC00, test1->getCStringNoCopy()) == 0); + TEST_ASSERT('s', "1d", strlen(testC00) == test1->getLength()); + TEST_ASSERT('s', "1e", test1->isEqualTo(testC00)); + TEST_ASSERT('s', "1f", !test1->isEqualTo(testC01)); + if (test1) { + test1->release(); + } + res = res && checkSpace("(s)1", spaceCheck, 0); + + // Check c string no allocation + spaceCheck = checkPointSpace(); + test1 = OSString::withCStringNoCopy(testC00); + TEST_ASSERT('s', "2a", test1); + TEST_ASSERT('s', "2b", testC00 == test1->getCStringNoCopy()); + if (test1) { + test1->release(); + } + res = res && checkSpace("(s)2", spaceCheck, 0); + + // Check string from other string generation + spaceCheck = checkPointSpace(); + test1 = OSString::withCStringNoCopy(testC00); + TEST_ASSERT('s', "3a", test1); + test2 = OSString::withString(test1); + TEST_ASSERT('s', "3b", test2); + TEST_ASSERT('s', "3c", test1 != test2); + TEST_ASSERT('s', "3d", test1->isEqualTo(test2)); + if (test1) { + test1->release(); + } + if (test2) { + test2->release(); + } + res = res && checkSpace("(s)3", spaceCheck, 0); + + // Check string comparison functionality no copy + spaceCheck = checkPointSpace(); + test1 = OSString::withCStringNoCopy(testC00); + test2 = OSString::withCStringNoCopy(testC01); + TEST_ASSERT('s', "4a", test1 && test2); + TEST_ASSERT('s', "4b", !test1->isEqualTo(test2)); + TEST_ASSERT('s', "4c", !test1->isEqualTo(testC01)); + TEST_ASSERT('s', "4d", test1->isEqualTo(testC00)); + if (test1) { + test1->release(); + } + if (test2) { + test2->release(); + } + res = res && checkSpace("(s)4", spaceCheck, 0); + + // Check string comparison functionality with copy + spaceCheck = checkPointSpace(); + test1 = OSString::withCString(testC00); + test2 = OSString::withCString(testC01); + TEST_ASSERT('s', "5a", test1 && test2); + TEST_ASSERT('s', "5b", !test1->isEqualTo(test2)); + TEST_ASSERT('s', "5c", !test1->isEqualTo(testC01)); + TEST_ASSERT('s', "5d", test1->isEqualTo(testC00)); + if (test1) { + test1->release(); + } + if (test2) { + test2->release(); + } + res = res && checkSpace("(s)5", spaceCheck, 0); + + // Check string inplace modifications + spaceCheck = checkPointSpace(); + test1 = OSString::withCString(testC00); + TEST_ASSERT('s', "6a", test1); + for (i = 0; (c = test1->getChar(i)); i++) { + if (c != testC00[i]) { + verPrintf(("testString(s) test 6b failed\n")); res = false; + break; + } + } + TEST_ASSERT('s', "6c", !c); + TEST_ASSERT('s', "6d", test1->setChar(' ', 0)); + TEST_ASSERT('s', "6e", !test1->isEqualTo(testC00)); + TEST_ASSERT('s', "6f", test1->setChar('T', 0)); + TEST_ASSERT('s', "6g", !test1->setChar(' ', sizeof(testC00))); + TEST_ASSERT('s', "6h", test1->isEqualTo(testC00)); + if (test1) { + test1->release(); + } + res = res && checkSpace("(s)6", spaceCheck, 0); + + // Check const string fail inplace modifications + spaceCheck = checkPointSpace(); + test1 = OSString::withCStringNoCopy(testC00); + TEST_ASSERT('s', "7a", test1); + for (i = 0; (c = test1->getChar(i)); i++) { + if (c != testC00[i]) { + verPrintf(("testString(s) test 7b failed\n")); res = false; + break; + } + } + TEST_ASSERT('s', "7c", !c); + TEST_ASSERT('s', "7d", !test1->setChar(' ', 0)); + TEST_ASSERT('s', "7e", 
test1->isEqualTo(testC00)); + TEST_ASSERT('s', "7f", !test1->setChar(' ', sizeof(testC00))); + TEST_ASSERT('s', "7g", test1->isEqualTo(testC00)); + if (test1) { + test1->release(); + } + res = res && checkSpace("(s)7", spaceCheck, 0); + + if (res) { + verPrintf(("testString: All OSString Tests passed\n")); + } else { + logPrintf(("testString: Some OSString Tests failed\n")); + } } -void testSymbol() +void +testSymbol() { - bool res = true; - int i, j; - int countDups; - const OSSymbol *cache[numStrCache]; - void *spaceCheck; - - // very first test initialises the OSMetaClass cache. - cache[0] = IOSymbol::withCStringNoCopy(testC00); - TEST_ASSERT('u', "0a", cache[0]); - if (cache[0]) - cache[0]->release(); - - spaceCheck = checkPointSpace(); - - // Setup the symbol cache, make sure it grows the symbol unique'ing - // hash table. Also determine that the symbol is created ok and that - // it is indeed equal to the creating cString by strcmp. - for (i = 0; i < numStrCache; i++) { - cache[i] = OSSymbol::withCStringNoCopy(strCache[i]); - if (!cache[i]) { - verPrintf(("testSymbol(u) test 1a%d failed\n", i)); res = false; - } - else if (!cache[i]->isEqualTo(strCache[i])) { - verPrintf(("testSymbol(u) test 1b%d failed\n", i)); res = false; - } - } - - // The strCache does have some duplicates in it, mostly 'the'. Make - // sure that we wind them and that different cache entries really are - // different by strcmp. Fundamental to OSSymbol semantics. - countDups = 0; - for (i = 0; i < numStrCache; i++) - for (j = i+1; j < numStrCache; j++) { - if (cache[i] != cache[j] && cache[i]->isEqualTo(cache[j])) { - verPrintf(("testSymbol(u) test 2a%d,%d failed\n", i, j)); - res = false; - } - else if (cache[i] == cache[j]) { - if (cache[i]->getRetainCount() == 1) { - verPrintf(("testSymbol(u) test 2b%d,%d failed\n", i, j)); - res = false; - } - countDups++; - } - } - TEST_ASSERT('u', "2c", countDups); - - // Clear out the cache and check that the unique'ing hashtable has grown - for (i = 0; i < numStrCache; i++) { - if (cache[i]) { - cache[i]->release(); - cache[i] = 0; - } - } - // As of 1998-11-17 the hash growth is 364. - res = res && checkSpace("(u)3", spaceCheck, 972); - logSpace(); - - // Check for leaks by repeating the cacheing and freeing - spaceCheck = checkPointSpace(); - for (i = 0; i < numStrCache; i++) - cache[i] = OSSymbol::withCString(strCache[i]); - for (i = 0; i < numStrCache; i++) { - if (cache[i]) { - cache[i]->release(); - cache[i] = 0; - } - } - res = res && checkSpace("(u)4", spaceCheck, 0); - - // Check that the OSString based symbol constructors work - // and that they don't leak, and finally double check that while - // the cache is active the symbol semantics still work. - spaceCheck = checkPointSpace(); - for (i = 0; i < numStrCache; i++) { - OSString *tmpStr; - - tmpStr = (i&1) - ? 
OSString::withCString(strCache[i]) - : OSString::withCStringNoCopy(strCache[i]); - if (tmpStr) { - cache[i] = OSSymbol::withString(tmpStr); - if (!cache[i]) { - verPrintf(("testSymbol(u) test 5a%d failed\n", i)); - res = false; - } - tmpStr->release(); - } - } - - for (i = 0; i < numStrCache; i++) { - if (cache[i]) { - const OSSymbol *tmpSymb; - - tmpSymb = OSSymbol::withCStringNoCopy(strCache[i]); - if (cache[i] != tmpSymb) { - verPrintf(("testSymbol(u) test 5b%d failed\n", i)); - res = false; - } - tmpSymb->release(); - cache[i]->release(); - cache[i] = 0; - } - else { - verPrintf(("testSymbol(u) test 5c%d failed\n", i)); - res = false; - } - } - res = res && checkSpace("(u)5", spaceCheck, 0); - - if (res) - verPrintf(("testSymbol: All OSSymbol Tests passed\n")); - else - logPrintf(("testSymbol: Some OSSymbol Tests failed\n")); + bool res = true; + int i, j; + int countDups; + const OSSymbol *cache[numStrCache]; + void *spaceCheck; + + // very first test initialises the OSMetaClass cache. + cache[0] = IOSymbol::withCStringNoCopy(testC00); + TEST_ASSERT('u', "0a", cache[0]); + if (cache[0]) { + cache[0]->release(); + } + + spaceCheck = checkPointSpace(); + + // Set up the symbol cache, make sure it grows the symbol unique'ing + // hash table. Also determine that the symbol is created ok and that + // it is indeed equal to the creating cString by strcmp. + for (i = 0; i < numStrCache; i++) { + cache[i] = OSSymbol::withCStringNoCopy(strCache[i]); + if (!cache[i]) { + verPrintf(("testSymbol(u) test 1a%d failed\n", i)); res = false; + } else if (!cache[i]->isEqualTo(strCache[i])) { + verPrintf(("testSymbol(u) test 1b%d failed\n", i)); res = false; + } + } + + // The strCache does have some duplicates in it, mostly 'the'. Make + // sure that we find them and that different cache entries really are + // different by strcmp. Fundamental to OSSymbol semantics. + countDups = 0; + for (i = 0; i < numStrCache; i++) { + for (j = i + 1; j < numStrCache; j++) { + if (cache[i] != cache[j] && cache[i]->isEqualTo(cache[j])) { + verPrintf(("testSymbol(u) test 2a%d,%d failed\n", i, j)); + res = false; + } else if (cache[i] == cache[j]) { + if (cache[i]->getRetainCount() == 1) { + verPrintf(("testSymbol(u) test 2b%d,%d failed\n", i, j)); + res = false; + } + countDups++; + } + } + } + TEST_ASSERT('u', "2c", countDups); + + // Clear out the cache and check that the unique'ing hashtable has grown + for (i = 0; i < numStrCache; i++) { + if (cache[i]) { + cache[i]->release(); + cache[i] = 0; + } + } + // As of 1998-11-17 the hash growth is 364. + res = res && checkSpace("(u)3", spaceCheck, 972); + logSpace(); + + // Check for leaks by repeating the caching and freeing + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) { + cache[i] = OSSymbol::withCString(strCache[i]); + } + for (i = 0; i < numStrCache; i++) { + if (cache[i]) { + cache[i]->release(); + cache[i] = 0; + } + } + res = res && checkSpace("(u)4", spaceCheck, 0); + + // Check that the OSString based symbol constructors work + // and that they don't leak, and finally double check that while + // the cache is active the symbol semantics still work. + spaceCheck = checkPointSpace(); + for (i = 0; i < numStrCache; i++) { + OSString *tmpStr; + + tmpStr = (i & 1) + ?
OSString::withCString(strCache[i]) + : OSString::withCStringNoCopy(strCache[i]); + if (tmpStr) { + cache[i] = OSSymbol::withString(tmpStr); + if (!cache[i]) { + verPrintf(("testSymbol(u) test 5a%d failed\n", i)); + res = false; + } + tmpStr->release(); + } + } + + for (i = 0; i < numStrCache; i++) { + if (cache[i]) { + const OSSymbol *tmpSymb; + + tmpSymb = OSSymbol::withCStringNoCopy(strCache[i]); + if (cache[i] != tmpSymb) { + verPrintf(("testSymbol(u) test 5b%d failed\n", i)); + res = false; + } + tmpSymb->release(); + cache[i]->release(); + cache[i] = 0; + } else { + verPrintf(("testSymbol(u) test 5c%d failed\n", i)); + res = false; + } + } + res = res && checkSpace("(u)5", spaceCheck, 0); + + if (res) { + verPrintf(("testSymbol: All OSSymbol Tests passed\n")); + } else { + logPrintf(("testSymbol: Some OSSymbol Tests failed\n")); + } } #endif /* DEBUG */ diff --git a/iokit/Tests/TestDevice.cpp b/iokit/Tests/TestDevice.cpp index 021f756f4..a9dd48b0f 100644 --- a/iokit/Tests/TestDevice.cpp +++ b/iokit/Tests/TestDevice.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if DEBUG @@ -46,139 +46,160 @@ OSDefineMetaClassAndStructors(TestDevice, OSObject) kern_return_t TestDevice::enqueueCommand(bool sleep, - TestDeviceAction act, int tag, void *dataP) + TestDeviceAction act, int tag, void *dataP) { - return commQ->enqueueCommand(sleep, (void *) act, (void *) tag, dataP); + return commQ->enqueueCommand(sleep, (void *) act, (void *) tag, dataP); } -bool TestDevice::init() +bool +TestDevice::init() { - if ( !super::init() ) - return false; - - workLoop = IOWorkLoop::workLoop(); - if ( !workLoop ) - return false; - - commQ = IOCommandQueue::commandQueue - (this, (IOCommandQueueAction) &rawCommandOccurred, 8); - if (!commQ || kIOReturnSuccess != workLoop->addEventSource(commQ)) - return false; - - intES = IOInterruptEventSource::interruptEventSource - (this, (IOInterruptEventAction) &interruptAction); - if (!intES || kIOReturnSuccess != workLoop->addEventSource(intES)) - return false; - - return true; + if (!super::init()) { + return false; + } + + workLoop = IOWorkLoop::workLoop(); + if (!workLoop) { + return false; + } + + commQ = IOCommandQueue::commandQueue + (this, (IOCommandQueueAction) & rawCommandOccurred, 8); + if (!commQ || kIOReturnSuccess != workLoop->addEventSource(commQ)) { + return false; + } + + intES = IOInterruptEventSource::interruptEventSource + (this, (IOInterruptEventAction) & interruptAction); + if (!intES || kIOReturnSuccess != workLoop->addEventSource(intES)) { + return false; + } + + return true; } -void TestDevice::free() +void +TestDevice::free() { - if (intES) intES->release(); - if (commQ) commQ->release(); - if (workLoop) workLoop->release(); - - super::free(); + if (intES) { + intES->release(); + } + if (commQ) { + commQ->release(); + } + if (workLoop) { + workLoop->release(); + } + + super::free(); } void TestDevice::rawCommandOccurred - (void *field0, void *field1, void *field2, void *) +(void *field0, void *field1, void *field2, void *) { - (*(TestDeviceAction) field0)(this, (int) field1, field2); + (*(TestDeviceAction) field0)(this, (int) field1, field2); } void TestDevice::interruptAction(IOInterruptEventSource *, int count) { - logPrintf(("I(%d, %d) ", count, ++intCount)); + logPrintf(("I(%d, %d) ", count, ++intCount)); } void TestDevice::producer1Action(int tag) { - logPrintf(("C1(%d) ", tag)); + logPrintf(("C1(%d) ", tag)); } void TestDevice::producer2Action(int tag, void *count) { - logPrintf(("C2(%d,%d) ", tag, (int) count)); - if ( !(tag % 10) ) - IOSleep(1000); + logPrintf(("C2(%d,%d) ", tag, (int) count)); + if (!(tag % 10)) { + IOSleep(1000); + } } void TestDevice::alarm() { - intES->interruptOccurred(0, 0, 0); - IOScheduleFunc((IOThreadFunc) alarm, (void *) this, hundredMill, 1); + intES->interruptOccurred(0, 0, 0); + IOScheduleFunc((IOThreadFunc) alarm, (void *) this, hundredMill, 1); } -static void producer(void *inProducerId) +static void +producer(void *inProducerId) { - int producerId = (int) inProducerId; - TestDeviceAction command; - int i; - - semaphore_wait(completeSema); - - if (producerId & 1) - command = (TestDeviceAction) sDevice->producer1Action; - else - command = (TestDeviceAction) sDevice->producer2Action; - - for (i = 0; i < 5 * (producerId << 1); i++) { - sDevice->enqueueCommand - (true, command, i, (void *) (i % (producerId + 1))); - if ( !(i % (producerId + 1)) ) - /* cthread_yield() */; - logPrintf(("TestDevice(%d): %d\n", producerId, i)); - } - - logPrintf(("TestDevice: producer %d exiting\n", producerId)); - semaphore_signal(completeSema); - 
- IOExitThread(producerId); + int producerId = (int) inProducerId; + TestDeviceAction command; + int i; + + semaphore_wait(completeSema); + + if (producerId & 1) { + command = (TestDeviceAction) sDevice->producer1Action; + } else { + command = (TestDeviceAction) sDevice->producer2Action; + } + + for (i = 0; i < 5 * (producerId << 1); i++) { + sDevice->enqueueCommand + (true, command, i, (void *) (i % (producerId + 1))); + if (!(i % (producerId + 1))) { + /* cthread_yield() */; + } + logPrintf(("TestDevice(%d): %d\n", producerId, i)); + } + + logPrintf(("TestDevice: producer %d exiting\n", producerId)); + semaphore_signal(completeSema); + + IOExitThread(producerId); } -void testWorkLoop() +void +testWorkLoop() { - int i; + int i; - sDevice = new TestDevice; - if (!sDevice || !sDevice->init()) { - if (sDevice) sDevice->free(); - logPrintf(("TestDevice: couldn't create device instance\n")); - return; - } + sDevice = new TestDevice; + if (!sDevice || !sDevice->init()) { + if (sDevice) { + sDevice->free(); + } + logPrintf(("TestDevice: couldn't create device instance\n")); + return; + } - IOSleep(1000); + IOSleep(1000); - IOScheduleFunc((IOThreadFunc) sDevice->alarm, sDevice, hundredMill, 1); + IOScheduleFunc((IOThreadFunc) sDevice->alarm, sDevice, hundredMill, 1); - IOSleep(2000); + IOSleep(2000); - if (KERN_SUCCESS - != semaphore_create(kernel_task, &completeSema, SYNC_POLICY_FIFO, 4)) - return; + if (KERN_SUCCESS + != semaphore_create(kernel_task, &completeSema, SYNC_POLICY_FIFO, 4)) { + return; + } - IOCreateThread(producer, (void *) 4); - IOCreateThread(producer, (void *) 3); - IOCreateThread(producer, (void *) 2); - IOCreateThread(producer, (void *) 1); + IOCreateThread(producer, (void *) 4); + IOCreateThread(producer, (void *) 3); + IOCreateThread(producer, (void *) 2); + IOCreateThread(producer, (void *) 1); - IOSleep(2000); + IOSleep(2000); - for (i = 0; i < 4; i++) - semaphore_wait(completeSema); + for (i = 0; i < 4; i++) { + semaphore_wait(completeSema); + } - IOUnscheduleFunc((IOThreadFunc) sDevice->alarm, sDevice); + IOUnscheduleFunc((IOThreadFunc) sDevice->alarm, sDevice); - sDevice->free(); sDevice = 0; + sDevice->free(); sDevice = 0; - logPrintf(("TestDevice: exiting\n")); + logPrintf(("TestDevice: exiting\n")); } #endif /* DEBUG */ diff --git a/iokit/Tests/TestIOMemoryDescriptor.cpp b/iokit/Tests/TestIOMemoryDescriptor.cpp index bc939f8ed..9e3220369 100644 --- a/iokit/Tests/TestIOMemoryDescriptor.cpp +++ b/iokit/Tests/TestIOMemoryDescriptor.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2014-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -67,47 +67,50 @@ __END_DECLS extern SInt32 gIOMemoryReferenceCount; -static int IOMultMemoryDescriptorTest(int newValue) +static int +IOMultMemoryDescriptorTest(int newValue) { - IOMemoryDescriptor * mds[3]; - IOMultiMemoryDescriptor * mmd; - IOMemoryMap * map; - void * addr; - uint8_t * data; - uint32_t i; - IOAddressRange ranges[2]; - - data = (typeof(data)) IOMallocAligned(ptoa(8), page_size); - for (i = 0; i < ptoa(8); i++) data[i] = atop(i) | 0xD0; - - ranges[0].address = (IOVirtualAddress)(data + ptoa(4)); - ranges[0].length = ptoa(4); - ranges[1].address = (IOVirtualAddress)(data + ptoa(0)); - ranges[1].length = ptoa(4); - - mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task); - - mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn); - mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn); - - mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds)/sizeof(mds[0]), kIODirectionOutIn, false); - mds[2]->release(); - mds[1]->release(); - mds[0]->release(); - map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7)); - mmd->release(); - assert(map); - - addr = (void *) map->getVirtualAddress(); - assert(ptoa(4) == map->getLength()); - assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]); - assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]); - assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]); - assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]); - map->release(); - IOFreeAligned(data, ptoa(8)); - - return (0); + IOMemoryDescriptor * mds[3]; + IOMultiMemoryDescriptor * mmd; + IOMemoryMap * map; + void * addr; + uint8_t * data; + uint32_t i; + IOAddressRange ranges[2]; + + data = (typeof(data))IOMallocAligned(ptoa(8), page_size); + for (i = 0; i < ptoa(8); i++) { + data[i] = atop(i) | 0xD0; + } + + ranges[0].address = (IOVirtualAddress)(data + ptoa(4)); + ranges[0].length = ptoa(4); + ranges[1].address = (IOVirtualAddress)(data + ptoa(0)); + ranges[1].length = ptoa(4); + + mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task); + + mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn); + mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn); + + mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false); + mds[2]->release(); + mds[1]->release(); + mds[0]->release(); + map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7)); + mmd->release(); + assert(map); + + addr = (void *) map->getVirtualAddress(); + assert(ptoa(4) == map->getLength()); + assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]); + assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]); + assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]); + assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]); + map->release(); + IOFreeAligned(data, ptoa(8)); + + return 0; } @@ -116,276 +119,278 @@ static int IOMultMemoryDescriptorTest(int newValue) static int IODMACommandForceDoubleBufferTest(int newValue) { - IOReturn ret; - IOBufferMemoryDescriptor * bmd; - IODMACommand * dma; - 
uint32_t dir, data; - IODMACommand::SegmentOptions segOptions = - { - .fStructSize = sizeof(segOptions), - .fNumAddressBits = 64, - .fMaxSegmentSize = 0x2000, - .fMaxTransferSize = 128*1024, - .fAlignment = 1, - .fAlignmentLength = 1, - .fAlignmentInternalSegments = 1 - }; - IODMACommand::Segment64 segments[1]; - UInt32 numSegments; - UInt64 dmaOffset; - - - for (dir = kIODirectionIn; ; dir++) - { - bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, - dir | kIOMemoryPageable, ptoa(8)); - assert(bmd); + IOReturn ret; + IOBufferMemoryDescriptor * bmd; + IODMACommand * dma; + uint32_t dir, data; + IODMACommand::SegmentOptions segOptions = + { + .fStructSize = sizeof(segOptions), + .fNumAddressBits = 64, + .fMaxSegmentSize = 0x2000, + .fMaxTransferSize = 128 * 1024, + .fAlignment = 1, + .fAlignmentLength = 1, + .fAlignmentInternalSegments = 1 + }; + IODMACommand::Segment64 segments[1]; + UInt32 numSegments; + UInt64 dmaOffset; - ((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir; - ret = bmd->prepare((IODirection) dir); - assert(kIOReturnSuccess == ret); + for (dir = kIODirectionIn;; dir++) { + bmd = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, + dir | kIOMemoryPageable, ptoa(8)); + assert(bmd); - dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions, - kIODMAMapOptionMapped, - NULL, NULL); - assert(dma); - ret = dma->setMemoryDescriptor(bmd, true); - assert(kIOReturnSuccess == ret); + ((uint32_t*) bmd->getBytesNoCopy())[0] = 0x53535300 | dir; - ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut); - assert(kIOReturnSuccess == ret); + ret = bmd->prepare((IODirection) dir); + assert(kIOReturnSuccess == ret); - dmaOffset = 0; - numSegments = 1; - ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments); - assert(kIOReturnSuccess == ret); - assert(1 == numSegments); + dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions, + kIODMAMapOptionMapped, + NULL, NULL); + assert(dma); + ret = dma->setMemoryDescriptor(bmd, true); + assert(kIOReturnSuccess == ret); - if (kIODirectionOut & dir) - { - data = ((uint32_t*) bmd->getBytesNoCopy())[0]; - assertf((0x53535300 | dir) == data, "mismatch 0x%x", data); - } - if (kIODirectionIn & dir) - { - IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir); - } + ret = dma->synchronize(IODMACommand::kForceDoubleBuffer | kIODirectionOut); + assert(kIOReturnSuccess == ret); - ret = dma->clearMemoryDescriptor(true); - assert(kIOReturnSuccess == ret); - dma->release(); + dmaOffset = 0; + numSegments = 1; + ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments); + assert(kIOReturnSuccess == ret); + assert(1 == numSegments); - bmd->complete((IODirection) dir); + if (kIODirectionOut & dir) { + data = ((uint32_t*) bmd->getBytesNoCopy())[0]; + assertf((0x53535300 | dir) == data, "mismatch 0x%x", data); + } + if (kIODirectionIn & dir) { + IOMappedWrite32(segments[0].fIOVMAddr, 0x11223300 | dir); + } - if (kIODirectionIn & dir) - { - data = ((uint32_t*) bmd->getBytesNoCopy())[0]; - assertf((0x11223300 | dir) == data, "mismatch 0x%x", data); - } + ret = dma->clearMemoryDescriptor(true); + assert(kIOReturnSuccess == ret); + dma->release(); - bmd->release(); + bmd->complete((IODirection) dir); - if (dir == kIODirectionInOut) break; - } + if (kIODirectionIn & dir) { + data = ((uint32_t*) bmd->getBytesNoCopy())[0]; + assertf((0x11223300 | dir) == data, "mismatch 0x%x", data); + } - return (0); + bmd->release(); + + if (dir == kIODirectionInOut) { + break; + 
} + } + + return 0; } // static int __unused IODMACommandLocalMappedNonContig(int newValue) { - IOReturn kr; - IOMemoryDescriptor * md; - IODMACommand * dma; - OSDictionary * matching; - IOService * device; - IOMapper * mapper; - IODMACommand::SegmentOptions segOptions = - { - .fStructSize = sizeof(segOptions), - .fNumAddressBits = 64, - .fMaxSegmentSize = 128*1024, - .fMaxTransferSize = 128*1024, - .fAlignment = 1, - .fAlignmentLength = 1, - .fAlignmentInternalSegments = 1 - }; - IODMACommand::Segment64 segments[1]; - UInt32 numSegments; - UInt64 dmaOffset; - UInt64 segPhys; - vm_address_t buffer; - vm_size_t bufSize = ptoa(4); - - if (!IOMapper::gSystem) return (0); - - buffer = 0; - kr = vm_allocate_kernel(kernel_map, &buffer, bufSize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT); - assert(KERN_SUCCESS == kr); - - // fragment the vmentries - kr = vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE); - assert(KERN_SUCCESS == kr); - - md = IOMemoryDescriptor::withAddressRange( - buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task); - assert(md); - kr = md->prepare(kIODirectionOutIn); - assert(kIOReturnSuccess == kr); - - segPhys = md->getPhysicalSegment(0, NULL, 0); - - matching = IOService::nameMatching("XHC1"); - assert(matching); - device = IOService::copyMatchingService(matching); - matching->release(); - mapper = device ? IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL; - - dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions, - kIODMAMapOptionMapped, - mapper, NULL); - assert(dma); - kr = dma->setMemoryDescriptor(md, true); - assert(kIOReturnSuccess == kr); - - dmaOffset = 0; - numSegments = 1; - kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments); - assert(kIOReturnSuccess == kr); - assert(1 == numSegments); - - if (mapper) assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma); - - kr = dma->clearMemoryDescriptor(true); - assert(kIOReturnSuccess == kr); - dma->release(); - - kr = md->complete(kIODirectionOutIn); - assert(kIOReturnSuccess == kr); - md->release(); - - kr = vm_deallocate(kernel_map, buffer, bufSize); - assert(KERN_SUCCESS == kr); - OSSafeReleaseNULL(mapper); - - return (0); + IOReturn kr; + IOMemoryDescriptor * md; + IODMACommand * dma; + OSDictionary * matching; + IOService * device; + IOMapper * mapper; + IODMACommand::SegmentOptions segOptions = + { + .fStructSize = sizeof(segOptions), + .fNumAddressBits = 64, + .fMaxSegmentSize = 128 * 1024, + .fMaxTransferSize = 128 * 1024, + .fAlignment = 1, + .fAlignmentLength = 1, + .fAlignmentInternalSegments = 1 + }; + IODMACommand::Segment64 segments[1]; + UInt32 numSegments; + UInt64 dmaOffset; + UInt64 segPhys; + vm_address_t buffer; + vm_size_t bufSize = ptoa(4); + + if (!IOMapper::gSystem) { + return 0; + } + + buffer = 0; + kr = vm_allocate_kernel(kernel_map, &buffer, bufSize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT); + assert(KERN_SUCCESS == kr); + + // fragment the vmentries + kr = vm_inherit(kernel_map, buffer + ptoa(1), ptoa(1), VM_INHERIT_NONE); + assert(KERN_SUCCESS == kr); + + md = IOMemoryDescriptor::withAddressRange( + buffer + 0xa00, 0x2000, kIODirectionOutIn, kernel_task); + assert(md); + kr = md->prepare(kIODirectionOutIn); + assert(kIOReturnSuccess == kr); + + segPhys = md->getPhysicalSegment(0, NULL, 0); + + matching = IOService::nameMatching("XHC1"); + assert(matching); + device = IOService::copyMatchingService(matching); + matching->release(); + mapper = device ? 
IOMapper::copyMapperForDeviceWithIndex(device, 0) : NULL; + + dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions, + kIODMAMapOptionMapped, + mapper, NULL); + assert(dma); + kr = dma->setMemoryDescriptor(md, true); + assert(kIOReturnSuccess == kr); + + dmaOffset = 0; + numSegments = 1; + kr = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments); + assert(kIOReturnSuccess == kr); + assert(1 == numSegments); + + if (mapper) { + assertf(segments[0].fIOVMAddr != segPhys, "phys !local 0x%qx, 0x%qx, %p", segments[0].fIOVMAddr, segPhys, dma); + } + + kr = dma->clearMemoryDescriptor(true); + assert(kIOReturnSuccess == kr); + dma->release(); + + kr = md->complete(kIODirectionOutIn); + assert(kIOReturnSuccess == kr); + md->release(); + + kr = vm_deallocate(kernel_map, buffer, bufSize); + assert(KERN_SUCCESS == kr); + OSSafeReleaseNULL(mapper); + + return 0; } // static int IOMemoryRemoteTest(int newValue) { - IOReturn ret; - IOMemoryDescriptor * md; - IOByteCount offset, length; - addr64_t addr; - uint32_t idx; - - IODMACommand * dma; - IODMACommand::SegmentOptions segOptions = - { - .fStructSize = sizeof(segOptions), - .fNumAddressBits = 64, - .fMaxSegmentSize = 0x2000, - .fMaxTransferSize = 128*1024, - .fAlignment = 1, - .fAlignmentLength = 1, - .fAlignmentInternalSegments = 1 - }; - IODMACommand::Segment64 segments[1]; - UInt32 numSegments; - UInt64 dmaOffset; - - IOAddressRange ranges[2] = { - { 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210, 0x2000 }, - }; - - md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn|kIOMemoryRemote, TASK_NULL); - assert(md); + IOReturn ret; + IOMemoryDescriptor * md; + IOByteCount offset, length; + addr64_t addr; + uint32_t idx; + + IODMACommand * dma; + IODMACommand::SegmentOptions segOptions = + { + .fStructSize = sizeof(segOptions), + .fNumAddressBits = 64, + .fMaxSegmentSize = 0x2000, + .fMaxTransferSize = 128 * 1024, + .fAlignment = 1, + .fAlignmentLength = 1, + .fAlignmentInternalSegments = 1 + }; + IODMACommand::Segment64 segments[1]; + UInt32 numSegments; + UInt64 dmaOffset; + + IOAddressRange ranges[2] = { + { 0x1234567890123456ULL, 0x1000 }, { 0x5432109876543210, 0x2000 }, + }; + + md = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn | kIOMemoryRemote, TASK_NULL); + assert(md); // md->map(); // md->readBytes(0, &idx, sizeof(idx)); - ret = md->prepare(kIODirectionOutIn); - assert(kIOReturnSuccess == ret); - - printf("remote md flags 0x%qx, r %d\n", - md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags()))); - - for (offset = 0, idx = 0; true; offset += length, idx++) - { - addr = md->getPhysicalSegment(offset, &length, 0); - if (!length) break; - assert(idx < 2); - assert(addr == ranges[idx].address); - assert(length == ranges[idx].length); - } - assert(offset == md->getLength()); - - dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions, - kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly, - NULL, NULL); - assert(dma); - ret = dma->setMemoryDescriptor(md, true); - assert(kIOReturnSuccess == ret); - - for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++) - { - numSegments = 1; - ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments); + ret = md->prepare(kIODirectionOutIn); assert(kIOReturnSuccess == ret); - assert(1 == numSegments); - assert(idx < 2); - assert(segments[0].fIOVMAddr == ranges[idx].address); - assert(segments[0].fLength == ranges[idx].length); - } - assert(dmaOffset == md->getLength()); - - ret = 
dma->clearMemoryDescriptor(true); - assert(kIOReturnSuccess == ret); - dma->release(); - md->complete(kIODirectionOutIn); - md->release(); - - return (0); + + printf("remote md flags 0x%qx, r %d\n", + md->getFlags(), (0 != (kIOMemoryRemote & md->getFlags()))); + + for (offset = 0, idx = 0; true; offset += length, idx++) { + addr = md->getPhysicalSegment(offset, &length, 0); + if (!length) { + break; + } + assert(idx < 2); + assert(addr == ranges[idx].address); + assert(length == ranges[idx].length); + } + assert(offset == md->getLength()); + + dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions, + kIODMAMapOptionUnmapped | kIODMAMapOptionIterateOnly, + NULL, NULL); + assert(dma); + ret = dma->setMemoryDescriptor(md, true); + assert(kIOReturnSuccess == ret); + + for (dmaOffset = 0, idx = 0; dmaOffset < md->getLength(); idx++) { + numSegments = 1; + ret = dma->gen64IOVMSegments(&dmaOffset, &segments[0], &numSegments); + assert(kIOReturnSuccess == ret); + assert(1 == numSegments); + assert(idx < 2); + assert(segments[0].fIOVMAddr == ranges[idx].address); + assert(segments[0].fLength == ranges[idx].length); + } + assert(dmaOffset == md->getLength()); + + ret = dma->clearMemoryDescriptor(true); + assert(kIOReturnSuccess == ret); + dma->release(); + md->complete(kIODirectionOutIn); + md->release(); + + return 0; } static IOReturn IOMemoryPrefaultTest(uint32_t options) { - IOBufferMemoryDescriptor * bmd; - IOMemoryMap * map; - IOReturn kr; - uint32_t data; - uint32_t * p; - IOSimpleLock * lock; - - lock = IOSimpleLockAlloc(); - assert(lock); - - bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(), - kIODirectionOutIn | kIOMemoryPageable, ptoa(8)); - assert(bmd); - kr = bmd->prepare(); - assert(KERN_SUCCESS == kr); + IOBufferMemoryDescriptor * bmd; + IOMemoryMap * map; + IOReturn kr; + uint32_t data; + uint32_t * p; + IOSimpleLock * lock; + + lock = IOSimpleLockAlloc(); + assert(lock); + + bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(), + kIODirectionOutIn | kIOMemoryPageable, ptoa(8)); + assert(bmd); + kr = bmd->prepare(); + assert(KERN_SUCCESS == kr); - map = bmd->map(kIOMapPrefault); - assert(map); + map = bmd->map(kIOMapPrefault); + assert(map); - p = (typeof(p)) map->getVirtualAddress(); - IOSimpleLockLock(lock); - data = p[0]; - IOSimpleLockUnlock(lock); + p = (typeof(p))map->getVirtualAddress(); + IOSimpleLockLock(lock); + data = p[0]; + IOSimpleLockUnlock(lock); - IOLog("IOMemoryPrefaultTest %d\n", data); + IOLog("IOMemoryPrefaultTest %d\n", data); - map->release(); - bmd->release(); - IOSimpleLockFree(lock); + map->release(); + bmd->release(); + IOSimpleLockFree(lock); - return (kIOReturnSuccess); + return kIOReturnSuccess; } @@ -393,548 +398,563 @@ IOMemoryPrefaultTest(uint32_t options) static IOReturn ZeroLengthTest(int newValue) { - IOMemoryDescriptor * md; - - md = IOMemoryDescriptor::withAddressRange( - 0, 0, kIODirectionNone, current_task()); - assert(md); - md->prepare(); - md->complete(); - md->release(); - return (0); + IOMemoryDescriptor * md; + + md = IOMemoryDescriptor::withAddressRange( + 0, 0, kIODirectionNone, current_task()); + assert(md); + md->prepare(); + md->complete(); + md->release(); + return 0; } // static IOReturn BadFixedAllocTest(int newValue) { - IOBufferMemoryDescriptor * bmd; - IOMemoryMap * map; + IOBufferMemoryDescriptor * bmd; + IOMemoryMap * map; - bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL, - kIODirectionIn | kIOMemoryPageable, ptoa(1)); - assert(bmd); - map = 
bmd->createMappingInTask(kernel_task, 0x2000, 0); - assert(!map); + bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL, + kIODirectionIn | kIOMemoryPageable, ptoa(1)); + assert(bmd); + map = bmd->createMappingInTask(kernel_task, 0x2000, 0); + assert(!map); - bmd->release(); - return (0); + bmd->release(); + return 0; } // static IOReturn IODirectionPrepareNoZeroFillTest(int newValue) { - IOBufferMemoryDescriptor * bmd; - - bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL, - kIODirectionIn | kIOMemoryPageable, ptoa(24)); - assert(bmd); - bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill)); - bmd->prepare(kIODirectionIn); - bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid)); - bmd->complete(kIODirectionIn); - bmd->release(); - return (0); + IOBufferMemoryDescriptor * bmd; + + bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL, + kIODirectionIn | kIOMemoryPageable, ptoa(24)); + assert(bmd); + bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill)); + bmd->prepare(kIODirectionIn); + bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid)); + bmd->complete(kIODirectionIn); + bmd->release(); + return 0; } // static IOReturn IOMemoryMapTest(uint32_t options) { - IOBufferMemoryDescriptor * bmd; - IOMemoryDescriptor * md; - IOMemoryMap * map; - uint32_t data; - user_addr_t p; - uint8_t * p2; - int r; - uint64_t time, nano; - - bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(), - kIODirectionOutIn | kIOMemoryPageable, 0x4018+0x800); - assert(bmd); - p = (typeof(p)) bmd->getBytesNoCopy(); - p += 0x800; - data = 0x11111111; - r = copyout(&data, p, sizeof(data)); - assert(r == 0); - data = 0x22222222; - r = copyout(&data, p + 0x1000, sizeof(data)); - assert(r == 0); - data = 0x33333333; - r = copyout(&data, p + 0x2000, sizeof(data)); - assert(r == 0); - data = 0x44444444; - r = copyout(&data, p + 0x3000, sizeof(data)); - assert(r == 0); - - md = IOMemoryDescriptor::withAddressRange(p, 0x4018, - kIODirectionOut | options, - current_task()); - assert(md); - time = mach_absolute_time(); - map = md->map(kIOMapReadOnly); - time = mach_absolute_time() - time; - assert(map); - absolutetime_to_nanoseconds(time, &nano); - - p2 = (typeof(p2)) map->getVirtualAddress(); - assert(0x11 == p2[0]); - assert(0x22 == p2[0x1000]); - assert(0x33 == p2[0x2000]); - assert(0x44 == p2[0x3000]); - - data = 0x99999999; - r = copyout(&data, p + 0x2000, sizeof(data)); - assert(r == 0); - - assert(0x11 == p2[0]); - assert(0x22 == p2[0x1000]); - assert(0x44 == p2[0x3000]); - if (kIOMemoryMapCopyOnWrite & options) assert(0x33 == p2[0x2000]); - else assert(0x99 == p2[0x2000]); - - IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n", - kIOMemoryMapCopyOnWrite & options ? 
"kIOMemoryMapCopyOnWrite" : "", - nano); - - map->release(); - md->release(); - bmd->release(); - - return (kIOReturnSuccess); + IOBufferMemoryDescriptor * bmd; + IOMemoryDescriptor * md; + IOMemoryMap * map; + uint32_t data; + user_addr_t p; + uint8_t * p2; + int r; + uint64_t time, nano; + + bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(), + kIODirectionOutIn | kIOMemoryPageable, 0x4018 + 0x800); + assert(bmd); + p = (typeof(p))bmd->getBytesNoCopy(); + p += 0x800; + data = 0x11111111; + r = copyout(&data, p, sizeof(data)); + assert(r == 0); + data = 0x22222222; + r = copyout(&data, p + 0x1000, sizeof(data)); + assert(r == 0); + data = 0x33333333; + r = copyout(&data, p + 0x2000, sizeof(data)); + assert(r == 0); + data = 0x44444444; + r = copyout(&data, p + 0x3000, sizeof(data)); + assert(r == 0); + + md = IOMemoryDescriptor::withAddressRange(p, 0x4018, + kIODirectionOut | options, + current_task()); + assert(md); + time = mach_absolute_time(); + map = md->map(kIOMapReadOnly); + time = mach_absolute_time() - time; + assert(map); + absolutetime_to_nanoseconds(time, &nano); + + p2 = (typeof(p2))map->getVirtualAddress(); + assert(0x11 == p2[0]); + assert(0x22 == p2[0x1000]); + assert(0x33 == p2[0x2000]); + assert(0x44 == p2[0x3000]); + + data = 0x99999999; + r = copyout(&data, p + 0x2000, sizeof(data)); + assert(r == 0); + + assert(0x11 == p2[0]); + assert(0x22 == p2[0x1000]); + assert(0x44 == p2[0x3000]); + if (kIOMemoryMapCopyOnWrite & options) { + assert(0x33 == p2[0x2000]); + } else { + assert(0x99 == p2[0x2000]); + } + + IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n", + kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "", + nano); + + map->release(); + md->release(); + bmd->release(); + + return kIOReturnSuccess; } static int IOMemoryMapCopyOnWriteTest(int newValue) { - IOMemoryMapTest(0); - IOMemoryMapTest(kIOMemoryMapCopyOnWrite); - return (0); + IOMemoryMapTest(0); + IOMemoryMapTest(kIOMemoryMapCopyOnWrite); + return 0; } static int AllocationNameTest(int newValue) { - IOMemoryDescriptor * bmd; - kern_allocation_name_t name, prior; + IOMemoryDescriptor * bmd; + kern_allocation_name_t name, prior; - name = kern_allocation_name_allocate("com.apple.iokit.test", 0); - assert(name); + name = kern_allocation_name_allocate("com.apple.iokit.test", 0); + assert(name); - prior = thread_set_allocation_name(name); + prior = thread_set_allocation_name(name); - bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL, - kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared, - ptoa(13)); - assert(bmd); - bmd->prepare(); + bmd = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL, + kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared, + ptoa(13)); + assert(bmd); + bmd->prepare(); - thread_set_allocation_name(prior); - kern_allocation_name_release(name); + thread_set_allocation_name(prior); + kern_allocation_name_release(name); - if (newValue != 7) bmd->release(); + if (newValue != 7) { + bmd->release(); + } - return (0); + return 0; } -int IOMemoryDescriptorTest(int newValue) +int +IOMemoryDescriptorTest(int newValue) { - int result; + int result; - IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount); + IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount); #if 0 - if (6 == newValue) - { - IOMemoryDescriptor * sbmds[3]; - IOMultiMemoryDescriptor * smmd; - IOMemoryDescriptor * mds[2]; - IOMultiMemoryDescriptor * mmd; - IOMemoryMap * map; - - sbmds[0] = 
IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1)); - sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2)); - sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3)); - smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds)/sizeof(sbmds[0]), kIODirectionOutIn, false); - - mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1)); - mds[1] = smmd; - mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds)/sizeof(mds[0]), kIODirectionOutIn, false); - map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere); - assert(map); - map->release(); - mmd->release(); - mds[0]->release(); - mds[1]->release(); - sbmds[0]->release(); - sbmds[1]->release(); - sbmds[2]->release(); - - return (0); - } - else if (5 == newValue) - { - IOReturn ret; - IOMemoryDescriptor * md; - IODMACommand * dma; - IODMACommand::SegmentOptions segOptions = - { - .fStructSize = sizeof(segOptions), - .fNumAddressBits = 64, - .fMaxSegmentSize = 4096, - .fMaxTransferSize = 128*1024, - .fAlignment = 4, - .fAlignmentLength = 4, - .fAlignmentInternalSegments = 0x1000 - }; + if (6 == newValue) { + IOMemoryDescriptor * sbmds[3]; + IOMultiMemoryDescriptor * smmd; + IOMemoryDescriptor * mds[2]; + IOMultiMemoryDescriptor * mmd; + IOMemoryMap * map; + + sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1)); + sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2)); + sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3)); + smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds) / sizeof(sbmds[0]), kIODirectionOutIn, false); + + mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1)); + mds[1] = smmd; + mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds) / sizeof(mds[0]), kIODirectionOutIn, false); + map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere); + assert(map); + map->release(); + mmd->release(); + mds[0]->release(); + mds[1]->release(); + sbmds[0]->release(); + sbmds[1]->release(); + sbmds[2]->release(); + + return 0; + } else if (5 == newValue) { + IOReturn ret; + IOMemoryDescriptor * md; + IODMACommand * dma; + IODMACommand::SegmentOptions segOptions = + { + .fStructSize = sizeof(segOptions), + .fNumAddressBits = 64, + .fMaxSegmentSize = 4096, + .fMaxTransferSize = 128 * 1024, + .fAlignment = 4, + .fAlignmentLength = 4, + .fAlignmentInternalSegments = 0x1000 + }; + + IOAddressRange ranges[3][2] = + { + { + { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc }, + { 0, 0 }, + }, + { + { ranges[0][0].address, 0x10 }, + { 0x3000 + ranges[0][0].address, 0xff0 }, + }, + { + { ranges[0][0].address, 0x2ffc }, + { trunc_page(ranges[0][0].address), 0x800 }, + }, + }; + static const uint32_t rangesCount[3] = { 1, 2, 2 }; + uint32_t test; + + for (test = 0; test < 3; test++) { + kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test, + ranges[test][0].address, ranges[test][0].length, + ranges[test][1].address, ranges[test][1].length); + + md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], 
kIODirectionOut, kernel_task); + assert(md); + ret = md->prepare(); + assert(kIOReturnSuccess == ret); + dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions, + IODMACommand::kMapped, NULL, NULL); + assert(dma); + ret = dma->setMemoryDescriptor(md, true); + if (kIOReturnSuccess == ret) { + IODMACommand::Segment64 segments[1]; + UInt32 numSegments; + UInt64 offset; + + offset = 0; + do{ + numSegments = 1; + ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments); + assert(kIOReturnSuccess == ret); + assert(1 == numSegments); + kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength); + }while (offset < md->getLength()); + + ret = dma->clearMemoryDescriptor(true); + assert(kIOReturnSuccess == ret); + dma->release(); + } + md->release(); + } - IOAddressRange ranges[3][2] = - { - { - { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc }, - { 0, 0 }, - }, - { - { ranges[0][0].address, 0x10 }, - { 0x3000 + ranges[0][0].address, 0xff0 }, - }, - { - { ranges[0][0].address, 0x2ffc }, - { trunc_page(ranges[0][0].address), 0x800 }, - }, - }; - static const uint32_t rangesCount[3] = { 1, 2, 2 }; - uint32_t test; + return kIOReturnSuccess; + } else if (4 == newValue) { + IOService * isp; + IOMapper * mapper; + IOBufferMemoryDescriptor * md1; + IODMACommand * dma; + IOReturn ret; + size_t bufSize = 8192 * 8192 * sizeof(uint32_t); + uint64_t start, time, nano; + + isp = IOService::copyMatchingService(IOService::nameMatching("isp")); + assert(isp); + mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0); + assert(mapper); + + md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL, + kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable, + bufSize, page_size); + + ret = md1->prepare(); + assert(kIOReturnSuccess == ret); - for (test = 0; test < 3; test++) - { - kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test, - ranges[test][0].address, ranges[test][0].length, - ranges[test][1].address, ranges[test][1].length); - - md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task); - assert(md); - ret = md->prepare(); - assert(kIOReturnSuccess == ret); - dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions, - IODMACommand::kMapped, NULL, NULL); - assert(dma); - ret = dma->setMemoryDescriptor(md, true); - if (kIOReturnSuccess == ret) - { - IODMACommand::Segment64 segments[1]; - UInt32 numSegments; - UInt64 offset; + IODMAMapSpecification mapSpec; + bzero(&mapSpec, sizeof(mapSpec)); + uint64_t mapped; + uint64_t mappedLength; - offset = 0; - do - { - numSegments = 1; - ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments); - assert(kIOReturnSuccess == ret); - assert(1 == numSegments); - kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength); - } - while (offset < md->getLength()); + start = mach_absolute_time(); - ret = dma->clearMemoryDescriptor(true); + ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength); assert(kIOReturnSuccess == ret); - dma->release(); - } - md->release(); - } - - return (kIOReturnSuccess); - } - else if (4 == newValue) - { - IOService * isp; - IOMapper * mapper; - IOBufferMemoryDescriptor * md1; - IODMACommand * dma; - IOReturn ret; - size_t bufSize = 8192 * 8192 * sizeof(uint32_t); - uint64_t start, time, nano; - - isp = IOService::copyMatchingService(IOService::nameMatching("isp")); - assert(isp); - mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0); - assert(mapper); - - 
md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL, - kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable, - bufSize, page_size); - - ret = md1->prepare(); - assert(kIOReturnSuccess == ret); - IODMAMapSpecification mapSpec; - bzero(&mapSpec, sizeof(mapSpec)); - uint64_t mapped; - uint64_t mappedLength; + time = mach_absolute_time() - start; - start = mach_absolute_time(); + absolutetime_to_nanoseconds(time, &nano); + kprintf("time %lld us\n", nano / 1000ULL); + kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength); - ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength); - assert(kIOReturnSuccess == ret); + assert(md1); - time = mach_absolute_time() - start; + dma = IODMACommand::withSpecification(kIODMACommandOutputHost32, + 32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL); - absolutetime_to_nanoseconds(time, &nano); - kprintf("time %lld us\n", nano / 1000ULL); - kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength); + assert(dma); - assert(md1); + start = mach_absolute_time(); + ret = dma->setMemoryDescriptor(md1, true); + assert(kIOReturnSuccess == ret); + time = mach_absolute_time() - start; - dma = IODMACommand::withSpecification(kIODMACommandOutputHost32, - 32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL); + absolutetime_to_nanoseconds(time, &nano); + kprintf("time %lld us\n", nano / 1000ULL); - assert(dma); - start = mach_absolute_time(); - ret = dma->setMemoryDescriptor(md1, true); - assert(kIOReturnSuccess == ret); - time = mach_absolute_time() - start; + IODMACommand::Segment32 segments[1]; + UInt32 numSegments = 1; + UInt64 offset; - absolutetime_to_nanoseconds(time, &nano); - kprintf("time %lld us\n", nano / 1000ULL); + offset = 0; + ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments); + assert(kIOReturnSuccess == ret); + assert(1 == numSegments); + kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength); - - IODMACommand::Segment32 segments[1]; - UInt32 numSegments = 1; - UInt64 offset; + ret = dma->clearMemoryDescriptor(true); + assert(kIOReturnSuccess == ret); - offset = 0; - ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments); - assert(kIOReturnSuccess == ret); - assert(1 == numSegments); - kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength); + md1->release(); - ret = dma->clearMemoryDescriptor(true); - assert(kIOReturnSuccess == ret); + return kIOReturnSuccess; + } - md1->release(); - - return (kIOReturnSuccess); - } - - if (3 == newValue) - { - IOBufferMemoryDescriptor * md1; - IOBufferMemoryDescriptor * md2; - IOMemoryMap * map1; - IOMemoryMap * map2; - uint32_t * buf1; - uint32_t * buf2; - IOReturn err; - - md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL, - kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable, - 64*1024, page_size); - assert(md1); - map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique); - assert(map1); - buf1 = (uint32_t *) map1->getVirtualAddress(); - - md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL, - kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable, - 64*1024, page_size); - assert(md2); - map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique); - assert(map2); - buf2 = (uint32_t *) map2->getVirtualAddress(); - - memset(buf1, 0x11, 64*1024L); - memset(buf2, 0x22, 64*1024L); - - kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2); - - kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]); - 
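The 4 == newValue case times dmaMap() and setMemoryDescriptor() with the usual mach_absolute_time()/absolutetime_to_nanoseconds() idiom. The idiom in isolation looks like this; TimeSomething is an illustrative name, and IOSleep merely stands in for the operation the test actually measures.

#include <IOKit/IOLib.h>
#include <mach/mach_time.h>
#include <kern/clock.h>

static void
TimeSomething(void)
{
	uint64_t start, elapsed, nano;

	start = mach_absolute_time();
	IOSleep(1);                                   // the operation under test
	elapsed = mach_absolute_time() - start;

	absolutetime_to_nanoseconds(elapsed, &nano);
	IOLog("time %lld us\n", nano / 1000ULL);
}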
assert(0x11111111 == buf1[0]); - assert(0x22222222 == buf2[0]); - err = map1->redirect(md2, 0, 0ULL); - kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]); - assert(0x11111111 == buf2[0]); - assert(0x22222222 == buf1[0]); - err = map1->redirect(md1, 0, 0ULL); - kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]); - assert(0x11111111 == buf1[0]); - assert(0x22222222 == buf2[0]); - map1->release(); - map2->release(); - md1->release(); - md2->release(); - } + if (3 == newValue) { + IOBufferMemoryDescriptor * md1; + IOBufferMemoryDescriptor * md2; + IOMemoryMap * map1; + IOMemoryMap * map2; + uint32_t * buf1; + uint32_t * buf2; + IOReturn err; + + md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL, + kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable, + 64 * 1024, page_size); + assert(md1); + map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique); + assert(map1); + buf1 = (uint32_t *) map1->getVirtualAddress(); + + md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL, + kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable, + 64 * 1024, page_size); + assert(md2); + map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique); + assert(map2); + buf2 = (uint32_t *) map2->getVirtualAddress(); + + memset(buf1, 0x11, 64 * 1024L); + memset(buf2, 0x22, 64 * 1024L); + + kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2); + + kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]); + assert(0x11111111 == buf1[0]); + assert(0x22222222 == buf2[0]); + err = map1->redirect(md2, 0, 0ULL); + kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]); + assert(0x11111111 == buf2[0]); + assert(0x22222222 == buf1[0]); + err = map1->redirect(md1, 0, 0ULL); + kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]); + assert(0x11111111 == buf1[0]); + assert(0x22222222 == buf2[0]); + map1->release(); + map2->release(); + md1->release(); + md2->release(); + } #endif // result = IODMACommandLocalMappedNonContig(newValue); // if (result) return (result); - result = IODMACommandForceDoubleBufferTest(newValue); - if (result) return (result); + result = IODMACommandForceDoubleBufferTest(newValue); + if (result) { + return result; + } - result = AllocationNameTest(newValue); - if (result) return (result); + result = AllocationNameTest(newValue); + if (result) { + return result; + } - result = IOMemoryMapCopyOnWriteTest(newValue); - if (result) return (result); + result = IOMemoryMapCopyOnWriteTest(newValue); + if (result) { + return result; + } - result = IOMultMemoryDescriptorTest(newValue); - if (result) return (result); + result = IOMultMemoryDescriptorTest(newValue); + if (result) { + return result; + } - result = ZeroLengthTest(newValue); - if (result) return (result); + result = ZeroLengthTest(newValue); + if (result) { + return result; + } - result = IODirectionPrepareNoZeroFillTest(newValue); - if (result) return (result); + result = IODirectionPrepareNoZeroFillTest(newValue); + if (result) { + return result; + } - result = BadFixedAllocTest(newValue); - if (result) return (result); + result = BadFixedAllocTest(newValue); + if (result) { + return result; + } - result = IOMemoryRemoteTest(newValue); - if (result) return (result); + result = IOMemoryRemoteTest(newValue); + if (result) { + return result; + } - result = IOMemoryPrefaultTest(newValue); - if (result) return (result); + result = IOMemoryPrefaultTest(newValue); + if (result) { + return 
result; + } - IOGeneralMemoryDescriptor * md; - vm_offset_t data[2]; - vm_size_t bsize = 16*1024*1024; - vm_size_t srcsize, srcoffset, mapoffset, size; - kern_return_t kr; + IOGeneralMemoryDescriptor * md; + vm_offset_t data[2]; + vm_size_t bsize = 16 * 1024 * 1024; + vm_size_t srcsize, srcoffset, mapoffset, size; + kern_return_t kr; - data[0] = data[1] = 0; - kr = vm_allocate_kernel(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT); - assert(KERN_SUCCESS == kr); + data[0] = data[1] = 0; + kr = vm_allocate_kernel(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT); + assert(KERN_SUCCESS == kr); - vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE); - vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE); + vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE); + vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE); - IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]); + IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]); - uint32_t idx, offidx; - for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) - { - ((uint32_t*)data[0])[idx] = idx; - } + uint32_t idx, offidx; + for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++) { + ((uint32_t*)data[0])[idx] = idx; + } - for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c)) - { - for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc)) - { - IOAddressRange ranges[3]; - uint32_t rangeCount = 1; - - bzero(&ranges[0], sizeof(ranges)); - ranges[0].address = data[0] + srcoffset; - ranges[0].length = srcsize; - ranges[1].address = ranges[2].address = data[0]; - - if (srcsize > ptoa(5)) - { - ranges[0].length = 7634; - ranges[1].length = 9870; - ranges[2].length = srcsize - ranges[0].length - ranges[1].length; - ranges[1].address = ranges[0].address + ranges[0].length; - ranges[2].address = ranges[1].address + ranges[1].length; - rangeCount = 3; - } - else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset)) - { - ranges[0].length = ptoa(1); - ranges[1].length = ptoa(1); - ranges[2].length = srcsize - ranges[0].length - ranges[1].length; - ranges[0].address = data[0] + srcoffset + ptoa(1); - ranges[1].address = data[0] + srcoffset; - ranges[2].address = ranges[0].address + ranges[0].length; - rangeCount = 3; - } - - md = OSDynamicCast(IOGeneralMemoryDescriptor, - IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task)); - assert(md); - - IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n", - (long) srcsize, (long) srcoffset, - (long long) ranges[0].address - data[0], (long long) ranges[0].length, - (long long) ranges[1].address - data[0], (long long) ranges[1].length, - (long long) ranges[2].address - data[0], (long long) ranges[2].length); - - if (kIOReturnSuccess == kr) - { - for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) - { - for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200)) - { - IOMemoryMap * map; - mach_vm_address_t addr = 0; - uint32_t data; + for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c)) { + for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc)) { + IOAddressRange ranges[3]; + uint32_t rangeCount = 1; + + bzero(&ranges[0], sizeof(ranges)); + ranges[0].address = data[0] + srcoffset; + ranges[0].length = srcsize; + ranges[1].address = 
ranges[2].address = data[0]; + + if (srcsize > ptoa(5)) { + ranges[0].length = 7634; + ranges[1].length = 9870; + ranges[2].length = srcsize - ranges[0].length - ranges[1].length; + ranges[1].address = ranges[0].address + ranges[0].length; + ranges[2].address = ranges[1].address + ranges[1].length; + rangeCount = 3; + } else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset)) { + ranges[0].length = ptoa(1); + ranges[1].length = ptoa(1); + ranges[2].length = srcsize - ranges[0].length - ranges[1].length; + ranges[0].address = data[0] + srcoffset + ptoa(1); + ranges[1].address = data[0] + srcoffset; + ranges[2].address = ranges[0].address + ranges[0].length; + rangeCount = 3; + } + + md = OSDynamicCast(IOGeneralMemoryDescriptor, + IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task)); + assert(md); + + IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n", + (long) srcsize, (long) srcoffset, + (long long) ranges[0].address - data[0], (long long) ranges[0].length, + (long long) ranges[1].address - data[0], (long long) ranges[1].length, + (long long) ranges[2].address - data[0], (long long) ranges[2].length); + + if (kIOReturnSuccess == kr) { + for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00)) { + for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200)) { + IOMemoryMap * map; + mach_vm_address_t addr = 0; + uint32_t data; // IOLog("createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size); - if (map) addr = map->getAddress(); - else kr = kIOReturnError; + map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size); + if (map) { + addr = map->getAddress(); + } else { + kr = kIOReturnError; + } // IOLog(">mapRef 0x%x %llx\n", kr, addr); - if (kIOReturnSuccess != kr) break; - kr = md->prepare(); - if (kIOReturnSuccess != kr) - { - panic("prepare() fail 0x%x\n", kr); - break; - } - for (idx = 0; idx < size; idx += sizeof(uint32_t)) - { - offidx = (idx + mapoffset + srcoffset); - if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) - { - if (offidx < ptoa(2)) offidx ^= ptoa(1); - } - offidx /= sizeof(uint32_t); - - if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)]) - { - panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset); - kr = kIOReturnBadMedia; - } - else - { - if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0; - if (offidx != data) - { - panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset); - kr = kIOReturnBadMedia; + if (kIOReturnSuccess != kr) { + break; + } + kr = md->prepare(); + if (kIOReturnSuccess != kr) { + panic("prepare() fail 0x%x\n", kr); + break; + } + for (idx = 0; idx < size; idx += sizeof(uint32_t)) { + offidx = (idx + mapoffset + srcoffset); + if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset)) { + if (offidx < ptoa(2)) { + offidx ^= ptoa(1); + } + } + offidx /= sizeof(uint32_t); + + if (offidx != ((uint32_t*)addr)[idx / sizeof(uint32_t)]) { + panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset); + kr = kIOReturnBadMedia; + } else { + if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) { + data = 0; + } + if (offidx != data) { + panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) 
srcoffset, (long) mapoffset); + kr = kIOReturnBadMedia; + } + } + } + md->complete(); + map->release(); +// IOLog("unmapRef %llx\n", addr); + } + if (kIOReturnSuccess != kr) { + break; + } } - } } - md->complete(); - map->release(); -// IOLog("unmapRef %llx\n", addr); - } - if (kIOReturnSuccess != kr) break; + md->release(); + if (kIOReturnSuccess != kr) { + break; + } + } + if (kIOReturnSuccess != kr) { + break; } - } - md->release(); - if (kIOReturnSuccess != kr) break; } - if (kIOReturnSuccess != kr) break; - } - if (kIOReturnSuccess != kr) IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n", - (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset); + if (kIOReturnSuccess != kr) { + IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n", + (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset); + } - assert(kr == kIOReturnSuccess); + assert(kr == kIOReturnSuccess); - vm_deallocate(kernel_map, data[0], bsize); + vm_deallocate(kernel_map, data[0], bsize); // vm_deallocate(kernel_map, data[1], size); - IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount); + IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount); - return (0); + return 0; } #endif /* DEVELOPMENT || DEBUG */ diff --git a/iokit/Tests/Tests.cpp b/iokit/Tests/Tests.cpp index dca24997b..7e7fe8f1d 100644 --- a/iokit/Tests/Tests.cpp +++ b/iokit/Tests/Tests.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
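The test-runner chain above also makes visible the mechanical style change this patch applies throughout: single-statement conditionals gain braces, and return drops its parentheses. In miniature:

// before:
if (result) return (result);

// after (this patch):
if (result) {
	return result;
}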
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -193,142 +193,146 @@ static uint64_t gIOWorkLoopTestDeadline; static void TESAction(OSObject * owner, IOTimerEventSource * tes) { - if (mach_absolute_time() < gIOWorkLoopTestDeadline) tes->setTimeout(1, kMicrosecondScale); + if (mach_absolute_time() < gIOWorkLoopTestDeadline) { + tes->setTimeout(1, kMicrosecondScale); + } } static int IOWorkLoopTest(int newValue) { - IOReturn err; - uint32_t idx; - IOWorkLoop * wl; - IOTimerEventSource * tes; - IOInterruptEventSource * ies; - - wl = IOWorkLoop::workLoop(); - assert(wl); - tes = IOTimerEventSource::timerEventSource(kIOTimerEventSourceOptionsPriorityWorkLoop, wl, &TESAction); - assert(tes); - err = wl->addEventSource(tes); - assert(kIOReturnSuccess == err); - clock_interval_to_deadline(100, kMillisecondScale, &gIOWorkLoopTestDeadline); - for (idx = 0; mach_absolute_time() < gIOWorkLoopTestDeadline; idx++) - { - tes->setTimeout(idx & 1023, kNanosecondScale); - } - tes->cancelTimeout(); - wl->removeEventSource(tes); - tes->release(); - - int value = 3; - - tes = IOTimerEventSource::timerEventSource(kIOTimerEventSourceOptionsDefault, wl, ^(IOTimerEventSource * tes){ - kprintf("wl %p, value %d\n", wl, value); - }); - err = wl->addEventSource(tes); - assert(kIOReturnSuccess == err); - - value = 2; - tes->setTimeout(1, kNanosecondScale); - IOSleep(1); - wl->removeEventSource(tes); - tes->release(); - - ies = IOInterruptEventSource::interruptEventSource(wl, NULL, 0, ^void(IOInterruptEventSource *sender, int count){ - kprintf("ies block %p, %d\n", sender, count); - }); - - assert(ies); - kprintf("ies %p\n", ies); - err = wl->addEventSource(ies); - assert(kIOReturnSuccess == err); - ies->interruptOccurred(NULL, NULL, 0); - IOSleep(1); - ies->interruptOccurred(NULL, NULL, 0); - IOSleep(1); - wl->removeEventSource(ies); - ies->release(); - - wl->release(); - - return (0); + IOReturn err; + uint32_t idx; + IOWorkLoop * wl; + IOTimerEventSource * tes; + IOInterruptEventSource * ies; + + wl = IOWorkLoop::workLoop(); + assert(wl); + tes = IOTimerEventSource::timerEventSource(kIOTimerEventSourceOptionsPriorityWorkLoop, wl, &TESAction); + assert(tes); + err = wl->addEventSource(tes); + assert(kIOReturnSuccess == err); + clock_interval_to_deadline(100, kMillisecondScale, &gIOWorkLoopTestDeadline); + for (idx = 0; mach_absolute_time() < gIOWorkLoopTestDeadline; idx++) { + tes->setTimeout(idx & 1023, kNanosecondScale); + } + tes->cancelTimeout(); + wl->removeEventSource(tes); + tes->release(); + + int value = 3; + + tes = IOTimerEventSource::timerEventSource(kIOTimerEventSourceOptionsDefault, wl, ^(IOTimerEventSource * tes){ + kprintf("wl %p, value %d\n", wl, value); + }); + err = wl->addEventSource(tes); + assert(kIOReturnSuccess == err); + + value = 2; + tes->setTimeout(1, kNanosecondScale); + IOSleep(1); + wl->removeEventSource(tes); + tes->release(); + + ies = IOInterruptEventSource::interruptEventSource(wl, NULL, 0, ^void (IOInterruptEventSource *sender, int count){ + kprintf("ies block %p, %d\n", sender, count); + }); + + assert(ies); + kprintf("ies %p\n", ies); + err = wl->addEventSource(ies); + assert(kIOReturnSuccess == err); + ies->interruptOccurred(NULL, NULL, 0); + IOSleep(1); + ies->interruptOccurred(NULL, NULL, 0); + IOSleep(1); + wl->removeEventSource(ies); + ies->release(); + + wl->release(); + + return 0; } static int OSCollectionTest(int newValue) { - OSArray * array = OSArray::withCapacity(8); - array->setObject(kOSBooleanTrue); - array->setObject(kOSBooleanFalse); - 
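IOWorkLoopTest above hammers the IOTimerEventSource lifecycle at speed. The basic create/add/arm/cancel/remove/release sequence it stresses, isolated into a sketch (RunOneShotTimer and TimerAction are illustrative names; the calls mirror the hunk above):

#include <IOKit/IOWorkLoop.h>
#include <IOKit/IOTimerEventSource.h>
#include <IOKit/IOLib.h>

static void
TimerAction(OSObject * owner, IOTimerEventSource * tes)
{
	IOLog("timer fired\n");
}

static IOReturn
RunOneShotTimer(void)
{
	IOWorkLoop * wl = IOWorkLoop::workLoop();
	if (!wl) {
		return kIOReturnNoMemory;
	}
	IOTimerEventSource * tes = IOTimerEventSource::timerEventSource(
		kIOTimerEventSourceOptionsDefault, wl, &TimerAction);
	if (!tes) {
		wl->release();
		return kIOReturnNoMemory;
	}
	IOReturn err = wl->addEventSource(tes);
	if (kIOReturnSuccess == err) {
		tes->setTimeout(1, kMillisecondScale);    // arm a one-shot timeout
		IOSleep(10);                              // let it fire
		tes->cancelTimeout();
		wl->removeEventSource(tes);
	}
	tes->release();
	wl->release();
	return err;
}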
array->setObject(kOSBooleanFalse); - array->setObject(kOSBooleanTrue); - array->setObject(kOSBooleanFalse); - array->setObject(kOSBooleanTrue); - - __block unsigned int index; - index = 0; - array->iterateObjects(^bool(OSObject * obj) { - kprintf("%d:%d ", index, (obj == kOSBooleanTrue) ? 1 : (obj == kOSBooleanFalse) ? 0 : 2); - index++; - return (false); - }); - kprintf("\n"); - array->release(); - - OSDictionary * dict = IOService::resourceMatching("hello"); - assert(dict); - index = 0; - dict->iterateObjects(^bool(const OSSymbol * sym, OSObject * obj) { - OSString * str = OSDynamicCast(OSString, obj); - assert(str); - kprintf("%d:%s=%s\n", index, sym->getCStringNoCopy(), str->getCStringNoCopy()); - index++; - return (false); - }); - dict->release(); - - OSSerializer * serializer = OSSerializer::withBlock(^bool(OSSerialize * s){ - return (gIOBSDUnitKey->serialize(s)); - }); - assert(serializer); - IOService::getPlatform()->setProperty("OSSerializer_withBlock", serializer); - serializer->release(); - - return (0); + OSArray * array = OSArray::withCapacity(8); + array->setObject(kOSBooleanTrue); + array->setObject(kOSBooleanFalse); + array->setObject(kOSBooleanFalse); + array->setObject(kOSBooleanTrue); + array->setObject(kOSBooleanFalse); + array->setObject(kOSBooleanTrue); + + __block unsigned int index; + index = 0; + array->iterateObjects(^bool (OSObject * obj) { + kprintf("%d:%d ", index, (obj == kOSBooleanTrue) ? 1 : (obj == kOSBooleanFalse) ? 0 : 2); + index++; + return false; + }); + kprintf("\n"); + array->release(); + + OSDictionary * dict = IOService::resourceMatching("hello"); + assert(dict); + index = 0; + dict->iterateObjects(^bool (const OSSymbol * sym, OSObject * obj) { + OSString * str = OSDynamicCast(OSString, obj); + assert(str); + kprintf("%d:%s=%s\n", index, sym->getCStringNoCopy(), str->getCStringNoCopy()); + index++; + return false; + }); + dict->release(); + + OSSerializer * serializer = OSSerializer::withBlock(^bool (OSSerialize * s){ + return gIOBSDUnitKey->serialize(s); + }); + assert(serializer); + IOService::getPlatform()->setProperty("OSSerializer_withBlock", serializer); + serializer->release(); + + return 0; } #if 0 #include class TestUserClient : public IOUserClient { - OSDeclareDefaultStructors(TestUserClient); - virtual void stop( IOService *provider) APPLE_KEXT_OVERRIDE; - virtual bool finalize(IOOptionBits options) APPLE_KEXT_OVERRIDE; - virtual IOReturn externalMethod( uint32_t selector, - IOExternalMethodArguments * arguments, - IOExternalMethodDispatch * dispatch, - OSObject * target, - void * reference ) APPLE_KEXT_OVERRIDE; + OSDeclareDefaultStructors(TestUserClient); + virtual void stop( IOService *provider) APPLE_KEXT_OVERRIDE; + virtual bool finalize(IOOptionBits options) APPLE_KEXT_OVERRIDE; + virtual IOReturn externalMethod( uint32_t selector, + IOExternalMethodArguments * arguments, + IOExternalMethodDispatch * dispatch, + OSObject * target, + void * reference ) APPLE_KEXT_OVERRIDE; }; -void TestUserClient::stop( IOService *provider) +void +TestUserClient::stop( IOService *provider) { - kprintf("TestUserClient::stop\n"); + kprintf("TestUserClient::stop\n"); } -bool TestUserClient::finalize(IOOptionBits options) +bool +TestUserClient::finalize(IOOptionBits options) { - kprintf("TestUserClient::finalize\n"); - return(true); + kprintf("TestUserClient::finalize\n"); + return true; } -IOReturn TestUserClient::externalMethod( uint32_t selector, - IOExternalMethodArguments * arguments, - IOExternalMethodDispatch * dispatch, - OSObject * target, - 
void * reference ) +IOReturn +TestUserClient::externalMethod( uint32_t selector, + IOExternalMethodArguments * arguments, + IOExternalMethodDispatch * dispatch, + OSObject * target, + void * reference ) { - getProvider()->terminate(); - IOSleep(500); - return (0); + getProvider()->terminate(); + IOSleep(500); + return 0; } OSDefineMetaClassAndStructors(TestUserClient, IOUserClient); #endif @@ -336,47 +340,47 @@ OSDefineMetaClassAndStructors(TestUserClient, IOUserClient); static int IOServiceTest(int newValue) { - OSDictionary * matching; - IONotifier * note; - __block IOService * found; + OSDictionary * matching; + IONotifier * note; + __block IOService * found; #if 0 - found = new IOService; - found->init(); - found->setName("IOTestUserClientProvider"); - found->attach(IOService::getPlatform()); - found->setProperty("IOUserClientClass", "TestUserClient"); - found->registerService(); + found = new IOService; + found->init(); + found->setName("IOTestUserClientProvider"); + found->attach(IOService::getPlatform()); + found->setProperty("IOUserClientClass", "TestUserClient"); + found->registerService(); #endif - matching = IOService::serviceMatching("IOPlatformExpert"); - assert(matching); - found = nullptr; - note = IOService::addMatchingNotification(gIOMatchedNotification, matching, 0, - ^bool(IOService * newService, IONotifier * notifier) { - kprintf("found %s, %d\n", newService->getName(), newService->getRetainCount()); - found = newService; - found->retain(); - return (true); + matching = IOService::serviceMatching("IOPlatformExpert"); + assert(matching); + found = nullptr; + note = IOService::addMatchingNotification(gIOMatchedNotification, matching, 0, + ^bool (IOService * newService, IONotifier * notifier) { + kprintf("found %s, %d\n", newService->getName(), newService->getRetainCount()); + found = newService; + found->retain(); + return true; } - ); - assert(note); - assert(found); - matching->release(); - note->remove(); - - note = found->registerInterest(gIOBusyInterest, - ^IOReturn(uint32_t messageType, IOService * provider, - void * messageArgument, size_t argSize) { - kprintf("%p messageType 0x%08x %p\n", provider, messageType, messageArgument); - return (kIOReturnSuccess); - }); - assert(note); - IOSleep(1*1000); - note->remove(); - found->release(); - - return (0); + ); + assert(note); + assert(found); + matching->release(); + note->remove(); + + note = found->registerInterest(gIOBusyInterest, + ^IOReturn (uint32_t messageType, IOService * provider, + void * messageArgument, size_t argSize) { + kprintf("%p messageType 0x%08x %p\n", provider, messageType, messageArgument); + return kIOReturnSuccess; + }); + assert(note); + IOSleep(1 * 1000); + note->remove(); + found->release(); + + return 0; } #endif /* DEVELOPMENT || DEBUG */ @@ -384,64 +388,60 @@ IOServiceTest(int newValue) static int sysctl_iokittest(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) { - int error; - int newValue, changed; + int error; + int newValue, changed; - error = sysctl_io_number(req, 0, sizeof(int), &newValue, &changed); - if (error) return (error); + error = sysctl_io_number(req, 0, sizeof(int), &newValue, &changed); + if (error) { + return error; + } #if DEVELOPMENT || DEBUG - if (changed && (66==newValue)) - { - IOReturn ret; - IOWorkLoop * wl = IOWorkLoop::workLoop(); - IOCommandGate * cg = IOCommandGate::commandGate(wl); - ret = wl->addEventSource(cg); - - struct x - { - uint64_t h; - uint64_t l; - }; - struct x y; - - y.h = 0x1111111122222222; - y.l 
= 0x3333333344444444; - - kprintf("ret1 %d\n", ret); - ret = cg->runActionBlock(^(){ - printf("hello %d 0x%qx\n", wl->inGate(), y.h); - return 99; - }); - kprintf("ret %d\n", ret); - } - - if (changed && (999==newValue)) - { - OSData * data = OSData::withCapacity(16); - data->release(); - data->release(); - } - - - if (changed && newValue) - { - error = IOWorkLoopTest(newValue); - assert(KERN_SUCCESS == error); - error = IOServiceTest(newValue); - assert(KERN_SUCCESS == error); - error = OSCollectionTest(newValue); - assert(KERN_SUCCESS == error); - error = IOMemoryDescriptorTest(newValue); - assert(KERN_SUCCESS == error); - } + if (changed && (66 == newValue)) { + IOReturn ret; + IOWorkLoop * wl = IOWorkLoop::workLoop(); + IOCommandGate * cg = IOCommandGate::commandGate(wl); + ret = wl->addEventSource(cg); + + struct x { + uint64_t h; + uint64_t l; + }; + struct x y; + + y.h = 0x1111111122222222; + y.l = 0x3333333344444444; + + kprintf("ret1 %d\n", ret); + ret = cg->runActionBlock(^(){ + printf("hello %d 0x%qx\n", wl->inGate(), y.h); + return 99; + }); + kprintf("ret %d\n", ret); + } + + if (changed && (999 == newValue)) { + OSData * data = OSData::withCapacity(16); + data->release(); + data->release(); + } + + + if (changed && newValue) { + error = IOWorkLoopTest(newValue); + assert(KERN_SUCCESS == error); + error = IOServiceTest(newValue); + assert(KERN_SUCCESS == error); + error = OSCollectionTest(newValue); + assert(KERN_SUCCESS == error); + error = IOMemoryDescriptorTest(newValue); + assert(KERN_SUCCESS == error); + } #endif /* DEVELOPMENT || DEBUG */ - return (error); + return error; } SYSCTL_PROC(_kern, OID_AUTO, iokittest, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, - 0, 0, sysctl_iokittest, "I", ""); - - + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NOAUTO | CTLFLAG_KERN | CTLFLAG_LOCKED, + 0, 0, sysctl_iokittest, "I", ""); diff --git a/iokit/Tests/Tests.h b/iokit/Tests/Tests.h index 67abf6bf6..ddb8f0ed9 100644 --- a/iokit/Tests/Tests.h +++ b/iokit/Tests/Tests.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/iokit/bsddev/DINetBootHook.cpp b/iokit/bsddev/DINetBootHook.cpp index 6ca295a81..17ebe5a23 100644 --- a/iokit/bsddev/DINetBootHook.cpp +++ b/iokit/bsddev/DINetBootHook.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2002-2016 Apple Inc. All rights reserved. 
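sysctl_iokittest above follows the standard shape for a write-triggered debug sysctl: sysctl_io_number() both reports the current value and detects a write. A self-contained sketch of the same shape; kern.mytest is a hypothetical OID used only for illustration, not part of this patch.

#include <sys/sysctl.h>
#include <pexpert/pexpert.h>

static int
sysctl_mytest(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
	int error, newValue, changed;

	error = sysctl_io_number(req, 0, sizeof(int), &newValue, &changed);
	if (error) {
		return error;
	}
	if (changed && newValue) {
		kprintf("mytest triggered with %d\n", newValue);  // run the test here
	}
	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, mytest,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_mytest, "I", "");

Writing to the OID (for example, sysctl -w kern.mytest=1) invokes the handler with changed set; a plain read returns 0 without running anything.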
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -40,42 +40,42 @@ * * Revision 1.3.1558.1 2005/06/24 01:47:25 lindak * Bringing over all of the Karma changes into chardonnay. - * + * * Revision 1.1.1.1 2005/02/24 21:48:06 akosut * Import xnu-764 from Tiger8A395 - * + * * Revision 1.3 2002/06/16 20:36:02 lindak * Merged PR-2957314 into Jaguar (siegmund: netboot kernel code needs to set * com.apple.AppleDiskImageController.load to boolean Yes) - * + * * Revision 1.2.40.2 2002/06/15 03:50:38 dieter * - corrected com.apple.AppleDiskImageController.load string - * + * * Revision 1.2.40.1 2002/06/15 03:01:08 dieter * Bug #: 2957314 * - add call to force IOHDIXController to get loaded/matched - * + * * Revision 1.2 2002/05/03 18:08:39 lindak * Merged PR-2909558 into Jaguar (siegmund POST WWDC: add support for NetBoot * over IOHDIXController) - * + * * Revision 1.1.2.1 2002/04/24 22:29:12 dieter * Bug #: 2909558 * - added IOHDIXController netboot stubs - * + * * Revision 1.3 2002/04/16 00:41:37 han * migrated code out of here to IOHDIXController's setProperty method - * + * * Revision 1.2 2002/04/14 23:53:53 han * eliminate qDEBUG=1, use emums instead of hard coded string constants - * + * * Revision 1.1 2002/04/14 22:54:42 han * Renamed from DINetBookHook.c. * First stab at implementing this code. 
- * + * * Revision 1.1 2002/04/13 19:22:28 han * added stub file DINetBookHook.c - * + * * */ #ifndef qDEBUG @@ -91,109 +91,129 @@ #include #include "DINetBootHook.h" -#define kIOHDIXControllerClassName "IOHDIXController" -#define kDIRootImageKey "di-root-image" -#define kDIRootImageResultKey "di-root-image-result" -#define kDIRootImageDevNameKey "di-root-image-devname" -#define kDIRootImageDevTKey "di-root-image-devt" +#define kIOHDIXControllerClassName "IOHDIXController" +#define kDIRootImageKey "di-root-image" +#define kDIRootImageResultKey "di-root-image-result" +#define kDIRootImageDevNameKey "di-root-image-devname" +#define kDIRootImageDevTKey "di-root-image-devt" #define kDIRootRamFileKey "di-root-ram-file" static IOService * di_load_controller( void ) { - OSIterator * controllerIterator = 0; - OSDictionary * matchDictionary = 0; - IOService * controller = 0; - - do { - IOService::getResourceService()->publishResource("com.apple.AppleDiskImageController.load", kOSBooleanTrue); - IOService::getResourceService()->waitQuiet(); - - // first find IOHDIXController - matchDictionary = IOService::serviceMatching(kIOHDIXControllerClassName); - if (!matchDictionary) - break; - - controllerIterator = IOService::getMatchingServices(matchDictionary); - if (!controllerIterator) - break; - - controller = OSDynamicCast(IOService, controllerIterator->getNextObject()); - if (!controller) - break; - - controller->retain(); - } while (false); - - if (matchDictionary) matchDictionary->release(); - if (controllerIterator) controllerIterator->release(); + OSIterator * controllerIterator = 0; + OSDictionary * matchDictionary = 0; + IOService * controller = 0; + + do { + IOService::getResourceService()->publishResource("com.apple.AppleDiskImageController.load", kOSBooleanTrue); + IOService::getResourceService()->waitQuiet(); + + // first find IOHDIXController + matchDictionary = IOService::serviceMatching(kIOHDIXControllerClassName); + if (!matchDictionary) { + break; + } + + controllerIterator = IOService::getMatchingServices(matchDictionary); + if (!controllerIterator) { + break; + } + + controller = OSDynamicCast(IOService, controllerIterator->getNextObject()); + if (!controller) { + break; + } + + controller->retain(); + } while (false); + + if (matchDictionary) { + matchDictionary->release(); + } + if (controllerIterator) { + controllerIterator->release(); + } - return controller; + return controller; } extern "C" { /* - Name: di_root_image - Function: mount the disk image returning the dev node - Parameters: path -> path/url to disk image - devname <- dev node used to set the rootdevice global variable - dev_p <- device number generated from major/minor numbers - Comments: -*/ -int di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p) + * Name: di_root_image + * Function: mount the disk image returning the dev node + * Parameters: path -> path/url to disk image + * devname <- dev node used to set the rootdevice global variable + * dev_p <- device number generated from major/minor numbers + * Comments: + */ +int +di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p) { - IOReturn res = 0; - IOService * controller = 0; - OSString * pathString = 0; - OSNumber * myResult = 0; - OSString * myDevName = 0; - OSNumber * myDevT = 0; - + IOReturn res = 0; + IOService * controller = 0; + OSString * pathString = 0; + OSNumber * myResult = 0; + OSString * myDevName = 0; + OSNumber * myDevT = 0; + // sanity check arguments please - if (devname) *devname = 0; - if (dev_p) 
*dev_p = 0; - - if (!path) return kIOReturnBadArgument; - if (!devname) return kIOReturnBadArgument; - if (!dev_p) return kIOReturnBadArgument; - - controller = di_load_controller(); + if (devname) { + *devname = 0; + } + if (dev_p) { + *dev_p = 0; + } + + if (!path) { + return kIOReturnBadArgument; + } + if (!devname) { + return kIOReturnBadArgument; + } + if (!dev_p) { + return kIOReturnBadArgument; + } + + controller = di_load_controller(); if (!controller) { res = kIOReturnNotFound; goto NoIOHDIXController; } - + // okay create path object pathString = OSString::withCString(path); if (!pathString) { res = kIOReturnNoMemory; goto CannotCreatePathOSString; } - + // do it - if (!controller->setProperty(kDIRootImageKey, pathString)) + if (!controller->setProperty(kDIRootImageKey, pathString)) { IOLog("IOHDIXController::setProperty(%s, %s) failed.\n", kDIRootImageKey, pathString->getCStringNoCopy()); - + } + myResult = OSDynamicCast(OSNumber, controller->getProperty(kDIRootImageResultKey)); res = kIOReturnError; - if (myResult) + if (myResult) { res = myResult->unsigned32BitValue(); - + } + if (res) { IOLog("%s is 0x%08X/%d\n", kDIRootImageResultKey, res, res); goto di_root_image_FAILED; } - // success - grab + // success - grab myDevT = OSDynamicCast(OSNumber, controller->getProperty(kDIRootImageDevTKey)); - if (myDevT) + if (myDevT) { *dev_p = myDevT->unsigned32BitValue(); - else { + } else { IOLog("could not get %s\n", kDIRootImageDevTKey); res = kIOReturnError; goto di_root_image_FAILED; } - + myDevName = OSDynamicCast(OSString, controller->getProperty(kDIRootImageDevNameKey)); if (myDevName) { strlcpy(devname, myDevName->getCStringNoCopy(), devsz); @@ -202,15 +222,19 @@ int di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p) res = kIOReturnError; goto di_root_image_FAILED; } - + di_root_image_FAILED: CannotCreatePathOSString: NoIOHDIXController: // clean up memory allocations - if (pathString) pathString->release(); - if (controller) controller->release(); + if (pathString) { + pathString->release(); + } + if (controller) { + controller->release(); + } return res; } @@ -250,9 +274,9 @@ di_root_ramfile_buf(void *buf, size_t bufsz, char *devname, size_t devsz, dev_t } myDevT = OSDynamicCast(OSNumber, controller->getProperty(kDIRootImageDevTKey)); - if (myDevT) + if (myDevT) { *dev_p = myDevT->unsigned32BitValue(); - else { + } else { IOLog("could not get %s\n", kDIRootImageDevTKey); res = kIOReturnError; goto out; @@ -275,76 +299,76 @@ out: return res; } -void di_root_ramfile( IORegistryEntry * entry ) +void +di_root_ramfile( IORegistryEntry * entry ) { - OSData * data; - IOMemoryDescriptor * mem; - uint64_t dmgSize; - uint64_t remain, length; - OSData * extentData = 0; - IOAddressRange * extentList; - uint64_t extentSize; - uint32_t extentCount; - - do { - data = OSDynamicCast(OSData, entry->getProperty("boot-ramdmg-size")); - if (!data || (data->getLength() != sizeof(uint64_t))) - break; // bad disk image size - - dmgSize = *(uint64_t *) data->getBytesNoCopy(); - if (!dmgSize) - break; - - data = OSDynamicCast(OSData, entry->getProperty("boot-ramdmg-extents")); - if (!data || (data->getLength() == 0) || - ((data->getLength() & (sizeof(IOAddressRange)-1)) != 0)) - break; // bad extents - - // make modifications to local copy - extentData = OSData::withData(data); - assert(extentData); - - extentList = (IOAddressRange *) extentData->getBytesNoCopy(); - extentCount = extentData->getLength() / sizeof(IOAddressRange); - extentSize = 0; - remain = dmgSize; - - // 
truncate extent length to enclosing disk image - for (uint32_t i = 0; i < extentCount; i++) - { - length = extentList[i].length; - if (!length) break; - - extentSize += length; - if (length >= remain) - { - extentList[i].length = remain; - extentCount = i + 1; - break; - } - remain -= length; - } - if (extentSize < dmgSize) - break; // not enough extent bytes for enclosing disk image - - mem = IOMemoryDescriptor::withAddressRanges( - extentList, extentCount, - kIODirectionOut | kIOMemoryMapperNone, NULL); - - if (mem) - { - IOService * controller = di_load_controller(); - if (controller) - { - controller->setProperty(kDIRootRamFileKey, mem); - controller->release(); - } - mem->release(); - } - } while (false); - - if (extentData) - extentData->release(); + OSData * data; + IOMemoryDescriptor * mem; + uint64_t dmgSize; + uint64_t remain, length; + OSData * extentData = 0; + IOAddressRange * extentList; + uint64_t extentSize; + uint32_t extentCount; + + do { + data = OSDynamicCast(OSData, entry->getProperty("boot-ramdmg-size")); + if (!data || (data->getLength() != sizeof(uint64_t))) { + break; // bad disk image size + } + dmgSize = *(uint64_t *) data->getBytesNoCopy(); + if (!dmgSize) { + break; + } + + data = OSDynamicCast(OSData, entry->getProperty("boot-ramdmg-extents")); + if (!data || (data->getLength() == 0) || + ((data->getLength() & (sizeof(IOAddressRange) - 1)) != 0)) { + break; // bad extents + } + // make modifications to local copy + extentData = OSData::withData(data); + assert(extentData); + + extentList = (IOAddressRange *) extentData->getBytesNoCopy(); + extentCount = extentData->getLength() / sizeof(IOAddressRange); + extentSize = 0; + remain = dmgSize; + + // truncate extent length to enclosing disk image + for (uint32_t i = 0; i < extentCount; i++) { + length = extentList[i].length; + if (!length) { + break; + } + + extentSize += length; + if (length >= remain) { + extentList[i].length = remain; + extentCount = i + 1; + break; + } + remain -= length; + } + if (extentSize < dmgSize) { + break; // not enough extent bytes for enclosing disk image + } + mem = IOMemoryDescriptor::withAddressRanges( + extentList, extentCount, + kIODirectionOut | kIOMemoryMapperNone, NULL); + + if (mem) { + IOService * controller = di_load_controller(); + if (controller) { + controller->setProperty(kDIRootRamFileKey, mem); + controller->release(); + } + mem->release(); + } + } while (false); + + if (extentData) { + extentData->release(); + } } - }; diff --git a/iokit/bsddev/DINetBootHook.h b/iokit/bsddev/DINetBootHook.h index 172679524..3f3a0e331 100644 --- a/iokit/bsddev/DINetBootHook.h +++ b/iokit/bsddev/DINetBootHook.h @@ -39,37 +39,37 @@ * * Revision 1.3.1582.1 2005/06/24 01:47:25 lindak * Bringing over all of the Karma changes into chardonnay. 
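di_load_controller() above is an instance of a common IOKit lookup: publish a resource to trigger matching, wait for the registry to settle, then take the first registered service of a class. The lookup step in isolation (CopyFirstMatchingService is an illustrative name):

#include <IOKit/IOService.h>

static IOService *
CopyFirstMatchingService(const char * className)
{
	IOService * found = NULL;
	OSDictionary * matching = IOService::serviceMatching(className);
	if (!matching) {
		return NULL;
	}
	OSIterator * iter = IOService::getMatchingServices(matching);
	if (iter) {
		found = OSDynamicCast(IOService, iter->getNextObject());
		if (found) {
			found->retain();  // iterator does not retain on the caller's behalf
		}
		iter->release();
	}
	matching->release();
	return found;
}

The explicit retain mirrors the hunk above: objects handed out by a matching iterator are only guaranteed to live as long as the iterator unless the caller retains them.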
- * + * * Revision 1.1.1.1 2005/02/24 21:48:06 akosut * Import xnu-764 from Tiger8A395 - * + * * Revision 1.3 2002/05/22 18:50:49 aramesh * Kernel API Cleanup * Bug #: 2853781 * Changes from Josh(networking), Rick(IOKit), Jim & David(osfmk), Umesh, Dan & Ramesh(BSD) * Submitted by: Ramesh * Reviewed by: Vincent - * + * * Revision 1.2.12.1 2002/05/21 23:08:14 aramesh * Kernel API Cleanup * Bug #: 2853781 * Submitted by: Josh, Umesh, Jim, Rick and Ramesh * Reviewed by: Vincent - * + * * Revision 1.2 2002/05/03 18:08:39 lindak * Merged PR-2909558 into Jaguar (siegmund POST WWDC: add support for NetBoot * over IOHDIXController) - * + * * Revision 1.1.2.1 2002/04/24 22:29:12 dieter * Bug #: 2909558 * - added IOHDIXController netboot stubs - * + * * Revision 1.2 2002/04/14 22:56:47 han * fixed up comment re dev_t - * + * * Revision 1.1 2002/04/13 19:22:28 han * added stub file DINetBookHook.c - * + * * */ @@ -78,7 +78,7 @@ #include -#ifdef __APPLE_API_PRIVATE +#ifdef __APPLE_API_PRIVATE #ifdef __cplusplus extern "C" { @@ -87,13 +87,13 @@ extern "C" { #include /* - Name: di_root_image - Function: mount the disk image returning the dev node - Parameters: path -> path/url to disk image - devname <- dev node used to set the rootdevice global variable - dev_p <- combination of major/minor node - Comments: -*/ + * Name: di_root_image + * Function: mount the disk image returning the dev node + * Parameters: path -> path/url to disk image + * devname <- dev node used to set the rootdevice global variable + * dev_p <- combination of major/minor node + * Comments: + */ int di_root_image(const char *path, char *devname, size_t devsz, dev_t *dev_p); void di_root_ramfile( IORegistryEntry * entry ); int di_root_ramfile_buf(void *buf, size_t bufsz, char *devname, size_t devsz, dev_t *dev_p); diff --git a/iokit/bsddev/IOKitBSDInit.cpp b/iokit/bsddev/IOKitBSDInit.cpp index 7c0e5e9a7..6ce81657a 100644 --- a/iokit/bsddev/IOKitBSDInit.cpp +++ b/iokit/bsddev/IOKitBSDInit.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1998-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
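The extent loop in di_root_ramfile() above clamps a boot-supplied extent list so the ranges cover exactly the disk-image size, dropping anything past the end. The same logic as a standalone helper, sketched under the assumption that the caller bails out (as the patch does) when the extents fall short; TruncateExtents is an illustrative name.

#include <IOKit/IOTypes.h>

static uint32_t
TruncateExtents(IOAddressRange * extents, uint32_t count, uint64_t dmgSize)
{
	uint64_t remain = dmgSize;

	for (uint32_t i = 0; i < count; i++) {
		uint64_t length = extents[i].length;
		if (!length) {
			break;                        // zero-length extent ends the list early
		}
		if (length >= remain) {
			extents[i].length = remain;   // clamp the final extent
			return i + 1;                 // extents [0, i] now cover dmgSize exactly
		}
		remain -= length;
	}
	return 0;                                     // not enough extent bytes for the image
}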
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -35,7 +35,6 @@ #include extern "C" { - #include #include #include @@ -59,7 +58,7 @@ extern int mdevgetrange(int devid, uint64_t *base, uint64_t *size); extern void di_root_ramfile(IORegistryEntry * entry); #if CONFIG_EMBEDDED -#define IOPOLLED_COREFILE (CONFIG_KDP_INTERACTIVE_DEBUGGING) +#define IOPOLLED_COREFILE (CONFIG_KDP_INTERACTIVE_DEBUGGING) #if defined(XNU_TARGET_OS_BRIDGE) @@ -78,20 +77,20 @@ extern void di_root_ramfile(IORegistryEntry * entry); #endif /* defined(XNU_TARGET_OS_BRIDGE) */ #elif DEVELOPMENT /* CONFIG_EMBEDDED */ -#define IOPOLLED_COREFILE 1 +#define IOPOLLED_COREFILE 1 // no sizing -#define kIOCoreDumpSize 0ULL -#define kIOCoreDumpFreeSize 0ULL +#define kIOCoreDumpSize 0ULL +#define kIOCoreDumpFreeSize 0ULL #else /* CONFIG_EMBEDDED */ -#define IOPOLLED_COREFILE 0 +#define IOPOLLED_COREFILE 0 #endif /* CONFIG_EMBEDDED */ #if IOPOLLED_COREFILE -static bool +static bool NewKernelCoreMedia(void * target, void * refCon, - IOService * newService, - IONotifier * notifier); + IOService * newService, + IONotifier * notifier); #endif /* IOPOLLED_COREFILE */ #if CONFIG_KDP_INTERACTIVE_DEBUGGING @@ -105,606 +104,656 @@ extern uint64_t kdp_core_ramdisk_size; kern_return_t IOKitBSDInit( void ) { - IOService::publishResource("IOBSD"); + IOService::publishResource("IOBSD"); - return( kIOReturnSuccess ); + return kIOReturnSuccess; } void IOServicePublishResource( const char * property, boolean_t value ) { - if ( value) - IOService::publishResource( property, kOSBooleanTrue ); - else - IOService::getResourceService()->removeProperty( property ); + if (value) { + IOService::publishResource( property, kOSBooleanTrue ); + } else { + IOService::getResourceService()->removeProperty( property ); + } } boolean_t IOServiceWaitForMatchingResource( const char * property, uint64_t timeout ) { - OSDictionary * dict = 0; - IOService * match = 0; - boolean_t found = false; - - do { - - dict = IOService::resourceMatching( property ); - if( !dict) - continue; - match = IOService::waitForMatchingService( dict, timeout ); - if ( match) - found = true; - - } while( false ); - - if( dict) - dict->release(); - if( match) - match->release(); - - return( found ); + OSDictionary * dict = 0; + IOService * match = 0; + boolean_t found = false; + + do { + dict = IOService::resourceMatching( property ); + if (!dict) { + continue; + } + match = IOService::waitForMatchingService( dict, timeout ); + if (match) { + found = true; + } + } while (false); + + if (dict) { + dict->release(); + } + if (match) { + match->release(); + } + + return found; } boolean_t IOCatalogueMatchingDriversPresent( const char * property ) { - OSDictionary * dict = 0; - OSOrderedSet * set = 0; - SInt32 generationCount = 0; - boolean_t found = false; - - do { - - dict = OSDictionary::withCapacity(1); - if( !dict) - continue; - dict->setObject( property, kOSBooleanTrue ); - set = gIOCatalogue->findDrivers( dict, &generationCount ); - if ( set && (set->getCount() > 0)) - found = true; - - } while( false ); - - if( dict) - dict->release(); - if( set) - set->release(); - - return( found ); -} + OSDictionary * dict = 0; + OSOrderedSet * set = 0; + SInt32 generationCount = 0; + boolean_t found = false; + + do { + dict = OSDictionary::withCapacity(1); + if (!dict) { + continue; + } + dict->setObject( property, kOSBooleanTrue ); + set = gIOCatalogue->findDrivers( dict, &generationCount ); + if (set && (set->getCount() > 0)) { + found = true; + } + } while (false); -OSDictionary * 
IOBSDNameMatching( const char * name ) -{ - OSDictionary * dict; - const OSSymbol * str = 0; + if (dict) { + dict->release(); + } + if (set) { + set->release(); + } - do { + return found; +} - dict = IOService::serviceMatching( gIOServiceKey ); - if( !dict) - continue; - str = OSSymbol::withCString( name ); - if( !str) - continue; - dict->setObject( kIOBSDNameKey, (OSObject *) str ); - str->release(); +OSDictionary * +IOBSDNameMatching( const char * name ) +{ + OSDictionary * dict; + const OSSymbol * str = 0; - return( dict ); + do { + dict = IOService::serviceMatching( gIOServiceKey ); + if (!dict) { + continue; + } + str = OSSymbol::withCString( name ); + if (!str) { + continue; + } + dict->setObject( kIOBSDNameKey, (OSObject *) str ); + str->release(); - } while( false ); + return dict; + } while (false); - if( dict) - dict->release(); - if( str) - str->release(); + if (dict) { + dict->release(); + } + if (str) { + str->release(); + } - return( 0 ); + return 0; } -OSDictionary * IOUUIDMatching( void ) +OSDictionary * +IOUUIDMatching( void ) { - return IOService::resourceMatching( "boot-uuid-media" ); + return IOService::resourceMatching( "boot-uuid-media" ); } -OSDictionary * IONetworkNamePrefixMatching( const char * prefix ) +OSDictionary * +IONetworkNamePrefixMatching( const char * prefix ) { - OSDictionary * matching; - OSDictionary * propDict = 0; - const OSSymbol * str = 0; + OSDictionary * matching; + OSDictionary * propDict = 0; + const OSSymbol * str = 0; char networkType[128]; - - do { - matching = IOService::serviceMatching( "IONetworkInterface" ); - if ( matching == 0 ) - continue; - propDict = OSDictionary::withCapacity(1); - if ( propDict == 0 ) - continue; + do { + matching = IOService::serviceMatching( "IONetworkInterface" ); + if (matching == 0) { + continue; + } - str = OSSymbol::withCString( prefix ); - if ( str == 0 ) - continue; + propDict = OSDictionary::withCapacity(1); + if (propDict == 0) { + continue; + } - propDict->setObject( "IOInterfaceNamePrefix", (OSObject *) str ); - str->release(); - str = 0; + str = OSSymbol::withCString( prefix ); + if (str == 0) { + continue; + } + + propDict->setObject( "IOInterfaceNamePrefix", (OSObject *) str ); + str->release(); + str = 0; // see if we're contrained to netroot off of specific network type - if(PE_parse_boot_argn( "network-type", networkType, 128 )) - { + if (PE_parse_boot_argn( "network-type", networkType, 128 )) { str = OSSymbol::withCString( networkType ); - if(str) - { + if (str) { propDict->setObject( "IONetworkRootType", str); str->release(); str = 0; } } - if ( matching->setObject( gIOPropertyMatchKey, - (OSObject *) propDict ) != true ) - continue; - - propDict->release(); - propDict = 0; + if (matching->setObject( gIOPropertyMatchKey, + (OSObject *) propDict ) != true) { + continue; + } - return( matching ); + propDict->release(); + propDict = 0; - } while ( false ); + return matching; + } while (false); - if ( matching ) matching->release(); - if ( propDict ) propDict->release(); - if ( str ) str->release(); + if (matching) { + matching->release(); + } + if (propDict) { + propDict->release(); + } + if (str) { + str->release(); + } - return( 0 ); + return 0; } -static bool IORegisterNetworkInterface( IOService * netif ) +static bool +IORegisterNetworkInterface( IOService * netif ) { - // A network interface is typically named and registered - // with BSD after receiving a request from a user space - // "namer". 
However, for cases when the system needs to - // root from the network, this registration task must be - // done inside the kernel and completed before the root - // device is handed to BSD. - - IOService * stack; - OSNumber * zero = 0; - OSString * path = 0; - OSDictionary * dict = 0; - char * pathBuf = 0; - int len; - enum { kMaxPathLen = 512 }; - - do { - stack = IOService::waitForService( - IOService::serviceMatching("IONetworkStack") ); - if ( stack == 0 ) break; - - dict = OSDictionary::withCapacity(3); - if ( dict == 0 ) break; - - zero = OSNumber::withNumber((UInt64) 0, 32); - if ( zero == 0 ) break; - - pathBuf = (char *) IOMalloc( kMaxPathLen ); - if ( pathBuf == 0 ) break; - - len = kMaxPathLen; - if ( netif->getPath( pathBuf, &len, gIOServicePlane ) - == false ) break; - - path = OSString::withCStringNoCopy( pathBuf ); - if ( path == 0 ) break; - - dict->setObject( "IOInterfaceUnit", zero ); - dict->setObject( kIOPathMatchKey, path ); - - stack->setProperties( dict ); - } - while ( false ); - - if ( zero ) zero->release(); - if ( path ) path->release(); - if ( dict ) dict->release(); - if ( pathBuf ) IOFree(pathBuf, kMaxPathLen); - - return ( netif->getProperty( kIOBSDNameKey ) != 0 ); -} + // A network interface is typically named and registered + // with BSD after receiving a request from a user space + // "namer". However, for cases when the system needs to + // root from the network, this registration task must be + // done inside the kernel and completed before the root + // device is handed to BSD. + + IOService * stack; + OSNumber * zero = 0; + OSString * path = 0; + OSDictionary * dict = 0; + char * pathBuf = 0; + int len; + enum { kMaxPathLen = 512 }; + + do { + stack = IOService::waitForService( + IOService::serviceMatching("IONetworkStack")); + if (stack == 0) { + break; + } -OSDictionary * IOOFPathMatching( const char * path, char * buf, int maxLen ) -{ - OSDictionary * matching = NULL; - OSString * str; - char * comp; - int len; + dict = OSDictionary::withCapacity(3); + if (dict == 0) { + break; + } + + zero = OSNumber::withNumber((UInt64) 0, 32); + if (zero == 0) { + break; + } - do { + pathBuf = (char *) IOMalloc( kMaxPathLen ); + if (pathBuf == 0) { + break; + } + + len = kMaxPathLen; + if (netif->getPath( pathBuf, &len, gIOServicePlane ) + == false) { + break; + } + + path = OSString::withCStringNoCopy( pathBuf ); + if (path == 0) { + break; + } + + dict->setObject( "IOInterfaceUnit", zero ); + dict->setObject( kIOPathMatchKey, path ); + + stack->setProperties( dict ); + }while (false); + + if (zero) { + zero->release(); + } + if (path) { + path->release(); + } + if (dict) { + dict->release(); + } + if (pathBuf) { + IOFree(pathBuf, kMaxPathLen); + } - len = strlen( kIODeviceTreePlane ":" ); - maxLen -= len; - if( maxLen <= 0) - continue; + return netif->getProperty( kIOBSDNameKey ) != 0; +} - strlcpy( buf, kIODeviceTreePlane ":", len + 1 ); - comp = buf + len; +OSDictionary * +IOOFPathMatching( const char * path, char * buf, int maxLen ) +{ + OSDictionary * matching = NULL; + OSString * str; + char * comp; + int len; + + do { + len = strlen( kIODeviceTreePlane ":" ); + maxLen -= len; + if (maxLen <= 0) { + continue; + } - len = strlen( path ); - maxLen -= len; - if( maxLen <= 0) - continue; - strlcpy( comp, path, len + 1 ); + strlcpy( buf, kIODeviceTreePlane ":", len + 1 ); + comp = buf + len; - matching = OSDictionary::withCapacity( 1 ); - if( !matching) - continue; + len = strlen( path ); + maxLen -= len; + if (maxLen <= 0) { + continue; + } + strlcpy( comp, 
path, len + 1 ); - str = OSString::withCString( buf ); - if( !str) - continue; - matching->setObject( kIOPathMatchKey, str ); - str->release(); + matching = OSDictionary::withCapacity( 1 ); + if (!matching) { + continue; + } - return( matching ); + str = OSString::withCString( buf ); + if (!str) { + continue; + } + matching->setObject( kIOPathMatchKey, str ); + str->release(); - } while( false ); + return matching; + } while (false); - if( matching) - matching->release(); + if (matching) { + matching->release(); + } - return( 0 ); + return 0; } static int didRam = 0; enum { kMaxPathBuf = 512, kMaxBootVar = 128 }; -kern_return_t IOFindBSDRoot( char * rootName, unsigned int rootNameSize, - dev_t * root, u_int32_t * oflags ) +kern_return_t +IOFindBSDRoot( char * rootName, unsigned int rootNameSize, + dev_t * root, u_int32_t * oflags ) { - mach_timespec_t t; - IOService * service; - IORegistryEntry * regEntry; - OSDictionary * matching = 0; - OSString * iostr; - OSNumber * off; - OSData * data = 0; - - UInt32 flags = 0; - int mnr, mjr; - const char * mediaProperty = 0; - char * rdBootVar; - char * str; - const char * look = 0; - int len; - bool debugInfoPrintedOnce = false; - const char * uuidStr = NULL; - - static int mountAttempts = 0; - - int xchar, dchar; - - // stall here for anyone matching on the IOBSD resource to finish (filesystems) - matching = IOService::serviceMatching(gIOResourcesKey); - assert(matching); - matching->setObject(gIOResourceMatchedKey, gIOBSDKey); + mach_timespec_t t; + IOService * service; + IORegistryEntry * regEntry; + OSDictionary * matching = 0; + OSString * iostr; + OSNumber * off; + OSData * data = 0; + + UInt32 flags = 0; + int mnr, mjr; + const char * mediaProperty = 0; + char * rdBootVar; + char * str; + const char * look = 0; + int len; + bool debugInfoPrintedOnce = false; + const char * uuidStr = NULL; + + static int mountAttempts = 0; + + int xchar, dchar; + + // stall here for anyone matching on the IOBSD resource to finish (filesystems) + matching = IOService::serviceMatching(gIOResourcesKey); + assert(matching); + matching->setObject(gIOResourceMatchedKey, gIOBSDKey); if ((service = IOService::waitForMatchingService(matching, 30ULL * kSecondScale))) { service->release(); } else { IOLog("!BSD\n"); } - matching->release(); + matching->release(); matching = NULL; - if( mountAttempts++) - { - IOLog("mount(%d) failed\n", mountAttempts); - IOSleep( 5 * 1000 ); - } - - str = (char *) IOMalloc( kMaxPathBuf + kMaxBootVar ); - if( !str) - return( kIOReturnNoMemory ); - rdBootVar = str + kMaxPathBuf; - - if (!PE_parse_boot_argn("rd", rdBootVar, kMaxBootVar ) - && !PE_parse_boot_argn("rootdev", rdBootVar, kMaxBootVar )) - rdBootVar[0] = 0; - - do { - if( (regEntry = IORegistryEntry::fromPath( "/chosen", gIODTPlane ))) { - di_root_ramfile(regEntry); - data = OSDynamicCast(OSData, regEntry->getProperty( "root-matching" )); - if (data) { - matching = OSDynamicCast(OSDictionary, OSUnserializeXML((char *)data->getBytesNoCopy())); - if (matching) { - continue; - } - } - - data = (OSData *) regEntry->getProperty( "boot-uuid" ); - if( data) { - uuidStr = (const char*)data->getBytesNoCopy(); - OSString *uuidString = OSString::withCString( uuidStr ); - - // match the boot-args boot-uuid processing below - if( uuidString) { - IOLog("rooting via boot-uuid from /chosen: %s\n", uuidStr); - IOService::publishResource( "boot-uuid", uuidString ); - uuidString->release(); - matching = IOUUIDMatching(); - mediaProperty = "boot-uuid-media"; - regEntry->release(); - continue; - } 
else { - uuidStr = NULL; - } - } - regEntry->release(); + if (mountAttempts++) { + IOLog("mount(%d) failed\n", mountAttempts); + IOSleep( 5 * 1000 ); + } + + str = (char *) IOMalloc( kMaxPathBuf + kMaxBootVar ); + if (!str) { + return kIOReturnNoMemory; + } + rdBootVar = str + kMaxPathBuf; + + if (!PE_parse_boot_argn("rd", rdBootVar, kMaxBootVar ) + && !PE_parse_boot_argn("rootdev", rdBootVar, kMaxBootVar )) { + rdBootVar[0] = 0; } - } while( false ); + + do { + if ((regEntry = IORegistryEntry::fromPath( "/chosen", gIODTPlane ))) { + di_root_ramfile(regEntry); + data = OSDynamicCast(OSData, regEntry->getProperty( "root-matching" )); + if (data) { + matching = OSDynamicCast(OSDictionary, OSUnserializeXML((char *)data->getBytesNoCopy())); + if (matching) { + continue; + } + } + + data = (OSData *) regEntry->getProperty( "boot-uuid" ); + if (data) { + uuidStr = (const char*)data->getBytesNoCopy(); + OSString *uuidString = OSString::withCString( uuidStr ); + + // match the boot-args boot-uuid processing below + if (uuidString) { + IOLog("rooting via boot-uuid from /chosen: %s\n", uuidStr); + IOService::publishResource( "boot-uuid", uuidString ); + uuidString->release(); + matching = IOUUIDMatching(); + mediaProperty = "boot-uuid-media"; + regEntry->release(); + continue; + } else { + uuidStr = NULL; + } + } + regEntry->release(); + } + } while (false); // // See if we have a RAMDisk property in /chosen/memory-map. If so, make it into a device. -// It will become /dev/mdx, where x is 0-f. +// It will become /dev/mdx, where x is 0-f. // - if(!didRam) { /* Have we already build this ram disk? */ - didRam = 1; /* Remember we did this */ - if((regEntry = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ))) { /* Find the map node */ - data = (OSData *)regEntry->getProperty("RAMDisk"); /* Find the ram disk, if there */ - if(data) { /* We found one */ + if (!didRam) { /* Have we already build this ram disk? */ + didRam = 1; /* Remember we did this */ + if ((regEntry = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ))) { /* Find the map node */ + data = (OSData *)regEntry->getProperty("RAMDisk"); /* Find the ram disk, if there */ + if (data) { /* We found one */ uintptr_t *ramdParms; - ramdParms = (uintptr_t *)data->getBytesNoCopy(); /* Point to the ram disk base and size */ - (void)mdevadd(-1, ml_static_ptovirt(ramdParms[0]) >> 12, ramdParms[1] >> 12, 0); /* Initialize it and pass back the device number */ + ramdParms = (uintptr_t *)data->getBytesNoCopy(); /* Point to the ram disk base and size */ + (void)mdevadd(-1, ml_static_ptovirt(ramdParms[0]) >> 12, ramdParms[1] >> 12, 0); /* Initialize it and pass back the device number */ } - regEntry->release(); /* Toss the entry */ + regEntry->release(); /* Toss the entry */ } } - + // // Now check if we are trying to root on a memory device // - if((rdBootVar[0] == 'm') && (rdBootVar[1] == 'd') && (rdBootVar[3] == 0)) { - dchar = xchar = rdBootVar[2]; /* Get the actual device */ - if((xchar >= '0') && (xchar <= '9')) xchar = xchar - '0'; /* If digit, convert */ - else { - xchar = xchar & ~' '; /* Fold to upper case */ - if((xchar >= 'A') && (xchar <= 'F')) { /* Is this a valid digit? 
*/ - xchar = (xchar & 0xF) + 9; /* Convert the hex digit */ - dchar = dchar | ' '; /* Fold to lower case */ + if ((rdBootVar[0] == 'm') && (rdBootVar[1] == 'd') && (rdBootVar[3] == 0)) { + dchar = xchar = rdBootVar[2]; /* Get the actual device */ + if ((xchar >= '0') && (xchar <= '9')) { + xchar = xchar - '0'; /* If digit, convert */ + } else { + xchar = xchar & ~' '; /* Fold to upper case */ + if ((xchar >= 'A') && (xchar <= 'F')) { /* Is this a valid digit? */ + xchar = (xchar & 0xF) + 9; /* Convert the hex digit */ + dchar = dchar | ' '; /* Fold to lower case */ + } else { + xchar = -1; /* Show bogus */ } - else xchar = -1; /* Show bogus */ } - if(xchar >= 0) { /* Do we have a valid memory device name? */ - *root = mdevlookup(xchar); /* Find the device number */ - if(*root >= 0) { /* Did we find one? */ - rootName[0] = 'm'; /* Build root name */ - rootName[1] = 'd'; /* Build root name */ - rootName[2] = dchar; /* Build root name */ - rootName[3] = 0; /* Build root name */ + if (xchar >= 0) { /* Do we have a valid memory device name? */ + *root = mdevlookup(xchar); /* Find the device number */ + if (*root >= 0) { /* Did we find one? */ + rootName[0] = 'm'; /* Build root name */ + rootName[1] = 'd'; /* Build root name */ + rootName[2] = dchar; /* Build root name */ + rootName[3] = 0; /* Build root name */ IOLog("BSD root: %s, major %d, minor %d\n", rootName, major(*root), minor(*root)); - *oflags = 0; /* Show that this is not network */ + *oflags = 0; /* Show that this is not network */ #if CONFIG_KDP_INTERACTIVE_DEBUGGING - /* retrieve final ramdisk range and initialize KDP variables */ - if (mdevgetrange(xchar, &kdp_core_ramdisk_addr, &kdp_core_ramdisk_size) != 0) { - IOLog("Unable to retrieve range for root memory device %d\n", xchar); - kdp_core_ramdisk_addr = 0; - kdp_core_ramdisk_size = 0; - } + /* retrieve final ramdisk range and initialize KDP variables */ + if (mdevgetrange(xchar, &kdp_core_ramdisk_addr, &kdp_core_ramdisk_size) != 0) { + IOLog("Unable to retrieve range for root memory device %d\n", xchar); + kdp_core_ramdisk_addr = 0; + kdp_core_ramdisk_size = 0; + } #endif - goto iofrootx; /* Join common exit... */ + goto iofrootx; /* Join common exit... 
*/ } - panic("IOFindBSDRoot: specified root memory device, %s, has not been configured\n", rdBootVar); /* Not there */ + panic("IOFindBSDRoot: specified root memory device, %s, has not been configured\n", rdBootVar); /* Not there */ } } - if( (!matching) && rdBootVar[0] ) { - // by BSD name - look = rdBootVar; - if( look[0] == '*') - look++; - - if ( strncmp( look, "en", strlen( "en" )) == 0 ) { - matching = IONetworkNamePrefixMatching( "en" ); - } else if ( strncmp( look, "uuid", strlen( "uuid" )) == 0 ) { - char *uuid; - OSString *uuidString; - - uuid = (char *)IOMalloc( kMaxBootVar ); - - if ( uuid ) { - if (!PE_parse_boot_argn( "boot-uuid", uuid, kMaxBootVar )) { - panic( "rd=uuid but no boot-uuid= specified" ); - } - uuidString = OSString::withCString( uuid ); - if ( uuidString ) { - IOService::publishResource( "boot-uuid", uuidString ); - uuidString->release(); - IOLog( "\nWaiting for boot volume with UUID %s\n", uuid ); - matching = IOUUIDMatching(); - mediaProperty = "boot-uuid-media"; - } - IOFree( uuid, kMaxBootVar ); - } - } else { - matching = IOBSDNameMatching( look ); + if ((!matching) && rdBootVar[0]) { + // by BSD name + look = rdBootVar; + if (look[0] == '*') { + look++; + } + + if (strncmp( look, "en", strlen( "en" )) == 0) { + matching = IONetworkNamePrefixMatching( "en" ); + } else if (strncmp( look, "uuid", strlen( "uuid" )) == 0) { + char *uuid; + OSString *uuidString; + + uuid = (char *)IOMalloc( kMaxBootVar ); + + if (uuid) { + if (!PE_parse_boot_argn( "boot-uuid", uuid, kMaxBootVar )) { + panic( "rd=uuid but no boot-uuid= specified" ); + } + uuidString = OSString::withCString( uuid ); + if (uuidString) { + IOService::publishResource( "boot-uuid", uuidString ); + uuidString->release(); + IOLog( "\nWaiting for boot volume with UUID %s\n", uuid ); + matching = IOUUIDMatching(); + mediaProperty = "boot-uuid-media"; + } + IOFree( uuid, kMaxBootVar ); + } + } else { + matching = IOBSDNameMatching( look ); + } + } + + if (!matching) { + OSString * astring; + // Match any HFS media + + matching = IOService::serviceMatching( "IOMedia" ); + astring = OSString::withCStringNoCopy("Apple_HFS"); + if (astring) { + matching->setObject("Content", astring); + astring->release(); + } } - } - - if( !matching) { - OSString * astring; - // Match any HFS media - - matching = IOService::serviceMatching( "IOMedia" ); - astring = OSString::withCStringNoCopy("Apple_HFS"); - if ( astring ) { - matching->setObject("Content", astring); - astring->release(); - } - } - - if( gIOKitDebug & kIOWaitQuietBeforeRoot ) { - IOLog( "Waiting for matching to complete\n" ); - IOService::getPlatform()->waitQuiet(); - } - - if( true && matching) { - OSSerialize * s = OSSerialize::withCapacity( 5 ); - - if( matching->serialize( s )) { - IOLog( "Waiting on %s\n", s->text() ); - s->release(); - } - } - - do { - t.tv_sec = ROOTDEVICETIMEOUT; - t.tv_nsec = 0; - matching->retain(); - service = IOService::waitForService( matching, &t ); - if( (!service) || (mountAttempts == 10)) { - PE_display_icon( 0, "noroot"); - IOLog( "Still waiting for root device\n" ); - - if( !debugInfoPrintedOnce) { - debugInfoPrintedOnce = true; - if( gIOKitDebug & kIOLogDTree) { - IOLog("\nDT plane:\n"); - IOPrintPlane( gIODTPlane ); - } - if( gIOKitDebug & kIOLogServiceTree) { - IOLog("\nService plane:\n"); - IOPrintPlane( gIOServicePlane ); - } - if( gIOKitDebug & kIOLogMemory) - IOPrintMemory(); - } + + if (gIOKitDebug & kIOWaitQuietBeforeRoot) { + IOLog( "Waiting for matching to complete\n" ); + IOService::getPlatform()->waitQuiet(); 
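/*
 * [Editor's aside — not part of the patch.] The rd=mdX handling earlier in
 * IOFindBSDRoot() folds a single ASCII character to a memory-device unit
 * number (0-9, then a-f/A-F mapping to 10-15). A minimal standalone sketch
 * of that same decoding, using a hypothetical name (example_md_unit_from_char
 * is not in xnu), shown only to make the bit-twiddling explicit:
 */
static int
example_md_unit_from_char(char c)
{
	if (c >= '0' && c <= '9') {
		return c - '0';                 /* decimal digit -> 0..9 */
	}
	c &= ~' ';                              /* fold ASCII letter to upper case */
	if (c >= 'A' && c <= 'F') {
		return (c & 0xF) + 9;           /* 'A'..'F' -> 10..15 */
	}
	return -1;                              /* not a valid /dev/mdX unit */
}
/* e.g. 'c' folds to 'C' (0x43); (0x43 & 0xF) + 9 == 12, so rd=mdc is unit 12. */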
} - } while( !service); - matching->release(); - if ( service && mediaProperty ) { - service = (IOService *)service->getProperty(mediaProperty); - } + if (true && matching) { + OSSerialize * s = OSSerialize::withCapacity( 5 ); - mjr = 0; - mnr = 0; + if (matching->serialize( s )) { + IOLog( "Waiting on %s\n", s->text()); + s->release(); + } + } - // If the IOService we matched to is a subclass of IONetworkInterface, - // then make sure it has been registered with BSD and has a BSD name - // assigned. + do { + t.tv_sec = ROOTDEVICETIMEOUT; + t.tv_nsec = 0; + matching->retain(); + service = IOService::waitForService( matching, &t ); + if ((!service) || (mountAttempts == 10)) { + PE_display_icon( 0, "noroot"); + IOLog( "Still waiting for root device\n" ); + + if (!debugInfoPrintedOnce) { + debugInfoPrintedOnce = true; + if (gIOKitDebug & kIOLogDTree) { + IOLog("\nDT plane:\n"); + IOPrintPlane( gIODTPlane ); + } + if (gIOKitDebug & kIOLogServiceTree) { + IOLog("\nService plane:\n"); + IOPrintPlane( gIOServicePlane ); + } + if (gIOKitDebug & kIOLogMemory) { + IOPrintMemory(); + } + } + } + } while (!service); + matching->release(); - if ( service - && service->metaCast( "IONetworkInterface" ) - && !IORegisterNetworkInterface( service ) ) - { - service = 0; - } + if (service && mediaProperty) { + service = (IOService *)service->getProperty(mediaProperty); + } - if( service) { + mjr = 0; + mnr = 0; - len = kMaxPathBuf; - service->getPath( str, &len, gIOServicePlane ); - IOLog( "Got boot device = %s\n", str ); + // If the IOService we matched to is a subclass of IONetworkInterface, + // then make sure it has been registered with BSD and has a BSD name + // assigned. - iostr = (OSString *) service->getProperty( kIOBSDNameKey ); - if( iostr) - strlcpy( rootName, iostr->getCStringNoCopy(), rootNameSize ); - off = (OSNumber *) service->getProperty( kIOBSDMajorKey ); - if( off) - mjr = off->unsigned32BitValue(); - off = (OSNumber *) service->getProperty( kIOBSDMinorKey ); - if( off) - mnr = off->unsigned32BitValue(); + if (service + && service->metaCast( "IONetworkInterface" ) + && !IORegisterNetworkInterface( service )) { + service = 0; + } - if( service->metaCast( "IONetworkInterface" )) - flags |= 1; + if (service) { + len = kMaxPathBuf; + service->getPath( str, &len, gIOServicePlane ); + IOLog( "Got boot device = %s\n", str ); - } else { + iostr = (OSString *) service->getProperty( kIOBSDNameKey ); + if (iostr) { + strlcpy( rootName, iostr->getCStringNoCopy(), rootNameSize ); + } + off = (OSNumber *) service->getProperty( kIOBSDMajorKey ); + if (off) { + mjr = off->unsigned32BitValue(); + } + off = (OSNumber *) service->getProperty( kIOBSDMinorKey ); + if (off) { + mnr = off->unsigned32BitValue(); + } - IOLog( "Wait for root failed\n" ); - strlcpy( rootName, "en0", rootNameSize ); - flags |= 1; - } + if (service->metaCast( "IONetworkInterface" )) { + flags |= 1; + } + } else { + IOLog( "Wait for root failed\n" ); + strlcpy( rootName, "en0", rootNameSize ); + flags |= 1; + } - IOLog( "BSD root: %s", rootName ); - if( mjr) - IOLog(", major %d, minor %d\n", mjr, mnr ); - else - IOLog("\n"); + IOLog( "BSD root: %s", rootName ); + if (mjr) { + IOLog(", major %d, minor %d\n", mjr, mnr ); + } else { + IOLog("\n"); + } - *root = makedev( mjr, mnr ); - *oflags = flags; + *root = makedev( mjr, mnr ); + *oflags = flags; - IOFree( str, kMaxPathBuf + kMaxBootVar ); + IOFree( str, kMaxPathBuf + kMaxBootVar ); iofrootx: - if( (gIOKitDebug & (kIOLogDTree | kIOLogServiceTree | kIOLogMemory)) && 
!debugInfoPrintedOnce) { - - IOService::getPlatform()->waitQuiet(); - if( gIOKitDebug & kIOLogDTree) { - IOLog("\nDT plane:\n"); - IOPrintPlane( gIODTPlane ); - } - if( gIOKitDebug & kIOLogServiceTree) { - IOLog("\nService plane:\n"); - IOPrintPlane( gIOServicePlane ); - } - if( gIOKitDebug & kIOLogMemory) - IOPrintMemory(); - } - - return( kIOReturnSuccess ); + if ((gIOKitDebug & (kIOLogDTree | kIOLogServiceTree | kIOLogMemory)) && !debugInfoPrintedOnce) { + IOService::getPlatform()->waitQuiet(); + if (gIOKitDebug & kIOLogDTree) { + IOLog("\nDT plane:\n"); + IOPrintPlane( gIODTPlane ); + } + if (gIOKitDebug & kIOLogServiceTree) { + IOLog("\nService plane:\n"); + IOPrintPlane( gIOServicePlane ); + } + if (gIOKitDebug & kIOLogMemory) { + IOPrintMemory(); + } + } + + return kIOReturnSuccess; } -bool IORamDiskBSDRoot(void) +bool +IORamDiskBSDRoot(void) { - char rdBootVar[kMaxBootVar]; - if (PE_parse_boot_argn("rd", rdBootVar, kMaxBootVar ) - || PE_parse_boot_argn("rootdev", rdBootVar, kMaxBootVar )) { - if((rdBootVar[0] == 'm') && (rdBootVar[1] == 'd') && (rdBootVar[3] == 0)) { - return true; - } - } - return false; + char rdBootVar[kMaxBootVar]; + if (PE_parse_boot_argn("rd", rdBootVar, kMaxBootVar ) + || PE_parse_boot_argn("rootdev", rdBootVar, kMaxBootVar )) { + if ((rdBootVar[0] == 'm') && (rdBootVar[1] == 'd') && (rdBootVar[3] == 0)) { + return true; + } + } + return false; } -void IOSecureBSDRoot(const char * rootName) +void +IOSecureBSDRoot(const char * rootName) { #if CONFIG_EMBEDDED - int tmpInt; - IOReturn result; - IOPlatformExpert *pe; - OSDictionary *matching; - const OSSymbol *functionName = OSSymbol::withCStringNoCopy("SecureRootName"); - - matching = IOService::serviceMatching("IOPlatformExpert"); - assert(matching); - pe = (IOPlatformExpert *) IOService::waitForMatchingService(matching, 30ULL * kSecondScale); - matching->release(); - assert(pe); - // Returns kIOReturnNotPrivileged is the root device is not secure. - // Returns kIOReturnUnsupported if "SecureRootName" is not implemented. - result = pe->callPlatformFunction(functionName, false, (void *)rootName, (void *)0, (void *)0, (void *)0); - functionName->release(); - OSSafeReleaseNULL(pe); - - if (result == kIOReturnNotPrivileged) { - mdevremoveall(); - } else if (result == kIOReturnSuccess) { - // If we are booting with a secure root, and we have the right - // boot-arg, we will want to panic on exception triage. This - // behavior is intended as a debug aid (we can look at why an - // exception occured in the kernel debugger). - if (PE_parse_boot_argn("-panic_on_exception_triage", &tmpInt, sizeof(tmpInt))) { - panic_on_exception_triage = 1; - } - } + int tmpInt; + IOReturn result; + IOPlatformExpert *pe; + OSDictionary *matching; + const OSSymbol *functionName = OSSymbol::withCStringNoCopy("SecureRootName"); + + matching = IOService::serviceMatching("IOPlatformExpert"); + assert(matching); + pe = (IOPlatformExpert *) IOService::waitForMatchingService(matching, 30ULL * kSecondScale); + matching->release(); + assert(pe); + // Returns kIOReturnNotPrivileged is the root device is not secure. + // Returns kIOReturnUnsupported if "SecureRootName" is not implemented. 
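/*
 * [Editor's aside — not part of the patch.] Both this function and
 * IOFindBSDRoot() above use the same blocking-lookup idiom:
 * serviceMatching() + waitForMatchingService() with a timeout, then a
 * release of the dictionary and (eventually) of the returned service.
 * A minimal sketch of the pattern for an arbitrary class name
 * (example_wait_for_service is a hypothetical helper, not in xnu):
 */
static IOService *
example_wait_for_service(const char * className, uint64_t seconds)
{
	OSDictionary * matching = IOService::serviceMatching(className);
	if (!matching) {
		return NULL;
	}
	IOService * svc = IOService::waitForMatchingService(matching,
	    seconds * kSecondScale);
	matching->release();            /* lookup done; drop our dictionary */
	return svc;                     /* NULL on timeout; caller releases */
}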
+ result = pe->callPlatformFunction(functionName, false, (void *)rootName, (void *)0, (void *)0, (void *)0); + functionName->release(); + OSSafeReleaseNULL(pe); + + if (result == kIOReturnNotPrivileged) { + mdevremoveall(); + } else if (result == kIOReturnSuccess) { + // If we are booting with a secure root, and we have the right + // boot-arg, we will want to panic on exception triage. This + // behavior is intended as a debug aid (we can look at why an + // exception occured in the kernel debugger). + if (PE_parse_boot_argn("-panic_on_exception_triage", &tmpInt, sizeof(tmpInt))) { + panic_on_exception_triage = 1; + } + } #endif // CONFIG_EMBEDDED } @@ -712,50 +761,55 @@ void IOSecureBSDRoot(const char * rootName) void * IOBSDRegistryEntryForDeviceTree(char * path) { - return (IORegistryEntry::fromPath(path, gIODTPlane)); + return IORegistryEntry::fromPath(path, gIODTPlane); } void IOBSDRegistryEntryRelease(void * entry) { - IORegistryEntry * regEntry = (IORegistryEntry *)entry; + IORegistryEntry * regEntry = (IORegistryEntry *)entry; - if (regEntry) - regEntry->release(); - return; + if (regEntry) { + regEntry->release(); + } + return; } const void * -IOBSDRegistryEntryGetData(void * entry, char * property_name, - int * packet_length) +IOBSDRegistryEntryGetData(void * entry, char * property_name, + int * packet_length) { - OSData * data; - IORegistryEntry * regEntry = (IORegistryEntry *)entry; - - data = (OSData *) regEntry->getProperty(property_name); - if (data) { - *packet_length = data->getLength(); - return (data->getBytesNoCopy()); - } - return (NULL); + OSData * data; + IORegistryEntry * regEntry = (IORegistryEntry *)entry; + + data = (OSData *) regEntry->getProperty(property_name); + if (data) { + *packet_length = data->getLength(); + return data->getBytesNoCopy(); + } + return NULL; } -kern_return_t IOBSDGetPlatformUUID( uuid_t uuid, mach_timespec_t timeout ) +kern_return_t +IOBSDGetPlatformUUID( uuid_t uuid, mach_timespec_t timeout ) { - IOService * resources; - OSString * string; + IOService * resources; + OSString * string; - resources = IOService::waitForService( IOService::resourceMatching( kIOPlatformUUIDKey ), ( timeout.tv_sec || timeout.tv_nsec ) ? &timeout : 0 ); - if ( resources == 0 ) return KERN_OPERATION_TIMED_OUT; + resources = IOService::waitForService( IOService::resourceMatching( kIOPlatformUUIDKey ), (timeout.tv_sec || timeout.tv_nsec) ? 
&timeout : 0 ); + if (resources == 0) { + return KERN_OPERATION_TIMED_OUT; + } - string = ( OSString * ) IOService::getPlatform( )->getProvider( )->getProperty( kIOPlatformUUIDKey ); - if ( string == 0 ) return KERN_NOT_SUPPORTED; + string = (OSString *) IOService::getPlatform()->getProvider()->getProperty( kIOPlatformUUIDKey ); + if (string == 0) { + return KERN_NOT_SUPPORTED; + } - uuid_parse( string->getCStringNoCopy( ), uuid ); + uuid_parse( string->getCStringNoCopy(), uuid ); - return KERN_SUCCESS; + return KERN_SUCCESS; } - } /* extern "C" */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -771,264 +825,285 @@ IOPolledFileIOVars * gIOPolledCoreFileVars; kern_return_t gIOPolledCoreFileOpenRet = kIOReturnNotReady; #if IOPOLLED_COREFILE -static IOReturn +static IOReturn IOOpenPolledCoreFile(const char * filename) { - IOReturn err; - unsigned int debug; - uint64_t corefile_size_bytes = 0; + IOReturn err; + unsigned int debug; + uint64_t corefile_size_bytes = 0; - if (gIOPolledCoreFileVars) return (kIOReturnBusy); - if (!IOPolledInterface::gMetaClass.getInstanceCount()) return (kIOReturnUnsupported); + if (gIOPolledCoreFileVars) { + return kIOReturnBusy; + } + if (!IOPolledInterface::gMetaClass.getInstanceCount()) { + return kIOReturnUnsupported; + } - debug = 0; - PE_parse_boot_argn("debug", &debug, sizeof (debug)); - if (DB_DISABLE_LOCAL_CORE & debug) return (kIOReturnUnsupported); + debug = 0; + PE_parse_boot_argn("debug", &debug, sizeof(debug)); + if (DB_DISABLE_LOCAL_CORE & debug) { + return kIOReturnUnsupported; + } #if CONFIG_EMBEDDED - unsigned int requested_corefile_size = 0; - if (PE_parse_boot_argn("corefile_size_mb", &requested_corefile_size, sizeof(requested_corefile_size))) { - IOLog("Boot-args specify %d MB kernel corefile\n", requested_corefile_size); + unsigned int requested_corefile_size = 0; + if (PE_parse_boot_argn("corefile_size_mb", &requested_corefile_size, sizeof(requested_corefile_size))) { + IOLog("Boot-args specify %d MB kernel corefile\n", requested_corefile_size); - corefile_size_bytes = (requested_corefile_size * 1024ULL * 1024ULL); - } + corefile_size_bytes = (requested_corefile_size * 1024ULL * 1024ULL); + } #endif - do { + do { #if defined(kIOCoreDumpLargeSize) - if (0 == corefile_size_bytes) - { - // If no custom size was requested and we're on a device with >3GB of DRAM, attempt - // to allocate a large corefile otherwise use a small file. 
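/*
 * [Editor's aside — not part of the patch.] The corefile_size_mb boot-arg
 * above is scaled from megabytes to bytes with 64-bit arithmetic so the
 * multiply cannot overflow 32 bits; corefile_size_mb=512 yields
 * 512 * 1024ULL * 1024ULL == 536870912 bytes. Sketch of that conversion
 * (example_corefile_bytes is a hypothetical name):
 */
static uint64_t
example_corefile_bytes(unsigned int megabytes)
{
	/* 1024ULL promotes the expression to 64 bits before multiplying */
	return megabytes * 1024ULL * 1024ULL;
}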
- if (max_mem > (3 * 1024ULL * 1024ULL * 1024ULL)) - { - corefile_size_bytes = kIOCoreDumpLargeSize; - err = IOPolledFileOpen(filename, - kIOPolledFileCreate, - corefile_size_bytes, kIOCoreDumpFreeSize, - NULL, 0, - &gIOPolledCoreFileVars, NULL, NULL, 0); - if (kIOReturnSuccess == err) - { - break; - } - else if (kIOReturnNoSpace == err) - { - IOLog("Failed to open corefile of size %llu MB (low disk space)", - (corefile_size_bytes / (1024ULL * 1024ULL))); - if (corefile_size_bytes == kIOCoreDumpMinSize) - { - gIOPolledCoreFileOpenRet = err; - return (err); - } - // Try to open a smaller corefile (set size and fall-through) - corefile_size_bytes = kIOCoreDumpMinSize; - } - else - { - IOLog("Failed to open corefile of size %llu MB (returned error 0x%x)\n", - (corefile_size_bytes / (1024ULL * 1024ULL)), err); - gIOPolledCoreFileOpenRet = err; - return (err); - } - } - else - { - corefile_size_bytes = kIOCoreDumpMinSize; - } - } + if (0 == corefile_size_bytes) { + // If no custom size was requested and we're on a device with >3GB of DRAM, attempt + // to allocate a large corefile otherwise use a small file. + if (max_mem > (3 * 1024ULL * 1024ULL * 1024ULL)) { + corefile_size_bytes = kIOCoreDumpLargeSize; + err = IOPolledFileOpen(filename, + kIOPolledFileCreate, + corefile_size_bytes, kIOCoreDumpFreeSize, + NULL, 0, + &gIOPolledCoreFileVars, NULL, NULL, 0); + if (kIOReturnSuccess == err) { + break; + } else if (kIOReturnNoSpace == err) { + IOLog("Failed to open corefile of size %llu MB (low disk space)", + (corefile_size_bytes / (1024ULL * 1024ULL))); + if (corefile_size_bytes == kIOCoreDumpMinSize) { + gIOPolledCoreFileOpenRet = err; + return err; + } + // Try to open a smaller corefile (set size and fall-through) + corefile_size_bytes = kIOCoreDumpMinSize; + } else { + IOLog("Failed to open corefile of size %llu MB (returned error 0x%x)\n", + (corefile_size_bytes / (1024ULL * 1024ULL)), err); + gIOPolledCoreFileOpenRet = err; + return err; + } + } else { + corefile_size_bytes = kIOCoreDumpMinSize; + } + } #else /* defined(kIOCoreDumpLargeSize) */ - if (0 == corefile_size_bytes) - { - corefile_size_bytes = kIOCoreDumpSize; - } + if (0 == corefile_size_bytes) { + corefile_size_bytes = kIOCoreDumpSize; + } #endif /* defined(kIOCoreDumpLargeSize) */ - err = IOPolledFileOpen(filename, - kIOPolledFileCreate, - corefile_size_bytes, kIOCoreDumpFreeSize, - NULL, 0, - &gIOPolledCoreFileVars, NULL, NULL, 0); - if (kIOReturnSuccess != err) - { - IOLog("Failed to open corefile of size %llu MB (returned error 0x%x)\n", - (corefile_size_bytes / (1024ULL * 1024ULL)), err); - gIOPolledCoreFileOpenRet = err; - return (err); - } - } while (false); - - err = IOPolledFilePollersSetup(gIOPolledCoreFileVars, kIOPolledPreflightCoreDumpState); - if (kIOReturnSuccess != err) - { - IOPolledFileClose(&gIOPolledCoreFileVars, NULL, NULL, 0, 0, 0); - IOLog("IOPolledFilePollersSetup for corefile failed with error: 0x%x\n", err); - gIOPolledCoreFileOpenRet = err; - } - else - { - IOLog("Opened corefile of size %llu MB\n", (corefile_size_bytes / (1024ULL * 1024ULL))); - } - - return (err); + err = IOPolledFileOpen(filename, + kIOPolledFileCreate, + corefile_size_bytes, kIOCoreDumpFreeSize, + NULL, 0, + &gIOPolledCoreFileVars, NULL, NULL, 0); + if (kIOReturnSuccess != err) { + IOLog("Failed to open corefile of size %llu MB (returned error 0x%x)\n", + (corefile_size_bytes / (1024ULL * 1024ULL)), err); + gIOPolledCoreFileOpenRet = err; + return err; + } + } while (false); + + err = IOPolledFilePollersSetup(gIOPolledCoreFileVars, 
kIOPolledPreflightCoreDumpState); + if (kIOReturnSuccess != err) { + IOPolledFileClose(&gIOPolledCoreFileVars, NULL, NULL, 0, 0, 0); + IOLog("IOPolledFilePollersSetup for corefile failed with error: 0x%x\n", err); + gIOPolledCoreFileOpenRet = err; + } else { + IOLog("Opened corefile of size %llu MB\n", (corefile_size_bytes / (1024ULL * 1024ULL))); + } + + return err; } -static void +static void IOClosePolledCoreFile(void) { - gIOPolledCoreFileOpenRet = kIOReturnNotOpen; - IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledPostflightCoreDumpState); - IOPolledFileClose(&gIOPolledCoreFileVars, NULL, NULL, 0, 0, 0); + gIOPolledCoreFileOpenRet = kIOReturnNotOpen; + IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledPostflightCoreDumpState); + IOPolledFileClose(&gIOPolledCoreFileVars, NULL, NULL, 0, 0, 0); } static thread_call_t gIOOpenPolledCoreFileTC; static IONotifier * gIOPolledCoreFileNotifier; static IONotifier * gIOPolledCoreFileInterestNotifier; -static IOReturn +static IOReturn KernelCoreMediaInterest(void * target, void * refCon, - UInt32 messageType, IOService * provider, - void * messageArgument, vm_size_t argSize ) + UInt32 messageType, IOService * provider, + void * messageArgument, vm_size_t argSize ) { - if (kIOMessageServiceIsTerminated == messageType) - { - gIOPolledCoreFileInterestNotifier->remove(); - gIOPolledCoreFileInterestNotifier = 0; - IOClosePolledCoreFile(); - } - - return (kIOReturnSuccess); + if (kIOMessageServiceIsTerminated == messageType) { + gIOPolledCoreFileInterestNotifier->remove(); + gIOPolledCoreFileInterestNotifier = 0; + IOClosePolledCoreFile(); + } + + return kIOReturnSuccess; } static void OpenKernelCoreMedia(thread_call_param_t p0, thread_call_param_t p1) { - IOService * newService; - OSString * string; - char filename[16]; - - newService = (IOService *) p1; - do - { - if (gIOPolledCoreFileVars) break; - string = OSDynamicCast(OSString, newService->getProperty(kIOBSDNameKey)); - if (!string) break; - snprintf(filename, sizeof(filename), "/dev/%s", string->getCStringNoCopy()); - if (kIOReturnSuccess != IOOpenPolledCoreFile(filename)) break; - gIOPolledCoreFileInterestNotifier = newService->registerInterest( - gIOGeneralInterest, &KernelCoreMediaInterest, NULL, 0); - } - while (false); - - newService->release(); + IOService * newService; + OSString * string; + char filename[16]; + + newService = (IOService *) p1; + do{ + if (gIOPolledCoreFileVars) { + break; + } + string = OSDynamicCast(OSString, newService->getProperty(kIOBSDNameKey)); + if (!string) { + break; + } + snprintf(filename, sizeof(filename), "/dev/%s", string->getCStringNoCopy()); + if (kIOReturnSuccess != IOOpenPolledCoreFile(filename)) { + break; + } + gIOPolledCoreFileInterestNotifier = newService->registerInterest( + gIOGeneralInterest, &KernelCoreMediaInterest, NULL, 0); + }while (false); + + newService->release(); } -static bool +static bool NewKernelCoreMedia(void * target, void * refCon, - IOService * newService, - IONotifier * notifier) + IOService * newService, + IONotifier * notifier) { - static volatile UInt32 onlyOneCorePartition = 0; - do - { - if (!OSCompareAndSwap(0, 1, &onlyOneCorePartition)) break; - if (gIOPolledCoreFileVars) break; - if (!gIOOpenPolledCoreFileTC) break; - newService = newService->getProvider(); - if (!newService) break; - newService->retain(); - thread_call_enter1(gIOOpenPolledCoreFileTC, newService); - } - while (false); - - return (false); + static volatile UInt32 onlyOneCorePartition = 0; + do{ + if (!OSCompareAndSwap(0, 1, 
&onlyOneCorePartition)) { + break; + } + if (gIOPolledCoreFileVars) { + break; + } + if (!gIOOpenPolledCoreFileTC) { + break; + } + newService = newService->getProvider(); + if (!newService) { + break; + } + newService->retain(); + thread_call_enter1(gIOOpenPolledCoreFileTC, newService); + }while (false); + + return false; } #endif /* IOPOLLED_COREFILE */ -extern "C" void +extern "C" void IOBSDMountChange(struct mount * mp, uint32_t op) { #if IOPOLLED_COREFILE - OSDictionary * bsdMatching; - OSDictionary * mediaMatching; - OSString * string; - - if (!gIOPolledCoreFileNotifier) do - { - if (!gIOOpenPolledCoreFileTC) gIOOpenPolledCoreFileTC = thread_call_allocate(&OpenKernelCoreMedia, NULL); - bsdMatching = IOService::serviceMatching("IOMediaBSDClient"); - if (!bsdMatching) break; - mediaMatching = IOService::serviceMatching("IOMedia"); - string = OSString::withCStringNoCopy("5361644D-6163-11AA-AA11-00306543ECAC"); - if (!string || !mediaMatching) break; - mediaMatching->setObject("Content", string); - string->release(); - bsdMatching->setObject(gIOParentMatchKey, mediaMatching); - mediaMatching->release(); - - gIOPolledCoreFileNotifier = IOService::addMatchingNotification( - gIOFirstMatchNotification, bsdMatching, - &NewKernelCoreMedia, NULL, NULL, -1000); - } - while (false); + OSDictionary * bsdMatching; + OSDictionary * mediaMatching; + OSString * string; + + if (!gIOPolledCoreFileNotifier) { + do{ + if (!gIOOpenPolledCoreFileTC) { + gIOOpenPolledCoreFileTC = thread_call_allocate(&OpenKernelCoreMedia, NULL); + } + bsdMatching = IOService::serviceMatching("IOMediaBSDClient"); + if (!bsdMatching) { + break; + } + mediaMatching = IOService::serviceMatching("IOMedia"); + string = OSString::withCStringNoCopy("5361644D-6163-11AA-AA11-00306543ECAC"); + if (!string || !mediaMatching) { + break; + } + mediaMatching->setObject("Content", string); + string->release(); + bsdMatching->setObject(gIOParentMatchKey, mediaMatching); + mediaMatching->release(); + + gIOPolledCoreFileNotifier = IOService::addMatchingNotification( + gIOFirstMatchNotification, bsdMatching, + &NewKernelCoreMedia, NULL, NULL, -1000); + }while (false); + } #if CONFIG_EMBEDDED - uint64_t flags; - char path[128]; - int pathLen; - vnode_t vn; - int result; - - switch (op) - { + uint64_t flags; + char path[128]; + int pathLen; + vnode_t vn; + int result; + + switch (op) { case kIOMountChangeMount: case kIOMountChangeDidResize: - if (gIOPolledCoreFileVars) break; - flags = vfs_flags(mp); - if (MNT_RDONLY & flags) break; - if (!(MNT_LOCAL & flags)) break; - - vn = vfs_vnodecovered(mp); - if (!vn) break; - pathLen = sizeof(path); - result = vn_getpath(vn, &path[0], &pathLen); - vnode_put(vn); - if (0 != result) break; - if (!pathLen) break; + if (gIOPolledCoreFileVars) { + break; + } + flags = vfs_flags(mp); + if (MNT_RDONLY & flags) { + break; + } + if (!(MNT_LOCAL & flags)) { + break; + } + + vn = vfs_vnodecovered(mp); + if (!vn) { + break; + } + pathLen = sizeof(path); + result = vn_getpath(vn, &path[0], &pathLen); + vnode_put(vn); + if (0 != result) { + break; + } + if (!pathLen) { + break; + } #if defined(XNU_TARGET_OS_BRIDGE) - // on bridgeOS systems we put the core in /private/var/internal. We don't - // want to match with /private/var because /private/var/internal is often mounted - // over /private/var - if ((pathLen - 1) < (int) strlen("/private/var/internal")) break; + // on bridgeOS systems we put the core in /private/var/internal. 
We don't + // want to match with /private/var because /private/var/internal is often mounted + // over /private/var + if ((pathLen - 1) < (int) strlen("/private/var/internal")) { + break; + } #endif - if (0 != strncmp(path, kIOCoreDumpPath, pathLen - 1)) break; - IOOpenPolledCoreFile(kIOCoreDumpPath); - break; + if (0 != strncmp(path, kIOCoreDumpPath, pathLen - 1)) { + break; + } + IOOpenPolledCoreFile(kIOCoreDumpPath); + break; case kIOMountChangeUnmount: case kIOMountChangeWillResize: - if (gIOPolledCoreFileVars && (mp == kern_file_mount(gIOPolledCoreFileVars->fileRef))) - { - IOClosePolledCoreFile(); - } - break; - } + if (gIOPolledCoreFileVars && (mp == kern_file_mount(gIOPolledCoreFileVars->fileRef))) { + IOClosePolledCoreFile(); + } + break; + } #endif /* CONFIG_EMBEDDED */ #endif /* IOPOLLED_COREFILE */ } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -extern "C" boolean_t +extern "C" boolean_t IOTaskHasEntitlement(task_t task, const char * entitlement) { - OSObject * obj; - obj = IOUserClient::copyClientEntitlement(task, entitlement); - if (!obj) return (false); - obj->release(); - return (obj != kOSBooleanFalse); + OSObject * obj; + obj = IOUserClient::copyClientEntitlement(task, entitlement); + if (!obj) { + return false; + } + obj->release(); + return obj != kOSBooleanFalse; } - diff --git a/iokit/bsddev/IOKitBSDInit.h b/iokit/bsddev/IOKitBSDInit.h index 7d0d3e760..2722cea9b 100644 --- a/iokit/bsddev/IOKitBSDInit.h +++ b/iokit/bsddev/IOKitBSDInit.h @@ -2,7 +2,7 @@ * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef __cplusplus @@ -44,4 +44,3 @@ IOCatalogueMatchingDriversPresent( const char * property ); #ifdef __cplusplus } #endif - diff --git a/iokit/bsddev/skywalk/IOSkywalkSupport.cpp b/iokit/bsddev/skywalk/IOSkywalkSupport.cpp new file mode 100644 index 000000000..b38d804d8 --- /dev/null +++ b/iokit/bsddev/skywalk/IOSkywalkSupport.cpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2015-2016 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#if defined(__x86_64__) +#include // IOSKCopyKextIdentifierWithAddress() +#endif + + +#if defined(__x86_64__) +const OSSymbol * +IOSKCopyKextIdentifierWithAddress( vm_address_t address ); + +const OSSymbol * +IOSKCopyKextIdentifierWithAddress( vm_address_t address ) +{ + const OSSymbol * id = 0; + + OSKext * kext = OSKext::lookupKextWithAddress(address); + if (kext) { + id = kext->getIdentifier(); + if (id) { + id->retain(); + } + kext->release(); + } + return id; +} +#endif /* __x86_64__ */ diff --git a/iokit/conf/files b/iokit/conf/files index ac6b06e0a..5af89b2b0 100644 --- a/iokit/conf/files +++ b/iokit/conf/files @@ -107,4 +107,7 @@ iokit/Kernel/IOPowerConnection.cpp optional iokitcpp # System Management iokit/Families/IOSystemManagement/IOWatchDogTimer.cpp optional iokitcpp +# Performance Control +iokit/Kernel/IOPerfControl.cpp optional iokitcpp +iokit/bsddev/skywalk/IOSkywalkSupport.cpp optional iokitcpp diff --git a/libkdd/kcdata.h b/libkdd/kcdata.h index e36c55352..85cf4998b 100644 --- a/libkdd/kcdata.h +++ b/libkdd/kcdata.h @@ -305,7 +305,7 @@ kcs_get_elem_size(kcdata_subtype_descriptor_t d) { if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) { /* size is composed as ((count &0xffff)<<16 | (elem_size & 0xffff)) */ - return (uint32_t)((d->kcs_elem_size & 0xffff) * ((d->kcs_elem_size & 0xffff0000)>>16)); + return (uint32_t)((d->kcs_elem_size & 0xffff) * ((d->kcs_elem_size & 0xffff0000) >> 16)); } return d->kcs_elem_size; } @@ -313,8 +313,9 @@ kcs_get_elem_size(kcdata_subtype_descriptor_t d) static inline uint32_t kcs_get_elem_count(kcdata_subtype_descriptor_t d) { - if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) + if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) { return (d->kcs_elem_size >> 16) & 0xffff; + } return 1; } @@ -323,12 +324,11 @@ kcs_set_elem_size(kcdata_subtype_descriptor_t d, uint32_t size, uint32_t count) { if (count > 1) { /* means we are setting up an array */ - if (size > 0xffff || count > 0xffff) + if (size > 0xffff || count > 0xffff) { return -1; //invalid argument + } d->kcs_elem_size = ((count & 0xffff) << 16 | (size & 0xffff)); - } - else - { + } else { d->kcs_elem_size = size; } return 0; @@ -367,9 +367,9 @@ struct kcdata_type_definition { #define KCDATA_TYPE_TYPEDEFINTION 0x12u /* Meta type that describes a type on the fly. */ #define KCDATA_TYPE_CONTAINER_BEGIN \ 0x13u /* Container type which has corresponding CONTAINER_END header. \ - * KCDATA_TYPE_CONTAINER_BEGIN has type in the data segment. \ - * Both headers have (uint64_t) ID for matching up nested data. 
\ - */ + * KCDATA_TYPE_CONTAINER_BEGIN has type in the data segment. \ + * Both headers have (uint64_t) ID for matching up nested data. \ + */ #define KCDATA_TYPE_CONTAINER_END 0x14u #define KCDATA_TYPE_ARRAY_PAD0 0x20u /* Array of data with 0 byte of padding*/ @@ -423,14 +423,14 @@ struct kcdata_type_definition { /* next type range number available 0x1060 */ /**************** definitions for XNUPOST *********************/ -#define XNUPOST_KCTYPE_TESTCONFIG 0x1040 +#define XNUPOST_KCTYPE_TESTCONFIG 0x1040 /**************** definitions for stackshot *********************/ /* This value must always match IO_NUM_PRIORITIES defined in thread_info.h */ -#define STACKSHOT_IO_NUM_PRIORITIES 4 +#define STACKSHOT_IO_NUM_PRIORITIES 4 /* This value must always match MAXTHREADNAMESIZE used in bsd */ -#define STACKSHOT_MAX_THREAD_NAME_SIZE 64 +#define STACKSHOT_MAX_THREAD_NAME_SIZE 64 /* * NOTE: Please update kcdata/libkdd/kcdtypes.c if you make any changes @@ -485,35 +485,35 @@ struct stack_snapshot_frame32 { }; struct stack_snapshot_frame64 { - uint64_t lr; - uint64_t sp; + uint64_t lr; + uint64_t sp; }; struct dyld_uuid_info_32 { - uint32_t imageLoadAddress; /* base address image is mapped at */ - uuid_t imageUUID; + uint32_t imageLoadAddress; /* base address image is mapped at */ + uuid_t imageUUID; }; struct dyld_uuid_info_64 { - uint64_t imageLoadAddress; /* XXX image slide */ - uuid_t imageUUID; + uint64_t imageLoadAddress; /* XXX image slide */ + uuid_t imageUUID; }; struct dyld_uuid_info_64_v2 { - uint64_t imageLoadAddress; /* XXX image slide */ - uuid_t imageUUID; - /* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */ - uint64_t imageSlidBaseAddress; /* slid base address of image */ + uint64_t imageLoadAddress; /* XXX image slide */ + uuid_t imageUUID; + /* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */ + uint64_t imageSlidBaseAddress; /* slid base address of image */ }; struct user32_dyld_uuid_info { - uint32_t imageLoadAddress; /* base address image is mapped into */ - uuid_t imageUUID; /* UUID of image */ + uint32_t imageLoadAddress; /* base address image is mapped into */ + uuid_t imageUUID; /* UUID of image */ }; struct user64_dyld_uuid_info { - uint64_t imageLoadAddress; /* base address image is mapped into */ - uuid_t imageUUID; /* UUID of image */ + uint64_t imageLoadAddress; /* base address image is mapped into */ + uuid_t imageUUID; /* UUID of image */ }; enum task_snapshot_flags { @@ -561,22 +561,22 @@ enum thread_snapshot_flags { }; struct mem_and_io_snapshot { - uint32_t snapshot_magic; - uint32_t free_pages; - uint32_t active_pages; - uint32_t inactive_pages; - uint32_t purgeable_pages; - uint32_t wired_pages; - uint32_t speculative_pages; - uint32_t throttled_pages; - uint32_t filebacked_pages; - uint32_t compressions; - uint32_t decompressions; - uint32_t compressor_size; - int32_t busy_buffer_count; - uint32_t pages_wanted; - uint32_t pages_reclaimed; - uint8_t pages_wanted_reclaimed_valid; // did mach_vm_pressure_monitor succeed? + uint32_t snapshot_magic; + uint32_t free_pages; + uint32_t active_pages; + uint32_t inactive_pages; + uint32_t purgeable_pages; + uint32_t wired_pages; + uint32_t speculative_pages; + uint32_t throttled_pages; + uint32_t filebacked_pages; + uint32_t compressions; + uint32_t decompressions; + uint32_t compressor_size; + int32_t busy_buffer_count; + uint32_t pages_wanted; + uint32_t pages_reclaimed; + uint8_t pages_wanted_reclaimed_valid; // did mach_vm_pressure_monitor succeed? 
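/*
 * [Editor's aside — not part of the patch; at file scope this would sit
 * beside kcs_set_elem_size().] For array subtypes, kcs_elem_size packs the
 * element count and element size into one 32-bit field as
 * ((count & 0xffff) << 16) | (size & 0xffff). Worked example: count 4 and
 * size 24 pack to 0x00040018, and kcs_get_elem_size() then reports
 * 4 * 24 == 96 total bytes. Sketch with a hypothetical name:
 */
static inline uint32_t
example_pack_array_elem_size(uint32_t size, uint32_t count)
{
	/* valid only when both values fit in 16 bits, per kcs_set_elem_size() */
	return ((count & 0xffff) << 16) | (size & 0xffff);
}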
} __attribute__((packed)); /* SS_TH_* macros are for ths_state */ @@ -727,8 +727,7 @@ struct thread_delta_snapshot_v3 { uint64_t tds_effective_policy; } __attribute__ ((packed)); -struct io_stats_snapshot -{ +struct io_stats_snapshot { /* * I/O Statistics * XXX: These fields must be together. @@ -748,7 +747,6 @@ struct io_stats_snapshot uint64_t ss_metadata_count; uint64_t ss_metadata_size; /* XXX: I/O Statistics end */ - } __attribute__ ((packed)); struct task_snapshot_v2 { @@ -810,10 +808,10 @@ struct stackshot_fault_stats { } __attribute__((packed)); typedef struct stackshot_thread_waitinfo { - uint64_t owner; /* The thread that owns the object */ - uint64_t waiter; /* The thread that's waiting on the object */ - uint64_t context; /* A context uniquely identifying the object */ - uint8_t wait_type; /* The type of object that the thread is waiting on */ + uint64_t owner; /* The thread that owns the object */ + uint64_t waiter; /* The thread that's waiting on the object */ + uint64_t context; /* A context uniquely identifying the object */ + uint8_t wait_type; /* The type of object that the thread is waiting on */ } __attribute__((packed)) thread_waitinfo_t; #define STACKSHOT_WAITOWNER_KERNEL (UINT64_MAX - 1) @@ -841,12 +839,12 @@ struct stack_snapshot_stacktop { /* FIXME some of these types aren't clean (fixed width, packed, and defined *here*) */ struct crashinfo_proc_uniqidentifierinfo { - uint8_t p_uuid[16]; /* UUID of the main executable */ - uint64_t p_uniqueid; /* 64 bit unique identifier for process */ - uint64_t p_puniqueid; /* unique identifier for process's parent */ - uint64_t p_reserve2; /* reserved for future use */ - uint64_t p_reserve3; /* reserved for future use */ - uint64_t p_reserve4; /* reserved for future use */ + uint8_t p_uuid[16]; /* UUID of the main executable */ + uint64_t p_uniqueid; /* 64 bit unique identifier for process */ + uint64_t p_puniqueid; /* unique identifier for process's parent */ + uint64_t p_reserve2; /* reserved for future use */ + uint64_t p_reserve3; /* reserved for future use */ + uint64_t p_reserve4; /* reserved for future use */ } __attribute__((packed)); #define TASK_CRASHINFO_BEGIN KCDATA_BUFFER_BEGIN_CRASHINFO @@ -861,7 +859,7 @@ struct crashinfo_proc_uniqidentifierinfo { #define TASK_CRASHINFO_PID 0x805 #define TASK_CRASHINFO_PPID 0x806 #define TASK_CRASHINFO_RUSAGE 0x807 /* struct rusage DEPRECATED do not use. - This struct has longs in it */ + * This struct has longs in it */ #define TASK_CRASHINFO_RUSAGE_INFO 0x808 /* struct rusage_info_v3 from resource.h */ #define TASK_CRASHINFO_PROC_NAME 0x809 /* char * */ #define TASK_CRASHINFO_PROC_STARTTIME 0x80B /* struct timeval64 */ @@ -912,10 +910,10 @@ struct crashinfo_proc_uniqidentifierinfo { #define EXIT_REASON_DISPATCH_QUEUE_NO 0x1006 struct exit_reason_snapshot { - uint32_t ers_namespace; - uint64_t ers_code; - /* end of version 1 of exit_reason_snapshot. sizeof v1 was 12 */ - uint64_t ers_flags; + uint32_t ers_namespace; + uint64_t ers_code; + /* end of version 1 of exit_reason_snapshot. 
sizeof v1 was 12 */ + uint64_t ers_flags; } __attribute__((packed)); #define EXIT_REASON_CODESIG_PATH_MAX 1024 @@ -951,7 +949,9 @@ typedef struct kcdata_iter { static inline -kcdata_iter_t kcdata_iter(void *buffer, unsigned long size) { +kcdata_iter_t +kcdata_iter(void *buffer, unsigned long size) +{ kcdata_iter_t iter; iter.item = (kcdata_item_t) buffer; iter.end = (void*) (((uintptr_t)buffer) + size); @@ -962,7 +962,9 @@ static inline kcdata_iter_t kcdata_iter_unsafe(void *buffer) __attribute__((deprecated)); static inline -kcdata_iter_t kcdata_iter_unsafe(void *buffer) { +kcdata_iter_t +kcdata_iter_unsafe(void *buffer) +{ kcdata_iter_t iter; iter.item = (kcdata_item_t) buffer; iter.end = (void*) (uintptr_t) ~0; @@ -972,15 +974,19 @@ kcdata_iter_t kcdata_iter_unsafe(void *buffer) { static const kcdata_iter_t kcdata_invalid_iter = { .item = 0, .end = 0 }; static inline -int kcdata_iter_valid(kcdata_iter_t iter) { +int +kcdata_iter_valid(kcdata_iter_t iter) +{ return - ( (uintptr_t)iter.item + sizeof(struct kcdata_item) <= (uintptr_t)iter.end ) && - ( (uintptr_t)iter.item + sizeof(struct kcdata_item) + iter.item->size <= (uintptr_t)iter.end); + ((uintptr_t)iter.item + sizeof(struct kcdata_item) <= (uintptr_t)iter.end) && + ((uintptr_t)iter.item + sizeof(struct kcdata_item) + iter.item->size <= (uintptr_t)iter.end); } static inline -kcdata_iter_t kcdata_iter_next(kcdata_iter_t iter) { +kcdata_iter_t +kcdata_iter_next(kcdata_iter_t iter) +{ iter.item = (kcdata_item_t) (((uintptr_t)iter.item) + sizeof(struct kcdata_item) + (iter.item->size)); return iter; } @@ -988,10 +994,11 @@ kcdata_iter_t kcdata_iter_next(kcdata_iter_t iter) { static inline uint32_t kcdata_iter_type(kcdata_iter_t iter) { - if ((iter.item->type & ~0xfu) == KCDATA_TYPE_ARRAY_PAD0) + if ((iter.item->type & ~0xfu) == KCDATA_TYPE_ARRAY_PAD0) { return KCDATA_TYPE_ARRAY; - else + } else { return iter.item->type; + } } static inline uint32_t @@ -1012,9 +1019,8 @@ static inline int kcdata_iter_is_legacy_item(kcdata_iter_t iter, uint32_t legacy_size) { uint32_t legacy_size_padded = legacy_size + kcdata_calc_padding(legacy_size); - return (iter.item->size == legacy_size_padded && - (iter.item->flags & (KCDATA_FLAGS_STRUCT_PADDING_MASK | KCDATA_FLAGS_STRUCT_HAS_PADDING)) == 0); - + return iter.item->size == legacy_size_padded && + (iter.item->flags & (KCDATA_FLAGS_STRUCT_PADDING_MASK | KCDATA_FLAGS_STRUCT_HAS_PADDING)) == 0; } static inline uint32_t @@ -1044,10 +1050,11 @@ kcdata_iter_size(kcdata_iter_t iter) } not_legacy: default: - if (iter.item->size < kcdata_flags_get_padding(iter.item->flags)) + if (iter.item->size < kcdata_flags_get_padding(iter.item->flags)) { return 0; - else + } else { return iter.item->size - kcdata_flags_get_padding(iter.item->flags); + } } } @@ -1058,18 +1065,24 @@ kcdata_iter_flags(kcdata_iter_t iter) } static inline -void * kcdata_iter_payload(kcdata_iter_t iter) { +void * +kcdata_iter_payload(kcdata_iter_t iter) +{ return &iter.item->data; } static inline -uint32_t kcdata_iter_array_elem_type(kcdata_iter_t iter) { +uint32_t +kcdata_iter_array_elem_type(kcdata_iter_t iter) +{ return (iter.item->flags >> 32) & UINT32_MAX; } static inline -uint32_t kcdata_iter_array_elem_count(kcdata_iter_t iter) { +uint32_t +kcdata_iter_array_elem_count(kcdata_iter_t iter) +{ return (iter.item->flags) & UINT32_MAX; } @@ -1083,8 +1096,9 @@ uint32_t kcdata_iter_array_elem_count(kcdata_iter_t iter) { static inline uint32_t -kcdata_iter_array_size_switch(kcdata_iter_t iter) { - switch(kcdata_iter_array_elem_type(iter)) { 
+kcdata_iter_array_size_switch(kcdata_iter_t iter) +{ + switch (kcdata_iter_array_elem_type(iter)) { case KCDATA_TYPE_LIBRARY_LOADINFO: return sizeof(struct dyld_uuid_info_32); case KCDATA_TYPE_LIBRARY_LOADINFO64: @@ -1099,8 +1113,8 @@ kcdata_iter_array_size_switch(kcdata_iter_t iter) { return sizeof(int32_t); case STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT: return sizeof(struct thread_delta_snapshot_v2); - // This one is only here to make some unit tests work. It should be OK to - // remove. + // This one is only here to make some unit tests work. It should be OK to + // remove. case TASK_CRASHINFO_CRASHED_THREADID: return sizeof(uint64_t); default: @@ -1109,54 +1123,70 @@ kcdata_iter_array_size_switch(kcdata_iter_t iter) { } static inline -int kcdata_iter_array_valid(kcdata_iter_t iter) { - if (!kcdata_iter_valid(iter)) +int +kcdata_iter_array_valid(kcdata_iter_t iter) +{ + if (!kcdata_iter_valid(iter)) { return 0; - if (kcdata_iter_type(iter) != KCDATA_TYPE_ARRAY) + } + if (kcdata_iter_type(iter) != KCDATA_TYPE_ARRAY) { return 0; - if (kcdata_iter_array_elem_count(iter) == 0) + } + if (kcdata_iter_array_elem_count(iter) == 0) { return iter.item->size == 0; + } if (iter.item->type == KCDATA_TYPE_ARRAY) { uint32_t elem_size = kcdata_iter_array_size_switch(iter); - if (elem_size == 0) + if (elem_size == 0) { return 0; + } /* sizes get aligned to the nearest 16. */ return - kcdata_iter_array_elem_count(iter) <= iter.item->size / elem_size && - iter.item->size % kcdata_iter_array_elem_count(iter) < 16; + kcdata_iter_array_elem_count(iter) <= iter.item->size / elem_size && + iter.item->size % kcdata_iter_array_elem_count(iter) < 16; } else { return - (iter.item->type & 0xf) <= iter.item->size && - kcdata_iter_array_elem_count(iter) <= iter.item->size - (iter.item->type & 0xf) && - (iter.item->size - (iter.item->type & 0xf)) % kcdata_iter_array_elem_count(iter) == 0; + (iter.item->type & 0xf) <= iter.item->size && + kcdata_iter_array_elem_count(iter) <= iter.item->size - (iter.item->type & 0xf) && + (iter.item->size - (iter.item->type & 0xf)) % kcdata_iter_array_elem_count(iter) == 0; } } static inline -uint32_t kcdata_iter_array_elem_size(kcdata_iter_t iter) { - if (iter.item->type == KCDATA_TYPE_ARRAY) +uint32_t +kcdata_iter_array_elem_size(kcdata_iter_t iter) +{ + if (iter.item->type == KCDATA_TYPE_ARRAY) { return kcdata_iter_array_size_switch(iter); - if (kcdata_iter_array_elem_count(iter) == 0) + } + if (kcdata_iter_array_elem_count(iter) == 0) { return 0; + } return (iter.item->size - (iter.item->type & 0xf)) / kcdata_iter_array_elem_count(iter); } static inline -int kcdata_iter_container_valid(kcdata_iter_t iter) { +int +kcdata_iter_container_valid(kcdata_iter_t iter) +{ return - kcdata_iter_valid(iter) && - kcdata_iter_type(iter) == KCDATA_TYPE_CONTAINER_BEGIN && - iter.item->size >= sizeof(uint32_t); + kcdata_iter_valid(iter) && + kcdata_iter_type(iter) == KCDATA_TYPE_CONTAINER_BEGIN && + iter.item->size >= sizeof(uint32_t); } static inline -uint32_t kcdata_iter_container_type(kcdata_iter_t iter) { - return * (uint32_t *) kcdata_iter_payload(iter); +uint32_t +kcdata_iter_container_type(kcdata_iter_t iter) +{ + return *(uint32_t *) kcdata_iter_payload(iter); } static inline -uint64_t kcdata_iter_container_id(kcdata_iter_t iter) { +uint64_t +kcdata_iter_container_id(kcdata_iter_t iter) +{ return iter.item->flags; } @@ -1170,22 +1200,27 @@ kcdata_iter_find_type(kcdata_iter_t iter, uint32_t type) { KCDATA_ITER_FOREACH(iter) { - if (kcdata_iter_type(iter) == type) + if (kcdata_iter_type(iter) 
== type) { return iter; + } } return kcdata_invalid_iter; } static inline -int kcdata_iter_data_with_desc_valid(kcdata_iter_t iter, uint32_t minsize) { +int +kcdata_iter_data_with_desc_valid(kcdata_iter_t iter, uint32_t minsize) +{ return - kcdata_iter_valid(iter) && - kcdata_iter_size(iter) >= KCDATA_DESC_MAXLEN + minsize && - ((char*)kcdata_iter_payload(iter))[KCDATA_DESC_MAXLEN-1] == 0; + kcdata_iter_valid(iter) && + kcdata_iter_size(iter) >= KCDATA_DESC_MAXLEN + minsize && + ((char*)kcdata_iter_payload(iter))[KCDATA_DESC_MAXLEN - 1] == 0; } static inline -char *kcdata_iter_string(kcdata_iter_t iter, uint32_t offset) { +char * +kcdata_iter_string(kcdata_iter_t iter, uint32_t offset) +{ if (offset > kcdata_iter_size(iter)) { return NULL; } @@ -1198,13 +1233,18 @@ char *kcdata_iter_string(kcdata_iter_t iter, uint32_t offset) { } } -static inline void kcdata_iter_get_data_with_desc(kcdata_iter_t iter, char **desc_ptr, void **data_ptr, uint32_t *size_ptr) { - if (desc_ptr) +static inline void +kcdata_iter_get_data_with_desc(kcdata_iter_t iter, char **desc_ptr, void **data_ptr, uint32_t *size_ptr) +{ + if (desc_ptr) { *desc_ptr = (char *)kcdata_iter_payload(iter); - if (data_ptr) + } + if (data_ptr) { *data_ptr = (void *)((uintptr_t)kcdata_iter_payload(iter) + KCDATA_DESC_MAXLEN); - if (size_ptr) + } + if (size_ptr) { *size_ptr = kcdata_iter_size(iter) - KCDATA_DESC_MAXLEN; + } } #endif diff --git a/libkdd/kcdtypes.c b/libkdd/kcdtypes.c index c9a2809d5..e3ef22e33 100644 --- a/libkdd/kcdtypes.c +++ b/libkdd/kcdtypes.c @@ -63,7 +63,7 @@ struct kcdata_type_definition * kcdata_get_typedescription(unsigned type_id, uin static uint32_t get_kctype_subtype_size(kctype_subtype_t type); static void setup_subtype_description(kcdata_subtype_descriptor_t desc, kctype_subtype_t type, uint32_t offset, char * name); static void setup_subtype_array_description( - kcdata_subtype_descriptor_t desc, kctype_subtype_t type, uint32_t offset, uint32_t count, char * name); + kcdata_subtype_descriptor_t desc, kctype_subtype_t type, uint32_t offset, uint32_t count, char * name); static void setup_type_definition(struct kcdata_type_definition * d, uint32_t type, uint32_t num_elems, char * name); struct kcdata_type_definition * @@ -75,8 +75,9 @@ kcdata_get_typedescription(unsigned type_id, uint8_t * buffer, uint32_t buffer_s #define _SUBTYPE_ARRAY(t, s, f, c) setup_subtype_array_description(&subtypes[i++], (t), offsetof(s, f), (c), _STR_VALUE(f)) #define _STRINGTYPE(f) setup_subtype_array_description(&subtypes[i++], KC_ST_CHAR, 0, UINT16_MAX, f) - if (buffer_size < sizeof(struct kcdata_type_definition) || buffer == NULL) + if (buffer_size < sizeof(struct kcdata_type_definition) || buffer == NULL) { return NULL; + } struct kcdata_type_definition * retval = (struct kcdata_type_definition *)&buffer[0]; kcdata_subtype_descriptor_t subtypes = (kcdata_subtype_descriptor_t)&buffer[sizeof(struct kcdata_type_definition)]; @@ -120,7 +121,7 @@ kcdata_get_typedescription(unsigned type_id, uint8_t * buffer, uint32_t buffer_s setup_type_definition(retval, type_id, i, "int64_desc"); break; } - + case KCDATA_TYPE_TYPEDEFINTION: { i = 0; setup_subtype_description(&subtypes[i++], KC_ST_UINT32, offsetof(struct kcdata_type_definition, kct_type_identifier), "typeID"); @@ -490,14 +491,14 @@ kcdata_get_typedescription(unsigned type_id, uint8_t * buffer, uint32_t buffer_s } /* case TASK_CRASHINFO_RUSAGE: { */ - /* /\* */ - /* * rusage is a complex structure and is only for legacy use for crashed processes rusage info. 
*/ - /* * So we just consider it as opaque data. */ - /* *\/ */ - /* i = 0; */ - /* setup_subtype_array_description(&subtypes[i++], KC_ST_UINT8, 0, sizeof(struct rusage), "rusage"); */ - /* setup_type_definition(retval, type_id, i, "rusage"); */ - /* break; */ + /* /\* */ + /* * rusage is a complex structure and is only for legacy use for crashed processes rusage info. */ + /* * So we just consider it as opaque data. */ + /* *\/ */ + /* i = 0; */ + /* setup_subtype_array_description(&subtypes[i++], KC_ST_UINT8, 0, sizeof(struct rusage), "rusage"); */ + /* setup_type_definition(retval, type_id, i, "rusage"); */ + /* break; */ /* } */ case TASK_CRASHINFO_RUSAGE_INFO: { @@ -598,7 +599,7 @@ kcdata_get_typedescription(unsigned type_id, uint8_t * buffer, uint32_t buffer_s _SUBTYPE(KC_ST_UINT64, struct jetsam_coalition_snapshot, jcs_leader_task_uniqueid); setup_type_definition(retval, type_id, i, "jetsam_coalition_snapshot"); break; - } + } case STACKSHOT_KCTYPE_JETSAM_COALITION: { i = 0; @@ -613,7 +614,7 @@ kcdata_get_typedescription(unsigned type_id, uint8_t * buffer, uint32_t buffer_s _SUBTYPE(KC_ST_UINT64, struct instrs_cycles_snapshot, ics_cycles); setup_type_definition(retval, type_id, i, "instrs_cycles_snapshot"); break; - } + } case STACKSHOT_KCTYPE_USER_STACKTOP: { i = 0; @@ -755,7 +756,6 @@ kcdata_get_typedescription(unsigned type_id, uint8_t * buffer, uint32_t buffer_s setup_type_definition(retval, type_id, i, "exit_reason_basic_info"); break; - } case EXIT_REASON_USER_DESC: { @@ -831,14 +831,14 @@ kcdata_get_typedescription(unsigned type_id, uint8_t * buffer, uint32_t buffer_s setup_type_definition(retval, type_id, i, "system_shared_cache_layout"); break; } - + default: retval = NULL; break; } assert(retval == NULL || (buffer_size > sizeof(struct kcdata_type_definition) + - (retval->kct_num_elements * sizeof(struct kcdata_subtype_descriptor)))); + (retval->kct_num_elements * sizeof(struct kcdata_subtype_descriptor)))); return retval; } @@ -882,7 +882,7 @@ get_kctype_subtype_size(kctype_subtype_t type) static void setup_subtype_array_description( - kcdata_subtype_descriptor_t desc, kctype_subtype_t type, uint32_t offset, uint32_t count, char * name) + kcdata_subtype_descriptor_t desc, kctype_subtype_t type, uint32_t offset, uint32_t count, char * name) { desc->kcs_flags = KCS_SUBTYPE_FLAGS_ARRAY; desc->kcs_elem_type = type; @@ -902,4 +902,3 @@ setup_subtype_description(kcdata_subtype_descriptor_t desc, kctype_subtype_t typ memcpy(desc->kcs_name, name, sizeof(desc->kcs_name)); desc->kcs_name[sizeof(desc->kcs_name) - 1] = '\0'; } - diff --git a/libkdd/kdd.h b/libkdd/kdd.h index 8a2a75dd6..93c44a841 100644 --- a/libkdd/kdd.h +++ b/libkdd/kdd.h @@ -118,7 +118,7 @@ NSMutableDictionary * _Nullable parseKCDataArray(kcdata_iter_t iter, NSError * _ * @discussion * This function tries to parse one container. If it encounters sub containers * they will be parsed and collected within the same dictionary. - * Other data type fields will also be parsed based on their type. + * Other data type fields will also be parsed based on their type. * */ diff --git a/libkern/.clang-format b/libkern/.clang-format deleted file mode 120000 index f91369598..000000000 --- a/libkern/.clang-format +++ /dev/null @@ -1 +0,0 @@ -./iokit/.clang-format \ No newline at end of file diff --git a/libkern/OSKextLib.cpp b/libkern/OSKextLib.cpp index 5b9ee7b4a..4fff0ba81 100644 --- a/libkern/OSKextLib.cpp +++ b/libkern/OSKextLib.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2008-2016 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,15 +37,15 @@ extern "C" { #include extern "C" { - #if PRAGMA_MARK #pragma mark C-based kext interface (loading/loaded kexts only) #endif /********************************************************************* *********************************************************************/ -kern_return_t OSKextLoadKextWithIdentifier(const char * bundle_id) +kern_return_t +OSKextLoadKextWithIdentifier(const char * bundle_id) { - return OSKext::loadKextWithIdentifier(bundle_id); + return OSKext::loadKextWithIdentifier(bundle_id); } uint32_t OSKextGetLoadTagForIdentifier(const char * kextIdentifier); @@ -54,102 +54,107 @@ uint32_t OSKextGetLoadTagForIdentifier(const char * kextIdentifier); uint32_t OSKextGetLoadTagForIdentifier(const char * kextIdentifier) { - uint32_t result = kOSKextInvalidLoadTag; - OSKext * theKext = NULL; // must release + uint32_t result = kOSKextInvalidLoadTag; + OSKext * theKext = NULL; // must release - if (!kextIdentifier) { - goto finish; - } + if (!kextIdentifier) { + goto finish; + } - theKext = OSKext::lookupKextWithIdentifier(kextIdentifier); - if (theKext && theKext->isLoaded()) { - result = theKext->getLoadTag(); - } + theKext = OSKext::lookupKextWithIdentifier(kextIdentifier); + if (theKext && theKext->isLoaded()) { + result = theKext->getLoadTag(); + } finish: - if (theKext) theKext->release(); - return result; + if (theKext) { + theKext->release(); + } + return result; } /********************************************************************* *********************************************************************/ -OSReturn OSKextRetainKextWithLoadTag(uint32_t loadTag) +OSReturn +OSKextRetainKextWithLoadTag(uint32_t loadTag) { - OSReturn result = kOSKextReturnNotFound; - OSKext * theKext = NULL; // do not release; as this function is a retain - - if (loadTag == kOSKextInvalidLoadTag) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - theKext = OSKext::lookupKextWithLoadTag(loadTag); - if (theKext) { - result = kOSReturnSuccess; - - OSKextLog(theKext, - kOSKextLogDebugLevel | - kOSKextLogKextBookkeepingFlag, - "Kext %s (load tag %d) has been retained.", - theKext->getIdentifierCString(), - loadTag); - - /* Call this after so a log message about autounload comes second. 
- */ - theKext->setAutounloadEnabled(true); - } else { - OSKextLog(theKext, - kOSKextLogErrorLevel | - kOSKextLogKextBookkeepingFlag, - "Can't retain kext with load tag %d - no such kext is loaded.", - loadTag); - } + OSReturn result = kOSKextReturnNotFound; + OSKext * theKext = NULL;// do not release; as this function is a retain + + if (loadTag == kOSKextInvalidLoadTag) { + result = kOSKextReturnInvalidArgument; + goto finish; + } + theKext = OSKext::lookupKextWithLoadTag(loadTag); + if (theKext) { + result = kOSReturnSuccess; + + OSKextLog(theKext, + kOSKextLogDebugLevel | + kOSKextLogKextBookkeepingFlag, + "Kext %s (load tag %d) has been retained.", + theKext->getIdentifierCString(), + loadTag); + + /* Call this after so a log message about autounload comes second. + */ + theKext->setAutounloadEnabled(true); + } else { + OSKextLog(theKext, + kOSKextLogErrorLevel | + kOSKextLogKextBookkeepingFlag, + "Can't retain kext with load tag %d - no such kext is loaded.", + loadTag); + } finish: - return result; + return result; } /********************************************************************* *********************************************************************/ -OSReturn OSKextReleaseKextWithLoadTag(uint32_t loadTag) +OSReturn +OSKextReleaseKextWithLoadTag(uint32_t loadTag) { - OSReturn result = kOSKextReturnNotFound; - OSKext * theKext = NULL; // must release twice! - - if (loadTag == kOSKextInvalidLoadTag) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - theKext = OSKext::lookupKextWithLoadTag(loadTag); - if (theKext) { - result = kOSReturnSuccess; - OSKext::considerUnloads(); // schedule autounload pass - theKext->release(); // do the release the caller wants - theKext->release(); // now do the release on the lookup - OSKextLog(theKext, - kOSKextLogDebugLevel | - kOSKextLogKextBookkeepingFlag, - "Kext %s (load tag %d) has been released.", - theKext->getIdentifierCString(), - loadTag); - } else { - OSKextLog(theKext, - kOSKextLogErrorLevel | - kOSKextLogKextBookkeepingFlag, - "Can't release kext with load tag %d - no such kext is loaded.", - loadTag); - } - - // xxx - should I check that the refcount of the OSKext is above the lower bound? - // xxx - do we want a OSKextGetRetainCountOfKextWithLoadTag()? + OSReturn result = kOSKextReturnNotFound; + OSKext * theKext = NULL; // must release twice! + + if (loadTag == kOSKextInvalidLoadTag) { + result = kOSKextReturnInvalidArgument; + goto finish; + } + theKext = OSKext::lookupKextWithLoadTag(loadTag); + if (theKext) { + result = kOSReturnSuccess; + OSKext::considerUnloads(); // schedule autounload pass + theKext->release(); // do the release the caller wants + theKext->release(); // now do the release on the lookup + OSKextLog(theKext, + kOSKextLogDebugLevel | + kOSKextLogKextBookkeepingFlag, + "Kext %s (load tag %d) has been released.", + theKext->getIdentifierCString(), + loadTag); + } else { + OSKextLog(theKext, + kOSKextLogErrorLevel | + kOSKextLogKextBookkeepingFlag, + "Can't release kext with load tag %d - no such kext is loaded.", + loadTag); + } + + // xxx - should I check that the refcount of the OSKext is above the lower bound? + // xxx - do we want a OSKextGetRetainCountOfKextWithLoadTag()? finish: - return result; + return result; } /********************************************************************* * Not to be called by the kext being unloaded! 
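OSKextRetainKextWithLoadTag() above deliberately keeps the lookup reference it takes, and OSKextReleaseKextWithLoadTag() balances it by releasing twice (the caller's retain plus its own lookup). A hedged usage sketch; the bundle identifier, helper name, and header location are placeholders, not part of this patch:

#include <libkern/OSKextLib.h> /* assumed to declare the load-tag C interface */

static void
pin_example_kext(void) /* hypothetical */
{
	uint32_t tag = OSKextGetLoadTagForIdentifier("com.example.driver"); /* placeholder ID */

	if (tag != kOSKextInvalidLoadTag &&
	    OSKextRetainKextWithLoadTag(tag) == kOSReturnSuccess) {
		/* the kext stays loaded while the retain is outstanding */
		OSKextReleaseKextWithLoadTag(tag); /* balances the retain above */
	}
}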
*********************************************************************/ -OSReturn OSKextUnloadKextWithLoadTag(uint32_t loadTag) +OSReturn +OSKextUnloadKextWithLoadTag(uint32_t loadTag) { - return OSKext::removeKextWithLoadTag(loadTag, - /* terminateServicesAndRemovePersonalitiesFlag */ false); + return OSKext::removeKextWithLoadTag(loadTag, + /* terminateServicesAndRemovePersonalitiesFlag */ false); } @@ -159,24 +164,26 @@ OSReturn OSKextUnloadKextWithLoadTag(uint32_t loadTag) /********************************************************************* * Kext Requests *********************************************************************/ -OSReturn OSKextRequestResource( - const char * kextIdentifier, - const char * resourceName, - OSKextRequestResourceCallback callback, - void * context, - OSKextRequestTag * requestTagOut) +OSReturn +OSKextRequestResource( + const char * kextIdentifier, + const char * resourceName, + OSKextRequestResourceCallback callback, + void * context, + OSKextRequestTag * requestTagOut) { - return OSKext::requestResource(kextIdentifier, resourceName, - callback, context, requestTagOut); + return OSKext::requestResource(kextIdentifier, resourceName, + callback, context, requestTagOut); } /********************************************************************* *********************************************************************/ -OSReturn OSKextCancelRequest( - OSKextRequestTag requestTag, - void ** contextOut) +OSReturn +OSKextCancelRequest( + OSKextRequestTag requestTag, + void ** contextOut) { - return OSKext::cancelRequest(requestTag, contextOut); + return OSKext::cancelRequest(requestTag, contextOut); } #if PRAGMA_MARK @@ -187,168 +194,166 @@ OSReturn OSKextCancelRequest( * KERN_SUCCESS or the kernel map gets messed up (reason as yet * unknown). We use op_result to return the real result of our work. *********************************************************************/ -kern_return_t kext_request( - host_priv_t hostPriv, - /* in only */ uint32_t clientLogSpec, - /* in only */ vm_offset_t requestIn, - /* in only */ mach_msg_type_number_t requestLengthIn, - /* out only */ vm_offset_t * responseOut, - /* out only */ mach_msg_type_number_t * responseLengthOut, - /* out only */ vm_offset_t * logDataOut, - /* out only */ mach_msg_type_number_t * logDataLengthOut, - /* out only */ kern_return_t * op_result) +kern_return_t +kext_request( + host_priv_t hostPriv, + /* in only */ uint32_t clientLogSpec, + /* in only */ vm_offset_t requestIn, + /* in only */ mach_msg_type_number_t requestLengthIn, + /* out only */ vm_offset_t * responseOut, + /* out only */ mach_msg_type_number_t * responseLengthOut, + /* out only */ vm_offset_t * logDataOut, + /* out only */ mach_msg_type_number_t * logDataLengthOut, + /* out only */ kern_return_t * op_result) { - kern_return_t result = KERN_FAILURE; - vm_map_address_t map_addr = 0; // do not free/deallocate - char * request = NULL; // must vm_deallocate - - mkext2_header * mkextHeader = NULL; // do not release - bool isMkext = false; - - char * response = NULL; // must kmem_free - uint32_t responseLength = 0; - char * logData = NULL; // must kmem_free - uint32_t logDataLength = 0; - - /* MIG doesn't pass "out" parameters as empty, so clear them immediately - * just in case, or MIG will try to copy out bogus data. - */ - *op_result = KERN_FAILURE; - *responseOut = NULL; - *responseLengthOut = 0; - *logDataOut = NULL; - *logDataLengthOut = 0; - - /* Check for input. Don't discard what isn't there, though. 
- */ - if (!requestLengthIn || !requestIn) { + kern_return_t result = KERN_FAILURE; + vm_map_address_t map_addr = 0; // do not free/deallocate + char * request = NULL;// must vm_deallocate + + mkext2_header * mkextHeader = NULL;// do not release + bool isMkext = false; + + char * response = NULL;// must kmem_free + uint32_t responseLength = 0; + char * logData = NULL;// must kmem_free + uint32_t logDataLength = 0; + + /* MIG doesn't pass "out" parameters as empty, so clear them immediately + * just in case, or MIG will try to copy out bogus data. + */ + *op_result = KERN_FAILURE; + *responseOut = NULL; + *responseLengthOut = 0; + *logDataOut = NULL; + *logDataLengthOut = 0; + + /* Check for input. Don't discard what isn't there, though. + */ + if (!requestLengthIn || !requestIn) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Invalid request from user space (no data)."); + *op_result = KERN_INVALID_ARGUMENT; + goto finish; + } + + /* Once we have done the vm_map_copyout(), we *must* return KERN_SUCCESS + * or the kernel map gets messed up (reason as yet unknown). We will use + * op_result to return the real result of our work. + */ + result = vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)requestIn); + if (result != KERN_SUCCESS) { OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Invalid request from user space (no data)."); - *op_result = KERN_INVALID_ARGUMENT; - goto finish; - } - - /* Once we have done the vm_map_copyout(), we *must* return KERN_SUCCESS - * or the kernel map gets messed up (reason as yet unknown). We will use - * op_result to return the real result of our work. - */ - result = vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)requestIn); - if (result != KERN_SUCCESS) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "vm_map_copyout() failed for request from user space."); - vm_map_copy_discard((vm_map_copy_t)requestIn); - goto finish; - } - request = CAST_DOWN(char *, map_addr); - - /* Check if request is an mkext; this is always a load request - * and requires root access. If it isn't an mkext, see if it's - * an XML request, and check the request to see if that requires - * root access. - */ - if (requestLengthIn > sizeof(mkext2_header)) { - mkextHeader = (mkext2_header *)request; - if (MKEXT_GET_MAGIC(mkextHeader) == MKEXT_MAGIC && - MKEXT_GET_SIGNATURE(mkextHeader) == MKEXT_SIGN) { - - isMkext = true; - } - } - - if (isMkext) { + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "vm_map_copyout() failed for request from user space."); + vm_map_copy_discard((vm_map_copy_t)requestIn); + goto finish; + } + request = CAST_DOWN(char *, map_addr); + + /* Check if request is an mkext; this is always a load request + * and requires root access. If it isn't an mkext, see if it's + * an XML request, and check the request to see if that requires + * root access. + */ + if (requestLengthIn > sizeof(mkext2_header)) { + mkextHeader = (mkext2_header *)request; + if (MKEXT_GET_MAGIC(mkextHeader) == MKEXT_MAGIC && + MKEXT_GET_SIGNATURE(mkextHeader) == MKEXT_SIGN) { + isMkext = true; + } + } + + if (isMkext) { #ifdef SECURE_KERNEL - // xxx - something tells me if we have a secure kernel we don't even - // xxx - want to log a message here. :-) - *op_result = KERN_NOT_SUPPORTED; - goto finish; + // xxx - something tells me if we have a secure kernel we don't even + // xxx - want to log a message here. 
:-) + *op_result = KERN_NOT_SUPPORTED; + goto finish; #else - // xxx - can we find out if calling task is kextd? - // xxx - can we find the name of the calling task? - if (hostPriv == HOST_PRIV_NULL) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogIPCFlag, - "Attempt by non-root process to load a kext."); - *op_result = kOSKextReturnNotPrivileged; - goto finish; - } - - *op_result = OSKext::loadFromMkext((OSKextLogSpec)clientLogSpec, - request, requestLengthIn, - &logData, &logDataLength); + // xxx - can we find out if calling task is kextd? + // xxx - can we find the name of the calling task? + if (hostPriv == HOST_PRIV_NULL) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag | kOSKextLogIPCFlag, + "Attempt by non-root process to load a kext."); + *op_result = kOSKextReturnNotPrivileged; + goto finish; + } + + *op_result = OSKext::loadFromMkext((OSKextLogSpec)clientLogSpec, + request, requestLengthIn, + &logData, &logDataLength); #endif /* defined(SECURE_KERNEL) */ - - } else { - - /* If the request isn't an mkext, then is should be XML. Parse it - * if possible and hand the request over to OSKext. - */ - *op_result = OSKext::handleRequest(hostPriv, - (OSKextLogSpec)clientLogSpec, - request, requestLengthIn, - &response, &responseLength, - &logData, &logDataLength); - } - - if (response && responseLength > 0) { - kern_return_t copyin_result; - - copyin_result = vm_map_copyin(kernel_map, - CAST_USER_ADDR_T(response), responseLength, - /* src_destroy */ false, (vm_map_copy_t *)responseOut); - if (copyin_result == KERN_SUCCESS) { - *responseLengthOut = responseLength; - } else { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Failed to copy response to request from user space."); - *op_result = copyin_result; // xxx - should we map to our own code? - *responseOut = NULL; - *responseLengthOut = 0; - goto finish; - } - } - - if (logData && logDataLength > 0) { - kern_return_t copyin_result; - - copyin_result = vm_map_copyin(kernel_map, - CAST_USER_ADDR_T(logData), logDataLength, - /* src_destroy */ false, (vm_map_copy_t *)logDataOut); - if (copyin_result == KERN_SUCCESS) { - *logDataLengthOut = logDataLength; - } else { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Failed to copy log data for request from user space."); - *op_result = copyin_result; // xxx - should we map to our own code? - *logDataOut = NULL; - *logDataLengthOut = 0; - goto finish; - } - } + } else { + /* If the request isn't an mkext, then it should be XML. Parse it + * if possible and hand the request over to OSKext. + */ + *op_result = OSKext::handleRequest(hostPriv, + (OSKextLogSpec)clientLogSpec, + request, requestLengthIn, + &response, &responseLength, + &logData, &logDataLength); + } + + if (response && responseLength > 0) { + kern_return_t copyin_result; + + copyin_result = vm_map_copyin(kernel_map, + CAST_USER_ADDR_T(response), responseLength, + /* src_destroy */ false, (vm_map_copy_t *)responseOut); + if (copyin_result == KERN_SUCCESS) { + *responseLengthOut = responseLength; + } else { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Failed to copy response to request from user space."); + *op_result = copyin_result; // xxx - should we map to our own code?
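The response and log-data paths in this hunk repeat one pattern: vm_map_copyin() the kernel buffer without destroying the source, hand the resulting copy object to MIG, and zero the out-parameters on failure. A hypothetical helper capturing that shape (copy_reply is not part of this patch; the vm/vm_map.h include is an assumption for kernel-internal builds):

#include <mach/mach_types.h>
#include <vm/vm_map.h> /* assumed include for vm_map_copyin() */

static kern_return_t
copy_reply(char *buf, uint32_t len,
    vm_offset_t *out, mach_msg_type_number_t *outLen) /* hypothetical */
{
	kern_return_t kr = vm_map_copyin(kernel_map, CAST_USER_ADDR_T(buf), len,
	    /* src_destroy */ false, (vm_map_copy_t *)out);

	if (kr == KERN_SUCCESS) {
		*outLen = len;
	} else {
		*out = 0; /* never hand MIG a partial result */
		*outLen = 0;
	}
	return kr;
}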
+ *responseOut = NULL; + *responseLengthOut = 0; + goto finish; + } + } + + if (logData && logDataLength > 0) { + kern_return_t copyin_result; + + copyin_result = vm_map_copyin(kernel_map, + CAST_USER_ADDR_T(logData), logDataLength, + /* src_destroy */ false, (vm_map_copy_t *)logDataOut); + if (copyin_result == KERN_SUCCESS) { + *logDataLengthOut = logDataLength; + } else { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Failed to copy log data for request from user space."); + *op_result = copyin_result; // xxx - should we map to our own code? + *logDataOut = NULL; + *logDataLengthOut = 0; + goto finish; + } + } finish: - if (request) { - (void)vm_deallocate(kernel_map, (vm_offset_t)request, requestLengthIn); - } - if (response) { - /* 11981737 - clear uninitialized data in last page */ - kmem_free(kernel_map, (vm_offset_t)response, round_page(responseLength)); - } - if (logData) { - /* 11981737 - clear uninitialized data in last page */ - kmem_free(kernel_map, (vm_offset_t)logData, round_page(logDataLength)); - } - - return result; + if (request) { + (void)vm_deallocate(kernel_map, (vm_offset_t)request, requestLengthIn); + } + if (response) { + /* 11981737 - clear uninitialized data in last page */ + kmem_free(kernel_map, (vm_offset_t)response, round_page(responseLength)); + } + if (logData) { + /* 11981737 - clear uninitialized data in last page */ + kmem_free(kernel_map, (vm_offset_t)logData, round_page(logDataLength)); + } + + return result; } /********************************************************************* @@ -362,18 +367,17 @@ extern vm_map_t g_kext_map; vm_map_t kext_get_vm_map(kmod_info_t *info) { - vm_map_t kext_map = NULL; - - /* Set the vm map */ - if ((info->address >= segPRELINKTEXTB) && - (info->address < (segPRELINKTEXTB + segSizePRELINKTEXT))) - { - kext_map = kernel_map; - } else { - kext_map = g_kext_map; - } - - return kext_map; + vm_map_t kext_map = NULL; + + /* Set the vm map */ + if ((info->address >= segPRELINKTEXTB) && + (info->address < (segPRELINKTEXTB + segSizePRELINKTEXT))) { + kext_map = kernel_map; + } else { + kext_map = g_kext_map; + } + + return kext_map; } @@ -385,7 +389,7 @@ kext_get_vm_map(kmod_info_t *info) void kext_weak_symbol_referenced(void) { - panic("A kext referenced an unresolved weak symbol\n"); + panic("A kext referenced an unresolved weak symbol\n"); } const void *gOSKextUnresolved = (const void *)&kext_weak_symbol_referenced; @@ -396,28 +400,31 @@ const void *gOSKextUnresolved = (const void *)&kext_weak_symbol_referenced; /********************************************************************* * Called from startup.c. 
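kext_weak_symbol_referenced() panics on call, and gOSKextUnresolved above exports its address; unresolved weak imports in a kext end up bound to that address. The usual availability guard, sketched with a hypothetical weak function:

extern void OptionalFunction(void) __attribute__((weak_import)); /* hypothetical import */
extern const void *gOSKextUnresolved; /* exported just above */

static void
call_if_present(void) /* hypothetical */
{
	/* An unresolved weak import resolves to gOSKextUnresolved, so an
	 * address comparison detects availability without faulting. */
	if ((const void *)&OptionalFunction != gOSKextUnresolved) {
		OptionalFunction();
	}
}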
*********************************************************************/ -void OSKextRemoveKextBootstrap(void) +void +OSKextRemoveKextBootstrap(void) { - OSKext::removeKextBootstrap(); - return; + OSKext::removeKextBootstrap(); + return; } #if CONFIG_DTRACE /********************************************************************* *********************************************************************/ -void OSKextRegisterKextsWithDTrace(void) +void +OSKextRegisterKextsWithDTrace(void) { - OSKext::registerKextsWithDTrace(); - return; + OSKext::registerKextsWithDTrace(); + return; } #endif /* CONFIG_DTRACE */ /********************************************************************* *********************************************************************/ -void kext_dump_panic_lists(int (*printf_func)(const char * fmt, ...)) +void +kext_dump_panic_lists(int (*printf_func)(const char * fmt, ...)) { - OSKext::printKextPanicLists(printf_func); - return; + OSKext::printKextPanicLists(printf_func); + return; } #if PRAGMA_MARK @@ -439,11 +446,11 @@ void kext_dump_panic_lists(int (*printf_func)(const char * fmt, ...)) void kmod_panic_dump(vm_offset_t * addr, unsigned int cnt) { - extern int paniclog_append_noflush(const char *format, ...) __printflike(1,2); + extern int paniclog_append_noflush(const char *format, ...) __printflike(1, 2); - OSKext::printKextsInBacktrace(addr, cnt, &paniclog_append_noflush, 0); + OSKext::printKextsInBacktrace(addr, cnt, &paniclog_append_noflush, 0); - return; + return; } /********************************************************************/ @@ -451,19 +458,21 @@ void kmod_dump_log(vm_offset_t *addr, unsigned int cnt, boolean_t doUnslide); void kmod_dump_log( - vm_offset_t * addr, - unsigned int cnt, - boolean_t doUnslide) + vm_offset_t * addr, + unsigned int cnt, + boolean_t doUnslide) { - uint32_t flags = OSKext::kPrintKextsLock; - if (doUnslide) flags |= OSKext::kPrintKextsUnslide; - OSKext::printKextsInBacktrace(addr, cnt, &printf, flags); + uint32_t flags = OSKext::kPrintKextsLock; + if (doUnslide) { + flags |= OSKext::kPrintKextsUnslide; + } + OSKext::printKextsInBacktrace(addr, cnt, &printf, flags); } void * OSKextKextForAddress(const void *addr) { - return OSKext::kextForAddress(addr); + return OSKext::kextForAddress(addr); } @@ -476,10 +485,9 @@ OSKextKextForAddress(const void *addr) #pragma mark Loaded Kext Summary #endif -void +void OSKextLoadedKextSummariesUpdated(void) { - // Do nothing. + // Do nothing. } - }; diff --git a/libkern/OSKextVersion.c b/libkern/OSKextVersion.c index 236984072..cbdef89e2 100644 --- a/libkern/OSKextVersion.c +++ b/libkern/OSKextVersion.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL @@ -53,108 +53,117 @@ typedef enum { - kOSKextVersionStageInvalid = 0, - kOSKextVersionStageDevelopment = 1, - kOSKextVersionStageAlpha = 3, - kOSKextVersionStageBeta = 5, - kOSKextVersionStageCandidate = 7, - kOSKextVersionStageRelease = 9, + kOSKextVersionStageInvalid = 0, + kOSKextVersionStageDevelopment = 1, + kOSKextVersionStageAlpha = 3, + kOSKextVersionStageBeta = 5, + kOSKextVersionStageCandidate = 7, + kOSKextVersionStageRelease = 9, } OSKextVersionStage; /********************************************************************* *********************************************************************/ -static int __vers_isdigit(char c) { - return (c == '0' || - c == '1' || c == '2' || c == '3' || - c == '4' || c == '5' || c == '6' || - c == '7' || c == '8' || c == '9'); +static int +__vers_isdigit(char c) +{ + return c == '0' || + c == '1' || c == '2' || c == '3' || + c == '4' || c == '5' || c == '6' || + c == '7' || c == '8' || c == '9'; } /********************************************************************* *********************************************************************/ -static int __vers_isspace(char c) { - return (c == ' ' || - c == '\t' || - c == '\r' || - c == '\n'); +static int +__vers_isspace(char c) +{ + return c == ' ' || + c == '\t' || + c == '\r' || + c == '\n'; } /********************************************************************* *********************************************************************/ static int -__vers_digit_for_char(char c) { +__vers_digit_for_char(char c) +{ switch (c) { - case '0': return 0; - case '1': return 1; - case '2': return 2; - case '3': return 3; - case '4': return 4; - case '5': return 5; - case '6': return 6; - case '7': return 7; - case '8': return 8; - case '9': return 9; - default: return -1; + case '0': return 0; + case '1': return 1; + case '2': return 2; + case '3': return 3; + case '4': return 4; + case '5': return 5; + case '6': return 6; + case '7': return 7; + case '8': return 8; + case '9': return 9; + default: return -1; } } /********************************************************************* *********************************************************************/ -static int __VERS_isreleasestate(char c) { - return (c == 'd' || c == 'a' || c == 'b' || c == 'f'); +static int +__VERS_isreleasestate(char c) +{ + return c == 'd' || c == 'a' || c == 'b' || c == 'f'; } /********************************************************************* *********************************************************************/ -static OSKextVersionStage __OSKextVersionStageForString(const char ** string_p) { - const char * string; - - if (!string_p || !*string_p) { - return kOSKextVersionStageInvalid; - } - - string = *string_p; - - if (__vers_isspace(string[0]) || string[0] == '\0') { - return kOSKextVersionStageRelease; - } else { - switch (string[0]) { - case 'd': - if (__vers_isdigit(string[1])) { - *string_p = &string[1]; - return kOSKextVersionStageDevelopment; - } - break; - case 'a': - if (__vers_isdigit(string[1])) { - *string_p = &string[1]; - return 
kOSKextVersionStageAlpha; - } - break; - case 'b': - if (__vers_isdigit(string[1])) { - *string_p = &string[1]; - return kOSKextVersionStageBeta; - } - break; - case 'f': - if (__vers_isdigit(string[1])) { - *string_p = &string[1]; - return kOSKextVersionStageCandidate; - } else if (string[1] == 'c' && __vers_isdigit(string[2])) { - *string_p = &string[2]; - return kOSKextVersionStageCandidate; - } else { - return kOSKextVersionStageInvalid; - } - default: - return kOSKextVersionStageInvalid; - } - } - - return kOSKextVersionStageInvalid; +static OSKextVersionStage +__OSKextVersionStageForString(const char ** string_p) +{ + const char * string; + + if (!string_p || !*string_p) { + return kOSKextVersionStageInvalid; + } + + string = *string_p; + + if (__vers_isspace(string[0]) || string[0] == '\0') { + return kOSKextVersionStageRelease; + } else { + switch (string[0]) { + case 'd': + if (__vers_isdigit(string[1])) { + *string_p = &string[1]; + return kOSKextVersionStageDevelopment; + } + break; + case 'a': + if (__vers_isdigit(string[1])) { + *string_p = &string[1]; + return kOSKextVersionStageAlpha; + } + break; + case 'b': + if (__vers_isdigit(string[1])) { + *string_p = &string[1]; + return kOSKextVersionStageBeta; + } + break; + case 'f': + if (__vers_isdigit(string[1])) { + *string_p = &string[1]; + return kOSKextVersionStageCandidate; + } else if (string[1] == 'c' && __vers_isdigit(string[2])) { + *string_p = &string[2]; + return kOSKextVersionStageCandidate; + } else { + return kOSKextVersionStageInvalid; + } + default: + return kOSKextVersionStageInvalid; + } + } + + return kOSKextVersionStageInvalid; } /********************************************************************* @@ -163,333 +172,334 @@ static const char * __OSKextVersionStringForStage(OSKextVersionStage stage) { switch (stage) { - case kOSKextVersionStageInvalid: return NULL; - case kOSKextVersionStageDevelopment: return "d"; - case kOSKextVersionStageAlpha: return "a"; - case kOSKextVersionStageBeta: return "b"; - case kOSKextVersionStageCandidate: return "f"; - case kOSKextVersionStageRelease: return ""; + case kOSKextVersionStageInvalid: return NULL; + case kOSKextVersionStageDevelopment: return "d"; + case kOSKextVersionStageAlpha: return "a"; + case kOSKextVersionStageBeta: return "b"; + case kOSKextVersionStageCandidate: return "f"; + case kOSKextVersionStageRelease: return ""; } } /********************************************************************* *********************************************************************/ -OSKextVersion OSKextParseVersionString(const char * versionString) +OSKextVersion +OSKextParseVersionString(const char * versionString) { - OSKextVersion result = -1; - int vers_digit = -1; - int num_digits_scanned = 0; - OSKextVersion vers_major = 0; - OSKextVersion vers_minor = 0; - OSKextVersion vers_revision = 0; - OSKextVersion vers_stage = 0; - OSKextVersion vers_stage_level = 0; - const char * current_char_p; - - if (!versionString || *versionString == '\0') { - return -1; - } - - current_char_p = (const char *)&versionString[0]; - - /***** - * Check for an initial digit of the major release number. - */ - vers_major = __vers_digit_for_char(*current_char_p); - if (vers_major < 0) { - return -1; - } - - current_char_p++; - num_digits_scanned = 1; - - /* Complete scan for major version number. Legal characters are - * any digit, period, any buildstage letter. 
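OSKextParseVersionString() accepts up to three dot-separated numeric fields plus an optional stage suffix (d, a, b, f, or fc, each followed by a level), and OSKextVersionGetString() reverses the encoding. A round-trip sketch using only interfaces touched by this patch; the helper name and header location are assumptions:

#include <libkern/OSKextLib.h> /* assumed to declare the version functions */

static void
version_round_trip(void) /* hypothetical */
{
	char buf[kOSKextVersionMaxLength];
	OSKextVersion v = OSKextParseVersionString("1.2.3b7"); /* beta, stage level 7 */

	if (v > 0 && OSKextVersionGetString(v, buf, sizeof(buf))) {
		/* buf now holds the canonical spelling, "1.2.3b7" */
		(void)buf;
	}
}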
- */ - while (num_digits_scanned < VERS_MAJOR_DIGITS) { - if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { - vers_stage = kOSKextVersionStageRelease; - goto finish; - } else if (__vers_isdigit(*current_char_p)) { - vers_digit = __vers_digit_for_char(*current_char_p); - if (vers_digit < 0) { - return -1; - } - vers_major = (vers_major) * 10 + vers_digit; - current_char_p++; - num_digits_scanned++; - } else if (__VERS_isreleasestate(*current_char_p)) { - goto release_state; - } else if (*current_char_p == '.') { - current_char_p++; - goto minor_version; - } else { - return -1; - } - } - - /* Check for too many digits. - */ - if (num_digits_scanned == VERS_MAJOR_DIGITS) { - if (*current_char_p == '.') { - current_char_p++; - } else if (__vers_isdigit(*current_char_p)) { - return -1; - } - } + OSKextVersion result = -1; + int vers_digit = -1; + int num_digits_scanned = 0; + OSKextVersion vers_major = 0; + OSKextVersion vers_minor = 0; + OSKextVersion vers_revision = 0; + OSKextVersion vers_stage = 0; + OSKextVersion vers_stage_level = 0; + const char * current_char_p; + + if (!versionString || *versionString == '\0') { + return -1; + } + + current_char_p = (const char *)&versionString[0]; + + /***** + * Check for an initial digit of the major release number. + */ + vers_major = __vers_digit_for_char(*current_char_p); + if (vers_major < 0) { + return -1; + } + + current_char_p++; + num_digits_scanned = 1; + + /* Complete scan for major version number. Legal characters are + * any digit, period, any buildstage letter. + */ + while (num_digits_scanned < VERS_MAJOR_DIGITS) { + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + vers_stage = kOSKextVersionStageRelease; + goto finish; + } else if (__vers_isdigit(*current_char_p)) { + vers_digit = __vers_digit_for_char(*current_char_p); + if (vers_digit < 0) { + return -1; + } + vers_major = (vers_major) * 10 + vers_digit; + current_char_p++; + num_digits_scanned++; + } else if (__VERS_isreleasestate(*current_char_p)) { + goto release_state; + } else if (*current_char_p == '.') { + current_char_p++; + goto minor_version; + } else { + return -1; + } + } + + /* Check for too many digits. + */ + if (num_digits_scanned == VERS_MAJOR_DIGITS) { + if (*current_char_p == '.') { + current_char_p++; + } else if (__vers_isdigit(*current_char_p)) { + return -1; + } + } minor_version: - num_digits_scanned = 0; - - /* Scan for minor version number. Legal characters are - * any digit, period, any buildstage letter. - */ - while (num_digits_scanned < VERS_MINOR_DIGITS) { - if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { - vers_stage = kOSKextVersionStageRelease; - goto finish; - } else if (__vers_isdigit(*current_char_p)) { - vers_digit = __vers_digit_for_char(*current_char_p); - if (vers_digit < 0) { - return -1; - } - vers_minor = (vers_minor) * 10 + vers_digit; - current_char_p++; - num_digits_scanned++; - } else if (__VERS_isreleasestate(*current_char_p)) { - goto release_state; - } else if (*current_char_p == '.') { - current_char_p++; - goto revision; - } else { - return -1; - } - } - - /* Check for too many digits. - */ - if (num_digits_scanned == VERS_MINOR_DIGITS) { - if (*current_char_p == '.') { - current_char_p++; - } else if (__vers_isdigit(*current_char_p)) { - return -1; - } - } + num_digits_scanned = 0; + + /* Scan for minor version number. Legal characters are + * any digit, period, any buildstage letter. 
+ */ + while (num_digits_scanned < VERS_MINOR_DIGITS) { + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + vers_stage = kOSKextVersionStageRelease; + goto finish; + } else if (__vers_isdigit(*current_char_p)) { + vers_digit = __vers_digit_for_char(*current_char_p); + if (vers_digit < 0) { + return -1; + } + vers_minor = (vers_minor) * 10 + vers_digit; + current_char_p++; + num_digits_scanned++; + } else if (__VERS_isreleasestate(*current_char_p)) { + goto release_state; + } else if (*current_char_p == '.') { + current_char_p++; + goto revision; + } else { + return -1; + } + } + + /* Check for too many digits. + */ + if (num_digits_scanned == VERS_MINOR_DIGITS) { + if (*current_char_p == '.') { + current_char_p++; + } else if (__vers_isdigit(*current_char_p)) { + return -1; + } + } revision: - num_digits_scanned = 0; - - /* Scan for revision version number. Legal characters are - * any digit, any buildstage letter (NOT PERIOD). - */ - while (num_digits_scanned < VERS_REVISION_DIGITS) { - if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { - vers_stage = kOSKextVersionStageRelease; - goto finish; - } else if (__vers_isdigit(*current_char_p)) { - vers_digit = __vers_digit_for_char(*current_char_p); - if (vers_digit < 0) { - return -1; - } - vers_revision = (vers_revision) * 10 + vers_digit; - current_char_p++; - num_digits_scanned++; - } else if (__VERS_isreleasestate(*current_char_p)) { - goto release_state; - } else { - return -1; - } - } - - /* Check for too many digits. - */ - if (num_digits_scanned == VERS_REVISION_DIGITS) { - if (*current_char_p == '.') { - current_char_p++; - } else if (__vers_isdigit(*current_char_p)) { - return -1; - } - } + num_digits_scanned = 0; + + /* Scan for revision version number. Legal characters are + * any digit, any buildstage letter (NOT PERIOD). + */ + while (num_digits_scanned < VERS_REVISION_DIGITS) { + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + vers_stage = kOSKextVersionStageRelease; + goto finish; + } else if (__vers_isdigit(*current_char_p)) { + vers_digit = __vers_digit_for_char(*current_char_p); + if (vers_digit < 0) { + return -1; + } + vers_revision = (vers_revision) * 10 + vers_digit; + current_char_p++; + num_digits_scanned++; + } else if (__VERS_isreleasestate(*current_char_p)) { + goto release_state; + } else { + return -1; + } + } + + /* Check for too many digits. + */ + if (num_digits_scanned == VERS_REVISION_DIGITS) { + if (*current_char_p == '.') { + current_char_p++; + } else if (__vers_isdigit(*current_char_p)) { + return -1; + } + } release_state: - /***** - * Check for the release state. - */ - if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { - vers_stage = kOSKextVersionStageRelease; - goto finish; - } else { - vers_stage = __OSKextVersionStageForString(¤t_char_p); - if (vers_stage == kOSKextVersionStageInvalid) { - return -1; - } - } + /***** + * Check for the release state. + */ + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + vers_stage = kOSKextVersionStageRelease; + goto finish; + } else { + vers_stage = __OSKextVersionStageForString(¤t_char_p); + if (vers_stage == kOSKextVersionStageInvalid) { + return -1; + } + } // stage level - num_digits_scanned = 0; - - /* Scan for stage level number. Legal characters are - * any digit only. 
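The stage letters map to odd enum values (d=1, a=3, b=5, f=7, release=9) precisely so that packed versions at the same x.y.z compare in release order. A small sketch of that property; the helper name is hypothetical and the assert header is assumed available in the build context:

#include <assert.h>

static void
stage_order_check(void) /* hypothetical */
{
	/* development 1 of version 1.0 precedes the 1.0 release */
	assert(OSKextParseVersionString("1.0d1") < OSKextParseVersionString("1.0"));
	/* and a beta precedes a candidate at the same version */
	assert(OSKextParseVersionString("1.0b1") < OSKextParseVersionString("1.0f1"));
}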
- */ - while (num_digits_scanned < VERS_STAGE_LEVEL_DIGITS) { - if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { - if (num_digits_scanned) { - goto finish; - } else { - return -1; - } - } else if (__vers_isdigit(*current_char_p)) { - vers_digit = __vers_digit_for_char(*current_char_p); - if (vers_digit < 0) { - return -1; - } - vers_stage_level = (vers_stage_level) * 10 + vers_digit; - current_char_p++; - num_digits_scanned++; - } else { - return -1; - } - } - - /* Check for too many digits. - */ - if ((num_digits_scanned == VERS_STAGE_LEVEL_DIGITS) && - ! (__vers_isspace(*current_char_p) || (*current_char_p == '\0'))) { - - return -1; - } - - if (vers_stage_level > VERS_STAGE_LEVEL_MAX) { - return -1; - } + num_digits_scanned = 0; + + /* Scan for stage level number. Legal characters are + * any digit only. + */ + while (num_digits_scanned < VERS_STAGE_LEVEL_DIGITS) { + if (__vers_isspace(*current_char_p) || *current_char_p == '\0') { + if (num_digits_scanned) { + goto finish; + } else { + return -1; + } + } else if (__vers_isdigit(*current_char_p)) { + vers_digit = __vers_digit_for_char(*current_char_p); + if (vers_digit < 0) { + return -1; + } + vers_stage_level = (vers_stage_level) * 10 + vers_digit; + current_char_p++; + num_digits_scanned++; + } else { + return -1; + } + } + + /* Check for too many digits. + */ + if ((num_digits_scanned == VERS_STAGE_LEVEL_DIGITS) && + !(__vers_isspace(*current_char_p) || (*current_char_p == '\0'))) { + return -1; + } + + if (vers_stage_level > VERS_STAGE_LEVEL_MAX) { + return -1; + } finish: - if (vers_stage == kOSKextVersionStageCandidate && vers_stage_level == 0) { - return -1; - } + if (vers_stage == kOSKextVersionStageCandidate && vers_stage_level == 0) { + return -1; + } - result = (vers_major * VERS_MAJOR_MULT) + - (vers_minor * VERS_MINOR_MULT) + - (vers_revision * VERS_REVISION_MULT) + - (vers_stage * VERS_STAGE_MULT) + - vers_stage_level; + result = (vers_major * VERS_MAJOR_MULT) + + (vers_minor * VERS_MINOR_MULT) + + (vers_revision * VERS_REVISION_MULT) + + (vers_stage * VERS_STAGE_MULT) + + vers_stage_level; - return result; + return result; } /********************************************************************* * This function must be safe to call in panic context. *********************************************************************/ -Boolean OSKextVersionGetString( - OSKextVersion aVersion, - char * buffer, - uint32_t bufferLength) +Boolean +OSKextVersionGetString( + OSKextVersion aVersion, + char * buffer, + uint32_t bufferLength) { - int cpos = 0; - OSKextVersion vers_major = 0; - OSKextVersion vers_minor = 0; - OSKextVersion vers_revision = 0; - OSKextVersion vers_stage = 0; - OSKextVersion vers_stage_level = 0; - const char * stage_string = NULL; // don't free - - /* No buffer or length less than longest possible vers string, - * return 0. 
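OSKextVersionGetString() below recovers each field by subtracting the higher-order components before dividing; because every multiplier divides the one above it, the same fields fall out of plain division and remainder. A sketch assuming the VERS_*_MULT macros from this file are in scope; unpack_version is a hypothetical name:

static void
unpack_version(OSKextVersion v,
    OSKextVersion *major, OSKextVersion *minor, OSKextVersion *revision) /* hypothetical */
{
	*major    = v / VERS_MAJOR_MULT;
	*minor    = (v % VERS_MAJOR_MULT) / VERS_MINOR_MULT;
	*revision = (v % VERS_MINOR_MULT) / VERS_REVISION_MULT;
}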
- */ - if (!buffer || bufferLength < kOSKextVersionMaxLength) { - return FALSE; - } - - bzero(buffer, bufferLength * sizeof(char)); - - if (aVersion < 0) { - strlcpy(buffer, "(invalid)", bufferLength); - return TRUE; - } - if (aVersion == 0) { - strlcpy(buffer, "(missing)", bufferLength); - return TRUE; - } - - vers_major = aVersion / VERS_MAJOR_MULT; - if (vers_major > VERS_MAJOR_MAX) { - strlcpy(buffer, "(invalid)", bufferLength); - return TRUE; - } - - vers_minor = aVersion - (vers_major * VERS_MAJOR_MULT); - vers_minor /= VERS_MINOR_MULT; - - vers_revision = aVersion - - ( (vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT) ); - vers_revision /= VERS_REVISION_MULT; - - vers_stage = aVersion - - ( (vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT) + - (vers_revision * VERS_REVISION_MULT)); - vers_stage /= VERS_STAGE_MULT; - - vers_stage_level = aVersion - - ( (vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT) + - (vers_revision * VERS_REVISION_MULT) + (vers_stage * VERS_STAGE_MULT)); - if (vers_stage_level > VERS_STAGE_LEVEL_MAX) { - strlcpy(buffer, "(invalid)", bufferLength); - return TRUE; - } - - cpos = snprintf(buffer, bufferLength, "%u", (uint32_t)vers_major); - - /* Always include the minor version; it just looks weird without. - */ - buffer[cpos] = '.'; - cpos++; - cpos += snprintf(buffer+cpos, bufferLength - cpos, "%u", (uint32_t)vers_minor); - - /* The revision is displayed only if nonzero. - */ - if (vers_revision) { - buffer[cpos] = '.'; - cpos++; - cpos += snprintf(buffer+cpos, bufferLength - cpos, "%u", - (uint32_t)vers_revision); - } - - stage_string = __OSKextVersionStringForStage(vers_stage); - if (!stage_string) { - strlcpy(buffer, "(invalid)", bufferLength); - return TRUE; - } - if (stage_string[0]) { - strlcat(buffer, stage_string, bufferLength); - cpos += strlen(stage_string); - } - - if (vers_stage < kOSKextVersionStageRelease) { - snprintf(buffer+cpos, bufferLength - cpos, "%u", (uint32_t)vers_stage_level); - } - - return TRUE; + int cpos = 0; + OSKextVersion vers_major = 0; + OSKextVersion vers_minor = 0; + OSKextVersion vers_revision = 0; + OSKextVersion vers_stage = 0; + OSKextVersion vers_stage_level = 0; + const char * stage_string = NULL;// don't free + + /* No buffer or length less than longest possible vers string, + * return 0. 
+ */ + if (!buffer || bufferLength < kOSKextVersionMaxLength) { + return FALSE; + } + + bzero(buffer, bufferLength * sizeof(char)); + + if (aVersion < 0) { + strlcpy(buffer, "(invalid)", bufferLength); + return TRUE; + } + if (aVersion == 0) { + strlcpy(buffer, "(missing)", bufferLength); + return TRUE; + } + + vers_major = aVersion / VERS_MAJOR_MULT; + if (vers_major > VERS_MAJOR_MAX) { + strlcpy(buffer, "(invalid)", bufferLength); + return TRUE; + } + + vers_minor = aVersion - (vers_major * VERS_MAJOR_MULT); + vers_minor /= VERS_MINOR_MULT; + + vers_revision = aVersion - + ((vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT)); + vers_revision /= VERS_REVISION_MULT; + + vers_stage = aVersion - + ((vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT) + + (vers_revision * VERS_REVISION_MULT)); + vers_stage /= VERS_STAGE_MULT; + + vers_stage_level = aVersion - + ((vers_major * VERS_MAJOR_MULT) + (vers_minor * VERS_MINOR_MULT) + + (vers_revision * VERS_REVISION_MULT) + (vers_stage * VERS_STAGE_MULT)); + if (vers_stage_level > VERS_STAGE_LEVEL_MAX) { + strlcpy(buffer, "(invalid)", bufferLength); + return TRUE; + } + + cpos = snprintf(buffer, bufferLength, "%u", (uint32_t)vers_major); + + /* Always include the minor version; it just looks weird without. + */ + buffer[cpos] = '.'; + cpos++; + cpos += snprintf(buffer + cpos, bufferLength - cpos, "%u", (uint32_t)vers_minor); + + /* The revision is displayed only if nonzero. + */ + if (vers_revision) { + buffer[cpos] = '.'; + cpos++; + cpos += snprintf(buffer + cpos, bufferLength - cpos, "%u", + (uint32_t)vers_revision); + } + + stage_string = __OSKextVersionStringForStage(vers_stage); + if (!stage_string) { + strlcpy(buffer, "(invalid)", bufferLength); + return TRUE; + } + if (stage_string[0]) { + strlcat(buffer, stage_string, bufferLength); + cpos += strlen(stage_string); + } + + if (vers_stage < kOSKextVersionStageRelease) { + snprintf(buffer + cpos, bufferLength - cpos, "%u", (uint32_t)vers_stage_level); + } + + return TRUE; } /********************************************************************* *********************************************************************/ #ifndef KERNEL -OSKextVersion OSKextParseVersionCFString(CFStringRef versionString) +OSKextVersion +OSKextParseVersionCFString(CFStringRef versionString) { - OSKextVersion result = -1; - char versBuffer[kOSKextVersionMaxLength]; - - if (CFStringGetCString(versionString, versBuffer, - sizeof(versBuffer), kCFStringEncodingASCII)) { - - result = OSKextParseVersionString(versBuffer); - } - return result; + OSKextVersion result = -1; + char versBuffer[kOSKextVersionMaxLength]; + + if (CFStringGetCString(versionString, versBuffer, + sizeof(versBuffer), kCFStringEncodingASCII)) { + result = OSKextParseVersionString(versBuffer); + } + return result; } #endif diff --git a/libkern/c++/OSArray.cpp b/libkern/c++/OSArray.cpp index 5089da70f..92558ac4c 100644 --- a/libkern/c++/OSArray.cpp +++ b/libkern/c++/OSArray.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOArray.m created by rsulack on Fri 12-Sep-1997 */ @@ -51,437 +51,512 @@ OSMetaClassDefineReservedUnused(OSArray, 7); #define EXT_CAST(obj) \ reinterpret_cast(const_cast(obj)) -bool OSArray::initWithCapacity(unsigned int inCapacity) +bool +OSArray::initWithCapacity(unsigned int inCapacity) { - unsigned int size; + unsigned int size; - if (!super::init()) - return false; + if (!super::init()) { + return false; + } - // integer overflow check - if (inCapacity > (UINT_MAX / sizeof(const OSMetaClassBase*))) - return false; + // integer overflow check + if (inCapacity > (UINT_MAX / sizeof(const OSMetaClassBase*))) { + return false; + } - size = sizeof(const OSMetaClassBase *) * inCapacity; - array = (const OSMetaClassBase **) kalloc_container(size); - if (!array) - return false; + size = sizeof(const OSMetaClassBase *) * inCapacity; + array = (const OSMetaClassBase **) kalloc_container(size); + if (!array) { + return false; + } - count = 0; - capacity = inCapacity; - capacityIncrement = (inCapacity)? inCapacity : 16; + count = 0; + capacity = inCapacity; + capacityIncrement = (inCapacity)? 
inCapacity : 16; - bzero(array, size); - OSCONTAINER_ACCUMSIZE(size); + bzero(array, size); + OSCONTAINER_ACCUMSIZE(size); - return true; + return true; } -bool OSArray::initWithObjects(const OSObject *objects[], - unsigned int theCount, - unsigned int theCapacity) +bool +OSArray::initWithObjects(const OSObject *objects[], + unsigned int theCount, + unsigned int theCapacity) { - unsigned int initCapacity; - - if (!theCapacity) - initCapacity = theCount; - else if (theCount > theCapacity) - return false; - else - initCapacity = theCapacity; + unsigned int initCapacity; + + if (!theCapacity) { + initCapacity = theCount; + } else if (theCount > theCapacity) { + return false; + } else { + initCapacity = theCapacity; + } - if (!objects || !initWithCapacity(initCapacity)) - return false; + if (!objects || !initWithCapacity(initCapacity)) { + return false; + } - for ( unsigned int i = 0; i < theCount; i++ ) { - const OSMetaClassBase *newObject = *objects++; + for (unsigned int i = 0; i < theCount; i++) { + const OSMetaClassBase *newObject = *objects++; - if (!newObject) - return false; + if (!newObject) { + return false; + } - array[count++] = newObject; - newObject->taggedRetain(OSTypeID(OSCollection)); - } + array[count++] = newObject; + newObject->taggedRetain(OSTypeID(OSCollection)); + } - return true; + return true; } -bool OSArray::initWithArray(const OSArray *anArray, - unsigned int theCapacity) +bool +OSArray::initWithArray(const OSArray *anArray, + unsigned int theCapacity) { - if ( !anArray ) - return false; + if (!anArray) { + return false; + } - return initWithObjects((const OSObject **) anArray->array, - anArray->count, theCapacity); + return initWithObjects((const OSObject **) anArray->array, + anArray->count, theCapacity); } -OSArray *OSArray::withCapacity(unsigned int capacity) +OSArray * +OSArray::withCapacity(unsigned int capacity) { - OSArray *me = new OSArray; + OSArray *me = new OSArray; - if (me && !me->initWithCapacity(capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithCapacity(capacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSArray *OSArray::withObjects(const OSObject *objects[], - unsigned int count, - unsigned int capacity) +OSArray * +OSArray::withObjects(const OSObject *objects[], + unsigned int count, + unsigned int capacity) { - OSArray *me = new OSArray; + OSArray *me = new OSArray; - if (me && !me->initWithObjects(objects, count, capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithObjects(objects, count, capacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSArray *OSArray::withArray(const OSArray *array, - unsigned int capacity) +OSArray * +OSArray::withArray(const OSArray *array, + unsigned int capacity) { - OSArray *me = new OSArray; + OSArray *me = new OSArray; - if (me && !me->initWithArray(array, capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithArray(array, capacity)) { + me->release(); + return 0; + } - return me; + return me; } -void OSArray::free() +void +OSArray::free() { - // Clear immutability - assumes the container is doing the right thing - (void) super::setOptions(0, kImmutable); + // Clear immutability - assumes the container is doing the right thing + (void) super::setOptions(0, kImmutable); - flushCollection(); + flushCollection(); - if (array) { - kfree(array, sizeof(const OSMetaClassBase *) * capacity); - OSCONTAINER_ACCUMSIZE( -(sizeof(const OSMetaClassBase *) * capacity) ); - } + if (array) { + kfree(array, sizeof(const 
OSMetaClassBase *) * capacity); + OSCONTAINER_ACCUMSIZE( -(sizeof(const OSMetaClassBase *) * capacity)); + } - super::free(); + super::free(); } -unsigned int OSArray::getCount() const { return count; } -unsigned int OSArray::getCapacity() const { return capacity; } -unsigned int OSArray::getCapacityIncrement() const { return capacityIncrement; } -unsigned int OSArray::setCapacityIncrement(unsigned int increment) +unsigned int +OSArray::getCount() const { - capacityIncrement = (increment)? increment : 16; + return count; +} +unsigned int +OSArray::getCapacity() const +{ + return capacity; +} +unsigned int +OSArray::getCapacityIncrement() const +{ + return capacityIncrement; +} +unsigned int +OSArray::setCapacityIncrement(unsigned int increment) +{ + capacityIncrement = (increment)? increment : 16; - return capacityIncrement; + return capacityIncrement; } -unsigned int OSArray::ensureCapacity(unsigned int newCapacity) +unsigned int +OSArray::ensureCapacity(unsigned int newCapacity) { - const OSMetaClassBase **newArray; - unsigned int finalCapacity; - vm_size_t oldSize, newSize; + const OSMetaClassBase **newArray; + unsigned int finalCapacity; + vm_size_t oldSize, newSize; - if (newCapacity <= capacity) - return capacity; + if (newCapacity <= capacity) { + return capacity; + } - // round up - finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) - * capacityIncrement; + // round up + finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; - // integer overflow check - if ((finalCapacity < newCapacity) || (finalCapacity > (UINT_MAX / sizeof(const OSMetaClassBase*)))) - return capacity; + // integer overflow check + if ((finalCapacity < newCapacity) || (finalCapacity > (UINT_MAX / sizeof(const OSMetaClassBase*)))) { + return capacity; + } - newSize = sizeof(const OSMetaClassBase *) * finalCapacity; + newSize = sizeof(const OSMetaClassBase *) * finalCapacity; - newArray = (const OSMetaClassBase **) kallocp_container(&newSize); - if (newArray) { - // use all of the actual allocation size - finalCapacity = newSize / sizeof(const OSMetaClassBase *); + newArray = (const OSMetaClassBase **) kallocp_container(&newSize); + if (newArray) { + // use all of the actual allocation size + finalCapacity = newSize / sizeof(const OSMetaClassBase *); - oldSize = sizeof(const OSMetaClassBase *) * capacity; + oldSize = sizeof(const OSMetaClassBase *) * capacity; - OSCONTAINER_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); + OSCONTAINER_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); - bcopy(array, newArray, oldSize); - bzero(&newArray[capacity], newSize - oldSize); - kfree(array, oldSize); - array = newArray; - capacity = finalCapacity; - } + bcopy(array, newArray, oldSize); + bzero(&newArray[capacity], newSize - oldSize); + kfree(array, oldSize); + array = newArray; + capacity = finalCapacity; + } - return capacity; + return capacity; } -void OSArray::flushCollection() +void +OSArray::flushCollection() { - unsigned int i; + unsigned int i; - haveUpdated(); - for (i = 0; i < count; i++) { - array[i]->taggedRelease(OSTypeID(OSCollection)); - } - count = 0; + haveUpdated(); + for (i = 0; i < count; i++) { + array[i]->taggedRelease(OSTypeID(OSCollection)); + } + count = 0; } -bool OSArray::setObject(const OSMetaClassBase *anObject) +bool +OSArray::setObject(const OSMetaClassBase *anObject) { - return setObject(count, anObject); + return setObject(count, anObject); } -bool OSArray::setObject(unsigned int index, const OSMetaClassBase *anObject) +bool 
+OSArray::setObject(unsigned int index, const OSMetaClassBase *anObject) { - unsigned int i; - unsigned int newCount = count + 1; - - if ((index > count) || !anObject) - return false; - - // do we need more space? - if (newCount > capacity && newCount > ensureCapacity(newCount)) - return false; - - haveUpdated(); - if (index != count) { - for (i = count; i > index; i--) - array[i] = array[i-1]; - } - array[index] = anObject; - anObject->taggedRetain(OSTypeID(OSCollection)); - count++; - - return true; + unsigned int i; + unsigned int newCount = count + 1; + + if ((index > count) || !anObject) { + return false; + } + + // do we need more space? + if (newCount > capacity && newCount > ensureCapacity(newCount)) { + return false; + } + + haveUpdated(); + if (index != count) { + for (i = count; i > index; i--) { + array[i] = array[i - 1]; + } + } + array[index] = anObject; + anObject->taggedRetain(OSTypeID(OSCollection)); + count++; + + return true; } -bool OSArray::merge(const OSArray * otherArray) +bool +OSArray::merge(const OSArray * otherArray) { - unsigned int otherCount = otherArray->getCount(); - unsigned int newCount = count + otherCount; + unsigned int otherCount = otherArray->getCount(); + unsigned int newCount = count + otherCount; - if (!otherCount) - return true; + if (!otherCount) { + return true; + } - if (newCount < count) - return false; + if (newCount < count) { + return false; + } - // do we need more space? - if (newCount > capacity && newCount > ensureCapacity(newCount)) - return false; + // do we need more space? + if (newCount > capacity && newCount > ensureCapacity(newCount)) { + return false; + } - haveUpdated(); - for (unsigned int i = 0; i < otherCount; i++) { - const OSMetaClassBase *newObject = otherArray->getObject(i); + haveUpdated(); + for (unsigned int i = 0; i < otherCount; i++) { + const OSMetaClassBase *newObject = otherArray->getObject(i); - array[count++] = newObject; - newObject->taggedRetain(OSTypeID(OSCollection)); - } + array[count++] = newObject; + newObject->taggedRetain(OSTypeID(OSCollection)); + } - return true; + return true; } -void OSArray:: +void +OSArray:: replaceObject(unsigned int index, const OSMetaClassBase *anObject) { - const OSMetaClassBase *oldObject; + const OSMetaClassBase *oldObject; - if ((index >= count) || !anObject) - return; + if ((index >= count) || !anObject) { + return; + } - haveUpdated(); - oldObject = array[index]; - array[index] = anObject; - anObject->taggedRetain(OSTypeID(OSCollection)); + haveUpdated(); + oldObject = array[index]; + array[index] = anObject; + anObject->taggedRetain(OSTypeID(OSCollection)); - oldObject->taggedRelease(OSTypeID(OSCollection)); + oldObject->taggedRelease(OSTypeID(OSCollection)); } -void OSArray::removeObject(unsigned int index) +void +OSArray::removeObject(unsigned int index) { - unsigned int i; - const OSMetaClassBase *oldObject; + unsigned int i; + const OSMetaClassBase *oldObject; - if (index >= count) - return; + if (index >= count) { + return; + } - haveUpdated(); - oldObject = array[index]; + haveUpdated(); + oldObject = array[index]; - count--; - for (i = index; i < count; i++) - array[i] = array[i+1]; + count--; + for (i = index; i < count; i++) { + array[i] = array[i + 1]; + } - oldObject->taggedRelease(OSTypeID(OSCollection)); + oldObject->taggedRelease(OSTypeID(OSCollection)); } -bool OSArray::isEqualTo(const OSArray *anArray) const +bool +OSArray::isEqualTo(const OSArray *anArray) const { - unsigned int i; - - if ( this == anArray ) - return true; - - if ( count != 
anArray->getCount() ) - return false; - - for ( i = 0; i < count; i++ ) { - if ( !array[i]->isEqualTo(anArray->getObject(i)) ) - return false; - } - - return true; + unsigned int i; + + if (this == anArray) { + return true; + } + + if (count != anArray->getCount()) { + return false; + } + + for (i = 0; i < count; i++) { + if (!array[i]->isEqualTo(anArray->getObject(i))) { + return false; + } + } + + return true; } -bool OSArray::isEqualTo(const OSMetaClassBase *anObject) const +bool +OSArray::isEqualTo(const OSMetaClassBase *anObject) const { - OSArray *otherArray; + OSArray *otherArray; - otherArray = OSDynamicCast(OSArray, anObject); - if ( otherArray ) - return isEqualTo(otherArray); - else - return false; + otherArray = OSDynamicCast(OSArray, anObject); + if (otherArray) { + return isEqualTo(otherArray); + } else { + return false; + } } -OSObject *OSArray::getObject(unsigned int index) const +OSObject * +OSArray::getObject(unsigned int index) const { - if (index >= count) - return 0; - else - return (OSObject *) (const_cast(array[index])); + if (index >= count) { + return 0; + } else { + return (OSObject *) (const_cast(array[index])); + } } -OSObject *OSArray::getLastObject() const +OSObject * +OSArray::getLastObject() const { - if (count == 0) - return 0; - else - return ( OSObject *) (const_cast(array[count - 1])); + if (count == 0) { + return 0; + } else { + return (OSObject *) (const_cast(array[count - 1])); + } } -unsigned int OSArray::getNextIndexOfObject(const OSMetaClassBase * anObject, - unsigned int index) const +unsigned int +OSArray::getNextIndexOfObject(const OSMetaClassBase * anObject, + unsigned int index) const { - while ((index < count) && (array[index] != anObject)) - index++; - if (index >= count) - index = (unsigned int)-1; - return index; + while ((index < count) && (array[index] != anObject)) { + index++; + } + if (index >= count) { + index = (unsigned int)-1; + } + return index; } -unsigned int OSArray::iteratorSize() const +unsigned int +OSArray::iteratorSize() const { - return sizeof(unsigned int); + return sizeof(unsigned int); } -bool OSArray::initIterator(void *inIterator) const +bool +OSArray::initIterator(void *inIterator) const { - unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int *iteratorP = (unsigned int *) inIterator; - *iteratorP = 0; - return true; + *iteratorP = 0; + return true; } -bool OSArray::getNextObjectForIterator(void *inIterator, OSObject **ret) const +bool +OSArray::getNextObjectForIterator(void *inIterator, OSObject **ret) const { - unsigned int *iteratorP = (unsigned int *) inIterator; - unsigned int index = (*iteratorP)++; - - if (index < count) { - *ret = (OSObject *)(const_cast (array[index])); - return true; - } - else { - *ret = 0; - return false; - } + unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int index = (*iteratorP)++; + + if (index < count) { + *ret = (OSObject *)(const_cast (array[index])); + return true; + } else { + *ret = 0; + return false; + } } -bool OSArray::serialize(OSSerialize *s) const +bool +OSArray::serialize(OSSerialize *s) const { - if (s->previouslySerialized(this)) return true; - - if (!s->addXMLStartTag(this, "array")) return false; + if (s->previouslySerialized(this)) { + return true; + } - for (unsigned i = 0; i < count; i++) { - if (array[i] == NULL || !array[i]->serialize(s)) return false; - } + if (!s->addXMLStartTag(this, "array")) { + return false; + } - return s->addXMLEndTag("array"); + for (unsigned i = 0; i < count; i++) { + if (array[i] == NULL || 
!array[i]->serialize(s)) { + return false; + } + } + + return s->addXMLEndTag("array"); } -unsigned OSArray::setOptions(unsigned options, unsigned mask, void *) +unsigned +OSArray::setOptions(unsigned options, unsigned mask, void *) { - unsigned old = super::setOptions(options, mask); - if ((old ^ options) & mask) { - - // Value changed need to recurse over all of the child collections - for ( unsigned i = 0; i < count; i++ ) { - OSCollection *coll = OSDynamicCast(OSCollection, array[i]); - if (coll) - coll->setOptions(options, mask); + unsigned old = super::setOptions(options, mask); + if ((old ^ options) & mask) { + // Value changed need to recurse over all of the child collections + for (unsigned i = 0; i < count; i++) { + OSCollection *coll = OSDynamicCast(OSCollection, array[i]); + if (coll) { + coll->setOptions(options, mask); + } + } } - } - return old; + return old; } -OSCollection * OSArray::copyCollection(OSDictionary *cycleDict) +OSCollection * +OSArray::copyCollection(OSDictionary *cycleDict) { - bool allocDict = !cycleDict; - OSCollection *ret = 0; - OSArray *newArray = 0; - - if (allocDict) { - cycleDict = OSDictionary::withCapacity(16); - if (!cycleDict) - return 0; - } - - do { - // Check for a cycle - ret = super::copyCollection(cycleDict); - if (ret) - continue; - - newArray = OSArray::withArray(this); - if (!newArray) - continue; - - // Insert object into cycle Dictionary - cycleDict->setObject((const OSSymbol *) this, newArray); - - for (unsigned int i = 0; i < count; i++) { - OSCollection *coll = - OSDynamicCast(OSCollection, EXT_CAST(newArray->array[i])); - - if (coll) { - OSCollection *newColl = coll->copyCollection(cycleDict); - if (!newColl) - goto abortCopy; - - newArray->replaceObject(i, newColl); - newColl->release(); - }; - }; - - ret = newArray; - newArray = 0; - - } while (false); + bool allocDict = !cycleDict; + OSCollection *ret = 0; + OSArray *newArray = 0; + + if (allocDict) { + cycleDict = OSDictionary::withCapacity(16); + if (!cycleDict) { + return 0; + } + } + + do { + // Check for a cycle + ret = super::copyCollection(cycleDict); + if (ret) { + continue; + } + + newArray = OSArray::withArray(this); + if (!newArray) { + continue; + } + + // Insert object into cycle Dictionary + cycleDict->setObject((const OSSymbol *) this, newArray); + + for (unsigned int i = 0; i < count; i++) { + OSCollection *coll = + OSDynamicCast(OSCollection, EXT_CAST(newArray->array[i])); + + if (coll) { + OSCollection *newColl = coll->copyCollection(cycleDict); + if (!newColl) { + goto abortCopy; + } + + newArray->replaceObject(i, newColl); + newColl->release(); + } + ; + } + ; + + ret = newArray; + newArray = 0; + } while (false); abortCopy: - if (newArray) - newArray->release(); + if (newArray) { + newArray->release(); + } - if (allocDict) - cycleDict->release(); + if (allocDict) { + cycleDict->release(); + } - return ret; + return ret; } - diff --git a/libkern/c++/OSBoolean.cpp b/libkern/c++/OSBoolean.cpp index 7e67b4e9f..cabe30ab7 100644 --- a/libkern/c++/OSBoolean.cpp +++ b/libkern/c++/OSBoolean.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSBoolean.cpp created by rsulack on Tue Oct 12 1999 */ @@ -50,67 +50,96 @@ static OSBoolean * gOSBooleanFalse = 0; OSBoolean * const & kOSBooleanTrue = gOSBooleanTrue; OSBoolean * const & kOSBooleanFalse = gOSBooleanFalse; -void OSBoolean::initialize() +void +OSBoolean::initialize() { - gOSBooleanTrue = new OSBoolean; - assert(gOSBooleanTrue); - - if (!gOSBooleanTrue->init()) { - gOSBooleanTrue->OSObject::free(); - assert(false); - }; - gOSBooleanTrue->value = true; - - gOSBooleanFalse = new OSBoolean; - assert(gOSBooleanFalse); - - if (!gOSBooleanFalse->init()) { - gOSBooleanFalse->OSObject::free(); - assert(false); - }; - gOSBooleanFalse->value = false; + gOSBooleanTrue = new OSBoolean; + assert(gOSBooleanTrue); + + if (!gOSBooleanTrue->init()) { + gOSBooleanTrue->OSObject::free(); + assert(false); + } + ; + gOSBooleanTrue->value = true; + + gOSBooleanFalse = new OSBoolean; + assert(gOSBooleanFalse); + + if (!gOSBooleanFalse->init()) { + gOSBooleanFalse->OSObject::free(); + assert(false); + } + ; + gOSBooleanFalse->value = false; } -void OSBoolean::free() +void +OSBoolean::free() { - /* - * An OSBoolean should never have free() called on it, since it is a shared - * object, with two non-mutable instances: kOSBooleanTrue, kOSBooleanFalse. - * There will be cases where an incorrect number of releases will cause the - * free() method to be called, however, which we must catch and ignore here. - */ - assert(false); + /* + * An OSBoolean should never have free() called on it, since it is a shared + * object, with two non-mutable instances: kOSBooleanTrue, kOSBooleanFalse. + * There will be cases where an incorrect number of releases will cause the + * free() method to be called, however, which we must catch and ignore here. + */ + assert(false); } -void OSBoolean::taggedRetain(__unused const void *tag) const { } -void OSBoolean::taggedRelease(__unused const void *tag, __unused const int when) const { } +void +OSBoolean::taggedRetain(__unused const void *tag) const +{ +} +void +OSBoolean::taggedRelease(__unused const void *tag, __unused const int when) const +{ +} -OSBoolean *OSBoolean::withBoolean(bool inValue) +OSBoolean * +OSBoolean::withBoolean(bool inValue) { - return (inValue) ? kOSBooleanTrue : kOSBooleanFalse; + return (inValue) ? 
kOSBooleanTrue : kOSBooleanFalse; } -bool OSBoolean::isTrue() const { return value; } -bool OSBoolean::isFalse() const { return !value; } -bool OSBoolean::getValue() const { return value; } +bool +OSBoolean::isTrue() const +{ + return value; +} +bool +OSBoolean::isFalse() const +{ + return !value; +} +bool +OSBoolean::getValue() const +{ + return value; +} -bool OSBoolean::isEqualTo(const OSBoolean *boolean) const +bool +OSBoolean::isEqualTo(const OSBoolean *boolean) const { - return (boolean == this); + return boolean == this; } -bool OSBoolean::isEqualTo(const OSMetaClassBase *obj) const +bool +OSBoolean::isEqualTo(const OSMetaClassBase *obj) const { - OSBoolean * boolean; - if ((boolean = OSDynamicCast(OSBoolean, obj))) - return isEqualTo(boolean); - else - return false; + OSBoolean * boolean; + if ((boolean = OSDynamicCast(OSBoolean, obj))) { + return isEqualTo(boolean); + } else { + return false; + } } -bool OSBoolean::serialize(OSSerialize *s) const +bool +OSBoolean::serialize(OSSerialize *s) const { - if (s->binary) return s->binarySerialize(this); + if (s->binary) { + return s->binarySerialize(this); + } - return s->addString(value ? "" : ""); + return s->addString(value ? "" : ""); } diff --git a/libkern/c++/OSCPPDebug.cpp b/libkern/c++/OSCPPDebug.cpp index 603ce7641..04f2208de 100644 --- a/libkern/c++/OSCPPDebug.cpp +++ b/libkern/c++/OSCPPDebug.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,23 +30,22 @@ __BEGIN_DECLS -void OSPrintMemory( void ) +void +OSPrintMemory( void ) { + OSMetaClass::printInstanceCounts(); - OSMetaClass::printInstanceCounts(); - - IOLog("\n" - "ivar kalloc() 0x%08x\n" - "malloc() 0x%08x\n" - "containers kalloc() 0x%08x\n" - "IOMalloc() 0x%08x\n" - "----------------------------------------\n", - debug_ivars_size, - debug_malloc_size, - debug_container_malloc_size, - debug_iomalloc_size - ); + IOLog("\n" + "ivar kalloc() 0x%08x\n" + "malloc() 0x%08x\n" + "containers kalloc() 0x%08x\n" + "IOMalloc() 0x%08x\n" + "----------------------------------------\n", + debug_ivars_size, + debug_malloc_size, + debug_container_malloc_size, + debug_iomalloc_size + ); } __END_DECLS - diff --git a/libkern/c++/OSCollection.cpp b/libkern/c++/OSCollection.cpp index 260d9b361..459824188 100644 --- a/libkern/c++/OSCollection.cpp +++ b/libkern/c++/OSCollection.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOArray.h created by rsulack on Thu 11-Sep-1997 */ @@ -48,91 +48,98 @@ OSMetaClassDefineReservedUnused(OSCollection, 5); OSMetaClassDefineReservedUnused(OSCollection, 6); OSMetaClassDefineReservedUnused(OSCollection, 7); -bool OSCollection::init() +bool +OSCollection::init() { - if (!super::init()) - return false; + if (!super::init()) { + return false; + } - updateStamp = 0; + updateStamp = 0; - return true; + return true; } -void OSCollection::haveUpdated() +void +OSCollection::haveUpdated() { - if (fOptions & kImmutable) - { - if (!(gIOKitDebug & kOSRegistryModsMode)) - { - panic("Trying to change a collection in the registry"); + if (fOptions & kImmutable) { + if (!(gIOKitDebug & kOSRegistryModsMode)) { + panic("Trying to change a collection in the registry"); + } else { + OSReportWithBacktrace("Trying to change a collection in the registry"); + } } - else - { - OSReportWithBacktrace("Trying to change a collection in the registry"); - } - } - updateStamp++; + updateStamp++; } -unsigned OSCollection::setOptions(unsigned options, unsigned mask, void *) +unsigned +OSCollection::setOptions(unsigned options, unsigned mask, void *) { - unsigned old = fOptions; + unsigned old = fOptions; - if (mask) - fOptions = (old & ~mask) | (options & mask); + if (mask) { + fOptions = (old & ~mask) | (options & mask); + } - return old; + return old; } -OSCollection * OSCollection::copyCollection(OSDictionary *cycleDict) +OSCollection * +OSCollection::copyCollection(OSDictionary *cycleDict) { - if (cycleDict) { - OSObject *obj = cycleDict->getObject((const OSSymbol *) this); - if (obj) - obj->retain(); - - return reinterpret_cast(obj); - } - else { - // If we are here it means that there is a collection subclass that - // hasn't overridden the copyCollection method. In which case just - // return a reference to ourselves. - // Hopefully this collection will not be inserted into the registry - retain(); - return this; - } + if (cycleDict) { + OSObject *obj = cycleDict->getObject((const OSSymbol *) this); + if (obj) { + obj->retain(); + } + + return reinterpret_cast(obj); + } else { + // If we are here it means that there is a collection subclass that + // hasn't overridden the copyCollection method. In which case just + // return a reference to ourselves. 
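The cycleDict threaded through copyCollection() here (and through OSArray::copyCollection() earlier in this patch) is what lets deep copies of registry collections terminate on cyclic object graphs: each collection registers its copy in the dictionary before recursing into its children. Below is a minimal userland model of that protocol, with std::map standing in for OSDictionary and plain nodes standing in for OSCollection; all names are illustrative, not libkern API.

    #include <cassert>
    #include <map>
    #include <vector>

    struct Node {
        std::vector<Node *> children;
    };

    static Node *
    deepCopy(Node *n, std::map<Node *, Node *> &cycleDict)
    {
        auto found = cycleDict.find(n);
        if (found != cycleDict.end()) {
            return found->second;   // already being copied: reuse, do not recurse
        }
        Node *copy = new Node;
        cycleDict[n] = copy;        // register *before* visiting children
        for (Node *child : n->children) {
            copy->children.push_back(deepCopy(child, cycleDict));
        }
        return copy;
    }

    int
    main()
    {
        Node a, b;
        a.children.push_back(&b);
        b.children.push_back(&a);   // deliberate cycle
        std::map<Node *, Node *> cycleDict;
        Node *copyOfA = deepCopy(&a, cycleDict);
        assert(copyOfA->children[0]->children[0] == copyOfA);
        // (copies are intentionally leaked in this sketch)
    }

Registering the copy before recursing is the load-bearing step; registering it after the loop would recurse forever on the a <-> b cycle, which is why setObject() on the cycle dictionary happens before the per-element copy loop in OSArray::copyCollection() above.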
+ // Hopefully this collection will not be inserted into the registry + retain(); + return this; + } } -bool OSCollection::iterateObjects(void * refcon, bool (*callback)(void * refcon, OSObject * object)) +bool +OSCollection::iterateObjects(void * refcon, bool (*callback)(void * refcon, OSObject * object)) { - uint64_t iteratorStore[2]; - unsigned int initialUpdateStamp; - bool done; + uint64_t iteratorStore[2]; + unsigned int initialUpdateStamp; + bool done; - assert(iteratorSize() < sizeof(iteratorStore)); + assert(iteratorSize() < sizeof(iteratorStore)); - if (!initIterator(&iteratorStore[0])) return (false); - - initialUpdateStamp = updateStamp; - done = false; - do - { - OSObject * object; - if (!getNextObjectForIterator(&iteratorStore[0], &object)) break; - done = callback(refcon, object); - } - while (!done && (initialUpdateStamp == updateStamp)); + if (!initIterator(&iteratorStore[0])) { + return false; + } - return initialUpdateStamp == updateStamp; + initialUpdateStamp = updateStamp; + done = false; + do{ + OSObject * object; + if (!getNextObjectForIterator(&iteratorStore[0], &object)) { + break; + } + done = callback(refcon, object); + }while (!done && (initialUpdateStamp == updateStamp)); + + return initialUpdateStamp == updateStamp; } -static bool OSCollectionIterateObjectsBlock(void * refcon, OSObject * object) +static bool +OSCollectionIterateObjectsBlock(void * refcon, OSObject * object) { - bool (^block)(OSObject * object) = (typeof(block)) refcon; - return (block(object)); + bool (^block)(OSObject * object) = (typeof(block))refcon; + return block(object); } -bool OSCollection::iterateObjects(bool (^block)(OSObject * object)) +bool +OSCollection::iterateObjects(bool (^block)(OSObject * object)) { - return (iterateObjects((void *) block, OSCollectionIterateObjectsBlock)); + return iterateObjects((void *) block, OSCollectionIterateObjectsBlock); } diff --git a/libkern/c++/OSCollectionIterator.cpp b/libkern/c++/OSCollectionIterator.cpp index e623b6492..93a2433e6 100644 --- a/libkern/c++/OSCollectionIterator.cpp +++ b/libkern/c++/OSCollectionIterator.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOArray.h created by rsulack on Thu 11-Sep-1997 */ @@ -36,93 +36,101 @@ OSDefineMetaClassAndStructors(OSCollectionIterator, OSIterator) -bool OSCollectionIterator::initWithCollection(const OSCollection *inColl) +bool +OSCollectionIterator::initWithCollection(const OSCollection *inColl) { - if ( !super::init() || !inColl) - return false; + if (!super::init() || !inColl) { + return false; + } - inColl->retain(); - collection = inColl; - collIterator = 0; - initialUpdateStamp = 0; - valid = false; + inColl->retain(); + collection = inColl; + collIterator = 0; + initialUpdateStamp = 0; + valid = false; - return true; + return true; } OSCollectionIterator * OSCollectionIterator::withCollection(const OSCollection *inColl) { + OSCollectionIterator *me = new OSCollectionIterator; - OSCollectionIterator *me = new OSCollectionIterator; - - if (me && !me->initWithCollection(inColl)) { - me->release(); - return 0; - } + if (me && !me->initWithCollection(inColl)) { + me->release(); + return 0; + } - return me; + return me; } -void OSCollectionIterator::free() +void +OSCollectionIterator::free() { - if (collIterator) { - kfree(collIterator, collection->iteratorSize()); - OSCONTAINER_ACCUMSIZE(-((size_t) collection->iteratorSize())); - collIterator = 0; - } - - if (collection) { - collection->release(); - collection = 0; - } - - super::free(); + if (collIterator) { + kfree(collIterator, collection->iteratorSize()); + OSCONTAINER_ACCUMSIZE(-((size_t) collection->iteratorSize())); + collIterator = 0; + } + + if (collection) { + collection->release(); + collection = 0; + } + + super::free(); } -void OSCollectionIterator::reset() +void +OSCollectionIterator::reset() { - valid = false; - - if (!collIterator) { - collIterator = (void *)kalloc_container(collection->iteratorSize()); - OSCONTAINER_ACCUMSIZE(collection->iteratorSize()); - if (!collIterator) - return; - } - - if (!collection->initIterator(collIterator)) - return; - - initialUpdateStamp = collection->updateStamp; - valid = true; + valid = false; + + if (!collIterator) { + collIterator = (void *)kalloc_container(collection->iteratorSize()); + OSCONTAINER_ACCUMSIZE(collection->iteratorSize()); + if (!collIterator) { + return; + } + } + + if (!collection->initIterator(collIterator)) { + return; + } + + initialUpdateStamp = collection->updateStamp; + valid = true; } -bool OSCollectionIterator::isValid() +bool +OSCollectionIterator::isValid() { - if (!collIterator) { - collIterator = (void *)kalloc_container(collection->iteratorSize()); - OSCONTAINER_ACCUMSIZE(collection->iteratorSize()); - if (!collection->initIterator(collIterator)) - return false; - initialUpdateStamp = collection->updateStamp; - valid = true; - } - else if (!valid || collection->updateStamp != initialUpdateStamp) - return false; - - return true; + if (!collIterator) { + collIterator = (void *)kalloc_container(collection->iteratorSize()); + OSCONTAINER_ACCUMSIZE(collection->iteratorSize()); + if (!collection->initIterator(collIterator)) { + return false; + } + initialUpdateStamp = collection->updateStamp; + valid = true; + } else if (!valid || collection->updateStamp != initialUpdateStamp) { + return false; + } + + return true; } -OSObject *OSCollectionIterator::getNextObject() +OSObject * +OSCollectionIterator::getNextObject() { - OSObject *retObj; - bool retVal; + OSObject *retObj; + bool retVal; - if (!isValid()) - return 0; + if (!isValid()) { + return 0; + } - retVal = collection->getNextObjectForIterator(collIterator, 
&retObj); - return (retVal)? retObj : 0; + retVal = collection->getNextObjectForIterator(collIterator, &retObj); + return (retVal)? retObj : 0; } - diff --git a/libkern/c++/OSData.cpp b/libkern/c++/OSData.cpp index e37bb128b..92daa9c1e 100644 --- a/libkern/c++/OSData.cpp +++ b/libkern/c++/OSData.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOData.m created by rsulack on Thu 25-Sep-1997 */ @@ -53,455 +53,562 @@ OSMetaClassDefineReservedUnused(OSData, 7); #define EXTERNAL ((unsigned int) -1) -bool OSData::initWithCapacity(unsigned int inCapacity) +bool +OSData::initWithCapacity(unsigned int inCapacity) { - if (data) - { - OSCONTAINER_ACCUMSIZE(-((size_t)capacity)); - if (!inCapacity || (capacity < inCapacity)) - { - // clean out old data's storage if it isn't big enough - if (capacity < page_size) kfree(data, capacity); - else kmem_free(kernel_map, (vm_offset_t)data, capacity); - data = 0; - capacity = 0; - } - } - - if (!super::init()) - return false; - - if (inCapacity && !data) { - - if (inCapacity < page_size) data = (void *) kalloc_container(inCapacity); - else { - kern_return_t kr; - if (round_page_overflow(inCapacity, &inCapacity)) kr = KERN_RESOURCE_SHORTAGE; - else kr = kmem_alloc(kernel_map, (vm_offset_t *)&data, inCapacity, IOMemoryTag(kernel_map)); - if (KERN_SUCCESS != kr) data = NULL; - } - if (!data) - return false; - capacity = inCapacity; - } - OSCONTAINER_ACCUMSIZE(capacity); - - length = 0; - if (inCapacity < 16) - capacityIncrement = 16; - else - capacityIncrement = inCapacity; - - return true; + if (data) { + OSCONTAINER_ACCUMSIZE(-((size_t)capacity)); + if (!inCapacity || (capacity < inCapacity)) { + // clean out old data's storage if it isn't big enough + if (capacity < page_size) { + kfree(data, capacity); + } else { + kmem_free(kernel_map, (vm_offset_t)data, capacity); + } + data = 0; + capacity = 0; + } + } + + if (!super::init()) { + return false; + } + + if (inCapacity && !data) { + if (inCapacity < page_size) { + data = (void *) kalloc_container(inCapacity); + } else { + kern_return_t kr; + if (round_page_overflow(inCapacity, &inCapacity)) { + kr = KERN_RESOURCE_SHORTAGE; + } else { + kr = kmem_alloc(kernel_map, (vm_offset_t *)&data, inCapacity, IOMemoryTag(kernel_map)); + } + if (KERN_SUCCESS != kr) { + data = NULL; + } + } + if (!data) { + return false; + } + capacity = inCapacity; + } + OSCONTAINER_ACCUMSIZE(capacity); + + length = 0; + if (inCapacity < 16) 
{ + capacityIncrement = 16; + } else { + capacityIncrement = inCapacity; + } + + return true; } -bool OSData::initWithBytes(const void *bytes, unsigned int inLength) +bool +OSData::initWithBytes(const void *bytes, unsigned int inLength) { - if ((inLength && !bytes) || !initWithCapacity(inLength)) - return false; + if ((inLength && !bytes) || !initWithCapacity(inLength)) { + return false; + } - if (bytes != data) - bcopy(bytes, data, inLength); - length = inLength; + if (bytes != data) { + bcopy(bytes, data, inLength); + } + length = inLength; - return true; + return true; } -bool OSData::initWithBytesNoCopy(void *bytes, unsigned int inLength) +bool +OSData::initWithBytesNoCopy(void *bytes, unsigned int inLength) { - if (!super::init()) - return false; + if (!super::init()) { + return false; + } - length = inLength; - capacity = EXTERNAL; - data = bytes; + length = inLength; + capacity = EXTERNAL; + data = bytes; - return true; + return true; } -bool OSData::initWithData(const OSData *inData) +bool +OSData::initWithData(const OSData *inData) { - return initWithBytes(inData->data, inData->length); + return initWithBytes(inData->data, inData->length); } -bool OSData::initWithData(const OSData *inData, - unsigned int start, unsigned int inLength) +bool +OSData::initWithData(const OSData *inData, + unsigned int start, unsigned int inLength) { - const void *localData = inData->getBytesNoCopy(start, inLength); + const void *localData = inData->getBytesNoCopy(start, inLength); - if (localData) - return initWithBytes(localData, inLength); - else - return false; + if (localData) { + return initWithBytes(localData, inLength); + } else { + return false; + } } -OSData *OSData::withCapacity(unsigned int inCapacity) +OSData * +OSData::withCapacity(unsigned int inCapacity) { - OSData *me = new OSData; + OSData *me = new OSData; - if (me && !me->initWithCapacity(inCapacity)) { - me->release(); - return 0; - } + if (me && !me->initWithCapacity(inCapacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSData *OSData::withBytes(const void *bytes, unsigned int inLength) +OSData * +OSData::withBytes(const void *bytes, unsigned int inLength) { - OSData *me = new OSData; + OSData *me = new OSData; - if (me && !me->initWithBytes(bytes, inLength)) { - me->release(); - return 0; - } - return me; + if (me && !me->initWithBytes(bytes, inLength)) { + me->release(); + return 0; + } + return me; } -OSData *OSData::withBytesNoCopy(void *bytes, unsigned int inLength) +OSData * +OSData::withBytesNoCopy(void *bytes, unsigned int inLength) { - OSData *me = new OSData; + OSData *me = new OSData; - if (me && !me->initWithBytesNoCopy(bytes, inLength)) { - me->release(); - return 0; - } + if (me && !me->initWithBytesNoCopy(bytes, inLength)) { + me->release(); + return 0; + } - return me; + return me; } -OSData *OSData::withData(const OSData *inData) +OSData * +OSData::withData(const OSData *inData) { - OSData *me = new OSData; + OSData *me = new OSData; - if (me && !me->initWithData(inData)) { - me->release(); - return 0; - } + if (me && !me->initWithData(inData)) { + me->release(); + return 0; + } - return me; + return me; } -OSData *OSData::withData(const OSData *inData, - unsigned int start, unsigned int inLength) +OSData * +OSData::withData(const OSData *inData, + unsigned int start, unsigned int inLength) { - OSData *me = new OSData; + OSData *me = new OSData; - if (me && !me->initWithData(inData, start, inLength)) { - me->release(); - return 0; - } + if (me && !me->initWithData(inData, start, inLength)) { 
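Two allocation decisions recur through this file and through OSArray/OSDictionary above: buffers smaller than page_size come from kalloc_container() while page-sized ones go through kmem_alloc()/kmem_free(), and every growth request is rounded up to a multiple of capacityIncrement with an explicit unsigned-wrap check. The following is a host-side model of just the rounding done by OSData::ensureCapacity() below; the page-size and reallocation handling is omitted, so this is a sketch of the arithmetic, not the kernel code.

    #include <cassert>
    #include <climits>

    static unsigned int
    roundedCapacity(unsigned int newCapacity, unsigned int capacity,
        unsigned int capacityIncrement)
    {
        if (newCapacity <= capacity) {
            return capacity;        // already big enough
        }
        unsigned int finalCapacity =
            (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement;
        if (finalCapacity < newCapacity) {
            return capacity;        // multiplication wrapped: refuse to grow
        }
        return finalCapacity;
    }

    int
    main()
    {
        // 100 bytes with a 16-byte increment rounds up to 112.
        assert(roundedCapacity(100, 16, 16) == 112);
        // A request near UINT_MAX wraps during rounding, so the
        // capacity is left unchanged rather than silently shrunk.
        assert(roundedCapacity(UINT_MAX - 2, 16, 4096) == 16);
    }

Returning the old capacity on overflow (instead of failing loudly) matches the convention the callers rely on: appendBytes() and setObject() compare the returned capacity against the requested size and bail out if it did not grow.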
+ me->release(); + return 0; + } - return me; + return me; } -void OSData::free() +void +OSData::free() { - if ((capacity != EXTERNAL) && data && capacity) { - if (capacity < page_size) kfree(data, capacity); - else kmem_free(kernel_map, (vm_offset_t)data, capacity); - OSCONTAINER_ACCUMSIZE( -((size_t)capacity) ); - } else if (capacity == EXTERNAL) { - DeallocFunction freemem = reserved ? reserved->deallocFunction : NULL; - if (freemem && data && length) { - freemem(data, length); - } - } - if (reserved) kfree(reserved, sizeof(ExpansionData)); - super::free(); + if ((capacity != EXTERNAL) && data && capacity) { + if (capacity < page_size) { + kfree(data, capacity); + } else { + kmem_free(kernel_map, (vm_offset_t)data, capacity); + } + OSCONTAINER_ACCUMSIZE( -((size_t)capacity)); + } else if (capacity == EXTERNAL) { + DeallocFunction freemem = reserved ? reserved->deallocFunction : NULL; + if (freemem && data && length) { + freemem(data, length); + } + } + if (reserved) { + kfree(reserved, sizeof(ExpansionData)); + } + super::free(); } -unsigned int OSData::getLength() const { return length; } -unsigned int OSData::getCapacity() const { return capacity; } +unsigned int +OSData::getLength() const +{ + return length; +} +unsigned int +OSData::getCapacity() const +{ + return capacity; +} -unsigned int OSData::getCapacityIncrement() const -{ - return capacityIncrement; +unsigned int +OSData::getCapacityIncrement() const +{ + return capacityIncrement; } -unsigned int OSData::setCapacityIncrement(unsigned increment) +unsigned int +OSData::setCapacityIncrement(unsigned increment) { - return capacityIncrement = increment; + return capacityIncrement = increment; } // xx-review: does not check for capacity == EXTERNAL -unsigned int OSData::ensureCapacity(unsigned int newCapacity) +unsigned int +OSData::ensureCapacity(unsigned int newCapacity) { - unsigned char * newData; - unsigned int finalCapacity; - void * copydata; - kern_return_t kr; - - if (newCapacity <= capacity) - return capacity; - - finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) - * capacityIncrement; + unsigned char * newData; + unsigned int finalCapacity; + void * copydata; + kern_return_t kr; - // integer overflow check - if (finalCapacity < newCapacity) return capacity; + if (newCapacity <= capacity) { + return capacity; + } - copydata = data; + finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; - if (finalCapacity >= page_size) { - // round up - finalCapacity = round_page_32(finalCapacity); // integer overflow check - if (finalCapacity < newCapacity) return capacity; - if (capacity >= page_size) { - copydata = NULL; - kr = kmem_realloc(kernel_map, - (vm_offset_t)data, - capacity, - (vm_offset_t *)&newData, - finalCapacity, - IOMemoryTag(kernel_map)); + if (finalCapacity < newCapacity) { + return capacity; + } + + copydata = data; + + if (finalCapacity >= page_size) { + // round up + finalCapacity = round_page_32(finalCapacity); + // integer overflow check + if (finalCapacity < newCapacity) { + return capacity; + } + if (capacity >= page_size) { + copydata = NULL; + kr = kmem_realloc(kernel_map, + (vm_offset_t)data, + capacity, + (vm_offset_t *)&newData, + finalCapacity, + IOMemoryTag(kernel_map)); + } else { + kr = kmem_alloc(kernel_map, (vm_offset_t *)&newData, finalCapacity, IOMemoryTag(kernel_map)); + } + if (KERN_SUCCESS != kr) { + newData = NULL; + } } else { - kr = kmem_alloc(kernel_map, (vm_offset_t *)&newData, finalCapacity, IOMemoryTag(kernel_map)); - } - if (KERN_SUCCESS != 
kr) newData = NULL; - } - else newData = (unsigned char *) kalloc_container(finalCapacity); - - if ( newData ) { - bzero(newData + capacity, finalCapacity - capacity); - if (copydata) bcopy(copydata, newData, capacity); - if (data) { - if (capacity < page_size) kfree(data, capacity); - else kmem_free(kernel_map, (vm_offset_t)data, capacity); - } - OSCONTAINER_ACCUMSIZE( ((size_t)finalCapacity) - ((size_t)capacity) ); - data = (void *) newData; - capacity = finalCapacity; - } - - return capacity; + newData = (unsigned char *) kalloc_container(finalCapacity); + } + + if (newData) { + bzero(newData + capacity, finalCapacity - capacity); + if (copydata) { + bcopy(copydata, newData, capacity); + } + if (data) { + if (capacity < page_size) { + kfree(data, capacity); + } else { + kmem_free(kernel_map, (vm_offset_t)data, capacity); + } + } + OSCONTAINER_ACCUMSIZE(((size_t)finalCapacity) - ((size_t)capacity)); + data = (void *) newData; + capacity = finalCapacity; + } + + return capacity; } -bool OSData::appendBytes(const void *bytes, unsigned int inLength) +bool +OSData::appendBytes(const void *bytes, unsigned int inLength) { - unsigned int newSize; + unsigned int newSize; - if (!inLength) - return true; + if (!inLength) { + return true; + } - if (capacity == EXTERNAL) - return false; - - if (os_add_overflow(length, inLength, &newSize)) - return false; + if (capacity == EXTERNAL) { + return false; + } - if ( (newSize > capacity) && newSize > ensureCapacity(newSize) ) - return false; + if (os_add_overflow(length, inLength, &newSize)) { + return false; + } + + if ((newSize > capacity) && newSize > ensureCapacity(newSize)) { + return false; + } - if (bytes) - bcopy(bytes, &((unsigned char *)data)[length], inLength); - else - bzero(&((unsigned char *)data)[length], inLength); + if (bytes) { + bcopy(bytes, &((unsigned char *)data)[length], inLength); + } else { + bzero(&((unsigned char *)data)[length], inLength); + } - length = newSize; + length = newSize; - return true; + return true; } -bool OSData::appendByte(unsigned char byte, unsigned int inLength) +bool +OSData::appendByte(unsigned char byte, unsigned int inLength) { - unsigned int newSize; + unsigned int newSize; - if (!inLength) - return true; + if (!inLength) { + return true; + } - if (capacity == EXTERNAL) - return false; - - if (os_add_overflow(length, inLength, &newSize)) - return false; + if (capacity == EXTERNAL) { + return false; + } - if ( (newSize > capacity) && newSize > ensureCapacity(newSize) ) - return false; + if (os_add_overflow(length, inLength, &newSize)) { + return false; + } - memset(&((unsigned char *)data)[length], byte, inLength); - length = newSize; + if ((newSize > capacity) && newSize > ensureCapacity(newSize)) { + return false; + } - return true; + memset(&((unsigned char *)data)[length], byte, inLength); + length = newSize; + + return true; } -bool OSData::appendBytes(const OSData *other) +bool +OSData::appendBytes(const OSData *other) { - return appendBytes(other->data, other->length); + return appendBytes(other->data, other->length); } -const void *OSData::getBytesNoCopy() const +const void * +OSData::getBytesNoCopy() const { - if (!length) - return 0; - else - return data; + if (!length) { + return 0; + } else { + return data; + } } -const void *OSData::getBytesNoCopy(unsigned int start, - unsigned int inLength) const +const void * +OSData::getBytesNoCopy(unsigned int start, + unsigned int inLength) const { - const void *outData = 0; + const void *outData = 0; - if (length - && start < length - && (start + 
inLength) >= inLength // overflow check - && (start + inLength) <= length) - outData = (const void *) ((char *) data + start); + if (length + && start < length + && (start + inLength) >= inLength // overflow check + && (start + inLength) <= length) { + outData = (const void *) ((char *) data + start); + } - return outData; + return outData; } -bool OSData::isEqualTo(const OSData *aData) const +bool +OSData::isEqualTo(const OSData *aData) const { - unsigned int len; + unsigned int len; - len = aData->length; - if ( length != len ) - return false; + len = aData->length; + if (length != len) { + return false; + } - return isEqualTo(aData->data, len); + return isEqualTo(aData->data, len); } -bool OSData::isEqualTo(const void *someData, unsigned int inLength) const +bool +OSData::isEqualTo(const void *someData, unsigned int inLength) const { - return (length >= inLength) && (bcmp(data, someData, inLength) == 0); + return (length >= inLength) && (bcmp(data, someData, inLength) == 0); } -bool OSData::isEqualTo(const OSMetaClassBase *obj) const +bool +OSData::isEqualTo(const OSMetaClassBase *obj) const { - OSData * otherData; - OSString * str; - - if ((otherData = OSDynamicCast(OSData, obj))) - return isEqualTo(otherData); - else if ((str = OSDynamicCast (OSString, obj))) - return isEqualTo(str); - else - return false; + OSData * otherData; + OSString * str; + + if ((otherData = OSDynamicCast(OSData, obj))) { + return isEqualTo(otherData); + } else if ((str = OSDynamicCast(OSString, obj))) { + return isEqualTo(str); + } else { + return false; + } } -bool OSData::isEqualTo(const OSString *obj) const +bool +OSData::isEqualTo(const OSString *obj) const { - const char * aCString; - char * dataPtr; - unsigned int checkLen = length; - unsigned int stringLen; - - if (!obj) - return false; + const char * aCString; + char * dataPtr; + unsigned int checkLen = length; + unsigned int stringLen; - stringLen = obj->getLength (); - - dataPtr = (char *)data; - - if (stringLen != checkLen) { - - // check for the fact that OSData may be a buffer that - // that includes a termination byte and will thus have - // a length of the actual string length PLUS 1. In this - // case we verify that the additional byte is a terminator - // and if so count the two lengths as being the same. + if (!obj) { + return false; + } - if ( (checkLen - stringLen) == 1) { - if (dataPtr[checkLen-1] != 0) // non-zero means not a terminator and thus not likely the same - return false; - checkLen--; - } - else - return false; - } + stringLen = obj->getLength(); + + dataPtr = (char *)data; + + if (stringLen != checkLen) { + // check for the fact that OSData may be a buffer that + // that includes a termination byte and will thus have + // a length of the actual string length PLUS 1. In this + // case we verify that the additional byte is a terminator + // and if so count the two lengths as being the same. 
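This terminator special case means an OSData built from a whole C string buffer (strlen + 1 bytes, trailing NUL included) still compares equal to the corresponding OSString. A standalone model of that comparison follows; dataEqualsString is a hypothetical helper name, the real logic is in OSData::isEqualTo(const OSString *) just below.

    #include <cassert>
    #include <cstring>

    static bool
    dataEqualsString(const char *data, unsigned int dataLen, const char *str)
    {
        unsigned int strLen = (unsigned int) strlen(str);
        unsigned int checkLen = dataLen;
        if (strLen != checkLen) {
            // tolerate exactly one extra byte, and only if it is a NUL;
            // if strLen > checkLen the unsigned difference wraps and the
            // test fails, matching the kernel behavior
            if ((checkLen - strLen) != 1 || data[checkLen - 1] != 0) {
                return false;
            }
            checkLen--;
        }
        return memcmp(data, str, checkLen) == 0;
    }

    int
    main()
    {
        assert(dataEqualsString("abc", 3, "abc"));      // same bytes
        assert(dataEqualsString("abc\0", 4, "abc"));    // trailing NUL tolerated
        assert(!dataEqualsString("abcd", 4, "abc"));    // extra byte is not a NUL
    }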
+ + if ((checkLen - stringLen) == 1) { + if (dataPtr[checkLen - 1] != 0) { // non-zero means not a terminator and thus not likely the same + return false; + } + checkLen--; + } else { + return false; + } + } - aCString = obj->getCStringNoCopy (); + aCString = obj->getCStringNoCopy(); - for ( unsigned int i=0; i < checkLen; i++ ) { - if ( *dataPtr++ != aCString[i] ) - return false; - } + for (unsigned int i = 0; i < checkLen; i++) { + if (*dataPtr++ != aCString[i]) { + return false; + } + } - return true; + return true; } -//this was taken from CFPropertyList.c +//this was taken from CFPropertyList.c static const char __CFPLDataEncodeTable[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; -bool OSData::serialize(OSSerialize *s) const +bool +OSData::serialize(OSSerialize *s) const { - unsigned int i; - const unsigned char *p; - unsigned char c; - unsigned int serializeLength; + unsigned int i; + const unsigned char *p; + unsigned char c; + unsigned int serializeLength; - if (s->previouslySerialized(this)) return true; + if (s->previouslySerialized(this)) { + return true; + } - if (!s->addXMLStartTag(this, "data")) return false; + if (!s->addXMLStartTag(this, "data")) { + return false; + } - serializeLength = length; - if (reserved && reserved->disableSerialization) serializeLength = 0; + serializeLength = length; + if (reserved && reserved->disableSerialization) { + serializeLength = 0; + } - for (i = 0, p = (unsigned char *)data; i < serializeLength; i++, p++) { - /* 3 bytes are encoded as 4 */ - switch (i % 3) { + for (i = 0, p = (unsigned char *)data; i < serializeLength; i++, p++) { + /* 3 bytes are encoded as 4 */ + switch (i % 3) { + case 0: + c = __CFPLDataEncodeTable[((p[0] >> 2) & 0x3f)]; + if (!s->addChar(c)) { + return false; + } + break; + case 1: + c = __CFPLDataEncodeTable[((((p[-1] << 8) | p[0]) >> 4) & 0x3f)]; + if (!s->addChar(c)) { + return false; + } + break; + case 2: + c = __CFPLDataEncodeTable[((((p[-1] << 8) | p[0]) >> 6) & 0x3f)]; + if (!s->addChar(c)) { + return false; + } + c = __CFPLDataEncodeTable[(p[0] & 0x3f)]; + if (!s->addChar(c)) { + return false; + } + break; + } + } + switch (i % 3) { case 0: - c = __CFPLDataEncodeTable [ ((p[0] >> 2) & 0x3f)]; - if (!s->addChar(c)) return false; break; case 1: - c = __CFPLDataEncodeTable [ ((((p[-1] << 8) | p[0]) >> 4) & 0x3f)]; - if (!s->addChar(c)) return false; + c = __CFPLDataEncodeTable[((p[-1] << 4) & 0x30)]; + if (!s->addChar(c)) { + return false; + } + if (!s->addChar('=')) { + return false; + } + if (!s->addChar('=')) { + return false; + } break; case 2: - c = __CFPLDataEncodeTable [ ((((p[-1] << 8) | p[0]) >> 6) & 0x3f)]; - if (!s->addChar(c)) return false; - c = __CFPLDataEncodeTable [ (p[0] & 0x3f)]; - if (!s->addChar(c)) return false; + c = __CFPLDataEncodeTable[((p[-1] << 2) & 0x3c)]; + if (!s->addChar(c)) { + return false; + } + if (!s->addChar('=')) { + return false; + } break; } - } - switch (i % 3) { - case 0: - break; - case 1: - c = __CFPLDataEncodeTable [ ((p[-1] << 4) & 0x30)]; - if (!s->addChar(c)) return false; - if (!s->addChar('=')) return false; - if (!s->addChar('=')) return false; - break; - case 2: - c = __CFPLDataEncodeTable [ ((p[-1] << 2) & 0x3c)]; - if (!s->addChar(c)) return false; - if (!s->addChar('=')) return false; - break; - } - - return s->addXMLEndTag("data"); + + return s->addXMLEndTag("data"); } -void OSData::setDeallocFunction(DeallocFunction func) +void +OSData::setDeallocFunction(DeallocFunction func) { - if (!reserved) - { - reserved = (typeof(reserved)) 
kalloc_container(sizeof(ExpansionData)); - if (!reserved) return; - bzero(reserved, sizeof(ExpansionData)); - } - reserved->deallocFunction = func; + if (!reserved) { + reserved = (typeof(reserved))kalloc_container(sizeof(ExpansionData)); + if (!reserved) { + return; + } + bzero(reserved, sizeof(ExpansionData)); + } + reserved->deallocFunction = func; } -void OSData::setSerializable(bool serializable) +void +OSData::setSerializable(bool serializable) { - if (!reserved) - { - reserved = (typeof(reserved)) kalloc_container(sizeof(ExpansionData)); - if (!reserved) return; - bzero(reserved, sizeof(ExpansionData)); - } - reserved->disableSerialization = (!serializable); + if (!reserved) { + reserved = (typeof(reserved))kalloc_container(sizeof(ExpansionData)); + if (!reserved) { + return; + } + bzero(reserved, sizeof(ExpansionData)); + } + reserved->disableSerialization = (!serializable); } -bool OSData::isSerializable(void) +bool +OSData::isSerializable(void) { - return (!reserved || !reserved->disableSerialization); + return !reserved || !reserved->disableSerialization; } diff --git a/libkern/c++/OSDictionary.cpp b/libkern/c++/OSDictionary.cpp index 868152ba1..d9c756798 100644 --- a/libkern/c++/OSDictionary.cpp +++ b/libkern/c++/OSDictionary.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSDictionary.m created by rsulack on Fri 12-Sep-1997 */ @@ -52,702 +52,852 @@ OSMetaClassDefineReservedUnused(OSDictionary, 7); #define EXT_CAST(obj) \ reinterpret_cast(const_cast(obj)) -bool OSDictionary::initWithCapacity(unsigned int inCapacity) +extern "C" { +void qsort(void *, size_t, size_t, int (*)(const void *, const void *)); +} + +int +OSDictionary::dictEntry::compare(const void *_e1, const void *_e2) +{ + const OSDictionary::dictEntry *e1 = (const OSDictionary::dictEntry *)_e1; + const OSDictionary::dictEntry *e2 = (const OSDictionary::dictEntry *)_e2; + + if ((uintptr_t)e1->key == (uintptr_t)e2->key) { + return 0; + } + + return (uintptr_t)e1->key > (uintptr_t)e2->key ? 
1 : -1; +} + +void +OSDictionary::sortBySymbol(void) { - if (!super::init()) - return false; + qsort(dictionary, count, sizeof(OSDictionary::dictEntry), + &OSDictionary::dictEntry::compare); +} + +bool +OSDictionary::initWithCapacity(unsigned int inCapacity) +{ + if (!super::init()) { + return false; + } - if (inCapacity > (UINT_MAX / sizeof(dictEntry))) - return false; + if (inCapacity > (UINT_MAX / sizeof(dictEntry))) { + return false; + } - unsigned int size = inCapacity * sizeof(dictEntry); + unsigned int size = inCapacity * sizeof(dictEntry); //fOptions |= kSort; - dictionary = (dictEntry *) kalloc_container(size); - if (!dictionary) - return false; + dictionary = (dictEntry *) kalloc_container(size); + if (!dictionary) { + return false; + } - bzero(dictionary, size); - OSCONTAINER_ACCUMSIZE(size); + bzero(dictionary, size); + OSCONTAINER_ACCUMSIZE(size); - count = 0; - capacity = inCapacity; - capacityIncrement = (inCapacity)? inCapacity : 16; + count = 0; + capacity = inCapacity; + capacityIncrement = (inCapacity)? inCapacity : 16; - return true; + return true; } -bool OSDictionary::initWithObjects(const OSObject *objects[], - const OSSymbol *keys[], - unsigned int theCount, - unsigned int theCapacity) +bool +OSDictionary::initWithObjects(const OSObject *objects[], + const OSSymbol *keys[], + unsigned int theCount, + unsigned int theCapacity) { - unsigned int newCapacity = theCount; + unsigned int newCapacity = theCount; + + if (!objects || !keys) { + return false; + } - if (!objects || !keys) - return false; + if (theCapacity) { + if (theCount > theCapacity) { + return false; + } - if ( theCapacity ) { - if (theCount > theCapacity) - return false; - - newCapacity = theCapacity; - } + newCapacity = theCapacity; + } - if (!initWithCapacity(newCapacity)) - return false; + if (!initWithCapacity(newCapacity)) { + return false; + } - for (unsigned int i = 0; i < theCount; i++) { - const OSMetaClassBase *newObject = *objects++; + for (unsigned int i = 0; i < theCount; i++) { + const OSMetaClassBase *newObject = *objects++; - if (!newObject || !keys[i] || !setObject(keys[i], newObject)) - return false; - } + if (!newObject || !keys[i] || !setObject(keys[i], newObject)) { + return false; + } + } - return true; + return true; } -bool OSDictionary::initWithObjects(const OSObject *objects[], - const OSString *keys[], - unsigned int theCount, - unsigned int theCapacity) +bool +OSDictionary::initWithObjects(const OSObject *objects[], + const OSString *keys[], + unsigned int theCount, + unsigned int theCapacity) { - unsigned int newCapacity = theCount; + unsigned int newCapacity = theCount; - if (!objects || !keys) - return false; + if (!objects || !keys) { + return false; + } - if ( theCapacity ) { - if (theCount > theCapacity) - return false; + if (theCapacity) { + if (theCount > theCapacity) { + return false; + } - newCapacity = theCapacity; - } + newCapacity = theCapacity; + } - if (!initWithCapacity(newCapacity)) - return false; + if (!initWithCapacity(newCapacity)) { + return false; + } - for (unsigned int i = 0; i < theCount; i++) { - const OSSymbol *key = OSSymbol::withString(*keys++); - const OSMetaClassBase *newObject = *objects++; + for (unsigned int i = 0; i < theCount; i++) { + const OSSymbol *key = OSSymbol::withString(*keys++); + const OSMetaClassBase *newObject = *objects++; - if (!key) - return false; + if (!key) { + return false; + } - if (!newObject || !setObject(key, newObject)) { - key->release(); - return false; - } + if (!newObject || !setObject(key, newObject)) { + 
key->release(); + return false; + } - key->release(); - } + key->release(); + } - return true; + return true; } -bool OSDictionary::initWithDictionary(const OSDictionary *dict, - unsigned int theCapacity) +bool +OSDictionary::initWithDictionary(const OSDictionary *dict, + unsigned int theCapacity) { - unsigned int newCapacity; + unsigned int newCapacity; - if ( !dict ) - return false; + if (!dict) { + return false; + } - newCapacity = dict->count; + newCapacity = dict->count; - if ( theCapacity ) { - if ( dict->count > theCapacity ) - return false; - - newCapacity = theCapacity; - } + if (theCapacity) { + if (dict->count > theCapacity) { + return false; + } - if (!initWithCapacity(newCapacity)) - return false; + newCapacity = theCapacity; + } - if ((kSort & fOptions) && !(kSort & dict->fOptions)) { - for (unsigned int i = 0; i < dict->count; i++) { - if (!setObject(dict->dictionary[i].key, dict->dictionary[i].value)) { + if (!initWithCapacity(newCapacity)) { return false; - } } - return true; - } - count = dict->count; - bcopy(dict->dictionary, dictionary, count * sizeof(dictEntry)); - for (unsigned int i = 0; i < count; i++) { - dictionary[i].key->taggedRetain(OSTypeID(OSCollection)); - dictionary[i].value->taggedRetain(OSTypeID(OSCollection)); - } + count = dict->count; + bcopy(dict->dictionary, dictionary, count * sizeof(dictEntry)); + for (unsigned int i = 0; i < count; i++) { + dictionary[i].key->taggedRetain(OSTypeID(OSCollection)); + dictionary[i].value->taggedRetain(OSTypeID(OSCollection)); + } - return true; + if ((kSort & fOptions) && !(kSort & dict->fOptions)) { + sortBySymbol(); + } + + return true; } -OSDictionary *OSDictionary::withCapacity(unsigned int capacity) +OSDictionary * +OSDictionary::withCapacity(unsigned int capacity) { - OSDictionary *me = new OSDictionary; + OSDictionary *me = new OSDictionary; - if (me && !me->initWithCapacity(capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithCapacity(capacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSDictionary *OSDictionary::withObjects(const OSObject *objects[], - const OSSymbol *keys[], - unsigned int count, - unsigned int capacity) +OSDictionary * +OSDictionary::withObjects(const OSObject *objects[], + const OSSymbol *keys[], + unsigned int count, + unsigned int capacity) { - OSDictionary *me = new OSDictionary; + OSDictionary *me = new OSDictionary; - if (me && !me->initWithObjects(objects, keys, count, capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithObjects(objects, keys, count, capacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSDictionary *OSDictionary::withObjects(const OSObject *objects[], - const OSString *keys[], - unsigned int count, - unsigned int capacity) +OSDictionary * +OSDictionary::withObjects(const OSObject *objects[], + const OSString *keys[], + unsigned int count, + unsigned int capacity) { - OSDictionary *me = new OSDictionary; + OSDictionary *me = new OSDictionary; - if (me && !me->initWithObjects(objects, keys, count, capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithObjects(objects, keys, count, capacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSDictionary *OSDictionary::withDictionary(const OSDictionary *dict, - unsigned int capacity) +OSDictionary * +OSDictionary::withDictionary(const OSDictionary *dict, + unsigned int capacity) { - OSDictionary *me = new OSDictionary; + OSDictionary *me = new OSDictionary; - if (me && !me->initWithDictionary(dict, capacity)) 
{ - me->release(); - return 0; - } + if (me && !me->initWithDictionary(dict, capacity)) { + me->release(); + return 0; + } - return me; + return me; } -void OSDictionary::free() +void +OSDictionary::free() { - (void) super::setOptions(0, kImmutable); - flushCollection(); - if (dictionary) { - kfree(dictionary, capacity * sizeof(dictEntry)); - OSCONTAINER_ACCUMSIZE( -(capacity * sizeof(dictEntry)) ); - } + (void) super::setOptions(0, kImmutable); + flushCollection(); + if (dictionary) { + kfree(dictionary, capacity * sizeof(dictEntry)); + OSCONTAINER_ACCUMSIZE( -(capacity * sizeof(dictEntry))); + } - super::free(); + super::free(); } -unsigned int OSDictionary::getCount() const { return count; } -unsigned int OSDictionary::getCapacity() const { return capacity; } +unsigned int +OSDictionary::getCount() const +{ + return count; +} +unsigned int +OSDictionary::getCapacity() const +{ + return capacity; +} -unsigned int OSDictionary::getCapacityIncrement() const +unsigned int +OSDictionary::getCapacityIncrement() const { - return capacityIncrement; + return capacityIncrement; } -unsigned int OSDictionary::setCapacityIncrement(unsigned int increment) +unsigned int +OSDictionary::setCapacityIncrement(unsigned int increment) { - capacityIncrement = (increment)? increment : 16; + capacityIncrement = (increment)? increment : 16; - return capacityIncrement; + return capacityIncrement; } -unsigned int OSDictionary::ensureCapacity(unsigned int newCapacity) +unsigned int +OSDictionary::ensureCapacity(unsigned int newCapacity) { - dictEntry *newDict; - unsigned int finalCapacity; - vm_size_t oldSize, newSize; + dictEntry *newDict; + unsigned int finalCapacity; + vm_size_t oldSize, newSize; - if (newCapacity <= capacity) - return capacity; + if (newCapacity <= capacity) { + return capacity; + } + + // round up + finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; - // round up - finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) - * capacityIncrement; + // integer overflow check + if (finalCapacity < newCapacity || (finalCapacity > (UINT_MAX / sizeof(dictEntry)))) { + return capacity; + } - // integer overflow check - if (finalCapacity < newCapacity || (finalCapacity > (UINT_MAX / sizeof(dictEntry)))) - return capacity; - - newSize = sizeof(dictEntry) * finalCapacity; + newSize = sizeof(dictEntry) * finalCapacity; - newDict = (dictEntry *) kallocp_container(&newSize); - if (newDict) { - // use all of the actual allocation size - finalCapacity = newSize / sizeof(dictEntry); + newDict = (dictEntry *) kallocp_container(&newSize); + if (newDict) { + // use all of the actual allocation size + finalCapacity = newSize / sizeof(dictEntry); - oldSize = sizeof(dictEntry) * capacity; + oldSize = sizeof(dictEntry) * capacity; - bcopy(dictionary, newDict, oldSize); - bzero(&newDict[capacity], newSize - oldSize); + bcopy(dictionary, newDict, oldSize); + bzero(&newDict[capacity], newSize - oldSize); - OSCONTAINER_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); - kfree(dictionary, oldSize); + OSCONTAINER_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); + kfree(dictionary, oldSize); - dictionary = newDict; - capacity = finalCapacity; - } + dictionary = newDict; + capacity = finalCapacity; + } - return capacity; + return capacity; } -void OSDictionary::flushCollection() +void +OSDictionary::flushCollection() { - haveUpdated(); + haveUpdated(); - for (unsigned int i = 0; i < count; i++) { - dictionary[i].key->taggedRelease(OSTypeID(OSCollection)); - 
dictionary[i].value->taggedRelease(OSTypeID(OSCollection)); - } - count = 0; + for (unsigned int i = 0; i < count; i++) { + dictionary[i].key->taggedRelease(OSTypeID(OSCollection)); + dictionary[i].value->taggedRelease(OSTypeID(OSCollection)); + } + count = 0; } -bool OSDictionary:: +bool +OSDictionary:: setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject, bool onlyAdd) { - unsigned int i; - bool exists; + unsigned int i; + bool exists; - if (!anObject || !aKey) - return false; + if (!anObject || !aKey) { + return false; + } - // if the key exists, replace the object + // if the key exists, replace the object + + if (fOptions & kSort) { + i = OSSymbol::bsearch(aKey, &dictionary[0], count, sizeof(dictionary[0])); + exists = (i < count) && (aKey == dictionary[i].key); + } else { + for (exists = false, i = 0; i < count; i++) { + if ((exists = (aKey == dictionary[i].key))) { + break; + } + } + } - if (fOptions & kSort) { - i = OSSymbol::bsearch(aKey, &dictionary[0], count, sizeof(dictionary[0])); - exists = (i < count) && (aKey == dictionary[i].key); - } else for (exists = false, i = 0; i < count; i++) { - if ((exists = (aKey == dictionary[i].key))) break; - } + if (exists) { + if (onlyAdd) { + return false; + } - if (exists) { + const OSMetaClassBase *oldObject = dictionary[i].value; - if (onlyAdd) return false; + haveUpdated(); - const OSMetaClassBase *oldObject = dictionary[i].value; - - haveUpdated(); - - anObject->taggedRetain(OSTypeID(OSCollection)); - dictionary[i].value = anObject; - - oldObject->taggedRelease(OSTypeID(OSCollection)); - return true; - } + anObject->taggedRetain(OSTypeID(OSCollection)); + dictionary[i].value = anObject; - // add new key, possibly extending our capacity - if (count >= capacity && count >= ensureCapacity(count+1)) - return false; + oldObject->taggedRelease(OSTypeID(OSCollection)); + return true; + } - haveUpdated(); + // add new key, possibly extending our capacity + if (count >= capacity && count >= ensureCapacity(count + 1)) { + return false; + } - bcopy(&dictionary[i], &dictionary[i+1], (count - i) * sizeof(dictionary[0])); + haveUpdated(); + + bcopy(&dictionary[i], &dictionary[i + 1], (count - i) * sizeof(dictionary[0])); - aKey->taggedRetain(OSTypeID(OSCollection)); - anObject->taggedRetain(OSTypeID(OSCollection)); - dictionary[i].key = aKey; - dictionary[i].value = anObject; - count++; + aKey->taggedRetain(OSTypeID(OSCollection)); + anObject->taggedRetain(OSTypeID(OSCollection)); + dictionary[i].key = aKey; + dictionary[i].value = anObject; + count++; - return true; + return true; } -bool OSDictionary:: +bool +OSDictionary:: setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject) { - return (setObject(aKey, anObject, false)); + return setObject(aKey, anObject, false); } -void OSDictionary::removeObject(const OSSymbol *aKey) +void +OSDictionary::removeObject(const OSSymbol *aKey) { - unsigned int i; - bool exists; + unsigned int i; + bool exists; - if (!aKey) - return; + if (!aKey) { + return; + } - // if the key exists, remove the object + // if the key exists, remove the object + + if (fOptions & kSort) { + i = OSSymbol::bsearch(aKey, &dictionary[0], count, sizeof(dictionary[0])); + exists = (i < count) && (aKey == dictionary[i].key); + } else { + for (exists = false, i = 0; i < count; i++) { + if ((exists = (aKey == dictionary[i].key))) { + break; + } + } + } - if (fOptions & kSort) { - i = OSSymbol::bsearch(aKey, &dictionary[0], count, sizeof(dictionary[0])); - exists = (i < count) && (aKey == dictionary[i].key); - } else for 
(exists = false, i = 0; i < count; i++) { - if ((exists = (aKey == dictionary[i].key))) break; - } + if (exists) { + dictEntry oldEntry = dictionary[i]; - if (exists) { - dictEntry oldEntry = dictionary[i]; + haveUpdated(); - haveUpdated(); + count--; + bcopy(&dictionary[i + 1], &dictionary[i], (count - i) * sizeof(dictionary[0])); - count--; - bcopy(&dictionary[i+1], &dictionary[i], (count - i) * sizeof(dictionary[0])); - - oldEntry.key->taggedRelease(OSTypeID(OSCollection)); - oldEntry.value->taggedRelease(OSTypeID(OSCollection)); - return; - } + oldEntry.key->taggedRelease(OSTypeID(OSCollection)); + oldEntry.value->taggedRelease(OSTypeID(OSCollection)); + return; + } } // Returns true on success, false on an error condition. -bool OSDictionary::merge(const OSDictionary *srcDict) +bool +OSDictionary::merge(const OSDictionary *srcDict) { - const OSSymbol * sym; - OSCollectionIterator * iter; + const OSSymbol * sym; + OSCollectionIterator * iter; - if ( !OSDynamicCast(OSDictionary, srcDict) ) - return false; + if (!OSDynamicCast(OSDictionary, srcDict)) { + return false; + } - iter = OSCollectionIterator::withCollection(const_cast(srcDict)); - if ( !iter ) - return false; + iter = OSCollectionIterator::withCollection(const_cast(srcDict)); + if (!iter) { + return false; + } - while ( (sym = (const OSSymbol *)iter->getNextObject()) ) { - const OSMetaClassBase * obj; + while ((sym = (const OSSymbol *)iter->getNextObject())) { + const OSMetaClassBase * obj; - obj = srcDict->getObject(sym); - if ( !setObject(sym, obj) ) { - iter->release(); - return false; - } - } - iter->release(); + obj = srcDict->getObject(sym); + if (!setObject(sym, obj)) { + iter->release(); + return false; + } + } + iter->release(); - return true; + return true; } -OSObject *OSDictionary::getObject(const OSSymbol *aKey) const +OSObject * +OSDictionary::getObject(const OSSymbol *aKey) const { - unsigned int i; - bool exists; + unsigned int i, l = 0, r = count; - if (!aKey) - return 0; - - // if the key exists, return the object - - if (fOptions & kSort) { - i = OSSymbol::bsearch(aKey, &dictionary[0], count, sizeof(dictionary[0])); - exists = (i < count) && (aKey == dictionary[i].key); - } else for (exists = false, i = 0; i < count; i++) { - if ((exists = (aKey == dictionary[i].key))) break; - } + if (!aKey) { + return 0; + } - if (exists) { - return (const_cast ((const OSObject *)dictionary[i].value)); - } + // if the key exists, return the object + // + // inline OSSymbol::bsearch in this performance critical codepath + // for performance, the compiler can't do that due to the genericity + // of OSSymbol::bsearch + // + // If we have less than 4 objects, scanning is faster. 
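[Editorial note] The comment above refers to the hand-inlined search in the hunk that follows: OSSymbol::bsearch is generic (element size and comparison are parameters), so the compiler cannot specialize it on this hot path. Because OSSymbol instances are interned and unique per string, a kSort dictionary can order entries by raw key address and compare pointers directly, falling back to a linear scan for very small counts. A minimal standalone illustration of that pointer-ordered lookup, with hypothetical types (not the kernel's dictEntry):

#include <cstdint>

struct Entry { const void *key; const void *value; };

// 'entries' is sorted ascending by key address; keys are unique.
static const void *
lookup(const Entry *entries, unsigned int count, const void *aKey)
{
	unsigned int l = 0, r = count;

	if (count <= 4) {                 // tiny arrays: scanning wins
		for (unsigned int i = 0; i < count; i++) {
			if (entries[i].key == aKey) {
				return entries[i].value;
			}
		}
		return nullptr;
	}
	while (l < r) {                   // classic half-open binary search
		unsigned int i = (l + r) / 2;
		if (entries[i].key == aKey) {
			return entries[i].value;
		}
		if ((uintptr_t)aKey < (uintptr_t)entries[i].key) {
			r = i;                    // continue in [l, i)
		} else {
			l = i + 1;                // continue in [i + 1, r)
		}
	}
	return nullptr;
}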
+ if (count > 4 && (fOptions & kSort)) { + while (l < r) { + i = (l + r) / 2; + if (aKey == dictionary[i].key) { + return const_cast ((const OSObject *)dictionary[i].value); + } + + if ((uintptr_t)aKey < (uintptr_t)dictionary[i].key) { + r = i; + } else { + l = i + 1; + } + } + } else { + for (i = l; i < r; i++) { + if (aKey == dictionary[i].key) { + return const_cast ((const OSObject *)dictionary[i].value); + } + } + } - return 0; + return NULL; } // Wrapper macros -#define OBJECT_WRAP_1(cmd, k) \ -{ \ - const OSSymbol *tmpKey = k; \ - OSObject *retObj = cmd(tmpKey); \ - \ - tmpKey->release(); \ - return retObj; \ +#define OBJECT_WRAP_1(cmd, k) \ +{ \ + const OSSymbol *tmpKey = k; \ + OSObject *retObj = NULL; \ + if (tmpKey) { \ + retObj = cmd(tmpKey); \ + tmpKey->release(); \ + } \ + return retObj; \ } -#define OBJECT_WRAP_2(cmd, k, o) \ -{ \ - const OSSymbol *tmpKey = k; \ - bool ret = cmd(tmpKey, o); \ - \ - tmpKey->release(); \ - return ret; \ +#define OBJECT_WRAP_2(cmd, k, o) \ +{ \ + const OSSymbol *tmpKey = k; \ + bool ret = cmd(tmpKey, o); \ + \ + tmpKey->release(); \ + return ret; \ } -#define OBJECT_WRAP_3(cmd, k) \ -{ \ - const OSSymbol *tmpKey = k; \ - cmd(tmpKey); \ - tmpKey->release(); \ +#define OBJECT_WRAP_3(cmd, k) \ +{ \ + const OSSymbol *tmpKey = k; \ + if (tmpKey) { \ + cmd(tmpKey); \ + tmpKey->release(); \ + } \ } -bool OSDictionary::setObject(const OSString *aKey, const OSMetaClassBase *anObject) - OBJECT_WRAP_2(setObject, OSSymbol::withString(aKey), anObject) -bool OSDictionary::setObject(const char *aKey, const OSMetaClassBase *anObject) - OBJECT_WRAP_2(setObject, OSSymbol::withCString(aKey), anObject) +bool +OSDictionary::setObject(const OSString *aKey, const OSMetaClassBase *anObject) +OBJECT_WRAP_2(setObject, OSSymbol::withString(aKey), anObject) +bool +OSDictionary::setObject(const char *aKey, const OSMetaClassBase *anObject) +OBJECT_WRAP_2(setObject, OSSymbol::withCString(aKey), anObject) -OSObject *OSDictionary::getObject(const OSString *aKey) const - OBJECT_WRAP_1(getObject, OSSymbol::withString(aKey)) +OSObject *OSDictionary::getObject(const OSString * aKey) const +OBJECT_WRAP_1(getObject, OSSymbol::existingSymbolForString(aKey)) OSObject *OSDictionary::getObject(const char *aKey) const - OBJECT_WRAP_1(getObject, OSSymbol::withCString(aKey)) +OBJECT_WRAP_1(getObject, OSSymbol::existingSymbolForCString(aKey)) -void OSDictionary::removeObject(const OSString *aKey) - OBJECT_WRAP_3(removeObject, OSSymbol::withString(aKey)) -void OSDictionary::removeObject(const char *aKey) - OBJECT_WRAP_3(removeObject, OSSymbol::withCString(aKey)) +void +OSDictionary::removeObject(const OSString *aKey) +OBJECT_WRAP_3(removeObject, OSSymbol::existingSymbolForString(aKey)) +void +OSDictionary::removeObject(const char *aKey) +OBJECT_WRAP_3(removeObject, OSSymbol::existingSymbolForCString(aKey)) bool OSDictionary::isEqualTo(const OSDictionary *srcDict, const OSCollection *keys) const { - OSCollectionIterator * iter; - unsigned int keysCount; - const OSMetaClassBase * obj1; - const OSMetaClassBase * obj2; - OSString * aKey; - bool ret; - - if ( this == srcDict ) - return true; - - keysCount = keys->getCount(); - if ( (count < keysCount) || (srcDict->getCount() < keysCount) ) - return false; + OSCollectionIterator * iter; + unsigned int keysCount; + const OSMetaClassBase * obj1; + const OSMetaClassBase * obj2; + OSString * aKey; + bool ret; + + if (this == srcDict) { + return true; + } - iter = OSCollectionIterator::withCollection(keys); - if ( !iter ) - return false; + keysCount = 
keys->getCount(); + if ((count < keysCount) || (srcDict->getCount() < keysCount)) { + return false; + } - ret = true; - while ( (aKey = OSDynamicCast(OSString, iter->getNextObject())) ) { - obj1 = getObject(aKey); - obj2 = srcDict->getObject(aKey); - if ( !obj1 || !obj2 ) { - ret = false; - break; - } + iter = OSCollectionIterator::withCollection(keys); + if (!iter) { + return false; + } - if ( !obj1->isEqualTo(obj2) ) { - ret = false; - break; - } - } - iter->release(); + ret = true; + while ((aKey = OSDynamicCast(OSString, iter->getNextObject()))) { + obj1 = getObject(aKey); + obj2 = srcDict->getObject(aKey); + if (!obj1 || !obj2) { + ret = false; + break; + } + + if (!obj1->isEqualTo(obj2)) { + ret = false; + break; + } + } + iter->release(); - return ret; + return ret; } -bool OSDictionary::isEqualTo(const OSDictionary *srcDict) const +bool +OSDictionary::isEqualTo(const OSDictionary *srcDict) const { - unsigned int i; - const OSMetaClassBase * obj; - - if ( this == srcDict ) - return true; + unsigned int i; + const OSMetaClassBase * obj; - if ( count != srcDict->getCount() ) - return false; + if (this == srcDict) { + return true; + } - for ( i = 0; i < count; i++ ) { - obj = srcDict->getObject(dictionary[i].key); - if ( !obj ) - return false; + if (count != srcDict->getCount()) { + return false; + } - if ( !dictionary[i].value->isEqualTo(obj) ) - return false; - } - - return true; + for (i = 0; i < count; i++) { + obj = srcDict->getObject(dictionary[i].key); + if (!obj) { + return false; + } + + if (!dictionary[i].value->isEqualTo(obj)) { + return false; + } + } + + return true; } -bool OSDictionary::isEqualTo(const OSMetaClassBase *anObject) const +bool +OSDictionary::isEqualTo(const OSMetaClassBase *anObject) const { - OSDictionary *dict; + OSDictionary *dict; - dict = OSDynamicCast(OSDictionary, anObject); - if ( dict ) - return isEqualTo(dict); - else - return false; + dict = OSDynamicCast(OSDictionary, anObject); + if (dict) { + return isEqualTo(dict); + } else { + return false; + } } -unsigned int OSDictionary::iteratorSize() const +unsigned int +OSDictionary::iteratorSize() const { - return sizeof(unsigned int); + return sizeof(unsigned int); } -bool OSDictionary::initIterator(void *inIterator) const +bool +OSDictionary::initIterator(void *inIterator) const { - unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int *iteratorP = (unsigned int *) inIterator; - *iteratorP = 0; - return true; + *iteratorP = 0; + return true; } -bool OSDictionary::getNextObjectForIterator(void *inIterator, OSObject **ret) const +bool +OSDictionary::getNextObjectForIterator(void *inIterator, OSObject **ret) const { - unsigned int *iteratorP = (unsigned int *) inIterator; - unsigned int index = (*iteratorP)++; + unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int index = (*iteratorP)++; - if (index < count) - *ret = (OSObject *) dictionary[index].key; - else - *ret = 0; + if (index < count) { + *ret = (OSObject *) dictionary[index].key; + } else { + *ret = 0; + } - return (*ret != 0); + return *ret != 0; } -bool OSDictionary::serialize(OSSerialize *s) const +bool +OSDictionary::serialize(OSSerialize *s) const { - if (s->previouslySerialized(this)) return true; - - if (!s->addXMLStartTag(this, "dict")) return false; - - for (unsigned i = 0; i < count; i++) { - const OSSymbol *key = dictionary[i].key; + if (s->previouslySerialized(this)) { + return true; + } - // due the nature of the XML syntax, this must be a symbol - if (!key->metaCast("OSSymbol")) { - return false; - 
} - if (!s->addString("")) return false; - const char *c = key->getCStringNoCopy(); - while (*c) { - if (*c == '<') { - if (!s->addString("<")) return false; - } else if (*c == '>') { - if (!s->addString(">")) return false; - } else if (*c == '&') { - if (!s->addString("&")) return false; - } else { - if (!s->addChar(*c)) return false; - } - c++; - } - if (!s->addXMLEndTag("key")) return false; + if (!s->addXMLStartTag(this, "dict")) { + return false; + } - if (!dictionary[i].value->serialize(s)) return false; - } + for (unsigned i = 0; i < count; i++) { + const OSSymbol *key = dictionary[i].key; + + // due the nature of the XML syntax, this must be a symbol + if (!key->metaCast("OSSymbol")) { + return false; + } + if (!s->addString("")) { + return false; + } + const char *c = key->getCStringNoCopy(); + while (*c) { + if (*c == '<') { + if (!s->addString("<")) { + return false; + } + } else if (*c == '>') { + if (!s->addString(">")) { + return false; + } + } else if (*c == '&') { + if (!s->addString("&")) { + return false; + } + } else { + if (!s->addChar(*c)) { + return false; + } + } + c++; + } + if (!s->addXMLEndTag("key")) { + return false; + } + + if (!dictionary[i].value->serialize(s)) { + return false; + } + } - return s->addXMLEndTag("dict"); + return s->addXMLEndTag("dict"); } -unsigned OSDictionary::setOptions(unsigned options, unsigned mask, void *) +unsigned +OSDictionary::setOptions(unsigned options, unsigned mask, void *) { - unsigned old = super::setOptions(options, mask); - if ((old ^ options) & mask) { + unsigned old = super::setOptions(options, mask); + if ((old ^ options) & mask) { + // Value changed need to recurse over all of the child collections + for (unsigned i = 0; i < count; i++) { + OSCollection *v = OSDynamicCast(OSCollection, dictionary[i].value); + if (v) { + v->setOptions(options, mask); + } + } + } - // Value changed need to recurse over all of the child collections - for ( unsigned i = 0; i < count; i++ ) { - OSCollection *v = OSDynamicCast(OSCollection, dictionary[i].value); - if (v) - v->setOptions(options, mask); + if (!(old & kSort) && (fOptions & kSort)) { + sortBySymbol(); } - } - return old; + return old; } -OSCollection * OSDictionary::copyCollection(OSDictionary *cycleDict) +OSCollection * +OSDictionary::copyCollection(OSDictionary *cycleDict) { - bool allocDict = !cycleDict; - OSCollection *ret = 0; - OSDictionary *newDict = 0; - - if (allocDict) { - cycleDict = OSDictionary::withCapacity(16); - if (!cycleDict) - return 0; - } + bool allocDict = !cycleDict; + OSCollection *ret = 0; + OSDictionary *newDict = 0; + + if (allocDict) { + cycleDict = OSDictionary::withCapacity(16); + if (!cycleDict) { + return 0; + } + } - do { - // Check for a cycle - ret = super::copyCollection(cycleDict); - if (ret) - continue; - - newDict = OSDictionary::withDictionary(this); - if (!newDict) - continue; + do { + // Check for a cycle + ret = super::copyCollection(cycleDict); + if (ret) { + continue; + } - // Insert object into cycle Dictionary - cycleDict->setObject((const OSSymbol *) this, newDict); + newDict = OSDictionary::withDictionary(this); + if (!newDict) { + continue; + } - for (unsigned int i = 0; i < count; i++) { - const OSMetaClassBase *obj = dictionary[i].value; - OSCollection *coll = OSDynamicCast(OSCollection, EXT_CAST(obj)); + // Insert object into cycle Dictionary + cycleDict->setObject((const OSSymbol *) this, newDict); - if (coll) { - OSCollection *newColl = coll->copyCollection(cycleDict); - if (!newColl) - goto abortCopy; + for (unsigned int i 
= 0; i < count; i++) { + const OSMetaClassBase *obj = dictionary[i].value; + OSCollection *coll = OSDynamicCast(OSCollection, EXT_CAST(obj)); - newDict->dictionary[i].value = newColl; + if (coll) { + OSCollection *newColl = coll->copyCollection(cycleDict); + if (!newColl) { + goto abortCopy; + } - coll->taggedRelease(OSTypeID(OSCollection)); - newColl->taggedRetain(OSTypeID(OSCollection)); - newColl->release(); - }; - } + newDict->dictionary[i].value = newColl; - ret = newDict; - newDict = 0; + coll->taggedRelease(OSTypeID(OSCollection)); + newColl->taggedRetain(OSTypeID(OSCollection)); + newColl->release(); + } + ; + } - } while (false); + ret = newDict; + newDict = 0; + } while (false); abortCopy: - if (newDict) - newDict->release(); + if (newDict) { + newDict->release(); + } - if (allocDict) - cycleDict->release(); + if (allocDict) { + cycleDict->release(); + } - return ret; + return ret; } -OSArray * OSDictionary::copyKeys(void) +OSArray * +OSDictionary::copyKeys(void) { - OSArray * array; + OSArray * array; array = OSArray::withCapacity(count); - if (!array) return (0); + if (!array) { + return 0; + } - for (unsigned int i = 0; i < count; i++) - { - if (!array->setObject(i, dictionary[i].key)) - { - array->release(); - array = 0; - break; - } + for (unsigned int i = 0; i < count; i++) { + if (!array->setObject(i, dictionary[i].key)) { + array->release(); + array = 0; + break; + } } - return (array); + return array; } -bool OSDictionary::iterateObjects(void * refcon, bool (*callback)(void * refcon, const OSSymbol * key, OSObject * object)) +bool +OSDictionary::iterateObjects(void * refcon, bool (*callback)(void * refcon, const OSSymbol * key, OSObject * object)) { - unsigned int initialUpdateStamp; - bool done; + unsigned int initialUpdateStamp; + bool done; - initialUpdateStamp = updateStamp; - done = false; - for (unsigned int i = 0; i < count; i++) - { - done = callback(refcon, dictionary[i].key, EXT_CAST(dictionary[i].value)); - if (done) break; - if (initialUpdateStamp != updateStamp) break; - } + initialUpdateStamp = updateStamp; + done = false; + for (unsigned int i = 0; i < count; i++) { + done = callback(refcon, dictionary[i].key, EXT_CAST(dictionary[i].value)); + if (done) { + break; + } + if (initialUpdateStamp != updateStamp) { + break; + } + } - return initialUpdateStamp == updateStamp; + return initialUpdateStamp == updateStamp; } -static bool OSDictionaryIterateObjectsBlock(void * refcon, const OSSymbol * key, OSObject * object) +static bool +OSDictionaryIterateObjectsBlock(void * refcon, const OSSymbol * key, OSObject * object) { - bool (^block)(const OSSymbol * key, OSObject * object) = (typeof(block)) refcon; - return (block(key, object)); + bool (^block)(const OSSymbol * key, OSObject * object) = (typeof(block))refcon; + return block(key, object); } -bool OSDictionary::iterateObjects(bool (^block)(const OSSymbol * key, OSObject * object)) +bool +OSDictionary::iterateObjects(bool (^block)(const OSSymbol * key, OSObject * object)) { - return (iterateObjects((void *)block, &OSDictionaryIterateObjectsBlock)); + return iterateObjects((void *)block, &OSDictionaryIterateObjectsBlock); } diff --git a/libkern/c++/OSIterator.cpp b/libkern/c++/OSIterator.cpp index dac01d78d..9baf904ae 100644 --- a/libkern/c++/OSIterator.cpp +++ b/libkern/c++/OSIterator.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
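[Editorial note] One behavioral change in the OSDictionary wrapper hunks above is easy to miss: the getObject()/removeObject() overloads taking OSString or C-string keys now resolve the key with OSSymbol::existingSymbolForString()/existingSymbolForCString() instead of OSSymbol::withString()/withCString(). A lookup for a key that was never inserted no longer interns a brand-new symbol just to discover it is absent; the rewritten OBJECT_WRAP_1/OBJECT_WRAP_3 macros handle the NULL that results. A hedged usage sketch, assuming an IOKit/libkern environment:

OSDictionary *dict  = OSDictionary::withCapacity(4);
OSString     *value = OSString::withCString("value");

dict->setObject("present", value);           // interns "present" once

OSObject *hit  = dict->getObject("present"); // -> value
OSObject *miss = dict->getObject("absent");  // -> NULL; with the new code,
                                             // no OSSymbol is created for
                                             // the missed key
(void)hit; (void)miss;
value->release();
dict->release();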
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. * * HISTORY * @@ -39,4 +39,3 @@ OSMetaClassDefineReservedUnused(OSIterator, 0); OSMetaClassDefineReservedUnused(OSIterator, 1); OSMetaClassDefineReservedUnused(OSIterator, 2); OSMetaClassDefineReservedUnused(OSIterator, 3); - diff --git a/libkern/c++/OSKext.cpp b/libkern/c++/OSKext.cpp index 3f448512c..d79b4b64e 100644 --- a/libkern/c++/OSKext.cpp +++ b/libkern/c++/OSKext.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2008-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -92,24 +92,24 @@ extern vm_offset_t segLOWESTTEXT; #endif /* CONFIG_EMBEDDED */ static OSReturn _OSKextCreateRequest( - const char * predicate, - OSDictionary ** requestP); + const char * predicate, + OSDictionary ** requestP); static OSString * _OSKextGetRequestPredicate(OSDictionary * requestDict); static OSObject * _OSKextGetRequestArgument( - OSDictionary * requestDict, - const char * argName); + OSDictionary * requestDict, + const char * argName); static bool _OSKextSetRequestArgument( - OSDictionary * requestDict, - const char * argName, - OSObject * value); + OSDictionary * requestDict, + const char * argName, + OSObject * value); static void * _OSKextExtractPointer(OSData * wrapper); static OSReturn _OSDictionarySetCStringValue( - OSDictionary * dict, - const char * key, - const char * value); + OSDictionary * dict, + const char * key, + const char * value); static bool _OSKextInPrelinkRebuildWindow(void); static bool _OSKextInUnloadedPrelinkedKexts(const OSSymbol * theBundleID); - + // We really should add containsObject() & containsCString to OSCollection & subclasses. // So few pad slots, though.... static bool _OSArrayContainsCString(OSArray * array, const char * cString); @@ -210,12 +210,12 @@ static bool _OSArrayContainsCString(OSArray * array, const char * cString); * return an offset and length of 0 for that section. *********************************************************************/ typedef struct osLogDataHeader { - uint32_t version; - uint32_t sect_count; - struct { - uint32_t sect_offset; - uint32_t sect_size; - } sections[0]; + uint32_t version; + uint32_t sect_count; + struct { + uint32_t sect_offset; + uint32_t sect_size; + } sections[0]; } osLogDataHeaderRef; /********************************************************************* @@ -228,8 +228,8 @@ typedef struct osLogDataHeader { * Snow Leopard. *********************************************************************/ typedef struct MkextEntryRef { - mkext_basic_header * mkext; // beginning of whole mkext file - void * fileinfo; // mkext2_file_entry or equiv; see mkext.h + mkext_basic_header * mkext; // beginning of whole mkext file + void * fileinfo;// mkext2_file_entry or equiv; see mkext.h } MkextEntryRef; #if PRAGMA_MARK @@ -244,24 +244,24 @@ static bool sSafeBoot = false; static bool sKeepSymbols = false; /********************************************************************* -* sKextLock is the principal lock for OSKext, and guards all static -* and global variables not owned by other locks (declared further -* below). It must be taken by any entry-point method or function, -* including internal functions called on scheduled threads. -* -* sKextLock and sKextInnerLock are recursive due to multiple functions -* that are called both externally and internally. The other locks are -* nonrecursive. -* -* Which locks are taken depends on what they protect, but if more than -* one must be taken, they must always be locked in this order -* (and unlocked in reverse order) to prevent deadlocks: -* -* 1. sKextLock -* 2. sKextInnerLock -* 3. sKextSummariesLock -* 4. sKextLoggingLock -*/ + * sKextLock is the principal lock for OSKext, and guards all static + * and global variables not owned by other locks (declared further + * below). It must be taken by any entry-point method or function, + * including internal functions called on scheduled threads. 
+ * + * sKextLock and sKextInnerLock are recursive due to multiple functions + * that are called both externally and internally. The other locks are + * nonrecursive. + * + * Which locks are taken depends on what they protect, but if more than + * one must be taken, they must always be locked in this order + * (and unlocked in reverse order) to prevent deadlocks: + * + * 1. sKextLock + * 2. sKextInnerLock + * 3. sKextSummariesLock + * 4. sKextLoggingLock + */ static IORecursiveLock * sKextLock = NULL; static OSDictionary * sKextsByID = NULL; @@ -296,8 +296,8 @@ static bool sLoadEnabled = true; static bool sUnloadEnabled = true; /********************************************************************* -* Stuff for the OSKext representing the kernel itself. -**********/ + * Stuff for the OSKext representing the kernel itself. + **********/ static OSKext * sKernelKext = NULL; /* Set up a fake kmod_info struct for the kernel. @@ -313,35 +313,35 @@ static OSKext * sKernelKext = NULL; * binary compability. */ kmod_info_t g_kernel_kmod_info = { - /* next */ 0, - /* info_version */ KMOD_INFO_VERSION, - /* id */ 0, // loadTag: kernel is always 0 - /* name */ kOSKextKernelIdentifier, // bundle identifier - /* version */ "0", // filled in in OSKext::initialize() - /* reference_count */ -1, // never adjusted; kernel never unloads - /* reference_list */ NULL, - /* address */ 0, - /* size */ 0, // filled in in OSKext::initialize() - /* hdr_size */ 0, - /* start */ 0, - /* stop */ 0 + /* next */ 0, + /* info_version */ KMOD_INFO_VERSION, + /* id */ 0, // loadTag: kernel is always 0 + /* name */ kOSKextKernelIdentifier, // bundle identifier + /* version */ "0", // filled in in OSKext::initialize() + /* reference_count */ -1, // never adjusted; kernel never unloads + /* reference_list */ NULL, + /* address */ 0, + /* size */ 0, // filled in in OSKext::initialize() + /* hdr_size */ 0, + /* start */ 0, + /* stop */ 0 }; /* Set up a fake kmod_info struct for statically linked kexts that don't have one. 
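[Editorial note] The reflowed comment above preserves the lock hierarchy, which is worth restating operationally: any path needing several of these locks must acquire them in the order sKextLock, then sKextInnerLock, then sKextSummariesLock, then sKextLoggingLock, and release in reverse. A hedged sketch of a conforming nested acquisition (illustrative only; real call sites rarely hold more than two at once):

// Assumes the locks were allocated as in OSKext::initialize().
IORecursiveLockLock(sKextLock);          // 1: always taken first
IORecursiveLockLock(sKextInnerLock);     // 2: only while holding 1
IOLockLock(sKextSummariesLock);          // 3: nonrecursive

/* ... touch state guarded by all three ... */

IOLockUnlock(sKextSummariesLock);        // release in reverse order
IORecursiveLockUnlock(sKextInnerLock);
IORecursiveLockUnlock(sKextLock);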
*/ kmod_info_t invalid_kmod_info = { - /* next */ 0, - /* info_version */ KMOD_INFO_VERSION, - /* id */ UINT32_MAX, - /* name */ "invalid", - /* version */ "0", - /* reference_count */ -1, - /* reference_list */ NULL, - /* address */ 0, - /* size */ 0, - /* hdr_size */ 0, - /* start */ 0, - /* stop */ 0 + /* next */ 0, + /* info_version */ KMOD_INFO_VERSION, + /* id */ UINT32_MAX, + /* name */ "invalid", + /* version */ "0", + /* reference_count */ -1, + /* reference_list */ NULL, + /* address */ 0, + /* size */ 0, + /* hdr_size */ 0, + /* start */ 0, + /* stop */ 0 }; extern "C" { @@ -356,15 +356,15 @@ kmod_info_t * kmod = NULL; static char * loaded_kext_paniclist = NULL; static uint32_t loaded_kext_paniclist_size = 0; - + AbsoluteTime last_loaded_timestamp; -static char last_loaded_str_buf[2*KMOD_MAX_NAME]; +static char last_loaded_str_buf[2 * KMOD_MAX_NAME]; static u_long last_loaded_strlen = 0; static void * last_loaded_address = NULL; static u_long last_loaded_size = 0; AbsoluteTime last_unloaded_timestamp; -static char last_unloaded_str_buf[2*KMOD_MAX_NAME]; +static char last_unloaded_str_buf[2 * KMOD_MAX_NAME]; static u_long last_unloaded_strlen = 0; static void * last_unloaded_address = NULL; static u_long last_unloaded_size = 0; @@ -389,17 +389,17 @@ static const OSSymbol * gIOSurfaceIdentifier; vm_tag_t gIOSurfaceTag; /********************************************************************* -* sKextInnerLock protects against cross-calls with IOService and -* IOCatalogue, and owns the variables declared immediately below. -* -* Note that sConsiderUnloadsExecuted above belongs to sKextLock! -* -* When both sKextLock and sKextInnerLock need to be taken, -* always lock sKextLock first and unlock it second. Never take both -* locks in an entry point to OSKext; if you need to do so, you must -* spawn an independent thread to avoid potential deadlocks for threads -* calling into OSKext. -**********/ + * sKextInnerLock protects against cross-calls with IOService and + * IOCatalogue, and owns the variables declared immediately below. + * + * Note that sConsiderUnloadsExecuted above belongs to sKextLock! + * + * When both sKextLock and sKextInnerLock need to be taken, + * always lock sKextLock first and unlock it second. Never take both + * locks in an entry point to OSKext; if you need to do so, you must + * spawn an independent thread to avoid potential deadlocks for threads + * calling into OSKext. + **********/ static IORecursiveLock * sKextInnerLock = NULL; static bool sAutounloadEnabled = true; @@ -410,21 +410,21 @@ static unsigned int sConsiderUnloadDelay = 60; // seconds static thread_call_t sUnloadCallout = 0; static thread_call_t sDestroyLinkContextThread = 0; // one-shot, one-at-a-time thread static bool sSystemSleep = false; // true when system going to sleep -static AbsoluteTime sLastWakeTime; // last time we woke up +static AbsoluteTime sLastWakeTime; // last time we woke up /********************************************************************* -* Backtraces can be printed at various times so we need a tight lock -* on data used for that. sKextSummariesLock protects the variables -* declared immediately below. -* -* gLoadedKextSummaries is accessed by other modules, but only during -* a panic so the lock isn't needed then. -* -* gLoadedKextSummaries has the "used" attribute in order to ensure -* that it remains visible even when we are performing extremely -* aggressive optimizations, as it is needed to allow the debugger -* to automatically parse the list of loaded kexts. 
-**********/ + * Backtraces can be printed at various times so we need a tight lock + * on data used for that. sKextSummariesLock protects the variables + * declared immediately below. + * + * gLoadedKextSummaries is accessed by other modules, but only during + * a panic so the lock isn't needed then. + * + * gLoadedKextSummaries has the "used" attribute in order to ensure + * that it remains visible even when we are performing extremely + * aggressive optimizations, as it is needed to allow the debugger + * to automatically parse the list of loaded kexts. + **********/ static IOLock * sKextSummariesLock = NULL; extern "C" lck_spin_t vm_allocation_sites_lock; static IOSimpleLock * sKextAccountsLock = &vm_allocation_sites_lock; @@ -439,12 +439,12 @@ static uint32_t sKextAccountsCount; }; /********************************************************************* -* sKextLoggingLock protects the logging variables declared immediately below. -**********/ + * sKextLoggingLock protects the logging variables declared immediately below. + **********/ static IOLock * sKextLoggingLock = NULL; static const OSKextLogSpec kDefaultKernelLogFilter = kOSKextLogBasicLevel | - kOSKextLogVerboseFlagsMask; + kOSKextLogVerboseFlagsMask; static OSKextLogSpec sKernelLogFilter = kDefaultKernelLogFilter; static bool sBootArgLogFilterFound = false; SYSCTL_UINT(_debug, OID_AUTO, kextlog, CTLFLAG_RW | CTLFLAG_LOCKED, &sKernelLogFilter, @@ -455,20 +455,20 @@ static OSArray * sUserSpaceLogSpecArray = NULL; static OSArray * sUserSpaceLogMessageArray = NULL; /********* -* End scope for sKextInnerLock-protected variables. -*********************************************************************/ + * End scope for sKextInnerLock-protected variables. + *********************************************************************/ /********************************************************************* - helper function used for collecting PGO data upon unload of a kext + * helper function used for collecting PGO data upon unload of a kext */ static int OSKextGrabPgoDataLocked(OSKext *kext, - bool metadata, - uuid_t instance_uuid, - uint64_t *pSize, - char *pBuffer, - uint64_t bufferSize); + bool metadata, + uuid_t instance_uuid, + uint64_t *pSize, + char *pBuffer, + uint64_t bufferSize); /**********************************************************************/ @@ -481,27 +481,32 @@ static int OSKextGrabPgoDataLocked(OSKext *kext, * C functions used for callbacks. 
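[Editorial note] The extern "C" helpers that follow exist so that OSData objects created with withBytesNoCopy() can still own their backing memory: the buffer is allocated elsewhere (kmem_alloc, kext_alloc, ...), wrapped without copying, and the matching free routine is registered as the dealloc function to run when the OSData is released. kern_allocate() further below uses exactly this pattern with osdata_kext_free. A hedged sketch of the kmem variant:

vm_offset_t buf = 0;
vm_size_t   len = PAGE_SIZE;

if (kmem_alloc(kernel_map, &buf, len, VM_KERN_MEMORY_KEXT) == KERN_SUCCESS) {
	// No copy is made; the OSData simply points at 'buf'.
	OSData *wrapper = OSData::withBytesNoCopy((void *)buf, len);
	if (wrapper) {
		// osdata_kmem_free() runs when the last reference is dropped,
		// so the OSData now owns the kmem allocation.
		wrapper->setDeallocFunction(osdata_kmem_free);
		wrapper->release();   // frees 'buf' via the callback
	} else {
		kmem_free(kernel_map, buf, len);
	}
}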
*********************************************************************/ extern "C" { -void osdata_kmem_free(void * ptr, unsigned int length) { - kmem_free(kernel_map, (vm_address_t)ptr, length); - return; +void +osdata_kmem_free(void * ptr, unsigned int length) +{ + kmem_free(kernel_map, (vm_address_t)ptr, length); + return; } -void osdata_phys_free(void * ptr, unsigned int length) { - ml_static_mfree((vm_offset_t)ptr, length); - return; +void +osdata_phys_free(void * ptr, unsigned int length) +{ + ml_static_mfree((vm_offset_t)ptr, length); + return; } -void osdata_vm_deallocate(void * ptr, unsigned int length) +void +osdata_vm_deallocate(void * ptr, unsigned int length) { - (void)vm_deallocate(kernel_map, (vm_offset_t)ptr, length); - return; + (void)vm_deallocate(kernel_map, (vm_offset_t)ptr, length); + return; } -void osdata_kext_free(void * ptr, unsigned int length) +void +osdata_kext_free(void * ptr, unsigned int length) { - (void)kext_free((vm_offset_t)ptr, length); + (void)kext_free((vm_offset_t)ptr, length); } - }; #if PRAGMA_MARK @@ -512,106 +517,106 @@ void osdata_kext_free(void * ptr, unsigned int length) *********************************************************************/ kxld_addr_t kern_allocate( - u_long size, - KXLDAllocateFlags * flags, - void * user_data) -{ - vm_address_t result = 0; // returned - kern_return_t mach_result = KERN_FAILURE; - bool success = false; - OSKext * theKext = (OSKext *)user_data; - u_long roundSize = round_page(size); - OSData * linkBuffer = NULL; // must release - - mach_result = kext_alloc(&result, roundSize, /* fixed */ FALSE); - if (mach_result != KERN_SUCCESS) { - OSKextLog(theKext, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Can't allocate kernel memory to link %s.", - theKext->getIdentifierCString()); - goto finish; - } - - /* Create an OSData wrapper for the allocated buffer. - */ - linkBuffer = OSData::withBytesNoCopy((void *)result, roundSize); - if (!linkBuffer) { - OSKextLog(theKext, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Can't allocate linked executable wrapper for %s.", - theKext->getIdentifierCString()); - goto finish; - } - linkBuffer->setDeallocFunction(osdata_kext_free); - OSKextLog(theKext, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag | kOSKextLogLinkFlag, - "Allocated link buffer for kext %s at %p (%lu bytes).", - theKext->getIdentifierCString(), - (void *)result, (unsigned long)roundSize); - - theKext->setLinkedExecutable(linkBuffer); - - *flags = kKxldAllocateWritable; - success = true; + u_long size, + KXLDAllocateFlags * flags, + void * user_data) +{ + vm_address_t result = 0; // returned + kern_return_t mach_result = KERN_FAILURE; + bool success = false; + OSKext * theKext = (OSKext *)user_data; + u_long roundSize = round_page(size); + OSData * linkBuffer = NULL;// must release + + mach_result = kext_alloc(&result, roundSize, /* fixed */ FALSE); + if (mach_result != KERN_SUCCESS) { + OSKextLog(theKext, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Can't allocate kernel memory to link %s.", + theKext->getIdentifierCString()); + goto finish; + } + + /* Create an OSData wrapper for the allocated buffer. 
+ */ + linkBuffer = OSData::withBytesNoCopy((void *)result, roundSize); + if (!linkBuffer) { + OSKextLog(theKext, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Can't allocate linked executable wrapper for %s.", + theKext->getIdentifierCString()); + goto finish; + } + linkBuffer->setDeallocFunction(osdata_kext_free); + OSKextLog(theKext, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag | kOSKextLogLinkFlag, + "Allocated link buffer for kext %s at %p (%lu bytes).", + theKext->getIdentifierCString(), + (void *)result, (unsigned long)roundSize); + + theKext->setLinkedExecutable(linkBuffer); + + *flags = kKxldAllocateWritable; + success = true; finish: - if (!success && result) { - kext_free(result, roundSize); - result = 0; - } + if (!success && result) { + kext_free(result, roundSize); + result = 0; + } - OSSafeReleaseNULL(linkBuffer); + OSSafeReleaseNULL(linkBuffer); - return (kxld_addr_t)result; + return (kxld_addr_t)result; } /********************************************************************* *********************************************************************/ void kxld_log_callback( - KXLDLogSubsystem subsystem, - KXLDLogLevel level, - const char * format, - va_list argList, - void * user_data) -{ - OSKext *theKext = (OSKext *) user_data; - OSKextLogSpec logSpec = 0; - - switch (subsystem) { - case kKxldLogLinking: - logSpec |= kOSKextLogLinkFlag; - break; - case kKxldLogPatching: - logSpec |= kOSKextLogPatchFlag; - break; - } - - switch (level) { - case kKxldLogExplicit: - logSpec |= kOSKextLogExplicitLevel; - break; - case kKxldLogErr: - logSpec |= kOSKextLogErrorLevel; - break; - case kKxldLogWarn: - logSpec |= kOSKextLogWarningLevel; - break; - case kKxldLogBasic: - logSpec |= kOSKextLogProgressLevel; - break; - case kKxldLogDetail: - logSpec |= kOSKextLogDetailLevel; - break; - case kKxldLogDebug: - logSpec |= kOSKextLogDebugLevel; - break; - } - - OSKextVLog(theKext, logSpec, format, argList); + KXLDLogSubsystem subsystem, + KXLDLogLevel level, + const char * format, + va_list argList, + void * user_data) +{ + OSKext *theKext = (OSKext *) user_data; + OSKextLogSpec logSpec = 0; + + switch (subsystem) { + case kKxldLogLinking: + logSpec |= kOSKextLogLinkFlag; + break; + case kKxldLogPatching: + logSpec |= kOSKextLogPatchFlag; + break; + } + + switch (level) { + case kKxldLogExplicit: + logSpec |= kOSKextLogExplicitLevel; + break; + case kKxldLogErr: + logSpec |= kOSKextLogErrorLevel; + break; + case kKxldLogWarn: + logSpec |= kOSKextLogWarningLevel; + break; + case kKxldLogBasic: + logSpec |= kOSKextLogProgressLevel; + break; + case kKxldLogDetail: + logSpec |= kOSKextLogDetailLevel; + break; + case kKxldLogDebug: + logSpec |= kOSKextLogDebugLevel; + break; + } + + OSKextVLog(theKext, logSpec, format, argList); } #if PRAGMA_MARK @@ -664,202 +669,203 @@ OSDefineMetaClassAndStructors(OSKext, OSObject) void OSKext::initialize(void) { - OSData * kernelExecutable = NULL; // do not release - u_char * kernelStart = NULL; // do not free - size_t kernelLength = 0; - OSString * scratchString = NULL; // must release - IORegistryEntry * registryRoot = NULL; // do not release - OSNumber * kernelCPUType = NULL; // must release - OSNumber * kernelCPUSubtype = NULL; // must release - OSKextLogSpec bootLogFilter = kOSKextLogSilentFilter; - bool setResult = false; - uint64_t * timestamp = 0; - char bootArgBuffer[16]; // for PE_parse_boot_argn w/strings - - /* This must be the first thing allocated. Everything else grabs this lock. 
- */ - sKextLock = IORecursiveLockAlloc(); - sKextInnerLock = IORecursiveLockAlloc(); - sKextSummariesLock = IOLockAlloc(); - sKextLoggingLock = IOLockAlloc(); - assert(sKextLock); - assert(sKextInnerLock); - assert(sKextSummariesLock); - assert(sKextLoggingLock); - - sKextsByID = OSDictionary::withCapacity(kOSKextTypicalLoadCount); - sLoadedKexts = OSArray::withCapacity(kOSKextTypicalLoadCount); - sUnloadedPrelinkedKexts = OSArray::withCapacity(kOSKextTypicalLoadCount / 10); - sKernelRequests = OSArray::withCapacity(0); - sPostedKextLoadIdentifiers = OSSet::withCapacity(0); - sAllKextLoadIdentifiers = OSSet::withCapacity(kOSKextTypicalLoadCount); - sRequestCallbackRecords = OSArray::withCapacity(0); - assert(sKextsByID && sLoadedKexts && sKernelRequests && - sPostedKextLoadIdentifiers && sAllKextLoadIdentifiers && - sRequestCallbackRecords && sUnloadedPrelinkedKexts); - - /* Read the log flag boot-args and set the log flags. - */ - if (PE_parse_boot_argn("kextlog", &bootLogFilter, sizeof(bootLogFilter))) { - sBootArgLogFilterFound = true; - sKernelLogFilter = bootLogFilter; - // log this if any flags are set - OSKextLog(/* kext */ NULL, - kOSKextLogBasicLevel | - kOSKextLogFlagsMask, - "Kernel kext log filter 0x%x per kextlog boot arg.", - (unsigned)sKernelLogFilter); - } - - sSafeBoot = PE_parse_boot_argn("-x", bootArgBuffer, - sizeof(bootArgBuffer)) ? true : false; - - if (sSafeBoot) { - OSKextLog(/* kext */ NULL, - kOSKextLogWarningLevel | - kOSKextLogGeneralFlag, - "SAFE BOOT DETECTED - " - "only valid OSBundleRequired kexts will be loaded."); - } - - PE_parse_boot_argn("keepsyms", &sKeepSymbols, sizeof(sKeepSymbols)); + OSData * kernelExecutable = NULL;// do not release + u_char * kernelStart = NULL;// do not free + size_t kernelLength = 0; + OSString * scratchString = NULL;// must release + IORegistryEntry * registryRoot = NULL;// do not release + OSNumber * kernelCPUType = NULL;// must release + OSNumber * kernelCPUSubtype = NULL;// must release + OSKextLogSpec bootLogFilter = kOSKextLogSilentFilter; + bool setResult = false; + uint64_t * timestamp = 0; + char bootArgBuffer[16];// for PE_parse_boot_argn w/strings + + /* This must be the first thing allocated. Everything else grabs this lock. + */ + sKextLock = IORecursiveLockAlloc(); + sKextInnerLock = IORecursiveLockAlloc(); + sKextSummariesLock = IOLockAlloc(); + sKextLoggingLock = IOLockAlloc(); + assert(sKextLock); + assert(sKextInnerLock); + assert(sKextSummariesLock); + assert(sKextLoggingLock); + + sKextsByID = OSDictionary::withCapacity(kOSKextTypicalLoadCount); + sLoadedKexts = OSArray::withCapacity(kOSKextTypicalLoadCount); + sUnloadedPrelinkedKexts = OSArray::withCapacity(kOSKextTypicalLoadCount / 10); + sKernelRequests = OSArray::withCapacity(0); + sPostedKextLoadIdentifiers = OSSet::withCapacity(0); + sAllKextLoadIdentifiers = OSSet::withCapacity(kOSKextTypicalLoadCount); + sRequestCallbackRecords = OSArray::withCapacity(0); + assert(sKextsByID && sLoadedKexts && sKernelRequests && + sPostedKextLoadIdentifiers && sAllKextLoadIdentifiers && + sRequestCallbackRecords && sUnloadedPrelinkedKexts); + + /* Read the log flag boot-args and set the log flags. 
+ */ + if (PE_parse_boot_argn("kextlog", &bootLogFilter, sizeof(bootLogFilter))) { + sBootArgLogFilterFound = true; + sKernelLogFilter = bootLogFilter; + // log this if any flags are set + OSKextLog(/* kext */ NULL, + kOSKextLogBasicLevel | + kOSKextLogFlagsMask, + "Kernel kext log filter 0x%x per kextlog boot arg.", + (unsigned)sKernelLogFilter); + } + + sSafeBoot = PE_parse_boot_argn("-x", bootArgBuffer, + sizeof(bootArgBuffer)) ? true : false; + + if (sSafeBoot) { + OSKextLog(/* kext */ NULL, + kOSKextLogWarningLevel | + kOSKextLogGeneralFlag, + "SAFE BOOT DETECTED - " + "only valid OSBundleRequired kexts will be loaded."); + } + + PE_parse_boot_argn("keepsyms", &sKeepSymbols, sizeof(sKeepSymbols)); #if CONFIG_DTRACE - if (dtrace_keep_kernel_symbols()) - sKeepSymbols = true; + if (dtrace_keep_kernel_symbols()) { + sKeepSymbols = true; + } #endif /* CONFIG_DTRACE */ #if KASAN_DYNAMIC_BLACKLIST - /* needed for function lookup */ - sKeepSymbols = true; + /* needed for function lookup */ + sKeepSymbols = true; #endif - /* Set up an OSKext instance to represent the kernel itself. - */ - sKernelKext = new OSKext; - assert(sKernelKext); - - kernelStart = (u_char *)&_mh_execute_header; - kernelLength = getlastaddr() - (vm_offset_t)kernelStart; - kernelExecutable = OSData::withBytesNoCopy( - kernelStart, kernelLength); - assert(kernelExecutable); - -#if KASLR_KEXT_DEBUG - IOLog("kaslr: kernel start 0x%lx end 0x%lx length %lu vm_kernel_slide %llu (0x%016lx) \n", - (unsigned long)kernelStart, - (unsigned long)getlastaddr(), - kernelLength, - vm_kernel_slide, vm_kernel_slide); + /* Set up an OSKext instance to represent the kernel itself. + */ + sKernelKext = new OSKext; + assert(sKernelKext); + + kernelStart = (u_char *)&_mh_execute_header; + kernelLength = getlastaddr() - (vm_offset_t)kernelStart; + kernelExecutable = OSData::withBytesNoCopy( + kernelStart, kernelLength); + assert(kernelExecutable); + +#if KASLR_KEXT_DEBUG + IOLog("kaslr: kernel start 0x%lx end 0x%lx length %lu vm_kernel_slide %llu (0x%016lx) \n", + (unsigned long)kernelStart, + (unsigned long)getlastaddr(), + kernelLength, + vm_kernel_slide, vm_kernel_slide); #endif - sKernelKext->loadTag = sNextLoadTag++; // the kernel is load tag 0 - sKernelKext->bundleID = OSSymbol::withCString(kOSKextKernelIdentifier); - - sKernelKext->version = OSKextParseVersionString(osrelease); - sKernelKext->compatibleVersion = sKernelKext->version; - sKernelKext->linkedExecutable = kernelExecutable; - sKernelKext->interfaceUUID = sKernelKext->copyUUID(); - - sKernelKext->flags.hasAllDependencies = 1; - sKernelKext->flags.kernelComponent = 1; - sKernelKext->flags.prelinked = 0; - sKernelKext->flags.loaded = 1; - sKernelKext->flags.started = 1; - sKernelKext->flags.CPPInitialized = 0; - sKernelKext->flags.jettisonLinkeditSeg = 0; - - sKernelKext->kmod_info = &g_kernel_kmod_info; - strlcpy(g_kernel_kmod_info.version, osrelease, - sizeof(g_kernel_kmod_info.version)); - g_kernel_kmod_info.size = kernelLength; - g_kernel_kmod_info.id = sKernelKext->loadTag; - - /* Cons up an info dict, so we don't have to have special-case - * checking all over. 
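[Editorial note] initialize() above reads all of its configuration through PE_parse_boot_argn(), which copies a boot-arg's value into a caller-supplied buffer and returns whether the argument was present at all. That one call therefore serves three idioms in this hunk: a numeric filter (kextlog), a flag probed only for presence (-x), and a boolean (keepsyms). A minimal sketch of the three uses, with hypothetical argument names:

uint32_t filter = 0;
char     scratch[16];
bool     keep = false;

// numeric: value parsed into 'filter' when "examplelog=0x..." was given
if (PE_parse_boot_argn("examplelog", &filter, sizeof(filter))) {
	/* filter now holds the parsed value */
}

// presence-only: the value is ignored; the return says "-examplex" was set
bool safe = PE_parse_boot_argn("-examplex", scratch, sizeof(scratch));
(void)safe;

// boolean: an absent argument leaves 'keep' untouched
PE_parse_boot_argn("examplekeep", &keep, sizeof(keep));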
- */ - sKernelKext->infoDict = OSDictionary::withCapacity(5); - assert(sKernelKext->infoDict); - setResult = sKernelKext->infoDict->setObject(kCFBundleIdentifierKey, - sKernelKext->bundleID); - assert(setResult); - setResult = sKernelKext->infoDict->setObject(kOSKernelResourceKey, - kOSBooleanTrue); - assert(setResult); - - scratchString = OSString::withCStringNoCopy(osrelease); - assert(scratchString); - setResult = sKernelKext->infoDict->setObject(kCFBundleVersionKey, - scratchString); - assert(setResult); - OSSafeReleaseNULL(scratchString); - - scratchString = OSString::withCStringNoCopy("mach_kernel"); - assert(scratchString); - setResult = sKernelKext->infoDict->setObject(kCFBundleNameKey, - scratchString); - assert(setResult); - OSSafeReleaseNULL(scratchString); - - /* Add the kernel kext to the bookkeeping dictionaries. Note that - * the kernel kext doesn't have a kmod_info struct. copyInfo() - * gathers info from other places anyhow. - */ - setResult = sKextsByID->setObject(sKernelKext->bundleID, sKernelKext); - assert(setResult); - setResult = sLoadedKexts->setObject(sKernelKext); - assert(setResult); - sKernelKext->release(); - - registryRoot = IORegistryEntry::getRegistryRoot(); - kernelCPUType = OSNumber::withNumber( - (long long unsigned int)_mh_execute_header.cputype, - 8 * sizeof(_mh_execute_header.cputype)); - kernelCPUSubtype = OSNumber::withNumber( - (long long unsigned int)_mh_execute_header.cpusubtype, - 8 * sizeof(_mh_execute_header.cpusubtype)); - assert(registryRoot && kernelCPUSubtype && kernelCPUType); - - registryRoot->setProperty(kOSKernelCPUTypeKey, kernelCPUType); - registryRoot->setProperty(kOSKernelCPUSubtypeKey, kernelCPUSubtype); - - OSSafeReleaseNULL(kernelCPUType); - OSSafeReleaseNULL(kernelCPUSubtype); - - gBuiltinKmodsSectionInfo = getsectbyname(kPrelinkInfoSegment, kBuiltinInfoSection); - if (gBuiltinKmodsSectionInfo) { - uint32_t count; - - assert(gBuiltinKmodsSectionInfo->addr); - assert(gBuiltinKmodsSectionInfo->size); - gBuiltinKmodsCount = (gBuiltinKmodsSectionInfo->size / sizeof(kmod_info_t *)); - - gBuiltinKmodsSectionStart = getsectbyname(kPrelinkInfoSegment, kBuiltinStartSection); - assert(gBuiltinKmodsSectionStart); - assert(gBuiltinKmodsSectionStart->addr); - assert(gBuiltinKmodsSectionStart->size); - count = (gBuiltinKmodsSectionStart->size / sizeof(uintptr_t)); - // one extra pointer for the end of last kmod - assert(count == (gBuiltinKmodsCount + 1)); - - vm_kernel_builtinkmod_text = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[0]; - vm_kernel_builtinkmod_text_end = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[count - 1]; - } - gIOSurfaceIdentifier = OSSymbol::withCStringNoCopy("com.apple.iokit.IOSurface"); - - timestamp = __OSAbsoluteTimePtr(&last_loaded_timestamp); - *timestamp = 0; - timestamp = __OSAbsoluteTimePtr(&last_unloaded_timestamp); - *timestamp = 0; - timestamp = __OSAbsoluteTimePtr(&sLastWakeTime); - *timestamp = 0; - - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogGeneralFlag, - "Kext system initialized."); - - notifyKextLoadObservers(sKernelKext, sKernelKext->kmod_info); - - return; + sKernelKext->loadTag = sNextLoadTag++; // the kernel is load tag 0 + sKernelKext->bundleID = OSSymbol::withCString(kOSKextKernelIdentifier); + + sKernelKext->version = OSKextParseVersionString(osrelease); + sKernelKext->compatibleVersion = sKernelKext->version; + sKernelKext->linkedExecutable = kernelExecutable; + sKernelKext->interfaceUUID = sKernelKext->copyUUID(); + + sKernelKext->flags.hasAllDependencies = 1; + 
sKernelKext->flags.kernelComponent = 1; + sKernelKext->flags.prelinked = 0; + sKernelKext->flags.loaded = 1; + sKernelKext->flags.started = 1; + sKernelKext->flags.CPPInitialized = 0; + sKernelKext->flags.jettisonLinkeditSeg = 0; + + sKernelKext->kmod_info = &g_kernel_kmod_info; + strlcpy(g_kernel_kmod_info.version, osrelease, + sizeof(g_kernel_kmod_info.version)); + g_kernel_kmod_info.size = kernelLength; + g_kernel_kmod_info.id = sKernelKext->loadTag; + + /* Cons up an info dict, so we don't have to have special-case + * checking all over. + */ + sKernelKext->infoDict = OSDictionary::withCapacity(5); + assert(sKernelKext->infoDict); + setResult = sKernelKext->infoDict->setObject(kCFBundleIdentifierKey, + sKernelKext->bundleID); + assert(setResult); + setResult = sKernelKext->infoDict->setObject(kOSKernelResourceKey, + kOSBooleanTrue); + assert(setResult); + + scratchString = OSString::withCStringNoCopy(osrelease); + assert(scratchString); + setResult = sKernelKext->infoDict->setObject(kCFBundleVersionKey, + scratchString); + assert(setResult); + OSSafeReleaseNULL(scratchString); + + scratchString = OSString::withCStringNoCopy("mach_kernel"); + assert(scratchString); + setResult = sKernelKext->infoDict->setObject(kCFBundleNameKey, + scratchString); + assert(setResult); + OSSafeReleaseNULL(scratchString); + + /* Add the kernel kext to the bookkeeping dictionaries. Note that + * the kernel kext doesn't have a kmod_info struct. copyInfo() + * gathers info from other places anyhow. + */ + setResult = sKextsByID->setObject(sKernelKext->bundleID, sKernelKext); + assert(setResult); + setResult = sLoadedKexts->setObject(sKernelKext); + assert(setResult); + sKernelKext->release(); + + registryRoot = IORegistryEntry::getRegistryRoot(); + kernelCPUType = OSNumber::withNumber( + (long long unsigned int)_mh_execute_header.cputype, + 8 * sizeof(_mh_execute_header.cputype)); + kernelCPUSubtype = OSNumber::withNumber( + (long long unsigned int)_mh_execute_header.cpusubtype, + 8 * sizeof(_mh_execute_header.cpusubtype)); + assert(registryRoot && kernelCPUSubtype && kernelCPUType); + + registryRoot->setProperty(kOSKernelCPUTypeKey, kernelCPUType); + registryRoot->setProperty(kOSKernelCPUSubtypeKey, kernelCPUSubtype); + + OSSafeReleaseNULL(kernelCPUType); + OSSafeReleaseNULL(kernelCPUSubtype); + + gBuiltinKmodsSectionInfo = getsectbyname(kPrelinkInfoSegment, kBuiltinInfoSection); + if (gBuiltinKmodsSectionInfo) { + uint32_t count; + + assert(gBuiltinKmodsSectionInfo->addr); + assert(gBuiltinKmodsSectionInfo->size); + gBuiltinKmodsCount = (gBuiltinKmodsSectionInfo->size / sizeof(kmod_info_t *)); + + gBuiltinKmodsSectionStart = getsectbyname(kPrelinkInfoSegment, kBuiltinStartSection); + assert(gBuiltinKmodsSectionStart); + assert(gBuiltinKmodsSectionStart->addr); + assert(gBuiltinKmodsSectionStart->size); + count = (gBuiltinKmodsSectionStart->size / sizeof(uintptr_t)); + // one extra pointer for the end of last kmod + assert(count == (gBuiltinKmodsCount + 1)); + + vm_kernel_builtinkmod_text = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[0]; + vm_kernel_builtinkmod_text_end = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[count - 1]; + } + gIOSurfaceIdentifier = OSSymbol::withCStringNoCopy("com.apple.iokit.IOSurface"); + + timestamp = __OSAbsoluteTimePtr(&last_loaded_timestamp); + *timestamp = 0; + timestamp = __OSAbsoluteTimePtr(&last_unloaded_timestamp); + *timestamp = 0; + timestamp = __OSAbsoluteTimePtr(&sLastWakeTime); + *timestamp = 0; + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + 
kOSKextLogGeneralFlag, + "Kext system initialized."); + + notifyKextLoadObservers(sKernelKext, sKernelKext->kmod_info); + + return; } /********************************************************************* @@ -870,296 +876,293 @@ OSKext::initialize(void) OSReturn OSKext::removeKextBootstrap(void) { - OSReturn result = kOSReturnError; - - const char * dt_kernel_header_name = "Kernel-__HEADER"; - const char * dt_kernel_symtab_name = "Kernel-__SYMTAB"; - kernel_mach_header_t * dt_mach_header = NULL; - int dt_mach_header_size = 0; - struct symtab_command * dt_symtab = NULL; - int dt_symtab_size = 0; - int dt_result = 0; + OSReturn result = kOSReturnError; - kernel_segment_command_t * seg_to_remove = NULL; + const char * dt_kernel_header_name = "Kernel-__HEADER"; + const char * dt_kernel_symtab_name = "Kernel-__SYMTAB"; + kernel_mach_header_t * dt_mach_header = NULL; + int dt_mach_header_size = 0; + struct symtab_command * dt_symtab = NULL; + int dt_symtab_size = 0; + int dt_result = 0; + + kernel_segment_command_t * seg_to_remove = NULL; #if __arm__ || __arm64__ - const char * dt_segment_name = NULL; - void * segment_paddress = NULL; - int segment_size = 0; + const char * dt_segment_name = NULL; + void * segment_paddress = NULL; + int segment_size = 0; #endif - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogGeneralFlag, - "Jettisoning kext bootstrap segments."); - - /***** - * Dispose of unnecessary stuff that the booter didn't need to load. - */ - dt_result = IODTGetLoaderInfo(dt_kernel_header_name, - (void **)&dt_mach_header, &dt_mach_header_size); - if (dt_result == 0 && dt_mach_header) { - IODTFreeLoaderInfo(dt_kernel_header_name, (void *)dt_mach_header, - round_page_32(dt_mach_header_size)); - } - dt_result = IODTGetLoaderInfo(dt_kernel_symtab_name, - (void **)&dt_symtab, &dt_symtab_size); - if (dt_result == 0 && dt_symtab) { - IODTFreeLoaderInfo(dt_kernel_symtab_name, (void *)dt_symtab, - round_page_32(dt_symtab_size)); - } - - /***** - * KLD bootstrap segment. - */ - // xxx - should rename KLD segment - seg_to_remove = getsegbyname("__KLD"); - if (seg_to_remove) { - OSRuntimeUnloadCPPForSegment(seg_to_remove); - } + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogGeneralFlag, + "Jettisoning kext bootstrap segments."); + + /***** + * Dispose of unnecessary stuff that the booter didn't need to load. + */ + dt_result = IODTGetLoaderInfo(dt_kernel_header_name, + (void **)&dt_mach_header, &dt_mach_header_size); + if (dt_result == 0 && dt_mach_header) { + IODTFreeLoaderInfo(dt_kernel_header_name, (void *)dt_mach_header, + round_page_32(dt_mach_header_size)); + } + dt_result = IODTGetLoaderInfo(dt_kernel_symtab_name, + (void **)&dt_symtab, &dt_symtab_size); + if (dt_result == 0 && dt_symtab) { + IODTFreeLoaderInfo(dt_kernel_symtab_name, (void *)dt_symtab, + round_page_32(dt_symtab_size)); + } + + /***** + * KLD bootstrap segment. + */ + // xxx - should rename KLD segment + seg_to_remove = getsegbyname("__KLD"); + if (seg_to_remove) { + OSRuntimeUnloadCPPForSegment(seg_to_remove); + } #if __arm__ || __arm64__ - /* Free the memory that was set up by bootx. - */ - dt_segment_name = "Kernel-__KLD"; - if (0 == IODTGetLoaderInfo(dt_segment_name, &segment_paddress, &segment_size)) { - /* We cannot free this with KTRR enabled, as we cannot - * update the permissions on the KLD range this late - * in the boot process. - */ - IODTFreeLoaderInfo(dt_segment_name, (void *)segment_paddress, - (int)segment_size); - } + /* Free the memory that was set up by bootx. 
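[Editorial note] The removeKextBootstrap() hunk above drains boot-loader handoff memory through a two-call protocol: IODTGetLoaderInfo() looks up a device-tree entry (for example "Kernel-__HEADER") and returns the region's address and size, and IODTFreeLoaderInfo() both deletes the entry and releases the range. A hedged sketch of the pattern, with the property name borrowed from the code above:

void *paddr = NULL;
int   size  = 0;

// A return of 0 means the entry existed and paddr/size are valid.
if (IODTGetLoaderInfo("Kernel-__SYMTAB", &paddr, &size) == 0 && paddr) {
	// Round up to whole pages: that is what the booter handed us.
	IODTFreeLoaderInfo("Kernel-__SYMTAB", paddr, round_page_32(size));
}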
+ */ + dt_segment_name = "Kernel-__KLD"; + if (0 == IODTGetLoaderInfo(dt_segment_name, &segment_paddress, &segment_size)) { + /* We cannot free this with KTRR enabled, as we cannot + * update the permissions on the KLD range this late + * in the boot process. + */ + IODTFreeLoaderInfo(dt_segment_name, (void *)segment_paddress, + (int)segment_size); + } #elif __i386__ || __x86_64__ - /* On x86, use the mapping data from the segment load command to - * unload KLD directly. - * This may invalidate any assumptions about "avail_start" - * defining the lower bound for valid physical addresses. - */ - if (seg_to_remove && seg_to_remove->vmaddr && seg_to_remove->vmsize) { - // 04/18/11 - gab: - // overwrite memory occupied by KLD segment with random data before - // releasing it. - read_frandom((void *) seg_to_remove->vmaddr, seg_to_remove->vmsize); - ml_static_mfree(seg_to_remove->vmaddr, seg_to_remove->vmsize); - } + /* On x86, use the mapping data from the segment load command to + * unload KLD directly. + * This may invalidate any assumptions about "avail_start" + * defining the lower bound for valid physical addresses. + */ + if (seg_to_remove && seg_to_remove->vmaddr && seg_to_remove->vmsize) { + // 04/18/11 - gab: + // overwrite memory occupied by KLD segment with random data before + // releasing it. + read_frandom((void *) seg_to_remove->vmaddr, seg_to_remove->vmsize); + ml_static_mfree(seg_to_remove->vmaddr, seg_to_remove->vmsize); + } #else #error arch #endif - seg_to_remove = NULL; + seg_to_remove = NULL; - /***** - * Prelinked kernel's symtab (if there is one). - */ - kernel_section_t * sect; - sect = getsectbyname("__PRELINK", "__symtab"); - if (sect && sect->addr && sect->size) { - ml_static_mfree(sect->addr, sect->size); - } + /***** + * Prelinked kernel's symtab (if there is one). + */ + kernel_section_t * sect; + sect = getsectbyname("__PRELINK", "__symtab"); + if (sect && sect->addr && sect->size) { + ml_static_mfree(sect->addr, sect->size); + } - seg_to_remove = (kernel_segment_command_t *)getsegbyname("__LINKEDIT"); + seg_to_remove = (kernel_segment_command_t *)getsegbyname("__LINKEDIT"); - /* kxld always needs the kernel's __LINKEDIT segment, but we can make it - * pageable, unless keepsyms is set. To do that, we have to copy it from - * its booter-allocated memory, free the booter memory, reallocate proper - * managed memory, then copy the segment back in. - */ + /* kxld always needs the kernel's __LINKEDIT segment, but we can make it + * pageable, unless keepsyms is set. To do that, we have to copy it from + * its booter-allocated memory, free the booter memory, reallocate proper + * managed memory, then copy the segment back in. + */ #if CONFIG_KXLD #if (__arm__ || __arm64__) #error CONFIG_KXLD not expected for this arch #endif - if (!sKeepSymbols) { - kern_return_t mem_result; - void *seg_copy = NULL; - void *seg_data = NULL; - vm_map_offset_t seg_offset = 0; - vm_map_offset_t seg_copy_offset = 0; - vm_map_size_t seg_length = 0; - - seg_data = (void *) seg_to_remove->vmaddr; - seg_offset = (vm_map_offset_t) seg_to_remove->vmaddr; - seg_length = (vm_map_size_t) seg_to_remove->vmsize; - - /* Allocate space for the LINKEDIT copy. 
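 *
 * [Editorial aside - illustrative sketch, not part of this patch.]
 * The block below converts __LINKEDIT from booter-wired memory into
 * ordinary pageable kernel VM at the same virtual address. In outline
 * (error handling elided):
 *
 *     kmem_alloc(kernel_map, &copy, len, VM_KERN_MEMORY_KEXT);
 *     memcpy(copy, seg, len);              // stash the segment
 *     ml_static_mfree(seg, len);           // release booter memory
 *     vm_map_enter_mem_object(...);        // re-enter range as managed VM
 *     memcpy(seg, copy, len);              // restore the contents
 *     kmem_free(kernel_map, copy, len);    // drop the stash
 *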
- */ - mem_result = kmem_alloc(kernel_map, (vm_offset_t *) &seg_copy, - seg_length, VM_KERN_MEMORY_KEXT); - if (mem_result != KERN_SUCCESS) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag | kOSKextLogArchiveFlag, - "Can't copy __LINKEDIT segment for VM reassign."); - return result; - } - seg_copy_offset = (vm_map_offset_t) seg_copy; - - /* Copy it out. - */ - memcpy(seg_copy, seg_data, seg_length); - - /* Dump the booter memory. - */ - ml_static_mfree(seg_offset, seg_length); - - /* Set up the VM region. - */ - mem_result = vm_map_enter_mem_object( - kernel_map, - &seg_offset, - seg_length, /* mask */ 0, - VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - (ipc_port_t)NULL, - (vm_object_offset_t) 0, - /* copy */ FALSE, - /* cur_protection */ VM_PROT_READ | VM_PROT_WRITE, - /* max_protection */ VM_PROT_ALL, - /* inheritance */ VM_INHERIT_DEFAULT); - if ((mem_result != KERN_SUCCESS) || - (seg_offset != (vm_map_offset_t) seg_data)) - { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag | kOSKextLogArchiveFlag, - "Can't create __LINKEDIT VM entry at %p, length 0x%llx (error 0x%x).", - seg_data, seg_length, mem_result); - return result; - } - - /* And copy it back. - */ - memcpy(seg_data, seg_copy, seg_length); - - /* Free the copy. - */ - kmem_free(kernel_map, seg_copy_offset, seg_length); - } + if (!sKeepSymbols) { + kern_return_t mem_result; + void *seg_copy = NULL; + void *seg_data = NULL; + vm_map_offset_t seg_offset = 0; + vm_map_offset_t seg_copy_offset = 0; + vm_map_size_t seg_length = 0; + + seg_data = (void *) seg_to_remove->vmaddr; + seg_offset = (vm_map_offset_t) seg_to_remove->vmaddr; + seg_length = (vm_map_size_t) seg_to_remove->vmsize; + + /* Allocate space for the LINKEDIT copy. + */ + mem_result = kmem_alloc(kernel_map, (vm_offset_t *) &seg_copy, + seg_length, VM_KERN_MEMORY_KEXT); + if (mem_result != KERN_SUCCESS) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag | kOSKextLogArchiveFlag, + "Can't copy __LINKEDIT segment for VM reassign."); + return result; + } + seg_copy_offset = (vm_map_offset_t) seg_copy; + + /* Copy it out. + */ + memcpy(seg_copy, seg_data, seg_length); + + /* Dump the booter memory. + */ + ml_static_mfree(seg_offset, seg_length); + + /* Set up the VM region. + */ + mem_result = vm_map_enter_mem_object( + kernel_map, + &seg_offset, + seg_length, /* mask */ 0, + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + (ipc_port_t)NULL, + (vm_object_offset_t) 0, + /* copy */ FALSE, + /* cur_protection */ VM_PROT_READ | VM_PROT_WRITE, + /* max_protection */ VM_PROT_ALL, + /* inheritance */ VM_INHERIT_DEFAULT); + if ((mem_result != KERN_SUCCESS) || + (seg_offset != (vm_map_offset_t) seg_data)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag | kOSKextLogArchiveFlag, + "Can't create __LINKEDIT VM entry at %p, length 0x%llx (error 0x%x).", + seg_data, seg_length, mem_result); + return result; + } + + /* And copy it back. + */ + memcpy(seg_data, seg_copy, seg_length); + + /* Free the copy. + */ + kmem_free(kernel_map, seg_copy_offset, seg_length); + } #else /* we are not CONFIG_KXLD */ #if !(__arm__ || __arm64__) #error CONFIG_KXLD is expected for this arch #endif - /***** - * Dump the LINKEDIT segment, unless keepsyms is set. 
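 *
 * [Editorial aside - illustrative note, not part of this patch.]
 * "keepsyms" is the keepsyms boot-arg, cached in sKeepSymbols during
 * initialization; when set, symbol-bearing segments survive so panic
 * logs and debuggers can symbolicate, at the cost of the wired memory
 * reclaimed here. Boot-args of this sort are typically read along
 * these lines (a sketch; the exact parsing in OSKext::initialize may
 * differ):
 *
 *     uint32_t keepsyms = 0;
 *     PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
 *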
- */ - if (!sKeepSymbols) { - dt_segment_name = "Kernel-__LINKEDIT"; - if (0 == IODTGetLoaderInfo(dt_segment_name, - &segment_paddress, &segment_size)) { + /***** + * Dump the LINKEDIT segment, unless keepsyms is set. + */ + if (!sKeepSymbols) { + dt_segment_name = "Kernel-__LINKEDIT"; + if (0 == IODTGetLoaderInfo(dt_segment_name, + &segment_paddress, &segment_size)) { #ifdef SECURE_KERNEL - vm_offset_t vmaddr = ml_static_ptovirt((vm_offset_t)segment_paddress); - bzero((void*)vmaddr, segment_size); + vm_offset_t vmaddr = ml_static_ptovirt((vm_offset_t)segment_paddress); + bzero((void*)vmaddr, segment_size); #endif - IODTFreeLoaderInfo(dt_segment_name, (void *)segment_paddress, - (int)segment_size); - } - } else { - OSKextLog(/* kext */ NULL, - kOSKextLogBasicLevel | - kOSKextLogGeneralFlag, - "keepsyms boot arg specified; keeping linkedit segment for symbols."); - } + IODTFreeLoaderInfo(dt_segment_name, (void *)segment_paddress, + (int)segment_size); + } + } else { + OSKextLog(/* kext */ NULL, + kOSKextLogBasicLevel | + kOSKextLogGeneralFlag, + "keepsyms boot arg specified; keeping linkedit segment for symbols."); + } #endif /* CONFIG_KXLD */ - seg_to_remove = NULL; + seg_to_remove = NULL; - result = kOSReturnSuccess; + result = kOSReturnSuccess; - return result; + return result; } /********************************************************************* *********************************************************************/ void OSKext::flushNonloadedKexts( - Boolean flushPrelinkedKexts) -{ - OSSet * prelinkedKexts = NULL; // must release - OSCollectionIterator * kextIterator = NULL; // must release - OSCollectionIterator * prelinkIterator = NULL; // must release - const OSSymbol * thisID = NULL; // do not release - OSKext * thisKext = NULL; // do not release - uint32_t count, i; - - IORecursiveLockLock(sKextLock); - - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogKextBookkeepingFlag, - "Flushing nonloaded kexts and other unused data."); - - OSKext::considerDestroyingLinkContext(); - - /* If we aren't flushing unused prelinked kexts, we have to put them - * aside while we flush everything else so make a container for them. - */ - if (!flushPrelinkedKexts) { - prelinkedKexts = OSSet::withCapacity(0); - if (!prelinkedKexts) { - goto finish; - } - } - - /* Set aside prelinked kexts (in-use or not) and break - * any lingering inter-kext references for nonloaded kexts - * so they have min. retain counts. - */ - kextIterator = OSCollectionIterator::withCollection(sKextsByID); - if (!kextIterator) { - goto finish; - } - - while ((thisID = OSDynamicCast(OSSymbol, - kextIterator->getNextObject()))) { - - thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID)); - - if (thisKext) { - if (prelinkedKexts && thisKext->isPrelinked()) { - prelinkedKexts->setObject(thisKext); - } - thisKext->flushDependencies(/* forceIfLoaded */ false); - } - } - - /* Dump all the kexts in the ID dictionary; we'll repopulate it shortly. - */ - sKextsByID->flushCollection(); - - /* Now put the loaded kexts back into the ID dictionary. - */ - count = sLoadedKexts->getCount(); - for (i = 0; i < count; i++) { - thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - sKextsByID->setObject(thisKext->getIdentifierCString(), thisKext); - } - - /* Finally, put back the prelinked kexts if we saved any. 
- */ - if (prelinkedKexts) { - prelinkIterator = OSCollectionIterator::withCollection(prelinkedKexts); - if (!prelinkIterator) { - goto finish; - } - - while ((thisKext = OSDynamicCast(OSKext, - prelinkIterator->getNextObject()))) { - - sKextsByID->setObject(thisKext->getIdentifierCString(), - thisKext); - } - } + Boolean flushPrelinkedKexts) +{ + OSSet * prelinkedKexts = NULL;// must release + OSCollectionIterator * kextIterator = NULL;// must release + OSCollectionIterator * prelinkIterator = NULL; // must release + const OSSymbol * thisID = NULL;// do not release + OSKext * thisKext = NULL;// do not release + uint32_t count, i; + + IORecursiveLockLock(sKextLock); + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogKextBookkeepingFlag, + "Flushing nonloaded kexts and other unused data."); + + OSKext::considerDestroyingLinkContext(); + + /* If we aren't flushing unused prelinked kexts, we have to put them + * aside while we flush everything else so make a container for them. + */ + if (!flushPrelinkedKexts) { + prelinkedKexts = OSSet::withCapacity(0); + if (!prelinkedKexts) { + goto finish; + } + } + + /* Set aside prelinked kexts (in-use or not) and break + * any lingering inter-kext references for nonloaded kexts + * so they have min. retain counts. + */ + kextIterator = OSCollectionIterator::withCollection(sKextsByID); + if (!kextIterator) { + goto finish; + } + + while ((thisID = OSDynamicCast(OSSymbol, + kextIterator->getNextObject()))) { + thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID)); + + if (thisKext) { + if (prelinkedKexts && thisKext->isPrelinked()) { + prelinkedKexts->setObject(thisKext); + } + thisKext->flushDependencies(/* forceIfLoaded */ false); + } + } + + /* Dump all the kexts in the ID dictionary; we'll repopulate it shortly. + */ + sKextsByID->flushCollection(); + + /* Now put the loaded kexts back into the ID dictionary. + */ + count = sLoadedKexts->getCount(); + for (i = 0; i < count; i++) { + thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + sKextsByID->setObject(thisKext->getIdentifierCString(), thisKext); + } + + /* Finally, put back the prelinked kexts if we saved any. 
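 *
 * [Editorial aside - illustrative note, not part of this patch.]
 * Net effect of flushNonloadedKexts(): sKextsByID is rebuilt so that
 * only loaded kexts, plus the prelinked ones parked in the temporary
 * OSSet when flushPrelinkedKexts is false, survive. Roughly:
 *
 *     // 1. park prelinked kexts in prelinkedKexts (optional)
 *     // 2. sKextsByID->flushCollection();        drop everything
 *     // 3. re-add each entry of sLoadedKexts
 *     // 4. re-add each member of prelinkedKexts  (the loop below)
 *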
+ */ + if (prelinkedKexts) { + prelinkIterator = OSCollectionIterator::withCollection(prelinkedKexts); + if (!prelinkIterator) { + goto finish; + } + + while ((thisKext = OSDynamicCast(OSKext, + prelinkIterator->getNextObject()))) { + sKextsByID->setObject(thisKext->getIdentifierCString(), + thisKext); + } + } finish: - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - OSSafeReleaseNULL(prelinkedKexts); - OSSafeReleaseNULL(kextIterator); - OSSafeReleaseNULL(prelinkIterator); + OSSafeReleaseNULL(prelinkedKexts); + OSSafeReleaseNULL(kextIterator); + OSSafeReleaseNULL(prelinkIterator); - return; + return; } /********************************************************************* @@ -1168,14 +1171,14 @@ finish: void OSKext::setKextdActive(Boolean active) { - IORecursiveLockLock(sKextLock); - sKextdActive = active; - if (sKernelRequests->getCount()) { - OSKext::pingKextd(); - } - IORecursiveLockUnlock(sKextLock); + IORecursiveLockLock(sKextLock); + sKextdActive = active; + if (sKernelRequests->getCount()) { + OSKext::pingKextd(); + } + IORecursiveLockUnlock(sKextLock); - return; + return; } /********************************************************************* @@ -1190,40 +1193,40 @@ extern void ipc_port_release_send(ipc_port_t); OSReturn OSKext::pingKextd(void) { - OSReturn result = kOSReturnError; + OSReturn result = kOSReturnError; #if !NO_KEXTD - mach_port_t kextd_port = IPC_PORT_NULL; - - if (!sKextdActive) { - result = kOSKextReturnDisabled; // basically unavailable - goto finish; - } - - result = host_get_kextd_port(host_priv_self(), &kextd_port); - if (result != KERN_SUCCESS || !IPC_PORT_VALID(kextd_port)) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Can't get kextd port."); - goto finish; - } - - result = kextd_ping(kextd_port); - if (result != KERN_SUCCESS) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "kextd ping failed (0x%x).", (int)result); - goto finish; - } + mach_port_t kextd_port = IPC_PORT_NULL; + + if (!sKextdActive) { + result = kOSKextReturnDisabled; // basically unavailable + goto finish; + } + + result = host_get_kextd_port(host_priv_self(), &kextd_port); + if (result != KERN_SUCCESS || !IPC_PORT_VALID(kextd_port)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Can't get kextd port."); + goto finish; + } + + result = kextd_ping(kextd_port); + if (result != KERN_SUCCESS) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "kextd ping failed (0x%x).", (int)result); + goto finish; + } finish: - if (IPC_PORT_VALID(kextd_port)) { - ipc_port_release_send(kextd_port); - } + if (IPC_PORT_VALID(kextd_port)) { + ipc_port_release_send(kextd_port); + } #endif - return result; + return result; } /********************************************************************* @@ -1232,11 +1235,11 @@ finish: void OSKext::setDeferredLoadSucceeded(Boolean succeeded) { - IORecursiveLockLock(sKextLock); - sDeferredLoadSucceeded = succeeded; - IORecursiveLockUnlock(sKextLock); + IORecursiveLockLock(sKextLock); + sDeferredLoadSucceeded = succeeded; + IORecursiveLockUnlock(sKextLock); - return; + return; } /********************************************************************* @@ -1247,41 +1250,41 @@ void OSKext::willShutdown(void) { #if !NO_KEXTD - OSReturn checkResult = kOSReturnError; + OSReturn checkResult = kOSReturnError; #endif - OSDictionary * exitRequest = NULL; // must release + OSDictionary * exitRequest = NULL; // must release - 
IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextLock); - OSKext::setLoadEnabled(false); - OSKext::setUnloadEnabled(false); - OSKext::setAutounloadsEnabled(false); - OSKext::setKernelRequestsEnabled(false); + OSKext::setLoadEnabled(false); + OSKext::setUnloadEnabled(false); + OSKext::setAutounloadsEnabled(false); + OSKext::setKernelRequestsEnabled(false); #if !NO_KEXTD - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogGeneralFlag, - "System shutdown; requesting immediate kextd exit."); - - checkResult = _OSKextCreateRequest(kKextRequestPredicateRequestKextdExit, - &exitRequest); - if (checkResult != kOSReturnSuccess) { - goto finish; - } - if (!sKernelRequests->setObject(exitRequest)) { - goto finish; - } - - OSKext::pingKextd(); + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogGeneralFlag, + "System shutdown; requesting immediate kextd exit."); + + checkResult = _OSKextCreateRequest(kKextRequestPredicateRequestKextdExit, + &exitRequest); + if (checkResult != kOSReturnSuccess) { + goto finish; + } + if (!sKernelRequests->setObject(exitRequest)) { + goto finish; + } + + OSKext::pingKextd(); finish: #endif - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - OSSafeReleaseNULL(exitRequest); - return; + OSSafeReleaseNULL(exitRequest); + return; } /********************************************************************* @@ -1290,12 +1293,12 @@ finish: bool OSKext::getLoadEnabled(void) { - bool result; + bool result; - IORecursiveLockLock(sKextLock); - result = sLoadEnabled; - IORecursiveLockUnlock(sKextLock); - return result; + IORecursiveLockLock(sKextLock); + result = sLoadEnabled; + IORecursiveLockUnlock(sKextLock); + return result; } /********************************************************************* @@ -1304,22 +1307,22 @@ OSKext::getLoadEnabled(void) bool OSKext::setLoadEnabled(bool flag) { - bool result; + bool result; + + IORecursiveLockLock(sKextLock); + result = sLoadEnabled; + sLoadEnabled = (flag ? true : false); - IORecursiveLockLock(sKextLock); - result = sLoadEnabled; - sLoadEnabled = (flag ? true : false); - - if (sLoadEnabled != result) { - OSKextLog(/* kext */ NULL, - kOSKextLogBasicLevel | - kOSKextLogLoadFlag, - "Kext loading now %sabled.", sLoadEnabled ? "en" : "dis"); - } + if (sLoadEnabled != result) { + OSKextLog(/* kext */ NULL, + kOSKextLogBasicLevel | + kOSKextLogLoadFlag, + "Kext loading now %sabled.", sLoadEnabled ? "en" : "dis"); + } - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - return result; + return result; } /********************************************************************* @@ -1328,12 +1331,12 @@ OSKext::setLoadEnabled(bool flag) bool OSKext::getUnloadEnabled(void) { - bool result; + bool result; - IORecursiveLockLock(sKextLock); - result = sUnloadEnabled; - IORecursiveLockUnlock(sKextLock); - return result; + IORecursiveLockLock(sKextLock); + result = sUnloadEnabled; + IORecursiveLockUnlock(sKextLock); + return result; } /********************************************************************* @@ -1342,21 +1345,21 @@ OSKext::getUnloadEnabled(void) bool OSKext::setUnloadEnabled(bool flag) { - bool result; + bool result; + + IORecursiveLockLock(sKextLock); + result = sUnloadEnabled; + sUnloadEnabled = (flag ? true : false); + IORecursiveLockUnlock(sKextLock); - IORecursiveLockLock(sKextLock); - result = sUnloadEnabled; - sUnloadEnabled = (flag ? 
true : false); - IORecursiveLockUnlock(sKextLock); - - if (sUnloadEnabled != result) { - OSKextLog(/* kext */ NULL, - kOSKextLogBasicLevel | - kOSKextLogGeneralFlag | kOSKextLogLoadFlag, - "Kext unloading now %sabled.", sUnloadEnabled ? "en" : "dis"); - } + if (sUnloadEnabled != result) { + OSKextLog(/* kext */ NULL, + kOSKextLogBasicLevel | + kOSKextLogGeneralFlag | kOSKextLogLoadFlag, + "Kext unloading now %sabled.", sUnloadEnabled ? "en" : "dis"); + } - return result; + return result; } /********************************************************************* @@ -1366,12 +1369,12 @@ OSKext::setUnloadEnabled(bool flag) bool OSKext::getAutounloadEnabled(void) { - bool result; + bool result; - IORecursiveLockLock(sKextInnerLock); - result = sAutounloadEnabled ? true : false; - IORecursiveLockUnlock(sKextInnerLock); - return result; + IORecursiveLockLock(sKextInnerLock); + result = sAutounloadEnabled ? true : false; + IORecursiveLockUnlock(sKextInnerLock); + return result; } /********************************************************************* @@ -1381,27 +1384,27 @@ OSKext::getAutounloadEnabled(void) bool OSKext::setAutounloadsEnabled(bool flag) { - bool result; + bool result; + + IORecursiveLockLock(sKextInnerLock); - IORecursiveLockLock(sKextInnerLock); + result = sAutounloadEnabled; + sAutounloadEnabled = (flag ? true : false); + if (!sAutounloadEnabled && sUnloadCallout) { + thread_call_cancel(sUnloadCallout); + } - result = sAutounloadEnabled; - sAutounloadEnabled = (flag ? true : false); - if (!sAutounloadEnabled && sUnloadCallout) { - thread_call_cancel(sUnloadCallout); - } - - if (sAutounloadEnabled != result) { - OSKextLog(/* kext */ NULL, - kOSKextLogBasicLevel | - kOSKextLogGeneralFlag | kOSKextLogLoadFlag, - "Kext autounloading now %sabled.", - sAutounloadEnabled ? "en" : "dis"); - } + if (sAutounloadEnabled != result) { + OSKextLog(/* kext */ NULL, + kOSKextLogBasicLevel | + kOSKextLogGeneralFlag | kOSKextLogLoadFlag, + "Kext autounloading now %sabled.", + sAutounloadEnabled ? "en" : "dis"); + } - IORecursiveLockUnlock(sKextInnerLock); + IORecursiveLockUnlock(sKextInnerLock); - return result; + return result; } /********************************************************************* @@ -1410,18 +1413,18 @@ OSKext::setAutounloadsEnabled(bool flag) bool OSKext::setAutounloadEnabled(bool flag) { - bool result = flags.autounloadEnabled ? true : false; - flags.autounloadEnabled = flag ? 1 : 0; - - if (result != (flag ? true : false)) { - OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, - "Autounloading for kext %s now %sabled.", - getIdentifierCString(), - flags.autounloadEnabled ? "en" : "dis"); - } - return result; + bool result = flags.autounloadEnabled ? true : false; + flags.autounloadEnabled = flag ? 1 : 0; + + if (result != (flag ? true : false)) { + OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, + "Autounloading for kext %s now %sabled.", + getIdentifierCString(), + flags.autounloadEnabled ? "en" : "dis"); + } + return result; } /********************************************************************* @@ -1430,21 +1433,21 @@ OSKext::setAutounloadEnabled(bool flag) bool OSKext::setKernelRequestsEnabled(bool flag) { - bool result; + bool result; - IORecursiveLockLock(sKextLock); - result = sKernelRequestsEnabled; - sKernelRequestsEnabled = flag ? 
true : false; - - if (sKernelRequestsEnabled != result) { - OSKextLog(/* kext */ NULL, - kOSKextLogBasicLevel | - kOSKextLogGeneralFlag, - "Kernel requests now %sabled.", - sKernelRequestsEnabled ? "en" : "dis"); - } - IORecursiveLockUnlock(sKextLock); - return result; + IORecursiveLockLock(sKextLock); + result = sKernelRequestsEnabled; + sKernelRequestsEnabled = flag ? true : false; + + if (sKernelRequestsEnabled != result) { + OSKextLog(/* kext */ NULL, + kOSKextLogBasicLevel | + kOSKextLogGeneralFlag, + "Kernel requests now %sabled.", + sKernelRequestsEnabled ? "en" : "dis"); + } + IORecursiveLockUnlock(sKextLock); + return result; } /********************************************************************* @@ -1453,12 +1456,12 @@ OSKext::setKernelRequestsEnabled(bool flag) bool OSKext::getKernelRequestsEnabled(void) { - bool result; + bool result; - IORecursiveLockLock(sKextLock); - result = sKernelRequestsEnabled; - IORecursiveLockUnlock(sKextLock); - return result; + IORecursiveLockLock(sKextLock); + result = sKernelRequestsEnabled; + IORecursiveLockUnlock(sKextLock); + return result; } #if PRAGMA_MARK @@ -1468,447 +1471,447 @@ OSKext::getKernelRequestsEnabled(void) *********************************************************************/ OSKext * OSKext::withPrelinkedInfoDict( - OSDictionary * anInfoDict, - bool doCoalesedSlides) + OSDictionary * anInfoDict, + bool doCoalesedSlides) { - OSKext * newKext = new OSKext; + OSKext * newKext = new OSKext; - if (newKext && !newKext->initWithPrelinkedInfoDict(anInfoDict, doCoalesedSlides)) { - newKext->release(); - return NULL; - } + if (newKext && !newKext->initWithPrelinkedInfoDict(anInfoDict, doCoalesedSlides)) { + newKext->release(); + return NULL; + } - return newKext; + return newKext; } /********************************************************************* *********************************************************************/ bool OSKext::initWithPrelinkedInfoDict( - OSDictionary * anInfoDict, - bool doCoalesedSlides) -{ - bool result = false; - OSString * kextPath = NULL; // do not release - OSNumber * addressNum = NULL; // reused; do not release - OSNumber * lengthNum = NULL; // reused; do not release - void * data = NULL; // do not free - void * srcData = NULL; // do not free - OSData * prelinkedExecutable = NULL; // must release - uint32_t length = 0; // reused - - if (!super::init()) { - goto finish; - } - - /* Get the path. Don't look for an arch-specific path property. - */ - kextPath = OSDynamicCast(OSString, - anInfoDict->getObject(kPrelinkBundlePathKey)); - - if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) { - goto finish; - } + OSDictionary * anInfoDict, + bool doCoalesedSlides) +{ + bool result = false; + OSString * kextPath = NULL;// do not release + OSNumber * addressNum = NULL;// reused; do not release + OSNumber * lengthNum = NULL;// reused; do not release + void * data = NULL;// do not free + void * srcData = NULL;// do not free + OSData * prelinkedExecutable = NULL;// must release + uint32_t length = 0; // reused + + if (!super::init()) { + goto finish; + } + + /* Get the path. Don't look for an arch-specific path property. 
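 *
 * [Editorial aside - illustrative note, not part of this patch.]
 * This initializer consumes kextcache-injected bootstrap keys and then
 * strips them from the info dictionary, since they only matter at
 * boot: kPrelinkBundlePathKey, kPrelinkExecutableRelativePathKey,
 * kPrelinkExecutableLoadKey, kPrelinkExecutableSizeKey,
 * kPrelinkExecutableSourceKey, and kPrelinkKmodInfoKey. The lookups
 * all take the same shape:
 *
 *     OSNumber *num = OSDynamicCast(OSNumber,
 *         anInfoDict->getObject(kPrelinkExecutableLoadKey));
 *     if (num) {
 *         void *addr = (void *)ml_static_slide(
 *             (intptr_t)num->unsigned64BitValue());
 *     }
 *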
+ */ + kextPath = OSDynamicCast(OSString, + anInfoDict->getObject(kPrelinkBundlePathKey)); + + if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) { + goto finish; + } #if KASLR_KEXT_DEBUG - IOLog("kaslr: doCoalesedSlides %d kext %s \n", doCoalesedSlides, getIdentifierCString()); + IOLog("kaslr: doCoalesedSlides %d kext %s \n", doCoalesedSlides, getIdentifierCString()); #endif - /* Also get the executable's bundle-relative path if present. - * Don't look for an arch-specific path property. - */ - executableRelPath = OSDynamicCast(OSString, - anInfoDict->getObject(kPrelinkExecutableRelativePathKey)); - if (executableRelPath) { - executableRelPath->retain(); - } - - /* Don't need the paths to be in the info dictionary any more. - */ - anInfoDict->removeObject(kPrelinkBundlePathKey); - anInfoDict->removeObject(kPrelinkExecutableRelativePathKey); - - /* Create an OSData wrapper around the linked executable. - */ - addressNum = OSDynamicCast(OSNumber, - anInfoDict->getObject(kPrelinkExecutableLoadKey)); - if (addressNum) { - lengthNum = OSDynamicCast(OSNumber, - anInfoDict->getObject(kPrelinkExecutableSizeKey)); - if (!lengthNum) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Kext %s can't find prelinked kext executable size.", - getIdentifierCString()); - goto finish; - } - - data = (void *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); - length = (uint32_t) (lengthNum->unsigned32BitValue()); + /* Also get the executable's bundle-relative path if present. + * Don't look for an arch-specific path property. + */ + executableRelPath = OSDynamicCast(OSString, + anInfoDict->getObject(kPrelinkExecutableRelativePathKey)); + if (executableRelPath) { + executableRelPath->retain(); + } + + /* Don't need the paths to be in the info dictionary any more. + */ + anInfoDict->removeObject(kPrelinkBundlePathKey); + anInfoDict->removeObject(kPrelinkExecutableRelativePathKey); + + /* Create an OSData wrapper around the linked executable. + */ + addressNum = OSDynamicCast(OSNumber, + anInfoDict->getObject(kPrelinkExecutableLoadKey)); + if (addressNum) { + lengthNum = OSDynamicCast(OSNumber, + anInfoDict->getObject(kPrelinkExecutableSizeKey)); + if (!lengthNum) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Kext %s can't find prelinked kext executable size.", + getIdentifierCString()); + goto finish; + } + + data = (void *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); + length = (uint32_t) (lengthNum->unsigned32BitValue()); #if KASLR_KEXT_DEBUG - IOLog("kaslr: unslid 0x%lx slid 0x%lx length %u - prelink executable \n", - (unsigned long)ml_static_unslide(data), - (unsigned long)data, - length); + IOLog("kaslr: unslid 0x%lx slid 0x%lx length %u - prelink executable \n", + (unsigned long)ml_static_unslide(data), + (unsigned long)data, + length); #endif - anInfoDict->removeObject(kPrelinkExecutableLoadKey); - anInfoDict->removeObject(kPrelinkExecutableSizeKey); + anInfoDict->removeObject(kPrelinkExecutableLoadKey); + anInfoDict->removeObject(kPrelinkExecutableSizeKey); + + /* If the kext's load address differs from its source address, allocate + * space in the kext map at the load address and copy the kext over. 
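 *
 * [Editorial aside - illustrative note, not part of this patch.]
 * That is: when kextcache stored the executable somewhere other than
 * the address it was linked for, carve out the linked range with a
 * fixed-address allocation and relocate the bytes. This path is
 * LP64-only; on ILP32 a mismatch is treated as fatal below. Condensed:
 *
 *     if (data != srcData) {
 *         kext_alloc((vm_offset_t *)&data, length, TRUE);  // fixed
 *         memcpy(data, srcData, length);
 *     }
 *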
+ */ + addressNum = OSDynamicCast(OSNumber, anInfoDict->getObject(kPrelinkExecutableSourceKey)); + if (addressNum) { + srcData = (void *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); - /* If the kext's load address differs from its source address, allocate - * space in the kext map at the load address and copy the kext over. - */ - addressNum = OSDynamicCast(OSNumber, anInfoDict->getObject(kPrelinkExecutableSourceKey)); - if (addressNum) { - srcData = (void *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); - #if KASLR_KEXT_DEBUG - IOLog("kaslr: unslid 0x%lx slid 0x%lx - prelink executable source \n", - (unsigned long)ml_static_unslide(srcData), - (unsigned long)srcData); + IOLog("kaslr: unslid 0x%lx slid 0x%lx - prelink executable source \n", + (unsigned long)ml_static_unslide(srcData), + (unsigned long)srcData); #endif - - if (data != srcData) { + + if (data != srcData) { #if __LP64__ - kern_return_t alloc_result; - - alloc_result = kext_alloc((vm_offset_t *)&data, length, /* fixed */ TRUE); - if (alloc_result != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag, - "Failed to allocate space for prelinked kext %s.", - getIdentifierCString()); - goto finish; - } - memcpy(data, srcData, length); + kern_return_t alloc_result; + + alloc_result = kext_alloc((vm_offset_t *)&data, length, /* fixed */ TRUE); + if (alloc_result != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag, + "Failed to allocate space for prelinked kext %s.", + getIdentifierCString()); + goto finish; + } + memcpy(data, srcData, length); #else - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag, - "Error: prelinked kext %s - source and load addresses " - "differ on ILP32 architecture.", - getIdentifierCString()); - goto finish; + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag, + "Error: prelinked kext %s - source and load addresses " + "differ on ILP32 architecture.", + getIdentifierCString()); + goto finish; #endif /* __LP64__ */ - } - - anInfoDict->removeObject(kPrelinkExecutableSourceKey); - } - - prelinkedExecutable = OSData::withBytesNoCopy(data, length); - if (!prelinkedExecutable) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag | kOSKextLogArchiveFlag, - "Kext %s failed to create executable wrapper.", - getIdentifierCString()); - goto finish; - } + } + + anInfoDict->removeObject(kPrelinkExecutableSourceKey); + } + + prelinkedExecutable = OSData::withBytesNoCopy(data, length); + if (!prelinkedExecutable) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag | kOSKextLogArchiveFlag, + "Kext %s failed to create executable wrapper.", + getIdentifierCString()); + goto finish; + } #if VM_MAPPED_KEXTS - prelinkedExecutable->setDeallocFunction(osdata_kext_free); + prelinkedExecutable->setDeallocFunction(osdata_kext_free); #else - prelinkedExecutable->setDeallocFunction(osdata_phys_free); + prelinkedExecutable->setDeallocFunction(osdata_phys_free); #endif - setLinkedExecutable(prelinkedExecutable); - addressNum = OSDynamicCast(OSNumber, - anInfoDict->getObject(kPrelinkKmodInfoKey)); - if (!addressNum) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Kext %s can't find prelinked kext kmod_info address.", - getIdentifierCString()); - goto finish; - } - - if (addressNum->unsigned64BitValue() != 0) { - kmod_info = (kmod_info_t *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); - kmod_info->address = 
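/* [Editorial aside - illustrative note, not part of this patch.]
 * ml_static_slide() rebases a value recorded before KASLR by adding
 * the boot-time slide; ml_static_unslide() inverts it, which is what
 * the KASLR_KEXT_DEBUG logging uses to print stable addresses:
 *
 *     uintptr_t slid     = ml_static_slide(stored);   // stored + slide
 *     uintptr_t restored = ml_static_unslide(slid);   // == stored
 */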
ml_static_slide(kmod_info->address); + setLinkedExecutable(prelinkedExecutable); + addressNum = OSDynamicCast(OSNumber, + anInfoDict->getObject(kPrelinkKmodInfoKey)); + if (!addressNum) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Kext %s can't find prelinked kext kmod_info address.", + getIdentifierCString()); + goto finish; + } + + if (addressNum->unsigned64BitValue() != 0) { + kmod_info = (kmod_info_t *) ml_static_slide((intptr_t) (addressNum->unsigned64BitValue())); + kmod_info->address = ml_static_slide(kmod_info->address); #if KASLR_KEXT_DEBUG - IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info \n", - (unsigned long)ml_static_unslide(kmod_info), - (unsigned long)kmod_info); - IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info->address \n", - (unsigned long)ml_static_unslide(kmod_info->address), - (unsigned long)kmod_info->address); + IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info \n", + (unsigned long)ml_static_unslide(kmod_info), + (unsigned long)kmod_info); + IOLog("kaslr: unslid 0x%lx slid 0x%lx - kmod_info->address \n", + (unsigned long)ml_static_unslide(kmod_info->address), + (unsigned long)kmod_info->address); #endif - } - - anInfoDict->removeObject(kPrelinkKmodInfoKey); - } - - if ((addressNum = OSDynamicCast(OSNumber, anInfoDict->getObject("ModuleIndex")))) - { - uintptr_t builtinTextStart; - uintptr_t builtinTextEnd; - - flags.builtin = true; - builtinKmodIdx = addressNum->unsigned32BitValue(); - assert(builtinKmodIdx < gBuiltinKmodsCount); - - builtinTextStart = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[builtinKmodIdx]; - builtinTextEnd = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[builtinKmodIdx + 1]; - - kmod_info = ((kmod_info_t **)gBuiltinKmodsSectionInfo->addr)[builtinKmodIdx]; - kmod_info->address = builtinTextStart; - kmod_info->size = builtinTextEnd - builtinTextStart; - } - - /* If the plist has a UUID for an interface, save that off. - */ - if (isInterface()) { - interfaceUUID = OSDynamicCast(OSData, - anInfoDict->getObject(kPrelinkInterfaceUUIDKey)); - if (interfaceUUID) { - interfaceUUID->retain(); - anInfoDict->removeObject(kPrelinkInterfaceUUIDKey); - } - } - - result = slidePrelinkedExecutable(doCoalesedSlides); - if (result != kOSReturnSuccess) { - goto finish; - } - - if (doCoalesedSlides == false) { - /* set VM protections now, wire later at kext load */ - result = setVMAttributes(true, false); - if (result != KERN_SUCCESS) { - goto finish; - } - } - - flags.prelinked = true; - - /* If we created a kext from prelink info, - * we must be booting from a prelinked kernel. - */ - sPrelinkBoot = true; - - result = registerIdentifier(); + } + + anInfoDict->removeObject(kPrelinkKmodInfoKey); + } + + if ((addressNum = OSDynamicCast(OSNumber, anInfoDict->getObject("ModuleIndex")))) { + uintptr_t builtinTextStart; + uintptr_t builtinTextEnd; + + flags.builtin = true; + builtinKmodIdx = addressNum->unsigned32BitValue(); + assert(builtinKmodIdx < gBuiltinKmodsCount); + + builtinTextStart = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[builtinKmodIdx]; + builtinTextEnd = ((uintptr_t *)gBuiltinKmodsSectionStart->addr)[builtinKmodIdx + 1]; + + kmod_info = ((kmod_info_t **)gBuiltinKmodsSectionInfo->addr)[builtinKmodIdx]; + kmod_info->address = builtinTextStart; + kmod_info->size = builtinTextEnd - builtinTextStart; + } + + /* If the plist has a UUID for an interface, save that off. 
+ */ + if (isInterface()) { + interfaceUUID = OSDynamicCast(OSData, + anInfoDict->getObject(kPrelinkInterfaceUUIDKey)); + if (interfaceUUID) { + interfaceUUID->retain(); + anInfoDict->removeObject(kPrelinkInterfaceUUIDKey); + } + } + + result = slidePrelinkedExecutable(doCoalesedSlides); + if (result != kOSReturnSuccess) { + goto finish; + } + + if (doCoalesedSlides == false) { + /* set VM protections now, wire later at kext load */ + result = setVMAttributes(true, false); + if (result != KERN_SUCCESS) { + goto finish; + } + } + + flags.prelinked = true; + + /* If we created a kext from prelink info, + * we must be booting from a prelinked kernel. + */ + sPrelinkBoot = true; + + result = registerIdentifier(); finish: - OSSafeReleaseNULL(prelinkedExecutable); + OSSafeReleaseNULL(prelinkedExecutable); - return result; + return result; } /********************************************************************* - *********************************************************************/ +*********************************************************************/ /* static */ -void OSKext::setAllVMAttributes(void) -{ - OSCollectionIterator * kextIterator = NULL; // must release - const OSSymbol * thisID = NULL; // do not release - - IORecursiveLockLock(sKextLock); - - kextIterator = OSCollectionIterator::withCollection(sKextsByID); - if (!kextIterator) { - goto finish; - } - - while ((thisID = OSDynamicCast(OSSymbol, kextIterator->getNextObject()))) { - OSKext * thisKext; // do not release - - thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID)); - if (!thisKext || thisKext->isInterface() || !thisKext->declaresExecutable()) { - continue; - } - - /* set VM protections now, wire later at kext load */ - thisKext->setVMAttributes(true, false); - } - +void +OSKext::setAllVMAttributes(void) +{ + OSCollectionIterator * kextIterator = NULL;// must release + const OSSymbol * thisID = NULL;// do not release + + IORecursiveLockLock(sKextLock); + + kextIterator = OSCollectionIterator::withCollection(sKextsByID); + if (!kextIterator) { + goto finish; + } + + while ((thisID = OSDynamicCast(OSSymbol, kextIterator->getNextObject()))) { + OSKext * thisKext;// do not release + + thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID)); + if (!thisKext || thisKext->isInterface() || !thisKext->declaresExecutable()) { + continue; + } + + /* set VM protections now, wire later at kext load */ + thisKext->setVMAttributes(true, false); + } + finish: - IORecursiveLockUnlock(sKextLock); - OSSafeReleaseNULL(kextIterator); + IORecursiveLockUnlock(sKextLock); + OSSafeReleaseNULL(kextIterator); - return; + return; } /********************************************************************* *********************************************************************/ OSKext * OSKext::withBooterData( - OSString * deviceTreeName, - OSData * booterData) + OSString * deviceTreeName, + OSData * booterData) { - OSKext * newKext = new OSKext; + OSKext * newKext = new OSKext; + + if (newKext && !newKext->initWithBooterData(deviceTreeName, booterData)) { + newKext->release(); + return NULL; + } - if (newKext && !newKext->initWithBooterData(deviceTreeName, booterData)) { - newKext->release(); - return NULL; - } - - return newKext; + return newKext; } /********************************************************************* *********************************************************************/ typedef struct _BooterKextFileInfo { - uint32_t infoDictPhysAddr; - uint32_t infoDictLength; - uint32_t executablePhysAddr; - uint32_t executableLength; 
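/* [Editorial aside - illustrative note, not part of this patch.]
 * Each pair of _BooterKextFileInfo fields is a physical address plus
 * a byte length handed over by the booter; initWithBooterData() must
 * translate each through the kernel's static mapping before use, e.g.
 * (the variable names here are hypothetical):
 *
 *     char *vaddr = (char *)ml_static_ptovirt(info->infoDictPhysAddr);
 *     if (!vaddr) {
 *         return false;   // physical range not statically mapped
 *     }
 */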
- uint32_t bundlePathPhysAddr; - uint32_t bundlePathLength; + uint32_t infoDictPhysAddr; + uint32_t infoDictLength; + uint32_t executablePhysAddr; + uint32_t executableLength; + uint32_t bundlePathPhysAddr; + uint32_t bundlePathLength; } _BooterKextFileInfo; bool OSKext::initWithBooterData( - OSString * deviceTreeName, - OSData * booterData) -{ - bool result = false; - _BooterKextFileInfo * kextFileInfo = NULL; // do not free - char * infoDictAddr = NULL; // do not free - void * executableAddr = NULL; // do not free - char * bundlePathAddr = NULL; // do not free - - OSObject * parsedXML = NULL; // must release - OSDictionary * theInfoDict = NULL; // do not release - OSString * kextPath = NULL; // must release - OSString * errorString = NULL; // must release - OSData * executable = NULL; // must release - - if (!super::init()) { - goto finish; - } - - kextFileInfo = (_BooterKextFileInfo *)booterData->getBytesNoCopy(); - if (!kextFileInfo) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "No booter-provided data for kext device tree entry %s.", - deviceTreeName->getCStringNoCopy()); - goto finish; - } - - /* The info plist must exist or we can't read the kext. - */ - if (!kextFileInfo->infoDictPhysAddr || !kextFileInfo->infoDictLength) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "No kext info dictionary for booter device tree entry %s.", - deviceTreeName->getCStringNoCopy()); - goto finish; - } - - infoDictAddr = (char *)ml_static_ptovirt(kextFileInfo->infoDictPhysAddr); - if (!infoDictAddr) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Can't translate physical address 0x%x of kext info dictionary " - "for device tree entry %s.", - (int)kextFileInfo->infoDictPhysAddr, - deviceTreeName->getCStringNoCopy()); - goto finish; - } - - parsedXML = OSUnserializeXML(infoDictAddr, &errorString); - if (parsedXML) { - theInfoDict = OSDynamicCast(OSDictionary, parsedXML); - } - if (!theInfoDict) { - const char * errorCString = "(unknown error)"; - - if (errorString && errorString->getCStringNoCopy()) { - errorCString = errorString->getCStringNoCopy(); - } else if (parsedXML) { - errorCString = "not a dictionary"; - } - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Error unserializing info dictionary for device tree entry %s: %s.", - deviceTreeName->getCStringNoCopy(), errorCString); - goto finish; - } - - /* A bundle path is not mandatory. - */ - if (kextFileInfo->bundlePathPhysAddr && kextFileInfo->bundlePathLength) { - bundlePathAddr = (char *)ml_static_ptovirt(kextFileInfo->bundlePathPhysAddr); - if (!bundlePathAddr) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Can't translate physical address 0x%x of kext bundle path " - "for device tree entry %s.", - (int)kextFileInfo->bundlePathPhysAddr, - deviceTreeName->getCStringNoCopy()); - goto finish; - } - bundlePathAddr[kextFileInfo->bundlePathLength-1] = '\0'; // just in case! - - kextPath = OSString::withCString(bundlePathAddr); - if (!kextPath) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Failed to create wrapper for device tree entry %s kext path %s.", - deviceTreeName->getCStringNoCopy(), bundlePathAddr); - goto finish; - } - } - - if (!setInfoDictionaryAndPath(theInfoDict, kextPath)) { - goto finish; - } - - /* An executable is not mandatory. 
- */ - if (kextFileInfo->executablePhysAddr && kextFileInfo->executableLength) { - executableAddr = (void *)ml_static_ptovirt(kextFileInfo->executablePhysAddr); - if (!executableAddr) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Can't translate physical address 0x%x of kext executable " - "for device tree entry %s.", - (int)kextFileInfo->executablePhysAddr, - deviceTreeName->getCStringNoCopy()); - goto finish; - } - - executable = OSData::withBytesNoCopy(executableAddr, - kextFileInfo->executableLength); - if (!executable) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Failed to create executable wrapper for device tree entry %s.", - deviceTreeName->getCStringNoCopy()); - goto finish; - } - - /* A kext with an executable needs to retain the whole booterData - * object to keep the executable in memory. - */ - if (!setExecutable(executable, booterData)) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Failed to set kext executable for device tree entry %s.", - deviceTreeName->getCStringNoCopy()); - goto finish; - } - } - - result = registerIdentifier(); + OSString * deviceTreeName, + OSData * booterData) +{ + bool result = false; + _BooterKextFileInfo * kextFileInfo = NULL;// do not free + char * infoDictAddr = NULL;// do not free + void * executableAddr = NULL;// do not free + char * bundlePathAddr = NULL;// do not free + + OSObject * parsedXML = NULL;// must release + OSDictionary * theInfoDict = NULL;// do not release + OSString * kextPath = NULL;// must release + OSString * errorString = NULL;// must release + OSData * executable = NULL;// must release + + if (!super::init()) { + goto finish; + } + + kextFileInfo = (_BooterKextFileInfo *)booterData->getBytesNoCopy(); + if (!kextFileInfo) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "No booter-provided data for kext device tree entry %s.", + deviceTreeName->getCStringNoCopy()); + goto finish; + } + + /* The info plist must exist or we can't read the kext. + */ + if (!kextFileInfo->infoDictPhysAddr || !kextFileInfo->infoDictLength) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "No kext info dictionary for booter device tree entry %s.", + deviceTreeName->getCStringNoCopy()); + goto finish; + } + + infoDictAddr = (char *)ml_static_ptovirt(kextFileInfo->infoDictPhysAddr); + if (!infoDictAddr) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Can't translate physical address 0x%x of kext info dictionary " + "for device tree entry %s.", + (int)kextFileInfo->infoDictPhysAddr, + deviceTreeName->getCStringNoCopy()); + goto finish; + } + + parsedXML = OSUnserializeXML(infoDictAddr, &errorString); + if (parsedXML) { + theInfoDict = OSDynamicCast(OSDictionary, parsedXML); + } + if (!theInfoDict) { + const char * errorCString = "(unknown error)"; + + if (errorString && errorString->getCStringNoCopy()) { + errorCString = errorString->getCStringNoCopy(); + } else if (parsedXML) { + errorCString = "not a dictionary"; + } + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Error unserializing info dictionary for device tree entry %s: %s.", + deviceTreeName->getCStringNoCopy(), errorCString); + goto finish; + } + + /* A bundle path is not mandatory. 
+ */ + if (kextFileInfo->bundlePathPhysAddr && kextFileInfo->bundlePathLength) { + bundlePathAddr = (char *)ml_static_ptovirt(kextFileInfo->bundlePathPhysAddr); + if (!bundlePathAddr) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Can't translate physical address 0x%x of kext bundle path " + "for device tree entry %s.", + (int)kextFileInfo->bundlePathPhysAddr, + deviceTreeName->getCStringNoCopy()); + goto finish; + } + bundlePathAddr[kextFileInfo->bundlePathLength - 1] = '\0'; // just in case! + + kextPath = OSString::withCString(bundlePathAddr); + if (!kextPath) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Failed to create wrapper for device tree entry %s kext path %s.", + deviceTreeName->getCStringNoCopy(), bundlePathAddr); + goto finish; + } + } + + if (!setInfoDictionaryAndPath(theInfoDict, kextPath)) { + goto finish; + } + + /* An executable is not mandatory. + */ + if (kextFileInfo->executablePhysAddr && kextFileInfo->executableLength) { + executableAddr = (void *)ml_static_ptovirt(kextFileInfo->executablePhysAddr); + if (!executableAddr) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Can't translate physical address 0x%x of kext executable " + "for device tree entry %s.", + (int)kextFileInfo->executablePhysAddr, + deviceTreeName->getCStringNoCopy()); + goto finish; + } + + executable = OSData::withBytesNoCopy(executableAddr, + kextFileInfo->executableLength); + if (!executable) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Failed to create executable wrapper for device tree entry %s.", + deviceTreeName->getCStringNoCopy()); + goto finish; + } + + /* A kext with an executable needs to retain the whole booterData + * object to keep the executable in memory. + */ + if (!setExecutable(executable, booterData)) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Failed to set kext executable for device tree entry %s.", + deviceTreeName->getCStringNoCopy()); + goto finish; + } + } + + result = registerIdentifier(); finish: - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(kextPath); - OSSafeReleaseNULL(errorString); - OSSafeReleaseNULL(executable); + OSSafeReleaseNULL(parsedXML); + OSSafeReleaseNULL(kextPath); + OSSafeReleaseNULL(errorString); + OSSafeReleaseNULL(executable); - return result; + return result; } /********************************************************************* @@ -1916,376 +1919,374 @@ finish: bool OSKext::registerIdentifier(void) { - bool result = false; - OSKext * existingKext = NULL; // do not release - bool existingIsLoaded = false; - bool existingIsPrelinked = false; - OSKextVersion newVersion = -1; - OSKextVersion existingVersion = -1; - char newVersionCString[kOSKextVersionMaxLength]; - char existingVersionCString[kOSKextVersionMaxLength]; - OSData * newUUID = NULL; // must release - OSData * existingUUID = NULL; // must release - - IORecursiveLockLock(sKextLock); - - /* Get the new kext's version for checks & log messages. - */ - newVersion = getVersion(); - OSKextVersionGetString(newVersion, newVersionCString, - kOSKextVersionMaxLength); - - /* If we don't have an existing kext with this identifier, - * just record the new kext and we're done! - */ - existingKext = OSDynamicCast(OSKext, sKextsByID->getObject(bundleID)); - if (!existingKext) { - sKextsByID->setObject(bundleID, this); - result = true; - goto finish; - } - - /* Get the existing kext's version for checks & log messages. 
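 *
 * [Editorial aside - illustrative note, not part of this patch.]
 * OSKextVersion is a signed scalar encoding of the dotted bundle
 * version, so plain integer comparison implements "newer than"; the
 * helpers convert both ways:
 *
 *     OSKextVersion v = OSKextParseVersionString("1.2.3"); // < 0 on error
 *     char buf[kOSKextVersionMaxLength];
 *     OSKextVersionGetString(v, buf, sizeof(buf));         // back to "1.2.3"
 *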
- */ - existingVersion = existingKext->getVersion(); - OSKextVersionGetString(existingVersion, - existingVersionCString, kOSKextVersionMaxLength); - - existingIsLoaded = existingKext->isLoaded(); - existingIsPrelinked = existingKext->isPrelinked(); - - /* If we have a kext with this identifier that's already loaded/prelinked, - * we can't use the new one, but let's be really thorough and check how - * the two are related for a precise diagnostic log message. - * - * Note that user space can't find out about nonloaded prelinked kexts, - * so in this case we log a message when new & existing are equivalent - * at the step rather than warning level, because we are always going - * be getting a copy of the kext in the user load request mkext. - */ - if (existingIsLoaded || existingIsPrelinked) { - bool sameVersion = (newVersion == existingVersion); - bool sameExecutable = true; // assume true unless we have UUIDs - - /* Only get the UUID if the existing kext is loaded. Doing so - * might have to uncompress an mkext executable and we shouldn't - * take that hit when neither kext is loaded. - */ - newUUID = copyUUID(); - existingUUID = existingKext->copyUUID(); - - /* I'm entirely too paranoid about checking equivalence of executables, - * but I remember nasty problems with it in the past. - * - * - If we have UUIDs for both kexts, compare them. - * - If only one kext has a UUID, they're definitely different. - */ - if (newUUID && existingUUID) { - sameExecutable = newUUID->isEqualTo(existingUUID); - } else if (newUUID || existingUUID) { - sameExecutable = false; - } - - if (!newUUID && !existingUUID) { - - /* If there are no UUIDs, we can't really tell that the executables - * are *different* without a lot of work; the loaded kext's - * unrelocated executable is no longer around (and we never had it - * in-kernel for a prelinked kext). We certainly don't want to do - * a whole fake link for the new kext just to compare, either. - */ - - OSKextVersionGetString(version, newVersionCString, - sizeof(newVersionCString)); - OSKextLog(this, - kOSKextLogWarningLevel | - kOSKextLogKextBookkeepingFlag, - "Notice - new kext %s, v%s matches %s kext " - "but can't determine if executables are the same (no UUIDs).", - getIdentifierCString(), - newVersionCString, - (existingIsLoaded ? "loaded" : "prelinked")); - } - - if (sameVersion && sameExecutable) { - OSKextLog(this, - (existingIsLoaded ? kOSKextLogWarningLevel : kOSKextLogStepLevel) | - kOSKextLogKextBookkeepingFlag, - "Refusing new kext %s, v%s: a %s copy is already present " - "(same version and executable).", - getIdentifierCString(), newVersionCString, - (existingIsLoaded ? "loaded" : "prelinked")); - } else { - if (!sameVersion) { - /* This condition is significant so log it under warnings. - */ - OSKextLog(this, - kOSKextLogWarningLevel | - kOSKextLogKextBookkeepingFlag, - "Refusing new kext %s, v%s: already have %s v%s.", - getIdentifierCString(), - newVersionCString, - (existingIsLoaded ? "loaded" : "prelinked"), - existingVersionCString); - } else { - /* This condition is significant so log it under warnings. - */ - OSKextLog(this, - kOSKextLogWarningLevel | kOSKextLogKextBookkeepingFlag, - "Refusing new kext %s, v%s: a %s copy with a different " - "executable UUID is already present.", - getIdentifierCString(), newVersionCString, - (existingIsLoaded ? 
"loaded" : "prelinked")); - } - } - goto finish; - } /* if (existingIsLoaded || existingIsPrelinked) */ - - /* We have two nonloaded/nonprelinked kexts, so our decision depends on whether - * user loads are happening or if we're still in early boot. User agents are - * supposed to resolve dependencies topside and include only the exact - * kexts needed; so we always accept the new kext (in fact we should never - * see an older unloaded copy hanging around). - */ - if (sUserLoadsActive) { - sKextsByID->setObject(bundleID, this); - result = true; - - OSKextLog(this, - kOSKextLogStepLevel | - kOSKextLogKextBookkeepingFlag, - "Dropping old copy of kext %s (v%s) for newly-added (v%s).", - getIdentifierCString(), - existingVersionCString, - newVersionCString); - - goto finish; - } - - /* During early boot, the kext with the highest version always wins out. - * Prelinked kernels will never hit this, but mkexts and booter-read - * kexts might have duplicates. - */ - if (newVersion > existingVersion) { - sKextsByID->setObject(bundleID, this); - result = true; - - OSKextLog(this, - kOSKextLogStepLevel | - kOSKextLogKextBookkeepingFlag, - "Dropping lower version (v%s) of registered kext %s for higher (v%s).", - existingVersionCString, - getIdentifierCString(), - newVersionCString); - - } else { - OSKextLog(this, - kOSKextLogStepLevel | - kOSKextLogKextBookkeepingFlag, - "Kext %s is already registered with a higher/same version (v%s); " - "dropping newly-added (v%s).", - getIdentifierCString(), - existingVersionCString, - newVersionCString); - } - - /* result has been set appropriately by now. */ + bool result = false; + OSKext * existingKext = NULL;// do not release + bool existingIsLoaded = false; + bool existingIsPrelinked = false; + OSKextVersion newVersion = -1; + OSKextVersion existingVersion = -1; + char newVersionCString[kOSKextVersionMaxLength]; + char existingVersionCString[kOSKextVersionMaxLength]; + OSData * newUUID = NULL;// must release + OSData * existingUUID = NULL;// must release + + IORecursiveLockLock(sKextLock); + + /* Get the new kext's version for checks & log messages. + */ + newVersion = getVersion(); + OSKextVersionGetString(newVersion, newVersionCString, + kOSKextVersionMaxLength); + + /* If we don't have an existing kext with this identifier, + * just record the new kext and we're done! + */ + existingKext = OSDynamicCast(OSKext, sKextsByID->getObject(bundleID)); + if (!existingKext) { + sKextsByID->setObject(bundleID, this); + result = true; + goto finish; + } + + /* Get the existing kext's version for checks & log messages. + */ + existingVersion = existingKext->getVersion(); + OSKextVersionGetString(existingVersion, + existingVersionCString, kOSKextVersionMaxLength); + + existingIsLoaded = existingKext->isLoaded(); + existingIsPrelinked = existingKext->isPrelinked(); + + /* If we have a kext with this identifier that's already loaded/prelinked, + * we can't use the new one, but let's be really thorough and check how + * the two are related for a precise diagnostic log message. + * + * Note that user space can't find out about nonloaded prelinked kexts, + * so in this case we log a message when new & existing are equivalent + * at the step rather than warning level, because we are always going + * be getting a copy of the kext in the user load request mkext. 
+ */ + if (existingIsLoaded || existingIsPrelinked) { + bool sameVersion = (newVersion == existingVersion); + bool sameExecutable = true; // assume true unless we have UUIDs + + /* Only get the UUID if the existing kext is loaded. Doing so + * might have to uncompress an mkext executable and we shouldn't + * take that hit when neither kext is loaded. + */ + newUUID = copyUUID(); + existingUUID = existingKext->copyUUID(); + + /* I'm entirely too paranoid about checking equivalence of executables, + * but I remember nasty problems with it in the past. + * + * - If we have UUIDs for both kexts, compare them. + * - If only one kext has a UUID, they're definitely different. + */ + if (newUUID && existingUUID) { + sameExecutable = newUUID->isEqualTo(existingUUID); + } else if (newUUID || existingUUID) { + sameExecutable = false; + } + + if (!newUUID && !existingUUID) { + /* If there are no UUIDs, we can't really tell that the executables + * are *different* without a lot of work; the loaded kext's + * unrelocated executable is no longer around (and we never had it + * in-kernel for a prelinked kext). We certainly don't want to do + * a whole fake link for the new kext just to compare, either. + */ + + OSKextVersionGetString(version, newVersionCString, + sizeof(newVersionCString)); + OSKextLog(this, + kOSKextLogWarningLevel | + kOSKextLogKextBookkeepingFlag, + "Notice - new kext %s, v%s matches %s kext " + "but can't determine if executables are the same (no UUIDs).", + getIdentifierCString(), + newVersionCString, + (existingIsLoaded ? "loaded" : "prelinked")); + } + + if (sameVersion && sameExecutable) { + OSKextLog(this, + (existingIsLoaded ? kOSKextLogWarningLevel : kOSKextLogStepLevel) | + kOSKextLogKextBookkeepingFlag, + "Refusing new kext %s, v%s: a %s copy is already present " + "(same version and executable).", + getIdentifierCString(), newVersionCString, + (existingIsLoaded ? "loaded" : "prelinked")); + } else { + if (!sameVersion) { + /* This condition is significant so log it under warnings. + */ + OSKextLog(this, + kOSKextLogWarningLevel | + kOSKextLogKextBookkeepingFlag, + "Refusing new kext %s, v%s: already have %s v%s.", + getIdentifierCString(), + newVersionCString, + (existingIsLoaded ? "loaded" : "prelinked"), + existingVersionCString); + } else { + /* This condition is significant so log it under warnings. + */ + OSKextLog(this, + kOSKextLogWarningLevel | kOSKextLogKextBookkeepingFlag, + "Refusing new kext %s, v%s: a %s copy with a different " + "executable UUID is already present.", + getIdentifierCString(), newVersionCString, + (existingIsLoaded ? "loaded" : "prelinked")); + } + } + goto finish; + } /* if (existingIsLoaded || existingIsPrelinked) */ + + /* We have two nonloaded/nonprelinked kexts, so our decision depends on whether + * user loads are happening or if we're still in early boot. User agents are + * supposed to resolve dependencies topside and include only the exact + * kexts needed; so we always accept the new kext (in fact we should never + * see an older unloaded copy hanging around). + */ + if (sUserLoadsActive) { + sKextsByID->setObject(bundleID, this); + result = true; + + OSKextLog(this, + kOSKextLogStepLevel | + kOSKextLogKextBookkeepingFlag, + "Dropping old copy of kext %s (v%s) for newly-added (v%s).", + getIdentifierCString(), + existingVersionCString, + newVersionCString); + + goto finish; + } + + /* During early boot, the kext with the highest version always wins out. 
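 *
 * [Editorial aside - illustrative note, not part of this patch.]
 * Summary of the duplicate-identifier policy in registerIdentifier():
 *
 *     existing copy loaded/prelinked -> refuse the newcomer, log why
 *     sUserLoadsActive               -> always accept the newcomer
 *     early boot (this branch)       -> higher version wins
 *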
+ * Prelinked kernels will never hit this, but mkexts and booter-read + * kexts might have duplicates. + */ + if (newVersion > existingVersion) { + sKextsByID->setObject(bundleID, this); + result = true; + + OSKextLog(this, + kOSKextLogStepLevel | + kOSKextLogKextBookkeepingFlag, + "Dropping lower version (v%s) of registered kext %s for higher (v%s).", + existingVersionCString, + getIdentifierCString(), + newVersionCString); + } else { + OSKextLog(this, + kOSKextLogStepLevel | + kOSKextLogKextBookkeepingFlag, + "Kext %s is already registered with a higher/same version (v%s); " + "dropping newly-added (v%s).", + getIdentifierCString(), + existingVersionCString, + newVersionCString); + } + + /* result has been set appropriately by now. */ finish: - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - if (result) { - OSKextLog(this, - kOSKextLogStepLevel | - kOSKextLogKextBookkeepingFlag, - "Kext %s, v%s registered and available for loading.", - getIdentifierCString(), newVersionCString); - } + if (result) { + OSKextLog(this, + kOSKextLogStepLevel | + kOSKextLogKextBookkeepingFlag, + "Kext %s, v%s registered and available for loading.", + getIdentifierCString(), newVersionCString); + } - OSSafeReleaseNULL(newUUID); - OSSafeReleaseNULL(existingUUID); + OSSafeReleaseNULL(newUUID); + OSSafeReleaseNULL(existingUUID); - return result; + return result; } /********************************************************************* -* Does the bare minimum validation to look up a kext. -* All other validation is done on the spot as needed. -**********************************************************************/ + * Does the bare minimum validation to look up a kext. + * All other validation is done on the spot as needed. + **********************************************************************/ bool OSKext::setInfoDictionaryAndPath( - OSDictionary * aDictionary, - OSString * aPath) -{ - bool result = false; - OSString * bundleIDString = NULL; // do not release - OSString * versionString = NULL; // do not release - OSString * compatibleVersionString = NULL; // do not release - const char * versionCString = NULL; // do not free - const char * compatibleVersionCString = NULL; // do not free - OSBoolean * scratchBool = NULL; // do not release - OSDictionary * scratchDict = NULL; // do not release - - if (infoDict) { - panic("Attempt to set info dictionary on a kext " - "that already has one (%s).", - getIdentifierCString()); - } - - if (!aDictionary || !OSDynamicCast(OSDictionary, aDictionary)) { - goto finish; - } - - infoDict = aDictionary; - infoDict->retain(); - - /* Check right away if the info dictionary has any log flags. - */ - scratchBool = OSDynamicCast(OSBoolean, - getPropertyForHostArch(kOSBundleEnableKextLoggingKey)); - if (scratchBool == kOSBooleanTrue) { - flags.loggingEnabled = 1; - } - - /* The very next thing to get is the bundle identifier. Unlike - * in user space, a kext with no bundle identifier gets axed - * immediately. - */ - bundleIDString = OSDynamicCast(OSString, - getPropertyForHostArch(kCFBundleIdentifierKey)); - if (!bundleIDString) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag, - "CFBundleIdentifier missing/invalid type in kext %s.", - aPath ? 
aPath->getCStringNoCopy() : "(unknown)"); - goto finish; - } - bundleID = OSSymbol::withString(bundleIDString); - if (!bundleID) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag, - "Can't copy bundle identifier as symbol for kext %s.", - bundleIDString->getCStringNoCopy()); - goto finish; - } - - /* Save the path if we got one (it should always be available but it's - * just something nice to have for bookkeeping). - */ - if (aPath) { - path = aPath; - path->retain(); - } - - /***** - * Minimal validation to initialize. We'll do other validation on the spot. - */ - if (bundleID->getLength() >= KMOD_MAX_NAME) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag, - "Kext %s error - CFBundleIdentifier over max length %d.", - getIdentifierCString(), KMOD_MAX_NAME - 1); - goto finish; - } - - version = compatibleVersion = -1; - - versionString = OSDynamicCast(OSString, - getPropertyForHostArch(kCFBundleVersionKey)); - if (!versionString) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag, - "Kext %s error - CFBundleVersion missing/invalid type.", - getIdentifierCString()); - goto finish; - } - versionCString = versionString->getCStringNoCopy(); - version = OSKextParseVersionString(versionCString); - if (version < 0) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag, - "Kext %s error - CFBundleVersion bad value '%s'.", - getIdentifierCString(), versionCString); - goto finish; - } - - compatibleVersion = -1; // set to illegal value for kexts that don't have - - compatibleVersionString = OSDynamicCast(OSString, - getPropertyForHostArch(kOSBundleCompatibleVersionKey)); - if (compatibleVersionString) { - compatibleVersionCString = compatibleVersionString->getCStringNoCopy(); - compatibleVersion = OSKextParseVersionString(compatibleVersionCString); - if (compatibleVersion < 0) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag, - "Kext %s error - OSBundleCompatibleVersion bad value '%s'.", - getIdentifierCString(), compatibleVersionCString); - goto finish; - } - - if (compatibleVersion > version) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag, - "Kext %s error - %s %s > %s %s (must be <=).", - getIdentifierCString(), - kOSBundleCompatibleVersionKey, compatibleVersionCString, - kCFBundleVersionKey, versionCString); - goto finish; - } - } - - /* Check to see if this kext is in exclude list */ - if ( isInExcludeList() ) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag, - "Kext %s is in exclude list, not loadable", - getIdentifierCString()); - goto finish; - } - - /* Set flags for later use if the infoDict gets flushed. We only - * check for true values, not false ones(!) - */ - scratchBool = OSDynamicCast(OSBoolean, - getPropertyForHostArch(kOSBundleIsInterfaceKey)); - if (scratchBool == kOSBooleanTrue) { - flags.interface = 1; - } - - scratchBool = OSDynamicCast(OSBoolean, - getPropertyForHostArch(kOSKernelResourceKey)); - if (scratchBool == kOSBooleanTrue) { - flags.kernelComponent = 1; - flags.interface = 1; // xxx - hm. the kernel itself isn't an interface... - flags.started = 1; - - /* A kernel component has one implicit dependency on the kernel. - */ - flags.hasAllDependencies = 1; - } - - /* Make sure common string values in personalities are uniqued to OSSymbols. 
- */ - scratchDict = OSDynamicCast(OSDictionary, - getPropertyForHostArch(kIOKitPersonalitiesKey)); - if (scratchDict) { - uniquePersonalityProperties(scratchDict); - } - - result = true; + OSDictionary * aDictionary, + OSString * aPath) +{ + bool result = false; + OSString * bundleIDString = NULL;// do not release + OSString * versionString = NULL;// do not release + OSString * compatibleVersionString = NULL;// do not release + const char * versionCString = NULL;// do not free + const char * compatibleVersionCString = NULL;// do not free + OSBoolean * scratchBool = NULL;// do not release + OSDictionary * scratchDict = NULL;// do not release + + if (infoDict) { + panic("Attempt to set info dictionary on a kext " + "that already has one (%s).", + getIdentifierCString()); + } + + if (!aDictionary || !OSDynamicCast(OSDictionary, aDictionary)) { + goto finish; + } + + infoDict = aDictionary; + infoDict->retain(); + + /* Check right away if the info dictionary has any log flags. + */ + scratchBool = OSDynamicCast(OSBoolean, + getPropertyForHostArch(kOSBundleEnableKextLoggingKey)); + if (scratchBool == kOSBooleanTrue) { + flags.loggingEnabled = 1; + } + + /* The very next thing to get is the bundle identifier. Unlike + * in user space, a kext with no bundle identifier gets axed + * immediately. + */ + bundleIDString = OSDynamicCast(OSString, + getPropertyForHostArch(kCFBundleIdentifierKey)); + if (!bundleIDString) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag, + "CFBundleIdentifier missing/invalid type in kext %s.", + aPath ? aPath->getCStringNoCopy() : "(unknown)"); + goto finish; + } + bundleID = OSSymbol::withString(bundleIDString); + if (!bundleID) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag, + "Can't copy bundle identifier as symbol for kext %s.", + bundleIDString->getCStringNoCopy()); + goto finish; + } + + /* Save the path if we got one (it should always be available but it's + * just something nice to have for bookkeeping). + */ + if (aPath) { + path = aPath; + path->retain(); + } + + /***** + * Minimal validation to initialize. We'll do other validation on the spot. 
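+ * Roughly: the identifier must fit a kmod_info_t name field (length
+ * < KMOD_MAX_NAME); CFBundleVersion must parse, so, hypothetically,
+ * OSKextParseVersionString("1.0.0d1") yields a nonnegative version while
+ * an unparseable string yields a negative one and the kext is rejected;
+ * and any OSBundleCompatibleVersion must be <= CFBundleVersion.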
+ */ + if (bundleID->getLength() >= KMOD_MAX_NAME) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag, + "Kext %s error - CFBundleIdentifier over max length %d.", + getIdentifierCString(), KMOD_MAX_NAME - 1); + goto finish; + } + + version = compatibleVersion = -1; + + versionString = OSDynamicCast(OSString, + getPropertyForHostArch(kCFBundleVersionKey)); + if (!versionString) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag, + "Kext %s error - CFBundleVersion missing/invalid type.", + getIdentifierCString()); + goto finish; + } + versionCString = versionString->getCStringNoCopy(); + version = OSKextParseVersionString(versionCString); + if (version < 0) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag, + "Kext %s error - CFBundleVersion bad value '%s'.", + getIdentifierCString(), versionCString); + goto finish; + } + + compatibleVersion = -1; // set to illegal value for kexts that don't have + + compatibleVersionString = OSDynamicCast(OSString, + getPropertyForHostArch(kOSBundleCompatibleVersionKey)); + if (compatibleVersionString) { + compatibleVersionCString = compatibleVersionString->getCStringNoCopy(); + compatibleVersion = OSKextParseVersionString(compatibleVersionCString); + if (compatibleVersion < 0) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag, + "Kext %s error - OSBundleCompatibleVersion bad value '%s'.", + getIdentifierCString(), compatibleVersionCString); + goto finish; + } + + if (compatibleVersion > version) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag, + "Kext %s error - %s %s > %s %s (must be <=).", + getIdentifierCString(), + kOSBundleCompatibleVersionKey, compatibleVersionCString, + kCFBundleVersionKey, versionCString); + goto finish; + } + } + + /* Check to see if this kext is in exclude list */ + if (isInExcludeList()) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag, + "Kext %s is in exclude list, not loadable", + getIdentifierCString()); + goto finish; + } + + /* Set flags for later use if the infoDict gets flushed. We only + * check for true values, not false ones(!) + */ + scratchBool = OSDynamicCast(OSBoolean, + getPropertyForHostArch(kOSBundleIsInterfaceKey)); + if (scratchBool == kOSBooleanTrue) { + flags.interface = 1; + } + + scratchBool = OSDynamicCast(OSBoolean, + getPropertyForHostArch(kOSKernelResourceKey)); + if (scratchBool == kOSBooleanTrue) { + flags.kernelComponent = 1; + flags.interface = 1; // xxx - hm. the kernel itself isn't an interface... + flags.started = 1; + + /* A kernel component has one implicit dependency on the kernel. + */ + flags.hasAllDependencies = 1; + } + + /* Make sure common string values in personalities are uniqued to OSSymbols. 
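+ * (For instance, many personalities carrying an IOProviderClass value
+ * such as "IOPCIDevice", an illustrative class name, end up sharing a
+ * single OSSymbol rather than holding one OSString apiece.)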
+ */ + scratchDict = OSDynamicCast(OSDictionary, + getPropertyForHostArch(kIOKitPersonalitiesKey)); + if (scratchDict) { + uniquePersonalityProperties(scratchDict); + } + + result = true; finish: - return result; + return result; } /********************************************************************* @@ -2294,73 +2295,74 @@ finish: *********************************************************************/ bool OSKext::setExecutable( - OSData * anExecutable, - OSData * externalData, - bool externalDataIsMkext) -{ - bool result = false; - const char * executableKey = NULL; // do not free - - if (!anExecutable) { - infoDict->removeObject(_kOSKextExecutableKey); - infoDict->removeObject(_kOSKextMkextExecutableReferenceKey); - infoDict->removeObject(_kOSKextExecutableExternalDataKey); - result = true; - goto finish; - } - - if (infoDict->getObject(_kOSKextExecutableKey) || - infoDict->getObject(_kOSKextMkextExecutableReferenceKey)) { - - panic("Attempt to set an executable on a kext " - "that already has one (%s).", - getIdentifierCString()); - goto finish; - } - - if (externalDataIsMkext) { - executableKey = _kOSKextMkextExecutableReferenceKey; - } else { - executableKey = _kOSKextExecutableKey; - } - - if (anExecutable) { - infoDict->setObject(executableKey, anExecutable); - if (externalData) { - infoDict->setObject(_kOSKextExecutableExternalDataKey, externalData); - } - } - - result = true; + OSData * anExecutable, + OSData * externalData, + bool externalDataIsMkext) +{ + bool result = false; + const char * executableKey = NULL; // do not free + + if (!anExecutable) { + infoDict->removeObject(_kOSKextExecutableKey); + infoDict->removeObject(_kOSKextMkextExecutableReferenceKey); + infoDict->removeObject(_kOSKextExecutableExternalDataKey); + result = true; + goto finish; + } -finish: - return result; -} + if (infoDict->getObject(_kOSKextExecutableKey) || + infoDict->getObject(_kOSKextMkextExecutableReferenceKey)) { + panic("Attempt to set an executable on a kext " + "that already has one (%s).", + getIdentifierCString()); + goto finish; + } -/********************************************************************* -*********************************************************************/ + if (externalDataIsMkext) { + executableKey = _kOSKextMkextExecutableReferenceKey; + } else { + executableKey = _kOSKextExecutableKey; + } + + if (anExecutable) { + infoDict->setObject(executableKey, anExecutable); + if (externalData) { + infoDict->setObject(_kOSKextExecutableExternalDataKey, externalData); + } + } + + result = true; + +finish: + return result; +} + +/********************************************************************* +*********************************************************************/ static void uniqueStringPlistProperty(OSDictionary * dict, const char * key) { - OSString * stringValue = NULL; // do not release - const OSSymbol * symbolValue = NULL; // must release - - stringValue = OSDynamicCast(OSString, dict->getObject(key)); - if (!stringValue) { - goto finish; - } - - symbolValue = OSSymbol::withString(stringValue); - if (!symbolValue) { - goto finish; - } - - dict->setObject(key, symbolValue); - + OSString * stringValue = NULL;// do not release + const OSSymbol * symbolValue = NULL; // must release + + stringValue = OSDynamicCast(OSString, dict->getObject(key)); + if (!stringValue) { + goto finish; + } + + symbolValue = OSSymbol::withString(stringValue); + if (!symbolValue) { + goto finish; + } + + dict->setObject(key, symbolValue); + finish: - if (symbolValue) symbolValue->release(); + 
if (symbolValue) { + symbolValue->release(); + } - return; + return; } /********************************************************************* @@ -2368,25 +2370,27 @@ finish: static void uniqueStringPlistProperty(OSDictionary * dict, const OSString * key) { - OSString * stringValue = NULL; // do not release - const OSSymbol * symbolValue = NULL; // must release - - stringValue = OSDynamicCast(OSString, dict->getObject(key)); - if (!stringValue) { - goto finish; - } - - symbolValue = OSSymbol::withString(stringValue); - if (!symbolValue) { - goto finish; - } - - dict->setObject(key, symbolValue); - + OSString * stringValue = NULL;// do not release + const OSSymbol * symbolValue = NULL; // must release + + stringValue = OSDynamicCast(OSString, dict->getObject(key)); + if (!stringValue) { + goto finish; + } + + symbolValue = OSSymbol::withString(stringValue); + if (!symbolValue) { + goto finish; + } + + dict->setObject(key, symbolValue); + finish: - if (symbolValue) symbolValue->release(); + if (symbolValue) { + symbolValue->release(); + } - return; + return; } /********************************************************************* @@ -2397,33 +2401,33 @@ finish: void OSKext::uniquePersonalityProperties(OSDictionary * personalityDict) { - /* Properties every personality has. - */ - uniqueStringPlistProperty(personalityDict, kCFBundleIdentifierKey); - uniqueStringPlistProperty(personalityDict, kIOProviderClassKey); - uniqueStringPlistProperty(personalityDict, gIOClassKey); - - /* Other commonly used properties. - */ - uniqueStringPlistProperty(personalityDict, gIOMatchCategoryKey); - uniqueStringPlistProperty(personalityDict, gIOResourceMatchKey); - uniqueStringPlistProperty(personalityDict, gIOUserClientClassKey); - - uniqueStringPlistProperty(personalityDict, "HIDDefaultBehavior"); - uniqueStringPlistProperty(personalityDict, "HIDPointerAccelerationType"); - uniqueStringPlistProperty(personalityDict, "HIDRemoteControlType"); - uniqueStringPlistProperty(personalityDict, "HIDScrollAccelerationType"); - uniqueStringPlistProperty(personalityDict, "IOPersonalityPublisher"); - uniqueStringPlistProperty(personalityDict, "Physical Interconnect"); - uniqueStringPlistProperty(personalityDict, "Physical Interconnect Location"); - uniqueStringPlistProperty(personalityDict, "Vendor"); - uniqueStringPlistProperty(personalityDict, "Vendor Identification"); - uniqueStringPlistProperty(personalityDict, "Vendor Name"); - uniqueStringPlistProperty(personalityDict, "bConfigurationValue"); - uniqueStringPlistProperty(personalityDict, "bInterfaceNumber"); - uniqueStringPlistProperty(personalityDict, "idProduct"); - - return; + /* Properties every personality has. + */ + uniqueStringPlistProperty(personalityDict, kCFBundleIdentifierKey); + uniqueStringPlistProperty(personalityDict, kIOProviderClassKey); + uniqueStringPlistProperty(personalityDict, gIOClassKey); + + /* Other commonly used properties. 
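+ * (These keys tend to recur verbatim across personalities, so interning
+ * their values buys shared storage and cheap pointer-equality compares.)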
+ */ + uniqueStringPlistProperty(personalityDict, gIOMatchCategoryKey); + uniqueStringPlistProperty(personalityDict, gIOResourceMatchKey); + uniqueStringPlistProperty(personalityDict, gIOUserClientClassKey); + + uniqueStringPlistProperty(personalityDict, "HIDDefaultBehavior"); + uniqueStringPlistProperty(personalityDict, "HIDPointerAccelerationType"); + uniqueStringPlistProperty(personalityDict, "HIDRemoteControlType"); + uniqueStringPlistProperty(personalityDict, "HIDScrollAccelerationType"); + uniqueStringPlistProperty(personalityDict, "IOPersonalityPublisher"); + uniqueStringPlistProperty(personalityDict, "Physical Interconnect"); + uniqueStringPlistProperty(personalityDict, "Physical Interconnect Location"); + uniqueStringPlistProperty(personalityDict, "Vendor"); + uniqueStringPlistProperty(personalityDict, "Vendor Identification"); + uniqueStringPlistProperty(personalityDict, "Vendor Name"); + uniqueStringPlistProperty(personalityDict, "bConfigurationValue"); + uniqueStringPlistProperty(personalityDict, "bInterfaceNumber"); + uniqueStringPlistProperty(personalityDict, "idProduct"); + + return; } /********************************************************************* @@ -2431,25 +2435,25 @@ OSKext::uniquePersonalityProperties(OSDictionary * personalityDict) void OSKext::free(void) { - if (isLoaded()) { - panic("Attempt to free loaded kext %s.", getIdentifierCString()); - } - - OSSafeReleaseNULL(infoDict); - OSSafeReleaseNULL(bundleID); - OSSafeReleaseNULL(path); - OSSafeReleaseNULL(executableRelPath); - OSSafeReleaseNULL(dependencies); - OSSafeReleaseNULL(linkedExecutable); - OSSafeReleaseNULL(metaClasses); - OSSafeReleaseNULL(interfaceUUID); + if (isLoaded()) { + panic("Attempt to free loaded kext %s.", getIdentifierCString()); + } - if (isInterface() && kmod_info) { - kfree(kmod_info, sizeof(kmod_info_t)); - } + OSSafeReleaseNULL(infoDict); + OSSafeReleaseNULL(bundleID); + OSSafeReleaseNULL(path); + OSSafeReleaseNULL(executableRelPath); + OSSafeReleaseNULL(dependencies); + OSSafeReleaseNULL(linkedExecutable); + OSSafeReleaseNULL(metaClasses); + OSSafeReleaseNULL(interfaceUUID); + + if (isInterface() && kmod_info) { + kfree(kmod_info, sizeof(kmod_info_t)); + } - super::free(); - return; + super::free(); + return; } #if PRAGMA_MARK @@ -2461,55 +2465,55 @@ OSReturn OSKext::readMkextArchive(OSData * mkextData, uint32_t * checksumPtr) { - OSReturn result = kOSKextReturnBadData; - uint32_t mkextLength = 0; - mkext_header * mkextHeader = 0; // do not free - uint32_t mkextVersion = 0; - - /* Note default return of kOSKextReturnBadData above. 
- */ - mkextLength = mkextData->getLength(); - if (mkextLength < sizeof(mkext_basic_header)) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive too small to be valid."); - goto finish; - } - - mkextHeader = (mkext_header *)mkextData->getBytesNoCopy(); - - if (MKEXT_GET_MAGIC(mkextHeader) != MKEXT_MAGIC || - MKEXT_GET_SIGNATURE(mkextHeader) != MKEXT_SIGN) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive has invalid magic or signature."); - goto finish; - } - - if (MKEXT_GET_LENGTH(mkextHeader) != mkextLength) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive recorded length doesn't match actual file length."); - goto finish; - } - - mkextVersion = MKEXT_GET_VERSION(mkextHeader); - - if (mkextVersion == MKEXT_VERS_2) { - result = OSKext::readMkext2Archive(mkextData, NULL, checksumPtr); - } else { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive of unsupported mkext version 0x%x.", mkextVersion); - result = kOSKextReturnUnsupported; - } + OSReturn result = kOSKextReturnBadData; + uint32_t mkextLength = 0; + mkext_header * mkextHeader = 0;// do not free + uint32_t mkextVersion = 0; + + /* Note default return of kOSKextReturnBadData above. + */ + mkextLength = mkextData->getLength(); + if (mkextLength < sizeof(mkext_basic_header)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive too small to be valid."); + goto finish; + } + + mkextHeader = (mkext_header *)mkextData->getBytesNoCopy(); + + if (MKEXT_GET_MAGIC(mkextHeader) != MKEXT_MAGIC || + MKEXT_GET_SIGNATURE(mkextHeader) != MKEXT_SIGN) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive has invalid magic or signature."); + goto finish; + } + + if (MKEXT_GET_LENGTH(mkextHeader) != mkextLength) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive recorded length doesn't match actual file length."); + goto finish; + } + + mkextVersion = MKEXT_GET_VERSION(mkextHeader); + + if (mkextVersion == MKEXT_VERS_2) { + result = OSKext::readMkext2Archive(mkextData, NULL, checksumPtr); + } else { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive of unsupported mkext version 0x%x.", mkextVersion); + result = kOSKextReturnUnsupported; + } finish: - return result; + return result; } /********************************************************************* @@ -2524,169 +2528,168 @@ finish: /* static */ OSReturn OSKext::readMkext2Archive( - OSData * mkextData, - OSDictionary ** mkextPlistOut, - uint32_t * checksumPtr) -{ - OSReturn result = kOSReturnError; - uint32_t mkextLength; - mkext2_header * mkextHeader = NULL; // do not free - void * mkextEnd = NULL; // do not free - uint32_t mkextVersion; - uint8_t * crc_address = NULL; - uint32_t checksum; - uint32_t mkextPlistOffset; - uint32_t mkextPlistCompressedSize; - char * mkextPlistEnd = NULL; // do not free - uint32_t mkextPlistFullSize; - OSString * errorString = NULL; // must release - OSData * mkextPlistUncompressedData = NULL; // must release - const char * mkextPlistDataBuffer = NULL; // do not free - OSObject * parsedXML = NULL; // must release - OSDictionary * mkextPlist = NULL; // do not release - OSArray * mkextInfoDictArray = NULL; // do not release - uint32_t count, i; - - mkextLength = mkextData->getLength(); - mkextHeader = 
(mkext2_header *)mkextData->getBytesNoCopy(); - mkextEnd = (char *)mkextHeader + mkextLength; - mkextVersion = MKEXT_GET_VERSION(mkextHeader); - - crc_address = (u_int8_t *)&mkextHeader->version; - checksum = mkext_adler32(crc_address, - (uintptr_t)mkextHeader + - MKEXT_GET_LENGTH(mkextHeader) - (uintptr_t)crc_address); - - if (MKEXT_GET_CHECKSUM(mkextHeader) != checksum) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive has bad checksum."); - result = kOSKextReturnBadData; - goto finish; - } - - if (checksumPtr) { - *checksumPtr = checksum; - } - - /* Check that the CPU type & subtype match that of the running kernel. */ - if (MKEXT_GET_CPUTYPE(mkextHeader) == (UInt32)CPU_TYPE_ANY) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive must have a specific CPU type."); - result = kOSKextReturnBadData; - goto finish; - } else { - if ((UInt32)_mh_execute_header.cputype != - MKEXT_GET_CPUTYPE(mkextHeader)) { - - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive does not match the running kernel's CPU type."); - result = kOSKextReturnArchNotFound; - goto finish; - } - } - - mkextPlistOffset = MKEXT2_GET_PLIST(mkextHeader); - mkextPlistCompressedSize = MKEXT2_GET_PLIST_COMPSIZE(mkextHeader); - mkextPlistEnd = (char *)mkextHeader + mkextPlistOffset + - mkextPlistCompressedSize; - if (mkextPlistEnd > mkextEnd) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive file overrun."); - result = kOSKextReturnBadData; - } - - mkextPlistFullSize = MKEXT2_GET_PLIST_FULLSIZE(mkextHeader); - if (mkextPlistCompressedSize) { - mkextPlistUncompressedData = sKernelKext->extractMkext2FileData( - (UInt8 *)mkextHeader + mkextPlistOffset, - "plist", - mkextPlistCompressedSize, mkextPlistFullSize); - if (!mkextPlistUncompressedData) { - goto finish; - } - mkextPlistDataBuffer = (const char *) - mkextPlistUncompressedData->getBytesNoCopy(); - } else { - mkextPlistDataBuffer = (const char *)mkextHeader + mkextPlistOffset; - } - - /* IOCFSerialize added a nul byte to the end of the string. Very nice of it. - */ - parsedXML = OSUnserializeXML(mkextPlistDataBuffer, &errorString); - if (parsedXML) { - mkextPlist = OSDynamicCast(OSDictionary, parsedXML); - } - if (!mkextPlist) { - const char * errorCString = "(unknown error)"; - - if (errorString && errorString->getCStringNoCopy()) { - errorCString = errorString->getCStringNoCopy(); - } else if (parsedXML) { - errorCString = "not a dictionary"; - } - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Error unserializing mkext plist: %s.", errorCString); - goto finish; - } - - /* If the caller needs the plist, hand it back and retain it. - * (This function releases it at the end.) - */ - if (mkextPlistOut) { - *mkextPlistOut = mkextPlist; - (*mkextPlistOut)->retain(); - } - - mkextInfoDictArray = OSDynamicCast(OSArray, - mkextPlist->getObject(kMKEXTInfoDictionariesKey)); - if (!mkextInfoDictArray) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext archive contains no kext info dictionaries."); - goto finish; - } - - count = mkextInfoDictArray->getCount(); - for (i = 0; i < count; i++) { - OSDictionary * infoDict; - - - infoDict = OSDynamicCast(OSDictionary, - mkextInfoDictArray->getObject(i)); - - /* Create the kext for the entry, then release it, because the - * kext system keeps them around until explicitly removed. 
- * Any creation/registration failures are already logged for us. - */ - if (infoDict) { - OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData); - OSSafeReleaseNULL(newKext); - } - } - - /* Even if we didn't keep any kexts from the mkext, we may have a load - * request to process, so we are successful (no errors occurred). - */ - result = kOSReturnSuccess; + OSData * mkextData, + OSDictionary ** mkextPlistOut, + uint32_t * checksumPtr) +{ + OSReturn result = kOSReturnError; + uint32_t mkextLength; + mkext2_header * mkextHeader = NULL;// do not free + void * mkextEnd = NULL;// do not free + uint32_t mkextVersion; + uint8_t * crc_address = NULL; + uint32_t checksum; + uint32_t mkextPlistOffset; + uint32_t mkextPlistCompressedSize; + char * mkextPlistEnd = NULL;// do not free + uint32_t mkextPlistFullSize; + OSString * errorString = NULL;// must release + OSData * mkextPlistUncompressedData = NULL;// must release + const char * mkextPlistDataBuffer = NULL;// do not free + OSObject * parsedXML = NULL;// must release + OSDictionary * mkextPlist = NULL;// do not release + OSArray * mkextInfoDictArray = NULL;// do not release + uint32_t count, i; + + mkextLength = mkextData->getLength(); + mkextHeader = (mkext2_header *)mkextData->getBytesNoCopy(); + mkextEnd = (char *)mkextHeader + mkextLength; + mkextVersion = MKEXT_GET_VERSION(mkextHeader); + + crc_address = (u_int8_t *)&mkextHeader->version; + checksum = mkext_adler32(crc_address, + (uintptr_t)mkextHeader + + MKEXT_GET_LENGTH(mkextHeader) - (uintptr_t)crc_address); + + if (MKEXT_GET_CHECKSUM(mkextHeader) != checksum) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive has bad checksum."); + result = kOSKextReturnBadData; + goto finish; + } + + if (checksumPtr) { + *checksumPtr = checksum; + } + + /* Check that the CPU type & subtype match that of the running kernel. */ + if (MKEXT_GET_CPUTYPE(mkextHeader) == (UInt32)CPU_TYPE_ANY) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive must have a specific CPU type."); + result = kOSKextReturnBadData; + goto finish; + } else { + if ((UInt32)_mh_execute_header.cputype != + MKEXT_GET_CPUTYPE(mkextHeader)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive does not match the running kernel's CPU type."); + result = kOSKextReturnArchNotFound; + goto finish; + } + } + + mkextPlistOffset = MKEXT2_GET_PLIST(mkextHeader); + mkextPlistCompressedSize = MKEXT2_GET_PLIST_COMPSIZE(mkextHeader); + mkextPlistEnd = (char *)mkextHeader + mkextPlistOffset + + mkextPlistCompressedSize; + if (mkextPlistEnd > mkextEnd) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive file overrun."); + result = kOSKextReturnBadData; + } + + mkextPlistFullSize = MKEXT2_GET_PLIST_FULLSIZE(mkextHeader); + if (mkextPlistCompressedSize) { + mkextPlistUncompressedData = sKernelKext->extractMkext2FileData( + (UInt8 *)mkextHeader + mkextPlistOffset, + "plist", + mkextPlistCompressedSize, mkextPlistFullSize); + if (!mkextPlistUncompressedData) { + goto finish; + } + mkextPlistDataBuffer = (const char *) + mkextPlistUncompressedData->getBytesNoCopy(); + } else { + mkextPlistDataBuffer = (const char *)mkextHeader + mkextPlistOffset; + } + + /* IOCFSerialize added a nul byte to the end of the string. Very nice of it. 
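+ * (That terminating nul is what allows mkextPlistDataBuffer to be handed
+ * directly to OSUnserializeXML() below as a C string, with no copy made
+ * just to terminate it.)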
+ */ + parsedXML = OSUnserializeXML(mkextPlistDataBuffer, &errorString); + if (parsedXML) { + mkextPlist = OSDynamicCast(OSDictionary, parsedXML); + } + if (!mkextPlist) { + const char * errorCString = "(unknown error)"; + + if (errorString && errorString->getCStringNoCopy()) { + errorCString = errorString->getCStringNoCopy(); + } else if (parsedXML) { + errorCString = "not a dictionary"; + } + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Error unserializing mkext plist: %s.", errorCString); + goto finish; + } + + /* If the caller needs the plist, hand it back and retain it. + * (This function releases it at the end.) + */ + if (mkextPlistOut) { + *mkextPlistOut = mkextPlist; + (*mkextPlistOut)->retain(); + } + + mkextInfoDictArray = OSDynamicCast(OSArray, + mkextPlist->getObject(kMKEXTInfoDictionariesKey)); + if (!mkextInfoDictArray) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext archive contains no kext info dictionaries."); + goto finish; + } + + count = mkextInfoDictArray->getCount(); + for (i = 0; i < count; i++) { + OSDictionary * infoDict; + + + infoDict = OSDynamicCast(OSDictionary, + mkextInfoDictArray->getObject(i)); + + /* Create the kext for the entry, then release it, because the + * kext system keeps them around until explicitly removed. + * Any creation/registration failures are already logged for us. + */ + if (infoDict) { + OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData); + OSSafeReleaseNULL(newKext); + } + } + + /* Even if we didn't keep any kexts from the mkext, we may have a load + * request to process, so we are successful (no errors occurred). + */ + result = kOSReturnSuccess; finish: - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(mkextPlistUncompressedData); - OSSafeReleaseNULL(errorString); + OSSafeReleaseNULL(parsedXML); + OSSafeReleaseNULL(mkextPlistUncompressedData); + OSSafeReleaseNULL(errorString); - return result; + return result; } /********************************************************************* @@ -2694,115 +2697,115 @@ finish: /* static */ OSKext * OSKext::withMkext2Info( - OSDictionary * anInfoDict, - OSData * mkextData) + OSDictionary * anInfoDict, + OSData * mkextData) { - OSKext * newKext = new OSKext; + OSKext * newKext = new OSKext; - if (newKext && !newKext->initWithMkext2Info(anInfoDict, mkextData)) { - newKext->release(); - return NULL; - } + if (newKext && !newKext->initWithMkext2Info(anInfoDict, mkextData)) { + newKext->release(); + return NULL; + } - return newKext; + return newKext; } /********************************************************************* *********************************************************************/ bool OSKext::initWithMkext2Info( - OSDictionary * anInfoDict, - OSData * mkextData) -{ - bool result = false; - OSString * kextPath = NULL; // do not release - OSNumber * executableOffsetNum = NULL; // do not release - OSCollectionIterator * iterator = NULL; // must release - OSData * executable = NULL; // must release - - if (anInfoDict == NULL || !super::init()) { - goto finish; - } - - /* Get the path. Don't look for an arch-specific path property. - */ - kextPath = OSDynamicCast(OSString, - anInfoDict->getObject(kMKEXTBundlePathKey)); - - if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) { - goto finish; - } - - /* If we have a path to the executable, save it. 
- */ - executableRelPath = OSDynamicCast(OSString, - anInfoDict->getObject(kMKEXTExecutableRelativePathKey)); - if (executableRelPath) { - executableRelPath->retain(); - } - - /* Don't need the paths to be in the info dictionary any more. - */ - anInfoDict->removeObject(kMKEXTBundlePathKey); - anInfoDict->removeObject(kMKEXTExecutableRelativePathKey); - - executableOffsetNum = OSDynamicCast(OSNumber, - infoDict->getObject(kMKEXTExecutableKey)); - if (executableOffsetNum) { - executable = createMkext2FileEntry(mkextData, - executableOffsetNum, "executable"); - infoDict->removeObject(kMKEXTExecutableKey); - if (!executable) { - goto finish; - } - if (!setExecutable(executable, mkextData, true)) { - goto finish; - } - } - - result = registerIdentifier(); + OSDictionary * anInfoDict, + OSData * mkextData) +{ + bool result = false; + OSString * kextPath = NULL;// do not release + OSNumber * executableOffsetNum = NULL;// do not release + OSCollectionIterator * iterator = NULL;// must release + OSData * executable = NULL;// must release + + if (anInfoDict == NULL || !super::init()) { + goto finish; + } + + /* Get the path. Don't look for an arch-specific path property. + */ + kextPath = OSDynamicCast(OSString, + anInfoDict->getObject(kMKEXTBundlePathKey)); + + if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) { + goto finish; + } + + /* If we have a path to the executable, save it. + */ + executableRelPath = OSDynamicCast(OSString, + anInfoDict->getObject(kMKEXTExecutableRelativePathKey)); + if (executableRelPath) { + executableRelPath->retain(); + } + + /* Don't need the paths to be in the info dictionary any more. + */ + anInfoDict->removeObject(kMKEXTBundlePathKey); + anInfoDict->removeObject(kMKEXTExecutableRelativePathKey); + + executableOffsetNum = OSDynamicCast(OSNumber, + infoDict->getObject(kMKEXTExecutableKey)); + if (executableOffsetNum) { + executable = createMkext2FileEntry(mkextData, + executableOffsetNum, "executable"); + infoDict->removeObject(kMKEXTExecutableKey); + if (!executable) { + goto finish; + } + if (!setExecutable(executable, mkextData, true)) { + goto finish; + } + } + + result = registerIdentifier(); finish: - OSSafeReleaseNULL(executable); - OSSafeReleaseNULL(iterator); - return result; + OSSafeReleaseNULL(executable); + OSSafeReleaseNULL(iterator); + return result; } /********************************************************************* *********************************************************************/ OSData * OSKext::createMkext2FileEntry( - OSData * mkextData, - OSNumber * offsetNum, - const char * name) -{ - OSData * result = NULL; - MkextEntryRef entryRef; - uint8_t * mkextBuffer = (uint8_t *)mkextData->getBytesNoCopy(); - uint32_t entryOffset = offsetNum->unsigned32BitValue(); - - result = OSData::withCapacity(sizeof(entryRef)); - if (!result) { - goto finish; - } - - entryRef.mkext = (mkext_basic_header *)mkextBuffer; - entryRef.fileinfo = mkextBuffer + entryOffset; - if (!result->appendBytes(&entryRef, sizeof(entryRef))) { - OSSafeReleaseNULL(result); - goto finish; - } + OSData * mkextData, + OSNumber * offsetNum, + const char * name) +{ + OSData * result = NULL; + MkextEntryRef entryRef; + uint8_t * mkextBuffer = (uint8_t *)mkextData->getBytesNoCopy(); + uint32_t entryOffset = offsetNum->unsigned32BitValue(); + + result = OSData::withCapacity(sizeof(entryRef)); + if (!result) { + goto finish; + } + + entryRef.mkext = (mkext_basic_header *)mkextBuffer; + entryRef.fileinfo = mkextBuffer + entryOffset; + if (!result->appendBytes(&entryRef, 
sizeof(entryRef))) { + OSSafeReleaseNULL(result); + goto finish; + } finish: - if (!result) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Can't create wrapper for mkext file entry '%s' of kext %s.", - name, getIdentifierCString()); - } - return result; + if (!result) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Can't create wrapper for mkext file entry '%s' of kext %s.", + name, getIdentifierCString()); + } + return result; } /********************************************************************* @@ -2812,8 +2815,8 @@ static void * z_alloc(void *, u_int items, u_int size); static void z_free(void *, void *ptr); typedef struct z_mem { - uint32_t alloc_size; - uint8_t data[0]; + uint32_t alloc_size; + uint8_t data[0]; } z_mem; /* @@ -2822,211 +2825,212 @@ typedef struct z_mem { void * z_alloc(void * notused __unused, u_int num_items, u_int size) { - void * result = NULL; - z_mem * zmem = NULL; - - uint64_t total = ((uint64_t)num_items) * ((uint64_t)size); - //Check for overflow due to multiplication - if (total > UINT32_MAX){ - panic("z_alloc(%p, %x, %x): overflow caused by %x * %x\n", - notused, num_items, size, num_items, size); - } - - uint64_t allocSize64 = total + ((uint64_t)sizeof(zmem)); - //Check for overflow due to addition - if (allocSize64 > UINT32_MAX){ - panic("z_alloc(%p, %x, %x): overflow caused by %x + %lx\n", - notused, num_items, size, (uint32_t)total, sizeof(zmem)); - } - uint32_t allocSize = (uint32_t)allocSize64; - - zmem = (z_mem *)kalloc_tag(allocSize, VM_KERN_MEMORY_OSKEXT); - if (!zmem) { - goto finish; - } - zmem->alloc_size = allocSize; - result = (void *)&(zmem->data); + void * result = NULL; + z_mem * zmem = NULL; + + uint64_t total = ((uint64_t)num_items) * ((uint64_t)size); + //Check for overflow due to multiplication + if (total > UINT32_MAX) { + panic("z_alloc(%p, %x, %x): overflow caused by %x * %x\n", + notused, num_items, size, num_items, size); + } + + uint64_t allocSize64 = total + ((uint64_t)sizeof(zmem)); + //Check for overflow due to addition + if (allocSize64 > UINT32_MAX) { + panic("z_alloc(%p, %x, %x): overflow caused by %x + %lx\n", + notused, num_items, size, (uint32_t)total, sizeof(zmem)); + } + uint32_t allocSize = (uint32_t)allocSize64; + + zmem = (z_mem *)kalloc_tag(allocSize, VM_KERN_MEMORY_OSKEXT); + if (!zmem) { + goto finish; + } + zmem->alloc_size = allocSize; + result = (void *)&(zmem->data); finish: - return result; + return result; } void z_free(void * notused __unused, void * ptr) { - uint32_t * skipper = (uint32_t *)ptr - 1; - z_mem * zmem = (z_mem *)skipper; - kfree((void *)zmem, zmem->alloc_size); - return; + uint32_t * skipper = (uint32_t *)ptr - 1; + z_mem * zmem = (z_mem *)skipper; + kfree(zmem, zmem->alloc_size); + return; } }; OSData * OSKext::extractMkext2FileData( - UInt8 * data, - const char * name, - uint32_t compressedSize, - uint32_t fullSize) -{ - OSData * result = NULL; - - OSData * uncompressedData = NULL; // release on error - - uint8_t * uncompressedDataBuffer = 0; // do not free - unsigned long uncompressedSize; - z_stream zstream; - bool zstream_inited = false; - int zlib_result; - - /* If the file isn't compressed, we want to make a copy - * so that we don't have the tie to the larger mkext file buffer any more. - */ - if (!compressedSize) { - uncompressedData = OSData::withBytes(data, fullSize); - // xxx - no check for failure? 
- result = uncompressedData; - goto finish; - } - - if (KERN_SUCCESS != kmem_alloc(kernel_map, - (vm_offset_t*)&uncompressedDataBuffer, fullSize, VM_KERN_MEMORY_OSKEXT)) { - - /* How's this for cheesy? The kernel is only asked to extract - * kext plists so we tailor the log messages. - */ - if (isKernel()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Allocation failure extracting %s from mkext.", name); - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Allocation failure extracting %s from mkext for kext %s.", - name, getIdentifierCString()); - } - - goto finish; - } - uncompressedData = OSData::withBytesNoCopy(uncompressedDataBuffer, fullSize); - if (!uncompressedData) { - if (isKernel()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Allocation failure extracting %s from mkext.", name); - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Allocation failure extracting %s from mkext for kext %s.", - name, getIdentifierCString()); - } - goto finish; - } - uncompressedData->setDeallocFunction(&osdata_kmem_free); - - if (isKernel()) { - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogArchiveFlag, - "Kernel extracted %s from mkext - compressed size %d, uncompressed size %d.", - name, compressedSize, fullSize); - } else { - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogArchiveFlag, - "Kext %s extracted %s from mkext - compressed size %d, uncompressed size %d.", - getIdentifierCString(), name, compressedSize, fullSize); - } - - bzero(&zstream, sizeof(zstream)); - zstream.next_in = (UInt8 *)data; - zstream.avail_in = compressedSize; - - zstream.next_out = uncompressedDataBuffer; - zstream.avail_out = fullSize; - - zstream.zalloc = z_alloc; - zstream.zfree = z_free; - - zlib_result = inflateInit(&zstream); - if (Z_OK != zlib_result) { - if (isKernel()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext error; zlib inflateInit failed (%d) for %s.", - zlib_result, name); - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Kext %s - mkext error; zlib inflateInit failed (%d) for %s .", - getIdentifierCString(), zlib_result, name); - } - goto finish; - } else { - zstream_inited = true; - } - - zlib_result = inflate(&zstream, Z_FINISH); - - if (zlib_result == Z_STREAM_END || zlib_result == Z_OK) { - uncompressedSize = zstream.total_out; - } else { - if (isKernel()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext error; zlib inflate failed (%d) for %s.", - zlib_result, name); - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Kext %s - mkext error; zlib inflate failed (%d) for %s .", - getIdentifierCString(), zlib_result, name); - } - if (zstream.msg) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "zlib error: %s.", zstream.msg); - } - goto finish; - } - - if (uncompressedSize != fullSize) { - if (isKernel()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Mkext error; zlib inflate discrepancy for %s, " - "uncompressed size != original size.", name); - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Kext %s - mkext error; zlib inflate discrepancy for %s, " - "uncompressed size != original size.", - getIdentifierCString(), name); - } - goto finish; - } - - result = uncompressedData; + UInt8 * data, + const char * name, + uint32_t compressedSize, + uint32_t fullSize) +{ + OSData * 
result = NULL; + + OSData * uncompressedData = NULL;// release on error + + uint8_t * uncompressedDataBuffer = 0;// do not free + unsigned long uncompressedSize; + z_stream zstream; + bool zstream_inited = false; + int zlib_result; + + /* If the file isn't compressed, we want to make a copy + * so that we don't have the tie to the larger mkext file buffer any more. + */ + if (!compressedSize) { + uncompressedData = OSData::withBytes(data, fullSize); + // xxx - no check for failure? + result = uncompressedData; + goto finish; + } + + if (KERN_SUCCESS != kmem_alloc(kernel_map, + (vm_offset_t*)&uncompressedDataBuffer, fullSize, VM_KERN_MEMORY_OSKEXT)) { + /* How's this for cheesy? The kernel is only asked to extract + * kext plists so we tailor the log messages. + */ + if (isKernel()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Allocation failure extracting %s from mkext.", name); + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Allocation failure extracting %s from mkext for kext %s.", + name, getIdentifierCString()); + } + + goto finish; + } + uncompressedData = OSData::withBytesNoCopy(uncompressedDataBuffer, fullSize); + if (!uncompressedData) { + if (isKernel()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Allocation failure extracting %s from mkext.", name); + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Allocation failure extracting %s from mkext for kext %s.", + name, getIdentifierCString()); + } + goto finish; + } + uncompressedData->setDeallocFunction(&osdata_kmem_free); + + if (isKernel()) { + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogArchiveFlag, + "Kernel extracted %s from mkext - compressed size %d, uncompressed size %d.", + name, compressedSize, fullSize); + } else { + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogArchiveFlag, + "Kext %s extracted %s from mkext - compressed size %d, uncompressed size %d.", + getIdentifierCString(), name, compressedSize, fullSize); + } + + bzero(&zstream, sizeof(zstream)); + zstream.next_in = (UInt8 *)data; + zstream.avail_in = compressedSize; + + zstream.next_out = uncompressedDataBuffer; + zstream.avail_out = fullSize; + + zstream.zalloc = z_alloc; + zstream.zfree = z_free; + + zlib_result = inflateInit(&zstream); + if (Z_OK != zlib_result) { + if (isKernel()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext error; zlib inflateInit failed (%d) for %s.", + zlib_result, name); + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Kext %s - mkext error; zlib inflateInit failed (%d) for %s .", + getIdentifierCString(), zlib_result, name); + } + goto finish; + } else { + zstream_inited = true; + } + + zlib_result = inflate(&zstream, Z_FINISH); + + if (zlib_result == Z_STREAM_END || zlib_result == Z_OK) { + uncompressedSize = zstream.total_out; + } else { + if (isKernel()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Mkext error; zlib inflate failed (%d) for %s.", + zlib_result, name); + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Kext %s - mkext error; zlib inflate failed (%d) for %s .", + getIdentifierCString(), zlib_result, name); + } + if (zstream.msg) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "zlib error: %s.", zstream.msg); + } + goto finish; + } + + if (uncompressedSize != fullSize) { + if (isKernel()) { + OSKextLog(this, + kOSKextLogErrorLevel | + 
kOSKextLogArchiveFlag, + "Mkext error; zlib inflate discrepancy for %s, " + "uncompressed size != original size.", name); + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Kext %s - mkext error; zlib inflate discrepancy for %s, " + "uncompressed size != original size.", + getIdentifierCString(), name); + } + goto finish; + } + + result = uncompressedData; finish: - /* Don't bother checking return, nothing we can do on fail. - */ - if (zstream_inited) inflateEnd(&zstream); + /* Don't bother checking return, nothing we can do on fail. + */ + if (zstream_inited) { + inflateEnd(&zstream); + } - if (!result) { - OSSafeReleaseNULL(uncompressedData); - } + if (!result) { + OSSafeReleaseNULL(uncompressedData); + } - return result; + return result; } /********************************************************************* @@ -3034,212 +3038,212 @@ finish: /* static */ OSReturn OSKext::loadFromMkext( - OSKextLogSpec clientLogFilter, - char * mkextBuffer, - uint32_t mkextBufferLength, - char ** logInfoOut, - uint32_t * logInfoLengthOut) -{ - OSReturn result = kOSReturnError; - OSReturn tempResult = kOSReturnError; - - OSData * mkextData = NULL; // must release - OSDictionary * mkextPlist = NULL; // must release - - OSArray * logInfoArray = NULL; // must release - OSSerialize * serializer = NULL; // must release - - OSString * predicate = NULL; // do not release - OSDictionary * requestArgs = NULL; // do not release - - OSString * kextIdentifier = NULL; // do not release - OSNumber * startKextExcludeNum = NULL; // do not release - OSNumber * startMatchingExcludeNum = NULL; // do not release - OSBoolean * delayAutounloadBool = NULL; // do not release - OSArray * personalityNames = NULL; // do not release - - /* Default values for these two options: regular autounload behavior, - * load all kexts, send no personalities. - */ - Boolean delayAutounload = false; - OSKextExcludeLevel startKextExcludeLevel = kOSKextExcludeNone; - OSKextExcludeLevel startMatchingExcludeLevel = kOSKextExcludeAll; - - IORecursiveLockLock(sKextLock); - - if (logInfoOut) { - *logInfoOut = NULL; - *logInfoLengthOut = 0; - } - - OSKext::setUserSpaceLogFilter(clientLogFilter, logInfoOut ? true : false); - - OSKextLog(/* kext */ NULL, - kOSKextLogDebugLevel | - kOSKextLogIPCFlag, - "Received kext load request from user space."); - - /* Regardless of processing, the fact that we have gotten here means some - * user-space program is up and talking to us, so we'll switch our kext - * registration to reflect that. - */ - if (!sUserLoadsActive) { - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogGeneralFlag | kOSKextLogLoadFlag, - "Switching to late startup (user-space) kext loading policy."); - - sUserLoadsActive = true; - } - - if (!sLoadEnabled) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext loading is disabled."); - result = kOSKextReturnDisabled; - goto finish; - } - - /* Note that we do not set a dealloc function on this OSData - * object! No references to it can remain after the loadFromMkext() - * call since we are in a MIG function, and will vm_deallocate() - * the buffer. 
- */ - mkextData = OSData::withBytesNoCopy(mkextBuffer, - mkextBufferLength); - if (!mkextData) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogIPCFlag, - "Failed to create wrapper for kext load request."); - result = kOSKextReturnNoMemory; - goto finish; - } - - result = readMkext2Archive(mkextData, &mkextPlist, NULL); - if (result != kOSReturnSuccess) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Failed to read kext load request."); - goto finish; - } - - predicate = _OSKextGetRequestPredicate(mkextPlist); - if (!predicate || !predicate->isEqualTo(kKextRequestPredicateLoad)) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Received kext load request with no predicate; skipping."); - result = kOSKextReturnInvalidArgument; - goto finish; - } - - requestArgs = OSDynamicCast(OSDictionary, - mkextPlist->getObject(kKextRequestArgumentsKey)); - if (!requestArgs || !requestArgs->getCount()) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Received kext load request with no arguments."); - result = kOSKextReturnInvalidArgument; - goto finish; - } - - kextIdentifier = OSDynamicCast(OSString, - requestArgs->getObject(kKextRequestArgumentBundleIdentifierKey)); - if (!kextIdentifier) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Received kext load request with no kext identifier."); - result = kOSKextReturnInvalidArgument; - goto finish; - } - - startKextExcludeNum = OSDynamicCast(OSNumber, - requestArgs->getObject(kKextRequestArgumentStartExcludeKey)); - startMatchingExcludeNum = OSDynamicCast(OSNumber, - requestArgs->getObject(kKextRequestArgumentStartMatchingExcludeKey)); - delayAutounloadBool = OSDynamicCast(OSBoolean, - requestArgs->getObject(kKextRequestArgumentDelayAutounloadKey)); - personalityNames = OSDynamicCast(OSArray, - requestArgs->getObject(kKextRequestArgumentPersonalityNamesKey)); - - if (delayAutounloadBool) { - delayAutounload = delayAutounloadBool->getValue(); - } - if (startKextExcludeNum) { - startKextExcludeLevel = startKextExcludeNum->unsigned8BitValue(); - } - if (startMatchingExcludeNum) { - startMatchingExcludeLevel = startMatchingExcludeNum->unsigned8BitValue(); - } - - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogIPCFlag, - "Received request from user space to load kext %s.", - kextIdentifier->getCStringNoCopy()); - - /* Load the kext, with no deferral, since this is a load from outside - * the kernel. - * xxx - Would like a better way to handle the default values for the - * xxx - start/match opt args. - */ - result = OSKext::loadKextWithIdentifier( - kextIdentifier, - /* allowDefer */ false, - delayAutounload, - startKextExcludeLevel, - startMatchingExcludeLevel, - personalityNames); - if (result != kOSReturnSuccess) { - goto finish; - } - /* If the load came down from kextd, it will shortly inform IOCatalogue - * for matching via a separate IOKit calldown. 
- */ + OSKextLogSpec clientLogFilter, + char * mkextBuffer, + uint32_t mkextBufferLength, + char ** logInfoOut, + uint32_t * logInfoLengthOut) +{ + OSReturn result = kOSReturnError; + OSReturn tempResult = kOSReturnError; + + OSData * mkextData = NULL;// must release + OSDictionary * mkextPlist = NULL;// must release + + OSArray * logInfoArray = NULL;// must release + OSSerialize * serializer = NULL;// must release + + OSString * predicate = NULL;// do not release + OSDictionary * requestArgs = NULL;// do not release + + OSString * kextIdentifier = NULL;// do not release + OSNumber * startKextExcludeNum = NULL;// do not release + OSNumber * startMatchingExcludeNum = NULL;// do not release + OSBoolean * delayAutounloadBool = NULL;// do not release + OSArray * personalityNames = NULL;// do not release + + /* Default values for these two options: regular autounload behavior, + * load all kexts, send no personalities. + */ + Boolean delayAutounload = false; + OSKextExcludeLevel startKextExcludeLevel = kOSKextExcludeNone; + OSKextExcludeLevel startMatchingExcludeLevel = kOSKextExcludeAll; + + IORecursiveLockLock(sKextLock); + + if (logInfoOut) { + *logInfoOut = NULL; + *logInfoLengthOut = 0; + } -finish: + OSKext::setUserSpaceLogFilter(clientLogFilter, logInfoOut ? true : false); + + OSKextLog(/* kext */ NULL, + kOSKextLogDebugLevel | + kOSKextLogIPCFlag, + "Received kext load request from user space."); + + /* Regardless of processing, the fact that we have gotten here means some + * user-space program is up and talking to us, so we'll switch our kext + * registration to reflect that. + */ + if (!sUserLoadsActive) { + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogGeneralFlag | kOSKextLogLoadFlag, + "Switching to late startup (user-space) kext loading policy."); + + sUserLoadsActive = true; + } + + if (!sLoadEnabled) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext loading is disabled."); + result = kOSKextReturnDisabled; + goto finish; + } + + /* Note that we do not set a dealloc function on this OSData + * object! No references to it can remain after the loadFromMkext() + * call since we are in a MIG function, and will vm_deallocate() + * the buffer. + */ + mkextData = OSData::withBytesNoCopy(mkextBuffer, + mkextBufferLength); + if (!mkextData) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag | kOSKextLogIPCFlag, + "Failed to create wrapper for kext load request."); + result = kOSKextReturnNoMemory; + goto finish; + } + + result = readMkext2Archive(mkextData, &mkextPlist, NULL); + if (result != kOSReturnSuccess) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Failed to read kext load request."); + goto finish; + } - /* Gather up the collected log messages for user space. Any - * error messages past this call will not make it up as log messages - * but will be in the system log. 
- */ - logInfoArray = OSKext::clearUserSpaceLogFilter(); + predicate = _OSKextGetRequestPredicate(mkextPlist); + if (!predicate || !predicate->isEqualTo(kKextRequestPredicateLoad)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Received kext load request with no predicate; skipping."); + result = kOSKextReturnInvalidArgument; + goto finish; + } + + requestArgs = OSDynamicCast(OSDictionary, + mkextPlist->getObject(kKextRequestArgumentsKey)); + if (!requestArgs || !requestArgs->getCount()) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Received kext load request with no arguments."); + result = kOSKextReturnInvalidArgument; + goto finish; + } + + kextIdentifier = OSDynamicCast(OSString, + requestArgs->getObject(kKextRequestArgumentBundleIdentifierKey)); + if (!kextIdentifier) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Received kext load request with no kext identifier."); + result = kOSKextReturnInvalidArgument; + goto finish; + } + + startKextExcludeNum = OSDynamicCast(OSNumber, + requestArgs->getObject(kKextRequestArgumentStartExcludeKey)); + startMatchingExcludeNum = OSDynamicCast(OSNumber, + requestArgs->getObject(kKextRequestArgumentStartMatchingExcludeKey)); + delayAutounloadBool = OSDynamicCast(OSBoolean, + requestArgs->getObject(kKextRequestArgumentDelayAutounloadKey)); + personalityNames = OSDynamicCast(OSArray, + requestArgs->getObject(kKextRequestArgumentPersonalityNamesKey)); + + if (delayAutounloadBool) { + delayAutounload = delayAutounloadBool->getValue(); + } + if (startKextExcludeNum) { + startKextExcludeLevel = startKextExcludeNum->unsigned8BitValue(); + } + if (startMatchingExcludeNum) { + startMatchingExcludeLevel = startMatchingExcludeNum->unsigned8BitValue(); + } + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogIPCFlag, + "Received request from user space to load kext %s.", + kextIdentifier->getCStringNoCopy()); + + /* Load the kext, with no deferral, since this is a load from outside + * the kernel. + * xxx - Would like a better way to handle the default values for the + * xxx - start/match opt args. + */ + result = OSKext::loadKextWithIdentifier( + kextIdentifier, + /* allowDefer */ false, + delayAutounload, + startKextExcludeLevel, + startMatchingExcludeLevel, + personalityNames); + if (result != kOSReturnSuccess) { + goto finish; + } + /* If the load came down from kextd, it will shortly inform IOCatalogue + * for matching via a separate IOKit calldown. + */ - if (logInfoArray && logInfoOut && logInfoLengthOut) { - tempResult = OSKext::serializeLogInfo(logInfoArray, - logInfoOut, logInfoLengthOut); - if (tempResult != kOSReturnSuccess) { - result = tempResult; - } - } +finish: - OSKext::flushNonloadedKexts(/* flushPrelinkedKexts */ false); + /* Gather up the collected log messages for user space. Any + * error messages past this call will not make it up as log messages + * but will be in the system log. + */ + logInfoArray = OSKext::clearUserSpaceLogFilter(); + + if (logInfoArray && logInfoOut && logInfoLengthOut) { + tempResult = OSKext::serializeLogInfo(logInfoArray, + logInfoOut, logInfoLengthOut); + if (tempResult != kOSReturnSuccess) { + result = tempResult; + } + } - /* Note: mkextDataObject will have been retained by every kext w/an - * executable in it. That should all have been flushed out at the - * and of the load operation, but you never know.... 
- */ - if (mkextData && mkextData->getRetainCount() > 1) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogIPCFlag, - "Kext load request buffer from user space still retained by a kext; " - "probable memory leak."); - } + OSKext::flushNonloadedKexts(/* flushPrelinkedKexts */ false); + + /* Note: mkextDataObject will have been retained by every kext w/an + * executable in it. That should all have been flushed out at the + * end of the load operation, but you never know.... + */ + if (mkextData && mkextData->getRetainCount() > 1) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag | kOSKextLogIPCFlag, + "Kext load request buffer from user space still retained by a kext; " + "probable memory leak."); + } - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - OSSafeReleaseNULL(mkextData); - OSSafeReleaseNULL(mkextPlist); - OSSafeReleaseNULL(serializer); - OSSafeReleaseNULL(logInfoArray); + OSSafeReleaseNULL(mkextData); + OSSafeReleaseNULL(mkextPlist); + OSSafeReleaseNULL(serializer); + OSSafeReleaseNULL(logInfoArray); - return result; + return result; } /********************************************************************* @@ -3247,70 +3251,70 @@ finish: /* static */ OSReturn OSKext::serializeLogInfo( - OSArray * logInfoArray, - char ** logInfoOut, - uint32_t * logInfoLengthOut) -{ - OSReturn result = kOSReturnError; - char * buffer = NULL; - kern_return_t kmem_result = KERN_FAILURE; - OSSerialize * serializer = NULL; // must release; reused - char * logInfo = NULL; // returned by reference - uint32_t logInfoLength = 0; - - if (!logInfoArray || !logInfoOut || !logInfoLengthOut) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Internal error; invalid arguments to OSKext::serializeLogInfo()."); - /* Bad programmer. */ - result = kOSKextReturnInvalidArgument; - goto finish; - } - - serializer = OSSerialize::withCapacity(0); - if (!serializer) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Failed to create serializer on log info for request from user space."); - /* Incidental error; we're going to (try to) allow the request - * itself to succeed. */ - } - - if (!logInfoArray->serialize(serializer)) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Failed to serialize log info for request from user space."); - /* Incidental error; we're going to (try to) allow the request - * itself to succeed. */ - } else { - logInfo = serializer->text(); - logInfoLength = serializer->getLength(); - - kmem_result = kmem_alloc(kernel_map, (vm_offset_t *)&buffer, round_page(logInfoLength), VM_KERN_MEMORY_OSKEXT); - if (kmem_result != KERN_SUCCESS) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Failed to copy log info for request from user space."); - /* Incidental error; we're going to (try to) allow the request - * to succeed.
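
The three optional mkext request arguments above all use the same libkern cast-then-default idiom: OSDynamicCast() yields NULL for a missing or mistyped dictionary entry, so the preinitialized default survives. A minimal standalone sketch of that idiom, with the helper name and key purely hypothetical (not part of this patch):

#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSNumber.h>

/* Hypothetical helper: read an optional uint8 argument, falling back to
 * the caller's default when the key is absent or holds the wrong type.
 */
static uint8_t
getOptionalUInt8(OSDictionary * args, const char * key, uint8_t defaultValue)
{
    OSNumber * num = OSDynamicCast(OSNumber, args->getObject(key));

    return num ? num->unsigned8BitValue() : defaultValue;
}

Note the asymmetric defaults in the code above: start defaults to kOSKextExcludeNone but matching to kOSKextExcludeAll, consistent with the comment that a load coming down from kextd is followed by a separate IOKit calldown that triggers matching.
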
*/ - } else { - /* 11981737 - clear uninitialized data in last page */ - bzero((void *)(buffer + logInfoLength), - (round_page(logInfoLength) - logInfoLength)); - memcpy(buffer, logInfo, logInfoLength); - *logInfoOut = buffer; - *logInfoLengthOut = logInfoLength; - } - } - - result = kOSReturnSuccess; + OSArray * logInfoArray, + char ** logInfoOut, + uint32_t * logInfoLengthOut) +{ + OSReturn result = kOSReturnError; + char * buffer = NULL; + kern_return_t kmem_result = KERN_FAILURE; + OSSerialize * serializer = NULL;// must release; reused + char * logInfo = NULL;// returned by reference + uint32_t logInfoLength = 0; + + if (!logInfoArray || !logInfoOut || !logInfoLengthOut) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Internal error; invalid arguments to OSKext::serializeLogInfo()."); + /* Bad programmer. */ + result = kOSKextReturnInvalidArgument; + goto finish; + } + + serializer = OSSerialize::withCapacity(0); + if (!serializer) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Failed to create serializer on log info for request from user space."); + /* Incidental error; we're going to (try to) allow the request + * itself to succeed. */ + } + + if (!logInfoArray->serialize(serializer)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Failed to serialize log info for request from user space."); + /* Incidental error; we're going to (try to) allow the request + * itself to succeed. */ + } else { + logInfo = serializer->text(); + logInfoLength = serializer->getLength(); + + kmem_result = kmem_alloc(kernel_map, (vm_offset_t *)&buffer, round_page(logInfoLength), VM_KERN_MEMORY_OSKEXT); + if (kmem_result != KERN_SUCCESS) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogIPCFlag, + "Failed to copy log info for request from user space."); + /* Incidental error; we're going to (try to) allow the request + * to succeed. 
*/ + } else { + /* 11981737 - clear uninitialized data in last page */ + bzero((void *)(buffer + logInfoLength), + (round_page(logInfoLength) - logInfoLength)); + memcpy(buffer, logInfo, logInfoLength); + *logInfoOut = buffer; + *logInfoLengthOut = logInfoLength; + } + } + + result = kOSReturnSuccess; finish: - OSSafeReleaseNULL(serializer); - return result; + OSSafeReleaseNULL(serializer); + return result; } #if PRAGMA_MARK @@ -3321,16 +3325,16 @@ finish: OSKext * OSKext::lookupKextWithIdentifier(const char * kextIdentifier) { - OSKext * foundKext = NULL; + OSKext * foundKext = NULL; - IORecursiveLockLock(sKextLock); - foundKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); - if (foundKext) { - foundKext->retain(); - } - IORecursiveLockUnlock(sKextLock); + IORecursiveLockLock(sKextLock); + foundKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); + if (foundKext) { + foundKext->retain(); + } + IORecursiveLockUnlock(sKextLock); - return foundKext; + return foundKext; } /********************************************************************* @@ -3338,7 +3342,7 @@ OSKext::lookupKextWithIdentifier(const char * kextIdentifier) OSKext * OSKext::lookupKextWithIdentifier(OSString * kextIdentifier) { - return OSKext::lookupKextWithIdentifier(kextIdentifier->getCStringNoCopy()); + return OSKext::lookupKextWithIdentifier(kextIdentifier->getCStringNoCopy()); } /********************************************************************* @@ -3346,25 +3350,25 @@ OSKext::lookupKextWithIdentifier(OSString * kextIdentifier) OSKext * OSKext::lookupKextWithLoadTag(uint32_t aTag) { - OSKext * foundKext = NULL; // returned - uint32_t count, i; - - IORecursiveLockLock(sKextLock); - - count = sLoadedKexts->getCount(); - for (i = 0; i < count; i++) { - OSKext * thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (thisKext->getLoadTag() == aTag) { - foundKext = thisKext; - foundKext->retain(); - goto finish; - } - } - + OSKext * foundKext = NULL; // returned + uint32_t count, i; + + IORecursiveLockLock(sKextLock); + + count = sLoadedKexts->getCount(); + for (i = 0; i < count; i++) { + OSKext * thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + if (thisKext->getLoadTag() == aTag) { + foundKext = thisKext; + foundKext->retain(); + goto finish; + } + } + finish: - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - return foundKext; + return foundKext; } /********************************************************************* @@ -3372,41 +3376,41 @@ finish: OSKext * OSKext::lookupKextWithAddress(vm_address_t address) { - OSKext * foundKext = NULL; // returned - uint32_t count, i; - - IORecursiveLockLock(sKextLock); - - count = sLoadedKexts->getCount(); - for (i = 0; i < count; i++) { - OSKext * thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (thisKext->linkedExecutable) { - vm_address_t kext_start = - (vm_address_t)thisKext->linkedExecutable->getBytesNoCopy(); - vm_address_t kext_end = kext_start + - thisKext->linkedExecutable->getLength(); - if ((kext_start <= address) && (address < kext_end)) { - foundKext = thisKext; - foundKext->retain(); - goto finish; - } - } - } - + OSKext * foundKext = NULL; // returned + uint32_t count, i; + + IORecursiveLockLock(sKextLock); + + count = sLoadedKexts->getCount(); + for (i = 0; i < count; i++) { + OSKext * thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + if (thisKext->linkedExecutable) { + vm_address_t kext_start = + 
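
The round_page()/bzero() pairing in serializeLogInfo() above is load-bearing: kmem_alloc() hands back whole pages, and the slack between logInfoLength and the rounded allocation would otherwise carry stale kernel heap bytes out to user space (the 11981737 fix cited in the comment). A condensed sketch of the same copy-out pattern, as a hypothetical helper not present in this patch:

#include <vm/vm_kern.h>         /* kmem_alloc(), kernel_map */

/* Allocate a page-rounded kernel buffer for copy-out, zeroing the tail
 * so no uninitialized memory escapes alongside the payload.
 */
static kern_return_t
allocForCopyOut(const char * src, uint32_t len, char ** bufOut)
{
    char * buf = NULL;
    kern_return_t kr = kmem_alloc(kernel_map, (vm_offset_t *)&buf,
        round_page(len), VM_KERN_MEMORY_OSKEXT);

    if (kr != KERN_SUCCESS) {
        return kr;
    }
    bzero(buf + len, round_page(len) - len);   /* clear the page slack */
    memcpy(buf, src, len);                     /* then copy the payload */
    *bufOut = buf;
    return KERN_SUCCESS;
}
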
(vm_address_t)thisKext->linkedExecutable->getBytesNoCopy(); + vm_address_t kext_end = kext_start + + thisKext->linkedExecutable->getLength(); + if ((kext_start <= address) && (address < kext_end)) { + foundKext = thisKext; + foundKext->retain(); + goto finish; + } + } + } + finish: - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - return foundKext; + return foundKext; } OSData * OSKext::copyKextUUIDForAddress(OSNumber *address) { OSData * uuid = NULL; - OSKextActiveAccount * active; - OSKext * kext = NULL; - uint32_t baseIdx; - uint32_t lim; + OSKextActiveAccount * active; + OSKext * kext = NULL; + uint32_t baseIdx; + uint32_t lim; if (!address) { return NULL; @@ -3426,44 +3430,39 @@ OSKext::copyKextUUIDForAddress(OSNumber *address) if (macCheckResult != 0) { OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogLoadFlag, - "Failed to query kext UUID (MAC policy error 0x%x).", - macCheckResult); + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Failed to query kext UUID (MAC policy error 0x%x).", + macCheckResult); return NULL; } } #endif - IOSimpleLockLock(sKextAccountsLock); - // bsearch sKextAccounts list - for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1) - { - active = &sKextAccounts[baseIdx + (lim >> 1)]; - if ((addr >= active->address) && (addr < active->address_end)) - { - kext = active->account->kext; - if (kext) kext->retain(); - break; - } - else if (addr > active->address) - { - // move right - baseIdx += (lim >> 1) + 1; - lim--; - } - // else move left - } - IOSimpleLockUnlock(sKextAccountsLock); - - if (kext) - { - uuid = kext->copyTextUUID(); - kext->release(); - } - else if (((vm_offset_t)addr >= vm_kernel_stext) && ((vm_offset_t)addr < vm_kernel_etext)) - { - uuid = sKernelKext->copyTextUUID(); - } + IOSimpleLockLock(sKextAccountsLock); + // bsearch sKextAccounts list + for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1) { + active = &sKextAccounts[baseIdx + (lim >> 1)]; + if ((addr >= active->address) && (addr < active->address_end)) { + kext = active->account->kext; + if (kext) { + kext->retain(); + } + break; + } else if (addr > active->address) { + // move right + baseIdx += (lim >> 1) + 1; + lim--; + } + // else move left + } + IOSimpleLockUnlock(sKextAccountsLock); + + if (kext) { + uuid = kext->copyTextUUID(); + kext->release(); + } else if (((vm_offset_t)addr >= vm_kernel_stext) && ((vm_offset_t)addr < vm_kernel_etext)) { + uuid = sKernelKext->copyTextUUID(); + } return uuid; } @@ -3473,42 +3472,41 @@ OSKext::copyKextUUIDForAddress(OSNumber *address) OSKext * OSKext::lookupKextWithUUID(uuid_t wanted) { - OSKext * foundKext = NULL; // returned - uint32_t count, i; + OSKext * foundKext = NULL; // returned + uint32_t count, i; - IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextLock); - count = sLoadedKexts->getCount(); + count = sLoadedKexts->getCount(); - for (i = 0; i < count; i++) { - OSKext * thisKext = NULL; + for (i = 0; i < count; i++) { + OSKext * thisKext = NULL; - thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (!thisKext) { - continue; - } - - OSData *uuid_data = thisKext->copyUUID(); - if (!uuid_data) { - continue; - } + thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + if (!thisKext) { + continue; + } - uuid_t uuid; - memcpy(&uuid, uuid_data->getBytesNoCopy(), sizeof(uuid)); - uuid_data->release(); + OSData *uuid_data = thisKext->copyUUID(); + if (!uuid_data) { + continue; + } - if (0 == uuid_compare(wanted, uuid)) { - foundKext = thisKext; - foundKext->retain(); - 
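
The copyKextUUIDForAddress() loop reformatted above is the classic baseIdx/lim binary search specialized for half-open address ranges. A self-contained sketch of the same idiom; the Range type here is illustrative, not the patch's OSKextActiveAccount:

#include <stdint.h>

struct Range {
    uintptr_t start;
    uintptr_t end;      /* half-open: [start, end) */
};

/* Return the index of the range containing addr, or -1 if none does.
 * Requires ranges sorted by start address and non-overlapping.
 */
static long
findContainingRange(const struct Range * ranges, uint32_t count, uintptr_t addr)
{
    uint32_t baseIdx = 0;

    for (uint32_t lim = count; lim; lim >>= 1) {
        const struct Range * r = &ranges[baseIdx + (lim >> 1)];
        if (addr >= r->start && addr < r->end) {
            return r - ranges;              /* hit */
        }
        if (addr > r->start) {
            baseIdx += (lim >> 1) + 1;      /* move right */
            lim--;
        }
        /* else move left: keep baseIdx, halve lim */
    }
    return -1;
}
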
goto finish; - } + uuid_t uuid; + memcpy(&uuid, uuid_data->getBytesNoCopy(), sizeof(uuid)); + uuid_data->release(); - } + if (0 == uuid_compare(wanted, uuid)) { + foundKext = thisKext; + foundKext->retain(); + goto finish; + } + } finish: - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - return foundKext; + return foundKext; } @@ -3517,21 +3515,22 @@ finish: /********************************************************************* *********************************************************************/ /* static */ -bool OSKext::isKextWithIdentifierLoaded(const char * kextIdentifier) +bool +OSKext::isKextWithIdentifierLoaded(const char * kextIdentifier) { - bool result = false; - OSKext * foundKext = NULL; // returned + bool result = false; + OSKext * foundKext = NULL; // returned - IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextLock); + + foundKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); + if (foundKext && foundKext->isLoaded()) { + result = true; + } - foundKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); - if (foundKext && foundKext->isLoaded()) { - result = true; - } + IORecursiveLockUnlock(sKextLock); - IORecursiveLockUnlock(sKextLock); - - return result; + return result; } /********************************************************************* @@ -3541,150 +3540,150 @@ bool OSKext::isKextWithIdentifierLoaded(const char * kextIdentifier) /* static */ OSReturn OSKext::removeKext( - OSKext * aKext, + OSKext * aKext, #if CONFIG_EMBEDDED - __unused + __unused #endif - bool terminateServicesAndRemovePersonalitiesFlag) - { + bool terminateServicesAndRemovePersonalitiesFlag) +{ #if CONFIG_EMBEDDED - OSKextLog(aKext, - kOSKextLogErrorLevel | - kOSKextLogKextBookkeepingFlag, - "removeKext() called for %s, not supported on embedded", - aKext->getIdentifier() ? aKext->getIdentifierCString() : "unknown kext"); + OSKextLog(aKext, + kOSKextLogErrorLevel | + kOSKextLogKextBookkeepingFlag, + "removeKext() called for %s, not supported on embedded", + aKext->getIdentifier() ? aKext->getIdentifierCString() : "unknown kext"); - return kOSReturnSuccess; + return kOSReturnSuccess; #else /* CONFIG_EMBEDDED */ - OSReturn result = kOSKextReturnInUse; - OSKext * checkKext = NULL; // do not release + OSReturn result = kOSKextReturnInUse; + OSKext * checkKext = NULL; // do not release #if CONFIG_MACF - int macCheckResult = 0; - kauth_cred_t cred = NULL; + int macCheckResult = 0; + kauth_cred_t cred = NULL; #endif - IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextLock); - /* If the kext has no identifier, it failed to init - * so isn't in sKextsByID and it isn't loaded. - */ - if (!aKext->getIdentifier()) { - result = kOSReturnSuccess; - goto finish; - } + /* If the kext has no identifier, it failed to init + * so isn't in sKextsByID and it isn't loaded. 
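
All of the lookupKextWith...() variants reformatted above return a retained reference, while isKextWithIdentifierLoaded() deliberately takes no reference and returns a plain bool. A usage sketch (the bundle identifier and function name are hypothetical):

#include <libkern/c++/OSKext.h>

static void
lookupExample(void)
{
    OSKext * kext = OSKext::lookupKextWithIdentifier("com.example.driver");

    if (kext) {
        /* ... inspect the kext ... */
        kext->release();    /* balance the retain taken inside the lookup */
    }
}
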
+ */ + if (!aKext->getIdentifier()) { + result = kOSReturnSuccess; + goto finish; + } - checkKext = OSDynamicCast(OSKext, - sKextsByID->getObject(aKext->getIdentifier())); - if (checkKext != aKext) { - result = kOSKextReturnNotFound; - goto finish; - } + checkKext = OSDynamicCast(OSKext, + sKextsByID->getObject(aKext->getIdentifier())); + if (checkKext != aKext) { + result = kOSKextReturnNotFound; + goto finish; + } - if (aKext->isLoaded()) { + if (aKext->isLoaded()) { #if CONFIG_MACF - if (current_task() != kernel_task) { - cred = kauth_cred_get_with_ref(); - macCheckResult = mac_kext_check_unload(cred, aKext->getIdentifierCString()); - kauth_cred_unref(&cred); - } - - if (macCheckResult != 0) { - result = kOSReturnError; - OSKextLog(aKext, - kOSKextLogErrorLevel | - kOSKextLogKextBookkeepingFlag, - "Failed to remove kext %s (MAC policy error 0x%x).", - aKext->getIdentifierCString(), macCheckResult); - goto finish; - } + if (current_task() != kernel_task) { + cred = kauth_cred_get_with_ref(); + macCheckResult = mac_kext_check_unload(cred, aKext->getIdentifierCString()); + kauth_cred_unref(&cred); + } + + if (macCheckResult != 0) { + result = kOSReturnError; + OSKextLog(aKext, + kOSKextLogErrorLevel | + kOSKextLogKextBookkeepingFlag, + "Failed to remove kext %s (MAC policy error 0x%x).", + aKext->getIdentifierCString(), macCheckResult); + goto finish; + } #endif - /* make sure there are no resource requests in flight - 17187548 */ - if (aKext->countRequestCallbacks()) { - goto finish; - } - - /* If we are terminating, send the request to the IOCatalogue - * (which will actually call us right back but that's ok we have - * a recursive lock don't you know) but do not ask the IOCatalogue - * to call back with an unload, we'll do that right here. - */ - if (terminateServicesAndRemovePersonalitiesFlag) { - result = gIOCatalogue->terminateDriversForModule( - aKext->getIdentifierCString(), /* unload */ false); - if (result != kOSReturnSuccess) { - OSKextLog(aKext, - kOSKextLogErrorLevel | - kOSKextLogKextBookkeepingFlag, - "Can't remove kext %s; services failed to terminate - 0x%x.", - aKext->getIdentifierCString(), result); - goto finish; - } - } - - result = aKext->unload(); - if (result != kOSReturnSuccess) { - goto finish; - } - } - - /* Remove personalities as requested. This is a bit redundant for a loaded - * kext as IOCatalogue::terminateDriversForModule() removes driver - * personalities, but it doesn't restart matching, which we always want - * coming from here, and OSKext::removePersonalitiesFromCatalog() ensures - * that happens. - */ - if (terminateServicesAndRemovePersonalitiesFlag) { - aKext->removePersonalitiesFromCatalog(); - } - - OSKextLog(aKext, - kOSKextLogProgressLevel | - kOSKextLogKextBookkeepingFlag, - "Removing kext %s.", - aKext->getIdentifierCString()); - - sKextsByID->removeObject(aKext->getIdentifier()); - result = kOSReturnSuccess; + /* make sure there are no resource requests in flight - 17187548 */ + if (aKext->countRequestCallbacks()) { + goto finish; + } + + /* If we are terminating, send the request to the IOCatalogue + * (which will actually call us right back but that's ok we have + * a recursive lock don't you know) but do not ask the IOCatalogue + * to call back with an unload, we'll do that right here. 
+ */ + if (terminateServicesAndRemovePersonalitiesFlag) { + result = gIOCatalogue->terminateDriversForModule( + aKext->getIdentifierCString(), /* unload */ false); + if (result != kOSReturnSuccess) { + OSKextLog(aKext, + kOSKextLogErrorLevel | + kOSKextLogKextBookkeepingFlag, + "Can't remove kext %s; services failed to terminate - 0x%x.", + aKext->getIdentifierCString(), result); + goto finish; + } + } + + result = aKext->unload(); + if (result != kOSReturnSuccess) { + goto finish; + } + } + + /* Remove personalities as requested. This is a bit redundant for a loaded + * kext as IOCatalogue::terminateDriversForModule() removes driver + * personalities, but it doesn't restart matching, which we always want + * coming from here, and OSKext::removePersonalitiesFromCatalog() ensures + * that happens. + */ + if (terminateServicesAndRemovePersonalitiesFlag) { + aKext->removePersonalitiesFromCatalog(); + } + + OSKextLog(aKext, + kOSKextLogProgressLevel | + kOSKextLogKextBookkeepingFlag, + "Removing kext %s.", + aKext->getIdentifierCString()); + + sKextsByID->removeObject(aKext->getIdentifier()); + result = kOSReturnSuccess; finish: - IORecursiveLockUnlock(sKextLock); - return result; + IORecursiveLockUnlock(sKextLock); + return result; #endif /* CONFIG_EMBEDDED */ - } +} /********************************************************************* *********************************************************************/ /* static */ OSReturn OSKext::removeKextWithIdentifier( - const char * kextIdentifier, - bool terminateServicesAndRemovePersonalitiesFlag) -{ - OSReturn result = kOSReturnError; - - IORecursiveLockLock(sKextLock); - - OSKext * aKext = OSDynamicCast(OSKext, - sKextsByID->getObject(kextIdentifier)); - if (!aKext) { - result = kOSKextReturnNotFound; - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogKextBookkeepingFlag, - "Can't remove kext %s - not found.", - kextIdentifier); - goto finish; - } - - result = OSKext::removeKext(aKext, - terminateServicesAndRemovePersonalitiesFlag); + const char * kextIdentifier, + bool terminateServicesAndRemovePersonalitiesFlag) +{ + OSReturn result = kOSReturnError; + + IORecursiveLockLock(sKextLock); + + OSKext * aKext = OSDynamicCast(OSKext, + sKextsByID->getObject(kextIdentifier)); + if (!aKext) { + result = kOSKextReturnNotFound; + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogKextBookkeepingFlag, + "Can't remove kext %s - not found.", + kextIdentifier); + goto finish; + } + + result = OSKext::removeKext(aKext, + terminateServicesAndRemovePersonalitiesFlag); finish: - IORecursiveLockUnlock(sKextLock); - - return result; + IORecursiveLockUnlock(sKextLock); + + return result; } /********************************************************************* @@ -3692,240 +3691,236 @@ finish: /* static */ OSReturn OSKext::removeKextWithLoadTag( - OSKextLoadTag loadTag, - bool terminateServicesAndRemovePersonalitiesFlag) -{ - OSReturn result = kOSReturnError; - OSKext * foundKext = NULL; - uint32_t count, i; - - IORecursiveLockLock(sKextLock); - - count = sLoadedKexts->getCount(); - for (i = 0; i < count; i++) { - OSKext * thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (thisKext->loadTag == loadTag) { - foundKext = thisKext; - break; - } - } - - if (!foundKext) { - result = kOSKextReturnNotFound; - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, - "Can't remove kext with load tag %d - not found.", - loadTag); - goto finish; - } - - result = 
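
removeKext() above is a two-phase teardown: when asked, it first has the IOCatalogue terminate services and drop driver personalities, then performs the unload itself, all under the recursive sKextLock so the IOCatalogue's re-entrant calldown stays safe. A usage sketch of the public wrapper (identifier hypothetical; result codes as in the code above):

static void
removeExample(void)
{
    OSReturn rc = OSKext::removeKextWithIdentifier("com.example.driver",
        /* terminateServicesAndRemovePersonalitiesFlag */ true);

    if (rc != kOSReturnSuccess) {
        /* kOSKextReturnNotFound: no such kext is registered;
         * kOSKextReturnInUse: outstanding requests or clients remain. */
    }
}
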
OSKext::removeKext(foundKext, - terminateServicesAndRemovePersonalitiesFlag); + OSKextLoadTag loadTag, + bool terminateServicesAndRemovePersonalitiesFlag) +{ + OSReturn result = kOSReturnError; + OSKext * foundKext = NULL; + uint32_t count, i; + + IORecursiveLockLock(sKextLock); + + count = sLoadedKexts->getCount(); + for (i = 0; i < count; i++) { + OSKext * thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + if (thisKext->loadTag == loadTag) { + foundKext = thisKext; + break; + } + } + + if (!foundKext) { + result = kOSKextReturnNotFound; + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, + "Can't remove kext with load tag %d - not found.", + loadTag); + goto finish; + } + + result = OSKext::removeKext(foundKext, + terminateServicesAndRemovePersonalitiesFlag); finish: - IORecursiveLockUnlock(sKextLock); - - return result; - } + IORecursiveLockUnlock(sKextLock); + + return result; +} /********************************************************************* *********************************************************************/ OSDictionary * OSKext::copyKexts(void) { - OSDictionary * result; + OSDictionary * result; - IORecursiveLockLock(sKextLock); - result = OSDynamicCast(OSDictionary, sKextsByID->copyCollection()); - IORecursiveLockUnlock(sKextLock); + IORecursiveLockLock(sKextLock); + result = OSDynamicCast(OSDictionary, sKextsByID->copyCollection()); + IORecursiveLockUnlock(sKextLock); - return result; + return result; } /********************************************************************* - *********************************************************************/ +*********************************************************************/ #define BOOTER_KEXT_PREFIX "Driver-" typedef struct _DeviceTreeBuffer { - uint32_t paddr; - uint32_t length; + uint32_t paddr; + uint32_t length; } _DeviceTreeBuffer; /********************************************************************* - * Create a dictionary of excluded kexts from the given booter data. - *********************************************************************/ +* Create a dictionary of excluded kexts from the given booter data. 
+*********************************************************************/ /* static */ void OSKext::createExcludeListFromBooterData( - OSDictionary * theDictionary, - OSCollectionIterator * theIterator ) -{ - OSString * deviceTreeName = NULL; // do not release - const _DeviceTreeBuffer * deviceTreeBuffer = NULL; // do not release - char * booterDataPtr = NULL; // do not release - _BooterKextFileInfo * kextFileInfo = NULL; // do not release - char * infoDictAddr = NULL; // do not release - OSObject * parsedXML = NULL; // must release - OSDictionary * theInfoDict = NULL; // do not release - - theIterator->reset(); - - /* look for AppleKextExcludeList.kext */ - while ( (deviceTreeName = - OSDynamicCast(OSString, theIterator->getNextObject())) ) { - - const char * devTreeNameCString; - OSData * deviceTreeEntry; - OSString * myBundleID; // do not release - - OSSafeReleaseNULL(parsedXML); - - deviceTreeEntry = - OSDynamicCast(OSData, theDictionary->getObject(deviceTreeName)); - if (!deviceTreeEntry) { - continue; - } - - /* Make sure it is a kext */ - devTreeNameCString = deviceTreeName->getCStringNoCopy(); - if (strncmp(devTreeNameCString, BOOTER_KEXT_PREFIX, - (sizeof(BOOTER_KEXT_PREFIX) - 1)) != 0) { - OSKextLog(NULL, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag, - "\"%s\" not a kext", - devTreeNameCString); - continue; - } - - deviceTreeBuffer = (const _DeviceTreeBuffer *) - deviceTreeEntry->getBytesNoCopy(0, sizeof(deviceTreeBuffer)); - if (!deviceTreeBuffer) { - continue; - } - - booterDataPtr = (char *)ml_static_ptovirt(deviceTreeBuffer->paddr); - if (!booterDataPtr) { - continue; - } - - kextFileInfo = (_BooterKextFileInfo *) booterDataPtr; - if (!kextFileInfo->infoDictPhysAddr || - !kextFileInfo->infoDictLength) { - continue; - } - - infoDictAddr = (char *) - ml_static_ptovirt(kextFileInfo->infoDictPhysAddr); - if (!infoDictAddr) { - continue; - } - - parsedXML = OSUnserializeXML(infoDictAddr); - if (!parsedXML) { - continue; - } - - theInfoDict = OSDynamicCast(OSDictionary, parsedXML); - if (!theInfoDict) { - continue; - } - - myBundleID = - OSDynamicCast(OSString, - theInfoDict->getObject(kCFBundleIdentifierKey)); - if ( myBundleID && - strcmp( myBundleID->getCStringNoCopy(), "com.apple.driver.KextExcludeList" ) == 0 ) { - - boolean_t updated = updateExcludeList(theInfoDict); - if (!updated) { - /* 25322874 */ - panic("Missing OSKextExcludeList dictionary\n"); - } - break; - } - - } // while ( (deviceTreeName = ...) ) - - OSSafeReleaseNULL(parsedXML); - return; -} - -/********************************************************************* - * Create a dictionary of excluded kexts from the given prelink - * info (kernelcache). 
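
The "Driver-" prefix test in createExcludeListFromBooterData() above uses the usual sizeof-minus-one trick to compare against a string literal without its NUL. A tiny standalone sketch (function name hypothetical):

#include <string.h>

#define BOOTER_KEXT_PREFIX "Driver-"

/* True for device-tree entries named like "Driver-com.example.driver":
 * sizeof(literal) - 1 is the literal's length without the trailing NUL.
 */
static bool
isBooterKextEntry(const char * name)
{
    return strncmp(name, BOOTER_KEXT_PREFIX,
               sizeof(BOOTER_KEXT_PREFIX) - 1) == 0;
}
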
- *********************************************************************/ + OSDictionary * theDictionary, + OSCollectionIterator * theIterator ) +{ + OSString * deviceTreeName = NULL;// do not release + const _DeviceTreeBuffer * deviceTreeBuffer = NULL;// do not release + char * booterDataPtr = NULL;// do not release + _BooterKextFileInfo * kextFileInfo = NULL;// do not release + char * infoDictAddr = NULL;// do not release + OSObject * parsedXML = NULL;// must release + OSDictionary * theInfoDict = NULL;// do not release + + theIterator->reset(); + + /* look for AppleKextExcludeList.kext */ + while ((deviceTreeName = + OSDynamicCast(OSString, theIterator->getNextObject()))) { + const char * devTreeNameCString; + OSData * deviceTreeEntry; + OSString * myBundleID;// do not release + + OSSafeReleaseNULL(parsedXML); + + deviceTreeEntry = + OSDynamicCast(OSData, theDictionary->getObject(deviceTreeName)); + if (!deviceTreeEntry) { + continue; + } + + /* Make sure it is a kext */ + devTreeNameCString = deviceTreeName->getCStringNoCopy(); + if (strncmp(devTreeNameCString, BOOTER_KEXT_PREFIX, + (sizeof(BOOTER_KEXT_PREFIX) - 1)) != 0) { + OSKextLog(NULL, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag, + "\"%s\" not a kext", + devTreeNameCString); + continue; + } + + deviceTreeBuffer = (const _DeviceTreeBuffer *) + deviceTreeEntry->getBytesNoCopy(0, sizeof(deviceTreeBuffer)); + if (!deviceTreeBuffer) { + continue; + } + + booterDataPtr = (char *)ml_static_ptovirt(deviceTreeBuffer->paddr); + if (!booterDataPtr) { + continue; + } + + kextFileInfo = (_BooterKextFileInfo *) booterDataPtr; + if (!kextFileInfo->infoDictPhysAddr || + !kextFileInfo->infoDictLength) { + continue; + } + + infoDictAddr = (char *) + ml_static_ptovirt(kextFileInfo->infoDictPhysAddr); + if (!infoDictAddr) { + continue; + } + + parsedXML = OSUnserializeXML(infoDictAddr); + if (!parsedXML) { + continue; + } + + theInfoDict = OSDynamicCast(OSDictionary, parsedXML); + if (!theInfoDict) { + continue; + } + + myBundleID = + OSDynamicCast(OSString, + theInfoDict->getObject(kCFBundleIdentifierKey)); + if (myBundleID && + strcmp( myBundleID->getCStringNoCopy(), "com.apple.driver.KextExcludeList" ) == 0) { + boolean_t updated = updateExcludeList(theInfoDict); + if (!updated) { + /* 25322874 */ + panic("Missing OSKextExcludeList dictionary\n"); + } + break; + } + } // while ( (deviceTreeName = ...) ) + + OSSafeReleaseNULL(parsedXML); + return; +} + +/********************************************************************* +* Create a dictionary of excluded kexts from the given prelink +* info (kernelcache). +*********************************************************************/ /* static */ void OSKext::createExcludeListFromPrelinkInfo( OSArray * theInfoArray ) { - OSDictionary * myInfoDict = NULL; // do not release - OSString * myBundleID; // do not release - u_int i; - - /* Find com.apple.driver.KextExcludeList. */ - for (i = 0; i < theInfoArray->getCount(); i++) { - myInfoDict = OSDynamicCast(OSDictionary, theInfoArray->getObject(i)); - if (!myInfoDict) { - continue; - } - myBundleID = - OSDynamicCast(OSString, - myInfoDict->getObject(kCFBundleIdentifierKey)); - if ( myBundleID && - strcmp( myBundleID->getCStringNoCopy(), "com.apple.driver.KextExcludeList" ) == 0 ) { - - boolean_t updated = updateExcludeList(myInfoDict); - if (!updated) { - /* 25322874 */ - panic("Missing OSKextExcludeList dictionary\n"); - } - break; - } - } // for (i = 0; i < theInfoArray->getCount()... 
- - return; + OSDictionary * myInfoDict = NULL;// do not release + OSString * myBundleID; // do not release + u_int i; + + /* Find com.apple.driver.KextExcludeList. */ + for (i = 0; i < theInfoArray->getCount(); i++) { + myInfoDict = OSDynamicCast(OSDictionary, theInfoArray->getObject(i)); + if (!myInfoDict) { + continue; + } + myBundleID = + OSDynamicCast(OSString, + myInfoDict->getObject(kCFBundleIdentifierKey)); + if (myBundleID && + strcmp( myBundleID->getCStringNoCopy(), "com.apple.driver.KextExcludeList" ) == 0) { + boolean_t updated = updateExcludeList(myInfoDict); + if (!updated) { + /* 25322874 */ + panic("Missing OSKextExcludeList dictionary\n"); + } + break; + } + } // for (i = 0; i < theInfoArray->getCount()... + + return; } /* static */ boolean_t OSKext::updateExcludeList(OSDictionary *infoDict) { - OSDictionary *myTempDict = NULL; // do not free - OSString *myTempString = NULL; // do not free - OSKextVersion newVersion = 0; - boolean_t updated = false; + OSDictionary *myTempDict = NULL; // do not free + OSString *myTempString = NULL;// do not free + OSKextVersion newVersion = 0; + boolean_t updated = false; - if (!infoDict) { - return false; - } + if (!infoDict) { + return false; + } - myTempDict = OSDynamicCast(OSDictionary, infoDict->getObject("OSKextExcludeList")); - if (!myTempDict) { - return false; - } + myTempDict = OSDynamicCast(OSDictionary, infoDict->getObject("OSKextExcludeList")); + if (!myTempDict) { + return false; + } - myTempString = OSDynamicCast(OSString, infoDict->getObject(kCFBundleVersionKey)); - if (!myTempString) { - return false; - } + myTempString = OSDynamicCast(OSString, infoDict->getObject(kCFBundleVersionKey)); + if (!myTempString) { + return false; + } - newVersion = OSKextParseVersionString(myTempString->getCStringNoCopy()); - if (newVersion == 0) { - return false; - } + newVersion = OSKextParseVersionString(myTempString->getCStringNoCopy()); + if (newVersion == 0) { + return false; + } - IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextLock); - if (newVersion > sExcludeListVersion) { - OSSafeReleaseNULL(sExcludeListByID); - sExcludeListByID = OSDictionary::withDictionary(myTempDict, 0); - sExcludeListVersion = newVersion; - updated = true; - } + if (newVersion > sExcludeListVersion) { + OSSafeReleaseNULL(sExcludeListByID); + sExcludeListByID = OSDictionary::withDictionary(myTempDict, 0); + sExcludeListVersion = newVersion; + updated = true; + } - IORecursiveLockUnlock(sKextLock); - return updated; + IORecursiveLockUnlock(sKextLock); + return updated; } #if PRAGMA_MARK @@ -3936,7 +3931,7 @@ OSKext::updateExcludeList(OSDictionary *infoDict) const OSSymbol * OSKext::getIdentifier(void) { - return bundleID; + return bundleID; } /********************************************************************* @@ -3946,7 +3941,7 @@ OSKext::getIdentifier(void) const char * OSKext::getIdentifierCString(void) { - return bundleID->getCStringNoCopy(); + return bundleID->getCStringNoCopy(); } /********************************************************************* @@ -3954,7 +3949,7 @@ OSKext::getIdentifierCString(void) OSKextVersion OSKext::getVersion(void) { - return version; + return version; } /********************************************************************* @@ -3962,7 +3957,7 @@ OSKext::getVersion(void) OSKextVersion OSKext::getCompatibleVersion(void) { - return compatibleVersion; + return compatibleVersion; } /********************************************************************* @@ -3970,7 +3965,7 @@ OSKext::getCompatibleVersion(void) bool 
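
updateExcludeList() above only swaps sExcludeListByID in when the incoming CFBundleVersion parses strictly newer than sExcludeListVersion, so whichever source reports last (booter data or prelink info) cannot regress the list. A call sketch under the assumption of caller context with access to this internal interface; dictionary contents are hypothetical and releases are omitted for brevity:

static void
excludeListUpdateExample(void)
{
    OSDictionary * excludeList = OSDictionary::withCapacity(1);
    OSDictionary * infoDict    = OSDictionary::withCapacity(2);

    excludeList->setObject("com.example.badkext",
        OSString::withCString("LE 1.2.0"));
    infoDict->setObject("OSKextExcludeList", excludeList);
    infoDict->setObject(kCFBundleVersionKey, OSString::withCString("16.0.0"));

    /* True only if "16.0.0" beats the currently installed list version. */
    boolean_t updated = OSKext::updateExcludeList(infoDict);
    (void)updated;
}
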
OSKext::isLibrary(void) { - return (getCompatibleVersion() > 0); + return getCompatibleVersion() > 0; } /********************************************************************* @@ -3978,11 +3973,11 @@ OSKext::isLibrary(void) bool OSKext::isCompatibleWithVersion(OSKextVersion aVersion) { - if ((compatibleVersion > -1 && version > -1) && - (compatibleVersion <= version && aVersion <= version)) { - return true; - } - return false; + if ((compatibleVersion > -1 && version > -1) && + (compatibleVersion <= version && aVersion <= version)) { + return true; + } + return false; } /********************************************************************* @@ -3990,7 +3985,7 @@ OSKext::isCompatibleWithVersion(OSKextVersion aVersion) bool OSKext::declaresExecutable(void) { - return (getPropertyForHostArch(kCFBundleExecutableKey) != NULL); + return getPropertyForHostArch(kCFBundleExecutableKey) != NULL; } /********************************************************************* @@ -3998,62 +3993,63 @@ OSKext::declaresExecutable(void) OSData * OSKext::getExecutable(void) { - OSData * result = NULL; - OSData * extractedExecutable = NULL; // must release - OSData * mkextExecutableRef = NULL; // do not release - - if (flags.builtin) return (sKernelKext->linkedExecutable); - - result = OSDynamicCast(OSData, infoDict->getObject(_kOSKextExecutableKey)); - if (result) { - goto finish; - } - - mkextExecutableRef = OSDynamicCast(OSData, - getPropertyForHostArch(_kOSKextMkextExecutableReferenceKey)); - - if (mkextExecutableRef) { - - MkextEntryRef * mkextEntryRef = (MkextEntryRef *) - mkextExecutableRef->getBytesNoCopy(); - uint32_t mkextVersion = MKEXT_GET_VERSION(mkextEntryRef->mkext); - if (mkextVersion == MKEXT_VERS_2) { - mkext2_file_entry * fileinfo = - (mkext2_file_entry *)mkextEntryRef->fileinfo; - uint32_t compressedSize = MKEXT2_GET_ENTRY_COMPSIZE(fileinfo); - uint32_t fullSize = MKEXT2_GET_ENTRY_FULLSIZE(fileinfo); - extractedExecutable = extractMkext2FileData( - MKEXT2_GET_ENTRY_DATA(fileinfo), "executable", - compressedSize, fullSize); - } else { - OSKextLog(this, kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Kext %s - unknown mkext version 0x%x for executable.", - getIdentifierCString(), mkextVersion); - } - - /* Regardless of success, remove the mkext executable, - * and drop one reference on the mkext. (setExecutable() does not - * replace, it removes, or panics if asked to replace.) 
- */ - infoDict->removeObject(_kOSKextMkextExecutableReferenceKey); - infoDict->removeObject(_kOSKextExecutableExternalDataKey); - - if (extractedExecutable && extractedExecutable->getLength()) { - if (!setExecutable(extractedExecutable)) { - goto finish; - } - result = extractedExecutable; - } else { - goto finish; - } - } + OSData * result = NULL; + OSData * extractedExecutable = NULL; // must release + OSData * mkextExecutableRef = NULL;// do not release + + if (flags.builtin) { + return sKernelKext->linkedExecutable; + } + + result = OSDynamicCast(OSData, infoDict->getObject(_kOSKextExecutableKey)); + if (result) { + goto finish; + } + + mkextExecutableRef = OSDynamicCast(OSData, + getPropertyForHostArch(_kOSKextMkextExecutableReferenceKey)); + + if (mkextExecutableRef) { + MkextEntryRef * mkextEntryRef = (MkextEntryRef *) + mkextExecutableRef->getBytesNoCopy(); + uint32_t mkextVersion = MKEXT_GET_VERSION(mkextEntryRef->mkext); + if (mkextVersion == MKEXT_VERS_2) { + mkext2_file_entry * fileinfo = + (mkext2_file_entry *)mkextEntryRef->fileinfo; + uint32_t compressedSize = MKEXT2_GET_ENTRY_COMPSIZE(fileinfo); + uint32_t fullSize = MKEXT2_GET_ENTRY_FULLSIZE(fileinfo); + extractedExecutable = extractMkext2FileData( + MKEXT2_GET_ENTRY_DATA(fileinfo), "executable", + compressedSize, fullSize); + } else { + OSKextLog(this, kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Kext %s - unknown mkext version 0x%x for executable.", + getIdentifierCString(), mkextVersion); + } + + /* Regardless of success, remove the mkext executable, + * and drop one reference on the mkext. (setExecutable() does not + * replace, it removes, or panics if asked to replace.) + */ + infoDict->removeObject(_kOSKextMkextExecutableReferenceKey); + infoDict->removeObject(_kOSKextExecutableExternalDataKey); + + if (extractedExecutable && extractedExecutable->getLength()) { + if (!setExecutable(extractedExecutable)) { + goto finish; + } + result = extractedExecutable; + } else { + goto finish; + } + } finish: - OSSafeReleaseNULL(extractedExecutable); + OSSafeReleaseNULL(extractedExecutable); - return result; + return result; } /********************************************************************* @@ -4061,7 +4057,7 @@ finish: bool OSKext::isInterface(void) { - return flags.interface; + return flags.interface; } /********************************************************************* @@ -4069,7 +4065,7 @@ OSKext::isInterface(void) bool OSKext::isKernel(void) { - return (this == sKernelKext); + return this == sKernelKext; } /********************************************************************* @@ -4077,7 +4073,7 @@ OSKext::isKernel(void) bool OSKext::isKernelComponent(void) { - return flags.kernelComponent ? true : false; + return flags.kernelComponent ? 
true : false; } /********************************************************************* @@ -4085,7 +4081,7 @@ OSKext::isKernelComponent(void) bool OSKext::isExecutable(void) { - return (!isKernel() && !isInterface() && declaresExecutable()); + return !isKernel() && !isInterface() && declaresExecutable(); } /********************************************************************* @@ -4103,30 +4099,29 @@ OSKext::isExecutable(void) bool OSKext::isLoadableInSafeBoot(void) { - bool result = false; - OSString * required = NULL; // do not release - - if (isKernel()) { - result = true; - goto finish; - } - - required = OSDynamicCast(OSString, - getPropertyForHostArch(kOSBundleRequiredKey)); - if (!required) { - goto finish; - } - if (required->isEqualTo(kOSBundleRequiredRoot) || - required->isEqualTo(kOSBundleRequiredLocalRoot) || - required->isEqualTo(kOSBundleRequiredNetworkRoot) || - required->isEqualTo(kOSBundleRequiredSafeBoot) || - required->isEqualTo(kOSBundleRequiredConsole)) { - - result = true; - } - + bool result = false; + OSString * required = NULL; // do not release + + if (isKernel()) { + result = true; + goto finish; + } + + required = OSDynamicCast(OSString, + getPropertyForHostArch(kOSBundleRequiredKey)); + if (!required) { + goto finish; + } + if (required->isEqualTo(kOSBundleRequiredRoot) || + required->isEqualTo(kOSBundleRequiredLocalRoot) || + required->isEqualTo(kOSBundleRequiredNetworkRoot) || + required->isEqualTo(kOSBundleRequiredSafeBoot) || + required->isEqualTo(kOSBundleRequiredConsole)) { + result = true; + } + finish: - return result; + return result; } /********************************************************************* @@ -4134,14 +4129,15 @@ finish: bool OSKext::isPrelinked(void) { - return flags.prelinked ? true : false; + return flags.prelinked ? true : false; } /********************************************************************* *********************************************************************/ -bool OSKext::isLoaded(void) +bool +OSKext::isLoaded(void) { - return flags.loaded ? true : false; + return flags.loaded ? true : false; } /********************************************************************* @@ -4149,7 +4145,7 @@ bool OSKext::isLoaded(void) bool OSKext::isStarted(void) { - return flags.started ? true : false; + return flags.started ? true : false; } /********************************************************************* @@ -4157,7 +4153,7 @@ OSKext::isStarted(void) bool OSKext::isCPPInitialized(void) { - return flags.CPPInitialized; + return flags.CPPInitialized; } /********************************************************************* @@ -4165,7 +4161,7 @@ OSKext::isCPPInitialized(void) void OSKext::setCPPInitialized(bool initialized) { - flags.CPPInitialized = initialized; + flags.CPPInitialized = initialized; } /********************************************************************* @@ -4173,29 +4169,29 @@ OSKext::setCPPInitialized(bool initialized) uint32_t OSKext::getLoadTag(void) { - return loadTag; + return loadTag; } /********************************************************************* - *********************************************************************/ -void OSKext::getSizeInfo(uint32_t *loadSize, uint32_t *wiredSize) +*********************************************************************/ +void +OSKext::getSizeInfo(uint32_t *loadSize, uint32_t *wiredSize) { - if (linkedExecutable) { - *loadSize = linkedExecutable->getLength(); - - /* If we have a kmod_info struct, calculated the wired size - * from that. Otherwise it's the full load size. 
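
isLoadableInSafeBoot(), reformatted above, admits a kext only when its OSBundleRequired property is one of five values. A sketch of the same membership test factored into a table (the helper itself is hypothetical; the constants are the ones checked above):

static bool
isSafeBootRequiredValue(OSString * required)
{
    static const char * allowed[] = {
        kOSBundleRequiredRoot,
        kOSBundleRequiredLocalRoot,
        kOSBundleRequiredNetworkRoot,
        kOSBundleRequiredSafeBoot,
        kOSBundleRequiredConsole,
    };

    for (size_t i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++) {
        if (required->isEqualTo(allowed[i])) {
            return true;
        }
    }
    return false;
}
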
- */ - if (kmod_info) { - *wiredSize = *loadSize - kmod_info->hdr_size; - } else { - *wiredSize = *loadSize; - } - } - else { - *wiredSize = 0; - *loadSize = 0; - } + if (linkedExecutable) { + *loadSize = linkedExecutable->getLength(); + + /* If we have a kmod_info struct, calculate the wired size + * from that. Otherwise it's the full load size. + */ + if (kmod_info) { + *wiredSize = *loadSize - kmod_info->hdr_size; + } else { + *wiredSize = *loadSize; + } + } else { + *wiredSize = 0; + *loadSize = 0; + } } /********************************************************************* @@ -4203,37 +4199,39 @@ void OSKext::getSizeInfo(uint32_t *loadSize, uint32_t *wiredSize) OSData * OSKext::copyUUID(void) { - OSData * result = NULL; - OSData * theExecutable = NULL; // do not release - const kernel_mach_header_t * header; - - /* An interface kext doesn't have a linked executable with an LC_UUID, - * we create one when it's linked. - */ - if (interfaceUUID) { - result = interfaceUUID; - result->retain(); - goto finish; - } - - if (flags.builtin || isInterface()) return (sKernelKext->copyUUID()); - - /* For real kexts, try to get the UUID from the linked executable, - * or if is hasn't been linked yet, the unrelocated executable. - */ - theExecutable = linkedExecutable; - if (!theExecutable) { - theExecutable = getExecutable(); - } - if (!theExecutable) { - goto finish; - } - - header = (const kernel_mach_header_t *)theExecutable->getBytesNoCopy(); - result = copyMachoUUID(header); + OSData * result = NULL; + OSData * theExecutable = NULL;// do not release + const kernel_mach_header_t * header; + + /* An interface kext doesn't have a linked executable with an LC_UUID, + * we create one when it's linked. + */ + if (interfaceUUID) { + result = interfaceUUID; + result->retain(); + goto finish; + } + + if (flags.builtin || isInterface()) { + return sKernelKext->copyUUID(); + } + + /* For real kexts, try to get the UUID from the linked executable, + * or if it hasn't been linked yet, the unrelocated executable.
+ */ + theExecutable = linkedExecutable; + if (!theExecutable) { + theExecutable = getExecutable(); + } + if (!theExecutable) { + goto finish; + } + + header = (const kernel_mach_header_t *)theExecutable->getBytesNoCopy(); + result = copyMachoUUID(header); finish: - return result; + return result; } /********************************************************************* @@ -4241,11 +4239,10 @@ finish: OSData * OSKext::copyTextUUID(void) { - if (flags.builtin) - { - return (copyMachoUUID((const kernel_mach_header_t *)kmod_info->address)); - } - return (copyUUID()); + if (flags.builtin) { + return copyMachoUUID((const kernel_mach_header_t *)kmod_info->address); + } + return copyUUID(); } /********************************************************************* @@ -4253,33 +4250,33 @@ OSKext::copyTextUUID(void) OSData * OSKext::copyMachoUUID(const kernel_mach_header_t * header) { - OSData * result = NULL; - const struct load_command * load_cmd = NULL; - const struct uuid_command * uuid_cmd = NULL; - uint32_t i; - - load_cmd = (const struct load_command *)&header[1]; - - if (header->magic != MH_MAGIC_KERNEL) { - OSKextLog(NULL, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag, - "%s: bad header %p", - __func__, - header); - goto finish; - } - - for (i = 0; i < header->ncmds; i++) { - if (load_cmd->cmd == LC_UUID) { - uuid_cmd = (struct uuid_command *)load_cmd; - result = OSData::withBytes(uuid_cmd->uuid, sizeof(uuid_cmd->uuid)); - goto finish; - } - load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize); - } + OSData * result = NULL; + const struct load_command * load_cmd = NULL; + const struct uuid_command * uuid_cmd = NULL; + uint32_t i; + + load_cmd = (const struct load_command *)&header[1]; + + if (header->magic != MH_MAGIC_KERNEL) { + OSKextLog(NULL, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag, + "%s: bad header %p", + __func__, + header); + goto finish; + } + + for (i = 0; i < header->ncmds; i++) { + if (load_cmd->cmd == LC_UUID) { + uuid_cmd = (struct uuid_command *)load_cmd; + result = OSData::withBytes(uuid_cmd->uuid, sizeof(uuid_cmd->uuid)); + goto finish; + } + load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize); + } finish: - return result; + return result; } /********************************************************************* @@ -4314,27 +4311,28 @@ finish: #define ARCH_SEPARATOR_CHAR '_' -static char * makeHostArchKey(const char * key, uint32_t * keySizeOut) -{ - char * result = NULL; - uint32_t keyLength = strlen(key); - uint32_t keySize; - - /* Add 1 for the ARCH_SEPARATOR_CHAR, and 1 for the '\0'. - */ - keySize = 1 + 1 + strlen(key) + strlen(ARCHNAME); - result = (char *)kalloc_tag(keySize, VM_KERN_MEMORY_OSKEXT); - if (!result) { - goto finish; - } - strlcpy(result, key, keySize); - result[keyLength++] = ARCH_SEPARATOR_CHAR; - result[keyLength] = '\0'; - strlcat(result, ARCHNAME, keySize); - *keySizeOut = keySize; +static char * +makeHostArchKey(const char * key, uint32_t * keySizeOut) +{ + char * result = NULL; + uint32_t keyLength = strlen(key); + uint32_t keySize; + + /* Add 1 for the ARCH_SEPARATOR_CHAR, and 1 for the '\0'. 
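
copyUUID()/copyMachoUUID() above return a retained 16-byte OSData extracted from the executable's LC_UUID load command (or the interface/kernel UUID in the special cases). A logging sketch using the kernel's uuid_unparse(); the function name is hypothetical:

#include <uuid/uuid.h>

static void
logKextUUID(OSKext * kext)
{
    OSData * uuidData = kext->copyUUID();

    if (uuidData && uuidData->getLength() == sizeof(uuid_t)) {
        uuid_string_t uuidCString;
        uuid_unparse((const unsigned char *)uuidData->getBytesNoCopy(),
            uuidCString);
        printf("kext UUID: %s\n", uuidCString);
    }
    OSSafeReleaseNULL(uuidData);    /* balance copyUUID()'s retain */
}
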
+ */ + keySize = 1 + 1 + strlen(key) + strlen(ARCHNAME); + result = (char *)kalloc_tag(keySize, VM_KERN_MEMORY_OSKEXT); + if (!result) { + goto finish; + } + strlcpy(result, key, keySize); + result[keyLength++] = ARCH_SEPARATOR_CHAR; + result[keyLength] = '\0'; + strlcat(result, ARCHNAME, keySize); + *keySizeOut = keySize; finish: - return result; + return result; } /********************************************************************* @@ -4342,312 +4340,307 @@ finish: OSObject * OSKext::getPropertyForHostArch(const char * key) { - OSObject * result = NULL; // do not release - uint32_t hostArchKeySize = 0; - char * hostArchKey = NULL; // must kfree - - if (!key || !infoDict) { - goto finish; - } - - /* Some properties are not allowed to be arch-variant: - * - Any CFBundle... property. - * - OSBundleIsInterface. - * - OSKernelResource. - */ - if (STRING_HAS_PREFIX(key, "OS") || - STRING_HAS_PREFIX(key, "IO")) { - - hostArchKey = makeHostArchKey(key, &hostArchKeySize); - if (!hostArchKey) { - OSKextLog(/* kext (this isn't about a kext) */ NULL, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag, - "Allocation failure."); - goto finish; - } - result = infoDict->getObject(hostArchKey); - } - - if (!result) { - result = infoDict->getObject(key); - } + OSObject * result = NULL;// do not release + uint32_t hostArchKeySize = 0; + char * hostArchKey = NULL;// must kfree + + if (!key || !infoDict) { + goto finish; + } + + /* Some properties are not allowed to be arch-variant: + * - Any CFBundle... property. + * - OSBundleIsInterface. + * - OSKernelResource. + */ + if (STRING_HAS_PREFIX(key, "OS") || + STRING_HAS_PREFIX(key, "IO")) { + hostArchKey = makeHostArchKey(key, &hostArchKeySize); + if (!hostArchKey) { + OSKextLog(/* kext (this isn't about a kext) */ NULL, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag, + "Allocation failure."); + goto finish; + } + result = infoDict->getObject(hostArchKey); + } + + if (!result) { + result = infoDict->getObject(key); + } finish: - if (hostArchKey) kfree(hostArchKey, hostArchKeySize); - return result; + if (hostArchKey) { + kfree(hostArchKey, hostArchKeySize); + } + return result; } #if PRAGMA_MARK #pragma mark Load/Start/Stop/Unload #endif -#define isWhiteSpace(c) ((c) == ' ' || (c) == '\t' || (c) == '\r' || (c) == ',' || (c) == '\n') +#define isWhiteSpace(c) ((c) == ' ' || (c) == '\t' || (c) == '\r' || (c) == ',' || (c) == '\n') /********************************************************************* - * sExcludeListByID is a dictionary with keys / values of: - * key = bundleID string of kext we will not allow to load - * value = version string(s) of the kext that is to be denied loading. - * The version strings can be comma delimited. For example if kext - * com.foocompany.fookext has two versions that we want to deny - * loading then the version strings might look like: - * 1.0.0, 1.0.1 - * If the current fookext has a version of 1.0.0 OR 1.0.1 we will - * not load the kext. - * - * Value may also be in the form of "LE 2.0.0" (version numbers - * less than or equal to 2.0.0 will not load) or "LT 2.0.0" (version - * number less than 2.0.0 will not load) - * - * NOTE - we cannot use the characters "<=" or "<" because we have code - * that serializes plists and treats '<' as a special character. - *********************************************************************/ -bool +* sExcludeListByID is a dictionary with keys / values of: +* key = bundleID string of kext we will not allow to load +* value = version string(s) of the kext that is to be denied loading. 
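
makeHostArchKey() and getPropertyForHostArch() above implement per-architecture property overrides: for keys beginning "OS" or "IO", a key suffixed with ARCH_SEPARATOR_CHAR ('_') plus the host ARCHNAME shadows the plain key. An illustration only, since getPropertyForHostArch() is internal to OSKext; the x86_64 architecture is assumed for the example:

/* On a hypothetical x86_64 kernel, this lookup consults
 * "OSBundleRequired_x86_64" first and falls back to "OSBundleRequired".
 * CFBundle... keys skip the arch-variant pass entirely, per the comment
 * in the code above.
 */
static OSObject *
requiredPropertyExample(OSKext * kext)
{
    return kext->getPropertyForHostArch(kOSBundleRequiredKey);
}
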
+* The version strings can be comma delimited. For example if kext +* com.foocompany.fookext has two versions that we want to deny +* loading then the version strings might look like: +* 1.0.0, 1.0.1 +* If the current fookext has a version of 1.0.0 OR 1.0.1 we will +* not load the kext. +* +* Value may also be in the form of "LE 2.0.0" (version numbers +* less than or equal to 2.0.0 will not load) or "LT 2.0.0" (version +* number less than 2.0.0 will not load) +* +* NOTE - we cannot use the characters "<=" or "<" because we have code +* that serializes plists and treats '<' as a special character. +*********************************************************************/ +bool OSKext::isInExcludeList(void) { - OSString * versionString = NULL; // do not release - char * versionCString = NULL; // do not free - size_t i; - boolean_t wantLessThan = false; - boolean_t wantLessThanEqualTo = false; - boolean_t isInExcludeList = true; - char myBuffer[32]; - - IORecursiveLockLock(sKextLock); - - if (!sExcludeListByID) { - isInExcludeList = false; - } else { - /* look up by bundleID in our exclude list and if found get version - * string (or strings) that we will not allow to load - */ - versionString = OSDynamicCast(OSString, sExcludeListByID->getObject(bundleID)); - if (versionString == NULL || versionString->getLength() > (sizeof(myBuffer) - 1)) { - isInExcludeList = false; - } - } - - IORecursiveLockUnlock(sKextLock); - - if (!isInExcludeList) { - return(false); - } - - /* parse version strings */ - versionCString = (char *) versionString->getCStringNoCopy(); - - /* look for "LT" or "LE" form of version string, must be in first two - * positions. - */ - if (*versionCString == 'L' && *(versionCString + 1) == 'T') { - wantLessThan = true; - versionCString +=2; - } - else if (*versionCString == 'L' && *(versionCString + 1) == 'E') { - wantLessThanEqualTo = true; - versionCString +=2; - } - - for (i = 0; *versionCString != 0x00; versionCString++) { - /* skip whitespace */ - if (isWhiteSpace(*versionCString)) { - continue; - } - - /* peek ahead for version string separator or null terminator */ - if (*(versionCString + 1) == ',' || *(versionCString + 1) == 0x00) { - - /* OK, we have a version string */ - myBuffer[i++] = *versionCString; - myBuffer[i] = 0x00; - - OSKextVersion excludeVers; - excludeVers = OSKextParseVersionString(myBuffer); - - if (wantLessThanEqualTo) { - if (version <= excludeVers) { - return(true); - } - } - else if (wantLessThan) { - if (version < excludeVers) { - return(true); - } - } - else if ( version == excludeVers ) { - return(true); - } - - /* reset for the next (if any) version string */ - i = 0; - wantLessThan = false; - wantLessThanEqualTo = false; - } - else { - /* save valid version character */ - myBuffer[i++] = *versionCString; - - /* make sure bogus version string doesn't overrun local buffer */ - if ( i >= sizeof(myBuffer) ) { - break; - } - } - } - - return(false); -} + OSString * versionString = NULL;// do not release + char * versionCString = NULL;// do not free + size_t i; + boolean_t wantLessThan = false; + boolean_t wantLessThanEqualTo = false; + boolean_t isInExcludeList = true; + char myBuffer[32]; + + IORecursiveLockLock(sKextLock); + + if (!sExcludeListByID) { + isInExcludeList = false; + } else { + /* look up by bundleID in our exclude list and if found get version + * string (or strings) that we will not allow to load + */ + versionString = OSDynamicCast(OSString, sExcludeListByID->getObject(bundleID)); + if (versionString == NULL || 
versionString->getLength() > (sizeof(myBuffer) - 1)) { + isInExcludeList = false; + } + } + + IORecursiveLockUnlock(sKextLock); + + if (!isInExcludeList) { + return false; + } + + /* parse version strings */ + versionCString = (char *) versionString->getCStringNoCopy(); + + /* look for "LT" or "LE" form of version string, must be in first two + * positions. + */ + if (*versionCString == 'L' && *(versionCString + 1) == 'T') { + wantLessThan = true; + versionCString += 2; + } else if (*versionCString == 'L' && *(versionCString + 1) == 'E') { + wantLessThanEqualTo = true; + versionCString += 2; + } + + for (i = 0; *versionCString != 0x00; versionCString++) { + /* skip whitespace */ + if (isWhiteSpace(*versionCString)) { + continue; + } + + /* peek ahead for version string separator or null terminator */ + if (*(versionCString + 1) == ',' || *(versionCString + 1) == 0x00) { + /* OK, we have a version string */ + myBuffer[i++] = *versionCString; + myBuffer[i] = 0x00; + + OSKextVersion excludeVers; + excludeVers = OSKextParseVersionString(myBuffer); + + if (wantLessThanEqualTo) { + if (version <= excludeVers) { + return true; + } + } else if (wantLessThan) { + if (version < excludeVers) { + return true; + } + } else if (version == excludeVers) { + return true; + } + + /* reset for the next (if any) version string */ + i = 0; + wantLessThan = false; + wantLessThanEqualTo = false; + } else { + /* save valid version character */ + myBuffer[i++] = *versionCString; + + /* make sure bogus version string doesn't overrun local buffer */ + if (i >= sizeof(myBuffer)) { + break; + } + } + } + + return false; +} /********************************************************************* *********************************************************************/ /* static */ OSReturn OSKext::loadKextWithIdentifier( - const char * kextIdentifierCString, - Boolean allowDeferFlag, - Boolean delayAutounloadFlag, - OSKextExcludeLevel startOpt, - OSKextExcludeLevel startMatchingOpt, - OSArray * personalityNames) -{ - OSReturn result = kOSReturnError; - OSString * kextIdentifier = NULL; // must release - - kextIdentifier = OSString::withCString(kextIdentifierCString); - if (!kextIdentifier) { - result = kOSKextReturnNoMemory; - goto finish; - } - result = OSKext::loadKextWithIdentifier(kextIdentifier, - allowDeferFlag, delayAutounloadFlag, - startOpt, startMatchingOpt, personalityNames); - + const char * kextIdentifierCString, + Boolean allowDeferFlag, + Boolean delayAutounloadFlag, + OSKextExcludeLevel startOpt, + OSKextExcludeLevel startMatchingOpt, + OSArray * personalityNames) +{ + OSReturn result = kOSReturnError; + OSString * kextIdentifier = NULL; // must release + + kextIdentifier = OSString::withCString(kextIdentifierCString); + if (!kextIdentifier) { + result = kOSKextReturnNoMemory; + goto finish; + } + result = OSKext::loadKextWithIdentifier(kextIdentifier, + allowDeferFlag, delayAutounloadFlag, + startOpt, startMatchingOpt, personalityNames); + finish: - OSSafeReleaseNULL(kextIdentifier); - return result; + OSSafeReleaseNULL(kextIdentifier); + return result; } /********************************************************************* *********************************************************************/ OSReturn OSKext::loadKextWithIdentifier( - OSString * kextIdentifier, - Boolean allowDeferFlag, - Boolean delayAutounloadFlag, - OSKextExcludeLevel startOpt, - OSKextExcludeLevel startMatchingOpt, - OSArray * personalityNames) -{ - OSReturn result = kOSReturnError; - OSReturn pingResult = kOSReturnError; - OSKext * 
theKext = NULL; // do not release - OSDictionary * loadRequest = NULL; // must release - const OSSymbol * kextIdentifierSymbol = NULL; // must release - - IORecursiveLockLock(sKextLock); - - if (!kextIdentifier) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - - OSKext::recordIdentifierRequest(kextIdentifier); - - theKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); - if (!theKext) { - if (!allowDeferFlag) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Can't load kext %s - not found.", - kextIdentifier->getCStringNoCopy()); - goto finish; - } - - if (!sKernelRequestsEnabled) { - OSKextLog(theKext, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Can't load kext %s - requests to user space are disabled.", - kextIdentifier->getCStringNoCopy()); - result = kOSKextReturnDisabled; - goto finish; - } - - /* Create a new request unless one is already sitting - * in sKernelRequests for this bundle identifier - */ - kextIdentifierSymbol = OSSymbol::withString(kextIdentifier); - if (!sPostedKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) { - result = _OSKextCreateRequest(kKextRequestPredicateRequestLoad, - &loadRequest); - if (result != kOSReturnSuccess) { - goto finish; - } - if (!_OSKextSetRequestArgument(loadRequest, - kKextRequestArgumentBundleIdentifierKey, kextIdentifier)) { - - result = kOSKextReturnNoMemory; - goto finish; - } - if (!sKernelRequests->setObject(loadRequest)) { - result = kOSKextReturnNoMemory; - goto finish; - } - - if (!sPostedKextLoadIdentifiers->setObject(kextIdentifierSymbol)) { - result = kOSKextReturnNoMemory; - goto finish; - } - - OSKextLog(theKext, - kOSKextLogDebugLevel | - kOSKextLogLoadFlag, - "Kext %s not found; queued load request to user space.", - kextIdentifier->getCStringNoCopy()); - } - - pingResult = OSKext::pingKextd(); - if (pingResult == kOSKextReturnDisabled) { - OSKextLog(/* kext */ NULL, - ((sPrelinkBoot) ? 
kOSKextLogDebugLevel : kOSKextLogErrorLevel) | - kOSKextLogLoadFlag, - "Kext %s might not load - kextd is currently unavailable.", - kextIdentifier->getCStringNoCopy()); - } - - result = kOSKextReturnDeferred; - goto finish; - } - - result = theKext->load(startOpt, startMatchingOpt, personalityNames); - - if (result != kOSReturnSuccess) { - OSKextLog(theKext, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Failed to load kext %s (error 0x%x).", - kextIdentifier->getCStringNoCopy(), (int)result); - - OSKext::removeKext(theKext, - /* terminateService/removePersonalities */ true); - goto finish; - } - - if (delayAutounloadFlag) { - OSKextLog(theKext, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, - "Setting delayed autounload for %s.", - kextIdentifier->getCStringNoCopy()); - theKext->flags.delayAutounload = 1; - } + OSString * kextIdentifier, + Boolean allowDeferFlag, + Boolean delayAutounloadFlag, + OSKextExcludeLevel startOpt, + OSKextExcludeLevel startMatchingOpt, + OSArray * personalityNames) +{ + OSReturn result = kOSReturnError; + OSReturn pingResult = kOSReturnError; + OSKext * theKext = NULL;// do not release + OSDictionary * loadRequest = NULL;// must release + const OSSymbol * kextIdentifierSymbol = NULL;// must release + + IORecursiveLockLock(sKextLock); + + if (!kextIdentifier) { + result = kOSKextReturnInvalidArgument; + goto finish; + } + + OSKext::recordIdentifierRequest(kextIdentifier); + + theKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); + if (!theKext) { + if (!allowDeferFlag) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Can't load kext %s - not found.", + kextIdentifier->getCStringNoCopy()); + goto finish; + } + + if (!sKernelRequestsEnabled) { + OSKextLog(theKext, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Can't load kext %s - requests to user space are disabled.", + kextIdentifier->getCStringNoCopy()); + result = kOSKextReturnDisabled; + goto finish; + } + + /* Create a new request unless one is already sitting + * in sKernelRequests for this bundle identifier + */ + kextIdentifierSymbol = OSSymbol::withString(kextIdentifier); + if (!sPostedKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) { + result = _OSKextCreateRequest(kKextRequestPredicateRequestLoad, + &loadRequest); + if (result != kOSReturnSuccess) { + goto finish; + } + if (!_OSKextSetRequestArgument(loadRequest, + kKextRequestArgumentBundleIdentifierKey, kextIdentifier)) { + result = kOSKextReturnNoMemory; + goto finish; + } + if (!sKernelRequests->setObject(loadRequest)) { + result = kOSKextReturnNoMemory; + goto finish; + } + + if (!sPostedKextLoadIdentifiers->setObject(kextIdentifierSymbol)) { + result = kOSKextReturnNoMemory; + goto finish; + } + + OSKextLog(theKext, + kOSKextLogDebugLevel | + kOSKextLogLoadFlag, + "Kext %s not found; queued load request to user space.", + kextIdentifier->getCStringNoCopy()); + } + + pingResult = OSKext::pingKextd(); + if (pingResult == kOSKextReturnDisabled) { + OSKextLog(/* kext */ NULL, + ((sPrelinkBoot) ? 
kOSKextLogDebugLevel : kOSKextLogErrorLevel) | + kOSKextLogLoadFlag, + "Kext %s might not load - kextd is currently unavailable.", + kextIdentifier->getCStringNoCopy()); + } + + result = kOSKextReturnDeferred; + goto finish; + } + + result = theKext->load(startOpt, startMatchingOpt, personalityNames); + + if (result != kOSReturnSuccess) { + OSKextLog(theKext, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Failed to load kext %s (error 0x%x).", + kextIdentifier->getCStringNoCopy(), (int)result); + + OSKext::removeKext(theKext, + /* terminateService/removePersonalities */ true); + goto finish; + } + + if (delayAutounloadFlag) { + OSKextLog(theKext, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, + "Setting delayed autounload for %s.", + kextIdentifier->getCStringNoCopy()); + theKext->flags.delayAutounload = 1; + } finish: - OSSafeReleaseNULL(loadRequest); - OSSafeReleaseNULL(kextIdentifierSymbol); - - IORecursiveLockUnlock(sKextLock); + OSSafeReleaseNULL(loadRequest); + OSSafeReleaseNULL(kextIdentifierSymbol); + + IORecursiveLockUnlock(sKextLock); - return result; + return result; } /********************************************************************* @@ -4655,450 +4648,451 @@ finish: /* static */ void OSKext::recordIdentifierRequest( - OSString * kextIdentifier) -{ - const OSSymbol * kextIdentifierSymbol = NULL; // must release - bool fail = false; - - if (!sAllKextLoadIdentifiers || !kextIdentifier) { - goto finish; - } - - kextIdentifierSymbol = OSSymbol::withString(kextIdentifier); - if (!kextIdentifierSymbol) { - // xxx - this is really a basic alloc failure - fail = true; - goto finish; - } - - IORecursiveLockLock(sKextLock); - if (!sAllKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) { - if (!sAllKextLoadIdentifiers->setObject(kextIdentifierSymbol)) { - fail = true; - } else { - // xxx - need to find a way to associate this whole func w/the kext - OSKextLog(/* kext */ NULL, - // xxx - check level - kOSKextLogStepLevel | - kOSKextLogArchiveFlag, - "Recorded kext %s as a candidate for inclusion in prelinked kernel.", - kextIdentifier->getCStringNoCopy()); - } - } - IORecursiveLockUnlock(sKextLock); + OSString * kextIdentifier) +{ + const OSSymbol * kextIdentifierSymbol = NULL; // must release + bool fail = false; + + if (!sAllKextLoadIdentifiers || !kextIdentifier) { + goto finish; + } + + kextIdentifierSymbol = OSSymbol::withString(kextIdentifier); + if (!kextIdentifierSymbol) { + // xxx - this is really a basic alloc failure + fail = true; + goto finish; + } + + IORecursiveLockLock(sKextLock); + if (!sAllKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) { + if (!sAllKextLoadIdentifiers->setObject(kextIdentifierSymbol)) { + fail = true; + } else { + // xxx - need to find a way to associate this whole func w/the kext + OSKextLog(/* kext */ NULL, + // xxx - check level + kOSKextLogStepLevel | + kOSKextLogArchiveFlag, + "Recorded kext %s as a candidate for inclusion in prelinked kernel.", + kextIdentifier->getCStringNoCopy()); + } + } + IORecursiveLockUnlock(sKextLock); finish: - if (fail) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogArchiveFlag, - "Failed to record kext %s as a candidate for inclusion in prelinked kernel.", - kextIdentifier->getCStringNoCopy()); - } - OSSafeReleaseNULL(kextIdentifierSymbol); - return; + if (fail) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogArchiveFlag, + "Failed to record kext %s as a candidate for inclusion in prelinked kernel.", + 
kextIdentifier->getCStringNoCopy()); + } + OSSafeReleaseNULL(kextIdentifierSymbol); + return; } /********************************************************************* *********************************************************************/ OSReturn OSKext::load( - OSKextExcludeLevel startOpt, - OSKextExcludeLevel startMatchingOpt, - OSArray * personalityNames) -{ - OSReturn result = kOSReturnError; - kern_return_t kxldResult; - OSKextExcludeLevel dependenciesStartOpt = startOpt; - OSKextExcludeLevel dependenciesStartMatchingOpt = startMatchingOpt; - unsigned int i, count; - Boolean alreadyLoaded = false; - OSKext * lastLoadedKext = NULL; - - if (isInExcludeList()) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag | - kOSKextLogLoadFlag, - "Kext %s is in exclude list, not loadable", - getIdentifierCString()); - - result = kOSKextReturnNotLoadable; - goto finish; - } - - if (isLoaded()) { - alreadyLoaded = true; - result = kOSReturnSuccess; - - OSKextLog(this, - kOSKextLogDebugLevel | - kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, - "Kext %s is already loaded.", - getIdentifierCString()); - goto loaded; - } - + OSKextExcludeLevel startOpt, + OSKextExcludeLevel startMatchingOpt, + OSArray * personalityNames) +{ + OSReturn result = kOSReturnError; + kern_return_t kxldResult; + OSKextExcludeLevel dependenciesStartOpt = startOpt; + OSKextExcludeLevel dependenciesStartMatchingOpt = startMatchingOpt; + unsigned int i, count; + Boolean alreadyLoaded = false; + OSKext * lastLoadedKext = NULL; + + if (isInExcludeList()) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag | + kOSKextLogLoadFlag, + "Kext %s is in exclude list, not loadable", + getIdentifierCString()); + + result = kOSKextReturnNotLoadable; + goto finish; + } + + if (isLoaded()) { + alreadyLoaded = true; + result = kOSReturnSuccess; + + OSKextLog(this, + kOSKextLogDebugLevel | + kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, + "Kext %s is already loaded.", + getIdentifierCString()); + goto loaded; + } + #if CONFIG_MACF - if (current_task() != kernel_task) { - int macCheckResult = 0; - kauth_cred_t cred = NULL; - - cred = kauth_cred_get_with_ref(); - macCheckResult = mac_kext_check_load(cred, getIdentifierCString()); - kauth_cred_unref(&cred); - - if (macCheckResult != 0) { - result = kOSReturnError; - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogLoadFlag, - "Failed to load kext %s (MAC policy error 0x%x).", - getIdentifierCString(), macCheckResult); - goto finish; - } - } + if (current_task() != kernel_task) { + int macCheckResult = 0; + kauth_cred_t cred = NULL; + + cred = kauth_cred_get_with_ref(); + macCheckResult = mac_kext_check_load(cred, getIdentifierCString()); + kauth_cred_unref(&cred); + + if (macCheckResult != 0) { + result = kOSReturnError; + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Failed to load kext %s (MAC policy error 0x%x).", + getIdentifierCString(), macCheckResult); + goto finish; + } + } #endif - if (!sLoadEnabled) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext loading is disabled (attempt to load kext %s).", - getIdentifierCString()); - result = kOSKextReturnDisabled; - goto finish; - } - - /* If we've pushed the next available load tag to the invalid value, - * we can't load any more kexts. 
- */ - if (sNextLoadTag == kOSKextInvalidLoadTag) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Can't load kext %s - no more load tags to assign.", - getIdentifierCString()); - result = kOSKextReturnNoResources; - goto finish; - } - - /* This is a bit of a hack, because we shouldn't be handling - * personalities within the load function. - */ - if (!declaresExecutable()) { - /* There is a special case where a non-executable kext can be loaded: the - * AppleKextExcludeList. Detect that special kext by bundle identifier and - * load its metadata into the global data structures, if appropriate - */ - if (strcmp(getIdentifierCString(), "com.apple.driver.KextExcludeList") == 0) { - boolean_t updated = updateExcludeList(infoDict); - if (updated) { - OSKextLog(this, - kOSKextLogDebugLevel | kOSKextLogLoadFlag, - "KextExcludeList was updated to version: %lld", sExcludeListVersion); - } - } - result = kOSReturnSuccess; - goto loaded; - } - - /* Are we in safe boot? - */ - if (sSafeBoot && !isLoadableInSafeBoot()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Can't load kext %s - not loadable during safe boot.", - getIdentifierCString()); - result = kOSKextReturnBootLevel; - goto finish; - } - - OSKextLog(this, - kOSKextLogProgressLevel | kOSKextLogLoadFlag, - "Loading kext %s.", - getIdentifierCString()); - - if (!sKxldContext) { - kxldResult = kxld_create_context(&sKxldContext, &kern_allocate, - &kxld_log_callback, /* Flags */ (KXLDFlags) 0, - /* cputype */ 0, /* cpusubtype */ 0, /* page size */ 0); - if (kxldResult) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogLinkFlag, - "Can't load kext %s - failed to create link context.", - getIdentifierCString()); - result = kOSKextReturnNoMemory; - goto finish; - } - } - - /* We only need to resolve dependencies once for the whole graph, but - * resolveDependencies will just return if there's no work to do, so it's - * safe to call it more than once. - */ - if (!resolveDependencies()) { - // xxx - check resolveDependencies() for log msg - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogDependenciesFlag, - "Can't load kext %s - failed to resolve library dependencies.", - getIdentifierCString()); - result = kOSKextReturnDependencies; - goto finish; - } - - /* If we are excluding just the kext being loaded now (and not its - * dependencies), drop the exclusion level to none so dependencies - * start and/or add their personalities. - */ - if (dependenciesStartOpt == kOSKextExcludeKext) { - dependenciesStartOpt = kOSKextExcludeNone; - } - - if (dependenciesStartMatchingOpt == kOSKextExcludeKext) { - dependenciesStartMatchingOpt = kOSKextExcludeNone; - } - - /* Load the dependencies, recursively. - */ - count = getNumDependencies(); - for (i = 0; i < count; i++) { - OSKext * dependency = OSDynamicCast(OSKext, - dependencies->getObject(i)); - if (dependency == NULL) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogDependenciesFlag, - "Internal error loading kext %s; dependency disappeared.", - getIdentifierCString()); - result = kOSKextReturnInternalError; - goto finish; - } - - /* Dependencies must be started accorting to the opt, - * but not given the personality names of the main kext. 
- */ - result = dependency->load(dependenciesStartOpt, - dependenciesStartMatchingOpt, - /* personalityNames */ NULL); - if (result != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogDependenciesFlag, - "Dependency %s of kext %s failed to load.", - dependency->getIdentifierCString(), - getIdentifierCString()); - - OSKext::removeKext(dependency, - /* terminateService/removePersonalities */ true); - result = kOSKextReturnDependencyLoadError; - - goto finish; - } - } - - result = loadExecutable(); - if (result != KERN_SUCCESS) { - goto finish; - } - - pendingPgoHead.next = &pendingPgoHead; - pendingPgoHead.prev = &pendingPgoHead; - - // The kernel PRNG is not initialized when the first kext is - // loaded, so use early random - uuid_generate_early_random(instance_uuid); - account = IONew(OSKextAccount, 1); - if (!account) { - result = KERN_MEMORY_ERROR; - goto finish; - } - bzero(account, sizeof(*account)); - account->loadTag = kmod_info->id; - account->site.refcount = 0; - account->site.flags = VM_TAG_KMOD; - account->kext = this; - if (gIOSurfaceIdentifier == bundleID) { - vm_tag_alloc(&account->site); - gIOSurfaceTag = account->site.tag; - } - - flags.loaded = true; - - /* Add the kext to the list of loaded kexts and update the kmod_info - * struct to point to that of the last loaded kext (which is the way - * it's always been done, though I'd rather do them in order now). - */ - lastLoadedKext = OSDynamicCast(OSKext, sLoadedKexts->getLastObject()); - sLoadedKexts->setObject(this); - - /* Keep the kernel itself out of the kmod list. - */ - if (lastLoadedKext->isKernel()) { - lastLoadedKext = NULL; - } - - if (lastLoadedKext) { - kmod_info->next = lastLoadedKext->kmod_info; - } - - notifyKextLoadObservers(this, kmod_info); - - /* Make the global kmod list point at the just-loaded kext. Note that the - * __kernel__ kext isn't in this list, as it wasn't before SnowLeopard, - * although we do report it in kextstat these days by using the newer - * OSArray of loaded kexts, which does contain it. - * - * (The OSKext object representing the kernel doesn't even have a kmod_info - * struct, though I suppose we could stick a pointer to it from the - * static struct in OSRuntime.cpp.) - */ - kmod = kmod_info; - - /* Save the list of loaded kexts in case we panic. - */ - OSKext::saveLoadedKextPanicList(); - - if (isExecutable()) { - OSKext::updateLoadedKextSummaries(); - savePanicString(/* isLoading */ true); + if (!sLoadEnabled) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext loading is disabled (attempt to load kext %s).", + getIdentifierCString()); + result = kOSKextReturnDisabled; + goto finish; + } + + /* If we've pushed the next available load tag to the invalid value, + * we can't load any more kexts. + */ + if (sNextLoadTag == kOSKextInvalidLoadTag) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Can't load kext %s - no more load tags to assign.", + getIdentifierCString()); + result = kOSKextReturnNoResources; + goto finish; + } + + /* This is a bit of a hack, because we shouldn't be handling + * personalities within the load function. + */ + if (!declaresExecutable()) { + /* There is a special case where a non-executable kext can be loaded: the + * AppleKextExcludeList. 
Detect that special kext by bundle identifier and
+	 * load its metadata into the global data structures, if appropriate
+	 */
+	if (strcmp(getIdentifierCString(), "com.apple.driver.KextExcludeList") == 0) {
+		boolean_t updated = updateExcludeList(infoDict);
+		if (updated) {
+			OSKextLog(this,
+			    kOSKextLogDebugLevel | kOSKextLogLoadFlag,
+			    "KextExcludeList was updated to version: %lld", sExcludeListVersion);
+		}
+	}
+	result = kOSReturnSuccess;
+	goto loaded;
+	}
+
+	/* Are we in safe boot?
+	 */
+	if (sSafeBoot && !isLoadableInSafeBoot()) {
+		OSKextLog(this,
+		    kOSKextLogErrorLevel |
+		    kOSKextLogLoadFlag,
+		    "Can't load kext %s - not loadable during safe boot.",
+		    getIdentifierCString());
+		result = kOSKextReturnBootLevel;
+		goto finish;
+	}
+
+	OSKextLog(this,
+	    kOSKextLogProgressLevel | kOSKextLogLoadFlag,
+	    "Loading kext %s.",
+	    getIdentifierCString());
+
+	if (!sKxldContext) {
+		kxldResult = kxld_create_context(&sKxldContext, &kern_allocate,
+		    &kxld_log_callback, /* Flags */ (KXLDFlags) 0,
+		    /* cputype */ 0, /* cpusubtype */ 0, /* page size */ 0);
+		if (kxldResult) {
+			OSKextLog(this,
+			    kOSKextLogErrorLevel |
+			    kOSKextLogLoadFlag | kOSKextLogLinkFlag,
+			    "Can't load kext %s - failed to create link context.",
+			    getIdentifierCString());
+			result = kOSKextReturnNoMemory;
+			goto finish;
+		}
+	}
+
+	/* We only need to resolve dependencies once for the whole graph, but
+	 * resolveDependencies will just return if there's no work to do, so it's
+	 * safe to call it more than once.
+	 */
+	if (!resolveDependencies()) {
+		// xxx - check resolveDependencies() for log msg
+		OSKextLog(this,
+		    kOSKextLogErrorLevel |
+		    kOSKextLogLoadFlag | kOSKextLogDependenciesFlag,
+		    "Can't load kext %s - failed to resolve library dependencies.",
+		    getIdentifierCString());
+		result = kOSKextReturnDependencies;
+		goto finish;
+	}
+
+	/* If we are excluding just the kext being loaded now (and not its
+	 * dependencies), drop the exclusion level to none so dependencies
+	 * start and/or add their personalities.
+	 */
+	if (dependenciesStartOpt == kOSKextExcludeKext) {
+		dependenciesStartOpt = kOSKextExcludeNone;
+	}
+
+	if (dependenciesStartMatchingOpt == kOSKextExcludeKext) {
+		dependenciesStartMatchingOpt = kOSKextExcludeNone;
+	}
+
+	/* Load the dependencies, recursively.
+	 */
+	count = getNumDependencies();
+	for (i = 0; i < count; i++) {
+		OSKext * dependency = OSDynamicCast(OSKext,
+		    dependencies->getObject(i));
+		if (dependency == NULL) {
+			OSKextLog(this,
+			    kOSKextLogErrorLevel |
+			    kOSKextLogLoadFlag | kOSKextLogDependenciesFlag,
+			    "Internal error loading kext %s; dependency disappeared.",
+			    getIdentifierCString());
+			result = kOSKextReturnInternalError;
+			goto finish;
+		}
+
+		/* Dependencies must be started according to the opt,
+		 * but not given the personality names of the main kext.
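+		 * (For example, a kext loaded with kOSKextExcludeKext still
+		 * starts its libraries: the exclusion level for dependencies
+		 * was dropped to kOSKextExcludeNone above.)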
+ */ + result = dependency->load(dependenciesStartOpt, + dependenciesStartMatchingOpt, + /* personalityNames */ NULL); + if (result != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag | kOSKextLogDependenciesFlag, + "Dependency %s of kext %s failed to load.", + dependency->getIdentifierCString(), + getIdentifierCString()); + + OSKext::removeKext(dependency, + /* terminateService/removePersonalities */ true); + result = kOSKextReturnDependencyLoadError; + + goto finish; + } + } + + result = loadExecutable(); + if (result != KERN_SUCCESS) { + goto finish; + } + + pendingPgoHead.next = &pendingPgoHead; + pendingPgoHead.prev = &pendingPgoHead; + + // The kernel PRNG is not initialized when the first kext is + // loaded, so use early random + uuid_generate_early_random(instance_uuid); + account = IONew(OSKextAccount, 1); + if (!account) { + result = KERN_MEMORY_ERROR; + goto finish; + } + bzero(account, sizeof(*account)); + account->loadTag = kmod_info->id; + account->site.refcount = 0; + account->site.flags = VM_TAG_KMOD; + account->kext = this; + if (gIOSurfaceIdentifier == bundleID) { + vm_tag_alloc(&account->site); + gIOSurfaceTag = account->site.tag; + } + + flags.loaded = true; + + /* Add the kext to the list of loaded kexts and update the kmod_info + * struct to point to that of the last loaded kext (which is the way + * it's always been done, though I'd rather do them in order now). + */ + lastLoadedKext = OSDynamicCast(OSKext, sLoadedKexts->getLastObject()); + sLoadedKexts->setObject(this); + + /* Keep the kernel itself out of the kmod list. + */ + if (lastLoadedKext->isKernel()) { + lastLoadedKext = NULL; + } + + if (lastLoadedKext) { + kmod_info->next = lastLoadedKext->kmod_info; + } + + notifyKextLoadObservers(this, kmod_info); + + /* Make the global kmod list point at the just-loaded kext. Note that the + * __kernel__ kext isn't in this list, as it wasn't before SnowLeopard, + * although we do report it in kextstat these days by using the newer + * OSArray of loaded kexts, which does contain it. + * + * (The OSKext object representing the kernel doesn't even have a kmod_info + * struct, though I suppose we could stick a pointer to it from the + * static struct in OSRuntime.cpp.) + */ + kmod = kmod_info; + + /* Save the list of loaded kexts in case we panic. 
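+	 * (The list is rebuilt on every load so that a later panic report
+	 * can simply name the kexts that were resident; see
+	 * saveLoadedKextPanicList() below.)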
+ */ + OSKext::saveLoadedKextPanicList(); + + if (isExecutable()) { + OSKext::updateLoadedKextSummaries(); + savePanicString(/* isLoading */ true); #if CONFIG_DTRACE - registerWithDTrace(); + registerWithDTrace(); #else - jettisonLinkeditSegment(); + jettisonLinkeditSegment(); #endif /* CONFIG_DTRACE */ #if !VM_MAPPED_KEXTS - /* If there is a page (or more) worth of padding after the end - * of the last data section but before the end of the data segment - * then free it in the same manner the LinkeditSegment is freed - */ - jettisonDATASegmentPadding(); + /* If there is a page (or more) worth of padding after the end + * of the last data section but before the end of the data segment + * then free it in the same manner the LinkeditSegment is freed + */ + jettisonDATASegmentPadding(); #endif - } + } loaded: - if (isExecutable() && !flags.started) { - if (startOpt == kOSKextExcludeNone) { - result = start(); - if (result != kOSReturnSuccess) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogLoadFlag, - "Kext %s start failed (result 0x%x).", - getIdentifierCString(), result); - result = kOSKextReturnStartStopError; - } - } - } - - /* If not excluding matching, send the personalities to the kernel. - * This never affects the result of the load operation. - * This is a bit of a hack, because we shouldn't be handling - * personalities within the load function. - */ - if (result == kOSReturnSuccess && startMatchingOpt == kOSKextExcludeNone) { - result = sendPersonalitiesToCatalog(true, personalityNames); - } + if (isExecutable() && !flags.started) { + if (startOpt == kOSKextExcludeNone) { + result = start(); + if (result != kOSReturnSuccess) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Kext %s start failed (result 0x%x).", + getIdentifierCString(), result); + result = kOSKextReturnStartStopError; + } + } + } + + /* If not excluding matching, send the personalities to the kernel. + * This never affects the result of the load operation. + * This is a bit of a hack, because we shouldn't be handling + * personalities within the load function. + */ + if (result == kOSReturnSuccess && startMatchingOpt == kOSKextExcludeNone) { + result = sendPersonalitiesToCatalog(true, personalityNames); + } finish: - /* More hack! If the kext doesn't declare an executable, even if we - * "loaded" it, we have to remove any personalities naming it, or we'll - * never see the registry go quiet. Errors here do not count for the - * load operation itself. - * - * Note that in every other regard it's perfectly ok for a kext to - * not declare an executable and serve only as a package for personalities - * naming another kext, so we do have to allow such kexts to be "loaded" - * so that those other personalities get added & matched. 
- */ - if (!declaresExecutable()) { - OSKextLog(this, - kOSKextLogStepLevel | kOSKextLogLoadFlag, - "Kext %s has no executable; removing any personalities naming it.", - getIdentifierCString()); - removePersonalitiesFromCatalog(); - } - - if (result != kOSReturnSuccess) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s failed to load (0x%x).", - getIdentifierCString(), (int)result); - } else if (!alreadyLoaded) { - OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag, - "Kext %s loaded.", - getIdentifierCString()); - - queueKextNotification(kKextRequestPredicateLoadNotification, - OSDynamicCast(OSString, bundleID)); - } - return result; -} - -/********************************************************************* -* -*********************************************************************/ -static char * strdup(const char * string) -{ - char * result = NULL; - size_t size; - - if (!string) { - goto finish; - } - - size = 1 + strlen(string); - result = (char *)kalloc_tag(size, VM_KERN_MEMORY_OSKEXT); - if (!result) { - goto finish; - } - - memcpy(result, string, size); + /* More hack! If the kext doesn't declare an executable, even if we + * "loaded" it, we have to remove any personalities naming it, or we'll + * never see the registry go quiet. Errors here do not count for the + * load operation itself. + * + * Note that in every other regard it's perfectly ok for a kext to + * not declare an executable and serve only as a package for personalities + * naming another kext, so we do have to allow such kexts to be "loaded" + * so that those other personalities get added & matched. + */ + if (!declaresExecutable()) { + OSKextLog(this, + kOSKextLogStepLevel | kOSKextLogLoadFlag, + "Kext %s has no executable; removing any personalities naming it.", + getIdentifierCString()); + removePersonalitiesFromCatalog(); + } + + if (result != kOSReturnSuccess) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s failed to load (0x%x).", + getIdentifierCString(), (int)result); + } else if (!alreadyLoaded) { + OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "Kext %s loaded.", + getIdentifierCString()); + + queueKextNotification(kKextRequestPredicateLoadNotification, + OSDynamicCast(OSString, bundleID)); + } + return result; +} + +/********************************************************************* +* +*********************************************************************/ +static char * +strdup(const char * string) +{ + char * result = NULL; + size_t size; + + if (!string) { + goto finish; + } + + size = 1 + strlen(string); + result = (char *)kalloc_tag(size, VM_KERN_MEMORY_OSKEXT); + if (!result) { + goto finish; + } + + memcpy(result, string, size); finish: - return result; + return result; } /********************************************************************* -* +* *********************************************************************/ kernel_section_t * OSKext::lookupSection(const char *segname, const char *secname) { - kernel_section_t * found_section = NULL; - kernel_mach_header_t * mh = NULL; - kernel_segment_command_t * seg = NULL; - kernel_section_t * sec = NULL; - - if (!linkedExecutable) return (NULL); - - mh = (kernel_mach_header_t *)linkedExecutable->getBytesNoCopy(); + kernel_section_t * found_section = NULL; + kernel_mach_header_t * mh = NULL; + kernel_segment_command_t * seg = NULL; + kernel_section_t * sec = NULL; - for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) { + if 
(!linkedExecutable) { + return NULL; + } - if (0 != strcmp(seg->segname, segname)) { - continue; - } + mh = (kernel_mach_header_t *)linkedExecutable->getBytesNoCopy(); - for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) { + for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) { + if (0 != strcmp(seg->segname, segname)) { + continue; + } - if (0 == strcmp(sec->sectname, secname)) { - found_section = sec; - goto out; - } - } - } + for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) { + if (0 == strcmp(sec->sectname, secname)) { + found_section = sec; + goto out; + } + } + } - out: - return found_section; +out: + return found_section; } /********************************************************************* @@ -5108,224 +5102,224 @@ OSKext::lookupSection(const char *segname, const char *secname) OSReturn OSKext::slidePrelinkedExecutable(bool doCoalesedSlides) { - OSReturn result = kOSKextReturnBadData; - kernel_mach_header_t * mh = NULL; - kernel_segment_command_t * seg = NULL; - kernel_segment_command_t * linkeditSeg = NULL; - kernel_section_t * sec = NULL; - char * linkeditBase = NULL; - bool haveLinkeditBase = false; - char * relocBase = NULL; - bool haveRelocBase = false; - struct dysymtab_command * dysymtab = NULL; - struct linkedit_data_command * segmentSplitInfo = NULL; - struct symtab_command * symtab = NULL; - kernel_nlist_t * sym = NULL; - struct relocation_info * reloc = NULL; - uint32_t i = 0; - int reloc_size; - vm_offset_t new_kextsize; - - if (linkedExecutable == NULL || flags.builtin) { - result = kOSReturnSuccess; - goto finish; - } - - mh = (kernel_mach_header_t *)linkedExecutable->getBytesNoCopy(); - segmentSplitInfo = (struct linkedit_data_command *) getcommandfromheader(mh, LC_SEGMENT_SPLIT_INFO); - - for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) { - if (!seg->vmaddr) { - continue; - } - - seg->vmaddr = ml_static_slide(seg->vmaddr); + OSReturn result = kOSKextReturnBadData; + kernel_mach_header_t * mh = NULL; + kernel_segment_command_t * seg = NULL; + kernel_segment_command_t * linkeditSeg = NULL; + kernel_section_t * sec = NULL; + char * linkeditBase = NULL; + bool haveLinkeditBase = false; + char * relocBase = NULL; + bool haveRelocBase = false; + struct dysymtab_command * dysymtab = NULL; + struct linkedit_data_command * segmentSplitInfo = NULL; + struct symtab_command * symtab = NULL; + kernel_nlist_t * sym = NULL; + struct relocation_info * reloc = NULL; + uint32_t i = 0; + int reloc_size; + vm_offset_t new_kextsize; + + if (linkedExecutable == NULL || flags.builtin) { + result = kOSReturnSuccess; + goto finish; + } + + mh = (kernel_mach_header_t *)linkedExecutable->getBytesNoCopy(); + segmentSplitInfo = (struct linkedit_data_command *) getcommandfromheader(mh, LC_SEGMENT_SPLIT_INFO); + + for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) { + if (!seg->vmaddr) { + continue; + } + + seg->vmaddr = ml_static_slide(seg->vmaddr); #if KASLR_KEXT_DEBUG - IOLog("kaslr: segname %s unslid 0x%lx slid 0x%lx \n", - seg->segname, - (unsigned long)ml_static_unslide(seg->vmaddr), - (unsigned long)seg->vmaddr); + IOLog("kaslr: segname %s unslid 0x%lx slid 0x%lx \n", + seg->segname, + (unsigned long)ml_static_unslide(seg->vmaddr), + (unsigned long)seg->vmaddr); #endif - - if (!haveRelocBase) { - relocBase = (char *) seg->vmaddr; - haveRelocBase = true; - } - if (!strcmp(seg->segname, "__LINKEDIT")) { - linkeditBase = (char *) seg->vmaddr - seg->fileoff; - 
haveLinkeditBase = true; - linkeditSeg = seg; - } - for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) { - sec->addr = ml_static_slide(sec->addr); + + if (!haveRelocBase) { + relocBase = (char *) seg->vmaddr; + haveRelocBase = true; + } + if (!strcmp(seg->segname, "__LINKEDIT")) { + linkeditBase = (char *) seg->vmaddr - seg->fileoff; + haveLinkeditBase = true; + linkeditSeg = seg; + } + for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) { + sec->addr = ml_static_slide(sec->addr); #if KASLR_KEXT_DEBUG - IOLog("kaslr: sectname %s unslid 0x%lx slid 0x%lx \n", - sec->sectname, - (unsigned long)ml_static_unslide(sec->addr), - (unsigned long)sec->addr); + IOLog("kaslr: sectname %s unslid 0x%lx slid 0x%lx \n", + sec->sectname, + (unsigned long)ml_static_unslide(sec->addr), + (unsigned long)sec->addr); #endif - } - } - - dysymtab = (struct dysymtab_command *) getcommandfromheader(mh, LC_DYSYMTAB); - - symtab = (struct symtab_command *) getcommandfromheader(mh, LC_SYMTAB); - - if (symtab != NULL && doCoalesedSlides == false) { - /* Some pseudo-kexts have symbol tables without segments. - * Ignore them. */ - if (symtab->nsyms > 0 && haveLinkeditBase) { - sym = (kernel_nlist_t *) (linkeditBase + symtab->symoff); - for (i = 0; i < symtab->nsyms; i++) { - if (sym[i].n_type & N_STAB) { - continue; - } - sym[i].n_value = ml_static_slide(sym[i].n_value); - + } + } + + dysymtab = (struct dysymtab_command *) getcommandfromheader(mh, LC_DYSYMTAB); + + symtab = (struct symtab_command *) getcommandfromheader(mh, LC_SYMTAB); + + if (symtab != NULL && doCoalesedSlides == false) { + /* Some pseudo-kexts have symbol tables without segments. + * Ignore them. */ + if (symtab->nsyms > 0 && haveLinkeditBase) { + sym = (kernel_nlist_t *) (linkeditBase + symtab->symoff); + for (i = 0; i < symtab->nsyms; i++) { + if (sym[i].n_type & N_STAB) { + continue; + } + sym[i].n_value = ml_static_slide(sym[i].n_value); + #if KASLR_KEXT_DEBUG #define MAX_SYMS_TO_LOG 5 - if ( i < MAX_SYMS_TO_LOG ) { - IOLog("kaslr: LC_SYMTAB unslid 0x%lx slid 0x%lx \n", - (unsigned long)ml_static_unslide(sym[i].n_value), - (unsigned long)sym[i].n_value); - } + if (i < MAX_SYMS_TO_LOG) { + IOLog("kaslr: LC_SYMTAB unslid 0x%lx slid 0x%lx \n", + (unsigned long)ml_static_unslide(sym[i].n_value), + (unsigned long)sym[i].n_value); + } #endif - } - } - } - - if (dysymtab != NULL && doCoalesedSlides == false) { - if (dysymtab->nextrel > 0) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogLoadFlag | - kOSKextLogLinkFlag, - "Sliding kext %s: External relocations found.", - getIdentifierCString()); - goto finish; - } - - if (dysymtab->nlocrel > 0) { - if (!haveLinkeditBase) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogLoadFlag | - kOSKextLogLinkFlag, - "Sliding kext %s: No linkedit segment.", - getIdentifierCString()); - goto finish; - } - - if (!haveRelocBase) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogLoadFlag | - kOSKextLogLinkFlag, + } + } + } + + if (dysymtab != NULL && doCoalesedSlides == false) { + if (dysymtab->nextrel > 0) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogLoadFlag | + kOSKextLogLinkFlag, + "Sliding kext %s: External relocations found.", + getIdentifierCString()); + goto finish; + } + + if (dysymtab->nlocrel > 0) { + if (!haveLinkeditBase) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogLoadFlag | + kOSKextLogLinkFlag, + "Sliding kext %s: No linkedit segment.", + getIdentifierCString()); + goto finish; + } + + if (!haveRelocBase) { + OSKextLog(this, + 
kOSKextLogErrorLevel | kOSKextLogLoadFlag | + kOSKextLogLinkFlag, #if __x86_64__ - "Sliding kext %s: No writable segments.", + "Sliding kext %s: No writable segments.", #else - "Sliding kext %s: No segments.", + "Sliding kext %s: No segments.", #endif - getIdentifierCString()); - goto finish; - } - - reloc = (struct relocation_info *) (linkeditBase + dysymtab->locreloff); - reloc_size = dysymtab->nlocrel * sizeof(struct relocation_info); - - for (i = 0; i < dysymtab->nlocrel; i++) { - if ( reloc[i].r_extern != 0 - || reloc[i].r_type != 0 - || reloc[i].r_length != (sizeof(void *) == 8 ? 3 : 2) - ) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogLoadFlag | - kOSKextLogLinkFlag, - "Sliding kext %s: Unexpected relocation found.", - getIdentifierCString()); - goto finish; - } - if (reloc[i].r_pcrel != 0) { - continue; - } - uintptr_t *relocAddr = (uintptr_t*)(relocBase + reloc[i].r_address); - *relocAddr = ml_static_slide(*relocAddr); + getIdentifierCString()); + goto finish; + } + + reloc = (struct relocation_info *) (linkeditBase + dysymtab->locreloff); + reloc_size = dysymtab->nlocrel * sizeof(struct relocation_info); + + for (i = 0; i < dysymtab->nlocrel; i++) { + if (reloc[i].r_extern != 0 + || reloc[i].r_type != 0 + || reloc[i].r_length != (sizeof(void *) == 8 ? 3 : 2) + ) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogLoadFlag | + kOSKextLogLinkFlag, + "Sliding kext %s: Unexpected relocation found.", + getIdentifierCString()); + goto finish; + } + if (reloc[i].r_pcrel != 0) { + continue; + } + uintptr_t *relocAddr = (uintptr_t*)(relocBase + reloc[i].r_address); + *relocAddr = ml_static_slide(*relocAddr); #if KASLR_KEXT_DEBUG #define MAX_DYSYMS_TO_LOG 5 - if ( i < MAX_DYSYMS_TO_LOG ) { - IOLog("kaslr: LC_DYSYMTAB unslid 0x%lx slid 0x%lx \n", - (unsigned long)ml_static_unslide(*((uintptr_t *)(relocAddr))), - (unsigned long)*((uintptr_t *)(relocBase + reloc[i].r_address))); - } + if (i < MAX_DYSYMS_TO_LOG) { + IOLog("kaslr: LC_DYSYMTAB unslid 0x%lx slid 0x%lx \n", + (unsigned long)ml_static_unslide(*((uintptr_t *)(relocAddr))), + (unsigned long)*((uintptr_t *)(relocBase + reloc[i].r_address))); + } #endif - } - - /* We should free these relocations, not just delete the reference to them. - * Free relocations from PIE kexts. - * - * For now, we do not free LINKEDIT for kexts with split segments. - */ - new_kextsize = round_page(kmod_info->size - reloc_size); - if (((kmod_info->size - new_kextsize) > PAGE_SIZE) && (!segmentSplitInfo)) { - vm_offset_t endofkext = kmod_info->address + kmod_info->size; - vm_offset_t new_endofkext = kmod_info->address + new_kextsize; - vm_offset_t endofrelocInfo = (vm_offset_t) (((uint8_t *)reloc) + reloc_size); - int bytes_remaining = endofkext - endofrelocInfo; - OSData * new_osdata = NULL; - - /* fix up symbol offsets if they are after the dsymtab local relocs */ - if (symtab) { - if (dysymtab->locreloff < symtab->symoff){ - symtab->symoff -= reloc_size; - } - if (dysymtab->locreloff < symtab->stroff) { - symtab->stroff -= reloc_size; - } - } - if (dysymtab->locreloff < dysymtab->extreloff) { - dysymtab->extreloff -= reloc_size; - } - - /* move data behind reloc info down to new offset */ - if (endofrelocInfo < endofkext) { - memcpy(reloc, (void *)endofrelocInfo, bytes_remaining); - } - - /* Create a new OSData for the smaller kext object and reflect - * new linkedit segment size. 
- */ - linkeditSeg->vmsize = round_page(linkeditSeg->vmsize - reloc_size); - linkeditSeg->filesize = linkeditSeg->vmsize; - - new_osdata = OSData::withBytesNoCopy((void *)kmod_info->address, new_kextsize); - if (new_osdata) { - /* Fix up kmod info and linkedExecutable. - */ - kmod_info->size = new_kextsize; + } + + /* We should free these relocations, not just delete the reference to them. + * Free relocations from PIE kexts. + * + * For now, we do not free LINKEDIT for kexts with split segments. + */ + new_kextsize = round_page(kmod_info->size - reloc_size); + if (((kmod_info->size - new_kextsize) > PAGE_SIZE) && (!segmentSplitInfo)) { + vm_offset_t endofkext = kmod_info->address + kmod_info->size; + vm_offset_t new_endofkext = kmod_info->address + new_kextsize; + vm_offset_t endofrelocInfo = (vm_offset_t) (((uint8_t *)reloc) + reloc_size); + int bytes_remaining = endofkext - endofrelocInfo; + OSData * new_osdata = NULL; + + /* fix up symbol offsets if they are after the dsymtab local relocs */ + if (symtab) { + if (dysymtab->locreloff < symtab->symoff) { + symtab->symoff -= reloc_size; + } + if (dysymtab->locreloff < symtab->stroff) { + symtab->stroff -= reloc_size; + } + } + if (dysymtab->locreloff < dysymtab->extreloff) { + dysymtab->extreloff -= reloc_size; + } + + /* move data behind reloc info down to new offset */ + if (endofrelocInfo < endofkext) { + memcpy(reloc, (void *)endofrelocInfo, bytes_remaining); + } + + /* Create a new OSData for the smaller kext object and reflect + * new linkedit segment size. + */ + linkeditSeg->vmsize = round_page(linkeditSeg->vmsize - reloc_size); + linkeditSeg->filesize = linkeditSeg->vmsize; + + new_osdata = OSData::withBytesNoCopy((void *)kmod_info->address, new_kextsize); + if (new_osdata) { + /* Fix up kmod info and linkedExecutable. + */ + kmod_info->size = new_kextsize; #if VM_MAPPED_KEXTS - new_osdata->setDeallocFunction(osdata_kext_free); + new_osdata->setDeallocFunction(osdata_kext_free); #else - new_osdata->setDeallocFunction(osdata_phys_free); + new_osdata->setDeallocFunction(osdata_phys_free); #endif - linkedExecutable->setDeallocFunction(NULL); - linkedExecutable->release(); - linkedExecutable = new_osdata; - + linkedExecutable->setDeallocFunction(NULL); + linkedExecutable->release(); + linkedExecutable = new_osdata; + #if VM_MAPPED_KEXTS - kext_free(new_endofkext, (endofkext - new_endofkext)); + kext_free(new_endofkext, (endofkext - new_endofkext)); #else - ml_static_mfree(new_endofkext, (endofkext - new_endofkext)); + ml_static_mfree(new_endofkext, (endofkext - new_endofkext)); #endif - } - } - dysymtab->nlocrel = 0; - dysymtab->locreloff = 0; - } - } - - result = kOSReturnSuccess; + } + } + dysymtab->nlocrel = 0; + dysymtab->locreloff = 0; + } + } + + result = kOSReturnSuccess; finish: - return result; + return result; } /********************************************************************* @@ -5334,368 +5328,368 @@ finish: OSReturn OSKext::loadExecutable() { - OSReturn result = kOSReturnError; - kern_return_t kxldResult; - KXLDDependency * kxlddeps = NULL; // must kfree - uint32_t num_kxlddeps = 0; - OSArray * linkDependencies = NULL; // must release - uint32_t numDirectDependencies = 0; - uint32_t num_kmod_refs = 0; - struct mach_header ** kxldHeaderPtr = NULL; // do not free - struct mach_header * kxld_header = NULL; // xxx - need to free here? 
- OSData * theExecutable = NULL; // do not release - OSString * versString = NULL; // do not release - const char * versCString = NULL; // do not free - const char * string = NULL; // do not free - unsigned int i; - - /* We need the version string for a variety of bits below. - */ - versString = OSDynamicCast(OSString, - getPropertyForHostArch(kCFBundleVersionKey)); - if (!versString) { - goto finish; - } - versCString = versString->getCStringNoCopy(); - - if (isKernelComponent()) { - if (STRING_HAS_PREFIX(versCString, KERNEL_LIB_PREFIX)) { - - if (strncmp(versCString, KERNEL6_VERSION, strlen(KERNEL6_VERSION))) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kernel component %s has incorrect version %s; " - "expected %s.", - getIdentifierCString(), - versCString, KERNEL6_VERSION); - result = kOSKextReturnInternalError; - goto finish; - } else if (strcmp(versCString, osrelease)) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kernel component %s has incorrect version %s; " - "expected %s.", - getIdentifierCString(), - versCString, osrelease); - result = kOSKextReturnInternalError; - goto finish; - } - } - } - - if (isPrelinked()) { - goto register_kmod; - } - - /* all callers must be entitled */ - if (FALSE == IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-secure-management")) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogLoadFlag, - "Not entitled to link kext '%s'", - getIdentifierCString()); - result = kOSKextReturnNotPrivileged; - goto finish; - } - - theExecutable = getExecutable(); - if (!theExecutable) { - if (declaresExecutable()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Can't load kext %s - executable is missing.", - getIdentifierCString()); - result = kOSKextReturnValidation; - goto finish; - } - goto register_kmod; - } - - if (isInterface()) { - OSData *executableCopy = OSData::withData(theExecutable); - setLinkedExecutable(executableCopy); - executableCopy->release(); - goto register_kmod; - } - - numDirectDependencies = getNumDependencies(); - - if (flags.hasBleedthrough) { - linkDependencies = dependencies; - linkDependencies->retain(); - } else { - linkDependencies = OSArray::withArray(dependencies); - if (!linkDependencies) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogLinkFlag, - "Can't allocate link dependencies to load kext %s.", - getIdentifierCString()); - goto finish; - } - - for (i = 0; i < numDirectDependencies; ++i) { - OSKext * dependencyKext = OSDynamicCast(OSKext, - dependencies->getObject(i)); - dependencyKext->addBleedthroughDependencies(linkDependencies); - } - } - - num_kxlddeps = linkDependencies->getCount(); - if (!num_kxlddeps) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogDependenciesFlag, - "Can't load kext %s - it has no library dependencies.", - getIdentifierCString()); - goto finish; - } - - kxlddeps = (KXLDDependency *)kalloc_tag(num_kxlddeps * sizeof(*kxlddeps), VM_KERN_MEMORY_OSKEXT); - if (!kxlddeps) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogLinkFlag, - "Can't allocate link context to load kext %s.", - getIdentifierCString()); - goto finish; - } - bzero(kxlddeps, num_kxlddeps * sizeof(*kxlddeps)); - - for (i = 0; i < num_kxlddeps; ++i ) { - OSKext * dependency = OSDynamicCast(OSKext, linkDependencies->getObject(i)); - - if (dependency->isInterface()) { - OSKext *interfaceTargetKext = NULL; - OSData * interfaceTarget = NULL; - - if 
(dependency->isKernelComponent()) { - interfaceTargetKext = sKernelKext; - interfaceTarget = sKernelKext->linkedExecutable; - } else { - interfaceTargetKext = OSDynamicCast(OSKext, - dependency->dependencies->getObject(0)); - - interfaceTarget = interfaceTargetKext->linkedExecutable; - } - - if (!interfaceTarget) { - // panic? - goto finish; - } - - /* The names set here aren't actually logged yet , - * it will be useful to have them in the debugger. - * strdup() failing isn't critical right here so we don't check that. - */ - kxlddeps[i].kext = (u_char *) interfaceTarget->getBytesNoCopy(); - kxlddeps[i].kext_size = interfaceTarget->getLength(); - kxlddeps[i].kext_name = strdup(interfaceTargetKext->getIdentifierCString()); - - kxlddeps[i].interface = (u_char *) dependency->linkedExecutable->getBytesNoCopy(); - kxlddeps[i].interface_size = dependency->linkedExecutable->getLength(); - kxlddeps[i].interface_name = strdup(dependency->getIdentifierCString()); - } else { - kxlddeps[i].kext = (u_char *) dependency->linkedExecutable->getBytesNoCopy(); - kxlddeps[i].kext_size = dependency->linkedExecutable->getLength(); - kxlddeps[i].kext_name = strdup(dependency->getIdentifierCString()); - } - - kxlddeps[i].is_direct_dependency = (i < numDirectDependencies); - } - - kxldHeaderPtr = &kxld_header; + OSReturn result = kOSReturnError; + kern_return_t kxldResult; + KXLDDependency * kxlddeps = NULL;// must kfree + uint32_t num_kxlddeps = 0; + OSArray * linkDependencies = NULL;// must release + uint32_t numDirectDependencies = 0; + uint32_t num_kmod_refs = 0; + struct mach_header ** kxldHeaderPtr = NULL;// do not free + struct mach_header * kxld_header = NULL;// xxx - need to free here? + OSData * theExecutable = NULL;// do not release + OSString * versString = NULL;// do not release + const char * versCString = NULL;// do not free + const char * string = NULL;// do not free + unsigned int i; + + /* We need the version string for a variety of bits below. + */ + versString = OSDynamicCast(OSString, + getPropertyForHostArch(kCFBundleVersionKey)); + if (!versString) { + goto finish; + } + versCString = versString->getCStringNoCopy(); + + if (isKernelComponent()) { + if (STRING_HAS_PREFIX(versCString, KERNEL_LIB_PREFIX)) { + if (strncmp(versCString, KERNEL6_VERSION, strlen(KERNEL6_VERSION))) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kernel component %s has incorrect version %s; " + "expected %s.", + getIdentifierCString(), + versCString, KERNEL6_VERSION); + result = kOSKextReturnInternalError; + goto finish; + } else if (strcmp(versCString, osrelease)) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kernel component %s has incorrect version %s; " + "expected %s.", + getIdentifierCString(), + versCString, osrelease); + result = kOSKextReturnInternalError; + goto finish; + } + } + } -#if DEBUG - OSKextLog(this, - kOSKextLogExplicitLevel | - kOSKextLogLoadFlag | kOSKextLogLinkFlag, - "Kext %s - calling kxld_link_file:\n" - " kxld_context: %p\n" - " executable: %p executable_length: %d\n" - " user_data: %p\n" - " kxld_dependencies: %p num_dependencies: %d\n" - " kxld_header_ptr: %p kmod_info_ptr: %p\n", - getIdentifierCString(), sKxldContext, - theExecutable->getBytesNoCopy(), theExecutable->getLength(), - this, kxlddeps, num_kxlddeps, - kxldHeaderPtr, &kmod_info); -#endif + if (isPrelinked()) { + goto register_kmod; + } - /* After this call, the linkedExecutable instance variable - * should exist. 
- */ - kxldResult = kxld_link_file(sKxldContext, - (u_char *)theExecutable->getBytesNoCopy(), - theExecutable->getLength(), - getIdentifierCString(), this, kxlddeps, num_kxlddeps, - (u_char **)kxldHeaderPtr, (kxld_addr_t *)&kmod_info); - - if (kxldResult != KERN_SUCCESS) { - // xxx - add kxldResult here? - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Can't load kext %s - link failed.", - getIdentifierCString()); - result = kOSKextReturnLinkError; - goto finish; - } - - /* We've written data & instructions into kernel memory, so flush the data - * cache and invalidate the instruction cache. - * I/D caches are coherent on x86 - */ -#if !defined(__i386__) && !defined(__x86_64__) - flush_dcache(kmod_info->address, kmod_info->size, false); - invalidate_icache(kmod_info->address, kmod_info->size, false); -#endif -register_kmod: + /* all callers must be entitled */ + if (FALSE == IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-secure-management")) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Not entitled to link kext '%s'", + getIdentifierCString()); + result = kOSKextReturnNotPrivileged; + goto finish; + } - if (isInterface()) { - - /* Whip up a fake kmod_info entry for the interface kext. - */ - kmod_info = (kmod_info_t *)kalloc_tag(sizeof(kmod_info_t), VM_KERN_MEMORY_OSKEXT); - if (!kmod_info) { - result = KERN_MEMORY_ERROR; - goto finish; - } - - /* A pseudokext has almost nothing in its kmod_info struct. - */ - bzero(kmod_info, sizeof(kmod_info_t)); - - kmod_info->info_version = KMOD_INFO_VERSION; - - /* An interface kext doesn't have a linkedExecutable, so save a - * copy of the UUID out of the original executable via copyUUID() - * while we still have the original executable. - */ - interfaceUUID = copyUUID(); - } - - kmod_info->id = loadTag = sNextLoadTag++; - kmod_info->reference_count = 0; // KMOD_DECL... sets it to -1 (invalid). - - /* Stamp the bundle ID and version from the OSKext over anything - * resident inside the kmod_info. - */ - string = getIdentifierCString(); - strlcpy(kmod_info->name, string, sizeof(kmod_info->name)); - - string = versCString; - strlcpy(kmod_info->version, string, sizeof(kmod_info->version)); - - /* Add the dependencies' kmod_info structs as kmod_references. 
- */ - num_kmod_refs = getNumDependencies(); - if (num_kmod_refs) { - kmod_info->reference_list = (kmod_reference_t *)kalloc_tag( - num_kmod_refs * sizeof(kmod_reference_t), VM_KERN_MEMORY_OSKEXT); - if (!kmod_info->reference_list) { - result = KERN_MEMORY_ERROR; - goto finish; - } - bzero(kmod_info->reference_list, - num_kmod_refs * sizeof(kmod_reference_t)); - for (uint32_t refIndex = 0; refIndex < num_kmod_refs; refIndex++) { - kmod_reference_t * ref = &(kmod_info->reference_list[refIndex]); - OSKext * refKext = OSDynamicCast(OSKext, dependencies->getObject(refIndex)); - ref->info = refKext->kmod_info; - ref->info->reference_count++; - - if (refIndex + 1 < num_kmod_refs) { - ref->next = kmod_info->reference_list + refIndex + 1; - } - } - } - - if (!isInterface() && linkedExecutable) { - OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag, - "Kext %s executable loaded; %u pages at 0x%lx (load tag %u).", - kmod_info->name, - (unsigned)kmod_info->size / PAGE_SIZE, - (unsigned long)ml_static_unslide(kmod_info->address), - (unsigned)kmod_info->id); - } - - /* if prelinked, VM protections are already set */ - result = setVMAttributes(!isPrelinked(), true); - if (result != KERN_SUCCESS) { - goto finish; - } + theExecutable = getExecutable(); + if (!theExecutable) { + if (declaresExecutable()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Can't load kext %s - executable is missing.", + getIdentifierCString()); + result = kOSKextReturnValidation; + goto finish; + } + goto register_kmod; + } -#if KASAN - if (linkedExecutable) { - kasan_load_kext((vm_offset_t)linkedExecutable->getBytesNoCopy(), - linkedExecutable->getLength(), getIdentifierCString()); - } -#else - if (lookupSection(KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME)) { - OSKextLog(this, - kOSKextLogErrorLevel | kOSKextLogLoadFlag, - "KASAN: cannot load KASAN-ified kext %s on a non-KASAN kernel\n", - getIdentifierCString() - ); - result = KERN_FAILURE; - goto finish; - } -#endif + if (isInterface()) { + OSData *executableCopy = OSData::withData(theExecutable); + setLinkedExecutable(executableCopy); + executableCopy->release(); + goto register_kmod; + } - result = kOSReturnSuccess; + numDirectDependencies = getNumDependencies(); + + if (flags.hasBleedthrough) { + linkDependencies = dependencies; + linkDependencies->retain(); + } else { + linkDependencies = OSArray::withArray(dependencies); + if (!linkDependencies) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag | kOSKextLogLinkFlag, + "Can't allocate link dependencies to load kext %s.", + getIdentifierCString()); + goto finish; + } -finish: - OSSafeReleaseNULL(linkDependencies); - - /* Clear up locally allocated dependency info. - */ - for (i = 0; i < num_kxlddeps; ++i ) { - size_t size; - - if (kxlddeps[i].kext_name) { - size = 1 + strlen(kxlddeps[i].kext_name); - kfree(kxlddeps[i].kext_name, size); - } - if (kxlddeps[i].interface_name) { - size = 1 + strlen(kxlddeps[i].interface_name); - kfree(kxlddeps[i].interface_name, size); - } - } - if (kxlddeps) kfree(kxlddeps, (num_kxlddeps * sizeof(*kxlddeps))); - - /* We no longer need the unrelocated executable (which the linker - * has altered anyhow). 
- */
-	setExecutable(NULL);
-
-	if (result != kOSReturnSuccess) {
-		OSKextLog(this,
-		    kOSKextLogErrorLevel |
-		    kOSKextLogLoadFlag,
-		    "Failed to load executable for kext %s.",
-		    getIdentifierCString());
-
-		if (kmod_info && kmod_info->reference_list) {
-			kfree(kmod_info->reference_list,
-			    num_kmod_refs * sizeof(kmod_reference_t));
-		}
-		if (isInterface()) {
-			kfree(kmod_info, sizeof(kmod_info_t));
-		}
-		kmod_info = NULL;
-		if (linkedExecutable) {
-			linkedExecutable->release();
-			linkedExecutable = NULL;
-		}
-	}
-
-	return result;
-}
+		for (i = 0; i < numDirectDependencies; ++i) {
+			OSKext * dependencyKext = OSDynamicCast(OSKext,
+			    dependencies->getObject(i));
+			dependencyKext->addBleedthroughDependencies(linkDependencies);
+		}
+	}
+
+	num_kxlddeps = linkDependencies->getCount();
+	if (!num_kxlddeps) {
+		OSKextLog(this,
+		    kOSKextLogErrorLevel |
+		    kOSKextLogLoadFlag | kOSKextLogDependenciesFlag,
+		    "Can't load kext %s - it has no library dependencies.",
+		    getIdentifierCString());
+		goto finish;
+	}
+
+	kxlddeps = (KXLDDependency *)kalloc_tag(num_kxlddeps * sizeof(*kxlddeps), VM_KERN_MEMORY_OSKEXT);
+	if (!kxlddeps) {
+		OSKextLog(this,
+		    kOSKextLogErrorLevel |
+		    kOSKextLogLoadFlag | kOSKextLogLinkFlag,
+		    "Can't allocate link context to load kext %s.",
+		    getIdentifierCString());
+		goto finish;
+	}
+	bzero(kxlddeps, num_kxlddeps * sizeof(*kxlddeps));
+
+	for (i = 0; i < num_kxlddeps; ++i) {
+		OSKext * dependency = OSDynamicCast(OSKext, linkDependencies->getObject(i));
+
+		if (dependency->isInterface()) {
+			OSKext *interfaceTargetKext = NULL;
+			OSData * interfaceTarget = NULL;
+
+			if (dependency->isKernelComponent()) {
+				interfaceTargetKext = sKernelKext;
+				interfaceTarget = sKernelKext->linkedExecutable;
+			} else {
+				interfaceTargetKext = OSDynamicCast(OSKext,
+				    dependency->dependencies->getObject(0));
+
+				interfaceTarget = interfaceTargetKext->linkedExecutable;
+			}
+
+			if (!interfaceTarget) {
+				// panic?
+				goto finish;
+			}
+
+			/* The names set here aren't actually logged yet, but
+			 * it will be useful to have them in the debugger.
+			 * strdup() failing isn't critical right here so we don't check that.
+			 */
+			kxlddeps[i].kext = (u_char *) interfaceTarget->getBytesNoCopy();
+			kxlddeps[i].kext_size = interfaceTarget->getLength();
+			kxlddeps[i].kext_name = strdup(interfaceTargetKext->getIdentifierCString());
+
+			kxlddeps[i].interface = (u_char *) dependency->linkedExecutable->getBytesNoCopy();
+			kxlddeps[i].interface_size = dependency->linkedExecutable->getLength();
+			kxlddeps[i].interface_name = strdup(dependency->getIdentifierCString());
+		} else {
+			kxlddeps[i].kext = (u_char *) dependency->linkedExecutable->getBytesNoCopy();
+			kxlddeps[i].kext_size = dependency->linkedExecutable->getLength();
+			kxlddeps[i].kext_name = strdup(dependency->getIdentifierCString());
+		}
+
+		kxlddeps[i].is_direct_dependency = (i < numDirectDependencies);
+	}
+
+	kxldHeaderPtr = &kxld_header;
+
+#if DEBUG
+	OSKextLog(this,
+	    kOSKextLogExplicitLevel |
+	    kOSKextLogLoadFlag | kOSKextLogLinkFlag,
+	    "Kext %s - calling kxld_link_file:\n"
+	    "    kxld_context: %p\n"
+	    "    executable: %p    executable_length: %d\n"
+	    "    user_data: %p\n"
+	    "    kxld_dependencies: %p    num_dependencies: %d\n"
+	    "    kxld_header_ptr: %p    kmod_info_ptr: %p\n",
+	    getIdentifierCString(), sKxldContext,
+	    theExecutable->getBytesNoCopy(), theExecutable->getLength(),
+	    this, kxlddeps, num_kxlddeps,
+	    kxldHeaderPtr, &kmod_info);
+#endif
+
+	/* After this call, the linkedExecutable instance variable
+	 * should exist. 
+ */ + kxldResult = kxld_link_file(sKxldContext, + (u_char *)theExecutable->getBytesNoCopy(), + theExecutable->getLength(), + getIdentifierCString(), this, kxlddeps, num_kxlddeps, + (u_char **)kxldHeaderPtr, (kxld_addr_t *)&kmod_info); + + if (kxldResult != KERN_SUCCESS) { + // xxx - add kxldResult here? + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Can't load kext %s - link failed.", + getIdentifierCString()); + result = kOSKextReturnLinkError; + goto finish; + } + + /* We've written data & instructions into kernel memory, so flush the data + * cache and invalidate the instruction cache. + * I/D caches are coherent on x86 + */ +#if !defined(__i386__) && !defined(__x86_64__) + flush_dcache(kmod_info->address, kmod_info->size, false); + invalidate_icache(kmod_info->address, kmod_info->size, false); +#endif +register_kmod: + + if (isInterface()) { + /* Whip up a fake kmod_info entry for the interface kext. + */ + kmod_info = (kmod_info_t *)kalloc_tag(sizeof(kmod_info_t), VM_KERN_MEMORY_OSKEXT); + if (!kmod_info) { + result = KERN_MEMORY_ERROR; + goto finish; + } + + /* A pseudokext has almost nothing in its kmod_info struct. + */ + bzero(kmod_info, sizeof(kmod_info_t)); + + kmod_info->info_version = KMOD_INFO_VERSION; + + /* An interface kext doesn't have a linkedExecutable, so save a + * copy of the UUID out of the original executable via copyUUID() + * while we still have the original executable. + */ + interfaceUUID = copyUUID(); + } + + kmod_info->id = loadTag = sNextLoadTag++; + kmod_info->reference_count = 0; // KMOD_DECL... sets it to -1 (invalid). + + /* Stamp the bundle ID and version from the OSKext over anything + * resident inside the kmod_info. + */ + string = getIdentifierCString(); + strlcpy(kmod_info->name, string, sizeof(kmod_info->name)); + + string = versCString; + strlcpy(kmod_info->version, string, sizeof(kmod_info->version)); + + /* Add the dependencies' kmod_info structs as kmod_references. 
+ */ + num_kmod_refs = getNumDependencies(); + if (num_kmod_refs) { + kmod_info->reference_list = (kmod_reference_t *)kalloc_tag( + num_kmod_refs * sizeof(kmod_reference_t), VM_KERN_MEMORY_OSKEXT); + if (!kmod_info->reference_list) { + result = KERN_MEMORY_ERROR; + goto finish; + } + bzero(kmod_info->reference_list, + num_kmod_refs * sizeof(kmod_reference_t)); + for (uint32_t refIndex = 0; refIndex < num_kmod_refs; refIndex++) { + kmod_reference_t * ref = &(kmod_info->reference_list[refIndex]); + OSKext * refKext = OSDynamicCast(OSKext, dependencies->getObject(refIndex)); + ref->info = refKext->kmod_info; + ref->info->reference_count++; + + if (refIndex + 1 < num_kmod_refs) { + ref->next = kmod_info->reference_list + refIndex + 1; + } + } + } + + if (!isInterface() && linkedExecutable) { + OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "Kext %s executable loaded; %u pages at 0x%lx (load tag %u).", + kmod_info->name, + (unsigned)kmod_info->size / PAGE_SIZE, + (unsigned long)ml_static_unslide(kmod_info->address), + (unsigned)kmod_info->id); + } + + /* if prelinked, VM protections are already set */ + result = setVMAttributes(!isPrelinked(), true); + if (result != KERN_SUCCESS) { + goto finish; + } + +#if KASAN + if (linkedExecutable) { + kasan_load_kext((vm_offset_t)linkedExecutable->getBytesNoCopy(), + linkedExecutable->getLength(), getIdentifierCString()); + } +#else + if (lookupSection(KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME)) { + OSKextLog(this, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "KASAN: cannot load KASAN-ified kext %s on a non-KASAN kernel\n", + getIdentifierCString() + ); + result = KERN_FAILURE; + goto finish; + } +#endif + + result = kOSReturnSuccess; + +finish: + OSSafeReleaseNULL(linkDependencies); + + /* Clear up locally allocated dependency info. + */ + for (i = 0; i < num_kxlddeps; ++i) { + size_t size; + + if (kxlddeps[i].kext_name) { + size = 1 + strlen(kxlddeps[i].kext_name); + kfree(kxlddeps[i].kext_name, size); + } + if (kxlddeps[i].interface_name) { + size = 1 + strlen(kxlddeps[i].interface_name); + kfree(kxlddeps[i].interface_name, size); + } + } + if (kxlddeps) { + kfree(kxlddeps, (num_kxlddeps * sizeof(*kxlddeps))); + } + + /* We no longer need the unrelocated executable (which the linker + * has altered anyhow). 
+ */ + setExecutable(NULL); + + if (result != kOSReturnSuccess) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Failed to load executable for kext %s.", + getIdentifierCString()); + + if (kmod_info && kmod_info->reference_list) { + kfree(kmod_info->reference_list, + num_kmod_refs * sizeof(kmod_reference_t)); + } + if (isInterface()) { + kfree(kmod_info, sizeof(kmod_info_t)); + } + kmod_info = NULL; + if (linkedExecutable) { + linkedExecutable->release(); + linkedExecutable = NULL; + } + } + + return result; +} /********************************************************************* * The linkedit segment is used by the kext linker for dependency @@ -5706,73 +5700,72 @@ finish: void OSKext::jettisonLinkeditSegment(void) { - kernel_mach_header_t * machhdr = (kernel_mach_header_t *)kmod_info->address; - kernel_segment_command_t * linkedit = NULL; - vm_offset_t start; - vm_size_t linkeditsize, kextsize; - OSData * data = NULL; + kernel_mach_header_t * machhdr = (kernel_mach_header_t *)kmod_info->address; + kernel_segment_command_t * linkedit = NULL; + vm_offset_t start; + vm_size_t linkeditsize, kextsize; + OSData * data = NULL; #if NO_KEXTD - /* We can free symbol tables for all embedded kexts because we don't - * support runtime kext linking. - */ - if (sKeepSymbols || !isExecutable() || !linkedExecutable || flags.jettisonLinkeditSeg) { + /* We can free symbol tables for all embedded kexts because we don't + * support runtime kext linking. + */ + if (sKeepSymbols || !isExecutable() || !linkedExecutable || flags.jettisonLinkeditSeg) { #else - if (sKeepSymbols || isLibrary() || !isExecutable() || !linkedExecutable || flags.jettisonLinkeditSeg) { + if (sKeepSymbols || isLibrary() || !isExecutable() || !linkedExecutable || flags.jettisonLinkeditSeg) { #endif - goto finish; - } - - /* Find the linkedit segment. If it's not the last segment, then freeing - * it will fragment the kext into multiple VM regions, which OSKext is not - * designed to handle, so we'll have to skip it. - */ - linkedit = getsegbynamefromheader(machhdr, SEG_LINKEDIT); - if (!linkedit) { - goto finish; - } - - if (round_page(kmod_info->address + kmod_info->size) != - round_page(linkedit->vmaddr + linkedit->vmsize)) - { - goto finish; - } - - /* Create a new OSData for the smaller kext object. - */ - linkeditsize = round_page(linkedit->vmsize); - kextsize = kmod_info->size - linkeditsize; - start = linkedit->vmaddr; - - data = OSData::withBytesNoCopy((void *)kmod_info->address, kextsize); - if (!data) { - goto finish; - } - - /* Fix the kmod info and linkedExecutable. - */ - kmod_info->size = kextsize; - + goto finish; + } + + /* Find the linkedit segment. If it's not the last segment, then freeing + * it will fragment the kext into multiple VM regions, which OSKext is not + * designed to handle, so we'll have to skip it. + */ + linkedit = getsegbynamefromheader(machhdr, SEG_LINKEDIT); + if (!linkedit) { + goto finish; + } + + if (round_page(kmod_info->address + kmod_info->size) != + round_page(linkedit->vmaddr + linkedit->vmsize)) { + goto finish; + } + + /* Create a new OSData for the smaller kext object. + */ + linkeditsize = round_page(linkedit->vmsize); + kextsize = kmod_info->size - linkeditsize; + start = linkedit->vmaddr; + + data = OSData::withBytesNoCopy((void *)kmod_info->address, kextsize); + if (!data) { + goto finish; + } + + /* Fix the kmod info and linkedExecutable. 
+ */ + kmod_info->size = kextsize; + #if VM_MAPPED_KEXTS - data->setDeallocFunction(osdata_kext_free); + data->setDeallocFunction(osdata_kext_free); #else - data->setDeallocFunction(osdata_phys_free); + data->setDeallocFunction(osdata_phys_free); #endif - linkedExecutable->setDeallocFunction(NULL); - linkedExecutable->release(); - linkedExecutable = data; - flags.jettisonLinkeditSeg = 1; - - /* Free the linkedit segment. - */ + linkedExecutable->setDeallocFunction(NULL); + linkedExecutable->release(); + linkedExecutable = data; + flags.jettisonLinkeditSeg = 1; + + /* Free the linkedit segment. + */ #if VM_MAPPED_KEXTS - kext_free(start, linkeditsize); + kext_free(start, linkeditsize); #else - ml_static_mfree(start, linkeditsize); + ml_static_mfree(start, linkeditsize); #endif finish: - return; + return; } /********************************************************************* @@ -5783,52 +5776,54 @@ finish: void OSKext::jettisonDATASegmentPadding(void) { - kernel_mach_header_t * mh; - kernel_segment_command_t * dataSeg; - kernel_section_t * sec, * lastSec; - vm_offset_t dataSegEnd, lastSecEnd; - vm_size_t padSize; + kernel_mach_header_t * mh; + kernel_segment_command_t * dataSeg; + kernel_section_t * sec, * lastSec; + vm_offset_t dataSegEnd, lastSecEnd; + vm_size_t padSize; - if (flags.builtin) return; - mh = (kernel_mach_header_t *)kmod_info->address; + if (flags.builtin) { + return; + } + mh = (kernel_mach_header_t *)kmod_info->address; - dataSeg = getsegbynamefromheader(mh, SEG_DATA); - if (dataSeg == NULL) { - return; - } + dataSeg = getsegbynamefromheader(mh, SEG_DATA); + if (dataSeg == NULL) { + return; + } - lastSec = NULL; - sec = firstsect(dataSeg); - while (sec != NULL) { - lastSec = sec; - sec = nextsect(dataSeg, sec); - } + lastSec = NULL; + sec = firstsect(dataSeg); + while (sec != NULL) { + lastSec = sec; + sec = nextsect(dataSeg, sec); + } - if (lastSec == NULL) { - return; - } + if (lastSec == NULL) { + return; + } - if ((dataSeg->vmaddr != round_page(dataSeg->vmaddr)) || - (dataSeg->vmsize != round_page(dataSeg->vmsize))) { - return; - } + if ((dataSeg->vmaddr != round_page(dataSeg->vmaddr)) || + (dataSeg->vmsize != round_page(dataSeg->vmsize))) { + return; + } - dataSegEnd = dataSeg->vmaddr + dataSeg->vmsize; - lastSecEnd = round_page(lastSec->addr + lastSec->size); + dataSegEnd = dataSeg->vmaddr + dataSeg->vmsize; + lastSecEnd = round_page(lastSec->addr + lastSec->size); - if (dataSegEnd <= lastSecEnd) { - return; - } + if (dataSegEnd <= lastSecEnd) { + return; + } - padSize = dataSegEnd - lastSecEnd; + padSize = dataSegEnd - lastSecEnd; - if (padSize >= PAGE_SIZE) { + if (padSize >= PAGE_SIZE) { #if VM_MAPPED_KEXTS - kext_free(lastSecEnd, padSize); + kext_free(lastSecEnd, padSize); #else - ml_static_mfree(lastSecEnd, padSize); + ml_static_mfree(lastSecEnd, padSize); #endif - } + } } /********************************************************************* @@ -5836,14 +5831,14 @@ OSKext::jettisonDATASegmentPadding(void) void OSKext::setLinkedExecutable(OSData * anExecutable) { - if (linkedExecutable) { - panic("Attempt to set linked executable on kext " - "that already has one (%s).\n", - getIdentifierCString()); - } - linkedExecutable = anExecutable; - linkedExecutable->retain(); - return; + if (linkedExecutable) { + panic("Attempt to set linked executable on kext " + "that already has one (%s).\n", + getIdentifierCString()); + } + linkedExecutable = anExecutable; + linkedExecutable->retain(); + return; } #if CONFIG_DTRACE @@ -5855,30 +5850,30 @@ 
OSKext::setLinkedExecutable(OSData * anExecutable) void OSKext::registerKextsWithDTrace(void) { - uint32_t count = sLoadedKexts->getCount(); - uint32_t i; + uint32_t count = sLoadedKexts->getCount(); + uint32_t i; - IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextLock); - for (i = 0; i < count; i++) { - OSKext * thisKext = NULL; // do not release + for (i = 0; i < count; i++) { + OSKext * thisKext = NULL;// do not release - thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (!thisKext || !thisKext->isExecutable()) { - continue; - } + thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + if (!thisKext || !thisKext->isExecutable()) { + continue; + } - thisKext->registerWithDTrace(); - } + thisKext->registerWithDTrace(); + } - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - return; + return; } extern "C" { - extern int (*dtrace_modload)(struct kmod_info *, uint32_t); - extern int (*dtrace_modunload)(struct kmod_info *); +extern int (*dtrace_modload)(struct kmod_info *, uint32_t); +extern int (*dtrace_modunload)(struct kmod_info *); }; /********************************************************************* @@ -5886,38 +5881,38 @@ extern "C" { void OSKext::registerWithDTrace(void) { - /* Register kext with dtrace. A dtrace_modload failure should not - * prevent a kext from loading, so we ignore the return code. - */ - if (!flags.dtraceInitialized && (dtrace_modload != NULL)) { - uint32_t modflag = 0; - OSObject * forceInit = getPropertyForHostArch("OSBundleForceDTraceInit"); - if (forceInit == kOSBooleanTrue) { - modflag |= KMOD_DTRACE_FORCE_INIT; - } - if (flags.builtin) { - modflag |= KMOD_DTRACE_STATIC_KEXT; - } + /* Register kext with dtrace. A dtrace_modload failure should not + * prevent a kext from loading, so we ignore the return code. + */ + if (!flags.dtraceInitialized && (dtrace_modload != NULL)) { + uint32_t modflag = 0; + OSObject * forceInit = getPropertyForHostArch("OSBundleForceDTraceInit"); + if (forceInit == kOSBooleanTrue) { + modflag |= KMOD_DTRACE_FORCE_INIT; + } + if (flags.builtin) { + modflag |= KMOD_DTRACE_STATIC_KEXT; + } - (void)(*dtrace_modload)(kmod_info, modflag); - flags.dtraceInitialized = true; - jettisonLinkeditSegment(); - } - return; + (void)(*dtrace_modload)(kmod_info, modflag); + flags.dtraceInitialized = true; + jettisonLinkeditSegment(); + } + return; } /********************************************************************* *********************************************************************/ void OSKext::unregisterWithDTrace(void) { - /* Unregister kext with dtrace. A dtrace_modunload failure should not - * prevent a kext from loading, so we ignore the return code. - */ - if (flags.dtraceInitialized && (dtrace_modunload != NULL)) { - (void)(*dtrace_modunload)(kmod_info); - flags.dtraceInitialized = false; - } - return; + /* Unregister kext with dtrace. A dtrace_modunload failure should not + * prevent a kext from loading, so we ignore the return code. 
+ */ + if (flags.dtraceInitialized && (dtrace_modunload != NULL)) { + (void)(*dtrace_modunload)(kmod_info); + flags.dtraceInitialized = false; + } + return; } #endif /* CONFIG_DTRACE */ @@ -5929,59 +5924,60 @@ OSKext::unregisterWithDTrace(void) #if defined(__arm__) || defined(__arm64__) static inline kern_return_t OSKext_protect( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t new_prot, - boolean_t set_max) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t new_prot, + boolean_t set_max) { #pragma unused(map) - assert(map == kernel_map); // we can handle KEXTs arising from the PRELINK segment and no others - assert(start <= end); - if (start >= end) - return KERN_SUCCESS; // Punt segments of length zero (e.g., headers) or less (i.e., blunders) - else if (set_max) - return KERN_SUCCESS; // Punt set_max, as there's no mechanism to record that state - else - return ml_static_protect(start, end - start, new_prot); + assert(map == kernel_map); // we can handle KEXTs arising from the PRELINK segment and no others + assert(start <= end); + if (start >= end) { + return KERN_SUCCESS; // Punt segments of length zero (e.g., headers) or less (i.e., blunders) + } else if (set_max) { + return KERN_SUCCESS; // Punt set_max, as there's no mechanism to record that state + } else { + return ml_static_protect(start, end - start, new_prot); + } } static inline kern_return_t OSKext_wire( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t access_type, - boolean_t user_wire) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t access_type, + boolean_t user_wire) { #pragma unused(map,start,end,access_type,user_wire) return KERN_SUCCESS; // No-op as PRELINK kexts are cemented into physical memory at boot } #else -#error Unrecognized architecture +#error Unrecognized architecture #endif #else static inline kern_return_t OSKext_protect( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t new_prot, - boolean_t set_max) -{ - if (start == end) { // 10538581 - return(KERN_SUCCESS); - } - return vm_map_protect(map, start, end, new_prot, set_max); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t new_prot, + boolean_t set_max) +{ + if (start == end) { // 10538581 + return KERN_SUCCESS; + } + return vm_map_protect(map, start, end, new_prot, set_max); } static inline kern_return_t OSKext_wire( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t access_type, - boolean_t user_wire) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t access_type, + boolean_t user_wire) { return vm_map_wire_kernel(map, start, end, access_type, VM_KERN_MEMORY_KEXT, user_wire); } @@ -5990,103 +5986,102 @@ OSKext_wire( OSReturn OSKext::setVMAttributes(bool protect, bool wire) { - vm_map_t kext_map = NULL; - kernel_segment_command_t * seg = NULL; - vm_map_offset_t start = 0; - vm_map_offset_t end = 0; - OSReturn result = kOSReturnError; + vm_map_t kext_map = NULL; + kernel_segment_command_t * seg = NULL; + vm_map_offset_t start = 0; + vm_map_offset_t end = 0; + OSReturn result = kOSReturnError; - if (isInterface() || !declaresExecutable() || flags.builtin) { - result = kOSReturnSuccess; - goto finish; - } + if (isInterface() || !declaresExecutable() || flags.builtin) { + result = kOSReturnSuccess; + goto finish; + } - /* Get the kext's vm map */ - kext_map = kext_get_vm_map(kmod_info); - if (!kext_map) { - result = KERN_MEMORY_ERROR; - goto finish; - } 
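/* [Editor's aside -- illustrative sketch, not part of the patch.]
 * setVMAttributes() below applies each Mach-O segment's protections in two
 * passes: first the maximum protections (set_max == TRUE), then the initial
 * protections (set_max == FALSE), rounding inward so that only whole pages
 * inside the segment are touched. A minimal, hypothetical helper
 * (sketch_protect_segment is not a real xnu function) showing the same
 * order, using the OSKext_protect() wrapper defined just above:
 */
static kern_return_t
sketch_protect_segment(vm_map_t map, kernel_segment_command_t *seg)
{
	/* Round inward: never touch pages outside the segment. */
	vm_map_offset_t start = round_page(seg->vmaddr);
	vm_map_offset_t end   = trunc_page(seg->vmaddr + seg->vmsize);
	kern_return_t   kr;

	/* Pass 1: cap the maximum protections for the range. */
	kr = OSKext_protect(map, start, end, seg->maxprot, TRUE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* Pass 2: apply the segment's initial protections. */
	return OSKext_protect(map, start, end, seg->initprot, FALSE);
}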
+ /* Get the kext's vm map */ + kext_map = kext_get_vm_map(kmod_info); + if (!kext_map) { + result = KERN_MEMORY_ERROR; + goto finish; + } #if !VM_MAPPED_KEXTS - if (getcommandfromheader((kernel_mach_header_t *)kmod_info->address, LC_SEGMENT_SPLIT_INFO)) { - /* This is a split kext in a prelinked kernelcache; we'll let the - * platform code take care of protecting it. It is already wired. - */ - /* TODO: Should this still allow protections for the first segment - * to go through, in the event that we have a mix of split and - * unsplit kexts? - */ - result = KERN_SUCCESS; - goto finish; - } + if (getcommandfromheader((kernel_mach_header_t *)kmod_info->address, LC_SEGMENT_SPLIT_INFO)) { + /* This is a split kext in a prelinked kernelcache; we'll let the + * platform code take care of protecting it. It is already wired. + */ + /* TODO: Should this still allow protections for the first segment + * to go through, in the event that we have a mix of split and + * unsplit kexts? + */ + result = KERN_SUCCESS; + goto finish; + } #endif - /* Protect the headers as read-only; they do not need to be wired */ - result = (protect) ? OSKext_protect(kext_map, kmod_info->address, - kmod_info->address + kmod_info->hdr_size, VM_PROT_READ, TRUE) - : KERN_SUCCESS; - if (result != KERN_SUCCESS) { - goto finish; - } - - /* Set the VM protections and wire down each of the segments */ - seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); - while (seg) { + /* Protect the headers as read-only; they do not need to be wired */ + result = (protect) ? OSKext_protect(kext_map, kmod_info->address, + kmod_info->address + kmod_info->hdr_size, VM_PROT_READ, TRUE) + : KERN_SUCCESS; + if (result != KERN_SUCCESS) { + goto finish; + } + /* Set the VM protections and wire down each of the segments */ + seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); + while (seg) { #if __arm__ - /* We build all ARM kexts, so we can ensure they are aligned */ - assert((seg->vmaddr & PAGE_MASK) == 0); - assert((seg->vmsize & PAGE_MASK) == 0); + /* We build all ARM kexts, so we can ensure they are aligned */ + assert((seg->vmaddr & PAGE_MASK) == 0); + assert((seg->vmsize & PAGE_MASK) == 0); #endif - start = round_page(seg->vmaddr); - end = trunc_page(seg->vmaddr + seg->vmsize); - - if (protect) { - result = OSKext_protect(kext_map, start, end, seg->maxprot, TRUE); - if (result != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s failed to set maximum VM protections " - "for segment %s - 0x%x.", - getIdentifierCString(), seg->segname, (int)result); - goto finish; - } - - result = OSKext_protect(kext_map, start, end, seg->initprot, FALSE); - if (result != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s failed to set initial VM protections " - "for segment %s - 0x%x.", - getIdentifierCString(), seg->segname, (int)result); - goto finish; - } - } - - if (segmentShouldBeWired(seg) && wire) { - result = OSKext_wire(kext_map, start, end, seg->initprot, FALSE); - if (result != KERN_SUCCESS) { - goto finish; - } - } - - seg = nextsegfromheader((kernel_mach_header_t *) kmod_info->address, seg); - } + start = round_page(seg->vmaddr); + end = trunc_page(seg->vmaddr + seg->vmsize); + + if (protect) { + result = OSKext_protect(kext_map, start, end, seg->maxprot, TRUE); + if (result != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s failed to set maximum VM protections " + "for segment %s - 0x%x.", 
+ getIdentifierCString(), seg->segname, (int)result); + goto finish; + } + + result = OSKext_protect(kext_map, start, end, seg->initprot, FALSE); + if (result != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s failed to set initial VM protections " + "for segment %s - 0x%x.", + getIdentifierCString(), seg->segname, (int)result); + goto finish; + } + } + + if (segmentShouldBeWired(seg) && wire) { + result = OSKext_wire(kext_map, start, end, seg->initprot, FALSE); + if (result != KERN_SUCCESS) { + goto finish; + } + } + + seg = nextsegfromheader((kernel_mach_header_t *) kmod_info->address, seg); + } finish: - return result; + return result; } /********************************************************************* *********************************************************************/ -boolean_t +boolean_t OSKext::segmentShouldBeWired(kernel_segment_command_t *seg) { - return (sKeepSymbols || strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname))); + return sKeepSymbols || strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)); } /********************************************************************* @@ -6094,157 +6089,156 @@ OSKext::segmentShouldBeWired(kernel_segment_command_t *seg) OSReturn OSKext::validateKextMapping(bool startFlag) { - OSReturn result = kOSReturnError; - const char * whichOp = startFlag ? "start" : "stop"; - kern_return_t kern_result = 0; - vm_map_t kext_map = NULL; - kernel_segment_command_t * seg = NULL; - mach_vm_address_t address = 0; - mach_vm_size_t size = 0; - uint32_t depth = 0; - mach_msg_type_number_t count; - vm_region_submap_short_info_data_64_t info; - - if (flags.builtin) return (kOSReturnSuccess); - - count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; - bzero(&info, sizeof(info)); - - // xxx - do we need a distinct OSReturn value for these or is "bad data" - // xxx - sufficient? - - /* Verify that the kmod_info and start/stop pointers are non-NULL. - */ - if (!kmod_info) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s - NULL kmod_info pointer.", - getIdentifierCString()); - result = kOSKextReturnBadData; - goto finish; - } - - if (startFlag) { - address = (mach_vm_address_t)kmod_info->start; - } else { - address = (mach_vm_address_t)kmod_info->stop; - } - - if (!address) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s - NULL module %s pointer.", - getIdentifierCString(), whichOp); - result = kOSKextReturnBadData; - goto finish; - } - - kext_map = kext_get_vm_map(kmod_info); - depth = (kernel_map == kext_map) ? 1 : 2; - - /* Verify that the start/stop function lies within the kext's address range. - */ - if (getcommandfromheader((kernel_mach_header_t *)kmod_info->address, LC_SEGMENT_SPLIT_INFO)) { - /* This will likely be how we deal with split kexts; walk the segments to - * check that the function lies inside one of the segments of this kext. 
- */ - for (seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); - seg != NULL; - seg = nextsegfromheader((kernel_mach_header_t *)kmod_info->address, seg)) { - if ((address >= seg->vmaddr) && address < (seg->vmaddr + seg->vmsize)) { - break; - } - } - - if (!seg) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s module %s pointer is outside of kext range " - "(%s %p - kext starts at %p).", - getIdentifierCString(), - whichOp, - whichOp, - (void *)ml_static_unslide(address), - (void *)ml_static_unslide(kmod_info->address)); - result = kOSKextReturnBadData; - goto finish; - } - - seg = NULL; - } else { - if (address < kmod_info->address + kmod_info->hdr_size || - kmod_info->address + kmod_info->size <= address) - { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s module %s pointer is outside of kext range " - "(%s %p - kext at %p-%p).", - getIdentifierCString(), - whichOp, - whichOp, - (void *)ml_static_unslide(address), - (void *)ml_static_unslide(kmod_info->address), - (void *)(ml_static_unslide(kmod_info->address) + kmod_info->size)); - result = kOSKextReturnBadData; - goto finish; - } - } - - /* Only do these checks before calling the start function; - * If anything goes wrong with the mapping while the kext is running, - * we'll likely have panicked well before any attempt to stop the kext. - */ - if (startFlag) { - - /* Verify that the start/stop function is executable. - */ - kern_result = mach_vm_region_recurse(kernel_map, &address, &size, &depth, - (vm_region_recurse_info_t)&info, &count); - if (kern_result != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s - bad %s pointer %p.", - getIdentifierCString(), - whichOp, (void *)ml_static_unslide(address)); - result = kOSKextReturnBadData; - goto finish; - } + OSReturn result = kOSReturnError; + const char * whichOp = startFlag ? "start" : "stop"; + kern_return_t kern_result = 0; + vm_map_t kext_map = NULL; + kernel_segment_command_t * seg = NULL; + mach_vm_address_t address = 0; + mach_vm_size_t size = 0; + uint32_t depth = 0; + mach_msg_type_number_t count; + vm_region_submap_short_info_data_64_t info; + + if (flags.builtin) { + return kOSReturnSuccess; + } + + count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64; + bzero(&info, sizeof(info)); + + // xxx - do we need a distinct OSReturn value for these or is "bad data" + // xxx - sufficient? + + /* Verify that the kmod_info and start/stop pointers are non-NULL. + */ + if (!kmod_info) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s - NULL kmod_info pointer.", + getIdentifierCString()); + result = kOSKextReturnBadData; + goto finish; + } + + if (startFlag) { + address = (mach_vm_address_t)kmod_info->start; + } else { + address = (mach_vm_address_t)kmod_info->stop; + } + + if (!address) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s - NULL module %s pointer.", + getIdentifierCString(), whichOp); + result = kOSKextReturnBadData; + goto finish; + } + + kext_map = kext_get_vm_map(kmod_info); + depth = (kernel_map == kext_map) ? 1 : 2; + + /* Verify that the start/stop function lies within the kext's address range. + */ + if (getcommandfromheader((kernel_mach_header_t *)kmod_info->address, LC_SEGMENT_SPLIT_INFO)) { + /* This will likely be how we deal with split kexts; walk the segments to + * check that the function lies inside one of the segments of this kext. 
+ */ + for (seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); + seg != NULL; + seg = nextsegfromheader((kernel_mach_header_t *)kmod_info->address, seg)) { + if ((address >= seg->vmaddr) && address < (seg->vmaddr + seg->vmsize)) { + break; + } + } + + if (!seg) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s module %s pointer is outside of kext range " + "(%s %p - kext starts at %p).", + getIdentifierCString(), + whichOp, + whichOp, + (void *)ml_static_unslide(address), + (void *)ml_static_unslide(kmod_info->address)); + result = kOSKextReturnBadData; + goto finish; + } + + seg = NULL; + } else { + if (address < kmod_info->address + kmod_info->hdr_size || + kmod_info->address + kmod_info->size <= address) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s module %s pointer is outside of kext range " + "(%s %p - kext at %p-%p).", + getIdentifierCString(), + whichOp, + whichOp, + (void *)ml_static_unslide(address), + (void *)ml_static_unslide(kmod_info->address), + (void *)(ml_static_unslide(kmod_info->address) + kmod_info->size)); + result = kOSKextReturnBadData; + goto finish; + } + } + + /* Only do these checks before calling the start function; + * If anything goes wrong with the mapping while the kext is running, + * we'll likely have panicked well before any attempt to stop the kext. + */ + if (startFlag) { + /* Verify that the start/stop function is executable. + */ + kern_result = mach_vm_region_recurse(kernel_map, &address, &size, &depth, + (vm_region_recurse_info_t)&info, &count); + if (kern_result != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s - bad %s pointer %p.", + getIdentifierCString(), + whichOp, (void *)ml_static_unslide(address)); + result = kOSKextReturnBadData; + goto finish; + } #if VM_MAPPED_KEXTS - if (!(info.protection & VM_PROT_EXECUTE)) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s - memory region containing module %s function " - "is not executable.", - getIdentifierCString(), whichOp); - result = kOSKextReturnBadData; - goto finish; - } + if (!(info.protection & VM_PROT_EXECUTE)) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s - memory region containing module %s function " + "is not executable.", + getIdentifierCString(), whichOp); + result = kOSKextReturnBadData; + goto finish; + } #endif - /* Verify that the kext's segments are backed by physical memory. - */ - seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); - while (seg) { - if (!verifySegmentMapping(seg)) { - result = kOSKextReturnBadData; - goto finish; - } - - seg = nextsegfromheader((kernel_mach_header_t *) kmod_info->address, seg); - } + /* Verify that the kext's segments are backed by physical memory. 
+ */ + seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); + while (seg) { + if (!verifySegmentMapping(seg)) { + result = kOSKextReturnBadData; + goto finish; + } - } + seg = nextsegfromheader((kernel_mach_header_t *) kmod_info->address, seg); + } + } - result = kOSReturnSuccess; + result = kOSReturnSuccess; finish: - return result; + return result; } /********************************************************************* @@ -6252,26 +6246,27 @@ finish: boolean_t OSKext::verifySegmentMapping(kernel_segment_command_t *seg) { - mach_vm_address_t address = 0; + mach_vm_address_t address = 0; - if (!segmentShouldBeWired(seg)) return true; + if (!segmentShouldBeWired(seg)) { + return true; + } - for (address = seg->vmaddr; - address < round_page(seg->vmaddr + seg->vmsize); - address += PAGE_SIZE) - { - if (!pmap_find_phys(kernel_pmap, (vm_offset_t)address)) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s - page %p is not backed by physical memory.", - getIdentifierCString(), - (void *)address); - return false; - } - } + for (address = seg->vmaddr; + address < round_page(seg->vmaddr + seg->vmsize); + address += PAGE_SIZE) { + if (!pmap_find_phys(kernel_pmap, (vm_offset_t)address)) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s - page %p is not backed by physical memory.", + getIdentifierCString(), + (void *)address); + return false; + } + } - return true; + return true; } /********************************************************************* @@ -6279,28 +6274,27 @@ OSKext::verifySegmentMapping(kernel_segment_command_t *seg) static void OSKextLogKextInfo(OSKext *aKext, uint64_t address, uint64_t size, firehose_tracepoint_code_t code) { + uint64_t stamp = 0; + firehose_tracepoint_id_u trace_id; + struct firehose_trace_uuid_info_s uuid_info_s; + firehose_trace_uuid_info_t uuid_info = &uuid_info_s; + size_t uuid_info_len = sizeof(struct firehose_trace_uuid_info_s); + OSData *uuid_data; - uint64_t stamp = 0; - firehose_tracepoint_id_u trace_id; - struct firehose_trace_uuid_info_s uuid_info_s; - firehose_trace_uuid_info_t uuid_info = &uuid_info_s; - size_t uuid_info_len = sizeof(struct firehose_trace_uuid_info_s); - OSData *uuid_data; - - stamp = firehose_tracepoint_time(firehose_activity_flags_default); - trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_metadata, _firehose_tracepoint_type_metadata_kext, (firehose_tracepoint_flags_t)0, code); + stamp = firehose_tracepoint_time(firehose_activity_flags_default); + trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_metadata, _firehose_tracepoint_type_metadata_kext, (firehose_tracepoint_flags_t)0, code); - uuid_data = aKext->copyTextUUID(); - if (uuid_data) { - memcpy(uuid_info->ftui_uuid, uuid_data->getBytesNoCopy(), sizeof(uuid_info->ftui_uuid)); - OSSafeReleaseNULL(uuid_data); - } + uuid_data = aKext->copyTextUUID(); + if (uuid_data) { + memcpy(uuid_info->ftui_uuid, uuid_data->getBytesNoCopy(), sizeof(uuid_info->ftui_uuid)); + OSSafeReleaseNULL(uuid_data); + } - uuid_info->ftui_size = size; - uuid_info->ftui_address = ml_static_unslide(address); + uuid_info->ftui_size = size; + uuid_info->ftui_address = ml_static_unslide(address); - firehose_trace_metadata(firehose_stream_metadata, trace_id, stamp, uuid_info, uuid_info_len); - return; + firehose_trace_metadata(firehose_stream_metadata, trace_id, stamp, uuid_info, uuid_info_len); + return; } /********************************************************************* @@ -6308,157 
+6302,158 @@ OSKextLogKextInfo(OSKext *aKext, uint64_t address, uint64_t size, firehose_trace OSReturn OSKext::start(bool startDependenciesFlag) { - OSReturn result = kOSReturnError; - kern_return_t (* startfunc)(kmod_info_t *, void *); - unsigned int i, count; - void * kmodStartData = NULL; - - if (isStarted() || isInterface() || isKernelComponent()) { - result = kOSReturnSuccess; - goto finish; - } - - if (!isLoaded()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Attempt to start nonloaded kext %s.", - getIdentifierCString()); - result = kOSKextReturnInvalidArgument; - goto finish; - } - - if (!sLoadEnabled) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext loading is disabled (attempt to start kext %s).", - getIdentifierCString()); - result = kOSKextReturnDisabled; - goto finish; - } - - result = validateKextMapping(/* start? */ true); - if (result != kOSReturnSuccess) { - goto finish; - } - - startfunc = kmod_info->start; - - count = getNumDependencies(); - for (i = 0; i < count; i++) { - OSKext * dependency = OSDynamicCast(OSKext, dependencies->getObject(i)); - if (dependency == NULL) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s start - internal error, dependency disappeared.", - getIdentifierCString()); - goto finish; - } - if (!dependency->isStarted()) { - if (startDependenciesFlag) { - OSReturn dependencyResult = - dependency->start(startDependenciesFlag); - if (dependencyResult != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s start - dependency %s failed to start (error 0x%x).", - getIdentifierCString(), - dependency->getIdentifierCString(), - dependencyResult); - goto finish; - } - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Not starting %s - dependency %s not started yet.", - getIdentifierCString(), - dependency->getIdentifierCString()); - result = kOSKextReturnStartStopError; // xxx - make new return? - goto finish; - } - } - } - - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogLoadFlag, - "Kext %s calling module start function.", - getIdentifierCString()); - - flags.starting = 1; - - // Drop a log message so logd can grab the needed information to decode this kext - OSKextLogKextInfo(this, kmod_info->address, kmod_info->size, firehose_tracepoint_code_load); - result = OSRuntimeInitializeCPP(this); - if (result == KERN_SUCCESS) { - result = startfunc(kmod_info, kmodStartData); - } - - flags.starting = 0; - - /* On success overlap the setting of started/starting. On failure just - * clear starting. - */ - if (result == KERN_SUCCESS) { - flags.started = 1; - - // xxx - log start error from kernel? 
- OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag, - "Kext %s is now started.", - getIdentifierCString()); - } else { - invokeOrCancelRequestCallbacks( - /* result not actually used */ kOSKextReturnStartStopError, - /* invokeFlag */ false); - OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag, - "Kext %s did not start (return code 0x%x).", - getIdentifierCString(), result); - } + OSReturn result = kOSReturnError; + kern_return_t (* startfunc)(kmod_info_t *, void *); + unsigned int i, count; + void * kmodStartData = NULL; + + if (isStarted() || isInterface() || isKernelComponent()) { + result = kOSReturnSuccess; + goto finish; + } + + if (!isLoaded()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Attempt to start nonloaded kext %s.", + getIdentifierCString()); + result = kOSKextReturnInvalidArgument; + goto finish; + } + + if (!sLoadEnabled) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext loading is disabled (attempt to start kext %s).", + getIdentifierCString()); + result = kOSKextReturnDisabled; + goto finish; + } + + result = validateKextMapping(/* start? */ true); + if (result != kOSReturnSuccess) { + goto finish; + } + + startfunc = kmod_info->start; + + count = getNumDependencies(); + for (i = 0; i < count; i++) { + OSKext * dependency = OSDynamicCast(OSKext, dependencies->getObject(i)); + if (dependency == NULL) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s start - internal error, dependency disappeared.", + getIdentifierCString()); + goto finish; + } + if (!dependency->isStarted()) { + if (startDependenciesFlag) { + OSReturn dependencyResult = + dependency->start(startDependenciesFlag); + if (dependencyResult != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s start - dependency %s failed to start (error 0x%x).", + getIdentifierCString(), + dependency->getIdentifierCString(), + dependencyResult); + goto finish; + } + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Not starting %s - dependency %s not started yet.", + getIdentifierCString(), + dependency->getIdentifierCString()); + result = kOSKextReturnStartStopError; // xxx - make new return? + goto finish; + } + } + } + + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogLoadFlag, + "Kext %s calling module start function.", + getIdentifierCString()); + + flags.starting = 1; + + // Drop a log message so logd can grab the needed information to decode this kext + OSKextLogKextInfo(this, kmod_info->address, kmod_info->size, firehose_tracepoint_code_load); + result = OSRuntimeInitializeCPP(this); + if (result == KERN_SUCCESS) { + result = startfunc(kmod_info, kmodStartData); + } + + flags.starting = 0; + + /* On success overlap the setting of started/starting. On failure just + * clear starting. + */ + if (result == KERN_SUCCESS) { + flags.started = 1; + + // xxx - log start error from kernel? 
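+		/* Editor's note: reaching this branch means both
+		 * OSRuntimeInitializeCPP() (which runs the kext's C++ static
+		 * constructors) and the module's start routine returned
+		 * KERN_SUCCESS, so the kext is considered fully started. */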
+ OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "Kext %s is now started.", + getIdentifierCString()); + } else { + invokeOrCancelRequestCallbacks( + /* result not actually used */ kOSKextReturnStartStopError, + /* invokeFlag */ false); + OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "Kext %s did not start (return code 0x%x).", + getIdentifierCString(), result); + } finish: - return result; + return result; } /********************************************************************* *********************************************************************/ /* static */ -bool OSKext::canUnloadKextWithIdentifier( - OSString * kextIdentifier, - bool checkClassesFlag) +bool +OSKext::canUnloadKextWithIdentifier( + OSString * kextIdentifier, + bool checkClassesFlag) { - bool result = false; - OSKext * aKext = NULL; // do not release + bool result = false; + OSKext * aKext = NULL;// do not release - IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextLock); - aKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); + aKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); - if (!aKext) { - goto finish; // can't unload what's not loaded - } + if (!aKext) { + goto finish; // can't unload what's not loaded + } - if (aKext->isLoaded()) { - if (aKext->getRetainCount() > kOSKextMinLoadedRetainCount) { - goto finish; - } - if (checkClassesFlag && aKext->hasOSMetaClassInstances()) { - goto finish; - } - } + if (aKext->isLoaded()) { + if (aKext->getRetainCount() > kOSKextMinLoadedRetainCount) { + goto finish; + } + if (checkClassesFlag && aKext->hasOSMetaClassInstances()) { + goto finish; + } + } - result = true; + result = true; finish: - IORecursiveLockUnlock(sKextLock); - return result; + IORecursiveLockUnlock(sKextLock); + return result; } /********************************************************************* @@ -6466,97 +6461,96 @@ finish: OSReturn OSKext::stop(void) { - OSReturn result = kOSReturnError; - kern_return_t (*stopfunc)(kmod_info_t *, void *); - - if (!isStarted() || isInterface()) { - result = kOSReturnSuccess; - goto finish; - } - - if (!isLoaded()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Attempt to stop nonloaded kext %s.", - getIdentifierCString()); - result = kOSKextReturnInvalidArgument; - goto finish; - } - - /* Refuse to stop if we have clients or instances. It is up to - * the caller to make sure those aren't true. - */ - if (getRetainCount() > kOSKextMinLoadedRetainCount) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s - C++ instances; can't stop.", - getIdentifierCString()); - result = kOSKextReturnInUse; - goto finish; - } - - if (getRetainCount() > kOSKextMinLoadedRetainCount) { - - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s - has references (linkage or tracking object); " - "can't stop.", - getIdentifierCString()); - result = kOSKextReturnInUse; - goto finish; - } - - /* Note: If validateKextMapping fails on the stop & unload path, - * we are in serious trouble and a kernel panic is likely whether - * we stop & unload the kext or not. - */ - result = validateKextMapping(/* start? 
*/ false); - if (result != kOSReturnSuccess) { - goto finish; - } - - stopfunc = kmod_info->stop; - if (stopfunc) { - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogLoadFlag, - "Kext %s calling module stop function.", - getIdentifierCString()); - - flags.stopping = 1; - - result = stopfunc(kmod_info, /* userData */ NULL); - if (result == KERN_SUCCESS) { - result = OSRuntimeFinalizeCPP(this); - } - - flags.stopping = 0; - - if (result == KERN_SUCCESS) { - flags.started = 0; - - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogLoadFlag, - "Kext %s is now stopped and ready to unload.", - getIdentifierCString()); - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s did not stop (return code 0x%x).", - getIdentifierCString(), result); - result = kOSKextReturnStartStopError; - } - } + OSReturn result = kOSReturnError; + kern_return_t (*stopfunc)(kmod_info_t *, void *); + + if (!isStarted() || isInterface()) { + result = kOSReturnSuccess; + goto finish; + } + + if (!isLoaded()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Attempt to stop nonloaded kext %s.", + getIdentifierCString()); + result = kOSKextReturnInvalidArgument; + goto finish; + } + + /* Refuse to stop if we have clients or instances. It is up to + * the caller to make sure those aren't true. + */ + if (getRetainCount() > kOSKextMinLoadedRetainCount) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s - C++ instances; can't stop.", + getIdentifierCString()); + result = kOSKextReturnInUse; + goto finish; + } + + if (getRetainCount() > kOSKextMinLoadedRetainCount) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s - has references (linkage or tracking object); " + "can't stop.", + getIdentifierCString()); + result = kOSKextReturnInUse; + goto finish; + } + + /* Note: If validateKextMapping fails on the stop & unload path, + * we are in serious trouble and a kernel panic is likely whether + * we stop & unload the kext or not. + */ + result = validateKextMapping(/* start? 
*/ false); + if (result != kOSReturnSuccess) { + goto finish; + } + + stopfunc = kmod_info->stop; + if (stopfunc) { + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogLoadFlag, + "Kext %s calling module stop function.", + getIdentifierCString()); + + flags.stopping = 1; + + result = stopfunc(kmod_info, /* userData */ NULL); + if (result == KERN_SUCCESS) { + result = OSRuntimeFinalizeCPP(this); + } + + flags.stopping = 0; + + if (result == KERN_SUCCESS) { + flags.started = 0; + + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogLoadFlag, + "Kext %s is now stopped and ready to unload.", + getIdentifierCString()); + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s did not stop (return code 0x%x).", + getIdentifierCString(), result); + result = kOSKextReturnStartStopError; + } + } finish: - // Drop a log message so logd can update this kext's metadata - OSKextLogKextInfo(this, kmod_info->address, kmod_info->size, firehose_tracepoint_code_unload); - return result; + // Drop a log message so logd can update this kext's metadata + OSKextLogKextInfo(this, kmod_info->address, kmod_info->size, firehose_tracepoint_code_unload); + return result; } /********************************************************************* @@ -6564,267 +6558,270 @@ finish: OSReturn OSKext::unload(void) { - OSReturn result = kOSReturnError; - unsigned int index; - uint32_t num_kmod_refs = 0; - OSKextAccount * freeAccount; - - if (!sUnloadEnabled) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext unloading is disabled (%s).", - this->getIdentifierCString()); - - result = kOSKextReturnDisabled; - goto finish; - } - - /* Refuse to unload if we have clients or instances. It is up to - * the caller to make sure those aren't true. - */ - if (getRetainCount() > kOSKextMinLoadedRetainCount) { - // xxx - Don't log under errors? this is more of an info thing - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogKextBookkeepingFlag, - "Can't unload kext %s; outstanding references (linkage or tracking object).", - getIdentifierCString()); - result = kOSKextReturnInUse; - goto finish; - } - - if (!isLoaded()) { - result = kOSReturnSuccess; - goto finish; - } - - if (isKernelComponent()) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - - if (metaClasses && !OSMetaClass::removeClasses(metaClasses)) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, - "Can't unload kext %s; classes have instances:", - getIdentifierCString()); - reportOSMetaClassInstances(kOSKextLogErrorLevel | - kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag); - result = kOSKextReturnInUse; - goto finish; - } - - /* Note that the kext is unloading before running any code that - * might be in the kext (request callbacks, module stop function). - * We will deny certain requests made against a kext in the process - * of unloading. - */ - flags.unloading = 1; - - /* Update the string describing the last kext to unload in case we panic. 
- */ - savePanicString(/* isLoading */ false); - - if (isStarted()) { - result = stop(); - if (result != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s can't unload - module stop returned 0x%x.", - getIdentifierCString(), (unsigned)result); - result = kOSKextReturnStartStopError; - goto finish; - } - } - - OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag, - "Kext %s unloading.", - getIdentifierCString()); - - { - struct list_head *p; - struct list_head *prev; - struct list_head *next; - for (p = pendingPgoHead.next; p != &pendingPgoHead; p = next) { - OSKextGrabPgoStruct *s = container_of(p, OSKextGrabPgoStruct, list_head); - s->err = OSKextGrabPgoDataLocked(this, s->metadata, instance_uuid, s->pSize, s->pBuffer, s->bufferSize); - prev = p->prev; - next = p->next; - prev->next = next; - next->prev = prev; - p->prev = p; - p->next = p; - IORecursiveLockWakeup(sKextLock, s, false); - } - } - - - /* Even if we don't call the stop function, we want to be sure we - * have no OSMetaClass references before unloading the kext executable - * from memory. OSMetaClasses may have pointers into the kext executable - * and that would cause a panic on OSKext::free() when metaClasses is freed. - */ - if (metaClasses) { - metaClasses->flushCollection(); - } - (void) OSRuntimeFinalizeCPP(this); - - /* Remove the kext from the list of loaded kexts, patch the gap - * in the kmod_info_t linked list, and reset "kmod" to point to the - * last loaded kext that isn't the fake kernel kext (sKernelKext). - */ - index = sLoadedKexts->getNextIndexOfObject(this, 0); - if (index != (unsigned int)-1) { - - sLoadedKexts->removeObject(index); - - OSKext * nextKext = OSDynamicCast(OSKext, - sLoadedKexts->getObject(index)); - - if (nextKext) { - if (index > 0) { - OSKext * gapKext = OSDynamicCast(OSKext, - sLoadedKexts->getObject(index - 1)); - - nextKext->kmod_info->next = gapKext->kmod_info; - - } else /* index == 0 */ { - nextKext->kmod_info->next = NULL; - } - } - - OSKext * lastKext = OSDynamicCast(OSKext, sLoadedKexts->getLastObject()); - if (lastKext && !lastKext->isKernel()) { - kmod = lastKext->kmod_info; - } else { - kmod = NULL; // clear the global kmod variable - } - } - - /* Clear out the kmod references that we're keeping for compatibility - * with current panic backtrace code & kgmacros. - * xxx - will want to update those bits sometime and remove this. - */ - num_kmod_refs = getNumDependencies(); - if (num_kmod_refs && kmod_info && kmod_info->reference_list) { - for (uint32_t refIndex = 0; refIndex < num_kmod_refs; refIndex++) { - kmod_reference_t * ref = &(kmod_info->reference_list[refIndex]); - ref->info->reference_count--; - } - kfree(kmod_info->reference_list, - num_kmod_refs * sizeof(kmod_reference_t)); - } + OSReturn result = kOSReturnError; + unsigned int index; + uint32_t num_kmod_refs = 0; + OSKextAccount * freeAccount; + + if (!sUnloadEnabled) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext unloading is disabled (%s).", + this->getIdentifierCString()); + + result = kOSKextReturnDisabled; + goto finish; + } + + /* Refuse to unload if we have clients or instances. It is up to + * the caller to make sure those aren't true. + */ + if (getRetainCount() > kOSKextMinLoadedRetainCount) { + // xxx - Don't log under errors? 
this is more of an info thing + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogKextBookkeepingFlag, + "Can't unload kext %s; outstanding references (linkage or tracking object).", + getIdentifierCString()); + result = kOSKextReturnInUse; + goto finish; + } + + if (!isLoaded()) { + result = kOSReturnSuccess; + goto finish; + } + + if (isKernelComponent()) { + result = kOSKextReturnInvalidArgument; + goto finish; + } + + if (metaClasses && !OSMetaClass::removeClasses(metaClasses)) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, + "Can't unload kext %s; classes have instances:", + getIdentifierCString()); + reportOSMetaClassInstances(kOSKextLogErrorLevel | + kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag); + result = kOSKextReturnInUse; + goto finish; + } + + /* Note that the kext is unloading before running any code that + * might be in the kext (request callbacks, module stop function). + * We will deny certain requests made against a kext in the process + * of unloading. + */ + flags.unloading = 1; + + /* Update the string describing the last kext to unload in case we panic. + */ + savePanicString(/* isLoading */ false); + + if (isStarted()) { + result = stop(); + if (result != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s can't unload - module stop returned 0x%x.", + getIdentifierCString(), (unsigned)result); + result = kOSKextReturnStartStopError; + goto finish; + } + } + + OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "Kext %s unloading.", + getIdentifierCString()); + + { + struct list_head *p; + struct list_head *prev; + struct list_head *next; + for (p = pendingPgoHead.next; p != &pendingPgoHead; p = next) { + OSKextGrabPgoStruct *s = container_of(p, OSKextGrabPgoStruct, list_head); + s->err = OSKextGrabPgoDataLocked(this, s->metadata, instance_uuid, s->pSize, s->pBuffer, s->bufferSize); + prev = p->prev; + next = p->next; + prev->next = next; + next->prev = prev; + p->prev = p; + p->next = p; + IORecursiveLockWakeup(sKextLock, s, false); + } + } + + + /* Even if we don't call the stop function, we want to be sure we + * have no OSMetaClass references before unloading the kext executable + * from memory. OSMetaClasses may have pointers into the kext executable + * and that would cause a panic on OSKext::free() when metaClasses is freed. + */ + if (metaClasses) { + metaClasses->flushCollection(); + } + (void) OSRuntimeFinalizeCPP(this); + + /* Remove the kext from the list of loaded kexts, patch the gap + * in the kmod_info_t linked list, and reset "kmod" to point to the + * last loaded kext that isn't the fake kernel kext (sKernelKext). + */ + index = sLoadedKexts->getNextIndexOfObject(this, 0); + if (index != (unsigned int)-1) { + sLoadedKexts->removeObject(index); + + OSKext * nextKext = OSDynamicCast(OSKext, + sLoadedKexts->getObject(index)); + + if (nextKext) { + if (index > 0) { + OSKext * gapKext = OSDynamicCast(OSKext, + sLoadedKexts->getObject(index - 1)); + + nextKext->kmod_info->next = gapKext->kmod_info; + } else { /* index == 0 */ + nextKext->kmod_info->next = NULL; + } + } + + OSKext * lastKext = OSDynamicCast(OSKext, sLoadedKexts->getLastObject()); + if (lastKext && !lastKext->isKernel()) { + kmod = lastKext->kmod_info; + } else { + kmod = NULL; // clear the global kmod variable + } + } + + /* Clear out the kmod references that we're keeping for compatibility + * with current panic backtrace code & kgmacros. 
+ * xxx - will want to update those bits sometime and remove this. + */ + num_kmod_refs = getNumDependencies(); + if (num_kmod_refs && kmod_info && kmod_info->reference_list) { + for (uint32_t refIndex = 0; refIndex < num_kmod_refs; refIndex++) { + kmod_reference_t * ref = &(kmod_info->reference_list[refIndex]); + ref->info->reference_count--; + } + kfree(kmod_info->reference_list, + num_kmod_refs * sizeof(kmod_reference_t)); + } #if CONFIG_DTRACE - unregisterWithDTrace(); + unregisterWithDTrace(); #endif /* CONFIG_DTRACE */ - notifyKextUnloadObservers(this); + notifyKextUnloadObservers(this); - freeAccount = NULL; - IOSimpleLockLock(sKextAccountsLock); - account->kext = NULL; - if (account->site.tag) account->site.flags |= VM_TAG_UNLOAD; - else freeAccount = account; - IOSimpleLockUnlock(sKextAccountsLock); - if (freeAccount) IODelete(freeAccount, OSKextAccount, 1); + freeAccount = NULL; + IOSimpleLockLock(sKextAccountsLock); + account->kext = NULL; + if (account->site.tag) { + account->site.flags |= VM_TAG_UNLOAD; + } else { + freeAccount = account; + } + IOSimpleLockUnlock(sKextAccountsLock); + if (freeAccount) { + IODelete(freeAccount, OSKextAccount, 1); + } - /* Unwire and free the linked executable. - */ - if (linkedExecutable) { + /* Unwire and free the linked executable. + */ + if (linkedExecutable) { #if KASAN - kasan_unload_kext((vm_offset_t)linkedExecutable->getBytesNoCopy(), linkedExecutable->getLength()); + kasan_unload_kext((vm_offset_t)linkedExecutable->getBytesNoCopy(), linkedExecutable->getLength()); #endif #if VM_MAPPED_KEXTS - if (!isInterface()) { - kernel_segment_command_t *seg = NULL; - vm_map_t kext_map = kext_get_vm_map(kmod_info); - - if (!kext_map) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Failed to free kext %s; couldn't find the kext map.", - getIdentifierCString()); - result = kOSKextReturnInternalError; - goto finish; - } - - OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag, - "Kext %s unwiring and unmapping linked executable.", - getIdentifierCString()); - - seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); - while (seg) { - if (segmentShouldBeWired(seg)) { - result = vm_map_unwire(kext_map, seg->vmaddr, - seg->vmaddr + seg->vmsize, FALSE); - if (result != KERN_SUCCESS) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Failed to unwire kext %s.", - getIdentifierCString()); - result = kOSKextReturnInternalError; - goto finish; - } - } - - seg = nextsegfromheader((kernel_mach_header_t *) kmod_info->address, seg); - } - } + if (!isInterface()) { + kernel_segment_command_t *seg = NULL; + vm_map_t kext_map = kext_get_vm_map(kmod_info); + + if (!kext_map) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Failed to free kext %s; couldn't find the kext map.", + getIdentifierCString()); + result = kOSKextReturnInternalError; + goto finish; + } + + OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "Kext %s unwiring and unmapping linked executable.", + getIdentifierCString()); + + seg = firstsegfromheader((kernel_mach_header_t *)kmod_info->address); + while (seg) { + if (segmentShouldBeWired(seg)) { + result = vm_map_unwire(kext_map, seg->vmaddr, + seg->vmaddr + seg->vmsize, FALSE); + if (result != KERN_SUCCESS) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Failed to unwire kext %s.", + getIdentifierCString()); + result = kOSKextReturnInternalError; + goto finish; + } + } + + seg = nextsegfromheader((kernel_mach_header_t *) 
kmod_info->address, seg); + } + } #endif - OSSafeReleaseNULL(linkedExecutable); - } - - /* An interface kext has a fake kmod_info that was allocated, - * so we have to free it. - */ - if (isInterface()) { - kfree(kmod_info, sizeof(kmod_info_t)); - } - - kmod_info = NULL; - - flags.loaded = false; - flushDependencies(); - - /* save a copy of the bundle ID for us to check when deciding to - * rebuild the kernel cache file. If a kext was already in the kernel - * cache and unloaded then later loaded we do not need to rebuild the - * kernel cache. 9055303 - */ - if (isPrelinked()) { - if (!_OSKextInUnloadedPrelinkedKexts(bundleID)) { - IORecursiveLockLock(sKextLock); - if (sUnloadedPrelinkedKexts) { - sUnloadedPrelinkedKexts->setObject(bundleID); - } - IORecursiveLockUnlock(sKextLock); - } - } - - OSKextLog(this, - kOSKextLogProgressLevel | kOSKextLogLoadFlag, - "Kext %s unloaded.", getIdentifierCString()); - - queueKextNotification(kKextRequestPredicateUnloadNotification, - OSDynamicCast(OSString, bundleID)); + OSSafeReleaseNULL(linkedExecutable); + } + + /* An interface kext has a fake kmod_info that was allocated, + * so we have to free it. + */ + if (isInterface()) { + kfree(kmod_info, sizeof(kmod_info_t)); + } + + kmod_info = NULL; + + flags.loaded = false; + flushDependencies(); + + /* save a copy of the bundle ID for us to check when deciding to + * rebuild the kernel cache file. If a kext was already in the kernel + * cache and unloaded then later loaded we do not need to rebuild the + * kernel cache. 9055303 + */ + if (isPrelinked()) { + if (!_OSKextInUnloadedPrelinkedKexts(bundleID)) { + IORecursiveLockLock(sKextLock); + if (sUnloadedPrelinkedKexts) { + sUnloadedPrelinkedKexts->setObject(bundleID); + } + IORecursiveLockUnlock(sKextLock); + } + } + + OSKextLog(this, + kOSKextLogProgressLevel | kOSKextLogLoadFlag, + "Kext %s unloaded.", getIdentifierCString()); + + queueKextNotification(kKextRequestPredicateUnloadNotification, + OSDynamicCast(OSString, bundleID)); finish: - OSKext::saveLoadedKextPanicList(); - OSKext::updateLoadedKextSummaries(); + OSKext::saveLoadedKextPanicList(); + OSKext::updateLoadedKextSummaries(); - flags.unloading = 0; - return result; + flags.unloading = 0; + return result; } /********************************************************************* @@ -6833,88 +6830,87 @@ finish: /* static */ OSReturn OSKext::queueKextNotification( - const char * notificationName, - OSString * kextIdentifier) -{ - OSReturn result = kOSReturnError; - OSDictionary * loadRequest = NULL; // must release - - if (!kextIdentifier) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - - /* Create a new request unless one is already sitting - * in sKernelRequests for this bundle identifier - */ - result = _OSKextCreateRequest(notificationName, &loadRequest); - if (result != kOSReturnSuccess) { - goto finish; - } - if (!_OSKextSetRequestArgument(loadRequest, - kKextRequestArgumentBundleIdentifierKey, kextIdentifier)) { - - result = kOSKextReturnNoMemory; - goto finish; - } - if (!sKernelRequests->setObject(loadRequest)) { - result = kOSKextReturnNoMemory; - goto finish; - } - - /* We might want to only queue the notification if kextd is active, - * but that wouldn't work for embedded. Note that we don't care if - * the ping immediately succeeds here so don't do anything with the - * result of this call. 
- */ - OSKext::pingKextd(); - - result = kOSReturnSuccess; + const char * notificationName, + OSString * kextIdentifier) +{ + OSReturn result = kOSReturnError; + OSDictionary * loadRequest = NULL;// must release + + if (!kextIdentifier) { + result = kOSKextReturnInvalidArgument; + goto finish; + } + + /* Create a new request unless one is already sitting + * in sKernelRequests for this bundle identifier + */ + result = _OSKextCreateRequest(notificationName, &loadRequest); + if (result != kOSReturnSuccess) { + goto finish; + } + if (!_OSKextSetRequestArgument(loadRequest, + kKextRequestArgumentBundleIdentifierKey, kextIdentifier)) { + result = kOSKextReturnNoMemory; + goto finish; + } + if (!sKernelRequests->setObject(loadRequest)) { + result = kOSKextReturnNoMemory; + goto finish; + } + + /* We might want to only queue the notification if kextd is active, + * but that wouldn't work for embedded. Note that we don't care if + * the ping immediately succeeds here so don't do anything with the + * result of this call. + */ + OSKext::pingKextd(); + + result = kOSReturnSuccess; finish: - OSSafeReleaseNULL(loadRequest); + OSSafeReleaseNULL(loadRequest); - return result; + return result; } /********************************************************************* *********************************************************************/ static void _OSKextConsiderDestroyingLinkContext( - __unused thread_call_param_t p0, - __unused thread_call_param_t p1) -{ - /* Take multiple locks in the correct order. - */ - IORecursiveLockLock(sKextLock); - IORecursiveLockLock(sKextInnerLock); - - /* The first time we destroy the kxldContext is in the first - * OSKext::considerUnloads() call, which sets sConsiderUnloadsCalled - * before calling this function. Thereafter any call to this function - * will actually destroy the context. - */ - if (sConsiderUnloadsCalled && sKxldContext) { - kxld_destroy_context(sKxldContext); - sKxldContext = NULL; - } - - /* Free the thread_call that was allocated to execute this function. - */ - if (sDestroyLinkContextThread) { - if (!thread_call_free(sDestroyLinkContextThread)) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "thread_call_free() failed for kext link context."); - } - sDestroyLinkContextThread = 0; - } - - IORecursiveLockUnlock(sKextInnerLock); - IORecursiveLockUnlock(sKextLock); - - return; + __unused thread_call_param_t p0, + __unused thread_call_param_t p1) +{ + /* Take multiple locks in the correct order. + */ + IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextInnerLock); + + /* The first time we destroy the kxldContext is in the first + * OSKext::considerUnloads() call, which sets sConsiderUnloadsCalled + * before calling this function. Thereafter any call to this function + * will actually destroy the context. + */ + if (sConsiderUnloadsCalled && sKxldContext) { + kxld_destroy_context(sKxldContext); + sKxldContext = NULL; + } + + /* Free the thread_call that was allocated to execute this function. 
+ */ + if (sDestroyLinkContextThread) { + if (!thread_call_free(sDestroyLinkContextThread)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "thread_call_free() failed for kext link context."); + } + sDestroyLinkContextThread = 0; + } + + IORecursiveLockUnlock(sKextInnerLock); + IORecursiveLockUnlock(sKextLock); + + return; } /********************************************************************* @@ -6930,32 +6926,32 @@ _OSKextConsiderDestroyingLinkContext( void OSKext::considerDestroyingLinkContext(void) { - IORecursiveLockLock(sKextInnerLock); - - /* If we have already queued a thread to destroy the link context, - * don't bother resetting; that thread will take care of it. - */ - if (sDestroyLinkContextThread) { - goto finish; - } - - /* The function to be invoked in the thread will deallocate - * this thread_call, so don't share it around. - */ - sDestroyLinkContextThread = thread_call_allocate( - &_OSKextConsiderDestroyingLinkContext, 0); - if (!sDestroyLinkContextThread) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag | kOSKextLogLinkFlag, - "Can't create thread to destroy kext link context."); - goto finish; - } - - thread_call_enter(sDestroyLinkContextThread); + IORecursiveLockLock(sKextInnerLock); + + /* If we have already queued a thread to destroy the link context, + * don't bother resetting; that thread will take care of it. + */ + if (sDestroyLinkContextThread) { + goto finish; + } + + /* The function to be invoked in the thread will deallocate + * this thread_call, so don't share it around. + */ + sDestroyLinkContextThread = thread_call_allocate( + &_OSKextConsiderDestroyingLinkContext, 0); + if (!sDestroyLinkContextThread) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag | kOSKextLogLinkFlag, + "Can't create thread to destroy kext link context."); + goto finish; + } + + thread_call_enter(sDestroyLinkContextThread); finish: - IORecursiveLockUnlock(sKextInnerLock); - return; + IORecursiveLockUnlock(sKextInnerLock); + return; } #if PRAGMA_MARK @@ -6969,211 +6965,210 @@ finish: OSReturn OSKext::autounloadKext(OSKext * aKext) { - OSReturn result = kOSKextReturnInUse; - - /* Check for external references to this kext (usu. dependents), - * instances of defined classes (or classes derived from them), - * outstanding requests. - */ - if ((aKext->getRetainCount() > kOSKextMinLoadedRetainCount) || - !aKext->flags.autounloadEnabled || - aKext->isKernelComponent()) { - - goto finish; - } - - /* Skip a delay-autounload kext, once. - */ - if (aKext->flags.delayAutounload) { - OSKextLog(aKext, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, - "Kext %s has delayed autounload set; skipping and clearing flag.", - aKext->getIdentifierCString()); - aKext->flags.delayAutounload = 0; - goto finish; - } - - if (aKext->hasOSMetaClassInstances() || - aKext->countRequestCallbacks()) { - goto finish; - } - - result = OSKext::removeKext(aKext); + OSReturn result = kOSKextReturnInUse; + + /* Check for external references to this kext (usu. dependents), + * instances of defined classes (or classes derived from them), + * outstanding requests. + */ + if ((aKext->getRetainCount() > kOSKextMinLoadedRetainCount) || + !aKext->flags.autounloadEnabled || + aKext->isKernelComponent()) { + goto finish; + } + + /* Skip a delay-autounload kext, once. 
+ */ + if (aKext->flags.delayAutounload) { + OSKextLog(aKext, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag, + "Kext %s has delayed autounload set; skipping and clearing flag.", + aKext->getIdentifierCString()); + aKext->flags.delayAutounload = 0; + goto finish; + } + + if (aKext->hasOSMetaClassInstances() || + aKext->countRequestCallbacks()) { + goto finish; + } + + result = OSKext::removeKext(aKext); + +finish: + return result; +} -finish: - return result; -} - /********************************************************************* *********************************************************************/ void _OSKextConsiderUnloads( - __unused thread_call_param_t p0, - __unused thread_call_param_t p1) -{ - bool didUnload = false; - unsigned int count, i; - - /* Take multiple locks in the correct order - * (note also sKextSummaries lock further down). - */ - IORecursiveLockLock(sKextLock); - IORecursiveLockLock(sKextInnerLock); - - OSKext::flushNonloadedKexts(/* flushPrelinkedKexts */ true); - - /* If the system is powering down, don't try to unload anything. - */ - if (sSystemSleep) { - goto finish; - } - - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | kOSKextLogLoadFlag, - "Checking for unused kexts to autounload."); - - /***** - * Remove any request callbacks marked as stale, - * and mark as stale any currently in flight. - */ - count = sRequestCallbackRecords->getCount(); - if (count) { - i = count - 1; - do { - OSDictionary * callbackRecord = OSDynamicCast(OSDictionary, - sRequestCallbackRecords->getObject(i)); - OSBoolean * stale = OSDynamicCast(OSBoolean, - callbackRecord->getObject(kKextRequestStaleKey)); - - if (stale == kOSBooleanTrue) { - OSKext::invokeRequestCallback(callbackRecord, - kOSKextReturnTimeout); - } else { - callbackRecord->setObject(kKextRequestStaleKey, - kOSBooleanTrue); - } - } while (i--); - } - - /***** - * Make multiple passes through the array of loaded kexts until - * we don't unload any. This handles unwinding of dependency - * chains. We have to go *backwards* through the array because - * kexts are removed from it when unloaded, and we cannot make - * a copy or we'll mess up the retain counts we rely on to - * check whether a kext will unload. If only we could have - * nonretaining collections like CF has.... - */ - do { - didUnload = false; - - count = sLoadedKexts->getCount(); - if (count) { - i = count - 1; - do { - OSKext * thisKext = OSDynamicCast(OSKext, - sLoadedKexts->getObject(i)); - didUnload |= (kOSReturnSuccess == OSKext::autounloadKext(thisKext)); - } while (i--); - } - } while (didUnload); + __unused thread_call_param_t p0, + __unused thread_call_param_t p1) +{ + bool didUnload = false; + unsigned int count, i; + + /* Take multiple locks in the correct order + * (note also sKextSummaries lock further down). + */ + IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextInnerLock); + + OSKext::flushNonloadedKexts(/* flushPrelinkedKexts */ true); + + /* If the system is powering down, don't try to unload anything. + */ + if (sSystemSleep) { + goto finish; + } + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | kOSKextLogLoadFlag, + "Checking for unused kexts to autounload."); + + /***** + * Remove any request callbacks marked as stale, + * and mark as stale any currently in flight. 
+ */ + count = sRequestCallbackRecords->getCount(); + if (count) { + i = count - 1; + do { + OSDictionary * callbackRecord = OSDynamicCast(OSDictionary, + sRequestCallbackRecords->getObject(i)); + OSBoolean * stale = OSDynamicCast(OSBoolean, + callbackRecord->getObject(kKextRequestStaleKey)); + + if (stale == kOSBooleanTrue) { + OSKext::invokeRequestCallback(callbackRecord, + kOSKextReturnTimeout); + } else { + callbackRecord->setObject(kKextRequestStaleKey, + kOSBooleanTrue); + } + } while (i--); + } + + /***** + * Make multiple passes through the array of loaded kexts until + * we don't unload any. This handles unwinding of dependency + * chains. We have to go *backwards* through the array because + * kexts are removed from it when unloaded, and we cannot make + * a copy or we'll mess up the retain counts we rely on to + * check whether a kext will unload. If only we could have + * nonretaining collections like CF has.... + */ + do { + didUnload = false; + + count = sLoadedKexts->getCount(); + if (count) { + i = count - 1; + do { + OSKext * thisKext = OSDynamicCast(OSKext, + sLoadedKexts->getObject(i)); + didUnload |= (kOSReturnSuccess == OSKext::autounloadKext(thisKext)); + } while (i--); + } + } while (didUnload); finish: - sConsiderUnloadsPending = false; - sConsiderUnloadsExecuted = true; + sConsiderUnloadsPending = false; + sConsiderUnloadsExecuted = true; + + (void) OSKext::considerRebuildOfPrelinkedKernel(); - (void) OSKext::considerRebuildOfPrelinkedKernel(); - - IORecursiveLockUnlock(sKextInnerLock); - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextInnerLock); + IORecursiveLockUnlock(sKextLock); - return; + return; } /********************************************************************* * Do not call any function that takes sKextLock here! *********************************************************************/ -void OSKext::considerUnloads(Boolean rescheduleOnlyFlag) +void +OSKext::considerUnloads(Boolean rescheduleOnlyFlag) { - AbsoluteTime when; - - IORecursiveLockLock(sKextInnerLock); + AbsoluteTime when; - if (!sUnloadCallout) { - sUnloadCallout = thread_call_allocate(&_OSKextConsiderUnloads, 0); - } + IORecursiveLockLock(sKextInnerLock); - /* we only reset delay value for unloading if we already have something - * pending. rescheduleOnlyFlag should not start the count down. - */ - if (rescheduleOnlyFlag && !sConsiderUnloadsPending) { - goto finish; - } - - thread_call_cancel(sUnloadCallout); - if (OSKext::getAutounloadEnabled() && !sSystemSleep) { - clock_interval_to_deadline(sConsiderUnloadDelay, - 1000 * 1000 * 1000, &when); + if (!sUnloadCallout) { + sUnloadCallout = thread_call_allocate(&_OSKextConsiderUnloads, 0); + } - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag, - "%scheduling %sscan for unused kexts in %lu seconds.", - sConsiderUnloadsPending ? "Res" : "S", - sConsiderUnloadsCalled ? "" : "initial ", - (unsigned long)sConsiderUnloadDelay); + /* we only reset delay value for unloading if we already have something + * pending. rescheduleOnlyFlag should not start the count down. 
+ */ + if (rescheduleOnlyFlag && !sConsiderUnloadsPending) { + goto finish; + } - sConsiderUnloadsPending = true; - thread_call_enter_delayed(sUnloadCallout, when); - } + thread_call_cancel(sUnloadCallout); + if (OSKext::getAutounloadEnabled() && !sSystemSleep) { + clock_interval_to_deadline(sConsiderUnloadDelay, + 1000 * 1000 * 1000, &when); + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "%scheduling %sscan for unused kexts in %lu seconds.", + sConsiderUnloadsPending ? "Res" : "S", + sConsiderUnloadsCalled ? "" : "initial ", + (unsigned long)sConsiderUnloadDelay); + + sConsiderUnloadsPending = true; + thread_call_enter_delayed(sUnloadCallout, when); + } finish: - /* The kxld context should be reused throughout boot. We mark the end of - * period as the first time considerUnloads() is called, and we destroy - * the first kxld context in that function. Afterwards, it will be - * destroyed in flushNonloadedKexts. - */ - if (!sConsiderUnloadsCalled) { - sConsiderUnloadsCalled = true; - OSKext::considerDestroyingLinkContext(); - } + /* The kxld context should be reused throughout boot. We mark the end of + * period as the first time considerUnloads() is called, and we destroy + * the first kxld context in that function. Afterwards, it will be + * destroyed in flushNonloadedKexts. + */ + if (!sConsiderUnloadsCalled) { + sConsiderUnloadsCalled = true; + OSKext::considerDestroyingLinkContext(); + } - IORecursiveLockUnlock(sKextInnerLock); - return; + IORecursiveLockUnlock(sKextInnerLock); + return; } /********************************************************************* * Do not call any function that takes sKextLock here! *********************************************************************/ extern "C" { - IOReturn OSKextSystemSleepOrWake(UInt32 messageType); -IOReturn OSKextSystemSleepOrWake(UInt32 messageType) -{ - IORecursiveLockLock(sKextInnerLock); - - /* If the system is going to sleep, cancel the reaper thread timer, - * and note that we're in a sleep state in case it just fired but hasn't - * taken the lock yet. If we are coming back from sleep, just - * clear the sleep flag; IOService's normal operation will cause - * unloads to be considered soon enough. - */ - if (messageType == kIOMessageSystemWillSleep) { - if (sUnloadCallout) { - thread_call_cancel(sUnloadCallout); - } - sSystemSleep = true; - AbsoluteTime_to_scalar(&sLastWakeTime) = 0; - } else if (messageType == kIOMessageSystemHasPoweredOn) { - sSystemSleep = false; - clock_get_uptime(&sLastWakeTime); - } - IORecursiveLockUnlock(sKextInnerLock); - - return kIOReturnSuccess; -} +IOReturn +OSKextSystemSleepOrWake(UInt32 messageType) +{ + IORecursiveLockLock(sKextInnerLock); + + /* If the system is going to sleep, cancel the reaper thread timer, + * and note that we're in a sleep state in case it just fired but hasn't + * taken the lock yet. If we are coming back from sleep, just + * clear the sleep flag; IOService's normal operation will cause + * unloads to be considered soon enough. 
+ */ + if (messageType == kIOMessageSystemWillSleep) { + if (sUnloadCallout) { + thread_call_cancel(sUnloadCallout); + } + sSystemSleep = true; + AbsoluteTime_to_scalar(&sLastWakeTime) = 0; + } else if (messageType == kIOMessageSystemHasPoweredOn) { + sSystemSleep = false; + clock_get_uptime(&sLastWakeTime); + } + IORecursiveLockUnlock(sKextInnerLock); + return kIOReturnSuccess; +} }; @@ -7189,118 +7184,118 @@ IOReturn OSKextSystemSleepOrWake(UInt32 messageType) void OSKext::considerRebuildOfPrelinkedKernel(void) { - static bool requestedPrelink = false; - OSReturn checkResult = kOSReturnError; - OSDictionary * prelinkRequest = NULL; // must release - OSCollectionIterator * kextIterator = NULL; // must release - const OSSymbol * thisID = NULL; // do not release - bool doRebuild = false; - AbsoluteTime my_abstime; - UInt64 my_ns; - SInt32 delta_secs; - - /* Only one auto rebuild per boot and only on boot from prelinked kernel */ - if (requestedPrelink || !sPrelinkBoot) { - return; - } - - /* no direct return from this point */ - IORecursiveLockLock(sKextLock); - - /* We need to wait for kextd to get up and running with unloads already done - * and any new startup kexts loaded. - */ - if (!sConsiderUnloadsExecuted || - !sDeferredLoadSucceeded) { - goto finish; - } - - /* we really only care about boot / system start up related kexts so bail - * if we're here after REBUILD_MAX_TIME. - */ - if (!_OSKextInPrelinkRebuildWindow()) { - OSKextLog(/* kext */ NULL, - kOSKextLogArchiveFlag, - "%s prebuild rebuild has expired", - __FUNCTION__); - requestedPrelink = true; - goto finish; - } - - /* we do not want to trigger a rebuild if we get here too close to waking - * up. (see radar 10233768) - */ - IORecursiveLockLock(sKextInnerLock); - - clock_get_uptime(&my_abstime); - delta_secs = MINIMUM_WAKEUP_SECONDS + 1; - if (AbsoluteTime_to_scalar(&sLastWakeTime) != 0) { - SUB_ABSOLUTETIME(&my_abstime, &sLastWakeTime); - absolutetime_to_nanoseconds(my_abstime, &my_ns); - delta_secs = (SInt32)(my_ns / NSEC_PER_SEC); - } - IORecursiveLockUnlock(sKextInnerLock); - - if (delta_secs < MINIMUM_WAKEUP_SECONDS) { - /* too close to time of last wake from sleep */ - goto finish; - } - requestedPrelink = true; - - /* Now it's time to see if we have a reason to rebuild. We may have done - * some loads and unloads but the kernel cache didn't actually change. - * We will rebuild if any kext is not marked prelinked AND is not in our - * list of prelinked kexts that got unloaded. 
(see radar 9055303) - */ - kextIterator = OSCollectionIterator::withCollection(sKextsByID); - if (!kextIterator) { - goto finish; - } - - while ((thisID = OSDynamicCast(OSSymbol, kextIterator->getNextObject()))) { - OSKext * thisKext; // do not release - - thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID)); - if (!thisKext || thisKext->isPrelinked() || thisKext->isKernel()) { - continue; - } - - if (_OSKextInUnloadedPrelinkedKexts(thisKext->bundleID)) { - continue; - } - /* kext is loaded and was not in current kernel cache so let's rebuild - */ - doRebuild = true; - OSKextLog(/* kext */ NULL, - kOSKextLogArchiveFlag, - "considerRebuildOfPrelinkedKernel %s triggered rebuild", - thisKext->bundleID->getCStringNoCopy()); - break; - } - sUnloadedPrelinkedKexts->flushCollection(); - - if (!doRebuild) { - goto finish; - } - - checkResult = _OSKextCreateRequest(kKextRequestPredicateRequestPrelink, - &prelinkRequest); - if (checkResult != kOSReturnSuccess) { - goto finish; - } - - if (!sKernelRequests->setObject(prelinkRequest)) { - goto finish; - } - - OSKext::pingKextd(); - + static bool requestedPrelink = false; + OSReturn checkResult = kOSReturnError; + OSDictionary * prelinkRequest = NULL;// must release + OSCollectionIterator * kextIterator = NULL;// must release + const OSSymbol * thisID = NULL;// do not release + bool doRebuild = false; + AbsoluteTime my_abstime; + UInt64 my_ns; + SInt32 delta_secs; + + /* Only one auto rebuild per boot and only on boot from prelinked kernel */ + if (requestedPrelink || !sPrelinkBoot) { + return; + } + + /* no direct return from this point */ + IORecursiveLockLock(sKextLock); + + /* We need to wait for kextd to get up and running with unloads already done + * and any new startup kexts loaded. + */ + if (!sConsiderUnloadsExecuted || + !sDeferredLoadSucceeded) { + goto finish; + } + + /* we really only care about boot / system start up related kexts so bail + * if we're here after REBUILD_MAX_TIME. + */ + if (!_OSKextInPrelinkRebuildWindow()) { + OSKextLog(/* kext */ NULL, + kOSKextLogArchiveFlag, + "%s prebuild rebuild has expired", + __FUNCTION__); + requestedPrelink = true; + goto finish; + } + + /* we do not want to trigger a rebuild if we get here too close to waking + * up. (see radar 10233768) + */ + IORecursiveLockLock(sKextInnerLock); + + clock_get_uptime(&my_abstime); + delta_secs = MINIMUM_WAKEUP_SECONDS + 1; + if (AbsoluteTime_to_scalar(&sLastWakeTime) != 0) { + SUB_ABSOLUTETIME(&my_abstime, &sLastWakeTime); + absolutetime_to_nanoseconds(my_abstime, &my_ns); + delta_secs = (SInt32)(my_ns / NSEC_PER_SEC); + } + IORecursiveLockUnlock(sKextInnerLock); + + if (delta_secs < MINIMUM_WAKEUP_SECONDS) { + /* too close to time of last wake from sleep */ + goto finish; + } + requestedPrelink = true; + + /* Now it's time to see if we have a reason to rebuild. We may have done + * some loads and unloads but the kernel cache didn't actually change. + * We will rebuild if any kext is not marked prelinked AND is not in our + * list of prelinked kexts that got unloaded. 
(see radar 9055303) + */ + kextIterator = OSCollectionIterator::withCollection(sKextsByID); + if (!kextIterator) { + goto finish; + } + + while ((thisID = OSDynamicCast(OSSymbol, kextIterator->getNextObject()))) { + OSKext * thisKext;// do not release + + thisKext = OSDynamicCast(OSKext, sKextsByID->getObject(thisID)); + if (!thisKext || thisKext->isPrelinked() || thisKext->isKernel()) { + continue; + } + + if (_OSKextInUnloadedPrelinkedKexts(thisKext->bundleID)) { + continue; + } + /* kext is loaded and was not in current kernel cache so let's rebuild + */ + doRebuild = true; + OSKextLog(/* kext */ NULL, + kOSKextLogArchiveFlag, + "considerRebuildOfPrelinkedKernel %s triggered rebuild", + thisKext->bundleID->getCStringNoCopy()); + break; + } + sUnloadedPrelinkedKexts->flushCollection(); + + if (!doRebuild) { + goto finish; + } + + checkResult = _OSKextCreateRequest(kKextRequestPredicateRequestPrelink, + &prelinkRequest); + if (checkResult != kOSReturnSuccess) { + goto finish; + } + + if (!sKernelRequests->setObject(prelinkRequest)) { + goto finish; + } + + OSKext::pingKextd(); + finish: - IORecursiveLockUnlock(sKextLock); - OSSafeReleaseNULL(prelinkRequest); - OSSafeReleaseNULL(kextIterator); - - return; + IORecursiveLockUnlock(sKextLock); + OSSafeReleaseNULL(prelinkRequest); + OSSafeReleaseNULL(kextIterator); + + return; } #if PRAGMA_MARK @@ -7310,398 +7305,394 @@ finish: *********************************************************************/ bool OSKext::resolveDependencies( - OSArray * loopStack) -{ - bool result = false; - OSArray * localLoopStack = NULL; // must release - bool addedToLoopStack = false; - OSDictionary * libraries = NULL; // do not release - OSCollectionIterator * libraryIterator = NULL; // must release - OSString * libraryID = NULL; // do not release - OSString * infoString = NULL; // do not release - OSString * readableString = NULL; // do not release - OSKext * libraryKext = NULL; // do not release - bool hasRawKernelDependency = false; - bool hasKernelDependency = false; - bool hasKPIDependency = false; - bool hasPrivateKPIDependency = false; - unsigned int count; - - /* A kernel component will automatically have this flag set, - * and a loaded kext should also have it set (as should all its - * loaded dependencies). - */ - if (flags.hasAllDependencies) { - result = true; - goto finish; - } - - /* Check for loops in the dependency graph. - */ - if (loopStack) { - if (loopStack->getNextIndexOfObject(this, 0) != (unsigned int)-1) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s has a dependency loop; can't resolve dependencies.", - getIdentifierCString()); - goto finish; - } - } else { - OSKextLog(this, - kOSKextLogStepLevel | - kOSKextLogDependenciesFlag, - "Kext %s resolving dependencies.", - getIdentifierCString()); - - loopStack = OSArray::withCapacity(6); // any small capacity will do - if (!loopStack) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s can't create bookkeeping stack to resolve dependencies.", - getIdentifierCString()); - goto finish; - } - localLoopStack = loopStack; - } - if (!loopStack->setObject(this)) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s - internal error resolving dependencies.", - getIdentifierCString()); - goto finish; - } - addedToLoopStack = true; - - /* Purge any existing kexts in the dependency list and start over. 
- */ - flushDependencies(); - if (dependencies) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s - internal error resolving dependencies.", - getIdentifierCString()); - } - - libraries = OSDynamicCast(OSDictionary, - getPropertyForHostArch(kOSBundleLibrariesKey)); - if (libraries == NULL || libraries->getCount() == 0) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, - "Kext %s - can't resolve dependencies; %s missing/invalid type.", - getIdentifierCString(), kOSBundleLibrariesKey); - goto finish; - } - - /* Make a new array to hold the dependencies (flush freed the old one). - */ - dependencies = OSArray::withCapacity(libraries->getCount()); - if (!dependencies) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s - can't allocate dependencies array.", - getIdentifierCString()); - goto finish; - } - - // xxx - compat: We used to add an implicit dependency on kernel 6.0 - // xxx - compat: if none were declared. - - libraryIterator = OSCollectionIterator::withCollection(libraries); - if (!libraryIterator) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s - can't allocate dependencies iterator.", - getIdentifierCString()); - goto finish; - } - - while ((libraryID = OSDynamicCast(OSString, - libraryIterator->getNextObject()))) { - - const char * library_id = libraryID->getCStringNoCopy(); - - OSString * libraryVersion = OSDynamicCast(OSString, - libraries->getObject(libraryID)); - if (libraryVersion == NULL) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, - "Kext %s - illegal type in OSBundleLibraries.", - getIdentifierCString()); - goto finish; - } - - OSKextVersion libraryVers = - OSKextParseVersionString(libraryVersion->getCStringNoCopy()); - if (libraryVers == -1) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, - "Kext %s - invalid library version %s.", - getIdentifierCString(), - libraryVersion->getCStringNoCopy()); - goto finish; - } - - libraryKext = OSDynamicCast(OSKext, sKextsByID->getObject(libraryID)); - if (libraryKext == NULL) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s - library kext %s not found.", - getIdentifierCString(), library_id); - goto finish; - } - - if (!libraryKext->isCompatibleWithVersion(libraryVers)) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s - library kext %s not compatible " - "with requested version %s.", - getIdentifierCString(), library_id, - libraryVersion->getCStringNoCopy()); - goto finish; - } - - /* If a nonprelinked library somehow got into the mix for a - * prelinked kext, at any point in the chain, we must fail - * because the prelinked relocs for the library will be all wrong. - */ - if (this->isPrelinked() && - libraryKext->declaresExecutable() && - !libraryKext->isPrelinked()) { - - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s (prelinked) - library kext %s (v%s) not prelinked.", - getIdentifierCString(), library_id, - libraryVersion->getCStringNoCopy()); - goto finish; - } - - if (!libraryKext->resolveDependencies(loopStack)) { - goto finish; - } - - /* Add the library directly only if it has an executable to link. - * Otherwise it's just used to collect other dependencies, so put - * *its* dependencies on the list for this kext. 
- */ - // xxx - We are losing info here; would like to make fake entries or - // xxx - keep these in the dependency graph for loaded kexts. - // xxx - I really want to make kernel components not a special case! - if (libraryKext->declaresExecutable() || - libraryKext->isInterface()) { - - if (dependencies->getNextIndexOfObject(libraryKext, 0) == (unsigned)-1) { - dependencies->setObject(libraryKext); - - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogDependenciesFlag, - "Kext %s added dependency %s.", - getIdentifierCString(), - libraryKext->getIdentifierCString()); - } - } else { - int numLibDependencies = libraryKext->getNumDependencies(); - OSArray * libraryDependencies = libraryKext->getDependencies(); - int index; - - if (numLibDependencies) { - // xxx - this msg level should be 1 lower than the per-kext one - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogDependenciesFlag, - "Kext %s pulling %d dependencies from codeless library %s.", - getIdentifierCString(), - numLibDependencies, - libraryKext->getIdentifierCString()); - } - for (index = 0; index < numLibDependencies; index++) { - OSKext * thisLibDependency = OSDynamicCast(OSKext, - libraryDependencies->getObject(index)); - if (dependencies->getNextIndexOfObject(thisLibDependency, 0) == (unsigned)-1) { - dependencies->setObject(thisLibDependency); - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogDependenciesFlag, - "Kext %s added dependency %s from codeless library %s.", - getIdentifierCString(), - thisLibDependency->getIdentifierCString(), - libraryKext->getIdentifierCString()); - } - } - } - - if ((strlen(library_id) == strlen(KERNEL_LIB)) && - 0 == strncmp(library_id, KERNEL_LIB, sizeof(KERNEL_LIB)-1)) { - - hasRawKernelDependency = true; - } else if (STRING_HAS_PREFIX(library_id, KERNEL_LIB_PREFIX)) { - hasKernelDependency = true; - } else if (STRING_HAS_PREFIX(library_id, KPI_LIB_PREFIX)) { - hasKPIDependency = true; - if (!strncmp(library_id, PRIVATE_KPI, sizeof(PRIVATE_KPI)-1)) { - hasPrivateKPIDependency = true; - } - } - } - - if (hasRawKernelDependency) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, - "Error - kext %s declares a dependency on %s, which is not permitted.", - getIdentifierCString(), KERNEL_LIB); - goto finish; - } + OSArray * loopStack) +{ + bool result = false; + OSArray * localLoopStack = NULL;// must release + bool addedToLoopStack = false; + OSDictionary * libraries = NULL;// do not release + OSCollectionIterator * libraryIterator = NULL;// must release + OSString * libraryID = NULL;// do not release + OSString * infoString = NULL;// do not release + OSString * readableString = NULL;// do not release + OSKext * libraryKext = NULL;// do not release + bool hasRawKernelDependency = false; + bool hasKernelDependency = false; + bool hasKPIDependency = false; + bool hasPrivateKPIDependency = false; + unsigned int count; + + /* A kernel component will automatically have this flag set, + * and a loaded kext should also have it set (as should all its + * loaded dependencies). + */ + if (flags.hasAllDependencies) { + result = true; + goto finish; + } + + /* Check for loops in the dependency graph. 
+ */ + if (loopStack) { + if (loopStack->getNextIndexOfObject(this, 0) != (unsigned int)-1) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s has a dependency loop; can't resolve dependencies.", + getIdentifierCString()); + goto finish; + } + } else { + OSKextLog(this, + kOSKextLogStepLevel | + kOSKextLogDependenciesFlag, + "Kext %s resolving dependencies.", + getIdentifierCString()); + + loopStack = OSArray::withCapacity(6); // any small capacity will do + if (!loopStack) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s can't create bookkeeping stack to resolve dependencies.", + getIdentifierCString()); + goto finish; + } + localLoopStack = loopStack; + } + if (!loopStack->setObject(this)) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s - internal error resolving dependencies.", + getIdentifierCString()); + goto finish; + } + addedToLoopStack = true; + + /* Purge any existing kexts in the dependency list and start over. + */ + flushDependencies(); + if (dependencies) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s - internal error resolving dependencies.", + getIdentifierCString()); + } + + libraries = OSDynamicCast(OSDictionary, + getPropertyForHostArch(kOSBundleLibrariesKey)); + if (libraries == NULL || libraries->getCount() == 0) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, + "Kext %s - can't resolve dependencies; %s missing/invalid type.", + getIdentifierCString(), kOSBundleLibrariesKey); + goto finish; + } + + /* Make a new array to hold the dependencies (flush freed the old one). + */ + dependencies = OSArray::withCapacity(libraries->getCount()); + if (!dependencies) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s - can't allocate dependencies array.", + getIdentifierCString()); + goto finish; + } + + // xxx - compat: We used to add an implicit dependency on kernel 6.0 + // xxx - compat: if none were declared. 
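
[Annotation, not part of the patch] In the loop below, each declared library version is parsed with OSKextParseVersionString (which returns -1 for a malformed string) and then checked with isCompatibleWithVersion(), i.e. the requested version must fall inside the library's closed [compatible, current] range. A minimal standalone sketch of that interval test, assuming a simplified dotted "a.b.c" parser -- the real parser also accepts stage suffixes such as "1.0d1", which this toy model omits:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Toy stand-in for OSKextParseVersionString: packs "major.minor.rev"
    // into one comparable integer, or returns -1 on a negative component.
    static int64_t parseDottedVersion(const char *s)
    {
        char *end = nullptr;
        long major = strtol(s, &end, 10);
        long minor = (*end == '.') ? strtol(end + 1, &end, 10) : 0;
        long rev   = (*end == '.') ? strtol(end + 1, &end, 10) : 0;
        if (major < 0 || minor < 0 || rev < 0) {
            return -1;
        }
        return (int64_t)major * 10000 + minor * 100 + rev;
    }

    // A library satisfies a request when the requested version lies in
    // [OSBundleCompatibleVersion, CFBundleVersion] -- the same closed
    // interval isCompatibleWithVersion() tests.
    static bool isCompatible(int64_t compatible, int64_t current, int64_t requested)
    {
        return compatible >= 0 && requested >= 0 &&
            compatible <= requested && requested <= current;
    }

    int main(void)
    {
        printf("%d\n", isCompatible(parseDottedVersion("6.0.0"),
            parseDottedVersion("10.2.1"),
            parseDottedVersion("8.0.0")));      // 1: inside the range
        printf("%d\n", isCompatible(parseDottedVersion("6.0.0"),
            parseDottedVersion("10.2.1"),
            parseDottedVersion("5.0.0")));      // 0: older than compatible
        return 0;
    }
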
+ + libraryIterator = OSCollectionIterator::withCollection(libraries); + if (!libraryIterator) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s - can't allocate dependencies iterator.", + getIdentifierCString()); + goto finish; + } + + while ((libraryID = OSDynamicCast(OSString, + libraryIterator->getNextObject()))) { + const char * library_id = libraryID->getCStringNoCopy(); + + OSString * libraryVersion = OSDynamicCast(OSString, + libraries->getObject(libraryID)); + if (libraryVersion == NULL) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, + "Kext %s - illegal type in OSBundleLibraries.", + getIdentifierCString()); + goto finish; + } + + OSKextVersion libraryVers = + OSKextParseVersionString(libraryVersion->getCStringNoCopy()); + if (libraryVers == -1) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, + "Kext %s - invalid library version %s.", + getIdentifierCString(), + libraryVersion->getCStringNoCopy()); + goto finish; + } + + libraryKext = OSDynamicCast(OSKext, sKextsByID->getObject(libraryID)); + if (libraryKext == NULL) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s - library kext %s not found.", + getIdentifierCString(), library_id); + goto finish; + } + + if (!libraryKext->isCompatibleWithVersion(libraryVers)) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s - library kext %s not compatible " + "with requested version %s.", + getIdentifierCString(), library_id, + libraryVersion->getCStringNoCopy()); + goto finish; + } + + /* If a nonprelinked library somehow got into the mix for a + * prelinked kext, at any point in the chain, we must fail + * because the prelinked relocs for the library will be all wrong. + */ + if (this->isPrelinked() && + libraryKext->declaresExecutable() && + !libraryKext->isPrelinked()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s (prelinked) - library kext %s (v%s) not prelinked.", + getIdentifierCString(), library_id, + libraryVersion->getCStringNoCopy()); + goto finish; + } + + if (!libraryKext->resolveDependencies(loopStack)) { + goto finish; + } + + /* Add the library directly only if it has an executable to link. + * Otherwise it's just used to collect other dependencies, so put + * *its* dependencies on the list for this kext. + */ + // xxx - We are losing info here; would like to make fake entries or + // xxx - keep these in the dependency graph for loaded kexts. + // xxx - I really want to make kernel components not a special case! 
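
[Annotation, not part of the patch] The branch below implements the flattening just described: a library that declares an executable (or is an interface) is added as a direct link dependency, while a codeless library contributes only the dependencies it has already resolved. A standalone toy model of that rule, using plain structs rather than kernel objects; the isInterface() case is folded into a single "linkable" flag for brevity:

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <vector>

    struct ToyKext {
        std::string id;
        bool linkable;                  // declaresExecutable() || isInterface()
        std::vector<ToyKext *> deps;    // this library's resolved dependencies
    };

    // Append without duplicates, mirroring the getNextIndexOfObject() checks.
    static void addOnce(std::vector<ToyKext *> &list, ToyKext *k)
    {
        if (std::find(list.begin(), list.end(), k) == list.end()) {
            list.push_back(k);
        }
    }

    static void addLibrary(std::vector<ToyKext *> &deps, ToyKext *lib)
    {
        if (lib->linkable) {
            addOnce(deps, lib);            // link against the library itself
        } else {
            for (ToyKext *d : lib->deps) { // pull the codeless library's deps
                addOnce(deps, d);
            }
        }
    }

    int main(void)
    {
        ToyKext kpi{"com.apple.kpi.libkern", true, {}};
        ToyKext umbrella{"com.example.codeless", false, {&kpi}};

        std::vector<ToyKext *> deps;
        addLibrary(deps, &umbrella);
        for (ToyKext *k : deps) {
            printf("%s\n", k->id.c_str()); // prints com.apple.kpi.libkern
        }
        return 0;
    }
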
+ if (libraryKext->declaresExecutable() || + libraryKext->isInterface()) { + if (dependencies->getNextIndexOfObject(libraryKext, 0) == (unsigned)-1) { + dependencies->setObject(libraryKext); + + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogDependenciesFlag, + "Kext %s added dependency %s.", + getIdentifierCString(), + libraryKext->getIdentifierCString()); + } + } else { + int numLibDependencies = libraryKext->getNumDependencies(); + OSArray * libraryDependencies = libraryKext->getDependencies(); + int index; + + if (numLibDependencies) { + // xxx - this msg level should be 1 lower than the per-kext one + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogDependenciesFlag, + "Kext %s pulling %d dependencies from codeless library %s.", + getIdentifierCString(), + numLibDependencies, + libraryKext->getIdentifierCString()); + } + for (index = 0; index < numLibDependencies; index++) { + OSKext * thisLibDependency = OSDynamicCast(OSKext, + libraryDependencies->getObject(index)); + if (dependencies->getNextIndexOfObject(thisLibDependency, 0) == (unsigned)-1) { + dependencies->setObject(thisLibDependency); + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogDependenciesFlag, + "Kext %s added dependency %s from codeless library %s.", + getIdentifierCString(), + thisLibDependency->getIdentifierCString(), + libraryKext->getIdentifierCString()); + } + } + } + + if ((strlen(library_id) == strlen(KERNEL_LIB)) && + 0 == strncmp(library_id, KERNEL_LIB, sizeof(KERNEL_LIB) - 1)) { + hasRawKernelDependency = true; + } else if (STRING_HAS_PREFIX(library_id, KERNEL_LIB_PREFIX)) { + hasKernelDependency = true; + } else if (STRING_HAS_PREFIX(library_id, KPI_LIB_PREFIX)) { + hasKPIDependency = true; + if (!strncmp(library_id, PRIVATE_KPI, sizeof(PRIVATE_KPI) - 1)) { + hasPrivateKPIDependency = true; + } + } + } + + if (hasRawKernelDependency) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, + "Error - kext %s declares a dependency on %s, which is not permitted.", + getIdentifierCString(), KERNEL_LIB); + goto finish; + } #if __LP64__ - if (hasKernelDependency) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, - "Error - kext %s declares %s dependencies. " - "Only %s* dependencies are supported for 64-bit kexts.", - getIdentifierCString(), KERNEL_LIB, KPI_LIB_PREFIX); - goto finish; - } - if (!hasKPIDependency) { - OSKextLog(this, - kOSKextLogWarningLevel | - kOSKextLogDependenciesFlag, - "Warning - kext %s declares no %s* dependencies. " - "If it uses any KPIs, the link may fail with undefined symbols.", - getIdentifierCString(), KPI_LIB_PREFIX); - } + if (hasKernelDependency) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogValidationFlag | kOSKextLogDependenciesFlag, + "Error - kext %s declares %s dependencies. " + "Only %s* dependencies are supported for 64-bit kexts.", + getIdentifierCString(), KERNEL_LIB, KPI_LIB_PREFIX); + goto finish; + } + if (!hasKPIDependency) { + OSKextLog(this, + kOSKextLogWarningLevel | + kOSKextLogDependenciesFlag, + "Warning - kext %s declares no %s* dependencies. " + "If it uses any KPIs, the link may fail with undefined symbols.", + getIdentifierCString(), KPI_LIB_PREFIX); + } #else /* __LP64__ */ - // xxx - will change to flatly disallow "kernel" dependencies at some point - // xxx - is it invalid to do both "com.apple.kernel" and any - // xxx - "com.apple.kernel.*"? 
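
[Annotation, not part of the patch] The strncmp/STRING_HAS_PREFIX tests above sort each declared library identifier into the classes that the LP64/non-LP64 checks then act on. A compact sketch of the same classification, assuming the conventional macro values (KERNEL_LIB "com.apple.kernel", KERNEL_LIB_PREFIX "com.apple.kernel.", KPI_LIB_PREFIX "com.apple.kpi.", PRIVATE_KPI "com.apple.kpi.private") -- those literals are assumptions for illustration, not quoted from this patch:

    #include <cstdio>
    #include <cstring>

    static bool hasPrefix(const char *s, const char *prefix)
    {
        return strncmp(s, prefix, strlen(prefix)) == 0;
    }

    enum LibKind { kRawKernel, kLegacyKernel, kPrivateKPI, kKPI, kOther };

    // Ordering matters: the exact-match and private-KPI tests must run
    // before the broader prefix tests that would also match them.
    static LibKind classify(const char *library_id)
    {
        if (strcmp(library_id, "com.apple.kernel") == 0) {
            return kRawKernel;     // rejected outright above
        }
        if (hasPrefix(library_id, "com.apple.kernel.")) {
            return kLegacyKernel;  // an error for 64-bit kexts
        }
        if (hasPrefix(library_id, "com.apple.kpi.private")) {
            return kPrivateKPI;    // restricted to Apple-prefixed kexts
        }
        if (hasPrefix(library_id, "com.apple.kpi.")) {
            return kKPI;
        }
        return kOther;
    }

    int main(void)
    {
        printf("%d\n", classify("com.apple.kpi.bsd"));    // kKPI
        printf("%d\n", classify("com.apple.kernel.6.0")); // kLegacyKernel
        printf("%d\n", classify("com.example.driver"));   // kOther
        return 0;
    }
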
- - if (hasKernelDependency && hasKPIDependency) { - OSKextLog(this, - kOSKextLogWarningLevel | - kOSKextLogDependenciesFlag, - "Warning - kext %s has immediate dependencies on both " - "%s* and %s* components; use only one style.", - getIdentifierCString(), KERNEL_LIB, KPI_LIB_PREFIX); - } - - if (!hasKernelDependency && !hasKPIDependency) { - // xxx - do we want to use validation flag for these too? - OSKextLog(this, - kOSKextLogWarningLevel | - kOSKextLogDependenciesFlag, - "Warning - %s declares no kernel dependencies; using %s.", - getIdentifierCString(), KERNEL6_LIB); - OSKext * kernelKext = OSDynamicCast(OSKext, - sKextsByID->getObject(KERNEL6_LIB)); - if (kernelKext) { - dependencies->setObject(kernelKext); - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Error - Library %s not found for %s.", - KERNEL6_LIB, getIdentifierCString()); - } - } - - /* If the kext doesn't have a raw kernel or KPI dependency, then add all of - * its indirect dependencies to simulate old-style linking. XXX - Should - * check for duplicates. - */ - if (!hasKPIDependency) { - unsigned int i; - - flags.hasBleedthrough = true; - - count = getNumDependencies(); - - /* We add to the dependencies array in this loop, but do not iterate - * past its original count. - */ - for (i = 0; i < count; i++) { - OSKext * dependencyKext = OSDynamicCast(OSKext, - dependencies->getObject(i)); - dependencyKext->addBleedthroughDependencies(dependencies); - } - } + // xxx - will change to flatly disallow "kernel" dependencies at some point + // xxx - is it invalid to do both "com.apple.kernel" and any + // xxx - "com.apple.kernel.*"? + + if (hasKernelDependency && hasKPIDependency) { + OSKextLog(this, + kOSKextLogWarningLevel | + kOSKextLogDependenciesFlag, + "Warning - kext %s has immediate dependencies on both " + "%s* and %s* components; use only one style.", + getIdentifierCString(), KERNEL_LIB, KPI_LIB_PREFIX); + } + + if (!hasKernelDependency && !hasKPIDependency) { + // xxx - do we want to use validation flag for these too? + OSKextLog(this, + kOSKextLogWarningLevel | + kOSKextLogDependenciesFlag, + "Warning - %s declares no kernel dependencies; using %s.", + getIdentifierCString(), KERNEL6_LIB); + OSKext * kernelKext = OSDynamicCast(OSKext, + sKextsByID->getObject(KERNEL6_LIB)); + if (kernelKext) { + dependencies->setObject(kernelKext); + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Error - Library %s not found for %s.", + KERNEL6_LIB, getIdentifierCString()); + } + } + + /* If the kext doesn't have a raw kernel or KPI dependency, then add all of + * its indirect dependencies to simulate old-style linking. XXX - Should + * check for duplicates. + */ + if (!hasKPIDependency) { + unsigned int i; + + flags.hasBleedthrough = true; + + count = getNumDependencies(); + + /* We add to the dependencies array in this loop, but do not iterate + * past its original count. 
+ */ + for (i = 0; i < count; i++) { + OSKext * dependencyKext = OSDynamicCast(OSKext, + dependencies->getObject(i)); + dependencyKext->addBleedthroughDependencies(dependencies); + } + } #endif /* __LP64__ */ - if (hasPrivateKPIDependency) { - bool hasApplePrefix = false; - bool infoCopyrightIsValid = false; - bool readableCopyrightIsValid = false; - - hasApplePrefix = STRING_HAS_PREFIX(getIdentifierCString(), - APPLE_KEXT_PREFIX); - - infoString = OSDynamicCast(OSString, - getPropertyForHostArch("CFBundleGetInfoString")); - if (infoString) { - infoCopyrightIsValid = - kxld_validate_copyright_string(infoString->getCStringNoCopy()); - } - - readableString = OSDynamicCast(OSString, - getPropertyForHostArch("NSHumanReadableCopyright")); - if (readableString) { - readableCopyrightIsValid = - kxld_validate_copyright_string(readableString->getCStringNoCopy()); - } - - if (!hasApplePrefix || (!infoCopyrightIsValid && !readableCopyrightIsValid)) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Error - kext %s declares a dependency on %s. " - "Only Apple kexts may declare a dependency on %s.", - getIdentifierCString(), PRIVATE_KPI, PRIVATE_KPI); - goto finish; - } - } - - result = true; - flags.hasAllDependencies = 1; + if (hasPrivateKPIDependency) { + bool hasApplePrefix = false; + bool infoCopyrightIsValid = false; + bool readableCopyrightIsValid = false; + + hasApplePrefix = STRING_HAS_PREFIX(getIdentifierCString(), + APPLE_KEXT_PREFIX); + + infoString = OSDynamicCast(OSString, + getPropertyForHostArch("CFBundleGetInfoString")); + if (infoString) { + infoCopyrightIsValid = + kxld_validate_copyright_string(infoString->getCStringNoCopy()); + } + + readableString = OSDynamicCast(OSString, + getPropertyForHostArch("NSHumanReadableCopyright")); + if (readableString) { + readableCopyrightIsValid = + kxld_validate_copyright_string(readableString->getCStringNoCopy()); + } + + if (!hasApplePrefix || (!infoCopyrightIsValid && !readableCopyrightIsValid)) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Error - kext %s declares a dependency on %s. 
" + "Only Apple kexts may declare a dependency on %s.", + getIdentifierCString(), PRIVATE_KPI, PRIVATE_KPI); + goto finish; + } + } + + result = true; + flags.hasAllDependencies = 1; finish: - if (addedToLoopStack) { - count = loopStack->getCount(); - if (count > 0 && (this == loopStack->getObject(count - 1))) { - loopStack->removeObject(count - 1); - } else { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s - internal error resolving dependencies.", - getIdentifierCString()); - } - } - - if (result && localLoopStack) { - OSKextLog(this, - kOSKextLogStepLevel | - kOSKextLogDependenciesFlag, - "Kext %s successfully resolved dependencies.", - getIdentifierCString()); - } + if (addedToLoopStack) { + count = loopStack->getCount(); + if (count > 0 && (this == loopStack->getObject(count - 1))) { + loopStack->removeObject(count - 1); + } else { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s - internal error resolving dependencies.", + getIdentifierCString()); + } + } + + if (result && localLoopStack) { + OSKextLog(this, + kOSKextLogStepLevel | + kOSKextLogDependenciesFlag, + "Kext %s successfully resolved dependencies.", + getIdentifierCString()); + } - OSSafeReleaseNULL(localLoopStack); - OSSafeReleaseNULL(libraryIterator); + OSSafeReleaseNULL(localLoopStack); + OSSafeReleaseNULL(libraryIterator); - return result; + return result; } /********************************************************************* @@ -7709,35 +7700,34 @@ finish: bool OSKext::addBleedthroughDependencies(OSArray * anArray) { - bool result = false; - unsigned int dependencyIndex, dependencyCount; - - dependencyCount = getNumDependencies(); - - for (dependencyIndex = 0; - dependencyIndex < dependencyCount; - dependencyIndex++) { - - OSKext * dependency = OSDynamicCast(OSKext, - dependencies->getObject(dependencyIndex)); - if (!dependency) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogDependenciesFlag, - "Kext %s - internal error propagating compatibility dependencies.", - getIdentifierCString()); - goto finish; - } - if (anArray->getNextIndexOfObject(dependency, 0) == (unsigned int)-1) { - anArray->setObject(dependency); - } - dependency->addBleedthroughDependencies(anArray); - } - - result = true; + bool result = false; + unsigned int dependencyIndex, dependencyCount; + + dependencyCount = getNumDependencies(); + + for (dependencyIndex = 0; + dependencyIndex < dependencyCount; + dependencyIndex++) { + OSKext * dependency = OSDynamicCast(OSKext, + dependencies->getObject(dependencyIndex)); + if (!dependency) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogDependenciesFlag, + "Kext %s - internal error propagating compatibility dependencies.", + getIdentifierCString()); + goto finish; + } + if (anArray->getNextIndexOfObject(dependency, 0) == (unsigned int)-1) { + anArray->setObject(dependency); + } + dependency->addBleedthroughDependencies(anArray); + } + + result = true; finish: - return result; + return result; } /********************************************************************* @@ -7745,29 +7735,28 @@ finish: bool OSKext::flushDependencies(bool forceFlag) { - bool result = false; - - /* Only clear the dependencies if the kext isn't loaded; - * we need the info for loaded kexts to track references. 
- */ - if (!isLoaded() || forceFlag) { - if (dependencies) { - // xxx - check level - OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogDependenciesFlag, - "Kext %s flushing dependencies.", - getIdentifierCString()); - OSSafeReleaseNULL(dependencies); - - } - if (!isKernelComponent()) { - flags.hasAllDependencies = 0; - } - result = true; - } + bool result = false; + + /* Only clear the dependencies if the kext isn't loaded; + * we need the info for loaded kexts to track references. + */ + if (!isLoaded() || forceFlag) { + if (dependencies) { + // xxx - check level + OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogDependenciesFlag, + "Kext %s flushing dependencies.", + getIdentifierCString()); + OSSafeReleaseNULL(dependencies); + } + if (!isKernelComponent()) { + flags.hasAllDependencies = 0; + } + result = true; + } - return result; + return result; } /********************************************************************* @@ -7775,10 +7764,10 @@ OSKext::flushDependencies(bool forceFlag) uint32_t OSKext::getNumDependencies(void) { - if (!dependencies) { - return 0; - } - return dependencies->getCount(); + if (!dependencies) { + return 0; + } + return dependencies->getCount(); } /********************************************************************* @@ -7786,7 +7775,7 @@ OSKext::getNumDependencies(void) OSArray * OSKext::getDependencies(void) { - return dependencies; + return dependencies; } #if PRAGMA_MARK @@ -7796,122 +7785,121 @@ OSKext::getDependencies(void) *********************************************************************/ OSReturn OSKext::addClass( - OSMetaClass * aClass, - uint32_t numClasses) -{ - OSReturn result = kOSMetaClassNoInsKModSet; - - if (!metaClasses) { - metaClasses = OSSet::withCapacity(numClasses); - if (!metaClasses) { - goto finish; - } - } - - if (metaClasses->containsObject(aClass)) { - OSKextLog(this, - kOSKextLogWarningLevel | - kOSKextLogLoadFlag, - "Notice - kext %s has already registered class %s.", - getIdentifierCString(), - aClass->getClassName()); - result = kOSReturnSuccess; - goto finish; - } - - if (!metaClasses->setObject(aClass)) { - goto finish; - } else { - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogLoadFlag, - "Kext %s registered class %s.", - getIdentifierCString(), - aClass->getClassName()); - } - - if (!flags.autounloadEnabled) { - const OSMetaClass * metaScan = NULL; // do not release - - for (metaScan = aClass; metaScan; metaScan = metaScan->getSuperClass()) { - if (metaScan == OSTypeID(IOService)) { - - OSKextLog(this, - kOSKextLogProgressLevel | - kOSKextLogLoadFlag, - "Kext %s has IOService subclass %s; enabling autounload.", - getIdentifierCString(), - aClass->getClassName()); - - flags.autounloadEnabled = 1; - break; - } - } - } - - notifyAddClassObservers(this, aClass, flags); - - result = kOSReturnSuccess; + OSMetaClass * aClass, + uint32_t numClasses) +{ + OSReturn result = kOSMetaClassNoInsKModSet; + + if (!metaClasses) { + metaClasses = OSSet::withCapacity(numClasses); + if (!metaClasses) { + goto finish; + } + } + + if (metaClasses->containsObject(aClass)) { + OSKextLog(this, + kOSKextLogWarningLevel | + kOSKextLogLoadFlag, + "Notice - kext %s has already registered class %s.", + getIdentifierCString(), + aClass->getClassName()); + result = kOSReturnSuccess; + goto finish; + } + + if (!metaClasses->setObject(aClass)) { + goto finish; + } else { + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogLoadFlag, + "Kext %s registered class %s.", + getIdentifierCString(), + aClass->getClassName()); + } + + if 
(!flags.autounloadEnabled) { + const OSMetaClass * metaScan = NULL;// do not release + + for (metaScan = aClass; metaScan; metaScan = metaScan->getSuperClass()) { + if (metaScan == OSTypeID(IOService)) { + OSKextLog(this, + kOSKextLogProgressLevel | + kOSKextLogLoadFlag, + "Kext %s has IOService subclass %s; enabling autounload.", + getIdentifierCString(), + aClass->getClassName()); + + flags.autounloadEnabled = 1; + break; + } + } + } + + notifyAddClassObservers(this, aClass, flags); + + result = kOSReturnSuccess; finish: - if (result != kOSReturnSuccess) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s failed to register class %s.", - getIdentifierCString(), - aClass->getClassName()); - } + if (result != kOSReturnSuccess) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s failed to register class %s.", + getIdentifierCString(), + aClass->getClassName()); + } - return result; + return result; } /********************************************************************* *********************************************************************/ OSReturn OSKext::removeClass( - OSMetaClass * aClass) -{ - OSReturn result = kOSMetaClassNoKModSet; - - if (!metaClasses) { - goto finish; - } - - if (!metaClasses->containsObject(aClass)) { - OSKextLog(this, - kOSKextLogWarningLevel | - kOSKextLogLoadFlag, - "Notice - kext %s asked to unregister unknown class %s.", - getIdentifierCString(), - aClass->getClassName()); - result = kOSReturnSuccess; - goto finish; - } - - OSKextLog(this, - kOSKextLogDetailLevel | - kOSKextLogLoadFlag, - "Kext %s unregistering class %s.", - getIdentifierCString(), - aClass->getClassName()); - - metaClasses->removeObject(aClass); - - notifyRemoveClassObservers(this, aClass, flags); - - result = kOSReturnSuccess; + OSMetaClass * aClass) +{ + OSReturn result = kOSMetaClassNoKModSet; + + if (!metaClasses) { + goto finish; + } + + if (!metaClasses->containsObject(aClass)) { + OSKextLog(this, + kOSKextLogWarningLevel | + kOSKextLogLoadFlag, + "Notice - kext %s asked to unregister unknown class %s.", + getIdentifierCString(), + aClass->getClassName()); + result = kOSReturnSuccess; + goto finish; + } + + OSKextLog(this, + kOSKextLogDetailLevel | + kOSKextLogLoadFlag, + "Kext %s unregistering class %s.", + getIdentifierCString(), + aClass->getClassName()); + + metaClasses->removeObject(aClass); + + notifyRemoveClassObservers(this, aClass, flags); + + result = kOSReturnSuccess; finish: - if (result != kOSReturnSuccess) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Failed to unregister kext %s class %s.", - getIdentifierCString(), - aClass->getClassName()); - } - return result; + if (result != kOSReturnSuccess) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Failed to unregister kext %s class %s.", + getIdentifierCString(), + aClass->getClassName()); + } + return result; } /********************************************************************* @@ -7919,7 +7907,7 @@ finish: OSSet * OSKext::getMetaClasses(void) { - return metaClasses; + return metaClasses; } /********************************************************************* @@ -7927,30 +7915,30 @@ OSKext::getMetaClasses(void) bool OSKext::hasOSMetaClassInstances(void) { - bool result = false; - OSCollectionIterator * classIterator = NULL; // must release - OSMetaClass * checkClass = NULL; // do not release - - if (!metaClasses) { - goto finish; - } - - classIterator = OSCollectionIterator::withCollection(metaClasses); - if (!classIterator) 
{ - // xxx - log alloc failure? - goto finish; - } - while ((checkClass = (OSMetaClass *)classIterator->getNextObject())) { - if (checkClass->getInstanceCount()) { - result = true; - goto finish; - } - } + bool result = false; + OSCollectionIterator * classIterator = NULL; // must release + OSMetaClass * checkClass = NULL;// do not release + + if (!metaClasses) { + goto finish; + } + + classIterator = OSCollectionIterator::withCollection(metaClasses); + if (!classIterator) { + // xxx - log alloc failure? + goto finish; + } + while ((checkClass = (OSMetaClass *)classIterator->getNextObject())) { + if (checkClass->getInstanceCount()) { + result = true; + goto finish; + } + } finish: - - OSSafeReleaseNULL(classIterator); - return result; + + OSSafeReleaseNULL(classIterator); + return result; } /********************************************************************* @@ -7958,20 +7946,20 @@ finish: /* static */ void OSKext::reportOSMetaClassInstances( - const char * kextIdentifier, - OSKextLogSpec msgLogSpec) -{ - OSKext * theKext = NULL; // must release - - theKext = OSKext::lookupKextWithIdentifier(kextIdentifier); - if (!theKext) { - goto finish; - } - - theKext->reportOSMetaClassInstances(msgLogSpec); + const char * kextIdentifier, + OSKextLogSpec msgLogSpec) +{ + OSKext * theKext = NULL; // must release + + theKext = OSKext::lookupKextWithIdentifier(kextIdentifier); + if (!theKext) { + goto finish; + } + + theKext->reportOSMetaClassInstances(msgLogSpec); finish: - OSSafeReleaseNULL(theKext); - return; + OSSafeReleaseNULL(theKext); + return; } /********************************************************************* @@ -7979,32 +7967,32 @@ finish: void OSKext::reportOSMetaClassInstances(OSKextLogSpec msgLogSpec) { - OSCollectionIterator * classIterator = NULL; // must release - OSMetaClass * checkClass = NULL; // do not release - - if (!metaClasses) { - goto finish; - } - - classIterator = OSCollectionIterator::withCollection(metaClasses); - if (!classIterator) { - goto finish; - } - while ((checkClass = (OSMetaClass *)classIterator->getNextObject())) { - if (checkClass->getInstanceCount()) { - OSKextLog(this, - msgLogSpec, - " Kext %s class %s has %d instance%s.", - getIdentifierCString(), - checkClass->getClassName(), - checkClass->getInstanceCount(), - checkClass->getInstanceCount() == 1 ? "" : "s"); - } - } + OSCollectionIterator * classIterator = NULL; // must release + OSMetaClass * checkClass = NULL;// do not release + + if (!metaClasses) { + goto finish; + } + + classIterator = OSCollectionIterator::withCollection(metaClasses); + if (!classIterator) { + goto finish; + } + while ((checkClass = (OSMetaClass *)classIterator->getNextObject())) { + if (checkClass->getInstanceCount()) { + OSKextLog(this, + msgLogSpec, + " Kext %s class %s has %d instance%s.", + getIdentifierCString(), + checkClass->getClassName(), + checkClass->getInstanceCount(), + checkClass->getInstanceCount() == 1 ? 
"" : "s"); + } + } finish: - OSSafeReleaseNULL(classIterator); - return; + OSSafeReleaseNULL(classIterator); + return; } #if PRAGMA_MARK @@ -8016,1694 +8004,1690 @@ finish: /* static */ OSReturn OSKext::handleRequest( - host_priv_t hostPriv, - OSKextLogSpec clientLogFilter, - char * requestBuffer, - uint32_t requestLength, - char ** responseOut, - uint32_t * responseLengthOut, - char ** logInfoOut, - uint32_t * logInfoLengthOut) -{ - OSReturn result = kOSReturnError; - kern_return_t kmem_result = KERN_FAILURE; - - char * response = NULL; // returned by reference - uint32_t responseLength = 0; - - OSObject * parsedXML = NULL; // must release - OSDictionary * requestDict = NULL; // do not release - OSString * errorString = NULL; // must release - - OSObject * responseObject = NULL; // must release - - OSSerialize * serializer = NULL; // must release - - OSArray * logInfoArray = NULL; // must release - - OSString * predicate = NULL; // do not release - OSString * kextIdentifier = NULL; // do not release - OSArray * kextIdentifiers = NULL; // do not release - OSKext * theKext = NULL; // do not release - OSBoolean * boolArg = NULL; // do not release - - IORecursiveLockLock(sKextLock); - - if (responseOut) { - *responseOut = NULL; - *responseLengthOut = 0; - } - if (logInfoOut) { - *logInfoOut = NULL; - *logInfoLengthOut = 0; - } - - OSKext::setUserSpaceLogFilter(clientLogFilter, logInfoOut ? true : false); - - /* XML must be nul-terminated. - */ - if (requestBuffer[requestLength - 1] != '\0') { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Invalid request from user space (not nul-terminated)."); - result = kOSKextReturnBadData; - goto finish; - } - parsedXML = OSUnserializeXML((const char *)requestBuffer, &errorString); - if (parsedXML) { - requestDict = OSDynamicCast(OSDictionary, parsedXML); - } - if (!requestDict) { - const char * errorCString = "(unknown error)"; - - if (errorString && errorString->getCStringNoCopy()) { - errorCString = errorString->getCStringNoCopy(); - } else if (parsedXML) { - errorCString = "not a dictionary"; - } - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Error unserializing request from user space: %s.", - errorCString); - result = kOSKextReturnSerialization; - goto finish; - } - - predicate = _OSKextGetRequestPredicate(requestDict); - if (!predicate) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Recieved kext request from user space with no predicate."); - result = kOSKextReturnInvalidArgument; - goto finish; - } - - OSKextLog(/* kext */ NULL, - kOSKextLogDebugLevel | - kOSKextLogIPCFlag, - "Received '%s' request from user space.", - predicate->getCStringNoCopy()); - - result = kOSKextReturnNotPrivileged; - if (hostPriv == HOST_PRIV_NULL) { - /* must be root to use these kext requests */ - if (predicate->isEqualTo(kKextRequestPredicateUnload) || - predicate->isEqualTo(kKextRequestPredicateStart) || - predicate->isEqualTo(kKextRequestPredicateStop) || - predicate->isEqualTo(kKextRequestPredicateGetKernelRequests) || - predicate->isEqualTo(kKextRequestPredicateSendResource) ) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Access Failure - must be root user."); - goto finish; - } - } - - /* Get common args in anticipation of use. 
- */ - kextIdentifier = OSDynamicCast(OSString, _OSKextGetRequestArgument( - requestDict, kKextRequestArgumentBundleIdentifierKey)); - kextIdentifiers = OSDynamicCast(OSArray, _OSKextGetRequestArgument( - requestDict, kKextRequestArgumentBundleIdentifierKey)); - if (kextIdentifier) { - theKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier)); - } - boolArg = OSDynamicCast(OSBoolean, _OSKextGetRequestArgument( - requestDict, kKextRequestArgumentValueKey)); - - result = kOSKextReturnInvalidArgument; - - if (predicate->isEqualTo(kKextRequestPredicateStart)) { - if (!kextIdentifier) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Invalid arguments to kext start request."); - } else if (!theKext) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Kext %s not found for start request.", - kextIdentifier->getCStringNoCopy()); - result = kOSKextReturnNotFound; - } else { - result = theKext->start(); - } - - } else if (predicate->isEqualTo(kKextRequestPredicateStop)) { - if (!kextIdentifier) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Invalid arguments to kext stop request."); - } else if (!theKext) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Kext %s not found for stop request.", - kextIdentifier->getCStringNoCopy()); - result = kOSKextReturnNotFound; - } else { - result = theKext->stop(); - } - - } else if (predicate->isEqualTo(kKextRequestPredicateUnload)) { - if (!kextIdentifier) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Invalid arguments to kext unload request."); - } else if (!theKext) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Kext %s not found for unload request.", - kextIdentifier->getCStringNoCopy()); - result = kOSKextReturnNotFound; - } else { - OSBoolean * terminateFlag = OSDynamicCast(OSBoolean, - _OSKextGetRequestArgument(requestDict, - kKextRequestArgumentTerminateIOServicesKey)); - result = OSKext::removeKext(theKext, terminateFlag == kOSBooleanTrue); - } - - } else if (predicate->isEqualTo(kKextRequestPredicateSendResource)) { - result = OSKext::dispatchResource(requestDict); - - } else if (predicate->isEqualTo(kKextRequestPredicateGetUUIDByAddress)) { - - OSNumber *lookupNum = NULL; - lookupNum = OSDynamicCast(OSNumber, - _OSKextGetRequestArgument(requestDict, - kKextRequestArgumentLookupAddressKey)); - - responseObject = OSKext::copyKextUUIDForAddress(lookupNum); - if (responseObject) { - result = kOSReturnSuccess; - } else { - goto finish; - } - - } else if (predicate->isEqualTo(kKextRequestPredicateGetLoaded) || - predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID)) { - OSBoolean * delayAutounloadBool = NULL; - OSObject * infoKeysRaw = NULL; - OSArray * infoKeys = NULL; - uint32_t infoKeysCount = 0; - - delayAutounloadBool = OSDynamicCast(OSBoolean, - _OSKextGetRequestArgument(requestDict, - kKextRequestArgumentDelayAutounloadKey)); - - /* If asked to delay autounload, reset the timer if it's currently set. - * (That is, don't schedule an unload if one isn't already pending. - */ - if (delayAutounloadBool == kOSBooleanTrue) { - OSKext::considerUnloads(/* rescheduleOnly? 
*/ true); - } - - infoKeysRaw = _OSKextGetRequestArgument(requestDict, - kKextRequestArgumentInfoKeysKey); - infoKeys = OSDynamicCast(OSArray, infoKeysRaw); - if (infoKeysRaw && !infoKeys) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Invalid arguments to kext info request."); - goto finish; - } - - if (infoKeys) { - infoKeysCount = infoKeys->getCount(); - for (uint32_t i = 0; i < infoKeysCount; i++) { - if (!OSDynamicCast(OSString, infoKeys->getObject(i))) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Invalid arguments to kext info request."); - goto finish; - } - } - } - - if (predicate->isEqualTo(kKextRequestPredicateGetLoaded)) { - responseObject = OSKext::copyLoadedKextInfo(kextIdentifiers, infoKeys); - } - else if (predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID)) { - responseObject = OSKext::copyLoadedKextInfoByUUID(kextIdentifiers, infoKeys); - } - if (!responseObject) { - result = kOSKextReturnInternalError; - } else { - OSKextLog(/* kext */ NULL, - kOSKextLogDebugLevel | - kOSKextLogIPCFlag, - "Returning loaded kext info."); - result = kOSReturnSuccess; - } - } else if (predicate->isEqualTo(kKextRequestPredicateGetKernelRequests)) { - - /* Hand the current sKernelRequests array to the caller - * (who must release it), and make a new one. - */ - responseObject = sKernelRequests; - sKernelRequests = OSArray::withCapacity(0); - sPostedKextLoadIdentifiers->flushCollection(); - OSKextLog(/* kext */ NULL, - kOSKextLogDebugLevel | - kOSKextLogIPCFlag, - "Returning kernel requests."); - result = kOSReturnSuccess; - - } else if (predicate->isEqualTo(kKextRequestPredicateGetAllLoadRequests)) { - - /* Return the set of all requested bundle identifiers */ - responseObject = sAllKextLoadIdentifiers; - responseObject->retain(); - OSKextLog(/* kext */ NULL, - kOSKextLogDebugLevel | - kOSKextLogIPCFlag, - "Returning load requests."); - result = kOSReturnSuccess; - } - else { - OSKextLog(/* kext */ NULL, - kOSKextLogDebugLevel | - kOSKextLogIPCFlag, - "Received '%s' invalid request from user space.", - predicate->getCStringNoCopy()); - goto finish; - } - - /********** - * Now we have handle the request, or not. Gather up the response & logging - * info to ship to user space. - *********/ - - /* Note: Nothing in OSKext is supposed to retain requestDict, - * but you never know.... - */ - if (requestDict->getRetainCount() > 1) { - OSKextLog(/* kext */ NULL, - kOSKextLogWarningLevel | - kOSKextLogIPCFlag, - "Request from user space still retained by a kext; " - "probable memory leak."); - } - - if (responseOut && responseObject) { - serializer = OSSerialize::withCapacity(0); - if (!serializer) { - result = kOSKextReturnNoMemory; - goto finish; - } - - if (!responseObject->serialize(serializer)) { - OSKextLog(/* kext */ NULL, - kOSKextLogGeneralFlag | kOSKextLogErrorLevel, - "Failed to serialize response to request from user space."); - result = kOSKextReturnSerialization; - goto finish; - } - - response = (char *)serializer->text(); - responseLength = serializer->getLength(); - } - - if (responseOut && response) { - char * buffer; - - /* This kmem_alloc sets the return value of the function. 
- */ - kmem_result = kmem_alloc(kernel_map, (vm_offset_t *)&buffer, - round_page(responseLength), VM_KERN_MEMORY_OSKEXT); - if (kmem_result != KERN_SUCCESS) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogIPCFlag, - "Failed to copy response to request from user space."); - result = kmem_result; - goto finish; - } else { - /* 11981737 - clear uninitialized data in last page */ - bzero((void *)(buffer + responseLength), - (round_page(responseLength) - responseLength)); - memcpy(buffer, response, responseLength); - *responseOut = buffer; - *responseLengthOut = responseLength; - } - } + host_priv_t hostPriv, + OSKextLogSpec clientLogFilter, + char * requestBuffer, + uint32_t requestLength, + char ** responseOut, + uint32_t * responseLengthOut, + char ** logInfoOut, + uint32_t * logInfoLengthOut) +{ + OSReturn result = kOSReturnError; + kern_return_t kmem_result = KERN_FAILURE; -finish: + char * response = NULL;// returned by reference + uint32_t responseLength = 0; + + OSObject * parsedXML = NULL;// must release + OSDictionary * requestDict = NULL;// do not release + OSString * errorString = NULL;// must release + + OSObject * responseObject = NULL;// must release + + OSSerialize * serializer = NULL;// must release + + OSArray * logInfoArray = NULL;// must release + + OSString * predicate = NULL;// do not release + OSString * kextIdentifier = NULL;// do not release + OSArray * kextIdentifiers = NULL;// do not release + OSKext * theKext = NULL;// do not release + OSBoolean * boolArg = NULL;// do not release + + IORecursiveLockLock(sKextLock); + + if (responseOut) { + *responseOut = NULL; + *responseLengthOut = 0; + } + if (logInfoOut) { + *logInfoOut = NULL; + *logInfoLengthOut = 0; + } + + OSKext::setUserSpaceLogFilter(clientLogFilter, logInfoOut ? true : false); + + /* XML must be nul-terminated. 
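+     * (OSUnserializeXML() below takes a C string, so the buffer's last byte
+     * must be the terminator; requestLength counts it.)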
+     */
+    if (requestBuffer[requestLength - 1] != '\0') {
+        OSKextLog(/* kext */ NULL,
+            kOSKextLogErrorLevel |
+            kOSKextLogIPCFlag,
+            "Invalid request from user space (not nul-terminated).");
+        result = kOSKextReturnBadData;
+        goto finish;
+    }
+    parsedXML = OSUnserializeXML((const char *)requestBuffer, &errorString);
+    if (parsedXML) {
+        requestDict = OSDynamicCast(OSDictionary, parsedXML);
+    }
+    if (!requestDict) {
+        const char * errorCString = "(unknown error)";
+
+        if (errorString && errorString->getCStringNoCopy()) {
+            errorCString = errorString->getCStringNoCopy();
+        } else if (parsedXML) {
+            errorCString = "not a dictionary";
+        }
+        OSKextLog(/* kext */ NULL,
+            kOSKextLogErrorLevel |
+            kOSKextLogIPCFlag,
+            "Error unserializing request from user space: %s.",
+            errorCString);
+        result = kOSKextReturnSerialization;
+        goto finish;
+    }
+
+    predicate = _OSKextGetRequestPredicate(requestDict);
+    if (!predicate) {
+        OSKextLog(/* kext */ NULL,
+            kOSKextLogErrorLevel |
+            kOSKextLogIPCFlag,
+            "Received kext request from user space with no predicate.");
+        result = kOSKextReturnInvalidArgument;
+        goto finish;
+    }
+
+    OSKextLog(/* kext */ NULL,
+        kOSKextLogDebugLevel |
+        kOSKextLogIPCFlag,
+        "Received '%s' request from user space.",
+        predicate->getCStringNoCopy());
+
+    result = kOSKextReturnNotPrivileged;
+    if (hostPriv == HOST_PRIV_NULL) {
+        /* must be root to use these kext requests */
+        if (predicate->isEqualTo(kKextRequestPredicateUnload) ||
+            predicate->isEqualTo(kKextRequestPredicateStart) ||
+            predicate->isEqualTo(kKextRequestPredicateStop) ||
+            predicate->isEqualTo(kKextRequestPredicateGetKernelRequests) ||
+            predicate->isEqualTo(kKextRequestPredicateSendResource)) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Access Failure - must be root user.");
+            goto finish;
+        }
+    }
+
+    /* Get common args in anticipation of use.
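+     * (The same request argument, kKextRequestArgumentBundleIdentifierKey,
+     * is cast below both as a single OSString and as an OSArray; which one
+     * comes back non-NULL depends on what user space sent.)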
+     */
+    kextIdentifier = OSDynamicCast(OSString, _OSKextGetRequestArgument(
+        requestDict, kKextRequestArgumentBundleIdentifierKey));
+    kextIdentifiers = OSDynamicCast(OSArray, _OSKextGetRequestArgument(
+        requestDict, kKextRequestArgumentBundleIdentifierKey));
+    if (kextIdentifier) {
+        theKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextIdentifier));
+    }
+    boolArg = OSDynamicCast(OSBoolean, _OSKextGetRequestArgument(
+        requestDict, kKextRequestArgumentValueKey));
+
+    result = kOSKextReturnInvalidArgument;
+
+    if (predicate->isEqualTo(kKextRequestPredicateStart)) {
+        if (!kextIdentifier) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Invalid arguments to kext start request.");
+        } else if (!theKext) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Kext %s not found for start request.",
+                kextIdentifier->getCStringNoCopy());
+            result = kOSKextReturnNotFound;
+        } else {
+            result = theKext->start();
+        }
+    } else if (predicate->isEqualTo(kKextRequestPredicateStop)) {
+        if (!kextIdentifier) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Invalid arguments to kext stop request.");
+        } else if (!theKext) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Kext %s not found for stop request.",
+                kextIdentifier->getCStringNoCopy());
+            result = kOSKextReturnNotFound;
+        } else {
+            result = theKext->stop();
+        }
+    } else if (predicate->isEqualTo(kKextRequestPredicateUnload)) {
+        if (!kextIdentifier) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Invalid arguments to kext unload request.");
+        } else if (!theKext) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Kext %s not found for unload request.",
+                kextIdentifier->getCStringNoCopy());
+            result = kOSKextReturnNotFound;
+        } else {
+            OSBoolean * terminateFlag = OSDynamicCast(OSBoolean,
+                _OSKextGetRequestArgument(requestDict,
+                kKextRequestArgumentTerminateIOServicesKey));
+            result = OSKext::removeKext(theKext, terminateFlag == kOSBooleanTrue);
+        }
+    } else if (predicate->isEqualTo(kKextRequestPredicateSendResource)) {
+        result = OSKext::dispatchResource(requestDict);
+    } else if (predicate->isEqualTo(kKextRequestPredicateGetUUIDByAddress)) {
+        OSNumber *lookupNum = NULL;
+        lookupNum = OSDynamicCast(OSNumber,
+            _OSKextGetRequestArgument(requestDict,
+            kKextRequestArgumentLookupAddressKey));
+
+        responseObject = OSKext::copyKextUUIDForAddress(lookupNum);
+        if (responseObject) {
+            result = kOSReturnSuccess;
+        } else {
+            goto finish;
+        }
+    } else if (predicate->isEqualTo(kKextRequestPredicateGetLoaded) ||
+        predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID)) {
+        OSBoolean * delayAutounloadBool = NULL;
+        OSObject * infoKeysRaw = NULL;
+        OSArray * infoKeys = NULL;
+        uint32_t infoKeysCount = 0;
+
+        delayAutounloadBool = OSDynamicCast(OSBoolean,
+            _OSKextGetRequestArgument(requestDict,
+            kKextRequestArgumentDelayAutounloadKey));
+
+        /* If asked to delay autounload, reset the timer if it's currently set.
+         * (That is, don't schedule an unload if one isn't already pending.)
+         */
+        if (delayAutounloadBool == kOSBooleanTrue) {
+            OSKext::considerUnloads(/* rescheduleOnly? */ true);
+        }
+
+        infoKeysRaw = _OSKextGetRequestArgument(requestDict,
+            kKextRequestArgumentInfoKeysKey);
+        infoKeys = OSDynamicCast(OSArray, infoKeysRaw);
+        if (infoKeysRaw && !infoKeys) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Invalid arguments to kext info request.");
+            goto finish;
+        }
+
+        if (infoKeys) {
+            infoKeysCount = infoKeys->getCount();
+            for (uint32_t i = 0; i < infoKeysCount; i++) {
+                if (!OSDynamicCast(OSString, infoKeys->getObject(i))) {
+                    OSKextLog(/* kext */ NULL,
+                        kOSKextLogErrorLevel |
+                        kOSKextLogIPCFlag,
+                        "Invalid arguments to kext info request.");
+                    goto finish;
+                }
+            }
+        }
+
+        if (predicate->isEqualTo(kKextRequestPredicateGetLoaded)) {
+            responseObject = OSKext::copyLoadedKextInfo(kextIdentifiers, infoKeys);
+        } else if (predicate->isEqualTo(kKextRequestPredicateGetLoadedByUUID)) {
+            responseObject = OSKext::copyLoadedKextInfoByUUID(kextIdentifiers, infoKeys);
+        }
+        if (!responseObject) {
+            result = kOSKextReturnInternalError;
+        } else {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogDebugLevel |
+                kOSKextLogIPCFlag,
+                "Returning loaded kext info.");
+            result = kOSReturnSuccess;
+        }
+    } else if (predicate->isEqualTo(kKextRequestPredicateGetKernelRequests)) {
+        /* Hand the current sKernelRequests array to the caller
+         * (who must release it), and make a new one.
+         */
+        responseObject = sKernelRequests;
+        sKernelRequests = OSArray::withCapacity(0);
+        sPostedKextLoadIdentifiers->flushCollection();
+        OSKextLog(/* kext */ NULL,
+            kOSKextLogDebugLevel |
+            kOSKextLogIPCFlag,
+            "Returning kernel requests.");
+        result = kOSReturnSuccess;
+    } else if (predicate->isEqualTo(kKextRequestPredicateGetAllLoadRequests)) {
+        /* Return the set of all requested bundle identifiers */
+        responseObject = sAllKextLoadIdentifiers;
+        responseObject->retain();
+        OSKextLog(/* kext */ NULL,
+            kOSKextLogDebugLevel |
+            kOSKextLogIPCFlag,
+            "Returning load requests.");
+        result = kOSReturnSuccess;
+    } else {
+        OSKextLog(/* kext */ NULL,
+            kOSKextLogDebugLevel |
+            kOSKextLogIPCFlag,
+            "Received '%s' invalid request from user space.",
+            predicate->getCStringNoCopy());
+        goto finish;
+    }
+
+    /**********
+     * Now we have handled the request, or not. Gather up the response & logging
+     * info to ship to user space.
+     *********/
+
+    /* Note: Nothing in OSKext is supposed to retain requestDict,
+     * but you never know....
+     */
+    if (requestDict->getRetainCount() > 1) {
+        OSKextLog(/* kext */ NULL,
+            kOSKextLogWarningLevel |
+            kOSKextLogIPCFlag,
+            "Request from user space still retained by a kext; "
+            "probable memory leak.");
+    }
+
+    if (responseOut && responseObject) {
+        serializer = OSSerialize::withCapacity(0);
+        if (!serializer) {
+            result = kOSKextReturnNoMemory;
+            goto finish;
+        }
+
+        if (!responseObject->serialize(serializer)) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogGeneralFlag | kOSKextLogErrorLevel,
+                "Failed to serialize response to request from user space.");
+            result = kOSKextReturnSerialization;
+            goto finish;
+        }
-
-    /* Gather up the collected log messages for user space. Any messages
-     * messages past this call will not make it up as log messages but
-     * will be in the system log. Note that we ignore the return of the
-     * serialize; it has no bearing on the operation at hand even if we
-     * fail to get the log messages.
-     */
-    logInfoArray = OSKext::clearUserSpaceLogFilter();
+        response = (char *)serializer->text();
+        responseLength = serializer->getLength();
+    }
-    if (logInfoArray && logInfoOut && logInfoLengthOut) {
-        (void)OSKext::serializeLogInfo(logInfoArray,
-            logInfoOut, logInfoLengthOut);
-    }
+    if (responseOut && response) {
+        char * buffer;
-    IORecursiveLockUnlock(sKextLock);
+        /* This kmem_alloc sets the return value of the function.
+         */
+        kmem_result = kmem_alloc(kernel_map, (vm_offset_t *)&buffer,
+            round_page(responseLength), VM_KERN_MEMORY_OSKEXT);
+        if (kmem_result != KERN_SUCCESS) {
+            OSKextLog(/* kext */ NULL,
+                kOSKextLogErrorLevel |
+                kOSKextLogIPCFlag,
+                "Failed to copy response to request from user space.");
+            result = kmem_result;
+            goto finish;
+        } else {
+            /* 11981737 - clear uninitialized data in last page */
+            bzero((void *)(buffer + responseLength),
+                (round_page(responseLength) - responseLength));
+            memcpy(buffer, response, responseLength);
+            *responseOut = buffer;
+            *responseLengthOut = responseLength;
+        }
+    }
-    OSSafeReleaseNULL(parsedXML);
-    OSSafeReleaseNULL(errorString);
-    OSSafeReleaseNULL(responseObject);
-    OSSafeReleaseNULL(serializer);
-    OSSafeReleaseNULL(logInfoArray);
+finish:
-    return result;
+    /* Gather up the collected log messages for user space. Any
+     * messages past this call will not make it up as log messages but
+     * will be in the system log. Note that we ignore the return of the
+     * serialize; it has no bearing on the operation at hand even if we
+     * fail to get the log messages.
+     */
+    logInfoArray = OSKext::clearUserSpaceLogFilter();
+
+    if (logInfoArray && logInfoOut && logInfoLengthOut) {
+        (void)OSKext::serializeLogInfo(logInfoArray,
+            logInfoOut, logInfoLengthOut);
+    }
+
+    IORecursiveLockUnlock(sKextLock);
+
+    OSSafeReleaseNULL(parsedXML);
+    OSSafeReleaseNULL(errorString);
+    OSSafeReleaseNULL(responseObject);
+    OSSafeReleaseNULL(serializer);
+    OSSafeReleaseNULL(logInfoArray);
+
+    return result;
 }
 
 // #include <InstrProfiling.h>
 extern "C" {
-
-    uint64_t __llvm_profile_get_size_for_buffer_internal(const char *DataBegin,
-        const char *DataEnd,
-        const char *CountersBegin,
-        const char *CountersEnd ,
-        const char *NamesBegin,
-        const char *NamesEnd);
-    int __llvm_profile_write_buffer_internal(char *Buffer,
-        const char *DataBegin,
-        const char *DataEnd,
-        const char *CountersBegin,
-        const char *CountersEnd ,
-        const char *NamesBegin,
-        const char *NamesEnd);
+uint64_t __llvm_profile_get_size_for_buffer_internal(const char *DataBegin,
+    const char *DataEnd,
+    const char *CountersBegin,
+    const char *CountersEnd,
+    const char *NamesBegin,
+    const char *NamesEnd);
+int __llvm_profile_write_buffer_internal(char *Buffer,
+    const char *DataBegin,
+    const char *DataEnd,
+    const char *CountersBegin,
+    const char *CountersEnd,
+    const char *NamesBegin,
+    const char *NamesEnd);
 }
 
 static
-void OSKextPgoMetadataPut(char *pBuffer,
-    size_t *position,
-    size_t bufferSize,
-    uint32_t *num_pairs,
-    const char *key,
-    const char *value)
-{
-    size_t strlen_key = strlen(key);
-    size_t strlen_value = strlen(value);
-    size_t len = strlen(key) + 1 + strlen(value) + 1;
-    char *pos = pBuffer + *position;
-    *position += len;
-    if (pBuffer && bufferSize && *position <= bufferSize) {
-        memcpy(pos, key, strlen_key); pos += strlen_key;
-        *(pos++) = '=';
-        memcpy(pos, value, strlen_value); pos += strlen_value;
-        *(pos++) = 0;
-        if (num_pairs) {
-            (*num_pairs)++;
-        }
-    }
+void
+OSKextPgoMetadataPut(char *pBuffer,
+    size_t *position,
+    size_t bufferSize,
+    uint32_t *num_pairs,
+    const char
*key, + const char *value) +{ + size_t strlen_key = strlen(key); + size_t strlen_value = strlen(value); + size_t len = strlen(key) + 1 + strlen(value) + 1; + char *pos = pBuffer + *position; + *position += len; + if (pBuffer && bufferSize && *position <= bufferSize) { + memcpy(pos, key, strlen_key); pos += strlen_key; + *(pos++) = '='; + memcpy(pos, value, strlen_value); pos += strlen_value; + *(pos++) = 0; + if (num_pairs) { + (*num_pairs)++; + } + } } static -void OSKextPgoMetadataPutMax(size_t *position, const char *key, size_t value_max) +void +OSKextPgoMetadataPutMax(size_t *position, const char *key, size_t value_max) { - *position += strlen(key) + 1 + value_max + 1; + *position += strlen(key) + 1 + value_max + 1; } static -void OSKextPgoMetadataPutAll(OSKext *kext, - uuid_t instance_uuid, - char *pBuffer, - size_t *position, - size_t bufferSize, - uint32_t *num_pairs) -{ - _static_assert_1_arg(sizeof(clock_sec_t) % 2 == 0); - //log_10 2^16 ≈ 4.82 - const size_t max_secs_string_size = 5 * sizeof(clock_sec_t)/2; - const size_t max_timestamp_string_size = max_secs_string_size + 1 + 6; - - if (!pBuffer) { - OSKextPgoMetadataPutMax(position, "INSTANCE", 36); - OSKextPgoMetadataPutMax(position, "UUID", 36); - OSKextPgoMetadataPutMax(position, "TIMESTAMP", max_timestamp_string_size); - } else { - uuid_string_t instance_uuid_string; - uuid_unparse(instance_uuid, instance_uuid_string); - OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, - "INSTANCE", instance_uuid_string); - - OSData *uuid_data; - uuid_t uuid; - uuid_string_t uuid_string; - uuid_data = kext->copyUUID(); - if (uuid_data) { - memcpy(uuid, uuid_data->getBytesNoCopy(), sizeof(uuid)); - OSSafeReleaseNULL(uuid_data); - uuid_unparse(uuid, uuid_string); - OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, - "UUID", uuid_string); - } - - clock_sec_t secs; - clock_usec_t usecs; - clock_get_calendar_microtime(&secs, &usecs); - assert(usecs < 1000000); - char timestamp[max_timestamp_string_size + 1]; - _static_assert_1_arg(sizeof(long) >= sizeof(clock_sec_t)); - snprintf(timestamp, sizeof(timestamp), "%lu.%06d", (unsigned long)secs, (int)usecs); - OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, - "TIMESTAMP", timestamp); - } - - OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, - "NAME", kext->getIdentifierCString()); - - char versionCString[kOSKextVersionMaxLength]; - OSKextVersionGetString(kext->getVersion(), versionCString, kOSKextVersionMaxLength); - OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, - "VERSION", versionCString); +void +OSKextPgoMetadataPutAll(OSKext *kext, + uuid_t instance_uuid, + char *pBuffer, + size_t *position, + size_t bufferSize, + uint32_t *num_pairs) +{ + _static_assert_1_arg(sizeof(clock_sec_t) % 2 == 0); + //log_10 2^16 ≈ 4.82 + const size_t max_secs_string_size = 5 * sizeof(clock_sec_t) / 2; + const size_t max_timestamp_string_size = max_secs_string_size + 1 + 6; + + if (!pBuffer) { + OSKextPgoMetadataPutMax(position, "INSTANCE", 36); + OSKextPgoMetadataPutMax(position, "UUID", 36); + OSKextPgoMetadataPutMax(position, "TIMESTAMP", max_timestamp_string_size); + } else { + uuid_string_t instance_uuid_string; + uuid_unparse(instance_uuid, instance_uuid_string); + OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, + "INSTANCE", instance_uuid_string); + + OSData *uuid_data; + uuid_t uuid; + uuid_string_t uuid_string; + uuid_data = kext->copyUUID(); + if (uuid_data) { + memcpy(uuid, uuid_data->getBytesNoCopy(), sizeof(uuid)); + 
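+            // kext->copyUUID() above is the kext's own Mach-O UUID, distinct
+            // from the per-load instance UUID emitted as "INSTANCE".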
OSSafeReleaseNULL(uuid_data); + uuid_unparse(uuid, uuid_string); + OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, + "UUID", uuid_string); + } + clock_sec_t secs; + clock_usec_t usecs; + clock_get_calendar_microtime(&secs, &usecs); + assert(usecs < 1000000); + char timestamp[max_timestamp_string_size + 1]; + _static_assert_1_arg(sizeof(long) >= sizeof(clock_sec_t)); + snprintf(timestamp, sizeof(timestamp), "%lu.%06d", (unsigned long)secs, (int)usecs); + OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, + "TIMESTAMP", timestamp); + } + + OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, + "NAME", kext->getIdentifierCString()); + + char versionCString[kOSKextVersionMaxLength]; + OSKextVersionGetString(kext->getVersion(), versionCString, kOSKextVersionMaxLength); + OSKextPgoMetadataPut(pBuffer, position, bufferSize, num_pairs, + "VERSION", versionCString); } static -size_t OSKextPgoMetadataSize(OSKext *kext) -{ - size_t position = 0; - uuid_t fakeuuid = {}; - OSKextPgoMetadataPutAll(kext, fakeuuid, NULL, &position, 0, NULL); - return position; -} - -int OSKextGrabPgoDataLocked(OSKext *kext, - bool metadata, - uuid_t instance_uuid, - uint64_t *pSize, - char *pBuffer, - uint64_t bufferSize) -{ - int err = 0; - - kernel_section_t *sect_prf_data = NULL; - kernel_section_t *sect_prf_name = NULL; - kernel_section_t *sect_prf_cnts = NULL; - uint64_t size; - size_t metadata_size = 0; - - sect_prf_data = kext->lookupSection("__DATA", "__llvm_prf_data"); - sect_prf_name = kext->lookupSection("__DATA", "__llvm_prf_name"); - sect_prf_cnts = kext->lookupSection("__DATA", "__llvm_prf_cnts"); - - if (!sect_prf_data || !sect_prf_name || !sect_prf_cnts) { - err = ENOTSUP; - goto out; - } - - size = __llvm_profile_get_size_for_buffer_internal( - (const char*) sect_prf_data->addr, (const char*) sect_prf_data->addr + sect_prf_data->size, - (const char*) sect_prf_cnts->addr, (const char*) sect_prf_cnts->addr + sect_prf_cnts->size, - (const char*) sect_prf_name->addr, (const char*) sect_prf_name->addr + sect_prf_name->size); - - if (metadata) { - metadata_size = OSKextPgoMetadataSize(kext); - size += metadata_size; - size += sizeof(pgo_metadata_footer); - } - - - if (pSize) { - *pSize = size; - } - - if (pBuffer && bufferSize) { - if (bufferSize < size) { - err = ERANGE; - goto out; - } - - err = __llvm_profile_write_buffer_internal( - pBuffer, - (const char*) sect_prf_data->addr, (const char*) sect_prf_data->addr + sect_prf_data->size, - (const char*) sect_prf_cnts->addr, (const char*) sect_prf_cnts->addr + sect_prf_cnts->size, - (const char*) sect_prf_name->addr, (const char*) sect_prf_name->addr + sect_prf_name->size); - - if (err) { - err = EIO; - goto out; - } - - if (metadata) { - char *end_of_buffer = pBuffer + size; - struct pgo_metadata_footer *footerp = (struct pgo_metadata_footer *) (end_of_buffer - sizeof(struct pgo_metadata_footer)); - char *metadata_buffer = end_of_buffer - (sizeof(struct pgo_metadata_footer) + metadata_size); - - size_t metadata_position = 0; - uint32_t num_pairs = 0; - OSKextPgoMetadataPutAll(kext, instance_uuid, metadata_buffer, &metadata_position, metadata_size, &num_pairs); - while (metadata_position < metadata_size) { - metadata_buffer[metadata_position++] = 0; - } - - struct pgo_metadata_footer footer; - footer.magic = htonl(0x6d657461); - footer.number_of_pairs = htonl( num_pairs ); - footer.offset_to_pairs = htonl( sizeof(struct pgo_metadata_footer) + metadata_size ); - memcpy(footerp, &footer, sizeof(footer)); - } - - } +size_t 
+OSKextPgoMetadataSize(OSKext *kext) +{ + size_t position = 0; + uuid_t fakeuuid = {}; + OSKextPgoMetadataPutAll(kext, fakeuuid, NULL, &position, 0, NULL); + return position; +} + +int +OSKextGrabPgoDataLocked(OSKext *kext, + bool metadata, + uuid_t instance_uuid, + uint64_t *pSize, + char *pBuffer, + uint64_t bufferSize) +{ + int err = 0; + + kernel_section_t *sect_prf_data = NULL; + kernel_section_t *sect_prf_name = NULL; + kernel_section_t *sect_prf_cnts = NULL; + uint64_t size; + size_t metadata_size = 0; + + sect_prf_data = kext->lookupSection("__DATA", "__llvm_prf_data"); + sect_prf_name = kext->lookupSection("__DATA", "__llvm_prf_name"); + sect_prf_cnts = kext->lookupSection("__DATA", "__llvm_prf_cnts"); + + if (!sect_prf_data || !sect_prf_name || !sect_prf_cnts) { + err = ENOTSUP; + goto out; + } + + size = __llvm_profile_get_size_for_buffer_internal( + (const char*) sect_prf_data->addr, (const char*) sect_prf_data->addr + sect_prf_data->size, + (const char*) sect_prf_cnts->addr, (const char*) sect_prf_cnts->addr + sect_prf_cnts->size, + (const char*) sect_prf_name->addr, (const char*) sect_prf_name->addr + sect_prf_name->size); + + if (metadata) { + metadata_size = OSKextPgoMetadataSize(kext); + size += metadata_size; + size += sizeof(pgo_metadata_footer); + } + + + if (pSize) { + *pSize = size; + } + + if (pBuffer && bufferSize) { + if (bufferSize < size) { + err = ERANGE; + goto out; + } + + err = __llvm_profile_write_buffer_internal( + pBuffer, + (const char*) sect_prf_data->addr, (const char*) sect_prf_data->addr + sect_prf_data->size, + (const char*) sect_prf_cnts->addr, (const char*) sect_prf_cnts->addr + sect_prf_cnts->size, + (const char*) sect_prf_name->addr, (const char*) sect_prf_name->addr + sect_prf_name->size); + + if (err) { + err = EIO; + goto out; + } + + if (metadata) { + char *end_of_buffer = pBuffer + size; + struct pgo_metadata_footer *footerp = (struct pgo_metadata_footer *) (end_of_buffer - sizeof(struct pgo_metadata_footer)); + char *metadata_buffer = end_of_buffer - (sizeof(struct pgo_metadata_footer) + metadata_size); + + size_t metadata_position = 0; + uint32_t num_pairs = 0; + OSKextPgoMetadataPutAll(kext, instance_uuid, metadata_buffer, &metadata_position, metadata_size, &num_pairs); + while (metadata_position < metadata_size) { + metadata_buffer[metadata_position++] = 0; + } + + struct pgo_metadata_footer footer; + footer.magic = htonl(0x6d657461); + footer.number_of_pairs = htonl( num_pairs ); + footer.offset_to_pairs = htonl( sizeof(struct pgo_metadata_footer) + metadata_size ); + memcpy(footerp, &footer, sizeof(footer)); + } + } out: - return err; + return err; } int OSKextGrabPgoData(uuid_t uuid, - uint64_t *pSize, - char *pBuffer, - uint64_t bufferSize, - int wait_for_unload, - int metadata) + uint64_t *pSize, + char *pBuffer, + uint64_t bufferSize, + int wait_for_unload, + int metadata) { - int err = 0; - OSKext *kext = NULL; - + int err = 0; + OSKext *kext = NULL; - IORecursiveLockLock(sKextLock); - kext = OSKext::lookupKextWithUUID(uuid); - if (!kext) { - err = ENOENT; - goto out; - } + IORecursiveLockLock(sKextLock); - if (wait_for_unload) { - OSKextGrabPgoStruct s; + kext = OSKext::lookupKextWithUUID(uuid); + if (!kext) { + err = ENOENT; + goto out; + } - s.metadata = metadata; - s.pSize = pSize; - s.pBuffer = pBuffer; - s.bufferSize = bufferSize; - s.err = EINTR; + if (wait_for_unload) { + OSKextGrabPgoStruct s; - struct list_head *prev = &kext->pendingPgoHead; - struct list_head *next = kext->pendingPgoHead.next; + s.metadata = metadata; + 
s.pSize = pSize; + s.pBuffer = pBuffer; + s.bufferSize = bufferSize; + s.err = EINTR; - s.list_head.prev = prev; - s.list_head.next = next; + struct list_head *prev = &kext->pendingPgoHead; + struct list_head *next = kext->pendingPgoHead.next; - prev->next = &s.list_head; - next->prev = &s.list_head; + s.list_head.prev = prev; + s.list_head.next = next; - kext->release(); - kext = NULL; + prev->next = &s.list_head; + next->prev = &s.list_head; - IORecursiveLockSleep(sKextLock, &s, THREAD_ABORTSAFE); + kext->release(); + kext = NULL; - prev = s.list_head.prev; - next = s.list_head.next; + IORecursiveLockSleep(sKextLock, &s, THREAD_ABORTSAFE); - prev->next = next; - next->prev = prev; + prev = s.list_head.prev; + next = s.list_head.next; - err = s.err; + prev->next = next; + next->prev = prev; - } else { - err = OSKextGrabPgoDataLocked(kext, metadata, kext->instance_uuid, pSize, pBuffer, bufferSize); - } + err = s.err; + } else { + err = OSKextGrabPgoDataLocked(kext, metadata, kext->instance_uuid, pSize, pBuffer, bufferSize); + } - out: - if (kext) { - kext->release(); - } +out: + if (kext) { + kext->release(); + } - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - return err; + return err; } void OSKextResetPgoCountersLock() { - IORecursiveLockLock(sKextLock); + IORecursiveLockLock(sKextLock); } void OSKextResetPgoCountersUnlock() { - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); } extern unsigned int not_in_kdp; -void -OSKextResetPgoCounters() -{ - assert(!not_in_kdp); - uint32_t count = sLoadedKexts->getCount(); - for (uint32_t i = 0; i < count; i++) { - OSKext *kext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - kernel_section_t *sect_prf_cnts = kext->lookupSection("__DATA", "__llvm_prf_cnts"); - if (!sect_prf_cnts) { - continue; - } - memset((void*)sect_prf_cnts->addr, 0, sect_prf_cnts->size); - } -} +void +OSKextResetPgoCounters() +{ + assert(!not_in_kdp); + uint32_t count = sLoadedKexts->getCount(); + for (uint32_t i = 0; i < count; i++) { + OSKext *kext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + kernel_section_t *sect_prf_cnts = kext->lookupSection("__DATA", "__llvm_prf_cnts"); + if (!sect_prf_cnts) { + continue; + } + memset((void*)sect_prf_cnts->addr, 0, sect_prf_cnts->size); + } +} + +OSDictionary * +OSKext::copyLoadedKextInfoByUUID( + OSArray * kextIdentifiers, + OSArray * infoKeys) +{ + OSDictionary * result = NULL; + OSDictionary * kextInfo = NULL; // must release + uint32_t count, i; + uint32_t idCount = 0; + uint32_t idIndex = 0; + + IORecursiveLockLock(sKextLock); + +#if CONFIG_MACF + /* Is the calling process allowed to query kext info? */ + if (current_task() != kernel_task) { + int macCheckResult = 0; + kauth_cred_t cred = NULL; + + cred = kauth_cred_get_with_ref(); + macCheckResult = mac_kext_check_query(cred); + kauth_cred_unref(&cred); + + if (macCheckResult != 0) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Failed to query kext info (MAC policy error 0x%x).", + macCheckResult); + goto finish; + } + } +#endif + + /* Empty list of UUIDs is equivalent to no list (get all). + */ + if (kextIdentifiers && !kextIdentifiers->getCount()) { + kextIdentifiers = NULL; + } else if (kextIdentifiers) { + idCount = kextIdentifiers->getCount(); + } + + /* Same for keys. 
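+     * An empty info-key array is demoted to NULL below, i.e. "return
+     * every key".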
+ */ + if (infoKeys && !infoKeys->getCount()) { + infoKeys = NULL; + } + + count = sLoadedKexts->getCount(); + result = OSDictionary::withCapacity(count); + if (!result) { + goto finish; + } + + for (i = 0; i < count; i++) { + OSKext *thisKext = NULL;// do not release + Boolean includeThis = true; + uuid_t thisKextUUID; + uuid_t thisKextTextUUID; + OSData *uuid_data; + uuid_string_t uuid_key; + + thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + if (!thisKext) { + continue; + } + + uuid_data = thisKext->copyUUID(); + if (!uuid_data) { + continue; + } + + memcpy(&thisKextUUID, uuid_data->getBytesNoCopy(), sizeof(thisKextUUID)); + OSSafeReleaseNULL(uuid_data); + + uuid_unparse(thisKextUUID, uuid_key); + + uuid_data = thisKext->copyTextUUID(); + if (!uuid_data) { + continue; + } + memcpy(&thisKextTextUUID, uuid_data->getBytesNoCopy(), sizeof(thisKextTextUUID)); + OSSafeReleaseNULL(uuid_data); + + /* Skip current kext if we have a list of UUIDs and + * it isn't in the list. + */ + if (kextIdentifiers) { + includeThis = false; + + for (idIndex = 0; idIndex < idCount; idIndex++) { + const OSString* wantedUUID = OSDynamicCast(OSString, + kextIdentifiers->getObject(idIndex)); + + uuid_t uuid; + uuid_parse(wantedUUID->getCStringNoCopy(), uuid); + + if ((0 == uuid_compare(uuid, thisKextUUID)) + || (0 == uuid_compare(uuid, thisKextTextUUID))) { + includeThis = true; + /* Only need to find the first kext if multiple match, + * ie. asking for the kernel uuid does not need to find + * interface kexts or builtin static kexts. + */ + kextIdentifiers->removeObject(idIndex); + uuid_unparse(uuid, uuid_key); + break; + } + } + } + + if (!includeThis) { + continue; + } + + kextInfo = thisKext->copyInfo(infoKeys); + if (kextInfo) { + result->setObject(uuid_key, kextInfo); + kextInfo->release(); + } + + if (kextIdentifiers && !kextIdentifiers->getCount()) { + break; + } + } + +finish: + IORecursiveLockUnlock(sKextLock); + + return result; +} + +/********************************************************************* +*********************************************************************/ +/* static */ +OSDictionary * +OSKext::copyLoadedKextInfo( + OSArray * kextIdentifiers, + OSArray * infoKeys) +{ + OSDictionary * result = NULL; + uint32_t idCount = 0; + bool onlyLoaded; + + IORecursiveLockLock(sKextLock); + +#if CONFIG_MACF + /* Is the calling process allowed to query kext info? */ + if (current_task() != kernel_task) { + int macCheckResult = 0; + kauth_cred_t cred = NULL; + + cred = kauth_cred_get_with_ref(); + macCheckResult = mac_kext_check_query(cred); + kauth_cred_unref(&cred); + + if (macCheckResult != 0) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogLoadFlag, + "Failed to query kext info (MAC policy error 0x%x).", + macCheckResult); + goto finish; + } + } +#endif + + /* Empty list of bundle ids is equivalent to no list (get all). + */ + if (kextIdentifiers && !kextIdentifiers->getCount()) { + kextIdentifiers = NULL; + } else if (kextIdentifiers) { + idCount = kextIdentifiers->getCount(); + } + + /* Same for keys. 
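+     * (Requesting kOSBundleAllPrelinkedKey widens the walk below to kexts
+     * that are prelinked but not yet started; see onlyLoaded.)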
+     */
+    if (infoKeys && !infoKeys->getCount()) {
+        infoKeys = NULL;
+    }
+
+    onlyLoaded = (!infoKeys || !_OSArrayContainsCString(infoKeys, kOSBundleAllPrelinkedKey));
+
+    result = OSDictionary::withCapacity(128);
+    if (!result) {
+        goto finish;
+    }
+
+#if 0
+    OSKextLog(/* kext */ NULL,
+        kOSKextLogErrorLevel |
+        kOSKextLogGeneralFlag,
+        "kaslr: vm_kernel_slide 0x%lx \n",
+        vm_kernel_slide);
+    OSKextLog(/* kext */ NULL,
+        kOSKextLogErrorLevel |
+        kOSKextLogGeneralFlag,
+        "kaslr: vm_kernel_stext 0x%lx vm_kernel_etext 0x%lx \n",
+        vm_kernel_stext, vm_kernel_etext);
+    OSKextLog(/* kext */ NULL,
+        kOSKextLogErrorLevel |
+        kOSKextLogGeneralFlag,
+        "kaslr: vm_kernel_base 0x%lx vm_kernel_top 0x%lx \n",
+        vm_kernel_base, vm_kernel_top);
+    OSKextLog(/* kext */ NULL,
+        kOSKextLogErrorLevel |
+        kOSKextLogGeneralFlag,
+        "kaslr: vm_kext_base 0x%lx vm_kext_top 0x%lx \n",
+        vm_kext_base, vm_kext_top);
+    OSKextLog(/* kext */ NULL,
+        kOSKextLogErrorLevel |
+        kOSKextLogGeneralFlag,
+        "kaslr: vm_prelink_stext 0x%lx vm_prelink_etext 0x%lx \n",
+        vm_prelink_stext, vm_prelink_etext);
+    OSKextLog(/* kext */ NULL,
+        kOSKextLogErrorLevel |
+        kOSKextLogGeneralFlag,
+        "kaslr: vm_prelink_sinfo 0x%lx vm_prelink_einfo 0x%lx \n",
+        vm_prelink_sinfo, vm_prelink_einfo);
+    OSKextLog(/* kext */ NULL,
+        kOSKextLogErrorLevel |
+        kOSKextLogGeneralFlag,
+        "kaslr: vm_slinkedit 0x%lx vm_elinkedit 0x%lx \n",
+        vm_slinkedit, vm_elinkedit);
+#endif
+
+    sKextsByID->iterateObjects(^bool (const OSSymbol * thisKextID, OSObject * obj)
+    {
+        OSKext * thisKext = NULL;       // do not release
+        Boolean includeThis = true;
+        OSDictionary * kextInfo = NULL; // must release
+
+        thisKext = OSDynamicCast(OSKext, obj);
+        if (!thisKext) {
+            return false;
+        }
+
+        /* Skip current kext if not yet started and caller didn't request all.
+         */
+        if (onlyLoaded && (-1U == sLoadedKexts->getNextIndexOfObject(thisKext, 0))) {
+            return false;
+        }
+
+        /* Skip current kext if we have a list of bundle IDs and
+         * it isn't in the list.
+         */
+        if (kextIdentifiers) {
+            includeThis = false;
+
+            for (uint32_t idIndex = 0; idIndex < idCount; idIndex++) {
+                const OSString * thisRequestID = OSDynamicCast(OSString,
+                    kextIdentifiers->getObject(idIndex));
+                if (thisKextID->isEqualTo(thisRequestID)) {
+                    includeThis = true;
+                    break;
+                }
+            }
+        }
+
+        if (!includeThis) {
+            return false;
+        }
+
+        kextInfo = thisKext->copyInfo(infoKeys);
+        if (kextInfo) {
+            result->setObject(thisKext->getIdentifier(), kextInfo);
+            kextInfo->release();
+        }
+        return false;
+    });
+
+finish:
+    IORecursiveLockUnlock(sKextLock);
+
+    return result;
+}
+
+/*********************************************************************
+* Any info that needs to do allocations must goto finish on alloc
+* failure. Info that is just a lookup should just not set the object
+* if the info does not exist.
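+* (_OSKextLoadInfoDictCapacity below is only an initial sizing hint;
+* the OSDictionary grows beyond it as needed.)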
+*********************************************************************/ +#define _OSKextLoadInfoDictCapacity (12) + +OSDictionary * +OSKext::copyInfo(OSArray * infoKeys) +{ + OSDictionary * result = NULL; + bool success = false; + OSData * headerData = NULL;// must release + OSData * logData = NULL;// must release + OSNumber * cpuTypeNumber = NULL;// must release + OSNumber * cpuSubtypeNumber = NULL;// must release + OSString * versionString = NULL;// do not release + uint32_t executablePathCStringSize = 0; + char * executablePathCString = NULL;// must release + OSString * executablePathString = NULL;// must release + OSData * uuid = NULL;// must release + OSNumber * scratchNumber = NULL;// must release + OSArray * dependencyLoadTags = NULL;// must release + OSCollectionIterator * metaClassIterator = NULL;// must release + OSArray * metaClassInfo = NULL;// must release + OSDictionary * metaClassDict = NULL;// must release + OSMetaClass * thisMetaClass = NULL;// do not release + OSString * metaClassName = NULL;// must release + OSString * superclassName = NULL;// must release + uint32_t count, i; + + result = OSDictionary::withCapacity(_OSKextLoadInfoDictCapacity); + if (!result) { + goto finish; + } + + + /* Empty keys means no keys, but NULL is quicker to check. + */ + if (infoKeys && !infoKeys->getCount()) { + infoKeys = NULL; + } + + /* Headers, CPU type, and CPU subtype. + */ + if (!infoKeys || + _OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey) || + _OSArrayContainsCString(infoKeys, kOSBundleLogStringsKey) || + _OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey) || + _OSArrayContainsCString(infoKeys, kOSBundleCPUSubtypeKey)) { + if (linkedExecutable && !isInterface()) { + kernel_mach_header_t *kext_mach_hdr = (kernel_mach_header_t *) + linkedExecutable->getBytesNoCopy(); + +#if !SECURE_KERNEL + // do not return macho header info on shipping iOS - 19095897 + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey)) { + kernel_mach_header_t * temp_kext_mach_hdr; + struct load_command * lcp; + + headerData = OSData::withBytes(kext_mach_hdr, + (u_int) (sizeof(*kext_mach_hdr) + kext_mach_hdr->sizeofcmds)); + if (!headerData) { + goto finish; + } + + // unslide any vmaddrs we return to userspace - 10726716 + temp_kext_mach_hdr = (kernel_mach_header_t *) + headerData->getBytesNoCopy(); + if (temp_kext_mach_hdr == NULL) { + goto finish; + } + + lcp = (struct load_command *) (temp_kext_mach_hdr + 1); + for (i = 0; i < temp_kext_mach_hdr->ncmds; i++) { + if (lcp->cmd == LC_SEGMENT_KERNEL) { + kernel_segment_command_t * segp; + kernel_section_t * secp; + + segp = (kernel_segment_command_t *) lcp; + // 10543468 - if we jettisoned __LINKEDIT clear size info + if (flags.jettisonLinkeditSeg) { + if (strncmp(segp->segname, SEG_LINKEDIT, sizeof(segp->segname)) == 0) { + segp->vmsize = 0; + segp->fileoff = 0; + segp->filesize = 0; + } + } + +#if 0 + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "%s: LC_SEGMENT_KERNEL segname '%s' vmaddr 0x%llX 0x%lX vmsize %llu nsects %u", + __FUNCTION__, segp->segname, segp->vmaddr, + VM_KERNEL_UNSLIDE(segp->vmaddr), + segp->vmsize, segp->nsects); + if ((VM_KERNEL_IS_SLID(segp->vmaddr) == false) && + (VM_KERNEL_IS_KEXT(segp->vmaddr) == false) && + (VM_KERNEL_IS_PRELINKTEXT(segp->vmaddr) == false) && + (VM_KERNEL_IS_PRELINKINFO(segp->vmaddr) == false) && + (VM_KERNEL_IS_KEXT_LINKEDIT(segp->vmaddr) == false)) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "%s: not in 
kext range - vmaddr 0x%llX vm_kext_base 0x%lX vm_kext_top 0x%lX", + __FUNCTION__, segp->vmaddr, vm_kext_base, vm_kext_top); + } +#endif + segp->vmaddr = ml_static_unslide(segp->vmaddr); + + for (secp = firstsect(segp); secp != NULL; secp = nextsect(segp, secp)) { + secp->addr = ml_static_unslide(secp->addr); + } + } + lcp = (struct load_command *)((caddr_t)lcp + lcp->cmdsize); + } + result->setObject(kOSBundleMachOHeadersKey, headerData); + } +#endif // SECURE_KERNEL + + if (_OSArrayContainsCString(infoKeys, kOSBundleLogStringsKey)) { + osLogDataHeaderRef *header; + char headerBytes[offsetof(osLogDataHeaderRef, sections) + NUM_OS_LOG_SECTIONS * sizeof(header->sections[0])]; + + void *os_log_data = NULL; + void *cstring_data = NULL; + unsigned long os_log_size = 0; + unsigned long cstring_size = 0; + uint32_t os_log_offset = 0; + uint32_t cstring_offset = 0; + bool res; + + os_log_data = getsectdatafromheader(kext_mach_hdr, "__TEXT", "__os_log", &os_log_size); + os_log_offset = getsectoffsetfromheader(kext_mach_hdr, "__TEXT", "__os_log"); + cstring_data = getsectdatafromheader(kext_mach_hdr, "__TEXT", "__cstring", &cstring_size); + cstring_offset = getsectoffsetfromheader(kext_mach_hdr, "__TEXT", "__cstring"); + + header = (osLogDataHeaderRef *) headerBytes; + header->version = OS_LOG_HDR_VERSION; + header->sect_count = NUM_OS_LOG_SECTIONS; + header->sections[OS_LOG_SECT_IDX].sect_offset = os_log_offset; + header->sections[OS_LOG_SECT_IDX].sect_size = (uint32_t) os_log_size; + header->sections[CSTRING_SECT_IDX].sect_offset = cstring_offset; + header->sections[CSTRING_SECT_IDX].sect_size = (uint32_t) cstring_size; + + + logData = OSData::withBytes(header, (u_int) (sizeof(osLogDataHeaderRef))); + if (!logData) { + goto finish; + } + res = logData->appendBytes(&(header->sections[0]), (u_int)(header->sect_count * sizeof(header->sections[0]))); + if (!res) { + goto finish; + } + if (os_log_data) { + res = logData->appendBytes(os_log_data, (u_int)header->sections[OS_LOG_SECT_IDX].sect_size); + if (!res) { + goto finish; + } + } + if (cstring_data) { + res = logData->appendBytes(cstring_data, (u_int)header->sections[CSTRING_SECT_IDX].sect_size); + if (!res) { + goto finish; + } + } + result->setObject(kOSBundleLogStringsKey, logData); + } + + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey)) { + cpuTypeNumber = OSNumber::withNumber( + (uint64_t) kext_mach_hdr->cputype, + 8 * sizeof(kext_mach_hdr->cputype)); + if (!cpuTypeNumber) { + goto finish; + } + result->setObject(kOSBundleCPUTypeKey, cpuTypeNumber); + } + + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUSubtypeKey)) { + cpuSubtypeNumber = OSNumber::withNumber( + (uint64_t) kext_mach_hdr->cpusubtype, + 8 * sizeof(kext_mach_hdr->cpusubtype)); + if (!cpuSubtypeNumber) { + goto finish; + } + result->setObject(kOSBundleCPUSubtypeKey, cpuSubtypeNumber); + } + } + } + + /* CFBundleIdentifier. We set this regardless because it's just stupid not to. + */ + result->setObject(kCFBundleIdentifierKey, bundleID); + + /* CFBundleVersion. + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kCFBundleVersionKey)) { + versionString = OSDynamicCast(OSString, + getPropertyForHostArch(kCFBundleVersionKey)); + if (versionString) { + result->setObject(kCFBundleVersionKey, versionString); + } + } + + /* OSBundleCompatibleVersion. 
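+     * (Like CFBundleVersion above, this key is only set when the kext's
+     * info dictionary actually carries a value for it.)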
+ */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCompatibleVersionKey)) { + versionString = OSDynamicCast(OSString, + getPropertyForHostArch(kOSBundleCompatibleVersionKey)); + if (versionString) { + result->setObject(kOSBundleCompatibleVersionKey, versionString); + } + } + + /* Path. + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundlePathKey)) { + if (path) { + result->setObject(kOSBundlePathKey, path); + } + } + + + /* OSBundleExecutablePath. + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecutablePathKey)) { + if (path && executableRelPath) { + uint32_t pathLength = path->getLength(); // gets incremented below + + // +1 for slash, +1 for \0 + executablePathCStringSize = pathLength + executableRelPath->getLength() + 2; + + executablePathCString = (char *)kalloc_tag((executablePathCStringSize) * + sizeof(char), VM_KERN_MEMORY_OSKEXT); // +1 for \0 + if (!executablePathCString) { + goto finish; + } + strlcpy(executablePathCString, path->getCStringNoCopy(), + executablePathCStringSize); + executablePathCString[pathLength++] = '/'; + executablePathCString[pathLength++] = '\0'; + strlcat(executablePathCString, executableRelPath->getCStringNoCopy(), + executablePathCStringSize); + + executablePathString = OSString::withCString(executablePathCString); + + if (!executablePathString) { + goto finish; + } + + result->setObject(kOSBundleExecutablePathKey, executablePathString); + } else if (flags.builtin) { + result->setObject(kOSBundleExecutablePathKey, bundleID); + } + } + + /* UUID, if the kext has one. + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleUUIDKey)) { + uuid = copyUUID(); + if (uuid) { + result->setObject(kOSBundleUUIDKey, uuid); + uuid->release(); + } + } + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleTextUUIDKey)) { + uuid = copyTextUUID(); + if (uuid) { + result->setObject(kOSBundleTextUUIDKey, uuid); uuid->release(); + } + } + + /***** + * OSKernelResource, OSBundleIsInterface, OSBundlePrelinked, OSBundleStarted. + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSKernelResourceKey)) { + result->setObject(kOSKernelResourceKey, + isKernelComponent() ? kOSBooleanTrue : kOSBooleanFalse); + } + + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleIsInterfaceKey)) { + result->setObject(kOSBundleIsInterfaceKey, + isInterface() ? kOSBooleanTrue : kOSBooleanFalse); + } + + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundlePrelinkedKey)) { + result->setObject(kOSBundlePrelinkedKey, + isPrelinked() ? kOSBooleanTrue : kOSBooleanFalse); + } + + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleStartedKey)) { + result->setObject(kOSBundleStartedKey, + isStarted() ? kOSBooleanTrue : kOSBooleanFalse); + } + + /* LoadTag (Index). + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadTagKey)) { + scratchNumber = OSNumber::withNumber((unsigned long long)loadTag, + /* numBits */ 8 * sizeof(loadTag)); + if (!scratchNumber) { + goto finish; + } + result->setObject(kOSBundleLoadTagKey, scratchNumber); + OSSafeReleaseNULL(scratchNumber); + } + + /* LoadAddress, LoadSize. 
+ */ + if (!infoKeys || + _OSArrayContainsCString(infoKeys, kOSBundleLoadAddressKey) || + _OSArrayContainsCString(infoKeys, kOSBundleLoadSizeKey) || + _OSArrayContainsCString(infoKeys, kOSBundleExecLoadAddressKey) || + _OSArrayContainsCString(infoKeys, kOSBundleExecLoadSizeKey) || + _OSArrayContainsCString(infoKeys, kOSBundleWiredSizeKey)) { + if (isInterface() || flags.builtin || linkedExecutable) { + /* These go to userspace via serialization, so we don't want any doubts + * about their size. + */ + uint64_t loadAddress = 0; + uint32_t loadSize = 0; + uint32_t wiredSize = 0; + uint64_t execLoadAddress = 0; + uint32_t execLoadSize = 0; + + /* Interfaces always report 0 load address & size. + * Just the way they roll. + * + * xxx - leaving in # when we have a linkedExecutable...a kernelcomp + * xxx - shouldn't have one! + */ + + if (flags.builtin || linkedExecutable) { + kernel_mach_header_t *mh = NULL; + kernel_segment_command_t *seg = NULL; + + if (flags.builtin) { + loadAddress = kmod_info->address; + loadSize = kmod_info->size; + } else { + loadAddress = (uint64_t)linkedExecutable->getBytesNoCopy(); + loadSize = linkedExecutable->getLength(); + } + mh = (kernel_mach_header_t *)loadAddress; + loadAddress = ml_static_unslide(loadAddress); + + /* Walk through the kext, looking for the first executable + * segment in case we were asked for its size/address. + */ + for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) { + if (seg->initprot & VM_PROT_EXECUTE) { + execLoadAddress = ml_static_unslide(seg->vmaddr); + execLoadSize = seg->vmsize; + break; + } + } + + /* If we have a kmod_info struct, calculated the wired size + * from that. Otherwise it's the full load size. + */ + if (kmod_info) { + wiredSize = loadSize - kmod_info->hdr_size; + } else { + wiredSize = loadSize; + } + } + + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadAddressKey)) { + scratchNumber = OSNumber::withNumber( + (unsigned long long)(loadAddress), + /* numBits */ 8 * sizeof(loadAddress)); + if (!scratchNumber) { + goto finish; + } + result->setObject(kOSBundleLoadAddressKey, scratchNumber); + OSSafeReleaseNULL(scratchNumber); + } +#if CONFIG_EMBEDDED + if ((!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCacheLoadAddressKey)) + && loadAddress && loadSize) { + scratchNumber = OSNumber::withNumber( + (unsigned long long)ml_static_unslide((uintptr_t)segLOWESTTEXT), + /* numBits */ 8 * sizeof(loadAddress)); + if (!scratchNumber) { + goto finish; + } + result->setObject(kOSBundleCacheLoadAddressKey, scratchNumber); + OSSafeReleaseNULL(scratchNumber); + } + if ((!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleKextsInKernelTextKey)) + && (this == sKernelKext) && gBuiltinKmodsCount) { + result->setObject(kOSBundleKextsInKernelTextKey, kOSBooleanTrue); + } +#endif /* CONFIG_EMBEDDED */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecLoadAddressKey)) { + scratchNumber = OSNumber::withNumber( + (unsigned long long)(execLoadAddress), + /* numBits */ 8 * sizeof(execLoadAddress)); + if (!scratchNumber) { + goto finish; + } + result->setObject(kOSBundleExecLoadAddressKey, scratchNumber); + OSSafeReleaseNULL(scratchNumber); + } + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadSizeKey)) { + scratchNumber = OSNumber::withNumber( + (unsigned long long)(loadSize), + /* numBits */ 8 * sizeof(loadSize)); + if (!scratchNumber) { + goto finish; + } + result->setObject(kOSBundleLoadSizeKey, scratchNumber); + OSSafeReleaseNULL(scratchNumber); + } + 
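/* [Editorial note, not part of this patch.] Each numeric property in this
 * function follows the same OSNumber create/set/release pattern seen just
 * above: OSNumber::withNumber() returns an object holding one reference,
 * setObject() adds the dictionary's own retain, and OSSafeReleaseNULL()
 * drops the creation reference so the dictionary becomes the sole owner.
 * A minimal sketch of that pattern as a hypothetical helper (the name
 * setNumberInDict and its signature are assumptions for illustration; no
 * such helper exists in xnu):
 *
 *   static bool
 *   setNumberInDict(OSDictionary * dict, const char * key,
 *       unsigned long long value, unsigned int numBits)
 *   {
 *       OSNumber * num = OSNumber::withNumber(value, numBits); // ref == 1
 *       if (!num) {
 *           return false;          // allocation failure; caller bails to finish
 *       }
 *       dict->setObject(key, num); // dictionary retains: ref == 2
 *       OSSafeReleaseNULL(num);    // drop creation ref; dict is sole owner
 *       return true;
 *   }
 *
 * With such a helper, the kOSBundleLoadSizeKey block above would reduce to:
 *
 *   if (!setNumberInDict(result, kOSBundleLoadSizeKey, loadSize,
 *       8 * sizeof(loadSize))) {
 *       goto finish;
 *   }
 */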
if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecLoadSizeKey)) { + scratchNumber = OSNumber::withNumber( + (unsigned long long)(execLoadSize), + /* numBits */ 8 * sizeof(execLoadSize)); + if (!scratchNumber) { + goto finish; + } + result->setObject(kOSBundleExecLoadSizeKey, scratchNumber); + OSSafeReleaseNULL(scratchNumber); + } + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleWiredSizeKey)) { + scratchNumber = OSNumber::withNumber( + (unsigned long long)(wiredSize), + /* numBits */ 8 * sizeof(wiredSize)); + if (!scratchNumber) { + goto finish; + } + result->setObject(kOSBundleWiredSizeKey, scratchNumber); + OSSafeReleaseNULL(scratchNumber); + } + } + } + + /* OSBundleDependencies. In descending order for + * easy compatibility with kextstat(8). + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleDependenciesKey)) { + if ((count = getNumDependencies())) { + dependencyLoadTags = OSArray::withCapacity(count); + result->setObject(kOSBundleDependenciesKey, dependencyLoadTags); + + i = count - 1; + do { + OSKext * dependency = OSDynamicCast(OSKext, + dependencies->getObject(i)); + + OSSafeReleaseNULL(scratchNumber); + + if (!dependency) { + continue; + } + scratchNumber = OSNumber::withNumber( + (unsigned long long)dependency->getLoadTag(), + /* numBits*/ 8 * sizeof(loadTag)); + if (!scratchNumber) { + goto finish; + } + dependencyLoadTags->setObject(scratchNumber); + } while (i--); + } + } -OSDictionary * -OSKext::copyLoadedKextInfoByUUID( - OSArray * kextIdentifiers, - OSArray * infoKeys) -{ - OSDictionary * result = NULL; - OSDictionary * kextInfo = NULL; // must release - uint32_t count, i; - uint32_t idCount = 0; - uint32_t idIndex = 0; + OSSafeReleaseNULL(scratchNumber); - IORecursiveLockLock(sKextLock); + /* OSBundleMetaClasses. + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleClassesKey)) { + if (metaClasses && metaClasses->getCount()) { + metaClassIterator = OSCollectionIterator::withCollection(metaClasses); + metaClassInfo = OSArray::withCapacity(metaClasses->getCount()); + if (!metaClassIterator || !metaClassInfo) { + goto finish; + } + result->setObject(kOSBundleClassesKey, metaClassInfo); + + while ((thisMetaClass = OSDynamicCast(OSMetaClass, + metaClassIterator->getNextObject()))) { + OSSafeReleaseNULL(metaClassDict); + OSSafeReleaseNULL(scratchNumber); + OSSafeReleaseNULL(metaClassName); + OSSafeReleaseNULL(superclassName); + + metaClassDict = OSDictionary::withCapacity(3); + if (!metaClassDict) { + goto finish; + } + + metaClassName = OSString::withCString(thisMetaClass->getClassName()); + if (thisMetaClass->getSuperClass()) { + superclassName = OSString::withCString( + thisMetaClass->getSuperClass()->getClassName()); + } + scratchNumber = OSNumber::withNumber(thisMetaClass->getInstanceCount(), + 8 * sizeof(unsigned int)); + + /* Bail if any of the essentials is missing. The root class lacks a superclass, + * of course. + */ + if (!metaClassDict || !metaClassName || !scratchNumber) { + goto finish; + } + + metaClassInfo->setObject(metaClassDict); + metaClassDict->setObject(kOSMetaClassNameKey, metaClassName); + if (superclassName) { + metaClassDict->setObject(kOSMetaClassSuperclassNameKey, superclassName); + } + metaClassDict->setObject(kOSMetaClassTrackingCountKey, scratchNumber); + } + } + } -#if CONFIG_MACF - /* Is the calling process allowed to query kext info? 
*/ - if (current_task() != kernel_task) { - int macCheckResult = 0; - kauth_cred_t cred = NULL; - - cred = kauth_cred_get_with_ref(); - macCheckResult = mac_kext_check_query(cred); - kauth_cred_unref(&cred); - - if (macCheckResult != 0) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogLoadFlag, - "Failed to query kext info (MAC policy error 0x%x).", - macCheckResult); - goto finish; - } - } -#endif + /* OSBundleRetainCount. + */ + if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleRetainCountKey)) { + OSSafeReleaseNULL(scratchNumber); + { + int kextRetainCount = getRetainCount() - 1; + if (isLoaded()) { + kextRetainCount--; + } + scratchNumber = OSNumber::withNumber( + (int)kextRetainCount, + /* numBits*/ 8 * sizeof(int)); + if (scratchNumber) { + result->setObject(kOSBundleRetainCountKey, scratchNumber); + } + } + } - /* Empty list of UUIDs is equivalent to no list (get all). - */ - if (kextIdentifiers && !kextIdentifiers->getCount()) { - kextIdentifiers = NULL; - } else if (kextIdentifiers) { - idCount = kextIdentifiers->getCount(); - } - - /* Same for keys. - */ - if (infoKeys && !infoKeys->getCount()) { - infoKeys = NULL; - } - - count = sLoadedKexts->getCount(); - result = OSDictionary::withCapacity(count); - if (!result) { - goto finish; - } - - for (i = 0; i < count; i++) { - OSKext *thisKext = NULL; // do not release - Boolean includeThis = true; - uuid_t thisKextUUID; - uuid_t thisKextTextUUID; - OSData *uuid_data; - uuid_string_t uuid_key; - - thisKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (!thisKext) { - continue; - } - - uuid_data = thisKext->copyUUID(); - if (!uuid_data) { - continue; - } - - memcpy(&thisKextUUID, uuid_data->getBytesNoCopy(), sizeof(thisKextUUID)); - OSSafeReleaseNULL(uuid_data); - - uuid_unparse(thisKextUUID, uuid_key); - - uuid_data = thisKext->copyTextUUID(); - if (!uuid_data) { - continue; - } - memcpy(&thisKextTextUUID, uuid_data->getBytesNoCopy(), sizeof(thisKextTextUUID)); - OSSafeReleaseNULL(uuid_data); - - /* Skip current kext if we have a list of UUIDs and - * it isn't in the list. - */ - if (kextIdentifiers) { - includeThis = false; - - for (idIndex = 0; idIndex < idCount; idIndex++) { - const OSString* wantedUUID = OSDynamicCast(OSString, - kextIdentifiers->getObject(idIndex)); - - uuid_t uuid; - uuid_parse(wantedUUID->getCStringNoCopy(), uuid); - - if ((0 == uuid_compare(uuid, thisKextUUID)) - || (0 == uuid_compare(uuid, thisKextTextUUID))) { - includeThis = true; - /* Only need to find the first kext if multiple match, - * ie. asking for the kernel uuid does not need to find - * interface kexts or builtin static kexts. 
- */ - kextIdentifiers->removeObject(idIndex); - uuid_unparse(uuid, uuid_key); - break; - } - - } - } - - if (!includeThis) { - continue; - } - - kextInfo = thisKext->copyInfo(infoKeys); - if (kextInfo) { - result->setObject(uuid_key, kextInfo); - kextInfo->release(); - } - - if (kextIdentifiers && !kextIdentifiers->getCount()) { - break; - } - } + success = true; finish: - IORecursiveLockUnlock(sKextLock); - - return result; + OSSafeReleaseNULL(headerData); + OSSafeReleaseNULL(logData); + OSSafeReleaseNULL(cpuTypeNumber); + OSSafeReleaseNULL(cpuSubtypeNumber); + OSSafeReleaseNULL(executablePathString); + if (executablePathCString) { + kfree(executablePathCString, executablePathCStringSize); + } + OSSafeReleaseNULL(scratchNumber); + OSSafeReleaseNULL(dependencyLoadTags); + OSSafeReleaseNULL(metaClassIterator); + OSSafeReleaseNULL(metaClassInfo); + OSSafeReleaseNULL(metaClassDict); + OSSafeReleaseNULL(metaClassName); + OSSafeReleaseNULL(superclassName); + if (!success) { + OSSafeReleaseNULL(result); + } + return result; } /********************************************************************* *********************************************************************/ /* static */ -OSDictionary * -OSKext::copyLoadedKextInfo( - OSArray * kextIdentifiers, - OSArray * infoKeys) +OSReturn +OSKext::requestResource( + const char * kextIdentifierCString, + const char * resourceNameCString, + OSKextRequestResourceCallback callback, + void * context, + OSKextRequestTag * requestTagOut) { - OSDictionary * result = NULL; - uint32_t idCount = 0; - bool onlyLoaded; + OSReturn result = kOSReturnError; + OSKext * callbackKext = NULL;// must release (looked up) - IORecursiveLockLock(sKextLock); + OSKextRequestTag requestTag = -1; + OSNumber * requestTagNum = NULL;// must release -#if CONFIG_MACF - /* Is the calling process allowed to query kext info? */ - if (current_task() != kernel_task) { - int macCheckResult = 0; - kauth_cred_t cred = NULL; - - cred = kauth_cred_get_with_ref(); - macCheckResult = mac_kext_check_query(cred); - kauth_cred_unref(&cred); - - if (macCheckResult != 0) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogLoadFlag, - "Failed to query kext info (MAC policy error 0x%x).", - macCheckResult); - goto finish; - } - } -#endif + OSDictionary * requestDict = NULL;// must release + OSString * kextIdentifier = NULL;// must release + OSString * resourceName = NULL;// must release - /* Empty list of bundle ids is equivalent to no list (get all). - */ - if (kextIdentifiers && !kextIdentifiers->getCount()) { - kextIdentifiers = NULL; - } else if (kextIdentifiers) { - idCount = kextIdentifiers->getCount(); - } + OSDictionary * callbackRecord = NULL;// must release + OSData * callbackWrapper = NULL;// must release - /* Same for keys. 
- */ - if (infoKeys && !infoKeys->getCount()) { - infoKeys = NULL; - } + OSData * contextWrapper = NULL;// must release - onlyLoaded = (!infoKeys || !_OSArrayContainsCString(infoKeys, kOSBundleAllPrelinkedKey)); + IORecursiveLockLock(sKextLock); - result = OSDictionary::withCapacity(128); - if (!result) { - goto finish; - } + if (requestTagOut) { + *requestTagOut = kOSKextRequestTagInvalid; + } -#if 0 - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "kaslr: vm_kernel_slide 0x%lx \n", - vm_kernel_slide); - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "kaslr: vm_kernel_stext 0x%lx vm_kernel_etext 0x%lx \n", - vm_kernel_stext, vm_kernel_etext); - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "kaslr: vm_kernel_base 0x%lx vm_kernel_top 0x%lx \n", - vm_kernel_base, vm_kernel_top); - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "kaslr: vm_kext_base 0x%lx vm_kext_top 0x%lx \n", - vm_kext_base, vm_kext_top); - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "kaslr: vm_prelink_stext 0x%lx vm_prelink_etext 0x%lx \n", - vm_prelink_stext, vm_prelink_etext); - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "kaslr: vm_prelink_sinfo 0x%lx vm_prelink_einfo 0x%lx \n", - vm_prelink_sinfo, vm_prelink_einfo); - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "kaslr: vm_slinkedit 0x%lx vm_elinkedit 0x%lx \n", - vm_slinkedit, vm_elinkedit); -#endif + /* If requests to user space are disabled, don't go any further */ + if (!sKernelRequestsEnabled) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogIPCFlag, + "Can't request resource %s for %s - requests to user space are disabled.", + resourceNameCString, + kextIdentifierCString); + result = kOSKextReturnDisabled; + goto finish; + } - sKextsByID->iterateObjects(^bool(const OSSymbol * thisKextID, OSObject * obj) - { - OSKext * thisKext = NULL; // do not release - Boolean includeThis = true; - OSDictionary * kextInfo = NULL; // must release - - thisKext = OSDynamicCast(OSKext, obj); - if (!thisKext) { - return (false);; - } - - /* Skip current kext if not yet started and caller didn't request all. - */ - if (onlyLoaded && (-1U == sLoadedKexts->getNextIndexOfObject(thisKext, 0))) { - return (false);; - } - - /* Skip current kext if we have a list of bundle IDs and - * it isn't in the list. 
- */ - if (kextIdentifiers) { - - includeThis = false; - - for (uint32_t idIndex = 0; idIndex < idCount; idIndex++) { - const OSString * thisRequestID = OSDynamicCast(OSString, - kextIdentifiers->getObject(idIndex)); - if (thisKextID->isEqualTo(thisRequestID)) { - includeThis = true; - break; - } - } - } - - if (!includeThis) { - return (false); - } - - kextInfo = thisKext->copyInfo(infoKeys); - if (kextInfo) { - result->setObject(thisKext->getIdentifier(), kextInfo); - kextInfo->release(); - } - return (false); - }); - -finish: - IORecursiveLockUnlock(sKextLock); + if (!kextIdentifierCString || !resourceNameCString || !callback) { + result = kOSKextReturnInvalidArgument; + goto finish; + } - return result; -} + callbackKext = OSKext::lookupKextWithAddress((vm_address_t)callback); + if (!callbackKext) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogIPCFlag, + "Resource request has bad callback address."); + result = kOSKextReturnInvalidArgument; + goto finish; + } + if (!callbackKext->flags.starting && !callbackKext->flags.started) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogIPCFlag, + "Resource request callback is in a kext that is not started."); + result = kOSKextReturnInvalidArgument; + goto finish; + } -/********************************************************************* -* Any info that needs to do allocations must goto finish on alloc -* failure. Info that is just a lookup should just not set the object -* if the info does not exist. -*********************************************************************/ -#define _OSKextLoadInfoDictCapacity (12) + /* Do not allow any new requests to be made on a kext that is unloading. + */ + if (callbackKext->flags.stopping) { + result = kOSKextReturnStopping; + goto finish; + } -OSDictionary * -OSKext::copyInfo(OSArray * infoKeys) -{ - OSDictionary * result = NULL; - bool success = false; - OSData * headerData = NULL; // must release - OSData * logData = NULL; // must release - OSNumber * cpuTypeNumber = NULL; // must release - OSNumber * cpuSubtypeNumber = NULL; // must release - OSString * versionString = NULL; // do not release - uint32_t executablePathCStringSize = 0; - char * executablePathCString = NULL; // must release - OSString * executablePathString = NULL; // must release - OSData * uuid = NULL; // must release - OSNumber * scratchNumber = NULL; // must release - OSArray * dependencyLoadTags = NULL; // must release - OSCollectionIterator * metaClassIterator = NULL; // must release - OSArray * metaClassInfo = NULL; // must release - OSDictionary * metaClassDict = NULL; // must release - OSMetaClass * thisMetaClass = NULL; // do not release - OSString * metaClassName = NULL; // must release - OSString * superclassName = NULL; // must release - uint32_t count, i; - - result = OSDictionary::withCapacity(_OSKextLoadInfoDictCapacity); - if (!result) { - goto finish; - } - - - /* Empty keys means no keys, but NULL is quicker to check. - */ - if (infoKeys && !infoKeys->getCount()) { - infoKeys = NULL; - } - - /* Headers, CPU type, and CPU subtype. 
- */ - if (!infoKeys || - _OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey) || - _OSArrayContainsCString(infoKeys, kOSBundleLogStringsKey) || - _OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey) || - _OSArrayContainsCString(infoKeys, kOSBundleCPUSubtypeKey)) - { - - if (linkedExecutable && !isInterface()) { - - kernel_mach_header_t *kext_mach_hdr = (kernel_mach_header_t *) - linkedExecutable->getBytesNoCopy(); + /* If we're wrapped the next available request tag around to the negative + * numbers, we can't service any more requests. + */ + if (sNextRequestTag == kOSKextRequestTagInvalid) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogIPCFlag, + "No more request tags available; restart required."); + result = kOSKextReturnNoResources; + goto finish; + } + requestTag = sNextRequestTag++; -#if !SECURE_KERNEL - // do not return macho header info on shipping iOS - 19095897 - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey)) { - kernel_mach_header_t * temp_kext_mach_hdr; - struct load_command * lcp; - - headerData = OSData::withBytes(kext_mach_hdr, - (u_int) (sizeof(*kext_mach_hdr) + kext_mach_hdr->sizeofcmds)); - if (!headerData) { - goto finish; - } - - // unslide any vmaddrs we return to userspace - 10726716 - temp_kext_mach_hdr = (kernel_mach_header_t *) - headerData->getBytesNoCopy(); - if (temp_kext_mach_hdr == NULL) { - goto finish; - } - - lcp = (struct load_command *) (temp_kext_mach_hdr + 1); - for (i = 0; i < temp_kext_mach_hdr->ncmds; i++) { - if (lcp->cmd == LC_SEGMENT_KERNEL) { - kernel_segment_command_t * segp; - kernel_section_t * secp; - - segp = (kernel_segment_command_t *) lcp; - // 10543468 - if we jettisoned __LINKEDIT clear size info - if (flags.jettisonLinkeditSeg) { - if (strncmp(segp->segname, SEG_LINKEDIT, sizeof(segp->segname)) == 0) { - segp->vmsize = 0; - segp->fileoff = 0; - segp->filesize = 0; - } - } + result = _OSKextCreateRequest(kKextRequestPredicateRequestResource, + &requestDict); + if (result != kOSReturnSuccess) { + goto finish; + } -#if 0 - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "%s: LC_SEGMENT_KERNEL segname '%s' vmaddr 0x%llX 0x%lX vmsize %llu nsects %u", - __FUNCTION__, segp->segname, segp->vmaddr, - VM_KERNEL_UNSLIDE(segp->vmaddr), - segp->vmsize, segp->nsects); - if ( (VM_KERNEL_IS_SLID(segp->vmaddr) == false) && - (VM_KERNEL_IS_KEXT(segp->vmaddr) == false) && - (VM_KERNEL_IS_PRELINKTEXT(segp->vmaddr) == false) && - (VM_KERNEL_IS_PRELINKINFO(segp->vmaddr) == false) && - (VM_KERNEL_IS_KEXT_LINKEDIT(segp->vmaddr) == false) ) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "%s: not in kext range - vmaddr 0x%llX vm_kext_base 0x%lX vm_kext_top 0x%lX", - __FUNCTION__, segp->vmaddr, vm_kext_base, vm_kext_top); - } -#endif - segp->vmaddr = ml_static_unslide(segp->vmaddr); - - for (secp = firstsect(segp); secp != NULL; secp = nextsect(segp, secp)) { - secp->addr = ml_static_unslide(secp->addr); - } - } - lcp = (struct load_command *)((caddr_t)lcp + lcp->cmdsize); - } - result->setObject(kOSBundleMachOHeadersKey, headerData); - } -#endif // SECURE_KERNEL + kextIdentifier = OSString::withCString(kextIdentifierCString); + resourceName = OSString::withCString(resourceNameCString); + requestTagNum = OSNumber::withNumber((long long unsigned int)requestTag, + 8 * sizeof(requestTag)); + if (!kextIdentifier || + !resourceName || + !requestTagNum || + !_OSKextSetRequestArgument(requestDict, + kKextRequestArgumentBundleIdentifierKey, 
kextIdentifier) || + !_OSKextSetRequestArgument(requestDict, + kKextRequestArgumentNameKey, resourceName) || + !_OSKextSetRequestArgument(requestDict, + kKextRequestArgumentRequestTagKey, requestTagNum)) { + result = kOSKextReturnNoMemory; + goto finish; + } - if (_OSArrayContainsCString(infoKeys, kOSBundleLogStringsKey)) { - osLogDataHeaderRef *header; - char headerBytes[offsetof(osLogDataHeaderRef, sections) + NUM_OS_LOG_SECTIONS * sizeof(header->sections[0])]; - - void *os_log_data = NULL; - void *cstring_data = NULL; - unsigned long os_log_size = 0; - unsigned long cstring_size = 0; - uint32_t os_log_offset = 0; - uint32_t cstring_offset = 0; - bool res; - - os_log_data = getsectdatafromheader(kext_mach_hdr, "__TEXT", "__os_log", &os_log_size); - os_log_offset = getsectoffsetfromheader(kext_mach_hdr, "__TEXT", "__os_log"); - cstring_data = getsectdatafromheader(kext_mach_hdr, "__TEXT", "__cstring", &cstring_size); - cstring_offset = getsectoffsetfromheader(kext_mach_hdr, "__TEXT", "__cstring"); - - header = (osLogDataHeaderRef *) headerBytes; - header->version = OS_LOG_HDR_VERSION; - header->sect_count = NUM_OS_LOG_SECTIONS; - header->sections[OS_LOG_SECT_IDX].sect_offset = os_log_offset; - header->sections[OS_LOG_SECT_IDX].sect_size = (uint32_t) os_log_size; - header->sections[CSTRING_SECT_IDX].sect_offset = cstring_offset; - header->sections[CSTRING_SECT_IDX].sect_size = (uint32_t) cstring_size; - - - logData = OSData::withBytes(header, (u_int) (sizeof(osLogDataHeaderRef))); - if (!logData) { - goto finish; - } - res = logData->appendBytes(&(header->sections[0]), (u_int)(header->sect_count * sizeof(header->sections[0]))); - if (!res) { - goto finish; - } - if (os_log_data) { - res = logData->appendBytes(os_log_data, (u_int)header->sections[OS_LOG_SECT_IDX].sect_size); - if (!res) { - goto finish; - } - } - if (cstring_data) { - res = logData->appendBytes(cstring_data, (u_int)header->sections[CSTRING_SECT_IDX].sect_size); - if (!res) { - goto finish; - } - } - result->setObject(kOSBundleLogStringsKey, logData); - } - - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey)) { - cpuTypeNumber = OSNumber::withNumber( - (uint64_t) kext_mach_hdr->cputype, - 8 * sizeof(kext_mach_hdr->cputype)); - if (!cpuTypeNumber) { - goto finish; - } - result->setObject(kOSBundleCPUTypeKey, cpuTypeNumber); - } - - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUSubtypeKey)) { - cpuSubtypeNumber = OSNumber::withNumber( - (uint64_t) kext_mach_hdr->cpusubtype, - 8 * sizeof(kext_mach_hdr->cpusubtype)); - if (!cpuSubtypeNumber) { - goto finish; - } - result->setObject(kOSBundleCPUSubtypeKey, cpuSubtypeNumber); - } - } - } - - /* CFBundleIdentifier. We set this regardless because it's just stupid not to. - */ - result->setObject(kCFBundleIdentifierKey, bundleID); - - /* CFBundleVersion. - */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kCFBundleVersionKey)) { - versionString = OSDynamicCast(OSString, - getPropertyForHostArch(kCFBundleVersionKey)); - if (versionString) { - result->setObject(kCFBundleVersionKey, versionString); - } - } - - /* OSBundleCompatibleVersion. - */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCompatibleVersionKey)) { - versionString = OSDynamicCast(OSString, - getPropertyForHostArch(kOSBundleCompatibleVersionKey)); - if (versionString) { - result->setObject(kOSBundleCompatibleVersionKey, versionString); - } - } - - /* Path. 
- */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundlePathKey)) { - if (path) { - result->setObject(kOSBundlePathKey, path); - } - } - - - /* OSBundleExecutablePath. - */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecutablePathKey)) { - if (path && executableRelPath) { - - uint32_t pathLength = path->getLength(); // gets incremented below - - // +1 for slash, +1 for \0 - executablePathCStringSize = pathLength + executableRelPath->getLength() + 2; - - executablePathCString = (char *)kalloc_tag((executablePathCStringSize) * - sizeof(char), VM_KERN_MEMORY_OSKEXT); // +1 for \0 - if (!executablePathCString) { - goto finish; - } - strlcpy(executablePathCString, path->getCStringNoCopy(), - executablePathCStringSize); - executablePathCString[pathLength++] = '/'; - executablePathCString[pathLength++] = '\0'; - strlcat(executablePathCString, executableRelPath->getCStringNoCopy(), - executablePathCStringSize); - - executablePathString = OSString::withCString(executablePathCString); - - if (!executablePathString) { - goto finish; - } - - result->setObject(kOSBundleExecutablePathKey, executablePathString); - } else if (flags.builtin) { - result->setObject(kOSBundleExecutablePathKey, bundleID); - } - } - - /* UUID, if the kext has one. - */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleUUIDKey)) { - uuid = copyUUID(); - if (uuid) { - result->setObject(kOSBundleUUIDKey, uuid); - uuid->release(); - } - } - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleTextUUIDKey)) { - uuid = copyTextUUID(); - if (uuid) { - result->setObject(kOSBundleTextUUIDKey, uuid); uuid->release(); - } - } - - /***** - * OSKernelResource, OSBundleIsInterface, OSBundlePrelinked, OSBundleStarted. - */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSKernelResourceKey)) { - result->setObject(kOSKernelResourceKey, - isKernelComponent() ? kOSBooleanTrue : kOSBooleanFalse); - } - - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleIsInterfaceKey)) { - result->setObject(kOSBundleIsInterfaceKey, - isInterface() ? kOSBooleanTrue : kOSBooleanFalse); - } - - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundlePrelinkedKey)) { - result->setObject(kOSBundlePrelinkedKey, - isPrelinked() ? kOSBooleanTrue : kOSBooleanFalse); - } - - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleStartedKey)) { - result->setObject(kOSBundleStartedKey, - isStarted() ? kOSBooleanTrue : kOSBooleanFalse); - } - - /* LoadTag (Index). - */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadTagKey)) { - scratchNumber = OSNumber::withNumber((unsigned long long)loadTag, - /* numBits */ 8 * sizeof(loadTag)); - if (!scratchNumber) { - goto finish; - } - result->setObject(kOSBundleLoadTagKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); - } - - /* LoadAddress, LoadSize. - */ - if (!infoKeys || - _OSArrayContainsCString(infoKeys, kOSBundleLoadAddressKey) || - _OSArrayContainsCString(infoKeys, kOSBundleLoadSizeKey) || - _OSArrayContainsCString(infoKeys, kOSBundleExecLoadAddressKey) || - _OSArrayContainsCString(infoKeys, kOSBundleExecLoadSizeKey) || - _OSArrayContainsCString(infoKeys, kOSBundleWiredSizeKey)) - { - if (isInterface() || flags.builtin || linkedExecutable) { - /* These go to userspace via serialization, so we don't want any doubts - * about their size. 
- */ - uint64_t loadAddress = 0; - uint32_t loadSize = 0; - uint32_t wiredSize = 0; - uint64_t execLoadAddress = 0; - uint32_t execLoadSize = 0; - - /* Interfaces always report 0 load address & size. - * Just the way they roll. - * - * xxx - leaving in # when we have a linkedExecutable...a kernelcomp - * xxx - shouldn't have one! - */ - - if (flags.builtin || linkedExecutable) { - kernel_mach_header_t *mh = NULL; - kernel_segment_command_t *seg = NULL; - - if (flags.builtin) { - loadAddress = kmod_info->address; - loadSize = kmod_info->size; - } else { - loadAddress = (uint64_t)linkedExecutable->getBytesNoCopy(); - loadSize = linkedExecutable->getLength(); - } - mh = (kernel_mach_header_t *)loadAddress; - loadAddress = ml_static_unslide(loadAddress); - - /* Walk through the kext, looking for the first executable - * segment in case we were asked for its size/address. - */ - for (seg = firstsegfromheader(mh); seg != NULL; seg = nextsegfromheader(mh, seg)) { - if (seg->initprot & VM_PROT_EXECUTE) { - execLoadAddress = ml_static_unslide(seg->vmaddr); - execLoadSize = seg->vmsize; - break; - } - } - - /* If we have a kmod_info struct, calculated the wired size - * from that. Otherwise it's the full load size. - */ - if (kmod_info) { - wiredSize = loadSize - kmod_info->hdr_size; - } else { - wiredSize = loadSize; - } - } - - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadAddressKey)) { - scratchNumber = OSNumber::withNumber( - (unsigned long long)(loadAddress), - /* numBits */ 8 * sizeof(loadAddress)); - if (!scratchNumber) { - goto finish; - } - result->setObject(kOSBundleLoadAddressKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); - } -#if CONFIG_EMBEDDED - if ((!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCacheLoadAddressKey)) - && loadAddress && loadSize) { - scratchNumber = OSNumber::withNumber( - (unsigned long long)ml_static_unslide((uintptr_t)segLOWESTTEXT), - /* numBits */ 8 * sizeof(loadAddress)); - if (!scratchNumber) { - goto finish; - } - result->setObject(kOSBundleCacheLoadAddressKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); - } - if ((!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleKextsInKernelTextKey)) - && (this == sKernelKext) && gBuiltinKmodsCount) { - result->setObject(kOSBundleKextsInKernelTextKey, kOSBooleanTrue); - } -#endif /* CONFIG_EMBEDDED */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecLoadAddressKey)) { - scratchNumber = OSNumber::withNumber( - (unsigned long long)(execLoadAddress), - /* numBits */ 8 * sizeof(execLoadAddress)); - if (!scratchNumber) { - goto finish; - } - result->setObject(kOSBundleExecLoadAddressKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); - } - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleLoadSizeKey)) { - scratchNumber = OSNumber::withNumber( - (unsigned long long)(loadSize), - /* numBits */ 8 * sizeof(loadSize)); - if (!scratchNumber) { - goto finish; - } - result->setObject(kOSBundleLoadSizeKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); - } - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleExecLoadSizeKey)) { - scratchNumber = OSNumber::withNumber( - (unsigned long long)(execLoadSize), - /* numBits */ 8 * sizeof(execLoadSize)); - if (!scratchNumber) { - goto finish; - } - result->setObject(kOSBundleExecLoadSizeKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); - } - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleWiredSizeKey)) { - scratchNumber = OSNumber::withNumber( - (unsigned long long)(wiredSize), 
- /* numBits */ 8 * sizeof(wiredSize)); - if (!scratchNumber) { - goto finish; - } - result->setObject(kOSBundleWiredSizeKey, scratchNumber); - OSSafeReleaseNULL(scratchNumber); - } - } - } - - /* OSBundleDependencies. In descending order for - * easy compatibility with kextstat(8). - */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleDependenciesKey)) { - if ((count = getNumDependencies())) { - dependencyLoadTags = OSArray::withCapacity(count); - result->setObject(kOSBundleDependenciesKey, dependencyLoadTags); - - i = count - 1; - do { - OSKext * dependency = OSDynamicCast(OSKext, - dependencies->getObject(i)); - - OSSafeReleaseNULL(scratchNumber); - - if (!dependency) { - continue; - } - scratchNumber = OSNumber::withNumber( - (unsigned long long)dependency->getLoadTag(), - /* numBits*/ 8 * sizeof(loadTag)); - if (!scratchNumber) { - goto finish; - } - dependencyLoadTags->setObject(scratchNumber); - } while (i--); - } - } - - OSSafeReleaseNULL(scratchNumber); - - /* OSBundleMetaClasses. - */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleClassesKey)) { - if (metaClasses && metaClasses->getCount()) { - metaClassIterator = OSCollectionIterator::withCollection(metaClasses); - metaClassInfo = OSArray::withCapacity(metaClasses->getCount()); - if (!metaClassIterator || !metaClassInfo) { - goto finish; - } - result->setObject(kOSBundleClassesKey, metaClassInfo); - - while ( (thisMetaClass = OSDynamicCast(OSMetaClass, - metaClassIterator->getNextObject())) ) { - - OSSafeReleaseNULL(metaClassDict); - OSSafeReleaseNULL(scratchNumber); - OSSafeReleaseNULL(metaClassName); - OSSafeReleaseNULL(superclassName); - - metaClassDict = OSDictionary::withCapacity(3); - if (!metaClassDict) { - goto finish; - } - - metaClassName = OSString::withCString(thisMetaClass->getClassName()); - if (thisMetaClass->getSuperClass()) { - superclassName = OSString::withCString( - thisMetaClass->getSuperClass()->getClassName()); - } - scratchNumber = OSNumber::withNumber(thisMetaClass->getInstanceCount(), - 8 * sizeof(unsigned int)); - - /* Bail if any of the essentials is missing. The root class lacks a superclass, - * of course. - */ - if (!metaClassDict || !metaClassName || !scratchNumber) { - goto finish; - } - - metaClassInfo->setObject(metaClassDict); - metaClassDict->setObject(kOSMetaClassNameKey, metaClassName); - if (superclassName) { - metaClassDict->setObject(kOSMetaClassSuperclassNameKey, superclassName); - } - metaClassDict->setObject(kOSMetaClassTrackingCountKey, scratchNumber); - } - } - } - - /* OSBundleRetainCount. 
- */ - if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleRetainCountKey)) { - OSSafeReleaseNULL(scratchNumber); - { - int kextRetainCount = getRetainCount() - 1; - if (isLoaded()) { - kextRetainCount--; - } - scratchNumber = OSNumber::withNumber( - (int)kextRetainCount, - /* numBits*/ 8 * sizeof(int)); - if (scratchNumber) { - result->setObject(kOSBundleRetainCountKey, scratchNumber); - } - } - } - - success = true; + callbackRecord = OSDynamicCast(OSDictionary, requestDict->copyCollection()); + if (!callbackRecord) { + result = kOSKextReturnNoMemory; + goto finish; + } + // we validate callback address at call time + callbackWrapper = OSData::withBytes((void *)&callback, sizeof(void *)); + if (context) { + contextWrapper = OSData::withBytes((void *)&context, sizeof(void *)); + } + if (!callbackWrapper || !_OSKextSetRequestArgument(callbackRecord, + kKextRequestArgumentCallbackKey, callbackWrapper)) { + result = kOSKextReturnNoMemory; + goto finish; + } -finish: - OSSafeReleaseNULL(headerData); - OSSafeReleaseNULL(logData); - OSSafeReleaseNULL(cpuTypeNumber); - OSSafeReleaseNULL(cpuSubtypeNumber); - OSSafeReleaseNULL(executablePathString); - if (executablePathCString) kfree(executablePathCString, executablePathCStringSize); - OSSafeReleaseNULL(scratchNumber); - OSSafeReleaseNULL(dependencyLoadTags); - OSSafeReleaseNULL(metaClassIterator); - OSSafeReleaseNULL(metaClassInfo); - OSSafeReleaseNULL(metaClassDict); - OSSafeReleaseNULL(metaClassName); - OSSafeReleaseNULL(superclassName); - if (!success) { - OSSafeReleaseNULL(result); - } - return result; -} + if (context) { + if (!contextWrapper || !_OSKextSetRequestArgument(callbackRecord, + kKextRequestArgumentContextKey, contextWrapper)) { + result = kOSKextReturnNoMemory; + goto finish; + } + } -/********************************************************************* - *********************************************************************/ -/* static */ -OSReturn -OSKext::requestResource( - const char * kextIdentifierCString, - const char * resourceNameCString, - OSKextRequestResourceCallback callback, - void * context, - OSKextRequestTag * requestTagOut) -{ - OSReturn result = kOSReturnError; - OSKext * callbackKext = NULL; // must release (looked up) - - OSKextRequestTag requestTag = -1; - OSNumber * requestTagNum = NULL; // must release - - OSDictionary * requestDict = NULL; // must release - OSString * kextIdentifier = NULL; // must release - OSString * resourceName = NULL; // must release - - OSDictionary * callbackRecord = NULL; // must release - OSData * callbackWrapper = NULL; // must release - - OSData * contextWrapper = NULL; // must release - - IORecursiveLockLock(sKextLock); - - if (requestTagOut) { - *requestTagOut = kOSKextRequestTagInvalid; - } - - /* If requests to user space are disabled, don't go any further */ - if (!sKernelRequestsEnabled) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogIPCFlag, - "Can't request resource %s for %s - requests to user space are disabled.", - resourceNameCString, - kextIdentifierCString); - result = kOSKextReturnDisabled; - goto finish; - } - - if (!kextIdentifierCString || !resourceNameCString || !callback) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - - callbackKext = OSKext::lookupKextWithAddress((vm_address_t)callback); - if (!callbackKext) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogIPCFlag, - "Resource request has bad callback address."); - result = kOSKextReturnInvalidArgument; - goto finish; - } - if 
(!callbackKext->flags.starting && !callbackKext->flags.started) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogIPCFlag, - "Resource request callback is in a kext that is not started."); - result = kOSKextReturnInvalidArgument; - goto finish; - } - - /* Do not allow any new requests to be made on a kext that is unloading. - */ - if (callbackKext->flags.stopping) { - result = kOSKextReturnStopping; - goto finish; - } - - /* If we're wrapped the next available request tag around to the negative - * numbers, we can't service any more requests. - */ - if (sNextRequestTag == kOSKextRequestTagInvalid) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogIPCFlag, - "No more request tags available; restart required."); - result = kOSKextReturnNoResources; - goto finish; - } - requestTag = sNextRequestTag++; - - result = _OSKextCreateRequest(kKextRequestPredicateRequestResource, - &requestDict); - if (result != kOSReturnSuccess) { - goto finish; - } - - kextIdentifier = OSString::withCString(kextIdentifierCString); - resourceName = OSString::withCString(resourceNameCString); - requestTagNum = OSNumber::withNumber((long long unsigned int)requestTag, - 8 * sizeof(requestTag)); - if (!kextIdentifier || - !resourceName || - !requestTagNum || - !_OSKextSetRequestArgument(requestDict, - kKextRequestArgumentBundleIdentifierKey, kextIdentifier) || - !_OSKextSetRequestArgument(requestDict, - kKextRequestArgumentNameKey, resourceName) || - !_OSKextSetRequestArgument(requestDict, - kKextRequestArgumentRequestTagKey, requestTagNum)) { - - result = kOSKextReturnNoMemory; - goto finish; - } - - callbackRecord = OSDynamicCast(OSDictionary, requestDict->copyCollection()); - if (!callbackRecord) { - result = kOSKextReturnNoMemory; - goto finish; - } - // we validate callback address at call time - callbackWrapper = OSData::withBytes((void *)&callback, sizeof(void *)); - if (context) { - contextWrapper = OSData::withBytes((void *)&context, sizeof(void *)); - } - if (!callbackWrapper || !_OSKextSetRequestArgument(callbackRecord, - kKextRequestArgumentCallbackKey, callbackWrapper)) { - - result = kOSKextReturnNoMemory; - goto finish; - } - - if (context) { - if (!contextWrapper || !_OSKextSetRequestArgument(callbackRecord, - kKextRequestArgumentContextKey, contextWrapper)) { - - result = kOSKextReturnNoMemory; - goto finish; - } - } - - /* Only post the requests after all the other potential failure points - * have been passed. - */ - if (!sKernelRequests->setObject(requestDict) || - !sRequestCallbackRecords->setObject(callbackRecord)) { - - result = kOSKextReturnNoMemory; - goto finish; - } - - OSKext::pingKextd(); - - result = kOSReturnSuccess; - if (requestTagOut) { - *requestTagOut = requestTag; - } + /* Only post the requests after all the other potential failure points + * have been passed. + */ + if (!sKernelRequests->setObject(requestDict) || + !sRequestCallbackRecords->setObject(callbackRecord)) { + result = kOSKextReturnNoMemory; + goto finish; + } + + OSKext::pingKextd(); + + result = kOSReturnSuccess; + if (requestTagOut) { + *requestTagOut = requestTag; + } finish: - /* If we didn't succeed, yank the request & callback - * from their holding arrays. 
- */ - if (result != kOSReturnSuccess) { - unsigned int index; - - index = sKernelRequests->getNextIndexOfObject(requestDict, 0); - if (index != (unsigned int)-1) { - sKernelRequests->removeObject(index); - } - index = sRequestCallbackRecords->getNextIndexOfObject(callbackRecord, 0); - if (index != (unsigned int)-1) { - sRequestCallbackRecords->removeObject(index); - } - } + /* If we didn't succeed, yank the request & callback + * from their holding arrays. + */ + if (result != kOSReturnSuccess) { + unsigned int index; - OSKext::considerUnloads(/* rescheduleOnly? */ true); + index = sKernelRequests->getNextIndexOfObject(requestDict, 0); + if (index != (unsigned int)-1) { + sKernelRequests->removeObject(index); + } + index = sRequestCallbackRecords->getNextIndexOfObject(callbackRecord, 0); + if (index != (unsigned int)-1) { + sRequestCallbackRecords->removeObject(index); + } + } + + OSKext::considerUnloads(/* rescheduleOnly? */ true); - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - if (callbackKext) callbackKext->release(); - if (requestTagNum) requestTagNum->release(); + if (callbackKext) { + callbackKext->release(); + } + if (requestTagNum) { + requestTagNum->release(); + } - if (requestDict) requestDict->release(); - if (kextIdentifier) kextIdentifier->release(); - if (resourceName) resourceName->release(); + if (requestDict) { + requestDict->release(); + } + if (kextIdentifier) { + kextIdentifier->release(); + } + if (resourceName) { + resourceName->release(); + } - if (callbackRecord) callbackRecord->release(); - if (callbackWrapper) callbackWrapper->release(); - if (contextWrapper) contextWrapper->release(); + if (callbackRecord) { + callbackRecord->release(); + } + if (callbackWrapper) { + callbackWrapper->release(); + } + if (contextWrapper) { + contextWrapper->release(); + } - return result; + return result; } /********************************************************************* @@ -9712,25 +9696,25 @@ finish: /* static */ OSReturn OSKext::dequeueCallbackForRequestTag( - OSKextRequestTag requestTag, - OSDictionary ** callbackRecordOut) + OSKextRequestTag requestTag, + OSDictionary ** callbackRecordOut) { - OSReturn result = kOSReturnError; - OSNumber * requestTagNum = NULL; // must release + OSReturn result = kOSReturnError; + OSNumber * requestTagNum = NULL;// must release - requestTagNum = OSNumber::withNumber((long long unsigned int)requestTag, - 8 * sizeof(requestTag)); - if (!requestTagNum) { - goto finish; - } + requestTagNum = OSNumber::withNumber((long long unsigned int)requestTag, + 8 * sizeof(requestTag)); + if (!requestTagNum) { + goto finish; + } - result = OSKext::dequeueCallbackForRequestTag(requestTagNum, - callbackRecordOut); + result = OSKext::dequeueCallbackForRequestTag(requestTagNum, + callbackRecordOut); finish: - OSSafeReleaseNULL(requestTagNum); + OSSafeReleaseNULL(requestTagNum); - return result; + return result; } /********************************************************************* @@ -9739,49 +9723,49 @@ finish: /* static */ OSReturn OSKext::dequeueCallbackForRequestTag( - OSNumber * requestTagNum, - OSDictionary ** callbackRecordOut) -{ - OSReturn result = kOSKextReturnInvalidArgument; - OSDictionary * callbackRecord = NULL; // retain if matched! 
- OSNumber * callbackTagNum = NULL; // do not release - unsigned int count, i; - - result = kOSReturnError; - count = sRequestCallbackRecords->getCount(); - for (i = 0; i < count; i++) { - callbackRecord = OSDynamicCast(OSDictionary, - sRequestCallbackRecords->getObject(i)); - if (!callbackRecord) { - goto finish; - } - - /* If we don't find a tag, we basically have a leak here. Maybe - * we should just remove it. - */ - callbackTagNum = OSDynamicCast(OSNumber, _OSKextGetRequestArgument( - callbackRecord, kKextRequestArgumentRequestTagKey)); - if (!callbackTagNum) { - goto finish; - } - - /* We could be even more paranoid and check that all the incoming - * args match what's in the callback record. - */ - if (callbackTagNum->isEqualTo(requestTagNum)) { - if (callbackRecordOut) { - *callbackRecordOut = callbackRecord; - callbackRecord->retain(); - } - sRequestCallbackRecords->removeObject(i); - result = kOSReturnSuccess; - goto finish; - } - } - result = kOSKextReturnNotFound; + OSNumber * requestTagNum, + OSDictionary ** callbackRecordOut) +{ + OSReturn result = kOSKextReturnInvalidArgument; + OSDictionary * callbackRecord = NULL;// retain if matched! + OSNumber * callbackTagNum = NULL;// do not release + unsigned int count, i; + + result = kOSReturnError; + count = sRequestCallbackRecords->getCount(); + for (i = 0; i < count; i++) { + callbackRecord = OSDynamicCast(OSDictionary, + sRequestCallbackRecords->getObject(i)); + if (!callbackRecord) { + goto finish; + } + + /* If we don't find a tag, we basically have a leak here. Maybe + * we should just remove it. + */ + callbackTagNum = OSDynamicCast(OSNumber, _OSKextGetRequestArgument( + callbackRecord, kKextRequestArgumentRequestTagKey)); + if (!callbackTagNum) { + goto finish; + } + + /* We could be even more paranoid and check that all the incoming + * args match what's in the callback record. + */ + if (callbackTagNum->isEqualTo(requestTagNum)) { + if (callbackRecordOut) { + *callbackRecordOut = callbackRecord; + callbackRecord->retain(); + } + sRequestCallbackRecords->removeObject(i); + result = kOSReturnSuccess; + goto finish; + } + } + result = kOSKextReturnNotFound; finish: - return result; + return result; } @@ -9792,7 +9776,7 @@ finish: bool OSKext::isWaitingKextd(void) { - return sRequestCallbackRecords && sRequestCallbackRecords->getCount(); + return sRequestCallbackRecords && sRequestCallbackRecords->getCount(); } /********************************************************************* @@ -9802,93 +9786,97 @@ OSKext::isWaitingKextd(void) OSReturn OSKext::dispatchResource(OSDictionary * requestDict) { - OSReturn result = kOSReturnError; - OSDictionary * callbackRecord = NULL; // must release - OSNumber * requestTag = NULL; // do not release - OSNumber * requestResult = NULL; // do not release - OSData * dataObj = NULL; // do not release - uint32_t dataLength = 0; - const void * dataPtr = NULL; // do not free - OSData * callbackWrapper = NULL; // do not release - OSKextRequestResourceCallback callback = NULL; - OSData * contextWrapper = NULL; // do not release - void * context = NULL; // do not free - OSKext * callbackKext = NULL; // must release (looked up) - - /* Get the args from the request. Right now we need the tag - * to look up the callback record, and the result for invoking the callback. 
- */ - requestTag = OSDynamicCast(OSNumber, _OSKextGetRequestArgument(requestDict, - kKextRequestArgumentRequestTagKey)); - requestResult = OSDynamicCast(OSNumber, _OSKextGetRequestArgument(requestDict, - kKextRequestArgumentResultKey)); - if (!requestTag || !requestResult) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - - /* Look for a callback record matching this request's tag. - */ - result = dequeueCallbackForRequestTag(requestTag, &callbackRecord); - if (result != kOSReturnSuccess) { - goto finish; - } - - /***** - * Get the context pointer of the callback record (if there is one). - */ - contextWrapper = OSDynamicCast(OSData, _OSKextGetRequestArgument(callbackRecord, - kKextRequestArgumentContextKey)); - context = _OSKextExtractPointer(contextWrapper); - if (contextWrapper && !context) { - goto finish; - } - - callbackWrapper = OSDynamicCast(OSData, - _OSKextGetRequestArgument(callbackRecord, - kKextRequestArgumentCallbackKey)); - callback = (OSKextRequestResourceCallback) - _OSKextExtractPointer(callbackWrapper); - if (!callback) { - goto finish; - } - - /* Check for a data obj. We might not have one and that's ok, that means - * we didn't find the requested resource, and we still have to tell the - * caller that via the callback. - */ - dataObj = OSDynamicCast(OSData, _OSKextGetRequestArgument(requestDict, - kKextRequestArgumentValueKey)); - if (dataObj) { - dataPtr = dataObj->getBytesNoCopy(); - dataLength = dataObj->getLength(); - } - - callbackKext = OSKext::lookupKextWithAddress((vm_address_t)callback); - if (!callbackKext) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogIPCFlag, - "Can't invoke callback for resource request; "); - goto finish; - } - if (!callbackKext->flags.starting && !callbackKext->flags.started) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogIPCFlag, - "Can't invoke kext resource callback; "); - goto finish; - } - - (void)callback(requestTag->unsigned32BitValue(), - (OSReturn)requestResult->unsigned32BitValue(), - dataPtr, dataLength, context); - - result = kOSReturnSuccess; + OSReturn result = kOSReturnError; + OSDictionary * callbackRecord = NULL;// must release + OSNumber * requestTag = NULL;// do not release + OSNumber * requestResult = NULL;// do not release + OSData * dataObj = NULL;// do not release + uint32_t dataLength = 0; + const void * dataPtr = NULL;// do not free + OSData * callbackWrapper = NULL;// do not release + OSKextRequestResourceCallback callback = NULL; + OSData * contextWrapper = NULL;// do not release + void * context = NULL;// do not free + OSKext * callbackKext = NULL;// must release (looked up) + + /* Get the args from the request. Right now we need the tag + * to look up the callback record, and the result for invoking the callback. + */ + requestTag = OSDynamicCast(OSNumber, _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentRequestTagKey)); + requestResult = OSDynamicCast(OSNumber, _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentResultKey)); + if (!requestTag || !requestResult) { + result = kOSKextReturnInvalidArgument; + goto finish; + } + + /* Look for a callback record matching this request's tag. + */ + result = dequeueCallbackForRequestTag(requestTag, &callbackRecord); + if (result != kOSReturnSuccess) { + goto finish; + } + + /***** + * Get the context pointer of the callback record (if there is one). 
+ */ + contextWrapper = OSDynamicCast(OSData, _OSKextGetRequestArgument(callbackRecord, + kKextRequestArgumentContextKey)); + context = _OSKextExtractPointer(contextWrapper); + if (contextWrapper && !context) { + goto finish; + } + + callbackWrapper = OSDynamicCast(OSData, + _OSKextGetRequestArgument(callbackRecord, + kKextRequestArgumentCallbackKey)); + callback = (OSKextRequestResourceCallback) + _OSKextExtractPointer(callbackWrapper); + if (!callback) { + goto finish; + } + + /* Check for a data obj. We might not have one and that's ok, that means + * we didn't find the requested resource, and we still have to tell the + * caller that via the callback. + */ + dataObj = OSDynamicCast(OSData, _OSKextGetRequestArgument(requestDict, + kKextRequestArgumentValueKey)); + if (dataObj) { + dataPtr = dataObj->getBytesNoCopy(); + dataLength = dataObj->getLength(); + } + + callbackKext = OSKext::lookupKextWithAddress((vm_address_t)callback); + if (!callbackKext) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogIPCFlag, + "Can't invoke callback for resource request; "); + goto finish; + } + if (!callbackKext->flags.starting && !callbackKext->flags.started) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogIPCFlag, + "Can't invoke kext resource callback; "); + goto finish; + } + + (void)callback(requestTag->unsigned32BitValue(), + (OSReturn)requestResult->unsigned32BitValue(), + dataPtr, dataLength, context); + + result = kOSReturnSuccess; finish: - if (callbackKext) callbackKext->release(); - if (callbackRecord) callbackRecord->release(); + if (callbackKext) { + callbackKext->release(); + } + if (callbackRecord) { + callbackRecord->release(); + } - return result; + return result; } /********************************************************************* @@ -9896,37 +9884,39 @@ finish: /* static */ void OSKext::invokeRequestCallback( - OSDictionary * callbackRecord, - OSReturn callbackResult) -{ - OSString * predicate = _OSKextGetRequestPredicate(callbackRecord); - OSNumber * resultNum = NULL; // must release - - if (!predicate) { - goto finish; - } - - resultNum = OSNumber::withNumber((long long unsigned int)callbackResult, - 8 * sizeof(callbackResult)); - if (!resultNum) { - goto finish; - } - - /* Insert the result into the callback record and dispatch it as if it - * were the reply coming down from user space. - */ - _OSKextSetRequestArgument(callbackRecord, kKextRequestArgumentResultKey, - resultNum); - - if (predicate->isEqualTo(kKextRequestPredicateRequestResource)) { - /* This removes the pending callback record. - */ - OSKext::dispatchResource(callbackRecord); - } + OSDictionary * callbackRecord, + OSReturn callbackResult) +{ + OSString * predicate = _OSKextGetRequestPredicate(callbackRecord); + OSNumber * resultNum = NULL;// must release + + if (!predicate) { + goto finish; + } + + resultNum = OSNumber::withNumber((long long unsigned int)callbackResult, + 8 * sizeof(callbackResult)); + if (!resultNum) { + goto finish; + } + + /* Insert the result into the callback record and dispatch it as if it + * were the reply coming down from user space. + */ + _OSKextSetRequestArgument(callbackRecord, kKextRequestArgumentResultKey, + resultNum); + + if (predicate->isEqualTo(kKextRequestPredicateRequestResource)) { + /* This removes the pending callback record. 
+ */ + OSKext::dispatchResource(callbackRecord); + } finish: - if (resultNum) resultNum->release(); - return; + if (resultNum) { + resultNum->release(); + } + return; } /********************************************************************* @@ -9935,28 +9925,30 @@ finish: /* static */ OSReturn OSKext::cancelRequest( - OSKextRequestTag requestTag, - void ** contextOut) -{ - OSReturn result = kOSKextReturnNoMemory; - OSDictionary * callbackRecord = NULL; // must release - OSData * contextWrapper = NULL; // do not release - - IORecursiveLockLock(sKextLock); - result = OSKext::dequeueCallbackForRequestTag(requestTag, - &callbackRecord); - IORecursiveLockUnlock(sKextLock); + OSKextRequestTag requestTag, + void ** contextOut) +{ + OSReturn result = kOSKextReturnNoMemory; + OSDictionary * callbackRecord = NULL; // must release + OSData * contextWrapper = NULL;// do not release + + IORecursiveLockLock(sKextLock); + result = OSKext::dequeueCallbackForRequestTag(requestTag, + &callbackRecord); + IORecursiveLockUnlock(sKextLock); + + if (result == kOSReturnSuccess && contextOut) { + contextWrapper = OSDynamicCast(OSData, + _OSKextGetRequestArgument(callbackRecord, + kKextRequestArgumentContextKey)); + *contextOut = _OSKextExtractPointer(contextWrapper); + } - if (result == kOSReturnSuccess && contextOut) { - contextWrapper = OSDynamicCast(OSData, - _OSKextGetRequestArgument(callbackRecord, - kKextRequestArgumentContextKey)); - *contextOut = _OSKextExtractPointer(contextWrapper); - } - - if (callbackRecord) callbackRecord->release(); + if (callbackRecord) { + callbackRecord->release(); + } - return result; + return result; } /********************************************************************* @@ -9964,51 +9956,50 @@ OSKext::cancelRequest( *********************************************************************/ void OSKext::invokeOrCancelRequestCallbacks( - OSReturn callbackResult, - bool invokeFlag) -{ - unsigned int count, i; - - count = sRequestCallbackRecords->getCount(); - if (!count) { - goto finish; - } - - i = count - 1; - do { - OSDictionary * request = OSDynamicCast(OSDictionary, - sRequestCallbackRecords->getObject(i)); - - if (!request) { - continue; - } - OSData * callbackWrapper = OSDynamicCast(OSData, - _OSKextGetRequestArgument(request, - kKextRequestArgumentCallbackKey)); - - if (!callbackWrapper) { - sRequestCallbackRecords->removeObject(i); - continue; - } - - vm_address_t callbackAddress = (vm_address_t) - _OSKextExtractPointer(callbackWrapper); - - if ((kmod_info->address <= callbackAddress) && - (callbackAddress < (kmod_info->address + kmod_info->size))) { - - if (invokeFlag) { - /* This removes the callback record. 
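
invokeOrCancelRequestCallbacks() walks sRequestCallbackRecords from the highest index down, so removeObject(i) never shifts an entry that has not been visited yet. The same pattern in isolation (shouldDrop is a hypothetical predicate):

    unsigned int count = array->getCount();
    if (count) {
        unsigned int i = count - 1;
        do {
            OSObject * entry = array->getObject(i);
            if (shouldDrop(entry)) {     // hypothetical predicate
                // Safe: removal only shifts indexes above i, which
                // have already been visited.
                array->removeObject(i);
            }
        } while (i--);                   // unsigned-safe countdown
    }
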
- */ - invokeRequestCallback(request, callbackResult); - } else { - sRequestCallbackRecords->removeObject(i); - } - } - } while (i--); + OSReturn callbackResult, + bool invokeFlag) +{ + unsigned int count, i; + + count = sRequestCallbackRecords->getCount(); + if (!count) { + goto finish; + } + + i = count - 1; + do { + OSDictionary * request = OSDynamicCast(OSDictionary, + sRequestCallbackRecords->getObject(i)); + + if (!request) { + continue; + } + OSData * callbackWrapper = OSDynamicCast(OSData, + _OSKextGetRequestArgument(request, + kKextRequestArgumentCallbackKey)); + + if (!callbackWrapper) { + sRequestCallbackRecords->removeObject(i); + continue; + } + + vm_address_t callbackAddress = (vm_address_t) + _OSKextExtractPointer(callbackWrapper); + + if ((kmod_info->address <= callbackAddress) && + (callbackAddress < (kmod_info->address + kmod_info->size))) { + if (invokeFlag) { + /* This removes the callback record. + */ + invokeRequestCallback(request, callbackResult); + } else { + sRequestCallbackRecords->removeObject(i); + } + } + } while (i--); finish: - return; + return; } /********************************************************************* @@ -10017,251 +10008,269 @@ finish: uint32_t OSKext::countRequestCallbacks(void) { - uint32_t result = 0; - unsigned int count, i; - - count = sRequestCallbackRecords->getCount(); - if (!count) { - goto finish; - } - - i = count - 1; - do { - OSDictionary * request = OSDynamicCast(OSDictionary, - sRequestCallbackRecords->getObject(i)); - - if (!request) { - continue; - } - OSData * callbackWrapper = OSDynamicCast(OSData, - _OSKextGetRequestArgument(request, - kKextRequestArgumentCallbackKey)); - - if (!callbackWrapper) { - continue; - } - - vm_address_t callbackAddress = (vm_address_t) - _OSKextExtractPointer(callbackWrapper); - - if ((kmod_info->address <= callbackAddress) && - (callbackAddress < (kmod_info->address + kmod_info->size))) { - - result++; - } - } while (i--); + uint32_t result = 0; + unsigned int count, i; + + count = sRequestCallbackRecords->getCount(); + if (!count) { + goto finish; + } + + i = count - 1; + do { + OSDictionary * request = OSDynamicCast(OSDictionary, + sRequestCallbackRecords->getObject(i)); + + if (!request) { + continue; + } + OSData * callbackWrapper = OSDynamicCast(OSData, + _OSKextGetRequestArgument(request, + kKextRequestArgumentCallbackKey)); + + if (!callbackWrapper) { + continue; + } + + vm_address_t callbackAddress = (vm_address_t) + _OSKextExtractPointer(callbackWrapper); + + if ((kmod_info->address <= callbackAddress) && + (callbackAddress < (kmod_info->address + kmod_info->size))) { + result++; + } + } while (i--); finish: - return result; + return result; } /********************************************************************* *********************************************************************/ -static OSReturn _OSKextCreateRequest( - const char * predicate, - OSDictionary ** requestP) -{ - OSReturn result = kOSKextReturnNoMemory; - OSDictionary * request = NULL; // must release on error - - request = OSDictionary::withCapacity(2); - if (!request) { - goto finish; - } - result = _OSDictionarySetCStringValue(request, - kKextRequestPredicateKey, predicate); - if (result != kOSReturnSuccess) { - goto finish; - } - result = kOSReturnSuccess; +static OSReturn +_OSKextCreateRequest( + const char * predicate, + OSDictionary ** requestP) +{ + OSReturn result = kOSKextReturnNoMemory; + OSDictionary * request = NULL; // must release on error + + request = OSDictionary::withCapacity(2); + if (!request) { + 
goto finish; + } + result = _OSDictionarySetCStringValue(request, + kKextRequestPredicateKey, predicate); + if (result != kOSReturnSuccess) { + goto finish; + } + result = kOSReturnSuccess; finish: - if (result != kOSReturnSuccess) { - if (request) request->release(); - } else { - *requestP = request; - } + if (result != kOSReturnSuccess) { + if (request) { + request->release(); + } + } else { + *requestP = request; + } - return result; + return result; } - + /********************************************************************* *********************************************************************/ -static OSString * _OSKextGetRequestPredicate(OSDictionary * requestDict) +static OSString * +_OSKextGetRequestPredicate(OSDictionary * requestDict) { - return OSDynamicCast(OSString, - requestDict->getObject(kKextRequestPredicateKey)); + return OSDynamicCast(OSString, + requestDict->getObject(kKextRequestPredicateKey)); } /********************************************************************* *********************************************************************/ -static OSObject * _OSKextGetRequestArgument( - OSDictionary * requestDict, - const char * argName) +static OSObject * +_OSKextGetRequestArgument( + OSDictionary * requestDict, + const char * argName) { - OSDictionary * args = OSDynamicCast(OSDictionary, - requestDict->getObject(kKextRequestArgumentsKey)); - if (args) { - return args->getObject(argName); - } - return NULL; + OSDictionary * args = OSDynamicCast(OSDictionary, + requestDict->getObject(kKextRequestArgumentsKey)); + if (args) { + return args->getObject(argName); + } + return NULL; } /********************************************************************* *********************************************************************/ -static bool _OSKextSetRequestArgument( - OSDictionary * requestDict, - const char * argName, - OSObject * value) -{ - OSDictionary * args = OSDynamicCast(OSDictionary, - requestDict->getObject(kKextRequestArgumentsKey)); - if (!args) { - args = OSDictionary::withCapacity(2); - if (!args) { - goto finish; - } - requestDict->setObject(kKextRequestArgumentsKey, args); - args->release(); - } - if (args) { - return args->setObject(argName, value); - } +static bool +_OSKextSetRequestArgument( + OSDictionary * requestDict, + const char * argName, + OSObject * value) +{ + OSDictionary * args = OSDynamicCast(OSDictionary, + requestDict->getObject(kKextRequestArgumentsKey)); + if (!args) { + args = OSDictionary::withCapacity(2); + if (!args) { + goto finish; + } + requestDict->setObject(kKextRequestArgumentsKey, args); + args->release(); + } + if (args) { + return args->setObject(argName, value); + } finish: - return false; + return false; } /********************************************************************* *********************************************************************/ -static void * _OSKextExtractPointer(OSData * wrapper) +static void * +_OSKextExtractPointer(OSData * wrapper) { - void * result = NULL; - const void * resultPtr = NULL; - - if (!wrapper) { - goto finish; - } - resultPtr = wrapper->getBytesNoCopy(); - result = *(void **)resultPtr; + void * result = NULL; + const void * resultPtr = NULL; + + if (!wrapper) { + goto finish; + } + resultPtr = wrapper->getBytesNoCopy(); + result = *(void **)resultPtr; finish: - return result; + return result; } /********************************************************************* *********************************************************************/ -static OSReturn _OSDictionarySetCStringValue( - OSDictionary 
* dict, - const char * cKey, - const char * cValue) -{ - OSReturn result = kOSKextReturnNoMemory; - const OSSymbol * key = NULL; // must release - OSString * value = NULL; // must release - - key = OSSymbol::withCString(cKey); - value = OSString::withCString(cValue); - if (!key || !value) { - goto finish; - } - if (dict->setObject(key, value)) { - result = kOSReturnSuccess; - } +static OSReturn +_OSDictionarySetCStringValue( + OSDictionary * dict, + const char * cKey, + const char * cValue) +{ + OSReturn result = kOSKextReturnNoMemory; + const OSSymbol * key = NULL; // must release + OSString * value = NULL; // must release + + key = OSSymbol::withCString(cKey); + value = OSString::withCString(cValue); + if (!key || !value) { + goto finish; + } + if (dict->setObject(key, value)) { + result = kOSReturnSuccess; + } finish: - if (key) key->release(); - if (value) value->release(); + if (key) { + key->release(); + } + if (value) { + value->release(); + } - return result; + return result; } /********************************************************************* *********************************************************************/ -static bool _OSArrayContainsCString( - OSArray * array, - const char * cString) +static bool +_OSArrayContainsCString( + OSArray * array, + const char * cString) { - bool result = false; - const OSSymbol * symbol = NULL; - uint32_t count, i; - - if (!array || !cString) { - goto finish; - } + bool result = false; + const OSSymbol * symbol = NULL; + uint32_t count, i; + + if (!array || !cString) { + goto finish; + } - symbol = OSSymbol::withCStringNoCopy(cString); - if (!symbol) { - goto finish; - } + symbol = OSSymbol::withCStringNoCopy(cString); + if (!symbol) { + goto finish; + } - count = array->getCount(); - for (i = 0; i < count; i++) { - OSObject * thisObject = array->getObject(i); - if (symbol->isEqualTo(thisObject)) { - result = true; - goto finish; - } - } + count = array->getCount(); + for (i = 0; i < count; i++) { + OSObject * thisObject = array->getObject(i); + if (symbol->isEqualTo(thisObject)) { + result = true; + goto finish; + } + } finish: - if (symbol) symbol->release(); - return result; + if (symbol) { + symbol->release(); + } + return result; } /********************************************************************* - * We really only care about boot / system start up related kexts. - * We return true if we're less than REBUILD_MAX_TIME since start up, - * otherwise return false. - *********************************************************************/ -bool _OSKextInPrelinkRebuildWindow(void) -{ - static bool outside_the_window = false; - AbsoluteTime my_abstime; - UInt64 my_ns; - SInt32 my_secs; - - if (outside_the_window) { - return(false); - } - clock_get_uptime(&my_abstime); - absolutetime_to_nanoseconds(my_abstime, &my_ns); - my_secs = (SInt32)(my_ns / NSEC_PER_SEC); - if (my_secs > REBUILD_MAX_TIME) { - outside_the_window = true; - return(false); - } - return(true); +* We really only care about boot / system start up related kexts. +* We return true if we're less than REBUILD_MAX_TIME since start up, +* otherwise return false. 
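
The window test that follows latches: after REBUILD_MAX_TIME seconds of uptime, the static outside_the_window flag makes every later call return false without re-reading the clock. A hypothetical caller-side sketch:

    // Only do prelink-rebuild bookkeeping while still in the boot window.
    if (!_OSKextInPrelinkRebuildWindow()) {
        return;                  // past REBUILD_MAX_TIME; skip the work
    }
    queuePrelinkRebuild();       // stand-in for the real bookkeeping
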
+*********************************************************************/ +bool +_OSKextInPrelinkRebuildWindow(void) +{ + static bool outside_the_window = false; + AbsoluteTime my_abstime; + UInt64 my_ns; + SInt32 my_secs; + + if (outside_the_window) { + return false; + } + clock_get_uptime(&my_abstime); + absolutetime_to_nanoseconds(my_abstime, &my_ns); + my_secs = (SInt32)(my_ns / NSEC_PER_SEC); + if (my_secs > REBUILD_MAX_TIME) { + outside_the_window = true; + return false; + } + return true; } /********************************************************************* - *********************************************************************/ -bool _OSKextInUnloadedPrelinkedKexts( const OSSymbol * theBundleID ) -{ - int unLoadedCount, i; - bool result = false; - - IORecursiveLockLock(sKextLock); - - if (sUnloadedPrelinkedKexts == NULL) { - goto finish; - } - unLoadedCount = sUnloadedPrelinkedKexts->getCount(); - if (unLoadedCount == 0) { - goto finish; - } - - for (i = 0; i < unLoadedCount; i++) { - const OSSymbol * myBundleID; // do not release - - myBundleID = OSDynamicCast(OSSymbol, sUnloadedPrelinkedKexts->getObject(i)); - if (!myBundleID) continue; - if (theBundleID->isEqualTo(myBundleID->getCStringNoCopy())) { - result = true; - break; - } - } +*********************************************************************/ +bool +_OSKextInUnloadedPrelinkedKexts( const OSSymbol * theBundleID ) +{ + int unLoadedCount, i; + bool result = false; + + IORecursiveLockLock(sKextLock); + + if (sUnloadedPrelinkedKexts == NULL) { + goto finish; + } + unLoadedCount = sUnloadedPrelinkedKexts->getCount(); + if (unLoadedCount == 0) { + goto finish; + } + + for (i = 0; i < unLoadedCount; i++) { + const OSSymbol * myBundleID;// do not release + + myBundleID = OSDynamicCast(OSSymbol, sUnloadedPrelinkedKexts->getObject(i)); + if (!myBundleID) { + continue; + } + if (theBundleID->isEqualTo(myBundleID->getCStringNoCopy())) { + result = true; + break; + } + } finish: - IORecursiveLockUnlock(sKextLock); - return(result); + IORecursiveLockUnlock(sKextLock); + return result; } #if PRAGMA_MARK @@ -10273,66 +10282,71 @@ finish: OSArray * OSKext::copyAllKextPersonalities(bool filterSafeBootFlag) { - OSArray * result = NULL; // returned - OSCollectionIterator * kextIterator = NULL; // must release - OSArray * personalities = NULL; // must release - OSCollectionIterator * personalitiesIterator = NULL; // must release - - OSString * kextID = NULL; // do not release - OSKext * theKext = NULL; // do not release - - IORecursiveLockLock(sKextLock); - - /* Let's conservatively guess that any given kext has around 3 - * personalities for now. 
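
OSArray::merge() appends (and retains) every element of its argument, so the loop below needs only one aggregate array, sized by the three-personalities-per-kext guess. The step in isolation, with kextCount standing in for sKextsByID->getCount():

    OSArray * all = OSArray::withCapacity(kextCount * 3); // capacity is a hint
    OSArray * one = theKext->copyPersonalitiesArray();    // retained copy
    if (all && one) {
        all->merge(one);   // appends each personality, retaining it
    }
    if (one) {
        one->release();
    }
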
- */ - result = OSArray::withCapacity(sKextsByID->getCount() * 3); - if (!result) { - goto finish; - } - - kextIterator = OSCollectionIterator::withCollection(sKextsByID); - if (!kextIterator) { - goto finish; - } - - while ((kextID = OSDynamicCast(OSString, kextIterator->getNextObject()))) { - if (personalitiesIterator) { - personalitiesIterator->release(); - personalitiesIterator = NULL; - } - if (personalities) { - personalities->release(); - personalities = NULL; - } - - theKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextID)); - if (!sSafeBoot || !filterSafeBootFlag || theKext->isLoadableInSafeBoot()) { - personalities = theKext->copyPersonalitiesArray(); - if (!personalities) { - continue; - } - result->merge(personalities); - } else { - // xxx - check for better place to put this log msg - OSKextLog(theKext, - kOSKextLogWarningLevel | - kOSKextLogLoadFlag, - "Kext %s is not loadable during safe boot; " - "omitting its personalities.", - theKext->getIdentifierCString()); - } - - } + OSArray * result = NULL;// returned + OSCollectionIterator * kextIterator = NULL;// must release + OSArray * personalities = NULL;// must release + OSCollectionIterator * personalitiesIterator = NULL; // must release + + OSString * kextID = NULL;// do not release + OSKext * theKext = NULL;// do not release + + IORecursiveLockLock(sKextLock); + + /* Let's conservatively guess that any given kext has around 3 + * personalities for now. + */ + result = OSArray::withCapacity(sKextsByID->getCount() * 3); + if (!result) { + goto finish; + } + + kextIterator = OSCollectionIterator::withCollection(sKextsByID); + if (!kextIterator) { + goto finish; + } + + while ((kextID = OSDynamicCast(OSString, kextIterator->getNextObject()))) { + if (personalitiesIterator) { + personalitiesIterator->release(); + personalitiesIterator = NULL; + } + if (personalities) { + personalities->release(); + personalities = NULL; + } + + theKext = OSDynamicCast(OSKext, sKextsByID->getObject(kextID)); + if (!sSafeBoot || !filterSafeBootFlag || theKext->isLoadableInSafeBoot()) { + personalities = theKext->copyPersonalitiesArray(); + if (!personalities) { + continue; + } + result->merge(personalities); + } else { + // xxx - check for better place to put this log msg + OSKextLog(theKext, + kOSKextLogWarningLevel | + kOSKextLogLoadFlag, + "Kext %s is not loadable during safe boot; " + "omitting its personalities.", + theKext->getIdentifierCString()); + } + } finish: - IORecursiveLockUnlock(sKextLock); + IORecursiveLockUnlock(sKextLock); - if (kextIterator) kextIterator->release(); - if (personalitiesIterator) personalitiesIterator->release(); - if (personalities) personalities->release(); + if (kextIterator) { + kextIterator->release(); + } + if (personalitiesIterator) { + personalitiesIterator->release(); + } + if (personalities) { + personalities->release(); + } - return result; + return result; } /********************************************************************* @@ -10341,31 +10355,31 @@ finish: void OSKext::sendAllKextPersonalitiesToCatalog(bool startMatching) { - int numPersonalities = 0; + int numPersonalities = 0; - OSKextLog(/* kext */ NULL, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Sending all eligible registered kexts' personalities " - "to the IOCatalogue %s.", - startMatching ? "and starting matching" : "but not starting matching"); + OSKextLog(/* kext */ NULL, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Sending all eligible registered kexts' personalities " + "to the IOCatalogue %s.", + startMatching ? 
"and starting matching" : "but not starting matching"); - OSArray * personalities = OSKext::copyAllKextPersonalities( - /* filterSafeBootFlag */ true); + OSArray * personalities = OSKext::copyAllKextPersonalities( + /* filterSafeBootFlag */ true); - if (personalities) { - gIOCatalogue->addDrivers(personalities, startMatching); - numPersonalities = personalities->getCount(); - personalities->release(); - } + if (personalities) { + gIOCatalogue->addDrivers(personalities, startMatching); + numPersonalities = personalities->getCount(); + personalities->release(); + } - OSKextLog(/* kext */ NULL, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "%d kext personalit%s sent to the IOCatalogue; %s.", - numPersonalities, numPersonalities > 0 ? "ies" : "y", - startMatching ? "matching started" : "matching not started"); - return; + OSKextLog(/* kext */ NULL, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "%d kext personalit%s sent to the IOCatalogue; %s.", + numPersonalities, numPersonalities > 0 ? "ies" : "y", + startMatching ? "matching started" : "matching not started"); + return; } /********************************************************************* @@ -10375,138 +10389,139 @@ OSKext::sendAllKextPersonalitiesToCatalog(bool startMatching) OSArray * OSKext::copyPersonalitiesArray(void) { - OSArray * result = NULL; - OSDictionary * personalities = NULL; // do not release - OSCollectionIterator * personalitiesIterator = NULL; // must release - - OSString * personalityName = NULL; // do not release - OSString * personalityBundleIdentifier = NULL; // do not release - - personalities = OSDynamicCast(OSDictionary, - getPropertyForHostArch(kIOKitPersonalitiesKey)); - if (!personalities) { - goto finish; - } - - result = OSArray::withCapacity(personalities->getCount()); - if (!result) { - goto finish; - } - - personalitiesIterator = - OSCollectionIterator::withCollection(personalities); - if (!personalitiesIterator) { - goto finish; - } - while ((personalityName = OSDynamicCast(OSString, - personalitiesIterator->getNextObject()))) { - - OSDictionary * personality = OSDynamicCast(OSDictionary, - personalities->getObject(personalityName)); - - /****** - * If the personality doesn't have a CFBundleIdentifier, or if it - * differs from the kext's, insert the kext's ID so we can find it. - * The publisher ID is used to remove personalities from bundles - * correctly. 
- */ - personalityBundleIdentifier = OSDynamicCast(OSString, - personality->getObject(kCFBundleIdentifierKey)); - - if (!personalityBundleIdentifier) { - personality->setObject(kCFBundleIdentifierKey, bundleID); - } else if (!personalityBundleIdentifier->isEqualTo(bundleID)) { - personality->setObject(kIOPersonalityPublisherKey, bundleID); - } - - result->setObject(personality); - } + OSArray * result = NULL; + OSDictionary * personalities = NULL;// do not release + OSCollectionIterator * personalitiesIterator = NULL;// must release + + OSString * personalityName = NULL;// do not release + OSString * personalityBundleIdentifier = NULL;// do not release + + personalities = OSDynamicCast(OSDictionary, + getPropertyForHostArch(kIOKitPersonalitiesKey)); + if (!personalities) { + goto finish; + } + + result = OSArray::withCapacity(personalities->getCount()); + if (!result) { + goto finish; + } + + personalitiesIterator = + OSCollectionIterator::withCollection(personalities); + if (!personalitiesIterator) { + goto finish; + } + while ((personalityName = OSDynamicCast(OSString, + personalitiesIterator->getNextObject()))) { + OSDictionary * personality = OSDynamicCast(OSDictionary, + personalities->getObject(personalityName)); + + /****** + * If the personality doesn't have a CFBundleIdentifier, or if it + * differs from the kext's, insert the kext's ID so we can find it. + * The publisher ID is used to remove personalities from bundles + * correctly. + */ + personalityBundleIdentifier = OSDynamicCast(OSString, + personality->getObject(kCFBundleIdentifierKey)); + + if (!personalityBundleIdentifier) { + personality->setObject(kCFBundleIdentifierKey, bundleID); + } else if (!personalityBundleIdentifier->isEqualTo(bundleID)) { + personality->setObject(kIOPersonalityPublisherKey, bundleID); + } + + result->setObject(personality); + } finish: - if (personalitiesIterator) personalitiesIterator->release(); + if (personalitiesIterator) { + personalitiesIterator->release(); + } - return result; + return result; } /********************************************************************* -Might want to change this to a bool return? +* Might want to change this to a bool return? 
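
A hypothetical call into the method below, sending one named personality to the IOCatalogue and starting driver matching (the kext pointer and the personality name are illustrative):

    OSArray *  names = OSArray::withCapacity(1);
    OSString * name  = OSString::withCString("MyDriverPersonality"); // assumed
    if (names && name) {
        names->setObject(name);
        (void) aKext->sendPersonalitiesToCatalog(
            /* startMatching */ true, names);
    }
    OSSafeReleaseNULL(name);
    OSSafeReleaseNULL(names);
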
*********************************************************************/ OSReturn OSKext::sendPersonalitiesToCatalog( - bool startMatching, - OSArray * personalityNames) -{ - OSReturn result = kOSReturnSuccess; - OSArray * personalitiesToSend = NULL; // must release - OSDictionary * kextPersonalities = NULL; // do not release - int count, i; - - if (!sLoadEnabled) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext loading is disabled (attempt to start matching for kext %s).", - getIdentifierCString()); - result = kOSKextReturnDisabled; - goto finish; - } - - if (sSafeBoot && !isLoadableInSafeBoot()) { - OSKextLog(this, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Kext %s is not loadable during safe boot; " - "not sending personalities to the IOCatalogue.", - getIdentifierCString()); - result = kOSKextReturnNotLoadable; - goto finish; - } - - if (!personalityNames || !personalityNames->getCount()) { - personalitiesToSend = copyPersonalitiesArray(); - } else { - kextPersonalities = OSDynamicCast(OSDictionary, - getPropertyForHostArch(kIOKitPersonalitiesKey)); - if (!kextPersonalities || !kextPersonalities->getCount()) { - // not an error - goto finish; - } - personalitiesToSend = OSArray::withCapacity(0); - if (!personalitiesToSend) { - result = kOSKextReturnNoMemory; - goto finish; - } - count = personalityNames->getCount(); - for (i = 0; i < count; i++) { - OSString * name = OSDynamicCast(OSString, - personalityNames->getObject(i)); - if (!name) { - continue; - } - OSDictionary * personality = OSDynamicCast(OSDictionary, - kextPersonalities->getObject(name)); - if (personality) { - personalitiesToSend->setObject(personality); - } - } - } - if (personalitiesToSend) { - unsigned numPersonalities = personalitiesToSend->getCount(); - OSKextLog(this, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Kext %s sending %d personalit%s to the IOCatalogue%s.", - getIdentifierCString(), - numPersonalities, - numPersonalities > 1 ? "ies" : "y", - startMatching ? 
" and starting matching" : " but not starting matching"); - gIOCatalogue->addDrivers(personalitiesToSend, startMatching); - } + bool startMatching, + OSArray * personalityNames) +{ + OSReturn result = kOSReturnSuccess; + OSArray * personalitiesToSend = NULL;// must release + OSDictionary * kextPersonalities = NULL;// do not release + int count, i; + + if (!sLoadEnabled) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext loading is disabled (attempt to start matching for kext %s).", + getIdentifierCString()); + result = kOSKextReturnDisabled; + goto finish; + } + + if (sSafeBoot && !isLoadableInSafeBoot()) { + OSKextLog(this, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Kext %s is not loadable during safe boot; " + "not sending personalities to the IOCatalogue.", + getIdentifierCString()); + result = kOSKextReturnNotLoadable; + goto finish; + } + + if (!personalityNames || !personalityNames->getCount()) { + personalitiesToSend = copyPersonalitiesArray(); + } else { + kextPersonalities = OSDynamicCast(OSDictionary, + getPropertyForHostArch(kIOKitPersonalitiesKey)); + if (!kextPersonalities || !kextPersonalities->getCount()) { + // not an error + goto finish; + } + personalitiesToSend = OSArray::withCapacity(0); + if (!personalitiesToSend) { + result = kOSKextReturnNoMemory; + goto finish; + } + count = personalityNames->getCount(); + for (i = 0; i < count; i++) { + OSString * name = OSDynamicCast(OSString, + personalityNames->getObject(i)); + if (!name) { + continue; + } + OSDictionary * personality = OSDynamicCast(OSDictionary, + kextPersonalities->getObject(name)); + if (personality) { + personalitiesToSend->setObject(personality); + } + } + } + if (personalitiesToSend) { + unsigned numPersonalities = personalitiesToSend->getCount(); + OSKextLog(this, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Kext %s sending %d personalit%s to the IOCatalogue%s.", + getIdentifierCString(), + numPersonalities, + numPersonalities > 1 ? "ies" : "y", + startMatching ? " and starting matching" : " but not starting matching"); + gIOCatalogue->addDrivers(personalitiesToSend, startMatching); + } finish: - if (personalitiesToSend) { - personalitiesToSend->release(); - } - return result; + if (personalitiesToSend) { + personalitiesToSend->release(); + } + return result; } /********************************************************************* @@ -10516,29 +10531,31 @@ finish: void OSKext::removePersonalitiesFromCatalog(void) { - OSDictionary * personality = NULL; // do not release + OSDictionary * personality = NULL; // do not release - personality = OSDictionary::withCapacity(1); - if (!personality) { - goto finish; - } - personality->setObject(kCFBundleIdentifierKey, getIdentifier()); + personality = OSDictionary::withCapacity(1); + if (!personality) { + goto finish; + } + personality->setObject(kCFBundleIdentifierKey, getIdentifier()); - OSKextLog(this, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Kext %s removing all personalities naming it from the IOCatalogue.", - getIdentifierCString()); + OSKextLog(this, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Kext %s removing all personalities naming it from the IOCatalogue.", + getIdentifierCString()); - /* Have the IOCatalog remove all personalities matching this kext's - * bundle ID and trigger matching anew. - */ - gIOCatalogue->removeDrivers(personality, /* startMatching */ true); + /* Have the IOCatalog remove all personalities matching this kext's + * bundle ID and trigger matching anew. 
+ */ + gIOCatalogue->removeDrivers(personality, /* startMatching */ true); - finish: - if (personality) personality->release(); +finish: + if (personality) { + personality->release(); + } - return; + return; } @@ -10551,55 +10568,54 @@ OSKext::removePersonalitiesFromCatalog(void) /* static */ OSKextLogSpec OSKext::setUserSpaceLogFilter( - OSKextLogSpec newUserLogFilter, - bool captureFlag) -{ - OSKextLogSpec result; - bool allocError = false; - - /* Do not call any function that takes sKextLoggingLock during - * this critical block. That means do logging after. - */ - IOLockLock(sKextLoggingLock); - - result = sUserSpaceKextLogFilter; - sUserSpaceKextLogFilter = newUserLogFilter; - - if (newUserLogFilter && captureFlag && - !sUserSpaceLogSpecArray && !sUserSpaceLogMessageArray) { - - // xxx - do some measurements for a good initial capacity? - sUserSpaceLogSpecArray = OSArray::withCapacity(0); - sUserSpaceLogMessageArray = OSArray::withCapacity(0); - - if (!sUserSpaceLogSpecArray || !sUserSpaceLogMessageArray) { - OSSafeReleaseNULL(sUserSpaceLogSpecArray); - OSSafeReleaseNULL(sUserSpaceLogMessageArray); - allocError = true; - } - } - - IOLockUnlock(sKextLoggingLock); - - /* If the config flag itself is changing, log the state change - * going both ways, before setting up the user-space log arrays, - * so that this is only logged in the kernel. - */ - if (result != newUserLogFilter) { - OSKextLog(/* kext */ NULL, - kOSKextLogDebugLevel | - kOSKextLogGeneralFlag, - "User-space log flags changed from 0x%x to 0x%x.", - result, newUserLogFilter); - } - if (allocError) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Failed to allocate user-space log message arrays."); - } - - return result; + OSKextLogSpec newUserLogFilter, + bool captureFlag) +{ + OSKextLogSpec result; + bool allocError = false; + + /* Do not call any function that takes sKextLoggingLock during + * this critical block. That means do logging after. + */ + IOLockLock(sKextLoggingLock); + + result = sUserSpaceKextLogFilter; + sUserSpaceKextLogFilter = newUserLogFilter; + + if (newUserLogFilter && captureFlag && + !sUserSpaceLogSpecArray && !sUserSpaceLogMessageArray) { + // xxx - do some measurements for a good initial capacity? + sUserSpaceLogSpecArray = OSArray::withCapacity(0); + sUserSpaceLogMessageArray = OSArray::withCapacity(0); + + if (!sUserSpaceLogSpecArray || !sUserSpaceLogMessageArray) { + OSSafeReleaseNULL(sUserSpaceLogSpecArray); + OSSafeReleaseNULL(sUserSpaceLogMessageArray); + allocError = true; + } + } + + IOLockUnlock(sKextLoggingLock); + + /* If the config flag itself is changing, log the state change + * going both ways, before setting up the user-space log arrays, + * so that this is only logged in the kernel. 
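
The discipline described here generalizes: OSKextLog() itself takes sKextLoggingLock (see OSKextVLog below), so any function holding that lock must record what it wants to report and log only after unlocking. The shape, with newFilter standing in for the incoming argument:

    IOLockLock(sKextLoggingLock);
    OSKextLogSpec oldFilter = sUserSpaceKextLogFilter;
    sUserSpaceKextLogFilter = newFilter;     // mutate under the lock
    IOLockUnlock(sKextLoggingLock);

    if (oldFilter != newFilter) {            // log after unlocking
        OSKextLog(/* kext */ NULL,
            kOSKextLogDebugLevel | kOSKextLogGeneralFlag,
            "User-space log flags changed from 0x%x to 0x%x.",
            oldFilter, newFilter);
    }
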
+ */ + if (result != newUserLogFilter) { + OSKextLog(/* kext */ NULL, + kOSKextLogDebugLevel | + kOSKextLogGeneralFlag, + "User-space log flags changed from 0x%x to 0x%x.", + result, newUserLogFilter); + } + if (allocError) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Failed to allocate user-space log message arrays."); + } + + return result; } /********************************************************************* @@ -10609,41 +10625,41 @@ OSKext::setUserSpaceLogFilter( OSArray * OSKext::clearUserSpaceLogFilter(void) { - OSArray * result = NULL; - OSKextLogSpec oldLogFilter; - OSKextLogSpec newLogFilter = kOSKextLogSilentFilter; + OSArray * result = NULL; + OSKextLogSpec oldLogFilter; + OSKextLogSpec newLogFilter = kOSKextLogSilentFilter; - /* Do not call any function that takes sKextLoggingLock during - * this critical block. That means do logging after. - */ - IOLockLock(sKextLoggingLock); + /* Do not call any function that takes sKextLoggingLock during + * this critical block. That means do logging after. + */ + IOLockLock(sKextLoggingLock); - result = OSArray::withCapacity(2); - if (result) { - result->setObject(sUserSpaceLogSpecArray); - result->setObject(sUserSpaceLogMessageArray); - } - OSSafeReleaseNULL(sUserSpaceLogSpecArray); - OSSafeReleaseNULL(sUserSpaceLogMessageArray); - - oldLogFilter = sUserSpaceKextLogFilter; - sUserSpaceKextLogFilter = newLogFilter; - - IOLockUnlock(sKextLoggingLock); - - /* If the config flag itself is changing, log the state change - * going both ways, after tearing down the user-space log - * arrays, so this is only logged within the kernel. - */ - if (oldLogFilter != newLogFilter) { - OSKextLog(/* kext */ NULL, - kOSKextLogDebugLevel | - kOSKextLogGeneralFlag, - "User-space log flags changed from 0x%x to 0x%x.", - oldLogFilter, newLogFilter); - } + result = OSArray::withCapacity(2); + if (result) { + result->setObject(sUserSpaceLogSpecArray); + result->setObject(sUserSpaceLogMessageArray); + } + OSSafeReleaseNULL(sUserSpaceLogSpecArray); + OSSafeReleaseNULL(sUserSpaceLogMessageArray); + + oldLogFilter = sUserSpaceKextLogFilter; + sUserSpaceKextLogFilter = newLogFilter; + + IOLockUnlock(sKextLoggingLock); + + /* If the config flag itself is changing, log the state change + * going both ways, after tearing down the user-space log + * arrays, so this is only logged within the kernel. 
+ */ + if (oldLogFilter != newLogFilter) { + OSKextLog(/* kext */ NULL, + kOSKextLogDebugLevel | + kOSKextLogGeneralFlag, + "User-space log flags changed from 0x%x to 0x%x.", + oldLogFilter, newLogFilter); + } - return result; + return result; } @@ -10654,13 +10670,13 @@ OSKext::clearUserSpaceLogFilter(void) OSKextLogSpec OSKext::getUserSpaceLogFilter(void) { - OSKextLogSpec result; + OSKextLogSpec result; - IOLockLock(sKextLoggingLock); - result = sUserSpaceKextLogFilter; - IOLockUnlock(sKextLoggingLock); + IOLockLock(sKextLoggingLock); + result = sUserSpaceKextLogFilter; + IOLockUnlock(sKextLoggingLock); - return result; + return result; } /********************************************************************* @@ -10682,374 +10698,364 @@ OSKext::getUserSpaceLogFilter(void) #define VTMAGENTA "\033[35m" #define VTCYAN "\033[36m" -inline const char * colorForFlags(OSKextLogSpec flags) -{ - OSKextLogSpec logLevel = flags & kOSKextLogLevelMask; - - switch (logLevel) { - case kOSKextLogErrorLevel: - return VTRED VTBOLD; - case kOSKextLogWarningLevel: - return VTRED; - case kOSKextLogBasicLevel: - return VTYELLOW VTUNDER; - case kOSKextLogProgressLevel: - return VTYELLOW; - case kOSKextLogStepLevel: - return VTGREEN; - case kOSKextLogDetailLevel: - return VTCYAN; - case kOSKextLogDebugLevel: - return VTMAGENTA; - default: - return ""; // white - } -} - -inline bool logSpecMatch( - OSKextLogSpec msgLogSpec, - OSKextLogSpec logFilter) -{ - OSKextLogSpec filterKextGlobal = logFilter & kOSKextLogKextOrGlobalMask; - OSKextLogSpec filterLevel = logFilter & kOSKextLogLevelMask; - OSKextLogSpec filterFlags = logFilter & kOSKextLogFlagsMask; - - OSKextLogSpec msgKextGlobal = msgLogSpec & kOSKextLogKextOrGlobalMask; - OSKextLogSpec msgLevel = msgLogSpec & kOSKextLogLevelMask; - OSKextLogSpec msgFlags = msgLogSpec & kOSKextLogFlagsMask; - - /* Explicit messages always get logged. - */ - if (msgLevel == kOSKextLogExplicitLevel) { - return true; - } - - /* Warnings and errors are logged regardless of the flags. - */ - if (msgLevel <= kOSKextLogBasicLevel && (msgLevel <= filterLevel)) { - return true; - } - - /* A verbose message that isn't for a logging-enabled kext and isn't global - * does *not* get logged. - */ - if (!msgKextGlobal && !filterKextGlobal) { - return false; - } - - /* Warnings and errors are logged regardless of the flags. - * All other messages must fit the flags and - * have a level at or below the filter. 
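
An OSKextLogSpec packs a verbosity level, activity flags, and the kext-or-global bit into one word; logSpecMatch(), defined just below, passes a message when the flag sets intersect and the message level is at or below the filter's, with errors and warnings exempt from the flags check. For example, the spec used by the IPC errors earlier in this file:

    OSKextLogSpec spec = kOSKextLogErrorLevel | kOSKextLogIPCFlag;
    // Passes any filter whose level admits errors, regardless of
    // which activity flags the filter enables.
    bool wouldLog = logSpecMatch(spec, sKernelLogFilter);
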
- * - */ - if ((msgFlags & filterFlags) && (msgLevel <= filterLevel)) { - return true; - } - return false; +inline const char * +colorForFlags(OSKextLogSpec flags) +{ + OSKextLogSpec logLevel = flags & kOSKextLogLevelMask; + + switch (logLevel) { + case kOSKextLogErrorLevel: + return VTRED VTBOLD; + case kOSKextLogWarningLevel: + return VTRED; + case kOSKextLogBasicLevel: + return VTYELLOW VTUNDER; + case kOSKextLogProgressLevel: + return VTYELLOW; + case kOSKextLogStepLevel: + return VTGREEN; + case kOSKextLogDetailLevel: + return VTCYAN; + case kOSKextLogDebugLevel: + return VTMAGENTA; + default: + return ""; // white + } } -extern "C" { +inline bool +logSpecMatch( + OSKextLogSpec msgLogSpec, + OSKextLogSpec logFilter) +{ + OSKextLogSpec filterKextGlobal = logFilter & kOSKextLogKextOrGlobalMask; + OSKextLogSpec filterLevel = logFilter & kOSKextLogLevelMask; + OSKextLogSpec filterFlags = logFilter & kOSKextLogFlagsMask; + + OSKextLogSpec msgKextGlobal = msgLogSpec & kOSKextLogKextOrGlobalMask; + OSKextLogSpec msgLevel = msgLogSpec & kOSKextLogLevelMask; + OSKextLogSpec msgFlags = msgLogSpec & kOSKextLogFlagsMask; + + /* Explicit messages always get logged. + */ + if (msgLevel == kOSKextLogExplicitLevel) { + return true; + } + + /* Warnings and errors are logged regardless of the flags. + */ + if (msgLevel <= kOSKextLogBasicLevel && (msgLevel <= filterLevel)) { + return true; + } + + /* A verbose message that isn't for a logging-enabled kext and isn't global + * does *not* get logged. + */ + if (!msgKextGlobal && !filterKextGlobal) { + return false; + } + + /* Warnings and errors are logged regardless of the flags. + * All other messages must fit the flags and + * have a level at or below the filter. + * + */ + if ((msgFlags & filterFlags) && (msgLevel <= filterLevel)) { + return true; + } + return false; +} +extern "C" { void OSKextLog( - OSKext * aKext, - OSKextLogSpec msgLogSpec, - const char * format, ...) + OSKext * aKext, + OSKextLogSpec msgLogSpec, + const char * format, ...) { - va_list argList; + va_list argList; - va_start(argList, format); - OSKextVLog(aKext, msgLogSpec, format, argList); - va_end(argList); + va_start(argList, format); + OSKextVLog(aKext, msgLogSpec, format, argList); + va_end(argList); } void OSKextVLog( - OSKext * aKext, - OSKextLogSpec msgLogSpec, - const char * format, - va_list srcArgList) -{ - extern int disableConsoleOutput; - - bool logForKernel = false; - bool logForUser = false; - va_list argList; - char stackBuffer[120]; - uint32_t length = 0; - char * allocBuffer = NULL; // must kfree - OSNumber * logSpecNum = NULL; // must release - OSString * logString = NULL; // must release - char * buffer = stackBuffer; // do not free - - IOLockLock(sKextLoggingLock); - - /* Set the kext/global bit in the message spec if we have no - * kext or if the kext requests logging. - */ - if (!aKext || aKext->flags.loggingEnabled) { - msgLogSpec = msgLogSpec | kOSKextLogKextOrGlobalMask; - } - - logForKernel = logSpecMatch(msgLogSpec, sKernelLogFilter); - if (sUserSpaceLogSpecArray && sUserSpaceLogMessageArray) { - logForUser = logSpecMatch(msgLogSpec, sUserSpaceKextLogFilter); - } - - if (! (logForKernel || logForUser) ) { - goto finish; - } - - /* No goto from here until past va_end()! 
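
OSKextVLog() uses the classic measure-then-allocate vsnprintf pattern: format once into a small stack buffer to learn the full length, and only when that overflows, format again into an exactly-sized heap buffer. A self-contained userspace sketch of the same pattern (kalloc_tag/kfree swapped for malloc/free):

    #include <cstdarg>
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    static char *
    format_dup(const char * fmt, va_list src)
    {
        char    stackbuf[120];
        va_list ap;

        // First pass: vsnprintf returns the would-be length even when
        // the buffer is too small.
        va_copy(ap, src);
        int len = vsnprintf(stackbuf, sizeof(stackbuf), fmt, ap);
        va_end(ap);
        if (len < 0) {
            return NULL;
        }

        char * out = (char *)malloc((size_t)len + 1);
        if (!out) {
            return NULL;
        }
        if ((size_t)len < sizeof(stackbuf)) {
            memcpy(out, stackbuf, (size_t)len + 1); // fit on first pass
        } else {
            // Second pass into an exactly-sized buffer; re-copy the
            // va_list because one copy cannot be traversed twice.
            va_copy(ap, src);
            vsnprintf(out, (size_t)len + 1, fmt, ap);
            va_end(ap);
        }
        return out;
    }
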
- */ - va_copy(argList, srcArgList); - length = vsnprintf(stackBuffer, sizeof(stackBuffer), format, argList); - va_end(argList); - - if (length + 1 >= sizeof(stackBuffer)) { - allocBuffer = (char *)kalloc_tag((length + 1) * sizeof(char), VM_KERN_MEMORY_OSKEXT); - if (!allocBuffer) { - goto finish; - } - - /* No goto from here until past va_end()! - */ - va_copy(argList, srcArgList); - vsnprintf(allocBuffer, length + 1, format, argList); - va_end(argList); - - buffer = allocBuffer; - } - - /* If user space wants the log message, queue it up. - */ - if (logForUser && sUserSpaceLogSpecArray && sUserSpaceLogMessageArray) { - logSpecNum = OSNumber::withNumber(msgLogSpec, 8 * sizeof(msgLogSpec)); - logString = OSString::withCString(buffer); - if (logSpecNum && logString) { - sUserSpaceLogSpecArray->setObject(logSpecNum); - sUserSpaceLogMessageArray->setObject(logString); - } - } - - /* Always log messages from the kernel according to the kernel's - * log flags. - */ - if (logForKernel) { - - /* If we are in console mode and have a custom log filter, - * colorize the log message. - */ - if (!disableConsoleOutput && sBootArgLogFilterFound) { - const char * color = ""; // do not free - color = colorForFlags(msgLogSpec); - printf("%s%s%s\n", colorForFlags(msgLogSpec), - buffer, color[0] ? VTRESET : ""); - } else { - printf("%s\n", buffer); - } - } + OSKext * aKext, + OSKextLogSpec msgLogSpec, + const char * format, + va_list srcArgList) +{ + extern int disableConsoleOutput; + + bool logForKernel = false; + bool logForUser = false; + va_list argList; + char stackBuffer[120]; + uint32_t length = 0; + char * allocBuffer = NULL; // must kfree + OSNumber * logSpecNum = NULL; // must release + OSString * logString = NULL; // must release + char * buffer = stackBuffer;// do not free + + IOLockLock(sKextLoggingLock); + + /* Set the kext/global bit in the message spec if we have no + * kext or if the kext requests logging. + */ + if (!aKext || aKext->flags.loggingEnabled) { + msgLogSpec = msgLogSpec | kOSKextLogKextOrGlobalMask; + } + + logForKernel = logSpecMatch(msgLogSpec, sKernelLogFilter); + if (sUserSpaceLogSpecArray && sUserSpaceLogMessageArray) { + logForUser = logSpecMatch(msgLogSpec, sUserSpaceKextLogFilter); + } + + if (!(logForKernel || logForUser)) { + goto finish; + } + + /* No goto from here until past va_end()! + */ + va_copy(argList, srcArgList); + length = vsnprintf(stackBuffer, sizeof(stackBuffer), format, argList); + va_end(argList); + + if (length + 1 >= sizeof(stackBuffer)) { + allocBuffer = (char *)kalloc_tag((length + 1) * sizeof(char), VM_KERN_MEMORY_OSKEXT); + if (!allocBuffer) { + goto finish; + } + + /* No goto from here until past va_end()! + */ + va_copy(argList, srcArgList); + vsnprintf(allocBuffer, length + 1, format, argList); + va_end(argList); + + buffer = allocBuffer; + } + + /* If user space wants the log message, queue it up. + */ + if (logForUser && sUserSpaceLogSpecArray && sUserSpaceLogMessageArray) { + logSpecNum = OSNumber::withNumber(msgLogSpec, 8 * sizeof(msgLogSpec)); + logString = OSString::withCString(buffer); + if (logSpecNum && logString) { + sUserSpaceLogSpecArray->setObject(logSpecNum); + sUserSpaceLogMessageArray->setObject(logString); + } + } + + /* Always log messages from the kernel according to the kernel's + * log flags. + */ + if (logForKernel) { + /* If we are in console mode and have a custom log filter, + * colorize the log message. 
+ */ + if (!disableConsoleOutput && sBootArgLogFilterFound) { + const char * color = ""; // do not free + color = colorForFlags(msgLogSpec); + printf("%s%s%s\n", colorForFlags(msgLogSpec), + buffer, color[0] ? VTRESET : ""); + } else { + printf("%s\n", buffer); + } + } finish: - IOLockUnlock(sKextLoggingLock); + IOLockUnlock(sKextLoggingLock); - if (allocBuffer) { - kfree(allocBuffer, (length + 1) * sizeof(char)); - } - OSSafeReleaseNULL(logString); - OSSafeReleaseNULL(logSpecNum); - return; + if (allocBuffer) { + kfree(allocBuffer, (length + 1) * sizeof(char)); + } + OSSafeReleaseNULL(logString); + OSSafeReleaseNULL(logSpecNum); + return; } #if KASLR_IOREG_DEBUG - + #define IOLOG_INDENT( the_indention ) \ { \ int i; \ for ( i = 0; i < (the_indention); i++ ) { \ - IOLog(" "); \ + IOLog(" "); \ } \ } - -extern vm_offset_t vm_kernel_stext; -extern vm_offset_t vm_kernel_etext; -extern mach_vm_offset_t kext_alloc_base; + +extern vm_offset_t vm_kernel_stext; +extern vm_offset_t vm_kernel_etext; +extern mach_vm_offset_t kext_alloc_base; extern mach_vm_offset_t kext_alloc_max; - -bool ScanForAddrInObject(OSObject * theObject, - int indent ); - -bool ScanForAddrInObject(OSObject * theObject, - int indent) -{ - const OSMetaClass * myTypeID; - OSCollectionIterator * myIter; - OSSymbol * myKey; - OSObject * myValue; - bool myResult = false; - - if ( theObject == NULL ) { - IOLog("%s: theObject is NULL \n", - __FUNCTION__); - return myResult; - } - - myTypeID = OSTypeIDInst(theObject); - - if ( myTypeID == OSTypeID(OSDictionary) ) { - OSDictionary * myDictionary; - - myDictionary = OSDynamicCast(OSDictionary, theObject); - myIter = OSCollectionIterator::withCollection( myDictionary ); - if ( myIter == NULL ) - return myResult; - myIter->reset(); - - while ( (myKey = OSDynamicCast(OSSymbol, myIter->getNextObject())) ) { - bool myTempResult; - - myValue = myDictionary->getObject(myKey); - myTempResult = ScanForAddrInObject(myValue, (indent + 4)); - if (myTempResult) { - // if we ever get a true result return true - myResult = true; - IOLOG_INDENT(indent); - IOLog("OSDictionary key \"%s\" \n", myKey->getCStringNoCopy()); - } - } - myIter->release(); - } - else if ( myTypeID == OSTypeID(OSArray) ) { - OSArray * myArray; - - myArray = OSDynamicCast(OSArray, theObject); - myIter = OSCollectionIterator::withCollection(myArray); - if ( myIter == NULL ) - return myResult; - myIter->reset(); - - while ( (myValue = myIter->getNextObject()) ) { - bool myTempResult; - myTempResult = ScanForAddrInObject(myValue, (indent + 4)); - if (myTempResult) { - // if we ever get a true result return true - myResult = true; - IOLOG_INDENT(indent); - IOLog("OSArray: \n"); - } - } - myIter->release(); - } - else if ( myTypeID == OSTypeID(OSString) || myTypeID == OSTypeID(OSSymbol) ) { - - // should we look for addresses in strings? 
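
The OSData branch below treats the payload as an array of pointer-sized words and flags any word landing in the kext allocation range or kernel text. The range test in isolation, using the externs declared above:

    static bool
    wordLooksLikeKernelAddr(UInt64 v)
    {
        return (kext_alloc_max != 0 &&
               v >= kext_alloc_base && v < kext_alloc_max) ||
               (vm_kernel_etext != 0 &&
               v >= vm_kernel_stext && v < vm_kernel_etext);
    }
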
- } - else if ( myTypeID == OSTypeID(OSData) ) { - - void * * myPtrPtr; - unsigned int myLen; - OSData * myDataObj; - - myDataObj = OSDynamicCast(OSData, theObject); - myPtrPtr = (void * *) myDataObj->getBytesNoCopy(); - myLen = myDataObj->getLength(); - - if (myPtrPtr && myLen && myLen > 7) { - int i; - int myPtrCount = (myLen / sizeof(void *)); - - for (i = 0; i < myPtrCount; i++) { - UInt64 numberValue = (UInt64) *(myPtrPtr); - - if ( kext_alloc_max != 0 && - numberValue >= kext_alloc_base && - numberValue < kext_alloc_max ) { - - OSKext * myKext = NULL; // must release (looked up) - // IOLog("found OSData %p in kext map %p to %p \n", - // *(myPtrPtr), - // (void *) kext_alloc_base, - // (void *) kext_alloc_max); - - myKext = OSKext::lookupKextWithAddress( (vm_address_t) *(myPtrPtr) ); - if (myKext) { - IOLog("found addr %p from an OSData obj within kext \"%s\" \n", - *(myPtrPtr), - myKext->getIdentifierCString()); - myKext->release(); - } - myResult = true; - } - if ( vm_kernel_etext != 0 && - numberValue >= vm_kernel_stext && - numberValue < vm_kernel_etext ) { - IOLog("found addr %p from an OSData obj within kernel text segment %p to %p \n", - *(myPtrPtr), - (void *) vm_kernel_stext, - (void *) vm_kernel_etext); - myResult = true; - } - myPtrPtr++; - } - } - } - else if ( myTypeID == OSTypeID(OSBoolean) ) { - - // do nothing here... - } - else if ( myTypeID == OSTypeID(OSNumber) ) { - - OSNumber * number = OSDynamicCast(OSNumber, theObject); - - UInt64 numberValue = number->unsigned64BitValue(); - - if ( kext_alloc_max != 0 && - numberValue >= kext_alloc_base && - numberValue < kext_alloc_max ) { - - OSKext * myKext = NULL; // must release (looked up) - IOLog("found OSNumber in kext map %p to %p \n", - (void *) kext_alloc_base, - (void *) kext_alloc_max); - IOLog("OSNumber 0x%08llx (%llu) \n", numberValue, numberValue); - - myKext = OSKext::lookupKextWithAddress( (vm_address_t) numberValue ); - if (myKext) { - IOLog("found in kext \"%s\" \n", - myKext->getIdentifierCString()); - myKext->release(); - } - - myResult = true; - } - if ( vm_kernel_etext != 0 && - numberValue >= vm_kernel_stext && - numberValue < vm_kernel_etext ) { - IOLog("found OSNumber in kernel text segment %p to %p \n", - (void *) vm_kernel_stext, - (void *) vm_kernel_etext); - IOLog("OSNumber 0x%08llx (%llu) \n", numberValue, numberValue); - myResult = true; - } - } + +bool ScanForAddrInObject(OSObject * theObject, + int indent ); + +bool +ScanForAddrInObject(OSObject * theObject, + int indent) +{ + const OSMetaClass * myTypeID; + OSCollectionIterator * myIter; + OSSymbol * myKey; + OSObject * myValue; + bool myResult = false; + + if (theObject == NULL) { + IOLog("%s: theObject is NULL \n", + __FUNCTION__); + return myResult; + } + + myTypeID = OSTypeIDInst(theObject); + + if (myTypeID == OSTypeID(OSDictionary)) { + OSDictionary * myDictionary; + + myDictionary = OSDynamicCast(OSDictionary, theObject); + myIter = OSCollectionIterator::withCollection( myDictionary ); + if (myIter == NULL) { + return myResult; + } + myIter->reset(); + + while ((myKey = OSDynamicCast(OSSymbol, myIter->getNextObject()))) { + bool myTempResult; + + myValue = myDictionary->getObject(myKey); + myTempResult = ScanForAddrInObject(myValue, (indent + 4)); + if (myTempResult) { + // if we ever get a true result return true + myResult = true; + IOLOG_INDENT(indent); + IOLog("OSDictionary key \"%s\" \n", myKey->getCStringNoCopy()); + } + } + myIter->release(); + } else if (myTypeID == OSTypeID(OSArray)) { + OSArray * myArray; + + myArray = 
OSDynamicCast(OSArray, theObject); + myIter = OSCollectionIterator::withCollection(myArray); + if (myIter == NULL) { + return myResult; + } + myIter->reset(); + + while ((myValue = myIter->getNextObject())) { + bool myTempResult; + myTempResult = ScanForAddrInObject(myValue, (indent + 4)); + if (myTempResult) { + // if we ever get a true result return true + myResult = true; + IOLOG_INDENT(indent); + IOLog("OSArray: \n"); + } + } + myIter->release(); + } else if (myTypeID == OSTypeID(OSString) || myTypeID == OSTypeID(OSSymbol)) { + // should we look for addresses in strings? + } else if (myTypeID == OSTypeID(OSData)) { + void * * myPtrPtr; + unsigned int myLen; + OSData * myDataObj; + + myDataObj = OSDynamicCast(OSData, theObject); + myPtrPtr = (void * *) myDataObj->getBytesNoCopy(); + myLen = myDataObj->getLength(); + + if (myPtrPtr && myLen && myLen > 7) { + int i; + int myPtrCount = (myLen / sizeof(void *)); + + for (i = 0; i < myPtrCount; i++) { + UInt64 numberValue = (UInt64) * (myPtrPtr); + + if (kext_alloc_max != 0 && + numberValue >= kext_alloc_base && + numberValue < kext_alloc_max) { + OSKext * myKext = NULL;// must release (looked up) + // IOLog("found OSData %p in kext map %p to %p \n", + // *(myPtrPtr), + // (void *) kext_alloc_base, + // (void *) kext_alloc_max); + + myKext = OSKext::lookupKextWithAddress((vm_address_t) *(myPtrPtr)); + if (myKext) { + IOLog("found addr %p from an OSData obj within kext \"%s\" \n", + *(myPtrPtr), + myKext->getIdentifierCString()); + myKext->release(); + } + myResult = true; + } + if (vm_kernel_etext != 0 && + numberValue >= vm_kernel_stext && + numberValue < vm_kernel_etext) { + IOLog("found addr %p from an OSData obj within kernel text segment %p to %p \n", + *(myPtrPtr), + (void *) vm_kernel_stext, + (void *) vm_kernel_etext); + myResult = true; + } + myPtrPtr++; + } + } + } else if (myTypeID == OSTypeID(OSBoolean)) { + // do nothing here... 
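
The OSNumber branch that follows attributes a suspect address to its owning kext. The lookup step in isolation (value is a stand-in for the scanned word); lookupKextWithAddress() returns a retained reference, hence the release:

    OSKext * owner = OSKext::lookupKextWithAddress((vm_address_t)value);
    if (owner) {
        IOLog("address belongs to kext \"%s\"\n",
            owner->getIdentifierCString());
        owner->release();       // drop the lookup reference
    }
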
+ } else if (myTypeID == OSTypeID(OSNumber)) { + OSNumber * number = OSDynamicCast(OSNumber, theObject); + + UInt64 numberValue = number->unsigned64BitValue(); + + if (kext_alloc_max != 0 && + numberValue >= kext_alloc_base && + numberValue < kext_alloc_max) { + OSKext * myKext = NULL;// must release (looked up) + IOLog("found OSNumber in kext map %p to %p \n", + (void *) kext_alloc_base, + (void *) kext_alloc_max); + IOLog("OSNumber 0x%08llx (%llu) \n", numberValue, numberValue); + + myKext = OSKext::lookupKextWithAddress((vm_address_t) numberValue ); + if (myKext) { + IOLog("found in kext \"%s\" \n", + myKext->getIdentifierCString()); + myKext->release(); + } + + myResult = true; + } + if (vm_kernel_etext != 0 && + numberValue >= vm_kernel_stext && + numberValue < vm_kernel_etext) { + IOLog("found OSNumber in kernel text segment %p to %p \n", + (void *) vm_kernel_stext, + (void *) vm_kernel_etext); + IOLog("OSNumber 0x%08llx (%llu) \n", numberValue, numberValue); + myResult = true; + } + } #if 0 - else { - const OSMetaClass* myMetaClass = NULL; - - myMetaClass = theObject->getMetaClass(); - if ( myMetaClass ) { - IOLog("class %s \n", myMetaClass->getClassName() ); - } - else { - IOLog("Unknown object \n" ); - } - } + else { + const OSMetaClass* myMetaClass = NULL; + + myMetaClass = theObject->getMetaClass(); + if (myMetaClass) { + IOLog("class %s \n", myMetaClass->getClassName()); + } else { + IOLog("Unknown object \n" ); + } + } #endif - - return myResult; -} -#endif // KASLR_KEXT_DEBUG + return myResult; +} +#endif // KASLR_KEXT_DEBUG }; /* extern "C" */ #if PRAGMA_MARK @@ -11061,64 +11067,66 @@ bool ScanForAddrInObject(OSObject * theObject, /* static */ void OSKext::printKextsInBacktrace( - vm_offset_t * addr __unused, - unsigned int cnt __unused, - int (* printf_func)(const char *fmt, ...) __unused, - uint32_t flags __unused) -{ - addr64_t summary_page = 0; - addr64_t last_summary_page = 0; - bool found_kmod = false; - u_int i = 0; - - if (kPrintKextsLock & flags) { - if (!sKextSummariesLock) return; - IOLockLock(sKextSummariesLock); - } - - if (!gLoadedKextSummaries) { - (*printf_func)(" can't perform kext scan: no kext summary"); - goto finish; - } - - summary_page = trunc_page((addr64_t)(uintptr_t)gLoadedKextSummaries); - last_summary_page = round_page(summary_page + sLoadedKextSummariesAllocSize); - for (; summary_page < last_summary_page; summary_page += PAGE_SIZE) { - if (pmap_find_phys(kernel_pmap, summary_page) == 0) { - (*printf_func)(" can't perform kext scan: " - "missing kext summary page %p", summary_page); - goto finish; - } - } - - for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { - OSKextLoadedKextSummary * summary; - - summary = gLoadedKextSummaries->summaries + i; - if (!summary->address) { - continue; - } - - if (!summaryIsInBacktrace(summary, addr, cnt)) { - continue; - } - - if (!found_kmod) { - if (!(kPrintKextsTerse & flags)) { - (*printf_func)(" Kernel Extensions in backtrace:\n"); - } - found_kmod = true; - } - - printSummary(summary, printf_func, flags); - } + vm_offset_t * addr __unused, + unsigned int cnt __unused, + int (* printf_func)(const char *fmt, ...) 
__unused, + uint32_t flags __unused) +{ + addr64_t summary_page = 0; + addr64_t last_summary_page = 0; + bool found_kmod = false; + u_int i = 0; + + if (kPrintKextsLock & flags) { + if (!sKextSummariesLock) { + return; + } + IOLockLock(sKextSummariesLock); + } + + if (!gLoadedKextSummaries) { + (*printf_func)(" can't perform kext scan: no kext summary"); + goto finish; + } + + summary_page = trunc_page((addr64_t)(uintptr_t)gLoadedKextSummaries); + last_summary_page = round_page(summary_page + sLoadedKextSummariesAllocSize); + for (; summary_page < last_summary_page; summary_page += PAGE_SIZE) { + if (pmap_find_phys(kernel_pmap, summary_page) == 0) { + (*printf_func)(" can't perform kext scan: " + "missing kext summary page %p", summary_page); + goto finish; + } + } + + for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { + OSKextLoadedKextSummary * summary; + + summary = gLoadedKextSummaries->summaries + i; + if (!summary->address) { + continue; + } + + if (!summaryIsInBacktrace(summary, addr, cnt)) { + continue; + } + + if (!found_kmod) { + if (!(kPrintKextsTerse & flags)) { + (*printf_func)(" Kernel Extensions in backtrace:\n"); + } + found_kmod = true; + } + + printSummary(summary, printf_func, flags); + } finish: - if (kPrintKextsLock & flags) { - IOLockUnlock(sKextSummariesLock); - } + if (kPrintKextsLock & flags) { + IOLockUnlock(sKextSummariesLock); + } - return; + return; } /********************************************************************* @@ -11127,22 +11135,21 @@ finish: /* static */ boolean_t OSKext::summaryIsInBacktrace( - OSKextLoadedKextSummary * summary, - vm_offset_t * addr, - unsigned int cnt) + OSKextLoadedKextSummary * summary, + vm_offset_t * addr, + unsigned int cnt) { - u_int i = 0; + u_int i = 0; - for (i = 0; i < cnt; i++) { - vm_offset_t kscan_addr = addr[i]; - if ((kscan_addr >= summary->address) && - (kscan_addr < (summary->address + summary->size))) - { - return TRUE; - } - } + for (i = 0; i < cnt; i++) { + vm_offset_t kscan_addr = addr[i]; + if ((kscan_addr >= summary->address) && + (kscan_addr < (summary->address + summary->size))) { + return TRUE; + } + } - return FALSE; + return FALSE; } /* @@ -11153,7 +11160,6 @@ OSKextLoadedKextSummary * OSKext::summaryForAddress(const uintptr_t addr) { for (unsigned i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { - OSKextLoadedKextSummary *summary = &gLoadedKextSummaries->summaries[i]; if (!summary->address) { continue; @@ -11188,146 +11194,144 @@ OSKext::summaryForAddress(const uintptr_t addr) void * OSKext::kextForAddress(const void *address) { - void * image = NULL; - OSKextActiveAccount * active; - OSKext * kext = NULL; - uint32_t baseIdx; - uint32_t lim; - uintptr_t addr = (uintptr_t) address; + void * image = NULL; + OSKextActiveAccount * active; + OSKext * kext = NULL; + uint32_t baseIdx; + uint32_t lim; + uintptr_t addr = (uintptr_t) address; if (!addr) { return NULL; } - if (sKextAccountsCount) - { - IOSimpleLockLock(sKextAccountsLock); - // bsearch sKextAccounts list - for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1) - { - active = &sKextAccounts[baseIdx + (lim >> 1)]; - if ((addr >= active->address) && (addr < active->address_end)) - { - kext = active->account->kext; - if (kext && kext->kmod_info) image = (void *) kext->kmod_info->address; - break; - } - else if (addr > active->address) - { - // move right - baseIdx += (lim >> 1) + 1; - lim--; - } - // else move left - } - IOSimpleLockUnlock(sKextAccountsLock); - } - if (!image && (addr >= vm_kernel_stext) && (addr < vm_kernel_etext)) 
- { - image = (void *) &_mh_execute_header; - } + if (sKextAccountsCount) { + IOSimpleLockLock(sKextAccountsLock); + // bsearch sKextAccounts list + for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1) { + active = &sKextAccounts[baseIdx + (lim >> 1)]; + if ((addr >= active->address) && (addr < active->address_end)) { + kext = active->account->kext; + if (kext && kext->kmod_info) { + image = (void *) kext->kmod_info->address; + } + break; + } else if (addr > active->address) { + // move right + baseIdx += (lim >> 1) + 1; + lim--; + } + // else move left + } + IOSimpleLockUnlock(sKextAccountsLock); + } + if (!image && (addr >= vm_kernel_stext) && (addr < vm_kernel_etext)) { + image = (void *) &_mh_execute_header; + } return image; } /********************************************************************* - * scan list of loaded kext summaries looking for a load address match and if - * found return the UUID C string. If not found then set empty string. - *********************************************************************/ +* scan list of loaded kext summaries looking for a load address match and if +* found return the UUID C string. If not found then set empty string. +*********************************************************************/ static void findSummaryUUID( - uint32_t tag_ID, - uuid_string_t uuid); + uint32_t tag_ID, + uuid_string_t uuid); -static void findSummaryUUID( - uint32_t tag_ID, - uuid_string_t uuid) -{ - u_int i; - - uuid[0] = 0x00; // default to no UUID - - for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { - OSKextLoadedKextSummary * summary; - - summary = gLoadedKextSummaries->summaries + i; - - if (summary->loadTag == tag_ID) { - (void) uuid_unparse(summary->uuid, uuid); - break; - } - } - return; +static void +findSummaryUUID( + uint32_t tag_ID, + uuid_string_t uuid) +{ + u_int i; + + uuid[0] = 0x00; // default to no UUID + + for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { + OSKextLoadedKextSummary * summary; + + summary = gLoadedKextSummaries->summaries + i; + + if (summary->loadTag == tag_ID) { + (void) uuid_unparse(summary->uuid, uuid); + break; + } + } + return; } /********************************************************************* * This function must be safe to call in panic context. *********************************************************************/ -void OSKext::printSummary( - OSKextLoadedKextSummary * summary, - int (* printf_func)(const char *fmt, ...), - uint32_t flags) -{ - kmod_reference_t * kmod_ref = NULL; - uuid_string_t uuid; - char version[kOSKextVersionMaxLength]; - uint64_t tmpAddr; - - if (!OSKextVersionGetString(summary->version, version, sizeof(version))) { - strlcpy(version, "unknown version", sizeof(version)); - } - (void) uuid_unparse(summary->uuid, uuid); - - if (kPrintKextsUnslide & flags) { - tmpAddr = ml_static_unslide(summary->address); - } - else { - tmpAddr = summary->address; - } - (*printf_func)("%s%s(%s)[%s]@0x%llx->0x%llx\n", - (kPrintKextsTerse & flags) ? "" : " ", - summary->name, version, uuid, - tmpAddr, tmpAddr + summary->size - 1); - - if (kPrintKextsTerse & flags) return; - - /* print dependency info */ - for (kmod_ref = (kmod_reference_t *) summary->reference_list; - kmod_ref; - kmod_ref = kmod_ref->next) { - kmod_info_t * rinfo; - - if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)kmod_ref)) == 0) { - (*printf_func)(" kmod dependency scan stopped " - "due to missing dependency page: %p\n", - (kPrintKextsUnslide & flags) ? 
(void *)ml_static_unslide((vm_offset_t)kmod_ref) : kmod_ref); - break; - } - rinfo = kmod_ref->info; - - if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) { - (*printf_func)(" kmod dependency scan stopped " - "due to missing kmod page: %p\n", - (kPrintKextsUnslide & flags) ? (void *)ml_static_unslide((vm_offset_t)rinfo) : rinfo); - break; - } - - if (!rinfo->address) { - continue; // skip fake entries for built-ins - } - - /* locate UUID in gLoadedKextSummaries */ - findSummaryUUID(rinfo->id, uuid); - - if (kPrintKextsUnslide & flags) { - tmpAddr = ml_static_unslide(rinfo->address); - } - else { - tmpAddr = rinfo->address; - } - (*printf_func)(" dependency: %s(%s)[%s]@%p\n", - rinfo->name, rinfo->version, uuid, tmpAddr); - } - return; +void +OSKext::printSummary( + OSKextLoadedKextSummary * summary, + int (* printf_func)(const char *fmt, ...), + uint32_t flags) +{ + kmod_reference_t * kmod_ref = NULL; + uuid_string_t uuid; + char version[kOSKextVersionMaxLength]; + uint64_t tmpAddr; + + if (!OSKextVersionGetString(summary->version, version, sizeof(version))) { + strlcpy(version, "unknown version", sizeof(version)); + } + (void) uuid_unparse(summary->uuid, uuid); + + if (kPrintKextsUnslide & flags) { + tmpAddr = ml_static_unslide(summary->address); + } else { + tmpAddr = summary->address; + } + (*printf_func)("%s%s(%s)[%s]@0x%llx->0x%llx\n", + (kPrintKextsTerse & flags) ? "" : " ", + summary->name, version, uuid, + tmpAddr, tmpAddr + summary->size - 1); + + if (kPrintKextsTerse & flags) { + return; + } + + /* print dependency info */ + for (kmod_ref = (kmod_reference_t *) summary->reference_list; + kmod_ref; + kmod_ref = kmod_ref->next) { + kmod_info_t * rinfo; + + if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)kmod_ref)) == 0) { + (*printf_func)(" kmod dependency scan stopped " + "due to missing dependency page: %p\n", + (kPrintKextsUnslide & flags) ? (void *)ml_static_unslide((vm_offset_t)kmod_ref) : kmod_ref); + break; + } + rinfo = kmod_ref->info; + + if (pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)rinfo)) == 0) { + (*printf_func)(" kmod dependency scan stopped " + "due to missing kmod page: %p\n", + (kPrintKextsUnslide & flags) ? (void *)ml_static_unslide((vm_offset_t)rinfo) : rinfo); + break; + } + + if (!rinfo->address) { + continue; // skip fake entries for built-ins + } + + /* locate UUID in gLoadedKextSummaries */ + findSummaryUUID(rinfo->id, uuid); + + if (kPrintKextsUnslide & flags) { + tmpAddr = ml_static_unslide(rinfo->address); + } else { + tmpAddr = rinfo->address; + } + (*printf_func)(" dependency: %s(%s)[%s]@%p\n", + rinfo->name, rinfo->version, uuid, tmpAddr); + } + return; } @@ -11338,42 +11342,42 @@ void OSKext::printSummary( * output (to) indexes as appropriate. *******************************************************************************/ static int substitute( - const char * scan_string, - char * string_out, - uint32_t * to_index, - uint32_t * from_index, - const char * substring, - char marker, - char substitution); + const char * scan_string, + char * string_out, + uint32_t * to_index, + uint32_t * from_index, + const char * substring, + char marker, + char substitution); /* string_out must be at least KMOD_MAX_NAME bytes. 
*/ static int substitute( - const char * scan_string, - char * string_out, - uint32_t * to_index, - uint32_t * from_index, - const char * substring, - char marker, - char substitution) -{ - uint32_t substring_length = strnlen(substring, KMOD_MAX_NAME - 1); - - /* On a substring match, append the marker (if there is one) and then - * the substitution character, updating the output (to) index accordingly. - * Then update the input (from) length by the length of the substring - * that got replaced. - */ - if (!strncmp(scan_string, substring, substring_length)) { - if (marker) { - string_out[(*to_index)++] = marker; - } - string_out[(*to_index)++] = substitution; - (*from_index) += substring_length; - return 1; - } - return 0; + const char * scan_string, + char * string_out, + uint32_t * to_index, + uint32_t * from_index, + const char * substring, + char marker, + char substitution) +{ + uint32_t substring_length = strnlen(substring, KMOD_MAX_NAME - 1); + + /* On a substring match, append the marker (if there is one) and then + * the substitution character, updating the output (to) index accordingly. + * Then update the input (from) length by the length of the substring + * that got replaced. + */ + if (!strncmp(scan_string, substring, substring_length)) { + if (marker) { + string_out[(*to_index)++] = marker; + } + string_out[(*to_index)++] = substitution; + (*from_index) += substring_length; + return 1; + } + return 0; } /******************************************************************************* @@ -11382,73 +11386,70 @@ substitute( * prefixes & substrings as defined by tables in kext_panic_report.h. *******************************************************************************/ static void compactIdentifier( - const char * identifier, - char * identifier_out, - char ** identifier_out_end); + const char * identifier, + char * identifier_out, + char ** identifier_out_end); static void compactIdentifier( - const char * identifier, - char * identifier_out, - char ** identifier_out_end) -{ - uint32_t from_index, to_index; - uint32_t scan_from_index = 0; - uint32_t scan_to_index = 0; - subs_entry_t * subs_entry = NULL; - int did_sub = 0; - - from_index = to_index = 0; - identifier_out[0] = '\0'; - - /* Replace certain identifier prefixes with shorter @+character sequences. - * Check the return value of substitute() so we only replace the prefix. - */ - for (subs_entry = &kext_identifier_prefix_subs[0]; - subs_entry->substring && !did_sub; - subs_entry++) { - - did_sub = substitute(identifier, identifier_out, - &scan_to_index, &scan_from_index, - subs_entry->substring, /* marker */ '\0', subs_entry->substitute); - } - did_sub = 0; - - /* Now scan through the identifier looking for the common substrings - * and replacing them with shorter !+character sequences via substitute(). - */ - for (/* see above */; - scan_from_index < KMOD_MAX_NAME - 1 && identifier[scan_from_index]; - /* see loop */) { - - const char * scan_string = &identifier[scan_from_index]; - - did_sub = 0; - - if (scan_from_index) { - for (subs_entry = &kext_identifier_substring_subs[0]; - subs_entry->substring && !did_sub; - subs_entry++) { - - did_sub = substitute(scan_string, identifier_out, - &scan_to_index, &scan_from_index, - subs_entry->substring, '!', subs_entry->substitute); - } - } - - /* If we didn't substitute, copy the input character to the output. 
- */ - if (!did_sub) { - identifier_out[scan_to_index++] = identifier[scan_from_index++]; - } - } - - identifier_out[scan_to_index] = '\0'; - if (identifier_out_end) { - *identifier_out_end = &identifier_out[scan_to_index]; - } - - return; + const char * identifier, + char * identifier_out, + char ** identifier_out_end) +{ + uint32_t from_index, to_index; + uint32_t scan_from_index = 0; + uint32_t scan_to_index = 0; + subs_entry_t * subs_entry = NULL; + int did_sub = 0; + + from_index = to_index = 0; + identifier_out[0] = '\0'; + + /* Replace certain identifier prefixes with shorter @+character sequences. + * Check the return value of substitute() so we only replace the prefix. + */ + for (subs_entry = &kext_identifier_prefix_subs[0]; + subs_entry->substring && !did_sub; + subs_entry++) { + did_sub = substitute(identifier, identifier_out, + &scan_to_index, &scan_from_index, + subs_entry->substring, /* marker */ '\0', subs_entry->substitute); + } + did_sub = 0; + + /* Now scan through the identifier looking for the common substrings + * and replacing them with shorter !+character sequences via substitute(). + */ + for (/* see above */; + scan_from_index < KMOD_MAX_NAME - 1 && identifier[scan_from_index]; + /* see loop */) { + const char * scan_string = &identifier[scan_from_index]; + + did_sub = 0; + + if (scan_from_index) { + for (subs_entry = &kext_identifier_substring_subs[0]; + subs_entry->substring && !did_sub; + subs_entry++) { + did_sub = substitute(scan_string, identifier_out, + &scan_to_index, &scan_from_index, + subs_entry->substring, '!', subs_entry->substitute); + } + } + + /* If we didn't substitute, copy the input character to the output. + */ + if (!did_sub) { + identifier_out[scan_to_index++] = identifier[scan_from_index++]; + } + } + + identifier_out[scan_to_index] = '\0'; + if (identifier_out_end) { + *identifier_out_end = &identifier_out[scan_to_index]; + } + + return; } /******************************************************************************* @@ -11459,29 +11460,29 @@ compactIdentifier( /* identPlusVers must be at least 2*KMOD_MAX_NAME in length. 
*/ static int assemble_identifier_and_version( - kmod_info_t * kmod_info, - char * identPlusVers, - int bufSize); + kmod_info_t * kmod_info, + char * identPlusVers, + int bufSize); static int assemble_identifier_and_version( - kmod_info_t * kmod_info, - char * identPlusVers, - int bufSize) -{ - int result = 0; + kmod_info_t * kmod_info, + char * identPlusVers, + int bufSize) +{ + int result = 0; + + compactIdentifier(kmod_info->name, identPlusVers, NULL); + result = strnlen(identPlusVers, KMOD_MAX_NAME - 1); + identPlusVers[result++] = '\t'; // increment for real char + identPlusVers[result] = '\0'; // don't increment for nul char + result = strlcat(identPlusVers, kmod_info->version, bufSize); + if (result >= bufSize) { + identPlusVers[bufSize - 1] = '\0'; + result = bufSize - 1; + } - compactIdentifier(kmod_info->name, identPlusVers, NULL); - result = strnlen(identPlusVers, KMOD_MAX_NAME - 1); - identPlusVers[result++] = '\t'; // increment for real char - identPlusVers[result] = '\0'; // don't increment for nul char - result = strlcat(identPlusVers, kmod_info->version, bufSize); - if (result >= bufSize) { - identPlusVers[bufSize - 1] = '\0'; - result = bufSize - 1; - } - - return result; + return result; } /******************************************************************************* @@ -11490,102 +11491,100 @@ assemble_identifier_and_version( /* static */ int OSKext::saveLoadedKextPanicListTyped( - const char * prefix, - int invertFlag, - int libsFlag, - char * paniclist, - uint32_t list_size) -{ - int result = -1; - unsigned int count, i; - - count = sLoadedKexts->getCount(); - if (!count) { - goto finish; - } - - i = count - 1; - do { - OSObject * rawKext = sLoadedKexts->getObject(i); - OSKext * theKext = OSDynamicCast(OSKext, rawKext); - int match; - uint32_t identPlusVersLength; - uint32_t tempLen; - char identPlusVers[2*KMOD_MAX_NAME]; - - if (!rawKext) { - printf("OSKext::saveLoadedKextPanicListTyped - " - "NULL kext in loaded kext list; continuing\n"); - continue; - } - - if (!theKext) { - printf("OSKext::saveLoadedKextPanicListTyped - " - "Kext type cast failed in loaded kext list; continuing\n"); - continue; - } - - /* Skip all built-in kexts. - */ - if (theKext->isKernelComponent()) { - continue; - } - - kmod_info_t * kmod_info = theKext->kmod_info; - - /* Filter for kmod name (bundle identifier). - */ - match = !strncmp(kmod_info->name, prefix, strnlen(prefix, KMOD_MAX_NAME)); - if ((match && invertFlag) || (!match && !invertFlag)) { - continue; - } - - /* Filter for libraries (kexts that have a compatible version). - */ - if ((libsFlag == 0 && theKext->getCompatibleVersion() > 1) || - (libsFlag == 1 && theKext->getCompatibleVersion() < 1)) { - - continue; - } - - if (!kmod_info || - !pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)kmod_info))) { - - printf("kext scan stopped due to missing kmod_info page: %p\n", - kmod_info); - goto finish; - } - - identPlusVersLength = assemble_identifier_and_version(kmod_info, - identPlusVers, - sizeof(identPlusVers)); - if (!identPlusVersLength) { - printf("error saving loaded kext info\n"); - goto finish; - } - - /* make sure everything fits and we null terminate. 
- */ - tempLen = strlcat(paniclist, identPlusVers, list_size); - if (tempLen >= list_size) { - // panic list is full, keep it and null terminate - paniclist[list_size - 1] = 0x00; - result = 0; - goto finish; - } - tempLen = strlcat(paniclist, "\n", list_size); - if (tempLen >= list_size) { - // panic list is full, keep it and null terminate - paniclist[list_size - 1] = 0x00; - result = 0; - goto finish; - } - } while (i--); - - result = 0; + const char * prefix, + int invertFlag, + int libsFlag, + char * paniclist, + uint32_t list_size) +{ + int result = -1; + unsigned int count, i; + + count = sLoadedKexts->getCount(); + if (!count) { + goto finish; + } + + i = count - 1; + do { + OSObject * rawKext = sLoadedKexts->getObject(i); + OSKext * theKext = OSDynamicCast(OSKext, rawKext); + int match; + uint32_t identPlusVersLength; + uint32_t tempLen; + char identPlusVers[2 * KMOD_MAX_NAME]; + + if (!rawKext) { + printf("OSKext::saveLoadedKextPanicListTyped - " + "NULL kext in loaded kext list; continuing\n"); + continue; + } + + if (!theKext) { + printf("OSKext::saveLoadedKextPanicListTyped - " + "Kext type cast failed in loaded kext list; continuing\n"); + continue; + } + + /* Skip all built-in kexts. + */ + if (theKext->isKernelComponent()) { + continue; + } + + kmod_info_t * kmod_info = theKext->kmod_info; + + /* Filter for kmod name (bundle identifier). + */ + match = !strncmp(kmod_info->name, prefix, strnlen(prefix, KMOD_MAX_NAME)); + if ((match && invertFlag) || (!match && !invertFlag)) { + continue; + } + + /* Filter for libraries (kexts that have a compatible version). + */ + if ((libsFlag == 0 && theKext->getCompatibleVersion() > 1) || + (libsFlag == 1 && theKext->getCompatibleVersion() < 1)) { + continue; + } + + if (!kmod_info || + !pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)kmod_info))) { + printf("kext scan stopped due to missing kmod_info page: %p\n", + kmod_info); + goto finish; + } + + identPlusVersLength = assemble_identifier_and_version(kmod_info, + identPlusVers, + sizeof(identPlusVers)); + if (!identPlusVersLength) { + printf("error saving loaded kext info\n"); + goto finish; + } + + /* make sure everything fits and we null terminate. + */ + tempLen = strlcat(paniclist, identPlusVers, list_size); + if (tempLen >= list_size) { + // panic list is full, keep it and null terminate + paniclist[list_size - 1] = 0x00; + result = 0; + goto finish; + } + tempLen = strlcat(paniclist, "\n", list_size); + if (tempLen >= list_size) { + // panic list is full, keep it and null terminate + paniclist[list_size - 1] = 0x00; + result = 0; + goto finish; + } + } while (i--); + + result = 0; finish: - - return result; + + return result; } /********************************************************************* @@ -11594,88 +11593,85 @@ finish: void OSKext::saveLoadedKextPanicList(void) { - char * newlist = NULL; - uint32_t newlist_size = 0; - - newlist_size = KEXT_PANICLIST_SIZE; - newlist = (char *)kalloc_tag(newlist_size, VM_KERN_MEMORY_OSKEXT); - - if (!newlist) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | kOSKextLogGeneralFlag, - "Couldn't allocate kext panic log buffer."); - goto finish; - } - - newlist[0] = '\0'; - - // non-"com.apple." kexts - if (OSKext::saveLoadedKextPanicListTyped("com.apple.", /* invert? */ 1, - /* libs? */ -1, newlist, newlist_size) != 0) { - - goto finish; - } - // "com.apple." nonlibrary kexts - if (OSKext::saveLoadedKextPanicListTyped("com.apple.", /* invert? */ 0, - /* libs? 
*/ 0, newlist, newlist_size) != 0) { - - goto finish; - } - // "com.apple." library kexts - if (OSKext::saveLoadedKextPanicListTyped("com.apple.", /* invert? */ 0, - /* libs? */ 1, newlist, newlist_size) != 0) { - - goto finish; - } - - if (loaded_kext_paniclist) { - kfree(loaded_kext_paniclist, loaded_kext_paniclist_size); - } - loaded_kext_paniclist = newlist; - newlist = NULL; - loaded_kext_paniclist_size = newlist_size; - + char * newlist = NULL; + uint32_t newlist_size = 0; + + newlist_size = KEXT_PANICLIST_SIZE; + newlist = (char *)kalloc_tag(newlist_size, VM_KERN_MEMORY_OSKEXT); + + if (!newlist) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | kOSKextLogGeneralFlag, + "Couldn't allocate kext panic log buffer."); + goto finish; + } + + newlist[0] = '\0'; + + // non-"com.apple." kexts + if (OSKext::saveLoadedKextPanicListTyped("com.apple.", /* invert? */ 1, + /* libs? */ -1, newlist, newlist_size) != 0) { + goto finish; + } + // "com.apple." nonlibrary kexts + if (OSKext::saveLoadedKextPanicListTyped("com.apple.", /* invert? */ 0, + /* libs? */ 0, newlist, newlist_size) != 0) { + goto finish; + } + // "com.apple." library kexts + if (OSKext::saveLoadedKextPanicListTyped("com.apple.", /* invert? */ 0, + /* libs? */ 1, newlist, newlist_size) != 0) { + goto finish; + } + + if (loaded_kext_paniclist) { + kfree(loaded_kext_paniclist, loaded_kext_paniclist_size); + } + loaded_kext_paniclist = newlist; + newlist = NULL; + loaded_kext_paniclist_size = newlist_size; + finish: - if (newlist) { - kfree(newlist, newlist_size); - } - return; + if (newlist) { + kfree(newlist, newlist_size); + } + return; } - + /********************************************************************* * Assumes sKextLock is held. *********************************************************************/ void OSKext::savePanicString(bool isLoading) { - u_long len; - - if (!kmod_info) { - return; // do not goto finish here b/c of lock - } - - len = assemble_identifier_and_version( kmod_info, - (isLoading) ? last_loaded_str_buf : last_unloaded_str_buf, - (isLoading) ? sizeof(last_loaded_str_buf) : sizeof(last_unloaded_str_buf) ); - if (!len) { - printf("error saving unloaded kext info\n"); - goto finish; - } - - if (isLoading) { - last_loaded_strlen = len; - last_loaded_address = (void *)kmod_info->address; - last_loaded_size = kmod_info->size; - clock_get_uptime(&last_loaded_timestamp); - } else { - last_unloaded_strlen = len; - last_unloaded_address = (void *)kmod_info->address; - last_unloaded_size = kmod_info->size; - clock_get_uptime(&last_unloaded_timestamp); - } + u_long len; + + if (!kmod_info) { + return; // do not goto finish here b/c of lock + } + + len = assemble_identifier_and_version( kmod_info, + (isLoading) ? last_loaded_str_buf : last_unloaded_str_buf, + (isLoading) ? 
sizeof(last_loaded_str_buf) : sizeof(last_unloaded_str_buf)); + if (!len) { + printf("error saving unloaded kext info\n"); + goto finish; + } + + if (isLoading) { + last_loaded_strlen = len; + last_loaded_address = (void *)kmod_info->address; + last_loaded_size = kmod_info->size; + clock_get_uptime(&last_loaded_timestamp); + } else { + last_unloaded_strlen = len; + last_unloaded_address = (void *)kmod_info->address; + last_unloaded_size = kmod_info->size; + clock_get_uptime(&last_unloaded_timestamp); + } finish: - return; + return; } /********************************************************************* @@ -11684,32 +11680,31 @@ finish: void OSKext::printKextPanicLists(int (*printf_func)(const char *fmt, ...)) { - if (last_loaded_strlen) { - printf_func("last loaded kext at %llu: %.*s (addr %p, size %lu)\n", - AbsoluteTime_to_scalar(&last_loaded_timestamp), - last_loaded_strlen, last_loaded_str_buf, - last_loaded_address, last_loaded_size); - } - - if (last_unloaded_strlen) { - printf_func("last unloaded kext at %llu: %.*s (addr %p, size %lu)\n", - AbsoluteTime_to_scalar(&last_unloaded_timestamp), - last_unloaded_strlen, last_unloaded_str_buf, - last_unloaded_address, last_unloaded_size); - } + if (last_loaded_strlen) { + printf_func("last loaded kext at %llu: %.*s (addr %p, size %lu)\n", + AbsoluteTime_to_scalar(&last_loaded_timestamp), + last_loaded_strlen, last_loaded_str_buf, + last_loaded_address, last_loaded_size); + } - printf_func("loaded kexts:\n"); - if (loaded_kext_paniclist && - pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) loaded_kext_paniclist) && - loaded_kext_paniclist[0]) { + if (last_unloaded_strlen) { + printf_func("last unloaded kext at %llu: %.*s (addr %p, size %lu)\n", + AbsoluteTime_to_scalar(&last_unloaded_timestamp), + last_unloaded_strlen, last_unloaded_str_buf, + last_unloaded_address, last_unloaded_size); + } - printf_func("%.*s", - strnlen(loaded_kext_paniclist, loaded_kext_paniclist_size), - loaded_kext_paniclist); - } else { - printf_func("(none)\n"); - } - return; + printf_func("loaded kexts:\n"); + if (loaded_kext_paniclist && + pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) loaded_kext_paniclist) && + loaded_kext_paniclist[0]) { + printf_func("%.*s", + strnlen(loaded_kext_paniclist, loaded_kext_paniclist_size), + loaded_kext_paniclist); + } else { + printf_func("(none)\n"); + } + return; } /********************************************************************* @@ -11719,157 +11714,168 @@ OSKext::printKextPanicLists(int (*printf_func)(const char *fmt, ...)) void OSKext::updateLoadedKextSummaries(void) { - kern_return_t result = KERN_FAILURE; - OSKextLoadedKextSummaryHeader *summaryHeader = NULL; - OSKextLoadedKextSummaryHeader *summaryHeaderAlloc = NULL; - OSKext *aKext; - vm_map_offset_t start, end; - size_t summarySize = 0; - size_t size; - u_int count; - u_int maxKexts; - u_int i, j; - OSKextActiveAccount * accountingList; - OSKextActiveAccount * prevAccountingList; - uint32_t idx, accountingListAlloc, accountingListCount, prevAccountingListCount; - - prevAccountingList = NULL; - prevAccountingListCount = 0; + kern_return_t result = KERN_FAILURE; + OSKextLoadedKextSummaryHeader *summaryHeader = NULL; + OSKextLoadedKextSummaryHeader *summaryHeaderAlloc = NULL; + OSKext *aKext; + vm_map_offset_t start, end; + size_t summarySize = 0; + size_t size; + u_int count; + u_int maxKexts; + u_int i, j; + OSKextActiveAccount * accountingList; + OSKextActiveAccount * prevAccountingList; + uint32_t idx, accountingListAlloc, accountingListCount, 
prevAccountingListCount; + + prevAccountingList = NULL; + prevAccountingListCount = 0; #if DEVELOPMENT || DEBUG - if (IORecursiveLockHaveLock(sKextLock) == false) { - panic("sKextLock must be held"); - } + if (IORecursiveLockHaveLock(sKextLock) == false) { + panic("sKextLock must be held"); + } #endif - - IOLockLock(sKextSummariesLock); - - count = sLoadedKexts->getCount(); - for (i = 0, maxKexts = 0; i < count; ++i) { - aKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - maxKexts += (aKext && aKext->isExecutable()); - } - - if (!maxKexts) goto finish; - if (maxKexts < kOSKextTypicalLoadCount) maxKexts = kOSKextTypicalLoadCount; - - /* Calculate the size needed for the new summary headers. - */ - - size = sizeof(*gLoadedKextSummaries); - size += maxKexts * sizeof(*gLoadedKextSummaries->summaries); - size = round_page(size); - - if (gLoadedKextSummaries == NULL || sLoadedKextSummariesAllocSize < size) { - if (gLoadedKextSummaries) { - kmem_free(kernel_map, (vm_offset_t)gLoadedKextSummaries, sLoadedKextSummariesAllocSize); - gLoadedKextSummaries = NULL; - gLoadedKextSummariesTimestamp = mach_absolute_time(); - sLoadedKextSummariesAllocSize = 0; - } - result = kmem_alloc(kernel_map, (vm_offset_t *)&summaryHeaderAlloc, size, VM_KERN_MEMORY_OSKEXT); - if (result != KERN_SUCCESS) goto finish; - summaryHeader = summaryHeaderAlloc; - summarySize = size; - } - else { - summaryHeader = gLoadedKextSummaries; - summarySize = sLoadedKextSummariesAllocSize; - - start = (vm_map_offset_t) summaryHeader; - end = start + summarySize; - result = vm_map_protect(kernel_map, - start, - end, - VM_PROT_DEFAULT, - FALSE); - if (result != KERN_SUCCESS) goto finish; - } - - /* Populate the summary header. - */ - - bzero(summaryHeader, summarySize); - summaryHeader->version = kOSKextLoadedKextSummaryVersion; - summaryHeader->entry_size = sizeof(OSKextLoadedKextSummary); - - /* Populate each kext summary. - */ - - count = sLoadedKexts->getCount(); - accountingListAlloc = 0; - for (i = 0, j = 0; i < count && j < maxKexts; ++i) { - aKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (!aKext || !aKext->isExecutable()) { - continue; - } - - aKext->updateLoadedKextSummary(&summaryHeader->summaries[j++]); - summaryHeader->numSummaries++; - accountingListAlloc++; - } - - accountingList = IONew(typeof(accountingList[0]), accountingListAlloc); - accountingListCount = 0; - for (i = 0, j = 0; i < count && j < maxKexts; ++i) { - aKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); - if (!aKext || !aKext->isExecutable()) { - continue; - } - - OSKextActiveAccount activeAccount; - aKext->updateActiveAccount(&activeAccount); - // order by address - for (idx = 0; idx < accountingListCount; idx++) - { - if (activeAccount.address < accountingList[idx].address) break; - } - bcopy(&accountingList[idx], &accountingList[idx + 1], (accountingListCount - idx) * sizeof(accountingList[0])); - accountingList[idx] = activeAccount; - accountingListCount++; - } - assert(accountingListCount == accountingListAlloc); - /* Write protect the buffer and move it into place. 
- */ - - start = (vm_map_offset_t) summaryHeader; - end = start + summarySize; - - result = vm_map_protect(kernel_map, start, end, VM_PROT_READ, FALSE); - if (result != KERN_SUCCESS) - goto finish; - - gLoadedKextSummaries = summaryHeader; - gLoadedKextSummariesTimestamp = mach_absolute_time(); - sLoadedKextSummariesAllocSize = summarySize; - summaryHeaderAlloc = NULL; - - /* Call the magic breakpoint function through a static function pointer so - * the compiler can't optimize the function away. - */ - if (sLoadedKextSummariesUpdated) (*sLoadedKextSummariesUpdated)(); - - IOSimpleLockLock(sKextAccountsLock); - prevAccountingList = sKextAccounts; - prevAccountingListCount = sKextAccountsCount; - sKextAccounts = accountingList; - sKextAccountsCount = accountingListCount; - IOSimpleLockUnlock(sKextAccountsLock); + + IOLockLock(sKextSummariesLock); + + count = sLoadedKexts->getCount(); + for (i = 0, maxKexts = 0; i < count; ++i) { + aKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + maxKexts += (aKext && aKext->isExecutable()); + } + + if (!maxKexts) { + goto finish; + } + if (maxKexts < kOSKextTypicalLoadCount) { + maxKexts = kOSKextTypicalLoadCount; + } + + /* Calculate the size needed for the new summary headers. + */ + + size = sizeof(*gLoadedKextSummaries); + size += maxKexts * sizeof(*gLoadedKextSummaries->summaries); + size = round_page(size); + + if (gLoadedKextSummaries == NULL || sLoadedKextSummariesAllocSize < size) { + if (gLoadedKextSummaries) { + kmem_free(kernel_map, (vm_offset_t)gLoadedKextSummaries, sLoadedKextSummariesAllocSize); + gLoadedKextSummaries = NULL; + gLoadedKextSummariesTimestamp = mach_absolute_time(); + sLoadedKextSummariesAllocSize = 0; + } + result = kmem_alloc(kernel_map, (vm_offset_t *)&summaryHeaderAlloc, size, VM_KERN_MEMORY_OSKEXT); + if (result != KERN_SUCCESS) { + goto finish; + } + summaryHeader = summaryHeaderAlloc; + summarySize = size; + } else { + summaryHeader = gLoadedKextSummaries; + summarySize = sLoadedKextSummariesAllocSize; + + start = (vm_map_offset_t) summaryHeader; + end = start + summarySize; + result = vm_map_protect(kernel_map, + start, + end, + VM_PROT_DEFAULT, + FALSE); + if (result != KERN_SUCCESS) { + goto finish; + } + } + + /* Populate the summary header. + */ + + bzero(summaryHeader, summarySize); + summaryHeader->version = kOSKextLoadedKextSummaryVersion; + summaryHeader->entry_size = sizeof(OSKextLoadedKextSummary); + + /* Populate each kext summary. 
+ */ + + count = sLoadedKexts->getCount(); + accountingListAlloc = 0; + for (i = 0, j = 0; i < count && j < maxKexts; ++i) { + aKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + if (!aKext || !aKext->isExecutable()) { + continue; + } + + aKext->updateLoadedKextSummary(&summaryHeader->summaries[j++]); + summaryHeader->numSummaries++; + accountingListAlloc++; + } + + accountingList = IONew(typeof(accountingList[0]), accountingListAlloc); + accountingListCount = 0; + for (i = 0, j = 0; i < count && j < maxKexts; ++i) { + aKext = OSDynamicCast(OSKext, sLoadedKexts->getObject(i)); + if (!aKext || !aKext->isExecutable()) { + continue; + } + + OSKextActiveAccount activeAccount; + aKext->updateActiveAccount(&activeAccount); + // order by address + for (idx = 0; idx < accountingListCount; idx++) { + if (activeAccount.address < accountingList[idx].address) { + break; + } + } + bcopy(&accountingList[idx], &accountingList[idx + 1], (accountingListCount - idx) * sizeof(accountingList[0])); + accountingList[idx] = activeAccount; + accountingListCount++; + } + assert(accountingListCount == accountingListAlloc); + /* Write protect the buffer and move it into place. + */ + + start = (vm_map_offset_t) summaryHeader; + end = start + summarySize; + + result = vm_map_protect(kernel_map, start, end, VM_PROT_READ, FALSE); + if (result != KERN_SUCCESS) { + goto finish; + } + + gLoadedKextSummaries = summaryHeader; + gLoadedKextSummariesTimestamp = mach_absolute_time(); + sLoadedKextSummariesAllocSize = summarySize; + summaryHeaderAlloc = NULL; + + /* Call the magic breakpoint function through a static function pointer so + * the compiler can't optimize the function away. + */ + if (sLoadedKextSummariesUpdated) { + (*sLoadedKextSummariesUpdated)(); + } + + IOSimpleLockLock(sKextAccountsLock); + prevAccountingList = sKextAccounts; + prevAccountingListCount = sKextAccountsCount; + sKextAccounts = accountingList; + sKextAccountsCount = accountingListCount; + IOSimpleLockUnlock(sKextAccountsLock); finish: - IOLockUnlock(sKextSummariesLock); + IOLockUnlock(sKextSummariesLock); - /* If we had to allocate a new buffer but failed to generate the summaries, - * free that now. - */ - if (summaryHeaderAlloc) { - kmem_free(kernel_map, (vm_offset_t)summaryHeaderAlloc, summarySize); - } - if (prevAccountingList) { - IODelete(prevAccountingList, typeof(accountingList[0]), prevAccountingListCount); - } + /* If we had to allocate a new buffer but failed to generate the summaries, + * free that now. 
+ */ + if (summaryHeaderAlloc) { + kmem_free(kernel_map, (vm_offset_t)summaryHeaderAlloc, summarySize); + } + if (prevAccountingList) { + IODelete(prevAccountingList, typeof(accountingList[0]), prevAccountingListCount); + } - return; + return; } /********************************************************************* @@ -11877,33 +11883,33 @@ finish: void OSKext::updateLoadedKextSummary(OSKextLoadedKextSummary *summary) { - OSData *uuid; + OSData *uuid; - strlcpy(summary->name, getIdentifierCString(), - sizeof(summary->name)); + strlcpy(summary->name, getIdentifierCString(), + sizeof(summary->name)); - uuid = copyUUID(); - if (uuid) { - memcpy(summary->uuid, uuid->getBytesNoCopy(), sizeof(summary->uuid)); - OSSafeReleaseNULL(uuid); - } + uuid = copyUUID(); + if (uuid) { + memcpy(summary->uuid, uuid->getBytesNoCopy(), sizeof(summary->uuid)); + OSSafeReleaseNULL(uuid); + } - if (flags.builtin) { + if (flags.builtin) { // this value will stop lldb from parsing the mach-o header // summary->address = UINT64_MAX; // summary->size = 0; - summary->address = kmod_info->address; - summary->size = kmod_info->size; - } else { - summary->address = kmod_info->address; - summary->size = kmod_info->size; - } - summary->version = getVersion(); - summary->loadTag = kmod_info->id; - summary->flags = 0; - summary->reference_list = (uint64_t) kmod_info->reference_list; + summary->address = kmod_info->address; + summary->size = kmod_info->size; + } else { + summary->address = kmod_info->address; + summary->size = kmod_info->size; + } + summary->version = getVersion(); + summary->loadTag = kmod_info->id; + summary->flags = 0; + summary->reference_list = (uint64_t) kmod_info->reference_list; - return; + return; } /********************************************************************* @@ -11912,108 +11918,111 @@ OSKext::updateLoadedKextSummary(OSKextLoadedKextSummary *summary) void OSKext::updateActiveAccount(OSKextActiveAccount *accountp) { - kernel_mach_header_t *hdr = NULL; - kernel_segment_command_t *seg = NULL; - - bzero(accountp, sizeof(*accountp)); - - hdr = (kernel_mach_header_t *)kmod_info->address; - if (getcommandfromheader(hdr, LC_SEGMENT_SPLIT_INFO)) { - /* If this kext supports split segments, use the first - * executable segment as the range for instructions - * (and thus for backtracing. - */ - for (seg = firstsegfromheader(hdr); seg != NULL; seg = nextsegfromheader(hdr, seg)) { - if (seg->initprot & VM_PROT_EXECUTE) { - break; - } - } - } - if (seg) { - accountp->address = seg->vmaddr; - if (accountp->address) { - accountp->address_end = seg->vmaddr + seg->vmsize; - } - } else { - /* For non-split kexts and for kexts without executable - * segments, just use the kmod_info range (as the kext - * is either all in one range or should not show up in - * instruction backtraces). - */ - accountp->address = kmod_info->address; - if (accountp->address) { - accountp->address_end = kmod_info->address + kmod_info->size; - } - } - - accountp->account = this->account; -} - -extern "C" const vm_allocation_site_t * + kernel_mach_header_t *hdr = NULL; + kernel_segment_command_t *seg = NULL; + + bzero(accountp, sizeof(*accountp)); + + hdr = (kernel_mach_header_t *)kmod_info->address; + if (getcommandfromheader(hdr, LC_SEGMENT_SPLIT_INFO)) { + /* If this kext supports split segments, use the first + * executable segment as the range for instructions + * (and thus for backtracing. 
+ */ + for (seg = firstsegfromheader(hdr); seg != NULL; seg = nextsegfromheader(hdr, seg)) { + if (seg->initprot & VM_PROT_EXECUTE) { + break; + } + } + } + if (seg) { + accountp->address = seg->vmaddr; + if (accountp->address) { + accountp->address_end = seg->vmaddr + seg->vmsize; + } + } else { + /* For non-split kexts and for kexts without executable + * segments, just use the kmod_info range (as the kext + * is either all in one range or should not show up in + * instruction backtraces). + */ + accountp->address = kmod_info->address; + if (accountp->address) { + accountp->address_end = kmod_info->address + kmod_info->size; + } + } + + accountp->account = this->account; +} + +extern "C" const vm_allocation_site_t * OSKextGetAllocationSiteForCaller(uintptr_t address) { - OSKextActiveAccount * active; - vm_allocation_site_t * site; - vm_allocation_site_t * releasesite; - - uint32_t baseIdx; - uint32_t lim; - - IOSimpleLockLock(sKextAccountsLock); - site = releasesite = NULL; - - // bsearch sKextAccounts list - for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1) - { - active = &sKextAccounts[baseIdx + (lim >> 1)]; - if ((address >= active->address) && (address < active->address_end)) - { - site = &active->account->site; - if (!site->tag) vm_tag_alloc_locked(site, &releasesite); - break; + OSKextActiveAccount * active; + vm_allocation_site_t * site; + vm_allocation_site_t * releasesite; + + uint32_t baseIdx; + uint32_t lim; + + IOSimpleLockLock(sKextAccountsLock); + site = releasesite = NULL; + + // bsearch sKextAccounts list + for (baseIdx = 0, lim = sKextAccountsCount; lim; lim >>= 1) { + active = &sKextAccounts[baseIdx + (lim >> 1)]; + if ((address >= active->address) && (address < active->address_end)) { + site = &active->account->site; + if (!site->tag) { + vm_tag_alloc_locked(site, &releasesite); + } + break; + } else if (address > active->address) { + // move right + baseIdx += (lim >> 1) + 1; + lim--; + } + // else move left } - else if (address > active->address) - { - // move right - baseIdx += (lim >> 1) + 1; - lim--; + IOSimpleLockUnlock(sKextAccountsLock); + if (releasesite) { + kern_allocation_name_release(releasesite); } - // else move left - } - IOSimpleLockUnlock(sKextAccountsLock); - if (releasesite) kern_allocation_name_release(releasesite); - return (site); + return site; } -extern "C" uint32_t +extern "C" uint32_t OSKextGetKmodIDForSite(const vm_allocation_site_t * site, char * name, vm_size_t namelen) { - OSKextAccount * account = (typeof(account)) site; - const char * kname; + OSKextAccount * account = (typeof(account))site; + const char * kname; - if (name) - { - if (account->kext) kname = account->kext->getIdentifierCString(); - else kname = "<>"; - strlcpy(name, kname, namelen); - } + if (name) { + if (account->kext) { + kname = account->kext->getIdentifierCString(); + } else { + kname = "<>"; + } + strlcpy(name, kname, namelen); + } - return (account->loadTag); + return account->loadTag; } -extern "C" void +extern "C" void OSKextFreeSite(vm_allocation_site_t * site) { - OSKextAccount * freeAccount = (typeof(freeAccount)) site; - IODelete(freeAccount, OSKextAccount, 1); + OSKextAccount * freeAccount = (typeof(freeAccount))site; + IODelete(freeAccount, OSKextAccount, 1); } /********************************************************************* *********************************************************************/ #if CONFIG_IMAGEBOOT -int OSKextGetUUIDForName(const char *name, uuid_t uuid) +int +OSKextGetUUIDForName(const char *name, uuid_t uuid) { OSKext 
*kext = OSKext::lookupKextWithIdentifier(name); if (!kext) { diff --git a/libkern/c++/OSMetaClass.cpp b/libkern/c++/OSMetaClass.cpp index f7c0594d8..7db8d37aa 100644 --- a/libkern/c++/OSMetaClass.cpp +++ b/libkern/c++/OSMetaClass.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSMetaClass.cpp created by gvdl on Fri 1998-11-17 */ @@ -37,8 +37,8 @@ #include #include -#include -#include +#include +#include #include #include #include @@ -81,9 +81,9 @@ OSKextLogSpec kOSMetaClassLogSpec = kOSKextLogKextBookkeepingFlag; static enum { - kCompletedBootstrap = 0, - kNoDictionaries = 1, - kMakingDictionaries = 2 + kCompletedBootstrap = 0, + kNoDictionaries = 1, + kMakingDictionaries = 2 } sBootstrapState = kNoDictionaries; static const int kClassCapacityIncrement = 40; @@ -99,20 +99,20 @@ IOLock * sInstancesLock = NULL; * kext can be in flight at a time, guarded by sStalledClassesLock */ static struct StalledData { - const char * kextIdentifier; - OSReturn result; - unsigned int capacity; - unsigned int count; - OSMetaClass ** classes; + const char * kextIdentifier; + OSReturn result; + unsigned int capacity; + unsigned int count; + OSMetaClass ** classes; } * sStalled; IOLock * sStalledClassesLock = NULL; struct ExpansionData { - OSOrderedSet * instances; - OSKext * kext; - uint32_t retain; + OSOrderedSet * instances; + OSKext * kext; + uint32_t retain; #if IOTRACKING - IOTrackingQueue * tracking; + IOTrackingQueue * tracking; #endif }; @@ -129,23 +129,44 @@ struct ExpansionData { * Reserved vtable functions. 
*********************************************************************/ #if SLOT_USED -void OSMetaClassBase::_RESERVEDOSMetaClassBase0() - { panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 0); } -void OSMetaClassBase::_RESERVEDOSMetaClassBase1() - { panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 1); } -void OSMetaClassBase::_RESERVEDOSMetaClassBase2() - { panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 2); } +void +OSMetaClassBase::_RESERVEDOSMetaClassBase0() +{ + panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 0); +} +void +OSMetaClassBase::_RESERVEDOSMetaClassBase1() +{ + panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 1); +} +void +OSMetaClassBase::_RESERVEDOSMetaClassBase2() +{ + panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 2); +} #endif /* SLOT_USED */ // As these slots are used move them up inside the #if above -void OSMetaClassBase::_RESERVEDOSMetaClassBase3() - { panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 3); } -void OSMetaClassBase::_RESERVEDOSMetaClassBase4() - { panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 4); } -void OSMetaClassBase::_RESERVEDOSMetaClassBase5() - { panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 5); } -void OSMetaClassBase::_RESERVEDOSMetaClassBase6() - { panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 6); } +void +OSMetaClassBase::_RESERVEDOSMetaClassBase3() +{ + panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 3); +} +void +OSMetaClassBase::_RESERVEDOSMetaClassBase4() +{ + panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 4); +} +void +OSMetaClassBase::_RESERVEDOSMetaClassBase5() +{ + panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 5); +} +void +OSMetaClassBase::_RESERVEDOSMetaClassBase6() +{ + panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 6); +} #endif @@ -157,54 +178,53 @@ void OSMetaClassBase::_RESERVEDOSMetaClassBase6() /* -IHI0059A "C++ Application Binary Interface Standard for the ARM 64 - bit Architecture": - -3.2.1 Representation of pointer to member function The generic C++ ABI [GC++ABI] -specifies that a pointer to member function is a pair of words <ptr, adj>. The -least significant bit of ptr discriminates between (0) the address of a non- -virtual member function and (1) the offset in the class's virtual table of the -address of a virtual function. This encoding cannot work for the AArch64 -instruction set where the architecture reserves all bits of code addresses. This -ABI specifies that adj contains twice the this adjustment, plus 1 if the member -function is virtual. The least significant bit of adj then makes exactly the -same discrimination as the least significant bit of ptr does for Itanium. A -pointer to member function is NULL when ptr = 0 and the least significant bit of -adj is zero. -*/ + * IHI0059A "C++ Application Binary Interface Standard for the ARM 64 - bit Architecture": + * + * 3.2.1 Representation of pointer to member function The generic C++ ABI [GC++ABI] + * specifies that a pointer to member function is a pair of words <ptr, adj>. The + * least significant bit of ptr discriminates between (0) the address of a non- + * virtual member function and (1) the offset in the class's virtual table of the + * address of a virtual function. This encoding cannot work for the AArch64 + * instruction set where the architecture reserves all bits of code addresses. 
This + * ABI specifies that adj contains twice the this adjustment, plus 1 if the member + * function is virtual. The least significant bit of adj then makes exactly the + * same discrimination as the least significant bit of ptr does for Itanium. A + * pointer to member function is NULL when ptr = 0 and the least significant bit of + * adj is zero. + */ OSMetaClassBase::_ptf_t OSMetaClassBase::_ptmf2ptf(const OSMetaClassBase *self, void (OSMetaClassBase::*func)(void)) { typedef long int ptrdiff_t; - struct ptmf_t { - _ptf_t fPFN; - ptrdiff_t delta; - }; - union { - void (OSMetaClassBase::*fIn)(void); - struct ptmf_t pTMF; - } map; - _ptf_t pfn; - - map.fIn = func; - pfn = map.pTMF.fPFN; - - if (map.pTMF.delta & 1) { - // virtual - union { - const OSMetaClassBase *fObj; - _ptf_t **vtablep; - } u; - u.fObj = self; - - // Virtual member function so dereference table - pfn = *(_ptf_t *)(((uintptr_t)*u.vtablep) + (uintptr_t)pfn); - return pfn; - - } else { - // Not virtual, i.e. plain member func - return pfn; - } + struct ptmf_t { + _ptf_t fPFN; + ptrdiff_t delta; + }; + union { + void (OSMetaClassBase::*fIn)(void); + struct ptmf_t pTMF; + } map; + _ptf_t pfn; + + map.fIn = func; + pfn = map.pTMF.fPFN; + + if (map.pTMF.delta & 1) { + // virtual + union { + const OSMetaClassBase *fObj; + _ptf_t **vtablep; + } u; + u.fObj = self; + + // Virtual member function so dereference table + pfn = *(_ptf_t *)(((uintptr_t)*u.vtablep) + (uintptr_t)pfn); + return pfn; + } else { + // Not virtual, i.e. plain member func + return pfn; + } } #endif /* defined(__arm__) || defined(__arm64__) */ @@ -220,31 +240,32 @@ OSMetaClassBase::_ptmf2ptf(const OSMetaClassBase *self, void (OSMetaClassBase::* *********************************************************************/ OSMetaClassBase * OSMetaClassBase::safeMetaCast( - const OSMetaClassBase * me, - const OSMetaClass * toType) + const OSMetaClassBase * me, + const OSMetaClass * toType) { - return (me)? me->metaCast(toType) : 0; + return (me)? 
me->metaCast(toType) : 0; } /********************************************************************* *********************************************************************/ bool OSMetaClassBase::checkTypeInst( - const OSMetaClassBase * inst, - const OSMetaClassBase * typeinst) + const OSMetaClassBase * inst, + const OSMetaClassBase * typeinst) { - const OSMetaClass * toType = OSTypeIDInst(typeinst); - return typeinst && inst && (0 != inst->metaCast(toType)); + const OSMetaClass * toType = OSTypeIDInst(typeinst); + return typeinst && inst && (0 != inst->metaCast(toType)); } /********************************************************************* *********************************************************************/ -void OSMetaClassBase:: +void +OSMetaClassBase:: initialize() { - sAllClassesLock = IOLockAlloc(); - sStalledClassesLock = IOLockAlloc(); - sInstancesLock = IOLockAlloc(); + sAllClassesLock = IOLockAlloc(); + sStalledClassesLock = IOLockAlloc(); + sInstancesLock = IOLockAlloc(); } #if APPLE_KEXT_VTABLE_PADDING @@ -255,7 +276,9 @@ initialize() *********************************************************************/ void OSMetaClassBase::_RESERVEDOSMetaClassBase7() -{ panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 7); } +{ + panic("OSMetaClassBase::_RESERVEDOSMetaClassBase%d called.", 7); +} #endif /********************************************************************* @@ -268,10 +291,10 @@ OSMetaClassBase::OSMetaClassBase() *********************************************************************/ OSMetaClassBase::~OSMetaClassBase() { - void ** thisVTable; + void ** thisVTable; - thisVTable = (void **) this; - *thisVTable = (void *) -1UL; + thisVTable = (void **) this; + *thisVTable = (void *) -1UL; } /********************************************************************* @@ -279,7 +302,7 @@ OSMetaClassBase::~OSMetaClassBase() bool OSMetaClassBase::isEqualTo(const OSMetaClassBase * anObj) const { - return this == anObj; + return this == anObj; } /********************************************************************* @@ -287,7 +310,7 @@ OSMetaClassBase::isEqualTo(const OSMetaClassBase * anObj) const OSMetaClassBase * OSMetaClassBase::metaCast(const OSMetaClass * toMeta) const { - return toMeta->checkMetaCast(this); + return toMeta->checkMetaCast(this); } /********************************************************************* @@ -295,7 +318,7 @@ OSMetaClassBase::metaCast(const OSMetaClass * toMeta) const OSMetaClassBase * OSMetaClassBase::metaCast(const OSSymbol * toMetaSymb) const { - return OSMetaClass::checkMetaCastWithName(toMetaSymb, this); + return OSMetaClass::checkMetaCastWithName(toMetaSymb, this); } /********************************************************************* @@ -303,13 +326,13 @@ OSMetaClassBase::metaCast(const OSSymbol * toMetaSymb) const OSMetaClassBase * OSMetaClassBase::metaCast(const OSString * toMetaStr) const { - const OSSymbol * tempSymb = OSSymbol::withString(toMetaStr); - OSMetaClassBase * ret = 0; - if (tempSymb) { - ret = metaCast(tempSymb); - tempSymb->release(); - } - return ret; + const OSSymbol * tempSymb = OSSymbol::withString(toMetaStr); + OSMetaClassBase * ret = 0; + if (tempSymb) { + ret = metaCast(tempSymb); + tempSymb->release(); + } + return ret; } /********************************************************************* @@ -317,13 +340,13 @@ OSMetaClassBase::metaCast(const OSString * toMetaStr) const OSMetaClassBase * OSMetaClassBase::metaCast(const char * toMetaCStr) const { - const OSSymbol * tempSymb = OSSymbol::withCString(toMetaCStr); - 
OSMetaClassBase * ret = 0; - if (tempSymb) { - ret = metaCast(tempSymb); - tempSymb->release(); - } - return ret; + const OSSymbol * tempSymb = OSSymbol::withCString(toMetaCStr); + OSMetaClassBase * ret = 0; + if (tempSymb) { + ret = metaCast(tempSymb); + tempSymb->release(); + } + return ret; } #if PRAGMA_MARK @@ -332,22 +355,30 @@ OSMetaClassBase::metaCast(const char * toMetaCStr) const /********************************************************************* * OSMetaClassMeta - the bootstrap metaclass of OSMetaClass *********************************************************************/ -class OSMetaClassMeta : public OSMetaClass +class OSMetaClassMeta : public OSMetaClass { public: - OSMetaClassMeta(); - OSObject * alloc() const; + OSMetaClassMeta(); + OSObject * alloc() const; }; OSMetaClassMeta::OSMetaClassMeta() - : OSMetaClass("OSMetaClass", 0, sizeof(OSMetaClass)) - { } -OSObject * OSMetaClassMeta::alloc() const { return 0; } + : OSMetaClass("OSMetaClass", 0, sizeof(OSMetaClass)) +{ +} +OSObject * +OSMetaClassMeta::alloc() const +{ + return 0; +} static OSMetaClassMeta sOSMetaClassMeta; const OSMetaClass * const OSMetaClass::metaClass = &sOSMetaClassMeta; -const OSMetaClass * OSMetaClass::getMetaClass() const - { return &sOSMetaClassMeta; } +const OSMetaClass * +OSMetaClass::getMetaClass() const +{ + return &sOSMetaClassMeta; +} #if PRAGMA_MARK #pragma mark OSMetaClass @@ -360,76 +391,100 @@ const OSMetaClass * OSMetaClass::getMetaClass() const /********************************************************************* * Reserved functions. *********************************************************************/ -void OSMetaClass::_RESERVEDOSMetaClass0() - { panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 0); } -void OSMetaClass::_RESERVEDOSMetaClass1() - { panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 1); } -void OSMetaClass::_RESERVEDOSMetaClass2() - { panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 2); } -void OSMetaClass::_RESERVEDOSMetaClass3() - { panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 3); } -void OSMetaClass::_RESERVEDOSMetaClass4() - { panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 4); } -void OSMetaClass::_RESERVEDOSMetaClass5() - { panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 5); } -void OSMetaClass::_RESERVEDOSMetaClass6() - { panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 6); } -void OSMetaClass::_RESERVEDOSMetaClass7() - { panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 7); } +void +OSMetaClass::_RESERVEDOSMetaClass0() +{ + panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 0); +} +void +OSMetaClass::_RESERVEDOSMetaClass1() +{ + panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 1); +} +void +OSMetaClass::_RESERVEDOSMetaClass2() +{ + panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 2); +} +void +OSMetaClass::_RESERVEDOSMetaClass3() +{ + panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 3); +} +void +OSMetaClass::_RESERVEDOSMetaClass4() +{ + panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 4); +} +void +OSMetaClass::_RESERVEDOSMetaClass5() +{ + panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 5); +} +void +OSMetaClass::_RESERVEDOSMetaClass6() +{ + panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 6); +} +void +OSMetaClass::_RESERVEDOSMetaClass7() +{ + panic("OSMetaClass::_RESERVEDOSMetaClass%d called", 7); +} #endif /********************************************************************* *********************************************************************/ static void OSMetaClassLogErrorForKext( - OSReturn error, - 
OSKext * aKext) + OSReturn error, + OSKext * aKext) { - const char * message = NULL; + const char * message = NULL; - switch (error) { - case kOSReturnSuccess: - return; - case kOSMetaClassNoInit: // xxx - never returned; logged at fail site - message = "OSMetaClass: preModLoad() wasn't called (runtime internal error)."; - break; - case kOSMetaClassNoDicts: - message = "OSMetaClass: Allocation failure for OSMetaClass internal dictionaries."; - break; - case kOSMetaClassNoKModSet: - message = "OSMetaClass: Allocation failure for internal kext recording set/set missing."; - break; - case kOSMetaClassNoInsKModSet: - message = "OSMetaClass: Failed to record class in kext."; - break; - case kOSMetaClassDuplicateClass: - message = "OSMetaClass: Duplicate class encountered."; - break; - case kOSMetaClassNoSuper: // xxx - never returned - message = "OSMetaClass: Can't associate a class with its superclass."; - break; - case kOSMetaClassInstNoSuper: // xxx - never returned - message = "OSMetaClass: Instance construction error; unknown superclass."; - break; - case kOSMetaClassNoKext: - message = "OSMetaClass: Kext not found for metaclass."; - break; - case kOSMetaClassInternal: - default: - message = "OSMetaClass: Runtime internal error."; - break; - } - - if (message) { - OSKextLog(aKext, kOSMetaClassLogSpec, "%s", message); - } - return; + switch (error) { + case kOSReturnSuccess: + return; + case kOSMetaClassNoInit: // xxx - never returned; logged at fail site + message = "OSMetaClass: preModLoad() wasn't called (runtime internal error)."; + break; + case kOSMetaClassNoDicts: + message = "OSMetaClass: Allocation failure for OSMetaClass internal dictionaries."; + break; + case kOSMetaClassNoKModSet: + message = "OSMetaClass: Allocation failure for internal kext recording set/set missing."; + break; + case kOSMetaClassNoInsKModSet: + message = "OSMetaClass: Failed to record class in kext."; + break; + case kOSMetaClassDuplicateClass: + message = "OSMetaClass: Duplicate class encountered."; + break; + case kOSMetaClassNoSuper: // xxx - never returned + message = "OSMetaClass: Can't associate a class with its superclass."; + break; + case kOSMetaClassInstNoSuper: // xxx - never returned + message = "OSMetaClass: Instance construction error; unknown superclass."; + break; + case kOSMetaClassNoKext: + message = "OSMetaClass: Kext not found for metaclass."; + break; + case kOSMetaClassInternal: + default: + message = "OSMetaClass: Runtime internal error."; + break; + } + + if (message) { + OSKextLog(aKext, kOSMetaClassLogSpec, "%s", message); + } + return; } void OSMetaClass::logError(OSReturn error) { - OSMetaClassLogErrorForKext(error, NULL); + OSMetaClassLogErrorForKext(error, NULL); } /********************************************************************* @@ -442,165 +497,191 @@ OSMetaClass::logError(OSReturn error) * records all the class/kext relationships of the new MetaClasses. 
*********************************************************************/ OSMetaClass::OSMetaClass( - const char * inClassName, - const OSMetaClass * inSuperClass, - unsigned int inClassSize) + const char * inClassName, + const OSMetaClass * inSuperClass, + unsigned int inClassSize) { - instanceCount = 0; - classSize = inClassSize; - superClassLink = inSuperClass; + instanceCount = 0; + classSize = inClassSize; + superClassLink = inSuperClass; - reserved = IONew(ExpansionData, 1); - bzero(reserved, sizeof(ExpansionData)); + reserved = IONew(ExpansionData, 1); + bzero(reserved, sizeof(ExpansionData)); #if IOTRACKING - uint32_t numSiteQs = 0; - if ((this == &OSSymbol ::gMetaClass) - || (this == &OSString ::gMetaClass) - || (this == &OSNumber ::gMetaClass) - || (this == &OSString ::gMetaClass) - || (this == &OSData ::gMetaClass) - || (this == &OSDictionary::gMetaClass) - || (this == &OSArray ::gMetaClass) - || (this == &OSSet ::gMetaClass)) numSiteQs = 27; - - reserved->tracking = IOTrackingQueueAlloc(inClassName, (uintptr_t) this, - inClassSize, 0, kIOTrackingQueueTypeAlloc, - numSiteQs); -#endif - - /* Hack alert: We are just casting inClassName and storing it in - * an OSString * instance variable. This may be because you can't - * create C++ objects in static constructors, but I really don't know! - */ - className = (const OSSymbol *)inClassName; - - // sStalledClassesLock taken in preModLoad - if (!sStalled) { - /* There's no way we can look up the kext here, unfortunately. - */ - OSKextLog(/* kext */ NULL, kOSMetaClassLogSpec, - "OSMetaClass: preModLoad() wasn't called for class %s " - "(runtime internal error).", - inClassName); - } else if (!sStalled->result) { - // Grow stalled array if neccessary - if (sStalled->count >= sStalled->capacity) { - OSMetaClass **oldStalled = sStalled->classes; - int oldSize = sStalled->capacity * sizeof(OSMetaClass *); - int newSize = oldSize - + kKModCapacityIncrement * sizeof(OSMetaClass *); - - sStalled->classes = (OSMetaClass **)kalloc_tag(newSize, VM_KERN_MEMORY_OSKEXT); - if (!sStalled->classes) { - sStalled->classes = oldStalled; - sStalled->result = kOSMetaClassNoTempData; - return; - } + uint32_t numSiteQs = 0; + if ((this == &OSSymbol ::gMetaClass) + || (this == &OSString ::gMetaClass) + || (this == &OSNumber ::gMetaClass) + || (this == &OSString ::gMetaClass) + || (this == &OSData ::gMetaClass) + || (this == &OSDictionary::gMetaClass) + || (this == &OSArray ::gMetaClass) + || (this == &OSSet ::gMetaClass)) { + numSiteQs = 27; + } - sStalled->capacity += kKModCapacityIncrement; - memmove(sStalled->classes, oldStalled, oldSize); - kfree(oldStalled, oldSize); - OSMETA_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); - } + reserved->tracking = IOTrackingQueueAlloc(inClassName, (uintptr_t) this, + inClassSize, 0, kIOTrackingQueueTypeAlloc, + numSiteQs); +#endif - sStalled->classes[sStalled->count++] = this; - } + /* Hack alert: We are just casting inClassName and storing it in + * an OSString * instance variable. This may be because you can't + * create C++ objects in static constructors, but I really don't know! + */ + className = (const OSSymbol *)inClassName; + + // sStalledClassesLock taken in preModLoad + if (!sStalled) { + /* There's no way we can look up the kext here, unfortunately. 
+         */
+        OSKextLog(/* kext */ NULL, kOSMetaClassLogSpec,
+            "OSMetaClass: preModLoad() wasn't called for class %s "
+            "(runtime internal error).",
+            inClassName);
+    } else if (!sStalled->result) {
+        // Grow stalled array if necessary
+        if (sStalled->count >= sStalled->capacity) {
+            OSMetaClass **oldStalled = sStalled->classes;
+            int oldSize = sStalled->capacity * sizeof(OSMetaClass *);
+            int newSize = oldSize
+                + kKModCapacityIncrement * sizeof(OSMetaClass *);
+
+            sStalled->classes = (OSMetaClass **)kalloc_tag(newSize, VM_KERN_MEMORY_OSKEXT);
+            if (!sStalled->classes) {
+                sStalled->classes = oldStalled;
+                sStalled->result = kOSMetaClassNoTempData;
+                return;
+            }
+
+            sStalled->capacity += kKModCapacityIncrement;
+            memmove(sStalled->classes, oldStalled, oldSize);
+            kfree(oldStalled, oldSize);
+            OSMETA_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize));
+        }
+
+        sStalled->classes[sStalled->count++] = this;
+    }
 }
 
 /*********************************************************************
 *********************************************************************/
 OSMetaClass::~OSMetaClass()
 {
-    OSKext * myKext = reserved ? reserved->kext : 0; // do not release
-
-    /* Hack alert: 'className' is a C string during early C++ init, and
-     * is converted to a real OSSymbol only when we record the OSKext in
-     * OSMetaClass::postModLoad(). So only do this bit if we have an OSKext.
-     * We can't safely cast or check 'className'.
-     *
-     * Also, release className *after* calling into the kext,
-     * as removeClass() may access className.
-     */
-    IOLockLock(sAllClassesLock);
-    if (sAllClassesDict) {
-        if (myKext) {
-            sAllClassesDict->removeObject(className);
-        } else {
-            sAllClassesDict->removeObject((const char *)className);
-        }
-    }
-    IOLockUnlock(sAllClassesLock);
-
-    if (myKext) {
-        if (myKext->removeClass(this) != kOSReturnSuccess) {
-            // xxx - what can we do?
-        }
-        className->release();
-    }
-
-    // sStalledClassesLock taken in preModLoad
-    if (sStalled) {
-        unsigned int i;
-
-        /* First pass find class in stalled list. If we find it that means
-         * we started C++ init with constructors but now we're tearing down
-         * because of some failure.
-         */
-        for (i = 0; i < sStalled->count; i++) {
-            if (this == sStalled->classes[i]) {
-                break;
-            }
-        }
-
-        /* Remove this metaclass from the stalled list so postModLoad() doesn't
-         * try to register it.
-         */
-        if (i < sStalled->count) {
-            sStalled->count--;
-            if (i < sStalled->count) {
-                memmove(&sStalled->classes[i], &sStalled->classes[i+1],
-                    (sStalled->count - i) * sizeof(OSMetaClass *));
-            }
-        }
-    }
+    OSKext * myKext = reserved ? reserved->kext : 0; // do not release
+
+    /* Hack alert: 'className' is a C string during early C++ init, and
+     * is converted to a real OSSymbol only when we record the OSKext in
+     * OSMetaClass::postModLoad(). So only do this bit if we have an OSKext.
+     * We can't safely cast or check 'className'.
+     *
+     * Also, release className *after* calling into the kext,
+     * as removeClass() may access className.
+     */
+    IOLockLock(sAllClassesLock);
+    if (sAllClassesDict) {
+        if (myKext) {
+            sAllClassesDict->removeObject(className);
+        } else {
+            sAllClassesDict->removeObject((const char *)className);
+        }
+    }
+    IOLockUnlock(sAllClassesLock);
+
+    if (myKext) {
+        if (myKext->removeClass(this) != kOSReturnSuccess) {
+            // xxx - what can we do?
+        }
+        className->release();
+    }
+
+    // sStalledClassesLock taken in preModLoad
+    if (sStalled) {
+        unsigned int i;
+
+        /* First pass find class in stalled list.
If we find it that means + * we started C++ init with constructors but now we're tearing down + * because of some failure. + */ + for (i = 0; i < sStalled->count; i++) { + if (this == sStalled->classes[i]) { + break; + } + } + + /* Remove this metaclass from the stalled list so postModLoad() doesn't + * try to register it. + */ + if (i < sStalled->count) { + sStalled->count--; + if (i < sStalled->count) { + memmove(&sStalled->classes[i], &sStalled->classes[i + 1], + (sStalled->count - i) * sizeof(OSMetaClass *)); + } + } + } #if IOTRACKING - IOTrackingQueueFree(reserved->tracking); + IOTrackingQueueFree(reserved->tracking); #endif - IODelete(reserved, ExpansionData, 1); + IODelete(reserved, ExpansionData, 1); } /********************************************************************* * Empty overrides. *********************************************************************/ -void OSMetaClass::retain() const { } -void OSMetaClass::release() const { } -void OSMetaClass::release(__unused int when) const { } -void OSMetaClass::taggedRetain(__unused const void * tag) const { } -void OSMetaClass::taggedRelease(__unused const void * tag) const { } -void OSMetaClass::taggedRelease(__unused const void * tag, __unused const int when) const { } -int OSMetaClass::getRetainCount() const { return 0; } +void +OSMetaClass::retain() const +{ +} +void +OSMetaClass::release() const +{ +} +void +OSMetaClass::release(__unused int when) const +{ +} +void +OSMetaClass::taggedRetain(__unused const void * tag) const +{ +} +void +OSMetaClass::taggedRelease(__unused const void * tag) const +{ +} +void +OSMetaClass::taggedRelease(__unused const void * tag, __unused const int when) const +{ +} +int +OSMetaClass::getRetainCount() const +{ + return 0; +} /********************************************************************* *********************************************************************/ const char * OSMetaClass::getClassName() const { - if (!className) return NULL; - return className->getCStringNoCopy(); + if (!className) { + return NULL; + } + return className->getCStringNoCopy(); } /********************************************************************* *********************************************************************/ const OSSymbol * OSMetaClass::getClassNameSymbol() const { - return className; + return className; } /********************************************************************* *********************************************************************/ unsigned int OSMetaClass::getClassSize() const { - return classSize; + return classSize; } /********************************************************************* @@ -608,30 +689,30 @@ OSMetaClass::getClassSize() const void * OSMetaClass::preModLoad(const char * kextIdentifier) { - IOLockLock(sStalledClassesLock); - - assert (sStalled == NULL); - sStalled = (StalledData *)kalloc_tag(sizeof(* sStalled), VM_KERN_MEMORY_OSKEXT); - if (sStalled) { - sStalled->classes = (OSMetaClass **) - kalloc_tag(kKModCapacityIncrement * sizeof(OSMetaClass *), VM_KERN_MEMORY_OSKEXT); - if (!sStalled->classes) { - kfree(sStalled, sizeof(*sStalled)); - return 0; - } - OSMETA_ACCUMSIZE((kKModCapacityIncrement * sizeof(OSMetaClass *)) + - sizeof(*sStalled)); + IOLockLock(sStalledClassesLock); + + assert(sStalled == NULL); + sStalled = (StalledData *)kalloc_tag(sizeof(*sStalled), VM_KERN_MEMORY_OSKEXT); + if (sStalled) { + sStalled->classes = (OSMetaClass **) + kalloc_tag(kKModCapacityIncrement * sizeof(OSMetaClass *), VM_KERN_MEMORY_OSKEXT); + if (!sStalled->classes) { + 
kfree(sStalled, sizeof(*sStalled));
+        return 0;
+    }
+    OSMETA_ACCUMSIZE((kKModCapacityIncrement * sizeof(OSMetaClass *)) +
+        sizeof(*sStalled));
+
+    sStalled->result = kOSReturnSuccess;
+    sStalled->capacity = kKModCapacityIncrement;
+    sStalled->count = 0;
+    sStalled->kextIdentifier = kextIdentifier;
+    bzero(sStalled->classes, kKModCapacityIncrement * sizeof(OSMetaClass *));
+    }
-        sStalled->result = kOSReturnSuccess;
-        sStalled->capacity = kKModCapacityIncrement;
-        sStalled->count = 0;
-        sStalled->kextIdentifier = kextIdentifier;
-        bzero(sStalled->classes, kKModCapacityIncrement * sizeof(OSMetaClass *));
-    }
+    // keep sStalledClassesLock locked until postModLoad
-    // keep sStalledClassesLock locked until postModLoad
-
-    return sStalled;
+    return sStalled;
 }
 
 /*********************************************************************
@@ -639,8 +720,8 @@ OSMetaClass::preModLoad(const char * kextIdentifier)
 bool
 OSMetaClass::checkModLoad(void * loadHandle)
 {
-    return sStalled && loadHandle == sStalled &&
-        sStalled->result == kOSReturnSuccess;
+    return sStalled && loadHandle == sStalled &&
+        sStalled->result == kOSReturnSuccess;
 }
 
 /*********************************************************************
@@ -648,162 +729,165 @@ OSMetaClass::checkModLoad(void * loadHandle)
 OSReturn
 OSMetaClass::postModLoad(void * loadHandle)
 {
-    OSReturn result = kOSReturnSuccess;
-    OSSymbol * myKextName = 0; // must release
-    OSKext * myKext = 0; // must release
-
-    if (!sStalled || loadHandle != sStalled) {
-        result = kOSMetaClassInternal;
-        goto finish;
-    }
-
-    if (sStalled->result) {
-        result = sStalled->result;
-    } else switch (sBootstrapState) {
-
-        case kNoDictionaries:
-            sBootstrapState = kMakingDictionaries;
-            // No break; fall through
-            [[clang::fallthrough]];
-
-        case kMakingDictionaries:
-            sAllClassesDict = OSDictionary::withCapacity(kClassCapacityIncrement);
-            if (!sAllClassesDict) {
-                result = kOSMetaClassNoDicts;
-                break;
-            }
-            sAllClassesDict->setOptions(OSCollection::kSort, OSCollection::kSort);
-
-            // No break; fall through
-            [[clang::fallthrough]];
-
-        case kCompletedBootstrap:
-        {
-            unsigned int i;
-            myKextName = const_cast<OSSymbol *>(OSSymbol::withCStringNoCopy(
-                sStalled->kextIdentifier));
-
-            if (!sStalled->count) {
-                break; // Nothing to do so just get out
-            }
-
-            myKext = OSKext::lookupKextWithIdentifier(myKextName);
-            if (!myKext) {
-                result = kOSMetaClassNoKext;
-
-                /* Log this error here so we can include the kext name.
-                 */
-                OSKextLog(/* kext */ NULL, kOSMetaClassLogSpec,
-                    "OSMetaClass: Can't record classes for kext %s - kext not found.",
-                    sStalled->kextIdentifier);
-                break;
-            }
-
-            /* First pass checking classes aren't already loaded. If any already
-             * exist, we don't register any, and so we don't technically have
-             * to do any C++ teardown.
-             *
-             * Hack alert: me->className has been a C string until now.
-             * We only release the OSSymbol if we store the kext.
-             */
-            IOLockLock(sAllClassesLock);
-            for (i = 0; i < sStalled->count; i++) {
-                const OSMetaClass * me = sStalled->classes[i];
-                OSMetaClass * orig = OSDynamicCast(OSMetaClass,
-                    sAllClassesDict->getObject((const char *)me->className));
-
-                if (orig) {
-
-                    /* Log this error here so we can include the class name.
-                     * xxx - we should look up the other kext that defines the class
-                     */
+    OSReturn result = kOSReturnSuccess;
+    OSSymbol * myKextName = 0;// must release
+    OSKext * myKext = 0;// must release
+
+    if (!sStalled || loadHandle != sStalled) {
+        result = kOSMetaClassInternal;
+        goto finish;
+    }
+
+    if (sStalled->result) {
+        result = sStalled->result;
+    } else {
+        switch (sBootstrapState) {
+        case kNoDictionaries:
+            sBootstrapState = kMakingDictionaries;
+            // No break; fall through
+            [[clang::fallthrough]];
+
+        case kMakingDictionaries:
+            sAllClassesDict = OSDictionary::withCapacity(kClassCapacityIncrement);
+            if (!sAllClassesDict) {
+                result = kOSMetaClassNoDicts;
+                break;
+            }
+            sAllClassesDict->setOptions(OSCollection::kSort, OSCollection::kSort);
+
+            // No break; fall through
+            [[clang::fallthrough]];
+
+        case kCompletedBootstrap:
+        {
+            unsigned int i;
+            myKextName = const_cast<OSSymbol *>(OSSymbol::withCStringNoCopy(
+                sStalled->kextIdentifier));
+
+            if (!sStalled->count) {
+                break; // Nothing to do so just get out
+            }
+
+            myKext = OSKext::lookupKextWithIdentifier(myKextName);
+            if (!myKext) {
+                result = kOSMetaClassNoKext;
+
+                /* Log this error here so we can include the kext name.
+                 */
+                OSKextLog(/* kext */ NULL, kOSMetaClassLogSpec,
+                    "OSMetaClass: Can't record classes for kext %s - kext not found.",
+                    sStalled->kextIdentifier);
+                break;
+            }
+
+            /* First pass checking classes aren't already loaded. If any already
+             * exist, we don't register any, and so we don't technically have
+             * to do any C++ teardown.
+             *
+             * Hack alert: me->className has been a C string until now.
+             * We only release the OSSymbol if we store the kext.
+             */
+            IOLockLock(sAllClassesLock);
+            for (i = 0; i < sStalled->count; i++) {
+                const OSMetaClass * me = sStalled->classes[i];
+                OSMetaClass * orig = OSDynamicCast(OSMetaClass,
+                    sAllClassesDict->getObject((const char *)me->className));
+
+                if (orig) {
+                    /* Log this error here so we can include the class name.
+                     * xxx - we should look up the other kext that defines the class
+                     */
 #if CONFIG_EMBEDDED
-                    panic(
+                    panic(
 #else
-                    OSKextLog(myKext, kOSMetaClassLogSpec,
+                    OSKextLog(myKext, kOSMetaClassLogSpec,
 #endif /* CONFIG_EMBEDDED */
-                        "OSMetaClass: Kext %s class %s is a duplicate;"
-                        "kext %s already has a class by that name.",
-                        sStalled->kextIdentifier, (const char *)me->className,
-                        ((OSKext *)orig->reserved->kext)->getIdentifierCString());
-                    result = kOSMetaClassDuplicateClass;
-                    break;
-                }
-                unsigned int depth = 1;
-                while ((me = me->superClassLink)) depth++;
-                if (depth > sDeepestClass) sDeepestClass = depth;
-            }
-            IOLockUnlock(sAllClassesLock);
-
-            /* Bail if we didn't go through the entire list of new classes
-             * (if we hit a duplicate).
-             */
-            if (i != sStalled->count) {
-                break;
-            }
-
-            // Second pass symbolling strings and inserting classes in dictionary
-            IOLockLock(sAllClassesLock);
-            for (i = 0; i < sStalled->count; i++) {
-                OSMetaClass * me = sStalled->classes[i];
-
-                /* Hack alert: me->className has been a C string until now.
-                 * We only release the OSSymbol in ~OSMetaClass()
-                 * if we set the reference to the kext.
-                 */
-                me->className =
-                    OSSymbol::withCStringNoCopy((const char *)me->className);
-
-                // xxx - I suppose if these fail we're going to panic soon....
-                sAllClassesDict->setObject(me->className, me);
-
-                /* Do not retain the kext object here.
-                 */
-                me->reserved->kext = myKext;
-                if (myKext) {
-                    result = myKext->addClass(me, sStalled->count);
-                    if (result != kOSReturnSuccess) {
-                        /* OSKext::addClass() logs with kOSMetaClassNoInsKModSet.
*/ - break; - } - } - } - IOLockUnlock(sAllClassesLock); - sBootstrapState = kCompletedBootstrap; - break; - } - - default: - result = kOSMetaClassInternal; - break; - } - + "OSMetaClass: Kext %s class %s is a duplicate;" + "kext %s already has a class by that name.", + sStalled->kextIdentifier, (const char *)me->className, + ((OSKext *)orig->reserved->kext)->getIdentifierCString()); + result = kOSMetaClassDuplicateClass; + break; + } + unsigned int depth = 1; + while ((me = me->superClassLink)) { + depth++; + } + if (depth > sDeepestClass) { + sDeepestClass = depth; + } + } + IOLockUnlock(sAllClassesLock); + + /* Bail if we didn't go through the entire list of new classes + * (if we hit a duplicate). + */ + if (i != sStalled->count) { + break; + } + + // Second pass symbolling strings and inserting classes in dictionary + IOLockLock(sAllClassesLock); + for (i = 0; i < sStalled->count; i++) { + OSMetaClass * me = sStalled->classes[i]; + + /* Hack alert: me->className has been a C string until now. + * We only release the OSSymbol in ~OSMetaClass() + * if we set the reference to the kext. + */ + me->className = + OSSymbol::withCStringNoCopy((const char *)me->className); + + // xxx - I suppose if these fail we're going to panic soon.... + sAllClassesDict->setObject(me->className, me); + + /* Do not retain the kext object here. + */ + me->reserved->kext = myKext; + if (myKext) { + result = myKext->addClass(me, sStalled->count); + if (result != kOSReturnSuccess) { + /* OSKext::addClass() logs with kOSMetaClassNoInsKModSet. */ + break; + } + } + } + IOLockUnlock(sAllClassesLock); + sBootstrapState = kCompletedBootstrap; + break; + } + + default: + result = kOSMetaClassInternal; + break; + } + } + finish: - /* Don't call logError() for success or the conditions logged above - * or by called function. - */ - if (result != kOSReturnSuccess && - result != kOSMetaClassNoInsKModSet && - result != kOSMetaClassDuplicateClass && - result != kOSMetaClassNoKext) { + /* Don't call logError() for success or the conditions logged above + * or by called function. 
+ */ + if (result != kOSReturnSuccess && + result != kOSMetaClassNoInsKModSet && + result != kOSMetaClassDuplicateClass && + result != kOSMetaClassNoKext) { + OSMetaClassLogErrorForKext(result, myKext); + } - OSMetaClassLogErrorForKext(result, myKext); - } + OSSafeReleaseNULL(myKextName); + OSSafeReleaseNULL(myKext); - OSSafeReleaseNULL(myKextName); - OSSafeReleaseNULL(myKext); + if (sStalled) { + OSMETA_ACCUMSIZE(-(sStalled->capacity * sizeof(OSMetaClass *) + + sizeof(*sStalled))); + kfree(sStalled->classes, sStalled->capacity * sizeof(OSMetaClass *)); + kfree(sStalled, sizeof(*sStalled)); + sStalled = 0; + } - if (sStalled) { - OSMETA_ACCUMSIZE(-(sStalled->capacity * sizeof(OSMetaClass *) + - sizeof(*sStalled))); - kfree(sStalled->classes, sStalled->capacity * sizeof(OSMetaClass *)); - kfree(sStalled, sizeof(*sStalled)); - sStalled = 0; - } - - IOLockUnlock(sStalledClassesLock); + IOLockUnlock(sStalledClassesLock); - return result; + return result; } @@ -812,10 +896,10 @@ finish: void OSMetaClass::instanceConstructed() const { - // if ((0 == OSIncrementAtomic(&(((OSMetaClass *) this)->instanceCount))) && superClassLink) - if ((0 == OSIncrementAtomic(&instanceCount)) && superClassLink) { - superClassLink->instanceConstructed(); - } + // if ((0 == OSIncrementAtomic(&(((OSMetaClass *) this)->instanceCount))) && superClassLink) + if ((0 == OSIncrementAtomic(&instanceCount)) && superClassLink) { + superClassLink->instanceConstructed(); + } } /********************************************************************* @@ -823,18 +907,18 @@ OSMetaClass::instanceConstructed() const void OSMetaClass::instanceDestructed() const { - if ((1 == OSDecrementAtomic(&instanceCount)) && superClassLink) { - superClassLink->instanceDestructed(); - } + if ((1 == OSDecrementAtomic(&instanceCount)) && superClassLink) { + superClassLink->instanceDestructed(); + } - if (((int)instanceCount) < 0) { - OSKext * myKext = reserved->kext; + if (((int)instanceCount) < 0) { + OSKext * myKext = reserved->kext; - OSKextLog(myKext, kOSMetaClassLogSpec, - // xxx - this phrasing is rather cryptic - "OSMetaClass: Class %s - bad retain (%d)", - getClassName(), instanceCount); - } + OSKextLog(myKext, kOSMetaClassLogSpec, + // xxx - this phrasing is rather cryptic + "OSMetaClass: Class %s - bad retain (%d)", + getClassName(), instanceCount); + } } /********************************************************************* @@ -842,19 +926,19 @@ OSMetaClass::instanceDestructed() const bool OSMetaClass::modHasInstance(const char * kextIdentifier) { - bool result = false; - OSKext * theKext = NULL; // must release - - theKext = OSKext::lookupKextWithIdentifier(kextIdentifier); - if (!theKext) { - goto finish; - } - - result = theKext->hasOSMetaClassInstances(); + bool result = false; + OSKext * theKext = NULL; // must release + + theKext = OSKext::lookupKextWithIdentifier(kextIdentifier); + if (!theKext) { + goto finish; + } + + result = theKext->hasOSMetaClassInstances(); finish: - OSSafeReleaseNULL(theKext); - return result; + OSSafeReleaseNULL(theKext); + return result; } /********************************************************************* @@ -862,9 +946,9 @@ finish: void OSMetaClass::reportModInstances(const char * kextIdentifier) { - OSKext::reportOSMetaClassInstances(kextIdentifier, - kOSKextLogExplicitLevel); - return; + OSKext::reportOSMetaClassInstances(kextIdentifier, + kOSKextLogExplicitLevel); + return; } /********************************************************************* 
*********************************************************************/ @@ -872,133 +956,140 @@ OSMetaClass::reportModInstances(const char * kextIdentifier) void OSMetaClass::addInstance(const OSObject * instance, bool super) const { - if (!super) IOLockLock(sInstancesLock); + if (!super) { + IOLockLock(sInstancesLock); + } - if (!reserved->instances) { - reserved->instances = OSOrderedSet::withCapacity(16); - if (superClassLink) { - superClassLink->addInstance(reserved->instances, true); + if (!reserved->instances) { + reserved->instances = OSOrderedSet::withCapacity(16); + if (superClassLink) { + superClassLink->addInstance(reserved->instances, true); + } } - } - reserved->instances->setLastObject(instance); + reserved->instances->setLastObject(instance); - if (!super) IOLockUnlock(sInstancesLock); + if (!super) { + IOLockUnlock(sInstancesLock); + } } void OSMetaClass::removeInstance(const OSObject * instance, bool super) const { - if (!super) IOLockLock(sInstancesLock); - - if (reserved->instances) { - reserved->instances->removeObject(instance); - if (0 == reserved->instances->getCount()) { - if (superClassLink) { - superClassLink->removeInstance(reserved->instances, true); - } - IOLockLock(sAllClassesLock); - reserved->instances->release(); - reserved->instances = 0; - IOLockUnlock(sAllClassesLock); + if (!super) { + IOLockLock(sInstancesLock); } - } - if (!super) IOLockUnlock(sInstancesLock); + if (reserved->instances) { + reserved->instances->removeObject(instance); + if (0 == reserved->instances->getCount()) { + if (superClassLink) { + superClassLink->removeInstance(reserved->instances, true); + } + IOLockLock(sAllClassesLock); + reserved->instances->release(); + reserved->instances = 0; + IOLockUnlock(sAllClassesLock); + } + } + + if (!super) { + IOLockUnlock(sInstancesLock); + } } void OSMetaClass::applyToInstances(OSOrderedSet * set, - OSMetaClassInstanceApplierFunction applier, - void * context) -{ - enum { kLocalDepth = 24 }; - unsigned int _nextIndex[kLocalDepth]; - OSOrderedSet * _sets[kLocalDepth]; - unsigned int * nextIndex = &_nextIndex[0]; - OSOrderedSet ** sets = &_sets[0]; - OSObject * obj; - OSOrderedSet * childSet; - unsigned int maxDepth; - unsigned int idx; - unsigned int level; - bool done; - - maxDepth = sDeepestClass; - if (maxDepth > kLocalDepth) - { - nextIndex = IONew(typeof(nextIndex[0]), maxDepth); - sets = IONew(typeof(sets[0]), maxDepth); - } - done = false; - level = 0; - idx = 0; - do - { - while (!done && (obj = set->getObject(idx++))) - { - if ((childSet = OSDynamicCast(OSOrderedSet, obj))) - { - if (level >= maxDepth) panic(">maxDepth"); - sets[level] = set; - nextIndex[level] = idx; - level++; - set = childSet; - idx = 0; - break; - } - done = (*applier)(obj, context); + OSMetaClassInstanceApplierFunction applier, + void * context) +{ + enum { kLocalDepth = 24 }; + unsigned int _nextIndex[kLocalDepth]; + OSOrderedSet * _sets[kLocalDepth]; + unsigned int * nextIndex = &_nextIndex[0]; + OSOrderedSet ** sets = &_sets[0]; + OSObject * obj; + OSOrderedSet * childSet; + unsigned int maxDepth; + unsigned int idx; + unsigned int level; + bool done; + + maxDepth = sDeepestClass; + if (maxDepth > kLocalDepth) { + nextIndex = IONew(typeof(nextIndex[0]), maxDepth); + sets = IONew(typeof(sets[0]), maxDepth); } - if (!obj) - { - if (!done && level) - { - level--; - set = sets[level]; - idx = nextIndex[level]; - } else done = true; + done = false; + level = 0; + idx = 0; + do{ + while (!done && (obj = set->getObject(idx++))) { + if ((childSet = 
OSDynamicCast(OSOrderedSet, obj))) { + if (level >= maxDepth) { + panic(">maxDepth"); + } + sets[level] = set; + nextIndex[level] = idx; + level++; + set = childSet; + idx = 0; + break; + } + done = (*applier)(obj, context); + } + if (!obj) { + if (!done && level) { + level--; + set = sets[level]; + idx = nextIndex[level]; + } else { + done = true; + } + } + }while (!done); + if (maxDepth > kLocalDepth) { + IODelete(nextIndex, typeof(nextIndex[0]), maxDepth); + IODelete(sets, typeof(sets[0]), maxDepth); } - } - while (!done); - if (maxDepth > kLocalDepth) - { - IODelete(nextIndex, typeof(nextIndex[0]), maxDepth); - IODelete(sets, typeof(sets[0]), maxDepth); - } } void OSMetaClass::applyToInstances(OSMetaClassInstanceApplierFunction applier, - void * context) const + void * context) const { - IOLockLock(sInstancesLock); - if (reserved->instances) applyToInstances(reserved->instances, applier, context); - IOLockUnlock(sInstancesLock); + IOLockLock(sInstancesLock); + if (reserved->instances) { + applyToInstances(reserved->instances, applier, context); + } + IOLockUnlock(sInstancesLock); } void OSMetaClass::applyToInstancesOfClassName( - const OSSymbol * name, - OSMetaClassInstanceApplierFunction applier, - void * context) + const OSSymbol * name, + OSMetaClassInstanceApplierFunction applier, + void * context) { - OSMetaClass * meta; - OSOrderedSet * set = 0; - - IOLockLock(sAllClassesLock); - if (sAllClassesDict - && (meta = (OSMetaClass *) sAllClassesDict->getObject(name)) - && (set = meta->reserved->instances)) - { - set->retain(); - } - IOLockUnlock(sAllClassesLock); + OSMetaClass * meta; + OSOrderedSet * set = 0; + + IOLockLock(sAllClassesLock); + if (sAllClassesDict + && (meta = (OSMetaClass *) sAllClassesDict->getObject(name)) + && (set = meta->reserved->instances)) { + set->retain(); + } + IOLockUnlock(sAllClassesLock); - if (!set) return; + if (!set) { + return; + } - IOLockLock(sInstancesLock); - applyToInstances(set, applier, context); - IOLockUnlock(sInstancesLock); - set->release(); + IOLockLock(sInstancesLock); + applyToInstances(set, applier, context); + IOLockUnlock(sInstancesLock); + set->release(); } /********************************************************************* @@ -1006,7 +1097,7 @@ OSMetaClass::applyToInstancesOfClassName( void OSMetaClass::considerUnloads() { - OSKext::considerUnloads(); + OSKext::considerUnloads(); } /********************************************************************* @@ -1014,35 +1105,37 @@ OSMetaClass::considerUnloads() bool OSMetaClass::removeClasses(OSCollection * metaClasses) { - OSCollectionIterator * classIterator; - OSMetaClass * checkClass; - bool result; - - classIterator = OSCollectionIterator::withCollection(metaClasses); - if (!classIterator) return (false); + OSCollectionIterator * classIterator; + OSMetaClass * checkClass; + bool result; - IOLockLock(sAllClassesLock); - - result = false; - do - { - while ((checkClass = (OSMetaClass *)classIterator->getNextObject()) - && !checkClass->getInstanceCount() - && !checkClass->reserved->retain) {} - if (checkClass) break; - classIterator->reset(); - while ((checkClass = (OSMetaClass *)classIterator->getNextObject())) - { - sAllClassesDict->removeObject(checkClass->className); - } - result = true; - } - while (false); + classIterator = OSCollectionIterator::withCollection(metaClasses); + if (!classIterator) { + return false; + } - IOLockUnlock(sAllClassesLock); - OSSafeReleaseNULL(classIterator); + IOLockLock(sAllClassesLock); - return (result); + result = false; + do{ + while ((checkClass 
= (OSMetaClass *)classIterator->getNextObject()) + && !checkClass->getInstanceCount() + && !checkClass->reserved->retain) { + } + if (checkClass) { + break; + } + classIterator->reset(); + while ((checkClass = (OSMetaClass *)classIterator->getNextObject())) { + sAllClassesDict->removeObject(checkClass->className); + } + result = true; + }while (false); + + IOLockUnlock(sAllClassesLock); + OSSafeReleaseNULL(classIterator); + + return result; } @@ -1051,19 +1144,19 @@ OSMetaClass::removeClasses(OSCollection * metaClasses) const OSMetaClass * OSMetaClass::getMetaClassWithName(const OSSymbol * name) { - OSMetaClass * retMeta = 0; + OSMetaClass * retMeta = 0; - if (!name) { - return 0; - } + if (!name) { + return 0; + } - IOLockLock(sAllClassesLock); - if (sAllClassesDict) { - retMeta = (OSMetaClass *) sAllClassesDict->getObject(name); - } - IOLockUnlock(sAllClassesLock); + IOLockLock(sAllClassesLock); + if (sAllClassesDict) { + retMeta = (OSMetaClass *) sAllClassesDict->getObject(name); + } + IOLockUnlock(sAllClassesLock); - return retMeta; + return retMeta; } /********************************************************************* @@ -1071,19 +1164,23 @@ OSMetaClass::getMetaClassWithName(const OSSymbol * name) const OSMetaClass * OSMetaClass::copyMetaClassWithName(const OSSymbol * name) { - const OSMetaClass * meta; + const OSMetaClass * meta; - if (!name) return (0); + if (!name) { + return 0; + } - meta = 0; - IOLockLock(sAllClassesLock); - if (sAllClassesDict) { - meta = (OSMetaClass *) sAllClassesDict->getObject(name); - if (meta) OSIncrementAtomic(&meta->reserved->retain); - } - IOLockUnlock(sAllClassesLock); + meta = 0; + IOLockLock(sAllClassesLock); + if (sAllClassesDict) { + meta = (OSMetaClass *) sAllClassesDict->getObject(name); + if (meta) { + OSIncrementAtomic(&meta->reserved->retain); + } + } + IOLockUnlock(sAllClassesLock); - return (meta); + return meta; } /********************************************************************* @@ -1091,7 +1188,7 @@ OSMetaClass::copyMetaClassWithName(const OSSymbol * name) void OSMetaClass::releaseMetaClass() const { - OSDecrementAtomic(&reserved->retain); + OSDecrementAtomic(&reserved->retain); } /********************************************************************* @@ -1099,18 +1196,17 @@ OSMetaClass::releaseMetaClass() const OSObject * OSMetaClass::allocClassWithName(const OSSymbol * name) { - const OSMetaClass * meta; - OSObject * result; - - result = 0; - meta = copyMetaClassWithName(name); - if (meta) - { - result = meta->alloc(); - meta->releaseMetaClass(); - } + const OSMetaClass * meta; + OSObject * result; + + result = 0; + meta = copyMetaClassWithName(name); + if (meta) { + result = meta->alloc(); + meta->releaseMetaClass(); + } - return result; + return result; } /********************************************************************* @@ -1118,10 +1214,10 @@ OSMetaClass::allocClassWithName(const OSSymbol * name) OSObject * OSMetaClass::allocClassWithName(const OSString * name) { - const OSSymbol * tmpKey = OSSymbol::withString(name); - OSObject * result = allocClassWithName(tmpKey); - tmpKey->release(); - return result; + const OSSymbol * tmpKey = OSSymbol::withString(name); + OSObject * result = allocClassWithName(tmpKey); + tmpKey->release(); + return result; } /********************************************************************* @@ -1129,10 +1225,10 @@ OSMetaClass::allocClassWithName(const OSString * name) OSObject * OSMetaClass::allocClassWithName(const char * name) { - const OSSymbol * tmpKey = OSSymbol::withCStringNoCopy(name); - 
OSObject * result = allocClassWithName(tmpKey); - tmpKey->release(); - return result; + const OSSymbol * tmpKey = OSSymbol::withCStringNoCopy(name); + OSObject * result = allocClassWithName(tmpKey); + tmpKey->release(); + return result; } @@ -1140,74 +1236,76 @@ OSMetaClass::allocClassWithName(const char * name) *********************************************************************/ OSMetaClassBase * OSMetaClass::checkMetaCastWithName( - const OSSymbol * name, - const OSMetaClassBase * in) + const OSSymbol * name, + const OSMetaClassBase * in) { - OSMetaClassBase * result = 0; + OSMetaClassBase * result = 0; - const OSMetaClass * const meta = getMetaClassWithName(name); + const OSMetaClass * const meta = getMetaClassWithName(name); - if (meta) { - result = meta->checkMetaCast(in); - } + if (meta) { + result = meta->checkMetaCast(in); + } - return result; + return result; } /********************************************************************* *********************************************************************/ -OSMetaClassBase * OSMetaClass:: +OSMetaClassBase * +OSMetaClass:: checkMetaCastWithName( - const OSString * name, - const OSMetaClassBase * in) + const OSString * name, + const OSMetaClassBase * in) { - const OSSymbol * tmpKey = OSSymbol::withString(name); - OSMetaClassBase * result = checkMetaCastWithName(tmpKey, in); + const OSSymbol * tmpKey = OSSymbol::withString(name); + OSMetaClassBase * result = checkMetaCastWithName(tmpKey, in); - tmpKey->release(); - return result; + tmpKey->release(); + return result; } /********************************************************************* *********************************************************************/ OSMetaClassBase * OSMetaClass::checkMetaCastWithName( - const char * name, - const OSMetaClassBase * in) + const char * name, + const OSMetaClassBase * in) { - const OSSymbol * tmpKey = OSSymbol::withCStringNoCopy(name); - OSMetaClassBase * result = checkMetaCastWithName(tmpKey, in); + const OSSymbol * tmpKey = OSSymbol::withCStringNoCopy(name); + OSMetaClassBase * result = checkMetaCastWithName(tmpKey, in); - tmpKey->release(); - return result; + tmpKey->release(); + return result; } /********************************************************************* - * OSMetaClass::checkMetaCast() - * Check to see if the 'check' object has this object in its metaclass chain. - * Returns check if it is indeed a kind of the current meta class, 0 otherwise. - * - * Generally this method is not invoked directly but is used to implement - * the OSMetaClassBase::metaCast member function. - * - * See also OSMetaClassBase::metaCast +* OSMetaClass::checkMetaCast() +* Check to see if the 'check' object has this object in its metaclass chain. +* Returns check if it is indeed a kind of the current meta class, 0 otherwise. +* +* Generally this method is not invoked directly but is used to implement +* the OSMetaClassBase::metaCast member function. 
+*
+* See also OSMetaClassBase::metaCast
 *********************************************************************/
-OSMetaClassBase * OSMetaClass::checkMetaCast(
-    const OSMetaClassBase * check) const
+OSMetaClassBase *
+OSMetaClass::checkMetaCast(
+    const OSMetaClassBase * check) const
 {
-    const OSMetaClass * const toMeta = this;
-    const OSMetaClass * fromMeta;
-
-    for (fromMeta = check->getMetaClass(); ; fromMeta = fromMeta->superClassLink) {
-        if (toMeta == fromMeta) {
-            return const_cast<OSMetaClassBase *>(check); // Discard const
-        }
-        if (!fromMeta->superClassLink) {
-            break;
-        }
-    }
+    const OSMetaClass * const toMeta = this;
+    const OSMetaClass * fromMeta;
+
+    for (fromMeta = check->getMetaClass();; fromMeta = fromMeta->superClassLink) {
+        if (toMeta == fromMeta) {
+            return const_cast<OSMetaClassBase *>(check); // Discard const
+        }
+        if (!fromMeta->superClassLink) {
+            break;
+        }
+    }
 
-    return 0;
+    return 0;
 }
 
 /*********************************************************************
@@ -1215,8 +1313,8 @@ OSMetaClassBase * OSMetaClass::checkMetaCast(
 void
 OSMetaClass::reservedCalled(int ind) const
 {
-    const char * cname = className->getCStringNoCopy();
-    panic("%s::_RESERVED%s%d called.", cname, cname, ind);
+    const char * cname = className->getCStringNoCopy();
+    panic("%s::_RESERVED%s%d called.", cname, cname, ind);
 }
 
 /*********************************************************************
@@ -1225,7 +1323,7 @@
 const OSMetaClass *
 OSMetaClass::getSuperClass() const
 {
-    return superClassLink;
+    return superClassLink;
 }
 
 /*********************************************************************
@@ -1234,11 +1332,11 @@ OSMetaClass::getSuperClass() const
 const OSSymbol *
 OSMetaClass::getKmodName() const
 {
-    OSKext * myKext = reserved ?
reserved->kext : 0; + if (myKext) { + return myKext->getIdentifier(); + } + return OSSymbol::withCStringNoCopy("unknown"); } /********************************************************************* @@ -1246,7 +1344,7 @@ OSMetaClass::getKmodName() const unsigned int OSMetaClass::getInstanceCount() const { - return instanceCount; + return instanceCount; } /********************************************************************* @@ -1255,28 +1353,28 @@ OSMetaClass::getInstanceCount() const void OSMetaClass::printInstanceCounts() { - OSCollectionIterator * classes; - OSSymbol * className; - OSMetaClass * meta; + OSCollectionIterator * classes; + OSSymbol * className; + OSMetaClass * meta; - IOLockLock(sAllClassesLock); - classes = OSCollectionIterator::withCollection(sAllClassesDict); - assert(classes); + IOLockLock(sAllClassesLock); + classes = OSCollectionIterator::withCollection(sAllClassesDict); + assert(classes); - while( (className = (OSSymbol *)classes->getNextObject())) { - meta = (OSMetaClass *)sAllClassesDict->getObject(className); - assert(meta); + while ((className = (OSSymbol *)classes->getNextObject())) { + meta = (OSMetaClass *)sAllClassesDict->getObject(className); + assert(meta); - printf("%24s count: %03d x 0x%03x = 0x%06x\n", - className->getCStringNoCopy(), - meta->getInstanceCount(), - meta->getClassSize(), - meta->getInstanceCount() * meta->getClassSize() ); - } - printf("\n"); - classes->release(); - IOLockUnlock(sAllClassesLock); - return; + printf("%24s count: %03d x 0x%03x = 0x%06x\n", + className->getCStringNoCopy(), + meta->getInstanceCount(), + meta->getClassSize(), + meta->getInstanceCount() * meta->getClassSize()); + } + printf("\n"); + classes->release(); + IOLockUnlock(sAllClassesLock); + return; } /********************************************************************* @@ -1284,8 +1382,8 @@ OSMetaClass::printInstanceCounts() OSDictionary * OSMetaClass::getClassDictionary() { - panic("OSMetaClass::getClassDictionary() is obsoleted.\n"); - return 0; + panic("OSMetaClass::getClassDictionary() is obsoleted.\n"); + return 0; } /********************************************************************* @@ -1293,8 +1391,8 @@ OSMetaClass::getClassDictionary() bool OSMetaClass::serialize(__unused OSSerialize * s) const { - panic("OSMetaClass::serialize(): Obsoleted\n"); - return false; + panic("OSMetaClass::serialize(): Obsoleted\n"); + return false; } /********************************************************************* @@ -1303,46 +1401,46 @@ OSMetaClass::serialize(__unused OSSerialize * s) const void OSMetaClass::serializeClassDictionary(OSDictionary * serializeDictionary) { - OSDictionary * classDict = NULL; + OSDictionary * classDict = NULL; - IOLockLock(sAllClassesLock); + IOLockLock(sAllClassesLock); - classDict = OSDictionary::withCapacity(sAllClassesDict->getCount()); - if (!classDict) { - goto finish; - } + classDict = OSDictionary::withCapacity(sAllClassesDict->getCount()); + if (!classDict) { + goto finish; + } - do { - OSCollectionIterator * classes; - const OSSymbol * className; + do { + OSCollectionIterator * classes; + const OSSymbol * className; - classes = OSCollectionIterator::withCollection(sAllClassesDict); - if (!classes) { - break; - } - - while ((className = (const OSSymbol *)classes->getNextObject())) { - const OSMetaClass * meta; - OSNumber * count; - - meta = (OSMetaClass *)sAllClassesDict->getObject(className); - count = OSNumber::withNumber(meta->getInstanceCount(), 32); - if (count) { - classDict->setObject(className, count); - count->release(); - } 
- } - classes->release(); + classes = OSCollectionIterator::withCollection(sAllClassesDict); + if (!classes) { + break; + } + + while ((className = (const OSSymbol *)classes->getNextObject())) { + const OSMetaClass * meta; + OSNumber * count; - serializeDictionary->setObject("Classes", classDict); - } while (0); + meta = (OSMetaClass *)sAllClassesDict->getObject(className); + count = OSNumber::withNumber(meta->getInstanceCount(), 32); + if (count) { + classDict->setObject(className, count); + count->release(); + } + } + classes->release(); + + serializeDictionary->setObject("Classes", classDict); + } while (0); finish: - OSSafeReleaseNULL(classDict); + OSSafeReleaseNULL(classDict); - IOLockUnlock(sAllClassesLock); + IOLockUnlock(sAllClassesLock); - return; + return; } @@ -1351,54 +1449,62 @@ finish: #if IOTRACKING -void *OSMetaClass::trackedNew(size_t size) +void * +OSMetaClass::trackedNew(size_t size) { - IOTracking * mem; + IOTracking * mem; - mem = (typeof(mem)) kalloc_tag_bt(size + sizeof(IOTracking), VM_KERN_MEMORY_LIBKERN); - assert(mem); - if (!mem) return (mem); + mem = (typeof(mem))kalloc_tag_bt(size + sizeof(IOTracking), VM_KERN_MEMORY_LIBKERN); + assert(mem); + if (!mem) { + return mem; + } - memset(mem, 0, size + sizeof(IOTracking)); - mem++; + memset(mem, 0, size + sizeof(IOTracking)); + mem++; - OSIVAR_ACCUMSIZE(size); + OSIVAR_ACCUMSIZE(size); - return (mem); + return mem; } -void OSMetaClass::trackedDelete(void * instance, size_t size) +void +OSMetaClass::trackedDelete(void * instance, size_t size) { - IOTracking * mem = (typeof(mem)) instance; mem--; + IOTracking * mem = (typeof(mem))instance; mem--; - kfree(mem, size + sizeof(IOTracking)); - OSIVAR_ACCUMSIZE(-size); + kfree(mem, size + sizeof(IOTracking)); + OSIVAR_ACCUMSIZE(-size); } -void OSMetaClass::trackedInstance(OSObject * instance) const +void +OSMetaClass::trackedInstance(OSObject * instance) const { - IOTracking * mem = (typeof(mem)) instance; mem--; + IOTracking * mem = (typeof(mem))instance; mem--; - return (IOTrackingAdd(reserved->tracking, mem, classSize, false, VM_KERN_MEMORY_NONE)); + return IOTrackingAdd(reserved->tracking, mem, classSize, false, VM_KERN_MEMORY_NONE); } -void OSMetaClass::trackedFree(OSObject * instance) const +void +OSMetaClass::trackedFree(OSObject * instance) const { - IOTracking * mem = (typeof(mem)) instance; mem--; + IOTracking * mem = (typeof(mem))instance; mem--; - return (IOTrackingRemove(reserved->tracking, mem, classSize)); + return IOTrackingRemove(reserved->tracking, mem, classSize); } -void OSMetaClass::trackedAccumSize(OSObject * instance, size_t size) const +void +OSMetaClass::trackedAccumSize(OSObject * instance, size_t size) const { - IOTracking * mem = (typeof(mem)) instance; mem--; + IOTracking * mem = (typeof(mem))instance; mem--; - return (IOTrackingAccumSize(reserved->tracking, mem, size)); + return IOTrackingAccumSize(reserved->tracking, mem, size); } -IOTrackingQueue * OSMetaClass::getTracking() const +IOTrackingQueue * +OSMetaClass::getTracking() const { - return (reserved->tracking); + return reserved->tracking; } #endif /* IOTRACKING */ diff --git a/libkern/c++/OSNumber.cpp b/libkern/c++/OSNumber.cpp index 36a9ded7e..ffbc9e793 100644 --- a/libkern/c++/OSNumber.cpp +++ b/libkern/c++/OSNumber.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOOffset.m created by rsulack on Wed 17-Sep-1997 */ @@ -49,117 +49,151 @@ OSMetaClassDefineReservedUnused(OSNumber, 5); OSMetaClassDefineReservedUnused(OSNumber, 6); OSMetaClassDefineReservedUnused(OSNumber, 7); -bool OSNumber::init(unsigned long long inValue, unsigned int newNumberOfBits) +bool +OSNumber::init(unsigned long long inValue, unsigned int newNumberOfBits) { - if (!super::init()) - return false; - if (newNumberOfBits > 64) - return false; + if (!super::init()) { + return false; + } + if (newNumberOfBits > 64) { + return false; + } - size = newNumberOfBits; - value = (inValue & sizeMask); + size = newNumberOfBits; + value = (inValue & sizeMask); - return true; + return true; } -bool OSNumber::init(const char *newValue, unsigned int newNumberOfBits) +bool +OSNumber::init(const char *newValue, unsigned int newNumberOfBits) { - return init((unsigned long long)strtoul(newValue, NULL, 0), newNumberOfBits); + return init((unsigned long long)strtoul(newValue, NULL, 0), newNumberOfBits); } -void OSNumber::free() { super::free(); } +void +OSNumber::free() +{ + super::free(); +} -OSNumber *OSNumber::withNumber(unsigned long long value, - unsigned int newNumberOfBits) +OSNumber * +OSNumber::withNumber(unsigned long long value, + unsigned int newNumberOfBits) { - OSNumber *me = new OSNumber; + OSNumber *me = new OSNumber; - if (me && !me->init(value, newNumberOfBits)) { - me->release(); - return 0; - } + if (me && !me->init(value, newNumberOfBits)) { + me->release(); + return 0; + } - return me; + return me; } -OSNumber *OSNumber::withNumber(const char *value, unsigned int newNumberOfBits) +OSNumber * +OSNumber::withNumber(const char *value, unsigned int newNumberOfBits) { - OSNumber *me = new OSNumber; + OSNumber *me = new OSNumber; - if (me && !me->init(value, newNumberOfBits)) { - me->release(); - return 0; - } + if (me && !me->init(value, newNumberOfBits)) { + me->release(); + return 0; + } - return me; + return me; } -unsigned int OSNumber::numberOfBits() const { return size; } +unsigned int +OSNumber::numberOfBits() const +{ + return size; +} -unsigned int OSNumber::numberOfBytes() const { return (size + 7) / 8; } +unsigned int +OSNumber::numberOfBytes() const +{ + return (size + 7) / 8; +} -unsigned char OSNumber::unsigned8BitValue() const +unsigned char +OSNumber::unsigned8BitValue() const { - return (unsigned char) value; + return (unsigned char) value; } -unsigned short OSNumber::unsigned16BitValue() const 
+unsigned short +OSNumber::unsigned16BitValue() const { - return (unsigned short) value; + return (unsigned short) value; } -unsigned int OSNumber::unsigned32BitValue() const +unsigned int +OSNumber::unsigned32BitValue() const { - return (unsigned int) value; + return (unsigned int) value; } -unsigned long long OSNumber::unsigned64BitValue() const +unsigned long long +OSNumber::unsigned64BitValue() const { - return value; + return value; } -void OSNumber::addValue(signed long long inValue) +void +OSNumber::addValue(signed long long inValue) { - value = ((value + inValue) & sizeMask); + value = ((value + inValue) & sizeMask); } -void OSNumber::setValue(unsigned long long inValue) +void +OSNumber::setValue(unsigned long long inValue) { - value = (inValue & sizeMask); + value = (inValue & sizeMask); } -bool OSNumber::isEqualTo(const OSNumber *integer) const +bool +OSNumber::isEqualTo(const OSNumber *integer) const { - return((value == integer->value)); + return value == integer->value; } -bool OSNumber::isEqualTo(const OSMetaClassBase *obj) const +bool +OSNumber::isEqualTo(const OSMetaClassBase *obj) const { - OSNumber * offset; - if ((offset = OSDynamicCast(OSNumber, obj))) - return isEqualTo(offset); - else - return false; + OSNumber * offset; + if ((offset = OSDynamicCast(OSNumber, obj))) { + return isEqualTo(offset); + } else { + return false; + } } -bool OSNumber::serialize(OSSerialize *s) const +bool +OSNumber::serialize(OSSerialize *s) const { - char temp[32]; - - if (s->previouslySerialized(this)) return true; - - snprintf(temp, sizeof(temp), "integer size=\"%d\"", size); - if (!s->addXMLStartTag(this, temp)) return false; - - //XXX sprintf(temp, "0x%qx", value); - if ((value >> 32)) { - snprintf(temp, sizeof(temp), "0x%lx%08lx", (unsigned long)(value >> 32), - (unsigned long)(value & 0xFFFFFFFF)); - } else { - snprintf(temp, sizeof(temp), "0x%lx", (unsigned long)value); - } - if (!s->addString(temp)) return false; - - return s->addXMLEndTag("integer"); + char temp[32]; + + if (s->previouslySerialized(this)) { + return true; + } + + snprintf(temp, sizeof(temp), "integer size=\"%d\"", size); + if (!s->addXMLStartTag(this, temp)) { + return false; + } + + //XXX sprintf(temp, "0x%qx", value); + if ((value >> 32)) { + snprintf(temp, sizeof(temp), "0x%lx%08lx", (unsigned long)(value >> 32), + (unsigned long)(value & 0xFFFFFFFF)); + } else { + snprintf(temp, sizeof(temp), "0x%lx", (unsigned long)value); + } + if (!s->addString(temp)) { + return false; + } + + return s->addXMLEndTag("integer"); } diff --git a/libkern/c++/OSObject.cpp b/libkern/c++/OSObject.cpp index 2928456f5..de9cc00fa 100644 --- a/libkern/c++/OSObject.cpp +++ b/libkern/c++/OSObject.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSObject.cpp created by gvdl on Fri 1998-11-17 */ @@ -53,27 +53,37 @@ const OSMetaClass * const OSObject::metaClass = &OSObject::gMetaClass; const OSMetaClass * const OSObject::superClass = 0; /* Class member functions - Can't use defaults */ -OSObject::~OSObject() { } -const OSMetaClass * OSObject::getMetaClass() const - { return &gMetaClass; } -OSObject *OSObject::MetaClass::alloc() const { return 0; } +OSObject::~OSObject() +{ +} +const OSMetaClass * +OSObject::getMetaClass() const +{ + return &gMetaClass; +} +OSObject * +OSObject::MetaClass::alloc() const +{ + return 0; +} /* The OSObject::MetaClass constructor */ OSObject::MetaClass::MetaClass() - : OSMetaClass("OSObject", OSObject::superClass, sizeof(OSObject)) - { } + : OSMetaClass("OSObject", OSObject::superClass, sizeof(OSObject)) +{ +} // Virtual Padding -OSMetaClassDefineReservedUnused(OSObject, 0); -OSMetaClassDefineReservedUnused(OSObject, 1); -OSMetaClassDefineReservedUnused(OSObject, 2); -OSMetaClassDefineReservedUnused(OSObject, 3); -OSMetaClassDefineReservedUnused(OSObject, 4); -OSMetaClassDefineReservedUnused(OSObject, 5); -OSMetaClassDefineReservedUnused(OSObject, 6); -OSMetaClassDefineReservedUnused(OSObject, 7); -OSMetaClassDefineReservedUnused(OSObject, 8); -OSMetaClassDefineReservedUnused(OSObject, 9); +OSMetaClassDefineReservedUnused(OSObject, 0); +OSMetaClassDefineReservedUnused(OSObject, 1); +OSMetaClassDefineReservedUnused(OSObject, 2); +OSMetaClassDefineReservedUnused(OSObject, 3); +OSMetaClassDefineReservedUnused(OSObject, 4); +OSMetaClassDefineReservedUnused(OSObject, 5); +OSMetaClassDefineReservedUnused(OSObject, 6); +OSMetaClassDefineReservedUnused(OSObject, 7); +OSMetaClassDefineReservedUnused(OSObject, 8); +OSMetaClassDefineReservedUnused(OSObject, 9); OSMetaClassDefineReservedUnused(OSObject, 10); OSMetaClassDefineReservedUnused(OSObject, 11); OSMetaClassDefineReservedUnused(OSObject, 12); @@ -81,227 +91,263 @@ OSMetaClassDefineReservedUnused(OSObject, 13); OSMetaClassDefineReservedUnused(OSObject, 14); OSMetaClassDefineReservedUnused(OSObject, 15); -static const char *getClassName(const OSObject *obj) +static const char * +getClassName(const OSObject *obj) { - const OSMetaClass *meta = obj->getMetaClass(); - return (meta) ? meta->getClassName() : "unknown class?"; + const OSMetaClass *meta = obj->getMetaClass(); + return (meta) ? meta->getClassName() : "unknown class?"; } -int OSObject::getRetainCount() const +int +OSObject::getRetainCount() const { - return (int) ((UInt16) retainCount); + return (int) ((UInt16) retainCount); } -void OSObject::taggedRetain(const void *tag) const +bool +OSObject::taggedTryRetain(const void *tag) const { - volatile UInt32 *countP = (volatile UInt32 *) &retainCount; - UInt32 inc = 1; - UInt32 origCount; - UInt32 newCount; - - // Increment the collection bucket. 
- if ((const void *) OSTypeID(OSCollection) == tag) - inc |= (1UL<<16); - - do { - origCount = *countP; - if ( ((UInt16) origCount | 0x1) == 0xffff ) { - const char *msg; - if (origCount & 0x1) { - // If count == 0xffff that means we are freeing now so we can - // just return obviously somebody is cleaning up dangling - // references. - msg = "Attempting to retain a freed object"; - } - else { - // If count == 0xfffe then we have wrapped our reference count. - // We should stop counting now as this reference must be - // leaked rather than accidently wrapping around the clock and - // freeing a very active object later. + volatile UInt32 *countP = (volatile UInt32 *) &retainCount; + UInt32 inc = 1; + UInt32 origCount; + UInt32 newCount; + + // Increment the collection bucket. + if ((const void *) OSTypeID(OSCollection) == tag) { + inc |= (1UL << 16); + } + + do { + origCount = *countP; + if (((UInt16) origCount | 0x1) == 0xffff) { + if (origCount & 0x1) { + // If count == 0xffff that means we are freeing now so we can + // just return obviously somebody is cleaning up dangling + // references. + return false; + } else { + // If count == 0xfffe then we have wrapped our reference count. + // We should stop counting now as this reference must be + // leaked rather than accidently wrapping around the clock and + // freeing a very active object later. #if !DEBUG - break; // Break out of update loop which pegs the reference + break; // Break out of update loop which pegs the reference #else /* DEBUG */ - // @@@ gvdl: eventually need to make this panic optional - // based on a boot argument i.e. debug= boot flag - msg = "About to wrap the reference count, reference leak?"; + // @@@ gvdl: eventually need to make this panic optional + // based on a boot argument i.e. debug= boot flag + panic("OSObject::refcount: " + "About to wrap the reference count, reference leak?"); #endif /* !DEBUG */ - } - panic("OSObject::refcount: %s", msg); - } + } + } - newCount = origCount + inc; - } while (!OSCompareAndSwap(origCount, newCount, const_cast<UInt32 *>(countP))); + newCount = origCount + inc; + } while (!OSCompareAndSwap(origCount, newCount, const_cast<UInt32 *>(countP))); + + return true; +} + +void +OSObject::taggedRetain(const void *tag) const +{ + if (!taggedTryRetain(tag)) { + panic("OSObject::refcount: Attempting to retain a freed object"); + } } -void OSObject::taggedRelease(const void *tag) const +void +OSObject::taggedRelease(const void *tag) const { - taggedRelease(tag, 1); + taggedRelease(tag, 1); } -void OSObject::taggedRelease(const void *tag, const int when) const +void +OSObject::taggedRelease(const void *tag, const int when) const { - volatile UInt32 *countP = (volatile UInt32 *) &retainCount; - UInt32 dec = 1; - UInt32 origCount; - UInt32 newCount; - UInt32 actualCount; - - // Increment the collection bucket. - if ((const void *) OSTypeID(OSCollection) == tag) - dec |= (1UL<<16); - - do { - origCount = *countP; - - if ( ((UInt16) origCount | 0x1) == 0xffff ) { - if (origCount & 0x1) { - // If count == 0xffff that means we are freeing now so we can - // just return obviously somebody is cleaning up some dangling - // references. So we blow out immediately. - return; - } - else { - // If count == 0xfffe then we have wrapped our reference - // count. We should stop counting now as this reference must be - // leaked rather than accidently freeing an active object later.
+ volatile UInt32 *countP = (volatile UInt32 *) &retainCount; + UInt32 dec = 1; + UInt32 origCount; + UInt32 newCount; + UInt32 actualCount; + + // Increment the collection bucket. + if ((const void *) OSTypeID(OSCollection) == tag) { + dec |= (1UL << 16); + } + + do { + origCount = *countP; + + if (((UInt16) origCount | 0x1) == 0xffff) { + if (origCount & 0x1) { + // If count == 0xffff that means we are freeing now so we can + // just return obviously somebody is cleaning up some dangling + // references. So we blow out immediately. + return; + } else { + // If count == 0xfffe then we have wrapped our reference + // count. We should stop counting now as this reference must be + // leaked rather than accidently freeing an active object later. #if !DEBUG - return; // return out of function which pegs the reference + return; // return out of function which pegs the reference #else /* DEBUG */ - // @@@ gvdl: eventually need to make this panic optional - // based on a boot argument i.e. debug= boot flag - panic("OSObject::refcount: %s", - "About to unreference a pegged object, reference leak?"); + // @@@ gvdl: eventually need to make this panic optional - // based on a boot argument i.e. debug= boot flag + panic("OSObject::refcount: %s", + "About to unreference a pegged object, reference leak?"); #endif /* !DEBUG */ - } - } - actualCount = origCount - dec; - if ((UInt16) actualCount < when) - newCount = 0xffff; - else - newCount = actualCount; - - } while (!OSCompareAndSwap(origCount, newCount, const_cast<UInt32 *>(countP))); - - // - // This panic means that we have just attempted to release an object - // whose retain count has gone to less than the number of collections - // it is a member off. Take a panic immediately. - // In fact the panic MAY not be a registry corruption but it is - // ALWAYS the wrong thing to do. I call it a registry corruption 'cause - // the registry is the biggest single use of a network of collections. - // + } + } + actualCount = origCount - dec; + if ((UInt16) actualCount < when) { + newCount = 0xffff; + } else { + newCount = actualCount; + } + } while (!OSCompareAndSwap(origCount, newCount, const_cast<UInt32 *>(countP))); + + // + // This panic means that we have just attempted to release an object + // whose retain count has gone to less than the number of collections + // it is a member off. Take a panic immediately. + // In fact the panic MAY not be a registry corruption but it is + // ALWAYS the wrong thing to do. I call it a registry corruption 'cause + // the registry is the biggest single use of a network of collections.
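
// [Aside] The taggedTryRetain/taggedRelease hunks above are lock-free
// reference counting on one 32-bit word: the low 16 bits hold the ordinary
// retain count, the high 16 bits count references held through OSCollections,
// a low half of 0xffff marks an object already being freed, and 0xfffe pegs a
// count that wrapped (a leak). A minimal user-space sketch of the same
// scheme, with std::atomic standing in for OSCompareAndSwap; all names are
// hypothetical, and the kernel's `when` threshold is modeled as 1.
#include <atomic>
#include <cstdint>

struct PackedRefCount {
    std::atomic<uint32_t> bits{1};          // born with one plain retain

    bool tryRetain(bool fromCollection) {
        const uint32_t inc = fromCollection ? 0x10001u : 1u; // bump both halves
        uint32_t old = bits.load(std::memory_order_relaxed);
        for (;;) {
            if (((uint16_t)old | 0x1) == 0xffff) {
                return false;               // 0xffff: mid-free; 0xfffe: pegged
            }
            if (bits.compare_exchange_weak(old, old + inc)) {
                return true;
            }
        }
    }

    bool release(bool fromCollection) {     // returns true: caller must free
        const uint32_t dec = fromCollection ? 0x10001u : 1u;
        uint32_t old = bits.load(std::memory_order_relaxed);
        for (;;) {
            if (((uint16_t)old | 0x1) == 0xffff) {
                return false;               // already freeing, or pegged
            }
            uint32_t next = old - dec;
            if ((uint16_t)next == 0) {
                next = 0xffff;              // first releaser claims the free
            }
            if (bits.compare_exchange_weak(old, next)) {
                return next == 0xffff;
            }
        }
    }
};
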
+ // // xxx - this error message is overly-specific; // xxx - any code in the kernel could trip this, // xxx - and it applies as noted to all collections, not just the registry - if ((UInt16) actualCount < (actualCount >> 16)) { - panic("A kext releasing a(n) %s has corrupted the registry.", - getClassName(this)); - } - - // Check for a 'free' condition and that if we are first through - if (newCount == 0xffff) { - (const_cast<OSObject *>(this))->free(); - } + if ((UInt16) actualCount < (actualCount >> 16)) { + panic("A kext releasing a(n) %s has corrupted the registry.", + getClassName(this)); + } + + // Check for a 'free' condition and that if we are first through + if (newCount == 0xffff) { + (const_cast<OSObject *>(this))->free(); + } } -void OSObject::release() const +void +OSObject::release() const { - taggedRelease(0); + taggedRelease(0); } -void OSObject::retain() const +void +OSObject::retain() const { - taggedRetain(0); + taggedRetain(0); } extern "C" void osobject_retain(void * object) { - ((OSObject *)object)->retain(); + ((OSObject *)object)->retain(); } extern "C" void osobject_release(void * object) { - ((OSObject *)object)->release(); + ((OSObject *)object)->release(); } -void OSObject::release(int when) const +void +OSObject::release(int when) const { - taggedRelease(0, when); + taggedRelease(0, when); } -bool OSObject::serialize(OSSerialize *s) const +bool +OSObject::serialize(OSSerialize *s) const { - char cstr[128]; - bool ok; + char cstr[128]; + bool ok; - snprintf(cstr, sizeof(cstr), "%s is not serializable", getClassName(this)); + snprintf(cstr, sizeof(cstr), "%s is not serializable", getClassName(this)); - OSString * str; - str = OSString::withCStringNoCopy(cstr); - if (!str) return false; + OSString * str; + str = OSString::withCStringNoCopy(cstr); + if (!str) { + return false; + } - ok = str->serialize(s); - str->release(); + ok = str->serialize(s); + str->release(); - return (ok); + return ok; } -void *OSObject::operator new(size_t size) +void * +OSObject::operator new(size_t size) { #if IOTRACKING - if (kIOTracking & gIOKitDebug) return (OSMetaClass::trackedNew(size)); + if (kIOTracking & gIOKitDebug) { + return OSMetaClass::trackedNew(size); + } #endif - void * mem = kalloc_tag_bt(size, VM_KERN_MEMORY_LIBKERN); - assert(mem); - bzero(mem, size); - OSIVAR_ACCUMSIZE(size); + void * mem = kalloc_tag_bt(size, VM_KERN_MEMORY_LIBKERN); + assert(mem); + bzero(mem, size); + OSIVAR_ACCUMSIZE(size); - return (void *) mem; + return (void *) mem; } -void OSObject::operator delete(void * mem, size_t size) +void +OSObject::operator delete(void * mem, size_t size) { - if (!mem) return; + if (!mem) { + return; + } #if IOTRACKING - if (kIOTracking & gIOKitDebug) return (OSMetaClass::trackedDelete(mem, size)); + if (kIOTracking & gIOKitDebug) { + return OSMetaClass::trackedDelete(mem, size); + } #endif - kfree(mem, size); - OSIVAR_ACCUMSIZE(-size); + kfree(mem, size); + OSIVAR_ACCUMSIZE(-size); } -bool OSObject::init() +bool +OSObject::init() { #if IOTRACKING - if (kIOTracking & gIOKitDebug) getMetaClass()->trackedInstance(this); + if (kIOTracking & gIOKitDebug) { + getMetaClass()->trackedInstance(this); + } #endif - return true; + return true; } -void OSObject::free() +void +OSObject::free() { - const OSMetaClass *meta = getMetaClass(); + const OSMetaClass *meta = getMetaClass(); - if (meta) - { - meta->instanceDestructed(); + if (meta) { + meta->instanceDestructed(); #if IOTRACKING - if (kIOTracking & gIOKitDebug) getMetaClass()->trackedFree(this); + if (kIOTracking & gIOKitDebug) { +
getMetaClass()->trackedFree(this); + } #endif - } - delete this; + } + delete this; } #if IOTRACKING -void OSObject::trackingAccumSize(size_t size) +void +OSObject::trackingAccumSize(size_t size) { - if (kIOTracking & gIOKitDebug) getMetaClass()->trackedAccumSize(this, size); + if (kIOTracking & gIOKitDebug) { + getMetaClass()->trackedAccumSize(this, size); + } } #endif @@ -310,12 +356,12 @@ void OSObject::trackingAccumSize(size_t size) OSObject::OSObject() { - retainCount = 1; + retainCount = 1; // if (kIOTracking & gIOKitDebug) getMetaClass()->trackedInstance(this); } OSObject::OSObject(const OSMetaClass *) { - retainCount = 1; + retainCount = 1; // if (kIOTracking & gIOKitDebug) getMetaClass()->trackedInstance(this); } diff --git a/libkern/c++/OSOrderedSet.cpp b/libkern/c++/OSOrderedSet.cpp index ccf2c542b..2b7cd44c8 100644 --- a/libkern/c++/OSOrderedSet.cpp +++ b/libkern/c++/OSOrderedSet.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -44,386 +44,447 @@ OSMetaClassDefineReservedUnused(OSOrderedSet, 7); struct _Element { - const OSMetaClassBase * obj; + const OSMetaClassBase * obj; // unsigned int pri; }; #define EXT_CAST(obj) \ reinterpret_cast<OSObject *>(const_cast<OSMetaClassBase *>(obj)) -bool OSOrderedSet:: +bool +OSOrderedSet:: initWithCapacity(unsigned int inCapacity, - OSOrderFunction inOrdering, void *inOrderingRef) + OSOrderFunction inOrdering, void *inOrderingRef) { - unsigned int size; + unsigned int size; - if (!super::init()) - return false; + if (!super::init()) { + return false; + } - if (inCapacity > (UINT_MAX / sizeof(_Element))) - return false; + if (inCapacity > (UINT_MAX / sizeof(_Element))) { + return false; + } - size = sizeof(_Element) * inCapacity; - array = (_Element *) kalloc_container(size); - if (!array) - return false; + size = sizeof(_Element) * inCapacity; + array = (_Element *) kalloc_container(size); + if (!array) { + return false; + } - count = 0; - capacity = inCapacity; - capacityIncrement = (inCapacity)? inCapacity : 16; - ordering = inOrdering; - orderingRef = inOrderingRef; + count = 0; + capacity = inCapacity; + capacityIncrement = (inCapacity)?
inCapacity : 16; + ordering = inOrdering; + orderingRef = inOrderingRef; - bzero(array, size); - OSCONTAINER_ACCUMSIZE(size); + bzero(array, size); + OSCONTAINER_ACCUMSIZE(size); - return true; + return true; } -OSOrderedSet * OSOrderedSet:: +OSOrderedSet * +OSOrderedSet:: withCapacity(unsigned int capacity, - OSOrderFunction ordering, void * orderingRef) + OSOrderFunction ordering, void * orderingRef) { - OSOrderedSet *me = new OSOrderedSet; + OSOrderedSet *me = new OSOrderedSet; - if (me && !me->initWithCapacity(capacity, ordering, orderingRef)) { - me->release(); - me = 0; - } + if (me && !me->initWithCapacity(capacity, ordering, orderingRef)) { + me->release(); + me = 0; + } - return me; + return me; } -void OSOrderedSet::free() +void +OSOrderedSet::free() { - (void) super::setOptions(0, kImmutable); - flushCollection(); + (void) super::setOptions(0, kImmutable); + flushCollection(); - if (array) { - kfree(array, sizeof(_Element) * capacity); - OSCONTAINER_ACCUMSIZE( -(sizeof(_Element) * capacity) ); - } + if (array) { + kfree(array, sizeof(_Element) * capacity); + OSCONTAINER_ACCUMSIZE( -(sizeof(_Element) * capacity)); + } - super::free(); + super::free(); } -unsigned int OSOrderedSet::getCount() const { return count; } -unsigned int OSOrderedSet::getCapacity() const { return capacity; } -unsigned int OSOrderedSet::getCapacityIncrement() const - { return capacityIncrement; } -unsigned int OSOrderedSet::setCapacityIncrement(unsigned int increment) +unsigned int +OSOrderedSet::getCount() const +{ + return count; +} +unsigned int +OSOrderedSet::getCapacity() const +{ + return capacity; +} +unsigned int +OSOrderedSet::getCapacityIncrement() const +{ + return capacityIncrement; +} +unsigned int +OSOrderedSet::setCapacityIncrement(unsigned int increment) { - capacityIncrement = (increment)? increment : 16; - return capacityIncrement; + capacityIncrement = (increment)? 
increment : 16; + return capacityIncrement; } -unsigned int OSOrderedSet::ensureCapacity(unsigned int newCapacity) +unsigned int +OSOrderedSet::ensureCapacity(unsigned int newCapacity) { - _Element *newArray; - unsigned int finalCapacity; - vm_size_t oldSize, newSize; - - if (newCapacity <= capacity) - return capacity; - - // round up - finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) - * capacityIncrement; - if ((finalCapacity < newCapacity) || - (finalCapacity > (UINT_MAX / sizeof(_Element)))) { - return capacity; - } - newSize = sizeof(_Element) * finalCapacity; - - newArray = (_Element *) kallocp_container(&newSize); - if (newArray) { - // use all of the actual allocation size - finalCapacity = newSize / sizeof(_Element); - - oldSize = sizeof(_Element) * capacity; - - OSCONTAINER_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); - - bcopy(array, newArray, oldSize); - bzero(&newArray[capacity], newSize - oldSize); - kfree(array, oldSize); - array = newArray; - capacity = finalCapacity; - } - - return capacity; + _Element *newArray; + unsigned int finalCapacity; + vm_size_t oldSize, newSize; + + if (newCapacity <= capacity) { + return capacity; + } + + // round up + finalCapacity = (((newCapacity - 1) / capacityIncrement) + 1) + * capacityIncrement; + if ((finalCapacity < newCapacity) || + (finalCapacity > (UINT_MAX / sizeof(_Element)))) { + return capacity; + } + newSize = sizeof(_Element) * finalCapacity; + + newArray = (_Element *) kallocp_container(&newSize); + if (newArray) { + // use all of the actual allocation size + finalCapacity = newSize / sizeof(_Element); + + oldSize = sizeof(_Element) * capacity; + + OSCONTAINER_ACCUMSIZE(((size_t)newSize) - ((size_t)oldSize)); + + bcopy(array, newArray, oldSize); + bzero(&newArray[capacity], newSize - oldSize); + kfree(array, oldSize); + array = newArray; + capacity = finalCapacity; + } + + return capacity; } -void OSOrderedSet::flushCollection() +void +OSOrderedSet::flushCollection() { - unsigned int i; + unsigned int i; - haveUpdated(); + haveUpdated(); - for (i = 0; i < count; i++) - array[i].obj->taggedRelease(OSTypeID(OSCollection)); + for (i = 0; i < count; i++) { + array[i].obj->taggedRelease(OSTypeID(OSCollection)); + } - count = 0; + count = 0; } /* internal */ -bool OSOrderedSet::setObject(unsigned int index, const OSMetaClassBase *anObject) +bool +OSOrderedSet::setObject(unsigned int index, const OSMetaClassBase *anObject) { - unsigned int i; - unsigned int newCount = count + 1; + unsigned int i; + unsigned int newCount = count + 1; - if ((index > count) || !anObject) - return false; + if ((index > count) || !anObject) { + return false; + } - if (containsObject(anObject)) - return false; + if (containsObject(anObject)) { + return false; + } - // do we need more space? - if (newCount > capacity && newCount > ensureCapacity(newCount)) - return false; + // do we need more space? 
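
// [Aside] ensureCapacity above grows the array by rounding the request up to
// a whole number of capacityIncrement units and refuses to grow if the
// rounding wraps or the byte size would overflow, leaving the old capacity in
// place. The same arithmetic in isolation -- a hypothetical helper, assuming
// request >= 1 and increment >= 1:
#include <cstddef>
#include <limits>

static size_t
roundedAllocSize(size_t request, size_t increment, size_t elemSize)
{
    // smallest multiple of `increment` that is >= request
    size_t units = ((request - 1) / increment + 1) * increment;
    if (units < request ||                            // rounding wrapped
        units > std::numeric_limits<size_t>::max() / elemSize) {
        return 0;                                     // caller keeps old capacity
    }
    return units * elemSize;                          // bytes to allocate
}
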
+ if (newCount > capacity && newCount > ensureCapacity(newCount)) { + return false; + } - haveUpdated(); - if (index != count) { - for (i = count; i > index; i--) - array[i] = array[i-1]; - } - array[index].obj = anObject; + haveUpdated(); + if (index != count) { + for (i = count; i > index; i--) { + array[i] = array[i - 1]; + } + } + array[index].obj = anObject; // array[index].pri = pri; - anObject->taggedRetain(OSTypeID(OSCollection)); - count++; + anObject->taggedRetain(OSTypeID(OSCollection)); + count++; - return true; + return true; } -bool OSOrderedSet::setFirstObject(const OSMetaClassBase *anObject) +bool +OSOrderedSet::setFirstObject(const OSMetaClassBase *anObject) { - return( setObject(0, anObject)); + return setObject(0, anObject); } -bool OSOrderedSet::setLastObject(const OSMetaClassBase *anObject) +bool +OSOrderedSet::setLastObject(const OSMetaClassBase *anObject) { - return( setObject( count, anObject)); + return setObject( count, anObject); } -#define ORDER(obj1,obj2) \ +#define ORDER(obj1, obj2) \ (ordering ? ((*ordering)( (const OSObject *) obj1, (const OSObject *) obj2, orderingRef)) : 0) -bool OSOrderedSet::setObject(const OSMetaClassBase *anObject ) +bool +OSOrderedSet::setObject(const OSMetaClassBase *anObject ) { - unsigned int i; + unsigned int i; - // queue it behind those with same priority - for( i = 0; - (i < count) && (ORDER(array[i].obj, anObject) >= 0); - i++ ) {} + // queue it behind those with same priority + for (i = 0; + (i < count) && (ORDER(array[i].obj, anObject) >= 0); + i++) { + } - return( setObject(i, anObject)); + return setObject(i, anObject); } -void OSOrderedSet::removeObject(const OSMetaClassBase *anObject) +void +OSOrderedSet::removeObject(const OSMetaClassBase *anObject) { - bool deleted = false; - unsigned int i; - - for (i = 0; i < count; i++) { - - if (deleted) - array[i-1] = array[i]; - else if (array[i].obj == anObject) { - deleted = true; - haveUpdated(); // Pity we can't flush the log - array[i].obj->taggedRelease(OSTypeID(OSCollection)); - } - } - - if (deleted) - count--; + bool deleted = false; + unsigned int i; + + for (i = 0; i < count; i++) { + if (deleted) { + array[i - 1] = array[i]; + } else if (array[i].obj == anObject) { + deleted = true; + haveUpdated(); // Pity we can't flush the log + array[i].obj->taggedRelease(OSTypeID(OSCollection)); + } + } + + if (deleted) { + count--; + } } -bool OSOrderedSet::containsObject(const OSMetaClassBase *anObject) const +bool +OSOrderedSet::containsObject(const OSMetaClassBase *anObject) const { - return anObject && member(anObject); + return anObject && member(anObject); } -bool OSOrderedSet::member(const OSMetaClassBase *anObject) const +bool +OSOrderedSet::member(const OSMetaClassBase *anObject) const { - unsigned int i; + unsigned int i; - for( i = 0; - (i < count) && (array[i].obj != anObject); - i++ ) {} + for (i = 0; + (i < count) && (array[i].obj != anObject); + i++) { + } - return( i < count); + return i < count; } /* internal */ -OSObject *OSOrderedSet::getObject( unsigned int index ) const +OSObject * +OSOrderedSet::getObject( unsigned int index ) const { - if (index >= count) - return 0; + if (index >= count) { + return 0; + } // if( pri) // *pri = array[index].pri; - return( const_cast<OSObject *>((const OSObject *) array[index].obj) ); + return const_cast<OSObject *>((const OSObject *) array[index].obj); } -OSObject *OSOrderedSet::getFirstObject() const +OSObject * +OSOrderedSet::getFirstObject() const { - if( count) - return( const_cast<OSObject *>((const OSObject *) array[0].obj) ); - else - return( 0 ); +
if (count) { + return const_cast<OSObject *>((const OSObject *) array[0].obj); + } else { + return 0; + } } -OSObject *OSOrderedSet::getLastObject() const +OSObject * +OSOrderedSet::getLastObject() const { - if( count) - return( const_cast<OSObject *>((const OSObject *) array[count-1].obj) ); - else - return( 0 ); + if (count) { + return const_cast<OSObject *>((const OSObject *) array[count - 1].obj); + } else { + return 0; + } } -SInt32 OSOrderedSet::orderObject( const OSMetaClassBase * anObject ) +SInt32 +OSOrderedSet::orderObject( const OSMetaClassBase * anObject ) { - return( ORDER( anObject, 0 )); + return ORDER( anObject, 0 ); } -void *OSOrderedSet::getOrderingRef() +void * +OSOrderedSet::getOrderingRef() { - return orderingRef; + return orderingRef; } -bool OSOrderedSet::isEqualTo(const OSOrderedSet *anOrderedSet) const +bool +OSOrderedSet::isEqualTo(const OSOrderedSet *anOrderedSet) const { - unsigned int i; - - if ( this == anOrderedSet ) - return true; + unsigned int i; + + if (this == anOrderedSet) { + return true; + } - if ( count != anOrderedSet->getCount() ) - return false; + if (count != anOrderedSet->getCount()) { + return false; + } - for ( i = 0; i < count; i++ ) { - if ( !array[i].obj->isEqualTo(anOrderedSet->getObject(i)) ) - return false; - } + for (i = 0; i < count; i++) { + if (!array[i].obj->isEqualTo(anOrderedSet->getObject(i))) { + return false; + } + } - return true; + return true; } -bool OSOrderedSet::isEqualTo(const OSMetaClassBase *anObject) const +bool +OSOrderedSet::isEqualTo(const OSMetaClassBase *anObject) const { - OSOrderedSet *oSet; + OSOrderedSet *oSet; - oSet = OSDynamicCast(OSOrderedSet, anObject); - if ( oSet ) - return isEqualTo(oSet); - else - return false; + oSet = OSDynamicCast(OSOrderedSet, anObject); + if (oSet) { + return isEqualTo(oSet); + } else { + return false; + } } -unsigned int OSOrderedSet::iteratorSize() const +unsigned int +OSOrderedSet::iteratorSize() const { - return( sizeof(unsigned int)); + return sizeof(unsigned int); } -bool OSOrderedSet::initIterator(void *inIterator) const +bool +OSOrderedSet::initIterator(void *inIterator) const { - unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int *iteratorP = (unsigned int *) inIterator; - *iteratorP = 0; - return true; + *iteratorP = 0; + return true; } -bool OSOrderedSet:: +bool +OSOrderedSet:: getNextObjectForIterator(void *inIterator, OSObject **ret) const { - unsigned int *iteratorP = (unsigned int *) inIterator; - unsigned int index = (*iteratorP)++; + unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int index = (*iteratorP)++; - if (index < count) - *ret = const_cast<OSObject *>((const OSObject *) array[index].obj); - else - *ret = 0; + if (index < count) { + *ret = const_cast<OSObject *>((const OSObject *) array[index].obj); + } else { + *ret = 0; + } - return (*ret != 0); + return *ret != 0; } -unsigned OSOrderedSet::setOptions(unsigned options, unsigned mask, void *) +unsigned +OSOrderedSet::setOptions(unsigned options, unsigned mask, void *) { - unsigned old = super::setOptions(options, mask); - if ((old ^ options) & mask) { - - // Value changed need to recurse over all of the child collections - for ( unsigned i = 0; i < count; i++ ) { - OSCollection *coll = OSDynamicCast(OSCollection, array[i].obj); - if (coll) - coll->setOptions(options, mask); + unsigned old = super::setOptions(options, mask); + if ((old ^ options) & mask) { + // Value changed need to recurse over all of the child collections + for (unsigned i = 0; i < count; i++) { + OSCollection *coll = OSDynamicCast(OSCollection,
array[i].obj); + if (coll) { + coll->setOptions(options, mask); + } + } } - } - return old; + return old; } -OSCollection * OSOrderedSet::copyCollection(OSDictionary *cycleDict) +OSCollection * +OSOrderedSet::copyCollection(OSDictionary *cycleDict) { - bool allocDict = !cycleDict; - OSCollection *ret = 0; - OSOrderedSet *newSet = 0; - - if (allocDict) { - cycleDict = OSDictionary::withCapacity(16); - if (!cycleDict) - return 0; - } - - do { - // Check for a cycle - ret = super::copyCollection(cycleDict); - if (ret) - continue; - - // Duplicate the set with no contents - newSet = OSOrderedSet::withCapacity(capacity, ordering, orderingRef); - if (!newSet) - continue; - - // Insert object into cycle Dictionary - cycleDict->setObject((const OSSymbol *) this, newSet); - - newSet->capacityIncrement = capacityIncrement; - - // Now copy over the contents to the new duplicate - for (unsigned int i = 0; i < count; i++) { - OSObject *obj = EXT_CAST(array[i].obj); - OSCollection *coll = OSDynamicCast(OSCollection, obj); - if (coll) { - OSCollection *newColl = coll->copyCollection(cycleDict); - if (newColl) { - obj = newColl; // Rely on cycleDict ref for a bit - newColl->release(); + bool allocDict = !cycleDict; + OSCollection *ret = 0; + OSOrderedSet *newSet = 0; + + if (allocDict) { + cycleDict = OSDictionary::withCapacity(16); + if (!cycleDict) { + return 0; + } + } + + do { + // Check for a cycle + ret = super::copyCollection(cycleDict); + if (ret) { + continue; } - else - goto abortCopy; - }; - newSet->setLastObject(obj); - }; - ret = newSet; - newSet = 0; + // Duplicate the set with no contents + newSet = OSOrderedSet::withCapacity(capacity, ordering, orderingRef); + if (!newSet) { + continue; + } - } while (false); + // Insert object into cycle Dictionary + cycleDict->setObject((const OSSymbol *) this, newSet); + + newSet->capacityIncrement = capacityIncrement; + + // Now copy over the contents to the new duplicate + for (unsigned int i = 0; i < count; i++) { + OSObject *obj = EXT_CAST(array[i].obj); + OSCollection *coll = OSDynamicCast(OSCollection, obj); + if (coll) { + OSCollection *newColl = coll->copyCollection(cycleDict); + if (newColl) { + obj = newColl; // Rely on cycleDict ref for a bit + newColl->release(); + } else { + goto abortCopy; + } + } + ; + newSet->setLastObject(obj); + } + ; + + ret = newSet; + newSet = 0; + } while (false); abortCopy: - if (newSet) - newSet->release(); + if (newSet) { + newSet->release(); + } - if (allocDict) - cycleDict->release(); + if (allocDict) { + cycleDict->release(); + } - return ret; + return ret; } diff --git a/libkern/c++/OSRuntime.cpp b/libkern/c++/OSRuntime.cpp index d0a09ca06..122acda60 100644 --- a/libkern/c++/OSRuntime.cpp +++ b/libkern/c++/OSRuntime.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000,2008-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
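
// [Aside] OSOrderedSet::copyCollection above deep-copies a graph of
// collections with cycleDict as a memo table: each set is entered into the
// dictionary *before* its members are copied, so a cycle (or a shared
// sub-collection) resolves to the copy already in progress instead of
// recursing forever. The same shape as a self-contained sketch, with
// std::unordered_map standing in for the OSDictionary; names hypothetical.
#include <memory>
#include <unordered_map>
#include <vector>

struct Node;
using NodePtr = std::shared_ptr<Node>;
struct Node {
    std::vector<NodePtr> children;
};

static NodePtr
deepCopy(const NodePtr &src, std::unordered_map<const Node *, NodePtr> &memo)
{
    auto it = memo.find(src.get());
    if (it != memo.end()) {
        return it->second;            // cycle or shared subtree: reuse the copy
    }
    auto dup = std::make_shared<Node>();
    memo.emplace(src.get(), dup);     // register before recursing: breaks cycles
    for (const NodePtr &child : src->children) {
        dup->children.push_back(deepCopy(child, memo));
    }
    return dup;
}
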
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -52,7 +52,7 @@ OSKextLogSpec kOSRuntimeLogSpec = kOSKextLogErrorLevel | kOSKextLogLoadFlag | kOSKextLogKextBookkeepingFlag; - + #if PRAGMA_MARK #pragma mark Logging Bootstrap #endif /* PRAGMA_MARK */ @@ -68,11 +68,11 @@ static bool gKernelCPPInitialized = false; #define OSRuntimeLog(kext, flags, format, args...) \ do { \ - if (gKernelCPPInitialized) { \ - OSKextLog((kext), (flags), (format), ## args); \ - } else { \ - printf((format), ## args); \ - } \ + if (gKernelCPPInitialized) { \ + OSKextLog((kext), (flags), (format), ## args); \ + } else { \ + printf((format), ## args); \ + } \ } while (0) #if PRAGMA_MARK @@ -93,23 +93,23 @@ extern int debug_iomalloc_size; void * kern_os_malloc(size_t size) { - void *mem; - if (size == 0) { - return (0); - } + void *mem; + if (size == 0) { + return 0; + } - mem = kallocp_tag_bt((vm_size_t *)&size, VM_KERN_MEMORY_LIBKERN); - if (!mem) { - return (0); - } + mem = kallocp_tag_bt((vm_size_t *)&size, VM_KERN_MEMORY_LIBKERN); + if (!mem) { + return 0; + } #if OSALLOCDEBUG - OSAddAtomic(size, &debug_iomalloc_size); + OSAddAtomic(size, &debug_iomalloc_size); #endif - bzero(mem, size); + bzero(mem, size); - return mem; + return mem; } /********************************************************************* @@ -117,56 +117,56 @@ kern_os_malloc(size_t size) void kern_os_free(void * addr) { - size_t size; - size = kalloc_size(addr); + size_t size; + size = kalloc_size(addr); #if OSALLOCDEBUG OSAddAtomic(-size, &debug_iomalloc_size); #endif - kfree_addr(addr); + kfree_addr(addr); } /********************************************************************* *********************************************************************/ void * kern_os_realloc( - void * addr, - size_t nsize) + void * addr, + size_t nsize) { - void *nmem; - size_t osize; + void *nmem; + size_t osize; - if (!addr) { - return (kern_os_malloc(nsize)); - } + if (!addr) { + return kern_os_malloc(nsize); + } - osize = kalloc_size(addr); - if (nsize == osize) { - return (addr); - } + osize = kalloc_size(addr); + if (nsize == osize) { + return addr; + } - if (nsize == 0) { - kfree_addr(addr); - return (0); - } + if (nsize == 0) { + kfree_addr(addr); + return 0; + } - nmem = kallocp_tag_bt((vm_size_t *)&nsize, VM_KERN_MEMORY_LIBKERN); - if (!nmem){ - kfree_addr(addr); - return (0); - } + nmem = kallocp_tag_bt((vm_size_t *)&nsize, VM_KERN_MEMORY_LIBKERN); + if (!nmem) { + kfree_addr(addr); + return 0; + } #if OSALLOCDEBUG - OSAddAtomic((nsize - osize), &debug_iomalloc_size); + OSAddAtomic((nsize - osize), &debug_iomalloc_size); #endif - if (nsize > osize) { - (void)memset((char *)nmem + osize, 0, nsize - osize); - } - (void)memcpy(nmem, addr, (nsize > osize) ? osize : nsize); - kfree_addr(addr); + if (nsize > osize) { + (void)memset((char *)nmem + osize, 0, nsize - osize); + } + (void)memcpy(nmem, addr, (nsize > osize) ? 
osize : nsize); + kfree_addr(addr); - return (nmem); + return nmem; } #if PRAGMA_MARK @@ -177,45 +177,54 @@ kern_os_realloc( *********************************************************************/ #if __GNUC__ >= 3 -void __cxa_pure_virtual( void ) { panic("%s", __FUNCTION__); } +void +__cxa_pure_virtual( void ) +{ + panic("%s", __FUNCTION__); +} #else -void __pure_virtual( void ) { panic("%s", __FUNCTION__); } +void +__pure_virtual( void ) +{ + panic("%s", __FUNCTION__); +} #endif extern lck_grp_t * IOLockGroup; extern kmod_info_t g_kernel_kmod_info; enum { - kOSSectionNamesDefault = 0, - kOSSectionNamesBuiltinKext = 1, - kOSSectionNamesCount = 2, + kOSSectionNamesDefault = 0, + kOSSectionNamesBuiltinKext = 1, + kOSSectionNamesCount = 2, }; enum { - kOSSectionNameInitializer = 0, - kOSSectionNameFinalizer = 1, - kOSSectionNameCount = 2 + kOSSectionNameInitializer = 0, + kOSSectionNameFinalizer = 1, + kOSSectionNameCount = 2 }; static const char * -gOSStructorSectionNames[kOSSectionNamesCount][kOSSectionNameCount] = { - { SECT_MODINITFUNC, SECT_MODTERMFUNC }, - { kBuiltinInitSection, kBuiltinTermSection } + gOSStructorSectionNames[kOSSectionNamesCount][kOSSectionNameCount] = { + { SECT_MODINITFUNC, SECT_MODTERMFUNC }, + { kBuiltinInitSection, kBuiltinTermSection } }; -void OSlibkernInit(void) +void +OSlibkernInit(void) { - // This must be called before calling OSRuntimeInitializeCPP. - OSMetaClassBase::initialize(); + // This must be called before calling OSRuntimeInitializeCPP. + OSMetaClassBase::initialize(); - g_kernel_kmod_info.address = (vm_address_t) &_mh_execute_header; - if (kOSReturnSuccess != OSRuntimeInitializeCPP(NULL)) { - // &g_kernel_kmod_info, gOSSectionNamesStandard, 0, 0)) { - panic("OSRuntime: C++ runtime failed to initialize."); - } + g_kernel_kmod_info.address = (vm_address_t) &_mh_execute_header; + if (kOSReturnSuccess != OSRuntimeInitializeCPP(NULL)) { + // &g_kernel_kmod_info, gOSSectionNamesStandard, 0, 0)) { + panic("OSRuntime: C++ runtime failed to initialize."); + } - gKernelCPPInitialized = true; + gKernelCPPInitialized = true; - return; + return; } __END_DECLS @@ -232,297 +241,294 @@ typedef void (*structor_t)(void); static bool OSRuntimeCallStructorsInSection( - OSKext * theKext, - kmod_info_t * kmodInfo, - void * metaHandle, - kernel_segment_command_t * segment, - const char * sectionName, - uintptr_t textStart, - uintptr_t textEnd) + OSKext * theKext, + kmod_info_t * kmodInfo, + void * metaHandle, + kernel_segment_command_t * segment, + const char * sectionName, + uintptr_t textStart, + uintptr_t textEnd) { - kernel_section_t * section; - bool result = TRUE; - - for (section = firstsect(segment); - section != NULL; - section = nextsect(segment, section)) - { - if (strncmp(section->sectname, sectionName, sizeof(section->sectname) - 1)) continue; - - structor_t * structors = (structor_t *)section->addr; - if (!structors) continue; - - structor_t structor; - unsigned int num_structors = section->size / sizeof(structor_t); - unsigned int hit_null_structor = 0; - unsigned int firstIndex = 0; - - if (textStart) - { - // bsearch for any in range - unsigned int baseIdx; - unsigned int lim; - uintptr_t value; - firstIndex = num_structors; - for (lim = num_structors, baseIdx = 0; lim; lim >>= 1) - { - value = (uintptr_t) structors[baseIdx + (lim >> 1)]; - if (!value) panic("%s: null structor", kmodInfo->name); - if ((value >= textStart) && (value < textEnd)) - { - firstIndex = (baseIdx + (lim >> 1)); - // scan back for the first in range - for (; firstIndex; 
firstIndex--) - { - value = (uintptr_t) structors[firstIndex - 1]; - if ((value < textStart) || (value >= textEnd)) break; - } - break; - } - if (textStart > value) - { - // move right - baseIdx += (lim >> 1) + 1; - lim--; - } - // else move left - } - baseIdx = (baseIdx + (lim >> 1)); - } - for (; - (firstIndex < num_structors) - && (!metaHandle || OSMetaClass::checkModLoad(metaHandle)); - firstIndex++) - { - if ((structor = structors[firstIndex])) - { - if ((textStart && ((uintptr_t) structor < textStart)) - || (textEnd && ((uintptr_t) structor >= textEnd))) break; - - (*structor)(); - } - else if (!hit_null_structor) - { - hit_null_structor = 1; - OSRuntimeLog(theKext, kOSRuntimeLogSpec, - "Null structor in kext %s segment %s!", - kmodInfo->name, section->segname); - } - } - if (metaHandle) result = OSMetaClass::checkModLoad(metaHandle); - break; - } /* for (section...) */ - return (result); + kernel_section_t * section; + bool result = TRUE; + + for (section = firstsect(segment); + section != NULL; + section = nextsect(segment, section)) { + if (strncmp(section->sectname, sectionName, sizeof(section->sectname) - 1)) { + continue; + } + + structor_t * structors = (structor_t *)section->addr; + if (!structors) { + continue; + } + + structor_t structor; + unsigned int num_structors = section->size / sizeof(structor_t); + unsigned int hit_null_structor = 0; + unsigned int firstIndex = 0; + + if (textStart) { + // bsearch for any in range + unsigned int baseIdx; + unsigned int lim; + uintptr_t value; + firstIndex = num_structors; + for (lim = num_structors, baseIdx = 0; lim; lim >>= 1) { + value = (uintptr_t) structors[baseIdx + (lim >> 1)]; + if (!value) { + panic("%s: null structor", kmodInfo->name); + } + if ((value >= textStart) && (value < textEnd)) { + firstIndex = (baseIdx + (lim >> 1)); + // scan back for the first in range + for (; firstIndex; firstIndex--) { + value = (uintptr_t) structors[firstIndex - 1]; + if ((value < textStart) || (value >= textEnd)) { + break; + } + } + break; + } + if (textStart > value) { + // move right + baseIdx += (lim >> 1) + 1; + lim--; + } + // else move left + } + baseIdx = (baseIdx + (lim >> 1)); + } + for (; + (firstIndex < num_structors) + && (!metaHandle || OSMetaClass::checkModLoad(metaHandle)); + firstIndex++) { + if ((structor = structors[firstIndex])) { + if ((textStart && ((uintptr_t) structor < textStart)) + || (textEnd && ((uintptr_t) structor >= textEnd))) { + break; + } + + (*structor)(); + } else if (!hit_null_structor) { + hit_null_structor = 1; + OSRuntimeLog(theKext, kOSRuntimeLogSpec, + "Null structor in kext %s segment %s!", + kmodInfo->name, section->segname); + } + } + if (metaHandle) { + result = OSMetaClass::checkModLoad(metaHandle); + } + break; + } /* for (section...) 
*/ + return result; } /********************************************************************* *********************************************************************/ kern_return_t OSRuntimeFinalizeCPP( - OSKext * theKext) + OSKext * theKext) { - kern_return_t result = KMOD_RETURN_FAILURE; - void * metaHandle = NULL; // do not free - kernel_mach_header_t * header; - kernel_segment_command_t * segment; - kmod_info_t * kmodInfo; - const char ** sectionNames; - uintptr_t textStart; - uintptr_t textEnd; - - textStart = 0; - textEnd = 0; - sectionNames = gOSStructorSectionNames[kOSSectionNamesDefault]; - if (theKext) { - if (!theKext->isCPPInitialized()) { - result = KMOD_RETURN_SUCCESS; - goto finish; - } - kmodInfo = theKext->kmod_info; - if (!kmodInfo || !kmodInfo->address) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - header = (kernel_mach_header_t *)kmodInfo->address; - if (theKext->flags.builtin) { - header = (kernel_mach_header_t *)g_kernel_kmod_info.address; - textStart = kmodInfo->address; - textEnd = textStart + kmodInfo->size; - sectionNames = gOSStructorSectionNames[kOSSectionNamesBuiltinKext]; - } - } else { - kmodInfo = &g_kernel_kmod_info; - header = (kernel_mach_header_t *)kmodInfo->address; - } - - /* OSKext checks for this condition now, but somebody might call - * this function directly (the symbol is exported....). - */ - if (OSMetaClass::modHasInstance(kmodInfo->name)) { - // xxx - Don't log under errors? this is more of an info thing - OSRuntimeLog(theKext, kOSRuntimeLogSpec, - "Can't tear down kext %s C++; classes have instances:", - kmodInfo->name); - OSKext::reportOSMetaClassInstances(kmodInfo->name, kOSRuntimeLogSpec); - result = kOSMetaClassHasInstances; - goto finish; - } - - /* Tell the meta class system that we are starting to unload. - * metaHandle isn't actually needed on the finalize path, - * so we don't check it here, even though OSMetaClass::postModLoad() will - * return a failure (it only does actual work on the init path anyhow). 
- */ - metaHandle = OSMetaClass::preModLoad(kmodInfo->name); - - OSSymbol::checkForPageUnload((void *)kmodInfo->address, - (void *)(kmodInfo->address + kmodInfo->size)); - - header = (kernel_mach_header_t *)kmodInfo->address; - segment = firstsegfromheader(header); - - for (segment = firstsegfromheader(header); - segment != 0; - segment = nextsegfromheader(header, segment)) { - - OSRuntimeCallStructorsInSection(theKext, kmodInfo, NULL, segment, - sectionNames[kOSSectionNameFinalizer], textStart, textEnd); - } - - (void)OSMetaClass::postModLoad(metaHandle); - - if (theKext) { - theKext->setCPPInitialized(false); - } - result = KMOD_RETURN_SUCCESS; + kern_return_t result = KMOD_RETURN_FAILURE; + void * metaHandle = NULL;// do not free + kernel_mach_header_t * header; + kernel_segment_command_t * segment; + kmod_info_t * kmodInfo; + const char ** sectionNames; + uintptr_t textStart; + uintptr_t textEnd; + + textStart = 0; + textEnd = 0; + sectionNames = gOSStructorSectionNames[kOSSectionNamesDefault]; + if (theKext) { + if (!theKext->isCPPInitialized()) { + result = KMOD_RETURN_SUCCESS; + goto finish; + } + kmodInfo = theKext->kmod_info; + if (!kmodInfo || !kmodInfo->address) { + result = kOSKextReturnInvalidArgument; + goto finish; + } + header = (kernel_mach_header_t *)kmodInfo->address; + if (theKext->flags.builtin) { + header = (kernel_mach_header_t *)g_kernel_kmod_info.address; + textStart = kmodInfo->address; + textEnd = textStart + kmodInfo->size; + sectionNames = gOSStructorSectionNames[kOSSectionNamesBuiltinKext]; + } + } else { + kmodInfo = &g_kernel_kmod_info; + header = (kernel_mach_header_t *)kmodInfo->address; + } + + /* OSKext checks for this condition now, but somebody might call + * this function directly (the symbol is exported....). + */ + if (OSMetaClass::modHasInstance(kmodInfo->name)) { + // xxx - Don't log under errors? this is more of an info thing + OSRuntimeLog(theKext, kOSRuntimeLogSpec, + "Can't tear down kext %s C++; classes have instances:", + kmodInfo->name); + OSKext::reportOSMetaClassInstances(kmodInfo->name, kOSRuntimeLogSpec); + result = kOSMetaClassHasInstances; + goto finish; + } + + /* Tell the meta class system that we are starting to unload. + * metaHandle isn't actually needed on the finalize path, + * so we don't check it here, even though OSMetaClass::postModLoad() will + * return a failure (it only does actual work on the init path anyhow). 
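
// [Aside] OSRuntimeInitializeCPP/OSRuntimeFinalizeCPP walk every segment of
// the image and hand each to OSRuntimeCallStructorsInSection, which treats
// the named section as a flat array of function pointers (static constructors
// or destructors) and invokes them in order. For kexts linked into the kernel
// image, [textStart, textEnd) limits the walk to that kext's slice of the
// shared section; the code above bsearches for the first in-range pointer,
// while this hypothetical sketch simply scans:
#include <cstdint>

typedef void (*structor_t)(void);

static void
callStructorsInRange(structor_t *structors, unsigned int count,
    uintptr_t textStart, uintptr_t textEnd)
{
    for (unsigned int i = 0; i < count; i++) {
        uintptr_t addr = (uintptr_t)structors[i];
        if (addr == 0) {
            continue;       // null slot: logged but tolerated upstream
        }
        if (textStart != 0 && (addr < textStart || addr >= textEnd)) {
            continue;       // initializer belongs to a different built-in kext
        }
        (*structors[i])();  // run the constructor/destructor
    }
}
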
+ */ + metaHandle = OSMetaClass::preModLoad(kmodInfo->name); + + OSSymbol::checkForPageUnload((void *)kmodInfo->address, + (void *)(kmodInfo->address + kmodInfo->size)); + + header = (kernel_mach_header_t *)kmodInfo->address; + segment = firstsegfromheader(header); + + for (segment = firstsegfromheader(header); + segment != 0; + segment = nextsegfromheader(header, segment)) { + OSRuntimeCallStructorsInSection(theKext, kmodInfo, NULL, segment, + sectionNames[kOSSectionNameFinalizer], textStart, textEnd); + } + + (void)OSMetaClass::postModLoad(metaHandle); + + if (theKext) { + theKext->setCPPInitialized(false); + } + result = KMOD_RETURN_SUCCESS; finish: - return result; + return result; } /********************************************************************* *********************************************************************/ kern_return_t OSRuntimeInitializeCPP( - OSKext * theKext) + OSKext * theKext) { - kern_return_t result = KMOD_RETURN_FAILURE; - kernel_mach_header_t * header = NULL; - void * metaHandle = NULL; // do not free - bool load_success = true; - kernel_segment_command_t * segment = NULL; // do not free - kernel_segment_command_t * failure_segment = NULL; // do not free - kmod_info_t * kmodInfo; - const char ** sectionNames; - uintptr_t textStart; - uintptr_t textEnd; - - textStart = 0; - textEnd = 0; - sectionNames = gOSStructorSectionNames[kOSSectionNamesDefault]; - if (theKext) { - if (theKext->isCPPInitialized()) { - result = KMOD_RETURN_SUCCESS; - goto finish; - } - - kmodInfo = theKext->kmod_info; - if (!kmodInfo || !kmodInfo->address) { - result = kOSKextReturnInvalidArgument; - goto finish; - } - header = (kernel_mach_header_t *)kmodInfo->address; - - if (theKext->flags.builtin) { - header = (kernel_mach_header_t *)g_kernel_kmod_info.address; - textStart = kmodInfo->address; - textEnd = textStart + kmodInfo->size; - sectionNames = gOSStructorSectionNames[kOSSectionNamesBuiltinKext]; - } - } else { - kmodInfo = &g_kernel_kmod_info; - header = (kernel_mach_header_t *)kmodInfo->address; - } - - /* Tell the meta class system that we are starting the load - */ - metaHandle = OSMetaClass::preModLoad(kmodInfo->name); - assert(metaHandle); - if (!metaHandle) { - goto finish; - } - - /* NO GOTO PAST HERE. */ - - /* Scan the header for all constructor sections, in any - * segment, and invoke the constructors within those sections. - */ - for (segment = firstsegfromheader(header); - segment != NULL && load_success; - segment = nextsegfromheader(header, segment)) - { - /* Record the current segment in the event of a failure. - */ - failure_segment = segment; - load_success = OSRuntimeCallStructorsInSection( - theKext, kmodInfo, metaHandle, segment, - sectionNames[kOSSectionNameInitializer], - textStart, textEnd); - } /* for (segment...) */ - - /* We failed so call all of the destructors. We must do this before - * calling OSMetaClass::postModLoad() as the OSMetaClass destructors - * will alter state (in the metaHandle) used by that function. - */ - if (!load_success) { - - /* Scan the header for all destructor sections, in any - * segment, and invoke the constructors within those sections. - */ - for (segment = firstsegfromheader(header); - segment != failure_segment && segment != 0; - segment = nextsegfromheader(header, segment)) { - - OSRuntimeCallStructorsInSection(theKext, kmodInfo, NULL, segment, - sectionNames[kOSSectionNameFinalizer], textStart, textEnd); - - } /* for (segment...) 
*/ - } - - /* Now, regardless of success so far, do the post-init registration - * and cleanup. If we had to call the unloadCPP function, static - * destructors have removed classes from the stalled list so no - * metaclasses will actually be registered. - */ - result = OSMetaClass::postModLoad(metaHandle); - - /* If we've otherwise been fine up to now, but OSMetaClass::postModLoad() - * fails (typically due to a duplicate class), tear down all the C++ - * stuff from the kext. This isn't necessary for libkern/OSMetaClass stuff, - * but may be necessary for other C++ code. We ignore the return value - * because it's only a fail when there are existing instances of libkern - * classes, and there had better not be any created on the C++ init path. - */ - if (load_success && result != KMOD_RETURN_SUCCESS) { - (void)OSRuntimeFinalizeCPP(theKext); //kmodInfo, sectionNames, textStart, textEnd); - } - - if (theKext && load_success && result == KMOD_RETURN_SUCCESS) { - theKext->setCPPInitialized(true); - } + kern_return_t result = KMOD_RETURN_FAILURE; + kernel_mach_header_t * header = NULL; + void * metaHandle = NULL;// do not free + bool load_success = true; + kernel_segment_command_t * segment = NULL;// do not free + kernel_segment_command_t * failure_segment = NULL; // do not free + kmod_info_t * kmodInfo; + const char ** sectionNames; + uintptr_t textStart; + uintptr_t textEnd; + + textStart = 0; + textEnd = 0; + sectionNames = gOSStructorSectionNames[kOSSectionNamesDefault]; + if (theKext) { + if (theKext->isCPPInitialized()) { + result = KMOD_RETURN_SUCCESS; + goto finish; + } + + kmodInfo = theKext->kmod_info; + if (!kmodInfo || !kmodInfo->address) { + result = kOSKextReturnInvalidArgument; + goto finish; + } + header = (kernel_mach_header_t *)kmodInfo->address; + + if (theKext->flags.builtin) { + header = (kernel_mach_header_t *)g_kernel_kmod_info.address; + textStart = kmodInfo->address; + textEnd = textStart + kmodInfo->size; + sectionNames = gOSStructorSectionNames[kOSSectionNamesBuiltinKext]; + } + } else { + kmodInfo = &g_kernel_kmod_info; + header = (kernel_mach_header_t *)kmodInfo->address; + } + + /* Tell the meta class system that we are starting the load + */ + metaHandle = OSMetaClass::preModLoad(kmodInfo->name); + assert(metaHandle); + if (!metaHandle) { + goto finish; + } + + /* NO GOTO PAST HERE. */ + + /* Scan the header for all constructor sections, in any + * segment, and invoke the constructors within those sections. + */ + for (segment = firstsegfromheader(header); + segment != NULL && load_success; + segment = nextsegfromheader(header, segment)) { + /* Record the current segment in the event of a failure. + */ + failure_segment = segment; + load_success = OSRuntimeCallStructorsInSection( + theKext, kmodInfo, metaHandle, segment, + sectionNames[kOSSectionNameInitializer], + textStart, textEnd); + } /* for (segment...) */ + + /* We failed so call all of the destructors. We must do this before + * calling OSMetaClass::postModLoad() as the OSMetaClass destructors + * will alter state (in the metaHandle) used by that function. + */ + if (!load_success) { + /* Scan the header for all destructor sections, in any + * segment, and invoke the constructors within those sections. + */ + for (segment = firstsegfromheader(header); + segment != failure_segment && segment != 0; + segment = nextsegfromheader(header, segment)) { + OSRuntimeCallStructorsInSection(theKext, kmodInfo, NULL, segment, + sectionNames[kOSSectionNameFinalizer], textStart, textEnd); + } /* for (segment...) 
*/ + } + + /* Now, regardless of success so far, do the post-init registration + * and cleanup. If we had to call the unloadCPP function, static + * destructors have removed classes from the stalled list so no + * metaclasses will actually be registered. + */ + result = OSMetaClass::postModLoad(metaHandle); + + /* If we've otherwise been fine up to now, but OSMetaClass::postModLoad() + * fails (typically due to a duplicate class), tear down all the C++ + * stuff from the kext. This isn't necessary for libkern/OSMetaClass stuff, + * but may be necessary for other C++ code. We ignore the return value + * because it's only a fail when there are existing instances of libkern + * classes, and there had better not be any created on the C++ init path. + */ + if (load_success && result != KMOD_RETURN_SUCCESS) { + (void)OSRuntimeFinalizeCPP(theKext); //kmodInfo, sectionNames, textStart, textEnd); + } + + if (theKext && load_success && result == KMOD_RETURN_SUCCESS) { + theKext->setCPPInitialized(true); + } finish: - return result; + return result; } /********************************************************************* -Unload a kernel segment. +* Unload a kernel segment. *********************************************************************/ void OSRuntimeUnloadCPPForSegment( - kernel_segment_command_t * segment) + kernel_segment_command_t * segment) { - OSRuntimeCallStructorsInSection(NULL, &g_kernel_kmod_info, NULL, segment, - gOSStructorSectionNames[kOSSectionNamesDefault][kOSSectionNameFinalizer], 0, 0); + OSRuntimeCallStructorsInSection(NULL, &g_kernel_kmod_info, NULL, segment, + gOSStructorSectionNames[kOSSectionNamesDefault][kOSSectionNameFinalizer], 0, 0); } #if PRAGMA_MARK @@ -534,39 +540,41 @@ OSRuntimeUnloadCPPForSegment( void * operator new(size_t size) { - void * result; + void * result; - result = (void *) kern_os_malloc(size); - return result; + result = (void *) kern_os_malloc(size); + return result; } void operator delete(void * addr) #if __cplusplus >= 201103L - noexcept +noexcept #endif { - kern_os_free(addr); - return; + kern_os_free(addr); + return; } void * operator new[](unsigned long sz) { - if (sz == 0) sz = 1; - return kern_os_malloc(sz); + if (sz == 0) { + sz = 1; + } + return kern_os_malloc(sz); } void operator delete[](void * ptr) #if __cplusplus >= 201103L - noexcept +noexcept #endif { - if (ptr) { - kern_os_free(ptr); - } - return; + if (ptr) { + kern_os_free(ptr); + } + return; } /* PR-6481964 - The compiler is going to check for size overflows in calls to @@ -577,11 +585,9 @@ operator delete[](void * ptr) * compiler expects the name to be mangled. 
*/ namespace std { - void __throw_length_error(const char *msg __unused) { - panic("Size of array created by new[] has overflowed"); + panic("Size of array created by new[] has overflowed"); } - }; diff --git a/libkern/c++/OSRuntimeSupport.c b/libkern/c++/OSRuntimeSupport.c index 2bb1d5f98..26f769e1a 100644 --- a/libkern/c++/OSRuntimeSupport.c +++ b/libkern/c++/OSRuntimeSupport.c @@ -5,7 +5,12 @@ void _ZN11OSMetaClassdlEPvm(void *mem, unsigned long size); void *_ZN11OSMetaClassnwEm(unsigned long size); -void _ZN11OSMetaClassdlEPvm(__attribute__((unused)) void *mem, __attribute__((__unused__)) unsigned long size) { } -void *_ZN11OSMetaClassnwEm(__attribute__((unused)) unsigned long size) { return (void *)0ULL; } - - +void +_ZN11OSMetaClassdlEPvm(__attribute__((unused)) void *mem, __attribute__((__unused__)) unsigned long size) +{ +} +void * +_ZN11OSMetaClassnwEm(__attribute__((unused)) unsigned long size) +{ + return (void *)0ULL; +} diff --git a/libkern/c++/OSSerialize.cpp b/libkern/c++/OSSerialize.cpp index a82a37891..a0366f02d 100644 --- a/libkern/c++/OSSerialize.cpp +++ b/libkern/c++/OSSerialize.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
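
// [Aside] The namespace std hunk in OSRuntime.cpp above pairs with the
// PR-6481964 comment: for `new T[n]` the compiler plants a size-overflow
// check on n * sizeof(T) and emits a call to std::__throw_length_error when
// it trips, so the kernel must supply that symbol; its version simply panics.
// Conceptually the generated guard behaves like this hypothetical rendering:
#include <cstddef>

namespace std {
[[noreturn]] void __throw_length_error(const char *msg);
}

template <typename T>
static void *
checkedArrayNew(size_t n)
{
    if (n > static_cast<size_t>(-1) / sizeof(T)) {
        std::__throw_length_error("new[] size overflow"); // panics in xnu
    }
    return ::operator new[](n * sizeof(T));
}
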
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSSerialize.cpp created by rsulack on Wen 25-Nov-1998 */ @@ -53,33 +53,35 @@ OSMetaClassDefineReservedUnused(OSSerialize, 6); OSMetaClassDefineReservedUnused(OSSerialize, 7); -char * OSSerialize::text() const +char * +OSSerialize::text() const { return data; } -void OSSerialize::clearText() +void +OSSerialize::clearText() { - if (binary) - { + if (binary) { length = sizeof(kOSSerializeBinarySignature); bzero(&data[length], capacity - length); endCollection = true; - } - else - { + } else { bzero((void *)data, capacity); length = 1; - } + } tags->flushCollection(); } -bool OSSerialize::previouslySerialized(const OSMetaClassBase *o) +bool +OSSerialize::previouslySerialized(const OSMetaClassBase *o) { char temp[16]; unsigned int tagIdx; - if (binary) return (binarySerialize(o)); + if (binary) { + return binarySerialize(o); + } // look it up tagIdx = tags->getNextIndexOfObject(o, 0); @@ -95,113 +97,139 @@ bool OSSerialize::previouslySerialized(const OSMetaClassBase *o) } // add to tag array - tags->setObject(o);// XXX check return + tags->setObject(o);// XXX check return return false; } -bool OSSerialize::addXMLStartTag(const OSMetaClassBase *o, const char *tagString) +bool +OSSerialize::addXMLStartTag(const OSMetaClassBase *o, const char *tagString) { char temp[16]; unsigned int tagIdx; - if (binary) - { + if (binary) { printf("class %s: xml serialize\n", o->getMetaClass()->getClassName()); - return (false); + return false; } - if (!addChar('<')) return false; - if (!addString(tagString)) return false; - if (!addString(" ID=\"")) return false; + if (!addChar('<')) { + return false; + } + if (!addString(tagString)) { + return false; + } + if (!addString(" ID=\"")) { + return false; + } tagIdx = tags->getNextIndexOfObject(o, 0); assert(tagIdx != -1U); snprintf(temp, sizeof(temp), "%u", tagIdx); - if (!addString(temp)) + if (!addString(temp)) { + return false; + } + if (!addChar('\"')) { + return false; + } + if (!addChar('>')) { return false; - if (!addChar('\"')) return false; - if (!addChar('>')) return false; + } return true; } -bool OSSerialize::addXMLEndTag(const char *tagString) +bool +OSSerialize::addXMLEndTag(const char *tagString) { - - if (!addChar('<')) return false; - if (!addChar('/')) return false; - if (!addString(tagString)) return false; - if (!addChar('>')) return false; + if (!addChar('<')) { + return false; + } + if (!addChar('/')) { + return false; + } + if (!addString(tagString)) { + return false; + } + if (!addChar('>')) { + return false; + } return true; } -bool OSSerialize::addChar(const char c) +bool +OSSerialize::addChar(const char c) { - if (binary) - { + if (binary) { printf("xml serialize\n"); - return (false); + return false; } // add char, possibly extending our capacity - if (length >= capacity && length >=ensureCapacity(capacity+capacityIncrement)) + if (length >= capacity && length >= ensureCapacity(capacity + capacityIncrement)) { return false; + } data[length - 1] = c; length++; - + return true; } -bool OSSerialize::addString(const char *s) +bool +OSSerialize::addString(const char *s) { bool rc = false; - while (*s && (rc = addChar(*s++))) ; + while (*s && (rc = addChar(*s++))) { + ; + } return rc; } -bool OSSerialize::initWithCapacity(unsigned int inCapacity) +bool +OSSerialize::initWithCapacity(unsigned int inCapacity) { - if (!super::init()) - return false; - - tags = OSArray::withCapacity(256); - if (!tags) { - return false; - } - - length = 1; - - if (!inCapacity) { - inCapacity = 1; - } - if 
(round_page_overflow(inCapacity, &capacity)) { - tags->release(); - tags = 0; - return false; - } - - capacityIncrement = capacity; - - // allocate from the kernel map so that we can safely map this data - // into user space (the primary use of the OSSerialize object) - - kern_return_t rc = kmem_alloc(kernel_map, (vm_offset_t *)&data, capacity, IOMemoryTag(kernel_map)); - if (rc) { - tags->release(); - tags = 0; - return false; - } - bzero((void *)data, capacity); - - - OSCONTAINER_ACCUMSIZE(capacity); - - return true; + if (!super::init()) { + return false; + } + + tags = OSArray::withCapacity(256); + if (!tags) { + return false; + } + + length = 1; + + if (!inCapacity) { + inCapacity = 1; + } + if (round_page_overflow(inCapacity, &capacity)) { + tags->release(); + tags = 0; + return false; + } + + capacityIncrement = capacity; + + // allocate from the kernel map so that we can safely map this data + // into user space (the primary use of the OSSerialize object) + + kern_return_t rc = kmem_alloc(kernel_map, (vm_offset_t *)&data, capacity, IOMemoryTag(kernel_map)); + if (rc) { + tags->release(); + tags = 0; + return false; + } + bzero((void *)data, capacity); + + + OSCONTAINER_ACCUMSIZE(capacity); + + return true; } -OSSerialize *OSSerialize::withCapacity(unsigned int inCapacity) +OSSerialize * +OSSerialize::withCapacity(unsigned int inCapacity) { OSSerialize *me = new OSSerialize; @@ -213,114 +241,141 @@ OSSerialize *OSSerialize::withCapacity(unsigned int inCapacity) return me; } -unsigned int OSSerialize::getLength() const { return length; } -unsigned int OSSerialize::getCapacity() const { return capacity; } -unsigned int OSSerialize::getCapacityIncrement() const { return capacityIncrement; } -unsigned int OSSerialize::setCapacityIncrement(unsigned int increment) +unsigned int +OSSerialize::getLength() const +{ + return length; +} +unsigned int +OSSerialize::getCapacity() const +{ + return capacity; +} +unsigned int +OSSerialize::getCapacityIncrement() const { - capacityIncrement = (increment)? increment : 256; - return capacityIncrement; + return capacityIncrement; +} +unsigned int +OSSerialize::setCapacityIncrement(unsigned int increment) +{ + capacityIncrement = (increment)? 
increment : 256; + return capacityIncrement; } -unsigned int OSSerialize::ensureCapacity(unsigned int newCapacity) +unsigned int +OSSerialize::ensureCapacity(unsigned int newCapacity) { char *newData; - if (newCapacity <= capacity) + if (newCapacity <= capacity) { return capacity; + } if (round_page_overflow(newCapacity, &newCapacity)) { return capacity; } kern_return_t rc = kmem_realloc(kernel_map, - (vm_offset_t)data, - capacity, - (vm_offset_t *)&newData, - newCapacity, - VM_KERN_MEMORY_IOKIT); + (vm_offset_t)data, + capacity, + (vm_offset_t *)&newData, + newCapacity, + VM_KERN_MEMORY_IOKIT); if (!rc) { - OSCONTAINER_ACCUMSIZE(newCapacity); + OSCONTAINER_ACCUMSIZE(newCapacity); - // kmem realloc does not free the old address range - kmem_free(kernel_map, (vm_offset_t)data, capacity); - OSCONTAINER_ACCUMSIZE(-((size_t)capacity)); - - // kmem realloc does not zero out the new memory - // and this could end up going to user land - bzero(&newData[capacity], newCapacity - capacity); - - data = newData; - capacity = newCapacity; + // kmem realloc does not free the old address range + kmem_free(kernel_map, (vm_offset_t)data, capacity); + OSCONTAINER_ACCUMSIZE(-((size_t)capacity)); + + // kmem realloc does not zero out the new memory + // and this could end up going to user land + bzero(&newData[capacity], newCapacity - capacity); + + data = newData; + capacity = newCapacity; } return capacity; } -void OSSerialize::free() +void +OSSerialize::free() { - if (tags) - tags->release(); - - if (data) { - kmem_free(kernel_map, (vm_offset_t)data, capacity); - OSCONTAINER_ACCUMSIZE( -((size_t)capacity) ); - } - super::free(); + if (tags) { + tags->release(); + } + + if (data) { + kmem_free(kernel_map, (vm_offset_t)data, capacity); + OSCONTAINER_ACCUMSIZE( -((size_t)capacity)); + } + super::free(); } OSDefineMetaClassAndStructors(OSSerializer, OSObject) OSSerializer * OSSerializer::forTarget( void * target, - OSSerializerCallback callback, void * ref ) + OSSerializerCallback callback, void * ref ) { - OSSerializer * thing; - - thing = new OSSerializer; - if( thing && !thing->init()) { - thing->release(); - thing = 0; - } - - if( thing) { - thing->target = target; - thing->ref = ref; - thing->callback = callback; - } - return( thing ); + OSSerializer * thing; + + thing = new OSSerializer; + if (thing && !thing->init()) { + thing->release(); + thing = 0; + } + + if (thing) { + thing->target = target; + thing->ref = ref; + thing->callback = callback; + } + return thing; } -bool OSSerializer::callbackToBlock(void * target __unused, void * ref, - OSSerialize * serializer) +bool +OSSerializer::callbackToBlock(void * target __unused, void * ref, + OSSerialize * serializer) { - return ((OSSerializerBlock)ref)(serializer); + return ((OSSerializerBlock)ref)(serializer); } -OSSerializer * OSSerializer::withBlock( - OSSerializerBlock callback) +OSSerializer * +OSSerializer::withBlock( + OSSerializerBlock callback) { - OSSerializer * serializer; - OSSerializerBlock block; + OSSerializer * serializer; + OSSerializerBlock block; - block = Block_copy(callback); - if (!block) return (0); + block = Block_copy(callback); + if (!block) { + return 0; + } - serializer = (OSSerializer::forTarget(NULL, &OSSerializer::callbackToBlock, block)); + serializer = (OSSerializer::forTarget(NULL, &OSSerializer::callbackToBlock, block)); - if (!serializer) Block_release(block); + if (!serializer) { + Block_release(block); + } - return (serializer); + return serializer; } -void OSSerializer::free(void) +void +OSSerializer::free(void) { - 
if (callback == &callbackToBlock) Block_release(ref); + if (callback == &callbackToBlock) { + Block_release(ref); + } - super::free(); + super::free(); } -bool OSSerializer::serialize( OSSerialize * s ) const +bool +OSSerializer::serialize( OSSerialize * s ) const { - return( (*callback)(target, ref, s) ); + return (*callback)(target, ref, s); } diff --git a/libkern/c++/OSSerializeBinary.cpp b/libkern/c++/OSSerializeBinary.cpp index 3de4336af..b408296c4 100644 --- a/libkern/c++/OSSerializeBinary.cpp +++ b/libkern/c++/OSSerializeBinary.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,122 +37,139 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #if 0 -#define DEBG(fmt, args...) { kprintf(fmt, args); } +#define DEBG(fmt, args ...) { kprintf(fmt, args); } #else -#define DEBG(fmt, args...) {} +#define DEBG(fmt, args ...) 
{} #endif /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -OSSerialize *OSSerialize::binaryWithCapacity(unsigned int inCapacity, - Editor editor, void * reference) +OSSerialize * +OSSerialize::binaryWithCapacity(unsigned int inCapacity, + Editor editor, void * reference) { OSSerialize *me; - if (inCapacity < sizeof(uint32_t)) return (0); + if (inCapacity < sizeof(uint32_t)) { + return 0; + } me = OSSerialize::withCapacity(inCapacity); - if (!me) return (0); + if (!me) { + return 0; + } - me->binary = true; - me->endCollection = true; - me->editor = editor; - me->editRef = reference; + me->binary = true; + me->endCollection = true; + me->editor = editor; + me->editRef = reference; bcopy(kOSSerializeBinarySignature, &me->data[0], sizeof(kOSSerializeBinarySignature)); me->length = sizeof(kOSSerializeBinarySignature); - return (me); + return me; } -bool OSSerialize::addBinary(const void * bits, size_t size) +bool +OSSerialize::addBinary(const void * bits, size_t size) { - unsigned int newCapacity; - size_t alignSize; + unsigned int newCapacity; + size_t alignSize; - if (os_add_overflow(size, 3, &alignSize)) return (false); + if (os_add_overflow(size, 3, &alignSize)) { + return false; + } alignSize &= ~3L; - if (os_add_overflow(length, alignSize, &newCapacity)) return (false); - if (newCapacity >= capacity) - { - newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement; - if (newCapacity < capacity) return (false); - if (newCapacity > ensureCapacity(newCapacity)) return (false); - } + if (os_add_overflow(length, alignSize, &newCapacity)) { + return false; + } + if (newCapacity >= capacity) { + newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement; + if (newCapacity < capacity) { + return false; + } + if (newCapacity > ensureCapacity(newCapacity)) { + return false; + } + } bcopy(bits, &data[length], size); length += alignSize; - - return (true); + + return true; } -bool OSSerialize::addBinaryObject(const OSMetaClassBase * o, uint32_t key, - const void * bits, size_t size) +bool +OSSerialize::addBinaryObject(const OSMetaClassBase * o, uint32_t key, + const void * bits, size_t size) { - unsigned int newCapacity; - size_t alignSize; + unsigned int newCapacity; + size_t alignSize; - // add to tag array + // add to tag array tags->setObject(o); - if (os_add3_overflow(size, sizeof(key), 3, &alignSize)) return (false); + if (os_add3_overflow(size, sizeof(key), 3, &alignSize)) { + return false; + } alignSize &= ~3L; - if (os_add_overflow(length, alignSize, &newCapacity)) return (false); - if (newCapacity >= capacity) - { - newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement; - if (newCapacity < capacity) return (false); - if (newCapacity > ensureCapacity(newCapacity)) return (false); - } - - if (endCollection) - { - endCollection = false; - key |= kOSSerializeEndCollecton; - } + if (os_add_overflow(length, alignSize, &newCapacity)) { + return false; + } + if (newCapacity >= capacity) { + newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement; + if (newCapacity < capacity) { + return false; + } + if (newCapacity > ensureCapacity(newCapacity)) { + return false; + } + } + + if (endCollection) { + endCollection = false; + key |= kOSSerializeEndCollecton; + } bcopy(&key, &data[length], sizeof(key)); bcopy(bits, &data[length + sizeof(key)], size); length += alignSize; - - return (true); + + return true; } -bool OSSerialize::binarySerialize(const OSMetaClassBase *o) 
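
Every record that the addBinary()/addBinaryObject() hunks above append is framed the same way: one 32-bit key word carrying the type bits, the payload length in the low kOSSerializeDataMask bits, and optionally the end-of-collection flag, followed by the payload padded out to a 4-byte boundary. A minimal userland sketch of that framing, assuming the kOSSerialize* constants keep the values published in libkern/OSSerializeBinary.h (the flag value below is an assumption, not taken from this patch):

// Illustrative sketch only; not part of the patch.
#include <cstdint>
#include <cstring>
#include <vector>

enum : uint32_t {
	kSketchEndCollection = 0x80000000U, // kOSSerializeEndCollecton in the headers (value assumed)
};

static std::vector<uint8_t>
packRecord(uint32_t typeBits, const void *bits, uint32_t size, bool endsCollection)
{
	uint32_t key = typeBits | size;                       // type | length
	if (endsCollection) {
		key |= kSketchEndCollection;                  // last entry of a collection
	}
	size_t alignSize = ((size_t)size + 3) & ~(size_t)3;   // pad payload to 4 bytes
	std::vector<uint8_t> out(sizeof(key) + alignSize, 0); // zero fill covers the padding
	memcpy(out.data(), &key, sizeof(key));
	if (size) {
		memcpy(out.data() + sizeof(key), bits, size);
	}
	return out;
}

The zero fill matters for the same reason ensureCapacity() bzero()s freshly grown buffers above: the serialized bytes can be handed to user space, so padding must not leak kernel memory.
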
+bool +OSSerialize::binarySerialize(const OSMetaClassBase *o) { - OSDictionary * dict; - OSArray * array; - OSSet * set; - OSNumber * num; - OSSymbol * sym; - OSString * str; - OSData * ldata; - OSBoolean * boo; + OSDictionary * dict; + OSArray * array; + OSSet * set; + OSNumber * num; + OSSymbol * sym; + OSString * str; + OSData * ldata; + OSBoolean * boo; unsigned int tagIdx; - uint32_t i, key; - size_t len; - bool ok; + uint32_t i, key; + size_t len; + bool ok; tagIdx = tags->getNextIndexOfObject(o, 0); // does it exist? - if (-1U != tagIdx) - { + if (-1U != tagIdx) { key = (kOSSerializeObject | tagIdx); - if (endCollection) - { - endCollection = false; - key |= kOSSerializeEndCollecton; + if (endCollection) { + endCollection = false; + key |= kOSSerializeEndCollecton; } ok = addBinary(&key, sizeof(key)); - return (ok); + return ok; } - if ((dict = OSDynamicCast(OSDictionary, o))) - { + if ((dict = OSDynamicCast(OSDictionary, o))) { key = (kOSSerializeDictionary | dict->count); ok = addBinaryObject(o, key, NULL, 0); - for (i = 0; ok && (i < dict->count);) - { + for (i = 0; ok && (i < dict->count);) { const OSSymbol * dictKey; const OSMetaClassBase * dictValue; const OSMetaClassBase * nvalue = 0; @@ -160,103 +177,101 @@ bool OSSerialize::binarySerialize(const OSMetaClassBase *o) dictKey = dict->dictionary[i].key; dictValue = dict->dictionary[i].value; i++; - if (editor) - { + if (editor) { dictValue = nvalue = (*editor)(editRef, this, dict, dictKey, dictValue); - if (!dictValue) dictValue = dict; + if (!dictValue) { + dictValue = dict; + } } ok = binarySerialize(dictKey); - if (!ok) break; + if (!ok) { + break; + } endCollection = (i == dict->count); ok = binarySerialize(dictValue); - if (!ok) ok = dictValue->serialize(this); - if (nvalue) nvalue->release(); + if (!ok) { + ok = dictValue->serialize(this); + } + if (nvalue) { + nvalue->release(); + } // if (!ok) ok = binarySerialize(kOSBooleanFalse); - } - } - else if ((array = OSDynamicCast(OSArray, o))) - { + } + } else if ((array = OSDynamicCast(OSArray, o))) { key = (kOSSerializeArray | array->count); ok = addBinaryObject(o, key, NULL, 0); - for (i = 0; ok && (i < array->count);) - { + for (i = 0; ok && (i < array->count);) { i++; endCollection = (i == array->count); - ok = binarySerialize(array->array[i-1]); - if (!ok) ok = array->array[i-1]->serialize(this); + ok = binarySerialize(array->array[i - 1]); + if (!ok) { + ok = array->array[i - 1]->serialize(this); + } // if (!ok) ok = binarySerialize(kOSBooleanFalse); - } - } - else if ((set = OSDynamicCast(OSSet, o))) - { + } + } else if ((set = OSDynamicCast(OSSet, o))) { key = (kOSSerializeSet | set->members->count); ok = addBinaryObject(o, key, NULL, 0); - for (i = 0; ok && (i < set->members->count);) - { + for (i = 0; ok && (i < set->members->count);) { i++; endCollection = (i == set->members->count); - ok = binarySerialize(set->members->array[i-1]); - if (!ok) ok = set->members->array[i-1]->serialize(this); + ok = binarySerialize(set->members->array[i - 1]); + if (!ok) { + ok = set->members->array[i - 1]->serialize(this); + } // if (!ok) ok = binarySerialize(kOSBooleanFalse); - } - } - else if ((num = OSDynamicCast(OSNumber, o))) - { + } + } else if ((num = OSDynamicCast(OSNumber, o))) { key = (kOSSerializeNumber | num->size); ok = addBinaryObject(o, key, &num->value, sizeof(num->value)); - } - else if ((boo = OSDynamicCast(OSBoolean, o))) - { + } else if ((boo = OSDynamicCast(OSBoolean, o))) { key = (kOSSerializeBoolean | (kOSBooleanTrue == boo)); ok = addBinaryObject(o, key, 
NULL, 0); - } - else if ((sym = OSDynamicCast(OSSymbol, o))) - { + } else if ((sym = OSDynamicCast(OSSymbol, o))) { len = (sym->getLength() + 1); key = (kOSSerializeSymbol | len); ok = addBinaryObject(o, key, sym->getCStringNoCopy(), len); - } - else if ((str = OSDynamicCast(OSString, o))) - { + } else if ((str = OSDynamicCast(OSString, o))) { len = (str->getLength() + 0); key = (kOSSerializeString | len); ok = addBinaryObject(o, key, str->getCStringNoCopy(), len); - } - else if ((ldata = OSDynamicCast(OSData, o))) - { + } else if ((ldata = OSDynamicCast(OSData, o))) { len = ldata->getLength(); - if (ldata->reserved && ldata->reserved->disableSerialization) len = 0; + if (ldata->reserved && ldata->reserved->disableSerialization) { + len = 0; + } key = (kOSSerializeData | len); ok = addBinaryObject(o, key, ldata->getBytesNoCopy(), len); + } else { + return false; } - else return (false); - return (ok); + return ok; } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#define setAtIndex(v, idx, o) \ - if (idx >= v##Capacity) \ - { \ - if (v##Capacity >= v##CapacityMax) ok = false; \ - else \ - { \ - uint32_t ncap = v##Capacity + 64; \ - typeof(v##Array) nbuf = (typeof(v##Array)) kalloc_container(ncap * sizeof(o)); \ - if (!nbuf) ok = false; \ - else \ - { \ - if (v##Array) \ - { \ - bcopy(v##Array, nbuf, v##Capacity * sizeof(o)); \ - kfree(v##Array, v##Capacity * sizeof(o)); \ - } \ - v##Array = nbuf; \ - v##Capacity = ncap; \ - } \ - } \ - } \ +#define setAtIndex(v, idx, o) \ + if (idx >= v##Capacity) \ + { \ + if (v##Capacity >= v##CapacityMax) ok = false; \ + else \ + { \ + uint32_t ncap = v##Capacity + 64; \ + typeof(v##Array) nbuf = (typeof(v##Array)) kalloc_container(ncap * sizeof(o)); \ + if (!nbuf) ok = false; \ + else \ + { \ + if (v##Array) \ + { \ + bcopy(v##Array, nbuf, v##Capacity * sizeof(o)); \ + kfree(v##Array, v##Capacity * sizeof(o)); \ + } \ + v##Array = nbuf; \ + v##Capacity = ncap; \ + } \ + } \ + } \ if (ok) v##Array[idx] = o; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -266,7 +281,7 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin { OSObject ** objsArray; uint32_t objsCapacity; - enum { objsCapacityMax = 16*1024*1024 }; + enum { objsCapacityMax = 16 * 1024 * 1024 }; uint32_t objsIdx; OSObject ** stackArray; @@ -274,31 +289,39 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin enum { stackCapacityMax = 64 }; uint32_t stackIdx; - OSObject * result; - OSObject * parent; - OSDictionary * dict; - OSArray * array; - OSSet * set; - OSDictionary * newDict; - OSArray * newArray; - OSSet * newSet; - OSObject * o; - OSSymbol * sym; - OSString * str; - - size_t bufferPos; - const uint32_t * next; - uint32_t key, len, wordLen; - bool end, newCollect, isRef; - unsigned long long value; - bool ok; - - if (errorString) *errorString = 0; - if (bufferSize < sizeof(kOSSerializeBinarySignature)) return (NULL); - if (0 != strcmp(kOSSerializeBinarySignature, buffer)) return (NULL); - if (3 & ((uintptr_t) buffer)) return (NULL); + OSObject * result; + OSObject * parent; + OSDictionary * dict; + OSArray * array; + OSSet * set; + OSDictionary * newDict; + OSArray * newArray; + OSSet * newSet; + OSObject * o; + OSSymbol * sym; + OSString * str; + + size_t bufferPos; + const uint32_t * next; + uint32_t key, len, wordLen; + bool end, newCollect, isRef; + unsigned long long value; + bool ok; + + if (errorString) { + *errorString = 0; + } + if 
(bufferSize < sizeof(kOSSerializeBinarySignature)) { + return NULL; + } + if (0 != strcmp(kOSSerializeBinarySignature, buffer)) { + return NULL; + } + if (3 & ((uintptr_t) buffer)) { + return NULL; + } bufferPos = sizeof(kOSSerializeBinarySignature); - next = (typeof(next)) (((uintptr_t) buffer) + bufferPos); + next = (typeof(next))(((uintptr_t) buffer) + bufferPos); DEBG("---------OSUnserializeBinary(%p)\n", buffer); @@ -306,140 +329,166 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin objsIdx = objsCapacity = 0; stackIdx = stackCapacity = 0; - result = 0; - parent = 0; + result = 0; + parent = 0; dict = 0; array = 0; set = 0; sym = 0; ok = true; - while (ok) - { + while (ok) { bufferPos += sizeof(*next); - if (!(ok = (bufferPos <= bufferSize))) break; + if (!(ok = (bufferPos <= bufferSize))) { + break; + } key = *next++; - len = (key & kOSSerializeDataMask); - wordLen = (len + 3) >> 2; + len = (key & kOSSerializeDataMask); + wordLen = (len + 3) >> 2; end = (0 != (kOSSerializeEndCollecton & key)); - DEBG("key 0x%08x: 0x%04x, %d\n", key, len, end); + DEBG("key 0x%08x: 0x%04x, %d\n", key, len, end); - newCollect = isRef = false; + newCollect = isRef = false; o = 0; newDict = 0; newArray = 0; newSet = 0; - - switch (kOSSerializeTypeMask & key) - { - case kOSSerializeDictionary: - o = newDict = OSDictionary::withCapacity(len); - newCollect = (len != 0); - break; - case kOSSerializeArray: - o = newArray = OSArray::withCapacity(len); - newCollect = (len != 0); - break; - case kOSSerializeSet: - o = newSet = OSSet::withCapacity(len); - newCollect = (len != 0); - break; - - case kOSSerializeObject: - if (len >= objsIdx) break; - o = objsArray[len]; - isRef = true; + + switch (kOSSerializeTypeMask & key) { + case kOSSerializeDictionary: + o = newDict = OSDictionary::withCapacity(len); + newCollect = (len != 0); + break; + case kOSSerializeArray: + o = newArray = OSArray::withCapacity(len); + newCollect = (len != 0); + break; + case kOSSerializeSet: + o = newSet = OSSet::withCapacity(len); + newCollect = (len != 0); + break; + + case kOSSerializeObject: + if (len >= objsIdx) { + break; + } + o = objsArray[len]; + isRef = true; + break; + + case kOSSerializeNumber: + bufferPos += sizeof(long long); + if (bufferPos > bufferSize) { break; + } + if ((len != 32) && (len != 64) && (len != 16) && (len != 8)) { + break; + } + value = next[1]; + value <<= 32; + value |= next[0]; + o = OSNumber::withNumber(value, len); + next += 2; + break; + + case kOSSerializeSymbol: + bufferPos += (wordLen * sizeof(uint32_t)); + if (bufferPos > bufferSize) { + break; + } + if (len < 2) { + break; + } + if (0 != ((const char *)next)[len - 1]) { + break; + } + o = (OSObject *) OSSymbol::withCString((const char *) next); + next += wordLen; + break; + + case kOSSerializeString: + bufferPos += (wordLen * sizeof(uint32_t)); + if (bufferPos > bufferSize) { + break; + } + o = OSString::withStringOfLength((const char *) next, len); + next += wordLen; + break; + + case kOSSerializeData: + bufferPos += (wordLen * sizeof(uint32_t)); + if (bufferPos > bufferSize) { + break; + } + o = OSData::withBytes(next, len); + next += wordLen; + break; - case kOSSerializeNumber: - bufferPos += sizeof(long long); - if (bufferPos > bufferSize) break; - if ((len != 32) && (len != 64) && (len != 16) && (len != 8)) break; - value = next[1]; - value <<= 32; - value |= next[0]; - o = OSNumber::withNumber(value, len); - next += 2; - break; - - case kOSSerializeSymbol: - bufferPos += (wordLen * sizeof(uint32_t)); - if 
(bufferPos > bufferSize) break; - if (len < 2) break; - if (0 != ((const char *)next)[len-1]) break; - o = (OSObject *) OSSymbol::withCString((const char *) next); - next += wordLen; - break; - - case kOSSerializeString: - bufferPos += (wordLen * sizeof(uint32_t)); - if (bufferPos > bufferSize) break; - o = OSString::withStringOfLength((const char *) next, len); - next += wordLen; - break; - - case kOSSerializeData: - bufferPos += (wordLen * sizeof(uint32_t)); - if (bufferPos > bufferSize) break; - o = OSData::withBytes(next, len); - next += wordLen; - break; - - case kOSSerializeBoolean: - o = (len ? kOSBooleanTrue : kOSBooleanFalse); - break; - - default: - break; + case kOSSerializeBoolean: + o = (len ? kOSBooleanTrue : kOSBooleanFalse); + break; + + default: + break; } - if (!(ok = (o != 0))) break; + if (!(ok = (o != 0))) { + break; + } - if (!isRef) - { + if (!isRef) { setAtIndex(objs, objsIdx, o); - if (!ok) - { - o->release(); - break; - } + if (!ok) { + o->release(); + break; + } objsIdx++; } - if (dict) - { - if (!sym) sym = (OSSymbol *) o; - else - { - str = sym; + if (dict) { + if (!sym) { + sym = (OSSymbol *) o; + } else { + str = sym; sym = OSDynamicCast(OSSymbol, sym); - if (!sym && (str = OSDynamicCast(OSString, str))) - { - sym = const_cast(OSSymbol::withString(str)); - ok = (sym != 0); - if (!ok) break; + if (!sym && (str = OSDynamicCast(OSString, str))) { + sym = const_cast(OSSymbol::withString(str)); + ok = (sym != 0); + if (!ok) { + break; + } } DEBG("%s = %s\n", sym->getCStringNoCopy(), o->getMetaClass()->getClassName()); - if (o != dict) ok = dict->setObject(sym, o); - if (sym && (sym != str)) sym->release(); + if (o != dict) { + ok = dict->setObject(sym, o); + } + if (sym && (sym != str)) { + sym->release(); + } sym = 0; } - } - else if (array) ok = array->setObject(o); - else if (set) ok = set->setObject(o); - else if (result) ok = false; - else - { - assert(!parent); - result = o; + } else if (array) { + ok = array->setObject(o); + } else if (set) { + ok = set->setObject(o); + } else if (result) { + ok = false; + } else { + assert(!parent); + result = o; } - if (!ok) break; + if (!ok) { + break; + } - if (end) parent = 0; - if (newCollect) - { - stackIdx++; - setAtIndex(stack, stackIdx, parent); - if (!ok) break; + if (end) { + parent = 0; + } + if (newCollect) { + stackIdx++; + setAtIndex(stack, stackIdx, parent); + if (!ok) { + break; + } DEBG("++stack[%d] %p\n", stackIdx, parent); parent = o; dict = newDict; @@ -448,35 +497,43 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin end = false; } - if (end) - { - while (stackIdx) - { - parent = stackArray[stackIdx]; - DEBG("--stack[%d] %p\n", stackIdx, parent); - stackIdx--; - if (parent) break; - } - if (!parent) break; + if (end) { + while (stackIdx) { + parent = stackArray[stackIdx]; + DEBG("--stack[%d] %p\n", stackIdx, parent); + stackIdx--; + if (parent) { + break; + } + } + if (!parent) { + break; + } set = 0; - dict = 0; + dict = 0; array = 0; - if (!(dict = OSDynamicCast(OSDictionary, parent))) - { - if (!(array = OSDynamicCast(OSArray, parent))) ok = (0 != (set = OSDynamicCast(OSSet, parent))); + if (!(dict = OSDynamicCast(OSDictionary, parent))) { + if (!(array = OSDynamicCast(OSArray, parent))) { + ok = (0 != (set = OSDynamicCast(OSSet, parent))); + } } } } DEBG("ret %p\n", result); - if (!ok) result = 0; + if (!ok) { + result = 0; + } - if (objsCapacity) - { - for (len = (result != 0); len < objsIdx; len++) objsArray[len]->release(); - kfree(objsArray, objsCapacity * 
sizeof(*objsArray)); - } - if (stackCapacity) kfree(stackArray, stackCapacity * sizeof(*stackArray)); + if (objsCapacity) { + for (len = (result != 0); len < objsIdx; len++) { + objsArray[len]->release(); + } + kfree(objsArray, objsCapacity * sizeof(*objsArray)); + } + if (stackCapacity) { + kfree(stackArray, stackCapacity * sizeof(*stackArray)); + } - return (result); + return result; } diff --git a/libkern/c++/OSSet.cpp b/libkern/c++/OSSet.cpp index 644b9dabf..3c7701dcf 100644 --- a/libkern/c++/OSSet.cpp +++ b/libkern/c++/OSSet.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000, 2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOSet.m created by rsulack on Thu 11-Jun-1998 */ @@ -47,369 +47,425 @@ OSMetaClassDefineReservedUnused(OSSet, 7); #define EXT_CAST(obj) \ reinterpret_cast(const_cast(obj)) -bool OSSet::initWithCapacity(unsigned int inCapacity) +bool +OSSet::initWithCapacity(unsigned int inCapacity) { - if ( !super::init() ) - return false; + if (!super::init()) { + return false; + } - members = OSArray::withCapacity(inCapacity); - if (!members) - return false; + members = OSArray::withCapacity(inCapacity); + if (!members) { + return false; + } - return true; + return true; } -bool OSSet::initWithObjects(const OSObject *inObjects[], - unsigned int inCount, - unsigned int inCapacity) +bool +OSSet::initWithObjects(const OSObject *inObjects[], + unsigned int inCount, + unsigned int inCapacity) { - unsigned int capacity = inCount; + unsigned int capacity = inCount; - if ( inCapacity ) { - if ( inCount > inCapacity ) - return false; + if (inCapacity) { + if (inCount > inCapacity) { + return false; + } - capacity = inCapacity; - } + capacity = inCapacity; + } - if (!inObjects || !initWithCapacity(capacity)) - return false; + if (!inObjects || !initWithCapacity(capacity)) { + return false; + } - for ( unsigned int i = 0; i < inCount; i++ ) { + for (unsigned int i = 0; i < inCount; i++) { // xx-review: no test here for failure of setObject() - if (members->getCount() < capacity) - setObject(inObjects[i]); - else - return false; - } + if (members->getCount() < capacity) { + setObject(inObjects[i]); + } else { + return false; + } + } - return true; + return true; } -bool OSSet::initWithArray(const OSArray *inArray, - unsigned int inCapacity) +bool +OSSet::initWithArray(const OSArray *inArray, + unsigned int inCapacity) { - if ( !inArray ) - return false; - - return initWithObjects((const OSObject **) inArray->array, - inArray->count, 
inCapacity); + if (!inArray) { + return false; + } + + return initWithObjects((const OSObject **) inArray->array, + inArray->count, inCapacity); } -bool OSSet::initWithSet(const OSSet *inSet, - unsigned int inCapacity) +bool +OSSet::initWithSet(const OSSet *inSet, + unsigned int inCapacity) { - return initWithArray(inSet->members, inCapacity); + return initWithArray(inSet->members, inCapacity); } -OSSet *OSSet::withCapacity(unsigned int capacity) +OSSet * +OSSet::withCapacity(unsigned int capacity) { - OSSet *me = new OSSet; + OSSet *me = new OSSet; - if (me && !me->initWithCapacity(capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithCapacity(capacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSSet *OSSet::withObjects(const OSObject *objects[], - unsigned int count, - unsigned int capacity) +OSSet * +OSSet::withObjects(const OSObject *objects[], + unsigned int count, + unsigned int capacity) { - OSSet *me = new OSSet; + OSSet *me = new OSSet; - if (me && !me->initWithObjects(objects, count, capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithObjects(objects, count, capacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSSet *OSSet::withArray(const OSArray *array, - unsigned int capacity) +OSSet * +OSSet::withArray(const OSArray *array, + unsigned int capacity) { - OSSet *me = new OSSet; + OSSet *me = new OSSet; - if (me && !me->initWithArray(array, capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithArray(array, capacity)) { + me->release(); + return 0; + } - return me; + return me; } -OSSet *OSSet::withSet(const OSSet *set, - unsigned int capacity) +OSSet * +OSSet::withSet(const OSSet *set, + unsigned int capacity) { - OSSet *me = new OSSet; + OSSet *me = new OSSet; - if (me && !me->initWithSet(set, capacity)) { - me->release(); - return 0; - } + if (me && !me->initWithSet(set, capacity)) { + me->release(); + return 0; + } - return me; + return me; } -void OSSet::free() +void +OSSet::free() { - if (members) { - (void) members->super::setOptions(0, kImmutable); - members->release(); - } + if (members) { + (void) members->super::setOptions(0, kImmutable); + members->release(); + } - super::free(); + super::free(); } -unsigned int OSSet::getCount() const +unsigned int +OSSet::getCount() const { - return members->count; + return members->count; } -unsigned int OSSet::getCapacity() const +unsigned int +OSSet::getCapacity() const { - return members->capacity; + return members->capacity; } -unsigned int OSSet::getCapacityIncrement() const +unsigned int +OSSet::getCapacityIncrement() const { - return members->capacityIncrement; + return members->capacityIncrement; } -unsigned int OSSet::setCapacityIncrement(unsigned int increment) +unsigned int +OSSet::setCapacityIncrement(unsigned int increment) { - return members->setCapacityIncrement(increment); + return members->setCapacityIncrement(increment); } -unsigned int OSSet::ensureCapacity(unsigned int newCapacity) +unsigned int +OSSet::ensureCapacity(unsigned int newCapacity) { - return members->ensureCapacity(newCapacity); + return members->ensureCapacity(newCapacity); } -void OSSet::flushCollection() +void +OSSet::flushCollection() { - haveUpdated(); - members->flushCollection(); + haveUpdated(); + members->flushCollection(); } -bool OSSet::setObject(const OSMetaClassBase *anObject) +bool +OSSet::setObject(const OSMetaClassBase *anObject) { - if (containsObject(anObject)) { - return false; - } else { - haveUpdated(); - return 
members->setObject(anObject); - } + if (containsObject(anObject)) { + return false; + } else { + haveUpdated(); + return members->setObject(anObject); + } } -bool OSSet::merge(const OSArray * array) +bool +OSSet::merge(const OSArray * array) { - const OSMetaClassBase * anObject = 0; - bool result = true; - - for (int i = 0; (anObject = array->getObject(i)); i++) { - - /* setObject() returns false if the object is already in the set, - * so we have to check beforehand here with containsObject(). - */ - if (containsObject(anObject)) { - continue; - } - if (!setObject(anObject)) { - result = false; - } - } - - return result; + const OSMetaClassBase * anObject = 0; + bool result = true; + + for (int i = 0; (anObject = array->getObject(i)); i++) { + /* setObject() returns false if the object is already in the set, + * so we have to check beforehand here with containsObject(). + */ + if (containsObject(anObject)) { + continue; + } + if (!setObject(anObject)) { + result = false; + } + } + + return result; } -bool OSSet::merge(const OSSet * set) +bool +OSSet::merge(const OSSet * set) { - return merge(set->members); + return merge(set->members); } -void OSSet::removeObject(const OSMetaClassBase *anObject) +void +OSSet::removeObject(const OSMetaClassBase *anObject) { - const OSMetaClassBase *probeObject; - - for (int i = 0; (probeObject = members->getObject(i)); i++) - if (probeObject == anObject) { - haveUpdated(); - members->removeObject(i); - return; - } + const OSMetaClassBase *probeObject; + + for (int i = 0; (probeObject = members->getObject(i)); i++) { + if (probeObject == anObject) { + haveUpdated(); + members->removeObject(i); + return; + } + } } -bool OSSet::containsObject(const OSMetaClassBase *anObject) const +bool +OSSet::containsObject(const OSMetaClassBase *anObject) const { - return anObject && member(anObject); + return anObject && member(anObject); } -bool OSSet::member(const OSMetaClassBase *anObject) const +bool +OSSet::member(const OSMetaClassBase *anObject) const { - OSMetaClassBase *probeObject; + OSMetaClassBase *probeObject; - for (int i = 0; (probeObject = members->getObject(i)); i++) - if (probeObject == anObject) - return true; + for (int i = 0; (probeObject = members->getObject(i)); i++) { + if (probeObject == anObject) { + return true; + } + } - return false; + return false; } -OSObject *OSSet::getAnyObject() const +OSObject * +OSSet::getAnyObject() const { - return members->getObject(0); + return members->getObject(0); } -bool OSSet::isEqualTo(const OSSet *aSet) const +bool +OSSet::isEqualTo(const OSSet *aSet) const { - unsigned int count; - unsigned int i; - const OSMetaClassBase *obj1; - const OSMetaClassBase *obj2; - - if ( this == aSet ) - return true; - - count = members->count; - if ( count != aSet->getCount() ) - return false; - - for ( i = 0; i < count; i++ ) { - obj1 = aSet->members->getObject(i); - if (containsObject(obj1)) - continue; - obj2 = members->getObject(i); - if ( !obj1 || !obj2 ) - return false; - - if ( !obj1->isEqualTo(obj2) ) - return false; - } - - return true; + unsigned int count; + unsigned int i; + const OSMetaClassBase *obj1; + const OSMetaClassBase *obj2; + + if (this == aSet) { + return true; + } + + count = members->count; + if (count != aSet->getCount()) { + return false; + } + + for (i = 0; i < count; i++) { + obj1 = aSet->members->getObject(i); + if (containsObject(obj1)) { + continue; + } + obj2 = members->getObject(i); + if (!obj1 || !obj2) { + return false; + } + + if (!obj1->isEqualTo(obj2)) { + return false; + } + } + + return 
true; } -bool OSSet::isEqualTo(const OSMetaClassBase *anObject) const +bool +OSSet::isEqualTo(const OSMetaClassBase *anObject) const { - OSSet *otherSet; - - otherSet = OSDynamicCast(OSSet, anObject); - if ( otherSet ) - return isEqualTo(otherSet); - else - return false; + OSSet *otherSet; + + otherSet = OSDynamicCast(OSSet, anObject); + if (otherSet) { + return isEqualTo(otherSet); + } else { + return false; + } } -unsigned int OSSet::iteratorSize() const +unsigned int +OSSet::iteratorSize() const { - return sizeof(unsigned int); + return sizeof(unsigned int); } -bool OSSet::initIterator(void *inIterator) const +bool +OSSet::initIterator(void *inIterator) const { - unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int *iteratorP = (unsigned int *) inIterator; - *iteratorP = 0; - return true; + *iteratorP = 0; + return true; } -bool OSSet::getNextObjectForIterator(void *inIterator, OSObject **ret) const +bool +OSSet::getNextObjectForIterator(void *inIterator, OSObject **ret) const { - unsigned int *iteratorP = (unsigned int *) inIterator; - unsigned int index = (*iteratorP)++; + unsigned int *iteratorP = (unsigned int *) inIterator; + unsigned int index = (*iteratorP)++; - if (index < members->count) - *ret = members->getObject(index); - else - *ret = 0; + if (index < members->count) { + *ret = members->getObject(index); + } else { + *ret = 0; + } - return (*ret != 0); + return *ret != 0; } -bool OSSet::serialize(OSSerialize *s) const +bool +OSSet::serialize(OSSerialize *s) const { - const OSMetaClassBase *o; + const OSMetaClassBase *o; + + if (s->previouslySerialized(this)) { + return true; + } - if (s->previouslySerialized(this)) return true; - - if (!s->addXMLStartTag(this, "set")) return false; + if (!s->addXMLStartTag(this, "set")) { + return false; + } - for (int i = 0; (o = members->getObject(i)); i++) { - if (!o->serialize(s)) return false; - } + for (int i = 0; (o = members->getObject(i)); i++) { + if (!o->serialize(s)) { + return false; + } + } - return s->addXMLEndTag("set"); + return s->addXMLEndTag("set"); } -unsigned OSSet::setOptions(unsigned options, unsigned mask, void *) +unsigned +OSSet::setOptions(unsigned options, unsigned mask, void *) { - unsigned old = super::setOptions(options, mask); - if ((old ^ options) & mask) - members->setOptions(options, mask); + unsigned old = super::setOptions(options, mask); + if ((old ^ options) & mask) { + members->setOptions(options, mask); + } - return old; + return old; } -OSCollection * OSSet::copyCollection(OSDictionary *cycleDict) +OSCollection * +OSSet::copyCollection(OSDictionary *cycleDict) { - bool allocDict = !cycleDict; - OSCollection *ret = 0; - OSSet *newSet = 0; - - if (allocDict) { - cycleDict = OSDictionary::withCapacity(16); - if (!cycleDict) - return 0; - } - - do { - // Check for a cycle - ret = super::copyCollection(cycleDict); - if (ret) - continue; // Found it - - newSet = OSSet::withCapacity(members->capacity); - if (!newSet) - continue; // Couldn't create new set abort - - // Insert object into cycle Dictionary - cycleDict->setObject((const OSSymbol *) this, newSet); - - OSArray *newMembers = newSet->members; - newMembers->capacityIncrement = members->capacityIncrement; - - // Now copy over the contents into the new duplicate - for (unsigned int i = 0; i < members->count; i++) { - OSObject *obj = EXT_CAST(members->array[i]); - OSCollection *coll = OSDynamicCast(OSCollection, obj); - if (coll) { - OSCollection *newColl = coll->copyCollection(cycleDict); - if (newColl) { - obj = newColl; // Rely on 
cycleDict ref for a bit - newColl->release(); + bool allocDict = !cycleDict; + OSCollection *ret = 0; + OSSet *newSet = 0; + + if (allocDict) { + cycleDict = OSDictionary::withCapacity(16); + if (!cycleDict) { + return 0; } - else - goto abortCopy; - }; - newMembers->setObject(obj); - }; + } - ret = newSet; - newSet = 0; + do { + // Check for a cycle + ret = super::copyCollection(cycleDict); + if (ret) { + continue; // Found it + } + newSet = OSSet::withCapacity(members->capacity); + if (!newSet) { + continue; // Couldn't create new set abort + } + // Insert object into cycle Dictionary + cycleDict->setObject((const OSSymbol *) this, newSet); + + OSArray *newMembers = newSet->members; + newMembers->capacityIncrement = members->capacityIncrement; + + // Now copy over the contents into the new duplicate + for (unsigned int i = 0; i < members->count; i++) { + OSObject *obj = EXT_CAST(members->array[i]); + OSCollection *coll = OSDynamicCast(OSCollection, obj); + if (coll) { + OSCollection *newColl = coll->copyCollection(cycleDict); + if (newColl) { + obj = newColl; // Rely on cycleDict ref for a bit + newColl->release(); + } else { + goto abortCopy; + } + } + ; + newMembers->setObject(obj); + } + ; - } while(false); + ret = newSet; + newSet = 0; + } while (false); abortCopy: - if (newSet) - newSet->release(); + if (newSet) { + newSet->release(); + } - if (allocDict) - cycleDict->release(); + if (allocDict) { + cycleDict->release(); + } - return ret; + return ret; } diff --git a/libkern/c++/OSString.cpp b/libkern/c++/OSString.cpp index 346f7ffc1..91fc3cba2 100644 --- a/libkern/c++/OSString.cpp +++ b/libkern/c++/OSString.cpp @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOString.m created by rsulack on Wed 17-Sep-1997 */ @@ -39,16 +39,16 @@ #define super OSObject OSDefineMetaClassAndStructors(OSString, OSObject) -OSMetaClassDefineReservedUnused(OSString, 0); -OSMetaClassDefineReservedUnused(OSString, 1); -OSMetaClassDefineReservedUnused(OSString, 2); -OSMetaClassDefineReservedUnused(OSString, 3); -OSMetaClassDefineReservedUnused(OSString, 4); -OSMetaClassDefineReservedUnused(OSString, 5); -OSMetaClassDefineReservedUnused(OSString, 6); -OSMetaClassDefineReservedUnused(OSString, 7); -OSMetaClassDefineReservedUnused(OSString, 8); -OSMetaClassDefineReservedUnused(OSString, 9); +OSMetaClassDefineReservedUnused(OSString, 0); +OSMetaClassDefineReservedUnused(OSString, 1); +OSMetaClassDefineReservedUnused(OSString, 2); +OSMetaClassDefineReservedUnused(OSString, 3); +OSMetaClassDefineReservedUnused(OSString, 4); +OSMetaClassDefineReservedUnused(OSString, 5); +OSMetaClassDefineReservedUnused(OSString, 6); +OSMetaClassDefineReservedUnused(OSString, 7); +OSMetaClassDefineReservedUnused(OSString, 8); +OSMetaClassDefineReservedUnused(OSString, 9); OSMetaClassDefineReservedUnused(OSString, 10); OSMetaClassDefineReservedUnused(OSString, 11); OSMetaClassDefineReservedUnused(OSString, 12); @@ -56,276 +56,336 @@ OSMetaClassDefineReservedUnused(OSString, 13); OSMetaClassDefineReservedUnused(OSString, 14); OSMetaClassDefineReservedUnused(OSString, 15); -bool OSString::initWithString(const OSString *aString) +bool +OSString::initWithString(const OSString *aString) { - return initWithCString(aString->string); + return initWithCString(aString->string); } -bool OSString::initWithCString(const char *cString) +bool +OSString::initWithCString(const char *cString) { - unsigned int newLength; - char * newString; + unsigned int newLength; + char * newString; - if (!cString || !super::init()) return false; + if (!cString || !super::init()) { + return false; + } - newLength = strnlen(cString, kMaxStringLength); - if (newLength >= kMaxStringLength) return false; + newLength = strnlen(cString, kMaxStringLength); + if (newLength >= kMaxStringLength) { + return false; + } - newLength++; - newString = (char *) kalloc_container(newLength); - if (!newString) return false; + newLength++; + newString = (char *) kalloc_container(newLength); + if (!newString) { + return false; + } - bcopy(cString, newString, newLength); + bcopy(cString, newString, newLength); - if ( !(flags & kOSStringNoCopy) && string) { - kfree(string, (vm_size_t)length); - OSCONTAINER_ACCUMSIZE(-((size_t)length)); - } - string = newString; - length = newLength; - flags &= ~kOSStringNoCopy; + if (!(flags & kOSStringNoCopy) && string) { + kfree(string, (vm_size_t)length); + OSCONTAINER_ACCUMSIZE(-((size_t)length)); + } + string = newString; + length = newLength; + flags &= ~kOSStringNoCopy; - OSCONTAINER_ACCUMSIZE(length); + OSCONTAINER_ACCUMSIZE(length); - return true; + return true; } -bool OSString::initWithStringOfLength(const char *cString, size_t inlength) +bool +OSString::initWithStringOfLength(const char *cString, size_t inlength) { - unsigned int newLength; - char * newString; + unsigned int newLength; + char * newString; - if (!cString || !super::init()) return false; + if (!cString || !super::init()) { + return false; + } - if (inlength >= kMaxStringLength) return false; + if (inlength >= kMaxStringLength) { + return false; + } - newLength = inlength + 1; - newString = (char *) kalloc_container(newLength); - if (!newString) return false; + if (strnlen(cString, 
inlength) < inlength) { + return false; + } - bcopy(cString, newString, inlength); - newString[inlength] = 0; + newLength = inlength + 1; + newString = (char *) kalloc_container(newLength); + if (!newString) { + return false; + } - if ( !(flags & kOSStringNoCopy) && string) { - kfree(string, (vm_size_t)length); - OSCONTAINER_ACCUMSIZE(-((size_t)length)); - } + bcopy(cString, newString, inlength); + newString[inlength] = 0; - string = newString; - length = newLength; - flags &= ~kOSStringNoCopy; + if (!(flags & kOSStringNoCopy) && string) { + kfree(string, (vm_size_t)length); + OSCONTAINER_ACCUMSIZE(-((size_t)length)); + } - OSCONTAINER_ACCUMSIZE(length); + string = newString; + length = newLength; + flags &= ~kOSStringNoCopy; - return true; + OSCONTAINER_ACCUMSIZE(length); + + return true; } -bool OSString::initWithCStringNoCopy(const char *cString) +bool +OSString::initWithCStringNoCopy(const char *cString) { - if (!cString || !super::init()) - return false; + if (!cString || !super::init()) { + return false; + } - length = strnlen(cString, kMaxStringLength); - if (length >= kMaxStringLength) return false; + length = strnlen(cString, kMaxStringLength); + if (length >= kMaxStringLength) { + return false; + } - length++; - flags |= kOSStringNoCopy; - string = const_cast(cString); + length++; + flags |= kOSStringNoCopy; + string = const_cast(cString); - return true; + return true; } -OSString *OSString::withString(const OSString *aString) +OSString * +OSString::withString(const OSString *aString) { - OSString *me = new OSString; + OSString *me = new OSString; - if (me && !me->initWithString(aString)) { - me->release(); - return 0; - } + if (me && !me->initWithString(aString)) { + me->release(); + return 0; + } - return me; + return me; } -OSString *OSString::withCString(const char *cString) +OSString * +OSString::withCString(const char *cString) { - OSString *me = new OSString; + OSString *me = new OSString; - if (me && !me->initWithCString(cString)) { - me->release(); - return 0; - } + if (me && !me->initWithCString(cString)) { + me->release(); + return 0; + } - return me; + return me; } -OSString *OSString::withCStringNoCopy(const char *cString) +OSString * +OSString::withCStringNoCopy(const char *cString) { - OSString *me = new OSString; + OSString *me = new OSString; - if (me && !me->initWithCStringNoCopy(cString)) { - me->release(); - return 0; - } + if (me && !me->initWithCStringNoCopy(cString)) { + me->release(); + return 0; + } - return me; + return me; } -OSString *OSString::withStringOfLength(const char *cString, size_t length) +OSString * +OSString::withStringOfLength(const char *cString, size_t length) { - OSString *me = new OSString; + OSString *me = new OSString; - if (me && !me->initWithStringOfLength(cString, length)) { - me->release(); - return 0; - } + if (me && !me->initWithStringOfLength(cString, length)) { + me->release(); + return 0; + } - return me; + return me; } /* @@@ gvdl */ #if 0 -OSString *OSString::stringWithFormat(const char *format, ...) +OSString * +OSString::stringWithFormat(const char *format, ...) 
{ -#ifndef KERNEL // mach3xxx - OSString *me; - va_list argList; - - if (!format) - return 0; - - va_start(argList, format); - me = stringWithCapacity(256); - me->length = vsnprintf(me->string, 256, format, argList); - me->length++; // we include the null in the length - if (me->Length > 256) - me->Length = 256; - va_end (argList); - - return me; +#ifndef KERNEL // mach3xxx + OSString *me; + va_list argList; + + if (!format) { + return 0; + } + + va_start(argList, format); + me = stringWithCapacity(256); + me->length = vsnprintf(me->string, 256, format, argList); + me->length++; // we include the null in the length + if (me->Length > 256) { + me->Length = 256; + } + va_end(argList); + + return me; #else - return 0; + return 0; #endif } #endif /* 0 */ -void OSString::free() +void +OSString::free() { - if ( !(flags & kOSStringNoCopy) && string) { - kfree(string, (vm_size_t)length); - OSCONTAINER_ACCUMSIZE(-((size_t)length)); - } + if (!(flags & kOSStringNoCopy) && string) { + kfree(string, (vm_size_t)length); + OSCONTAINER_ACCUMSIZE(-((size_t)length)); + } - super::free(); + super::free(); } -unsigned int OSString::getLength() const { return length - 1; } +unsigned int +OSString::getLength() const +{ + return length - 1; +} -const char *OSString::getCStringNoCopy() const +const char * +OSString::getCStringNoCopy() const { - return string; + return string; } -bool OSString::setChar(char aChar, unsigned int index) +bool +OSString::setChar(char aChar, unsigned int index) { - if ( !(flags & kOSStringNoCopy) && index < length - 1) { - string[index] = aChar; + if (!(flags & kOSStringNoCopy) && index < length - 1) { + string[index] = aChar; - return true; - } - else - return false; + return true; + } else { + return false; + } } -char OSString::getChar(unsigned int index) const +char +OSString::getChar(unsigned int index) const { - if (index < length) - return string[index]; - else - return '\0'; + if (index < length) { + return string[index]; + } else { + return '\0'; + } } -bool OSString::isEqualTo(const OSString *aString) const +bool +OSString::isEqualTo(const OSString *aString) const { - if (length != aString->length) - return false; - else - return isEqualTo((const char *) aString->string); + if (length != aString->length) { + return false; + } else { + return isEqualTo((const char *) aString->string); + } } -bool OSString::isEqualTo(const char *aCString) const +bool +OSString::isEqualTo(const char *aCString) const { - return strncmp(string, aCString, length) == 0; + return strncmp(string, aCString, length) == 0; } -bool OSString::isEqualTo(const OSMetaClassBase *obj) const +bool +OSString::isEqualTo(const OSMetaClassBase *obj) const { - OSString * str; - OSData * data; - - if ((str = OSDynamicCast(OSString, obj))) - return isEqualTo(str); - else if ((data = OSDynamicCast (OSData, obj))) - return isEqualTo(data); - else - return false; + OSString * str; + OSData * data; + + if ((str = OSDynamicCast(OSString, obj))) { + return isEqualTo(str); + } else if ((data = OSDynamicCast(OSData, obj))) { + return isEqualTo(data); + } else { + return false; + } } -bool OSString::isEqualTo(const OSData *obj) const +bool +OSString::isEqualTo(const OSData *obj) const { - if (NULL == obj) - return false; - - unsigned int dataLen = obj->getLength ();; - const char * dataPtr = (const char *) obj->getBytesNoCopy (); - - if (dataLen != length) { - - // check for the fact that OSData may be a buffer that - // that includes a termination byte and will thus have - // a length of the actual string length PLUS 1. 
In this - // case we verify that the additional byte is a terminator - // and if so count the two lengths as being the same. - - if ( (dataLen - length) == 1 ) { - if (dataPtr[dataLen-1] != 0) - return false; - dataLen--; - } - else - return false; - } - - for ( unsigned int i=0; i < dataLen; i++ ) { - if ( *dataPtr++ != string[i] ) - return false; - } - - return true; + if (NULL == obj) { + return false; + } + + unsigned int dataLen = obj->getLength();; + const char * dataPtr = (const char *) obj->getBytesNoCopy(); + + if (dataLen != length) { + // check for the fact that OSData may be a buffer that + // that includes a termination byte and will thus have + // a length of the actual string length PLUS 1. In this + // case we verify that the additional byte is a terminator + // and if so count the two lengths as being the same. + + if ((dataLen - length) == 1) { + if (dataPtr[dataLen - 1] != 0) { + return false; + } + dataLen--; + } else { + return false; + } + } + + for (unsigned int i = 0; i < dataLen; i++) { + if (*dataPtr++ != string[i]) { + return false; + } + } + + return true; } -bool OSString::serialize(OSSerialize *s) const +bool +OSString::serialize(OSSerialize *s) const { - char *c = string; - - if (s->previouslySerialized(this)) return true; - - if (!s->addXMLStartTag(this, "string")) return false; - while (*c) { - if (*c == '<') { - if (!s->addString("<")) return false; - } else if (*c == '>') { - if (!s->addString(">")) return false; - } else if (*c == '&') { - if (!s->addString("&")) return false; - } else { - if (!s->addChar(*c)) return false; - } - c++; - } - - return s->addXMLEndTag("string"); + char *c = string; + + if (s->previouslySerialized(this)) { + return true; + } + + if (!s->addXMLStartTag(this, "string")) { + return false; + } + while (*c) { + if (*c == '<') { + if (!s->addString("<")) { + return false; + } + } else if (*c == '>') { + if (!s->addString(">")) { + return false; + } + } else if (*c == '&') { + if (!s->addString("&")) { + return false; + } + } else { + if (!s->addChar(*c)) { + return false; + } + } + c++; + } + + return s->addXMLEndTag("string"); } diff --git a/libkern/c++/OSSymbol.cpp b/libkern/c++/OSSymbol.cpp index 25c16b1d7..455ea10d6 100644 --- a/libkern/c++/OSSymbol.cpp +++ b/libkern/c++/OSSymbol.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOSymbol.cpp created by gvdl on Fri 1998-11-17 */ @@ -47,388 +47,454 @@ typedef struct { unsigned int i, j; } OSSymbolPoolState; #define GROW_POOL() do \ if (count * GROW_FACTOR > nBuckets) { \ - reconstructSymbols(true); \ + reconstructSymbols(true); \ } \ while (0) #define SHRINK_POOL() do \ if (count * SHRINK_FACTOR < nBuckets && \ - nBuckets > INITIAL_POOL_SIZE) { \ - reconstructSymbols(false); \ + nBuckets > INITIAL_POOL_SIZE) { \ + reconstructSymbols(false); \ } \ while (0) class OSSymbolPool { private: - static const unsigned int kInitBucketCount = 16; - - typedef struct { unsigned int count; OSSymbol **symbolP; } Bucket; - - Bucket *buckets; - unsigned int nBuckets; - unsigned int count; - lck_mtx_t *poolGate; - - static inline void hashSymbol(const char *s, - unsigned int *hashP, - unsigned int *lenP) - { - unsigned int hash = 0; - unsigned int len = 0; - - /* Unroll the loop. */ - for (;;) { - if (!*s) break; len++; hash ^= *s++; - if (!*s) break; len++; hash ^= *s++ << 8; - if (!*s) break; len++; hash ^= *s++ << 16; - if (!*s) break; len++; hash ^= *s++ << 24; - } - *lenP = len; - *hashP = hash; - } - - static unsigned long log2(unsigned int x); - static unsigned long exp2ml(unsigned int x); - - void reconstructSymbols(void); - void reconstructSymbols(bool grow); + static const unsigned int kInitBucketCount = 16; + + typedef struct { unsigned int count; OSSymbol **symbolP; } Bucket; + + Bucket *buckets; + unsigned int nBuckets; + unsigned int count; + lck_rw_t *poolGate; + + static inline void + hashSymbol(const char *s, + unsigned int *hashP, + unsigned int *lenP) + { + unsigned int hash = 0; + unsigned int len = 0; + + /* Unroll the loop. */ + for (;;) { + if (!*s) { + break; + } + len++; hash ^= *s++; + if (!*s) { + break; + } + len++; hash ^= *s++ << 8; + if (!*s) { + break; + } + len++; hash ^= *s++ << 16; + if (!*s) { + break; + } + len++; hash ^= *s++ << 24; + } + *lenP = len; + *hashP = hash; + } + + static unsigned long log2(unsigned int x); + static unsigned long exp2ml(unsigned int x); + + void reconstructSymbols(void); + void reconstructSymbols(bool grow); public: - static void *operator new(size_t size); - static void operator delete(void *mem, size_t size); + static void *operator new(size_t size); + static void operator delete(void *mem, size_t size); - OSSymbolPool() { } - OSSymbolPool(const OSSymbolPool *old); - virtual ~OSSymbolPool(); + OSSymbolPool() + { + } + OSSymbolPool(const OSSymbolPool *old); + virtual + ~OSSymbolPool(); - bool init(); + bool init(); - inline void closeGate() { lck_mtx_lock(poolGate); } - inline void openGate() { lck_mtx_unlock(poolGate); } + inline void + closeReadGate() + { + lck_rw_lock(poolGate, LCK_RW_TYPE_SHARED); + } + + inline void + openReadGate() + { + lck_rw_unlock(poolGate, LCK_RW_TYPE_SHARED); + } + + + inline void + closeWriteGate() + { + lck_rw_lock(poolGate, LCK_RW_TYPE_EXCLUSIVE); + } + + inline void + openWriteGate() + { + lck_rw_unlock(poolGate, LCK_RW_TYPE_EXCLUSIVE); + } - OSSymbol *findSymbol(const char *cString) const; - OSSymbol *insertSymbol(OSSymbol *sym); - void removeSymbol(OSSymbol *sym); + LIBKERN_RETURNS_RETAINED OSSymbol *findSymbol(const char *cString) const; + LIBKERN_RETURNS_RETAINED OSSymbol *insertSymbol(OSSymbol *sym); + void removeSymbol(OSSymbol *sym); - OSSymbolPoolState initHashState(); - OSSymbol *nextHashState(OSSymbolPoolState *stateP); + OSSymbolPoolState initHashState(); + LIBKERN_RETURNS_NOT_RETAINED OSSymbol 
*nextHashState(OSSymbolPoolState *stateP); }; -void * OSSymbolPool::operator new(size_t size) +void * +OSSymbolPool::operator new(size_t size) { - void *mem = (void *)kalloc_tag(size, VM_KERN_MEMORY_LIBKERN); - OSMETA_ACCUMSIZE(size); - assert(mem); - bzero(mem, size); + void *mem = (void *)kalloc_tag(size, VM_KERN_MEMORY_LIBKERN); + OSMETA_ACCUMSIZE(size); + assert(mem); + bzero(mem, size); - return mem; + return mem; } -void OSSymbolPool::operator delete(void *mem, size_t size) +void +OSSymbolPool::operator delete(void *mem, size_t size) { - kfree(mem, size); - OSMETA_ACCUMSIZE(-size); + kfree(mem, size); + OSMETA_ACCUMSIZE(-size); } extern lck_grp_t *IOLockGroup; -bool OSSymbolPool::init() +bool +OSSymbolPool::init() { - count = 0; - nBuckets = INITIAL_POOL_SIZE; - buckets = (Bucket *) kalloc_tag(nBuckets * sizeof(Bucket), VM_KERN_MEMORY_LIBKERN); - OSMETA_ACCUMSIZE(nBuckets * sizeof(Bucket)); - if (!buckets) - return false; + count = 0; + nBuckets = INITIAL_POOL_SIZE; + buckets = (Bucket *) kalloc_tag(nBuckets * sizeof(Bucket), VM_KERN_MEMORY_LIBKERN); + OSMETA_ACCUMSIZE(nBuckets * sizeof(Bucket)); + if (!buckets) { + return false; + } - bzero(buckets, nBuckets * sizeof(Bucket)); + bzero(buckets, nBuckets * sizeof(Bucket)); - poolGate = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); + poolGate = lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL); - return poolGate != 0; + return poolGate != 0; } OSSymbolPool::OSSymbolPool(const OSSymbolPool *old) { - count = old->count; - nBuckets = old->nBuckets; - buckets = old->buckets; + count = old->count; + nBuckets = old->nBuckets; + buckets = old->buckets; - poolGate = 0; // Do not duplicate the poolGate + poolGate = 0; // Do not duplicate the poolGate } OSSymbolPool::~OSSymbolPool() { - if (buckets) { - Bucket *thisBucket; - for (thisBucket = &buckets[0]; thisBucket < &buckets[nBuckets]; thisBucket++) { - if (thisBucket->count > 1) { - kfree(thisBucket->symbolP, thisBucket->count * sizeof(OSSymbol *)); - OSMETA_ACCUMSIZE(-(thisBucket->count * sizeof(OSSymbol *))); - } - } - kfree(buckets, nBuckets * sizeof(Bucket)); - OSMETA_ACCUMSIZE(-(nBuckets * sizeof(Bucket))); - } - - if (poolGate) - lck_mtx_free(poolGate, IOLockGroup); + if (buckets) { + Bucket *thisBucket; + for (thisBucket = &buckets[0]; thisBucket < &buckets[nBuckets]; thisBucket++) { + if (thisBucket->count > 1) { + kfree(thisBucket->symbolP, thisBucket->count * sizeof(OSSymbol *)); + OSMETA_ACCUMSIZE(-(thisBucket->count * sizeof(OSSymbol *))); + } + } + kfree(buckets, nBuckets * sizeof(Bucket)); + OSMETA_ACCUMSIZE(-(nBuckets * sizeof(Bucket))); + } + + if (poolGate) { + lck_rw_free(poolGate, IOLockGroup); + } } -unsigned long OSSymbolPool::log2(unsigned int x) +unsigned long +OSSymbolPool::log2(unsigned int x) { - unsigned long i; + unsigned long i; - for (i = 0; x > 1 ; i++) - x >>= 1; - return i; + for (i = 0; x > 1; i++) { + x >>= 1; + } + return i; } -unsigned long OSSymbolPool::exp2ml(unsigned int x) +unsigned long +OSSymbolPool::exp2ml(unsigned int x) { - return (1 << x) - 1; + return (1 << x) - 1; } -OSSymbolPoolState OSSymbolPool::initHashState() +OSSymbolPoolState +OSSymbolPool::initHashState() { - OSSymbolPoolState newState = { nBuckets, 0 }; - return newState; + OSSymbolPoolState newState = { nBuckets, 0 }; + return newState; } -OSSymbol *OSSymbolPool::nextHashState(OSSymbolPoolState *stateP) +OSSymbol * +OSSymbolPool::nextHashState(OSSymbolPoolState *stateP) { - Bucket *thisBucket = &buckets[stateP->i]; - - while (!stateP->j) { - if (!stateP->i) - return 0; - stateP->i--; - 
thisBucket--; - stateP->j = thisBucket->count; - } + Bucket *thisBucket = &buckets[stateP->i]; + + while (!stateP->j) { + if (!stateP->i) { + return 0; + } + stateP->i--; + thisBucket--; + stateP->j = thisBucket->count; + } - stateP->j--; - if (thisBucket->count == 1) - return (OSSymbol *) thisBucket->symbolP; - else - return thisBucket->symbolP[stateP->j]; + stateP->j--; + if (thisBucket->count == 1) { + return (OSSymbol *) thisBucket->symbolP; + } else { + return thisBucket->symbolP[stateP->j]; + } } -void OSSymbolPool::reconstructSymbols(void) +void +OSSymbolPool::reconstructSymbols(void) { - this->reconstructSymbols(true); + this->reconstructSymbols(true); } -void OSSymbolPool::reconstructSymbols(bool grow) -{ - unsigned int new_nBuckets = nBuckets; - OSSymbol *insert; - OSSymbolPoolState state; - - if (grow) { - new_nBuckets += new_nBuckets + 1; - } else { - /* Don't shrink the pool below the default initial size. - */ - if (nBuckets <= INITIAL_POOL_SIZE) { - return; - } - new_nBuckets = (new_nBuckets - 1) / 2; - } - - /* Create old pool to iterate after doing above check, cause it - * gets finalized at return. - */ - OSSymbolPool old(this); - - count = 0; - nBuckets = new_nBuckets; - buckets = (Bucket *) kalloc_tag(nBuckets * sizeof(Bucket), VM_KERN_MEMORY_LIBKERN); - OSMETA_ACCUMSIZE(nBuckets * sizeof(Bucket)); - /* @@@ gvdl: Zero test and panic if can't set up pool */ - bzero(buckets, nBuckets * sizeof(Bucket)); - - state = old.initHashState(); - while ( (insert = old.nextHashState(&state)) ) - insertSymbol(insert); +void +OSSymbolPool::reconstructSymbols(bool grow) +{ + unsigned int new_nBuckets = nBuckets; + OSSymbol *insert; + OSSymbolPoolState state; + + if (grow) { + new_nBuckets += new_nBuckets + 1; + } else { + /* Don't shrink the pool below the default initial size. + */ + if (nBuckets <= INITIAL_POOL_SIZE) { + return; + } + new_nBuckets = (new_nBuckets - 1) / 2; + } + + /* Create old pool to iterate after doing above check, cause it + * gets finalized at return. 
+ */ + OSSymbolPool old(this); + + count = 0; + nBuckets = new_nBuckets; + buckets = (Bucket *) kalloc_tag(nBuckets * sizeof(Bucket), VM_KERN_MEMORY_LIBKERN); + OSMETA_ACCUMSIZE(nBuckets * sizeof(Bucket)); + /* @@@ gvdl: Zero test and panic if can't set up pool */ + bzero(buckets, nBuckets * sizeof(Bucket)); + + state = old.initHashState(); + while ((insert = old.nextHashState(&state))) { + insertSymbol(insert); + } } -OSSymbol *OSSymbolPool::findSymbol(const char *cString) const +OSSymbol * +OSSymbolPool::findSymbol(const char *cString) const { - Bucket *thisBucket; - unsigned int j, inLen, hash; - OSSymbol *probeSymbol, **list; + Bucket *thisBucket; + unsigned int j, inLen, hash; + OSSymbol *probeSymbol, **list; - hashSymbol(cString, &hash, &inLen); inLen++; - thisBucket = &buckets[hash % nBuckets]; - j = thisBucket->count; + hashSymbol(cString, &hash, &inLen); inLen++; + thisBucket = &buckets[hash % nBuckets]; + j = thisBucket->count; - if (!j) - return 0; + if (!j) { + return 0; + } - if (j == 1) { - probeSymbol = (OSSymbol *) thisBucket->symbolP; + if (j == 1) { + probeSymbol = (OSSymbol *) thisBucket->symbolP; - if (inLen == probeSymbol->length - && (strncmp(probeSymbol->string, cString, probeSymbol->length) == 0)) - return probeSymbol; - return 0; - } + if (inLen == probeSymbol->length + && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0 + && probeSymbol->taggedTryRetain(nullptr)) { + return probeSymbol; + } + return 0; + } - for (list = thisBucket->symbolP; j--; list++) { - probeSymbol = *list; - if (inLen == probeSymbol->length - && (strncmp(probeSymbol->string, cString, probeSymbol->length) == 0)) - return probeSymbol; - } + for (list = thisBucket->symbolP; j--; list++) { + probeSymbol = *list; + if (inLen == probeSymbol->length + && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0 + && probeSymbol->taggedTryRetain(nullptr)) { + return probeSymbol; + } + } - return 0; + return 0; } -OSSymbol *OSSymbolPool::insertSymbol(OSSymbol *sym) -{ - const char *cString = sym->string; - Bucket *thisBucket; - unsigned int j, inLen, hash; - OSSymbol *probeSymbol, **list; - - hashSymbol(cString, &hash, &inLen); inLen++; - thisBucket = &buckets[hash % nBuckets]; - j = thisBucket->count; - - if (!j) { - thisBucket->symbolP = (OSSymbol **) sym; - thisBucket->count++; - count++; - return sym; - } - - if (j == 1) { - probeSymbol = (OSSymbol *) thisBucket->symbolP; - - if (inLen == probeSymbol->length - && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0) - return probeSymbol; - - list = (OSSymbol **) kalloc_tag(2 * sizeof(OSSymbol *), VM_KERN_MEMORY_LIBKERN); - OSMETA_ACCUMSIZE(2 * sizeof(OSSymbol *)); - /* @@@ gvdl: Zero test and panic if can't set up pool */ - list[0] = sym; - list[1] = probeSymbol; - thisBucket->symbolP = list; - thisBucket->count++; - count++; - GROW_POOL(); - - return sym; - } - - for (list = thisBucket->symbolP; j--; list++) { - probeSymbol = *list; - if (inLen == probeSymbol->length - && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0) - return probeSymbol; - } - - j = thisBucket->count++; - count++; - list = (OSSymbol **) kalloc_tag(thisBucket->count * sizeof(OSSymbol *), VM_KERN_MEMORY_LIBKERN); - OSMETA_ACCUMSIZE(thisBucket->count * sizeof(OSSymbol *)); - /* @@@ gvdl: Zero test and panic if can't set up pool */ - list[0] = sym; - bcopy(thisBucket->symbolP, list + 1, j * sizeof(OSSymbol *)); - kfree(thisBucket->symbolP, j * sizeof(OSSymbol *)); - OSMETA_ACCUMSIZE(-(j * sizeof(OSSymbol *))); - thisBucket->symbolP 
= list; - GROW_POOL(); - - return sym; +OSSymbol * +OSSymbolPool::insertSymbol(OSSymbol *sym) +{ + const char *cString = sym->string; + Bucket *thisBucket; + unsigned int j, inLen, hash; + OSSymbol *probeSymbol, **list; + + hashSymbol(cString, &hash, &inLen); inLen++; + thisBucket = &buckets[hash % nBuckets]; + j = thisBucket->count; + + if (!j) { + thisBucket->symbolP = (OSSymbol **) sym; + thisBucket->count++; + count++; + return nullptr; + } + + if (j == 1) { + probeSymbol = (OSSymbol *) thisBucket->symbolP; + + if (inLen == probeSymbol->length + && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0 + && probeSymbol->taggedTryRetain(nullptr)) { + return probeSymbol; + } + + list = (OSSymbol **) kalloc_tag(2 * sizeof(OSSymbol *), VM_KERN_MEMORY_LIBKERN); + OSMETA_ACCUMSIZE(2 * sizeof(OSSymbol *)); + /* @@@ gvdl: Zero test and panic if can't set up pool */ + list[0] = sym; + list[1] = probeSymbol; + thisBucket->symbolP = list; + thisBucket->count++; + count++; + GROW_POOL(); + + return nullptr; + } + + for (list = thisBucket->symbolP; j--; list++) { + probeSymbol = *list; + if (inLen == probeSymbol->length + && strncmp(probeSymbol->string, cString, probeSymbol->length) == 0 + && probeSymbol->taggedTryRetain(nullptr)) { + return probeSymbol; + } + } + + j = thisBucket->count++; + count++; + list = (OSSymbol **) kalloc_tag(thisBucket->count * sizeof(OSSymbol *), VM_KERN_MEMORY_LIBKERN); + OSMETA_ACCUMSIZE(thisBucket->count * sizeof(OSSymbol *)); + /* @@@ gvdl: Zero test and panic if can't set up pool */ + list[0] = sym; + bcopy(thisBucket->symbolP, list + 1, j * sizeof(OSSymbol *)); + kfree(thisBucket->symbolP, j * sizeof(OSSymbol *)); + OSMETA_ACCUMSIZE(-(j * sizeof(OSSymbol *))); + thisBucket->symbolP = list; + GROW_POOL(); + + return nullptr; } -void OSSymbolPool::removeSymbol(OSSymbol *sym) +void +OSSymbolPool::removeSymbol(OSSymbol *sym) { - Bucket *thisBucket; - unsigned int j, inLen, hash; - OSSymbol *probeSymbol, **list; + Bucket *thisBucket; + unsigned int j, inLen, hash; + OSSymbol *probeSymbol, **list; + + hashSymbol(sym->string, &hash, &inLen); inLen++; + thisBucket = &buckets[hash % nBuckets]; + j = thisBucket->count; + list = thisBucket->symbolP; + + if (!j) { + // couldn't find the symbol; probably means string hash changed + panic("removeSymbol %s count %d ", sym->string ? sym->string : "no string", count); + return; + } - hashSymbol(sym->string, &hash, &inLen); inLen++; - thisBucket = &buckets[hash % nBuckets]; - j = thisBucket->count; - list = thisBucket->symbolP; + if (j == 1) { + probeSymbol = (OSSymbol *) list; + + if (probeSymbol == sym) { + thisBucket->symbolP = 0; + count--; + thisBucket->count--; + SHRINK_POOL(); + return; + } + // couldn't find the symbol; probably means string hash changed + panic("removeSymbol %s count %d ", sym->string ? sym->string : "no string", count); + return; + } - if (!j) { - // couldn't find the symbol; probably means string hash changed - panic("removeSymbol %s count %d ", sym->string ? sym->string : "no string", count); - return; - } - - if (j == 1) { - probeSymbol = (OSSymbol *) list; - - if (probeSymbol == sym) { - thisBucket->symbolP = 0; - count--; - thisBucket->count--; - SHRINK_POOL(); - return; - } - // couldn't find the symbol; probably means string hash changed - panic("removeSymbol %s count %d ", sym->string ? 
sym->string : "no string", count); - return; - } - - if (j == 2) { - probeSymbol = list[0]; - if (probeSymbol == sym) { - thisBucket->symbolP = (OSSymbol **) list[1]; - kfree(list, 2 * sizeof(OSSymbol *)); - OSMETA_ACCUMSIZE(-(2 * sizeof(OSSymbol *))); - count--; - thisBucket->count--; - SHRINK_POOL(); - return; - } - - probeSymbol = list[1]; - if (probeSymbol == sym) { - thisBucket->symbolP = (OSSymbol **) list[0]; - kfree(list, 2 * sizeof(OSSymbol *)); - OSMETA_ACCUMSIZE(-(2 * sizeof(OSSymbol *))); - count--; - thisBucket->count--; - SHRINK_POOL(); - return; - } + if (j == 2) { + probeSymbol = list[0]; + if (probeSymbol == sym) { + thisBucket->symbolP = (OSSymbol **) list[1]; + kfree(list, 2 * sizeof(OSSymbol *)); + OSMETA_ACCUMSIZE(-(2 * sizeof(OSSymbol *))); + count--; + thisBucket->count--; + SHRINK_POOL(); + return; + } + + probeSymbol = list[1]; + if (probeSymbol == sym) { + thisBucket->symbolP = (OSSymbol **) list[0]; + kfree(list, 2 * sizeof(OSSymbol *)); + OSMETA_ACCUMSIZE(-(2 * sizeof(OSSymbol *))); + count--; + thisBucket->count--; + SHRINK_POOL(); + return; + } + // couldn't find the symbol; probably means string hash changed + panic("removeSymbol %s count %d ", sym->string ? sym->string : "no string", count); + return; + } + + for (; j--; list++) { + probeSymbol = *list; + if (probeSymbol == sym) { + list = (OSSymbol **) + kalloc_tag((thisBucket->count - 1) * sizeof(OSSymbol *), VM_KERN_MEMORY_LIBKERN); + OSMETA_ACCUMSIZE((thisBucket->count - 1) * sizeof(OSSymbol *)); + if (thisBucket->count - 1 != j) { + bcopy(thisBucket->symbolP, list, + (thisBucket->count - 1 - j) * sizeof(OSSymbol *)); + } + if (j) { + bcopy(thisBucket->symbolP + thisBucket->count - j, + list + thisBucket->count - 1 - j, + j * sizeof(OSSymbol *)); + } + kfree(thisBucket->symbolP, thisBucket->count * sizeof(OSSymbol *)); + OSMETA_ACCUMSIZE(-(thisBucket->count * sizeof(OSSymbol *))); + thisBucket->symbolP = list; + count--; + thisBucket->count--; + return; + } + } // couldn't find the symbol; probably means string hash changed - panic("removeSymbol %s count %d ", sym->string ? sym->string : "no string", count); - return; - } - - for (; j--; list++) { - probeSymbol = *list; - if (probeSymbol == sym) { - - list = (OSSymbol **) - kalloc_tag((thisBucket->count-1) * sizeof(OSSymbol *), VM_KERN_MEMORY_LIBKERN); - OSMETA_ACCUMSIZE((thisBucket->count-1) * sizeof(OSSymbol *)); - if (thisBucket->count-1 != j) - bcopy(thisBucket->symbolP, list, - (thisBucket->count-1-j) * sizeof(OSSymbol *)); - if (j) - bcopy(thisBucket->symbolP + thisBucket->count-j, - list + thisBucket->count-1-j, - j * sizeof(OSSymbol *)); - kfree(thisBucket->symbolP, thisBucket->count * sizeof(OSSymbol *)); - OSMETA_ACCUMSIZE(-(thisBucket->count * sizeof(OSSymbol *))); - thisBucket->symbolP = list; - count--; - thisBucket->count--; - return; - } - } - // couldn't find the symbol; probably means string hash changed - panic("removeSymbol %s count %d ", sym->string ? sym->string : "no string", count); + panic("removeSymbol %s count %d ", sym->string ? 
sym->string : "no string", count); } /* @@ -437,7 +503,7 @@ void OSSymbolPool::removeSymbol(OSSymbol *sym) ********************************************************************* */ OSDefineMetaClassAndStructorsWithInit(OSSymbol, OSString, - OSSymbol::initialize()) + OSSymbol::initialize()) OSMetaClassDefineReservedUnused(OSSymbol, 0); OSMetaClassDefineReservedUnused(OSSymbol, 1); OSMetaClassDefineReservedUnused(OSSymbol, 2); @@ -449,148 +515,199 @@ OSMetaClassDefineReservedUnused(OSSymbol, 7); static OSSymbolPool *pool; -void OSSymbol::initialize() +void +OSSymbol::initialize() { - pool = new OSSymbolPool; - assert(pool); + pool = new OSSymbolPool; + assert(pool); - if (pool && !pool->init()) { - delete pool; - assert(false); - }; + if (pool && !pool->init()) { + delete pool; + assert(false); + } + ; } -bool OSSymbol::initWithCStringNoCopy(const char *) { return false; } -bool OSSymbol::initWithCString(const char *) { return false; } -bool OSSymbol::initWithString(const OSString *) { return false; } - -const OSSymbol *OSSymbol::withString(const OSString *aString) -{ - // This string may be a OSSymbol already, cheap check. - if (OSDynamicCast(OSSymbol, aString)) { - aString->retain(); - return (const OSSymbol *) aString; - } - else if (((const OSSymbol *) aString)->flags & kOSStringNoCopy) - return OSSymbol::withCStringNoCopy(aString->getCStringNoCopy()); - else - return OSSymbol::withCString(aString->getCStringNoCopy()); +bool +OSSymbol::initWithCStringNoCopy(const char *) +{ + return false; +} +bool +OSSymbol::initWithCString(const char *) +{ + return false; +} +bool +OSSymbol::initWithString(const OSString *) +{ + return false; +} + +const OSSymbol * +OSSymbol::withString(const OSString *aString) +{ + // This string may be a OSSymbol already, cheap check. + if (OSDynamicCast(OSSymbol, aString)) { + aString->retain(); + return (const OSSymbol *) aString; + } else if (((const OSSymbol *) aString)->flags & kOSStringNoCopy) { + return OSSymbol::withCStringNoCopy(aString->getCStringNoCopy()); + } else { + return OSSymbol::withCString(aString->getCStringNoCopy()); + } +} + +const OSSymbol * +OSSymbol::withCString(const char *cString) +{ + const OSSymbol *symbol; + + // Check if the symbol exists already, we don't need to take a lock here, + // since existingSymbolForCString will take the shared lock. + symbol = OSSymbol::existingSymbolForCString(cString); + if (symbol) { + return symbol; + } + + OSSymbol *newSymb = new OSSymbol; + if (!newSymb) { + return newSymb; + } + + if (newSymb->OSString::initWithCString(cString)) { + pool->closeWriteGate(); + symbol = pool->insertSymbol(newSymb); + pool->openWriteGate(); + + if (symbol) { + // Somebody must have inserted the new symbol so free our copy + newSymb->OSString::free(); + return symbol; + } + } + + return newSymb; // return the newly created & inserted symbol. } -const OSSymbol *OSSymbol::withCString(const char *cString) -{ - pool->closeGate(); - - OSSymbol *oldSymb = pool->findSymbol(cString); - if (!oldSymb) { - OSSymbol *newSymb = new OSSymbol; - if (!newSymb) { - pool->openGate(); - return newSymb; - } - - if (newSymb->OSString::initWithCString(cString)) - oldSymb = pool->insertSymbol(newSymb); - - if (newSymb == oldSymb) { - pool->openGate(); - return newSymb; // return the newly created & inserted symbol. - } - else - // Somebody else inserted the new symbol so free our copy - newSymb->OSString::free(); - } - - if (oldSymb) oldSymb->retain(); // Retain the old symbol before releasing the lock. 
+const OSSymbol *
+OSSymbol::withCStringNoCopy(const char *cString)
+{
+	const OSSymbol *symbol;
+	OSSymbol *newSymb;
+
+	// Check if the symbol exists already; we don't need to take a lock here,
+	// since existingSymbolForCString will take the shared lock.
+	symbol = OSSymbol::existingSymbolForCString(cString);
+	if (symbol) {
+		return symbol;
+	}
+
+	newSymb = new OSSymbol;
+	if (!newSymb) {
+		return newSymb;
+	}
+
+	if (newSymb->OSString::initWithCStringNoCopy(cString)) {
+		pool->closeWriteGate();
+		symbol = pool->insertSymbol(newSymb);
+		pool->openWriteGate();
+
+		if (symbol) {
+			// Somebody must have inserted the new symbol, so free our copy
+			newSymb->OSString::free();
+			return symbol;
+		}
+	}
+
+	return newSymb; // return the newly created & inserted symbol.
 }
 
-const OSSymbol *OSSymbol::withCStringNoCopy(const char *cString)
-{
-    pool->closeGate();
-
-    OSSymbol *oldSymb = pool->findSymbol(cString);
-    if (!oldSymb) {
-        OSSymbol *newSymb = new OSSymbol;
-        if (!newSymb) {
-            pool->openGate();
-            return newSymb;
-        }
-
-        if (newSymb->OSString::initWithCStringNoCopy(cString))
-            oldSymb = pool->insertSymbol(newSymb);
-
-        if (newSymb == oldSymb) {
-            pool->openGate();
-            return newSymb; // return the newly created & inserted symbol.
-        }
-        else
-            // Somebody else inserted the new symbol so free our copy
-            newSymb->OSString::free();
-    }
-
-    oldSymb->retain(); // Retain the old symbol before releasing the lock.
-
-    pool->openGate();
-    return oldSymb;
+const OSSymbol *
+OSSymbol::existingSymbolForString(const OSString *aString)
+{
+	if (OSDynamicCast(OSSymbol, aString)) {
+		aString->retain();
+		return (const OSSymbol *) aString;
+	}
+
+	return OSSymbol::existingSymbolForCString(aString->getCStringNoCopy());
 }
 
-void OSSymbol::checkForPageUnload(void *startAddr, void *endAddr)
+const OSSymbol *
+OSSymbol::existingSymbolForCString(const char *cString)
 {
-    OSSymbol *probeSymbol;
-    OSSymbolPoolState state;
+	OSSymbol *symbol;
 
-    pool->closeGate();
-    state = pool->initHashState();
-    while ( (probeSymbol = pool->nextHashState(&state)) ) {
-        if (probeSymbol->string >= startAddr && probeSymbol->string < endAddr) {
-            probeSymbol->OSString::initWithCString(probeSymbol->string);
-        }
-    }
-    pool->openGate();
+	pool->closeReadGate();
+	symbol = pool->findSymbol(cString);
+	pool->openReadGate();
+
+	return symbol;
 }
 
-void OSSymbol::taggedRelease(const void *tag) const
+void
+OSSymbol::checkForPageUnload(void *startAddr, void *endAddr)
 {
-    super::taggedRelease(tag);
+	OSSymbol *probeSymbol;
+	OSSymbolPoolState state;
+
+	pool->closeWriteGate();
+	state = pool->initHashState();
+	while ((probeSymbol = pool->nextHashState(&state))) {
+		if (probeSymbol->string >= startAddr && probeSymbol->string < endAddr) {
+			probeSymbol->OSString::initWithCString(probeSymbol->string);
+		}
+	}
+	pool->openWriteGate();
 }
 
-void OSSymbol::taggedRelease(const void *tag, const int when) const
+void
+OSSymbol::taggedRelease(const void *tag) const
 {
-    pool->closeGate();
-    super::taggedRelease(tag, when);
-    pool->openGate();
+	super::taggedRelease(tag);
 }
 
-void OSSymbol::free()
+void
+OSSymbol::taggedRelease(const void *tag, const int when) const
 {
-    pool->removeSymbol(this);
-    super::free();
+	super::taggedRelease(tag, when);
 }
 
-bool OSSymbol::isEqualTo(const char *aCString) const
+void
+OSSymbol::free()
 {
-    return super::isEqualTo(aCString);
+	pool->closeWriteGate();
+	pool->removeSymbol(this);
+	pool->openWriteGate();
+	super::free();
 }
 
-bool OSSymbol::isEqualTo(const OSSymbol *aSymbol) const
+bool
+OSSymbol::isEqualTo(const char *aCString) const { - return aSymbol == this; + return super::isEqualTo(aCString); } -bool OSSymbol::isEqualTo(const OSMetaClassBase *obj) const +bool +OSSymbol::isEqualTo(const OSSymbol *aSymbol) const { - OSSymbol * sym; - OSString * str; + return aSymbol == this; +} - if ((sym = OSDynamicCast(OSSymbol, obj))) - return isEqualTo(sym); - else if ((str = OSDynamicCast(OSString, obj))) - return super::isEqualTo(str); - else - return false; +bool +OSSymbol::isEqualTo(const OSMetaClassBase *obj) const +{ + OSSymbol * sym; + OSString * str; + + if ((sym = OSDynamicCast(OSSymbol, obj))) { + return isEqualTo(sym); + } else if ((str = OSDynamicCast(OSString, obj))) { + return super::isEqualTo(str); + } else { + return false; + } } unsigned int @@ -600,25 +717,22 @@ OSSymbol::bsearch( unsigned int arrayCount, size_t memberSize) { - const void **p; - unsigned int baseIdx = 0; - unsigned int lim; - - for (lim = arrayCount; lim; lim >>= 1) - { - p = (typeof(p)) (((uintptr_t) array) + (baseIdx + (lim >> 1)) * memberSize); - if (key == *p) - { - return (baseIdx + (lim >> 1)); - } - if (key > *p) - { - // move right - baseIdx += (lim >> 1) + 1; - lim--; - } - // else move left - } - // not found, insertion point here - return (baseIdx + (lim >> 1)); + const void **p; + unsigned int baseIdx = 0; + unsigned int lim; + + for (lim = arrayCount; lim; lim >>= 1) { + p = (typeof(p))(((uintptr_t) array) + (baseIdx + (lim >> 1)) * memberSize); + if (key == *p) { + return baseIdx + (lim >> 1); + } + if (key > *p) { + // move right + baseIdx += (lim >> 1) + 1; + lim--; + } + // else move left + } + // not found, insertion point here + return baseIdx + (lim >> 1); } diff --git a/libkern/c++/OSUnserialize.cpp b/libkern/c++/OSUnserialize.cpp index 3d82a99ce..adcccb339 100644 --- a/libkern/c++/OSUnserialize.cpp +++ b/libkern/c++/OSUnserialize.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSUnserialize.y created by rsulack on Nov 21 1998 */ -// "classic" parser for unserializing OSContainer objects +// "classic" parser for unserializing OSContainer objects // // XXX - this code should really be removed! // - the XML format is now prefered @@ -51,47 +51,47 @@ /* A Bison parser, made by GNU Bison 2.3. */ /* Skeleton implementation for Bison's Yacc-like parsers in C - - Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 - Free Software Foundation, Inc. 
- - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, - Boston, MA 02110-1301, USA. */ + * + * Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 + * Free Software Foundation, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. */ /* As a special exception, you may create a larger work that contains - part or all of the Bison parser skeleton and distribute that work - under terms of your choice, so long as that work isn't itself a - parser generator using the skeleton or a modified version thereof - as a parser skeleton. Alternatively, if you modify or redistribute - the parser skeleton itself, you may (at your option) remove this - special exception, which will cause the skeleton and the resulting - Bison output files to be licensed under the GNU General Public - License without this special exception. - - This special exception was added by the Free Software Foundation in - version 2.2 of Bison. */ + * part or all of the Bison parser skeleton and distribute that work + * under terms of your choice, so long as that work isn't itself a + * parser generator using the skeleton or a modified version thereof + * as a parser skeleton. Alternatively, if you modify or redistribute + * the parser skeleton itself, you may (at your option) remove this + * special exception, which will cause the skeleton and the resulting + * Bison output files to be licensed under the GNU General Public + * License without this special exception. + * + * This special exception was added by the Free Software Foundation in + * version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by - simplifying the original so-called "semantic" parser. */ +* simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid - infringing on user name space. This should be done even for local - variables, as they might otherwise be expanded by user macros. - There are some unavoidable exceptions within include files to - define necessary library symbols; they are noted "INFRINGES ON - USER NAME SPACE" below. */ + * infringing on user name space. This should be done even for local + * variables, as they might otherwise be expanded by user macros. 
+ * There are some unavoidable exceptions within include files to + * define necessary library symbols; they are noted "INFRINGES ON + * USER NAME SPACE" below. */ /* Identify Bison output. */ #define YYBISON 1 @@ -121,15 +121,15 @@ /* Tokens. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE - /* Put the tokens into the symbol table, so that GDB and other debuggers - know about them. */ - enum yytokentype { - NUMBER = 258, - STRING = 259, - DATA = 260, - BOOLEAN = 261, - SYNTAX_ERROR = 262 - }; +/* Put the tokens into the symbol table, so that GDB and other debuggers + * know about them. */ +enum yytokentype { + NUMBER = 258, + STRING = 259, + DATA = 260, + BOOLEAN = 261, + SYNTAX_ERROR = 262 +}; #endif /* Tokens. */ #define NUMBER 258 @@ -148,16 +148,15 @@ #include #include -typedef struct object { - struct object *next; - struct object *prev; - void *object; - int size; // for data +typedef struct object { + struct object *next; + struct object *prev; + void *object; + int size; // for data union { - void *key; // for dictionary - long long offset; // for offset + void *key; // for dictionary + long long offset; // for offset } u; - } object_t; static int yyerror(const char *s); @@ -181,7 +180,7 @@ static OSObject *retrieveObject(int); static object_t *oo; // resultant object of parsed text -static OSObject *parsedObject; +static OSObject *parsedObject; #define YYSTYPE object_t * @@ -211,7 +210,7 @@ static OSObject *parsedObject; # define YYTOKEN_TABLE 0 #endif -#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED +#if !defined YYSTYPE && !defined YYSTYPE_IS_DECLARED typedef int YYSTYPE; # define yystype YYSTYPE /* obsolescent; will be withdrawn */ # define YYSTYPE_IS_DECLARED 1 @@ -239,7 +238,7 @@ typedef unsigned char yytype_uint8; #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; #elif (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) typedef signed char yytype_int8; #else typedef short int yytype_int8; @@ -262,8 +261,8 @@ typedef short int yytype_int16; # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t -# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# elif !defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \ + || defined __cplusplus || defined _MSC_VER) # include /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else @@ -286,7 +285,7 @@ typedef short int yytype_int16; #endif /* Suppress unused-variable warnings by "using" E. */ -#if ! defined lint || defined __GNUC__ +#if !defined lint || defined __GNUC__ # define YYUSE(e) ((void) (e)) #else # define YYUSE(e) /* empty */ @@ -297,20 +296,20 @@ typedef short int yytype_int16; # define YYID(n) (n) #else #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static int -YYID (int i) +YYID(int i) #else static int -YYID (i) - int i; + YYID(i) +int i; #endif { - return i; + return i; } #endif -#if ! defined yyoverflow || YYERROR_VERBOSE +#if !defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ @@ -327,8 +326,8 @@ YYID (i) # define alloca _alloca # else # define YYSTACK_ALLOC alloca -# if ! defined _ALLOCA_H && ! 
defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# if !defined _ALLOCA_H && !defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ + || defined __cplusplus || defined _MSC_VER) # include /* INFRINGES ON USER NAME SPACE */ # ifndef _STDLIB_H # define _STDLIB_H 1 @@ -339,13 +338,13 @@ YYID (i) # endif # ifdef YYSTACK_ALLOC - /* Pacify GCC's `empty if-body' warning. */ -# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0)) +/* Pacify GCC's `empty if-body' warning. */ +# define YYSTACK_FREE(Ptr) do { /* empty */ ; } while (YYID (0)) # ifndef YYSTACK_ALLOC_MAXIMUM - /* The OS might guarantee only one guard page at the bottom of the stack, - and a page size can be as small as 4096 bytes. So we cannot safely - invoke alloca (N) if N exceeds 4096. Use a slightly smaller number - to allow for a few compiler-allocated temporary stack slots. */ +/* The OS might guarantee only one guard page at the bottom of the stack, + * and a page size can be as small as 4096 bytes. So we cannot safely + * invoke alloca (N) if N exceeds 4096. Use a slightly smaller number + * to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else @@ -354,9 +353,9 @@ YYID (i) # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif -# if (defined __cplusplus && ! defined _STDLIB_H \ - && ! ((defined YYMALLOC || defined malloc) \ - && (defined YYFREE || defined free))) +# if (defined __cplusplus && !defined _STDLIB_H \ + && !((defined YYMALLOC || defined malloc) \ + && (defined YYFREE || defined free))) # include /* INFRINGES ON USER NAME SPACE */ # ifndef _STDLIB_H # define _STDLIB_H 1 @@ -364,74 +363,73 @@ YYID (i) # endif # ifndef YYMALLOC # define YYMALLOC malloc -# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ +# if !defined malloc && !defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ + || defined __cplusplus || defined _MSC_VER) +void *malloc(YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free -# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -void free (void *); /* INFRINGES ON USER NAME SPACE */ +# if !defined free && !defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ + || defined __cplusplus || defined _MSC_VER) +void free(void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ -#if (! defined yyoverflow \ - && (! defined __cplusplus \ - || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) +#if (!defined yyoverflow \ + && (!defined __cplusplus \ + || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ -union yyalloc -{ - yytype_int16 yyss; - YYSTYPE yyvs; - }; +union yyalloc { + yytype_int16 yyss; + YYSTYPE yyvs; +}; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with - N elements. */ + * N elements. 
*/ # define YYSTACK_BYTES(N) \ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) /* Copy COUNT objects from FROM to TO. The source and destination do - not overlap. */ + * not overlap. */ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(To, From, Count) \ __builtin_memcpy (To, From, (Count) * sizeof (*(From))) # else -# define YYCOPY(To, From, Count) \ - do \ - { \ - YYSIZE_T yyi; \ - for (yyi = 0; yyi < (Count); yyi++) \ - (To)[yyi] = (From)[yyi]; \ - } \ +# define YYCOPY(To, From, Count) \ + do \ + { \ + YYSIZE_T yyi; \ + for (yyi = 0; yyi < (Count); yyi++) \ + (To)[yyi] = (From)[yyi]; \ + } \ while (YYID (0)) # endif # endif /* Relocate STACK from its old location to the new one. The - local variables YYSIZE and YYSTACKSIZE give the old and new number of - elements in the stack, and YYPTR gives the new location of the - stack. Advance YYPTR to a properly aligned location for the next - stack. */ -# define YYSTACK_RELOCATE(Stack) \ - do \ - { \ - YYSIZE_T yynewbytes; \ - YYCOPY (&yyptr->Stack, Stack, yysize); \ - Stack = &yyptr->Stack; \ + * local variables YYSIZE and YYSTACKSIZE give the old and new number of + * elements in the stack, and YYPTR gives the new location of the + * stack. Advance YYPTR to a properly aligned location for the next + * stack. */ +# define YYSTACK_RELOCATE(Stack) \ + do \ + { \ + YYSIZE_T yynewbytes; \ + YYCOPY (&yyptr->Stack, Stack, yysize); \ + Stack = &yyptr->Stack; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ - yyptr += yynewbytes / sizeof (*yyptr); \ - } \ + yyptr += yynewbytes / sizeof (*yyptr); \ + } \ while (YYID (0)) #endif @@ -454,267 +452,267 @@ union yyalloc #define YYUNDEFTOK 2 #define YYMAXUTOK 262 -#define YYTRANSLATE(YYX) \ +#define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. 
*/ static const yytype_uint8 yytranslate[] = { - 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 13, 14, 2, 2, 17, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 18, 12, - 2, 11, 2, 2, 8, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 15, 2, 16, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 9, 2, 10, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, - 5, 6, 7 + 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 13, 14, 2, 2, 17, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 18, 12, + 2, 11, 2, 2, 8, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 15, 2, 16, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 9, 2, 10, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, + 5, 6, 7 }; #if YYDEBUG /* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in - YYRHS. */ + * YYRHS. */ static const yytype_uint8 yyprhs[] = { - 0, 0, 3, 4, 6, 8, 10, 12, 14, 16, - 18, 20, 22, 25, 29, 32, 36, 38, 41, 46, - 49, 53, 56, 60, 62, 66, 70, 72, 74 + 0, 0, 3, 4, 6, 8, 10, 12, 14, 16, + 18, 20, 22, 25, 29, 32, 36, 38, 41, 46, + 49, 53, 56, 60, 62, 66, 70, 72, 74 }; /* YYRHS -- A `-1'-separated list of the rules' RHS. */ static const yytype_int8 yyrhs[] = { - 20, 0, -1, -1, 21, -1, 7, -1, 22, -1, - 25, -1, 26, -1, 30, -1, 29, -1, 28, -1, - 31, -1, 8, 3, -1, 21, 8, 3, -1, 9, - 10, -1, 9, 23, 10, -1, 24, -1, 23, 24, - -1, 21, 11, 21, 12, -1, 13, 14, -1, 13, - 27, 14, -1, 15, 16, -1, 15, 27, 16, -1, - 21, -1, 27, 17, 21, -1, 3, 18, 3, -1, - 5, -1, 4, -1, 6, -1 + 20, 0, -1, -1, 21, -1, 7, -1, 22, -1, + 25, -1, 26, -1, 30, -1, 29, -1, 28, -1, + 31, -1, 8, 3, -1, 21, 8, 3, -1, 9, + 10, -1, 9, 23, 10, -1, 24, -1, 23, 24, + -1, 21, 11, 21, 12, -1, 13, 14, -1, 13, + 27, 14, -1, 15, 16, -1, 15, 27, 16, -1, + 21, -1, 27, 17, 21, -1, 3, 18, 3, -1, + 5, -1, 4, -1, 6, -1 }; /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ static const yytype_uint8 yyrline[] = { - 0, 121, 121, 122, 123, 126, 127, 128, 129, 130, - 131, 132, 133, 142, 150, 151, 154, 155, 158, 168, - 169, 172, 173, 176, 181, 192, 200, 205, 210 + 0, 121, 121, 122, 123, 126, 127, 128, 129, 130, + 131, 132, 133, 142, 150, 151, 154, 155, 158, 168, + 169, 172, 173, 176, 181, 192, 200, 205, 210 }; #endif #if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. - First, the terminals, then, starting at YYNTOKENS, nonterminals. */ + * First, the terminals, then, starting at YYNTOKENS, nonterminals. 
*/ static const char *const yytname[] = { - "$end", "error", "$undefined", "NUMBER", "STRING", "DATA", "BOOLEAN", - "SYNTAX_ERROR", "'@'", "'{'", "'}'", "'='", "';'", "'('", "')'", "'['", - "']'", "','", "':'", "$accept", "input", "object", "dict", "pairs", - "pair", "array", "set", "elements", "offset", "data", "string", - "boolean", 0 + "$end", "error", "$undefined", "NUMBER", "STRING", "DATA", "BOOLEAN", + "SYNTAX_ERROR", "'@'", "'{'", "'}'", "'='", "';'", "'('", "')'", "'['", + "']'", "','", "':'", "$accept", "input", "object", "dict", "pairs", + "pair", "array", "set", "elements", "offset", "data", "string", + "boolean", 0 }; #endif # ifdef YYPRINT /* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to - token YYLEX-NUM. */ + * token YYLEX-NUM. */ static const yytype_uint16 yytoknum[] = { - 0, 256, 257, 258, 259, 260, 261, 262, 64, 123, - 125, 61, 59, 40, 41, 91, 93, 44, 58 + 0, 256, 257, 258, 259, 260, 261, 262, 64, 123, + 125, 61, 59, 40, 41, 91, 93, 44, 58 }; # endif /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ static const yytype_uint8 yyr1[] = { - 0, 19, 20, 20, 20, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 22, 22, 23, 23, 24, 25, - 25, 26, 26, 27, 27, 28, 29, 30, 31 + 0, 19, 20, 20, 20, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 22, 22, 23, 23, 24, 25, + 25, 26, 26, 27, 27, 28, 29, 30, 31 }; /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */ static const yytype_uint8 yyr2[] = { - 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 2, 3, 2, 3, 1, 2, 4, 2, - 3, 2, 3, 1, 3, 3, 1, 1, 1 + 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 2, 3, 2, 3, 1, 2, 4, 2, + 3, 2, 3, 1, 3, 3, 1, 1, 1 }; /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state - STATE-NUM when YYTABLE doesn't specify something else to do. Zero - means the default is an error. */ + * STATE-NUM when YYTABLE doesn't specify something else to do. Zero + * means the default is an error. */ static const yytype_uint8 yydefact[] = { - 2, 0, 27, 26, 28, 4, 0, 0, 0, 0, - 0, 3, 5, 6, 7, 10, 9, 8, 11, 0, - 12, 14, 0, 0, 16, 19, 23, 0, 21, 0, - 1, 0, 25, 0, 15, 17, 20, 0, 22, 13, - 0, 24, 18 + 2, 0, 27, 26, 28, 4, 0, 0, 0, 0, + 0, 3, 5, 6, 7, 10, 9, 8, 11, 0, + 12, 14, 0, 0, 16, 19, 23, 0, 21, 0, + 1, 0, 25, 0, 15, 17, 20, 0, 22, 13, + 0, 24, 18 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int8 yydefgoto[] = { - -1, 10, 22, 12, 23, 24, 13, 14, 27, 15, - 16, 17, 18 + -1, 10, 22, 12, 23, 24, 13, 14, 27, 15, + 16, 17, 18 }; /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing - STATE-NUM. */ + * STATE-NUM. */ #define YYPACT_NINF -14 static const yytype_int8 yypact[] = { - 12, -13, -14, -14, -14, -14, 9, 26, 39, -2, - 10, 20, -14, -14, -14, -14, -14, -14, -14, 35, - -14, -14, 38, 52, -14, -14, 20, 49, -14, 7, - -14, 37, -14, 65, -14, -14, -14, 65, -14, -14, - 14, 20, -14 + 12, -13, -14, -14, -14, -14, 9, 26, 39, -2, + 10, 20, -14, -14, -14, -14, -14, -14, -14, 35, + -14, -14, 38, 52, -14, -14, 20, 49, -14, 7, + -14, 37, -14, 65, -14, -14, -14, 65, -14, -14, + 14, 20, -14 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int8 yypgoto[] = { - -14, -14, 0, -14, -14, 27, -14, -14, 42, -14, - -14, -14, -14 + -14, -14, 0, -14, -14, 27, -14, -14, 42, -14, + -14, -14, -14 }; /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If - positive, shift that token. If negative, reduce the rule which - number is the opposite. If zero, do what YYDEFACT says. - If YYTABLE_NINF, syntax error. */ + * positive, shift that token. If negative, reduce the rule which + * number is the opposite. 
If zero, do what YYDEFACT says. + * If YYTABLE_NINF, syntax error. */ #define YYTABLE_NINF -1 static const yytype_uint8 yytable[] = { - 11, 1, 2, 3, 4, 19, 6, 7, 26, 26, - 30, 8, 20, 9, 28, 1, 2, 3, 4, 5, - 6, 7, 31, 38, 37, 8, 42, 9, 31, 1, - 2, 3, 4, 40, 6, 7, 21, 41, 32, 8, - 39, 9, 1, 2, 3, 4, 31, 6, 7, 33, - 35, 29, 8, 25, 9, 1, 2, 3, 4, 0, - 6, 7, 34, 36, 0, 8, 37, 9, 1, 2, - 3, 4, 0, 6, 7, 0, 0, 0, 8, 0, - 9 + 11, 1, 2, 3, 4, 19, 6, 7, 26, 26, + 30, 8, 20, 9, 28, 1, 2, 3, 4, 5, + 6, 7, 31, 38, 37, 8, 42, 9, 31, 1, + 2, 3, 4, 40, 6, 7, 21, 41, 32, 8, + 39, 9, 1, 2, 3, 4, 31, 6, 7, 33, + 35, 29, 8, 25, 9, 1, 2, 3, 4, 0, + 6, 7, 34, 36, 0, 8, 37, 9, 1, 2, + 3, 4, 0, 6, 7, 0, 0, 0, 8, 0, + 9 }; static const yytype_int8 yycheck[] = { - 0, 3, 4, 5, 6, 18, 8, 9, 8, 9, - 0, 13, 3, 15, 16, 3, 4, 5, 6, 7, - 8, 9, 8, 16, 17, 13, 12, 15, 8, 3, - 4, 5, 6, 33, 8, 9, 10, 37, 3, 13, - 3, 15, 3, 4, 5, 6, 8, 8, 9, 11, - 23, 9, 13, 14, 15, 3, 4, 5, 6, -1, - 8, 9, 10, 14, -1, 13, 17, 15, 3, 4, - 5, 6, -1, 8, 9, -1, -1, -1, 13, -1, - 15 + 0, 3, 4, 5, 6, 18, 8, 9, 8, 9, + 0, 13, 3, 15, 16, 3, 4, 5, 6, 7, + 8, 9, 8, 16, 17, 13, 12, 15, 8, 3, + 4, 5, 6, 33, 8, 9, 10, 37, 3, 13, + 3, 15, 3, 4, 5, 6, 8, 8, 9, 11, + 23, 9, 13, 14, 15, 3, 4, 5, 6, -1, + 8, 9, 10, 14, -1, 13, 17, 15, 3, 4, + 5, 6, -1, 8, 9, -1, -1, -1, 13, -1, + 15 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing - symbol of state STATE-NUM. */ + * symbol of state STATE-NUM. */ static const yytype_uint8 yystos[] = { - 0, 3, 4, 5, 6, 7, 8, 9, 13, 15, - 20, 21, 22, 25, 26, 28, 29, 30, 31, 18, - 3, 10, 21, 23, 24, 14, 21, 27, 16, 27, - 0, 8, 3, 11, 10, 24, 14, 17, 16, 3, - 21, 21, 12 + 0, 3, 4, 5, 6, 7, 8, 9, 13, 15, + 20, 21, 22, 25, 26, 28, 29, 30, 31, 18, + 3, 10, 21, 23, 24, 14, 21, 27, 16, 27, + 0, 8, 3, 11, 10, 24, 14, 17, 16, 3, + 21, 21, 12 }; -#define yyerrok (yyerrstatus = 0) -#define yyclearin (yychar = YYEMPTY) -#define YYEMPTY (-2) -#define YYEOF 0 +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = YYEMPTY) +#define YYEMPTY (-2) +#define YYEOF 0 -#define YYACCEPT goto yyacceptlab -#define YYABORT goto yyabortlab -#define YYERROR goto yyerrorlab +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrorlab /* Like YYERROR except do call yyerror. This remains here temporarily - to ease the transition to the new meaning of YYERROR, for GCC. - Once GCC version 2 has supplanted version 1, this can go. */ + * to ease the transition to the new meaning of YYERROR, for GCC. + * Once GCC version 2 has supplanted version 1, this can go. */ -#define YYFAIL goto yyerrlab +#define YYFAIL goto yyerrlab #define YYRECOVERING() (!!yyerrstatus) -#define YYBACKUP(Token, Value) \ -do \ - if (yychar == YYEMPTY && yylen == 1) \ - { \ - yychar = (Token); \ - yylval = (Value); \ - yytoken = YYTRANSLATE (yychar); \ - YYPOPSTACK (1); \ - goto yybackup; \ - } \ - else \ - { \ +#define YYBACKUP(Token, Value) \ +do \ + if (yychar == YYEMPTY && yylen == 1) \ + { \ + yychar = (Token); \ + yylval = (Value); \ + yytoken = YYTRANSLATE (yychar); \ + YYPOPSTACK (1); \ + goto yybackup; \ + } \ + else \ + { \ yyerror (YY_("syntax error: cannot back up")); \ - YYERROR; \ - } \ + YYERROR; \ + } \ while (YYID (0)) -#define YYTERROR 1 -#define YYERRCODE 256 +#define YYTERROR 1 +#define YYERRCODE 256 /* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. - If N is 0, then set CURRENT to the empty location which ends - the previous symbol: RHS[0] (always defined). 
*/ + * If N is 0, then set CURRENT to the empty location which ends + * the previous symbol: RHS[0] (always defined). */ #define YYRHSLOC(Rhs, K) ((Rhs)[K]) #ifndef YYLLOC_DEFAULT -# define YYLLOC_DEFAULT(Current, Rhs, N) \ - do \ +# define YYLLOC_DEFAULT(Current, Rhs, N) \ + do \ if (YYID (N)) \ - { \ - (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \ - (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \ - (Current).last_line = YYRHSLOC (Rhs, N).last_line; \ - (Current).last_column = YYRHSLOC (Rhs, N).last_column; \ - } \ - else \ - { \ - (Current).first_line = (Current).last_line = \ - YYRHSLOC (Rhs, 0).last_line; \ - (Current).first_column = (Current).last_column = \ - YYRHSLOC (Rhs, 0).last_column; \ - } \ + { \ + (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \ + (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \ + (Current).last_line = YYRHSLOC (Rhs, N).last_line; \ + (Current).last_column = YYRHSLOC (Rhs, N).last_column; \ + } \ + else \ + { \ + (Current).first_line = (Current).last_line = \ + YYRHSLOC (Rhs, 0).last_line; \ + (Current).first_column = (Current).last_column = \ + YYRHSLOC (Rhs, 0).last_column; \ + } \ while (YYID (0)) #endif /* YY_LOCATION_PRINT -- Print the location on the stream. - This macro was not mandated originally: define only if we know - we won't break user code: when these are the locations we know. */ + * This macro was not mandated originally: define only if we know + * we won't break user code: when these are the locations we know. */ #ifndef YY_LOCATION_PRINT # if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL -# define YY_LOCATION_PRINT(File, Loc) \ - fprintf (File, "%d.%d-%d.%d", \ - (Loc).first_line, (Loc).first_column, \ +# define YY_LOCATION_PRINT(File, Loc) \ + fprintf (File, "%d.%d-%d.%d", \ + (Loc).first_line, (Loc).first_column, \ (Loc).last_line, (Loc).last_column) # else # define YY_LOCATION_PRINT(File, Loc) ((void) 0) @@ -738,150 +736,152 @@ while (YYID (0)) # define YYFPRINTF fprintf # endif -# define YYDPRINTF(Args) \ -do { \ - if (yydebug) \ - YYFPRINTF Args; \ +# define YYDPRINTF(Args) \ +do { \ + if (yydebug) \ + YYFPRINTF Args; \ } while (YYID (0)) -# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ -do { \ - if (yydebug) \ - { \ - YYFPRINTF (stderr, "%s ", Title); \ - yy_symbol_print (stderr, \ - Type, Value); \ - YYFPRINTF (stderr, "\n"); \ - } \ +# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ +do { \ + if (yydebug) \ + { \ + YYFPRINTF (stderr, "%s ", Title); \ + yy_symbol_print (stderr, \ + Type, Value); \ + YYFPRINTF (stderr, "\n"); \ + } \ } while (YYID (0)) /*--------------------------------. -| Print this symbol on YYOUTPUT. | -`--------------------------------*/ + | Print this symbol on YYOUTPUT. 
| + | `--------------------------------*/ /*ARGSUSED*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) +yy_symbol_value_print(FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) #else static void -yy_symbol_value_print (yyoutput, yytype, yyvaluep) - FILE *yyoutput; - int yytype; - YYSTYPE const * const yyvaluep; + yy_symbol_value_print(yyoutput, yytype, yyvaluep) +FILE *yyoutput; +int yytype; +YYSTYPE const * const yyvaluep; #endif { - if (!yyvaluep) - return; + if (!yyvaluep) { + return; + } # ifdef YYPRINT - if (yytype < YYNTOKENS) - YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); + if (yytype < YYNTOKENS) { + YYPRINT(yyoutput, yytoknum[yytype], *yyvaluep); + } # else - YYUSE (yyoutput); + YYUSE(yyoutput); # endif - switch (yytype) - { - default: - break; - } + switch (yytype) { + default: + break; + } } /*--------------------------------. -| Print this symbol on YYOUTPUT. | -`--------------------------------*/ + | Print this symbol on YYOUTPUT. | + | `--------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) +yy_symbol_print(FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) #else static void -yy_symbol_print (yyoutput, yytype, yyvaluep) - FILE *yyoutput; - int yytype; - YYSTYPE const * const yyvaluep; + yy_symbol_print(yyoutput, yytype, yyvaluep) +FILE *yyoutput; +int yytype; +YYSTYPE const * const yyvaluep; #endif { - if (yytype < YYNTOKENS) - YYFPRINTF (yyoutput, "token %s (", yytname[yytype]); - else - YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]); + if (yytype < YYNTOKENS) { + YYFPRINTF(yyoutput, "token %s (", yytname[yytype]); + } else { + YYFPRINTF(yyoutput, "nterm %s (", yytname[yytype]); + } - yy_symbol_value_print (yyoutput, yytype, yyvaluep); - YYFPRINTF (yyoutput, ")"); + yy_symbol_value_print(yyoutput, yytype, yyvaluep); + YYFPRINTF(yyoutput, ")"); } /*------------------------------------------------------------------. -| yy_stack_print -- Print the state stack from its BOTTOM up to its | -| TOP (included). | -`------------------------------------------------------------------*/ + | yy_stack_print -- Print the state stack from its BOTTOM up to its | + | TOP (included). 
| + | `------------------------------------------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yy_stack_print (yytype_int16 *bottom, yytype_int16 *top) +yy_stack_print(yytype_int16 *bottom, yytype_int16 *top) #else static void -yy_stack_print (bottom, top) - yytype_int16 *bottom; - yytype_int16 *top; + yy_stack_print(bottom, top) +yytype_int16 *bottom; +yytype_int16 *top; #endif { - YYFPRINTF (stderr, "Stack now"); - for (; bottom <= top; ++bottom) - YYFPRINTF (stderr, " %d", *bottom); - YYFPRINTF (stderr, "\n"); + YYFPRINTF(stderr, "Stack now"); + for (; bottom <= top; ++bottom) { + YYFPRINTF(stderr, " %d", *bottom); + } + YYFPRINTF(stderr, "\n"); } -# define YY_STACK_PRINT(Bottom, Top) \ -do { \ - if (yydebug) \ - yy_stack_print ((Bottom), (Top)); \ +# define YY_STACK_PRINT(Bottom, Top) \ +do { \ + if (yydebug) \ + yy_stack_print ((Bottom), (Top)); \ } while (YYID (0)) /*------------------------------------------------. -| Report that the YYRULE is going to be reduced. | -`------------------------------------------------*/ + | Report that the YYRULE is going to be reduced. | + | `------------------------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yy_reduce_print (YYSTYPE *yyvsp, int yyrule) +yy_reduce_print(YYSTYPE *yyvsp, int yyrule) #else static void -yy_reduce_print (yyvsp, yyrule) - YYSTYPE *yyvsp; - int yyrule; + yy_reduce_print(yyvsp, yyrule) +YYSTYPE *yyvsp; +int yyrule; #endif { - int yynrhs = yyr2[yyrule]; - int yyi; - unsigned long int yylno = yyrline[yyrule]; - YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", - yyrule - 1, yylno); - /* The symbols being reduced. */ - for (yyi = 0; yyi < yynrhs; yyi++) - { - fprintf (stderr, " $%d = ", yyi + 1); - yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi], - &(yyvsp[(yyi + 1) - (yynrhs)]) - ); - fprintf (stderr, "\n"); - } + int yynrhs = yyr2[yyrule]; + int yyi; + unsigned long int yylno = yyrline[yyrule]; + YYFPRINTF(stderr, "Reducing stack by rule %d (line %lu):\n", + yyrule - 1, yylno); + /* The symbols being reduced. */ + for (yyi = 0; yyi < yynrhs; yyi++) { + fprintf(stderr, " $%d = ", yyi + 1); + yy_symbol_print(stderr, yyrhs[yyprhs[yyrule] + yyi], + &(yyvsp[(yyi + 1) - (yynrhs)]) + ); + fprintf(stderr, "\n"); + } } -# define YY_REDUCE_PRINT(Rule) \ -do { \ - if (yydebug) \ +# define YY_REDUCE_PRINT(Rule) \ +do { \ + if (yydebug) \ yy_reduce_print (yyvsp, Rule); \ } while (YYID (0)) /* Nonzero means print parse trace. It is left uninitialized so that - multiple parsers can coexist. */ + * multiple parsers can coexist. */ int yydebug; #else /* !YYDEBUG */ # define YYDPRINTF(Args) @@ -892,22 +892,22 @@ int yydebug; /* YYINITDEPTH -- initial size of the parser's stacks. */ -#ifndef YYINITDEPTH +#ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only - if the built-in stack extension method is used). - - Do not make this value too large; the results are undefined if - YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) - evaluated with infinite-precision integer arithmetic. */ + * if the built-in stack extension method is used). 
+ * + * Do not make this value too large; the results are undefined if + * YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) + * evaluated with infinite-precision integer arithmetic. */ #ifndef YYMAXDEPTH # define YYMAXDEPTH 10000 #endif - + #if YYERROR_VERBOSE @@ -917,19 +917,20 @@ int yydebug; # else /* Return the length of YYSTR. */ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static YYSIZE_T -yystrlen (const char *yystr) +yystrlen(const char *yystr) #else static YYSIZE_T -yystrlen (yystr) - const char *yystr; + yystrlen(yystr) +const char *yystr; #endif { - YYSIZE_T yylen; - for (yylen = 0; yystr[yylen]; yylen++) - continue; - return yylen; + YYSIZE_T yylen; + for (yylen = 0; yystr[yylen]; yylen++) { + continue; + } + return yylen; } # endif # endif @@ -939,232 +940,229 @@ yystrlen (yystr) # define yystpcpy stpcpy # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in - YYDEST. */ + * YYDEST. */ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static char * -yystpcpy (char *yydest, const char *yysrc) +yystpcpy(char *yydest, const char *yysrc) #else static char * -yystpcpy (yydest, yysrc) - char *yydest; - const char *yysrc; +yystpcpy(yydest, yysrc) +char *yydest; +const char *yysrc; #endif { - char *yyd = yydest; - const char *yys = yysrc; + char *yyd = yydest; + const char *yys = yysrc; - while ((*yyd++ = *yys++) != '\0') - continue; + while ((*yyd++ = *yys++) != '\0') { + continue; + } - return yyd - 1; + return yyd - 1; } # endif # endif # ifndef yytnamerr /* Copy to YYRES the contents of YYSTR after stripping away unnecessary - quotes and backslashes, so that it's suitable for yyerror. The - heuristic is that double-quoting is unnecessary unless the string - contains an apostrophe, a comma, or backslash (other than - backslash-backslash). YYSTR is taken from yytname. If YYRES is - null, do not copy; instead, return the length of what the result - would have been. */ + * quotes and backslashes, so that it's suitable for yyerror. The + * heuristic is that double-quoting is unnecessary unless the string + * contains an apostrophe, a comma, or backslash (other than + * backslash-backslash). YYSTR is taken from yytname. If YYRES is + * null, do not copy; instead, return the length of what the result + * would have been. */ static YYSIZE_T -yytnamerr (char *yyres, const char *yystr) +yytnamerr(char *yyres, const char *yystr) { - if (*yystr == '"') - { - YYSIZE_T yyn = 0; - char const *yyp = yystr; - - for (;;) - switch (*++yyp) - { - case '\'': - case ',': - goto do_not_strip_quotes; - - case '\\': - if (*++yyp != '\\') - goto do_not_strip_quotes; - /* Fall through. */ - default: - if (yyres) - yyres[yyn] = *yyp; - yyn++; - break; - - case '"': - if (yyres) - yyres[yyn] = '\0'; - return yyn; - } - do_not_strip_quotes: ; - } - - if (! yyres) - return yystrlen (yystr); - - return yystpcpy (yyres, yystr) - yyres; + if (*yystr == '"') { + YYSIZE_T yyn = 0; + char const *yyp = yystr; + + for (;;) { + switch (*++yyp) { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') { + goto do_not_strip_quotes; + } + /* Fall through. 
*/ + default: + if (yyres) { + yyres[yyn] = *yyp; + } + yyn++; + break; + + case '"': + if (yyres) { + yyres[yyn] = '\0'; + } + return yyn; + } + } +do_not_strip_quotes:; + } + + if (!yyres) { + return yystrlen(yystr); + } + + return yystpcpy(yyres, yystr) - yyres; } # endif /* Copy into YYRESULT an error message about the unexpected token - YYCHAR while in state YYSTATE. Return the number of bytes copied, - including the terminating null byte. If YYRESULT is null, do not - copy anything; just return the number of bytes that would be - copied. As a special case, return 0 if an ordinary "syntax error" - message will do. Return YYSIZE_MAXIMUM if overflow occurs during - size calculation. */ + * YYCHAR while in state YYSTATE. Return the number of bytes copied, + * including the terminating null byte. If YYRESULT is null, do not + * copy anything; just return the number of bytes that would be + * copied. As a special case, return 0 if an ordinary "syntax error" + * message will do. Return YYSIZE_MAXIMUM if overflow occurs during + * size calculation. */ static YYSIZE_T -yysyntax_error (char *yyresult, int yystate, int yychar) +yysyntax_error(char *yyresult, int yystate, int yychar) { - int yyn = yypact[yystate]; - - if (! (YYPACT_NINF < yyn && yyn <= YYLAST)) - return 0; - else - { - int yytype = YYTRANSLATE (yychar); - YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]); - YYSIZE_T yysize = yysize0; - YYSIZE_T yysize1; - int yysize_overflow = 0; - enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; - char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; - int yyx; + int yyn = yypact[yystate]; + + if (!(YYPACT_NINF < yyn && yyn <= YYLAST)) { + return 0; + } else { + int yytype = YYTRANSLATE(yychar); + YYSIZE_T yysize0 = yytnamerr(0, yytname[yytype]); + YYSIZE_T yysize = yysize0; + YYSIZE_T yysize1; + int yysize_overflow = 0; + enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; + char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; + int yyx; # if 0 - /* This is so xgettext sees the translatable formats that are - constructed on the fly. */ - YY_("syntax error, unexpected %s"); - YY_("syntax error, unexpected %s, expecting %s"); - YY_("syntax error, unexpected %s, expecting %s or %s"); - YY_("syntax error, unexpected %s, expecting %s or %s or %s"); - YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"); + /* This is so xgettext sees the translatable formats that are + * constructed on the fly. */ + YY_("syntax error, unexpected %s"); + YY_("syntax error, unexpected %s, expecting %s"); + YY_("syntax error, unexpected %s, expecting %s or %s"); + YY_("syntax error, unexpected %s, expecting %s or %s or %s"); + YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"); # endif - char *yyfmt; - char const *yyf; - static char const yyunexpected[] = "syntax error, unexpected %s"; - static char const yyexpecting[] = ", expecting %s"; - static char const yyor[] = " or %s"; - char yyformat[sizeof yyunexpected - + sizeof yyexpecting - 1 - + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2) - * (sizeof yyor - 1))]; - char const *yyprefix = yyexpecting; - - /* Start YYX at -YYN if negative to avoid negative indexes in - YYCHECK. */ - int yyxbegin = yyn < 0 ? -yyn : 0; - - /* Stay within bounds of both yycheck and yytname. */ - int yychecklim = YYLAST - yyn + 1; - int yyxend = yychecklim < YYNTOKENS ? 
yychecklim : YYNTOKENS; - int yycount = 1; - - yyarg[0] = yytname[yytype]; - yyfmt = yystpcpy (yyformat, yyunexpected); - - for (yyx = yyxbegin; yyx < yyxend; ++yyx) - if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) - { - if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) - { - yycount = 1; - yysize = yysize0; - yyformat[sizeof yyunexpected - 1] = '\0'; - break; - } - yyarg[yycount++] = yytname[yyx]; - yysize1 = yysize + yytnamerr (0, yytname[yyx]); - yysize_overflow |= (yysize1 < yysize); - yysize = yysize1; - yyfmt = yystpcpy (yyfmt, yyprefix); - yyprefix = yyor; - } - - yyf = YY_(yyformat); - yysize1 = yysize + yystrlen (yyf); - yysize_overflow |= (yysize1 < yysize); - yysize = yysize1; - - if (yysize_overflow) - return YYSIZE_MAXIMUM; - - if (yyresult) - { - /* Avoid sprintf, as that infringes on the user's name space. - Don't have undefined behavior even if the translation - produced a string with the wrong number of "%s"s. */ - char *yyp = yyresult; - int yyi = 0; - while ((*yyp = *yyf) != '\0') - { - if (*yyp == '%' && yyf[1] == 's' && yyi < yycount) - { - yyp += yytnamerr (yyp, yyarg[yyi++]); - yyf += 2; + char *yyfmt; + char const *yyf; + static char const yyunexpected[] = "syntax error, unexpected %s"; + static char const yyexpecting[] = ", expecting %s"; + static char const yyor[] = " or %s"; + char yyformat[sizeof yyunexpected + + sizeof yyexpecting - 1 + + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2) + * (sizeof yyor - 1))]; + char const *yyprefix = yyexpecting; + + /* Start YYX at -YYN if negative to avoid negative indexes in + * YYCHECK. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + int yycount = 1; + + yyarg[0] = yytname[yytype]; + yyfmt = yystpcpy(yyformat, yyunexpected); + + for (yyx = yyxbegin; yyx < yyxend; ++yyx) { + if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) { + if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { + yycount = 1; + yysize = yysize0; + yyformat[sizeof yyunexpected - 1] = '\0'; + break; + } + yyarg[yycount++] = yytname[yyx]; + yysize1 = yysize + yytnamerr(0, yytname[yyx]); + yysize_overflow |= (yysize1 < yysize); + yysize = yysize1; + yyfmt = yystpcpy(yyfmt, yyprefix); + yyprefix = yyor; + } } - else - { - yyp++; - yyf++; + + yyf = YY_(yyformat); + yysize1 = yysize + yystrlen(yyf); + yysize_overflow |= (yysize1 < yysize); + yysize = yysize1; + + if (yysize_overflow) { + return YYSIZE_MAXIMUM; } - } + + if (yyresult) { + /* Avoid sprintf, as that infringes on the user's name space. + * Don't have undefined behavior even if the translation + * produced a string with the wrong number of "%s"s. */ + char *yyp = yyresult; + int yyi = 0; + while ((*yyp = *yyf) != '\0') { + if (*yyp == '%' && yyf[1] == 's' && yyi < yycount) { + yyp += yytnamerr(yyp, yyarg[yyi++]); + yyf += 2; + } else { + yyp++; + yyf++; + } + } + } + return yysize; } - return yysize; - } } #endif /* YYERROR_VERBOSE */ - + /*-----------------------------------------------. -| Release the memory associated to this symbol. | -`-----------------------------------------------*/ + | Release the memory associated to this symbol. 
| + | `-----------------------------------------------*/ /*ARGSUSED*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) +yydestruct(const char *yymsg, int yytype, YYSTYPE *yyvaluep) #else static void -yydestruct (yymsg, yytype, yyvaluep) - const char *yymsg; - int yytype; - YYSTYPE *yyvaluep; + yydestruct(yymsg, yytype, yyvaluep) +const char *yymsg; +int yytype; +YYSTYPE *yyvaluep; #endif { - YYUSE (yyvaluep); - - if (!yymsg) - yymsg = "Deleting"; - YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); + YYUSE(yyvaluep); - switch (yytype) - { + if (!yymsg) { + yymsg = "Deleting"; + } + YY_SYMBOL_PRINT(yymsg, yytype, yyvaluep, yylocationp); - default: - break; - } + switch (yytype) { + default: + break; + } } - + /* Prevent warnings from -Wmissing-prototypes. */ #ifdef YYPARSE_PARAM #if defined __STDC__ || defined __cplusplus -int yyparse (void *YYPARSE_PARAM); +int yyparse(void *YYPARSE_PARAM); #else -int yyparse (); +int yyparse(); #endif #else /* ! YYPARSE_PARAM */ #if defined __STDC__ || defined __cplusplus -int yyparse (void); +int yyparse(void); #else -int yyparse (); +int yyparse(); #endif #endif /* ! YYPARSE_PARAM */ @@ -1182,652 +1180,654 @@ int yynerrs; /*----------. -| yyparse. | -`----------*/ + | yyparse. | + | `----------*/ #ifdef YYPARSE_PARAM #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) int -yyparse (void *YYPARSE_PARAM) +yyparse(void *YYPARSE_PARAM) #else int -yyparse (YYPARSE_PARAM) - void *YYPARSE_PARAM; + yyparse(YYPARSE_PARAM) +void *YYPARSE_PARAM; #endif #else /* ! YYPARSE_PARAM */ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) int -yyparse (void) +yyparse(void) #else int -yyparse () +yyparse() #endif #endif { - - int yystate; - int yyn; - int yyresult; - /* Number of tokens to shift before error messages enabled. */ - int yyerrstatus; - /* Look-ahead token as an internal (translated) token number. */ - int yytoken = 0; + int yystate; + int yyn; + int yyresult; + /* Number of tokens to shift before error messages enabled. */ + int yyerrstatus; + /* Look-ahead token as an internal (translated) token number. */ + int yytoken = 0; #if YYERROR_VERBOSE - /* Buffer for error messages, and its allocated size. */ - char yymsgbuf[128]; - char *yymsg = yymsgbuf; - YYSIZE_T yymsg_alloc = sizeof yymsgbuf; + /* Buffer for error messages, and its allocated size. */ + char yymsgbuf[128]; + char *yymsg = yymsgbuf; + YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif - /* Three stacks and their tools: - `yyss': related to states, - `yyvs': related to semantic values, - `yyls': related to locations. + /* Three stacks and their tools: + * `yyss': related to states, + * `yyvs': related to semantic values, + * `yyls': related to locations. + * + * Refer to the stacks thru separate pointers, to allow yyoverflow + * to reallocate them elsewhere. */ - Refer to the stacks thru separate pointers, to allow yyoverflow - to reallocate them elsewhere. */ + /* The state stack. */ + yytype_int16 yyssa[YYINITDEPTH]; + yytype_int16 *yyss = yyssa; + yytype_int16 *yyssp; - /* The state stack. */ - yytype_int16 yyssa[YYINITDEPTH]; - yytype_int16 *yyss = yyssa; - yytype_int16 *yyssp; - - /* The semantic value stack. 
*/ - YYSTYPE yyvsa[YYINITDEPTH]; - YYSTYPE *yyvs = yyvsa; - YYSTYPE *yyvsp; + /* The semantic value stack. */ + YYSTYPE yyvsa[YYINITDEPTH]; + YYSTYPE *yyvs = yyvsa; + YYSTYPE *yyvsp; #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) - YYSIZE_T yystacksize = YYINITDEPTH; + YYSIZE_T yystacksize = YYINITDEPTH; - /* The variables used to return semantic value and location from the - action routines. */ - YYSTYPE yyval; + /* The variables used to return semantic value and location from the + * action routines. */ + YYSTYPE yyval; - /* The number of symbols on the RHS of the reduced rule. - Keep to zero when no symbol should be popped. */ - int yylen = 0; + /* The number of symbols on the RHS of the reduced rule. + * Keep to zero when no symbol should be popped. */ + int yylen = 0; - YYDPRINTF ((stderr, "Starting parse\n")); + YYDPRINTF((stderr, "Starting parse\n")); - yystate = 0; - yyerrstatus = 0; - yynerrs = 0; - yychar = YYEMPTY; /* Cause a token to be read. */ + yystate = 0; + yyerrstatus = 0; + yynerrs = 0; + yychar = YYEMPTY; /* Cause a token to be read. */ - /* Initialize stack pointers. - Waste one element of value and location stack - so that they stay on the same level as the state stack. - The wasted elements are never initialized. */ + /* Initialize stack pointers. + * Waste one element of value and location stack + * so that they stay on the same level as the state stack. + * The wasted elements are never initialized. */ - yyssp = yyss; - yyvsp = yyvs; + yyssp = yyss; + yyvsp = yyvs; - goto yysetstate; + goto yysetstate; /*------------------------------------------------------------. -| yynewstate -- Push a new state, which is found in yystate. | -`------------------------------------------------------------*/ - yynewstate: - /* In all cases, when you get here, the value and location stacks - have just been pushed. So pushing a state here evens the stacks. */ - yyssp++; + | yynewstate -- Push a new state, which is found in yystate. | + | `------------------------------------------------------------*/ +yynewstate: + /* In all cases, when you get here, the value and location stacks + * have just been pushed. So pushing a state here evens the stacks. */ + yyssp++; - yysetstate: - *yyssp = yystate; +yysetstate: + *yyssp = yystate; - if (yyss + yystacksize - 1 <= yyssp) - { - /* Get the current used size of the three stacks, in elements. */ - YYSIZE_T yysize = yyssp - yyss + 1; + if (yyss + yystacksize - 1 <= yyssp) { + /* Get the current used size of the three stacks, in elements. */ + YYSIZE_T yysize = yyssp - yyss + 1; #ifdef yyoverflow - { - /* Give user a chance to reallocate the stack. Use copies of - these so that the &'s don't force the real ones into - memory. */ - YYSTYPE *yyvs1 = yyvs; - yytype_int16 *yyss1 = yyss; - - - /* Each stack pointer address is followed by the size of the - data in use in that stack, in bytes. This used to be a - conditional around just the two extra args, but that might - be undefined if yyoverflow is a macro. */ - yyoverflow (YY_("memory exhausted"), - &yyss1, yysize * sizeof (*yyssp), - &yyvs1, yysize * sizeof (*yyvsp), - - &yystacksize); - - yyss = yyss1; - yyvs = yyvs1; - } + { + /* Give user a chance to reallocate the stack. Use copies of + * these so that the &'s don't force the real ones into + * memory. */ + YYSTYPE *yyvs1 = yyvs; + yytype_int16 *yyss1 = yyss; + + + /* Each stack pointer address is followed by the size of the + * data in use in that stack, in bytes. 
This used to be a + * conditional around just the two extra args, but that might + * be undefined if yyoverflow is a macro. */ + yyoverflow(YY_("memory exhausted"), + &yyss1, yysize * sizeof(*yyssp), + &yyvs1, yysize * sizeof(*yyvsp), + + &yystacksize); + + yyss = yyss1; + yyvs = yyvs1; + } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE - goto yyexhaustedlab; + goto yyexhaustedlab; # else - /* Extend the stack our own way. */ - if (YYMAXDEPTH <= yystacksize) - goto yyexhaustedlab; - yystacksize *= 2; - if (YYMAXDEPTH < yystacksize) - yystacksize = YYMAXDEPTH; - - { - yytype_int16 *yyss1 = yyss; - union yyalloc *yyptr = - (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); - if (! yyptr) - goto yyexhaustedlab; - YYSTACK_RELOCATE (yyss); - YYSTACK_RELOCATE (yyvs); + /* Extend the stack our own way. */ + if (YYMAXDEPTH <= yystacksize) { + goto yyexhaustedlab; + } + yystacksize *= 2; + if (YYMAXDEPTH < yystacksize) { + yystacksize = YYMAXDEPTH; + } + + { + yytype_int16 *yyss1 = yyss; + union yyalloc *yyptr = + (union yyalloc *) YYSTACK_ALLOC(YYSTACK_BYTES(yystacksize)); + if (!yyptr) { + goto yyexhaustedlab; + } + YYSTACK_RELOCATE(yyss); + YYSTACK_RELOCATE(yyvs); # undef YYSTACK_RELOCATE - if (yyss1 != yyssa) - YYSTACK_FREE (yyss1); - } + if (yyss1 != yyssa) { + YYSTACK_FREE(yyss1); + } + } # endif #endif /* no yyoverflow */ - yyssp = yyss + yysize - 1; - yyvsp = yyvs + yysize - 1; + yyssp = yyss + yysize - 1; + yyvsp = yyvs + yysize - 1; - YYDPRINTF ((stderr, "Stack size increased to %lu\n", - (unsigned long int) yystacksize)); + YYDPRINTF((stderr, "Stack size increased to %lu\n", + (unsigned long int) yystacksize)); - if (yyss + yystacksize - 1 <= yyssp) - YYABORT; - } + if (yyss + yystacksize - 1 <= yyssp) { + YYABORT; + } + } - YYDPRINTF ((stderr, "Entering state %d\n", yystate)); + YYDPRINTF((stderr, "Entering state %d\n", yystate)); - goto yybackup; + goto yybackup; /*-----------. -| yybackup. | -`-----------*/ + | yybackup. | + | `-----------*/ yybackup: - /* Do appropriate processing given the current state. Read a - look-ahead token if we need one and don't already have one. */ - - /* First try to decide what to do without reference to look-ahead token. */ - yyn = yypact[yystate]; - if (yyn == YYPACT_NINF) - goto yydefault; - - /* Not known => get a look-ahead token if don't already have one. */ - - /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */ - if (yychar == YYEMPTY) - { - YYDPRINTF ((stderr, "Reading a token: ")); - yychar = YYLEX; - } - - if (yychar <= YYEOF) - { - yychar = yytoken = YYEOF; - YYDPRINTF ((stderr, "Now at end of input.\n")); - } - else - { - yytoken = YYTRANSLATE (yychar); - YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); - } - - /* If the proper action on seeing token YYTOKEN is to reduce or to - detect an error, take that action. */ - yyn += yytoken; - if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) - goto yydefault; - yyn = yytable[yyn]; - if (yyn <= 0) - { - if (yyn == 0 || yyn == YYTABLE_NINF) - goto yyerrlab; - yyn = -yyn; - goto yyreduce; - } - - if (yyn == YYFINAL) - YYACCEPT; - - /* Count tokens shifted since error; after three, turn off error - status. */ - if (yyerrstatus) - yyerrstatus--; - - /* Shift the look-ahead token. */ - YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); - - /* Discard the shifted token unless it is eof. */ - if (yychar != YYEOF) - yychar = YYEMPTY; - - yystate = yyn; - *++yyvsp = yylval; - - goto yynewstate; + /* Do appropriate processing given the current state. 
Read a + * look-ahead token if we need one and don't already have one. */ + /* First try to decide what to do without reference to look-ahead token. */ + yyn = yypact[yystate]; + if (yyn == YYPACT_NINF) { + goto yydefault; + } -/*-----------------------------------------------------------. -| yydefault -- do the default action for the current state. | -`-----------------------------------------------------------*/ -yydefault: - yyn = yydefact[yystate]; - if (yyn == 0) - goto yyerrlab; - goto yyreduce; + /* Not known => get a look-ahead token if don't already have one. */ + /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */ + if (yychar == YYEMPTY) { + YYDPRINTF((stderr, "Reading a token: ")); + yychar = YYLEX; + } -/*-----------------------------. -| yyreduce -- Do a reduction. | -`-----------------------------*/ -yyreduce: - /* yyn is the number of a rule to reduce with. */ - yylen = yyr2[yyn]; + if (yychar <= YYEOF) { + yychar = yytoken = YYEOF; + YYDPRINTF((stderr, "Now at end of input.\n")); + } else { + yytoken = YYTRANSLATE(yychar); + YY_SYMBOL_PRINT("Next token is", yytoken, &yylval, &yylloc); + } + + /* If the proper action on seeing token YYTOKEN is to reduce or to + * detect an error, take that action. */ + yyn += yytoken; + if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) { + goto yydefault; + } + yyn = yytable[yyn]; + if (yyn <= 0) { + if (yyn == 0 || yyn == YYTABLE_NINF) { + goto yyerrlab; + } + yyn = -yyn; + goto yyreduce; + } + + if (yyn == YYFINAL) { + YYACCEPT; + } + + /* Count tokens shifted since error; after three, turn off error + * status. */ + if (yyerrstatus) { + yyerrstatus--; + } + + /* Shift the look-ahead token. */ + YY_SYMBOL_PRINT("Shifting", yytoken, &yylval, &yylloc); - /* If YYLEN is nonzero, implement the default value of the action: - `$$ = $1'. + /* Discard the shifted token unless it is eof. */ + if (yychar != YYEOF) { + yychar = YYEMPTY; + } + + yystate = yyn; + *++yyvsp = yylval; + + goto yynewstate; - Otherwise, the following line sets YYVAL to garbage. - This behavior is undocumented and Bison - users should not rely upon it. Assigning to YYVAL - unconditionally makes the parser a bit smaller, and it avoids a - GCC warning that YYVAL may be used uninitialized. */ - yyval = yyvsp[1-yylen]; + +/*-----------------------------------------------------------. + | yydefault -- do the default action for the current state. | + | `-----------------------------------------------------------*/ +yydefault: + yyn = yydefact[yystate]; + if (yyn == 0) { + goto yyerrlab; + } + goto yyreduce; - YY_REDUCE_PRINT (yyn); - switch (yyn) - { - case 2: +/*-----------------------------. + | yyreduce -- Do a reduction. | + | `-----------------------------*/ +yyreduce: + /* yyn is the number of a rule to reduce with. */ + yylen = yyr2[yyn]; + + /* If YYLEN is nonzero, implement the default value of the action: + * `$$ = $1'. + * + * Otherwise, the following line sets YYVAL to garbage. + * This behavior is undocumented and Bison + * users should not rely upon it. Assigning to YYVAL + * unconditionally makes the parser a bit smaller, and it avoids a + * GCC warning that YYVAL may be used uninitialized. 
*/ + yyval = yyvsp[1 - yylen]; + + + YY_REDUCE_PRINT(yyn); + switch (yyn) { + case 2: #line 121 "OSUnserialize.y" - { parsedObject = (OSObject *)NULL; YYACCEPT; ;} - break; + { parsedObject = (OSObject *)NULL; YYACCEPT;;} + break; - case 3: + case 3: #line 122 "OSUnserialize.y" - { parsedObject = (OSObject *)(yyvsp[(1) - (1)]); YYACCEPT; ;} - break; + { parsedObject = (OSObject *)(yyvsp[(1) - (1)]); YYACCEPT;;} + break; - case 4: + case 4: #line 123 "OSUnserialize.y" - { yyerror("syntax error"); YYERROR; ;} - break; + { yyerror("syntax error"); YYERROR;;} + break; - case 5: + case 5: #line 126 "OSUnserialize.y" - { (yyval) = (object_t *)buildOSDictionary((yyvsp[(1) - (1)])); ;} - break; + { (yyval) = (object_t *)buildOSDictionary((yyvsp[(1) - (1)]));;} + break; - case 6: + case 6: #line 127 "OSUnserialize.y" - { (yyval) = (object_t *)buildOSArray((yyvsp[(1) - (1)])); ;} - break; + { (yyval) = (object_t *)buildOSArray((yyvsp[(1) - (1)]));;} + break; - case 7: + case 7: #line 128 "OSUnserialize.y" - { (yyval) = (object_t *)buildOSSet((yyvsp[(1) - (1)])); ;} - break; + { (yyval) = (object_t *)buildOSSet((yyvsp[(1) - (1)]));;} + break; - case 8: + case 8: #line 129 "OSUnserialize.y" - { (yyval) = (object_t *)buildOSString((yyvsp[(1) - (1)])); ;} - break; + { (yyval) = (object_t *)buildOSString((yyvsp[(1) - (1)]));;} + break; - case 9: + case 9: #line 130 "OSUnserialize.y" - { (yyval) = (object_t *)buildOSData((yyvsp[(1) - (1)])); ;} - break; + { (yyval) = (object_t *)buildOSData((yyvsp[(1) - (1)]));;} + break; - case 10: + case 10: #line 131 "OSUnserialize.y" - { (yyval) = (object_t *)buildOSOffset((yyvsp[(1) - (1)])); ;} - break; + { (yyval) = (object_t *)buildOSOffset((yyvsp[(1) - (1)]));;} + break; - case 11: + case 11: #line 132 "OSUnserialize.y" - { (yyval) = (object_t *)buildOSBoolean((yyvsp[(1) - (1)])); ;} - break; + { (yyval) = (object_t *)buildOSBoolean((yyvsp[(1) - (1)]));;} + break; - case 12: + case 12: #line 133 "OSUnserialize.y" - { (yyval) = (object_t *)retrieveObject((yyvsp[(2) - (2)])->u.offset); - if ((yyval)) { - ((OSObject *)(yyval))->retain(); - } else { - yyerror("forward reference detected"); - YYERROR; - } - freeObject((yyvsp[(2) - (2)])); - ;} - break; - - case 13: + { (yyval) = (object_t *)retrieveObject((yyvsp[(2) - (2)])->u.offset); + if ((yyval)) { + ((OSObject *)(yyval))->retain(); + } else { + yyerror("forward reference detected"); + YYERROR; + } + freeObject((yyvsp[(2) - (2)])); + ;} + break; + + case 13: #line 142 "OSUnserialize.y" - { (yyval) = (yyvsp[(1) - (3)]); - rememberObject((yyvsp[(3) - (3)])->u.offset, (yyvsp[(1) - (3)])); - freeObject((yyvsp[(3) - (3)])); - ;} - break; + { (yyval) = (yyvsp[(1) - (3)]); + rememberObject((yyvsp[(3) - (3)])->u.offset, (yyvsp[(1) - (3)])); + freeObject((yyvsp[(3) - (3)])); + ;} + break; - case 14: + case 14: #line 150 "OSUnserialize.y" - { (yyval) = NULL; ;} - break; + { (yyval) = NULL;;} + break; - case 15: + case 15: #line 151 "OSUnserialize.y" - { (yyval) = (yyvsp[(2) - (3)]); ;} - break; + { (yyval) = (yyvsp[(2) - (3)]);;} + break; - case 17: + case 17: #line 155 "OSUnserialize.y" - { (yyvsp[(2) - (2)])->next = (yyvsp[(1) - (2)]); (yyvsp[(1) - (2)])->prev = (yyvsp[(2) - (2)]); (yyval) = (yyvsp[(2) - (2)]); ;} - break; + { (yyvsp[(2) - (2)])->next = (yyvsp[(1) - (2)]); (yyvsp[(1) - (2)])->prev = (yyvsp[(2) - (2)]); (yyval) = (yyvsp[(2) - (2)]);;} + break; - case 18: + case 18: #line 158 "OSUnserialize.y" - { (yyval) = newObject(); - (yyval)->next = NULL; - (yyval)->prev = NULL; - (yyval)->u.key = (yyvsp[(1) - (4)]); - 
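/* [Editor's sketch, not part of the original patch: the actions in
 * this switch build the objects for the classic OSUnserialize text
 * format.  Assuming the usual curly-brace dictionary syntax (the
 * delimiter tokens are defined elsewhere in the grammar), a caller
 * would exercise them roughly like this:
 *
 *     OSString *errStr = NULL;   // hypothetical caller
 *     OSObject *obj = OSUnserialize(
 *         "{ \"depth\" = 16:32; \"enabled\" = .true.; }", &errStr);
 *     // on failure obj is NULL and errStr carries yyerror_message
 *
 * where 16:32 is a number with an explicit 32-bit size (case 25)
 * and .true. is a boolean literal handled by yylex further down.] */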
(yyval)->object = (yyvsp[(3) - (4)]); - ;} - break; - - case 19: + { (yyval) = newObject(); + (yyval)->next = NULL; + (yyval)->prev = NULL; + (yyval)->u.key = (yyvsp[(1) - (4)]); + (yyval)->object = (yyvsp[(3) - (4)]); + ;} + break; + + case 19: #line 168 "OSUnserialize.y" - { (yyval) = NULL; ;} - break; + { (yyval) = NULL;;} + break; - case 20: + case 20: #line 169 "OSUnserialize.y" - { (yyval) = (yyvsp[(2) - (3)]); ;} - break; + { (yyval) = (yyvsp[(2) - (3)]);;} + break; - case 21: + case 21: #line 172 "OSUnserialize.y" - { (yyval) = NULL; ;} - break; + { (yyval) = NULL;;} + break; - case 22: + case 22: #line 173 "OSUnserialize.y" - { (yyval) = (yyvsp[(2) - (3)]); ;} - break; + { (yyval) = (yyvsp[(2) - (3)]);;} + break; - case 23: + case 23: #line 176 "OSUnserialize.y" - { (yyval) = newObject(); - (yyval)->object = (yyvsp[(1) - (1)]); - (yyval)->next = NULL; - (yyval)->prev = NULL; - ;} - break; - - case 24: + { (yyval) = newObject(); + (yyval)->object = (yyvsp[(1) - (1)]); + (yyval)->next = NULL; + (yyval)->prev = NULL; + ;} + break; + + case 24: #line 181 "OSUnserialize.y" - { oo = newObject(); - oo->object = (yyvsp[(3) - (3)]); - oo->next = (yyvsp[(1) - (3)]); - oo->prev = NULL; - (yyvsp[(1) - (3)])->prev = oo; - (yyval) = oo; - ;} - break; - - case 25: + { oo = newObject(); + oo->object = (yyvsp[(3) - (3)]); + oo->next = (yyvsp[(1) - (3)]); + oo->prev = NULL; + (yyvsp[(1) - (3)])->prev = oo; + (yyval) = oo; + ;} + break; + + case 25: #line 192 "OSUnserialize.y" - { (yyval) = (yyvsp[(1) - (3)]); - (yyval)->size = (yyvsp[(3) - (3)])->u.offset; - freeObject((yyvsp[(3) - (3)])); - ;} - break; + { (yyval) = (yyvsp[(1) - (3)]); + (yyval)->size = (yyvsp[(3) - (3)])->u.offset; + freeObject((yyvsp[(3) - (3)])); + ;} + break; /* Line 1267 of yacc.c. */ #line 1555 "OSUnserialize.tab.c" - default: break; - } - YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); + default: break; + } + YY_SYMBOL_PRINT("-> $$ =", yyr1[yyn], &yyval, &yyloc); - YYPOPSTACK (yylen); - yylen = 0; - YY_STACK_PRINT (yyss, yyssp); + YYPOPSTACK(yylen); + yylen = 0; + YY_STACK_PRINT(yyss, yyssp); - *++yyvsp = yyval; + *++yyvsp = yyval; - /* Now `shift' the result of the reduction. Determine what state - that goes to, based on the state we popped back to and the rule - number reduced by. */ + /* Now `shift' the result of the reduction. Determine what state + * that goes to, based on the state we popped back to and the rule + * number reduced by. */ - yyn = yyr1[yyn]; + yyn = yyr1[yyn]; - yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; - if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) - yystate = yytable[yystate]; - else - yystate = yydefgoto[yyn - YYNTOKENS]; + yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; + if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) { + yystate = yytable[yystate]; + } else { + yystate = yydefgoto[yyn - YYNTOKENS]; + } - goto yynewstate; + goto yynewstate; /*------------------------------------. -| yyerrlab -- here on detecting error | -`------------------------------------*/ + | yyerrlab -- here on detecting error | + | `------------------------------------*/ yyerrlab: - /* If not already recovering from an error, report this error. */ - if (!yyerrstatus) - { - ++yynerrs; -#if ! YYERROR_VERBOSE - yyerror (YY_("syntax error")); + /* If not already recovering from an error, report this error. 
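 *
 * [Editor's note, not part of the original patch: yyerrstatus is
 * set to 3 in yyerrlab1 and decremented on each successful shift,
 * so diagnostics stay suppressed until three tokens in a row have
 * been shifted cleanly.  The verbose message assembled below starts
 * in the 128-byte stack buffer yymsgbuf and, when too small, is
 * reallocated at twice the required size, capped at
 * YYSTACK_ALLOC_MAXIMUM.]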
*/ + if (!yyerrstatus) { + ++yynerrs; +#if !YYERROR_VERBOSE + yyerror(YY_("syntax error")); #else - { - YYSIZE_T yysize = yysyntax_error (0, yystate, yychar); - if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM) - { - YYSIZE_T yyalloc = 2 * yysize; - if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM)) - yyalloc = YYSTACK_ALLOC_MAXIMUM; - if (yymsg != yymsgbuf) - YYSTACK_FREE (yymsg); - yymsg = (char *) YYSTACK_ALLOC (yyalloc); - if (yymsg) - yymsg_alloc = yyalloc; - else - { - yymsg = yymsgbuf; - yymsg_alloc = sizeof yymsgbuf; - } - } - - if (0 < yysize && yysize <= yymsg_alloc) - { - (void) yysyntax_error (yymsg, yystate, yychar); - yyerror (yymsg); - } - else - { - yyerror (YY_("syntax error")); - if (yysize != 0) - goto yyexhaustedlab; - } - } + { + YYSIZE_T yysize = yysyntax_error(0, yystate, yychar); + if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM) { + YYSIZE_T yyalloc = 2 * yysize; + if (!(yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM)) { + yyalloc = YYSTACK_ALLOC_MAXIMUM; + } + if (yymsg != yymsgbuf) { + YYSTACK_FREE(yymsg); + } + yymsg = (char *) YYSTACK_ALLOC(yyalloc); + if (yymsg) { + yymsg_alloc = yyalloc; + } else { + yymsg = yymsgbuf; + yymsg_alloc = sizeof yymsgbuf; + } + } + + if (0 < yysize && yysize <= yymsg_alloc) { + (void) yysyntax_error(yymsg, yystate, yychar); + yyerror(yymsg); + } else { + yyerror(YY_("syntax error")); + if (yysize != 0) { + goto yyexhaustedlab; + } + } + } #endif - } + } - if (yyerrstatus == 3) - { - /* If just tried and failed to reuse look-ahead token after an - error, discard it. */ + if (yyerrstatus == 3) { + /* If just tried and failed to reuse look-ahead token after an + * error, discard it. */ - if (yychar <= YYEOF) - { - /* Return failure if at end of input. */ - if (yychar == YYEOF) - YYABORT; - } - else - { - yydestruct ("Error: discarding", - yytoken, &yylval); - yychar = YYEMPTY; + if (yychar <= YYEOF) { + /* Return failure if at end of input. */ + if (yychar == YYEOF) { + YYABORT; + } + } else { + yydestruct("Error: discarding", + yytoken, &yylval); + yychar = YYEMPTY; + } } - } - /* Else will try to reuse look-ahead token after shifting the error - token. */ - goto yyerrlab1; + /* Else will try to reuse look-ahead token after shifting the error + * token. */ + goto yyerrlab1; /*---------------------------------------------------. -| yyerrorlab -- error raised explicitly by YYERROR. | -`---------------------------------------------------*/ + | yyerrorlab -- error raised explicitly by YYERROR. | + | `---------------------------------------------------*/ yyerrorlab: - /* Pacify compilers like GCC when the user code never invokes - YYERROR and the label yyerrorlab therefore never appears in user - code. */ - if (/*CONSTCOND*/ 0) - goto yyerrorlab; + /* Pacify compilers like GCC when the user code never invokes + * YYERROR and the label yyerrorlab therefore never appears in user + * code. */ + if (/*CONSTCOND*/ 0) { + goto yyerrorlab; + } - /* Do not reclaim the symbols of the rule which action triggered - this YYERROR. */ - YYPOPSTACK (yylen); - yylen = 0; - YY_STACK_PRINT (yyss, yyssp); - yystate = *yyssp; - goto yyerrlab1; + /* Do not reclaim the symbols of the rule which action triggered + * this YYERROR. */ + YYPOPSTACK(yylen); + yylen = 0; + YY_STACK_PRINT(yyss, yyssp); + yystate = *yyssp; + goto yyerrlab1; /*-------------------------------------------------------------. -| yyerrlab1 -- common code for both syntax error and YYERROR. 
| -`-------------------------------------------------------------*/ + | yyerrlab1 -- common code for both syntax error and YYERROR. | + | `-------------------------------------------------------------*/ yyerrlab1: - yyerrstatus = 3; /* Each real token shifted decrements this. */ - - for (;;) - { - yyn = yypact[yystate]; - if (yyn != YYPACT_NINF) - { - yyn += YYTERROR; - if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) - { - yyn = yytable[yyn]; - if (0 < yyn) - break; - } - } + yyerrstatus = 3; /* Each real token shifted decrements this. */ + + for (;;) { + yyn = yypact[yystate]; + if (yyn != YYPACT_NINF) { + yyn += YYTERROR; + if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { + yyn = yytable[yyn]; + if (0 < yyn) { + break; + } + } + } - /* Pop the current state because it cannot handle the error token. */ - if (yyssp == yyss) - YYABORT; + /* Pop the current state because it cannot handle the error token. */ + if (yyssp == yyss) { + YYABORT; + } - yydestruct ("Error: popping", - yystos[yystate], yyvsp); - YYPOPSTACK (1); - yystate = *yyssp; - YY_STACK_PRINT (yyss, yyssp); - } + yydestruct("Error: popping", + yystos[yystate], yyvsp); + YYPOPSTACK(1); + yystate = *yyssp; + YY_STACK_PRINT(yyss, yyssp); + } - if (yyn == YYFINAL) - YYACCEPT; + if (yyn == YYFINAL) { + YYACCEPT; + } - *++yyvsp = yylval; + *++yyvsp = yylval; - /* Shift the error token. */ - YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); + /* Shift the error token. */ + YY_SYMBOL_PRINT("Shifting", yystos[yyn], yyvsp, yylsp); - yystate = yyn; - goto yynewstate; + yystate = yyn; + goto yynewstate; /*-------------------------------------. -| yyacceptlab -- YYACCEPT comes here. | -`-------------------------------------*/ + | yyacceptlab -- YYACCEPT comes here. | + | `-------------------------------------*/ yyacceptlab: - yyresult = 0; - goto yyreturn; + yyresult = 0; + goto yyreturn; /*-----------------------------------. -| yyabortlab -- YYABORT comes here. | -`-----------------------------------*/ + | yyabortlab -- YYABORT comes here. | + | `-----------------------------------*/ yyabortlab: - yyresult = 1; - goto yyreturn; + yyresult = 1; + goto yyreturn; #ifndef yyoverflow /*-------------------------------------------------. -| yyexhaustedlab -- memory exhaustion comes here. | -`-------------------------------------------------*/ + | yyexhaustedlab -- memory exhaustion comes here. | + | `-------------------------------------------------*/ yyexhaustedlab: - yyerror (YY_("memory exhausted")); - yyresult = 2; - /* Fall through. */ + yyerror(YY_("memory exhausted")); + yyresult = 2; + /* Fall through. */ #endif yyreturn: - if (yychar != YYEOF && yychar != YYEMPTY) - yydestruct ("Cleanup: discarding lookahead", - yytoken, &yylval); - /* Do not reclaim the symbols of the rule which action triggered - this YYABORT or YYACCEPT. */ - YYPOPSTACK (yylen); - YY_STACK_PRINT (yyss, yyssp); - while (yyssp != yyss) - { - yydestruct ("Cleanup: popping", - yystos[*yyssp], yyvsp); - YYPOPSTACK (1); - } + if (yychar != YYEOF && yychar != YYEMPTY) { + yydestruct("Cleanup: discarding lookahead", + yytoken, &yylval); + } + /* Do not reclaim the symbols of the rule which action triggered + * this YYABORT or YYACCEPT. 
*/ + YYPOPSTACK(yylen); + YY_STACK_PRINT(yyss, yyssp); + while (yyssp != yyss) { + yydestruct("Cleanup: popping", + yystos[*yyssp], yyvsp); + YYPOPSTACK(1); + } #ifndef yyoverflow - if (yyss != yyssa) - YYSTACK_FREE (yyss); + if (yyss != yyssa) { + YYSTACK_FREE(yyss); + } #endif #if YYERROR_VERBOSE - if (yymsg != yymsgbuf) - YYSTACK_FREE (yymsg); + if (yymsg != yymsgbuf) { + YYSTACK_FREE(yymsg); + } #endif - /* Make sure YYID is used. */ - return YYID (yyresult); + /* Make sure YYID is used. */ + return YYID(yyresult); } #line 213 "OSUnserialize.y" - -static int lineNumber = 0; -static const char *parseBuffer; -static int parseBufferIndex; -#define currentChar() (parseBuffer[parseBufferIndex]) -#define nextChar() (parseBuffer[++parseBufferIndex]) -#define prevChar() (parseBuffer[parseBufferIndex - 1]) +static int lineNumber = 0; +static const char *parseBuffer; +static int parseBufferIndex; + +#define currentChar() (parseBuffer[parseBufferIndex]) +#define nextChar() (parseBuffer[++parseBufferIndex]) +#define prevChar() (parseBuffer[parseBufferIndex - 1]) -#define isSpace(c) ((c) == ' ' || (c) == '\t') -#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) -#define isDigit(c) ((c) >= '0' && (c) <= '9') -#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') -#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) -#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) +#define isSpace(c) ((c) == ' ' || (c) == '\t') +#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) +#define isDigit(c) ((c) >= '0' && (c) <= '9') +#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') +#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) +#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) static char yyerror_message[128]; @@ -1843,16 +1843,26 @@ yylex() { int c; - if (parseBufferIndex == 0) lineNumber = 1; + if (parseBufferIndex == 0) { + lineNumber = 1; + } - top: +top: c = currentChar(); /* skip white space */ - if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + if (isSpace(c)) { + while ((c = nextChar()) != 0 && isSpace(c)) { + } + } + ; /* skip over comments */ - if (c == '#') while ((c = nextChar()) != 0 && c != '\n') {}; + if (c == '#') { + while ((c = nextChar()) != 0 && c != '\n') { + } + } + ; /* keep track of line number, don't return \n's */ if (c == '\n') { @@ -1865,18 +1875,36 @@ yylex() if (c == '.') { bool boolean = false; if (nextChar() == 't') { - if (nextChar() != 'r') return SYNTAX_ERROR; - if (nextChar() != 'u') return SYNTAX_ERROR; - if (nextChar() != 'e') return SYNTAX_ERROR; + if (nextChar() != 'r') { + return SYNTAX_ERROR; + } + if (nextChar() != 'u') { + return SYNTAX_ERROR; + } + if (nextChar() != 'e') { + return SYNTAX_ERROR; + } boolean = true; } else { - if (currentChar() != 'f') return SYNTAX_ERROR; - if (nextChar() != 'a') return SYNTAX_ERROR; - if (nextChar() != 'l') return SYNTAX_ERROR; - if (nextChar() != 's') return SYNTAX_ERROR; - if (nextChar() != 'e') return SYNTAX_ERROR; + if (currentChar() != 'f') { + return SYNTAX_ERROR; + } + if (nextChar() != 'a') { + return SYNTAX_ERROR; + } + if (nextChar() != 'l') { + return SYNTAX_ERROR; + } + if (nextChar() != 's') { + return SYNTAX_ERROR; + } + if (nextChar() != 'e') { + return SYNTAX_ERROR; + } + } + if (nextChar() != '.') { + return SYNTAX_ERROR; } - if (nextChar() != '.') return SYNTAX_ERROR; /* skip over dot */ (void)nextChar(); @@ -1891,7 +1919,7 @@ yylex() start = parseBufferIndex; /* find end of string */ - while (isAlphaNumeric(c)) 
{ + while (isAlphaNumeric(c)) { c = nextChar(); } length = parseBufferIndex - start; @@ -1914,12 +1942,18 @@ yylex() char * tempString; char quoteChar = c; - start = parseBufferIndex + 1; // skip quote + start = parseBufferIndex + 1; // skip quote /* find end of string, line, buffer */ while ((c = nextChar()) != quoteChar) { - if (c == '\\') c = nextChar(); - if (c == '\n') lineNumber++; - if (c == 0) return SYNTAX_ERROR; + if (c == '\\') { + c = nextChar(); + } + if (c == '\n') { + lineNumber++; + } + if (c == 0) { + return SYNTAX_ERROR; + } } length = parseBufferIndex - start; /* skip over trailing quote */ @@ -1932,13 +1966,13 @@ yylex() } int to = 0; - for (int from=start; from < parseBufferIndex; from++) { + for (int from = start; from < parseBufferIndex; from++) { // hack - skip over backslashes if (parseBuffer[from] == '\\') { length--; continue; } - tempString[to] = parseBuffer[from]; + tempString[to] = parseBuffer[from]; to++; } tempString[length] = 0; @@ -1947,8 +1981,7 @@ yylex() } /* process numbers */ - if (isDigit (c)) - { + if (isDigit(c)) { unsigned long long n = 0; int base = 10; @@ -1960,12 +1993,12 @@ yylex() } } if (base == 10) { - while(isDigit(c)) { + while (isDigit(c)) { n = (n * base + c - '0'); c = nextChar(); } } else { - while(isHexDigit(c)) { + while (isHexDigit(c)) { if (isDigit(c)) { n = (n * base + c - '0'); } else { @@ -1977,22 +2010,29 @@ yylex() yylval = newObject(); yylval->u.offset = n; - + return NUMBER; } #define OSDATA_ALLOC_SIZE 4096 - + /* process data */ if (c == '<') { unsigned char *d, *start, *lastStart; start = lastStart = d = (unsigned char *)malloc(OSDATA_ALLOC_SIZE); - c = nextChar(); // skip over '<' + c = nextChar(); // skip over '<' while (c != 0 && c != '>') { - - if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; - if (c == '#') while ((c = nextChar()) != 0 && c != '\n') {}; + if (isSpace(c)) { + while ((c = nextChar()) != 0 && isSpace(c)) { + } + } + ; + if (c == '#') { + while ((c = nextChar()) != 0 && c != '\n') { + } + } + ; if (c == '\n') { lineNumber++; c = nextChar(); @@ -2000,7 +2040,9 @@ yylex() } // get high nibble - if (!isHexDigit(c)) break; + if (!isHexDigit(c)) { + break; + } if (isDigit(c)) { *d = (c - '0') << 4; } else { @@ -2009,13 +2051,15 @@ yylex() // get low nibble c = nextChar(); - if (!isHexDigit(c)) break; + if (!isHexDigit(c)) { + break; + } if (isDigit(c)) { *d |= c - '0'; } else { *d |= 0xa + (c - 'a'); } - + d++; if ((d - lastStart) >= OSDATA_ALLOC_SIZE) { int oldsize = d - start; @@ -2024,7 +2068,7 @@ yylex() } c = nextChar(); } - if (c != '>' ) { + if (c != '>') { free(start); return SYNTAX_ERROR; } @@ -2034,7 +2078,7 @@ yylex() yylval->object = start; yylval->size = d - start; - (void)nextChar(); // skip over '>' + (void)nextChar(); // skip over '>' return DATA; } @@ -2072,7 +2116,7 @@ freeObject(object_t *o) static OSDictionary *tags; -static void +static void rememberObject(int tag, object_t *o) { char key[16]; @@ -2112,7 +2156,7 @@ buildOSDictionary(object_t *o) // XXX the evil frontdoor d->setObject((OSSymbol *)o->u.key, (OSObject *)o->object); } else { - // If it isn't a symbol, I hope it's a string! + // If it isn't a symbol, I hope it's a string! 
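/* [Editor's worked example for the '<...>' data lexer in yylex
 * above, not part of the original patch: each pair of lowercase hex
 * digits packs into one byte (high nibble first), whitespace and
 * '#' comments may appear between bytes, and the buffer grows in
 * OSDATA_ALLOC_SIZE (4096-byte) steps.  So "<0123 45ab>" lexes to a
 * 4-byte DATA token holding 0x01 0x23 0x45 0xab, which case 9 of
 * the parser then wraps via buildOSData.] */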
d->setObject((OSString *)o->u.key, (OSObject *)o->object); } #else @@ -2207,7 +2251,7 @@ __BEGIN_DECLS #include __END_DECLS -static lck_mtx_t *lock = 0; +static lck_mtx_t * lock = 0; extern lck_grp_t *IOLockGroup; OSObject* @@ -2220,30 +2264,32 @@ OSUnserialize(const char *buffer, OSString **errorString) lck_mtx_lock(lock); } else { lck_mtx_lock(lock); - } #if DEBUG debugUnserializeAllocCount = 0; #endif - yyerror_message[0] = 0; //just in case + yyerror_message[0] = 0; //just in case parseBuffer = buffer; parseBufferIndex = 0; tags = OSDictionary::withCapacity(128); if (yyparse() == 0) { object = parsedObject; - if (errorString) *errorString = 0; + if (errorString) { + *errorString = 0; + } } else { object = 0; - if (errorString) + if (errorString) { *errorString = OSString::withCString(yyerror_message); + } } tags->release(); #if DEBUG if (debugUnserializeAllocCount) { - printf("OSUnserialize: allocation check failed, count = %d.\n", - debugUnserializeAllocCount); + printf("OSUnserialize: allocation check failed, count = %d.\n", + debugUnserializeAllocCount); } #endif lck_mtx_unlock(lock); @@ -2265,4 +2311,3 @@ OSUnserialize(const char *buffer, OSString **errorString) // // // - diff --git a/libkern/c++/OSUnserializeXML.cpp b/libkern/c++/OSUnserializeXML.cpp index 6905c3979..2c53ef4cc 100644 --- a/libkern/c++/OSUnserializeXML.cpp +++ b/libkern/c++/OSUnserializeXML.cpp @@ -2,7 +2,7 @@ * Copyright (c) 1999-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -51,47 +51,47 @@ /* A Bison parser, made by GNU Bison 2.3. */ /* Skeleton implementation for Bison's Yacc-like parsers in C - - Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 - Free Software Foundation, Inc. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2, or (at your option) - any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin Street, Fifth Floor, - Boston, MA 02110-1301, USA. 
*/ + * + * Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006 + * Free Software Foundation, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. */ /* As a special exception, you may create a larger work that contains - part or all of the Bison parser skeleton and distribute that work - under terms of your choice, so long as that work isn't itself a - parser generator using the skeleton or a modified version thereof - as a parser skeleton. Alternatively, if you modify or redistribute - the parser skeleton itself, you may (at your option) remove this - special exception, which will cause the skeleton and the resulting - Bison output files to be licensed under the GNU General Public - License without this special exception. - - This special exception was added by the Free Software Foundation in - version 2.2 of Bison. */ + * part or all of the Bison parser skeleton and distribute that work + * under terms of your choice, so long as that work isn't itself a + * parser generator using the skeleton or a modified version thereof + * as a parser skeleton. Alternatively, if you modify or redistribute + * the parser skeleton itself, you may (at your option) remove this + * special exception, which will cause the skeleton and the resulting + * Bison output files to be licensed under the GNU General Public + * License without this special exception. + * + * This special exception was added by the Free Software Foundation in + * version 2.2 of Bison. */ /* C LALR(1) parser skeleton written by Richard Stallman, by - simplifying the original so-called "semantic" parser. */ +* simplifying the original so-called "semantic" parser. */ /* All symbols defined below should begin with yy or YY, to avoid - infringing on user name space. This should be done even for local - variables, as they might otherwise be expanded by user macros. - There are some unavoidable exceptions within include files to - define necessary library symbols; they are noted "INFRINGES ON - USER NAME SPACE" below. */ + * infringing on user name space. This should be done even for local + * variables, as they might otherwise be expanded by user macros. + * There are some unavoidable exceptions within include files to + * define necessary library symbols; they are noted "INFRINGES ON + * USER NAME SPACE" below. */ /* Identify Bison output. */ #define YYBISON 1 @@ -121,20 +121,20 @@ /* Tokens. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE - /* Put the tokens into the symbol table, so that GDB and other debuggers - know about them. */ - enum yytokentype { - ARRAY = 258, - BOOLEAN = 259, - DATA = 260, - DICTIONARY = 261, - IDREF = 262, - KEY = 263, - NUMBER = 264, - SET = 265, - STRING = 266, - SYNTAX_ERROR = 267 - }; +/* Put the tokens into the symbol table, so that GDB and other debuggers + * know about them. 
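 *
 * [Editor's note, not part of the original patch: Bison starts
 * named tokens at 258 because values 0-255 are reserved for literal
 * character tokens and 256/257 denote the internal "error" and
 * "$undefined" symbols; ARRAY through SYNTAX_ERROR therefore occupy
 * 258-267, matching YYMAXUTOK (267) and the tail of the yytranslate
 * table below.]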
*/ +enum yytokentype { + ARRAY = 258, + BOOLEAN = 259, + DATA = 260, + DICTIONARY = 261, + IDREF = 262, + KEY = 263, + NUMBER = 264, + SET = 265, + STRING = 266, + SYNTAX_ERROR = 267 +}; #endif /* Tokens. */ #define ARRAY 258 @@ -159,65 +159,65 @@ #include #include -#define MAX_OBJECTS 131071 -#define MAX_REFED_OBJECTS 65535 +#define MAX_OBJECTS 131071 +#define MAX_REFED_OBJECTS 65535 #define YYSTYPE object_t * -#define YYPARSE_PARAM state -#define YYLEX_PARAM (parser_state_t *)state +#define YYPARSE_PARAM state +#define YYLEX_PARAM (parser_state_t *)state // this is the internal struct used to hold objects on parser stack // it represents objects both before and after they have been created -typedef struct object { - struct object *next; - struct object *free; - struct object *elements; - OSObject *object; - OSSymbol *key; // for dictionary - int size; - void *data; // for data - char *string; // for string & symbol - long long number; // for number - int idref; +typedef struct object { + struct object *next; + struct object *free; + struct object *elements; + OSObject *object; + OSSymbol *key; // for dictionary + int size; + void *data; // for data + char *string; // for string & symbol + long long number; // for number + int idref; } object_t; // this code is reentrant, this structure contains all // state information for the parsing of a single buffer typedef struct parser_state { - const char *parseBuffer; // start of text to be parsed - int parseBufferIndex; // current index into text - int lineNumber; // current line number - object_t *objects; // internal objects in use - object_t *freeObjects; // internal objects that are free - OSDictionary *tags; // used to remember "ID" tags - OSString **errorString; // parse error with line - OSObject *parsedObject; // resultant object of parsed text - int parsedObjectCount; - int retrievedObjectCount; + const char *parseBuffer; // start of text to be parsed + int parseBufferIndex; // current index into text + int lineNumber; // current line number + object_t *objects; // internal objects in use + object_t *freeObjects; // internal objects that are free + OSDictionary *tags; // used to remember "ID" tags + OSString **errorString; // parse error with line + OSObject *parsedObject; // resultant object of parsed text + int parsedObjectCount; + int retrievedObjectCount; } parser_state_t; -#define STATE ((parser_state_t *)state) +#define STATE ((parser_state_t *)state) -#undef yyerror -#define yyerror(s) OSUnserializeerror(STATE, (s)) -static int OSUnserializeerror(parser_state_t *state, const char *s); +#undef yyerror +#define yyerror(s) OSUnserializeerror(STATE, (s)) +static int OSUnserializeerror(parser_state_t *state, const char *s); -static int yylex(YYSTYPE *lvalp, parser_state_t *state); +static int yylex(YYSTYPE *lvalp, parser_state_t *state); -static object_t *newObject(parser_state_t *state); -static void freeObject(parser_state_t *state, object_t *o); -static void rememberObject(parser_state_t *state, int tag, OSObject *o); -static object_t *retrieveObject(parser_state_t *state, int tag); -static void cleanupObjects(parser_state_t *state); +static object_t *newObject(parser_state_t *state); +static void freeObject(parser_state_t *state, object_t *o); +static void rememberObject(parser_state_t *state, int tag, OSObject *o); +static object_t *retrieveObject(parser_state_t *state, int tag); +static void cleanupObjects(parser_state_t *state); -static object_t *buildDictionary(parser_state_t *state, object_t *o); -static object_t 
*buildArray(parser_state_t *state, object_t *o); -static object_t *buildSet(parser_state_t *state, object_t *o); -static object_t *buildString(parser_state_t *state, object_t *o); -static object_t *buildSymbol(parser_state_t *state, object_t *o); -static object_t *buildData(parser_state_t *state, object_t *o); -static object_t *buildNumber(parser_state_t *state, object_t *o); -static object_t *buildBoolean(parser_state_t *state, object_t *o); +static object_t *buildDictionary(parser_state_t *state, object_t *o); +static object_t *buildArray(parser_state_t *state, object_t *o); +static object_t *buildSet(parser_state_t *state, object_t *o); +static object_t *buildString(parser_state_t *state, object_t *o); +static object_t *buildSymbol(parser_state_t *state, object_t *o); +static object_t *buildData(parser_state_t *state, object_t *o); +static object_t *buildNumber(parser_state_t *state, object_t *o); +static object_t *buildBoolean(parser_state_t *state, object_t *o); #include @@ -245,7 +245,7 @@ static object_t *buildBoolean(parser_state_t *state, object_t *o); # define YYTOKEN_TABLE 0 #endif -#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED +#if !defined YYSTYPE && !defined YYSTYPE_IS_DECLARED typedef int YYSTYPE; # define yystype YYSTYPE /* obsolescent; will be withdrawn */ # define YYSTYPE_IS_DECLARED 1 @@ -273,7 +273,7 @@ typedef unsigned char yytype_uint8; #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; #elif (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) typedef signed char yytype_int8; #else typedef short int yytype_int8; @@ -296,8 +296,8 @@ typedef short int yytype_int16; # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t -# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# elif !defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \ + || defined __cplusplus || defined _MSC_VER) # include /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else @@ -320,7 +320,7 @@ typedef short int yytype_int16; #endif /* Suppress unused-variable warnings by "using" E. */ -#if ! defined lint || defined __GNUC__ +#if !defined lint || defined __GNUC__ # define YYUSE(e) ((void) (e)) #else # define YYUSE(e) /* empty */ @@ -331,20 +331,20 @@ typedef short int yytype_int16; # define YYID(n) (n) #else #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static int -YYID (int i) +YYID(int i) #else static int -YYID (i) - int i; + YYID(i) +int i; #endif { - return i; + return i; } #endif -#if ! defined yyoverflow || YYERROR_VERBOSE +#if !defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ @@ -361,8 +361,8 @@ YYID (i) # define alloca _alloca # else # define YYSTACK_ALLOC alloca -# if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# if !defined _ALLOCA_H && !defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ + || defined __cplusplus || defined _MSC_VER) # include /* INFRINGES ON USER NAME SPACE */ # ifndef _STDLIB_H # define _STDLIB_H 1 @@ -373,13 +373,13 @@ YYID (i) # endif # ifdef YYSTACK_ALLOC - /* Pacify GCC's `empty if-body' warning. */ -# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0)) +/* Pacify GCC's `empty if-body' warning. 
*/ +# define YYSTACK_FREE(Ptr) do { /* empty */ ; } while (YYID (0)) # ifndef YYSTACK_ALLOC_MAXIMUM - /* The OS might guarantee only one guard page at the bottom of the stack, - and a page size can be as small as 4096 bytes. So we cannot safely - invoke alloca (N) if N exceeds 4096. Use a slightly smaller number - to allow for a few compiler-allocated temporary stack slots. */ +/* The OS might guarantee only one guard page at the bottom of the stack, + * and a page size can be as small as 4096 bytes. So we cannot safely + * invoke alloca (N) if N exceeds 4096. Use a slightly smaller number + * to allow for a few compiler-allocated temporary stack slots. */ # define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */ # endif # else @@ -388,9 +388,9 @@ YYID (i) # ifndef YYSTACK_ALLOC_MAXIMUM # define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM # endif -# if (defined __cplusplus && ! defined _STDLIB_H \ - && ! ((defined YYMALLOC || defined malloc) \ - && (defined YYFREE || defined free))) +# if (defined __cplusplus && !defined _STDLIB_H \ + && !((defined YYMALLOC || defined malloc) \ + && (defined YYFREE || defined free))) # include /* INFRINGES ON USER NAME SPACE */ # ifndef _STDLIB_H # define _STDLIB_H 1 @@ -398,74 +398,73 @@ YYID (i) # endif # ifndef YYMALLOC # define YYMALLOC malloc -# if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ +# if !defined malloc && !defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ + || defined __cplusplus || defined _MSC_VER) +void *malloc(YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free -# if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -void free (void *); /* INFRINGES ON USER NAME SPACE */ +# if !defined free && !defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \ + || defined __cplusplus || defined _MSC_VER) +void free(void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif # endif #endif /* ! defined yyoverflow || YYERROR_VERBOSE */ -#if (! defined yyoverflow \ - && (! defined __cplusplus \ - || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) +#if (!defined yyoverflow \ + && (!defined __cplusplus \ + || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ -union yyalloc -{ - yytype_int16 yyss; - YYSTYPE yyvs; - }; +union yyalloc { + yytype_int16 yyss; + YYSTYPE yyvs; +}; /* The size of the maximum gap between one aligned stack and the next. */ # define YYSTACK_GAP_MAXIMUM (sizeof (union yyalloc) - 1) /* The size of an array large to enough to hold all stacks, each with - N elements. */ + * N elements. */ # define YYSTACK_BYTES(N) \ ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ + YYSTACK_GAP_MAXIMUM) /* Copy COUNT objects from FROM to TO. The source and destination do - not overlap. */ + * not overlap. 
*/ # ifndef YYCOPY # if defined __GNUC__ && 1 < __GNUC__ # define YYCOPY(To, From, Count) \ __builtin_memcpy (To, From, (Count) * sizeof (*(From))) # else -# define YYCOPY(To, From, Count) \ - do \ - { \ - YYSIZE_T yyi; \ - for (yyi = 0; yyi < (Count); yyi++) \ - (To)[yyi] = (From)[yyi]; \ - } \ +# define YYCOPY(To, From, Count) \ + do \ + { \ + YYSIZE_T yyi; \ + for (yyi = 0; yyi < (Count); yyi++) \ + (To)[yyi] = (From)[yyi]; \ + } \ while (YYID (0)) # endif # endif /* Relocate STACK from its old location to the new one. The - local variables YYSIZE and YYSTACKSIZE give the old and new number of - elements in the stack, and YYPTR gives the new location of the - stack. Advance YYPTR to a properly aligned location for the next - stack. */ -# define YYSTACK_RELOCATE(Stack) \ - do \ - { \ - YYSIZE_T yynewbytes; \ - YYCOPY (&yyptr->Stack, Stack, yysize); \ - Stack = &yyptr->Stack; \ + * local variables YYSIZE and YYSTACKSIZE give the old and new number of + * elements in the stack, and YYPTR gives the new location of the + * stack. Advance YYPTR to a properly aligned location for the next + * stack. */ +# define YYSTACK_RELOCATE(Stack) \ + do \ + { \ + YYSIZE_T yynewbytes; \ + YYCOPY (&yyptr->Stack, Stack, yysize); \ + Stack = &yyptr->Stack; \ yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ - yyptr += yynewbytes / sizeof (*yyptr); \ - } \ + yyptr += yynewbytes / sizeof (*yyptr); \ + } \ while (YYID (0)) #endif @@ -488,272 +487,272 @@ union yyalloc #define YYUNDEFTOK 2 #define YYMAXUTOK 267 -#define YYTRANSLATE(YYX) \ +#define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) /* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ static const yytype_uint8 yytranslate[] = { - 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 15, 16, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 17, 2, 18, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 13, 2, 14, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, - 5, 6, 7, 8, 9, 10, 11, 12 + 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 15, 16, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 17, 2, 18, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 13, 2, 14, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, + 5, 6, 7, 8, 9, 10, 11, 12 }; #if YYDEBUG /* YYPRHS[YYN] -- Index of the first 
RHS symbol of rule number YYN in - YYRHS. */ + * YYRHS. */ static const yytype_uint8 yyprhs[] = { - 0, 0, 3, 4, 6, 8, 10, 12, 14, 16, - 18, 20, 22, 24, 27, 31, 33, 35, 38, 41, - 43, 46, 50, 52, 55, 59, 61, 63, 66, 68, - 70, 72, 74 + 0, 0, 3, 4, 6, 8, 10, 12, 14, 16, + 18, 20, 22, 24, 27, 31, 33, 35, 38, 41, + 43, 46, 50, 52, 55, 59, 61, 63, 66, 68, + 70, 72, 74 }; /* YYRHS -- A `-1'-separated list of the rules' RHS. */ static const yytype_int8 yyrhs[] = { - 20, 0, -1, -1, 21, -1, 12, -1, 22, -1, - 26, -1, 27, -1, 33, -1, 30, -1, 32, -1, - 29, -1, 31, -1, 13, 14, -1, 13, 23, 14, - -1, 6, -1, 24, -1, 23, 24, -1, 25, 21, - -1, 8, -1, 15, 16, -1, 15, 28, 16, -1, - 3, -1, 17, 18, -1, 17, 28, 18, -1, 10, - -1, 21, -1, 28, 21, -1, 4, -1, 5, -1, - 7, -1, 9, -1, 11, -1 + 20, 0, -1, -1, 21, -1, 12, -1, 22, -1, + 26, -1, 27, -1, 33, -1, 30, -1, 32, -1, + 29, -1, 31, -1, 13, 14, -1, 13, 23, 14, + -1, 6, -1, 24, -1, 23, 24, -1, 25, 21, + -1, 8, -1, 15, 16, -1, 15, 28, 16, -1, + 3, -1, 17, 18, -1, 17, 28, 18, -1, 10, + -1, 21, -1, 28, 21, -1, 4, -1, 5, -1, + 7, -1, 9, -1, 11, -1 }; /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ static const yytype_uint16 yyrline[] = { - 0, 149, 149, 152, 157, 162, 174, 186, 198, 210, - 222, 234, 246, 265, 268, 271, 274, 275, 290, 299, - 311, 314, 317, 320, 323, 326, 329, 332, 339, 342, - 345, 348, 351 + 0, 149, 149, 152, 157, 162, 174, 186, 198, 210, + 222, 234, 246, 265, 268, 271, 274, 275, 290, 299, + 311, 314, 317, 320, 323, 326, 329, 332, 339, 342, + 345, 348, 351 }; #endif #if YYDEBUG || YYERROR_VERBOSE || YYTOKEN_TABLE /* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. - First, the terminals, then, starting at YYNTOKENS, nonterminals. */ + * First, the terminals, then, starting at YYNTOKENS, nonterminals. */ static const char *const yytname[] = { - "$end", "error", "$undefined", "ARRAY", "BOOLEAN", "DATA", "DICTIONARY", - "IDREF", "KEY", "NUMBER", "SET", "STRING", "SYNTAX_ERROR", "'{'", "'}'", - "'('", "')'", "'['", "']'", "$accept", "input", "object", "dict", - "pairs", "pair", "key", "array", "set", "elements", "boolean", "data", - "idref", "number", "string", 0 + "$end", "error", "$undefined", "ARRAY", "BOOLEAN", "DATA", "DICTIONARY", + "IDREF", "KEY", "NUMBER", "SET", "STRING", "SYNTAX_ERROR", "'{'", "'}'", + "'('", "')'", "'['", "']'", "$accept", "input", "object", "dict", + "pairs", "pair", "key", "array", "set", "elements", "boolean", "data", + "idref", "number", "string", 0 }; #endif # ifdef YYPRINT /* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to - token YYLEX-NUM. */ + * token YYLEX-NUM. */ static const yytype_uint16 yytoknum[] = { - 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, - 265, 266, 267, 123, 125, 40, 41, 91, 93 + 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, + 265, 266, 267, 123, 125, 40, 41, 91, 93 }; # endif /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ static const yytype_uint8 yyr1[] = { - 0, 19, 20, 20, 20, 21, 21, 21, 21, 21, - 21, 21, 21, 22, 22, 22, 23, 23, 24, 25, - 26, 26, 26, 27, 27, 27, 28, 28, 29, 30, - 31, 32, 33 + 0, 19, 20, 20, 20, 21, 21, 21, 21, 21, + 21, 21, 21, 22, 22, 22, 23, 23, 24, 25, + 26, 26, 26, 27, 27, 27, 28, 28, 29, 30, + 31, 32, 33 }; /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. 
*/ static const yytype_uint8 yyr2[] = { - 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 2, 3, 1, 1, 2, 2, 1, - 2, 3, 1, 2, 3, 1, 1, 2, 1, 1, - 1, 1, 1 + 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 2, 3, 1, 1, 2, 2, 1, + 2, 3, 1, 2, 3, 1, 1, 2, 1, 1, + 1, 1, 1 }; /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state - STATE-NUM when YYTABLE doesn't specify something else to do. Zero - means the default is an error. */ + * STATE-NUM when YYTABLE doesn't specify something else to do. Zero + * means the default is an error. */ static const yytype_uint8 yydefact[] = { - 2, 22, 28, 29, 15, 30, 31, 25, 32, 4, - 0, 0, 0, 0, 3, 5, 6, 7, 11, 9, - 12, 10, 8, 19, 13, 0, 16, 0, 20, 26, - 0, 23, 0, 1, 14, 17, 18, 21, 27, 24 + 2, 22, 28, 29, 15, 30, 31, 25, 32, 4, + 0, 0, 0, 0, 3, 5, 6, 7, 11, 9, + 12, 10, 8, 19, 13, 0, 16, 0, 20, 26, + 0, 23, 0, 1, 14, 17, 18, 21, 27, 24 }; /* YYDEFGOTO[NTERM-NUM]. */ static const yytype_int8 yydefgoto[] = { - -1, 13, 29, 15, 25, 26, 27, 16, 17, 30, - 18, 19, 20, 21, 22 + -1, 13, 29, 15, 25, 26, 27, 16, 17, 30, + 18, 19, 20, 21, 22 }; /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing - STATE-NUM. */ + * STATE-NUM. */ #define YYPACT_NINF -20 static const yytype_int8 yypact[] = { - 46, -20, -20, -20, -20, -20, -20, -20, -20, -20, - 4, 61, -2, 10, -20, -20, -20, -20, -20, -20, - -20, -20, -20, -20, -20, 6, -20, 91, -20, -20, - 76, -20, 30, -20, -20, -20, -20, -20, -20, -20 + 46, -20, -20, -20, -20, -20, -20, -20, -20, -20, + 4, 61, -2, 10, -20, -20, -20, -20, -20, -20, + -20, -20, -20, -20, -20, 6, -20, 91, -20, -20, + 76, -20, 30, -20, -20, -20, -20, -20, -20, -20 }; /* YYPGOTO[NTERM-NUM]. */ static const yytype_int8 yypgoto[] = { - -20, -20, 0, -20, -20, -19, -20, -20, -20, 5, - -20, -20, -20, -20, -20 + -20, -20, 0, -20, -20, -19, -20, -20, -20, 5, + -20, -20, -20, -20, -20 }; /* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If - positive, shift that token. If negative, reduce the rule which - number is the opposite. If zero, do what YYDEFACT says. - If YYTABLE_NINF, syntax error. */ + * positive, shift that token. If negative, reduce the rule which + * number is the opposite. If zero, do what YYDEFACT says. + * If YYTABLE_NINF, syntax error. 
*/ #define YYTABLE_NINF -1 static const yytype_uint8 yytable[] = { - 14, 1, 2, 3, 4, 5, 35, 6, 7, 8, - 33, 10, 23, 11, 23, 12, 31, 32, 24, 0, - 34, 0, 0, 0, 0, 0, 0, 36, 0, 0, - 38, 0, 38, 1, 2, 3, 4, 5, 0, 6, - 7, 8, 0, 10, 0, 11, 0, 12, 39, 1, - 2, 3, 4, 5, 0, 6, 7, 8, 9, 10, - 0, 11, 0, 12, 1, 2, 3, 4, 5, 0, - 6, 7, 8, 0, 10, 0, 11, 28, 12, 1, - 2, 3, 4, 5, 0, 6, 7, 8, 0, 10, - 0, 11, 37, 12, 1, 2, 3, 4, 5, 0, - 6, 7, 8, 0, 10, 0, 11, 0, 12 + 14, 1, 2, 3, 4, 5, 35, 6, 7, 8, + 33, 10, 23, 11, 23, 12, 31, 32, 24, 0, + 34, 0, 0, 0, 0, 0, 0, 36, 0, 0, + 38, 0, 38, 1, 2, 3, 4, 5, 0, 6, + 7, 8, 0, 10, 0, 11, 0, 12, 39, 1, + 2, 3, 4, 5, 0, 6, 7, 8, 9, 10, + 0, 11, 0, 12, 1, 2, 3, 4, 5, 0, + 6, 7, 8, 0, 10, 0, 11, 28, 12, 1, + 2, 3, 4, 5, 0, 6, 7, 8, 0, 10, + 0, 11, 37, 12, 1, 2, 3, 4, 5, 0, + 6, 7, 8, 0, 10, 0, 11, 0, 12 }; static const yytype_int8 yycheck[] = { - 0, 3, 4, 5, 6, 7, 25, 9, 10, 11, - 0, 13, 8, 15, 8, 17, 18, 12, 14, -1, - 14, -1, -1, -1, -1, -1, -1, 27, -1, -1, - 30, -1, 32, 3, 4, 5, 6, 7, -1, 9, - 10, 11, -1, 13, -1, 15, -1, 17, 18, 3, - 4, 5, 6, 7, -1, 9, 10, 11, 12, 13, - -1, 15, -1, 17, 3, 4, 5, 6, 7, -1, - 9, 10, 11, -1, 13, -1, 15, 16, 17, 3, - 4, 5, 6, 7, -1, 9, 10, 11, -1, 13, - -1, 15, 16, 17, 3, 4, 5, 6, 7, -1, - 9, 10, 11, -1, 13, -1, 15, -1, 17 + 0, 3, 4, 5, 6, 7, 25, 9, 10, 11, + 0, 13, 8, 15, 8, 17, 18, 12, 14, -1, + 14, -1, -1, -1, -1, -1, -1, 27, -1, -1, + 30, -1, 32, 3, 4, 5, 6, 7, -1, 9, + 10, 11, -1, 13, -1, 15, -1, 17, 18, 3, + 4, 5, 6, 7, -1, 9, 10, 11, 12, 13, + -1, 15, -1, 17, 3, 4, 5, 6, 7, -1, + 9, 10, 11, -1, 13, -1, 15, 16, 17, 3, + 4, 5, 6, 7, -1, 9, 10, 11, -1, 13, + -1, 15, 16, 17, 3, 4, 5, 6, 7, -1, + 9, 10, 11, -1, 13, -1, 15, -1, 17 }; /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing - symbol of state STATE-NUM. */ + * symbol of state STATE-NUM. */ static const yytype_uint8 yystos[] = { - 0, 3, 4, 5, 6, 7, 9, 10, 11, 12, - 13, 15, 17, 20, 21, 22, 26, 27, 29, 30, - 31, 32, 33, 8, 14, 23, 24, 25, 16, 21, - 28, 18, 28, 0, 14, 24, 21, 16, 21, 18 + 0, 3, 4, 5, 6, 7, 9, 10, 11, 12, + 13, 15, 17, 20, 21, 22, 26, 27, 29, 30, + 31, 32, 33, 8, 14, 23, 24, 25, 16, 21, + 28, 18, 28, 0, 14, 24, 21, 16, 21, 18 }; -#define yyerrok (yyerrstatus = 0) -#define yyclearin (yychar = YYEMPTY) -#define YYEMPTY (-2) -#define YYEOF 0 +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = YYEMPTY) +#define YYEMPTY (-2) +#define YYEOF 0 -#define YYACCEPT goto yyacceptlab -#define YYABORT goto yyabortlab -#define YYERROR goto yyerrorlab +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrorlab /* Like YYERROR except do call yyerror. This remains here temporarily - to ease the transition to the new meaning of YYERROR, for GCC. - Once GCC version 2 has supplanted version 1, this can go. */ + * to ease the transition to the new meaning of YYERROR, for GCC. + * Once GCC version 2 has supplanted version 1, this can go. 
*/ -#define YYFAIL goto yyerrlab +#define YYFAIL goto yyerrlab #define YYRECOVERING() (!!yyerrstatus) -#define YYBACKUP(Token, Value) \ -do \ - if (yychar == YYEMPTY && yylen == 1) \ - { \ - yychar = (Token); \ - yylval = (Value); \ - yytoken = YYTRANSLATE (yychar); \ - YYPOPSTACK (1); \ - goto yybackup; \ - } \ - else \ - { \ +#define YYBACKUP(Token, Value) \ +do \ + if (yychar == YYEMPTY && yylen == 1) \ + { \ + yychar = (Token); \ + yylval = (Value); \ + yytoken = YYTRANSLATE (yychar); \ + YYPOPSTACK (1); \ + goto yybackup; \ + } \ + else \ + { \ yyerror (YY_("syntax error: cannot back up")); \ - YYERROR; \ - } \ + YYERROR; \ + } \ while (YYID (0)) -#define YYTERROR 1 -#define YYERRCODE 256 +#define YYTERROR 1 +#define YYERRCODE 256 /* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. - If N is 0, then set CURRENT to the empty location which ends - the previous symbol: RHS[0] (always defined). */ + * If N is 0, then set CURRENT to the empty location which ends + * the previous symbol: RHS[0] (always defined). */ #define YYRHSLOC(Rhs, K) ((Rhs)[K]) #ifndef YYLLOC_DEFAULT -# define YYLLOC_DEFAULT(Current, Rhs, N) \ - do \ +# define YYLLOC_DEFAULT(Current, Rhs, N) \ + do \ if (YYID (N)) \ - { \ - (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \ - (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \ - (Current).last_line = YYRHSLOC (Rhs, N).last_line; \ - (Current).last_column = YYRHSLOC (Rhs, N).last_column; \ - } \ - else \ - { \ - (Current).first_line = (Current).last_line = \ - YYRHSLOC (Rhs, 0).last_line; \ - (Current).first_column = (Current).last_column = \ - YYRHSLOC (Rhs, 0).last_column; \ - } \ + { \ + (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \ + (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \ + (Current).last_line = YYRHSLOC (Rhs, N).last_line; \ + (Current).last_column = YYRHSLOC (Rhs, N).last_column; \ + } \ + else \ + { \ + (Current).first_line = (Current).last_line = \ + YYRHSLOC (Rhs, 0).last_line; \ + (Current).first_column = (Current).last_column = \ + YYRHSLOC (Rhs, 0).last_column; \ + } \ while (YYID (0)) #endif /* YY_LOCATION_PRINT -- Print the location on the stream. - This macro was not mandated originally: define only if we know - we won't break user code: when these are the locations we know. */ + * This macro was not mandated originally: define only if we know + * we won't break user code: when these are the locations we know. 
*/ #ifndef YY_LOCATION_PRINT # if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL -# define YY_LOCATION_PRINT(File, Loc) \ - fprintf (File, "%d.%d-%d.%d", \ - (Loc).first_line, (Loc).first_column, \ +# define YY_LOCATION_PRINT(File, Loc) \ + fprintf (File, "%d.%d-%d.%d", \ + (Loc).first_line, (Loc).first_column, \ (Loc).last_line, (Loc).last_column) # else # define YY_LOCATION_PRINT(File, Loc) ((void) 0) @@ -777,150 +776,152 @@ while (YYID (0)) # define YYFPRINTF fprintf # endif -# define YYDPRINTF(Args) \ -do { \ - if (yydebug) \ - YYFPRINTF Args; \ +# define YYDPRINTF(Args) \ +do { \ + if (yydebug) \ + YYFPRINTF Args; \ } while (YYID (0)) -# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ -do { \ - if (yydebug) \ - { \ - YYFPRINTF (stderr, "%s ", Title); \ - yy_symbol_print (stderr, \ - Type, Value); \ - YYFPRINTF (stderr, "\n"); \ - } \ +# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ +do { \ + if (yydebug) \ + { \ + YYFPRINTF (stderr, "%s ", Title); \ + yy_symbol_print (stderr, \ + Type, Value); \ + YYFPRINTF (stderr, "\n"); \ + } \ } while (YYID (0)) /*--------------------------------. -| Print this symbol on YYOUTPUT. | -`--------------------------------*/ + | Print this symbol on YYOUTPUT. | + | `--------------------------------*/ /*ARGSUSED*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) +yy_symbol_value_print(FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) #else static void -yy_symbol_value_print (yyoutput, yytype, yyvaluep) - FILE *yyoutput; - int yytype; - YYSTYPE const * const yyvaluep; + yy_symbol_value_print(yyoutput, yytype, yyvaluep) +FILE *yyoutput; +int yytype; +YYSTYPE const * const yyvaluep; #endif { - if (!yyvaluep) - return; + if (!yyvaluep) { + return; + } # ifdef YYPRINT - if (yytype < YYNTOKENS) - YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); + if (yytype < YYNTOKENS) { + YYPRINT(yyoutput, yytoknum[yytype], *yyvaluep); + } # else - YYUSE (yyoutput); + YYUSE(yyoutput); # endif - switch (yytype) - { - default: - break; - } + switch (yytype) { + default: + break; + } } /*--------------------------------. -| Print this symbol on YYOUTPUT. | -`--------------------------------*/ + | Print this symbol on YYOUTPUT. | + | `--------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) +yy_symbol_print(FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) #else static void -yy_symbol_print (yyoutput, yytype, yyvaluep) - FILE *yyoutput; - int yytype; - YYSTYPE const * const yyvaluep; + yy_symbol_print(yyoutput, yytype, yyvaluep) +FILE *yyoutput; +int yytype; +YYSTYPE const * const yyvaluep; #endif { - if (yytype < YYNTOKENS) - YYFPRINTF (yyoutput, "token %s (", yytname[yytype]); - else - YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]); + if (yytype < YYNTOKENS) { + YYFPRINTF(yyoutput, "token %s (", yytname[yytype]); + } else { + YYFPRINTF(yyoutput, "nterm %s (", yytname[yytype]); + } - yy_symbol_value_print (yyoutput, yytype, yyvaluep); - YYFPRINTF (yyoutput, ")"); + yy_symbol_value_print(yyoutput, yytype, yyvaluep); + YYFPRINTF(yyoutput, ")"); } /*------------------------------------------------------------------. 
-| yy_stack_print -- Print the state stack from its BOTTOM up to its | -| TOP (included). | -`------------------------------------------------------------------*/ + | yy_stack_print -- Print the state stack from its BOTTOM up to its | + | TOP (included). | + | `------------------------------------------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yy_stack_print (yytype_int16 *bottom, yytype_int16 *top) +yy_stack_print(yytype_int16 *bottom, yytype_int16 *top) #else static void -yy_stack_print (bottom, top) - yytype_int16 *bottom; - yytype_int16 *top; + yy_stack_print(bottom, top) +yytype_int16 *bottom; +yytype_int16 *top; #endif { - YYFPRINTF (stderr, "Stack now"); - for (; bottom <= top; ++bottom) - YYFPRINTF (stderr, " %d", *bottom); - YYFPRINTF (stderr, "\n"); + YYFPRINTF(stderr, "Stack now"); + for (; bottom <= top; ++bottom) { + YYFPRINTF(stderr, " %d", *bottom); + } + YYFPRINTF(stderr, "\n"); } -# define YY_STACK_PRINT(Bottom, Top) \ -do { \ - if (yydebug) \ - yy_stack_print ((Bottom), (Top)); \ +# define YY_STACK_PRINT(Bottom, Top) \ +do { \ + if (yydebug) \ + yy_stack_print ((Bottom), (Top)); \ } while (YYID (0)) /*------------------------------------------------. -| Report that the YYRULE is going to be reduced. | -`------------------------------------------------*/ + | Report that the YYRULE is going to be reduced. | + | `------------------------------------------------*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yy_reduce_print (YYSTYPE *yyvsp, int yyrule) +yy_reduce_print(YYSTYPE *yyvsp, int yyrule) #else static void -yy_reduce_print (yyvsp, yyrule) - YYSTYPE *yyvsp; - int yyrule; + yy_reduce_print(yyvsp, yyrule) +YYSTYPE *yyvsp; +int yyrule; #endif { - int yynrhs = yyr2[yyrule]; - int yyi; - unsigned long int yylno = yyrline[yyrule]; - YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", - yyrule - 1, yylno); - /* The symbols being reduced. */ - for (yyi = 0; yyi < yynrhs; yyi++) - { - fprintf (stderr, " $%d = ", yyi + 1); - yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi], - &(yyvsp[(yyi + 1) - (yynrhs)]) - ); - fprintf (stderr, "\n"); - } + int yynrhs = yyr2[yyrule]; + int yyi; + unsigned long int yylno = yyrline[yyrule]; + YYFPRINTF(stderr, "Reducing stack by rule %d (line %lu):\n", + yyrule - 1, yylno); + /* The symbols being reduced. */ + for (yyi = 0; yyi < yynrhs; yyi++) { + fprintf(stderr, " $%d = ", yyi + 1); + yy_symbol_print(stderr, yyrhs[yyprhs[yyrule] + yyi], + &(yyvsp[(yyi + 1) - (yynrhs)]) + ); + fprintf(stderr, "\n"); + } } -# define YY_REDUCE_PRINT(Rule) \ -do { \ - if (yydebug) \ +# define YY_REDUCE_PRINT(Rule) \ +do { \ + if (yydebug) \ yy_reduce_print (yyvsp, Rule); \ } while (YYID (0)) /* Nonzero means print parse trace. It is left uninitialized so that - multiple parsers can coexist. */ + * multiple parsers can coexist. */ int yydebug; #else /* !YYDEBUG */ # define YYDPRINTF(Args) @@ -931,22 +932,22 @@ int yydebug; /* YYINITDEPTH -- initial size of the parser's stacks. */ -#ifndef YYINITDEPTH +#ifndef YYINITDEPTH # define YYINITDEPTH 64 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only - if the built-in stack extension method is used). 
- - Do not make this value too large; the results are undefined if - YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) - evaluated with infinite-precision integer arithmetic. */ + * if the built-in stack extension method is used). + * + * Do not make this value too large; the results are undefined if + * YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH) + * evaluated with infinite-precision integer arithmetic. */ #ifndef YYMAXDEPTH # define YYMAXDEPTH 10000 #endif - + #if YYERROR_VERBOSE @@ -956,19 +957,20 @@ int yydebug; # else /* Return the length of YYSTR. */ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static YYSIZE_T -yystrlen (const char *yystr) +yystrlen(const char *yystr) #else static YYSIZE_T -yystrlen (yystr) - const char *yystr; + yystrlen(yystr) +const char *yystr; #endif { - YYSIZE_T yylen; - for (yylen = 0; yystr[yylen]; yylen++) - continue; - return yylen; + YYSIZE_T yylen; + for (yylen = 0; yystr[yylen]; yylen++) { + continue; + } + return yylen; } # endif # endif @@ -978,232 +980,229 @@ yystrlen (yystr) # define yystpcpy stpcpy # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in - YYDEST. */ + * YYDEST. */ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static char * -yystpcpy (char *yydest, const char *yysrc) +yystpcpy(char *yydest, const char *yysrc) #else static char * -yystpcpy (yydest, yysrc) - char *yydest; - const char *yysrc; +yystpcpy(yydest, yysrc) +char *yydest; +const char *yysrc; #endif { - char *yyd = yydest; - const char *yys = yysrc; + char *yyd = yydest; + const char *yys = yysrc; - while ((*yyd++ = *yys++) != '\0') - continue; + while ((*yyd++ = *yys++) != '\0') { + continue; + } - return yyd - 1; + return yyd - 1; } # endif # endif # ifndef yytnamerr /* Copy to YYRES the contents of YYSTR after stripping away unnecessary - quotes and backslashes, so that it's suitable for yyerror. The - heuristic is that double-quoting is unnecessary unless the string - contains an apostrophe, a comma, or backslash (other than - backslash-backslash). YYSTR is taken from yytname. If YYRES is - null, do not copy; instead, return the length of what the result - would have been. */ + * quotes and backslashes, so that it's suitable for yyerror. The + * heuristic is that double-quoting is unnecessary unless the string + * contains an apostrophe, a comma, or backslash (other than + * backslash-backslash). YYSTR is taken from yytname. If YYRES is + * null, do not copy; instead, return the length of what the result + * would have been. */ static YYSIZE_T -yytnamerr (char *yyres, const char *yystr) +yytnamerr(char *yyres, const char *yystr) { - if (*yystr == '"') - { - YYSIZE_T yyn = 0; - char const *yyp = yystr; - - for (;;) - switch (*++yyp) - { - case '\'': - case ',': - goto do_not_strip_quotes; - - case '\\': - if (*++yyp != '\\') - goto do_not_strip_quotes; - /* Fall through. */ - default: - if (yyres) - yyres[yyn] = *yyp; - yyn++; - break; - - case '"': - if (yyres) - yyres[yyn] = '\0'; - return yyn; - } - do_not_strip_quotes: ; - } - - if (! 
yyres) - return yystrlen (yystr); - - return yystpcpy (yyres, yystr) - yyres; + if (*yystr == '"') { + YYSIZE_T yyn = 0; + char const *yyp = yystr; + + for (;;) { + switch (*++yyp) { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') { + goto do_not_strip_quotes; + } + /* Fall through. */ + default: + if (yyres) { + yyres[yyn] = *yyp; + } + yyn++; + break; + + case '"': + if (yyres) { + yyres[yyn] = '\0'; + } + return yyn; + } + } +do_not_strip_quotes:; + } + + if (!yyres) { + return yystrlen(yystr); + } + + return yystpcpy(yyres, yystr) - yyres; } # endif /* Copy into YYRESULT an error message about the unexpected token - YYCHAR while in state YYSTATE. Return the number of bytes copied, - including the terminating null byte. If YYRESULT is null, do not - copy anything; just return the number of bytes that would be - copied. As a special case, return 0 if an ordinary "syntax error" - message will do. Return YYSIZE_MAXIMUM if overflow occurs during - size calculation. */ + * YYCHAR while in state YYSTATE. Return the number of bytes copied, + * including the terminating null byte. If YYRESULT is null, do not + * copy anything; just return the number of bytes that would be + * copied. As a special case, return 0 if an ordinary "syntax error" + * message will do. Return YYSIZE_MAXIMUM if overflow occurs during + * size calculation. */ static YYSIZE_T -yysyntax_error (char *yyresult, int yystate, int yychar) +yysyntax_error(char *yyresult, int yystate, int yychar) { - int yyn = yypact[yystate]; - - if (! (YYPACT_NINF < yyn && yyn <= YYLAST)) - return 0; - else - { - int yytype = YYTRANSLATE (yychar); - YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]); - YYSIZE_T yysize = yysize0; - YYSIZE_T yysize1; - int yysize_overflow = 0; - enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; - char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; - int yyx; + int yyn = yypact[yystate]; + + if (!(YYPACT_NINF < yyn && yyn <= YYLAST)) { + return 0; + } else { + int yytype = YYTRANSLATE(yychar); + YYSIZE_T yysize0 = yytnamerr(0, yytname[yytype]); + YYSIZE_T yysize = yysize0; + YYSIZE_T yysize1; + int yysize_overflow = 0; + enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; + char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; + int yyx; # if 0 - /* This is so xgettext sees the translatable formats that are - constructed on the fly. */ - YY_("syntax error, unexpected %s"); - YY_("syntax error, unexpected %s, expecting %s"); - YY_("syntax error, unexpected %s, expecting %s or %s"); - YY_("syntax error, unexpected %s, expecting %s or %s or %s"); - YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"); + /* This is so xgettext sees the translatable formats that are + * constructed on the fly. */ + YY_("syntax error, unexpected %s"); + YY_("syntax error, unexpected %s, expecting %s"); + YY_("syntax error, unexpected %s, expecting %s or %s"); + YY_("syntax error, unexpected %s, expecting %s or %s or %s"); + YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"); # endif - char *yyfmt; - char const *yyf; - static char const yyunexpected[] = "syntax error, unexpected %s"; - static char const yyexpecting[] = ", expecting %s"; - static char const yyor[] = " or %s"; - char yyformat[sizeof yyunexpected - + sizeof yyexpecting - 1 - + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2) - * (sizeof yyor - 1))]; - char const *yyprefix = yyexpecting; - - /* Start YYX at -YYN if negative to avoid negative indexes in - YYCHECK. */ - int yyxbegin = yyn < 0 ? 
-yyn : 0; - - /* Stay within bounds of both yycheck and yytname. */ - int yychecklim = YYLAST - yyn + 1; - int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; - int yycount = 1; - - yyarg[0] = yytname[yytype]; - yyfmt = yystpcpy (yyformat, yyunexpected); - - for (yyx = yyxbegin; yyx < yyxend; ++yyx) - if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) - { - if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) - { - yycount = 1; - yysize = yysize0; - yyformat[sizeof yyunexpected - 1] = '\0'; - break; - } - yyarg[yycount++] = yytname[yyx]; - yysize1 = yysize + yytnamerr (0, yytname[yyx]); - yysize_overflow |= (yysize1 < yysize); - yysize = yysize1; - yyfmt = yystpcpy (yyfmt, yyprefix); - yyprefix = yyor; - } - - yyf = YY_(yyformat); - yysize1 = yysize + yystrlen (yyf); - yysize_overflow |= (yysize1 < yysize); - yysize = yysize1; - - if (yysize_overflow) - return YYSIZE_MAXIMUM; - - if (yyresult) - { - /* Avoid sprintf, as that infringes on the user's name space. - Don't have undefined behavior even if the translation - produced a string with the wrong number of "%s"s. */ - char *yyp = yyresult; - int yyi = 0; - while ((*yyp = *yyf) != '\0') - { - if (*yyp == '%' && yyf[1] == 's' && yyi < yycount) - { - yyp += yytnamerr (yyp, yyarg[yyi++]); - yyf += 2; + char *yyfmt; + char const *yyf; + static char const yyunexpected[] = "syntax error, unexpected %s"; + static char const yyexpecting[] = ", expecting %s"; + static char const yyor[] = " or %s"; + char yyformat[sizeof yyunexpected + + sizeof yyexpecting - 1 + + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2) + * (sizeof yyor - 1))]; + char const *yyprefix = yyexpecting; + + /* Start YYX at -YYN if negative to avoid negative indexes in + * YYCHECK. */ + int yyxbegin = yyn < 0 ? -yyn : 0; + + /* Stay within bounds of both yycheck and yytname. */ + int yychecklim = YYLAST - yyn + 1; + int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS; + int yycount = 1; + + yyarg[0] = yytname[yytype]; + yyfmt = yystpcpy(yyformat, yyunexpected); + + for (yyx = yyxbegin; yyx < yyxend; ++yyx) { + if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR) { + if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { + yycount = 1; + yysize = yysize0; + yyformat[sizeof yyunexpected - 1] = '\0'; + break; + } + yyarg[yycount++] = yytname[yyx]; + yysize1 = yysize + yytnamerr(0, yytname[yyx]); + yysize_overflow |= (yysize1 < yysize); + yysize = yysize1; + yyfmt = yystpcpy(yyfmt, yyprefix); + yyprefix = yyor; + } } - else - { - yyp++; - yyf++; + + yyf = YY_(yyformat); + yysize1 = yysize + yystrlen(yyf); + yysize_overflow |= (yysize1 < yysize); + yysize = yysize1; + + if (yysize_overflow) { + return YYSIZE_MAXIMUM; + } + + if (yyresult) { + /* Avoid sprintf, as that infringes on the user's name space. + * Don't have undefined behavior even if the translation + * produced a string with the wrong number of "%s"s. */ + char *yyp = yyresult; + int yyi = 0; + while ((*yyp = *yyf) != '\0') { + if (*yyp == '%' && yyf[1] == 's' && yyi < yycount) { + yyp += yytnamerr(yyp, yyarg[yyi++]); + yyf += 2; + } else { + yyp++; + yyf++; + } + } } - } + return yysize; } - return yysize; - } } #endif /* YYERROR_VERBOSE */ - + /*-----------------------------------------------. -| Release the memory associated to this symbol. | -`-----------------------------------------------*/ + | Release the memory associated to this symbol. 
| + | `-----------------------------------------------*/ /*ARGSUSED*/ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) static void -yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) +yydestruct(const char *yymsg, int yytype, YYSTYPE *yyvaluep) #else static void -yydestruct (yymsg, yytype, yyvaluep) - const char *yymsg; - int yytype; - YYSTYPE *yyvaluep; + yydestruct(yymsg, yytype, yyvaluep) +const char *yymsg; +int yytype; +YYSTYPE *yyvaluep; #endif { - YYUSE (yyvaluep); - - if (!yymsg) - yymsg = "Deleting"; - YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); + YYUSE(yyvaluep); - switch (yytype) - { + if (!yymsg) { + yymsg = "Deleting"; + } + YY_SYMBOL_PRINT(yymsg, yytype, yyvaluep, yylocationp); - default: - break; - } + switch (yytype) { + default: + break; + } } - + /* Prevent warnings from -Wmissing-prototypes. */ #ifdef YYPARSE_PARAM #if defined __STDC__ || defined __cplusplus -int yyparse (void *YYPARSE_PARAM); +int yyparse(void *YYPARSE_PARAM); #else -int yyparse (); +int yyparse(); #endif #else /* ! YYPARSE_PARAM */ #if defined __STDC__ || defined __cplusplus -int yyparse (void); +int yyparse(void); #else -int yyparse (); +int yyparse(); #endif #endif /* ! YYPARSE_PARAM */ @@ -1213,751 +1212,754 @@ int yyparse (); /*----------. -| yyparse. | -`----------*/ + | yyparse. | + | `----------*/ #ifdef YYPARSE_PARAM #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) int -yyparse (void *YYPARSE_PARAM) +yyparse(void *YYPARSE_PARAM) #else int -yyparse (YYPARSE_PARAM) - void *YYPARSE_PARAM; + yyparse(YYPARSE_PARAM) +void *YYPARSE_PARAM; #endif #else /* ! YYPARSE_PARAM */ #if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) + || defined __cplusplus || defined _MSC_VER) int -yyparse (void) +yyparse(void) #else int -yyparse () +yyparse() #endif #endif { - /* The look-ahead symbol. */ -int yychar; + /* The look-ahead symbol. */ + int yychar; /* The semantic value of the look-ahead symbol. */ -YYSTYPE yylval; + YYSTYPE yylval; /* Number of syntax errors so far. */ -int yynerrs; - - int yystate; - int yyn; - int yyresult; - /* Number of tokens to shift before error messages enabled. */ - int yyerrstatus; - /* Look-ahead token as an internal (translated) token number. */ - int yytoken = 0; + int yynerrs; + + int yystate; + int yyn; + int yyresult; + /* Number of tokens to shift before error messages enabled. */ + int yyerrstatus; + /* Look-ahead token as an internal (translated) token number. */ + int yytoken = 0; #if YYERROR_VERBOSE - /* Buffer for error messages, and its allocated size. */ - char yymsgbuf[128]; - char *yymsg = yymsgbuf; - YYSIZE_T yymsg_alloc = sizeof yymsgbuf; + /* Buffer for error messages, and its allocated size. */ + char yymsgbuf[128]; + char *yymsg = yymsgbuf; + YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif - /* Three stacks and their tools: - `yyss': related to states, - `yyvs': related to semantic values, - `yyls': related to locations. - - Refer to the stacks thru separate pointers, to allow yyoverflow - to reallocate them elsewhere. */ + /* Three stacks and their tools: + * `yyss': related to states, + * `yyvs': related to semantic values, + * `yyls': related to locations. + * + * Refer to the stacks thru separate pointers, to allow yyoverflow + * to reallocate them elsewhere. */ - /* The state stack. 
*/ - yytype_int16 yyssa[YYINITDEPTH]; - yytype_int16 *yyss = yyssa; - yytype_int16 *yyssp; + /* The state stack. */ + yytype_int16 yyssa[YYINITDEPTH]; + yytype_int16 *yyss = yyssa; + yytype_int16 *yyssp; - /* The semantic value stack. */ - YYSTYPE yyvsa[YYINITDEPTH]; - YYSTYPE *yyvs = yyvsa; - YYSTYPE *yyvsp; + /* The semantic value stack. */ + YYSTYPE yyvsa[YYINITDEPTH]; + YYSTYPE *yyvs = yyvsa; + YYSTYPE *yyvsp; #define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) - YYSIZE_T yystacksize = YYINITDEPTH; + YYSIZE_T yystacksize = YYINITDEPTH; - /* The variables used to return semantic value and location from the - action routines. */ - YYSTYPE yyval; + /* The variables used to return semantic value and location from the + * action routines. */ + YYSTYPE yyval; - /* The number of symbols on the RHS of the reduced rule. - Keep to zero when no symbol should be popped. */ - int yylen = 0; + /* The number of symbols on the RHS of the reduced rule. + * Keep to zero when no symbol should be popped. */ + int yylen = 0; - YYDPRINTF ((stderr, "Starting parse\n")); + YYDPRINTF((stderr, "Starting parse\n")); - yystate = 0; - yyerrstatus = 0; - yynerrs = 0; - yychar = YYEMPTY; /* Cause a token to be read. */ + yystate = 0; + yyerrstatus = 0; + yynerrs = 0; + yychar = YYEMPTY; /* Cause a token to be read. */ - /* Initialize stack pointers. - Waste one element of value and location stack - so that they stay on the same level as the state stack. - The wasted elements are never initialized. */ + /* Initialize stack pointers. + * Waste one element of value and location stack + * so that they stay on the same level as the state stack. + * The wasted elements are never initialized. */ - yyssp = yyss; - yyvsp = yyvs; + yyssp = yyss; + yyvsp = yyvs; - goto yysetstate; + goto yysetstate; /*------------------------------------------------------------. -| yynewstate -- Push a new state, which is found in yystate. | -`------------------------------------------------------------*/ - yynewstate: - /* In all cases, when you get here, the value and location stacks - have just been pushed. So pushing a state here evens the stacks. */ - yyssp++; + | yynewstate -- Push a new state, which is found in yystate. | + | `------------------------------------------------------------*/ +yynewstate: + /* In all cases, when you get here, the value and location stacks + * have just been pushed. So pushing a state here evens the stacks. */ + yyssp++; - yysetstate: - *yyssp = yystate; +yysetstate: + *yyssp = yystate; - if (yyss + yystacksize - 1 <= yyssp) - { - /* Get the current used size of the three stacks, in elements. */ - YYSIZE_T yysize = yyssp - yyss + 1; + if (yyss + yystacksize - 1 <= yyssp) { + /* Get the current used size of the three stacks, in elements. */ + YYSIZE_T yysize = yyssp - yyss + 1; #ifdef yyoverflow - { - /* Give user a chance to reallocate the stack. Use copies of - these so that the &'s don't force the real ones into - memory. */ - YYSTYPE *yyvs1 = yyvs; - yytype_int16 *yyss1 = yyss; - - - /* Each stack pointer address is followed by the size of the - data in use in that stack, in bytes. This used to be a - conditional around just the two extra args, but that might - be undefined if yyoverflow is a macro. */ - yyoverflow (YY_("memory exhausted"), - &yyss1, yysize * sizeof (*yyssp), - &yyvs1, yysize * sizeof (*yyvsp), - - &yystacksize); - - yyss = yyss1; - yyvs = yyvs1; - } + { + /* Give user a chance to reallocate the stack. Use copies of + * these so that the &'s don't force the real ones into + * memory. 
*/ + YYSTYPE *yyvs1 = yyvs; + yytype_int16 *yyss1 = yyss; + + + /* Each stack pointer address is followed by the size of the + * data in use in that stack, in bytes. This used to be a + * conditional around just the two extra args, but that might + * be undefined if yyoverflow is a macro. */ + yyoverflow(YY_("memory exhausted"), + &yyss1, yysize * sizeof(*yyssp), + &yyvs1, yysize * sizeof(*yyvsp), + + &yystacksize); + + yyss = yyss1; + yyvs = yyvs1; + } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE - goto yyexhaustedlab; + goto yyexhaustedlab; # else - /* Extend the stack our own way. */ - if (YYMAXDEPTH <= yystacksize) - goto yyexhaustedlab; - yystacksize *= 2; - if (YYMAXDEPTH < yystacksize) - yystacksize = YYMAXDEPTH; - - { - yytype_int16 *yyss1 = yyss; - union yyalloc *yyptr = - (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); - if (! yyptr) - goto yyexhaustedlab; - YYSTACK_RELOCATE (yyss); - YYSTACK_RELOCATE (yyvs); + /* Extend the stack our own way. */ + if (YYMAXDEPTH <= yystacksize) { + goto yyexhaustedlab; + } + yystacksize *= 2; + if (YYMAXDEPTH < yystacksize) { + yystacksize = YYMAXDEPTH; + } + + { + yytype_int16 *yyss1 = yyss; + union yyalloc *yyptr = + (union yyalloc *) YYSTACK_ALLOC(YYSTACK_BYTES(yystacksize)); + if (!yyptr) { + goto yyexhaustedlab; + } + YYSTACK_RELOCATE(yyss); + YYSTACK_RELOCATE(yyvs); # undef YYSTACK_RELOCATE - if (yyss1 != yyssa) - YYSTACK_FREE (yyss1); - } + if (yyss1 != yyssa) { + YYSTACK_FREE(yyss1); + } + } # endif #endif /* no yyoverflow */ - yyssp = yyss + yysize - 1; - yyvsp = yyvs + yysize - 1; + yyssp = yyss + yysize - 1; + yyvsp = yyvs + yysize - 1; - YYDPRINTF ((stderr, "Stack size increased to %lu\n", - (unsigned long int) yystacksize)); + YYDPRINTF((stderr, "Stack size increased to %lu\n", + (unsigned long int) yystacksize)); - if (yyss + yystacksize - 1 <= yyssp) - YYABORT; - } + if (yyss + yystacksize - 1 <= yyssp) { + YYABORT; + } + } - YYDPRINTF ((stderr, "Entering state %d\n", yystate)); + YYDPRINTF((stderr, "Entering state %d\n", yystate)); - goto yybackup; + goto yybackup; /*-----------. -| yybackup. | -`-----------*/ + | yybackup. | + | `-----------*/ yybackup: - /* Do appropriate processing given the current state. Read a - look-ahead token if we need one and don't already have one. */ - - /* First try to decide what to do without reference to look-ahead token. */ - yyn = yypact[yystate]; - if (yyn == YYPACT_NINF) - goto yydefault; - - /* Not known => get a look-ahead token if don't already have one. */ - - /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */ - if (yychar == YYEMPTY) - { - YYDPRINTF ((stderr, "Reading a token: ")); - yychar = YYLEX; - } - - if (yychar <= YYEOF) - { - yychar = yytoken = YYEOF; - YYDPRINTF ((stderr, "Now at end of input.\n")); - } - else - { - yytoken = YYTRANSLATE (yychar); - YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc); - } - - /* If the proper action on seeing token YYTOKEN is to reduce or to - detect an error, take that action. */ - yyn += yytoken; - if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) - goto yydefault; - yyn = yytable[yyn]; - if (yyn <= 0) - { - if (yyn == 0 || yyn == YYTABLE_NINF) - goto yyerrlab; - yyn = -yyn; - goto yyreduce; - } - - if (yyn == YYFINAL) - YYACCEPT; - - /* Count tokens shifted since error; after three, turn off error - status. */ - if (yyerrstatus) - yyerrstatus--; - - /* Shift the look-ahead token. 
*/ - YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc); - - /* Discard the shifted token unless it is eof. */ - if (yychar != YYEOF) - yychar = YYEMPTY; - - yystate = yyn; - *++yyvsp = yylval; - - goto yynewstate; + /* Do appropriate processing given the current state. Read a + * look-ahead token if we need one and don't already have one. */ + /* First try to decide what to do without reference to look-ahead token. */ + yyn = yypact[yystate]; + if (yyn == YYPACT_NINF) { + goto yydefault; + } -/*-----------------------------------------------------------. -| yydefault -- do the default action for the current state. | -`-----------------------------------------------------------*/ -yydefault: - yyn = yydefact[yystate]; - if (yyn == 0) - goto yyerrlab; - goto yyreduce; + /* Not known => get a look-ahead token if don't already have one. */ + /* YYCHAR is either YYEMPTY or YYEOF or a valid look-ahead symbol. */ + if (yychar == YYEMPTY) { + YYDPRINTF((stderr, "Reading a token: ")); + yychar = YYLEX; + } -/*-----------------------------. -| yyreduce -- Do a reduction. | -`-----------------------------*/ -yyreduce: - /* yyn is the number of a rule to reduce with. */ - yylen = yyr2[yyn]; + if (yychar <= YYEOF) { + yychar = yytoken = YYEOF; + YYDPRINTF((stderr, "Now at end of input.\n")); + } else { + yytoken = YYTRANSLATE(yychar); + YY_SYMBOL_PRINT("Next token is", yytoken, &yylval, &yylloc); + } + + /* If the proper action on seeing token YYTOKEN is to reduce or to + * detect an error, take that action. */ + yyn += yytoken; + if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken) { + goto yydefault; + } + yyn = yytable[yyn]; + if (yyn <= 0) { + if (yyn == 0 || yyn == YYTABLE_NINF) { + goto yyerrlab; + } + yyn = -yyn; + goto yyreduce; + } - /* If YYLEN is nonzero, implement the default value of the action: - `$$ = $1'. + if (yyn == YYFINAL) { + YYACCEPT; + } + + /* Count tokens shifted since error; after three, turn off error + * status. */ + if (yyerrstatus) { + yyerrstatus--; + } + + /* Shift the look-ahead token. */ + YY_SYMBOL_PRINT("Shifting", yytoken, &yylval, &yylloc); + + /* Discard the shifted token unless it is eof. */ + if (yychar != YYEOF) { + yychar = YYEMPTY; + } + + yystate = yyn; + *++yyvsp = yylval; - Otherwise, the following line sets YYVAL to garbage. - This behavior is undocumented and Bison - users should not rely upon it. Assigning to YYVAL - unconditionally makes the parser a bit smaller, and it avoids a - GCC warning that YYVAL may be used uninitialized. */ - yyval = yyvsp[1-yylen]; + goto yynewstate; - YY_REDUCE_PRINT (yyn); - switch (yyn) - { - case 2: +/*-----------------------------------------------------------. + | yydefault -- do the default action for the current state. | + | `-----------------------------------------------------------*/ +yydefault: + yyn = yydefact[yystate]; + if (yyn == 0) { + goto yyerrlab; + } + goto yyreduce; + + +/*-----------------------------. + | yyreduce -- Do a reduction. | + | `-----------------------------*/ +yyreduce: + /* yyn is the number of a rule to reduce with. */ + yylen = yyr2[yyn]; + + /* If YYLEN is nonzero, implement the default value of the action: + * `$$ = $1'. + * + * Otherwise, the following line sets YYVAL to garbage. + * This behavior is undocumented and Bison + * users should not rely upon it. Assigning to YYVAL + * unconditionally makes the parser a bit smaller, and it avoids a + * GCC warning that YYVAL may be used uninitialized. 
*/ + yyval = yyvsp[1 - yylen]; + + + YY_REDUCE_PRINT(yyn); + switch (yyn) { + case 2: #line 149 "OSUnserializeXML.y" - { yyerror("unexpected end of buffer"); - YYERROR; - ;} - break; + { yyerror("unexpected end of buffer"); + YYERROR; + ;} + break; - case 3: + case 3: #line 152 "OSUnserializeXML.y" - { STATE->parsedObject = (yyvsp[(1) - (1)])->object; - (yyvsp[(1) - (1)])->object = 0; - freeObject(STATE, (yyvsp[(1) - (1)])); - YYACCEPT; - ;} - break; - - case 4: + { STATE->parsedObject = (yyvsp[(1) - (1)])->object; + (yyvsp[(1) - (1)])->object = 0; + freeObject(STATE, (yyvsp[(1) - (1)])); + YYACCEPT; + ;} + break; + + case 4: #line 157 "OSUnserializeXML.y" - { yyerror("syntax error"); - YYERROR; - ;} - break; + { yyerror("syntax error"); + YYERROR; + ;} + break; - case 5: + case 5: #line 162 "OSUnserializeXML.y" - { (yyval) = buildDictionary(STATE, (yyvsp[(1) - (1)])); - - if (!yyval->object) { - yyerror("buildDictionary"); - YYERROR; - } - STATE->parsedObjectCount++; - if (STATE->parsedObjectCount > MAX_OBJECTS) { - yyerror("maximum object count"); - YYERROR; - } - ;} - break; - - case 6: + { (yyval) = buildDictionary(STATE, (yyvsp[(1) - (1)])); + + if (!yyval->object) { + yyerror("buildDictionary"); + YYERROR; + } + STATE->parsedObjectCount++; + if (STATE->parsedObjectCount > MAX_OBJECTS) { + yyerror("maximum object count"); + YYERROR; + } + ;} + break; + + case 6: #line 174 "OSUnserializeXML.y" - { (yyval) = buildArray(STATE, (yyvsp[(1) - (1)])); - - if (!yyval->object) { - yyerror("buildArray"); - YYERROR; - } - STATE->parsedObjectCount++; - if (STATE->parsedObjectCount > MAX_OBJECTS) { - yyerror("maximum object count"); - YYERROR; - } - ;} - break; - - case 7: + { (yyval) = buildArray(STATE, (yyvsp[(1) - (1)])); + + if (!yyval->object) { + yyerror("buildArray"); + YYERROR; + } + STATE->parsedObjectCount++; + if (STATE->parsedObjectCount > MAX_OBJECTS) { + yyerror("maximum object count"); + YYERROR; + } + ;} + break; + + case 7: #line 186 "OSUnserializeXML.y" - { (yyval) = buildSet(STATE, (yyvsp[(1) - (1)])); - - if (!yyval->object) { - yyerror("buildSet"); - YYERROR; - } - STATE->parsedObjectCount++; - if (STATE->parsedObjectCount > MAX_OBJECTS) { - yyerror("maximum object count"); - YYERROR; - } - ;} - break; - - case 8: + { (yyval) = buildSet(STATE, (yyvsp[(1) - (1)])); + + if (!yyval->object) { + yyerror("buildSet"); + YYERROR; + } + STATE->parsedObjectCount++; + if (STATE->parsedObjectCount > MAX_OBJECTS) { + yyerror("maximum object count"); + YYERROR; + } + ;} + break; + + case 8: #line 198 "OSUnserializeXML.y" - { (yyval) = buildString(STATE, (yyvsp[(1) - (1)])); - - if (!yyval->object) { - yyerror("buildString"); - YYERROR; - } - STATE->parsedObjectCount++; - if (STATE->parsedObjectCount > MAX_OBJECTS) { - yyerror("maximum object count"); - YYERROR; - } - ;} - break; - - case 9: + { (yyval) = buildString(STATE, (yyvsp[(1) - (1)])); + + if (!yyval->object) { + yyerror("buildString"); + YYERROR; + } + STATE->parsedObjectCount++; + if (STATE->parsedObjectCount > MAX_OBJECTS) { + yyerror("maximum object count"); + YYERROR; + } + ;} + break; + + case 9: #line 210 "OSUnserializeXML.y" - { (yyval) = buildData(STATE, (yyvsp[(1) - (1)])); - - if (!yyval->object) { - yyerror("buildData"); - YYERROR; - } - STATE->parsedObjectCount++; - if (STATE->parsedObjectCount > MAX_OBJECTS) { - yyerror("maximum object count"); - YYERROR; - } - ;} - break; - - case 10: + { (yyval) = buildData(STATE, (yyvsp[(1) - (1)])); + + if (!yyval->object) { + yyerror("buildData"); + YYERROR; + } + 
STATE->parsedObjectCount++; + if (STATE->parsedObjectCount > MAX_OBJECTS) { + yyerror("maximum object count"); + YYERROR; + } + ;} + break; + + case 10: #line 222 "OSUnserializeXML.y" - { (yyval) = buildNumber(STATE, (yyvsp[(1) - (1)])); - - if (!yyval->object) { - yyerror("buildNumber"); - YYERROR; - } - STATE->parsedObjectCount++; - if (STATE->parsedObjectCount > MAX_OBJECTS) { - yyerror("maximum object count"); - YYERROR; - } - ;} - break; - - case 11: + { (yyval) = buildNumber(STATE, (yyvsp[(1) - (1)])); + + if (!yyval->object) { + yyerror("buildNumber"); + YYERROR; + } + STATE->parsedObjectCount++; + if (STATE->parsedObjectCount > MAX_OBJECTS) { + yyerror("maximum object count"); + YYERROR; + } + ;} + break; + + case 11: #line 234 "OSUnserializeXML.y" - { (yyval) = buildBoolean(STATE, (yyvsp[(1) - (1)])); - - if (!yyval->object) { - yyerror("buildBoolean"); - YYERROR; - } - STATE->parsedObjectCount++; - if (STATE->parsedObjectCount > MAX_OBJECTS) { - yyerror("maximum object count"); - YYERROR; - } - ;} - break; - - case 12: + { (yyval) = buildBoolean(STATE, (yyvsp[(1) - (1)])); + + if (!yyval->object) { + yyerror("buildBoolean"); + YYERROR; + } + STATE->parsedObjectCount++; + if (STATE->parsedObjectCount > MAX_OBJECTS) { + yyerror("maximum object count"); + YYERROR; + } + ;} + break; + + case 12: #line 246 "OSUnserializeXML.y" - { (yyval) = retrieveObject(STATE, (yyvsp[(1) - (1)])->idref); - if ((yyval)) { - STATE->retrievedObjectCount++; - (yyval)->object->retain(); - if (STATE->retrievedObjectCount > MAX_REFED_OBJECTS) { - yyerror("maximum object reference count"); - YYERROR; - } - } else { - yyerror("forward reference detected"); - YYERROR; - } - freeObject(STATE, (yyvsp[(1) - (1)])); - - STATE->parsedObjectCount++; - if (STATE->parsedObjectCount > MAX_OBJECTS) { - yyerror("maximum object count"); - YYERROR; - } - ;} - break; - - case 13: + { (yyval) = retrieveObject(STATE, (yyvsp[(1) - (1)])->idref); + if ((yyval)) { + STATE->retrievedObjectCount++; + (yyval)->object->retain(); + if (STATE->retrievedObjectCount > MAX_REFED_OBJECTS) { + yyerror("maximum object reference count"); + YYERROR; + } + } else { + yyerror("forward reference detected"); + YYERROR; + } + freeObject(STATE, (yyvsp[(1) - (1)])); + + STATE->parsedObjectCount++; + if (STATE->parsedObjectCount > MAX_OBJECTS) { + yyerror("maximum object count"); + YYERROR; + } + ;} + break; + + case 13: #line 265 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(1) - (2)]); - (yyval)->elements = NULL; - ;} - break; + { (yyval) = (yyvsp[(1) - (2)]); + (yyval)->elements = NULL; + ;} + break; - case 14: + case 14: #line 268 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(1) - (3)]); - (yyval)->elements = (yyvsp[(2) - (3)]); - ;} - break; + { (yyval) = (yyvsp[(1) - (3)]); + (yyval)->elements = (yyvsp[(2) - (3)]); + ;} + break; - case 17: + case 17: #line 275 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(2) - (2)]); - (yyval)->next = (yyvsp[(1) - (2)]); - - object_t *o; - o = (yyval)->next; - while (o) { - if (o->key == (yyval)->key) { - yyerror("duplicate dictionary key"); - YYERROR; - } - o = o->next; - } - ;} - break; - - case 18: + { (yyval) = (yyvsp[(2) - (2)]); + (yyval)->next = (yyvsp[(1) - (2)]); + + object_t *o; + o = (yyval)->next; + while (o) { + if (o->key == (yyval)->key) { + yyerror("duplicate dictionary key"); + YYERROR; + } + o = o->next; + } + ;} + break; + + case 18: #line 290 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(1) - (2)]); - (yyval)->key = (OSSymbol *)(yyval)->object; - (yyval)->object = (yyvsp[(2) - (2)])->object; - 
(yyval)->next = NULL; - (yyvsp[(2) - (2)])->object = 0; - freeObject(STATE, (yyvsp[(2) - (2)])); - ;} - break; - - case 19: + { (yyval) = (yyvsp[(1) - (2)]); + (yyval)->key = (OSSymbol *)(yyval)->object; + (yyval)->object = (yyvsp[(2) - (2)])->object; + (yyval)->next = NULL; + (yyvsp[(2) - (2)])->object = 0; + freeObject(STATE, (yyvsp[(2) - (2)])); + ;} + break; + + case 19: #line 299 "OSUnserializeXML.y" - { (yyval) = buildSymbol(STATE, (yyvsp[(1) - (1)])); + { (yyval) = buildSymbol(STATE, (yyvsp[(1) - (1)])); // STATE->parsedObjectCount++; // if (STATE->parsedObjectCount > MAX_OBJECTS) { // yyerror("maximum object count"); // YYERROR; // } - ;} - break; + ;} + break; - case 20: + case 20: #line 311 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(1) - (2)]); - (yyval)->elements = NULL; - ;} - break; + { (yyval) = (yyvsp[(1) - (2)]); + (yyval)->elements = NULL; + ;} + break; - case 21: + case 21: #line 314 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(1) - (3)]); - (yyval)->elements = (yyvsp[(2) - (3)]); - ;} - break; + { (yyval) = (yyvsp[(1) - (3)]); + (yyval)->elements = (yyvsp[(2) - (3)]); + ;} + break; - case 23: + case 23: #line 320 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(1) - (2)]); - (yyval)->elements = NULL; - ;} - break; + { (yyval) = (yyvsp[(1) - (2)]); + (yyval)->elements = NULL; + ;} + break; - case 24: + case 24: #line 323 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(1) - (3)]); - (yyval)->elements = (yyvsp[(2) - (3)]); - ;} - break; + { (yyval) = (yyvsp[(1) - (3)]); + (yyval)->elements = (yyvsp[(2) - (3)]); + ;} + break; - case 26: + case 26: #line 329 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(1) - (1)]); - (yyval)->next = NULL; - ;} - break; + { (yyval) = (yyvsp[(1) - (1)]); + (yyval)->next = NULL; + ;} + break; - case 27: + case 27: #line 332 "OSUnserializeXML.y" - { (yyval) = (yyvsp[(2) - (2)]); - (yyval)->next = (yyvsp[(1) - (2)]); - ;} - break; + { (yyval) = (yyvsp[(2) - (2)]); + (yyval)->next = (yyvsp[(1) - (2)]); + ;} + break; /* Line 1267 of yacc.c. */ #line 1699 "OSUnserializeXML.tab.c" - default: break; - } - YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); + default: break; + } + YY_SYMBOL_PRINT("-> $$ =", yyr1[yyn], &yyval, &yyloc); - YYPOPSTACK (yylen); - yylen = 0; - YY_STACK_PRINT (yyss, yyssp); + YYPOPSTACK(yylen); + yylen = 0; + YY_STACK_PRINT(yyss, yyssp); - *++yyvsp = yyval; + *++yyvsp = yyval; - /* Now `shift' the result of the reduction. Determine what state - that goes to, based on the state we popped back to and the rule - number reduced by. */ + /* Now `shift' the result of the reduction. Determine what state + * that goes to, based on the state we popped back to and the rule + * number reduced by. */ - yyn = yyr1[yyn]; + yyn = yyr1[yyn]; - yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; - if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) - yystate = yytable[yystate]; - else - yystate = yydefgoto[yyn - YYNTOKENS]; + yystate = yypgoto[yyn - YYNTOKENS] + *yyssp; + if (0 <= yystate && yystate <= YYLAST && yycheck[yystate] == *yyssp) { + yystate = yytable[yystate]; + } else { + yystate = yydefgoto[yyn - YYNTOKENS]; + } - goto yynewstate; + goto yynewstate; /*------------------------------------. -| yyerrlab -- here on detecting error | -`------------------------------------*/ + | yyerrlab -- here on detecting error | + | `------------------------------------*/ yyerrlab: - /* If not already recovering from an error, report this error. */ - if (!yyerrstatus) - { - ++yynerrs; -#if ! 
YYERROR_VERBOSE - yyerror (YY_("syntax error")); + /* If not already recovering from an error, report this error. */ + if (!yyerrstatus) { + ++yynerrs; +#if !YYERROR_VERBOSE + yyerror(YY_("syntax error")); #else - { - YYSIZE_T yysize = yysyntax_error (0, yystate, yychar); - if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM) - { - YYSIZE_T yyalloc = 2 * yysize; - if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM)) - yyalloc = YYSTACK_ALLOC_MAXIMUM; - if (yymsg != yymsgbuf) - YYSTACK_FREE (yymsg); - yymsg = (char *) YYSTACK_ALLOC (yyalloc); - if (yymsg) - yymsg_alloc = yyalloc; - else - { - yymsg = yymsgbuf; - yymsg_alloc = sizeof yymsgbuf; - } - } - - if (0 < yysize && yysize <= yymsg_alloc) - { - (void) yysyntax_error (yymsg, yystate, yychar); - yyerror (yymsg); - } - else - { - yyerror (YY_("syntax error")); - if (yysize != 0) - goto yyexhaustedlab; - } - } + { + YYSIZE_T yysize = yysyntax_error(0, yystate, yychar); + if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM) { + YYSIZE_T yyalloc = 2 * yysize; + if (!(yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM)) { + yyalloc = YYSTACK_ALLOC_MAXIMUM; + } + if (yymsg != yymsgbuf) { + YYSTACK_FREE(yymsg); + } + yymsg = (char *) YYSTACK_ALLOC(yyalloc); + if (yymsg) { + yymsg_alloc = yyalloc; + } else { + yymsg = yymsgbuf; + yymsg_alloc = sizeof yymsgbuf; + } + } + + if (0 < yysize && yysize <= yymsg_alloc) { + (void) yysyntax_error(yymsg, yystate, yychar); + yyerror(yymsg); + } else { + yyerror(YY_("syntax error")); + if (yysize != 0) { + goto yyexhaustedlab; + } + } + } #endif - } + } - if (yyerrstatus == 3) - { - /* If just tried and failed to reuse look-ahead token after an - error, discard it. */ + if (yyerrstatus == 3) { + /* If just tried and failed to reuse look-ahead token after an + * error, discard it. */ - if (yychar <= YYEOF) - { - /* Return failure if at end of input. */ - if (yychar == YYEOF) - YYABORT; - } - else - { - yydestruct ("Error: discarding", - yytoken, &yylval); - yychar = YYEMPTY; + if (yychar <= YYEOF) { + /* Return failure if at end of input. */ + if (yychar == YYEOF) { + YYABORT; + } + } else { + yydestruct("Error: discarding", + yytoken, &yylval); + yychar = YYEMPTY; + } } - } - /* Else will try to reuse look-ahead token after shifting the error - token. */ - goto yyerrlab1; + /* Else will try to reuse look-ahead token after shifting the error + * token. */ + goto yyerrlab1; /*---------------------------------------------------. -| yyerrorlab -- error raised explicitly by YYERROR. | -`---------------------------------------------------*/ + | yyerrorlab -- error raised explicitly by YYERROR. | + | `---------------------------------------------------*/ yyerrorlab: - /* Pacify compilers like GCC when the user code never invokes - YYERROR and the label yyerrorlab therefore never appears in user - code. */ - if (/*CONSTCOND*/ 0) - goto yyerrorlab; + /* Pacify compilers like GCC when the user code never invokes + * YYERROR and the label yyerrorlab therefore never appears in user + * code. */ + if (/*CONSTCOND*/ 0) { + goto yyerrorlab; + } - /* Do not reclaim the symbols of the rule which action triggered - this YYERROR. */ - YYPOPSTACK (yylen); - yylen = 0; - YY_STACK_PRINT (yyss, yyssp); - yystate = *yyssp; - goto yyerrlab1; + /* Do not reclaim the symbols of the rule which action triggered + * this YYERROR. */ + YYPOPSTACK(yylen); + yylen = 0; + YY_STACK_PRINT(yyss, yyssp); + yystate = *yyssp; + goto yyerrlab1; /*-------------------------------------------------------------. 
-| yyerrlab1 -- common code for both syntax error and YYERROR. | -`-------------------------------------------------------------*/ + | yyerrlab1 -- common code for both syntax error and YYERROR. | + | `-------------------------------------------------------------*/ yyerrlab1: - yyerrstatus = 3; /* Each real token shifted decrements this. */ - - for (;;) - { - yyn = yypact[yystate]; - if (yyn != YYPACT_NINF) - { - yyn += YYTERROR; - if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) - { - yyn = yytable[yyn]; - if (0 < yyn) - break; - } - } + yyerrstatus = 3; /* Each real token shifted decrements this. */ + + for (;;) { + yyn = yypact[yystate]; + if (yyn != YYPACT_NINF) { + yyn += YYTERROR; + if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) { + yyn = yytable[yyn]; + if (0 < yyn) { + break; + } + } + } - /* Pop the current state because it cannot handle the error token. */ - if (yyssp == yyss) - YYABORT; + /* Pop the current state because it cannot handle the error token. */ + if (yyssp == yyss) { + YYABORT; + } - yydestruct ("Error: popping", - yystos[yystate], yyvsp); - YYPOPSTACK (1); - yystate = *yyssp; - YY_STACK_PRINT (yyss, yyssp); - } + yydestruct("Error: popping", + yystos[yystate], yyvsp); + YYPOPSTACK(1); + yystate = *yyssp; + YY_STACK_PRINT(yyss, yyssp); + } - if (yyn == YYFINAL) - YYACCEPT; + if (yyn == YYFINAL) { + YYACCEPT; + } - *++yyvsp = yylval; + *++yyvsp = yylval; - /* Shift the error token. */ - YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); + /* Shift the error token. */ + YY_SYMBOL_PRINT("Shifting", yystos[yyn], yyvsp, yylsp); - yystate = yyn; - goto yynewstate; + yystate = yyn; + goto yynewstate; /*-------------------------------------. -| yyacceptlab -- YYACCEPT comes here. | -`-------------------------------------*/ + | yyacceptlab -- YYACCEPT comes here. | + | `-------------------------------------*/ yyacceptlab: - yyresult = 0; - goto yyreturn; + yyresult = 0; + goto yyreturn; /*-----------------------------------. -| yyabortlab -- YYABORT comes here. | -`-----------------------------------*/ + | yyabortlab -- YYABORT comes here. | + | `-----------------------------------*/ yyabortlab: - yyresult = 1; - goto yyreturn; + yyresult = 1; + goto yyreturn; #ifndef yyoverflow /*-------------------------------------------------. -| yyexhaustedlab -- memory exhaustion comes here. | -`-------------------------------------------------*/ + | yyexhaustedlab -- memory exhaustion comes here. | + | `-------------------------------------------------*/ yyexhaustedlab: - yyerror (YY_("memory exhausted")); - yyresult = 2; - /* Fall through. */ + yyerror(YY_("memory exhausted")); + yyresult = 2; + /* Fall through. */ #endif yyreturn: - if (yychar != YYEOF && yychar != YYEMPTY) - yydestruct ("Cleanup: discarding lookahead", - yytoken, &yylval); - /* Do not reclaim the symbols of the rule which action triggered - this YYABORT or YYACCEPT. */ - YYPOPSTACK (yylen); - YY_STACK_PRINT (yyss, yyssp); - while (yyssp != yyss) - { - yydestruct ("Cleanup: popping", - yystos[*yyssp], yyvsp); - YYPOPSTACK (1); - } + if (yychar != YYEOF && yychar != YYEMPTY) { + yydestruct("Cleanup: discarding lookahead", + yytoken, &yylval); + } + /* Do not reclaim the symbols of the rule which action triggered + * this YYABORT or YYACCEPT. 
*/ + YYPOPSTACK(yylen); + YY_STACK_PRINT(yyss, yyssp); + while (yyssp != yyss) { + yydestruct("Cleanup: popping", + yystos[*yyssp], yyvsp); + YYPOPSTACK(1); + } #ifndef yyoverflow - if (yyss != yyssa) - YYSTACK_FREE (yyss); + if (yyss != yyssa) { + YYSTACK_FREE(yyss); + } #endif #if YYERROR_VERBOSE - if (yymsg != yymsgbuf) - YYSTACK_FREE (yymsg); + if (yymsg != yymsgbuf) { + YYSTACK_FREE(yymsg); + } #endif - /* Make sure YYID is used. */ - return YYID (yyresult); + /* Make sure YYID is used. */ + return YYID(yyresult); } @@ -1967,40 +1969,40 @@ yyreturn: int OSUnserializeerror(parser_state_t * state, const char *s) /* Called by yyparse on errors */ { - if (state->errorString) { - char tempString[128]; - snprintf(tempString, 128, "OSUnserializeXML: %s near line %d\n", s, state->lineNumber); - *(state->errorString) = OSString::withCString(tempString); - } - - return 0; + if (state->errorString) { + char tempString[128]; + snprintf(tempString, 128, "OSUnserializeXML: %s near line %d\n", s, state->lineNumber); + *(state->errorString) = OSString::withCString(tempString); + } + + return 0; } -#define TAG_MAX_LENGTH 32 -#define TAG_MAX_ATTRIBUTES 32 -#define TAG_BAD 0 -#define TAG_START 1 -#define TAG_END 2 -#define TAG_EMPTY 3 -#define TAG_IGNORE 4 - -#define currentChar() (state->parseBuffer[state->parseBufferIndex]) -#define nextChar() (state->parseBuffer[++state->parseBufferIndex]) -#define prevChar() (state->parseBuffer[state->parseBufferIndex - 1]) - -#define isSpace(c) ((c) == ' ' || (c) == '\t') -#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) -#define isDigit(c) ((c) >= '0' && (c) <= '9') -#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') -#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) -#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) +#define TAG_MAX_LENGTH 32 +#define TAG_MAX_ATTRIBUTES 32 +#define TAG_BAD 0 +#define TAG_START 1 +#define TAG_END 2 +#define TAG_EMPTY 3 +#define TAG_IGNORE 4 + +#define currentChar() (state->parseBuffer[state->parseBufferIndex]) +#define nextChar() (state->parseBuffer[++state->parseBufferIndex]) +#define prevChar() (state->parseBuffer[state->parseBufferIndex - 1]) + +#define isSpace(c) ((c) == ' ' || (c) == '\t') +#define isAlpha(c) (((c) >= 'A' && (c) <= 'Z') || ((c) >= 'a' && (c) <= 'z')) +#define isDigit(c) ((c) >= '0' && (c) <= '9') +#define isAlphaDigit(c) ((c) >= 'a' && (c) <= 'f') +#define isHexDigit(c) (isDigit(c) || isAlphaDigit(c)) +#define isAlphaNumeric(c) (isAlpha(c) || isDigit(c) || ((c) == '-')) static int getTag(parser_state_t *state, - char tag[TAG_MAX_LENGTH], - int *attributeCount, - char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH], - char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH] ) + char tag[TAG_MAX_LENGTH], + int *attributeCount, + char attributes[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH], + char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH] ) { int length = 0; int c = currentChar(); @@ -2008,116 +2010,151 @@ getTag(parser_state_t *state, *attributeCount = 0; - if (c != '<') return TAG_BAD; - c = nextChar(); // skip '<' + if (c != '<') { + return TAG_BAD; + } + c = nextChar(); // skip '<' // // - if (c == '!') { - c = nextChar(); - bool isComment = (c == '-') && ((c = nextChar()) != 0) && (c == '-'); - if (!isComment && !isAlpha(c)) return TAG_BAD; // lineNumber++; - if (isComment) { - if (c != '-') continue; - c = nextChar(); - if (c != '-') continue; - c = nextChar(); + if (c == '!') { + c = nextChar(); + bool isComment = (c == '-') && ((c = nextChar()) != 0) && (c == '-'); + if 
(!isComment && !isAlpha(c)) { + return TAG_BAD; // ') { - (void)nextChar(); - return TAG_IGNORE; + while (c && (c = nextChar()) != 0) { + if (c == '\n') { + state->lineNumber++; + } + if (isComment) { + if (c != '-') { + continue; + } + c = nextChar(); + if (c != '-') { + continue; + } + c = nextChar(); + } + if (c == '>') { + (void)nextChar(); + return TAG_IGNORE; + } + if (isComment) { + break; + } } - if (isComment) break; - } - return TAG_BAD; - } - - else - + return TAG_BAD; + } else // - if (c == '?') { - while ((c = nextChar()) != 0) { - if (c == '\n') state->lineNumber++; - if (c != '?') continue; - c = nextChar(); - if (!c) return TAG_IGNORE; - if (c == '>') { - (void)nextChar(); - return TAG_IGNORE; + if (c == '?') { + while ((c = nextChar()) != 0) { + if (c == '\n') { + state->lineNumber++; + } + if (c != '?') { + continue; + } + c = nextChar(); + if (!c) { + return TAG_IGNORE; + } + if (c == '>') { + (void)nextChar(); + return TAG_IGNORE; + } } - } - return TAG_BAD; - } - - else - - // + return TAG_BAD; + } else + // if (c == '/') { - c = nextChar(); // skip '/' + c = nextChar(); // skip '/' tagType = TAG_END; } - if (!isAlpha(c)) return TAG_BAD; + if (!isAlpha(c)) { + return TAG_BAD; + } /* find end of tag while copying it */ while (isAlphaNumeric(c)) { tag[length++] = c; c = nextChar(); - if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + if (length >= (TAG_MAX_LENGTH - 1)) { + return TAG_BAD; + } } tag[length] = 0; // printf("tag %s, type %d\n", tag, tagType); - + // look for attributes of the form attribute = "value" ... while ((c != '>') && (c != '/')) { - while (isSpace(c)) c = nextChar(); + while (isSpace(c)) { + c = nextChar(); + } length = 0; while (isAlphaNumeric(c)) { attributes[*attributeCount][length++] = c; - if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + if (length >= (TAG_MAX_LENGTH - 1)) { + return TAG_BAD; + } c = nextChar(); } attributes[*attributeCount][length] = 0; - while (isSpace(c)) c = nextChar(); - - if (c != '=') return TAG_BAD; + while (isSpace(c)) { + c = nextChar(); + } + + if (c != '=') { + return TAG_BAD; + } c = nextChar(); - - while (isSpace(c)) c = nextChar(); - if (c != '"') return TAG_BAD; + while (isSpace(c)) { + c = nextChar(); + } + + if (c != '"') { + return TAG_BAD; + } c = nextChar(); length = 0; while (c != '"') { values[*attributeCount][length++] = c; - if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; + if (length >= (TAG_MAX_LENGTH - 1)) { + return TAG_BAD; + } c = nextChar(); - if (!c) return TAG_BAD; + if (!c) { + return TAG_BAD; + } } values[*attributeCount][length] = 0; c = nextChar(); // skip closing quote -// printf(" attribute '%s' = '%s', nextchar = '%c'\n", +// printf(" attribute '%s' = '%s', nextchar = '%c'\n", // attributes[*attributeCount], values[*attributeCount], c); (*attributeCount)++; - if (*attributeCount >= TAG_MAX_ATTRIBUTES) return TAG_BAD; + if (*attributeCount >= TAG_MAX_ATTRIBUTES) { + return TAG_BAD; + } } if (c == '/') { - c = nextChar(); // skip '/' + c = nextChar(); // skip '/' tagType = TAG_EMPTY; } - if (c != '>') return TAG_BAD; - c = nextChar(); // skip '>' + if (c != '>') { + return TAG_BAD; + } + c = nextChar(); // skip '>' return tagType; } @@ -2133,14 +2170,18 @@ getString(parser_state_t *state) /* find end of string */ while (c != 0) { - if (c == '\n') state->lineNumber++; + if (c == '\n') { + state->lineNumber++; + } if (c == '<') { break; } c = nextChar(); } - if (c != '<') return 0; + if (c != '<') { + return 0; + } length = state->parseBufferIndex - start; @@ -2160,30 +2201,48 @@ 
getString(parser_state_t *state) if (c != '&') { tempString[j++] = c; } else { - if ((i+3) > length) goto error; + if ((i + 3) > length) { + goto error; + } c = state->parseBuffer[start + i++]; if (c == 'l') { - if (state->parseBuffer[start + i++] != 't') goto error; - if (state->parseBuffer[start + i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 't') { + goto error; + } + if (state->parseBuffer[start + i++] != ';') { + goto error; + } tempString[j++] = '<'; continue; - } + } if (c == 'g') { - if (state->parseBuffer[start + i++] != 't') goto error; - if (state->parseBuffer[start + i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 't') { + goto error; + } + if (state->parseBuffer[start + i++] != ';') { + goto error; + } tempString[j++] = '>'; continue; - } - if ((i+3) > length) goto error; + } + if ((i + 3) > length) { + goto error; + } if (c == 'a') { - if (state->parseBuffer[start + i++] != 'm') goto error; - if (state->parseBuffer[start + i++] != 'p') goto error; - if (state->parseBuffer[start + i++] != ';') goto error; + if (state->parseBuffer[start + i++] != 'm') { + goto error; + } + if (state->parseBuffer[start + i++] != 'p') { + goto error; + } + if (state->parseBuffer[start + i++] != ';') { + goto error; + } tempString[j++] = '&'; continue; } goto error; - } + } } tempString[j] = 0; @@ -2192,7 +2251,9 @@ getString(parser_state_t *state) return tempString; error: - if (tempString) free(tempString); + if (tempString) { + free(tempString); + } return 0; } @@ -2216,7 +2277,7 @@ getNumber(parser_state_t *state) negate = true; c = nextChar(); } - while(isDigit(c)) { + while (isDigit(c)) { n = (n * base + c - '0'); c = nextChar(); } @@ -2224,7 +2285,7 @@ getNumber(parser_state_t *state) n = (unsigned long long)((long long)n * (long long)-1); } } else { - while(isHexDigit(c)) { + while (isHexDigit(c)) { if (isDigit(c)) { n = (n * base + c - '0'); } else { @@ -2240,22 +2301,22 @@ getNumber(parser_state_t *state) // taken from CFXMLParsing/CFPropertyList.c static const signed char __CFPLDataDecodeTable[128] = { - /* 000 */ -1, -1, -1, -1, -1, -1, -1, -1, - /* 010 */ -1, -1, -1, -1, -1, -1, -1, -1, - /* 020 */ -1, -1, -1, -1, -1, -1, -1, -1, - /* 030 */ -1, -1, -1, -1, -1, -1, -1, -1, - /* ' ' */ -1, -1, -1, -1, -1, -1, -1, -1, - /* '(' */ -1, -1, -1, 62, -1, -1, -1, 63, - /* '0' */ 52, 53, 54, 55, 56, 57, 58, 59, - /* '8' */ 60, 61, -1, -1, -1, 0, -1, -1, - /* '@' */ -1, 0, 1, 2, 3, 4, 5, 6, - /* 'H' */ 7, 8, 9, 10, 11, 12, 13, 14, - /* 'P' */ 15, 16, 17, 18, 19, 20, 21, 22, - /* 'X' */ 23, 24, 25, -1, -1, -1, -1, -1, - /* '`' */ -1, 26, 27, 28, 29, 30, 31, 32, - /* 'h' */ 33, 34, 35, 36, 37, 38, 39, 40, - /* 'p' */ 41, 42, 43, 44, 45, 46, 47, 48, - /* 'x' */ 49, 50, 51, -1, -1, -1, -1, -1 + /* 000 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 010 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 020 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* 030 */ -1, -1, -1, -1, -1, -1, -1, -1, + /* ' ' */ -1, -1, -1, -1, -1, -1, -1, -1, + /* '(' */ -1, -1, -1, 62, -1, -1, -1, 63, + /* '0' */ 52, 53, 54, 55, 56, 57, 58, 59, + /* '8' */ 60, 61, -1, -1, -1, 0, -1, -1, + /* '@' */ -1, 0, 1, 2, 3, 4, 5, 6, + /* 'H' */ 7, 8, 9, 10, 11, 12, 13, 14, + /* 'P' */ 15, 16, 17, 18, 19, 20, 21, 22, + /* 'X' */ 23, 24, 25, -1, -1, -1, -1, -1, + /* '`' */ -1, 26, 27, 28, 29, 30, 31, 32, + /* 'h' */ 33, 34, 35, 36, 37, 38, 39, 40, + /* 'p' */ 41, 42, 43, 44, 45, 46, 47, 48, + /* 'x' */ 49, 50, 51, -1, -1, -1, -1, -1 }; #define DATA_ALLOC_SIZE 4096 @@ -2263,103 +2324,114 @@ static const signed char 
__CFPLDataDecodeTable[128] = { static void * getCFEncodedData(parser_state_t *state, unsigned int *size) { - int numeq = 0, acc = 0, cntr = 0; - int tmpbufpos = 0, tmpbuflen = 0; - unsigned char *tmpbuf = (unsigned char *)malloc(DATA_ALLOC_SIZE); - - int c = currentChar(); - *size = 0; - - while (c != '<') { - c &= 0x7f; - if (c == 0) { + int numeq = 0, acc = 0, cntr = 0; + int tmpbufpos = 0, tmpbuflen = 0; + unsigned char *tmpbuf = (unsigned char *)malloc(DATA_ALLOC_SIZE); + + int c = currentChar(); + *size = 0; + + while (c != '<') { + c &= 0x7f; + if (c == 0) { + free(tmpbuf); + return 0; + } + if (c == '=') { + numeq++; + } else { + numeq = 0; + } + if (c == '\n') { + state->lineNumber++; + } + if (__CFPLDataDecodeTable[c] < 0) { + c = nextChar(); + continue; + } + cntr++; + acc <<= 6; + acc += __CFPLDataDecodeTable[c]; + if (0 == (cntr & 0x3)) { + if (tmpbuflen <= tmpbufpos + 2) { + tmpbuflen += DATA_ALLOC_SIZE; + tmpbuf = (unsigned char *)realloc(tmpbuf, tmpbuflen); + } + tmpbuf[tmpbufpos++] = (acc >> 16) & 0xff; + if (numeq < 2) { + tmpbuf[tmpbufpos++] = (acc >> 8) & 0xff; + } + if (numeq < 1) { + tmpbuf[tmpbufpos++] = acc & 0xff; + } + } + c = nextChar(); + } + *size = tmpbufpos; + if (*size == 0) { free(tmpbuf); return 0; } - if (c == '=') numeq++; else numeq = 0; - if (c == '\n') state->lineNumber++; - if (__CFPLDataDecodeTable[c] < 0) { - c = nextChar(); - continue; - } - cntr++; - acc <<= 6; - acc += __CFPLDataDecodeTable[c]; - if (0 == (cntr & 0x3)) { - if (tmpbuflen <= tmpbufpos + 2) { - tmpbuflen += DATA_ALLOC_SIZE; - tmpbuf = (unsigned char *)realloc(tmpbuf, tmpbuflen); - } - tmpbuf[tmpbufpos++] = (acc >> 16) & 0xff; - if (numeq < 2) - tmpbuf[tmpbufpos++] = (acc >> 8) & 0xff; - if (numeq < 1) - tmpbuf[tmpbufpos++] = acc & 0xff; - } - c = nextChar(); - } - *size = tmpbufpos; - if (*size == 0) { - free(tmpbuf); - return 0; - } - return tmpbuf; + return tmpbuf; } static void * getHexData(parser_state_t *state, unsigned int *size) { - int c; - unsigned char *d, *start, *lastStart; + int c; + unsigned char *d, *start, *lastStart; - start = lastStart = d = (unsigned char *)malloc(DATA_ALLOC_SIZE); - c = currentChar(); + start = lastStart = d = (unsigned char *)malloc(DATA_ALLOC_SIZE); + c = currentChar(); - while (c != '<') { + while (c != '<') { + if (isSpace(c)) { + while ((c = nextChar()) != 0 && isSpace(c)) { + } + } + ; + if (c == '\n') { + state->lineNumber++; + c = nextChar(); + continue; + } - if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; - if (c == '\n') { - state->lineNumber++; - c = nextChar(); - continue; - } + // get high nibble + if (isDigit(c)) { + *d = (c - '0') << 4; + } else if (isAlphaDigit(c)) { + *d = (0xa + (c - 'a')) << 4; + } else { + goto error; + } - // get high nibble - if (isDigit(c)) { - *d = (c - '0') << 4; - } else if (isAlphaDigit(c)) { - *d = (0xa + (c - 'a')) << 4; - } else { - goto error; - } + // get low nibble + c = nextChar(); + if (isDigit(c)) { + *d |= c - '0'; + } else if (isAlphaDigit(c)) { + *d |= 0xa + (c - 'a'); + } else { + goto error; + } - // get low nibble - c = nextChar(); - if (isDigit(c)) { - *d |= c - '0'; - } else if (isAlphaDigit(c)) { - *d |= 0xa + (c - 'a'); - } else { - goto error; - } - - d++; - if ((d - lastStart) >= DATA_ALLOC_SIZE) { - int oldsize = d - start; - start = (unsigned char *)realloc(start, oldsize + DATA_ALLOC_SIZE); - d = lastStart = start + oldsize; + d++; + if ((d - lastStart) >= DATA_ALLOC_SIZE) { + int oldsize = d - start; + start = (unsigned char *)realloc(start, oldsize + 
DATA_ALLOC_SIZE); + d = lastStart = start + oldsize; + } + c = nextChar(); } - c = nextChar(); - } - *size = d - start; - return start; + *size = d - start; + return start; - error: +error: - *size = 0; - free(start); - return 0; + *size = 0; + free(start); + return 0; } static int @@ -2373,11 +2445,15 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) char values[TAG_MAX_ATTRIBUTES][TAG_MAX_LENGTH]; object_t *object; - top: +top: c = currentChar(); /* skip white space */ - if (isSpace(c)) while ((c = nextChar()) != 0 && isSpace(c)) {}; + if (isSpace(c)) { + while ((c = nextChar()) != 0 && isSpace(c)) { + } + } + ; /* keep track of line number, don't return \n's */ if (c == '\n') { @@ -2387,33 +2463,41 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) } // end of the buffer? - if (!c) return 0; + if (!c) { + return 0; + } tagType = getTag(STATE, tag, &attributeCount, attributes, values); - if (tagType == TAG_BAD) return SYNTAX_ERROR; - if (tagType == TAG_IGNORE) goto top; + if (tagType == TAG_BAD) { + return SYNTAX_ERROR; + } + if (tagType == TAG_IGNORE) { + goto top; + } // handle allocation and check for "ID" and "IDREF" tags up front *lvalp = object = newObject(STATE); object->idref = -1; - for (i=0; i < attributeCount; i++) { - if (attributes[i][0] == 'I' && attributes[i][1] == 'D') { - // check for idref's, note: we ignore the tag, for - // this to work correctly, all idrefs must be unique - // across the whole serialization - if (attributes[i][2] == 'R' && attributes[i][3] == 'E' && - attributes[i][4] == 'F' && !attributes[i][5]) { - if (tagType != TAG_EMPTY) return SYNTAX_ERROR; - object->idref = strtol(values[i], NULL, 0); - return IDREF; - } - // check for id's - if (!attributes[i][2]) { - object->idref = strtol(values[i], NULL, 0); - } else { - return SYNTAX_ERROR; + for (i = 0; i < attributeCount; i++) { + if (attributes[i][0] == 'I' && attributes[i][1] == 'D') { + // check for idref's, note: we ignore the tag, for + // this to work correctly, all idrefs must be unique + // across the whole serialization + if (attributes[i][2] == 'R' && attributes[i][3] == 'E' && + attributes[i][4] == 'F' && !attributes[i][5]) { + if (tagType != TAG_EMPTY) { + return SYNTAX_ERROR; + } + object->idref = strtol(values[i], NULL, 0); + return IDREF; + } + // check for id's + if (!attributes[i][2]) { + object->idref = strtol(values[i], NULL, 0); + } else { + return SYNTAX_ERROR; + } } - } } switch (*tag) { @@ -2443,7 +2527,7 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) } bool isHexFormat = false; - for (i=0; i < attributeCount; i++) { + for (i = 0; i < attributeCount; i++) { if (!strcmp(attributes[i], "format") && !strcmp(values[i], "hex")) { isHexFormat = true; break; @@ -2451,9 +2535,9 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) } // CF encoded is the default form if (isHexFormat) { - object->data = getHexData(STATE, &size); + object->data = getHexData(STATE, &size); } else { - object->data = getCFEncodedData(STATE, &size); + object->data = getCFEncodedData(STATE, &size); } object->size = size; if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) || strcmp(tag, "data")) { @@ -2472,8 +2556,8 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) break; case 'i': if (!strcmp(tag, "integer")) { - object->size = 64; // default - for (i=0; i < attributeCount; i++) { + object->size = 64; // default + for (i = 0; i < attributeCount; i++) { if (!strcmp(attributes[i], "size")) { object->size = strtoul(values[i], NULL, 0); } @@ -2491,13 +2575,15 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) 
break; case 'k': if (!strcmp(tag, "key")) { - if (tagType == TAG_EMPTY) return SYNTAX_ERROR; + if (tagType == TAG_EMPTY) { + return SYNTAX_ERROR; + } object->string = getString(STATE); if (!object->string) { return SYNTAX_ERROR; } if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) - || strcmp(tag, "key")) { + || strcmp(tag, "key")) { return SYNTAX_ERROR; } return KEY; @@ -2512,8 +2598,8 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) case 's': if (!strcmp(tag, "string")) { if (tagType == TAG_EMPTY) { - object->string = (char *)malloc(1); - object->string[0] = 0; + object->string = (char *)malloc(1); + object->string[0] = 0; return STRING; } object->string = getString(STATE); @@ -2521,7 +2607,7 @@ yylex(YYSTYPE *lvalp, parser_state_t *state) return SYNTAX_ERROR; } if ((getTag(STATE, tag, &attributeCount, attributes, values) != TAG_END) - || strcmp(tag, "string")) { + || strcmp(tag, "string")) { return SYNTAX_ERROR; } return STRING; @@ -2576,7 +2662,7 @@ newObject(parser_state_t *state) o->free = state->objects; state->objects = o; } - + return o; } @@ -2584,7 +2670,7 @@ void freeObject(parser_state_t * state, object_t *o) { o->next = state->freeObjects; - state->freeObjects = o; + state->freeObjects = o; } void @@ -2622,7 +2708,7 @@ cleanupObjects(parser_state_t *state) // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# // !@$&)(^Q$&*^!$(*!@$_(^%_(*Q#$(_*&!$_(*&!$_(*&!#$(*!@&^!@#%!_!# -static void +static void rememberObject(parser_state_t *state, int tag, OSObject *o) { char key[16]; @@ -2644,7 +2730,9 @@ retrieveObject(parser_state_t *state, int tag) // printf("retrieve key '%s'\n", key); ref = state->tags->getObject(key); - if (!ref) return 0; + if (!ref) { + return 0; + } o = newObject(state); o->object = ref; @@ -2675,7 +2763,9 @@ buildDictionary(parser_state_t *state, object_t * header) } dict = OSDictionary::withCapacity(count); - if (header->idref >= 0) rememberObject(state, header->idref, dict); + if (header->idref >= 0) { + rememberObject(state, header->idref, dict); + } o = header->elements; while (o) { @@ -2715,7 +2805,9 @@ buildArray(parser_state_t *state, object_t * header) } array = OSArray::withCapacity(count); - if (header->idref >= 0) rememberObject(state, header->idref, array); + if (header->idref >= 0) { + rememberObject(state, header->idref, array); + } o = header->elements; while (o) { @@ -2742,7 +2834,9 @@ buildSet(parser_state_t *state, object_t *header) OSSet *set = OSSet::withArray(array, array->getCapacity()); // write over the reference created in buildArray - if (header->idref >= 0) rememberObject(state, header->idref, set); + if (header->idref >= 0) { + rememberObject(state, header->idref, set); + } array->release(); o->object = set; @@ -2755,7 +2849,9 @@ buildString(parser_state_t *state, object_t *o) OSString *string; string = OSString::withCString(o->string); - if (o->idref >= 0) rememberObject(state, o->idref, string); + if (o->idref >= 0) { + rememberObject(state, o->idref, string); + } free(o->string); o->string = 0; @@ -2770,7 +2866,9 @@ buildSymbol(parser_state_t *state, object_t *o) OSSymbol *symbol; symbol = const_cast(OSSymbol::withCString(o->string)); - if (o->idref >= 0) rememberObject(state, o->idref, symbol); + if (o->idref >= 0) { + rememberObject(state, o->idref, symbol); + } free(o->string); o->string = 0; @@ -2789,9 +2887,13 @@ buildData(parser_state_t *state, object_t *o) } else { data = OSData::withCapacity(0); } - if (o->idref >= 0) rememberObject(state, o->idref, data); + if (o->idref >= 0) { + 
rememberObject(state, o->idref, data); + } - if (o->size) free(o->data); + if (o->size) { + free(o->data); + } o->data = 0; o->object = data; return o; @@ -2802,7 +2904,9 @@ buildNumber(parser_state_t *state, object_t *o) { OSNumber *number = OSNumber::withNumber(o->number, o->size); - if (o->idref >= 0) rememberObject(state, o->idref, number); + if (o->idref >= 0) { + rememberObject(state, o->idref, number); + } o->object = number; return o; @@ -2821,12 +2925,18 @@ OSUnserializeXML(const char *buffer, OSString **errorString) { OSObject *object; - if (!buffer) return 0; + if (!buffer) { + return 0; + } parser_state_t *state = (parser_state_t *)malloc(sizeof(parser_state_t)); - if (!state) return 0; + if (!state) { + return 0; + } // just in case - if (errorString) *errorString = NULL; + if (errorString) { + *errorString = NULL; + } state->parseBuffer = buffer; state->parseBufferIndex = 0; @@ -2855,13 +2965,21 @@ OSUnserializeXML(const char *buffer, OSString **errorString) OSObject* OSUnserializeXML(const char *buffer, size_t bufferSize, OSString **errorString) { - if (!buffer) return (0); - if (bufferSize < sizeof(kOSSerializeBinarySignature)) return (0); + if (!buffer) { + return 0; + } + if (bufferSize < sizeof(kOSSerializeBinarySignature)) { + return 0; + } - if (!strcmp(kOSSerializeBinarySignature, buffer)) return OSUnserializeBinary(buffer, bufferSize, errorString); + if (!strcmp(kOSSerializeBinarySignature, buffer)) { + return OSUnserializeBinary(buffer, bufferSize, errorString); + } // XML must be null terminated - if (buffer[bufferSize - 1]) return 0; + if (buffer[bufferSize - 1]) { + return 0; + } return OSUnserializeXML(buffer, errorString); } @@ -2880,4 +2998,3 @@ OSUnserializeXML(const char *buffer, size_t bufferSize, OSString **errorString) // // // - diff --git a/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp b/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp index 1efad504b..c7d083287 100644 --- a/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp +++ b/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -40,30 +40,30 @@ __END_DECLS #include const char *testBuffer = "" -"{ string = \"this is a 'string' with spaces\";" -" string2 = 'this is also a \"string\" with spaces';" -" offset = 16384:32;" -" true = .true.;" -" false = .false.;" -" data = <0123 4567 89abcdef>;" -" array = (1:8, 2:16, 3:32, 4:64 );" -" set = [ one, two, three, four ];" -" emptydict = { }@1;" -" emptyarray = ( )@2;" -" emptyset = [ ]@3;" -" emptydata = < >@4;" -" emptydict2 = @1;" -" emptyarray2 = @2;" -" emptyset2 = @3;" -" emptydata2 = @4;" -" dict2 = { string = asdfasdf; };" -" dict3 = { string = asdfasdf; };" -"}@0"; + "{ string = \"this is a 'string' with spaces\";" + " string2 = 'this is also a \"string\" with spaces';" + " offset = 16384:32;" + " true = .true.;" + " false = .false.;" + " data = <0123 4567 89abcdef>;" + " array = (1:8, 2:16, 3:32, 4:64 );" + " set = [ one, two, three, four ];" + " emptydict = { }@1;" + " emptyarray = ( )@2;" + " emptyset = [ ]@3;" + " emptydata = < >@4;" + " emptydict2 = @1;" + " emptyarray2 = @2;" + " emptyset2 = @3;" + " emptydata2 = @4;" + " dict2 = { string = asdfasdf; };" + " dict3 = { string = asdfasdf; };" + "}@0"; kern_return_t test1_start(struct kmod_info *ki, void *data) { - IOLog("test buffer start:\n%s\n:test buffer end.\n", testBuffer); + IOLog("test buffer start:\n%s\n:test buffer end.\n", testBuffer); // test unserialize OSString *errmsg; @@ -77,7 +77,7 @@ test1_start(struct kmod_info *ki, void *data) OSSerialize *s = OSSerialize::withCapacity(5); if (!d->serialize(s)) { IOLog("serialization failed\n"); - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } IOLog("serialized object's length = %d, capacity = %d\n", s->getLength(), s->getCapacity()); @@ -87,21 +87,25 @@ test1_start(struct kmod_info *ki, void *data) OSObject *d2 = OSUnserializeXML(s->text(), &errmsg); if (!d2) { IOLog("%s\n", errmsg->getCStringNoCopy()); - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } IOLog("\nserialized objects compared %ssuccessfully objectwise\n\n", - d->isEqualTo(d2) ? "":"un"); + d->isEqualTo(d2) ? "":"un"); - if (d2) d2->release(); + if (d2) { + d2->release(); + } s->release(); - if (d) d->release(); + if (d) { + d->release(); + } - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } kern_return_t test1_stop(struct kmod_info *ki, void *data) { - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } diff --git a/libkern/c++/Tests/TestSerialization/test2/test2_main.cpp b/libkern/c++/Tests/TestSerialization/test2/test2_main.cpp index 58ac14e1b..7b8e2d046 100644 --- a/libkern/c++/Tests/TestSerialization/test2/test2_main.cpp +++ b/libkern/c++/Tests/TestSerialization/test2/test2_main.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -39,104 +39,105 @@ __END_DECLS #include #include -const char *testBuffer = -" \n" -" \n" -" \n" -" \n" -" \n" -" \n" -" \n" - -" \n" - -" key true \n" -" key false \n" - -" key d0 \n" -" key d1 AQ== \n" -" key d2 ASM= \n" -" key d3 ASNF \n" -" key d4 ASNFZw== \n" - -" key i0 \n" -" key i1 123456789 \n" -" key i2 -123456789 \n" -" key i3 0x12345678 \n" - -" key s0 \n" -" key s1 string 1 \n" -" key s2 string 2 \n" -" key mr � mac roman copyright � \n" -" key uft8 \xc2\xa9 utf-8 copyright \xc2\xa9 \n" -" key <&> <&> \n" - -" key D0 \n" -" \n" - -" key a0 \n" -" \n" - -" key a1 \n" -" array string 1 \n" -" array string 2 \n" -" \n" - -" key r1 \n" -" key r2 \n" -" key r3 \n" -" key r4 \n" -" key r5 \n" - -" key e1 \n" -" key e2 \n" -" key e4 \n" -" key e5 \n" -" key e6 \n" - -" key S0 \n" -" \n" -" key S1 \n" -" set string 1 \n" -" set string 2 \n" -" \n" -" key r6 \n" -" key e3 \n" - -" \n" -" \n" +const char *testBuffer = + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + " \n" + + " \n" + + " key true \n" + " key false \n" + + " key d0 \n" + " key d1 AQ== \n" + " key d2 ASM= \n" + " key d3 ASNF \n" + " key d4 ASNFZw== \n" + + " key i0 \n" + " key i1 123456789 \n" + " key i2 -123456789 \n" + " key i3 0x12345678 \n" + + " key s0 \n" + " key s1 string 1 \n" + " key s2 string 2 \n" + " key mr � mac roman copyright � \n" + " key uft8 \xc2\xa9 utf-8 copyright \xc2\xa9 \n" + " key <&> <&> \n" + + " key D0 \n" + " \n" + + " key a0 \n" + " \n" + + " key a1 \n" + " array string 1 \n" + " array string 2 \n" + " \n" + + " key r1 \n" + " key r2 \n" + " key r3 \n" + " key r4 \n" + " key r5 \n" + + " key e1 \n" + " key e2 \n" + " key e4 \n" + " key e5 \n" + " key e6 \n" + + " key S0 \n" + " \n" + " key S1 \n" + " set string 1 \n" + " set string 2 \n" + " \n" + " key r6 \n" + " key e3 \n" + + " \n" + " \n" ; /* - this causes the parser to return an empty string? it doesn't look like yyerror gets called - char *testBuffer = "" - -*/ + * this causes the parser to return an empty string? 
it doesn't look like yyerror gets called + * char *testBuffer = "" + * + */ kern_return_t test2_start(struct kmod_info *ki, void *data) { - IOLog("test buffer start:\n%s\n:test buffer end.\n", testBuffer); + IOLog("test buffer start:\n%s\n:test buffer end.\n", testBuffer); // test unserialize OSString *errmsg = 0; OSObject *d = OSUnserializeXML(testBuffer, &errmsg); if (!d) { - if (errmsg) - IOLog("%s\n", errmsg->getCStringNoCopy()); - else - IOLog("bogus error message\n"); - + if (errmsg) { + IOLog("%s\n", errmsg->getCStringNoCopy()); + } else { + IOLog("bogus error message\n"); + } + return KMOD_RETURN_SUCCESS; } - + // test serialize OSSerialize *s = OSSerialize::withCapacity(5); if (!d->serialize(s)) { IOLog("serialization failed\n"); - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } IOLog("serialized object's length = %d, capacity = %d\n", s->getLength(), s->getCapacity()); @@ -146,34 +147,38 @@ test2_start(struct kmod_info *ki, void *data) OSObject *d2 = OSUnserializeXML(s->text(), &errmsg); if (!d2) { IOLog("%s\n", errmsg->getCStringNoCopy()); - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } OSSerialize *s2 = OSSerialize::withCapacity(5); if (!d2->serialize(s2)) { IOLog("serialization #2 failed\n"); - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } - IOLog("serialized object's length = %d, capacity = %d\n", - s2->getLength(), s2->getCapacity()); + IOLog("serialized object's length = %d, capacity = %d\n", + s2->getLength(), s2->getCapacity()); IOLog("object unformatted = %s\n", s2->text()); IOLog("\nserialized objects compared %ssuccessfully textually\n\n", - strcmp(s->text(), s2->text()) ? "un":""); + strcmp(s->text(), s2->text()) ? "un":""); IOLog("\nserialized objects compared %ssuccessfully objectwise\n\n", - d->isEqualTo(d2) ? "":"un"); + d->isEqualTo(d2) ? "":"un"); s2->release(); - if (d2) d2->release(); + if (d2) { + d2->release(); + } s->release(); - if (d) d->release(); + if (d) { + d->release(); + } - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } kern_return_t test2_stop(struct kmod_info *ki, void *data) { - return KMOD_RETURN_SUCCESS; + return KMOD_RETURN_SUCCESS; } diff --git a/libkern/crypto/corecrypto_aes.c b/libkern/crypto/corecrypto_aes.c index 3fa1ad333..9aa590e14 100644 --- a/libkern/crypto/corecrypto_aes.c +++ b/libkern/crypto/corecrypto_aes.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,104 +32,118 @@ #include #include -aes_rval aes_encrypt_key(const unsigned char *key, int key_len, aes_encrypt_ctx cx[1]) +aes_rval +aes_encrypt_key(const unsigned char *key, int key_len, aes_encrypt_ctx cx[1]) { const struct ccmode_cbc *cbc = g_crypto_funcs->ccaes_cbc_encrypt; - /* Make sure the context size for the mode fits in the one we have */ - if(cbc->size>sizeof(aes_encrypt_ctx)) - panic("%s: inconsistent size for AES encrypt context", __FUNCTION__); + /* Make sure the context size for the mode fits in the one we have */ + if (cbc->size > sizeof(aes_encrypt_ctx)) { + panic("%s: inconsistent size for AES encrypt context", __FUNCTION__); + } cccbc_init(cbc, cx[0].ctx, key_len, key); return aes_good; } -aes_rval aes_encrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk, - unsigned char *out_blk, aes_encrypt_ctx cx[1]) +aes_rval +aes_encrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk, + unsigned char *out_blk, aes_encrypt_ctx cx[1]) { const struct ccmode_cbc *cbc = g_crypto_funcs->ccaes_cbc_encrypt; cccbc_iv_decl(cbc->block_size, ctx_iv); cccbc_set_iv(cbc, ctx_iv, in_iv); - cccbc_update(cbc, cx[0].ctx, ctx_iv, num_blk, in_blk, out_blk); //Actually cbc encrypt. + cccbc_update(cbc, cx[0].ctx, ctx_iv, num_blk, in_blk, out_blk); //Actually cbc encrypt. return aes_good; } #if defined (__i386__) || defined (__x86_64__) || defined (__arm64__) /* This does one block of ECB, using the CBC implementation - this allow to use the same context for both CBC and ECB */ -aes_rval aes_encrypt(const unsigned char *in_blk, unsigned char *out_blk, aes_encrypt_ctx cx[1]) +aes_rval +aes_encrypt(const unsigned char *in_blk, unsigned char *out_blk, aes_encrypt_ctx cx[1]) { - return aes_encrypt_cbc(in_blk, NULL, 1, out_blk, cx); + return aes_encrypt_cbc(in_blk, NULL, 1, out_blk, cx); } #endif -aes_rval aes_decrypt_key(const unsigned char *key, int key_len, aes_decrypt_ctx cx[1]) +aes_rval +aes_decrypt_key(const unsigned char *key, int key_len, aes_decrypt_ctx cx[1]) { const struct ccmode_cbc *cbc = g_crypto_funcs->ccaes_cbc_decrypt; - /* Make sure the context size for the mode fits in the one we have */ - if(cbc->size>sizeof(aes_decrypt_ctx)) - panic("%s: inconsistent size for AES decrypt context", __FUNCTION__); + /* Make sure the context size for the mode fits in the one we have */ + if (cbc->size > sizeof(aes_decrypt_ctx)) { + panic("%s: inconsistent size for AES decrypt context", __FUNCTION__); + } cccbc_init(cbc, cx[0].ctx, key_len, key); return aes_good; } -aes_rval aes_decrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk, - unsigned char *out_blk, aes_decrypt_ctx cx[1]) +aes_rval +aes_decrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk, + unsigned char *out_blk, aes_decrypt_ctx cx[1]) { const struct ccmode_cbc *cbc = g_crypto_funcs->ccaes_cbc_decrypt; cccbc_iv_decl(cbc->block_size, ctx_iv); cccbc_set_iv(cbc, ctx_iv, in_iv); - cccbc_update(cbc, cx[0].ctx, ctx_iv, num_blk, in_blk, out_blk); //Actually cbc decrypt. + cccbc_update(cbc, cx[0].ctx, ctx_iv, num_blk, in_blk, out_blk); //Actually cbc decrypt. 
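	/*
	 * Note: the CBC chaining state lives in the on-stack ctx_iv, so each
	 * call re-derives it from in_iv; a caller decrypting a stream across
	 * multiple calls must feed the last ciphertext block of one call back
	 * in as the IV of the next. The one-block aes_encrypt()/aes_decrypt()
	 * ECB helpers rely on this per-call IV by passing a NULL IV (assumed
	 * here to select an all-zero IV), since single-block CBC with a zero
	 * IV reduces to plain ECB -- which is why one context serves both modes.
	 */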
return aes_good; } #if defined (__i386__) || defined (__x86_64__) || defined (__arm64__) /* This does one block of ECB, using the CBC implementation - this allow to use the same context for both CBC and ECB */ -aes_rval aes_decrypt(const unsigned char *in_blk, unsigned char *out_blk, aes_decrypt_ctx cx[1]) +aes_rval +aes_decrypt(const unsigned char *in_blk, unsigned char *out_blk, aes_decrypt_ctx cx[1]) { return aes_decrypt_cbc(in_blk, NULL, 1, out_blk, cx); } #endif -aes_rval aes_encrypt_key128(const unsigned char *key, aes_encrypt_ctx cx[1]) +aes_rval +aes_encrypt_key128(const unsigned char *key, aes_encrypt_ctx cx[1]) { return aes_encrypt_key(key, 16, cx); } -aes_rval aes_decrypt_key128(const unsigned char *key, aes_decrypt_ctx cx[1]) +aes_rval +aes_decrypt_key128(const unsigned char *key, aes_decrypt_ctx cx[1]) { return aes_decrypt_key(key, 16, cx); } -aes_rval aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]) +aes_rval +aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]) { return aes_encrypt_key(key, 32, cx); } -aes_rval aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]) +aes_rval +aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]) { return aes_decrypt_key(key, 32, cx); } -aes_rval aes_encrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx) +aes_rval +aes_encrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { - return aes_error; + return aes_error; } return ccgcm_init(gcm, ctx, key_len, key); } -aes_rval aes_encrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx) +aes_rval +aes_encrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { @@ -139,17 +153,19 @@ aes_rval aes_encrypt_key_with_iv_gcm(const unsigned char *key, int key_len, cons return g_crypto_funcs->ccgcm_init_with_iv_fn(gcm, ctx, key_len, key, in_iv); } -aes_rval aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx) +aes_rval +aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { - return aes_error; + return aes_error; } return ccgcm_set_iv(gcm, ctx, len, in_iv); } -aes_rval aes_encrypt_reset_gcm(ccgcm_ctx *ctx) +aes_rval +aes_encrypt_reset_gcm(ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { @@ -159,7 +175,8 @@ aes_rval aes_encrypt_reset_gcm(ccgcm_ctx *ctx) return ccgcm_reset(gcm, ctx); } -aes_rval aes_encrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx) +aes_rval +aes_encrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { @@ -169,33 +186,36 @@ aes_rval aes_encrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx) return g_crypto_funcs->ccgcm_inc_iv_fn(gcm, ctx, out_iv); } -aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx) +aes_rval +aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { - return aes_error; + return aes_error; } return ccgcm_gmac(gcm, ctx, aad_bytes, aad); } -aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int 
num_bytes, - unsigned char *out_blk, ccgcm_ctx *ctx) +aes_rval +aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, + unsigned char *out_blk, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { - return aes_error; + return aes_error; } - return ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm encrypt. + return ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm encrypt. } -aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx) +aes_rval +aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx) { int rc; const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { - return aes_error; + return aes_error; } rc = ccgcm_finalize(gcm, ctx, tag_bytes, tag); @@ -203,33 +223,36 @@ aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, cc return rc; } -aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx) +aes_rval +aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { - return aes_error; + return aes_error; } return ccgcm_init(gcm, ctx, key_len, key); } -aes_rval aes_decrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx) +aes_rval +aes_decrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx) { - const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; - if (!gcm) { - return aes_error; - } + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; + if (!gcm) { + return aes_error; + } - return g_crypto_funcs->ccgcm_init_with_iv_fn(gcm, ctx, key_len, key, in_iv); + return g_crypto_funcs->ccgcm_init_with_iv_fn(gcm, ctx, key_len, key, in_iv); } -aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx) +aes_rval +aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx) { int rc; const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { - return aes_error; + return aes_error; } rc = ccgcm_reset(gcm, ctx); @@ -237,7 +260,8 @@ aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, cc return rc; } -aes_rval aes_decrypt_reset_gcm(ccgcm_ctx *ctx) +aes_rval +aes_decrypt_reset_gcm(ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { @@ -247,43 +271,47 @@ aes_rval aes_decrypt_reset_gcm(ccgcm_ctx *ctx) return ccgcm_reset(gcm, ctx); } -aes_rval aes_decrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx) +aes_rval +aes_decrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx) { - const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; - if (!gcm) { - return aes_error; - } + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; + if (!gcm) { + return aes_error; + } - return g_crypto_funcs->ccgcm_inc_iv_fn(gcm, ctx, out_iv); + return g_crypto_funcs->ccgcm_inc_iv_fn(gcm, ctx, out_iv); } -aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx) +aes_rval +aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { - return aes_error; + return aes_error; } return ccgcm_gmac(gcm, ctx, aad_bytes, aad); } -aes_rval aes_decrypt_gcm(const unsigned char *in_blk, 
unsigned int num_bytes, - unsigned char *out_blk, ccgcm_ctx *ctx) +aes_rval +aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, + unsigned char *out_blk, ccgcm_ctx *ctx) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { - return aes_error; + return aes_error; } - return ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm decrypt. + return ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm decrypt. } -aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx) +aes_rval +aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx) { int rc; const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { - return aes_error; + return aes_error; } rc = ccgcm_finalize(gcm, ctx, tag_bytes, tag); @@ -291,21 +319,22 @@ aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, cc return rc; } -unsigned aes_encrypt_get_ctx_size_gcm(void) +unsigned +aes_encrypt_get_ctx_size_gcm(void) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { - return 0; + return 0; } - return (cc_ctx_sizeof(ccgcm_ctx, gcm->size)); + return cc_ctx_sizeof(ccgcm_ctx, gcm->size); } -unsigned aes_decrypt_get_ctx_size_gcm(void) +unsigned +aes_decrypt_get_ctx_size_gcm(void) { const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { - return 0; + return 0; } - return (cc_ctx_sizeof(ccgcm_ctx, gcm->size)); + return cc_ctx_sizeof(ccgcm_ctx, gcm->size); } - diff --git a/libkern/crypto/corecrypto_aesxts.c b/libkern/crypto/corecrypto_aesxts.c index ef33084cf..80cd614fd 100644 --- a/libkern/crypto/corecrypto_aesxts.c +++ b/libkern/crypto/corecrypto_aesxts.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #include #include #include @@ -39,67 +39,77 @@ uint32_t xts_start(uint32_t cipher __unused, // ignored - we're doing this for xts-aes only - const uint8_t *IV __unused, // ignored - const uint8_t *key1, int keylen, - const uint8_t *key2, int tweaklen __unused, // both keys are the same size for xts - uint32_t num_rounds __unused, // ignored - uint32_t options __unused, // ignored - symmetric_xts *xts) + const uint8_t *IV __unused, // ignored + const uint8_t *key1, int keylen, + const uint8_t *key2, int tweaklen __unused, // both keys are the same size for xts + uint32_t num_rounds __unused, // ignored + uint32_t options __unused, // ignored + symmetric_xts *xts) { - const struct ccmode_xts *enc, *dec; - - if(!g_crypto_funcs) - panic("%s: corecrypto not registered!\n", __FUNCTION__); - - enc = g_crypto_funcs->ccaes_xts_encrypt; - dec = g_crypto_funcs->ccaes_xts_decrypt; - - if(!enc && !dec) - panic("%s: xts mode not registered? enc=%p, dec=%p\n", __FUNCTION__, enc, dec); - - /* Make sure the context size for the mode fits in the one we have */ - if((enc->size>sizeof(xts->enc)) || (dec->size>sizeof(xts->dec))) - panic("%s: inconsistent size for AES-XTS context", __FUNCTION__); - - enc->init(enc, xts->enc, keylen, key1, key2); - dec->init(dec, xts->dec, keylen, key1, key2); - - return 0; //never fails + const struct ccmode_xts *enc, *dec; + + if (!g_crypto_funcs) { + panic("%s: corecrypto not registered!\n", __FUNCTION__); + } + + enc = g_crypto_funcs->ccaes_xts_encrypt; + dec = g_crypto_funcs->ccaes_xts_decrypt; + + if (!enc && !dec) { + panic("%s: xts mode not registered? enc=%p, dec=%p\n", __FUNCTION__, enc, dec); + } + + /* Make sure the context size for the mode fits in the one we have */ + if ((enc->size > sizeof(xts->enc)) || (dec->size > sizeof(xts->dec))) { + panic("%s: inconsistent size for AES-XTS context", __FUNCTION__); + } + + enc->init(enc, xts->enc, keylen, key1, key2); + dec->init(dec, xts->dec, keylen, key1, key2); + + return 0; //never fails } -int xts_encrypt(const uint8_t *pt, unsigned long ptlen, - uint8_t *ct, - const uint8_t *iv, // this can be considered the sector IV for this use - symmetric_xts *xts) +int +xts_encrypt(const uint8_t *pt, unsigned long ptlen, + uint8_t *ct, + const uint8_t *iv, // this can be considered the sector IV for this use + symmetric_xts *xts) { const struct ccmode_xts *xtsenc = g_crypto_funcs->ccaes_xts_encrypt; ccxts_tweak_decl(xtsenc->tweak_size, tweak); - - if(ptlen%16) panic("xts encrypt not a multiple of block size\n"); + + if (ptlen % 16) { + panic("xts encrypt not a multiple of block size\n"); + } xtsenc->set_tweak(xts->enc, tweak, iv); - xtsenc->xts(xts->enc, tweak, ptlen/16, pt, ct); - + xtsenc->xts(xts->enc, tweak, ptlen / 16, pt, ct); + return 0; //never fails } -int xts_decrypt(const uint8_t *ct, unsigned long ptlen, - uint8_t *pt, - const uint8_t *iv, // this can be considered the sector IV for this use - symmetric_xts *xts) +int +xts_decrypt(const uint8_t *ct, unsigned long ptlen, + uint8_t *pt, + const uint8_t *iv, // this can be considered the sector IV for this use + symmetric_xts *xts) { const struct ccmode_xts *xtsdec = g_crypto_funcs->ccaes_xts_decrypt; ccxts_tweak_decl(xtsdec->tweak_size, tweak); - if(ptlen%16) panic("xts decrypt not a multiple of block size\n"); + if (ptlen % 16) { + panic("xts decrypt not a multiple of block size\n"); + } xtsdec->set_tweak(xts->dec, tweak, iv); - xtsdec->xts(xts->dec, tweak, ptlen/16, ct, pt); + xtsdec->xts(xts->dec, tweak, ptlen / 
16, ct, pt); return 0; //never fails } -void xts_done(symmetric_xts *xts __unused) +void +xts_done(symmetric_xts *xts __unused) { cc_clear(sizeof(xts->enc), xts->enc); cc_clear(sizeof(xts->dec), xts->dec); diff --git a/libkern/crypto/corecrypto_chacha20poly1305.c b/libkern/crypto/corecrypto_chacha20poly1305.c index 8957b0708..1c134fae7 100644 --- a/libkern/crypto/corecrypto_chacha20poly1305.c +++ b/libkern/crypto/corecrypto_chacha20poly1305.c @@ -30,57 +30,68 @@ #include #include -static ccchacha20poly1305_fns_t fns(void) +static ccchacha20poly1305_fns_t +fns(void) { - return g_crypto_funcs->ccchacha20poly1305_fns; + return g_crypto_funcs->ccchacha20poly1305_fns; } -static const struct ccchacha20poly1305_info *info(void) +static const struct ccchacha20poly1305_info * +info(void) { - return fns()->info(); + return fns()->info(); } -int chacha20poly1305_init(chacha20poly1305_ctx *ctx, const uint8_t *key) +int +chacha20poly1305_init(chacha20poly1305_ctx *ctx, const uint8_t *key) { - return fns()->init(info(), ctx, key); + return fns()->init(info(), ctx, key); } -int chacha20poly1305_reset(chacha20poly1305_ctx *ctx) +int +chacha20poly1305_reset(chacha20poly1305_ctx *ctx) { - return fns()->reset(info(), ctx); + return fns()->reset(info(), ctx); } -int chacha20poly1305_setnonce(chacha20poly1305_ctx *ctx, const uint8_t *nonce) +int +chacha20poly1305_setnonce(chacha20poly1305_ctx *ctx, const uint8_t *nonce) { - return fns()->setnonce(info(), ctx, nonce); + return fns()->setnonce(info(), ctx, nonce); } -int chacha20poly1305_incnonce(chacha20poly1305_ctx *ctx, uint8_t *nonce) +int +chacha20poly1305_incnonce(chacha20poly1305_ctx *ctx, uint8_t *nonce) { - return fns()->incnonce(info(), ctx, nonce); + return fns()->incnonce(info(), ctx, nonce); } -int chacha20poly1305_aad(chacha20poly1305_ctx *ctx, size_t nbytes, const void *aad) +int +chacha20poly1305_aad(chacha20poly1305_ctx *ctx, size_t nbytes, const void *aad) { - return fns()->aad(info(), ctx, nbytes, aad); + return fns()->aad(info(), ctx, nbytes, aad); } -int chacha20poly1305_encrypt(chacha20poly1305_ctx *ctx, size_t nbytes, const void *ptext, void *ctext) +int +chacha20poly1305_encrypt(chacha20poly1305_ctx *ctx, size_t nbytes, const void *ptext, void *ctext) { - return fns()->encrypt(info(), ctx, nbytes, ptext, ctext); + return fns()->encrypt(info(), ctx, nbytes, ptext, ctext); } -int chacha20poly1305_finalize(chacha20poly1305_ctx *ctx, uint8_t *tag) +int +chacha20poly1305_finalize(chacha20poly1305_ctx *ctx, uint8_t *tag) { - return fns()->finalize(info(), ctx, tag); + return fns()->finalize(info(), ctx, tag); } -int chacha20poly1305_decrypt(chacha20poly1305_ctx *ctx, size_t nbytes, const void *ctext, void *ptext) +int +chacha20poly1305_decrypt(chacha20poly1305_ctx *ctx, size_t nbytes, const void *ctext, void *ptext) { - return fns()->decrypt(info(), ctx, nbytes, ctext, ptext); + return fns()->decrypt(info(), ctx, nbytes, ctext, ptext); } -int chacha20poly1305_verify(chacha20poly1305_ctx *ctx, const uint8_t *tag) +int +chacha20poly1305_verify(chacha20poly1305_ctx *ctx, const uint8_t *tag) { - return fns()->verify(info(), ctx, tag); + return fns()->verify(info(), ctx, tag); } diff --git a/libkern/crypto/corecrypto_des.c b/libkern/crypto/corecrypto_des.c index e916b520b..80406bb02 100644 --- a/libkern/crypto/corecrypto_des.c +++ b/libkern/crypto/corecrypto_des.c @@ -34,25 +34,28 @@ #include /* Single DES ECB - used by ipv6 (esp_core.c) */ -int des_ecb_key_sched(des_cblock *key, des_ecb_key_schedule *ks) +int +des_ecb_key_sched(des_cblock *key, 
des_ecb_key_schedule *ks) { const struct ccmode_ecb *enc = g_crypto_funcs->ccdes_ecb_encrypt; const struct ccmode_ecb *dec = g_crypto_funcs->ccdes_ecb_decrypt; - /* Make sure the context size for the mode fits in the one we have */ - if((enc->size>sizeof(ks->enc)) || (dec->size>sizeof(ks->dec))) - panic("%s: inconsistent size for DES-ECB context", __FUNCTION__); - + /* Make sure the context size for the mode fits in the one we have */ + if ((enc->size > sizeof(ks->enc)) || (dec->size > sizeof(ks->dec))) { + panic("%s: inconsistent size for DES-ECB context", __FUNCTION__); + } + enc->init(enc, ks->enc, CCDES_KEY_SIZE, key); dec->init(dec, ks->dec, CCDES_KEY_SIZE, key); /* The old DES interface could return -1 or -2 for weak keys and wrong parity, - but this was disabled all the time, so we never fail here */ + * but this was disabled all the time, so we never fail here */ return 0; } /* Simple des - 1 block */ -void des_ecb_encrypt(des_cblock *in, des_cblock *out, des_ecb_key_schedule *ks, int enc) +void +des_ecb_encrypt(des_cblock *in, des_cblock *out, des_ecb_key_schedule *ks, int enc) { const struct ccmode_ecb *ecb = enc ? g_crypto_funcs->ccdes_ecb_encrypt : g_crypto_funcs->ccdes_ecb_decrypt; ccecb_ctx *ctx = enc ? ks->enc : ks->dec; @@ -62,24 +65,27 @@ void des_ecb_encrypt(des_cblock *in, des_cblock *out, des_ecb_key_schedule *ks, /* Triple DES ECB - used by ipv6 (esp_core.c) */ -int des3_ecb_key_sched(des_cblock *key, des3_ecb_key_schedule *ks) +int +des3_ecb_key_sched(des_cblock *key, des3_ecb_key_schedule *ks) { int rc; const struct ccmode_ecb *enc = g_crypto_funcs->cctdes_ecb_encrypt; const struct ccmode_ecb *dec = g_crypto_funcs->cctdes_ecb_decrypt; - /* Make sure the context size for the mode fits in the one we have */ - if((enc->size>sizeof(ks->enc)) || (dec->size>sizeof(ks->dec))) - panic("%s: inconsistent size for 3DES-ECB context", __FUNCTION__); - - rc = enc->init(enc, ks->enc, CCDES_KEY_SIZE*3, key); - rc |= dec->init(dec, ks->dec, CCDES_KEY_SIZE*3, key); + /* Make sure the context size for the mode fits in the one we have */ + if ((enc->size > sizeof(ks->enc)) || (dec->size > sizeof(ks->dec))) { + panic("%s: inconsistent size for 3DES-ECB context", __FUNCTION__); + } + + rc = enc->init(enc, ks->enc, CCDES_KEY_SIZE * 3, key); + rc |= dec->init(dec, ks->dec, CCDES_KEY_SIZE * 3, key); return rc; } /* Simple des - 1 block */ -void des3_ecb_encrypt(des_cblock *in, des_cblock *out, des3_ecb_key_schedule *ks, int enc) +void +des3_ecb_encrypt(des_cblock *in, des_cblock *out, des3_ecb_key_schedule *ks, int enc) { const struct ccmode_ecb *ecb = enc ? g_crypto_funcs->cctdes_ecb_encrypt : g_crypto_funcs->cctdes_ecb_decrypt; ccecb_ctx *ctx = enc ? 
ks->enc : ks->dec; @@ -89,7 +95,8 @@ void des3_ecb_encrypt(des_cblock *in, des_cblock *out, des3_ecb_key_schedule *ks /* Raw key helper functions */ -int des_is_weak_key(des_cblock *key) +int +des_is_weak_key(des_cblock *key) { return g_crypto_funcs->ccdes_key_is_weak_fn(key, CCDES_KEY_SIZE); } diff --git a/libkern/crypto/corecrypto_md5.c b/libkern/crypto/corecrypto_md5.c index 70225a5a8..63906e576 100644 --- a/libkern/crypto/corecrypto_md5.c +++ b/libkern/crypto/corecrypto_md5.c @@ -1,65 +1,70 @@ - #include #include #include #include -static uint64_t getCount(MD5_CTX *ctx) +static uint64_t +getCount(MD5_CTX *ctx) { - return ( (((uint64_t)ctx->count[0])<<32) | (ctx->count[1]) ); + return (((uint64_t)ctx->count[0]) << 32) | (ctx->count[1]); } -static void setCount(MD5_CTX *ctx, uint64_t count) +static void +setCount(MD5_CTX *ctx, uint64_t count) { - ctx->count[0]=(uint32_t)(count>>32); - ctx->count[1]=(uint32_t)count; + ctx->count[0] = (uint32_t)(count >> 32); + ctx->count[1] = (uint32_t)count; } /* Copy a ccdigest ctx into a legacy MD5 context */ -static void DiToMD5(const struct ccdigest_info *di, struct ccdigest_ctx *di_ctx, MD5_CTX *md5_ctx) +static void +DiToMD5(const struct ccdigest_info *di, struct ccdigest_ctx *di_ctx, MD5_CTX *md5_ctx) { - setCount(md5_ctx, ccdigest_nbits(di, di_ctx)/8+ccdigest_num(di, di_ctx)); + setCount(md5_ctx, ccdigest_nbits(di, di_ctx) / 8 + ccdigest_num(di, di_ctx)); memcpy(md5_ctx->buffer, ccdigest_data(di, di_ctx), di->block_size); memcpy(md5_ctx->state, ccdigest_state_ccn(di, di_ctx), di->state_size); } /* Copy a legacy MD5 context into a ccdigest ctx */ -static void MD5ToDi(const struct ccdigest_info *di, MD5_CTX *md5_ctx, struct ccdigest_ctx *di_ctx) +static void +MD5ToDi(const struct ccdigest_info *di, MD5_CTX *md5_ctx, struct ccdigest_ctx *di_ctx) { uint64_t count = getCount(md5_ctx); - - ccdigest_num(di, di_ctx)=count%di->block_size; - ccdigest_nbits(di, di_ctx)=(count-ccdigest_num(di, di_ctx))*8; + + ccdigest_num(di, di_ctx) = count % di->block_size; + ccdigest_nbits(di, di_ctx) = (count - ccdigest_num(di, di_ctx)) * 8; memcpy(ccdigest_data(di, di_ctx), md5_ctx->buffer, di->block_size); - memcpy(ccdigest_state_ccn(di, di_ctx), md5_ctx->state, di->state_size); + memcpy(ccdigest_state_ccn(di, di_ctx), md5_ctx->state, di->state_size); } -void MD5Init(MD5_CTX *ctx) +void +MD5Init(MD5_CTX *ctx) { - const struct ccdigest_info *di=g_crypto_funcs->ccmd5_di; + const struct ccdigest_info *di = g_crypto_funcs->ccmd5_di; ccdigest_di_decl(di, di_ctx); - + g_crypto_funcs->ccdigest_init_fn(di, di_ctx); - + DiToMD5(di, di_ctx, ctx); } -void MD5Update(MD5_CTX *ctx, const void *data, unsigned int len) +void +MD5Update(MD5_CTX *ctx, const void *data, unsigned int len) { - const struct ccdigest_info *di=g_crypto_funcs->ccmd5_di; + const struct ccdigest_info *di = g_crypto_funcs->ccmd5_di; ccdigest_di_decl(di, di_ctx); - + MD5ToDi(di, ctx, di_ctx); - g_crypto_funcs->ccdigest_update_fn(di, di_ctx, len, data); + g_crypto_funcs->ccdigest_update_fn(di, di_ctx, len, data); DiToMD5(di, di_ctx, ctx); } -void MD5Final(unsigned char digest[MD5_DIGEST_LENGTH], MD5_CTX *ctx) +void +MD5Final(unsigned char digest[MD5_DIGEST_LENGTH], MD5_CTX *ctx) { - const struct ccdigest_info *di=g_crypto_funcs->ccmd5_di; + const struct ccdigest_info *di = g_crypto_funcs->ccmd5_di; ccdigest_di_decl(di, di_ctx); - + MD5ToDi(di, ctx, di_ctx); ccdigest_final(di, di_ctx, digest); } - diff --git a/libkern/crypto/corecrypto_rand.c b/libkern/crypto/corecrypto_rand.c index 115f2d082..4a0bb2496 100644 --- 
a/libkern/crypto/corecrypto_rand.c +++ b/libkern/crypto/corecrypto_rand.c @@ -46,7 +46,8 @@ cc_rand_generate(void *out, size_t outlen) return error; } -int random_buf(void *buf, size_t buflen) +int +random_buf(void *buf, size_t buflen) { return cc_rand_generate(buf, buflen); } diff --git a/libkern/crypto/corecrypto_rsa.c b/libkern/crypto/corecrypto_rsa.c index 8bf03214a..d47da83a6 100644 --- a/libkern/crypto/corecrypto_rsa.c +++ b/libkern/crypto/corecrypto_rsa.c @@ -31,26 +31,28 @@ #include -int rsa_make_pub(rsa_pub_ctx *pub, - size_t exp_nbytes, const uint8_t *exp, - size_t mod_nbytes, const uint8_t *mod) { - if ((exp_nbytes>RSA_MAX_KEY_BITSIZE/8) - || (mod_nbytes>RSA_MAX_KEY_BITSIZE/8)) { +int +rsa_make_pub(rsa_pub_ctx *pub, + size_t exp_nbytes, const uint8_t *exp, + size_t mod_nbytes, const uint8_t *mod) +{ + if ((exp_nbytes > RSA_MAX_KEY_BITSIZE / 8) + || (mod_nbytes > RSA_MAX_KEY_BITSIZE / 8)) { return -1; // Too big } ccrsa_ctx_n(pub->key) = ccn_nof(RSA_MAX_KEY_BITSIZE); return g_crypto_funcs->ccrsa_make_pub_fn(pub->key, - exp_nbytes, exp, - mod_nbytes, mod); + exp_nbytes, exp, + mod_nbytes, mod); } -int rsa_verify_pkcs1v15(rsa_pub_ctx *pub, const uint8_t *oid, - size_t digest_len, const uint8_t *digest, - size_t sig_len, const uint8_t *sig, - bool *valid) { - return g_crypto_funcs->ccrsa_verify_pkcs1v15_fn(pub->key,oid, - digest_len,digest, - sig_len,sig,valid); +int +rsa_verify_pkcs1v15(rsa_pub_ctx *pub, const uint8_t *oid, + size_t digest_len, const uint8_t *digest, + size_t sig_len, const uint8_t *sig, + bool *valid) +{ + return g_crypto_funcs->ccrsa_verify_pkcs1v15_fn(pub->key, oid, + digest_len, digest, + sig_len, sig, valid); } - - diff --git a/libkern/crypto/corecrypto_sha1.c b/libkern/crypto/corecrypto_sha1.c index 1513287da..3b2b57cca 100644 --- a/libkern/crypto/corecrypto_sha1.c +++ b/libkern/crypto/corecrypto_sha1.c @@ -1,79 +1,87 @@ - #include #include #include #include -static uint64_t getCount(SHA1_CTX *ctx) +static uint64_t +getCount(SHA1_CTX *ctx) { return ctx->c.b64[0]; } -static void setCount(SHA1_CTX *ctx, uint64_t count) +static void +setCount(SHA1_CTX *ctx, uint64_t count) { - ctx->c.b64[0]=count; + ctx->c.b64[0] = count; } /* Copy a ccdigest ctx into a legacy SHA1 context */ -static void DiToSHA1(const struct ccdigest_info *di, struct ccdigest_ctx *di_ctx, SHA1_CTX *sha1_ctx) +static void +DiToSHA1(const struct ccdigest_info *di, struct ccdigest_ctx *di_ctx, SHA1_CTX *sha1_ctx) { - setCount(sha1_ctx, ccdigest_nbits(di, di_ctx)/8+ccdigest_num(di, di_ctx)); + setCount(sha1_ctx, ccdigest_nbits(di, di_ctx) / 8 + ccdigest_num(di, di_ctx)); memcpy(sha1_ctx->m.b8, ccdigest_data(di, di_ctx), di->block_size); memcpy(sha1_ctx->h.b8, ccdigest_state_ccn(di, di_ctx), di->state_size); } /* Copy a legacy SHA1 context into a ccdigest ctx */ -static void SHA1ToDi(const struct ccdigest_info *di, SHA1_CTX *sha1_ctx, struct ccdigest_ctx *di_ctx) +static void +SHA1ToDi(const struct ccdigest_info *di, SHA1_CTX *sha1_ctx, struct ccdigest_ctx *di_ctx) { uint64_t count = getCount(sha1_ctx); - - ccdigest_num(di, di_ctx)=count%di->block_size; - ccdigest_nbits(di, di_ctx)=(count-ccdigest_num(di, di_ctx))*8; + + ccdigest_num(di, di_ctx) = count % di->block_size; + ccdigest_nbits(di, di_ctx) = (count - ccdigest_num(di, di_ctx)) * 8; memcpy(ccdigest_data(di, di_ctx), sha1_ctx->m.b8, di->block_size); - memcpy(ccdigest_state_ccn(di, di_ctx), sha1_ctx->h.b8, di->state_size); + memcpy(ccdigest_state_ccn(di, di_ctx), sha1_ctx->h.b8, di->state_size); } -void SHA1Init(SHA1_CTX *ctx) +void 
+SHA1Init(SHA1_CTX *ctx) { - const struct ccdigest_info *di=g_crypto_funcs->ccsha1_di; + const struct ccdigest_info *di = g_crypto_funcs->ccsha1_di; ccdigest_di_decl(di, di_ctx); - + g_crypto_funcs->ccdigest_init_fn(di, di_ctx); - + DiToSHA1(di, di_ctx, ctx); } -void SHA1Update(SHA1_CTX *ctx, const void *data, size_t len) +void +SHA1Update(SHA1_CTX *ctx, const void *data, size_t len) { - const struct ccdigest_info *di=g_crypto_funcs->ccsha1_di; + const struct ccdigest_info *di = g_crypto_funcs->ccsha1_di; ccdigest_di_decl(di, di_ctx); - + SHA1ToDi(di, ctx, di_ctx); - g_crypto_funcs->ccdigest_update_fn(di, di_ctx, len, data); + g_crypto_funcs->ccdigest_update_fn(di, di_ctx, len, data); DiToSHA1(di, di_ctx, ctx); } -void SHA1Final(void *digest, SHA1_CTX *ctx) +void +SHA1Final(void *digest, SHA1_CTX *ctx) { - const struct ccdigest_info *di=g_crypto_funcs->ccsha1_di; + const struct ccdigest_info *di = g_crypto_funcs->ccsha1_di; ccdigest_di_decl(di, di_ctx); - + SHA1ToDi(di, ctx, di_ctx); ccdigest_final(di, di_ctx, digest); } #ifdef XNU_KERNEL_PRIVATE -void SHA1UpdateUsePhysicalAddress(SHA1_CTX *ctx, const void *data, size_t len) +void +SHA1UpdateUsePhysicalAddress(SHA1_CTX *ctx, const void *data, size_t len) { //TODO: What the hell ? SHA1Update(ctx, data, len); } #endif -/* This is not publicised in header, but exported in libkern.exports */ +/* This is not publicised in header, but exported in libkern.exports */ void SHA1Final_r(SHA1_CTX *context, void *digest); -void SHA1Final_r(SHA1_CTX *context, void *digest) +void +SHA1Final_r(SHA1_CTX *context, void *digest) { SHA1Final(digest, context); } @@ -92,17 +100,17 @@ void sha1_hardware_hook(Boolean option, InKernelPerformSHA1Func func, void *ref) static void *SHA1Ref; static InKernelPerformSHA1Func performSHA1WithinKernelOnly; -void sha1_hardware_hook(Boolean option, InKernelPerformSHA1Func func, void *ref) +void +sha1_hardware_hook(Boolean option, InKernelPerformSHA1Func func, void *ref) { - if(option) { + if (option) { // Establish the hook. The hardware is ready. OSCompareAndSwapPtr((void*)NULL, (void*)ref, (void * volatile*)&SHA1Ref); - if(!OSCompareAndSwapPtr((void *)NULL, (void *)func, (void * volatile *)&performSHA1WithinKernelOnly)) { + if (!OSCompareAndSwapPtr((void *)NULL, (void *)func, (void * volatile *)&performSHA1WithinKernelOnly)) { panic("sha1_hardware_hook: Called twice.. Should never happen\n"); } - } - else { + } else { // The hardware is going away. Tear down the hook. performSHA1WithinKernelOnly = NULL; SHA1Ref = NULL; diff --git a/libkern/crypto/corecrypto_sha2.c b/libkern/crypto/corecrypto_sha2.c index 78975d48b..7392f0c23 100644 --- a/libkern/crypto/corecrypto_sha2.c +++ b/libkern/crypto/corecrypto_sha2.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,87 +34,99 @@ #if defined(CRYPTO_SHA2) -void SHA256_Init(SHA256_CTX *ctx) +void +SHA256_Init(SHA256_CTX *ctx) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha256_di; + di = g_crypto_funcs->ccsha256_di; + + /* Make sure the context size for the digest info fits in the one we have */ + if (ccdigest_di_size(di) > sizeof(SHA256_CTX)) { + panic("%s: inconsistent size for SHA256 context", __FUNCTION__); + } - /* Make sure the context size for the digest info fits in the one we have */ - if(ccdigest_di_size(di)>sizeof(SHA256_CTX)) - panic("%s: inconsistent size for SHA256 context", __FUNCTION__); - g_crypto_funcs->ccdigest_init_fn(di, ctx->ctx); } -void SHA256_Update(SHA256_CTX *ctx, const void *data, size_t len) +void +SHA256_Update(SHA256_CTX *ctx, const void *data, size_t len) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha256_di; + di = g_crypto_funcs->ccsha256_di; g_crypto_funcs->ccdigest_update_fn(di, ctx->ctx, len, data); } -void SHA256_Final(void *digest, SHA256_CTX *ctx) +void +SHA256_Final(void *digest, SHA256_CTX *ctx) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha256_di; + di = g_crypto_funcs->ccsha256_di; ccdigest_final(di, ctx->ctx, digest); } -void SHA384_Init(SHA384_CTX *ctx) +void +SHA384_Init(SHA384_CTX *ctx) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha384_di; + di = g_crypto_funcs->ccsha384_di; + + /* Make sure the context size for the digest info fits in the one we have */ + if (ccdigest_di_size(di) > sizeof(SHA384_CTX)) { + panic("%s: inconsistent size for SHA384 context", __FUNCTION__); + } - /* Make sure the context size for the digest info fits in the one we have */ - if(ccdigest_di_size(di)>sizeof(SHA384_CTX)) - panic("%s: inconsistent size for SHA384 context", __FUNCTION__); - g_crypto_funcs->ccdigest_init_fn(di, ctx->ctx); } -void SHA384_Update(SHA384_CTX *ctx, const void *data, size_t len) +void +SHA384_Update(SHA384_CTX *ctx, const void *data, size_t len) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha384_di; + di = g_crypto_funcs->ccsha384_di; g_crypto_funcs->ccdigest_update_fn(di, ctx->ctx, len, data); } -void SHA384_Final(void *digest, SHA384_CTX *ctx) +void +SHA384_Final(void *digest, SHA384_CTX *ctx) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha384_di; + di = g_crypto_funcs->ccsha384_di; ccdigest_final(di, ctx->ctx, digest); } -void SHA512_Init(SHA512_CTX *ctx) +void +SHA512_Init(SHA512_CTX *ctx) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha512_di; + di = g_crypto_funcs->ccsha512_di; + + /* Make sure the context size for the digest info fits in the one we have */ + if (ccdigest_di_size(di) > sizeof(SHA512_CTX)) { + panic("%s: inconsistent size for SHA512 context", __FUNCTION__); + } - /* Make sure the context size for the digest info fits in the one we have */ - if(ccdigest_di_size(di)>sizeof(SHA512_CTX)) - panic("%s: inconsistent size for SHA512 context", __FUNCTION__); - g_crypto_funcs->ccdigest_init_fn(di, ctx->ctx); } -void SHA512_Update(SHA512_CTX *ctx, const void *data, size_t len) +void 
+SHA512_Update(SHA512_CTX *ctx, const void *data, size_t len) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha512_di; + di = g_crypto_funcs->ccsha512_di; g_crypto_funcs->ccdigest_update_fn(di, ctx->ctx, len, data); } -void SHA512_Final(void *digest, SHA512_CTX *ctx) +void +SHA512_Final(void *digest, SHA512_CTX *ctx) { const struct ccdigest_info *di; - di=g_crypto_funcs->ccsha512_di; + di = g_crypto_funcs->ccsha512_di; ccdigest_final(di, ctx->ctx, digest); } @@ -123,15 +135,14 @@ void SHA512_Final(void *digest, SHA512_CTX *ctx) /* As these are part of the KPI, we need to stub them out for any kernel configuration that does not support SHA2. */ -void UNSUPPORTED_API(SHA256_Init, SHA256_CTX *ctx); -void UNSUPPORTED_API(SHA384_Init, SHA384_CTX *ctx); -void UNSUPPORTED_API(SHA512_Init, SHA512_CTX *ctx); +void UNSUPPORTED_API(SHA256_Init, SHA256_CTX *ctx); +void UNSUPPORTED_API(SHA384_Init, SHA384_CTX *ctx); +void UNSUPPORTED_API(SHA512_Init, SHA512_CTX *ctx); void UNSUPPORTED_API(SHA256_Update, SHA256_CTX *ctx, const void *data, size_t len); void UNSUPPORTED_API(SHA384_Update, SHA384_CTX *ctx, const void *data, size_t len); void UNSUPPORTED_API(SHA512_Update, SHA512_CTX *ctx, const void *data, size_t len); -void UNSUPPORTED_API(SHA256_Final, void *digest, SHA256_CTX *ctx); -void UNSUPPORTED_API(SHA384_Final, void *digest, SHA384_CTX *ctx); -void UNSUPPORTED_API(SHA512_Final, void *digest, SHA512_CTX *ctx); +void UNSUPPORTED_API(SHA256_Final, void *digest, SHA256_CTX *ctx); +void UNSUPPORTED_API(SHA384_Final, void *digest, SHA384_CTX *ctx); +void UNSUPPORTED_API(SHA512_Final, void *digest, SHA512_CTX *ctx); #endif - diff --git a/libkern/crypto/register_crypto.c b/libkern/crypto/register_crypto.c index 51d399a82..d43d23df8 100644 --- a/libkern/crypto/register_crypto.c +++ b/libkern/crypto/register_crypto.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,14 +33,14 @@ SECURITY_READ_ONLY_LATE(crypto_functions_t) g_crypto_funcs = NULL; -int register_crypto_functions(const crypto_functions_t funcs) +int +register_crypto_functions(const crypto_functions_t funcs) { - if(g_crypto_funcs) + if (g_crypto_funcs) { return -1; + } g_crypto_funcs = funcs; - + return 0; } - - diff --git a/libkern/firehose/chunk_private.h b/libkern/firehose/chunk_private.h index b4fbcd74a..80d5969fa 100644 --- a/libkern/firehose/chunk_private.h +++ b/libkern/firehose/chunk_private.h @@ -39,7 +39,7 @@ __BEGIN_DECLS #define FIREHOSE_CHUNK_POS_REFCNT_INC (1ULL << 32) #define FIREHOSE_CHUNK_POS_FULL_BIT (1ULL << 56) #define FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(pos, stream) \ - ((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream) + ((((pos).fcp_pos >> 48) & 0x1ff) == (uint16_t)stream) typedef union { _Atomic(uint64_t) fcp_atomic_pos; @@ -84,8 +84,8 @@ firehose_chunk_pos_fits(firehose_chunk_pos_u pos, uint16_t size) OS_ALWAYS_INLINE static inline long firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp, - firehose_stream_t stream, uint8_t qos, uint16_t pubsize, - uint16_t privsize, uint8_t **privptr) + firehose_stream_t stream, uint8_t qos, uint16_t pubsize, + uint16_t privsize, uint8_t **privptr) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); firehose_chunk_pos_u orig, pos; @@ -95,42 +95,44 @@ firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp, // no acquire barrier because the returned space is written to only os_atomic_rmw_loop(&fc->fc_pos.fcp_atomic_pos, - orig.fcp_pos, pos.fcp_pos, relaxed, { + orig.fcp_pos, pos.fcp_pos, relaxed, { if (orig.fcp_pos == 0) { - // we acquired a really really old reference, and we probably - // just faulted in a new page - os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL); + // we acquired a really really old reference, and we probably + // just faulted in a new page + os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL); } if (!FIREHOSE_CHUNK_POS_USABLE_FOR_STREAM(orig, stream)) { - // nothing to do if the chunk is full, or the stream doesn't match, - // in which case the thread probably: - // - loaded the chunk ref - // - been suspended a long while - // - read the chunk to find a very old thing - os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL); + // nothing to do if the chunk is full, or the stream doesn't match, + // in which case the thread probably: + // - loaded the chunk ref + // - been suspended a long while + // - read the chunk to find a very old thing + os_atomic_rmw_loop_give_up(return FIREHOSE_CHUNK_TRY_RESERVE_FAIL); } pos = orig; if (!firehose_chunk_pos_fits(orig, - ft_size + pubsize + privsize) || !stamp_delta_fits) { - pos.fcp_flag_full = true; - reservation_failed = true; + ft_size + pubsize + privsize) || !stamp_delta_fits) { + pos.fcp_flag_full = true; + reservation_failed = true; } else { - if (qos > pos.fcp_qos) pos.fcp_qos = qos; - // using these *_INC macros is so that the compiler generates better - // assembly: using the struct individual fields forces the compiler - // to handle carry propagations, and we know it won't happen - pos.fcp_pos += roundup(ft_size + pubsize, 8) * - FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC; - pos.fcp_pos -= privsize * FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC; - pos.fcp_pos += FIREHOSE_CHUNK_POS_REFCNT_INC; - const uint16_t minimum_payload_size = 16; - if (!firehose_chunk_pos_fits(pos, - roundup(ft_size + 
minimum_payload_size , 8))) { - // if we can't even have minimum_payload_size bytes of payload - // for the next tracepoint, just flush right away - pos.fcp_flag_full = true; + if (qos > pos.fcp_qos) { + pos.fcp_qos = qos; } - reservation_failed = false; + // using these *_INC macros is so that the compiler generates better + // assembly: using the struct individual fields forces the compiler + // to handle carry propagations, and we know it won't happen + pos.fcp_pos += roundup(ft_size + pubsize, 8) * + FIREHOSE_CHUNK_POS_ENTRY_OFFS_INC; + pos.fcp_pos -= privsize * FIREHOSE_CHUNK_POS_PRIVATE_OFFS_INC; + pos.fcp_pos += FIREHOSE_CHUNK_POS_REFCNT_INC; + const uint16_t minimum_payload_size = 16; + if (!firehose_chunk_pos_fits(pos, + roundup(ft_size + minimum_payload_size, 8))) { + // if we can't even have minimum_payload_size bytes of payload + // for the next tracepoint, just flush right away + pos.fcp_flag_full = true; + } + reservation_failed = false; } }); @@ -152,17 +154,17 @@ firehose_chunk_tracepoint_try_reserve(firehose_chunk_t fc, uint64_t stamp, OS_ALWAYS_INLINE static inline firehose_tracepoint_t firehose_chunk_tracepoint_begin(firehose_chunk_t fc, uint64_t stamp, - uint16_t pubsize, uint64_t thread_id, long offset) + uint16_t pubsize, uint64_t thread_id, long offset) { firehose_tracepoint_t ft = (firehose_tracepoint_t) - __builtin_assume_aligned(fc->fc_start + offset, 8); + __builtin_assume_aligned(fc->fc_start + offset, 8); stamp -= fc->fc_timestamp; stamp |= (uint64_t)pubsize << 48; // The compiler barrier is needed for userland process death handling, see // (tracepoint-begin) in libdispatch's firehose_buffer_stream_chunk_install. atomic_store_explicit(&ft->ft_atomic_stamp_and_length, stamp, - memory_order_relaxed); - __asm__ __volatile__("" ::: "memory"); + memory_order_relaxed); + __asm__ __volatile__ ("" ::: "memory"); ft->ft_thread = thread_id; return ft; } @@ -170,14 +172,14 @@ firehose_chunk_tracepoint_begin(firehose_chunk_t fc, uint64_t stamp, OS_ALWAYS_INLINE static inline bool firehose_chunk_tracepoint_end(firehose_chunk_t fc, - firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) + firehose_tracepoint_t ft, firehose_tracepoint_id_u ftid) { firehose_chunk_pos_u pos; atomic_store_explicit(&ft->ft_id.ftid_atomic_value, - ftid.ftid_value, memory_order_release); + ftid.ftid_value, memory_order_release); pos.fcp_pos = atomic_fetch_sub_explicit(&fc->fc_pos.fcp_atomic_pos, - FIREHOSE_CHUNK_POS_REFCNT_INC, memory_order_relaxed); + FIREHOSE_CHUNK_POS_REFCNT_INC, memory_order_relaxed); return pos.fcp_refcnt == 1 && pos.fcp_flag_full; } diff --git a/libkern/firehose/firehose_types_private.h b/libkern/firehose/firehose_types_private.h index d99a5819f..c47635c88 100644 --- a/libkern/firehose/firehose_types_private.h +++ b/libkern/firehose/firehose_types_private.h @@ -36,15 +36,15 @@ __BEGIN_DECLS * The lower 8 bits are or-ed in the upper 8 bits of Activity ID and propagated * to children activities */ -OS_ENUM(firehose_activity_flags, unsigned long, - firehose_activity_flags_default = 0x0000, + OS_ENUM(firehose_activity_flags, unsigned long, + firehose_activity_flags_default = 0x0000, - firehose_activity_flags_info_mode = 0x0001, - firehose_activity_flags_debug_mode = 0x0002, - firehose_activity_flags_stream_live_mode = 0x0004, + firehose_activity_flags_info_mode = 0x0001, + firehose_activity_flags_debug_mode = 0x0002, + firehose_activity_flags_stream_live_mode = 0x0004, - firehose_activity_flags_precise_timestamp = 0x0080, -); + firehose_activity_flags_precise_timestamp = 
0x0080, + ); /*! * @typedef firehose_activity_id_t @@ -60,25 +60,25 @@ typedef uint64_t firehose_activity_id_t; #define FIREHOSE_ACTIVITY_ID_INVALID ((firehose_activity_id_t)~0ULL) #define FIREHOSE_ACTIVITY_ID_FLAGS_SHIFT 56 #define FIREHOSE_ACTIVITY_ID_FLAGS(aid) \ - ((firehose_activity_flags_t)((aid) >> FIREHOSE_ACTIVITY_ID_FLAGS_SHIFT)) + ((firehose_activity_flags_t)((aid) >> FIREHOSE_ACTIVITY_ID_FLAGS_SHIFT)) #define FIREHOSE_ACTIVITY_ID_MERGE_FLAGS(aid, flags) (\ - ((firehose_activity_id_t)(aid)) | \ - ((firehose_activity_id_t)(flags) << FIREHOSE_ACTIVITY_ID_FLAGS_SHIFT)) + ((firehose_activity_id_t)(aid)) | \ + ((firehose_activity_id_t)(flags) << FIREHOSE_ACTIVITY_ID_FLAGS_SHIFT)) /*! * @enum firehose_stream_t */ OS_ENUM(firehose_stream, uint8_t, - firehose_stream_persist = 0, - firehose_stream_special = 1, - firehose_stream_memory = 2, - firehose_stream_metadata = 3, - firehose_stream_signpost = 4, - firehose_stream_memory_wifi = 5, - firehose_stream_memory_baseband = 6, + firehose_stream_persist = 0, + firehose_stream_special = 1, + firehose_stream_memory = 2, + firehose_stream_metadata = 3, + firehose_stream_signpost = 4, + firehose_stream_memory_wifi = 5, + firehose_stream_memory_baseband = 6, - _firehose_stream_max, -); + _firehose_stream_max, + ); /*! * @enum firehose_tracepoint_namespace_t @@ -87,13 +87,13 @@ OS_ENUM(firehose_stream, uint8_t, * Namespaces of tracepoints. */ OS_ENUM(firehose_tracepoint_namespace, uint8_t, - firehose_tracepoint_namespace_activity = 0x02, - firehose_tracepoint_namespace_trace = 0x03, - firehose_tracepoint_namespace_log = 0x04, - firehose_tracepoint_namespace_metadata = 0x05, - firehose_tracepoint_namespace_signpost = 0x06, - firehose_tracepoint_namespace_loss = 0x07, -); + firehose_tracepoint_namespace_activity = 0x02, + firehose_tracepoint_namespace_trace = 0x03, + firehose_tracepoint_namespace_log = 0x04, + firehose_tracepoint_namespace_metadata = 0x05, + firehose_tracepoint_namespace_signpost = 0x06, + firehose_tracepoint_namespace_loss = 0x07, + ); /*! * @enum firehose_tracepoint_code_t @@ -102,9 +102,9 @@ OS_ENUM(firehose_tracepoint_namespace, uint8_t, * Codes of tracepoints. */ OS_ENUM(firehose_tracepoint_code, uint32_t, - firehose_tracepoint_code_load = 0x01, - firehose_tracepoint_code_unload = 0x02, -); + firehose_tracepoint_code_load = 0x01, + firehose_tracepoint_code_unload = 0x02, + ); /*! * @typedef firehose_tracepoint_type_t @@ -121,18 +121,18 @@ typedef uint8_t firehose_tracepoint_type_t; * Flags for tracepoints. 
*/ OS_ENUM(firehose_tracepoint_flags, uint16_t, - _firehose_tracepoint_flags_base_has_current_aid = 0x0001, -#define _firehose_tracepoint_flags_pc_style_mask (0x0007 << 1) - _firehose_tracepoint_flags_pc_style_none = 0x0000 << 1, - _firehose_tracepoint_flags_pc_style_main_exe = 0x0001 << 1, - _firehose_tracepoint_flags_pc_style_shared_cache = 0x0002 << 1, - _firehose_tracepoint_flags_pc_style_main_plugin = 0x0003 << 1, - _firehose_tracepoint_flags_pc_style_absolute = 0x0004 << 1, - _firehose_tracepoint_flags_pc_style_uuid_relative = 0x0005 << 1, - _firehose_tracepoint_flags_pc_style__unused6 = 0x0006 << 1, - _firehose_tracepoint_flags_pc_style__unused7 = 0x0007 << 1, - _firehose_tracepoint_flags_base_has_unique_pid = 0x0010, -); + _firehose_tracepoint_flags_base_has_current_aid = 0x0001, +#define _firehose_tracepoint_flags_pc_style_mask (0x0007 << 1) + _firehose_tracepoint_flags_pc_style_none = 0x0000 << 1, + _firehose_tracepoint_flags_pc_style_main_exe = 0x0001 << 1, + _firehose_tracepoint_flags_pc_style_shared_cache = 0x0002 << 1, + _firehose_tracepoint_flags_pc_style_main_plugin = 0x0003 << 1, + _firehose_tracepoint_flags_pc_style_absolute = 0x0004 << 1, + _firehose_tracepoint_flags_pc_style_uuid_relative = 0x0005 << 1, + _firehose_tracepoint_flags_pc_style__unused6 = 0x0006 << 1, + _firehose_tracepoint_flags_pc_style__unused7 = 0x0007 << 1, + _firehose_tracepoint_flags_base_has_unique_pid = 0x0010, + ); /*! * @typedef firehose_tracepoint_id_t @@ -149,10 +149,10 @@ typedef uint64_t firehose_tracepoint_id_t; * Types of Activity tracepoints (namespace activity). */ OS_ENUM(_firehose_tracepoint_type_activity, firehose_tracepoint_type_t, - _firehose_tracepoint_type_activity_create = 0x01, - _firehose_tracepoint_type_activity_swap = 0x02, - _firehose_tracepoint_type_activity_useraction = 0x03, -); + _firehose_tracepoint_type_activity_create = 0x01, + _firehose_tracepoint_type_activity_swap = 0x02, + _firehose_tracepoint_type_activity_useraction = 0x03, + ); /*! * @enum firehose_tracepoint_flags_activity_t @@ -161,9 +161,9 @@ OS_ENUM(_firehose_tracepoint_type_activity, firehose_tracepoint_type_t, * Flags for Activity tracepoints (namespace activity). */ OS_ENUM(_firehose_tracepoint_flags_activity, uint16_t, - _firehose_tracepoint_flags_activity_user_interface = 0x0100, - _firehose_tracepoint_flags_activity_has_other_aid = 0x0200, -); + _firehose_tracepoint_flags_activity_user_interface = 0x0100, + _firehose_tracepoint_flags_activity_has_other_aid = 0x0200, + ); /*! * @enum firehose_tracepoint_type_trace_t @@ -172,12 +172,12 @@ OS_ENUM(_firehose_tracepoint_flags_activity, uint16_t, * Types of trace tracepoints (namespace trace). */ OS_ENUM(_firehose_tracepoint_type_trace, firehose_tracepoint_type_t, - _firehose_tracepoint_type_trace_default = 0x00, - _firehose_tracepoint_type_trace_info = 0x01, - _firehose_tracepoint_type_trace_debug = 0x02, - _firehose_tracepoint_type_trace_error = 0x10, - _firehose_tracepoint_type_trace_fault = 0x11, -); + _firehose_tracepoint_type_trace_default = 0x00, + _firehose_tracepoint_type_trace_info = 0x01, + _firehose_tracepoint_type_trace_debug = 0x02, + _firehose_tracepoint_type_trace_error = 0x10, + _firehose_tracepoint_type_trace_fault = 0x11, + ); /*! * @enum firehose_tracepoint_type_log_t @@ -186,12 +186,12 @@ OS_ENUM(_firehose_tracepoint_type_trace, firehose_tracepoint_type_t, * Types of Log tracepoints (namespace log). 
*/ OS_ENUM(_firehose_tracepoint_type_log, firehose_tracepoint_type_t, - _firehose_tracepoint_type_log_default = 0x00, - _firehose_tracepoint_type_log_info = 0x01, - _firehose_tracepoint_type_log_debug = 0x02, - _firehose_tracepoint_type_log_error = 0x10, - _firehose_tracepoint_type_log_fault = 0x11, -); + _firehose_tracepoint_type_log_default = 0x00, + _firehose_tracepoint_type_log_info = 0x01, + _firehose_tracepoint_type_log_debug = 0x02, + _firehose_tracepoint_type_log_error = 0x10, + _firehose_tracepoint_type_log_fault = 0x11, + ); /*! * @enum firehose_tracepoint_flags_log_t @@ -200,12 +200,12 @@ OS_ENUM(_firehose_tracepoint_type_log, firehose_tracepoint_type_t, * Flags for Log tracepoints (namespace log). */ OS_ENUM(_firehose_tracepoint_flags_log, uint16_t, - _firehose_tracepoint_flags_log_has_private_data = 0x0100, - _firehose_tracepoint_flags_log_has_subsystem = 0x0200, - _firehose_tracepoint_flags_log_has_rules = 0x0400, - _firehose_tracepoint_flags_log_has_oversize = 0x0800, - _firehose_tracepoint_flags_log_has_context_data = 0x1000, -); + _firehose_tracepoint_flags_log_has_private_data = 0x0100, + _firehose_tracepoint_flags_log_has_subsystem = 0x0200, + _firehose_tracepoint_flags_log_has_rules = 0x0400, + _firehose_tracepoint_flags_log_has_oversize = 0x0800, + _firehose_tracepoint_flags_log_has_context_data = 0x1000, + ); /*! * @enum _firehose_tracepoint_type_metadata_t @@ -214,10 +214,10 @@ OS_ENUM(_firehose_tracepoint_flags_log, uint16_t, * Types for metadata tracepoints (namespace metadata). */ OS_ENUM(_firehose_tracepoint_type_metadata, firehose_tracepoint_type_t, - _firehose_tracepoint_type_metadata_dyld = 0x01, - _firehose_tracepoint_type_metadata_subsystem = 0x02, - _firehose_tracepoint_type_metadata_kext = 0x03, -); + _firehose_tracepoint_type_metadata_dyld = 0x01, + _firehose_tracepoint_type_metadata_subsystem = 0x02, + _firehose_tracepoint_type_metadata_kext = 0x03, + ); /*! * @enum firehose_tracepoint_type_signpost_t @@ -226,15 +226,15 @@ OS_ENUM(_firehose_tracepoint_type_metadata, firehose_tracepoint_type_t, * Types of Log tracepoints (namespace signpost). */ OS_ENUM(_firehose_tracepoint_type_signpost, firehose_tracepoint_type_t, - _firehose_tracepoint_type_signpost_event = 0x00, - _firehose_tracepoint_type_signpost_interval_begin = 0x01, - _firehose_tracepoint_type_signpost_interval_end = 0x02, + _firehose_tracepoint_type_signpost_event = 0x00, + _firehose_tracepoint_type_signpost_interval_begin = 0x01, + _firehose_tracepoint_type_signpost_interval_end = 0x02, - _firehose_tracepoint_type_signpost_scope_mask = 0xc0, - _firehose_tracepoint_type_signpost_scope_thread = 0x40, - _firehose_tracepoint_type_signpost_scope_process = 0x80, - _firehose_tracepoint_type_signpost_scope_system = 0xc0, -); + _firehose_tracepoint_type_signpost_scope_mask = 0xc0, + _firehose_tracepoint_type_signpost_scope_thread = 0x40, + _firehose_tracepoint_type_signpost_scope_process = 0x80, + _firehose_tracepoint_type_signpost_scope_system = 0xc0, + ); /*! * @enum firehose_tracepoint_flags_signpost_t @@ -245,12 +245,12 @@ OS_ENUM(_firehose_tracepoint_type_signpost, firehose_tracepoint_type_t, * When flags are shared with the log type, they should have the same values. 
*/ OS_ENUM(_firehose_tracepoint_flags_signpost, uint16_t, - _firehose_tracepoint_flags_signpost_has_private_data = 0x0100, - _firehose_tracepoint_flags_signpost_has_subsystem = 0x0200, - _firehose_tracepoint_flags_signpost_has_rules = 0x0400, - _firehose_tracepoint_flags_signpost_has_oversize = 0x0800, - _firehose_tracepoint_flags_signpost_has_context_data = 0x1000, -); + _firehose_tracepoint_flags_signpost_has_private_data = 0x0100, + _firehose_tracepoint_flags_signpost_has_subsystem = 0x0200, + _firehose_tracepoint_flags_signpost_has_rules = 0x0400, + _firehose_tracepoint_flags_signpost_has_oversize = 0x0800, + _firehose_tracepoint_flags_signpost_has_context_data = 0x1000, + ); /* MIG firehose push reply structure */ typedef struct firehose_push_reply_s { @@ -269,6 +269,6 @@ typedef union firehose_buffer_u *firehose_buffer_t; __END_DECLS -OS_ASSUME_NONNULL_END + OS_ASSUME_NONNULL_END #endif // __FIREHOSE_TYPES_PRIVATE__ diff --git a/libkern/firehose/ioctl_private.h b/libkern/firehose/ioctl_private.h index efb828bd8..98b96899d 100644 --- a/libkern/firehose/ioctl_private.h +++ b/libkern/firehose/ioctl_private.h @@ -28,7 +28,7 @@ /* Flushed the log data. Return the updated pointers */ #ifndef LOGFLUSHED -#define LOGFLUSHED _IOW('t', 81, firehose_push_reply_t) +#define LOGFLUSHED _IOW('t', 81, firehose_push_reply_t) #endif /* Map the kernel log buffers to logd's address space */ diff --git a/libkern/firehose/tracepoint_private.h b/libkern/firehose/tracepoint_private.h index 76531fd28..f5ad06986 100644 --- a/libkern/firehose/tracepoint_private.h +++ b/libkern/firehose/tracepoint_private.h @@ -83,10 +83,10 @@ typedef struct firehose_tracepoint_s { #define FIREHOSE_TRACE_ID_MAKE(ns, type, flags, code) \ (((firehose_tracepoint_id_u){ .ftid = { \ - ._namespace = ns, \ - ._type = type, \ - ._flags = flags, \ - ._code = code, \ + ._namespace = ns, \ + ._type = type, \ + ._flags = flags, \ + ._code = code, \ } }).ftid_value) #define FIREHOSE_TRACE_ID_SET_NS(tid, ns) \ @@ -96,12 +96,12 @@ typedef struct firehose_tracepoint_s { ((tid).ftid._type = _firehose_tracepoint_type_##ns##_##type) #define FIREHOSE_TRACE_ID_PC_STYLE(tid) \ - ((tid).ftid._flags & _firehose_tracepoint_flags_pc_style_mask) + ((tid).ftid._flags & _firehose_tracepoint_flags_pc_style_mask) #define FIREHOSE_TRACE_ID_SET_PC_STYLE(tid, flag) ({ \ - firehose_tracepoint_id_u _tmp_tid = (tid); \ - _tmp_tid.ftid._flags &= ~_firehose_tracepoint_flags_pc_style_mask; \ - _tmp_tid.ftid._flags |= _firehose_tracepoint_flags_pc_style_##flag; \ + firehose_tracepoint_id_u _tmp_tid = (tid); \ + _tmp_tid.ftid._flags &= ~_firehose_tracepoint_flags_pc_style_mask; \ + _tmp_tid.ftid._flags |= _firehose_tracepoint_flags_pc_style_##flag; \ }) #define FIREHOSE_TRACE_ID_HAS_FLAG(tid, ns, flag) \ @@ -157,7 +157,7 @@ static inline uint64_t firehose_tracepoint_time(firehose_activity_flags_t flags) { if (firehose_precise_timestamps_enabled() || - (flags & firehose_activity_flags_precise_timestamp)) { + (flags & firehose_activity_flags_precise_timestamp)) { return mach_continuous_time(); } else { return mach_continuous_approximate_time(); @@ -169,10 +169,10 @@ __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) void firehose_trace_metadata(firehose_stream_t stream, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void* pubdata, size_t publen); + uint64_t stamp, const void* pubdata, size_t publen); #endif __END_DECLS -OS_ASSUME_NONNULL_END + OS_ASSUME_NONNULL_END #endif // __FIREHOSE_FIREHOSE__ diff --git 
a/libkern/gen/OSAtomicOperations.c b/libkern/gen/OSAtomicOperations.c index 5affc1eef..7866c302e 100644 --- a/libkern/gen/OSAtomicOperations.c +++ b/libkern/gen/OSAtomicOperations.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,8 +31,8 @@ #include enum { - false = 0, - true = 1 + false = 0, + true = 1 }; #ifndef NULL @@ -42,9 +42,9 @@ enum { #define ATOMIC_DEBUG DEBUG #if ATOMIC_DEBUG -#define ALIGN_TEST(p,t) do{if((uintptr_t)p&(sizeof(t)-1)) panic("Unaligned atomic pointer %p\n",p);}while(0) +#define ALIGN_TEST(p, t) do{if((uintptr_t)p&(sizeof(t)-1)) panic("Unaligned atomic pointer %p\n",p);}while(0) #else -#define ALIGN_TEST(p,t) do{}while(0) +#define ALIGN_TEST(p, t) do{}while(0) #endif // 19831745 - start of big hammer! @@ -60,29 +60,33 @@ enum { */ #undef OSCompareAndSwap8 -Boolean OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address) +Boolean +OSCompareAndSwap8(UInt8 oldValue, UInt8 newValue, volatile UInt8 *address) { return __c11_atomic_compare_exchange_strong((_Atomic UInt8 *)address, &oldValue, newValue, - memory_order_acq_rel_smp, memory_order_relaxed); + memory_order_acq_rel_smp, memory_order_relaxed); } #undef OSCompareAndSwap16 -Boolean OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address) +Boolean +OSCompareAndSwap16(UInt16 oldValue, UInt16 newValue, volatile UInt16 *address) { return __c11_atomic_compare_exchange_strong((_Atomic UInt16 *)address, &oldValue, newValue, - memory_order_acq_rel_smp, memory_order_relaxed); + memory_order_acq_rel_smp, memory_order_relaxed); } #undef OSCompareAndSwap -Boolean OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address) +Boolean +OSCompareAndSwap(UInt32 oldValue, UInt32 newValue, volatile UInt32 *address) { ALIGN_TEST(address, UInt32); return __c11_atomic_compare_exchange_strong((_Atomic UInt32 *)address, &oldValue, newValue, - memory_order_acq_rel_smp, memory_order_relaxed); + memory_order_acq_rel_smp, memory_order_relaxed); } #undef OSCompareAndSwap64 -Boolean OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address) +Boolean +OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *address) { /* * _Atomic uint64 requires 8-byte alignment on all architectures. 
@@ -93,40 +97,45 @@ Boolean OSCompareAndSwap64(UInt64 oldValue, UInt64 newValue, volatile UInt64 *ad ALIGN_TEST(address, UInt64); return __c11_atomic_compare_exchange_strong(aligned_addr, &oldValue, newValue, - memory_order_acq_rel_smp, memory_order_relaxed); + memory_order_acq_rel_smp, memory_order_relaxed); } #undef OSCompareAndSwapPtr -Boolean OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address) +Boolean +OSCompareAndSwapPtr(void *oldValue, void *newValue, void * volatile *address) { #if __LP64__ - return OSCompareAndSwap64((UInt64)oldValue, (UInt64)newValue, (volatile UInt64 *)address); + return OSCompareAndSwap64((UInt64)oldValue, (UInt64)newValue, (volatile UInt64 *)address); #else - return OSCompareAndSwap((UInt32)oldValue, (UInt32)newValue, (volatile UInt32 *)address); + return OSCompareAndSwap((UInt32)oldValue, (UInt32)newValue, (volatile UInt32 *)address); #endif } -SInt8 OSAddAtomic8(SInt32 amount, volatile SInt8 *address) +SInt8 +OSAddAtomic8(SInt32 amount, volatile SInt8 *address) { return __c11_atomic_fetch_add((_Atomic SInt8*)address, amount, memory_order_relaxed); } -SInt16 OSAddAtomic16(SInt32 amount, volatile SInt16 *address) +SInt16 +OSAddAtomic16(SInt32 amount, volatile SInt16 *address) { return __c11_atomic_fetch_add((_Atomic SInt16*)address, amount, memory_order_relaxed); } #undef OSAddAtomic -SInt32 OSAddAtomic(SInt32 amount, volatile SInt32 *address) +SInt32 +OSAddAtomic(SInt32 amount, volatile SInt32 *address) { ALIGN_TEST(address, UInt32); return __c11_atomic_fetch_add((_Atomic SInt32*)address, amount, memory_order_relaxed); } #undef OSAddAtomic64 -SInt64 OSAddAtomic64(SInt64 amount, volatile SInt64 *address) +SInt64 +OSAddAtomic64(SInt64 amount, volatile SInt64 *address) { - _Atomic SInt64* aligned_address = (_Atomic SInt64*)(uintptr_t)address; + _Atomic SInt64* aligned_address = (_Atomic SInt64*)(uintptr_t)address; ALIGN_TEST(address, SInt64); return __c11_atomic_fetch_add(aligned_address, amount, memory_order_relaxed); @@ -144,62 +153,70 @@ OSAddAtomicLong(long theAmount, volatile long *address) } #undef OSIncrementAtomic -SInt32 OSIncrementAtomic(volatile SInt32 * value) +SInt32 +OSIncrementAtomic(volatile SInt32 * value) { return OSAddAtomic(1, value); } #undef OSDecrementAtomic -SInt32 OSDecrementAtomic(volatile SInt32 * value) +SInt32 +OSDecrementAtomic(volatile SInt32 * value) { return OSAddAtomic(-1, value); } #undef OSBitAndAtomic -UInt32 OSBitAndAtomic(UInt32 mask, volatile UInt32 * value) +UInt32 +OSBitAndAtomic(UInt32 mask, volatile UInt32 * value) { return __c11_atomic_fetch_and((_Atomic UInt32*)value, mask, memory_order_relaxed); } #undef OSBitOrAtomic -UInt32 OSBitOrAtomic(UInt32 mask, volatile UInt32 * value) +UInt32 +OSBitOrAtomic(UInt32 mask, volatile UInt32 * value) { return __c11_atomic_fetch_or((_Atomic UInt32*)value, mask, memory_order_relaxed); } #undef OSBitXorAtomic -UInt32 OSBitXorAtomic(UInt32 mask, volatile UInt32 * value) +UInt32 +OSBitXorAtomic(UInt32 mask, volatile UInt32 * value) { return __c11_atomic_fetch_xor((_Atomic UInt32*)value, mask, memory_order_relaxed); } -static Boolean OSTestAndSetClear(UInt32 bit, Boolean wantSet, volatile UInt8 * startAddress) +static Boolean +OSTestAndSetClear(UInt32 bit, Boolean wantSet, volatile UInt8 * startAddress) { - UInt8 mask = 1; - UInt8 oldValue; - UInt8 wantValue; - + UInt8 mask = 1; + UInt8 oldValue; + UInt8 wantValue; + startAddress += (bit / 8); mask <<= (7 - (bit % 8)); wantValue = wantSet ? 
mask : 0; - + do { oldValue = *startAddress; if ((oldValue & mask) == wantValue) { break; } - } while (! __c11_atomic_compare_exchange_strong((_Atomic UInt8 *)startAddress, - &oldValue, (oldValue & ~mask) | wantValue, memory_order_relaxed, memory_order_relaxed)); - + } while (!__c11_atomic_compare_exchange_strong((_Atomic UInt8 *)startAddress, + &oldValue, (oldValue & ~mask) | wantValue, memory_order_relaxed, memory_order_relaxed)); + return (oldValue & mask) == wantValue; } -Boolean OSTestAndSet(UInt32 bit, volatile UInt8 * startAddress) +Boolean +OSTestAndSet(UInt32 bit, volatile UInt8 * startAddress) { return OSTestAndSetClear(bit, true, startAddress); } -Boolean OSTestAndClear(UInt32 bit, volatile UInt8 * startAddress) +Boolean +OSTestAndClear(UInt32 bit, volatile UInt8 * startAddress) { return OSTestAndSetClear(bit, false, startAddress); } @@ -208,56 +225,65 @@ Boolean OSTestAndClear(UInt32 bit, volatile UInt8 * startAddress) * silly unaligned versions */ -SInt8 OSIncrementAtomic8(volatile SInt8 * value) +SInt8 +OSIncrementAtomic8(volatile SInt8 * value) { return OSAddAtomic8(1, value); } -SInt8 OSDecrementAtomic8(volatile SInt8 * value) +SInt8 +OSDecrementAtomic8(volatile SInt8 * value) { return OSAddAtomic8(-1, value); } -UInt8 OSBitAndAtomic8(UInt32 mask, volatile UInt8 * value) +UInt8 +OSBitAndAtomic8(UInt32 mask, volatile UInt8 * value) { return __c11_atomic_fetch_and((_Atomic UInt8 *)value, mask, memory_order_relaxed); } -UInt8 OSBitOrAtomic8(UInt32 mask, volatile UInt8 * value) +UInt8 +OSBitOrAtomic8(UInt32 mask, volatile UInt8 * value) { return __c11_atomic_fetch_or((_Atomic UInt8 *)value, mask, memory_order_relaxed); } -UInt8 OSBitXorAtomic8(UInt32 mask, volatile UInt8 * value) +UInt8 +OSBitXorAtomic8(UInt32 mask, volatile UInt8 * value) { return __c11_atomic_fetch_xor((_Atomic UInt8 *)value, mask, memory_order_relaxed); } -SInt16 OSIncrementAtomic16(volatile SInt16 * value) +SInt16 +OSIncrementAtomic16(volatile SInt16 * value) { return OSAddAtomic16(1, value); } -SInt16 OSDecrementAtomic16(volatile SInt16 * value) +SInt16 +OSDecrementAtomic16(volatile SInt16 * value) { return OSAddAtomic16(-1, value); } -UInt16 OSBitAndAtomic16(UInt32 mask, volatile UInt16 * value) +UInt16 +OSBitAndAtomic16(UInt32 mask, volatile UInt16 * value) { return __c11_atomic_fetch_and((_Atomic UInt16 *)value, mask, memory_order_relaxed); } -UInt16 OSBitOrAtomic16(UInt32 mask, volatile UInt16 * value) +UInt16 +OSBitOrAtomic16(UInt32 mask, volatile UInt16 * value) { return __c11_atomic_fetch_or((_Atomic UInt16 *)value, mask, memory_order_relaxed); } -UInt16 OSBitXorAtomic16(UInt32 mask, volatile UInt16 * value) +UInt16 +OSBitXorAtomic16(UInt32 mask, volatile UInt16 * value) { return __c11_atomic_fetch_xor((_Atomic UInt16 *)value, mask, memory_order_relaxed); } // 19831745 - end of big hammer! #pragma clang diagnostic pop - diff --git a/libkern/gen/OSDebug.cpp b/libkern/gen/OSDebug.cpp index cbcdd5728..b806dce3c 100644 --- a/libkern/gen/OSDebug.cpp +++ b/libkern/gen/OSDebug.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2005-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,7 +37,7 @@ #include #include -#include // From bsd's libkern directory +#include // From bsd's libkern directory #include #include @@ -57,7 +57,7 @@ extern boolean_t doprnt_hide_pointers; extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt, boolean_t doUnslide); extern addr64_t kvtophys(vm_offset_t va); -#if __arm__ +#if __arm__ extern int copyinframe(vm_address_t fp, char *frame); #elif defined(__arm64__) extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit); @@ -69,63 +69,66 @@ extern lck_grp_t *IOLockGroup; static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); -/* Use kernel_debug() to log a backtrace */ +/* Use kernel_debug() to log a backtrace */ void -trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data) { +trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data) +{ void *bt[16]; const unsigned cnt = sizeof(bt) / sizeof(bt[0]); - unsigned i; + unsigned i; int found = 0; - OSBacktrace(bt, cnt); - + OSBacktrace(bt, cnt); + /* find first non-kernel frame */ - for (i = 3; i < cnt && bt[i]; i++) { - if (bt[i] > (void*)&etext) { + for (i = 3; i < cnt && bt[i]; i++) { + if (bt[i] > (void*)&etext) { found = 1; - break; + break; } } - /* + /* * if there are non-kernel frames, only log these * otherwise, log everything but the first two */ - if (!found) i=2; + if (!found) { + i = 2; + } #define safe_bt(a) (uintptr_t)(a VM_MIN_KERNEL_AND_KEXT_ADDRESS) && - (raddr < VM_MAX_KERNEL_ADDRESS)); + return (raddr > VM_MIN_KERNEL_AND_KEXT_ADDRESS) && + (raddr < VM_MAX_KERNEL_ADDRESS); } static unsigned int x86_64_validate_stackptr(vm_offset_t stackptr) { /* Existence and alignment check */ - if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr)) + if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr)) { return 0; - + } + /* Is a virtual->physical translation present? */ - if (!kvtophys(stackptr)) + if (!kvtophys(stackptr)) { return 0; - + } + /* Check if the return address lies on the same page; * If not, verify that a translation exists. 
*/ if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) && - !kvtophys(stackptr + x86_64_RETURN_OFFSET)) + !kvtophys(stackptr + x86_64_RETURN_OFFSET)) { return 0; + } return 1; } #endif @@ -169,98 +175,107 @@ OSPrintBacktrace(void) void * btbuf[20]; int tmp = OSBacktrace(btbuf, 20); int i; - for(i=0;i> 1) - vm_offset_t stackptr, stackptr_prev, raddr; - unsigned frame_index = 0; + vm_offset_t stackptr, stackptr_prev, raddr; + unsigned frame_index = 0; /* Obtain current frame pointer */ - __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr)); + __asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr)); - if (!x86_64_validate_stackptr(stackptr)) - goto pad; + if (!x86_64_validate_stackptr(stackptr)) { + goto pad; + } - raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET)); + raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET)); - if (!x86_64_validate_raddr(raddr)) - goto pad; + if (!x86_64_validate_raddr(raddr)) { + goto pad; + } - bt[frame_index++] = (void *) raddr; + bt[frame_index++] = (void *) raddr; - for ( ; frame_index < maxAddrs; frame_index++) { - stackptr_prev = stackptr; - stackptr = *((vm_offset_t *) stackptr_prev); + for (; frame_index < maxAddrs; frame_index++) { + stackptr_prev = stackptr; + stackptr = *((vm_offset_t *) stackptr_prev); - if (!x86_64_validate_stackptr(stackptr)) - break; - /* Stack grows downwards */ - if (stackptr < stackptr_prev) - break; + if (!x86_64_validate_stackptr(stackptr)) { + break; + } + /* Stack grows downwards */ + if (stackptr < stackptr_prev) { + break; + } - if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE) - break; + if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE) { + break; + } - raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET)); + raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET)); - if (!x86_64_validate_raddr(raddr)) - break; + if (!x86_64_validate_raddr(raddr)) { + break; + } - bt[frame_index] = (void *) raddr; - } + bt[frame_index] = (void *) raddr; + } pad: - frame = frame_index; + frame = frame_index; - for ( ; frame_index < maxAddrs; frame_index++) - bt[frame_index] = (void *) 0; + for (; frame_index < maxAddrs; frame_index++) { + bt[frame_index] = (void *) 0; + } #elif __arm__ || __arm64__ - uint32_t i = 0; - uintptr_t frameb[2]; - uintptr_t fp = 0; - - // get the current frame pointer for this thread + uint32_t i = 0; + uintptr_t frameb[2]; + uintptr_t fp = 0; + + // get the current frame pointer for this thread #if defined(__arm__) #define OSBacktraceFrameAlignOK(x) (((x) & 0x3) == 0) - __asm__ volatile("mov %0,r7" : "=r" (fp)); + __asm__ volatile ("mov %0,r7" : "=r" (fp)); #elif defined(__arm64__) #define OSBacktraceFrameAlignOK(x) (((x) & 0xf) == 0) - __asm__ volatile("mov %0, fp" : "=r" (fp)); + __asm__ volatile ("mov %0, fp" : "=r" (fp)); #else #error Unknown architecture. 
#endif - - // now crawl up the stack recording the link value of each frame - do { - // check bounds - if ((fp == 0) || (!OSBacktraceFrameAlignOK(fp)) || (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) { - break; - } - // safely read frame + + // now crawl up the stack recording the link value of each frame + do { + // check bounds + if ((fp == 0) || (!OSBacktraceFrameAlignOK(fp)) || (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) { + break; + } + // safely read frame #ifdef __arm64__ - if (copyinframe(fp, (char*)frameb, TRUE) != 0) { + if (copyinframe(fp, (char*)frameb, TRUE) != 0) { #else - if (copyinframe(fp, (char*)frameb) != 0) { + if (copyinframe(fp, (char*)frameb) != 0) { #endif - break; - } - - // No need to use copyin as this is always a kernel address, see check above - bt[i] = (void*)frameb[1]; // link register - fp = frameb[0]; - } while (++i < maxAddrs); - frame= i; + break; + } + + // No need to use copyin as this is always a kernel address, see check above + bt[i] = (void*)frameb[1]; // link register + fp = frameb[0]; + } while (++i < maxAddrs); + frame = i; #else #error arch #endif - return frame; + return frame; } diff --git a/libkern/kernel_mach_header.c b/libkern/kernel_mach_header.c index 579402474..cdb773e05 100644 --- a/libkern/kernel_mach_header.c +++ b/libkern/kernel_mach_header.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,14 +34,14 @@ * time; it's primary use is by kld, and all externally * referenced routines at the present time operate against * the kernel mach header _mh_execute_header, which is the - * header for the currently executing kernel. + * header for the currently executing kernel. 
* */ #include #include #include -#include // from libsa +#include // from libsa /* * return the last address (first avail) @@ -51,17 +51,18 @@ vm_offset_t getlastaddr(void) { - kernel_segment_command_t *sgp; - vm_offset_t last_addr = 0; + kernel_segment_command_t *sgp; + vm_offset_t last_addr = 0; kernel_mach_header_t *header = &_mh_execute_header; unsigned long i; sgp = (kernel_segment_command_t *) - ((uintptr_t)header + sizeof(kernel_mach_header_t)); - for (i = 0; i < header->ncmds; i++){ + ((uintptr_t)header + sizeof(kernel_mach_header_t)); + for (i = 0; i < header->ncmds; i++) { if (sgp->cmd == LC_SEGMENT_KERNEL) { - if (sgp->vmaddr + sgp->vmsize > last_addr) + if (sgp->vmaddr + sgp->vmsize > last_addr) { last_addr = sgp->vmaddr + sgp->vmsize; + } } sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); } @@ -73,13 +74,14 @@ getlastaddr(void) * the command. If there is no such load command, NULL is returned. */ void * -getcommandfromheader(kernel_mach_header_t *mhp, uint32_t cmd) { +getcommandfromheader(kernel_mach_header_t *mhp, uint32_t cmd) +{ struct load_command *lcp; unsigned long i; lcp = (struct load_command *) (mhp + 1); - for(i = 0; i < mhp->ncmds; i++){ - if(lcp->cmd == cmd) { + for (i = 0; i < mhp->ncmds; i++) { + if (lcp->cmd == cmd) { return (void *)lcp; } @@ -97,17 +99,17 @@ getcommandfromheader(kernel_mach_header_t *mhp, uint32_t cmd) { void * getuuidfromheader(kernel_mach_header_t *mhp, unsigned long *size) { - struct uuid_command *cmd = (struct uuid_command *) - getcommandfromheader(mhp, LC_UUID); + struct uuid_command *cmd = (struct uuid_command *) + getcommandfromheader(mhp, LC_UUID); - if (cmd != NULL) { - if (size) { - *size = sizeof(cmd->uuid); - } - return cmd->uuid; - } + if (cmd != NULL) { + if (size) { + *size = sizeof(cmd->uuid); + } + return cmd->uuid; + } - return NULL; + return NULL; } /* @@ -120,21 +122,21 @@ getuuidfromheader(kernel_mach_header_t *mhp, unsigned long *size) */ void * getsectdatafromheader( - kernel_mach_header_t *mhp, - const char *segname, - const char *sectname, - unsigned long *size) -{ + kernel_mach_header_t *mhp, + const char *segname, + const char *sectname, + unsigned long *size) +{ const kernel_section_t *sp; void *result; sp = getsectbynamefromheader(mhp, segname, sectname); - if(sp == (kernel_section_t *)0){ - *size = 0; - return((char *)0); + if (sp == (kernel_section_t *)0) { + *size = 0; + return (char *)0; } *size = sp->size; - result = (void *)sp->addr; + result = (void *)sp->addr; return result; } @@ -147,15 +149,15 @@ getsectdatafromheader( */ uint32_t getsectoffsetfromheader( - kernel_mach_header_t *mhp, - const char *segname, - const char *sectname) + kernel_mach_header_t *mhp, + const char *segname, + const char *sectname) { const kernel_section_t *sp; sp = getsectbynamefromheader(mhp, segname, sectname); - if(sp == (kernel_section_t *)0){ - return(0); + if (sp == (kernel_section_t *)0) { + return 0; } return sp->offset; @@ -169,7 +171,7 @@ getsectoffsetfromheader( */ void * getsegdatafromheader( - kernel_mach_header_t *mhp, + kernel_mach_header_t *mhp, const char *segname, unsigned long *size) { @@ -177,9 +179,9 @@ getsegdatafromheader( void *result; sc = getsegbynamefromheader(mhp, segname); - if(sc == (kernel_segment_command_t *)0){ - *size = 0; - return((char *)0); + if (sc == (kernel_segment_command_t *)0) { + *size = 0; + return (char *)0; } *size = sc->vmsize; result = (void *)sc->vmaddr; @@ -195,35 +197,37 @@ getsegdatafromheader( */ kernel_section_t * getsectbynamefromheader( - kernel_mach_header_t 
*mhp, - const char *segname, - const char *sectname) + kernel_mach_header_t *mhp, + const char *segname, + const char *sectname) { kernel_segment_command_t *sgp; kernel_section_t *sp; unsigned long i, j; sgp = (kernel_segment_command_t *) - ((uintptr_t)mhp + sizeof(kernel_mach_header_t)); - for(i = 0; i < mhp->ncmds; i++){ - if(sgp->cmd == LC_SEGMENT_KERNEL) - if(strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0 || - mhp->filetype == MH_OBJECT){ - sp = (kernel_section_t *)((uintptr_t)sgp + - sizeof(kernel_segment_command_t)); - for(j = 0; j < sgp->nsects; j++){ - if(strncmp(sp->sectname, sectname, - sizeof(sp->sectname)) == 0 && - strncmp(sp->segname, segname, - sizeof(sp->segname)) == 0) - return(sp); - sp = (kernel_section_t *)((uintptr_t)sp + - sizeof(kernel_section_t)); - } + ((uintptr_t)mhp + sizeof(kernel_mach_header_t)); + for (i = 0; i < mhp->ncmds; i++) { + if (sgp->cmd == LC_SEGMENT_KERNEL) { + if (strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0 || + mhp->filetype == MH_OBJECT) { + sp = (kernel_section_t *)((uintptr_t)sgp + + sizeof(kernel_segment_command_t)); + for (j = 0; j < sgp->nsects; j++) { + if (strncmp(sp->sectname, sectname, + sizeof(sp->sectname)) == 0 && + strncmp(sp->segname, segname, + sizeof(sp->segname)) == 0) { + return sp; + } + sp = (kernel_section_t *)((uintptr_t)sp + + sizeof(kernel_section_t)); + } + } } - sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); + sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); } - return((kernel_section_t *)NULL); + return (kernel_section_t *)NULL; } /* @@ -231,18 +235,19 @@ getsectbynamefromheader( */ kernel_segment_command_t * getsegbynamefromheader( - kernel_mach_header_t *header, - const char *seg_name) + kernel_mach_header_t *header, + const char *seg_name) { kernel_segment_command_t *sgp; unsigned long i; sgp = (kernel_segment_command_t *) - ((uintptr_t)header + sizeof(kernel_mach_header_t)); - for (i = 0; i < header->ncmds; i++){ - if ( sgp->cmd == LC_SEGMENT_KERNEL - && !strncmp(sgp->segname, seg_name, sizeof(sgp->segname))) + ((uintptr_t)header + sizeof(kernel_mach_header_t)); + for (i = 0; i < header->ncmds; i++) { + if (sgp->cmd == LC_SEGMENT_KERNEL + && !strncmp(sgp->segname, seg_name, sizeof(sgp->segname))) { return sgp; + } sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); } return (kernel_segment_command_t *)NULL; @@ -254,22 +259,23 @@ getsegbynamefromheader( kernel_segment_command_t * firstseg(void) { - return firstsegfromheader(&_mh_execute_header); + return firstsegfromheader(&_mh_execute_header); } kernel_segment_command_t * firstsegfromheader(kernel_mach_header_t *header) { - u_int i = 0; - kernel_segment_command_t *sgp = (kernel_segment_command_t *) - ((uintptr_t)header + sizeof(*header)); - - for (i = 0; i < header->ncmds; i++){ - if (sgp->cmd == LC_SEGMENT_KERNEL) - return sgp; - sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); - } - return (kernel_segment_command_t *)NULL; + u_int i = 0; + kernel_segment_command_t *sgp = (kernel_segment_command_t *) + ((uintptr_t)header + sizeof(*header)); + + for (i = 0; i < header->ncmds; i++) { + if (sgp->cmd == LC_SEGMENT_KERNEL) { + return sgp; + } + sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); + } + return (kernel_segment_command_t *)NULL; } /* @@ -279,30 +285,32 @@ firstsegfromheader(kernel_mach_header_t *header) */ kernel_segment_command_t * nextsegfromheader( - kernel_mach_header_t *header, - kernel_segment_command_t *seg) + kernel_mach_header_t *header, + 
kernel_segment_command_t *seg) { - u_int i = 0; - kernel_segment_command_t *sgp = (kernel_segment_command_t *) - ((uintptr_t)header + sizeof(*header)); + u_int i = 0; + kernel_segment_command_t *sgp = (kernel_segment_command_t *) + ((uintptr_t)header + sizeof(*header)); - /* Find the index of the passed-in segment */ - for (i = 0; sgp != seg && i < header->ncmds; i++) { - sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); - } + /* Find the index of the passed-in segment */ + for (i = 0; sgp != seg && i < header->ncmds; i++) { + sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); + } - /* Increment to the next load command */ - i++; - sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); + /* Increment to the next load command */ + i++; + sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); - /* Return the next segment command, if any */ - for (; i < header->ncmds; i++) { - if (sgp->cmd == LC_SEGMENT_KERNEL) return sgp; + /* Return the next segment command, if any */ + for (; i < header->ncmds; i++) { + if (sgp->cmd == LC_SEGMENT_KERNEL) { + return sgp; + } - sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); - } + sgp = (kernel_segment_command_t *)((uintptr_t)sgp + sgp->cmdsize); + } - return (kernel_segment_command_t *)NULL; + return (kernel_segment_command_t *)NULL; } @@ -313,7 +321,7 @@ nextsegfromheader( kernel_segment_command_t * getsegbyname(const char *seg_name) { - return(getsegbynamefromheader(&_mh_execute_header, seg_name)); + return getsegbynamefromheader(&_mh_execute_header, seg_name); } /* @@ -323,11 +331,11 @@ getsegbyname(const char *seg_name) */ kernel_section_t * getsectbyname( - const char *segname, - const char *sectname) + const char *segname, + const char *sectname) { - return(getsectbynamefromheader( - (kernel_mach_header_t *)&_mh_execute_header, segname, sectname)); + return getsectbynamefromheader( + (kernel_mach_header_t *)&_mh_execute_header, segname, sectname); } /* @@ -339,10 +347,11 @@ getsectbyname( kernel_section_t * firstsect(kernel_segment_command_t *sgp) { - if (!sgp || sgp->nsects == 0) + if (!sgp || sgp->nsects == 0) { return (kernel_section_t *)NULL; + } - return (kernel_section_t *)(sgp+1); + return (kernel_section_t *)(sgp + 1); } /* @@ -356,8 +365,9 @@ nextsect(kernel_segment_command_t *sgp, kernel_section_t *sp) { kernel_section_t *fsp = firstsect(sgp); - if (((uintptr_t)(sp - fsp) + 1) >= sgp->nsects) + if (((uintptr_t)(sp - fsp) + 1) >= sgp->nsects) { return (kernel_section_t *)NULL; + } - return sp+1; + return sp + 1; } diff --git a/libkern/kmod/c_start.c b/libkern/kmod/c_start.c index a859e223f..a694750e4 100644 --- a/libkern/kmod/c_start.c +++ b/libkern/kmod/c_start.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
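The kernel_mach_header.c helpers above (getlastaddr, getsectbynamefromheader, the firstseg/nextseg iterators) all share one loop shape: start just past the mach header and hop from load command to load command by cmdsize. A self-contained user-space sketch of that loop against the 64-bit types from <mach-o/loader.h>, using LC_SEGMENT_64 where the kernel uses its LC_SEGMENT_KERNEL alias:

    #include <mach-o/loader.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Walk an in-memory 64-bit Mach-O header and report each segment. */
    static void
    list_segments(const struct mach_header_64 *mh)
    {
            const struct load_command *lc = (const struct load_command *)(mh + 1);

            for (uint32_t i = 0; i < mh->ncmds; i++) {
                    if (lc->cmd == LC_SEGMENT_64) {
                            const struct segment_command_64 *sg =
                                (const struct segment_command_64 *)lc;
                            /* segname need not be NUL-terminated; bound the print */
                            printf("%-16.16s vmaddr 0x%llx vmsize 0x%llx\n",
                                sg->segname, sg->vmaddr, sg->vmsize);
                    }
                    lc = (const struct load_command *)((uintptr_t)lc + lc->cmdsize);
            }
    }

In the kernel the same loop runs against &_mh_execute_header; in user space it can be driven with, for example, _dyld_get_image_header(0) from <mach-o/dyld.h> cast to the 64-bit header type.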
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,25 +22,25 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. - - If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. - KernelModule.bproj/kmod.make - KernelModule.bproj/CreateKModInfo.perl - KernelModule.bproj/kmodc++/pure.c - KernelModule.bproj/kmodc++/cplus_start.c - KernelModule.bproj/kmodc++/cplus_start.c - KernelModule.bproj/kmodc/c_start.c - KernelModule.bproj/kmodc/c_stop.c - - The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. - - The linkline must look like this. - *.o -lkmodc++ kmod_info.o -lkmod + * Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. + * + * If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. + * KernelModule.bproj/kmod.make + * KernelModule.bproj/CreateKModInfo.perl + * KernelModule.bproj/kmodc++/pure.c + * KernelModule.bproj/kmodc++/cplus_start.c + * KernelModule.bproj/kmodc++/cplus_start.c + * KernelModule.bproj/kmodc/c_start.c + * KernelModule.bproj/kmodc/c_stop.c + * + * The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. + * + * The linkline must look like this. 
+ *.o -lkmodc++ kmod_info.o -lkmod */ #include #include @@ -51,31 +51,36 @@ extern kmod_info_t KMOD_INFO_NAME; /********************************************************************* *********************************************************************/ -__private_extern__ kern_return_t _start(kmod_info_t *ki, void *data) +__private_extern__ kern_return_t +_start(kmod_info_t *ki, void *data) { - if (_realmain) - return (*_realmain)(ki, data); - else - return KERN_SUCCESS; + if (_realmain) { + return (*_realmain)(ki, data); + } else { + return KERN_SUCCESS; + } } /********************************************************************* *********************************************************************/ -__private_extern__ const char * OSKextGetCurrentIdentifier(void) +__private_extern__ const char * +OSKextGetCurrentIdentifier(void) { - return KMOD_INFO_NAME.name; + return KMOD_INFO_NAME.name; } /********************************************************************* *********************************************************************/ -__private_extern__ const char * OSKextGetCurrentVersionString(void) +__private_extern__ const char * +OSKextGetCurrentVersionString(void) { - return KMOD_INFO_NAME.version; + return KMOD_INFO_NAME.version; } /********************************************************************* *********************************************************************/ -__private_extern__ OSKextLoadTag OSKextGetCurrentLoadTag(void) +__private_extern__ OSKextLoadTag +OSKextGetCurrentLoadTag(void) { - return (OSKextLoadTag)KMOD_INFO_NAME.id; + return (OSKextLoadTag)KMOD_INFO_NAME.id; } diff --git a/libkern/kmod/c_stop.c b/libkern/kmod/c_stop.c index d71816041..e78fc71a0 100644 --- a/libkern/kmod/c_stop.c +++ b/libkern/kmod/c_stop.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,37 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. - - If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. 
- KernelModule.bproj/kmod.make - KernelModule.bproj/CreateKModInfo.perl - KernelModule.bproj/kmodc++/pure.c - KernelModule.bproj/kmodc++/cplus_start.c - KernelModule.bproj/kmodc++/cplus_start.c - KernelModule.bproj/kmodc/c_start.c - KernelModule.bproj/kmodc/c_stop.c - - The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. - - The linkline must look like this. - *.o -lkmodc++ kmod_info.o -lkmod + * Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. + * + * If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. + * KernelModule.bproj/kmod.make + * KernelModule.bproj/CreateKModInfo.perl + * KernelModule.bproj/kmodc++/pure.c + * KernelModule.bproj/kmodc++/cplus_start.c + * KernelModule.bproj/kmodc++/cplus_start.c + * KernelModule.bproj/kmodc/c_start.c + * KernelModule.bproj/kmodc/c_stop.c + * + * The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. + * + * The linkline must look like this. + *.o -lkmodc++ kmod_info.o -lkmod */ #include // These global symbols will be defined by CreateInfo script's info.c file. extern kmod_stop_func_t *_antimain; -__private_extern__ kern_return_t _stop(kmod_info_t *ki, void *data) +__private_extern__ kern_return_t +_stop(kmod_info_t *ki, void *data) { - if (_antimain) - return (*_antimain)(ki, data); - else - return KERN_SUCCESS; + if (_antimain) { + return (*_antimain)(ki, data); + } else { + return KERN_SUCCESS; + } } diff --git a/libkern/kmod/cplus_start.c b/libkern/kmod/cplus_start.c index f0b5f0f64..9f23e486c 100644 --- a/libkern/kmod/cplus_start.c +++ b/libkern/kmod/cplus_start.c @@ -2,7 +2,7 @@ * Copyright (c) 2000,2008-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
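The _start/_stop shims above do nothing but forward to _realmain and _antimain, which the generated per-kext glue (CreateKModInfo.perl here; the info.c that kext build targets emit today) points at the kext's own entry points. A sketch of that glue's shape: the com.example identifier and sample_* functions are placeholders, while KMOD_EXPLICIT_DECL and the kmod_start_func_t/kmod_stop_func_t types come from <mach/kmod.h>:

    #include <mach/mach_types.h>
    #include <mach/kmod.h>

    /* The library shims shown above, satisfied by -lkmod (or -lkmodc++
     * when static constructors pull in the C++ _start). */
    extern kern_return_t _start(kmod_info_t *ki, void *data);
    extern kern_return_t _stop(kmod_info_t *ki, void *data);

    /* Placeholder kext entry points; a real kext does setup/teardown here. */
    static kern_return_t
    sample_start(kmod_info_t *ki __unused, void *data __unused)
    {
            return KERN_SUCCESS;
    }

    static kern_return_t
    sample_stop(kmod_info_t *ki __unused, void *data __unused)
    {
            return KERN_SUCCESS;
    }

    /* Builds the kmod_info_t whose start/stop slots reference the shims. */
    KMOD_EXPLICIT_DECL(com.example.sample, "1.0.0", _start, _stop)

    /* The pointers the shims forward through. */
    __private_extern__ kmod_start_func_t *_realmain = sample_start;
    __private_extern__ kmod_stop_func_t *_antimain = sample_stop;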
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,23 +22,23 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. - - If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. - KernelModule.bproj/kmod.make - KernelModule.bproj/CreateKModInfo.perl - KernelModule.bproj/kmodc++/pure.c - KernelModule.bproj/kmodc++/cplus_start.c - KernelModule.bproj/kmodc++/cplus_start.c - KernelModule.bproj/kmodc/c_start.c - KernelModule.bproj/kmodc/c_stop.c - - The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. - - The linkline must look like this. - *.o -lkmodc++ kmod_info.o -lkmod + * Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. + * + * If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. + * KernelModule.bproj/kmod.make + * KernelModule.bproj/CreateKModInfo.perl + * KernelModule.bproj/kmodc++/pure.c + * KernelModule.bproj/kmodc++/cplus_start.c + * KernelModule.bproj/kmodc++/cplus_start.c + * KernelModule.bproj/kmodc/c_start.c + * KernelModule.bproj/kmodc/c_stop.c + * + * The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. + * + * The linkline must look like this. + *.o -lkmodc++ kmod_info.o -lkmod */ diff --git a/libkern/kmod/cplus_stop.c b/libkern/kmod/cplus_stop.c index f0b5f0f64..9f23e486c 100644 --- a/libkern/kmod/cplus_stop.c +++ b/libkern/kmod/cplus_stop.c @@ -2,7 +2,7 @@ * Copyright (c) 2000,2008-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,23 +22,23 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. - - If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. - KernelModule.bproj/kmod.make - KernelModule.bproj/CreateKModInfo.perl - KernelModule.bproj/kmodc++/pure.c - KernelModule.bproj/kmodc++/cplus_start.c - KernelModule.bproj/kmodc++/cplus_start.c - KernelModule.bproj/kmodc/c_start.c - KernelModule.bproj/kmodc/c_stop.c - - The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. - - The linkline must look like this. - *.o -lkmodc++ kmod_info.o -lkmod + * Subtle combination of files and libraries make up the C++ runtime system for kernel modules. We are dependant on the KernelModule kmod.make and CreateKModInfo.perl scripts to be exactly instep with both this library module and the libkmod module as well. + * + * If you do any maintenance on any of the following files make sure great care is taken to keep them in Sync. + * KernelModule.bproj/kmod.make + * KernelModule.bproj/CreateKModInfo.perl + * KernelModule.bproj/kmodc++/pure.c + * KernelModule.bproj/kmodc++/cplus_start.c + * KernelModule.bproj/kmodc++/cplus_start.c + * KernelModule.bproj/kmodc/c_start.c + * KernelModule.bproj/kmodc/c_stop.c + * + * The trick is that the linkline links all of the developers modules. If any static constructors are used .constructors_used will be left as an undefined symbol. This symbol is exported by the cplus_start.c routine which automatically brings in the appropriate C++ _start routine. However the actual _start symbol is only required by the kmod_info structure that is created and initialized by the CreateKModInfo.perl script. If no C++ was used the _start will be an undefined symbol that is finally satisfied by the c_start module in the kmod library. + * + * The linkline must look like this. 
+ *.o -lkmodc++ kmod_info.o -lkmod */ diff --git a/libkern/kmod/libkmodtest/libkmodtest.cpp b/libkern/kmod/libkmodtest/libkmodtest.cpp index 6886cd35f..73c9e96f0 100644 --- a/libkern/kmod/libkmodtest/libkmodtest.cpp +++ b/libkern/kmod/libkmodtest/libkmodtest.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,8 +33,8 @@ OSDefineMetaClassAndStructors(testlibkmod, super); IOService * testlibkmod::probe( - IOService *provider, - SInt32 *score ) + IOService *provider, + SInt32 *score ) { IOLog("%s\n", __PRETTY_FUNCTION__); return NULL; diff --git a/libkern/kmod/libkmodtest/libkmodtest.h b/libkern/kmod/libkmodtest/libkmodtest.h index 3ce25ed12..bfe0e2e27 100644 --- a/libkern/kmod/libkmodtest/libkmodtest.h +++ b/libkern/kmod/libkmodtest/libkmodtest.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,9 +31,8 @@ class testlibkmod : public IOService { OSDeclareDefaultStructors(testlibkmod); - + virtual IOService * probe( - IOService *provider, - SInt32 *score ); - + IOService *provider, + SInt32 *score ); }; diff --git a/libkern/kxld/WKdmCompress.c b/libkern/kxld/WKdmCompress.c index 64a02a58a..6a957427f 100644 --- a/libkern/kxld/WKdmCompress.c +++ b/libkern/kxld/WKdmCompress.c @@ -7,30 +7,29 @@ /* WK_pack_2bits() * Pack some multiple of four words holding two-bit tags (in the low * two bits of each byte) into an integral number of words, i.e., - * one fourth as many. + * one fourth as many. * NOTE: Pad the input out with zeroes to a multiple of four words! */ static WK_word* WK_pack_2bits(WK_word* source_buf, - WK_word* source_end, - WK_word* dest_buf) { - - WK_word* src_next = source_buf; - WK_word* dest_next = dest_buf; - - while (src_next < source_end) { - WK_word temp = src_next[0]; - temp |= (src_next[1] << 2); - temp |= (src_next[2] << 4); - temp |= (src_next[3] << 6); - - dest_next[0] = temp; - dest_next++; - src_next += 4; - } - - return dest_next; + WK_word* source_end, + WK_word* dest_buf) +{ + WK_word* src_next = source_buf; + WK_word* dest_next = dest_buf; + + while (src_next < source_end) { + WK_word temp = src_next[0]; + temp |= (src_next[1] << 2); + temp |= (src_next[2] << 4); + temp |= (src_next[3] << 6); + + dest_next[0] = temp; + dest_next++; + src_next += 4; + } + return dest_next; } /* WK_pack_4bits() @@ -41,23 +40,23 @@ WK_pack_2bits(WK_word* source_buf, static WK_word* WK_pack_4bits(WK_word* source_buf, - WK_word* source_end, - WK_word* dest_buf) { - WK_word* src_next = source_buf; - WK_word* dest_next = dest_buf; - - /* this loop should probably be unrolled */ - while (src_next < source_end) { - WK_word temp = src_next[0]; - temp |= (src_next[1] << 4); - - dest_next[0] = temp; - dest_next++; - src_next += 2; - } - - return dest_next; + WK_word* source_end, + WK_word* dest_buf) +{ + WK_word* src_next = source_buf; + WK_word* dest_next = dest_buf; + + /* this loop should probably be unrolled */ + while (src_next < source_end) { + WK_word temp = src_next[0]; + temp |= (src_next[1] << 4); + dest_next[0] = temp; + dest_next++; + src_next += 2; + } + + return dest_next; } /* pack_3_tenbits() @@ -66,25 +65,24 @@ WK_pack_4bits(WK_word* source_buf, */ static WK_word* WK_pack_3_tenbits(WK_word* source_buf, - WK_word* source_end, - WK_word* dest_buf) { - - WK_word* src_next = source_buf; - WK_word* dest_next = dest_buf; - - /* this loop should probably be unrolled */ - while (src_next < source_end) { - WK_word temp = src_next[0]; - temp |= (src_next[1] << 10); - temp |= (src_next[2] << 20); - - dest_next[0] = temp; - dest_next++; - src_next += 3; - } - - return dest_next; + WK_word* source_end, + WK_word* dest_buf) +{ + WK_word* src_next = source_buf; + WK_word* dest_next = dest_buf; + + /* this loop should probably be unrolled */ + while (src_next < source_end) { + WK_word temp = src_next[0]; + temp |= (src_next[1] << 10); + temp |= (src_next[2] << 20); + dest_next[0] = temp; + dest_next++; + src_next += 3; + } + + return dest_next; } /*************************************************************************** @@ -92,237 +90,226 @@ WK_pack_3_tenbits(WK_word* source_buf, */ unsigned int -WKdm_compress (WK_word* src_buf, - WK_word* dest_buf, - unsigned int num_input_words) +WKdm_compress(WK_word* src_buf, + WK_word* dest_buf, + unsigned int num_input_words) { - DictionaryElement dictionary[DICTIONARY_SIZE]; + 
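WK_pack_2bits above exploits the fact that each source word carries tags only in low bit positions, so four words can be OR-merged into one with three shifts and no masking. A round-trip sketch, simplified to one tag per 32-bit word (the kernel packs one tag into the low two bits of every byte, which the same OR-of-shifts handles byte-wise); WK_word is assumed 32 bits wide, matching the 4KB-page layout:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t WK_word;

    /* Merge four words of 2-bit tags into one; the caller pads the input
     * with zeroes to a multiple of four words, as the compressor does. */
    static WK_word *
    pack_2bits(const WK_word *src, const WK_word *end, WK_word *dst)
    {
            while (src < end) {
                    *dst++ = src[0] | (src[1] << 2) | (src[2] << 4) | (src[3] << 6);
                    src += 4;
            }
            return dst;
    }

    int
    main(void)
    {
            WK_word tags[4] = { 0, 1, 2, 3 };
            WK_word packed[1];

            pack_2bits(tags, tags + 4, packed);
            for (int i = 0; i < 4; i++) {
                    assert(((packed[0] >> (2 * i)) & 0x3) == tags[i]);
            }
            return 0;
    }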
DictionaryElement dictionary[DICTIONARY_SIZE]; - /* arrays that hold output data in intermediate form during modeling */ - /* and whose contents are packed into the actual output after modeling */ + /* arrays that hold output data in intermediate form during modeling */ + /* and whose contents are packed into the actual output after modeling */ - /* sizes of these arrays should be increased if you want to compress - * pages larger than 4KB - */ - WK_word tempTagsArray[300]; /* tags for everything */ - WK_word tempQPosArray[300]; /* queue positions for matches */ - WK_word tempLowBitsArray[1200]; /* low bits for partial matches */ + /* sizes of these arrays should be increased if you want to compress + * pages larger than 4KB + */ + WK_word tempTagsArray[300]; /* tags for everything */ + WK_word tempQPosArray[300]; /* queue positions for matches */ + WK_word tempLowBitsArray[1200]; /* low bits for partial matches */ - /* boundary_tmp will be used for keeping track of what's where in - * the compressed page during packing - */ - WK_word* boundary_tmp; + /* boundary_tmp will be used for keeping track of what's where in + * the compressed page during packing + */ + WK_word* boundary_tmp; - /* Fill pointers for filling intermediate arrays (of queue positions - * and low bits) during encoding. - * Full words go straight to the destination buffer area reserved - * for them. (Right after where the tags go.) - */ - WK_word* next_full_patt; - char* next_tag = (char *) tempTagsArray; - char* next_qp = (char *) tempQPosArray; - WK_word* next_low_bits = tempLowBitsArray; + /* Fill pointers for filling intermediate arrays (of queue positions + * and low bits) during encoding. + * Full words go straight to the destination buffer area reserved + * for them. (Right after where the tags go.) + */ + WK_word* next_full_patt; + char* next_tag = (char *) tempTagsArray; + char* next_qp = (char *) tempQPosArray; + WK_word* next_low_bits = tempLowBitsArray; - WK_word* next_input_word = src_buf; - WK_word* end_of_input = src_buf + num_input_words; + WK_word* next_input_word = src_buf; + WK_word* end_of_input = src_buf + num_input_words; - PRELOAD_DICTIONARY; + PRELOAD_DICTIONARY; - next_full_patt = dest_buf + TAGS_AREA_OFFSET + (num_input_words / 16); + next_full_patt = dest_buf + TAGS_AREA_OFFSET + (num_input_words / 16); #ifdef WK_DEBUG - printf("\nIn WKdm_compress\n"); - printf("About to actually compress, src_buf is %u\n", src_buf); - printf("dictionary is at %u\n", dictionary); - printf("dest_buf is %u next_full_patt is %u\n", dest_buf, next_full_patt); - fflush(stdout); + printf("\nIn WKdm_compress\n"); + printf("About to actually compress, src_buf is %u\n", src_buf); + printf("dictionary is at %u\n", dictionary); + printf("dest_buf is %u next_full_patt is %u\n", dest_buf, next_full_patt); + fflush(stdout); #endif - while (next_input_word < end_of_input) - { - WK_word *dict_location; - WK_word dict_word; - WK_word input_word = *next_input_word; - - /* compute hash value, which is a byte offset into the dictionary, - * and add it to the base address of the dictionary. 
Cast back and - * forth to/from char * so no shifts are needed - */ - dict_location = - (WK_word *) - ((void*) (((char*) dictionary) + HASH_TO_DICT_BYTE_OFFSET(input_word))); - - dict_word = *dict_location; - - if (input_word == dict_word) - { - RECORD_EXACT(dict_location - dictionary); - } - else if (input_word == 0) { - RECORD_ZERO; - } - else - { - WK_word input_high_bits = HIGH_BITS(input_word); - if (input_high_bits == HIGH_BITS(dict_word)) { - RECORD_PARTIAL(dict_location - dictionary, LOW_BITS(input_word)); - *dict_location = input_word; - } - else { - RECORD_MISS(input_word); - *dict_location = input_word; - } - } - next_input_word++; - } + while (next_input_word < end_of_input) { + WK_word *dict_location; + WK_word dict_word; + WK_word input_word = *next_input_word; + + /* compute hash value, which is a byte offset into the dictionary, + * and add it to the base address of the dictionary. Cast back and + * forth to/from char * so no shifts are needed + */ + dict_location = + (WK_word *) + ((void*) (((char*) dictionary) + HASH_TO_DICT_BYTE_OFFSET(input_word))); + + dict_word = *dict_location; + + if (input_word == dict_word) { + RECORD_EXACT(dict_location - dictionary); + } else if (input_word == 0) { + RECORD_ZERO; + } else { + WK_word input_high_bits = HIGH_BITS(input_word); + if (input_high_bits == HIGH_BITS(dict_word)) { + RECORD_PARTIAL(dict_location - dictionary, LOW_BITS(input_word)); + *dict_location = input_word; + } else { + RECORD_MISS(input_word); + *dict_location = input_word; + } + } + next_input_word++; + } #ifdef WK_DEBUG - printf("AFTER MODELING in WKdm_compress()\n"); fflush(stdout); - printf("tempTagsArray holds %u bytes\n", - next_tag - (char *) tempTagsArray); - printf("tempQPosArray holds %u bytes\n", - next_qp - (char *) tempQPosArray); - printf("tempLowBitsArray holds %u bytes\n", - (char *) next_low_bits - (char *) tempLowBitsArray); - - printf("next_full_patt is %p\n", - next_full_patt); - - printf(" i.e., there are %u full patterns\n", - next_full_patt - (dest_buf + TAGS_AREA_OFFSET + (num_input_words / 16))); - fflush(stdout); - - { int i; - WK_word *arr =(dest_buf + TAGS_AREA_OFFSET + (num_input_words / 16)); - - printf(" first 20 full patterns are: \n"); - for (i = 0; i < 20; i++) { - printf(" %d", arr[i]); - } - printf("\n"); - } + printf("AFTER MODELING in WKdm_compress()\n"); fflush(stdout); + printf("tempTagsArray holds %u bytes\n", + next_tag - (char *) tempTagsArray); + printf("tempQPosArray holds %u bytes\n", + next_qp - (char *) tempQPosArray); + printf("tempLowBitsArray holds %u bytes\n", + (char *) next_low_bits - (char *) tempLowBitsArray); + + printf("next_full_patt is %p\n", + next_full_patt); + + printf(" i.e., there are %u full patterns\n", + next_full_patt - (dest_buf + TAGS_AREA_OFFSET + (num_input_words / 16))); + fflush(stdout); + + { int i; + WK_word *arr = (dest_buf + TAGS_AREA_OFFSET + (num_input_words / 16)); + + printf(" first 20 full patterns are: \n"); + for (i = 0; i < 20; i++) { + printf(" %d", arr[i]); + } + printf("\n");} #endif - /* Record (into the header) where we stopped writing full words, - * which is where we will pack the queue positions. (Recall - * that we wrote the full words directly into the dest buffer - * during modeling. - */ + /* Record (into the header) where we stopped writing full words, + * which is where we will pack the queue positions. (Recall + * that we wrote the full words directly into the dest buffer + * during modeling. 
+ */ - SET_QPOS_AREA_START(dest_buf,next_full_patt); + SET_QPOS_AREA_START(dest_buf, next_full_patt); - /* Pack the tags into the tags area, between the page header - * and the full words area. We don't pad for the packer - * because we assume that the page size is a multiple of 16. - */ + /* Pack the tags into the tags area, between the page header + * and the full words area. We don't pad for the packer + * because we assume that the page size is a multiple of 16. + */ #ifdef WK_DEBUG - printf("about to pack %u bytes holding tags\n", - next_tag - (char *) tempTagsArray); - - { int i; - char* arr = (char *) tempTagsArray; - - printf(" first 200 tags are: \n"); - for (i = 0; i < 200; i++) { - printf(" %d", arr[i]); - } - printf("\n"); - } -#endif + printf("about to pack %u bytes holding tags\n", + next_tag - (char *) tempTagsArray); - boundary_tmp = WK_pack_2bits(tempTagsArray, - (WK_word *) ((void *) next_tag), - dest_buf + HEADER_SIZE_IN_WORDS); + { int i; + char* arr = (char *) tempTagsArray; -#ifdef WK_DEBUG - printf("packing tags stopped at %u\n", boundary_tmp); + printf(" first 200 tags are: \n"); + for (i = 0; i < 200; i++) { + printf(" %d", arr[i]); + } + printf("\n");} #endif - - /* Pack the queue positions into the area just after - * the full words. We have to round up the source - * region to a multiple of two words. - */ - - { - unsigned int num_bytes_to_pack = (unsigned int)(next_qp - (char *) tempQPosArray); - unsigned int num_packed_words = (num_bytes_to_pack + 7) >> 3; // ceil((double) num_bytes_to_pack / 8); - unsigned int num_source_words = num_packed_words * 2; - WK_word* endQPosArray = tempQPosArray + num_source_words; - - /* Pad out the array with zeros to avoid corrupting real packed - values. */ - for (; /* next_qp is already set as desired */ - next_qp < (char*)endQPosArray; - next_qp++) { - *next_qp = 0; - } - -#ifdef WK_DEBUG - printf("about to pack %u (bytes holding) queue posns.\n", - num_bytes_to_pack); - printf("packing them from %u words into %u words\n", - num_source_words, num_packed_words); - printf("dest is range %u to %u\n", - next_full_patt, next_full_patt + num_packed_words); - { int i; - char *arr = (char *) tempQPosArray; - printf(" first 200 queue positions are: \n"); - for (i = 0; i < 200; i++) { - printf(" %d", arr[i]); - } - printf("\n"); - } -#endif - - boundary_tmp = WK_pack_4bits(tempQPosArray, - endQPosArray, - next_full_patt); -#ifdef WK_DEBUG - printf("Packing of queue positions stopped at %u\n", boundary_tmp); -#endif // WK_DEBUG - /* Record (into the header) where we stopped packing queue positions, - * which is where we will start packing low bits. - */ - SET_LOW_BITS_AREA_START(dest_buf,boundary_tmp); + boundary_tmp = WK_pack_2bits(tempTagsArray, + (WK_word *) ((void *) next_tag), + dest_buf + HEADER_SIZE_IN_WORDS); - } +#ifdef WK_DEBUG + printf("packing tags stopped at %u\n", boundary_tmp); +#endif - /* Pack the low bit patterns into the area just after - * the queue positions. We have to round up the source - * region to a multiple of three words. - */ + /* Pack the queue positions into the area just after + * the full words. We have to round up the source + * region to a multiple of two words. 
+ */ + + { + unsigned int num_bytes_to_pack = (unsigned int)(next_qp - (char *) tempQPosArray); + unsigned int num_packed_words = (num_bytes_to_pack + 7) >> 3; // ceil((double) num_bytes_to_pack / 8); + unsigned int num_source_words = num_packed_words * 2; + WK_word* endQPosArray = tempQPosArray + num_source_words; + + /* Pad out the array with zeros to avoid corrupting real packed + * values. */ + for (; /* next_qp is already set as desired */ + next_qp < (char*)endQPosArray; + next_qp++) { + *next_qp = 0; + } - { - unsigned int num_tenbits_to_pack = - (unsigned int)(next_low_bits - tempLowBitsArray); - unsigned int num_packed_words = (num_tenbits_to_pack + 2) / 3; //ceil((double) num_tenbits_to_pack / 3); - unsigned int num_source_words = num_packed_words * 3; - WK_word* endLowBitsArray = tempLowBitsArray + num_source_words; +#ifdef WK_DEBUG + printf("about to pack %u (bytes holding) queue posns.\n", + num_bytes_to_pack); + printf("packing them from %u words into %u words\n", + num_source_words, num_packed_words); + printf("dest is range %u to %u\n", + next_full_patt, next_full_patt + num_packed_words); + { int i; + char *arr = (char *) tempQPosArray; + printf(" first 200 queue positions are: \n"); + for (i = 0; i < 200; i++) { + printf(" %d", arr[i]); + } + printf("\n");} +#endif - /* Pad out the array with zeros to avoid corrupting real packed - values. */ + boundary_tmp = WK_pack_4bits(tempQPosArray, + endQPosArray, + next_full_patt); +#ifdef WK_DEBUG + printf("Packing of queue positions stopped at %u\n", boundary_tmp); +#endif // WK_DEBUG - for (; /* next_low_bits is already set as desired */ - next_low_bits < endLowBitsArray; - next_low_bits++) { - *next_low_bits = 0; - } + /* Record (into the header) where we stopped packing queue positions, + * which is where we will start packing low bits. + */ + SET_LOW_BITS_AREA_START(dest_buf, boundary_tmp); + } + + /* Pack the low bit patterns into the area just after + * the queue positions. We have to round up the source + * region to a multiple of three words. + */ + + { + unsigned int num_tenbits_to_pack = + (unsigned int)(next_low_bits - tempLowBitsArray); + unsigned int num_packed_words = (num_tenbits_to_pack + 2) / 3; //ceil((double) num_tenbits_to_pack / 3); + unsigned int num_source_words = num_packed_words * 3; + WK_word* endLowBitsArray = tempLowBitsArray + num_source_words; + + /* Pad out the array with zeros to avoid corrupting real packed + * values. 
*/ + + for (; /* next_low_bits is already set as desired */ + next_low_bits < endLowBitsArray; + next_low_bits++) { + *next_low_bits = 0; + } #ifdef WK_DEBUG - printf("about to pack low bits\n"); - printf("num_tenbits_to_pack is %u\n", num_tenbits_to_pack); - printf("endLowBitsArray is %u\n", endLowBitsArray); + printf("about to pack low bits\n"); + printf("num_tenbits_to_pack is %u\n", num_tenbits_to_pack); + printf("endLowBitsArray is %u\n", endLowBitsArray); #endif - - boundary_tmp = WK_pack_3_tenbits (tempLowBitsArray, - endLowBitsArray, - boundary_tmp); - SET_LOW_BITS_AREA_END(dest_buf,boundary_tmp); + boundary_tmp = WK_pack_3_tenbits(tempLowBitsArray, + endLowBitsArray, + boundary_tmp); - } + SET_LOW_BITS_AREA_END(dest_buf, boundary_tmp); + } - return (unsigned int)((char *) boundary_tmp - (char *) dest_buf); -} + return (unsigned int)((char *) boundary_tmp - (char *) dest_buf); +} diff --git a/libkern/kxld/WKdmDecompress.c b/libkern/kxld/WKdmDecompress.c index 6e27b62f3..9af3ab95a 100644 --- a/libkern/kxld/WKdmDecompress.c +++ b/libkern/kxld/WKdmDecompress.c @@ -5,7 +5,7 @@ * THE UNPACKING ROUTINES should GO HERE */ -const char hashLookupTable [] = HASH_LOOKUP_TABLE_CONTENTS; +const char hashLookupTable[] = HASH_LOOKUP_TABLE_CONTENTS; #if 0 #define GET_NEXT_TAG tags[tagsIndex++] @@ -21,31 +21,30 @@ const char hashLookupTable [] = HASH_LOOKUP_TABLE_CONTENTS; */ static WK_word* WK_unpack_2bits(WK_word *input_buf, - WK_word *input_end, - WK_word *output_buf) { - - WK_word *input_next = input_buf; - WK_word *output_next = output_buf; - WK_word packing_mask = TWO_BITS_PACKING_MASK; - - /* loop to repeatedly grab one input word and unpack it into - * 4 output words. This loop could be unrolled a little---it's - * designed to be easy to do that. - */ - while (input_next < input_end) { - WK_word temp = input_next[0]; - DEBUG_PRINT_2("Unpacked tags word: %.8x\n", temp); - output_next[0] = temp & packing_mask; - output_next[1] = (temp >> 2) & packing_mask; - output_next[2] = (temp >> 4) & packing_mask; - output_next[3] = (temp >> 6) & packing_mask; - - output_next += 4; - input_next++; - } - - return output_next; - + WK_word *input_end, + WK_word *output_buf) +{ + WK_word *input_next = input_buf; + WK_word *output_next = output_buf; + WK_word packing_mask = TWO_BITS_PACKING_MASK; + + /* loop to repeatedly grab one input word and unpack it into + * 4 output words. This loop could be unrolled a little---it's + * designed to be easy to do that. + */ + while (input_next < input_end) { + WK_word temp = input_next[0]; + DEBUG_PRINT_2("Unpacked tags word: %.8x\n", temp); + output_next[0] = temp & packing_mask; + output_next[1] = (temp >> 2) & packing_mask; + output_next[2] = (temp >> 4) & packing_mask; + output_next[3] = (temp >> 6) & packing_mask; + + output_next += 4; + input_next++; + } + + return output_next; } /* unpack four bits consumes any number of words (between input_buf @@ -56,30 +55,29 @@ WK_unpack_2bits(WK_word *input_buf, */ static WK_word* WK_unpack_4bits(WK_word *input_buf, - WK_word *input_end, - WK_word *output_buf) { - - WK_word *input_next = input_buf; - WK_word *output_next = output_buf; - WK_word packing_mask = FOUR_BITS_PACKING_MASK; - - - /* loop to repeatedly grab one input word and unpack it into - * 4 output words. This loop should probably be unrolled - * a little---it's designed to be easy to do that. 
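WK_unpack_2bits above is the exact inverse of the compressor's WK_pack_2bits, and the same holds for the 4-bit and ten-bit pairs. For the ten-bit case the capacity arithmetic is simple: three 10-bit items fit in the low 30 bits of a 32-bit word, so n items need (n + 2) / 3 words, the ceiling computation WKdm_compress uses before padding. A round-trip sketch under the same 32-bit WK_word assumption:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t WK_word;

    #define LOW_BITS_MASK ((1u << 10) - 1)

    static WK_word
    pack_3_tenbits(const WK_word t[3])
    {
            return t[0] | (t[1] << 10) | (t[2] << 20);
    }

    static void
    unpack_3_tenbits(WK_word w, WK_word t[3])
    {
            t[0] = w & LOW_BITS_MASK;
            t[1] = (w >> 10) & LOW_BITS_MASK;
            t[2] = w >> 20;              /* top two bits are always zero */
    }

    int
    main(void)
    {
            WK_word in[3] = { 0x3ff, 0x155, 0x2aa }, out[3];

            assert((7 + 2) / 3 == 3);    /* 7 items -> ceil(7/3) = 3 words */

            unpack_3_tenbits(pack_3_tenbits(in), out);
            for (int i = 0; i < 3; i++) {
                    assert(out[i] == in[i]);
            }
            return 0;
    }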
- */ - while (input_next < input_end) { - WK_word temp = input_next[0]; - DEBUG_PRINT_2("Unpacked dictionary indices word: %.8x\n", temp); - output_next[0] = temp & packing_mask; - output_next[1] = (temp >> 4) & packing_mask; - - output_next += 2; - input_next++; - } - - return output_next; - + WK_word *input_end, + WK_word *output_buf) +{ + WK_word *input_next = input_buf; + WK_word *output_next = output_buf; + WK_word packing_mask = FOUR_BITS_PACKING_MASK; + + + /* loop to repeatedly grab one input word and unpack it into + * 4 output words. This loop should probably be unrolled + * a little---it's designed to be easy to do that. + */ + while (input_next < input_end) { + WK_word temp = input_next[0]; + DEBUG_PRINT_2("Unpacked dictionary indices word: %.8x\n", temp); + output_next[0] = temp & packing_mask; + output_next[1] = (temp >> 4) & packing_mask; + + output_next += 2; + input_next++; + } + + return output_next; } /* unpack_3_tenbits unpacks three 10-bit items from (the low 30 bits of) @@ -87,197 +85,190 @@ WK_unpack_4bits(WK_word *input_buf, */ static WK_word* WK_unpack_3_tenbits(WK_word *input_buf, - WK_word *input_end, - WK_word *output_buf) { - - WK_word *input_next = input_buf; - WK_word *output_next = output_buf; - WK_word packing_mask = LOW_BITS_MASK; - - /* loop to fetch words of input, splitting each into three - * words of output with 10 meaningful low bits. This loop - * probably ought to be unrolled and maybe coiled - */ - while (input_next < input_end) { - WK_word temp = input_next[0]; - - output_next[0] = temp & packing_mask; - output_next[1] = (temp >> 10) & packing_mask; - output_next[2] = temp >> 20; - - input_next++; - output_next += 3; - } - - return output_next; - + WK_word *input_end, + WK_word *output_buf) +{ + WK_word *input_next = input_buf; + WK_word *output_next = output_buf; + WK_word packing_mask = LOW_BITS_MASK; + + /* loop to fetch words of input, splitting each into three + * words of output with 10 meaningful low bits. This loop + * probably ought to be unrolled and maybe coiled + */ + while (input_next < input_end) { + WK_word temp = input_next[0]; + + output_next[0] = temp & packing_mask; + output_next[1] = (temp >> 10) & packing_mask; + output_next[2] = temp >> 20; + + input_next++; + output_next += 3; + } + + return output_next; } /********************************************************************* - * WKdm_decompress --- THE DECOMPRESSOR + * WKdm_decompress --- THE DECOMPRESSOR * Expects WORD pointers to the source and destination buffers - * and a page size in words. The page size had better be 1024 unless - * somebody finds the places that are dependent on the page size and + * and a page size in words. 
The page size had better be 1024 unless + * somebody finds the places that are dependent on the page size and * fixes them */ void -WKdm_decompress (WK_word* src_buf, - WK_word* dest_buf, - __unused unsigned int words) +WKdm_decompress(WK_word* src_buf, + WK_word* dest_buf, + __unused unsigned int words) { + DictionaryElement dictionary[DICTIONARY_SIZE]; - DictionaryElement dictionary[DICTIONARY_SIZE]; + /* arrays that hold output data in intermediate form during modeling */ + /* and whose contents are packed into the actual output after modeling */ - /* arrays that hold output data in intermediate form during modeling */ - /* and whose contents are packed into the actual output after modeling */ + /* sizes of these arrays should be increased if you want to compress + * pages larger than 4KB + */ + WK_word tempTagsArray[300]; /* tags for everything */ + WK_word tempQPosArray[300]; /* queue positions for matches */ + WK_word tempLowBitsArray[1200]; /* low bits for partial matches */ - /* sizes of these arrays should be increased if you want to compress - * pages larger than 4KB - */ - WK_word tempTagsArray[300]; /* tags for everything */ - WK_word tempQPosArray[300]; /* queue positions for matches */ - WK_word tempLowBitsArray[1200]; /* low bits for partial matches */ - - PRELOAD_DICTIONARY; + PRELOAD_DICTIONARY; #ifdef WK_DEBUG - printf("\nIn DECOMPRESSOR\n"); - printf("tempTagsArray is at %p\n", tempTagsArray); - printf("tempQPosArray is at %p\n", tempQPosArray); - printf("tempLowBitsArray is at %p\n", tempLowBitsArray); - - printf(" first four words of source buffer are:\n"); - printf(" %u\n %u\n %u\n %u\n", - src_buf[0], src_buf[1], src_buf[2], src_buf[3]); - - { int i; - WK_word *arr =(src_buf + TAGS_AREA_OFFSET + (PAGE_SIZE_IN_WORDS / 16)); - - printf(" first 20 full patterns are: \n"); - for (i = 0; i < 20; i++) { - printf(" %d", arr[i]); - } - printf("\n"); - } + printf("\nIn DECOMPRESSOR\n"); + printf("tempTagsArray is at %p\n", tempTagsArray); + printf("tempQPosArray is at %p\n", tempQPosArray); + printf("tempLowBitsArray is at %p\n", tempLowBitsArray); + + printf(" first four words of source buffer are:\n"); + printf(" %u\n %u\n %u\n %u\n", + src_buf[0], src_buf[1], src_buf[2], src_buf[3]); + + { int i; + WK_word *arr = (src_buf + TAGS_AREA_OFFSET + (PAGE_SIZE_IN_WORDS / 16)); + + printf(" first 20 full patterns are: \n"); + for (i = 0; i < 20; i++) { + printf(" %d", arr[i]); + } + printf("\n");} #endif - WK_unpack_2bits(TAGS_AREA_START(src_buf), - TAGS_AREA_END(src_buf), - tempTagsArray); + WK_unpack_2bits(TAGS_AREA_START(src_buf), + TAGS_AREA_END(src_buf), + tempTagsArray); #ifdef WK_DEBUG - { int i; - char* arr = (char *) tempTagsArray; - - printf(" first 200 tags are: \n"); - for (i = 0; i < 200; i++) { - printf(" %d", arr[i]); - } - printf("\n"); - } + { int i; + char* arr = (char *) tempTagsArray; + + printf(" first 200 tags are: \n"); + for (i = 0; i < 200; i++) { + printf(" %d", arr[i]); + } + printf("\n");} #endif - WK_unpack_4bits(QPOS_AREA_START(src_buf), - QPOS_AREA_END(src_buf), - tempQPosArray); + WK_unpack_4bits(QPOS_AREA_START(src_buf), + QPOS_AREA_END(src_buf), + tempQPosArray); #ifdef WK_DEBUG - { int i; - char* arr = (char *) tempQPosArray; - - printf(" first 200 queue positions are: \n"); - for (i = 0; i < 200; i++) { - printf(" %d", arr[i]); - } - printf("\n"); - } + { int i; + char* arr = (char *) tempQPosArray; + + printf(" first 200 queue positions are: \n"); + for (i = 0; i < 200; i++) { + printf(" %d", arr[i]); + } + printf("\n");} #endif - 
WK_unpack_3_tenbits(LOW_BITS_AREA_START(src_buf), - LOW_BITS_AREA_END(src_buf), - tempLowBitsArray); + WK_unpack_3_tenbits(LOW_BITS_AREA_START(src_buf), + LOW_BITS_AREA_END(src_buf), + tempLowBitsArray); #ifdef WK_DEBUG - printf("AFTER UNPACKING, about to enter main block \n"); + printf("AFTER UNPACKING, about to enter main block \n"); #endif - { - char *next_tag = (char *) tempTagsArray; - char *tags_area_end = - ((char *) tempTagsArray) + PAGE_SIZE_IN_WORDS; - char *next_q_pos = (char *) tempQPosArray; - WK_word *next_low_bits = tempLowBitsArray; - WK_word *next_full_word = FULL_WORD_AREA_START(src_buf); + { + char *next_tag = (char *) tempTagsArray; + char *tags_area_end = + ((char *) tempTagsArray) + PAGE_SIZE_IN_WORDS; + char *next_q_pos = (char *) tempQPosArray; + WK_word *next_low_bits = tempLowBitsArray; + WK_word *next_full_word = FULL_WORD_AREA_START(src_buf); + + WK_word *next_output = dest_buf; + +#ifdef WK_DEBUG + printf("next_output is %u\n", next_output); + + printf("next_tag is %u \n", next_tag); + printf("tags_area_end is %u\n", tags_area_end); + printf("next_q_pos is %u\n", next_q_pos); + printf("next_low_bits is %u\n", next_low_bits); + printf("next_full_word is %u\n", next_full_word); +#endif - WK_word *next_output = dest_buf; + /* this loop should probably be unrolled. Maybe we should unpack + * as 4 bit values, giving two consecutive tags, and switch on + * that 16 ways to decompress 2 words at a whack + */ + while (next_tag < tags_area_end) { + char tag = next_tag[0]; + + switch (tag) { + case ZERO_TAG: { + *next_output = 0; + break; + } + case EXACT_TAG: { + WK_word *dict_location = dictionary + *(next_q_pos++); + /* no need to replace dict. entry if matched exactly */ + *next_output = *dict_location; + break; + } + case PARTIAL_TAG: { + WK_word *dict_location = dictionary + *(next_q_pos++); + { + WK_word temp = *dict_location; + + /* strip out low bits */ + temp = ((temp >> NUM_LOW_BITS) << NUM_LOW_BITS); + + /* add in stored low bits from temp array */ + temp = temp | *(next_low_bits++); + + *dict_location = temp; /* replace old value in dict. */ + *next_output = temp; /* and echo it to output */ + } + break; + } + case MISS_TAG: { + WK_word missed_word = *(next_full_word++); + WK_word *dict_location = + (WK_word *) + ((void *) (((char *) dictionary) + HASH_TO_DICT_BYTE_OFFSET(missed_word))); + *dict_location = missed_word; + *next_output = missed_word; + break; + } + } + next_tag++; + next_output++; + } #ifdef WK_DEBUG - printf("next_output is %u\n", next_output); - - printf("next_tag is %u \n", next_tag); - printf("tags_area_end is %u\n", tags_area_end); - printf("next_q_pos is %u\n", next_q_pos); - printf("next_low_bits is %u\n", next_low_bits); - printf("next_full_word is %u\n", next_full_word); -#endif - - /* this loop should probably be unrolled. Maybe we should unpack - * as 4 bit values, giving two consecutive tags, and switch on - * that 16 ways to decompress 2 words at a whack - */ - while (next_tag < tags_area_end) { - - char tag = next_tag[0]; - - switch(tag) { - - case ZERO_TAG: { - *next_output = 0; - break; - } - case EXACT_TAG: { - WK_word *dict_location = dictionary + *(next_q_pos++); - /* no need to replace dict. 
entry if matched exactly */ - *next_output = *dict_location; - break; - } - case PARTIAL_TAG: { - WK_word *dict_location = dictionary + *(next_q_pos++); - { - WK_word temp = *dict_location; - - /* strip out low bits */ - temp = ((temp >> NUM_LOW_BITS) << NUM_LOW_BITS); - - /* add in stored low bits from temp array */ - temp = temp | *(next_low_bits++); - - *dict_location = temp; /* replace old value in dict. */ - *next_output = temp; /* and echo it to output */ - } - break; - } - case MISS_TAG: { - WK_word missed_word = *(next_full_word++); - WK_word *dict_location = - (WK_word *) - ((void *) (((char *) dictionary) + HASH_TO_DICT_BYTE_OFFSET(missed_word))); - *dict_location = missed_word; - *next_output = missed_word; - break; - } - } - next_tag++; - next_output++; - } - -#ifdef WK_DEBUG - printf("AFTER DECOMPRESSING\n"); - printf("next_output is %p\n", next_output); - printf("next_tag is %p\n", next_tag); - printf("next_full_word is %p\n", next_full_word); - printf("next_q_pos is %p\n", next_q_pos); + printf("AFTER DECOMPRESSING\n"); + printf("next_output is %p\n", next_output); + printf("next_tag is %p\n", next_tag); + printf("next_full_word is %p\n", next_full_word); + printf("next_q_pos is %p\n", next_q_pos); #endif - } + } } diff --git a/libkern/kxld/kxld.c b/libkern/kxld/kxld.c index a98a897f2..b53725676 100644 --- a/libkern/kxld/kxld.c +++ b/libkern/kxld/kxld.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008, 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
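Closing out the WKdm hunk above before the kxld changes: the PARTIAL_TAG case is the heart of the decompressor, so a worked example may help. It assumes NUM_LOW_BITS == 10, the value used in the WKdm design, and a WK_word type in scope; the numbers are invented for illustration:

static WK_word
partial_tag_example(void)
{
	WK_word dict_word = 0x12345678;         /* current dictionary entry */
	WK_word low_bits  = 0x2A1;              /* next value from tempLowBitsArray */
	WK_word high = (dict_word >> 10) << 10; /* keep matched high bits: 0x12345400 */
	return high | low_bits;                 /* reconstructed word: 0x123456A1 */
}

The result is echoed to the output page and written back into the dictionary slot; MISS_TAG similarly hashes the missed word (HASH_TO_DICT_BYTE_OFFSET) to pick which dictionary slot to replace.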
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -58,18 +58,18 @@ struct kxld_vtable; struct kxld_context { - KXLDKext *kext; - KXLDArray *section_order; - KXLDArray objects; - KXLDArray dependencies; - KXLDDict defined_symbols_by_name; - KXLDDict defined_cxx_symbols_by_value; - KXLDDict obsolete_symbols_by_name; - KXLDDict vtables_by_name; - KXLDFlags flags; - KXLDAllocateCallback allocate_callback; - cpu_type_t cputype; - cpu_subtype_t cpusubtype; + KXLDKext *kext; + KXLDArray *section_order; + KXLDArray objects; + KXLDArray dependencies; + KXLDDict defined_symbols_by_name; + KXLDDict defined_cxx_symbols_by_value; + KXLDDict obsolete_symbols_by_name; + KXLDDict vtables_by_name; + KXLDFlags flags; + KXLDAllocateCallback allocate_callback; + cpu_type_t cputype; + cpu_subtype_t cpusubtype; }; // set to TRUE if the kext has a vmaddr_TEXT_EXEC != 0 @@ -88,8 +88,8 @@ uint32_t kaslr_offsets_index = 0; /* Certain architectures alter the order of a kext's sections from its input * binary, so we track that order in a dictionary of arrays, with one array for * each architecture. Since the kernel only has one architecture, we can - * eliminate the dictionary and use a simple array. - * XXX: If we ever use the linker in a multithreaded environment, we will need + * eliminate the dictionary and use a simple array. + * XXX: If we ever use the linker in a multithreaded environment, we will need * locks around these global structures. */ #if KXLD_USER_OR_OBJECT @@ -109,115 +109,120 @@ static KXLDObject * get_object_for_file(KXLDContext *context, u_char *file, u_long size, const char *name); static kern_return_t allocate_split_kext(KXLDContext *context, splitKextLinkInfo * link_info); static u_char * allocate_kext(KXLDContext *context, void *callback_data, - kxld_addr_t *vmaddr, u_long *vmsize, u_char **linked_object_alloc_out); + kxld_addr_t *vmaddr, u_long *vmsize, u_char **linked_object_alloc_out); static kern_return_t init_kext_objects(KXLDContext *context, u_char *file, - u_long size, const char *name, KXLDDependency *dependencies, - u_int ndependencies); + u_long size, const char *name, KXLDDependency *dependencies, + u_int ndependencies); static void clear_context(KXLDContext *context); /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_create_context(KXLDContext **_context, +kxld_create_context(KXLDContext **_context, KXLDAllocateCallback allocate_callback, KXLDLoggingCallback logging_callback, KXLDFlags flags, cpu_type_t cputype, cpu_subtype_t cpusubtype, vm_size_t pagesize __KXLD_KERNEL_UNUSED) { - kern_return_t rval = KERN_FAILURE; - KXLDContext * context = NULL; - KXLDArray * section_order = NULL; + kern_return_t rval = KERN_FAILURE; + KXLDContext * context = NULL; + KXLDArray * section_order = NULL; #if !KERNEL - cpu_type_t * cputype_p = NULL; + cpu_type_t * cputype_p = NULL; #endif - check(_context); - if (isOldInterface) { - check(allocate_callback); - } - check(logging_callback); - *_context = NULL; + check(_context); + if (isOldInterface) { + check(allocate_callback); + } + check(logging_callback); + *_context = NULL; - context = kxld_alloc(sizeof(*context)); - require_action(context, finish, rval=KERN_RESOURCE_SHORTAGE); - bzero(context, sizeof(*context)); + context = kxld_alloc(sizeof(*context)); + require_action(context, finish, rval = KERN_RESOURCE_SHORTAGE); + bzero(context, sizeof(*context)); - context->flags = flags; - 
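The restyled kxld_create_context() below keeps the error-handling idiom used throughout kxld: every failure sets rval and jumps to a single finish: label that releases whatever was built so far. A minimal sketch of the pattern, with hypothetical foo_alloc()/foo_free() helpers standing in for kxld_alloc() and friends:

#include <mach/kern_return.h>
#include <AssertMacros.h>

struct foo;                              /* hypothetical object */
extern struct foo *foo_alloc(void);      /* hypothetical allocator */
extern void foo_free(struct foo *);

kern_return_t
example_create(struct foo **out)
{
	kern_return_t rval = KERN_FAILURE;
	struct foo *f = NULL;

	f = foo_alloc();
	require_action(f, finish, rval = KERN_RESOURCE_SHORTAGE);

	*out = f;
	f = NULL;                        /* ownership passed to the caller */
	rval = KERN_SUCCESS;
finish:
	if (f) {
		foo_free(f);             /* reached only on the failure path */
	}
	return rval;
}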
context->allocate_callback = allocate_callback; - context->cputype = cputype; - context->cpusubtype = cpusubtype; + context->flags = flags; + context->allocate_callback = allocate_callback; + context->cputype = cputype; + context->cpusubtype = cpusubtype; #if !KERNEL - if (pagesize) { - kxld_set_cross_link_page_size(pagesize); - } + if (pagesize) { + kxld_set_cross_link_page_size(pagesize); + } #endif /* !KERNEL */ - kxld_set_logging_callback(logging_callback); + kxld_set_logging_callback(logging_callback); - context->kext = kxld_alloc(kxld_kext_sizeof()); - require_action(context->kext, finish, rval=KERN_RESOURCE_SHORTAGE); - bzero(context->kext, kxld_kext_sizeof()); + context->kext = kxld_alloc(kxld_kext_sizeof()); + require_action(context->kext, finish, rval = KERN_RESOURCE_SHORTAGE); + bzero(context->kext, kxld_kext_sizeof()); - /* Check if we already have an order array for this arch */ + /* Check if we already have an order array for this arch */ #if KXLD_USER_OR_OBJECT -#if KERNEL - context->section_order = s_section_order; +#if KERNEL + context->section_order = s_section_order; #else - /* In userspace, create the dictionary if it doesn't already exist */ - if (!s_order_dict) { - s_order_dict = kxld_alloc(sizeof(*s_order_dict)); - require_action(s_order_dict, finish, rval=KERN_RESOURCE_SHORTAGE); - bzero(s_order_dict, sizeof(*s_order_dict)); - - rval = kxld_dict_init(s_order_dict, kxld_dict_uint32_hash, - kxld_dict_uint32_cmp, 0); - require_noerr(rval, finish); - } - - context->section_order = kxld_dict_find(s_order_dict, &cputype); + /* In userspace, create the dictionary if it doesn't already exist */ + if (!s_order_dict) { + s_order_dict = kxld_alloc(sizeof(*s_order_dict)); + require_action(s_order_dict, finish, rval = KERN_RESOURCE_SHORTAGE); + bzero(s_order_dict, sizeof(*s_order_dict)); + + rval = kxld_dict_init(s_order_dict, kxld_dict_uint32_hash, + kxld_dict_uint32_cmp, 0); + require_noerr(rval, finish); + } + + context->section_order = kxld_dict_find(s_order_dict, &cputype); #endif /* KERNEL */ - /* Create an order array for this arch if needed */ - - if (!context->section_order) { + /* Create an order array for this arch if needed */ - section_order = kxld_alloc(sizeof(*section_order)); - require_action(section_order, finish, rval=KERN_RESOURCE_SHORTAGE); - bzero(section_order, sizeof(*section_order)); + if (!context->section_order) { + section_order = kxld_alloc(sizeof(*section_order)); + require_action(section_order, finish, rval = KERN_RESOURCE_SHORTAGE); + bzero(section_order, sizeof(*section_order)); #if KERNEL - s_section_order = section_order; + s_section_order = section_order; #else - /* In userspace, add the new array to the order dictionary */ - cputype_p = kxld_alloc(sizeof(*cputype_p)); - require_action(cputype_p, finish, rval=KERN_RESOURCE_SHORTAGE); - *cputype_p = cputype; + /* In userspace, add the new array to the order dictionary */ + cputype_p = kxld_alloc(sizeof(*cputype_p)); + require_action(cputype_p, finish, rval = KERN_RESOURCE_SHORTAGE); + *cputype_p = cputype; - rval = kxld_dict_insert(s_order_dict, cputype_p, section_order); - require_noerr(rval, finish); + rval = kxld_dict_insert(s_order_dict, cputype_p, section_order); + require_noerr(rval, finish); - cputype_p = NULL; + cputype_p = NULL; #endif /* KERNEL */ - context->section_order = section_order; + context->section_order = section_order; - section_order = NULL; - } + section_order = NULL; + } #endif /* KXLD_USER_OR_OBJECT */ - rval = KERN_SUCCESS; - *_context = context; - context = NULL; + 
rval = KERN_SUCCESS; + *_context = context; + context = NULL; finish: - if (context) kxld_destroy_context(context); - if (section_order) kxld_free(section_order, sizeof(*section_order)); + if (context) { + kxld_destroy_context(context); + } + if (section_order) { + kxld_free(section_order, sizeof(*section_order)); + } #if !KERNEL - if (cputype_p) kxld_free(cputype_p, sizeof(*cputype_p)); + if (cputype_p) { + kxld_free(cputype_p, sizeof(*cputype_p)); + } #endif - return rval; + return rval; } /******************************************************************************* @@ -225,214 +230,214 @@ finish: void kxld_destroy_context(KXLDContext *context) { - KXLDObject *object = NULL; - KXLDKext *dep = NULL; - u_int i = 0; + KXLDObject *object = NULL; + KXLDKext *dep = NULL; + u_int i = 0; - check(context); + check(context); - kxld_kext_deinit(context->kext); + kxld_kext_deinit(context->kext); - for (i = 0; i < context->objects.maxitems; ++i) { - object = kxld_array_get_slot(&context->objects, i); - kxld_object_deinit(object); - } - kxld_array_deinit(&context->objects); + for (i = 0; i < context->objects.maxitems; ++i) { + object = kxld_array_get_slot(&context->objects, i); + kxld_object_deinit(object); + } + kxld_array_deinit(&context->objects); - for (i = 0; i < context->dependencies.maxitems; ++i) { - dep = kxld_array_get_slot(&context->dependencies, i); - kxld_kext_deinit(dep); - } - kxld_array_deinit(&context->dependencies); + for (i = 0; i < context->dependencies.maxitems; ++i) { + dep = kxld_array_get_slot(&context->dependencies, i); + kxld_kext_deinit(dep); + } + kxld_array_deinit(&context->dependencies); - kxld_dict_deinit(&context->defined_symbols_by_name); - kxld_dict_deinit(&context->defined_cxx_symbols_by_value); - kxld_dict_deinit(&context->obsolete_symbols_by_name); - kxld_dict_deinit(&context->vtables_by_name); + kxld_dict_deinit(&context->defined_symbols_by_name); + kxld_dict_deinit(&context->defined_cxx_symbols_by_value); + kxld_dict_deinit(&context->obsolete_symbols_by_name); + kxld_dict_deinit(&context->vtables_by_name); - kxld_free(context->kext, kxld_kext_sizeof()); - kxld_free(context, sizeof(*context)); + kxld_free(context->kext, kxld_kext_sizeof()); + kxld_free(context, sizeof(*context)); - kxld_print_memory_report(); + kxld_print_memory_report(); } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ kern_return_t kxld_link_split_file( - KXLDContext * context, - splitKextLinkInfo *link_info, - const char * name, - void * callback_data, - KXLDDependency * dependencies, - u_int ndependencies, - kxld_addr_t * kmod_info_kern) + KXLDContext * context, + splitKextLinkInfo *link_info, + const char * name, + void * callback_data, + KXLDDependency * dependencies, + u_int ndependencies, + kxld_addr_t * kmod_info_kern) { - kern_return_t rval = KERN_FAILURE; - KXLDObject * kext_object = NULL; - splitKextLinkInfo * my_link_info = NULL; - - isSplitKext = (link_info->vmaddr_TEXT_EXEC != 0); - isOldInterface = FALSE; - - kxld_set_logging_callback_data(name, callback_data); - - kxld_log(kKxldLogLinking, kKxldLogBasic, "Linking kext %s", name); - - kaslr_offsets_count = 0; - kaslr_offsets_index = 0; - kaslr_offsets = NULL; - - require_action(context, finish, rval=KERN_INVALID_ARGUMENT); - require_action(link_info, finish, rval=KERN_INVALID_ARGUMENT); - require_action(dependencies, finish, 
rval=KERN_INVALID_ARGUMENT); - require_action(ndependencies, finish, rval=KERN_INVALID_ARGUMENT); - require_action(kmod_info_kern, finish, rval=KERN_INVALID_ARGUMENT); - - rval = init_context(context, ndependencies); - require_noerr(rval, finish); - - rval = init_kext_objects(context, - link_info->kextExecutable, - link_info->kextSize, - name, - dependencies, ndependencies); - require_noerr(rval, finish); - - kext_object = get_object_for_file(context, - link_info->kextExecutable, - link_info->kextSize, - name); - require_action(kext_object, finish, rval=KERN_FAILURE); - - // copy vmaddrs and fileoffsets for split segments into kext_object - kxld_object_set_link_info(kext_object, link_info); - - my_link_info = kxld_object_get_link_info(kext_object); - - rval = allocate_split_kext(context, my_link_info); - require_noerr(rval, finish); - + kern_return_t rval = KERN_FAILURE; + KXLDObject * kext_object = NULL; + splitKextLinkInfo * my_link_info = NULL; + + isSplitKext = (link_info->vmaddr_TEXT_EXEC != 0); + isOldInterface = FALSE; + + kxld_set_logging_callback_data(name, callback_data); + + kxld_log(kKxldLogLinking, kKxldLogBasic, "Linking kext %s", name); + + kaslr_offsets_count = 0; + kaslr_offsets_index = 0; + kaslr_offsets = NULL; + + require_action(context, finish, rval = KERN_INVALID_ARGUMENT); + require_action(link_info, finish, rval = KERN_INVALID_ARGUMENT); + require_action(dependencies, finish, rval = KERN_INVALID_ARGUMENT); + require_action(ndependencies, finish, rval = KERN_INVALID_ARGUMENT); + require_action(kmod_info_kern, finish, rval = KERN_INVALID_ARGUMENT); + + rval = init_context(context, ndependencies); + require_noerr(rval, finish); + + rval = init_kext_objects(context, + link_info->kextExecutable, + link_info->kextSize, + name, + dependencies, ndependencies); + require_noerr(rval, finish); + + kext_object = get_object_for_file(context, + link_info->kextExecutable, + link_info->kextSize, + name); + require_action(kext_object, finish, rval = KERN_FAILURE); + + // copy vmaddrs and fileoffsets for split segments into kext_object + kxld_object_set_link_info(kext_object, link_info); + + my_link_info = kxld_object_get_link_info(kext_object); + + rval = allocate_split_kext(context, my_link_info); + require_noerr(rval, finish); + #if SPLIT_KEXTS_DEBUG - kxld_log(kKxldLogLinking, kKxldLogErr, "Linking kext %s", name); - kxld_show_split_info(link_info); + kxld_log(kKxldLogLinking, kKxldLogErr, "Linking kext %s", name); + kxld_show_split_info(link_info); #endif // SPLIT_KEXTS_DEBUG - - rval = kxld_kext_relocate(context->kext, - (kxld_addr_t)my_link_info, - &context->vtables_by_name, - &context->defined_symbols_by_name, - &context->obsolete_symbols_by_name, - &context->defined_cxx_symbols_by_value); - require_noerr(rval, finish); - - rval = kxld_kext_export_linked_object(context->kext, - (void *) my_link_info, - kmod_info_kern); - require_noerr(rval, finish); - - // pass back info about linked kext - link_info->kaslr_offsets_count = kaslr_offsets_count; - link_info->kaslr_offsets = kaslr_offsets; - link_info->linkedKext = my_link_info->linkedKext; - link_info->linkedKextSize = my_link_info->linkedKextSize; - - if (kaslr_offsets_count != kaslr_offsets_index) { - kxld_log(kKxldLogLinking, kKxldLogErr, "[ERROR] %s: KASLR pointers: count=%d, but only populated %d!", name, kaslr_offsets_count, kaslr_offsets_index); - rval = KERN_FAILURE; - goto finish; - } - - // the values are now the responsibility of the caller - kaslr_offsets_count = 0; - kaslr_offsets_index = 0; - kaslr_offsets = NULL; 
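The kaslr_offsets bookkeeping here is a producer/consumer check: relocation populates slots and advances kaslr_offsets_index, and the link fails unless that exactly matches the kaslr_offsets_count sized up front; on success the buffer becomes the caller's to free. A hedged sketch of the caller's side (ctx, deps, ndeps, kext_bytes, kext_len, and consume_slide_targets() are hypothetical, not kxld API):

splitKextLinkInfo info = { 0 };
kxld_addr_t kmod_info_kern = 0;

info.kextExecutable = kext_bytes;        /* input Mach-O */
info.kextSize = kext_len;
/* split-segment vmaddrs (vmaddr_TEXT, vmaddr_TEXT_EXEC, ...) set by loader */

if (kxld_link_split_file(ctx, &info, "com.example.driver", NULL,
    deps, ndeps, &kmod_info_kern) == KERN_SUCCESS) {
	/* caller now owns info.linkedKext/info.linkedKextSize and the
	 * info.kaslr_offsets array of info.kaslr_offsets_count entries */
	consume_slide_targets(info.kaslr_offsets, info.kaslr_offsets_count);
}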
- - rval = KERN_SUCCESS; + + rval = kxld_kext_relocate(context->kext, + (kxld_addr_t)my_link_info, + &context->vtables_by_name, + &context->defined_symbols_by_name, + &context->obsolete_symbols_by_name, + &context->defined_cxx_symbols_by_value); + require_noerr(rval, finish); + + rval = kxld_kext_export_linked_object(context->kext, + (void *) my_link_info, + kmod_info_kern); + require_noerr(rval, finish); + + // pass back info about linked kext + link_info->kaslr_offsets_count = kaslr_offsets_count; + link_info->kaslr_offsets = kaslr_offsets; + link_info->linkedKext = my_link_info->linkedKext; + link_info->linkedKextSize = my_link_info->linkedKextSize; + + if (kaslr_offsets_count != kaslr_offsets_index) { + kxld_log(kKxldLogLinking, kKxldLogErr, "[ERROR] %s: KASLR pointers: count=%d, but only populated %d!", name, kaslr_offsets_count, kaslr_offsets_index); + rval = KERN_FAILURE; + goto finish; + } + + // the values are now the responsibility of the caller + kaslr_offsets_count = 0; + kaslr_offsets_index = 0; + kaslr_offsets = NULL; + + rval = KERN_SUCCESS; finish: - clear_context(context); - kxld_set_logging_callback_data(NULL, NULL); - - return rval; + clear_context(context); + kxld_set_logging_callback_data(NULL, NULL); + + return rval; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ kern_return_t kxld_link_file( - KXLDContext * context, - u_char * file, - u_long size, - const char * name, - void * callback_data, - KXLDDependency * dependencies, - u_int ndependencies, - u_char ** linked_object_out, - kxld_addr_t * kmod_info_kern) + KXLDContext * context, + u_char * file, + u_long size, + const char * name, + void * callback_data, + KXLDDependency * dependencies, + u_int ndependencies, + u_char ** linked_object_out, + kxld_addr_t * kmod_info_kern) { - kern_return_t rval = KERN_FAILURE; - kxld_addr_t vmaddr = 0; - u_long vmsize = 0; - u_char * linked_object = NULL; - u_char * linked_object_alloc = NULL; - - kaslr_offsets_count = 0; - kaslr_offsets_index = 0; - kaslr_offsets = NULL; - - kxld_set_logging_callback_data(name, callback_data); - - kxld_log(kKxldLogLinking, kKxldLogBasic, "Linking kext %s", name); - - require_action(context, finish, rval=KERN_INVALID_ARGUMENT); - require_action(dependencies, finish, rval=KERN_INVALID_ARGUMENT); - require_action(ndependencies, finish, rval=KERN_INVALID_ARGUMENT); - require_action(file, finish, rval=KERN_INVALID_ARGUMENT); - require_action(size, finish, rval=KERN_INVALID_ARGUMENT); - require_action(linked_object_out, finish, rval=KERN_INVALID_ARGUMENT); - require_action(kmod_info_kern, finish, rval=KERN_INVALID_ARGUMENT); - - isSplitKext = FALSE; - isOldInterface = TRUE; - - rval = init_context(context, ndependencies); - require_noerr(rval, finish); - - rval = init_kext_objects(context, file, size, name, - dependencies, ndependencies); - require_noerr(rval, finish); - - linked_object = allocate_kext(context, callback_data, - &vmaddr, &vmsize, &linked_object_alloc); - require_action(linked_object, finish, rval=KERN_RESOURCE_SHORTAGE); - - - rval = kxld_kext_relocate(context->kext, - vmaddr, - &context->vtables_by_name, - &context->defined_symbols_by_name, - &context->obsolete_symbols_by_name, - &context->defined_cxx_symbols_by_value); - require_noerr(rval, finish); - - rval = kxld_kext_export_linked_object(context->kext, - (void *) linked_object, - 
kmod_info_kern); - require_noerr(rval, finish); - *linked_object_out = linked_object; - - linked_object_alloc = NULL; - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + kxld_addr_t vmaddr = 0; + u_long vmsize = 0; + u_char * linked_object = NULL; + u_char * linked_object_alloc = NULL; + + kaslr_offsets_count = 0; + kaslr_offsets_index = 0; + kaslr_offsets = NULL; + + kxld_set_logging_callback_data(name, callback_data); + + kxld_log(kKxldLogLinking, kKxldLogBasic, "Linking kext %s", name); + + require_action(context, finish, rval = KERN_INVALID_ARGUMENT); + require_action(dependencies, finish, rval = KERN_INVALID_ARGUMENT); + require_action(ndependencies, finish, rval = KERN_INVALID_ARGUMENT); + require_action(file, finish, rval = KERN_INVALID_ARGUMENT); + require_action(size, finish, rval = KERN_INVALID_ARGUMENT); + require_action(linked_object_out, finish, rval = KERN_INVALID_ARGUMENT); + require_action(kmod_info_kern, finish, rval = KERN_INVALID_ARGUMENT); + + isSplitKext = FALSE; + isOldInterface = TRUE; + + rval = init_context(context, ndependencies); + require_noerr(rval, finish); + + rval = init_kext_objects(context, file, size, name, + dependencies, ndependencies); + require_noerr(rval, finish); + + linked_object = allocate_kext(context, callback_data, + &vmaddr, &vmsize, &linked_object_alloc); + require_action(linked_object, finish, rval = KERN_RESOURCE_SHORTAGE); + + + rval = kxld_kext_relocate(context->kext, + vmaddr, + &context->vtables_by_name, + &context->defined_symbols_by_name, + &context->obsolete_symbols_by_name, + &context->defined_cxx_symbols_by_value); + require_noerr(rval, finish); + + rval = kxld_kext_export_linked_object(context->kext, + (void *) linked_object, + kmod_info_kern); + require_noerr(rval, finish); + *linked_object_out = linked_object; + + linked_object_alloc = NULL; + + rval = KERN_SUCCESS; finish: - if (linked_object_alloc) { - kxld_page_free_untracked(linked_object_alloc, vmsize); - } - - clear_context(context); - kxld_set_logging_callback_data(NULL, NULL); - - return rval; + if (linked_object_alloc) { + kxld_page_free_untracked(linked_object_alloc, vmsize); + } + + clear_context(context); + kxld_set_logging_callback_data(NULL, NULL); + + return rval; } @@ -441,120 +446,120 @@ finish: static kern_return_t init_context(KXLDContext *context, u_int ndependencies) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - /* Create an array of objects large enough to hold an object - * for every dependency, an interface for each dependency, and a kext. */ - rval = kxld_array_init(&context->objects, - kxld_object_sizeof(), 2 * ndependencies + 1); - require_noerr(rval, finish); + /* Create an array of objects large enough to hold an object + * for every dependency, an interface for each dependency, and a kext. 
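+ * (For example, ndependencies == 3 gives 2*3 + 1 == 7 slots: three
+ * dependency kexts, up to three interface objects, and the kext being
+ * linked.)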
*/ + rval = kxld_array_init(&context->objects, + kxld_object_sizeof(), 2 * ndependencies + 1); + require_noerr(rval, finish); - rval = kxld_array_init(&context->dependencies, - kxld_kext_sizeof(), ndependencies); - require_noerr(rval, finish); + rval = kxld_array_init(&context->dependencies, + kxld_kext_sizeof(), ndependencies); + require_noerr(rval, finish); - rval = kxld_dict_init(&context->defined_symbols_by_name, - kxld_dict_string_hash, kxld_dict_string_cmp, 0); - require_noerr(rval, finish); + rval = kxld_dict_init(&context->defined_symbols_by_name, + kxld_dict_string_hash, kxld_dict_string_cmp, 0); + require_noerr(rval, finish); - rval = kxld_dict_init(&context->defined_cxx_symbols_by_value, - kxld_dict_kxldaddr_hash, kxld_dict_kxldaddr_cmp, 0); - require_noerr(rval, finish); + rval = kxld_dict_init(&context->defined_cxx_symbols_by_value, + kxld_dict_kxldaddr_hash, kxld_dict_kxldaddr_cmp, 0); + require_noerr(rval, finish); - rval = kxld_dict_init(&context->obsolete_symbols_by_name, - kxld_dict_string_hash, kxld_dict_string_cmp, 0); - require_noerr(rval, finish); + rval = kxld_dict_init(&context->obsolete_symbols_by_name, + kxld_dict_string_hash, kxld_dict_string_cmp, 0); + require_noerr(rval, finish); - rval = kxld_dict_init(&context->vtables_by_name, kxld_dict_string_hash, - kxld_dict_string_cmp, 0); - require_noerr(rval, finish); + rval = kxld_dict_init(&context->vtables_by_name, kxld_dict_string_hash, + kxld_dict_string_cmp, 0); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ static kern_return_t init_kext_objects(KXLDContext *context, - u_char *file, - u_long size, - const char *name, - KXLDDependency *dependencies, - u_int ndependencies) + u_char *file, + u_long size, + const char *name, + KXLDDependency *dependencies, + u_int ndependencies) { - kern_return_t rval = KERN_FAILURE; - KXLDKext *kext = NULL; - KXLDObject *kext_object = NULL; - KXLDObject *interface_object = NULL; - u_int i = 0; - - /* Create a kext object for each dependency. If it's a direct dependency, - * export its symbols by name by value. If it's indirect, just export the - * C++ symbols by value. 
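 * (In map terms: a direct dependency feeds defined_symbols_by_name,
 * obsolete_symbols_by_name, and defined_cxx_symbols_by_value; an indirect
 * dependency feeds only defined_cxx_symbols_by_value, which the
 * vtable-export loop below still consumes.)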
- */ - for (i = 0; i < ndependencies; ++i) { - kext = kxld_array_get_item(&context->dependencies, i); - kext_object = NULL; - interface_object = NULL; - - kext_object = get_object_for_file(context, dependencies[i].kext, - dependencies[i].kext_size, dependencies[i].kext_name); - require_action(kext_object, finish, rval=KERN_FAILURE); - - if (dependencies[i].interface) { - interface_object = get_object_for_file(context, - dependencies[i].interface, dependencies[i].interface_size, - dependencies[i].interface_name); - require_action(interface_object, finish, rval=KERN_FAILURE); - } - - rval = kxld_kext_init(kext, kext_object, interface_object); - require_noerr(rval, finish); - - if (dependencies[i].is_direct_dependency) { - rval = kxld_kext_export_symbols(kext, - &context->defined_symbols_by_name, - &context->obsolete_symbols_by_name, - &context->defined_cxx_symbols_by_value); - require_noerr(rval, finish); - } else { - rval = kxld_kext_export_symbols(kext, - /* defined_symbols */ NULL, /* obsolete_symbols */ NULL, - &context->defined_cxx_symbols_by_value); - require_noerr(rval, finish); - } - } - - /* Export the vtables for all of the dependencies. */ - for (i = 0; i < context->dependencies.nitems; ++i) { - kext = kxld_array_get_item(&context->dependencies, i); - - rval = kxld_kext_export_vtables(kext, - &context->defined_cxx_symbols_by_value, - &context->defined_symbols_by_name, - &context->vtables_by_name); - require_noerr(rval, finish); - } - - /* Create a kext object for the kext we're linking and export its locally - * defined C++ symbols. - */ - kext_object = get_object_for_file(context, file, size, name); - require_action(kext_object, finish, rval=KERN_FAILURE); - - rval = kxld_kext_init(context->kext, kext_object, /* interface */ NULL); - require_noerr(rval, finish); - - rval = kxld_kext_export_symbols(context->kext, - /* defined_symbols */ NULL, /* obsolete_symbols */ NULL, - &context->defined_cxx_symbols_by_value); - require_noerr(rval, finish); - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDKext *kext = NULL; + KXLDObject *kext_object = NULL; + KXLDObject *interface_object = NULL; + u_int i = 0; + + /* Create a kext object for each dependency. If it's a direct dependency, + * export its symbols by name by value. If it's indirect, just export the + * C++ symbols by value. + */ + for (i = 0; i < ndependencies; ++i) { + kext = kxld_array_get_item(&context->dependencies, i); + kext_object = NULL; + interface_object = NULL; + + kext_object = get_object_for_file(context, dependencies[i].kext, + dependencies[i].kext_size, dependencies[i].kext_name); + require_action(kext_object, finish, rval = KERN_FAILURE); + + if (dependencies[i].interface) { + interface_object = get_object_for_file(context, + dependencies[i].interface, dependencies[i].interface_size, + dependencies[i].interface_name); + require_action(interface_object, finish, rval = KERN_FAILURE); + } + + rval = kxld_kext_init(kext, kext_object, interface_object); + require_noerr(rval, finish); + + if (dependencies[i].is_direct_dependency) { + rval = kxld_kext_export_symbols(kext, + &context->defined_symbols_by_name, + &context->obsolete_symbols_by_name, + &context->defined_cxx_symbols_by_value); + require_noerr(rval, finish); + } else { + rval = kxld_kext_export_symbols(kext, + /* defined_symbols */ NULL, /* obsolete_symbols */ NULL, + &context->defined_cxx_symbols_by_value); + require_noerr(rval, finish); + } + } + + /* Export the vtables for all of the dependencies. 
*/ + for (i = 0; i < context->dependencies.nitems; ++i) { + kext = kxld_array_get_item(&context->dependencies, i); + + rval = kxld_kext_export_vtables(kext, + &context->defined_cxx_symbols_by_value, + &context->defined_symbols_by_name, + &context->vtables_by_name); + require_noerr(rval, finish); + } + + /* Create a kext object for the kext we're linking and export its locally + * defined C++ symbols. + */ + kext_object = get_object_for_file(context, file, size, name); + require_action(kext_object, finish, rval = KERN_FAILURE); + + rval = kxld_kext_init(context->kext, kext_object, /* interface */ NULL); + require_noerr(rval, finish); + + rval = kxld_kext_export_symbols(context->kext, + /* defined_symbols */ NULL, /* obsolete_symbols */ NULL, + &context->defined_cxx_symbols_by_value); + require_noerr(rval, finish); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -563,113 +568,113 @@ static KXLDObject * get_object_for_file(KXLDContext *context, u_char *file, u_long size, const char *name) { - KXLDObject *rval = NULL; - KXLDObject *object = NULL; - kern_return_t result = 0; - u_int i = 0; + KXLDObject *rval = NULL; + KXLDObject *object = NULL; + kern_return_t result = 0; + u_int i = 0; - for (i = 0; i < context->objects.nitems; ++i) { - object = kxld_array_get_item(&context->objects, i); + for (i = 0; i < context->objects.nitems; ++i) { + object = kxld_array_get_item(&context->objects, i); - if (!kxld_object_get_file(object)) { - result = kxld_object_init_from_macho(object, file, size, name, - context->section_order, context->cputype, context->cpusubtype, context->flags); - require_noerr(result, finish); + if (!kxld_object_get_file(object)) { + result = kxld_object_init_from_macho(object, file, size, name, + context->section_order, context->cputype, context->cpusubtype, context->flags); + require_noerr(result, finish); - rval = object; - break; - } + rval = object; + break; + } - if (kxld_object_get_file(object) == file) { - rval = object; - break; - } - } + if (kxld_object_get_file(object) == file) { + rval = object; + break; + } + } finish: - return rval; + return rval; } #include /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ static kern_return_t allocate_split_kext(KXLDContext *context, splitKextLinkInfo * link_info) { - kern_return_t rval = KERN_FAILURE; - u_long vmsize = 0; - u_long header_size = 0; - u_char * linked_object = NULL; - - kxld_kext_get_vmsize(context->kext, &header_size, &vmsize); - - if (isSplitKext) { - /* get __LINKEDIT vmsize */ - kxld_kext_get_vmsize_for_seg_by_name(context->kext, SEG_LINKEDIT, &vmsize); - // add in the gaps - vmsize += (link_info->vmaddr_LINKEDIT - link_info->vmaddr_TEXT); - } - link_info->linkedKextSize = vmsize; - - linked_object = kxld_page_alloc_untracked(link_info->linkedKextSize); - require(linked_object, finish); - link_info->linkedKext = linked_object; - - bzero(linked_object, vmsize); - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + u_long vmsize = 0; + u_long header_size = 0; + u_char * linked_object = NULL; + + kxld_kext_get_vmsize(context->kext, &header_size, &vmsize); + + if (isSplitKext) { + /* get __LINKEDIT vmsize */ + kxld_kext_get_vmsize_for_seg_by_name(context->kext, SEG_LINKEDIT, &vmsize); + // add in the gaps + vmsize += 
(link_info->vmaddr_LINKEDIT - link_info->vmaddr_TEXT); + } + link_info->linkedKextSize = vmsize; + + linked_object = kxld_page_alloc_untracked(link_info->linkedKextSize); + require(linked_object, finish); + link_info->linkedKext = linked_object; + + bzero(linked_object, vmsize); + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ static u_char * allocate_kext(KXLDContext *context, - void *callback_data, - kxld_addr_t *vmaddr_out, - u_long *vmsize_out, - u_char **linked_object_alloc_out) + void *callback_data, + kxld_addr_t *vmaddr_out, + u_long *vmsize_out, + u_char **linked_object_alloc_out) { - KXLDAllocateFlags flags = 0; - kxld_addr_t vmaddr = 0; - u_long vmsize = 0; - u_long header_size = 0; - u_char * linked_object = NULL; - - *linked_object_alloc_out = NULL; - - kxld_kext_get_vmsize(context->kext, &header_size, &vmsize); - - vmaddr = context->allocate_callback(vmsize, &flags, callback_data); - require_action(!(vmaddr & (kxld_get_effective_page_size()-1)), finish, - kxld_log(kKxldLogLinking, kKxldLogErr, - "Load address %p is not page-aligned.", - (void *) (uintptr_t) vmaddr)); - - if (flags & kKxldAllocateWritable) { - linked_object = (u_char *) (u_long) vmaddr; - } else { - linked_object = kxld_page_alloc_untracked(vmsize); - require(linked_object, finish); - - *linked_object_alloc_out = linked_object; - } - - kxld_kext_set_linked_object_size(context->kext, vmsize); - - /* Zero out the memory before we fill it. We fill this buffer in a - * sparse fashion, and it's simpler to clear it now rather than - * track and zero any pieces we didn't touch after we've written - * all of the sections to memory. - */ - bzero(linked_object, vmsize); - *vmaddr_out = vmaddr; - *vmsize_out = vmsize; - + KXLDAllocateFlags flags = 0; + kxld_addr_t vmaddr = 0; + u_long vmsize = 0; + u_long header_size = 0; + u_char * linked_object = NULL; + + *linked_object_alloc_out = NULL; + + kxld_kext_get_vmsize(context->kext, &header_size, &vmsize); + + vmaddr = context->allocate_callback(vmsize, &flags, callback_data); + require_action(!(vmaddr & (kxld_get_effective_page_size() - 1)), finish, + kxld_log(kKxldLogLinking, kKxldLogErr, + "Load address %p is not page-aligned.", + (void *) (uintptr_t) vmaddr)); + + if (flags & kKxldAllocateWritable) { + linked_object = (u_char *) (u_long) vmaddr; + } else { + linked_object = kxld_page_alloc_untracked(vmsize); + require(linked_object, finish); + + *linked_object_alloc_out = linked_object; + } + + kxld_kext_set_linked_object_size(context->kext, vmsize); + + /* Zero out the memory before we fill it. We fill this buffer in a + * sparse fashion, and it's simpler to clear it now rather than + * track and zero any pieces we didn't touch after we've written + * all of the sections to memory. 
+ */ + bzero(linked_object, vmsize); + *vmaddr_out = vmaddr; + *vmsize_out = vmsize; + finish: - return linked_object; + return linked_object; } /******************************************************************************* @@ -677,28 +682,28 @@ finish: static void clear_context(KXLDContext *context) { - KXLDObject * object = NULL; - KXLDKext * dep = NULL; - u_int i = 0; - - check(context); - - kxld_kext_clear(context->kext); - - for (i = 0; i < context->objects.nitems; ++i) { - object = kxld_array_get_item(&context->objects, i); - kxld_object_clear(object); - } - kxld_array_reset(&context->objects); - - for (i = 0; i < context->dependencies.nitems; ++i) { - dep = kxld_array_get_item(&context->dependencies, i); - kxld_kext_clear(dep); - } - kxld_array_reset(&context->dependencies); - - kxld_dict_clear(&context->defined_symbols_by_name); - kxld_dict_clear(&context->defined_cxx_symbols_by_value); - kxld_dict_clear(&context->obsolete_symbols_by_name); - kxld_dict_clear(&context->vtables_by_name); + KXLDObject * object = NULL; + KXLDKext * dep = NULL; + u_int i = 0; + + check(context); + + kxld_kext_clear(context->kext); + + for (i = 0; i < context->objects.nitems; ++i) { + object = kxld_array_get_item(&context->objects, i); + kxld_object_clear(object); + } + kxld_array_reset(&context->objects); + + for (i = 0; i < context->dependencies.nitems; ++i) { + dep = kxld_array_get_item(&context->dependencies, i); + kxld_kext_clear(dep); + } + kxld_array_reset(&context->dependencies); + + kxld_dict_clear(&context->defined_symbols_by_name); + kxld_dict_clear(&context->defined_cxx_symbols_by_value); + kxld_dict_clear(&context->obsolete_symbols_by_name); + kxld_dict_clear(&context->vtables_by_name); } diff --git a/libkern/kxld/kxld_array.c b/libkern/kxld/kxld_array.c index 55d009ba4..51c6df6ad 100644 --- a/libkern/kxld/kxld_array.c +++ b/libkern/kxld/kxld_array.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
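Stepping back to allocate_kext() above: the old interface delegates address allocation to the client through KXLDAllocateCallback, requires the returned address to be page-aligned, and links in place only if the callback sets kKxldAllocateWritable. A sketch of a conforming callback (my_reserve_pages() is hypothetical):

static kxld_addr_t
my_allocate(u_long size, KXLDAllocateFlags *flags, void *user_data)
{
	(void)user_data;
	*flags = 0;                      /* not writable: kxld links into its own
	                                  * buffer; caller copies the result out */
	return my_reserve_pages(size);   /* must be page-aligned */
}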
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -46,87 +46,87 @@ static u_int reinit_pools(KXLDArray *array, u_int nitems); /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_array_init(KXLDArray *array, size_t itemsize, u_int nitems) { - kern_return_t rval = KERN_FAILURE; - KXLDArrayPool *dstpool = NULL, *srcpool = NULL, *tmp = NULL; - KXLDArrayHead srcpools = STAILQ_HEAD_INITIALIZER(srcpools); - size_t srcpool_capacity = 0; - u_long offset = 0; - - check(array); - - if (!nitems) { - kxld_array_reset(array); - rval = KERN_SUCCESS; - goto finish; - } - - require_action(itemsize, finish, rval=KERN_INVALID_ARGUMENT); - - /* If the array has some pools, we need to see if there is enough space in - * those pools to accommodate the requested size array. If there isn't - * enough space, we save the existing pools to a temporary STAILQ and zero - * out the array structure. This will cause a new pool of sufficient size - * to be created, and we then copy the data from the old pools into the new - * pool. - */ - if (array->npools) { - /* Update the array's maxitems based on the new itemsize */ - array->pool_maxitems = (u_int) (array->pool_capacity / itemsize); - array->maxitems = 0; - STAILQ_FOREACH(srcpool, &array->pools, entries) { - array->maxitems += array->pool_maxitems; - } - - /* If there's not enough space, save the pools to a temporary STAILQ - * and zero out the array structure. Otherwise, rescan the pools to - * update their internal nitems counts. - */ - if (array->maxitems < nitems) { - STAILQ_FOREACH_SAFE(srcpool, &array->pools, entries, tmp) { - STAILQ_REMOVE(&array->pools, srcpool, kxld_array_pool, entries); - STAILQ_INSERT_TAIL(&srcpools, srcpool, entries); - } - srcpool_capacity = array->pool_capacity; - bzero(array, sizeof(*array)); - } else { - nitems = reinit_pools(array, nitems); - require_action(nitems == 0, finish, rval=KERN_FAILURE); - } - } - - array->itemsize = itemsize; - - /* If array->maxitems is zero, it means we are either rebuilding an array - * that was too small, or we're initializing an array for the first time. - * In either case, we need to set up a pool of the requested size, and - * if we're rebuilding an old array, we'll also copy the data from the old - * pools into the new pool. - */ - if (array->maxitems == 0) { - - rval = array_init(array, itemsize, nitems); - require_noerr(rval, finish); - - dstpool = STAILQ_FIRST(&array->pools); - require_action(dstpool, finish, rval=KERN_FAILURE); - - STAILQ_FOREACH_SAFE(srcpool, &srcpools, entries, tmp) { - memcpy(dstpool->buffer + offset, srcpool->buffer, srcpool_capacity); - offset += srcpool_capacity; - - STAILQ_REMOVE(&srcpools, srcpool, kxld_array_pool, entries); - pool_destroy(srcpool, srcpool_capacity); - } - - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDArrayPool *dstpool = NULL, *srcpool = NULL, *tmp = NULL; + KXLDArrayHead srcpools = STAILQ_HEAD_INITIALIZER(srcpools); + size_t srcpool_capacity = 0; + u_long offset = 0; + + check(array); + + if (!nitems) { + kxld_array_reset(array); + rval = KERN_SUCCESS; + goto finish; + } + + require_action(itemsize, finish, rval = KERN_INVALID_ARGUMENT); + + /* If the array has some pools, we need to see if there is enough space in + * those pools to accommodate the requested size array. 
If there isn't + * enough space, we save the existing pools to a temporary STAILQ and zero + * out the array structure. This will cause a new pool of sufficient size + * to be created, and we then copy the data from the old pools into the new + * pool. + */ + if (array->npools) { + /* Update the array's maxitems based on the new itemsize */ + array->pool_maxitems = (u_int) (array->pool_capacity / itemsize); + array->maxitems = 0; + STAILQ_FOREACH(srcpool, &array->pools, entries) { + array->maxitems += array->pool_maxitems; + } + + /* If there's not enough space, save the pools to a temporary STAILQ + * and zero out the array structure. Otherwise, rescan the pools to + * update their internal nitems counts. + */ + if (array->maxitems < nitems) { + STAILQ_FOREACH_SAFE(srcpool, &array->pools, entries, tmp) { + STAILQ_REMOVE(&array->pools, srcpool, kxld_array_pool, entries); + STAILQ_INSERT_TAIL(&srcpools, srcpool, entries); + } + srcpool_capacity = array->pool_capacity; + bzero(array, sizeof(*array)); + } else { + nitems = reinit_pools(array, nitems); + require_action(nitems == 0, finish, rval = KERN_FAILURE); + } + } + + array->itemsize = itemsize; + + /* If array->maxitems is zero, it means we are either rebuilding an array + * that was too small, or we're initializing an array for the first time. + * In either case, we need to set up a pool of the requested size, and + * if we're rebuilding an old array, we'll also copy the data from the old + * pools into the new pool. + */ + if (array->maxitems == 0) { + rval = array_init(array, itemsize, nitems); + require_noerr(rval, finish); + + dstpool = STAILQ_FIRST(&array->pools); + require_action(dstpool, finish, rval = KERN_FAILURE); + + STAILQ_FOREACH_SAFE(srcpool, &srcpools, entries, tmp) { + memcpy(dstpool->buffer + offset, srcpool->buffer, srcpool_capacity); + offset += srcpool_capacity; + + STAILQ_REMOVE(&srcpools, srcpool, kxld_array_pool, entries); + pool_destroy(srcpool, srcpool_capacity); + } + } + + rval = KERN_SUCCESS; finish: - if (rval) kxld_array_deinit(array); - return rval; + if (rval) { + kxld_array_deinit(array); + } + return rval; } /******************************************************************************* @@ -137,37 +137,37 @@ finish: static kern_return_t array_init(KXLDArray *array, size_t itemsize, u_int nitems) { - kern_return_t rval = KERN_FAILURE; - KXLDArrayPool *pool = NULL; - - require_action(itemsize, finish, rval=KERN_INVALID_ARGUMENT); - require_action(array->npools < 2, finish, rval=KERN_INVALID_ARGUMENT); - - array->itemsize = itemsize; - - pool = STAILQ_FIRST(&array->pools); - if (pool) { - require_action(itemsize * nitems < array->pool_capacity, - finish, rval=KERN_FAILURE); - require_action(array->npools == 1, finish, rval=KERN_FAILURE); - bzero(pool->buffer, array->pool_capacity); - } else { - array->pool_capacity = round_page(array->itemsize * nitems); - - pool = pool_create(array->pool_capacity); - require_action(pool, finish, rval=KERN_RESOURCE_SHORTAGE); - STAILQ_INSERT_HEAD(&array->pools, pool, entries); - } - pool->nitems = nitems; - - array->pool_maxitems = (u_int) (array->pool_capacity / array->itemsize); - array->maxitems = array->pool_maxitems; - array->nitems = nitems; - array->npools = 1; - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDArrayPool *pool = NULL; + + require_action(itemsize, finish, rval = KERN_INVALID_ARGUMENT); + require_action(array->npools < 2, finish, rval = KERN_INVALID_ARGUMENT); + + array->itemsize = itemsize; + + pool = STAILQ_FIRST(&array->pools); + 
if (pool) { + require_action(itemsize * nitems < array->pool_capacity, + finish, rval = KERN_FAILURE); + require_action(array->npools == 1, finish, rval = KERN_FAILURE); + bzero(pool->buffer, array->pool_capacity); + } else { + array->pool_capacity = round_page(array->itemsize * nitems); + + pool = pool_create(array->pool_capacity); + require_action(pool, finish, rval = KERN_RESOURCE_SHORTAGE); + STAILQ_INSERT_HEAD(&array->pools, pool, entries); + } + pool->nitems = nitems; + + array->pool_maxitems = (u_int) (array->pool_capacity / array->itemsize); + array->maxitems = array->pool_maxitems; + array->nitems = nitems; + array->npools = 1; + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -175,21 +175,23 @@ finish: static KXLDArrayPool * pool_create(size_t capacity) { - KXLDArrayPool *pool = NULL, *rval = NULL; + KXLDArrayPool *pool = NULL, *rval = NULL; - pool = kxld_alloc(sizeof(*pool)); - require(pool, finish); + pool = kxld_alloc(sizeof(*pool)); + require(pool, finish); - pool->buffer = kxld_page_alloc(capacity); - require(pool->buffer, finish); - bzero(pool->buffer, capacity); + pool->buffer = kxld_page_alloc(capacity); + require(pool->buffer, finish); + bzero(pool->buffer, capacity); - rval = pool; - pool = NULL; + rval = pool; + pool = NULL; finish: - if (pool) pool_destroy(pool, capacity); - return rval; + if (pool) { + pool_destroy(pool, capacity); + } + return rval; } /******************************************************************************* @@ -197,10 +199,12 @@ finish: static void pool_destroy(KXLDArrayPool *pool, size_t capacity) { - if (pool) { - if (pool->buffer) kxld_page_free(pool->buffer, capacity); - kxld_free(pool, sizeof(*pool)); - } + if (pool) { + if (pool->buffer) { + kxld_page_free(pool->buffer, capacity); + } + kxld_free(pool, sizeof(*pool)); + } } /******************************************************************************* @@ -208,88 +212,88 @@ pool_destroy(KXLDArrayPool *pool, size_t capacity) kern_return_t kxld_array_copy(KXLDArray *dstarray, const KXLDArray *srcarray) { - kern_return_t rval = KERN_FAILURE; - KXLDArrayPool *dstpool = NULL, *srcpool = NULL; - u_long needed_capacity = 0; - u_long current_capacity = 0; - u_long copysize = 0; - u_long offset = 0; - - check(dstarray); - check(srcarray); - - /* When copying array, we only want to copy to an array with a single - * pool. If the array has more than one pool or the array is too small, - * we destroy the array and build it from scratch for the copy. - */ - needed_capacity = round_page(srcarray->nitems * srcarray->itemsize); - current_capacity = dstarray->npools * dstarray->pool_capacity; - if (dstarray->npools > 1 || needed_capacity > current_capacity) { - kxld_array_deinit(dstarray); - } - - rval = array_init(dstarray, srcarray->itemsize, srcarray->nitems); - require_noerr(rval, finish); - - dstpool = STAILQ_FIRST(&dstarray->pools); - require_action(dstpool, finish, rval=KERN_FAILURE); - - /* Copy the data from the source pools to the single destination pool. 
*/ - STAILQ_FOREACH(srcpool, &srcarray->pools, entries) { - copysize = srcpool->nitems * srcarray->itemsize; - memcpy(dstpool->buffer + offset, srcpool->buffer, copysize); - offset += copysize; - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDArrayPool *dstpool = NULL, *srcpool = NULL; + u_long needed_capacity = 0; + u_long current_capacity = 0; + u_long copysize = 0; + u_long offset = 0; + + check(dstarray); + check(srcarray); + + /* When copying array, we only want to copy to an array with a single + * pool. If the array has more than one pool or the array is too small, + * we destroy the array and build it from scratch for the copy. + */ + needed_capacity = round_page(srcarray->nitems * srcarray->itemsize); + current_capacity = dstarray->npools * dstarray->pool_capacity; + if (dstarray->npools > 1 || needed_capacity > current_capacity) { + kxld_array_deinit(dstarray); + } + + rval = array_init(dstarray, srcarray->itemsize, srcarray->nitems); + require_noerr(rval, finish); + + dstpool = STAILQ_FIRST(&dstarray->pools); + require_action(dstpool, finish, rval = KERN_FAILURE); + + /* Copy the data from the source pools to the single destination pool. */ + STAILQ_FOREACH(srcpool, &srcarray->pools, entries) { + copysize = srcpool->nitems * srcarray->itemsize; + memcpy(dstpool->buffer + offset, srcpool->buffer, copysize); + offset += copysize; + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -void +void kxld_array_reset(KXLDArray *array) { - KXLDArrayPool *pool = NULL; - - if (array) { - STAILQ_FOREACH(pool, &array->pools, entries) { - pool->nitems = 0; - } - array->nitems = 0; - } + KXLDArrayPool *pool = NULL; + + if (array) { + STAILQ_FOREACH(pool, &array->pools, entries) { + pool->nitems = 0; + } + array->nitems = 0; + } } /******************************************************************************* *******************************************************************************/ -void +void kxld_array_clear(KXLDArray *array) { - KXLDArrayPool *pool = NULL; - - if (array) { - kxld_array_reset(array); - STAILQ_FOREACH(pool, &array->pools, entries) { - bzero(pool->buffer, array->pool_capacity); - } - } + KXLDArrayPool *pool = NULL; + + if (array) { + kxld_array_reset(array); + STAILQ_FOREACH(pool, &array->pools, entries) { + bzero(pool->buffer, array->pool_capacity); + } + } } /******************************************************************************* *******************************************************************************/ -void +void kxld_array_deinit(KXLDArray *array) { - KXLDArrayPool *pool = NULL, *tmp = NULL; - - if (array) { - STAILQ_FOREACH_SAFE(pool, &array->pools, entries, tmp) { - STAILQ_REMOVE(&array->pools, pool, kxld_array_pool, entries); - pool_destroy(pool, array->pool_capacity); - } - bzero(array, sizeof(*array)); - } + KXLDArrayPool *pool = NULL, *tmp = NULL; + + if (array) { + STAILQ_FOREACH_SAFE(pool, &array->pools, entries, tmp) { + STAILQ_REMOVE(&array->pools, pool, kxld_array_pool, entries); + pool_destroy(pool, array->pool_capacity); + } + bzero(array, sizeof(*array)); + } } /******************************************************************************* @@ -297,24 +301,26 @@ kxld_array_deinit(KXLDArray *array) void * kxld_array_get_item(const KXLDArray *array, u_int idx) { - KXLDArrayPool *pool = NULL; - void *item = NULL; + KXLDArrayPool *pool = NULL; + 
void *item = NULL; + + check(array); - check(array); + if (idx >= array->nitems) { + goto finish; + } - if (idx >= array->nitems) goto finish; + STAILQ_FOREACH(pool, &array->pools, entries) { + if (idx < pool->nitems) { + item = (void *) (pool->buffer + (array->itemsize * idx)); + break; + } - STAILQ_FOREACH(pool, &array->pools, entries) { - if (idx < pool->nitems) { - item = (void *) (pool->buffer + (array->itemsize * idx)); - break; - } - - idx -= array->pool_maxitems; - } + idx -= array->pool_maxitems; + } finish: - return item; + return item; } /******************************************************************************* @@ -322,90 +328,92 @@ finish: void * kxld_array_get_slot(const KXLDArray *array, u_int idx) { - KXLDArrayPool *pool = NULL; - void *item = NULL; + KXLDArrayPool *pool = NULL; + void *item = NULL; - check(array); + check(array); - if (idx >= array->maxitems) goto finish; + if (idx >= array->maxitems) { + goto finish; + } - STAILQ_FOREACH(pool, &array->pools, entries) { - if (idx < array->pool_maxitems) { - item = (void *) (pool->buffer + (array->itemsize * idx)); - break; - } - - idx -= array->pool_maxitems; - } + STAILQ_FOREACH(pool, &array->pools, entries) { + if (idx < array->pool_maxitems) { + item = (void *) (pool->buffer + (array->itemsize * idx)); + break; + } + + idx -= array->pool_maxitems; + } finish: - return item; + return item; } /******************************************************************************* *******************************************************************************/ kern_return_t kxld_array_get_index(const KXLDArray *array, const void *item, u_int *_idx) -{ - kern_return_t rval = KERN_FAILURE; - KXLDArrayPool *pool = NULL; - u_long diff = 0; - u_int idx = 0; - u_int base_idx = 0; - const u_char *it; +{ + kern_return_t rval = KERN_FAILURE; + KXLDArrayPool *pool = NULL; + u_long diff = 0; + u_int idx = 0; + u_int base_idx = 0; + const u_char *it; - check(array); - check(item); - check(_idx); + check(array); + check(item); + check(_idx); - it = item; + it = item; - STAILQ_FOREACH(pool, &array->pools, entries) { - if (pool->buffer <= it && it < pool->buffer + array->pool_capacity) { - diff = it - pool->buffer; - idx = (u_int) (diff / array->itemsize); + STAILQ_FOREACH(pool, &array->pools, entries) { + if (pool->buffer <= it && it < pool->buffer + array->pool_capacity) { + diff = it - pool->buffer; + idx = (u_int) (diff / array->itemsize); - idx += base_idx; - *_idx = idx; + idx += base_idx; + *_idx = idx; - rval = KERN_SUCCESS; - goto finish; - } + rval = KERN_SUCCESS; + goto finish; + } - base_idx += array->pool_maxitems; - } + base_idx += array->pool_maxitems; + } - rval = KERN_FAILURE; + rval = KERN_FAILURE; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_array_resize(KXLDArray *array, u_int nitems) { - kern_return_t rval = KERN_FAILURE; - KXLDArrayPool *pool = NULL; + kern_return_t rval = KERN_FAILURE; + KXLDArrayPool *pool = NULL; - /* Grow the list of pools until we have enough to fit all of the entries */ + /* Grow the list of pools until we have enough to fit all of the entries */ - while (nitems > array->maxitems) { - pool = pool_create(array->pool_capacity); - require_action(pool, finish, rval=KERN_FAILURE); + while (nitems > array->maxitems) { + pool = pool_create(array->pool_capacity); + require_action(pool, finish, rval = KERN_FAILURE); 
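The get_item()/get_slot() walks above make the pool list act like one flat array. A worked example of the indexing, assuming pool_maxitems == 64:

/*
 * idx = 75:
 *   pool 0: 75 >= 64  ->  idx -= 64 (idx becomes 11), move to next pool
 *   pool 1: 11 <  64  ->  item = pool->buffer + array->itemsize * 11
 *
 * get_item() additionally bounds idx by nitems (live items);
 * get_slot() bounds it by maxitems (allocated capacity).
 */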
- STAILQ_INSERT_TAIL(&array->pools, pool, entries); + STAILQ_INSERT_TAIL(&array->pools, pool, entries); - array->maxitems += array->pool_maxitems; - array->npools += 1; - } + array->maxitems += array->pool_maxitems; + array->npools += 1; + } - nitems = reinit_pools(array, nitems); - require_action(nitems == 0, finish, rval=KERN_FAILURE); + nitems = reinit_pools(array, nitems); + require_action(nitems == 0, finish, rval = KERN_FAILURE); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -416,24 +424,24 @@ finish: static u_int reinit_pools(KXLDArray *array, u_int nitems) { - KXLDArrayPool *pool = NULL; - u_int pool_nitems = 0; - - /* Set the number of items for each pool */ - - pool_nitems = nitems; - STAILQ_FOREACH(pool, &array->pools, entries) { - if (pool_nitems > array->pool_maxitems) { - pool->nitems = array->pool_maxitems; - pool_nitems -= array->pool_maxitems; - } else { - pool->nitems = pool_nitems; - pool_nitems = 0; - } - } - array->nitems = nitems; - - return pool_nitems; + KXLDArrayPool *pool = NULL; + u_int pool_nitems = 0; + + /* Set the number of items for each pool */ + + pool_nitems = nitems; + STAILQ_FOREACH(pool, &array->pools, entries) { + if (pool_nitems > array->pool_maxitems) { + pool->nitems = array->pool_maxitems; + pool_nitems -= array->pool_maxitems; + } else { + pool->nitems = pool_nitems; + pool_nitems = 0; + } + } + array->nitems = nitems; + + return pool_nitems; } /******************************************************************************* @@ -441,46 +449,45 @@ reinit_pools(KXLDArray *array, u_int nitems) kern_return_t kxld_array_remove(KXLDArray *array, u_int idx) { - kern_return_t rval = KERN_FAILURE; - KXLDArrayPool *pool = NULL; - u_char *dst = NULL; - u_char *src = NULL; - u_int nitems = 0; + kern_return_t rval = KERN_FAILURE; + KXLDArrayPool *pool = NULL; + u_char *dst = NULL; + u_char *src = NULL; + u_int nitems = 0; - check(array); + check(array); - if (idx >= array->nitems) { - rval = KERN_SUCCESS; - goto finish; - } + if (idx >= array->nitems) { + rval = KERN_SUCCESS; + goto finish; + } - /* We only support removing an item if all the items are contained in a - * single pool (for now). - */ - require_action(array->npools < 2 || array->nitems < array->pool_maxitems, - finish, rval=KERN_NOT_SUPPORTED); + /* We only support removing an item if all the items are contained in a + * single pool (for now). 
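+ * (For example, removing idx 1 of 4 items memmove()s items 2..3 down one
+ * slot, bzero()es the vacated last slot, and drops nitems to 3.)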
+ */ + require_action(array->npools < 2 || array->nitems < array->pool_maxitems, + finish, rval = KERN_NOT_SUPPORTED); - pool = STAILQ_FIRST(&array->pools); - require_action(pool, finish, rval=KERN_FAILURE); + pool = STAILQ_FIRST(&array->pools); + require_action(pool, finish, rval = KERN_FAILURE); - dst = pool->buffer; - dst += idx * array->itemsize; + dst = pool->buffer; + dst += idx * array->itemsize; - src = pool->buffer; - src += ((idx + 1) * array->itemsize); + src = pool->buffer; + src += ((idx + 1) * array->itemsize); - nitems = pool->nitems - idx - 1; - memmove(dst, src, array->itemsize * nitems); + nitems = pool->nitems - idx - 1; + memmove(dst, src, array->itemsize * nitems); - --pool->nitems; - --array->nitems; - - dst = pool->buffer; - dst += pool->nitems * array->itemsize; - bzero(dst, array->itemsize); + --pool->nitems; + --array->nitems; - rval = KERN_SUCCESS; + dst = pool->buffer; + dst += pool->nitems * array->itemsize; + bzero(dst, array->itemsize); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } - diff --git a/libkern/kxld/kxld_array.h b/libkern/kxld/kxld_array.h index f73bc8607..ae01bdd16 100644 --- a/libkern/kxld/kxld_array.h +++ b/libkern/kxld/kxld_array.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_ARRAY_H_ @@ -32,7 +32,7 @@ #include #if KERNEL #include -#else +#else #include "kxld_types.h" #endif @@ -46,15 +46,15 @@ * one large array in the init function. * * A technique commonly used in kxld is to make an array of objects that -* themselves contain kxld_arrays. To minimize memory churn across links, only +* themselves contain kxld_arrays. To minimize memory churn across links, only * the individual objects contained in an array should be cleared at the end of -* each link, such that they are in a state ready for reinitialization with the -* memory they have already allocated. The array that contains them should not -* be cleared. After all links are complete, to ensure that all memory is -* properly freed, one should call kxld_array_get_slot to walk the entire -* allocated space of the array and clean up all potential instances contained -* therein. Since this technique is somewhat fragile, there are certain -* requirements that must be met, and guarantees that the array implementation +* each link, such that they are in a state ready for reinitialization with the +* memory they have already allocated. 
The array that contains them should not +* be cleared. After all links are complete, to ensure that all memory is +* properly freed, one should call kxld_array_get_slot to walk the entire +* allocated space of the array and clean up all potential instances contained +* therein. Since this technique is somewhat fragile, there are certain +* requirements that must be met, and guarantees that the array implementation * provides. * * Requirements: @@ -76,21 +76,21 @@ STAILQ_HEAD(kxld_array_head, kxld_array_pool); struct kxld_array { - struct kxld_array_head pools; - size_t itemsize; /* The size of the items that the array contains */ - size_t pool_capacity; /* The size of each pool's internal buffer */ - u_int pool_maxitems; /* The maximum number of items each pool can hold - * given the current size of each pool's buffer. - */ - u_int nitems; /* The current number of items this array contains */ - u_int maxitems; /* The maximum number of items this array can contain */ - u_int npools; /* The number of pools in the pool list */ + struct kxld_array_head pools; + size_t itemsize; /* The size of the items that the array contains */ + size_t pool_capacity; /* The size of each pool's internal buffer */ + u_int pool_maxitems; /* The maximum number of items each pool can hold + * given the current size of each pool's buffer. + */ + u_int nitems; /* The current number of items this array contains */ + u_int maxitems; /* The maximum number of items this array can contain */ + u_int npools; /* The number of pools in the pool list */ }; struct kxld_array_pool { - STAILQ_ENTRY(kxld_array_pool) entries; - u_char *buffer; /* The internal memory buffer */ - u_int nitems; /* The number of items the array contains */ + STAILQ_ENTRY(kxld_array_pool) entries; + u_char *buffer; /* The internal memory buffer */ + u_int nitems; /* The number of items the array contains */ }; typedef struct kxld_array KXLDArray; @@ -103,40 +103,40 @@ typedef struct kxld_array_pool KXLDArrayPool; /* Initializes the array's capacity to a minimum of nitems * itemsize */ kern_return_t kxld_array_init(KXLDArray *array, size_t itemsize, u_int nitems) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /* Performs a deep copy of the array */ kern_return_t kxld_array_copy(KXLDArray *array, const KXLDArray *src) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /* Sets the number of items in the array to 0 */ void kxld_array_reset(KXLDArray *array) - __attribute__((visibility("hidden"))); +__attribute__((visibility("hidden"))); /* Zeroes out the array and sets nitems to 0 */ void kxld_array_clear(KXLDArray *array) - __attribute__((visibility("hidden"))); +__attribute__((visibility("hidden"))); /* Frees the array's internal buffer */ void kxld_array_deinit(KXLDArray *array) - __attribute__((visibility("hidden"))); +__attribute__((visibility("hidden"))); /******************************************************************************* -* Accessors +* Accessors *******************************************************************************/ /* Returns the item at the specified index, or NULL if idx > nitems */ void *kxld_array_get_item(const KXLDArray *array, u_int idx) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /* Returns the item at the specified index, or NULL if idx > maxitems */ void *kxld_array_get_slot(const KXLDArray *array, u_int idx) - __attribute__((pure, nonnull, 
visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /* Returns the index of a specified item in the array */ -kern_return_t kxld_array_get_index(const KXLDArray *array, const void *item, +kern_return_t kxld_array_get_index(const KXLDArray *array, const void *item, u_int *idx) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Modifiers @@ -147,12 +147,12 @@ kern_return_t kxld_array_get_index(const KXLDArray *array, const void *item, * array. */ kern_return_t kxld_array_resize(KXLDArray *array, u_int nitems) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /* Removes an element from the array. This is only supported for arrays with * a single pool. */ kern_return_t kxld_array_remove(KXLDArray *array, u_int idx) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* _KXLD_ARRAY_H_ */ diff --git a/libkern/kxld/kxld_copyright.c b/libkern/kxld/kxld_copyright.c index e1f13c257..ca66c9ab9 100644 --- a/libkern/kxld/kxld_copyright.c +++ b/libkern/kxld/kxld_copyright.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,7 +35,7 @@ #include #include "kxld.h" #include "kxld_types.h" -#else +#else #include #include #include @@ -59,22 +59,22 @@ #include CFStringRef passes[] = { - CFSTR("Copyright © 2008 Apple Inc. All rights reserved."), - CFSTR("Copyright © 2004-2008 Apple Inc. All rights reserved."), - CFSTR("Copyright © 2004,2006 Apple Inc. All rights reserved."), - CFSTR("Copyright © 2004,2006-2008 Apple Inc. All rights reserved."), - CFSTR("Copyright © 2004 , 2006-2008 Apple Inc. All rights reserved."), - CFSTR("Copyright © 1998,2000-2002,2004,2006-2008 Apple Inc. All rights reserved."), - CFSTR("IOPCIFamily 2.1; Copyright © 2004,2006-2008 Apple Inc. All rights reserved."), - CFSTR("Copyright © 2004,2006-2008 Apple Inc. All rights reserved. The quick brown fox jumped over the lazy dog."), - CFSTR("IOPCIFamily 2.1; Copyright © 2004,2006-2008 Apple Inc. All rights reserved. The quick brown fox jumped over the lazy dog.") + CFSTR("Copyright © 2008 Apple Inc. All rights reserved."), + CFSTR("Copyright © 2004-2008 Apple Inc. All rights reserved."), + CFSTR("Copyright © 2004,2006 Apple Inc. All rights reserved."), + CFSTR("Copyright © 2004,2006-2008 Apple Inc. 
All rights reserved."), + CFSTR("Copyright © 2004 , 2006-2008 Apple Inc. All rights reserved."), + CFSTR("Copyright © 1998,2000-2002,2004,2006-2008 Apple Inc. All rights reserved."), + CFSTR("IOPCIFamily 2.1; Copyright © 2004,2006-2008 Apple Inc. All rights reserved."), + CFSTR("Copyright © 2004,2006-2008 Apple Inc. All rights reserved. The quick brown fox jumped over the lazy dog."), + CFSTR("IOPCIFamily 2.1; Copyright © 2004,2006-2008 Apple Inc. All rights reserved. The quick brown fox jumped over the lazy dog.") }; CFStringRef fails[] = { - CFSTR("Copyright © 2007-08 Apple Inc. All rights reserved."), - CFSTR("Copyright (c) 2007 Apple Inc. All rights reserved."), - CFSTR("Copyright © 2007- Apple Inc. All rights reserved."), - CFSTR("Copyright © 2007 - 2008 Apple Inc. All rights reserved.") + CFSTR("Copyright © 2007-08 Apple Inc. All rights reserved."), + CFSTR("Copyright (c) 2007 Apple Inc. All rights reserved."), + CFSTR("Copyright © 2007- Apple Inc. All rights reserved."), + CFSTR("Copyright © 2007 - 2008 Apple Inc. All rights reserved.") }; extern char *createUTF8CStringForCFString(CFStringRef aString); @@ -85,35 +85,35 @@ extern char *createUTF8CStringForCFString(CFStringRef aString); * Prototypes ******************************************************************************/ -static boolean_t is_space(const char c) - __attribute__((const)); +static boolean_t is_space(const char c) +__attribute__((const)); static boolean_t is_token_delimiter(const char c) - __attribute__((const)); -static boolean_t is_token_break(const char *str) - __attribute__((pure, nonnull)); +__attribute__((const)); +static boolean_t is_token_break(const char *str) +__attribute__((pure, nonnull)); static boolean_t token_is_year(const char *str) - __attribute__((pure, nonnull)); +__attribute__((pure, nonnull)); static boolean_t token_is_yearRange(const char *str) - __attribute__((pure, nonnull)); +__attribute__((pure, nonnull)); static boolean_t dates_are_valid(const char *str, const u_long len) - __attribute__((pure, nonnull)); +__attribute__((pure, nonnull)); /****************************************************************************** ******************************************************************************/ static boolean_t is_space(const char c) { - switch (c) { - case ' ': - case '\t': - case '\n': - case '\v': - case '\f': - case '\r': - return TRUE; - } - - return FALSE; + switch (c) { + case ' ': + case '\t': + case '\n': + case '\v': + case '\f': + case '\r': + return TRUE; + } + + return FALSE; } /****************************************************************************** @@ -121,7 +121,7 @@ is_space(const char c) static boolean_t is_token_delimiter(const char c) { - return (is_space(c) || (',' == c) || ('\0' == c)); + return is_space(c) || (',' == c) || ('\0' == c); } /****************************************************************************** @@ -129,12 +129,12 @@ is_token_delimiter(const char c) * not a token delimiter and the next character is a token delimiter. ******************************************************************************/ static boolean_t -is_token_break(const char *str) +is_token_break(const char *str) { - /* This is safe because '\0' is a token delimiter, so the second check - * will not execute if we reach the end of the string. - */ - return (!is_token_delimiter(str[0]) && is_token_delimiter(str[1])); + /* This is safe because '\0' is a token delimiter, so the second check + * will not execute if we reach the end of the string. 
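 * For example, in "2004,2006" a token break occurs at the '4' (the next
 * character is ',') and again at the final '6' (the next character is
 * the terminating '\0').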
+ */ + return !is_token_delimiter(str[0]) && is_token_delimiter(str[1]); } /****************************************************************************** @@ -145,18 +145,22 @@ is_token_break(const char *str) static boolean_t token_is_year(const char *str) { - boolean_t result = FALSE; - u_int i = 0; + boolean_t result = FALSE; + u_int i = 0; + + for (i = 0; i < kYearLen - 1; ++i) { + if (str[i] < '0' || str[i] > '9') { + goto finish; + } + } - for (i = 0; i < kYearLen - 1; ++i) { - if (str[i] < '0' || str[i] > '9') goto finish; - } + if (str[i] != '\0') { + goto finish; + } - if (str[i] != '\0') goto finish; - - result = TRUE; + result = TRUE; finish: - return result; + return result; } /****************************************************************************** @@ -167,24 +171,32 @@ finish: static boolean_t token_is_yearRange(const char *str) { - boolean_t result = FALSE; - u_int i = 0; - - for (i = 0; i < kYearLen - 1; ++i) { - if (str[i] < '0' || str[i] > '9') goto finish; - } - - if (str[i] != '-') goto finish; - - for (i = kYearLen; i < kYearRangeLen - 1; ++i) { - if (str[i] < '0' || str[i] > '9') goto finish; - } - - if (str[i] != '\0') goto finish; - - result = TRUE; + boolean_t result = FALSE; + u_int i = 0; + + for (i = 0; i < kYearLen - 1; ++i) { + if (str[i] < '0' || str[i] > '9') { + goto finish; + } + } + + if (str[i] != '-') { + goto finish; + } + + for (i = kYearLen; i < kYearRangeLen - 1; ++i) { + if (str[i] < '0' || str[i] > '9') { + goto finish; + } + } + + if (str[i] != '\0') { + goto finish; + } + + result = TRUE; finish: - return result; + return result; } /****************************************************************************** @@ -195,40 +207,45 @@ finish: static boolean_t dates_are_valid(const char *str, const u_long len) { - boolean_t result = FALSE; - const char *token_ptr = NULL; - char token_buffer[kYearRangeLen]; - u_int token_index = 0; - - token_index = 0; - for (token_ptr = str; token_ptr < str + len; ++token_ptr) { - if (is_token_delimiter(*token_ptr) && !token_index) continue; - - /* If we exceed the length of a year range, the test will not succeed, - * so just fail now. This limits the length of the token buffer that - * we have to keep around. - */ - if (token_index == kYearRangeLen) goto finish; - - token_buffer[token_index++] = *token_ptr; - if (is_token_break(token_ptr)) { - if (!token_index) continue; - - token_buffer[token_index] = '\0'; - - if (!token_is_year(token_buffer) && - !token_is_yearRange(token_buffer)) - { - goto finish; - } - - token_index = 0; - } - } - - result = TRUE; + boolean_t result = FALSE; + const char *token_ptr = NULL; + char token_buffer[kYearRangeLen]; + u_int token_index = 0; + + token_index = 0; + for (token_ptr = str; token_ptr < str + len; ++token_ptr) { + if (is_token_delimiter(*token_ptr) && !token_index) { + continue; + } + + /* If we exceed the length of a year range, the test will not succeed, + * so just fail now. This limits the length of the token buffer that + * we have to keep around. 
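 * (Example, assuming kYearRangeLen == sizeof("2004-2008"): a run such
 * as "2004-2008-2010" is rejected here as soon as it grows past one
 * full year range, before token_is_year()/token_is_yearRange() run.)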
+ */ + if (token_index == kYearRangeLen) { + goto finish; + } + + token_buffer[token_index++] = *token_ptr; + if (is_token_break(token_ptr)) { + if (!token_index) { + continue; + } + + token_buffer[token_index] = '\0'; + + if (!token_is_year(token_buffer) && + !token_is_yearRange(token_buffer)) { + goto finish; + } + + token_index = 0; + } + } + + result = TRUE; finish: - return result; + return result; } /****************************************************************************** @@ -237,39 +254,47 @@ finish: * 2) One or more years or year ranges, e.g., "2004,2006-2008" * 3) A rights reserved notice, "Apple Inc. All Rights Reserved." * We check the validity of the string by searching for both the copyright - +* * notice and the rights reserved notice. If both are found, we then check that * the text between the two notices contains only valid years and year ranges. ******************************************************************************/ -boolean_t +boolean_t kxld_validate_copyright_string(const char *str) { - boolean_t result = FALSE; - const char *copyright = NULL; - const char *rights = NULL; - char *date_str = NULL; - u_long len = 0; + boolean_t result = FALSE; + const char *copyright = NULL; + const char *rights = NULL; + char *date_str = NULL; + u_long len = 0; - copyright = kxld_strstr(str, kCopyrightToken); - rights = kxld_strstr(str, kRightsToken); + copyright = kxld_strstr(str, kCopyrightToken); + rights = kxld_strstr(str, kRightsToken); - if (!copyright || !rights || copyright > rights) goto finish; + if (!copyright || !rights || copyright > rights) { + goto finish; + } - str = copyright + const_strlen(kCopyrightToken); + str = copyright + const_strlen(kCopyrightToken); - len = rights - str; - date_str = kxld_alloc(len+1); - if (!date_str) goto finish; + len = rights - str; + date_str = kxld_alloc(len + 1); + if (!date_str) { + goto finish; + } - strncpy(date_str, str, len); - date_str[len] = '\0'; + strncpy(date_str, str, len); + date_str[len] = '\0'; - if (!dates_are_valid(date_str, len)) goto finish; + if (!dates_are_valid(date_str, len)) { + goto finish; + } - result = TRUE; + result = TRUE; finish: - if (date_str) kxld_free(date_str, len+1); - return result; + if (date_str) { + kxld_free(date_str, len + 1); + } + return result; } #if TEST @@ -279,39 +304,42 @@ finish: int main(int argc __unused, char *argv[] __unused) { - int result = 1; - CFStringRef the_string = NULL; - const char *str = NULL; - u_int i = 0; + int result = 1; + CFStringRef the_string = NULL; + const char *str = NULL; + u_int i = 0; - printf("The following %lu strings should pass\n", - const_array_len(passes)); + printf("The following %lu strings should pass\n", + const_array_len(passes)); - for (i = 0; i < const_array_len(passes); ++i) { - the_string = passes[i]; - str = createUTF8CStringForCFString(the_string); - if (!str) goto finish; + for (i = 0; i < const_array_len(passes); ++i) { + the_string = passes[i]; + str = createUTF8CStringForCFString(the_string); + if (!str) { + goto finish; + } - printf("%s: %s\n", - (kxld_validate_copyright_string(str)) ? "pass" : "fail", str); - } + printf("%s: %s\n", + (kxld_validate_copyright_string(str)) ? 
"pass" : "fail", str); + } - printf("\nThe following %lu strings should fail\n", - const_array_len(fails)); + printf("\nThe following %lu strings should fail\n", + const_array_len(fails)); - for (i = 0; i < const_array_len(fails); ++i) { - the_string = fails[i]; - str = createUTF8CStringForCFString(the_string); - if (!str) goto finish; + for (i = 0; i < const_array_len(fails); ++i) { + the_string = fails[i]; + str = createUTF8CStringForCFString(the_string); + if (!str) { + goto finish; + } - printf("%s: %s\n", - (kxld_validate_copyright_string(str)) ? "pass" : "fail", str); - } + printf("%s: %s\n", + (kxld_validate_copyright_string(str)) ? "pass" : "fail", str); + } - result = 0; + result = 0; finish: - return result; + return result; } #endif /* TEST */ - diff --git a/libkern/kxld/kxld_demangle.c b/libkern/kxld/kxld_demangle.c index 3b0dffe5e..67b7bc697 100644 --- a/libkern/kxld/kxld_demangle.c +++ b/libkern/kxld/kxld_demangle.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,9 +33,9 @@ /* This demangler is part of the C++ ABI. We don't include it directly from * so that we can avoid using C++ in the kernel linker. */ -extern char * +extern char * __cxa_demangle(const char* __mangled_name, char* __output_buffer, - size_t* __length, int* __status); + size_t* __length, int* __status); #endif /* !KERNEL */ @@ -47,26 +47,29 @@ const char * kxld_demangle(const char *str, char **buffer __unused, size_t *length __unused) { #if KERNEL - return str; + return str; #else - const char *rval = NULL; - char *demangled = NULL; - int status; + const char *rval = NULL; + char *demangled = NULL; + int status; + + rval = str; - rval = str; + if (!buffer || !length) { + goto finish; + } - if (!buffer || !length) goto finish; + /* Symbol names in the symbol table have an extra '_' prepended to them, + * so we skip the first character to make the demangler happy. + */ + demangled = __cxa_demangle(str + 1, *buffer, length, &status); + if (!demangled || status) { + goto finish; + } - /* Symbol names in the symbol table have an extra '_' prepended to them, - * so we skip the first character to make the demangler happy. 
- */ - demangled = __cxa_demangle(str+1, *buffer, length, &status); - if (!demangled || status) goto finish; - - *buffer = demangled; - rval = demangled; + *buffer = demangled; + rval = demangled; finish: - return rval; + return rval; #endif } - diff --git a/libkern/kxld/kxld_demangle.h b/libkern/kxld/kxld_demangle.h index a5250ab49..116b0ffbb 100644 --- a/libkern/kxld/kxld_demangle.h +++ b/libkern/kxld/kxld_demangle.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,21 +32,21 @@ #include /* @function kxld_demangle - - * @abstract Demangles c++ symbols. - * + * + * @abstract Demangles c++ symbols. + * * @param str The C-string to be demangled. * @param buffer A pointer to a character buffer for storing the result. * If NULL, a buffer will be malloc'd and stored here. * If the buffer is not large enough, it will be realloc'd. * * @param length The length of the buffer. - * + * * @result If the input string could be demangled, it returns the * demangled string. Otherwise, returns the input string. - * + * */ const char * kxld_demangle(const char *str, char **buffer, size_t *length) - __attribute__((pure, nonnull(1), visibility("hidden"))); +__attribute__((pure, nonnull(1), visibility("hidden"))); #endif /* !_KXLD_DEMANGLE_H_ */ diff --git a/libkern/kxld/kxld_dict.c b/libkern/kxld/kxld_dict.c index 95588a523..9b5bcb8f3 100644 --- a/libkern/kxld/kxld_dict.c +++ b/libkern/kxld/kxld_dict.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -42,7 +42,7 @@ #define RESIZE_NUMER 7 #define RESIZE_DENOM 10 #define RESIZE_THRESHOLD(x) (((x)*RESIZE_NUMER) / RESIZE_DENOM) -#define MIN_BUCKETS(x) (((x)*RESIZE_DENOM) / RESIZE_NUMER) +#define MIN_BUCKETS(x) (((x)*RESIZE_DENOM) / RESIZE_NUMER) /* Selected for good scaling qualities when resizing dictionary * ... see: http://www.concentric.net/~ttwang/tech/hashsize.htm @@ -52,63 +52,63 @@ typedef struct dict_entry DictEntry; typedef enum { - EMPTY = 0, - USED = 1, - DELETED = 2 + EMPTY = 0, + USED = 1, + DELETED = 2 } DictEntryState; struct dict_entry { - const void *key; - void *value; - DictEntryState state; + const void *key; + void *value; + DictEntryState state; }; /******************************************************************************* * Function prototypes *******************************************************************************/ -static kern_return_t get_locate_index(const KXLDDict *dict, const void *key, +static kern_return_t get_locate_index(const KXLDDict *dict, const void *key, u_int *idx); -static kern_return_t get_insert_index(const KXLDDict *dict, const void *key, +static kern_return_t get_insert_index(const KXLDDict *dict, const void *key, u_int *idx); static kern_return_t resize_dict(KXLDDict *dict); /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_dict_init(KXLDDict * dict, kxld_dict_hash hash, kxld_dict_cmp cmp, - u_int num_entries) +kxld_dict_init(KXLDDict * dict, kxld_dict_hash hash, kxld_dict_cmp cmp, + u_int num_entries) { - kern_return_t rval = KERN_FAILURE; - u_int min_buckets = MIN_BUCKETS(num_entries); - u_int num_buckets = DEFAULT_DICT_SIZE; - - check(dict); - check(hash); - check(cmp); - - /* We want the number of allocated buckets to be at least twice that of the - * number to be inserted. - */ - while (min_buckets > num_buckets) { - num_buckets *= 2; - num_buckets++; - } - - /* Allocate enough buckets for the anticipated number of entries */ - rval = kxld_array_init(&dict->buckets, sizeof(DictEntry), num_buckets); - require_noerr(rval, finish); - - /* Initialize */ - dict->hash = hash; - dict->cmp = cmp; - dict->num_entries = 0; - dict->resize_threshold = RESIZE_THRESHOLD(num_buckets); - - rval = KERN_SUCCESS; - + kern_return_t rval = KERN_FAILURE; + u_int min_buckets = MIN_BUCKETS(num_entries); + u_int num_buckets = DEFAULT_DICT_SIZE; + + check(dict); + check(hash); + check(cmp); + + /* We want the number of allocated buckets to be at least twice that of the + * number to be inserted. 
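 * (Worked example: with RESIZE_NUMER/RESIZE_DENOM == 7/10, a request
 * for num_entries == 100 gives min_buckets == MIN_BUCKETS(100) == 142,
 * so num_buckets is grown 2n+1 from DEFAULT_DICT_SIZE until it reaches
 * at least 142, keeping the load factor below 7/10 from the start.)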
+ */ + while (min_buckets > num_buckets) { + num_buckets *= 2; + num_buckets++; + } + + /* Allocate enough buckets for the anticipated number of entries */ + rval = kxld_array_init(&dict->buckets, sizeof(DictEntry), num_buckets); + require_noerr(rval, finish); + + /* Initialize */ + dict->hash = hash; + dict->cmp = cmp; + dict->num_entries = 0; + dict->resize_threshold = RESIZE_THRESHOLD(num_buckets); + + rval = KERN_SUCCESS; + finish: - return rval; + return rval; } /******************************************************************************* @@ -116,14 +116,14 @@ finish: void kxld_dict_clear(KXLDDict *dict) { - check(dict); - - dict->hash = NULL; - dict->cmp = NULL; - dict->num_entries = 0; - dict->resize_threshold = 0; - kxld_array_clear(&dict->buckets); - kxld_array_clear(&dict->resize_buckets); + check(dict); + + dict->hash = NULL; + dict->cmp = NULL; + dict->num_entries = 0; + dict->resize_threshold = 0; + kxld_array_clear(&dict->buckets); + kxld_array_clear(&dict->resize_buckets); } /******************************************************************************* @@ -131,11 +131,11 @@ kxld_dict_clear(KXLDDict *dict) void kxld_dict_iterator_init(KXLDDictIterator *iter, const KXLDDict *dict) { - check(iter); - check(dict); + check(iter); + check(dict); - iter->idx = 0; - iter->dict = dict; + iter->idx = 0; + iter->dict = dict; } /******************************************************************************* @@ -143,10 +143,10 @@ kxld_dict_iterator_init(KXLDDictIterator *iter, const KXLDDict *dict) void kxld_dict_deinit(KXLDDict *dict) { - check(dict); - - kxld_array_deinit(&dict->buckets); - kxld_array_deinit(&dict->resize_buckets); + check(dict); + + kxld_array_deinit(&dict->buckets); + kxld_array_deinit(&dict->resize_buckets); } /******************************************************************************* @@ -154,9 +154,9 @@ kxld_dict_deinit(KXLDDict *dict) u_int kxld_dict_get_num_entries(const KXLDDict *dict) { - check(dict); + check(dict); - return dict->num_entries; + return dict->num_entries; } /******************************************************************************* @@ -164,56 +164,62 @@ kxld_dict_get_num_entries(const KXLDDict *dict) void * kxld_dict_find(const KXLDDict *dict, const void *key) { - kern_return_t rval = KERN_FAILURE; - DictEntry *entry = NULL; - u_int idx = 0; - - check(dict); - check(key); - - rval = get_locate_index(dict, key, &idx); - if (rval) return NULL; - - entry = kxld_array_get_item(&dict->buckets, idx); - - return entry->value; + kern_return_t rval = KERN_FAILURE; + DictEntry *entry = NULL; + u_int idx = 0; + + check(dict); + check(key); + + rval = get_locate_index(dict, key, &idx); + if (rval) { + return NULL; + } + + entry = kxld_array_get_item(&dict->buckets, idx); + + return entry->value; } /******************************************************************************* -* This dictionary uses linear probing, which means that when there is a -* collision, we just walk along the buckets until a free bucket shows up. -* A consequence of this is that when looking up an item, items that lie between -* its hash value and its actual bucket may have been deleted since it was -* inserted. Thus, we should only stop a lookup when we've wrapped around the -* dictionary or encountered an EMPTY bucket. -********************************************************************************/ + * This dictionary uses linear probing, which means that when there is a + * collision, we just walk along the buckets until a free bucket shows up. 
+ * A consequence of this is that when looking up an item, items that lie between + * its hash value and its actual bucket may have been deleted since it was + * inserted. Thus, we should only stop a lookup when we've wrapped around the + * dictionary or encountered an EMPTY bucket. + ********************************************************************************/ static kern_return_t get_locate_index(const KXLDDict *dict, const void *key, u_int *_idx) { - kern_return_t rval = KERN_FAILURE; - DictEntry *entry = NULL; - u_int base, idx; + kern_return_t rval = KERN_FAILURE; + DictEntry *entry = NULL; + u_int base, idx; - base = idx = dict->hash(dict, key); - - /* Iterate until we match the key, wrap, or hit an empty bucket */ - entry = kxld_array_get_item(&dict->buckets, idx); - while (!dict->cmp(entry->key, key)) { - if (entry->state == EMPTY) goto finish; + base = idx = dict->hash(dict, key); - idx = (idx + 1) % dict->buckets.nitems; - if (idx == base) goto finish; + /* Iterate until we match the key, wrap, or hit an empty bucket */ + entry = kxld_array_get_item(&dict->buckets, idx); + while (!dict->cmp(entry->key, key)) { + if (entry->state == EMPTY) { + goto finish; + } - entry = kxld_array_get_item(&dict->buckets, idx); - } + idx = (idx + 1) % dict->buckets.nitems; + if (idx == base) { + goto finish; + } - check(idx < dict->buckets.nitems); + entry = kxld_array_get_item(&dict->buckets, idx); + } - *_idx = idx; - rval = KERN_SUCCESS; + check(idx < dict->buckets.nitems); + + *_idx = idx; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -221,44 +227,44 @@ finish: kern_return_t kxld_dict_insert(KXLDDict *dict, const void *key, void *value) { - kern_return_t rval = KERN_FAILURE; - DictEntry *entry = NULL; - u_int idx = 0; - - check(dict); - check(key); - check(value); - - /* Resize if we are greater than the capacity threshold. - * Note: this is expensive, but the dictionary can be sized correctly at - * construction to avoid ever having to do this. - */ - while (dict->num_entries > dict->resize_threshold) { - rval = resize_dict(dict); - require_noerr(rval, finish); - } - - /* If this function returns FULL after we've already resized appropriately - * something is very wrong and we should return an error. - */ - rval = get_insert_index(dict, key, &idx); - require_noerr(rval, finish); - - /* Insert the new key-value pair into the bucket, but only count it as a - * new entry if we are not overwriting an existing entry. - */ - entry = kxld_array_get_item(&dict->buckets, idx); - if (entry->state != USED) { - dict->num_entries++; - entry->key = key; - entry->state = USED; - } - entry->value = value; - - rval = KERN_SUCCESS; - + kern_return_t rval = KERN_FAILURE; + DictEntry *entry = NULL; + u_int idx = 0; + + check(dict); + check(key); + check(value); + + /* Resize if we are greater than the capacity threshold. + * Note: this is expensive, but the dictionary can be sized correctly at + * construction to avoid ever having to do this. + */ + while (dict->num_entries > dict->resize_threshold) { + rval = resize_dict(dict); + require_noerr(rval, finish); + } + + /* If this function returns FULL after we've already resized appropriately + * something is very wrong and we should return an error. 
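 * (After the resize loop above, num_entries <= resize_threshold, i.e.
 * the load factor is at most RESIZE_NUMER/RESIZE_DENOM < 1, so linear
 * probing must hit an EMPTY or DELETED bucket before it wraps around.)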
+ */ + rval = get_insert_index(dict, key, &idx); + require_noerr(rval, finish); + + /* Insert the new key-value pair into the bucket, but only count it as a + * new entry if we are not overwriting an existing entry. + */ + entry = kxld_array_get_item(&dict->buckets, idx); + if (entry->state != USED) { + dict->num_entries++; + entry->key = key; + entry->state = USED; + } + entry->value = value; + + rval = KERN_SUCCESS; + finish: - return rval; + return rval; } /******************************************************************************* @@ -268,43 +274,43 @@ finish: static kern_return_t resize_dict(KXLDDict *dict) { - kern_return_t rval = KERN_FAILURE; - KXLDArray tmparray; - DictEntry *entry = NULL; - u_int nbuckets = (dict->buckets.nitems * 2 + 1); - u_int i = 0; - - check(dict); - - /* Initialize a new set of buckets to hold more entries */ - rval = kxld_array_init(&dict->resize_buckets, sizeof(DictEntry), nbuckets); - require_noerr(rval, finish); - - /* Swap the new buckets with the old buckets */ - tmparray = dict->buckets; - dict->buckets = dict->resize_buckets; - dict->resize_buckets = tmparray; - - /* Reset dictionary parameters */ - dict->num_entries = 0; - dict->resize_threshold = RESIZE_THRESHOLD(dict->buckets.nitems); - - /* Rehash all of the entries */ - for (i = 0; i < dict->resize_buckets.nitems; ++i) { - entry = kxld_array_get_item(&dict->resize_buckets, i); - if (entry->state == USED) { - rval = kxld_dict_insert(dict, entry->key, entry->value); - require_noerr(rval, finish); - } - } - - /* Clear the old buckets */ - kxld_array_clear(&dict->resize_buckets); - - rval = KERN_SUCCESS; - + kern_return_t rval = KERN_FAILURE; + KXLDArray tmparray; + DictEntry *entry = NULL; + u_int nbuckets = (dict->buckets.nitems * 2 + 1); + u_int i = 0; + + check(dict); + + /* Initialize a new set of buckets to hold more entries */ + rval = kxld_array_init(&dict->resize_buckets, sizeof(DictEntry), nbuckets); + require_noerr(rval, finish); + + /* Swap the new buckets with the old buckets */ + tmparray = dict->buckets; + dict->buckets = dict->resize_buckets; + dict->resize_buckets = tmparray; + + /* Reset dictionary parameters */ + dict->num_entries = 0; + dict->resize_threshold = RESIZE_THRESHOLD(dict->buckets.nitems); + + /* Rehash all of the entries */ + for (i = 0; i < dict->resize_buckets.nitems; ++i) { + entry = kxld_array_get_item(&dict->resize_buckets, i); + if (entry->state == USED) { + rval = kxld_dict_insert(dict, entry->key, entry->value); + require_noerr(rval, finish); + } + } + + /* Clear the old buckets */ + kxld_array_clear(&dict->resize_buckets); + + rval = KERN_SUCCESS; + finish: - return rval; + return rval; } /******************************************************************************* @@ -313,27 +319,27 @@ finish: static kern_return_t get_insert_index(const KXLDDict *dict, const void *key, u_int *r_index) { - kern_return_t rval = KERN_FAILURE; - DictEntry *entry = NULL; - u_int base, idx; - - base = idx = dict->hash(dict, key); - - /* Iterate through the buckets until we find an EMPTY bucket, a DELETED - * bucket, or a key match. 
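 * (Unlike get_locate_index, the probe may stop at a DELETED bucket:
 * reusing tombstones for new insertions keeps probe chains from
 * growing without bound after heavy remove traffic.)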
- */ - entry = kxld_array_get_item(&dict->buckets, idx); - while (entry->state == USED && !dict->cmp(entry->key, key)) { - idx = (idx + 1) % dict->buckets.nitems; - require_action(base != idx, finish, rval=KERN_FAILURE); - entry = kxld_array_get_item(&dict->buckets, idx); - } - - *r_index = idx; - rval = KERN_SUCCESS; - + kern_return_t rval = KERN_FAILURE; + DictEntry *entry = NULL; + u_int base, idx; + + base = idx = dict->hash(dict, key); + + /* Iterate through the buckets until we find an EMPTY bucket, a DELETED + * bucket, or a key match. + */ + entry = kxld_array_get_item(&dict->buckets, idx); + while (entry->state == USED && !dict->cmp(entry->key, key)) { + idx = (idx + 1) % dict->buckets.nitems; + require_action(base != idx, finish, rval = KERN_FAILURE); + entry = kxld_array_get_item(&dict->buckets, idx); + } + + *r_index = idx; + rval = KERN_SUCCESS; + finish: - return rval; + return rval; } /******************************************************************************* @@ -341,65 +347,69 @@ finish: void kxld_dict_remove(KXLDDict *dict, const void *key, void **value) { - kern_return_t rval = KERN_FAILURE; - DictEntry *entry = NULL; - u_int idx = 0; - - check(dict); - check(key); - - /* Find the item */ - rval = get_locate_index(dict, key, &idx); - if (rval) { - if (value) *value = NULL; - return; - } - - entry = kxld_array_get_item(&dict->buckets, idx); - - /* Save the value if requested */ - if (value) *value = entry->value; - - /* Delete the item from the dictionary */ - entry->key = NULL; - entry->value = NULL; - entry->state = DELETED; - dict->num_entries--; + kern_return_t rval = KERN_FAILURE; + DictEntry *entry = NULL; + u_int idx = 0; + + check(dict); + check(key); + + /* Find the item */ + rval = get_locate_index(dict, key, &idx); + if (rval) { + if (value) { + *value = NULL; + } + return; + } + + entry = kxld_array_get_item(&dict->buckets, idx); + + /* Save the value if requested */ + if (value) { + *value = entry->value; + } + + /* Delete the item from the dictionary */ + entry->key = NULL; + entry->value = NULL; + entry->state = DELETED; + dict->num_entries--; } /******************************************************************************* *******************************************************************************/ -void -kxld_dict_iterator_get_next(KXLDDictIterator *iter, const void **key, +void +kxld_dict_iterator_get_next(KXLDDictIterator *iter, const void **key, void **value) { - DictEntry *entry = NULL; - - check(iter); - check(key); - check(value); - - *key = NULL; - *value = NULL; - - /* Walk over the dictionary looking for USED buckets */ - for (; iter->idx < iter->dict->buckets.nitems; ++(iter->idx)) { - entry = kxld_array_get_item(&iter->dict->buckets, iter->idx); - if (entry->state == USED) { - *key = entry->key; - *value = entry->value; - ++(iter->idx); - break; - } - } + DictEntry *entry = NULL; + + check(iter); + check(key); + check(value); + + *key = NULL; + *value = NULL; + + /* Walk over the dictionary looking for USED buckets */ + for (; iter->idx < iter->dict->buckets.nitems; ++(iter->idx)) { + entry = kxld_array_get_item(&iter->dict->buckets, iter->idx); + if (entry->state == USED) { + *key = entry->key; + *value = entry->value; + ++(iter->idx); + break; + } + } } /******************************************************************************* *******************************************************************************/ -void +void kxld_dict_iterator_reset(KXLDDictIterator *iter) { - iter->idx = 0; + iter->idx = 0; } 
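Taken together, the routines above form the dictionary's whole public surface. A hedged usage sketch (error handling elided; the entry-count hint 16 and the demo key/value are arbitrary illustrations, and keys are stored by pointer, so they must outlive the table):

	#include "kxld_dict.h"

	static void
	mini_dict_demo(void)
	{
		KXLDDict dict;
		KXLDDictIterator iter;
		const void *key = NULL;
		void *value = NULL;
		int one = 1;

		/* String-keyed table sized for roughly 16 entries */
		(void) kxld_dict_init(&dict, kxld_dict_string_hash,
		    kxld_dict_string_cmp, 16);

		/* Insert overwrites the value if the key already exists */
		(void) kxld_dict_insert(&dict, "one", &one);

		/* find returns the stored value, or NULL when absent */
		value = kxld_dict_find(&dict, "one");

		/* Walk every USED bucket; key comes back NULL at the end */
		kxld_dict_iterator_init(&iter, &dict);
		for (;;) {
			kxld_dict_iterator_get_next(&iter, &key, &value);
			if (!key) {
				break;
			}
			/* ... use key and value here ... */
		}

		kxld_dict_deinit(&dict);
	}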
/******************************************************************************* @@ -408,64 +418,63 @@ kxld_dict_iterator_reset(KXLDDictIterator *iter) * NOTE: Will not check for a valid pointer - performance *******************************************************************************/ u_int -kxld_dict_string_hash(const KXLDDict *dict, const void *_key) +kxld_dict_string_hash(const KXLDDict *dict, const void *_key) { - const char *key = _key; - u_int c = 0; - u_int hash_val = 5381; - - check(dict); - check(_key); - - while ((c = *key++)) { - /* hash(i) = hash(i-1) *33 ^ name[i] */ - hash_val = ((hash_val << 5) + hash_val) ^ c; - } - - return (hash_val % dict->buckets.nitems); + const char *key = _key; + u_int c = 0; + u_int hash_val = 5381; + + check(dict); + check(_key); + + while ((c = *key++)) { + /* hash(i) = hash(i-1) *33 ^ name[i] */ + hash_val = ((hash_val << 5) + hash_val) ^ c; + } + + return hash_val % dict->buckets.nitems; } u_int kxld_dict_uint32_hash(const KXLDDict *dict, const void *_key) { - uint32_t key = *(const uint32_t *) _key; + uint32_t key = *(const uint32_t *) _key; - check(_key); + check(_key); - return (u_int) (key % dict->buckets.nitems); + return (u_int) (key % dict->buckets.nitems); } u_int kxld_dict_kxldaddr_hash(const KXLDDict *dict, const void *_key) { - kxld_addr_t key = *(const kxld_addr_t *) _key; + kxld_addr_t key = *(const kxld_addr_t *) _key; - check(_key); + check(_key); - return (u_int) (key % dict->buckets.nitems); + return (u_int) (key % dict->buckets.nitems); } u_int kxld_dict_string_cmp(const void *key1, const void *key2) { - return streq(key1, key2); + return streq(key1, key2); } u_int kxld_dict_uint32_cmp(const void *key1, const void *key2) { - const uint32_t *a = key1; - const uint32_t *b = key2; + const uint32_t *a = key1; + const uint32_t *b = key2; - return (a && b && (*a == *b)); + return a && b && (*a == *b); } u_int kxld_dict_kxldaddr_cmp(const void *key1, const void *key2) { - const kxld_addr_t *a = key1; - const kxld_addr_t *b = key2; + const kxld_addr_t *a = key1; + const kxld_addr_t *b = key2; - return (a && b && (*a == *b)); + return a && b && (*a == *b); } - diff --git a/libkern/kxld/kxld_dict.h b/libkern/kxld/kxld_dict.h index 739adfe6a..b332d0f32 100644 --- a/libkern/kxld/kxld_dict.h +++ b/libkern/kxld/kxld_dict.h @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_DICT_H_ @@ -58,17 +58,17 @@ typedef u_int (*kxld_dict_hash)(const KXLDDict *dict, const void *key); typedef u_int (*kxld_dict_cmp)(const void *key1, const void *key2); struct kxld_dict { - KXLDArray buckets; // The array of buckets - KXLDArray resize_buckets; // A helper array for resizing - kxld_dict_hash hash; // Hash function - kxld_dict_cmp cmp; // Comparison function - u_int num_entries; // Num entries in the dictionary - u_int resize_threshold; // Num entries we must reach to cause a resize + KXLDArray buckets; // The array of buckets + KXLDArray resize_buckets; // A helper array for resizing + kxld_dict_hash hash; // Hash function + kxld_dict_cmp cmp; // Comparison function + u_int num_entries; // Num entries in the dictionary + u_int resize_threshold; // Num entries we must reach to cause a resize }; struct kxld_dict_iterator { - u_int idx; - const KXLDDict *dict; + u_int idx; + const KXLDDict *dict; }; /******************************************************************************* @@ -78,37 +78,37 @@ struct kxld_dict_iterator { /* Initializes a new dictionary object. * num_entries is a hint to the maximum number of entries that will be inserted */ -kern_return_t kxld_dict_init(KXLDDict *dict, kxld_dict_hash hash, - kxld_dict_cmp cmp, u_int num_entries) - __attribute__((nonnull, visibility("hidden"))); +kern_return_t kxld_dict_init(KXLDDict *dict, kxld_dict_hash hash, + kxld_dict_cmp cmp, u_int num_entries) +__attribute__((nonnull, visibility("hidden"))); /* Initializes a new dictionary iterator */ void kxld_dict_iterator_init(KXLDDictIterator *iter, const KXLDDict *dict) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /* Removes all entries from the dictionary. The dictionary must be * reinitialized before it can be used again. */ -void kxld_dict_clear(KXLDDict *dict) - __attribute__((nonnull, visibility("hidden"))); +void kxld_dict_clear(KXLDDict *dict) +__attribute__((nonnull, visibility("hidden"))); /* Destroys a dictionary and all of its entries */ -void kxld_dict_deinit(KXLDDict *dict) - __attribute__((nonnull, visibility("hidden"))); +void kxld_dict_deinit(KXLDDict *dict) +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors *******************************************************************************/ - + /* Returns the number of entries in the dictionary */ u_int kxld_dict_get_num_entries(const KXLDDict *dict) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /* Finds a key-value pair and assigns the value to the 'value' pointer, or NULL * when not found. */ void * kxld_dict_find(const KXLDDict *dict, const void *key) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /******************************************************************************* * Modifiers @@ -118,40 +118,40 @@ void * kxld_dict_find(const KXLDDict *dict, const void *key) * is already in the table. */ kern_return_t kxld_dict_insert(KXLDDict *dict, const void *key, void *value) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /* Removes a key-value pair and assigns the value to the 'value' pointer. * 'value' pointer will be set to NULL if value to be removed is not found. * 'value pointer may be NULL if removed value is not needed. 
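 * Example: kxld_dict_remove(dict, key, NULL) just drops the entry,
 * while kxld_dict_remove(dict, key, &val) also hands the value back
 * (val is set to NULL when the key was not present).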
*/ void kxld_dict_remove(KXLDDict *dict, const void *key, void **value) - __attribute__((nonnull(1,2),visibility("hidden"))); +__attribute__((nonnull(1, 2), visibility("hidden"))); /* Gets the next item in the dictionary */ -void kxld_dict_iterator_get_next(KXLDDictIterator *iter, const void **key, +void kxld_dict_iterator_get_next(KXLDDictIterator *iter, const void **key, void **value) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /* Resets the iterator to the first item in the dictionary */ void kxld_dict_iterator_reset(KXLDDictIterator *iter) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Helpers *******************************************************************************/ u_int kxld_dict_string_hash(const KXLDDict *dict, const void *key) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); u_int kxld_dict_uint32_hash(const KXLDDict *dict, const void *key) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); u_int kxld_dict_kxldaddr_hash(const KXLDDict *dict, const void *key) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); u_int kxld_dict_string_cmp(const void *key1, const void *key2) - __attribute__((pure, visibility("hidden"))); +__attribute__((pure, visibility("hidden"))); u_int kxld_dict_uint32_cmp(const void *key1, const void *key2) - __attribute__((pure, visibility("hidden"))); +__attribute__((pure, visibility("hidden"))); u_int kxld_dict_kxldaddr_cmp(const void *key1, const void *key2) - __attribute__((pure, visibility("hidden"))); +__attribute__((pure, visibility("hidden"))); #endif /* _KXLD_DICT_H_ */ diff --git a/libkern/kxld/kxld_kext.c b/libkern/kxld/kxld_kext.c index 06b57fe66..eafc9d591 100644 --- a/libkern/kxld/kxld_kext.c +++ b/libkern/kxld/kxld_kext.c @@ -2,7 +2,7 @@ * Copyright (c) 2008, 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -67,12 +67,12 @@ extern boolean_t isSplitKext; struct symtab_command; struct kxld_kext { - KXLDObject *kext; - KXLDObject *interface; - KXLDArray vtables; - KXLDDict vtable_index; - boolean_t vtables_created; - boolean_t vtable_index_created; + KXLDObject *kext; + KXLDObject *interface; + KXLDArray vtables; + KXLDDict vtable_index; + boolean_t vtables_created; + boolean_t vtable_index_created; }; /******************************************************************************* @@ -80,28 +80,28 @@ struct kxld_kext { *******************************************************************************/ static kern_return_t export_symbols_through_interface( - const KXLDObject *kext, const KXLDObject *interface, - KXLDDict *defined_symbols_by_name, - KXLDDict *defined_cxx_symbol_by_value, - KXLDDict *obsolete_symbols_by_name); + const KXLDObject *kext, const KXLDObject *interface, + KXLDDict *defined_symbols_by_name, + KXLDDict *defined_cxx_symbol_by_value, + KXLDDict *obsolete_symbols_by_name); static kern_return_t export_symbols(const KXLDObject *kext, - KXLDDict *defined_symbols_by_name, + KXLDDict *defined_symbols_by_name, KXLDDict *defined_cxx_symbols_by_value); static kern_return_t create_vtables(KXLDKext *kext, const KXLDDict *defined_symbols, const KXLDDict *defined_cxx_symbols); -static kern_return_t get_vtable_syms_from_smcp(KXLDKext *kext, - const KXLDDict *defined_symbols, KXLDSym *super_metaclass_ptr_sym, +static kern_return_t get_vtable_syms_from_smcp(KXLDKext *kext, + const KXLDDict *defined_symbols, KXLDSym *super_metaclass_ptr_sym, KXLDSym **vtable_sym_out, KXLDSym **meta_vtable_sym_out); -static kern_return_t resolve_symbols(KXLDKext *kext, +static kern_return_t resolve_symbols(KXLDKext *kext, const KXLDDict *defined_symbols, const KXLDDict *obsolete_symbols); static kern_return_t patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables, const KXLDDict *defined_symbols); static kern_return_t create_vtable_index(KXLDKext *kext); static const KXLDSym *get_metaclass_symbol_from_super_meta_class_pointer_symbol( - KXLDKext *kext, KXLDSym *super_metaclass_pointer_sym); + KXLDKext *kext, KXLDSym *super_metaclass_pointer_sym); static kern_return_t validate_symbols(KXLDKext *kext); @@ -110,32 +110,32 @@ static kern_return_t validate_symbols(KXLDKext *kext); size_t kxld_kext_sizeof(void) { - return sizeof(KXLDKext); + return sizeof(KXLDKext); } /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_kext_init(KXLDKext *kext, KXLDObject *kext_object, +kxld_kext_init(KXLDKext *kext, KXLDObject *kext_object, KXLDObject *interface_object) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(kext); - check(kext_object); + check(kext); + check(kext_object); - kext->kext = kext_object; + kext->kext = kext_object; - if (interface_object) { - kext->interface = interface_object; + if (interface_object) { + kext->interface = interface_object; - rval = kxld_object_index_symbols_by_name(kext->kext); - require_noerr(rval, finish); - } - - rval = KERN_SUCCESS; + rval = kxld_object_index_symbols_by_name(kext->kext); + require_noerr(rval, finish); + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -143,71 +143,71 @@ finish: void kxld_kext_clear(KXLDKext *kext) { - KXLDVTable *vtable = NULL; - 
u_int i; - - check(kext); - - for (i = 0; i < kext->vtables.nitems; ++i) { - vtable = kxld_array_get_item(&kext->vtables, i); - kxld_vtable_clear(vtable); - } - kxld_array_reset(&kext->vtables); - kxld_dict_clear(&kext->vtable_index); - - kext->kext = NULL; - kext->interface = NULL; - kext->vtables_created = FALSE; - kext->vtable_index_created = FALSE; + KXLDVTable *vtable = NULL; + u_int i; + + check(kext); + + for (i = 0; i < kext->vtables.nitems; ++i) { + vtable = kxld_array_get_item(&kext->vtables, i); + kxld_vtable_clear(vtable); + } + kxld_array_reset(&kext->vtables); + kxld_dict_clear(&kext->vtable_index); + + kext->kext = NULL; + kext->interface = NULL; + kext->vtables_created = FALSE; + kext->vtable_index_created = FALSE; } /******************************************************************************* *******************************************************************************/ -void +void kxld_kext_deinit(KXLDKext *kext) { - KXLDVTable *vtable = NULL; - u_int i; + KXLDVTable *vtable = NULL; + u_int i; - check(kext); + check(kext); - for (i = 0; i < kext->vtables.maxitems; ++i) { - vtable = kxld_array_get_slot(&kext->vtables, i); - kxld_vtable_deinit(vtable); - } - kxld_array_deinit(&kext->vtables); - kxld_dict_deinit(&kext->vtable_index); + for (i = 0; i < kext->vtables.maxitems; ++i) { + vtable = kxld_array_get_slot(&kext->vtables, i); + kxld_vtable_deinit(vtable); + } + kxld_array_deinit(&kext->vtables); + kxld_dict_deinit(&kext->vtable_index); - bzero(kext, sizeof(*kext)); + bzero(kext, sizeof(*kext)); } /******************************************************************************* *******************************************************************************/ -kern_return_t -kxld_kext_export_symbols(const KXLDKext *kext, +kern_return_t +kxld_kext_export_symbols(const KXLDKext *kext, KXLDDict *defined_symbols_by_name, KXLDDict *obsolete_symbols_by_name, KXLDDict *defined_cxx_symbols_by_value) { - kern_return_t rval = KERN_FAILURE; - - check(kext); - - if (kext->interface) { - rval = export_symbols_through_interface(kext->kext, kext->interface, - defined_symbols_by_name, obsolete_symbols_by_name, - defined_cxx_symbols_by_value); - require_noerr(rval, finish); - } else { - rval = export_symbols(kext->kext, defined_symbols_by_name, - defined_cxx_symbols_by_value); - require_noerr(rval, finish); - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + + check(kext); + + if (kext->interface) { + rval = export_symbols_through_interface(kext->kext, kext->interface, + defined_symbols_by_name, obsolete_symbols_by_name, + defined_cxx_symbols_by_value); + require_noerr(rval, finish); + } else { + rval = export_symbols(kext->kext, defined_symbols_by_name, + defined_cxx_symbols_by_value); + require_noerr(rval, finish); + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -217,84 +217,84 @@ export_symbols_through_interface(const KXLDObject *kext, const KXLDObject *interface, KXLDDict *defined_symbols_by_name, KXLDDict *obsolete_symbols_by_name, KXLDDict *defined_cxx_symbols_by_value) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - const KXLDSymtab *kext_symtab = NULL; - const KXLDSymtab *interface_symtab = NULL; - KXLDSym *kext_sym = NULL; - const KXLDSym *interface_sym = NULL; - - check(kext); - check(interface); - - kext_symtab = kxld_object_get_symtab(kext); - interface_symtab = kxld_object_get_symtab(interface); - - if (defined_symbols_by_name) { - 
/* Add exported symbols */ - (void) kxld_symtab_iterator_init(&iter, interface_symtab, - kxld_sym_is_undefined, FALSE); - while ((interface_sym = kxld_symtab_iterator_get_next(&iter))) { - kext_sym = kxld_symtab_get_locally_defined_symbol_by_name(kext_symtab, - interface_sym->name); - if (!kext_sym) { - kxld_log(kKxldLogLinking, kKxldLogWarn, - "In interface %s of %s, couldn't find symbol %s\n", - kxld_object_get_name(interface), kxld_object_get_name(kext), - interface_sym->name); - continue; - } - - rval = kxld_dict_insert(defined_symbols_by_name, - kext_sym->name, kext_sym); - require_noerr(rval, finish); - } - - /* Add indirect symbols */ - (void) kxld_symtab_iterator_init(&iter, interface_symtab, - kxld_sym_is_indirect, FALSE); - while ((interface_sym = kxld_symtab_iterator_get_next(&iter))) { - kext_sym = kxld_symtab_get_locally_defined_symbol_by_name(kext_symtab, - interface_sym->alias); - if (!kext_sym) { - kxld_log(kKxldLogLinking, kKxldLogWarn, - "In interface %s of %s, couldn't find indirect symbol %s (%s)\n", - kxld_object_get_name(interface), kxld_object_get_name(kext), - interface_sym->alias, interface_sym->name); - continue; - } - - rval = kxld_dict_insert(defined_symbols_by_name, - interface_sym->name, kext_sym); - require_noerr(rval, finish); - } - } - - /* Add obsolete symbols */ - if (obsolete_symbols_by_name) { - (void) kxld_symtab_iterator_init(&iter, interface_symtab, - kxld_sym_is_obsolete, FALSE); - while ((kext_sym = kxld_symtab_iterator_get_next(&iter))) { - rval = kxld_dict_insert(obsolete_symbols_by_name, - kext_sym->name, kext_sym); - require_noerr(rval, finish); - } - } - - /* Add C++ symbols */ - if (defined_cxx_symbols_by_value) { - (void) kxld_symtab_iterator_init(&iter, kext_symtab, - kxld_sym_is_cxx, FALSE); - while ((kext_sym = kxld_symtab_iterator_get_next(&iter))) { - rval = kxld_dict_insert(defined_cxx_symbols_by_value, - &kext_sym->link_addr, kext_sym); - require_noerr(rval, finish); - } - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + const KXLDSymtab *kext_symtab = NULL; + const KXLDSymtab *interface_symtab = NULL; + KXLDSym *kext_sym = NULL; + const KXLDSym *interface_sym = NULL; + + check(kext); + check(interface); + + kext_symtab = kxld_object_get_symtab(kext); + interface_symtab = kxld_object_get_symtab(interface); + + if (defined_symbols_by_name) { + /* Add exported symbols */ + (void) kxld_symtab_iterator_init(&iter, interface_symtab, + kxld_sym_is_undefined, FALSE); + while ((interface_sym = kxld_symtab_iterator_get_next(&iter))) { + kext_sym = kxld_symtab_get_locally_defined_symbol_by_name(kext_symtab, + interface_sym->name); + if (!kext_sym) { + kxld_log(kKxldLogLinking, kKxldLogWarn, + "In interface %s of %s, couldn't find symbol %s\n", + kxld_object_get_name(interface), kxld_object_get_name(kext), + interface_sym->name); + continue; + } + + rval = kxld_dict_insert(defined_symbols_by_name, + kext_sym->name, kext_sym); + require_noerr(rval, finish); + } + + /* Add indirect symbols */ + (void) kxld_symtab_iterator_init(&iter, interface_symtab, + kxld_sym_is_indirect, FALSE); + while ((interface_sym = kxld_symtab_iterator_get_next(&iter))) { + kext_sym = kxld_symtab_get_locally_defined_symbol_by_name(kext_symtab, + interface_sym->alias); + if (!kext_sym) { + kxld_log(kKxldLogLinking, kKxldLogWarn, + "In interface %s of %s, couldn't find indirect symbol %s (%s)\n", + kxld_object_get_name(interface), kxld_object_get_name(kext), + interface_sym->alias, interface_sym->name); + continue; + } + + rval = 
kxld_dict_insert(defined_symbols_by_name, + interface_sym->name, kext_sym); + require_noerr(rval, finish); + } + } + + /* Add obsolete symbols */ + if (obsolete_symbols_by_name) { + (void) kxld_symtab_iterator_init(&iter, interface_symtab, + kxld_sym_is_obsolete, FALSE); + while ((kext_sym = kxld_symtab_iterator_get_next(&iter))) { + rval = kxld_dict_insert(obsolete_symbols_by_name, + kext_sym->name, kext_sym); + require_noerr(rval, finish); + } + } + + /* Add C++ symbols */ + if (defined_cxx_symbols_by_value) { + (void) kxld_symtab_iterator_init(&iter, kext_symtab, + kxld_sym_is_cxx, FALSE); + while ((kext_sym = kxld_symtab_iterator_get_next(&iter))) { + rval = kxld_dict_insert(defined_cxx_symbols_by_value, + &kext_sym->link_addr, kext_sym); + require_noerr(rval, finish); + } + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -303,163 +303,163 @@ kern_return_t export_symbols(const KXLDObject *kext, KXLDDict *defined_symbols_by_name, KXLDDict *defined_cxx_symbols_by_value) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - - (void) kxld_symtab_iterator_init(&iter, kxld_object_get_symtab(kext), - kxld_sym_is_exported, FALSE); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - if (defined_symbols_by_name) { - rval = kxld_dict_insert(defined_symbols_by_name, sym->name, sym); - require_noerr(rval, finish); - } - - if (kxld_sym_is_cxx(sym) && defined_cxx_symbols_by_value) { - rval = kxld_dict_insert(defined_cxx_symbols_by_value, - &sym->link_addr, sym); - require_noerr(rval, finish); - } - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + + (void) kxld_symtab_iterator_init(&iter, kxld_object_get_symtab(kext), + kxld_sym_is_exported, FALSE); + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + if (defined_symbols_by_name) { + rval = kxld_dict_insert(defined_symbols_by_name, sym->name, sym); + require_noerr(rval, finish); + } + + if (kxld_sym_is_cxx(sym) && defined_cxx_symbols_by_value) { + rval = kxld_dict_insert(defined_cxx_symbols_by_value, + &sym->link_addr, sym); + require_noerr(rval, finish); + } + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_kext_export_vtables(KXLDKext *kext, const KXLDDict *defined_cxx_symbols, const KXLDDict *defined_symbols, KXLDDict *vtables) { - kern_return_t rval = KERN_FAILURE; - KXLDVTable *vtable = NULL; - u_int i = 0; - - check(kext); - check(defined_symbols); - check(defined_cxx_symbols); - check(vtables); - - rval = create_vtables(kext, defined_cxx_symbols, defined_symbols); - require_noerr(rval, finish); - - for (i = 0; i < kext->vtables.nitems; ++i) { - vtable = kxld_array_get_item(&kext->vtables, i); - - rval = kxld_dict_insert(vtables, vtable->name, vtable); - require_noerr(rval, finish); - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDVTable *vtable = NULL; + u_int i = 0; + + check(kext); + check(defined_symbols); + check(defined_cxx_symbols); + check(vtables); + + rval = create_vtables(kext, defined_cxx_symbols, defined_symbols); + require_noerr(rval, finish); + + for (i = 0; i < kext->vtables.nitems; ++i) { + vtable = kxld_array_get_item(&kext->vtables, i); + + rval = kxld_dict_insert(vtables, 
vtable->name, vtable); + require_noerr(rval, finish); + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_kext_get_vmsize_for_seg_by_name(const KXLDKext *kext, - const char *segname, - u_long *vmsize) + const char *segname, + u_long *vmsize) { - (void) kxld_object_get_vmsize_for_seg_by_name(kext->kext, segname, vmsize); + (void) kxld_object_get_vmsize_for_seg_by_name(kext->kext, segname, vmsize); } /******************************************************************************* *******************************************************************************/ -void -kxld_kext_get_vmsize(const KXLDKext *kext, +void +kxld_kext_get_vmsize(const KXLDKext *kext, u_long *header_size, u_long *vmsize) { - (void) kxld_object_get_vmsize(kext->kext, header_size, vmsize); + (void) kxld_object_get_vmsize(kext->kext, header_size, vmsize); } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_kext_set_linked_object_size(KXLDKext *kext, u_long vmsize) { - (void) kxld_object_set_linked_object_size(kext->kext, vmsize); + (void) kxld_object_set_linked_object_size(kext->kext, vmsize); } /******************************************************************************* *******************************************************************************/ -kern_return_t -kxld_kext_export_linked_object(const KXLDKext *kext, - void *linked_object, - kxld_addr_t *kmod_info) +kern_return_t +kxld_kext_export_linked_object(const KXLDKext *kext, + void *linked_object, + kxld_addr_t *kmod_info) { - kern_return_t rval = KERN_FAILURE; - const KXLDSym *kmodsym = NULL; + kern_return_t rval = KERN_FAILURE; + const KXLDSym *kmodsym = NULL; - kmodsym = kxld_symtab_get_locally_defined_symbol_by_name( - kxld_object_get_symtab(kext->kext), KXLD_KMOD_INFO_SYMBOL); + kmodsym = kxld_symtab_get_locally_defined_symbol_by_name( + kxld_object_get_symtab(kext->kext), KXLD_KMOD_INFO_SYMBOL); - require_action(kmodsym, finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogNoKmodInfo)); + require_action(kmodsym, finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogNoKmodInfo)); - *kmod_info = kmodsym->link_addr; + *kmod_info = kmodsym->link_addr; - rval = kxld_object_export_linked_object(kext->kext, linked_object); + rval = kxld_object_export_linked_object(kext->kext, linked_object); finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ kern_return_t kxld_kext_relocate(KXLDKext *kext, - kxld_addr_t link_address, - KXLDDict *patched_vtables, - const KXLDDict *defined_symbols, - const KXLDDict *obsolete_symbols, - const KXLDDict *defined_cxx_symbols) + kxld_addr_t link_address, + KXLDDict *patched_vtables, + const KXLDDict *defined_symbols, + const KXLDDict *obsolete_symbols, + const KXLDDict *defined_cxx_symbols) { - kern_return_t rval = KERN_FAILURE; - - check(kext); - check(patched_vtables); - check(defined_symbols); - check(obsolete_symbols); - - /* Kexts that are being relocated need 
symbols indexed by value for vtable - * creation and patching. Note that we don't need to index by value for - * dependencies that have already been linked because their symbols are - * already in the global cxx value table. It's important to index the - * symbols by value before we relocate the symbols because the vtable - * entries will still have unrelocated values. - */ - rval = kxld_object_index_cxx_symbols_by_value(kext->kext); - require_noerr(rval, finish); - - rval = kxld_object_index_symbols_by_name(kext->kext); - require_noerr(rval, finish); - - rval = kxld_object_relocate(kext->kext, link_address); - require_noerr(rval, finish); - - rval = resolve_symbols(kext, defined_symbols, obsolete_symbols); - require_noerr(rval, finish); - - rval = create_vtables(kext, defined_cxx_symbols, /* defined_symbols */ NULL); - require_noerr(rval, finish); - - if (isSplitKext == FALSE) { - rval = patch_vtables(kext, patched_vtables, defined_symbols); - require_noerr(rval, finish); - } - - rval = validate_symbols(kext); - require_noerr(rval, finish); - - rval = kxld_object_process_relocations(kext->kext, patched_vtables); - require_noerr(rval, finish); - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + + check(kext); + check(patched_vtables); + check(defined_symbols); + check(obsolete_symbols); + + /* Kexts that are being relocated need symbols indexed by value for vtable + * creation and patching. Note that we don't need to index by value for + * dependencies that have already been linked because their symbols are + * already in the global cxx value table. It's important to index the + * symbols by value before we relocate the symbols because the vtable + * entries will still have unrelocated values. + */ + rval = kxld_object_index_cxx_symbols_by_value(kext->kext); + require_noerr(rval, finish); + + rval = kxld_object_index_symbols_by_name(kext->kext); + require_noerr(rval, finish); + + rval = kxld_object_relocate(kext->kext, link_address); + require_noerr(rval, finish); + + rval = resolve_symbols(kext, defined_symbols, obsolete_symbols); + require_noerr(rval, finish); + + rval = create_vtables(kext, defined_cxx_symbols, /* defined_symbols */ NULL); + require_noerr(rval, finish); + + if (isSplitKext == FALSE) { + rval = patch_vtables(kext, patched_vtables, defined_symbols); + require_noerr(rval, finish); + } + + rval = validate_symbols(kext); + require_noerr(rval, finish); + + rval = kxld_object_process_relocations(kext->kext, patched_vtables); + require_noerr(rval, finish); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -472,317 +472,317 @@ finish: * linked kexts export their symbols into the global symbol table that is used * for symbol resolution, so we can look there for vtable symbols without * having to index their local symbol table separately. -* +* * Unlinked kexts haven't yet had their symbols exported into the global table, * so we have to index their local symbol table separately. 
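 *
 * An illustrative sketch of the two lookup paths this implies (the calls are
 * the same ones get_vtable_syms_from_smcp() uses below; `vtable_name` stands
 * for whichever vtable symbol is being sought):
 *
 *     KXLDSym *sym;
 *     if (defined_symbols) {
 *         // already-linked dependency: consult the global symbol dict
 *         sym = kxld_dict_find(defined_symbols, vtable_name);
 *     } else {
 *         // unlinked kext: consult its own local symbol table
 *         sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
 *             vtable_name);
 *     }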
*******************************************************************************/ -static kern_return_t +static kern_return_t create_vtables(KXLDKext *kext, const KXLDDict *defined_cxx_symbols, const KXLDDict *defined_symbols) { - kern_return_t rval = KERN_FAILURE; - const KXLDSymtab *symtab = NULL; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - KXLDSym *vtable_sym = NULL; - KXLDSym *meta_vtable_sym = NULL; - KXLDVTable *vtable = NULL; - KXLDVTable *meta_vtable = NULL; - u_int i = 0; - u_int nvtables = 0; - - if (kext->vtables_created) { - rval = KERN_SUCCESS; - goto finish; - } - - symtab = kxld_object_get_symtab(kext->kext); - - if (kxld_object_is_linked(kext->kext)) { - /* Create a vtable object for every vtable symbol */ - kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_vtable, FALSE); - nvtables = kxld_symtab_iterator_get_num_remaining(&iter); - } else { - /* We walk over the super metaclass pointer symbols because classes - * with them are the only ones that need patching. Then we double the - * number of vtables we're expecting, because every pointer will have a - * class vtable and a MetaClass vtable. - */ - kxld_symtab_iterator_init(&iter, symtab, - kxld_sym_is_super_metaclass_pointer, FALSE); - nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2; - } - - rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables); - require_noerr(rval, finish); - - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - if (kxld_object_is_linked(kext->kext)) { - vtable_sym = sym; - meta_vtable_sym = NULL; - meta_vtable = NULL; - } else { - rval = get_vtable_syms_from_smcp(kext, defined_symbols, sym, - &vtable_sym, &meta_vtable_sym); - require_noerr(rval, finish); - } - - vtable = kxld_array_get_item(&kext->vtables, i++); - rval = kxld_vtable_init(vtable, vtable_sym, kext->kext, - defined_cxx_symbols); - require_noerr(rval, finish); - - /* meta_vtable_sym will be null when we don't support strict - * patching and can't find the metaclass vtable. If that's the - * case, we just reduce the expect number of vtables by 1. - */ - if (!kxld_object_is_linked(kext->kext)) { - if (meta_vtable_sym) { - meta_vtable = kxld_array_get_item(&kext->vtables, i++); - rval = kxld_vtable_init(meta_vtable, meta_vtable_sym, - kext->kext, defined_cxx_symbols); - require_noerr(rval, finish); - } else { - kxld_array_resize(&kext->vtables, --nvtables); - meta_vtable = NULL; - } - } - } - require_action(i == kext->vtables.nitems, finish, - rval=KERN_FAILURE); - - kext->vtables_created = TRUE; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + const KXLDSymtab *symtab = NULL; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + KXLDSym *vtable_sym = NULL; + KXLDSym *meta_vtable_sym = NULL; + KXLDVTable *vtable = NULL; + KXLDVTable *meta_vtable = NULL; + u_int i = 0; + u_int nvtables = 0; + + if (kext->vtables_created) { + rval = KERN_SUCCESS; + goto finish; + } + + symtab = kxld_object_get_symtab(kext->kext); + + if (kxld_object_is_linked(kext->kext)) { + /* Create a vtable object for every vtable symbol */ + kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_vtable, FALSE); + nvtables = kxld_symtab_iterator_get_num_remaining(&iter); + } else { + /* We walk over the super metaclass pointer symbols because classes + * with them are the only ones that need patching. Then we double the + * number of vtables we're expecting, because every pointer will have a + * class vtable and a MetaClass vtable. 
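+         *
+         * A worked instance of the naming scheme this count relies on
+         * (hypothetical class Foo; standard Itanium mangling as used by
+         * these symbols):
+         *
+         *     class Foo : public OSObject { ... };
+         *
+         *     __ZN3Foo10superClassE      super metaclass pointer (one per class)
+         *     __ZTV3Foo                  class vtable
+         *     __ZTVN3Foo9MetaClassE      MetaClass vtable
+         *
+         * hence nvtables is twice the number of SMCP symbols found.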
+         */
+        kxld_symtab_iterator_init(&iter, symtab,
+            kxld_sym_is_super_metaclass_pointer, FALSE);
+        nvtables = kxld_symtab_iterator_get_num_remaining(&iter) * 2;
+    }
+
+    rval = kxld_array_init(&kext->vtables, sizeof(KXLDVTable), nvtables);
+    require_noerr(rval, finish);
+
+    while ((sym = kxld_symtab_iterator_get_next(&iter))) {
+        if (kxld_object_is_linked(kext->kext)) {
+            vtable_sym = sym;
+            meta_vtable_sym = NULL;
+            meta_vtable = NULL;
+        } else {
+            rval = get_vtable_syms_from_smcp(kext, defined_symbols, sym,
+                &vtable_sym, &meta_vtable_sym);
+            require_noerr(rval, finish);
+        }
+
+        vtable = kxld_array_get_item(&kext->vtables, i++);
+        rval = kxld_vtable_init(vtable, vtable_sym, kext->kext,
+            defined_cxx_symbols);
+        require_noerr(rval, finish);
+
+        /* meta_vtable_sym will be null when we don't support strict
+         * patching and can't find the metaclass vtable. If that's the
+         * case, we just reduce the expected number of vtables by 1.
+         */
+        if (!kxld_object_is_linked(kext->kext)) {
+            if (meta_vtable_sym) {
+                meta_vtable = kxld_array_get_item(&kext->vtables, i++);
+                rval = kxld_vtable_init(meta_vtable, meta_vtable_sym,
+                    kext->kext, defined_cxx_symbols);
+                require_noerr(rval, finish);
+            } else {
+                kxld_array_resize(&kext->vtables, --nvtables);
+                meta_vtable = NULL;
+            }
+        }
+    }
+    require_action(i == kext->vtables.nitems, finish,
+        rval = KERN_FAILURE);
+
+    kext->vtables_created = TRUE;
+    rval = KERN_SUCCESS;
 finish:
-    return rval;
+    return rval;
 }

 /*******************************************************************************
 *******************************************************************************/
 static kern_return_t
 get_vtable_syms_from_smcp(KXLDKext *kext, const KXLDDict *defined_symbols,
-    KXLDSym *super_metaclass_ptr_sym, KXLDSym **vtable_sym_out,
+    KXLDSym *super_metaclass_ptr_sym, KXLDSym **vtable_sym_out,
     KXLDSym **meta_vtable_sym_out)
 {
-    kern_return_t rval = KERN_FAILURE;
-    const KXLDSymtab *symtab = NULL;
-    KXLDSym *vtable_sym = NULL;
-    KXLDSym *meta_vtable_sym = NULL;
-    char class_name[KXLD_MAX_NAME_LEN];
-    char vtable_name[KXLD_MAX_NAME_LEN];
-    char meta_vtable_name[KXLD_MAX_NAME_LEN];
-    char *demangled_name1 = NULL;
-    char *demangled_name2 = NULL;
-    size_t demangled_length1 = 0;
-    size_t demangled_length2 = 0;
-
-    check(kext);
-    check(vtable_sym_out);
-    check(meta_vtable_sym_out);
-
-    require(!kxld_object_is_kernel(kext->kext), finish);
-
-    symtab = kxld_object_get_symtab(kext->kext);
-
-    /* Get the class name from the smc pointer */
-    rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
-        super_metaclass_ptr_sym, class_name, sizeof(class_name));
-    require_noerr(rval, finish);
-
-    /* Get the vtable name from the class name */
-    rval = kxld_sym_get_vtable_name_from_class_name(class_name,
-        vtable_name, sizeof(vtable_name));
-    require_noerr(rval, finish);
-
-    /* Get the vtable symbol */
-    if (defined_symbols) {
-        vtable_sym = kxld_dict_find(defined_symbols, vtable_name);
-    } else {
-        vtable_sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab,
-            vtable_name);
-    }
-    require_action(vtable_sym, finish, rval=KERN_FAILURE;
-        kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
-        vtable_name, class_name));
-
-    /* Get the meta vtable name from the class name */
-    rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name,
-        meta_vtable_name, sizeof(meta_vtable_name));
-    require_noerr(rval, finish);
-
-    /* Get the meta vtable symbol */
-    if (defined_symbols) {
-        meta_vtable_sym = kxld_dict_find(defined_symbols, meta_vtable_name);
-    } else {
-        meta_vtable_sym
= kxld_symtab_get_locally_defined_symbol_by_name(symtab, - meta_vtable_name); - } - if (!meta_vtable_sym) { - if (kxld_object_target_supports_strict_patching(kext->kext)) { - kxld_log(kKxldLogPatching, kKxldLogErr, - kKxldLogMissingVtable, - meta_vtable_name, class_name); - rval = KERN_FAILURE; - goto finish; - } else { - kxld_log(kKxldLogPatching, kKxldLogErr, - "Warning: " kKxldLogMissingVtable, - kxld_demangle(meta_vtable_name, &demangled_name1, - &demangled_length1), - kxld_demangle(class_name, &demangled_name2, - &demangled_length2)); - } - } - - *vtable_sym_out = vtable_sym; - *meta_vtable_sym_out = meta_vtable_sym; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + const KXLDSymtab *symtab = NULL; + KXLDSym *vtable_sym = NULL; + KXLDSym *meta_vtable_sym = NULL; + char class_name[KXLD_MAX_NAME_LEN]; + char vtable_name[KXLD_MAX_NAME_LEN]; + char meta_vtable_name[KXLD_MAX_NAME_LEN]; + char *demangled_name1 = NULL; + char *demangled_name2 = NULL; + size_t demangled_length1 = 0; + size_t demangled_length2 = 0; + + check(kext); + check(vtable_sym_out); + check(meta_vtable_sym_out); + + require(!kxld_object_is_kernel(kext->kext), finish); + + symtab = kxld_object_get_symtab(kext->kext); + + /* Get the class name from the smc pointer */ + rval = kxld_sym_get_class_name_from_super_metaclass_pointer( + super_metaclass_ptr_sym, class_name, sizeof(class_name)); + require_noerr(rval, finish); + + /* Get the vtable name from the class name */ + rval = kxld_sym_get_vtable_name_from_class_name(class_name, + vtable_name, sizeof(vtable_name)); + require_noerr(rval, finish); + + /* Get the vtable symbol */ + if (defined_symbols) { + vtable_sym = kxld_dict_find(defined_symbols, vtable_name); + } else { + vtable_sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab, + vtable_name); + } + require_action(vtable_sym, finish, rval = KERN_FAILURE; + kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable, + vtable_name, class_name)); + + /* Get the meta vtable name from the class name */ + rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name, + meta_vtable_name, sizeof(meta_vtable_name)); + require_noerr(rval, finish); + + /* Get the meta vtable symbol */ + if (defined_symbols) { + meta_vtable_sym = kxld_dict_find(defined_symbols, meta_vtable_name); + } else { + meta_vtable_sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab, + meta_vtable_name); + } + if (!meta_vtable_sym) { + if (kxld_object_target_supports_strict_patching(kext->kext)) { + kxld_log(kKxldLogPatching, kKxldLogErr, + kKxldLogMissingVtable, + meta_vtable_name, class_name); + rval = KERN_FAILURE; + goto finish; + } else { + kxld_log(kKxldLogPatching, kKxldLogErr, + "Warning: " kKxldLogMissingVtable, + kxld_demangle(meta_vtable_name, &demangled_name1, + &demangled_length1), + kxld_demangle(class_name, &demangled_name2, + &demangled_length2)); + } + } + + *vtable_sym_out = vtable_sym; + *meta_vtable_sym_out = meta_vtable_sym; + rval = KERN_SUCCESS; finish: - if (demangled_name1) kxld_free(demangled_name1, demangled_length1); - if (demangled_name2) kxld_free(demangled_name2, demangled_length2); - - return rval; + if (demangled_name1) { + kxld_free(demangled_name1, demangled_length1); + } + if (demangled_name2) { + kxld_free(demangled_name2, demangled_length2); + } + + return rval; } /******************************************************************************* *******************************************************************************/ static kern_return_t -resolve_symbols(KXLDKext *kext, const 
KXLDDict *defined_symbols, +resolve_symbols(KXLDKext *kext, const KXLDDict *defined_symbols, const KXLDDict *obsolete_symbols) { - kern_return_t rval = KERN_FAILURE; - const KXLDSymtab *symtab = NULL; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - KXLDSym *defined_sym = NULL; - const char *name = NULL; - boolean_t tests_for_weak = FALSE; - boolean_t error = FALSE; - char *demangled_name = NULL; - size_t demangled_length = 0; - - check(kext->kext); - check(defined_symbols); - check(obsolete_symbols); - - symtab = kxld_object_get_symtab(kext->kext); - - /* Check if the kext tests for weak symbols */ - sym = kxld_symtab_get_symbol_by_name(symtab, KXLD_WEAK_TEST_SYMBOL); - tests_for_weak = (sym != NULL); - - /* Check for duplicate symbols */ - kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_exported, FALSE); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - defined_sym = kxld_dict_find(defined_symbols, sym->name); - if (defined_sym) { - /* Not a problem if the symbols have the same address */ - if (defined_sym->link_addr == sym->link_addr) { - continue; - } - - if (!error) { - error = TRUE; - kxld_log(kKxldLogLinking, kKxldLogErr, - "The following symbols were defined more than once:"); - } - - kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s: %p - %p", - kxld_demangle(sym->name, &demangled_name, &demangled_length), - (void *) (uintptr_t) sym->link_addr, - (void *) (uintptr_t) defined_sym->link_addr); - } - } - require_noerr_action(error, finish, rval=KERN_FAILURE); - - /* Resolve undefined and indirect symbols */ - - /* Iterate over all unresolved symbols */ - kxld_symtab_iterator_init(&iter, symtab, - kxld_sym_is_unresolved, FALSE); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - - /* Common symbols are not supported */ - if (kxld_sym_is_common(sym)) { - - if (!error) { - error = TRUE; - if (kxld_object_target_supports_common_symbols(kext->kext)) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "The following common symbols were not resolved:"); - } else { - kxld_log(kKxldLogLinking, kKxldLogErr, - "Common symbols are not supported in kernel extensions. " - "Use -fno-common to build your kext. " - "The following are common symbols:"); - } - } - kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", - kxld_demangle(sym->name, &demangled_name, &demangled_length)); - - } else { - - /* Find the address of the defined symbol */ - if (kxld_sym_is_undefined(sym)) { - name = sym->name; - } else { - name = sym->alias; - } - defined_sym = kxld_dict_find(defined_symbols, name); - - /* Resolve the symbol. If a definition cannot be found, then: - * 1) Psuedokexts log a warning and proceed - * 2) Actual kexts delay the error until validation in case vtable - * patching replaces the undefined symbol. - */ - - if (defined_sym) { - - rval = kxld_sym_resolve(sym, defined_sym->link_addr); - require_noerr(rval, finish); - - if (obsolete_symbols && kxld_dict_find(obsolete_symbols, name)) { - kxld_log(kKxldLogLinking, kKxldLogWarn, - "This kext uses obsolete symbol %s.", - kxld_demangle(name, &demangled_name, &demangled_length)); - } - - } else if (kxld_sym_is_weak(sym)) { - kxld_addr_t addr = 0; - - /* Make sure that the kext has referenced gOSKextUnresolved. - */ - require_action(tests_for_weak, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, - "This kext has weak references but does not test for " - "them. Test for weak references with " - "OSKextSymbolIsResolved(). 
(found in )")); + kern_return_t rval = KERN_FAILURE; + const KXLDSymtab *symtab = NULL; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + KXLDSym *defined_sym = NULL; + const char *name = NULL; + boolean_t tests_for_weak = FALSE; + boolean_t error = FALSE; + char *demangled_name = NULL; + size_t demangled_length = 0; + + check(kext->kext); + check(defined_symbols); + check(obsolete_symbols); + + symtab = kxld_object_get_symtab(kext->kext); + + /* Check if the kext tests for weak symbols */ + sym = kxld_symtab_get_symbol_by_name(symtab, KXLD_WEAK_TEST_SYMBOL); + tests_for_weak = (sym != NULL); + + /* Check for duplicate symbols */ + kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_exported, FALSE); + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + defined_sym = kxld_dict_find(defined_symbols, sym->name); + if (defined_sym) { + /* Not a problem if the symbols have the same address */ + if (defined_sym->link_addr == sym->link_addr) { + continue; + } + + if (!error) { + error = TRUE; + kxld_log(kKxldLogLinking, kKxldLogErr, + "The following symbols were defined more than once:"); + } + + kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s: %p - %p", + kxld_demangle(sym->name, &demangled_name, &demangled_length), + (void *) (uintptr_t) sym->link_addr, + (void *) (uintptr_t) defined_sym->link_addr); + } + } + require_noerr_action(error, finish, rval = KERN_FAILURE); + + /* Resolve undefined and indirect symbols */ + + /* Iterate over all unresolved symbols */ + kxld_symtab_iterator_init(&iter, symtab, + kxld_sym_is_unresolved, FALSE); + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + /* Common symbols are not supported */ + if (kxld_sym_is_common(sym)) { + if (!error) { + error = TRUE; + if (kxld_object_target_supports_common_symbols(kext->kext)) { + kxld_log(kKxldLogLinking, kKxldLogErr, + "The following common symbols were not resolved:"); + } else { + kxld_log(kKxldLogLinking, kKxldLogErr, + "Common symbols are not supported in kernel extensions. " + "Use -fno-common to build your kext. " + "The following are common symbols:"); + } + } + kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", + kxld_demangle(sym->name, &demangled_name, &demangled_length)); + } else { + /* Find the address of the defined symbol */ + if (kxld_sym_is_undefined(sym)) { + name = sym->name; + } else { + name = sym->alias; + } + defined_sym = kxld_dict_find(defined_symbols, name); + + /* Resolve the symbol. If a definition cannot be found, then: + * 1) Psuedokexts log a warning and proceed + * 2) Actual kexts delay the error until validation in case vtable + * patching replaces the undefined symbol. + */ + + if (defined_sym) { + rval = kxld_sym_resolve(sym, defined_sym->link_addr); + require_noerr(rval, finish); + + if (obsolete_symbols && kxld_dict_find(obsolete_symbols, name)) { + kxld_log(kKxldLogLinking, kKxldLogWarn, + "This kext uses obsolete symbol %s.", + kxld_demangle(name, &demangled_name, &demangled_length)); + } + } else if (kxld_sym_is_weak(sym)) { + kxld_addr_t addr = 0; + + /* Make sure that the kext has referenced gOSKextUnresolved. + */ + require_action(tests_for_weak, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, + "This kext has weak references but does not test for " + "them. Test for weak references with " + "OSKextSymbolIsResolved(). (found in )")); #if KERNEL - /* Get the address of the default weak address. - */ - addr = (kxld_addr_t) &kext_weak_symbol_referenced; -#else - /* This is run during symbol generation only, so we only - * need a filler value here. 
- */ - addr = 0xF00DD00D; + /* Get the address of the default weak address. + */ + addr = (kxld_addr_t) &kext_weak_symbol_referenced; +#else + /* This is run during symbol generation only, so we only + * need a filler value here. + */ + addr = 0xF00DD00D; #endif /* KERNEL */ - rval = kxld_sym_resolve(sym, addr); - require_noerr(rval, finish); - } - } - } - require_noerr_action(error, finish, rval=KERN_FAILURE); + rval = kxld_sym_resolve(sym, addr); + require_noerr(rval, finish); + } + } + } + require_noerr_action(error, finish, rval = KERN_FAILURE); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - if (demangled_name) kxld_free(demangled_name, demangled_length); + if (demangled_name) { + kxld_free(demangled_name, demangled_length); + } - return rval; + return rval; } /******************************************************************************* @@ -811,197 +811,203 @@ finish: #define kOSMetaClassVTableName "__ZTV11OSMetaClass" static kern_return_t -patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables, +patch_vtables(KXLDKext *kext, KXLDDict *patched_vtables, const KXLDDict *defined_symbols) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - const KXLDSymtab *symtab = NULL; - const KXLDSym *metaclass = NULL; - KXLDSym *super_metaclass_pointer = NULL; - KXLDSym *final_sym = NULL; - KXLDVTable *vtable = NULL; - KXLDVTable *super_vtable = NULL; - char class_name[KXLD_MAX_NAME_LEN]; - char super_class_name[KXLD_MAX_NAME_LEN]; - char vtable_name[KXLD_MAX_NAME_LEN]; - char super_vtable_name[KXLD_MAX_NAME_LEN]; - char final_sym_name[KXLD_MAX_NAME_LEN]; - char *demangled_name1 = NULL; - char *demangled_name2 = NULL; - size_t demangled_length1 = 0;; - size_t demangled_length2 = 0; - size_t len = 0; - u_int nvtables = 0; - u_int npatched = 0; - u_int nprogress = 0; - boolean_t failure = FALSE; - - check(kext); - check(patched_vtables); - - symtab = kxld_object_get_symtab(kext->kext); - - rval = create_vtable_index(kext); - require_noerr(rval, finish); - - /* Find each super meta class pointer symbol */ - - kxld_symtab_iterator_init(&iter, symtab, - kxld_sym_is_super_metaclass_pointer, FALSE); - nvtables = kxld_symtab_iterator_get_num_remaining(&iter); - - while (npatched < nvtables) { - npatched = 0; - nprogress = 0; - kxld_symtab_iterator_reset(&iter); - while((super_metaclass_pointer = kxld_symtab_iterator_get_next(&iter))) - { - /* Get the class name from the smc pointer */ - rval = kxld_sym_get_class_name_from_super_metaclass_pointer( - super_metaclass_pointer, class_name, sizeof(class_name)); - require_noerr(rval, finish); - - /* Get the vtable name from the class name */ - rval = kxld_sym_get_vtable_name_from_class_name(class_name, - vtable_name, sizeof(vtable_name)); - require_noerr(rval, finish); - - /* Get the vtable and make sure it hasn't been patched */ - vtable = kxld_dict_find(&kext->vtable_index, vtable_name); - require_action(vtable, finish, rval=KERN_FAILURE; - kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable, - vtable_name, class_name)); - - if (!vtable->is_patched) { - - /* Find the SMCP's meta class symbol */ - metaclass = get_metaclass_symbol_from_super_meta_class_pointer_symbol( - kext, super_metaclass_pointer); - require_action(metaclass, finish, rval=KERN_FAILURE); - - /* Get the super class name from the super metaclass */ - rval = kxld_sym_get_class_name_from_metaclass(metaclass, - super_class_name, sizeof(super_class_name)); - require_noerr(rval, finish); - - /* Get the super vtable name from the class name */ - rval = 
kxld_sym_get_vtable_name_from_class_name(super_class_name, - super_vtable_name, sizeof(super_vtable_name)); - require_noerr(rval, finish); - - /* Get the super vtable if it's been patched */ - super_vtable = kxld_dict_find(patched_vtables, super_vtable_name); - - if (failure) { - const KXLDVTable *unpatched_super_vtable; - unpatched_super_vtable = kxld_dict_find(&kext->vtable_index, - super_vtable_name); - - /* If the parent's vtable hasn't been patched, warn that - * this vtable is unpatchable because of the parent. - */ - if (!super_vtable) { - kxld_log(kKxldLogPatching, kKxldLogErr, - "The %s was not patched because its parent, " - "the %s, was not %s.", - kxld_demangle(vtable_name, &demangled_name1, - &demangled_length1), - kxld_demangle(super_vtable_name, &demangled_name2, - &demangled_length2), - (unpatched_super_vtable) ? "patchable" : "found"); - } - continue; - } - - if (!super_vtable) continue; - - /* Get the final symbol's name from the super vtable */ - rval = kxld_sym_get_final_sym_name_from_class_name(super_class_name, - final_sym_name, sizeof(final_sym_name)); - require_noerr(rval, finish); - - /* Verify that the final symbol does not exist. First check - * all the externally defined symbols, then check locally. - */ - final_sym = kxld_dict_find(defined_symbols, final_sym_name); - if (!final_sym) { - final_sym = kxld_symtab_get_locally_defined_symbol_by_name( - symtab, final_sym_name); - } - if (final_sym) { - kxld_log(kKxldLogPatching, kKxldLogErr, - "Class '%s' is a subclass of final class '%s'.", - kxld_demangle(class_name, &demangled_name1, - &demangled_length1), - kxld_demangle(super_class_name, &demangled_name2, - &demangled_length2)); - continue; - } - - /* Patch the class's vtable */ - rval = kxld_vtable_patch(vtable, super_vtable, kext->kext); - if (rval) continue; - - /* Add the class's vtable to the set of patched vtables */ - rval = kxld_dict_insert(patched_vtables, vtable->name, vtable); - require_noerr(rval, finish); - - /* Get the meta vtable name from the class name */ - rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name, - vtable_name, sizeof(vtable_name)); - require_noerr(rval, finish); - - /* Get the meta vtable. Whether or not it should exist has already - * been tested in create_vtables(), so if it doesn't exist and we're - * still running, we can safely skip it. - */ - vtable = kxld_dict_find(&kext->vtable_index, vtable_name); - if (!vtable) { - ++nprogress; - ++npatched; - continue; - } - require_action(!vtable->is_patched, finish, rval=KERN_FAILURE); - - /* There is no way to look up a metaclass vtable at runtime, but - * we know that every class's metaclass inherits directly from - * OSMetaClass, so we just hardcode that vtable name here. 
-            */
-            len = strlcpy(super_vtable_name, kOSMetaClassVTableName,
-                sizeof(super_vtable_name));
-            require_action(len == const_strlen(kOSMetaClassVTableName),
-                finish, rval=KERN_FAILURE);
-
-            /* Get the super meta vtable */
-            super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
-            require_action(super_vtable && super_vtable->is_patched,
-                finish, rval=KERN_FAILURE);
-
-            /* Patch the meta class's vtable */
-            rval = kxld_vtable_patch(vtable, super_vtable, kext->kext);
-            require_noerr(rval, finish);
-
-            /* Add the MetaClass's vtable to the set of patched vtables */
-            rval = kxld_dict_insert(patched_vtables, vtable->name, vtable);
-            require_noerr(rval, finish);
-
-            ++nprogress;
-        }
-
-        ++npatched;
-    }
-
-    require_action(!failure, finish, rval=KERN_FAILURE);
-    failure = (nprogress == 0);
-    }
-
-    rval = KERN_SUCCESS;
+    kern_return_t rval = KERN_FAILURE;
+    KXLDSymtabIterator iter;
+    const KXLDSymtab *symtab = NULL;
+    const KXLDSym *metaclass = NULL;
+    KXLDSym *super_metaclass_pointer = NULL;
+    KXLDSym *final_sym = NULL;
+    KXLDVTable *vtable = NULL;
+    KXLDVTable *super_vtable = NULL;
+    char class_name[KXLD_MAX_NAME_LEN];
+    char super_class_name[KXLD_MAX_NAME_LEN];
+    char vtable_name[KXLD_MAX_NAME_LEN];
+    char super_vtable_name[KXLD_MAX_NAME_LEN];
+    char final_sym_name[KXLD_MAX_NAME_LEN];
+    char *demangled_name1 = NULL;
+    char *demangled_name2 = NULL;
+    size_t demangled_length1 = 0;
+    size_t demangled_length2 = 0;
+    size_t len = 0;
+    u_int nvtables = 0;
+    u_int npatched = 0;
+    u_int nprogress = 0;
+    boolean_t failure = FALSE;
+
+    check(kext);
+    check(patched_vtables);
+
+    symtab = kxld_object_get_symtab(kext->kext);
+
+    rval = create_vtable_index(kext);
+    require_noerr(rval, finish);
+
+    /* Find each super meta class pointer symbol */
+
+    kxld_symtab_iterator_init(&iter, symtab,
+        kxld_sym_is_super_metaclass_pointer, FALSE);
+    nvtables = kxld_symtab_iterator_get_num_remaining(&iter);
+
+    while (npatched < nvtables) {
+        npatched = 0;
+        nprogress = 0;
+        kxld_symtab_iterator_reset(&iter);
+        while ((super_metaclass_pointer = kxld_symtab_iterator_get_next(&iter))) {
+            /* Get the class name from the smc pointer */
+            rval = kxld_sym_get_class_name_from_super_metaclass_pointer(
+                super_metaclass_pointer, class_name, sizeof(class_name));
+            require_noerr(rval, finish);
+
+            /* Get the vtable name from the class name */
+            rval = kxld_sym_get_vtable_name_from_class_name(class_name,
+                vtable_name, sizeof(vtable_name));
+            require_noerr(rval, finish);
+
+            /* Get the vtable and make sure it hasn't been patched */
+            vtable = kxld_dict_find(&kext->vtable_index, vtable_name);
+            require_action(vtable, finish, rval = KERN_FAILURE;
+                kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMissingVtable,
+                vtable_name, class_name));
+
+            if (!vtable->is_patched) {
+                /* Find the SMCP's meta class symbol */
+                metaclass = get_metaclass_symbol_from_super_meta_class_pointer_symbol(
+                    kext, super_metaclass_pointer);
+                require_action(metaclass, finish, rval = KERN_FAILURE);
+
+                /* Get the super class name from the super metaclass */
+                rval = kxld_sym_get_class_name_from_metaclass(metaclass,
+                    super_class_name, sizeof(super_class_name));
+                require_noerr(rval, finish);
+
+                /* Get the super vtable name from the class name */
+                rval = kxld_sym_get_vtable_name_from_class_name(super_class_name,
+                    super_vtable_name, sizeof(super_vtable_name));
+                require_noerr(rval, finish);
+
+                /* Get the super vtable if it's been patched */
+                super_vtable = kxld_dict_find(patched_vtables, super_vtable_name);
+
+                if (failure) {
+                    const KXLDVTable
*unpatched_super_vtable; + unpatched_super_vtable = kxld_dict_find(&kext->vtable_index, + super_vtable_name); + + /* If the parent's vtable hasn't been patched, warn that + * this vtable is unpatchable because of the parent. + */ + if (!super_vtable) { + kxld_log(kKxldLogPatching, kKxldLogErr, + "The %s was not patched because its parent, " + "the %s, was not %s.", + kxld_demangle(vtable_name, &demangled_name1, + &demangled_length1), + kxld_demangle(super_vtable_name, &demangled_name2, + &demangled_length2), + (unpatched_super_vtable) ? "patchable" : "found"); + } + continue; + } + + if (!super_vtable) { + continue; + } + + /* Get the final symbol's name from the super vtable */ + rval = kxld_sym_get_final_sym_name_from_class_name(super_class_name, + final_sym_name, sizeof(final_sym_name)); + require_noerr(rval, finish); + + /* Verify that the final symbol does not exist. First check + * all the externally defined symbols, then check locally. + */ + final_sym = kxld_dict_find(defined_symbols, final_sym_name); + if (!final_sym) { + final_sym = kxld_symtab_get_locally_defined_symbol_by_name( + symtab, final_sym_name); + } + if (final_sym) { + kxld_log(kKxldLogPatching, kKxldLogErr, + "Class '%s' is a subclass of final class '%s'.", + kxld_demangle(class_name, &demangled_name1, + &demangled_length1), + kxld_demangle(super_class_name, &demangled_name2, + &demangled_length2)); + continue; + } + + /* Patch the class's vtable */ + rval = kxld_vtable_patch(vtable, super_vtable, kext->kext); + if (rval) { + continue; + } + + /* Add the class's vtable to the set of patched vtables */ + rval = kxld_dict_insert(patched_vtables, vtable->name, vtable); + require_noerr(rval, finish); + + /* Get the meta vtable name from the class name */ + rval = kxld_sym_get_meta_vtable_name_from_class_name(class_name, + vtable_name, sizeof(vtable_name)); + require_noerr(rval, finish); + + /* Get the meta vtable. Whether or not it should exist has already + * been tested in create_vtables(), so if it doesn't exist and we're + * still running, we can safely skip it. + */ + vtable = kxld_dict_find(&kext->vtable_index, vtable_name); + if (!vtable) { + ++nprogress; + ++npatched; + continue; + } + require_action(!vtable->is_patched, finish, rval = KERN_FAILURE); + + /* There is no way to look up a metaclass vtable at runtime, but + * we know that every class's metaclass inherits directly from + * OSMetaClass, so we just hardcode that vtable name here. 
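+                 *
+                 * Concretely (names illustrative): for a class Foo, __ZTV3Foo
+                 * is patched against the vtable of whatever superclass the
+                 * SMCP's relocation entry points at, while
+                 * __ZTVN3Foo9MetaClassE is always patched against
+                 * __ZTV11OSMetaClass, no matter where Foo sits in the class
+                 * hierarchy.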
+ */ + len = strlcpy(super_vtable_name, kOSMetaClassVTableName, + sizeof(super_vtable_name)); + require_action(len == const_strlen(kOSMetaClassVTableName), + finish, rval = KERN_FAILURE); + + /* Get the super meta vtable */ + super_vtable = kxld_dict_find(patched_vtables, super_vtable_name); + require_action(super_vtable && super_vtable->is_patched, + finish, rval = KERN_FAILURE); + + /* Patch the meta class's vtable */ + rval = kxld_vtable_patch(vtable, super_vtable, kext->kext); + require_noerr(rval, finish); + + /* Add the MetaClass's vtable to the set of patched vtables */ + rval = kxld_dict_insert(patched_vtables, vtable->name, vtable); + require_noerr(rval, finish); + + ++nprogress; + } + + ++npatched; + } + + require_action(!failure, finish, rval = KERN_FAILURE); + failure = (nprogress == 0); + } + + rval = KERN_SUCCESS; finish: - if (demangled_name1) kxld_free(demangled_name1, demangled_length1); - if (demangled_name2) kxld_free(demangled_name2, demangled_length2); - - return rval; + if (demangled_name1) { + kxld_free(demangled_name1, demangled_length1); + } + if (demangled_name2) { + kxld_free(demangled_name2, demangled_length2); + } + + return rval; } /******************************************************************************* @@ -1009,67 +1015,67 @@ finish: static kern_return_t create_vtable_index(KXLDKext *kext) { - kern_return_t rval = KERN_FAILURE; - KXLDVTable *vtable = NULL; - u_int i = 0; - - if (kext->vtable_index_created) { - rval = KERN_SUCCESS; - goto finish; - } - - /* Map vtable names to the vtable structures */ - rval = kxld_dict_init(&kext->vtable_index, kxld_dict_string_hash, - kxld_dict_string_cmp, kext->vtables.nitems); - require_noerr(rval, finish); - - for (i = 0; i < kext->vtables.nitems; ++i) { - vtable = kxld_array_get_item(&kext->vtables, i); - rval = kxld_dict_insert(&kext->vtable_index, vtable->name, vtable); - require_noerr(rval, finish); - } - - kext->vtable_index_created = TRUE; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDVTable *vtable = NULL; + u_int i = 0; + + if (kext->vtable_index_created) { + rval = KERN_SUCCESS; + goto finish; + } + + /* Map vtable names to the vtable structures */ + rval = kxld_dict_init(&kext->vtable_index, kxld_dict_string_hash, + kxld_dict_string_cmp, kext->vtables.nitems); + require_noerr(rval, finish); + + for (i = 0; i < kext->vtables.nitems; ++i) { + vtable = kxld_array_get_item(&kext->vtables, i); + rval = kxld_dict_insert(&kext->vtable_index, vtable->name, vtable); + require_noerr(rval, finish); + } + + kext->vtable_index_created = TRUE; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ static const KXLDSym * get_metaclass_symbol_from_super_meta_class_pointer_symbol(KXLDKext *kext, - KXLDSym *super_metaclass_pointer_sym) + KXLDSym *super_metaclass_pointer_sym) { - kern_return_t rval = KERN_FAILURE; - const KXLDReloc *reloc = NULL; - const KXLDSect *sect = NULL; - const KXLDSym *metaclass = NULL; - - check(kext); - check(super_metaclass_pointer_sym); - - /* Get the relocation entry that fills in the super metaclass pointer. */ - reloc = kxld_object_get_reloc_at_symbol(kext->kext, - super_metaclass_pointer_sym); - require_action(reloc, finish, rval=KERN_FAILURE); - - /* Get the section of the super metaclass pointer. 
*/ - sect = kxld_object_get_section_by_index(kext->kext, - super_metaclass_pointer_sym->sectnum); - require_action(sect, finish, rval=KERN_FAILURE); - - /* Get the symbol that will be filled into the super metaclass pointer. */ - metaclass = kxld_object_get_symbol_of_reloc(kext->kext, reloc, sect); - - + kern_return_t rval = KERN_FAILURE; + const KXLDReloc *reloc = NULL; + const KXLDSect *sect = NULL; + const KXLDSym *metaclass = NULL; + + check(kext); + check(super_metaclass_pointer_sym); + + /* Get the relocation entry that fills in the super metaclass pointer. */ + reloc = kxld_object_get_reloc_at_symbol(kext->kext, + super_metaclass_pointer_sym); + require_action(reloc, finish, rval = KERN_FAILURE); + + /* Get the section of the super metaclass pointer. */ + sect = kxld_object_get_section_by_index(kext->kext, + super_metaclass_pointer_sym->sectnum); + require_action(sect, finish, rval = KERN_FAILURE); + + /* Get the symbol that will be filled into the super metaclass pointer. */ + metaclass = kxld_object_get_symbol_of_reloc(kext->kext, reloc, sect); + + finish: - if (metaclass == NULL) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "metaclass == NULL kxld_sym %s <%s>", - super_metaclass_pointer_sym->name, __func__); - } - return metaclass; + if (metaclass == NULL) { + kxld_log(kKxldLogLinking, kKxldLogErr, + "metaclass == NULL kxld_sym %s <%s>", + super_metaclass_pointer_sym->name, __func__); + } + return metaclass; } @@ -1078,31 +1084,32 @@ finish: static kern_return_t validate_symbols(KXLDKext *kext) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - u_int error = FALSE; - char *demangled_name = NULL; - size_t demangled_length = 0; - - /* Check for any unresolved symbols */ - kxld_symtab_iterator_init(&iter, kxld_object_get_symtab(kext->kext), - kxld_sym_is_unresolved, FALSE); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - if (!error) { - error = TRUE; - kxld_log(kKxldLogLinking, kKxldLogErr, - "The following symbols are unresolved for this kext:"); - } - kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", - kxld_demangle(sym->name, &demangled_name, &demangled_length)); - } - require_noerr_action(error, finish, rval=KERN_FAILURE); - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + u_int error = FALSE; + char *demangled_name = NULL; + size_t demangled_length = 0; + + /* Check for any unresolved symbols */ + kxld_symtab_iterator_init(&iter, kxld_object_get_symtab(kext->kext), + kxld_sym_is_unresolved, FALSE); + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + if (!error) { + error = TRUE; + kxld_log(kKxldLogLinking, kKxldLogErr, + "The following symbols are unresolved for this kext:"); + } + kxld_log(kKxldLogLinking, kKxldLogErr, "\t%s", + kxld_demangle(sym->name, &demangled_name, &demangled_length)); + } + require_noerr_action(error, finish, rval = KERN_FAILURE); + + rval = KERN_SUCCESS; finish: - if (demangled_name) kxld_free(demangled_name, demangled_length); - return rval; + if (demangled_name) { + kxld_free(demangled_name, demangled_length); + } + return rval; } - diff --git a/libkern/kxld/kxld_kext.h b/libkern/kxld/kxld_kext.h index 58b68bce3..82ed4c05e 100644 --- a/libkern/kxld/kxld_kext.h +++ b/libkern/kxld/kxld_kext.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_KEXT_H_ @@ -31,7 +31,7 @@ #include #if KERNEL #include -#else +#else #include "kxld_types.h" #endif @@ -50,61 +50,61 @@ typedef struct kxld_kext KXLDKext; *******************************************************************************/ size_t kxld_kext_sizeof(void) - __attribute__((const, visibility("hidden"))); +__attribute__((const, visibility("hidden"))); kern_return_t kxld_kext_init(KXLDKext *kext, struct kxld_object *kext_object, struct kxld_object *interface_object) - __attribute__((nonnull(1,2), visibility("hidden"))); +__attribute__((nonnull(1, 2), visibility("hidden"))); void kxld_kext_clear(KXLDKext *kext) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_kext_deinit(KXLDKext *kext) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors *******************************************************************************/ -kern_return_t kxld_kext_export_symbols(const KXLDKext *kext, +kern_return_t kxld_kext_export_symbols(const KXLDKext *kext, struct kxld_dict *defined_symbols_by_name, struct kxld_dict *obsolete_symbols_by_name, struct kxld_dict *defined_cxx_symbols_by_value) - __attribute__((nonnull(1), visibility("hidden"))); +__attribute__((nonnull(1), visibility("hidden"))); void kxld_kext_get_vmsize_for_seg_by_name(const KXLDKext *kext, - const char *segname, - u_long *vmsize) + const char *segname, + u_long *vmsize) __attribute__((nonnull, visibility("hidden"))); -void kxld_kext_get_vmsize(const KXLDKext *kext, +void kxld_kext_get_vmsize(const KXLDKext *kext, u_long *header_size, u_long *vmsize) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_kext_set_linked_object_size(KXLDKext *kext, u_long vmsize) __attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_kext_export_linked_object(const KXLDKext *kext, - void *linked_object, - kxld_addr_t *kmod_info) - __attribute__((nonnull, visibility("hidden"))); + void *linked_object, + kxld_addr_t *kmod_info) +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Modifiers *******************************************************************************/ -kern_return_t kxld_kext_export_vtables(KXLDKext *kext, - const struct 
kxld_dict *defined_cxx_symbols, +kern_return_t kxld_kext_export_vtables(KXLDKext *kext, + const struct kxld_dict *defined_cxx_symbols, const struct kxld_dict *defined_symbols, struct kxld_dict *vtables) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_kext_relocate(KXLDKext *kext, - kxld_addr_t link_address, - struct kxld_dict *patched_vtables, - const struct kxld_dict *defined_symbols, - const struct kxld_dict *obsolete_symbols, - const struct kxld_dict *defined_cxx_symbols) -__attribute__((nonnull(1,3,4), visibility("hidden"))); + kxld_addr_t link_address, + struct kxld_dict *patched_vtables, + const struct kxld_dict *defined_symbols, + const struct kxld_dict *obsolete_symbols, + const struct kxld_dict *defined_cxx_symbols) +__attribute__((nonnull(1, 3, 4), visibility("hidden"))); #endif /* _KXLD_KEXT_H_ */ diff --git a/libkern/kxld/kxld_object.c b/libkern/kxld/kxld_object.c index 3413a2ce7..51cbb170b 100644 --- a/libkern/kxld/kxld_object.c +++ b/libkern/kxld/kxld_object.c @@ -2,7 +2,7 @@ * Copyright (c) 2009-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -34,9 +34,9 @@ #include #include #else /* !KERNEL */ - /* Get machine.h from the kernel source so we can support all platforms - * that the kernel supports. Otherwise we're at the mercy of the host. - */ +/* Get machine.h from the kernel source so we can support all platforms + * that the kernel supports. Otherwise we're at the mercy of the host. 
+ */ #include "../../osfmk/mach/machine.h" #include @@ -76,39 +76,39 @@ extern boolean_t isOldInterface; *******************************************************************************/ struct kxld_object { - u_char *file; // used by old interface - u_long size; // used by old interface - const char *name; - uint32_t filetype; - cpu_type_t cputype; - cpu_subtype_t cpusubtype; - KXLDArray segs; - KXLDArray sects; - KXLDArray extrelocs; - KXLDArray locrelocs; - KXLDRelocator relocator; - KXLDuuid uuid; - KXLDversionmin versionmin; - KXLDsrcversion srcversion; - KXLDSymtab *symtab; - struct dysymtab_command *dysymtab_hdr; - KXLDsplitinfolc splitinfolc; - splitKextLinkInfo split_info; - kxld_addr_t link_addr; - u_long output_buffer_size; - boolean_t is_kernel; - boolean_t is_final_image; - boolean_t is_linked; - boolean_t got_is_created; + u_char *file; // used by old interface + u_long size; // used by old interface + const char *name; + uint32_t filetype; + cpu_type_t cputype; + cpu_subtype_t cpusubtype; + KXLDArray segs; + KXLDArray sects; + KXLDArray extrelocs; + KXLDArray locrelocs; + KXLDRelocator relocator; + KXLDuuid uuid; + KXLDversionmin versionmin; + KXLDsrcversion srcversion; + KXLDSymtab *symtab; + struct dysymtab_command *dysymtab_hdr; + KXLDsplitinfolc splitinfolc; + splitKextLinkInfo split_info; + kxld_addr_t link_addr; + u_long output_buffer_size; + boolean_t is_kernel; + boolean_t is_final_image; + boolean_t is_linked; + boolean_t got_is_created; #if KXLD_USER_OR_OBJECT - KXLDArray *section_order; + KXLDArray *section_order; #endif #if KXLD_PIC_KEXTS - boolean_t include_kaslr_relocs; + boolean_t include_kaslr_relocs; #endif #if !KERNEL - enum NXByteOrder host_order; - enum NXByteOrder target_order; + enum NXByteOrder host_order; + enum NXByteOrder target_order; #endif }; @@ -116,25 +116,25 @@ struct kxld_object { * Prototypes *******************************************************************************/ -static kern_return_t get_target_machine_info(KXLDObject *object, +static kern_return_t get_target_machine_info(KXLDObject *object, cpu_type_t cputype, cpu_subtype_t cpusubtype); -static kern_return_t get_macho_slice_for_arch(KXLDObject *object, +static kern_return_t get_macho_slice_for_arch(KXLDObject *object, u_char *file, u_long size); static u_long get_macho_header_size(const KXLDObject *object); static u_long get_macho_data_size(const KXLDObject *object) __unused; static kern_return_t init_from_execute(KXLDObject *object); -static kern_return_t init_from_final_linked_image(KXLDObject *object, +static kern_return_t init_from_final_linked_image(KXLDObject *object, u_int *filetype_out, struct symtab_command **symtab_hdr_out); static boolean_t target_supports_protected_segments(const KXLDObject *object) - __attribute__((pure)); +__attribute__((pure)); static void set_is_object_linked(KXLDObject *object); #if KXLD_USER_OR_BUNDLE -static boolean_t target_supports_bundle(const KXLDObject *object) - __attribute((pure)); +static boolean_t target_supports_bundle(const KXLDObject *object) +__attribute((pure)); static kern_return_t init_from_bundle(KXLDObject *object); static kern_return_t process_relocs_from_tables(KXLDObject *object); static KXLDSeg *get_seg_by_base_addr(KXLDObject *object, @@ -144,8 +144,8 @@ static void add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit); #endif /* KXLD_USER_OR_BUNDLE */ #if KXLD_USER_OR_OBJECT -static boolean_t target_supports_object(const KXLDObject *object) - __attribute((pure)); +static boolean_t target_supports_object(const 
KXLDObject *object) +__attribute((pure)); static kern_return_t init_from_object(KXLDObject *object); static kern_return_t process_relocs_from_sections(KXLDObject *object); #endif /* KXLD_USER_OR_OBJECT */ @@ -155,18 +155,18 @@ static boolean_t target_supports_slideable_kexts(const KXLDObject *object); #endif /* KXLD_PIC_KEXTS */ -static kern_return_t export_macho_header(const KXLDObject *object, u_char *buf, +static kern_return_t export_macho_header(const KXLDObject *object, u_char *buf, u_int ncmds, u_long *header_offset, u_long header_size); #if KXLD_USER_OR_ILP32 -static u_long get_macho_cmd_data_32(u_char *file, u_long offset, +static u_long get_macho_cmd_data_32(u_char *file, u_long offset, u_int *filetype, u_int *ncmds); -static kern_return_t export_macho_header_32(const KXLDObject *object, +static kern_return_t export_macho_header_32(const KXLDObject *object, u_char *buf, u_int ncmds, u_long *header_offset, u_long header_size); #endif /* KXLD_USER_OR_ILP32 */ #if KXLD_USER_OR_LP64 static u_long get_macho_cmd_data_64(u_char *file, u_long offset, u_int *filetype, u_int *ncmds); -static kern_return_t export_macho_header_64(const KXLDObject *object, +static kern_return_t export_macho_header_64(const KXLDObject *object, u_char *buf, u_int ncmds, u_long *header_offset, u_long header_size); #endif /* KXLD_USER_OR_LP64 */ @@ -198,275 +198,274 @@ static KXLDSect * kxld_object_get_sect_by_name(const KXLDObject *object, const c /******************************************************************************* *******************************************************************************/ -size_t +size_t kxld_object_sizeof(void) { - return sizeof(KXLDObject); + return sizeof(KXLDObject); } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_object_init_from_macho(KXLDObject *object, u_char *file, u_long size, - const char *name, KXLDArray *section_order __unused, + const char *name, KXLDArray *section_order __unused, cpu_type_t cputype, cpu_subtype_t cpusubtype, KXLDFlags flags __unused) { - kern_return_t rval = KERN_FAILURE; - KXLDSeg * seg = NULL; - u_int i = 0; - u_char * my_file; + kern_return_t rval = KERN_FAILURE; + KXLDSeg * seg = NULL; + u_int i = 0; + u_char * my_file; - check(object); - check(file); - check(name); + check(object); + check(file); + check(name); - object->name = name; + object->name = name; #if KXLD_USER_OR_OBJECT - object->section_order = section_order; + object->section_order = section_order; #endif #if KXLD_PIC_KEXTS - object->include_kaslr_relocs = ((flags & kKXLDFlagIncludeRelocs) == kKXLDFlagIncludeRelocs); + object->include_kaslr_relocs = ((flags & kKXLDFlagIncludeRelocs) == kKXLDFlagIncludeRelocs); #endif - - /* Find the local architecture */ - - rval = get_target_machine_info(object, cputype, cpusubtype); - require_noerr(rval, finish); - - /* Find the Mach-O slice for the target architecture */ - - rval = get_macho_slice_for_arch(object, file, size); - require_noerr(rval, finish); - - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } - - /* Allocate the symbol table */ - - if (!object->symtab) { - object->symtab = kxld_alloc(kxld_symtab_sizeof()); - require_action(object->symtab, finish, rval=KERN_RESOURCE_SHORTAGE); - bzero(object->symtab, kxld_symtab_sizeof()); - } - - /* Build the relocator */ - - rval = kxld_relocator_init(&object->relocator, - my_file, - 
object->symtab, &object->sects, - object->cputype, - object->cpusubtype, - kxld_object_target_needs_swap(object)); - require_noerr(rval, finish); - - /* There are four types of Mach-O files that we can support: - * 1) 32-bit MH_OBJECT - Snow Leopard and earlier - * 2) 32-bit MH_KEXT_BUNDLE - Lion and Later - * 3) 64-bit MH_OBJECT - Unsupported - * 4) 64-bit MH_KEXT_BUNDLE - Snow Leopard and Later - */ - - if (kxld_object_is_32_bit(object)) { - struct mach_header *mach_hdr = (struct mach_header *) ((void *) my_file); - object->filetype = mach_hdr->filetype; - } else { - struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) my_file); - object->filetype = mach_hdr->filetype; - } - - switch (object->filetype) { + + /* Find the local architecture */ + + rval = get_target_machine_info(object, cputype, cpusubtype); + require_noerr(rval, finish); + + /* Find the Mach-O slice for the target architecture */ + + rval = get_macho_slice_for_arch(object, file, size); + require_noerr(rval, finish); + + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } + + /* Allocate the symbol table */ + + if (!object->symtab) { + object->symtab = kxld_alloc(kxld_symtab_sizeof()); + require_action(object->symtab, finish, rval = KERN_RESOURCE_SHORTAGE); + bzero(object->symtab, kxld_symtab_sizeof()); + } + + /* Build the relocator */ + + rval = kxld_relocator_init(&object->relocator, + my_file, + object->symtab, &object->sects, + object->cputype, + object->cpusubtype, + kxld_object_target_needs_swap(object)); + require_noerr(rval, finish); + + /* There are four types of Mach-O files that we can support: + * 1) 32-bit MH_OBJECT - Snow Leopard and earlier + * 2) 32-bit MH_KEXT_BUNDLE - Lion and Later + * 3) 64-bit MH_OBJECT - Unsupported + * 4) 64-bit MH_KEXT_BUNDLE - Snow Leopard and Later + */ + + if (kxld_object_is_32_bit(object)) { + struct mach_header *mach_hdr = (struct mach_header *) ((void *) my_file); + object->filetype = mach_hdr->filetype; + } else { + struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) my_file); + object->filetype = mach_hdr->filetype; + } + + switch (object->filetype) { #if KXLD_USER_OR_BUNDLE - case MH_KEXT_BUNDLE: - rval = init_from_bundle(object); - require_noerr(rval, finish); - break; + case MH_KEXT_BUNDLE: + rval = init_from_bundle(object); + require_noerr(rval, finish); + break; #endif /* KXLD_USER_OR_BUNDLE */ #if KXLD_USER_OR_OBJECT - case MH_OBJECT: - rval = init_from_object(object); - require_noerr(rval, finish); - break; + case MH_OBJECT: + rval = init_from_object(object); + require_noerr(rval, finish); + break; #endif /* KXLD_USER_OR_OBJECT */ - case MH_EXECUTE: - object->is_kernel = TRUE; - rval = init_from_execute(object); - require_noerr(rval, finish); - break; - default: - rval = KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, - kKxldLogFiletypeNotSupported, object->filetype); - goto finish; - } - - if (!kxld_object_is_kernel(object)) { - for (i = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); - kxld_seg_set_vm_protections(seg, - target_supports_protected_segments(object)); - } - - seg = kxld_object_get_seg_by_name(object, SEG_LINKEDIT); - if (seg) { - (void) kxld_seg_populate_linkedit(seg, object->symtab, - kxld_object_is_32_bit(object) + case MH_EXECUTE: + object->is_kernel = TRUE; + rval = init_from_execute(object); + require_noerr(rval, finish); + break; + default: + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, + 
kKxldLogFiletypeNotSupported, object->filetype); + goto finish; + } + + if (!kxld_object_is_kernel(object)) { + for (i = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); + kxld_seg_set_vm_protections(seg, + target_supports_protected_segments(object)); + } + + seg = kxld_object_get_seg_by_name(object, SEG_LINKEDIT); + if (seg) { + (void) kxld_seg_populate_linkedit(seg, object->symtab, + kxld_object_is_32_bit(object) #if KXLD_PIC_KEXTS - , &object->locrelocs, &object->extrelocs, - target_supports_slideable_kexts(object) + , &object->locrelocs, &object->extrelocs, + target_supports_slideable_kexts(object) #endif - , isOldInterface ? 0 : object->splitinfolc.datasize - ); - } - } - - (void) set_is_object_linked(object); - - rval = KERN_SUCCESS; + , isOldInterface ? 0 : object->splitinfolc.datasize + ); + } + } + + (void) set_is_object_linked(object); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ splitKextLinkInfo * kxld_object_get_link_info(KXLDObject *object) { - check(object); - - return &object->split_info; + check(object); + + return &object->split_info; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_object_set_link_info(KXLDObject *object, splitKextLinkInfo *link_info) { - check(object); - check(link_info); + check(object); + check(link_info); - object->split_info.vmaddr_TEXT = link_info->vmaddr_TEXT; - object->split_info.vmaddr_TEXT_EXEC = link_info->vmaddr_TEXT_EXEC; - object->split_info.vmaddr_DATA = link_info->vmaddr_DATA; - object->split_info.vmaddr_DATA_CONST = link_info->vmaddr_DATA_CONST; - object->split_info.vmaddr_LLVM_COV = link_info->vmaddr_LLVM_COV; - object->split_info.vmaddr_LINKEDIT = link_info->vmaddr_LINKEDIT; + object->split_info.vmaddr_TEXT = link_info->vmaddr_TEXT; + object->split_info.vmaddr_TEXT_EXEC = link_info->vmaddr_TEXT_EXEC; + object->split_info.vmaddr_DATA = link_info->vmaddr_DATA; + object->split_info.vmaddr_DATA_CONST = link_info->vmaddr_DATA_CONST; + object->split_info.vmaddr_LLVM_COV = link_info->vmaddr_LLVM_COV; + object->split_info.vmaddr_LINKEDIT = link_info->vmaddr_LINKEDIT; - return; + return; } /******************************************************************************* *******************************************************************************/ kern_return_t -get_target_machine_info(KXLDObject *object, cpu_type_t cputype __unused, +get_target_machine_info(KXLDObject *object, cpu_type_t cputype __unused, cpu_subtype_t cpusubtype __unused) { #if KERNEL - /* Because the kernel can only link for its own architecture, we know what - * the host and target architectures are at compile time, so we can use - * a vastly simplified version of this function. - */ + /* Because the kernel can only link for its own architecture, we know what + * the host and target architectures are at compile time, so we can use + * a vastly simplified version of this function. 
+ */ - check(object); + check(object); #if defined(__x86_64__) - object->cputype = CPU_TYPE_X86_64; + object->cputype = CPU_TYPE_X86_64; /* FIXME: we need clang to provide a __x86_64h__ macro for the sub-type. Using * __AVX2__ is a temporary solution until this is available. */ #if defined(__AVX2__) - object->cpusubtype = CPU_SUBTYPE_X86_64_H; + object->cpusubtype = CPU_SUBTYPE_X86_64_H; #else - object->cpusubtype = CPU_SUBTYPE_X86_64_ALL; + object->cpusubtype = CPU_SUBTYPE_X86_64_ALL; #endif - return KERN_SUCCESS; + return KERN_SUCCESS; #elif defined(__arm__) - object->cputype = CPU_TYPE_ARM; - object->cpusubtype = CPU_SUBTYPE_ARM_ALL; - return KERN_SUCCESS; + object->cputype = CPU_TYPE_ARM; + object->cpusubtype = CPU_SUBTYPE_ARM_ALL; + return KERN_SUCCESS; #elif defined(__arm64__) - object->cputype = CPU_TYPE_ARM64; - object->cpusubtype = CPU_SUBTYPE_ARM64_ALL; - return KERN_SUCCESS; -#else - kxld_log(kKxldLogLinking, kKxldLogErr, - kKxldLogArchNotSupported, _mh_execute_header->cputype); - return KERN_NOT_SUPPORTED; + object->cputype = CPU_TYPE_ARM64; + object->cpusubtype = CPU_SUBTYPE_ARM64_ALL; + return KERN_SUCCESS; +#else + kxld_log(kKxldLogLinking, kKxldLogErr, + kKxldLogArchNotSupported, _mh_execute_header->cputype); + return KERN_NOT_SUPPORTED; #endif /* Supported architecture defines */ #else /* !KERNEL */ - /* User-space must look up the architecture it's running on and the target - * architecture at run-time. - */ - - kern_return_t rval = KERN_FAILURE; - const NXArchInfo *host_arch = NULL; - - check(object); - - host_arch = NXGetLocalArchInfo(); - require_action(host_arch, finish, rval=KERN_FAILURE); - - object->host_order = host_arch->byteorder; - - /* If the user did not specify a cputype, use the local architecture. - */ - - if (cputype) { - object->cputype = cputype; - object->cpusubtype = cpusubtype; - } else { - object->cputype = host_arch->cputype; - object->target_order = object->host_order; - - switch (object->cputype) { - case CPU_TYPE_I386: - object->cpusubtype = CPU_SUBTYPE_I386_ALL; - break; - case CPU_TYPE_X86_64: - object->cpusubtype = CPU_SUBTYPE_X86_64_ALL; - break; - case CPU_TYPE_ARM: - object->cpusubtype = CPU_SUBTYPE_ARM_ALL; - break; - case CPU_TYPE_ARM64: - object->cpusubtype = CPU_SUBTYPE_ARM64_ALL; - break; - default: - object->cpusubtype = 0; - break; - } - } - - /* Validate that we support the target architecture and record its - * endianness. - */ - - switch(object->cputype) { - case CPU_TYPE_ARM: - case CPU_TYPE_ARM64: - case CPU_TYPE_I386: - case CPU_TYPE_X86_64: - object->target_order = NX_LittleEndian; - break; - default: - rval = KERN_NOT_SUPPORTED; - kxld_log(kKxldLogLinking, kKxldLogErr, - kKxldLogArchNotSupported, object->cputype); - goto finish; - } - - rval = KERN_SUCCESS; + /* User-space must look up the architecture it's running on and the target + * architecture at run-time. + */ + + kern_return_t rval = KERN_FAILURE; + const NXArchInfo *host_arch = NULL; + + check(object); + + host_arch = NXGetLocalArchInfo(); + require_action(host_arch, finish, rval = KERN_FAILURE); + + object->host_order = host_arch->byteorder; + + /* If the user did not specify a cputype, use the local architecture. 
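A compressed illustration of the user-space slice selection that follows: fat headers are always big endian, so the fields are brought to host order first (ntohl here, swap_fat_header/swap_fat_arch in the code below), then NXGetLocalArchInfo() and NXFindBestFatArch() from <mach-o/arch.h> (long-standing, now deprecated libmacho helpers) pick the best match for the host. The helper name is hypothetical and kxld's truncation checks are omitted:

#include <stdint.h>
#include <arpa/inet.h>        /* ntohl: fat headers are big endian on disk */
#include <mach-o/arch.h>      /* NXGetLocalArchInfo, NXFindBestFatArch */
#include <mach-o/fat.h>       /* struct fat_header, struct fat_arch */

/* Return the file offset of the best slice for the host CPU,
 * or (unsigned long)-1 if the buffer is not a usable fat binary. */
static unsigned long
find_host_slice(unsigned char *file, unsigned long size)
{
	struct fat_header *fat = (struct fat_header *)(void *)file;
	struct fat_arch *archs = (struct fat_arch *)&fat[1];
	const NXArchInfo *host = NXGetLocalArchInfo();
	struct fat_arch *best = NULL;
	uint32_t i, nfat;

	if (!host || size < sizeof(*fat) || ntohl(fat->magic) != FAT_MAGIC) {
		return (unsigned long)-1;
	}
	nfat = ntohl(fat->nfat_arch);

	/* Swap each fat_arch into host order (in place, as kxld does)
	 * before searching */
	for (i = 0; i < nfat; ++i) {
		archs[i].cputype    = (cpu_type_t)ntohl((uint32_t)archs[i].cputype);
		archs[i].cpusubtype = (cpu_subtype_t)ntohl((uint32_t)archs[i].cpusubtype);
		archs[i].offset     = ntohl(archs[i].offset);
		archs[i].size       = ntohl(archs[i].size);
		archs[i].align      = ntohl(archs[i].align);
	}

	best = NXFindBestFatArch(host->cputype, host->cpusubtype, archs, nfat);
	return best ? best->offset : (unsigned long)-1;
}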
+ */ + + if (cputype) { + object->cputype = cputype; + object->cpusubtype = cpusubtype; + } else { + object->cputype = host_arch->cputype; + object->target_order = object->host_order; + + switch (object->cputype) { + case CPU_TYPE_I386: + object->cpusubtype = CPU_SUBTYPE_I386_ALL; + break; + case CPU_TYPE_X86_64: + object->cpusubtype = CPU_SUBTYPE_X86_64_ALL; + break; + case CPU_TYPE_ARM: + object->cpusubtype = CPU_SUBTYPE_ARM_ALL; + break; + case CPU_TYPE_ARM64: + object->cpusubtype = CPU_SUBTYPE_ARM64_ALL; + break; + default: + object->cpusubtype = 0; + break; + } + } + + /* Validate that we support the target architecture and record its + * endianness. + */ + + switch (object->cputype) { + case CPU_TYPE_ARM: + case CPU_TYPE_ARM64: + case CPU_TYPE_I386: + case CPU_TYPE_X86_64: + object->target_order = NX_LittleEndian; + break; + default: + rval = KERN_NOT_SUPPORTED; + kxld_log(kKxldLogLinking, kKxldLogErr, + kKxldLogArchNotSupported, object->cputype); + goto finish; + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; #endif /* KERNEL */ } @@ -475,96 +474,95 @@ finish: static kern_return_t get_macho_slice_for_arch(KXLDObject *object, u_char *file, u_long size) { - kern_return_t rval = KERN_FAILURE; - struct mach_header *mach_hdr = NULL; + kern_return_t rval = KERN_FAILURE; + struct mach_header *mach_hdr = NULL; #if !KERNEL - struct fat_header *fat = (struct fat_header *) ((void *) file); - struct fat_arch *archs = (struct fat_arch *) &fat[1]; - boolean_t swap = FALSE; + struct fat_header *fat = (struct fat_header *) ((void *) file); + struct fat_arch *archs = (struct fat_arch *) &fat[1]; + boolean_t swap = FALSE; #endif /* KERNEL */ - u_char *my_file = file; - u_long my_file_size = size; - - check(object); - check(file); - check(size); + u_char *my_file = file; + u_long my_file_size = size; + + check(object); + check(file); + check(size); - /* We are assuming that we will never receive a fat file in the kernel */ + /* We are assuming that we will never receive a fat file in the kernel */ #if !KERNEL - require_action(size >= sizeof(*fat), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - /* The fat header is always big endian, so swap if necessary */ - if (fat->magic == FAT_CIGAM) { - (void) swap_fat_header(fat, object->host_order); - swap = TRUE; - } - - if (fat->magic == FAT_MAGIC) { - struct fat_arch *arch = NULL; - u_long arch_size; - boolean_t ovr = os_mul_and_add_overflow(fat->nfat_arch, sizeof(*archs), sizeof(*fat), &arch_size); - - require_action(!ovr && size >= arch_size, - finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - /* Swap the fat_arch structures if necessary */ - if (swap) { - (void) swap_fat_arch(archs, fat->nfat_arch, object->host_order); - } - - /* Locate the Mach-O for the requested architecture */ - - arch = NXFindBestFatArch(object->cputype, object->cpusubtype, archs, fat->nfat_arch); - require_action(arch, finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogArchNotFound)); - require_action(size >= arch->offset + arch->size, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - my_file = my_file + arch->offset; - my_file_size = arch->size; - } + require_action(size >= sizeof(*fat), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + /* The fat header is always big endian, so swap if necessary */ + if (fat->magic == FAT_CIGAM) { + 
(void) swap_fat_header(fat, object->host_order); + swap = TRUE; + } + + if (fat->magic == FAT_MAGIC) { + struct fat_arch *arch = NULL; + u_long arch_size; + boolean_t ovr = os_mul_and_add_overflow(fat->nfat_arch, sizeof(*archs), sizeof(*fat), &arch_size); + + require_action(!ovr && size >= arch_size, + finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + /* Swap the fat_arch structures if necessary */ + if (swap) { + (void) swap_fat_arch(archs, fat->nfat_arch, object->host_order); + } + + /* Locate the Mach-O for the requested architecture */ + + arch = NXFindBestFatArch(object->cputype, object->cpusubtype, archs, fat->nfat_arch); + require_action(arch, finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogArchNotFound)); + require_action(size >= arch->offset + arch->size, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + my_file = my_file + arch->offset; + my_file_size = arch->size; + } #endif /* !KERNEL */ - /* Swap the Mach-O's headers to this architecture if necessary */ - if (kxld_object_is_32_bit(object)) { - rval = validate_and_swap_macho_32(my_file, my_file_size + /* Swap the Mach-O's headers to this architecture if necessary */ + if (kxld_object_is_32_bit(object)) { + rval = validate_and_swap_macho_32(my_file, my_file_size #if !KERNEL - , object->host_order + , object->host_order #endif /* !KERNEL */ - ); - } else { - rval = validate_and_swap_macho_64(my_file, my_file_size + ); + } else { + rval = validate_and_swap_macho_64(my_file, my_file_size #if !KERNEL - , object->host_order + , object->host_order #endif /* !KERNEL */ - ); - } - require_noerr(rval, finish); - - mach_hdr = (struct mach_header *) ((void *) my_file); - require_action(object->cputype == mach_hdr->cputype, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - object->cpusubtype = mach_hdr->cpusubtype; /* */ - - if (isOldInterface) { - object->file = my_file; - object->size = my_file_size; - } - else { - object->split_info.kextExecutable = my_file; - object->split_info.kextSize = my_file_size; - } - - rval = KERN_SUCCESS; + ); + } + require_noerr(rval, finish); + + mach_hdr = (struct mach_header *) ((void *) my_file); + require_action(object->cputype == mach_hdr->cputype, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + object->cpusubtype = mach_hdr->cpusubtype; /* */ + + if (isOldInterface) { + object->file = my_file; + object->size = my_file_size; + } else { + object->split_info.kextExecutable = my_file; + object->split_info.kextSize = my_file_size; + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -573,229 +571,239 @@ static kern_return_t init_from_final_linked_image(KXLDObject *object, u_int *filetype_out, struct symtab_command **symtab_hdr_out) { - kern_return_t rval = KERN_FAILURE; - KXLDSeg *seg = NULL; - KXLDSect *sect = NULL; - struct load_command *cmd_hdr = NULL; - struct symtab_command *symtab_hdr = NULL; - struct uuid_command *uuid_hdr = NULL; - struct version_min_command *versionmin_hdr = NULL; - struct build_version_command *build_version_hdr = NULL; - struct source_version_command *source_version_hdr = NULL; - u_long base_offset = 0; - u_long offset = 0; - u_long sect_offset = 0; - u_int filetype = 0; - u_int i = 0; - u_int j = 0; - u_int segi = 0; - u_int secti = 0; - u_int nsegs = 
0; - u_int nsects = 0; - u_int ncmds = 0; - u_char *my_file; - - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } - - KXLD_3264_FUNC(kxld_object_is_32_bit(object), base_offset, - get_macho_cmd_data_32, get_macho_cmd_data_64, - my_file, offset, &filetype, &ncmds); - - /* First pass to count segments and sections */ - - offset = base_offset; - for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) { - cmd_hdr = (struct load_command *) ((void *) (my_file + offset)); - - switch(cmd_hdr->cmd) { + kern_return_t rval = KERN_FAILURE; + KXLDSeg *seg = NULL; + KXLDSect *sect = NULL; + struct load_command *cmd_hdr = NULL; + struct symtab_command *symtab_hdr = NULL; + struct uuid_command *uuid_hdr = NULL; + struct version_min_command *versionmin_hdr = NULL; + struct build_version_command *build_version_hdr = NULL; + struct source_version_command *source_version_hdr = NULL; + u_long base_offset = 0; + u_long offset = 0; + u_long sect_offset = 0; + u_int filetype = 0; + u_int i = 0; + u_int j = 0; + u_int segi = 0; + u_int secti = 0; + u_int nsegs = 0; + u_int nsects = 0; + u_int ncmds = 0; + u_char *my_file; + + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } + + KXLD_3264_FUNC(kxld_object_is_32_bit(object), base_offset, + get_macho_cmd_data_32, get_macho_cmd_data_64, + my_file, offset, &filetype, &ncmds); + + /* First pass to count segments and sections */ + + offset = base_offset; + for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) { + cmd_hdr = (struct load_command *) ((void *) (my_file + offset)); + + switch (cmd_hdr->cmd) { #if KXLD_USER_OR_ILP32 - case LC_SEGMENT: - { - struct segment_command *seg_hdr = - (struct segment_command *) cmd_hdr; - - /* Ignore segments with no vm size */ - if (!seg_hdr->vmsize) continue; - - ++nsegs; - nsects += seg_hdr->nsects; - } - break; + case LC_SEGMENT: + { + struct segment_command *seg_hdr = + (struct segment_command *) cmd_hdr; + + /* Ignore segments with no vm size */ + if (!seg_hdr->vmsize) { + continue; + } + + ++nsegs; + nsects += seg_hdr->nsects; + } + break; #endif /* KXLD_USER_OR_ILP32 */ #if KXLD_USER_OR_LP64 - case LC_SEGMENT_64: - { - struct segment_command_64 *seg_hdr = - (struct segment_command_64 *) ((void *) cmd_hdr); - - /* Ignore segments with no vm size */ - if (!seg_hdr->vmsize) continue; - - ++nsegs; - nsects += seg_hdr->nsects; - } - break; + case LC_SEGMENT_64: + { + struct segment_command_64 *seg_hdr = + (struct segment_command_64 *) ((void *) cmd_hdr); + + /* Ignore segments with no vm size */ + if (!seg_hdr->vmsize) { + continue; + } + + ++nsegs; + nsects += seg_hdr->nsects; + } + break; #endif /* KXLD_USER_OR_LP64 */ - default: - continue; - } - } + default: + continue; + } + } - /* Allocate the segments and sections */ + /* Allocate the segments and sections */ - if (nsegs) { - rval = kxld_array_init(&object->segs, sizeof(KXLDSeg), nsegs); - require_noerr(rval, finish); + if (nsegs) { + rval = kxld_array_init(&object->segs, sizeof(KXLDSeg), nsegs); + require_noerr(rval, finish); - rval = kxld_array_init(&object->sects, sizeof(KXLDSect), nsects); - require_noerr(rval, finish); - } + rval = kxld_array_init(&object->sects, sizeof(KXLDSect), nsects); + require_noerr(rval, finish); + } - /* Initialize the segments and sections */ + /* Initialize the segments and sections */ - offset = base_offset; - for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) { - cmd_hdr = (struct load_command *) ((void *) (my_file 
+ offset)); - seg = NULL; + offset = base_offset; + for (i = 0; i < ncmds; ++i, offset += cmd_hdr->cmdsize) { + cmd_hdr = (struct load_command *) ((void *) (my_file + offset)); + seg = NULL; - switch(cmd_hdr->cmd) { + switch (cmd_hdr->cmd) { #if KXLD_USER_OR_ILP32 - case LC_SEGMENT: - { - struct segment_command *seg_hdr = - (struct segment_command *) cmd_hdr; + case LC_SEGMENT: + { + struct segment_command *seg_hdr = + (struct segment_command *) cmd_hdr; - /* Ignore segments with no vm size */ - if (!seg_hdr->vmsize) continue; + /* Ignore segments with no vm size */ + if (!seg_hdr->vmsize) { + continue; + } - seg = kxld_array_get_item(&object->segs, segi++); + seg = kxld_array_get_item(&object->segs, segi++); - rval = kxld_seg_init_from_macho_32(seg, seg_hdr); - require_noerr(rval, finish); + rval = kxld_seg_init_from_macho_32(seg, seg_hdr); + require_noerr(rval, finish); - sect_offset = offset + sizeof(*seg_hdr); - } - break; + sect_offset = offset + sizeof(*seg_hdr); + } + break; #endif /* KXLD_USER_OR_ILP32 */ #if KXLD_USER_OR_LP64 - case LC_SEGMENT_64: - { - struct segment_command_64 *seg_hdr = - (struct segment_command_64 *) ((void *) cmd_hdr); + case LC_SEGMENT_64: + { + struct segment_command_64 *seg_hdr = + (struct segment_command_64 *) ((void *) cmd_hdr); - /* Ignore segments with no vm size */ - if (!seg_hdr->vmsize) continue; + /* Ignore segments with no vm size */ + if (!seg_hdr->vmsize) { + continue; + } - seg = kxld_array_get_item(&object->segs, segi++); + seg = kxld_array_get_item(&object->segs, segi++); - rval = kxld_seg_init_from_macho_64(seg, seg_hdr); - require_noerr(rval, finish); + rval = kxld_seg_init_from_macho_64(seg, seg_hdr); + require_noerr(rval, finish); - sect_offset = offset + sizeof(*seg_hdr); - } - break; + sect_offset = offset + sizeof(*seg_hdr); + } + break; #endif /* KXLD_USER_OR_LP64 */ - case LC_SYMTAB: - symtab_hdr = (struct symtab_command *) cmd_hdr; - break; - case LC_UUID: - uuid_hdr = (struct uuid_command *) cmd_hdr; - kxld_uuid_init_from_macho(&object->uuid, uuid_hdr); - break; - case LC_VERSION_MIN_MACOSX: - case LC_VERSION_MIN_IPHONEOS: - case LC_VERSION_MIN_TVOS: - case LC_VERSION_MIN_WATCHOS: - versionmin_hdr = (struct version_min_command *) cmd_hdr; - kxld_versionmin_init_from_macho(&object->versionmin, versionmin_hdr); - break; - case LC_BUILD_VERSION: - build_version_hdr = (struct build_version_command *)cmd_hdr; - kxld_versionmin_init_from_build_cmd(&object->versionmin, build_version_hdr); - break; - case LC_SOURCE_VERSION: - source_version_hdr = (struct source_version_command *) (void *) cmd_hdr; - kxld_srcversion_init_from_macho(&object->srcversion, source_version_hdr); - break; - case LC_DYSYMTAB: - object->dysymtab_hdr = (struct dysymtab_command *) cmd_hdr; - rval = kxld_reloc_create_macho(&object->extrelocs, &object->relocator, - (struct relocation_info *) ((void *) (my_file + object->dysymtab_hdr->extreloff)), - object->dysymtab_hdr->nextrel); - require_noerr(rval, finish); - - rval = kxld_reloc_create_macho(&object->locrelocs, &object->relocator, - (struct relocation_info *) ((void *) (my_file + object->dysymtab_hdr->locreloff)), - object->dysymtab_hdr->nlocrel); - require_noerr(rval, finish); - - break; - case LC_UNIXTHREAD: - case LC_MAIN: - /* Don't need to do anything with UNIXTHREAD or MAIN for the kernel */ - require_action(kxld_object_is_kernel(object), - finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "LC_UNIXTHREAD/LC_MAIN segment is not valid in a kext.")); - break; - case 
LC_SEGMENT_SPLIT_INFO: - if (isSplitKext) { - struct linkedit_data_command *split_info_hdr = NULL; - split_info_hdr = (struct linkedit_data_command *) (void *) cmd_hdr; - kxld_splitinfolc_init_from_macho(&object->splitinfolc, split_info_hdr); - } - break; - case LC_NOTE: - /* binary blob of data */ - break; - case LC_CODE_SIGNATURE: - case LC_DYLD_INFO: - case LC_DYLD_INFO_ONLY: - case LC_FUNCTION_STARTS: - case LC_DATA_IN_CODE: - case LC_DYLIB_CODE_SIGN_DRS: - /* Various metadata that might be stored in the linkedit segment */ - break; - default: - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Invalid load command type in MH_KEXT_BUNDLE kext: %u.", cmd_hdr->cmd); - goto finish; - } - - if (seg) { - - /* Initialize the sections */ - for (j = 0; j < seg->sects.nitems; ++j, ++secti) { - sect = kxld_array_get_item(&object->sects, secti); - - KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, - kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64, - sect, my_file, §_offset, secti, &object->relocator); - require_noerr(rval, finish); - - /* Add the section to the segment. This will also make sure - * that the sections and segments have the same segname. - */ - rval = kxld_seg_add_section(seg, sect); - require_noerr(rval, finish); - } - rval = kxld_seg_finish_init(seg); - require_noerr(rval, finish); - } - } - - if (filetype_out) *filetype_out = filetype; - if (symtab_hdr_out) *symtab_hdr_out = symtab_hdr; - object->is_final_image = TRUE; - rval = KERN_SUCCESS; + case LC_SYMTAB: + symtab_hdr = (struct symtab_command *) cmd_hdr; + break; + case LC_UUID: + uuid_hdr = (struct uuid_command *) cmd_hdr; + kxld_uuid_init_from_macho(&object->uuid, uuid_hdr); + break; + case LC_VERSION_MIN_MACOSX: + case LC_VERSION_MIN_IPHONEOS: + case LC_VERSION_MIN_TVOS: + case LC_VERSION_MIN_WATCHOS: + versionmin_hdr = (struct version_min_command *) cmd_hdr; + kxld_versionmin_init_from_macho(&object->versionmin, versionmin_hdr); + break; + case LC_BUILD_VERSION: + build_version_hdr = (struct build_version_command *)cmd_hdr; + kxld_versionmin_init_from_build_cmd(&object->versionmin, build_version_hdr); + break; + case LC_SOURCE_VERSION: + source_version_hdr = (struct source_version_command *) (void *) cmd_hdr; + kxld_srcversion_init_from_macho(&object->srcversion, source_version_hdr); + break; + case LC_DYSYMTAB: + object->dysymtab_hdr = (struct dysymtab_command *) cmd_hdr; + rval = kxld_reloc_create_macho(&object->extrelocs, &object->relocator, + (struct relocation_info *) ((void *) (my_file + object->dysymtab_hdr->extreloff)), + object->dysymtab_hdr->nextrel); + require_noerr(rval, finish); + + rval = kxld_reloc_create_macho(&object->locrelocs, &object->relocator, + (struct relocation_info *) ((void *) (my_file + object->dysymtab_hdr->locreloff)), + object->dysymtab_hdr->nlocrel); + require_noerr(rval, finish); + + break; + case LC_UNIXTHREAD: + case LC_MAIN: + /* Don't need to do anything with UNIXTHREAD or MAIN for the kernel */ + require_action(kxld_object_is_kernel(object), + finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "LC_UNIXTHREAD/LC_MAIN segment is not valid in a kext.")); + break; + case LC_SEGMENT_SPLIT_INFO: + if (isSplitKext) { + struct linkedit_data_command *split_info_hdr = NULL; + split_info_hdr = (struct linkedit_data_command *) (void *) cmd_hdr; + kxld_splitinfolc_init_from_macho(&object->splitinfolc, split_info_hdr); + } + break; + case LC_NOTE: + /* binary blob of data */ + break; + case 
LC_CODE_SIGNATURE: + case LC_DYLD_INFO: + case LC_DYLD_INFO_ONLY: + case LC_FUNCTION_STARTS: + case LC_DATA_IN_CODE: + case LC_DYLIB_CODE_SIGN_DRS: + /* Various metadata that might be stored in the linkedit segment */ + break; + default: + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Invalid load command type in MH_KEXT_BUNDLE kext: %u.", cmd_hdr->cmd); + goto finish; + } + + if (seg) { + /* Initialize the sections */ + for (j = 0; j < seg->sects.nitems; ++j, ++secti) { + sect = kxld_array_get_item(&object->sects, secti); + + KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, + kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64, + sect, my_file, §_offset, secti, &object->relocator); + require_noerr(rval, finish); + + /* Add the section to the segment. This will also make sure + * that the sections and segments have the same segname. + */ + rval = kxld_seg_add_section(seg, sect); + require_noerr(rval, finish); + } + rval = kxld_seg_finish_init(seg); + require_noerr(rval, finish); + } + } + + if (filetype_out) { + *filetype_out = filetype; + } + if (symtab_hdr_out) { + *symtab_hdr_out = symtab_hdr; + } + object->is_final_image = TRUE; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -803,81 +811,79 @@ finish: static kern_return_t init_from_execute(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - struct symtab_command *symtab_hdr = NULL; - u_int filetype = 0; - KXLDSeg * kernel_linkedit_seg = NULL; // used if running kernel + kern_return_t rval = KERN_FAILURE; + struct symtab_command *symtab_hdr = NULL; + u_int filetype = 0; + KXLDSeg * kernel_linkedit_seg = NULL; // used if running kernel #if KXLD_USER_OR_OBJECT - KXLDSeg *seg = NULL; - KXLDSect *sect = NULL; - KXLDSectionName *sname = NULL; - u_int i = 0, j = 0, k = 0; + KXLDSeg *seg = NULL; + KXLDSect *sect = NULL; + KXLDSectionName *sname = NULL; + u_int i = 0, j = 0, k = 0; #endif /* KXLD_USER_OR_OBJECT */ - u_char *my_file; - - check(object); - - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } - - require_action(kxld_object_is_kernel(object), finish, rval=KERN_FAILURE); - - rval = init_from_final_linked_image(object, &filetype, &symtab_hdr); - require_noerr(rval, finish); - - require_action(filetype == MH_EXECUTE, finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "The kernel file is not of type MH_EXECUTE.")); - - /* Initialize the symbol table. If this is the running kernel - * we will work from the in-memory linkedit segment; - * otherwise we work from the whole mach-o image. - */ + u_char *my_file; + + check(object); + + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } + + require_action(kxld_object_is_kernel(object), finish, rval = KERN_FAILURE); + + rval = init_from_final_linked_image(object, &filetype, &symtab_hdr); + require_noerr(rval, finish); + + require_action(filetype == MH_EXECUTE, finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "The kernel file is not of type MH_EXECUTE.")); + + /* Initialize the symbol table. If this is the running kernel + * we will work from the in-memory linkedit segment; + * otherwise we work from the whole mach-o image. 
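Both passes above use the same traversal idiom: load commands begin immediately after the Mach-O header, and each command records its own cmdsize, so the cursor simply advances command by command. A stripped-down sketch of that walk for a 64-bit image (bounds checking omitted; kxld gets it from validate_and_swap_macho_64 before this point):

#include <mach-o/loader.h>   /* mach_header_64, load_command, LC_SEGMENT_64 */

/* Count the LC_SEGMENT_64 commands with a nonzero vmsize, the way the
 * first pass of init_from_final_linked_image does. */
static unsigned int
count_segments_64(unsigned char *file)
{
	struct mach_header_64 *mach_hdr = (struct mach_header_64 *)(void *)file;
	unsigned long offset = sizeof(*mach_hdr);   /* commands follow the header */
	unsigned int i, nsegs = 0;

	for (i = 0; i < mach_hdr->ncmds; ++i) {
		struct load_command *cmd_hdr =
		    (struct load_command *)(void *)(file + offset);

		if (cmd_hdr->cmd == LC_SEGMENT_64) {
			struct segment_command_64 *seg_hdr =
			    (struct segment_command_64 *)(void *)cmd_hdr;
			if (seg_hdr->vmsize) {      /* kxld skips zero-vmsize segments */
				++nsegs;
			}
		}
		offset += cmd_hdr->cmdsize;     /* cmdsize covers the whole command */
	}
	return nsegs;
}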
+ */ #if KERNEL - kernel_linkedit_seg = kxld_object_get_seg_by_name(object, SEG_LINKEDIT); - require_action(kernel_linkedit_seg, finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO)); + kernel_linkedit_seg = kxld_object_get_seg_by_name(object, SEG_LINKEDIT); + require_action(kernel_linkedit_seg, finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO)); #endif - KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, - kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64, - object->symtab, symtab_hdr, my_file, kernel_linkedit_seg); - require_noerr(rval, finish); + KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, + kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64, + object->symtab, symtab_hdr, my_file, kernel_linkedit_seg); + require_noerr(rval, finish); #if KXLD_USER_OR_OBJECT - /* Save off the order of section names so that we can lay out kext - * sections for MH_OBJECT-based systems. - */ - if (target_supports_object(object)) { - - rval = kxld_array_init(object->section_order, sizeof(KXLDSectionName), - object->sects.nitems); - require_noerr(rval, finish); - - /* Copy the section names into the section_order array for future kext - * section ordering. - */ - for (i = 0, k = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); - - for (j = 0; j < seg->sects.nitems; ++j, ++k) { - sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, j); - sname = kxld_array_get_item(object->section_order, k); - - strlcpy(sname->segname, sect->segname, sizeof(sname->segname)); - strlcpy(sname->sectname, sect->sectname, sizeof(sname->sectname)); - } - } - } + /* Save off the order of section names so that we can lay out kext + * sections for MH_OBJECT-based systems. + */ + if (target_supports_object(object)) { + rval = kxld_array_init(object->section_order, sizeof(KXLDSectionName), + object->sects.nitems); + require_noerr(rval, finish); + + /* Copy the section names into the section_order array for future kext + * section ordering. 
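The KXLD_3264_FUNC macro used just above dispatches one call site to either a 32-bit or a 64-bit implementation and assigns the result, which is how kxld keeps a single code path for both pointer widths. Its definition lives in kxld_util.h; a plausible shape, shown here as an assumption rather than a verbatim copy:

/* Call func32 or func64 with the same arguments, storing the result
 * in rval. */
#define KXLD_3264_FUNC(cond32, rval, func32, func64, ...)  \
	do {                                                   \
		if (cond32) {                                      \
			(rval) = (func32)(__VA_ARGS__);                \
		} else {                                           \
			(rval) = (func64)(__VA_ARGS__);                \
		}                                                  \
	} while (0)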
+ */ + for (i = 0, k = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); + + for (j = 0; j < seg->sects.nitems; ++j, ++k) { + sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, j); + sname = kxld_array_get_item(object->section_order, k); + + strlcpy(sname->segname, sect->segname, sizeof(sname->segname)); + strlcpy(sname->sectname, sect->sectname, sizeof(sname->sectname)); + } + } + } #endif /* KXLD_USER_OR_OBJECT */ - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_USER_OR_BUNDLE @@ -886,260 +892,260 @@ finish: static boolean_t target_supports_bundle(const KXLDObject *object __unused) { - return TRUE; + return TRUE; } /******************************************************************************* *******************************************************************************/ -static kern_return_t +static kern_return_t init_from_bundle(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - struct symtab_command *symtab_hdr = NULL; - u_int filetype = 0; - u_char *my_file; - - check(object); - - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } - - require_action(target_supports_bundle(object), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, - kKxldLogFiletypeNotSupported, MH_KEXT_BUNDLE)); - - rval = init_from_final_linked_image(object, &filetype, &symtab_hdr); - require_noerr(rval, finish); - - require_action(filetype == MH_KEXT_BUNDLE, finish, - rval=KERN_FAILURE); - - KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, - kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64, - object->symtab, symtab_hdr, my_file, - /* kernel_linkedit_seg */ NULL); - require_noerr(rval, finish); - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + struct symtab_command *symtab_hdr = NULL; + u_int filetype = 0; + u_char *my_file; + + check(object); + + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } + + require_action(target_supports_bundle(object), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, + kKxldLogFiletypeNotSupported, MH_KEXT_BUNDLE)); + + rval = init_from_final_linked_image(object, &filetype, &symtab_hdr); + require_noerr(rval, finish); + + require_action(filetype == MH_KEXT_BUNDLE, finish, + rval = KERN_FAILURE); + + KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, + kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64, + object->symtab, symtab_hdr, my_file, + /* kernel_linkedit_seg */ NULL); + require_noerr(rval, finish); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_BUNDLE */ #if KXLD_USER_OR_OBJECT /******************************************************************************* *******************************************************************************/ -static boolean_t target_supports_object(const KXLDObject *object) +static boolean_t +target_supports_object(const KXLDObject *object) { - return (object->cputype == CPU_TYPE_I386); + return object->cputype == CPU_TYPE_I386; } /******************************************************************************* *******************************************************************************/ -static kern_return_t +static kern_return_t init_from_object(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - struct load_command *cmd_hdr = NULL; - struct symtab_command *symtab_hdr = NULL; - struct uuid_command 
*uuid_hdr = NULL; - KXLDSect *sect = NULL; - u_long offset = 0; - u_long sect_offset = 0; - u_int filetype = 0; - u_int ncmds = 0; - u_int nsects = 0; - u_int i = 0; - boolean_t has_segment = FALSE; - u_char *my_file; - - check(object); - - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } - - require_action(target_supports_object(object), - finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, - kKxldLogFiletypeNotSupported, MH_OBJECT)); - - KXLD_3264_FUNC(kxld_object_is_32_bit(object), offset, - get_macho_cmd_data_32, get_macho_cmd_data_64, - my_file, offset, &filetype, &ncmds); - - require_action(filetype == MH_OBJECT, finish, rval=KERN_FAILURE); - - /* MH_OBJECTs use one unnamed segment to contain all of the sections. We - * loop over all of the load commands to initialize the structures we - * expect. Then, we'll use the unnamed segment to get to all of the - * sections, and then use those sections to create the actual segments. - */ - - for (; i < ncmds; ++i, offset += cmd_hdr->cmdsize) { - cmd_hdr = (struct load_command *) ((void *) (my_file + offset)); - - switch(cmd_hdr->cmd) { + kern_return_t rval = KERN_FAILURE; + struct load_command *cmd_hdr = NULL; + struct symtab_command *symtab_hdr = NULL; + struct uuid_command *uuid_hdr = NULL; + KXLDSect *sect = NULL; + u_long offset = 0; + u_long sect_offset = 0; + u_int filetype = 0; + u_int ncmds = 0; + u_int nsects = 0; + u_int i = 0; + boolean_t has_segment = FALSE; + u_char *my_file; + + check(object); + + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } + + require_action(target_supports_object(object), + finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, + kKxldLogFiletypeNotSupported, MH_OBJECT)); + + KXLD_3264_FUNC(kxld_object_is_32_bit(object), offset, + get_macho_cmd_data_32, get_macho_cmd_data_64, + my_file, offset, &filetype, &ncmds); + + require_action(filetype == MH_OBJECT, finish, rval = KERN_FAILURE); + + /* MH_OBJECTs use one unnamed segment to contain all of the sections. We + * loop over all of the load commands to initialize the structures we + * expect. Then, we'll use the unnamed segment to get to all of the + * sections, and then use those sections to create the actual segments. 
+ */ + + for (; i < ncmds; ++i, offset += cmd_hdr->cmdsize) { + cmd_hdr = (struct load_command *) ((void *) (my_file + offset)); + + switch (cmd_hdr->cmd) { #if KXLD_USER_OR_ILP32 - case LC_SEGMENT: - { - struct segment_command *seg_hdr = - (struct segment_command *) cmd_hdr; - - /* Ignore segments with no vm size */ - if (!seg_hdr->vmsize) continue; - - /* Ignore LINKEDIT segments */ - if (streq_safe(seg_hdr->segname, SEG_LINKEDIT, - const_strlen(SEG_LINKEDIT))) - { - continue; - } - - require_action(kxld_object_is_32_bit(object), finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "LC_SEGMENT in 64-bit kext.")); - require_action(!has_segment, finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Multiple segments in an MH_OBJECT kext.")); - - nsects = seg_hdr->nsects; - sect_offset = offset + sizeof(*seg_hdr); - has_segment = TRUE; - } - break; + case LC_SEGMENT: + { + struct segment_command *seg_hdr = + (struct segment_command *) cmd_hdr; + + /* Ignore segments with no vm size */ + if (!seg_hdr->vmsize) { + continue; + } + + /* Ignore LINKEDIT segments */ + if (streq_safe(seg_hdr->segname, SEG_LINKEDIT, + const_strlen(SEG_LINKEDIT))) { + continue; + } + + require_action(kxld_object_is_32_bit(object), finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "LC_SEGMENT in 64-bit kext.")); + require_action(!has_segment, finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Multiple segments in an MH_OBJECT kext.")); + + nsects = seg_hdr->nsects; + sect_offset = offset + sizeof(*seg_hdr); + has_segment = TRUE; + } + break; #endif /* KXLD_USER_OR_ILP32 */ #if KXLD_USER_OR_LP64 - case LC_SEGMENT_64: - { - struct segment_command_64 *seg_hdr = - (struct segment_command_64 *) ((void *) cmd_hdr); - - /* Ignore segments with no vm size */ - if (!seg_hdr->vmsize) continue; - - /* Ignore LINKEDIT segments */ - if (streq_safe(seg_hdr->segname, SEG_LINKEDIT, - const_strlen(SEG_LINKEDIT))) - { - continue; - } - - require_action(!kxld_object_is_32_bit(object), finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "LC_SEGMENT_64 in a 32-bit kext.")); - require_action(!has_segment, finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Multiple segments in an MH_OBJECT kext.")); - - nsects = seg_hdr->nsects; - sect_offset = offset + sizeof(*seg_hdr); - has_segment = TRUE; - } - break; + case LC_SEGMENT_64: + { + struct segment_command_64 *seg_hdr = + (struct segment_command_64 *) ((void *) cmd_hdr); + + /* Ignore segments with no vm size */ + if (!seg_hdr->vmsize) { + continue; + } + + /* Ignore LINKEDIT segments */ + if (streq_safe(seg_hdr->segname, SEG_LINKEDIT, + const_strlen(SEG_LINKEDIT))) { + continue; + } + + require_action(!kxld_object_is_32_bit(object), finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "LC_SEGMENT_64 in a 32-bit kext.")); + require_action(!has_segment, finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Multiple segments in an MH_OBJECT kext.")); + + nsects = seg_hdr->nsects; + sect_offset = offset + sizeof(*seg_hdr); + has_segment = TRUE; + } + break; #endif /* KXLD_USER_OR_LP64 */ - case LC_SYMTAB: - symtab_hdr = (struct symtab_command *) cmd_hdr; - - KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, - kxld_symtab_init_from_macho_32, 
kxld_symtab_init_from_macho_64, - object->symtab, symtab_hdr, my_file, - /* kernel_linkedit_seg */ NULL); - require_noerr(rval, finish); - break; - case LC_UUID: - uuid_hdr = (struct uuid_command *) cmd_hdr; - kxld_uuid_init_from_macho(&object->uuid, uuid_hdr); - break; - case LC_UNIXTHREAD: - case LC_MAIN: - /* Don't need to do anything with UNIXTHREAD or MAIN */ - break; - case LC_CODE_SIGNATURE: - case LC_DYLD_INFO: - case LC_DYLD_INFO_ONLY: - case LC_FUNCTION_STARTS: - case LC_DATA_IN_CODE: - case LC_DYLIB_CODE_SIGN_DRS: - /* Various metadata that might be stored in the linkedit segment */ - break; - case LC_NOTE: - /* bag-of-bits carried with the binary: ignore */ - break; - case LC_BUILD_VERSION: - /* should be able to ignore build version commands */ - kxld_log(kKxldLogLinking, kKxldLogWarn, - "Ignoring LC_BUILD_VERSION (%u) in MH_OBJECT kext: (platform:%d)", - cmd_hdr->cmd, ((struct build_version_command *)cmd_hdr)->platform); - break; - case LC_VERSION_MIN_MACOSX: - case LC_VERSION_MIN_IPHONEOS: - case LC_VERSION_MIN_TVOS: - case LC_VERSION_MIN_WATCHOS: - case LC_SOURCE_VERSION: - /* Not supported for object files, fall through */ - default: - rval = KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Invalid load command type in MH_OBJECT kext: %u.", cmd_hdr->cmd); - goto finish; - } - } - - if (has_segment) { - - /* Get the number of sections from the segment and build the section index */ - - rval = kxld_array_init(&object->sects, sizeof(KXLDSect), nsects); - require_noerr(rval, finish); - - /* Loop over all of the sections to initialize the section index */ - - for (i = 0; i < nsects; ++i) { - sect = kxld_array_get_item(&object->sects, i); - - KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, - kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64, - sect, my_file, §_offset, i, &object->relocator); - require_noerr(rval, finish); - } - - /* Create special sections */ + case LC_SYMTAB: + symtab_hdr = (struct symtab_command *) cmd_hdr; + + KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, + kxld_symtab_init_from_macho_32, kxld_symtab_init_from_macho_64, + object->symtab, symtab_hdr, my_file, + /* kernel_linkedit_seg */ NULL); + require_noerr(rval, finish); + break; + case LC_UUID: + uuid_hdr = (struct uuid_command *) cmd_hdr; + kxld_uuid_init_from_macho(&object->uuid, uuid_hdr); + break; + case LC_UNIXTHREAD: + case LC_MAIN: + /* Don't need to do anything with UNIXTHREAD or MAIN */ + break; + case LC_CODE_SIGNATURE: + case LC_DYLD_INFO: + case LC_DYLD_INFO_ONLY: + case LC_FUNCTION_STARTS: + case LC_DATA_IN_CODE: + case LC_DYLIB_CODE_SIGN_DRS: + /* Various metadata that might be stored in the linkedit segment */ + break; + case LC_NOTE: + /* bag-of-bits carried with the binary: ignore */ + break; + case LC_BUILD_VERSION: + /* should be able to ignore build version commands */ + kxld_log(kKxldLogLinking, kKxldLogWarn, + "Ignoring LC_BUILD_VERSION (%u) in MH_OBJECT kext: (platform:%d)", + cmd_hdr->cmd, ((struct build_version_command *)cmd_hdr)->platform); + break; + case LC_VERSION_MIN_MACOSX: + case LC_VERSION_MIN_IPHONEOS: + case LC_VERSION_MIN_TVOS: + case LC_VERSION_MIN_WATCHOS: + case LC_SOURCE_VERSION: + /* Not supported for object files, fall through */ + default: + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Invalid load command type in MH_OBJECT kext: %u.", cmd_hdr->cmd); + goto finish; + } + } + + if (has_segment) { + /* Get the number of sections from the segment and build the section index 
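The sect_offset bookkeeping in these loaders relies on a Mach-O layout rule: a segment's section headers are packed immediately after its segment_command within the same load command, which is why sect_offset starts at offset + sizeof(*seg_hdr). A sketch for the 64-bit case:

#include <stdint.h>
#include <stdio.h>
#include <mach-o/loader.h>   /* segment_command_64, section_64 */

/* List the sections of one segment; the headers sit right past the
 * segment_command_64 itself. */
static void
print_sections_64(struct segment_command_64 *seg_hdr)
{
	struct section_64 *sects = (struct section_64 *)(void *)&seg_hdr[1];
	uint32_t i;

	for (i = 0; i < seg_hdr->nsects; ++i) {
		/* segname/sectname are 16 bytes, not NUL terminated: use %.16s */
		printf("%.16s,%.16s at 0x%llx (%llu bytes)\n",
		    sects[i].segname, sects[i].sectname,
		    (unsigned long long)sects[i].addr,
		    (unsigned long long)sects[i].size);
	}
}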
*/ + + rval = kxld_array_init(&object->sects, sizeof(KXLDSect), nsects); + require_noerr(rval, finish); + + /* Loop over all of the sections to initialize the section index */ + + for (i = 0; i < nsects; ++i) { + sect = kxld_array_get_item(&object->sects, i); + + KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, + kxld_sect_init_from_macho_32, kxld_sect_init_from_macho_64, + sect, my_file, §_offset, i, &object->relocator); + require_noerr(rval, finish); + } + + /* Create special sections */ #if KXLD_USER_OR_GOT - rval = create_got(object); - require_noerr(rval, finish); + rval = create_got(object); + require_noerr(rval, finish); #endif /* KXLD_USER_OR_GOT */ #if KXLD_USER_OR_COMMON - rval = resolve_common_symbols(object); - require_noerr(rval, finish); + rval = resolve_common_symbols(object); + require_noerr(rval, finish); #endif /* KXLD_USER_OR_COMMON */ - /* Create the segments from the section index */ + /* Create the segments from the section index */ - rval = kxld_seg_create_seg_from_sections(&object->segs, &object->sects); - require_noerr(rval, finish); + rval = kxld_seg_create_seg_from_sections(&object->segs, &object->sects); + require_noerr(rval, finish); - rval = kxld_seg_finalize_object_segment(&object->segs, - object->section_order, get_macho_header_size(object)); - require_noerr(rval, finish); + rval = kxld_seg_finalize_object_segment(&object->segs, + object->section_order, get_macho_header_size(object)); + require_noerr(rval, finish); - rval = kxld_seg_init_linkedit(&object->segs); - require_noerr(rval, finish); - } + rval = kxld_seg_init_linkedit(&object->segs); + require_noerr(rval, finish); + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_OBJECT */ @@ -1149,12 +1155,16 @@ finish: static u_long get_macho_cmd_data_32(u_char *file, u_long offset, u_int *filetype, u_int *ncmds) { - struct mach_header *mach_hdr = (struct mach_header *) ((void *) (file + offset)); + struct mach_header *mach_hdr = (struct mach_header *) ((void *) (file + offset)); - if (filetype) *filetype = mach_hdr->filetype; - if (ncmds) *ncmds = mach_hdr->ncmds; + if (filetype) { + *filetype = mach_hdr->filetype; + } + if (ncmds) { + *ncmds = mach_hdr->ncmds; + } - return sizeof(*mach_hdr); + return sizeof(*mach_hdr); } #endif /* KXLD_USER_OR_ILP32 */ @@ -1163,14 +1173,18 @@ get_macho_cmd_data_32(u_char *file, u_long offset, u_int *filetype, u_int *ncmds /******************************************************************************* *******************************************************************************/ static u_long -get_macho_cmd_data_64(u_char *file, u_long offset, u_int *filetype, u_int *ncmds) +get_macho_cmd_data_64(u_char *file, u_long offset, u_int *filetype, u_int *ncmds) { - struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) (file + offset)); + struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) (file + offset)); - if (filetype) *filetype = mach_hdr->filetype; - if (ncmds) *ncmds = mach_hdr->ncmds; + if (filetype) { + *filetype = mach_hdr->filetype; + } + if (ncmds) { + *ncmds = mach_hdr->ncmds; + } - return sizeof(*mach_hdr); + return sizeof(*mach_hdr); } #endif /* KXLD_USER_OR_LP64 */ @@ -1179,47 +1193,47 @@ get_macho_cmd_data_64(u_char *file, u_long offset, u_int *filetype, u_int *ncmd static u_long get_macho_header_size(const KXLDObject *object) { - KXLDSeg *seg = NULL; - u_long header_size = 0; - u_int i = 0; - boolean_t object_is_32_bit = kxld_object_is_32_bit(object); + KXLDSeg *seg 
= NULL; + u_long header_size = 0; + u_int i = 0; + boolean_t object_is_32_bit = kxld_object_is_32_bit(object); - check(object); + check(object); - /* Mach, segment, symtab, and UUID headers */ + /* Mach, segment, symtab, and UUID headers */ - header_size += object_is_32_bit ? sizeof(struct mach_header) : sizeof(struct mach_header_64); + header_size += object_is_32_bit ? sizeof(struct mach_header) : sizeof(struct mach_header_64); - for (i = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); - header_size += kxld_seg_get_macho_header_size(seg, object_is_32_bit); - } + for (i = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); + header_size += kxld_seg_get_macho_header_size(seg, object_is_32_bit); + } - header_size += kxld_symtab_get_macho_header_size(); + header_size += kxld_symtab_get_macho_header_size(); #if KXLD_PIC_KEXTS - if (target_supports_slideable_kexts(object)) { - header_size += kxld_reloc_get_macho_header_size(); - } -#endif /* KXLD_PIC_KEXTS */ + if (target_supports_slideable_kexts(object)) { + header_size += kxld_reloc_get_macho_header_size(); + } +#endif /* KXLD_PIC_KEXTS */ + + if (object->uuid.has_uuid) { + header_size += kxld_uuid_get_macho_header_size(); + } - if (object->uuid.has_uuid) { - header_size += kxld_uuid_get_macho_header_size(); - } + if (object->versionmin.has_versionmin) { + header_size += kxld_versionmin_get_macho_header_size(&object->versionmin); + } - if (object->versionmin.has_versionmin) { - header_size += kxld_versionmin_get_macho_header_size(&object->versionmin); - } + if (object->srcversion.has_srcversion) { + header_size += kxld_srcversion_get_macho_header_size(); + } - if (object->srcversion.has_srcversion) { - header_size += kxld_srcversion_get_macho_header_size(); - } - - if (isSplitKext && object->splitinfolc.has_splitinfolc) { - header_size += kxld_splitinfolc_get_macho_header_size(); - } - - return header_size; + if (isSplitKext && object->splitinfolc.has_splitinfolc) { + header_size += kxld_splitinfolc_get_macho_header_size(); + } + + return header_size; } /******************************************************************************* @@ -1227,67 +1241,67 @@ get_macho_header_size(const KXLDObject *object) static u_long get_macho_data_size(const KXLDObject *object) { - KXLDSeg *seg = NULL; - u_long data_size = 0; - u_int i = 0; + KXLDSeg *seg = NULL; + u_long data_size = 0; + u_int i = 0; - check(object); + check(object); - /* total all segment vmsize values */ - for (i = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); - data_size += (u_long) kxld_seg_get_vmsize(seg); - } + /* total all segment vmsize values */ + for (i = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); + data_size += (u_long) kxld_seg_get_vmsize(seg); + } #if KXLD_PIC_KEXTS - { - /* ensure that when we eventually emit the final linked object, - * appending the __DYSYMTAB data after the __LINKEDIT data will - * not overflow the space allocated for the __LINKEDIT segment - */ - - u_long seg_vmsize = 0; - u_long symtab_size = 0; - u_long reloc_size = 0; - - /* get current __LINKEDIT sizes */ - seg = kxld_object_get_seg_by_name(object, SEG_LINKEDIT); - - seg_vmsize = (u_long) kxld_seg_get_vmsize(seg); - - /* get size of symbol table data that will eventually be dumped - * into the __LINKEDIT segment - */ - symtab_size = kxld_symtab_get_macho_data_size(object->symtab, kxld_object_is_32_bit(object)); - - if (target_supports_slideable_kexts(object)) { - /* 
get size of __DYSYMTAB relocation entries */ - reloc_size = kxld_reloc_get_macho_data_size(&object->locrelocs, &object->extrelocs); - } - - /* combine, and ensure they'll both fit within the page(s) - * allocated for the __LINKEDIT segment. If they'd overflow, - * increase the vmsize appropriately so no overflow will occur - */ - if ((symtab_size + reloc_size) > seg_vmsize) { - u_long overflow = (symtab_size + reloc_size) - seg_vmsize; - data_size += kxld_round_page_cross_safe(overflow); - } - } + { + /* ensure that when we eventually emit the final linked object, + * appending the __DYSYMTAB data after the __LINKEDIT data will + * not overflow the space allocated for the __LINKEDIT segment + */ + + u_long seg_vmsize = 0; + u_long symtab_size = 0; + u_long reloc_size = 0; + + /* get current __LINKEDIT sizes */ + seg = kxld_object_get_seg_by_name(object, SEG_LINKEDIT); + + seg_vmsize = (u_long) kxld_seg_get_vmsize(seg); + + /* get size of symbol table data that will eventually be dumped + * into the __LINKEDIT segment + */ + symtab_size = kxld_symtab_get_macho_data_size(object->symtab, kxld_object_is_32_bit(object)); + + if (target_supports_slideable_kexts(object)) { + /* get size of __DYSYMTAB relocation entries */ + reloc_size = kxld_reloc_get_macho_data_size(&object->locrelocs, &object->extrelocs); + } + + /* combine, and ensure they'll both fit within the page(s) + * allocated for the __LINKEDIT segment. If they'd overflow, + * increase the vmsize appropriately so no overflow will occur + */ + if ((symtab_size + reloc_size) > seg_vmsize) { + u_long overflow = (symtab_size + reloc_size) - seg_vmsize; + data_size += kxld_round_page_cross_safe(overflow); + } + } #endif // KXLD_PIC_KEXTS - return data_size; + return data_size; } /******************************************************************************* *******************************************************************************/ -boolean_t +boolean_t kxld_object_target_needs_swap(const KXLDObject *object __unused) { #if KERNEL - return FALSE; + return FALSE; #else - return (object->target_order != object->host_order); + return object->target_order != object->host_order; #endif /* KERNEL */ } @@ -1296,139 +1310,139 @@ kxld_object_target_needs_swap(const KXLDObject *object __unused) KXLDSeg * kxld_object_get_seg_by_name(const KXLDObject *object, const char *segname) { - KXLDSeg *seg = NULL; - u_int i = 0; + KXLDSeg *seg = NULL; + u_int i = 0; - for (i = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); + for (i = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); - if (streq_safe(segname, seg->segname, sizeof(seg->segname))) break; + if (streq_safe(segname, seg->segname, sizeof(seg->segname))) { + break; + } - seg = NULL; - } + seg = NULL; + } - return seg; + return seg; } /******************************************************************************* *******************************************************************************/ -const KXLDRelocator * +const KXLDRelocator * kxld_object_get_relocator(const KXLDObject * object) { - check(object); + check(object); - return &object->relocator; + return &object->relocator; } /******************************************************************************* *******************************************************************************/ KXLDSect * -kxld_object_get_sect_by_name(const KXLDObject *object, const char *segname, +kxld_object_get_sect_by_name(const KXLDObject *object, const char *segname, const char *sectname) { - 
KXLDSect *sect = NULL; - u_int i = 0; + KXLDSect *sect = NULL; + u_int i = 0; - for (i = 0; i < object->sects.nitems; ++i) { - sect = kxld_array_get_item(&object->sects, i); + for (i = 0; i < object->sects.nitems; ++i) { + sect = kxld_array_get_item(&object->sects, i); - if (streq_safe(segname, sect->segname, sizeof(sect->segname)) && - streq_safe(sectname, sect->sectname, sizeof(sect->sectname))) - { - break; - } + if (streq_safe(segname, sect->segname, sizeof(sect->segname)) && + streq_safe(sectname, sect->sectname, sizeof(sect->sectname))) { + break; + } - sect = NULL; - } + sect = NULL; + } - return sect; + return sect; } /******************************************************************************* *******************************************************************************/ -const KXLDReloc * +const KXLDReloc * kxld_object_get_reloc_at_symbol(const KXLDObject *object, const KXLDSym *sym) { - const KXLDReloc *reloc = NULL; - const KXLDSect *sect = NULL; - uint32_t offset = 0; + const KXLDReloc *reloc = NULL; + const KXLDSect *sect = NULL; + uint32_t offset = 0; - check(object); - check(sym); + check(object); + check(sym); - sect = kxld_object_get_section_by_index(object, sym->sectnum); - require(sect, finish); + sect = kxld_object_get_section_by_index(object, sym->sectnum); + require(sect, finish); - if (kxld_object_is_final_image(object)) { - reloc = kxld_reloc_get_reloc_by_offset(&object->extrelocs, - sym->base_addr); - if (!reloc) { - reloc = kxld_reloc_get_reloc_by_offset(&object->locrelocs, - sym->base_addr); - } - } else { - offset = kxld_sym_get_section_offset(sym, sect); - reloc = kxld_reloc_get_reloc_by_offset(&sect->relocs, offset); - } + if (kxld_object_is_final_image(object)) { + reloc = kxld_reloc_get_reloc_by_offset(&object->extrelocs, + sym->base_addr); + if (!reloc) { + reloc = kxld_reloc_get_reloc_by_offset(&object->locrelocs, + sym->base_addr); + } + } else { + offset = kxld_sym_get_section_offset(sym, sect); + reloc = kxld_reloc_get_reloc_by_offset(&sect->relocs, offset); + } finish: - return reloc; + return reloc; } /******************************************************************************* *******************************************************************************/ -const KXLDSym * -kxld_object_get_symbol_of_reloc(const KXLDObject *object, +const KXLDSym * +kxld_object_get_symbol_of_reloc(const KXLDObject *object, const KXLDReloc *reloc, const KXLDSect *sect) { - const KXLDSym *sym = NULL; - u_char *my_file; - - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } - - if (kxld_object_is_final_image(object)) { - sym = kxld_reloc_get_symbol(&object->relocator, reloc, my_file); - } else { - sym = kxld_reloc_get_symbol(&object->relocator, reloc, sect->data); - } - return sym; + const KXLDSym *sym = NULL; + u_char *my_file; + + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } + + if (kxld_object_is_final_image(object)) { + sym = kxld_reloc_get_symbol(&object->relocator, reloc, my_file); + } else { + sym = kxld_reloc_get_symbol(&object->relocator, reloc, sect->data); + } + return sym; } /******************************************************************************* *******************************************************************************/ -const KXLDSect * +const KXLDSect * kxld_object_get_section_by_index(const KXLDObject *object, u_int sectnum) { - KXLDSect *sect = NULL; - - check(object); + KXLDSect *sect = NULL; - if (sectnum <
object->sects.nitems) { - sect = kxld_array_get_item(&object->sects, sectnum); - } + check(object); - return sect; + if (sectnum < object->sects.nitems) { + sect = kxld_array_get_item(&object->sects, sectnum); + } + + return sect; } /******************************************************************************* *******************************************************************************/ -const KXLDArray * +const KXLDArray * kxld_object_get_extrelocs(const KXLDObject *object) { - const KXLDArray *rval = NULL; - - check(object); + const KXLDArray *rval = NULL; + + check(object); - if (kxld_object_is_final_image(object)) { - rval = &object->extrelocs; - } + if (kxld_object_is_final_image(object)) { + rval = &object->extrelocs; + } - return rval; + return rval; } /******************************************************************************* @@ -1436,9 +1450,9 @@ kxld_object_get_extrelocs(const KXLDObject *object) const KXLDSymtab * kxld_object_get_symtab(const KXLDObject *object) { - check(object); + check(object); - return object->symtab; + return object->symtab; } #if KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON @@ -1447,18 +1461,18 @@ kxld_object_get_symtab(const KXLDObject *object) static kern_return_t add_section(KXLDObject *object, KXLDSect **sect) { - kern_return_t rval = KERN_FAILURE; - u_int nsects = object->sects.nitems; + kern_return_t rval = KERN_FAILURE; + u_int nsects = object->sects.nitems; - rval = kxld_array_resize(&object->sects, nsects + 1); - require_noerr(rval, finish); + rval = kxld_array_resize(&object->sects, nsects + 1); + require_noerr(rval, finish); - *sect = kxld_array_get_item(&object->sects, nsects); + *sect = kxld_array_get_item(&object->sects, nsects); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_GOT || KXLD_USER_OR_COMMON */ @@ -1471,72 +1485,74 @@ finish: static kern_return_t resolve_common_symbols(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - KXLDSect *sect = NULL; - kxld_addr_t base_addr = 0; - kxld_size_t size = 0; - kxld_size_t total_size = 0; - u_int align = 0; - u_int max_align = 0; - u_int sectnum = 0; - - if (!kxld_object_target_supports_common_symbols(object)) { - rval = KERN_SUCCESS; - goto finish; - } - - /* Iterate over the common symbols to calculate their total aligned size */ - kxld_symtab_iterator_init(&iter, object->symtab, kxld_sym_is_common, FALSE); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - align = kxld_sym_get_common_align(sym); - size = kxld_sym_get_common_size(sym); - - if (align > max_align) max_align = align; - - total_size = kxld_align_address(total_size, align) + size; - } - - /* If there are common symbols, grow or create the __DATA __common section - * to hold them. 
- */ - if (total_size) { - sect = kxld_object_get_sect_by_name(object, SEG_DATA, SECT_COMMON); - if (sect) { - base_addr = sect->base_addr + sect->size; - - kxld_sect_grow(sect, total_size, max_align); - } else { - base_addr = 0; - - rval = add_section(object, &sect); - require_noerr(rval, finish); - - kxld_sect_init_zerofill(sect, SEG_DATA, SECT_COMMON, - total_size, max_align); - } - - /* Resolve the common symbols against the new section */ - rval = kxld_array_get_index(&object->sects, sect, &sectnum); - require_noerr(rval, finish); - - kxld_symtab_iterator_reset(&iter); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - align = kxld_sym_get_common_align(sym); - size = kxld_sym_get_common_size(sym); - - base_addr = kxld_align_address(base_addr, align); - kxld_sym_resolve_common(sym, sectnum, base_addr); - - base_addr += size; - } - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + KXLDSect *sect = NULL; + kxld_addr_t base_addr = 0; + kxld_size_t size = 0; + kxld_size_t total_size = 0; + u_int align = 0; + u_int max_align = 0; + u_int sectnum = 0; + + if (!kxld_object_target_supports_common_symbols(object)) { + rval = KERN_SUCCESS; + goto finish; + } + + /* Iterate over the common symbols to calculate their total aligned size */ + kxld_symtab_iterator_init(&iter, object->symtab, kxld_sym_is_common, FALSE); + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + align = kxld_sym_get_common_align(sym); + size = kxld_sym_get_common_size(sym); + + if (align > max_align) { + max_align = align; + } + + total_size = kxld_align_address(total_size, align) + size; + } + + /* If there are common symbols, grow or create the __DATA __common section + * to hold them. + */ + if (total_size) { + sect = kxld_object_get_sect_by_name(object, SEG_DATA, SECT_COMMON); + if (sect) { + base_addr = sect->base_addr + sect->size; + + kxld_sect_grow(sect, total_size, max_align); + } else { + base_addr = 0; + + rval = add_section(object, &sect); + require_noerr(rval, finish); + + kxld_sect_init_zerofill(sect, SEG_DATA, SECT_COMMON, + total_size, max_align); + } + + /* Resolve the common symbols against the new section */ + rval = kxld_array_get_index(&object->sects, sect, &sectnum); + require_noerr(rval, finish); + + kxld_symtab_iterator_reset(&iter); + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + align = kxld_sym_get_common_align(sym); + size = kxld_sym_get_common_size(sym); + + base_addr = kxld_align_address(base_addr, align); + kxld_sym_resolve_common(sym, sectnum, base_addr); + + base_addr += size; + } + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_COMMON */ @@ -1546,7 +1562,7 @@ finish: static boolean_t target_has_got(const KXLDObject *object) { - return FALSE; + return FALSE; } /******************************************************************************* @@ -1555,33 +1571,33 @@ target_has_got(const KXLDObject *object) static kern_return_t create_got(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - KXLDSect *sect = NULL; - u_int ngots = 0; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDSect *sect = NULL; + u_int ngots = 0; + u_int i = 0; - if (!target_has_got(object)) { - rval = KERN_SUCCESS; - goto finish; - } + if (!target_has_got(object)) { + rval = KERN_SUCCESS; + goto finish; + } - for (i = 0; i < object->sects.nitems; ++i) { - sect = kxld_array_get_item(&object->sects, i); - ngots += kxld_sect_get_ngots(sect, &object->relocator, - object->symtab); - } + for (i = 0; i < object->sects.nitems; ++i) { + sect = kxld_array_get_item(&object->sects, i); + ngots += kxld_sect_get_ngots(sect, &object->relocator, + object->symtab); + } - rval = add_section(object, &sect); - require_noerr(rval, finish); + rval = add_section(object, &sect); + require_noerr(rval, finish); - rval = kxld_sect_init_got(sect, ngots); - require_noerr(rval, finish); + rval = kxld_sect_init_got(sect, ngots); + require_noerr(rval, finish); - object->got_is_created = TRUE; - rval = KERN_SUCCESS; + object->got_is_created = TRUE; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -1589,32 +1605,31 @@ finish: static kern_return_t populate_got(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - KXLDSect *sect = NULL; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDSect *sect = NULL; + u_int i = 0; - if (!target_has_got(object) || !object->got_is_created) { - rval = KERN_SUCCESS; - goto finish; - } + if (!target_has_got(object) || !object->got_is_created) { + rval = KERN_SUCCESS; + goto finish; + } - for (i = 0; i < object->sects.nitems; ++i) { - sect = kxld_array_get_item(&object->sects, i); - if (streq_safe(sect->segname, KXLD_SEG_GOT, sizeof(KXLD_SEG_GOT)) && - streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT))) - { - kxld_sect_populate_got(sect, object->symtab, - kxld_object_target_needs_swap(object)); - break; - } - } + for (i = 0; i < object->sects.nitems; ++i) { + sect = kxld_array_get_item(&object->sects, i); + if (streq_safe(sect->segname, KXLD_SEG_GOT, sizeof(KXLD_SEG_GOT)) && + streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT))) { + kxld_sect_populate_got(sect, object->symtab, + kxld_object_target_needs_swap(object)); + break; + } + } - require_action(i < object->sects.nitems, finish, rval=KXLD_MISSING_GOT); + require_action(i < object->sects.nitems, finish, rval = KXLD_MISSING_GOT); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_GOT */ @@ -1623,10 +1638,10 @@ finish: static boolean_t target_supports_protected_segments(const KXLDObject *object) { - return (object->is_final_image && - (object->cputype == CPU_TYPE_X86_64 || - object->cputype == CPU_TYPE_ARM || - object->cputype == CPU_TYPE_ARM64)); + return object->is_final_image && + (object->cputype == CPU_TYPE_X86_64 || + object->cputype == CPU_TYPE_ARM || + object->cputype == CPU_TYPE_ARM64); } /******************************************************************************* @@ -1634,145 +1649,146 @@ target_supports_protected_segments(const KXLDObject *object) static void set_is_object_linked(KXLDObject *object) { - u_int i = 0; + u_int i = 0; - if (kxld_object_is_kernel(object)) { - object->is_linked = TRUE; - return; - } + if (kxld_object_is_kernel(object)) { + object->is_linked = TRUE; + return; + } - if (object->is_final_image) { - object->is_linked = !object->extrelocs.nitems; - return; - } + if (object->is_final_image) { + object->is_linked = !object->extrelocs.nitems; + return; + } - object->is_linked = TRUE; - for (i = 0; i < object->sects.nitems; ++i) { - KXLDSect *sect = kxld_array_get_item(&object->sects, i); - if (sect->relocs.nitems) { - object->is_linked = FALSE; - break; - } - } + object->is_linked = TRUE; + for (i = 0; i < object->sects.nitems; ++i) { + KXLDSect *sect = kxld_array_get_item(&object->sects, i); + if (sect->relocs.nitems) { + object->is_linked = FALSE; + break; + } + } }
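/*******************************************************************************
* Editor's note -- an illustrative sketch, not part of the original patch.
* populate_got() above locates the KXLD_SEG_GOT/KXLD_SECT_GOT section and
* defers the per-entry work to kxld_sect_populate_got(), which conceptually
* writes each referenced symbol's linked address into consecutive GOT slots,
* byte-swapping when kxld_object_target_needs_swap() reports that host and
* target byte order differ. The helper below is a minimal, self-contained
* version of that inner loop for the 64-bit case; the function name and the
* got/sym_addrs parameters are assumptions made purely for illustration.
*******************************************************************************/
#if 0   /* illustration only -- not compiled */
#include <stdint.h>
#include <sys/types.h>
#include <mach/boolean.h>
#include <libkern/OSByteOrder.h>

static void
example_fill_got_64(uint64_t *got, const uint64_t *sym_addrs, u_int ngots,
    boolean_t needs_swap)
{
	u_int i;

	for (i = 0; i < ngots; ++i) {
		uint64_t entry = sym_addrs[i];

		if (needs_swap) {
			/* convert to the target's byte order before writing the slot */
			entry = OSSwapInt64(entry);
		}
		got[i] = entry;
	}
}
#endif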
/******************************************************************************* *******************************************************************************/ -void kxld_object_clear(KXLDObject *object) +void +kxld_object_clear(KXLDObject *object) { - KXLDSeg *seg = NULL; - KXLDSect *sect = NULL; - u_int i; - u_char *my_file; - - check(object); + KXLDSeg *seg = NULL; + KXLDSect *sect = NULL; + u_int i; + u_char *my_file; + + check(object); - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } #if !KERNEL - if (kxld_object_is_kernel(object)) { - unswap_macho(my_file, object->host_order, object->target_order); - } + if (kxld_object_is_kernel(object)) { + unswap_macho(my_file, object->host_order, object->target_order); + } #endif /* !KERNEL */ - for (i = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); - kxld_seg_clear(seg); - } - kxld_array_reset(&object->segs); - - for (i = 0; i < object->sects.nitems; ++i) { - sect = kxld_array_get_item(&object->sects, i); - kxld_sect_clear(sect); - } - kxld_array_reset(&object->sects); - - kxld_array_reset(&object->extrelocs); - kxld_array_reset(&object->locrelocs); - kxld_relocator_clear(&object->relocator); - kxld_uuid_clear(&object->uuid); - kxld_versionmin_clear(&object->versionmin); - kxld_srcversion_clear(&object->srcversion); - - if (object->symtab) kxld_symtab_clear(object->symtab); - - if (isOldInterface) { - object->file = NULL; - object->size = 0; - } - else { - kxld_splitinfolc_clear(&object->splitinfolc); - object->split_info.kextExecutable = NULL; - object->split_info.kextSize = 0; - } - object->filetype = 0; - object->cputype = 0; - object->cpusubtype = 0; - object->is_kernel = FALSE; - object->is_final_image = FALSE; - object->is_linked = FALSE; - object->got_is_created = FALSE; + for (i = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); + kxld_seg_clear(seg); + } + kxld_array_reset(&object->segs); + + for (i = 0; i < object->sects.nitems; ++i) { + sect = kxld_array_get_item(&object->sects, i); + kxld_sect_clear(sect); + } + kxld_array_reset(&object->sects); + + kxld_array_reset(&object->extrelocs); + kxld_array_reset(&object->locrelocs); + kxld_relocator_clear(&object->relocator); + kxld_uuid_clear(&object->uuid); + kxld_versionmin_clear(&object->versionmin); + kxld_srcversion_clear(&object->srcversion); + + if (object->symtab) { + kxld_symtab_clear(object->symtab); + } + + if (isOldInterface) { + object->file = NULL; + object->size = 0; + } else { + kxld_splitinfolc_clear(&object->splitinfolc); + object->split_info.kextExecutable = NULL; + object->split_info.kextSize = 0; + } + object->filetype = 0; + object->cputype = 0; + object->cpusubtype = 0; + object->is_kernel = FALSE; + object->is_final_image = FALSE; + object->is_linked = FALSE; + object->got_is_created = FALSE; #if KXLD_USER_OR_OBJECT - object->section_order = NULL; + object->section_order = NULL; #endif #if !KERNEL - object->host_order = 0; - object->target_order = 0; + object->host_order = 0; + object->target_order = 0; #endif } /******************************************************************************* *******************************************************************************/ -void kxld_object_deinit(KXLDObject *object __unused) +void +kxld_object_deinit(KXLDObject *object __unused) { - KXLDSeg *seg = NULL; - KXLDSect *sect = 
NULL; - u_int i; - u_char *my_file; - - check(object); - - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } + KXLDSeg *seg = NULL; + KXLDSect *sect = NULL; + u_int i; + u_char *my_file; + + check(object); + + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } #if !KERNEL - if (my_file && kxld_object_is_kernel(object)) { - unswap_macho(my_file, object->host_order, object->target_order); - } + if (my_file && kxld_object_is_kernel(object)) { + unswap_macho(my_file, object->host_order, object->target_order); + } #endif /* !KERNEL */ - for (i = 0; i < object->segs.maxitems; ++i) { - seg = kxld_array_get_slot(&object->segs, i); - kxld_seg_deinit(seg); - } - kxld_array_deinit(&object->segs); + for (i = 0; i < object->segs.maxitems; ++i) { + seg = kxld_array_get_slot(&object->segs, i); + kxld_seg_deinit(seg); + } + kxld_array_deinit(&object->segs); - for (i = 0; i < object->sects.maxitems; ++i) { - sect = kxld_array_get_slot(&object->sects, i); - kxld_sect_deinit(sect); - } - kxld_array_deinit(&object->sects); + for (i = 0; i < object->sects.maxitems; ++i) { + sect = kxld_array_get_slot(&object->sects, i); + kxld_sect_deinit(sect); + } + kxld_array_deinit(&object->sects); - kxld_array_deinit(&object->extrelocs); - kxld_array_deinit(&object->locrelocs); + kxld_array_deinit(&object->extrelocs); + kxld_array_deinit(&object->locrelocs); - if (object->symtab) { - kxld_symtab_deinit(object->symtab); - kxld_free(object->symtab, kxld_symtab_sizeof()); - } + if (object->symtab) { + kxld_symtab_deinit(object->symtab); + kxld_free(object->symtab, kxld_symtab_sizeof()); + } - bzero(object, sizeof(*object)); + bzero(object, sizeof(*object)); } /******************************************************************************* @@ -1780,18 +1796,17 @@ void kxld_object_deinit(KXLDObject *object __unused) const u_char * kxld_object_get_file(const KXLDObject *object) { - const u_char *my_file; - - check(object); + const u_char *my_file; + + check(object); - if (isOldInterface) { - my_file = object->file; - } - else { - my_file = object->split_info.kextExecutable; - } - - return my_file; + if (isOldInterface) { + my_file = object->file; + } else { + my_file = object->split_info.kextExecutable; + } + + return my_file; } /******************************************************************************* @@ -1799,49 +1814,49 @@ kxld_object_get_file(const KXLDObject *object) const char * kxld_object_get_name(const KXLDObject *object) { - check(object); + check(object); - return object->name; + return object->name; } /******************************************************************************* *******************************************************************************/ -boolean_t +boolean_t kxld_object_is_32_bit(const KXLDObject *object) { - check(object); + check(object); - return kxld_is_32_bit(object->cputype); + return kxld_is_32_bit(object->cputype); } /******************************************************************************* *******************************************************************************/ -boolean_t +boolean_t kxld_object_is_final_image(const KXLDObject *object) { - check(object); + check(object); - return object->is_final_image; + return object->is_final_image; } /******************************************************************************* *******************************************************************************/ -boolean_t +boolean_t kxld_object_is_kernel(const 
KXLDObject *object) { - check(object); + check(object); - return object->is_kernel; + return object->is_kernel; } /******************************************************************************* *******************************************************************************/ -boolean_t +boolean_t kxld_object_is_linked(const KXLDObject *object) { - check(object); + check(object); - return object->is_linked; + return object->is_linked; } /******************************************************************************* @@ -1849,9 +1864,9 @@ kxld_object_is_linked(const KXLDObject *object) boolean_t kxld_object_target_supports_strict_patching(const KXLDObject *object) { - check(object); + check(object); - return (object->cputype != CPU_TYPE_I386); + return object->cputype != CPU_TYPE_I386; } /******************************************************************************* @@ -1859,220 +1874,216 @@ kxld_object_target_supports_strict_patching(const KXLDObject *object) boolean_t kxld_object_target_supports_common_symbols(const KXLDObject *object) { - check(object); + check(object); - return (object->cputype == CPU_TYPE_I386); + return object->cputype == CPU_TYPE_I386; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_object_get_vmsize_for_seg_by_name(const KXLDObject *object, - const char *segname, - u_long *vmsize) + const char *segname, + u_long *vmsize) { - check(object); - check(segname); - check(vmsize); + check(object); + check(segname); + check(vmsize); + + KXLDSeg *seg = NULL; + u_long my_size = 0; + + /* segment vmsize */ + seg = kxld_object_get_seg_by_name(object, segname); - KXLDSeg *seg = NULL; - u_long my_size = 0; + my_size = (u_long) kxld_seg_get_vmsize(seg); - /* segment vmsize */ - seg = kxld_object_get_seg_by_name(object, segname); - - my_size = (u_long) kxld_seg_get_vmsize(seg); - #if KXLD_PIC_KEXTS - if (kxld_seg_is_linkedit_seg(seg)) - { - u_long reloc_size = 0; - - if (target_supports_slideable_kexts(object)) { - /* get size of __DYSYMTAB relocation entries */ - reloc_size = kxld_reloc_get_macho_data_size(&object->locrelocs, &object->extrelocs); - my_size += reloc_size; - } - } + if (kxld_seg_is_linkedit_seg(seg)) { + u_long reloc_size = 0; + + if (target_supports_slideable_kexts(object)) { + /* get size of __DYSYMTAB relocation entries */ + reloc_size = kxld_reloc_get_macho_data_size(&object->locrelocs, &object->extrelocs); + my_size += reloc_size; + } + } #endif - - *vmsize = my_size; + + *vmsize = my_size; } /******************************************************************************* *******************************************************************************/ void -kxld_object_get_vmsize(const KXLDObject *object, u_long *header_size, +kxld_object_get_vmsize(const KXLDObject *object, u_long *header_size, u_long *vmsize) { - check(object); - check(header_size); - check(vmsize); - *header_size = 0; - *vmsize = 0; + check(object); + check(header_size); + check(vmsize); + *header_size = 0; + *vmsize = 0; - /* vmsize is the padded header page(s) + segment vmsizes */ + /* vmsize is the padded header page(s) + segment vmsizes */ - *header_size = (object->is_final_image) ? - 0 : (u_long)kxld_round_page_cross_safe(get_macho_header_size(object)); - - *vmsize = *header_size + get_macho_data_size(object); + *header_size = (object->is_final_image) ? 
+ 0 : (u_long)kxld_round_page_cross_safe(get_macho_header_size(object)); + *vmsize = *header_size + get_macho_data_size(object); } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_object_set_linked_object_size(KXLDObject *object, u_long vmsize) { - check(object); - - if (isOldInterface) { - object->output_buffer_size = vmsize; /* cache this for use later */ - } - else { - object->split_info.linkedKextSize = vmsize; - } - return; -} - -/******************************************************************************* -*******************************************************************************/ -kern_return_t -kxld_object_export_linked_object(const KXLDObject *object, - void *linked_object - ) -{ - kern_return_t rval = KERN_FAILURE; - KXLDSeg *seg = NULL; - u_long size = 0; - u_long header_size = 0; - u_long header_offset = 0; - u_long data_offset = 0; - u_int ncmds = 0; - u_int i = 0; - boolean_t is_32bit_object = kxld_object_is_32_bit(object); - kxld_addr_t link_addr; - u_char *my_linked_object; - - check(object); - check(linked_object); - - if (isOldInterface) { - size = object->output_buffer_size; - link_addr = object->link_addr; - my_linked_object = (u_char *) linked_object; - } - else { - size = ((splitKextLinkInfo *)linked_object)->linkedKextSize; - link_addr = ((splitKextLinkInfo *)linked_object)->vmaddr_TEXT; - my_linked_object = ((splitKextLinkInfo *)linked_object)->linkedKext; - } - - /* Calculate the size of the headers and data */ - - header_size = get_macho_header_size(object); - - /* Copy data to the file */ - - ncmds = object->segs.nitems + 1 /* LC_SYMTAB */; + check(object); + + if (isOldInterface) { + object->output_buffer_size = vmsize; /* cache this for use later */ + } else { + object->split_info.linkedKextSize = vmsize; + } + return; +} + +/******************************************************************************* +*******************************************************************************/ +kern_return_t +kxld_object_export_linked_object(const KXLDObject *object, + void *linked_object + ) +{ + kern_return_t rval = KERN_FAILURE; + KXLDSeg *seg = NULL; + u_long size = 0; + u_long header_size = 0; + u_long header_offset = 0; + u_long data_offset = 0; + u_int ncmds = 0; + u_int i = 0; + boolean_t is_32bit_object = kxld_object_is_32_bit(object); + kxld_addr_t link_addr; + u_char *my_linked_object; + + check(object); + check(linked_object); + + if (isOldInterface) { + size = object->output_buffer_size; + link_addr = object->link_addr; + my_linked_object = (u_char *) linked_object; + } else { + size = ((splitKextLinkInfo *)linked_object)->linkedKextSize; + link_addr = ((splitKextLinkInfo *)linked_object)->vmaddr_TEXT; + my_linked_object = ((splitKextLinkInfo *)linked_object)->linkedKext; + } + + /* Calculate the size of the headers and data */ + + header_size = get_macho_header_size(object); + + /* Copy data to the file */ + + ncmds = object->segs.nitems + 1 /* LC_SYMTAB */; #if KXLD_PIC_KEXTS - /* don't write out a DYSYMTAB segment for targets that can't digest it - */ - if (target_supports_slideable_kexts(object)) { - ncmds++; /* dysymtab */ - } -#endif /* KXLD_PIC_KEXTS */ - - if (object->uuid.has_uuid == TRUE) { - ncmds++; - } - - if (object->versionmin.has_versionmin == TRUE) { - ncmds++; - } - - if (object->srcversion.has_srcversion == TRUE) { - ncmds++; - 
} - - if (isSplitKext && object->splitinfolc.has_splitinfolc) { - ncmds++; - } - - rval = export_macho_header(object, my_linked_object, ncmds, &header_offset, header_size); - require_noerr(rval, finish); - - for (i = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); - - rval = kxld_seg_export_macho_to_vm(seg, my_linked_object, &header_offset, - header_size, size, link_addr, is_32bit_object); - require_noerr(rval, finish); - } - - seg = kxld_object_get_seg_by_name(object, SEG_LINKEDIT); - data_offset = (u_long) (seg->link_addr - link_addr); - - // data_offset is used to set the fileoff in the macho header load commands - rval = kxld_symtab_export_macho(object->symtab, - my_linked_object, - &header_offset, - header_size, - &data_offset, size, is_32bit_object); - require_noerr(rval, finish); - - // data_offset now points past the symbol tab and strings data in the linkedit - // segment - (it was used to set new values for symoff and stroff) + /* don't write out a DYSYMTAB segment for targets that can't digest it + */ + if (target_supports_slideable_kexts(object)) { + ncmds++; /* dysymtab */ + } +#endif /* KXLD_PIC_KEXTS */ + + if (object->uuid.has_uuid == TRUE) { + ncmds++; + } + + if (object->versionmin.has_versionmin == TRUE) { + ncmds++; + } + + if (object->srcversion.has_srcversion == TRUE) { + ncmds++; + } + + if (isSplitKext && object->splitinfolc.has_splitinfolc) { + ncmds++; + } + + rval = export_macho_header(object, my_linked_object, ncmds, &header_offset, header_size); + require_noerr(rval, finish); + + for (i = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); + + rval = kxld_seg_export_macho_to_vm(seg, my_linked_object, &header_offset, + header_size, size, link_addr, is_32bit_object); + require_noerr(rval, finish); + } + + seg = kxld_object_get_seg_by_name(object, SEG_LINKEDIT); + data_offset = (u_long) (seg->link_addr - link_addr); + + // data_offset is used to set the fileoff in the macho header load commands + rval = kxld_symtab_export_macho(object->symtab, + my_linked_object, + &header_offset, + header_size, + &data_offset, size, is_32bit_object); + require_noerr(rval, finish); + + // data_offset now points past the symbol tab and strings data in the linkedit + // segment - (it was used to set new values for symoff and stroff) #if KXLD_PIC_KEXTS - if (target_supports_slideable_kexts(object)) { - rval = kxld_reloc_export_macho(&object->relocator, - &object->locrelocs, - &object->extrelocs, - my_linked_object, - &header_offset, - header_size, - &data_offset, size); - require_noerr(rval, finish); - } -#endif /* KXLD_PIC_KEXTS */ - - if (object->uuid.has_uuid) { - rval = kxld_uuid_export_macho(&object->uuid, my_linked_object, &header_offset, header_size); - require_noerr(rval, finish); - } - - if (object->versionmin.has_versionmin) { - rval = kxld_versionmin_export_macho(&object->versionmin, my_linked_object, &header_offset, header_size); - require_noerr(rval, finish); - } - - if (object->srcversion.has_srcversion) { - rval = kxld_srcversion_export_macho(&object->srcversion, my_linked_object, &header_offset, header_size); - require_noerr(rval, finish); - } - - if (isSplitKext && object->splitinfolc.has_splitinfolc) { - rval = kxld_splitinfolc_export_macho(&object->splitinfolc, - linked_object, - &header_offset, - header_size, - &data_offset, - size); - require_noerr(rval, finish); - } + if (target_supports_slideable_kexts(object)) { + rval = kxld_reloc_export_macho(&object->relocator, + &object->locrelocs, + 
&object->extrelocs, + my_linked_object, + &header_offset, + header_size, + &data_offset, size); + require_noerr(rval, finish); + } +#endif /* KXLD_PIC_KEXTS */ + + if (object->uuid.has_uuid) { + rval = kxld_uuid_export_macho(&object->uuid, my_linked_object, &header_offset, header_size); + require_noerr(rval, finish); + } + + if (object->versionmin.has_versionmin) { + rval = kxld_versionmin_export_macho(&object->versionmin, my_linked_object, &header_offset, header_size); + require_noerr(rval, finish); + } + + if (object->srcversion.has_srcversion) { + rval = kxld_srcversion_export_macho(&object->srcversion, my_linked_object, &header_offset, header_size); + require_noerr(rval, finish); + } + + if (isSplitKext && object->splitinfolc.has_splitinfolc) { + rval = kxld_splitinfolc_export_macho(&object->splitinfolc, + linked_object, + &header_offset, + header_size, + &data_offset, + size); + require_noerr(rval, finish); + } #if !KERNEL - unswap_macho(my_linked_object, object->host_order, object->target_order); + unswap_macho(my_linked_object, object->host_order, object->target_order); #endif /* KERNEL */ - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -2081,21 +2092,21 @@ static kern_return_t export_macho_header(const KXLDObject *object, u_char *buf, u_int ncmds, u_long *header_offset, u_long header_size) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(object); - check(buf); - check(header_offset); + check(object); + check(buf); + check(header_offset); - KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, - export_macho_header_32, export_macho_header_64, - object, buf, ncmds, header_offset, header_size); - require_noerr(rval, finish); + KXLD_3264_FUNC(kxld_object_is_32_bit(object), rval, + export_macho_header_32, export_macho_header_64, + object, buf, ncmds, header_offset, header_size); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_USER_OR_ILP32 @@ -2105,31 +2116,31 @@ static kern_return_t export_macho_header_32(const KXLDObject *object, u_char *buf, u_int ncmds, u_long *header_offset, u_long header_size) { - kern_return_t rval = KERN_FAILURE; - struct mach_header *mach = NULL; - - check(object); - check(buf); - check(header_offset); - - require_action(sizeof(*mach) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - mach = (struct mach_header *) ((void *) (buf + *header_offset)); - - mach->magic = MH_MAGIC; - mach->cputype = object->cputype; - mach->cpusubtype = object->cpusubtype; - mach->filetype = object->filetype; - mach->ncmds = ncmds; - mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach)); - mach->flags = MH_NOUNDEFS; - - *header_offset += sizeof(*mach); - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + struct mach_header *mach = NULL; + + check(object); + check(buf); + check(header_offset); + + require_action(sizeof(*mach) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); + mach = (struct mach_header *) ((void *) (buf + *header_offset)); + + mach->magic = MH_MAGIC; + mach->cputype = object->cputype; + mach->cpusubtype = object->cpusubtype; + mach->filetype = object->filetype; + mach->ncmds = ncmds; + mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach)); + mach->flags = MH_NOUNDEFS; + + *header_offset += sizeof(*mach); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* 
KXLD_USER_OR_ILP32 */ @@ -2140,90 +2151,90 @@ static kern_return_t export_macho_header_64(const KXLDObject *object, u_char *buf, u_int ncmds, u_long *header_offset, u_long header_size) { - kern_return_t rval = KERN_FAILURE; - struct mach_header_64 *mach = NULL; - - check(object); - check(buf); - check(header_offset); - - require_action(sizeof(*mach) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - mach = (struct mach_header_64 *) ((void *) (buf + *header_offset)); - - mach->magic = MH_MAGIC_64; - mach->cputype = object->cputype; - mach->cpusubtype = object->cpusubtype; - mach->filetype = object->filetype; - mach->ncmds = ncmds; - mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach)); - mach->flags = MH_NOUNDEFS; - - *header_offset += sizeof(*mach); - + kern_return_t rval = KERN_FAILURE; + struct mach_header_64 *mach = NULL; + + check(object); + check(buf); + check(header_offset); + + require_action(sizeof(*mach) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); + mach = (struct mach_header_64 *) ((void *) (buf + *header_offset)); + + mach->magic = MH_MAGIC_64; + mach->cputype = object->cputype; + mach->cpusubtype = object->cpusubtype; + mach->filetype = object->filetype; + mach->ncmds = ncmds; + mach->sizeofcmds = (uint32_t) (header_size - sizeof(*mach)); + mach->flags = MH_NOUNDEFS; + + *header_offset += sizeof(*mach); + #if SPLIT_KEXTS_DEBUG - { - kxld_log(kKxldLogLinking, kKxldLogErr, - " %p >>> Start of macho header (size %lu) <%s>", - (void *) mach, - sizeof(*mach), - __func__); - kxld_log(kKxldLogLinking, kKxldLogErr, - " %p <<< End of macho header <%s>", - (void *) ((u_char *)mach + sizeof(*mach)), - __func__); - } + { + kxld_log(kKxldLogLinking, kKxldLogErr, + " %p >>> Start of macho header (size %lu) <%s>", + (void *) mach, + sizeof(*mach), + __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + " %p <<< End of macho header <%s>", + (void *) ((u_char *)mach + sizeof(*mach)), + __func__); + } #endif - - rval = KERN_SUCCESS; + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_LP64 */ /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_object_index_symbols_by_name(KXLDObject *object) { - return kxld_symtab_index_symbols_by_name(object->symtab); + return kxld_symtab_index_symbols_by_name(object->symtab); } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_object_index_cxx_symbols_by_value(KXLDObject *object) { - return kxld_symtab_index_cxx_symbols_by_value(object->symtab); + return kxld_symtab_index_cxx_symbols_by_value(object->symtab); } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_object_relocate(KXLDObject *object, kxld_addr_t link_address) { - kern_return_t rval = KERN_FAILURE; - KXLDSeg *seg = NULL; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDSeg *seg = NULL; + u_int i = 0; - check(object); + check(object); - object->link_addr = link_address; + object->link_addr = link_address; - /* Relocate segments (which relocates the sections) */ - for (i = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); - kxld_seg_relocate(seg, 
link_address); - } // for... + /* Relocate segments (which relocates the sections) */ + for (i = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); + kxld_seg_relocate(seg, link_address); + } // for... - /* Relocate symbols */ - rval = kxld_symtab_relocate(object->symtab, &object->sects); - require_noerr(rval, finish); + /* Relocate symbols */ + rval = kxld_symtab_relocate(object->symtab, &object->sects); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -2231,126 +2242,126 @@ finish: static KXLDSym * get_mutable_sym(const KXLDObject *object, const KXLDSym *sym) { - KXLDSym *rval = NULL; - kern_return_t result = KERN_FAILURE; - u_int i = 0; + KXLDSym *rval = NULL; + kern_return_t result = KERN_FAILURE; + u_int i = 0; - result = kxld_symtab_get_sym_index(object->symtab, sym, &i); - require_noerr(result, finish); + result = kxld_symtab_get_sym_index(object->symtab, sym, &i); + require_noerr(result, finish); - rval = kxld_symtab_get_symbol_by_index(object->symtab, i); - require_action(rval == sym, finish, rval=NULL); + rval = kxld_symtab_get_symbol_by_index(object->symtab, i); + require_action(rval == sym, finish, rval = NULL); finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t -kxld_object_resolve_symbol(KXLDObject *object, +kern_return_t +kxld_object_resolve_symbol(KXLDObject *object, const KXLDSym *sym, kxld_addr_t addr) { - kern_return_t rval = KERN_FAILURE; - KXLDSym *resolved_sym = NULL; + kern_return_t rval = KERN_FAILURE; + KXLDSym *resolved_sym = NULL; - resolved_sym = get_mutable_sym(object, sym); - require_action(resolved_sym, finish, rval=KERN_FAILURE); + resolved_sym = get_mutable_sym(object, sym); + require_action(resolved_sym, finish, rval = KERN_FAILURE); - rval = kxld_sym_resolve(resolved_sym, addr); - require_noerr(rval, finish); + rval = kxld_sym_resolve(resolved_sym, addr); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_object_patch_symbol(KXLDObject *object, const struct kxld_sym *sym) { - kern_return_t rval = KERN_FAILURE; - KXLDSym *patched_sym = NULL; + kern_return_t rval = KERN_FAILURE; + KXLDSym *patched_sym = NULL; - patched_sym = get_mutable_sym(object, sym); - require_action(patched_sym, finish, rval=KERN_FAILURE); + patched_sym = get_mutable_sym(object, sym); + require_action(patched_sym, finish, rval = KERN_FAILURE); - (void) kxld_sym_patch(patched_sym); - rval = KERN_SUCCESS; + (void) kxld_sym_patch(patched_sym); + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t -kxld_object_add_symbol(KXLDObject *object, char *name, kxld_addr_t link_addr, +kern_return_t +kxld_object_add_symbol(KXLDObject *object, char *name, kxld_addr_t link_addr, const KXLDSym **sym_out) { - kern_return_t rval = KERN_FAILURE; - KXLDSym *sym = NULL; + kern_return_t rval = KERN_FAILURE; + KXLDSym *sym = 
NULL; - rval = kxld_symtab_add_symbol(object->symtab, name, link_addr, &sym); - require_noerr(rval, finish); + rval = kxld_symtab_add_symbol(object->symtab, name, link_addr, &sym); + require_noerr(rval, finish); - *sym_out = sym; - rval = KERN_SUCCESS; + *sym_out = sym; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t -kxld_object_process_relocations(KXLDObject *object, +kern_return_t +kxld_object_process_relocations(KXLDObject *object, const KXLDDict *patched_vtables) { - kern_return_t rval = KERN_FAILURE; - - (void) kxld_relocator_set_vtables(&object->relocator, patched_vtables); - - /* Process relocation entries and populate the global offset table. - * - * For final linked images: the relocation entries are contained in a couple - * of tables hanging off the end of the symbol table. The GOT has its own - * section created by the linker; we simply need to fill it. - * - * For object files: the relocation entries are bound to each section. - * The GOT, if it exists for the target architecture, is created by kxld, - * and we must populate it according to our internal structures. - */ - if (object->is_final_image) { + kern_return_t rval = KERN_FAILURE; + + (void) kxld_relocator_set_vtables(&object->relocator, patched_vtables); + + /* Process relocation entries and populate the global offset table. + * + * For final linked images: the relocation entries are contained in a couple + * of tables hanging off the end of the symbol table. The GOT has its own + * section created by the linker; we simply need to fill it. + * + * For object files: the relocation entries are bound to each section. + * The GOT, if it exists for the target architecture, is created by kxld, + * and we must populate it according to our internal structures. 
+ */ + if (object->is_final_image) { #if KXLD_USER_OR_BUNDLE - rval = process_symbol_pointers(object); - require_noerr(rval, finish); + rval = process_symbol_pointers(object); + require_noerr(rval, finish); - rval = process_relocs_from_tables(object); - require_noerr(rval, finish); + rval = process_relocs_from_tables(object); + require_noerr(rval, finish); #else - require_action(FALSE, finish, rval=KERN_FAILURE); + require_action(FALSE, finish, rval = KERN_FAILURE); #endif /* KXLD_USER_OR_BUNDLE */ - } else { + } else { #if KXLD_USER_OR_GOT - /* Populate GOT */ - rval = populate_got(object); - require_noerr(rval, finish); + /* Populate GOT */ + rval = populate_got(object); + require_noerr(rval, finish); #endif /* KXLD_USER_OR_GOT */ #if KXLD_USER_OR_OBJECT - rval = process_relocs_from_sections(object); - require_noerr(rval, finish); + rval = process_relocs_from_sections(object); + require_noerr(rval, finish); #else - require_action(FALSE, finish, rval=KERN_FAILURE); + require_action(FALSE, finish, rval = KERN_FAILURE); #endif /* KXLD_USER_OR_OBJECT */ - } + } - /* Populate kmod info structure */ - rval = populate_kmod_info(object); - require_noerr(rval, finish); - - rval = KERN_SUCCESS; + /* Populate kmod info structure */ + rval = populate_kmod_info(object); + require_noerr(rval, finish); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_USER_OR_BUNDLE @@ -2371,92 +2382,91 @@ static boolean_t kxld_show_ptr_value; static kern_return_t process_symbol_pointers(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - KXLDSect *sect = NULL; - KXLDSym *sym = NULL; - int32_t *symidx = NULL; - u_char *symptr = NULL; - u_long symptrsize = 0; - u_int nsyms = 0; - u_int firstsym = 0; - u_int i = 0; - - check(object); - - require_action(object->is_final_image && object->dysymtab_hdr, - finish, rval=KERN_FAILURE); - - /* Get the __DATA,__nl_symbol_ptr section. If it doesn't exist, we have - * nothing to do. - */ - - sect = kxld_object_get_sect_by_name(object, SEG_DATA, SECT_SYM_PTRS); - if (!sect || !(sect->flags & S_NON_LAZY_SYMBOL_POINTERS)) { - rval = KERN_SUCCESS; - goto finish; - } - - /* Calculate the table offset and number of entries in the section */ - - if (kxld_object_is_32_bit(object)) { - symptrsize = sizeof(uint32_t); - } else { - symptrsize = sizeof(uint64_t); - } - - nsyms = (u_int) (sect->size / symptrsize); - firstsym = sect->reserved1; - - require_action(firstsym + nsyms <= object->dysymtab_hdr->nindirectsyms, - finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "firstsym + nsyms > object->dysymtab_hdr->nindirectsyms")); - - /* Iterate through the indirect symbol table and fill in the section of - * symbol pointers. There are three cases: - * 1) A normal symbol - put its value directly in the table - * 2) An INDIRECT_SYMBOL_LOCAL - symbols that are local and already have - * their offset from the start of the file in the section. Simply - * add the file's link address to fill this entry. - * 3) An INDIRECT_SYMBOL_ABS - prepopulated absolute symbols. No - * action is required. 
- */ - - if (isOldInterface) { - symidx = (int32_t *) ((void *) (object->file + object->dysymtab_hdr->indirectsymoff)); - } - else { - symidx = (int32_t *) ((void *) (object->split_info.kextExecutable + object->dysymtab_hdr->indirectsymoff)); - } - - symidx += firstsym; - symptr = sect->data; - for (i = 0; i < nsyms; ++i, ++symidx, symptr+=symptrsize) { - if (*symidx & INDIRECT_SYMBOL_LOCAL) { - if (*symidx & INDIRECT_SYMBOL_ABS) continue; - - if (isOldInterface) { - add_to_ptr(symptr, object->link_addr, kxld_object_is_32_bit(object)); - } - else { - add_to_ptr(symptr, object->split_info.vmaddr_TEXT, kxld_object_is_32_bit(object)); - } - } else { - sym = kxld_symtab_get_symbol_by_index(object->symtab, *symidx); - require_action(sym, finish, rval=KERN_FAILURE); - - if (isOldInterface) { - add_to_ptr(symptr, sym->link_addr, kxld_object_is_32_bit(object)); - } - else { - add_to_ptr(symptr, object->split_info.vmaddr_TEXT, kxld_object_is_32_bit(object)); - } - } - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSect *sect = NULL; + KXLDSym *sym = NULL; + int32_t *symidx = NULL; + u_char *symptr = NULL; + u_long symptrsize = 0; + u_int nsyms = 0; + u_int firstsym = 0; + u_int i = 0; + + check(object); + + require_action(object->is_final_image && object->dysymtab_hdr, + finish, rval = KERN_FAILURE); + + /* Get the __DATA,__nl_symbol_ptr section. If it doesn't exist, we have + * nothing to do. + */ + + sect = kxld_object_get_sect_by_name(object, SEG_DATA, SECT_SYM_PTRS); + if (!sect || !(sect->flags & S_NON_LAZY_SYMBOL_POINTERS)) { + rval = KERN_SUCCESS; + goto finish; + } + + /* Calculate the table offset and number of entries in the section */ + + if (kxld_object_is_32_bit(object)) { + symptrsize = sizeof(uint32_t); + } else { + symptrsize = sizeof(uint64_t); + } + + nsyms = (u_int) (sect->size / symptrsize); + firstsym = sect->reserved1; + + require_action(firstsym + nsyms <= object->dysymtab_hdr->nindirectsyms, + finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "firstsym + nsyms > object->dysymtab_hdr->nindirectsyms")); + + /* Iterate through the indirect symbol table and fill in the section of + * symbol pointers. There are three cases: + * 1) A normal symbol - put its value directly in the table + * 2) An INDIRECT_SYMBOL_LOCAL - symbols that are local and already have + * their offset from the start of the file in the section. Simply + * add the file's link address to fill this entry. + * 3) An INDIRECT_SYMBOL_ABS - prepopulated absolute symbols. No + * action is required. 
+ */ + + if (isOldInterface) { + symidx = (int32_t *) ((void *) (object->file + object->dysymtab_hdr->indirectsymoff)); + } else { + symidx = (int32_t *) ((void *) (object->split_info.kextExecutable + object->dysymtab_hdr->indirectsymoff)); + } + + symidx += firstsym; + symptr = sect->data; + for (i = 0; i < nsyms; ++i, ++symidx, symptr += symptrsize) { + if (*symidx & INDIRECT_SYMBOL_LOCAL) { + if (*symidx & INDIRECT_SYMBOL_ABS) { + continue; + } + + if (isOldInterface) { + add_to_ptr(symptr, object->link_addr, kxld_object_is_32_bit(object)); + } else { + add_to_ptr(symptr, object->split_info.vmaddr_TEXT, kxld_object_is_32_bit(object)); + } + } else { + sym = kxld_symtab_get_symbol_by_index(object->symtab, *symidx); + require_action(sym, finish, rval = KERN_FAILURE); + + if (isOldInterface) { + add_to_ptr(symptr, sym->link_addr, kxld_object_is_32_bit(object)); + } else { + add_to_ptr(symptr, object->split_info.vmaddr_TEXT, kxld_object_is_32_bit(object)); + } + } + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -2464,20 +2474,22 @@ finish: static KXLDSeg * get_seg_by_base_addr(KXLDObject *object, kxld_addr_t base_addr) { - KXLDSeg *seg = NULL; - kxld_addr_t start = 0; - kxld_addr_t end = 0; - u_int i = 0; + KXLDSeg *seg = NULL; + kxld_addr_t start = 0; + kxld_addr_t end = 0; + u_int i = 0; - for (i = 0; i < object->segs.nitems; ++i) { - seg = kxld_array_get_item(&object->segs, i); - start = seg->base_addr; - end = seg->base_addr + seg->vmsize; + for (i = 0; i < object->segs.nitems; ++i) { + seg = kxld_array_get_item(&object->segs, i); + start = seg->base_addr; + end = seg->base_addr + seg->vmsize; - if (start <= base_addr && base_addr < end) return seg; - } + if (start <= base_addr && base_addr < end) { + return seg; + } + } - return NULL; + return NULL; } /******************************************************************************* @@ -2485,90 +2497,80 @@ get_seg_by_base_addr(KXLDObject *object, kxld_addr_t base_addr) static kern_return_t process_relocs_from_tables(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - KXLDReloc *reloc = NULL; - KXLDSeg *seg = NULL; - u_int i = 0; - - /* Process external relocations */ - for (i = 0; i < object->extrelocs.nitems; ++i) { - reloc = kxld_array_get_item(&object->extrelocs, i); - - seg = get_seg_by_base_addr(object, reloc->address); - require_action(seg, finish, rval=KERN_FAILURE); - - if (isOldInterface) { - rval = kxld_relocator_process_table_reloc(&object->relocator, reloc, - seg, object->link_addr); - } - else { - kxld_addr_t my_link_addr = object->split_info.vmaddr_TEXT; - if (isSplitKext) { - if (kxld_seg_is_text_exec_seg(seg)) { - my_link_addr = object->split_info.vmaddr_TEXT_EXEC; - } - else if (kxld_seg_is_data_seg(seg)) { - my_link_addr = object->split_info.vmaddr_DATA; - } - else if (kxld_seg_is_data_const_seg(seg)) { - my_link_addr = object->split_info.vmaddr_DATA_CONST; - } - else if (kxld_seg_is_llvm_cov_seg(seg)) { - my_link_addr = object->split_info.vmaddr_LLVM_COV; - } - else if (kxld_seg_is_linkedit_seg(seg)) { - my_link_addr = object->split_info.vmaddr_LINKEDIT; - } - } - rval = kxld_relocator_process_table_reloc(&object->relocator, - reloc, - seg, - my_link_addr); - } - require_noerr(rval, finish); - } - - /* Process local relocations */ - for (i = 0; i < object->locrelocs.nitems; ++i) { - reloc = kxld_array_get_item(&object->locrelocs, i); - - seg = get_seg_by_base_addr(object, reloc->address); - 
require_action(seg, finish, rval=KERN_FAILURE); - - if (isOldInterface) { - rval = kxld_relocator_process_table_reloc(&object->relocator, reloc, - seg, object->link_addr); - } - else { - kxld_addr_t my_link_addr = object->split_info.vmaddr_TEXT; - if (isSplitKext) { - if (kxld_seg_is_text_exec_seg(seg)) { - my_link_addr = object->split_info.vmaddr_TEXT_EXEC; - } - else if (kxld_seg_is_data_seg(seg)) { - my_link_addr = object->split_info.vmaddr_DATA; - } - else if (kxld_seg_is_data_const_seg(seg)) { - my_link_addr = object->split_info.vmaddr_DATA_CONST; - } - else if (kxld_seg_is_llvm_cov_seg(seg)) { - my_link_addr = object->split_info.vmaddr_LLVM_COV; - } - else if (kxld_seg_is_linkedit_seg(seg)) { - my_link_addr = object->split_info.vmaddr_LINKEDIT; - } - } - rval = kxld_relocator_process_table_reloc(&object->relocator, - reloc, - seg, - my_link_addr); - } - require_noerr(rval, finish); - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDReloc *reloc = NULL; + KXLDSeg *seg = NULL; + u_int i = 0; + + /* Process external relocations */ + for (i = 0; i < object->extrelocs.nitems; ++i) { + reloc = kxld_array_get_item(&object->extrelocs, i); + + seg = get_seg_by_base_addr(object, reloc->address); + require_action(seg, finish, rval = KERN_FAILURE); + + if (isOldInterface) { + rval = kxld_relocator_process_table_reloc(&object->relocator, reloc, + seg, object->link_addr); + } else { + kxld_addr_t my_link_addr = object->split_info.vmaddr_TEXT; + if (isSplitKext) { + if (kxld_seg_is_text_exec_seg(seg)) { + my_link_addr = object->split_info.vmaddr_TEXT_EXEC; + } else if (kxld_seg_is_data_seg(seg)) { + my_link_addr = object->split_info.vmaddr_DATA; + } else if (kxld_seg_is_data_const_seg(seg)) { + my_link_addr = object->split_info.vmaddr_DATA_CONST; + } else if (kxld_seg_is_llvm_cov_seg(seg)) { + my_link_addr = object->split_info.vmaddr_LLVM_COV; + } else if (kxld_seg_is_linkedit_seg(seg)) { + my_link_addr = object->split_info.vmaddr_LINKEDIT; + } + } + rval = kxld_relocator_process_table_reloc(&object->relocator, + reloc, + seg, + my_link_addr); + } + require_noerr(rval, finish); + } + + /* Process local relocations */ + for (i = 0; i < object->locrelocs.nitems; ++i) { + reloc = kxld_array_get_item(&object->locrelocs, i); + + seg = get_seg_by_base_addr(object, reloc->address); + require_action(seg, finish, rval = KERN_FAILURE); + + if (isOldInterface) { + rval = kxld_relocator_process_table_reloc(&object->relocator, reloc, + seg, object->link_addr); + } else { + kxld_addr_t my_link_addr = object->split_info.vmaddr_TEXT; + if (isSplitKext) { + if (kxld_seg_is_text_exec_seg(seg)) { + my_link_addr = object->split_info.vmaddr_TEXT_EXEC; + } else if (kxld_seg_is_data_seg(seg)) { + my_link_addr = object->split_info.vmaddr_DATA; + } else if (kxld_seg_is_data_const_seg(seg)) { + my_link_addr = object->split_info.vmaddr_DATA_CONST; + } else if (kxld_seg_is_llvm_cov_seg(seg)) { + my_link_addr = object->split_info.vmaddr_LLVM_COV; + } else if (kxld_seg_is_linkedit_seg(seg)) { + my_link_addr = object->split_info.vmaddr_LINKEDIT; + } + } + rval = kxld_relocator_process_table_reloc(&object->relocator, + reloc, + seg, + my_link_addr); + } + require_noerr(rval, finish); + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -2576,20 +2578,19 @@ finish: static void add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit) { - if (is_32_bit) { - uint32_t *ptr = (uint32_t *) ((void *) symptr); - 
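/*
 * A minimal, self-contained sketch of the pattern add_to_ptr() implements:
 * slide a symbol-pointer slot in place, using a 32- or 64-bit store to
 * match the kext's pointer size.  The helper name add_to_slot and the
 * memcpy-based accesses are illustrative stand-ins, not kxld API; the
 * kxld code casts and dereferences the slot pointer directly.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static void
add_to_slot(unsigned char *slot, uint64_t slide, bool is_32_bit)
{
	if (is_32_bit) {
		uint32_t v;
		memcpy(&v, slot, sizeof(v));    /* read the 32-bit slot */
		v += (uint32_t)slide;           /* accumulate the slide */
		memcpy(slot, &v, sizeof(v));    /* store it back */
	} else {
		uint64_t v;
		memcpy(&v, slot, sizeof(v));
		v += slide;
		memcpy(slot, &v, sizeof(v));
	}
}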
- *ptr += (uint32_t) val; - } else { - uint64_t *ptr = (uint64_t *) ((void *) symptr); - - *ptr += (uint64_t) val; - } - + if (is_32_bit) { + uint32_t *ptr = (uint32_t *) ((void *) symptr); + + *ptr += (uint32_t) val; + } else { + uint64_t *ptr = (uint64_t *) ((void *) symptr); + + *ptr += (uint64_t) val; + } + #if SPLIT_KEXTS_DEBUG - kxld_show_ptr_value = FALSE; + kxld_show_ptr_value = FALSE; #endif - } #endif /* KXLD_USER_OR_BUNDLE */ @@ -2599,19 +2600,19 @@ add_to_ptr(u_char *symptr, kxld_addr_t val, boolean_t is_32_bit) static kern_return_t process_relocs_from_sections(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - KXLDSect *sect = NULL; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDSect *sect = NULL; + u_int i = 0; - for (i = 0; i < object->sects.nitems; ++i) { - sect = kxld_array_get_item(&object->sects, i); - rval = kxld_sect_process_relocs(sect, &object->relocator); - require_noerr(rval, finish); - } + for (i = 0; i < object->sects.nitems; ++i) { + sect = kxld_array_get_item(&object->sects, i); + rval = kxld_sect_process_relocs(sect, &object->relocator); + require_noerr(rval, finish); + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_OBJECT */ @@ -2620,109 +2621,106 @@ finish: static kern_return_t populate_kmod_info(KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - KXLDSect *kmodsect = NULL; - KXLDSym *kmodsym = NULL; - kmod_info_t *kmod_info = NULL; - u_long kmod_offset = 0; - u_long header_size; - u_long size; - - if (kxld_object_is_kernel(object)) { - rval = KERN_SUCCESS; - goto finish; - } - - kxld_object_get_vmsize(object, &header_size, &size); - - kmodsym = kxld_symtab_get_locally_defined_symbol_by_name(object->symtab, - KXLD_KMOD_INFO_SYMBOL); - require_action(kmodsym, finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogNoKmodInfo)); - - kmodsect = kxld_array_get_item(&object->sects, kmodsym->sectnum); - - kmod_offset = (u_long) (kmodsym->base_addr - kmodsect->base_addr); - kmod_info = (kmod_info_t *) ((void *) (kmodsect->data + kmod_offset)); - - if (kxld_object_is_32_bit(object)) { - kmod_info_32_v1_t *kmod = (kmod_info_32_v1_t *) (kmod_info); - - if (isOldInterface) { - kmod->address = (uint32_t) object->link_addr; - } - else { - kmod->address = (uint32_t) object->split_info.vmaddr_TEXT; - } - - kmod->size = (uint32_t) size; - kmod->hdr_size = (uint32_t) header_size; + kern_return_t rval = KERN_FAILURE; + KXLDSect *kmodsect = NULL; + KXLDSym *kmodsym = NULL; + kmod_info_t *kmod_info = NULL; + u_long kmod_offset = 0; + u_long header_size; + u_long size; + + if (kxld_object_is_kernel(object)) { + rval = KERN_SUCCESS; + goto finish; + } + + kxld_object_get_vmsize(object, &header_size, &size); + + kmodsym = kxld_symtab_get_locally_defined_symbol_by_name(object->symtab, + KXLD_KMOD_INFO_SYMBOL); + require_action(kmodsym, finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogNoKmodInfo)); + + kmodsect = kxld_array_get_item(&object->sects, kmodsym->sectnum); + + kmod_offset = (u_long) (kmodsym->base_addr - kmodsect->base_addr); + kmod_info = (kmod_info_t *) ((void *) (kmodsect->data + kmod_offset)); + + if (kxld_object_is_32_bit(object)) { + kmod_info_32_v1_t *kmod = (kmod_info_32_v1_t *) (kmod_info); + + if (isOldInterface) { + kmod->address = (uint32_t) object->link_addr; + } else { + kmod->address = (uint32_t) object->split_info.vmaddr_TEXT; + } + + kmod->size = (uint32_t) size; + kmod->hdr_size = (uint32_t) header_size; #if 
!KERNEL - if (kxld_object_target_needs_swap(object)) { - kmod->address = OSSwapInt32(kmod->address); - kmod->size = OSSwapInt32(kmod->size); - kmod->hdr_size = OSSwapInt32(kmod->hdr_size); - } + if (kxld_object_target_needs_swap(object)) { + kmod->address = OSSwapInt32(kmod->address); + kmod->size = OSSwapInt32(kmod->size); + kmod->hdr_size = OSSwapInt32(kmod->hdr_size); + } #endif /* !KERNEL */ - } else { - kmod_info_64_v1_t *kmod = (kmod_info_64_v1_t *) (kmod_info); - - if (isOldInterface) { - kmod->address = object->link_addr; - } - else { - kmod->address = object->split_info.vmaddr_TEXT; - } - - kmod->size = size; - kmod->hdr_size = header_size; + } else { + kmod_info_64_v1_t *kmod = (kmod_info_64_v1_t *) (kmod_info); + + if (isOldInterface) { + kmod->address = object->link_addr; + } else { + kmod->address = object->split_info.vmaddr_TEXT; + } + + kmod->size = size; + kmod->hdr_size = header_size; #if !KERNEL - if (kxld_object_target_needs_swap(object)) { - kmod->address = OSSwapInt64(kmod->address); - kmod->size = OSSwapInt64(kmod->size); - kmod->hdr_size = OSSwapInt64(kmod->hdr_size); - } + if (kxld_object_target_needs_swap(object)) { + kmod->address = OSSwapInt64(kmod->address); + kmod->size = OSSwapInt64(kmod->size); + kmod->hdr_size = OSSwapInt64(kmod->hdr_size); + } #endif /* !KERNEL */ - + #if SPLIT_KEXTS_DEBUG - { - kxld_log(kKxldLogLinking, kKxldLogErr, - " kmodsect %p kmod_info %p = kmodsect->data %p + kmod_offset %lu <%s>", - (void *) kmodsect, - (void *) kmod_info, - (void *) kmodsect->data, - kmod_offset, - __func__); - - kxld_log(kKxldLogLinking, kKxldLogErr, - " kmod_info data: address %p size %llu hdr_size %llu start_addr %p stop_addr %p <%s>", - (void *) kmod->address, - kmod->size, - kmod->hdr_size, - (void *) kmod->start_addr, - (void *) kmod->stop_addr, - __func__); - } + { + kxld_log(kKxldLogLinking, kKxldLogErr, + " kmodsect %p kmod_info %p = kmodsect->data %p + kmod_offset %lu <%s>", + (void *) kmodsect, + (void *) kmod_info, + (void *) kmodsect->data, + kmod_offset, + __func__); + + kxld_log(kKxldLogLinking, kKxldLogErr, + " kmod_info data: address %p size %llu hdr_size %llu start_addr %p stop_addr %p <%s>", + (void *) kmod->address, + kmod->size, + kmod->hdr_size, + (void *) kmod->start_addr, + (void *) kmod->stop_addr, + __func__); + } #endif + } - } - - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_PIC_KEXTS /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ static boolean_t target_supports_slideable_kexts(const KXLDObject *object) { - check(object); + check(object); - return (object->cputype != CPU_TYPE_I386 && object->include_kaslr_relocs); + return object->cputype != CPU_TYPE_I386 && object->include_kaslr_relocs; } #endif /* KXLD_PIC_KEXTS */ diff --git a/libkern/kxld/kxld_object.h b/libkern/kxld/kxld_object.h index 45d00530a..cea4d9bff 100644 --- a/libkern/kxld/kxld_object.h +++ b/libkern/kxld/kxld_object.h @@ -2,7 +2,7 @@ * Copyright (c) 2009, 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_OBJECT_H_ @@ -43,104 +43,104 @@ struct kxld_sect; struct kxld_sym; struct kxld_symtab; -typedef struct kxld_object KXLDObject; +typedef struct kxld_object KXLDObject; /******************************************************************************* * Constructors and destructors *******************************************************************************/ size_t kxld_object_sizeof(void) - __attribute__((const, visibility("hidden"))); - -kern_return_t kxld_object_init_from_macho(KXLDObject *object, +__attribute__((const, visibility("hidden"))); + +kern_return_t kxld_object_init_from_macho(KXLDObject *object, u_char *file, u_long size, const char *name, struct kxld_array *section_order, cpu_type_t cputype, cpu_subtype_t cpusubtype, KXLDFlags flags) - __attribute__((nonnull(1,2,4), visibility("hidden"))); +__attribute__((nonnull(1, 2, 4), visibility("hidden"))); void kxld_object_clear(KXLDObject *object) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_object_deinit(KXLDObject *object) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors *******************************************************************************/ const u_char * kxld_object_get_file(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); const char * kxld_object_get_name(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_object_is_32_bit(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_object_is_final_image(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_object_is_kernel(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_object_is_linked(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_object_target_supports_strict_patching(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_object_target_supports_common_symbols(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); 
+__attribute__((pure, nonnull, visibility("hidden"))); const struct kxld_relocator * kxld_object_get_relocator( - const KXLDObject * object) - __attribute__((pure, nonnull, visibility("hidden"))); + const KXLDObject * object) +__attribute__((pure, nonnull, visibility("hidden"))); const struct kxld_reloc * kxld_object_get_reloc_at_symbol( - const KXLDObject *object, const struct kxld_sym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); + const KXLDObject *object, const struct kxld_sym *sym) +__attribute__((pure, nonnull, visibility("hidden"))); const struct kxld_sym * kxld_object_get_symbol_of_reloc( - const KXLDObject *object, const struct kxld_reloc *reloc, - const struct kxld_sect *sect) - __attribute__((pure, nonnull, visibility("hidden"))); + const KXLDObject *object, const struct kxld_reloc *reloc, + const struct kxld_sect *sect) +__attribute__((pure, nonnull, visibility("hidden"))); const struct kxld_sect * kxld_object_get_section_by_index( - const KXLDObject *object, u_int sectnum) - __attribute__((pure, nonnull, visibility("hidden"))); + const KXLDObject *object, u_int sectnum) +__attribute__((pure, nonnull, visibility("hidden"))); const struct kxld_array * kxld_object_get_extrelocs( - const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); + const KXLDObject *object) +__attribute__((pure, nonnull, visibility("hidden"))); const struct kxld_symtab * kxld_object_get_symtab(const KXLDObject *object) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); -void kxld_object_get_vmsize(const KXLDObject *object, u_long *header_size, +void kxld_object_get_vmsize(const KXLDObject *object, u_long *header_size, u_long *vmsize) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_object_set_linked_object_size(KXLDObject *object, u_long vmsize) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_object_get_vmsize_for_seg_by_name(const KXLDObject *object, - const char *segname, - u_long *vmsize) + const char *segname, + u_long *vmsize) __attribute__((nonnull, visibility("hidden"))); splitKextLinkInfo * kxld_object_get_link_info(KXLDObject *object) __attribute__((nonnull, visibility("hidden"))); void kxld_object_set_link_info(KXLDObject *object, - splitKextLinkInfo *link_info) + splitKextLinkInfo *link_info) __attribute__((nonnull, visibility("hidden"))); /* This will be the same size as kxld_kext_get_vmsize */ kern_return_t kxld_object_export_linked_object(const KXLDObject *object, - void *linked_object - ) - __attribute__((nonnull, visibility("hidden"))); + void *linked_object + ) +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* @@ -148,29 +148,28 @@ kern_return_t kxld_object_export_linked_object(const KXLDObject *object, *******************************************************************************/ kern_return_t kxld_object_index_symbols_by_name(KXLDObject *object) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_object_index_cxx_symbols_by_value(KXLDObject *object) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_object_relocate(KXLDObject *object, kxld_addr_t link_address) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); 
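/*
 * A hedged sketch of how two of the entry points declared here compose
 * for a caller.  The wrapper name link_at_address is hypothetical; the
 * two calls and their signatures are the ones declared in this header,
 * and real linking additionally resolves symbols and patches vtables
 * between these steps.  Assumes kxld_object.h and its dependencies are
 * included.
 */
static kern_return_t
link_at_address(KXLDObject *object, kxld_addr_t link_address,
    const struct kxld_dict *patched_vtables)
{
	kern_return_t rval;

	/* Slide the object's segments and sections to their final addresses. */
	rval = kxld_object_relocate(object, link_address);
	if (rval != KERN_SUCCESS) {
		return rval;
	}

	/* Apply the recorded relocation entries, consulting the
	 * already-patched vtables. */
	return kxld_object_process_relocations(object, patched_vtables);
}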
-kern_return_t kxld_object_resolve_symbol(KXLDObject *object, +kern_return_t kxld_object_resolve_symbol(KXLDObject *object, const struct kxld_sym *sym, kxld_addr_t addr) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_object_patch_symbol(KXLDObject *object, const struct kxld_sym *sym) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); -kern_return_t kxld_object_add_symbol(KXLDObject *object, char *name, +kern_return_t kxld_object_add_symbol(KXLDObject *object, char *name, kxld_addr_t link_addr, const struct kxld_sym **sym_out) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); -kern_return_t kxld_object_process_relocations(KXLDObject *object, +kern_return_t kxld_object_process_relocations(KXLDObject *object, const struct kxld_dict *patched_vtables) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* _KXLD_OBJECT_H_ */ - diff --git a/libkern/kxld/kxld_reloc.c b/libkern/kxld/kxld_reloc.c index a2186be8f..7a289f744 100644 --- a/libkern/kxld/kxld_reloc.c +++ b/libkern/kxld/kxld_reloc.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -36,9 +36,9 @@ #include #include - /* Get machine.h from the kernel source so we can support all platforms - * that the kernel supports. Otherwise we're at the mercy of the host. - */ +/* Get machine.h from the kernel source so we can support all platforms + * that the kernel supports. Otherwise we're at the mercy of the host. 
+ */ #include "../../osfmk/mach/machine.h" #endif @@ -105,7 +105,7 @@ extern uint32_t *kaslr_offsets; #define FLIP_PREDICT_BIT(x) x ^= 0x00200000 #define SIGN_EXTEND_MASK(n) (1 << ((n) - 1)) -#define SIGN_EXTEND(x,n) (((x) ^ SIGN_EXTEND_MASK(n)) - SIGN_EXTEND_MASK(n)) +#define SIGN_EXTEND(x, n) (((x) ^ SIGN_EXTEND_MASK(n)) - SIGN_EXTEND_MASK(n)) #define BR14_NBITS_DISPLACEMENT 16 #define BR24_NBITS_DISPLACEMENT 26 @@ -115,85 +115,85 @@ extern uint32_t *kaslr_offsets; * Prototypes *******************************************************************************/ #if KXLD_USER_OR_I386 -static boolean_t generic_reloc_has_pair(u_int _type) - __attribute__((const)); +static boolean_t generic_reloc_has_pair(u_int _type) +__attribute__((const)); static u_int generic_reloc_get_pair_type(u_int _prev_type) - __attribute__((const)); +__attribute__((const)); static boolean_t generic_reloc_has_got(u_int _type) - __attribute__((const)); +__attribute__((const)); static kern_return_t generic_process_reloc(const KXLDRelocator *relocator, - u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, - kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, + u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, + kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, kxld_addr_t pair_target, boolean_t swap); #endif /* KXLD_USER_OR_I386 */ -#if KXLD_USER_OR_X86_64 -static boolean_t x86_64_reloc_has_pair(u_int _type) - __attribute__((const)); -static u_int x86_64_reloc_get_pair_type(u_int _prev_type) - __attribute__((const)); +#if KXLD_USER_OR_X86_64 +static boolean_t x86_64_reloc_has_pair(u_int _type) +__attribute__((const)); +static u_int x86_64_reloc_get_pair_type(u_int _prev_type) +__attribute__((const)); static boolean_t x86_64_reloc_has_got(u_int _type) - __attribute__((const)); -static kern_return_t x86_64_process_reloc(const KXLDRelocator *relocator, - u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, - kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, +__attribute__((const)); +static kern_return_t x86_64_process_reloc(const KXLDRelocator *relocator, + u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, + kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, kxld_addr_t pair_target, boolean_t swap); -static kern_return_t calculate_displacement_x86_64(uint64_t target, +static kern_return_t calculate_displacement_x86_64(uint64_t target, uint64_t adjustment, int32_t *instr32); #endif /* KXLD_USER_OR_X86_64 */ #if KXLD_USER_OR_ARM -static boolean_t arm_reloc_has_pair(u_int _type) - __attribute__((const)); -static u_int arm_reloc_get_pair_type(u_int _prev_type) - __attribute__((const)); +static boolean_t arm_reloc_has_pair(u_int _type) +__attribute__((const)); +static u_int arm_reloc_get_pair_type(u_int _prev_type) +__attribute__((const)); static boolean_t arm_reloc_has_got(u_int _type) - __attribute__((const)); -static kern_return_t arm_process_reloc(const KXLDRelocator *relocator, - u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, - kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, +__attribute__((const)); +static kern_return_t arm_process_reloc(const KXLDRelocator *relocator, + u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, + kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, kxld_addr_t pair_target, boolean_t swap); #endif /* KXLD_USER_OR_ARM */ #if 
KXLD_USER_OR_ARM64 -static boolean_t arm64_reloc_has_pair(u_int _type) - __attribute__((const)); -static u_int arm64_reloc_get_pair_type(u_int _prev_type) - __attribute__((const)); +static boolean_t arm64_reloc_has_pair(u_int _type) +__attribute__((const)); +static u_int arm64_reloc_get_pair_type(u_int _prev_type) +__attribute__((const)); static boolean_t arm64_reloc_has_got(u_int _type) - __attribute__((const)); -static kern_return_t arm64_process_reloc(const KXLDRelocator *relocator, - u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, - kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, +__attribute__((const)); +static kern_return_t arm64_process_reloc(const KXLDRelocator *relocator, + u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, + kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, kxld_addr_t pair_target, boolean_t swap); #endif /* KXLD_USER_OR_ARM64 */ #if KXLD_USER_OR_ILP32 -static kxld_addr_t get_pointer_at_addr_32(const KXLDRelocator *relocator, +static kxld_addr_t get_pointer_at_addr_32(const KXLDRelocator *relocator, const u_char *data, u_long offset) - __attribute__((pure, nonnull)); +__attribute__((pure, nonnull)); #endif /* KXLD_USER_OR_ILP32 */ #if KXLD_USER_OR_LP64 -static kxld_addr_t get_pointer_at_addr_64(const KXLDRelocator *relocator, +static kxld_addr_t get_pointer_at_addr_64(const KXLDRelocator *relocator, const u_char *data, u_long offset) - __attribute__((pure, nonnull)); +__attribute__((pure, nonnull)); #endif /* KXLD_USER_OR_LP64 */ -static u_int count_relocatable_relocs(const KXLDRelocator *relocator, +static u_int count_relocatable_relocs(const KXLDRelocator *relocator, const struct relocation_info *relocs, u_int nrelocs) - __attribute__((pure)); +__attribute__((pure)); -static kern_return_t calculate_targets(KXLDRelocator *relocator, +static kern_return_t calculate_targets(KXLDRelocator *relocator, kxld_addr_t *_target, kxld_addr_t *_pair_target, const KXLDReloc *reloc); -static kxld_addr_t align_raw_function_address(const KXLDRelocator *relocator, +static kxld_addr_t align_raw_function_address(const KXLDRelocator *relocator, kxld_addr_t value); -static kern_return_t get_target_by_address_lookup(kxld_addr_t *target, +static kern_return_t get_target_by_address_lookup(kxld_addr_t *target, kxld_addr_t addr, const KXLDArray *sectarray); static kern_return_t check_for_direct_pure_virtual_call( - const KXLDRelocator *relocator, u_long offset); + const KXLDRelocator *relocator, u_long offset); #if KXLD_PIC_KEXTS static u_long get_macho_data_size_for_array(const KXLDArray *relocs); @@ -204,201 +204,199 @@ static kern_return_t export_macho_for_array(const KXLDRelocator *relocator, /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_relocator_init(KXLDRelocator *relocator, u_char *file, - const KXLDSymtab *symtab, const KXLDArray *sectarray, cpu_type_t cputype, + const KXLDSymtab *symtab, const KXLDArray *sectarray, cpu_type_t cputype, cpu_subtype_t cpusubtype __unused, boolean_t swap) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(relocator); + check(relocator); - switch(cputype) { + switch (cputype) { #if KXLD_USER_OR_I386 - case CPU_TYPE_I386: - relocator->reloc_has_pair = generic_reloc_has_pair; - relocator->reloc_get_pair_type = generic_reloc_get_pair_type; - relocator->reloc_has_got = 
generic_reloc_has_got; - relocator->process_reloc = generic_process_reloc; - relocator->function_align = 0; - relocator->is_32_bit = TRUE; - relocator->may_scatter = TRUE; - break; + case CPU_TYPE_I386: + relocator->reloc_has_pair = generic_reloc_has_pair; + relocator->reloc_get_pair_type = generic_reloc_get_pair_type; + relocator->reloc_has_got = generic_reloc_has_got; + relocator->process_reloc = generic_process_reloc; + relocator->function_align = 0; + relocator->is_32_bit = TRUE; + relocator->may_scatter = TRUE; + break; #endif /* KXLD_USER_OR_I386 */ #if KXLD_USER_OR_X86_64 - case CPU_TYPE_X86_64: - relocator->reloc_has_pair = x86_64_reloc_has_pair; - relocator->reloc_get_pair_type = x86_64_reloc_get_pair_type; - relocator->reloc_has_got = x86_64_reloc_has_got; - relocator->process_reloc = x86_64_process_reloc; - relocator->function_align = 0; - relocator->is_32_bit = FALSE; - relocator->may_scatter = FALSE; - break; + case CPU_TYPE_X86_64: + relocator->reloc_has_pair = x86_64_reloc_has_pair; + relocator->reloc_get_pair_type = x86_64_reloc_get_pair_type; + relocator->reloc_has_got = x86_64_reloc_has_got; + relocator->process_reloc = x86_64_process_reloc; + relocator->function_align = 0; + relocator->is_32_bit = FALSE; + relocator->may_scatter = FALSE; + break; #endif /* KXLD_USER_OR_X86_64 */ #if KXLD_USER_OR_ARM - case CPU_TYPE_ARM: - relocator->reloc_has_pair = arm_reloc_has_pair; - relocator->reloc_get_pair_type = arm_reloc_get_pair_type; - relocator->reloc_has_got = arm_reloc_has_got; - relocator->process_reloc = arm_process_reloc; - relocator->function_align = 1; - relocator->is_32_bit = TRUE; - relocator->may_scatter = FALSE; - break; + case CPU_TYPE_ARM: + relocator->reloc_has_pair = arm_reloc_has_pair; + relocator->reloc_get_pair_type = arm_reloc_get_pair_type; + relocator->reloc_has_got = arm_reloc_has_got; + relocator->process_reloc = arm_process_reloc; + relocator->function_align = 1; + relocator->is_32_bit = TRUE; + relocator->may_scatter = FALSE; + break; #endif /* KXLD_USER_OR_ARM */ #if KXLD_USER_OR_ARM64 - case CPU_TYPE_ARM64: - relocator->reloc_has_pair = arm64_reloc_has_pair; - relocator->reloc_get_pair_type = arm64_reloc_get_pair_type; - relocator->reloc_has_got = arm64_reloc_has_got; - relocator->process_reloc = arm64_process_reloc; - relocator->function_align = 0; - relocator->is_32_bit = FALSE; - relocator->may_scatter = FALSE; - break; + case CPU_TYPE_ARM64: + relocator->reloc_has_pair = arm64_reloc_has_pair; + relocator->reloc_get_pair_type = arm64_reloc_get_pair_type; + relocator->reloc_has_got = arm64_reloc_has_got; + relocator->process_reloc = arm64_process_reloc; + relocator->function_align = 0; + relocator->is_32_bit = FALSE; + relocator->may_scatter = FALSE; + break; #endif /* KXLD_USER_OR_ARM64 */ - default: - rval = KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, - kKxldLogArchNotSupported, cputype); - goto finish; - } + default: + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, + kKxldLogArchNotSupported, cputype); + goto finish; + } - relocator->file = file; - relocator->symtab = symtab; - relocator->sectarray = sectarray; - relocator->is_32_bit = kxld_is_32_bit(cputype); - relocator->swap = swap; + relocator->file = file; + relocator->symtab = symtab; + relocator->sectarray = sectarray; + relocator->is_32_bit = kxld_is_32_bit(cputype); + relocator->swap = swap; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* 
*******************************************************************************/ kern_return_t -kxld_reloc_create_macho(KXLDArray *relocarray, const KXLDRelocator *relocator, +kxld_reloc_create_macho(KXLDArray *relocarray, const KXLDRelocator *relocator, const struct relocation_info *srcs, u_int nsrcs) { - kern_return_t rval = KERN_FAILURE; - KXLDReloc *reloc = NULL; - u_int nrelocs = 0; - const struct relocation_info *src = NULL; - const struct scattered_relocation_info *scatsrc = NULL; - u_int i = 0; - u_int reloc_index = 0; - - check(relocarray); - check(srcs); - - /* If there are no relocation entries, just return */ - if (!nsrcs) { - rval = KERN_SUCCESS; - goto finish; - } - - /* Count the number of non-pair relocs */ - nrelocs = count_relocatable_relocs(relocator, srcs, nsrcs); - - if (nrelocs) { - - /* Allocate the array of relocation entries */ - - rval = kxld_array_init(relocarray, sizeof(KXLDReloc), nrelocs); - require_noerr(rval, finish); - - /* Initialize the relocation entries */ - - for (i = 0; i < nsrcs; ++i) { - src = srcs + i; - scatsrc = (const struct scattered_relocation_info *) src; - - /* A section-based relocation entry can be skipped for absolute - * symbols. - */ - - if (!(relocator->may_scatter && (src->r_address & R_SCATTERED)) && - !(src->r_extern) && (R_ABS == src->r_symbolnum)) - { - continue; - } - - /* Pull out the data from the relocation entries. The target_type - * depends on the r_extern bit: - * Scattered -> Section Lookup by Address - * Local (not extern) -> Section by Index - * Extern -> Symbolnum by Index - */ - reloc = kxld_array_get_item(relocarray, reloc_index++); - if (relocator->may_scatter && (src->r_address & R_SCATTERED)) { - reloc->address = scatsrc->r_address; - reloc->pcrel = scatsrc->r_pcrel; - reloc->length = scatsrc->r_length; - reloc->reloc_type = scatsrc->r_type; - reloc->target = scatsrc->r_value; - reloc->target_type = KXLD_TARGET_LOOKUP; - } else { - reloc->address = src->r_address; - reloc->pcrel = src->r_pcrel; - reloc->length = src->r_length; - reloc->reloc_type = src->r_type; - reloc->target = src->r_symbolnum; - - if (0 == src->r_extern) { - reloc->target_type = KXLD_TARGET_SECTNUM; - reloc->target -= 1; - } else { - reloc->target_type = KXLD_TARGET_SYMBOLNUM; - } - } - - /* Find the pair entry if it exists */ - - if (relocator->reloc_has_pair(reloc->reloc_type)) { - ++i; - require_action(i < nsrcs, finish, rval=KERN_FAILURE); - - src = srcs + i; - scatsrc = (const struct scattered_relocation_info *) src; - - if (relocator->may_scatter && (src->r_address & R_SCATTERED)) { - require_action(relocator->reloc_get_pair_type( - reloc->reloc_type) == scatsrc->r_type, - finish, rval=KERN_FAILURE); - reloc->pair_address= scatsrc->r_address; - reloc->pair_target = scatsrc->r_value; - reloc->pair_target_type = KXLD_TARGET_LOOKUP; - } else { - require_action(relocator->reloc_get_pair_type( - reloc->reloc_type) == scatsrc->r_type, - finish, rval=KERN_FAILURE); - reloc->pair_address = scatsrc->r_address; - if (src->r_extern) { - reloc->pair_target = src->r_symbolnum; - reloc->pair_target_type = KXLD_TARGET_SYMBOLNUM; - } else { - reloc->pair_target = src->r_address; - reloc->pair_target_type = KXLD_TARGET_VALUE; - } - } - } else { - reloc->pair_target = 0; - if (relocator->reloc_has_got(reloc->reloc_type)) { - reloc->pair_target_type = KXLD_TARGET_GOT; - } else { - reloc->pair_target_type = KXLD_TARGET_NONE; - } - } - } // for... 
- } - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDReloc *reloc = NULL; + u_int nrelocs = 0; + const struct relocation_info *src = NULL; + const struct scattered_relocation_info *scatsrc = NULL; + u_int i = 0; + u_int reloc_index = 0; + + check(relocarray); + check(srcs); + + /* If there are no relocation entries, just return */ + if (!nsrcs) { + rval = KERN_SUCCESS; + goto finish; + } + + /* Count the number of non-pair relocs */ + nrelocs = count_relocatable_relocs(relocator, srcs, nsrcs); + + if (nrelocs) { + /* Allocate the array of relocation entries */ + + rval = kxld_array_init(relocarray, sizeof(KXLDReloc), nrelocs); + require_noerr(rval, finish); + + /* Initialize the relocation entries */ + + for (i = 0; i < nsrcs; ++i) { + src = srcs + i; + scatsrc = (const struct scattered_relocation_info *) src; + + /* A section-based relocation entry can be skipped for absolute + * symbols. + */ + + if (!(relocator->may_scatter && (src->r_address & R_SCATTERED)) && + !(src->r_extern) && (R_ABS == src->r_symbolnum)) { + continue; + } + + /* Pull out the data from the relocation entries. The target_type + * depends on the r_extern bit: + * Scattered -> Section Lookup by Address + * Local (not extern) -> Section by Index + * Extern -> Symbolnum by Index + */ + reloc = kxld_array_get_item(relocarray, reloc_index++); + if (relocator->may_scatter && (src->r_address & R_SCATTERED)) { + reloc->address = scatsrc->r_address; + reloc->pcrel = scatsrc->r_pcrel; + reloc->length = scatsrc->r_length; + reloc->reloc_type = scatsrc->r_type; + reloc->target = scatsrc->r_value; + reloc->target_type = KXLD_TARGET_LOOKUP; + } else { + reloc->address = src->r_address; + reloc->pcrel = src->r_pcrel; + reloc->length = src->r_length; + reloc->reloc_type = src->r_type; + reloc->target = src->r_symbolnum; + + if (0 == src->r_extern) { + reloc->target_type = KXLD_TARGET_SECTNUM; + reloc->target -= 1; + } else { + reloc->target_type = KXLD_TARGET_SYMBOLNUM; + } + } + + /* Find the pair entry if it exists */ + + if (relocator->reloc_has_pair(reloc->reloc_type)) { + ++i; + require_action(i < nsrcs, finish, rval = KERN_FAILURE); + + src = srcs + i; + scatsrc = (const struct scattered_relocation_info *) src; + + if (relocator->may_scatter && (src->r_address & R_SCATTERED)) { + require_action(relocator->reloc_get_pair_type( + reloc->reloc_type) == scatsrc->r_type, + finish, rval = KERN_FAILURE); + reloc->pair_address = scatsrc->r_address; + reloc->pair_target = scatsrc->r_value; + reloc->pair_target_type = KXLD_TARGET_LOOKUP; + } else { + require_action(relocator->reloc_get_pair_type( + reloc->reloc_type) == scatsrc->r_type, + finish, rval = KERN_FAILURE); + reloc->pair_address = scatsrc->r_address; + if (src->r_extern) { + reloc->pair_target = src->r_symbolnum; + reloc->pair_target_type = KXLD_TARGET_SYMBOLNUM; + } else { + reloc->pair_target = src->r_address; + reloc->pair_target_type = KXLD_TARGET_VALUE; + } + } + } else { + reloc->pair_target = 0; + if (relocator->reloc_has_got(reloc->reloc_type)) { + reloc->pair_target_type = KXLD_TARGET_GOT; + } else { + reloc->pair_target_type = KXLD_TARGET_NONE; + } + } + } // for... 
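/*
 * A small sketch of the skip rule the loop above applies before consuming
 * an entry: scattered entries are flagged by the R_SCATTERED bit in
 * r_address, and a non-extern, section-based entry naming R_ABS is already
 * absolute and needs no fixup.  reloc_is_skippable is an illustrative
 * helper, not kxld API.
 */
#include <mach-o/reloc.h>
#include <stdbool.h>

static bool
reloc_is_skippable(const struct relocation_info *src, bool may_scatter)
{
	/* Scattered entries are never the section-based R_ABS case, so they
	 * are not skipped here (pairs are consumed by the pair logic). */
	if (may_scatter && (src->r_address & R_SCATTERED)) {
		return false;
	}
	/* Non-extern entries carry a section number in r_symbolnum;
	 * R_ABS marks an absolute symbol whose value is already final. */
	return !src->r_extern && src->r_symbolnum == R_ABS;
}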
+ } + rval = KERN_SUCCESS; finish: - return rval; + return rval; } @@ -411,40 +409,39 @@ static u_int count_relocatable_relocs(const KXLDRelocator *relocator, const struct relocation_info *relocs, u_int nrelocs) { - u_int num_nonpair_relocs = 0; - u_int i = 0; - const struct relocation_info *reloc = NULL; - const struct scattered_relocation_info *sreloc = NULL; - - check(relocator); - check(relocs); - - /* Loop over all of the relocation entries */ - - num_nonpair_relocs = 1; - for (i = 1; i < nrelocs; ++i) { - reloc = relocs + i; - - if (reloc->r_address & R_SCATTERED) { - /* A scattered relocation entry is relocatable as long as it's not a - * pair. - */ - sreloc = (const struct scattered_relocation_info *) reloc; - - num_nonpair_relocs += - !relocator->reloc_has_pair(sreloc->r_type); - } else { - /* A normal relocation entry is relocatable if it is not a pair and - * if it is not a section-based relocation for an absolute symbol. - */ - num_nonpair_relocs += - !(relocator->reloc_has_pair(reloc->r_type) - || (0 == reloc->r_extern && R_ABS == reloc->r_symbolnum)); - } - - } - - return num_nonpair_relocs; + u_int num_nonpair_relocs = 0; + u_int i = 0; + const struct relocation_info *reloc = NULL; + const struct scattered_relocation_info *sreloc = NULL; + + check(relocator); + check(relocs); + + /* Loop over all of the relocation entries */ + + num_nonpair_relocs = 1; + for (i = 1; i < nrelocs; ++i) { + reloc = relocs + i; + + if (reloc->r_address & R_SCATTERED) { + /* A scattered relocation entry is relocatable as long as it's not a + * pair. + */ + sreloc = (const struct scattered_relocation_info *) reloc; + + num_nonpair_relocs += + !relocator->reloc_has_pair(sreloc->r_type); + } else { + /* A normal relocation entry is relocatable if it is not a pair and + * if it is not a section-based relocation for an absolute symbol. 
+ */ + num_nonpair_relocs += + !(relocator->reloc_has_pair(reloc->r_type) + || (0 == reloc->r_extern && R_ABS == reloc->r_symbolnum)); + } + } + + return num_nonpair_relocs; } /******************************************************************************* @@ -452,38 +449,38 @@ count_relocatable_relocs(const KXLDRelocator *relocator, void kxld_relocator_clear(KXLDRelocator *relocator) { - bzero(relocator, sizeof(*relocator)); + bzero(relocator, sizeof(*relocator)); } /******************************************************************************* *******************************************************************************/ -boolean_t +boolean_t kxld_relocator_has_pair(const KXLDRelocator *relocator, u_int r_type) { - check(relocator); + check(relocator); - return relocator->reloc_has_pair(r_type); + return relocator->reloc_has_pair(r_type); } /******************************************************************************* *******************************************************************************/ -u_int +u_int kxld_relocator_get_pair_type(const KXLDRelocator *relocator, u_int prev_r_type) { - check(relocator); + check(relocator); - return relocator->reloc_get_pair_type(prev_r_type); + return relocator->reloc_get_pair_type(prev_r_type); } /******************************************************************************* *******************************************************************************/ -boolean_t +boolean_t kxld_relocator_has_got(const KXLDRelocator *relocator, u_int r_type) { - check(relocator); + check(relocator); - return relocator->reloc_has_got(r_type); + return relocator->reloc_has_got(r_type); } /******************************************************************************* @@ -492,55 +489,57 @@ KXLDSym * kxld_reloc_get_symbol(const KXLDRelocator *relocator, const KXLDReloc *reloc, const u_char *data) { - KXLDSym *sym = NULL; - kxld_addr_t value = 0; - - check(reloc); - - switch (reloc->target_type) { - case KXLD_TARGET_SYMBOLNUM: - sym = kxld_symtab_get_symbol_by_index(relocator->symtab, reloc->target); - break; - case KXLD_TARGET_SECTNUM: - if (data) { - value = kxld_relocator_get_pointer_at_addr(relocator, data, - reloc->address); - sym = kxld_symtab_get_cxx_symbol_by_value(relocator->symtab, value); - } - break; - default: - sym = NULL; - break; - } - - return sym; + KXLDSym *sym = NULL; + kxld_addr_t value = 0; + + check(reloc); + + switch (reloc->target_type) { + case KXLD_TARGET_SYMBOLNUM: + sym = kxld_symtab_get_symbol_by_index(relocator->symtab, reloc->target); + break; + case KXLD_TARGET_SECTNUM: + if (data) { + value = kxld_relocator_get_pointer_at_addr(relocator, data, + reloc->address); + sym = kxld_symtab_get_cxx_symbol_by_value(relocator->symtab, value); + } + break; + default: + sym = NULL; + break; + } + + return sym; } /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_reloc_get_reloc_index_by_offset(const KXLDArray *relocs, +kxld_reloc_get_reloc_index_by_offset(const KXLDArray *relocs, kxld_size_t offset, u_int *idx) { - kern_return_t rval = KERN_FAILURE; - KXLDReloc *reloc = NULL; - u_int i = 0; - - for (i = 0; i < relocs->nitems; ++i) { - reloc = kxld_array_get_item(relocs, i); - if (reloc->address == offset) break; - } - - if (i >= relocs->nitems) { - rval = KERN_FAILURE; - goto finish; - } - - *idx = i; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDReloc *reloc = NULL; + u_int i = 0; + + for 
(i = 0; i < relocs->nitems; ++i) { + reloc = kxld_array_get_item(relocs, i); + if (reloc->address == offset) { + break; + } + } + + if (i >= relocs->nitems) { + rval = KERN_FAILURE; + goto finish; + } + + *idx = i; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -548,17 +547,19 @@ finish: KXLDReloc * kxld_reloc_get_reloc_by_offset(const KXLDArray *relocs, kxld_addr_t offset) { - kern_return_t rval = KERN_FAILURE; - KXLDReloc *reloc = NULL; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDReloc *reloc = NULL; + u_int i = 0; + + rval = kxld_reloc_get_reloc_index_by_offset(relocs, offset, &i); + if (rval) { + goto finish; + } - rval = kxld_reloc_get_reloc_index_by_offset(relocs, offset, &i); - if (rval) goto finish; + reloc = kxld_array_get_item(relocs, i); - reloc = kxld_array_get_item(relocs, i); - finish: - return reloc; + return reloc; } #if KXLD_PIC_KEXTS @@ -567,22 +568,22 @@ finish: u_long kxld_reloc_get_macho_header_size() { - return sizeof(struct dysymtab_command); + return sizeof(struct dysymtab_command); } /******************************************************************************* *******************************************************************************/ u_long kxld_reloc_get_macho_data_size( - const KXLDArray *locrelocs, - const KXLDArray *extrelocs) + const KXLDArray *locrelocs, + const KXLDArray *extrelocs) { - u_long rval = 0; + u_long rval = 0; - rval += get_macho_data_size_for_array(locrelocs); - rval += get_macho_data_size_for_array(extrelocs); + rval += get_macho_data_size_for_array(locrelocs); + rval += get_macho_data_size_for_array(extrelocs); - return (rval); + return rval; } /******************************************************************************* @@ -593,80 +594,80 @@ kxld_reloc_export_macho(const KXLDRelocator *relocator, u_char *buf, u_long *header_offset, u_long header_size, u_long *data_offset, u_long size) { - kern_return_t rval = KERN_FAILURE; - struct dysymtab_command *dysymtabhdr = NULL; - struct relocation_info *start = NULL; - struct relocation_info *dst = NULL; - u_long count = 0; - u_long data_size = 0; - - check(locrelocs); - check(extrelocs); - check(buf); - check(header_offset); - check(data_offset); - - require_action(sizeof(*dysymtabhdr) <= header_size - *header_offset, finish, rval=KERN_FAILURE); - dysymtabhdr = (struct dysymtab_command *) ((void *) (buf + *header_offset)); - *header_offset += sizeof(*dysymtabhdr); - - data_size = kxld_reloc_get_macho_data_size(locrelocs, extrelocs); - require_action((*data_offset + data_size) <= size, finish, rval=KERN_FAILURE); - - start = dst = (struct relocation_info *) ((void *) (buf + *data_offset)); - - if (kaslr_offsets == NULL) { - kaslr_offsets_index = 0; - kaslr_offsets_count = locrelocs->nitems + extrelocs->nitems; - kaslr_offsets = (uint32_t *)malloc(kaslr_offsets_count * sizeof(*kaslr_offsets)); - bzero(kaslr_offsets, kaslr_offsets_count * sizeof(*kaslr_offsets)); - } - - // copies the reloc data into the __LINKEDIT segment - // data_offset is the new value for locreloff - rval = export_macho_for_array(relocator, locrelocs, &dst); - require_noerr(rval, finish); - - rval = export_macho_for_array(relocator, extrelocs, &dst); - require_noerr(rval, finish); - - count = dst - start; - - memset(dysymtabhdr, 0, sizeof(*dysymtabhdr)); - dysymtabhdr->cmd = LC_DYSYMTAB; - dysymtabhdr->cmdsize = (uint32_t) sizeof(*dysymtabhdr); - dysymtabhdr->locreloff = (uint32_t) *data_offset; - 
dysymtabhdr->nlocrel = (uint32_t) count; - - *data_offset += count * sizeof(struct relocation_info); - + kern_return_t rval = KERN_FAILURE; + struct dysymtab_command *dysymtabhdr = NULL; + struct relocation_info *start = NULL; + struct relocation_info *dst = NULL; + u_long count = 0; + u_long data_size = 0; + + check(locrelocs); + check(extrelocs); + check(buf); + check(header_offset); + check(data_offset); + + require_action(sizeof(*dysymtabhdr) <= header_size - *header_offset, finish, rval = KERN_FAILURE); + dysymtabhdr = (struct dysymtab_command *) ((void *) (buf + *header_offset)); + *header_offset += sizeof(*dysymtabhdr); + + data_size = kxld_reloc_get_macho_data_size(locrelocs, extrelocs); + require_action((*data_offset + data_size) <= size, finish, rval = KERN_FAILURE); + + start = dst = (struct relocation_info *) ((void *) (buf + *data_offset)); + + if (kaslr_offsets == NULL) { + kaslr_offsets_index = 0; + kaslr_offsets_count = locrelocs->nitems + extrelocs->nitems; + kaslr_offsets = (uint32_t *)malloc(kaslr_offsets_count * sizeof(*kaslr_offsets)); + bzero(kaslr_offsets, kaslr_offsets_count * sizeof(*kaslr_offsets)); + } + + // copies the reloc data into the __LINKEDIT segment + // data_offset is the new value for locreloff + rval = export_macho_for_array(relocator, locrelocs, &dst); + require_noerr(rval, finish); + + rval = export_macho_for_array(relocator, extrelocs, &dst); + require_noerr(rval, finish); + + count = dst - start; + + memset(dysymtabhdr, 0, sizeof(*dysymtabhdr)); + dysymtabhdr->cmd = LC_DYSYMTAB; + dysymtabhdr->cmdsize = (uint32_t) sizeof(*dysymtabhdr); + dysymtabhdr->locreloff = (uint32_t) *data_offset; + dysymtabhdr->nlocrel = (uint32_t) count; + + *data_offset += count * sizeof(struct relocation_info); + #if SPLIT_KEXTS_DEBUG - kxld_log(kKxldLogLinking, kKxldLogErr, - "%p >>> Start of dysymtabhdr (size %lu) <%s> ", - (void *) dysymtabhdr, - sizeof(*dysymtabhdr), - __func__); - kxld_log(kKxldLogLinking, kKxldLogErr, - "%p <<< End of dysymtabhdr <%s> ", - (void *) ((u_char *)dysymtabhdr + sizeof(*dysymtabhdr)), - __func__); - - kxld_log(kKxldLogLinking, kKxldLogErr, - "dysymtabhdr at %p: cmdsize %u indirectsymoff %u nindirectsyms %u extreloff %u nextrel %u locreloff %u nlocrel %u <%s>", - (void *) dysymtabhdr, - dysymtabhdr->cmdsize, - dysymtabhdr->indirectsymoff, - dysymtabhdr->nindirectsyms, - dysymtabhdr->extreloff, - dysymtabhdr->nextrel, - dysymtabhdr->locreloff, - dysymtabhdr->nlocrel, - __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + "%p >>> Start of dysymtabhdr (size %lu) <%s> ", + (void *) dysymtabhdr, + sizeof(*dysymtabhdr), + __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + "%p <<< End of dysymtabhdr <%s> ", + (void *) ((u_char *)dysymtabhdr + sizeof(*dysymtabhdr)), + __func__); + + kxld_log(kKxldLogLinking, kKxldLogErr, + "dysymtabhdr at %p: cmdsize %u indirectsymoff %u nindirectsyms %u extreloff %u nextrel %u locreloff %u nlocrel %u <%s>", + (void *) dysymtabhdr, + dysymtabhdr->cmdsize, + dysymtabhdr->indirectsymoff, + dysymtabhdr->nindirectsyms, + dysymtabhdr->extreloff, + dysymtabhdr->nextrel, + dysymtabhdr->locreloff, + dysymtabhdr->nlocrel, + __func__); #endif - - rval = KERN_SUCCESS; + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_PIC_KEXTS */ @@ -676,34 +677,34 @@ kxld_addr_t kxld_relocator_get_pointer_at_addr(const KXLDRelocator *relocator, const u_char *data, u_long offset) { - kxld_addr_t value; + kxld_addr_t value; - KXLD_3264_FUNC(relocator->is_32_bit, value, - get_pointer_at_addr_32, 
get_pointer_at_addr_64, - relocator, data, offset); + KXLD_3264_FUNC(relocator->is_32_bit, value, + get_pointer_at_addr_32, get_pointer_at_addr_64, + relocator, data, offset); - return value; + return value; } #if KXLD_USER_OR_ILP32 /******************************************************************************* *******************************************************************************/ static kxld_addr_t -get_pointer_at_addr_32(const KXLDRelocator *relocator, +get_pointer_at_addr_32(const KXLDRelocator *relocator, const u_char *data, u_long offset) { - uint32_t addr = 0; - - check(relocator); + uint32_t addr = 0; + + check(relocator); - addr = *(const uint32_t *) ((const void *) (data + offset)); + addr = *(const uint32_t *) ((const void *) (data + offset)); #if !KERNEL - if (relocator->swap) { - addr = OSSwapInt32(addr); - } + if (relocator->swap) { + addr = OSSwapInt32(addr); + } #endif - return align_raw_function_address(relocator, addr); + return align_raw_function_address(relocator, addr); } #endif /* KXLD_USER_OR_ILP32 */ @@ -711,31 +712,31 @@ get_pointer_at_addr_32(const KXLDRelocator *relocator, /******************************************************************************* *******************************************************************************/ static kxld_addr_t -get_pointer_at_addr_64(const KXLDRelocator *relocator, +get_pointer_at_addr_64(const KXLDRelocator *relocator, const u_char *data, u_long offset) { - uint64_t addr = 0; - - check(relocator); + uint64_t addr = 0; + + check(relocator); + + addr = *(const uint64_t *) ((const void *) (data + offset)); - addr = *(const uint64_t *) ((const void *) (data + offset)); - #if !KERNEL - if (relocator->swap) { - addr = OSSwapInt64(addr); - } + if (relocator->swap) { + addr = OSSwapInt64(addr); + } #endif - return align_raw_function_address(relocator, addr); + return align_raw_function_address(relocator, addr); } #endif /* KXLD_USER_OR_LP64 */ /******************************************************************************* *******************************************************************************/ -void +void kxld_relocator_set_vtables(KXLDRelocator *relocator, const KXLDDict *vtables) { - relocator->vtables = vtables; + relocator->vtables = vtables; } /******************************************************************************* @@ -748,247 +749,249 @@ kxld_relocator_set_vtables(KXLDRelocator *relocator, const KXLDDict *vtables) static kxld_addr_t align_raw_function_address(const KXLDRelocator *relocator, kxld_addr_t value) { - if (relocator->function_align) { - value &= ~((1ULL << relocator->function_align) - 1); - } + if (relocator->function_align) { + value &= ~((1ULL << relocator->function_align) - 1); + } - return value; + return value; } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_relocator_process_sect_reloc(KXLDRelocator *relocator, const KXLDReloc *reloc, const KXLDSect *sect) { - kern_return_t rval = KERN_FAILURE; - u_char *instruction = NULL; - kxld_addr_t target = 0; - kxld_addr_t pair_target = 0; - kxld_addr_t base_pc = 0; - kxld_addr_t link_pc = 0; - kxld_addr_t link_disp = 0; + kern_return_t rval = KERN_FAILURE; + u_char *instruction = NULL; + kxld_addr_t target = 0; + kxld_addr_t pair_target = 0; + kxld_addr_t base_pc = 0; + kxld_addr_t link_pc = 0; + kxld_addr_t link_disp = 0; + + check(relocator); + check(reloc); + check(sect); - 
check(relocator); - check(reloc); - check(sect); + /* Find the instruction */ - /* Find the instruction */ + instruction = sect->data + reloc->address; - instruction = sect->data + reloc->address; + /* Calculate the target */ - /* Calculate the target */ + rval = calculate_targets(relocator, &target, &pair_target, reloc); + require_noerr(rval, finish); - rval = calculate_targets(relocator, &target, &pair_target, reloc); - require_noerr(rval, finish); + base_pc = reloc->address; + link_pc = base_pc + sect->link_addr; + link_disp = sect->link_addr - sect->base_addr; - base_pc = reloc->address; - link_pc = base_pc + sect->link_addr; - link_disp = sect->link_addr - sect->base_addr; + /* Relocate */ - /* Relocate */ + rval = relocator->process_reloc(relocator, instruction, reloc->length, + reloc->pcrel, base_pc, link_pc, link_disp, reloc->reloc_type, target, + pair_target, relocator->swap); + require_noerr(rval, finish); - rval = relocator->process_reloc(relocator, instruction, reloc->length, - reloc->pcrel, base_pc, link_pc, link_disp, reloc->reloc_type, target, - pair_target, relocator->swap); - require_noerr(rval, finish); - - /* Return */ + /* Return */ - relocator->current_vtable = NULL; - rval = KERN_SUCCESS; + relocator->current_vtable = NULL; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_reloc_update_symindex(KXLDReloc *reloc, u_int symindex) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - require_action(reloc->target_type == KXLD_TARGET_SYMBOLNUM, - finish, rval = KERN_FAILURE); + require_action(reloc->target_type == KXLD_TARGET_SYMBOLNUM, + finish, rval = KERN_FAILURE); - reloc->target = symindex; + reloc->target = symindex; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_relocator_process_table_reloc(KXLDRelocator *relocator, - const KXLDReloc *reloc, - const KXLDSeg *seg, - kxld_addr_t link_addr) + const KXLDReloc *reloc, + const KXLDSeg *seg, + kxld_addr_t link_addr) { - kern_return_t rval = KERN_FAILURE; - u_char *instruction = NULL; - kxld_addr_t target = 0; - kxld_addr_t pair_target = 0; - kxld_addr_t base_pc = 0; - kxld_addr_t link_pc = 0; - u_long offset = 0; + kern_return_t rval = KERN_FAILURE; + u_char *instruction = NULL; + kxld_addr_t target = 0; + kxld_addr_t pair_target = 0; + kxld_addr_t base_pc = 0; + kxld_addr_t link_pc = 0; + u_long offset = 0; - check(relocator); - check(reloc); + check(relocator); + check(reloc); - /* Find the instruction in original kext file we are trying to link */ + /* Find the instruction in original kext file we are trying to link */ - offset = (u_long)(seg->fileoff + (reloc->address - seg->base_addr)); - instruction = relocator->file + offset; + offset = (u_long)(seg->fileoff + (reloc->address - seg->base_addr)); + instruction = relocator->file + offset; - /* Calculate the target */ + /* Calculate the target */ - rval = calculate_targets(relocator, &target, &pair_target, reloc); - require_noerr(rval, finish); + rval = calculate_targets(relocator, &target, &pair_target, reloc); + require_noerr(rval, finish); - base_pc = reloc->address; - link_pc = base_pc + link_addr; - if 
(kxld_seg_is_split_seg(seg)) { - // link_pc for split segment special case, do not add in the base_pc - link_pc = link_addr; - } + base_pc = reloc->address; + link_pc = base_pc + link_addr; + if (kxld_seg_is_split_seg(seg)) { + // link_pc for split segment special case, do not add in the base_pc + link_pc = link_addr; + } - /* Relocate */ + /* Relocate */ - rval = relocator->process_reloc(relocator, instruction, reloc->length, - reloc->pcrel, base_pc, link_pc, link_addr, reloc->reloc_type, target, - pair_target, relocator->swap); - require_noerr(rval, finish); - - /* Return */ + rval = relocator->process_reloc(relocator, instruction, reloc->length, + reloc->pcrel, base_pc, link_pc, link_addr, reloc->reloc_type, target, + pair_target, relocator->swap); + require_noerr(rval, finish); - relocator->current_vtable = NULL; - rval = KERN_SUCCESS; + /* Return */ + + relocator->current_vtable = NULL; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ static kern_return_t -calculate_targets(KXLDRelocator *relocator, kxld_addr_t *_target, +calculate_targets(KXLDRelocator *relocator, kxld_addr_t *_target, kxld_addr_t *_pair_target, const KXLDReloc *reloc) { - kern_return_t rval = KERN_FAILURE; - const KXLDSect *sect = NULL; - const KXLDSym *sym = NULL; - kxld_addr_t target = 0; - kxld_addr_t pair_target = 0; - char *demangled_name = NULL; - size_t demangled_length = 0; - - check(_target); - check(_pair_target); - *_target = 0; - *_pair_target = 0; - - /* Find the target based on the lookup type */ - - switch(reloc->target_type) { - case KXLD_TARGET_LOOKUP: - require_action(reloc->pair_target_type == KXLD_TARGET_NONE || - reloc->pair_target_type == KXLD_TARGET_LOOKUP || - reloc->pair_target_type == KXLD_TARGET_VALUE, - finish, rval=KERN_FAILURE); - - rval = get_target_by_address_lookup(&target, reloc->target, - relocator->sectarray); - require_noerr(rval, finish); - - if (reloc->pair_target_type == KXLD_TARGET_LOOKUP) { - rval = get_target_by_address_lookup(&pair_target, - reloc->pair_target, relocator->sectarray); - require_noerr(rval, finish); - } else if (reloc->pair_target_type == KXLD_TARGET_VALUE) { - pair_target = reloc->pair_target; - } - break; - case KXLD_TARGET_SECTNUM: - require_action(reloc->pair_target_type == KXLD_TARGET_NONE || - reloc->pair_target_type == KXLD_TARGET_VALUE, - finish, rval=KERN_FAILURE); - - /* Get the target's section by section number */ - sect = kxld_array_get_item(relocator->sectarray, reloc->target); - require_action(sect, finish, rval=KERN_FAILURE); - - /* target is the change in the section's address */ - target = sect->link_addr - sect->base_addr; - - if (reloc->pair_target_type) { - pair_target = reloc->pair_target; - } else { - /* x86_64 needs to know when we have a non-external relocation, - * so we hack that information in here. 
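For reference, the address arithmetic in the two process-reloc paths above, and the KXLD_TARGET_SECTNUM case, all reduce to one quantity: the section's slide (link address minus base address). A minimal standalone sketch with assumed example values (the numbers are hypothetical, not part of kxld):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kxld_addr_t;

int main(void)
{
    /* Assumed example: a section compiled at 0x1000, slid to 0x5000,
     * with a fixup 0x40 bytes into the section. */
    kxld_addr_t base_addr = 0x1000;
    kxld_addr_t link_addr = 0x5000;
    kxld_addr_t reloc_address = 0x40;

    kxld_addr_t base_pc   = reloc_address;           /* PC before linking */
    kxld_addr_t link_pc   = base_pc + link_addr;     /* PC after linking  */
    kxld_addr_t link_disp = link_addr - base_addr;   /* the slide         */

    printf("base_pc=%#llx link_pc=%#llx slide=%#llx\n",
        (unsigned long long)base_pc, (unsigned long long)link_pc,
        (unsigned long long)link_disp);
    return 0;
}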
- */ - pair_target = TRUE; - } - break; - case KXLD_TARGET_SYMBOLNUM: - require_action(reloc->pair_target_type == KXLD_TARGET_NONE || - reloc->pair_target_type == KXLD_TARGET_GOT || - reloc->pair_target_type == KXLD_TARGET_SYMBOLNUM || - reloc->pair_target_type == KXLD_TARGET_VALUE, finish, - rval=KERN_FAILURE); - - /* Get the target's symbol by symbol number */ - sym = kxld_symtab_get_symbol_by_index(relocator->symtab, reloc->target); - require_action(sym, finish, rval=KERN_FAILURE); - - /* If this symbol is a padslot that has already been replaced, then the - * only way a relocation entry can still reference it is if there is a - * vtable that has not been patched. The vtable patcher uses the - * MetaClass structure to find classes for patching, so an unpatched - * vtable means that there is an OSObject-dervied class that is missing - * its OSDeclare/OSDefine macros. - */ - require_action(!kxld_sym_is_padslot(sym) || !kxld_sym_is_replaced(sym), - finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogRelocatingPatchedSym, - kxld_demangle(sym->name, &demangled_name, &demangled_length))); - - target = sym->link_addr; - - if (kxld_sym_is_vtable(sym)) { - relocator->current_vtable = kxld_dict_find(relocator->vtables, sym->name); - } - - /* Some relocation types need the GOT entry address instead of the - * symbol's actual address. These types don't have pair relocation - * entries, so we store the GOT entry address as the pair target. - */ - if (reloc->pair_target_type == KXLD_TARGET_VALUE) { - pair_target = reloc->pair_target; - } else if (reloc->pair_target_type == KXLD_TARGET_SYMBOLNUM ) { - sym = kxld_symtab_get_symbol_by_index(relocator->symtab, - reloc->pair_target); - require_action(sym, finish, rval=KERN_FAILURE); - pair_target = sym->link_addr; - } else if (reloc->pair_target_type == KXLD_TARGET_GOT) { - pair_target = sym->got_addr; - } - break; - default: - rval = KERN_FAILURE; - goto finish; - } - - *_target = target; - *_pair_target = pair_target; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + const KXLDSect *sect = NULL; + const KXLDSym *sym = NULL; + kxld_addr_t target = 0; + kxld_addr_t pair_target = 0; + char *demangled_name = NULL; + size_t demangled_length = 0; + + check(_target); + check(_pair_target); + *_target = 0; + *_pair_target = 0; + + /* Find the target based on the lookup type */ + + switch (reloc->target_type) { + case KXLD_TARGET_LOOKUP: + require_action(reloc->pair_target_type == KXLD_TARGET_NONE || + reloc->pair_target_type == KXLD_TARGET_LOOKUP || + reloc->pair_target_type == KXLD_TARGET_VALUE, + finish, rval = KERN_FAILURE); + + rval = get_target_by_address_lookup(&target, reloc->target, + relocator->sectarray); + require_noerr(rval, finish); + + if (reloc->pair_target_type == KXLD_TARGET_LOOKUP) { + rval = get_target_by_address_lookup(&pair_target, + reloc->pair_target, relocator->sectarray); + require_noerr(rval, finish); + } else if (reloc->pair_target_type == KXLD_TARGET_VALUE) { + pair_target = reloc->pair_target; + } + break; + case KXLD_TARGET_SECTNUM: + require_action(reloc->pair_target_type == KXLD_TARGET_NONE || + reloc->pair_target_type == KXLD_TARGET_VALUE, + finish, rval = KERN_FAILURE); + + /* Get the target's section by section number */ + sect = kxld_array_get_item(relocator->sectarray, reloc->target); + require_action(sect, finish, rval = KERN_FAILURE); + + /* target is the change in the section's address */ + target = sect->link_addr - sect->base_addr; + + if (reloc->pair_target_type) { + pair_target = 
reloc->pair_target; + } else { + /* x86_64 needs to know when we have a non-external relocation, + * so we hack that information in here. + */ + pair_target = TRUE; + } + break; + case KXLD_TARGET_SYMBOLNUM: + require_action(reloc->pair_target_type == KXLD_TARGET_NONE || + reloc->pair_target_type == KXLD_TARGET_GOT || + reloc->pair_target_type == KXLD_TARGET_SYMBOLNUM || + reloc->pair_target_type == KXLD_TARGET_VALUE, finish, + rval = KERN_FAILURE); + + /* Get the target's symbol by symbol number */ + sym = kxld_symtab_get_symbol_by_index(relocator->symtab, reloc->target); + require_action(sym, finish, rval = KERN_FAILURE); + + /* If this symbol is a padslot that has already been replaced, then the + * only way a relocation entry can still reference it is if there is a + * vtable that has not been patched. The vtable patcher uses the + * MetaClass structure to find classes for patching, so an unpatched + * vtable means that there is an OSObject-derived class that is missing + * its OSDeclare/OSDefine macros. + */ + if (kxld_sym_is_padslot(sym) && kxld_sym_is_replaced(sym)) { + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogRelocatingPatchedSym, + kxld_demangle(sym->name, &demangled_name, &demangled_length)); + } + + target = sym->link_addr; + + if (kxld_sym_is_vtable(sym)) { + relocator->current_vtable = kxld_dict_find(relocator->vtables, sym->name); + } + + /* Some relocation types need the GOT entry address instead of the + * symbol's actual address. These types don't have pair relocation + * entries, so we store the GOT entry address as the pair target. + */ + if (reloc->pair_target_type == KXLD_TARGET_VALUE) { + pair_target = reloc->pair_target; + } else if (reloc->pair_target_type == KXLD_TARGET_SYMBOLNUM) { + sym = kxld_symtab_get_symbol_by_index(relocator->symtab, + reloc->pair_target); + require_action(sym, finish, rval = KERN_FAILURE); + pair_target = sym->link_addr; + } else if (reloc->pair_target_type == KXLD_TARGET_GOT) { + pair_target = sym->got_addr; + } + break; + default: + rval = KERN_FAILURE; + goto finish; + } + + *_target = target; + *_pair_target = pair_target; + rval = KERN_SUCCESS; finish: - if (demangled_name) kxld_free(demangled_name, demangled_length); - return rval; + if (demangled_name) { + kxld_free(demangled_name, demangled_length); + } + return rval; } /******************************************************************************* @@ -997,32 +1000,34 @@ static kern_return_t get_target_by_address_lookup(kxld_addr_t *target, kxld_addr_t addr, const KXLDArray *sectarray) { - kern_return_t rval = KERN_FAILURE; - const KXLDSect *sect = NULL; - kxld_addr_t start = 0; - kxld_addr_t end = 0; - u_int i = 0; - - check(target); - check(sectarray); - *target = 0; - - for (i = 0; i < sectarray->nitems; ++i) { - sect = kxld_array_get_item(sectarray, i); - start = sect->base_addr; - end = start + sect->size; - - if (start <= addr && addr < end) break; - - sect = NULL; - } - require_action(sect, finish, rval=KERN_FAILURE); - - *target = sect->link_addr - sect->base_addr; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + const KXLDSect *sect = NULL; + kxld_addr_t start = 0; + kxld_addr_t end = 0; + u_int i = 0; + + check(target); + check(sectarray); + *target = 0; + + for (i = 0; i < sectarray->nitems; ++i) { + sect = kxld_array_get_item(sectarray, i); + start = sect->base_addr; + end = start + sect->size; + + if (start <= addr && addr < end) { + break; + } + + sect = NULL; + } + require_action(sect, finish, rval = KERN_FAILURE); + + *target = sect->link_addr - 
sect->base_addr; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -1030,22 +1035,22 @@ finish: static kern_return_t check_for_direct_pure_virtual_call(const KXLDRelocator *relocator, u_long offset) { - kern_return_t rval = KERN_FAILURE; - const KXLDVTableEntry *entry = NULL; - - if (relocator->current_vtable) { - entry = kxld_vtable_get_entry_for_offset(relocator->current_vtable, - offset, relocator->is_32_bit); - require_action(!entry || !entry->patched.name || - !kxld_sym_name_is_pure_virtual(entry->patched.name), - finish, rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, - kKxldLogDirectPureVirtualCall)); - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + const KXLDVTableEntry *entry = NULL; + + if (relocator->current_vtable) { + entry = kxld_vtable_get_entry_for_offset(relocator->current_vtable, + offset, relocator->is_32_bit); + require_action(!entry || !entry->patched.name || + !kxld_sym_name_is_pure_virtual(entry->patched.name), + finish, rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, + kKxldLogDirectPureVirtualCall)); + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_PIC_KEXTS @@ -1054,23 +1059,23 @@ finish: static u_long get_macho_data_size_for_array(const KXLDArray *relocs) { - const KXLDReloc *reloc = NULL; - u_int i = 0; - u_long size = 0; - - check(relocs); - - for (i = 0; i < relocs->nitems; ++i) { - reloc = kxld_array_get_item(relocs, i); - if (!reloc->pcrel) { - size += sizeof(struct relocation_info); - if(reloc->pair_target_type != KXLD_TARGET_NONE) { - size += sizeof(struct relocation_info); - } - } - } - - return size; + const KXLDReloc *reloc = NULL; + u_int i = 0; + u_long size = 0; + + check(relocs); + + for (i = 0; i < relocs->nitems; ++i) { + reloc = kxld_array_get_item(relocs, i); + if (!reloc->pcrel) { + size += sizeof(struct relocation_info); + if (reloc->pair_target_type != KXLD_TARGET_NONE) { + size += sizeof(struct relocation_info); + } + } + } + + return size; } /******************************************************************************* @@ -1079,556 +1084,577 @@ static kern_return_t export_macho_for_array(const KXLDRelocator *relocator, const KXLDArray *relocs, struct relocation_info **dstp) { - kern_return_t rval = KERN_FAILURE; - const KXLDReloc *reloc = NULL; - struct relocation_info *dst = NULL; - struct scattered_relocation_info *scatdst = NULL; - u_int i = 0; - - dst = *dstp; - - for (i = 0; i < relocs->nitems; ++i) { - reloc = kxld_array_get_item(relocs, i); - scatdst = (struct scattered_relocation_info *) dst; - - if (reloc->pcrel) { - continue; - } - - switch (reloc->target_type) { - case KXLD_TARGET_LOOKUP: - if (kaslr_offsets) { - if (kaslr_offsets_index >= kaslr_offsets_count) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "kaslr_offsets overflow %d > %d <%s> ", - kaslr_offsets_index, kaslr_offsets_count, - __func__); - abort(); - } - // reloc->address is really an offset from the start of the kext - *(kaslr_offsets + kaslr_offsets_index++) = reloc->address; - } - scatdst->r_address = reloc->address; - scatdst->r_pcrel = reloc->pcrel; - scatdst->r_length = reloc->length; - scatdst->r_type = reloc->reloc_type; - scatdst->r_value = reloc->target; - scatdst->r_scattered = 1; - break; - case KXLD_TARGET_SECTNUM: - if (kaslr_offsets) { - if (kaslr_offsets_index >= kaslr_offsets_count) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "kaslr_offsets overflow <%s> ", __func__); - abort(); - 
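The section lookup just rewritten above is a linear scan with a half-open range test. The same logic in isolation (hypothetical sect_view type; assumes sections do not overlap):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t kxld_addr_t;

struct sect_view {                 /* stand-in for the KXLDSect fields used */
    kxld_addr_t base_addr;
    kxld_addr_t link_addr;
    kxld_addr_t size;
};

/* Store the owning section's slide for addr and return 0, or return -1
 * when addr is not covered; same [start, end) test as the loop above. */
static int
slide_for_addr(const struct sect_view *sects, size_t n,
    kxld_addr_t addr, kxld_addr_t *slide)
{
    for (size_t i = 0; i < n; ++i) {
        kxld_addr_t start = sects[i].base_addr;
        kxld_addr_t end = start + sects[i].size;
        if (start <= addr && addr < end) {
            *slide = sects[i].link_addr - sects[i].base_addr;
            return 0;
        }
    }
    return -1;
}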
} - // reloc->address is really an offset from the start of the kext - *(kaslr_offsets + kaslr_offsets_index++) = reloc->address; - } - dst->r_address = reloc->address; - dst->r_pcrel = reloc->pcrel; - dst->r_length = reloc->length; - dst->r_type = reloc->reloc_type; - dst->r_symbolnum = reloc->target + 1; - dst->r_extern = 0; - break; - case KXLD_TARGET_SYMBOLNUM: - /* Assume that everything will be slid together; otherwise, - * there is no sensible value for the section number. - */ - if (kaslr_offsets) { - if (kaslr_offsets_index >= kaslr_offsets_count) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "kaslr_offsets overflow <%s> ", __func__); - abort(); - } - // reloc->address is really an offset from the start of the kext - *(kaslr_offsets + kaslr_offsets_index++) = reloc->address; - } - dst->r_address = reloc->address; - dst->r_pcrel = reloc->pcrel; - dst->r_length = reloc->length; - dst->r_type = reloc->reloc_type; - dst->r_symbolnum = 1; - dst->r_extern = 0; - break; - default: - rval = KERN_FAILURE; - goto finish; - } - - ++dst; - - if(reloc->pair_target_type != KXLD_TARGET_NONE) { - ++i; - require_action(i < relocs->nitems, finish, rval=KERN_FAILURE); - scatdst = (struct scattered_relocation_info *) dst; - switch (reloc->pair_target_type) { - case KXLD_TARGET_LOOKUP: - scatdst->r_address = reloc->pair_address; - scatdst->r_pcrel = reloc->pcrel; - scatdst->r_length = reloc->length; - scatdst->r_type = relocator->reloc_get_pair_type(reloc->reloc_type); - scatdst->r_value = reloc->pair_target; - scatdst->r_scattered = 1; - break; - case KXLD_TARGET_SECTNUM: - dst->r_address = reloc->pair_address; - dst->r_pcrel = reloc->pcrel; - dst->r_length = reloc->length; - dst->r_type = relocator->reloc_get_pair_type(reloc->reloc_type); - dst->r_symbolnum = reloc->pair_target + 1; - dst->r_extern = 0; - break; - case KXLD_TARGET_SYMBOLNUM: - dst->r_address = reloc->pair_address; - dst->r_pcrel = reloc->pcrel; - dst->r_length = reloc->length; - dst->r_type = relocator->reloc_get_pair_type(reloc->reloc_type); - dst->r_symbolnum = 1; - dst->r_extern = 0; - break; - default: - rval = KERN_FAILURE; - goto finish; - } - ++dst; - } - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + const KXLDReloc *reloc = NULL; + struct relocation_info *dst = NULL; + struct scattered_relocation_info *scatdst = NULL; + u_int i = 0; + + dst = *dstp; + + for (i = 0; i < relocs->nitems; ++i) { + reloc = kxld_array_get_item(relocs, i); + scatdst = (struct scattered_relocation_info *) dst; + + if (reloc->pcrel) { + continue; + } + + switch (reloc->target_type) { + case KXLD_TARGET_LOOKUP: + if (kaslr_offsets) { + if (kaslr_offsets_index >= kaslr_offsets_count) { + kxld_log(kKxldLogLinking, kKxldLogErr, + "kaslr_offsets overflow %d > %d <%s> ", + kaslr_offsets_index, kaslr_offsets_count, + __func__); + abort(); + } + // reloc->address is really an offset from the start of the kext + *(kaslr_offsets + kaslr_offsets_index++) = reloc->address; + } + scatdst->r_address = reloc->address; + scatdst->r_pcrel = reloc->pcrel; + scatdst->r_length = reloc->length; + scatdst->r_type = reloc->reloc_type; + scatdst->r_value = reloc->target; + scatdst->r_scattered = 1; + break; + case KXLD_TARGET_SECTNUM: + if (kaslr_offsets) { + if (kaslr_offsets_index >= kaslr_offsets_count) { + kxld_log(kKxldLogLinking, kKxldLogErr, + "kaslr_offsets overflow <%s> ", __func__); + abort(); + } + // reloc->address is really an offset from the start of the kext + *(kaslr_offsets + kaslr_offsets_index++) = reloc->address; + } + dst->r_address = 
reloc->address; + dst->r_pcrel = reloc->pcrel; + dst->r_length = reloc->length; + dst->r_type = reloc->reloc_type; + dst->r_symbolnum = reloc->target + 1; + dst->r_extern = 0; + break; + case KXLD_TARGET_SYMBOLNUM: + /* Assume that everything will be slid together; otherwise, + * there is no sensible value for the section number. + */ + if (kaslr_offsets) { + if (kaslr_offsets_index >= kaslr_offsets_count) { + kxld_log(kKxldLogLinking, kKxldLogErr, + "kaslr_offsets overflow <%s> ", __func__); + abort(); + } + // reloc->address is really an offset from the start of the kext + *(kaslr_offsets + kaslr_offsets_index++) = reloc->address; + } + dst->r_address = reloc->address; + dst->r_pcrel = reloc->pcrel; + dst->r_length = reloc->length; + dst->r_type = reloc->reloc_type; + dst->r_symbolnum = 1; + dst->r_extern = 0; + break; + default: + rval = KERN_FAILURE; + goto finish; + } + + ++dst; + + if (reloc->pair_target_type != KXLD_TARGET_NONE) { + ++i; + require_action(i < relocs->nitems, finish, rval = KERN_FAILURE); + scatdst = (struct scattered_relocation_info *) dst; + switch (reloc->pair_target_type) { + case KXLD_TARGET_LOOKUP: + scatdst->r_address = reloc->pair_address; + scatdst->r_pcrel = reloc->pcrel; + scatdst->r_length = reloc->length; + scatdst->r_type = relocator->reloc_get_pair_type(reloc->reloc_type); + scatdst->r_value = reloc->pair_target; + scatdst->r_scattered = 1; + break; + case KXLD_TARGET_SECTNUM: + dst->r_address = reloc->pair_address; + dst->r_pcrel = reloc->pcrel; + dst->r_length = reloc->length; + dst->r_type = relocator->reloc_get_pair_type(reloc->reloc_type); + dst->r_symbolnum = reloc->pair_target + 1; + dst->r_extern = 0; + break; + case KXLD_TARGET_SYMBOLNUM: + dst->r_address = reloc->pair_address; + dst->r_pcrel = reloc->pcrel; + dst->r_length = reloc->length; + dst->r_type = relocator->reloc_get_pair_type(reloc->reloc_type); + dst->r_symbolnum = 1; + dst->r_extern = 0; + break; + default: + rval = KERN_FAILURE; + goto finish; + } + ++dst; + } + } + + rval = KERN_SUCCESS; finish: - *dstp = dst; - return rval; + *dstp = dst; + return rval; } #endif /* KXLD_PIC_KEXTS */ -#if KXLD_USER_OR_I386 +#if KXLD_USER_OR_I386 /******************************************************************************* *******************************************************************************/ static boolean_t generic_reloc_has_pair(u_int _type) { - enum reloc_type_generic type = _type; + enum reloc_type_generic type = _type; - return (type == GENERIC_RELOC_SECTDIFF || - type == GENERIC_RELOC_LOCAL_SECTDIFF); + return type == GENERIC_RELOC_SECTDIFF || + type == GENERIC_RELOC_LOCAL_SECTDIFF; } /******************************************************************************* *******************************************************************************/ -static u_int +static u_int generic_reloc_get_pair_type(u_int _prev_type __unused) { - return GENERIC_RELOC_PAIR; + return GENERIC_RELOC_PAIR; } /******************************************************************************* *******************************************************************************/ -static boolean_t generic_reloc_has_got(u_int _type __unused) +static boolean_t +generic_reloc_has_got(u_int _type __unused) { - return FALSE; + return FALSE; } /******************************************************************************* *******************************************************************************/ -static kern_return_t -generic_process_reloc(const KXLDRelocator *relocator, u_char *instruction, - u_int length, 
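The export loop above emits two on-disk record shapes from <mach-o/reloc.h>. A sketch of both for one 4-byte, non-PC-relative fixup (assumes a userland build where that header is available; the field layouts are the standard Mach-O ones):

#include <stdint.h>
#include <string.h>
#include <mach-o/reloc.h>

static void
fill_records(uint32_t address, uint32_t value, uint32_t sectnum,
    struct scattered_relocation_info *scat, struct relocation_info *plain)
{
    /* Scattered form: the pre-slide target address rides in r_value. */
    memset(scat, 0, sizeof(*scat));
    scat->r_scattered = 1;
    scat->r_address = address & 0x00FFFFFF;  /* 24-bit field */
    scat->r_pcrel = 0;
    scat->r_length = 2;                      /* 1 << 2 == 4 bytes */
    scat->r_type = GENERIC_RELOC_VANILLA;
    scat->r_value = (int32_t)value;

    /* Plain form: a 1-based section ordinal with r_extern == 0, as in
     * the KXLD_TARGET_SECTNUM case above. */
    memset(plain, 0, sizeof(*plain));
    plain->r_address = (int32_t)address;
    plain->r_pcrel = 0;
    plain->r_length = 2;
    plain->r_type = GENERIC_RELOC_VANILLA;
    plain->r_symbolnum = sectnum;
    plain->r_extern = 0;
}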
u_int pcrel, kxld_addr_t _base_pc, kxld_addr_t _link_pc, - kxld_addr_t _link_disp __unused, u_int _type, kxld_addr_t _target, +static kern_return_t +generic_process_reloc(const KXLDRelocator *relocator, u_char *instruction, + u_int length, u_int pcrel, kxld_addr_t _base_pc, kxld_addr_t _link_pc, + kxld_addr_t _link_disp __unused, u_int _type, kxld_addr_t _target, kxld_addr_t _pair_target, boolean_t swap __unused) { - kern_return_t rval = KERN_FAILURE; - uint32_t base_pc = (uint32_t) _base_pc; - uint32_t link_pc = (uint32_t) _link_pc; - uint32_t *instr_addr = NULL; - uint32_t instr_data = 0; - uint32_t target = (uint32_t) _target; - uint32_t pair_target = (uint32_t) _pair_target; - enum reloc_type_generic type = _type; + kern_return_t rval = KERN_FAILURE; + uint32_t base_pc = (uint32_t) _base_pc; + uint32_t link_pc = (uint32_t) _link_pc; + uint32_t *instr_addr = NULL; + uint32_t instr_data = 0; + uint32_t target = (uint32_t) _target; + uint32_t pair_target = (uint32_t) _pair_target; + enum reloc_type_generic type = _type; - check(instruction); - require_action(length == 2, finish, rval=KERN_FAILURE); + check(instruction); + require_action(length == 2, finish, rval = KERN_FAILURE); - if (pcrel) target = target + base_pc - link_pc; + if (pcrel) { + target = target + base_pc - link_pc; + } - instr_addr = (uint32_t *) ((void *) instruction); - instr_data = *instr_addr; + instr_addr = (uint32_t *) ((void *) instruction); + instr_data = *instr_addr; #if !KERNEL - if (swap) instr_data = OSSwapInt32(instr_data); + if (swap) { + instr_data = OSSwapInt32(instr_data); + } #endif - rval = check_for_direct_pure_virtual_call(relocator, instr_data); - require_noerr(rval, finish); - - switch (type) { - case GENERIC_RELOC_VANILLA: - instr_data += target; - break; - case GENERIC_RELOC_SECTDIFF: - case GENERIC_RELOC_LOCAL_SECTDIFF: - instr_data = instr_data + target - pair_target; - break; - case GENERIC_RELOC_PB_LA_PTR: - rval = KERN_FAILURE; - goto finish; - case GENERIC_RELOC_PAIR: - default: - rval = KERN_FAILURE; - goto finish; - } + rval = check_for_direct_pure_virtual_call(relocator, instr_data); + require_noerr(rval, finish); + + switch (type) { + case GENERIC_RELOC_VANILLA: + instr_data += target; + break; + case GENERIC_RELOC_SECTDIFF: + case GENERIC_RELOC_LOCAL_SECTDIFF: + instr_data = instr_data + target - pair_target; + break; + case GENERIC_RELOC_PB_LA_PTR: + rval = KERN_FAILURE; + goto finish; + case GENERIC_RELOC_PAIR: + default: + rval = KERN_FAILURE; + goto finish; + } #if !KERNEL - if (swap) instr_data = OSSwapInt32(instr_data); + if (swap) { + instr_data = OSSwapInt32(instr_data); + } #endif - *instr_addr = instr_data; + *instr_addr = instr_data; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_I386 */ #if KXLD_USER_OR_X86_64 /******************************************************************************* *******************************************************************************/ -static boolean_t +static boolean_t x86_64_reloc_has_pair(u_int _type) { - enum reloc_type_x86_64 type = _type; + enum reloc_type_x86_64 type = _type; - return (type == X86_64_RELOC_SUBTRACTOR); + return type == X86_64_RELOC_SUBTRACTOR; } /******************************************************************************* *******************************************************************************/ -static u_int +static u_int x86_64_reloc_get_pair_type(u_int _prev_type __unused) { - return X86_64_RELOC_UNSIGNED; + return X86_64_RELOC_UNSIGNED; } 
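Stripped of the error handling and byte swapping, the i386 cases above come down to two formulas. A sketch (hypothetical helper, not part of kxld):

#include <stdint.h>

static uint32_t
generic_fixup(uint32_t instr_data, uint32_t target, uint32_t pair_target,
    int pcrel, uint32_t base_pc, uint32_t link_pc, int is_sectdiff)
{
    if (pcrel) {
        /* Re-bias a PC-relative target for the section's new address. */
        target = target + base_pc - link_pc;
    }
    if (is_sectdiff) {
        /* SECTDIFF: the stored value is a difference, so slide both ends. */
        return instr_data + target - pair_target;
    }
    /* VANILLA: add the slide. */
    return instr_data + target;
}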
/******************************************************************************* *******************************************************************************/ -static boolean_t +static boolean_t x86_64_reloc_has_got(u_int _type) { - enum reloc_type_x86_64 type = _type; + enum reloc_type_x86_64 type = _type; - return (type == X86_64_RELOC_GOT_LOAD || type == X86_64_RELOC_GOT); + return type == X86_64_RELOC_GOT_LOAD || type == X86_64_RELOC_GOT; } /******************************************************************************* *******************************************************************************/ -static kern_return_t -x86_64_process_reloc(const KXLDRelocator *relocator __unused, u_char *instruction, - u_int length, u_int pcrel, kxld_addr_t _base_pc __unused, - kxld_addr_t _link_pc, kxld_addr_t _link_disp, u_int _type, +static kern_return_t +x86_64_process_reloc(const KXLDRelocator *relocator __unused, u_char *instruction, + u_int length, u_int pcrel, kxld_addr_t _base_pc __unused, + kxld_addr_t _link_pc, kxld_addr_t _link_disp, u_int _type, kxld_addr_t _target, kxld_addr_t _pair_target, boolean_t swap __unused) { - kern_return_t rval = KERN_FAILURE; - enum reloc_type_x86_64 type = _type; - int32_t *instr32p = NULL; - int32_t instr32 = 0; - uint64_t *instr64p = NULL; - uint64_t instr64 = 0; - uint64_t target = _target; - uint64_t pair_target = _pair_target; - uint64_t link_pc = (uint64_t) _link_pc; - uint64_t link_disp = (uint64_t) _link_disp; - uint64_t adjustment = 0; - - check(instruction); - require_action(length == 2 || length == 3, - finish, rval=KERN_FAILURE); - - if (length == 2) { - instr32p = (int32_t *) ((void *) instruction); - instr32 = *instr32p; + kern_return_t rval = KERN_FAILURE; + enum reloc_type_x86_64 type = _type; + int32_t *instr32p = NULL; + int32_t instr32 = 0; + uint64_t *instr64p = NULL; + uint64_t instr64 = 0; + uint64_t target = _target; + uint64_t pair_target = _pair_target; + uint64_t link_pc = (uint64_t) _link_pc; + uint64_t link_disp = (uint64_t) _link_disp; + uint64_t adjustment = 0; + + check(instruction); + require_action(length == 2 || length == 3, + finish, rval = KERN_FAILURE); + + if (length == 2) { + instr32p = (int32_t *) ((void *) instruction); + instr32 = *instr32p; #if !KERNEL - if (swap) instr32 = OSSwapInt32(instr32); + if (swap) { + instr32 = OSSwapInt32(instr32); + } #endif - rval = check_for_direct_pure_virtual_call(relocator, instr32); - require_noerr(rval, finish); - - /* There are a number of different small adjustments for pc-relative - * relocation entries. The general case is to subtract the size of the - * relocation (represented by the length parameter), and it applies to - * the GOT types and external SIGNED types. The non-external signed types - * have a different adjustment corresponding to the specific type. - */ - switch (type) { - case X86_64_RELOC_SIGNED: - if (pair_target) { - adjustment = 0; - break; - } - /* Fall through */ - case X86_64_RELOC_SIGNED_1: - if (pair_target) { - adjustment = 1; - break; - } - /* Fall through */ - case X86_64_RELOC_SIGNED_2: - if (pair_target) { - adjustment = 2; - break; - } - /* Fall through */ - case X86_64_RELOC_SIGNED_4: - if (pair_target) { - adjustment = 4; - break; - } - /* Fall through */ - case X86_64_RELOC_BRANCH: - case X86_64_RELOC_GOT: - case X86_64_RELOC_GOT_LOAD: - adjustment = (1 << length); - break; - default: - break; - } - - /* Perform the actual relocation. 
All of the 32-bit relocations are - * pc-relative except for SUBTRACTOR, so a good chunk of the logic is - * stuck in calculate_displacement_x86_64. The signed relocations are - * a special case, because when they are non-external, the instruction - * already contains the pre-relocation displacement, so we only need to - * find the difference between how far the PC was relocated, and how - * far the target is relocated. Since the target variable already - * contains the difference between the target's base and link - * addresses, we add the difference between the PC's base and link - * addresses to the adjustment variable. This will yield the - * appropriate displacement in calculate_displacement. - */ - switch (type) { - case X86_64_RELOC_BRANCH: - require_action(pcrel, finish, rval=KERN_FAILURE); - adjustment += link_pc; - break; - case X86_64_RELOC_SIGNED: - case X86_64_RELOC_SIGNED_1: - case X86_64_RELOC_SIGNED_2: - case X86_64_RELOC_SIGNED_4: - require_action(pcrel, finish, rval=KERN_FAILURE); - adjustment += (pair_target) ? (link_disp) : (link_pc); - break; - case X86_64_RELOC_GOT: - case X86_64_RELOC_GOT_LOAD: - require_action(pcrel, finish, rval=KERN_FAILURE); - adjustment += link_pc; - target = pair_target; - break; - case X86_64_RELOC_SUBTRACTOR: - require_action(!pcrel, finish, rval=KERN_FAILURE); - instr32 = (int32_t) (target - pair_target); - break; - case X86_64_RELOC_UNSIGNED: - default: - rval = KERN_FAILURE; - goto finish; - } - - /* Call calculate_displacement for the pc-relative relocations */ - if (pcrel) { - rval = calculate_displacement_x86_64(target, adjustment, &instr32); - require_noerr(rval, finish); - } + rval = check_for_direct_pure_virtual_call(relocator, instr32); + require_noerr(rval, finish); + + /* There are a number of different small adjustments for pc-relative + * relocation entries. The general case is to subtract the size of the + * relocation (represented by the length parameter), and it applies to + * the GOT types and external SIGNED types. The non-external signed types + * have a different adjustment corresponding to the specific type. + */ + switch (type) { + case X86_64_RELOC_SIGNED: + if (pair_target) { + adjustment = 0; + break; + } + /* Fall through */ + case X86_64_RELOC_SIGNED_1: + if (pair_target) { + adjustment = 1; + break; + } + /* Fall through */ + case X86_64_RELOC_SIGNED_2: + if (pair_target) { + adjustment = 2; + break; + } + /* Fall through */ + case X86_64_RELOC_SIGNED_4: + if (pair_target) { + adjustment = 4; + break; + } + /* Fall through */ + case X86_64_RELOC_BRANCH: + case X86_64_RELOC_GOT: + case X86_64_RELOC_GOT_LOAD: + adjustment = (1 << length); + break; + default: + break; + } + + /* Perform the actual relocation. All of the 32-bit relocations are + * pc-relative except for SUBTRACTOR, so a good chunk of the logic is + * stuck in calculate_displacement_x86_64. The signed relocations are + * a special case, because when they are non-external, the instruction + * already contains the pre-relocation displacement, so we only need to + * find the difference between how far the PC was relocated, and how + * far the target is relocated. Since the target variable already + * contains the difference between the target's base and link + * addresses, we add the difference between the PC's base and link + * addresses to the adjustment variable. This will yield the + * appropriate displacement in calculate_displacement. 
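Restating the fall-through switch above as a lookup helper may make the adjustment rule easier to see: external SIGNED/BRANCH/GOT entries subtract the access size (1 << length), while non-external SIGNED_n entries use the fixed addend n. A sketch (enum names from <mach-o/x86_64/reloc.h>; nonexternal stands for the pair_target != 0 convention established in calculate_targets):

#include <stdint.h>
#include <mach-o/x86_64/reloc.h>

static uint64_t
x86_64_adjustment(enum reloc_type_x86_64 type, int nonexternal, unsigned length)
{
    switch (type) {
    case X86_64_RELOC_SIGNED:
        if (nonexternal) { return 0; }
    /* fall through */
    case X86_64_RELOC_SIGNED_1:
        if (nonexternal) { return 1; }
    /* fall through */
    case X86_64_RELOC_SIGNED_2:
        if (nonexternal) { return 2; }
    /* fall through */
    case X86_64_RELOC_SIGNED_4:
        if (nonexternal) { return 4; }
    /* fall through */
    case X86_64_RELOC_BRANCH:
    case X86_64_RELOC_GOT:
    case X86_64_RELOC_GOT_LOAD:
        return 1ULL << length;               /* access size in bytes */
    default:
        return 0;
    }
}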
+ */ + switch (type) { + case X86_64_RELOC_BRANCH: + require_action(pcrel, finish, rval = KERN_FAILURE); + adjustment += link_pc; + break; + case X86_64_RELOC_SIGNED: + case X86_64_RELOC_SIGNED_1: + case X86_64_RELOC_SIGNED_2: + case X86_64_RELOC_SIGNED_4: + require_action(pcrel, finish, rval = KERN_FAILURE); + adjustment += (pair_target) ? (link_disp) : (link_pc); + break; + case X86_64_RELOC_GOT: + case X86_64_RELOC_GOT_LOAD: + require_action(pcrel, finish, rval = KERN_FAILURE); + adjustment += link_pc; + target = pair_target; + break; + case X86_64_RELOC_SUBTRACTOR: + require_action(!pcrel, finish, rval = KERN_FAILURE); + instr32 = (int32_t) (target - pair_target); + break; + case X86_64_RELOC_UNSIGNED: + default: + rval = KERN_FAILURE; + goto finish; + } + + /* Call calculate_displacement for the pc-relative relocations */ + if (pcrel) { + rval = calculate_displacement_x86_64(target, adjustment, &instr32); + require_noerr(rval, finish); + } #if !KERNEL - if (swap) instr32 = OSSwapInt32(instr32); + if (swap) { + instr32 = OSSwapInt32(instr32); + } #endif - - *instr32p = instr32; - } else { - instr64p = (uint64_t *) ((void *) instruction); - instr64 = *instr64p; + + *instr32p = instr32; + } else { + instr64p = (uint64_t *) ((void *) instruction); + instr64 = *instr64p; #if !KERNEL - if (swap) instr64 = OSSwapInt64(instr64); + if (swap) { + instr64 = OSSwapInt64(instr64); + } #endif - rval = check_for_direct_pure_virtual_call(relocator, (u_long) instr64); - require_noerr(rval, finish); - - switch (type) { - case X86_64_RELOC_UNSIGNED: - require_action(!pcrel, finish, rval=KERN_FAILURE); - - instr64 += target; - break; - case X86_64_RELOC_SUBTRACTOR: - require_action(!pcrel, finish, rval=KERN_FAILURE); - - instr64 = target - pair_target; - break; - case X86_64_RELOC_SIGNED_1: - case X86_64_RELOC_SIGNED_2: - case X86_64_RELOC_SIGNED_4: - case X86_64_RELOC_GOT_LOAD: - case X86_64_RELOC_BRANCH: - case X86_64_RELOC_SIGNED: - case X86_64_RELOC_GOT: - default: - rval = KERN_FAILURE; - goto finish; - } + rval = check_for_direct_pure_virtual_call(relocator, (u_long) instr64); + require_noerr(rval, finish); + + switch (type) { + case X86_64_RELOC_UNSIGNED: + require_action(!pcrel, finish, rval = KERN_FAILURE); + + instr64 += target; + break; + case X86_64_RELOC_SUBTRACTOR: + require_action(!pcrel, finish, rval = KERN_FAILURE); + + instr64 = target - pair_target; + break; + case X86_64_RELOC_SIGNED_1: + case X86_64_RELOC_SIGNED_2: + case X86_64_RELOC_SIGNED_4: + case X86_64_RELOC_GOT_LOAD: + case X86_64_RELOC_BRANCH: + case X86_64_RELOC_SIGNED: + case X86_64_RELOC_GOT: + default: + rval = KERN_FAILURE; + goto finish; + } #if !KERNEL - if (swap) instr64 = OSSwapInt64(instr64); + if (swap) { + instr64 = OSSwapInt64(instr64); + } #endif - *instr64p = instr64; - } + *instr64p = instr64; + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ static kern_return_t -calculate_displacement_x86_64(uint64_t target, uint64_t adjustment, +calculate_displacement_x86_64(uint64_t target, uint64_t adjustment, int32_t *instr32) { - kern_return_t rval = KERN_FAILURE; - int64_t displacement; - uint64_t difference; + kern_return_t rval = KERN_FAILURE; + int64_t displacement; + uint64_t difference; - displacement = *instr32 + target - adjustment; - difference = ABSOLUTE_VALUE(displacement); - require_action(difference < 
X86_64_RIP_RELATIVE_LIMIT, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogRelocationOverflow)); + displacement = *instr32 + target - adjustment; + difference = ABSOLUTE_VALUE(displacement); + require_action(difference < X86_64_RIP_RELATIVE_LIMIT, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogRelocationOverflow)); - *instr32 = (int32_t) displacement; - rval = KERN_SUCCESS; + *instr32 = (int32_t) displacement; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_X86_64 */ #if KXLD_USER_OR_ARM /******************************************************************************* *******************************************************************************/ -static boolean_t +static boolean_t arm_reloc_has_pair(u_int _type) { - enum reloc_type_arm type = _type; - - switch(type) { - case ARM_RELOC_SECTDIFF: - return TRUE; - default: - return FALSE; - } - return FALSE; + enum reloc_type_arm type = _type; + + switch (type) { + case ARM_RELOC_SECTDIFF: + return TRUE; + default: + return FALSE; + } + return FALSE; } /******************************************************************************* *******************************************************************************/ -static u_int +static u_int arm_reloc_get_pair_type(u_int _prev_type __unused) { - return ARM_RELOC_PAIR; + return ARM_RELOC_PAIR; } /******************************************************************************* *******************************************************************************/ -static boolean_t +static boolean_t arm_reloc_has_got(u_int _type __unused) { - return FALSE; + return FALSE; } /******************************************************************************* *******************************************************************************/ -static kern_return_t -arm_process_reloc(const KXLDRelocator *relocator __unused, u_char *instruction, - u_int length, u_int pcrel, kxld_addr_t _base_pc __unused, +static kern_return_t +arm_process_reloc(const KXLDRelocator *relocator __unused, u_char *instruction, + u_int length, u_int pcrel, kxld_addr_t _base_pc __unused, kxld_addr_t _link_pc __unused, kxld_addr_t _link_disp __unused, - u_int _type __unused, kxld_addr_t _target __unused, - kxld_addr_t _pair_target __unused, boolean_t swap __unused) + u_int _type __unused, kxld_addr_t _target __unused, + kxld_addr_t _pair_target __unused, boolean_t swap __unused) { - kern_return_t rval = KERN_FAILURE; - uint32_t *instr_addr = NULL; - uint32_t instr_data = 0; - uint32_t base_pc = (uint32_t) _base_pc; - uint32_t link_pc = (uint32_t) _link_pc; - uint32_t target = (uint32_t) _target; - int32_t displacement = 0; - enum reloc_type_arm type = _type; - - check(instruction); - require_action(length == 2, finish, rval=KERN_FAILURE); - - if (pcrel) displacement = target + base_pc - link_pc; - - instr_addr = (uint32_t *) ((void *) instruction); - instr_data = *instr_addr; - + kern_return_t rval = KERN_FAILURE; + uint32_t *instr_addr = NULL; + uint32_t instr_data = 0; + uint32_t base_pc = (uint32_t) _base_pc; + uint32_t link_pc = (uint32_t) _link_pc; + uint32_t target = (uint32_t) _target; + int32_t displacement = 0; + enum reloc_type_arm type = _type; + + check(instruction); + require_action(length == 2, finish, rval = KERN_FAILURE); + + if (pcrel) { + displacement = target + base_pc - link_pc; + } + + instr_addr = (uint32_t *) ((void *) instruction); + instr_data = *instr_addr; + #if !KERNEL - if (swap) instr_data = 
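The displacement helper just shown folds the relocated target into the instruction's existing 32-bit displacement and rejects anything RIP-relative addressing cannot reach. The same check in isolation (a sketch; the 2 GB limit is an assumption standing in for X86_64_RIP_RELATIVE_LIMIT, and llabs stands in for ABSOLUTE_VALUE):

#include <stdint.h>
#include <stdlib.h>

#define RIP_LIMIT (1ULL << 31)   /* assumed: ±2 GB RIP-relative reach */

static int
calc_disp(uint64_t target, uint64_t adjustment, int32_t *instr32)
{
    int64_t displacement = (int64_t)(*instr32 + target - adjustment);
    uint64_t magnitude = (uint64_t)llabs(displacement);

    if (magnitude >= RIP_LIMIT) {
        return -1;               /* relocation overflow */
    }
    *instr32 = (int32_t)displacement;
    return 0;
}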
OSSwapInt32(instr_data); + if (swap) { + instr_data = OSSwapInt32(instr_data); + } #endif - rval = check_for_direct_pure_virtual_call(relocator, instr_data); - require_noerr(rval, finish); - - switch (type) { - case ARM_RELOC_VANILLA: - instr_data += target; - break; - - /* - * If the displacement is 0 (the offset between the pc and the target has - * not changed), then we don't need to do anything for BR24 and BR22 - * relocs. As it turns out, because kexts build with -mlong-calls all - * relocations currently end up being either vanilla (handled above) or - * BR22/BR24 with a displacement of 0. - * We could handle other displacements here but to keep things simple, we - * won't until it is needed (at which point the kernelcache will fail to - * link) - */ - case ARM_RELOC_BR24: - require_action(pcrel, finish, rval=KERN_FAILURE); - require_action(displacement == 0, finish, rval=KERN_FAILURE); - break; - case ARM_THUMB_RELOC_BR22: - require_action(pcrel, finish, rval=KERN_FAILURE); - require_action(displacement == 0, finish, rval=KERN_FAILURE); - break; - - case ARM_RELOC_SECTDIFF: - case ARM_RELOC_LOCAL_SECTDIFF: - case ARM_RELOC_PB_LA_PTR: - rval = KERN_FAILURE; - goto finish; - - case ARM_RELOC_PAIR: - default: - rval = KERN_FAILURE; - goto finish; - } + rval = check_for_direct_pure_virtual_call(relocator, instr_data); + require_noerr(rval, finish); + + switch (type) { + case ARM_RELOC_VANILLA: + instr_data += target; + break; + + /* + * If the displacement is 0 (the offset between the pc and the target has + * not changed), then we don't need to do anything for BR24 and BR22 + * relocs. As it turns out, because kexts build with -mlong-calls all + * relocations currently end up being either vanilla (handled above) or + * BR22/BR24 with a displacement of 0. 
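Because kexts are built with -mlong-calls, the BR24/BR22 cases that follow only have to verify that a branch's displacement is unchanged once everything slides by the same amount. That invariant in isolation (hypothetical helper):

#include <stdint.h>

static int
arm_branch_unchanged(uint32_t target, uint32_t base_pc, uint32_t link_pc)
{
    /* If source and destination slid together, the delta is zero and the
     * encoded BR24/BR22 offset can be left alone. */
    int32_t delta = (int32_t)(target + base_pc - link_pc);
    return delta == 0;
}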
+ * We could handle other displacements here but to keep things simple, we + * won't until it is needed (at which point the kernelcache will fail to + * link) + */ + case ARM_RELOC_BR24: + require_action(pcrel, finish, rval = KERN_FAILURE); + require_action(displacement == 0, finish, rval = KERN_FAILURE); + break; + case ARM_THUMB_RELOC_BR22: + require_action(pcrel, finish, rval = KERN_FAILURE); + require_action(displacement == 0, finish, rval = KERN_FAILURE); + break; + + case ARM_RELOC_SECTDIFF: + case ARM_RELOC_LOCAL_SECTDIFF: + case ARM_RELOC_PB_LA_PTR: + rval = KERN_FAILURE; + goto finish; + + case ARM_RELOC_PAIR: + default: + rval = KERN_FAILURE; + goto finish; + } #if !KERNEL - if (swap) instr_data = OSSwapInt32(instr_data); + if (swap) { + instr_data = OSSwapInt32(instr_data); + } #endif - *instr_addr = instr_data; + *instr_addr = instr_data; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_ARM */ @@ -1639,7 +1665,7 @@ finish: boolean_t arm64_reloc_has_pair(u_int _type) { - return (_type == ARM64_RELOC_SUBTRACTOR); + return _type == ARM64_RELOC_SUBTRACTOR; } /******************************************************************************* @@ -1647,11 +1673,11 @@ arm64_reloc_has_pair(u_int _type) u_int arm64_reloc_get_pair_type(u_int _prev_type __unused) { - if (_prev_type == ARM64_RELOC_SUBTRACTOR) { - return ARM64_RELOC_UNSIGNED; - } else { - return -1u; - } + if (_prev_type == ARM64_RELOC_SUBTRACTOR) { + return ARM64_RELOC_UNSIGNED; + } else { + return -1u; + } } /******************************************************************************* @@ -1659,8 +1685,8 @@ arm64_reloc_get_pair_type(u_int _prev_type __unused) boolean_t arm64_reloc_has_got(u_int _type) { - return (_type == ARM64_RELOC_GOT_LOAD_PAGE21 || - _type == ARM64_RELOC_GOT_LOAD_PAGEOFF12); + return _type == ARM64_RELOC_GOT_LOAD_PAGE21 || + _type == ARM64_RELOC_GOT_LOAD_PAGEOFF12; } /******************************************************************************* @@ -1671,77 +1697,85 @@ arm64_process_reloc(const KXLDRelocator *relocator __unused, u_char *instruction kxld_addr_t _link_disp __unused, u_int _type, kxld_addr_t _target, kxld_addr_t _pair_target __unused, boolean_t swap) { - kern_return_t rval = KERN_FAILURE; - enum reloc_type_arm64 type = _type; - uint64_t target = _target; - uint64_t link_pc = (uint64_t) _link_pc; - uint64_t difference = 0; - int64_t displacement = 0; - uint32_t addend = 0; + kern_return_t rval = KERN_FAILURE; + enum reloc_type_arm64 type = _type; + uint64_t target = _target; + uint64_t link_pc = (uint64_t) _link_pc; + uint64_t difference = 0; + int64_t displacement = 0; + uint32_t addend = 0; - check(instruction); - require_action((length == 2 || length == 3), finish, rval=KERN_FAILURE); + check(instruction); + require_action((length == 2 || length == 3), finish, rval = KERN_FAILURE); - if (length == 2) { - uint32_t *instr32p = (uint32_t *) (void *) instruction; - uint32_t instr32 = *instr32p; + if (length == 2) { + uint32_t *instr32p = (uint32_t *) (void *) instruction; + uint32_t instr32 = *instr32p; #if !KERNEL - if (swap) instr32 = OSSwapInt32(instr32); + if (swap) { + instr32 = OSSwapInt32(instr32); + } #endif - switch (type) { - case ARM64_RELOC_BRANCH26: - require_action(pcrel, finish, rval=KERN_FAILURE); - addend = (instr32 & 0x03FFFFFF) << 2; - addend = SIGN_EXTEND(addend, 27); - displacement = (target - link_pc + addend); - difference = ABSOLUTE_VALUE(displacement); - displacement = (displacement >> 2); - 
require_action(difference < (128 * 1024 * 1024), finish, - rval = KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogRelocationOverflow)); - instr32 = (instr32 & 0xFC000000) | (displacement & 0x03FFFFFF); - break; - - default: - rval = KERN_FAILURE; - goto finish; - } + switch (type) { + case ARM64_RELOC_BRANCH26: + require_action(pcrel, finish, rval = KERN_FAILURE); + addend = (instr32 & 0x03FFFFFF) << 2; + addend = SIGN_EXTEND(addend, 27); + displacement = (target - link_pc + addend); + difference = ABSOLUTE_VALUE(displacement); + displacement = (displacement >> 2); + require_action(difference < (128 * 1024 * 1024), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogRelocationOverflow)); + instr32 = (instr32 & 0xFC000000) | (displacement & 0x03FFFFFF); + break; + + default: + rval = KERN_FAILURE; + goto finish; + } #if !KERNEL - if (swap) instr32 = OSSwapInt32(instr32); + if (swap) { + instr32 = OSSwapInt32(instr32); + } #endif - - *instr32p = instr32; - } else { /* length == 3 */ - uint64_t *instr64p = (uint64_t *) (void *) instruction; - uint64_t instr64 = *instr64p; + + *instr32p = instr32; + } else { /* length == 3 */ + uint64_t *instr64p = (uint64_t *) (void *) instruction; + uint64_t instr64 = *instr64p; #if !KERNEL - if (swap) instr64 = OSSwapInt64(instr64); + if (swap) { + instr64 = OSSwapInt64(instr64); + } #endif - switch (type) { - case ARM64_RELOC_UNSIGNED: - require_action(!pcrel, finish, rval=KERN_FAILURE); - instr64 += target; - break; - default: - rval = KERN_FAILURE; - goto finish; - } + switch (type) { + case ARM64_RELOC_UNSIGNED: + require_action(!pcrel, finish, rval = KERN_FAILURE); + instr64 += target; + break; + default: + rval = KERN_FAILURE; + goto finish; + } #if !KERNEL - if (swap) instr64 = OSSwapInt64(instr64); + if (swap) { + instr64 = OSSwapInt64(instr64); + } #endif - - *instr64p = instr64; - } - rval = KERN_SUCCESS; + *instr64p = instr64; + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } diff --git a/libkern/kxld/kxld_reloc.h b/libkern/kxld/kxld_reloc.h index c95d679fb..27539084d 100644 --- a/libkern/kxld/kxld_reloc.h +++ b/libkern/kxld/kxld_reloc.h @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
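For the ARM64_RELOC_BRANCH26 case above: the low 26 bits of a B/BL instruction hold a signed word offset, so the fixup widens the existing addend, re-targets it against the linked PC, and re-encodes the offset if it still fits in the ±128 MB direct-branch range. A sketch of the bit manipulation (hypothetical helper; llabs is used in place of kxld's ABSOLUTE_VALUE macro):

#include <stdint.h>
#include <stdlib.h>

static int
fixup_branch26(uint32_t *instr32, uint64_t target, uint64_t link_pc)
{
    int64_t addend = (int64_t)((*instr32 & 0x03FFFFFF) << 2); /* 28-bit byte offset */
    if (addend & 0x08000000) {
        addend -= 0x10000000;        /* sign-extend from bit 27 */
    }

    int64_t displacement = (int64_t)(target - link_pc) + addend;
    if (llabs(displacement) >= 128 * 1024 * 1024) {
        return -1;                   /* out of direct-branch range */
    }

    *instr32 = (*instr32 & 0xFC000000) |
        ((uint32_t)(displacement >> 2) & 0x03FFFFFF);
    return 0;
}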
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_RELOC_H @@ -51,97 +51,97 @@ typedef struct kxld_reloc KXLDReloc; typedef boolean_t (*RelocHasPair)(u_int r_type); typedef u_int (*RelocGetPairType)(u_int prev_r_type); typedef boolean_t (*RelocHasGot)(u_int r_type); -typedef kern_return_t(*ProcessReloc)(const KXLDRelocator *relocator, - u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, - kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, +typedef kern_return_t (*ProcessReloc)(const KXLDRelocator *relocator, + u_char *instruction, u_int length, u_int pcrel, kxld_addr_t base_pc, + kxld_addr_t link_pc, kxld_addr_t link_disp, u_int type, kxld_addr_t target, kxld_addr_t pair_target, boolean_t swap); struct kxld_relocator { - RelocHasPair reloc_has_pair; - RelocGetPairType reloc_get_pair_type; - RelocHasGot reloc_has_got; - ProcessReloc process_reloc; - const struct kxld_symtab *symtab; - const struct kxld_array *sectarray; - const struct kxld_dict *vtables; - const struct kxld_vtable *current_vtable; - u_char *file; - u_int function_align; /* Power of two alignment of functions */ - boolean_t is_32_bit; - boolean_t swap; - boolean_t may_scatter; + RelocHasPair reloc_has_pair; + RelocGetPairType reloc_get_pair_type; + RelocHasGot reloc_has_got; + ProcessReloc process_reloc; + const struct kxld_symtab *symtab; + const struct kxld_array *sectarray; + const struct kxld_dict *vtables; + const struct kxld_vtable *current_vtable; + u_char *file; + u_int function_align; /* Power of two alignment of functions */ + boolean_t is_32_bit; + boolean_t swap; + boolean_t may_scatter; }; struct kxld_reloc { - u_int address; - u_int pair_address; - u_int target; - u_int pair_target; - u_int target_type:3; - u_int pair_target_type:3; - u_int reloc_type:4; - u_int length:2; - u_int pcrel:1; + u_int address; + u_int pair_address; + u_int target; + u_int pair_target; + u_int target_type:3; + u_int pair_target_type:3; + u_int reloc_type:4; + u_int length:2; + u_int pcrel:1; }; /******************************************************************************* * Constructors and Destructors *******************************************************************************/ kern_return_t kxld_relocator_init(KXLDRelocator *relocator, u_char *file, - const struct kxld_symtab *symtab, const struct kxld_array *sectarray, + const struct kxld_symtab *symtab, const struct kxld_array *sectarray, cpu_type_t cputype, cpu_subtype_t cpusubtype, boolean_t swap) - __attribute__((nonnull,visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_reloc_create_macho(struct kxld_array *relocarray, - const KXLDRelocator *relocator, const struct relocation_info *srcs, + const KXLDRelocator *relocator, const struct relocation_info *srcs, u_int nsrcs) __attribute__((nonnull, visibility("hidden"))); void kxld_relocator_clear(KXLDRelocator *relocator) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors *******************************************************************************/ boolean_t kxld_relocator_has_pair(const KXLDRelocator *relocator, u_int r_type) - __attribute__((pure, nonnull,visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); u_int kxld_relocator_get_pair_type(const KXLDRelocator *relocator, u_int last_r_type) - __attribute__((pure, nonnull,visibility("hidden"))); +__attribute__((pure, nonnull, 
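struct kxld_reloc, declared just above, packs the whole relocation classification into trailing bitfields. A compile-and-run sketch of that packing and of the length encoding (length is the log2 of the access size):

#include <stdio.h>

struct reloc_bits {                /* mirror of the kxld_reloc tail */
    unsigned int target_type:3;
    unsigned int pair_target_type:3;
    unsigned int reloc_type:4;
    unsigned int length:2;
    unsigned int pcrel:1;
};

int main(void)
{
    struct reloc_bits r = { .length = 3 };   /* 1 << 3 == 8-byte access */
    printf("packed into %zu bytes, access size %u\n",
        sizeof r, 1u << r.length);
    return 0;
}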
visibility("hidden"))); boolean_t kxld_relocator_has_got(const KXLDRelocator *relocator, u_int r_type) - __attribute__((pure, nonnull,visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); kxld_addr_t kxld_relocator_get_pointer_at_addr(const KXLDRelocator *relocator, const u_char *data, u_long offset) - __attribute__((pure, nonnull,visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); struct kxld_sym * kxld_reloc_get_symbol(const KXLDRelocator *relocator, const KXLDReloc *reloc, const u_char *data) - __attribute__((pure, nonnull(1,2), visibility("hidden"))); +__attribute__((pure, nonnull(1, 2), visibility("hidden"))); -kern_return_t kxld_reloc_get_reloc_index_by_offset(const struct kxld_array *relocs, +kern_return_t kxld_reloc_get_reloc_index_by_offset(const struct kxld_array *relocs, kxld_size_t offset, u_int *idx) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); -KXLDReloc * kxld_reloc_get_reloc_by_offset(const struct kxld_array *relocs, +KXLDReloc * kxld_reloc_get_reloc_by_offset(const struct kxld_array *relocs, kxld_addr_t offset) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #if KXLD_PIC_KEXTS u_long kxld_reloc_get_macho_header_size(void) - __attribute__((pure, visibility("hidden"))); +__attribute__((pure, visibility("hidden"))); u_long kxld_reloc_get_macho_data_size(const struct kxld_array *locrelocs, const struct kxld_array *extrelocs) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); kern_return_t kxld_reloc_export_macho(const KXLDRelocator *relocator, const struct kxld_array *locrelocs, const struct kxld_array *extrelocs, - u_char *buf, u_long *header_offset, u_long header_size, + u_char *buf, u_long *header_offset, u_long header_size, u_long *data_offset, u_long size) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_PIC_KEXTS */ /******************************************************************************* @@ -149,22 +149,20 @@ kern_return_t kxld_reloc_export_macho(const KXLDRelocator *relocator, *******************************************************************************/ kern_return_t kxld_reloc_update_symindex(KXLDReloc *reloc, u_int symindex) - __attribute__((nonnull,visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); -void kxld_relocator_set_vtables(KXLDRelocator *relocator, +void kxld_relocator_set_vtables(KXLDRelocator *relocator, const struct kxld_dict *vtables) - __attribute__((nonnull,visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_relocator_process_sect_reloc(KXLDRelocator *relocator, const KXLDReloc *reloc, const struct kxld_sect *sect) - __attribute__((nonnull,visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_relocator_process_table_reloc(KXLDRelocator *relocator, - const KXLDReloc *reloc, - const struct kxld_seg *seg, - kxld_addr_t link_addr) - __attribute__((nonnull,visibility("hidden"))); + const KXLDReloc *reloc, + const struct kxld_seg *seg, + kxld_addr_t link_addr) +__attribute__((nonnull, visibility("hidden"))); #endif /* _KXLD_RELOC_H */ - - diff --git a/libkern/kxld/kxld_sect.c b/libkern/kxld/kxld_sect.c index 4a41a6844..5ed66111e 100644 --- a/libkern/kxld/kxld_sect.c +++ b/libkern/kxld/kxld_sect.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -39,14 +39,14 @@ #include "kxld_symtab.h" #include "kxld_util.h" -static kern_return_t export_macho(const KXLDSect *sect, u_char *buf, u_long offset, +static kern_return_t export_macho(const KXLDSect *sect, u_char *buf, u_long offset, u_long bufsize); #if KXLD_USER_OR_ILP32 -static kern_return_t sect_export_macho_header_32(const KXLDSect *sect, u_char *buf, +static kern_return_t sect_export_macho_header_32(const KXLDSect *sect, u_char *buf, u_long *header_offset, u_long header_size, u_long data_offset); #endif #if KXLD_USER_OR_LP64 -static kern_return_t sect_export_macho_header_64(const KXLDSect *sect, u_char *buf, +static kern_return_t sect_export_macho_header_64(const KXLDSect *sect, u_char *buf, u_long *header_offset, u_long header_size, u_long data_offset); #endif extern boolean_t isSplitKext; @@ -58,44 +58,46 @@ kern_return_t kxld_sect_init_from_macho_32(KXLDSect *sect, u_char *macho, u_long *sect_offset, u_int sectnum, const KXLDRelocator *relocator) { - kern_return_t rval = KERN_FAILURE; - struct section *src = (struct section *) ((void *) (macho + *sect_offset)); - struct relocation_info *relocs = NULL; - - check(sect); - check(macho); - check(src); - - strlcpy(sect->segname, src->segname, sizeof(sect->segname)); - strlcpy(sect->sectname, src->sectname, sizeof(sect->sectname)); - sect->base_addr = src->addr; - sect->link_addr = src->addr; - sect->size = src->size; - sect->sectnum = sectnum; - sect->flags = src->flags; - sect->align = src->align; - sect->reserved1 = src->reserved1; - sect->reserved2 = src->reserved2; - - if (src->offset) { - sect->data = macho + src->offset; - } else { - sect->data = NULL; - } - - relocs = (struct relocation_info *) ((void *) (macho + src->reloff)); - - rval = kxld_reloc_create_macho(§->relocs, relocator, - relocs, src->nreloc); - require_noerr(rval, finish); - - *sect_offset += sizeof(*src); - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + struct section *src = (struct section *) ((void *) (macho + *sect_offset)); + struct relocation_info *relocs = NULL; + + check(sect); + check(macho); + check(src); + + strlcpy(sect->segname, src->segname, sizeof(sect->segname)); + strlcpy(sect->sectname, src->sectname, sizeof(sect->sectname)); + sect->base_addr = src->addr; + sect->link_addr = src->addr; + sect->size = src->size; + sect->sectnum = sectnum; + sect->flags = src->flags; + sect->align = src->align; + sect->reserved1 = src->reserved1; + 
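kxld_sect_init_from_macho_32 parses the section header in place and, when the section has file contents (offset != 0), points sect->data back into the image rather than copying it. The pattern in isolation (assumes <mach-o/loader.h> from a userland SDK):

#include <stdint.h>
#include <mach-o/loader.h>

/* Return a pointer into the image for a 32-bit section's contents, or
 * NULL for zerofill-style sections with no file backing. */
static const uint8_t *
section_data_32(const uint8_t *macho, unsigned long sect_offset)
{
    const struct section *src =
        (const struct section *)(const void *)(macho + sect_offset);
    return src->offset ? macho + src->offset : NULL;
}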
sect->reserved2 = src->reserved2; + + if (src->offset) { + sect->data = macho + src->offset; + } else { + sect->data = NULL; + } + + relocs = (struct relocation_info *) ((void *) (macho + src->reloff)); + + rval = kxld_reloc_create_macho(§->relocs, relocator, + relocs, src->nreloc); + require_noerr(rval, finish); + + *sect_offset += sizeof(*src); + rval = KERN_SUCCESS; finish: - if (rval) kxld_sect_deinit(sect); + if (rval) { + kxld_sect_deinit(sect); + } - return rval; + return rval; } #endif /* KXLD_USER_OR_ILP32 */ @@ -106,44 +108,46 @@ kern_return_t kxld_sect_init_from_macho_64(KXLDSect *sect, u_char *macho, u_long *sect_offset, u_int sectnum, const KXLDRelocator *relocator) { - kern_return_t rval = KERN_FAILURE; - struct section_64 *src = (struct section_64 *) ((void *) (macho + *sect_offset)); - struct relocation_info *relocs = NULL; - - check(sect); - check(macho); - check(src); - - strlcpy(sect->segname, src->segname, sizeof(sect->segname)); - strlcpy(sect->sectname, src->sectname, sizeof(sect->sectname)); - sect->base_addr = src->addr; - sect->link_addr = src->addr; - sect->size = src->size; - sect->sectnum = sectnum; - sect->flags = src->flags; - sect->align = src->align; - sect->reserved1 = src->reserved1; - sect->reserved2 = src->reserved2; - - if (src->offset) { - sect->data = macho + src->offset; - } else { - sect->data = NULL; - } - - relocs = (struct relocation_info *) ((void *) (macho + src->reloff)); - - rval = kxld_reloc_create_macho(§->relocs, relocator, - relocs, src->nreloc); - require_noerr(rval, finish); - - *sect_offset += sizeof(*src); - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + struct section_64 *src = (struct section_64 *) ((void *) (macho + *sect_offset)); + struct relocation_info *relocs = NULL; + + check(sect); + check(macho); + check(src); + + strlcpy(sect->segname, src->segname, sizeof(sect->segname)); + strlcpy(sect->sectname, src->sectname, sizeof(sect->sectname)); + sect->base_addr = src->addr; + sect->link_addr = src->addr; + sect->size = src->size; + sect->sectnum = sectnum; + sect->flags = src->flags; + sect->align = src->align; + sect->reserved1 = src->reserved1; + sect->reserved2 = src->reserved2; + + if (src->offset) { + sect->data = macho + src->offset; + } else { + sect->data = NULL; + } + + relocs = (struct relocation_info *) ((void *) (macho + src->reloff)); + + rval = kxld_reloc_create_macho(§->relocs, relocator, + relocs, src->nreloc); + require_noerr(rval, finish); + + *sect_offset += sizeof(*src); + rval = KERN_SUCCESS; finish: - if (rval) kxld_sect_deinit(sect); + if (rval) { + kxld_sect_deinit(sect); + } - return rval; + return rval; } #endif /* KXLD_USER_OR_LP64 */ @@ -154,29 +158,29 @@ finish: kern_return_t kxld_sect_init_got(KXLDSect *sect, u_int ngots) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(sect); + check(sect); - strlcpy(sect->segname, KXLD_SEG_GOT, sizeof(sect->segname)); - strlcpy(sect->sectname, KXLD_SECT_GOT, sizeof(sect->sectname)); - sect->base_addr = 0; - sect->link_addr = 0; - sect->flags = 0; - sect->align = 4; - sect->reserved1 = 0; - sect->reserved2 = 0; + strlcpy(sect->segname, KXLD_SEG_GOT, sizeof(sect->segname)); + strlcpy(sect->sectname, KXLD_SECT_GOT, sizeof(sect->sectname)); + sect->base_addr = 0; + sect->link_addr = 0; + sect->flags = 0; + sect->align = 4; + sect->reserved1 = 0; + sect->reserved2 = 0; - sect->size = ngots * sizeof(kxld_addr_t); - sect->data = kxld_alloc((u_long) sect->size); - require_action(sect->data, finish, 
rval=KERN_RESOURCE_SHORTAGE); + sect->size = ngots * sizeof(kxld_addr_t); + sect->data = kxld_alloc((u_long) sect->size); + require_action(sect->data, finish, rval = KERN_RESOURCE_SHORTAGE); - sect->allocated = TRUE; + sect->allocated = TRUE; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_GOT */ @@ -184,20 +188,20 @@ finish: /******************************************************************************* *******************************************************************************/ void -kxld_sect_init_zerofill(KXLDSect *sect, const char *segname, +kxld_sect_init_zerofill(KXLDSect *sect, const char *segname, const char *sectname, kxld_size_t size, u_int align) { - check(sect); - check(segname); - check(sectname); - - strlcpy(sect->segname, segname, sizeof(sect->segname)); - strlcpy(sect->sectname, sectname, sizeof(sect->sectname)); - sect->size = size; - sect->align = align; - sect->base_addr = 0; - sect->link_addr = 0; - sect->flags = S_ZEROFILL; + check(sect); + check(segname); + check(sectname); + + strlcpy(sect->segname, segname, sizeof(sect->segname)); + strlcpy(sect->sectname, sectname, sizeof(sect->sectname)); + sect->size = size; + sect->align = align; + sect->base_addr = 0; + sect->link_addr = 0; + sect->flags = S_ZEROFILL; } #endif /* KXLD_USER_OR_COMMON */ @@ -206,24 +210,24 @@ kxld_sect_init_zerofill(KXLDSect *sect, const char *segname, void kxld_sect_clear(KXLDSect *sect) { - check(sect); - - if (sect->allocated) { - kxld_free(sect->data, (u_long) sect->size); - sect->allocated = FALSE; - } - - bzero(sect->sectname, sizeof(sect->sectname)); - bzero(sect->segname, sizeof(sect->segname)); - sect->data = NULL; - sect->base_addr = 0; - sect->link_addr = 0; - sect->size = 0; - sect->flags = 0; - sect->align = 0; - sect->reserved1 = 0; - sect->reserved2 = 0; - kxld_array_clear(§->relocs); + check(sect); + + if (sect->allocated) { + kxld_free(sect->data, (u_long) sect->size); + sect->allocated = FALSE; + } + + bzero(sect->sectname, sizeof(sect->sectname)); + bzero(sect->segname, sizeof(sect->segname)); + sect->data = NULL; + sect->base_addr = 0; + sect->link_addr = 0; + sect->size = 0; + sect->flags = 0; + sect->align = 0; + sect->reserved1 = 0; + sect->reserved2 = 0; + kxld_array_clear(§->relocs); } /******************************************************************************* @@ -231,24 +235,24 @@ kxld_sect_clear(KXLDSect *sect) void kxld_sect_deinit(KXLDSect *sect) { - check(sect); + check(sect); - if (streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT))) { - kxld_free(sect->data, (u_long) sect->size); - } + if (streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT))) { + kxld_free(sect->data, (u_long) sect->size); + } - kxld_array_deinit(§->relocs); - bzero(sect, sizeof(*sect)); + kxld_array_deinit(§->relocs); + bzero(sect, sizeof(*sect)); } /******************************************************************************* *******************************************************************************/ -u_int +u_int kxld_sect_get_num_relocs(const KXLDSect *sect) { - check(sect); + check(sect); - return sect->relocs.nitems; + return sect->relocs.nitems; } /******************************************************************************* @@ -256,11 +260,11 @@ kxld_sect_get_num_relocs(const KXLDSect *sect) u_long kxld_sect_get_macho_header_size(boolean_t is_32_bit) { - if (is_32_bit) { - return sizeof(struct section); - } else { - return sizeof(struct section_64); - } + if (is_32_bit) { + return 
sizeof(struct section); + } else { + return sizeof(struct section_64); + } } /******************************************************************************* @@ -268,15 +272,15 @@ kxld_sect_get_macho_header_size(boolean_t is_32_bit) u_long kxld_sect_get_macho_data_size(const KXLDSect *sect) { - u_long size = 0; + u_long size = 0; - check(sect); + check(sect); - if (sect->data) { - size = (u_long) sect->size; - } + if (sect->data) { + size = (u_long) sect->size; + } - return size; + return size; } #if KXLD_USER_OR_GOT @@ -286,115 +290,115 @@ u_int kxld_sect_get_ngots(const KXLDSect *sect, const KXLDRelocator *relocator, const KXLDSymtab *symtab) { - const KXLDReloc *reloc = NULL; - KXLDSym *sym = NULL; - u_int ngots = 0; - u_int i = 0; - - for (i = 0; i < sect->relocs.nitems; ++i) { - reloc = kxld_array_get_item(§->relocs, i); - - if (relocator->reloc_has_got(reloc->reloc_type)) { - /* @TODO This assumes 64-bit symbols (which is valid at the - * moment since only x86_64 has a GOT) - */ - sym = kxld_reloc_get_symbol(relocator, reloc, sect->data, symtab); - if (!kxld_sym_is_got(sym)) { - kxld_sym_set_got(sym); - ++ngots; - } - } - } - - return ngots; + const KXLDReloc *reloc = NULL; + KXLDSym *sym = NULL; + u_int ngots = 0; + u_int i = 0; + + for (i = 0; i < sect->relocs.nitems; ++i) { + reloc = kxld_array_get_item(§->relocs, i); + + if (relocator->reloc_has_got(reloc->reloc_type)) { + /* @TODO This assumes 64-bit symbols (which is valid at the + * moment since only x86_64 has a GOT) + */ + sym = kxld_reloc_get_symbol(relocator, reloc, sect->data, symtab); + if (!kxld_sym_is_got(sym)) { + kxld_sym_set_got(sym); + ++ngots; + } + } + } + + return ngots; } #endif /* KXLD_USER_OR_GOT */ /******************************************************************************* -* Each section must be aligned at a certain power of two. To figure out that -* alignment, we mask for the low bits that may need to be adjusted. If they are -* non zero, we then subtract them from the target alignment to find the offset, +* Each section must be aligned at a certain power of two. To figure out that +* alignment, we mask for the low bits that may need to be adjusted. If they are +* non zero, we then subtract them from the target alignment to find the offset, * and then add that offset to the link address. *******************************************************************************/ kxld_addr_t kxld_sect_align_address(const KXLDSect *sect, kxld_addr_t address) { - return kxld_align_address(address, sect->align); + return kxld_align_address(address, sect->align); } /******************************************************************************* *******************************************************************************/ kern_return_t kxld_sect_export_macho_to_file_buffer(const KXLDSect *sect, u_char *buf, - u_long *header_offset, u_long header_size, u_long *data_offset, + u_long *header_offset, u_long header_size, u_long *data_offset, u_long data_size, boolean_t is_32_bit __unused) { - kern_return_t rval = KERN_FAILURE; - - check(sect); - check(buf); - check(header_offset); - check(data_offset); - - /* If there is no data to export, we only need to write the header. We - * make it a separate call so that we don't modify data_offset. 
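The alignment rule described above reduces to a standard mask-and-offset computation: with align stored as a log2 exponent, as in the Mach-O section header, the low bits of the address are masked off and, if they are non-zero, their distance to the next boundary is added back. A minimal sketch of that arithmetic, assuming a conventional align-up helper (align_up is illustrative, not the kxld implementation):

    #include <stdint.h>

    static uint64_t
    align_up(uint64_t address, unsigned align)
    {
        uint64_t alignment = 1ULL << align;        /* e.g. align 4 -> 16 */
        uint64_t low_bits = address & (alignment - 1);

        if (low_bits) {
            address += alignment - low_bits;       /* offset to next boundary */
        }
        return address;
    }

For example, align_up(0x1003, 4) masks off low_bits 0x3, adds 16 - 3 = 13, and returns 0x1010.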
- */ - if (!sect->data) { - KXLD_3264_FUNC(is_32_bit, rval, - sect_export_macho_header_32, sect_export_macho_header_64, - sect, buf, header_offset, header_size, /* data_offset */ 0); - require_noerr(rval, finish); - } else { - *data_offset = (u_long) kxld_sect_align_address(sect, *data_offset); - - KXLD_3264_FUNC(is_32_bit, rval, - sect_export_macho_header_32, sect_export_macho_header_64, - sect, buf, header_offset, header_size, *data_offset); - require_noerr(rval, finish); - - rval = export_macho(sect, buf, *data_offset, data_size); - require_noerr(rval, finish); - - *data_offset += (u_long) sect->size; - } - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + + check(sect); + check(buf); + check(header_offset); + check(data_offset); + + /* If there is no data to export, we only need to write the header. We + * make it a separate call so that we don't modify data_offset. + */ + if (!sect->data) { + KXLD_3264_FUNC(is_32_bit, rval, + sect_export_macho_header_32, sect_export_macho_header_64, + sect, buf, header_offset, header_size, /* data_offset */ 0); + require_noerr(rval, finish); + } else { + *data_offset = (u_long) kxld_sect_align_address(sect, *data_offset); + + KXLD_3264_FUNC(is_32_bit, rval, + sect_export_macho_header_32, sect_export_macho_header_64, + sect, buf, header_offset, header_size, *data_offset); + require_noerr(rval, finish); + + rval = export_macho(sect, buf, *data_offset, data_size); + require_noerr(rval, finish); + + *data_offset += (u_long) sect->size; + } + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ kern_return_t kxld_sect_export_macho_to_vm(const KXLDSect *sect, - u_char *buf, - u_long *header_offset, - u_long header_size, - kxld_addr_t link_addr, - u_long data_size, - boolean_t is_32_bit __unused) + u_char *buf, + u_long *header_offset, + u_long header_size, + kxld_addr_t link_addr, + u_long data_size, + boolean_t is_32_bit __unused) { - kern_return_t rval = KERN_FAILURE; - u_long data_offset; - - check(sect); - check(buf); - check(header_offset); - - data_offset = (u_long) (sect->link_addr - link_addr); - - KXLD_3264_FUNC(is_32_bit, rval, - sect_export_macho_header_32, sect_export_macho_header_64, - sect, buf, header_offset, header_size, data_offset); - require_noerr(rval, finish); - - rval = export_macho(sect, buf, data_offset, data_size); - require_noerr(rval, finish); - - rval = KERN_SUCCESS; - + kern_return_t rval = KERN_FAILURE; + u_long data_offset; + + check(sect); + check(buf); + check(header_offset); + + data_offset = (u_long) (sect->link_addr - link_addr); + + KXLD_3264_FUNC(is_32_bit, rval, + sect_export_macho_header_32, sect_export_macho_header_64, + sect, buf, header_offset, header_size, data_offset); + require_noerr(rval, finish); + + rval = export_macho(sect, buf, data_offset, data_size); + require_noerr(rval, finish); + + rval = KERN_SUCCESS; + finish: - return rval; + return rval; } /******************************************************************************* @@ -402,144 +406,144 @@ finish: static kern_return_t export_macho(const KXLDSect *sect, u_char *buf, u_long offset, u_long bufsize) { - kern_return_t rval = KERN_FAILURE; - - check(sect); - check(buf); - - if (!sect->data) { - rval = KERN_SUCCESS; - goto finish; - } - - if (!isSplitKext) { - /* Verify that the section is 
properly aligned */ - if (kxld_sect_align_address(sect, offset) != offset) { - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Alignment error: %llu != %lu for %s %s <%s>", - kxld_sect_align_address(sect, offset), offset, - sect->segname, sect->sectname, __func__); - goto finish; - } - } - - /* Verify that we have enough space to copy */ - if (buf + offset + sect->size > buf + bufsize) { - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Overflow: offset %lu + sect->size %llu > bufsize %lu for %s %s", - offset, sect->size, bufsize, - sect->segname, sect->sectname); - goto finish; - } - - /* Copy section data */ - switch (sect->flags & SECTION_TYPE) { - case S_NON_LAZY_SYMBOL_POINTERS: - case S_MOD_INIT_FUNC_POINTERS: - case S_MOD_TERM_FUNC_POINTERS: - case S_REGULAR: - case S_CSTRING_LITERALS: - case S_4BYTE_LITERALS: - case S_8BYTE_LITERALS: - case S_LITERAL_POINTERS: - case S_COALESCED: - case S_16BYTE_LITERALS: - case S_SYMBOL_STUBS: + kern_return_t rval = KERN_FAILURE; + + check(sect); + check(buf); + + if (!sect->data) { + rval = KERN_SUCCESS; + goto finish; + } + + if (!isSplitKext) { + /* Verify that the section is properly aligned */ + if (kxld_sect_align_address(sect, offset) != offset) { + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Alignment error: %llu != %lu for %s %s <%s>", + kxld_sect_align_address(sect, offset), offset, + sect->segname, sect->sectname, __func__); + goto finish; + } + } + + /* Verify that we have enough space to copy */ + if (buf + offset + sect->size > buf + bufsize) { + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Overflow: offset %lu + sect->size %llu > bufsize %lu for %s %s", + offset, sect->size, bufsize, + sect->segname, sect->sectname); + goto finish; + } + + /* Copy section data */ + switch (sect->flags & SECTION_TYPE) { + case S_NON_LAZY_SYMBOL_POINTERS: + case S_MOD_INIT_FUNC_POINTERS: + case S_MOD_TERM_FUNC_POINTERS: + case S_REGULAR: + case S_CSTRING_LITERALS: + case S_4BYTE_LITERALS: + case S_8BYTE_LITERALS: + case S_LITERAL_POINTERS: + case S_COALESCED: + case S_16BYTE_LITERALS: + case S_SYMBOL_STUBS: #if SPLIT_KEXTS_DEBUG - kxld_log(kKxldLogLinking, kKxldLogErr, - " sectname %s copy from %p (sect->data) for %llu bytes (sect->size) to %p (buf %p + offset %lu <%s>", - sect->sectname[0] ? sect->sectname : "none", - (void *) sect->data, - sect->size, - (void *) (buf + offset), - (void *) buf, - offset, - __func__); - - kxld_log(kKxldLogLinking, kKxldLogErr, - " %p >>> Start of %s section data (sect->size %llu) <%s>", - (void *) (buf + offset), - sect->sectname[0] ? sect->sectname : "none", - sect->size, - __func__); - kxld_log(kKxldLogLinking, kKxldLogErr, - " %p <<< End of %s section data <%s>", - (void *) (buf + offset + sect->size), - sect->sectname[0] ? sect->sectname : "none", - __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + " sectname %s copy from %p (sect->data) for %llu bytes (sect->size) to %p (buf %p + offset %lu <%s>", + sect->sectname[0] ? sect->sectname : "none", + (void *) sect->data, + sect->size, + (void *) (buf + offset), + (void *) buf, + offset, + __func__); + + kxld_log(kKxldLogLinking, kKxldLogErr, + " %p >>> Start of %s section data (sect->size %llu) <%s>", + (void *) (buf + offset), + sect->sectname[0] ? sect->sectname : "none", + sect->size, + __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + " %p <<< End of %s section data <%s>", + (void *) (buf + offset + sect->size), + sect->sectname[0] ? 
sect->sectname : "none", + __func__); #endif - memcpy(buf + offset, sect->data, (size_t)sect->size); - break; - case S_ZEROFILL: /* sect->data should be NULL, so we'll never get here */ - case S_LAZY_SYMBOL_POINTERS: - case S_GB_ZEROFILL: - case S_INTERPOSING: - case S_DTRACE_DOF: - default: - rval = KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Invalid section type: %u.", sect->flags & SECTION_TYPE); - goto finish; - } - - rval = KERN_SUCCESS; + memcpy(buf + offset, sect->data, (size_t)sect->size); + break; + case S_ZEROFILL: /* sect->data should be NULL, so we'll never get here */ + case S_LAZY_SYMBOL_POINTERS: + case S_GB_ZEROFILL: + case S_INTERPOSING: + case S_DTRACE_DOF: + default: + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Invalid section type: %u.", sect->flags & SECTION_TYPE); + goto finish; + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_USER_OR_ILP32 /******************************************************************************* *******************************************************************************/ static kern_return_t -sect_export_macho_header_32(const KXLDSect *sect, u_char *buf, +sect_export_macho_header_32(const KXLDSect *sect, u_char *buf, u_long *header_offset, u_long header_size, u_long data_offset) { - kern_return_t rval = KERN_FAILURE; - struct section *secthdr = NULL; - - check(sect); - check(buf); - check(header_offset); - - require_action(sizeof(*secthdr) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - secthdr = (struct section *) ((void *) (buf + *header_offset)); - *header_offset += sizeof(*secthdr); - - /* Initalize header */ - - strlcpy(secthdr->sectname, sect->sectname, sizeof(secthdr->sectname)); - strlcpy(secthdr->segname, sect->segname, sizeof(secthdr->segname)); - secthdr->addr = (uint32_t) sect->link_addr; - secthdr->size = (uint32_t) sect->size; - secthdr->offset = (uint32_t) ((sect->data) ? data_offset : 0); - secthdr->align = sect->align; - secthdr->reloff = 0; - secthdr->nreloc = 0; - secthdr->flags = sect->flags; - secthdr->reserved1 = sect->reserved1; - secthdr->reserved2 = sect->reserved2; + kern_return_t rval = KERN_FAILURE; + struct section *secthdr = NULL; + + check(sect); + check(buf); + check(header_offset); + + require_action(sizeof(*secthdr) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); + secthdr = (struct section *) ((void *) (buf + *header_offset)); + *header_offset += sizeof(*secthdr); + + /* Initalize header */ + + strlcpy(secthdr->sectname, sect->sectname, sizeof(secthdr->sectname)); + strlcpy(secthdr->segname, sect->segname, sizeof(secthdr->segname)); + secthdr->addr = (uint32_t) sect->link_addr; + secthdr->size = (uint32_t) sect->size; + secthdr->offset = (uint32_t) ((sect->data) ? data_offset : 0); + secthdr->align = sect->align; + secthdr->reloff = 0; + secthdr->nreloc = 0; + secthdr->flags = sect->flags; + secthdr->reserved1 = sect->reserved1; + secthdr->reserved2 = sect->reserved2; #if SPLIT_KEXTS_DEBUG - { - kxld_log(kKxldLogLinking, kKxldLogErr, - "sectname %s secthdr: %p addr %p size %02X %u offset %02X %u <%s>", - sect->sectname[0] ? sect->sectname : "none", - (void *) secthdr, - (void *) ((uint64_t)secthdr->addr), - secthdr->size, - secthdr->size, - secthdr->offset, - secthdr->offset, - __func__); - } + { + kxld_log(kKxldLogLinking, kKxldLogErr, + "sectname %s secthdr: %p addr %p size %02X %u offset %02X %u <%s>", + sect->sectname[0] ? 
sect->sectname : "none", + (void *) secthdr, + (void *) ((uint64_t)secthdr->addr), + secthdr->size, + secthdr->size, + secthdr->offset, + secthdr->offset, + __func__); + } #endif - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_ILP32 */ @@ -547,61 +551,61 @@ finish: /******************************************************************************* *******************************************************************************/ static kern_return_t -sect_export_macho_header_64(const KXLDSect *sect, u_char *buf, +sect_export_macho_header_64(const KXLDSect *sect, u_char *buf, u_long *header_offset, u_long header_size, u_long data_offset) { - kern_return_t rval = KERN_FAILURE; - struct section_64 *secthdr = NULL; - - check(sect); - check(buf); - check(header_offset); - - - require_action(sizeof(*secthdr) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - secthdr = (struct section_64 *) ((void *) (buf + *header_offset)); - *header_offset += sizeof(*secthdr); - - /* Initalize header */ - - strlcpy(secthdr->sectname, sect->sectname, sizeof(secthdr->sectname)); - strlcpy(secthdr->segname, sect->segname, sizeof(secthdr->segname)); - secthdr->addr = (uint64_t) sect->link_addr; - secthdr->size = (uint64_t) sect->size; - secthdr->offset = (uint32_t) ((sect->data) ? data_offset : 0); - secthdr->align = sect->align; - secthdr->reloff = 0; - secthdr->nreloc = 0; - secthdr->flags = sect->flags; - secthdr->reserved1 = sect->reserved1; - secthdr->reserved2 = sect->reserved2; + kern_return_t rval = KERN_FAILURE; + struct section_64 *secthdr = NULL; + + check(sect); + check(buf); + check(header_offset); + + + require_action(sizeof(*secthdr) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); + secthdr = (struct section_64 *) ((void *) (buf + *header_offset)); + *header_offset += sizeof(*secthdr); + + /* Initalize header */ + + strlcpy(secthdr->sectname, sect->sectname, sizeof(secthdr->sectname)); + strlcpy(secthdr->segname, sect->segname, sizeof(secthdr->segname)); + secthdr->addr = (uint64_t) sect->link_addr; + secthdr->size = (uint64_t) sect->size; + secthdr->offset = (uint32_t) ((sect->data) ? data_offset : 0); + secthdr->align = sect->align; + secthdr->reloff = 0; + secthdr->nreloc = 0; + secthdr->flags = sect->flags; + secthdr->reserved1 = sect->reserved1; + secthdr->reserved2 = sect->reserved2; #if SPLIT_KEXTS_DEBUG - kxld_log(kKxldLogLinking, kKxldLogErr, - " %p >>> Start of %s secthdr (size %lu) <%s>", - (void *) secthdr, - sect->sectname[0] ? sect->sectname : "none", - sizeof(*secthdr), - __func__); - kxld_log(kKxldLogLinking, kKxldLogErr, - " %p <<< End of %s secthdr <%s>", - (void *) ((u_char *)secthdr + sizeof(*secthdr)), - sect->sectname[0] ? sect->sectname : "none", - __func__); - kxld_log(kKxldLogLinking, kKxldLogErr, - " secthdr: addr %p size %llu offset %u sectname %s <%s>", - (void *) secthdr->addr, - secthdr->size, - secthdr->offset, - sect->sectname[0] ? sect->sectname : "none", - __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + " %p >>> Start of %s secthdr (size %lu) <%s>", + (void *) secthdr, + sect->sectname[0] ? sect->sectname : "none", + sizeof(*secthdr), + __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + " %p <<< End of %s secthdr <%s>", + (void *) ((u_char *)secthdr + sizeof(*secthdr)), + sect->sectname[0] ? 
sect->sectname : "none", + __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + " secthdr: addr %p size %llu offset %u sectname %s <%s>", + (void *) secthdr->addr, + secthdr->size, + secthdr->offset, + sect->sectname[0] ? sect->sectname : "none", + __func__); #endif - - rval = KERN_SUCCESS; + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_LP64 */ @@ -611,12 +615,14 @@ finish: kxld_size_t kxld_sect_grow(KXLDSect *sect, kxld_size_t nbytes, u_int align) { - kxld_size_t size = kxld_align_address(sect->size, align); + kxld_size_t size = kxld_align_address(sect->size, align); - if (align > sect->align) sect->align = align; - sect->size = size + nbytes; + if (align > sect->align) { + sect->align = align; + } + sect->size = size + nbytes; - return size; + return size; } #endif /* KXLD_USER_OR_COMMON */ @@ -626,65 +632,67 @@ void kxld_sect_relocate(KXLDSect *sect, kxld_addr_t link_addr) { #if SPLIT_KEXTS_DEBUG - { - kxld_log(kKxldLogLinking, kKxldLogErr, - "%p >>> Start of %s section (sect->size %llu) <%s>", - (void *) (kxld_sect_align_address(sect, sect->link_addr + link_addr)), - sect->sectname[0] ? sect->sectname : "none", - sect->size, - __func__); - kxld_log(kKxldLogLinking, kKxldLogErr, - "%p <<< End of %s section <%s>", - (void *) (kxld_sect_align_address(sect, sect->link_addr + link_addr) + sect->size), - sect->sectname[0] ? sect->sectname : "none", - __func__); - } + { + kxld_log(kKxldLogLinking, kKxldLogErr, + "%p >>> Start of %s section (sect->size %llu) <%s>", + (void *) (kxld_sect_align_address(sect, sect->link_addr + link_addr)), + sect->sectname[0] ? sect->sectname : "none", + sect->size, + __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + "%p <<< End of %s section <%s>", + (void *) (kxld_sect_align_address(sect, sect->link_addr + link_addr) + sect->size), + sect->sectname[0] ? 
sect->sectname : "none", + __func__); + } #endif - sect->link_addr = kxld_sect_align_address(sect, - sect->link_addr + link_addr); + sect->link_addr = kxld_sect_align_address(sect, + sect->link_addr + link_addr); } #if KXLD_USER_OR_GOT /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_sect_populate_got(KXLDSect *sect, KXLDSymtab *symtab, +kxld_sect_populate_got(KXLDSect *sect, KXLDSymtab *symtab, boolean_t swap __unused) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - kxld_addr_t *entry = NULL; - kxld_addr_t entry_addr = 0; - - check(sect); - check(symtab); - require(streq_safe(sect->segname, KXLD_SEG_GOT, sizeof(KXLD_SEG_GOT)), - finish); - require(streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT)), - finish); - - kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_got, FALSE); - - entry = (kxld_addr_t *) sect->data; - entry_addr = sect->link_addr; - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - *entry = sym->link_addr; - sym->got_addr = entry_addr; + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + kxld_addr_t *entry = NULL; + kxld_addr_t entry_addr = 0; + + check(sect); + check(symtab); + require(streq_safe(sect->segname, KXLD_SEG_GOT, sizeof(KXLD_SEG_GOT)), + finish); + require(streq_safe(sect->sectname, KXLD_SECT_GOT, sizeof(KXLD_SECT_GOT)), + finish); + + kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_got, FALSE); + + entry = (kxld_addr_t *) sect->data; + entry_addr = sect->link_addr; + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + *entry = sym->link_addr; + sym->got_addr = entry_addr; #if !KERNEL - if (swap) *entry = OSSwapInt64(*entry); + if (swap) { + *entry = OSSwapInt64(*entry); + } #endif /* !KERNEL */ - ++entry; - entry_addr += sizeof(*entry); - } + ++entry; + entry_addr += sizeof(*entry); + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_GOT */ @@ -693,18 +701,17 @@ finish: kern_return_t kxld_sect_process_relocs(KXLDSect *sect, KXLDRelocator *relocator) { - kern_return_t rval = KERN_FAILURE; - KXLDReloc *reloc = NULL; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDReloc *reloc = NULL; + u_int i = 0; - for (i = 0; i < sect->relocs.nitems; ++i) { - reloc = kxld_array_get_item(§->relocs, i); - rval = kxld_relocator_process_sect_reloc(relocator, reloc, sect); - require_noerr(rval, finish); - } + for (i = 0; i < sect->relocs.nitems; ++i) { + reloc = kxld_array_get_item(§->relocs, i); + rval = kxld_relocator_process_sect_reloc(relocator, reloc, sect); + require_noerr(rval, finish); + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } - diff --git a/libkern/kxld/kxld_sect.h b/libkern/kxld/kxld_sect.h index f3bbdae93..4045eb7db 100644 --- a/libkern/kxld/kxld_sect.h +++ b/libkern/kxld/kxld_sect.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_SECT_H_ @@ -48,19 +48,19 @@ struct section_64; typedef struct kxld_sect KXLDSect; struct kxld_sect { - char sectname[16]; // The name of the section - char segname[16]; // The segment to which the section belongs - u_char *data; // The start of the section in memory - KXLDArray relocs; // The section's relocation entries - kxld_addr_t base_addr; // The base address of the section - kxld_addr_t link_addr; // The relocated address of the section - kxld_size_t size; // The size of the section - u_int sectnum; // The number of the section (for relocation) - u_int flags; // Flags describing the section - u_int align; // The section's alignment as a power of 2 - u_int reserved1; // Dependent on the section type - u_int reserved2; // Dependent on the section type - boolean_t allocated; // This section's data is allocated internally + char sectname[16]; // The name of the section + char segname[16]; // The segment to which the section belongs + u_char *data; // The start of the section in memory + KXLDArray relocs; // The section's relocation entries + kxld_addr_t base_addr; // The base address of the section + kxld_addr_t link_addr; // The relocated address of the section + kxld_size_t size; // The size of the section + u_int sectnum; // The number of the section (for relocation) + u_int flags; // Flags describing the section + u_int align; // The section's alignment as a power of 2 + u_int reserved1; // Dependent on the section type + u_int reserved2; // Dependent on the section type + boolean_t allocated; // This section's data is allocated internally }; /******************************************************************************* @@ -72,8 +72,8 @@ struct kxld_sect { * section offset to point to the next section header. */ kern_return_t kxld_sect_init_from_macho_32(KXLDSect *sect, u_char *macho, - u_long *sect_offset, u_int sectnum, const struct kxld_relocator *relocator) - __attribute__((nonnull, visibility("hidden"))); + u_long *sect_offset, u_int sectnum, const struct kxld_relocator *relocator) +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_ILP32 */ #if KXLD_USER_OR_LP64 @@ -81,8 +81,8 @@ kern_return_t kxld_sect_init_from_macho_32(KXLDSect *sect, u_char *macho, * section offset to point to the next section header. 
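Both kxld_sect_init_from_macho_32 and its 64-bit counterpart advance *sect_offset past the header they consume, so a caller can walk every section header of a segment with a single running offset. A sketch of that loop for the 32-bit case, assuming sects is a KXLDArray of KXLDSect sized to src->nsects and seg_offset points at the enclosing segment_command (both names are assumptions, not kxld source):

    u_long sect_offset = seg_offset + sizeof(struct segment_command);
    u_int i = 0;

    for (i = 0; i < src->nsects; ++i) {
        KXLDSect *sect = kxld_array_get_item(&sects, i);

        rval = kxld_sect_init_from_macho_32(sect, macho, &sect_offset,
            i, relocator);
        require_noerr(rval, finish);
    }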
*/ kern_return_t kxld_sect_init_from_macho_64(KXLDSect *sect, u_char *macho, - u_long *sect_offset, u_int sectnum, const struct kxld_relocator *relocator) - __attribute__((nonnull, visibility("hidden"))); + u_long *sect_offset, u_int sectnum, const struct kxld_relocator *relocator) +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_LP64 */ #if KXLD_USER_OR_GOT @@ -90,23 +90,23 @@ kern_return_t kxld_sect_init_from_macho_64(KXLDSect *sect, u_char *macho, * have. */ kern_return_t kxld_sect_init_got(KXLDSect *sect, u_int ngots) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_GOT */ #if KXLD_USER_OR_COMMON /* Initializes a zerofill section of the specified size and alignment */ -void kxld_sect_init_zerofill(KXLDSect *sect, const char *segname, +void kxld_sect_init_zerofill(KXLDSect *sect, const char *segname, const char *sectname, kxld_size_t size, u_int align) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_COMMON */ - + /* Clears the section object */ void kxld_sect_clear(KXLDSect *sect) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /* Denitializes the section object and frees its array of relocs */ void kxld_sect_deinit(KXLDSect *sect) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors @@ -114,42 +114,42 @@ void kxld_sect_deinit(KXLDSect *sect) /* Gets the number of relocation entries in the section */ u_int kxld_sect_get_num_relocs(const KXLDSect *sect) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /* Returns the address parameter adjusted to the minimum alignment required by * the section. */ kxld_addr_t kxld_sect_align_address(const KXLDSect *sect, kxld_addr_t address) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /* Returns the space required by the exported Mach-O header */ u_long kxld_sect_get_macho_header_size(boolean_t is_32_bit) - __attribute__((const, visibility("hidden"))); +__attribute__((const, visibility("hidden"))); /* Returns the space required by the exported Mach-O data */ u_long kxld_sect_get_macho_data_size(const KXLDSect *sect) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #if KXLD_USER_OR_LP64 /* Returns the number of GOT entries required by relocation entries in the * given section. 
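The two export entry points declared below differ only in where a section's data is placed: the file-buffer path packs each section at the next suitably aligned offset in the output file, while the VM path puts it at its relocated address relative to the image base. The core of the difference, mirroring the two definitions earlier in this diff:

    /* file-buffer export: pack at the next aligned file offset */
    *data_offset = (u_long) kxld_sect_align_address(sect, *data_offset);

    /* VM export: data lands at its relocated address in the image */
    data_offset = (u_long) (sect->link_addr - link_addr);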
*/ -u_int kxld_sect_get_ngots(const KXLDSect *sect, +u_int kxld_sect_get_ngots(const KXLDSect *sect, const struct kxld_relocator *relocator, const struct kxld_symtab *symtab) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_LP64 */ kern_return_t kxld_sect_export_macho_to_file_buffer(const KXLDSect *sect, u_char *buf, - u_long *header_offset, u_long header_size, u_long *data_offset, + u_long *header_offset, u_long header_size, u_long *data_offset, u_long data_size, boolean_t is_32_bit) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_sect_export_macho_to_vm(const KXLDSect *sect, u_char *buf, - u_long *header_offset, - u_long header_size, - kxld_addr_t link_addr, - u_long data_size, - boolean_t is_32_bit) + u_long *header_offset, + u_long header_size, + kxld_addr_t link_addr, + u_long data_size, + boolean_t is_32_bit) __attribute__((nonnull, visibility("hidden"))); /******************************************************************************* @@ -158,27 +158,26 @@ __attribute__((nonnull, visibility("hidden"))); /* Relocates the section to the given link address */ void kxld_sect_relocate(KXLDSect *sect, kxld_addr_t link_addr) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #if KXLD_USER_OR_COMMON /* Adds a number of bytes to the section's size. Returns the size of the * section before it was grown. */ kxld_size_t kxld_sect_grow(KXLDSect *sect, kxld_size_t nbytes, u_int align) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_COMMON */ #if KXLD_USER_OR_GOT /* Popluates the entries of a GOT section */ kern_return_t kxld_sect_populate_got(KXLDSect *sect, struct kxld_symtab *symtab, boolean_t swap) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_GOT */ /* Processes all of a section's relocation entries */ -kern_return_t kxld_sect_process_relocs(KXLDSect *sect, +kern_return_t kxld_sect_process_relocs(KXLDSect *sect, struct kxld_relocator *relocator) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* _KXLD_SECT_H_ */ - diff --git a/libkern/kxld/kxld_seg.c b/libkern/kxld/kxld_seg.c index b10193b3f..79242262e 100644 --- a/libkern/kxld/kxld_seg.c +++ b/libkern/kxld/kxld_seg.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -55,7 +55,7 @@ extern boolean_t isOldInterface; #if KXLD_USER_OR_OBJECT static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order); -static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index, +static void reorder_section(KXLDArray *sects, u_int *sect_reorder_index, KXLDSect **reorder_buffer, u_int reorder_buffer_index); #endif /* KXLD_USER_OR_OBJECT */ @@ -80,26 +80,26 @@ static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx); kern_return_t kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src) { - kern_return_t rval = KERN_FAILURE; - check(seg); - check(src); + kern_return_t rval = KERN_FAILURE; + check(seg); + check(src); - strlcpy(seg->segname, src->segname, sizeof(seg->segname)); - seg->base_addr = src->vmaddr; - seg->link_addr = src->vmaddr; - seg->vmsize = src->vmsize; - seg->fileoff = src->fileoff; - seg->maxprot = src->maxprot; - seg->initprot = src->initprot; - seg->flags = src->flags; + strlcpy(seg->segname, src->segname, sizeof(seg->segname)); + seg->base_addr = src->vmaddr; + seg->link_addr = src->vmaddr; + seg->vmsize = src->vmsize; + seg->fileoff = src->fileoff; + seg->maxprot = src->maxprot; + seg->initprot = src->initprot; + seg->flags = src->flags; - rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects); - require_noerr(rval, finish); + rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_ILP32 */ @@ -109,27 +109,27 @@ finish: kern_return_t kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src) { - kern_return_t rval = KERN_FAILURE; - check(seg); - check(src); - - strlcpy(seg->segname, src->segname, sizeof(seg->segname)); - seg->base_addr = src->vmaddr; - seg->link_addr = src->vmaddr; - seg->vmsize = src->vmsize; - - seg->fileoff = src->fileoff; - seg->maxprot = src->maxprot; - seg->initprot = src->initprot; - seg->flags = src->flags; - - rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects); - require_noerr(rval, finish); - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + check(seg); + check(src); + + strlcpy(seg->segname, src->segname, sizeof(seg->segname)); + seg->base_addr = src->vmaddr; + seg->link_addr = src->vmaddr; + seg->vmsize = src->vmsize; + + seg->fileoff = src->fileoff; + seg->maxprot = src->maxprot; + seg->initprot = src->initprot; + seg->flags = src->flags; + + rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), src->nsects); + require_noerr(rval, finish); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_LP64 */ @@ -139,39 +139,39 @@ finish: kern_return_t kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray) { - kern_return_t rval = KERN_FAILURE; - KXLDSeg *seg = NULL; - KXLDSect *sect = NULL; - KXLDSect **sectp = NULL; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDSeg *seg = NULL; + KXLDSect *sect = NULL; + KXLDSect **sectp = NULL; + u_int i = 0; - /* Initialize the segment array to one segment */ + /* Initialize the segment array to one segment */ - rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1); - require_noerr(rval, finish); + rval = kxld_array_init(segarray, sizeof(KXLDSeg), 1); + require_noerr(rval, finish); - /* Initialize the 
segment */ + /* Initialize the segment */ - seg = kxld_array_get_item(segarray, 0); - seg->initprot = VM_PROT_ALL; - seg->maxprot = VM_PROT_ALL; - seg->link_addr = 0; + seg = kxld_array_get_item(segarray, 0); + seg->initprot = VM_PROT_ALL; + seg->maxprot = VM_PROT_ALL; + seg->link_addr = 0; - /* Add the sections to the segment */ + /* Add the sections to the segment */ - rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems); - require_noerr(rval, finish); + rval = kxld_array_init(&seg->sects, sizeof(KXLDSect *), sectarray->nitems); + require_noerr(rval, finish); - for (i = 0; i < sectarray->nitems; ++i) { - sect = kxld_array_get_item(sectarray, i); - sectp = kxld_array_get_item(&seg->sects, i); + for (i = 0; i < sectarray->nitems; ++i) { + sect = kxld_array_get_item(sectarray, i); + sectp = kxld_array_get_item(&seg->sects, i); - *sectp = sect; - } + *sectp = sect; + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -180,44 +180,44 @@ kern_return_t kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order, u_long hdrsize) { - kern_return_t rval = KERN_FAILURE; - KXLDSeg *seg = NULL; - KXLDSect *sect = NULL; - u_long sect_offset = 0; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDSeg *seg = NULL; + KXLDSect *sect = NULL; + u_long sect_offset = 0; + u_int i = 0; + + check(segarray); + check(section_order); + require_action(segarray->nitems == 1, finish, rval = KERN_FAILURE); - check(segarray); - check(section_order); - require_action(segarray->nitems == 1, finish, rval=KERN_FAILURE); + seg = kxld_array_get_item(segarray, 0); - seg = kxld_array_get_item(segarray, 0); - - /* Reorder the sections */ - - rval = reorder_sections(seg, section_order); - require_noerr(rval, finish); + /* Reorder the sections */ - /* Set the initial link address at the end of the header pages */ + rval = reorder_sections(seg, section_order); + require_noerr(rval, finish); - seg->link_addr = kxld_round_page_cross_safe(hdrsize); + /* Set the initial link address at the end of the header pages */ - /* Fix up all of the section addresses */ + seg->link_addr = kxld_round_page_cross_safe(hdrsize); - sect_offset = (u_long) seg->link_addr; - for (i = 0; i < seg->sects.nitems; ++i) { - sect = *(KXLDSect **)kxld_array_get_item(&seg->sects, i); + /* Fix up all of the section addresses */ - sect->link_addr = kxld_sect_align_address(sect, sect_offset); - sect_offset = (u_long) (sect->link_addr + sect->size); - } + sect_offset = (u_long) seg->link_addr; + for (i = 0; i < seg->sects.nitems; ++i) { + sect = *(KXLDSect **)kxld_array_get_item(&seg->sects, i); - /* Finish initializing the segment */ + sect->link_addr = kxld_sect_align_address(sect, sect_offset); + sect_offset = (u_long) (sect->link_addr + sect->size); + } - seg->vmsize = kxld_round_page_cross_safe(sect_offset) - seg->link_addr; + /* Finish initializing the segment */ - rval = KERN_SUCCESS; + seg->vmsize = kxld_round_page_cross_safe(sect_offset) - seg->link_addr; + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -268,144 +268,147 @@ finish: static kern_return_t reorder_sections(KXLDSeg *seg, KXLDArray *section_order) { - kern_return_t rval = KERN_FAILURE; - KXLDSect *sect = NULL; - KXLDSect **reorder_buffer = NULL; - KXLDSectionName *section_name = NULL; - const char *segname = NULL; - u_int sect_index 
= 0, legacy_index = 0, sect_reorder_index = 0; - u_int i = 0, j = 0; - u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0; - u_int nsects = 0; - - check(seg); - check(section_order); - - /* Allocate the reorder buffer with enough space to hold all of the - * sections. - */ - - reorder_buffer = kxld_alloc( - seg->sects.nitems * sizeof(*reorder_buffer)); - require_action(reorder_buffer, finish, rval=KERN_RESOURCE_SHORTAGE); - - while (legacy_index < section_order->nitems) { - - /* Find the next group of sections with a common segment in the - * section_order array. - */ - - legacy_start = legacy_index++; - legacy_end = legacy_index; - - section_name = kxld_array_get_item(section_order, legacy_start); - segname = section_name->segname; - while (legacy_index < section_order->nitems) { - section_name = kxld_array_get_item(section_order, legacy_index); - if (!streq_safe(segname, section_name->segname, - sizeof(section_name->segname))) - { - break; - } - - ++legacy_index; - ++legacy_end; - } - - /* Find a group of sections in the kext that match the current - * section_order segment. - */ - - sect_start = sect_index; - sect_end = sect_index; - - while (sect_index < seg->sects.nitems) { - sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index); - if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) { - break; - } - - ++sect_index; - ++sect_end; - } - nsects = sect_end - sect_start; - - if (!nsects) continue; - - /* Populate the reorder buffer with the current group of kext sections */ - - for (i = sect_start; i < sect_end; ++i) { - reorder_buffer[i - sect_start] = - *(KXLDSect **) kxld_array_get_item(&seg->sects, i); - } - - /* For each section_order section, scan the reorder buffer for a matching - * kext section. If one is found, copy it into the next slot in the - * segment's section index. - */ - - sect_reorder_index = sect_start; - for (i = legacy_start; i < legacy_end; ++i) { - section_name = kxld_array_get_item(section_order, i); - sect = NULL; - - for (j = 0; j < nsects; ++j) { - sect = reorder_buffer[j]; - if (!sect) continue; - - if (streq_safe(section_name->sectname, sect->sectname, - sizeof(section_name->sectname))) - { - break; - } - - sect = NULL; - } - - if (sect) { - (void) reorder_section(&seg->sects, §_reorder_index, - reorder_buffer, j); - } - } - - /* If any sections remain in the reorder buffer, they are not specified - * in the section_order array, so append them to the section index in - * in the order they are found. - */ - - for (i = 0; i < nsects; ++i) { - if (!reorder_buffer[i]) continue; - reorder_section(&seg->sects, §_reorder_index, reorder_buffer, i); - } - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSect *sect = NULL; + KXLDSect **reorder_buffer = NULL; + KXLDSectionName *section_name = NULL; + const char *segname = NULL; + u_int sect_index = 0, legacy_index = 0, sect_reorder_index = 0; + u_int i = 0, j = 0; + u_int sect_start = 0, sect_end = 0, legacy_start = 0, legacy_end = 0; + u_int nsects = 0; + + check(seg); + check(section_order); + + /* Allocate the reorder buffer with enough space to hold all of the + * sections. + */ + + reorder_buffer = kxld_alloc( + seg->sects.nitems * sizeof(*reorder_buffer)); + require_action(reorder_buffer, finish, rval = KERN_RESOURCE_SHORTAGE); + + while (legacy_index < section_order->nitems) { + /* Find the next group of sections with a common segment in the + * section_order array. 
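The net effect of this pass is a stable reordering: sections named by section_order are emitted first, in that order, and any kext sections the order list does not mention keep their discovery order at the end. A self-contained toy model of the idea (simplified types and names, not kxld code):

    #include <string.h>

    static void
    toy_reorder(const char *order[], int norder,
        const char *sects[], int nsects, const char *out[])
    {
        int emitted = 0;
        int i, j;

        for (i = 0; i < norder; ++i) {         /* ordered matches first */
            for (j = 0; j < nsects; ++j) {
                if (sects[j] && !strcmp(order[i], sects[j])) {
                    out[emitted++] = sects[j];
                    sects[j] = NULL;           /* consume the slot */
                    break;
                }
            }
        }
        for (j = 0; j < nsects; ++j) {         /* then the leftovers */
            if (sects[j]) {
                out[emitted++] = sects[j];
            }
        }
    }

Consuming matched slots by nulling them guarantees each kext section is emitted exactly once, the same invariant the reorder_buffer provides here.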
+ */ + + legacy_start = legacy_index++; + legacy_end = legacy_index; + + section_name = kxld_array_get_item(section_order, legacy_start); + segname = section_name->segname; + while (legacy_index < section_order->nitems) { + section_name = kxld_array_get_item(section_order, legacy_index); + if (!streq_safe(segname, section_name->segname, + sizeof(section_name->segname))) { + break; + } + + ++legacy_index; + ++legacy_end; + } + + /* Find a group of sections in the kext that match the current + * section_order segment. + */ + + sect_start = sect_index; + sect_end = sect_index; + + while (sect_index < seg->sects.nitems) { + sect = *(KXLDSect **) kxld_array_get_item(&seg->sects, sect_index); + if (!streq_safe(segname, sect->segname, sizeof(sect->segname))) { + break; + } + + ++sect_index; + ++sect_end; + } + nsects = sect_end - sect_start; + + if (!nsects) { + continue; + } + + /* Populate the reorder buffer with the current group of kext sections */ + + for (i = sect_start; i < sect_end; ++i) { + reorder_buffer[i - sect_start] = + *(KXLDSect **) kxld_array_get_item(&seg->sects, i); + } + + /* For each section_order section, scan the reorder buffer for a matching + * kext section. If one is found, copy it into the next slot in the + * segment's section index. + */ + + sect_reorder_index = sect_start; + for (i = legacy_start; i < legacy_end; ++i) { + section_name = kxld_array_get_item(section_order, i); + sect = NULL; + + for (j = 0; j < nsects; ++j) { + sect = reorder_buffer[j]; + if (!sect) { + continue; + } + + if (streq_safe(section_name->sectname, sect->sectname, + sizeof(section_name->sectname))) { + break; + } + + sect = NULL; + } + + if (sect) { + (void) reorder_section(&seg->sects, §_reorder_index, + reorder_buffer, j); + } + } + + /* If any sections remain in the reorder buffer, they are not specified + * in the section_order array, so append them to the section index in + * in the order they are found. 
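Taken together with the ordered-match loop above, the append rule just described produces, for example (section names illustrative):

    /* section_order:        __text, __const                    */
    /* kext __TEXT sections: __const, __text, __cstring         */
    /* resulting order:      __text, __const, __cstring         */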
+ */ + + for (i = 0; i < nsects; ++i) { + if (!reorder_buffer[i]) { + continue; + } + reorder_section(&seg->sects, §_reorder_index, reorder_buffer, i); + } + } + + rval = KERN_SUCCESS; finish: - if (reorder_buffer) { - kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer)); - reorder_buffer = NULL; - } + if (reorder_buffer) { + kxld_free(reorder_buffer, seg->sects.nitems * sizeof(*reorder_buffer)); + reorder_buffer = NULL; + } - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ static void -reorder_section(KXLDArray *sects, u_int *sect_reorder_index, +reorder_section(KXLDArray *sects, u_int *sect_reorder_index, KXLDSect **reorder_buffer, u_int reorder_buffer_index) { - KXLDSect **tmp = NULL; + KXLDSect **tmp = NULL; - tmp = kxld_array_get_item(sects, *sect_reorder_index); + tmp = kxld_array_get_item(sects, *sect_reorder_index); - *tmp = reorder_buffer[reorder_buffer_index]; - reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index; - reorder_buffer[reorder_buffer_index] = NULL; + *tmp = reorder_buffer[reorder_buffer_index]; + reorder_buffer[reorder_buffer_index]->sectnum = *sect_reorder_index; + reorder_buffer[reorder_buffer_index] = NULL; - ++(*sect_reorder_index); + ++(*sect_reorder_index); } /******************************************************************************* @@ -413,25 +416,25 @@ reorder_section(KXLDArray *sects, u_int *sect_reorder_index, kern_return_t kxld_seg_init_linkedit(KXLDArray *segs) { - kern_return_t rval = KERN_FAILURE; - KXLDSeg *seg = NULL; - KXLDSeg *le = NULL; - - rval = kxld_array_resize(segs, 2); - require_noerr(rval, finish); + kern_return_t rval = KERN_FAILURE; + KXLDSeg *seg = NULL; + KXLDSeg *le = NULL; + + rval = kxld_array_resize(segs, 2); + require_noerr(rval, finish); - seg = kxld_array_get_item(segs, 0); - le = kxld_array_get_item(segs, 1); + seg = kxld_array_get_item(segs, 0); + le = kxld_array_get_item(segs, 1); - strlcpy(le->segname, SEG_LINKEDIT, sizeof(le->segname)); - le->link_addr = kxld_round_page_cross_safe(seg->link_addr + seg->vmsize); - le->maxprot = VM_PROT_ALL; - le->initprot = VM_PROT_DEFAULT; + strlcpy(le->segname, SEG_LINKEDIT, sizeof(le->segname)); + le->link_addr = kxld_round_page_cross_safe(seg->link_addr + seg->vmsize); + le->maxprot = VM_PROT_ALL; + le->initprot = VM_PROT_DEFAULT; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_OBJECT */ @@ -440,41 +443,41 @@ finish: void kxld_seg_clear(KXLDSeg *seg) { - check(seg); - - bzero(seg->segname, sizeof(seg->segname)); - seg->base_addr = 0; - seg->link_addr = 0; - seg->vmsize = 0; - seg->flags = 0; - seg->maxprot = 0; - seg->initprot = 0; - - /* Don't clear the individual sections here because kxld_kext.c will take - * care of that. - */ - kxld_array_clear(&seg->sects); + check(seg); + + bzero(seg->segname, sizeof(seg->segname)); + seg->base_addr = 0; + seg->link_addr = 0; + seg->vmsize = 0; + seg->flags = 0; + seg->maxprot = 0; + seg->initprot = 0; + + /* Don't clear the individual sections here because kxld_kext.c will take + * care of that. 
+ */ + kxld_array_clear(&seg->sects); } /******************************************************************************* *******************************************************************************/ -void +void kxld_seg_deinit(KXLDSeg *seg) { - check(seg); + check(seg); - kxld_array_deinit(&seg->sects); - bzero(seg, sizeof(*seg)); + kxld_array_deinit(&seg->sects); + bzero(seg, sizeof(*seg)); } /******************************************************************************* *******************************************************************************/ -kxld_size_t +kxld_size_t kxld_seg_get_vmsize(const KXLDSeg *seg) { - check(seg); - - return seg->vmsize; + check(seg); + + return seg->vmsize; } /******************************************************************************* @@ -482,18 +485,18 @@ kxld_seg_get_vmsize(const KXLDSeg *seg) u_long kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit) { - u_long size = 0; - - check(seg); - - if (is_32_bit) { - size += sizeof(struct segment_command); - } else { - size += sizeof(struct segment_command_64); - } - size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit); - - return size; + u_long size = 0; + + check(seg); + + if (is_32_bit) { + size += sizeof(struct segment_command); + } else { + size += sizeof(struct segment_command_64); + } + size += seg->sects.nitems * kxld_sect_get_macho_header_size(is_32_bit); + + return size; } /******************************************************************************* @@ -503,86 +506,85 @@ kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit) u_long kxld_seg_get_macho_data_size(const KXLDSeg *seg) { - u_long size = 0; - u_int i = 0; - KXLDSect *sect = NULL; + u_long size = 0; + u_int i = 0; + KXLDSect *sect = NULL; - check(seg); + check(seg); - for (i = 0; i < seg->sects.nitems; ++i) { - sect = get_sect_by_index(seg, i); - size = (u_long) kxld_sect_align_address(sect, size); - size += kxld_sect_get_macho_data_size(sect); - } + for (i = 0; i < seg->sects.nitems; ++i) { + sect = get_sect_by_index(seg, i); + size = (u_long) kxld_sect_align_address(sect, size); + size += kxld_sect_get_macho_data_size(sect); + } - return kxld_round_page_cross_safe(size); + return kxld_round_page_cross_safe(size); } #endif /******************************************************************************* *******************************************************************************/ -static KXLDSect * +static KXLDSect * get_sect_by_index(const KXLDSeg *seg, u_int idx) { - check(seg); + check(seg); - return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx); + return *(KXLDSect **) kxld_array_get_item(&seg->sects, idx); } /******************************************************************************* *******************************************************************************/ kern_return_t kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf, - u_long *header_offset, u_long header_size, + u_long *header_offset, u_long header_size, u_long *data_offset, u_long data_size, boolean_t is_32_bit) { - kern_return_t rval = KERN_FAILURE; - KXLDSect *sect = NULL; - u_long base_data_offset = *data_offset; - u_int i = 0; - struct segment_command *hdr32 = - (struct segment_command *) ((void *) (buf + *header_offset)); - struct segment_command_64 *hdr64 = - (struct segment_command_64 *) ((void *) (buf + *header_offset)); + kern_return_t rval = KERN_FAILURE; + KXLDSect *sect = NULL; + u_long base_data_offset = *data_offset; + u_int i = 0; + struct segment_command *hdr32 
= + (struct segment_command *) ((void *) (buf + *header_offset)); + struct segment_command_64 *hdr64 = + (struct segment_command_64 *) ((void *) (buf + *header_offset)); - check(seg); - check(buf); - check(header_offset); - check(data_offset); + check(seg); + check(buf); + check(header_offset); + check(data_offset); - /* Write out the header */ + /* Write out the header */ - KXLD_3264_FUNC(is_32_bit, rval, - seg_export_macho_header_32, seg_export_macho_header_64, - seg, buf, header_offset, header_size, *data_offset); - require_noerr(rval, finish); + KXLD_3264_FUNC(is_32_bit, rval, + seg_export_macho_header_32, seg_export_macho_header_64, + seg, buf, header_offset, header_size, *data_offset); + require_noerr(rval, finish); - /* Write out each section */ + /* Write out each section */ - for (i = 0; i < seg->sects.nitems; ++i) { - sect = get_sect_by_index(seg, i); + for (i = 0; i < seg->sects.nitems; ++i) { + sect = get_sect_by_index(seg, i); - rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset, - header_size, data_offset, data_size, is_32_bit); - require_noerr(rval, finish); - } + rval = kxld_sect_export_macho_to_file_buffer(sect, buf, header_offset, + header_size, data_offset, data_size, is_32_bit); + require_noerr(rval, finish); + } - /* Update the filesize */ + /* Update the filesize */ - if (is_32_bit) { - hdr32->filesize = (uint32_t) (*data_offset - base_data_offset); - } else { - hdr64->filesize = (uint64_t) (*data_offset - base_data_offset); - } + if (is_32_bit) { + hdr32->filesize = (uint32_t) (*data_offset - base_data_offset); + } else { + hdr64->filesize = (uint64_t) (*data_offset - base_data_offset); + } - *data_offset = (u_long)kxld_round_page_cross_safe(*data_offset); + *data_offset = (u_long)kxld_round_page_cross_safe(*data_offset); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; - + return rval; } @@ -590,49 +592,49 @@ finish: *******************************************************************************/ kern_return_t kxld_seg_export_macho_to_vm(const KXLDSeg *seg, - u_char *buf, - u_long *header_offset, - u_long header_size, - u_long data_size, - kxld_addr_t file_link_addr, - boolean_t is_32_bit) + u_char *buf, + u_long *header_offset, + u_long header_size, + u_long data_size, + kxld_addr_t file_link_addr, + boolean_t is_32_bit) { - kern_return_t rval = KERN_FAILURE; - KXLDSect * sect = NULL; - - // data_offset is used to set fileoff field in segment header - u_long data_offset; - u_int i = 0; + kern_return_t rval = KERN_FAILURE; + KXLDSect * sect = NULL; + + // data_offset is used to set fileoff field in segment header + u_long data_offset; + u_int i = 0; + + check(seg); + check(buf); + check(header_offset); - check(seg); - check(buf); - check(header_offset); - - data_offset = (u_long) (seg->link_addr - file_link_addr); + data_offset = (u_long) (seg->link_addr - file_link_addr); - /* Write out the header */ + /* Write out the header */ - KXLD_3264_FUNC(is_32_bit, rval, - seg_export_macho_header_32, seg_export_macho_header_64, - seg, - buf, - header_offset, header_size, data_offset); - require_noerr(rval, finish); + KXLD_3264_FUNC(is_32_bit, rval, + seg_export_macho_header_32, seg_export_macho_header_64, + seg, + buf, + header_offset, header_size, data_offset); + require_noerr(rval, finish); - /* Write out each section */ + /* Write out each section */ - for (i = 0; i < seg->sects.nitems; ++i) { - sect = get_sect_by_index(seg, i); + for (i = 0; i < seg->sects.nitems; ++i) { + sect = get_sect_by_index(seg, i); - rval = 
kxld_sect_export_macho_to_vm(sect, buf, header_offset, - header_size, file_link_addr, data_size, is_32_bit); - require_noerr(rval, finish); - } + rval = kxld_sect_export_macho_to_vm(sect, buf, header_offset, + header_size, file_link_addr, data_size, is_32_bit); + require_noerr(rval, finish); + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_USER_OR_ILP32 @@ -642,134 +644,134 @@ static kern_return_t seg_export_macho_header_32(const KXLDSeg *seg, u_char *buf, u_long *header_offset, u_long header_size, u_long data_offset) { - kern_return_t rval = KERN_FAILURE; - struct segment_command *seghdr = NULL; - - check(seg); - check(buf); - check(header_offset); - - require_action(sizeof(*seghdr) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - seghdr = (struct segment_command *) ((void *) (buf + *header_offset)); - *header_offset += sizeof(*seghdr); - - seghdr->cmd = LC_SEGMENT; - seghdr->cmdsize = (uint32_t) sizeof(*seghdr); - seghdr->cmdsize += - (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE)); - strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname)); - seghdr->vmaddr = (uint32_t) seg->link_addr; - seghdr->vmsize = (uint32_t) seg->vmsize; - seghdr->fileoff = (uint32_t) data_offset; - seghdr->filesize = (uint32_t) seg->vmsize; - seghdr->maxprot = seg->maxprot; - seghdr->initprot = seg->initprot; - seghdr->nsects = seg->sects.nitems; - seghdr->flags = 0; + kern_return_t rval = KERN_FAILURE; + struct segment_command *seghdr = NULL; + + check(seg); + check(buf); + check(header_offset); + + require_action(sizeof(*seghdr) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); + seghdr = (struct segment_command *) ((void *) (buf + *header_offset)); + *header_offset += sizeof(*seghdr); + + seghdr->cmd = LC_SEGMENT; + seghdr->cmdsize = (uint32_t) sizeof(*seghdr); + seghdr->cmdsize += + (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(TRUE)); + strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname)); + seghdr->vmaddr = (uint32_t) seg->link_addr; + seghdr->vmsize = (uint32_t) seg->vmsize; + seghdr->fileoff = (uint32_t) data_offset; + seghdr->filesize = (uint32_t) seg->vmsize; + seghdr->maxprot = seg->maxprot; + seghdr->initprot = seg->initprot; + seghdr->nsects = seg->sects.nitems; + seghdr->flags = 0; #if SPLIT_KEXTS_DEBUG - { - kxld_log(kKxldLogLinking, kKxldLogErr, - "segname %s seghdr %p vmaddr %p vmsize 0x%02X %u fileoff 0x%02X %u <%s>", - seg->segname[0] ? seg->segname : "none", - (void *) seghdr, - (void *) ((uint64_t)seghdr->vmaddr), - seghdr->vmsize, - seghdr->vmsize, - seghdr->fileoff, - seghdr->fileoff, - __func__); - } + { + kxld_log(kKxldLogLinking, kKxldLogErr, + "segname %s seghdr %p vmaddr %p vmsize 0x%02X %u fileoff 0x%02X %u <%s>", + seg->segname[0] ? 
seg->segname : "none", + (void *) seghdr, + (void *) ((uint64_t)seghdr->vmaddr), + seghdr->vmsize, + seghdr->vmsize, + seghdr->fileoff, + seghdr->fileoff, + __func__); + } #endif - - rval = KERN_SUCCESS; + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_ILP32 */ -#if KXLD_USER_OR_LP64 +#if KXLD_USER_OR_LP64 /******************************************************************************* *******************************************************************************/ static kern_return_t seg_export_macho_header_64(const KXLDSeg *seg, u_char *buf, u_long *header_offset, u_long header_size, u_long data_offset) { - kern_return_t rval = KERN_FAILURE; - struct segment_command_64 *seghdr = NULL; + kern_return_t rval = KERN_FAILURE; + struct segment_command_64 *seghdr = NULL; - check(seg); - check(buf); - check(header_offset); + check(seg); + check(buf); + check(header_offset); + + require_action(sizeof(*seghdr) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); - require_action(sizeof(*seghdr) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - #if SPLIT_KEXTS_DEBUG - { - struct mach_header_64 *mach; - - mach = (struct mach_header_64 *) ((void *) buf); - - if (mach->magic != MH_MAGIC_64) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "bad macho header at %p <%s>", - (void *) mach, __func__); - goto finish; - } - } + { + struct mach_header_64 *mach; + + mach = (struct mach_header_64 *) ((void *) buf); + + if (mach->magic != MH_MAGIC_64) { + kxld_log(kKxldLogLinking, kKxldLogErr, + "bad macho header at %p <%s>", + (void *) mach, __func__); + goto finish; + } + } #endif - - seghdr = (struct segment_command_64 *) ((void *) (buf + *header_offset)); - *header_offset += sizeof(*seghdr); - - seghdr->cmd = LC_SEGMENT_64; - seghdr->cmdsize = (uint32_t) sizeof(*seghdr); - seghdr->cmdsize += - (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE)); - strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname)); - seghdr->vmaddr = (uint64_t) seg->link_addr; - seghdr->vmsize = (uint64_t) seg->vmsize; - seghdr->fileoff = (uint64_t) data_offset; - seghdr->filesize = (uint64_t) seg->vmsize; - seghdr->maxprot = seg->maxprot; - seghdr->initprot = seg->initprot; - seghdr->nsects = seg->sects.nitems; - seghdr->flags = 0; + + seghdr = (struct segment_command_64 *) ((void *) (buf + *header_offset)); + *header_offset += sizeof(*seghdr); + + seghdr->cmd = LC_SEGMENT_64; + seghdr->cmdsize = (uint32_t) sizeof(*seghdr); + seghdr->cmdsize += + (uint32_t) (seg->sects.nitems * kxld_sect_get_macho_header_size(FALSE)); + strlcpy(seghdr->segname, seg->segname, sizeof(seghdr->segname)); + seghdr->vmaddr = (uint64_t) seg->link_addr; + seghdr->vmsize = (uint64_t) seg->vmsize; + seghdr->fileoff = (uint64_t) data_offset; + seghdr->filesize = (uint64_t) seg->vmsize; + seghdr->maxprot = seg->maxprot; + seghdr->initprot = seg->initprot; + seghdr->nsects = seg->sects.nitems; + seghdr->flags = 0; #if SPLIT_KEXTS_DEBUG - { - kxld_log(kKxldLogLinking, kKxldLogErr, - "%p >>> Start of %s seghdr (size %lu) <%s>", - (void *) seghdr, - seg->segname[0] ? seg->segname : "none", - sizeof(*seghdr), - __func__); - kxld_log(kKxldLogLinking, kKxldLogErr, - "%p <<< End of %s seghdr <%s>", - (void *) ((u_char *)seghdr + sizeof(*seghdr)), - seg->segname[0] ? seg->segname : "none", - __func__); - - kxld_log(kKxldLogLinking, kKxldLogErr, - "%s seghdr, cmdsize %d vmaddr %p vmsize %p %llu fileoff %p %llu <%s>", - seg->segname[0] ? 
seg->segname : "none", - seghdr->cmdsize, - (void *) seghdr->vmaddr, - (void *) seghdr->vmsize, - seghdr->vmsize, - (void *) seghdr->fileoff, - seghdr->fileoff, - __func__); - } + { + kxld_log(kKxldLogLinking, kKxldLogErr, + "%p >>> Start of %s seghdr (size %lu) <%s>", + (void *) seghdr, + seg->segname[0] ? seg->segname : "none", + sizeof(*seghdr), + __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + "%p <<< End of %s seghdr <%s>", + (void *) ((u_char *)seghdr + sizeof(*seghdr)), + seg->segname[0] ? seg->segname : "none", + __func__); + + kxld_log(kKxldLogLinking, kKxldLogErr, + "%s seghdr, cmdsize %d vmaddr %p vmsize %p %llu fileoff %p %llu <%s>", + seg->segname[0] ? seg->segname : "none", + seghdr->cmdsize, + (void *) seghdr->vmaddr, + (void *) seghdr->vmsize, + seghdr->vmsize, + (void *) seghdr->fileoff, + seghdr->fileoff, + __func__); + } #endif - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_LP64 */ @@ -778,31 +780,31 @@ finish: kern_return_t kxld_seg_add_section(KXLDSeg *seg, KXLDSect *sect) { - kern_return_t rval = KERN_FAILURE; - KXLDSect **sectp = NULL; - u_int i; - - check(seg); - check(sect); - require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)), - finish, rval=KERN_FAILURE); - - /* Add the section into the section index */ - - for (i = 0; i < seg->sects.nitems; ++i) { - sectp = kxld_array_get_item(&seg->sects, i); - if (NULL == *sectp) { - *sectp = sect; - break; - } - } - require_action(i < seg->sects.nitems, finish, rval=KERN_FAILURE); - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSect **sectp = NULL; + u_int i; + + check(seg); + check(sect); + require_action(streq_safe(seg->segname, sect->segname, sizeof(seg->segname)), + finish, rval = KERN_FAILURE); + + /* Add the section into the section index */ + + for (i = 0; i < seg->sects.nitems; ++i) { + sectp = kxld_array_get_item(&seg->sects, i); + if (NULL == *sectp) { + *sectp = sect; + break; + } + } + require_action(i < seg->sects.nitems, finish, rval = KERN_FAILURE); + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -810,37 +812,36 @@ finish: kern_return_t kxld_seg_finish_init(KXLDSeg *seg) { - kern_return_t rval = KERN_FAILURE; - u_int i = 0; - KXLDSect *sect = NULL; - kxld_addr_t maxaddr = 0; - kxld_size_t maxsize = 0; - - /* If we already have a size for this segment (e.g. from the mach-o load - * command) then don't recalculate the segment size. This is safer since - * when we recalculate we are making assumptions about page alignment and - * padding that the kext mach-o file was built with. Better to trust the - * macho-o info, if we have it. If we don't (i.e. vmsize == 0) then add up - * the section sizes and take a best guess at page padding. - */ - if ((seg->vmsize == 0) && (seg->sects.nitems)) { - for (i = 0; i < seg->sects.nitems; ++i) { - sect = get_sect_by_index(seg, i); - require_action(sect, finish, rval=KERN_FAILURE); - if (sect->base_addr > maxaddr) { - maxaddr = sect->base_addr; - maxsize = sect->size; - } - } - seg->vmsize = kxld_round_page_cross_safe(maxaddr + - maxsize - seg->base_addr); - - } - - rval = KERN_SUCCESS; - + kern_return_t rval = KERN_FAILURE; + u_int i = 0; + KXLDSect *sect = NULL; + kxld_addr_t maxaddr = 0; + kxld_size_t maxsize = 0; + + /* If we already have a size for this segment (e.g. from the mach-o load + * command) then don't recalculate the segment size. 
This is safer since + * when we recalculate we are making assumptions about page alignment and + * padding that the kext mach-o file was built with. Better to trust the + * macho-o info, if we have it. If we don't (i.e. vmsize == 0) then add up + * the section sizes and take a best guess at page padding. + */ + if ((seg->vmsize == 0) && (seg->sects.nitems)) { + for (i = 0; i < seg->sects.nitems; ++i) { + sect = get_sect_by_index(seg, i); + require_action(sect, finish, rval = KERN_FAILURE); + if (sect->base_addr > maxaddr) { + maxaddr = sect->base_addr; + maxsize = sect->size; + } + } + seg->vmsize = kxld_round_page_cross_safe(maxaddr + + maxsize - seg->base_addr); + } + + rval = KERN_SUCCESS; + finish: - return rval; + return rval; } /******************************************************************************* @@ -848,18 +849,18 @@ finish: void kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections) { - if (strict_protections) { - if (!strncmp(seg->segname, SEG_TEXT, sizeof(seg->segname))) { - seg->initprot = TEXT_SEG_PROT; - seg->maxprot = TEXT_SEG_PROT; - } else { - seg->initprot = DATA_SEG_PROT; - seg->maxprot = DATA_SEG_PROT; - } - } else { - seg->initprot = VM_PROT_ALL; - seg->maxprot = VM_PROT_ALL; - } + if (strict_protections) { + if (!strncmp(seg->segname, SEG_TEXT, sizeof(seg->segname))) { + seg->initprot = TEXT_SEG_PROT; + seg->maxprot = TEXT_SEG_PROT; + } else { + seg->initprot = DATA_SEG_PROT; + seg->maxprot = DATA_SEG_PROT; + } + } else { + seg->initprot = VM_PROT_ALL; + seg->maxprot = VM_PROT_ALL; + } } /******************************************************************************* @@ -867,213 +868,204 @@ kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections) void kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr) { - KXLDSect *sect = NULL; - u_int i = 0; - splitKextLinkInfo * link_info = (splitKextLinkInfo *) link_addr; - kxld_addr_t my_link_addr; - - if (isOldInterface) { - seg->link_addr += link_addr; - } - else { - if (isSplitKext) { - // we have a split kext - if (kxld_seg_is_text_seg(seg)) { - // assumes this is the beginning of the kext - my_link_addr = link_info->vmaddr_TEXT; - seg->link_addr = my_link_addr; - } - else if (kxld_seg_is_text_exec_seg(seg)) { - my_link_addr = link_info->vmaddr_TEXT_EXEC; - seg->link_addr = my_link_addr; - // vmaddr_TEXT_EXEC is the actual vmaddr for this segment so we need - // to adjust for kxld_sect_relocate assuming the link addr is - // the address of the kext (macho header in __TEXT) - my_link_addr -= seg->base_addr; - } - else if (kxld_seg_is_data_seg(seg)) { - my_link_addr = link_info->vmaddr_DATA; - seg->link_addr = my_link_addr; - // vmaddr_DATA is the actual vmaddr for this segment so we need - // to adjust for kxld_sect_relocate assuming the link addr is - // the address of the kext (macho header in __TEXT) - my_link_addr -= seg->base_addr; - } - else if (kxld_seg_is_data_const_seg(seg)) { - my_link_addr = link_info->vmaddr_DATA_CONST; - seg->link_addr = my_link_addr; - // vmaddr_DATA_CONST is the actual vmaddr for this segment so we need - // to adjust for kxld_sect_relocate assuming the link addr is - // the address of the kext (macho header in __TEXT) - my_link_addr -= seg->base_addr; - } - else if (kxld_seg_is_llvm_cov_seg(seg)) { - my_link_addr = link_info->vmaddr_LLVM_COV; - seg->link_addr = my_link_addr; - // vmaddr_LLVM_COV is the actual vmaddr for this segment so we need - // to adjust for kxld_sect_relocate assuming the link addr is - // the address of the kext (macho header 
in __TEXT) - my_link_addr -= seg->base_addr; - } - else if (kxld_seg_is_linkedit_seg(seg)) { - my_link_addr = link_info->vmaddr_LINKEDIT; - seg->link_addr = my_link_addr; - // vmaddr_DATA is the actual vmaddr for this segment so we need - // to adjust for kxld_sect_relocate assuming the link addr is - // the address of the kext (macho header in __TEXT) - my_link_addr -= seg->base_addr; - } - else { - kxld_log(kKxldLogLinking, kKxldLogErr, - " not expecting this segment %s!!! <%s>", - seg->segname[0] ? seg->segname : "none", - __func__); - my_link_addr = link_info->vmaddr_TEXT; - seg->link_addr += my_link_addr; - } - } - else { - my_link_addr = link_info->vmaddr_TEXT; - seg->link_addr += my_link_addr; - } - } - + KXLDSect *sect = NULL; + u_int i = 0; + splitKextLinkInfo * link_info = (splitKextLinkInfo *) link_addr; + kxld_addr_t my_link_addr; + + if (isOldInterface) { + seg->link_addr += link_addr; + } else { + if (isSplitKext) { + // we have a split kext + if (kxld_seg_is_text_seg(seg)) { + // assumes this is the beginning of the kext + my_link_addr = link_info->vmaddr_TEXT; + seg->link_addr = my_link_addr; + } else if (kxld_seg_is_text_exec_seg(seg)) { + my_link_addr = link_info->vmaddr_TEXT_EXEC; + seg->link_addr = my_link_addr; + // vmaddr_TEXT_EXEC is the actual vmaddr for this segment so we need + // to adjust for kxld_sect_relocate assuming the link addr is + // the address of the kext (macho header in __TEXT) + my_link_addr -= seg->base_addr; + } else if (kxld_seg_is_data_seg(seg)) { + my_link_addr = link_info->vmaddr_DATA; + seg->link_addr = my_link_addr; + // vmaddr_DATA is the actual vmaddr for this segment so we need + // to adjust for kxld_sect_relocate assuming the link addr is + // the address of the kext (macho header in __TEXT) + my_link_addr -= seg->base_addr; + } else if (kxld_seg_is_data_const_seg(seg)) { + my_link_addr = link_info->vmaddr_DATA_CONST; + seg->link_addr = my_link_addr; + // vmaddr_DATA_CONST is the actual vmaddr for this segment so we need + // to adjust for kxld_sect_relocate assuming the link addr is + // the address of the kext (macho header in __TEXT) + my_link_addr -= seg->base_addr; + } else if (kxld_seg_is_llvm_cov_seg(seg)) { + my_link_addr = link_info->vmaddr_LLVM_COV; + seg->link_addr = my_link_addr; + // vmaddr_LLVM_COV is the actual vmaddr for this segment so we need + // to adjust for kxld_sect_relocate assuming the link addr is + // the address of the kext (macho header in __TEXT) + my_link_addr -= seg->base_addr; + } else if (kxld_seg_is_linkedit_seg(seg)) { + my_link_addr = link_info->vmaddr_LINKEDIT; + seg->link_addr = my_link_addr; + // vmaddr_DATA is the actual vmaddr for this segment so we need + // to adjust for kxld_sect_relocate assuming the link addr is + // the address of the kext (macho header in __TEXT) + my_link_addr -= seg->base_addr; + } else { + kxld_log(kKxldLogLinking, kKxldLogErr, + " not expecting this segment %s!!! <%s>", + seg->segname[0] ? seg->segname : "none", + __func__); + my_link_addr = link_info->vmaddr_TEXT; + seg->link_addr += my_link_addr; + } + } else { + my_link_addr = link_info->vmaddr_TEXT; + seg->link_addr += my_link_addr; + } + } + #if SPLIT_KEXTS_DEBUG - { - kxld_log(kKxldLogLinking, kKxldLogErr, - "%p >>> Start of %s segment (vmsize %llu) <%s>)", - (void *) seg->link_addr, - seg->segname[0] ? seg->segname : "none", - seg->vmsize, - __func__); - kxld_log(kKxldLogLinking, kKxldLogErr, - "%p <<< End of %s segment <%s>", - (void *) (seg->link_addr + seg->vmsize), - seg->segname[0] ? 
seg->segname : "none", - __func__); - } + { + kxld_log(kKxldLogLinking, kKxldLogErr, + "%p >>> Start of %s segment (vmsize %llu) <%s>)", + (void *) seg->link_addr, + seg->segname[0] ? seg->segname : "none", + seg->vmsize, + __func__); + kxld_log(kKxldLogLinking, kKxldLogErr, + "%p <<< End of %s segment <%s>", + (void *) (seg->link_addr + seg->vmsize), + seg->segname[0] ? seg->segname : "none", + __func__); + } #endif - - for (i = 0; i < seg->sects.nitems; ++i) { - sect = get_sect_by_index(seg, i); - if (isOldInterface) { - kxld_sect_relocate(sect, link_addr); - } - else { - kxld_sect_relocate(sect, my_link_addr); - } - } + + for (i = 0; i < seg->sects.nitems; ++i) { + sect = get_sect_by_index(seg, i); + if (isOldInterface) { + kxld_sect_relocate(sect, link_addr); + } else { + kxld_sect_relocate(sect, my_link_addr); + } + } } /******************************************************************************* *******************************************************************************/ -void -kxld_seg_populate_linkedit(KXLDSeg *seg, const KXLDSymtab *symtab, boolean_t is_32_bit +void +kxld_seg_populate_linkedit(KXLDSeg *seg, const KXLDSymtab *symtab, boolean_t is_32_bit #if KXLD_PIC_KEXTS , const KXLDArray *locrelocs , const KXLDArray *extrelocs , boolean_t target_supports_slideable_kexts #endif /* KXLD_PIC_KEXTS */ , uint32_t splitinfolc_size - ) + ) { - u_long size = 0; + u_long size = 0; - size += kxld_symtab_get_macho_data_size(symtab, is_32_bit); + size += kxld_symtab_get_macho_data_size(symtab, is_32_bit); #if KXLD_PIC_KEXTS - if (target_supports_slideable_kexts) { - size += kxld_reloc_get_macho_data_size(locrelocs, extrelocs); - } -#endif /* KXLD_PIC_KEXTS */ + if (target_supports_slideable_kexts) { + size += kxld_reloc_get_macho_data_size(locrelocs, extrelocs); + } +#endif /* KXLD_PIC_KEXTS */ - // 0 unless this is a split kext - size += splitinfolc_size; + // 0 unless this is a split kext + size += splitinfolc_size; - seg->vmsize = kxld_round_page_cross_safe(size); + seg->vmsize = kxld_round_page_cross_safe(size); } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ boolean_t kxld_seg_is_split_seg(const KXLDSeg *seg) { - boolean_t result = FALSE; - - check(seg); - if (isSplitKext) { - if (kxld_seg_is_data_seg(seg) || kxld_seg_is_linkedit_seg(seg) || - kxld_seg_is_text_exec_seg(seg) || kxld_seg_is_data_const_seg(seg) || - kxld_seg_is_llvm_cov_seg(seg)) { - result = TRUE; - } - } - - return result; + boolean_t result = FALSE; + + check(seg); + if (isSplitKext) { + if (kxld_seg_is_data_seg(seg) || kxld_seg_is_linkedit_seg(seg) || + kxld_seg_is_text_exec_seg(seg) || kxld_seg_is_data_const_seg(seg) || + kxld_seg_is_llvm_cov_seg(seg)) { + result = TRUE; + } + } + + return result; } boolean_t kxld_seg_is_text_seg(const KXLDSeg *seg) { - boolean_t result = FALSE; - - check(seg); - result = !strncmp(seg->segname, SEG_TEXT, sizeof(seg->segname)); - - return result; + boolean_t result = FALSE; + + check(seg); + result = !strncmp(seg->segname, SEG_TEXT, sizeof(seg->segname)); + + return result; } boolean_t kxld_seg_is_text_exec_seg(const KXLDSeg *seg) { - boolean_t result = FALSE; - - check(seg); - result = !strncmp(seg->segname, "__TEXT_EXEC", sizeof(seg->segname)); - - return result; + boolean_t result = FALSE; + + check(seg); + result = !strncmp(seg->segname, "__TEXT_EXEC", sizeof(seg->segname)); + + 
return result; } boolean_t kxld_seg_is_data_seg(const KXLDSeg *seg) { - boolean_t result = FALSE; - - check(seg); - result = !strncmp(seg->segname, SEG_DATA, sizeof(seg->segname)); - - return result; + boolean_t result = FALSE; + + check(seg); + result = !strncmp(seg->segname, SEG_DATA, sizeof(seg->segname)); + + return result; } boolean_t kxld_seg_is_data_const_seg(const KXLDSeg *seg) { - boolean_t result = FALSE; - - check(seg); - result = !strncmp(seg->segname, "__DATA_CONST", sizeof(seg->segname)); - - return result; + boolean_t result = FALSE; + + check(seg); + result = !strncmp(seg->segname, "__DATA_CONST", sizeof(seg->segname)); + + return result; } boolean_t kxld_seg_is_linkedit_seg(const KXLDSeg *seg) { - boolean_t result = FALSE; - - check(seg); - result = !strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)); - - return result; + boolean_t result = FALSE; + + check(seg); + result = !strncmp(seg->segname, SEG_LINKEDIT, sizeof(seg->segname)); + + return result; } boolean_t kxld_seg_is_llvm_cov_seg(const KXLDSeg *seg) { - boolean_t result = FALSE; + boolean_t result = FALSE; - check(seg); - result = !strncmp(seg->segname, "__LLVM_COV", sizeof(seg->segname)); + check(seg); + result = !strncmp(seg->segname, "__LLVM_COV", sizeof(seg->segname)); - return result; + return result; } diff --git a/libkern/kxld/kxld_seg.h b/libkern/kxld/kxld_seg.h index 8f8551b13..7015f5283 100644 --- a/libkern/kxld/kxld_seg.h +++ b/libkern/kxld/kxld_seg.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_SEG_H_ @@ -45,15 +45,15 @@ struct segment_command_64; typedef struct kxld_seg KXLDSeg; struct kxld_seg { - char segname[16]; - kxld_addr_t base_addr; - kxld_addr_t link_addr; - kxld_size_t vmsize; - kxld_size_t fileoff; - KXLDArray sects; - u_int flags; - vm_prot_t maxprot; - vm_prot_t initprot; + char segname[16]; + kxld_addr_t base_addr; + kxld_addr_t link_addr; + kxld_size_t vmsize; + kxld_size_t fileoff; + KXLDArray sects; + u_int flags; + vm_prot_t maxprot; + vm_prot_t initprot; }; /******************************************************************************* @@ -62,32 +62,32 @@ struct kxld_seg { #if KXLD_USER_OR_ILP32 kern_return_t kxld_seg_init_from_macho_32(KXLDSeg *seg, struct segment_command *src) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_ILP32 */ #if KXLD_USER_OR_LP64 kern_return_t kxld_seg_init_from_macho_64(KXLDSeg *seg, struct segment_command_64 *src) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_LP64 */ #if KXLD_USER_OR_OBJECT -kern_return_t kxld_seg_create_seg_from_sections(KXLDArray *segarray, +kern_return_t kxld_seg_create_seg_from_sections(KXLDArray *segarray, KXLDArray *sectarray) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_seg_finalize_object_segment(KXLDArray *segarray, KXLDArray *section_order, u_long hdrsize) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_seg_init_linkedit(KXLDArray *segs) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_OBJECT */ void kxld_seg_clear(KXLDSeg *seg) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_seg_deinit(KXLDSeg *seg) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* @@ -95,32 +95,32 @@ void kxld_seg_deinit(KXLDSeg *seg) *******************************************************************************/ kxld_size_t kxld_seg_get_vmsize(const KXLDSeg *seg) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); u_long kxld_seg_get_macho_header_size(const KXLDSeg *seg, boolean_t is_32_bit) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #if 0 /* This is no longer used, but may be useful some day... 
*/ u_long kxld_seg_get_macho_data_size(const KXLDSeg *seg) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #endif kern_return_t kxld_seg_export_macho_to_file_buffer(const KXLDSeg *seg, u_char *buf, - u_long *header_offset, u_long header_size, + u_long *header_offset, u_long header_size, u_long *data_offset, u_long data_size, boolean_t is_32_bit) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_seg_export_macho_to_vm(const KXLDSeg *seg, - u_char *buf, - u_long *header_offset, - u_long header_size, - u_long data_size, - kxld_addr_t file_link_addr, - boolean_t is_32_bit) + u_char *buf, + u_long *header_offset, + u_long header_size, + u_long data_size, + kxld_addr_t file_link_addr, + boolean_t is_32_bit) __attribute__((nonnull, visibility("hidden"))); /******************************************************************************* @@ -128,14 +128,14 @@ __attribute__((nonnull, visibility("hidden"))); *******************************************************************************/ kern_return_t kxld_seg_add_section(KXLDSeg *seg, struct kxld_sect *sect) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /* To be called after all sections are added */ kern_return_t kxld_seg_finish_init(KXLDSeg *seg) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_seg_set_vm_protections(KXLDSeg *seg, boolean_t strict_protections) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_seg_relocate(KXLDSeg *seg, kxld_addr_t link_addr) __attribute__((nonnull, visibility("hidden"))); @@ -148,8 +148,8 @@ void kxld_seg_populate_linkedit(KXLDSeg *seg, const struct kxld_symtab *symtab, , boolean_t target_supports_slideable_kexts #endif /* KXLD_PIC_KEXTS */ , uint32_t splitinfolc_size - ) - __attribute__((nonnull, visibility("hidden"))); + ) +__attribute__((nonnull, visibility("hidden"))); boolean_t kxld_seg_is_split_seg(const KXLDSeg *seg) __attribute__((pure, nonnull, visibility("hidden"))); @@ -173,4 +173,3 @@ boolean_t kxld_seg_is_llvm_cov_seg(const KXLDSeg *seg) __attribute__((pure, nonnull, visibility("hidden"))); #endif /* _KXLD_SEG_H_ */ - diff --git a/libkern/kxld/kxld_splitinfolc.c b/libkern/kxld/kxld_splitinfolc.c index dd3fde261..80472a3f0 100644 --- a/libkern/kxld/kxld_splitinfolc.c +++ b/libkern/kxld/kxld_splitinfolc.c @@ -2,7 +2,7 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -36,44 +36,44 @@ #include "kxld_splitinfolc.h" /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_splitinfolc_init_from_macho(KXLDsplitinfolc *splitinfolc, struct linkedit_data_command *src) { - check(splitinfolc); - check(src); + check(splitinfolc); + check(src); - splitinfolc->cmdsize = src->cmdsize; - splitinfolc->dataoff = src->dataoff; - splitinfolc->datasize = src->datasize; - splitinfolc->has_splitinfolc = TRUE; + splitinfolc->cmdsize = src->cmdsize; + splitinfolc->dataoff = src->dataoff; + splitinfolc->datasize = src->datasize; + splitinfolc->has_splitinfolc = TRUE; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_splitinfolc_clear(KXLDsplitinfolc *splitinfolc) { - bzero(splitinfolc, sizeof(*splitinfolc)); + bzero(splitinfolc, sizeof(*splitinfolc)); } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ u_long kxld_splitinfolc_get_macho_header_size(void) { - return sizeof(struct linkedit_data_command); + return sizeof(struct linkedit_data_command); } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ kern_return_t kxld_splitinfolc_export_macho(const KXLDsplitinfolc *splitinfolc, - splitKextLinkInfo *linked_object, - u_long *header_offset, - u_long header_size, - u_long *data_offset, - u_long size) + splitKextLinkInfo *linked_object, + u_long *header_offset, + u_long header_size, + u_long *data_offset, + u_long size) { kern_return_t rval = KERN_FAILURE; struct linkedit_data_command *splitinfolc_hdr = NULL; @@ -86,24 +86,24 @@ kxld_splitinfolc_export_macho(const KXLDsplitinfolc *splitinfolc, buf = (u_char *)(linked_object->linkedKext); require_action(sizeof(*splitinfolc_hdr) <= header_size - *header_offset, - finish, - rval=KERN_FAILURE); + finish, + rval = KERN_FAILURE); splitinfolc_hdr = (struct linkedit_data_command *)((void *)(buf + *header_offset)); *header_offset += sizeof(*splitinfolc_hdr); - if (buf + *data_offset > buf + size) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "\n OVERFLOW! linkedKext %p to %p (%lu) copy %p to %p (%u) <%s>", - (void *) buf, - (void *) (buf + size), - size, - (void *) (buf + *data_offset), - (void *) (buf + *data_offset + splitinfolc->datasize), - splitinfolc->datasize, - __func__); - goto finish; - } - + if (buf + *data_offset > buf + size) { + kxld_log(kKxldLogLinking, kKxldLogErr, + "\n OVERFLOW! 
linkedKext %p to %p (%lu) copy %p to %p (%u) <%s>", + (void *) buf, + (void *) (buf + size), + size, + (void *) (buf + *data_offset), + (void *) (buf + *data_offset + splitinfolc->datasize), + splitinfolc->datasize, + __func__); + goto finish; + } + // copy in the split info reloc data from kextExecutable. For example dataoff // in LC_SEGMENT_SPLIT_INFO load command points to the reloc data in the // __LINKEDIT segment. In this case 65768 into the kextExecutable file is @@ -114,37 +114,37 @@ kxld_splitinfolc_export_macho(const KXLDsplitinfolc *splitinfolc, // dataoff 65768 // datasize 920 - + memcpy(buf + *data_offset, linked_object->kextExecutable + splitinfolc->dataoff, splitinfolc->datasize); #if SPLIT_KEXTS_DEBUG - u_char *dataPtr = buf + *data_offset; - - kxld_log(kKxldLogLinking, kKxldLogErr, - "\n\n linkedKext %p to %p (%lu) copy %p to %p (%u) <%s>", - (void *) buf, - (void *) (buf + size), - size, - (void *) (dataPtr), - (void *) (dataPtr + splitinfolc->datasize), - splitinfolc->datasize, - __func__); - - if (*(dataPtr + 0) != 0x7F) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "\n\n bad LC_SEGMENT_SPLIT_INFO: 0x%02X %02X %02X %02X %02X %02X %02X %02X at %p (buf %p + %lu) <%s>", - *(dataPtr +0), - *(dataPtr +1), - *(dataPtr +2), - *(dataPtr +3), - *(dataPtr +4), - *(dataPtr +5), - *(dataPtr +6), - *(dataPtr +7), - (void *) dataPtr, - (void *) buf, - *data_offset, __func__); - } + u_char *dataPtr = buf + *data_offset; + + kxld_log(kKxldLogLinking, kKxldLogErr, + "\n\n linkedKext %p to %p (%lu) copy %p to %p (%u) <%s>", + (void *) buf, + (void *) (buf + size), + size, + (void *) (dataPtr), + (void *) (dataPtr + splitinfolc->datasize), + splitinfolc->datasize, + __func__); + + if (*(dataPtr + 0) != 0x7F) { + kxld_log(kKxldLogLinking, kKxldLogErr, + "\n\n bad LC_SEGMENT_SPLIT_INFO: 0x%02X %02X %02X %02X %02X %02X %02X %02X at %p (buf %p + %lu) <%s>", + *(dataPtr + 0), + *(dataPtr + 1), + *(dataPtr + 2), + *(dataPtr + 3), + *(dataPtr + 4), + *(dataPtr + 5), + *(dataPtr + 6), + *(dataPtr + 7), + (void *) dataPtr, + (void *) buf, + *data_offset, __func__); + } #endif // update the load command header @@ -160,4 +160,3 @@ kxld_splitinfolc_export_macho(const KXLDsplitinfolc *splitinfolc, finish: return rval; } - diff --git a/libkern/kxld/kxld_splitinfolc.h b/libkern/kxld/kxld_splitinfolc.h index 61bfde198..38c194d00 100644 --- a/libkern/kxld/kxld_splitinfolc.h +++ b/libkern/kxld/kxld_splitinfolc.h @@ -2,7 +2,7 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_SPLITINFOLC_H_ @@ -39,15 +39,15 @@ struct linkedit_data_command; typedef struct kxld_splitinfolc KXLDsplitinfolc; struct kxld_splitinfolc { - uint32_t cmdsize; - uint32_t dataoff; - uint32_t datasize; - boolean_t has_splitinfolc; + uint32_t cmdsize; + uint32_t dataoff; + uint32_t datasize; + boolean_t has_splitinfolc; }; /******************************************************************************* - * Constructors and destructors - *******************************************************************************/ +* Constructors and destructors +*******************************************************************************/ void kxld_splitinfolc_init_from_macho(KXLDsplitinfolc *splitinfolc, struct linkedit_data_command *src) __attribute__((nonnull, visibility("hidden"))); @@ -56,19 +56,19 @@ void kxld_splitinfolc_clear(KXLDsplitinfolc *splitinfolc) __attribute__((nonnull, visibility("hidden"))); /******************************************************************************* - * Accessors - *******************************************************************************/ +* Accessors +*******************************************************************************/ u_long kxld_splitinfolc_get_macho_header_size(void) __attribute__((pure, visibility("hidden"))); kern_return_t kxld_splitinfolc_export_macho(const KXLDsplitinfolc *splitinfolc, - splitKextLinkInfo *linked_object, - u_long *header_offset, - u_long header_size, - u_long *data_offset, - u_long size) + splitKextLinkInfo *linked_object, + u_long *header_offset, + u_long header_size, + u_long *data_offset, + u_long size) __attribute__((pure, nonnull, visibility("hidden"))); #endif /* _KXLD_SPLITINFOLC_H_ */ diff --git a/libkern/kxld/kxld_srcversion.c b/libkern/kxld/kxld_srcversion.c index cd8adb871..ca93db8f9 100644 --- a/libkern/kxld/kxld_srcversion.c +++ b/libkern/kxld/kxld_srcversion.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -36,58 +36,57 @@ #include "kxld_srcversion.h" /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_srcversion_init_from_macho(KXLDsrcversion *srcversion, struct source_version_command *src) { - check(srcversion); - check(src); + check(srcversion); + check(src); - srcversion->version = src->version; - srcversion->has_srcversion = TRUE; + srcversion->version = src->version; + srcversion->has_srcversion = TRUE; } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ void kxld_srcversion_clear(KXLDsrcversion *srcversion) { - bzero(srcversion, sizeof(*srcversion)); + bzero(srcversion, sizeof(*srcversion)); } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ u_long kxld_srcversion_get_macho_header_size(void) { - return sizeof(struct source_version_command); + return sizeof(struct source_version_command); } /******************************************************************************* - *******************************************************************************/ +*******************************************************************************/ kern_return_t -kxld_srcversion_export_macho(const KXLDsrcversion *srcversion, u_char *buf, - u_long *header_offset, u_long header_size) +kxld_srcversion_export_macho(const KXLDsrcversion *srcversion, u_char *buf, + u_long *header_offset, u_long header_size) { - kern_return_t rval = KERN_FAILURE; - struct source_version_command *srcversionhdr = NULL; + kern_return_t rval = KERN_FAILURE; + struct source_version_command *srcversionhdr = NULL; - check(srcversion); - check(buf); - check(header_offset); + check(srcversion); + check(buf); + check(header_offset); - require_action(sizeof(*srcversionhdr) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - srcversionhdr = (struct source_version_command *) ((void *) (buf + *header_offset)); - *header_offset += sizeof(*srcversionhdr); + require_action(sizeof(*srcversionhdr) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); + srcversionhdr = (struct source_version_command *) ((void *) (buf + *header_offset)); + *header_offset += sizeof(*srcversionhdr); - srcversionhdr->cmd = LC_SOURCE_VERSION; - srcversionhdr->cmdsize = (uint32_t) sizeof(*srcversionhdr); - srcversionhdr->version = srcversion->version; + srcversionhdr->cmd = LC_SOURCE_VERSION; + srcversionhdr->cmdsize = (uint32_t) sizeof(*srcversionhdr); + srcversionhdr->version = srcversion->version; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } - diff --git a/libkern/kxld/kxld_srcversion.h b/libkern/kxld/kxld_srcversion.h index b6cdf387b..b74ad0014 100644 --- a/libkern/kxld/kxld_srcversion.h +++ b/libkern/kxld/kxld_srcversion.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_SRCVERSION_H_ @@ -39,13 +39,13 @@ struct source_version_command; typedef struct kxld_srcversion KXLDsrcversion; struct kxld_srcversion { - uint64_t version; - boolean_t has_srcversion; + uint64_t version; + boolean_t has_srcversion; }; /******************************************************************************* - * Constructors and destructors - *******************************************************************************/ +* Constructors and destructors +*******************************************************************************/ void kxld_srcversion_init_from_macho(KXLDsrcversion *srcversion, struct source_version_command *src) __attribute__((nonnull, visibility("hidden"))); @@ -54,15 +54,15 @@ void kxld_srcversion_clear(KXLDsrcversion *srcversion) __attribute__((nonnull, visibility("hidden"))); /******************************************************************************* - * Accessors - *******************************************************************************/ +* Accessors +*******************************************************************************/ u_long kxld_srcversion_get_macho_header_size(void) __attribute__((pure, visibility("hidden"))); kern_return_t -kxld_srcversion_export_macho(const KXLDsrcversion *srcversion, u_char *buf, - u_long *header_offset, u_long header_size) +kxld_srcversion_export_macho(const KXLDsrcversion *srcversion, u_char *buf, + u_long *header_offset, u_long header_size) __attribute__((pure, nonnull, visibility("hidden"))); #endif /* _KXLD_SRCVERSION_H_ */ diff --git a/libkern/kxld/kxld_stubs.c b/libkern/kxld/kxld_stubs.c index 31fcf7b51..e2201eb0c 100644 --- a/libkern/kxld/kxld_stubs.c +++ b/libkern/kxld/kxld_stubs.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,35 +46,35 @@ kxld_create_context(KXLDContext **_context __unused, KXLDFlags flags __unused, cpu_type_t cputype __unused, cpu_subtype_t cpusubtype __unused, vm_size_t pagesize __unused) { - return KERN_SUCCESS; + return KERN_SUCCESS; } void kxld_destroy_context(KXLDContext *context __unused) { - /* Do nothing */ + /* Do nothing */ } kern_return_t kxld_link_file( - KXLDContext * context __unused, - u_char * file __unused, - u_long size __unused, - const char * name __unused, - void * callback_data __unused, - KXLDDependency * dependencies __unused, - u_int ndependencies __unused, - u_char ** linked_object_out __unused, - kxld_addr_t * kmod_info_kern __unused) + KXLDContext * context __unused, + u_char * file __unused, + u_long size __unused, + const char * name __unused, + void * callback_data __unused, + KXLDDependency * dependencies __unused, + u_int ndependencies __unused, + u_char ** linked_object_out __unused, + kxld_addr_t * kmod_info_kern __unused) { - panic("%s (%s) called in kernel without kxld support", __PRETTY_FUNCTION__, name); - return KERN_SUCCESS; + panic("%s (%s) called in kernel without kxld support", __PRETTY_FUNCTION__, name); + return KERN_SUCCESS; } -boolean_t +boolean_t kxld_validate_copyright_string(const char *str __unused) { - return TRUE; + return TRUE; } #endif diff --git a/libkern/kxld/kxld_sym.c b/libkern/kxld/kxld_sym.c index 2da6477ac..bc2ace856 100644 --- a/libkern/kxld/kxld_sym.c +++ b/libkern/kxld/kxld_sym.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -54,44 +54,44 @@ *******************************************************************************/ static kern_return_t init_predicates(KXLDSym *sym, u_char n_type, u_short n_desc) - __attribute__((nonnull)); +__attribute__((nonnull)); static void init_sym_sectnum(KXLDSym *sym, u_int n_sect) - __attribute__((nonnull)); -static kern_return_t extract_inner_string(const char *str, const char *prefix, +__attribute__((nonnull)); +static kern_return_t extract_inner_string(const char *str, const char *prefix, const char *suffix, char *buf, u_long len); #if KXLD_USER_OR_ILP32 /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_sym_init_from_macho32(KXLDSym *sym, char *strtab, const struct nlist *src) +kxld_sym_init_from_macho32(KXLDSym *sym, char *strtab, const struct nlist *src) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(sym); - check(strtab); - check(src); + check(sym); + check(strtab); + check(src); - bzero(sym, sizeof(*sym)); - sym->name = strtab + src->n_un.n_strx; - sym->type = src->n_type; - sym->desc = src->n_desc; - sym->base_addr = src->n_value; - sym->link_addr = sym->base_addr; - - rval = init_predicates(sym, src->n_type, src->n_desc); - require_noerr(rval, finish); + bzero(sym, sizeof(*sym)); + sym->name = strtab + src->n_un.n_strx; + sym->type = src->n_type; + sym->desc = src->n_desc; + sym->base_addr = src->n_value; + sym->link_addr = sym->base_addr; - (void) init_sym_sectnum(sym, src->n_sect); + rval = init_predicates(sym, src->n_type, src->n_desc); + require_noerr(rval, finish); - if (kxld_sym_is_indirect(sym)) { - sym->alias = strtab + src->n_value; - } + (void) init_sym_sectnum(sym, src->n_sect); - rval = KERN_SUCCESS; + if (kxld_sym_is_indirect(sym)) { + sym->alias = strtab + src->n_value; + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_ILP32 */ @@ -99,55 +99,54 @@ finish: /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_sym_init_from_macho64(KXLDSym *sym, char *strtab, const struct nlist_64 *src) +kxld_sym_init_from_macho64(KXLDSym *sym, char *strtab, const struct nlist_64 *src) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; + + check(sym); + check(strtab); + check(src); - check(sym); - check(strtab); - check(src); + bzero(sym, sizeof(*sym)); + sym->name = strtab + src->n_un.n_strx; + sym->type = src->n_type; + sym->desc = src->n_desc; + sym->base_addr = src->n_value; + sym->link_addr = sym->base_addr; - bzero(sym, sizeof(*sym)); - sym->name = strtab + src->n_un.n_strx; - sym->type = src->n_type; - sym->desc = src->n_desc; - sym->base_addr = src->n_value; - sym->link_addr = sym->base_addr; + rval = init_predicates(sym, src->n_type, src->n_desc); + require_noerr(rval, finish); - rval = init_predicates(sym, src->n_type, src->n_desc); - require_noerr(rval, finish); + (void) init_sym_sectnum(sym, src->n_sect); - (void) init_sym_sectnum(sym, src->n_sect); + if (kxld_sym_is_indirect(sym)) { + sym->alias = strtab + src->n_value; + } - if (kxld_sym_is_indirect(sym)) { - sym->alias = strtab + src->n_value; - } - - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_LP64 */ 
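As context for the two hunks above: kxld_sym_init_from_macho32() and kxld_sym_init_from_macho64() both resolve the symbol name through the string table (strtab + n_un.n_strx) and then classify the entry from its n_type/n_desc bits via init_predicates(). Below is a minimal userland sketch of that decoding, built only on <mach-o/nlist.h>; the helper name and printf output are illustrative assumptions, not kxld API.

#include <stdio.h>
#include <mach-o/nlist.h>

/*
 * Standalone sketch (not kxld code): decode one 64-bit symbol-table
 * entry the way kxld_sym_init_from_macho64() does. The name lives in
 * the string table at offset n_un.n_strx; the N_TYPE bits of n_type
 * select absolute / section / undefined / indirect.
 */
static void
describe_symbol(const char *strtab, const struct nlist_64 *src)
{
    const char *name = strtab + src->n_un.n_strx;

    if (src->n_type & N_STAB) {
        /* Debugger (stab) entries are classified separately. */
        printf("%s: stab entry, n_type 0x%02x\n", name, src->n_type);
        return;
    }

    switch (src->n_type & N_TYPE) {
    case N_ABS:
        printf("%s: absolute, value 0x%llx\n", name, src->n_value);
        break;
    case N_SECT:
        /* n_sect is 1-based; kxld stores sectnum = n_sect - 1. */
        printf("%s: defined in section %u\n", name, src->n_sect);
        break;
    case N_UNDF:
        /* An undefined symbol with a nonzero value is a common symbol. */
        printf("%s: %s\n", name, src->n_value ? "common" : "undefined");
        break;
    case N_INDR:
        /* Indirect: n_value is itself a string-table offset (the alias). */
        printf("%s: indirect alias for %s\n", name, strtab + src->n_value);
        break;
    }
}

The N_INDR branch mirrors the sym->alias assignment in the initializers above: for an indirect symbol, n_value is not an address but another string-table offset naming the aliased symbol.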
/******************************************************************************* *******************************************************************************/ -void +void kxld_sym_init_absolute(KXLDSym *sym, char *name, kxld_addr_t link_addr) { - check(sym); - check(name); + check(sym); + check(name); - bzero(sym, sizeof(*sym)); + bzero(sym, sizeof(*sym)); - sym->name = name; - sym->link_addr = link_addr; - sym->type = N_ABS | N_EXT; - sym->sectnum = NO_SECT; + sym->name = name; + sym->link_addr = link_addr; + sym->type = N_ABS | N_EXT; + sym->sectnum = NO_SECT; - init_predicates(sym, N_ABS | N_EXT, 0); - sym->is_resolved = TRUE; - + init_predicates(sym, N_ABS | N_EXT, 0); + sym->is_resolved = TRUE; } /******************************************************************************* @@ -155,141 +154,138 @@ kxld_sym_init_absolute(KXLDSym *sym, char *name, kxld_addr_t link_addr) static kern_return_t init_predicates(KXLDSym *sym, u_char n_type, u_short n_desc) { - kern_return_t rval = KERN_FAILURE; - - check(sym); - - /* The type field is interpreted differently for normal symbols and stabs */ - if (n_type & N_STAB) { - sym->is_stab = 1; - - switch (n_type) { - /* Labeled as NO_SECT in stab.h */ - case N_GSYM: - case N_FNAME: - case N_RSYM: - case N_SSYM: - case N_LSYM: - case N_BINCL: - case N_PARAMS: - case N_VERSION: - case N_OLEVEL: - case N_PSYM: - case N_EINCL: - case N_EXCL: - case N_BCOMM: - case N_LENG: - case N_OPT: - case N_OSO: - sym->is_absolute = 1; - break; - /* Labeled as n_sect in stab.h */ - case N_FUN: - case N_STSYM: - case N_LCSYM: - case N_BNSYM: - case N_SLINE: - case N_ENSYM: - case N_SO: - case N_SOL: - case N_ENTRY: - case N_ECOMM: - case N_ECOML: - /* These are labeled as NO_SECT in stab.h, but they are actually - * section-based on OS X. We must mark them as such so they get - * relocated. - */ - case N_RBRAC: - case N_LBRAC: - sym->is_section = 1; - break; - default: - rval = KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Invalid N_STAB symbol type: %u.", n_type); - goto finish; - } - - /* Don't care about the C++ predicates for stabs */ - - } else { - u_char type = n_type & N_TYPE; - - /* The first set of type fields are mutually exclusive, so they can be - * set with a switch statement. 
- */ - switch (type) { - case N_ABS: - sym->is_absolute = 1; - break; - case N_SECT: - sym->is_section = 1; - break; - case N_UNDF: - if (sym->base_addr) { - sym->is_common = 1; - } else { - sym->is_undefined = 1; - } - break; - case N_INDR: - sym->is_indirect = 1; - break; - default: - rval = KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Invalid symbol type: %u.", type); - goto finish; - } - - /* Set the type-independent fields */ - if ((n_type & N_EXT) && !(n_type & N_PEXT)) { - sym->is_external = 1; - } - - if (n_desc & N_DESC_DISCARDED) { - sym->is_obsolete = 1; - } - - if (n_desc & N_WEAK_REF) { - sym->is_weak = 1; - } - - if (n_desc & N_ARM_THUMB_DEF) { - sym->is_thumb = 1; - sym->base_addr |= 1; - sym->link_addr |= 1; - } - - /* Set the C++-specific fields */ - if (!strncmp(CXX_PREFIX, sym->name, const_strlen(CXX_PREFIX))) { - sym->is_cxx = 1; - - if (streq_safe(sym->name, METACLASS_VTABLE_PREFIX, - const_strlen(METACLASS_VTABLE_PREFIX))) - { - sym->is_meta_vtable = 1; - } else if (streq_safe(sym->name, VTABLE_PREFIX, - const_strlen(VTABLE_PREFIX))) - { - sym->is_class_vtable = 1; - } else if (kxld_strstr(sym->name, RESERVED_TOKEN)) { - sym->is_padslot = 1; - } else if (kxld_strstr(sym->name, METACLASS_TOKEN)) { - sym->is_metaclass = 1; - } else if (kxld_strstr(sym->name, SUPER_METACLASS_POINTER_TOKEN)) { - sym->is_super_metaclass_pointer = 1; - } - } else if (kxld_sym_name_is_pure_virtual(sym->name)) { - sym->is_cxx = 1; - sym->is_pure_virtual = 1; - } - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + + check(sym); + + /* The type field is interpreted differently for normal symbols and stabs */ + if (n_type & N_STAB) { + sym->is_stab = 1; + + switch (n_type) { + /* Labeled as NO_SECT in stab.h */ + case N_GSYM: + case N_FNAME: + case N_RSYM: + case N_SSYM: + case N_LSYM: + case N_BINCL: + case N_PARAMS: + case N_VERSION: + case N_OLEVEL: + case N_PSYM: + case N_EINCL: + case N_EXCL: + case N_BCOMM: + case N_LENG: + case N_OPT: + case N_OSO: + sym->is_absolute = 1; + break; + /* Labeled as n_sect in stab.h */ + case N_FUN: + case N_STSYM: + case N_LCSYM: + case N_BNSYM: + case N_SLINE: + case N_ENSYM: + case N_SO: + case N_SOL: + case N_ENTRY: + case N_ECOMM: + case N_ECOML: + /* These are labeled as NO_SECT in stab.h, but they are actually + * section-based on OS X. We must mark them as such so they get + * relocated. + */ + case N_RBRAC: + case N_LBRAC: + sym->is_section = 1; + break; + default: + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Invalid N_STAB symbol type: %u.", n_type); + goto finish; + } + + /* Don't care about the C++ predicates for stabs */ + } else { + u_char type = n_type & N_TYPE; + + /* The first set of type fields are mutually exclusive, so they can be + * set with a switch statement. 
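
The stab branch above matches the whole n_type byte against specific stab codes rather than masking it, because for debugger entries the entire byte is the code. A self-contained sketch of that distinction, with two representative constants re-declared from <mach-o/stab.h> and <mach-o/nlist.h> so the snippet compiles on its own:

    #include <stdio.h>
    #include <stdint.h>

    #define N_STAB 0xe0   /* any of these bits set => stab entry       */
    #define N_GSYM 0x20   /* global symbol stab: treated as absolute   */
    #define N_FUN  0x24   /* function stab: section-based, relocated   */

    static const char *classify_stab(uint8_t n_type)
    {
        if (!(n_type & N_STAB)) {
            return "not a stab";
        }
        switch (n_type) {          /* full value, no masking */
        case N_GSYM: return "stab: absolute";
        case N_FUN:  return "stab: section-based";
        default:     return "stab: other";
        }
    }

    int main(void)
    {
        printf("%s\n", classify_stab(N_FUN));   /* "stab: section-based" */
        return 0;
    }
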
+ */ + switch (type) { + case N_ABS: + sym->is_absolute = 1; + break; + case N_SECT: + sym->is_section = 1; + break; + case N_UNDF: + if (sym->base_addr) { + sym->is_common = 1; + } else { + sym->is_undefined = 1; + } + break; + case N_INDR: + sym->is_indirect = 1; + break; + default: + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Invalid symbol type: %u.", type); + goto finish; + } + + /* Set the type-independent fields */ + if ((n_type & N_EXT) && !(n_type & N_PEXT)) { + sym->is_external = 1; + } + + if (n_desc & N_DESC_DISCARDED) { + sym->is_obsolete = 1; + } + + if (n_desc & N_WEAK_REF) { + sym->is_weak = 1; + } + + if (n_desc & N_ARM_THUMB_DEF) { + sym->is_thumb = 1; + sym->base_addr |= 1; + sym->link_addr |= 1; + } + + /* Set the C++-specific fields */ + if (!strncmp(CXX_PREFIX, sym->name, const_strlen(CXX_PREFIX))) { + sym->is_cxx = 1; + + if (streq_safe(sym->name, METACLASS_VTABLE_PREFIX, + const_strlen(METACLASS_VTABLE_PREFIX))) { + sym->is_meta_vtable = 1; + } else if (streq_safe(sym->name, VTABLE_PREFIX, + const_strlen(VTABLE_PREFIX))) { + sym->is_class_vtable = 1; + } else if (kxld_strstr(sym->name, RESERVED_TOKEN)) { + sym->is_padslot = 1; + } else if (kxld_strstr(sym->name, METACLASS_TOKEN)) { + sym->is_metaclass = 1; + } else if (kxld_strstr(sym->name, SUPER_METACLASS_POINTER_TOKEN)) { + sym->is_super_metaclass_pointer = 1; + } + } else if (kxld_sym_name_is_pure_virtual(sym->name)) { + sym->is_cxx = 1; + sym->is_pure_virtual = 1; + } + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -297,22 +293,21 @@ finish: static void init_sym_sectnum(KXLDSym *sym, u_int n_sect) { - /* The n_sect field is set to 0 when the symbol is not section-based, and - * the number of the section in which the symbol exists otherwise. - * Sometimes, symbols can be labeled as section-based, so we make sure that - * they have a valid section number, and set them as absolute if they don't. - */ - - if (kxld_sym_is_section(sym)) { - if (n_sect) { - /* Convert the section number to an index into the section index */ - sym->sectnum = n_sect - 1; - } else { - sym->is_absolute = 1; - sym->is_section = 0; - } - } - + /* The n_sect field is set to 0 when the symbol is not section-based, and + * the number of the section in which the symbol exists otherwise. + * Sometimes, symbols can be labeled as section-based, so we make sure that + * they have a valid section number, and set them as absolute if they don't. 
+ */ + + if (kxld_sym_is_section(sym)) { + if (n_sect) { + /* Convert the section number to an index into the section index */ + sym->sectnum = n_sect - 1; + } else { + sym->is_absolute = 1; + sym->is_section = 0; + } + } } /******************************************************************************* @@ -320,7 +315,7 @@ init_sym_sectnum(KXLDSym *sym, u_int n_sect) void kxld_sym_deinit(KXLDSym *sym __unused) { - check(sym); + check(sym); } /******************************************************************************* @@ -328,9 +323,9 @@ kxld_sym_deinit(KXLDSym *sym __unused) void kxld_sym_destroy(KXLDSym *sym) { - check(sym); - kxld_sym_deinit(sym); - kxld_free(sym, sizeof(*sym)); + check(sym); + kxld_sym_deinit(sym); + kxld_free(sym, sizeof(*sym)); } @@ -339,9 +334,9 @@ kxld_sym_destroy(KXLDSym *sym) boolean_t kxld_sym_is_absolute(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_absolute); + return 0 != sym->is_absolute; } /******************************************************************************* @@ -349,9 +344,9 @@ kxld_sym_is_absolute(const KXLDSym *sym) boolean_t kxld_sym_is_section(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_section); + return 0 != sym->is_section; } /******************************************************************************* @@ -359,10 +354,10 @@ kxld_sym_is_section(const KXLDSym *sym) boolean_t kxld_sym_is_defined(const KXLDSym *sym) { - check(sym); + check(sym); - return ((kxld_sym_is_absolute(sym) || kxld_sym_is_section(sym)) && - !kxld_sym_is_replaced(sym)); + return (kxld_sym_is_absolute(sym) || kxld_sym_is_section(sym)) && + !kxld_sym_is_replaced(sym); } @@ -371,9 +366,9 @@ kxld_sym_is_defined(const KXLDSym *sym) boolean_t kxld_sym_is_defined_locally(const KXLDSym *sym) { - check(sym); + check(sym); - return (kxld_sym_is_defined(sym) && !sym->is_resolved); + return kxld_sym_is_defined(sym) && !sym->is_resolved; } /******************************************************************************* @@ -381,9 +376,9 @@ kxld_sym_is_defined_locally(const KXLDSym *sym) boolean_t kxld_sym_is_external(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_external); + return 0 != sym->is_external; } /******************************************************************************* @@ -391,9 +386,9 @@ kxld_sym_is_external(const KXLDSym *sym) boolean_t kxld_sym_is_exported(const KXLDSym *sym) { - check(sym); + check(sym); - return (kxld_sym_is_defined_locally(sym) && kxld_sym_is_external(sym)); + return kxld_sym_is_defined_locally(sym) && kxld_sym_is_external(sym); } /******************************************************************************* @@ -401,9 +396,9 @@ kxld_sym_is_exported(const KXLDSym *sym) boolean_t kxld_sym_is_undefined(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_undefined); + return 0 != sym->is_undefined; } /******************************************************************************* @@ -411,9 +406,9 @@ kxld_sym_is_undefined(const KXLDSym *sym) boolean_t kxld_sym_is_indirect(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_indirect); + return 0 != sym->is_indirect; } /******************************************************************************* @@ -421,9 +416,9 @@ kxld_sym_is_indirect(const KXLDSym *sym) boolean_t kxld_sym_is_replaced(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_replaced); + return 0 != sym->is_replaced; } 
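
For non-stab symbols, everything init_predicates derives comes from two masks on n_type: N_TYPE selects the mutually exclusive kind (absolute, section, undefined, indirect), and the N_EXT/N_PEXT pair decides external linkage. A compilable sketch of those tests, with the mask values re-declared from <mach-o/nlist.h>:

    #include <stdio.h>
    #include <stdint.h>

    /* Mask values from <mach-o/nlist.h>, re-declared for self-containment. */
    #define N_PEXT 0x10
    #define N_TYPE 0x0e
    #define N_EXT  0x01
    #define N_UNDF 0x00
    #define N_ABS  0x02
    #define N_SECT 0x0e
    #define N_INDR 0x0a

    int main(void)
    {
        uint8_t n_type = N_SECT | N_EXT;      /* a defined, exported symbol */

        switch (n_type & N_TYPE) {            /* mutually exclusive kinds */
        case N_ABS:  puts("absolute");  break;
        case N_SECT: puts("section");   break;
        case N_UNDF: puts("undefined"); break;
        case N_INDR: puts("indirect");  break;
        }

        /* "External" means visible outside the object: N_EXT set and the
         * private-extern bit N_PEXT clear -- the same test as above. */
        if ((n_type & N_EXT) && !(n_type & N_PEXT)) {
            puts("external");
        }
        return 0;
    }
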
/******************************************************************************* @@ -431,9 +426,9 @@ kxld_sym_is_replaced(const KXLDSym *sym) boolean_t kxld_sym_is_common(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_common); + return 0 != sym->is_common; } /******************************************************************************* @@ -441,8 +436,8 @@ kxld_sym_is_common(const KXLDSym *sym) boolean_t kxld_sym_is_unresolved(const KXLDSym *sym) { - return ((kxld_sym_is_undefined(sym) && !kxld_sym_is_replaced(sym)) || - kxld_sym_is_indirect(sym) || kxld_sym_is_common(sym)); + return (kxld_sym_is_undefined(sym) && !kxld_sym_is_replaced(sym)) || + kxld_sym_is_indirect(sym) || kxld_sym_is_common(sym); } /******************************************************************************* @@ -450,7 +445,7 @@ kxld_sym_is_unresolved(const KXLDSym *sym) boolean_t kxld_sym_is_obsolete(const KXLDSym *sym) { - return (0 != sym->is_obsolete); + return 0 != sym->is_obsolete; } #if KXLD_USER_OR_GOT @@ -459,9 +454,9 @@ kxld_sym_is_obsolete(const KXLDSym *sym) boolean_t kxld_sym_is_got(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_got); + return 0 != sym->is_got; } #endif /* KXLD_USER_OR_GOT */ @@ -470,9 +465,9 @@ kxld_sym_is_got(const KXLDSym *sym) boolean_t kxld_sym_is_stab(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_stab); + return 0 != sym->is_stab; } /******************************************************************************* @@ -480,9 +475,9 @@ kxld_sym_is_stab(const KXLDSym *sym) boolean_t kxld_sym_is_weak(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_weak); + return 0 != sym->is_weak; } /******************************************************************************* @@ -490,9 +485,9 @@ kxld_sym_is_weak(const KXLDSym *sym) boolean_t kxld_sym_is_cxx(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_cxx); + return 0 != sym->is_cxx; } /******************************************************************************* @@ -500,7 +495,7 @@ kxld_sym_is_cxx(const KXLDSym *sym) boolean_t kxld_sym_is_pure_virtual(const KXLDSym *sym) { - return (0 != sym->is_pure_virtual); + return 0 != sym->is_pure_virtual; } /******************************************************************************* @@ -508,9 +503,9 @@ kxld_sym_is_pure_virtual(const KXLDSym *sym) boolean_t kxld_sym_is_vtable(const KXLDSym *sym) { - check(sym); + check(sym); - return kxld_sym_is_class_vtable(sym) || kxld_sym_is_metaclass_vtable(sym); + return kxld_sym_is_class_vtable(sym) || kxld_sym_is_metaclass_vtable(sym); } /******************************************************************************* @@ -518,9 +513,9 @@ kxld_sym_is_vtable(const KXLDSym *sym) boolean_t kxld_sym_is_class_vtable(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_class_vtable); + return 0 != sym->is_class_vtable; } /******************************************************************************* @@ -528,9 +523,9 @@ kxld_sym_is_class_vtable(const KXLDSym *sym) boolean_t kxld_sym_is_metaclass_vtable(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_meta_vtable); + return 0 != sym->is_meta_vtable; } /******************************************************************************* @@ -538,9 +533,9 @@ kxld_sym_is_metaclass_vtable(const KXLDSym *sym) boolean_t kxld_sym_is_padslot(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_padslot); + return 0 != sym->is_padslot; } 
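
The accessors above all follow one idiom: state lives in one-bit bitfields of struct kxld_sym, and each predicate normalizes its bit to a boolean_t with "0 != ..." (the reformatting merely drops the redundant parentheses around the return expression). The idiom in isolation, with a stand-in typedef for boolean_t:

    #include <stdio.h>

    typedef int boolean_t;     /* stand-in; kxld gets this from Mach headers */

    /* Same pattern as struct kxld_sym: one-bit flags packed into a word,
     * each exposed through a predicate that normalizes to 0 or 1. */
    struct flags {
        unsigned int is_weak:1,
                     is_common:1;
    };

    static boolean_t is_weak(const struct flags *f)
    {
        return 0 != f->is_weak;
    }

    int main(void)
    {
        struct flags f = { .is_weak = 1 };
        printf("weak: %d\n", is_weak(&f));
        return 0;
    }
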
/******************************************************************************* @@ -548,9 +543,9 @@ kxld_sym_is_padslot(const KXLDSym *sym) boolean_t kxld_sym_is_metaclass(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_metaclass); + return 0 != sym->is_metaclass; } /******************************************************************************* @@ -558,9 +553,9 @@ kxld_sym_is_metaclass(const KXLDSym *sym) boolean_t kxld_sym_is_super_metaclass_pointer(const KXLDSym *sym) { - check(sym); + check(sym); - return (0 != sym->is_super_metaclass_pointer); + return 0 != sym->is_super_metaclass_pointer; } /******************************************************************************* @@ -568,7 +563,7 @@ kxld_sym_is_super_metaclass_pointer(const KXLDSym *sym) boolean_t kxld_sym_name_is_pure_virtual(const char *name) { - return streq_safe(CXX_PURE_VIRTUAL, name, sizeof(CXX_PURE_VIRTUAL)); + return streq_safe(CXX_PURE_VIRTUAL, name, sizeof(CXX_PURE_VIRTUAL)); } /******************************************************************************* @@ -576,9 +571,9 @@ kxld_sym_name_is_pure_virtual(const char *name) boolean_t kxld_sym_name_is_padslot(const char *name) { - check(name); + check(name); - return (kxld_strstr(name, RESERVED_TOKEN) != 0); + return kxld_strstr(name, RESERVED_TOKEN) != 0; } /******************************************************************************* @@ -586,9 +581,9 @@ kxld_sym_name_is_padslot(const char *name) u_int kxld_sym_get_section_offset(const KXLDSym *sym, const KXLDSect *sect) { - check(sym); + check(sym); - return (u_int) (sym->base_addr - sect->base_addr); + return (u_int) (sym->base_addr - sect->base_addr); } #if KXLD_USER_OR_COMMON @@ -597,7 +592,7 @@ kxld_sym_get_section_offset(const KXLDSym *sym, const KXLDSect *sect) kxld_size_t kxld_sym_get_common_size(const KXLDSym *sym) { - return sym->base_addr; + return sym->base_addr; } /******************************************************************************* @@ -605,10 +600,12 @@ kxld_sym_get_common_size(const KXLDSym *sym) u_int kxld_sym_get_common_align(const KXLDSym *sym) { - u_int align = GET_COMM_ALIGN(sym->desc); - if (!align) align = 3; + u_int align = GET_COMM_ALIGN(sym->desc); + if (!align) { + align = 3; + } - return align; + return align; } #endif /* KXLD_USER_OR_COMMON */ @@ -618,18 +615,18 @@ kern_return_t kxld_sym_get_class_name_from_metaclass(const KXLDSym *sym, char class_name[], u_long class_name_len) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(sym); - require_action(kxld_sym_is_metaclass(sym), finish, rval=KERN_FAILURE); + check(sym); + require_action(kxld_sym_is_metaclass(sym), finish, rval = KERN_FAILURE); - rval = extract_inner_string(sym->name, OSOBJ_PREFIX, METACLASS_TOKEN, - class_name, class_name_len); - require_noerr(rval, finish); + rval = extract_inner_string(sym->name, OSOBJ_PREFIX, METACLASS_TOKEN, + class_name, class_name_len); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -638,19 +635,19 @@ kern_return_t kxld_sym_get_class_name_from_super_metaclass_pointer(const KXLDSym *sym, char class_name[], u_long class_name_len) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(sym); - require_action(kxld_sym_is_super_metaclass_pointer(sym), finish, - rval=KERN_FAILURE); + check(sym); + require_action(kxld_sym_is_super_metaclass_pointer(sym), 
finish, + rval = KERN_FAILURE); - rval = extract_inner_string(sym->name, OSOBJ_PREFIX, - SUPER_METACLASS_POINTER_TOKEN, class_name, class_name_len); - require_noerr(rval, finish); + rval = extract_inner_string(sym->name, OSOBJ_PREFIX, + SUPER_METACLASS_POINTER_TOKEN, class_name, class_name_len); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -659,124 +656,124 @@ kern_return_t kxld_sym_get_class_name_from_vtable(const KXLDSym *sym, char class_name[], u_long class_name_len) { - kern_return_t rval = KERN_FAILURE; - - check(sym); - require_action(kxld_sym_is_class_vtable(sym), finish, rval=KERN_FAILURE); + kern_return_t rval = KERN_FAILURE; + + check(sym); + require_action(kxld_sym_is_class_vtable(sym), finish, rval = KERN_FAILURE); + + rval = kxld_sym_get_class_name_from_vtable_name(sym->name, + class_name, class_name_len); + require_noerr(rval, finish); - rval = kxld_sym_get_class_name_from_vtable_name(sym->name, - class_name, class_name_len); - require_noerr(rval, finish); - - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_sym_get_class_name_from_vtable_name(const char *vtable_name, char class_name[], u_long class_name_len) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(vtable_name); + check(vtable_name); - rval = extract_inner_string(vtable_name, VTABLE_PREFIX, NULL, - class_name, class_name_len); - require_noerr(rval, finish); + rval = extract_inner_string(vtable_name, VTABLE_PREFIX, NULL, + class_name, class_name_len); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_sym_get_vtable_name_from_class_name(const char *class_name, +kxld_sym_get_vtable_name_from_class_name(const char *class_name, char vtable_name[], u_long vtable_name_len) { - kern_return_t rval = KERN_FAILURE; - u_long outlen = 0; + kern_return_t rval = KERN_FAILURE; + u_long outlen = 0; - check(class_name); - check(vtable_name); + check(class_name); + check(vtable_name); - outlen = strlcpy(vtable_name, VTABLE_PREFIX, vtable_name_len); - require_action(outlen < vtable_name_len, finish, - rval=KERN_FAILURE); + outlen = strlcpy(vtable_name, VTABLE_PREFIX, vtable_name_len); + require_action(outlen < vtable_name_len, finish, + rval = KERN_FAILURE); - outlen = strlcat(vtable_name, class_name, vtable_name_len); - require_action(outlen < vtable_name_len, finish, - rval=KERN_FAILURE); + outlen = strlcat(vtable_name, class_name, vtable_name_len); + require_action(outlen < vtable_name_len, finish, + rval = KERN_FAILURE); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_sym_get_meta_vtable_name_from_class_name(const char *class_name, +kxld_sym_get_meta_vtable_name_from_class_name(const char *class_name, char meta_vtable_name[], u_long meta_vtable_name_len) { - 
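
The name-building functions above all rely on the BSD strlcpy/strlcat contract: the return value is the length the result would have needed, so "outlen < buflen" proves nothing was truncated, and each require_action bails out otherwise. A standalone sketch of the same pattern; the prefix and mangled class name below are illustrative stand-ins, not the actual kxld constants:

    /* strlcpy/strlcat are BSD/macOS libc functions; on other platforms
     * substitute snprintf or link against libbsd. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char vtable_name[64];
        const char *prefix = "__ZTV";          /* illustrative prefix only */
        const char *class_name = "8OSObject";  /* illustrative mangled name */
        size_t outlen;

        outlen = strlcpy(vtable_name, prefix, sizeof(vtable_name));
        if (outlen >= sizeof(vtable_name)) return 1;   /* truncated */

        outlen = strlcat(vtable_name, class_name, sizeof(vtable_name));
        if (outlen >= sizeof(vtable_name)) return 1;   /* truncated */

        printf("%s\n", vtable_name);   /* __ZTV8OSObject */
        return 0;
    }
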
kern_return_t rval = KERN_FAILURE; - u_long outlen = 0; + kern_return_t rval = KERN_FAILURE; + u_long outlen = 0; - check(class_name); - check(meta_vtable_name); + check(class_name); + check(meta_vtable_name); - outlen = strlcpy(meta_vtable_name, METACLASS_VTABLE_PREFIX, - meta_vtable_name_len); - require_action(outlen < meta_vtable_name_len, finish, - rval=KERN_FAILURE); + outlen = strlcpy(meta_vtable_name, METACLASS_VTABLE_PREFIX, + meta_vtable_name_len); + require_action(outlen < meta_vtable_name_len, finish, + rval = KERN_FAILURE); - outlen = strlcat(meta_vtable_name, class_name, meta_vtable_name_len); - require_action(outlen < meta_vtable_name_len, finish, - rval=KERN_FAILURE); + outlen = strlcat(meta_vtable_name, class_name, meta_vtable_name_len); + require_action(outlen < meta_vtable_name_len, finish, + rval = KERN_FAILURE); - outlen = strlcat(meta_vtable_name, METACLASS_VTABLE_SUFFIX, - meta_vtable_name_len); - require_action(outlen < meta_vtable_name_len, finish, - rval=KERN_FAILURE); + outlen = strlcat(meta_vtable_name, METACLASS_VTABLE_SUFFIX, + meta_vtable_name_len); + require_action(outlen < meta_vtable_name_len, finish, + rval = KERN_FAILURE); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_sym_get_final_sym_name_from_class_name(const char *class_name, +kxld_sym_get_final_sym_name_from_class_name(const char *class_name, char final_sym_name[], u_long final_sym_name_len) { - kern_return_t rval = KERN_FAILURE; - u_long outlen = 0; + kern_return_t rval = KERN_FAILURE; + u_long outlen = 0; - check(class_name); - check(final_sym_name); + check(class_name); + check(final_sym_name); - outlen = strlcpy(final_sym_name, OSOBJ_PREFIX, final_sym_name_len); - require_action(outlen < final_sym_name_len, finish, - rval=KERN_FAILURE); + outlen = strlcpy(final_sym_name, OSOBJ_PREFIX, final_sym_name_len); + require_action(outlen < final_sym_name_len, finish, + rval = KERN_FAILURE); - outlen = strlcat(final_sym_name, class_name, final_sym_name_len); - require_action(outlen < final_sym_name_len, finish, - rval=KERN_FAILURE); + outlen = strlcat(final_sym_name, class_name, final_sym_name_len); + require_action(outlen < final_sym_name_len, finish, + rval = KERN_FAILURE); - outlen = strlcat(final_sym_name, FINAL_CLASS_TOKEN, final_sym_name_len); - require_action(outlen < final_sym_name_len, finish, - rval=KERN_FAILURE); + outlen = strlcat(final_sym_name, FINAL_CLASS_TOKEN, final_sym_name_len); + require_action(outlen < final_sym_name_len, finish, + rval = KERN_FAILURE); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -785,47 +782,47 @@ u_long kxld_sym_get_function_prefix_from_class_name(const char *class_name, char function_prefix[], u_long function_prefix_len) { - u_long rval = 0; - u_long outlen = 0; + u_long rval = 0; + u_long outlen = 0; - check(class_name); - check(function_prefix); + check(class_name); + check(function_prefix); - outlen = strlcpy(function_prefix, OSOBJ_PREFIX, function_prefix_len); - require(outlen < function_prefix_len, finish); + outlen = strlcpy(function_prefix, OSOBJ_PREFIX, function_prefix_len); + require(outlen < function_prefix_len, finish); - outlen = strlcat(function_prefix, class_name, function_prefix_len); - require(outlen < 
function_prefix_len, finish); + outlen = strlcat(function_prefix, class_name, function_prefix_len); + require(outlen < function_prefix_len, finish); - rval = outlen; + rval = outlen; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ static kern_return_t -extract_inner_string(const char *str, const char *prefix, const char *suffix, +extract_inner_string(const char *str, const char *prefix, const char *suffix, char *buf, u_long len) { - kern_return_t rval = KERN_FAILURE; - u_long prelen = 0, suflen = 0, striplen = 0; + kern_return_t rval = KERN_FAILURE; + u_long prelen = 0, suflen = 0, striplen = 0; - check(str); - check(buf); + check(str); + check(buf); - prelen = (prefix) ? strlen(prefix) : 0; - suflen = (suffix) ? strlen(suffix) : 0; - striplen = strlen(str) - prelen - suflen; + prelen = (prefix) ? strlen(prefix) : 0; + suflen = (suffix) ? strlen(suffix) : 0; + striplen = strlen(str) - prelen - suflen; - require_action(striplen < len, finish, rval=KERN_FAILURE); + require_action(striplen < len, finish, rval = KERN_FAILURE); - strncpy(buf, str + prelen, striplen); - buf[striplen] = '\0'; + strncpy(buf, str + prelen, striplen); + buf[striplen] = '\0'; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_USER_OR_GOT @@ -834,7 +831,7 @@ finish: void kxld_sym_set_got(KXLDSym *sym) { - sym->is_got = 1; + sym->is_got = 1; } #endif /* KXLD_USER_OR_GOT */ @@ -843,50 +840,50 @@ kxld_sym_set_got(KXLDSym *sym) void kxld_sym_relocate(KXLDSym *sym, const KXLDSect *sect) { - if (kxld_sym_is_section(sym)) { - sym->link_addr = sym->base_addr - sect->base_addr + sect->link_addr; - sym->relocated_sectnum = sect->sectnum; - } + if (kxld_sym_is_section(sym)) { + sym->link_addr = sym->base_addr - sect->base_addr + sect->link_addr; + sym->relocated_sectnum = sect->sectnum; + } } #if KXLD_USER_OR_ILP32 /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_sym_export_macho_32(const KXLDSym *sym, u_char *_nl, char *strtab, +kxld_sym_export_macho_32(const KXLDSym *sym, u_char *_nl, char *strtab, u_long *stroff, u_long strsize) { - kern_return_t rval = KERN_FAILURE; - struct nlist *nl = (struct nlist *) ((void *) _nl); - char *str = NULL; - long bytes = 0; - - check(sym); - check(nl); - check(strtab); - check(stroff); - - bytes = strlen(sym->name) + 1; - require_action((u_long)bytes <= strsize - *stroff, finish, - rval = KERN_FAILURE); - - nl->n_type = sym->type; - nl->n_sect = (kxld_sym_is_section(sym)) ? sym->relocated_sectnum + 1 : 0; - nl->n_desc = sym->desc; - nl->n_un.n_strx = (uint32_t) *stroff; - nl->n_value = (uint32_t) sym->link_addr; - if (sym->is_thumb) { - nl->n_value &= ~0x1U; - } - - str = (char *) (strtab + *stroff); - strlcpy(str, sym->name, strsize - *stroff); - - *stroff += bytes; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + struct nlist *nl = (struct nlist *) ((void *) _nl); + char *str = NULL; + long bytes = 0; + + check(sym); + check(nl); + check(strtab); + check(stroff); + + bytes = strlen(sym->name) + 1; + require_action((u_long)bytes <= strsize - *stroff, finish, + rval = KERN_FAILURE); + + nl->n_type = sym->type; + nl->n_sect = (kxld_sym_is_section(sym)) ? 
sym->relocated_sectnum + 1 : 0; + nl->n_desc = sym->desc; + nl->n_un.n_strx = (uint32_t) *stroff; + nl->n_value = (uint32_t) sym->link_addr; + if (sym->is_thumb) { + nl->n_value &= ~0x1U; + } + + str = (char *) (strtab + *stroff); + strlcpy(str, sym->name, strsize - *stroff); + + *stroff += bytes; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_ILP32 */ @@ -897,76 +894,76 @@ kern_return_t kxld_sym_export_macho_64(const KXLDSym *sym, u_char *_nl, char *strtab, u_long *stroff, u_long strsize) { - kern_return_t rval = KERN_FAILURE; - struct nlist_64 *nl = (struct nlist_64 *) ((void *) _nl); - char *str = NULL; - long bytes = 0; + kern_return_t rval = KERN_FAILURE; + struct nlist_64 *nl = (struct nlist_64 *) ((void *) _nl); + char *str = NULL; + long bytes = 0; - check(sym); - check(nl); - check(strtab); - check(stroff); + check(sym); + check(nl); + check(strtab); + check(stroff); - bytes = strlen(sym->name) + 1; - require_action((u_long)bytes <= strsize - *stroff, finish, - rval = KERN_FAILURE); + bytes = strlen(sym->name) + 1; + require_action((u_long)bytes <= strsize - *stroff, finish, + rval = KERN_FAILURE); - nl->n_type = sym->type; - nl->n_sect = (kxld_sym_is_section(sym)) ? sym->relocated_sectnum + 1 : 0; - nl->n_desc = sym->desc; - nl->n_un.n_strx = (uint32_t) *stroff; - nl->n_value = (uint64_t) sym->link_addr; - if (sym->is_thumb) { - nl->n_value &= ~0x1ULL; - } + nl->n_type = sym->type; + nl->n_sect = (kxld_sym_is_section(sym)) ? sym->relocated_sectnum + 1 : 0; + nl->n_desc = sym->desc; + nl->n_un.n_strx = (uint32_t) *stroff; + nl->n_value = (uint64_t) sym->link_addr; + if (sym->is_thumb) { + nl->n_value &= ~0x1ULL; + } - str = (char *) (strtab + *stroff); + str = (char *) (strtab + *stroff); - strlcpy(str, sym->name, strsize - *stroff); + strlcpy(str, sym->name, strsize - *stroff); - *stroff += bytes; + *stroff += bytes; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_LP64 */ /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_sym_resolve(KXLDSym *sym, kxld_addr_t addr) +kxld_sym_resolve(KXLDSym *sym, kxld_addr_t addr) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; + + check(sym); + + require_action(kxld_sym_is_undefined(sym) || kxld_sym_is_indirect(sym), + finish, rval = KERN_FAILURE); - check(sym); + /* Set the n_list data types */ - require_action(kxld_sym_is_undefined(sym) || kxld_sym_is_indirect(sym), - finish, rval=KERN_FAILURE); + sym->link_addr = addr; + sym->type = N_ABS | N_EXT; + sym->sectnum = NO_SECT; - /* Set the n_list data types */ + /* Set the predicate bits for an externally resolved symbol. */ - sym->link_addr = addr; - sym->type = N_ABS | N_EXT; - sym->sectnum = NO_SECT; - - /* Set the predicate bits for an externally resolved symbol. 
*/ - - sym->is_external = TRUE; - sym->is_absolute = TRUE; - sym->is_resolved = TRUE; + sym->is_external = TRUE; + sym->is_absolute = TRUE; + sym->is_resolved = TRUE; - /* Clear the predicate bits for types that can be resolved */ + /* Clear the predicate bits for types that can be resolved */ - sym->is_undefined = FALSE; - sym->is_indirect = FALSE; + sym->is_undefined = FALSE; + sym->is_indirect = FALSE; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_USER_OR_COMMON @@ -975,31 +972,31 @@ finish: kern_return_t kxld_sym_resolve_common(KXLDSym *sym, u_int sectnum, kxld_addr_t base_addr) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - check(sym); + check(sym); - require_action(kxld_sym_is_common(sym), finish, - rval=KERN_FAILURE); + require_action(kxld_sym_is_common(sym), finish, + rval = KERN_FAILURE); - sym->base_addr = base_addr; - sym->link_addr = base_addr; - sym->type = N_SECT | N_EXT; - sym->sectnum = sectnum; - sym->desc = 0; + sym->base_addr = base_addr; + sym->link_addr = base_addr; + sym->type = N_SECT | N_EXT; + sym->sectnum = sectnum; + sym->desc = 0; - sym->is_absolute = FALSE; - sym->is_section = TRUE; - sym->is_undefined = FALSE; - sym->is_indirect = FALSE; - sym->is_common = FALSE; - sym->is_external = TRUE; + sym->is_absolute = FALSE; + sym->is_section = TRUE; + sym->is_undefined = FALSE; + sym->is_indirect = FALSE; + sym->is_common = FALSE; + sym->is_external = TRUE; - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_COMMON */ @@ -1008,10 +1005,10 @@ finish: void kxld_sym_delete(KXLDSym *sym) { - check(sym); + check(sym); - bzero(sym, sizeof(*sym)); - sym->is_replaced = TRUE; + bzero(sym, sizeof(*sym)); + sym->is_replaced = TRUE; } @@ -1020,9 +1017,9 @@ kxld_sym_delete(KXLDSym *sym) void kxld_sym_patch(KXLDSym *sym) { - check(sym); + check(sym); - sym->is_replaced = TRUE; + sym->is_replaced = TRUE; } /******************************************************************************* @@ -1030,8 +1027,8 @@ kxld_sym_patch(KXLDSym *sym) void kxld_sym_mark_private(KXLDSym *sym) { - check(sym); + check(sym); - sym->type |= N_PEXT; - sym->is_external = FALSE; + sym->type |= N_PEXT; + sym->is_external = FALSE; } diff --git a/libkern/kxld/kxld_sym.h b/libkern/kxld/kxld_sym.h index 81fe4a4ab..2019aeec0 100644 --- a/libkern/kxld/kxld_sym.h +++ b/libkern/kxld/kxld_sym.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
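
kxld_sym_mark_private above is the inverse of exporting: it sets the private-extern bit in the type byte and clears the external flag so later passes skip the symbol. A stand-in sketch of that transition; only N_PEXT is a real Mach-O constant here, the struct is a hypothetical reduction:

    #include <stdio.h>
    #include <stdint.h>

    #define N_PEXT 0x10   /* private-extern bit, as in <mach-o/nlist.h> */

    struct mini_sym {
        uint8_t type;
        unsigned int is_external:1;
    };

    static void mark_private(struct mini_sym *sym)
    {
        sym->type |= N_PEXT;     /* same two steps as kxld_sym_mark_private */
        sym->is_external = 0;
    }

    int main(void)
    {
        struct mini_sym s = { .type = 0x0f, .is_external = 1 }; /* N_SECT|N_EXT */
        mark_private(&s);
        printf("type=0x%02x external=%u\n", s.type, s.is_external);
        return 0;
    }
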
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_SYMBOL_H_ @@ -42,36 +42,36 @@ typedef struct kxld_sym KXLDSym; typedef boolean_t (*KXLDSymPredicateTest)(const KXLDSym *sym); struct kxld_sym { - char *name; // The symbol's name - char *alias; // The indirect symbol's alias name - kxld_addr_t base_addr; // The symbol's base address - kxld_addr_t link_addr; // The relocated address - kxld_addr_t got_addr; // The address of this symbol's GOT entry - uint16_t desc; - uint8_t type; - uint8_t sectnum; // The symbol's section number - uint8_t relocated_sectnum; - u_int is_absolute:1, // Set for absolute symbols - is_section:1, // Set for section symbols - is_undefined:1, // Set for undefined symbols - is_indirect:1, // Set for indirect symbols - is_common:1, // Set for common symbols - is_external:1, // Set for external symbols - is_stab:1, // Set for stab symbols - is_weak:1, // Set for weak definition symbols - is_resolved:1, // For symbols that have been resolved - // externally and should not be exported - is_obsolete:1, // For symbols marked as obsolete - is_replaced:1, // Set for symbols replaced by patching - is_got:1, // Has an entry in the GOT - is_cxx:1, // Set for C++ symbols - is_pure_virtual:1, // Set for pure virtual symbols - is_class_vtable:1, // Set for vtable symbols of classes - is_meta_vtable:1, // Set for vtable symbols of MetaClasses - is_padslot:1, // Set for pad slot symbols - is_metaclass:1, // Set for metaclass symbols - is_super_metaclass_pointer:1, // Set for super metaclass pointer syms - is_thumb:1; // Set for thumb symbols (ARM only) + char *name; // The symbol's name + char *alias; // The indirect symbol's alias name + kxld_addr_t base_addr; // The symbol's base address + kxld_addr_t link_addr; // The relocated address + kxld_addr_t got_addr; // The address of this symbol's GOT entry + uint16_t desc; + uint8_t type; + uint8_t sectnum; // The symbol's section number + uint8_t relocated_sectnum; + u_int is_absolute:1, // Set for absolute symbols + is_section:1, // Set for section symbols + is_undefined:1, // Set for undefined symbols + is_indirect:1, // Set for indirect symbols + is_common:1, // Set for common symbols + is_external:1, // Set for external symbols + is_stab:1, // Set for stab symbols + is_weak:1, // Set for weak definition symbols + is_resolved:1, // For symbols that have been resolved + // externally and should not be exported + is_obsolete:1, // For symbols marked as obsolete + is_replaced:1, // Set for symbols replaced by patching + is_got:1, // Has an entry in the GOT + is_cxx:1, // Set for C++ symbols + is_pure_virtual:1, // Set for pure virtual symbols + is_class_vtable:1, // Set for vtable symbols of classes + is_meta_vtable:1, // Set for vtable symbols of MetaClasses + is_padslot:1, // Set for pad slot symbols + is_metaclass:1, // Set for metaclass symbols + is_super_metaclass_pointer:1, // Set for super metaclass pointer syms + is_thumb:1; // Set for thumb symbols (ARM only) }; /******************************************************************************* @@ -79,162 +79,162 @@ struct kxld_sym { *******************************************************************************/ #if KXLD_USER_OR_ILP32 -kern_return_t kxld_sym_init_from_macho32(KXLDSym *sym, char *strtab, +kern_return_t kxld_sym_init_from_macho32(KXLDSym *sym, char *strtab, const struct nlist *src) __attribute__((nonnull, visibility("hidden"))); #endif #if KXLD_USER_OR_LP64 -kern_return_t kxld_sym_init_from_macho64(KXLDSym *sym, char *strtab, +kern_return_t 
kxld_sym_init_from_macho64(KXLDSym *sym, char *strtab, const struct nlist_64 *src) __attribute__((nonnull, visibility("hidden"))); #endif void kxld_sym_init_absolute(KXLDSym *sym, char *name, kxld_addr_t link_addr) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_sym_deinit(KXLDSym *sym) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_sym_destroy(KXLDSym *sym) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors *******************************************************************************/ boolean_t kxld_sym_is_absolute(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); -boolean_t kxld_sym_is_section(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +boolean_t kxld_sym_is_section(const KXLDSym *sym) +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_defined(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_defined_locally(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_external(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_exported(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_undefined(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_indirect(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_replaced(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /* We don't wrap this in KXLD_USER_OR_COMMON because even though common symbols * aren't always supported, we always need to be able to detect them. 
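
The header reflow above moves the __attribute__ lines to column zero but keeps the same annotations throughout: pure lets the compiler fold repeated calls on unchanged arguments, nonnull turns NULL arguments into compile-time warnings, and visibility("hidden") keeps these helpers out of the library's exported symbol table. A small sketch of the triple on an unrelated, hypothetical function (GCC/Clang extension):

    #include <stdbool.h>

    struct widget { int refcount; };

    /* pure: no side effects, result depends only on args/globals.
     * nonnull: warn if a caller passes NULL.
     * visibility("hidden"): not exported from the shared object. */
    bool widget_is_live(const struct widget *w)
        __attribute__((pure, nonnull, visibility("hidden")));

    bool
    widget_is_live(const struct widget *w)
    {
        return 0 != w->refcount;
    }

    int main(void)
    {
        struct widget w = { .refcount = 1 };
        return widget_is_live(&w) ? 0 : 1;
    }
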
*/ boolean_t kxld_sym_is_common(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_unresolved(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_obsolete(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #if KXLD_USER_OR_GOT boolean_t kxld_sym_is_got(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_GOT */ boolean_t kxld_sym_is_stab(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_weak(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_cxx(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_pure_virtual(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_vtable(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_class_vtable(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_metaclass_vtable(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_padslot(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_metaclass(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_is_super_metaclass_pointer(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_name_is_pure_virtual(const char *name) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); boolean_t kxld_sym_name_is_padslot(const char *name) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); -u_int kxld_sym_get_section_offset(const KXLDSym *sym, +u_int kxld_sym_get_section_offset(const KXLDSym *sym, const struct kxld_sect *sect) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #if KXLD_USER_OR_COMMON kxld_size_t kxld_sym_get_common_size(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); u_int kxld_sym_get_common_align(const KXLDSym *sym) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_COMMON */ kern_return_t kxld_sym_get_class_name_from_metaclass(const KXLDSym *sym, char class_name[], u_long class_name_len) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t 
kxld_sym_get_class_name_from_super_metaclass_pointer( - const KXLDSym *sym, char class_name[], u_long class_name_len) - __attribute__((nonnull, visibility("hidden"))); + const KXLDSym *sym, char class_name[], u_long class_name_len) +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_sym_get_class_name_from_vtable(const KXLDSym *sym, char class_name[], u_long class_name_len) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_sym_get_class_name_from_vtable_name(const char *vtable_name, char class_name[], u_long class_name_len) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); -kern_return_t kxld_sym_get_vtable_name_from_class_name(const char *class_name, +kern_return_t kxld_sym_get_vtable_name_from_class_name(const char *class_name, char vtable_name[], u_long vtable_name_len) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); -kern_return_t kxld_sym_get_meta_vtable_name_from_class_name(const char *class_name, +kern_return_t kxld_sym_get_meta_vtable_name_from_class_name(const char *class_name, char meta_vtable_name[], u_long meta_vtable_name_len) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); -kern_return_t kxld_sym_get_final_sym_name_from_class_name(const char *class_name, +kern_return_t kxld_sym_get_final_sym_name_from_class_name(const char *class_name, char final_sym_name[], u_long final_sym_name_len) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); u_long kxld_sym_get_function_prefix_from_class_name(const char *class_name, char function_prefix[], u_long function_prefix_len) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #if KXLD_USER_OR_ILP32 -kern_return_t kxld_sym_export_macho_32(const KXLDSym *sym, u_char *nl, +kern_return_t kxld_sym_export_macho_32(const KXLDSym *sym, u_char *nl, char *strtab, u_long *stroff, u_long strsize) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif #if KXLD_USER_OR_LP64 -kern_return_t kxld_sym_export_macho_64(const KXLDSym *sym, u_char *nl, +kern_return_t kxld_sym_export_macho_64(const KXLDSym *sym, u_char *nl, char *strtab, u_long *stroff, u_long strsize) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /******************************************************************************* @@ -242,30 +242,29 @@ kern_return_t kxld_sym_export_macho_64(const KXLDSym *sym, u_char *nl, *******************************************************************************/ void kxld_sym_relocate(KXLDSym *sym, const struct kxld_sect *sect) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #if KXLD_USER_OR_GOT void kxld_sym_set_got(KXLDSym *sym) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* KXLD_USER_OR_GOT */ kern_return_t kxld_sym_resolve(KXLDSym *sym, const kxld_addr_t addr) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #if KXLD_USER_OR_COMMON -kern_return_t kxld_sym_resolve_common(KXLDSym *sym, u_int sectnum, +kern_return_t kxld_sym_resolve_common(KXLDSym *sym, u_int sectnum, kxld_addr_t base_addr) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, 
visibility("hidden"))); #endif /* KXLD_USER_OR_COMMON */ void kxld_sym_delete(KXLDSym *sym) - __attribute__((nonnull, visibility("hidden"))); - +__attribute__((nonnull, visibility("hidden"))); + void kxld_sym_patch(KXLDSym *sym) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_sym_mark_private(KXLDSym *sym) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* _KXLD_SYMBOL_H_ */ - diff --git a/libkern/kxld/kxld_symtab.c b/libkern/kxld/kxld_symtab.c index 53bb6762b..4b081469d 100644 --- a/libkern/kxld/kxld_symtab.c +++ b/libkern/kxld/kxld_symtab.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -42,13 +42,13 @@ #include "kxld_util.h" struct kxld_symtab { - KXLDArray syms; - KXLDDict cxx_index; - KXLDDict name_index; - char *strings; - u_int strsize; - boolean_t cxx_index_initialized; - boolean_t name_index_initialized; + KXLDArray syms; + KXLDDict cxx_index; + KXLDDict name_index; + char *strings; + u_int strsize; + boolean_t cxx_index_initialized; + boolean_t name_index_initialized; }; /******************************************************************************* @@ -58,7 +58,7 @@ struct kxld_symtab { static kern_return_t init_macho(KXLDSymtab *symtab, struct symtab_command *src, u_char *macho, KXLDSeg * kernel_linkedit_seg, boolean_t is_32_bit) - __attribute__((nonnull(1,2))); +__attribute__((nonnull(1, 2))); #if KXLD_USER_OR_ILP32 static kern_return_t init_syms_32(KXLDSymtab *symtab, u_char *macho, u_long offset, @@ -70,7 +70,7 @@ static kern_return_t init_syms_64(KXLDSymtab *symtab, u_char *macho, u_long offs #endif static void restrict_private_symbols(KXLDSymtab *symtab) - __attribute__((nonnull)); +__attribute__((nonnull)); static boolean_t sym_is_defined_cxx(const KXLDSym *sym); static boolean_t sym_is_name_indexed(const KXLDSym *sym); @@ -79,7 +79,7 @@ static boolean_t sym_is_name_indexed(const KXLDSym *sym); size_t kxld_symtab_sizeof() { - return sizeof(KXLDSymtab); + return sizeof(KXLDSymtab); } #if KXLD_USER_OR_ILP32 @@ -89,8 +89,8 @@ kern_return_t kxld_symtab_init_from_macho_32(KXLDSymtab *symtab, struct symtab_command *src, u_char *macho, KXLDSeg * kernel_linkedit_seg) { - return init_macho(symtab, src, macho, kernel_linkedit_seg, - /* is_32_bit */ TRUE); + return init_macho(symtab, src, macho, kernel_linkedit_seg, + /* is_32_bit */ TRUE); } #endif /* KXLD_USER_ILP32 
*/ @@ -101,8 +101,8 @@ kern_return_t kxld_symtab_init_from_macho_64(KXLDSymtab *symtab, struct symtab_command *src, u_char *macho, KXLDSeg * kernel_linkedit_seg) { - return init_macho(symtab, src, macho, kernel_linkedit_seg, - /* is_32_bit */ FALSE); + return init_macho(symtab, src, macho, kernel_linkedit_seg, + /* is_32_bit */ FALSE); } #endif /* KXLD_USER_OR_LP64 */ @@ -113,66 +113,65 @@ init_macho(KXLDSymtab *symtab, struct symtab_command *src, u_char *macho, KXLDSeg * kernel_linkedit_seg, boolean_t is_32_bit __unused) { - kern_return_t rval = KERN_FAILURE; - u_long symoff; - u_char * macho_or_linkedit = macho; - - check(symtab); - check(src); - check(macho); - - /* Initialize the symbol array */ - - rval = kxld_array_init(&symtab->syms, sizeof(KXLDSym), src->nsyms); - require_noerr(rval, finish); - - /* Initialize the string table */ - - if (kernel_linkedit_seg) { - - /* If initing the kernel file in memory, we can't trust - * the symtab offsets directly, because the kernel file has been mapped - * into memory and the mach-o offsets are disk-based. - * - * The symoff is an offset relative to the linkedit segment - * so we just subtract the fileoffset of the linkedit segment - * to get its relative start. - * - * The strings table is an actual pointer, so we calculate that from - * the linkedit's vmaddr. - * - * Further, the init_syms_... functions need an adjusted base - * pointer instead of the beginning of the macho, so we substitute - * the base of the linkedit segment. - */ - - symoff = (u_long)(src->symoff - kernel_linkedit_seg->fileoff); - symtab->strings = (char *)(uintptr_t)kernel_linkedit_seg->base_addr + - src->stroff - kernel_linkedit_seg->fileoff; - macho_or_linkedit = (u_char *)(uintptr_t)kernel_linkedit_seg->base_addr; - } else { - symoff = (u_long)src->symoff; - symtab->strings = (char *) (macho + src->stroff); - } - - symtab->strsize = src->strsize; - - /* Initialize the symbols */ - - KXLD_3264_FUNC(is_32_bit, rval, - init_syms_32, init_syms_64, - symtab, macho_or_linkedit, symoff, src->nsyms); - require_noerr(rval, finish); - - /* Some symbols must be forced private for compatibility */ - (void) restrict_private_symbols(symtab); - - /* Save the output */ - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + u_long symoff; + u_char * macho_or_linkedit = macho; + + check(symtab); + check(src); + check(macho); + + /* Initialize the symbol array */ + + rval = kxld_array_init(&symtab->syms, sizeof(KXLDSym), src->nsyms); + require_noerr(rval, finish); + + /* Initialize the string table */ + + if (kernel_linkedit_seg) { + /* If initing the kernel file in memory, we can't trust + * the symtab offsets directly, because the kernel file has been mapped + * into memory and the mach-o offsets are disk-based. + * + * The symoff is an offset relative to the linkedit segment + * so we just subtract the fileoffset of the linkedit segment + * to get its relative start. + * + * The strings table is an actual pointer, so we calculate that from + * the linkedit's vmaddr. + * + * Further, the init_syms_... functions need an adjusted base + * pointer instead of the beginning of the macho, so we substitute + * the base of the linkedit segment. 
+ */ + + symoff = (u_long)(src->symoff - kernel_linkedit_seg->fileoff); + symtab->strings = (char *)(uintptr_t)kernel_linkedit_seg->base_addr + + src->stroff - kernel_linkedit_seg->fileoff; + macho_or_linkedit = (u_char *)(uintptr_t)kernel_linkedit_seg->base_addr; + } else { + symoff = (u_long)src->symoff; + symtab->strings = (char *) (macho + src->stroff); + } + + symtab->strsize = src->strsize; + + /* Initialize the symbols */ + + KXLD_3264_FUNC(is_32_bit, rval, + init_syms_32, init_syms_64, + symtab, macho_or_linkedit, symoff, src->nsyms); + require_noerr(rval, finish); + + /* Some symbols must be forced private for compatibility */ + (void) restrict_private_symbols(symtab); + + /* Save the output */ + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if KXLD_USER_OR_ILP32 @@ -182,23 +181,23 @@ finish: static kern_return_t init_syms_32(KXLDSymtab *symtab, u_char *macho, u_long offset, u_int nsyms) { - kern_return_t rval = KERN_FAILURE; - KXLDSym *sym = NULL; - u_int i = 0; - struct nlist *src_syms = (struct nlist *) ((void *) (macho + offset)); + kern_return_t rval = KERN_FAILURE; + KXLDSym *sym = NULL; + u_int i = 0; + struct nlist *src_syms = (struct nlist *) ((void *) (macho + offset)); - for (i = 0; i < nsyms; ++i) { - sym = kxld_array_get_item(&symtab->syms, i); - require_action(sym, finish, rval=KERN_FAILURE); + for (i = 0; i < nsyms; ++i) { + sym = kxld_array_get_item(&symtab->syms, i); + require_action(sym, finish, rval = KERN_FAILURE); - rval = kxld_sym_init_from_macho32(sym, symtab->strings, &src_syms[i]); - require_noerr(rval, finish); - } + rval = kxld_sym_init_from_macho32(sym, symtab->strings, &src_syms[i]); + require_noerr(rval, finish); + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_ILP32 */ @@ -209,58 +208,58 @@ finish: static kern_return_t init_syms_64(KXLDSymtab *symtab, u_char *macho, u_long offset, u_int nsyms) { - kern_return_t rval = KERN_FAILURE; - KXLDSym *sym = NULL; - u_int i = 0; - struct nlist_64 *src_syms = (struct nlist_64 *) ((void *) (macho + offset)); + kern_return_t rval = KERN_FAILURE; + KXLDSym *sym = NULL; + u_int i = 0; + struct nlist_64 *src_syms = (struct nlist_64 *) ((void *) (macho + offset)); - for (i = 0; i < nsyms; ++i) { - sym = kxld_array_get_item(&symtab->syms, i); - require_action(sym, finish, rval=KERN_FAILURE); + for (i = 0; i < nsyms; ++i) { + sym = kxld_array_get_item(&symtab->syms, i); + require_action(sym, finish, rval = KERN_FAILURE); - rval = kxld_sym_init_from_macho64(sym, symtab->strings, &src_syms[i]); - require_noerr(rval, finish); - } + rval = kxld_sym_init_from_macho64(sym, symtab->strings, &src_syms[i]); + require_noerr(rval, finish); + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #endif /* KXLD_USER_OR_LP64 */ /******************************************************************************* -* Temporary workaround for PR-6668105 +* Temporary workaround for PR-6668105 * new, new[], delete, and delete[] may be overridden globally in a kext. -* We should do this with some sort of weak symbols, but we'll use a whitelist -* for now to minimize risk. +* We should do this with some sort of weak symbols, but we'll use a whitelist +* for now to minimize risk. 
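
The linkedit special case above exists because LC_SYMTAB records disk offsets, while a kernel being linked against is already mapped into memory: symoff is rebased against the __LINKEDIT segment's file offset, and the string table becomes a live address computed from the segment's in-memory base. The arithmetic, with made-up numbers and a 64-bit host assumed:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t linkedit_fileoff = 0x100000;  /* where __LINKEDIT sits on disk */
        uint64_t linkedit_vmbase  = 0xffffff8000200000ULL; /* and in memory */
        uint32_t symoff_on_disk   = 0x108000;  /* LC_SYMTAB.symoff */
        uint32_t stroff_on_disk   = 0x120000;  /* LC_SYMTAB.stroff */

        /* symbol table offset relative to the start of __LINKEDIT */
        unsigned long symoff =
            (unsigned long)(symoff_on_disk - linkedit_fileoff);

        /* string table rebased into the mapped segment */
        uint64_t strings_addr =
            linkedit_vmbase + (stroff_on_disk - linkedit_fileoff);

        printf("symoff=0x%lx strings at 0x%llx\n",
            symoff, (unsigned long long)strings_addr);
        return 0;
    }
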
*******************************************************************************/ static void restrict_private_symbols(KXLDSymtab *symtab) { - const char *private_symbols[] = { - KXLD_KMOD_INFO_SYMBOL, - KXLD_OPERATOR_NEW_SYMBOL, - KXLD_OPERATOR_NEW_ARRAY_SYMBOL, - KXLD_OPERATOR_DELETE_SYMBOL, - KXLD_OPERATOR_DELETE_ARRAY_SYMBOL - }; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - const char *name = NULL; - u_int i = 0; - - kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_exported, FALSE); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - for (i = 0; i < const_array_len(private_symbols); ++i) { - name = private_symbols[i]; - if (!streq(sym->name, name)) { - continue; - } - - kxld_sym_mark_private(sym); - } - } + const char *private_symbols[] = { + KXLD_KMOD_INFO_SYMBOL, + KXLD_OPERATOR_NEW_SYMBOL, + KXLD_OPERATOR_NEW_ARRAY_SYMBOL, + KXLD_OPERATOR_DELETE_SYMBOL, + KXLD_OPERATOR_DELETE_ARRAY_SYMBOL + }; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + const char *name = NULL; + u_int i = 0; + + kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_exported, FALSE); + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + for (i = 0; i < const_array_len(private_symbols); ++i) { + name = private_symbols[i]; + if (!streq(sym->name, name)) { + continue; + } + + kxld_sym_mark_private(sym); + } + } } @@ -270,30 +269,30 @@ void kxld_symtab_iterator_init(KXLDSymtabIterator *iter, const KXLDSymtab *symtab, KXLDSymPredicateTest test, boolean_t negate) { - check(iter); - check(symtab); - check(test); - - iter->symtab = symtab; - iter->idx = 0; - iter->test = test; - iter->negate = negate; + check(iter); + check(symtab); + check(test); + + iter->symtab = symtab; + iter->idx = 0; + iter->test = test; + iter->negate = negate; } /******************************************************************************* *******************************************************************************/ -void +void kxld_symtab_clear(KXLDSymtab *symtab) { - check(symtab); - - kxld_array_clear(&symtab->syms); - kxld_dict_clear(&symtab->cxx_index); - kxld_dict_clear(&symtab->name_index); - symtab->strings = NULL; - symtab->strsize = 0; - symtab->cxx_index_initialized = 0; - symtab->name_index_initialized = 0; + check(symtab); + + kxld_array_clear(&symtab->syms); + kxld_dict_clear(&symtab->cxx_index); + kxld_dict_clear(&symtab->name_index); + symtab->strings = NULL; + symtab->strsize = 0; + symtab->cxx_index_initialized = 0; + symtab->name_index_initialized = 0; } /******************************************************************************* @@ -301,12 +300,12 @@ kxld_symtab_clear(KXLDSymtab *symtab) void kxld_symtab_deinit(KXLDSymtab *symtab) { - check(symtab); + check(symtab); - kxld_array_deinit(&symtab->syms); - kxld_dict_deinit(&symtab->cxx_index); - kxld_dict_deinit(&symtab->name_index); - bzero(symtab, sizeof(*symtab)); + kxld_array_deinit(&symtab->syms); + kxld_dict_deinit(&symtab->cxx_index); + kxld_dict_deinit(&symtab->name_index); + bzero(symtab, sizeof(*symtab)); } /******************************************************************************* @@ -314,9 +313,9 @@ kxld_symtab_deinit(KXLDSymtab *symtab) u_int kxld_symtab_get_num_symbols(const KXLDSymtab *symtab) { - check(symtab); + check(symtab); - return symtab->syms.nitems; + return symtab->syms.nitems; } /******************************************************************************* @@ -324,40 +323,40 @@ kxld_symtab_get_num_symbols(const KXLDSymtab *symtab) KXLDSym * kxld_symtab_get_symbol_by_index(const KXLDSymtab *symtab, u_int idx) { - 
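
restrict_private_symbols above is a straight whitelist pass: walk the exported symbols and force a fixed set private (the overridable operator new/delete family plus the kmod_info symbol). A plain-C sketch of the same walk; the mangled names are illustrative stand-ins:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *private_symbols[] = { "__ZdlPv", "__Znwm" }; /* illustrative */
        const char *exported[] = { "_my_driver_start", "__Znwm" };
        size_t i, j;

        /* For every exported name, check it against the whitelist. */
        for (i = 0; i < sizeof(exported) / sizeof(exported[0]); ++i) {
            for (j = 0; j < sizeof(private_symbols) / sizeof(private_symbols[0]); ++j) {
                if (0 == strcmp(exported[i], private_symbols[j])) {
                    printf("forcing %s private\n", exported[i]);
                }
            }
        }
        return 0;
    }
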
check(symtab); + check(symtab); - return kxld_array_get_item(&symtab->syms, idx); + return kxld_array_get_item(&symtab->syms, idx); } /******************************************************************************* *******************************************************************************/ -KXLDSym * +KXLDSym * kxld_symtab_get_symbol_by_name(const KXLDSymtab *symtab, const char *name) { - KXLDSym *sym = NULL; - u_int i = 0; - - for (i = 0; i < symtab->syms.nitems; ++i) { - sym = kxld_array_get_item(&symtab->syms, i); - - if (streq(sym->name, name)) { - return sym; - } - } - - return NULL; + KXLDSym *sym = NULL; + u_int i = 0; + + for (i = 0; i < symtab->syms.nitems; ++i) { + sym = kxld_array_get_item(&symtab->syms, i); + + if (streq(sym->name, name)) { + return sym; + } + } + + return NULL; } /******************************************************************************* *******************************************************************************/ KXLDSym * -kxld_symtab_get_locally_defined_symbol_by_name(const KXLDSymtab *symtab, +kxld_symtab_get_locally_defined_symbol_by_name(const KXLDSymtab *symtab, const char *name) { - check(symtab); - check(name); + check(symtab); + check(name); - return kxld_dict_find(&symtab->name_index, name); + return kxld_dict_find(&symtab->name_index, name); } /******************************************************************************* @@ -365,26 +364,26 @@ kxld_symtab_get_locally_defined_symbol_by_name(const KXLDSymtab *symtab, KXLDSym * kxld_symtab_get_cxx_symbol_by_value(const KXLDSymtab *symtab, kxld_addr_t value) { - check(symtab); + check(symtab); - return kxld_dict_find(&symtab->cxx_index, &value); + return kxld_dict_find(&symtab->cxx_index, &value); } /******************************************************************************* *******************************************************************************/ -kern_return_t +kern_return_t kxld_symtab_get_sym_index(const KXLDSymtab *symtab, const KXLDSym *sym, u_int *symindex) { - kern_return_t rval = KERN_FAILURE; + kern_return_t rval = KERN_FAILURE; - rval = kxld_array_get_index(&symtab->syms, sym, symindex); - require_noerr(rval, finish); + rval = kxld_array_get_index(&symtab->syms, sym, symindex); + require_noerr(rval, finish); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -392,147 +391,147 @@ finish: u_long kxld_symtab_get_macho_header_size(void) { - return sizeof(struct symtab_command); + return sizeof(struct symtab_command); } /******************************************************************************* *******************************************************************************/ -u_long +u_long kxld_symtab_get_macho_data_size(const KXLDSymtab *symtab, boolean_t is_32_bit) { - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - u_long size = 1; /* strtab start padding */ - u_int nsyms = 0; - - check(symtab); - - kxld_symtab_iterator_init(&iter, symtab, - kxld_sym_is_defined_locally, FALSE); - - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - size += strlen(sym->name) + 1; - ++nsyms; - } - - if (is_32_bit) { - size += nsyms * sizeof(struct nlist); - } else { - size += nsyms * sizeof(struct nlist_64); - } - - size = (size + 7) & ~7; - - return size; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + u_long size = 1; /* strtab start padding */ + u_int nsyms = 0; + + check(symtab); + + kxld_symtab_iterator_init(&iter, symtab, + 
kxld_sym_is_defined_locally, FALSE); + + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + size += strlen(sym->name) + 1; + ++nsyms; + } + + if (is_32_bit) { + size += nsyms * sizeof(struct nlist); + } else { + size += nsyms * sizeof(struct nlist_64); + } + + size = (size + 7) & ~7; + + return size; } /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_symtab_export_macho(const KXLDSymtab *symtab, u_char *buf, +kxld_symtab_export_macho(const KXLDSymtab *symtab, u_char *buf, u_long *header_offset, u_long header_size, u_long *data_offset, u_long data_size, boolean_t is_32_bit) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - struct symtab_command *symtabhdr = NULL; - u_char *nl = NULL; - u_long nlistsize = 0; - char *strtab = NULL; - u_long stroff = 1; /* strtab start padding */ - - check(symtab); - check(buf); - check(header_offset); - check(data_offset); - - require_action(sizeof(*symtabhdr) <= header_size - *header_offset, - finish, rval=KERN_FAILURE); - symtabhdr = (struct symtab_command *) ((void *) (buf + *header_offset)); - *header_offset += sizeof(*symtabhdr); - - /* Initialize the symbol table header */ - - // note - this assumes LC_SYMTAB is always before the LC_DYSYMTAB in the - // macho header we are processing. - symtabhdr->cmd = LC_SYMTAB; - symtabhdr->cmdsize = (uint32_t) sizeof(*symtabhdr); - symtabhdr->symoff = (uint32_t) *data_offset; - symtabhdr->strsize = 1; /* strtab start padding */ - - /* Find the size of the symbol and string tables */ - - kxld_symtab_iterator_init(&iter, symtab, - kxld_sym_is_defined_locally, FALSE); - - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - symtabhdr->nsyms++; - symtabhdr->strsize += (uint32_t) (strlen(sym->name) + 1); - } - - if (is_32_bit) { - nlistsize = sizeof(struct nlist); - } else { - nlistsize = sizeof(struct nlist_64); - } - - symtabhdr->stroff = (uint32_t) (symtabhdr->symoff + - (symtabhdr->nsyms * nlistsize)); - require_action(symtabhdr->stroff + symtabhdr->strsize <= data_size, finish, - rval=KERN_FAILURE); - - /* Get pointers to the symbol and string tables */ - nl = buf + symtabhdr->symoff; - strtab = (char *) (buf + symtabhdr->stroff); - - /* Copy over the symbols */ - - kxld_symtab_iterator_reset(&iter); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - KXLD_3264_FUNC(is_32_bit, rval, - kxld_sym_export_macho_32, kxld_sym_export_macho_64, - sym, nl, strtab, &stroff, symtabhdr->strsize); - require_noerr(rval, finish); - - nl += nlistsize; - stroff += rval; - } - - /* Update the data offset */ - *data_offset += (symtabhdr->nsyms * nlistsize) + stroff; - - *data_offset = (*data_offset + 7) & ~7; - // at this point data_offset will be the offset just past the - // symbols and strings in the __LINKEDIT data - + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + struct symtab_command *symtabhdr = NULL; + u_char *nl = NULL; + u_long nlistsize = 0; + char *strtab = NULL; + u_long stroff = 1; /* strtab start padding */ + + check(symtab); + check(buf); + check(header_offset); + check(data_offset); + + require_action(sizeof(*symtabhdr) <= header_size - *header_offset, + finish, rval = KERN_FAILURE); + symtabhdr = (struct symtab_command *) ((void *) (buf + *header_offset)); + *header_offset += sizeof(*symtabhdr); + + /* Initialize the symbol table header */ + + // note - this assumes LC_SYMTAB is always 
before the LC_DYSYMTAB in the + // macho header we are processing. + symtabhdr->cmd = LC_SYMTAB; + symtabhdr->cmdsize = (uint32_t) sizeof(*symtabhdr); + symtabhdr->symoff = (uint32_t) *data_offset; + symtabhdr->strsize = 1; /* strtab start padding */ + + /* Find the size of the symbol and string tables */ + + kxld_symtab_iterator_init(&iter, symtab, + kxld_sym_is_defined_locally, FALSE); + + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + symtabhdr->nsyms++; + symtabhdr->strsize += (uint32_t) (strlen(sym->name) + 1); + } + + if (is_32_bit) { + nlistsize = sizeof(struct nlist); + } else { + nlistsize = sizeof(struct nlist_64); + } + + symtabhdr->stroff = (uint32_t) (symtabhdr->symoff + + (symtabhdr->nsyms * nlistsize)); + require_action(symtabhdr->stroff + symtabhdr->strsize <= data_size, finish, + rval = KERN_FAILURE); + + /* Get pointers to the symbol and string tables */ + nl = buf + symtabhdr->symoff; + strtab = (char *) (buf + symtabhdr->stroff); + + /* Copy over the symbols */ + + kxld_symtab_iterator_reset(&iter); + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + KXLD_3264_FUNC(is_32_bit, rval, + kxld_sym_export_macho_32, kxld_sym_export_macho_64, + sym, nl, strtab, &stroff, symtabhdr->strsize); + require_noerr(rval, finish); + + nl += nlistsize; + stroff += rval; + } + + /* Update the data offset */ + *data_offset += (symtabhdr->nsyms * nlistsize) + stroff; + + *data_offset = (*data_offset + 7) & ~7; + // at this point data_offset will be the offset just past the + // symbols and strings in the __LINKEDIT data + #if SPLIT_KEXTS_DEBUG - { - kxld_log(kKxldLogLinking, kKxldLogErr, - " %p to %p (size %lu) symtabhdr <%s>", - (void *) symtabhdr, - (void *) ((u_char *)symtabhdr + sizeof(*symtabhdr)), - sizeof(*symtabhdr), - __func__); - - kxld_log(kKxldLogLinking, kKxldLogErr, - " symtabhdr %p cmdsize %u symoff %u nsyms %u stroff %u strsize %u <%s>", - (void *) symtabhdr, - symtabhdr->cmdsize, - symtabhdr->symoff, - symtabhdr->nsyms, - symtabhdr->stroff, - symtabhdr->strsize, - __func__); - } + { + kxld_log(kKxldLogLinking, kKxldLogErr, + " %p to %p (size %lu) symtabhdr <%s>", + (void *) symtabhdr, + (void *) ((u_char *)symtabhdr + sizeof(*symtabhdr)), + sizeof(*symtabhdr), + __func__); + + kxld_log(kKxldLogLinking, kKxldLogErr, + " symtabhdr %p cmdsize %u symoff %u nsyms %u stroff %u strsize %u <%s>", + (void *) symtabhdr, + symtabhdr->cmdsize, + symtabhdr->symoff, + symtabhdr->nsyms, + symtabhdr->stroff, + symtabhdr->strsize, + __func__); + } #endif - rval = KERN_SUCCESS; - + rval = KERN_SUCCESS; + finish: - return rval; + return rval; } /******************************************************************************* @@ -540,16 +539,16 @@ finish: u_int kxld_symtab_iterator_get_num_remaining(const KXLDSymtabIterator *iter) { - u_int idx = 0; - u_int count = 0; + u_int idx = 0; + u_int count = 0; - check(iter); + check(iter); - for (idx = iter->idx; idx < iter->symtab->syms.nitems; ++idx) { - count += iter->test(kxld_array_get_item(&iter->symtab->syms, idx)); - } + for (idx = iter->idx; idx < iter->symtab->syms.nitems; ++idx) { + count += iter->test(kxld_array_get_item(&iter->symtab->syms, idx)); + } - return count; + return count; } /******************************************************************************* @@ -557,38 +556,38 @@ kxld_symtab_iterator_get_num_remaining(const KXLDSymtabIterator *iter) kern_return_t kxld_symtab_index_cxx_symbols_by_value(KXLDSymtab *symtab) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - u_int 
nsyms = 0; - - check(symtab); - - if (symtab->cxx_index_initialized) { - rval = KERN_SUCCESS; - goto finish; - } - - /* Count the number of C++ symbols */ - kxld_symtab_iterator_init(&iter, symtab, sym_is_defined_cxx, FALSE); - nsyms = kxld_symtab_iterator_get_num_remaining(&iter); - - /* Create the dictionary */ - rval = kxld_dict_init(&symtab->cxx_index, kxld_dict_kxldaddr_hash, - kxld_dict_kxldaddr_cmp, nsyms); - require_noerr(rval, finish); - - /* Insert the non-stab symbols */ - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - rval = kxld_dict_insert(&symtab->cxx_index, &sym->base_addr, sym); - require_noerr(rval, finish); - } - - - symtab->cxx_index_initialized = TRUE; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + u_int nsyms = 0; + + check(symtab); + + if (symtab->cxx_index_initialized) { + rval = KERN_SUCCESS; + goto finish; + } + + /* Count the number of C++ symbols */ + kxld_symtab_iterator_init(&iter, symtab, sym_is_defined_cxx, FALSE); + nsyms = kxld_symtab_iterator_get_num_remaining(&iter); + + /* Create the dictionary */ + rval = kxld_dict_init(&symtab->cxx_index, kxld_dict_kxldaddr_hash, + kxld_dict_kxldaddr_cmp, nsyms); + require_noerr(rval, finish); + + /* Insert the non-stab symbols */ + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + rval = kxld_dict_insert(&symtab->cxx_index, &sym->base_addr, sym); + require_noerr(rval, finish); + } + + + symtab->cxx_index_initialized = TRUE; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -596,7 +595,7 @@ finish: static boolean_t sym_is_defined_cxx(const KXLDSym *sym) { - return (kxld_sym_is_defined_locally(sym) && kxld_sym_is_cxx(sym)); + return kxld_sym_is_defined_locally(sym) && kxld_sym_is_cxx(sym); } /******************************************************************************* @@ -604,45 +603,45 @@ sym_is_defined_cxx(const KXLDSym *sym) kern_return_t kxld_symtab_index_symbols_by_name(KXLDSymtab *symtab) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - u_int nsyms = 0; - - check(symtab); - - if (symtab->name_index_initialized) { - rval = KERN_SUCCESS; - goto finish; - } - - /* Count the number of symbols we need to index by name */ - kxld_symtab_iterator_init(&iter, symtab, sym_is_name_indexed, FALSE); - nsyms = kxld_symtab_iterator_get_num_remaining(&iter); - - /* Create the dictionary */ - rval = kxld_dict_init(&symtab->name_index, kxld_dict_string_hash, - kxld_dict_string_cmp, nsyms); - require_noerr(rval, finish); - - /* Insert the non-stab symbols */ - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - rval = kxld_dict_insert(&symtab->name_index, sym->name, sym); - require_noerr(rval, finish); - } - - symtab->name_index_initialized = TRUE; - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + u_int nsyms = 0; + + check(symtab); + + if (symtab->name_index_initialized) { + rval = KERN_SUCCESS; + goto finish; + } + + /* Count the number of symbols we need to index by name */ + kxld_symtab_iterator_init(&iter, symtab, sym_is_name_indexed, FALSE); + nsyms = kxld_symtab_iterator_get_num_remaining(&iter); + + /* Create the dictionary */ + rval = kxld_dict_init(&symtab->name_index, kxld_dict_string_hash, + kxld_dict_string_cmp, nsyms); + require_noerr(rval, finish); + + /* Insert the non-stab symbols */ + while ((sym = 
kxld_symtab_iterator_get_next(&iter))) { + rval = kxld_dict_insert(&symtab->name_index, sym->name, sym); + require_noerr(rval, finish); + } + + symtab->name_index_initialized = TRUE; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ static boolean_t sym_is_name_indexed(const KXLDSym *sym) { - return (kxld_sym_is_defined_locally(sym) && !kxld_sym_is_stab(sym)); + return kxld_sym_is_defined_locally(sym) && !kxld_sym_is_stab(sym); } /******************************************************************************* @@ -650,27 +649,27 @@ sym_is_name_indexed(const KXLDSym *sym) kern_return_t kxld_symtab_relocate(KXLDSymtab *symtab, const KXLDArray *sectarray) { - kern_return_t rval = KERN_FAILURE; - KXLDSymtabIterator iter; - KXLDSym *sym = NULL; - const KXLDSect *sect = NULL; - - check(symtab); - check(sectarray); + kern_return_t rval = KERN_FAILURE; + KXLDSymtabIterator iter; + KXLDSym *sym = NULL; + const KXLDSect *sect = NULL; - kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_section, FALSE); + check(symtab); + check(sectarray); - while ((sym = kxld_symtab_iterator_get_next(&iter))) { - sect = kxld_array_get_item(sectarray, sym->sectnum); - require_action(sect, finish, rval=KERN_FAILURE); - kxld_sym_relocate(sym, sect); - } + kxld_symtab_iterator_init(&iter, symtab, kxld_sym_is_section, FALSE); - rval = KERN_SUCCESS; + while ((sym = kxld_symtab_iterator_get_next(&iter))) { + sect = kxld_array_get_item(sectarray, sym->sectnum); + require_action(sect, finish, rval = KERN_FAILURE); + kxld_sym_relocate(sym, sect); + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -683,24 +682,24 @@ kern_return_t kxld_symtab_add_symbol(KXLDSymtab *symtab, char *name, kxld_addr_t link_addr, KXLDSym **symout) { - kern_return_t rval = KERN_FAILURE; - KXLDSym *sym = NULL; - u_int symindex = symtab->syms.nitems; + kern_return_t rval = KERN_FAILURE; + KXLDSym *sym = NULL; + u_int symindex = symtab->syms.nitems; + + rval = kxld_array_resize(&symtab->syms, symindex + 1); + require_noerr(rval, finish); - rval = kxld_array_resize(&symtab->syms, symindex + 1); - require_noerr(rval, finish); + sym = kxld_array_get_item(&symtab->syms, symindex); + kxld_sym_init_absolute(sym, name, link_addr); - sym = kxld_array_get_item(&symtab->syms, symindex); - kxld_sym_init_absolute(sym, name, link_addr); + rval = kxld_dict_insert(&symtab->name_index, sym->name, sym); + require_noerr(rval, finish); - rval = kxld_dict_insert(&symtab->name_index, sym->name, sym); - require_noerr(rval, finish); + rval = KERN_SUCCESS; + *symout = sym; - rval = KERN_SUCCESS; - *symout = sym; - finish: - return rval; + return rval; } /******************************************************************************* @@ -708,25 +707,27 @@ finish: KXLDSym * kxld_symtab_iterator_get_next(KXLDSymtabIterator *iter) { - KXLDSym *sym = NULL; - KXLDSym *tmp = NULL; - boolean_t cmp = FALSE; - - check(iter); - - for (; iter->idx < iter->symtab->syms.nitems; ++iter->idx) { - tmp = kxld_array_get_item(&iter->symtab->syms, iter->idx); - cmp = iter->test(tmp); - if (iter->negate) cmp = !cmp; - - if (cmp) { - sym = tmp; - ++iter->idx; - break; - } - } - - return sym; + KXLDSym *sym = NULL; + KXLDSym *tmp = NULL; + boolean_t cmp = FALSE; + + check(iter); + + for (; iter->idx < iter->symtab->syms.nitems; 
++iter->idx) { + tmp = kxld_array_get_item(&iter->symtab->syms, iter->idx); + cmp = iter->test(tmp); + if (iter->negate) { + cmp = !cmp; + } + + if (cmp) { + sym = tmp; + ++iter->idx; + break; + } + } + + return sym; } @@ -735,6 +736,6 @@ kxld_symtab_iterator_get_next(KXLDSymtabIterator *iter) void kxld_symtab_iterator_reset(KXLDSymtabIterator *iter) { - check(iter); - iter->idx = 0; + check(iter); + iter->idx = 0; } diff --git a/libkern/kxld/kxld_symtab.h b/libkern/kxld/kxld_symtab.h index ff4b557c5..ff64ba421 100644 --- a/libkern/kxld/kxld_symtab.h +++ b/libkern/kxld/kxld_symtab.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_SYMTAB_H_ @@ -44,10 +44,10 @@ typedef struct kxld_symtab KXLDSymtab; typedef struct kxld_symtab_iterator KXLDSymtabIterator; struct kxld_symtab_iterator { - const KXLDSymtab *symtab; - u_int idx; - KXLDSymPredicateTest test; - boolean_t negate; + const KXLDSymtab *symtab; + u_int idx; + KXLDSymPredicateTest test; + boolean_t negate; }; /******************************************************************************* @@ -55,96 +55,95 @@ struct kxld_symtab_iterator { *******************************************************************************/ size_t kxld_symtab_sizeof(void) - __attribute__((const, visibility("hidden"))); +__attribute__((const, visibility("hidden"))); #if KXLD_USER_OR_ILP32 kern_return_t kxld_symtab_init_from_macho_32(KXLDSymtab *symtab, struct symtab_command *src, u_char *macho, KXLDSeg * kernel_linkedit_seg) - __attribute__((nonnull(1,2), visibility("hidden"))); +__attribute__((nonnull(1, 2), visibility("hidden"))); #endif /* KXLD_USER_OR_ILP32 */ #if KXLD_USER_OR_LP64 kern_return_t kxld_symtab_init_from_macho_64(KXLDSymtab *symtab, struct symtab_command *src, u_char *macho, KXLDSeg * kernel_linkedit_seg) - __attribute__((nonnull(1,2), visibility("hidden"))); +__attribute__((nonnull(1, 2), visibility("hidden"))); #endif /* KXLD_USER_OR_ILP64 */ -void kxld_symtab_iterator_init(KXLDSymtabIterator *iter, +void kxld_symtab_iterator_init(KXLDSymtabIterator *iter, const KXLDSymtab *symtab, KXLDSymPredicateTest test, boolean_t negate) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_symtab_clear(KXLDSymtab *symtab) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_symtab_deinit(KXLDSymtab *symtab) - __attribute__((nonnull, 
visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors *******************************************************************************/ u_int kxld_symtab_get_num_symbols(const KXLDSymtab *symtab) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); KXLDSym * kxld_symtab_get_symbol_by_index(const KXLDSymtab *symtab, u_int idx) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); -KXLDSym * kxld_symtab_get_symbol_by_name(const KXLDSymtab *symtab, +KXLDSym * kxld_symtab_get_symbol_by_name(const KXLDSymtab *symtab, const char *name) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); KXLDSym * kxld_symtab_get_locally_defined_symbol_by_name( - const KXLDSymtab *symtab, const char *name) - __attribute__((pure, nonnull, visibility("hidden"))); + const KXLDSymtab *symtab, const char *name) +__attribute__((pure, nonnull, visibility("hidden"))); KXLDSym * kxld_symtab_get_cxx_symbol_by_value(const KXLDSymtab *symtab, kxld_addr_t value) - __attribute__((pure, nonnull, visibility("hidden"))); - -kern_return_t kxld_symtab_get_sym_index(const KXLDSymtab *symtab, +__attribute__((pure, nonnull, visibility("hidden"))); + +kern_return_t kxld_symtab_get_sym_index(const KXLDSymtab *symtab, const KXLDSym * sym, u_int *idx) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); u_long kxld_symtab_get_macho_header_size(void) - __attribute__((pure, visibility("hidden"))); +__attribute__((pure, visibility("hidden"))); -u_long kxld_symtab_get_macho_data_size(const KXLDSymtab *symtab, +u_long kxld_symtab_get_macho_data_size(const KXLDSymtab *symtab, boolean_t is_32_bit) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); kern_return_t -kxld_symtab_export_macho(const KXLDSymtab *symtab, u_char *buf, +kxld_symtab_export_macho(const KXLDSymtab *symtab, u_char *buf, u_long *header_offset, u_long header_size, - u_long *data_offset, u_long data_size, + u_long *data_offset, u_long data_size, boolean_t is_32_bit) - __attribute__((nonnull, visibility("hidden"))); - +__attribute__((nonnull, visibility("hidden"))); + u_int kxld_symtab_iterator_get_num_remaining(const KXLDSymtabIterator *iter) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /******************************************************************************* -* Modifiers +* Modifiers *******************************************************************************/ kern_return_t kxld_symtab_index_symbols_by_name(KXLDSymtab *symtab) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_symtab_index_cxx_symbols_by_value(KXLDSymtab *symtab) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); kern_return_t kxld_symtab_relocate(KXLDSymtab *symtab, const struct kxld_array *sectarray) - __attribute__((nonnull, visibility("hidden"))); - -kern_return_t kxld_symtab_add_symbol(KXLDSymtab *symtab, char *name, +__attribute__((nonnull, visibility("hidden"))); + +kern_return_t kxld_symtab_add_symbol(KXLDSymtab *symtab, char *name, kxld_addr_t link_addr, KXLDSym **symout) - __attribute__((nonnull, visibility("hidden"))); 
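/*******************************************************************************
* Aside (illustrative sketch, not part of this patch): the header churn above
* only re-indents the GCC/Clang attribute lists; the attributes themselves
* are unchanged.  The toy declarations below, with hypothetical names, show
* what each annotation buys: visibility("hidden") keeps a symbol out of the
* image's dynamic export table, nonnull(...) lets the compiler warn when a
* listed argument is provably NULL, and pure allows repeated calls with the
* same inputs to be folded.
*******************************************************************************/
#include <stddef.h>
#include <string.h>

/* Not exported from the linked image; callable only within it. */
size_t demo_internal_length(const char *s)
__attribute__((pure, nonnull, visibility("hidden")));

/* Only arguments 1 and 2 must be non-NULL; argument 3 may legally be NULL. */
void demo_copy(char *dst, const char *src, size_t *out_len)
__attribute__((nonnull(1, 2), visibility("hidden")));

size_t
demo_internal_length(const char *s)
{
    return strlen(s);
}

void
demo_copy(char *dst, const char *src, size_t *out_len)
{
    size_t len = demo_internal_length(src);

    memcpy(dst, src, len + 1);
    if (out_len) {          /* argument 3 is not covered by nonnull(1, 2) */
        *out_len = len;
    }
}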
+__attribute__((nonnull, visibility("hidden"))); KXLDSym * kxld_symtab_iterator_get_next(KXLDSymtabIterator *iter) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_symtab_iterator_reset(KXLDSymtabIterator *iter) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* _KXLD_SYMTAB_H_ */ - diff --git a/libkern/kxld/kxld_util.c b/libkern/kxld/kxld_util.c index 480a454c4..f40a3d173 100644 --- a/libkern/kxld/kxld_util.c +++ b/libkern/kxld/kxld_util.c @@ -48,9 +48,9 @@ #include "kxld_util.h" #if !KERNEL -static void unswap_macho_32(u_char *file, enum NXByteOrder host_order, +static void unswap_macho_32(u_char *file, enum NXByteOrder host_order, enum NXByteOrder target_order); -static void unswap_macho_64(u_char *file, enum NXByteOrder host_order, +static void unswap_macho_64(u_char *file, enum NXByteOrder host_order, enum NXByteOrder target_order); #endif /* !KERNEL */ @@ -73,67 +73,68 @@ static kxld_size_t s_cross_link_page_size = PAGE_SIZE; /******************************************************************************* *******************************************************************************/ -void +void kxld_set_logging_callback(KXLDLoggingCallback logging_callback) { - s_logging_callback = logging_callback; + s_logging_callback = logging_callback; } /******************************************************************************* *******************************************************************************/ -void +void kxld_set_logging_callback_data(const char *name, void *user_data) { - if (name) { - (void)strlcpy(s_callback_name, name, sizeof(s_callback_name)); - /* disallow format strings in the kxld logging callback name */ - for (size_t i = 0; i < sizeof(s_callback_name); i++) { - if (s_callback_name[i] == '%') { - s_callback_name[i] = '.'; - } - } - } else { - (void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name)); - } - - s_callback_data = user_data; + if (name) { + (void)strlcpy(s_callback_name, name, sizeof(s_callback_name)); + /* disallow format strings in the kxld logging callback name */ + for (size_t i = 0; i < sizeof(s_callback_name); i++) { + if (s_callback_name[i] == '%') { + s_callback_name[i] = '.'; + } + } + } else { + (void)strlcpy(s_callback_name, "internal", sizeof(s_callback_name)); + } + + s_callback_data = user_data; } /******************************************************************************* *******************************************************************************/ -void -kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level, +void +kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level, const char *in_format, ...) 
{ - char stack_buffer[256]; - char *alloc_buffer = NULL; - char *format = stack_buffer; - u_int length = 0; - va_list ap; - - if (s_logging_callback) { - - length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s", - s_callback_name, in_format); - - if (length >= sizeof(stack_buffer)) { - length += 1; - alloc_buffer = kxld_alloc(length); - if (!alloc_buffer) return; - - snprintf(alloc_buffer, length, "kxld[%s]: %s", - s_callback_name, in_format); - format = alloc_buffer; - } - - va_start(ap, in_format); - s_logging_callback(subsystem, level, format, ap, s_callback_data); - va_end(ap); - - if (alloc_buffer) { - kxld_free(alloc_buffer, length); - } - } + char stack_buffer[256]; + char *alloc_buffer = NULL; + char *format = stack_buffer; + u_int length = 0; + va_list ap; + + if (s_logging_callback) { + length = snprintf(stack_buffer, sizeof(stack_buffer), "kxld[%s]: %s", + s_callback_name, in_format); + + if (length >= sizeof(stack_buffer)) { + length += 1; + alloc_buffer = kxld_alloc(length); + if (!alloc_buffer) { + return; + } + + snprintf(alloc_buffer, length, "kxld[%s]: %s", + s_callback_name, in_format); + format = alloc_buffer; + } + + va_start(ap, in_format); + s_logging_callback(subsystem, level, format, ap, s_callback_data); + va_end(ap); + + if (alloc_buffer) { + kxld_free(alloc_buffer, length); + } + } } /* We'll use kalloc for any page-based allocations under this threshold, and @@ -143,25 +144,25 @@ kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level, /******************************************************************************* *******************************************************************************/ -void * +void * kxld_alloc(size_t size) { - void * ptr = NULL; - + void * ptr = NULL; + #if KERNEL - ptr = kalloc(size); + ptr = kalloc(size); #else - ptr = malloc(size); + ptr = malloc(size); #endif #if DEBUG - if (ptr) { - ++num_allocations; - bytes_allocated += size; - } + if (ptr) { + ++num_allocations; + bytes_allocated += size; + } #endif - return ptr; + return ptr; } /******************************************************************************* @@ -169,26 +170,28 @@ kxld_alloc(size_t size) void * kxld_page_alloc_untracked(size_t size) { - void * ptr = NULL; + void * ptr = NULL; #if KERNEL - kern_return_t rval = 0; - vm_offset_t addr = 0; + kern_return_t rval = 0; + vm_offset_t addr = 0; #endif /* KERNEL */ - size = round_page(size); + size = round_page(size); #if KERNEL - if (size < KALLOC_MAX) { - ptr = kalloc(size); - } else { - rval = kmem_alloc(kernel_map, &addr, size, VM_KERN_MEMORY_OSKEXT); - if (!rval) ptr = (void *) addr; - } + if (size < KALLOC_MAX) { + ptr = kalloc(size); + } else { + rval = kmem_alloc(kernel_map, &addr, size, VM_KERN_MEMORY_OSKEXT); + if (!rval) { + ptr = (void *) addr; + } + } #else /* !KERNEL */ - ptr = malloc(size); + ptr = malloc(size); #endif /* KERNEL */ - return ptr; + return ptr; } /******************************************************************************* @@ -196,17 +199,17 @@ kxld_page_alloc_untracked(size_t size) void * kxld_page_alloc(size_t size) { - void * ptr = NULL; + void * ptr = NULL; - ptr = kxld_page_alloc_untracked(size); + ptr = kxld_page_alloc_untracked(size); #if DEBUG - if (ptr) { - ++num_allocations; - bytes_allocated += round_page(size); - } + if (ptr) { + ++num_allocations; + bytes_allocated += round_page(size); + } #endif /* DEBUG */ - return ptr; + return ptr; } /******************************************************************************* @@ -214,18 +217,20 @@ kxld_page_alloc(size_t 
size) void * kxld_alloc_pageable(size_t size) { - size = round_page(size); + size = round_page(size); #if KERNEL - kern_return_t rval = 0; - vm_offset_t ptr = 0; + kern_return_t rval = 0; + vm_offset_t ptr = 0; - rval = kmem_alloc_pageable(kernel_map, &ptr, size, VM_KERN_MEMORY_OSKEXT); - if (rval) ptr = 0; + rval = kmem_alloc_pageable(kernel_map, &ptr, size, VM_KERN_MEMORY_OSKEXT); + if (rval) { + ptr = 0; + } - return (void *) ptr; + return (void *) ptr; #else - return kxld_page_alloc_untracked(size); + return kxld_page_alloc_untracked(size); #endif } @@ -235,14 +240,14 @@ void kxld_free(void *ptr, size_t size __unused) { #if DEBUG - ++num_frees; - bytes_freed += size; + ++num_frees; + bytes_freed += size; #endif #if KERNEL - kfree(ptr, size); + kfree(ptr, size); #else - free(ptr); + free(ptr); #endif } @@ -252,18 +257,18 @@ void kxld_page_free_untracked(void *ptr, size_t size __unused) { #if KERNEL - size = round_page(size); + size = round_page(size); - if (size < KALLOC_MAX) { - kfree(ptr, size); - } else { - kmem_free(kernel_map, (vm_offset_t) ptr, size); - } + if (size < KALLOC_MAX) { + kfree(ptr, size); + } else { + kmem_free(kernel_map, (vm_offset_t) ptr, size); + } #else /* !KERNEL */ - free(ptr); + free(ptr); #endif /* KERNEL */ } - + /******************************************************************************* *******************************************************************************/ @@ -271,10 +276,10 @@ void kxld_page_free(void *ptr, size_t size) { #if DEBUG - ++num_frees; - bytes_freed += round_page(size); + ++num_frees; + bytes_freed += round_page(size); #endif /* DEBUG */ - kxld_page_free_untracked(ptr, size); + kxld_page_free_untracked(ptr, size); } /******************************************************************************* @@ -286,166 +291,176 @@ validate_and_swap_macho_32(u_char *file, u_long size #endif /* !KERNEL */ ) { - kern_return_t rval = KERN_FAILURE; - struct mach_header *mach_hdr = (struct mach_header *) ((void *) file); - struct load_command *load_hdr = NULL; - struct segment_command *seg_hdr = NULL; - struct section *sects = NULL; - struct relocation_info *relocs = NULL; - struct symtab_command *symtab_hdr = NULL; - struct nlist *symtab = NULL; - u_long offset = 0; - u_int cmd = 0; - u_int cmdsize = 0; - u_int i = 0; - u_int j = 0; + kern_return_t rval = KERN_FAILURE; + struct mach_header *mach_hdr = (struct mach_header *) ((void *) file); + struct load_command *load_hdr = NULL; + struct segment_command *seg_hdr = NULL; + struct section *sects = NULL; + struct relocation_info *relocs = NULL; + struct symtab_command *symtab_hdr = NULL; + struct nlist *symtab = NULL; + u_long offset = 0; + u_int cmd = 0; + u_int cmdsize = 0; + u_int i = 0; + u_int j = 0; #if !KERNEL - boolean_t swap = FALSE; + boolean_t swap = FALSE; #endif /* !KERNEL */ - check(file); - check(size); + check(file); + check(size); - /* Verify that the file is big enough for the mach header */ - require_action(size >= sizeof(*mach_hdr), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - offset = sizeof(*mach_hdr); + /* Verify that the file is big enough for the mach header */ + require_action(size >= sizeof(*mach_hdr), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + offset = sizeof(*mach_hdr); #if !KERNEL - /* Swap the mach header if necessary */ - if (mach_hdr->magic == MH_CIGAM) { - swap = TRUE; - (void) swap_mach_header(mach_hdr, host_order); - } + /* Swap the mach header if 
necessary */ + if (mach_hdr->magic == MH_CIGAM) { + swap = TRUE; + (void) swap_mach_header(mach_hdr, host_order); + } #endif /* !KERNEL */ - /* Validate the mach_header's magic number */ - require_action(mach_hdr->magic == MH_MAGIC, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Invalid magic number: 0x%x.", mach_hdr->magic)); + /* Validate the mach_header's magic number */ + require_action(mach_hdr->magic == MH_MAGIC, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Invalid magic number: 0x%x.", mach_hdr->magic)); - /* If in the running kernel, and asked to validate the kernel - * (which is the only file of type MH_EXECUTE we should ever see), - * then just assume it's ok or we wouldn't be running to begin with. - */ + /* If in the running kernel, and asked to validate the kernel + * (which is the only file of type MH_EXECUTE we should ever see), + * then just assume it's ok or we wouldn't be running to begin with. + */ #if KERNEL - if (mach_hdr->filetype == MH_EXECUTE) { - rval = KERN_SUCCESS; - goto finish; - } + if (mach_hdr->filetype == MH_EXECUTE) { + rval = KERN_SUCCESS; + goto finish; + } #endif /* KERNEL */ - /* Validate and potentially swap the load commands */ - for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) { - - /* Get the load command and size */ - load_hdr = (struct load_command *) ((void *) (file + offset)); - cmd = load_hdr->cmd; - cmdsize = load_hdr->cmdsize; + /* Validate and potentially swap the load commands */ + for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) { + /* Get the load command and size */ + load_hdr = (struct load_command *) ((void *) (file + offset)); + cmd = load_hdr->cmd; + cmdsize = load_hdr->cmdsize; #if !KERNEL - if (swap) { - cmd = OSSwapInt32(load_hdr->cmd); - cmdsize = OSSwapInt32(load_hdr->cmdsize); - } + if (swap) { + cmd = OSSwapInt32(load_hdr->cmd); + cmdsize = OSSwapInt32(load_hdr->cmdsize); + } #endif /* !KERNEL */ - /* Verify that the file is big enough to contain the load command */ - require_action(size >= offset + cmdsize, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + /* Verify that the file is big enough to contain the load command */ + require_action(size >= offset + cmdsize, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - switch(cmd) { - case LC_SEGMENT: - /* Get and swap the segment header */ - seg_hdr = (struct segment_command *) load_hdr; + switch (cmd) { + case LC_SEGMENT: + /* Get and swap the segment header */ + seg_hdr = (struct segment_command *) load_hdr; #if !KERNEL - if (swap) swap_segment_command(seg_hdr, host_order); + if (swap) { + swap_segment_command(seg_hdr, host_order); + } #endif /* !KERNEL */ - /* Get and swap the section headers */ - sects = (struct section *) &seg_hdr[1]; + /* Get and swap the section headers */ + sects = (struct section *) &seg_hdr[1]; #if !KERNEL - if (swap) swap_section(sects, seg_hdr->nsects, host_order); + if (swap) { + swap_section(sects, seg_hdr->nsects, host_order); + } #endif /* !KERNEL */ - /* Ignore segments with no vm size */ - if (!seg_hdr->vmsize) continue; - - /* Verify that the file is big enough for the segment data. 
*/ - require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - for (j = 0; j < seg_hdr->nsects; ++j) { - - /* Verify that, if the section is not to be zero filled on - * demand, that file is big enough for the section's data. - */ - require_action((sects[j].flags & S_ZEROFILL) || - (size >= sects[j].offset + sects[j].size), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - /* Verify that the file is big enough for the section's - * relocation entries. - */ - require_action(size >= - sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - /* Swap the relocation entries */ - relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff)); + /* Ignore segments with no vm size */ + if (!seg_hdr->vmsize) { + continue; + } + + /* Verify that the file is big enough for the segment data. */ + require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + for (j = 0; j < seg_hdr->nsects; ++j) { + /* Verify that, if the section is not to be zero filled on + * demand, that file is big enough for the section's data. + */ + require_action((sects[j].flags & S_ZEROFILL) || + (size >= sects[j].offset + sects[j].size), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + /* Verify that the file is big enough for the section's + * relocation entries. + */ + require_action(size >= + sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + /* Swap the relocation entries */ + relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff)); #if !KERNEL - if (swap) { - swap_relocation_info(relocs, sects[j].nreloc, - host_order); - } + if (swap) { + swap_relocation_info(relocs, sects[j].nreloc, + host_order); + } #endif /* !KERNEL */ - } + } - break; - case LC_SYMTAB: - /* Get and swap the symtab header */ - symtab_hdr = (struct symtab_command *) load_hdr; + break; + case LC_SYMTAB: + /* Get and swap the symtab header */ + symtab_hdr = (struct symtab_command *) load_hdr; #if !KERNEL - if (swap) swap_symtab_command(symtab_hdr, host_order); + if (swap) { + swap_symtab_command(symtab_hdr, host_order); + } #endif /* !KERNEL */ - /* Verify that the file is big enough for the symbol table */ - require_action(size >= - symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + /* Verify that the file is big enough for the symbol table */ + require_action(size >= + symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - /* Verify that the file is big enough for the string table */ - require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + /* Verify that the file is big enough for the string table */ + require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); #if !KERNEL - /* Swap the symbol table entries */ - symtab 
= (struct nlist *) ((void *) (file + symtab_hdr->symoff)); - if (swap) swap_nlist(symtab, symtab_hdr->nsyms, host_order); + /* Swap the symbol table entries */ + symtab = (struct nlist *) ((void *) (file + symtab_hdr->symoff)); + if (swap) { + swap_nlist(symtab, symtab_hdr->nsyms, host_order); + } #endif /* !KERNEL */ - break; - default: + break; + default: #if !KERNEL - /* Swap the load command */ - if (swap) swap_load_command(load_hdr, host_order); + /* Swap the load command */ + if (swap) { + swap_load_command(load_hdr, host_order); + } #endif /* !KERNEL */ - break; - } - } + break; + } + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -457,312 +472,332 @@ validate_and_swap_macho_64(u_char *file, u_long size #endif /* !KERNEL */ ) { - kern_return_t rval = KERN_FAILURE; - struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file); - struct load_command *load_hdr = NULL; - struct segment_command_64 *seg_hdr = NULL; - struct section_64 *sects = NULL; - struct relocation_info *relocs = NULL; - struct symtab_command *symtab_hdr = NULL; - struct nlist_64 *symtab = NULL; - u_long offset = 0; - u_int cmd = 0; - u_int cmdsize = 0; - u_int i = 0; - u_int j = 0; + kern_return_t rval = KERN_FAILURE; + struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file); + struct load_command *load_hdr = NULL; + struct segment_command_64 *seg_hdr = NULL; + struct section_64 *sects = NULL; + struct relocation_info *relocs = NULL; + struct symtab_command *symtab_hdr = NULL; + struct nlist_64 *symtab = NULL; + u_long offset = 0; + u_int cmd = 0; + u_int cmdsize = 0; + u_int i = 0; + u_int j = 0; #if !KERNEL - boolean_t swap = FALSE; + boolean_t swap = FALSE; #endif /* !KERNEL */ - check(file); - check(size); + check(file); + check(size); - /* Verify that the file is big enough for the mach header */ - require_action(size >= sizeof(*mach_hdr), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - offset = sizeof(*mach_hdr); + /* Verify that the file is big enough for the mach header */ + require_action(size >= sizeof(*mach_hdr), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + offset = sizeof(*mach_hdr); #if !KERNEL - /* Swap the mach header if necessary */ - if (mach_hdr->magic == MH_CIGAM_64) { - swap = TRUE; - (void) swap_mach_header_64(mach_hdr, host_order); - } + /* Swap the mach header if necessary */ + if (mach_hdr->magic == MH_CIGAM_64) { + swap = TRUE; + (void) swap_mach_header_64(mach_hdr, host_order); + } #endif /* !KERNEL */ - /* Validate the mach_header's magic number */ - require_action(mach_hdr->magic == MH_MAGIC_64, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO - "Invalid magic number: 0x%x.", mach_hdr->magic)); + /* Validate the mach_header's magic number */ + require_action(mach_hdr->magic == MH_MAGIC_64, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogMalformedMachO + "Invalid magic number: 0x%x.", mach_hdr->magic)); - /* If in the running kernel, and asked to validate the kernel - * (which is the only file of type MH_EXECUTE we should ever see), - * then just assume it's ok or we wouldn't be running to begin with. 
- */ + /* If in the running kernel, and asked to validate the kernel + * (which is the only file of type MH_EXECUTE we should ever see), + * then just assume it's ok or we wouldn't be running to begin with. + */ #if KERNEL - if (mach_hdr->filetype == MH_EXECUTE) { - rval = KERN_SUCCESS; - goto finish; - } + if (mach_hdr->filetype == MH_EXECUTE) { + rval = KERN_SUCCESS; + goto finish; + } #endif /* KERNEL */ - /* Validate and potentially swap the load commands */ - for(i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) { - /* Get the load command and size */ - load_hdr = (struct load_command *) ((void *) (file + offset)); - cmd = load_hdr->cmd; - cmdsize = load_hdr->cmdsize; + /* Validate and potentially swap the load commands */ + for (i = 0; i < mach_hdr->ncmds; ++i, offset += cmdsize) { + /* Get the load command and size */ + load_hdr = (struct load_command *) ((void *) (file + offset)); + cmd = load_hdr->cmd; + cmdsize = load_hdr->cmdsize; #if !KERNEL - if (swap) { - cmd = OSSwapInt32(load_hdr->cmd); - cmdsize = OSSwapInt32(load_hdr->cmdsize); - } + if (swap) { + cmd = OSSwapInt32(load_hdr->cmd); + cmdsize = OSSwapInt32(load_hdr->cmdsize); + } #endif /* !KERNEL */ - /* Verify that the file is big enough to contain the load command */ - require_action(size >= offset + cmdsize, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - switch(cmd) { - case LC_SEGMENT_64: - /* Get and swap the segment header */ - seg_hdr = (struct segment_command_64 *) ((void *) load_hdr); + /* Verify that the file is big enough to contain the load command */ + require_action(size >= offset + cmdsize, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + switch (cmd) { + case LC_SEGMENT_64: + /* Get and swap the segment header */ + seg_hdr = (struct segment_command_64 *) ((void *) load_hdr); #if !KERNEL - if (swap) swap_segment_command_64(seg_hdr, host_order); + if (swap) { + swap_segment_command_64(seg_hdr, host_order); + } #endif /* !KERNEL */ - /* Get and swap the section headers */ - sects = (struct section_64 *) &seg_hdr[1]; + /* Get and swap the section headers */ + sects = (struct section_64 *) &seg_hdr[1]; #if !KERNEL - if (swap) swap_section_64(sects, seg_hdr->nsects, host_order); + if (swap) { + swap_section_64(sects, seg_hdr->nsects, host_order); + } #endif /* !KERNEL */ - /* If the segment has no vm footprint, skip it */ - if (!seg_hdr->vmsize) continue; - - /* Verify that the file is big enough for the segment data. */ - require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - for (j = 0; j < seg_hdr->nsects; ++j) { - - /* Verify that, if the section is not to be zero filled on - * demand, that file is big enough for the section's data. - */ - require_action((sects[j].flags & S_ZEROFILL) || - (size >= sects[j].offset + sects[j].size), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - /* Verify that the file is big enough for the section's - * relocation entries. 
- */ - require_action(size >= - sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - - /* Swap the relocation entries */ - relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff)); + /* If the segment has no vm footprint, skip it */ + if (!seg_hdr->vmsize) { + continue; + } + + /* Verify that the file is big enough for the segment data. */ + require_action(size >= seg_hdr->fileoff + seg_hdr->filesize, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + for (j = 0; j < seg_hdr->nsects; ++j) { + /* Verify that, if the section is not to be zero filled on + * demand, that file is big enough for the section's data. + */ + require_action((sects[j].flags & S_ZEROFILL) || + (size >= sects[j].offset + sects[j].size), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + /* Verify that the file is big enough for the section's + * relocation entries. + */ + require_action(size >= + sects[j].reloff + sects[j].nreloc * sizeof(*relocs), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + + /* Swap the relocation entries */ + relocs = (struct relocation_info *) ((void *) (file + sects[j].reloff)); #if !KERNEL - if (swap) { - swap_relocation_info(relocs, sects[j].nreloc, - host_order); - } + if (swap) { + swap_relocation_info(relocs, sects[j].nreloc, + host_order); + } #endif /* !KERNEL */ - } + } - break; - case LC_SYMTAB: - /* Get and swap the symtab header */ - symtab_hdr = (struct symtab_command *) load_hdr; + break; + case LC_SYMTAB: + /* Get and swap the symtab header */ + symtab_hdr = (struct symtab_command *) load_hdr; #if !KERNEL - if (swap) swap_symtab_command(symtab_hdr, host_order); + if (swap) { + swap_symtab_command(symtab_hdr, host_order); + } #endif /* !KERNEL */ - /* Verify that the file is big enough for the symbol table */ - require_action(size >= - symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + /* Verify that the file is big enough for the symbol table */ + require_action(size >= + symtab_hdr->symoff + symtab_hdr->nsyms * sizeof(*symtab), finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); - /* Verify that the file is big enough for the string table */ - require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); + /* Verify that the file is big enough for the string table */ + require_action(size >= symtab_hdr->stroff + symtab_hdr->strsize, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogLinking, kKxldLogErr, kKxldLogTruncatedMachO)); #if !KERNEL - /* Swap the symbol table entries */ - symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff)); - if (swap) swap_nlist_64(symtab, symtab_hdr->nsyms, host_order); + /* Swap the symbol table entries */ + symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff)); + if (swap) { + swap_nlist_64(symtab, symtab_hdr->nsyms, host_order); + } #endif /* !KERNEL */ - break; - default: + break; + default: #if !KERNEL - /* Swap the load command */ - if (swap) swap_load_command(load_hdr, host_order); + /* Swap the load command */ + if (swap) { + swap_load_command(load_hdr, host_order); + } #endif /* !KERNEL */ - break; - } - } 
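/*******************************************************************************
* Aside (illustrative sketch, not part of this patch): both validators above
* follow the same shape — confirm the Mach-O header fits in the buffer, then
* walk ncmds load commands, re-checking the remaining size before touching
* each one.  A minimal bounds-checked walk, using hypothetical names and
* Apple's <mach-o/loader.h> types but omitting byte-swapping and per-command
* handling, might look like this.  Reading through memcpy sidesteps any
* alignment concerns, and the cmdsize floor prevents a zero-size command
* from stalling the loop.
*******************************************************************************/
#include <mach-o/loader.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Returns true iff every load command lies entirely inside file[0..size). */
static bool
demo_walk_load_commands(const uint8_t *file, unsigned long size)
{
    struct mach_header_64 mach_hdr;
    unsigned long offset;
    uint32_t i;

    if (size < sizeof(mach_hdr)) {
        return false;                       /* truncated header */
    }
    memcpy(&mach_hdr, file, sizeof(mach_hdr));
    if (mach_hdr.magic != MH_MAGIC_64) {
        return false;                       /* wrong magic or byte order */
    }

    offset = sizeof(mach_hdr);
    for (i = 0; i < mach_hdr.ncmds; ++i) {
        struct load_command load_hdr;

        /* The fixed command header must fit before cmdsize can be read. */
        if (size - offset < sizeof(load_hdr)) {
            return false;
        }
        memcpy(&load_hdr, file + offset, sizeof(load_hdr));

        /* The full command must fit; cmdsize includes the header itself. */
        if (load_hdr.cmdsize < sizeof(load_hdr) ||
            size - offset < load_hdr.cmdsize) {
            return false;
        }
        offset += load_hdr.cmdsize;
    }
    return true;
}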
+ break; + } + } - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } #if !KERNEL /******************************************************************************* *******************************************************************************/ -void unswap_macho(u_char *file, enum NXByteOrder host_order, +void +unswap_macho(u_char *file, enum NXByteOrder host_order, enum NXByteOrder target_order) { - struct mach_header *hdr = (struct mach_header *) ((void *) file); + struct mach_header *hdr = (struct mach_header *) ((void *) file); - if (!hdr) return; + if (!hdr) { + return; + } - if (hdr->magic == MH_MAGIC) { - unswap_macho_32(file, host_order, target_order); - } else if (hdr->magic == MH_MAGIC_64) { - unswap_macho_64(file, host_order, target_order); - } + if (hdr->magic == MH_MAGIC) { + unswap_macho_32(file, host_order, target_order); + } else if (hdr->magic == MH_MAGIC_64) { + unswap_macho_64(file, host_order, target_order); + } } /******************************************************************************* *******************************************************************************/ static void -unswap_macho_32(u_char *file, enum NXByteOrder host_order, +unswap_macho_32(u_char *file, enum NXByteOrder host_order, enum NXByteOrder target_order) { - struct mach_header *mach_hdr = (struct mach_header *) ((void *) file); - struct load_command *load_hdr = NULL; - struct segment_command *seg_hdr = NULL; - struct section *sects = NULL; - struct symtab_command *symtab_hdr = NULL; - struct nlist *symtab = NULL; - u_long offset = 0; - u_int cmd = 0; - u_int size = 0; - u_int i = 0; - - check(file); - - if (target_order == host_order) return; - - offset = sizeof(*mach_hdr); - for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) { - load_hdr = (struct load_command *) ((void *) (file + offset)); - cmd = load_hdr->cmd; - size = load_hdr->cmdsize; - - switch(cmd) { - case LC_SEGMENT: - seg_hdr = (struct segment_command *) load_hdr; - sects = (struct section *) &seg_hdr[1]; - - /* We don't need to unswap relocations because this function is - * called when linking is completed (so there are no relocations). - */ - - swap_section(sects, seg_hdr->nsects, target_order); - swap_segment_command(seg_hdr, target_order); - break; - case LC_SYMTAB: - symtab_hdr = (struct symtab_command *) load_hdr; - symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff)); - - swap_nlist(symtab, symtab_hdr->nsyms, target_order); - swap_symtab_command(symtab_hdr, target_order); - - break; - default: - swap_load_command(load_hdr, target_order); - break; - } - } - - (void) swap_mach_header(mach_hdr, target_order); + struct mach_header *mach_hdr = (struct mach_header *) ((void *) file); + struct load_command *load_hdr = NULL; + struct segment_command *seg_hdr = NULL; + struct section *sects = NULL; + struct symtab_command *symtab_hdr = NULL; + struct nlist *symtab = NULL; + u_long offset = 0; + u_int cmd = 0; + u_int size = 0; + u_int i = 0; + + check(file); + + if (target_order == host_order) { + return; + } + + offset = sizeof(*mach_hdr); + for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) { + load_hdr = (struct load_command *) ((void *) (file + offset)); + cmd = load_hdr->cmd; + size = load_hdr->cmdsize; + + switch (cmd) { + case LC_SEGMENT: + seg_hdr = (struct segment_command *) load_hdr; + sects = (struct section *) &seg_hdr[1]; + + /* We don't need to unswap relocations because this function is + * called when linking is completed (so there are no relocations). 
+ */ + + swap_section(sects, seg_hdr->nsects, target_order); + swap_segment_command(seg_hdr, target_order); + break; + case LC_SYMTAB: + symtab_hdr = (struct symtab_command *) load_hdr; + symtab = (struct nlist*) ((void *) (file + symtab_hdr->symoff)); + + swap_nlist(symtab, symtab_hdr->nsyms, target_order); + swap_symtab_command(symtab_hdr, target_order); + + break; + default: + swap_load_command(load_hdr, target_order); + break; + } + } + + (void) swap_mach_header(mach_hdr, target_order); } /******************************************************************************* *******************************************************************************/ static void -unswap_macho_64(u_char *file, enum NXByteOrder host_order, +unswap_macho_64(u_char *file, enum NXByteOrder host_order, enum NXByteOrder target_order) { - struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file); - struct load_command *load_hdr = NULL; - struct segment_command_64 *seg_hdr = NULL; - struct section_64 *sects = NULL; - struct symtab_command *symtab_hdr = NULL; - struct nlist_64 *symtab = NULL; - u_long offset = 0; - u_int cmd = 0; - u_int size = 0; - u_int i = 0; - - check(file); - - if (target_order == host_order) return; - - offset = sizeof(*mach_hdr); - for(i = 0; i < mach_hdr->ncmds; ++i, offset += size) { - load_hdr = (struct load_command *) ((void *) (file + offset)); - cmd = load_hdr->cmd; - size = load_hdr->cmdsize; - - switch(cmd) { - case LC_SEGMENT_64: - seg_hdr = (struct segment_command_64 *) ((void *) load_hdr); - sects = (struct section_64 *) &seg_hdr[1]; - - /* We don't need to unswap relocations because this function is - * called when linking is completed (so there are no relocations). - */ - - swap_section_64(sects, seg_hdr->nsects, target_order); - swap_segment_command_64(seg_hdr, target_order); - break; - case LC_SYMTAB: - symtab_hdr = (struct symtab_command *) load_hdr; - symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff)); - - swap_nlist_64(symtab, symtab_hdr->nsyms, target_order); - swap_symtab_command(symtab_hdr, target_order); - - break; - default: - swap_load_command(load_hdr, target_order); - break; - } - } - - (void) swap_mach_header_64(mach_hdr, target_order); + struct mach_header_64 *mach_hdr = (struct mach_header_64 *) ((void *) file); + struct load_command *load_hdr = NULL; + struct segment_command_64 *seg_hdr = NULL; + struct section_64 *sects = NULL; + struct symtab_command *symtab_hdr = NULL; + struct nlist_64 *symtab = NULL; + u_long offset = 0; + u_int cmd = 0; + u_int size = 0; + u_int i = 0; + + check(file); + + if (target_order == host_order) { + return; + } + + offset = sizeof(*mach_hdr); + for (i = 0; i < mach_hdr->ncmds; ++i, offset += size) { + load_hdr = (struct load_command *) ((void *) (file + offset)); + cmd = load_hdr->cmd; + size = load_hdr->cmdsize; + + switch (cmd) { + case LC_SEGMENT_64: + seg_hdr = (struct segment_command_64 *) ((void *) load_hdr); + sects = (struct section_64 *) &seg_hdr[1]; + + /* We don't need to unswap relocations because this function is + * called when linking is completed (so there are no relocations). 
+ */ + + swap_section_64(sects, seg_hdr->nsects, target_order); + swap_segment_command_64(seg_hdr, target_order); + break; + case LC_SYMTAB: + symtab_hdr = (struct symtab_command *) load_hdr; + symtab = (struct nlist_64 *) ((void *) (file + symtab_hdr->symoff)); + + swap_nlist_64(symtab, symtab_hdr->nsyms, target_order); + swap_symtab_command(symtab_hdr, target_order); + + break; + default: + swap_load_command(load_hdr, target_order); + break; + } + } + + (void) swap_mach_header_64(mach_hdr, target_order); } #endif /* !KERNEL */ - + /******************************************************************************* *******************************************************************************/ kxld_addr_t kxld_align_address(kxld_addr_t address, u_int align) { - kxld_addr_t alignment = (1 << align); - kxld_addr_t low_bits = 0; + kxld_addr_t alignment = (1 << align); + kxld_addr_t low_bits = 0; - if (!align) return address; + if (!align) { + return address; + } - low_bits = (address) & (alignment - 1); - if (low_bits) { - address += (alignment - low_bits); - } + low_bits = (address) & (alignment - 1); + if (low_bits) { + address += (alignment - low_bits); + } - return address; + return address; } /******************************************************************************* @@ -770,11 +805,11 @@ kxld_align_address(kxld_addr_t address, u_int align) boolean_t kxld_is_32_bit(cpu_type_t cputype) { - return !(cputype & CPU_ARCH_ABI64); + return !(cputype & CPU_ARCH_ABI64); } /******************************************************************************* -* Borrowed (and slightly modified) the libc implementation for the kernel +* Borrowed (and slightly modified) the libc implementation for the kernel * until the kernel has a supported strstr(). * Find the first occurrence of find in s. 
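[Annotation] kxld_align_address() treats its align argument as a log2 exponent, the same convention Mach-O section headers use for their align field. A self-contained sketch of the computation with two spot checks; an equivalent branch-free form would be (address + alignment - 1) & ~(alignment - 1).

#include <assert.h>
#include <stdint.h>

static uint64_t
align_address(uint64_t address, unsigned align /* log2 of the boundary */)
{
    uint64_t alignment = 1ULL << align;
    uint64_t low_bits  = address & (alignment - 1);

    if (low_bits) {
        address += alignment - low_bits;   /* bump up to the next boundary */
    }
    return address;
}

int
main(void)
{
    assert(align_address(0x1001, 4) == 0x1010);  /* up to the 16-byte boundary */
    assert(align_address(0x1000, 4) == 0x1000);  /* already aligned: unchanged */
    return 0;
}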
*******************************************************************************/ @@ -782,23 +817,25 @@ const char * kxld_strstr(const char *s, const char *find) { #if KERNEL - char c, sc; - size_t len; - if (!s || !find) - return s; - if ((c = *find++) != 0) { - len = strlen(find); - do { - do { - if ((sc = *s++) == 0) - return (NULL); - } while (sc != c); - } while (strncmp(s, find, len) != 0); - s--; - } - return s; + char c, sc; + size_t len; + if (!s || !find) { + return s; + } + if ((c = *find++) != 0) { + len = strlen(find); + do { + do { + if ((sc = *s++) == 0) { + return NULL; + } + } while (sc != c); + } while (strncmp(s, find, len) != 0); + s--; + } + return s; #else - return strstr(s, find); + return strstr(s, find); #endif /* KERNEL */ } @@ -808,100 +845,103 @@ void kxld_print_memory_report(void) { #if DEBUG - kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n" - "\tNumber of allocations: %8lu\n" - "\tNumber of frees: %8lu\n" - "\tAverage allocation size: %8lu\n" - "\tTotal bytes allocated: %8lu\n" - "\tTotal bytes freed: %8lu\n" - "\tTotal bytes leaked: %8lu", - num_allocations, num_frees, bytes_allocated / num_allocations, - bytes_allocated, bytes_freed, bytes_allocated - bytes_freed); + kxld_log(kKxldLogLinking, kKxldLogExplicit, "kxld memory usage report:\n" + "\tNumber of allocations: %8lu\n" + "\tNumber of frees: %8lu\n" + "\tAverage allocation size: %8lu\n" + "\tTotal bytes allocated: %8lu\n" + "\tTotal bytes freed: %8lu\n" + "\tTotal bytes leaked: %8lu", + num_allocations, num_frees, bytes_allocated / num_allocations, + bytes_allocated, bytes_freed, bytes_allocated - bytes_freed); #endif } /********************************************************************* *********************************************************************/ #if !KERNEL -boolean_t kxld_set_cross_link_page_size(kxld_size_t target_page_size) +boolean_t +kxld_set_cross_link_page_size(kxld_size_t target_page_size) { - // verify radix 2 - if ((target_page_size != 0) && - ((target_page_size & (target_page_size - 1)) == 0)) { - - s_cross_link_enabled = TRUE; - s_cross_link_page_size = target_page_size; - - return TRUE; - } else { - return FALSE; - } + // verify radix 2 + if ((target_page_size != 0) && + ((target_page_size & (target_page_size - 1)) == 0)) { + s_cross_link_enabled = TRUE; + s_cross_link_page_size = target_page_size; + + return TRUE; + } else { + return FALSE; + } } #endif /* !KERNEL */ /********************************************************************* *********************************************************************/ -kxld_size_t kxld_get_effective_page_size(void) +kxld_size_t +kxld_get_effective_page_size(void) { #if KERNEL - return PAGE_SIZE; + return PAGE_SIZE; #else - if (s_cross_link_enabled) { - return s_cross_link_page_size; - } else { - return PAGE_SIZE; - } + if (s_cross_link_enabled) { + return s_cross_link_page_size; + } else { + return PAGE_SIZE; + } #endif /* KERNEL */ } /********************************************************************* *********************************************************************/ -kxld_addr_t kxld_round_page_cross_safe(kxld_addr_t offset) +kxld_addr_t +kxld_round_page_cross_safe(kxld_addr_t offset) { #if KERNEL - return round_page(offset); + return round_page(offset); #else - // assume s_cross_link_page_size is power of 2 - if (s_cross_link_enabled) { - return (offset + (s_cross_link_page_size - 1)) & - (~(s_cross_link_page_size - 1)); - } else { - return round_page(offset); - } + // assume s_cross_link_page_size is 
power of 2 + if (s_cross_link_enabled) { + return (offset + (s_cross_link_page_size - 1)) & + (~(s_cross_link_page_size - 1)); + } else { + return round_page(offset); + } #endif /* KERNEL */ } #if SPLIT_KEXTS_DEBUG -void kxld_show_split_info(splitKextLinkInfo *info) +void +kxld_show_split_info(splitKextLinkInfo *info) { - kxld_log(kKxldLogLinking, kKxldLogErr, - "splitKextLinkInfo: \n" - "kextExecutable %p to %p kextSize %lu \n" - "linkedKext %p to %p linkedKextSize %lu \n" - "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p " - "vmaddr_DATA %p vmaddr_DATA_CONST %p " - "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p", - (void *) info->kextExecutable, - (void *) (info->kextExecutable + info->kextSize), - info->kextSize, - (void*) info->linkedKext, - (void*) (info->linkedKext + info->linkedKextSize), - info->linkedKextSize, - (void *) info->vmaddr_TEXT, - (void *) info->vmaddr_TEXT_EXEC, - (void *) info->vmaddr_DATA, - (void *) info->vmaddr_DATA_CONST, - (void *) info->vmaddr_LLVM_COV, - (void *) info->vmaddr_LINKEDIT); + kxld_log(kKxldLogLinking, kKxldLogErr, + "splitKextLinkInfo: \n" + "kextExecutable %p to %p kextSize %lu \n" + "linkedKext %p to %p linkedKextSize %lu \n" + "vmaddr_TEXT %p vmaddr_TEXT_EXEC %p " + "vmaddr_DATA %p vmaddr_DATA_CONST %p " + "vmaddr_LLVM_COV %p vmaddr_LINKEDIT %p", + (void *) info->kextExecutable, + (void *) (info->kextExecutable + info->kextSize), + info->kextSize, + (void*) info->linkedKext, + (void*) (info->linkedKext + info->linkedKextSize), + info->linkedKextSize, + (void *) info->vmaddr_TEXT, + (void *) info->vmaddr_TEXT_EXEC, + (void *) info->vmaddr_DATA, + (void *) info->vmaddr_DATA_CONST, + (void *) info->vmaddr_LLVM_COV, + (void *) info->vmaddr_LINKEDIT); } -boolean_t isTargetKextName(const char * the_name) +boolean_t +isTargetKextName(const char * the_name) { - if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) { - return(TRUE); - } - return(FALSE); + if (the_name && 0 == strcmp(the_name, KXLD_TARGET_KEXT)) { + return TRUE; + } + return FALSE; } #endif - diff --git a/libkern/kxld/kxld_util.h b/libkern/kxld/kxld_util.h index d8be6faef..ec9bb482b 100644 --- a/libkern/kxld/kxld_util.h +++ b/libkern/kxld/kxld_util.h @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_UTIL_H_ @@ -36,9 +36,9 @@ #include #include "kxld_types.h" - /* Get machine.h from the kernel source so we can support all platforms - * that the kernel supports. Otherwise we're at the mercy of the host. 
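[Annotation] kxld_set_cross_link_page_size() and kxld_round_page_cross_safe() above rely on two standard bit tricks: a nonzero x is a power of two exactly when (x & (x - 1)) == 0 (what the comment calls "radix 2"), and rounding up to a power-of-two boundary is a mask operation. A minimal sketch with spot checks:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
is_power_of_two(uint64_t x)
{
    return x != 0 && (x & (x - 1)) == 0;   /* exactly one bit set */
}

static uint64_t
round_up(uint64_t offset, uint64_t page_size)
{
    /* assumes page_size is a power of two, as kxld verifies at set time */
    return (offset + (page_size - 1)) & ~(page_size - 1);
}

int
main(void)
{
    assert(is_power_of_two(16384) && !is_power_of_two(0) && !is_power_of_two(24));
    assert(round_up(1, 4096) == 4096);
    assert(round_up(4096, 4096) == 4096);  /* already on a boundary */
    return 0;
}

Validating the page size once in the setter is what lets the rounding helper skip the check on every call.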
- */ +/* Get machine.h from the kernel source so we can support all platforms + * that the kernel supports. Otherwise we're at the mercy of the host. + */ #include "../../osfmk/mach/machine.h" #endif @@ -47,28 +47,28 @@ #define KXLD_3264_FUNC(cond32, rval, func32, func64, ...) \ do { \ - if (cond32) { \ - (rval) = (func32)(__VA_ARGS__); \ - } else { \ - (rval) = (func64)(__VA_ARGS__); \ - } \ + if (cond32) { \ + (rval) = (func32)(__VA_ARGS__); \ + } else { \ + (rval) = (func64)(__VA_ARGS__); \ + } \ } while(0) #elif defined(__LP64__) #define KXLD_3264_FUNC(cond32, rval, func32, func64, ...) \ do { \ - (rval) = (func64)(__VA_ARGS__); \ - } while(0) + (rval) = (func64)(__VA_ARGS__); \ + } while(0) #else #define KXLD_3264_FUNC(cond32, rval, func32, func64, ...) \ do { \ - (rval) = (func32)(__VA_ARGS__); \ + (rval) = (func32)(__VA_ARGS__); \ } while(0) \ -#endif +#endif /* Misc string functions */ #define streq(str1, str2) (((str1) && (str2)) ? !strcmp((str1), (str2)) : 0) @@ -83,7 +83,7 @@ #define START_TIMER() gettimeofday(&start, NULL); #define END_TIMER() gettimeofday(&end, NULL); #define PRINT_TIMER(msg) kxld_log("%s: %ds, %dus\n", (msg), \ - (end.tv_sec - start.tv_sec), (end.tv_usec - start.tv_usec)); + (end.tv_sec - start.tv_sec), (end.tv_usec - start.tv_usec)); /* Misc definitions */ #define KXLD_MAX_NAME_LEN 256 @@ -97,8 +97,8 @@ #define KXLD_OPERATOR_DELETE_ARRAY_SYMBOL "__ZdaPv" struct kxld_section_name { - char segname[16]; - char sectname[16]; + char segname[16]; + char sectname[16]; }; typedef struct kxld_section_name KXLDSectionName; @@ -107,14 +107,14 @@ typedef struct kxld_section_name KXLDSectionName; *******************************************************************************/ void kxld_set_logging_callback(KXLDLoggingCallback logging_callback) - __attribute__((visibility("hidden"))); +__attribute__((visibility("hidden"))); void kxld_set_logging_callback_data(const char * name, void *user_data) - __attribute__((visibility("hidden"))); +__attribute__((visibility("hidden"))); -void kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level, +void kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level, const char *format, ...) - __attribute__((visibility("hidden"), format(printf, 3, 4))); +__attribute__((visibility("hidden"), format(printf, 3, 4))); /* Common logging strings */ #define kKxldLogArchNotSupported "The target architecture (cputype 0x%x) is not supported by kxld." @@ -128,40 +128,41 @@ void kxld_log(KXLDLogSubsystem subsystem, KXLDLogLevel level, #define kKxldLogParentOutOfDate "The super class vtable '%s' for vtable '%s' is out of date. Make sure your kext has been built against the correct headers." #define kKxldLogNoKmodInfo "The kext is missing its kmod_info structure." #define kKxldLogRelocationOverflow "A relocation entry has overflowed. The kext may be too far from one " \ - "of its dependencies. Check your kext's load address." -#define kKxldLogRelocatingPatchedSym "Relocation failed because some class in this kext " \ - "didn't use the OSDeclareDefaultStructors and OSDefineMetaClassAndStructors, so it still " \ + "of its dependencies. Check your kext's load address." +#define kKxldLogRelocatingPatchedSym "Warning: relocation failed because some class in this kext " \ + "didn't use the OSDeclareDefaultStructors and OSDefineMetaClassAndStructors, " \ + "or didn't export all vtable symbols, so it still " \ "references %s, which has been patched with another symbol for binary compatibility. 
" \ "Please make sure all classes that inherit from OSObject use these macros." /******************************************************************************* -* Allocators +* Allocators *******************************************************************************/ - -void * kxld_alloc(size_t size) - __attribute__((malloc, visibility("hidden"))); -void * kxld_page_alloc(size_t size) - __attribute__((malloc, visibility("hidden"))); +void * kxld_alloc(size_t size) +__attribute__((malloc, visibility("hidden"))); -void * kxld_page_alloc_untracked(size_t size) - __attribute__((malloc, visibility("hidden"))); +void * kxld_page_alloc(size_t size) +__attribute__((malloc, visibility("hidden"))); -void * kxld_alloc_pageable(size_t size) - __attribute__((malloc, visibility("hidden"))); +void * kxld_page_alloc_untracked(size_t size) +__attribute__((malloc, visibility("hidden"))); + +void * kxld_alloc_pageable(size_t size) +__attribute__((malloc, visibility("hidden"))); /******************************************************************************* * Deallocators *******************************************************************************/ -void kxld_free(void *ptr, size_t size) - __attribute__((visibility("hidden"))); +void kxld_free(void *ptr, size_t size) +__attribute__((visibility("hidden"))); + +void kxld_page_free(void *ptr, size_t size) +__attribute__((visibility("hidden"))); -void kxld_page_free(void *ptr, size_t size) - __attribute__((visibility("hidden"))); - -void kxld_page_free_untracked(void *ptr, size_t size) - __attribute__((visibility("hidden"))); +void kxld_page_free_untracked(void *ptr, size_t size) +__attribute__((visibility("hidden"))); /******************************************************************************* * Mach-O Functions @@ -180,9 +181,9 @@ kern_return_t validate_and_swap_macho_64(u_char *file, u_long size ) __attribute__((visibility("hidden"))); #if !KERNEL -void unswap_macho(u_char *file, enum NXByteOrder host_order, +void unswap_macho(u_char *file, enum NXByteOrder host_order, enum NXByteOrder target_order) - __attribute__((visibility("hidden"))); +__attribute__((visibility("hidden"))); #endif /* !KERNEL */ /******************************************************************************* @@ -190,20 +191,20 @@ void unswap_macho(u_char *file, enum NXByteOrder host_order, *******************************************************************************/ kxld_addr_t kxld_align_address(kxld_addr_t address, u_int align) - __attribute__((const, visibility("hidden"))); +__attribute__((const, visibility("hidden"))); boolean_t kxld_is_32_bit(cpu_type_t) - __attribute__((const, visibility("hidden"))); +__attribute__((const, visibility("hidden"))); const char * kxld_strstr(const char *s, const char *find) - __attribute__((pure, visibility("hidden"))); +__attribute__((pure, visibility("hidden"))); /******************************************************************************* * Debugging *******************************************************************************/ -void kxld_print_memory_report(void) - __attribute__((visibility("hidden"))); +void kxld_print_memory_report(void) +__attribute__((visibility("hidden"))); /******************************************************************************* * Cross Linking diff --git a/libkern/kxld/kxld_uuid.c b/libkern/kxld/kxld_uuid.c index 0cbfcf24b..75d3c2dc4 100644 --- a/libkern/kxld/kxld_uuid.c +++ b/libkern/kxld/kxld_uuid.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -40,11 +40,11 @@ void kxld_uuid_init_from_macho(KXLDuuid *uuid, struct uuid_command *src) { - check(uuid); - check(src); + check(uuid); + check(src); - memcpy(uuid->uuid, src->uuid, sizeof(uuid->uuid)); - uuid->has_uuid = TRUE; + memcpy(uuid->uuid, src->uuid, sizeof(uuid->uuid)); + uuid->has_uuid = TRUE; } /******************************************************************************* @@ -52,7 +52,7 @@ kxld_uuid_init_from_macho(KXLDuuid *uuid, struct uuid_command *src) void kxld_uuid_clear(KXLDuuid *uuid) { - bzero(uuid, sizeof(*uuid)); + bzero(uuid, sizeof(*uuid)); } /******************************************************************************* @@ -60,34 +60,33 @@ kxld_uuid_clear(KXLDuuid *uuid) u_long kxld_uuid_get_macho_header_size(void) { - return sizeof(struct uuid_command); + return sizeof(struct uuid_command); } /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_uuid_export_macho(const KXLDuuid *uuid, u_char *buf, +kxld_uuid_export_macho(const KXLDuuid *uuid, u_char *buf, u_long *header_offset, u_long header_size) { - kern_return_t rval = KERN_FAILURE; - struct uuid_command *uuidhdr = NULL; + kern_return_t rval = KERN_FAILURE; + struct uuid_command *uuidhdr = NULL; - check(uuid); - check(buf); - check(header_offset); + check(uuid); + check(buf); + check(header_offset); - require_action(sizeof(*uuidhdr) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - uuidhdr = (struct uuid_command *) ((void *) (buf + *header_offset)); - *header_offset += sizeof(*uuidhdr); + require_action(sizeof(*uuidhdr) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); + uuidhdr = (struct uuid_command *) ((void *) (buf + *header_offset)); + *header_offset += sizeof(*uuidhdr); - uuidhdr->cmd = LC_UUID; - uuidhdr->cmdsize = (uint32_t) sizeof(*uuidhdr); - memcpy(uuidhdr->uuid, uuid->uuid, sizeof(uuidhdr->uuid)); + uuidhdr->cmd = LC_UUID; + uuidhdr->cmdsize = (uint32_t) sizeof(*uuidhdr); + memcpy(uuidhdr->uuid, uuid->uuid, sizeof(uuidhdr->uuid)); - rval = KERN_SUCCESS; + rval = KERN_SUCCESS; finish: - return rval; + return rval; } - diff --git a/libkern/kxld/kxld_uuid.h b/libkern/kxld/kxld_uuid.h index 2a180a59c..3083bc87d 100644 --- a/libkern/kxld/kxld_uuid.h +++ b/libkern/kxld/kxld_uuid.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. 
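[Annotation] kxld_uuid_export_macho() above shows kxld's standard export idiom: verify the remaining room in the header area, carve the load command out of the buffer, then advance the caller's offset. A sketch under the same precondition the original relies on (*header_offset never exceeds header_size, so the unsigned subtraction cannot wrap); the _min struct is a stand-in for struct uuid_command, and 0x1b is the value of LC_UUID in <mach-o/loader.h>.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define LC_UUID_LOCAL 0x1b                 /* LC_UUID in <mach-o/loader.h> */

struct uuid_command_min {                  /* stand-in for struct uuid_command */
    uint32_t cmd, cmdsize;
    uint8_t  uuid[16];
};

static bool
export_uuid(const uint8_t uuid[16], uint8_t *buf,
    unsigned long *header_offset, unsigned long header_size)
{
    struct uuid_command_min *hdr;

    /* assumes *header_offset <= header_size, as the kxld caller guarantees */
    if (sizeof(*hdr) > header_size - *header_offset) {
        return false;                      /* no room left in the header area */
    }
    hdr = (struct uuid_command_min *)(buf + *header_offset);
    *header_offset += sizeof(*hdr);        /* claim the space before filling it */

    hdr->cmd = LC_UUID_LOCAL;
    hdr->cmdsize = (uint32_t)sizeof(*hdr);
    memcpy(hdr->uuid, uuid, 16);
    return true;
}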
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_UUID_H_ @@ -39,8 +39,8 @@ struct uuid_command; typedef struct kxld_uuid KXLDuuid; struct kxld_uuid { - u_char uuid[16]; - boolean_t has_uuid; + u_char uuid[16]; + boolean_t has_uuid; }; /******************************************************************************* @@ -48,22 +48,21 @@ struct kxld_uuid { *******************************************************************************/ void kxld_uuid_init_from_macho(KXLDuuid *uuid, struct uuid_command *src) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_uuid_clear(KXLDuuid *uuid) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors *******************************************************************************/ u_long kxld_uuid_get_macho_header_size(void) - __attribute__((pure, visibility("hidden"))); +__attribute__((pure, visibility("hidden"))); kern_return_t -kxld_uuid_export_macho(const KXLDuuid *uuid, u_char *buf, +kxld_uuid_export_macho(const KXLDuuid *uuid, u_char *buf, u_long *header_offset, u_long header_size) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #endif /* _KXLD_UUID_H_ */ - diff --git a/libkern/kxld/kxld_versionmin.c b/libkern/kxld/kxld_versionmin.c index 36c22203f..6632770f0 100644 --- a/libkern/kxld/kxld_versionmin.c +++ b/libkern/kxld/kxld_versionmin.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -40,52 +40,52 @@ void kxld_versionmin_init_from_macho(KXLDversionmin *versionmin, struct version_min_command *src) { - check(versionmin); - check(src); - check((src->cmd == LC_VERSION_MIN_MACOSX) || (src->cmd == LC_VERSION_MIN_IPHONEOS) || (src->cmd == LC_VERSION_MIN_TVOS) || (src->cmd == LC_VERSION_MIN_WATCHOS)); - - switch (src->cmd) { - case LC_VERSION_MIN_MACOSX: - versionmin->platform = kKxldVersionMinMacOSX; - break; - case LC_VERSION_MIN_IPHONEOS: - versionmin->platform = kKxldVersionMiniPhoneOS; - break; - case LC_VERSION_MIN_TVOS: - versionmin->platform = kKxldVersionMinAppleTVOS; - break; - case LC_VERSION_MIN_WATCHOS: - versionmin->platform = kKxldVersionMinWatchOS; - break; - } - - versionmin->version = src->version; - versionmin->has_versionmin = TRUE; + check(versionmin); + check(src); + check((src->cmd == LC_VERSION_MIN_MACOSX) || (src->cmd == LC_VERSION_MIN_IPHONEOS) || (src->cmd == LC_VERSION_MIN_TVOS) || (src->cmd == LC_VERSION_MIN_WATCHOS)); + + switch (src->cmd) { + case LC_VERSION_MIN_MACOSX: + versionmin->platform = kKxldVersionMinMacOSX; + break; + case LC_VERSION_MIN_IPHONEOS: + versionmin->platform = kKxldVersionMiniPhoneOS; + break; + case LC_VERSION_MIN_TVOS: + versionmin->platform = kKxldVersionMinAppleTVOS; + break; + case LC_VERSION_MIN_WATCHOS: + versionmin->platform = kKxldVersionMinWatchOS; + break; + } + + versionmin->version = src->version; + versionmin->has_versionmin = TRUE; } void kxld_versionmin_init_from_build_cmd(KXLDversionmin *versionmin, struct build_version_command *src) { - check(versionmin); - check(src); - switch (src->platform) { - case PLATFORM_MACOS: - versionmin->platform = kKxldVersionMinMacOSX; - break; - case PLATFORM_IOS: - versionmin->platform = kKxldVersionMiniPhoneOS; - break; - case PLATFORM_TVOS: - versionmin->platform = kKxldVersionMinAppleTVOS; - break; - case PLATFORM_WATCHOS: - versionmin->platform = kKxldVersionMinWatchOS; - break; - default: - return; - } - versionmin->version = src->minos; - versionmin->has_versionmin = TRUE; + check(versionmin); + check(src); + switch (src->platform) { + case PLATFORM_MACOS: + versionmin->platform = kKxldVersionMinMacOSX; + break; + case PLATFORM_IOS: + versionmin->platform = kKxldVersionMiniPhoneOS; + break; + case PLATFORM_TVOS: + versionmin->platform = kKxldVersionMinAppleTVOS; + break; + case PLATFORM_WATCHOS: + versionmin->platform = kKxldVersionMinWatchOS; + break; + default: + return; + } + versionmin->version = src->minos; + versionmin->has_versionmin = TRUE; } /******************************************************************************* @@ -93,7 +93,7 @@ kxld_versionmin_init_from_build_cmd(KXLDversionmin *versionmin, struct build_ver void kxld_versionmin_clear(KXLDversionmin *versionmin) { - bzero(versionmin, sizeof(*versionmin)); + bzero(versionmin, sizeof(*versionmin)); } /******************************************************************************* @@ -101,53 +101,52 @@ kxld_versionmin_clear(KXLDversionmin *versionmin) u_long kxld_versionmin_get_macho_header_size(__unused const KXLDversionmin 
*versionmin) { - /* TODO: eventually we can just use struct build_version_command */ - return sizeof(struct version_min_command); + /* TODO: eventually we can just use struct build_version_command */ + return sizeof(struct version_min_command); } /******************************************************************************* *******************************************************************************/ kern_return_t -kxld_versionmin_export_macho(const KXLDversionmin *versionmin, u_char *buf, +kxld_versionmin_export_macho(const KXLDversionmin *versionmin, u_char *buf, u_long *header_offset, u_long header_size) { - kern_return_t rval = KERN_FAILURE; - struct version_min_command *versionminhdr = NULL; - - check(versionmin); - check(buf); - check(header_offset); - - - require_action(sizeof(*versionminhdr) <= header_size - *header_offset, finish, - rval=KERN_FAILURE); - versionminhdr = (struct version_min_command *) ((void *) (buf + *header_offset)); - bzero(versionminhdr, sizeof(*versionminhdr)); - *header_offset += sizeof(*versionminhdr); - - switch (versionmin->platform) { - case kKxldVersionMinMacOSX: - versionminhdr->cmd = LC_VERSION_MIN_MACOSX; - break; - case kKxldVersionMiniPhoneOS: - versionminhdr->cmd = LC_VERSION_MIN_IPHONEOS; - break; - case kKxldVersionMinAppleTVOS: - versionminhdr->cmd = LC_VERSION_MIN_TVOS; - break; - case kKxldVersionMinWatchOS: - versionminhdr->cmd = LC_VERSION_MIN_WATCHOS; - break; - default: - goto finish; - } - versionminhdr->cmdsize = (uint32_t) sizeof(*versionminhdr); - versionminhdr->version = versionmin->version; - versionminhdr->sdk = 0; - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + struct version_min_command *versionminhdr = NULL; + + check(versionmin); + check(buf); + check(header_offset); + + + require_action(sizeof(*versionminhdr) <= header_size - *header_offset, finish, + rval = KERN_FAILURE); + versionminhdr = (struct version_min_command *) ((void *) (buf + *header_offset)); + bzero(versionminhdr, sizeof(*versionminhdr)); + *header_offset += sizeof(*versionminhdr); + + switch (versionmin->platform) { + case kKxldVersionMinMacOSX: + versionminhdr->cmd = LC_VERSION_MIN_MACOSX; + break; + case kKxldVersionMiniPhoneOS: + versionminhdr->cmd = LC_VERSION_MIN_IPHONEOS; + break; + case kKxldVersionMinAppleTVOS: + versionminhdr->cmd = LC_VERSION_MIN_TVOS; + break; + case kKxldVersionMinWatchOS: + versionminhdr->cmd = LC_VERSION_MIN_WATCHOS; + break; + default: + goto finish; + } + versionminhdr->cmdsize = (uint32_t) sizeof(*versionminhdr); + versionminhdr->version = versionmin->version; + versionminhdr->sdk = 0; + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } - diff --git a/libkern/kxld/kxld_versionmin.h b/libkern/kxld/kxld_versionmin.h index 8b3df067e..cd9b224eb 100644 --- a/libkern/kxld/kxld_versionmin.h +++ b/libkern/kxld/kxld_versionmin.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
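[Annotation] kxld_versionmin_init_from_build_cmd() above deliberately returns without setting has_versionmin for platforms it does not recognize, so later export code can tell that nothing was recorded. A condensed sketch of that mapping; the numeric cases correspond to PLATFORM_MACOS (1), PLATFORM_IOS (2), PLATFORM_TVOS (3), and PLATFORM_WATCHOS (4) from <mach-o/loader.h>, and the local names are stand-ins for the kKxldVersionMin* enum.

#include <stdbool.h>
#include <stdint.h>

enum platform_min { MIN_MACOSX, MIN_IPHONEOS, MIN_APPLETVOS, MIN_WATCHOS };

struct versionmin_min {                    /* stand-in for KXLDversionmin */
    enum platform_min platform;
    uint32_t version;
    bool     has_versionmin;
};

static void
init_from_build_platform(struct versionmin_min *vm, uint32_t platform,
    uint32_t minos)
{
    switch (platform) {
    case 1: vm->platform = MIN_MACOSX;    break;   /* PLATFORM_MACOS   */
    case 2: vm->platform = MIN_IPHONEOS;  break;   /* PLATFORM_IOS     */
    case 3: vm->platform = MIN_APPLETVOS; break;   /* PLATFORM_TVOS    */
    case 4: vm->platform = MIN_WATCHOS;   break;   /* PLATFORM_WATCHOS */
    default:
        return;                 /* unknown platform: record nothing */
    }
    vm->version = minos;
    vm->has_versionmin = true;
}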
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_VERSIONMIN_H_ @@ -39,16 +39,16 @@ struct version_min_command; typedef struct kxld_versionmin KXLDversionmin; enum kxld_versionmin_platforms { - kKxldVersionMinMacOSX, - kKxldVersionMiniPhoneOS, - kKxldVersionMinAppleTVOS, - kKxldVersionMinWatchOS + kKxldVersionMinMacOSX, + kKxldVersionMiniPhoneOS, + kKxldVersionMinAppleTVOS, + kKxldVersionMinWatchOS }; struct kxld_versionmin { - enum kxld_versionmin_platforms platform; - uint32_t version; - boolean_t has_versionmin; + enum kxld_versionmin_platforms platform; + uint32_t version; + boolean_t has_versionmin; }; /******************************************************************************* @@ -56,25 +56,24 @@ struct kxld_versionmin { *******************************************************************************/ void kxld_versionmin_init_from_macho(KXLDversionmin *versionmin, struct version_min_command *src) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_versionmin_init_from_build_cmd(KXLDversionmin *versionmin, struct build_version_command *src) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_versionmin_clear(KXLDversionmin *versionmin) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); /******************************************************************************* * Accessors *******************************************************************************/ u_long kxld_versionmin_get_macho_header_size(const KXLDversionmin *versionmin) - __attribute__((pure, visibility("hidden"))); +__attribute__((pure, visibility("hidden"))); kern_return_t -kxld_versionmin_export_macho(const KXLDversionmin *versionmin, u_char *buf, +kxld_versionmin_export_macho(const KXLDversionmin *versionmin, u_char *buf, u_long *header_offset, u_long header_size) - __attribute__((pure, nonnull, visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); #endif /* _KXLD_VERSIONMIN_H_ */ - diff --git a/libkern/kxld/kxld_vtable.c b/libkern/kxld/kxld_vtable.c index 940814ffd..634c19d4d 100644 --- a/libkern/kxld/kxld_vtable.c +++ b/libkern/kxld/kxld_vtable.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -31,7 +31,7 @@ #if KERNEL #ifdef MACH_ASSERT - #undef MACH_ASSERT + #undef MACH_ASSERT #endif #define MACH_ASSERT 1 #include @@ -66,8 +66,8 @@ static void get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size static kern_return_t init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym, const KXLDSect *sect, const KXLDRelocator *relocator); -static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable, - const KXLDSym *vtable_sym, const KXLDRelocator *relocator, +static kern_return_t init_by_entries_and_relocs(KXLDVTable *vtable, + const KXLDSym *vtable_sym, const KXLDRelocator *relocator, const KXLDArray *relocs, const KXLDDict *defined_cxx_symbols); static kern_return_t init_by_entries(KXLDVTable *vtable, @@ -75,90 +75,91 @@ static kern_return_t init_by_entries(KXLDVTable *vtable, /******************************************************************************* *******************************************************************************/ -kern_return_t -kxld_vtable_init(KXLDVTable *vtable, const KXLDSym *vtable_sym, +kern_return_t +kxld_vtable_init(KXLDVTable *vtable, const KXLDSym *vtable_sym, const KXLDObject *object, const KXLDDict *defined_cxx_symbols) { - kern_return_t rval = KERN_FAILURE; - const KXLDArray *extrelocs = NULL; - const KXLDRelocator *relocator = NULL; - const KXLDSect *vtable_sect = NULL; - char *demangled_name = NULL; - size_t demangled_length = 0; - - check(vtable); - check(vtable_sym); - check(object); - - relocator = kxld_object_get_relocator(object); - - vtable_sect = kxld_object_get_section_by_index(object, - vtable_sym->sectnum); - require_action(vtable_sect, finish, rval=KERN_FAILURE); - - vtable->name = vtable_sym->name; - vtable->vtable = vtable_sect->data + - kxld_sym_get_section_offset(vtable_sym, vtable_sect); - - if (kxld_object_is_linked(object)) { - rval = init_by_entries(vtable, relocator, defined_cxx_symbols); - require_noerr(rval, finish); - - vtable->is_patched = TRUE; - } else { - if (kxld_object_is_final_image(object)) { - extrelocs = kxld_object_get_extrelocs(object); - - require_action(extrelocs, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogPatching, kKxldLogErr, - kKxldLogMalformedVTable, - kxld_demangle(vtable->name, - &demangled_name, &demangled_length))); - - rval = init_by_entries_and_relocs(vtable, vtable_sym, - relocator, extrelocs, defined_cxx_symbols); - require_noerr(rval, finish); - } else { - - require_action(kxld_sect_get_num_relocs(vtable_sect) > 0, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogPatching, kKxldLogErr, - kKxldLogMalformedVTable, - kxld_demangle(vtable->name, - &demangled_name, &demangled_length))); - - rval = init_by_relocs(vtable, vtable_sym, vtable_sect, relocator); - require_noerr(rval, finish); - } - - vtable->is_patched = FALSE; - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + const KXLDArray *extrelocs = NULL; + const KXLDRelocator *relocator = NULL; + const KXLDSect *vtable_sect = NULL; + char *demangled_name = NULL; + size_t demangled_length = 0; + + check(vtable); + check(vtable_sym); + 
check(object); + + relocator = kxld_object_get_relocator(object); + + vtable_sect = kxld_object_get_section_by_index(object, + vtable_sym->sectnum); + require_action(vtable_sect, finish, rval = KERN_FAILURE); + + vtable->name = vtable_sym->name; + vtable->vtable = vtable_sect->data + + kxld_sym_get_section_offset(vtable_sym, vtable_sect); + + if (kxld_object_is_linked(object)) { + rval = init_by_entries(vtable, relocator, defined_cxx_symbols); + require_noerr(rval, finish); + + vtable->is_patched = TRUE; + } else { + if (kxld_object_is_final_image(object)) { + extrelocs = kxld_object_get_extrelocs(object); + + require_action(extrelocs, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogPatching, kKxldLogErr, + kKxldLogMalformedVTable, + kxld_demangle(vtable->name, + &demangled_name, &demangled_length))); + + rval = init_by_entries_and_relocs(vtable, vtable_sym, + relocator, extrelocs, defined_cxx_symbols); + require_noerr(rval, finish); + } else { + require_action(kxld_sect_get_num_relocs(vtable_sect) > 0, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogPatching, kKxldLogErr, + kKxldLogMalformedVTable, + kxld_demangle(vtable->name, + &demangled_name, &demangled_length))); + + rval = init_by_relocs(vtable, vtable_sym, vtable_sect, relocator); + require_noerr(rval, finish); + } + + vtable->is_patched = FALSE; + } + + rval = KERN_SUCCESS; finish: - if (demangled_name) kxld_free(demangled_name, demangled_length); + if (demangled_name) { + kxld_free(demangled_name, demangled_length); + } - return rval; + return rval; } /******************************************************************************* *******************************************************************************/ -static void +static void get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size, u_int *vtable_header_size) { - check(vtable_entry_size); - check(vtable_header_size); - - if (is_32_bit) { - *vtable_entry_size = VTABLE_ENTRY_SIZE_32; - *vtable_header_size = VTABLE_HEADER_SIZE_32; - } else { - *vtable_entry_size = VTABLE_ENTRY_SIZE_64; - *vtable_header_size = VTABLE_HEADER_SIZE_64; - } + check(vtable_entry_size); + check(vtable_header_size); + + if (is_32_bit) { + *vtable_entry_size = VTABLE_ENTRY_SIZE_32; + *vtable_header_size = VTABLE_HEADER_SIZE_32; + } else { + *vtable_entry_size = VTABLE_ENTRY_SIZE_64; + *vtable_header_size = VTABLE_HEADER_SIZE_64; + } } /******************************************************************************* @@ -166,86 +167,88 @@ get_vtable_base_sizes(boolean_t is_32_bit, u_int *vtable_entry_size, * entries and finding the corresponding symbols. 
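[Annotation] get_vtable_base_sizes() above selects entry and header sizes by word width. Under the Itanium C++ ABI a vtable begins with two pointer-sized words (the offset-to-top and the RTTI pointer) before the first virtual-function slot, which is consistent with a header of two entries; the concrete numbers in this sketch are assumptions based on that ABI, not values quoted from the patch.

#include <stdbool.h>

static void
vtable_base_sizes(bool is_32_bit, unsigned *entry_size, unsigned *header_size)
{
    *entry_size  = is_32_bit ? 4 : 8;      /* one function pointer per slot   */
    *header_size = 2 * *entry_size;        /* offset-to-top + RTTI pointer    */
}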
*******************************************************************************/ static kern_return_t -init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym, +init_by_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym, const KXLDSect *sect, const KXLDRelocator *relocator) { - kern_return_t rval = KERN_FAILURE; - KXLDReloc *reloc = NULL; - KXLDVTableEntry *entry = NULL; - KXLDSym *sym = NULL; - kxld_addr_t vtable_base_offset = 0; - kxld_addr_t entry_offset = 0; - u_int i = 0; - u_int nentries = 0; - u_int vtable_entry_size = 0; - u_int vtable_header_size = 0; - u_int base_reloc_index = 0; - u_int reloc_index = 0; - - check(vtable); - check(vtable_sym); - check(sect); - check(relocator); - - /* Find the first entry past the vtable padding */ - - (void) get_vtable_base_sizes(relocator->is_32_bit, - &vtable_entry_size, &vtable_header_size); - - vtable_base_offset = kxld_sym_get_section_offset(vtable_sym, sect) + - vtable_header_size; - - /* Find the relocation entry at the start of the vtable */ - - rval = kxld_reloc_get_reloc_index_by_offset(§->relocs, - vtable_base_offset, &base_reloc_index); - require_noerr(rval, finish); - - /* Count the number of consecutive relocation entries to find the number of - * vtable entries. For some reason, the __TEXT,__const relocations are - * sorted in descending order, so we have to walk backwards. Also, make - * sure we don't run off the end of the section's relocs. - */ - - reloc_index = base_reloc_index; - entry_offset = vtable_base_offset; - reloc = kxld_array_get_item(§->relocs, reloc_index); - while (reloc->address == entry_offset) { - ++nentries; - if (!reloc_index) break; - - --reloc_index; - - reloc = kxld_array_get_item(§->relocs, reloc_index); - entry_offset += vtable_entry_size; - } - - /* Allocate the symbol index */ - - rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries); - require_noerr(rval, finish); - - /* Find the symbols for each vtable entry */ - - for (i = 0; i < vtable->entries.nitems; ++i) { - reloc = kxld_array_get_item(§->relocs, base_reloc_index - i); - entry = kxld_array_get_item(&vtable->entries, i); - - /* If we can't find a symbol, it means it is a locally-defined, - * non-external symbol that has been stripped. We don't patch over - * locally-defined symbols, so we leave the symbol as NULL and just - * skip it. We won't be able to patch subclasses with this symbol, - * but there isn't much we can do about that. 
- */ - sym = kxld_reloc_get_symbol(relocator, reloc, sect->data); - - entry->unpatched.sym = sym; - entry->unpatched.reloc = reloc; - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDReloc *reloc = NULL; + KXLDVTableEntry *entry = NULL; + KXLDSym *sym = NULL; + kxld_addr_t vtable_base_offset = 0; + kxld_addr_t entry_offset = 0; + u_int i = 0; + u_int nentries = 0; + u_int vtable_entry_size = 0; + u_int vtable_header_size = 0; + u_int base_reloc_index = 0; + u_int reloc_index = 0; + + check(vtable); + check(vtable_sym); + check(sect); + check(relocator); + + /* Find the first entry past the vtable padding */ + + (void) get_vtable_base_sizes(relocator->is_32_bit, + &vtable_entry_size, &vtable_header_size); + + vtable_base_offset = kxld_sym_get_section_offset(vtable_sym, sect) + + vtable_header_size; + + /* Find the relocation entry at the start of the vtable */ + + rval = kxld_reloc_get_reloc_index_by_offset(§->relocs, + vtable_base_offset, &base_reloc_index); + require_noerr(rval, finish); + + /* Count the number of consecutive relocation entries to find the number of + * vtable entries. For some reason, the __TEXT,__const relocations are + * sorted in descending order, so we have to walk backwards. Also, make + * sure we don't run off the end of the section's relocs. + */ + + reloc_index = base_reloc_index; + entry_offset = vtable_base_offset; + reloc = kxld_array_get_item(§->relocs, reloc_index); + while (reloc->address == entry_offset) { + ++nentries; + if (!reloc_index) { + break; + } + + --reloc_index; + + reloc = kxld_array_get_item(§->relocs, reloc_index); + entry_offset += vtable_entry_size; + } + + /* Allocate the symbol index */ + + rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries); + require_noerr(rval, finish); + + /* Find the symbols for each vtable entry */ + + for (i = 0; i < vtable->entries.nitems; ++i) { + reloc = kxld_array_get_item(§->relocs, base_reloc_index - i); + entry = kxld_array_get_item(&vtable->entries, i); + + /* If we can't find a symbol, it means it is a locally-defined, + * non-external symbol that has been stripped. We don't patch over + * locally-defined symbols, so we leave the symbol as NULL and just + * skip it. We won't be able to patch subclasses with this symbol, + * but there isn't much we can do about that. 
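[Annotation] The counting loop in init_by_relocs() depends on the quirk its comment records: relocations for __TEXT,__const arrive sorted by address in descending order, so starting at the reloc that covers the first vtable slot it walks the array backwards while each reloc lands exactly one entry further on, taking care not to step past index 0. A flat-array sketch of that loop, with a simplified stand-in for KXLDReloc and the kxld_array accessor:

struct reloc_min {                         /* stand-in for KXLDReloc */
    unsigned long address;
};

static unsigned
count_vtable_relocs(const struct reloc_min *relocs, unsigned base_index,
    unsigned long base_offset, unsigned entry_size)
{
    unsigned nentries = 0;
    unsigned i = base_index;
    unsigned long expect = base_offset;

    while (relocs[i].address == expect) {
        ++nentries;
        if (i == 0) {
            break;              /* don't run off the front of the array */
        }
        --i;                    /* descending sort: the next slot is earlier */
        expect += entry_size;
    }
    return nentries;
}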
+ */ + sym = kxld_reloc_get_symbol(relocator, reloc, sect->data); + + entry->unpatched.sym = sym; + entry->unpatched.reloc = reloc; + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -256,67 +259,68 @@ static kern_return_t init_by_entries(KXLDVTable *vtable, const KXLDRelocator *relocator, const KXLDDict *defined_cxx_symbols) { - kern_return_t rval = KERN_FAILURE; - KXLDVTableEntry *tmpentry = NULL; - KXLDSym *sym = NULL; - kxld_addr_t entry_value = 0; - u_long entry_offset; - u_int vtable_entry_size = 0; - u_int vtable_header_size = 0; - u_int nentries = 0; - u_int i = 0; - - check(vtable); - check(relocator); - - (void) get_vtable_base_sizes(relocator->is_32_bit, - &vtable_entry_size, &vtable_header_size); - - /* Count the number of entries (the vtable is null-terminated) */ - - entry_offset = vtable_header_size; - while (1) { - entry_value = kxld_relocator_get_pointer_at_addr(relocator, - vtable->vtable, entry_offset); - if (!entry_value) break; - - entry_offset += vtable_entry_size; - ++nentries; - } - - /* Allocate the symbol index */ - - rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries); - require_noerr(rval, finish); - - /* Look up the symbols for each entry */ - - for (i = 0, entry_offset = vtable_header_size; - i < vtable->entries.nitems; - ++i, entry_offset += vtable_entry_size) - { - entry_value = kxld_relocator_get_pointer_at_addr(relocator, - vtable->vtable, entry_offset); - - /* If we can't find the symbol, it means that the virtual function was - * defined inline. There's not much I can do about this; it just means - * I can't patch this function. - */ - tmpentry = kxld_array_get_item(&vtable->entries, i); - sym = kxld_dict_find(defined_cxx_symbols, &entry_value); - - if (sym) { - tmpentry->patched.name = sym->name; - tmpentry->patched.addr = sym->link_addr; - } else { - tmpentry->patched.name = NULL; - tmpentry->patched.addr = 0; - } - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDVTableEntry *tmpentry = NULL; + KXLDSym *sym = NULL; + kxld_addr_t entry_value = 0; + u_long entry_offset; + u_int vtable_entry_size = 0; + u_int vtable_header_size = 0; + u_int nentries = 0; + u_int i = 0; + + check(vtable); + check(relocator); + + (void) get_vtable_base_sizes(relocator->is_32_bit, + &vtable_entry_size, &vtable_header_size); + + /* Count the number of entries (the vtable is null-terminated) */ + + entry_offset = vtable_header_size; + while (1) { + entry_value = kxld_relocator_get_pointer_at_addr(relocator, + vtable->vtable, entry_offset); + if (!entry_value) { + break; + } + + entry_offset += vtable_entry_size; + ++nentries; + } + + /* Allocate the symbol index */ + + rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries); + require_noerr(rval, finish); + + /* Look up the symbols for each entry */ + + for (i = 0, entry_offset = vtable_header_size; + i < vtable->entries.nitems; + ++i, entry_offset += vtable_entry_size) { + entry_value = kxld_relocator_get_pointer_at_addr(relocator, + vtable->vtable, entry_offset); + + /* If we can't find the symbol, it means that the virtual function was + * defined inline. There's not much I can do about this; it just means + * I can't patch this function. 
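[Annotation] init_by_entries() above can size the table without any relocation data because a linked vtable is NULL-terminated: scan pointer-sized slots past the header until a zero value appears. A sketch of that scan; the memcpy read is a portable stand-in for kxld_relocator_get_pointer_at_addr(), used here only for the zero test, and the buffer is assumed to actually contain a terminator.

#include <stdint.h>
#include <string.h>

static unsigned
count_null_terminated_entries(const uint8_t *vtable, unsigned entry_size,
    unsigned header_size)
{
    unsigned nentries = 0;
    unsigned long offset = header_size;    /* skip the two-word vtable header */
    uint64_t value;

    for (;;) {
        value = 0;
        memcpy(&value, vtable + offset, entry_size);  /* unaligned-safe read */
        if (value == 0) {
            break;              /* terminator: end of the vtable */
        }
        offset += entry_size;
        ++nentries;
    }
    return nentries;
}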
+ */ + tmpentry = kxld_array_get_item(&vtable->entries, i); + sym = kxld_dict_find(defined_cxx_symbols, &entry_value); + + if (sym) { + tmpentry->patched.name = sym->name; + tmpentry->patched.addr = sym->link_addr; + } else { + tmpentry->patched.name = NULL; + tmpentry->patched.addr = 0; + } + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -330,97 +334,98 @@ finish: * external symbols. *******************************************************************************/ static kern_return_t -init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym, +init_by_entries_and_relocs(KXLDVTable *vtable, const KXLDSym *vtable_sym, const KXLDRelocator *relocator, const KXLDArray *relocs, const KXLDDict *defined_cxx_symbols) { - kern_return_t rval = KERN_FAILURE; - KXLDReloc *reloc = NULL; - KXLDVTableEntry *tmpentry = NULL; - KXLDSym *sym = NULL; - u_int vtable_entry_size = 0; - u_int vtable_header_size = 0; - kxld_addr_t entry_value = 0; - u_long entry_offset = 0; - u_int nentries = 0; - u_int i = 0; - char *demangled_name1 = NULL; - size_t demangled_length1 = 0; - - check(vtable); - check(vtable_sym); - check(relocator); - check(relocs); - - /* Find the first entry and its offset past the vtable padding */ - - (void) get_vtable_base_sizes(relocator->is_32_bit, - &vtable_entry_size, &vtable_header_size); - - /* In a final linked image, a vtable slot is valid if it is nonzero - * (meaning the userspace linker has already resolved it) or if it has - * a relocation entry. We'll know the end of the vtable when we find a - * slot that meets neither of these conditions. - */ - entry_offset = vtable_header_size; - while (1) { - entry_value = kxld_relocator_get_pointer_at_addr(relocator, - vtable->vtable, entry_offset); - if (!entry_value) { - reloc = kxld_reloc_get_reloc_by_offset(relocs, - vtable_sym->base_addr + entry_offset); - if (!reloc) break; - } - - ++nentries; - entry_offset += vtable_entry_size; - } - - /* Allocate the symbol index */ - - rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries); - require_noerr(rval, finish); - - /* Find the symbols for each vtable entry */ - - for (i = 0, entry_offset = vtable_header_size; - i < vtable->entries.nitems; - ++i, entry_offset += vtable_entry_size) - { - entry_value = kxld_relocator_get_pointer_at_addr(relocator, - vtable->vtable, entry_offset); - - /* If we can't find a symbol, it means it is a locally-defined, - * non-external symbol that has been stripped. We don't patch over - * locally-defined symbols, so we leave the symbol as NULL and just - * skip it. We won't be able to patch subclasses with this symbol, - * but there isn't much we can do about that. 
- */ - if (entry_value) { - reloc = NULL; - sym = kxld_dict_find(defined_cxx_symbols, &entry_value); - } else { - reloc = kxld_reloc_get_reloc_by_offset(relocs, - vtable_sym->base_addr + entry_offset); - - require_action(reloc, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogPatching, kKxldLogErr, - kKxldLogMalformedVTable, - kxld_demangle(vtable->name, &demangled_name1, - &demangled_length1))); - - sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL); - } - - tmpentry = kxld_array_get_item(&vtable->entries, i); - tmpentry->unpatched.reloc = reloc; - tmpentry->unpatched.sym = sym; - } - - rval = KERN_SUCCESS; + kern_return_t rval = KERN_FAILURE; + KXLDReloc *reloc = NULL; + KXLDVTableEntry *tmpentry = NULL; + KXLDSym *sym = NULL; + u_int vtable_entry_size = 0; + u_int vtable_header_size = 0; + kxld_addr_t entry_value = 0; + u_long entry_offset = 0; + u_int nentries = 0; + u_int i = 0; + char *demangled_name1 = NULL; + size_t demangled_length1 = 0; + + check(vtable); + check(vtable_sym); + check(relocator); + check(relocs); + + /* Find the first entry and its offset past the vtable padding */ + + (void) get_vtable_base_sizes(relocator->is_32_bit, + &vtable_entry_size, &vtable_header_size); + + /* In a final linked image, a vtable slot is valid if it is nonzero + * (meaning the userspace linker has already resolved it) or if it has + * a relocation entry. We'll know the end of the vtable when we find a + * slot that meets neither of these conditions. + */ + entry_offset = vtable_header_size; + while (1) { + entry_value = kxld_relocator_get_pointer_at_addr(relocator, + vtable->vtable, entry_offset); + if (!entry_value) { + reloc = kxld_reloc_get_reloc_by_offset(relocs, + vtable_sym->base_addr + entry_offset); + if (!reloc) { + break; + } + } + + ++nentries; + entry_offset += vtable_entry_size; + } + + /* Allocate the symbol index */ + + rval = kxld_array_init(&vtable->entries, sizeof(KXLDVTableEntry), nentries); + require_noerr(rval, finish); + + /* Find the symbols for each vtable entry */ + + for (i = 0, entry_offset = vtable_header_size; + i < vtable->entries.nitems; + ++i, entry_offset += vtable_entry_size) { + entry_value = kxld_relocator_get_pointer_at_addr(relocator, + vtable->vtable, entry_offset); + + /* If we can't find a symbol, it means it is a locally-defined, + * non-external symbol that has been stripped. We don't patch over + * locally-defined symbols, so we leave the symbol as NULL and just + * skip it. We won't be able to patch subclasses with this symbol, + * but there isn't much we can do about that. 
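[Annotation] The end-of-table rule in init_by_entries_and_relocs() is two-sided, as its comment explains: a slot still belongs to the vtable if it is nonzero (the userspace linker already resolved it) or if a relocation entry covers it; the first slot with neither ends the scan. A sketch of that loop; has_reloc_at() is a hypothetical stand-in for kxld_reloc_get_reloc_by_offset(), stubbed out here so the example links.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
has_reloc_at(unsigned long addr)
{
    (void)addr;
    return false;               /* toy stand-in: no relocation table here */
}

static unsigned
count_final_image_entries(const uint8_t *vtable, unsigned long base_addr,
    unsigned long header_size, unsigned entry_size)
{
    unsigned nentries = 0;
    unsigned long offset = header_size;
    uint64_t value;

    for (;;) {
        value = 0;
        memcpy(&value, vtable + offset, entry_size);
        if (value == 0 && !has_reloc_at(base_addr + offset)) {
            break;              /* neither resolved nor relocated: past the end */
        }
        ++nentries;
        offset += entry_size;
    }
    return nentries;
}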
+ */ + if (entry_value) { + reloc = NULL; + sym = kxld_dict_find(defined_cxx_symbols, &entry_value); + } else { + reloc = kxld_reloc_get_reloc_by_offset(relocs, + vtable_sym->base_addr + entry_offset); + + require_action(reloc, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogPatching, kKxldLogErr, + kKxldLogMalformedVTable, + kxld_demangle(vtable->name, &demangled_name1, + &demangled_length1))); + + sym = kxld_reloc_get_symbol(relocator, reloc, /* data */ NULL); + } + + tmpentry = kxld_array_get_item(&vtable->entries, i); + tmpentry->unpatched.reloc = reloc; + tmpentry->unpatched.sym = sym; + } + + rval = KERN_SUCCESS; finish: - return rval; + return rval; } /******************************************************************************* @@ -428,12 +433,12 @@ finish: void kxld_vtable_clear(KXLDVTable *vtable) { - check(vtable); + check(vtable); - vtable->vtable = NULL; - vtable->name = NULL; - vtable->is_patched = FALSE; - kxld_array_clear(&vtable->entries); + vtable->vtable = NULL; + vtable->name = NULL; + vtable->is_patched = FALSE; + kxld_array_clear(&vtable->entries); } /******************************************************************************* @@ -441,34 +446,34 @@ kxld_vtable_clear(KXLDVTable *vtable) void kxld_vtable_deinit(KXLDVTable *vtable) { - check(vtable); + check(vtable); - kxld_array_deinit(&vtable->entries); - bzero(vtable, sizeof(*vtable)); + kxld_array_deinit(&vtable->entries); + bzero(vtable, sizeof(*vtable)); } /******************************************************************************* *******************************************************************************/ -KXLDVTableEntry * -kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, u_long offset, +KXLDVTableEntry * +kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, u_long offset, boolean_t is_32_bit) { - KXLDVTableEntry *rval = NULL; - u_int vtable_entry_size = 0; - u_int vtable_header_size = 0; - u_int vtable_entry_idx = 0; + KXLDVTableEntry *rval = NULL; + u_int vtable_entry_size = 0; + u_int vtable_header_size = 0; + u_int vtable_entry_idx = 0; - (void) get_vtable_base_sizes(is_32_bit, - &vtable_entry_size, &vtable_header_size); + (void) get_vtable_base_sizes(is_32_bit, + &vtable_entry_size, &vtable_header_size); - if (offset % vtable_entry_size) { - goto finish; - } + if (offset % vtable_entry_size) { + goto finish; + } - vtable_entry_idx = (u_int) ((offset - vtable_header_size) / vtable_entry_size); - rval = kxld_array_get_item(&vtable->entries, vtable_entry_idx); + vtable_entry_idx = (u_int) ((offset - vtable_header_size) / vtable_entry_size); + rval = kxld_array_get_item(&vtable->entries, vtable_entry_idx); finish: - return rval; + return rval; } /******************************************************************************* @@ -478,208 +483,218 @@ kern_return_t kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable, KXLDObject *object) { - kern_return_t rval = KERN_FAILURE; - const KXLDSymtab *symtab = NULL; - const KXLDSym *sym = NULL; - KXLDVTableEntry *child_entry = NULL; - KXLDVTableEntry *parent_entry = NULL; - u_int symindex = 0; - u_int i = 0; - char *demangled_name1 = NULL; - char *demangled_name2 = NULL; - char *demangled_name3 = NULL; - size_t demangled_length1 = 0; - size_t demangled_length2 = 0; - size_t demangled_length3 = 0; - boolean_t failure = FALSE; - - check(vtable); - check(super_vtable); - - symtab = kxld_object_get_symtab(object); - - require_action(!vtable->is_patched, finish, rval=KERN_SUCCESS); - require_action(super_vtable->is_patched, 
finish, rval=KERN_FAILURE); - require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish, - rval=KERN_FAILURE; - kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable, - kxld_demangle(vtable->name, &demangled_name1, &demangled_length1))); - - for (i = 0; i < super_vtable->entries.nitems; ++i) { - child_entry = kxld_array_get_item(&vtable->entries, i); - parent_entry = kxld_array_get_item(&super_vtable->entries, i); - - /* The child entry can be NULL when a locally-defined, non-external - * symbol is stripped. We wouldn't patch this entry anyway, so we - * just skip it. - */ - - if (!child_entry->unpatched.sym) continue; - - /* It's possible for the patched parent entry not to have a symbol - * (e.g. when the definition is inlined). We can't patch this entry no - * matter what, so we'll just skip it and die later if it's a problem - * (which is not likely). - */ - - if (!parent_entry->patched.name) continue; - - /* 1) If the symbol is defined locally, do not patch */ - - if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) continue; - - /* 2) If the child is a pure virtual function, do not patch. - * In general, we want to proceed with patching when the symbol is - * externally defined because pad slots fall into this category. - * The pure virtual function symbol is special case, as the pure - * virtual property itself overrides the parent's implementation. - */ - - if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) continue; - - /* 3) If the symbols are the same, do not patch */ - - if (streq(child_entry->unpatched.sym->name, - parent_entry->patched.name)) - { - continue; - } - - /* 4) If the parent vtable entry is a pad slot, and the child does not - * match it, then the child was built against a newer version of the - * libraries, so it is binary-incompatible. - */ - - require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name), - finish, rval=KERN_FAILURE; - kxld_log(kKxldLogPatching, kKxldLogErr, - kKxldLogParentOutOfDate, - kxld_demangle(super_vtable->name, &demangled_name1, - &demangled_length1), - kxld_demangle(vtable->name, &demangled_name2, - &demangled_length2))); + kern_return_t rval = KERN_FAILURE; + const KXLDSymtab *symtab = NULL; + const KXLDSym *sym = NULL; + KXLDVTableEntry *child_entry = NULL; + KXLDVTableEntry *parent_entry = NULL; + u_int symindex = 0; + u_int i = 0; + char *demangled_name1 = NULL; + char *demangled_name2 = NULL; + char *demangled_name3 = NULL; + size_t demangled_length1 = 0; + size_t demangled_length2 = 0; + size_t demangled_length3 = 0; + boolean_t failure = FALSE; + + check(vtable); + check(super_vtable); + + symtab = kxld_object_get_symtab(object); + + require_action(!vtable->is_patched, finish, rval = KERN_SUCCESS); + require_action(super_vtable->is_patched, finish, rval = KERN_FAILURE); + require_action(vtable->entries.nitems >= super_vtable->entries.nitems, finish, + rval = KERN_FAILURE; + kxld_log(kKxldLogPatching, kKxldLogErr, kKxldLogMalformedVTable, + kxld_demangle(vtable->name, &demangled_name1, &demangled_length1))); + + for (i = 0; i < super_vtable->entries.nitems; ++i) { + child_entry = kxld_array_get_item(&vtable->entries, i); + parent_entry = kxld_array_get_item(&super_vtable->entries, i); + + /* The child entry can be NULL when a locally-defined, non-external + * symbol is stripped. We wouldn't patch this entry anyway, so we + * just skip it. + */ + + if (!child_entry->unpatched.sym) { + continue; + } + + /* It's possible for the patched parent entry not to have a symbol + * (e.g. 
when the definition is inlined). We can't patch this entry no + * matter what, so we'll just skip it and die later if it's a problem + * (which is not likely). + */ + + if (!parent_entry->patched.name) { + continue; + } + + /* 1) If the symbol is defined locally, do not patch */ + + if (kxld_sym_is_defined_locally(child_entry->unpatched.sym)) { + continue; + } + + /* 2) If the child is a pure virtual function, do not patch. + * In general, we want to proceed with patching when the symbol is + * externally defined because pad slots fall into this category. + * The pure virtual function symbol is special case, as the pure + * virtual property itself overrides the parent's implementation. + */ + + if (kxld_sym_is_pure_virtual(child_entry->unpatched.sym)) { + continue; + } + + /* 3) If the symbols are the same, do not patch */ + + if (streq(child_entry->unpatched.sym->name, + parent_entry->patched.name)) { + continue; + } + + /* 4) If the parent vtable entry is a pad slot, and the child does not + * match it, then the child was built against a newer version of the + * libraries, so it is binary-incompatible. + */ + + require_action(!kxld_sym_name_is_padslot(parent_entry->patched.name), + finish, rval = KERN_FAILURE; + kxld_log(kKxldLogPatching, kKxldLogErr, + kKxldLogParentOutOfDate, + kxld_demangle(super_vtable->name, &demangled_name1, + &demangled_length1), + kxld_demangle(vtable->name, &demangled_name2, + &demangled_length2))); #if KXLD_USER_OR_STRICT_PATCHING - /* 5) If we are doing strict patching, we prevent kexts from declaring - * virtual functions and not implementing them. We can tell if a - * virtual function is declared but not implemented because we resolve - * symbols before patching; an unimplemented function will still be - * undefined at this point. We then look at whether the symbol has - * the same class prefix as the vtable. If it does, the symbol was - * declared as part of the class and not inherited, which means we - * should not patch it. - */ - - if (kxld_object_target_supports_strict_patching(object) && - !kxld_sym_is_defined(child_entry->unpatched.sym)) - { - char class_name[KXLD_MAX_NAME_LEN]; - char function_prefix[KXLD_MAX_NAME_LEN]; - u_long function_prefix_len = 0; - - rval = kxld_sym_get_class_name_from_vtable_name(vtable->name, - class_name, sizeof(class_name)); - require_noerr(rval, finish); - - function_prefix_len = - kxld_sym_get_function_prefix_from_class_name(class_name, - function_prefix, sizeof(function_prefix)); - require(function_prefix_len, finish); - - if (!strncmp(child_entry->unpatched.sym->name, - function_prefix, function_prefix_len)) - { - failure = TRUE; - kxld_log(kKxldLogPatching, kKxldLogErr, - "The %s is unpatchable because its class declares the " - "method '%s' without providing an implementation.", - kxld_demangle(vtable->name, - &demangled_name1, &demangled_length1), - kxld_demangle(child_entry->unpatched.sym->name, - &demangled_name2, &demangled_length2)); - continue; - } - } + /* 5) If we are doing strict patching, we prevent kexts from declaring + * virtual functions and not implementing them. We can tell if a + * virtual function is declared but not implemented because we resolve + * symbols before patching; an unimplemented function will still be + * undefined at this point. We then look at whether the symbol has + * the same class prefix as the vtable. If it does, the symbol was + * declared as part of the class and not inherited, which means we + * should not patch it. 
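The strict-patching rule above leans on Itanium-style name mangling: a vtable symbol looks like __ZTV7Derived, and every method declared by that class is mangled with the prefix __ZN7Derived. The sketch below is a simplified, hypothetical stand-in for what kxld_sym_get_class_name_from_vtable_name() and kxld_sym_get_function_prefix_from_class_name() compute; real mangled names (nested namespaces, templates) are messier, and the extra leading underscore is Mach-O convention.

#include <stdio.h>
#include <string.h>

/* Sketch: recover "7Derived" from "__ZTV7Derived" and ask whether an
 * unresolved symbol was declared by the class itself (unpatchable under
 * strict patching) or inherited (eligible for patching). */
static int
declared_by_class(const char *vtable_name, const char *sym_name)
{
    char prefix[128];
    const char *cls = vtable_name + strlen("__ZTV");   /* e.g. "7Derived" */

    snprintf(prefix, sizeof(prefix), "__ZN%s", cls);
    return strncmp(sym_name, prefix, strlen(prefix)) == 0;
}

int
main(void)
{
    /* Declared in Derived but never implemented: strict patching rejects it. */
    printf("%d\n", declared_by_class("__ZTV7Derived", "__ZN7Derived3fooEv")); /* 1 */
    /* Inherited from Base: a candidate for patching with the parent's slot. */
    printf("%d\n", declared_by_class("__ZTV7Derived", "__ZN4Base3barEv"));    /* 0 */
    return 0;
}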
+ */ + + if (kxld_object_target_supports_strict_patching(object) && + !kxld_sym_is_defined(child_entry->unpatched.sym)) { + char class_name[KXLD_MAX_NAME_LEN]; + char function_prefix[KXLD_MAX_NAME_LEN]; + u_long function_prefix_len = 0; + + rval = kxld_sym_get_class_name_from_vtable_name(vtable->name, + class_name, sizeof(class_name)); + require_noerr(rval, finish); + + function_prefix_len = + kxld_sym_get_function_prefix_from_class_name(class_name, + function_prefix, sizeof(function_prefix)); + require(function_prefix_len, finish); + + if (!strncmp(child_entry->unpatched.sym->name, + function_prefix, function_prefix_len)) { + failure = TRUE; + kxld_log(kKxldLogPatching, kKxldLogErr, + "The %s is unpatchable because its class declares the " + "method '%s' without providing an implementation.", + kxld_demangle(vtable->name, + &demangled_name1, &demangled_length1), + kxld_demangle(child_entry->unpatched.sym->name, + &demangled_name2, &demangled_length2)); + continue; + } + } #endif /* KXLD_USER_OR_STRICT_PATCHING */ - - /* 6) The child symbol is unresolved and different from its parent, so - * we need to patch it up. We do this by modifying the relocation - * entry of the vtable entry to point to the symbol of the parent - * vtable entry. If that symbol does not exist (i.e. we got the data - * from a link state object's vtable representation), then we create a - * new symbol in the symbol table and point the relocation entry to - * that. - */ - - sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab, - parent_entry->patched.name); - if (!sym) { - rval = kxld_object_add_symbol(object, parent_entry->patched.name, - parent_entry->patched.addr, &sym); - require_noerr(rval, finish); - } - require_action(sym, finish, rval=KERN_FAILURE); - - rval = kxld_symtab_get_sym_index(symtab, sym, &symindex); - require_noerr(rval, finish); - - rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex); - require_noerr(rval, finish); - - kxld_log(kKxldLogPatching, kKxldLogDetail, - "In vtable '%s', patching '%s' with '%s'.", - kxld_demangle(vtable->name, &demangled_name1, &demangled_length1), - kxld_demangle(child_entry->unpatched.sym->name, - &demangled_name2, &demangled_length2), - kxld_demangle(sym->name, &demangled_name3, &demangled_length3)); - - rval = kxld_object_patch_symbol(object, child_entry->unpatched.sym); - require_noerr(rval, finish); - - child_entry->unpatched.sym = sym; - - /* - * The C++ ABI requires that functions be aligned on a 2-byte boundary: - * http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers - * If the LSB of any virtual function's link address is 1, then the - * compiler has violated that part of the ABI, and we're going to panic - * in _ptmf2ptf() (in OSMetaClass.h). Better to panic here with some - * context. - */ - assert(kxld_sym_is_pure_virtual(sym) || !(sym->link_addr & 1)); - } - - require_action(!failure, finish, rval=KERN_FAILURE); - - /* Change the vtable representation from the unpatched layout to the - * patched layout. 
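The alignment assert above encodes an Itanium C++ ABI invariant that is worth spelling out: in a pointer to member function, a set low bit means "virtual; the stored value minus one is a vtable offset," so genuine function addresses must keep bit 0 clear. The following is a hedged illustration of that encoding rule only, not of OSMetaClass's actual _ptmf2ptf():

#include <stdint.h>
#include <stdio.h>

/* Itanium ABI member-function-pointer discrimination (simplified):
 * odd values denote virtual members (vtable offset + 1), even values
 * are direct code addresses. */
static const char *
classify_ptmf(uintptr_t ptr)
{
    return (ptr & 1) ? "virtual (value-1 is a vtable offset)"
           : "non-virtual (value is the code address)";
}

int
main(void)
{
    printf("0x11:   %s\n", classify_ptmf(0x11));     /* 1 + offset 16 */
    printf("0x1000: %s\n", classify_ptmf(0x1000));   /* even: plain address */
    return 0;
}

A non-virtual method linked at an odd address would therefore be misread as virtual, which is exactly the panic the assert preempts. (The 32-bit ARM variant of the ABI moves this discriminator into the adjustment word, precisely because Thumb code addresses can be odd.)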
- */ - - for (i = 0; i < vtable->entries.nitems; ++i) { - char *name; - kxld_addr_t addr; - - child_entry = kxld_array_get_item(&vtable->entries, i); - if (child_entry->unpatched.sym) { - name = child_entry->unpatched.sym->name; - addr = child_entry->unpatched.sym->link_addr; - } else { - name = NULL; - addr = 0; - } - - child_entry->patched.name = name; - child_entry->patched.addr = addr; - } - - vtable->is_patched = TRUE; - rval = KERN_SUCCESS; + + /* 6) The child symbol is unresolved and different from its parent, so + * we need to patch it up. We do this by modifying the relocation + * entry of the vtable entry to point to the symbol of the parent + * vtable entry. If that symbol does not exist (i.e. we got the data + * from a link state object's vtable representation), then we create a + * new symbol in the symbol table and point the relocation entry to + * that. + */ + + sym = kxld_symtab_get_locally_defined_symbol_by_name(symtab, + parent_entry->patched.name); + if (!sym) { + rval = kxld_object_add_symbol(object, parent_entry->patched.name, + parent_entry->patched.addr, &sym); + require_noerr(rval, finish); + } + require_action(sym, finish, rval = KERN_FAILURE); + + rval = kxld_symtab_get_sym_index(symtab, sym, &symindex); + require_noerr(rval, finish); + + rval = kxld_reloc_update_symindex(child_entry->unpatched.reloc, symindex); + require_noerr(rval, finish); + + kxld_log(kKxldLogPatching, kKxldLogDetail, + "In vtable '%s', patching '%s' with '%s'.", + kxld_demangle(vtable->name, &demangled_name1, &demangled_length1), + kxld_demangle(child_entry->unpatched.sym->name, + &demangled_name2, &demangled_length2), + kxld_demangle(sym->name, &demangled_name3, &demangled_length3)); + + rval = kxld_object_patch_symbol(object, child_entry->unpatched.sym); + require_noerr(rval, finish); + + child_entry->unpatched.sym = sym; + + /* + * The C++ ABI requires that functions be aligned on a 2-byte boundary: + * http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers + * If the LSB of any virtual function's link address is 1, then the + * compiler has violated that part of the ABI, and we're going to panic + * in _ptmf2ptf() (in OSMetaClass.h). Better to panic here with some + * context. + */ + assert(kxld_sym_is_pure_virtual(sym) || !(sym->link_addr & 1)); + } + + require_action(!failure, finish, rval = KERN_FAILURE); + + /* Change the vtable representation from the unpatched layout to the + * patched layout. 
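Regarding the representation change announced in the comment above: KXLDVTableEntry is a union (see kxld_vtable.h below), so "patching" finishes by rewriting each entry in place from its unpatched view (sym/reloc pointers) to its patched view (name/address). A minimal sketch of that two-phase shape, with invented field types standing in for the kxld ones; note the read-before-write order, which the real loop also observes because the two views alias the same storage:

#include <stdio.h>

struct sym { const char *name; unsigned long link_addr; };

union entry {
    struct { const struct sym *sym; } unpatched;
    struct { const char *name; unsigned long addr; } patched;
};

int
main(void)
{
    struct sym s = { "__ZN4Base3fooEv", 0x1000 };
    union entry e = { .unpatched = { &s } };

    /* Flip the live view: pull everything out of .unpatched before
     * storing through .patched, since both overlay the same bytes. */
    const char *name = e.unpatched.sym->name;
    unsigned long addr = e.unpatched.sym->link_addr;
    e.patched.name = name;
    e.patched.addr = addr;

    printf("%s @ %#lx\n", e.patched.name, e.patched.addr);
    return 0;
}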
+ */ + + for (i = 0; i < vtable->entries.nitems; ++i) { + char *name; + kxld_addr_t addr; + + child_entry = kxld_array_get_item(&vtable->entries, i); + if (child_entry->unpatched.sym) { + name = child_entry->unpatched.sym->name; + addr = child_entry->unpatched.sym->link_addr; + } else { + name = NULL; + addr = 0; + } + + child_entry->patched.name = name; + child_entry->patched.addr = addr; + } + + vtable->is_patched = TRUE; + rval = KERN_SUCCESS; finish: - if (demangled_name1) kxld_free(demangled_name1, demangled_length1); - if (demangled_name2) kxld_free(demangled_name2, demangled_length2); - if (demangled_name3) kxld_free(demangled_name3, demangled_length3); - - return rval; + if (demangled_name1) { + kxld_free(demangled_name1, demangled_length1); + } + if (demangled_name2) { + kxld_free(demangled_name2, demangled_length2); + } + if (demangled_name3) { + kxld_free(demangled_name3, demangled_length3); + } + + return rval; } - diff --git a/libkern/kxld/kxld_vtable.h b/libkern/kxld/kxld_vtable.h index 4dd304a76..19ebb241c 100644 --- a/libkern/kxld/kxld_vtable.h +++ b/libkern/kxld/kxld_vtable.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
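The finish: epilogue above is the error-handling idiom used throughout kxld: rval starts pessimistic, every require/require_action failure jumps to one label, and that label releases only what was actually allocated. A generic sketch of the pattern with plain libc allocators standing in for the kxld ones:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
join(const char *a, const char *b)
{
    char *rval = NULL;          /* assume failure, like rval = KERN_FAILURE */
    char *buf = NULL;
    size_t len = strlen(a) + strlen(b) + 1;

    buf = malloc(len);
    if (!buf) {
        goto finish;
    }
    snprintf(buf, len, "%s%s", a, b);

    rval = buf;                 /* success: transfer ownership */
    buf = NULL;
finish:
    free(buf);                  /* non-NULL only on the failure paths */
    return rval;
}

int
main(void)
{
    char *s = join("__ZTV", "7Derived");
    printf("%s\n", s ? s : "(alloc failed)");
    free(s);
    return 0;
}

One wrinkle the sketch hides: kxld_free() takes the allocation size as well as the pointer, which is why each demangled_name above travels with a matching demangled_length.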
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_VTABLE_H_ @@ -51,41 +51,41 @@ typedef struct kxld_vtable KXLDVTable; typedef union kxld_vtable_entry KXLDVTableEntry; struct kxld_vtable { - u_char *vtable; - const char *name; - KXLDArray entries; - boolean_t is_patched; + u_char *vtable; + const char *name; + KXLDArray entries; + boolean_t is_patched; }; struct kxld_vtable_patched_entry { - char *name; - kxld_addr_t addr; + char *name; + kxld_addr_t addr; }; struct kxld_vtable_unpatched_entry { - const struct kxld_sym *sym; - struct kxld_reloc *reloc; + const struct kxld_sym *sym; + struct kxld_reloc *reloc; }; union kxld_vtable_entry { - struct kxld_vtable_patched_entry patched; - struct kxld_vtable_unpatched_entry unpatched; + struct kxld_vtable_patched_entry patched; + struct kxld_vtable_unpatched_entry unpatched; }; /******************************************************************************* * Constructors and destructors *******************************************************************************/ -kern_return_t kxld_vtable_init(KXLDVTable *vtable, +kern_return_t kxld_vtable_init(KXLDVTable *vtable, const struct kxld_sym *vtable_sym, const struct kxld_object *object, const struct kxld_dict *defined_cxx_symbols) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); void kxld_vtable_clear(KXLDVTable *vtable) - __attribute__((visibility("hidden"))); +__attribute__((visibility("hidden"))); void kxld_vtable_deinit(KXLDVTable *vtable) - __attribute__((visibility("hidden"))); +__attribute__((visibility("hidden"))); /******************************************************************************* * Accessors @@ -93,7 +93,7 @@ void kxld_vtable_deinit(KXLDVTable *vtable) KXLDVTableEntry * kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, u_long offset, boolean_t is_32_bit) - __attribute__((pure,nonnull,visibility("hidden"))); +__attribute__((pure, nonnull, visibility("hidden"))); /******************************************************************************* * Modifiers @@ -102,7 +102,6 @@ KXLDVTableEntry * kxld_vtable_get_entry_for_offset(const KXLDVTable *vtable, /* With strict patching, the vtable patcher with only patch pad slots */ kern_return_t kxld_vtable_patch(KXLDVTable *vtable, const KXLDVTable *super_vtable, struct kxld_object *object) - __attribute__((nonnull, visibility("hidden"))); +__attribute__((nonnull, visibility("hidden"))); #endif /* _KXLD_VTABLE_H_ */ - diff --git a/libkern/kxld/tests/kextcopyright.c b/libkern/kxld/tests/kextcopyright.c index b99feebf8..8895fcb90 100644 --- a/libkern/kxld/tests/kextcopyright.c +++ b/libkern/kxld/tests/kextcopyright.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
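Every prototype in the header above carries visibility("hidden") plus, where it applies, nonnull and pure. Hidden visibility keeps kxld's internals out of the containing library's export table; the other two let the compiler check callers and fold redundant calls. A small hedged sketch of the combination (the function body is an invented stand-in):

#include <stdio.h>
#include <string.h>

__attribute__((pure, nonnull, visibility("hidden")))
int
lookup(const char *key)
{
    return (int)strlen(key);   /* pure: result depends only on the argument */
}

int
main(void)
{
    /* nonnull: lookup(NULL) is diagnosed at compile time.
     * visibility("hidden"): the symbol never appears in a dylib's exports,
     * so only code linked into the same image can reach it.
     * pure: two identical calls may be folded into one. */
    return lookup("vt") + lookup("vt");
}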
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,11 +46,11 @@ static char *convert_cfstring(CFStringRef the_string); static void usage(void) { - printf("usage: %s [path to kext]\n\n" - "This program validates the copyright string in a kext's info " - "dictionary.\n\n", gProgname); + printf("usage: %s [path to kext]\n\n" + "This program validates the copyright string in a kext's info " + "dictionary.\n\n", gProgname); - printFormat(); + printFormat(); } /****************************************************************************** @@ -58,17 +58,17 @@ usage(void) static void printFormat(void) { - fprintf(stderr, - "The copyright string should be contained in the NSHumanReadableCopyright key.\n" - "It should be of the format:\n" - "\tCopyright © [year(s) of publication] Apple Inc. All rights reserved.\n\n" - "where [year(s) of publication] is a comma-separated list of years and/or\n" - "year ranges, e.g., 2004, 2006-2008. Years must be four digits. Year ranges\n" - "may not contain spaces and must use four digits for both years.\n\n" - "The following are examples of valid copyright strings:\n" - "\tCopyright © 2008 Apple Inc. All rights reserved.\n" - "\tCopyright © 2004-2008 Apple Inc. All rights reserved.\n" - "\tCopyright © 1998,2000-2002,2004,2006-2008 Apple Inc. All rights reserved.\n"); + fprintf(stderr, + "The copyright string should be contained in the NSHumanReadableCopyright key.\n" + "It should be of the format:\n" + "\tCopyright © [year(s) of publication] Apple Inc. All rights reserved.\n\n" + "where [year(s) of publication] is a comma-separated list of years and/or\n" + "year ranges, e.g., 2004, 2006-2008. Years must be four digits. Year ranges\n" + "may not contain spaces and must use four digits for both years.\n\n" + "The following are examples of valid copyright strings:\n" + "\tCopyright © 2008 Apple Inc. All rights reserved.\n" + "\tCopyright © 2004-2008 Apple Inc. All rights reserved.\n" + "\tCopyright © 1998,2000-2002,2004,2006-2008 Apple Inc. 
All rights reserved.\n"); } /****************************************************************************** @@ -76,44 +76,46 @@ printFormat(void) char * convert_cfstring(CFStringRef the_string) { - char *result = NULL; - CFDataRef the_data = NULL; - const UInt8 *data_bytes = NULL; - char *converted_string = NULL; - u_long converted_len = 0; - u_long bytes_copied = 0; - - the_data = CFStringCreateExternalRepresentation(kCFAllocatorDefault, - the_string, kCFStringEncodingUTF8, 0); - if (!the_data) { - fprintf(stderr, "Failed to convert string\n"); - goto finish; - } - - data_bytes = CFDataGetBytePtr(the_data); - if (!data_bytes) { - fprintf(stderr, "Failed to get converted string bytes\n"); - goto finish; - } - - converted_len = strlen((const char *)data_bytes) + 1; // +1 for nul - converted_string = malloc(converted_len); - if (!converted_string) { - fprintf(stderr, "Failed to allocate memory\n"); - goto finish; - } - - bytes_copied = strlcpy(converted_string, (const char *) data_bytes, - converted_len) + 1; // +1 for nul - if (bytes_copied != converted_len) { - fprintf(stderr, "Failed to copy converted string\n"); - goto finish; - } - - result = converted_string; + char *result = NULL; + CFDataRef the_data = NULL; + const UInt8 *data_bytes = NULL; + char *converted_string = NULL; + u_long converted_len = 0; + u_long bytes_copied = 0; + + the_data = CFStringCreateExternalRepresentation(kCFAllocatorDefault, + the_string, kCFStringEncodingUTF8, 0); + if (!the_data) { + fprintf(stderr, "Failed to convert string\n"); + goto finish; + } + + data_bytes = CFDataGetBytePtr(the_data); + if (!data_bytes) { + fprintf(stderr, "Failed to get converted string bytes\n"); + goto finish; + } + + converted_len = strlen((const char *)data_bytes) + 1; // +1 for nul + converted_string = malloc(converted_len); + if (!converted_string) { + fprintf(stderr, "Failed to allocate memory\n"); + goto finish; + } + + bytes_copied = strlcpy(converted_string, (const char *) data_bytes, + converted_len) + 1; // +1 for nul + if (bytes_copied != converted_len) { + fprintf(stderr, "Failed to copy converted string\n"); + goto finish; + } + + result = converted_string; finish: - if (the_data) CFRelease(the_data); - return result; + if (the_data) { + CFRelease(the_data); + } + return result; } /****************************************************************************** @@ -121,88 +123,99 @@ finish: int main(int argc, const char *argv[]) { - int result = 1; - boolean_t infoCopyrightIsValid = false; - boolean_t readableCopyrightIsValid = false; - CFURLRef anURL = NULL; // must release - CFBundleRef aBundle = NULL; // must release - CFDictionaryRef aDict = NULL; // do not release - CFStringRef infoCopyrightString = NULL; // do not release - CFStringRef readableCopyrightString = NULL; // do not release - char *infoStr = NULL; // must free - char *readableStr = NULL; // must free - - gProgname = argv[0]; - - if (argc != 2) { - usage(); - goto finish; - } - - anURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, - (const UInt8 *) argv[1], strlen(argv[1]), /* isDirectory */ FALSE); - if (!anURL) { - fprintf(stderr, "Can't create path from %s\n", argv[1]); - goto finish; - } - - aBundle = CFBundleCreate(kCFAllocatorDefault, anURL); - if (!aBundle) { - fprintf(stderr, "Can't create bundle at path %s\n", argv[1]); - goto finish; - } - - aDict = CFBundleGetInfoDictionary(aBundle); - if (!aDict) { - fprintf(stderr, "Can't get info dictionary from bundle\n"); - goto finish; - } - - infoCopyrightString = 
CFDictionaryGetValue(aDict, kCFBundleGetInfoStringKey); - readableCopyrightString = CFDictionaryGetValue(aDict, kNSHumanReadableCopyrightKey); - - if (!infoCopyrightString && !readableCopyrightString) { - fprintf(stderr, "This kext does not have a value for NSHumanReadableCopyright"); - goto finish; - } - - if (infoCopyrightString) { - fprintf(stderr, "Warning: This kext has a value for CFBundleGetInfoString.\n" - "This key is obsolete, and may be removed from the kext's Info.plist.\n" - "It has been replaced by CFBundleVersion and NSHumanReadableCopyright.\n\n"); - - infoStr = convert_cfstring(infoCopyrightString); - if (!infoStr) goto finish; - - infoCopyrightIsValid = kxld_validate_copyright_string(infoStr); - } - - if (readableCopyrightString) { - readableStr = convert_cfstring(readableCopyrightString); - if (!readableStr) goto finish; - - readableCopyrightIsValid = kxld_validate_copyright_string(readableStr); - } - - if (!readableCopyrightIsValid) { - if (infoCopyrightIsValid) { - fprintf(stderr, "Warning: The copyright string in NSHumanReadableCopyright is invalid,\n" - "but the string in CFBundleGetInfoString is valid. CFBundleGetInfoString is\n" - "obsolete. Please migrate your copyright string to NSHumanReadableCopyright.\n\n"); - } else { - fprintf(stderr, "Error: There is no valid copyright string for this kext.\n\n"); - printFormat(); - goto finish; - } - } - - result = 0; + int result = 1; + boolean_t infoCopyrightIsValid = false; + boolean_t readableCopyrightIsValid = false; + CFURLRef anURL = NULL; // must release + CFBundleRef aBundle = NULL; // must release + CFDictionaryRef aDict = NULL; // do not release + CFStringRef infoCopyrightString = NULL; // do not release + CFStringRef readableCopyrightString = NULL; // do not release + char *infoStr = NULL; // must free + char *readableStr = NULL; // must free + + gProgname = argv[0]; + + if (argc != 2) { + usage(); + goto finish; + } + + anURL = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, + (const UInt8 *) argv[1], strlen(argv[1]), /* isDirectory */ FALSE); + if (!anURL) { + fprintf(stderr, "Can't create path from %s\n", argv[1]); + goto finish; + } + + aBundle = CFBundleCreate(kCFAllocatorDefault, anURL); + if (!aBundle) { + fprintf(stderr, "Can't create bundle at path %s\n", argv[1]); + goto finish; + } + + aDict = CFBundleGetInfoDictionary(aBundle); + if (!aDict) { + fprintf(stderr, "Can't get info dictionary from bundle\n"); + goto finish; + } + + infoCopyrightString = CFDictionaryGetValue(aDict, kCFBundleGetInfoStringKey); + readableCopyrightString = CFDictionaryGetValue(aDict, kNSHumanReadableCopyrightKey); + + if (!infoCopyrightString && !readableCopyrightString) { + fprintf(stderr, "This kext does not have a value for NSHumanReadableCopyright"); + goto finish; + } + + if (infoCopyrightString) { + fprintf(stderr, "Warning: This kext has a value for CFBundleGetInfoString.\n" + "This key is obsolete, and may be removed from the kext's Info.plist.\n" + "It has been replaced by CFBundleVersion and NSHumanReadableCopyright.\n\n"); + + infoStr = convert_cfstring(infoCopyrightString); + if (!infoStr) { + goto finish; + } + + infoCopyrightIsValid = kxld_validate_copyright_string(infoStr); + } + + if (readableCopyrightString) { + readableStr = convert_cfstring(readableCopyrightString); + if (!readableStr) { + goto finish; + } + + readableCopyrightIsValid = kxld_validate_copyright_string(readableStr); + } + + if (!readableCopyrightIsValid) { + if (infoCopyrightIsValid) { + fprintf(stderr, "Warning: The copyright 
string in NSHumanReadableCopyright is invalid,\n" + "but the string in CFBundleGetInfoString is valid. CFBundleGetInfoString is\n" + "obsolete. Please migrate your copyright string to NSHumanReadableCopyright.\n\n"); + } else { + fprintf(stderr, "Error: There is no valid copyright string for this kext.\n\n"); + printFormat(); + goto finish; + } + } + + result = 0; finish: - if (anURL) CFRelease(anURL); - if (aBundle) CFRelease(aBundle); - if (infoStr) free(infoStr); - if (readableStr) free(readableStr); - - return result; + if (anURL) { + CFRelease(anURL); + } + if (aBundle) { + CFRelease(aBundle); + } + if (infoStr) { + free(infoStr); + } + if (readableStr) { + free(readableStr); + } + + return result; } - diff --git a/libkern/kxld/tests/kxld_array_test.c b/libkern/kxld/tests/kxld_array_test.c index 4791712e1..78acdf967 100644 --- a/libkern/kxld/tests/kxld_array_test.c +++ b/libkern/kxld/tests/kxld_array_test.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -41,120 +41,120 @@ int main(int argc __unused, char *argv[] __unused) { - kern_return_t rval = KERN_FAILURE; - KXLDArray array; - u_int *item = 0; - u_int test_num = 0; - u_int idx = 0; - u_int titems = 0; - u_int storageTestItems[kNumStorageTestItems]; - u_int i = 0; - - bzero(&array, sizeof(array)); - - kxld_set_logging_callback(kxld_test_log); - kxld_set_logging_callback_data("kxld_array_test", NULL); - - kxld_log(0, 0, "%d: Initialize", ++test_num); - - titems = PAGE_SIZE / sizeof(u_int); - rval = kxld_array_init(&array, sizeof(u_int), titems); - assert(rval == KERN_SUCCESS); - assert(array.nitems == titems); - - kxld_log(0, 0, "%d: Get item", ++test_num); - idx = 0; - item = kxld_array_get_item(&array, idx); - assert(item); - assert(item == kxld_array_get_slot(&array, idx)); - - idx = titems - 1; - item = kxld_array_get_item(&array, idx); - assert(item); - assert(item == kxld_array_get_slot(&array, idx)); - - idx = titems; - item = kxld_array_get_item(&array, idx); - assert(!item); - /* We allocated the max number of items that could be stored in a page, - * so get_slot() and get_item() are equivalent. 
- */ - assert(item == kxld_array_get_slot(&array, idx)); - - kxld_log(0, 0, "%d: Resize", ++test_num); - - titems = 2 * PAGE_SIZE / sizeof(u_int) + 100; - rval = kxld_array_resize(&array, titems); - assert(rval == KERN_SUCCESS); - assert(array.nitems == titems); - - kxld_log(0, 0, "%d: Get more items", ++test_num); - idx = 0; - item = kxld_array_get_item(&array, idx); - assert(item); - assert(item == kxld_array_get_slot(&array, idx)); - - idx = titems - 1; - item = kxld_array_get_item(&array, idx); - assert(item); - assert(item == kxld_array_get_slot(&array, idx)); - - idx = titems; - item = kxld_array_get_item(&array, idx); - assert(!item); - /* We allocated fewer items than could fit in a page, so get_slot() will - * return items even when get_item() does not. See below for details. - */ - assert(item != kxld_array_get_slot(&array, idx)); - - kxld_log(0, 0, "%d: Clear and attempt to get an item", ++test_num); - (void) kxld_array_clear(&array); - item = kxld_array_get_item(&array, 0); - assert(!item); - - kxld_log(0, 0, "%d: Get slot", ++test_num); - /* The array allocates its internal storage in pages. Because get_slot() - * fetches items based on the allocated size, not the logical size, we - * calculate the max items get_slot() can retrieve based on page size. - */ - titems = (u_int) (round_page(titems * sizeof(u_int)) / sizeof(u_int)); - assert(!item); - item = kxld_array_get_slot(&array, 0); - assert(item); - item = kxld_array_get_slot(&array, titems - 1); - assert(item); - item = kxld_array_get_slot(&array, titems); - assert(!item); - - kxld_log(0, 0, "%d: Reinitialize", ++test_num); - - titems = kNumStorageTestItems; - rval = kxld_array_init(&array, sizeof(u_int), titems); - assert(rval == KERN_SUCCESS); - assert(array.nitems == titems); - - kxld_log(0, 0, "%d: Storage test - %d insertions and finds", - ++test_num, kNumStorageTestItems); - for (i = 0; i < titems; ++i) { - item = kxld_array_get_item(&array, i); - assert(item); - - *item = (u_int) (random() % UINT_MAX); - storageTestItems[i] = *item; - } - - for (i = 0; i < titems; ++i) { - item = kxld_array_get_item(&array, i); - assert(item); - assert(*item == storageTestItems[i]); - } - - (void) kxld_array_deinit(&array); - - kxld_log(0, 0, " "); - kxld_log(0, 0, "All tests passed! Now check for memory leaks..."); - - kxld_print_memory_report(); - - return 0; + kern_return_t rval = KERN_FAILURE; + KXLDArray array; + u_int *item = 0; + u_int test_num = 0; + u_int idx = 0; + u_int titems = 0; + u_int storageTestItems[kNumStorageTestItems]; + u_int i = 0; + + bzero(&array, sizeof(array)); + + kxld_set_logging_callback(kxld_test_log); + kxld_set_logging_callback_data("kxld_array_test", NULL); + + kxld_log(0, 0, "%d: Initialize", ++test_num); + + titems = PAGE_SIZE / sizeof(u_int); + rval = kxld_array_init(&array, sizeof(u_int), titems); + assert(rval == KERN_SUCCESS); + assert(array.nitems == titems); + + kxld_log(0, 0, "%d: Get item", ++test_num); + idx = 0; + item = kxld_array_get_item(&array, idx); + assert(item); + assert(item == kxld_array_get_slot(&array, idx)); + + idx = titems - 1; + item = kxld_array_get_item(&array, idx); + assert(item); + assert(item == kxld_array_get_slot(&array, idx)); + + idx = titems; + item = kxld_array_get_item(&array, idx); + assert(!item); + /* We allocated the max number of items that could be stored in a page, + * so get_slot() and get_item() are equivalent. 
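The comments in this test pin down the array's two distinct bounds: get_item() checks the logical size (nitems) while get_slot() checks the allocated size, which is rounded up to whole pages. A back-of-envelope check of the numbers the test relies on, assuming 4 KiB pages and 4-byte u_int, with ROUND_PAGE standing in for the kernel's round_page():

#include <assert.h>
#include <stdio.h>

#define PAGE_SZ 4096UL
#define ROUND_PAGE(x) (((x) + PAGE_SZ - 1) & ~(PAGE_SZ - 1))

int
main(void)
{
    /* Phase 1: PAGE_SIZE/sizeof(u_int) items fill exactly one page, so the
     * logical and allocated bounds coincide and the two getters agree. */
    unsigned long n1 = PAGE_SZ / sizeof(unsigned);                  /* 1024 */
    assert(ROUND_PAGE(n1 * sizeof(unsigned)) / sizeof(unsigned) == n1);

    /* Phase 2 (after resize): the logical size no longer fills its pages. */
    unsigned long n2 = 2 * PAGE_SZ / sizeof(unsigned) + 100;        /* 2148 */
    unsigned long slots = ROUND_PAGE(n2 * sizeof(unsigned)) / sizeof(unsigned);

    printf("get_item() valid below %lu, get_slot() below %lu\n", n2, slots);
    assert(slots == 3072);   /* 8592 bytes round up to three 4 KiB pages */
    return 0;
}

Indexes in [2148, 3072) are exactly where the test expects get_item() to return NULL while get_slot() still hands back storage.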
+ */ + assert(item == kxld_array_get_slot(&array, idx)); + + kxld_log(0, 0, "%d: Resize", ++test_num); + + titems = 2 * PAGE_SIZE / sizeof(u_int) + 100; + rval = kxld_array_resize(&array, titems); + assert(rval == KERN_SUCCESS); + assert(array.nitems == titems); + + kxld_log(0, 0, "%d: Get more items", ++test_num); + idx = 0; + item = kxld_array_get_item(&array, idx); + assert(item); + assert(item == kxld_array_get_slot(&array, idx)); + + idx = titems - 1; + item = kxld_array_get_item(&array, idx); + assert(item); + assert(item == kxld_array_get_slot(&array, idx)); + + idx = titems; + item = kxld_array_get_item(&array, idx); + assert(!item); + /* We allocated fewer items than could fit in a page, so get_slot() will + * return items even when get_item() does not. See below for details. + */ + assert(item != kxld_array_get_slot(&array, idx)); + + kxld_log(0, 0, "%d: Clear and attempt to get an item", ++test_num); + (void) kxld_array_clear(&array); + item = kxld_array_get_item(&array, 0); + assert(!item); + + kxld_log(0, 0, "%d: Get slot", ++test_num); + /* The array allocates its internal storage in pages. Because get_slot() + * fetches items based on the allocated size, not the logical size, we + * calculate the max items get_slot() can retrieve based on page size. + */ + titems = (u_int) (round_page(titems * sizeof(u_int)) / sizeof(u_int)); + assert(!item); + item = kxld_array_get_slot(&array, 0); + assert(item); + item = kxld_array_get_slot(&array, titems - 1); + assert(item); + item = kxld_array_get_slot(&array, titems); + assert(!item); + + kxld_log(0, 0, "%d: Reinitialize", ++test_num); + + titems = kNumStorageTestItems; + rval = kxld_array_init(&array, sizeof(u_int), titems); + assert(rval == KERN_SUCCESS); + assert(array.nitems == titems); + + kxld_log(0, 0, "%d: Storage test - %d insertions and finds", + ++test_num, kNumStorageTestItems); + for (i = 0; i < titems; ++i) { + item = kxld_array_get_item(&array, i); + assert(item); + + *item = (u_int) (random() % UINT_MAX); + storageTestItems[i] = *item; + } + + for (i = 0; i < titems; ++i) { + item = kxld_array_get_item(&array, i); + assert(item); + assert(*item == storageTestItems[i]); + } + + (void) kxld_array_deinit(&array); + + kxld_log(0, 0, " "); + kxld_log(0, 0, "All tests passed! Now check for memory leaks..."); + + kxld_print_memory_report(); + + return 0; } diff --git a/libkern/kxld/tests/kxld_dict_test.c b/libkern/kxld/tests/kxld_dict_test.c index a9b2f5f23..c203a2654 100644 --- a/libkern/kxld/tests/kxld_dict_test.c +++ b/libkern/kxld/tests/kxld_dict_test.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -36,124 +36,123 @@ #define STRESSNUM 10000 typedef struct { - char * key; - int * value; + char * key; + int * value; } Stress; -int +int main(int argc __unused, char *argv[] __unused) { - kern_return_t result = KERN_SUCCESS; - KXLDDict dict; - int a1 = 1, a2 = 3, i = 0, j = 0; - void * b = NULL; - u_int test_num = 0; - u_long size = 0; - Stress stress_test[STRESSNUM]; - - kxld_set_logging_callback(kxld_test_log); - kxld_set_logging_callback_data("kxld_dict_test", NULL); - - bzero(&dict, sizeof(dict)); - - kxld_log(0, 0, "%d: Initialize", ++test_num); - result = kxld_dict_init(&dict, kxld_dict_string_hash, kxld_dict_string_cmp, 10); - assert(result == KERN_SUCCESS); - size = kxld_dict_get_num_entries(&dict); - assert(size == 0); - - kxld_log(0, 0, "%d: Find nonexistant key", ++test_num); - b = kxld_dict_find(&dict, "hi"); - assert(b == NULL); - - kxld_log(0, 0, "%d: Insert and find", ++test_num); - result = kxld_dict_insert(&dict, "hi", &a1); - assert(result == KERN_SUCCESS); - b = kxld_dict_find(&dict, "hi"); - assert(b && *(int*)b == a1); - size = kxld_dict_get_num_entries(&dict); - assert(size == 1); - - kxld_log(0, 0, "%d: Insert same key with different values", ++test_num); - result = kxld_dict_insert(&dict, "hi", &a2); - assert(result == KERN_SUCCESS); - b = kxld_dict_find(&dict, "hi"); - assert(b && *(int*)b == a2); - size = kxld_dict_get_num_entries(&dict); - assert(size == 1); - - kxld_log(0, 0, "%d: Clear and find of nonexistant key", ++test_num); - kxld_dict_clear(&dict); - result = kxld_dict_init(&dict, kxld_dict_string_hash, kxld_dict_string_cmp, 10); - assert(result == KERN_SUCCESS); - b = kxld_dict_find(&dict, "hi"); - assert(b == NULL); - size = kxld_dict_get_num_entries(&dict); - assert(size == 0); - - kxld_log(0, 0, "%d: Insert multiple keys", ++test_num); - result = kxld_dict_insert(&dict, "hi", &a1); - assert(result == KERN_SUCCESS); - result = kxld_dict_insert(&dict, "hello", &a2); - assert(result == KERN_SUCCESS); - b = kxld_dict_find(&dict, "hi"); - assert(result == KERN_SUCCESS); - assert(b && *(int*)b == a1); - b = kxld_dict_find(&dict, "hello"); - assert(b && *(int*)b == a2); - size = kxld_dict_get_num_entries(&dict); - assert(size == 2); - - kxld_log(0, 0, "%d: Remove keys", ++test_num); - kxld_dict_remove(&dict, "hi", &b); - assert(b && *(int*)b == a1); - b = kxld_dict_find(&dict, "hi"); - assert(b == NULL); - kxld_dict_remove(&dict, "hi", &b); - assert(b == NULL); - size = kxld_dict_get_num_entries(&dict); - assert(size == 1); - - kxld_log(0, 0, "%d: Stress test - %d insertions and finds", ++test_num, STRESSNUM); - - kxld_dict_clear(&dict); - result = kxld_dict_init(&dict, kxld_dict_string_hash, kxld_dict_string_cmp, 10); - assert(result == KERN_SUCCESS); - for (i = 0; i < STRESSNUM; ++i) { - int * tmp_value = kxld_alloc(sizeof(int)); - char * tmp_key = kxld_alloc(sizeof(char) * (KEYLEN + 1)); - - *tmp_value = i; - for (j = 0; j < KEYLEN; ++j) { - tmp_key[j] = (random() % 26) + 'a'; - } - tmp_key[KEYLEN] = '\0'; - - kxld_dict_insert(&dict, tmp_key, tmp_value); - stress_test[i].key = 
tmp_key; - stress_test[i].value = tmp_value; - } - - for (i = 0; i < STRESSNUM; ++i) { - int target_value; - void * tmp_value; - char * key = stress_test[i].key; - - target_value = *stress_test[i].value; - tmp_value = kxld_dict_find(&dict, key); - assert(target_value == *(int *)tmp_value); - - kxld_free(stress_test[i].key, sizeof(char) * (KEYLEN + 1)); - kxld_free(stress_test[i].value, sizeof(int)); - } - - kxld_log(0, 0, "%d: Destroy", ++test_num); - kxld_dict_deinit(&dict); - - kxld_log(0, 0, "\nAll tests passed! Now check for memory leaks..."); - - kxld_print_memory_report(); - - return 0; -} + kern_return_t result = KERN_SUCCESS; + KXLDDict dict; + int a1 = 1, a2 = 3, i = 0, j = 0; + void * b = NULL; + u_int test_num = 0; + u_long size = 0; + Stress stress_test[STRESSNUM]; + + kxld_set_logging_callback(kxld_test_log); + kxld_set_logging_callback_data("kxld_dict_test", NULL); + + bzero(&dict, sizeof(dict)); + + kxld_log(0, 0, "%d: Initialize", ++test_num); + result = kxld_dict_init(&dict, kxld_dict_string_hash, kxld_dict_string_cmp, 10); + assert(result == KERN_SUCCESS); + size = kxld_dict_get_num_entries(&dict); + assert(size == 0); + + kxld_log(0, 0, "%d: Find nonexistant key", ++test_num); + b = kxld_dict_find(&dict, "hi"); + assert(b == NULL); + + kxld_log(0, 0, "%d: Insert and find", ++test_num); + result = kxld_dict_insert(&dict, "hi", &a1); + assert(result == KERN_SUCCESS); + b = kxld_dict_find(&dict, "hi"); + assert(b && *(int*)b == a1); + size = kxld_dict_get_num_entries(&dict); + assert(size == 1); + + kxld_log(0, 0, "%d: Insert same key with different values", ++test_num); + result = kxld_dict_insert(&dict, "hi", &a2); + assert(result == KERN_SUCCESS); + b = kxld_dict_find(&dict, "hi"); + assert(b && *(int*)b == a2); + size = kxld_dict_get_num_entries(&dict); + assert(size == 1); + + kxld_log(0, 0, "%d: Clear and find of nonexistant key", ++test_num); + kxld_dict_clear(&dict); + result = kxld_dict_init(&dict, kxld_dict_string_hash, kxld_dict_string_cmp, 10); + assert(result == KERN_SUCCESS); + b = kxld_dict_find(&dict, "hi"); + assert(b == NULL); + size = kxld_dict_get_num_entries(&dict); + assert(size == 0); + + kxld_log(0, 0, "%d: Insert multiple keys", ++test_num); + result = kxld_dict_insert(&dict, "hi", &a1); + assert(result == KERN_SUCCESS); + result = kxld_dict_insert(&dict, "hello", &a2); + assert(result == KERN_SUCCESS); + b = kxld_dict_find(&dict, "hi"); + assert(result == KERN_SUCCESS); + assert(b && *(int*)b == a1); + b = kxld_dict_find(&dict, "hello"); + assert(b && *(int*)b == a2); + size = kxld_dict_get_num_entries(&dict); + assert(size == 2); + kxld_log(0, 0, "%d: Remove keys", ++test_num); + kxld_dict_remove(&dict, "hi", &b); + assert(b && *(int*)b == a1); + b = kxld_dict_find(&dict, "hi"); + assert(b == NULL); + kxld_dict_remove(&dict, "hi", &b); + assert(b == NULL); + size = kxld_dict_get_num_entries(&dict); + assert(size == 1); + + kxld_log(0, 0, "%d: Stress test - %d insertions and finds", ++test_num, STRESSNUM); + + kxld_dict_clear(&dict); + result = kxld_dict_init(&dict, kxld_dict_string_hash, kxld_dict_string_cmp, 10); + assert(result == KERN_SUCCESS); + for (i = 0; i < STRESSNUM; ++i) { + int * tmp_value = kxld_alloc(sizeof(int)); + char * tmp_key = kxld_alloc(sizeof(char) * (KEYLEN + 1)); + + *tmp_value = i; + for (j = 0; j < KEYLEN; ++j) { + tmp_key[j] = (random() % 26) + 'a'; + } + tmp_key[KEYLEN] = '\0'; + + kxld_dict_insert(&dict, tmp_key, tmp_value); + stress_test[i].key = tmp_key; + stress_test[i].value = tmp_value; + } + + for (i = 0; i < 
STRESSNUM; ++i) { + int target_value; + void * tmp_value; + char * key = stress_test[i].key; + + target_value = *stress_test[i].value; + tmp_value = kxld_dict_find(&dict, key); + assert(target_value == *(int *)tmp_value); + + kxld_free(stress_test[i].key, sizeof(char) * (KEYLEN + 1)); + kxld_free(stress_test[i].value, sizeof(int)); + } + + kxld_log(0, 0, "%d: Destroy", ++test_num); + kxld_dict_deinit(&dict); + + kxld_log(0, 0, "\nAll tests passed! Now check for memory leaks..."); + + kxld_print_memory_report(); + + return 0; +} diff --git a/libkern/kxld/tests/kxld_test.c b/libkern/kxld/tests/kxld_test.c index d802cc7a1..2144d4acd 100644 --- a/libkern/kxld/tests/kxld_test.c +++ b/libkern/kxld/tests/kxld_test.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -30,16 +30,14 @@ #include "kxld_test.h" #include "kxld_util.h" -void +void kxld_test_log(KXLDLogSubsystem sys __unused, KXLDLogLevel level __unused, const char *format, va_list ap, void *user_data __unused) { - va_list args; + va_list args; - va_copy(args, ap); - vfprintf(stderr, format, args); - fprintf(stderr, "\n"); - va_end(args); + va_copy(args, ap); + vfprintf(stderr, format, args); + fprintf(stderr, "\n"); + va_end(args); } - - diff --git a/libkern/kxld/tests/kxld_test.h b/libkern/kxld/tests/kxld_test.h index 98e05c778..35ab9109d 100644 --- a/libkern/kxld/tests/kxld_test.h +++ b/libkern/kxld/tests/kxld_test.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
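Distilled from the stress test above, the kxld_dict calling convention: init takes a hash callback, a comparator, and an expected entry count; inserting an existing key replaces the value without growing the count; and remove optionally hands back the stored pointer (or NULL for a missing key). A condensed usage sketch against the same in-tree API, compilable only inside the kxld tree, with return-value checks elided for brevity where the real tests assert KERN_SUCCESS:

#include <strings.h>     /* bzero */
#include "kxld_dict.h"   /* in-tree kxld header, not a public SDK interface */

static void
dict_example(void)
{
    KXLDDict d;
    int v1 = 1, v2 = 2;
    void *out = NULL;

    bzero(&d, sizeof(d));
    (void) kxld_dict_init(&d, kxld_dict_string_hash, kxld_dict_string_cmp, 10);

    (void) kxld_dict_insert(&d, "key", &v1);
    (void) kxld_dict_insert(&d, "key", &v2);  /* same key: value replaced */

    out = kxld_dict_find(&d, "key");          /* -> &v2 */
    kxld_dict_remove(&d, "key", &out);        /* hands back &v2, deletes entry */
    kxld_dict_remove(&d, "key", &out);        /* absent key: out becomes NULL */

    kxld_dict_deinit(&d);
    (void) out;
}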
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,4 +30,3 @@ void kxld_test_log(KXLDLogSubsystem sys, KXLDLogLevel level, const char *format, va_list ap, void *user_data); - diff --git a/libkern/libclosure/libclosuredata.c b/libkern/libclosure/libclosuredata.c index 27e906f31..c357b88f1 100644 --- a/libkern/libclosure/libclosuredata.c +++ b/libkern/libclosure/libclosuredata.c @@ -9,12 +9,12 @@ */ /******************** -NSBlock support - -We allocate space and export a symbol to be used as the Class for the on-stack and malloc'ed copies until ObjC arrives on the scene. These data areas are set up by Foundation to link in as real classes post facto. - -We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy. Somehow these don't get unified in a common block. -**********************/ + * NSBlock support + * + * We allocate space and export a symbol to be used as the Class for the on-stack and malloc'ed copies until ObjC arrives on the scene. These data areas are set up by Foundation to link in as real classes post facto. + * + * We keep these in a separate file so that we can include the runtime code in test subprojects but not include the data so that compiled code that sees the data in libSystem doesn't get confused by a second copy. Somehow these don't get unified in a common block. + **********************/ void * _NSConcreteStackBlock[32] = { 0 }; void * _NSConcreteMallocBlock[32] = { 0 }; diff --git a/libkern/libclosure/runtime.cpp b/libkern/libclosure/runtime.cpp index 42e379848..95bf5a48c 100644 --- a/libkern/libclosure/runtime.cpp +++ b/libkern/libclosure/runtime.cpp @@ -38,18 +38,20 @@ #if TARGET_OS_WIN32 #define _CRT_SECURE_NO_WARNINGS 1 #include -static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) -{ - // fixme barrier is overkill -- see objc-os.h - long original = InterlockedCompareExchange(dst, newl, oldl); - return (original == oldl); +static __inline bool +OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) +{ + // fixme barrier is overkill -- see objc-os.h + long original = InterlockedCompareExchange(dst, newl, oldl); + return original == oldl; } -static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) -{ - // fixme barrier is overkill -- see objc-os.h - int original = InterlockedCompareExchange(dst, newi, oldi); - return (original == oldi); +static __inline bool +OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) +{ + // fixme barrier is overkill -- see objc-os.h + int original = InterlockedCompareExchange(dst, newi, oldi); + return original == oldi; } #else #define OSAtomicCompareAndSwapLong(_Old, _New, _Ptr) __sync_bool_compare_and_swap(_Ptr, _Old, _New) @@ -58,75 +60,90 @@ static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile /******************************************************************************* -Internal Utilities -********************************************************************************/ - -static int32_t latching_incr_int(volatile int32_t *where) { - while (1) { - int32_t old_value = *where; - if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { - return BLOCK_REFCOUNT_MASK; - } - if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) { - return 
old_value+2; - } - } + * Internal Utilities + ********************************************************************************/ + +static int32_t +latching_incr_int(volatile int32_t *where) +{ + while (1) { + int32_t old_value = *where; + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + return BLOCK_REFCOUNT_MASK; + } + if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) { + return old_value + 2; + } + } } -static bool latching_incr_int_not_deallocating(volatile int32_t *where) { - while (1) { - int32_t old_value = *where; - if (old_value & BLOCK_DEALLOCATING) { - // if deallocating we can't do this - return false; - } - if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { - // if latched, we're leaking this block, and we succeed - return true; - } - if (OSAtomicCompareAndSwapInt(old_value, old_value+2, where)) { - // otherwise, we must store a new retained value without the deallocating bit set - return true; - } - } +static bool +latching_incr_int_not_deallocating(volatile int32_t *where) +{ + while (1) { + int32_t old_value = *where; + if (old_value & BLOCK_DEALLOCATING) { + // if deallocating we can't do this + return false; + } + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + // if latched, we're leaking this block, and we succeed + return true; + } + if (OSAtomicCompareAndSwapInt(old_value, old_value + 2, where)) { + // otherwise, we must store a new retained value without the deallocating bit set + return true; + } + } } // return should_deallocate? -static bool latching_decr_int_should_deallocate(volatile int32_t *where) { - while (1) { - int32_t old_value = *where; - if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { - return false; // latched high - } - if ((old_value & BLOCK_REFCOUNT_MASK) == 0) { - return false; // underflow, latch low - } - int32_t new_value = old_value - 2; - bool result = false; - if ((old_value & (BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING)) == 2) { - new_value = old_value - 1; - result = true; - } - if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) { - return result; - } - } +static bool +latching_decr_int_should_deallocate(volatile int32_t *where) +{ + while (1) { + int32_t old_value = *where; + if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) { + return false; // latched high + } + if ((old_value & BLOCK_REFCOUNT_MASK) == 0) { + return false; // underflow, latch low + } + int32_t new_value = old_value - 2; + bool result = false; + if ((old_value & (BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING)) == 2) { + new_value = old_value - 1; + result = true; + } + if (OSAtomicCompareAndSwapInt(old_value, new_value, where)) { + return result; + } + } } /************************************************************************** -Framework callback functions and their default implementations. -***************************************************************************/ + * Framework callback functions and their default implementations. 
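The two latching helpers above pack everything into one 32-bit flags word: the deallocating marker lives in bit 0 and the reference count occupies the masked field in steps of 2, so "logical refcount 1" is stored as 2. Saturating ("latching") at the mask value deliberately leaks the block rather than overflowing the count. A worked trace under those assumptions, with the mask values written out as stand-ins for BLOCK_REFCOUNT_MASK and BLOCK_DEALLOCATING:

#include <stdint.h>
#include <stdio.h>

#define REFCOUNT_MASK 0xfffe   /* stand-in for BLOCK_REFCOUNT_MASK */
#define DEALLOCATING  0x0001   /* stand-in for BLOCK_DEALLOCATING */

int
main(void)
{
    int32_t flags = 2;                   /* freshly copied: logical refcount 1 */

    flags += 2;                          /* retain: logical refcount 2 */
    printf("count=%d\n", (flags & REFCOUNT_MASK) >> 1);

    flags -= 2;                          /* release with count 2: plain -2 */

    /* Release with stored value exactly 2 takes the -1 path instead,
     * leaving count 0 *and* the deallocating bit set in one CAS; that is
     * the state latching_decr_int_should_deallocate() reports as true. */
    flags -= 1;                          /* 2 -> 1 */
    printf("deallocating=%d count=%d\n",
        flags & DEALLOCATING, (flags & REFCOUNT_MASK) >> 1);
    return 0;
}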
+ ***************************************************************************/ #if !TARGET_OS_WIN32 #pragma mark Framework Callback Routines #endif -static void _Block_retain_object_default(const void *ptr __unused) { } +static void +_Block_retain_object_default(const void *ptr __unused) +{ +} -static void _Block_release_object_default(const void *ptr __unused) { } +static void +_Block_release_object_default(const void *ptr __unused) +{ +} -static void _Block_destructInstance_default(const void *aBlock __unused) {} +static void +_Block_destructInstance_default(const void *aBlock __unused) +{ +} static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default; static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default; @@ -134,101 +151,120 @@ static void (*_Block_destructInstance) (const void *aBlock) = _Block_destructIns /************************************************************************** -Callback registration from ObjC runtime and CoreFoundation -***************************************************************************/ + * Callback registration from ObjC runtime and CoreFoundation + ***************************************************************************/ -void _Block_use_RR2(const Block_callbacks_RR *callbacks) { - _Block_retain_object = callbacks->retain; - _Block_release_object = callbacks->release; - _Block_destructInstance = callbacks->destructInstance; +void +_Block_use_RR2(const Block_callbacks_RR *callbacks) +{ + _Block_retain_object = callbacks->retain; + _Block_release_object = callbacks->release; + _Block_destructInstance = callbacks->destructInstance; } /**************************************************************************** -Accessors for block descriptor fields -*****************************************************************************/ + * Accessors for block descriptor fields + *****************************************************************************/ #if 0 -static struct Block_descriptor_1 * _Block_descriptor_1(struct Block_layout *aBlock) +static struct Block_descriptor_1 * +_Block_descriptor_1(struct Block_layout *aBlock) { - return aBlock->descriptor; + return aBlock->descriptor; } #endif -static struct Block_descriptor_2 * _Block_descriptor_2(struct Block_layout *aBlock) +static struct Block_descriptor_2 * +_Block_descriptor_2(struct Block_layout *aBlock) { - if (! (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) return NULL; - uint8_t *desc = (uint8_t *)aBlock->descriptor; - desc += sizeof(struct Block_descriptor_1); - return __IGNORE_WCASTALIGN((struct Block_descriptor_2 *)desc); + if (!(aBlock->flags & BLOCK_HAS_COPY_DISPOSE)) { + return NULL; + } + uint8_t *desc = (uint8_t *)aBlock->descriptor; + desc += sizeof(struct Block_descriptor_1); + return __IGNORE_WCASTALIGN((struct Block_descriptor_2 *)desc); } -static struct Block_descriptor_3 * _Block_descriptor_3(struct Block_layout *aBlock) +static struct Block_descriptor_3 * +_Block_descriptor_3(struct Block_layout *aBlock) { - if (! 
(aBlock->flags & BLOCK_HAS_SIGNATURE)) return NULL; - uint8_t *desc = (uint8_t *)aBlock->descriptor; - desc += sizeof(struct Block_descriptor_1); - if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) { - desc += sizeof(struct Block_descriptor_2); - } - return __IGNORE_WCASTALIGN((struct Block_descriptor_3 *)desc); + if (!(aBlock->flags & BLOCK_HAS_SIGNATURE)) { + return NULL; + } + uint8_t *desc = (uint8_t *)aBlock->descriptor; + desc += sizeof(struct Block_descriptor_1); + if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE) { + desc += sizeof(struct Block_descriptor_2); + } + return __IGNORE_WCASTALIGN((struct Block_descriptor_3 *)desc); } -static void _Block_call_copy_helper(void *result, struct Block_layout *aBlock) +static void +_Block_call_copy_helper(void *result, struct Block_layout *aBlock) { - struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); - if (!desc) return; + struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); + if (!desc) { + return; + } - (*desc->copy)(result, aBlock); // do fixup + (*desc->copy)(result, aBlock); // do fixup } -static void _Block_call_dispose_helper(struct Block_layout *aBlock) +static void +_Block_call_dispose_helper(struct Block_layout *aBlock) { - struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); - if (!desc) return; + struct Block_descriptor_2 *desc = _Block_descriptor_2(aBlock); + if (!desc) { + return; + } - (*desc->dispose)(aBlock); + (*desc->dispose)(aBlock); } /******************************************************************************* -Internal Support routines for copying -********************************************************************************/ + * Internal Support routines for copying + ********************************************************************************/ #if !TARGET_OS_WIN32 #pragma mark Copy/Release support #endif // Copy, or bump refcount, of a block. If really copying, call the copy helper if present. -void *_Block_copy(const void *arg) { - struct Block_layout *aBlock; - - if (!arg) return NULL; - - // The following would be better done as a switch statement - aBlock = (struct Block_layout *)arg; - if (aBlock->flags & BLOCK_NEEDS_FREE) { - // latches on high - latching_incr_int(&aBlock->flags); - return aBlock; - } - else if (aBlock->flags & BLOCK_IS_GLOBAL) { - return aBlock; - } - else { - // Its a stack block. Make a copy. - struct Block_layout *result = (typeof(result)) malloc(aBlock->descriptor->size); - if (!result) return NULL; - memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first +void * +_Block_copy(const void *arg) +{ + struct Block_layout *aBlock; + + if (!arg) { + return NULL; + } + + // The following would be better done as a switch statement + aBlock = (struct Block_layout *)arg; + if (aBlock->flags & BLOCK_NEEDS_FREE) { + // latches on high + latching_incr_int(&aBlock->flags); + return aBlock; + } else if (aBlock->flags & BLOCK_IS_GLOBAL) { + return aBlock; + } else { + // Its a stack block. Make a copy. + struct Block_layout *result = (typeof(result))malloc(aBlock->descriptor->size); + if (!result) { + return NULL; + } + memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first #if __has_feature(ptrauth_calls) - // Resign the invoke pointer as it uses address authentication. - result->invoke = aBlock->invoke; + // Resign the invoke pointer as it uses address authentication. 
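// Why a plain assignment suffices here: on arm64e the invoke pointer is
// signed with address diversity, i.e. the PAC signature mixes in the
// address of the slot the pointer is stored in. The memmove() above
// copied raw bits, so `result` holds a signature minted for aBlock's
// slot, which would fail to authenticate at its new address. The typed
// assignment below lets the compiler authenticate the value against the
// source slot and re-sign it for the destination slot.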
+ result->invoke = aBlock->invoke; #endif - // reset refcount - result->flags &= ~(BLOCK_REFCOUNT_MASK|BLOCK_DEALLOCATING); // XXX not needed - result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1 - _Block_call_copy_helper(result, aBlock); - // Set isa last so memory analysis tools see a fully-initialized object. - result->isa = _NSConcreteMallocBlock; - return result; - } + // reset refcount + result->flags &= ~(BLOCK_REFCOUNT_MASK | BLOCK_DEALLOCATING); // XXX not needed + result->flags |= BLOCK_NEEDS_FREE | 2; // logical refcount 1 + _Block_call_copy_helper(result, aBlock); + // Set isa last so memory analysis tools see a fully-initialized object. + result->isa = _NSConcreteMallocBlock; + return result; + } } @@ -238,66 +274,69 @@ void *_Block_copy(const void *arg) { // Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr. // We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment and return it. // Otherwise we need to copy it and update the stack forwarding pointer -static struct Block_byref *_Block_byref_copy(const void *arg) { - struct Block_byref *src = (struct Block_byref *)arg; - - if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) { - // src points to stack - struct Block_byref *copy = (struct Block_byref *)malloc(src->size); - copy->isa = NULL; - // byref value 4 is logical refcount of 2: one for caller, one for stack - copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4; - copy->forwarding = copy; // patch heap copy to point to itself - src->forwarding = copy; // patch stack to point to heap copy - copy->size = src->size; - - if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) { - // Trust copy helper to copy everything of interest - // If more than one field shows up in a byref block this is wrong XXX - struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src+1); - struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy+1); - copy2->byref_keep = src2->byref_keep; - copy2->byref_destroy = src2->byref_destroy; - - if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) { - struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2+1); - struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2+1); - copy3->layout = src3->layout; - } - - (*src2->byref_keep)(copy, src); - } - else { - // Bitwise copy. - // This copy includes Block_byref_3, if any. 
- memmove(copy+1, src+1, src->size - sizeof(*src)); - } - } - // already copied to heap - else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) { - latching_incr_int(&src->forwarding->flags); - } - - return src->forwarding; +static struct Block_byref * +_Block_byref_copy(const void *arg) +{ + struct Block_byref *src = (struct Block_byref *)arg; + + if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) { + // src points to stack + struct Block_byref *copy = (struct Block_byref *)malloc(src->size); + copy->isa = NULL; + // byref value 4 is logical refcount of 2: one for caller, one for stack + copy->flags = src->flags | BLOCK_BYREF_NEEDS_FREE | 4; + copy->forwarding = copy; // patch heap copy to point to itself + src->forwarding = copy; // patch stack to point to heap copy + copy->size = src->size; + + if (src->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) { + // Trust copy helper to copy everything of interest + // If more than one field shows up in a byref block this is wrong XXX + struct Block_byref_2 *src2 = (struct Block_byref_2 *)(src + 1); + struct Block_byref_2 *copy2 = (struct Block_byref_2 *)(copy + 1); + copy2->byref_keep = src2->byref_keep; + copy2->byref_destroy = src2->byref_destroy; + + if (src->flags & BLOCK_BYREF_LAYOUT_EXTENDED) { + struct Block_byref_3 *src3 = (struct Block_byref_3 *)(src2 + 1); + struct Block_byref_3 *copy3 = (struct Block_byref_3*)(copy2 + 1); + copy3->layout = src3->layout; + } + + (*src2->byref_keep)(copy, src); + } else { + // Bitwise copy. + // This copy includes Block_byref_3, if any. + memmove(copy + 1, src + 1, src->size - sizeof(*src)); + } + } + // already copied to heap + else if ((src->forwarding->flags & BLOCK_BYREF_NEEDS_FREE) == BLOCK_BYREF_NEEDS_FREE) { + latching_incr_int(&src->forwarding->flags); + } + + return src->forwarding; } -static void _Block_byref_release(const void *arg) { - struct Block_byref *byref = (struct Block_byref *)arg; - - // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?) - byref = byref->forwarding; - - if (byref->flags & BLOCK_BYREF_NEEDS_FREE) { - __assert_only int32_t refcount = byref->flags & BLOCK_REFCOUNT_MASK; - os_assert(refcount); - if (latching_decr_int_should_deallocate(&byref->flags)) { - if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) { - struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref+1); - (*byref2->byref_destroy)(byref); - } - free(byref); - } - } +static void +_Block_byref_release(const void *arg) +{ + struct Block_byref *byref = (struct Block_byref *)arg; + + // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?) + byref = byref->forwarding; + + if (byref->flags & BLOCK_BYREF_NEEDS_FREE) { + __assert_only int32_t refcount = byref->flags & BLOCK_REFCOUNT_MASK; + os_assert(refcount); + if (latching_decr_int_should_deallocate(&byref->flags)) { + if (byref->flags & BLOCK_BYREF_HAS_COPY_DISPOSE) { + struct Block_byref_2 *byref2 = (struct Block_byref_2 *)(byref + 1); + (*byref2->byref_destroy)(byref); + } + free(byref); + } + } } @@ -314,27 +353,39 @@ static void _Block_byref_release(const void *arg) { // API entry point to release a copied Block -void _Block_release(const void *arg) { - struct Block_layout *aBlock = (struct Block_layout *)arg; - if (!aBlock) return; - if (aBlock->flags & BLOCK_IS_GLOBAL) return; - if (! 
(aBlock->flags & BLOCK_NEEDS_FREE)) return; - - if (latching_decr_int_should_deallocate(&aBlock->flags)) { - _Block_call_dispose_helper(aBlock); - _Block_destructInstance(aBlock); - free(aBlock); - } +void +_Block_release(const void *arg) +{ + struct Block_layout *aBlock = (struct Block_layout *)arg; + if (!aBlock) { + return; + } + if (aBlock->flags & BLOCK_IS_GLOBAL) { + return; + } + if (!(aBlock->flags & BLOCK_NEEDS_FREE)) { + return; + } + + if (latching_decr_int_should_deallocate(&aBlock->flags)) { + _Block_call_dispose_helper(aBlock); + _Block_destructInstance(aBlock); + free(aBlock); + } } -bool _Block_tryRetain(const void *arg) { - struct Block_layout *aBlock = (struct Block_layout *)arg; - return latching_incr_int_not_deallocating(&aBlock->flags); +bool +_Block_tryRetain(const void *arg) +{ + struct Block_layout *aBlock = (struct Block_layout *)arg; + return latching_incr_int_not_deallocating(&aBlock->flags); } -bool _Block_isDeallocating(const void *arg) { - struct Block_layout *aBlock = (struct Block_layout *)arg; - return (aBlock->flags & BLOCK_DEALLOCATING) != 0; +bool +_Block_isDeallocating(const void *arg) +{ + struct Block_layout *aBlock = (struct Block_layout *)arg; + return (aBlock->flags & BLOCK_DEALLOCATING) != 0; } @@ -344,190 +395,216 @@ bool _Block_isDeallocating(const void *arg) { * ***********************************************************/ -size_t Block_size(void *aBlock) { - return ((struct Block_layout *)aBlock)->descriptor->size; +size_t +Block_size(void *aBlock) +{ + return ((struct Block_layout *)aBlock)->descriptor->size; } -bool _Block_use_stret(void *aBlock) { - struct Block_layout *layout = (struct Block_layout *)aBlock; +bool +_Block_use_stret(void *aBlock) +{ + struct Block_layout *layout = (struct Block_layout *)aBlock; - int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET; - return (layout->flags & requiredFlags) == requiredFlags; + int requiredFlags = BLOCK_HAS_SIGNATURE | BLOCK_USE_STRET; + return (layout->flags & requiredFlags) == requiredFlags; } // Checks for a valid signature, not merely the BLOCK_HAS_SIGNATURE bit. -bool _Block_has_signature(void *aBlock) { - return _Block_signature(aBlock) ? true : false; +bool +_Block_has_signature(void *aBlock) +{ + return _Block_signature(aBlock) ? 
true : false; } -const char * _Block_signature(void *aBlock) +const char * +_Block_signature(void *aBlock) { - struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); - if (!desc3) return NULL; + struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); + if (!desc3) { + return NULL; + } - return desc3->signature; + return desc3->signature; } -const char * _Block_layout(void *aBlock) +const char * +_Block_layout(void *aBlock) { - // Don't return extended layout to callers expecting old GC layout - struct Block_layout *layout = (struct Block_layout *)aBlock; - if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) return NULL; - - struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); - if (!desc3) return NULL; - - return desc3->layout; + // Don't return extended layout to callers expecting old GC layout + struct Block_layout *layout = (struct Block_layout *)aBlock; + if (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT) { + return NULL; + } + + struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); + if (!desc3) { + return NULL; + } + + return desc3->layout; } -const char * _Block_extended_layout(void *aBlock) +const char * +_Block_extended_layout(void *aBlock) { - // Don't return old GC layout to callers expecting extended layout - struct Block_layout *layout = (struct Block_layout *)aBlock; - if (! (layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) return NULL; - - struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); - if (!desc3) return NULL; - - // Return empty string (all non-object bytes) instead of NULL - // so callers can distinguish "empty layout" from "no layout". - if (!desc3->layout) return ""; - else return desc3->layout; + // Don't return old GC layout to callers expecting extended layout + struct Block_layout *layout = (struct Block_layout *)aBlock; + if (!(layout->flags & BLOCK_HAS_EXTENDED_LAYOUT)) { + return NULL; + } + + struct Block_descriptor_3 *desc3 = _Block_descriptor_3((struct Block_layout *)aBlock); + if (!desc3) { + return NULL; + } + + // Return empty string (all non-object bytes) instead of NULL + // so callers can distinguish "empty layout" from "no layout". + if (!desc3->layout) { + return ""; + } else { + return desc3->layout; + } } #if !TARGET_OS_WIN32 #pragma mark Compiler SPI entry points #endif - -/******************************************************* - -Entry points used by the compiler - the real API! - - -A Block can reference four different kinds of things that require help when the Block is copied to the heap. -1) C++ stack based objects -2) References to Objective-C objects -3) Other Blocks -4) __block variables - -In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest. -The flags parameter of _Block_object_assign and _Block_object_dispose is set to - * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object, - * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and - * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable. 
-If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16) - -So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24. - -When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied. - -So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities: - __block id 128+3 (0x83) - __block (^Block) 128+7 (0x87) - __weak __block id 128+3+16 (0x93) - __weak __block (^Block) 128+7+16 (0x97) - - -********************************************************/ +/******************************************************* + * + * Entry points used by the compiler - the real API! + * + * + * A Block can reference four different kinds of things that require help when the Block is copied to the heap. + * 1) C++ stack based objects + * 2) References to Objective-C objects + * 3) Other Blocks + * 4) __block variables + * + * In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest. + * + * The flags parameter of _Block_object_assign and _Block_object_dispose is set to + * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object, + * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and + * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable. + * If the __block variable is marked weak the compiler also or's in BLOCK_FIELD_IS_WEAK (16) + * + * So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24. + * + * When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied. 
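[Editorial sketch, not part of this patch: roughly the helper pair a compiler might synthesize for a plain __block id variable under MRC. The struct layout and the helper names here are illustrative only; the two support calls and the flag constants are the ones declared by Block_private.h later in this patch.

struct Block_byref_id_object {                  /* illustrative layout */
	void *isa;
	struct Block_byref_id_object *forwarding;
	int flags;                              /* refcount + BLOCK_BYREF_* bits */
	int size;
	void (*byref_keep)(void *, void *);     /* the Block_byref_2 fields */
	void (*byref_destroy)(void *);
	void *captured;                         /* the __block id itself */
};

static void
byref_id_copy(void *dst, void *src)             /* hypothetical name */
{
	/* BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT == 128 + 3 == 0x83 */
	_Block_object_assign(&((struct Block_byref_id_object *)dst)->captured,
	    ((struct Block_byref_id_object *)src)->captured,
	    BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT);
}

static void
byref_id_dispose(void *byref)                   /* hypothetical name */
{
	_Block_object_dispose(((struct Block_byref_id_object *)byref)->captured,
	    BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT);
}
]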
+ * + * So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) or'ed as appropriate and always 128 or'd in, for the following set of possibilities: + * __block id 128+3 (0x83) + * __block (^Block) 128+7 (0x87) + * __weak __block id 128+3+16 (0x93) + * __weak __block (^Block) 128+7+16 (0x97) + * + * + ********************************************************/ // // When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point // to do the assignment. // -void _Block_object_assign(void *destArg, const void *object, const int flags) { - const void **dest = (const void **)destArg; - switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) { - case BLOCK_FIELD_IS_OBJECT: - /******* - id object = ...; - [^{ object; } copy]; - ********/ - - _Block_retain_object(object); - *dest = object; - break; - - case BLOCK_FIELD_IS_BLOCK: - /******* - void (^object)(void) = ...; - [^{ object; } copy]; - ********/ - - *dest = _Block_copy(object); - break; - - case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK: - case BLOCK_FIELD_IS_BYREF: - /******* - // copy the onstack __block container to the heap - // Note this __weak is old GC-weak/MRC-unretained. - // ARC-style __weak is handled by the copy helper directly. - __block ... x; - __weak __block ... x; - [^{ x; } copy]; - ********/ - - *dest = _Block_byref_copy(object); - break; - - case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT: - case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK: - /******* - // copy the actual field held in the __block container - // Note this is MRC unretained __block only. - // ARC retained __block is handled by the copy helper directly. - __block id object; - __block void (^object)(void); - [^{ object; } copy]; - ********/ - - *dest = object; - break; - - case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK: - case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK: - /******* - // copy the actual field held in the __block container - // Note this __weak is old GC-weak/MRC-unretained. - // ARC-style __weak is handled by the copy helper directly. - __weak __block id object; - __weak __block void (^object)(void); - [^{ object; } copy]; - ********/ - - *dest = object; - break; - - default: - break; - } +void +_Block_object_assign(void *destArg, const void *object, const int flags) +{ + const void **dest = (const void **)destArg; + switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) { + case BLOCK_FIELD_IS_OBJECT: + /******* + * id object = ...; + * [^{ object; } copy]; + ********/ + + _Block_retain_object(object); + *dest = object; + break; + + case BLOCK_FIELD_IS_BLOCK: + /******* + * void (^object)(void) = ...; + * [^{ object; } copy]; + ********/ + + *dest = _Block_copy(object); + break; + + case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK: + case BLOCK_FIELD_IS_BYREF: + /******* + * // copy the onstack __block container to the heap + * // Note this __weak is old GC-weak/MRC-unretained. + * // ARC-style __weak is handled by the copy helper directly. + * __block ... x; + * __weak __block ... x; + * [^{ x; } copy]; + ********/ + + *dest = _Block_byref_copy(object); + break; + + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK: + /******* + * // copy the actual field held in the __block container + * // Note this is MRC unretained __block only. + * // ARC retained __block is handled by the copy helper directly. 
+ * __block id object; + * __block void (^object)(void); + * [^{ object; } copy]; + ********/ + + *dest = object; + break; + + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK: + /******* + * // copy the actual field held in the __block container + * // Note this __weak is old GC-weak/MRC-unretained. + * // ARC-style __weak is handled by the copy helper directly. + * __weak __block id object; + * __weak __block void (^object)(void); + * [^{ object; } copy]; + ********/ + + *dest = object; + break; + + default: + break; + } } // When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point // to help dispose of the contents -void _Block_object_dispose(const void *object, const int flags) { - switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) { - case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK: - case BLOCK_FIELD_IS_BYREF: - // get rid of the __block data structure held in a Block - _Block_byref_release(object); - break; - case BLOCK_FIELD_IS_BLOCK: - _Block_release(object); - break; - case BLOCK_FIELD_IS_OBJECT: - _Block_release_object(object); - break; - case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT: - case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK: - case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK: - case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK: - break; - default: - break; - } +void +_Block_object_dispose(const void *object, const int flags) +{ + switch (os_assumes(flags & BLOCK_ALL_COPY_DISPOSE_FLAGS)) { + case BLOCK_FIELD_IS_BYREF | BLOCK_FIELD_IS_WEAK: + case BLOCK_FIELD_IS_BYREF: + // get rid of the __block data structure held in a Block + _Block_byref_release(object); + break; + case BLOCK_FIELD_IS_BLOCK: + _Block_release(object); + break; + case BLOCK_FIELD_IS_OBJECT: + _Block_release_object(object); + break; + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_WEAK: + case BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_WEAK: + break; + default: + break; + } } @@ -537,4 +614,3 @@ static int let_there_be_data = 42; #undef malloc #undef free - diff --git a/libkern/libkern/Block.h b/libkern/libkern/Block.h index 5509250df..52b192b28 100644 --- a/libkern/libkern/Block.h +++ b/libkern/libkern/Block.h @@ -12,7 +12,7 @@ #if !defined(BLOCK_EXPORT) # if defined(__cplusplus) -# define BLOCK_EXPORT extern "C" +# define BLOCK_EXPORT extern "C" # else # define BLOCK_EXPORT extern # endif @@ -31,26 +31,26 @@ extern "C" { // This must be paired with Block_release to recover memory, even when running // under Objective-C Garbage Collection. BLOCK_EXPORT void *_Block_copy(const void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); // Lose the reference, and if heap based and last reference, recover the memory BLOCK_EXPORT void _Block_release(const void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); // Used by the compiler. Do not call this function yourself. BLOCK_EXPORT void _Block_object_assign(void *, const void *, const int) - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); // Used by the compiler. Do not call this function yourself. 
BLOCK_EXPORT void _Block_object_dispose(const void *, const int) - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); // Used by the compiler. Do not use these variables yourself. BLOCK_EXPORT void * _NSConcreteGlobalBlock[32] - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); BLOCK_EXPORT void * _NSConcreteStackBlock[32] - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); #if __cplusplus diff --git a/libkern/libkern/Block_private.h b/libkern/libkern/Block_private.h index d122f92d5..4c24211dc 100644 --- a/libkern/libkern/Block_private.h +++ b/libkern/libkern/Block_private.h @@ -38,24 +38,24 @@ struct Block_byref; #include #endif -#if __has_feature(ptrauth_calls) && __cplusplus < 201103L +#if __has_feature(ptrauth_calls) && __cplusplus < 201103L // C ptrauth or old C++ ptrauth #define _Block_set_function_pointer(field, value) \ ((value) \ ? ((field) = \ - (__typeof__(field)) \ - ptrauth_auth_and_resign((void*)(value), \ - ptrauth_key_function_pointer, 0, \ - ptrauth_key_block_function, &(field))) \ + (__typeof__(field)) \ + ptrauth_auth_and_resign((void*)(value), \ + ptrauth_key_function_pointer, 0, \ + ptrauth_key_block_function, &(field))) \ : ((field) = 0)) #define _Block_get_function_pointer(field) \ ((field) \ ? (__typeof__(field)) \ ptrauth_auth_function((void*)(field), \ - ptrauth_key_block_function, &(field)) \ + ptrauth_key_block_function, &(field)) \ : (__typeof__(field))0) #else @@ -71,7 +71,7 @@ struct Block_byref; #endif -#if __has_feature(ptrauth_calls) && __cplusplus >= 201103L +#if __has_feature(ptrauth_calls) && __cplusplus >= 201103L // StorageSignedFunctionPointer stores a function pointer of type // Fn but signed with the given ptrauth key and with the address of its @@ -79,124 +79,140 @@ struct Block_byref; // Function pointers inside block objects are signed this way. template class StorageSignedFunctionPointer { - uintptr_t bits; - - public: - - // Authenticate function pointer fn as a C function pointer. - // Re-sign it with our key and the storage address as extra data. - // DOES NOT actually write to our storage. - uintptr_t prepareWrite(Fn fn) const - { - if (fn == nullptr) { - return 0; - } else { - return (uintptr_t) - ptrauth_auth_and_resign(fn, ptrauth_key_function_pointer, 0, - Key, &bits); - } - } - - // Authenticate otherBits at otherStorage. - // Re-sign it with our storage address. - // DOES NOT actually write to our storage. - uintptr_t prepareWrite(const StorageSignedFunctionPointer& other) const - { - if (other.bits == 0) { - return 0; - } else { - return (uintptr_t) - ptrauth_auth_and_resign((void*)other.bits, Key, &other.bits, - Key, &bits); - } - } - - // Authenticate ptr as if it were stored at our storage address. - // Re-sign it as a C function pointer. - // DOES NOT actually read from our storage. - Fn completeReadFn(uintptr_t ptr) const - { - if (ptr == 0) { - return nullptr; - } else { - return ptrauth_auth_function((Fn)ptr, Key, &bits); - } - } - - // Authenticate ptr as if it were at our storage address. - // Return it as a dereferenceable pointer. - // DOES NOT actually read from our storage. 
- void* completeReadRaw(uintptr_t ptr) const - { - if (ptr == 0) { - return nullptr; - } else { - return ptrauth_auth_data((void*)ptr, Key, &bits); - } - } - - StorageSignedFunctionPointer() { } - - StorageSignedFunctionPointer(Fn value) - : bits(prepareWrite(value)) { } - - StorageSignedFunctionPointer(const StorageSignedFunctionPointer& value) - : bits(prepareWrite(value)) { } - - StorageSignedFunctionPointer& - operator = (Fn rhs) { - bits = prepareWrite(rhs); - return *this; - } - - StorageSignedFunctionPointer& - operator = (const StorageSignedFunctionPointer& rhs) { - bits = prepareWrite(rhs); - return *this; - } - - operator Fn () const { - return completeReadFn(bits); - } - - explicit operator void* () const { - return completeReadRaw(bits); - } - - explicit operator bool () const { - return completeReadRaw(bits) != nullptr; - } + uintptr_t bits; + +public: + +// Authenticate function pointer fn as a C function pointer. +// Re-sign it with our key and the storage address as extra data. +// DOES NOT actually write to our storage. + uintptr_t + prepareWrite(Fn fn) const + { + if (fn == nullptr) { + return 0; + } else { + return (uintptr_t) + ptrauth_auth_and_resign(fn, ptrauth_key_function_pointer, 0, + Key, &bits); + } + } + +// Authenticate otherBits at otherStorage. +// Re-sign it with our storage address. +// DOES NOT actually write to our storage. + uintptr_t + prepareWrite(const StorageSignedFunctionPointer& other) const + { + if (other.bits == 0) { + return 0; + } else { + return (uintptr_t) + ptrauth_auth_and_resign((void*)other.bits, Key, &other.bits, + Key, &bits); + } + } + +// Authenticate ptr as if it were stored at our storage address. +// Re-sign it as a C function pointer. +// DOES NOT actually read from our storage. + Fn + completeReadFn(uintptr_t ptr) const + { + if (ptr == 0) { + return nullptr; + } else { + return ptrauth_auth_function((Fn)ptr, Key, &bits); + } + } + +// Authenticate ptr as if it were at our storage address. +// Return it as a dereferenceable pointer. +// DOES NOT actually read from our storage. 
+ void* + completeReadRaw(uintptr_t ptr) const + { + if (ptr == 0) { + return nullptr; + } else { + return ptrauth_auth_data((void*)ptr, Key, &bits); + } + } + + StorageSignedFunctionPointer() + { + } + + StorageSignedFunctionPointer(Fn value) + : bits(prepareWrite(value)) + { + } + + StorageSignedFunctionPointer(const StorageSignedFunctionPointer& value) + : bits(prepareWrite(value)) + { + } + + StorageSignedFunctionPointer& + operator =(Fn rhs) + { + bits = prepareWrite(rhs); + return *this; + } + + StorageSignedFunctionPointer& + operator =(const StorageSignedFunctionPointer& rhs) + { + bits = prepareWrite(rhs); + return *this; + } + + operator Fn() const { + return completeReadFn(bits); + } + + explicit + operator void*() const + { + return completeReadRaw(bits); + } + + explicit + operator bool() const + { + return completeReadRaw(bits) != nullptr; + } }; using BlockCopyFunction = StorageSignedFunctionPointer - ; + ; using BlockDisposeFunction = StorageSignedFunctionPointer - ; + ; using BlockInvokeFunction = StorageSignedFunctionPointer - ; + ; using BlockByrefKeepFunction = StorageSignedFunctionPointer - ; + ; using BlockByrefDestroyFunction = StorageSignedFunctionPointer - ; + ; // c++11 and ptrauth_calls #elif !__has_feature(ptrauth_calls) // not ptrauth_calls -typedef void(*BlockCopyFunction)(void *, const void *); -typedef void(*BlockDisposeFunction)(const void *); -typedef void(*BlockInvokeFunction)(void *, ...); -typedef void(*BlockByrefKeepFunction)(struct Block_byref*, struct Block_byref*); -typedef void(*BlockByrefDestroyFunction)(struct Block_byref *); +typedef void (*BlockCopyFunction)(void *, const void *); +typedef void (*BlockDisposeFunction)(const void *); +typedef void (*BlockInvokeFunction)(void *, ...); +typedef void (*BlockByrefKeepFunction)(struct Block_byref*, struct Block_byref*); +typedef void (*BlockByrefDestroyFunction)(struct Block_byref *); #else // ptrauth_calls but not c++11 @@ -212,83 +228,83 @@ typedef uintptr_t BlockByrefDestroyFunction; // Values for Block_layout->flags to describe block objects enum { - BLOCK_DEALLOCATING = (0x0001), // runtime - BLOCK_REFCOUNT_MASK = (0xfffe), // runtime - BLOCK_NEEDS_FREE = (1 << 24), // runtime - BLOCK_HAS_COPY_DISPOSE = (1 << 25), // compiler - BLOCK_HAS_CTOR = (1 << 26), // compiler: helpers have C++ code - BLOCK_IS_GC = (1 << 27), // runtime - BLOCK_IS_GLOBAL = (1 << 28), // compiler - BLOCK_USE_STRET = (1 << 29), // compiler: undefined if !BLOCK_HAS_SIGNATURE - BLOCK_HAS_SIGNATURE = (1 << 30), // compiler - BLOCK_HAS_EXTENDED_LAYOUT=(1 << 31) // compiler + BLOCK_DEALLOCATING = (0x0001),// runtime + BLOCK_REFCOUNT_MASK = (0xfffe),// runtime + BLOCK_NEEDS_FREE = (1 << 24),// runtime + BLOCK_HAS_COPY_DISPOSE = (1 << 25),// compiler + BLOCK_HAS_CTOR = (1 << 26),// compiler: helpers have C++ code + BLOCK_IS_GC = (1 << 27),// runtime + BLOCK_IS_GLOBAL = (1 << 28),// compiler + BLOCK_USE_STRET = (1 << 29),// compiler: undefined if !BLOCK_HAS_SIGNATURE + BLOCK_HAS_SIGNATURE = (1 << 30),// compiler + BLOCK_HAS_EXTENDED_LAYOUT=(1 << 31) // compiler }; #define BLOCK_DESCRIPTOR_1 1 struct Block_descriptor_1 { - uintptr_t reserved; - uintptr_t size; + uintptr_t reserved; + uintptr_t size; }; #define BLOCK_DESCRIPTOR_2 1 struct Block_descriptor_2 { - // requires BLOCK_HAS_COPY_DISPOSE - BlockCopyFunction copy; - BlockDisposeFunction dispose; + // requires BLOCK_HAS_COPY_DISPOSE + BlockCopyFunction copy; + BlockDisposeFunction dispose; }; #define BLOCK_DESCRIPTOR_3 1 struct Block_descriptor_3 { - // requires 
BLOCK_HAS_SIGNATURE - const char *signature; - const char *layout; // contents depend on BLOCK_HAS_EXTENDED_LAYOUT + // requires BLOCK_HAS_SIGNATURE + const char *signature; + const char *layout; // contents depend on BLOCK_HAS_EXTENDED_LAYOUT }; struct Block_layout { - void *isa; - volatile int32_t flags; // contains ref count - int32_t reserved; - BlockInvokeFunction invoke; - struct Block_descriptor_1 *descriptor; - // imported variables + void *isa; + volatile int32_t flags; // contains ref count + int32_t reserved; + BlockInvokeFunction invoke; + struct Block_descriptor_1 *descriptor; + // imported variables }; // Values for Block_byref->flags to describe __block variables enum { - // Byref refcount must use the same bits as Block_layout's refcount. - // BLOCK_DEALLOCATING = (0x0001), // runtime - // BLOCK_REFCOUNT_MASK = (0xfffe), // runtime + // Byref refcount must use the same bits as Block_layout's refcount. + // BLOCK_DEALLOCATING = (0x0001), // runtime + // BLOCK_REFCOUNT_MASK = (0xfffe), // runtime - BLOCK_BYREF_LAYOUT_MASK = (0xf << 28), // compiler - BLOCK_BYREF_LAYOUT_EXTENDED = ( 1 << 28), // compiler - BLOCK_BYREF_LAYOUT_NON_OBJECT = ( 2 << 28), // compiler - BLOCK_BYREF_LAYOUT_STRONG = ( 3 << 28), // compiler - BLOCK_BYREF_LAYOUT_WEAK = ( 4 << 28), // compiler - BLOCK_BYREF_LAYOUT_UNRETAINED = ( 5 << 28), // compiler + BLOCK_BYREF_LAYOUT_MASK = (0xf << 28),// compiler + BLOCK_BYREF_LAYOUT_EXTENDED = (1 << 28),// compiler + BLOCK_BYREF_LAYOUT_NON_OBJECT = (2 << 28), // compiler + BLOCK_BYREF_LAYOUT_STRONG = (3 << 28),// compiler + BLOCK_BYREF_LAYOUT_WEAK = (4 << 28),// compiler + BLOCK_BYREF_LAYOUT_UNRETAINED = (5 << 28), // compiler - BLOCK_BYREF_IS_GC = ( 1 << 27), // runtime + BLOCK_BYREF_IS_GC = (1 << 27),// runtime - BLOCK_BYREF_HAS_COPY_DISPOSE = ( 1 << 25), // compiler - BLOCK_BYREF_NEEDS_FREE = ( 1 << 24), // runtime + BLOCK_BYREF_HAS_COPY_DISPOSE = (1 << 25),// compiler + BLOCK_BYREF_NEEDS_FREE = (1 << 24),// runtime }; struct Block_byref { - void *isa; - struct Block_byref *forwarding; - volatile int32_t flags; // contains ref count - uint32_t size; + void *isa; + struct Block_byref *forwarding; + volatile int32_t flags; // contains ref count + uint32_t size; }; struct Block_byref_2 { - // requires BLOCK_BYREF_HAS_COPY_DISPOSE - BlockByrefKeepFunction byref_keep; - BlockByrefDestroyFunction byref_destroy; + // requires BLOCK_BYREF_HAS_COPY_DISPOSE + BlockByrefKeepFunction byref_keep; + BlockByrefDestroyFunction byref_destroy; }; struct Block_byref_3 { - // requires BLOCK_BYREF_LAYOUT_EXTENDED - const char *layout; + // requires BLOCK_BYREF_LAYOUT_EXTENDED + const char *layout; }; @@ -297,32 +313,32 @@ struct Block_byref_3 { // Values for Block_descriptor_3->layout with BLOCK_HAS_EXTENDED_LAYOUT // and for Block_byref_3->layout with BLOCK_BYREF_LAYOUT_EXTENDED -// If the layout field is less than 0x1000, then it is a compact encoding -// of the form 0xXYZ: X strong pointers, then Y byref pointers, +// If the layout field is less than 0x1000, then it is a compact encoding +// of the form 0xXYZ: X strong pointers, then Y byref pointers, // then Z weak pointers. -// If the layout field is 0x1000 or greater, it points to a +// If the layout field is 0x1000 or greater, it points to a // string of layout bytes. Each byte is of the form 0xPN. // Operator P is from the list below. Value N is a parameter for the operator. // Byte 0x00 terminates the layout; remaining block data is non-pointer bytes. enum { - BLOCK_LAYOUT_ESCAPE = 0, // N=0 halt, rest is non-pointer. 
N!=0 reserved. - BLOCK_LAYOUT_NON_OBJECT_BYTES = 1, // N bytes non-objects - BLOCK_LAYOUT_NON_OBJECT_WORDS = 2, // N words non-objects - BLOCK_LAYOUT_STRONG = 3, // N words strong pointers - BLOCK_LAYOUT_BYREF = 4, // N words byref pointers - BLOCK_LAYOUT_WEAK = 5, // N words weak pointers - BLOCK_LAYOUT_UNRETAINED = 6, // N words unretained pointers - BLOCK_LAYOUT_UNKNOWN_WORDS_7 = 7, // N words, reserved - BLOCK_LAYOUT_UNKNOWN_WORDS_8 = 8, // N words, reserved - BLOCK_LAYOUT_UNKNOWN_WORDS_9 = 9, // N words, reserved - BLOCK_LAYOUT_UNKNOWN_WORDS_A = 0xA, // N words, reserved - BLOCK_LAYOUT_UNUSED_B = 0xB, // unspecified, reserved - BLOCK_LAYOUT_UNUSED_C = 0xC, // unspecified, reserved - BLOCK_LAYOUT_UNUSED_D = 0xD, // unspecified, reserved - BLOCK_LAYOUT_UNUSED_E = 0xE, // unspecified, reserved - BLOCK_LAYOUT_UNUSED_F = 0xF, // unspecified, reserved + BLOCK_LAYOUT_ESCAPE = 0, // N=0 halt, rest is non-pointer. N!=0 reserved. + BLOCK_LAYOUT_NON_OBJECT_BYTES = 1, // N bytes non-objects + BLOCK_LAYOUT_NON_OBJECT_WORDS = 2, // N words non-objects + BLOCK_LAYOUT_STRONG = 3,// N words strong pointers + BLOCK_LAYOUT_BYREF = 4,// N words byref pointers + BLOCK_LAYOUT_WEAK = 5,// N words weak pointers + BLOCK_LAYOUT_UNRETAINED = 6,// N words unretained pointers + BLOCK_LAYOUT_UNKNOWN_WORDS_7 = 7,// N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_8 = 8,// N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_9 = 9,// N words, reserved + BLOCK_LAYOUT_UNKNOWN_WORDS_A = 0xA,// N words, reserved + BLOCK_LAYOUT_UNUSED_B = 0xB,// unspecified, reserved + BLOCK_LAYOUT_UNUSED_C = 0xC,// unspecified, reserved + BLOCK_LAYOUT_UNUSED_D = 0xD,// unspecified, reserved + BLOCK_LAYOUT_UNUSED_E = 0xE,// unspecified, reserved + BLOCK_LAYOUT_UNUSED_F = 0xF,// unspecified, reserved }; @@ -330,18 +346,18 @@ enum { // Values for _Block_object_assign() and _Block_object_dispose() parameters enum { - // see function implementation for a more complete description of these fields and combinations - BLOCK_FIELD_IS_OBJECT = 3, // id, NSObject, __attribute__((NSObject)), block, ... - BLOCK_FIELD_IS_BLOCK = 7, // a block variable - BLOCK_FIELD_IS_BYREF = 8, // the on stack structure holding the __block variable - BLOCK_FIELD_IS_WEAK = 16, // declared __weak, only used in byref copy helpers - BLOCK_BYREF_CALLER = 128, // called from __block (byref) copy/dispose support routines. + // see function implementation for a more complete description of these fields and combinations + BLOCK_FIELD_IS_OBJECT = 3,// id, NSObject, __attribute__((NSObject)), block, ... + BLOCK_FIELD_IS_BLOCK = 7,// a block variable + BLOCK_FIELD_IS_BYREF = 8,// the on stack structure holding the __block variable + BLOCK_FIELD_IS_WEAK = 16,// declared __weak, only used in byref copy helpers + BLOCK_BYREF_CALLER = 128,// called from __block (byref) copy/dispose support routines. 
}; enum { - BLOCK_ALL_COPY_DISPOSE_FLAGS = - BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_BYREF | - BLOCK_FIELD_IS_WEAK | BLOCK_BYREF_CALLER + BLOCK_ALL_COPY_DISPOSE_FLAGS = + BLOCK_FIELD_IS_OBJECT | BLOCK_FIELD_IS_BLOCK | BLOCK_FIELD_IS_BYREF | + BLOCK_FIELD_IS_WEAK | BLOCK_BYREF_CALLER }; @@ -350,41 +366,41 @@ enum { static inline __typeof__(void (*)(void *, ...)) _Block_get_invoke_fn(struct Block_layout *block) { - return (void (*)(void *, ...))_Block_get_function_pointer(block->invoke); + return (void (*)(void *, ...))_Block_get_function_pointer(block->invoke); } -static inline void +static inline void _Block_set_invoke_fn(struct Block_layout *block, void (*fn)(void *, ...)) { - _Block_set_function_pointer(block->invoke, fn); + _Block_set_function_pointer(block->invoke, fn); } static inline __typeof__(void (*)(void *, const void *)) _Block_get_copy_fn(struct Block_descriptor_2 *desc) { - return (void (*)(void *, const void *))_Block_get_function_pointer(desc->copy); + return (void (*)(void *, const void *))_Block_get_function_pointer(desc->copy); } -static inline void +static inline void _Block_set_copy_fn(struct Block_descriptor_2 *desc, - void (*fn)(void *, const void *)) + void (*fn)(void *, const void *)) { - _Block_set_function_pointer(desc->copy, fn); + _Block_set_function_pointer(desc->copy, fn); } static inline __typeof__(void (*)(const void *)) _Block_get_dispose_fn(struct Block_descriptor_2 *desc) { - return (void (*)(const void *))_Block_get_function_pointer(desc->dispose); + return (void (*)(const void *))_Block_get_function_pointer(desc->dispose); } -static inline void +static inline void _Block_set_dispose_fn(struct Block_descriptor_2 *desc, - void (*fn)(const void *)) + void (*fn)(const void *)) { - _Block_set_function_pointer(desc->dispose, fn); + _Block_set_function_pointer(desc->dispose, fn); } @@ -396,59 +412,59 @@ BLOCK_EXPORT size_t Block_size(void *aBlock); // indicates whether block was compiled with compiler that sets the ABI related metadata bits BLOCK_EXPORT bool _Block_has_signature(void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); // returns TRUE if return value of block is on the stack, FALSE otherwise BLOCK_EXPORT bool _Block_use_stret(void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); // Returns a string describing the block's parameter and return types. // The encoding scheme is the same as Objective-C @encode. // Returns NULL for blocks compiled with some compilers. BLOCK_EXPORT const char * _Block_signature(void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); // Returns a string describing the block's GC layout. // This uses the GC skip/scan encoding. // May return NULL. BLOCK_EXPORT const char * _Block_layout(void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); // Returns a string describing the block's layout. // This uses the "extended layout" form described above. // May return NULL. 
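[Editorial sketch, not part of this header: how the compact form described above unpacks. decode_compact_block_layout is a hypothetical helper, uintptr_t is assumed from <stdint.h>, and the value passed in is the returned const char * reinterpreted as an integer when it is below 0x1000. The declaration the comment above documents follows immediately below.

static void
decode_compact_block_layout(uintptr_t layout,
    unsigned *strong, unsigned *byref, unsigned *weak)
{
	/* Values below 0x1000 are the compact 0xXYZ form: X strong pointer
	 * words, then Y byref pointer words, then Z weak pointer words. */
	*strong = (unsigned)((layout >> 8) & 0xf);
	*byref  = (unsigned)((layout >> 4) & 0xf);
	*weak   = (unsigned)(layout & 0xf);
}
]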
BLOCK_EXPORT const char * _Block_extended_layout(void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_7_0); +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_7_0); // Callable only from the ARR weak subsystem while in exclusion zone BLOCK_EXPORT bool _Block_tryRetain(const void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); // Callable only from the ARR weak subsystem while in exclusion zone BLOCK_EXPORT bool _Block_isDeallocating(const void *aBlock) - __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); // the raw data space for runtime classes for blocks // class+meta used for stack, malloc, and collectable based blocks BLOCK_EXPORT void * _NSConcreteMallocBlock[32] - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); BLOCK_EXPORT void * _NSConcreteAutoBlock[32] - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32] - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32] - __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); // declared in Block.h // BLOCK_EXPORT void * _NSConcreteGlobalBlock[32]; // BLOCK_EXPORT void * _NSConcreteStackBlock[32]; struct Block_callbacks_RR { - size_t size; // size == sizeof(struct Block_callbacks_RR) - void (*retain)(const void *); - void (*release)(const void *); - void (*destructInstance)(const void *); + size_t size; // size == sizeof(struct Block_callbacks_RR) + void (*retain)(const void *); + void (*release)(const void *); + void (*destructInstance)(const void *); }; typedef struct Block_callbacks_RR Block_callbacks_RR; diff --git a/libkern/libkern/OSAtomic.h b/libkern/libkern/OSAtomic.h index 76b945443..375151929 100644 --- a/libkern/libkern/OSAtomic.h +++ b/libkern/libkern/OSAtomic.h @@ -65,7 +65,7 @@ extern "C" { * This header declares the OSAtomic group of functions for atomic * reading and updating of values. */ - + /*! * @function OSCompareAndSwap64 * @@ -76,9 +76,9 @@ extern "C" { * See OSCompareAndSwap. */ extern Boolean OSCompareAndSwap64( - UInt64 oldValue, - UInt64 newValue, - volatile UInt64 * address); + UInt64 oldValue, + UInt64 newValue, + volatile UInt64 * address); #define OSCompareAndSwap64(a, b, c) \ (OSCompareAndSwap64(a, b, __SAFE_CAST_PTR(volatile UInt64*,c))) @@ -92,8 +92,8 @@ extern Boolean OSCompareAndSwap64( * See OSAddAtomic. */ extern SInt64 OSAddAtomic64( - SInt64 theAmount, - volatile SInt64 * address); + SInt64 theAmount, + volatile SInt64 * address); #define OSAddAtomic64(a, b) \ (OSAddAtomic64(a, __SAFE_CAST_PTR(volatile SInt64*,b))) @@ -105,10 +105,11 @@ extern SInt64 OSAddAtomic64( * * @discussion * See OSIncrementAtomic. -*/ -inline static SInt64 OSIncrementAtomic64(volatile SInt64 * address) + */ +inline static SInt64 +OSIncrementAtomic64(volatile SInt64 * address) { - return OSAddAtomic64(1LL, address); + return OSAddAtomic64(1LL, address); } /*! @@ -119,10 +120,11 @@ inline static SInt64 OSIncrementAtomic64(volatile SInt64 * address) * * @discussion * See OSDecrementAtomic. 
-*/ -inline static SInt64 OSDecrementAtomic64(volatile SInt64 * address) + */ +inline static SInt64 +OSDecrementAtomic64(volatile SInt64 * address) { - return OSAddAtomic64(-1LL, address); + return OSAddAtomic64(-1LL, address); } #if XNU_KERNEL_PRIVATE @@ -137,8 +139,8 @@ inline static SInt64 OSDecrementAtomic64(volatile SInt64 * address) * See OSAddAtomic. */ extern long OSAddAtomicLong( - long theAmount, - volatile long * address); + long theAmount, + volatile long * address); #define OSAddAtomicLong(a, b) \ (OSAddAtomicLong(a, __SAFE_CAST_PTR(volatile long*,b))) @@ -151,10 +153,11 @@ extern long OSAddAtomicLong( * * @discussion * See OSIncrementAtomic. -*/ -inline static long OSIncrementAtomicLong(volatile long * address) + */ +inline static long +OSIncrementAtomicLong(volatile long * address) { - return OSAddAtomicLong(1L, address); + return OSAddAtomicLong(1L, address); } /* Not to be included in headerdoc. @@ -165,9 +168,10 @@ inline static long OSIncrementAtomicLong(volatile long * address) * 32/64-bit decrement, depending on sizeof(long) *@discussion See OSDecrementAtomic. */ -inline static long OSDecrementAtomicLong(volatile long * address) +inline static long +OSDecrementAtomicLong(volatile long * address) { - return OSAddAtomicLong(-1L, address); + return OSAddAtomicLong(-1L, address); } #endif /* XNU_KERNEL_PRIVATE */ @@ -189,9 +193,9 @@ inline static long OSDecrementAtomicLong(volatile long * address) * @result true if newValue was written to the address. */ extern Boolean OSCompareAndSwap8( - UInt8 oldValue, - UInt8 newValue, - volatile UInt8 * address); + UInt8 oldValue, + UInt8 newValue, + volatile UInt8 * address); #define OSCompareAndSwap8(a, b, c) \ (OSCompareAndSwap8(a, b, __SAFE_CAST_PTR(volatile UInt8*,c))) @@ -212,9 +216,9 @@ extern Boolean OSCompareAndSwap8( * @result true if newValue was written to the address. */ extern Boolean OSCompareAndSwap16( - UInt16 oldValue, - UInt16 newValue, - volatile UInt16 * address); + UInt16 oldValue, + UInt16 newValue, + volatile UInt16 * address); #define OSCompareAndSwap16(a, b, c) \ (OSCompareAndSwap16(a, b, __SAFE_CAST_PTR(volatile UInt16*,c))) @@ -237,9 +241,9 @@ extern Boolean OSCompareAndSwap16( * @result true if newValue was written to the address. */ extern Boolean OSCompareAndSwap( - UInt32 oldValue, - UInt32 newValue, - volatile UInt32 * address); + UInt32 oldValue, + UInt32 newValue, + volatile UInt32 * address); #define OSCompareAndSwap(a, b, c) \ (OSCompareAndSwap(a, b, __SAFE_CAST_PTR(volatile UInt32*,c))) @@ -259,9 +263,9 @@ extern Boolean OSCompareAndSwap( * @result true if newValue was written to the address. */ extern Boolean OSCompareAndSwapPtr( - void * oldValue, - void * newValue, - void * volatile * address); + void * oldValue, + void * newValue, + void * volatile * address); #define OSCompareAndSwapPtr(a, b, c) \ (OSCompareAndSwapPtr(a, b, __SAFE_CAST_PTR(void * volatile *,c))) @@ -280,8 +284,8 @@ extern Boolean OSCompareAndSwapPtr( * @result The value before the addition */ extern SInt32 OSAddAtomic( - SInt32 amount, - volatile SInt32 * address); + SInt32 amount, + volatile SInt32 * address); #define OSAddAtomic(a, b) \ (OSAddAtomic(a, __SAFE_CAST_PTR(volatile SInt32*,b))) @@ -299,8 +303,8 @@ extern SInt32 OSAddAtomic( * @result The value before the addition */ extern SInt16 OSAddAtomic16( - SInt32 amount, - volatile SInt16 * address); + SInt32 amount, + volatile SInt16 * address); /*! * @function OSAddAtomic8 @@ -317,8 +321,8 @@ extern SInt16 OSAddAtomic16( * @result The value before the addition. 
*/ extern SInt8 OSAddAtomic8( - SInt32 amount, - volatile SInt8 * address); + SInt32 amount, + volatile SInt8 * address); /*! * @function OSIncrementAtomic @@ -429,8 +433,8 @@ extern SInt8 OSDecrementAtomic8(volatile SInt8 * address); * @result The value before the bitwise operation */ extern UInt32 OSBitAndAtomic( - UInt32 mask, - volatile UInt32 * address); + UInt32 mask, + volatile UInt32 * address); #define OSBitAndAtomic(a, b) \ (OSBitAndAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b))) @@ -449,8 +453,8 @@ extern UInt32 OSBitAndAtomic( * @result The value before the bitwise operation. */ extern UInt16 OSBitAndAtomic16( - UInt32 mask, - volatile UInt16 * address); + UInt32 mask, + volatile UInt16 * address); /*! * @function OSBitAndAtomic8 @@ -467,8 +471,8 @@ extern UInt16 OSBitAndAtomic16( * @result The value before the bitwise operation. */ extern UInt8 OSBitAndAtomic8( - UInt32 mask, - volatile UInt8 * address); + UInt32 mask, + volatile UInt8 * address); /*! * @function OSBitOrAtomic @@ -485,8 +489,8 @@ extern UInt8 OSBitAndAtomic8( * @result The value before the bitwise operation. */ extern UInt32 OSBitOrAtomic( - UInt32 mask, - volatile UInt32 * address); + UInt32 mask, + volatile UInt32 * address); #define OSBitOrAtomic(a, b) \ (OSBitOrAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b))) @@ -505,8 +509,8 @@ extern UInt32 OSBitOrAtomic( * @result The value before the bitwise operation. */ extern UInt16 OSBitOrAtomic16( - UInt32 mask, - volatile UInt16 * address); + UInt32 mask, + volatile UInt16 * address); /*! * @function OSBitOrAtomic8 @@ -523,8 +527,8 @@ extern UInt16 OSBitOrAtomic16( * @result The value before the bitwise operation. */ extern UInt8 OSBitOrAtomic8( - UInt32 mask, - volatile UInt8 * address); + UInt32 mask, + volatile UInt8 * address); /*! * @function OSBitXorAtomic @@ -541,8 +545,8 @@ extern UInt8 OSBitOrAtomic8( * @result The value before the bitwise operation. */ extern UInt32 OSBitXorAtomic( - UInt32 mask, - volatile UInt32 * address); + UInt32 mask, + volatile UInt32 * address); #define OSBitXorAtomic(a, b) \ (OSBitXorAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b))) @@ -561,8 +565,8 @@ extern UInt32 OSBitXorAtomic( * @result The value before the bitwise operation. */ extern UInt16 OSBitXorAtomic16( - UInt32 mask, - volatile UInt16 * address); + UInt32 mask, + volatile UInt16 * address); /*! * @function OSBitXorAtomic8 @@ -579,8 +583,8 @@ extern UInt16 OSBitXorAtomic16( * @result The value before the bitwise operation. */ extern UInt8 OSBitXorAtomic8( - UInt32 mask, - volatile UInt8 * address); + UInt32 mask, + volatile UInt8 * address); /*! * @function OSTestAndSet @@ -597,8 +601,8 @@ extern UInt8 OSBitXorAtomic8( * @result true if the bit was already set, false otherwise. */ extern Boolean OSTestAndSet( - UInt32 bit, - volatile UInt8 * startAddress); + UInt32 bit, + volatile UInt8 * startAddress); /*! * @function OSTestAndClear @@ -615,8 +619,8 @@ extern Boolean OSTestAndSet( * @result true if the bit was already clear, false otherwise. */ extern Boolean OSTestAndClear( - UInt32 bit, - volatile UInt8 * startAddress); + UInt32 bit, + volatile UInt8 * startAddress); /*! * @defined OS_SPINLOCK_INIT @@ -627,9 +631,9 @@ extern Boolean OSTestAndClear( * @discussion * The convention is that unlocked is zero, locked is nonzero. */ -#define OS_SPINLOCK_INIT 0 +#define OS_SPINLOCK_INIT 0 -/*! +/*! * @typedef OSSpinLock * * @abstract @@ -651,7 +655,7 @@ typedef SInt32 OSSpinLock; * Multiprocessor locks used within the shared memory area between the kernel and event system. 
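 * (Editorial sketch, not part of this header.) The try/unlock idiom these
 * primitives support, using OS_SPINLOCK_INIT, OSSpinLockTry, and
 * OSSpinLockUnlock as declared in this file:
 *
 *     static OSSpinLock sEventLock = OS_SPINLOCK_INIT;
 *
 *     if (OSSpinLockTry(&sEventLock)) {
 *         // ... touch the shared kernel/user event data ...
 *         OSSpinLockUnlock(&sEventLock);
 *     } else {
 *         // another thread holds the lock; retry or back off
 *     }
 *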
These must work in both user and kernel mode. * * @result - * Returns false if the lock was already held by another thread, true if it took the lock successfully. + * Returns false if the lock was already held by another thread, true if it took the lock successfully. */ extern Boolean OSSpinLockTry(volatile OSSpinLock * lock); @@ -679,21 +683,26 @@ extern void OSSpinLockUnlock(volatile OSSpinLock * lock); #if defined(__arm__) || defined(__arm64__) extern void OSSynchronizeIO(void); #else -static __inline__ void OSSynchronizeIO(void) +static __inline__ void +OSSynchronizeIO(void) { } #endif -#if defined(KERNEL_PRIVATE) +#if defined(KERNEL_PRIVATE) -#if defined(__arm__) || defined(__arm64__) -static inline void OSMemoryBarrier(void) { - __asm__ volatile("dmb ish" ::: "memory"); +#if defined(__arm__) || defined(__arm64__) +static inline void +OSMemoryBarrier(void) +{ + __asm__ volatile ("dmb ish" ::: "memory"); } #elif defined(__i386__) || defined(__x86_64__) -#if defined(XNU_KERNEL_PRIVATE) -static inline void OSMemoryBarrier(void) { - __asm__ volatile("mfence" ::: "memory"); +#if defined(XNU_KERNEL_PRIVATE) +static inline void +OSMemoryBarrier(void) +{ + __asm__ volatile ("mfence" ::: "memory"); } #endif /* XNU_KERNEL_PRIVATE */ #endif diff --git a/libkern/libkern/OSBase.h b/libkern/libkern/OSBase.h index 651ba8728..7f3aa25c8 100644 --- a/libkern/libkern/OSBase.h +++ b/libkern/libkern/OSBase.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -43,48 +43,48 @@ __BEGIN_DECLS -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE OS_INLINE uint64_t __OSAbsoluteTime( - AbsoluteTime abstime) + AbsoluteTime abstime) { - return (*(uint64_t *)&abstime); + return *(uint64_t *)&abstime; } OS_INLINE uint64_t * __OSAbsoluteTimePtr( - AbsoluteTime *abstime) + AbsoluteTime *abstime) { - return ((uint64_t *)abstime); + return (uint64_t *)abstime; } -#define AbsoluteTime_to_scalar(x) (*(uint64_t *)(x)) +#define AbsoluteTime_to_scalar(x) (*(uint64_t *)(x)) /* t1 < = > t2 */ -#define CMP_ABSOLUTETIME(t1, t2) \ - (AbsoluteTime_to_scalar(t1) > \ - AbsoluteTime_to_scalar(t2)? (int)+1 : \ - (AbsoluteTime_to_scalar(t1) < \ - AbsoluteTime_to_scalar(t2)? (int)-1 : 0)) +#define CMP_ABSOLUTETIME(t1, t2) \ + (AbsoluteTime_to_scalar(t1) > \ + AbsoluteTime_to_scalar(t2)? (int)+1 : \ + (AbsoluteTime_to_scalar(t1) < \ + AbsoluteTime_to_scalar(t2)? 
(int)-1 : 0)) /* t1 += t2 */ -#define ADD_ABSOLUTETIME(t1, t2) \ - (AbsoluteTime_to_scalar(t1) += \ - AbsoluteTime_to_scalar(t2)) +#define ADD_ABSOLUTETIME(t1, t2) \ + (AbsoluteTime_to_scalar(t1) += \ + AbsoluteTime_to_scalar(t2)) /* t1 -= t2 */ -#define SUB_ABSOLUTETIME(t1, t2) \ - (AbsoluteTime_to_scalar(t1) -= \ - AbsoluteTime_to_scalar(t2)) +#define SUB_ABSOLUTETIME(t1, t2) \ + (AbsoluteTime_to_scalar(t1) -= \ + AbsoluteTime_to_scalar(t2)) -#define ADD_ABSOLUTETIME_TICKS(t1, ticks) \ - (AbsoluteTime_to_scalar(t1) += \ - (int32_t)(ticks)) +#define ADD_ABSOLUTETIME_TICKS(t1, ticks) \ + (AbsoluteTime_to_scalar(t1) += \ + (int32_t)(ticks)) -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ __END_DECLS diff --git a/libkern/libkern/OSByteOrder.h b/libkern/libkern/OSByteOrder.h index 2a1d1da5d..eed25a420 100644 --- a/libkern/libkern/OSByteOrder.h +++ b/libkern/libkern/OSByteOrder.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,9 +33,9 @@ #include /* Macros for swapping constant values in the preprocessing stage. 
*/ -#define OSSwapConstInt16(x) __DARWIN_OSSwapConstInt16(x) -#define OSSwapConstInt32(x) __DARWIN_OSSwapConstInt32(x) -#define OSSwapConstInt64(x) __DARWIN_OSSwapConstInt64(x) +#define OSSwapConstInt16(x) __DARWIN_OSSwapConstInt16(x) +#define OSSwapConstInt32(x) __DARWIN_OSSwapConstInt32(x) +#define OSSwapConstInt64(x) __DARWIN_OSSwapConstInt64(x) #if defined(__GNUC__) @@ -53,67 +53,68 @@ #endif /* __GNUC__ */ -#define OSSwapInt16(x) __DARWIN_OSSwapInt16(x) -#define OSSwapInt32(x) __DARWIN_OSSwapInt32(x) -#define OSSwapInt64(x) __DARWIN_OSSwapInt64(x) +#define OSSwapInt16(x) __DARWIN_OSSwapInt16(x) +#define OSSwapInt32(x) __DARWIN_OSSwapInt32(x) +#define OSSwapInt64(x) __DARWIN_OSSwapInt64(x) enum { - OSUnknownByteOrder, - OSLittleEndian, - OSBigEndian + OSUnknownByteOrder, + OSLittleEndian, + OSBigEndian }; OS_INLINE int32_t -OSHostByteOrder(void) { +OSHostByteOrder(void) +{ #if defined(__LITTLE_ENDIAN__) - return OSLittleEndian; + return OSLittleEndian; #elif defined(__BIG_ENDIAN__) - return OSBigEndian; + return OSBigEndian; #else - return OSUnknownByteOrder; + return OSUnknownByteOrder; #endif } -#define OSReadBigInt(x, y) OSReadBigInt32(x, y) -#define OSWriteBigInt(x, y, z) OSWriteBigInt32(x, y, z) -#define OSSwapBigToHostInt(x) OSSwapBigToHostInt32(x) -#define OSSwapHostToBigInt(x) OSSwapHostToBigInt32(x) -#define OSReadLittleInt(x, y) OSReadLittleInt32(x, y) -#define OSWriteLittleInt(x, y, z) OSWriteLittleInt32(x, y, z) -#define OSSwapHostToLittleInt(x) OSSwapHostToLittleInt32(x) -#define OSSwapLittleToHostInt(x) OSSwapLittleToHostInt32(x) +#define OSReadBigInt(x, y) OSReadBigInt32(x, y) +#define OSWriteBigInt(x, y, z) OSWriteBigInt32(x, y, z) +#define OSSwapBigToHostInt(x) OSSwapBigToHostInt32(x) +#define OSSwapHostToBigInt(x) OSSwapHostToBigInt32(x) +#define OSReadLittleInt(x, y) OSReadLittleInt32(x, y) +#define OSWriteLittleInt(x, y, z) OSWriteLittleInt32(x, y, z) +#define OSSwapHostToLittleInt(x) OSSwapHostToLittleInt32(x) +#define OSSwapLittleToHostInt(x) OSSwapLittleToHostInt32(x) /* Functions for loading native endian values. */ OS_INLINE uint16_t _OSReadInt16( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - return *(volatile uint16_t *)((uintptr_t)base + byteOffset); + return *(volatile uint16_t *)((uintptr_t)base + byteOffset); } OS_INLINE uint32_t _OSReadInt32( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - return *(volatile uint32_t *)((uintptr_t)base + byteOffset); + return *(volatile uint32_t *)((uintptr_t)base + byteOffset); } OS_INLINE uint64_t _OSReadInt64( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - return *(volatile uint64_t *)((uintptr_t)base + byteOffset); + return *(volatile uint64_t *)((uintptr_t)base + byteOffset); } /* Functions for storing native endian values. 
*/ @@ -121,37 +122,37 @@ _OSReadInt64( OS_INLINE void _OSWriteInt16( - volatile void * base, - uintptr_t byteOffset, - uint16_t data -) + volatile void * base, + uintptr_t byteOffset, + uint16_t data + ) { - *(volatile uint16_t *)((uintptr_t)base + byteOffset) = data; + *(volatile uint16_t *)((uintptr_t)base + byteOffset) = data; } OS_INLINE void _OSWriteInt32( - volatile void * base, - uintptr_t byteOffset, - uint32_t data -) + volatile void * base, + uintptr_t byteOffset, + uint32_t data + ) { - *(volatile uint32_t *)((uintptr_t)base + byteOffset) = data; + *(volatile uint32_t *)((uintptr_t)base + byteOffset) = data; } OS_INLINE void _OSWriteInt64( - volatile void * base, - uintptr_t byteOffset, - uint64_t data -) + volatile void * base, + uintptr_t byteOffset, + uint64_t data + ) { - *(volatile uint64_t *)((uintptr_t)base + byteOffset) = data; + *(volatile uint64_t *)((uintptr_t)base + byteOffset) = data; } -#if defined(__BIG_ENDIAN__) +#if defined(__BIG_ENDIAN__) /* Functions for loading big endian to host endianess. */ @@ -192,8 +193,8 @@ _OSWriteInt64( /* Host endianess to little endian byte swapping macros for constants. */ #define OSSwapHostToLittleConstInt16(x) OSSwapConstInt16(x) -#define OSSwapHostToLittleConstInt32(x) OSSwapConstInt32(x) -#define OSSwapHostToLittleConstInt64(x) OSSwapConstInt64(x) +#define OSSwapHostToLittleConstInt32(x) OSSwapConstInt32(x) +#define OSSwapHostToLittleConstInt64(x) OSSwapConstInt64(x) /* Generic host endianess to little endian byte swapping functions. */ @@ -202,7 +203,7 @@ _OSWriteInt64( #define OSSwapHostToLittleInt64(x) OSSwapInt64(x) /* Big endian to host endianess byte swapping macros for constants. */ - + #define OSSwapBigToHostConstInt16(x) ((uint16_t)(x)) #define OSSwapBigToHostConstInt32(x) ((uint32_t)(x)) #define OSSwapBigToHostConstInt64(x) ((uint64_t)(x)) @@ -214,7 +215,7 @@ _OSWriteInt64( #define OSSwapBigToHostInt64(x) ((uint64_t)(x)) /* Little endian to host endianess byte swapping macros for constants. */ - + #define OSSwapLittleToHostConstInt16(x) OSSwapConstInt16(x) #define OSSwapLittleToHostConstInt32(x) OSSwapConstInt32(x) #define OSSwapLittleToHostConstInt64(x) OSSwapConstInt64(x) @@ -225,7 +226,7 @@ _OSWriteInt64( #define OSSwapLittleToHostInt32(x) OSSwapInt32(x) #define OSSwapLittleToHostInt64(x) OSSwapInt64(x) -#elif defined(__LITTLE_ENDIAN__) +#elif defined(__LITTLE_ENDIAN__) /* Functions for loading big endian to host endianess. */ @@ -267,7 +268,7 @@ _OSWriteInt64( #define OSSwapHostToLittleConstInt16(x) ((uint16_t)(x)) #define OSSwapHostToLittleConstInt32(x) ((uint32_t)(x)) -#define OSSwapHostToLittleConstInt64(x) ((uint64_t)(x)) +#define OSSwapHostToLittleConstInt64(x) ((uint64_t)(x)) /* Generic host endianess to little endian byte swapping functions. */ @@ -304,5 +305,3 @@ _OSWriteInt64( #endif #endif /* ! _OS_OSBYTEORDER_H */ - - diff --git a/libkern/libkern/OSCrossEndian.h b/libkern/libkern/OSCrossEndian.h index 6038319ef..6a5838673 100644 --- a/libkern/libkern/OSCrossEndian.h +++ b/libkern/libkern/OSCrossEndian.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,19 +22,19 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * This private header exports 3 APIs. - * _OSRosettaCheck() - An inline function that returns true if we are + * _OSRosettaCheck() - An inline function that returns true if we are * currently running under Rosetta. * IF_ROSETTA() - Which is used to as a regular conditional * expression that is true only if the current * code is executing in the Rosetta * translation space. - * ROSETTA_ONLY(exprs) - Which is used to create a block code that only + * ROSETTA_ONLY(exprs) - Which is used to create a block code that only * executes if we are running in Rosetta. * * for example @@ -44,7 +44,7 @@ * outdata = OSSwap??(indata); * } * else { - * // Do straight through + * // Do straight through * outdata = indata; * } * @@ -60,15 +60,19 @@ #include -static __inline__ int _OSRosettaCheck(void) { return 0; } +static __inline__ int +_OSRosettaCheck(void) +{ + return 0; +} #define IF_ROSETTA() if (__builtin_expect(_OSRosettaCheck(), 0) ) -#define ROSETTA_ONLY(exprs) \ -do { \ - IF_ROSETTA() { \ - exprs \ - } \ +#define ROSETTA_ONLY(exprs) \ +do { \ + IF_ROSETTA() { \ + exprs \ + } \ } while(0) #endif /* _LIBKERN_OSCROSSENDIAN_H */ diff --git a/libkern/libkern/OSDebug.h b/libkern/libkern/OSDebug.h index 84611f320..14d65743c 100644 --- a/libkern/libkern/OSDebug.h +++ b/libkern/libkern/OSDebug.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -42,7 +42,7 @@ __BEGIN_DECLS extern int log_leaks; -/* Use kernel_debug() to log a backtrace */ +/* Use kernel_debug() to log a backtrace */ extern void trace_backtrace(unsigned int debugid, unsigned int debugid2, unsigned long size, unsigned long data); /* Report a message with a 4 entry backtrace - very slow */ extern void OSReportWithBacktrace(const char *str, ...); @@ -52,18 +52,18 @@ extern unsigned OSBacktrace(void **bt, unsigned maxAddrs); extern void OSPrintBacktrace(void); /*! @function OSKernelStackRemaining - @abstract Returns bytes available below the current stack frame. - @discussion Returns bytes available below the current stack frame. Safe for interrupt or thread context. - @result Approximate byte count available. */ + * @abstract Returns bytes available below the current stack frame. + * @discussion Returns bytes available below the current stack frame. Safe for interrupt or thread context. + * @result Approximate byte count available. */ vm_offset_t OSKernelStackRemaining( void ); __END_DECLS -#define TRACE_MACHLEAKS(a,b,c,d) \ -do { \ - if (log_leaks) \ - trace_backtrace(a,b,c,d); \ +#define TRACE_MACHLEAKS(a, b, c, d) \ +do { \ + if (log_leaks) \ + trace_backtrace(a,b,c,d); \ } while(0) #endif /* !_OS_OSDEBBUG_H */ diff --git a/libkern/libkern/OSKextLib.h b/libkern/libkern/OSKextLib.h index a08218c2e..2729d3c83 100644 --- a/libkern/libkern/OSKextLib.h +++ b/libkern/libkern/OSKextLib.h @@ -402,59 +402,59 @@ __BEGIN_DECLS */ /*! -* @define kOSKextKernelIdentifier -* @abstract -* This is the CFBundleIdentifier user for the kernel itself. -*/ + * @define kOSKextKernelIdentifier + * @abstract + * This is the CFBundleIdentifier user for the kernel itself. + */ #define kOSKextKernelIdentifier "__kernel__" /*! -* @define kOSBundleRequiredRoot -* @abstract -* This @link kOSBundleRequiredKey OSBundleRequired@/link -* value indicates that the kext may be needed to mount the root filesystem -* whether starting from a local or a network volume. -*/ + * @define kOSBundleRequiredRoot + * @abstract + * This @link kOSBundleRequiredKey OSBundleRequired@/link + * value indicates that the kext may be needed to mount the root filesystem + * whether starting from a local or a network volume. + */ #define kOSBundleRequiredRoot "Root" /*! -* @define kOSBundleRequiredLocalRoot -* @abstract -* This @link kOSBundleRequiredKey OSBundleRequired@/link -* value indicates that the kext may be needed to mount the root filesystem -* when starting from a local disk. -*/ + * @define kOSBundleRequiredLocalRoot + * @abstract + * This @link kOSBundleRequiredKey OSBundleRequired@/link + * value indicates that the kext may be needed to mount the root filesystem + * when starting from a local disk. + */ #define kOSBundleRequiredLocalRoot "Local-Root" /*! -* @define kOSBundleRequiredNetworkRoot -* @abstract -* This @link kOSBundleRequiredKey OSBundleRequired@/link -* value indicates that the kext may be needed to mount the root filesystem -* when starting over a network connection. -*/ + * @define kOSBundleRequiredNetworkRoot + * @abstract + * This @link kOSBundleRequiredKey OSBundleRequired@/link + * value indicates that the kext may be needed to mount the root filesystem + * when starting over a network connection. + */ #define kOSBundleRequiredNetworkRoot "Network-Root" /*! -* @define kOSBundleRequiredSafeBoot -* @abstract -* This @link kOSBundleRequiredKey OSBundleRequired@/link -* value indicates that the kext can be loaded during a safe startup. 
-* This value does not normally cause the kext to be read by the booter -* or included in startup kext caches. -*/ + * @define kOSBundleRequiredSafeBoot + * @abstract + * This @link kOSBundleRequiredKey OSBundleRequired@/link + * value indicates that the kext can be loaded during a safe startup. + * This value does not normally cause the kext to be read by the booter + * or included in startup kext caches. + */ #define kOSBundleRequiredSafeBoot "Safe Boot" /*! -* @define kOSBundleRequiredConsole -* @abstract -* This @link kOSBundleRequiredKey OSBundleRequired@/link -* value indicates that the kext may be needed for console access -* (specifically in a single-user startup when -* @link //apple_ref/doc/man/8/kextd kextd(8)@/link. -* does not run) -* and should be loaded during early startup. -*/ + * @define kOSBundleRequiredConsole + * @abstract + * This @link kOSBundleRequiredKey OSBundleRequired@/link + * value indicates that the kext may be needed for console access + * (specifically in a single-user startup when + * @link //apple_ref/doc/man/8/kextd kextd(8)@/link. + * does not run) + * and should be loaded during early startup. + */ #define kOSBundleRequiredConsole "Console" @@ -574,7 +574,7 @@ const char * OSKextGetCurrentVersionString(void); * @group Kext Loading C Functions * Functions for loading and tracking kexts in the kernel. */ - + /*! * @function OSKextLoadKextWithIdentifier * @@ -753,11 +753,11 @@ typedef uint32_t OSKextRequestTag; * OSKextRequestResource@/link. */ typedef void (* OSKextRequestResourceCallback)( - OSKextRequestTag requestTag, - OSReturn result, - const void * resourceData, - uint32_t resourceDataLength, - void * context); + OSKextRequestTag requestTag, + OSReturn result, + const void * resourceData, + uint32_t resourceDataLength, + void * context); /*! * @function OSKextRequestResource @@ -843,11 +843,11 @@ typedef void (* OSKextRequestResourceCallback)( * outside of the stop function. */ OSReturn OSKextRequestResource( - const char * kextIdentifier, - const char * resourceName, - OSKextRequestResourceCallback callback, - void * context, - OSKextRequestTag * requestTagOut); + const char * kextIdentifier, + const char * resourceName, + OSKextRequestResourceCallback callback, + void * context, + OSKextRequestTag * requestTagOut); /*! * @function OSKextCancelRequest @@ -881,8 +881,8 @@ OSReturn OSKextRequestResource( * before the stop function is called. */ OSReturn OSKextCancelRequest( - OSKextRequestTag requestTag, - void ** contextOut); + OSKextRequestTag requestTag, + void ** contextOut); /*! @@ -907,11 +907,11 @@ OSReturn OSKextCancelRequest( */ int OSKextGrabPgoData(uuid_t uuid, - uint64_t *pSize, - char *pBuffer, - uint64_t bufferSize, - int wait_for_unload, - int metadata); + uint64_t *pSize, + char *pBuffer, + uint64_t bufferSize, + int wait_for_unload, + int metadata); /*! * @function OSKextResetPgoCountersLock @@ -955,11 +955,11 @@ OSKextResetPgoCounters(void); /*! * @var gOSKextUnresolved * - * @abstract + * @abstract * The value to which a kext's unresolved, weakly-referenced symbols are bound. * * @discussion - * A kext must test a weak symbol before using it. A weak symbol + * A kext must test a weak symbol before using it. A weak symbol * is only safe to use if it is not equal to gOSKextUnresolved. * * Example for a weak symbol named foo: @@ -978,18 +978,18 @@ extern const void * gOSKextUnresolved; /*! * @define OSKextSymbolIsResolved * - * @abstract + * @abstract * Checks whether a weakly-referenced symbol has been resolved. 
* * @param weak_sym The weak symbol to be tested for resolution. * - * @result - * TRUE if weak_sym is resolved, or FALSE + * @result + * TRUE if weak_sym is resolved, or FALSE * if weak_sym is unresolved. * * @discussion * This is a convenience macro for testing if weak symbols are resolved. - * + * * Example for a weak symbol named foo: *
  * @textblock
@@ -1017,9 +1017,9 @@ extern const void * gOSKextUnresolved;
 // Kernel External Components for FIPS compliance (KEC_FIPS)
 // WARNING - ath_hash is owned by the kernel, do not free
 typedef struct AppleTEXTHash {
-    int       		ath_version;    // version of this structure (value is 1 or 2)
-    int             ath_length;     // length of hash data
-    void *          ath_hash;       // hash extracted from AppleTextHashes dict 
+	int                 ath_version;// version of this structure (value is 1 or 2)
+	int             ath_length; // length of hash data
+	void *          ath_hash;   // hash extracted from AppleTextHashes dict
 } AppleTEXTHash_t;
 #endif // CONFIG_KEC_FIPS
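The AppleTEXTHash structure above is the record exchanged for KEC_FIPS text-hash checks. As a rough illustration of the field semantics in its comments only (the surrounding request flow is not part of this patch, and the version value chosen here is an assumption), a caller might prepare the record like so:

    AppleTEXTHash_t ath;
    ath.ath_version = 2;    /* structure version; the header allows 1 or 2 (assumed here) */
    ath.ath_length  = 0;    /* length of the hash data, filled in with the hash */
    ath.ath_hash    = NULL; /* later points into the AppleTextHashes dict;
                             * kernel-owned per the WARNING above, never free it */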
 
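The OSKextLib.h hunks above reindent the documentation for gOSKextUnresolved and OSKextSymbolIsResolved, but the header's own @textblock example is truncated in this patch. A minimal sketch of the test it describes, assuming a hypothetical weakly-imported function foo():

    extern kern_return_t foo(void) __attribute__((weak_import)); /* hypothetical weak symbol */

    static void
    call_foo_if_resolved(void)
    {
        /* OSKextSymbolIsResolved(foo) compares &foo against gOSKextUnresolved;
         * a weak symbol must never be called without this check. */
        if (OSKextSymbolIsResolved(foo)) {
            (void) foo();
        } else {
            printf("foo() is not resolved\n");
        }
    }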
diff --git a/libkern/libkern/OSKextLibPrivate.h b/libkern/libkern/OSKextLibPrivate.h
index 147ab96f6..2167f212b 100644
--- a/libkern/libkern/OSKextLibPrivate.h
+++ b/libkern/libkern/OSKextLibPrivate.h
@@ -2,7 +2,7 @@
  * Copyright (c) 1998-2000 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
@@ -78,14 +78,14 @@ typedef uint8_t OSKextExcludeLevel;
  * @abstract A boolean value indicating whether the kext should only load on
  *           Developer devices.
  */
-#define kOSBundleDeveloperOnlyKey		"OSBundleDeveloperOnly"
+#define kOSBundleDeveloperOnlyKey               "OSBundleDeveloperOnly"
 
 /*!
  * @define   kOSBundleRamDiskOnlyKey
  * @abstract A boolean value indicating whether the kext should only load when
  *           booted from a ram disk.
  */
-#define kOSBundleRamDiskOnlyKey		"OSBundleRamDiskOnly"
+#define kOSBundleRamDiskOnlyKey         "OSBundleRamDiskOnly"
 
 
 /*!
@@ -93,14 +93,14 @@ typedef uint8_t OSKextExcludeLevel;
  * @abstract A boolean value indicating whether the kext registers
  *           MACF hooks.
  */
-#define kAppleSecurityExtensionKey	"AppleSecurityExtension"
+#define kAppleSecurityExtensionKey      "AppleSecurityExtension"
 
 /*!
  * @define   kAppleKernelExternalComponentKey
  * @abstract A boolean value indicating whether the kext is vending kernel
  *           KPI, and needs special loading behavior.
  */
-#define kAppleKernelExternalComponentKey	"AppleKernelExternalComponent"
+#define kAppleKernelExternalComponentKey        "AppleKernelExternalComponent"
 
 // properties found in the registry root
 #define kOSKernelCPUTypeKey             "OSKernelCPUType"
@@ -185,7 +185,7 @@ typedef uint8_t OSKextExcludeLevel;
  * <ul>
  *   <li>A level from 0-7 in the lowest-order nibble (0x7).</li>
  *   <li>A flag bit in the lowest-order nibble (0x8) indicating whether
  *       log messages tied to individual kexts are always printed (1)
 - *       or printed only if the kext has an 
 + *       or printed only if the kext has an
  *       @link //apple_ref/c/macro/kOSBundleEnableKextLoggingKey
  *       OSBundleEnableKextLogging@/link set to true.</li>
  • A set of activity flags in the remaining nibbles (0xFFFFFFF0), @@ -576,8 +576,8 @@ typedef uint32_t OSKextLogSpec; * set to true. */ #define kOSKextLogShowAllKextsFilter ((OSKextLogSpec) \ - (kOSKextLogShowAllFilter | \ - kOSKextLogKextOrGlobalMask)) + (kOSKextLogShowAllFilter | \ + kOSKextLogKextOrGlobalMask)) #if PRAGMA_MARK #pragma mark - @@ -589,7 +589,7 @@ typedef uint32_t OSKextLogSpec; * @group Kext Version String Processing * Functions for working with kext versions and compatible versions. */ - + /*! * @typedef OSKextVersion * @abstract An encoded kext version that can be compared arithmetically. @@ -683,9 +683,9 @@ OSKextVersion OSKextParseVersionString(const char * versionString); * the resulting string is "(invalid)". */ Boolean OSKextVersionGetString( - OSKextVersion aVersion, - char * buffer, - uint32_t bufferSize); + OSKextVersion aVersion, + char * buffer, + uint32_t bufferSize); #ifdef KERNEL @@ -792,7 +792,7 @@ void kext_dump_panic_lists(int (*printf_func)(const char *fmt, ...)); * when there are no C++ objects and the kext reference count is zero. */ uint32_t OSKextGetLoadTagForBundleIdentifier( - const char * kextIdentifier); + const char * kextIdentifier); /*! @@ -820,7 +820,7 @@ uint32_t OSKextGetLoadTagForBundleIdentifier( * OSKextRetainKextWithLoadTag@/link * with its own load tag * (the id field of its kmod_info_t struct), - * followed by + * followed by * @link //apple_ref/c/func/OSKextReleaseKextWithLoadTag * OSKextReleaseKextWithLoadTag@/link; * this will schedule the kext for unload on a separate thread. @@ -862,20 +862,20 @@ OSReturn OSKextUnloadKextWithLoadTag(uint32_t loadTag); * @field loadTag The kext's load tag. * @field flags Internal tracking flags. * @field reference_list who this refs (links on). - * + * * @discussion * The OSKextLoadedKextSummary structure contains a basic set of information * about the kext to facilitate kext debugging and panic debug log output. */ typedef struct _loaded_kext_summary { - char name[KMOD_MAX_NAME]; - uuid_t uuid; - uint64_t address; - uint64_t size; - uint64_t version; - uint32_t loadTag; - uint32_t flags; - uint64_t reference_list; + char name[KMOD_MAX_NAME]; + uuid_t uuid; + uint64_t address; + uint64_t size; + uint64_t version; + uint32_t loadTag; + uint32_t flags; + uint64_t reference_list; } OSKextLoadedKextSummary; /*! @@ -896,11 +896,11 @@ typedef struct _loaded_kext_summary { * that declares an executable and is not an interface to the kernel. */ typedef struct _loaded_kext_summary_header { - uint32_t version; - uint32_t entry_size; - uint32_t numSummaries; - uint32_t reserved; /* explicit alignment for gdb */ - OSKextLoadedKextSummary summaries[0]; + uint32_t version; + uint32_t entry_size; + uint32_t numSummaries; + uint32_t reserved; /* explicit alignment for gdb */ + OSKextLoadedKextSummary summaries[0]; } OSKextLoadedKextSummaryHeader; /*! @@ -933,7 +933,7 @@ void OSKextLoadedKextSummariesUpdated(void); extern const vm_allocation_site_t * OSKextGetAllocationSiteForCaller(uintptr_t address); extern uint32_t OSKextGetKmodIDForSite(const vm_allocation_site_t * site, - char * name, vm_size_t namelen); + char * name, vm_size_t namelen); extern void OSKextFreeSite(vm_allocation_site_t * site); #if CONFIG_IMAGEBOOT diff --git a/libkern/libkern/OSMalloc.h b/libkern/libkern/OSMalloc.h index 2d14ce94d..200b99265 100644 --- a/libkern/libkern/OSMalloc.h +++ b/libkern/libkern/OSMalloc.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef LIBKERN_OSMALLOC_h +#ifndef LIBKERN_OSMALLOC_h #define LIBKERN_OSMALLOC_h #include @@ -34,8 +34,8 @@ __BEGIN_DECLS #include -#ifdef MACH_KERNEL_PRIVATE -#include +#ifdef MACH_KERNEL_PRIVATE +#include #endif /*! @@ -58,17 +58,17 @@ __BEGIN_DECLS * None of the OSMalloc functions are safe to call * in a primary interrupt handler. */ - + #ifdef MACH_KERNEL_PRIVATE #define OSMT_MAX_NAME (64) typedef struct _OSMallocTag_ { - queue_chain_t OSMT_link; - uint32_t OSMT_refcnt; - uint32_t OSMT_state; - uint32_t OSMT_attr; - char OSMT_name[OSMT_MAX_NAME]; + queue_chain_t OSMT_link; + uint32_t OSMT_refcnt; + uint32_t OSMT_state; + uint32_t OSMT_attr; + char OSMT_name[OSMT_MAX_NAME]; } * OSMallocTag; #define OSMT_VALID_MASK 0xFFFF0000 @@ -155,8 +155,8 @@ typedef struct __OSMallocTag__ * OSMallocTag_t; * */ extern OSMallocTag OSMalloc_Tagalloc( - const char * name, - uint32_t flags); + const char * name, + uint32_t flags); /*! @@ -199,8 +199,8 @@ extern void OSMalloc_Tagfree(OSMallocTag tag); * otherwise it is wired. */ extern void * OSMalloc( - uint32_t size, - OSMallocTag tag) __attribute__((alloc_size(1))); + uint32_t size, + OSMallocTag tag) __attribute__((alloc_size(1))); /*! @@ -210,8 +210,8 @@ extern void * OSMalloc( * Equivalent to @link OSMalloc_noblock OSMalloc_noblock@/link. */ extern void * OSMalloc_nowait( - uint32_t size, - OSMallocTag tag) __attribute__((alloc_size(1))); + uint32_t size, + OSMallocTag tag) __attribute__((alloc_size(1))); /*! @@ -240,8 +240,8 @@ extern void * OSMalloc_nowait( * This function is guaranteed not to block. */ extern void * OSMalloc_noblock( - uint32_t size, - OSMallocTag tag) __attribute__((alloc_size(1))); + uint32_t size, + OSMallocTag tag) __attribute__((alloc_size(1))); /*! @@ -256,9 +256,9 @@ extern void * OSMalloc_noblock( * with which addr was originally allocated. */ extern void OSFree( - void * addr, - uint32_t size, - OSMallocTag tag); + void * addr, + uint32_t size, + OSMallocTag tag); #ifdef XNU_KERNEL_PRIVATE /*! @@ -270,9 +270,9 @@ extern void OSFree( * @param addr A pointer to the memory block allocated via OSMalloc. 
*/ extern uint32_t OSMalloc_size( - void * addr); + void * addr); #endif /* XNU_KERNEL_PRIVATE */ __END_DECLS -#endif /* LIBKERN_OSMALLOC_h */ +#endif /* LIBKERN_OSMALLOC_h */ diff --git a/libkern/libkern/OSReturn.h b/libkern/libkern/OSReturn.h index 723fd6256..e15e7dac8 100644 --- a/libkern/libkern/OSReturn.h +++ b/libkern/libkern/OSReturn.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998 Apple Inc. All rights reserved. + * Copyright (c) 1998 Apple Inc. All rights reserved. * * HISTORY * */ - + /* * Core OSReturn values. */ @@ -69,7 +69,7 @@ __BEGIN_DECLS * in the Libkern C++ run-time typing information system * based on @link //apple_ref/doc/class/OSMetaClass OSMetaClass@/link; * you are unlikely to ever see them. - * + * */ #ifdef XNU_KERNEL_PRIVATE @@ -93,8 +93,8 @@ typedef kern_return_t OSReturn; #define sub_libkern_metaclass err_sub(1) #define sub_libkern_reserved err_sub(-1) -#define libkern_common_err(return) (sys_libkern|sub_libkern_common|(return)) -#define libkern_metaclass_err(return) (sys_libkern|sub_libkern_metaclass|(return)) +#define libkern_common_err(return ) (sys_libkern|sub_libkern_common|(return)) +#define libkern_metaclass_err(return ) (sys_libkern|sub_libkern_metaclass|(return)) /* See OSKextLib.h for these * #define sub_libkern_kext err_sub(2) @@ -112,7 +112,7 @@ typedef kern_return_t OSReturn; /*! * @define kOSReturnError * @abstract Unspecified Libkern error. - * Not equal to + * Not equal to * @link //apple_ref/c/econst/KERN_FAILURE * KERN_FAILURE@/link. */ diff --git a/libkern/libkern/OSRuntime.h b/libkern/libkern/OSRuntime.h index bf7232a1c..67b7567f9 100644 --- a/libkern/libkern/OSRuntime.h +++ b/libkern/libkern/OSRuntime.h @@ -2,7 +2,7 @@ * Copyright (c) 1999-2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/libkern/libkern/OSSerializeBinary.h b/libkern/libkern/OSSerializeBinary.h index f6d5d3bfb..bab81dba9 100644 --- a/libkern/libkern/OSSerializeBinary.h +++ b/libkern/libkern/OSSerializeBinary.h @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,27 +22,26 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _OS_OSSERIALIZEBINARY_H #define _OS_OSSERIALIZEBINARY_H -enum -{ - kOSSerializeDictionary = 0x01000000U, - kOSSerializeArray = 0x02000000U, - kOSSerializeSet = 0x03000000U, - kOSSerializeNumber = 0x04000000U, - kOSSerializeSymbol = 0x08000000U, - kOSSerializeString = 0x09000000U, - kOSSerializeData = 0x0a000000U, - kOSSerializeBoolean = 0x0b000000U, - kOSSerializeObject = 0x0c000000U, - kOSSerializeTypeMask = 0x7F000000U, - kOSSerializeDataMask = 0x00FFFFFFU, - kOSSerializeEndCollecton = 0x80000000U, +enum{ + kOSSerializeDictionary = 0x01000000U, + kOSSerializeArray = 0x02000000U, + kOSSerializeSet = 0x03000000U, + kOSSerializeNumber = 0x04000000U, + kOSSerializeSymbol = 0x08000000U, + kOSSerializeString = 0x09000000U, + kOSSerializeData = 0x0a000000U, + kOSSerializeBoolean = 0x0b000000U, + kOSSerializeObject = 0x0c000000U, + kOSSerializeTypeMask = 0x7F000000U, + kOSSerializeDataMask = 0x00FFFFFFU, + kOSSerializeEndCollecton = 0x80000000U, }; #define kOSSerializeBinarySignature "\323\0\0" diff --git a/libkern/libkern/OSTypes.h b/libkern/libkern/OSTypes.h index ce87c6443..d04aa8930 100644 --- a/libkern/libkern/OSTypes.h +++ b/libkern/libkern/OSTypes.h @@ -2,7 +2,7 @@ * Copyright (c) 1999-2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,48 +22,48 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if !defined(KERNEL) #include -#endif /* !KERNEL */ +#endif /* !KERNEL */ #ifndef _OS_OSTYPES_H #define _OS_OSTYPES_H -#define OSTYPES_K64_REV 2 +#define OSTYPES_K64_REV 2 -typedef unsigned int UInt; +typedef unsigned int UInt; typedef signed int SInt; #if defined(KERNEL) -typedef unsigned char UInt8; -typedef unsigned short UInt16; +typedef unsigned char UInt8; +typedef unsigned short UInt16; #if __LP64__ typedef unsigned int UInt32; #else typedef unsigned long UInt32; #endif typedef unsigned long long UInt64; -#if defined(__BIG_ENDIAN__) -typedef struct __attribute__((deprecated)) UnsignedWide { - UInt32 hi; - UInt32 lo; -} UnsignedWide __attribute__((deprecated)); -#elif defined(__LITTLE_ENDIAN__) -typedef struct __attribute__((deprecated)) UnsignedWide { - UInt32 lo; - UInt32 hi; -} UnsignedWide __attribute__((deprecated)); +#if defined(__BIG_ENDIAN__) +typedef struct __attribute__((deprecated)) UnsignedWide { + UInt32 hi; + UInt32 lo; +} UnsignedWide __attribute__((deprecated)); +#elif defined(__LITTLE_ENDIAN__) +typedef struct __attribute__((deprecated)) UnsignedWide { + UInt32 lo; + UInt32 hi; +} UnsignedWide __attribute__((deprecated)); #else #error Unknown endianess. 
#endif -typedef signed char SInt8; -typedef signed short SInt16; +typedef signed char SInt8; +typedef signed short SInt16; #if __LP64__ typedef signed int SInt32; #else @@ -71,14 +71,14 @@ typedef signed long SInt32; #endif typedef signed long long SInt64; -typedef SInt32 OSStatus; +typedef SInt32 OSStatus; #ifndef ABSOLUTETIME_SCALAR_TYPE #define ABSOLUTETIME_SCALAR_TYPE 1 #endif -typedef UInt64 AbsoluteTime; +typedef UInt64 AbsoluteTime; -typedef UInt32 OptionBits __attribute__((deprecated)); +typedef UInt32 OptionBits __attribute__((deprecated)); #if defined(__LP64__) /* @@ -88,20 +88,20 @@ typedef UInt32 OptionBits __attribute__((deprecated)); */ #ifdef __cplusplus typedef bool Boolean; -#else /* !__cplusplus */ +#else /* !__cplusplus */ #if defined(__STDC_VERSION__) && ((__STDC_VERSION__ - 199901L) > 0L) /* only use this if we are sure we are using a c99 compiler */ typedef _Bool Boolean; -#else /* !c99 */ +#else /* !c99 */ /* Fall back to previous definition unless c99 */ typedef unsigned char Boolean; -#endif /* !c99 */ -#endif /* !__cplusplus */ -#else /* !__LP64__ */ +#endif /* !c99 */ +#endif /* !__cplusplus */ +#else /* !__LP64__ */ typedef unsigned char Boolean; -#endif /* !__LP64__ */ +#endif /* !__LP64__ */ -#endif /* KERNEL */ +#endif /* KERNEL */ #include diff --git a/libkern/libkern/_OSByteOrder.h b/libkern/libkern/_OSByteOrder.h index 5ffcba282..0b399b97b 100644 --- a/libkern/libkern/_OSByteOrder.h +++ b/libkern/libkern/_OSByteOrder.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -42,23 +42,23 @@ /* Macros for swapping constant values in the preprocessing stage. 
*/ #define __DARWIN_OSSwapConstInt16(x) \ ((__uint16_t)((((__uint16_t)(x) & 0xff00) >> 8) | \ - (((__uint16_t)(x) & 0x00ff) << 8))) + (((__uint16_t)(x) & 0x00ff) << 8))) #define __DARWIN_OSSwapConstInt32(x) \ ((__uint32_t)((((__uint32_t)(x) & 0xff000000) >> 24) | \ - (((__uint32_t)(x) & 0x00ff0000) >> 8) | \ - (((__uint32_t)(x) & 0x0000ff00) << 8) | \ - (((__uint32_t)(x) & 0x000000ff) << 24))) + (((__uint32_t)(x) & 0x00ff0000) >> 8) | \ + (((__uint32_t)(x) & 0x0000ff00) << 8) | \ + (((__uint32_t)(x) & 0x000000ff) << 24))) #define __DARWIN_OSSwapConstInt64(x) \ ((__uint64_t)((((__uint64_t)(x) & 0xff00000000000000ULL) >> 56) | \ - (((__uint64_t)(x) & 0x00ff000000000000ULL) >> 40) | \ - (((__uint64_t)(x) & 0x0000ff0000000000ULL) >> 24) | \ - (((__uint64_t)(x) & 0x000000ff00000000ULL) >> 8) | \ - (((__uint64_t)(x) & 0x00000000ff000000ULL) << 8) | \ - (((__uint64_t)(x) & 0x0000000000ff0000ULL) << 24) | \ - (((__uint64_t)(x) & 0x000000000000ff00ULL) << 40) | \ - (((__uint64_t)(x) & 0x00000000000000ffULL) << 56))) + (((__uint64_t)(x) & 0x00ff000000000000ULL) >> 40) | \ + (((__uint64_t)(x) & 0x0000ff0000000000ULL) >> 24) | \ + (((__uint64_t)(x) & 0x000000ff00000000ULL) >> 8) | \ + (((__uint64_t)(x) & 0x00000000ff000000ULL) << 8) | \ + (((__uint64_t)(x) & 0x0000000000ff0000ULL) << 24) | \ + (((__uint64_t)(x) & 0x000000000000ff00ULL) << 40) | \ + (((__uint64_t)(x) & 0x00000000000000ffULL) << 56))) #if defined(__GNUC__) @@ -97,28 +97,28 @@ __DARWIN_OS_INLINE uint16_t _OSSwapInt16( - uint16_t data -) + uint16_t data + ) { - return __DARWIN_OSSwapConstInt16(data); + return __DARWIN_OSSwapConstInt16(data); } __DARWIN_OS_INLINE uint32_t _OSSwapInt32( - uint32_t data -) + uint32_t data + ) { - return __DARWIN_OSSwapConstInt32(data); + return __DARWIN_OSSwapConstInt32(data); } __DARWIN_OS_INLINE uint64_t _OSSwapInt64( - uint64_t data -) + uint64_t data + ) { - return __DARWIN_OSSwapConstInt64(data); + return __DARWIN_OSSwapConstInt64(data); } #endif diff --git a/libkern/libkern/arm/OSByteOrder.h b/libkern/libkern/arm/OSByteOrder.h index 81279a1f8..e35ea88b6 100644 --- a/libkern/libkern/arm/OSByteOrder.h +++ b/libkern/libkern/arm/OSByteOrder.h @@ -14,49 +14,49 @@ OS_INLINE uint16_t _OSSwapInt16( - uint16_t data -) + uint16_t data + ) { - /* Reduces to 'rev16' with clang */ - return (uint16_t)(data << 8 | data >> 8); + /* Reduces to 'rev16' with clang */ + return (uint16_t)(data << 8 | data >> 8); } OS_INLINE uint32_t _OSSwapInt32( - uint32_t data -) + uint32_t data + ) { #if defined(__llvm__) - data = __builtin_bswap32(data); + data = __builtin_bswap32(data); #else - /* This actually generates the best code */ - data = (((data ^ (data >> 16 | (data << 16))) & 0xFF00FFFF) >> 8) ^ (data >> 8 | data << 24); + /* This actually generates the best code */ + data = (((data ^ (data >> 16 | (data << 16))) & 0xFF00FFFF) >> 8) ^ (data >> 8 | data << 24); #endif - - return data; + + return data; } OS_INLINE uint64_t _OSSwapInt64( - uint64_t data -) + uint64_t data + ) { #if defined(__llvm__) - return __builtin_bswap64(data); + return __builtin_bswap64(data); #else - union { - uint64_t ull; - uint32_t ul[2]; - } u; - - /* This actually generates the best code */ - u.ul[0] = (uint32_t)(data >> 32); - u.ul[1] = (uint32_t)(data & 0xffffffff); - u.ul[0] = _OSSwapInt32(u.ul[0]); - u.ul[1] = _OSSwapInt32(u.ul[1]); - return u.ull; + union { + uint64_t ull; + uint32_t ul[2]; + } u; + + /* This actually generates the best code */ + u.ul[0] = (uint32_t)(data >> 32); + u.ul[1] = (uint32_t)(data & 0xffffffff); + u.ul[0] = 
_OSSwapInt32(u.ul[0]); + u.ul[1] = _OSSwapInt32(u.ul[1]); + return u.ull; #endif } @@ -65,48 +65,48 @@ _OSSwapInt64( OS_INLINE uint16_t OSReadSwapInt16( - const volatile void * base, - uintptr_t offset -) + const volatile void * base, + uintptr_t offset + ) { - uint16_t result; + uint16_t result; - result = *(volatile uint16_t *)((volatile uintptr_t)base + offset); - return _OSSwapInt16(result); + result = *(volatile uint16_t *)((volatile uintptr_t)base + offset); + return _OSSwapInt16(result); } OS_INLINE uint32_t OSReadSwapInt32( - const volatile void * base, - uintptr_t offset -) + const volatile void * base, + uintptr_t offset + ) { - uint32_t result; + uint32_t result; - result = *(volatile uint32_t *)((volatile uintptr_t)base + offset); - return _OSSwapInt32(result); + result = *(volatile uint32_t *)((volatile uintptr_t)base + offset); + return _OSSwapInt32(result); } OS_INLINE uint64_t OSReadSwapInt64( - const volatile void * base, - uintptr_t offset -) + const volatile void * base, + uintptr_t offset + ) { - volatile uint32_t * inp; - union ullc { - uint64_t ull; - uint32_t ul[2]; - } outv; - - inp = (volatile uint32_t *)((volatile uintptr_t)base + offset); - outv.ul[0] = inp[1]; - outv.ul[1] = inp[0]; - outv.ul[0] = _OSSwapInt32(outv.ul[0]); - outv.ul[1] = _OSSwapInt32(outv.ul[1]); - return outv.ull; + volatile uint32_t * inp; + union ullc { + uint64_t ull; + uint32_t ul[2]; + } outv; + + inp = (volatile uint32_t *)((volatile uintptr_t)base + offset); + outv.ul[0] = inp[1]; + outv.ul[1] = inp[0]; + outv.ul[0] = _OSSwapInt32(outv.ul[0]); + outv.ul[1] = _OSSwapInt32(outv.ul[1]); + return outv.ull; } /* Functions for byte reversed stores. */ @@ -114,34 +114,34 @@ OSReadSwapInt64( OS_INLINE void OSWriteSwapInt16( - volatile void * base, - uintptr_t offset, - uint16_t data -) + volatile void * base, + uintptr_t offset, + uint16_t data + ) { - *(volatile uint16_t *)((volatile uintptr_t)base + offset) = _OSSwapInt16(data); + *(volatile uint16_t *)((volatile uintptr_t)base + offset) = _OSSwapInt16(data); } OS_INLINE void OSWriteSwapInt32( - volatile void * base, - uintptr_t offset, - uint32_t data -) + volatile void * base, + uintptr_t offset, + uint32_t data + ) { - *(volatile uint32_t *)((volatile uintptr_t)base + offset) = _OSSwapInt32(data); + *(volatile uint32_t *)((volatile uintptr_t)base + offset) = _OSSwapInt32(data); } OS_INLINE void OSWriteSwapInt64( - volatile void * base, - uintptr_t offset, - uint64_t data -) + volatile void * base, + uintptr_t offset, + uint64_t data + ) { - *(volatile uint64_t *)((volatile uintptr_t)base + offset) = _OSSwapInt64(data); + *(volatile uint64_t *)((volatile uintptr_t)base + offset) = _OSSwapInt64(data); } #endif /* ! _OS_OSBYTEORDERARM_H */ diff --git a/libkern/libkern/c++/OSArray.h b/libkern/libkern/c++/OSArray.h index 39d70ec07..d094b0e08 100644 --- a/libkern/libkern/c++/OSArray.h +++ b/libkern/libkern/c++/OSArray.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOArray.h created by rsulack on Thu 11-Sep-1997 */ @@ -41,8 +41,8 @@ class OSSerialize; * @abstract * This header declares the OSArray collection class. */ - - + + /*! * @class OSArray * @@ -75,7 +75,7 @@ class OSSerialize; * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSArray provides no concurrency protection; @@ -87,652 +87,652 @@ class OSSerialize; */ class OSArray : public OSCollection { - friend class OSSet; - friend class OSSerialize; + friend class OSSet; + friend class OSSerialize; - OSDeclareDefaultStructors(OSArray) + OSDeclareDefaultStructors(OSArray) #if APPLE_KEXT_ALIGN_CONTAINERS protected: - unsigned int count; - unsigned int capacity; - unsigned int capacityIncrement; - const OSMetaClassBase ** array; + unsigned int count; + unsigned int capacity; + unsigned int capacityIncrement; + const OSMetaClassBase ** array; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: - const OSMetaClassBase ** array; - unsigned int count; - unsigned int capacity; - unsigned int capacityIncrement; + const OSMetaClassBase ** array; + unsigned int count; + unsigned int capacity; + unsigned int capacityIncrement; - struct ExpansionData { }; + struct ExpansionData { }; - /* Reserved for future use. (Internal use only) */ - ExpansionData * reserved; +/* Reserved for future use. (Internal use only) */ + ExpansionData * reserved; #endif /* APPLE_KEXT_ALIGN_CONTAINERS */ - /* OSCollectionIterator interfaces. */ - virtual unsigned int iteratorSize() const APPLE_KEXT_OVERRIDE; - virtual bool initIterator(void * iterator) const APPLE_KEXT_OVERRIDE; - virtual bool getNextObjectForIterator(void * iterator, OSObject ** ret) const APPLE_KEXT_OVERRIDE; +/* OSCollectionIterator interfaces. */ + virtual unsigned int iteratorSize() const APPLE_KEXT_OVERRIDE; + virtual bool initIterator(void * iterator) const APPLE_KEXT_OVERRIDE; + virtual bool getNextObjectForIterator(void * iterator, OSObject ** ret) const APPLE_KEXT_OVERRIDE; public: - /*! - * @function withCapacity - * - * @abstract - * Creates and initializes an empty OSArray. - * - * @param capacity The initial storage capacity of the array object. - * - * @result - * An empty instance of OSArray with a retain count of 1; - * NULL on failure. - * - * @discussion - * capacity must be nonzero. - * The new array will grow as needed to accommodate more objects - * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, - * for which the initial capacity is a hard limit). - */ - static OSArray * withCapacity(unsigned int capacity); - - - /*! - * @function withObjects - * - * @abstract - * Creates and initializes an OSArray populated with objects provided. 
- * - * @param objects A C array of OSObject-derived instances. - * @param count The number of objects to be placed into the array. - * @param capacity The initial storage capacity of the array object. - * If 0, count is used; otherwise this value - * must be greater than or equal to count. - * - * @result - * An instance of OSArray containing the objects provided, - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * objects must be non-NULL, and count must be nonzero. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new array will grow as needed to accommodate more objects - * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, - * for which the initial capacity is a hard limit). - */ - static OSArray * withObjects( - const OSObject * objects[], - unsigned int count, - unsigned int capacity = 0); - - - /*! - * @function withArray - * - * @abstract - * Creates and initializes an OSArray populated with the contents of another array. - * - * @param array An OSArray whose contents will be stored - * in the new instance. - * @param capacity The initial storage capacity of the array object. - * If 0, the capacity is set to the number of objects - * in array; - * otherwise capacity must be - * greater than or equal to the number of objects - * in array. - * - * @result - * An instance of OSArray containing the objects of array, - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * array must be non-NULL. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new array will grow as needed to accommodate more objects - * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, - * for which the initial capacity is a hard limit). - * - * The objects in array are retained - * for storage in the new OSArray, - * not copied. - */ - static OSArray * withArray( - const OSArray * array, - unsigned int capacity = 0); - - - /*! - * @function initWithCapacity - * - * @abstract - * Initializes a new instance of OSArray. - * - * @param capacity The initial storage capacity of the array object. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link //apple_ref/cpp/clm/OSArray/withCapacity/staticOSArray*\/(unsignedint) - * withCapacity@/link - * instead. - * - * capacity must be nonzero. - * The new array will grow as needed to accommodate more objects - * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, - * for which the initial capacity is a hard limit). - */ - virtual bool initWithCapacity(unsigned int capacity); - - - /*! - * @function initWithObjects - * - * @abstract - * Initializes a new OSArray populated with objects provided. - * - * @param objects A C array of OSObject-derived objects. - * @param count The number of objects to be placed into the array. - * @param capacity The initial storage capacity of the array object. - * If 0, count is used; otherwise this value - * must be greater than or equal to count. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSArray/withObjects/staticOSArray*\/(constOSObject*,unsignedint,unsignedint) - * withObjects@/link - * instead. - * - * objects must be non-NULL, - * and count must be nonzero. - * If capacity is nonzero, - * it must be greater than or equal to count. 
- * The new array will grow as needed to accommodate more objects - * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, - * for which the initial capacity is a hard limit). - */ - virtual bool initWithObjects( - const OSObject * objects[], - unsigned int count, - unsigned int capacity = 0); - - /*! - * @function initWithArray - * - * @abstract - * Initializes a new OSArray populated with the contents of another array. - * - * @param anArray The array whose contents will be placed - * in the new instance. - * @param capacity The initial storage capacity of the array object. - * If 0, the capacity is set to the number of objects - * in array; - * otherwise capacity must be - * greater than or equal to the number of objects - * in array. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link //apple_ref/cpp/clm/OSArray/withArray/staticOSArray*\/(constOSArray*,unsignedint) - * withArray@/link instead. - * - * array must be non-NULL. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new array will grow as needed to accommodate more objects - * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, - * for which the initial capacity is a hard limit). - * - * The objects in array are retained for storage in the new OSArray, - * not copied. - */ - virtual bool initWithArray( - const OSArray * anArray, - unsigned int capacity = 0); - - - /*! - * @function free - * - * @abstract - * Deallocates or releases any resources - * used by the OSArray instance. - * - * @discussion - * This function should not be called directly; - * use - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link - * instead. - */ - virtual void free() APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCount - * - * @abstract - * Returns the current number of objects within the array. - * - * @result - * The current number of objects within the array. - */ - virtual unsigned int getCount() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCapacity - * - * @abstract - * Returns the number of objects the array can store - * without reallocating. - * - * @result - * The number objects the array can store - * without reallocating. - * - * @discussion - * OSArray objects grow when full to accommodate additional objects. - * See - * @link - * //apple_ref/cpp/instm/OSArray/getCapacity/virtualunsignedint/() - * getCapacityIncrement@/link - * and - * @link - * //apple_ref/cpp/instm/OSArray/ensureCapacity/virtualunsignedint/(unsignedint) - * ensureCapacity.@/link - */ - virtual unsigned int getCapacity() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCapacityIncrement - * - * @abstract - * Returns the storage increment of the array. - * - * @result - * The storage increment of the array. - * - * @discussion - * An OSArray allocates storage for objects in multiples - * of the capacity increment. - */ - virtual unsigned int getCapacityIncrement() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function setCapacityIncrement - * - * @abstract - * Sets the storage increment of the array. - * - * @result - * The new storage increment of the array, - * which may be different from the number requested. - * - * @discussion - * An OSArray allocates storage for objects in multiples - * of the capacity increment. - * Calling this function does not immediately reallocate storage. 
- */ - virtual unsigned int setCapacityIncrement(unsigned increment) APPLE_KEXT_OVERRIDE; - - - /*! - * @function ensureCapacity - * - * @abstract - * Ensures the array has enough space - * to store the requested number of objects. - * - * @param newCapacity The total number of objects the array - * should be able to store. - * - * @result - * The new capacity of the array, - * which may be different from the number requested - * (if smaller, reallocation of storage failed). - * - * @discussion - * This function immediately resizes the array, if necessary, - * to accommodate at least newCapacity objects. - * If newCapacity is not greater than the current capacity, - * or if an allocation error occurs, the original capacity is returned. - * - * There is no way to reduce the capacity of an OSArray. - */ - virtual unsigned int ensureCapacity(unsigned int newCapacity) APPLE_KEXT_OVERRIDE; - - - /*! - * @function flushCollection - * - * @abstract - * Removes and releases all objects within the array. - * - * @discussion - * The array's capacity (and therefore direct memory consumption) - * is not reduced by this function. - */ - virtual void flushCollection() APPLE_KEXT_OVERRIDE; - - - /*! - * @function setObject - * - * @abstract - * Appends an object onto the end of the array, - * increasing storage if necessary. - * - * @param anObject The object to add to the OSArray instance. - * - * @result - * true if the addition of anObject was successful, - * false if not. - * - * @discussion - * The array adds storage to accomodate the new object, if necessary. - * If successfully added, the object is retained. - */ - virtual bool setObject(const OSMetaClassBase * anObject); - - - /*! - * @function setObject - * - * @abstract - * Inserts or appends an object into the array - * at a particular index. - * - * @param index The index in the array at which to insert the object. - * Must be less than or equal to the array's count. - * @param anObject The object to add to the array. - * - * @result - * true if the addition of anObject - * was successful, false if not. - * - * @discussion - * This function moves existing objects from index on, - * in order to accommodate the new object; - * it does not replace an existing object at index. See - * @link - * //apple_ref/cpp/instm/OSArray/replaceObject/virtualvoid/(unsignedint,constOSMetaClassBase*) - * replaceObject@/link. - * If successfully added, the object is retained. - * - * The array adds storage to accomodate the new object, if necessary. - * Note, however, that this function does not allow for arbirtrary growth - * of an array by specifying an index larger than the current count. - * If you need to immediately grow an array by an arbitrary amount, - * use - * @link - * //apple_ref/cpp/instm/OSArray/ensureCapacity/virtualunsignedint/(unsignedint) - * ensureCapacity@/link. - */ - virtual bool setObject( - unsigned int index, - const OSMetaClassBase * anObject); - - - /*! - * @function merge - * - * @abstract - * Appends the contents of an array onto the receiving array. - * - * @param otherArray The array whose contents will be appended - * to the receiving array. - * @result - * true if merging was successful, false otherwise. - * - * @discussion - * This function merely appends one array onto another. - * Duplicates are not avoided and no sorting is performed. - * Objects successfully added to the receiver are retained. - */ - virtual bool merge(const OSArray * otherArray); - - - /*! 
- * @function replaceObject - * - * @abstract - * Replaces an object in an array at a given index. - * - * @param index The index of the object to be replaced. - * Must be less than the array's count. - * @param anObject The object to be placed into the array. - * - * @discussion - * The original object is released and the new object is retained. - */ - virtual void replaceObject( - unsigned int index, - const OSMetaClassBase * anObject); - - - /*! - * @function removeObject - * - * @abstract - * Removes an object from the array. - * - * @param index The index of the object to be removed. - * - * @discussion - * This function moves existing objects to fill the vacated index - * so that there are no gaps. - * The object removed is released. - */ - virtual void removeObject(unsigned int index); - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of two OSArray objects. - * - * @param anArray The array object being compared against the receiver. - * - * @result - * true if the two arrays are equivalent, - *false otherwise. - * - * @discussion - * Two OSArray objects are considered equal if they have same count - * and if the objects at corresponding indices compare as equal using - * @link - * //apple_ref/cpp/instm/OSMetaClassBase/isEqualTo/virtualbool/(constOSMetaClassBase*) - * isEqualTo@/link. - */ - virtual bool isEqualTo(const OSArray * anArray) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of an OSArray to an arbitrary object. - * - * @param anObject The object to be compared against the receiver. - * - * @result - * true if the two objects are equivalent, - * false otherwise. - * - * @discussion - * An OSArray is considered equal to another object - * if that object is derived from OSArray - * and contains the same or equivalent objects. - */ - virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getObject - * - * @abstract - * Return the object stored at a given index. - * - * @param index The index of the object to be returned to caller. - * - * @result - * The object stored at index, - * or NULL if index lies past the end of the array. - * - * @discussion - * The returned object will be released if removed from the array; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getObject(unsigned int index) const; - - - /*! - * @function getLastObject - * - * @abstract - * Returns the last object in the array. - * - * @result - * The last object in the array, - * or NULL if the array is empty. - * - * @discussion - * The returned object will be released if removed from the array; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getLastObject() const; - - - /*! - * @function getNextIndexOfObject - * - * @abstract - * Scans the array for the next instance of a specific object - * at or beyond a given index. - * - * @param anObject The object to scan for. - * @param index The index at which to begin the scan. - * - * @result - * The next index of anObject in the array or (-1) - * if none is found. - * - * @discussion - * This function uses pointer equivalence, and does not use - * @link - * //apple_ref/cpp/instm/OSMetaClassBase/isEqualTo/virtualbool/(constOSMetaClassBase*) - * isEqualTo@/link. 
- */ - virtual unsigned int getNextIndexOfObject( - const OSMetaClassBase * anObject, - unsigned int index) const; - - /*! - * @function serialize - * - * @abstract - * Archives the receiver into the provided - * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. - * - * @param serializer The OSSerialize object. - * @result - * true if serialization succeeds, false if not. - */ - virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function setOptions - * - * @abstract - * Recursively sets option bits in an array - * and all child collections. - * - * @param options A bitfield whose values turn the options on (1) or off (0). - * @param mask A mask indicating which bits - * in options to change. - * Pass 0 to get the whole current options bitfield - * without changing any settings. - * @param context Unused. - * - * @result - * The options bitfield as it was before the set operation. - * - * @discussion - * Kernel extensions should not call this function. - * - * Child collections' options are changed only if the receiving array's - * options actually change. - */ - virtual unsigned setOptions( - unsigned options, - unsigned mask, - void * context = 0) APPLE_KEXT_OVERRIDE; - - - /*! - * @function copyCollection - * - * @abstract - * Creates a deep copy of an array and its child collections. - * - * @param cycleDict A dictionary of all of the collections - * that have been copied so far, - * which is used to track circular references. - * To start the copy at the top level, - * pass NULL. - * - * @result - * The newly copied array, with a retain count of 1, - * or NULL if there is insufficient memory to do the copy. - * - * @discussion - * The receiving array, and any collections it contains, - * recursively, are copied. - * Objects that are not derived from OSCollection are retained - * rather than copied. - */ - OSCollection * copyCollection(OSDictionary * cycleDict = 0) APPLE_KEXT_OVERRIDE; - - OSMetaClassDeclareReservedUnused(OSArray, 0); - OSMetaClassDeclareReservedUnused(OSArray, 1); - OSMetaClassDeclareReservedUnused(OSArray, 2); - OSMetaClassDeclareReservedUnused(OSArray, 3); - OSMetaClassDeclareReservedUnused(OSArray, 4); - OSMetaClassDeclareReservedUnused(OSArray, 5); - OSMetaClassDeclareReservedUnused(OSArray, 6); - OSMetaClassDeclareReservedUnused(OSArray, 7); +/*! + * @function withCapacity + * + * @abstract + * Creates and initializes an empty OSArray. + * + * @param capacity The initial storage capacity of the array object. + * + * @result + * An empty instance of OSArray with a retain count of 1; + * NULL on failure. + * + * @discussion + * capacity must be nonzero. + * The new array will grow as needed to accommodate more objects + * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, + * for which the initial capacity is a hard limit). + */ + static OSArray * withCapacity(unsigned int capacity); + + +/*! + * @function withObjects + * + * @abstract + * Creates and initializes an OSArray populated with objects provided. + * + * @param objects A C array of OSObject-derived instances. + * @param count The number of objects to be placed into the array. + * @param capacity The initial storage capacity of the array object. + * If 0, count is used; otherwise this value + * must be greater than or equal to count. + * + * @result + * An instance of OSArray containing the objects provided, + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * objects must be non-NULL, and count must be nonzero. 
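+ *
+ * For example (an illustrative sketch only, with error handling
+ * elided; the string values are hypothetical):
+ *
+ * <pre>
+ * @textblock
+ * const OSObject * objs[2] = {
+ *     OSString::withCString("first"),
+ *     OSString::withCString("second")
+ * };
+ * OSArray * array = OSArray::withObjects(objs, 2); // capacity defaults to count
+ * objs[0]->release();  // the array now holds its own references
+ * objs[1]->release();
+ * @/textblock
+ * </pre>
+ *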
+ * If capacity is nonzero, + * it must be greater than or equal to count. + * The new array will grow as needed to accommodate more objects + * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, + * for which the initial capacity is a hard limit). + */ + static OSArray * withObjects( + const OSObject * objects[], + unsigned int count, + unsigned int capacity = 0); + + +/*! + * @function withArray + * + * @abstract + * Creates and initializes an OSArray populated with the contents of another array. + * + * @param array An OSArray whose contents will be stored + * in the new instance. + * @param capacity The initial storage capacity of the array object. + * If 0, the capacity is set to the number of objects + * in array; + * otherwise capacity must be + * greater than or equal to the number of objects + * in array. + * + * @result + * An instance of OSArray containing the objects of array, + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * array must be non-NULL. + * If capacity is nonzero, + * it must be greater than or equal to count. + * The new array will grow as needed to accommodate more objects + * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, + * for which the initial capacity is a hard limit). + * + * The objects in array are retained + * for storage in the new OSArray, + * not copied. + */ + static OSArray * withArray( + const OSArray * array, + unsigned int capacity = 0); + + +/*! + * @function initWithCapacity + * + * @abstract + * Initializes a new instance of OSArray. + * + * @param capacity The initial storage capacity of the array object. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link //apple_ref/cpp/clm/OSArray/withCapacity/staticOSArray*\/(unsignedint) + * withCapacity@/link + * instead. + * + * capacity must be nonzero. + * The new array will grow as needed to accommodate more objects + * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, + * for which the initial capacity is a hard limit). + */ + virtual bool initWithCapacity(unsigned int capacity); + + +/*! + * @function initWithObjects + * + * @abstract + * Initializes a new OSArray populated with objects provided. + * + * @param objects A C array of OSObject-derived objects. + * @param count The number of objects to be placed into the array. + * @param capacity The initial storage capacity of the array object. + * If 0, count is used; otherwise this value + * must be greater than or equal to count. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSArray/withObjects/staticOSArray*\/(constOSObject*,unsignedint,unsignedint) + * withObjects@/link + * instead. + * + * objects must be non-NULL, + * and count must be nonzero. + * If capacity is nonzero, + * it must be greater than or equal to count. + * The new array will grow as needed to accommodate more objects + * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link, + * for which the initial capacity is a hard limit). + */ + virtual bool initWithObjects( + const OSObject * objects[], + unsigned int count, + unsigned int capacity = 0); + +/*! + * @function initWithArray + * + * @abstract + * Initializes a new OSArray populated with the contents of another array. + * + * @param anArray The array whose contents will be placed + * in the new instance. 
+ * @param capacity The initial storage capacity of the array object.
+ * If 0, the capacity is set to the number of objects
+ * in array;
+ * otherwise capacity must be
+ * greater than or equal to the number of objects
+ * in array.
+ *
+ * @result
+ * true on success, false on failure.
+ *
+ * @discussion
+ * Not for general use. Use the static instance creation method
+ * @link //apple_ref/cpp/clm/OSArray/withArray/staticOSArray*\/(constOSArray*,unsignedint)
+ * withArray@/link instead.
+ *
+ * array must be non-NULL.
+ * If capacity is nonzero,
+ * it must be greater than or equal to count.
+ * The new array will grow as needed to accommodate more objects
+ * (unlike @link //apple_ref/doc/uid/20001502 CFMutableArray@/link,
+ * for which the initial capacity is a hard limit).
+ *
+ * The objects in array are retained for storage in the new OSArray,
+ * not copied.
+ */
+ virtual bool initWithArray(
+ const OSArray * anArray,
+ unsigned int capacity = 0);
+
+
+/*!
+ * @function free
+ *
+ * @abstract
+ * Deallocates or releases any resources
+ * used by the OSArray instance.
+ *
+ * @discussion
+ * This function should not be called directly;
+ * use
+ * @link
+ * //apple_ref/cpp/instm/OSObject/release/virtualvoid/()
+ * release@/link
+ * instead.
+ */
+ virtual void free() APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function getCount
+ *
+ * @abstract
+ * Returns the current number of objects within the array.
+ *
+ * @result
+ * The current number of objects within the array.
+ */
+ virtual unsigned int getCount() const APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function getCapacity
+ *
+ * @abstract
+ * Returns the number of objects the array can store
+ * without reallocating.
+ *
+ * @result
+ * The number of objects the array can store
+ * without reallocating.
+ *
+ * @discussion
+ * OSArray objects grow when full to accommodate additional objects.
+ * See
+ * @link
+ * //apple_ref/cpp/instm/OSArray/getCapacity/virtualunsignedint/()
+ * getCapacityIncrement@/link
+ * and
+ * @link
+ * //apple_ref/cpp/instm/OSArray/ensureCapacity/virtualunsignedint/(unsignedint)
+ * ensureCapacity.@/link
+ */
+ virtual unsigned int getCapacity() const APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function getCapacityIncrement
+ *
+ * @abstract
+ * Returns the storage increment of the array.
+ *
+ * @result
+ * The storage increment of the array.
+ *
+ * @discussion
+ * An OSArray allocates storage for objects in multiples
+ * of the capacity increment.
+ */
+ virtual unsigned int getCapacityIncrement() const APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function setCapacityIncrement
+ *
+ * @abstract
+ * Sets the storage increment of the array.
+ *
+ * @result
+ * The new storage increment of the array,
+ * which may be different from the number requested.
+ *
+ * @discussion
+ * An OSArray allocates storage for objects in multiples
+ * of the capacity increment.
+ * Calling this function does not immediately reallocate storage.
+ */
+ virtual unsigned int setCapacityIncrement(unsigned increment) APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function ensureCapacity
+ *
+ * @abstract
+ * Ensures the array has enough space
+ * to store the requested number of objects.
+ *
+ * @param newCapacity The total number of objects the array
+ * should be able to store.
+ *
+ * @result
+ * The new capacity of the array,
+ * which may be different from the number requested
+ * (if smaller, reallocation of storage failed).
+ *
+ * @discussion
+ * This function immediately resizes the array, if necessary,
+ * to accommodate at least newCapacity objects.
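+ *
+ * For example, a caller might reserve space once before a bulk
+ * @link merge merge@/link (a sketch; 'source' is a hypothetical
+ * second array):
+ *
+ * <pre>
+ * @textblock
+ * unsigned int needed = array->getCount() + source->getCount();
+ * if (array->ensureCapacity(needed) >= needed) {
+ *     array->merge(source);   // no further reallocation required
+ * }
+ * @/textblock
+ * </pre>
+ *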
+ * If newCapacity is not greater than the current capacity,
+ * or if an allocation error occurs, the original capacity is returned.
+ *
+ * There is no way to reduce the capacity of an OSArray.
+ */
+ virtual unsigned int ensureCapacity(unsigned int newCapacity) APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function flushCollection
+ *
+ * @abstract
+ * Removes and releases all objects within the array.
+ *
+ * @discussion
+ * The array's capacity (and therefore direct memory consumption)
+ * is not reduced by this function.
+ */
+ virtual void flushCollection() APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function setObject
+ *
+ * @abstract
+ * Appends an object onto the end of the array,
+ * increasing storage if necessary.
+ *
+ * @param anObject The object to add to the OSArray instance.
+ *
+ * @result
+ * true if the addition of anObject was successful,
+ * false if not.
+ *
+ * @discussion
+ * The array adds storage to accommodate the new object, if necessary.
+ * If successfully added, the object is retained.
+ */
+ virtual bool setObject(const OSMetaClassBase * anObject);
+
+
+/*!
+ * @function setObject
+ *
+ * @abstract
+ * Inserts or appends an object into the array
+ * at a particular index.
+ *
+ * @param index The index in the array at which to insert the object.
+ * Must be less than or equal to the array's count.
+ * @param anObject The object to add to the array.
+ *
+ * @result
+ * true if the addition of anObject
+ * was successful, false if not.
+ *
+ * @discussion
+ * This function moves existing objects from index on,
+ * in order to accommodate the new object;
+ * it does not replace an existing object at index. See
+ * @link
+ * //apple_ref/cpp/instm/OSArray/replaceObject/virtualvoid/(unsignedint,constOSMetaClassBase*)
+ * replaceObject@/link.
+ * If successfully added, the object is retained.
+ *
+ * The array adds storage to accommodate the new object, if necessary.
+ * Note, however, that this function does not allow for arbitrary growth
+ * of an array by specifying an index larger than the current count.
+ * If you need to immediately grow an array by an arbitrary amount,
+ * use
+ * @link
+ * //apple_ref/cpp/instm/OSArray/ensureCapacity/virtualunsignedint/(unsignedint)
+ * ensureCapacity@/link.
+ */
+ virtual bool setObject(
+ unsigned int index,
+ const OSMetaClassBase * anObject);
+
+
+/*!
+ * @function merge
+ *
+ * @abstract
+ * Appends the contents of an array onto the receiving array.
+ *
+ * @param otherArray The array whose contents will be appended
+ * to the receiving array.
+ * @result
+ * true if merging was successful, false otherwise.
+ *
+ * @discussion
+ * This function merely appends one array onto another.
+ * Duplicates are not avoided and no sorting is performed.
+ * Objects successfully added to the receiver are retained.
+ */
+ virtual bool merge(const OSArray * otherArray);
+
+
+/*!
+ * @function replaceObject
+ *
+ * @abstract
+ * Replaces an object in an array at a given index.
+ *
+ * @param index The index of the object to be replaced.
+ * Must be less than the array's count.
+ * @param anObject The object to be placed into the array.
+ *
+ * @discussion
+ * The original object is released and the new object is retained.
+ */
+ virtual void replaceObject(
+ unsigned int index,
+ const OSMetaClassBase * anObject);
+
+
+/*!
+ * @function removeObject
+ *
+ * @abstract
+ * Removes an object from the array.
+ *
+ * @param index The index of the object to be removed.
+ *
+ * @discussion
+ * This function moves existing objects to fill the vacated index
+ * so that there are no gaps.
+ * The object removed is released.
+ */
+ virtual void removeObject(unsigned int index);
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of two OSArray objects.
+ *
+ * @param anArray The array object being compared against the receiver.
+ *
+ * @result
+ * true if the two arrays are equivalent,
+ * false otherwise.
+ *
+ * @discussion
+ * Two OSArray objects are considered equal if they have the same count
+ * and if the objects at corresponding indices compare as equal using
+ * @link
+ * //apple_ref/cpp/instm/OSMetaClassBase/isEqualTo/virtualbool/(constOSMetaClassBase*)
+ * isEqualTo@/link.
+ */
+ virtual bool isEqualTo(const OSArray * anArray) const;
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of an OSArray to an arbitrary object.
+ *
+ * @param anObject The object to be compared against the receiver.
+ *
+ * @result
+ * true if the two objects are equivalent,
+ * false otherwise.
+ *
+ * @discussion
+ * An OSArray is considered equal to another object
+ * if that object is derived from OSArray
+ * and contains the same or equivalent objects.
+ */
+ virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function getObject
+ *
+ * @abstract
+ * Returns the object stored at a given index.
+ *
+ * @param index The index of the object to be returned to the caller.
+ *
+ * @result
+ * The object stored at index,
+ * or NULL if index lies past the end of the array.
+ *
+ * @discussion
+ * The returned object will be released if removed from the array;
+ * if you plan to store the reference, you should call
+ * @link
+ * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/()
+ * retain@/link
+ * on that object.
+ */
+ virtual OSObject * getObject(unsigned int index) const;
+
+
+/*!
+ * @function getLastObject
+ *
+ * @abstract
+ * Returns the last object in the array.
+ *
+ * @result
+ * The last object in the array,
+ * or NULL if the array is empty.
+ *
+ * @discussion
+ * The returned object will be released if removed from the array;
+ * if you plan to store the reference, you should call
+ * @link
+ * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/()
+ * retain@/link
+ * on that object.
+ */
+ virtual OSObject * getLastObject() const;
+
+
+/*!
+ * @function getNextIndexOfObject
+ *
+ * @abstract
+ * Scans the array for the next instance of a specific object
+ * at or beyond a given index.
+ *
+ * @param anObject The object to scan for.
+ * @param index The index at which to begin the scan.
+ *
+ * @result
+ * The next index of anObject in the array or (-1)
+ * if none is found.
+ *
+ * @discussion
+ * This function uses pointer equivalence, and does not use
+ * @link
+ * //apple_ref/cpp/instm/OSMetaClassBase/isEqualTo/virtualbool/(constOSMetaClassBase*)
+ * isEqualTo@/link.
+ */
+ virtual unsigned int getNextIndexOfObject(
+ const OSMetaClassBase * anObject,
+ unsigned int index) const;
+
+/*!
+ * @function serialize
+ *
+ * @abstract
+ * Archives the receiver into the provided
+ * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object.
+ *
+ * @param serializer The OSSerialize object.
+ * @result
+ * true if serialization succeeds, false if not.
+ */
+ virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function setOptions
+ *
+ * @abstract
+ * Recursively sets option bits in an array
+ * and all child collections.
+ * + * @param options A bitfield whose values turn the options on (1) or off (0). + * @param mask A mask indicating which bits + * in options to change. + * Pass 0 to get the whole current options bitfield + * without changing any settings. + * @param context Unused. + * + * @result + * The options bitfield as it was before the set operation. + * + * @discussion + * Kernel extensions should not call this function. + * + * Child collections' options are changed only if the receiving array's + * options actually change. + */ + virtual unsigned setOptions( + unsigned options, + unsigned mask, + void * context = 0) APPLE_KEXT_OVERRIDE; + + +/*! + * @function copyCollection + * + * @abstract + * Creates a deep copy of an array and its child collections. + * + * @param cycleDict A dictionary of all of the collections + * that have been copied so far, + * which is used to track circular references. + * To start the copy at the top level, + * pass NULL. + * + * @result + * The newly copied array, with a retain count of 1, + * or NULL if there is insufficient memory to do the copy. + * + * @discussion + * The receiving array, and any collections it contains, + * recursively, are copied. + * Objects that are not derived from OSCollection are retained + * rather than copied. + */ + OSCollection * copyCollection(OSDictionary * cycleDict = 0) APPLE_KEXT_OVERRIDE; + + OSMetaClassDeclareReservedUnused(OSArray, 0); + OSMetaClassDeclareReservedUnused(OSArray, 1); + OSMetaClassDeclareReservedUnused(OSArray, 2); + OSMetaClassDeclareReservedUnused(OSArray, 3); + OSMetaClassDeclareReservedUnused(OSArray, 4); + OSMetaClassDeclareReservedUnused(OSArray, 5); + OSMetaClassDeclareReservedUnused(OSArray, 6); + OSMetaClassDeclareReservedUnused(OSArray, 7); }; #endif /* !_OS_OSARRAY_H */ diff --git a/libkern/libkern/c++/OSBoolean.h b/libkern/libkern/c++/OSBoolean.h index 8821a1864..207bb4da8 100644 --- a/libkern/libkern/c++/OSBoolean.h +++ b/libkern/libkern/c++/OSBoolean.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSBoolean.cpp created by rsulack on Tue Oct 12 1999 */ @@ -40,7 +40,7 @@ class OSString; * @abstract * This header declares the OSBoolean container class. */ - + /*! 
* @class OSBoolean @@ -62,179 +62,179 @@ class OSString; */ class OSBoolean : public OSObject { - OSDeclareDefaultStructors(OSBoolean) - friend class OSSerialize; + OSDeclareDefaultStructors(OSBoolean) + friend class OSSerialize; protected: - bool value; + bool value; - /*! - * @function taggedRelease - * - * @abstract - * Overrides the reference counting mechanism - * for the shared global instances. - * - * @param tag Unused. - * @param when Unused. - */ - virtual void taggedRelease( - const void * tag, - const int when) const APPLE_KEXT_OVERRIDE; +/*! + * @function taggedRelease + * + * @abstract + * Overrides the reference counting mechanism + * for the shared global instances. + * + * @param tag Unused. + * @param when Unused. + */ + virtual void taggedRelease( + const void * tag, + const int when) const APPLE_KEXT_OVERRIDE; public: - static void initialize(); + static void initialize(); - /*! - * @function withBoolean - * - * @abstract - * Returns one of the global instances of OSBoolean. - * - * @param value A boolean value. - * - * @result - * The global instance of OSBoolean with the boolean value. - * - * @discussion - * This function actually returns either - * @link kOSBooleanTrue kOSBooleanTrue@/link or - * @link kOSBooleanFalse kOSBooleanFalse@/link, - * so that you can always use pointer comparison with OSBoolean objects. - */ - static OSBoolean * withBoolean(bool value); +/*! + * @function withBoolean + * + * @abstract + * Returns one of the global instances of OSBoolean. + * + * @param value A boolean value. + * + * @result + * The global instance of OSBoolean with the boolean value. + * + * @discussion + * This function actually returns either + * @link kOSBooleanTrue kOSBooleanTrue@/link or + * @link kOSBooleanFalse kOSBooleanFalse@/link, + * so that you can always use pointer comparison with OSBoolean objects. + */ + static OSBoolean * withBoolean(bool value); - /*! - * @function free - * - * @abstract - * Overridden to prevent deallocation of the shared global instances. - * - * @discussion - * This function should never be called. - */ - virtual void free() APPLE_KEXT_OVERRIDE; +/*! + * @function free + * + * @abstract + * Overridden to prevent deallocation of the shared global instances. + * + * @discussion + * This function should never be called. + */ + virtual void free() APPLE_KEXT_OVERRIDE; - /*! - * @function taggedRetain - * - * @abstract - * Overrides the reference counting mechanism for the shared global instances. - * - * @param tag Unused. - */ - virtual void taggedRetain(const void * tag) const APPLE_KEXT_OVERRIDE; +/*! + * @function taggedRetain + * + * @abstract + * Overrides the reference counting mechanism for the shared global instances. + * + * @param tag Unused. + */ + virtual void taggedRetain(const void * tag) const APPLE_KEXT_OVERRIDE; - /*! - * @function isTrue - * - * @abstract - * Checks whether the OSBoolean object - * represents a true bool value. - * - * @result - * true if the OSBoolean object is true, - * false otherwise. - * - * @discussion - * You can also use == against - * @link kOSBooleanTrue kOSBooleanTrue@/link. - */ - virtual bool isTrue() const; +/*! + * @function isTrue + * + * @abstract + * Checks whether the OSBoolean object + * represents a true bool value. + * + * @result + * true if the OSBoolean object is true, + * false otherwise. + * + * @discussion + * You can also use == against + * @link kOSBooleanTrue kOSBooleanTrue@/link. + */ + virtual bool isTrue() const; - /*! 
- * @function isFalse
- *
- * @abstract
- * Checks whether the OSBoolean object
- * represents a false bool value.
- *
- * @result
- * true if the OSBoolean object is false,
- * true otherwise.
- *
- * @discussion
- * You can also use == against
- * @link kOSBooleanFalse kOSBooleanFalse@/link.
- */
- virtual bool isFalse() const;
+/*!
+ * @function isFalse
+ *
+ * @abstract
+ * Checks whether the OSBoolean object
+ * represents a false bool value.
+ *
+ * @result
+ * true if the OSBoolean object is false,
+ * false otherwise.
+ *
+ * @discussion
+ * You can also use == against
+ * @link kOSBooleanFalse kOSBooleanFalse@/link.
+ */
+ virtual bool isFalse() const;
- /*!
- * @function getValue
- *
- * @abstract
- * Returns the C++ bool value for the OSBoolean object.
- *
- * @result
- * Returns the C++ bool value of the OSBoolean object.
- */
- virtual bool getValue() const;
+/*!
+ * @function getValue
+ *
+ * @abstract
+ * Returns the C++ bool value for the OSBoolean object.
+ *
+ * @result
+ * Returns the C++ bool value of the OSBoolean object.
+ */
+ virtual bool getValue() const;
- /*!
- * @function isEqualTo
- *
- * @abstract
- * Tests the equality of two OSBoolean objects.
- *
- * @param aBoolean The OSBoolean to be compared against the receiver.
- *
- * @result
- * true if the OSBoolean objects are equal,
- * false if not.
- *
- * @discussion
- * Two OSBoolean objects are considered equal
- * if they are the same exact object (pointer equality).
- */
- virtual bool isEqualTo(const OSBoolean * aBoolean) const;
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of two OSBoolean objects.
+ *
+ * @param aBoolean The OSBoolean to be compared against the receiver.
+ *
+ * @result
+ * true if the OSBoolean objects are equal,
+ * false if not.
+ *
+ * @discussion
+ * Two OSBoolean objects are considered equal
+ * if they are the same exact object (pointer equality).
+ */
+ virtual bool isEqualTo(const OSBoolean * aBoolean) const;
- /*!
- * @function isEqualTo
- *
- * @abstract
- * Tests the equality an OSBoolean to an arbitrary object.
- *
- * @param anObject An object to be compared against the receiver.
- *
- * @result
- * true if the objects are equal, false if not.
- *
- * @discussion
- * An OSBoolean is considered equal to another object
- * if that object is derived from OSBoolean
- * and represents the same C++ bool value.
- */
- virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE;
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of an OSBoolean to an arbitrary object.
+ *
+ * @param anObject An object to be compared against the receiver.
+ *
+ * @result
+ * true if the objects are equal, false if not.
+ *
+ * @discussion
+ * An OSBoolean is considered equal to another object
+ * if that object is derived from OSBoolean
+ * and represents the same C++ bool value.
+ */
+ virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE;
- /*!
- * @function serialize
- *
- * @abstract
- * Archives the receiver into the provided
- * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object.
- *
- * @param serializer The OSSerialize object.
- *
- * @result
- * true if serialization succeeds, false if not.
- */
- virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE;
+/*!
+ * @function serialize
+ *
+ * @abstract
+ * Archives the receiver into the provided
+ * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object.
+ *
+ * @param serializer The OSSerialize object.
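+ *
+ * For example (a sketch, with error handling elided):
+ *
+ * <pre>
+ * @textblock
+ * OSSerialize * s = OSSerialize::withCapacity(PAGE_SIZE);
+ * if (s && kOSBooleanTrue->serialize(s)) {
+ *     printf("%s\n", s->text());   // XML form, e.g. "<true/>"
+ * }
+ * if (s) s->release();
+ * @/textblock
+ * </pre>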
+ * + * @result + * true if serialization succeeds, false if not. + */ + virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; - OSMetaClassDeclareReservedUnused(OSBoolean, 0); - OSMetaClassDeclareReservedUnused(OSBoolean, 1); - OSMetaClassDeclareReservedUnused(OSBoolean, 2); - OSMetaClassDeclareReservedUnused(OSBoolean, 3); - OSMetaClassDeclareReservedUnused(OSBoolean, 4); - OSMetaClassDeclareReservedUnused(OSBoolean, 5); - OSMetaClassDeclareReservedUnused(OSBoolean, 6); - OSMetaClassDeclareReservedUnused(OSBoolean, 7); + OSMetaClassDeclareReservedUnused(OSBoolean, 0); + OSMetaClassDeclareReservedUnused(OSBoolean, 1); + OSMetaClassDeclareReservedUnused(OSBoolean, 2); + OSMetaClassDeclareReservedUnused(OSBoolean, 3); + OSMetaClassDeclareReservedUnused(OSBoolean, 4); + OSMetaClassDeclareReservedUnused(OSBoolean, 5); + OSMetaClassDeclareReservedUnused(OSBoolean, 6); + OSMetaClassDeclareReservedUnused(OSBoolean, 7); }; /*! @@ -248,7 +248,7 @@ public: * This object does not need to be retained or released (but it can be). * Comparisons of the form * booleanObject == kOSBooleanTrue - * are acceptable and are equivalent to + * are acceptable and are equivalent to * booleanObject->getValue() == true. */ extern OSBoolean * const & kOSBooleanTrue; diff --git a/libkern/libkern/c++/OSCPPDebug.h b/libkern/libkern/c++/OSCPPDebug.h index f532c7726..11e45a375 100644 --- a/libkern/libkern/c++/OSCPPDebug.h +++ b/libkern/libkern/c++/OSCPPDebug.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/libkern/libkern/c++/OSCollection.h b/libkern/libkern/c++/OSCollection.h index f162bbdc3..f6c7e01b7 100644 --- a/libkern/libkern/c++/OSCollection.h +++ b/libkern/libkern/c++/OSCollection.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOCollection.h created by gvdl on Thu 1998-10-22 */ @@ -41,8 +41,8 @@ class OSDictionary; * @abstract * This header declares the OSDictionary collection class. */ - - + + /*! * @class OSCollection * @@ -67,7 +67,7 @@ class OSDictionary; * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSCollection provides no concurrency protection; @@ -79,424 +79,422 @@ class OSDictionary; */ class OSCollection : public OSObject { - friend class OSCollectionIterator; + friend class OSCollectionIterator; - OSDeclareAbstractStructors(OSCollection); + OSDeclareAbstractStructors(OSCollection); + + struct ExpansionData { }; - struct ExpansionData { }; - protected: - /* Not to be included in headerdoc. - * - * @var updateStamp - * - * @abstract - * A counter for changes to the collection object. - * - * @discussion - * The update stamp is used primarily to track validity - * of iteration contexts. - * See @link //apple_ref/cpp/class/OSIterator OSIterator@/link and - * @link //apple_ref/cpp/class/OSCollectionIterator OSCollectionIterator@/link - * for more information. - */ - unsigned int updateStamp; +/* Not to be included in headerdoc. + * + * @var updateStamp + * + * @abstract + * A counter for changes to the collection object. + * + * @discussion + * The update stamp is used primarily to track validity + * of iteration contexts. + * See @link //apple_ref/cpp/class/OSIterator OSIterator@/link and + * @link //apple_ref/cpp/class/OSCollectionIterator OSCollectionIterator@/link + * for more information. + */ + unsigned int updateStamp; #ifdef XNU_KERNEL_PRIVATE protected: #else private: #endif /* XNU_KERNEL_PRIVATE */ - /* Reserved for future use. (Internal use only) */ - // ExpansionData * reserved; - unsigned int fOptions; +/* Reserved for future use. (Internal use only) */ +// ExpansionData * reserved; + unsigned int fOptions; protected: - // Member functions used by the OSCollectionIterator class. - - - /*! - * @function iteratorSize - * - * @abstract - * Returns the size in bytes of a subclass's iteration context. - * - * @result - * The size in bytes of the iteration context - * needed by the subclass of OSCollection. - * - * @discussion - * This pure virtual member function, which subclasses must implement, - * is called by an - * @link //apple_ref/doc/class/OSCollectionIterator OSCollectionIterator@/link - * object so that it can allocate the storage needed - * for the iteration context. - * An iteration context contains the data necessary - * to iterate through the collection. - */ - virtual unsigned int iteratorSize() const = 0; - - - /*! - * @function initIterator - * - * @abstract - * Initializes the iteration context for a collection subclass. - * - * @param iterationContext The iteration context to initialize. 
- * - * @result - * true if initialization was successful, - * false otherwise. - * - * @discussion - * This pure virtual member function, which subclasses must implement, - * is called by an - * @link //apple_ref/doc/class/OSCollectionIterator OSCollectionIterator@/link - * object to initialize an iteration context for a collection. - * The collection object should interpret iterationContext appropriately - * and initialize its contents to begin an iteration. - * - * This function can be called repeatedly for a given context, - * whenever the iterator is reset via the - * @link //apple_ref/cpp/instm/OSCollectionIterator/reset/virtualvoid/() - * OSCollectionIterator::reset@/link - * function. - */ - virtual bool initIterator(void * iterationContext) const = 0; - - - /*! - * @function getNextObjectForIterator - * - * @abstract - * Returns the next member of a collection. - * - * @param iterationContext The iteration context. - * @param nextObject The object returned by reference to the caller. - * - * @result - * true if an object was found, false otherwise. - * - * @discussion - * This pure virtual member function, which subclasses must implement, - * is called by an - * @link //apple_ref/doc/class/OSCollectionIterator OSCollectionIterator@/link - * to get the next object for a given iteration context. - * The collection object should interpret - * iterationContext appropriately, - * advance the context from its current object - * to the next object (if it exists), - * return that object by reference in nextObject, - * and return true for the function call. - * If there is no next object, the collection object must return false. - * - * For associative collections, the object returned should be the key - * used to access its associated value, and not the value itself. - */ - virtual bool getNextObjectForIterator( - void * iterationContext, - OSObject ** nextObject) const = 0; - - - /*! - * @function init - * - * @abstract - * Initializes the OSCollection object. - * - * @result - * true on success, false otherwise. - * - * @discussion - * This function is used to initialize state - * within a newly created OSCollection object. - */ - virtual bool init() APPLE_KEXT_OVERRIDE; +// Member functions used by the OSCollectionIterator class. + + +/*! + * @function iteratorSize + * + * @abstract + * Returns the size in bytes of a subclass's iteration context. + * + * @result + * The size in bytes of the iteration context + * needed by the subclass of OSCollection. + * + * @discussion + * This pure virtual member function, which subclasses must implement, + * is called by an + * @link //apple_ref/doc/class/OSCollectionIterator OSCollectionIterator@/link + * object so that it can allocate the storage needed + * for the iteration context. + * An iteration context contains the data necessary + * to iterate through the collection. + */ + virtual unsigned int iteratorSize() const = 0; + + +/*! + * @function initIterator + * + * @abstract + * Initializes the iteration context for a collection subclass. + * + * @param iterationContext The iteration context to initialize. + * + * @result + * true if initialization was successful, + * false otherwise. + * + * @discussion + * This pure virtual member function, which subclasses must implement, + * is called by an + * @link //apple_ref/doc/class/OSCollectionIterator OSCollectionIterator@/link + * object to initialize an iteration context for a collection. 
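+ *
+ * For a hypothetical subclass that keeps its members in a flat C
+ * array, the whole context can be a single cursor index (sketch):
+ *
+ * <pre>
+ * @textblock
+ * unsigned int MyColl::iteratorSize() const
+ * {
+ *     return sizeof(unsigned int);             // one cursor
+ * }
+ *
+ * bool MyColl::initIterator(void * iterationContext) const
+ * {
+ *     *(unsigned int *) iterationContext = 0;  // restart at the first member
+ *     return true;
+ * }
+ * @/textblock
+ * </pre>
+ *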
+ * The collection object should interpret iterationContext appropriately + * and initialize its contents to begin an iteration. + * + * This function can be called repeatedly for a given context, + * whenever the iterator is reset via the + * @link //apple_ref/cpp/instm/OSCollectionIterator/reset/virtualvoid/() + * OSCollectionIterator::reset@/link + * function. + */ + virtual bool initIterator(void * iterationContext) const = 0; + + +/*! + * @function getNextObjectForIterator + * + * @abstract + * Returns the next member of a collection. + * + * @param iterationContext The iteration context. + * @param nextObject The object returned by reference to the caller. + * + * @result + * true if an object was found, false otherwise. + * + * @discussion + * This pure virtual member function, which subclasses must implement, + * is called by an + * @link //apple_ref/doc/class/OSCollectionIterator OSCollectionIterator@/link + * to get the next object for a given iteration context. + * The collection object should interpret + * iterationContext appropriately, + * advance the context from its current object + * to the next object (if it exists), + * return that object by reference in nextObject, + * and return true for the function call. + * If there is no next object, the collection object must return false. + * + * For associative collections, the object returned should be the key + * used to access its associated value, and not the value itself. + */ + virtual bool getNextObjectForIterator( + void * iterationContext, + OSObject ** nextObject) const = 0; + + +/*! + * @function init + * + * @abstract + * Initializes the OSCollection object. + * + * @result + * true on success, false otherwise. + * + * @discussion + * This function is used to initialize state + * within a newly created OSCollection object. + */ + virtual bool init() APPLE_KEXT_OVERRIDE; public: - /*! - * @typedef _OSCollectionFlags - * - * @const kImmutable - * @discussion - * Used with @link setOptions setOptions@/link - * to indicate the collection's contents should - * or should not change. - * - * An @link //apple_ref/doc/class/IORegistryEntry IORegistryEntry@/link - * object marks collections immutable when set - * as properties of a registry entry that's attached to a plane. - * This is generally an advisory flag, used for debugging; - * setting it does not mean a collection will in fact - * disallow modifications. - */ - typedef enum { - kImmutable = 0x00000001, - kSort = 0x00000002, - kMASK = (unsigned) -1 - } _OSCollectionFlags; +/*! + * @typedef _OSCollectionFlags + * + * @const kImmutable + * @discussion + * Used with @link setOptions setOptions@/link + * to indicate the collection's contents should + * or should not change. + * + * An @link //apple_ref/doc/class/IORegistryEntry IORegistryEntry@/link + * object marks collections immutable when set + * as properties of a registry entry that's attached to a plane. + * This is generally an advisory flag, used for debugging; + * setting it does not mean a collection will in fact + * disallow modifications. + */ + typedef enum { + kImmutable = 0x00000001, + kSort = 0x00000002, + kMASK = (unsigned) - 1 + } _OSCollectionFlags; // xx-review: should be protected, not public - /*! - * @function haveUpdated - * - * @abstract - * Tracks updates to the collection. - * - * @discussion - * Subclasses call this function before - * making any change to their contents (not after, as the name implies). 
- * Update tracking is used for collection iterators, - * and to enforce certain protections in the IORegistry. - */ - void haveUpdated(); - - - /*! - * @function getCount - * - * @abstract - * Returns the number of objects in the collection. - * - * @result - * The number of objects in the collection. - * - * @discussion - * Subclasses must implement this pure virtual member function. - */ - virtual unsigned int getCount() const = 0; - - - /*! - * @function getCapacity - * - * @abstract - * Returns the number of objects the collection - * can store without reallocating. - * - * @result - * The number objects the collection - * can store without reallocating. - * - * @discussion - * Subclasses must implement this pure virtual member function. - */ - virtual unsigned int getCapacity() const = 0; - - - /*! - * @function getCapacityIncrement - * - * @abstract - * Returns the storage increment of the collection. - * - * @result - * The storage increment of the collection. - * - * @discussion - * Subclasses must implement this pure virtual member function. - * Most collection subclasses allocate their storage - * in multiples of the capacity increment. - * - * See - * @link - * //apple_ref/cpp/instm/OSCollection/ensureCapacity/virtualunsignedint/(unsignedint) - * ensureCapacity@/link - * for how the capacity increment is used. - */ - virtual unsigned int getCapacityIncrement() const = 0; - - - /*! - * @function setCapacityIncrement - * - * @abstract - * Sets the storage increment of the collection. - * - * @result - * The new storage increment of the collection, - * which may be different from the number requested. - * - * @discussion - * Subclasses must implement this pure virtual member function. - * Most collection subclasses allocate their storage - * in multiples of the capacity increment. - * - * Collection subclasses should gracefully handle - * an increment of zero - * by applying (and returning) a positive minimum capacity. - * - * Setting the capacity increment does not trigger an immediate adjustment - * of a collection's storage. - * - * See - * @link - * //apple_ref/cpp/instm/OSCollection/ensureCapacity/virtualunsignedint/(unsignedint) - * ensureCapacity@/link - * for how the capacity increment is used. - */ - virtual unsigned int setCapacityIncrement(unsigned increment) = 0; - - - /*! - * @function ensureCapacity - * - * @abstract - * Ensures the collection has enough space to store - * the requested number of objects. - * - * @param newCapacity The total number of objects the collection - * should be able to store. - * - * @result - * The new capacity of the collection, - * which may be different from the number requested - * (if smaller, reallocation of storage failed). - * - * @discussion - * Subclasses implement this pure virtual member function - * to adjust their storage so that they can hold - * at least newCapacity objects. - * Libkern collections generally allocate storage - * in multiples of their capacity increment. - * - * Subclass methods that add objects to the collection - * should call this function before adding any object, - * and should check the return value for success. - * - * Collection subclasses may reduce their storage - * when the number of contained objects falls below some threshold, - * but no Libkern collections currently do. - */ - virtual unsigned int ensureCapacity(unsigned int newCapacity) = 0; - - - /*! - * @function flushCollection - * - * @abstract - * Empties the collection, releasing any objects retained. 
- *
- * @discussion
- * Subclasses implement this pure virtual member function
- * to remove their entire contents.
- * This must not release the collection itself.
- */
- virtual void flushCollection() = 0;
-
-
- /*!
- * @function setOptions
- *
- * @abstract
- * Recursively sets option bits in this collection
- * and all child collections.
- *
- * @param options A bitfield whose values turn the options on (1) or off (0).
- * @param mask A mask indicating which bits
- * in options to change.
- * Pass 0 to get the whole current options bitfield
- * without changing any settings.
- * @param context Unused.
- *
- * @result
- * The options bitfield as it was before the set operation.
- *
- * @discussion
- * Kernel extensions should not call this function.
- *
- * The only option currently in use is
- * @link //apple_ref/doc/title:econst/OSCollectionFlags/kImmutable
- * kImmutable@/link.
- *
- * Subclasses should override this function to recursively apply
- * the options to their contents if the options actually change.
- */
- virtual unsigned setOptions(
- unsigned options,
- unsigned mask,
- void * context = 0);
- OSMetaClassDeclareReservedUsed(OSCollection, 0);
-
- /*!
- * @function copyCollection
- *
- * @abstract
- * Creates a deep copy of a collection.
- *
- * @param cycleDict A dictionary of all of the collections
- * that have been copied so far,
- * to start the copy at the top level
- * pass NULL for cycleDict.
- *
- * @result
- * The newly copied collecton,
- * NULL on failure.
- *
- * @discussion
- * This function copies the collection
- * and all of the contained collections recursively.
- * Objects that are not derived from OSCollection are retained
- * rather than copied.
- *
- * Subclasses of OSCollection must override this function
- * to properly support deep copies.
- */
- virtual OSCollection *copyCollection(OSDictionary * cycleDict = 0);
- OSMetaClassDeclareReservedUsed(OSCollection, 1);
-
- /*!
- * @function iterateObjects
- *
- * @abstract
- * Invoke a callback for each member of the collection.
- *
- * @param refcon A reference constant for the callback.
- * @param callback The callback function,
- * called with the refcon and each member object
- * of the collection in turn, on the callers thread.
- * The callback should return true to early terminate
- * the iteration, false otherwise.
- *
- * @result
- * False if the collection iteration was made invalid
- * (see OSCollectionIterator::isValid()) otherwise true.
- */
- bool iterateObjects(void * refcon, bool (*callback)(void * refcon, OSObject * object));
+/*!
+ * @function haveUpdated
+ *
+ * @abstract
+ * Tracks updates to the collection.
+ *
+ * @discussion
+ * Subclasses call this function before
+ * making any change to their contents (not after, as the name implies).
+ * Update tracking is used for collection iterators,
+ * and to enforce certain protections in the IORegistry.
+ */
+ void haveUpdated();
+
+
+/*!
+ * @function getCount
+ *
+ * @abstract
+ * Returns the number of objects in the collection.
+ *
+ * @result
+ * The number of objects in the collection.
+ *
+ * @discussion
+ * Subclasses must implement this pure virtual member function.
+ */
+ virtual unsigned int getCount() const = 0;
+
+
+/*!
+ * @function getCapacity
+ *
+ * @abstract
+ * Returns the number of objects the collection
+ * can store without reallocating.
+ *
+ * @result
+ * The number of objects the collection
+ * can store without reallocating.
+ *
+ * @discussion
+ * Subclasses must implement this pure virtual member function.
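+ *
+ * As a sketch of how a hypothetical mutable subclass might tie
+ * getCount, getCapacity, ensureCapacity, and haveUpdated together:
+ *
+ * <pre>
+ * @textblock
+ * bool MyColl::addMember(OSObject * obj)
+ * {
+ *     haveUpdated();                           // before the change
+ *     if (getCount() == getCapacity() &&
+ *         ensureCapacity(getCount() + 1) <= getCount()) {
+ *         return false;                        // reallocation failed
+ *     }
+ *     obj->retain();                           // collection holds a reference
+ *     // ... store obj in the subclass's backing storage ...
+ *     return true;
+ * }
+ * @/textblock
+ * </pre>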
+ */ + virtual unsigned int getCapacity() const = 0; + + +/*! + * @function getCapacityIncrement + * + * @abstract + * Returns the storage increment of the collection. + * + * @result + * The storage increment of the collection. + * + * @discussion + * Subclasses must implement this pure virtual member function. + * Most collection subclasses allocate their storage + * in multiples of the capacity increment. + * + * See + * @link + * //apple_ref/cpp/instm/OSCollection/ensureCapacity/virtualunsignedint/(unsignedint) + * ensureCapacity@/link + * for how the capacity increment is used. + */ + virtual unsigned int getCapacityIncrement() const = 0; + + +/*! + * @function setCapacityIncrement + * + * @abstract + * Sets the storage increment of the collection. + * + * @result + * The new storage increment of the collection, + * which may be different from the number requested. + * + * @discussion + * Subclasses must implement this pure virtual member function. + * Most collection subclasses allocate their storage + * in multiples of the capacity increment. + * + * Collection subclasses should gracefully handle + * an increment of zero + * by applying (and returning) a positive minimum capacity. + * + * Setting the capacity increment does not trigger an immediate adjustment + * of a collection's storage. + * + * See + * @link + * //apple_ref/cpp/instm/OSCollection/ensureCapacity/virtualunsignedint/(unsignedint) + * ensureCapacity@/link + * for how the capacity increment is used. + */ + virtual unsigned int setCapacityIncrement(unsigned increment) = 0; + + +/*! + * @function ensureCapacity + * + * @abstract + * Ensures the collection has enough space to store + * the requested number of objects. + * + * @param newCapacity The total number of objects the collection + * should be able to store. + * + * @result + * The new capacity of the collection, + * which may be different from the number requested + * (if smaller, reallocation of storage failed). + * + * @discussion + * Subclasses implement this pure virtual member function + * to adjust their storage so that they can hold + * at least newCapacity objects. + * Libkern collections generally allocate storage + * in multiples of their capacity increment. + * + * Subclass methods that add objects to the collection + * should call this function before adding any object, + * and should check the return value for success. + * + * Collection subclasses may reduce their storage + * when the number of contained objects falls below some threshold, + * but no Libkern collections currently do. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity) = 0; + + +/*! + * @function flushCollection + * + * @abstract + * Empties the collection, releasing any objects retained. + * + * @discussion + * Subclasses implement this pure virtual member function + * to remove their entire contents. + * This must not release the collection itself. + */ + virtual void flushCollection() = 0; + + +/*! + * @function setOptions + * + * @abstract + * Recursively sets option bits in this collection + * and all child collections. + * + * @param options A bitfield whose values turn the options on (1) or off (0). + * @param mask A mask indicating which bits + * in options to change. + * Pass 0 to get the whole current options bitfield + * without changing any settings. + * @param context Unused. + * + * @result + * The options bitfield as it was before the set operation. + * + * @discussion + * Kernel extensions should not call this function. 
+ *
+ * The only option currently in use is
+ * @link //apple_ref/doc/title:econst/OSCollectionFlags/kImmutable
+ * kImmutable@/link.
+ *
+ * Subclasses should override this function to recursively apply
+ * the options to their contents if the options actually change.
+ */
+ virtual unsigned setOptions(
+ unsigned options,
+ unsigned mask,
+ void * context = 0);
+ OSMetaClassDeclareReservedUsed(OSCollection, 0);
+
+/*!
+ * @function copyCollection
+ *
+ * @abstract
+ * Creates a deep copy of a collection.
+ *
+ * @param cycleDict A dictionary of all of the collections
+ * that have been copied so far;
+ * to start the copy at the top level,
+ * pass NULL for cycleDict.
+ *
+ * @result
+ * The newly copied collection,
+ * NULL on failure.
+ *
+ * @discussion
+ * This function copies the collection
+ * and all of the contained collections recursively.
+ * Objects that are not derived from OSCollection are retained
+ * rather than copied.
+ *
+ * Subclasses of OSCollection must override this function
+ * to properly support deep copies.
+ */
+ virtual OSCollection *copyCollection(OSDictionary * cycleDict = 0);
+ OSMetaClassDeclareReservedUsed(OSCollection, 1);
+
+/*!
+ * @function iterateObjects
+ *
+ * @abstract
+ * Invoke a callback for each member of the collection.
+ *
+ * @param refcon A reference constant for the callback.
+ * @param callback The callback function,
+ * called with the refcon and each member object
+ * of the collection in turn, on the caller's thread.
+ * The callback should return true to terminate the iteration early,
+ * false otherwise.
+ *
+ * @result
+ * False if the collection iteration was made invalid
+ * (see OSCollectionIterator::isValid()), otherwise true.
+ */
+ bool iterateObjects(void * refcon, bool (*callback)(void * refcon, OSObject * object));
#ifdef __BLOCKS__
- /*!
- * @function iterateObjects
- *
- * @abstract
- * Invoke a block for each member of the collection.
- *
- * @param block The block,
- * called with the refcon and each member object
- * of the collection in turn, on the callers thread.
- * The block should return true to early terminate
- * the iteration, false otherwise.
- *
- * @result
- * False if the collection iteration was made invalid
- * (see OSCollectionIterator::isValid()) otherwise true.
- */
- bool iterateObjects(bool (^block)(OSObject * object));
+/*!
+ * @function iterateObjects
+ *
+ * @abstract
+ * Invoke a block for each member of the collection.
+ *
+ * @param block The block,
+ * called with each member object
+ * of the collection in turn, on the caller's thread.
+ * The block should return true to terminate the iteration early,
+ * false otherwise.
+ *
+ * @result
+ * False if the collection iteration was made invalid
+ * (see OSCollectionIterator::isValid()), otherwise true.
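+ *
+ * For example (a sketch; 'coll' is a hypothetical collection):
+ *
+ * <pre>
+ * @textblock
+ * __block unsigned int count = 0;
+ * bool completed = coll->iterateObjects(^bool (OSObject * object) {
+ *     count++;
+ *     return false;            // false means continue iterating
+ * });
+ * @/textblock
+ * </pre>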
diff --git a/libkern/libkern/c++/OSCollectionIterator.h b/libkern/libkern/c++/OSCollectionIterator.h
index 235877add..d82cff509 100644
--- a/libkern/libkern/c++/OSCollectionIterator.h
+++ b/libkern/libkern/c++/OSCollectionIterator.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* IOCollectionIterator.h created by gvdl on Fri 1998-10-30 */
@@ -40,7 +40,7 @@ class OSCollection;
  * @abstract
  * This header declares the OSCollectionIterator collection class.
  */
- 
+
 
 /*!
  * @class OSCollectionIterator
@@ -83,126 +83,126 @@ class OSCollection;
  * With very few exceptions in the I/O Kit, all Libkern-based C++
  * classes, functions, and macros are unsafe
  * to use in a primary interrupt context.
- * Consult the I/O Kit documentation related to primary interrupts 
+ * Consult the I/O Kit documentation related to primary interrupts
  * for more information.
  *
  * OSCollectionIterator provides no concurrency protection.
  */
 class OSCollectionIterator : public OSIterator
 {
-    OSDeclareDefaultStructors(OSCollectionIterator)
+	OSDeclareDefaultStructors(OSCollectionIterator)
 protected:
 // xx-review: Do we want to document these?
-    const OSCollection * collection;
-    void               * collIterator;
-    unsigned int         initialUpdateStamp;
-    bool                 valid;
+	const OSCollection * collection;
+	void               * collIterator;
+	unsigned int         initialUpdateStamp;
+	bool                 valid;
 public:
-    /*!
-     * @function withCollection
-     *
-     * @abstract
-     * Creates and initializes an OSCollectionIterator
-     * for the provided collection object.
-     *
-     * @param inColl The OSCollection-derived collection object to be iteratated.
-     *
-     * @result
-     * A new instance of OSCollectionIterator, or NULL on failure.
-     */
-    static OSCollectionIterator * withCollection(const OSCollection * inColl);
-
-
-    /*!
-     * @function initWithCollection
-     *
-     * @abstract
-     * Initializes an OSCollectionIterator
-     * for the provided collection object.
-     *
-     * @param inColl The OSCollection-derived collection object to be iteratated.
-     * @result
-     * true if the initialization was successful,
-     * or false on failure.
-     *
-     * @discussion
-     * Not for general use. Use the static instance creation method
-     * @link withCollection withCollection@/link instead.
-     */
-    virtual bool initWithCollection(const OSCollection * inColl);
-
-
-    /*!
-     * @function free
-     *
-     * @abstract
-     * Releases or deallocates any resources used
-     * by the OSCollectionIterator object.
-     *
-     * @discussion
-     * This function should not be called directly;
-     * use
-     * @link
-     * //apple_ref/cpp/instm/OSObject/release/virtualvoid/()
-     * release@/link
-     * instead.
-     */
-    virtual void free() APPLE_KEXT_OVERRIDE;
-
-
-    /*!
-     * @function reset
-     *
-     * @abstract
-     * Resets the iterator to the beginning of the collection,
-     * as if it had just been created.
-     */
-    virtual void reset() APPLE_KEXT_OVERRIDE;
-
-
-    /*!
-     * @function isValid
-     *
-     * @abstract
-     * Checks that the collection hasn't been modified during iteration.
-     *
-     * @return
-     * true if the iterator is valid for continued use,
-     * false otherwise
-     * (typically because the iteration context has been modified).
-     */
-    virtual bool isValid() APPLE_KEXT_OVERRIDE;
-
-
-    /*!
-     * @function getNextObject
-     *
-     * @abstract
-     * Advances to and returns the next object in the iteration.
-     *
-     * @return
-     * The next object in the iteration context,
-     * NULL if there is no next object
-     * or if the iterator is no longer valid.
-     *
-     * @discussion
-     * This function first calls
-     * @link //apple_ref/cpp/instm/OSCollectionIterator/isValid/virtualbool/()
-     * isValid@/link
-     * and returns NULL if that function
-     * returns false.
-     *
-     * Subclasses must implement this pure virtual function
-     * to check for validity with
-     * @link
-     * //apple_ref/cpp/instm/OSCollectionIterator/isValid/virtualbool/()
-     * isValid@/link,
-     * and then to advance the iteration context to the next object (if any)
-     * and return that next object, or NULL if there is none.
-     */
-    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
+/*!
+ * @function withCollection
+ *
+ * @abstract
+ * Creates and initializes an OSCollectionIterator
+ * for the provided collection object.
+ *
+ * @param inColl The OSCollection-derived collection object to be iterated.
+ *
+ * @result
+ * A new instance of OSCollectionIterator, or NULL on failure.
+ */
+	static OSCollectionIterator * withCollection(const OSCollection * inColl);
+
+
+/*!
+ * @function initWithCollection
+ *
+ * @abstract
+ * Initializes an OSCollectionIterator
+ * for the provided collection object.
+ *
+ * @param inColl The OSCollection-derived collection object to be iterated.
+ * @result
+ * true if the initialization was successful,
+ * or false on failure.
+ *
+ * @discussion
+ * Not for general use. Use the static instance creation method
+ * @link withCollection withCollection@/link instead.
+ */
+	virtual bool initWithCollection(const OSCollection * inColl);
+
+
+/*!
+ * @function free
+ *
+ * @abstract
+ * Releases or deallocates any resources used
+ * by the OSCollectionIterator object.
+ *
+ * @discussion
+ * This function should not be called directly;
+ * use
+ * @link
+ * //apple_ref/cpp/instm/OSObject/release/virtualvoid/()
+ * release@/link
+ * instead.
+ */ + virtual void free() APPLE_KEXT_OVERRIDE; + + +/*! + * @function reset + * + * @abstract + * Resets the iterator to the beginning of the collection, + * as if it had just been created. + */ + virtual void reset() APPLE_KEXT_OVERRIDE; + + +/*! + * @function isValid + * + * @abstract + * Checks that the collection hasn't been modified during iteration. + * + * @return + * true if the iterator is valid for continued use, + * false otherwise + * (typically because the iteration context has been modified). + */ + virtual bool isValid() APPLE_KEXT_OVERRIDE; + + +/*! + * @function getNextObject + * + * @abstract + * Advances to and returns the next object in the iteration. + * + * @return + * The next object in the iteration context, + * NULL if there is no next object + * or if the iterator is no longer valid. + * + * @discussion + * This function first calls + * @link //apple_ref/cpp/instm/OSCollectionIterator/isValid/virtualbool/() + * isValid@/link + * and returns NULL if that function + * returns false. + * + * Subclasses must implement this pure virtual function + * to check for validity with + * @link + * //apple_ref/cpp/instm/OSCollectionIterator/isValid/virtualbool/() + * isValid@/link, + * and then to advance the iteration context to the next object (if any) + * and return that next object, or NULL if there is none. + */ + virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE; }; #endif /* !_OS_OSCOLLECTIONITERATOR_H */ diff --git a/libkern/libkern/c++/OSContainers.h b/libkern/libkern/c++/OSContainers.h index 32a433fcd..f3e07940e 100644 --- a/libkern/libkern/c++/OSContainers.h +++ b/libkern/libkern/c++/OSContainers.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOContainers.h created by rsulack on Fri 26-Jun-1998 */ diff --git a/libkern/libkern/c++/OSData.h b/libkern/libkern/c++/OSData.h index 031a056a3..b1547ae8e 100644 --- a/libkern/libkern/c++/OSData.h +++ b/libkern/libkern/c++/OSData.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOData.h created by rsulack on Wed 17-Sep-1997 */ @@ -33,7 +33,7 @@ #include -class OSString; +class OSString; /*! * @header @@ -41,8 +41,8 @@ class OSString; * @abstract * This header declares the OSData container class. */ - - + + /*! * @class OSData * @@ -61,7 +61,7 @@ class OSString; * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSData provides no concurrency protection; @@ -73,675 +73,674 @@ class OSString; */ class OSData : public OSObject { - friend class OSSerialize; + friend class OSSerialize; - OSDeclareDefaultStructors(OSData) + OSDeclareDefaultStructors(OSData) #if APPLE_KEXT_ALIGN_CONTAINERS protected: - unsigned int length; - unsigned int capacity; - unsigned int capacityIncrement; - void * data; + unsigned int length; + unsigned int capacity; + unsigned int capacityIncrement; + void * data; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: - void * data; - unsigned int length; - unsigned int capacity; - unsigned int capacityIncrement; + void * data; + unsigned int length; + unsigned int capacity; + unsigned int capacityIncrement; #endif /* APPLE_KEXT_ALIGN_CONTAINERS */ #ifdef XNU_KERNEL_PRIVATE - /* Available within xnu source only */ +/* Available within xnu source only */ public: - typedef void (*DeallocFunction)(void * ptr, unsigned int length); + typedef void (*DeallocFunction)(void * ptr, unsigned int length); protected: - struct ExpansionData - { + struct ExpansionData { DeallocFunction deallocFunction; bool disableSerialization; }; #else /* XNU_KERNEL_PRIVATE */ private: - typedef void (*DeallocFunction)(void * ptr, unsigned int length); + typedef void (*DeallocFunction)(void * ptr, unsigned int length); protected: struct ExpansionData; #endif /* XNU_KERNEL_PRIVATE */ - - /* Reserved for future use. (Internal use only) */ - ExpansionData * reserved; + +/* Reserved for future use. (Internal use only) */ + ExpansionData * reserved; public: - /*! - * @function withCapacity - * - * @abstract - * Creates and initializes an empty instance of OSData. - * - * @param capacity The initial capacity of the OSData object in bytes. - * - * @result - * An instance of OSData with a reference count of 1; - * NULL on failure. - * - * @discussion - * capacity may be zero. 
- * The OSData object will allocate a buffer internally - * when necessary, and will grow as needed to accommodate more bytes - * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, - * for which a nonzero initial capacity is a hard limit). - */ - static OSData * withCapacity(unsigned int capacity); - - - /*! - * @function withBytes - * - * @abstract - * Creates and initializes an instance of OSData - * with a copy of the provided data buffer. - * - * @param bytes The buffer of data to copy. - * @param numBytes The length of bytes. - * - * @result - * An instance of OSData containing a copy of the provided byte array, - * with a reference count of 1; - * NULL on failure. - * - * @discussion - * The new OSData object will grow as needed to accommodate more bytes - * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, - * for which a nonzero initial capacity is a hard limit). - */ - static OSData * withBytes( - const void * bytes, - unsigned int numBytes); - - - /*! - * @function withBytesNoCopy - * - * @abstract - * Creates and initializes an instance of OSData - * that shares the provided data buffer. - * - * @param bytes The buffer of data to represent. - * @param numBytes The length of bytes. - * - * @result - * A instance of OSData that shares the provided byte array, - * with a reference count of 1; - * NULL on failure. - * - * @discussion - * An OSData object created with this function - * does not claim ownership - * of the data buffer, but shares it with the caller. - * When the caller determines that the OSData object has actually been freed, - * it can safely dispose of the data buffer. - * Conversely, if it frees the shared data buffer, - * it must not attempt to use the OSData object and should release it. - * - * An OSData object created with shared external data cannot append bytes, - * but you can get the byte pointer and - * modify bytes within the shared buffer. - */ - static OSData * withBytesNoCopy( - void * bytes, - unsigned int numBytes); - - - /*! - * @function withData - * - * @abstract - * Creates and initializes an instance of OSData - * with contents copied from another OSData object. - * - * @param inData An OSData object that provides the initial data. - * - * @result - * An instance of OSData containing a copy of the data in inData, - * with a reference count of 1; - * NULL on failure. - * - * @discussion - * The new OSData object will grow as needed to accommodate more bytes - * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, - * for which a nonzero initial capacity is a hard limit). - */ - static OSData * withData(const OSData * inData); - - - /*! - * @function withData - * - * @abstract - * Creates and initializes an instance of OSData - * with contents copied from a range within another OSData object. - * - * @param inData An OSData object that provides the initial data. - * @param start The starting index from which bytes will be copied. - * @param numBytes The number of bytes to be copied from start. - * - * @result - * An instance of OSData containing a copy - * of the specified data range from inData, - * with a reference count of 1; - * NULL on failure. - * - * @discussion - * The new OSData object will grow as needed to accommodate more bytes - * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, - * for which a nonzero initial capacity is a hard limit). - */ - static OSData * withData( - const OSData * inData, - unsigned int start, - unsigned int numBytes); - - - /*! 
- * @function initWithCapacity - * - * @abstract - * Initializes an instance of OSData. - * - * @param capacity The initial capacity of the OSData object in bytes. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSData/withCapacity/staticOSData*\/(unsignedint) - * withCapacity@/link instead. - * - * capacity may be zero. - * The OSData object will allocate a buffer internally - * when necessary, and will grow as needed to accommodate more bytes - * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, - * for which a nonzero initial capacity is a hard limit). - */ - virtual bool initWithCapacity(unsigned int capacity); - - - /*! - * @function initWithBytes - * - * @abstract - * Initializes an instance of OSData - * with a copy of the provided data buffer. - * - * @param bytes The buffer of data to copy. - * @param numBytes The length of bytes. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link withBytes withBytes@/link instead. - * - * The new OSData object will grow as needed to accommodate more bytes - * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, - * for which a nonzero initial capacity is a hard limit). - */ - virtual bool initWithBytes( - const void * bytes, - unsigned int numBytes); - - - /*! - * @function initWithBytesNoCopy - * - * @abstract - * Initializes an instance of OSData - * to share the provided data buffer. - * - * @param bytes The buffer of data to represent. - * @param numBytes The length of bytes. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link withBytesNoCopy withBytesNoCopy@/link instead. - * - * An OSData object initialized with this function - * does not claim ownership - * of the data buffer, but merely shares it with the caller. - * - * An OSData object created with shared external data cannot append bytes, - * but you can get the byte pointer and - * modify bytes within the shared buffer. - */ - virtual bool initWithBytesNoCopy( - void * bytes, - unsigned int numBytes); - - - /*! - * @function initWithData - * - * @abstract - * Creates and initializes an instance of OSData - * with contents copied from another OSData object. - * - * @param inData An OSData object that provides the initial data. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSData/withData/staticOSData*\/(constOSData*) - * withData(OSData *)@/link - * instead. - * - * The new OSData object will grow as needed to accommodate more bytes - * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, - * for which a nonzero initial capacity is a hard limit). - */ - virtual bool initWithData(const OSData * inData); - - - /*! - * @function initWithData - * - * @abstract - * Initializes an instance of OSData - * with contents copied from a range within another OSData object. - * - * @param inData An OSData object that provides the initial data. - * @param start The starting index from which bytes will be copied. - * @param numBytes The number of bytes to be copied from start. - * - * @result - * Returns true on success, false on failure. - * - * @discussion - * Not for general use. 
Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSData/withData/staticOSData*\/(constOSData*,unsignedint,unsignedint) - * withData(OSData *, unsigned int, unsigned int)@/link - * instead. - * - * The new OSData object will grow as needed to accommodate more bytes - * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, - * for which a nonzero initial capacity is a hard limit). - */ - virtual bool initWithData( - const OSData * inData, - unsigned int start, - unsigned int numBytes); - - - /*! - * @function free - * - * @abstract - * Deallocates or releases any resources - * used by the OSData instance. - * - * @discussion - * This function should not be called directly; - * use - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link - * instead. - */ - virtual void free() APPLE_KEXT_OVERRIDE; - - - /*! - * @function getLength - * - * @abstract - * Returns the number of bytes in or referenced by the OSData object. - * - * @result - * The number of bytes in or referenced by the OSData object. - */ - virtual unsigned int getLength() const; - - - /*! - * @function getCapacity - * - * @abstract - * Returns the total number of bytes the OSData can store without reallocating. - * - * @result - * The total number bytes the OSData can store without reallocating. - * - * @discussion - * OSData objects grow when full to accommodate additional bytes. - * See - * @link - * //apple_ref/cpp/instm/OSData/getCapacityIncrement/virtualunsignedint/() - * getCapacityIncrement@/link - * and - * @link - * //apple_ref/cpp/instm/OSData/ensureCapacity/virtualunsignedint/(unsignedint) - * ensureCapacity@/link. - * - * OSData objects created or initialized to use a shared buffer - * do not make use of this attribute, and return -1 from this function. - */ - virtual unsigned int getCapacity() const; - - - /*! - * @function getCapacityIncrement - * - * @abstract - * Returns the storage increment of the OSData object. - * - * @result - * The storage increment of the OSData object. - * - * @discussion - * An OSData object allocates storage for bytes in multiples - * of the capacity increment. - * - * OSData objects created or initialized to use a shared buffer - * do not make use of this attribute. - */ - virtual unsigned int getCapacityIncrement() const; - - - /*! - * @function setCapacityIncrement - * - * @abstract - * Sets the storage increment of the array. - * - * @result - * The original storage increment of the array. - * - * @discussion - * An OSArray allocates storage for objects in multiples - * of the capacity increment. - * - * OSData objects created or initialized to use a shared buffer - * do not make use of this attribute. - */ - virtual unsigned int setCapacityIncrement(unsigned increment); +/*! + * @function withCapacity + * + * @abstract + * Creates and initializes an empty instance of OSData. + * + * @param capacity The initial capacity of the OSData object in bytes. + * + * @result + * An instance of OSData with a reference count of 1; + * NULL on failure. + * + * @discussion + * capacity may be zero. + * The OSData object will allocate a buffer internally + * when necessary, and will grow as needed to accommodate more bytes + * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link, + * for which a nonzero initial capacity is a hard limit). + */ + static OSData * withCapacity(unsigned int capacity); + + +/*! 
+ * @function withBytes
+ *
+ * @abstract
+ * Creates and initializes an instance of OSData
+ * with a copy of the provided data buffer.
+ *
+ * @param bytes The buffer of data to copy.
+ * @param numBytes The length of bytes.
+ *
+ * @result
+ * An instance of OSData containing a copy of the provided byte array,
+ * with a reference count of 1;
+ * NULL on failure.
+ *
+ * @discussion
+ * The new OSData object will grow as needed to accommodate more bytes
+ * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link,
+ * for which a nonzero initial capacity is a hard limit).
+ */
+	static OSData * withBytes(
+		const void   * bytes,
+		unsigned int   numBytes);
+
+
+/*!
+ * @function withBytesNoCopy
+ *
+ * @abstract
+ * Creates and initializes an instance of OSData
+ * that shares the provided data buffer.
+ *
+ * @param bytes The buffer of data to represent.
+ * @param numBytes The length of bytes.
+ *
+ * @result
+ * An instance of OSData that shares the provided byte array,
+ * with a reference count of 1;
+ * NULL on failure.
+ *
+ * @discussion
+ * An OSData object created with this function
+ * does not claim ownership
+ * of the data buffer, but shares it with the caller.
+ * When the caller determines that the OSData object has actually been freed,
+ * it can safely dispose of the data buffer.
+ * Conversely, if it frees the shared data buffer,
+ * it must not attempt to use the OSData object and should release it.
+ *
+ * An OSData object created with shared external data cannot append bytes,
+ * but you can get the byte pointer and
+ * modify bytes within the shared buffer.
+ */
+	static OSData * withBytesNoCopy(
+		void         * bytes,
+		unsigned int   numBytes);
+
+
+/*!
+ * @function withData
+ *
+ * @abstract
+ * Creates and initializes an instance of OSData
+ * with contents copied from another OSData object.
+ *
+ * @param inData An OSData object that provides the initial data.
+ *
+ * @result
+ * An instance of OSData containing a copy of the data in inData,
+ * with a reference count of 1;
+ * NULL on failure.
+ *
+ * @discussion
+ * The new OSData object will grow as needed to accommodate more bytes
+ * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link,
+ * for which a nonzero initial capacity is a hard limit).
+ */
+	static OSData * withData(const OSData * inData);
+
+
+/*!
+ * @function withData
+ *
+ * @abstract
+ * Creates and initializes an instance of OSData
+ * with contents copied from a range within another OSData object.
+ *
+ * @param inData An OSData object that provides the initial data.
+ * @param start The starting index from which bytes will be copied.
+ * @param numBytes The number of bytes to be copied from start.
+ *
+ * @result
+ * An instance of OSData containing a copy
+ * of the specified data range from inData,
+ * with a reference count of 1;
+ * NULL on failure.
+ *
+ * @discussion
+ * The new OSData object will grow as needed to accommodate more bytes
+ * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link,
+ * for which a nonzero initial capacity is a hard limit).
+ */
+	static OSData * withData(
+		const OSData * inData,
+		unsigned int   start,
+		unsigned int   numBytes);
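A minimal sketch of the creation methods documented above (not from the patch; the names and byte values are illustrative), showing the ownership difference between the copying and NoCopy variants:

    #include <libkern/c++/OSData.h>

    static void
    create_sketch(void)
    {
        static char shared[] = { 'u', 's', 'b', '\0' };   // outlives the OSData below

        // withBytes copies: the object owns a private, growable buffer.
        OSData * copied = OSData::withBytes("usb", 3);

        // withBytesNoCopy shares: no copy is made, appends are disallowed,
        // and the caller stays responsible for the buffer's lifetime.
        OSData * borrowed = OSData::withBytesNoCopy(shared, sizeof(shared));

        if (copied) {
            copied->release();
        }
        if (borrowed) {
            borrowed->release();   // safe here: 'shared' has static storage duration
        }
    }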
+
+
+/*!
+ * @function initWithCapacity
+ *
+ * @abstract
+ * Initializes an instance of OSData.
+ *
+ * @param capacity The initial capacity of the OSData object in bytes.
+ *
+ * @result
+ * true on success, false on failure.
+ *
+ * @discussion
+ * Not for general use. Use the static instance creation method
+ * @link
+ * //apple_ref/cpp/clm/OSData/withCapacity/staticOSData*\/(unsignedint)
+ * withCapacity@/link instead.
+ *
+ * capacity may be zero.
+ * The OSData object will allocate a buffer internally
+ * when necessary, and will grow as needed to accommodate more bytes
+ * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link,
+ * for which a nonzero initial capacity is a hard limit).
+ */
+	virtual bool initWithCapacity(unsigned int capacity);
+
+
+/*!
+ * @function initWithBytes
+ *
+ * @abstract
+ * Initializes an instance of OSData
+ * with a copy of the provided data buffer.
+ *
+ * @param bytes The buffer of data to copy.
+ * @param numBytes The length of bytes.
+ *
+ * @result
+ * true on success, false on failure.
+ *
+ * @discussion
+ * Not for general use. Use the static instance creation method
+ * @link withBytes withBytes@/link instead.
+ *
+ * The new OSData object will grow as needed to accommodate more bytes
+ * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link,
+ * for which a nonzero initial capacity is a hard limit).
+ */
+	virtual bool initWithBytes(
+		const void   * bytes,
+		unsigned int   numBytes);
+
+
+/*!
+ * @function initWithBytesNoCopy
+ *
+ * @abstract
+ * Initializes an instance of OSData
+ * to share the provided data buffer.
+ *
+ * @param bytes The buffer of data to represent.
+ * @param numBytes The length of bytes.
+ *
+ * @result
+ * true on success, false on failure.
+ *
+ * @discussion
+ * Not for general use. Use the static instance creation method
+ * @link withBytesNoCopy withBytesNoCopy@/link instead.
+ *
+ * An OSData object initialized with this function
+ * does not claim ownership
+ * of the data buffer, but merely shares it with the caller.
+ *
+ * An OSData object created with shared external data cannot append bytes,
+ * but you can get the byte pointer and
+ * modify bytes within the shared buffer.
+ */
+	virtual bool initWithBytesNoCopy(
+		void         * bytes,
+		unsigned int   numBytes);
+
+
+/*!
+ * @function initWithData
+ *
+ * @abstract
+ * Initializes an instance of OSData
+ * with contents copied from another OSData object.
+ *
+ * @param inData An OSData object that provides the initial data.
+ *
+ * @result
+ * true on success, false on failure.
+ *
+ * @discussion
+ * Not for general use. Use the static instance creation method
+ * @link
+ * //apple_ref/cpp/clm/OSData/withData/staticOSData*\/(constOSData*)
+ * withData(OSData *)@/link
+ * instead.
+ *
+ * The new OSData object will grow as needed to accommodate more bytes
+ * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link,
+ * for which a nonzero initial capacity is a hard limit).
+ */
+	virtual bool initWithData(const OSData * inData);
+
+
+/*!
+ * @function initWithData
+ *
+ * @abstract
+ * Initializes an instance of OSData
+ * with contents copied from a range within another OSData object.
+ *
+ * @param inData An OSData object that provides the initial data.
+ * @param start The starting index from which bytes will be copied.
+ * @param numBytes The number of bytes to be copied from start.
+ *
+ * @result
+ * Returns true on success, false on failure.
+ *
+ * @discussion
+ * Not for general use. Use the static instance creation method
+ * @link
+ * //apple_ref/cpp/clm/OSData/withData/staticOSData*\/(constOSData*,unsignedint,unsignedint)
+ * withData(OSData *, unsigned int, unsigned int)@/link
+ * instead.
+ *
+ * The new OSData object will grow as needed to accommodate more bytes
+ * (unlike @link //apple_ref/doc/uid/20001498 CFMutableData@/link,
+ * for which a nonzero initial capacity is a hard limit).
+ */
+	virtual bool initWithData(
+		const OSData * inData,
+		unsigned int   start,
+		unsigned int   numBytes);
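The initWith... members above are primarily hooks for subclasses; external code is directed to the with... factories. A minimal subclass sketch under that reading (the class name is hypothetical, not from the patch):

    #include <libkern/c++/OSData.h>

    class MyLoggedData : public OSData
    {
        OSDeclareDefaultStructors(MyLoggedData)
    public:
        virtual bool initWithCapacity(unsigned int capacity) APPLE_KEXT_OVERRIDE;
    };

    OSDefineMetaClassAndStructors(MyLoggedData, OSData)

    bool
    MyLoggedData::initWithCapacity(unsigned int capacity)
    {
        // Chain to the superclass initializer first, then do subclass setup.
        if (!OSData::initWithCapacity(capacity)) {
            return false;
        }
        return true;
    }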
+
+
+/*!
+ * @function free
+ *
+ * @abstract
+ * Deallocates or releases any resources
+ * used by the OSData instance.
+ *
+ * @discussion
+ * This function should not be called directly;
+ * use
+ * @link
+ * //apple_ref/cpp/instm/OSObject/release/virtualvoid/()
+ * release@/link
+ * instead.
+ */
+	virtual void free() APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function getLength
+ *
+ * @abstract
+ * Returns the number of bytes in or referenced by the OSData object.
+ *
+ * @result
+ * The number of bytes in or referenced by the OSData object.
+ */
+	virtual unsigned int getLength() const;
+
+
+/*!
+ * @function getCapacity
+ *
+ * @abstract
+ * Returns the total number of bytes the OSData can store without reallocating.
+ *
+ * @result
+ * The total number of bytes the OSData can store without reallocating.
+ *
+ * @discussion
+ * OSData objects grow when full to accommodate additional bytes.
+ * See
+ * @link
+ * //apple_ref/cpp/instm/OSData/getCapacityIncrement/virtualunsignedint/()
+ * getCapacityIncrement@/link
+ * and
+ * @link
+ * //apple_ref/cpp/instm/OSData/ensureCapacity/virtualunsignedint/(unsignedint)
+ * ensureCapacity@/link.
+ *
+ * OSData objects created or initialized to use a shared buffer
+ * do not make use of this attribute, and return -1 from this function.
+ */
+	virtual unsigned int getCapacity() const;
+
+
+/*!
+ * @function getCapacityIncrement
+ *
+ * @abstract
+ * Returns the storage increment of the OSData object.
+ *
+ * @result
+ * The storage increment of the OSData object.
+ *
+ * @discussion
+ * An OSData object allocates storage for bytes in multiples
+ * of the capacity increment.
+ *
+ * OSData objects created or initialized to use a shared buffer
+ * do not make use of this attribute.
+ */
+	virtual unsigned int getCapacityIncrement() const;
+
+
+/*!
+ * @function setCapacityIncrement
+ *
+ * @abstract
+ * Sets the storage increment of the OSData object.
+ *
+ * @result
+ * The original storage increment of the OSData object.
+ *
+ * @discussion
+ * An OSData object allocates storage for bytes in multiples
+ * of the capacity increment.
+ *
+ * OSData objects created or initialized to use a shared buffer
+ * do not make use of this attribute.
+ */
+	virtual unsigned int setCapacityIncrement(unsigned increment);
 
 // xx-review: does not check for capacity == EXTERNAL
-    /*!
-     * @function ensureCapacity
-     *
-     * @abstract
-     * Ensures the array has enough space
-     * to store the requested number of bytes.
-     *
-     * @param newCapacity The total number of bytes the OSData object
-     * should be able to store.
-     *
-     * @result
-     * Returns the new capacity of the OSData object,
-     * which may be different from the number requested
-     * (if smaller, reallocation of storage failed).
-     *
-     * @discussion
-     * This function immediately resizes the OSData's buffer, if necessary,
-     * to accommodate at least newCapacity bytes.
-     * If newCapacity is not greater than the current capacity,
-     * or if an allocation error occurs, the original capacity is returned.
-     *
-     * There is no way to reduce the capacity of an OSData.
-     *
-     * An OSData object created "NoCopy" does not allow resizing.
-     */
-    virtual unsigned int ensureCapacity(unsigned int newCapacity);
-
-
-    /*!
- * @function appendBytes - * - * @abstract - * Appends a buffer of bytes to the OSData object's internal data buffer. - * - * @param bytes A pointer to the data to append. - * If bytes is NULL - * then a zero-filled buffer of length numBytes - * is appended. - * @param numBytes The number of bytes from bytes to append. - * - * @result - * true if the new data was successfully added, - * false on failure. - * - * @discussion - * This function immediately resizes the OSData's buffer, if necessary, - * to accommodate the new total size. - * - * An OSData object created "NoCopy" does not allow bytes - * to be appended. - */ - virtual bool appendBytes( - const void * bytes, - unsigned int numBytes); - - - /*! - * @function appendBytes - * - * @abstract - * Appends the data contained in another OSData object. - * - * @param aDataObj The OSData object whose contents will be appended. - * - * @result - * true if the new data was successfully added, - * false on failure. - * - * @discussion - * This function immediately resizes the OSData's buffer, if necessary, - * to accommodate the new total size. - * - * An OSData object created "NoCopy" does not allow bytes - * to be appended. - */ - virtual bool appendBytes(const OSData * aDataObj); - - - /*! - * @function getBytesNoCopy - * - * @abstract - * Returns a pointer to the OSData object's internal data buffer. - * - * @result - * A pointer to the OSData object's internal data buffer. - * - * @discussion - * You can modify the existing contents of an OSData object - * via this function. - * It works with OSData objects that have their own data buffers - * as well as with OSData objects that have shared buffers. - * - * If you append bytes or characters to an OSData object, - * it may have to reallocate its internal storage, - * rendering invalid an extrated pointer to that storage. - */ - virtual const void * getBytesNoCopy() const; - - - /*! - * @function getBytesNoCopy - * - * @abstract - * Returns a pointer into the OSData object's internal data buffer - * with a given offset and length. - * - * @param start The offset from the base of the internal data buffer. - * @param numBytes The length of the window. - * - * @result - * A pointer to the bytes in the specified range - * within the OSData object, - * or 0 if that range does not lie completely - * within the object's buffer. - * - * @discussion - * You can modify the existing contents of an OSData object - * via this function. - * It works with OSData objects that have their own data buffers - * as well as with OSData objects that have shared buffers. - * - * If you append bytes or characters to an OSData object, - * it may have to reallocate its internal storage, - * rendering invalid an extrated pointer to that storage. - */ - virtual const void * getBytesNoCopy( - unsigned int start, - unsigned int numBytes) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of two OSData objects. - * - * @param aDataObj The OSData object being compared against the receiver. - * - * @result - * true if the two OSData objects are equivalent, - * false otherwise. - * - * @discussion - * Two OSData objects are considered equal - * if they have same length and if their - * byte buffers hold the same contents. - */ - virtual bool isEqualTo(const OSData * aDataObj) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of an OSData object's contents - * to a C array of bytes. - * - * @param bytes A pointer to the bytes to compare. 
-     * @param numBytes The number of bytes to compare.
-     *
-     * @result
-     * true if the data buffers are equal
-     * over the given length,
-     * false otherwise.
-     */
-    virtual bool isEqualTo(
-        const void   * bytes,
-        unsigned int   numBytes) const;
-
-
-    /*!
-     * @function isEqualTo
-     *
-     * @abstract
-     * Tests the equality of an OSData object to an arbitrary object.
-     *
-     * @param anObject The object to be compared against the receiver.
-     *
-     * @result
-     * true if the two objects are equivalent,
-     * false otherwise.
-     *
-     * @discussion
-     * An OSData is considered equal to another object
-     * if that object is derived from OSData
-     * and contains the equivalent bytes of the same length.
-     */
-    virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE;
-
-
-    /*!
-     * @function isEqualTo
-     *
-     * @abstract
-     * Tests the equality of an OSData object to an OSString.
-     *
-     * @param aString The string object to be compared against the receiver.
-     *
-     * @result
-     * true if the two objects are equivalent,
-     * false otherwise.
-     *
-     * @discussion
-     * This function compares the bytes of the OSData object
-     * against those of the OSString,
-     * accounting for the possibility that an OSData
-     * might explicitly include a nul
-     * character as part of its total length.
-     * Thus, for example, an OSData object containing
-     * either the bytes <'u', 's', 'b', '\0'>
-     * or <'u', 's', 'b'>
-     * will compare as equal to the OSString containing "usb".
-     */
-    virtual bool isEqualTo(const OSString * aString) const;
-
-
-    /*!
-     * @function serialize
-     *
-     * @abstract
-     * Archives the receiver into the provided
-     * @link //apple_ref/doc/class/IORegistryEntry OSSerialize@/link object.
-     *
-     * @param serializer The OSSerialize object.
-     *
-     * @result
-     * true if serialization succeeds, false if not.
-     */
-    virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE;
-
-
-    /*!
-     * @function appendByte
-     *
-     * @abstract
-     * Appends a single byte value
-     * to the OSData object's internal data buffer
-     * a specified number of times.
-     *
-     * @param byte The byte value to append.
-     * @param numBytes The number of copies of byte to append.
-     *
-     * @result
-     * true if the new data was successfully added,
-     * false if not.
-     *
-     * @discussion
-     * This function immediately resizes the OSData's buffer, if necessary,
-     * to accommodate the new total size.
-     *
-     * An OSData object created "NoCopy" does not allow bytes
-     * to be appended.
-     */
-    virtual bool appendByte(
-        unsigned char byte,
-        unsigned int  numBytes);
-
-
-    void setSerializable(bool serializable);
+/*!
+ * @function ensureCapacity
+ *
+ * @abstract
+ * Ensures the OSData object has enough space
+ * to store the requested number of bytes.
+ *
+ * @param newCapacity The total number of bytes the OSData object
+ * should be able to store.
+ *
+ * @result
+ * Returns the new capacity of the OSData object,
+ * which may be different from the number requested
+ * (if smaller, reallocation of storage failed).
+ *
+ * @discussion
+ * This function immediately resizes the OSData's buffer, if necessary,
+ * to accommodate at least newCapacity bytes.
+ * If newCapacity is not greater than the current capacity,
+ * or if an allocation error occurs, the original capacity is returned.
+ *
+ * There is no way to reduce the capacity of an OSData.
+ *
+ * An OSData object created "NoCopy" does not allow resizing.
+ */
+	virtual unsigned int ensureCapacity(unsigned int newCapacity);
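A minimal sketch of the reserve-then-write pattern the ensureCapacity discussion above prescribes (hypothetical helper; assumes 'data' was not created "NoCopy"; appendBytes is documented next):

    #include <libkern/c++/OSData.h>

    static bool
    reserve_then_append(OSData * data)
    {
        // Pre-size once; the return value is the capacity actually obtained.
        if (data->ensureCapacity(64) < 64) {
            return false;          // reallocation failed; capacity is unchanged
        }

        const char chunk[16] = { 0 };
        // appendBytes(NULL, n) would instead append n zero-filled bytes.
        return data->appendBytes(chunk, sizeof(chunk));
    }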
+
+
+/*!
+ * @function appendBytes
+ *
+ * @abstract
+ * Appends a buffer of bytes to the OSData object's internal data buffer.
+ *
+ * @param bytes A pointer to the data to append.
+ * If bytes is NULL
+ * then a zero-filled buffer of length numBytes
+ * is appended.
+ * @param numBytes The number of bytes from bytes to append.
+ *
+ * @result
+ * true if the new data was successfully added,
+ * false on failure.
+ *
+ * @discussion
+ * This function immediately resizes the OSData's buffer, if necessary,
+ * to accommodate the new total size.
+ *
+ * An OSData object created "NoCopy" does not allow bytes
+ * to be appended.
+ */
+	virtual bool appendBytes(
+		const void   * bytes,
+		unsigned int   numBytes);
+
+
+/*!
+ * @function appendBytes
+ *
+ * @abstract
+ * Appends the data contained in another OSData object.
+ *
+ * @param aDataObj The OSData object whose contents will be appended.
+ *
+ * @result
+ * true if the new data was successfully added,
+ * false on failure.
+ *
+ * @discussion
+ * This function immediately resizes the OSData's buffer, if necessary,
+ * to accommodate the new total size.
+ *
+ * An OSData object created "NoCopy" does not allow bytes
+ * to be appended.
+ */
+	virtual bool appendBytes(const OSData * aDataObj);
+
+
+/*!
+ * @function getBytesNoCopy
+ *
+ * @abstract
+ * Returns a pointer to the OSData object's internal data buffer.
+ *
+ * @result
+ * A pointer to the OSData object's internal data buffer.
+ *
+ * @discussion
+ * You can modify the existing contents of an OSData object
+ * via this function.
+ * It works with OSData objects that have their own data buffers
+ * as well as with OSData objects that have shared buffers.
+ *
+ * If you append bytes or characters to an OSData object,
+ * it may have to reallocate its internal storage,
+ * rendering invalid an extracted pointer to that storage.
+ */
+	virtual const void * getBytesNoCopy() const;
+
+
+/*!
+ * @function getBytesNoCopy
+ *
+ * @abstract
+ * Returns a pointer into the OSData object's internal data buffer
+ * with a given offset and length.
+ *
+ * @param start The offset from the base of the internal data buffer.
+ * @param numBytes The length of the window.
+ *
+ * @result
+ * A pointer to the bytes in the specified range
+ * within the OSData object,
+ * or 0 if that range does not lie completely
+ * within the object's buffer.
+ *
+ * @discussion
+ * You can modify the existing contents of an OSData object
+ * via this function.
+ * It works with OSData objects that have their own data buffers
+ * as well as with OSData objects that have shared buffers.
+ *
+ * If you append bytes or characters to an OSData object,
+ * it may have to reallocate its internal storage,
+ * rendering invalid an extracted pointer to that storage.
+ */
+	virtual const void * getBytesNoCopy(
+		unsigned int   start,
+		unsigned int   numBytes) const;
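A minimal sketch of the windowed getBytesNoCopy above (hypothetical helper): the range must lie wholly within the buffer, and a later append can invalidate the pointer:

    #include <libkern/c++/OSData.h>

    static bool
    window_sketch(OSData * data)
    {
        // 4 bytes starting at offset 2; returns 0 if offset 2 plus length 4
        // does not lie completely within data->getLength() bytes.
        const void * window = data->getBytesNoCopy(2, 4);
        if (!window) {
            return false;
        }
        // Use 'window' before any call that might grow the buffer, such as
        // appendBytes(); growth may reallocate and invalidate the pointer.
        return true;
    }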
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of two OSData objects.
+ *
+ * @param aDataObj The OSData object being compared against the receiver.
+ *
+ * @result
+ * true if the two OSData objects are equivalent,
+ * false otherwise.
+ *
+ * @discussion
+ * Two OSData objects are considered equal
+ * if they have the same length and if their
+ * byte buffers hold the same contents.
+ */
+	virtual bool isEqualTo(const OSData * aDataObj) const;
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of an OSData object's contents
+ * to a C array of bytes.
+ *
+ * @param bytes A pointer to the bytes to compare.
+ * @param numBytes The number of bytes to compare.
+ *
+ * @result
+ * true if the data buffers are equal
+ * over the given length,
+ * false otherwise.
+ */
+	virtual bool isEqualTo(
+		const void   * bytes,
+		unsigned int   numBytes) const;
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of an OSData object to an arbitrary object.
+ *
+ * @param anObject The object to be compared against the receiver.
+ *
+ * @result
+ * true if the two objects are equivalent,
+ * false otherwise.
+ *
+ * @discussion
+ * An OSData is considered equal to another object
+ * if that object is derived from OSData
+ * and contains the equivalent bytes of the same length.
+ */
+	virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of an OSData object to an OSString.
+ *
+ * @param aString The string object to be compared against the receiver.
+ *
+ * @result
+ * true if the two objects are equivalent,
+ * false otherwise.
+ *
+ * @discussion
+ * This function compares the bytes of the OSData object
+ * against those of the OSString,
+ * accounting for the possibility that an OSData
+ * might explicitly include a nul
+ * character as part of its total length.
+ * Thus, for example, an OSData object containing
+ * either the bytes <'u', 's', 'b', '\0'>
+ * or <'u', 's', 'b'>
+ * will compare as equal to the OSString containing "usb".
+ */
+	virtual bool isEqualTo(const OSString * aString) const;
+
+
+/*!
+ * @function serialize
+ *
+ * @abstract
+ * Archives the receiver into the provided
+ * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object.
+ *
+ * @param serializer The OSSerialize object.
+ *
+ * @result
+ * true if serialization succeeds, false if not.
+ */
+	virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function appendByte
+ *
+ * @abstract
+ * Appends a single byte value
+ * to the OSData object's internal data buffer
+ * a specified number of times.
+ *
+ * @param byte The byte value to append.
+ * @param numBytes The number of copies of byte to append.
+ *
+ * @result
+ * true if the new data was successfully added,
+ * false if not.
+ *
+ * @discussion
+ * This function immediately resizes the OSData's buffer, if necessary,
+ * to accommodate the new total size.
+ *
+ * An OSData object created "NoCopy" does not allow bytes
+ * to be appended.
+ */ + virtual bool appendByte( + unsigned char byte, + unsigned int numBytes); + + + void setSerializable(bool serializable); #ifdef XNU_KERNEL_PRIVATE /* Available within xnu source only */ @@ -749,18 +748,18 @@ public: #else private: #endif - virtual void setDeallocFunction(DeallocFunction func); - OSMetaClassDeclareReservedUsed(OSData, 0); - bool isSerializable(void); + virtual void setDeallocFunction(DeallocFunction func); + OSMetaClassDeclareReservedUsed(OSData, 0); + bool isSerializable(void); private: - OSMetaClassDeclareReservedUnused(OSData, 1); - OSMetaClassDeclareReservedUnused(OSData, 2); - OSMetaClassDeclareReservedUnused(OSData, 3); - OSMetaClassDeclareReservedUnused(OSData, 4); - OSMetaClassDeclareReservedUnused(OSData, 5); - OSMetaClassDeclareReservedUnused(OSData, 6); - OSMetaClassDeclareReservedUnused(OSData, 7); + OSMetaClassDeclareReservedUnused(OSData, 1); + OSMetaClassDeclareReservedUnused(OSData, 2); + OSMetaClassDeclareReservedUnused(OSData, 3); + OSMetaClassDeclareReservedUnused(OSData, 4); + OSMetaClassDeclareReservedUnused(OSData, 5); + OSMetaClassDeclareReservedUnused(OSData, 6); + OSMetaClassDeclareReservedUnused(OSData, 7); }; #endif /* !_OS_OSDATA_H */ diff --git a/libkern/libkern/c++/OSDictionary.h b/libkern/libkern/c++/OSDictionary.h index 5168ca4d8..a7dcfcdb0 100644 --- a/libkern/libkern/c++/OSDictionary.h +++ b/libkern/libkern/c++/OSDictionary.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -49,8 +49,8 @@ class OSString; * @abstract * This header declares the OSDictionary collection class. */ - - + + /*! * @class OSDictionary * @@ -100,7 +100,7 @@ class OSString; * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. 
* * OSDictionary provides no concurrency protection; @@ -112,869 +112,876 @@ class OSString; */ class OSDictionary : public OSCollection { - friend class OSSerialize; + friend class OSSerialize; - OSDeclareDefaultStructors(OSDictionary) + OSDeclareDefaultStructors(OSDictionary) #if APPLE_KEXT_ALIGN_CONTAINERS protected: - unsigned int count; - unsigned int capacity; - unsigned int capacityIncrement; - struct dictEntry { - const OSSymbol * key; - const OSMetaClassBase * value; - }; - dictEntry * dictionary; + unsigned int count; + unsigned int capacity; + unsigned int capacityIncrement; + struct dictEntry { + const OSSymbol * key; + const OSMetaClassBase * value; +#if XNU_KERNEL_PRIVATE + static int compare(const void *, const void *); +#endif + }; + dictEntry * dictionary; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: - struct dictEntry { - const OSSymbol * key; - const OSMetaClassBase * value; - }; - dictEntry * dictionary; - unsigned int count; - unsigned int capacity; - unsigned int capacityIncrement; + struct dictEntry { + const OSSymbol * key; + const OSMetaClassBase * value; +#if XNU_KERNEL_PRIVATE + static int compare(const void *, const void *); +#endif + }; + dictEntry * dictionary; + unsigned int count; + unsigned int capacity; + unsigned int capacityIncrement; - struct ExpansionData { }; + struct ExpansionData { }; - /* Reserved for future use. (Internal use only) */ - ExpansionData * reserved; +/* Reserved for future use. (Internal use only) */ + ExpansionData * reserved; #endif /* APPLE_KEXT_ALIGN_CONTAINERS */ - // Member functions used by the OSCollectionIterator class. - virtual unsigned int iteratorSize() const APPLE_KEXT_OVERRIDE; - virtual bool initIterator(void * iterator) const APPLE_KEXT_OVERRIDE; - virtual bool getNextObjectForIterator(void * iterator, OSObject ** ret) const APPLE_KEXT_OVERRIDE; +// Member functions used by the OSCollectionIterator class. + virtual unsigned int iteratorSize() const APPLE_KEXT_OVERRIDE; + virtual bool initIterator(void * iterator) const APPLE_KEXT_OVERRIDE; + virtual bool getNextObjectForIterator(void * iterator, OSObject ** ret) const APPLE_KEXT_OVERRIDE; public: - /*! - * @function withCapacity - * - * @abstract - * Creates and initializes an empty OSDictionary. - * - * @param capacity The initial storage capacity of the new dictionary object. - * - * @result - * An empty instance of OSDictionary - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * capacity must be nonzero. - * The new dictionary will grow as needed to accommodate more key/object pairs - * (unlike @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, - * for which the initial capacity is a hard limit). - */ - static OSDictionary * withCapacity(unsigned int capacity); - - - /*! - * @function withObjects - * - * @abstract Creates and initializes an OSDictionary - * populated with keys and objects provided. - * - * @param objects A C array of OSMetaClassBase-derived objects. - * @param keys A C array of OSSymbol keys - * for the corresponding objects in objects. - * @param count The number of keys and objects - * to be placed into the dictionary. - * @param capacity The initial storage capacity of the new dictionary object. - * If 0, count is used; otherwise this value - * must be greater than or equal to count. - * - * @result - * An instance of OSDictionary - * containing the key/object pairs provided, - * with a retain count of 1; - * NULL on failure. 
- * - * @discussion - * objects and keys must be non-NULL, - * and count must be nonzero. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new dictionary will grow as needed - * to accommodate more key/object pairs - * (unlike - * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, - * for which the initial capacity is a hard limit). - */ - static OSDictionary * withObjects( - const OSObject * objects[], - const OSSymbol * keys[], - unsigned int count, - unsigned int capacity = 0); - - /*! - * @function withObjects - * - * @abstract - * Creates and initializes an OSDictionary - * populated with keys and objects provided. - * - * @param objects A C array of OSMetaClassBase-derived objects. - * @param keys A C array of OSString keys for the corresponding objects - * in objects. - * @param count The number of keys and objects - * to be placed into the dictionary. - * @param capacity The initial storage capacity of the new dictionary object. - * If 0, count is used; otherwise this value - * must be greater than or equal to count. - * - * @result - * An instance of OSDictionary - * containing the key/object pairs provided, - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * objects and keys must be non-NULL, - * and count must be nonzero. - * If capacity is nonzero, it must be greater than or equal to count. - * The new dictionary will grow as needed - * to accommodate more key/object pairs - * (unlike - * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, - * for which the initial capacity is a hard limit). - */ - static OSDictionary * withObjects( - const OSObject * objects[], - const OSString * keys[], - unsigned int count, - unsigned int capacity = 0); - - - /*! - * @function withDictionary - * - * @abstract - * Creates and initializes an OSDictionary - * populated with the contents of another dictionary. - * - * @param dict A dictionary whose contents will be stored - * in the new instance. - * @param capacity The initial storage capacity of the new dictionary object. - * If 0, the capacity is set to the number of key/value pairs - * in dict; - * otherwise capacity must be greater than or equal to - * the number of key/value pairs in dict. - * - * @result - * An instance of OSDictionary - * containing the key/value pairs of dict, - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * dict must be non-NULL. - * If capacity is nonzero, it must be greater than or equal to count. - * The new dictionary will grow as needed - * to accommodate more key/object pairs - * (unlike - * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, - * for which the initial capacity is a hard limit). - * - * The keys and objects in dict are retained for storage - * in the new OSDictionary, - * not copied. - */ - static OSDictionary * withDictionary( - const OSDictionary * dict, - unsigned int capacity = 0); - - - /*! - * @function initWithCapacity - * - * @abstract - * Initializes a new instance of OSDictionary. - * - * @param capacity The initial storage capacity of the new dictionary object. - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link //apple_ref/cpp/clm/OSDictionary/withCapacity/staticOSDictionary*\/(unsignedint) - * withCapacity@/link - * instead. - * - * capacity must be nonzero. 
- * The new dictionary will grow as needed - * to accommodate more key/object pairs - * (unlike - * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, - * for which the initial capacity is a hard limit). - */ - virtual bool initWithCapacity(unsigned int capacity); - - - /*! - * @function initWithObjects - * - * @abstract Initializes a new OSDictionary with keys and objects provided. - * - * @param objects A C array of OSMetaClassBase-derived objects. - * @param keys A C array of OSSymbol keys - * for the corresponding objects in objects. - * @param count The number of keys and objects to be placed - * into the dictionary. - * @param capacity The initial storage capacity of the new dictionary object. - * If 0, count is used; otherwise this value - * must be greater than or equal to count. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSDictionary/withObjects/staticOSDictionary*\/(constOSObject*,constOSString*,unsignedint,unsignedint) - * withObjects@/link - * instead. - * - * objects and keys must be non-NULL, - * and count must be nonzero. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new dictionary will grow as neede - * to accommodate more key/object pairs - * (unlike - * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, - * for which the initial capacity is a hard limit). - */ - virtual bool initWithObjects( - const OSObject * objects[], - const OSSymbol * keys[], - unsigned int count, - unsigned int capacity = 0); - - - /*! - * @function initWithObjects - * - * @abstract - * Initializes a new OSDictionary with keys and objects provided. - * - * @param objects A C array of OSMetaClassBase-derived objects. - * @param keys A C array of OSString keys - * for the corresponding objects in objects. - * @param count The number of keys and objects - * to be placed into the dictionary. - * @param capacity The initial storage capacity of the new dictionary object. - * If 0, count is used; otherwise this value - * must be greater than or equal to count. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSDictionary/withObjects/staticOSDictionary*\/(constOSObject*,constOSString*,unsignedint,unsignedint) - * withObjects@/link - * instead. - * - * objects and keys must be non-NULL, - * and count must be nonzero. - * If capacity is nonzero, it must be greater than or equal to count. - * The new dictionary will grow as needed - * to accommodate more key/object pairs - * (unlike - * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, - * for which the initial capacity is a hard limit). - */ - virtual bool initWithObjects( - const OSObject * objects[], - const OSString * keys[], - unsigned int count, - unsigned int capacity = 0); - - - /*! - * @function initWithDictionary - * - * @abstract - * Initializes a new OSDictionary - * with the contents of another dictionary. - * - * @param dict A dictionary whose contents will be placed - * in the new instance. - * @param capacity The initial storage capacity of the new dictionary object. - * If 0, the capacity is set to the number of key/value pairs - * in dict; - * otherwise capacity must be greater than or equal to - * the number of key/value pairs in dict. - * - * @result - * true on success, false on failure. 
- * - * @discussion - * Not for general use. Use the static instance creation method - * @link withDictionary withDictionary@/link instead. - * - * dict must be non-NULL. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new dictionary will grow as needed - * to accommodate more key/object pairs - * (unlike - * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, - * for which the initial capacity is a hard limit). - * - * The keys and objects in dict are retained for storage - * in the new OSDictionary, - * not copied. - */ - virtual bool initWithDictionary( - const OSDictionary * dict, - unsigned int capacity = 0); - - - /*! - * @function free - * - * @abstract - * Deallocates or releases any resources - * used by the OSDictionary instance. - * - * @discussion - * This function should not be called directly, - * use - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link - * instead. - */ - virtual void free() APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCount - * - * @abstract - * Returns the current number of key/object pairs - * contained within the dictionary. - * - * @result - * The current number of key/object pairs - * contained within the dictionary. - */ - virtual unsigned int getCount() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCapacity - * - * @abstract - * Returns the number of objects the dictionary can store without reallocating. - * - * @result - * The number objects the dictionary can store without reallocating. - * - * @discussion - * OSDictionary objects grow when full - * to accommodate additional key/object pairs. - * See - * @link - * //apple_ref/cpp/instm/OSDictionary/getCapacityIncrement/virtualunsignedint/() - * getCapacityIncrement@/link - * and - * @link - * //apple_ref/cpp/instm/OSDictionary/ensureCapacity/virtualunsignedint/(unsignedint) - * ensureCapacity@/link. - */ - virtual unsigned int getCapacity() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCapacityIncrement - * - * @abstract - * Returns the storage increment of the dictionary. - * - * @result - * The storage increment of the dictionary. - * - * @discussion - * An OSDictionary allocates storage for key/object pairs in multiples - * of the capacity increment. - */ - virtual unsigned int getCapacityIncrement() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function setCapacityIncrement - * - * @abstract - * Sets the storage increment of the dictionary. - * - * @result - * The new storage increment of the dictionary, - * which may be different from the number requested. - * - * @discussion - * An OSDictionary allocates storage for key/object pairs in multiples - * of the capacity increment. - * Calling this function does not immediately reallocate storage. - */ - virtual unsigned int setCapacityIncrement(unsigned increment) APPLE_KEXT_OVERRIDE; - - - /*! - * @function ensureCapacity - * - * @abstract - * Ensures the dictionary has enough space - * to store the requested number of key/object pairs. - * - * @param newCapacity The total number of key/object pairs the dictionary - * should be able to store. - * - * @result - * The new capacity of the dictionary, - * which may be different from the number requested - * (if smaller, reallocation of storage failed). - * - * @discussion - * This function immediately resizes the dictionary, if necessary, - * to accommodate at least newCapacity key/object pairs. 
- * If newCapacity is not greater than the current capacity, - * or if an allocation error occurs, the original capacity is returned. - * - * There is no way to reduce the capacity of an OSDictionary. - */ - virtual unsigned int ensureCapacity(unsigned int newCapacity) APPLE_KEXT_OVERRIDE; - - - /*! - * @function flushCollection - * - * @abstract - * Removes and releases all keys and objects within the dictionary. - * - * @discussion - * The dictionary's capacity (and therefore direct memory consumption) - * is not reduced by this function. - */ - virtual void flushCollection() APPLE_KEXT_OVERRIDE; - - - /*! - * @function setObject - * - * @abstract - * Stores an object in the dictionary under a key. - * - * @param aKey An OSSymbol identifying the object - * placed within the dictionary. - * It is automatically retained. - * @param anObject The object to be stored in the dictionary. - * It is automatically retained. - * - * @result - * true if the addition was successful, - * false otherwise. - * - * @discussion - * An object already stored under aKey is released. - */ - virtual bool setObject( - const OSSymbol * aKey, - const OSMetaClassBase * anObject); - - - /*! - * @function setObject - * - * @abstract Stores an object in the dictionary under a key. - * - * @param aKey An OSString identifying the object - * placed within the dictionary. - * @param anObject The object to be stored in the dictionary. - * It is automatically retained. - * - * @result - * true if the addition was successful, - * false otherwise. - * - * @discussion - * An OSSymbol for aKey is created internally. - * An object already stored under aKey is released. - */ - virtual bool setObject( - const OSString * aKey, - const OSMetaClassBase * anObject); - - - /*! - * @function setObject - * - * @abstract - * Stores an object in the dictionary under a key. - * - * @param aKey A C string identifying the object - * placed within the dictionary. - * @param anObject The object to be stored in the dictionary. - * It is automatically retained. - * - * @result - * true if the addition was successful, - * false otherwise. - * - * @discussion - * An OSSymbol for aKey is created internally. - * An object already stored under aKey is released. - */ - virtual bool setObject( - const char * aKey, - const OSMetaClassBase * anObject); - - - /*! - * @function removeObject - * - * @abstract - * Removes a key/object pair from the dictionary. - * - * @param aKey An OSSymbol identifying the object - * to be removed from the dictionary. - * - * @discussion - * The removed key (not necessarily aKey itself) - * and object are automatically released. - */ - virtual void removeObject(const OSSymbol * aKey); - - - /*! - * @function removeObject - * - * @abstract - * Removes a key/object pair from the dictionary. - * - * @param aKey A OSString identifying the object - * to be removed from the dictionary. - * - * @discussion - * The removed key (not necessarily aKey itself) - * and object are automatically released. - */ - virtual void removeObject(const OSString * aKey); - - - /*! - * @function removeObject - * - * @abstract - * Removes a key/object pair from the dictionary. - * - * @param aKey A C string identifying the object - * to be removed from the dictionary. - * - * @discussion - * The removed key (internally an OSSymbol) - * and object are automatically released. - */ - virtual void removeObject(const char * aKey); - - - /*! - * @function merge - * - * @abstract - * Merges the contents of a dictionary into the receiver. 
- * - * @param aDictionary The dictionary whose contents - * are to be merged with the receiver. - * @result - * true if the merge succeeds, false otherwise. - * - * @discussion - * If there are keys in aDictionary that match keys - * in the receiving dictionary, - * then the objects in the receiver are replaced - * by those from aDictionary, - * and the replaced objects are released. - */ - virtual bool merge(const OSDictionary * aDictionary); - - - /*! - * @function getObject - * - * @abstract - * Returns the object stored under a given key. - * - * @param aKey An OSSymbol key identifying the object - * to be returned to the caller. - * - * @result - * The object stored under aKey, - * or NULL if the key does not exist in the dictionary. - * - * @discussion - * The returned object will be released if removed from the dictionary; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getObject(const OSSymbol * aKey) const; - - - /*! - * @function getObject - * - * @abstract Returns the object stored under a given key. - * - * @param aKey An OSString key identifying the object - * to be returned to caller. - * - * @result - * The object stored under aKey, - * or NULL if the key does not exist in the dictionary. - * - * @discussion - * The returned object will be released if removed from the dictionary; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getObject(const OSString * aKey) const; - - - /*! - * @function getObject - * - * @abstract - * Returns the object stored under a given key. - * - * @param aKey A C string key identifying the object - * to be returned to caller. - * - * @result - * The object stored under aKey, - * or NULL if the key does not exist in the dictionary. - * - * @discussion - * The returned object will be released if removed from the dictionary; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getObject(const char * aKey) const; - - - /*! - * @function isEqualTo - * - * @abstract Tests the equality of two OSDictionary objects - * over a subset of keys. - * - * @param aDictionary The dictionary to be compared against the receiver. - * @param keys An OSArray or OSDictionary containing the keys - * (as @link //apple_ref/cpp/cl/OSString OSStrings@/link or - * @link //apple_ref/cpp/cl/OSSymbol OSSymbols@/link) - * describing the intersection for the comparison. - * - * @result - * true if the intersections - * of the two dictionaries are equal. - * - * @discussion - * Two OSDictionary objects are considered equal by this function - * if both have objects stored for all keys provided, - * and if the objects stored in each under - * a given key compare as equal using - * @link - * //apple_ref/cpp/instm/OSMetaClassBase/isEqualTo/virtualbool/(constOSMetaClassBase*) - * isEqualTo@/link. - */ - virtual bool isEqualTo( - const OSDictionary * aDictionary, - const OSCollection * keys) const; - - - /*! - * @function isEqualTo - * - * @abstract Tests the equality of two OSDictionary objects. - * - * @param aDictionary The dictionary to be compared against the receiver. - * - * @result - * true if the dictionaries are equal, - * false if not. 
- * - * @discussion - * Two OSDictionary objects are considered equal if they have same count, - * the same keys, and if the objects stored in each under - * a given key compare as equal using - * @link - * //apple_ref/cpp/instm/OSMetaClassBase/isEqualTo/virtualbool/(constOSMetaClassBase*) - * isEqualTo@/link. - */ - virtual bool isEqualTo(const OSDictionary * aDictionary) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of an OSDictionary to an arbitrary object. - * - * @param anObject An object to be compared against the receiver. - * - * @result - * true if the objects are equal. - * - * @discussion - * An OSDictionary is considered equal to another object - * if that object is derived from OSDictionary - * and contains the same or equivalent objects. - */ - virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function serialize - * - * @abstract - * Archives the receiver into the provided - * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. - * - * @param serializer The OSSerialize object. - * - * @result - * true if serialization succeeds, false if not. - */ - virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function setOptions - * - * @abstract - * Recursively sets option bits in the dictionary - * and all child collections. - * - * @param options A bitfield whose values turn the options on (1) or off (0). - * @param mask A mask indicating which bits - * in options to change. - * Pass 0 to get the whole current options bitfield - * without changing any settings. - * @param context Unused. - * - * @result - * The options bitfield as it was before the set operation. - * - * @discussion - * Kernel extensions should not call this function. - * - * Child collections' options are changed only if the receiving dictionary's - * options actually change. - */ - virtual unsigned setOptions( - unsigned options, - unsigned mask, - void * context = 0) APPLE_KEXT_OVERRIDE; - - - /*! - * @function copyCollection - * - * @abstract - * Creates a deep copy of the dictionary - * and its child collections. - * - * @param cycleDict A dictionary of all of the collections - * that have been copied so far, - * which is used to track circular references. - * To start the copy at the top level, - * pass NULL. - * - * @result - * The newly copied dictionary, with a retain count of 1, - * or NULL if there is insufficient memory to do the copy. - * - * @discussion - * The receiving dictionary, and any collections it contains, recursively, - * are copied. - * Objects that are not derived from OSCollection are retained - * rather than copied. - */ - OSCollection * copyCollection(OSDictionary * cycleDict = 0) APPLE_KEXT_OVERRIDE; +/*! + * @function withCapacity + * + * @abstract + * Creates and initializes an empty OSDictionary. + * + * @param capacity The initial storage capacity of the new dictionary object. + * + * @result + * An empty instance of OSDictionary + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * capacity must be nonzero. + * The new dictionary will grow as needed to accommodate more key/object pairs + * (unlike @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, + * for which the initial capacity is a hard limit). + */ + static OSDictionary * withCapacity(unsigned int capacity); + + +/*! + * @function withObjects + * + * @abstract Creates and initializes an OSDictionary + * populated with keys and objects provided. 
+ * + * @param objects A C array of OSMetaClassBase-derived objects. + * @param keys A C array of OSSymbol keys + * for the corresponding objects in objects. + * @param count The number of keys and objects + * to be placed into the dictionary. + * @param capacity The initial storage capacity of the new dictionary object. + * If 0, count is used; otherwise this value + * must be greater than or equal to count. + * + * @result + * An instance of OSDictionary + * containing the key/object pairs provided, + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * objects and keys must be non-NULL, + * and count must be nonzero. + * If capacity is nonzero, + * it must be greater than or equal to count. + * The new dictionary will grow as needed + * to accommodate more key/object pairs + * (unlike + * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, + * for which the initial capacity is a hard limit). + */ + static OSDictionary * withObjects( + const OSObject * objects[], + const OSSymbol * keys[], + unsigned int count, + unsigned int capacity = 0); + +/*! + * @function withObjects + * + * @abstract + * Creates and initializes an OSDictionary + * populated with keys and objects provided. + * + * @param objects A C array of OSMetaClassBase-derived objects. + * @param keys A C array of OSString keys for the corresponding objects + * in objects. + * @param count The number of keys and objects + * to be placed into the dictionary. + * @param capacity The initial storage capacity of the new dictionary object. + * If 0, count is used; otherwise this value + * must be greater than or equal to count. + * + * @result + * An instance of OSDictionary + * containing the key/object pairs provided, + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * objects and keys must be non-NULL, + * and count must be nonzero. + * If capacity is nonzero, it must be greater than or equal to count. + * The new dictionary will grow as needed + * to accommodate more key/object pairs + * (unlike + * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, + * for which the initial capacity is a hard limit). + */ + static OSDictionary * withObjects( + const OSObject * objects[], + const OSString * keys[], + unsigned int count, + unsigned int capacity = 0); + + +/*! + * @function withDictionary + * + * @abstract + * Creates and initializes an OSDictionary + * populated with the contents of another dictionary. + * + * @param dict A dictionary whose contents will be stored + * in the new instance. + * @param capacity The initial storage capacity of the new dictionary object. + * If 0, the capacity is set to the number of key/value pairs + * in dict; + * otherwise capacity must be greater than or equal to + * the number of key/value pairs in dict. + * + * @result + * An instance of OSDictionary + * containing the key/value pairs of dict, + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * dict must be non-NULL. + * If capacity is nonzero, it must be greater than or equal to + * the number of key/value pairs in dict. + * The new dictionary will grow as needed + * to accommodate more key/object pairs + * (unlike + * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, + * for which the initial capacity is a hard limit). + * + * The keys and objects in dict are retained for storage + * in the new OSDictionary, + * not copied. + */ + static OSDictionary * withDictionary( + const OSDictionary * dict, + unsigned int capacity = 0); + +
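A minimal usage sketch of the creators above (illustrative only; the key name and value are placeholders):

    OSDictionary * dict = OSDictionary::withCapacity(2);    // grows past 2 as needed
    if (dict) {
        OSString * value = OSString::withCString("example");
        if (value) {
            dict->setObject("SomeKey", value);  // an OSSymbol key is created; value is retained
            value->release();                   // the dictionary now holds its own reference
        }
        dict->release();                        // releases all stored keys and objects
    }

+/*!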
+ * @function initWithCapacity + * + * @abstract + * Initializes a new instance of OSDictionary. + * + * @param capacity The initial storage capacity of the new dictionary object. + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link //apple_ref/cpp/clm/OSDictionary/withCapacity/staticOSDictionary*\/(unsignedint) + * withCapacity@/link + * instead. + * + * capacity must be nonzero. + * The new dictionary will grow as needed + * to accommodate more key/object pairs + * (unlike + * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, + * for which the initial capacity is a hard limit). + */ + virtual bool initWithCapacity(unsigned int capacity); + + +/*! + * @function initWithObjects + * + * @abstract Initializes a new OSDictionary with keys and objects provided. + * + * @param objects A C array of OSMetaClassBase-derived objects. + * @param keys A C array of OSSymbol keys + * for the corresponding objects in objects. + * @param count The number of keys and objects to be placed + * into the dictionary. + * @param capacity The initial storage capacity of the new dictionary object. + * If 0, count is used; otherwise this value + * must be greater than or equal to count. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSDictionary/withObjects/staticOSDictionary*\/(constOSObject*,constOSString*,unsignedint,unsignedint) + * withObjects@/link + * instead. + * + * objects and keys must be non-NULL, + * and count must be nonzero. + * If capacity is nonzero, + * it must be greater than or equal to count. + * The new dictionary will grow as needed + * to accommodate more key/object pairs + * (unlike + * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, + * for which the initial capacity is a hard limit). + */ + virtual bool initWithObjects( + const OSObject * objects[], + const OSSymbol * keys[], + unsigned int count, + unsigned int capacity = 0); + + +/*! + * @function initWithObjects + * + * @abstract + * Initializes a new OSDictionary with keys and objects provided. + * + * @param objects A C array of OSMetaClassBase-derived objects. + * @param keys A C array of OSString keys + * for the corresponding objects in objects. + * @param count The number of keys and objects + * to be placed into the dictionary. + * @param capacity The initial storage capacity of the new dictionary object. + * If 0, count is used; otherwise this value + * must be greater than or equal to count. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSDictionary/withObjects/staticOSDictionary*\/(constOSObject*,constOSString*,unsignedint,unsignedint) + * withObjects@/link + * instead. + * + * objects and keys must be non-NULL, + * and count must be nonzero. + * If capacity is nonzero, it must be greater than or equal to count. + * The new dictionary will grow as needed + * to accommodate more key/object pairs + * (unlike + * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, + * for which the initial capacity is a hard limit). + */ + virtual bool initWithObjects( + const OSObject * objects[], + const OSString * keys[], + unsigned int count, + unsigned int capacity = 0); + +
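The "Not for general use" notes on the initializers above refer to the libkern two-phase creation convention: each static creator wraps the matching initializer. A sketch of the usual pattern (illustrative, not necessarily the exact implementation):

    OSDictionary *
    OSDictionary::withCapacity(unsigned int capacity)
    {
        OSDictionary * me = new OSDictionary;       // raw allocation, retain count 1
        if (me && !me->initWithCapacity(capacity)) {
            me->release();                          // init failed; destroy the half-built object
            return NULL;
        }
        return me;                                  // NULL or a fully initialized dictionary
    }

+/*!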
+ * @function initWithDictionary + * + * @abstract + * Initializes a new OSDictionary + * with the contents of another dictionary. + * + * @param dict A dictionary whose contents will be placed + * in the new instance. + * @param capacity The initial storage capacity of the new dictionary object. + * If 0, the capacity is set to the number of key/value pairs + * in dict; + * otherwise capacity must be greater than or equal to + * the number of key/value pairs in dict. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link withDictionary withDictionary@/link instead. + * + * dict must be non-NULL. + * If capacity is nonzero, + * it must be greater than or equal to + * the number of key/value pairs in dict. + * The new dictionary will grow as needed + * to accommodate more key/object pairs + * (unlike + * @link //apple_ref/doc/uid/20001497 CFMutableDictionary@/link, + * for which the initial capacity is a hard limit). + * + * The keys and objects in dict are retained for storage + * in the new OSDictionary, + * not copied. + */ + virtual bool initWithDictionary( + const OSDictionary * dict, + unsigned int capacity = 0); + + +/*! + * @function free + * + * @abstract + * Deallocates or releases any resources + * used by the OSDictionary instance. + * + * @discussion + * This function should not be called directly; + * use + * @link + * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() + * release@/link + * instead. + */ + virtual void free() APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCount + * + * @abstract + * Returns the current number of key/object pairs + * contained within the dictionary. + * + * @result + * The current number of key/object pairs + * contained within the dictionary. + */ + virtual unsigned int getCount() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCapacity + * + * @abstract + * Returns the number of objects the dictionary can store without reallocating. + * + * @result + * The number of objects the dictionary can store without reallocating. + * + * @discussion + * OSDictionary objects grow when full + * to accommodate additional key/object pairs. + * See + * @link + * //apple_ref/cpp/instm/OSDictionary/getCapacityIncrement/virtualunsignedint/() + * getCapacityIncrement@/link + * and + * @link + * //apple_ref/cpp/instm/OSDictionary/ensureCapacity/virtualunsignedint/(unsignedint) + * ensureCapacity@/link. + */ + virtual unsigned int getCapacity() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCapacityIncrement + * + * @abstract + * Returns the storage increment of the dictionary. + * + * @result + * The storage increment of the dictionary. + * + * @discussion + * An OSDictionary allocates storage for key/object pairs in multiples + * of the capacity increment. + */ + virtual unsigned int getCapacityIncrement() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function setCapacityIncrement + * + * @abstract + * Sets the storage increment of the dictionary. + * + * @result + * The new storage increment of the dictionary, + * which may be different from the number requested. + * + * @discussion + * An OSDictionary allocates storage for key/object pairs in multiples + * of the capacity increment. + * Calling this function does not immediately reallocate storage. + */ + virtual unsigned int setCapacityIncrement(unsigned increment) APPLE_KEXT_OVERRIDE; + + +/*! + * @function ensureCapacity + * + * @abstract + * Ensures the dictionary has enough space + * to store the requested number of key/object pairs.
+ * + * @param newCapacity The total number of key/object pairs the dictionary + * should be able to store. + * + * @result + * The new capacity of the dictionary, + * which may be different from the number requested + * (if smaller, reallocation of storage failed). + * + * @discussion + * This function immediately resizes the dictionary, if necessary, + * to accommodate at least newCapacity key/object pairs. + * If newCapacity is not greater than the current capacity, + * or if an allocation error occurs, the original capacity is returned. + * + * There is no way to reduce the capacity of an OSDictionary. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity) APPLE_KEXT_OVERRIDE; + + +/*! + * @function flushCollection + * + * @abstract + * Removes and releases all keys and objects within the dictionary. + * + * @discussion + * The dictionary's capacity (and therefore direct memory consumption) + * is not reduced by this function. + */ + virtual void flushCollection() APPLE_KEXT_OVERRIDE; + + +/*! + * @function setObject + * + * @abstract + * Stores an object in the dictionary under a key. + * + * @param aKey An OSSymbol identifying the object + * placed within the dictionary. + * It is automatically retained. + * @param anObject The object to be stored in the dictionary. + * It is automatically retained. + * + * @result + * true if the addition was successful, + * false otherwise. + * + * @discussion + * An object already stored under aKey is released. + */ + virtual bool setObject( + const OSSymbol * aKey, + const OSMetaClassBase * anObject); + + +/*! + * @function setObject + * + * @abstract Stores an object in the dictionary under a key. + * + * @param aKey An OSString identifying the object + * placed within the dictionary. + * @param anObject The object to be stored in the dictionary. + * It is automatically retained. + * + * @result + * true if the addition was successful, + * false otherwise. + * + * @discussion + * An OSSymbol for aKey is created internally. + * An object already stored under aKey is released. + */ + virtual bool setObject( + const OSString * aKey, + const OSMetaClassBase * anObject); + + +/*! + * @function setObject + * + * @abstract + * Stores an object in the dictionary under a key. + * + * @param aKey A C string identifying the object + * placed within the dictionary. + * @param anObject The object to be stored in the dictionary. + * It is automatically retained. + * + * @result + * true if the addition was successful, + * false otherwise. + * + * @discussion + * An OSSymbol for aKey is created internally. + * An object already stored under aKey is released. + */ + virtual bool setObject( + const char * aKey, + const OSMetaClassBase * anObject); + + +/*! + * @function removeObject + * + * @abstract + * Removes a key/object pair from the dictionary. + * + * @param aKey An OSSymbol identifying the object + * to be removed from the dictionary. + * + * @discussion + * The removed key (not necessarily aKey itself) + * and object are automatically released. + */ + virtual void removeObject(const OSSymbol * aKey); + + +/*! + * @function removeObject + * + * @abstract + * Removes a key/object pair from the dictionary. + * + * @param aKey An OSString identifying the object + * to be removed from the dictionary. + * + * @discussion + * The removed key (not necessarily aKey itself) + * and object are automatically released. + */ + virtual void removeObject(const OSString * aKey); + +
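Because the C-string and OSString overloads create an OSSymbol on every call, the OSSymbol overloads are preferable for repeated operations on the same key. A short sketch (illustrative; dict and value are placeholders for an existing dictionary and object):

    const OSSymbol * key = OSSymbol::withCString("SomeKey");  // canonical shared key object
    if (key) {
        dict->setObject(key, value);   // no per-call symbol creation
        dict->removeObject(key);       // releases the stored object again
        key->release();
    }

+/*!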
+ * @function removeObject + * + * @abstract + * Removes a key/object pair from the dictionary. + * + * @param aKey A C string identifying the object + * to be removed from the dictionary. + * + * @discussion + * The removed key (internally an OSSymbol) + * and object are automatically released. + */ + virtual void removeObject(const char * aKey); + + +/*! + * @function merge + * + * @abstract + * Merges the contents of a dictionary into the receiver. + * + * @param aDictionary The dictionary whose contents + * are to be merged with the receiver. + * @result + * true if the merge succeeds, false otherwise. + * + * @discussion + * If there are keys in aDictionary that match keys + * in the receiving dictionary, + * then the objects in the receiver are replaced + * by those from aDictionary, + * and the replaced objects are released. + */ + virtual bool merge(const OSDictionary * aDictionary); + + +/*! + * @function getObject + * + * @abstract + * Returns the object stored under a given key. + * + * @param aKey An OSSymbol key identifying the object + * to be returned to the caller. + * + * @result + * The object stored under aKey, + * or NULL if the key does not exist in the dictionary. + * + * @discussion + * The returned object will be released if removed from the dictionary; + * if you plan to store the reference, you should call + * @link + * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() + * retain@/link + * on that object. + */ + virtual OSObject * getObject(const OSSymbol * aKey) const; + + +/*! + * @function getObject + * + * @abstract Returns the object stored under a given key. + * + * @param aKey An OSString key identifying the object + * to be returned to the caller. + * + * @result + * The object stored under aKey, + * or NULL if the key does not exist in the dictionary. + * + * @discussion + * The returned object will be released if removed from the dictionary; + * if you plan to store the reference, you should call + * @link + * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() + * retain@/link + * on that object. + */ + virtual OSObject * getObject(const OSString * aKey) const; + + +/*! + * @function getObject + * + * @abstract + * Returns the object stored under a given key. + * + * @param aKey A C string key identifying the object + * to be returned to the caller. + * + * @result + * The object stored under aKey, + * or NULL if the key does not exist in the dictionary. + * + * @discussion + * The returned object will be released if removed from the dictionary; + * if you plan to store the reference, you should call + * @link + * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() + * retain@/link + * on that object. + */ + virtual OSObject * getObject(const char * aKey) const; + + +/*! + * @function isEqualTo + * + * @abstract Tests the equality of two OSDictionary objects + * over a subset of keys. + * + * @param aDictionary The dictionary to be compared against the receiver. + * @param keys An OSArray or OSDictionary containing the keys + * (as @link //apple_ref/cpp/cl/OSString OSStrings@/link or + * @link //apple_ref/cpp/cl/OSSymbol OSSymbols@/link) + * describing the intersection for the comparison. + * + * @result + * true if the intersections + * of the two dictionaries are equal.
+ * + * @discussion + * Two OSDictionary objects are considered equal by this function + * if both have objects stored for all keys provided, + * and if the objects stored in each under + * a given key compare as equal using + * @link + * //apple_ref/cpp/instm/OSMetaClassBase/isEqualTo/virtualbool/(constOSMetaClassBase*) + * isEqualTo@/link. + */ + virtual bool isEqualTo( + const OSDictionary * aDictionary, + const OSCollection * keys) const; + + +/*! + * @function isEqualTo + * + * @abstract Tests the equality of two OSDictionary objects. + * + * @param aDictionary The dictionary to be compared against the receiver. + * + * @result + * true if the dictionaries are equal, + * false if not. + * + * @discussion + * Two OSDictionary objects are considered equal if they have the same count, + * the same keys, and if the objects stored in each under + * a given key compare as equal using + * @link + * //apple_ref/cpp/instm/OSMetaClassBase/isEqualTo/virtualbool/(constOSMetaClassBase*) + * isEqualTo@/link. + */ + virtual bool isEqualTo(const OSDictionary * aDictionary) const; + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of an OSDictionary to an arbitrary object. + * + * @param anObject An object to be compared against the receiver. + * + * @result + * true if the objects are equal. + * + * @discussion + * An OSDictionary is considered equal to another object + * if that object is derived from OSDictionary + * and contains the same or equivalent objects. + */ + virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function serialize + * + * @abstract + * Archives the receiver into the provided + * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. + * + * @param serializer The OSSerialize object. + * + * @result + * true if serialization succeeds, false if not. + */ + virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function setOptions + * + * @abstract + * Recursively sets option bits in the dictionary + * and all child collections. + * + * @param options A bitfield whose values turn the options on (1) or off (0). + * @param mask A mask indicating which bits + * in options to change. + * Pass 0 to get the whole current options bitfield + * without changing any settings. + * @param context Unused. + * + * @result + * The options bitfield as it was before the set operation. + * + * @discussion + * Kernel extensions should not call this function. + * + * Child collections' options are changed only if the receiving dictionary's + * options actually change. + */ + virtual unsigned setOptions( + unsigned options, + unsigned mask, + void * context = 0) APPLE_KEXT_OVERRIDE; + + +/*! + * @function copyCollection + * + * @abstract + * Creates a deep copy of the dictionary + * and its child collections. + * + * @param cycleDict A dictionary of all of the collections + * that have been copied so far, + * which is used to track circular references. + * To start the copy at the top level, + * pass NULL. + * + * @result + * The newly copied dictionary, with a retain count of 1, + * or NULL if there is insufficient memory to do the copy. + * + * @discussion + * The receiving dictionary, and any collections it contains, recursively, + * are copied. + * Objects that are not derived from OSCollection are retained + * rather than copied.
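+ *
+ * For contrast, an illustrative sketch (dict is a placeholder for an
+ * existing dictionary):
+ *
+ *   OSCollection * deep = dict->copyCollection();                // child collections copied too
+ *   OSDictionary * shallow = OSDictionary::withDictionary(dict); // values retained, not copied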
+ */ + OSCollection * copyCollection(OSDictionary * cycleDict = 0) APPLE_KEXT_OVERRIDE; #if XNU_KERNEL_PRIVATE - bool setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject, bool onlyAdd); - OSArray * copyKeys(void); + bool setObject(const OSSymbol *aKey, const OSMetaClassBase *anObject, bool onlyAdd); + OSArray * copyKeys(void); + void sortBySymbol(void); #endif /* XNU_KERNEL_PRIVATE */ - /*! - * @function iterateObjects - * - * @abstract - * Invoke a callback for each member of the collection. - * - * @param refcon A reference constant for the callback. - * @param callback The callback function, - * called with the refcon and each member key & object - * of the dictionary in turn, on the callers thread. - * The callback should return true to early terminate - * the iteration, false otherwise. - * - * @result - * False if the dictionary iteration was made invalid - * (see OSCollectionIterator::isValid()) otherwise true. - */ - bool iterateObjects(void * refcon, bool (*callback)(void * refcon, const OSSymbol * key, OSObject * object)); +/*! + * @function iterateObjects + * + * @abstract + * Invoke a callback for each member of the collection. + * + * @param refcon A reference constant for the callback. + * @param callback The callback function, + * called with the refcon and each member key & object + * of the dictionary in turn, on the caller's thread. + * The callback should return true to terminate + * the iteration early, false otherwise. + * + * @result + * False if the dictionary iteration was made invalid + * (see OSCollectionIterator::isValid()), otherwise true. + */ + bool iterateObjects(void * refcon, bool (*callback)(void * refcon, const OSSymbol * key, OSObject * object)); #ifdef __BLOCKS__ - /*! - * @function iterateObjects - * - * @abstract - * Invoke a block for each member of the collection. - * - * @param block The block, - * called with the refcon and each member key & object - * of the dictionary in turn, on the callers thread. - * The callback should return true to early terminate - * the iteration, false otherwise. - * - * @result - * False if the dictionary iteration was made invalid - * (see OSCollectionIterator::isValid()) otherwise true. - */ - bool iterateObjects(bool (^block)(const OSSymbol * key, OSObject * object)); +/*! + * @function iterateObjects + * + * @abstract + * Invoke a block for each member of the collection. + * + * @param block The block, + * called with each member key & object + * of the dictionary in turn, on the caller's thread. + * The block should return true to terminate + * the iteration early, false otherwise. + * + * @result + * False if the dictionary iteration was made invalid + * (see OSCollectionIterator::isValid()), otherwise true.
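+ *
+ * A minimal sketch (illustrative; dict is a placeholder for a populated
+ * dictionary):
+ *
+ *   __block unsigned int entries = 0;
+ *   dict->iterateObjects(^(const OSSymbol * key, OSObject * object) {
+ *       entries++;
+ *       return false;     // false continues; true would stop the iteration
+ *   });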
+ */ + bool iterateObjects(bool (^block)(const OSSymbol * key, OSObject * object)); #endif /* __BLOCKS__ */ - OSMetaClassDeclareReservedUnused(OSDictionary, 0); - OSMetaClassDeclareReservedUnused(OSDictionary, 1); - OSMetaClassDeclareReservedUnused(OSDictionary, 2); - OSMetaClassDeclareReservedUnused(OSDictionary, 3); - OSMetaClassDeclareReservedUnused(OSDictionary, 4); - OSMetaClassDeclareReservedUnused(OSDictionary, 5); - OSMetaClassDeclareReservedUnused(OSDictionary, 6); - OSMetaClassDeclareReservedUnused(OSDictionary, 7); + OSMetaClassDeclareReservedUnused(OSDictionary, 0); + OSMetaClassDeclareReservedUnused(OSDictionary, 1); + OSMetaClassDeclareReservedUnused(OSDictionary, 2); + OSMetaClassDeclareReservedUnused(OSDictionary, 3); + OSMetaClassDeclareReservedUnused(OSDictionary, 4); + OSMetaClassDeclareReservedUnused(OSDictionary, 5); + OSMetaClassDeclareReservedUnused(OSDictionary, 6); + OSMetaClassDeclareReservedUnused(OSDictionary, 7); }; #endif /* !_IOKIT_IODICTIONARY_H */ diff --git a/libkern/libkern/c++/OSEndianTypes.h b/libkern/libkern/c++/OSEndianTypes.h index 9bde256fa..487eb3ec7 100644 --- a/libkern/libkern/c++/OSEndianTypes.h +++ b/libkern/libkern/c++/OSEndianTypes.h @@ -2,7 +2,7 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ * * HISTORY @@ -143,9 +143,9 @@ private: \ \ void writeValue(Value v) { \ if (__builtin_constant_p(v)) \ - mValue = OSSwapHostTo ## argend ## ConstInt ## argsize(v); \ + mValue = OSSwapHostTo ## argend ## ConstInt ## argsize(v); \ else \ - OSWrite ## argend ## Int ## argsize(&mValue, 0, (UValue) v); \ + OSWrite ## argend ## Int ## argsize(&mValue, 0, (UValue) v); \ }; \ \ Value readValue() const { \ @@ -162,12 +162,12 @@ public: \ operator Value () const { return readValue(); }; \ } -class BigUInt16 __OSEndianSignIntSizeDEF(BigUInt16, Big, UInt, 16); -class BigSInt16 __OSEndianSignIntSizeDEF(BigSInt16, Big, SInt, 16); -class BigUInt32 __OSEndianSignIntSizeDEF(BigUInt32, Big, UInt, 32); -class BigSInt32 __OSEndianSignIntSizeDEF(BigSInt32, Big, SInt, 32); -class BigUInt64 __OSEndianSignIntSizeDEF(BigUInt64, Big, UInt, 64); -class BigSInt64 __OSEndianSignIntSizeDEF(BigSInt64, Big, SInt, 64); +class BigUInt16 __OSEndianSignIntSizeDEF(BigUInt16, Big, UInt, 16); +class BigSInt16 __OSEndianSignIntSizeDEF(BigSInt16, Big, SInt, 16); +class BigUInt32 __OSEndianSignIntSizeDEF(BigUInt32, Big, UInt, 32); +class BigSInt32 __OSEndianSignIntSizeDEF(BigSInt32, Big, SInt, 32); +class BigUInt64 __OSEndianSignIntSizeDEF(BigUInt64, Big, UInt, 64); +class BigSInt64 __OSEndianSignIntSizeDEF(BigSInt64, Big, SInt, 64); class LittleUInt16 __OSEndianSignIntSizeDEF(LittleUInt16, Little, UInt, 16); class LittleSInt16 __OSEndianSignIntSizeDEF(LittleSInt16, Little, SInt, 16); class LittleUInt32 __OSEndianSignIntSizeDEF(LittleUInt32, Little, UInt, 32); @@ -178,10 +178,7 @@ class LittleSInt64 __OSEndianSignIntSizeDEF(LittleSInt64, Little, SInt, 64); #undef __OSEndianSignIntSizeDEF #endif /* __cplusplus - */ + */ #endif /* ! _OS_OSENDIANHELPER_H - */ - - - + */ diff --git a/libkern/libkern/c++/OSIterator.h b/libkern/libkern/c++/OSIterator.h index 6cbb18905..f23dd782c 100644 --- a/libkern/libkern/c++/OSIterator.h +++ b/libkern/libkern/c++/OSIterator.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Copyright (c) 1998-1999 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1998-1999 Apple Computer, Inc. All rights reserved. * * HISTORY * @@ -43,8 +43,8 @@ * @abstract * This header declares the OSIterator collection class. */ - - + + /*! 
* @class OSIterator * @abstract @@ -60,77 +60,77 @@ * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSIterator provides no concurrency protection. */ class OSIterator : public OSObject { - OSDeclareAbstractStructors(OSIterator) + OSDeclareAbstractStructors(OSIterator) public: - /*! - * @function reset - * - * @abstract - * Resets the iterator to the beginning of the collection, - * as if it had just been created. - * - * @discussion - * Subclasses must implement this pure virtual member function. - */ - virtual void reset() = 0; +/*! + * @function reset + * + * @abstract + * Resets the iterator to the beginning of the collection, + * as if it had just been created. + * + * @discussion + * Subclasses must implement this pure virtual member function. + */ + virtual void reset() = 0; - /*! - * @function isValid - * - * @abstract - * Check that the collection hasn't been modified during iteration. - * - * @result - * true if the iterator is valid for continued use, - * false otherwise - * (typically because the collection being iterated has been modified). - * - * @discussion - * Subclasses must implement this pure virtual member function. - */ - virtual bool isValid() = 0; +/*! + * @function isValid + * + * @abstract + * Check that the collection hasn't been modified during iteration. + * + * @result + * true if the iterator is valid for continued use, + * false otherwise + * (typically because the collection being iterated has been modified). + * + * @discussion + * Subclasses must implement this pure virtual member function. + */ + virtual bool isValid() = 0; - /*! - * @function getNextObject - * - * @abstract - * Advances to and returns the next object in the iteration. - * - * @return - * The next object in the iteration context, - * NULL if there is no next object - * or if the iterator is no longer valid. - * - * @discussion - * The returned object will be released if removed from the collection; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - * - * Subclasses must implement this pure virtual function - * to check for validity with - * @link isValid isValid@/link, - * and then to advance the iteration context to the next object (if any) - * and return that next object, or NULL if there is none. - */ - virtual OSObject *getNextObject() = 0; +/*! + * @function getNextObject + * + * @abstract + * Advances to and returns the next object in the iteration. + * + * @return + * The next object in the iteration context, + * NULL if there is no next object + * or if the iterator is no longer valid. + * + * @discussion + * The returned object will be released if removed from the collection; + * if you plan to store the reference, you should call + * @link + * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() + * retain@/link + * on that object. + * + * Subclasses must implement this pure virtual function + * to check for validity with + * @link isValid isValid@/link, + * and then to advance the iteration context to the next object (if any) + * and return that next object, or NULL if there is none. 
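+ *
+ * The canonical consumption loop, as an illustrative sketch using an
+ * OSCollectionIterator (collection is a placeholder for any OSCollection):
+ *
+ *   OSCollectionIterator * iter = OSCollectionIterator::withCollection(collection);
+ *   OSObject * obj;
+ *   while (iter && (obj = iter->getNextObject())) {
+ *       // use obj; retain it if it must outlive the loop
+ *   }
+ *   if (iter) {
+ *       iter->release();
+ *   }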
+ */ + virtual OSObject *getNextObject() = 0; - OSMetaClassDeclareReservedUnused(OSIterator, 0); - OSMetaClassDeclareReservedUnused(OSIterator, 1); - OSMetaClassDeclareReservedUnused(OSIterator, 2); - OSMetaClassDeclareReservedUnused(OSIterator, 3); + OSMetaClassDeclareReservedUnused(OSIterator, 0); + OSMetaClassDeclareReservedUnused(OSIterator, 1); + OSMetaClassDeclareReservedUnused(OSIterator, 2); + OSMetaClassDeclareReservedUnused(OSIterator, 3); }; #endif /* ! _OS_OSITERATOR_H */ diff --git a/libkern/libkern/c++/OSKext.h b/libkern/libkern/c++/OSKext.h index 2abc2929c..898217069 100644 --- a/libkern/libkern/c++/OSKext.h +++ b/libkern/libkern/c++/OSKext.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -57,11 +57,11 @@ void osdata_phys_free(void * ptr, unsigned int length); void osdata_vm_deallocate(void * ptr, unsigned int length); void osdata_kext_free(void * ptr, unsigned int length); void kxld_log_callback( - KXLDLogSubsystem subsystem, - KXLDLogLevel level, - const char * format, - va_list argList, - void * user_data); + KXLDLogSubsystem subsystem, + KXLDLogLevel level, + const char * format, + va_list argList, + void * user_data); }; #endif /* XNU_KERNEL_PRIVATE */ @@ -71,35 +71,34 @@ void kxld_log_callback( class OSKext; extern "C" { - void OSKextLog( - OSKext * aKext, - OSKextLogSpec msgLogSpec, - const char * format, ...) - __attribute__((format(printf, 3, 4))); + OSKext * aKext, + OSKextLogSpec msgLogSpec, + const char * format, ...) 
+__attribute__((format(printf, 3, 4))); void OSKextVLog( - OSKext * aKext, - OSKextLogSpec msgLogSpec, - const char * format, - va_list srcArgList); + OSKext * aKext, + OSKextLogSpec msgLogSpec, + const char * format, + va_list srcArgList); #ifdef XNU_KERNEL_PRIVATE void OSKextRemoveKextBootstrap(void); kern_return_t OSRuntimeInitializeCPP( - OSKext * kext); + OSKext * kext); kern_return_t OSRuntimeFinalizeCPP( - OSKext * kext); + OSKext * kext); void OSRuntimeUnloadCPPForSegment( - kernel_segment_command_t * segment); + kernel_segment_command_t * segment); kern_return_t is_io_catalog_send_data( - mach_port_t masterPort, - uint32_t flag, - io_buf_ptr_t inData, - mach_msg_type_number_t inDataCount, - kern_return_t * result); + mach_port_t masterPort, + uint32_t flag, + io_buf_ptr_t inData, + mach_msg_type_number_t inDataCount, + kern_return_t * result); void kmod_dump_log(vm_offset_t*, unsigned int, boolean_t); void *OSKextKextForAddress(const void *addr); @@ -113,38 +112,36 @@ void *OSKextKextForAddress(const void *addr); #endif struct list_head { - struct list_head *prev; - struct list_head *next; + struct list_head *prev; + struct list_head *next; }; struct OSKextGrabPgoStruct { - bool metadata; - uint64_t *pSize; - char *pBuffer; - uint64_t bufferSize; - int err; - struct list_head list_head; + bool metadata; + uint64_t *pSize; + char *pBuffer; + uint64_t bufferSize; + int err; + struct list_head list_head; }; #ifndef container_of -#define container_of(ptr,type,member) ((type*)(((uintptr_t)ptr) - offsetof(type, member))) +#define container_of(ptr, type, member) ((type*)(((uintptr_t)ptr) - offsetof(type, member))) #endif /********************************************************************/ #if XNU_KERNEL_PRIVATE -struct OSKextAccount -{ - vm_allocation_site_t site; - uint32_t loadTag; - OSKext * kext; +struct OSKextAccount { + vm_allocation_site_t site; + uint32_t loadTag; + OSKext * kext; }; -struct OSKextActiveAccount -{ - uintptr_t address; - uintptr_t address_end; - OSKextAccount * account; +struct OSKextActiveAccount { + uintptr_t address; + uintptr_t address_end; + OSKextAccount * account; }; typedef struct OSKextActiveAccount OSKextActiveAccount; @@ -156,144 +153,144 @@ typedef struct OSKextActiveAccount OSKextActiveAccount; /********************************************************************/ class OSKext : public OSObject { - OSDeclareDefaultStructors(OSKext) + OSDeclareDefaultStructors(OSKext) #if PRAGMA_MARK /**************************************/ #pragma mark Friend Declarations /**************************************/ #endif - friend class IOCatalogue; - friend class KLDBootstrap; - friend class OSMetaClass; + friend class IOCatalogue; + friend class KLDBootstrap; + friend class OSMetaClass; - friend int OSKextGrabPgoData(uuid_t uuid, - uint64_t *pSize, - char *pBuffer, - uint64_t bufferSize, - int wait_for_unload, - int metadata); + friend int OSKextGrabPgoData(uuid_t uuid, + uint64_t *pSize, + char *pBuffer, + uint64_t bufferSize, + int wait_for_unload, + int metadata); #ifdef XNU_KERNEL_PRIVATE - friend void OSKextVLog( - OSKext * aKext, - OSKextLogSpec msgLogSpec, - const char * format, - va_list srcArgList); - - friend void OSKextRemoveKextBootstrap(void); - friend OSReturn OSKextUnloadKextWithLoadTag(uint32_t); - - friend kern_return_t kext_request( - host_priv_t hostPriv, - /* in only */ uint32_t clientLogSpec, - /* in only */ vm_offset_t requestIn, - /* in only */ mach_msg_type_number_t requestLengthIn, - /* out only */ vm_offset_t * responseOut, - /* out only */ 
mach_msg_type_number_t * responseLengthOut, - /* out only */ vm_offset_t * logDataOut, - /* out only */ mach_msg_type_number_t * logDataLengthOut, - /* out only */ kern_return_t * op_result); - - friend kxld_addr_t kern_allocate( - u_long size, - KXLDAllocateFlags * flags, - void * user_data); - - friend void kxld_log_shim( - KXLDLogSubsystem subsystem, - KXLDLogLevel level, - const char * format, - va_list argList, - void * user_data); - - friend void _OSKextConsiderUnloads( - __unused thread_call_param_t p0, - __unused thread_call_param_t p1); - - friend kern_return_t OSRuntimeInitializeCPP( - OSKext * kext); - friend kern_return_t OSRuntimeFinalizeCPP( - OSKext * kext); + friend void OSKextVLog( + OSKext * aKext, + OSKextLogSpec msgLogSpec, + const char * format, + va_list srcArgList); + + friend void OSKextRemoveKextBootstrap(void); + friend OSReturn OSKextUnloadKextWithLoadTag(uint32_t); + + friend kern_return_t kext_request( + host_priv_t hostPriv, + /* in only */ uint32_t clientLogSpec, + /* in only */ vm_offset_t requestIn, + /* in only */ mach_msg_type_number_t requestLengthIn, + /* out only */ vm_offset_t * responseOut, + /* out only */ mach_msg_type_number_t * responseLengthOut, + /* out only */ vm_offset_t * logDataOut, + /* out only */ mach_msg_type_number_t * logDataLengthOut, + /* out only */ kern_return_t * op_result); + + friend kxld_addr_t kern_allocate( + u_long size, + KXLDAllocateFlags * flags, + void * user_data); + + friend void kxld_log_shim( + KXLDLogSubsystem subsystem, + KXLDLogLevel level, + const char * format, + va_list argList, + void * user_data); + + friend void _OSKextConsiderUnloads( + __unused thread_call_param_t p0, + __unused thread_call_param_t p1); + + friend kern_return_t OSRuntimeInitializeCPP( + OSKext * kext); + friend kern_return_t OSRuntimeFinalizeCPP( + OSKext * kext); friend void OSRuntimeUnloadCPPForSegment( - kernel_segment_command_t * segment); + kernel_segment_command_t * segment); - friend kern_return_t is_io_catalog_send_data( - mach_port_t masterPort, - uint32_t flag, - io_buf_ptr_t inData, - mach_msg_type_number_t inDataCount, - kern_return_t * result); + friend kern_return_t is_io_catalog_send_data( + mach_port_t masterPort, + uint32_t flag, + io_buf_ptr_t inData, + mach_msg_type_number_t inDataCount, + kern_return_t * result); - friend void kmod_panic_dump(vm_offset_t*, unsigned int); - friend void kmod_dump_log(vm_offset_t*, unsigned int, boolean_t); - friend void kext_dump_panic_lists(int (*printf_func)(const char * fmt, ...)); - friend void *OSKextKextForAddress(const void *addr); + friend void kmod_panic_dump(vm_offset_t*, unsigned int); + friend void kmod_dump_log(vm_offset_t*, unsigned int, boolean_t); + friend void kext_dump_panic_lists(int (*printf_func)(const char * fmt, ...)); + friend void *OSKextKextForAddress(const void *addr); #endif /* XNU_KERNEL_PRIVATE */ private: - /************************* - * Instance variables - *************************/ - OSDictionary * infoDict; - - const OSSymbol * bundleID; - OSString * path; // not necessarily correct :-/ - OSString * executableRelPath; // relative to bundle - - OSKextVersion version; // parsed - OSKextVersion compatibleVersion; // parsed - - /* These fields are required for tracking loaded kexts and - * will always have values for a loaded kext. 
- */ - OSKextLoadTag loadTag; // 'id' from old kmod_info; - // kOSKextInvalidLoadTag invalid - kmod_info_t * kmod_info; // address into linkedExec./alloced for interface - - OSArray * dependencies; // kernel resource does not have any; - // links directly to kernel - - /* Only real kexts have these; interface kexts do not. - */ - OSData * linkedExecutable; - OSSet * metaClasses; // for C++/OSMetaClass kexts - - /* Only interface kexts have these; non-interface kexts can get at them - * in the linked Executable. - */ - OSData * interfaceUUID; - - struct { - unsigned int loggingEnabled:1; - - unsigned int hasAllDependencies:1; - unsigned int hasBleedthrough:1; - - unsigned int interface:1; - unsigned int kernelComponent:1; - unsigned int prelinked:1; - unsigned int builtin:1; - unsigned int loaded:1; - unsigned int dtraceInitialized:1; - unsigned int starting:1; - unsigned int started:1; - unsigned int stopping:1; - unsigned int unloading:1; - - unsigned int autounloadEnabled:1; - unsigned int delayAutounload:1; // for development - - unsigned int CPPInitialized:1; - unsigned int jettisonLinkeditSeg:1; - } flags; - - struct list_head pendingPgoHead; - uuid_t instance_uuid; - OSKextAccount * account; - uint32_t builtinKmodIdx; +/************************* +* Instance variables +*************************/ + OSDictionary * infoDict; + + const OSSymbol * bundleID; + OSString * path; // not necessarily correct :-/ + OSString * executableRelPath;// relative to bundle + + OSKextVersion version; // parsed + OSKextVersion compatibleVersion;// parsed + +/* These fields are required for tracking loaded kexts and + * will always have values for a loaded kext. + */ + OSKextLoadTag loadTag; // 'id' from old kmod_info; + // kOSKextInvalidLoadTag invalid + kmod_info_t * kmod_info; // address into linkedExec./alloced for interface + + OSArray * dependencies; // kernel resource does not have any; + // links directly to kernel + +/* Only real kexts have these; interface kexts do not. + */ + OSData * linkedExecutable; + OSSet * metaClasses; // for C++/OSMetaClass kexts + +/* Only interface kexts have these; non-interface kexts can get at them + * in the linked Executable. + */ + OSData * interfaceUUID; + + struct { + unsigned int loggingEnabled:1; + + unsigned int hasAllDependencies:1; + unsigned int hasBleedthrough:1; + + unsigned int interface:1; + unsigned int kernelComponent:1; + unsigned int prelinked:1; + unsigned int builtin:1; + unsigned int loaded:1; + unsigned int dtraceInitialized:1; + unsigned int starting:1; + unsigned int started:1; + unsigned int stopping:1; + unsigned int unloading:1; + + unsigned int autounloadEnabled:1; + unsigned int delayAutounload:1; // for development + + unsigned int CPPInitialized:1; + unsigned int jettisonLinkeditSeg:1; + } flags; + + struct list_head pendingPgoHead; + uuid_t instance_uuid; + OSKextAccount * account; + uint32_t builtinKmodIdx; #if PRAGMA_MARK /**************************************/ @@ -302,280 +299,280 @@ private: #endif #ifdef XNU_KERNEL_PRIVATE - /* Startup/shutdown phases. - */ +/* Startup/shutdown phases. 
+ */ public: - static void initialize(void); - static OSDictionary * copyKexts(void); - static OSReturn removeKextBootstrap(void); - static void willShutdown(void); // called by IOPMrootDomain on shutdown - static void reportOSMetaClassInstances( - const char * kextIdentifier, - OSKextLogSpec msgLogSpec); + static void initialize(void); + static OSDictionary * copyKexts(void); + static OSReturn removeKextBootstrap(void); + static void willShutdown(void);// called by IOPMrootDomain on shutdown + static void reportOSMetaClassInstances( + const char * kextIdentifier, + OSKextLogSpec msgLogSpec); #endif /* XNU_KERNEL_PRIVATE */ private: - /* Called by power management at sleep/shutdown. - */ - static bool setLoadEnabled(bool flag); - static bool setUnloadEnabled(bool flag); - static bool setAutounloadsEnabled(bool flag); - static bool setKernelRequestsEnabled(bool flag); - - // all getters subject to race condition, caller beware - static bool getLoadEnabled(void); - static bool getUnloadEnabled(void); - static bool getAutounloadEnabled(void); - static bool getKernelRequestsEnabled(void); - - /* Instance life cycle. - */ - static OSKext * withBooterData( - OSString * deviceTreeName, - OSData * booterData); - virtual bool initWithBooterData( - OSString * deviceTreeName, - OSData * booterData); - - static OSKext * withPrelinkedInfoDict( - OSDictionary * infoDict, - bool doCoalesedSlides); - virtual bool initWithPrelinkedInfoDict( - OSDictionary * infoDict, - bool doCoalesedSlides); - - static void setAllVMAttributes(void); - - static OSKext * withMkext2Info( - OSDictionary * anInfoDict, - OSData * mkextData); - virtual bool initWithMkext2Info( - OSDictionary * anInfoDict, - OSData * mkextData); - - virtual bool setInfoDictionaryAndPath( - OSDictionary * aDictionary, - OSString * aPath); - virtual bool setExecutable( - OSData * anExecutable, - OSData * externalData = NULL, - bool externalDataIsMkext = false); - virtual bool registerIdentifier(void); - - virtual void free(void) APPLE_KEXT_OVERRIDE; - - static OSReturn removeKext( - OSKext * aKext, - bool terminateServicesAndRemovePersonalitiesFlag = false); - - virtual bool isInExcludeList(void); - - /* Mkexts. - */ - static OSReturn readMkextArchive( - OSData * mkextData, - uint32_t * checksumPtr = NULL); - static OSReturn readMkext2Archive( - OSData * mkextData, - OSDictionary ** mkextPlistOut, - uint32_t * checksumPtr = NULL); - virtual OSData * createMkext2FileEntry( - OSData * mkextData, - OSNumber * offsetNum, - const char * entryName); - virtual OSData * extractMkext2FileData( - UInt8 * data, - const char * name, - uint32_t compressedSize, - uint32_t fullSize); - - /* Dependencies. - */ - virtual bool resolveDependencies( - OSArray * loopStack = NULL); // priv/prot - virtual bool addBleedthroughDependencies(OSArray * anArray); - virtual bool flushDependencies(bool forceFlag = false); // priv/prot - virtual uint32_t getNumDependencies(void); - virtual OSArray * getDependencies(void); - - /* User-space requests (load/generic). 
- */ - static OSReturn loadFromMkext( - OSKextLogSpec clientLogSpec, - char * mkextBuffer, - uint32_t mkextBufferLength, - char ** logInfoOut, - uint32_t * logInfoLengthOut); - static OSReturn handleRequest( - host_priv_t hostPriv, - OSKextLogSpec clientLogSpec, - char * requestBuffer, - uint32_t requestLength, - char ** responseOut, - uint32_t * responseLengthOut, - char ** logInfoOut, - uint32_t * logInfoLengthOut); - static OSReturn serializeLogInfo( - OSArray * logInfoArray, - char ** logInfoOut, - uint32_t * logInfoLengthOut); - - /* Loading. - */ - virtual OSReturn load( - OSKextExcludeLevel startOpt = kOSKextExcludeNone, - OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, - OSArray * personalityNames = NULL); // priv/prot - virtual OSReturn unload(void); - virtual OSReturn queueKextNotification( - const char * notificationName, - OSString * kextIdentifier); - - static void recordIdentifierRequest( - OSString * kextIdentifier); - - virtual OSReturn slidePrelinkedExecutable(bool doCoalesedSlides); - virtual OSReturn loadExecutable(void); - virtual void jettisonLinkeditSegment(void); - virtual void jettisonDATASegmentPadding(void); - static void considerDestroyingLinkContext(void); - virtual OSData * getExecutable(void); - virtual void setLinkedExecutable(OSData * anExecutable); - +/* Called by power management at sleep/shutdown. + */ + static bool setLoadEnabled(bool flag); + static bool setUnloadEnabled(bool flag); + static bool setAutounloadsEnabled(bool flag); + static bool setKernelRequestsEnabled(bool flag); + +// all getters subject to race condition, caller beware + static bool getLoadEnabled(void); + static bool getUnloadEnabled(void); + static bool getAutounloadEnabled(void); + static bool getKernelRequestsEnabled(void); + +/* Instance life cycle. + */ + static OSKext * withBooterData( + OSString * deviceTreeName, + OSData * booterData); + virtual bool initWithBooterData( + OSString * deviceTreeName, + OSData * booterData); + + static OSKext * withPrelinkedInfoDict( + OSDictionary * infoDict, + bool doCoalesedSlides); + virtual bool initWithPrelinkedInfoDict( + OSDictionary * infoDict, + bool doCoalesedSlides); + + static void setAllVMAttributes(void); + + static OSKext * withMkext2Info( + OSDictionary * anInfoDict, + OSData * mkextData); + virtual bool initWithMkext2Info( + OSDictionary * anInfoDict, + OSData * mkextData); + + virtual bool setInfoDictionaryAndPath( + OSDictionary * aDictionary, + OSString * aPath); + virtual bool setExecutable( + OSData * anExecutable, + OSData * externalData = NULL, + bool externalDataIsMkext = false); + virtual bool registerIdentifier(void); + + virtual void free(void) APPLE_KEXT_OVERRIDE; + + static OSReturn removeKext( + OSKext * aKext, + bool terminateServicesAndRemovePersonalitiesFlag = false); + + virtual bool isInExcludeList(void); + +/* Mkexts. + */ + static OSReturn readMkextArchive( + OSData * mkextData, + uint32_t * checksumPtr = NULL); + static OSReturn readMkext2Archive( + OSData * mkextData, + OSDictionary ** mkextPlistOut, + uint32_t * checksumPtr = NULL); + virtual OSData * createMkext2FileEntry( + OSData * mkextData, + OSNumber * offsetNum, + const char * entryName); + virtual OSData * extractMkext2FileData( + UInt8 * data, + const char * name, + uint32_t compressedSize, + uint32_t fullSize); + +/* Dependencies. 
+ */ + virtual bool resolveDependencies( + OSArray * loopStack = NULL); // priv/prot + virtual bool addBleedthroughDependencies(OSArray * anArray); + virtual bool flushDependencies(bool forceFlag = false); // priv/prot + virtual uint32_t getNumDependencies(void); + virtual OSArray * getDependencies(void); + +/* User-space requests (load/generic). + */ + static OSReturn loadFromMkext( + OSKextLogSpec clientLogSpec, + char * mkextBuffer, + uint32_t mkextBufferLength, + char ** logInfoOut, + uint32_t * logInfoLengthOut); + static OSReturn handleRequest( + host_priv_t hostPriv, + OSKextLogSpec clientLogSpec, + char * requestBuffer, + uint32_t requestLength, + char ** responseOut, + uint32_t * responseLengthOut, + char ** logInfoOut, + uint32_t * logInfoLengthOut); + static OSReturn serializeLogInfo( + OSArray * logInfoArray, + char ** logInfoOut, + uint32_t * logInfoLengthOut); + +/* Loading. + */ + virtual OSReturn load( + OSKextExcludeLevel startOpt = kOSKextExcludeNone, + OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, + OSArray * personalityNames = NULL);// priv/prot + virtual OSReturn unload(void); + virtual OSReturn queueKextNotification( + const char * notificationName, + OSString * kextIdentifier); + + static void recordIdentifierRequest( + OSString * kextIdentifier); + + virtual OSReturn slidePrelinkedExecutable(bool doCoalesedSlides); + virtual OSReturn loadExecutable(void); + virtual void jettisonLinkeditSegment(void); + virtual void jettisonDATASegmentPadding(void); + static void considerDestroyingLinkContext(void); + virtual OSData * getExecutable(void); + virtual void setLinkedExecutable(OSData * anExecutable); + #if CONFIG_DTRACE - friend void OSKextRegisterKextsWithDTrace(void); - static void registerKextsWithDTrace(void); - virtual void registerWithDTrace(void); - virtual void unregisterWithDTrace(void); + friend void OSKextRegisterKextsWithDTrace(void); + static void registerKextsWithDTrace(void); + virtual void registerWithDTrace(void); + virtual void unregisterWithDTrace(void); #endif /* CONFIG_DTRACE */ - virtual OSReturn start(bool startDependenciesFlag = true); - virtual OSReturn stop(void); - virtual OSReturn setVMAttributes(bool protect, bool wire); - virtual boolean_t segmentShouldBeWired(kernel_segment_command_t *seg); - virtual OSReturn validateKextMapping(bool startFlag); - virtual boolean_t verifySegmentMapping(kernel_segment_command_t *seg); - - static OSArray * copyAllKextPersonalities( - bool filterSafeBootFlag = false); - - static void setPrelinkedPersonalities(OSArray * personalitiesArray); - - static void sendAllKextPersonalitiesToCatalog( - bool startMatching = false); - virtual OSReturn sendPersonalitiesToCatalog( - bool startMatching = false, - OSArray * personalityNames = NULL); - - static bool canUnloadKextWithIdentifier( - OSString * kextIdentifier, - bool checkClassesFlag = true); - - static OSReturn autounloadKext(OSKext * aKext); - - /* Sync with user space. - */ - static OSReturn pingKextd(void); - - /* Getting info about loaded kexts (kextstat). - */ - static OSDictionary * copyLoadedKextInfo( - OSArray * kextIdentifiers = NULL, - OSArray * keys = NULL); - static OSDictionary * copyLoadedKextInfoByUUID( - OSArray * kextIdentifiers = NULL, - OSArray * keys = NULL); - static OSData * copyKextUUIDForAddress(OSNumber *address = NULL); - virtual OSDictionary * copyInfo(OSArray * keys = NULL); - - /* Logging to user space. 
- */ - static OSKextLogSpec setUserSpaceLogFilter( - OSKextLogSpec userLogSpec, - bool captureFlag = false); - static OSArray * clearUserSpaceLogFilter(void); - static OSKextLogSpec getUserSpaceLogFilter(void); - - /* OSMetaClasses defined by kext. - */ - virtual OSReturn addClass( - OSMetaClass * aClass, - uint32_t numClasses); - virtual OSReturn removeClass( - OSMetaClass * aClass); - virtual bool hasOSMetaClassInstances(void); - virtual OSSet * getMetaClasses(void); - - virtual void reportOSMetaClassInstances( - OSKextLogSpec msgLogSpec); - - /* Resource requests and other callback stuff. - */ - static OSReturn dispatchResource(OSDictionary * requestDict); - - static OSReturn dequeueCallbackForRequestTag( - OSKextRequestTag requestTag, - OSDictionary ** callbackRecordOut); - static OSReturn dequeueCallbackForRequestTag( - OSNumber * requestTagNum, - OSDictionary ** callbackRecordOut); - static void invokeRequestCallback( - OSDictionary * callbackRecord, - OSReturn requestResult); - virtual void invokeOrCancelRequestCallbacks( - OSReturn callbackResult, - bool invokeFlag = true); - virtual uint32_t countRequestCallbacks(void); - - /* panic() support. - */ + virtual OSReturn start(bool startDependenciesFlag = true); + virtual OSReturn stop(void); + virtual OSReturn setVMAttributes(bool protect, bool wire); + virtual boolean_t segmentShouldBeWired(kernel_segment_command_t *seg); + virtual OSReturn validateKextMapping(bool startFlag); + virtual boolean_t verifySegmentMapping(kernel_segment_command_t *seg); + + static OSArray * copyAllKextPersonalities( + bool filterSafeBootFlag = false); + + static void setPrelinkedPersonalities(OSArray * personalitiesArray); + + static void sendAllKextPersonalitiesToCatalog( + bool startMatching = false); + virtual OSReturn sendPersonalitiesToCatalog( + bool startMatching = false, + OSArray * personalityNames = NULL); + + static bool canUnloadKextWithIdentifier( + OSString * kextIdentifier, + bool checkClassesFlag = true); + + static OSReturn autounloadKext(OSKext * aKext); + +/* Sync with user space. + */ + static OSReturn pingKextd(void); + +/* Getting info about loaded kexts (kextstat). + */ + static OSDictionary * copyLoadedKextInfo( + OSArray * kextIdentifiers = NULL, + OSArray * keys = NULL); + static OSDictionary * copyLoadedKextInfoByUUID( + OSArray * kextIdentifiers = NULL, + OSArray * keys = NULL); + static OSData * copyKextUUIDForAddress(OSNumber *address = NULL); + virtual OSDictionary * copyInfo(OSArray * keys = NULL); + +/* Logging to user space. + */ + static OSKextLogSpec setUserSpaceLogFilter( + OSKextLogSpec userLogSpec, + bool captureFlag = false); + static OSArray * clearUserSpaceLogFilter(void); + static OSKextLogSpec getUserSpaceLogFilter(void); + +/* OSMetaClasses defined by kext. + */ + virtual OSReturn addClass( + OSMetaClass * aClass, + uint32_t numClasses); + virtual OSReturn removeClass( + OSMetaClass * aClass); + virtual bool hasOSMetaClassInstances(void); + virtual OSSet * getMetaClasses(void); + + virtual void reportOSMetaClassInstances( + OSKextLogSpec msgLogSpec); + +/* Resource requests and other callback stuff. 
+ */ + static OSReturn dispatchResource(OSDictionary * requestDict); + + static OSReturn dequeueCallbackForRequestTag( + OSKextRequestTag requestTag, + OSDictionary ** callbackRecordOut); + static OSReturn dequeueCallbackForRequestTag( + OSNumber * requestTagNum, + OSDictionary ** callbackRecordOut); + static void invokeRequestCallback( + OSDictionary * callbackRecord, + OSReturn requestResult); + virtual void invokeOrCancelRequestCallbacks( + OSReturn callbackResult, + bool invokeFlag = true); + virtual uint32_t countRequestCallbacks(void); + +/* panic() support. + */ public: - enum { - kPrintKextsLock = 0x01, - kPrintKextsUnslide = 0x02, - kPrintKextsTerse = 0x04 - }; - static void printKextsInBacktrace( - vm_offset_t * addr, - unsigned int cnt, - int (* printf_func)(const char *fmt, ...), - uint32_t flags); + enum { + kPrintKextsLock = 0x01, + kPrintKextsUnslide = 0x02, + kPrintKextsTerse = 0x04 + }; + static void printKextsInBacktrace( + vm_offset_t * addr, + unsigned int cnt, + int (* printf_func)(const char *fmt, ...), + uint32_t flags); private: - static OSKextLoadedKextSummary *summaryForAddress(const uintptr_t addr); - static void *kextForAddress(const void *addr); - static boolean_t summaryIsInBacktrace( - OSKextLoadedKextSummary * summary, - vm_offset_t * addr, - unsigned int cnt); - static void printSummary( - OSKextLoadedKextSummary * summary, - int (* printf_func)(const char *fmt, ...), - uint32_t flags); - - static int saveLoadedKextPanicListTyped( - const char * prefix, - int invertFlag, - int libsFlag, - char * paniclist, - uint32_t list_size); - static void saveLoadedKextPanicList(void); - void savePanicString(bool isLoading); - static void printKextPanicLists(int (*printf_func)(const char *fmt, ...)); - - /* Kext summary support. - */ - static void updateLoadedKextSummaries(void); - void updateLoadedKextSummary(OSKextLoadedKextSummary *summary); - void updateActiveAccount(OSKextActiveAccount *accountp); + static OSKextLoadedKextSummary *summaryForAddress(const uintptr_t addr); + static void *kextForAddress(const void *addr); + static boolean_t summaryIsInBacktrace( + OSKextLoadedKextSummary * summary, + vm_offset_t * addr, + unsigned int cnt); + static void printSummary( + OSKextLoadedKextSummary * summary, + int (* printf_func)(const char *fmt, ...), + uint32_t flags); + + static int saveLoadedKextPanicListTyped( + const char * prefix, + int invertFlag, + int libsFlag, + char * paniclist, + uint32_t list_size); + static void saveLoadedKextPanicList(void); + void savePanicString(bool isLoading); + static void printKextPanicLists(int (*printf_func)(const char *fmt, ...)); + +/* Kext summary support. + */ + static void updateLoadedKextSummaries(void); + void updateLoadedKextSummary(OSKextLoadedKextSummary *summary); + void updateActiveAccount(OSKextActiveAccount *accountp); #ifdef XNU_KERNEL_PRIVATE public: #endif /* XNU_KERNEL_PRIVATE */ - /* C++ Initialization. - */ - virtual void setCPPInitialized(bool initialized=true); +/* C++ Initialization. 
+ */ + virtual void setCPPInitialized(bool initialized = true); #if PRAGMA_MARK /**************************************/ @@ -583,97 +580,97 @@ public: /**************************************/ #endif public: - // caller must release - static OSKext * lookupKextWithIdentifier(const char * kextIdentifier); - static OSKext * lookupKextWithIdentifier(OSString * kextIdentifier); - static OSKext * lookupKextWithLoadTag(OSKextLoadTag aTag); - static OSKext * lookupKextWithAddress(vm_address_t address); - static OSKext * lookupKextWithUUID(uuid_t uuid); - - kernel_section_t *lookupSection(const char *segname, const char*secname); - - static bool isKextWithIdentifierLoaded(const char * kextIdentifier); - - static OSReturn loadKextWithIdentifier( - const char * kextIdentifier, - Boolean allowDeferFlag = true, - Boolean delayAutounloadFlag = false, - OSKextExcludeLevel startOpt = kOSKextExcludeNone, - OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, - OSArray * personalityNames = NULL); - static OSReturn loadKextWithIdentifier( - OSString * kextIdentifier, - Boolean allowDeferFlag = true, - Boolean delayAutounloadFlag = false, - OSKextExcludeLevel startOpt = kOSKextExcludeNone, - OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, - OSArray * personalityNames = NULL); - static OSReturn removeKextWithIdentifier( - const char * kextIdentifier, - bool terminateServicesAndRemovePersonalitiesFlag = false); - static OSReturn removeKextWithLoadTag( - OSKextLoadTag loadTag, - bool terminateServicesAndRemovePersonalitiesFlag = false); - - static OSReturn requestResource( - const char * kextIdentifier, - const char * resourceName, - OSKextRequestResourceCallback callback, - void * context, - OSKextRequestTag * requestTagOut); - static OSReturn cancelRequest( - OSKextRequestTag requestTag, - void ** contextOut); - - static void considerUnloads(Boolean rescheduleOnlyFlag = false); - static void flushNonloadedKexts(Boolean flushPrelinkedKexts); - static void setKextdActive(Boolean active = true); - static void setDeferredLoadSucceeded(Boolean succeeded = true); - static void considerRebuildOfPrelinkedKernel(void); - static void createExcludeListFromBooterData( - OSDictionary * theDictionary, - OSCollectionIterator * theIterator); - static void createExcludeListFromPrelinkInfo(OSArray * theInfoArray); - static boolean_t updateExcludeList(OSDictionary * infoDict); - - static bool isWaitingKextd(void); - - virtual bool setAutounloadEnabled(bool flag); - - virtual const OSSymbol * getIdentifier(void); - virtual const char * getIdentifierCString(void); - virtual OSKextVersion getVersion(void); - virtual OSKextVersion getCompatibleVersion(void); - virtual bool isLibrary(void); - virtual bool isCompatibleWithVersion(OSKextVersion aVersion); - virtual OSObject * getPropertyForHostArch(const char * key); - - virtual OSKextLoadTag getLoadTag(void); - virtual void getSizeInfo(uint32_t *loadSize, uint32_t *wiredSize); - virtual OSData * copyUUID(void); - OSData * copyTextUUID(void); - OSData * copyMachoUUID(const kernel_mach_header_t * header); - virtual OSArray * copyPersonalitiesArray(void); - - /* This removes personalities naming the kext (by CFBundleIdentifier), - * not all personalities defined by the kext (IOPersonalityPublisher or CFBundleIdentifier). - */ - virtual void removePersonalitiesFromCatalog(void); - - /* Converts common string-valued properties to OSSymbols for lower memory consumption. 
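/*
 * A minimal usage sketch of the lookup API above: these functions are
 * marked "caller must release", i.e. they return at "+1". The bundle
 * identifier below is hypothetical; lookupKextWithIdentifier, getLoadTag,
 * and release are declared in this header.
 */
OSKext * kext = OSKext::lookupKextWithIdentifier("com.example.driver");
if (kext != NULL) {
    OSKextLoadTag tag = kext->getLoadTag(); // valid while we hold the ref
    (void)tag;
    kext->release();                        // balance the "+1" from lookup
}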
- */ - static void uniquePersonalityProperties(OSDictionary * personalityDict); - - virtual bool declaresExecutable(void); // might be missing - virtual bool isInterface(void); - virtual bool isKernel(void); - virtual bool isKernelComponent(void); - virtual bool isExecutable(void); - virtual bool isLoadableInSafeBoot(void); - virtual bool isPrelinked(void); - virtual bool isLoaded(void); - virtual bool isStarted(void); - virtual bool isCPPInitialized(void); +// caller must release + static OSKext * lookupKextWithIdentifier(const char * kextIdentifier); + static OSKext * lookupKextWithIdentifier(OSString * kextIdentifier); + static OSKext * lookupKextWithLoadTag(OSKextLoadTag aTag); + static OSKext * lookupKextWithAddress(vm_address_t address); + static OSKext * lookupKextWithUUID(uuid_t uuid); + + kernel_section_t *lookupSection(const char *segname, const char*secname); + + static bool isKextWithIdentifierLoaded(const char * kextIdentifier); + + static OSReturn loadKextWithIdentifier( + const char * kextIdentifier, + Boolean allowDeferFlag = true, + Boolean delayAutounloadFlag = false, + OSKextExcludeLevel startOpt = kOSKextExcludeNone, + OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, + OSArray * personalityNames = NULL); + static OSReturn loadKextWithIdentifier( + OSString * kextIdentifier, + Boolean allowDeferFlag = true, + Boolean delayAutounloadFlag = false, + OSKextExcludeLevel startOpt = kOSKextExcludeNone, + OSKextExcludeLevel startMatchingOpt = kOSKextExcludeAll, + OSArray * personalityNames = NULL); + static OSReturn removeKextWithIdentifier( + const char * kextIdentifier, + bool terminateServicesAndRemovePersonalitiesFlag = false); + static OSReturn removeKextWithLoadTag( + OSKextLoadTag loadTag, + bool terminateServicesAndRemovePersonalitiesFlag = false); + + static OSReturn requestResource( + const char * kextIdentifier, + const char * resourceName, + OSKextRequestResourceCallback callback, + void * context, + OSKextRequestTag * requestTagOut); + static OSReturn cancelRequest( + OSKextRequestTag requestTag, + void ** contextOut); + + static void considerUnloads(Boolean rescheduleOnlyFlag = false); + static void flushNonloadedKexts(Boolean flushPrelinkedKexts); + static void setKextdActive(Boolean active = true); + static void setDeferredLoadSucceeded(Boolean succeeded = true); + static void considerRebuildOfPrelinkedKernel(void); + static void createExcludeListFromBooterData( + OSDictionary * theDictionary, + OSCollectionIterator * theIterator); + static void createExcludeListFromPrelinkInfo(OSArray * theInfoArray); + static boolean_t updateExcludeList(OSDictionary * infoDict); + + static bool isWaitingKextd(void); + + virtual bool setAutounloadEnabled(bool flag); + + virtual const OSSymbol * getIdentifier(void); + virtual const char * getIdentifierCString(void); + virtual OSKextVersion getVersion(void); + virtual OSKextVersion getCompatibleVersion(void); + virtual bool isLibrary(void); + virtual bool isCompatibleWithVersion(OSKextVersion aVersion); + virtual OSObject * getPropertyForHostArch(const char * key); + + virtual OSKextLoadTag getLoadTag(void); + virtual void getSizeInfo(uint32_t *loadSize, uint32_t *wiredSize); + virtual OSData * copyUUID(void); + OSData * copyTextUUID(void); + OSData * copyMachoUUID(const kernel_mach_header_t * header); + virtual OSArray * copyPersonalitiesArray(void); + +/* This removes personalities naming the kext (by CFBundleIdentifier), + * not all personalities defined by the kext (IOPersonalityPublisher or CFBundleIdentifier). 
+ */ + virtual void removePersonalitiesFromCatalog(void); + +/* Converts common string-valued properties to OSSymbols for lower memory consumption. + */ + static void uniquePersonalityProperties(OSDictionary * personalityDict); + + virtual bool declaresExecutable(void); // might be missing + virtual bool isInterface(void); + virtual bool isKernel(void); + virtual bool isKernelComponent(void); + virtual bool isExecutable(void); + virtual bool isLoadableInSafeBoot(void); + virtual bool isPrelinked(void); + virtual bool isLoaded(void); + virtual bool isStarted(void); + virtual bool isCPPInitialized(void); }; diff --git a/libkern/libkern/c++/OSLib.h b/libkern/libkern/c++/OSLib.h index 578733374..93c2548e1 100644 --- a/libkern/libkern/c++/OSLib.h +++ b/libkern/libkern/c++/OSLib.h @@ -2,7 +2,7 @@ * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _OS_OSLIB_H @@ -50,10 +50,10 @@ __END_DECLS #if XNU_KERNEL_PRIVATE #include -#define kalloc_container(size) \ +#define kalloc_container(size) \ ({ kalloc_tag_bt(size, VM_KERN_MEMORY_LIBKERN); }) -#define kallocp_container(size) \ +#define kallocp_container(size) \ ({ kallocp_tag_bt(size, VM_KERN_MEMORY_LIBKERN); }) #if OSALLOCDEBUG @@ -85,4 +85,3 @@ extern "C" int debug_ivars_size; #endif #endif /* _OS_OSLIB_H */ - diff --git a/libkern/libkern/c++/OSMetaClass.h b/libkern/libkern/c++/OSMetaClass.h index f05a9b858..21b4f40e6 100644 --- a/libkern/libkern/c++/OSMetaClass.h +++ b/libkern/libkern/c++/OSMetaClass.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _LIBKERN_OSMETACLASS_H
@@ -33,6 +33,64 @@
 #include 
 #include 

+/*
+ * LIBKERN_ macros below can be used to describe the ownership semantics
+ * of functions handling subclasses of OSObject.
+ * The attributes propagate with inheritance, but can be overridden.
+ * New versions of the Clang Static Analyzer can use this knowledge to
+ * check the code for leaks or uses-after-free.
+ */
+
+/*
+ * By default, methods returning OSObjects are assumed to have the following
+ * ownership semantics:
+ * - Methods whose names start with "get" and which do not return
+ *   a subclass of OSIterator are assumed to be getters.
+ *   They return at "+0" and the caller is not responsible for releasing the
+ *   returned object.
+ *
+ * - All other methods are assumed to return at "+1", and the caller is
+ *   responsible for releasing the returned object.
+ *
+ * The semantics implied by the naming convention described above can be
+ * overridden using either the LIBKERN_RETURNS_RETAINED or LIBKERN_RETURNS_NOT_RETAINED
+ * attribute applied to a function.
+ * In the former case, the function returns at "+1",
+ * and in the latter case at "+0".
+ */
+#if __has_attribute(os_returns_retained)
+#define LIBKERN_RETURNS_RETAINED __attribute__((os_returns_retained))
+#else
+#define LIBKERN_RETURNS_RETAINED
+#endif
+#if __has_attribute(os_returns_not_retained)
+#define LIBKERN_RETURNS_NOT_RETAINED __attribute__((os_returns_not_retained))
+#else
+#define LIBKERN_RETURNS_NOT_RETAINED
+#endif
+
+/*
+ * The LIBKERN_CONSUMED attribute can be applied to parameters.
+ * It specifies that the function call consumes the reference to the
+ * annotated parameter.
+ */
+#if __has_attribute(os_consumed)
+#define LIBKERN_CONSUMED __attribute__((os_consumed))
+#else
+#define LIBKERN_CONSUMED
+#endif
+
+/*
+ * The LIBKERN_CONSUMES_THIS attribute can be applied to methods.
+ * It specifies that the method call consumes a reference to "this" (e.g.
+ * by storing a reference to "this" in a passed parameter).
+ */
+#if __has_attribute(os_consumes_this)
+#define LIBKERN_CONSUMES_THIS __attribute__((os_consumes_this))
+#else
+#define LIBKERN_CONSUMES_THIS
+#endif
+
 class OSMetaClass;
 class OSObject;
 class OSString;
@@ -53,8 +111,8 @@ class OSCollection;
  * which together form the basis of the Libkern and I/O Kit C++ class hierarchy
  * and run-time type information facility.
  */
- 
- 
+
+
 /*! @parseOnly */
 #define APPLE_KEXT_COMPATIBILITY

@@ -103,11 +161,11 @@ class OSCollection;

 #if __cplusplus >= 201103L
-#define APPLE_KEXT_OVERRIDE     override
+#define APPLE_KEXT_OVERRIDE             override
 #if defined(__LP64__)
 #define APPLE_KEXT_COMPATIBILITY_OVERRIDE
 #else
-#define APPLE_KEXT_COMPATIBILITY_OVERRIDE   APPLE_KEXT_OVERRIDE
+#define APPLE_KEXT_COMPATIBILITY_OVERRIDE APPLE_KEXT_OVERRIDE
 #endif
 #else
 #define APPLE_KEXT_OVERRIDE
@@ -179,134 +237,134 @@ class OSMetaClassBase
 public:

- /*!
- * @define OSTypeAlloc
- * @hidecontents
- *
- * @abstract
- * Allocates an instance of the named object class.
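/*
 * A minimal sketch of the ownership annotations above in use. MyDriver and
 * its methods are hypothetical (metaclass boilerplate omitted); only the
 * LIBKERN_ macros and the "get" naming convention come from this header.
 */
class MyDriver : public OSObject {
public:
    // A "get" prefix implies returning at "+0"; the attribute just makes
    // that explicit for the static analyzer.
    LIBKERN_RETURNS_NOT_RETAINED OSString * getName();

    // A non-"get" name implies "+1": the caller must release the result.
    LIBKERN_RETURNS_RETAINED OSString * copyName();

    // The callee takes over the caller's reference to name.
    void adoptName(LIBKERN_CONSUMED OSString * name);
};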
- * - * @param type The name of the desired class to be created, - * as a raw token, not a string or macro. - * - * @result - * A pointer to the new, uninitialized object on success; - * NULL on failure. - * - * @discussion - * See also - * @link - * //apple_ref/cpp/clm/OSMetaClass/allocClassWithName/staticOSObject*\/(constchar*) - * OSMetaClass::allocClassWithName(const char *)@/link - * and - * @link - * //apple_ref/cpp/instm/OSMetaClass/alloc/virtualOSObject*\/() - * OSMetaClass::alloc@/link. - * - * The OSTypeAlloc macro is used to avoid binary compatibility difficulties - * presented by the C++ new operator. - */ +/*! + * @define OSTypeAlloc + * @hidecontents + * + * @abstract + * Allocates an instance of the named object class. + * + * @param type The name of the desired class to be created, + * as a raw token, not a string or macro. + * + * @result + * A pointer to the new, uninitialized object on success; + * NULL on failure. + * + * @discussion + * See also + * @link + * //apple_ref/cpp/clm/OSMetaClass/allocClassWithName/staticOSObject*\/(constchar*) + * OSMetaClass::allocClassWithName(const char *)@/link + * and + * @link + * //apple_ref/cpp/instm/OSMetaClass/alloc/virtualOSObject*\/() + * OSMetaClass::alloc@/link. + * + * The OSTypeAlloc macro is used to avoid binary compatibility difficulties + * presented by the C++ new operator. + */ #define OSTypeAlloc(type) ((type *) ((type::metaClass)->alloc())) - /*! - * @define OSTypeID - * @hidecontents - * - * @abstract - * Returns the type ID (metaclass) of a class based on its name. - * - * @param type The name of the desired class, as a raw token, - * not a string or macro. - * - * @result - * The unique type ID (metaclass) for the class. - * - * @discussion - * It is typically more useful to determine whether a class is derived - * from another; see - * @link //apple_ref/cpp/macro/OSDynamicCast OSDynamicCast@/link - * and - * @link //apple_ref/cpp/macro/OSCheckTypeInst OSCheckTypeInst@/link. - */ +/*! + * @define OSTypeID + * @hidecontents + * + * @abstract + * Returns the type ID (metaclass) of a class based on its name. + * + * @param type The name of the desired class, as a raw token, + * not a string or macro. + * + * @result + * The unique type ID (metaclass) for the class. + * + * @discussion + * It is typically more useful to determine whether a class is derived + * from another; see + * @link //apple_ref/cpp/macro/OSDynamicCast OSDynamicCast@/link + * and + * @link //apple_ref/cpp/macro/OSCheckTypeInst OSCheckTypeInst@/link. + */ #define OSTypeID(type) (type::metaClass) - /*! - * @define OSTypeIDInst - * @hidecontents - * - * @abstract - * Returns the type ID (metaclass) for the class of an object instance. - * - * @param typeinst An instance of an OSObject subclass. - * - * @result - * The type ID of that object's class; that is, its metaclass. - * - * @discussion - * It is typically more useful to determine whether an object is derived - * from a particular class; see - * @link //apple_ref/cpp/macro/OSDynamicCast OSDynamicCast@/link - * and - * @link //apple_ref/cpp/macro/OSCheckTypeInst OSCheckTypeInst@/link. - */ +/*! + * @define OSTypeIDInst + * @hidecontents + * + * @abstract + * Returns the type ID (metaclass) for the class of an object instance. + * + * @param typeinst An instance of an OSObject subclass. + * + * @result + * The type ID of that object's class; that is, its metaclass. 
+ * + * @discussion + * It is typically more useful to determine whether an object is derived + * from a particular class; see + * @link //apple_ref/cpp/macro/OSDynamicCast OSDynamicCast@/link + * and + * @link //apple_ref/cpp/macro/OSCheckTypeInst OSCheckTypeInst@/link. + */ #define OSTypeIDInst(typeinst) ((typeinst)->getMetaClass()) - /*! - * @define OSDynamicCast - * @hidecontents - * - * @abstract - * Safe type-casting for Libkern C++ objects. - * - * @param type The name of the desired class type, as a raw token, - * not a string or macro. - * It is assumed you intend to cast to a pointer - * to an object of this type. - * Type qualifiers, such as const, - * are not recognized and will cause - * a (usually obscure) compile error. - * @param inst A pointer to the object instance to be cast. - * May be NULL. - * - * @result - * inst if it is non-NULL - * and derived from type; - * otherwise NULL. - * - * @discussion - * OSDynamicCast is a rough equivalent - * to the standard C++ RTTI dynamic_cast<T> operator. - * Your code should use this instead of raw C type-casting, - * and check the resulting value. - * If the result is non-NULL, - * the object is safe to use as the type-cast class; - * if the result is NULL, - * the object does not derive from the type-cast class - * and your code should take appropriate steps to handle the error. - */ +/*! + * @define OSDynamicCast + * @hidecontents + * + * @abstract + * Safe type-casting for Libkern C++ objects. + * + * @param type The name of the desired class type, as a raw token, + * not a string or macro. + * It is assumed you intend to cast to a pointer + * to an object of this type. + * Type qualifiers, such as const, + * are not recognized and will cause + * a (usually obscure) compile error. + * @param inst A pointer to the object instance to be cast. + * May be NULL. + * + * @result + * inst if it is non-NULL + * and derived from type; + * otherwise NULL. + * + * @discussion + * OSDynamicCast is a rough equivalent + * to the standard C++ RTTI dynamic_cast<T> operator. + * Your code should use this instead of raw C type-casting, + * and check the resulting value. + * If the result is non-NULL, + * the object is safe to use as the type-cast class; + * if the result is NULL, + * the object does not derive from the type-cast class + * and your code should take appropriate steps to handle the error. + */ #define OSDynamicCast(type, inst) \ ((type *) OSMetaClassBase::safeMetaCast((inst), OSTypeID(type))) - /*! - * @define OSCheckTypeInst - * @hidecontents - * - * @abstract - * Checks whether two objects are type-compatible. - * - * @param typeinst The reference object. - * @param inst The object to check for type compatibility. - * - * @result - * true if both inst and - * typeinst are non-NULL - * and inst is derived from the class of typeinst; - * otherwise false. - */ +/*! + * @define OSCheckTypeInst + * @hidecontents + * + * @abstract + * Checks whether two objects are type-compatible. + * + * @param typeinst The reference object. + * @param inst The object to check for type compatibility. + * + * @result + * true if both inst and + * typeinst are non-NULL + * and inst is derived from the class of typeinst; + * otherwise false. 
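/*
 * A short usage sketch of the casting macros documented above.
 * OSData::withCapacity is standard Libkern API, used here only for
 * illustration.
 */
OSObject * obj  = OSData::withCapacity(16);   // created at "+1"
OSData   * data = OSDynamicCast(OSData, obj);
if (data != NULL) {
    // obj really derives from OSData; the macro returned the same pointer.
}
if (obj != NULL) {
    obj->release();                           // balance withCapacity
}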
+ */
 #define OSCheckTypeInst(typeinst, inst) \
     OSMetaClassBase::checkTypeInst(inst, typeinst)

@@ -320,11 +378,11 @@ public:
  */
 #define OSSafeReleaseNULL(inst)   do { if (inst != NULL) (inst)->release(); (inst) = NULL; } while (0)

-typedef void (*_ptf_t)(void);
+ typedef void (*_ptf_t)(void);

 #if defined(__arm__) || defined(__arm64__)

- static _ptf_t _ptmf2ptf(const OSMetaClassBase *self, void (OSMetaClassBase::*func)(void));
+ static _ptf_t _ptmf2ptf(const OSMetaClassBase * self, void (OSMetaClassBase::*func)(void));

 #elif defined(__i386__) || defined(__x86_64__)

@@ -332,433 +390,434 @@ typedef void (*_ptf_t)(void);
 // the same for kexts compiled with the standard Itanium C++
 // ABI

-static inline _ptf_t
-_ptmf2ptf(const OSMetaClassBase *self, void (OSMetaClassBase::*func)(void))
-{
- union {
- void (OSMetaClassBase::*fIn)(void);
- uintptr_t fVTOffset;
- _ptf_t fPFN;
- } map;
-
- map.fIn = func;
-
- if (map.fVTOffset & 1) {
- // virtual
- union {
- const OSMetaClassBase *fObj;
- _ptf_t **vtablep;
- } u;
- u.fObj = self;
-
- // Virtual member function so dereference vtable
- return *(_ptf_t *)(((uintptr_t)*u.vtablep) + map.fVTOffset - 1);
- } else {
- // Not virtual, i.e. plain member func
- return map.fPFN;
- }
-}
+ static inline _ptf_t
+ _ptmf2ptf(const OSMetaClassBase *self, void (OSMetaClassBase::*func)(void))
+ {
+ union {
+ void (OSMetaClassBase::*fIn)(void);
+ uintptr_t fVTOffset;
+ _ptf_t fPFN;
+ } map;
+
+ map.fIn = func;
+
+ if (map.fVTOffset & 1) {
+ // virtual
+ union {
+ const OSMetaClassBase *fObj;
+ _ptf_t **vtablep;
+ } u;
+ u.fObj = self;
+
+ // Virtual member function so dereference vtable
+ return *(_ptf_t *)(((uintptr_t)*u.vtablep) + map.fVTOffset - 1);
+ } else {
+ // Not virtual, i.e. plain member func
+ return map.fPFN;
+ }
+ }

 #else
 #error Unknown architecture.
 #endif /* __arm__ */

- /*!
- * @define OSMemberFunctionCast
- * @hidecontents
- *
- * @abstract
- * Converts a C++ member function pointer, relative to an instance,
- * to a C-style pointer to function.
- *
- * @param cptrtype The function type declaration to cast to
- * (typically provided as a typedef by I/O KitKit classes).
- * @param self The this pointer of the object whose function
- * you wish to cache.
- * @param func The pointer to the member function itself,
- * something like &Class::function.
- *
- * @result
- * A pointer to a function of the given type referencing self.
- *
- * @discussion
- * This function is used to generate pointers to C++ functions for instances,
- * such that they can be registered as callbacks with I/O Kit objects.
- *
- * No warnings are generated.
- *
- * This function will panic if an attempt is made to call it
- * with a multiply-inheriting class.
- */
+/*!
+ * @define OSMemberFunctionCast
+ * @hidecontents
+ *
+ * @abstract
+ * Converts a C++ member function pointer, relative to an instance,
+ * to a C-style pointer to function.
+ *
+ * @param cptrtype The function type declaration to cast to
+ * (typically provided as a typedef by I/O Kit classes).
+ * @param self The this pointer of the object whose function
+ * you wish to cache.
+ * @param func The pointer to the member function itself,
+ * something like &Class::function.
+ *
+ * @result
+ * A pointer to a function of the given type referencing self.
+ *
+ * @discussion
+ * This function is used to generate pointers to C++ functions for instances,
+ * such that they can be registered as callbacks with I/O Kit objects.
+ *
+ * No warnings are generated.
+ * + * This function will panic if an attempt is made to call it + * with a multiply-inheriting class. + */ #define OSMemberFunctionCast(cptrtype, self, func) \ (cptrtype) OSMetaClassBase:: \ - _ptmf2ptf(self, (void (OSMetaClassBase::*)(void)) func) + _ptmf2ptf(self, (void (OSMetaClassBase::*)(void)) func) protected: - OSMetaClassBase(); - virtual ~OSMetaClassBase(); + OSMetaClassBase(); + virtual + ~OSMetaClassBase(); private: - // Disable copy constructors of OSMetaClassBase based objects - /* Not to be included in headerdoc. - * - * @function operator = - * - * @abstract - * Disable implicit copy constructor by making private - * - * @param src Reference to source object that isn't allowed to be copied. - */ - void operator =(OSMetaClassBase &src); - - /* Not to be included in headerdoc. - * - * @function OSMetaClassBase - * - * @abstract - * Disable implicit copy constructor by making private - * - * @param src Reference to source object that isn't allowed to be copied. - */ - OSMetaClassBase(OSMetaClassBase &src); +// Disable copy constructors of OSMetaClassBase based objects +/* Not to be included in headerdoc. + * + * @function operator = + * + * @abstract + * Disable implicit copy constructor by making private + * + * @param src Reference to source object that isn't allowed to be copied. + */ + void operator =(OSMetaClassBase &src); + +/* Not to be included in headerdoc. + * + * @function OSMetaClassBase + * + * @abstract + * Disable implicit copy constructor by making private + * + * @param src Reference to source object that isn't allowed to be copied. + */ + OSMetaClassBase(OSMetaClassBase &src); public: // xx-review: the original comment for this makes it sound to me like we don't // xx-review: catch over-releasing an object...? - /*! - * @function release - * - * @abstract - * Abstract declaration of - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/(int) - * release(int freeWhen)@/link. - * - * @discussion - * See - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/(int) - * release(int freeWhen)@/link. - */ - virtual void release(int freeWhen) const = 0; - - - /*! - * @function getRetainCount - * - * @abstract - * Abstract declaration of - * @link - * //apple_ref/cpp/instm/OSObject/getRetainCount/virtualint/() - * getRetainCount()@/link. - * - * @discussion - * See - * @link - * //apple_ref/cpp/instm/OSObject/getRetainCount/virtualint/() - * OSObject::getRetainCount()@/link. - */ - virtual int getRetainCount() const = 0; - - - /*! - * @function retain - * - * @abstract - * Abstract declaration of - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain()@/link. - * - * @discussion - * See - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * OSObject::retain()@/link. - */ - virtual void retain() const = 0; - - - /*! - * @function release - * - * @abstract - * Abstract declaration of - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link. - * - * @discussion - * See - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * OSObject::release@/link. - */ - virtual void release() const = 0; - - - /*! - * @function serialize - * - * @abstract - * Abstract declaration of - * @link - * //apple_ref/cpp/instm/OSObject/serialize/virtualbool/(OSSerialize*) - * serialize@/link. - * - * @discussion - * See - * @link - * //apple_ref/cpp/instm/OSObject/serialize/virtualbool/(OSSerialize*) - * OSObject::serialize@/link. 
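/*
 * A usage sketch for OSMemberFunctionCast as documented above. MyService,
 * handleEvent, and EventHandler are hypothetical; metaclass boilerplate
 * is omitted.
 */
typedef void (*EventHandler)(OSObject * owner);

class MyService : public OSObject {
public:
    void handleEvent(void);
    void registerHandler(void)
    {
        EventHandler h = OSMemberFunctionCast(EventHandler, this,
            &MyService::handleEvent);
        // h is a plain C function pointer suitable for C-style callback
        // registration; invoked with this instance, it calls handleEvent.
        (void)h;
    }
};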
- */ - virtual bool serialize(OSSerialize * serializer) const = 0; - - - /*! - * @function getMetaClass - * - * @abstract - * Returns the OSMetaClass representing - * an OSMetaClassBase subclass. - * - * @discussion - * OSObject overrides this abstract member function - * to return the OSMetaClass object that represents - * each class for run-time typing. - */ - virtual const OSMetaClass * getMetaClass() const = 0; - - - /*! - * @function isEqualTo - * - * @abstract - * Checks whether another object is equal to the receiver. - * - * @param anObject The object to copmare to the receiver. - * - * @result - * true if the objects are equal, false otherwise. - * - * @discussion - * OSMetaClassBase implements this as a direct pointer comparison, - * since it has no other information to judge equality by. - * Subclasses generally override this function - * to do a more meaningful comparison. - * For example, OSString implements it to return - * true if anObject - * is derived from OSString and represents the same C string. - */ - virtual bool isEqualTo(const OSMetaClassBase * anObject) const; - - - /*! - * @function metaCast - * - * @abstract - * Casts this object is to the class managed by the given OSMetaClass. - * - * @param toMeta A pointer to a constant OSMetaClass - * for the desired target type. - * - * @result - * this if the object is derived - * from the class managed by toMeta, - * otherwise NULL. - * - * @discussion - * It is far more convenient to use - * @link OSDynamicCast OSDynamicCast@/link. - */ - OSMetaClassBase * metaCast(const OSMetaClass * toMeta) const; - - - /*! - * @function metaCast - * - * @abstract - * Casts this object is to the class managed by the named OSMetaClass. - * - * @param toMeta An OSSymbol naming the desired target type. - * - * @result - * this if the object is derived - * from the class named by toMeta, - * otherwise NULL. - * - * @discussion - * It is far more convenient to use - * @link OSDynamicCast OSDynamicCast@/link. - */ - OSMetaClassBase * metaCast(const OSSymbol * toMeta) const; - - - /*! - * @function metaCast - * - * @abstract - * Casts this object is to the class managed by the named OSMetaClass. - * - * @param toMeta An OSString naming the desired target type. - * @result - * this if the object is derived - * from the class named by toMeta, - * otherwise NULL. - * - * @discussion - * It is far more convenient to use - * @link OSDynamicCast OSDynamicCast@/link. - */ - OSMetaClassBase * metaCast(const OSString * toMeta) const; - - - /*! - * @function metaCast - * - * @abstract - * Casts this object is to the class managed by the named OSMetaClass. - * - * @param toMeta A C string naming the desired target type. - * @result - * this if the object is derived - * from the class named by toMeta, - * otherwise NULL. - * - * @discussion - * It is far more convenient to use - * @link OSDynamicCast OSDynamicCast@/link. - */ - OSMetaClassBase * metaCast(const char * toMeta) const; - - // Helper inlines for run-time type preprocessor macros - /*! - * @function safeMetaCast - * - * @abstract - * Casts an object is to the class managed by the given OSMetaClass. - * - * @param anObject A pointer to the object to be cast. - * @param toMeta A pointer to a constant OSMetaClass - * for the desired target type. - * - * @result - * anObject if the object is derived - * from the class managed by toMeta, - * otherwise NULL. - * - * @discussion - * It is far more convenient to use - * @link OSDynamicCast OSDynamicCast@/link. 
- */
- static OSMetaClassBase * safeMetaCast(
- const OSMetaClassBase * anObject,
- const OSMetaClass * toMeta);
-
- /*!
- * @function checkTypeInst
- *
- * @abstract
- * Checks whether an object instance is of the same class
- * as another object instance (or a subclass of that class).
- *
- * @param inst A pointer to the object to check.
- * @param typeinst A pointer to an object of the class being checked.
- *
- * @result
- * true if the object is derived
- * from the class of typeinst
- * or a subclass of that class,
- * otherwise false.
- *
- * @discussion
- * It is far more convenient to use
- * @link OSCheckTypeInst OSCheckTypeInst@/link.
- */
- static bool checkTypeInst(
- const OSMetaClassBase * inst,
- const OSMetaClassBase * typeinst);
-
- static void initialize(void);
+/*!
+ * @function release
+ *
+ * @abstract
+ * Abstract declaration of
+ * @link
+ * //apple_ref/cpp/instm/OSObject/release/virtualvoid/(int)
+ * release(int freeWhen)@/link.
+ *
+ * @discussion
+ * See
+ * @link
+ * //apple_ref/cpp/instm/OSObject/release/virtualvoid/(int)
+ * release(int freeWhen)@/link.
+ */
+ virtual void release(int freeWhen) const = 0;
+
+
+/*!
+ * @function getRetainCount
+ *
+ * @abstract
+ * Abstract declaration of
+ * @link
+ * //apple_ref/cpp/instm/OSObject/getRetainCount/virtualint/()
+ * getRetainCount()@/link.
+ *
+ * @discussion
+ * See
+ * @link
+ * //apple_ref/cpp/instm/OSObject/getRetainCount/virtualint/()
+ * OSObject::getRetainCount()@/link.
+ */
+ virtual int getRetainCount() const = 0;
+
+
+/*!
+ * @function retain
+ *
+ * @abstract
+ * Abstract declaration of
+ * @link
+ * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/()
+ * retain()@/link.
+ *
+ * @discussion
+ * See
+ * @link
+ * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/()
+ * OSObject::retain()@/link.
+ */
+ virtual void retain() const = 0;
+
+
+/*!
+ * @function release
+ *
+ * @abstract
+ * Abstract declaration of
+ * @link
+ * //apple_ref/cpp/instm/OSObject/release/virtualvoid/()
+ * release@/link.
+ *
+ * @discussion
+ * See
+ * @link
+ * //apple_ref/cpp/instm/OSObject/release/virtualvoid/()
+ * OSObject::release@/link.
+ */
+ virtual void release() const = 0;
+
+
+/*!
+ * @function serialize
+ *
+ * @abstract
+ * Abstract declaration of
+ * @link
+ * //apple_ref/cpp/instm/OSObject/serialize/virtualbool/(OSSerialize*)
+ * serialize@/link.
+ *
+ * @discussion
+ * See
+ * @link
+ * //apple_ref/cpp/instm/OSObject/serialize/virtualbool/(OSSerialize*)
+ * OSObject::serialize@/link.
+ */
+ virtual bool serialize(OSSerialize * serializer) const = 0;
+
+
+/*!
+ * @function getMetaClass
+ *
+ * @abstract
+ * Returns the OSMetaClass representing
+ * an OSMetaClassBase subclass.
+ *
+ * @discussion
+ * OSObject overrides this abstract member function
+ * to return the OSMetaClass object that represents
+ * each class for run-time typing.
+ */
+ virtual const OSMetaClass * getMetaClass() const = 0;
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Checks whether another object is equal to the receiver.
+ *
+ * @param anObject The object to compare to the receiver.
+ *
+ * @result
+ * true if the objects are equal, false otherwise.
+ *
+ * @discussion
+ * OSMetaClassBase implements this as a direct pointer comparison,
+ * since it has no other information to judge equality by.
+ * Subclasses generally override this function
+ * to do a more meaningful comparison.
+ * For example, OSString implements it to return
+ * true if anObject
+ * is derived from OSString and represents the same C string.
+ */
+ virtual bool isEqualTo(const OSMetaClassBase * anObject) const;
+
+
+/*!
+ * @function metaCast
+ *
+ * @abstract
+ * Casts this object to the class managed by the given OSMetaClass.
+ *
+ * @param toMeta A pointer to a constant OSMetaClass
+ * for the desired target type.
+ *
+ * @result
+ * this if the object is derived
+ * from the class managed by toMeta,
+ * otherwise NULL.
+ *
+ * @discussion
+ * It is far more convenient to use
+ * @link OSDynamicCast OSDynamicCast@/link.
+ */
+ OSMetaClassBase * metaCast(const OSMetaClass * toMeta) const;
+
+
+/*!
+ * @function metaCast
+ *
+ * @abstract
+ * Casts this object to the class managed by the named OSMetaClass.
+ *
+ * @param toMeta An OSSymbol naming the desired target type.
+ *
+ * @result
+ * this if the object is derived
+ * from the class named by toMeta,
+ * otherwise NULL.
+ *
+ * @discussion
+ * It is far more convenient to use
+ * @link OSDynamicCast OSDynamicCast@/link.
+ */
+ OSMetaClassBase * metaCast(const OSSymbol * toMeta) const;
+
+
+/*!
+ * @function metaCast
+ *
+ * @abstract
+ * Casts this object to the class managed by the named OSMetaClass.
+ *
+ * @param toMeta An OSString naming the desired target type.
+ * @result
+ * this if the object is derived
+ * from the class named by toMeta,
+ * otherwise NULL.
+ *
+ * @discussion
+ * It is far more convenient to use
+ * @link OSDynamicCast OSDynamicCast@/link.
+ */
+ OSMetaClassBase * metaCast(const OSString * toMeta) const;
+
+
+/*!
+ * @function metaCast
+ *
+ * @abstract
+ * Casts this object to the class managed by the named OSMetaClass.
+ *
+ * @param toMeta A C string naming the desired target type.
+ * @result
+ * this if the object is derived
+ * from the class named by toMeta,
+ * otherwise NULL.
+ *
+ * @discussion
+ * It is far more convenient to use
+ * @link OSDynamicCast OSDynamicCast@/link.
+ */
+ OSMetaClassBase * metaCast(const char * toMeta) const;
+
+// Helper inlines for run-time type preprocessor macros
+/*!
+ * @function safeMetaCast
+ *
+ * @abstract
+ * Casts an object to the class managed by the given OSMetaClass.
+ *
+ * @param anObject A pointer to the object to be cast.
+ * @param toMeta A pointer to a constant OSMetaClass
+ * for the desired target type.
+ *
+ * @result
+ * anObject if the object is derived
+ * from the class managed by toMeta,
+ * otherwise NULL.
+ *
+ * @discussion
+ * It is far more convenient to use
+ * @link OSDynamicCast OSDynamicCast@/link.
+ */
+ static OSMetaClassBase * safeMetaCast(
+ const OSMetaClassBase * anObject,
+ const OSMetaClass * toMeta);
+
+/*!
+ * @function checkTypeInst
+ *
+ * @abstract
+ * Checks whether an object instance is of the same class
+ * as another object instance (or a subclass of that class).
+ *
+ * @param inst A pointer to the object to check.
+ * @param typeinst A pointer to an object of the class being checked.
+ *
+ * @result
+ * true if the object is derived
+ * from the class of typeinst
+ * or a subclass of that class,
+ * otherwise false.
+ *
+ * @discussion
+ * It is far more convenient to use
+ * @link OSCheckTypeInst OSCheckTypeInst@/link.
+ */
+ static bool checkTypeInst(
+ const OSMetaClassBase * inst,
+ const OSMetaClassBase * typeinst);
+
+ static void initialize(void);

 public:

- /*!
- * @function taggedRetain
- *
- * @abstract
- * Abstract declaration of
- * @link
- * //apple_ref/cpp/instm/OSObject/taggedRetain/virtualvoid/(constvoid*)
- * taggedRetain(const void *)@/link.
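/*
 * As the definition of OSDynamicCast earlier in this header shows, the
 * macro is shorthand for safeMetaCast plus OSTypeID; the two casts below
 * are equivalent. obj stands for any OSObject-derived pointer and
 * OSString is an arbitrary target class.
 */
OSString * a = OSDynamicCast(OSString, obj);
OSString * b = (OSString *) OSMetaClassBase::safeMetaCast(
    obj, OSTypeID(OSString));
// Both yield obj if it derives from OSString, otherwise NULL
// (a NULL input also yields NULL).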
- * - * @discussion - * See - * @link - * //apple_ref/cpp/instm/OSObject/taggedRetain/virtualvoid/(constvoid*) - * OSObject::taggedRetain(const void *)@/link. - */ - // WAS: virtual void _RESERVEDOSMetaClassBase0(); - virtual void taggedRetain(const void * tag = 0) const = 0; - - - /*! - * @function taggedRelease - * - * @abstract - * Abstract declaration of - * @link - * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*) - * taggedRelease(const void *)@/link. - * - * @discussion - * See - * @link - * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*) - * OSObject::taggedRelease(const void *)@/link. - */ - // WAS: virtual void _RESERVEDOSMetaClassBase1(); - virtual void taggedRelease(const void * tag = 0) const = 0; +/*! + * @function taggedRetain + * + * @abstract + * Abstract declaration of + * @link + * //apple_ref/cpp/instm/OSObject/taggedRetain/virtualvoid/(constvoid*) + * taggedRetain(const void *)@/link. + * + * @discussion + * See + * @link + * //apple_ref/cpp/instm/OSObject/taggedRetain/virtualvoid/(constvoid*) + * OSObject::taggedRetain(const void *)@/link. + */ +// WAS: virtual void _RESERVEDOSMetaClassBase0(); + virtual void taggedRetain(const void * tag = 0) const = 0; + + +/*! + * @function taggedRelease + * + * @abstract + * Abstract declaration of + * @link + * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*) + * taggedRelease(const void *)@/link. + * + * @discussion + * See + * @link + * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*) + * OSObject::taggedRelease(const void *)@/link. + */ +// WAS: virtual void _RESERVEDOSMetaClassBase1(); + virtual void taggedRelease(const void * tag = 0) const = 0; protected: - /*! - * @function taggedRelease - * - * @abstract - * Abstract declaration of - * @link - * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*,constint) - * taggedRelease(const void *, const int freeWhen)@/link. - * - * @discussion - * See - * @link - * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*,constint) - * OSObject::taggedRelease(const void *, const int freeWhen)@/link. - */ - // WAS: virtual void _RESERVEDOSMetaClassBase2(); - virtual void taggedRelease( - const void * tag, - const int freeWhen) const = 0; +/*! + * @function taggedRelease + * + * @abstract + * Abstract declaration of + * @link + * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*,constint) + * taggedRelease(const void *, const int freeWhen)@/link. + * + * @discussion + * See + * @link + * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*,constint) + * OSObject::taggedRelease(const void *, const int freeWhen)@/link. 
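
The tagged variants declared here let a reference be attributed to its owner. A minimal sketch of the usual pattern, with a hypothetical holder class that is not part of this header:

    // Illustrative only. Passing the owner as the tag lets the kernel's
    // reference-tracking diagnostics attribute each retain/release pair.
    class MyHolder {
        const OSObject * fObject;
    public:
        MyHolder() : fObject(NULL) { }
        void setObject(const OSObject * obj) {
            if (obj) obj->taggedRetain(this);        // retain on our behalf
            if (fObject) fObject->taggedRelease(this); // drop the old reference
            fObject = obj;
        }
        ~MyHolder() {
            if (fObject) fObject->taggedRelease(this);
        }
    };
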
+ */ +// WAS: virtual void _RESERVEDOSMetaClassBase2(); + virtual void taggedRelease( + const void * tag, + const int freeWhen) const = 0; private: #if APPLE_KEXT_VTABLE_PADDING - // Virtual Padding - virtual void _RESERVEDOSMetaClassBase3(); - virtual void _RESERVEDOSMetaClassBase4(); - virtual void _RESERVEDOSMetaClassBase5(); - virtual void _RESERVEDOSMetaClassBase6(); - virtual void _RESERVEDOSMetaClassBase7(); +// Virtual Padding + virtual void _RESERVEDOSMetaClassBase3(); + virtual void _RESERVEDOSMetaClassBase4(); + virtual void _RESERVEDOSMetaClassBase5(); + virtual void _RESERVEDOSMetaClassBase6(); + virtual void _RESERVEDOSMetaClassBase7(); #endif } APPLE_KEXT_COMPATIBILITY; #ifdef XNU_KERNEL_PRIVATE typedef bool (*OSMetaClassInstanceApplierFunction)(const OSObject * instance, - void * context); + void * context); #endif /* XNU_KERNEL_PRIVATE */ /*! @@ -844,790 +903,794 @@ typedef bool (*OSMetaClassInstanceApplierFunction)(const OSObject * instance, */ class OSMetaClass : private OSMetaClassBase { - friend class OSKext; + friend class OSKext; #if IOKITSTATS friend class IOStatistics; #endif private: - // Can never be allocated must be created at compile time - static void * operator new(size_t size); - - /* Reserved for future use. (Internal use only) */ - struct ExpansionData *reserved; - - /* superClass Handle to the superclass's meta class. */ - const OSMetaClass *superClassLink; - - /* className OSSymbol of the class' name. */ - const OSSymbol *className; - - /* classSize How big is a single instance of this class. */ - unsigned int classSize; - - /* instanceCount Roughly number of instances of the object, - * +1 for each direct subclass with a nonzero refcount. - * Used primarily as a code-in-use flag. - */ - mutable unsigned int instanceCount; - - /* Not to be included in headerdoc. - * - * @function OSMetaClass - * - * @abstract - * The default private constructor. - */ - OSMetaClass(); - - // Called by postModLoad - /* Not to be included in headerdoc. - * - * @function logError - * - * @abstract - * Logs an error string for an OSReturn value - * using printf. - * - * @param result The OSReturn value for which to log a message. - * - * @discussion - * This function is used to log errors loading kernel extensions. - * Kernel extensions themselves should not call it. - */ - static void logError(OSReturn result); +// Can never be allocated must be created at compile time + static void * operator new(size_t size); -public: +/* Reserved for future use. (Internal use only) */ + struct ExpansionData *reserved; - /*! - * @function getMetaClassWithName - * - * @abstract - * Look up a metaclass in the run-time type information system. - * - * @param name The name of the desired class's metaclass. - * - * @result - * A pointer to the metaclass object if found, NULL otherwise. - */ - static const OSMetaClass * getMetaClassWithName(const OSSymbol * name); +/* superClass Handle to the superclass's meta class. */ + const OSMetaClass *superClassLink; -#if XNU_KERNEL_PRIVATE +/* className OSSymbol of the class' name. */ + const OSSymbol *className; - /*! - * @function copyMetaClassWithName - * - * @abstract - * Look up a metaclass in the run-time type information system. - * - * @param name The name of the desired class's metaclass. - * - * @result - * A pointer to the metaclass object if found, NULL otherwise. - * The metaclass will be protected from unloading until releaseMetaClass() - * is called. - */ - static const OSMetaClass * copyMetaClassWithName(const OSSymbol * name); - /*! 
- * @function releaseMetaClass - * - * @abstract - * Releases reference obtained from copyMetaClassWithName(). - * - * @discussion - * The metaclass will be protected from unloading until releaseMetaClass() - * is called. - */ - void releaseMetaClass() const; +/* classSize How big is a single instance of this class. */ + unsigned int classSize; -#endif /* XNU_KERNEL_PRIVATE */ +/* instanceCount Roughly number of instances of the object, + * +1 for each direct subclass with a nonzero refcount. + * Used primarily as a code-in-use flag. + */ + mutable unsigned int instanceCount; -protected: - /*! - * @function retain - * - * @abstract - * Implements the abstract retain function to do nothing. - * - * @discussion - * Since an OSMetaClass instance must remain in existence - * for as long as its kernel extension is loaded, - * OSMetaClass does not use reference-counting. - */ - virtual void retain() const; - - - /*! - * @function release - * - * @abstract - * Implements the abstract release function to do nothing. - * - * @discussion - * Since an OSMetaClass instance must remain in existence - * for as long as its kernel extension is loaded, - * OSMetaClass does not use reference-counting. - */ - virtual void release() const; - - - /*! - * @function release - * - * @abstract - * Implements the abstract release(int freeWhen) - * function to do nothing. - * - * @param freeWhen Unused. - * - * @discussion - * Since an OSMetaClass instance must remain in existence - * for as long as its kernel extension is loaded, - * OSMetaClass does not use reference-counting. - */ - virtual void release(int freeWhen) const; - - - /*! - * @function taggedRetain - * - * @abstract - * Implements the abstract taggedRetain(const void *) - * function to do nothing. - * - * @param tag Unused. - * - * @discussion - * Since an OSMetaClass instance must remain in existence - * for as long as its kernel extension is loaded, - * OSMetaClass does not use reference-counting. - */ - virtual void taggedRetain(const void * tag = 0) const; - - - /*! - * @function taggedRelease - * - * @abstract - * Implements the abstract taggedRelease(const void *) - * function to do nothing. - * - * @param tag Unused. - * - * @discussion - * Since an OSMetaClass instance must remain in existence - * for as long as its kernel extension is loaded, - * OSMetaClass does not use reference-counting. - */ - virtual void taggedRelease(const void * tag = 0) const; - - - /*! - * @function taggedRelease - * - * @abstract - * Implements the abstract taggedRelease(const void *, cont int) - * function to do nothing. - * - * @param tag Unused. - * @param freeWhen Unused. - * - * @discussion - * Since an OSMetaClass instance must remain in existence - * for as long as its kernel extension is loaded, - * OSMetaClass does not use reference-counting. - */ - virtual void taggedRelease( - const void * tag, - const int freeWhen) const; - - - /*! - * @function getRetainCount - * - * @abstract - * Implements the abstract getRetainCount - * function to return 0. - * - * @result - * Always returns 0. - * - * @discussion - * Since an OSMetaClass instance must remain in existence - * for as long as its kernel extension is loaded, - * OSMetaClass does not use reference-counting. - */ - virtual int getRetainCount() const; - - - /* Not to be included in headerdoc. - * - * @function getMetaClass - * - * @abstract - * Returns the meta-metaclass. - * - * @result - * The metaclass of the OSMetaClass object. - */ - virtual const OSMetaClass * getMetaClass() const; - - - /*! 
- * @function OSMetaClass - * - * @abstract - * Constructor for OSMetaClass objects. - * - * @param className A C string naming the C++ class - * that this OSMetaClass represents. - * @param superclass The OSMetaClass object representing the superclass - * of this metaclass's class. - * @param classSize The allocation size of the represented C++ class. - * - * @discussion - * This constructor is protected and cannot be used - * to instantiate OSMetaClass directly, as OSMetaClass is an abstract class. - * This function is called during kext loading - * to queue C++ classes for registration. - * See @link preModLoad preModLoad@/link and - * @link postModLoad postModLoad@/link. - */ - OSMetaClass(const char * className, - const OSMetaClass * superclass, - unsigned int classSize); - - - /*! - * @function ~OSMetaClass - * - * @abstract - * Destructor for OSMetaClass objects. - * - * @discussion - * This function is called when the kernel extension that implements - * the metaclass's class is unloaded. - * The destructor removes all references to the class - * from the run-time type information system. - */ - virtual ~OSMetaClass(); - - // Needs to be overriden as NULL as all OSMetaClass objects are allocated - // statically at compile time, don't accidently try to free them. - void operator delete(void *, size_t) { } +/* Not to be included in headerdoc. + * + * @function OSMetaClass + * + * @abstract + * The default private constructor. + */ + OSMetaClass(); -public: - static const OSMetaClass * const metaClass; - - /*! - * @function preModLoad - * - * @abstract - * Prepares the run-time type system - * for the creation of new metaclasses - * during loading of a kernel extension (module). - * - * @param kextID The bundle ID of the kext being loaded. - * - * @result - * An opaque handle to the load context - * for the kernel extension on success; - * NULL on failure. - * - * @discussion - * Not for use by kernel extensions. - * - * Prepares the run-time type information system to record and register - * metaclasses created by static constructors until a subsequent call to - * @link postModLoad postModLoad@/link. - * preModLoad takes a lock to ensure processing of a single - * load operation at a time; the lock is released by - * @link postModLoad postModLoad@/link. - * Any OSMetaClass constructed between these two function calls - * will be associated with kextID. - */ - static void * preModLoad(const char * kextID); - - - /*! - * @function checkModLoad - * - * @abstract - * Checks whether the current kext load operation can proceed. - * - * @param loadHandle The opaque handle returned - * by @link preModLoad preModLoad@/link. - * @result - * true if no errors are outstanding - * and the system is ready to process more metaclasses. - * - * @discussion - * Not for use by kernel extensions. - */ - static bool checkModLoad(void * loadHandle); - - - /*! - * @function postModLoad - * - * @abstract - * Registers the metaclasses created during loading of a kernel extension. - * - * @param loadHandle The opaque handle returned - * by @link preModLoad preModLoad@/link. - * @result - * The error code of the first error encountered, - * or - * @link - * //apple_ref/cpp/macro/kOSReturnSuccess - * kOSReturnSuccess@/link - * if no error occurred. - * - * @discussion - * Not for use by kernel extensions. 
- * - * Called after all static constructors in a kernel extension - * have created metaclasses, - * this function checks for duplicate class names, - * then registers the new metaclasses under the kext ID - * that @link preModLoad preModLoad@/link was called with, - * so that they can be dynamically allocated - * and have their instance counts tracked. - * postModLoad releases the lock taken by - * @link preModLoad preModLoad@/link. - */ - static OSReturn postModLoad(void * loadHandle); - - /*! - * @function modHasInstance - * - * @abstract - * Returns whether any classes defined by the named - * kernel extension (or their subclasses) have existing instances. - * - * @param kextID The bundle ID of the kernel extension to check. - * - * @result - * true if the kext is found and - * if any class defined by that kext - * has a nonzero instance count, - * false otherwise. - * - * @discussion - * This function is called before a kernel extension's static destructors - * are invoked, prior to unloading the extension. - * If any classes stil have instances or subclasses with instances, - * those classes are logged - * (using @link reportModInstances reportModInstances@/link) and - * the kernel extension is not be unloaded. - */ - static bool modHasInstance(const char * kextID); - - - /*! - * @function reportModInstances - * - * @abstract - * Logs the instance counts for classes - * defined by a kernel extension. - * - * @param kextID The bundle ID of the kernel extension to report on. - * - * @discussion - * This function prints the names and instance counts - * of any class defined by kextID - * that has a nonzero instance count. - * It's called by @link modHasInstance modHasInstance@/link - * to help diagnose problems unloading kernel extensions. - */ - static void reportModInstances(const char * kextID); - - - /*! - * @function considerUnloads - * - * @abstract - * Schedule automatic unloading of unused kernel extensions. - * - * @discussion - * This function schedules a check for kernel extensions - * that can be automatically unloaded, - * canceling any currently scheduled check. - * At that time, any such kexts with no Libkern C++ instances - * and no external references are unloaded. - * - * The I/O Kit calls this function when matching goes idle. - * - * Kernel extensions that define subclasses of - * @link //apple_ref/doc/class/IOService IOService@/link - * are eligible for automatic unloading. - * - * (On releases of Mac OS X prior to Snow Leopard (10.6), - * any kernel extension defining any Libkern C++ class - * was eligible for automatic unloading, - * but that unload did not call the module stop routine. - * Non-I/O Kit kernel extensions that define Libkern C++ subclasses - * should be sure to have OSBundleLibraries declarations that ensure - * they will not load on releases prior to Snow Leopard.) - */ - static void considerUnloads(); +// Called by postModLoad +/* Not to be included in headerdoc. + * + * @function logError + * + * @abstract + * Logs an error string for an OSReturn value + * using printf. + * + * @param result The OSReturn value for which to log a message. + * + * @discussion + * This function is used to log errors loading kernel extensions. + * Kernel extensions themselves should not call it. + */ + static void logError(OSReturn result); -#if XNU_KERNEL_PRIVATE - static bool removeClasses(OSCollection * metaClasses); -#endif /* XNU_KERNEL_PRIVATE */ +public: - /*! 
- * @function allocClassWithName - * - * @abstract - * Allocates an instance of a named OSObject-derived class. - * - * @param name The name of the desired class. - * - * @result - * A pointer to the newly-allocated, uninitialized object on success; - * NULL on failure. - * - * @discussion - * Kernel extensions should not need to use this function - * directly, instead using static instance-creation functions - * defined by classes. - * - * This function consults the run-time type information system - * to find the metaclass for the named class. - * If it exists, it calls the metaclass's @link alloc alloc@/link - * function and returns the result. - */ - static OSObject * allocClassWithName(const OSSymbol * name); - - - /*! - * function allocClassWithName - * - * @abstract - * Allocates an instance of a named OSObject-derived class. - * - * @param name The name of the desired class. - * - * @result - * A pointer to the newly-allocated, uninitialized object on success; - * NULL on failure. - * - * @discussion - * Kernel extensions should not need to use this function - * directly, instead using static instance-creation functions - * defined by classes. - * - * This function consults the run-time type information system - * to find the metaclass for the named class. - * If it exists, it calls the metaclass's @link alloc alloc@/link - * function and returns the result. - */ - static OSObject * allocClassWithName(const OSString * name); - - - /*! - * function allocClassWithName - * - * @abstract - * Allocates an instance of a named OSObject-derived class. - * - * @param name The name of the desired class. - * - * @result - * A pointer to the newly-allocated, uninitialized object on success; - * NULL on failure. - * - * @discussion - * Kernel extensions should not need to use this function - * directly, instead using static instance-creation functions - * defined by classes. - * - * This function consults the run-time type information system - * to find the metaclass for the named class. - * If it exists, it calls the metaclass's @link alloc alloc@/link - * function and returns the result. - */ - static OSObject * allocClassWithName(const char * name); - - - /*! - * @function checkMetaCastWithName - * - * @abstract - * Search the metaclass inheritance hierarchy by name for an object instance. - * - * @param className The name of the desired class or superclass. - * @param object The object whose metaclass begins the search. - * - * @result - * object if it's derived from className; - * NULL otherwise. - * - * @discussion - * This function is the basis of the Libkern run-time type-checking system. - * Kernel extensions should not use it directly, - * instead using @link OSDynamicCast OSDynamicCast@/link or - * @link OSCheckTypeInst OSCheckTypeInst@/link. - */ - static OSMetaClassBase * checkMetaCastWithName( - const OSSymbol * className, - const OSMetaClassBase * object); - - /*! - * @function checkMetaCastWithName - * - * @abstract - * Search the metaclass inheritance hierarchy by name for an object instance. - * - * @param className The name of the desired class or superclass. - * @param object The object whose metaclass begins the search. - * - * @result - * object if it's derived from className; - * NULL otherwise. - * - * @discussion - * Kernel extensions should not use this function directly, - * instead using @link OSDynamicCast OSDynamicCast@/link or - * @link OSCheckTypeInst OSCheckTypeInst@/link. 
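
As the discussion notes, kernel extensions reach this machinery through OSDynamicCast rather than calling checkMetaCastWithName directly. A short sketch of the supported pattern (the incoming object is hypothetical):

    // Illustrative only: OSDynamicCast yields NULL, not a crash, when the
    // object is not of (or derived from) the requested class.
    OSObject * obj = /* obtained from a dictionary, queue, etc. */ NULL;
    OSString * str = OSDynamicCast(OSString, obj);
    if (str) {
        printf("got string: %s\n", str->getCStringNoCopy());
    }
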
- */ - static OSMetaClassBase * checkMetaCastWithName( - const OSString * className, - const OSMetaClassBase * object); - - /*! - * @function checkMetaCastWithName - * - * @abstract - * Search the metaclass inheritance hierarchy by name for an object instance. - * - * @param className The name of the desired class or superclass. - * @param object The object whose metaclass begins the search. - * - * @result - * object if it's derived from className; - * NULL otherwise. - * - * @discussion - * Kernel extensions should not use this function directly, - * instead using @link OSDynamicCast OSDynamicCast@/link or - * @link OSCheckTypeInst OSCheckTypeInst@/link. - */ - static OSMetaClassBase * checkMetaCastWithName( - const char * className, - const OSMetaClassBase * object); - - - /*! - * @function instanceConstructed - * - * @abstract - * Counts the instances of the class managed by this metaclass. - * - * @discussion - * Not for use by kernel extensions. - * - * Every non-abstract class that inherits from OSObject - * has a default constructor that calls it's own metaclass's - * instanceConstructed function. - * This constructor is defined by the - * @link - * OSDefineMetaClassAndStructors - * OSDefineMetaClassAndStructors@/link - * macro that all OSObject subclasses must use. - * - * If a class's instance count goes from 0 to 1--that is, - * upon the creation of the first instance of that class--the - * superclass's instance count is also incremented. - * This propagates reference counts up the inheritance chain so that - * superclasses are counted as "in use" when subclasses have instances. - */ - void instanceConstructed() const; - - - /*! - * @function instanceDestructed - * - * @abstract - * Counts the instances of the class managed by this metaclass. - * - * @discussion - * Every non-abstract class that inherits from OSObject - * has a default destructor that calls it's own metaclass's - * instanceDestructed function. - * This constructor is defined by the - * @link OSDefineMetaClassAndStructors OSDefineMetaClassAndStructors@/link - * macro that all OSObject subclasses must use. - * - * If a class's instance count goes from 1 to 0--that is, - * upon the destruction of the last instance of that class--the - * superclass's instance count is also decremented. - * This reduces "in use" counts from superclasses when their subclasses - * no longer have instances. - */ - void instanceDestructed() const; - - - /*! - * @function checkMetaCast - * - * @abstract - * Check whether a given object is an instance of the receiving - * metaclass's class or one derived from it. - * - * @param object The object to check for inheritance. - * - * @result - * object if it is derived from the receiver's class, - * NULL if not. - */ - OSMetaClassBase * checkMetaCast(const OSMetaClassBase * object) const; - - - /*! - * @function getInstanceCount - * - * @abstract - * Returns the number of existing instances of the metaclass's class. - * - * @result - * The number of existing instances of the metaclass's class, - * plus 1 for each subclass with any instance. - */ - unsigned int getInstanceCount() const; - - - /*! - * @function getSuperClass - * - * @abstract - * Returns the super-metaclass of the receiver. - * - * @result - * Returns a pointer to the super-metaclass of the receiving - * OSMetaClass, or NULL for OSObject's metaclass. - */ - const OSMetaClass * getSuperClass() const; - - /*! 
- * @function getKmodName - * - * @abstract - * Returns the bundle identifier of the kernel extension - * that defines this metaclass. - * - * @result - * The bundle identifier of the kernel extension that defines this metaclass. - * - * @discussion - * "Kmod" is an older term for kernel extension. - */ - const OSSymbol * getKmodName() const; - - - /*! - * @function getClassName - * - * @abstract - * Returns the name of the C++ class managed by this metaclass. - * - * @result - * Returns the name of the C++ class managed by this metaclass. - */ - const char * getClassName() const; - const OSSymbol * getClassNameSymbol() const; - - - /*! - * @function getClassSize - * - * @abstract - * Returns the allocation size of the C++ class managed by this metaclass. - * - * @result - * The allocation size of the C++ class managed by this metaclass. - */ - unsigned int getClassSize() const; - - - /*! - * @function alloc - * - * @abstract - * Allocates an instance of the C++ class managed by this metaclass. - * - * @result - * A pointer to the newly allocated, uninitialized instance, - * with a retain count of 1; NULL on allocation failure. - * - * @discussion - * This function is automatically created by the metaclass-registration macros - * to enable dynamic instance allocation. - */ - virtual OSObject * alloc() const = 0; +/*! + * @function getMetaClassWithName + * + * @abstract + * Look up a metaclass in the run-time type information system. + * + * @param name The name of the desired class's metaclass. + * + * @result + * A pointer to the metaclass object if found, NULL otherwise. + */ + static const OSMetaClass * getMetaClassWithName(const OSSymbol * name); -#ifdef XNU_KERNEL_PRIVATE - void addInstance(const OSObject * instance, bool super = false) const; - void removeInstance(const OSObject * instance, bool super = false) const; - void applyToInstances(OSMetaClassInstanceApplierFunction applier, - void * context) const; - static void applyToInstancesOfClassName( - const OSSymbol * name, - OSMetaClassInstanceApplierFunction applier, - void * context); -private: - static void applyToInstances(OSOrderedSet * set, - OSMetaClassInstanceApplierFunction applier, - void * context); -public: -#endif /* XNU_KERNEL_PRIVATE */ +#if XNU_KERNEL_PRIVATE - /* Not to be included in headerdoc. - * - * @define OSDeclareCommonStructors - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - */ -#define OSDeclareCommonStructors(className) \ - private: \ - static const OSMetaClass * const superClass; \ +/*! + * @function copyMetaClassWithName + * + * @abstract + * Look up a metaclass in the run-time type information system. + * + * @param name The name of the desired class's metaclass. + * + * @result + * A pointer to the metaclass object if found, NULL otherwise. + * The metaclass will be protected from unloading until releaseMetaClass() + * is called. + */ + static const OSMetaClass * copyMetaClassWithName(const OSSymbol * name); +/*! + * @function releaseMetaClass + * + * @abstract + * Releases reference obtained from copyMetaClassWithName(). + * + * @discussion + * The metaclass will be protected from unloading until releaseMetaClass() + * is called. + */ + void releaseMetaClass() const; + +#endif /* XNU_KERNEL_PRIVATE */ + +protected: +/*! + * @function retain + * + * @abstract + * Implements the abstract retain function to do nothing. 
+ * + * @discussion + * Since an OSMetaClass instance must remain in existence + * for as long as its kernel extension is loaded, + * OSMetaClass does not use reference-counting. + */ + virtual void retain() const; + + +/*! + * @function release + * + * @abstract + * Implements the abstract release function to do nothing. + * + * @discussion + * Since an OSMetaClass instance must remain in existence + * for as long as its kernel extension is loaded, + * OSMetaClass does not use reference-counting. + */ + virtual void release() const; + + +/*! + * @function release + * + * @abstract + * Implements the abstract release(int freeWhen) + * function to do nothing. + * + * @param freeWhen Unused. + * + * @discussion + * Since an OSMetaClass instance must remain in existence + * for as long as its kernel extension is loaded, + * OSMetaClass does not use reference-counting. + */ + virtual void release(int freeWhen) const; + + +/*! + * @function taggedRetain + * + * @abstract + * Implements the abstract taggedRetain(const void *) + * function to do nothing. + * + * @param tag Unused. + * + * @discussion + * Since an OSMetaClass instance must remain in existence + * for as long as its kernel extension is loaded, + * OSMetaClass does not use reference-counting. + */ + virtual void taggedRetain(const void * tag = 0) const; + + +/*! + * @function taggedRelease + * + * @abstract + * Implements the abstract taggedRelease(const void *) + * function to do nothing. + * + * @param tag Unused. + * + * @discussion + * Since an OSMetaClass instance must remain in existence + * for as long as its kernel extension is loaded, + * OSMetaClass does not use reference-counting. + */ + virtual void taggedRelease(const void * tag = 0) const; + + +/*! + * @function taggedRelease + * + * @abstract + * Implements the abstract taggedRelease(const void *, cont int) + * function to do nothing. + * + * @param tag Unused. + * @param freeWhen Unused. + * + * @discussion + * Since an OSMetaClass instance must remain in existence + * for as long as its kernel extension is loaded, + * OSMetaClass does not use reference-counting. + */ + virtual void taggedRelease( + const void * tag, + const int freeWhen) const; + + +/*! + * @function getRetainCount + * + * @abstract + * Implements the abstract getRetainCount + * function to return 0. + * + * @result + * Always returns 0. + * + * @discussion + * Since an OSMetaClass instance must remain in existence + * for as long as its kernel extension is loaded, + * OSMetaClass does not use reference-counting. + */ + virtual int getRetainCount() const; + + +/* Not to be included in headerdoc. + * + * @function getMetaClass + * + * @abstract + * Returns the meta-metaclass. + * + * @result + * The metaclass of the OSMetaClass object. + */ + virtual const OSMetaClass * getMetaClass() const; + + +/*! + * @function OSMetaClass + * + * @abstract + * Constructor for OSMetaClass objects. + * + * @param className A C string naming the C++ class + * that this OSMetaClass represents. + * @param superclass The OSMetaClass object representing the superclass + * of this metaclass's class. + * @param classSize The allocation size of the represented C++ class. + * + * @discussion + * This constructor is protected and cannot be used + * to instantiate OSMetaClass directly, as OSMetaClass is an abstract class. + * This function is called during kext loading + * to queue C++ classes for registration. + * See @link preModLoad preModLoad@/link and + * @link postModLoad postModLoad@/link. 
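
For orientation, the definition macros later in this header expand to exactly one such constructor invocation per class. A sketch of the generated form for a hypothetical MyDriver class:

    // Illustrative expansion (see OSDefineMetaClassAndStructors below).
    // This runs as a static constructor during kext load, between
    // preModLoad() and postModLoad(), queueing MyDriver for registration.
    MyDriver::MetaClass::MetaClass()
        : OSMetaClass("MyDriver", MyDriver::superClass, sizeof(MyDriver))
    {
    }
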
+ */
+ OSMetaClass(const char * className,
+ const OSMetaClass * superclass,
+ unsigned int classSize);
+
+
+/*!
+ * @function ~OSMetaClass
+ *
+ * @abstract
+ * Destructor for OSMetaClass objects.
+ *
+ * @discussion
+ * This function is called when the kernel extension that implements
+ * the metaclass's class is unloaded.
+ * The destructor removes all references to the class
+ * from the run-time type information system.
+ */
+ virtual
+ ~OSMetaClass();
+
+// Needs to be overridden as NULL as all OSMetaClass objects are allocated
+// statically at compile time; don't accidentally try to free them.
+ void
+ operator delete(void *, size_t)
+ {
+ }
+
+public:
+ static const OSMetaClass * const metaClass;
+
+/*!
+ * @function preModLoad
+ *
+ * @abstract
+ * Prepares the run-time type system
+ * for the creation of new metaclasses
+ * during loading of a kernel extension (module).
+ *
+ * @param kextID The bundle ID of the kext being loaded.
+ *
+ * @result
+ * An opaque handle to the load context
+ * for the kernel extension on success;
+ * NULL on failure.
+ *
+ * @discussion
+ * Not for use by kernel extensions.
+ *
+ * Prepares the run-time type information system to record and register
+ * metaclasses created by static constructors until a subsequent call to
+ * @link postModLoad postModLoad@/link.
+ * preModLoad takes a lock to ensure processing of a single
+ * load operation at a time; the lock is released by
+ * @link postModLoad postModLoad@/link.
+ * Any OSMetaClass constructed between these two function calls
+ * will be associated with kextID.
+ */
+ static void * preModLoad(const char * kextID);
+
+
+/*!
+ * @function checkModLoad
+ *
+ * @abstract
+ * Checks whether the current kext load operation can proceed.
+ *
+ * @param loadHandle The opaque handle returned
+ * by @link preModLoad preModLoad@/link.
+ * @result
+ * true if no errors are outstanding
+ * and the system is ready to process more metaclasses.
+ *
+ * @discussion
+ * Not for use by kernel extensions.
+ */
+ static bool checkModLoad(void * loadHandle);
+
+
+/*!
+ * @function postModLoad
+ *
+ * @abstract
+ * Registers the metaclasses created during loading of a kernel extension.
+ *
+ * @param loadHandle The opaque handle returned
+ * by @link preModLoad preModLoad@/link.
+ * @result
+ * The error code of the first error encountered,
+ * or
+ * @link
+ * //apple_ref/cpp/macro/kOSReturnSuccess
+ * kOSReturnSuccess@/link
+ * if no error occurred.
+ *
+ * @discussion
+ * Not for use by kernel extensions.
+ *
+ * Called after all static constructors in a kernel extension
+ * have created metaclasses,
+ * this function checks for duplicate class names,
+ * then registers the new metaclasses under the kext ID
+ * that @link preModLoad preModLoad@/link was called with,
+ * so that they can be dynamically allocated
+ * and have their instance counts tracked.
+ * postModLoad releases the lock taken by
+ * @link preModLoad preModLoad@/link.
+ */
+ static OSReturn postModLoad(void * loadHandle);
+
+/*!
+ * @function modHasInstance
+ *
+ * @abstract
+ * Returns whether any classes defined by the named
+ * kernel extension (or their subclasses) have existing instances.
+ *
+ * @param kextID The bundle ID of the kernel extension to check.
+ *
+ * @result
+ * true if the kext is found and
+ * if any class defined by that kext
+ * has a nonzero instance count,
+ * false otherwise.
+ *
+ * @discussion
+ * This function is called before a kernel extension's static destructors
+ * are invoked, prior to unloading the extension.
+ * If any classes still have instances or subclasses with instances,
+ * those classes are logged
+ * (using @link reportModInstances reportModInstances@/link) and
+ * the kernel extension is not unloaded.
+ */
+ static bool modHasInstance(const char * kextID);
+
+
+/*!
+ * @function reportModInstances
+ *
+ * @abstract
+ * Logs the instance counts for classes
+ * defined by a kernel extension.
+ *
+ * @param kextID The bundle ID of the kernel extension to report on.
+ *
+ * @discussion
+ * This function prints the names and instance counts
+ * of any class defined by kextID
+ * that has a nonzero instance count.
+ * It's called by @link modHasInstance modHasInstance@/link
+ * to help diagnose problems unloading kernel extensions.
+ */
+ static void reportModInstances(const char * kextID);
+
+
+/*!
+ * @function considerUnloads
+ *
+ * @abstract
+ * Schedules automatic unloading of unused kernel extensions.
+ *
+ * @discussion
+ * This function schedules a check for kernel extensions
+ * that can be automatically unloaded,
+ * canceling any currently scheduled check.
+ * At that time, any such kexts with no Libkern C++ instances
+ * and no external references are unloaded.
+ *
+ * The I/O Kit calls this function when matching goes idle.
+ *
+ * Kernel extensions that define subclasses of
+ * @link //apple_ref/doc/class/IOService IOService@/link
+ * are eligible for automatic unloading.
+ *
+ * (On releases of Mac OS X prior to Snow Leopard (10.6),
+ * any kernel extension defining any Libkern C++ class
+ * was eligible for automatic unloading,
+ * but that unload did not call the module stop routine.
+ * Non-I/O Kit kernel extensions that define Libkern C++ subclasses
+ * should be sure to have OSBundleLibraries declarations that ensure
+ * they will not load on releases prior to Snow Leopard.)
+ */
+ static void considerUnloads();
+
+#if XNU_KERNEL_PRIVATE
+ static bool removeClasses(OSCollection * metaClasses);
+#endif /* XNU_KERNEL_PRIVATE */
+
+/*!
+ * @function allocClassWithName
+ *
+ * @abstract
+ * Allocates an instance of a named OSObject-derived class.
+ *
+ * @param name The name of the desired class.
+ *
+ * @result
+ * A pointer to the newly-allocated, uninitialized object on success;
+ * NULL on failure.
+ *
+ * @discussion
+ * Kernel extensions should not need to use this function
+ * directly, instead using static instance-creation functions
+ * defined by classes.
+ *
+ * This function consults the run-time type information system
+ * to find the metaclass for the named class.
+ * If it exists, it calls the metaclass's @link alloc alloc@/link
+ * function and returns the result.
+ */
+ static OSObject * allocClassWithName(const OSSymbol * name);
+
+
+/*!
+ * @function allocClassWithName
+ *
+ * @abstract
+ * Allocates an instance of a named OSObject-derived class.
+ *
+ * @param name The name of the desired class.
+ *
+ * @result
+ * A pointer to the newly-allocated, uninitialized object on success;
+ * NULL on failure.
+ *
+ * @discussion
+ * Kernel extensions should not need to use this function
+ * directly, instead using static instance-creation functions
+ * defined by classes.
+ *
+ * This function consults the run-time type information system
+ * to find the metaclass for the named class.
+ * If it exists, it calls the metaclass's @link alloc alloc@/link
+ * function and returns the result.
+ */
+ static OSObject * allocClassWithName(const OSString * name);
+
+
+/*!
+ * @function allocClassWithName
+ *
+ * @abstract
+ * Allocates an instance of a named OSObject-derived class.
+ *
+ * @param name The name of the desired class.
+ *
+ * @result
+ * A pointer to the newly-allocated, uninitialized object on success;
+ * NULL on failure.
+ *
+ * @discussion
+ * Kernel extensions should not need to use this function
+ * directly, instead using static instance-creation functions
+ * defined by classes.
+ *
+ * This function consults the run-time type information system
+ * to find the metaclass for the named class.
+ * If it exists, it calls the metaclass's @link alloc alloc@/link
+ * function and returns the result.
+ */
+ static OSObject * allocClassWithName(const char * name);
+
+
+/*!
+ * @function checkMetaCastWithName
+ *
+ * @abstract
+ * Searches the metaclass inheritance hierarchy by name for an object instance.
+ *
+ * @param className The name of the desired class or superclass.
+ * @param object The object whose metaclass begins the search.
+ *
+ * @result
+ * object if it's derived from className;
+ * NULL otherwise.
+ *
+ * @discussion
+ * This function is the basis of the Libkern run-time type-checking system.
+ * Kernel extensions should not use it directly,
+ * instead using @link OSDynamicCast OSDynamicCast@/link or
+ * @link OSCheckTypeInst OSCheckTypeInst@/link.
+ */
+ static OSMetaClassBase * checkMetaCastWithName(
+ const OSSymbol * className,
+ const OSMetaClassBase * object);
+
+/*!
+ * @function checkMetaCastWithName
+ *
+ * @abstract
+ * Searches the metaclass inheritance hierarchy by name for an object instance.
+ *
+ * @param className The name of the desired class or superclass.
+ * @param object The object whose metaclass begins the search.
+ *
+ * @result
+ * object if it's derived from className;
+ * NULL otherwise.
+ *
+ * @discussion
+ * Kernel extensions should not use this function directly,
+ * instead using @link OSDynamicCast OSDynamicCast@/link or
+ * @link OSCheckTypeInst OSCheckTypeInst@/link.
+ */
+ static OSMetaClassBase * checkMetaCastWithName(
+ const OSString * className,
+ const OSMetaClassBase * object);
+
+/*!
+ * @function checkMetaCastWithName
+ *
+ * @abstract
+ * Searches the metaclass inheritance hierarchy by name for an object instance.
+ *
+ * @param className The name of the desired class or superclass.
+ * @param object The object whose metaclass begins the search.
+ *
+ * @result
+ * object if it's derived from className;
+ * NULL otherwise.
+ *
+ * @discussion
+ * Kernel extensions should not use this function directly,
+ * instead using @link OSDynamicCast OSDynamicCast@/link or
+ * @link OSCheckTypeInst OSCheckTypeInst@/link.
+ */
+ static OSMetaClassBase * checkMetaCastWithName(
+ const char * className,
+ const OSMetaClassBase * object);
+
+
+/*!
+ * @function instanceConstructed
+ *
+ * @abstract
+ * Counts the instances of the class managed by this metaclass.
+ *
+ * @discussion
+ * Not for use by kernel extensions.
+ *
+ * Every non-abstract class that inherits from OSObject
+ * has a default constructor that calls its own metaclass's
+ * instanceConstructed function.
+ * This constructor is defined by the
+ * @link
+ * OSDefineMetaClassAndStructors
+ * OSDefineMetaClassAndStructors@/link
+ * macro that all OSObject subclasses must use.
+ *
+ * If a class's instance count goes from 0 to 1--that is,
+ * upon the creation of the first instance of that class--the
+ * superclass's instance count is also incremented.
+ * This propagates reference counts up the inheritance chain so that
+ * superclasses are counted as "in use" when subclasses have instances.
+ */
+ void instanceConstructed() const;
+
+
+/*!
+ * @function instanceDestructed
+ *
+ * @abstract
+ * Counts the instances of the class managed by this metaclass.
+ *
+ * @discussion
+ * Every non-abstract class that inherits from OSObject
+ * has a default destructor that calls its own metaclass's
+ * instanceDestructed function.
+ * This destructor is defined by the
+ * @link OSDefineMetaClassAndStructors OSDefineMetaClassAndStructors@/link
+ * macro that all OSObject subclasses must use.
+ *
+ * If a class's instance count goes from 1 to 0--that is,
+ * upon the destruction of the last instance of that class--the
+ * superclass's instance count is also decremented.
+ * This reduces "in use" counts from superclasses when their subclasses
+ * no longer have instances.
+ */
+ void instanceDestructed() const;
+
+
+/*!
+ * @function checkMetaCast
+ *
+ * @abstract
+ * Checks whether a given object is an instance of the receiving
+ * metaclass's class or one derived from it.
+ *
+ * @param object The object to check for inheritance.
+ *
+ * @result
+ * object if it is derived from the receiver's class,
+ * NULL if not.
+ */
+ OSMetaClassBase * checkMetaCast(const OSMetaClassBase * object) const;
+
+
+/*!
+ * @function getInstanceCount
+ *
+ * @abstract
+ * Returns the number of existing instances of the metaclass's class.
+ *
+ * @result
+ * The number of existing instances of the metaclass's class,
+ * plus 1 for each subclass with any instance.
+ */
+ unsigned int getInstanceCount() const;
+
+
+/*!
+ * @function getSuperClass
+ *
+ * @abstract
+ * Returns the super-metaclass of the receiver.
+ *
+ * @result
+ * Returns a pointer to the super-metaclass of the receiving
+ * OSMetaClass, or NULL for OSObject's metaclass.
+ */
+ const OSMetaClass * getSuperClass() const;
+
+/*!
+ * @function getKmodName
+ *
+ * @abstract
+ * Returns the bundle identifier of the kernel extension
+ * that defines this metaclass.
+ *
+ * @result
+ * The bundle identifier of the kernel extension that defines this metaclass.
+ *
+ * @discussion
+ * "Kmod" is an older term for kernel extension.
+ */
+ const OSSymbol * getKmodName() const;
+
+
+/*!
+ * @function getClassName
+ *
+ * @abstract
+ * Returns the name of the C++ class managed by this metaclass.
+ *
+ * @result
+ * Returns the name of the C++ class managed by this metaclass.
+ */
+ const char * getClassName() const;
+ const OSSymbol * getClassNameSymbol() const;
+
+
+/*!
+ * @function getClassSize
+ *
+ * @abstract
+ * Returns the allocation size of the C++ class managed by this metaclass.
+ *
+ * @result
+ * The allocation size of the C++ class managed by this metaclass.
+ */
+ unsigned int getClassSize() const;
+
+
+/*!
+ * @function alloc
+ *
+ * @abstract
+ * Allocates an instance of the C++ class managed by this metaclass.
+ *
+ * @result
+ * A pointer to the newly allocated, uninitialized instance,
+ * with a retain count of 1; NULL on allocation failure.
+ *
+ * @discussion
+ * This function is automatically created by the metaclass-registration macros
+ * to enable dynamic instance allocation.
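
A sketch of the dynamic-allocation path built on alloc(); the class name MyDriver is hypothetical, and note that the caller, not allocClassWithName, must run init():

    // Illustrative only: allocate by name, then initialize by hand.
    OSObject * obj = OSMetaClass::allocClassWithName("MyDriver");
    if (obj != NULL && !obj->init()) {   // init() is declared on OSObject
        obj->release();
        obj = NULL;
    }
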
+ */ + virtual OSObject * alloc() const = 0; + +#ifdef XNU_KERNEL_PRIVATE + void addInstance(const OSObject * instance, bool super = false) const; + void removeInstance(const OSObject * instance, bool super = false) const; + void applyToInstances(OSMetaClassInstanceApplierFunction applier, + void * context) const; + static void applyToInstancesOfClassName( + const OSSymbol * name, + OSMetaClassInstanceApplierFunction applier, + void * context); +private: + static void applyToInstances(OSOrderedSet * set, + OSMetaClassInstanceApplierFunction applier, + void * context); +public: +#endif /* XNU_KERNEL_PRIVATE */ + +/* Not to be included in headerdoc. + * + * @define OSDeclareCommonStructors + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + */ +#define OSDeclareCommonStructors(className) \ + private: \ + static const OSMetaClass * const superClass; \ public: \ static const OSMetaClass * const metaClass; \ - static class MetaClass : public OSMetaClass { \ - public: \ - MetaClass(); \ - virtual OSObject *alloc() const; \ - } gMetaClass; \ - friend class className ::MetaClass; \ - virtual const OSMetaClass * getMetaClass() const APPLE_KEXT_OVERRIDE; \ + static class MetaClass : public OSMetaClass { \ + public: \ + MetaClass(); \ + virtual OSObject *alloc() const; \ + } gMetaClass; \ + friend class className ::MetaClass; \ + virtual const OSMetaClass * getMetaClass() const APPLE_KEXT_OVERRIDE; \ protected: \ className (const OSMetaClass *); \ virtual ~ className () APPLE_KEXT_OVERRIDE - /*! - * @define OSDeclareDefaultStructors - * @hidecontents - * - * @abstract - * Declares run-time type information and functions - * for a concrete Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * - * @discussion - * Concrete Libkern C++ classes should "call" this macro - * immediately after the opening brace in a class declaration. - * It leaves the current privacy state as protected:. - */ +/*! + * @define OSDeclareDefaultStructors + * @hidecontents + * + * @abstract + * Declares run-time type information and functions + * for a concrete Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * + * @discussion + * Concrete Libkern C++ classes should "call" this macro + * immediately after the opening brace in a class declaration. + * It leaves the current privacy state as protected:. + */ #define OSDeclareDefaultStructors(className) \ OSDeclareCommonStructors(className); \ public: \ @@ -1635,361 +1698,361 @@ public: protected: - /*! - * @define OSDeclareAbstractStructors - * @hidecontents - * - * @abstract - * Declares run-time type information and functions - * for an abstract Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * - * @discussion - * Abstract Libkern C++ classes--those with at least one - * pure virtual method--should "call" this macro - * immediately after the opening brace in a class declaration. - * It leaves the current privacy state as protected:. - */ +/*! + * @define OSDeclareAbstractStructors + * @hidecontents + * + * @abstract + * Declares run-time type information and functions + * for an abstract Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. 
+ * + * @discussion + * Abstract Libkern C++ classes--those with at least one + * pure virtual method--should "call" this macro + * immediately after the opening brace in a class declaration. + * It leaves the current privacy state as protected:. + */ #define OSDeclareAbstractStructors(className) \ OSDeclareCommonStructors(className); \ private: \ className (); /* Make primary constructor private in abstract */ \ protected: - /*! - * @define OSDeclareFinalStructors - * @hidecontents - * - * @abstract - * Declares run-time type information and functions - * for a final (non-subclassable) Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * - * @discussion - * Final Libkern C++ classes--those that do not allow subclassing--should - * "call" this macro immediately after the opening brace in a class declaration. - * (Final classes in the kernel may actually have subclasses in the kernel, - * but kexts cannot define any subclasses of a final class.) - * It leaves the current privacy state as protected:. - * - * Note: If the class is exported by a pseudokext (symbol set), - * the final symbol generated by this macro must be exported - * for the final-class attribute to be enforced. - * - * Warning: Changing a class from "Default" to "Final" will break - * binary compatibility. - */ +/*! + * @define OSDeclareFinalStructors + * @hidecontents + * + * @abstract + * Declares run-time type information and functions + * for a final (non-subclassable) Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * + * @discussion + * Final Libkern C++ classes--those that do not allow subclassing--should + * "call" this macro immediately after the opening brace in a class declaration. + * (Final classes in the kernel may actually have subclasses in the kernel, + * but kexts cannot define any subclasses of a final class.) + * It leaves the current privacy state as protected:. + * + * Note: If the class is exported by a pseudokext (symbol set), + * the final symbol generated by this macro must be exported + * for the final-class attribute to be enforced. + * + * Warning: Changing a class from "Default" to "Final" will break + * binary compatibility. + */ #define OSDeclareFinalStructors(className) \ - OSDeclareDefaultStructors(className) \ + OSDeclareDefaultStructors(className) \ private: \ - void __OSFinalClass(void); \ + void __OSFinalClass(void); \ protected: - /* Not to be included in headerdoc. - * - * @define OSDefineMetaClassWithInit - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - * @param init A function to call in the constructor - * of the class's OSMetaClass. - */ +/* Not to be included in headerdoc. + * + * @define OSDefineMetaClassWithInit + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param init A function to call in the constructor + * of the class's OSMetaClass. 
+ */ #define OSDefineMetaClassWithInit(className, superclassName, init) \ - /* Class global data */ \ + /* Class global data */ \ className ::MetaClass className ::gMetaClass; \ const OSMetaClass * const className ::metaClass = \ - & className ::gMetaClass; \ + & className ::gMetaClass; \ const OSMetaClass * const className ::superClass = \ - & superclassName ::gMetaClass; \ - /* Class member functions */ \ + & superclassName ::gMetaClass; \ + /* Class member functions */ \ className :: className(const OSMetaClass *meta) \ - : superclassName (meta) { } \ + : superclassName (meta) { } \ className ::~ className() { } \ const OSMetaClass * className ::getMetaClass() const \ - { return &gMetaClass; } \ - /* The ::MetaClass constructor */ \ + { return &gMetaClass; } \ + /* The ::MetaClass constructor */ \ className ::MetaClass::MetaClass() \ - : OSMetaClass(#className, className::superClass, sizeof(className)) \ - { init; } - - - /* Not to be included in headerdoc. - * - * @define OSDefineAbstractStructors - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - */ + : OSMetaClass(#className, className::superClass, sizeof(className)) \ + { init; } + + +/* Not to be included in headerdoc. + * + * @define OSDefineAbstractStructors + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + */ #define OSDefineAbstractStructors(className, superclassName) \ OSObject * className ::MetaClass::alloc() const { return 0; } - /* Not to be included in headerdoc. - * - * @define OSDefineDefaultStructors - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - */ +/* Not to be included in headerdoc. + * + * @define OSDefineDefaultStructors + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + */ #define OSDefineDefaultStructors(className, superclassName) \ OSObject * className ::MetaClass::alloc() const \ { return new className; } \ className :: className () : superclassName (&gMetaClass) \ { gMetaClass.instanceConstructed(); } - /* Not to be included in headerdoc. - * - * @define OSDefineDefaultStructors - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - */ +/* Not to be included in headerdoc. 
+ * + * @define OSDefineDefaultStructors + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + */ #define OSDefineFinalStructors(className, superclassName) \ OSDefineDefaultStructors(className, superclassName) \ void className ::__OSFinalClass(void) { } - /* Not to be included in headerdoc. - * - * @define OSDefineMetaClassAndStructorsWithInit - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - * @param init A function to call in the constructor - * of the class's OSMetaClass. - */ +/* Not to be included in headerdoc. + * + * @define OSDefineMetaClassAndStructorsWithInit + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param init A function to call in the constructor + * of the class's OSMetaClass. + */ #define OSDefineMetaClassAndStructorsWithInit(className, superclassName, init) \ OSDefineMetaClassWithInit(className, superclassName, init) \ OSDefineDefaultStructors(className, superclassName) - /* Not to be included in headerdoc. - * - * @define OSDefineMetaClassAndAbstractStructorsWithInit - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - * @param init A function to call in the constructor - * of the class's OSMetaClass. - */ +/* Not to be included in headerdoc. + * + * @define OSDefineMetaClassAndAbstractStructorsWithInit + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param init A function to call in the constructor + * of the class's OSMetaClass. + */ #define OSDefineMetaClassAndAbstractStructorsWithInit(className, superclassName, init) \ OSDefineMetaClassWithInit(className, superclassName, init) \ OSDefineAbstractStructors(className, superclassName) - /* Not to be included in headerdoc. - * - * @define OSDefineMetaClassAndFinalStructorsWithInit - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - * @param init A function to call in the constructor - * of the class's OSMetaClass. 
- */ +/* Not to be included in headerdoc. + * + * @define OSDefineMetaClassAndFinalStructorsWithInit + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param init A function to call in the constructor + * of the class's OSMetaClass. + */ #define OSDefineMetaClassAndFinalStructorsWithInit(className, superclassName, init) \ OSDefineMetaClassWithInit(className, superclassName, init) \ OSDefineFinalStructors(className, superclassName) - /* Helpers */ - - /* Not to be included in headerdoc. - * - * @define OSDefineMetaClass - * @hidecontents - * - * @abstract - * Helper macro for for the standard metaclass-registration macros. - * DO NOT USE. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - * @param init A function to call in the constructor - * of the class's OSMetaClass. - */ +/* Helpers */ + +/* Not to be included in headerdoc. + * + * @define OSDefineMetaClass + * @hidecontents + * + * @abstract + * Helper macro for for the standard metaclass-registration macros. + * DO NOT USE. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * @param init A function to call in the constructor + * of the class's OSMetaClass. + */ #define OSDefineMetaClass(className, superclassName) \ OSDefineMetaClassWithInit(className, superclassName, ) - /*! - * @define OSDefineMetaClassAndStructors - * @hidecontents - * - * @abstract - * Defines an OSMetaClass and associated routines - * for a concrete Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - * - * @discussion - * Concrete Libkern C++ classes should "call" this macro - * at the beginning of their implementation files, - * before any function implementations for the class. - */ +/*! + * @define OSDefineMetaClassAndStructors + * @hidecontents + * + * @abstract + * Defines an OSMetaClass and associated routines + * for a concrete Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * + * @discussion + * Concrete Libkern C++ classes should "call" this macro + * at the beginning of their implementation files, + * before any function implementations for the class. + */ #define OSDefineMetaClassAndStructors(className, superclassName) \ OSDefineMetaClassAndStructorsWithInit(className, superclassName, ) - /*! - * @define OSDefineMetaClassAndAbstractStructors - * @hidecontents - * - * @abstract - * Defines an OSMetaClass and associated routines - * for an abstract Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. 
- * - * @discussion - * Abstract Libkern C++ classes--those with at least one - * pure virtual method--should "call" this macro - * at the beginning of their implementation files, - * before any function implementations for the class. - */ +/*! + * @define OSDefineMetaClassAndAbstractStructors + * @hidecontents + * + * @abstract + * Defines an OSMetaClass and associated routines + * for an abstract Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * + * @discussion + * Abstract Libkern C++ classes--those with at least one + * pure virtual method--should "call" this macro + * at the beginning of their implementation files, + * before any function implementations for the class. + */ #define OSDefineMetaClassAndAbstractStructors(className, superclassName) \ OSDefineMetaClassAndAbstractStructorsWithInit (className, superclassName, ) - /*! - * @define OSDefineMetaClassAndFinalStructors - * @hidecontents - * - * @abstract - * Defines an OSMetaClass and associated routines - * for a final (non-subclassable) Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param superclassName The name of the superclass of the C++ class, - * as a raw token, - * not a string or macro. - * - * @discussion - * Final Libkern C++ classes--those that do not allow - * subclassing--should "call" this macro at the beginning - * of their implementation files, - * before any function implementations for the class. - * (Final classes in the kernel may actually have subclasses in the kernel, - * but kexts cannot define any subclasses of a final class.) - * - * Note: If the class is exported by a pseudokext (symbol set), - * the final symbol generated by this macro must be exported - * for the final-class attribute to be enforced. - * - * Warning: Changing a class from "Default" to "Final" will break - * binary compatibility. - */ +/*! + * @define OSDefineMetaClassAndFinalStructors + * @hidecontents + * + * @abstract + * Defines an OSMetaClass and associated routines + * for a final (non-subclassable) Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param superclassName The name of the superclass of the C++ class, + * as a raw token, + * not a string or macro. + * + * @discussion + * Final Libkern C++ classes--those that do not allow + * subclassing--should "call" this macro at the beginning + * of their implementation files, + * before any function implementations for the class. + * (Final classes in the kernel may actually have subclasses in the kernel, + * but kexts cannot define any subclasses of a final class.) + * + * Note: If the class is exported by a pseudokext (symbol set), + * the final symbol generated by this macro must be exported + * for the final-class attribute to be enforced. + * + * Warning: Changing a class from "Default" to "Final" will break + * binary compatibility. + */ #define OSDefineMetaClassAndFinalStructors(className, superclassName) \ OSDefineMetaClassAndFinalStructorsWithInit(className, superclassName, ) - // Dynamic vtable patchup support routines and types - void reservedCalled(int ind) const; - - - /*! - * @define OSMetaClassDeclareReservedUnused - * @hidecontents - * - * @abstract - * Reserves vtable space for new virtual functions - * in a Libkern C++ class. 
- * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param index The numeric index of the vtable slot, - * as a raw constant, beginning from 0. - * - * @discussion - * Libkern C++ classes in kernel extensions that can be used as libraries - * can provide for backward compatibility by declaring a number - * of reserved vtable slots - * that can be replaced with new functions as they are added. - * Each reserved declaration must be accompanied in the implementation - * by a corresponding reference to - * @link OSMetaClassDefineReservedUnused - * OSMetaClassDefineReservedUnused@/link. - * - * When replacing a reserved slot, change the macro from "Unused" - * to "Used" to document the fact that the slot used to be reserved, - * and declare the new function immediately after the "Used" macro - * to preserve vtable ordering. - * See - * @link OSMetaClassDeclareReservedUsed - * OSMetaClassDeclareReservedUsed@/link. - */ +// Dynamic vtable patchup support routines and types + void reservedCalled(int ind) const; + + +/*! + * @define OSMetaClassDeclareReservedUnused + * @hidecontents + * + * @abstract + * Reserves vtable space for new virtual functions + * in a Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param index The numeric index of the vtable slot, + * as a raw constant, beginning from 0. + * + * @discussion + * Libkern C++ classes in kernel extensions that can be used as libraries + * can provide for backward compatibility by declaring a number + * of reserved vtable slots + * that can be replaced with new functions as they are added. + * Each reserved declaration must be accompanied in the implementation + * by a corresponding reference to + * @link OSMetaClassDefineReservedUnused + * OSMetaClassDefineReservedUnused@/link. + * + * When replacing a reserved slot, change the macro from "Unused" + * to "Used" to document the fact that the slot used to be reserved, + * and declare the new function immediately after the "Used" macro + * to preserve vtable ordering. + * See + * @link OSMetaClassDeclareReservedUsed + * OSMetaClassDeclareReservedUsed@/link. + */ #if APPLE_KEXT_VTABLE_PADDING #define OSMetaClassDeclareReservedUnused(className, index) \ private: \ @@ -1999,63 +2062,63 @@ public: #endif - /*! - * @define OSMetaClassDeclareReservedUsed - * @hidecontents - * - * @abstract - * Documents use of reserved vtable space for new virtual functions - * in a Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param index The numeric index of the vtable slot, - * as a raw constant, beginning from 0. - * - * @discussion - * This macro evaluates to nothing, and is used to document reserved - * vtable slots as they are filled. - * See - * @link OSMetaClassDeclareReservedUnused - * OSMetaClassDeclareReservedUnused@/link. - */ +/*! + * @define OSMetaClassDeclareReservedUsed + * @hidecontents + * + * @abstract + * Documents use of reserved vtable space for new virtual functions + * in a Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param index The numeric index of the vtable slot, + * as a raw constant, beginning from 0. + * + * @discussion + * This macro evaluates to nothing, and is used to document reserved + * vtable slots as they are filled. + * See + * @link OSMetaClassDeclareReservedUnused + * OSMetaClassDeclareReservedUnused@/link. 
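+ *
+ * A hedged sketch of slot reservation and later replacement
+ * (MyDriver and newFeature are illustrative names, not part of
+ * this header):
+ *
+ *   // Header: slot 0, once reserved, now carries a new virtual function.
+ *   class MyDriver : public IOService {
+ *       OSDeclareDefaultStructors(MyDriver)
+ *   public:
+ *       OSMetaClassDeclareReservedUsed(MyDriver, 0);
+ *       virtual void newFeature(void);
+ *       OSMetaClassDeclareReservedUnused(MyDriver, 1);
+ *   };
+ *
+ *   // Implementation file: register the metaclass, define the padding.
+ *   OSDefineMetaClassAndStructors(MyDriver, IOService)
+ *   OSMetaClassDefineReservedUsed(MyDriver, 0);
+ *   OSMetaClassDefineReservedUnused(MyDriver, 1);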
+ */ #define OSMetaClassDeclareReservedUsed(className, index) - /*! - * @define OSMetaClassDefineReservedUnused - * @hidecontents - * - * @abstract - * Defines a reserved vtable slot for a Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param index The numeric index of the vtable slot, - * as a raw constant, beginning from 0. - * - * @discussion - * Libkern C++ classes in kernel extensions that can be used as libraries - * can provide for backward compatibility by declaring a number - * of reserved vtable slots - * that can be replaced with new functions as they are added. - * Each reserved defintion accompanies - * a corresponding declaration created with - * @link OSMetaClassDeclareReservedUnused - * OSMetaClassDeclareReservedUnused@/link. - * - * This macro is used in the implementation file - * to provide a placeholder definition for the reserved vtable slot, - * as a function that calls panic with an error message. - * - * When replacing a reserved slot, change the macro from "Unused" - * to "Used" to document the fact that the slot used to be reserved, - * and declare the new function immediately after the "Used" macro - * to preserve vtable ordering. - * See - * @link OSMetaClassDefineReservedUsed - * OSMetaClassDefineReservedUsed@/link. - */ +/*! + * @define OSMetaClassDefineReservedUnused + * @hidecontents + * + * @abstract + * Defines a reserved vtable slot for a Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param index The numeric index of the vtable slot, + * as a raw constant, beginning from 0. + * + * @discussion + * Libkern C++ classes in kernel extensions that can be used as libraries + * can provide for backward compatibility by declaring a number + * of reserved vtable slots + * that can be replaced with new functions as they are added. + * Each reserved defintion accompanies + * a corresponding declaration created with + * @link OSMetaClassDeclareReservedUnused + * OSMetaClassDeclareReservedUnused@/link. + * + * This macro is used in the implementation file + * to provide a placeholder definition for the reserved vtable slot, + * as a function that calls panic with an error message. + * + * When replacing a reserved slot, change the macro from "Unused" + * to "Used" to document the fact that the slot used to be reserved, + * and declare the new function immediately after the "Used" macro + * to preserve vtable ordering. + * See + * @link OSMetaClassDefineReservedUsed + * OSMetaClassDefineReservedUsed@/link. + */ #if APPLE_KEXT_VTABLE_PADDING #define OSMetaClassDefineReservedUnused(className, index) \ void className ::_RESERVED ## className ## index () \ @@ -2065,56 +2128,56 @@ void className ::_RESERVED ## className ## index () \ #endif - /*! - * @define OSMetaClassDefineReservedUsed - * @hidecontents - * - * @abstract - * Reserves vtable space for new virtual functions in a Libkern C++ class. - * - * @param className The name of the C++ class, as a raw token, - * not a string or macro. - * @param index The numeric index of the vtable slot, - * as a raw constant, beginning from 0. - * - * @discussion - * This macro evaluates to nothing, and is used to document reserved - * vtable slots as they are filled. - * See - * @link OSMetaClassDefineReservedUnused - * OSMetaClassDefineReservedUnused@/link. - */ +/*! 
+ * @define OSMetaClassDefineReservedUsed + * @hidecontents + * + * @abstract + * Reserves vtable space for new virtual functions in a Libkern C++ class. + * + * @param className The name of the C++ class, as a raw token, + * not a string or macro. + * @param index The numeric index of the vtable slot, + * as a raw constant, beginning from 0. + * + * @discussion + * This macro evaluates to nothing, and is used to document reserved + * vtable slots as they are filled. + * See + * @link OSMetaClassDefineReservedUnused + * OSMetaClassDefineReservedUnused@/link. + */ #define OSMetaClassDefineReservedUsed(className, index) - // I/O Kit debug internal routines. - static void printInstanceCounts(); - static void serializeClassDictionary(OSDictionary * dict); +// I/O Kit debug internal routines. + static void printInstanceCounts(); + static void serializeClassDictionary(OSDictionary * dict); #ifdef XNU_KERNEL_PRIVATE #if IOTRACKING public: - static void * trackedNew(size_t size); - static void trackedDelete(void * mem, size_t size); - void trackedInstance(OSObject * instance) const; - void trackedFree(OSObject * instance) const; - void trackedAccumSize(OSObject * instance, size_t size) const; - struct IOTrackingQueue * getTracking() const; + static void * trackedNew(size_t size); + static void trackedDelete(void * mem, size_t size); + void trackedInstance(OSObject * instance) const; + void trackedFree(OSObject * instance) const; + void trackedAccumSize(OSObject * instance, size_t size) const; + struct IOTrackingQueue * getTracking() const; #endif /* IOTRACKING */ #endif /* XNU_KERNEL_PRIVATE */ private: - // Obsolete APIs - static OSDictionary * getClassDictionary(); - virtual bool serialize(OSSerialize * serializer) const; - - // Virtual Padding functions for MetaClass's - OSMetaClassDeclareReservedUnused(OSMetaClass, 0); - OSMetaClassDeclareReservedUnused(OSMetaClass, 1); - OSMetaClassDeclareReservedUnused(OSMetaClass, 2); - OSMetaClassDeclareReservedUnused(OSMetaClass, 3); - OSMetaClassDeclareReservedUnused(OSMetaClass, 4); - OSMetaClassDeclareReservedUnused(OSMetaClass, 5); - OSMetaClassDeclareReservedUnused(OSMetaClass, 6); - OSMetaClassDeclareReservedUnused(OSMetaClass, 7); +// Obsolete APIs + static OSDictionary * getClassDictionary(); + virtual bool serialize(OSSerialize * serializer) const; + +// Virtual Padding functions for MetaClass's + OSMetaClassDeclareReservedUnused(OSMetaClass, 0); + OSMetaClassDeclareReservedUnused(OSMetaClass, 1); + OSMetaClassDeclareReservedUnused(OSMetaClass, 2); + OSMetaClassDeclareReservedUnused(OSMetaClass, 3); + OSMetaClassDeclareReservedUnused(OSMetaClass, 4); + OSMetaClassDeclareReservedUnused(OSMetaClass, 5); + OSMetaClassDeclareReservedUnused(OSMetaClass, 6); + OSMetaClassDeclareReservedUnused(OSMetaClass, 7); }; #endif /* !_LIBKERN_OSMETACLASS_H */ diff --git a/libkern/libkern/c++/OSNumber.h b/libkern/libkern/c++/OSNumber.h index e157d9e5c..ebb81a616 100644 --- a/libkern/libkern/c++/OSNumber.h +++ b/libkern/libkern/c++/OSNumber.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOOffset.h created by rsulack on Wed 17-Sep-1997 */ @@ -39,8 +39,8 @@ * @abstract * This header declares the OSNumber container class. */ - - + + /*! * @class OSNumber * @@ -58,7 +58,7 @@ * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSNumber provides no concurrency protection; @@ -70,369 +70,369 @@ */ class OSNumber : public OSObject { - friend class OSSerialize; + friend class OSSerialize; - OSDeclareDefaultStructors(OSNumber) + OSDeclareDefaultStructors(OSNumber) #if APPLE_KEXT_ALIGN_CONTAINERS protected: - unsigned int size; - unsigned long long value; + unsigned int size; + unsigned long long value; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: - unsigned long long value; - unsigned int size; + unsigned long long value; + unsigned int size; + + struct ExpansionData { }; - struct ExpansionData { }; - - /* Reserved for future use. (Internal use only) */ - ExpansionData * reserved; +/* Reserved for future use. (Internal use only) */ + ExpansionData * reserved; #endif /* APPLE_KEXT_ALIGN_CONTAINERS */ public: - /*! - * @function withNumber - * - * @abstract - * Creates and initializes an instance of OSNumber - * with an integer value. - * - * @param value The numeric integer value for the OSNumber to store. - * @param numberOfBits The number of bits to limit storage to. - * - * @result - * An instance of OSNumber with a reference count of 1; - * NULL on failure. - * - * @discussion - * value is masked to the provided numberOfBits - * when the OSNumber object is initialized. - * - * You can change the value of an OSNumber later - * using @link setValue setValue@/link - * and @link addValue addValue@/link, - * but you can't change the bit size. - */ - static OSNumber * withNumber( - unsigned long long value, - unsigned int numberOfBits); - - - /*! - * @function withNumber - * - * @abstract - * Creates and initializes an instance of OSNumber - * with an unsigned integer value represented as a C string. - * - * @param valueString A C string representing a numeric value - * for the OSNumber to store. - * @param numberOfBits The number of bits to limit storage to. - * - * @result - * An instance of OSNumber with a reference count of 1; - * NULL on failure. - * - * @discussion - * This function does not work in I/O Kit versions prior to 8.0 (Mac OS X 10.4). - * In I/O Kit version 8.0 and later, it works - * but is limited to parsing unsigned 32 bit quantities. 
- * The format of the C string may be decimal, hexadecimal ("0x" prefix), - * binary ("0b" prefix), or octal ("0" prefix). - * - * The parsed value is masked to the provided numberOfBits - * when the OSNumber object is initialized. - * - * You can change the value of an OSNumber later - * using @link setValue setValue@/link - * and @link addValue addValue@/link, - * but you can't change the bit size. - */ - static OSNumber * withNumber( - const char * valueString, - unsigned int numberOfBits); - - - /*! - * @function init - * - * @abstract - * Initializes an instance of OSNumber with an integer value. - * - * @param value The numeric integer value for the OSNumber to store. - * @param numberOfBits The number of bits to limit storage to. - * - * @result - * true if initialization succeeds, - * false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSNumber/withNumber/staticOSNumber*\/(constchar*,unsignedint) - * withNumber(unsigned long long, unsigned int)@/link - * instead. - */ - virtual bool init( - unsigned long long value, - unsigned int numberOfBits); - - - /*! - * @function init - * - * @abstract - * Initializes an instance of OSNumber - * with an unsigned integer value represented as a C string. - * - * @param valueString A C string representing a numeric value - * for the OSNumber to store. - * @param numberOfBits The number of bits to limit storage to. - * - * @result - * true if initialization succeeds, - * false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSNumber/withNumber/staticOSNumber*\/(constchar*,unsignedint) - * withNumber(const char *, unsigned int)@/link - * instead. - */ - virtual bool init( - const char * valueString, - unsigned int numberOfBits); - - - /*! - * @function free - * - * @abstract - * Deallocates or releases any resources - * used by the OSNumber instance. - * - * @discussion - * This function should not be called directly; - * use - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link - * instead. - */ - virtual void free() APPLE_KEXT_OVERRIDE; - - - /*! - * @function numberOfBits - * - * @abstract - * Returns the number of bits used to represent - * the OSNumber object's integer value. - * - * @result - * The number of bits used to represent - * the OSNumber object's integer value. - * - * @discussion - * The number of bits is used to limit the stored value of the OSNumber. - * Any change to its value is performed as an unsigned long long - * and then truncated to the number of bits. - */ - virtual unsigned int numberOfBits() const; - - - /*! - * @function numberOfBytes - * - * @abstract - * Returns the number of bytes used to represent - * the OSNumber object's integer value. - * - * @result - * The number of bytes used to represent - * the OSNumber object's integer value. - * See @link numberOfBits numberOfBits@/link. - */ - virtual unsigned int numberOfBytes() const; +/*! + * @function withNumber + * + * @abstract + * Creates and initializes an instance of OSNumber + * with an integer value. + * + * @param value The numeric integer value for the OSNumber to store. + * @param numberOfBits The number of bits to limit storage to. + * + * @result + * An instance of OSNumber with a reference count of 1; + * NULL on failure. + * + * @discussion + * value is masked to the provided numberOfBits + * when the OSNumber object is initialized. 
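+ *
+ * For example, a hedged sketch (the values are illustrative):
+ *
+ *   OSNumber * num = OSNumber::withNumber("0x1ff", 8);
+ *   // "0x1ff" parses to 0x1FF; masking to 8 bits leaves 0xFF.
+ *   unsigned char byte = num->unsigned8BitValue();   // 0xFF
+ *   num->release();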
+ * + * You can change the value of an OSNumber later + * using @link setValue setValue@/link + * and @link addValue addValue@/link, + * but you can't change the bit size. + */ + static OSNumber * withNumber( + unsigned long long value, + unsigned int numberOfBits); + + +/*! + * @function withNumber + * + * @abstract + * Creates and initializes an instance of OSNumber + * with an unsigned integer value represented as a C string. + * + * @param valueString A C string representing a numeric value + * for the OSNumber to store. + * @param numberOfBits The number of bits to limit storage to. + * + * @result + * An instance of OSNumber with a reference count of 1; + * NULL on failure. + * + * @discussion + * This function does not work in I/O Kit versions prior to 8.0 (Mac OS X 10.4). + * In I/O Kit version 8.0 and later, it works + * but is limited to parsing unsigned 32 bit quantities. + * The format of the C string may be decimal, hexadecimal ("0x" prefix), + * binary ("0b" prefix), or octal ("0" prefix). + * + * The parsed value is masked to the provided numberOfBits + * when the OSNumber object is initialized. + * + * You can change the value of an OSNumber later + * using @link setValue setValue@/link + * and @link addValue addValue@/link, + * but you can't change the bit size. + */ + static OSNumber * withNumber( + const char * valueString, + unsigned int numberOfBits); + + +/*! + * @function init + * + * @abstract + * Initializes an instance of OSNumber with an integer value. + * + * @param value The numeric integer value for the OSNumber to store. + * @param numberOfBits The number of bits to limit storage to. + * + * @result + * true if initialization succeeds, + * false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSNumber/withNumber/staticOSNumber*\/(constchar*,unsignedint) + * withNumber(unsigned long long, unsigned int)@/link + * instead. + */ + virtual bool init( + unsigned long long value, + unsigned int numberOfBits); + + +/*! + * @function init + * + * @abstract + * Initializes an instance of OSNumber + * with an unsigned integer value represented as a C string. + * + * @param valueString A C string representing a numeric value + * for the OSNumber to store. + * @param numberOfBits The number of bits to limit storage to. + * + * @result + * true if initialization succeeds, + * false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSNumber/withNumber/staticOSNumber*\/(constchar*,unsignedint) + * withNumber(const char *, unsigned int)@/link + * instead. + */ + virtual bool init( + const char * valueString, + unsigned int numberOfBits); + + +/*! + * @function free + * + * @abstract + * Deallocates or releases any resources + * used by the OSNumber instance. + * + * @discussion + * This function should not be called directly; + * use + * @link + * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() + * release@/link + * instead. + */ + virtual void free() APPLE_KEXT_OVERRIDE; + + +/*! + * @function numberOfBits + * + * @abstract + * Returns the number of bits used to represent + * the OSNumber object's integer value. + * + * @result + * The number of bits used to represent + * the OSNumber object's integer value. + * + * @discussion + * The number of bits is used to limit the stored value of the OSNumber. 
+ * Any change to its value is performed as an unsigned long long + * and then truncated to the number of bits. + */ + virtual unsigned int numberOfBits() const; + + +/*! + * @function numberOfBytes + * + * @abstract + * Returns the number of bytes used to represent + * the OSNumber object's integer value. + * + * @result + * The number of bytes used to represent + * the OSNumber object's integer value. + * See @link numberOfBits numberOfBits@/link. + */ + virtual unsigned int numberOfBytes() const; // xx-review: should switch to explicitly-sized int types // xx-review: but that messes up C++ mangled symbols :-( - /*! - * @function unsigned8BitValue - * - * @abstract - * Returns the OSNumber object's integer value - * cast as an unsigned 8-bit integer. - * - * @result - * The OSNumber object's integer value - * cast as an unsigned 8-bit integer. - * - * @discussion - * This function merely casts the internal integer value, - * giving no indication of truncation or other potential conversion problems. - */ - virtual unsigned char unsigned8BitValue() const; - - - /*! - * @function unsigned16BitValue - * - * @abstract - * Returns the OSNumber object's integer value - * cast as an unsigned 16-bit integer. - * - * @result - * Returns the OSNumber object's integer value - * cast as an unsigned 16-bit integer. - * - * @discussion - * This function merely casts the internal integer value, - * giving no indication of truncation or other potential conversion problems. - */ - virtual unsigned short unsigned16BitValue() const; - - - /*! - * @function unsigned32BitValue - * - * @abstract - * Returns the OSNumber object's integer value - * cast as an unsigned 32-bit integer. - * - * @result - * Returns the OSNumber object's integer value - * cast as an unsigned 32-bit integer. - * - * @discussion - * This function merely casts the internal integer value, - * giving no indication of truncation or other potential conversion problems. - */ - virtual unsigned int unsigned32BitValue() const; - - - /*! - * @function unsigned64BitValue - * - * @abstract - * Returns the OSNumber object's integer value - * cast as an unsigned 64-bit integer. - * - * @result - * Returns the OSNumber object's integer value - * cast as an unsigned 64-bit integer. - * - * @discussion - * This function merely casts the internal integer value, - * giving no indication of truncation or other potential conversion problems. - */ - virtual unsigned long long unsigned64BitValue() const; +/*! + * @function unsigned8BitValue + * + * @abstract + * Returns the OSNumber object's integer value + * cast as an unsigned 8-bit integer. + * + * @result + * The OSNumber object's integer value + * cast as an unsigned 8-bit integer. + * + * @discussion + * This function merely casts the internal integer value, + * giving no indication of truncation or other potential conversion problems. + */ + virtual unsigned char unsigned8BitValue() const; + + +/*! + * @function unsigned16BitValue + * + * @abstract + * Returns the OSNumber object's integer value + * cast as an unsigned 16-bit integer. + * + * @result + * Returns the OSNumber object's integer value + * cast as an unsigned 16-bit integer. + * + * @discussion + * This function merely casts the internal integer value, + * giving no indication of truncation or other potential conversion problems. + */ + virtual unsigned short unsigned16BitValue() const; + + +/*! + * @function unsigned32BitValue + * + * @abstract + * Returns the OSNumber object's integer value + * cast as an unsigned 32-bit integer. 
+ * + * @result + * Returns the OSNumber object's integer value + * cast as an unsigned 32-bit integer. + * + * @discussion + * This function merely casts the internal integer value, + * giving no indication of truncation or other potential conversion problems. + */ + virtual unsigned int unsigned32BitValue() const; + + +/*! + * @function unsigned64BitValue + * + * @abstract + * Returns the OSNumber object's integer value + * cast as an unsigned 64-bit integer. + * + * @result + * Returns the OSNumber object's integer value + * cast as an unsigned 64-bit integer. + * + * @discussion + * This function merely casts the internal integer value, + * giving no indication of truncation or other potential conversion problems. + */ + virtual unsigned long long unsigned64BitValue() const; // xx-review: wow, there's no addNumber(OSNumber *)! - /*! - * @function addValue - * - * @abstract - * Adds a signed integer value to the internal integer value - * of the OSNumber object. - * - * @param value The value to be added. - * - * @discussion - * This function adds values as 64-bit integers, - * but masks the result by the bit size - * (see @link numberOfBits numberOfBits@/link), - * so addition overflows will not necessarily - * be the same as for plain C integers. - */ - virtual void addValue(signed long long value); - - - /*! - * @function setValue - * - * @abstract - * Replaces the current internal integer value - * of the OSNumber object by the value given. - * - * @param value The new value for the OSNumber object, - * which is truncated by the bit size of the OSNumber object - * (see @link numberOfBits numberOfBits@/link). - */ - virtual void setValue(unsigned long long value); - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of two OSNumber objects. - * - * @param aNumber The OSNumber to be compared against the receiver. - * - * @result - * true if the OSNumber objects are equal, - * false if not. - * - * @discussion - * Two OSNumber objects are considered equal - * if they represent the same C integer value. - */ - virtual bool isEqualTo(const OSNumber * aNumber) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality an OSNumber to an arbitrary object. - * - * @param anObject An object to be compared against the receiver. - * - * @result - * true if the objects are equal, - * false if not. - * - * @discussion - * An OSNumber is considered equal to another object if that object is - * derived from OSNumber and represents the same C integer value. - */ - virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function serialize - * - * @abstract - * Archives the receiver into the provided - * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. - * - * @param serializer The OSSerialize object. - * - * @result - * true if serialization succeeds, false if not. - */ - virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; - - - OSMetaClassDeclareReservedUnused(OSNumber, 0); - OSMetaClassDeclareReservedUnused(OSNumber, 1); - OSMetaClassDeclareReservedUnused(OSNumber, 2); - OSMetaClassDeclareReservedUnused(OSNumber, 3); - OSMetaClassDeclareReservedUnused(OSNumber, 4); - OSMetaClassDeclareReservedUnused(OSNumber, 5); - OSMetaClassDeclareReservedUnused(OSNumber, 6); - OSMetaClassDeclareReservedUnused(OSNumber, 7); +/*! + * @function addValue + * + * @abstract + * Adds a signed integer value to the internal integer value + * of the OSNumber object. 
+ * + * @param value The value to be added. + * + * @discussion + * This function adds values as 64-bit integers, + * but masks the result by the bit size + * (see @link numberOfBits numberOfBits@/link), + * so addition overflows will not necessarily + * be the same as for plain C integers. + */ + virtual void addValue(signed long long value); + + +/*! + * @function setValue + * + * @abstract + * Replaces the current internal integer value + * of the OSNumber object by the value given. + * + * @param value The new value for the OSNumber object, + * which is truncated by the bit size of the OSNumber object + * (see @link numberOfBits numberOfBits@/link). + */ + virtual void setValue(unsigned long long value); + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of two OSNumber objects. + * + * @param aNumber The OSNumber to be compared against the receiver. + * + * @result + * true if the OSNumber objects are equal, + * false if not. + * + * @discussion + * Two OSNumber objects are considered equal + * if they represent the same C integer value. + */ + virtual bool isEqualTo(const OSNumber * aNumber) const; + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality an OSNumber to an arbitrary object. + * + * @param anObject An object to be compared against the receiver. + * + * @result + * true if the objects are equal, + * false if not. + * + * @discussion + * An OSNumber is considered equal to another object if that object is + * derived from OSNumber and represents the same C integer value. + */ + virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function serialize + * + * @abstract + * Archives the receiver into the provided + * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. + * + * @param serializer The OSSerialize object. + * + * @result + * true if serialization succeeds, false if not. + */ + virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; + + + OSMetaClassDeclareReservedUnused(OSNumber, 0); + OSMetaClassDeclareReservedUnused(OSNumber, 1); + OSMetaClassDeclareReservedUnused(OSNumber, 2); + OSMetaClassDeclareReservedUnused(OSNumber, 3); + OSMetaClassDeclareReservedUnused(OSNumber, 4); + OSMetaClassDeclareReservedUnused(OSNumber, 5); + OSMetaClassDeclareReservedUnused(OSNumber, 6); + OSMetaClassDeclareReservedUnused(OSNumber, 7); }; #endif /* !_OS_OSNUMBER_H */ diff --git a/libkern/libkern/c++/OSObject.h b/libkern/libkern/c++/OSObject.h index 14c7defeb..036730372 100644 --- a/libkern/libkern/c++/OSObject.h +++ b/libkern/libkern/c++/OSObject.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* -Copyright (c) 1998 Apple Computer, Inc. All rights reserved. -HISTORY - 1998-10-30 Godfrey van der Linden(gvdl) - Created -*/ + * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. + * HISTORY + * 1998-10-30 Godfrey van der Linden(gvdl) + * Created + */ #ifndef _LIBKERN_OSOBJECT_H #define _LIBKERN_OSOBJECT_H @@ -51,7 +51,7 @@ class OSString; * This header declares the OSObject class, * which is the concrete root of the Libkern C++ class hierarchy. */ - + /*! * @class OSObject @@ -111,7 +111,7 @@ class OSString; * OSObject provides reference counting services using the * @link * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link, + * retain@/link, * @link * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() * release()@/link, @@ -126,8 +126,8 @@ class OSString; * The public interface to the reference counting is * @link * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link, - * and + * retain@/link, + * and * @link * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() * release@/link; @@ -152,7 +152,7 @@ class OSString; * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * Concurrency Protection @@ -167,287 +167,288 @@ class OSString; */ class OSObject : public OSMetaClassBase { - OSDeclareAbstractStructors(OSObject) + OSDeclareAbstractStructors(OSObject) #if IOKITSTATS friend class IOStatistics; #endif private: - /* Not to be included in headerdoc. - * - * @var retainCount Number of references held on this instance. - */ - mutable int retainCount; +/* Not to be included in headerdoc. + * + * @var retainCount Number of references held on this instance. + */ + mutable int retainCount; protected: // xx-review: seems not to be used, should we deprecate? - /*! - * @function release - * - * @abstract - * Releases a reference to an object, - * freeing it immediately if the reference count - * drops below the specified threshold. - * - * @param freeWhen If decrementing the reference count makes it - * >= freeWhen, the object is immediately freed. - * - * @discussion - * If the receiver has freeWhen or fewer references - * after its reference count is decremented, - * it is immediately freed. - * - * This version of release - * can be used to break certain retain cycles in object graphs. - * In general, however, it should be avoided. - */ - virtual void release(int freeWhen) const APPLE_KEXT_OVERRIDE; - - /*! - * @function taggedRelease - * - * @abstract - * Releases a tagged reference to an object, - * freeing it immediately if the reference count - * drops below the specified threshold. - * - * @param tag Used for tracking collection references. - * @param freeWhen If decrementing the reference count makes it - * >= freeWhen, the object is immediately freed. 
- * - * @discussion - * Kernel extensions should not use this function. - * It is for use by OSCollection and subclasses to track - * inclusion in collections. - * - * If the receiver has freeWhen or fewer references - * after its reference count is decremented, - * it is immediately freed. - * - * This version of release - * can be used to break certain retain cycles in object graphs. - * In general, however, it should be avoided. - */ - virtual void taggedRelease(const void * tag, const int freeWhen) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function init - * - * @abstract - * Initializes a newly-allocated object. - * - * @result - * true on success, false on failure. - * - * @discussion - * Classes derived from OSObject must override the primary init method - * of their parent. - * In general most implementations call - * super::init() - * before doing local initialisation. - * If the superclass call fails then return false immediately. - * If the subclass encounters a failure then it should return false. - */ - virtual bool init(); - - - /*! - * @function free - * - * @abstract - * Deallocates/releases resources held by the object. - * - * @discussion - * Classes derived from OSObject should override this function - * to deallocate or release all dynamic resources held by the instance, - * then call the superclass's implementation. - * - * Caution: - *
- * 1. You can not assume that you have completed initialization
- *    before free is called, so be very careful in your implementation.
- * 2. OSObject's implementation performs the C++ delete of the instance,
- *    so be sure that you call the superclass implementation last
- *    in your implementation.
- * 3. free must not fail; all resources must be deallocated
- *    or released on completion.
    - */ - virtual void free(); - - - /*! - * @function operator delete - * - * @abstract - * Frees the memory of the object itself. - * - * @param mem A pointer to the object's memory. - * @param size The size of the object's block of memory. - * - * @discussion - * Never use delete on objects derived from OSObject; - * use - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link - * instead. - */ - static void operator delete(void * mem, size_t size); +/*! + * @function release + * + * @abstract + * Releases a reference to an object, + * freeing it immediately if the reference count + * drops below the specified threshold. + * + * @param freeWhen If decrementing the reference count makes it + * >= freeWhen, the object is immediately freed. + * + * @discussion + * If the receiver has freeWhen or fewer references + * after its reference count is decremented, + * it is immediately freed. + * + * This version of release + * can be used to break certain retain cycles in object graphs. + * In general, however, it should be avoided. + */ + virtual void release(int freeWhen) const APPLE_KEXT_OVERRIDE; + +/*! + * @function taggedRelease + * + * @abstract + * Releases a tagged reference to an object, + * freeing it immediately if the reference count + * drops below the specified threshold. + * + * @param tag Used for tracking collection references. + * @param freeWhen If decrementing the reference count makes it + * >= freeWhen, the object is immediately freed. + * + * @discussion + * Kernel extensions should not use this function. + * It is for use by OSCollection and subclasses to track + * inclusion in collections. + * + * If the receiver has freeWhen or fewer references + * after its reference count is decremented, + * it is immediately freed. + * + * This version of release + * can be used to break certain retain cycles in object graphs. + * In general, however, it should be avoided. + */ + virtual void taggedRelease(const void * tag, const int freeWhen) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function init + * + * @abstract + * Initializes a newly-allocated object. + * + * @result + * true on success, false on failure. + * + * @discussion + * Classes derived from OSObject must override the primary init method + * of their parent. + * In general most implementations call + * super::init() + * before doing local initialisation. + * If the superclass call fails then return false immediately. + * If the subclass encounters a failure then it should return false. + */ + virtual bool init(); + + +/*! + * @function free + * + * @abstract + * Deallocates/releases resources held by the object. + * + * @discussion + * Classes derived from OSObject should override this function + * to deallocate or release all dynamic resources held by the instance, + * then call the superclass's implementation. + * + * Caution: + *
+ * 1. You can not assume that you have completed initialization
+ *    before free is called, so be very careful in your implementation.
+ * 2. OSObject's implementation performs the C++ delete of the instance,
+ *    so be sure that you call the superclass implementation last
+ *    in your implementation.
+ * 3. free must not fail; all resources must be deallocated
+ *    or released on completion.
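+ *
+ * A minimal override sketch (MyObject, fBuffer, and fBufferSize are
+ * illustrative names, not part of this header; the buffer is assumed
+ * to come from IOMalloc):
+ *
+ *   void MyObject::free()
+ *   {
+ *       if (fBuffer) {
+ *           IOFree(fBuffer, fBufferSize);  // release held resources first
+ *           fBuffer = NULL;
+ *       }
+ *       super::free();  // superclass call last; it performs the delete
+ *   }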
    + */ + virtual void free(); + + +/*! + * @function operator delete + * + * @abstract + * Frees the memory of the object itself. + * + * @param mem A pointer to the object's memory. + * @param size The size of the object's block of memory. + * + * @discussion + * Never use delete on objects derived from OSObject; + * use + * @link + * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() + * release@/link + * instead. + */ + static void operator delete(void * mem, size_t size); public: - /*! - * @function operator new - * - * @abstract - * Allocates memory for an instance of the class. - * - * @param size The number of bytes to allocate - * - * @result - * A pointer to block of memory if available, NULL otherwise. - */ - static void * operator new(size_t size); - - - /*! - * @function getRetainCount - * - * @abstract - * Returns the reference count of the object. - * - * @result - * The reference count of the object. - */ - virtual int getRetainCount() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function retain - * - * @abstract - * Retains a reference to the object. - * - * @discussion - * This function increments the reference count of the receiver by 1. - * If you need to maintain a reference to an object - * outside the context in which you received it, - * you should always retain it immediately. - */ - virtual void retain() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function release - * - * @abstract - * Releases a reference to the object, - * freeing it immediately if the reference count drops to zero. - * - * @discussion - * This function decrements the reference count of the receiver by 1. - * If the reference count drops to zero, - * the object is immediately freed using - * @link - * //apple_ref/cpp/instm/OSObject/free/virtualvoid/() - * free@/link. - */ - virtual void release() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function taggedRetain - * - * @abstract - * Retains a reference to the object with an optional - * tag used for reference-tracking. - * - * @param tag Used for tracking collection references. - * - * @discussion - * Kernel extensions should not use this function. - * It is for use by OSCollection and subclasses to track - * inclusion in collections. - * - * If you need to maintain a reference to an object - * outside the context in which you received it, - * you should always retain it immediately. - */ - virtual void taggedRetain(const void * tag = 0) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function taggedRelease - * - * @abstract - * Releases a tagged reference to an object, - * freeing it immediately if the reference count - * drops to zero. - * - * @param tag Used for tracking collection references. - * - * @discussion - * Kernel extensions should not use this function. - * It is for use by OSCollection and subclasses to track - * inclusion in collections. - */ - virtual void taggedRelease(const void * tag = 0) const APPLE_KEXT_OVERRIDE; - // xx-review: used to say, "Remove a reference on this object with this tag, if an attempt is made to remove a reference that isn't associated with this tag the kernel will panic immediately", but I don't see that in the implementation - - - /*! - * @function serialize - * - * @abstract - * Overridden by subclasses to archive the receiver into the provided - * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. - * - * @param serializer The OSSerialize object. - * - * @result - * true if serialization succeeds, false if not. 
- * - * @discussion - * OSObject's implementation writes a string indicating that - * the class of the object receiving the function call - * is not serializable. - * Subclasses that can meaningfully encode themselves - * in I/O Kit-style property list XML can override this function to do so. - * See - * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link - * for more information. - */ - virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; +/*! + * @function operator new + * + * @abstract + * Allocates memory for an instance of the class. + * + * @param size The number of bytes to allocate + * + * @result + * A pointer to block of memory if available, NULL otherwise. + */ + static void * operator new(size_t size); + + +/*! + * @function getRetainCount + * + * @abstract + * Returns the reference count of the object. + * + * @result + * The reference count of the object. + */ + virtual int getRetainCount() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function retain + * + * @abstract + * Retains a reference to the object. + * + * @discussion + * This function increments the reference count of the receiver by 1. + * If you need to maintain a reference to an object + * outside the context in which you received it, + * you should always retain it immediately. + */ + virtual void retain() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function release + * + * @abstract + * Releases a reference to the object, + * freeing it immediately if the reference count drops to zero. + * + * @discussion + * This function decrements the reference count of the receiver by 1. + * If the reference count drops to zero, + * the object is immediately freed using + * @link + * //apple_ref/cpp/instm/OSObject/free/virtualvoid/() + * free@/link. + */ + virtual void release() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function taggedRetain + * + * @abstract + * Retains a reference to the object with an optional + * tag used for reference-tracking. + * + * @param tag Used for tracking collection references. + * + * @discussion + * Kernel extensions should not use this function. + * It is for use by OSCollection and subclasses to track + * inclusion in collections. + * + * If you need to maintain a reference to an object + * outside the context in which you received it, + * you should always retain it immediately. + */ + virtual void taggedRetain(const void * tag = 0) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function taggedRelease + * + * @abstract + * Releases a tagged reference to an object, + * freeing it immediately if the reference count + * drops to zero. + * + * @param tag Used for tracking collection references. + * + * @discussion + * Kernel extensions should not use this function. + * It is for use by OSCollection and subclasses to track + * inclusion in collections. + */ + virtual void taggedRelease(const void * tag = 0) const APPLE_KEXT_OVERRIDE; +// xx-review: used to say, "Remove a reference on this object with this tag, if an attempt is made to remove a reference that isn't associated with this tag the kernel will panic immediately", but I don't see that in the implementation + + +/*! + * @function serialize + * + * @abstract + * Overridden by subclasses to archive the receiver into the provided + * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. + * + * @param serializer The OSSerialize object. + * + * @result + * true if serialization succeeds, false if not. 
+ * + * @discussion + * OSObject's implementation writes a string indicating that + * the class of the object receiving the function call + * is not serializable. + * Subclasses that can meaningfully encode themselves + * in I/O Kit-style property list XML can override this function to do so. + * See + * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link + * for more information. + */ + virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; #ifdef XNU_KERNEL_PRIVATE #if IOTRACKING - void trackingAccumSize(size_t size); -#endif + void trackingAccumSize(size_t size); #endif - // Unused Padding - OSMetaClassDeclareReservedUnused(OSObject, 0); - OSMetaClassDeclareReservedUnused(OSObject, 1); - OSMetaClassDeclareReservedUnused(OSObject, 2); - OSMetaClassDeclareReservedUnused(OSObject, 3); - OSMetaClassDeclareReservedUnused(OSObject, 4); - OSMetaClassDeclareReservedUnused(OSObject, 5); - OSMetaClassDeclareReservedUnused(OSObject, 6); - OSMetaClassDeclareReservedUnused(OSObject, 7); - OSMetaClassDeclareReservedUnused(OSObject, 8); - OSMetaClassDeclareReservedUnused(OSObject, 9); - OSMetaClassDeclareReservedUnused(OSObject, 10); - OSMetaClassDeclareReservedUnused(OSObject, 11); - OSMetaClassDeclareReservedUnused(OSObject, 12); - OSMetaClassDeclareReservedUnused(OSObject, 13); - OSMetaClassDeclareReservedUnused(OSObject, 14); - OSMetaClassDeclareReservedUnused(OSObject, 15); + bool taggedTryRetain(const void *tag) const; +#endif +// Unused Padding + OSMetaClassDeclareReservedUnused(OSObject, 0); + OSMetaClassDeclareReservedUnused(OSObject, 1); + OSMetaClassDeclareReservedUnused(OSObject, 2); + OSMetaClassDeclareReservedUnused(OSObject, 3); + OSMetaClassDeclareReservedUnused(OSObject, 4); + OSMetaClassDeclareReservedUnused(OSObject, 5); + OSMetaClassDeclareReservedUnused(OSObject, 6); + OSMetaClassDeclareReservedUnused(OSObject, 7); + OSMetaClassDeclareReservedUnused(OSObject, 8); + OSMetaClassDeclareReservedUnused(OSObject, 9); + OSMetaClassDeclareReservedUnused(OSObject, 10); + OSMetaClassDeclareReservedUnused(OSObject, 11); + OSMetaClassDeclareReservedUnused(OSObject, 12); + OSMetaClassDeclareReservedUnused(OSObject, 13); + OSMetaClassDeclareReservedUnused(OSObject, 14); + OSMetaClassDeclareReservedUnused(OSObject, 15); }; #endif /* !_LIBKERN_OSOBJECT_H */ diff --git a/libkern/libkern/c++/OSOrderedSet.h b/libkern/libkern/c++/OSOrderedSet.h index 390b8c190..2a24e321f 100644 --- a/libkern/libkern/c++/OSOrderedSet.h +++ b/libkern/libkern/c++/OSOrderedSet.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,8 +40,8 @@ class OSOffset; * @abstract * This header declares the OSOrderedSet collection class. */ - - + + /*! * @class OSOrderedSet * @@ -82,7 +82,7 @@ class OSOffset; * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSOrderedSet provides no concurrency protection; @@ -94,675 +94,675 @@ class OSOffset; */ class OSOrderedSet : public OSCollection { - OSDeclareDefaultStructors(OSOrderedSet) + OSDeclareDefaultStructors(OSOrderedSet) public: - /*! - * @typedef OSOrderFunction - * - * @abstract - * The sorting function used by an OSOrderedSet to order objects. - * - * @param obj1 An object from the ordered set. May be NULL. - * @param obj2 The object being ordered within the ordered set. - * May be NULL. - * @param context A pointer to a user-provided context. May be NULL. - * - * @result - * A comparison result of the object: - *
- * <ul>
- *     <li>a negative value if obj2 should precede obj1,</li>
- *     <li>a positive value if obj1 should precede obj2,</li>
- *     <li>and 0 if obj1 and obj2 have an equivalent ordering.</li>
- * </ul>
    - */ - typedef SInt32 (*OSOrderFunction)(const OSMetaClassBase * obj1, - const OSMetaClassBase * obj2, - void * context); +/*! + * @typedef OSOrderFunction + * + * @abstract + * The sorting function used by an OSOrderedSet to order objects. + * + * @param obj1 An object from the ordered set. May be NULL. + * @param obj2 The object being ordered within the ordered set. + * May be NULL. + * @param context A pointer to a user-provided context. May be NULL. + * + * @result + * A comparison result of the object: + *
+ * <ul>
+ *     <li>a negative value if obj2 should precede obj1,</li>
+ *     <li>a positive value if obj1 should precede obj2,</li>
+ *     <li>and 0 if obj1 and obj2 have an equivalent ordering.</li>
+ * </ul>
    + */ + typedef SInt32 (*OSOrderFunction)(const OSMetaClassBase * obj1, + const OSMetaClassBase * obj2, + void * context); protected: - struct _Element * array; - OSOrderFunction ordering; - void * orderingRef; - unsigned int count; - unsigned int capacity; - unsigned int capacityIncrement; - - struct ExpansionData { }; - - /* Reserved for future use. (Internal use only) */ - ExpansionData *reserved; + struct _Element * array; + OSOrderFunction ordering; + void * orderingRef; + unsigned int count; + unsigned int capacity; + unsigned int capacityIncrement; + + struct ExpansionData { }; + +/* Reserved for future use. (Internal use only) */ + ExpansionData *reserved; protected: - /* OSCollectionIterator interfaces. */ - virtual unsigned int iteratorSize() const APPLE_KEXT_OVERRIDE; - virtual bool initIterator(void *iterator) const APPLE_KEXT_OVERRIDE; - virtual bool getNextObjectForIterator(void *iterator, OSObject **ret) const APPLE_KEXT_OVERRIDE; +/* OSCollectionIterator interfaces. */ + virtual unsigned int iteratorSize() const APPLE_KEXT_OVERRIDE; + virtual bool initIterator(void *iterator) const APPLE_KEXT_OVERRIDE; + virtual bool getNextObjectForIterator(void *iterator, OSObject **ret) const APPLE_KEXT_OVERRIDE; public: - /*! - * @function withCapacity - * - * @abstract - * Creates and initializes an empty OSOrderedSet. - * - * @param capacity The initial storage capacity - * of the new ordered set object. - * @param orderFunc A C function that implements the sorting algorithm - * for the set. - * @param orderingContext An ordering context, - * which is passed to orderFunc. - * @result - * An empty instance of OSOrderedSet - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * capacity must be nonzero. - * The new OSOrderedSet will grow as needed - * to accommodate more key/object pairs - * (unlike Core Foundation collections, - * for which the initial capacity is a hard limit). - * - * If orderFunc is provided, it is used by - * @link - * //apple_ref/cpp/instm/OSOrderedSet/setObject/virtualbool/(constOSMetaClassBase*) - * setObject(const OSMetaClassBase *)@/link - * to determine where to insert a new object. - * Other object-setting functions ignore ordering. - * - * orderingContext is not retained or otherwise memory-managed - * by the ordered set. - * If it needs to be deallocated, - * you must track references to it and the ordered set - * in order to deallocate it appropriately. - * See - * @link getOrderingRef getOrderingRef@/link. - */ - static OSOrderedSet * withCapacity( - unsigned int capacity, - OSOrderFunction orderFunc = 0, - void * orderingContext = 0); - - - /*! - * @function initWithCapacity - * - * @abstract - * Initializes a new instance of OSOrderedSet. - * - * @param capacity The initial storage capacity - * of the new ordered set object. - * @param orderFunc A C function that implements the sorting algorithm - * for the set. - * @param orderingContext An ordering context, - * which is passed to orderFunc. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSOrderedSet/withCapacity/staticOSOrderedSet*\/(unsignedint,OSOrderFunction,void*) - * withCapacity@/link - * instead. - * - * capacity must be nonzero. - * The new set will grow as needed to accommodate more key/object pairs - * (unlike Core Foundation collections, - * for which the initial capacity is a hard limit). 
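A minimal sketch of the OSOrderFunction contract and withCapacity creation just described; numberOrder, makeAscendingSet, and the ascending policy are invented for illustration:

    #include <libkern/c++/OSOrderedSet.h>
    #include <libkern/c++/OSNumber.h>

    // Ascending order: a positive result means obj1 should precede obj2.
    static SInt32
    numberOrder(const OSMetaClassBase * obj1, const OSMetaClassBase * obj2,
        void * context __unused)
    {
        OSNumber * a = OSDynamicCast(OSNumber, obj1);
        OSNumber * b = OSDynamicCast(OSNumber, obj2);
        if (!a || !b) {
            return 0;   // non-numbers compare as equivalent
        }
        if (a->unsigned32BitValue() < b->unsigned32BitValue()) {
            return 1;   // obj1 precedes obj2
        }
        if (a->unsigned32BitValue() > b->unsigned32BitValue()) {
            return -1;  // obj2 precedes obj1
        }
        return 0;
    }

    // Caller releases the result; NULL on allocation failure.
    static OSOrderedSet *
    makeAscendingSet(void)
    {
        return OSOrderedSet::withCapacity(4, &numberOrder);
    }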
- * - * If orderFunc is provided, it is used by - * @link - * //apple_ref/cpp/instm/OSOrderedSet/setObject/virtualbool/(constOSMetaClassBase*) - * setObject(const OSMetaClassBase *)@/link - * to determine where to insert a new object. - * Other object-setting functions ignore ordering. - * - * orderingContext is not retained or otherwise memory-managed - * by the ordered set. - * If it needs to be deallocated, - * you must track references to it and the ordered set - * in order to deallocate it appropriately. - * See - * @link getOrderingRef getOrderingRef@/link. - */ - virtual bool initWithCapacity( - unsigned int capacity, - OSOrderFunction orderFunc = 0, - void * orderingContext = 0); - - - /*! - * @function free - * - * @abstract - * Deallocatesand releases any resources - * used by the OSOrderedSet instance. - * - * @discussion - * This function should not be called directly; - * use - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link - * instead. - */ - virtual void free() APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCount - * - * @abstract - * Returns the current number of objects within the ordered set. - * - * @result - * The current number of objects within the ordered set. - */ - virtual unsigned int getCount() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCapacity - * - * @abstract - * Returns the number of objects the ordered set - * can store without reallocating. - * - * @result - * The number objects the ordered set - * can store without reallocating. - * - * @discussion - * OSOrderedSet objects grow when full to accommodate additional objects. - * See - * @link - * //apple_ref/cpp/instm/OSOrderedSet/getCapacityIncrement/virtualunsignedint/() - * getCapacityIncrement@/link - * and - * @link - * //apple_ref/cpp/instm/OSOrderedSet/ensureCapacity/virtualunsignedint/(unsignedint) - * ensureCapacity@/link. - */ - virtual unsigned int getCapacity() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCapacityIncrement - * - * @abstract - * Returns the storage increment of the ordered set. - * - * @result - * The storage increment of the ordered set. - * - * @discussion - * An OSOrderedSet allocates storage for objects in multiples - * of the capacity increment. - */ - virtual unsigned int getCapacityIncrement() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function setCapacityIncrement - * - * @abstract - * Sets the storage increment of the ordered set. - * - * @result - * The new storage increment of the ordered set, - * which may be different from the number requested. - * - * @discussion - * An OSOrderedSet allocates storage for objects in multiples - * of the capacity increment. - * Calling this function does not immediately reallocate storage. - */ - virtual unsigned int setCapacityIncrement(unsigned increment) APPLE_KEXT_OVERRIDE; - - - /*! - * @function ensureCapacity - * - * @abstract - * Ensures the set has enough space - * to store the requested number of distinct objects. - * - * @param newCapacity The total number of distinct objects the ordered set - * should be able to store. - * - * @result - * The new capacity of the ordered set, - * which may be different from the number requested - * (if smaller, reallocation of storage failed). - * - * @discussion - * This function immediately resizes the ordered set, if necessary, - * to accommodate at least newCapacity distinct objects. - * If newCapacity is not greater than the current capacity, - * or if an allocation error occurs, the original capacity is returned. 
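Since ensureCapacity reports failure only through its return value, callers compare the result against the request; a small sketch with a hypothetical growTo helper:

    // True only if the ordered set can now hold at least 'want' objects.
    static bool
    growTo(OSOrderedSet * set, unsigned int want)
    {
        return set != NULL && set->ensureCapacity(want) >= want;
    }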
- * - * There is no way to reduce the capacity of an OSOrderedSet. - */ - virtual unsigned int ensureCapacity(unsigned int newCapacity) APPLE_KEXT_OVERRIDE; - - - /*! - * @function flushCollection - * - * @abstract - * Removes and releases all objects within the ordered set. - * - * @discussion - * The ordered set's capacity (and therefore direct memory consumption) - * is not reduced by this function. - */ - virtual void flushCollection() APPLE_KEXT_OVERRIDE; - - - /*! - * @function setObject - * - * @abstract - * Adds an object to the OSOrderedSet if it is not already present, - * storing it in sorted order if there is an order function. - * - * @param anObject The OSMetaClassBase-derived object to be added - * to the ordered set. - * @result - * true if anObject was successfully - * added to the ordered set, false otherwise - * (including if it was already in the ordered set). - * - * @discussion - * The set adds storage to accomodate the new object, if necessary. - * If successfully added, the object is retained. - * - * If anObject is not already in the ordered set - * and there is an order function, - * this function loops through the existing objects, - * calling the @link OSOrderFunction order function@/link - * with arguments each existingObject, anObject, - * and the ordering context - * (or NULL if none was set), - * until the order function returns - * a value greater than or equal to 0. - * It then inserts anObject at the index of the existing object. - * - * If there is no order function, the object is inserted at index 0. - * - * A false return value can mean either - * that anObject is already present in the set, - * or that a memory allocation failure occurred. - * If you need to know whether the object - * is already present, use - * @link - * //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) - * containsObject(const OSMetaClassBase *)@/link. - */ - virtual bool setObject(const OSMetaClassBase * anObject); - - - /*! - * @function setFirstObject - * - * @abstract - * Adds an object to the OSOrderedSet at index 0 - * if it is not already present. - * - * @param anObject The OSMetaClassBase-derived object - * to be added to the ordered set. - * @result - * true if anObject was successfully added - * to the ordered set, false otherwise - * (including if it was already in the ordered set at any index). - * - * @discussion - * The set adds storage to accomodate the new object, if necessary. - * If successfully added, the object is retained. - * - * This function ignores any ordering function of the ordered set, - * and can disrupt the automatic sorting mechanism. - * Only call this function if you are managing the ordered set directly. - * - * A false return value can mean either that anObject - * is already present in the set, - * or that a memory allocation failure occurred. - * If you need to know whether the object - * is already present, use - * @link - * //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) - * containsObject(const OSMetaClassBase *)@/link. - */ - virtual bool setFirstObject(const OSMetaClassBase * anObject); - - - /*! - * @function setLastObject - * - * @abstract - * Adds an object at the end of the OSOrderedSet - * if it is not already present. - * - * @param anObject The OSMetaClassBase-derived object to be added - * to the ordered set. 
- * @result - * true if anObject was successfully added - * to the ordered set, false otherwise - * (including if it was already in the ordered set at any index). - * - * @discussion - * The set adds storage to accomodate the new object, if necessary. - * If successfully added, the object is retained. - * - * This function ignores any ordering function of the ordered set, - * and can disrupt the automatic sorting mechanism. - * Only call this function if you are managing the ordered set directly. - * - * A false return value can mean either that anObject - * is already present in the set, - * or that a memory allocation failure occurred. - * If you need to know whether the object - * is already present, use - * @link - * //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) - * containsObject(const OSMetaClassBase *)@/link. - */ - virtual bool setLastObject(const OSMetaClassBase * anObject); - - - /*! - * @function removeObject - * - * @abstract - * Removes an object from the ordered set. - * - * @param anObject The OSMetaClassBase-derived object - * to be removed from the ordered set. - * - * @discussion - * The object removed from the ordered set is released. - */ - virtual void removeObject(const OSMetaClassBase * anObject); - - - /*! - * @function containsObject - * - * @abstract - * Checks the ordered set for the presence of an object. - * - * @param anObject The OSMetaClassBase-derived object to check for - * in the ordered set. - * - * @result - * true if anObject is present - * within the ordered set, false otherwise. - * - * @discussion - * Pointer equality is used. - * This function returns false if passed NULL. - */ - virtual bool containsObject(const OSMetaClassBase * anObject) const; - - - /*! - * @function member - * - * @abstract - * Checks the ordered set for the presence of an object. - * - * @param anObject The OSMetaClassBase-derived object to check for - * in the ordered set. - * - * @result - * true if anObject is present - * within the ordered set, false otherwise. - * - * @discussion - * Pointer equality is used. - * Returns false if passed NULL. - * - * @link - * //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) - * containsObject(const OSMetaClassBase *)@/link - * checks for NULL before scanning the contents, - * and is therefore more efficient than this function. - */ - virtual bool member(const OSMetaClassBase * anObject) const; - - - /*! - * @function getFirstObject - * - * @abstract - * The object at index 0 in the ordered set if there is one, - * otherwise NULL. - * - * @discussion - * The returned object will be released if removed from the ordered set; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getFirstObject() const; - - - /*! - * @function getLastObject - * - * @abstract - * The last object in the ordered set if there is one, - * otherwise NULL. - * - * @discussion - * The returned object will be released if removed from the ordered set; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getLastObject() const; - - - /*! - * @function orderObject - * - * @abstract - * Calls the ordered set's order function against a NULL object. - * - * @param anObject The object to be ordered. 
- * - * @result - * The ordering value for the object. - * - * @discussion - * This function calls the ordered set's - * @link OSOrderFunction order function@/link - * with anObject, NULL, and the ordering context - * (or NULL if none was set), - * and returns the result of that function. - */ - virtual SInt32 orderObject(const OSMetaClassBase * anObject); - - - /*! - * @function setObject - * - * @abstract - * Adds an object to an OSOrderedSet at a specified index - * if it is not already present. - * - * @param index The index at which to insert the new object. - * @param anObject The OSMetaClassBase-derived object to be added - * to the ordered set. - * - * @result - * true if the object was successfully added - * to the ordered set, false otherwise - * (including if it was already in the set). - * - * @discussion - * The set adds storage to accomodate the new object, if necessary. - * If successfully added, the object is retained. - * - * This function ignores any ordering function of the ordered set, - * and can disrupt the automatic sorting mechanism. - * Only call this function if you are managing the ordered set directly. - * - * A false return value can mean either that the object - * is already present in the set, - * or that a memory allocation failure occurred. - * If you need to know whether the object - * is already present, use - * @link //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) - * containsObject containsObject@/link. - */ - virtual bool setObject( - unsigned int index, - const OSMetaClassBase * anObject); - - - /*! - * @function getObject - * - * @abstract - * Gets the object at a particular index. - * - * @param index The index into the set. - * @result - * The object at the given index, - * or NULL if none exists at that location. - * - * @discussion - * The returned object will be released if removed from the set; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getObject(unsigned int index) const; - - - /*! - * @function getOrderingRef - * - * @abstract - * Returns the ordering context the ordered set was created with. - * - * @result - * The ordered set's ordering context, - * or NULL if it doesn't have one. - */ - virtual void * getOrderingRef(); - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of two OSOrderedSet objects. - * - * @param anOrderedSet The ordered set object being compared - * against the receiver. - * @result - * true if the two sets are equivalent, - * false otherwise. - * - * @discussion - * Two OSOrderedSet objects are considered equal if they have same count - * and the same object pointer values in the same order. - */ - virtual bool isEqualTo(const OSOrderedSet * anOrderedSet) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of an OSOrderedSet - * against an arbitrary object. - * - * @param anObject The object being compared against the receiver. - * @result - * true if the two objects are equivalent, - * false otherwise. - * - * @discussion - * An OSOrderedSet object is considered equal to another object - * if the other object is derived from OSOrderedSet - * and compares equal as an OSOrderedSet. - */ - virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function setOptions - * - * Recursively sets option bits in the ordered set - * and all child collections. 
- * - * @param options A bitfield whose values turn the options on (1) or off (0). - * @param mask A mask indicating which bits - * in options to change. - * Pass 0 to get the whole current options bitfield - * without changing any settings. - * @param context Unused. - * - * @result - * The options bitfield as it was before the set operation. - * - * @discussion - * Kernel extensions should not call this function. - * - * Child collections' options are changed only if the receiving ordered set's - * options actually change. - */ - virtual unsigned setOptions( - unsigned options, - unsigned mask, - void * context = 0) APPLE_KEXT_OVERRIDE; - - - /*! - * @function copyCollection - * - * @abstract - * Creates a deep copy of this ordered set and its child collections. - * - * @param cycleDict A dictionary of all of the collections - * that have been copied so far, - * which is used to track circular references. - * To start the copy at the top level, - * pass NULL. - * - * @result - * The newly copied ordered set, with a retain count of 1, - * or NULL if there is insufficient memory to do the copy. - * - * @discussion - * The receiving ordered set, and any collections it contains, - * recursively, are copied. - * Objects that are not derived from OSCollection are retained - * rather than copied. - */ - OSCollection *copyCollection(OSDictionary * cycleDict = 0) APPLE_KEXT_OVERRIDE; - - OSMetaClassDeclareReservedUnused(OSOrderedSet, 0); - OSMetaClassDeclareReservedUnused(OSOrderedSet, 1); - OSMetaClassDeclareReservedUnused(OSOrderedSet, 2); - OSMetaClassDeclareReservedUnused(OSOrderedSet, 3); - OSMetaClassDeclareReservedUnused(OSOrderedSet, 4); - OSMetaClassDeclareReservedUnused(OSOrderedSet, 5); - OSMetaClassDeclareReservedUnused(OSOrderedSet, 6); - OSMetaClassDeclareReservedUnused(OSOrderedSet, 7); +/*! + * @function withCapacity + * + * @abstract + * Creates and initializes an empty OSOrderedSet. + * + * @param capacity The initial storage capacity + * of the new ordered set object. + * @param orderFunc A C function that implements the sorting algorithm + * for the set. + * @param orderingContext An ordering context, + * which is passed to orderFunc. + * @result + * An empty instance of OSOrderedSet + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * capacity must be nonzero. + * The new OSOrderedSet will grow as needed + * to accommodate more key/object pairs + * (unlike Core Foundation collections, + * for which the initial capacity is a hard limit). + * + * If orderFunc is provided, it is used by + * @link + * //apple_ref/cpp/instm/OSOrderedSet/setObject/virtualbool/(constOSMetaClassBase*) + * setObject(const OSMetaClassBase *)@/link + * to determine where to insert a new object. + * Other object-setting functions ignore ordering. + * + * orderingContext is not retained or otherwise memory-managed + * by the ordered set. + * If it needs to be deallocated, + * you must track references to it and the ordered set + * in order to deallocate it appropriately. + * See + * @link getOrderingRef getOrderingRef@/link. + */ + static OSOrderedSet * withCapacity( + unsigned int capacity, + OSOrderFunction orderFunc = 0, + void * orderingContext = 0); + + +/*! + * @function initWithCapacity + * + * @abstract + * Initializes a new instance of OSOrderedSet. + * + * @param capacity The initial storage capacity + * of the new ordered set object. + * @param orderFunc A C function that implements the sorting algorithm + * for the set. 
+ * @param orderingContext An ordering context, + * which is passed to orderFunc. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSOrderedSet/withCapacity/staticOSOrderedSet*\/(unsignedint,OSOrderFunction,void*) + * withCapacity@/link + * instead. + * + * capacity must be nonzero. + * The new set will grow as needed to accommodate more key/object pairs + * (unlike Core Foundation collections, + * for which the initial capacity is a hard limit). + * + * If orderFunc is provided, it is used by + * @link + * //apple_ref/cpp/instm/OSOrderedSet/setObject/virtualbool/(constOSMetaClassBase*) + * setObject(const OSMetaClassBase *)@/link + * to determine where to insert a new object. + * Other object-setting functions ignore ordering. + * + * orderingContext is not retained or otherwise memory-managed + * by the ordered set. + * If it needs to be deallocated, + * you must track references to it and the ordered set + * in order to deallocate it appropriately. + * See + * @link getOrderingRef getOrderingRef@/link. + */ + virtual bool initWithCapacity( + unsigned int capacity, + OSOrderFunction orderFunc = 0, + void * orderingContext = 0); + + +/*! + * @function free + * + * @abstract + * Deallocates and releases any resources + * used by the OSOrderedSet instance. + * + * @discussion + * This function should not be called directly; + * use + * @link + * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() + * release@/link + * instead. + */ + virtual void free() APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCount + * + * @abstract + * Returns the current number of objects within the ordered set. + * + * @result + * The current number of objects within the ordered set. + */ + virtual unsigned int getCount() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCapacity + * + * @abstract + * Returns the number of objects the ordered set + * can store without reallocating. + * + * @result + * The number of objects the ordered set + * can store without reallocating. + * + * @discussion + * OSOrderedSet objects grow when full to accommodate additional objects. + * See + * @link + * //apple_ref/cpp/instm/OSOrderedSet/getCapacityIncrement/virtualunsignedint/() + * getCapacityIncrement@/link + * and + * @link + * //apple_ref/cpp/instm/OSOrderedSet/ensureCapacity/virtualunsignedint/(unsignedint) + * ensureCapacity@/link. + */ + virtual unsigned int getCapacity() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCapacityIncrement + * + * @abstract + * Returns the storage increment of the ordered set. + * + * @result + * The storage increment of the ordered set. + * + * @discussion + * An OSOrderedSet allocates storage for objects in multiples + * of the capacity increment. + */ + virtual unsigned int getCapacityIncrement() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function setCapacityIncrement + * + * @abstract + * Sets the storage increment of the ordered set. + * + * @result + * The new storage increment of the ordered set, + * which may be different from the number requested. + * + * @discussion + * An OSOrderedSet allocates storage for objects in multiples + * of the capacity increment. + * Calling this function does not immediately reallocate storage. + */ + virtual unsigned int setCapacityIncrement(unsigned increment) APPLE_KEXT_OVERRIDE; + + +/*!
+ * @function ensureCapacity + * + * @abstract + * Ensures the set has enough space + * to store the requested number of distinct objects. + * + * @param newCapacity The total number of distinct objects the ordered set + * should be able to store. + * + * @result + * The new capacity of the ordered set, + * which may be different from the number requested + * (if smaller, reallocation of storage failed). + * + * @discussion + * This function immediately resizes the ordered set, if necessary, + * to accommodate at least newCapacity distinct objects. + * If newCapacity is not greater than the current capacity, + * or if an allocation error occurs, the original capacity is returned. + * + * There is no way to reduce the capacity of an OSOrderedSet. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity) APPLE_KEXT_OVERRIDE; + + +/*! + * @function flushCollection + * + * @abstract + * Removes and releases all objects within the ordered set. + * + * @discussion + * The ordered set's capacity (and therefore direct memory consumption) + * is not reduced by this function. + */ + virtual void flushCollection() APPLE_KEXT_OVERRIDE; + + +/*! + * @function setObject + * + * @abstract + * Adds an object to the OSOrderedSet if it is not already present, + * storing it in sorted order if there is an order function. + * + * @param anObject The OSMetaClassBase-derived object to be added + * to the ordered set. + * @result + * true if anObject was successfully + * added to the ordered set, false otherwise + * (including if it was already in the ordered set). + * + * @discussion + * The set adds storage to accommodate the new object, if necessary. + * If successfully added, the object is retained. + * + * If anObject is not already in the ordered set + * and there is an order function, + * this function loops through the existing objects, + * calling the @link OSOrderFunction order function@/link + * with each existing object, anObject, + * and the ordering context + * (or NULL if none was set) as arguments, + * until the order function returns + * a value greater than or equal to 0. + * It then inserts anObject at the index of the existing object. + * + * If there is no order function, the object is inserted at index 0. + * + * A false return value can mean either + * that anObject is already present in the set, + * or that a memory allocation failure occurred. + * If you need to know whether the object + * is already present, use + * @link + * //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) + * containsObject(const OSMetaClassBase *)@/link. + */ + virtual bool setObject(const OSMetaClassBase * anObject); + + +/*! + * @function setFirstObject + * + * @abstract + * Adds an object to the OSOrderedSet at index 0 + * if it is not already present. + * + * @param anObject The OSMetaClassBase-derived object + * to be added to the ordered set. + * @result + * true if anObject was successfully added + * to the ordered set, false otherwise + * (including if it was already in the ordered set at any index). + * + * @discussion + * The set adds storage to accommodate the new object, if necessary. + * If successfully added, the object is retained. + * + * This function ignores any ordering function of the ordered set, + * and can disrupt the automatic sorting mechanism. + * Only call this function if you are managing the ordered set directly.
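Continuing the hypothetical numberOrder sketch from earlier, the order-respecting setObject contrasts with the index-forcing variants like this:

    // Hypothetical: order-respecting vs. index-forcing insertion.
    static void
    insertSamples(OSOrderedSet * set)
    {
        OSNumber * seven = OSNumber::withNumber(7ULL, 32);
        OSNumber * three = OSNumber::withNumber(3ULL, 32);
        if (set && seven && three) {
            set->setObject(seven);       // position chosen by numberOrder()
            set->setObject(three);       // ordered ahead of 'seven' (3 < 7)
            set->setFirstObject(seven);  // returns false: already present
        }
        if (seven) {
            seven->release();            // the set holds its own references
        }
        if (three) {
            three->release();
        }
    }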
+ * + * A false return value can mean either that anObject + * is already present in the set, + * or that a memory allocation failure occurred. + * If you need to know whether the object + * is already present, use + * @link + * //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) + * containsObject(const OSMetaClassBase *)@/link. + */ + virtual bool setFirstObject(const OSMetaClassBase * anObject); + + +/*! + * @function setLastObject + * + * @abstract + * Adds an object at the end of the OSOrderedSet + * if it is not already present. + * + * @param anObject The OSMetaClassBase-derived object to be added + * to the ordered set. + * @result + * true if anObject was successfully added + * to the ordered set, false otherwise + * (including if it was already in the ordered set at any index). + * + * @discussion + * The set adds storage to accommodate the new object, if necessary. + * If successfully added, the object is retained. + * + * This function ignores any ordering function of the ordered set, + * and can disrupt the automatic sorting mechanism. + * Only call this function if you are managing the ordered set directly. + * + * A false return value can mean either that anObject + * is already present in the set, + * or that a memory allocation failure occurred. + * If you need to know whether the object + * is already present, use + * @link + * //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) + * containsObject(const OSMetaClassBase *)@/link. + */ + virtual bool setLastObject(const OSMetaClassBase * anObject); + + +/*! + * @function removeObject + * + * @abstract + * Removes an object from the ordered set. + * + * @param anObject The OSMetaClassBase-derived object + * to be removed from the ordered set. + * + * @discussion + * The object removed from the ordered set is released. + */ + virtual void removeObject(const OSMetaClassBase * anObject); + + +/*! + * @function containsObject + * + * @abstract + * Checks the ordered set for the presence of an object. + * + * @param anObject The OSMetaClassBase-derived object to check for + * in the ordered set. + * + * @result + * true if anObject is present + * within the ordered set, false otherwise. + * + * @discussion + * Pointer equality is used. + * This function returns false if passed NULL. + */ + virtual bool containsObject(const OSMetaClassBase * anObject) const; + + +/*! + * @function member + * + * @abstract + * Checks the ordered set for the presence of an object. + * + * @param anObject The OSMetaClassBase-derived object to check for + * in the ordered set. + * + * @result + * true if anObject is present + * within the ordered set, false otherwise. + * + * @discussion + * Pointer equality is used. + * Returns false if passed NULL. + * + * @link + * //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) + * containsObject(const OSMetaClassBase *)@/link + * checks for NULL before scanning the contents, + * and is therefore more efficient than this function. + */ + virtual bool member(const OSMetaClassBase * anObject) const; + + +/*! + * @function getFirstObject + * + * @abstract + * The object at index 0 in the ordered set if there is one, + * otherwise NULL. + * + * @discussion + * The returned object will be released if removed from the ordered set; + * if you plan to store the reference, you should call + * @link + * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() + * retain@/link + * on that object.
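In practice, the retain-before-store rule just stated might look like this (a sketch; copyFirstMember is hypothetical and a set is assumed to exist):

    // Returns a reference the caller must release, or NULL.
    static OSObject *
    copyFirstMember(OSOrderedSet * set)
    {
        OSObject * first = set ? set->getFirstObject() : NULL;  // borrowed
        if (first) {
            first->retain();  // our reference survives later removal
        }
        return first;
    }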
+ */ + virtual OSObject * getFirstObject() const; + + +/*! + * @function getLastObject + * + * @abstract + * The last object in the ordered set if there is one, + * otherwise NULL. + * + * @discussion + * The returned object will be released if removed from the ordered set; + * if you plan to store the reference, you should call + * @link + * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() + * retain@/link + * on that object. + */ + virtual OSObject * getLastObject() const; + + +/*! + * @function orderObject + * + * @abstract + * Calls the ordered set's order function against a NULL object. + * + * @param anObject The object to be ordered. + * + * @result + * The ordering value for the object. + * + * @discussion + * This function calls the ordered set's + * @link OSOrderFunction order function@/link + * with anObject, NULL, and the ordering context + * (or NULL if none was set), + * and returns the result of that function. + */ + virtual SInt32 orderObject(const OSMetaClassBase * anObject); + + +/*! + * @function setObject + * + * @abstract + * Adds an object to an OSOrderedSet at a specified index + * if it is not already present. + * + * @param index The index at which to insert the new object. + * @param anObject The OSMetaClassBase-derived object to be added + * to the ordered set. + * + * @result + * true if the object was successfully added + * to the ordered set, false otherwise + * (including if it was already in the set). + * + * @discussion + * The set adds storage to accommodate the new object, if necessary. + * If successfully added, the object is retained. + * + * This function ignores any ordering function of the ordered set, + * and can disrupt the automatic sorting mechanism. + * Only call this function if you are managing the ordered set directly. + * + * A false return value can mean either that the object + * is already present in the set, + * or that a memory allocation failure occurred. + * If you need to know whether the object + * is already present, use + * @link //apple_ref/cpp/instm/OSOrderedSet/containsObject/virtualbool/(constOSMetaClassBase*) + * containsObject@/link. + */ + virtual bool setObject( + unsigned int index, + const OSMetaClassBase * anObject); + + +/*! + * @function getObject + * + * @abstract + * Gets the object at a particular index. + * + * @param index The index into the set. + * @result + * The object at the given index, + * or NULL if none exists at that location. + * + * @discussion + * The returned object will be released if removed from the set; + * if you plan to store the reference, you should call + * @link + * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() + * retain@/link + * on that object. + */ + virtual OSObject * getObject(unsigned int index) const; + + +/*! + * @function getOrderingRef + * + * @abstract + * Returns the ordering context the ordered set was created with. + * + * @result + * The ordered set's ordering context, + * or NULL if it doesn't have one. + */ + virtual void * getOrderingRef(); + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of two OSOrderedSet objects. + * + * @param anOrderedSet The ordered set object being compared + * against the receiver. + * @result + * true if the two sets are equivalent, + * false otherwise. + * + * @discussion + * Two OSOrderedSet objects are considered equal if they have the same count + * and the same object pointer values in the same order. + */ + virtual bool isEqualTo(const OSOrderedSet * anOrderedSet) const; + + +/*!
+ * @function isEqualTo + * + * @abstract + * Tests the equality of an OSOrderedSet + * against an arbitrary object. + * + * @param anObject The object being compared against the receiver. + * @result + * true if the two objects are equivalent, + * false otherwise. + * + * @discussion + * An OSOrderedSet object is considered equal to another object + * if the other object is derived from OSOrderedSet + * and compares equal as an OSOrderedSet. + */ + virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function setOptions + * + * Recursively sets option bits in the ordered set + * and all child collections. + * + * @param options A bitfield whose values turn the options on (1) or off (0). + * @param mask A mask indicating which bits + * in options to change. + * Pass 0 to get the whole current options bitfield + * without changing any settings. + * @param context Unused. + * + * @result + * The options bitfield as it was before the set operation. + * + * @discussion + * Kernel extensions should not call this function. + * + * Child collections' options are changed only if the receiving ordered set's + * options actually change. + */ + virtual unsigned setOptions( + unsigned options, + unsigned mask, + void * context = 0) APPLE_KEXT_OVERRIDE; + + +/*! + * @function copyCollection + * + * @abstract + * Creates a deep copy of this ordered set and its child collections. + * + * @param cycleDict A dictionary of all of the collections + * that have been copied so far, + * which is used to track circular references. + * To start the copy at the top level, + * pass NULL. + * + * @result + * The newly copied ordered set, with a retain count of 1, + * or NULL if there is insufficient memory to do the copy. + * + * @discussion + * The receiving ordered set, and any collections it contains, + * recursively, are copied. + * Objects that are not derived from OSCollection are retained + * rather than copied. + */ + OSCollection *copyCollection(OSDictionary * cycleDict = 0) APPLE_KEXT_OVERRIDE; + + OSMetaClassDeclareReservedUnused(OSOrderedSet, 0); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 1); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 2); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 3); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 4); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 5); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 6); + OSMetaClassDeclareReservedUnused(OSOrderedSet, 7); }; #endif /* ! _OS_OSORDEREDSET_H */ diff --git a/libkern/libkern/c++/OSSerialize.h b/libkern/libkern/c++/OSSerialize.h index 53de72aa6..061830af2 100644 --- a/libkern/libkern/c++/OSSerialize.h +++ b/libkern/libkern/c++/OSSerialize.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSSerialize.h created by rsulack on Wen 25-Nov-1998 */ @@ -43,10 +43,10 @@ class OSArray; * @abstract * This header declares the OSSerialize class. */ - + OSObject * OSUnserializeBinary(const void *buffer, size_t bufferSize); - + /*! * @class OSSerialize * @@ -70,7 +70,7 @@ OSUnserializeBinary(const void *buffer, size_t bufferSize); * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSSerialize provides no concurrency protection; @@ -80,239 +80,239 @@ OSUnserializeBinary(const void *buffer, size_t bufferSize); * handle synchronization via defined member functions * for serializing properties. */ - + class OSSerialize : public OSObject { - OSDeclareDefaultStructors(OSSerialize) - friend class OSBoolean; + OSDeclareDefaultStructors(OSSerialize) + friend class OSBoolean; private: - char * data; // container for serialized data - unsigned int length; // of serialized data (counting NULL) - unsigned int capacity; // of container - unsigned int capacityIncrement; // of container + char * data; // container for serialized data + unsigned int length; // of serialized data (counting NULL) + unsigned int capacity; // of container + unsigned int capacityIncrement;// of container - OSArray * tags; // tags for all objects seen + OSArray * tags; // tags for all objects seen #ifdef XNU_KERNEL_PRIVATE public: - typedef const OSMetaClassBase * (*Editor)(void * reference, - OSSerialize * s, - OSCollection * container, - const OSSymbol * name, - const OSMetaClassBase * value); + typedef const OSMetaClassBase * (*Editor)(void * reference, + OSSerialize * s, + OSCollection * container, + const OSSymbol * name, + const OSMetaClassBase * value); #else - typedef void * Editor; + typedef void * Editor; #endif - bool binary; - bool endCollection; - Editor editor; - void * editRef; + bool binary; + bool endCollection; + Editor editor; + void * editRef; - bool binarySerialize(const OSMetaClassBase *o); - bool addBinary(const void * data, size_t size); - bool addBinaryObject(const OSMetaClassBase * o, uint32_t key, const void * _bits, size_t size); + bool binarySerialize(const OSMetaClassBase *o); + bool addBinary(const void * data, size_t size); + bool addBinaryObject(const OSMetaClassBase * o, uint32_t key, const void * _bits, size_t size); public: - /*! - * @function withCapacity - * - * @abstract - * Creates and initializes an empty OSSerialize object. - * - * @param capacity The initial size of the XML buffer. - * - * @result - * A new instance of OSSerialize - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * The serializer will grow as needed to accommodate more data. - */ - static OSSerialize * withCapacity(unsigned int capacity); - - static OSSerialize * binaryWithCapacity(unsigned int inCapacity, Editor editor = 0, void * reference = 0); - - /*! 
- * @function text - * - * @abstract - * Returns the XML text serialized so far. - * - * @result - * The nul-terminated XML data serialized so far. - */ - virtual char * text() const; - - - /*! - * @function clearText - * - * @abstract - * Resets the OSSerialize object. - * - * @discussion - * This function is a useful optimization if you are serializing - * the same object repeatedly. - */ - virtual void clearText(); - - // stuff to serialize your object - - /*! - * @function previouslySerialized - * - * @abstract - * Checks whether the object has already been serialized - * into the XML stream, emitting a reference if it has. - * - * @param object The object to check. - * - * @result - * true if object has already been serialized - * by this OSSerialize object and a reference - * to it is successfully added to the XML stream, - * false otherwise. - * - * - * @discussion - * This function both reduces the size of generated XML - * by emitting shorter references to existing objects with the same - * value (particularly for OSString, OSSymbol, and OSData), - * and also preserves instance references - * so that the user-space I/O Kit library can reconstruct - * an identical graph of object relationships. - * - * All classes that override - * @link - * //apple_ref/cpp/instm/OSObject/serialize/virtualbool/(OSSerialize*) - * OSObject::serialize@/link. - * should call this function before doing any actual serialization; - * if it returns true, the serialize implementation - * can immediately return true. - */ - virtual bool previouslySerialized(const OSMetaClassBase * object); - - - /*! - * @function addXMLStartTag - * - * @abstract - * Appends an XML start tag to the XML stream. - * - * @param object The object being serialized. - * @param tagString The name of the XML tag to emit; for example, "string". - * - * @result - * true if an XML start tag for tagString - * is successfully added to the XML stream, false otherwise. - * - * @discussion - * This function emits the named tag, - * enclosed within a pair of angle brackets. - * - * A class that implements serialization should call this function - * with the name of the XML tag that best represents the serialized - * contents of the object. - * A limited number of tags are supported by the user-space - * I/O Kit library: - *
- * <ul>
- *     <li>array</li>
- *     <li>dict</li>
- *     <li>integer</li>
- *     <li>key</li>
- *     <li>set</li>
- *     <li>string</li>
- * </ul>
    - * - * A call to this function must be balanced with one to - * @link addXMLEndTag addXMLEndTag@/link - * using the same tagString. - */ - virtual bool addXMLStartTag( - const OSMetaClassBase * object, - const char * tagString); - - - /*! - * @function addXMLEndTag - * - * @abstract - * Appends an XML end tag to the XML stream. - * - * @param tagString The name of the XML tag to emit; for example, "string". - * - * @result - * true if an XML end tag for tagString - * is successfully added to the XML stream, false otherwise. - * - * @discussion - * This function emits the named tag, - * preceded by a slash character to indicate the closing of an entity, - * all enclosed within a pair of angle brackets. - * - * A call to this function must balance an earlier call to - * @link addXMLStartTag addXMLStartTag@/link - * using the same tagString. - */ - virtual bool addXMLEndTag(const char * tagString); - - - /*! - * @function addChar - * - * @abstract - * Appends a single character to the XML stream. - * - * @param aChar The character to append to the XML stream. - * - * @result - * true if char - * is successfully added to the XML stream, false otherwise. - */ - virtual bool addChar(const char aChar); - - - /*! - * @function addString - * - * @abstract - * Appends a C string to the XML stream. - * - * @param cString The C string to append to the XML stream. - * - * @result - * true if cString - * is successfully added to the XML stream, false otherwise. - */ - virtual bool addString(const char * cString); - - // stuff you should never have to use (in theory) - - virtual bool initWithCapacity(unsigned int inCapacity); - virtual unsigned int getLength() const; - virtual unsigned int getCapacity() const; - virtual unsigned int getCapacityIncrement() const; - virtual unsigned int setCapacityIncrement(unsigned increment); - virtual unsigned int ensureCapacity(unsigned int newCapacity); - virtual void free() APPLE_KEXT_OVERRIDE; - - OSMetaClassDeclareReservedUnused(OSSerialize, 0); - OSMetaClassDeclareReservedUnused(OSSerialize, 1); - OSMetaClassDeclareReservedUnused(OSSerialize, 2); - OSMetaClassDeclareReservedUnused(OSSerialize, 3); - OSMetaClassDeclareReservedUnused(OSSerialize, 4); - OSMetaClassDeclareReservedUnused(OSSerialize, 5); - OSMetaClassDeclareReservedUnused(OSSerialize, 6); - OSMetaClassDeclareReservedUnused(OSSerialize, 7); +/*! + * @function withCapacity + * + * @abstract + * Creates and initializes an empty OSSerialize object. + * + * @param capacity The initial size of the XML buffer. + * + * @result + * A new instance of OSSerialize + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * The serializer will grow as needed to accommodate more data. + */ + static OSSerialize * withCapacity(unsigned int capacity); + + static OSSerialize * binaryWithCapacity(unsigned int inCapacity, Editor editor = 0, void * reference = 0); + +/*! + * @function text + * + * @abstract + * Returns the XML text serialized so far. + * + * @result + * The nul-terminated XML data serialized so far. + */ + virtual char * text() const; + + +/*! + * @function clearText + * + * @abstract + * Resets the OSSerialize object. + * + * @discussion + * This function is a useful optimization if you are serializing + * the same object repeatedly. + */ + virtual void clearText(); + +// stuff to serialize your object + +/*! + * @function previouslySerialized + * + * @abstract + * Checks whether the object has already been serialized + * into the XML stream, emitting a reference if it has. 
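The call pattern implied here, together with the addXMLStartTag/addXMLEndTag functions documented nearby, is the usual shape of a serialize override; a sketch with a hypothetical MyWidget subclass and an arbitrary value:

    // Hypothetical OSObject subclass emitting itself as a <string> element.
    bool
    MyWidget::serialize(OSSerialize * s) const
    {
        if (s->previouslySerialized(this)) {
            return true;  // a short back-reference was emitted instead
        }
        // <string> is one of the tags the user-space library accepts.
        return s->addXMLStartTag(this, "string") &&
            s->addString("widget") &&
            s->addXMLEndTag("string");
    }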
+ * + * @param object The object to check. + * + * @result + * true if object has already been serialized + * by this OSSerialize object and a reference + * to it is successfully added to the XML stream, + * false otherwise. + * + * + * @discussion + * This function both reduces the size of generated XML + * by emitting shorter references to existing objects with the same + * value (particularly for OSString, OSSymbol, and OSData), + * and also preserves instance references + * so that the user-space I/O Kit library can reconstruct + * an identical graph of object relationships. + * + * All classes that override + * @link + * //apple_ref/cpp/instm/OSObject/serialize/virtualbool/(OSSerialize*) + * OSObject::serialize@/link. + * should call this function before doing any actual serialization; + * if it returns true, the serialize implementation + * can immediately return true. + */ + virtual bool previouslySerialized(const OSMetaClassBase * object); + + +/*! + * @function addXMLStartTag + * + * @abstract + * Appends an XML start tag to the XML stream. + * + * @param object The object being serialized. + * @param tagString The name of the XML tag to emit; for example, "string". + * + * @result + * true if an XML start tag for tagString + * is successfully added to the XML stream, false otherwise. + * + * @discussion + * This function emits the named tag, + * enclosed within a pair of angle brackets. + * + * A class that implements serialization should call this function + * with the name of the XML tag that best represents the serialized + * contents of the object. + * A limited number of tags are supported by the user-space + * I/O Kit library: + *
+ * <ul>
+ *     <li>array</li>
+ *     <li>dict</li>
+ *     <li>integer</li>
+ *     <li>key</li>
+ *     <li>set</li>
+ *     <li>string</li>
+ * </ul>
    + * + * A call to this function must be balanced with one to + * @link addXMLEndTag addXMLEndTag@/link + * using the same tagString. + */ + virtual bool addXMLStartTag( + const OSMetaClassBase * object, + const char * tagString); + + +/*! + * @function addXMLEndTag + * + * @abstract + * Appends an XML end tag to the XML stream. + * + * @param tagString The name of the XML tag to emit; for example, "string". + * + * @result + * true if an XML end tag for tagString + * is successfully added to the XML stream, false otherwise. + * + * @discussion + * This function emits the named tag, + * preceded by a slash character to indicate the closing of an entity, + * all enclosed within a pair of angle brackets. + * + * A call to this function must balance an earlier call to + * @link addXMLStartTag addXMLStartTag@/link + * using the same tagString. + */ + virtual bool addXMLEndTag(const char * tagString); + + +/*! + * @function addChar + * + * @abstract + * Appends a single character to the XML stream. + * + * @param aChar The character to append to the XML stream. + * + * @result + * true if char + * is successfully added to the XML stream, false otherwise. + */ + virtual bool addChar(const char aChar); + + +/*! + * @function addString + * + * @abstract + * Appends a C string to the XML stream. + * + * @param cString The C string to append to the XML stream. + * + * @result + * true if cString + * is successfully added to the XML stream, false otherwise. + */ + virtual bool addString(const char * cString); + +// stuff you should never have to use (in theory) + + virtual bool initWithCapacity(unsigned int inCapacity); + virtual unsigned int getLength() const; + virtual unsigned int getCapacity() const; + virtual unsigned int getCapacityIncrement() const; + virtual unsigned int setCapacityIncrement(unsigned increment); + virtual unsigned int ensureCapacity(unsigned int newCapacity); + virtual void free() APPLE_KEXT_OVERRIDE; + + OSMetaClassDeclareReservedUnused(OSSerialize, 0); + OSMetaClassDeclareReservedUnused(OSSerialize, 1); + OSMetaClassDeclareReservedUnused(OSSerialize, 2); + OSMetaClassDeclareReservedUnused(OSSerialize, 3); + OSMetaClassDeclareReservedUnused(OSSerialize, 4); + OSMetaClassDeclareReservedUnused(OSSerialize, 5); + OSMetaClassDeclareReservedUnused(OSSerialize, 6); + OSMetaClassDeclareReservedUnused(OSSerialize, 7); }; typedef bool (*OSSerializerCallback)(void * target, void * ref, - OSSerialize * serializer); + OSSerialize * serializer); #ifdef __BLOCKS__ typedef bool (^OSSerializerBlock)(OSSerialize * serializer); @@ -321,32 +321,32 @@ typedef bool (^OSSerializerBlock)(OSSerialize * serializer); class OSSerializer : public OSObject { - OSDeclareDefaultStructors(OSSerializer) + OSDeclareDefaultStructors(OSSerializer) + + void * target; + void * ref; + OSSerializerCallback callback; - void * target; - void * ref; - OSSerializerCallback callback; - public: - static OSSerializer * forTarget( - void * target, - OSSerializerCallback callback, - void * ref = 0); + static OSSerializer * forTarget( + void * target, + OSSerializerCallback callback, + void * ref = 0); #ifdef __BLOCKS__ - static OSSerializer * withBlock( - OSSerializerBlock callback); + static OSSerializer * withBlock( + OSSerializerBlock callback); #endif - virtual void free( void ) APPLE_KEXT_OVERRIDE; + virtual void free( void ) APPLE_KEXT_OVERRIDE; #if XNU_KERNEL_PRIVATE - static bool callbackToBlock(void * target, void * ref, - OSSerialize * serializer); + static bool callbackToBlock(void * target, void * ref, + 
OSSerialize * serializer); #endif /* XNU_KERNEL_PRIVATE */ - virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; + virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; }; #endif /* _OS_OSSERIALIZE_H */ diff --git a/libkern/libkern/c++/OSSet.h b/libkern/libkern/c++/OSSet.h index b64ddb575..bec190e9f 100644 --- a/libkern/libkern/c++/OSSet.h +++ b/libkern/libkern/c++/OSSet.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOSet.h created by rsulack on Thu 11-Jun-1998 */ @@ -41,8 +41,8 @@ class OSArray; * @abstract * This header declares the OSSet collection class. */ - - + + /*! * @class OSSet * @@ -72,7 +72,7 @@ class OSArray; * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSSet provides no concurrency protection; @@ -84,706 +84,706 @@ class OSArray; */ class OSSet : public OSCollection { - friend class OSSerialize; + friend class OSSerialize; - OSDeclareDefaultStructors(OSSet) + OSDeclareDefaultStructors(OSSet) #if APPLE_KEXT_ALIGN_CONTAINERS private: - OSArray * members; + OSArray * members; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ private: - OSArray * members; + OSArray * members; protected: - struct ExpansionData { }; + struct ExpansionData { }; - /* Reserved for future use. (Internal use only) */ - ExpansionData * reserved; +/* Reserved for future use. (Internal use only) */ + ExpansionData * reserved; #endif /* APPLE_KEXT_ALIGN_CONTAINERS */ - /* - * OSCollectionIterator interfaces. - */ - virtual unsigned int iteratorSize() const APPLE_KEXT_OVERRIDE; - virtual bool initIterator(void * iterator) const APPLE_KEXT_OVERRIDE; - virtual bool getNextObjectForIterator(void * iterator, OSObject ** ret) const APPLE_KEXT_OVERRIDE; +/* + * OSCollectionIterator interfaces. + */ + virtual unsigned int iteratorSize() const APPLE_KEXT_OVERRIDE; + virtual bool initIterator(void * iterator) const APPLE_KEXT_OVERRIDE; + virtual bool getNextObjectForIterator(void * iterator, OSObject ** ret) const APPLE_KEXT_OVERRIDE; public: - /*! - * @function withCapacity - * - * @abstract - * Creates and initializes an empty OSSet. - * - * @param capacity The initial storage capacity of the new set object. 
- * - * @result - * An empty instance of OSSet - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * capacity must be nonzero. - * The new OSSet will grow as needed to accommodate more key/object pairs - * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, - * for which the initial capacity is a hard limit). - */ - static OSSet * withCapacity(unsigned int capacity); - - - /*! - * @function withObjects - * - * @abstract - * Creates and initializes an OSSet - * populated with objects provided. - * - * @param objects A C array of OSMetaClassBase-derived objects. - * @param count The number of objects to be placed into the set. - * @param capacity The initial storage capacity of the new set object. - * If 0, count is used; otherwise this value - * must be greater than or equal to count. - * - * @result - * An instance of OSSet - * containing the objects provided, - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * objects must be non-NULL, - * and count must be nonzero. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new OSSet will grow as needed to accommodate more objects - * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, - * for which the initial capacity is a hard limit). - * - * The objects in objects are retained for storage in the new set, - * not copied. - */ - static OSSet * withObjects( - const OSObject * objects[], - unsigned int count, - unsigned int capacity = 0); - - - /*! - * @function withArray - * - * @abstract - * Creates and initializes an OSSet - * populated with the contents of an OSArray. - * - * @param array An array whose objects will be stored in the new OSSet. - * @param capacity The initial storage capacity of the new set object. - * If 0, the capacity is set to the number of objects - * in array; - * otherwise capacity must be greater than or equal to - * the number of objects in array. - * @result - * An instance of OSSet containing - * the objects of array, - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * Each distinct object in array is added to the new set. - * - * array must be non-NULL. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new OSSet will grow as needed to accommodate more key-object pairs - * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, - * for which the initial capacity is a hard limit). - * - * The objects in array are retained for storage in the new set, - * not copied. - */ - static OSSet * withArray( - const OSArray * array, - unsigned int capacity = 0); - - - /*! - * @function withSet - * - * @abstract - * Creates and initializes an OSSet - * populated with the contents of another OSSet. - * - * @param set An OSSet whose contents will be stored - * in the new instance. - * @param capacity The initial storage capacity of the set object. - * If 0, the capacity is set to the number of objects - * in set; - * otherwise capacity must be greater than or equal to - * the number of objects in array. - * @result - * An instance of OSArray - * containing the objects of set, - * with a retain count of 1; - * NULL on failure. - * - * @discussion - * set must be non-NULL. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The array will grow as needed to accommodate more key-object pairs - * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, - * for which the initial capacity is a hard limit). 
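A short usage sketch for the creation methods documented here, assuming only OSSet and OSString; error handling for failed string creation is elided:

    #include <libkern/c++/OSSet.h>
    #include <libkern/c++/OSString.h>

    static OSSet *
    makeBusSet(void)
    {
        const OSObject * objects[2];
        objects[0] = OSString::withCString("usb");
        objects[1] = OSString::withCString("pci");

        // capacity defaults to count (2); the set retains both strings,
        // so the creation references can be dropped immediately.
        OSSet * set = OSSet::withObjects(objects, 2);

        objects[0]->release();
        objects[1]->release();
        return set;    // NULL on allocation failure
    }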
- * - * The objects in set are retained for storage in the new set, - * not copied. - */ - static OSSet * withSet(const OSSet * set, - unsigned int capacity = 0); - - - /*! - * @function initWithCapacity - * - * @abstract - * Initializes a new instance of OSSet. - * - * @param capacity The initial storage capacity of the new set object. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSSet/withCapacity/staticOSSet*\/(unsignedint) - * withCapacity@/link - * instead. - * - * capacity must be nonzero. - * The new set will grow as needed to accommodate more key/object pairs - * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, - * for which the initial capacity is a hard limit). - */ - virtual bool initWithCapacity(unsigned int capacity); - - - /*! - * @function initWithObjects - * - * @abstract - * Initializes a new OSSet populated with objects provided. - * - * @param objects A C array of OSObject-derived objects. - * @param count The number of objects to be placed into the set. - * @param capacity The initial storage capacity of the new set object. - * If 0, count is used; otherwise this value - * must be greater than or equal to count. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSSet/withObjects/staticOSSet*\/(constOSObject*,unsignedint,unsignedint) - * withObjects@/link - * instead. - * - * objects must be non-NULL, - * and count must be nonzero. - * If capacity is nonzero, it must be greater than or equal to count. - * The new array will grow as needed to accommodate more key-object pairs - * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, - * for which the initial capacity is a hard limit). - * - * The objects in objects are retained for storage in the new set, - * not copied. - */ - virtual bool initWithObjects( - const OSObject * objects[], - unsigned int count, - unsigned int capacity = 0); - - - /*! - * @function initWithArray - * - * @abstract Initializes a new OSSet - * populated with the contents of an OSArray. - * - * @param array An OSAray whose contents will be placed - * in the new instance. - * @param capacity The initial storage capacity of the new set object. - * If 0, the capacity is set - * to the number of objects in array; - * otherwise capacity must be greater than or equal to - * the number of objects in array. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link - * //apple_ref/cpp/clm/OSSet/withArray/staticOSSet*\/(constOSArray*,unsignedint) - * withArray@/link - * instead. - * - * array must be non-NULL. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new array will grow as needed to accommodate more key-object pairs - * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, - * for which the initial capacity is a hard limit). - * - * The objects in array are retained for storage in the new set, - * not copied. - */ - virtual bool initWithArray( - const OSArray * array, - unsigned int capacity = 0); - - - /*! - * @function initWithSet - * - * @abstract - * Initializes a new OSSet - * populated with the contents of another OSSet. - * - * @param set A set whose contents will be placed in the new instance. 
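The initWith* initializers exist to back the static with* creators, and as the discussions repeatedly note, kexts should not call them directly. A sketch of the two forms, with the internal pattern shown only approximately and only for illustration:

    OSSet * set = OSSet::withCapacity(8);          // preferred

    // Roughly what withCapacity does internally:
    // OSSet * raw = new OSSet;
    // if (raw && !raw->initWithCapacity(8)) {
    //     raw->release();
    //     raw = NULL;
    // }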
- * @param capacity The initial storage capacity of the new set object. - * If 0, the capacity is set - * to the number of objects in set; - * otherwise capacity must be greater than or equal to - * the number of objects in set. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link withSet withSet@/link instead. - * - * set must be non-NULL. - * If capacity is nonzero, - * it must be greater than or equal to count. - * The new set will grow as needed to accommodate more key-object pairs - * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, - * for which the initial capacity is a hard limit). - * - * The objects in set are retained for storage in the new set, - * not copied. - */ - virtual bool initWithSet(const OSSet *set, - unsigned int capacity = 0); - - - /*! - * @function free - * - * @abstract - * Deallocates or releases any resources - * used by the OSSet instance. - * - * @discussion - * This function should not be called directly; - * use - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link - * instead. - */ - virtual void free() APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCount - * - * @abstract - * Returns the current number of objects within the set. - * - * @result - * The current number of objects within the set. - */ - virtual unsigned int getCount() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCapacity - * - * @abstract - * Returns the number of objects the set - * can store without reallocating. - * - * @result - * The number objects the set - * can store without reallocating. - * - * @discussion - * OSSet objects grow when full to accommodate additional objects. - * See - * @link - * //apple_ref/cpp/instm/OSSet/getCapacityIncrement/virtualunsignedint/() - * getCapacityIncrement@/link - * and - * @link - * //apple_ref/cpp/instm/OSSet/ensureCapacity/virtualunsignedint/(unsignedint) - * ensureCapacity@/link. - */ - virtual unsigned int getCapacity() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function getCapacityIncrement - * - * @abstract - * Returns the storage increment of the set. - * - * @result - * The storage increment of the set. - * - * @discussion - * An OSSet allocates storage for objects in multiples - * of the capacity increment. - */ - virtual unsigned int getCapacityIncrement() const APPLE_KEXT_OVERRIDE; - - - /*! - * @function setCapacityIncrement - * - * @abstract - * Sets the storage increment of the set. - * - * @result - * The new storage increment of the set, - * which may be different from the number requested. - * - * @discussion - * An OSSet allocates storage for objects in multiples - * of the capacity increment. - * Calling this function does not immediately reallocate storage. - */ - virtual unsigned int setCapacityIncrement(unsigned increment) APPLE_KEXT_OVERRIDE; - - - /*! - * @function ensureCapacity - * - * @abstract - * Ensures the set has enough space - * to store the requested number of distinct objects. - * - * @param newCapacity The total number of distinct objects the set - * should be able to store. - * @result - * The new capacity of the set, - * which may be different from the number requested - * (if smaller, reallocation of storage failed). - * - * @discussion - * This function immediately resizes the set, if necessary, - * to accommodate at least newCapacity distinct objects. 
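A sketch of the capacity interfaces above; because ensureCapacity returns the original capacity when it cannot grow the set, a result smaller than the request is the documented failure signal ("set" stands for any live OSSet):

    set->setCapacityIncrement(16);                 // grow in multiples of 16
    unsigned int newCap = set->ensureCapacity(64); // reserve before a burst of insertions
    if (newCap < 64) {
        // Reallocation failed: the set keeps its previous capacity
        // but remains usable at its current size.
    }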
- * If newCapacity is not greater than the current capacity, - * or if an allocation error occurs, the original capacity is returned. - * - * There is no way to reduce the capacity of an OSSet. - */ - virtual unsigned int ensureCapacity(unsigned int newCapacity) APPLE_KEXT_OVERRIDE; - - - /*! - * @function flushCollection - * - * @abstract - * Removes and releases all objects within the set. - * - * @discussion - * The set's capacity (and therefore direct memory consumption) - * is not reduced by this function. - */ - virtual void flushCollection() APPLE_KEXT_OVERRIDE; - - - /*! - * @function setObject - * - * @abstract - * Adds an object to the OSSet if it is not already present. - * - * @param anObject The OSMetaClassBase-derived object to be added to the set. - * - * @result - * true if anObject was successfully - * added to the set, false otherwise - * (including if it was already in the set). - * - * @discussion - * The set adds storage to accomodate the new object, if necessary. - * If successfully added, the object is retained. - * - * A false return value can mean either - * that anObject is already present in the set, - * or that a memory allocation failure occurred. - * If you need to know whether the object - * is already present, use - * @link containsObject containsObject@/link. - */ - virtual bool setObject(const OSMetaClassBase * anObject); - - - /*! - * @function merge - * - * @abstract - * Adds the contents of an OSArray to the set. - * - * @param array The OSArray object containing the objects to be added. - * - * @result - * true if all objects from array - * are successfully added the receiver (or were already present), - * false otherwise. - * - * @discussion - * This functions adds to the receiving set - * all objects from array - * that are not already in the receiving set. - * Objects added to the receiver are retained. - * - * In releases prior to 10.7, this function would return false - * if an object from array was already present in the set, - * or if array was empty. - * This is no longer the case, so this function correctly returns true - * when the semantic of merging is met. - */ - virtual bool merge(const OSArray * array); - - - /*! - * @function merge - * - * @abstract - * Adds the contents of an OSet to the set. - * - * @param set The OSSet object containing the objects to be added. - * - * @result - * true if any object from set - * are successfully added the receiver (or were already present), - * false otherwise. - * - * @discussion - * This functions adds to the receiving set - * all objects from set - * that are not already in the receiving set. - * Objects added to the receiver are retained. - * - * In releases prior to 10.7, this function would return false - * if an object from set was already present in the set, - * or if set was empty. - * This is no longer the case, so this function correctly returns true - * when the semantic of merging is met. - */ - virtual bool merge(const OSSet * set); - - - /*! - * @function removeObject - * - * @abstract - * Removes an object from the set. - * - * @param anObject The OSMetaClassBase-derived object - * to be removed from the set. - * - * @discussion - * The object removed from the set is released. - */ - virtual void removeObject(const OSMetaClassBase * anObject); - - - /*! - * @function containsObject - * - * @abstract - * Checks the set for the presence of an object. - * - * @param anObject The OSMetaClassBase-derived object - * to check for in the set. 
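The false return from setObject is ambiguous, as the discussion above notes; a sketch of the disambiguation it recommends ("aString" is a hypothetical OSMetaClassBase-derived object):

    if (!set->setObject(aString)) {
        if (set->containsObject(aString)) {
            // Already present: treat as success for idempotent insertion.
        } else {
            // Storage could not be grown: a genuine failure.
        }
    }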
- * - * @result - * true if anObject is present within the set, - * false otherwise. - * - * @discussion - * Pointer equality is used. - * This function returns false if passed NULL. - */ - virtual bool containsObject(const OSMetaClassBase * anObject) const; - - - /*! - * @function member - * - * @abstract - * Checks the set for the presence of an object. - * - * @param anObject The OSMetaClassBase-derived object - * to check for in the set. - * - * @result - * true if anObject is present - * within the set, false otherwise. - * - * @discussion - * Pointer equality is used. This function returns false - * if passed NULL. - * - * @link containsObject containsObject@/link - * checks for NULL first, - * and is therefore more efficient than this function. - */ - virtual bool member(const OSMetaClassBase * anObject) const; - - - /*! - * @function getAnyObject - * - * @abstract - * Returns an arbitrary (not random) object from the set. - * - * @result - * An arbitrary (not random) object - * if one exists within the set. - * - * @discussion - * The returned object will be released if removed from the set; - * if you plan to store the reference, you should call - * @link - * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() - * retain@/link - * on that object. - */ - virtual OSObject * getAnyObject() const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of two OSSet objects. - * - * @param aSet The set object being compared against the receiver. - * @result - * true if the two sets are equivalent, - * false otherwise. - * - * @discussion - * Two OSSet objects are considered equal if they have same count - * and the same object pointer values. - */ - virtual bool isEqualTo(const OSSet * aSet) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of an OSSet against an arbitrary object. - * - * @param anObject The object being compared against the receiver. - * @result - * true if the two objects are equivalent, - * false otherwise. - * - * @discussion - * An OSSet object is considered equal to another object if the other object - * is derived from OSSet and compares equal as a set. - */ - virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function serialize - * - * @abstract - * Archives the receiver into the provided - * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. - * - * @param serializer The OSSerialize object. - * - * @result - * true if serialization succeeds, false if not. - */ - virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function setOptions - * - * @abstract - * Recursively sets option bits in the set - * and all child collections. - * - * @param options A bitfield whose values turn the options on (1) or off (0). - * @param mask A mask indicating which bits - * in options to change. - * Pass 0 to get the whole current options bitfield - * without changing any settings. - * @param context Unused. - * - * @result - * The options bitfield as it was before the set operation. - * - * @discussion - * Kernel extensions should not call this function. - * - * Child collections' options are changed only if the receiving set's - * options actually change. - */ - virtual unsigned setOptions(unsigned options, unsigned mask, void * context = 0) APPLE_KEXT_OVERRIDE; - - - /*! - * @function copyCollection - * - * @abstract - * Creates a deep copy of this set and its child collections. 
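getAnyObject returns an unretained reference, so the retain step described above matters whenever the object may outlive its set membership; a minimal sketch:

    OSObject * obj = set->getAnyObject();
    if (obj) {
        obj->retain();              // take a reference of our own
        set->removeObject(obj);     // drops the set's reference
        // ... use obj safely ...
        obj->release();
    }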
- * - * @param cycleDict A dictionary of all of the collections - * that have been copied so far, - * which is used to track circular references. - * To start the copy at the top level, - * pass NULL. - * - * @result - * The newly copied set, with a retain count of 1, - * or NULL if there is insufficient memory to do the copy. - * - * @discussion - * The receiving set, and any collections it contains, - * recursively, are copied. - * Objects that are not derived from OSCollection are retained - * rather than copied. - */ - OSCollection *copyCollection(OSDictionary *cycleDict = 0) APPLE_KEXT_OVERRIDE; - - OSMetaClassDeclareReservedUnused(OSSet, 0); - OSMetaClassDeclareReservedUnused(OSSet, 1); - OSMetaClassDeclareReservedUnused(OSSet, 2); - OSMetaClassDeclareReservedUnused(OSSet, 3); - OSMetaClassDeclareReservedUnused(OSSet, 4); - OSMetaClassDeclareReservedUnused(OSSet, 5); - OSMetaClassDeclareReservedUnused(OSSet, 6); - OSMetaClassDeclareReservedUnused(OSSet, 7); +/*! + * @function withCapacity + * + * @abstract + * Creates and initializes an empty OSSet. + * + * @param capacity The initial storage capacity of the new set object. + * + * @result + * An empty instance of OSSet + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * capacity must be nonzero. + * The new OSSet will grow as needed to accommodate more key/object pairs + * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, + * for which the initial capacity is a hard limit). + */ + static OSSet * withCapacity(unsigned int capacity); + + +/*! + * @function withObjects + * + * @abstract + * Creates and initializes an OSSet + * populated with objects provided. + * + * @param objects A C array of OSMetaClassBase-derived objects. + * @param count The number of objects to be placed into the set. + * @param capacity The initial storage capacity of the new set object. + * If 0, count is used; otherwise this value + * must be greater than or equal to count. + * + * @result + * An instance of OSSet + * containing the objects provided, + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * objects must be non-NULL, + * and count must be nonzero. + * If capacity is nonzero, + * it must be greater than or equal to count. + * The new OSSet will grow as needed to accommodate more objects + * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, + * for which the initial capacity is a hard limit). + * + * The objects in objects are retained for storage in the new set, + * not copied. + */ + static OSSet * withObjects( + const OSObject * objects[], + unsigned int count, + unsigned int capacity = 0); + + +/*! + * @function withArray + * + * @abstract + * Creates and initializes an OSSet + * populated with the contents of an OSArray. + * + * @param array An array whose objects will be stored in the new OSSet. + * @param capacity The initial storage capacity of the new set object. + * If 0, the capacity is set to the number of objects + * in array; + * otherwise capacity must be greater than or equal to + * the number of objects in array. + * @result + * An instance of OSSet containing + * the objects of array, + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * Each distinct object in array is added to the new set. + * + * array must be non-NULL. + * If capacity is nonzero, + * it must be greater than or equal to count. 
+ * The new OSSet will grow as needed to accommodate more key-object pairs + * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, + * for which the initial capacity is a hard limit). + * + * The objects in array are retained for storage in the new set, + * not copied. + */ + static OSSet * withArray( + const OSArray * array, + unsigned int capacity = 0); + + +/*! + * @function withSet + * + * @abstract + * Creates and initializes an OSSet + * populated with the contents of another OSSet. + * + * @param set An OSSet whose contents will be stored + * in the new instance. + * @param capacity The initial storage capacity of the set object. + * If 0, the capacity is set to the number of objects + * in set; + * otherwise capacity must be greater than or equal to + * the number of objects in array. + * @result + * An instance of OSArray + * containing the objects of set, + * with a retain count of 1; + * NULL on failure. + * + * @discussion + * set must be non-NULL. + * If capacity is nonzero, + * it must be greater than or equal to count. + * The array will grow as needed to accommodate more key-object pairs + * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, + * for which the initial capacity is a hard limit). + * + * The objects in set are retained for storage in the new set, + * not copied. + */ + static OSSet * withSet(const OSSet * set, + unsigned int capacity = 0); + + +/*! + * @function initWithCapacity + * + * @abstract + * Initializes a new instance of OSSet. + * + * @param capacity The initial storage capacity of the new set object. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSSet/withCapacity/staticOSSet*\/(unsignedint) + * withCapacity@/link + * instead. + * + * capacity must be nonzero. + * The new set will grow as needed to accommodate more key/object pairs + * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, + * for which the initial capacity is a hard limit). + */ + virtual bool initWithCapacity(unsigned int capacity); + + +/*! + * @function initWithObjects + * + * @abstract + * Initializes a new OSSet populated with objects provided. + * + * @param objects A C array of OSObject-derived objects. + * @param count The number of objects to be placed into the set. + * @param capacity The initial storage capacity of the new set object. + * If 0, count is used; otherwise this value + * must be greater than or equal to count. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSSet/withObjects/staticOSSet*\/(constOSObject*,unsignedint,unsignedint) + * withObjects@/link + * instead. + * + * objects must be non-NULL, + * and count must be nonzero. + * If capacity is nonzero, it must be greater than or equal to count. + * The new array will grow as needed to accommodate more key-object pairs + * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, + * for which the initial capacity is a hard limit). + * + * The objects in objects are retained for storage in the new set, + * not copied. + */ + virtual bool initWithObjects( + const OSObject * objects[], + unsigned int count, + unsigned int capacity = 0); + + +/*! + * @function initWithArray + * + * @abstract Initializes a new OSSet + * populated with the contents of an OSArray. 
+ * + * @param array An OSAray whose contents will be placed + * in the new instance. + * @param capacity The initial storage capacity of the new set object. + * If 0, the capacity is set + * to the number of objects in array; + * otherwise capacity must be greater than or equal to + * the number of objects in array. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link + * //apple_ref/cpp/clm/OSSet/withArray/staticOSSet*\/(constOSArray*,unsignedint) + * withArray@/link + * instead. + * + * array must be non-NULL. + * If capacity is nonzero, + * it must be greater than or equal to count. + * The new array will grow as needed to accommodate more key-object pairs + * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, + * for which the initial capacity is a hard limit). + * + * The objects in array are retained for storage in the new set, + * not copied. + */ + virtual bool initWithArray( + const OSArray * array, + unsigned int capacity = 0); + + +/*! + * @function initWithSet + * + * @abstract + * Initializes a new OSSet + * populated with the contents of another OSSet. + * + * @param set A set whose contents will be placed in the new instance. + * @param capacity The initial storage capacity of the new set object. + * If 0, the capacity is set + * to the number of objects in set; + * otherwise capacity must be greater than or equal to + * the number of objects in set. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link withSet withSet@/link instead. + * + * set must be non-NULL. + * If capacity is nonzero, + * it must be greater than or equal to count. + * The new set will grow as needed to accommodate more key-object pairs + * (unlike @link //apple_ref/doc/uid/20001503 CFMutableSet@/link, + * for which the initial capacity is a hard limit). + * + * The objects in set are retained for storage in the new set, + * not copied. + */ + virtual bool initWithSet(const OSSet *set, + unsigned int capacity = 0); + + +/*! + * @function free + * + * @abstract + * Deallocates or releases any resources + * used by the OSSet instance. + * + * @discussion + * This function should not be called directly; + * use + * @link + * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() + * release@/link + * instead. + */ + virtual void free() APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCount + * + * @abstract + * Returns the current number of objects within the set. + * + * @result + * The current number of objects within the set. + */ + virtual unsigned int getCount() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCapacity + * + * @abstract + * Returns the number of objects the set + * can store without reallocating. + * + * @result + * The number objects the set + * can store without reallocating. + * + * @discussion + * OSSet objects grow when full to accommodate additional objects. + * See + * @link + * //apple_ref/cpp/instm/OSSet/getCapacityIncrement/virtualunsignedint/() + * getCapacityIncrement@/link + * and + * @link + * //apple_ref/cpp/instm/OSSet/ensureCapacity/virtualunsignedint/(unsignedint) + * ensureCapacity@/link. + */ + virtual unsigned int getCapacity() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function getCapacityIncrement + * + * @abstract + * Returns the storage increment of the set. + * + * @result + * The storage increment of the set. 
+ * + * @discussion + * An OSSet allocates storage for objects in multiples + * of the capacity increment. + */ + virtual unsigned int getCapacityIncrement() const APPLE_KEXT_OVERRIDE; + + +/*! + * @function setCapacityIncrement + * + * @abstract + * Sets the storage increment of the set. + * + * @result + * The new storage increment of the set, + * which may be different from the number requested. + * + * @discussion + * An OSSet allocates storage for objects in multiples + * of the capacity increment. + * Calling this function does not immediately reallocate storage. + */ + virtual unsigned int setCapacityIncrement(unsigned increment) APPLE_KEXT_OVERRIDE; + + +/*! + * @function ensureCapacity + * + * @abstract + * Ensures the set has enough space + * to store the requested number of distinct objects. + * + * @param newCapacity The total number of distinct objects the set + * should be able to store. + * @result + * The new capacity of the set, + * which may be different from the number requested + * (if smaller, reallocation of storage failed). + * + * @discussion + * This function immediately resizes the set, if necessary, + * to accommodate at least newCapacity distinct objects. + * If newCapacity is not greater than the current capacity, + * or if an allocation error occurs, the original capacity is returned. + * + * There is no way to reduce the capacity of an OSSet. + */ + virtual unsigned int ensureCapacity(unsigned int newCapacity) APPLE_KEXT_OVERRIDE; + + +/*! + * @function flushCollection + * + * @abstract + * Removes and releases all objects within the set. + * + * @discussion + * The set's capacity (and therefore direct memory consumption) + * is not reduced by this function. + */ + virtual void flushCollection() APPLE_KEXT_OVERRIDE; + + +/*! + * @function setObject + * + * @abstract + * Adds an object to the OSSet if it is not already present. + * + * @param anObject The OSMetaClassBase-derived object to be added to the set. + * + * @result + * true if anObject was successfully + * added to the set, false otherwise + * (including if it was already in the set). + * + * @discussion + * The set adds storage to accomodate the new object, if necessary. + * If successfully added, the object is retained. + * + * A false return value can mean either + * that anObject is already present in the set, + * or that a memory allocation failure occurred. + * If you need to know whether the object + * is already present, use + * @link containsObject containsObject@/link. + */ + virtual bool setObject(const OSMetaClassBase * anObject); + + +/*! + * @function merge + * + * @abstract + * Adds the contents of an OSArray to the set. + * + * @param array The OSArray object containing the objects to be added. + * + * @result + * true if all objects from array + * are successfully added the receiver (or were already present), + * false otherwise. + * + * @discussion + * This functions adds to the receiving set + * all objects from array + * that are not already in the receiving set. + * Objects added to the receiver are retained. + * + * In releases prior to 10.7, this function would return false + * if an object from array was already present in the set, + * or if array was empty. + * This is no longer the case, so this function correctly returns true + * when the semantic of merging is met. + */ + virtual bool merge(const OSArray * array); + + +/*! + * @function merge + * + * @abstract + * Adds the contents of an OSet to the set. 
+ * + * @param set The OSSet object containing the objects to be added. + * + * @result + * true if any object from set + * are successfully added the receiver (or were already present), + * false otherwise. + * + * @discussion + * This functions adds to the receiving set + * all objects from set + * that are not already in the receiving set. + * Objects added to the receiver are retained. + * + * In releases prior to 10.7, this function would return false + * if an object from set was already present in the set, + * or if set was empty. + * This is no longer the case, so this function correctly returns true + * when the semantic of merging is met. + */ + virtual bool merge(const OSSet * set); + + +/*! + * @function removeObject + * + * @abstract + * Removes an object from the set. + * + * @param anObject The OSMetaClassBase-derived object + * to be removed from the set. + * + * @discussion + * The object removed from the set is released. + */ + virtual void removeObject(const OSMetaClassBase * anObject); + + +/*! + * @function containsObject + * + * @abstract + * Checks the set for the presence of an object. + * + * @param anObject The OSMetaClassBase-derived object + * to check for in the set. + * + * @result + * true if anObject is present within the set, + * false otherwise. + * + * @discussion + * Pointer equality is used. + * This function returns false if passed NULL. + */ + virtual bool containsObject(const OSMetaClassBase * anObject) const; + + +/*! + * @function member + * + * @abstract + * Checks the set for the presence of an object. + * + * @param anObject The OSMetaClassBase-derived object + * to check for in the set. + * + * @result + * true if anObject is present + * within the set, false otherwise. + * + * @discussion + * Pointer equality is used. This function returns false + * if passed NULL. + * + * @link containsObject containsObject@/link + * checks for NULL first, + * and is therefore more efficient than this function. + */ + virtual bool member(const OSMetaClassBase * anObject) const; + + +/*! + * @function getAnyObject + * + * @abstract + * Returns an arbitrary (not random) object from the set. + * + * @result + * An arbitrary (not random) object + * if one exists within the set. + * + * @discussion + * The returned object will be released if removed from the set; + * if you plan to store the reference, you should call + * @link + * //apple_ref/cpp/instm/OSObject/retain/virtualvoid/() + * retain@/link + * on that object. + */ + virtual OSObject * getAnyObject() const; + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of two OSSet objects. + * + * @param aSet The set object being compared against the receiver. + * @result + * true if the two sets are equivalent, + * false otherwise. + * + * @discussion + * Two OSSet objects are considered equal if they have same count + * and the same object pointer values. + */ + virtual bool isEqualTo(const OSSet * aSet) const; + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of an OSSet against an arbitrary object. + * + * @param anObject The object being compared against the receiver. + * @result + * true if the two objects are equivalent, + * false otherwise. + * + * @discussion + * An OSSet object is considered equal to another object if the other object + * is derived from OSSet and compares equal as a set. + */ + virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; + + +/*! 
+ * @function serialize + * + * @abstract + * Archives the receiver into the provided + * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. + * + * @param serializer The OSSerialize object. + * + * @result + * true if serialization succeeds, false if not. + */ + virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function setOptions + * + * @abstract + * Recursively sets option bits in the set + * and all child collections. + * + * @param options A bitfield whose values turn the options on (1) or off (0). + * @param mask A mask indicating which bits + * in options to change. + * Pass 0 to get the whole current options bitfield + * without changing any settings. + * @param context Unused. + * + * @result + * The options bitfield as it was before the set operation. + * + * @discussion + * Kernel extensions should not call this function. + * + * Child collections' options are changed only if the receiving set's + * options actually change. + */ + virtual unsigned setOptions(unsigned options, unsigned mask, void * context = 0) APPLE_KEXT_OVERRIDE; + + +/*! + * @function copyCollection + * + * @abstract + * Creates a deep copy of this set and its child collections. + * + * @param cycleDict A dictionary of all of the collections + * that have been copied so far, + * which is used to track circular references. + * To start the copy at the top level, + * pass NULL. + * + * @result + * The newly copied set, with a retain count of 1, + * or NULL if there is insufficient memory to do the copy. + * + * @discussion + * The receiving set, and any collections it contains, + * recursively, are copied. + * Objects that are not derived from OSCollection are retained + * rather than copied. + */ + OSCollection *copyCollection(OSDictionary *cycleDict = 0) APPLE_KEXT_OVERRIDE; + + OSMetaClassDeclareReservedUnused(OSSet, 0); + OSMetaClassDeclareReservedUnused(OSSet, 1); + OSMetaClassDeclareReservedUnused(OSSet, 2); + OSMetaClassDeclareReservedUnused(OSSet, 3); + OSMetaClassDeclareReservedUnused(OSSet, 4); + OSMetaClassDeclareReservedUnused(OSSet, 5); + OSMetaClassDeclareReservedUnused(OSSet, 6); + OSMetaClassDeclareReservedUnused(OSSet, 7); }; #endif /* !_OS_OSSET_H */ diff --git a/libkern/libkern/c++/OSString.h b/libkern/libkern/c++/OSString.h index 16fc61ddc..c761c9d28 100644 --- a/libkern/libkern/c++/OSString.h +++ b/libkern/libkern/c++/OSString.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
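Closing out OSSet: a sketch tying OSSet::serialize to the OSSerialize object it archives into. The 4 KB capacity is an arbitrary choice, and OSSerialize::withCapacity and text() are the standard creator and accessor from OSSerialize.h:

    #include <IOKit/IOLib.h>
    #include <libkern/c++/OSSerialize.h>

    OSSerialize * s = OSSerialize::withCapacity(4096);
    if (s) {
        if (set->serialize(s)) {
            IOLog("%s\n", s->text());   // the accumulated XML
        }
        s->release();
    }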
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOString.h created by rsulack on Wed 17-Sep-1997 */ @@ -42,13 +42,13 @@ class OSData; * @abstract * This header declares the OSString container class. */ - - - /* Not to be included in headerdoc. - * - * For internal use. - */ - enum { kOSStringNoCopy = 0x00000001 }; + + +/* Not to be included in headerdoc. + * + * For internal use. + */ +enum { kOSStringNoCopy = 0x00000001 }; /*! @@ -90,7 +90,7 @@ class OSData; * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSString provides no concurrency protection; @@ -102,355 +102,354 @@ class OSData; */ class OSString : public OSObject { + OSDeclareDefaultStructors(OSString) - OSDeclareDefaultStructors(OSString) - - enum { kMaxStringLength = 262142 }; + enum { kMaxStringLength = 262142 }; #if APPLE_KEXT_ALIGN_CONTAINERS protected: - unsigned int flags:14, - length:18; - char * string; + unsigned int flags:14, + length:18; + char * string; #else /* APPLE_KEXT_ALIGN_CONTAINERS */ protected: - char * string; - unsigned int flags; - unsigned int length; + char * string; + unsigned int flags; + unsigned int length; #endif /* APPLE_KEXT_ALIGN_CONTAINERS */ public: - /*! - * @function withString - * - * @abstract - * Creates and initializes an OSString from another OSString. - * - * @param aString The OSString object whose contents to copy. - * - * @result - * An instance of OSString representing - * the same characters as aString, - * and with a reference count of 1; - * NULL on failure. - * - * @discussion - * The new OSString is a distinct instance from aString, - * and is not merely the original object - * with the reference count incremented. - * Changes to one will not be reflected in the other. - */ - static OSString * withString(const OSString * aString); - - - /*! - * @function withCString - * - * @abstract - * Creates and initializes an OSString from a C string. - * - * @param cString The C string to copy into the new OSString. - * - * @result - * An instance of OSString representing - * the same characters as aString, - * and with a reference count of 1; - * NULL on failure. - */ - static OSString * withCString(const char * cString); - - - /*! - * @function withCStringNoCopy - * - * @abstract - * Creates and initializes an immutable OSString - * that shares the provided C string buffer. - * - * @param cString The C string to reference. - * - * @result - * An instance of OSString containing cString, - * and with a reference count of 1; - * NULL on failure. - * - * @discussion - * An OSString object created with this function - * does not claim ownership of the C string, - * but shares it with the caller. - * When the caller determines that the OSString object has actually been freed, - * it can safely dispose of the data buffer. - * Conversely, if it frees the shared data buffer, - * it must not attempt to use the OSString object and should release it. - * - * An OSString object created with this function does not - * allow changing the string via @link setChar setChar@/link. - */ - static OSString * withCStringNoCopy(const char * cString); +/*! + * @function withString + * + * @abstract + * Creates and initializes an OSString from another OSString. + * + * @param aString The OSString object whose contents to copy. 
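The three OSString creators differ mainly in ownership, which the sketch below makes concrete. kName is hypothetical; its static storage duration is what makes the NoCopy form safe here:

    #include <libkern/c++/OSString.h>

    static const char kName[] = "IOPlatformDevice";

    OSString * copied = OSString::withCString(kName);       // owns a private copy
    OSString * shared = OSString::withCStringNoCopy(kName); // references kName directly

    // "shared" must not outlive kName, and setChar() on it returns false.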
+ * + * @result + * An instance of OSString representing + * the same characters as aString, + * and with a reference count of 1; + * NULL on failure. + * + * @discussion + * The new OSString is a distinct instance from aString, + * and is not merely the original object + * with the reference count incremented. + * Changes to one will not be reflected in the other. + */ + static OSString * withString(const OSString * aString); + + +/*! + * @function withCString + * + * @abstract + * Creates and initializes an OSString from a C string. + * + * @param cString The C string to copy into the new OSString. + * + * @result + * An instance of OSString representing + * the same characters as aString, + * and with a reference count of 1; + * NULL on failure. + */ + static OSString * withCString(const char * cString); + + +/*! + * @function withCStringNoCopy + * + * @abstract + * Creates and initializes an immutable OSString + * that shares the provided C string buffer. + * + * @param cString The C string to reference. + * + * @result + * An instance of OSString containing cString, + * and with a reference count of 1; + * NULL on failure. + * + * @discussion + * An OSString object created with this function + * does not claim ownership of the C string, + * but shares it with the caller. + * When the caller determines that the OSString object has actually been freed, + * it can safely dispose of the data buffer. + * Conversely, if it frees the shared data buffer, + * it must not attempt to use the OSString object and should release it. + * + * An OSString object created with this function does not + * allow changing the string via @link setChar setChar@/link. + */ + static OSString * withCStringNoCopy(const char * cString); #if XNU_KERNEL_PRIVATE - static OSString * withStringOfLength(const char *cString, size_t length); + static OSString * withStringOfLength(const char *cString, size_t length); #endif /* XNU_KERNEL_PRIVATE */ - /*! - * @function initWithString - * - * @abstract - * Initializes an OSString from another OSString. - * - * @param aString The OSString object whose contents to copy. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link withString withString@/link instead. - */ - virtual bool initWithString(const OSString * aString); - - - /*! - * @function initWithCString - * - * @abstract - * Initializes an OSString from a C string. - * - * @param cString The C string to copy into the new OSString. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link withCString withCString@/link instead. - */ - virtual bool initWithCString(const char * cString); - - - /*! - * @function initWithCStringNoCopy - * - * @abstract - * Initializes an immutable OSString - * to share the provided C string buffer. - * - * @param cString The C string to reference. - * - * @result - * true on success, false on failure. - * - * @discussion - * Not for general use. Use the static instance creation method - * @link withCStringNoCopy withCStringNoCopy@/link instead. - * - * An OSString object initialized with this function - * does not claim ownership of the C string, - * but shares it with the caller. - * When the caller determines that the OSString object has actually been freed, - * it can safely dispose of the data buffer. 
- * Conversely, if it frees the shared data buffer, - * it must not attempt to use the OSString object and should release it. - * - * An OSString object created with this function does not - * allow changing the string via @link setChar setChar@/link. - */ - virtual bool initWithCStringNoCopy(const char * cString); +/*! + * @function initWithString + * + * @abstract + * Initializes an OSString from another OSString. + * + * @param aString The OSString object whose contents to copy. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link withString withString@/link instead. + */ + virtual bool initWithString(const OSString * aString); + + +/*! + * @function initWithCString + * + * @abstract + * Initializes an OSString from a C string. + * + * @param cString The C string to copy into the new OSString. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link withCString withCString@/link instead. + */ + virtual bool initWithCString(const char * cString); + + +/*! + * @function initWithCStringNoCopy + * + * @abstract + * Initializes an immutable OSString + * to share the provided C string buffer. + * + * @param cString The C string to reference. + * + * @result + * true on success, false on failure. + * + * @discussion + * Not for general use. Use the static instance creation method + * @link withCStringNoCopy withCStringNoCopy@/link instead. + * + * An OSString object initialized with this function + * does not claim ownership of the C string, + * but shares it with the caller. + * When the caller determines that the OSString object has actually been freed, + * it can safely dispose of the data buffer. + * Conversely, if it frees the shared data buffer, + * it must not attempt to use the OSString object and should release it. + * + * An OSString object created with this function does not + * allow changing the string via @link setChar setChar@/link. + */ + virtual bool initWithCStringNoCopy(const char * cString); #if XNU_KERNEL_PRIVATE - bool initWithStringOfLength(const char *cString, size_t inlength); + bool initWithStringOfLength(const char *cString, size_t inlength); #endif /* XNU_KERNEL_PRIVATE */ - /*! - * @function free - * - * @abstract - * Deallocates or releases any resources - * used by the OSString instance. - * - * @discussion - * This function should not be called directly; - * use - * @link - * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() - * release@/link - * instead. - */ - virtual void free() APPLE_KEXT_OVERRIDE; - - - /*! - * @function getLength - * - * @abstract - * Returns the number of characters in the OSString object. - * - * @result - * The number of characters in the OSString object. - */ - virtual unsigned int getLength() const; - - - /*! - * @function getChar - * - * @abstract - * Returns the character at a given index in the string object. - * - * @param index The index into the string. - * - * @result - * The character at index within the string, - * or '\0' if index is past the end of the string. - */ - virtual char getChar(unsigned int index) const; - - - /*! - * @function setChar - * - * @abstract - * Replaces a character at a given index in the string object. - * - * @param aChar The character value to set. - * @param index The index into the string. 
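A sketch of the accessor pair just documented: upper-casing the first character in place, which fails for NoCopy strings and for out-of-range indexes ("str" is a hypothetical mutable OSString):

    if (str->getLength() > 0) {
        char c = str->getChar(0);
        if (c >= 'a' && c <= 'z') {
            bool ok = str->setChar((char)(c - ('a' - 'A')), 0);
            // ok is false only if str was created "NoCopy"
            (void)ok;
        }
    }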
- * - * @result - * true if the character was replaced, - * false if the was created "NoCopy" - * or index is past the end of the string. - */ - virtual bool setChar(char aChar, unsigned int index); - - - /*! - * @function getCStringNoCopy - * - * @abstract - * Returns a pointer to the internal C string buffer. - * - * @result - * A pointer to the internal C string buffer. - */ - virtual const char * getCStringNoCopy() const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of two OSString objects. - * - * @param aString The OSString object being compared against the receiver. - * - * @result - * true if the two OSString objects are equivalent, - * false otherwise. - * - * @discussion - * Two OSString objects are considered equal if they have same length - * and if their byte buffers hold the same contents. - */ - virtual bool isEqualTo(const OSString * aString) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of an OSString object with a C string. - * - * @param cString The C string to compare against the receiver. - * - * @result - * true if the OSString's characters - * are equivalent to the C string's, - * false otherwise. - */ - virtual bool isEqualTo(const char * cString) const; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of an OSString object to an arbitrary object. - * - * @param anObject The object to be compared against the receiver. - * - * @result - * Returns true if the two objects are equivalent, - * false otherwise. - * - * @discussion - * An OSString is considered equal to another object - * if that object is derived from OSString - * and contains the equivalent bytes of the same length. - */ - virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of an OSData object and the OSString instance. - * - * @param aDataObject An OSData object. - * - * @result - * true if the two objects are equivalent, false otherwise. - * - * @discussion - * This function compares the bytes of the OSData object - * against those of the OSString, - * accounting for the possibility that an OSData - * might explicitly include a nul - * character as part of its total length. - * Thus, for example, an OSData object containing - * either the bytes <'u', 's', 'b', '\0'> - * or <'u', 's', 'b'> - * will compare as equal to the OSString containing "usb". - */ - virtual bool isEqualTo(const OSData * aDataObject) const; - - - /*! - * @function serialize - * - * @abstract - * Archives the receiver into the provided - * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. - * - * @param serializer The OSSerialize object. - * - * @result - * true if serialization succeeds, false if not. 
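The OSData comparison described above tolerates a single trailing NUL in the data, which a sketch shows most clearly (creation failures elided):

    #include <libkern/c++/OSData.h>
    #include <libkern/c++/OSString.h>

    OSString * s  = OSString::withCString("usb");
    OSData   * d3 = OSData::withBytes("usb", 3);   // bytes only
    OSData   * d4 = OSData::withBytes("usb", 4);   // includes the '\0'

    bool a = s->isEqualTo(d3);   // true
    bool b = s->isEqualTo(d4);   // also true

    s->release(); d3->release(); d4->release();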
- */ - virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; - - OSMetaClassDeclareReservedUnused(OSString, 0); - OSMetaClassDeclareReservedUnused(OSString, 1); - OSMetaClassDeclareReservedUnused(OSString, 2); - OSMetaClassDeclareReservedUnused(OSString, 3); - OSMetaClassDeclareReservedUnused(OSString, 4); - OSMetaClassDeclareReservedUnused(OSString, 5); - OSMetaClassDeclareReservedUnused(OSString, 6); - OSMetaClassDeclareReservedUnused(OSString, 7); - OSMetaClassDeclareReservedUnused(OSString, 8); - OSMetaClassDeclareReservedUnused(OSString, 9); - OSMetaClassDeclareReservedUnused(OSString, 10); - OSMetaClassDeclareReservedUnused(OSString, 11); - OSMetaClassDeclareReservedUnused(OSString, 12); - OSMetaClassDeclareReservedUnused(OSString, 13); - OSMetaClassDeclareReservedUnused(OSString, 14); - OSMetaClassDeclareReservedUnused(OSString, 15); +/*! + * @function free + * + * @abstract + * Deallocates or releases any resources + * used by the OSString instance. + * + * @discussion + * This function should not be called directly; + * use + * @link + * //apple_ref/cpp/instm/OSObject/release/virtualvoid/() + * release@/link + * instead. + */ + virtual void free() APPLE_KEXT_OVERRIDE; + + +/*! + * @function getLength + * + * @abstract + * Returns the number of characters in the OSString object. + * + * @result + * The number of characters in the OSString object. + */ + virtual unsigned int getLength() const; + + +/*! + * @function getChar + * + * @abstract + * Returns the character at a given index in the string object. + * + * @param index The index into the string. + * + * @result + * The character at index within the string, + * or '\0' if index is past the end of the string. + */ + virtual char getChar(unsigned int index) const; + + +/*! + * @function setChar + * + * @abstract + * Replaces a character at a given index in the string object. + * + * @param aChar The character value to set. + * @param index The index into the string. + * + * @result + * true if the character was replaced, + * false if the was created "NoCopy" + * or index is past the end of the string. + */ + virtual bool setChar(char aChar, unsigned int index); + + +/*! + * @function getCStringNoCopy + * + * @abstract + * Returns a pointer to the internal C string buffer. + * + * @result + * A pointer to the internal C string buffer. + */ + virtual const char * getCStringNoCopy() const; + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of two OSString objects. + * + * @param aString The OSString object being compared against the receiver. + * + * @result + * true if the two OSString objects are equivalent, + * false otherwise. + * + * @discussion + * Two OSString objects are considered equal if they have same length + * and if their byte buffers hold the same contents. + */ + virtual bool isEqualTo(const OSString * aString) const; + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of an OSString object with a C string. + * + * @param cString The C string to compare against the receiver. + * + * @result + * true if the OSString's characters + * are equivalent to the C string's, + * false otherwise. + */ + virtual bool isEqualTo(const char * cString) const; + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of an OSString object to an arbitrary object. + * + * @param anObject The object to be compared against the receiver. + * + * @result + * Returns true if the two objects are equivalent, + * false otherwise. 
+ * + * @discussion + * An OSString is considered equal to another object + * if that object is derived from OSString + * and contains the equivalent bytes of the same length. + */ + virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function isEqualTo + * + * @abstract + * Tests the equality of an OSData object and the OSString instance. + * + * @param aDataObject An OSData object. + * + * @result + * true if the two objects are equivalent, false otherwise. + * + * @discussion + * This function compares the bytes of the OSData object + * against those of the OSString, + * accounting for the possibility that an OSData + * might explicitly include a nul + * character as part of its total length. + * Thus, for example, an OSData object containing + * either the bytes <'u', 's', 'b', '\0'> + * or <'u', 's', 'b'> + * will compare as equal to the OSString containing "usb". + */ + virtual bool isEqualTo(const OSData * aDataObject) const; + + +/*! + * @function serialize + * + * @abstract + * Archives the receiver into the provided + * @link //apple_ref/doc/class/OSSerialize OSSerialize@/link object. + * + * @param serializer The OSSerialize object. + * + * @result + * true if serialization succeeds, false if not. + */ + virtual bool serialize(OSSerialize * serializer) const APPLE_KEXT_OVERRIDE; + + OSMetaClassDeclareReservedUnused(OSString, 0); + OSMetaClassDeclareReservedUnused(OSString, 1); + OSMetaClassDeclareReservedUnused(OSString, 2); + OSMetaClassDeclareReservedUnused(OSString, 3); + OSMetaClassDeclareReservedUnused(OSString, 4); + OSMetaClassDeclareReservedUnused(OSString, 5); + OSMetaClassDeclareReservedUnused(OSString, 6); + OSMetaClassDeclareReservedUnused(OSString, 7); + OSMetaClassDeclareReservedUnused(OSString, 8); + OSMetaClassDeclareReservedUnused(OSString, 9); + OSMetaClassDeclareReservedUnused(OSString, 10); + OSMetaClassDeclareReservedUnused(OSString, 11); + OSMetaClassDeclareReservedUnused(OSString, 12); + OSMetaClassDeclareReservedUnused(OSString, 13); + OSMetaClassDeclareReservedUnused(OSString, 14); + OSMetaClassDeclareReservedUnused(OSString, 15); }; #endif /* !_OS_OSSTRING_H */ diff --git a/libkern/libkern/c++/OSSymbol.h b/libkern/libkern/c++/OSSymbol.h index 1bb08101c..03490a026 100644 --- a/libkern/libkern/c++/OSSymbol.h +++ b/libkern/libkern/c++/OSSymbol.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* IOSymbol.h created by gvdl on Fri 1998-10-30 */ @@ -39,7 +39,7 @@ * @abstract * This header declares the OSSymbol container class. */ - + // xx-review: OSSymbol does not override setChar /*! @@ -68,7 +68,7 @@ * With very few exceptions in the I/O Kit, all Libkern-based C++ * classes, functions, and macros are unsafe * to use in a primary interrupt context. - * Consult the I/O Kit documentation related to primary interrupts + * Consult the I/O Kit documentation related to primary interrupts * for more information. * * OSSymbol provides no concurrency protection; @@ -80,307 +80,344 @@ */ class OSSymbol : public OSString { - friend class OSSymbolPool; + friend class OSSymbolPool; - OSDeclareAbstractStructors(OSSymbol) + OSDeclareAbstractStructors(OSSymbol) private: - static void initialize(); - - /*! - * @function initWithString - * - * @abstract - * Overridden to prevent creation of duplicate symbols. - * - * @param aString Unused. - * - * @result - * false. - * - * @discussion - * Overrides OSString's implementation to prevent creation - * of distinct OSSymbols with the same string value. - */ - virtual bool initWithString(const OSString * aString) APPLE_KEXT_OVERRIDE; - - - /*! - * @function initWithCString - * - * @abstract - * Overridden to prevent creation of duplicate symbols. - * - * @param cString Unused. - * - * @result - * false. - * - * @discussion - * Overrides OSString's implementation to prevent creation - * of distinct OSSymbols with the same string value. - */ - virtual bool initWithCString(const char * cString) APPLE_KEXT_OVERRIDE; - - - /*! - * @function initWithCStringNoCopy - * - * @abstract - * Overridden to prevent creation of duplicate symbols. - * - * @param cString Unused. - * - * @result - * false. - * - * @discussion - * Overrides OSString's implementation to prevent creation - * of distinct OSSymbols with the same string value. - */ - virtual bool initWithCStringNoCopy(const char *cString) APPLE_KEXT_OVERRIDE; + static void initialize(); + +/*! + * @function initWithString + * + * @abstract + * Overridden to prevent creation of duplicate symbols. + * + * @param aString Unused. + * + * @result + * false. + * + * @discussion + * Overrides OSString's implementation to prevent creation + * of distinct OSSymbols with the same string value. + */ + virtual bool initWithString(const OSString * aString) APPLE_KEXT_OVERRIDE; + + +/*! + * @function initWithCString + * + * @abstract + * Overridden to prevent creation of duplicate symbols. + * + * @param cString Unused. + * + * @result + * false. + * + * @discussion + * Overrides OSString's implementation to prevent creation + * of distinct OSSymbols with the same string value. + */ + virtual bool initWithCString(const char * cString) APPLE_KEXT_OVERRIDE; + + +/*! + * @function initWithCStringNoCopy + * + * @abstract + * Overridden to prevent creation of duplicate symbols. + * + * @param cString Unused. + * + * @result + * false. + * + * @discussion + * Overrides OSString's implementation to prevent creation + * of distinct OSSymbols with the same string value. + */ + virtual bool initWithCStringNoCopy(const char *cString) APPLE_KEXT_OVERRIDE; protected: // xx-review: should we just omit this from headerdoc? - /*! - * @function taggedRelease - * - * @abstract - * Overrides - * @link - * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*,constint) - * OSObject::taggedRelease(const void *, const int)@/link - * to synchronize with the symbol pool. 
- * - * @param tag Used for tracking collection references. - * @param freeWhen If decrementing the reference count makes it - * >= freeWhen, the object is immediately freed. - * - * @discussion - * Because OSSymbol shares instances, the reference-counting functions - * must synchronize access to the class-internal tables - * used to track those instances. - */ - virtual void taggedRelease( - const void * tag, - const int freeWhen) const APPLE_KEXT_OVERRIDE; +/*! + * @function taggedRelease + * + * @abstract + * Overrides + * @link + * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*,constint) + * OSObject::taggedRelease(const void *, const int)@/link + * to synchronize with the symbol pool. + * + * @param tag Used for tracking collection references. + * @param freeWhen If decrementing the reference count makes it + * >= freeWhen, the object is immediately freed. + * + * @discussion + * Because OSSymbol shares instances, the reference-counting functions + * must synchronize access to the class-internal tables + * used to track those instances. + */ + virtual void taggedRelease( + const void * tag, + const int freeWhen) const APPLE_KEXT_OVERRIDE; // xx-review: should we just omit this from headerdoc? - /*! - * @function free - * - * @abstract - * Overrides - * @link - * //apple_ref/cpp/instm/OSObject/free/virtualvoid/() - * OSObject::free@/link - * to synchronize with the symbol pool. - * - * @discussion - * Because OSSymbol shares instances, the reference-counting functions - * must synchronize access to the class-internal tables - * used to track those instances. - */ - virtual void free() APPLE_KEXT_OVERRIDE; +/*! + * @function free + * + * @abstract + * Overrides + * @link + * //apple_ref/cpp/instm/OSObject/free/virtualvoid/() + * OSObject::free@/link + * to synchronize with the symbol pool. + * + * @discussion + * Because OSSymbol shares instances, the reference-counting functions + * must synchronize access to the class-internal tables + * used to track those instances. + */ + virtual void free() APPLE_KEXT_OVERRIDE; public: // xx-review: should we just omit this from headerdoc? - /*! - * @function taggedRelease - * - * @abstract - * Overrides - * @link - * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*) - * OSObject::taggedRelease(const void *)@/link - * to synchronize with the symbol pool. - * - * @param tag Used for tracking collection references. - * - * @discussion - * Because OSSymbol shares instances, the reference-counting functions - * must synchronize access to the class-internal tables - * used to track those instances. - */ - - /* Original note (not for headerdoc): - * The C++ language has forced me to override this method - * even though I have implemented it as - * { super::taggedRelease(tag) }. - * It seems that C++ is confused about the appearance of the protected - * taggedRelease with 2 parameters and refuses to only inherit one function. - * See - * @link - * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*,constint) - * OSObject::taggedRelease(const void *, const int)@/link. - */ - virtual void taggedRelease(const void * tag) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function withString - * - * @abstract - * Returns an OSSymbol created from an OSString, - * or the existing unique instance of the same value. - * - * @param aString The OSString object to look up or copy. - * - * @result - * An instance of OSSymbol - * representing the same characters as aString; - * NULL on failure. 
- * - * @discussion - * This function creates or returns the unique OSSymbol instance - * representing the string value of aString. - * You can compare it with other OSSymbols using the == operator. - * - * OSSymbols are reference-counted normally. - * This function either returns a - * new OSSymbol with a retain count of 1, - * or increments the retain count of the existing instance. - */ - static const OSSymbol * withString(const OSString * aString); - - - /*! - * @function withCString - * - * @abstract - * Returns an OSSymbol created from a C string, - * or the existing unique instance of the same value. - * - * @param cString The C string to look up or copy. - * - * @result - * An instance of OSSymbol representing - * the same characters as cString; - * NULL on failure. - * - * @discussion - * This function returns the unique OSSymbol instance - * representing the string value of cString. - * You can compare it with other OSSymbols using the == operator. - * - * OSSymbols are reference-counted normally. - * This function either returns a - * new OSSymbol with a retain count of 1, - * or increments the retain count of the existing instance. - */ - static const OSSymbol * withCString(const char * cString); - - - /*! - * @function withCStringNoCopy - * - * @abstract - * Returns an OSSymbol created from a C string, - * without copying that string, - * or the existing unique instance of the same value. - * - * @param cString The C string to look up or use. - * @result - * An instance of OSSymbol representing - * the same characters as cString; - * NULL. - * - * @discussion - * Avoid using this function; - * OSSymbols should own their internal string buffers. - * - * This function returns the unique OSSymbol instance - * representing the string value of cString. - * You can compare it with other OSSymbols using the == operator. - * - * OSSymbols are reference-counted normally. - * This function either returns a - * new OSSymbol with a retain count of 1, - * or increments the retain count of the existing instance. - */ - static const OSSymbol * withCStringNoCopy(const char * cString); - - - /*! - * @function isEqualTo - * - * @abstract - * Tests the equality of two OSSymbol objects. - * - * @param aSymbol The OSSymbol object being compared against the receiver. - * - * @result - * true if the two OSSymbol objects are equivalent, - * false otherwise. - * - * @discussion - * Two OSSymbol objects are considered equal if they have the same address; - * that is, this function is equivalent to the == operator. - */ - virtual bool isEqualTo(const OSSymbol * aSymbol) const; - - - /*! - * @function isEqualTo - * - * @abstract Tests the equality of an OSSymbol object with a C string. - * - * @param cString The C string to compare against the receiver. - * - * @result - * true if the OSSymbol's characters - * are equivalent to the C string's, - * false otherwise. - */ - virtual bool isEqualTo(const char * cString) const APPLE_KEXT_OVERRIDE; - - - /*! - * @function isEqualTo - * - * @abstract Tests the equality of an OSSymbol object to an arbitrary object. - * - * @param anObject The object to be compared against the receiver. - * @result Returns true if the two objects are equivalent, - * false otherwise. - * - * @discussion - * An OSSymbol is considered equal to another object - * if that object is derived from - * @link //apple_ref/doc/class/OSMetaClassBase OSString@/link - * and contains the equivalent bytes of the same length. 
- */ - virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE; +/*! + * @function taggedRelease + * + * @abstract + * Overrides + * @link + * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*) + * OSObject::taggedRelease(const void *)@/link + * to synchronize with the symbol pool. + * + * @param tag Used for tracking collection references. + * + * @discussion + * Because OSSymbol shares instances, the reference-counting functions + * must synchronize access to the class-internal tables + * used to track those instances. + */ + +/* Original note (not for headerdoc): + * The C++ language has forced me to override this method + * even though I have implemented it as + * { super::taggedRelease(tag) }. + * It seems that C++ is confused about the appearance of the protected + * taggedRelease with 2 parameters and refuses to only inherit one function. + * See + * @link + * //apple_ref/cpp/instm/OSObject/taggedRelease/virtualvoid/(constvoid*,constint) + * OSObject::taggedRelease(const void *, const int)@/link. + */ + virtual void taggedRelease(const void * tag) const APPLE_KEXT_OVERRIDE; + + +/*! + * @function withString + * + * @abstract + * Returns an OSSymbol created from an OSString, + * or the existing unique instance of the same value. + * + * @param aString The OSString object to look up or copy. + * + * @result + * An instance of OSSymbol + * representing the same characters as aString; + * NULL on failure. + * + * @discussion + * This function creates or returns the unique OSSymbol instance + * representing the string value of aString. + * You can compare it with other OSSymbols using the == operator. + * + * OSSymbols are reference-counted normally. + * This function either returns a + * new OSSymbol with a retain count of 1, + * or increments the retain count of the existing instance. + */ + static const OSSymbol * withString(const OSString * aString); + + +/*! + * @function withCString + * + * @abstract + * Returns an OSSymbol created from a C string, + * or the existing unique instance of the same value. + * + * @param cString The C string to look up or copy. + * + * @result + * An instance of OSSymbol representing + * the same characters as cString; + * NULL on failure. + * + * @discussion + * This function returns the unique OSSymbol instance + * representing the string value of cString. + * You can compare it with other OSSymbols using the == operator. + * + * OSSymbols are reference-counted normally. + * This function either returns a + * new OSSymbol with a retain count of 1, + * or increments the retain count of the existing instance. + */ + static const OSSymbol * withCString(const char * cString); + + +/*! + * @function withCStringNoCopy + * + * @abstract + * Returns an OSSymbol created from a C string, + * without copying that string, + * or the existing unique instance of the same value. + * + * @param cString The C string to look up or use. + * @result + * An instance of OSSymbol representing + * the same characters as cString; + * NULL. + * + * @discussion + * Avoid using this function; + * OSSymbols should own their internal string buffers. + * + * This function returns the unique OSSymbol instance + * representing the string value of cString. + * You can compare it with other OSSymbols using the == operator. + * + * OSSymbols are reference-counted normally. + * This function either returns a + * new OSSymbol with a retain count of 1, + * or increments the retain count of the existing instance. 
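+ *
+ * An illustrative sketch of the uniquing behavior shared by the
+ * with... creators (editorial addition, not from the original header):
+ *
+ *   const OSSymbol * a = OSSymbol::withCString("IOClass");
+ *   const OSSymbol * b = OSSymbol::withCString("IOClass");
+ *   // a == b: both calls yield the single shared instance, and each
+ *   // call bumps its retain count, so each result must be released.
+ *   if (b) b->release();
+ *   if (a) a->release();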
+ */
+ static const OSSymbol * withCStringNoCopy(const char * cString);
+
+/*!
+ * @function existingSymbolForString
+ *
+ * @abstract
+ * Returns an existing OSSymbol for the given OSString.
+ *
+ * @param aString The OSString object to look up.
+ *
+ * @result
+ * An existing instance of OSSymbol representing
+ * the same characters as aString;
+ * NULL if none is found.
+ *
+ * @discussion
+ * The returned OSSymbol object has an incremented refcount
+ * that the caller must release.
+ */
+ static const OSSymbol* existingSymbolForString(const OSString *aString);
+
+/*!
+ * @function existingSymbolForCString
+ *
+ * @abstract
+ * Returns an existing OSSymbol for the given C string.
+ *
+ * @param aCString The C string to look up.
+ *
+ * @result
+ * An existing instance of OSSymbol representing
+ * the same characters as aCString;
+ * NULL if none is found.
+ *
+ * @discussion
+ * The returned OSSymbol object has an incremented refcount
+ * that the caller must release.
+ */
+ static const OSSymbol* existingSymbolForCString(const char *aCString);
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract
+ * Tests the equality of two OSSymbol objects.
+ *
+ * @param aSymbol The OSSymbol object being compared against the receiver.
+ *
+ * @result
+ * true if the two OSSymbol objects are equivalent,
+ * false otherwise.
+ *
+ * @discussion
+ * Two OSSymbol objects are considered equal if they have the same address;
+ * that is, this function is equivalent to the == operator.
+ */
+ virtual bool isEqualTo(const OSSymbol * aSymbol) const;
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract Tests the equality of an OSSymbol object with a C string.
+ *
+ * @param cString The C string to compare against the receiver.
+ *
+ * @result
+ * true if the OSSymbol's characters
+ * are equivalent to the C string's,
+ * false otherwise.
+ */
+ virtual bool isEqualTo(const char * cString) const APPLE_KEXT_OVERRIDE;
+
+
+/*!
+ * @function isEqualTo
+ *
+ * @abstract Tests the equality of an OSSymbol object to an arbitrary object.
+ *
+ * @param anObject The object to be compared against the receiver.
+ * @result Returns true if the two objects are equivalent,
+ * false otherwise.
+ *
+ * @discussion
+ * An OSSymbol is considered equal to another object
+ * if that object is derived from
+ * @link //apple_ref/doc/class/OSString OSString@/link
+ * and contains the equivalent bytes of the same length.
+ */
+ virtual bool isEqualTo(const OSMetaClassBase * anObject) const APPLE_KEXT_OVERRIDE;
 #ifdef XNU_KERNEL_PRIVATE
- /* OSRuntime only INTERNAL API - DO NOT USE */
- /* Not to be included in headerdoc. */
- // xx-review: this should be removed from the symbol set.
-
- static void checkForPageUnload(
- void * startAddr,
- void * endAddr);
-
- static unsigned int bsearch(
- const void * key,
- const void * array,
- unsigned int arrayCount,
- size_t memberSize);
+/* OSRuntime only INTERNAL API - DO NOT USE */
+/* Not to be included in headerdoc. */
+// xx-review: this should be removed from the symbol set.
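+
+/* Illustrative sketch for the pool-lookup API declared above (editorial
+ * addition; assumes the standard libkern retain/release rules):
+ *
+ *   const OSSymbol * sym = OSSymbol::existingSymbolForCString("IOClass");
+ *   if (sym) {
+ *       // A symbol with this value already existed; we now hold a
+ *       // reference that must be dropped when done.
+ *       sym->release();
+ *   }
+ */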
+ + static void checkForPageUnload( + void * startAddr, + void * endAddr); + + static unsigned int bsearch( + const void * key, + const void * array, + unsigned int arrayCount, + size_t memberSize); #endif /* XNU_KERNEL_PRIVATE */ - OSMetaClassDeclareReservedUnused(OSSymbol, 0); - OSMetaClassDeclareReservedUnused(OSSymbol, 1); - OSMetaClassDeclareReservedUnused(OSSymbol, 2); - OSMetaClassDeclareReservedUnused(OSSymbol, 3); - OSMetaClassDeclareReservedUnused(OSSymbol, 4); - OSMetaClassDeclareReservedUnused(OSSymbol, 5); - OSMetaClassDeclareReservedUnused(OSSymbol, 6); - OSMetaClassDeclareReservedUnused(OSSymbol, 7); + OSMetaClassDeclareReservedUnused(OSSymbol, 0); + OSMetaClassDeclareReservedUnused(OSSymbol, 1); + OSMetaClassDeclareReservedUnused(OSSymbol, 2); + OSMetaClassDeclareReservedUnused(OSSymbol, 3); + OSMetaClassDeclareReservedUnused(OSSymbol, 4); + OSMetaClassDeclareReservedUnused(OSSymbol, 5); + OSMetaClassDeclareReservedUnused(OSSymbol, 6); + OSMetaClassDeclareReservedUnused(OSSymbol, 7); }; #endif /* !_OS_OSSYMBOL_H */ diff --git a/libkern/libkern/c++/OSUnserialize.h b/libkern/libkern/c++/OSUnserialize.h index 2e6e7f3ba..a3f8fc378 100644 --- a/libkern/libkern/c++/OSUnserialize.h +++ b/libkern/libkern/c++/OSUnserialize.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* OSUnserialize.h created by rsulack on Mon 23-Nov-1998 */ @@ -42,7 +42,7 @@ class OSString; * @abstract * This header declares the OSUnserializeXML function. */ - + /*! * @function OSUnserializeXML @@ -65,8 +65,8 @@ class OSString; * Not safe to call in a primary interrupt handler. */ extern "C++" OSObject * OSUnserializeXML( - const char * buffer, - OSString ** errorString = 0); + const char * buffer, + OSString ** errorString = 0); /*! * @function OSUnserializeXML @@ -91,9 +91,9 @@ extern "C++" OSObject * OSUnserializeXML( * Not safe to call in a primary interrupt handler. 
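 *
 * An illustrative call pattern (editorial addition; a plausible sketch
 * rather than text from the original header — IOLog is assumed to be
 * available to the caller):
 *
 *   OSString * errorString = NULL;
 *   OSObject * obj = OSUnserializeXML(buffer, bufferSize, &errorString);
 *   if (!obj && errorString) {
 *       IOLog("unserialize: %s\n", errorString->getCStringNoCopy());
 *       errorString->release();
 *   }
 *   if (obj) {
 *       // use the unserialized object graph, then:
 *       obj->release();
 *   }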
*/ extern "C++" OSObject * OSUnserializeXML( - const char * buffer, - size_t bufferSize, - OSString ** errorString = 0); + const char * buffer, + size_t bufferSize, + OSString ** errorString = 0); extern "C++" OSObject * OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorString); diff --git a/libkern/libkern/crc.h b/libkern/libkern/crc.h index bf7f42b61..f34eab147 100644 --- a/libkern/libkern/crc.h +++ b/libkern/libkern/crc.h @@ -34,8 +34,8 @@ __BEGIN_DECLS -uint16_t crc16(uint16_t crc, const void *bufp, size_t len); -uint32_t crc32(uint32_t crc, const void *bufp, size_t len); +uint16_t crc16(uint16_t crc, const void *bufp, size_t len); +uint32_t crc32(uint32_t crc, const void *bufp, size_t len); __END_DECLS diff --git a/libkern/libkern/crypto/aes.h b/libkern/libkern/crypto/aes.h index bcb704d20..bd8c84c63 100644 --- a/libkern/libkern/crypto/aes.h +++ b/libkern/libkern/crypto/aes.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -55,8 +55,7 @@ typedef struct{ cccbc_ctx_decl(AES_CBC_CTX_MAX_SIZE, ctx); } aes_encrypt_ctx; -typedef struct -{ +typedef struct{ aes_decrypt_ctx decrypt; aes_encrypt_ctx encrypt; } aes_ctx; @@ -82,7 +81,7 @@ aes_rval aes_encrypt(const unsigned char *in, unsigned char *out, aes_encrypt_ct #endif aes_rval aes_encrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk, - unsigned char *out_blk, aes_encrypt_ctx cx[1]); + unsigned char *out_blk, aes_encrypt_ctx cx[1]); aes_rval aes_decrypt_key(const unsigned char *key, int key_len, aes_decrypt_ctx cx[1]); @@ -94,7 +93,7 @@ aes_rval aes_decrypt(const unsigned char *in, unsigned char *out, aes_decrypt_ct #endif aes_rval aes_decrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk, - unsigned char *out_blk, aes_decrypt_ctx cx[1]); + unsigned char *out_blk, aes_decrypt_ctx cx[1]); aes_rval aes_encrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx); aes_rval aes_encrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx); diff --git a/libkern/libkern/crypto/aesxts.h b/libkern/libkern/crypto/aesxts.h index ad1da4310..3246e7892 100644 --- a/libkern/libkern/crypto/aesxts.h +++ b/libkern/libkern/crypto/aesxts.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -54,22 +54,22 @@ typedef struct { uint32_t xts_start(uint32_t cipher, // ignored - we're doing this for xts-aes only - const uint8_t *IV, // ignored - const uint8_t *key1, int keylen, - const uint8_t *key2, int tweaklen, // both keys are the same size for xts - uint32_t num_rounds, // ignored - uint32_t options, // ignored - symmetric_xts *xts); + const uint8_t *IV, // ignored + const uint8_t *key1, int keylen, + const uint8_t *key2, int tweaklen, // both keys are the same size for xts + uint32_t num_rounds, // ignored + uint32_t options, // ignored + symmetric_xts *xts); int xts_encrypt(const uint8_t *pt, unsigned long ptlen, - uint8_t *ct, - const uint8_t *tweak, // this can be considered the sector IV for this use - symmetric_xts *xts); + uint8_t *ct, + const uint8_t *tweak, // this can be considered the sector IV for this use + symmetric_xts *xts); int xts_decrypt(const uint8_t *ct, unsigned long ptlen, - uint8_t *pt, - const uint8_t *tweak, // this can be considered the sector IV for this use - symmetric_xts *xts); + uint8_t *pt, + const uint8_t *tweak, // this can be considered the sector IV for this use + symmetric_xts *xts); void xts_done(symmetric_xts *xts); diff --git a/libkern/libkern/crypto/chacha20poly1305.h b/libkern/libkern/crypto/chacha20poly1305.h index 598f59746..58620a792 100644 --- a/libkern/libkern/crypto/chacha20poly1305.h +++ b/libkern/libkern/crypto/chacha20poly1305.h @@ -33,20 +33,20 @@ extern "C" { #endif - + #include - + typedef ccchacha20poly1305_ctx chacha20poly1305_ctx; - -int chacha20poly1305_init(chacha20poly1305_ctx *ctx, const uint8_t *key); + +int chacha20poly1305_init(chacha20poly1305_ctx *ctx, const uint8_t *key); int chacha20poly1305_reset(chacha20poly1305_ctx *ctx); int chacha20poly1305_setnonce(chacha20poly1305_ctx *ctx, const uint8_t *nonce); int chacha20poly1305_incnonce(chacha20poly1305_ctx *ctx, uint8_t *nonce); -int chacha20poly1305_aad(chacha20poly1305_ctx *ctx, size_t nbytes, const void *aad); -int chacha20poly1305_encrypt(chacha20poly1305_ctx *ctx, size_t nbytes, const void *ptext, void *ctext); -int chacha20poly1305_finalize(chacha20poly1305_ctx *ctx, uint8_t *tag); -int chacha20poly1305_decrypt(chacha20poly1305_ctx *ctx, size_t nbytes, const void *ctext, void *ptext); -int chacha20poly1305_verify(chacha20poly1305_ctx *ctx, const uint8_t *tag); +int chacha20poly1305_aad(chacha20poly1305_ctx *ctx, size_t nbytes, const void *aad); +int 
chacha20poly1305_encrypt(chacha20poly1305_ctx *ctx, size_t nbytes, const void *ptext, void *ctext); +int chacha20poly1305_finalize(chacha20poly1305_ctx *ctx, uint8_t *tag); +int chacha20poly1305_decrypt(chacha20poly1305_ctx *ctx, size_t nbytes, const void *ctext, void *ptext); +int chacha20poly1305_verify(chacha20poly1305_ctx *ctx, const uint8_t *tag); #if defined(__cplusplus) } diff --git a/libkern/libkern/crypto/crypto_internal.h b/libkern/libkern/crypto/crypto_internal.h index 82c98b151..3abbe5dd2 100644 --- a/libkern/libkern/crypto/crypto_internal.h +++ b/libkern/libkern/crypto/crypto_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/libkern/libkern/crypto/des.h b/libkern/libkern/crypto/des.h index 62627e6cd..e2df46dbe 100644 --- a/libkern/libkern/crypto/des.h +++ b/libkern/libkern/crypto/des.h @@ -61,13 +61,13 @@ typedef struct{ typedef des_ecb_key_schedule des_key_schedule[1]; #define des_set_key des_ecb_key_sched -#define DES_ENCRYPT 1 -#define DES_DECRYPT 0 +#define DES_ENCRYPT 1 +#define DES_DECRYPT 0 /* Single DES ECB - 1 block */ int des_ecb_key_sched(des_cblock *key, des_ecb_key_schedule *ks); -void des_ecb_encrypt(des_cblock *in, des_cblock *out, des_ecb_key_schedule *ks, int encrypt); +void des_ecb_encrypt(des_cblock * in, des_cblock *out, des_ecb_key_schedule *ks, int encrypt); /* Triple DES ECB - 1 block */ int des3_ecb_key_sched(des_cblock *key, des3_ecb_key_schedule *ks); diff --git a/libkern/libkern/crypto/md5.h b/libkern/libkern/crypto/md5.h index 57e826af8..87102b3d3 100644 --- a/libkern/libkern/crypto/md5.h +++ b/libkern/libkern/crypto/md5.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -54,20 +54,20 @@ */ #ifndef _CRYPTO_MD5_H_ -#define _CRYPTO_MD5_H_ +#define _CRYPTO_MD5_H_ #include #include __BEGIN_DECLS -#define MD5_DIGEST_LENGTH 16 +#define MD5_DIGEST_LENGTH 16 /* MD5 context. */ typedef struct { - u_int32_t state[4]; /* state (ABCD) */ - u_int32_t count[2]; /* number of bits, modulo 2^64 (lsb first) */ - unsigned char buffer[64]; /* input buffer */ + u_int32_t state[4]; /* state (ABCD) */ + u_int32_t count[2]; /* number of bits, modulo 2^64 (lsb first) */ + unsigned char buffer[64]; /* input buffer */ } MD5_CTX; extern void MD5Init(MD5_CTX *); diff --git a/libkern/libkern/crypto/register_crypto.h b/libkern/libkern/crypto/register_crypto.h index a29592700..d804f53ac 100644 --- a/libkern/libkern/crypto/register_crypto.h +++ b/libkern/libkern/crypto/register_crypto.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,132 +46,132 @@ extern "C" { /* digests */ typedef void (*ccdigest_init_fn_t)(const struct ccdigest_info *di, ccdigest_ctx_t ctx); typedef void (*ccdigest_update_fn_t)(const struct ccdigest_info *di, ccdigest_ctx_t ctx, - unsigned long len, const void *data); + unsigned long len, const void *data); typedef void (*ccdigest_final_fn_t)(const struct ccdigest_info *di, ccdigest_ctx_t ctx, - void *digest); + void *digest); typedef void (*ccdigest_fn_t)(const struct ccdigest_info *di, unsigned long len, - const void *data, void *digest); + const void *data, void *digest); /* hmac */ typedef void (*cchmac_init_fn_t)(const struct ccdigest_info *di, cchmac_ctx_t ctx, - unsigned long key_len, const void *key); + unsigned long key_len, const void *key); typedef void (*cchmac_update_fn_t)(const struct ccdigest_info *di, cchmac_ctx_t ctx, - unsigned long data_len, const void *data); + unsigned long data_len, const void *data); typedef void (*cchmac_final_fn_t)(const struct ccdigest_info *di, cchmac_ctx_t ctx, - unsigned char *mac); + unsigned char *mac); typedef void (*cchmac_fn_t)(const struct ccdigest_info *di, unsigned long key_len, - const void *key, unsigned long data_len, const void *data, - unsigned char *mac); + const void *key, unsigned long data_len, const void *data, + unsigned char *mac); /* gcm */ typedef int (*ccgcm_init_with_iv_fn_t)(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, - size_t key_nbytes, const void *key, - const void *iv); + size_t key_nbytes, const void *key, + const void *iv); typedef int (*ccgcm_inc_iv_fn_t)(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, void *iv); - + typedef const struct ccchacha20poly1305_fns { - const struct ccchacha20poly1305_info *(*info)(void); - int (*init)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, const uint8_t *key); - int (*reset)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx); - int (*setnonce)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, const uint8_t *nonce); - int (*incnonce)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, uint8_t *nonce); - int (*aad)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, size_t nbytes, const void *aad); - int (*encrypt)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, size_t nbytes, const void *ptext, void *ctext); - int (*finalize)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, uint8_t *tag); - int (*decrypt)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, size_t nbytes, const void *ctext, void *ptext); - int (*verify)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, const uint8_t *tag); + const struct ccchacha20poly1305_info *(*info)(void); + int (*init)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, const uint8_t *key); + int (*reset)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx); + int (*setnonce)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, const uint8_t *nonce); + int (*incnonce)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, uint8_t *nonce); + int (*aad)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, size_t nbytes, const void *aad); + int (*encrypt)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, size_t nbytes, const void *ptext, void *ctext); + int 
(*finalize)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, uint8_t *tag); + int (*decrypt)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, size_t nbytes, const void *ctext, void *ptext); + int (*verify)(const struct ccchacha20poly1305_info *info, ccchacha20poly1305_ctx *ctx, const uint8_t *tag); } *ccchacha20poly1305_fns_t; /* pbkdf2 */ typedef void (*ccpbkdf2_hmac_fn_t)(const struct ccdigest_info *di, - unsigned long passwordLen, const void *password, - unsigned long saltLen, const void *salt, - unsigned long iterations, - unsigned long dkLen, void *dk); + unsigned long passwordLen, const void *password, + unsigned long saltLen, const void *salt, + unsigned long iterations, + unsigned long dkLen, void *dk); /* des weak key testing */ typedef int (*ccdes_key_is_weak_fn_t)(void *key, unsigned long length); typedef void (*ccdes_key_set_odd_parity_fn_t)(void *key, unsigned long length); /* XTS padding */ -typedef void (*ccpad_xts_decrypt_fn_t)(const struct ccmode_xts *xts, ccxts_ctx *ctx, - unsigned long nbytes, const void *in, void *out); +typedef void (*ccpad_xts_decrypt_fn_t)(const struct ccmode_xts *xts, ccxts_ctx *ctx, + unsigned long nbytes, const void *in, void *out); -typedef void (*ccpad_xts_encrypt_fn_t)(const struct ccmode_xts *xts, ccxts_ctx *ctx, - unsigned long nbytes, const void *in, void *out); +typedef void (*ccpad_xts_encrypt_fn_t)(const struct ccmode_xts *xts, ccxts_ctx *ctx, + unsigned long nbytes, const void *in, void *out); /* CBC padding (such as PKCS7 or CTSx per NIST standard) */ typedef size_t (*ccpad_cts3_crypt_fn_t)(const struct ccmode_cbc *cbc, cccbc_ctx *cbc_key, - cccbc_iv *iv, size_t nbytes, const void *in, void *out); + cccbc_iv *iv, size_t nbytes, const void *in, void *out); /* rng */ typedef struct ccrng_state *(*ccrng_fn_t)(int *error); /* rsa */ typedef int (*ccrsa_make_pub_fn_t)(ccrsa_pub_ctx_t pubk, - size_t exp_nbytes, const uint8_t *exp, - size_t mod_nbytes, const uint8_t *mod); + size_t exp_nbytes, const uint8_t *exp, + size_t mod_nbytes, const uint8_t *mod); typedef int (*ccrsa_verify_pkcs1v15_fn_t)(ccrsa_pub_ctx_t key, const uint8_t *oid, - size_t digest_len, const uint8_t *digest, - size_t sig_len, const uint8_t *sig, + size_t digest_len, const uint8_t *digest, + size_t sig_len, const uint8_t *sig, bool *valid); typedef struct crypto_functions { - /* digests common functions */ - ccdigest_init_fn_t ccdigest_init_fn; - ccdigest_update_fn_t ccdigest_update_fn; - ccdigest_final_fn_t ccdigest_final_fn; - ccdigest_fn_t ccdigest_fn; - /* digest implementations */ - const struct ccdigest_info * ccmd5_di; - const struct ccdigest_info * ccsha1_di; - const struct ccdigest_info * ccsha256_di; - const struct ccdigest_info * ccsha384_di; - const struct ccdigest_info * ccsha512_di; - - /* hmac common function */ - cchmac_init_fn_t cchmac_init_fn; - cchmac_update_fn_t cchmac_update_fn; - cchmac_final_fn_t cchmac_final_fn; - cchmac_fn_t cchmac_fn; - - /* ciphers modes implementations */ - /* AES, ecb, cbc and xts */ - const struct ccmode_ecb *ccaes_ecb_encrypt; - const struct ccmode_ecb *ccaes_ecb_decrypt; - const struct ccmode_cbc *ccaes_cbc_encrypt; - const struct ccmode_cbc *ccaes_cbc_decrypt; - const struct ccmode_ctr *ccaes_ctr_crypt; - const struct ccmode_xts *ccaes_xts_encrypt; - const struct ccmode_xts *ccaes_xts_decrypt; - const struct ccmode_gcm *ccaes_gcm_encrypt; - const struct ccmode_gcm *ccaes_gcm_decrypt; - - ccgcm_init_with_iv_fn_t ccgcm_init_with_iv_fn; - ccgcm_inc_iv_fn_t ccgcm_inc_iv_fn; - - 
ccchacha20poly1305_fns_t ccchacha20poly1305_fns; - - /* DES, ecb and cbc */ - const struct ccmode_ecb *ccdes_ecb_encrypt; - const struct ccmode_ecb *ccdes_ecb_decrypt; - const struct ccmode_cbc *ccdes_cbc_encrypt; - const struct ccmode_cbc *ccdes_cbc_decrypt; - /* Triple DES, ecb and cbc */ - const struct ccmode_ecb *cctdes_ecb_encrypt; - const struct ccmode_ecb *cctdes_ecb_decrypt; - const struct ccmode_cbc *cctdes_cbc_encrypt; - const struct ccmode_cbc *cctdes_cbc_decrypt; - /* RC4 */ + /* digests common functions */ + ccdigest_init_fn_t ccdigest_init_fn; + ccdigest_update_fn_t ccdigest_update_fn; + ccdigest_final_fn_t ccdigest_final_fn; + ccdigest_fn_t ccdigest_fn; + /* digest implementations */ + const struct ccdigest_info * ccmd5_di; + const struct ccdigest_info * ccsha1_di; + const struct ccdigest_info * ccsha256_di; + const struct ccdigest_info * ccsha384_di; + const struct ccdigest_info * ccsha512_di; + + /* hmac common function */ + cchmac_init_fn_t cchmac_init_fn; + cchmac_update_fn_t cchmac_update_fn; + cchmac_final_fn_t cchmac_final_fn; + cchmac_fn_t cchmac_fn; + + /* ciphers modes implementations */ + /* AES, ecb, cbc and xts */ + const struct ccmode_ecb *ccaes_ecb_encrypt; + const struct ccmode_ecb *ccaes_ecb_decrypt; + const struct ccmode_cbc *ccaes_cbc_encrypt; + const struct ccmode_cbc *ccaes_cbc_decrypt; + const struct ccmode_ctr *ccaes_ctr_crypt; + const struct ccmode_xts *ccaes_xts_encrypt; + const struct ccmode_xts *ccaes_xts_decrypt; + const struct ccmode_gcm *ccaes_gcm_encrypt; + const struct ccmode_gcm *ccaes_gcm_decrypt; + + ccgcm_init_with_iv_fn_t ccgcm_init_with_iv_fn; + ccgcm_inc_iv_fn_t ccgcm_inc_iv_fn; + + ccchacha20poly1305_fns_t ccchacha20poly1305_fns; + + /* DES, ecb and cbc */ + const struct ccmode_ecb *ccdes_ecb_encrypt; + const struct ccmode_ecb *ccdes_ecb_decrypt; + const struct ccmode_cbc *ccdes_cbc_encrypt; + const struct ccmode_cbc *ccdes_cbc_decrypt; + /* Triple DES, ecb and cbc */ + const struct ccmode_ecb *cctdes_ecb_encrypt; + const struct ccmode_ecb *cctdes_ecb_decrypt; + const struct ccmode_cbc *cctdes_cbc_encrypt; + const struct ccmode_cbc *cctdes_cbc_decrypt; + /* RC4 */ const struct ccrc4_info *ccrc4_info; /* Blowfish - ECB only */ - const struct ccmode_ecb *ccblowfish_ecb_encrypt; - const struct ccmode_ecb *ccblowfish_ecb_decrypt; + const struct ccmode_ecb *ccblowfish_ecb_encrypt; + const struct ccmode_ecb *ccblowfish_ecb_decrypt; /* CAST - ECB only */ - const struct ccmode_ecb *cccast_ecb_encrypt; - const struct ccmode_ecb *cccast_ecb_decrypt; + const struct ccmode_ecb *cccast_ecb_encrypt; + const struct ccmode_ecb *cccast_ecb_decrypt; /* DES key helper functions */ ccdes_key_is_weak_fn_t ccdes_key_is_weak_fn; ccdes_key_set_odd_parity_fn_t ccdes_key_set_odd_parity_fn; @@ -180,14 +180,14 @@ typedef struct crypto_functions { ccpad_xts_decrypt_fn_t ccpad_xts_decrypt_fn; /* CTS3 padding+encrypt functions */ ccpad_cts3_crypt_fn_t ccpad_cts3_encrypt_fn; - ccpad_cts3_crypt_fn_t ccpad_cts3_decrypt_fn; + ccpad_cts3_crypt_fn_t ccpad_cts3_decrypt_fn; - /* rng */ - ccrng_fn_t ccrng_fn; + /* rng */ + ccrng_fn_t ccrng_fn; - /* rsa */ - ccrsa_make_pub_fn_t ccrsa_make_pub_fn; - ccrsa_verify_pkcs1v15_fn_t ccrsa_verify_pkcs1v15_fn; + /* rsa */ + ccrsa_make_pub_fn_t ccrsa_make_pub_fn; + ccrsa_verify_pkcs1v15_fn_t ccrsa_verify_pkcs1v15_fn; } *crypto_functions_t; int register_crypto_functions(const crypto_functions_t funcs); diff --git a/libkern/libkern/crypto/rsa.h b/libkern/libkern/crypto/rsa.h index 2084dfdea..d8a6a541f 100644 --- 
a/libkern/libkern/crypto/rsa.h +++ b/libkern/libkern/crypto/rsa.h @@ -38,17 +38,17 @@ extern "C" #define RSA_MAX_KEY_BITSIZE 4096 typedef struct{ - ccrsa_pub_ctx_decl(ccn_sizeof(RSA_MAX_KEY_BITSIZE),key); + ccrsa_pub_ctx_decl(ccn_sizeof(RSA_MAX_KEY_BITSIZE), key); } rsa_pub_ctx; int rsa_make_pub(rsa_pub_ctx *pub, - size_t exp_nbytes, const uint8_t *exp, - size_t mod_nbytes, const uint8_t *mod); + size_t exp_nbytes, const uint8_t *exp, + size_t mod_nbytes, const uint8_t *mod); int rsa_verify_pkcs1v15(rsa_pub_ctx *pub, const uint8_t *oid, - size_t digest_len, const uint8_t *digest, - size_t sig_len, const uint8_t *sig, - bool *valid); + size_t digest_len, const uint8_t *digest, + size_t sig_len, const uint8_t *sig, + bool *valid); #if defined(__cplusplus) } diff --git a/libkern/libkern/crypto/sha1.h b/libkern/libkern/crypto/sha1.h index ad6b798f9..58f2265b2 100644 --- a/libkern/libkern/crypto/sha1.h +++ b/libkern/libkern/crypto/sha1.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _CRYPTO_SHA1_H_ -#define _CRYPTO_SHA1_H_ +#define _CRYPTO_SHA1_H_ #ifdef __cplusplus extern "C" { #endif -#define SHA_DIGEST_LENGTH 20 -#define SHA1_RESULTLEN SHA_DIGEST_LENGTH +#define SHA_DIGEST_LENGTH 20 +#define SHA1_RESULTLEN SHA_DIGEST_LENGTH typedef struct sha1_ctxt { union { - u_int8_t b8[20]; - u_int32_t b32[5]; /* state (ABCDE) */ + u_int8_t b8[20]; + u_int32_t b32[5]; /* state (ABCDE) */ } h; union { - u_int8_t b8[8]; - u_int32_t b32[2]; - u_int64_t b64[1]; /* # of bits, modulo 2^64 (msb first) */ + u_int8_t b8[8]; + u_int32_t b32[2]; + u_int64_t b64[1]; /* # of bits, modulo 2^64 (msb first) */ } c; union { - u_int8_t b8[64]; - u_int32_t b32[16]; /* input buffer */ + u_int8_t b8[64]; + u_int32_t b32[16]; /* input buffer */ } m; - u_int8_t count; /* unused; for compatibility only */ + u_int8_t count; /* unused; for compatibility only */ } SHA1_CTX; /* For compatibility with the other SHA-1 implementation. 
 */
-#define sha1_init(c) SHA1Init(c)
-#define sha1_loop(c, b, l) SHA1Update(c, b, l)
-#define sha1_result(c, b) SHA1Final(b, c)
+#define sha1_init(c) SHA1Init(c)
+#define sha1_loop(c, b, l) SHA1Update(c, b, l)
+#define sha1_result(c, b) SHA1Final(b, c)
 extern void SHA1Init(SHA1_CTX *);
 extern void SHA1Update(SHA1_CTX *, const void *, size_t);
diff --git a/libkern/libkern/crypto/sha2.h b/libkern/libkern/crypto/sha2.h
index 8fe2a54dc..2e93f35d7 100644
--- a/libkern/libkern/crypto/sha2.h
+++ b/libkern/libkern/crypto/sha2.h
@@ -2,7 +2,7 @@
 * Copyright (c) 2012 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- *
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
- *
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
@@ -36,15 +36,15 @@ extern "C" {
 #include
 /*** SHA-256/384/512 Various Length Definitions ***********************/
-#define SHA256_BLOCK_LENGTH CCSHA256_BLOCK_SIZE
-#define SHA256_DIGEST_LENGTH CCSHA256_OUTPUT_SIZE
-#define SHA256_DIGEST_STRING_LENGTH (SHA256_DIGEST_LENGTH * 2 + 1)
-#define SHA384_BLOCK_LENGTH CCSHA512_BLOCK_SIZE
-#define SHA384_DIGEST_LENGTH CCSHA384_OUTPUT_SIZE
-#define SHA384_DIGEST_STRING_LENGTH (SHA384_DIGEST_LENGTH * 2 + 1)
-#define SHA512_BLOCK_LENGTH CCSHA512_BLOCK_SIZE
-#define SHA512_DIGEST_LENGTH CCSHA512_OUTPUT_SIZE
-#define SHA512_DIGEST_STRING_LENGTH (SHA512_DIGEST_LENGTH * 2 + 1)
+#define SHA256_BLOCK_LENGTH CCSHA256_BLOCK_SIZE
+#define SHA256_DIGEST_LENGTH CCSHA256_OUTPUT_SIZE
+#define SHA256_DIGEST_STRING_LENGTH (SHA256_DIGEST_LENGTH * 2 + 1)
+#define SHA384_BLOCK_LENGTH CCSHA512_BLOCK_SIZE
+#define SHA384_DIGEST_LENGTH CCSHA384_OUTPUT_SIZE
+#define SHA384_DIGEST_STRING_LENGTH (SHA384_DIGEST_LENGTH * 2 + 1)
+#define SHA512_BLOCK_LENGTH CCSHA512_BLOCK_SIZE
+#define SHA512_DIGEST_LENGTH CCSHA512_OUTPUT_SIZE
+#define SHA512_DIGEST_STRING_LENGTH (SHA512_DIGEST_LENGTH * 2 + 1)
 typedef struct {
 ccdigest_ctx_decl(CCSHA256_STATE_SIZE, CCSHA256_BLOCK_SIZE, ctx);
@@ -70,7 +70,7 @@ void SHA512_Init(SHA512_CTX *ctx);
 void SHA512_Update(SHA512_CTX *ctx, const void *data, size_t len);
 void SHA512_Final(void *digest, SHA512_CTX *ctx);
-#ifdef __cplusplus
+#ifdef __cplusplus
 }
 #endif /* __cplusplus */
diff --git a/libkern/libkern/i386/OSByteOrder.h b/libkern/libkern/i386/OSByteOrder.h
index f38e0652e..84c632bb5 100644
--- a/libkern/libkern/i386/OSByteOrder.h
+++ b/libkern/libkern/i386/OSByteOrder.h
@@ -2,7 +2,7 @@
 * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,40 +38,40 @@ OS_INLINE uint16_t OSReadSwapInt16( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - uint16_t result; + uint16_t result; - result = *(volatile uint16_t *)((uintptr_t)base + byteOffset); - return _OSSwapInt16(result); + result = *(volatile uint16_t *)((uintptr_t)base + byteOffset); + return _OSSwapInt16(result); } OS_INLINE uint32_t OSReadSwapInt32( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - uint32_t result; + uint32_t result; - result = *(volatile uint32_t *)((uintptr_t)base + byteOffset); - return _OSSwapInt32(result); + result = *(volatile uint32_t *)((uintptr_t)base + byteOffset); + return _OSSwapInt32(result); } OS_INLINE uint64_t OSReadSwapInt64( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - uint64_t result; + uint64_t result; - result = *(volatile uint64_t *)((uintptr_t)base + byteOffset); - return _OSSwapInt64(result); + result = *(volatile uint64_t *)((uintptr_t)base + byteOffset); + return _OSSwapInt64(result); } /* Functions for byte reversed stores. */ @@ -79,34 +79,34 @@ OSReadSwapInt64( OS_INLINE void OSWriteSwapInt16( - volatile void * base, - uintptr_t byteOffset, - uint16_t data -) + volatile void * base, + uintptr_t byteOffset, + uint16_t data + ) { - *(volatile uint16_t *)((uintptr_t)base + byteOffset) = _OSSwapInt16(data); + *(volatile uint16_t *)((uintptr_t)base + byteOffset) = _OSSwapInt16(data); } OS_INLINE void OSWriteSwapInt32( - volatile void * base, - uintptr_t byteOffset, - uint32_t data -) + volatile void * base, + uintptr_t byteOffset, + uint32_t data + ) { - *(volatile uint32_t *)((uintptr_t)base + byteOffset) = _OSSwapInt32(data); + *(volatile uint32_t *)((uintptr_t)base + byteOffset) = _OSSwapInt32(data); } OS_INLINE void OSWriteSwapInt64( - volatile void * base, - uintptr_t byteOffset, - uint64_t data -) + volatile void * base, + uintptr_t byteOffset, + uint64_t data + ) { - *(volatile uint64_t *)((uintptr_t)base + byteOffset) = _OSSwapInt64(data); + *(volatile uint64_t *)((uintptr_t)base + byteOffset) = _OSSwapInt64(data); } #endif /* ! 
_OS_OSBYTEORDERI386_H */ diff --git a/libkern/libkern/i386/_OSByteOrder.h b/libkern/libkern/i386/_OSByteOrder.h index e60b4fd97..e95c3975d 100644 --- a/libkern/libkern/i386/_OSByteOrder.h +++ b/libkern/libkern/i386/_OSByteOrder.h @@ -2,7 +2,7 @@ * Copyright (c) 2006-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -44,23 +44,23 @@ __DARWIN_OS_INLINE __uint16_t _OSSwapInt16( - __uint16_t _data -) + __uint16_t _data + ) { - return ((__uint16_t)((_data << 8) | (_data >> 8))); + return (__uint16_t)((_data << 8) | (_data >> 8)); } __DARWIN_OS_INLINE __uint32_t _OSSwapInt32( - __uint32_t _data -) + __uint32_t _data + ) { #if defined(__llvm__) - return __builtin_bswap32(_data); + return __builtin_bswap32(_data); #else - __asm__ ("bswap %0" : "+r" (_data)); - return _data; + __asm__ ("bswap %0" : "+r" (_data)); + return _data; #endif } @@ -68,34 +68,34 @@ _OSSwapInt32( __DARWIN_OS_INLINE __uint64_t _OSSwapInt64( - __uint64_t _data -) + __uint64_t _data + ) { - return __builtin_bswap64(_data); + return __builtin_bswap64(_data); } #elif defined(__i386__) __DARWIN_OS_INLINE __uint64_t _OSSwapInt64( - __uint64_t _data -) + __uint64_t _data + ) { - __asm__ ("bswap %%eax\n\t" - "bswap %%edx\n\t" - "xchgl %%eax, %%edx" - : "+A" (_data)); - return _data; + __asm__ ("bswap %%eax\n\t" + "bswap %%edx\n\t" + "xchgl %%eax, %%edx" + : "+A" (_data)); + return _data; } #elif defined(__x86_64__) __DARWIN_OS_INLINE __uint64_t _OSSwapInt64( - __uint64_t _data -) + __uint64_t _data + ) { - __asm__ ("bswap %0" : "+r" (_data)); - return _data; + __asm__ ("bswap %0" : "+r" (_data)); + return _data; } #else #error Unknown architecture diff --git a/libkern/libkern/img4/interface.h b/libkern/libkern/img4/interface.h index f88d89b61..01d749e4f 100644 --- a/libkern/libkern/img4/interface.h +++ b/libkern/libkern/img4/interface.h @@ -37,11 +37,24 @@ #include #include +#if MACH_KERNEL_PRIVATE +#define _SYS_TYPES_H_ 1 +#include +#include +#else +#include +#include +#endif + /* * We rely on img4.h's logic for either including sys/types.h or declaring - * errno_t ourselves. + * errno_t ourselves. So when building the kernel, include img4.h from our + * external headers. Avoid this inclusion if we're building AppleImage4, which + * will have included its own internal version of the header. */ +#if MACH_KERNEL_PRIVATE || !_DARWIN_BUILDING_PROJECT_APPLEIMAGE4 #include +#endif /*! 
 * @const IMG4_INTERFACE_VERSION
@@ -50,130 +63,210 @@
 * it can be tested at build-time and not require rev-locked submissions of xnu
 * and AppleImage4.
 */
-#define IMG4_INTERFACE_VERSION (1u)
+#define IMG4_INTERFACE_VERSION (2u)
 /*!
 * @typedef img4_init_t
- * A type describing the img4_init() function.
+ * A type describing a pointer to the {@link img4_init} function.
 */
-typedef errno_t (*img4_init_t)(
+typedef errno_t (*const img4_init_t)(
 img4_t *i4,
 img4_flags_t flags,
 const uint8_t *bytes,
 size_t len,
 img4_destructor_t destructor
-);
-
-/*!
- * @typedef img4_init_t
- * A type describing the img4_set_custom_tag_handler() function.
- */
-typedef void (*img4_set_custom_tag_handler_t)(
- img4_t *i4,
- const img4_custom_tag_t *tags,
- size_t tags_cnt
-);
+ );
 /*!
- * @typedef img4_init_t
- * A type describing the img4_get_trusted_payload() function.
+ * @typedef img4_get_trusted_payload_t
+ * A type describing a pointer to the {@link img4_get_trusted_payload} function.
 */
-typedef errno_t (*img4_get_trusted_payload_t)(
+typedef errno_t (*const img4_get_trusted_payload_t)(
 img4_t *i4,
 img4_tag_t tag,
 const img4_environment_t *env,
- void *ctx,
 const uint8_t **bytes,
 size_t *len
-);
+ );
 /*!
- * @typedef img4_init_t
- * A type describing the img4_get_trusted_external_payload() function.
+ * @typedef img4_get_trusted_external_payload_t
+ * A type describing a pointer to the {@link img4_get_trusted_external_payload}
+ * function.
 */
-typedef errno_t (*img4_get_trusted_external_payload_t)(
+typedef errno_t (*const img4_get_trusted_external_payload_t)(
 img4_t *img4,
 img4_payload_t *payload,
 const img4_environment_t *env,
- void *ctx,
 const uint8_t **bytes,
 size_t *len
-);
+ );
 /*!
- * @typedef img4_init_t
- * A type describing the img4_get_entitlement_bool() function.
+ * @typedef img4_set_nonce_t
+ * A type describing a pointer to the {@link img4_set_nonce} function.
 */
-typedef bool (*img4_get_entitlement_bool_t)(
- img4_t *i4,
- img4_tag_t entitlement
-);
+typedef void (*const img4_set_nonce_t)(img4_t *i4,
+ const void *bytes,
+ size_t len
+ );
 /*!
- * @typedef img4_init_t
- * A type describing the img4_get_object_entitlement_bool() function.
+ * @typedef img4_destroy_t
+ * A type describing the {@link img4_destroy} function.
+ */
+typedef void (*const img4_destroy_t)(
+ img4_t *i4
+ );
+
+/*!
+ * @typedef img4_payload_init_t
+ * A type describing the {@link img4_payload_init} function.
+ */
+typedef errno_t (*const img4_payload_init_t)(
+ img4_payload_t *i4p,
+ img4_tag_t tag,
+ img4_payload_flags_t flags,
+ const uint8_t *bytes,
+ size_t len,
+ img4_destructor_t destructor
+ );
+
+/*!
+ * @typedef img4_payload_destroy_t
+ * A type describing the {@link img4_payload_destroy} function.
 */
-typedef bool (*img4_get_object_entitlement_bool_t)(
+typedef void (*const img4_payload_destroy_t)(
+ img4_payload_t *i4
+ );
+
+/*!
+ * @typedef img4_set_nonce_domain_t
+ * A type describing the {@link img4_set_nonce_domain} function.
+ */
+typedef void (*const img4_set_nonce_domain_t)(
 img4_t *i4,
- img4_tag_t object,
- img4_tag_t entitlement
-);
+ const img4_nonce_domain_t *nd
+ );
 /*!
- * @typedef img4_init_t
- * A type describing the img4_destroy() function.
+ * @typedef img4_nonce_domain_copy_nonce_t
+ * A type describing the {@link img4_nonce_domain_copy_nonce} function.
 */
-typedef void (*img4_destroy_t)(
- img4_t *i4
-);
+typedef errno_t (*const img4_nonce_domain_copy_nonce_t)(
+ const img4_nonce_domain_t *nd,
+ img4_nonce_t *n
+ );
+
+/*!
+ * @typedef img4_nonce_domain_roll_nonce_t + * A type describing the {@link img4_nonce_domain_roll_nonce} function. + */ +typedef errno_t (*const img4_nonce_domain_roll_nonce_t)( + const img4_nonce_domain_t *nd + ); + +/*! + * @typedef img4_payload_init_with_vnode_4xnu_t + * A type describing the {@link img4_payload_init_with_vnode_4xnu} function. + */ +typedef errno_t (*img4_payload_init_with_vnode_4xnu_t)( + img4_payload_t *i4p, + img4_tag_t tag, + vnode_t vn, + img4_payload_flags_t flags + ); /*! * @typedef img4_interface_t * A structure describing the interface to the AppleImage4 kext. * - * @property i4if_version + * @field i4if_version * The version of the structure supported by the implementation. * - * @property i4if_init - * A pointer to the img4_init function. + * @field i4if_init + * A pointer to the {@link img4_init} function. + * + * @field i4if_get_trusted_payload + * A pointer to the {@link img4_get_trusted_payload} function. + * + * @field i4if_get_trusted_external_payload + * A pointer to the {@link img4_get_trusted_external_payload} function. * - * @property i4if_set_custom_tag_handler - * A pointer to the img4_set_custom_tag_handler function. + * @field i4if_destroy + * A pointer to the {@link img4_destroy} function. * - * @property i4if_get_trusted_payload - * A pointer to the img4_get_trusted_payload function. + * @field i4if_payload_init + * A pointer to the {@link img4_payload_init} function. * - * @property i4if_get_trusted_external_payload - * A pointer to the img4_get_trusted_external_payload function. + * @field i4if_payload_destroy + * A pointer to the {@link img4_payload_destroy} function. * - * @property i4if_get_entitlement_bool - * A pointer to the img4_get_entitlement_bool function. + * @field i4if_environment_platform + * The {@link IMG4_ENVIRONMENT_PLATFORM} global. * - * @property i4if_get_object_entitlement_bool - * A pointer to the img4_get_object_entitlement_bool function. + * @field i4if_environment_reserved + * Reserved for use by the implementation. * - * @property i4if_destroy - * A pointer to the img4_destroy function. + * @field i4if_environment_trust_cache + * The {@link IMG4_ENVIRONMENT_TRUST_CACHE} global. * - * @property i4if_v1 - * All members added in version 1 of the structure. + * @field i4if_v1 + * All fields added in version 1 of the structure. * - * @property environment_platform - * The IMG4_ENVIRONMENT_PLATFORM global. + * @field i4if_v1.set_nonce_domain + * A pointer to the {@link img4_set_nonce_domain} function. + * + * @field i4if_v1.nonce_domain_copy_nonce + * A pointer to the {@link img4_nonce_domain_copy_nonce} function. + * + * @field i4if_v1.nonce_domain_roll_nonce + * A pointer to the {@link img4_nonce_domain_roll_nonce} function. + * + * @field i4if_v1.nonce_domain_trust_cache + * The {@link IMG4_NONCE_DOMAIN_TRUST_CACHE} global. + * + * @field i4if_v2 + * All fields added in version 2 of the structure. + * + * @field i4if_v2.payload_init_with_vnode_4xnu + * A pointer to the {@link img4_payload_init_with_vnode_4xnu} function. + * + * @field i4if_v3 + * All fields added in version 3 of the structure. + * + * @field i4if_v3.nonce_domain_pdi + * The {@link IMG4_NONCE_DOMAIN_PDI} global. + * + * @field i4if_v3.nonce_domain_cryptex + * The {@link IMG4_NONCE_DOMAIN_CRYPTEX} global. + * + * @field i4if_v4.environment_init_identity + * A pointer to the {@link img4_environment_init_identity} function.
*/ + typedef struct _img4_interface { const uint32_t i4if_version; - const img4_init_t i4if_init; - const img4_set_custom_tag_handler_t i4if_set_custom_tag_handler; - const img4_get_trusted_payload_t i4if_get_trusted_payload; - const img4_get_trusted_external_payload_t i4if_get_trusted_external_payload; - const img4_get_entitlement_bool_t i4if_get_entitlement_bool; - const img4_get_object_entitlement_bool_t i4if_get_object_entitlement_bool; - const img4_destroy_t i4if_destroy; + img4_init_t i4if_init; + img4_set_nonce_t i4if_set_nonce; + img4_get_trusted_payload_t i4if_get_trusted_payload; + img4_get_trusted_external_payload_t i4if_get_trusted_external_payload; + img4_destroy_t i4if_destroy; + img4_payload_init_t i4if_payload_init; + img4_payload_destroy_t i4if_payload_destroy; + const img4_environment_t *i4if_environment_platform; + const img4_environment_t *i4if_environment_reserved; + const img4_environment_t *i4if_environment_trust_cache; struct { - const img4_environment_t *environment_platform; + img4_set_nonce_domain_t set_nonce_domain; + img4_nonce_domain_copy_nonce_t nonce_domain_copy_nonce; + img4_nonce_domain_roll_nonce_t nonce_domain_roll_nonce; + const img4_nonce_domain_t *nonce_domain_trust_cache; } i4if_v1; - void *__reserved[23]; + struct { + img4_payload_init_with_vnode_4xnu_t payload_init_with_vnode_4xnu; + } i4if_v2; + void *__reserved[17]; } img4_interface_t; __BEGIN_DECLS; @@ -186,7 +279,7 @@ extern const img4_interface_t *img4if; /*! * @function img4_interface_register - * Registers the AppleImage4 kext interface with xnu. + * Registers the AppleImage4 kext interface for use within the kernel proper. * * @param i4 * The interface to register. diff --git a/libkern/libkern/kernel_mach_header.h b/libkern/libkern/kernel_mach_header.h index bf5b0f911..ba418a09c 100644 --- a/libkern/libkern/kernel_mach_header.h +++ b/libkern/libkern/kernel_mach_header.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
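Because the interface structure is versioned while the doc comment above already describes fields up through i4if_v4, a kernel-side caller presumably has to gate on i4if_version before touching any versioned sub-structure; the __reserved tail is what lets later versions grow the struct in place. A hedged sketch of that pattern (the wrapper function, its error choices, and the 0 flags value are assumptions, not part of this header):

    /* Hypothetical caller: only reach into i4if_v2 once the registered
     * interface advertises version 2 or later. img4if is the extern
     * declared above and is expected to be NULL until AppleImage4
     * registers itself. */
    static errno_t
    payload_init_from_vnode(img4_payload_t *i4p, img4_tag_t tag, vnode_t vn)
    {
            if (img4if == NULL) {
                    return ENODEV;   /* interface not registered yet */
            }
            if (img4if->i4if_version < 2u) {
                    return ENOTSUP;  /* fields in i4if_v2 are not present */
            }
            /* 0 is assumed to mean "no payload flags". */
            return img4if->i4if_v2.payload_init_with_vnode_4xnu(i4p, tag, vn, 0);
    }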
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,8 +36,8 @@ * */ -#ifndef _KERNEL_MACH_HEADER_ -#define _KERNEL_MACH_HEADER_ +#ifndef _KERNEL_MACH_HEADER_ +#define _KERNEL_MACH_HEADER_ #ifdef __cplusplus extern "C" { @@ -48,15 +48,15 @@ extern "C" { #include #include -#if !KERNEL +#if !KERNEL #error this header for kernel use only #endif #if defined(__LP64__) -typedef struct mach_header_64 kernel_mach_header_t; +typedef struct mach_header_64 kernel_mach_header_t; typedef struct segment_command_64 kernel_segment_command_t; -typedef struct section_64 kernel_section_t; +typedef struct section_64 kernel_section_t; typedef struct nlist_64 kernel_nlist_t; #define MH_MAGIC_KERNEL MH_MAGIC_64 @@ -64,20 +64,20 @@ typedef struct nlist_64 kernel_nlist_t; #else -typedef struct mach_header kernel_mach_header_t; -typedef struct segment_command kernel_segment_command_t; -typedef struct section kernel_section_t; +typedef struct mach_header kernel_mach_header_t; +typedef struct segment_command kernel_segment_command_t; +typedef struct section kernel_section_t; typedef struct nlist kernel_nlist_t; #define MH_MAGIC_KERNEL MH_MAGIC -#define LC_SEGMENT_KERNEL LC_SEGMENT -#define SECT_CONSTRUCTOR "__constructor" -#define SECT_DESTRUCTOR "__destructor" +#define LC_SEGMENT_KERNEL LC_SEGMENT +#define SECT_CONSTRUCTOR "__constructor" +#define SECT_DESTRUCTOR "__destructor" #endif -#define SECT_MODINITFUNC "__mod_init_func" -#define SECT_MODTERMFUNC "__mod_term_func" +#define SECT_MODINITFUNC "__mod_init_func" +#define SECT_MODTERMFUNC "__mod_term_func" extern kernel_mach_header_t _mh_execute_header; @@ -86,22 +86,22 @@ vm_offset_t getlastaddr(void); kernel_segment_command_t *firstseg(void); kernel_segment_command_t *firstsegfromheader(kernel_mach_header_t *header); kernel_segment_command_t *nextsegfromheader( - kernel_mach_header_t *header, - kernel_segment_command_t *seg); + kernel_mach_header_t *header, + kernel_segment_command_t *seg); kernel_segment_command_t *getsegbyname(const char *seg_name); kernel_segment_command_t *getsegbynamefromheader( - kernel_mach_header_t *header, - const char *seg_name); + kernel_mach_header_t *header, + const char *seg_name); void *getsegdatafromheader(kernel_mach_header_t *, const char *, unsigned long *); kernel_section_t *getsectbyname(const char *seg_name, const char *sect_name); kernel_section_t *getsectbynamefromheader( - kernel_mach_header_t *header, - const char *seg_name, - const char *sect_name); + kernel_mach_header_t *header, + const char *seg_name, + const char *sect_name); uint32_t getsectoffsetfromheader( - kernel_mach_header_t *mhp, - const char *segname, - const char *sectname); + kernel_mach_header_t *mhp, + const char *segname, + const char *sectname); void *getsectdatafromheader(kernel_mach_header_t *, const char *, const char *, unsigned long *); kernel_section_t *firstsect(kernel_segment_command_t *sgp); kernel_section_t *nextsect(kernel_segment_command_t *sgp, kernel_section_t *sp); @@ -112,4 +112,4 @@ void *getuuidfromheader(kernel_mach_header_t *, unsigned long *); } #endif -#endif /* _KERNEL_MACH_HEADER_ */ +#endif /* _KERNEL_MACH_HEADER_ */ diff --git a/libkern/libkern/kext_panic_report.h b/libkern/libkern/kext_panic_report.h index 5aa4cdea6..73f2985fe 100644 --- a/libkern/libkern/kext_panic_report.h +++ b/libkern/libkern/kext_panic_report.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. 
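For orientation, the accessors above all walk the load commands of an in-memory Mach-O header, most commonly the kernel's own _mh_execute_header. A brief hedged sketch of looking up the kernel's __TEXT,__text section (the function name is invented; __TEXT and __text are the standard Mach-O segment and section names):

    /* Locate the kernel's own __TEXT,__text section via the accessors
     * declared above; both calls return NULL if the section is absent. */
    static void
    dump_kernel_text_info(void)
    {
            unsigned long size = 0;
            void *data = getsectdatafromheader(&_mh_execute_header,
                "__TEXT", "__text", &size);
            kernel_section_t *sect = getsectbyname("__TEXT", "__text");

            if (data != NULL && sect != NULL) {
                    printf("__text: %lu bytes, vmaddr 0x%llx\n",
                        size, (unsigned long long)sect->addr);
            }
    }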
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _KEXT_PANIC_REPORT_H_ -#define _KEXT_PANIC_REPORT_H_ +#ifndef _KEXT_PANIC_REPORT_H_ +#define _KEXT_PANIC_REPORT_H_ #include @@ -37,8 +37,8 @@ __BEGIN_DECLS *******************************************************************************/ typedef struct subs_entry_t { - const char * substring; - char substitute; + const char * substring; + char substitute; } subs_entry_t; /* Prefix substitution list. Common prefixes are replaced with a single @@ -48,30 +48,30 @@ typedef struct subs_entry_t { * be in descending frequency order. */ subs_entry_t kext_identifier_prefix_subs[] = { - { "com.apple.driver.", '>' }, - { "com.apple.iokit.", '|' }, - { "com.apple.security.", '$' }, - { "com.apple.", '@' }, + { "com.apple.driver.", '>' }, + { "com.apple.iokit.", '|' }, + { "com.apple.security.", '$' }, + { "com.apple.", '@' }, - { (char *)0, '\0' } + { (char *)0, '\0' } }; /* Substring substitution list. Substrings are replaced with a '!' followed * by a single letter mapping to the original string. - * + * * List should be in descending frequency order, and within * groups containing same prefix, in descending length order. */ subs_entry_t kext_identifier_substring_subs[] = { - { "AppleUSB", 'U' }, - { "Apple", 'A' }, - { "Family", 'F' }, - { "Storage", 'S' }, - { "Controller", 'C' }, - { "Bluetooth", 'B' }, - { "Intel", 'I' }, + { "AppleUSB", 'U' }, + { "Apple", 'A' }, + { "Family", 'F' }, + { "Storage", 'S' }, + { "Controller", 'C' }, + { "Bluetooth", 'B' }, + { "Intel", 'I' }, - { (char *)0, '\0' } + { (char *)0, '\0' } }; __END_DECLS diff --git a/libkern/libkern/kext_request_keys.h b/libkern/libkern/kext_request_keys.h index a04611bc5..0cd79f5ff 100644 --- a/libkern/libkern/kext_request_keys.h +++ b/libkern/libkern/kext_request_keys.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
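To make the substitution tables above concrete: a panic report would compress "com.apple.driver.AppleUSBHostStorage" by first rewriting the prefix to '>' and then each listed substring to '!' plus its letter, yielding ">!UHost!S". Note that the descending-frequency ordering also happens to put the longer, more specific "com.apple.driver." ahead of "com.apple.", which a first-match scan relies on. A minimal sketch of the prefix pass (the helper is hypothetical; the substring pass is analogous):

    #include <stdio.h>
    #include <string.h>

    /* Apply the first matching prefix substitution from
     * kext_identifier_prefix_subs; copy the identifier through
     * unchanged when nothing matches. */
    static void
    compress_prefix(const char *ident, char *out, size_t outlen)
    {
            const subs_entry_t *e;

            for (e = kext_identifier_prefix_subs; e->substring != NULL; e++) {
                    size_t n = strlen(e->substring);
                    if (strncmp(ident, e->substring, n) == 0) {
                            snprintf(out, outlen, "%c%s", e->substitute, ident + n);
                            return;
                    }
            }
            snprintf(out, outlen, "%s", ident);
    }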
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -77,7 +77,7 @@ extern "C" { * These requests do not require a privileged host port, as they just * return information about loaded kexts. **********/ - + /* Predicate: Get Loaded Kext Info * Argument: (None) * Response: An array of information about loaded kexts (see OSKextLib.h). @@ -128,7 +128,7 @@ extern "C" { * must have access to a privileged host port or these requests result * in an op_result of kOSKextReturnNotPrivileged. **********/ - + /* Predicate: Get Kernel Requests * Argument: (None) * Response: An array of kernel requests (see below). @@ -206,7 +206,7 @@ extern "C" { * These requests come from within the kernel, and kextd retrieves * them using kKextRequestPredicateGetKernelRequests. **********/ - + /* Predicate: Kext Load Request * Argument: kKextRequestArgumentBundleIdentifierKey * Response: Asynchronous via a kKextRequestPredicateLoad from kextd @@ -380,7 +380,7 @@ extern "C" { * load behavior, but the OSKext user-level library makes them all * available in OSKextLoadWithOptions(). **********/ - + /* Argument: StartExclude * Type: Integer, corresponding to OSKextExcludeLevel * Default: kOSKextExcludeNone if not specified diff --git a/libkern/libkern/kxld.h b/libkern/libkern/kxld.h index 5b7d74bfb..588ddf0af 100644 --- a/libkern/libkern/kxld.h +++ b/libkern/libkern/kxld.h @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_H @@ -53,72 +53,71 @@ * pagesize The target page size (0 for host page size) *******************************************************************************/ kern_return_t kxld_create_context( - KXLDContext **context, - KXLDAllocateCallback allocate_callback, - KXLDLoggingCallback log_callback, - KXLDFlags flags, - cpu_type_t cputype, - cpu_subtype_t cpusubtype, - vm_size_t pagesize) -__attribute__((nonnull(1),visibility("default"))); + KXLDContext **context, + KXLDAllocateCallback allocate_callback, + KXLDLoggingCallback log_callback, + KXLDFlags flags, + cpu_type_t cputype, + cpu_subtype_t cpusubtype, + vm_size_t pagesize) +__attribute__((nonnull(1), visibility("default"))); /******************************************************************************* * Destroys a link context and frees all associated memory. Should be called at * the end of a link thread's life. *******************************************************************************/ void kxld_destroy_context( - KXLDContext *context) - __attribute__((nonnull,visibility("default"))); + KXLDContext *context) +__attribute__((nonnull, visibility("default"))); /******************************************************************************* -* Links a kext against its dependencies, using a callback to allocate the memory -* at which it will be located. -* NOTE: The object data itself must be mmapped with PROT_WRITE and MAP_PRIVATE -* context The KXLDContext object for the current link thread. -* file The kext object file read into memory. -* Supported formats: Mach-O, Mach-O64, Fat. -* size The size of the kext in memory. Must be nonzero. -* name The name, usually the bundle identifier, of the kext -* callback_data Data that is to be passed to the callback functions. -* dependencies An array of pointers to the kexts upon which this kext -* is dependent. -* num_dependencies Number of entries in the 'dependencies' array. -* linked_object This will be set to the address of the linked kext -* object. If the address provided by the -* kxld_alloc_callback is considered writable, this -* pointer will be set to that address. Otherwise, the -* linked object will be written to a temporary buffer -* that should be freed by the caller. -* kmod_info_kern Kernel address of the kmod_info_t structure. -******************************************************************************/ + * Links a kext against its dependencies, using a callback to allocate the memory + * at which it will be located. + * NOTE: The object data itself must be mmapped with PROT_WRITE and MAP_PRIVATE + * context The KXLDContext object for the current link thread. + * file The kext object file read into memory. + * Supported formats: Mach-O, Mach-O64, Fat. + * size The size of the kext in memory. Must be nonzero. + * name The name, usually the bundle identifier, of the kext + * callback_data Data that is to be passed to the callback functions. + * dependencies An array of pointers to the kexts upon which this kext + * is dependent. + * num_dependencies Number of entries in the 'dependencies' array. + * linked_object This will be set to the address of the linked kext + * object. If the address provided by the + * kxld_alloc_callback is considered writable, this + * pointer will be set to that address. Otherwise, the + * linked object will be written to a temporary buffer + * that should be freed by the caller. + * kmod_info_kern Kernel address of the kmod_info_t structure. 
+ ******************************************************************************/ kern_return_t kxld_link_file( - KXLDContext *context, - u_char *file, - u_long size, - const char *name, - void *callback_data, - KXLDDependency *dependencies, - u_int num_dependencies, - u_char **linked_object, - kxld_addr_t *kmod_info_kern) -__attribute__((nonnull(1,2,4,6,8,9), visibility("default"))); + KXLDContext *context, + u_char *file, + u_long size, + const char *name, + void *callback_data, + KXLDDependency *dependencies, + u_int num_dependencies, + u_char **linked_object, + kxld_addr_t *kmod_info_kern) +__attribute__((nonnull(1, 2, 4, 6, 8, 9), visibility("default"))); kern_return_t kxld_link_split_file( - KXLDContext *context, - splitKextLinkInfo *link_info, - const char *name, - void *callback_data, - KXLDDependency *dependencies, - u_int num_dependencies, - kxld_addr_t *kmod_info_kern) -__attribute__((nonnull(1,2,3,5,7), visibility("default"))); + KXLDContext *context, + splitKextLinkInfo *link_info, + const char *name, + void *callback_data, + KXLDDependency *dependencies, + u_int num_dependencies, + kxld_addr_t *kmod_info_kern) +__attribute__((nonnull(1, 2, 3, 5, 7), visibility("default"))); /******************************************************************************* *******************************************************************************/ boolean_t kxld_validate_copyright_string(const char *str) - __attribute__((pure, nonnull, visibility("default"))); +__attribute__((pure, nonnull, visibility("default"))); #endif // _KXLD_H_ - diff --git a/libkern/libkern/kxld_types.h b/libkern/libkern/kxld_types.h index 417aaf54e..595aada74 100644 --- a/libkern/libkern/kxld_types.h +++ b/libkern/libkern/kxld_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2007-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KXLD_TYPES_H @@ -34,7 +34,7 @@ #include /******************************************************************************* -* Macros +* Macros *******************************************************************************/ /* For 32-bit-specific linking code */ @@ -106,7 +106,7 @@ typedef struct kxld_context KXLDContext; /* Unless we're in a 32-bit kernel, all internal math is performed in 64 bits * and cast to smaller values as needed by the architecture for which we are - * linking. All returned arguments should be handled similarly. + * linking. All returned arguments should be handled similarly. 
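Tying the two entry points above together: a host-side linker tool would create one context per link thread with its allocator and logger, then hand kxld_link_file the raw kext image and its dependency list. A heavily hedged sketch (the driver function, callback names, and the x86_64 CPU constants are assumptions about a typical caller, not part of this header):

    /* Hypothetical driver for one link operation; alloc_cb and log_cb
     * follow KXLDAllocateCallback / KXLDLoggingCallback from
     * kxld_types.h. */
    static kern_return_t
    link_one_kext(u_char *file, u_long size, const char *name,
        KXLDDependency *deps, u_int ndeps,
        KXLDAllocateCallback alloc_cb, KXLDLoggingCallback log_cb)
    {
            KXLDContext *ctx = NULL;
            u_char *linked = NULL;      /* may point into alloc_cb's memory */
            kxld_addr_t kmod_info = 0;
            kern_return_t kr;

            kr = kxld_create_context(&ctx, alloc_cb, log_cb, kKxldFlagDefault,
                CPU_TYPE_X86_64, CPU_SUBTYPE_X86_64_ALL, 0 /* host page size */);
            if (kr != KERN_SUCCESS) {
                    return kr;
            }
            kr = kxld_link_file(ctx, file, size, name, NULL /* callback_data */,
                deps, ndeps, &linked, &kmod_info);
            kxld_destroy_context(ctx);
            return kr;
    }

Per the documentation above, if the allocation callback did not mark its memory writable, linked instead points at a temporary buffer that the caller must free.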
* Note: This size can be increased for future architectural size increases */ #if KERNEL && !__LP64__ @@ -118,31 +118,31 @@ typedef uint64_t kxld_size_t; #endif /* KERNEL && !__LP64__ */ typedef struct splitKextLinkInfo { - u_char * kextExecutable; // kext we will link - size_t kextSize; // size of kextExecutable - u_char * linkedKext; // linked kext - size_t linkedKextSize; // size of linkedKext - uint64_t vmaddr_TEXT; // vmaddr of kext __TEXT segment - uint64_t vmaddr_TEXT_EXEC; // vmaddr of kext __TEXT_EXEC segment - uint64_t vmaddr_DATA; // vmaddr of kext __DATA segment - uint64_t vmaddr_DATA_CONST; // vmaddr of kext __DATA_CONST segment - uint64_t vmaddr_LINKEDIT; // vmaddr of kext __LINKEDIT segment - uint64_t vmaddr_LLVM_COV; // vmaddr of kext __LLVM_COV segment - uint32_t kaslr_offsets_count; // offsets into the kext to slide - uint32_t * kaslr_offsets; // offsets into the kext to slide + u_char * kextExecutable; // kext we will link + size_t kextSize; // size of kextExecutable + u_char * linkedKext; // linked kext + size_t linkedKextSize; // size of linkedKext + uint64_t vmaddr_TEXT; // vmaddr of kext __TEXT segment + uint64_t vmaddr_TEXT_EXEC;// vmaddr of kext __TEXT_EXEC segment + uint64_t vmaddr_DATA; // vmaddr of kext __DATA segment + uint64_t vmaddr_DATA_CONST;// vmaddr of kext __DATA_CONST segment + uint64_t vmaddr_LINKEDIT;// vmaddr of kext __LINKEDIT segment + uint64_t vmaddr_LLVM_COV;// vmaddr of kext __LLVM_COV segment + uint32_t kaslr_offsets_count;// offsets into the kext to slide + uint32_t * kaslr_offsets; // offsets into the kext to slide } splitKextLinkInfo; /* Flags for general linker behavior */ enum kxld_flags { - kKxldFlagDefault = 0x0, - kKXLDFlagIncludeRelocs = 0x01 + kKxldFlagDefault = 0x0, + kKXLDFlagIncludeRelocs = 0x01 }; typedef enum kxld_flags KXLDFlags; /* Flags for the allocation callback */ enum kxld_allocate_flags { - kKxldAllocateDefault = 0x0, - kKxldAllocateWritable = 0x1, /* kxld may write into the allocated memory */ + kKxldAllocateDefault = 0x0, + kKxldAllocateWritable = 0x1, /* kxld may write into the allocated memory */ }; typedef enum kxld_allocate_flags KXLDAllocateFlags; @@ -151,45 +151,44 @@ typedef enum kxld_allocate_flags KXLDAllocateFlags; * respectively. Note that it is compatible with the standard allocators (e.g. * malloc). */ -typedef kxld_addr_t (*KXLDAllocateCallback)(size_t size, +typedef kxld_addr_t (*KXLDAllocateCallback)(size_t size, KXLDAllocateFlags *flags, void *user_data); /* Flags for the logging callback */ typedef enum kxld_log_subsystem { - kKxldLogLinking = 0x0, - kKxldLogPatching = 0x01 + kKxldLogLinking = 0x0, + kKxldLogPatching = 0x01 } KXLDLogSubsystem; typedef enum kxld_log_level { - kKxldLogExplicit = 0x0, - kKxldLogErr = 0x1, - kKxldLogWarn = 0x2, - kKxldLogBasic = 0x3, - kKxldLogDetail = 0x4, - kKxldLogDebug = 0x5 + kKxldLogExplicit = 0x0, + kKxldLogErr = 0x1, + kKxldLogWarn = 0x2, + kKxldLogBasic = 0x3, + kKxldLogDetail = 0x4, + kKxldLogDebug = 0x5 } KXLDLogLevel; /* This structure is used to describe a dependency kext. The kext field * is a pointer to the binary executable of the dependency. The interface * field is a pointer to an optional interface kext that restricts the * symbols that may be accessed in the dependency kext. - * + * * For example, to use this structure with the KPIs, set the kext field * to point to the kernel's Mach-O binary, and set interface to point * to the KPI's Mach-O binary. 
*/ typedef struct kxld_dependency { - u_char * kext; - u_long kext_size; - char * kext_name; - u_char * interface; - u_long interface_size; - char * interface_name; - boolean_t is_direct_dependency; + u_char * kext; + u_long kext_size; + char * kext_name; + u_char * interface; + u_long interface_size; + char * interface_name; + boolean_t is_direct_dependency; } KXLDDependency; -typedef void (*KXLDLoggingCallback) (KXLDLogSubsystem sys, KXLDLogLevel level, +typedef void (*KXLDLoggingCallback) (KXLDLogSubsystem sys, KXLDLogLevel level, const char *format, va_list ap, void *user_data); #endif /* _KXLD_TYPES_H */ - diff --git a/libkern/libkern/locks.h b/libkern/libkern/locks.h index d25747139..cde5b1393 100644 --- a/libkern/libkern/locks.h +++ b/libkern/libkern/locks.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef LIBKERN_LOCKS_H +#ifndef LIBKERN_LOCKS_H #define LIBKERN_LOCKS_H #include @@ -37,4 +37,4 @@ __BEGIN_DECLS __END_DECLS -#endif /* LIBKERN_LOCKS_H */ +#endif /* LIBKERN_LOCKS_H */ diff --git a/libkern/libkern/machine/OSByteOrder.h b/libkern/libkern/machine/OSByteOrder.h index 48602154e..6fd731437 100644 --- a/libkern/libkern/machine/OSByteOrder.h +++ b/libkern/libkern/machine/OSByteOrder.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
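Following the KXLDDependency comment above, linking against kernel KPIs means pointing kext at the kernel binary and interface at the KPI pseudo-kext that filters its exported symbols. A hedged sketch of that setup, plus a logging callback matching the KXLDLoggingCallback typedef above (buffer names and the specific KPI identifier are illustrative):

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical setup: kernel_buf holds the kernel Mach-O image and
     * kpi_buf the interface kext restricting visible symbols. */
    static KXLDDependency
    make_kpi_dependency(u_char *kernel_buf, u_long kernel_size,
        u_char *kpi_buf, u_long kpi_size)
    {
            KXLDDependency dep = {
                    .kext = kernel_buf,
                    .kext_size = kernel_size,
                    .kext_name = (char *)"mach_kernel",
                    .interface = kpi_buf,
                    .interface_size = kpi_size,
                    .interface_name = (char *)"com.apple.kpi.bsd",
                    .is_direct_dependency = TRUE,
            };
            return dep;
    }

    /* A KXLDLoggingCallback that simply forwards to stderr. */
    static void
    log_cb(KXLDLogSubsystem sys, KXLDLogLevel level, const char *format,
        va_list ap, void *user_data)
    {
            (void)sys; (void)level; (void)user_data;
            vfprintf(stderr, format, ap);
            fputc('\n', stderr);
    }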
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,28 +46,28 @@ OS_INLINE uint16_t _OSSwapInt16( - uint16_t data -) + uint16_t data + ) { - return OSSwapConstInt16(data); + return OSSwapConstInt16(data); } OS_INLINE uint32_t _OSSwapInt32( - uint32_t data -) + uint32_t data + ) { - return OSSwapConstInt32(data); + return OSSwapConstInt32(data); } OS_INLINE uint64_t _OSSwapInt64( - uint64_t data -) + uint64_t data + ) { - return OSSwapConstInt64(data); + return OSSwapConstInt64(data); } /* Functions for byte reversed loads. */ @@ -75,34 +75,34 @@ _OSSwapInt64( OS_INLINE uint16_t OSReadSwapInt16( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - uint16_t data = *(volatile uint16_t *)((uintptr_t)base + byteOffset); - return _OSSwapInt16(data); + uint16_t data = *(volatile uint16_t *)((uintptr_t)base + byteOffset); + return _OSSwapInt16(data); } OS_INLINE uint32_t OSReadSwapInt32( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - uint32_t data = *(volatile uint32_t *)((uintptr_t)base + byteOffset); - return _OSSwapInt32(data); + uint32_t data = *(volatile uint32_t *)((uintptr_t)base + byteOffset); + return _OSSwapInt32(data); } OS_INLINE uint64_t OSReadSwapInt64( - const volatile void * base, - uintptr_t byteOffset -) + const volatile void * base, + uintptr_t byteOffset + ) { - uint64_t data = *(volatile uint64_t *)((uintptr_t)base + byteOffset); - return _OSSwapInt64(data); + uint64_t data = *(volatile uint64_t *)((uintptr_t)base + byteOffset); + return _OSSwapInt64(data); } /* Functions for byte reversed stores. */ @@ -110,34 +110,34 @@ OSReadSwapInt64( OS_INLINE void OSWriteSwapInt16( - volatile void * base, - uintptr_t byteOffset, - uint16_t data -) + volatile void * base, + uintptr_t byteOffset, + uint16_t data + ) { - *(volatile uint16_t *)((uintptr_t)base + byteOffset) = _OSSwapInt16(data); + *(volatile uint16_t *)((uintptr_t)base + byteOffset) = _OSSwapInt16(data); } OS_INLINE void OSWriteSwapInt32( - volatile void * base, - uintptr_t byteOffset, - uint32_t data -) + volatile void * base, + uintptr_t byteOffset, + uint32_t data + ) { - *(volatile uint32_t *)((uintptr_t)base + byteOffset) = _OSSwapInt32(data); + *(volatile uint32_t *)((uintptr_t)base + byteOffset) = _OSSwapInt32(data); } OS_INLINE void OSWriteSwapInt64( - volatile void * base, - uintptr_t byteOffset, - uint64_t data -) + volatile void * base, + uintptr_t byteOffset, + uint64_t data + ) { - *(volatile uint64_t *)((uintptr_t)base + byteOffset) = _OSSwapInt64(data); + *(volatile uint64_t *)((uintptr_t)base + byteOffset) = _OSSwapInt64(data); } #endif /* ! _OS_OSBYTEORDERMACHINE_H */ diff --git a/libkern/libkern/mkext.h b/libkern/libkern/mkext.h index 0024e84a0..b16d63bdb 100644 --- a/libkern/libkern/mkext.h +++ b/libkern/libkern/mkext.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
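The OSReadSwap/OSWriteSwap helpers above pair an offset-addressed load or store with a byte swap, which is the usual way to consume big-endian on-disk or wire formats on a little-endian host. A small hedged sketch (the record layout is invented for illustration):

    /* Hypothetical big-endian record: a 16-bit type at offset 0 and a
     * 32-bit length at offset 4. The byte-reversed loads above return
     * host-order values on a little-endian machine. */
    static void
    parse_record(const volatile void *buf, uint16_t *type, uint32_t *length)
    {
            *type   = OSReadSwapInt16(buf, 0);
            *length = OSReadSwapInt32(buf, 4);
    }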
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -86,7 +86,7 @@ cpu_subtype_t cpusubtype; typedef struct mkext_basic_header { - MKEXT_HEADER_CORE + MKEXT_HEADER_CORE } mkext_basic_header; #define MKEXT_HDR_CAST(hdr) ((mkext_basic_header *)(hdr)) @@ -159,16 +159,16 @@ typedef struct mkext_basic_header { #define kMKEXTLoadRequestDisableAutounloadKey "Disable Autounload" typedef struct mkext2_file_entry { - uint32_t compressed_size; // if zero, file is not compressed - uint32_t full_size; // full size of data w/o this struct - uint8_t data[0]; // data is inline to this struct + uint32_t compressed_size;// if zero, file is not compressed + uint32_t full_size; // full size of data w/o this struct + uint8_t data[0]; // data is inline to this struct } mkext2_file_entry; typedef struct mkext2_header { - MKEXT_HEADER_CORE - uint32_t plist_offset; - uint32_t plist_compressed_size; - uint32_t plist_full_size; + MKEXT_HEADER_CORE + uint32_t plist_offset; + uint32_t plist_compressed_size; + uint32_t plist_full_size; } mkext2_header; #define MKEXT2_GET_ENTRY_COMPSIZE(ptr) MKEXT_SWAP((ptr)->compressed_size) @@ -197,21 +197,21 @@ typedef struct mkext2_header { // If all fields are 0 then this file slot is empty // If compsize is zero then the file isn't compressed. typedef struct mkext_file { - uint32_t offset; // 4 bytes - uint32_t compsize; // 4 bytes - uint32_t realsize; // 4 bytes - uint32_t modifiedsecs; // 4 bytes; cast to time_t to use + uint32_t offset; // 4 bytes + uint32_t compsize; // 4 bytes + uint32_t realsize; // 4 bytes + uint32_t modifiedsecs; // 4 bytes; cast to time_t to use } mkext_file; // The plist file entry is mandatory, but module may be empty typedef struct mkext_kext { - mkext_file plist; // 16 bytes - mkext_file module; // 16 bytes + mkext_file plist; // 16 bytes + mkext_file module; // 16 bytes } mkext_kext; typedef struct mkext_header { - MKEXT_HEADER_CORE - mkext_kext kext[1]; // 32 bytes/entry + MKEXT_HEADER_CORE + mkext_kext kext[1]; // 32 bytes/entry } mkext_header; typedef mkext_header mkext1_header; @@ -222,9 +222,9 @@ typedef mkext_header mkext1_header; #define MKEXT1_GET_ENTRY_FULLSIZE(ptr) (MKEXT_SWAP(MKEXT1_ENTRY_CAST(ptr)->realsize)) #define MKEXT1_GET_ENTRY_MODTIME(ptr) ((time_t)MKEXT_SWAP(MKEXT1_ENTRY_CAST(ptr)->modifiedsecs)) #define MKEXT1_ENTRY_EXISTS(ptr) (MKEXT1_GET_ENTRY_OFFSET(ptr) || \ - MKEXT1_GET_ENTRY_FULLSIZE(ptr) || \ - MKEXT_GET_ENTRY_COMPSIZE(ptr) || \ - MKEXT_GET_ENTRY_COMPSIZE(ptr)) + MKEXT1_GET_ENTRY_FULLSIZE(ptr) || \ + MKEXT_GET_ENTRY_COMPSIZE(ptr) || \ + MKEXT_GET_ENTRY_COMPSIZE(ptr)) #define MKEXT1_GET_KEXT(hdr, i) ((mkext_kext *)&(MKEXT1_HDR_CAST(hdr)->kext[(i)])) #define MKEXT1_GET_KEXT_PLIST(hdr, i) (MKEXT1_ENTRY_CAST(&(MKEXT1_GET_KEXT((hdr), (i))->plist))) diff --git a/libkern/libkern/prelink.h b/libkern/libkern/prelink.h index 38996dbb9..eb27f88ac 100644 --- a/libkern/libkern/prelink.h +++ b/libkern/libkern/prelink.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Computer, Inc. All rights reserved. 
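Restating the mkext2 layout above in code: each mkext2_file_entry is a length-prefixed blob stored inline, where compressed_size == 0 means data[] holds full_size uncompressed bytes, and a nonzero compressed_size gives the stored (compressed) length. A hedged sketch of stepping over one entry (bounds checking and the byte swapping done by the MKEXT accessor macros are omitted, and any alignment padding the real container imposes is ignored):

    /* Advance from one mkext2_file_entry to the byte just past its data.
     * Real code must validate sizes against the container length and use
     * the MKEXT2_GET_ENTRY_* accessors to byte-swap on-disk fields. */
    static mkext2_file_entry *
    next_entry(mkext2_file_entry *e)
    {
            uint32_t data_len = e->compressed_size != 0 ? e->compressed_size
                                                        : e->full_size;
            return (mkext2_file_entry *)(e->data + data_len);
    }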
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -58,4 +58,3 @@ #define kPrelinkInfoKCIDKey "_PrelinkKCID" #endif /* _PRELINK_H_ */ - diff --git a/libkern/libkern/section_keywords.h b/libkern/libkern/section_keywords.h index 511d9db46..02d71ee0c 100644 --- a/libkern/libkern/section_keywords.h +++ b/libkern/libkern/section_keywords.h @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,10 +33,10 @@ /* Default behaviour */ #ifndef SECURITY_READ_ONLY_EARLY #define __PLACE_IN_SECTION(__segment__section) \ - __attribute__((used, section(__segment__section))) + __attribute__((used, section(__segment__section))) -#define SECURITY_READ_ONLY_SPECIAL_SECTION(_t,__segment__section) \ - const _t __PLACE_IN_SECTION(__segment__section) +#define SECURITY_READ_ONLY_SPECIAL_SECTION(_t, __segment__section) \ + const _t __PLACE_IN_SECTION(__segment__section) #define SECURITY_READ_ONLY_EARLY(_t) const _t diff --git a/libkern/libkern/stack_protector.h b/libkern/libkern/stack_protector.h index d2d3c82b2..2ffcbabca 100644 --- a/libkern/libkern/stack_protector.h +++ b/libkern/libkern/stack_protector.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
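A quick illustration of the section keywords above: SECURITY_READ_ONLY_SPECIAL_SECTION expands to a const-qualified definition pinned into a named segment,section pair through __attribute__((used, section(...))). The segment and section below are invented for the example:

    #include <stdint.h>

    /* Expands to:
     *   const uint32_t __attribute__((used,
     *       section("__DATA_CONST,__example"))) example_table[4] = ...;
     * so the table lands in __DATA_CONST,__example and is kept even
     * if otherwise unreferenced. */
    SECURITY_READ_ONLY_SPECIAL_SECTION(uint32_t, "__DATA_CONST,__example")
    example_table[4] = { 1, 2, 3, 4 };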
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -36,4 +36,3 @@ extern unsigned long __stack_chk_guard; extern void __stack_chk_fail(void); #endif // _STACK_PROTECTOR_H_ - diff --git a/libkern/libkern/sysctl.h b/libkern/libkern/sysctl.h index 38a6c2ac7..e8df6d60c 100644 --- a/libkern/libkern/sysctl.h +++ b/libkern/libkern/sysctl.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef LIBKERN_SYSCTL_H +#ifndef LIBKERN_SYSCTL_H #define LIBKERN_SYSCTL_H #include @@ -115,4 +115,4 @@ int sysctlbyname(const char *, void *, size_t *, void *, size_t); __END_DECLS -#endif /* LIBKERN_SYSCTL_H */ +#endif /* LIBKERN_SYSCTL_H */ diff --git a/libkern/libkern/tree.h b/libkern/libkern/tree.h index 3a26162bd..15b663639 100644 --- a/libkern/libkern/tree.h +++ b/libkern/libkern/tree.h @@ -53,8 +53,8 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef _LIBKERN_TREE_H_ -#define _LIBKERN_TREE_H_ +#ifndef _LIBKERN_TREE_H_ +#define _LIBKERN_TREE_H_ /* * This file defines data structures for different types of trees: @@ -83,720 +83,720 @@ * The maximum height of a red-black tree is 2lg (n+1). 
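Since the macro bodies that follow are dense, a brief hedged sketch of how a consumer wires up the red-black variant may help. The node type, comparator, and tree name are invented; the mytree_RB_INSERT / mytree_RB_FIND functions used directly here are the ones declared by RB_PROTOTYPE below (in the classic BSD convention, insert returns NULL on success or the existing colliding node):

    /* Hypothetical consumer: a red-black tree of nodes keyed by an int. */
    struct mynode {
            RB_ENTRY(mynode) link;   /* embeds left/right/parent pointers */
            int key;
    };
    RB_HEAD(mytree, mynode);

    static int
    mynode_cmp(struct mynode *a, struct mynode *b)
    {
            return (a->key < b->key) ? -1 : (a->key > b->key);
    }

    RB_PROTOTYPE(mytree, mynode, link, mynode_cmp)
    RB_GENERATE(mytree, mynode, link, mynode_cmp)

    static struct mynode *
    insert_and_find(struct mytree *head, struct mynode *n)
    {
            struct mynode key = { .key = n->key };

            RB_INIT(head);
            (void)mytree_RB_INSERT(head, n);
            return mytree_RB_FIND(head, &key);
    }

The splay-tree half has the same shape: SPLAY_HEAD, SPLAY_ENTRY, and SPLAY_GENERATE set up the type, and the SPLAY_INSERT/SPLAY_FIND/SPLAY_FOREACH wrappers defined later in this header dispatch to the generated functions.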
*/ -#define SPLAY_HEAD(name, type) \ -struct name { \ - struct type *sph_root; /* root of the tree */ \ +#define SPLAY_HEAD(name, type) \ +struct name { \ + struct type *sph_root; /* root of the tree */ \ } -#define SPLAY_INITIALIZER(root) \ +#define SPLAY_INITIALIZER(root) \ { NULL } -#define SPLAY_INIT(root) do { \ - (root)->sph_root = NULL; \ -} while (/*CONSTCOND*/ 0) +#define SPLAY_INIT(root) do { \ + (root)->sph_root = NULL; \ +} while ( /*CONSTCOND*/ 0) -#define SPLAY_ENTRY(type) \ -struct { \ - struct type *spe_left; /* left element */ \ - struct type *spe_right; /* right element */ \ +#define SPLAY_ENTRY(type) \ +struct { \ + struct type *spe_left; /* left element */ \ + struct type *spe_right; /* right element */ \ } -#define SPLAY_LEFT(elm, field) (elm)->field.spe_left -#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right -#define SPLAY_ROOT(head) (head)->sph_root -#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) +#define SPLAY_LEFT(elm, field) (elm)->field.spe_left +#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right +#define SPLAY_ROOT(head) (head)->sph_root +#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) /* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ -#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ - SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ - SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ - (head)->sph_root = tmp; \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ - SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ - SPLAY_LEFT(tmp, field) = (head)->sph_root; \ - (head)->sph_root = tmp; \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_LINKLEFT(head, tmp, field) do { \ - SPLAY_LEFT(tmp, field) = (head)->sph_root; \ - tmp = (head)->sph_root; \ - (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_LINKRIGHT(head, tmp, field) do { \ - SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ - tmp = (head)->sph_root; \ - (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ -} while (/*CONSTCOND*/ 0) - -#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ - SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ +#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while ( /*CONSTCOND*/ 0) + +#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ + SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + (head)->sph_root = tmp; \ +} while ( /*CONSTCOND*/ 0) + +#define SPLAY_LINKLEFT(head, tmp, field) do { \ + SPLAY_LEFT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ +} while ( /*CONSTCOND*/ 0) + +#define SPLAY_LINKRIGHT(head, tmp, field) do { \ + SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ + tmp = (head)->sph_root; \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ +} while ( /*CONSTCOND*/ 0) + +#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ + SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\ - SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ - SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ -} while (/*CONSTCOND*/ 0) + SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ + 
SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ +} while ( /*CONSTCOND*/ 0) /* Generates prototypes and inline functions */ -#define SPLAY_PROTOTYPE(name, type, field, cmp) \ -void name##_SPLAY(struct name *, struct type *); \ -void name##_SPLAY_MINMAX(struct name *, int); \ -struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ -struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ - \ -/* Finds the node with the same key as elm */ \ -static __inline struct type * \ -name##_SPLAY_FIND(struct name *head, struct type *elm) \ -{ \ - if (SPLAY_EMPTY(head)) \ - return(NULL); \ - name##_SPLAY(head, elm); \ - if ((cmp)(elm, (head)->sph_root) == 0) \ - return (head->sph_root); \ - return (NULL); \ -} \ - \ -static __inline struct type * \ -name##_SPLAY_NEXT(struct name *head, struct type *elm) \ -{ \ - name##_SPLAY(head, elm); \ - if (SPLAY_RIGHT(elm, field) != NULL) { \ - elm = SPLAY_RIGHT(elm, field); \ - while (SPLAY_LEFT(elm, field) != NULL) { \ - elm = SPLAY_LEFT(elm, field); \ - } \ - } else \ - elm = NULL; \ - return (elm); \ -} \ - \ -static __inline struct type * \ -name##_SPLAY_MIN_MAX(struct name *head, int val) \ -{ \ - name##_SPLAY_MINMAX(head, val); \ - return (SPLAY_ROOT(head)); \ +#define SPLAY_PROTOTYPE(name, type, field, cmp) \ +void name##_SPLAY(struct name *, struct type *); \ +void name##_SPLAY_MINMAX(struct name *, int); \ +struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ +struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ + \ +/* Finds the node with the same key as elm */ \ +static __inline struct type * \ +name##_SPLAY_FIND(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) \ + return(NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) \ + return (head->sph_root); \ + return (NULL); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_NEXT(struct name *head, struct type *elm) \ +{ \ + name##_SPLAY(head, elm); \ + if (SPLAY_RIGHT(elm, field) != NULL) { \ + elm = SPLAY_RIGHT(elm, field); \ + while (SPLAY_LEFT(elm, field) != NULL) { \ + elm = SPLAY_LEFT(elm, field); \ + } \ + } else \ + elm = NULL; \ + return (elm); \ +} \ + \ +static __inline struct type * \ +name##_SPLAY_MIN_MAX(struct name *head, int val) \ +{ \ + name##_SPLAY_MINMAX(head, val); \ + return (SPLAY_ROOT(head)); \ } /* Main splay operation. 
* Moves node close to the key of elm to top */ -#define SPLAY_GENERATE(name, type, field, cmp) \ -struct type * \ -name##_SPLAY_INSERT(struct name *head, struct type *elm) \ -{ \ - if (SPLAY_EMPTY(head)) { \ - SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ - } else { \ - int __comp; \ - name##_SPLAY(head, elm); \ - __comp = (cmp)(elm, (head)->sph_root); \ - if(__comp < 0) { \ - SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\ - SPLAY_RIGHT(elm, field) = (head)->sph_root; \ - SPLAY_LEFT((head)->sph_root, field) = NULL; \ - } else if (__comp > 0) { \ - SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\ - SPLAY_LEFT(elm, field) = (head)->sph_root; \ - SPLAY_RIGHT((head)->sph_root, field) = NULL; \ - } else \ - return ((head)->sph_root); \ - } \ - (head)->sph_root = (elm); \ - return (NULL); \ -} \ - \ -struct type * \ -name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ -{ \ - struct type *__tmp; \ - if (SPLAY_EMPTY(head)) \ - return (NULL); \ - name##_SPLAY(head, elm); \ - if ((cmp)(elm, (head)->sph_root) == 0) { \ - if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ - (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\ - } else { \ - __tmp = SPLAY_RIGHT((head)->sph_root, field); \ - (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\ - name##_SPLAY(head, elm); \ - SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ - } \ - return (elm); \ - } \ - return (NULL); \ -} \ - \ -void \ -name##_SPLAY(struct name *head, struct type *elm) \ -{ \ - struct type __node, *__left, *__right, *__tmp; \ - int __comp; \ +#define SPLAY_GENERATE(name, type, field, cmp) \ +struct type * \ +name##_SPLAY_INSERT(struct name *head, struct type *elm) \ +{ \ + if (SPLAY_EMPTY(head)) { \ + SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ + } else { \ + int __comp; \ + name##_SPLAY(head, elm); \ + __comp = (cmp)(elm, (head)->sph_root); \ + if(__comp < 0) { \ + SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\ + SPLAY_RIGHT(elm, field) = (head)->sph_root; \ + SPLAY_LEFT((head)->sph_root, field) = NULL; \ + } else if (__comp > 0) { \ + SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\ + SPLAY_LEFT(elm, field) = (head)->sph_root; \ + SPLAY_RIGHT((head)->sph_root, field) = NULL; \ + } else \ + return ((head)->sph_root); \ + } \ + (head)->sph_root = (elm); \ + return (NULL); \ +} \ + \ +struct type * \ +name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *__tmp; \ + if (SPLAY_EMPTY(head)) \ + return (NULL); \ + name##_SPLAY(head, elm); \ + if ((cmp)(elm, (head)->sph_root) == 0) { \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ + (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\ + } else { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\ + name##_SPLAY(head, elm); \ + SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ + } \ + return (elm); \ + } \ + return (NULL); \ +} \ + \ +void \ +name##_SPLAY(struct name *head, struct type *elm) \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ + int __comp; \ \ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ - __left = __right = &__node; \ + __left = __right = &__node; \ \ - while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ - if (__comp < 0) { \ - __tmp = SPLAY_LEFT((head)->sph_root, field); \ - if (__tmp == NULL) \ - break; \ - if ((cmp)(elm, __tmp) < 0){ \ - SPLAY_ROTATE_RIGHT(head, __tmp, field); \ - if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ - break; 
\ - } \ - SPLAY_LINKLEFT(head, __right, field); \ - } else if (__comp > 0) { \ - __tmp = SPLAY_RIGHT((head)->sph_root, field); \ - if (__tmp == NULL) \ - break; \ - if ((cmp)(elm, __tmp) > 0){ \ - SPLAY_ROTATE_LEFT(head, __tmp, field); \ - if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ - break; \ - } \ - SPLAY_LINKRIGHT(head, __left, field); \ - } \ - } \ - SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ -} \ - \ -/* Splay with either the minimum or the maximum element \ - * Used to find minimum or maximum element in tree. \ - */ \ + while ((__comp = (cmp)(elm, (head)->sph_root)) != 0) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if ((cmp)(elm, __tmp) > 0){ \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ +} \ + \ +/* Splay with either the minimum or the maximum element \ + * Used to find minimum or maximum element in tree. \ + */ \ void name##_SPLAY_MINMAX(struct name *head, int __comp) \ -{ \ - struct type __node, *__left, *__right, *__tmp; \ +{ \ + struct type __node, *__left, *__right, *__tmp; \ \ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ - __left = __right = &__node; \ + __left = __right = &__node; \ \ - while (1) { \ - if (__comp < 0) { \ - __tmp = SPLAY_LEFT((head)->sph_root, field); \ - if (__tmp == NULL) \ - break; \ - if (__comp < 0){ \ - SPLAY_ROTATE_RIGHT(head, __tmp, field); \ - if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ - break; \ - } \ - SPLAY_LINKLEFT(head, __right, field); \ - } else if (__comp > 0) { \ - __tmp = SPLAY_RIGHT((head)->sph_root, field); \ - if (__tmp == NULL) \ - break; \ - if (__comp > 0) { \ - SPLAY_ROTATE_LEFT(head, __tmp, field); \ - if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ - break; \ - } \ - SPLAY_LINKRIGHT(head, __left, field); \ - } \ - } \ - SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ + while (1) { \ + if (__comp < 0) { \ + __tmp = SPLAY_LEFT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp < 0){ \ + SPLAY_ROTATE_RIGHT(head, __tmp, field); \ + if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKLEFT(head, __right, field); \ + } else if (__comp > 0) { \ + __tmp = SPLAY_RIGHT((head)->sph_root, field); \ + if (__tmp == NULL) \ + break; \ + if (__comp > 0) { \ + SPLAY_ROTATE_LEFT(head, __tmp, field); \ + if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ + break; \ + } \ + SPLAY_LINKRIGHT(head, __left, field); \ + } \ + } \ + SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ } -#define SPLAY_NEGINF -1 -#define SPLAY_INF 1 - -#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) -#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) -#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) -#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) -#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ - : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) -#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? 
NULL \ - : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) - -#define SPLAY_FOREACH(x, name, head) \ - for ((x) = SPLAY_MIN(name, head); \ - (x) != NULL; \ +#define SPLAY_NEGINF -1 +#define SPLAY_INF 1 + +#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) +#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) +#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) +#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) +#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) +#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ + : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) + +#define SPLAY_FOREACH(x, name, head) \ + for ((x) = SPLAY_MIN(name, head); \ + (x) != NULL; \ (x) = SPLAY_NEXT(name, head, x)) /* Macros that define a red-black tree */ -#define RB_HEAD(name, type) \ -struct name { \ - struct type *rbh_root; /* root of the tree */ \ +#define RB_HEAD(name, type) \ +struct name { \ + struct type *rbh_root; /* root of the tree */ \ } -#define RB_INITIALIZER(root) \ +#define RB_INITIALIZER(root) \ { NULL } -#define RB_INIT(root) do { \ - (root)->rbh_root = NULL; \ -} while (/*CONSTCOND*/ 0) - -#define RB_BLACK 0 -#define RB_RED 1 -#define RB_PLACEHOLDER NULL -#define RB_ENTRY(type) \ -struct { \ - struct type *rbe_left; /* left element */ \ - struct type *rbe_right; /* right element */ \ - struct type *rbe_parent; /* parent element */ \ +#define RB_INIT(root) do { \ + (root)->rbh_root = NULL; \ +} while ( /*CONSTCOND*/ 0) + +#define RB_BLACK 0 +#define RB_RED 1 +#define RB_PLACEHOLDER NULL +#define RB_ENTRY(type) \ +struct { \ + struct type *rbe_left; /* left element */ \ + struct type *rbe_right; /* right element */ \ + struct type *rbe_parent; /* parent element */ \ } -#define RB_COLOR_MASK (uintptr_t)0x1 -#define RB_LEFT(elm, field) (elm)->field.rbe_left -#define RB_RIGHT(elm, field) (elm)->field.rbe_right -#define _RB_PARENT(elm, field) (elm)->field.rbe_parent -#define RB_ROOT(head) (head)->rbh_root -#define RB_EMPTY(head) (RB_ROOT(head) == NULL) +#define RB_COLOR_MASK (uintptr_t)0x1 +#define RB_LEFT(elm, field) (elm)->field.rbe_left +#define RB_RIGHT(elm, field) (elm)->field.rbe_right +#define _RB_PARENT(elm, field) (elm)->field.rbe_parent +#define RB_ROOT(head) (head)->rbh_root +#define RB_EMPTY(head) (RB_ROOT(head) == NULL) -#define RB_SET(name, elm, parent, field) do { \ - name##_RB_SETPARENT(elm, parent); \ - RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ - name##_RB_SETCOLOR(elm, RB_RED); \ -} while (/*CONSTCOND*/ 0) +#define RB_SET(name, elm, parent, field) do { \ + name##_RB_SETPARENT(elm, parent); \ + RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ + name##_RB_SETCOLOR(elm, RB_RED); \ +} while ( /*CONSTCOND*/ 0) -#define RB_SET_BLACKRED(name, black, red, field) do { \ - name##_RB_SETCOLOR(black, RB_BLACK); \ - name##_RB_SETCOLOR(red, RB_RED); \ -} while (/*CONSTCOND*/ 0) +#define RB_SET_BLACKRED(name, black, red, field) do { \ + name##_RB_SETCOLOR(black, RB_BLACK); \ + name##_RB_SETCOLOR(red, RB_RED); \ +} while ( /*CONSTCOND*/ 0) #ifndef RB_AUGMENT #define RB_AUGMENT(x) (void)(x) #endif -#define RB_ROTATE_LEFT(name, head, elm, tmp, field) do { \ - (tmp) = RB_RIGHT(elm, field); \ - if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \ - name##_RB_SETPARENT(RB_LEFT(tmp, field),(elm)); \ - } \ - RB_AUGMENT(elm); \ - if (name##_RB_SETPARENT(tmp, name##_RB_GETPARENT(elm)) != NULL) { \ - if ((elm) == RB_LEFT(name##_RB_GETPARENT(elm), field)) \ - RB_LEFT(name##_RB_GETPARENT(elm), field) = (tmp); \ - else \ - 
RB_RIGHT(name##_RB_GETPARENT(elm), field) = (tmp); \ - } else \ - (head)->rbh_root = (tmp); \ - RB_LEFT(tmp, field) = (elm); \ - name##_RB_SETPARENT(elm, (tmp)); \ - RB_AUGMENT(tmp); \ - if ((name##_RB_GETPARENT(tmp))) \ - RB_AUGMENT(name##_RB_GETPARENT(tmp)); \ -} while (/*CONSTCOND*/ 0) - -#define RB_ROTATE_RIGHT(name, head, elm, tmp, field) do { \ - (tmp) = RB_LEFT(elm, field); \ - if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \ - name##_RB_SETPARENT(RB_RIGHT(tmp, field), (elm)); \ - } \ - RB_AUGMENT(elm); \ - if (name##_RB_SETPARENT(tmp, name##_RB_GETPARENT(elm)) != NULL) { \ - if ((elm) == RB_LEFT(name##_RB_GETPARENT(elm), field)) \ - RB_LEFT(name##_RB_GETPARENT(elm), field) = (tmp); \ - else \ - RB_RIGHT(name##_RB_GETPARENT(elm), field) = (tmp); \ - } else \ - (head)->rbh_root = (tmp); \ - RB_RIGHT(tmp, field) = (elm); \ - name##_RB_SETPARENT(elm, tmp); \ - RB_AUGMENT(tmp); \ - if ((name##_RB_GETPARENT(tmp))) \ - RB_AUGMENT(name##_RB_GETPARENT(tmp)); \ -} while (/*CONSTCOND*/ 0) +#define RB_ROTATE_LEFT(name, head, elm, tmp, field) do { \ + (tmp) = RB_RIGHT(elm, field); \ + if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field)) != NULL) { \ + name##_RB_SETPARENT(RB_LEFT(tmp, field),(elm)); \ + } \ + RB_AUGMENT(elm); \ + if (name##_RB_SETPARENT(tmp, name##_RB_GETPARENT(elm)) != NULL) { \ + if ((elm) == RB_LEFT(name##_RB_GETPARENT(elm), field)) \ + RB_LEFT(name##_RB_GETPARENT(elm), field) = (tmp); \ + else \ + RB_RIGHT(name##_RB_GETPARENT(elm), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_LEFT(tmp, field) = (elm); \ + name##_RB_SETPARENT(elm, (tmp)); \ + RB_AUGMENT(tmp); \ + if ((name##_RB_GETPARENT(tmp))) \ + RB_AUGMENT(name##_RB_GETPARENT(tmp)); \ +} while ( /*CONSTCOND*/ 0) + +#define RB_ROTATE_RIGHT(name, head, elm, tmp, field) do { \ + (tmp) = RB_LEFT(elm, field); \ + if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field)) != NULL) { \ + name##_RB_SETPARENT(RB_RIGHT(tmp, field), (elm)); \ + } \ + RB_AUGMENT(elm); \ + if (name##_RB_SETPARENT(tmp, name##_RB_GETPARENT(elm)) != NULL) { \ + if ((elm) == RB_LEFT(name##_RB_GETPARENT(elm), field)) \ + RB_LEFT(name##_RB_GETPARENT(elm), field) = (tmp); \ + else \ + RB_RIGHT(name##_RB_GETPARENT(elm), field) = (tmp); \ + } else \ + (head)->rbh_root = (tmp); \ + RB_RIGHT(tmp, field) = (elm); \ + name##_RB_SETPARENT(elm, tmp); \ + RB_AUGMENT(tmp); \ + if ((name##_RB_GETPARENT(tmp))) \ + RB_AUGMENT(name##_RB_GETPARENT(tmp)); \ +} while ( /*CONSTCOND*/ 0) /* Generates prototypes and inline functions */ -#define RB_PROTOTYPE(name, type, field, cmp) \ -void name##_RB_INSERT_COLOR(struct name *, struct type *); \ +#define RB_PROTOTYPE(name, type, field, cmp) \ +void name##_RB_INSERT_COLOR(struct name *, struct type *); \ void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\ -struct type *name##_RB_REMOVE(struct name *, struct type *); \ -struct type *name##_RB_INSERT(struct name *, struct type *); \ -struct type *name##_RB_FIND(struct name *, struct type *); \ -struct type *name##_RB_NEXT(struct type *); \ -struct type *name##_RB_MINMAX(struct name *, int); \ -struct type *name##_RB_GETPARENT(struct type*); \ -struct type *name##_RB_SETPARENT(struct type*, struct type*); \ -int name##_RB_GETCOLOR(struct type*); \ -void name##_RB_SETCOLOR(struct type*,int); +struct type *name##_RB_REMOVE(struct name *, struct type *); \ +struct type *name##_RB_INSERT(struct name *, struct type *); \ +struct type *name##_RB_FIND(struct name *, struct type *); \ +struct type *name##_RB_NEXT(struct type *); \ +struct type 
*name##_RB_MINMAX(struct name *, int); \ +struct type *name##_RB_GETPARENT(struct type*); \ +struct type *name##_RB_SETPARENT(struct type*, struct type*); \ +int name##_RB_GETCOLOR(struct type*); \ +void name##_RB_SETCOLOR(struct type*,int); /* Generates prototypes (with storage class) and inline functions */ -#define RB_PROTOTYPE_SC(_sc_, name, type, field, cmp) \ -_sc_ void name##_RB_INSERT_COLOR(struct name *, struct type *); \ +#define RB_PROTOTYPE_SC(_sc_, name, type, field, cmp) \ +_sc_ void name##_RB_INSERT_COLOR(struct name *, struct type *); \ _sc_ void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *); \ -_sc_ struct type *name##_RB_REMOVE(struct name *, struct type *); \ -_sc_ struct type *name##_RB_INSERT(struct name *, struct type *); \ -_sc_ struct type *name##_RB_FIND(struct name *, struct type *); \ -_sc_ struct type *name##_RB_NEXT(struct type *); \ -_sc_ struct type *name##_RB_MINMAX(struct name *, int); \ -_sc_ struct type *name##_RB_GETPARENT(struct type*); \ -_sc_ struct type *name##_RB_SETPARENT(struct type*, struct type*); \ -_sc_ int name##_RB_GETCOLOR(struct type*); \ +_sc_ struct type *name##_RB_REMOVE(struct name *, struct type *); \ +_sc_ struct type *name##_RB_INSERT(struct name *, struct type *); \ +_sc_ struct type *name##_RB_FIND(struct name *, struct type *); \ +_sc_ struct type *name##_RB_NEXT(struct type *); \ +_sc_ struct type *name##_RB_MINMAX(struct name *, int); \ +_sc_ struct type *name##_RB_GETPARENT(struct type*); \ +_sc_ struct type *name##_RB_SETPARENT(struct type*, struct type*); \ +_sc_ int name##_RB_GETCOLOR(struct type*); \ _sc_ void name##_RB_SETCOLOR(struct type*,int); /* Main rb operation. * Moves node close to the key of elm to top */ -#define RB_GENERATE(name, type, field, cmp) \ -struct type *name##_RB_GETPARENT(struct type *elm) { \ - struct type *parent = _RB_PARENT(elm, field); \ - if( parent != NULL) { \ - parent = (struct type*)((uintptr_t)parent & ~RB_COLOR_MASK);\ - return( (struct type*) ( (parent == (struct type*) RB_PLACEHOLDER) ? NULL: parent));\ - } \ - return((struct type*)NULL); \ -} \ -int name##_RB_GETCOLOR(struct type *elm) { \ - int color = 0; \ +#define RB_GENERATE(name, type, field, cmp) \ +struct type *name##_RB_GETPARENT(struct type *elm) { \ + struct type *parent = _RB_PARENT(elm, field); \ + if( parent != NULL) { \ + parent = (struct type*)((uintptr_t)parent & ~RB_COLOR_MASK);\ + return( (struct type*) ( (parent == (struct type*) RB_PLACEHOLDER) ? 
NULL: parent));\ + } \ + return((struct type*)NULL); \ +} \ +int name##_RB_GETCOLOR(struct type *elm) { \ + int color = 0; \ color = (int)((uintptr_t)_RB_PARENT(elm,field) & RB_COLOR_MASK);\ - return(color); \ -} \ -void name##_RB_SETCOLOR(struct type *elm,int color) { \ - struct type *parent = name##_RB_GETPARENT(elm); \ - if(parent == (struct type*)NULL) \ - parent = (struct type*) RB_PLACEHOLDER; \ + return(color); \ +} \ +void name##_RB_SETCOLOR(struct type *elm,int color) { \ + struct type *parent = name##_RB_GETPARENT(elm); \ + if(parent == (struct type*)NULL) \ + parent = (struct type*) RB_PLACEHOLDER; \ _RB_PARENT(elm, field) = (struct type*)((uintptr_t)parent | (unsigned int)color);\ -} \ -struct type *name##_RB_SETPARENT(struct type *elm, struct type *parent) { \ - int color = name##_RB_GETCOLOR(elm); \ - _RB_PARENT(elm, field) = parent; \ - if(color) name##_RB_SETCOLOR(elm, color); \ - return(name##_RB_GETPARENT(elm)); \ -} \ - \ -void \ -name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \ -{ \ - struct type *parent, *gparent, *tmp; \ - while ((parent = name##_RB_GETPARENT(elm)) != NULL && \ - name##_RB_GETCOLOR(parent) == RB_RED) { \ - gparent = name##_RB_GETPARENT(parent); \ - if (parent == RB_LEFT(gparent, field)) { \ - tmp = RB_RIGHT(gparent, field); \ - if (tmp && name##_RB_GETCOLOR(tmp) == RB_RED) { \ - name##_RB_SETCOLOR(tmp, RB_BLACK); \ - RB_SET_BLACKRED(name, parent, gparent, field);\ - elm = gparent; \ - continue; \ - } \ - if (RB_RIGHT(parent, field) == elm) { \ - RB_ROTATE_LEFT(name, head, parent, tmp, field);\ - tmp = parent; \ - parent = elm; \ - elm = tmp; \ - } \ - RB_SET_BLACKRED(name, parent, gparent, field); \ - RB_ROTATE_RIGHT(name,head, gparent, tmp, field); \ - } else { \ - tmp = RB_LEFT(gparent, field); \ - if (tmp && name##_RB_GETCOLOR(tmp) == RB_RED) { \ - name##_RB_SETCOLOR(tmp, RB_BLACK); \ - RB_SET_BLACKRED(name, parent, gparent, field);\ - elm = gparent; \ - continue; \ - } \ - if (RB_LEFT(parent, field) == elm) { \ - RB_ROTATE_RIGHT(name, head, parent, tmp, field);\ - tmp = parent; \ - parent = elm; \ - elm = tmp; \ - } \ - RB_SET_BLACKRED(name, parent, gparent, field); \ - RB_ROTATE_LEFT(name, head, gparent, tmp, field); \ - } \ - } \ - name##_RB_SETCOLOR(head->rbh_root, RB_BLACK); \ -} \ - \ -void \ +} \ +struct type *name##_RB_SETPARENT(struct type *elm, struct type *parent) { \ + int color = name##_RB_GETCOLOR(elm); \ + _RB_PARENT(elm, field) = parent; \ + if(color) name##_RB_SETCOLOR(elm, color); \ + return(name##_RB_GETPARENT(elm)); \ +} \ + \ +void \ +name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \ +{ \ + struct type *parent, *gparent, *tmp; \ + while ((parent = name##_RB_GETPARENT(elm)) != NULL && \ + name##_RB_GETCOLOR(parent) == RB_RED) { \ + gparent = name##_RB_GETPARENT(parent); \ + if (parent == RB_LEFT(gparent, field)) { \ + tmp = RB_RIGHT(gparent, field); \ + if (tmp && name##_RB_GETCOLOR(tmp) == RB_RED) { \ + name##_RB_SETCOLOR(tmp, RB_BLACK); \ + RB_SET_BLACKRED(name, parent, gparent, field);\ + elm = gparent; \ + continue; \ + } \ + if (RB_RIGHT(parent, field) == elm) { \ + RB_ROTATE_LEFT(name, head, parent, tmp, field);\ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(name, parent, gparent, field); \ + RB_ROTATE_RIGHT(name,head, gparent, tmp, field); \ + } else { \ + tmp = RB_LEFT(gparent, field); \ + if (tmp && name##_RB_GETCOLOR(tmp) == RB_RED) { \ + name##_RB_SETCOLOR(tmp, RB_BLACK); \ + RB_SET_BLACKRED(name, parent, gparent, field);\ + elm = gparent; \ + continue; \ + } \ + 
if (RB_LEFT(parent, field) == elm) { \ + RB_ROTATE_RIGHT(name, head, parent, tmp, field);\ + tmp = parent; \ + parent = elm; \ + elm = tmp; \ + } \ + RB_SET_BLACKRED(name, parent, gparent, field); \ + RB_ROTATE_LEFT(name, head, gparent, tmp, field); \ + } \ + } \ + name##_RB_SETCOLOR(head->rbh_root, RB_BLACK); \ +} \ + \ +void \ name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \ -{ \ - struct type *tmp; \ - while ((elm == NULL || name##_RB_GETCOLOR(elm) == RB_BLACK) && \ - elm != RB_ROOT(head)) { \ - if (RB_LEFT(parent, field) == elm) { \ - tmp = RB_RIGHT(parent, field); \ - if (name##_RB_GETCOLOR(tmp) == RB_RED) { \ - RB_SET_BLACKRED(name, tmp, parent, field); \ - RB_ROTATE_LEFT(name, head, parent, tmp, field);\ - tmp = RB_RIGHT(parent, field); \ - } \ - if ((RB_LEFT(tmp, field) == NULL || \ - name##_RB_GETCOLOR(RB_LEFT(tmp, field)) == RB_BLACK) &&\ - (RB_RIGHT(tmp, field) == NULL || \ - name##_RB_GETCOLOR(RB_RIGHT(tmp, field)) == RB_BLACK)) {\ - name##_RB_SETCOLOR(tmp, RB_RED); \ - elm = parent; \ - parent = name##_RB_GETPARENT(elm); \ - } else { \ - if (RB_RIGHT(tmp, field) == NULL || \ - name##_RB_GETCOLOR(RB_RIGHT(tmp, field)) == RB_BLACK) {\ - struct type *oleft; \ - if ((oleft = RB_LEFT(tmp, field)) \ - != NULL) \ - name##_RB_SETCOLOR(oleft, RB_BLACK);\ - name##_RB_SETCOLOR(tmp, RB_RED); \ - RB_ROTATE_RIGHT(name, head, tmp, oleft, field);\ - tmp = RB_RIGHT(parent, field); \ - } \ - name##_RB_SETCOLOR(tmp, (name##_RB_GETCOLOR(parent)));\ - name##_RB_SETCOLOR(parent, RB_BLACK); \ - if (RB_RIGHT(tmp, field)) \ - name##_RB_SETCOLOR(RB_RIGHT(tmp, field),RB_BLACK);\ - RB_ROTATE_LEFT(name, head, parent, tmp, field);\ - elm = RB_ROOT(head); \ - break; \ - } \ - } else { \ - tmp = RB_LEFT(parent, field); \ - if (name##_RB_GETCOLOR(tmp) == RB_RED) { \ - RB_SET_BLACKRED(name, tmp, parent, field); \ - RB_ROTATE_RIGHT(name, head, parent, tmp, field);\ - tmp = RB_LEFT(parent, field); \ - } \ - if ((RB_LEFT(tmp, field) == NULL || \ - name##_RB_GETCOLOR(RB_LEFT(tmp, field)) == RB_BLACK) &&\ - (RB_RIGHT(tmp, field) == NULL || \ - name##_RB_GETCOLOR(RB_RIGHT(tmp, field)) == RB_BLACK)) {\ - name##_RB_SETCOLOR(tmp, RB_RED); \ - elm = parent; \ - parent = name##_RB_GETPARENT(elm); \ - } else { \ - if (RB_LEFT(tmp, field) == NULL || \ - name##_RB_GETCOLOR(RB_LEFT(tmp, field)) == RB_BLACK) {\ - struct type *oright; \ - if ((oright = RB_RIGHT(tmp, field)) \ - != NULL) \ - name##_RB_SETCOLOR(oright, RB_BLACK);\ - name##_RB_SETCOLOR(tmp, RB_RED); \ - RB_ROTATE_LEFT(name, head, tmp, oright, field);\ - tmp = RB_LEFT(parent, field); \ - } \ - name##_RB_SETCOLOR(tmp,(name##_RB_GETCOLOR(parent)));\ - name##_RB_SETCOLOR(parent, RB_BLACK); \ - if (RB_LEFT(tmp, field)) \ - name##_RB_SETCOLOR(RB_LEFT(tmp, field), RB_BLACK);\ - RB_ROTATE_RIGHT(name, head, parent, tmp, field);\ - elm = RB_ROOT(head); \ - break; \ - } \ - } \ - } \ - if (elm) \ - name##_RB_SETCOLOR(elm, RB_BLACK); \ -} \ - \ -struct type * \ -name##_RB_REMOVE(struct name *head, struct type *elm) \ -{ \ - struct type *child, *parent, *old = elm; \ - int color; \ - if (RB_LEFT(elm, field) == NULL) \ - child = RB_RIGHT(elm, field); \ - else if (RB_RIGHT(elm, field) == NULL) \ - child = RB_LEFT(elm, field); \ - else { \ - struct type *left; \ - elm = RB_RIGHT(elm, field); \ - while ((left = RB_LEFT(elm, field)) != NULL) \ - elm = left; \ - child = RB_RIGHT(elm, field); \ - parent = name##_RB_GETPARENT(elm); \ - color = name##_RB_GETCOLOR(elm); \ - if (child) \ - name##_RB_SETPARENT(child, parent); \ - if (parent) { \ - if 
(RB_LEFT(parent, field) == elm) \ - RB_LEFT(parent, field) = child; \ - else \ - RB_RIGHT(parent, field) = child; \ - RB_AUGMENT(parent); \ - } else \ - RB_ROOT(head) = child; \ - if (name##_RB_GETPARENT(elm) == old) \ - parent = elm; \ - (elm)->field = (old)->field; \ - if (name##_RB_GETPARENT(old)) { \ - if (RB_LEFT(name##_RB_GETPARENT(old), field) == old)\ - RB_LEFT(name##_RB_GETPARENT(old), field) = elm;\ - else \ - RB_RIGHT(name##_RB_GETPARENT(old), field) = elm;\ - RB_AUGMENT(name##_RB_GETPARENT(old)); \ - } else \ - RB_ROOT(head) = elm; \ - name##_RB_SETPARENT(RB_LEFT(old, field), elm); \ - if (RB_RIGHT(old, field)) \ - name##_RB_SETPARENT(RB_RIGHT(old, field), elm); \ - if (parent) { \ - left = parent; \ - do { \ - RB_AUGMENT(left); \ - } while ((left = name##_RB_GETPARENT(left)) != NULL); \ - } \ - goto color; \ - } \ - parent = name##_RB_GETPARENT(elm); \ - color = name##_RB_GETCOLOR(elm); \ - if (child) \ - name##_RB_SETPARENT(child, parent); \ - if (parent) { \ - if (RB_LEFT(parent, field) == elm) \ - RB_LEFT(parent, field) = child; \ - else \ - RB_RIGHT(parent, field) = child; \ - RB_AUGMENT(parent); \ - } else \ - RB_ROOT(head) = child; \ -color: \ - if (color == RB_BLACK) \ - name##_RB_REMOVE_COLOR(head, parent, child); \ - return (old); \ -} \ - \ -/* Inserts a node into the RB tree */ \ -struct type * \ -name##_RB_INSERT(struct name *head, struct type *elm) \ -{ \ - struct type *tmp; \ - struct type *parent = NULL; \ - int comp = 0; \ - tmp = RB_ROOT(head); \ - while (tmp) { \ - parent = tmp; \ - comp = (cmp)(elm, parent); \ - if (comp < 0) \ - tmp = RB_LEFT(tmp, field); \ - else if (comp > 0) \ - tmp = RB_RIGHT(tmp, field); \ - else \ - return (tmp); \ - } \ - RB_SET(name, elm, parent, field); \ - if (parent != NULL) { \ - if (comp < 0) \ - RB_LEFT(parent, field) = elm; \ - else \ - RB_RIGHT(parent, field) = elm; \ - RB_AUGMENT(parent); \ - } else \ - RB_ROOT(head) = elm; \ - name##_RB_INSERT_COLOR(head, elm); \ - return (NULL); \ -} \ - \ -/* Finds the node with the same key as elm */ \ -struct type * \ -name##_RB_FIND(struct name *head, struct type *elm) \ -{ \ - struct type *tmp = RB_ROOT(head); \ - int comp; \ - while (tmp) { \ - comp = cmp(elm, tmp); \ - if (comp < 0) \ - tmp = RB_LEFT(tmp, field); \ - else if (comp > 0) \ - tmp = RB_RIGHT(tmp, field); \ - else \ - return (tmp); \ - } \ - return (NULL); \ -} \ - \ -/* ARGSUSED */ \ -struct type * \ -name##_RB_NEXT(struct type *elm) \ -{ \ - if (RB_RIGHT(elm, field)) { \ - elm = RB_RIGHT(elm, field); \ - while (RB_LEFT(elm, field)) \ - elm = RB_LEFT(elm, field); \ - } else { \ - if (name##_RB_GETPARENT(elm) && \ - (elm == RB_LEFT(name##_RB_GETPARENT(elm), field))) \ - elm = name##_RB_GETPARENT(elm); \ - else { \ - while (name##_RB_GETPARENT(elm) && \ - (elm == RB_RIGHT(name##_RB_GETPARENT(elm), field)))\ - elm = name##_RB_GETPARENT(elm); \ - elm = name##_RB_GETPARENT(elm); \ - } \ - } \ - return (elm); \ -} \ - \ -struct type * \ -name##_RB_MINMAX(struct name *head, int val) \ -{ \ - struct type *tmp = RB_ROOT(head); \ - struct type *parent = NULL; \ - while (tmp) { \ - parent = tmp; \ - if (val < 0) \ - tmp = RB_LEFT(tmp, field); \ - else \ - tmp = RB_RIGHT(tmp, field); \ - } \ - return (parent); \ +{ \ + struct type *tmp; \ + while ((elm == NULL || name##_RB_GETCOLOR(elm) == RB_BLACK) && \ + elm != RB_ROOT(head)) { \ + if (RB_LEFT(parent, field) == elm) { \ + tmp = RB_RIGHT(parent, field); \ + if (name##_RB_GETCOLOR(tmp) == RB_RED) { \ + RB_SET_BLACKRED(name, tmp, parent, field); \ + RB_ROTATE_LEFT(name, head, 
parent, tmp, field);\ + tmp = RB_RIGHT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + name##_RB_GETCOLOR(RB_LEFT(tmp, field)) == RB_BLACK) &&\ + (RB_RIGHT(tmp, field) == NULL || \ + name##_RB_GETCOLOR(RB_RIGHT(tmp, field)) == RB_BLACK)) {\ + name##_RB_SETCOLOR(tmp, RB_RED); \ + elm = parent; \ + parent = name##_RB_GETPARENT(elm); \ + } else { \ + if (RB_RIGHT(tmp, field) == NULL || \ + name##_RB_GETCOLOR(RB_RIGHT(tmp, field)) == RB_BLACK) {\ + struct type *oleft; \ + if ((oleft = RB_LEFT(tmp, field)) \ + != NULL) \ + name##_RB_SETCOLOR(oleft, RB_BLACK);\ + name##_RB_SETCOLOR(tmp, RB_RED); \ + RB_ROTATE_RIGHT(name, head, tmp, oleft, field);\ + tmp = RB_RIGHT(parent, field); \ + } \ + name##_RB_SETCOLOR(tmp, (name##_RB_GETCOLOR(parent)));\ + name##_RB_SETCOLOR(parent, RB_BLACK); \ + if (RB_RIGHT(tmp, field)) \ + name##_RB_SETCOLOR(RB_RIGHT(tmp, field),RB_BLACK);\ + RB_ROTATE_LEFT(name, head, parent, tmp, field);\ + elm = RB_ROOT(head); \ + break; \ + } \ + } else { \ + tmp = RB_LEFT(parent, field); \ + if (name##_RB_GETCOLOR(tmp) == RB_RED) { \ + RB_SET_BLACKRED(name, tmp, parent, field); \ + RB_ROTATE_RIGHT(name, head, parent, tmp, field);\ + tmp = RB_LEFT(parent, field); \ + } \ + if ((RB_LEFT(tmp, field) == NULL || \ + name##_RB_GETCOLOR(RB_LEFT(tmp, field)) == RB_BLACK) &&\ + (RB_RIGHT(tmp, field) == NULL || \ + name##_RB_GETCOLOR(RB_RIGHT(tmp, field)) == RB_BLACK)) {\ + name##_RB_SETCOLOR(tmp, RB_RED); \ + elm = parent; \ + parent = name##_RB_GETPARENT(elm); \ + } else { \ + if (RB_LEFT(tmp, field) == NULL || \ + name##_RB_GETCOLOR(RB_LEFT(tmp, field)) == RB_BLACK) {\ + struct type *oright; \ + if ((oright = RB_RIGHT(tmp, field)) \ + != NULL) \ + name##_RB_SETCOLOR(oright, RB_BLACK);\ + name##_RB_SETCOLOR(tmp, RB_RED); \ + RB_ROTATE_LEFT(name, head, tmp, oright, field);\ + tmp = RB_LEFT(parent, field); \ + } \ + name##_RB_SETCOLOR(tmp,(name##_RB_GETCOLOR(parent)));\ + name##_RB_SETCOLOR(parent, RB_BLACK); \ + if (RB_LEFT(tmp, field)) \ + name##_RB_SETCOLOR(RB_LEFT(tmp, field), RB_BLACK);\ + RB_ROTATE_RIGHT(name, head, parent, tmp, field);\ + elm = RB_ROOT(head); \ + break; \ + } \ + } \ + } \ + if (elm) \ + name##_RB_SETCOLOR(elm, RB_BLACK); \ +} \ + \ +struct type * \ +name##_RB_REMOVE(struct name *head, struct type *elm) \ +{ \ + struct type *child, *parent, *old = elm; \ + int color; \ + if (RB_LEFT(elm, field) == NULL) \ + child = RB_RIGHT(elm, field); \ + else if (RB_RIGHT(elm, field) == NULL) \ + child = RB_LEFT(elm, field); \ + else { \ + struct type *left; \ + elm = RB_RIGHT(elm, field); \ + while ((left = RB_LEFT(elm, field)) != NULL) \ + elm = left; \ + child = RB_RIGHT(elm, field); \ + parent = name##_RB_GETPARENT(elm); \ + color = name##_RB_GETCOLOR(elm); \ + if (child) \ + name##_RB_SETPARENT(child, parent); \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ + if (name##_RB_GETPARENT(elm) == old) \ + parent = elm; \ + (elm)->field = (old)->field; \ + if (name##_RB_GETPARENT(old)) { \ + if (RB_LEFT(name##_RB_GETPARENT(old), field) == old)\ + RB_LEFT(name##_RB_GETPARENT(old), field) = elm;\ + else \ + RB_RIGHT(name##_RB_GETPARENT(old), field) = elm;\ + RB_AUGMENT(name##_RB_GETPARENT(old)); \ + } else \ + RB_ROOT(head) = elm; \ + name##_RB_SETPARENT(RB_LEFT(old, field), elm); \ + if (RB_RIGHT(old, field)) \ + name##_RB_SETPARENT(RB_RIGHT(old, field), elm); \ + if (parent) { \ + left = parent; \ + do { \ + 
RB_AUGMENT(left); \ + } while ((left = name##_RB_GETPARENT(left)) != NULL); \ + } \ + goto color; \ + } \ + parent = name##_RB_GETPARENT(elm); \ + color = name##_RB_GETCOLOR(elm); \ + if (child) \ + name##_RB_SETPARENT(child, parent); \ + if (parent) { \ + if (RB_LEFT(parent, field) == elm) \ + RB_LEFT(parent, field) = child; \ + else \ + RB_RIGHT(parent, field) = child; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = child; \ +color: \ + if (color == RB_BLACK) \ + name##_RB_REMOVE_COLOR(head, parent, child); \ + return (old); \ +} \ + \ +/* Inserts a node into the RB tree */ \ +struct type * \ +name##_RB_INSERT(struct name *head, struct type *elm) \ +{ \ + struct type *tmp; \ + struct type *parent = NULL; \ + int comp = 0; \ + tmp = RB_ROOT(head); \ + while (tmp) { \ + parent = tmp; \ + comp = (cmp)(elm, parent); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + RB_SET(name, elm, parent, field); \ + if (parent != NULL) { \ + if (comp < 0) \ + RB_LEFT(parent, field) = elm; \ + else \ + RB_RIGHT(parent, field) = elm; \ + RB_AUGMENT(parent); \ + } else \ + RB_ROOT(head) = elm; \ + name##_RB_INSERT_COLOR(head, elm); \ + return (NULL); \ +} \ + \ +/* Finds the node with the same key as elm */ \ +struct type * \ +name##_RB_FIND(struct name *head, struct type *elm) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + int comp; \ + while (tmp) { \ + comp = cmp(elm, tmp); \ + if (comp < 0) \ + tmp = RB_LEFT(tmp, field); \ + else if (comp > 0) \ + tmp = RB_RIGHT(tmp, field); \ + else \ + return (tmp); \ + } \ + return (NULL); \ +} \ + \ +/* ARGSUSED */ \ +struct type * \ +name##_RB_NEXT(struct type *elm) \ +{ \ + if (RB_RIGHT(elm, field)) { \ + elm = RB_RIGHT(elm, field); \ + while (RB_LEFT(elm, field)) \ + elm = RB_LEFT(elm, field); \ + } else { \ + if (name##_RB_GETPARENT(elm) && \ + (elm == RB_LEFT(name##_RB_GETPARENT(elm), field))) \ + elm = name##_RB_GETPARENT(elm); \ + else { \ + while (name##_RB_GETPARENT(elm) && \ + (elm == RB_RIGHT(name##_RB_GETPARENT(elm), field)))\ + elm = name##_RB_GETPARENT(elm); \ + elm = name##_RB_GETPARENT(elm); \ + } \ + } \ + return (elm); \ +} \ + \ +struct type * \ +name##_RB_MINMAX(struct name *head, int val) \ +{ \ + struct type *tmp = RB_ROOT(head); \ + struct type *parent = NULL; \ + while (tmp) { \ + parent = tmp; \ + if (val < 0) \ + tmp = RB_LEFT(tmp, field); \ + else \ + tmp = RB_RIGHT(tmp, field); \ + } \ + return (parent); \ } -#define RB_PROTOTYPE_PREV(name, type, field, cmp) \ - RB_PROTOTYPE(name, type, field, cmp) \ +#define RB_PROTOTYPE_PREV(name, type, field, cmp) \ + RB_PROTOTYPE(name, type, field, cmp) \ struct type *name##_RB_PREV(struct type *); -#define RB_PROTOTYPE_SC_PREV(_sc_, name, type, field, cmp) \ - RB_PROTOTYPE_SC(_sc_, name, type, field, cmp) \ +#define RB_PROTOTYPE_SC_PREV(_sc_, name, type, field, cmp) \ + RB_PROTOTYPE_SC(_sc_, name, type, field, cmp) \ _sc_ struct type *name##_RB_PREV(struct type *); -#define RB_GENERATE_PREV(name, type, field, cmp) \ - RB_GENERATE(name, type, field, cmp) \ -struct type * \ -name##_RB_PREV(struct type *elm) \ -{ \ - if (RB_LEFT(elm, field)) { \ - elm = RB_LEFT(elm, field); \ - while (RB_RIGHT(elm, field)) \ - elm = RB_RIGHT(elm, field); \ - } else { \ - if (name##_RB_GETPARENT(elm) && \ - (elm == RB_RIGHT(name##_RB_GETPARENT(elm), field))) \ - elm = name##_RB_GETPARENT(elm); \ - else { \ - while (name##_RB_GETPARENT(elm) && \ - (elm == RB_LEFT(name##_RB_GETPARENT(elm), field)))\ - elm = 
name##_RB_GETPARENT(elm); \ - elm = name##_RB_GETPARENT(elm); \ - } \ - } \ - return (elm); \ -} \ - -#define RB_NEGINF -1 -#define RB_INF 1 - -#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) -#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) -#define RB_FIND(name, x, y) name##_RB_FIND(x, y) -#define RB_NEXT(name, x, y) name##_RB_NEXT(y) -#define RB_PREV(name, x, y) name##_RB_PREV(y) -#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) -#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) - -#define RB_FOREACH(x, name, head) \ - for ((x) = RB_MIN(name, head); \ - (x) != NULL; \ +#define RB_GENERATE_PREV(name, type, field, cmp) \ + RB_GENERATE(name, type, field, cmp) \ +struct type * \ +name##_RB_PREV(struct type *elm) \ +{ \ + if (RB_LEFT(elm, field)) { \ + elm = RB_LEFT(elm, field); \ + while (RB_RIGHT(elm, field)) \ + elm = RB_RIGHT(elm, field); \ + } else { \ + if (name##_RB_GETPARENT(elm) && \ + (elm == RB_RIGHT(name##_RB_GETPARENT(elm), field))) \ + elm = name##_RB_GETPARENT(elm); \ + else { \ + while (name##_RB_GETPARENT(elm) && \ + (elm == RB_LEFT(name##_RB_GETPARENT(elm), field)))\ + elm = name##_RB_GETPARENT(elm); \ + elm = name##_RB_GETPARENT(elm); \ + } \ + } \ + return (elm); \ +} \ + +#define RB_NEGINF -1 +#define RB_INF 1 + +#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) +#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) +#define RB_FIND(name, x, y) name##_RB_FIND(x, y) +#define RB_NEXT(name, x, y) name##_RB_NEXT(y) +#define RB_PREV(name, x, y) name##_RB_PREV(y) +#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF) +#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) + +#define RB_FOREACH(x, name, head) \ + for ((x) = RB_MIN(name, head); \ + (x) != NULL; \ (x) = name##_RB_NEXT(x)) #define RB_FOREACH_FROM(x, name, y) \ for ((x) = (y); \ ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ - (x) = (y)) + (x) = (y)) -#define RB_FOREACH_REVERSE_FROM(x, name, y) \ - for ((x) = (y); \ - ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ +#define RB_FOREACH_REVERSE_FROM(x, name, y) \ + for ((x) = (y); \ + ((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \ (x) = (y)) -#define RB_FOREACH_SAFE(x, name, head, y) \ - for ((x) = RB_MIN(name, head); \ - ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ +#define RB_FOREACH_SAFE(x, name, head, y) \ + for ((x) = RB_MIN(name, head); \ + ((x) != NULL) && ((y) = name##_RB_NEXT(x), (x) != NULL); \ (x) = (y)) -#endif /* _LIBKERN_TREE_H_ */ +#endif /* _LIBKERN_TREE_H_ */ diff --git a/libkern/libkern/zconf.h b/libkern/libkern/zconf.h index ce944764a..919a81322 100644 --- a/libkern/libkern/zconf.h +++ b/libkern/libkern/zconf.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
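
The tree.h hunks above only re-indent the macros, but the API they touch is easy to lose in the noise. As a reading aid, here is a minimal sketch of how a client declares and uses one of these intrusive red-black trees; the struct, field, and comparator names are hypothetical, and RB_GENERATE must appear in exactly one .c file because it emits externally visible functions. The SPLAY_* macros earlier in the header follow the same declare/generate/use pattern.

    #include <libkern/tree.h>   /* installed name of libkern/libkern/tree.h */

    struct knode {
            int key;
            RB_ENTRY(knode) link;   /* embedded left/right/parent (color in low bit) */
    };

    static int
    knode_cmp(struct knode *a, struct knode *b)
    {
            return a->key < b->key ? -1 : a->key > b->key;  /* <0, 0, >0 */
    }

    RB_HEAD(knode_tree, knode);
    RB_GENERATE(knode_tree, knode, link, knode_cmp)

    static struct knode_tree ktree = RB_INITIALIZER(&ktree);

    static void
    knode_example(struct knode *n)
    {
            struct knode query, *match, *it;

            /* returns NULL on success, the colliding node on a duplicate key */
            RB_INSERT(knode_tree, &ktree, n);

            query.key = n->key;
            match = RB_FIND(knode_tree, &ktree, &query);

            RB_FOREACH(it, knode_tree, &ktree) {
                    /* visits nodes in comparator order via knode_tree_RB_NEXT */
            }

            if (match != NULL)
                    RB_REMOVE(knode_tree, &ktree, match);
    }
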
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* zconf.h -- configuration of the zlib compression library @@ -153,7 +153,7 @@ #endif /* Some Mac compilers merge all .h files incorrectly: */ -#if defined(__MWERKS__)||defined(applec)||defined(THINK_C)||defined(__SC__) +#if defined(__MWERKS__) || defined(applec) || defined(THINK_C) || defined(__SC__) # define NO_DUMMY_DECL #endif @@ -176,19 +176,19 @@ #endif /* The memory requirements for deflate are (in bytes): - (1 << (windowBits+2)) + (1 << (memLevel+9)) - that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) - plus a few kilobytes for small objects. For example, if you want to reduce - the default memory requirements from 256K to 128K, compile with - make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" - Of course this will generally degrade compression (there's no free lunch). - - The memory requirements for inflate are (in bytes) 1 << windowBits - that is, 32K for windowBits=15 (default value) plus a few kilobytes - for small objects. -*/ + * (1 << (windowBits+2)) + (1 << (memLevel+9)) + * that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) + * plus a few kilobytes for small objects. For example, if you want to reduce + * the default memory requirements from 256K to 128K, compile with + * make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" + * Of course this will generally degrade compression (there's no free lunch). + * + * The memory requirements for inflate are (in bytes) 1 << windowBits + * that is, 32K for windowBits=15 (default value) plus a few kilobytes + * for small objects. + */ - /* Type declarations */ +/* Type declarations */ #ifndef OF /* function prototypes */ # ifdef STDC @@ -206,7 +206,7 @@ */ #ifdef SYS16BIT # if defined(M_I86SM) || defined(M_I86MM) - /* MSC small or medium model */ +/* MSC small or medium model */ # define SMALL_MEDIUM # ifdef _MSC_VER # define FAR _far @@ -215,7 +215,7 @@ # endif # endif # if (defined(__SMALL__) || defined(__MEDIUM__)) - /* Turbo C small or medium model */ +/* Turbo C small or medium model */ # define SMALL_MEDIUM # ifdef __BORLANDC__ # define FAR _far @@ -226,9 +226,9 @@ #endif #if defined(WINDOWS) || defined(WIN32) - /* If building or using zlib as a DLL, define ZLIB_DLL. - * This is not mandatory, but it offers a little performance increase. - */ +/* If building or using zlib as a DLL, define ZLIB_DLL. + * This is not mandatory, but it offers a little performance increase. + */ # ifdef ZLIB_DLL # if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) # ifdef ZLIB_INTERNAL @@ -238,17 +238,17 @@ # endif # endif # endif /* ZLIB_DLL */ - /* If building or using zlib with the WINAPI/WINAPIV calling convention, - * define ZLIB_WINAPI. - * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. - */ +/* If building or using zlib with the WINAPI/WINAPIV calling convention, + * define ZLIB_WINAPI. + * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. + */ # ifdef ZLIB_WINAPI # ifdef FAR # undef FAR # endif # include - /* No need for _export, use ZLIB.DEF instead. 
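
The memory figures quoted in the zconf.h comment above follow directly from the two formulas it gives; a throwaway sanity check (helper names are hypothetical):

    /* Per-stream memory, per the zconf.h comment: deflate needs
     * (1 << (windowBits+2)) + (1 << (memLevel+9)) bytes, inflate 1 << windowBits. */
    static unsigned long
    deflate_mem_bytes(int windowBits, int memLevel)
    {
            return (1UL << (windowBits + 2)) + (1UL << (memLevel + 9));
    }

    static unsigned long
    inflate_mem_bytes(int windowBits)
    {
            return 1UL << windowBits;
    }

    /* deflate_mem_bytes(15, 8) == 128K + 128K == 256K (the defaults);
     * building with -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7 halves that to 128K,
     * and inflate_mem_bytes(15) == 32K, all matching the comment. */
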
*/ - /* For complete Windows compatibility, use WINAPI, not __stdcall. */ +/* No need for _export, use ZLIB.DEF instead. */ +/* For complete Windows compatibility, use WINAPI, not __stdcall. */ # define ZEXPORT WINAPI # ifdef WIN32 # define ZEXPORTVA WINAPIV @@ -291,10 +291,10 @@ typedef unsigned int uInt; /* 16 bits or more */ typedef unsigned long uLong; /* 32 bits or more */ #ifdef SMALL_MEDIUM - /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ +/* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ # define Bytef Byte FAR #else - typedef Byte FAR Bytef; +typedef Byte FAR Bytef; #endif typedef char FAR charf; typedef int FAR intf; @@ -302,13 +302,13 @@ typedef uInt FAR uIntf; typedef uLong FAR uLongf; #ifdef STDC - typedef void const *voidpc; - typedef void FAR *voidpf; - typedef void *voidp; +typedef void const *voidpc; +typedef void FAR *voidpf; +typedef void *voidp; #else - typedef Byte const *voidpc; - typedef Byte FAR *voidpf; - typedef Byte *voidp; +typedef Byte const *voidpc; +typedef Byte FAR *voidpf; +typedef Byte *voidp; #endif #if 0 /* HAVE_UNISTD_H -- this line is updated by ./configure */ diff --git a/libkern/libkern/zlib.h b/libkern/libkern/zlib.h index c859769d1..1883b6f5a 100644 --- a/libkern/libkern/zlib.h +++ b/libkern/libkern/zlib.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,38 +22,38 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.3, July 18th, 2005 - - Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. 
- - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - - - The data format used by the zlib library is described by RFCs (Request for - Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt - (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). -*/ + * version 1.2.3, July 18th, 2005 + * + * Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler + * + * This software is provided 'as-is', without any express or implied + * warranty. In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. This notice may not be removed or altered from any source distribution. + * + * Jean-loup Gailly Mark Adler + * jloup@gzip.org madler@alumni.caltech.edu + * + * + * The data format used by the zlib library is described by RFCs (Request for + * Comments) 1950 to 1952 in the files http://www.ietf.org/rfc/rfc1950.txt + * (zlib format), rfc1951.txt (deflate format) and rfc1952.txt (gzip format). + */ #ifndef ZLIB_H #define ZLIB_H @@ -68,38 +68,38 @@ extern "C" { #define ZLIB_VERNUM 0x1230 /* - The 'zlib' compression library provides in-memory compression and - decompression functions, including integrity checks of the uncompressed - data. This version of the library supports only one compression method - (deflation) but other algorithms will be added later and will have the same - stream interface. - - Compression can be done in a single step if the buffers are large - enough (for example if an input file is mmap'ed), or can be done by - repeated calls of the compression function. In the latter case, the - application must provide more input and/or consume the output - (providing more output space) before each call. - - The compressed data format used by default by the in-memory functions is - the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped - around a deflate stream, which is itself documented in RFC 1951. - - The library also supports reading and writing files in gzip (.gz) format - with an interface similar to that of stdio using the functions that start - with "gz". The gzip format is different from the zlib format. gzip is a - gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. - - This library can optionally read and write gzip streams in memory as well. - - The zlib format was designed to be compact and fast for use in memory - and on communications channels. The gzip format was designed for single- - file compression on file systems, has a larger header than zlib to maintain - directory information, and uses a different, slower check method than zlib. - - The library does not install any signal handler. The decoder checks - the consistency of the compressed data, so the library should never - crash even in case of corrupted input. 
-*/ + * The 'zlib' compression library provides in-memory compression and + * decompression functions, including integrity checks of the uncompressed + * data. This version of the library supports only one compression method + * (deflation) but other algorithms will be added later and will have the same + * stream interface. + * + * Compression can be done in a single step if the buffers are large + * enough (for example if an input file is mmap'ed), or can be done by + * repeated calls of the compression function. In the latter case, the + * application must provide more input and/or consume the output + * (providing more output space) before each call. + * + * The compressed data format used by default by the in-memory functions is + * the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped + * around a deflate stream, which is itself documented in RFC 1951. + * + * The library also supports reading and writing files in gzip (.gz) format + * with an interface similar to that of stdio using the functions that start + * with "gz". The gzip format is different from the zlib format. gzip is a + * gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. + * + * This library can optionally read and write gzip streams in memory as well. + * + * The zlib format was designed to be compact and fast for use in memory + * and on communications channels. The gzip format was designed for single- + * file compression on file systems, has a larger header than zlib to maintain + * directory information, and uses a different, slower check method than zlib. + * + * The library does not install any signal handler. The decoder checks + * the consistency of the compressed data, so the library should never + * crash even in case of corrupted input. 
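
The introductory comment above notes that compression can be done in a single step when the buffers are large enough. A sketch of that mode, assuming the destination buffer was sized generously (e.g. via deflateBound, mentioned later in this header) and with error handling trimmed:

    /* One-shot compression: all input and output presented up front,
     * finished with a single deflate(..., Z_FINISH). Sketch only. */
    static int
    compress_oneshot(Bytef *dst, uLong *dstlen, Bytef *src, uLong srclen)
    {
            z_stream strm;
            int err;

            strm.zalloc = Z_NULL;   /* Z_NULL selects the default allocators; */
            strm.zfree  = Z_NULL;   /* kernel callers typically supply their own */
            strm.opaque = Z_NULL;

            err = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
            if (err != Z_OK)
                    return err;

            strm.next_in   = src;
            strm.avail_in  = (uInt)srclen;
            strm.next_out  = dst;
            strm.avail_out = (uInt)*dstlen;

            err = deflate(&strm, Z_FINISH);     /* expect Z_STREAM_END */
            *dstlen = strm.total_out;
            (void)deflateEnd(&strm);
            return err == Z_STREAM_END ? Z_OK : err;
    }
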
+ */ typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); typedef void (*free_func) OF((voidpf opaque, voidpf address)); @@ -107,84 +107,84 @@ typedef void (*free_func) OF((voidpf opaque, voidpf address)); struct internal_state; typedef struct z_stream_s { - Bytef *next_in; /* next input byte */ - uInt avail_in; /* number of bytes available at next_in */ - uLong total_in; /* total nb of input bytes read so far */ + Bytef *next_in;/* next input byte */ + uInt avail_in;/* number of bytes available at next_in */ + uLong total_in;/* total nb of input bytes read so far */ - Bytef *next_out; /* next output byte should be put there */ - uInt avail_out; /* remaining free space at next_out */ - uLong total_out; /* total nb of bytes output so far */ + Bytef *next_out;/* next output byte should be put there */ + uInt avail_out;/* remaining free space at next_out */ + uLong total_out;/* total nb of bytes output so far */ - char *msg; /* last error message, NULL if no error */ - struct internal_state FAR *state; /* not visible by applications */ + char *msg; /* last error message, NULL if no error */ + struct internal_state FAR *state; /* not visible by applications */ - alloc_func zalloc; /* used to allocate the internal state */ - free_func zfree; /* used to free the internal state */ - voidpf opaque; /* private data object passed to zalloc and zfree */ + alloc_func zalloc; /* used to allocate the internal state */ + free_func zfree;/* used to free the internal state */ + voidpf opaque;/* private data object passed to zalloc and zfree */ - int data_type; /* best guess about the data type: binary or text */ - uLong adler; /* adler32 value of the uncompressed data */ - uLong reserved; /* reserved for future use */ + int data_type;/* best guess about the data type: binary or text */ + uLong adler; /* adler32 value of the uncompressed data */ + uLong reserved;/* reserved for future use */ } z_stream; typedef z_stream FAR *z_streamp; /* - gzip header information passed to and from zlib routines. See RFC 1952 - for more details on the meanings of these fields. -*/ + * gzip header information passed to and from zlib routines. See RFC 1952 + * for more details on the meanings of these fields. 
+ */ typedef struct gz_header_s { - int text; /* true if compressed data believed to be text */ - uLong time; /* modification time */ - int xflags; /* extra flags (not used when writing a gzip file) */ - int os; /* operating system */ - Bytef *extra; /* pointer to extra field or Z_NULL if none */ - uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ - uInt extra_max; /* space at extra (only when reading header) */ - Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ - uInt name_max; /* space at name (only when reading header) */ - Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ - uInt comm_max; /* space at comment (only when reading header) */ - int hcrc; /* true if there was or will be a header crc */ - int done; /* true when done reading gzip header (not used - when writing a gzip file) */ + int text; /* true if compressed data believed to be text */ + uLong time; /* modification time */ + int xflags; /* extra flags (not used when writing a gzip file) */ + int os; /* operating system */ + Bytef *extra; /* pointer to extra field or Z_NULL if none */ + uInt extra_len;/* extra field length (valid if extra != Z_NULL) */ + uInt extra_max;/* space at extra (only when reading header) */ + Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ + uInt name_max;/* space at name (only when reading header) */ + Bytef *comment;/* pointer to zero-terminated comment or Z_NULL */ + uInt comm_max;/* space at comment (only when reading header) */ + int hcrc; /* true if there was or will be a header crc */ + int done; /* true when done reading gzip header (not used + * when writing a gzip file) */ } gz_header; typedef gz_header FAR *gz_headerp; /* - The application must update next_in and avail_in when avail_in has - dropped to zero. It must update next_out and avail_out when avail_out - has dropped to zero. The application must initialize zalloc, zfree and - opaque before calling the init function. All other fields are set by the - compression library and must not be updated by the application. - - The opaque value provided by the application will be passed as the first - parameter for calls of zalloc and zfree. This can be useful for custom - memory management. The compression library attaches no meaning to the - opaque value. - - zalloc must return Z_NULL if there is not enough memory for the object. - If zlib is used in a multi-threaded application, zalloc and zfree must be - thread safe. - - On 16-bit systems, the functions zalloc and zfree must be able to allocate - exactly 65536 bytes, but will not be required to allocate more than this - if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, - pointers returned by zalloc for objects of exactly 65536 bytes *must* - have their offset normalized to zero. The default allocation function - provided by this library ensures this (see zutil.c). To reduce memory - requirements and avoid any allocation of 64K objects, at the expense of - compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). - - The fields total_in and total_out can be used for statistics or - progress reports. After compression, total_in holds the total size of - the uncompressed data and may be saved for use in the decompressor - (particularly if the decompressor wants to decompress everything in - a single step). -*/ - - /* constants */ + * The application must update next_in and avail_in when avail_in has + * dropped to zero. 
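
In zlib 1.2.x the gz_header structure above is paired with deflateSetHeader() and inflateGetHeader(), declared further down in this header; assuming those, attaching a file name to a gzip stream looks roughly like this (the stream must have been opened in gzip mode, i.e. deflateInit2() with windowBits of 16+MAX_WBITS):

    #include <string.h>

    /* Sketch: describe the gzip member before the first deflate() call. */
    static int
    set_gzip_name(z_streamp strm)
    {
            static gz_header head;              /* must stay valid while deflating */
            static Bytef fname[] = "log.txt";   /* zero-terminated, per the comment */

            memset(&head, 0, sizeof(head));
            head.name = fname;
            head.os   = 3;                      /* RFC 1952 OS code for Unix */
            return deflateSetHeader(strm, &head);
    }
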
It must update next_out and avail_out when avail_out + * has dropped to zero. The application must initialize zalloc, zfree and + * opaque before calling the init function. All other fields are set by the + * compression library and must not be updated by the application. + * + * The opaque value provided by the application will be passed as the first + * parameter for calls of zalloc and zfree. This can be useful for custom + * memory management. The compression library attaches no meaning to the + * opaque value. + * + * zalloc must return Z_NULL if there is not enough memory for the object. + * If zlib is used in a multi-threaded application, zalloc and zfree must be + * thread safe. + * + * On 16-bit systems, the functions zalloc and zfree must be able to allocate + * exactly 65536 bytes, but will not be required to allocate more than this + * if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, + * pointers returned by zalloc for objects of exactly 65536 bytes *must* + * have their offset normalized to zero. The default allocation function + * provided by this library ensures this (see zutil.c). To reduce memory + * requirements and avoid any allocation of 64K objects, at the expense of + * compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). + * + * The fields total_in and total_out can be used for statistics or + * progress reports. After compression, total_in holds the total size of + * the uncompressed data and may be saved for use in the decompressor + * (particularly if the decompressor wants to decompress everything in + * a single step). + */ + +/* constants */ #define Z_NO_FLUSH 0 #define Z_PARTIAL_FLUSH 1 /* will be removed, use Z_SYNC_FLUSH instead */ @@ -236,390 +236,390 @@ typedef gz_header FAR *gz_headerp; #define zlib_version zlibVersion() /* for compatibility with versions < 1.0.2 */ - /* basic functions */ +/* basic functions */ ZEXTERN const char * ZEXPORT zlibVersion OF((void)); /* The application can compare zlibVersion and ZLIB_VERSION for consistency. - If the first character differs, the library code actually used is - not compatible with the zlib.h header file used by the application. - This check is automatically made by deflateInit and inflateInit. + * If the first character differs, the library code actually used is + * not compatible with the zlib.h header file used by the application. + * This check is automatically made by deflateInit and inflateInit. */ /* -ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); - - Initializes the internal stream state for compression. The fields - zalloc, zfree and opaque must be initialized before by the caller. - If zalloc and zfree are set to Z_NULL, deflateInit updates them to - use default allocation functions. - - The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: - 1 gives best speed, 9 gives best compression, 0 gives no compression at - all (the input data is simply copied a block at a time). - Z_DEFAULT_COMPRESSION requests a default compromise between speed and - compression (currently equivalent to level 6). - - deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if level is not a valid compression level, - Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible - with the version assumed by the caller (ZLIB_VERSION). - msg is set to null if there is no error message. deflateInit does not - perform any compression: this will be done by deflate(). 
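
The opaque/zalloc/zfree contract described above makes it straightforward to route zlib's internal allocations through a caller-owned pool. A sketch, with the pool type and functions entirely hypothetical (malloc stands in for brevity; a kernel caller would use its own allocator):

    #include <stdlib.h>

    struct my_pool { int unused; };

    static void *pool_alloc(struct my_pool *p, size_t n) { (void)p; return malloc(n); }
    static void  pool_free(struct my_pool *p, void *ptr) { (void)p; free(ptr); }

    /* Signatures match alloc_func/free_func above: the stream's opaque value
     * comes back as the first argument of every callback. */
    static voidpf
    my_zalloc(voidpf opaque, uInt items, uInt size)
    {
            return pool_alloc((struct my_pool *)opaque, (size_t)items * size);
    }

    static void
    my_zfree(voidpf opaque, voidpf address)
    {
            pool_free((struct my_pool *)opaque, address);
    }

    /* Wiring, before deflateInit()/inflateInit():
     *     strm.zalloc = my_zalloc;
     *     strm.zfree  = my_zfree;
     *     strm.opaque = &pool;        forwarded verbatim to both callbacks
     */
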
-*/ + * ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); + * + * Initializes the internal stream state for compression. The fields + * zalloc, zfree and opaque must be initialized before by the caller. + * If zalloc and zfree are set to Z_NULL, deflateInit updates them to + * use default allocation functions. + * + * The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: + * 1 gives best speed, 9 gives best compression, 0 gives no compression at + * all (the input data is simply copied a block at a time). + * Z_DEFAULT_COMPRESSION requests a default compromise between speed and + * compression (currently equivalent to level 6). + * + * deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not + * enough memory, Z_STREAM_ERROR if level is not a valid compression level, + * Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible + * with the version assumed by the caller (ZLIB_VERSION). + * msg is set to null if there is no error message. deflateInit does not + * perform any compression: this will be done by deflate(). + */ ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); /* - deflate compresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce some - output latency (reading input without producing any output) except when - forced to flush. - - The detailed semantics are as follows. deflate performs one or both of the - following actions: - - - Compress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), next_in and avail_in are updated and - processing will resume at this point for the next call of deflate(). - - - Provide more output starting at next_out and update next_out and avail_out - accordingly. This action is forced if the parameter flush is non zero. - Forcing flush frequently degrades the compression ratio, so this parameter - should be set only when necessary (in interactive applications). - Some output may be provided even if flush is not set. - - Before the call of deflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming - more output, and updating avail_in or avail_out accordingly; avail_out - should never be zero before the call. The application can consume the - compressed output when it wants, for example when the output buffer is full - (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK - and with zero avail_out, it must be called again after making room in the - output buffer because there might be more output pending. - - Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to - decide how much data to accumualte before producing output, in order to - maximize compression. - - If the parameter flush is set to Z_SYNC_FLUSH, all pending output is - flushed to the output buffer and the output is aligned on a byte boundary, so - that the decompressor can get all input data available so far. (In particular - avail_in is zero after the call if enough output space has been provided - before the call.) Flushing may degrade compression for some compression - algorithms and so it should be used only when necessary. 
- - If flush is set to Z_FULL_FLUSH, all output is flushed as with - Z_SYNC_FLUSH, and the compression state is reset so that decompression can - restart from this point if previous compressed data has been damaged or if - random access is desired. Using Z_FULL_FLUSH too often can seriously degrade - compression. - - If deflate returns with avail_out == 0, this function must be called again - with the same value of the flush parameter and more output space (updated - avail_out), until the flush is complete (deflate returns with non-zero - avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that - avail_out is greater than six to avoid repeated flush markers due to - avail_out == 0 on return. - - If the parameter flush is set to Z_FINISH, pending input is processed, - pending output is flushed and deflate returns with Z_STREAM_END if there - was enough output space; if deflate returns with Z_OK, this function must be - called again with Z_FINISH and more output space (updated avail_out) but no - more input data, until it returns with Z_STREAM_END or an error. After - deflate has returned Z_STREAM_END, the only possible operations on the - stream are deflateReset or deflateEnd. - - Z_FINISH can be used immediately after deflateInit if all the compression - is to be done in a single step. In this case, avail_out must be at least - the value returned by deflateBound (see below). If deflate does not return - Z_STREAM_END, then it must be called again as described above. - - deflate() sets strm->adler to the adler32 checksum of all input read - so far (that is, total_in bytes). - - deflate() may update strm->data_type if it can make a good guess about - the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered - binary. This field is only for information purposes and does not affect - the compression algorithm in any manner. - - deflate() returns Z_OK if some progress has been made (more input - processed or more output produced), Z_STREAM_END if all input has been - consumed and all output has been produced (only when flush is set to - Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example - if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible - (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not - fatal, and deflate() can be called again with more input and more output - space to continue compressing. -*/ + * deflate compresses as much data as possible, and stops when the input + * buffer becomes empty or the output buffer becomes full. It may introduce some + * output latency (reading input without producing any output) except when + * forced to flush. + * + * The detailed semantics are as follows. deflate performs one or both of the + * following actions: + * + * - Compress more input starting at next_in and update next_in and avail_in + * accordingly. If not all input can be processed (because there is not + * enough room in the output buffer), next_in and avail_in are updated and + * processing will resume at this point for the next call of deflate(). + * + * - Provide more output starting at next_out and update next_out and avail_out + * accordingly. This action is forced if the parameter flush is non zero. + * Forcing flush frequently degrades the compression ratio, so this parameter + * should be set only when necessary (in interactive applications). + * Some output may be provided even if flush is not set. 
+ * + * Before the call of deflate(), the application should ensure that at least + * one of the actions is possible, by providing more input and/or consuming + * more output, and updating avail_in or avail_out accordingly; avail_out + * should never be zero before the call. The application can consume the + * compressed output when it wants, for example when the output buffer is full + * (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK + * and with zero avail_out, it must be called again after making room in the + * output buffer because there might be more output pending. + * + * Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to + * decide how much data to accumulate before producing output, in order to + * maximize compression. + * + * If the parameter flush is set to Z_SYNC_FLUSH, all pending output is + * flushed to the output buffer and the output is aligned on a byte boundary, so + * that the decompressor can get all input data available so far. (In particular + * avail_in is zero after the call if enough output space has been provided + * before the call.) Flushing may degrade compression for some compression + * algorithms and so it should be used only when necessary. + * + * If flush is set to Z_FULL_FLUSH, all output is flushed as with + * Z_SYNC_FLUSH, and the compression state is reset so that decompression can + * restart from this point if previous compressed data has been damaged or if + * random access is desired. Using Z_FULL_FLUSH too often can seriously degrade + * compression. + * + * If deflate returns with avail_out == 0, this function must be called again + * with the same value of the flush parameter and more output space (updated + * avail_out), until the flush is complete (deflate returns with non-zero + * avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that + * avail_out is greater than six to avoid repeated flush markers due to + * avail_out == 0 on return. + * + * If the parameter flush is set to Z_FINISH, pending input is processed, + * pending output is flushed and deflate returns with Z_STREAM_END if there + * was enough output space; if deflate returns with Z_OK, this function must be + * called again with Z_FINISH and more output space (updated avail_out) but no + * more input data, until it returns with Z_STREAM_END or an error. After + * deflate has returned Z_STREAM_END, the only possible operations on the + * stream are deflateReset or deflateEnd. + * + * Z_FINISH can be used immediately after deflateInit if all the compression + * is to be done in a single step. In this case, avail_out must be at least + * the value returned by deflateBound (see below). If deflate does not return + * Z_STREAM_END, then it must be called again as described above. + * + * deflate() sets strm->adler to the adler32 checksum of all input read + * so far (that is, total_in bytes). + * + * deflate() may update strm->data_type if it can make a good guess about + * the input data type (Z_BINARY or Z_TEXT). In doubt, the data is considered + * binary. This field is only for information purposes and does not affect + * the compression algorithm in any manner. 
+ * + * deflate() returns Z_OK if some progress has been made (more input + * processed or more output produced), Z_STREAM_END if all input has been + * consumed and all output has been produced (only when flush is set to + * Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example + * if next_in or next_out was NULL), Z_BUF_ERROR if no progress is possible + * (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not + * fatal, and deflate() can be called again with more input and more output + * space to continue compressing. + */ ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); /* - All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any - pending output. - - deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the - stream state was inconsistent, Z_DATA_ERROR if the stream was freed - prematurely (some input or output was discarded). In the error case, - msg may be set but then points to a static string (which must not be - deallocated). -*/ + * All dynamically allocated data structures for this stream are freed. + * This function discards any unprocessed input and does not flush any + * pending output. + * + * deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the + * stream state was inconsistent, Z_DATA_ERROR if the stream was freed + * prematurely (some input or output was discarded). In the error case, + * msg may be set but then points to a static string (which must not be + * deallocated). + */ /* -ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); - - Initializes the internal stream state for decompression. The fields - next_in, avail_in, zalloc, zfree and opaque must be initialized before by - the caller. If next_in is not Z_NULL and avail_in is large enough (the exact - value depends on the compression method), inflateInit determines the - compression method from the zlib header and allocates all data structures - accordingly; otherwise the allocation will be deferred to the first call of - inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to - use default allocation functions. - - inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_VERSION_ERROR if the zlib library version is incompatible with the - version assumed by the caller. msg is set to null if there is no error - message. inflateInit does not perform any decompression apart from reading - the zlib header if present: this will be done by inflate(). (So next_in and - avail_in may be modified, but next_out and avail_out are unchanged.) -*/ + * ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); + * + * Initializes the internal stream state for decompression. The fields + * next_in, avail_in, zalloc, zfree and opaque must be initialized before by + * the caller. If next_in is not Z_NULL and avail_in is large enough (the exact + * value depends on the compression method), inflateInit determines the + * compression method from the zlib header and allocates all data structures + * accordingly; otherwise the allocation will be deferred to the first call of + * inflate. If zalloc and zfree are set to Z_NULL, inflateInit updates them to + * use default allocation functions. + * + * inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough + * memory, Z_VERSION_ERROR if the zlib library version is incompatible with the + * version assumed by the caller. msg is set to null if there is no error + * message. 
inflateInit does not perform any decompression apart from reading + * the zlib header if present: this will be done by inflate(). (So next_in and + * avail_in may be modified, but next_out and avail_out are unchanged.) + */ ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); /* - inflate decompresses as much data as possible, and stops when the input - buffer becomes empty or the output buffer becomes full. It may introduce - some output latency (reading input without producing any output) except when - forced to flush. - - The detailed semantics are as follows. inflate performs one or both of the - following actions: - - - Decompress more input starting at next_in and update next_in and avail_in - accordingly. If not all input can be processed (because there is not - enough room in the output buffer), next_in is updated and processing - will resume at this point for the next call of inflate(). - - - Provide more output starting at next_out and update next_out and avail_out - accordingly. inflate() provides as much output as possible, until there - is no more input data or no more space in the output buffer (see below - about the flush parameter). - - Before the call of inflate(), the application should ensure that at least - one of the actions is possible, by providing more input and/or consuming - more output, and updating the next_* and avail_* values accordingly. - The application can consume the uncompressed output when it wants, for - example when the output buffer is full (avail_out == 0), or after each - call of inflate(). If inflate returns Z_OK and with zero avail_out, it - must be called again after making room in the output buffer because there - might be more output pending. - - The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, - Z_FINISH, or Z_BLOCK. Z_SYNC_FLUSH requests that inflate() flush as much - output as possible to the output buffer. Z_BLOCK requests that inflate() stop - if and when it gets to the next deflate block boundary. When decoding the - zlib or gzip format, this will cause inflate() to return immediately after - the header and before the first block. When doing a raw inflate, inflate() - will go ahead and process the first block, and will return when it gets to - the end of that block, or when it runs out of data. - - The Z_BLOCK option assists in appending to or combining deflate streams. - Also to assist in this, on return inflate() will set strm->data_type to the - number of unused bits in the last byte taken from strm->next_in, plus 64 - if inflate() is currently decoding the last block in the deflate stream, - plus 128 if inflate() returned immediately after decoding an end-of-block - code or decoding the complete header up to just before the first byte of the - deflate stream. The end-of-block will not be indicated until all of the - uncompressed data from that block has been written to strm->next_out. The - number of unused bits may in general be greater than seven, except when - bit 7 of data_type is set, in which case the number of unused bits will be - less than eight. - - inflate() should normally be called until it returns Z_STREAM_END or an - error. However if all decompression is to be performed in a single step - (a single call of inflate), the parameter flush should be set to - Z_FINISH. In this case all pending input is processed and all pending - output is flushed; avail_out must be large enough to hold all the - uncompressed data. 
(The size of the uncompressed data may have been saved - by the compressor for this purpose.) The next operation on this stream must - be inflateEnd to deallocate the decompression state. The use of Z_FINISH - is never required, but can be used to inform inflate that a faster approach - may be used for the single inflate() call. - - In this implementation, inflate() always flushes as much output as - possible to the output buffer, and always uses the faster approach on the - first call. So the only effect of the flush parameter in this implementation - is on the return value of inflate(), as noted below, or when it returns early - because Z_BLOCK is used. - - If a preset dictionary is needed after this call (see inflateSetDictionary - below), inflate sets strm->adler to the adler32 checksum of the dictionary - chosen by the compressor and returns Z_NEED_DICT; otherwise it sets - strm->adler to the adler32 checksum of all output produced so far (that is, - total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described - below. At the end of the stream, inflate() checks that its computed adler32 - checksum is equal to that saved by the compressor and returns Z_STREAM_END - only if the checksum is correct. - - inflate() will decompress and check either zlib-wrapped or gzip-wrapped - deflate data. The header type is detected automatically. Any information - contained in the gzip header is not retained, so applications that need that - information should instead use raw inflate, see inflateInit2() below, or - inflateBack() and perform their own processing of the gzip header and - trailer. - - inflate() returns Z_OK if some progress has been made (more input processed - or more output produced), Z_STREAM_END if the end of the compressed data has - been reached and all uncompressed output has been produced, Z_NEED_DICT if a - preset dictionary is needed at this point, Z_DATA_ERROR if the input data was - corrupted (input stream not conforming to the zlib format or incorrect check - value), Z_STREAM_ERROR if the stream structure was inconsistent (for example - if next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory, - Z_BUF_ERROR if no progress is possible or if there was not enough room in the - output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and - inflate() can be called again with more input and more output space to - continue decompressing. If Z_DATA_ERROR is returned, the application may then - call inflateSync() to look for a good compression block if a partial recovery - of the data is desired. -*/ + * inflate decompresses as much data as possible, and stops when the input + * buffer becomes empty or the output buffer becomes full. It may introduce + * some output latency (reading input without producing any output) except when + * forced to flush. + * + * The detailed semantics are as follows. inflate performs one or both of the + * following actions: + * + * - Decompress more input starting at next_in and update next_in and avail_in + * accordingly. If not all input can be processed (because there is not + * enough room in the output buffer), next_in is updated and processing + * will resume at this point for the next call of inflate(). + * + * - Provide more output starting at next_out and update next_out and avail_out + * accordingly. inflate() provides as much output as possible, until there + * is no more input data or no more space in the output buffer (see below + * about the flush parameter). 
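
The two actions above combine into the matching zpipe-style decompression
loop; again, the names (inf, CHUNK) and the abbreviated error handling are
illustrative:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define CHUNK 16384

/* Call inflate() until it returns Z_STREAM_END or an error, draining
 * the output buffer whenever it fills. */
static int
inf(FILE *source, FILE *dest)
{
	unsigned char in[CHUNK], out[CHUNK];
	z_stream strm;
	int ret;

	memset(&strm, 0, sizeof(strm));
	ret = inflateInit(&strm);
	if (ret != Z_OK) {
		return ret;
	}

	do {
		strm.avail_in = (uInt)fread(in, 1, CHUNK, source);
		if (strm.avail_in == 0) {
			break;                  /* premature end of input */
		}
		strm.next_in = in;

		do {
			strm.avail_out = CHUNK;
			strm.next_out = out;
			ret = inflate(&strm, Z_NO_FLUSH);
			if (ret == Z_NEED_DICT || ret == Z_DATA_ERROR ||
			    ret == Z_MEM_ERROR || ret == Z_STREAM_ERROR) {
				(void)inflateEnd(&strm);
				return ret;
			}
			fwrite(out, 1, CHUNK - strm.avail_out, dest);
		} while (strm.avail_out == 0);
	} while (ret != Z_STREAM_END);

	(void)inflateEnd(&strm);
	return ret == Z_STREAM_END ? Z_OK : Z_DATA_ERROR;
}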
+ * + * Before the call of inflate(), the application should ensure that at least + * one of the actions is possible, by providing more input and/or consuming + * more output, and updating the next_* and avail_* values accordingly. + * The application can consume the uncompressed output when it wants, for + * example when the output buffer is full (avail_out == 0), or after each + * call of inflate(). If inflate returns Z_OK and with zero avail_out, it + * must be called again after making room in the output buffer because there + * might be more output pending. + * + * The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, + * Z_FINISH, or Z_BLOCK. Z_SYNC_FLUSH requests that inflate() flush as much + * output as possible to the output buffer. Z_BLOCK requests that inflate() stop + * if and when it gets to the next deflate block boundary. When decoding the + * zlib or gzip format, this will cause inflate() to return immediately after + * the header and before the first block. When doing a raw inflate, inflate() + * will go ahead and process the first block, and will return when it gets to + * the end of that block, or when it runs out of data. + * + * The Z_BLOCK option assists in appending to or combining deflate streams. + * Also to assist in this, on return inflate() will set strm->data_type to the + * number of unused bits in the last byte taken from strm->next_in, plus 64 + * if inflate() is currently decoding the last block in the deflate stream, + * plus 128 if inflate() returned immediately after decoding an end-of-block + * code or decoding the complete header up to just before the first byte of the + * deflate stream. The end-of-block will not be indicated until all of the + * uncompressed data from that block has been written to strm->next_out. The + * number of unused bits may in general be greater than seven, except when + * bit 7 of data_type is set, in which case the number of unused bits will be + * less than eight. + * + * inflate() should normally be called until it returns Z_STREAM_END or an + * error. However if all decompression is to be performed in a single step + * (a single call of inflate), the parameter flush should be set to + * Z_FINISH. In this case all pending input is processed and all pending + * output is flushed; avail_out must be large enough to hold all the + * uncompressed data. (The size of the uncompressed data may have been saved + * by the compressor for this purpose.) The next operation on this stream must + * be inflateEnd to deallocate the decompression state. The use of Z_FINISH + * is never required, but can be used to inform inflate that a faster approach + * may be used for the single inflate() call. + * + * In this implementation, inflate() always flushes as much output as + * possible to the output buffer, and always uses the faster approach on the + * first call. So the only effect of the flush parameter in this implementation + * is on the return value of inflate(), as noted below, or when it returns early + * because Z_BLOCK is used. + * + * If a preset dictionary is needed after this call (see inflateSetDictionary + * below), inflate sets strm->adler to the adler32 checksum of the dictionary + * chosen by the compressor and returns Z_NEED_DICT; otherwise it sets + * strm->adler to the adler32 checksum of all output produced so far (that is, + * total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described + * below. 
At the end of the stream, inflate() checks that its computed adler32 + * checksum is equal to that saved by the compressor and returns Z_STREAM_END + * only if the checksum is correct. + * + * inflate() will decompress and check either zlib-wrapped or gzip-wrapped + * deflate data. The header type is detected automatically. Any information + * contained in the gzip header is not retained, so applications that need that + * information should instead use raw inflate, see inflateInit2() below, or + * inflateBack() and perform their own processing of the gzip header and + * trailer. + * + * inflate() returns Z_OK if some progress has been made (more input processed + * or more output produced), Z_STREAM_END if the end of the compressed data has + * been reached and all uncompressed output has been produced, Z_NEED_DICT if a + * preset dictionary is needed at this point, Z_DATA_ERROR if the input data was + * corrupted (input stream not conforming to the zlib format or incorrect check + * value), Z_STREAM_ERROR if the stream structure was inconsistent (for example + * if next_in or next_out was NULL), Z_MEM_ERROR if there was not enough memory, + * Z_BUF_ERROR if no progress is possible or if there was not enough room in the + * output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and + * inflate() can be called again with more input and more output space to + * continue decompressing. If Z_DATA_ERROR is returned, the application may then + * call inflateSync() to look for a good compression block if a partial recovery + * of the data is desired. + */ ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); /* - All dynamically allocated data structures for this stream are freed. - This function discards any unprocessed input and does not flush any - pending output. - - inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state - was inconsistent. In the error case, msg may be set but then points to a - static string (which must not be deallocated). -*/ + * All dynamically allocated data structures for this stream are freed. + * This function discards any unprocessed input and does not flush any + * pending output. + * + * inflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state + * was inconsistent. In the error case, msg may be set but then points to a + * static string (which must not be deallocated). + */ - /* Advanced functions */ +/* Advanced functions */ /* - The following functions are needed only in some special applications. -*/ + * The following functions are needed only in some special applications. + */ /* -ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, - int level, - int method, - int windowBits, - int memLevel, - int strategy)); - - This is another version of deflateInit with more compression options. The - fields next_in, zalloc, zfree and opaque must be initialized before by - the caller. - - The method parameter is the compression method. It must be Z_DEFLATED in - this version of the library. - - The windowBits parameter is the base two logarithm of the window size - (the size of the history buffer). It should be in the range 8..15 for this - version of the library. Larger values of this parameter result in better - compression at the expense of memory usage. The default value is 15 if - deflateInit is used instead. - - windowBits can also be -8..-15 for raw deflate. In this case, -windowBits - determines the window size. 
deflate() will then generate raw deflate data - with no zlib header or trailer, and will not compute an adler32 check value. - - windowBits can also be greater than 15 for optional gzip encoding. Add - 16 to windowBits to write a simple gzip header and trailer around the - compressed data instead of a zlib wrapper. The gzip header will have no - file name, no extra data, no comment, no modification time (set to zero), - no header crc, and the operating system will be set to 255 (unknown). If a - gzip stream is being written, strm->adler is a crc32 instead of an adler32. - - The memLevel parameter specifies how much memory should be allocated - for the internal compression state. memLevel=1 uses minimum memory but - is slow and reduces compression ratio; memLevel=9 uses maximum memory - for optimal speed. The default value is 8. See zconf.h for total memory - usage as a function of windowBits and memLevel. - - The strategy parameter is used to tune the compression algorithm. Use the - value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a - filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no - string match), or Z_RLE to limit match distances to one (run-length - encoding). Filtered data consists mostly of small values with a somewhat - random distribution. In this case, the compression algorithm is tuned to - compress them better. The effect of Z_FILTERED is to force more Huffman - coding and less string matching; it is somewhat intermediate between - Z_DEFAULT and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as - Z_HUFFMAN_ONLY, but give better compression for PNG image data. The strategy - parameter only affects the compression ratio but not the correctness of the - compressed output even if it is not set appropriately. Z_FIXED prevents the - use of dynamic Huffman codes, allowing for a simpler decoder for special - applications. - - deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid - method). msg is set to null if there is no error message. deflateInit2 does - not perform any compression: this will be done by deflate(). -*/ + * ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, + * int level, + * int method, + * int windowBits, + * int memLevel, + * int strategy)); + * + * This is another version of deflateInit with more compression options. The + * fields next_in, zalloc, zfree and opaque must be initialized before by + * the caller. + * + * The method parameter is the compression method. It must be Z_DEFLATED in + * this version of the library. + * + * The windowBits parameter is the base two logarithm of the window size + * (the size of the history buffer). It should be in the range 8..15 for this + * version of the library. Larger values of this parameter result in better + * compression at the expense of memory usage. The default value is 15 if + * deflateInit is used instead. + * + * windowBits can also be -8..-15 for raw deflate. In this case, -windowBits + * determines the window size. deflate() will then generate raw deflate data + * with no zlib header or trailer, and will not compute an adler32 check value. + * + * windowBits can also be greater than 15 for optional gzip encoding. Add + * 16 to windowBits to write a simple gzip header and trailer around the + * compressed data instead of a zlib wrapper. 
The gzip header will have no
+ * file name, no extra data, no comment, no modification time (set to zero),
+ * no header crc, and the operating system will be set to 255 (unknown). If a
+ * gzip stream is being written, strm->adler is a crc32 instead of an adler32.
+ *
+ * The memLevel parameter specifies how much memory should be allocated
+ * for the internal compression state. memLevel=1 uses minimum memory but
+ * is slow and reduces compression ratio; memLevel=9 uses maximum memory
+ * for optimal speed. The default value is 8. See zconf.h for total memory
+ * usage as a function of windowBits and memLevel.
+ *
+ * The strategy parameter is used to tune the compression algorithm. Use the
+ * value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
+ * filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no
+ * string match), or Z_RLE to limit match distances to one (run-length
+ * encoding). Filtered data consists mostly of small values with a somewhat
+ * random distribution. In this case, the compression algorithm is tuned to
+ * compress them better. The effect of Z_FILTERED is to force more Huffman
+ * coding and less string matching; it is somewhat intermediate between
+ * Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as
+ * Z_HUFFMAN_ONLY, but give better compression for PNG image data. The strategy
+ * parameter only affects the compression ratio but not the correctness of the
+ * compressed output even if it is not set appropriately. Z_FIXED prevents the
+ * use of dynamic Huffman codes, allowing for a simpler decoder for special
+ * applications.
+ *
+ * deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
+ * memory, Z_STREAM_ERROR if a parameter is invalid (such as an invalid
+ * method). msg is set to null if there is no error message. deflateInit2 does
+ * not perform any compression: this will be done by deflate().
+ */

 ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm,
-    const Bytef *dictionary,
-    uInt dictLength));
+    const Bytef *dictionary,
+    uInt dictLength));
 /*
-   Initializes the compression dictionary from the given byte sequence
-   without producing any compressed output. This function must be called
-   immediately after deflateInit, deflateInit2 or deflateReset, before any
-   call of deflate. The compressor and decompressor must use exactly the same
-   dictionary (see inflateSetDictionary).
-
-   The dictionary should consist of strings (byte sequences) that are likely
-   to be encountered later in the data to be compressed, with the most commonly
-   used strings preferably put towards the end of the dictionary. Using a
-   dictionary is most useful when the data to be compressed is short and can be
-   predicted with good accuracy; the data can then be compressed better than
-   with the default empty dictionary.
-
-   Depending on the size of the compression data structures selected by
-   deflateInit or deflateInit2, a part of the dictionary may in effect be
-   discarded, for example if the dictionary is larger than the window size in
-   deflate or deflate2. Thus the strings most likely to be useful should be
-   put at the end of the dictionary, not at the front. In addition, the
-   current implementation of deflate will use at most the window size minus
-   262 bytes of the provided dictionary.
-
-   Upon return of this function, strm->adler is set to the adler32 value
-   of the dictionary; the decompressor may later use this value to determine
-   which dictionary has been used by the compressor.
(The adler32 value - applies to the whole dictionary even if only a subset of the dictionary is - actually used by the compressor.) If a raw deflate was requested, then the - adler32 value is not computed and strm->adler is not set. - - deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a - parameter is invalid (such as NULL dictionary) or the stream state is - inconsistent (for example if deflate has already been called for this stream - or if the compression method is bsort). deflateSetDictionary does not - perform any compression: this will be done by deflate(). -*/ + * Initializes the compression dictionary from the given byte sequence + * without producing any compressed output. This function must be called + * immediately after deflateInit, deflateInit2 or deflateReset, before any + * call of deflate. The compressor and decompressor must use exactly the same + * dictionary (see inflateSetDictionary). + * + * The dictionary should consist of strings (byte sequences) that are likely + * to be encountered later in the data to be compressed, with the most commonly + * used strings preferably put towards the end of the dictionary. Using a + * dictionary is most useful when the data to be compressed is short and can be + * predicted with good accuracy; the data can then be compressed better than + * with the default empty dictionary. + * + * Depending on the size of the compression data structures selected by + * deflateInit or deflateInit2, a part of the dictionary may in effect be + * discarded, for example if the dictionary is larger than the window size in + * deflate or deflate2. Thus the strings most likely to be useful should be + * put at the end of the dictionary, not at the front. In addition, the + * current implementation of deflate will use at most the window size minus + * 262 bytes of the provided dictionary. + * + * Upon return of this function, strm->adler is set to the adler32 value + * of the dictionary; the decompressor may later use this value to determine + * which dictionary has been used by the compressor. (The adler32 value + * applies to the whole dictionary even if only a subset of the dictionary is + * actually used by the compressor.) If a raw deflate was requested, then the + * adler32 value is not computed and strm->adler is not set. + * + * deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a + * parameter is invalid (such as NULL dictionary) or the stream state is + * inconsistent (for example if deflate has already been called for this stream + * or if the compression method is bsort). deflateSetDictionary does not + * perform any compression: this will be done by deflate(). + */ ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, - z_streamp source)); + z_streamp source)); /* - Sets the destination stream as a complete copy of the source stream. - - This function can be useful when several compression strategies will be - tried, for example when there are several ways of pre-processing the input - data with a filter. The streams that will be discarded should then be freed - by calling deflateEnd. Note that deflateCopy duplicates the internal - compression state which can be quite large, so this strategy is slow and - can consume lots of memory. - - deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_STREAM_ERROR if the source stream state was inconsistent - (such as zalloc being NULL). msg is left unchanged in both source and - destination. 
-*/ + * Sets the destination stream as a complete copy of the source stream. + * + * This function can be useful when several compression strategies will be + * tried, for example when there are several ways of pre-processing the input + * data with a filter. The streams that will be discarded should then be freed + * by calling deflateEnd. Note that deflateCopy duplicates the internal + * compression state which can be quite large, so this strategy is slow and + * can consume lots of memory. + * + * deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not + * enough memory, Z_STREAM_ERROR if the source stream state was inconsistent + * (such as zalloc being NULL). msg is left unchanged in both source and + * destination. + */ #if XNU_KERNEL_PRIVATE @@ -632,473 +632,473 @@ ZEXTERN int ZEXPORT deflateResetWithIO(z_streamp strm, z_input_func zinput, z_ou ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); /* - This function is equivalent to deflateEnd followed by deflateInit, - but does not free and reallocate all the internal compression state. - The stream will keep the same compression level and any other attributes - that may have been set by deflateInit2. - - deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent (such as zalloc or state being NULL). -*/ + * This function is equivalent to deflateEnd followed by deflateInit, + * but does not free and reallocate all the internal compression state. + * The stream will keep the same compression level and any other attributes + * that may have been set by deflateInit2. + * + * deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source + * stream state was inconsistent (such as zalloc or state being NULL). + */ ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, - int level, - int strategy)); + int level, + int strategy)); /* - Dynamically update the compression level and compression strategy. The - interpretation of level and strategy is as in deflateInit2. This can be - used to switch between compression and straight copy of the input data, or - to switch to a different kind of input data requiring a different - strategy. If the compression level is changed, the input available so far - is compressed with the old level (and may be flushed); the new level will - take effect only at the next call of deflate(). - - Before the call of deflateParams, the stream state must be set as for - a call of deflate(), since the currently available input may have to - be compressed and flushed. In particular, strm->avail_out must be non-zero. - - deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source - stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR - if strm->avail_out was zero. -*/ + * Dynamically update the compression level and compression strategy. The + * interpretation of level and strategy is as in deflateInit2. This can be + * used to switch between compression and straight copy of the input data, or + * to switch to a different kind of input data requiring a different + * strategy. If the compression level is changed, the input available so far + * is compressed with the old level (and may be flushed); the new level will + * take effect only at the next call of deflate(). + * + * Before the call of deflateParams, the stream state must be set as for + * a call of deflate(), since the currently available input may have to + * be compressed and flushed. In particular, strm->avail_out must be non-zero. 
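
As a minimal sketch of the rule above, switching a live stream to stored
(uncompressed) blocks, assuming strm points at a stream between deflate()
calls with output space still available:

#include <zlib.h>

/* Level 0 selects stored (uncompressed) blocks; input buffered at the
 * old level may be compressed and flushed first, and the new setting
 * takes effect at the next deflate() call. */
static int
switch_to_store(z_streamp strm)
{
	return deflateParams(strm, 0, Z_DEFAULT_STRATEGY);
}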
+ * + * deflateParams returns Z_OK if success, Z_STREAM_ERROR if the source + * stream state was inconsistent or if a parameter was invalid, Z_BUF_ERROR + * if strm->avail_out was zero. + */ ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, - int good_length, - int max_lazy, - int nice_length, - int max_chain)); + int good_length, + int max_lazy, + int nice_length, + int max_chain)); /* - Fine tune deflate's internal compression parameters. This should only be - used by someone who understands the algorithm used by zlib's deflate for - searching for the best matching string, and even then only by the most - fanatic optimizer trying to squeeze out the last compressed bit for their - specific input data. Read the deflate.c source code for the meaning of the - max_lazy, good_length, nice_length, and max_chain parameters. - - deflateTune() can be called after deflateInit() or deflateInit2(), and - returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. + * Fine tune deflate's internal compression parameters. This should only be + * used by someone who understands the algorithm used by zlib's deflate for + * searching for the best matching string, and even then only by the most + * fanatic optimizer trying to squeeze out the last compressed bit for their + * specific input data. Read the deflate.c source code for the meaning of the + * max_lazy, good_length, nice_length, and max_chain parameters. + * + * deflateTune() can be called after deflateInit() or deflateInit2(), and + * returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. */ ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm, - uLong sourceLen)); + uLong sourceLen)); /* - deflateBound() returns an upper bound on the compressed size after - deflation of sourceLen bytes. It must be called after deflateInit() - or deflateInit2(). This would be used to allocate an output buffer - for deflation in a single pass, and so would be called before deflate(). -*/ + * deflateBound() returns an upper bound on the compressed size after + * deflation of sourceLen bytes. It must be called after deflateInit() + * or deflateInit2(). This would be used to allocate an output buffer + * for deflation in a single pass, and so would be called before deflate(). + */ ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, - int bits, - int value)); + int bits, + int value)); /* - deflatePrime() inserts bits in the deflate output stream. The intent - is that this function is used to start off the deflate output with the - bits leftover from a previous deflate stream when appending to it. As such, - this function can only be used for raw deflate, and must be used before the - first deflate() call after a deflateInit2() or deflateReset(). bits must be - less than or equal to 16, and that many of the least significant bits of - value will be inserted in the output. - - deflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ + * deflatePrime() inserts bits in the deflate output stream. The intent + * is that this function is used to start off the deflate output with the + * bits leftover from a previous deflate stream when appending to it. As such, + * this function can only be used for raw deflate, and must be used before the + * first deflate() call after a deflateInit2() or deflateReset(). bits must be + * less than or equal to 16, and that many of the least significant bits of + * value will be inserted in the output. 
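
Returning to deflateBound() above, a sketch of the single-call pattern it
enables; the helper name compress_once and the error handling are
illustrative:

#include <stdlib.h>
#include <string.h>
#include <zlib.h>

/* Query the worst-case output size after deflateInit(), allocate that
 * much, and compress the whole input with a single Z_FINISH call.
 * Returns a malloc'd buffer (caller frees) and its length in *out_len,
 * or NULL on failure. */
static unsigned char *
compress_once(const unsigned char *src, uLong src_len, uLong *out_len)
{
	z_stream strm;
	unsigned char *dst;
	uLong bound;

	memset(&strm, 0, sizeof(strm));
	if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK) {
		return NULL;
	}

	bound = deflateBound(&strm, src_len);   /* worst-case output size */
	dst = malloc(bound);
	if (dst == NULL) {
		(void)deflateEnd(&strm);
		return NULL;
	}

	strm.next_in = (Bytef *)src;
	strm.avail_in = (uInt)src_len;
	strm.next_out = dst;
	strm.avail_out = (uInt)bound;

	/* With avail_out >= deflateBound(), one Z_FINISH call completes. */
	if (deflate(&strm, Z_FINISH) != Z_STREAM_END) {
		free(dst);
		dst = NULL;
	} else {
		*out_len = strm.total_out;
	}
	(void)deflateEnd(&strm);
	return dst;
}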
+ * + * deflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + * stream state was inconsistent. + */ ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, - gz_headerp head)); + gz_headerp head)); /* - deflateSetHeader() provides gzip header information for when a gzip - stream is requested by deflateInit2(). deflateSetHeader() may be called - after deflateInit2() or deflateReset() and before the first call of - deflate(). The text, time, os, extra field, name, and comment information - in the provided gz_header structure are written to the gzip header (xflag is - ignored -- the extra flags are set according to the compression level). The - caller must assure that, if not Z_NULL, name and comment are terminated with - a zero byte, and that if extra is not Z_NULL, that extra_len bytes are - available there. If hcrc is true, a gzip header crc is included. Note that - the current versions of the command-line version of gzip (up through version - 1.3.x) do not support header crc's, and will report that it is a "multi-part - gzip file" and give up. - - If deflateSetHeader is not used, the default gzip header has text false, - the time set to zero, and os set to 255, with no extra, name, or comment - fields. The gzip header is returned to the default state by deflateReset(). - - deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ + * deflateSetHeader() provides gzip header information for when a gzip + * stream is requested by deflateInit2(). deflateSetHeader() may be called + * after deflateInit2() or deflateReset() and before the first call of + * deflate(). The text, time, os, extra field, name, and comment information + * in the provided gz_header structure are written to the gzip header (xflag is + * ignored -- the extra flags are set according to the compression level). The + * caller must assure that, if not Z_NULL, name and comment are terminated with + * a zero byte, and that if extra is not Z_NULL, that extra_len bytes are + * available there. If hcrc is true, a gzip header crc is included. Note that + * the current versions of the command-line version of gzip (up through version + * 1.3.x) do not support header crc's, and will report that it is a "multi-part + * gzip file" and give up. + * + * If deflateSetHeader is not used, the default gzip header has text false, + * the time set to zero, and os set to 255, with no extra, name, or comment + * fields. The gzip header is returned to the default state by deflateReset(). + * + * deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + * stream state was inconsistent. + */ /* -ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, - int windowBits)); - - This is another version of inflateInit with an extra parameter. The - fields next_in, avail_in, zalloc, zfree and opaque must be initialized - before by the caller. - - The windowBits parameter is the base two logarithm of the maximum window - size (the size of the history buffer). It should be in the range 8..15 for - this version of the library. The default value is 15 if inflateInit is used - instead. windowBits must be greater than or equal to the windowBits value - provided to deflateInit2() while compressing, or it must be equal to 15 if - deflateInit2() was not used. If a compressed stream with a larger window - size is given as input, inflate() will return with the error code - Z_DATA_ERROR instead of trying to allocate a larger window. 
-
-     windowBits can also be -8..-15 for raw inflate. In this case, -windowBits
-   determines the window size. inflate() will then process raw deflate data,
-   not looking for a zlib or gzip header, not generating a check value, and not
-   looking for any check values for comparison at the end of the stream. This
-   is for use with other formats that use the deflate compressed data format
-   such as zip. Those formats provide their own check values. If a custom
-   format is developed using the raw deflate format for compressed data, it is
-   recommended that a check value such as an adler32 or a crc32 be applied to
-   the uncompressed data as is done in the zlib, gzip, and zip formats. For
-   most applications, the zlib format should be used as is. Note that comments
-   above on the use in deflateInit2() applies to the magnitude of windowBits.
-
-     windowBits can also be greater than 15 for optional gzip decoding. Add
-   32 to windowBits to enable zlib and gzip decoding with automatic header
-   detection, or add 16 to decode only the gzip format (the zlib format will
-   return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is
-   a crc32 instead of an adler32.
-
-     inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
-   memory, Z_STREAM_ERROR if a parameter is invalid (such as a null strm). msg
-   is set to null if there is no error message. inflateInit2 does not perform
-   any decompression apart from reading the zlib header if present: this will
-   be done by inflate(). (So next_in and avail_in may be modified, but next_out
-   and avail_out are unchanged.)
-*/
+ * ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm,
+ *                                      int windowBits));
+ *
+ * This is another version of inflateInit with an extra parameter. The
+ * fields next_in, avail_in, zalloc, zfree and opaque must be initialized
+ * before by the caller.
+ *
+ * The windowBits parameter is the base two logarithm of the maximum window
+ * size (the size of the history buffer). It should be in the range 8..15 for
+ * this version of the library. The default value is 15 if inflateInit is used
+ * instead. windowBits must be greater than or equal to the windowBits value
+ * provided to deflateInit2() while compressing, or it must be equal to 15 if
+ * deflateInit2() was not used. If a compressed stream with a larger window
+ * size is given as input, inflate() will return with the error code
+ * Z_DATA_ERROR instead of trying to allocate a larger window.
+ *
+ * windowBits can also be -8..-15 for raw inflate. In this case, -windowBits
+ * determines the window size. inflate() will then process raw deflate data,
+ * not looking for a zlib or gzip header, not generating a check value, and not
+ * looking for any check values for comparison at the end of the stream. This
+ * is for use with other formats that use the deflate compressed data format
+ * such as zip. Those formats provide their own check values. If a custom
+ * format is developed using the raw deflate format for compressed data, it is
+ * recommended that a check value such as an adler32 or a crc32 be applied to
+ * the uncompressed data as is done in the zlib, gzip, and zip formats. For
+ * most applications, the zlib format should be used as is. Note that comments
+ * above on the use in deflateInit2() apply to the magnitude of windowBits.
+ *
+ * windowBits can also be greater than 15 for optional gzip decoding.
Add + * 32 to windowBits to enable zlib and gzip decoding with automatic header + * detection, or add 16 to decode only the gzip format (the zlib format will + * return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is + * a crc32 instead of an adler32. + * + * inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + * memory, Z_STREAM_ERROR if a parameter is invalid (such as a null strm). msg + * is set to null if there is no error message. inflateInit2 does not perform + * any decompression apart from reading the zlib header if present: this will + * be done by inflate(). (So next_in and avail_in may be modified, but next_out + * and avail_out are unchanged.) + */ ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, - const Bytef *dictionary, - uInt dictLength)); + const Bytef *dictionary, + uInt dictLength)); /* - Initializes the decompression dictionary from the given uncompressed byte - sequence. This function must be called immediately after a call of inflate, - if that call returned Z_NEED_DICT. The dictionary chosen by the compressor - can be determined from the adler32 value returned by that call of inflate. - The compressor and decompressor must use exactly the same dictionary (see - deflateSetDictionary). For raw inflate, this function can be called - immediately after inflateInit2() or inflateReset() and before any call of - inflate() to set the dictionary. The application must insure that the - dictionary that was used for compression is provided. - - inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a - parameter is invalid (such as NULL dictionary) or the stream state is - inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the - expected one (incorrect adler32 value). inflateSetDictionary does not - perform any decompression: this will be done by subsequent calls of - inflate(). -*/ + * Initializes the decompression dictionary from the given uncompressed byte + * sequence. This function must be called immediately after a call of inflate, + * if that call returned Z_NEED_DICT. The dictionary chosen by the compressor + * can be determined from the adler32 value returned by that call of inflate. + * The compressor and decompressor must use exactly the same dictionary (see + * deflateSetDictionary). For raw inflate, this function can be called + * immediately after inflateInit2() or inflateReset() and before any call of + * inflate() to set the dictionary. The application must insure that the + * dictionary that was used for compression is provided. + * + * inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a + * parameter is invalid (such as NULL dictionary) or the stream state is + * inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the + * expected one (incorrect adler32 value). inflateSetDictionary does not + * perform any decompression: this will be done by subsequent calls of + * inflate(). + */ ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); /* - Skips invalid compressed data until a full flush point (see above the - description of deflate with Z_FULL_FLUSH) can be found, or until all - available input is skipped. No output is provided. - - inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR - if no more input was provided, Z_DATA_ERROR if no flush point has been found, - or Z_STREAM_ERROR if the stream structure was inconsistent. 
In the success
-   case, the application may save the current current value of total_in which
-   indicates where valid compressed data was found. In the error case, the
-   application may repeatedly call inflateSync, providing more input each time,
-   until success or end of the input data.
-*/
+ * Skips invalid compressed data until a full flush point (see above the
+ * description of deflate with Z_FULL_FLUSH) can be found, or until all
+ * available input is skipped. No output is provided.
+ *
+ * inflateSync returns Z_OK if a full flush point has been found, Z_BUF_ERROR
+ * if no more input was provided, Z_DATA_ERROR if no flush point has been found,
+ * or Z_STREAM_ERROR if the stream structure was inconsistent. In the success
+ * case, the application may save the current value of total_in which
+ * indicates where valid compressed data was found. In the error case, the
+ * application may repeatedly call inflateSync, providing more input each time,
+ * until success or end of the input data.
+ */

 ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest,
-    z_streamp source));
+    z_streamp source));
 /*
-   Sets the destination stream as a complete copy of the source stream.
-
-   This function can be useful when randomly accessing a large stream. The
-   first pass through the stream can periodically record the inflate state,
-   allowing restarting inflate at those points when randomly accessing the
-   stream.
-
-   inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
-   enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
-   (such as zalloc being NULL). msg is left unchanged in both source and
-   destination.
-*/
+ * Sets the destination stream as a complete copy of the source stream.
+ *
+ * This function can be useful when randomly accessing a large stream. The
+ * first pass through the stream can periodically record the inflate state,
+ * allowing restarting inflate at those points when randomly accessing the
+ * stream.
+ *
+ * inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not
+ * enough memory, Z_STREAM_ERROR if the source stream state was inconsistent
+ * (such as zalloc being NULL). msg is left unchanged in both source and
+ * destination.
+ */

 ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm));
 /*
-   This function is equivalent to inflateEnd followed by inflateInit,
-   but does not free and reallocate all the internal decompression state.
-   The stream will keep attributes that may have been set by inflateInit2.
-
-   inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
-   stream state was inconsistent (such as zalloc or state being NULL).
-*/
+ * This function is equivalent to inflateEnd followed by inflateInit,
+ * but does not free and reallocate all the internal decompression state.
+ * The stream will keep attributes that may have been set by inflateInit2.
+ *
+ * inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source
+ * stream state was inconsistent (such as zalloc or state being NULL).
+ */

 ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm,
-    int bits,
-    int value));
+    int bits,
+    int value));
 /*
-   This function inserts bits in the inflate input stream. The intent is
-   that this function is used to start inflating at a bit position in the
-   middle of a byte. The provided bits will be used before any bytes are used
-   from next_in. This function should only be used with raw inflate, and
-   should be used before the first inflate() call after inflateInit2() or
-   inflateReset().
bits must be less than or equal to 16, and that many of the - least significant bits of value will be inserted in the input. - - inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ + * This function inserts bits in the inflate input stream. The intent is + * that this function is used to start inflating at a bit position in the + * middle of a byte. The provided bits will be used before any bytes are used + * from next_in. This function should only be used with raw inflate, and + * should be used before the first inflate() call after inflateInit2() or + * inflateReset(). bits must be less than or equal to 16, and that many of the + * least significant bits of value will be inserted in the input. + * + * inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source + * stream state was inconsistent. + */ ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, - gz_headerp head)); + gz_headerp head)); /* - inflateGetHeader() requests that gzip header information be stored in the - provided gz_header structure. inflateGetHeader() may be called after - inflateInit2() or inflateReset(), and before the first call of inflate(). - As inflate() processes the gzip stream, head->done is zero until the header - is completed, at which time head->done is set to one. If a zlib stream is - being decoded, then head->done is set to -1 to indicate that there will be - no gzip header information forthcoming. Note that Z_BLOCK can be used to - force inflate() to return immediately after header processing is complete - and before any actual data is decompressed. - - The text, time, xflags, and os fields are filled in with the gzip header - contents. hcrc is set to true if there is a header CRC. (The header CRC - was valid if done is set to one.) If extra is not Z_NULL, then extra_max - contains the maximum number of bytes to write to extra. Once done is true, - extra_len contains the actual extra field length, and extra contains the - extra field, or that field truncated if extra_max is less than extra_len. - If name is not Z_NULL, then up to name_max characters are written there, - terminated with a zero unless the length is greater than name_max. If - comment is not Z_NULL, then up to comm_max characters are written there, - terminated with a zero unless the length is greater than comm_max. When - any of extra, name, or comment are not Z_NULL and the respective field is - not present in the header, then that field is set to Z_NULL to signal its - absence. This allows the use of deflateSetHeader() with the returned - structure to duplicate the header. However if those fields are set to - allocated memory, then the application will need to save those pointers - elsewhere so that they can be eventually freed. - - If inflateGetHeader is not used, then the header information is simply - discarded. The header is always checked for validity, including the header - CRC if present. inflateReset() will reset the process to discard the header - information. The application would need to call inflateGetHeader() again to - retrieve the header from the next gzip stream. - - inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source - stream state was inconsistent. -*/ + * inflateGetHeader() requests that gzip header information be stored in the + * provided gz_header structure. inflateGetHeader() may be called after + * inflateInit2() or inflateReset(), and before the first call of inflate(). 
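
A sketch of that registration step, assuming a gzip-wrapped stream; the
helper name read_gzip_name is illustrative:

#include <string.h>
#include <zlib.h>

/* Request gzip-only decoding from inflateInit2(), then register a
 * gz_header before the first inflate() call; once head.done == 1 the
 * header (and name_buf, if a name was present) is complete. */
static int
read_gzip_name(z_streamp strm, char *name_buf, uInt name_max)
{
	gz_header head;
	int ret;

	ret = inflateInit2(strm, 16 + MAX_WBITS);  /* gzip wrapper only */
	if (ret != Z_OK) {
		return ret;
	}

	memset(&head, 0, sizeof(head));
	head.name = (Bytef *)name_buf;             /* receives the file name */
	head.name_max = name_max;
	ret = inflateGetHeader(strm, &head);
	/* ... then feed input to inflate() as usual and watch head.done ... */
	return ret;
}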
+ * As inflate() processes the gzip stream, head->done is zero until the header + * is completed, at which time head->done is set to one. If a zlib stream is + * being decoded, then head->done is set to -1 to indicate that there will be + * no gzip header information forthcoming. Note that Z_BLOCK can be used to + * force inflate() to return immediately after header processing is complete + * and before any actual data is decompressed. + * + * The text, time, xflags, and os fields are filled in with the gzip header + * contents. hcrc is set to true if there is a header CRC. (The header CRC + * was valid if done is set to one.) If extra is not Z_NULL, then extra_max + * contains the maximum number of bytes to write to extra. Once done is true, + * extra_len contains the actual extra field length, and extra contains the + * extra field, or that field truncated if extra_max is less than extra_len. + * If name is not Z_NULL, then up to name_max characters are written there, + * terminated with a zero unless the length is greater than name_max. If + * comment is not Z_NULL, then up to comm_max characters are written there, + * terminated with a zero unless the length is greater than comm_max. When + * any of extra, name, or comment are not Z_NULL and the respective field is + * not present in the header, then that field is set to Z_NULL to signal its + * absence. This allows the use of deflateSetHeader() with the returned + * structure to duplicate the header. However if those fields are set to + * allocated memory, then the application will need to save those pointers + * elsewhere so that they can be eventually freed. + * + * If inflateGetHeader is not used, then the header information is simply + * discarded. The header is always checked for validity, including the header + * CRC if present. inflateReset() will reset the process to discard the header + * information. The application would need to call inflateGetHeader() again to + * retrieve the header from the next gzip stream. + * + * inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source + * stream state was inconsistent. + */ /* -ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, - unsigned char FAR *window)); - - Initialize the internal stream state for decompression using inflateBack() - calls. The fields zalloc, zfree and opaque in strm must be initialized - before the call. If zalloc and zfree are Z_NULL, then the default library- - derived memory allocation routines are used. windowBits is the base two - logarithm of the window size, in the range 8..15. window is a caller - supplied buffer of that size. Except for special applications where it is - assured that deflate was used with small window sizes, windowBits must be 15 - and a 32K byte window must be supplied to be able to decompress general - deflate streams. - - See inflateBack() for the usage of these routines. - - inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of - the paramaters are invalid, Z_MEM_ERROR if the internal state could not - be allocated, or Z_VERSION_ERROR if the version of the library does not - match the version of the header file. -*/ + * ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, + * unsigned char FAR *window)); + * + * Initialize the internal stream state for decompression using inflateBack() + * calls. The fields zalloc, zfree and opaque in strm must be initialized + * before the call. 
If zalloc and zfree are Z_NULL, then the default library-
+ * derived memory allocation routines are used. windowBits is the base two
+ * logarithm of the window size, in the range 8..15. window is a caller
+ * supplied buffer of that size. Except for special applications where it is
+ * assured that deflate was used with small window sizes, windowBits must be 15
+ * and a 32K byte window must be supplied to be able to decompress general
+ * deflate streams.
+ *
+ * See inflateBack() for the usage of these routines.
+ *
+ * inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of
+ * the parameters are invalid, Z_MEM_ERROR if the internal state could not
+ * be allocated, or Z_VERSION_ERROR if the version of the library does not
+ * match the version of the header file.
+ */

 typedef unsigned (*in_func) OF((void FAR *, unsigned char FAR * FAR *));
 typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned));

 ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm,
-    in_func in, void FAR *in_desc,
-    out_func out, void FAR *out_desc));
+    in_func in, void FAR *in_desc,
+    out_func out, void FAR *out_desc));
 /*
-   inflateBack() does a raw inflate with a single call using a call-back
-   interface for input and output. This is more efficient than inflate() for
-   file i/o applications in that it avoids copying between the output and the
-   sliding window by simply making the window itself the output buffer. This
-   function trusts the application to not change the output buffer passed by
-   the output function, at least until inflateBack() returns.
-
-   inflateBackInit() must be called first to allocate the internal state
-   and to initialize the state with the user-provided window buffer.
-   inflateBack() may then be used multiple times to inflate a complete, raw
-   deflate stream with each call. inflateBackEnd() is then called to free
-   the allocated state.
-
-   A raw deflate stream is one with no zlib or gzip header or trailer.
-   This routine would normally be used in a utility that reads zip or gzip
-   files and writes out uncompressed files. The utility would decode the
-   header and process the trailer on its own, hence this routine expects
-   only the raw deflate stream to decompress. This is different from the
-   normal behavior of inflate(), which expects either a zlib or gzip header and
-   trailer around the deflate stream.
-
-   inflateBack() uses two subroutines supplied by the caller that are then
-   called by inflateBack() for input and output. inflateBack() calls those
-   routines until it reads a complete deflate stream and writes out all of the
-   uncompressed data, or until it encounters an error. The function's
-   parameters and return types are defined above in the in_func and out_func
-   typedefs. inflateBack() will call in(in_desc, &buf) which should return the
-   number of bytes of provided input, and a pointer to that input in buf. If
-   there is no input available, in() must return zero--buf is ignored in that
-   case--and inflateBack() will return a buffer error. inflateBack() will call
-   out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. out()
-   should return zero on success, or non-zero on failure. If out() returns
-   non-zero, inflateBack() will return with an error. Neither in() nor out()
-   are permitted to change the contents of the window provided to
-   inflateBackInit(), which is also the buffer that out() uses to write from.
-   The length written by out() will be at most the window size. Any non-zero
-   amount of input may be provided by in().
- - For convenience, inflateBack() can be provided input on the first call by - setting strm->next_in and strm->avail_in. If that input is exhausted, then - in() will be called. Therefore strm->next_in must be initialized before - calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called - immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in - must also be initialized, and then if strm->avail_in is not zero, input will - initially be taken from strm->next_in[0 .. strm->avail_in - 1]. - - The in_desc and out_desc parameters of inflateBack() is passed as the - first parameter of in() and out() respectively when they are called. These - descriptors can be optionally used to pass any information that the caller- - supplied in() and out() functions need to do their job. - - On return, inflateBack() will set strm->next_in and strm->avail_in to - pass back any unused input that was provided by the last in() call. The - return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR - if in() or out() returned an error, Z_DATA_ERROR if there was a format - error in the deflate stream (in which case strm->msg is set to indicate the - nature of the error), or Z_STREAM_ERROR if the stream was not properly - initialized. In the case of Z_BUF_ERROR, an input or output error can be - distinguished using strm->next_in which will be Z_NULL only if in() returned - an error. If strm->next is not Z_NULL, then the Z_BUF_ERROR was due to - out() returning non-zero. (in() will always be called before out(), so - strm->next_in is assured to be defined if out() returns non-zero.) Note - that inflateBack() cannot return Z_OK. -*/ + * inflateBack() does a raw inflate with a single call using a call-back + * interface for input and output. This is more efficient than inflate() for + * file i/o applications in that it avoids copying between the output and the + * sliding window by simply making the window itself the output buffer. This + * function trusts the application to not change the output buffer passed by + * the output function, at least until inflateBack() returns. + * + * inflateBackInit() must be called first to allocate the internal state + * and to initialize the state with the user-provided window buffer. + * inflateBack() may then be used multiple times to inflate a complete, raw + * deflate stream with each call. inflateBackEnd() is then called to free + * the allocated state. + * + * A raw deflate stream is one with no zlib or gzip header or trailer. + * This routine would normally be used in a utility that reads zip or gzip + * files and writes out uncompressed files. The utility would decode the + * header and process the trailer on its own, hence this routine expects + * only the raw deflate stream to decompress. This is different from the + * normal behavior of inflate(), which expects either a zlib or gzip header and + * trailer around the deflate stream. + * + * inflateBack() uses two subroutines supplied by the caller that are then + * called by inflateBack() for input and output. inflateBack() calls those + * routines until it reads a complete deflate stream and writes out all of the + * uncompressed data, or until it encounters an error. The function's + * parameters and return types are defined above in the in_func and out_func + * typedefs. inflateBack() will call in(in_desc, &buf) which should return the + * number of bytes of provided input, and a pointer to that input in buf. 
If
+ * there is no input available, in() must return zero--buf is ignored in that
+ * case--and inflateBack() will return a buffer error. inflateBack() will call
+ * out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. out()
+ * should return zero on success, or non-zero on failure. If out() returns
+ * non-zero, inflateBack() will return with an error. Neither in() nor out()
+ * are permitted to change the contents of the window provided to
+ * inflateBackInit(), which is also the buffer that out() uses to write from.
+ * The length written by out() will be at most the window size. Any non-zero
+ * amount of input may be provided by in().
+ *
+ * For convenience, inflateBack() can be provided input on the first call by
+ * setting strm->next_in and strm->avail_in. If that input is exhausted, then
+ * in() will be called. Therefore strm->next_in must be initialized before
+ * calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called
+ * immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in
+ * must also be initialized, and then if strm->avail_in is not zero, input will
+ * initially be taken from strm->next_in[0 .. strm->avail_in - 1].
+ *
+ * The in_desc and out_desc parameters of inflateBack() are passed as the
+ * first parameter of in() and out() respectively when they are called. These
+ * descriptors can be optionally used to pass any information that the caller-
+ * supplied in() and out() functions need to do their job.
+ *
+ * On return, inflateBack() will set strm->next_in and strm->avail_in to
+ * pass back any unused input that was provided by the last in() call. The
+ * return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR
+ * if in() or out() returned an error, Z_DATA_ERROR if there was a format
+ * error in the deflate stream (in which case strm->msg is set to indicate the
+ * nature of the error), or Z_STREAM_ERROR if the stream was not properly
+ * initialized. In the case of Z_BUF_ERROR, an input or output error can be
+ * distinguished using strm->next_in which will be Z_NULL only if in() returned
+ * an error. If strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to
+ * out() returning non-zero. (in() will always be called before out(), so
+ * strm->next_in is assured to be defined if out() returns non-zero.) Note
+ * that inflateBack() cannot return Z_OK.
+ */

ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm));
/*
- All memory allocated by inflateBackInit() is freed.
-
- inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream
- state was inconsistent.
-*/
+ * All memory allocated by inflateBackInit() is freed.
+ *
+ * inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream
+ * state was inconsistent.
+ */

ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void));
/* Return flags indicating compile-time options.
- - Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: - 1.0: size of uInt - 3.2: size of uLong - 5.4: size of voidpf (pointer) - 7.6: size of z_off_t - - Compiler, assembler, and debug options: - 8: DEBUG - 9: ASMV or ASMINF -- use ASM code - 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention - 11: 0 (reserved) - - One-time table building (smaller code, but not thread-safe if true): - 12: BUILDFIXED -- build static block decoding tables when needed - 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed - 14,15: 0 (reserved) - - Library content (indicates missing functionality): - 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking - deflate code when not needed) - 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect - and decode gzip streams (to avoid linking crc code) - 18-19: 0 (reserved) - - Operation variations (changes in library functionality): - 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate - 21: FASTEST -- deflate algorithm with only one, lowest compression level - 22,23: 0 (reserved) - - The sprintf variant used by gzprintf (zero is best): - 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format - 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! - 26: 0 = returns value, 1 = void -- 1 means inferred string length returned - - Remainder: - 27-31: 0 (reserved) + * + * Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: + * 1.0: size of uInt + * 3.2: size of uLong + * 5.4: size of voidpf (pointer) + * 7.6: size of z_off_t + * + * Compiler, assembler, and debug options: + * 8: DEBUG + * 9: ASMV or ASMINF -- use ASM code + * 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention + * 11: 0 (reserved) + * + * One-time table building (smaller code, but not thread-safe if true): + * 12: BUILDFIXED -- build static block decoding tables when needed + * 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed + * 14,15: 0 (reserved) + * + * Library content (indicates missing functionality): + * 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking + * deflate code when not needed) + * 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect + * and decode gzip streams (to avoid linking crc code) + * 18-19: 0 (reserved) + * + * Operation variations (changes in library functionality): + * 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate + * 21: FASTEST -- deflate algorithm with only one, lowest compression level + * 22,23: 0 (reserved) + * + * The sprintf variant used by gzprintf (zero is best): + * 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format + * 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! + * 26: 0 = returns value, 1 = void -- 1 means inferred string length returned + * + * Remainder: + * 27-31: 0 (reserved) */ - /* utility functions */ +/* utility functions */ /* - The following utility functions are implemented on top of the - basic stream-oriented functions. To simplify the interface, some - default options are assumed (compression level and memory usage, - standard memory allocation functions). The source code of these - utility functions can easily be modified if you need special options. -*/ - -ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong sourceLen)); + * The following utility functions are implemented on top of the + * basic stream-oriented functions. 
To simplify the interface, some + * default options are assumed (compression level and memory usage, + * standard memory allocation functions). The source code of these + * utility functions can easily be modified if you need special options. + */ + +ZEXTERN int ZEXPORT compress OF((Bytef * dest, uLongf *destLen, + const Bytef *source, uLong sourceLen)); /* - Compresses the source buffer into the destination buffer. sourceLen is - the byte length of the source buffer. Upon entry, destLen is the total - size of the destination buffer, which must be at least the value returned - by compressBound(sourceLen). Upon exit, destLen is the actual size of the - compressed buffer. - This function can be used to compress a whole file at once if the - input file is mmap'ed. - compress returns Z_OK if success, Z_MEM_ERROR if there was not - enough memory, Z_BUF_ERROR if there was not enough room in the output - buffer. -*/ - -ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen, - const Bytef *source, uLong sourceLen, - int level)); + * Compresses the source buffer into the destination buffer. sourceLen is + * the byte length of the source buffer. Upon entry, destLen is the total + * size of the destination buffer, which must be at least the value returned + * by compressBound(sourceLen). Upon exit, destLen is the actual size of the + * compressed buffer. + * This function can be used to compress a whole file at once if the + * input file is mmap'ed. + * compress returns Z_OK if success, Z_MEM_ERROR if there was not + * enough memory, Z_BUF_ERROR if there was not enough room in the output + * buffer. + */ + +ZEXTERN int ZEXPORT compress2 OF((Bytef * dest, uLongf *destLen, + const Bytef *source, uLong sourceLen, + int level)); /* - Compresses the source buffer into the destination buffer. The level - parameter has the same meaning as in deflateInit. sourceLen is the byte - length of the source buffer. Upon entry, destLen is the total size of the - destination buffer, which must be at least the value returned by - compressBound(sourceLen). Upon exit, destLen is the actual size of the - compressed buffer. - - compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough - memory, Z_BUF_ERROR if there was not enough room in the output buffer, - Z_STREAM_ERROR if the level parameter is invalid. -*/ + * Compresses the source buffer into the destination buffer. The level + * parameter has the same meaning as in deflateInit. sourceLen is the byte + * length of the source buffer. Upon entry, destLen is the total size of the + * destination buffer, which must be at least the value returned by + * compressBound(sourceLen). Upon exit, destLen is the actual size of the + * compressed buffer. + * + * compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough + * memory, Z_BUF_ERROR if there was not enough room in the output buffer, + * Z_STREAM_ERROR if the level parameter is invalid. + */ ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); /* - compressBound() returns an upper bound on the compressed size after - compress() or compress2() on sourceLen bytes. It would be used before - a compress() or compress2() call to allocate the destination buffer. -*/ + * compressBound() returns an upper bound on the compressed size after + * compress() or compress2() on sourceLen bytes. It would be used before + * a compress() or compress2() call to allocate the destination buffer. 
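+ *
+ * A typical calling sequence, shown here as an illustrative sketch only
+ * (source and sourceLen are placeholders, and malloc() stands in for
+ * whatever allocator is appropriate in the caller's environment):
+ *
+ *     uLongf destLen = compressBound(sourceLen);
+ *     Bytef *dest = (Bytef *)malloc(destLen);
+ *     if (dest != Z_NULL &&
+ *         compress(dest, &destLen, source, sourceLen) == Z_OK) {
+ *         // on success, dest[0..destLen-1] holds the compressed data
+ *     }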
+ */

-ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen,
-    const Bytef *source, uLong sourceLen));
+ZEXTERN int ZEXPORT uncompress OF((Bytef * dest, uLongf *destLen,
+    const Bytef *source, uLong sourceLen));
/*
- Decompresses the source buffer into the destination buffer. sourceLen is
- the byte length of the source buffer. Upon entry, destLen is the total
- size of the destination buffer, which must be large enough to hold the
- entire uncompressed data. (The size of the uncompressed data must have
- been saved previously by the compressor and transmitted to the decompressor
- by some mechanism outside the scope of this compression library.)
- Upon exit, destLen is the actual size of the compressed buffer.
- This function can be used to decompress a whole file at once if the
- input file is mmap'ed.
-
- uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
- enough memory, Z_BUF_ERROR if there was not enough room in the output
- buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete.
-*/
+ * Decompresses the source buffer into the destination buffer. sourceLen is
+ * the byte length of the source buffer. Upon entry, destLen is the total
+ * size of the destination buffer, which must be large enough to hold the
+ * entire uncompressed data. (The size of the uncompressed data must have
+ * been saved previously by the compressor and transmitted to the decompressor
+ * by some mechanism outside the scope of this compression library.)
+ * Upon exit, destLen is the actual size of the uncompressed data.
+ * This function can be used to decompress a whole file at once if the
+ * input file is mmap'ed.
+ *
+ * uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
+ * enough memory, Z_BUF_ERROR if there was not enough room in the output
+ * buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete.
+ */

#if XNU_KERNEL_PRIVATE
@@ -1112,285 +1112,285 @@ typedef voidp gzFile;

ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode));
/*
- Opens a gzip (.gz) file for reading or writing. The mode parameter
- is as in fopen ("rb" or "wb") but can also include a compression level
- ("wb9") or a strategy: 'f' for filtered data as in "wb6f", 'h' for
- Huffman only compression as in "wb1h", or 'R' for run-length encoding
- as in "wb1R". (See the description of deflateInit2 for more information
- about the strategy parameter.)
-
- gzopen can be used to read a file which is not in gzip format; in this
- case gzread will directly read from the file without decompression.
-
- gzopen returns NULL if the file could not be opened or if there was
- insufficient memory to allocate the (de)compression state; errno
- can be checked to distinguish the two cases (if errno is zero, the
- zlib error is Z_MEM_ERROR). */
+ * Opens a gzip (.gz) file for reading or writing. The mode parameter
+ * is as in fopen ("rb" or "wb") but can also include a compression level
+ * ("wb9") or a strategy: 'f' for filtered data as in "wb6f", 'h' for
+ * Huffman only compression as in "wb1h", or 'R' for run-length encoding
+ * as in "wb1R". (See the description of deflateInit2 for more information
+ * about the strategy parameter.)
+ *
+ * gzopen can be used to read a file which is not in gzip format; in this
+ * case gzread will directly read from the file without decompression.
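+ *
+ * For example, the following illustrative fragment (path, data and len are
+ * placeholders; error handling is abbreviated) writes with level 6 and the
+ * filtered strategy:
+ *
+ *     gzFile f = gzopen(path, "wb6f");
+ *     if (f != NULL) {
+ *         gzwrite(f, data, len);
+ *         gzclose(f);
+ *     }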
+ *
+ * gzopen returns NULL if the file could not be opened or if there was
+ * insufficient memory to allocate the (de)compression state; errno
+ * can be checked to distinguish the two cases (if errno is zero, the
+ * zlib error is Z_MEM_ERROR). */

ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode));
/*
- gzdopen() associates a gzFile with the file descriptor fd. File
- descriptors are obtained from calls like open, dup, creat, pipe or
- fileno (in the file has been previously opened with fopen).
- The mode parameter is as in gzopen.
- The next call of gzclose on the returned gzFile will also close the
- file descriptor fd, just like fclose(fdopen(fd), mode) closes the file
- descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode).
- gzdopen returns NULL if there was insufficient memory to allocate
- the (de)compression state.
-*/
+ * gzdopen() associates a gzFile with the file descriptor fd. File
+ * descriptors are obtained from calls like open, dup, creat, pipe or
+ * fileno (if the file has been previously opened with fopen).
+ * The mode parameter is as in gzopen.
+ * The next call of gzclose on the returned gzFile will also close the
+ * file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file
+ * descriptor fd. If you want to keep fd open, use gzdopen(dup(fd), mode).
+ * gzdopen returns NULL if there was insufficient memory to allocate
+ * the (de)compression state.
+ */

ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy));
/*
- Dynamically update the compression level or strategy. See the description
- of deflateInit2 for the meaning of these parameters.
- gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not
- opened for writing.
-*/
+ * Dynamically update the compression level or strategy. See the description
+ * of deflateInit2 for the meaning of these parameters.
+ * gzsetparams returns Z_OK if success, or Z_STREAM_ERROR if the file was not
+ * opened for writing.
+ */

ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len));
/*
- Reads the given number of uncompressed bytes from the compressed file.
- If the input file was not in gzip format, gzread copies the given number
- of bytes into the buffer.
- gzread returns the number of uncompressed bytes actually read (0 for
- end of file, -1 for error). */
+ * Reads the given number of uncompressed bytes from the compressed file.
+ * If the input file was not in gzip format, gzread copies the given number
+ * of bytes into the buffer.
+ * gzread returns the number of uncompressed bytes actually read (0 for
+ * end of file, -1 for error). */

ZEXTERN int ZEXPORT gzwrite OF((gzFile file,
-    voidpc buf, unsigned len));
+    voidpc buf, unsigned len));
/*
- Writes the given number of uncompressed bytes into the compressed file.
- gzwrite returns the number of uncompressed bytes actually written
- (0 in case of error).
-*/
+ * Writes the given number of uncompressed bytes into the compressed file.
+ * gzwrite returns the number of uncompressed bytes actually written
+ * (0 in case of error).
+ */

ZEXTERN int ZEXPORTVA gzprintf OF((gzFile file, const char *format, ...));
/*
- Converts, formats, and writes the args to the compressed file under
- control of the format string, as in fprintf. gzprintf returns the number of
- uncompressed bytes actually written (0 in case of error). The number of
- uncompressed bytes written is limited to 4095. The caller should assure that
- this limit is not exceeded.
If it is exceeded, then gzprintf() will return
- return an error (0) with nothing written. In this case, there may also be a
- buffer overflow with unpredictable consequences, which is possible only if
- zlib was compiled with the insecure functions sprintf() or vsprintf()
- because the secure snprintf() or vsnprintf() functions were not available.
-*/
+ * Converts, formats, and writes the args to the compressed file under
+ * control of the format string, as in fprintf. gzprintf returns the number of
+ * uncompressed bytes actually written (0 in case of error). The number of
+ * uncompressed bytes written is limited to 4095. The caller should assure that
+ * this limit is not exceeded. If it is exceeded, then gzprintf() will
+ * return an error (0) with nothing written. In this case, there may also be a
+ * buffer overflow with unpredictable consequences, which is possible only if
+ * zlib was compiled with the insecure functions sprintf() or vsprintf()
+ * because the secure snprintf() or vsnprintf() functions were not available.
+ */

ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s));
/*
- Writes the given null-terminated string to the compressed file, excluding
- the terminating null character.
- gzputs returns the number of characters written, or -1 in case of error.
-*/
+ * Writes the given null-terminated string to the compressed file, excluding
+ * the terminating null character.
+ * gzputs returns the number of characters written, or -1 in case of error.
+ */

ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len));
/*
- Reads bytes from the compressed file until len-1 characters are read, or
- a newline character is read and transferred to buf, or an end-of-file
- condition is encountered. The string is then terminated with a null
- character.
- gzgets returns buf, or Z_NULL in case of error.
-*/
+ * Reads bytes from the compressed file until len-1 characters are read, or
+ * a newline character is read and transferred to buf, or an end-of-file
+ * condition is encountered. The string is then terminated with a null
+ * character.
+ * gzgets returns buf, or Z_NULL in case of error.
+ */

ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c));
/*
- Writes c, converted to an unsigned char, into the compressed file.
- gzputc returns the value that was written, or -1 in case of error.
-*/
+ * Writes c, converted to an unsigned char, into the compressed file.
+ * gzputc returns the value that was written, or -1 in case of error.
+ */

ZEXTERN int ZEXPORT gzgetc OF((gzFile file));
/*
- Reads one byte from the compressed file. gzgetc returns this byte
- or -1 in case of end of file or error.
-*/
+ * Reads one byte from the compressed file. gzgetc returns this byte
+ * or -1 in case of end of file or error.
+ */

ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file));
/*
- Push one character back onto the stream to be read again later.
- Only one character of push-back is allowed. gzungetc() returns the
- character pushed, or -1 on failure. gzungetc() will fail if a
- character has been pushed but not read yet, or if c is -1. The pushed
- character will be discarded if the stream is repositioned with gzseek()
- or gzrewind().
-*/
+ * Push one character back onto the stream to be read again later.
+ * Only one character of push-back is allowed. gzungetc() returns the
+ * character pushed, or -1 on failure. gzungetc() will fail if a
+ * character has been pushed but not read yet, or if c is -1.
The pushed + * character will be discarded if the stream is repositioned with gzseek() + * or gzrewind(). + */ ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); /* - Flushes all pending output into the compressed file. The parameter - flush is as in the deflate() function. The return value is the zlib - error number (see function gzerror below). gzflush returns Z_OK if - the flush parameter is Z_FINISH and all output could be flushed. - gzflush should be called only when strictly necessary because it can - degrade compression. -*/ + * Flushes all pending output into the compressed file. The parameter + * flush is as in the deflate() function. The return value is the zlib + * error number (see function gzerror below). gzflush returns Z_OK if + * the flush parameter is Z_FINISH and all output could be flushed. + * gzflush should be called only when strictly necessary because it can + * degrade compression. + */ ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, - z_off_t offset, int whence)); + z_off_t offset, int whence)); /* - Sets the starting position for the next gzread or gzwrite on the - given compressed file. The offset represents a number of bytes in the - uncompressed data stream. The whence parameter is defined as in lseek(2); - the value SEEK_END is not supported. - If the file is opened for reading, this function is emulated but can be - extremely slow. If the file is opened for writing, only forward seeks are - supported; gzseek then compresses a sequence of zeroes up to the new - starting position. - - gzseek returns the resulting offset location as measured in bytes from - the beginning of the uncompressed stream, or -1 in case of error, in - particular if the file is opened for writing and the new starting position - would be before the current position. -*/ + * Sets the starting position for the next gzread or gzwrite on the + * given compressed file. The offset represents a number of bytes in the + * uncompressed data stream. The whence parameter is defined as in lseek(2); + * the value SEEK_END is not supported. + * If the file is opened for reading, this function is emulated but can be + * extremely slow. If the file is opened for writing, only forward seeks are + * supported; gzseek then compresses a sequence of zeroes up to the new + * starting position. + * + * gzseek returns the resulting offset location as measured in bytes from + * the beginning of the uncompressed stream, or -1 in case of error, in + * particular if the file is opened for writing and the new starting position + * would be before the current position. + */ ZEXTERN int ZEXPORT gzrewind OF((gzFile file)); /* - Rewinds the given file. This function is supported only for reading. - - gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) -*/ + * Rewinds the given file. This function is supported only for reading. + * + * gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) + */ ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); /* - Returns the starting position for the next gzread or gzwrite on the - given compressed file. This position represents a number of bytes in the - uncompressed data stream. - - gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) -*/ + * Returns the starting position for the next gzread or gzwrite on the + * given compressed file. This position represents a number of bytes in the + * uncompressed data stream. 
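+ *
+ * For instance, a read position can be remembered and restored as in the
+ * illustrative sketch below (file and buf are placeholders):
+ *
+ *     z_off_t pos = gztell(file);   // offset in uncompressed bytes
+ *     (void)gzread(file, buf, 100); // consume some data
+ *     gzseek(file, pos, SEEK_SET);  // emulated rewind for read streams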
+ * + * gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) + */ ZEXTERN int ZEXPORT gzeof OF((gzFile file)); /* - Returns 1 when EOF has previously been detected reading the given - input stream, otherwise zero. -*/ + * Returns 1 when EOF has previously been detected reading the given + * input stream, otherwise zero. + */ ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); /* - Returns 1 if file is being read directly without decompression, otherwise - zero. -*/ + * Returns 1 if file is being read directly without decompression, otherwise + * zero. + */ ZEXTERN int ZEXPORT gzclose OF((gzFile file)); /* - Flushes all pending output if necessary, closes the compressed file - and deallocates all the (de)compression state. The return value is the zlib - error number (see function gzerror below). -*/ + * Flushes all pending output if necessary, closes the compressed file + * and deallocates all the (de)compression state. The return value is the zlib + * error number (see function gzerror below). + */ ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); /* - Returns the error message for the last error which occurred on the - given compressed file. errnum is set to zlib error number. If an - error occurred in the file system and not in the compression library, - errnum is set to Z_ERRNO and the application may consult errno - to get the exact error code. -*/ + * Returns the error message for the last error which occurred on the + * given compressed file. errnum is set to zlib error number. If an + * error occurred in the file system and not in the compression library, + * errnum is set to Z_ERRNO and the application may consult errno + * to get the exact error code. + */ ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); /* - Clears the error and end-of-file flags for file. This is analogous to the - clearerr() function in stdio. This is useful for continuing to read a gzip - file that is being written concurrently. -*/ + * Clears the error and end-of-file flags for file. This is analogous to the + * clearerr() function in stdio. This is useful for continuing to read a gzip + * file that is being written concurrently. + */ #endif /* KERNEL */ - /* checksum functions */ +/* checksum functions */ /* - These functions are not related to compression but are exported - anyway because they might be useful in applications using the - compression library. -*/ + * These functions are not related to compression but are exported + * anyway because they might be useful in applications using the + * compression library. + */ ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); /* - Update a running Adler-32 checksum with the bytes buf[0..len-1] and - return the updated checksum. If buf is NULL, this function returns - the required initial value for the checksum. - An Adler-32 checksum is almost as reliable as a CRC32 but can be computed - much faster. Usage example: - - uLong adler = adler32(0L, Z_NULL, 0); - - while (read_buffer(buffer, length) != EOF) { - adler = adler32(adler, buffer, length); - } - if (adler != original_adler) error(); -*/ + * Update a running Adler-32 checksum with the bytes buf[0..len-1] and + * return the updated checksum. If buf is NULL, this function returns + * the required initial value for the checksum. + * An Adler-32 checksum is almost as reliable as a CRC32 but can be computed + * much faster. 
Usage example:
+ *
+ *     uLong adler = adler32(0L, Z_NULL, 0);
+ *
+ *     while (read_buffer(buffer, length) != EOF) {
+ *         adler = adler32(adler, buffer, length);
+ *     }
+ *     if (adler != original_adler) error();
+ */

ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2,
-    z_off_t len2));
+    z_off_t len2));
/*
- Combine two Adler-32 checksums into one. For two sequences of bytes, seq1
- and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for
- each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of
- seq1 and seq2 concatenated, requiring only adler1, adler2, and len2.
-*/
+ * Combine two Adler-32 checksums into one. For two sequences of bytes, seq1
+ * and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for
+ * each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of
+ * seq1 and seq2 concatenated, requiring only adler1, adler2, and len2.
+ */

ZEXTERN uLong ZEXPORT z_crc32 OF((uLong crc, const Bytef *buf, uInt len));
/*
- Update a running CRC-32 with the bytes buf[0..len-1] and return the
- updated CRC-32. If buf is NULL, this function returns the required initial
- value for the for the crc. Pre- and post-conditioning (one's complement) is
- performed within this function so it shouldn't be done by the application.
- Usage example:
-
-     uLong crc = crc32(0L, Z_NULL, 0);
-
-     while (read_buffer(buffer, length) != EOF) {
-         crc = crc32(crc, buffer, length);
-     }
-     if (crc != original_crc) error();
-*/
+ * Update a running CRC-32 with the bytes buf[0..len-1] and return the
+ * updated CRC-32. If buf is NULL, this function returns the required initial
+ * value for the crc. Pre- and post-conditioning (one's complement) is
+ * performed within this function so it shouldn't be done by the application.
+ * Usage example:
+ *
+ *     uLong crc = crc32(0L, Z_NULL, 0);
+ *
+ *     while (read_buffer(buffer, length) != EOF) {
+ *         crc = crc32(crc, buffer, length);
+ *     }
+ *     if (crc != original_crc) error();
+ */

ZEXTERN uLong ZEXPORT z_crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2));
/*
- Combine two CRC-32 check values into one. For two sequences of bytes,
- seq1 and seq2 with lengths len1 and len2, CRC-32 check values were
- calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32
- check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and
- len2.
-*/
+ * Combine two CRC-32 check values into one. For two sequences of bytes,
+ * seq1 and seq2 with lengths len1 and len2, CRC-32 check values were
+ * calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32
+ * check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and
+ * len2.
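+ *
+ * A sketch of the intended use (illustrative only; buf1, len1, buf2 and
+ * len2 are placeholders):
+ *
+ *     uLong crc1 = z_crc32(z_crc32(0L, Z_NULL, 0), buf1, len1);
+ *     uLong crc2 = z_crc32(z_crc32(0L, Z_NULL, 0), buf2, len2);
+ *     uLong crc = z_crc32_combine(crc1, crc2, len2);
+ *     // crc matches z_crc32 computed over buf1 followed by buf2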
+ */ - /* various hacks, don't look :) */ +/* various hacks, don't look :) */ /* deflateInit and inflateInit are macros to allow checking the zlib version * and the compiler's view of z_stream: */ ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level, - const char *version, int stream_size)); + const char *version, int stream_size)); ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm, - const char *version, int stream_size)); + const char *version, int stream_size)); ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method, - int windowBits, int memLevel, - int strategy, const char *version, - int stream_size)); + int windowBits, int memLevel, + int strategy, const char *version, + int stream_size)); ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits, - const char *version, int stream_size)); + const char *version, int stream_size)); ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits, - unsigned char FAR *window, - const char *version, - int stream_size)); + unsigned char FAR *window, + const char *version, + int stream_size)); #define deflateInit(strm, level) \ - deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream)) + deflateInit_((strm), (level), ZLIB_VERSION, sizeof(z_stream)) #define inflateInit(strm) \ - inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream)) + inflateInit_((strm), ZLIB_VERSION, sizeof(z_stream)) #define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ - deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ - (strategy), ZLIB_VERSION, sizeof(z_stream)) + deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ + (strategy), ZLIB_VERSION, sizeof(z_stream)) #define inflateInit2(strm, windowBits) \ - inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream)) + inflateInit2_((strm), (windowBits), ZLIB_VERSION, sizeof(z_stream)) #define inflateBackInit(strm, windowBits, window) \ - inflateBackInit_((strm), (windowBits), (window), \ - ZLIB_VERSION, sizeof(z_stream)) + inflateBackInit_((strm), (windowBits), (window), \ + ZLIB_VERSION, sizeof(z_stream)) #if !defined(ZUTIL_H) && !defined(NO_DUMMY_DECL) - struct internal_state {int dummy;}; /* hack for buggy compilers */ +struct internal_state {int dummy;}; /* hack for buggy compilers */ #endif ZEXTERN const char * ZEXPORT zError OF((int)); diff --git a/libkern/net/inet_aton.c b/libkern/net/inet_aton.c index ff518373e..73ea8a678 100644 --- a/libkern/net/inet_aton.c +++ b/libkern/net/inet_aton.c @@ -33,7 +33,7 @@ static inline int isspace(char c) { - return (c == ' ' || c == '\t' || c == '\n' || c == '\12'); + return c == ' ' || c == '\t' || c == '\n' || c == '\12'; } #endif @@ -59,8 +59,9 @@ inet_aton(const char *cp, struct in_addr *addr) l = strtoul(c, &endptr, 0); - if (l == ULONG_MAX || (l == 0 && endptr == c)) - return (0); + if (l == ULONG_MAX || (l == 0 && endptr == c)) { + return 0; + } val = (in_addr_t)l; @@ -70,18 +71,20 @@ inet_aton(const char *cp, struct in_addr *addr) * gone '.12' or something which would get past * the next check. */ - if (endptr == c) - return (0); + if (endptr == c) { + return 0; + } parts[n] = val; c = endptr; /* Check the next character past the previous number's end */ switch (*c) { - case '.' : + case '.': /* Make sure we only do 3 dots .. */ - if (n == 3) /* Whoops. Quit. */ - return (0); + if (n == 3) { /* Whoops. Quit. 
*/ + return 0; + } n++; c++; break; @@ -95,44 +98,46 @@ inet_aton(const char *cp, struct in_addr *addr) gotend = 1; break; } else { - /* Invalid character, then fail. */ - return (0); + return 0; } } - } /* Concoct the address according to the number of parts specified. */ switch (n) { - case 0: /* a -- 32 bits */ + case 0: /* a -- 32 bits */ /* * Nothing is necessary here. Overflow checking was * already done in strtoul(). */ break; - case 1: /* a.b -- 8.24 bits */ - if (val > 0xffffff || parts[0] > 0xff) - return (0); + case 1: /* a.b -- 8.24 bits */ + if (val > 0xffffff || parts[0] > 0xff) { + return 0; + } val |= parts[0] << 24; break; - case 2: /* a.b.c -- 8.8.16 bits */ - if (val > 0xffff || parts[0] > 0xff || parts[1] > 0xff) - return (0); + case 2: /* a.b.c -- 8.8.16 bits */ + if (val > 0xffff || parts[0] > 0xff || parts[1] > 0xff) { + return 0; + } val |= (parts[0] << 24) | (parts[1] << 16); break; - case 3: /* a.b.c.d -- 8.8.8.8 bits */ + case 3: /* a.b.c.d -- 8.8.8.8 bits */ if (val > 0xff || parts[0] > 0xff || parts[1] > 0xff || - parts[2] > 0xff) - return (0); + parts[2] > 0xff) { + return 0; + } val |= (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8); break; } - if (addr != NULL) + if (addr != NULL) { addr->s_addr = htonl(val); - return (1); + } + return 1; } diff --git a/libkern/net/inet_ntoa.c b/libkern/net/inet_ntoa.c index d912bdaeb..0513bc405 100644 --- a/libkern/net/inet_ntoa.c +++ b/libkern/net/inet_ntoa.c @@ -34,14 +34,14 @@ char * inet_ntoa(struct in_addr ina) { - static char buf[4*sizeof "123"]; + static char buf[4 * sizeof "123"]; unsigned char *ucp = (unsigned char *)&ina; snprintf(buf, sizeof(buf), "%d.%d.%d.%d", - ucp[0] & 0xff, - ucp[1] & 0xff, - ucp[2] & 0xff, - ucp[3] & 0xff); + ucp[0] & 0xff, + ucp[1] & 0xff, + ucp[2] & 0xff, + ucp[3] & 0xff); return buf; } @@ -51,9 +51,9 @@ inet_ntoa_r(struct in_addr ina, char *buf, size_t buflen) unsigned char *ucp = (unsigned char *)&ina; snprintf(buf, buflen, "%d.%d.%d.%d", - ucp[0] & 0xff, - ucp[1] & 0xff, - ucp[2] & 0xff, - ucp[3] & 0xff); + ucp[0] & 0xff, + ucp[1] & 0xff, + ucp[2] & 0xff, + ucp[3] & 0xff); return buf; } diff --git a/libkern/net/inet_ntop.c b/libkern/net/inet_ntop.c index d15bc57ff..c1fdba73c 100644 --- a/libkern/net/inet_ntop.c +++ b/libkern/net/inet_ntop.c @@ -30,8 +30,8 @@ static const char rcsid[] = "$Id: inet_ntop.c,v 1.3.18.2 2005/11/03 23:02:22 mar * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. 
*/ -static char *inet_ntop4(const u_char *src, char *dst, socklen_t size); -static char *inet_ntop6(const u_char *src, char *dst, socklen_t size); +static char *inet_ntop4(const u_char *src, char *dst, socklen_t size); +static char *inet_ntop6(const u_char *src, char *dst, socklen_t size); /* char * * inet_ntop(af, src, dst, size) @@ -46,11 +46,11 @@ inet_ntop(int af, const void *src, char *dst, socklen_t size) { switch (af) { case AF_INET: - return (inet_ntop4(src, dst, size)); + return inet_ntop4(src, dst, size); case AF_INET6: - return (inet_ntop6(src, dst, size)); + return inet_ntop6(src, dst, size); default: - return (NULL); + return NULL; } /* NOTREACHED */ } @@ -75,10 +75,10 @@ inet_ntop4(const u_char *src, char *dst, socklen_t size) l = snprintf(tmp, sizeof(tmp), fmt, src[0], src[1], src[2], src[3]); if (l <= 0 || (socklen_t) l >= size) { - return (NULL); + return NULL; } strlcpy(dst, tmp, size); - return (dst); + return dst; } /* const char * @@ -99,8 +99,8 @@ inet_ntop6(const u_char *src, char *dst, socklen_t size) */ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp; struct { int base, len; } best, cur; -#define NS_IN6ADDRSZ 16 -#define NS_INT16SZ 2 +#define NS_IN6ADDRSZ 16 +#define NS_INT16SZ 2 u_int words[NS_IN6ADDRSZ / NS_INT16SZ]; int i; @@ -110,8 +110,9 @@ inet_ntop6(const u_char *src, char *dst, socklen_t size) * Find the longest run of 0x00's in src[] for :: shorthanding. */ memset(words, '\0', sizeof words); - for (i = 0; i < NS_IN6ADDRSZ; i++) + for (i = 0; i < NS_IN6ADDRSZ; i++) { words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3)); + } best.base = -1; best.len = 0; cur.base = -1; @@ -126,18 +127,21 @@ inet_ntop6(const u_char *src, char *dst, socklen_t size) } } else { if (cur.base != -1) { - if (best.base == -1 || cur.len > best.len) + if (best.base == -1 || cur.len > best.len) { best = cur; + } cur.base = -1; } } } if (cur.base != -1) { - if (best.base == -1 || cur.len > best.len) + if (best.base == -1 || cur.len > best.len) { best = cur; + } } - if (best.base != -1 && best.len < 2) + if (best.base != -1 && best.len < 2) { best.base = -1; + } /* * Format the result. @@ -147,38 +151,42 @@ inet_ntop6(const u_char *src, char *dst, socklen_t size) /* Are we inside the best run of 0x00's? */ if (best.base != -1 && i >= best.base && i < (best.base + best.len)) { - if (i == best.base) + if (i == best.base) { *tp++ = ':'; + } continue; } /* Are we following an initial run of 0x00s or any real hex? */ - if (i != 0) + if (i != 0) { *tp++ = ':'; + } /* Is this address an encapsulated IPv4? */ if (i == 6 && best.base == 0 && (best.len == 6 || (best.len == 7 && words[7] != 0x0001) || (best.len == 5 && words[5] == 0xffff))) { - if (!inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp))) - return (NULL); + if (!inet_ntop4(src + 12, tp, sizeof tmp - (tp - tmp))) { + return NULL; + } tp += strlen(tp); break; } tp += snprintf(tp, sizeof(tmp), "%x", words[i]); } /* Was it a trailing run of 0x00's? */ - if (best.base != -1 && (best.base + best.len) == - (NS_IN6ADDRSZ / NS_INT16SZ)) + if (best.base != -1 && (best.base + best.len) == + (NS_IN6ADDRSZ / NS_INT16SZ)) { *tp++ = ':'; + } *tp++ = '\0'; /* * Check for overflow, copy, and we're done. */ if ((socklen_t)(tp - tmp) > size) { - return (NULL); + return NULL; } strlcpy(dst, tmp, size); - return (dst); + return dst; } /*! 
\file */ diff --git a/libkern/net/inet_pton.c b/libkern/net/inet_pton.c index 417326d3d..19543ced5 100644 --- a/libkern/net/inet_pton.c +++ b/libkern/net/inet_pton.c @@ -31,8 +31,8 @@ static const char rcsid[] = "$Id: inet_pton.c,v 1.3.18.2 2005/07/28 07:38:07 mar * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. */ -static int inet_pton4(const char *src, u_char *dst); -static int inet_pton6(const char *src, u_char *dst); +static int inet_pton4(const char *src, u_char *dst); +static int inet_pton6(const char *src, u_char *dst); /* int * inet_pton(af, src, dst) @@ -50,11 +50,11 @@ inet_pton(int af, const char *src, void *dst) { switch (af) { case AF_INET: - return (inet_pton4(src, dst)); + return inet_pton4(src, dst); case AF_INET6: - return (inet_pton6(src, dst)); + return inet_pton6(src, dst); default: - return (-1); + return -1; } /* NOTREACHED */ } @@ -74,7 +74,7 @@ inet_pton4(const char *src, u_char *dst) { static const char digits[] = "0123456789"; int saw_digit, octets, ch; -#define NS_INADDRSZ 4 +#define NS_INADDRSZ 4 u_char tmp[NS_INADDRSZ], *tp; saw_digit = 0; @@ -86,28 +86,34 @@ inet_pton4(const char *src, u_char *dst) if ((pch = strchr(digits, ch)) != NULL) { u_int new = *tp * 10 + (pch - digits); - if (saw_digit && *tp == 0) - return (0); - if (new > 255) - return (0); + if (saw_digit && *tp == 0) { + return 0; + } + if (new > 255) { + return 0; + } *tp = new; if (!saw_digit) { - if (++octets > 4) - return (0); + if (++octets > 4) { + return 0; + } saw_digit = 1; } } else if (ch == '.' && saw_digit) { - if (octets == 4) - return (0); + if (octets == 4) { + return 0; + } *++tp = 0; saw_digit = 0; - } else - return (0); + } else { + return 0; + } + } + if (octets < 4) { + return 0; } - if (octets < 4) - return (0); memcpy(dst, tmp, NS_INADDRSZ); - return (1); + return 1; } /* int @@ -127,9 +133,9 @@ static int inet_pton6(const char *src, u_char *dst) { static const char xdigits_l[] = "0123456789abcdef", - xdigits_u[] = "0123456789ABCDEF"; -#define NS_IN6ADDRSZ 16 -#define NS_INT16SZ 2 + xdigits_u[] = "0123456789ABCDEF"; +#define NS_IN6ADDRSZ 16 +#define NS_INT16SZ 2 u_char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp; const char *xdigits, *curtok; int ch, seen_xdigits; @@ -139,36 +145,42 @@ inet_pton6(const char *src, u_char *dst) endp = tp + NS_IN6ADDRSZ; colonp = NULL; /* Leading :: requires some special handling. */ - if (*src == ':') - if (*++src != ':') - return (0); + if (*src == ':') { + if (*++src != ':') { + return 0; + } + } curtok = src; seen_xdigits = 0; val = 0; while ((ch = *src++) != '\0') { const char *pch; - if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) + if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) { pch = strchr((xdigits = xdigits_u), ch); + } if (pch != NULL) { val <<= 4; val |= (pch - xdigits); - if (++seen_xdigits > 4) - return (0); + if (++seen_xdigits > 4) { + return 0; + } continue; } if (ch == ':') { curtok = src; if (!seen_xdigits) { - if (colonp) - return (0); + if (colonp) { + return 0; + } colonp = tp; continue; } else if (*src == '\0') { - return (0); + return 0; + } + if (tp + NS_INT16SZ > endp) { + return 0; } - if (tp + NS_INT16SZ > endp) - return (0); *tp++ = (u_char) (val >> 8) & 0xff; *tp++ = (u_char) val & 0xff; seen_xdigits = 0; @@ -179,13 +191,14 @@ inet_pton6(const char *src, u_char *dst) inet_pton4(curtok, tp) > 0) { tp += NS_INADDRSZ; seen_xdigits = 0; - break; /*%< '\\0' was seen by inet_pton4(). */ + break; /*%< '\\0' was seen by inet_pton4(). 
*/ } - return (0); + return 0; } if (seen_xdigits) { - if (tp + NS_INT16SZ > endp) - return (0); + if (tp + NS_INT16SZ > endp) { + return 0; + } *tp++ = (u_char) (val >> 8) & 0xff; *tp++ = (u_char) val & 0xff; } @@ -197,16 +210,18 @@ inet_pton6(const char *src, u_char *dst) const int n = tp - colonp; int i; - if (tp == endp) - return (0); + if (tp == endp) { + return 0; + } for (i = 1; i <= n; i++) { - endp[- i] = colonp[n - i]; + endp[-i] = colonp[n - i]; colonp[n - i] = 0; } tp = endp; } - if (tp != endp) - return (0); + if (tp != endp) { + return 0; + } memcpy(dst, tmp, NS_IN6ADDRSZ); - return (1); + return 1; } diff --git a/libkern/os/Makefile b/libkern/os/Makefile index dc30508ab..5db93b07d 100644 --- a/libkern/os/Makefile +++ b/libkern/os/Makefile @@ -16,6 +16,7 @@ KERNELFILES = \ overflow.h PRIVATE_KERNELFILES = \ + hash.h \ object_private.h \ reason_private.h \ refcnt.h diff --git a/libkern/os/base.h b/libkern/os/base.h index e37800e81..62b98b453 100644 --- a/libkern/os/base.h +++ b/libkern/os/base.h @@ -73,7 +73,7 @@ #define OS_ALWAYS_INLINE __attribute__((__always_inline__)) #define OS_TRANSPARENT_UNION __attribute__((__transparent_union__)) #define OS_ALIGNED(n) __attribute__((__aligned__((n)))) -#define OS_FORMAT_PRINTF(x,y) __attribute__((__format__(printf,x,y))) +#define OS_FORMAT_PRINTF(x, y) __attribute__((__format__(printf,x,y))) #define OS_EXPORT extern __attribute__((__visibility__("default"))) #define OS_INLINE static __inline__ #define OS_EXPECT(x, v) __builtin_expect((x), (v)) @@ -110,7 +110,7 @@ #define OS_ALWAYS_INLINE #define OS_TRANSPARENT_UNION #define OS_ALIGNED(n) -#define OS_FORMAT_PRINTF(x,y) +#define OS_FORMAT_PRINTF(x, y) #define OS_EXPORT extern #define OS_INLINE static inline #define OS_EXPECT(x, v) (x) @@ -144,16 +144,16 @@ #if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums) #define OS_ENUM(_name, _type, ...) \ - typedef enum : _type { __VA_ARGS__ } _name##_t + typedef enum : _type { __VA_ARGS__ } _name##_t #else #define OS_ENUM(_name, _type, ...) \ - enum { __VA_ARGS__ }; typedef _type _name##_t + enum { __VA_ARGS__ }; typedef _type _name##_t #endif #if __has_feature(attribute_availability_swift) // equivalent to __SWIFT_UNAVAILABLE from Availability.h #define OS_SWIFT_UNAVAILABLE(_msg) \ - __attribute__((__availability__(swift, unavailable, message=_msg))) + __attribute__((__availability__(swift, unavailable, message=_msg))) #else #define OS_SWIFT_UNAVAILABLE(_msg) #endif diff --git a/libkern/os/firehose.h b/libkern/os/firehose.h index fb0a3ce3a..67a21b614 100644 --- a/libkern/os/firehose.h +++ b/libkern/os/firehose.h @@ -32,7 +32,7 @@ __BEGIN_DECLS * @abstract * Called by the dispatch firehose apis to notify logd that a chunk is available */ -__WATCHOS_AVAILABLE(3.0) __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) + __WATCHOS_AVAILABLE(3.0) __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) void __firehose_buffer_push_to_logd(firehose_buffer_t fb, bool for_io); /*! diff --git a/libkern/os/hash.h b/libkern/os/hash.h new file mode 100644 index 000000000..264146fb9 --- /dev/null +++ b/libkern/os/hash.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef _OS_HASH_H_
+#define _OS_HASH_H_
+#if PRIVATE
+
+#include <os/base.h>
+
+__BEGIN_DECLS
+
+/*!
+ * @function os_hash_jenkins
+ *
+ * @brief
+ * The original Jenkins "one at a time" hash.
+ *
+ * @discussion
+ * TBD: There may be some value to unrolling here,
+ * depending on the architecture.
+ *
+ * @param data
+ * The address of the data to hash.
+ *
+ * @param length
+ * The length of the data to hash.
+ *
+ * @returns
+ * The Jenkins hash for this data.
+ */
+static inline uint32_t
+os_hash_jenkins(const void *data, size_t length)
+{
+	const uint8_t *key = (const uint8_t *)data;
+	uint32_t hash = 0;
+
+	for (size_t i = 0; i < length; i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	return hash;
+}
+
+/*!
+ * @function os_hash_kernel_pointer
+ *
+ * @brief
+ * Hashes a pointer from a zone.
+ *
+ * @discussion
+ * This is a really cheap and fast hash that will behave well for pointers
+ * allocated by the kernel.
+ *
+ * This should not be used for untrusted pointer values from userspace,
+ * or cases when the pointer is somehow under the control of userspace.
+ *
+ * This hash function utilizes knowledge about the span of the kernel
+ * address space and inherent alignment of zalloc/kalloc.
+ *
+ * @param pointer
+ * The pointer to hash.
+ *
+ * @returns
+ * The hash for this pointer.
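+ *
+ * For instance, an illustrative use (NBUCKETS and ptr are placeholders,
+ * with NBUCKETS assumed to be a power of two) indexes a hash table:
+ *
+ *     uint32_t idx = os_hash_kernel_pointer(ptr) & (NBUCKETS - 1);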
+ */ +static inline uint32_t +os_hash_kernel_pointer(const void *pointer) +{ + uintptr_t key = (uintptr_t)pointer >> 4; + key *= 0x5052acdb; + return (uint32_t)key ^ __builtin_bswap32((uint32_t)key); +} + +__END_DECLS + +#endif // PRIVATE +#endif // _OS_HASH_H_ diff --git a/libkern/os/internal.c b/libkern/os/internal.c index fca4dc083..1b39417f5 100644 --- a/libkern/os/internal.c +++ b/libkern/os/internal.c @@ -28,58 +28,58 @@ static bool _os_trace_addr_in_text_segment_32(const void *dso, const void *addr) { - const struct mach_header *mhp = (const struct mach_header *) dso; - const struct segment_command *sgp = (const struct segment_command *)(const void *)((const char *)mhp + sizeof(struct mach_header)); + const struct mach_header *mhp = (const struct mach_header *) dso; + const struct segment_command *sgp = (const struct segment_command *)(const void *)((const char *)mhp + sizeof(struct mach_header)); - for (uint32_t i = 0; i < mhp->ncmds; i++) { - if (sgp->cmd == LC_SEGMENT) { - if (strncmp(sgp->segname, SEG_TEXT, sizeof(sgp->segname)) == 0) { - return ((uintptr_t)addr >= (sgp->vmaddr) && (uintptr_t)addr < (sgp->vmaddr + sgp->vmsize)); - } - } - sgp = (const struct segment_command *)(const void *)((const char *)sgp + sgp->cmdsize); - } + for (uint32_t i = 0; i < mhp->ncmds; i++) { + if (sgp->cmd == LC_SEGMENT) { + if (strncmp(sgp->segname, SEG_TEXT, sizeof(sgp->segname)) == 0) { + return (uintptr_t)addr >= (sgp->vmaddr) && (uintptr_t)addr < (sgp->vmaddr + sgp->vmsize); + } + } + sgp = (const struct segment_command *)(const void *)((const char *)sgp + sgp->cmdsize); + } - return false; + return false; } static bool _os_trace_addr_in_text_segment_64(const void *dso, const void *addr) { - const struct mach_header_64 *mhp = (const struct mach_header_64 *) dso; - const struct segment_command_64 *sgp = (const struct segment_command_64 *)(const void *)((const char *)mhp + sizeof(struct mach_header_64)); + const struct mach_header_64 *mhp = (const struct mach_header_64 *) dso; + const struct segment_command_64 *sgp = (const struct segment_command_64 *)(const void *)((const char *)mhp + sizeof(struct mach_header_64)); - for (uint32_t i = 0; i < mhp->ncmds; i++) { - if (sgp->cmd == LC_SEGMENT_64) { - if (strncmp(sgp->segname, SEG_TEXT, sizeof(sgp->segname)) == 0) { - return ((uintptr_t)addr >= (sgp->vmaddr) && (uintptr_t)addr < (sgp->vmaddr + sgp->vmsize)); - } - } - sgp = (const struct segment_command_64 *)(const void *)((const char *)sgp + sgp->cmdsize); - } + for (uint32_t i = 0; i < mhp->ncmds; i++) { + if (sgp->cmd == LC_SEGMENT_64) { + if (strncmp(sgp->segname, SEG_TEXT, sizeof(sgp->segname)) == 0) { + return (uintptr_t)addr >= (sgp->vmaddr) && (uintptr_t)addr < (sgp->vmaddr + sgp->vmsize); + } + } + sgp = (const struct segment_command_64 *)(const void *)((const char *)sgp + sgp->cmdsize); + } - return false; + return false; } bool _os_trace_addr_in_text_segment(const void *dso, const void *addr) { - const struct mach_header *mhp = (const struct mach_header *) dso; - bool retval = false; + const struct mach_header *mhp = (const struct mach_header *) dso; + bool retval = false; - switch (mhp->magic) { - case MH_MAGIC: - retval = _os_trace_addr_in_text_segment_32(dso, addr); - break; + switch (mhp->magic) { + case MH_MAGIC: + retval = _os_trace_addr_in_text_segment_32(dso, addr); + break; - case MH_MAGIC_64: - retval = _os_trace_addr_in_text_segment_64(dso, addr); - break; + case MH_MAGIC_64: + retval = _os_trace_addr_in_text_segment_64(dso, addr); + break; - default: - retval = false; - 
break; - } + default: + retval = false; + break; + } - return retval; + return retval; } diff --git a/libkern/os/log.c b/libkern/os/log.c index d4a8a3e7a..8a64d5e32 100644 --- a/libkern/os/log.c +++ b/libkern/os/log.c @@ -57,9 +57,12 @@ extern void bsd_log_unlock(void); extern void logwakeup(struct msgbuf *); decl_lck_spin_data(extern, oslog_stream_lock) +#define stream_lock() lck_spin_lock(&oslog_stream_lock) +#define stream_unlock() lck_spin_unlock(&oslog_stream_lock) + extern void oslog_streamwakeup(void); void oslog_streamwrite_locked(firehose_tracepoint_id_u ftid, - uint64_t stamp, const void *pubdata, size_t publen); + uint64_t stamp, const void *pubdata, size_t publen); extern void oslog_streamwrite_metadata_locked(oslog_stream_buf_entry_t m_entry); extern int oslog_stream_open; @@ -91,31 +94,31 @@ uint64_t startup_serial_num_procs = 300; // XXX firehose_tracepoint_id_t firehose_debug_trace(firehose_stream_t stream, firehose_tracepoint_id_t trace_id, - uint64_t timestamp, const char *format, const void *pubdata, size_t publen); + uint64_t timestamp, const char *format, const void *pubdata, size_t publen); static inline firehose_tracepoint_id_t _firehose_trace(firehose_stream_t stream, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void *pubdata, size_t publen); + uint64_t stamp, const void *pubdata, size_t publen); static oslog_stream_buf_entry_t oslog_stream_create_buf_entry(oslog_stream_link_type_t type, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void* pubdata, size_t publen); + uint64_t stamp, const void* pubdata, size_t publen); static void _os_log_with_args_internal(os_log_t oslog __unused, os_log_type_t type __unused, - const char *format, va_list args, void *addr, void *dso); + const char *format, va_list args, void *addr, void *dso); static void _os_log_to_msgbuf_internal(const char *format, va_list args, bool safe, bool logging); static void _os_log_to_log_internal(os_log_t oslog, os_log_type_t type, - const char *format, va_list args, void *addr, void *dso); + const char *format, va_list args, void *addr, void *dso); static void _os_log_actual(os_log_t oslog, os_log_type_t type, const char *format, void - *dso, void *addr, os_log_buffer_context_t context); + *dso, void *addr, os_log_buffer_context_t context); bool os_log_info_enabled(os_log_t log __unused) @@ -141,48 +144,48 @@ _os_log_string_is_public(const char *str __unused) return true; } -__attribute__((noinline,not_tail_called)) void +__attribute__((noinline, not_tail_called)) void _os_log_internal(void *dso, os_log_t log, uint8_t type, const char *message, ...) 
{ - va_list args; - void *addr = __builtin_return_address(0); + va_list args; + void *addr = __builtin_return_address(0); - va_start(args, message); + va_start(args, message); - _os_log_with_args_internal(log, type, message, args, addr, dso); + _os_log_with_args_internal(log, type, message, args, addr, dso); - va_end(args); + va_end(args); - return; + return; } #pragma mark - shim functions -__attribute__((noinline,not_tail_called)) void +__attribute__((noinline, not_tail_called)) void os_log_with_args(os_log_t oslog, os_log_type_t type, const char *format, va_list args, void *addr) { - // if no address passed, look it up - if (addr == NULL) { - addr = __builtin_return_address(0); - } + // if no address passed, look it up + if (addr == NULL) { + addr = __builtin_return_address(0); + } - _os_log_with_args_internal(oslog, type, format, args, addr, NULL); + _os_log_with_args_internal(oslog, type, format, args, addr, NULL); } static void _os_log_with_args_internal(os_log_t oslog, os_log_type_t type, - const char *format, va_list args, void *addr, void *dso) + const char *format, va_list args, void *addr, void *dso) { - uint32_t logging_config = atm_get_diagnostic_config(); - boolean_t safe; - boolean_t logging; + uint32_t logging_config = atm_get_diagnostic_config(); + boolean_t safe; + boolean_t logging; - if (format[0] == '\0') { - return; - } + if (format[0] == '\0') { + return; + } - /* early boot can log to dmesg for later replay (27307943) */ - safe = (!early_boot_complete || oslog_is_safe()); + /* early boot can log to dmesg for later replay (27307943) */ + safe = (!early_boot_complete || oslog_is_safe()); if (logging_config & ATM_TRACE_DISABLE || logging_config & ATM_TRACE_OFF) { logging = false; @@ -190,169 +193,181 @@ _os_log_with_args_internal(os_log_t oslog, os_log_type_t type, logging = true; } - if (oslog != &_os_log_replay) { - _os_log_to_msgbuf_internal(format, args, safe, logging); - } + if (oslog != &_os_log_replay) { + _os_log_to_msgbuf_internal(format, args, safe, logging); + } - if (safe && logging) { - _os_log_to_log_internal(oslog, type, format, args, addr, dso); - } + if (safe && logging) { + _os_log_to_log_internal(oslog, type, format, args, addr, dso); + } } static void _os_log_to_msgbuf_internal(const char *format, va_list args, bool safe, bool logging) { - static int msgbufreplay = -1; - va_list args_copy; + static int msgbufreplay = -1; + va_list args_copy; #if DEVELOPMENT || DEBUG - if (safe) { - bsd_log_lock(); - } + if (safe) { + bsd_log_lock(); + } #else - bsd_log_lock(); + bsd_log_lock(); #endif - if (!safe) { - if (-1 == msgbufreplay) msgbufreplay = msgbufp->msg_bufx; - } else if (logging && (-1 != msgbufreplay)) { - uint32_t i; - uint32_t localbuff_size; - int newl, position; - char *localbuff, *p, *s, *next, ch; - - position = msgbufreplay; - msgbufreplay = -1; - localbuff_size = (msgbufp->msg_size + 2); /* + '\n' + '\0' */ - /* Size for non-blocking */ - if (localbuff_size > 4096) localbuff_size = 4096; - bsd_log_unlock(); - /* Allocate a temporary non-circular buffer */ - if ((localbuff = (char *)kalloc_noblock(localbuff_size))) { - /* in between here, the log could become bigger, but that's fine */ - bsd_log_lock(); - /* - * The message buffer is circular; start at the replay pointer, and - * make one loop up to write pointer - 1. 
- */ - p = msgbufp->msg_bufc + position; - for (i = newl = 0; p != msgbufp->msg_bufc + msgbufp->msg_bufx - 1; ++p) { - if (p >= msgbufp->msg_bufc + msgbufp->msg_size) - p = msgbufp->msg_bufc; - ch = *p; - if (ch == '\0') continue; - newl = (ch == '\n'); - localbuff[i++] = ch; - if (i >= (localbuff_size - 2)) break; - } - bsd_log_unlock(); - - if (!newl) localbuff[i++] = '\n'; - localbuff[i++] = 0; - - s = localbuff; - while ((next = strchr(s, '\n'))) { - next++; - ch = next[0]; - next[0] = 0; - os_log(&_os_log_replay, "%s", s); - next[0] = ch; - s = next; - } - kfree(localbuff, localbuff_size); - } - bsd_log_lock(); - } + if (!safe) { + if (-1 == msgbufreplay) { + msgbufreplay = msgbufp->msg_bufx; + } + } else if (logging && (-1 != msgbufreplay)) { + uint32_t i; + uint32_t localbuff_size; + int newl, position; + char *localbuff, *p, *s, *next, ch; + + position = msgbufreplay; + msgbufreplay = -1; + localbuff_size = (msgbufp->msg_size + 2); /* + '\n' + '\0' */ + /* Size for non-blocking */ + if (localbuff_size > 4096) { + localbuff_size = 4096; + } + bsd_log_unlock(); + /* Allocate a temporary non-circular buffer */ + if ((localbuff = (char *)kalloc_noblock(localbuff_size))) { + /* in between here, the log could become bigger, but that's fine */ + bsd_log_lock(); + /* + * The message buffer is circular; start at the replay pointer, and + * make one loop up to write pointer - 1. + */ + p = msgbufp->msg_bufc + position; + for (i = newl = 0; p != msgbufp->msg_bufc + msgbufp->msg_bufx - 1; ++p) { + if (p >= msgbufp->msg_bufc + msgbufp->msg_size) { + p = msgbufp->msg_bufc; + } + ch = *p; + if (ch == '\0') { + continue; + } + newl = (ch == '\n'); + localbuff[i++] = ch; + if (i >= (localbuff_size - 2)) { + break; + } + } + bsd_log_unlock(); + + if (!newl) { + localbuff[i++] = '\n'; + } + localbuff[i++] = 0; + + s = localbuff; + while ((next = strchr(s, '\n'))) { + next++; + ch = next[0]; + next[0] = 0; + os_log(&_os_log_replay, "%s", s); + next[0] = ch; + s = next; + } + kfree(localbuff, localbuff_size); + } + bsd_log_lock(); + } - va_copy(args_copy, args); - vprintf_log_locked(format, args_copy); - va_end(args_copy); + va_copy(args_copy, args); + vprintf_log_locked(format, args_copy); + va_end(args_copy); #if DEVELOPMENT || DEBUG - if (safe) { - bsd_log_unlock(); - logwakeup(msgbufp); - } + if (safe) { + bsd_log_unlock(); + logwakeup(msgbufp); + } #else - bsd_log_unlock(); - if (safe) logwakeup(msgbufp); + bsd_log_unlock(); + if (safe) { + logwakeup(msgbufp); + } #endif } static void _os_log_to_log_internal(os_log_t oslog, os_log_type_t type, - const char *format, va_list args, void *addr, void *dso) + const char *format, va_list args, void *addr, void *dso) { - struct os_log_buffer_context_s context; - unsigned char buffer_data[OS_LOG_BUFFER_MAX_SIZE] __attribute__((aligned(8))); - os_log_buffer_t buffer = (os_log_buffer_t)buffer_data; - uint8_t pubdata[OS_LOG_BUFFER_MAX_SIZE]; - va_list args_copy; - - if (addr == NULL) { - return; - } + struct os_log_buffer_context_s context; + unsigned char buffer_data[OS_LOG_BUFFER_MAX_SIZE] __attribute__((aligned(8))); + os_log_buffer_t buffer = (os_log_buffer_t)buffer_data; + uint8_t pubdata[OS_LOG_BUFFER_MAX_SIZE]; + va_list args_copy; + + if (addr == NULL) { + return; + } #if FIREHOSE_USES_SHARED_CACHE - dso = (void *) segLOWESTTEXT; + dso = (void *) segLOWESTTEXT; #else /* FIREHOSE_USES_SHARED_CACHE */ - if (dso == NULL) { - dso = (void *) OSKextKextForAddress(format); - if (dso == NULL) { - return; - } - } + if (dso == NULL) { + dso = (void *) 
OSKextKextForAddress(format); + if (dso == NULL) { + return; + } + } - if (!_os_trace_addr_in_text_segment(dso, format)) { - return; - } + if (!_os_trace_addr_in_text_segment(dso, format)) { + return; + } - void *dso_addr = (void *) OSKextKextForAddress(addr); - if (dso != dso_addr) { - return; - } + void *dso_addr = (void *) OSKextKextForAddress(addr); + if (dso != dso_addr) { + return; + } #endif /* FIREHOSE_USES_SHARED_CACHE */ - memset(&context, 0, sizeof(context)); - memset(buffer, 0, OS_LOG_BUFFER_MAX_SIZE); + memset(&context, 0, sizeof(context)); + memset(buffer, 0, OS_LOG_BUFFER_MAX_SIZE); - context.shimmed = true; - context.buffer = buffer; - context.content_sz = OS_LOG_BUFFER_MAX_SIZE - sizeof(*buffer); - context.pubdata = pubdata; - context.pubdata_sz = sizeof(pubdata); + context.shimmed = true; + context.buffer = buffer; + context.content_sz = OS_LOG_BUFFER_MAX_SIZE - sizeof(*buffer); + context.pubdata = pubdata; + context.pubdata_sz = sizeof(pubdata); - va_copy(args_copy, args); + va_copy(args_copy, args); - (void)hw_atomic_add(&oslog_p_total_msgcount, 1); - if (_os_log_encode(format, args_copy, 0, &context)) { - _os_log_actual(oslog, type, format, dso, addr, &context); - } - else { - (void)hw_atomic_add(&oslog_p_error_count, 1); - } + (void)hw_atomic_add(&oslog_p_total_msgcount, 1); + if (_os_log_encode(format, args_copy, 0, &context)) { + _os_log_actual(oslog, type, format, dso, addr, &context); + } else { + (void)hw_atomic_add(&oslog_p_error_count, 1); + } - va_end(args_copy); + va_end(args_copy); } static inline size_t _os_trace_write_location_for_address(uint8_t buf[static sizeof(uint64_t)], - void *dso, const void *address, firehose_tracepoint_flags_t *flags) + void *dso, const void *address, firehose_tracepoint_flags_t *flags) { #if FIREHOSE_USES_SHARED_CACHE - *flags = _firehose_tracepoint_flags_pc_style_shared_cache; - memcpy(buf, (uint32_t[]){ (uintptr_t)address - (uintptr_t)dso }, - sizeof(uint32_t)); + *flags = _firehose_tracepoint_flags_pc_style_shared_cache; + memcpy(buf, (uint32_t[]){ (uintptr_t)address - (uintptr_t)dso }, + sizeof(uint32_t)); return sizeof(uint32_t); #else /* FIREHOSE_USES_SHARED_CACHE */ - kernel_mach_header_t *mh = dso; + kernel_mach_header_t *mh = dso; if (mh->filetype == MH_EXECUTE) { *flags = _firehose_tracepoint_flags_pc_style_main_exe; memcpy(buf, (uint32_t[]){ (uintptr_t)address - (uintptr_t)dso }, - sizeof(uint32_t)); + sizeof(uint32_t)); return sizeof(uint32_t); } else { *flags = _firehose_tracepoint_flags_pc_style_absolute; @@ -370,7 +385,7 @@ _os_trace_write_location_for_address(uint8_t buf[static sizeof(uint64_t)], OS_ALWAYS_INLINE static inline size_t _os_log_buffer_pack(uint8_t *buffdata, size_t buffdata_sz, - os_log_buffer_context_t ctx) + os_log_buffer_context_t ctx) { os_log_buffer_t buffer = ctx->buffer; size_t buffer_sz = sizeof(*ctx->buffer) + ctx->content_sz; @@ -387,7 +402,7 @@ _os_log_buffer_pack(uint8_t *buffdata, size_t buffdata_sz, static void _os_log_actual(os_log_t oslog __unused, os_log_type_t type, const char *format, - void *dso, void *addr, os_log_buffer_context_t context) + void *dso, void *addr, os_log_buffer_context_t context) { firehose_stream_t stream; firehose_tracepoint_flags_t flags = 0; @@ -400,7 +415,7 @@ _os_log_actual(os_log_t oslog __unused, os_log_type_t type, const char *format, // dso == the start of the binary that was loaded addr_len = _os_trace_write_location_for_address(buffdata, dso, addr, &flags); buffdata_sz = _os_log_buffer_pack(buffdata + addr_len, - sizeof(buffdata) - addr_len, 
context); + sizeof(buffdata) - addr_len, context); if (buffdata_sz == 0) { return; } @@ -411,16 +426,15 @@ _os_log_actual(os_log_t oslog __unused, os_log_type_t type, const char *format, // create trace_id after we've set additional flags trace_id.ftid_value = FIREHOSE_TRACE_ID_MAKE(firehose_tracepoint_namespace_log, - type, flags, _os_trace_offset(dso, format, flags)); + type, flags, _os_trace_offset(dso, format, flags)); if (FALSE) { firehose_debug_trace(stream, trace_id.ftid_value, timestamp, - format, buffdata, buffdata_sz); + format, buffdata, buffdata_sz); } if (type == OS_LOG_TYPE_INFO || type == OS_LOG_TYPE_DEBUG) { stream = firehose_stream_memory; - } - else { + } else { stream = firehose_stream_persist; } _firehose_trace(stream, trace_id, timestamp, buffdata, buffdata_sz); @@ -428,11 +442,11 @@ _os_log_actual(os_log_t oslog __unused, os_log_type_t type, const char *format, static inline firehose_tracepoint_id_t _firehose_trace(firehose_stream_t stream, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void *pubdata, size_t publen) + uint64_t stamp, const void *pubdata, size_t publen) { const uint16_t ft_size = offsetof(struct firehose_tracepoint_s, ft_data); const size_t _firehose_chunk_payload_size = - sizeof(((struct firehose_chunk_s *)0)->fc_data); + sizeof(((struct firehose_chunk_s *)0)->fc_data); firehose_tracepoint_t ft; @@ -443,16 +457,15 @@ _firehose_trace(firehose_stream_t stream, firehose_tracepoint_id_u ftid, } if (oslog_stream_open && (stream != firehose_stream_metadata)) { - - lck_spin_lock(&oslog_stream_lock); + stream_lock(); if (!oslog_stream_open) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); goto out; } oslog_s_total_msgcount++; oslog_streamwrite_locked(ftid, stamp, pubdata, publen); - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); oslog_streamwakeup(); } @@ -462,8 +475,7 @@ out: if (oslog_boot_done) { if (stream == firehose_stream_metadata) { (void)hw_atomic_add(&oslog_p_metadata_dropped_msgcount, 1); - } - else { + } else { // If we run out of space in the persistence buffer we're // dropping the message. 
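// (Editor's illustrative aside, not part of this patch: drops are only
// counted here, never reported back to the caller, so a test can detect
// them by sampling the counter around a burst of logging:
//
//     uint32_t before = oslog_p_dropped_msgcount;
//     /* ...log aggressively... */
//     bool saw_drop = (oslog_p_dropped_msgcount != before);
//
// which is exactly the strategy test_stresslog_dropmsg() uses later in
// this file.)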
(void)hw_atomic_add(&oslog_p_dropped_msgcount, 1); @@ -475,14 +487,14 @@ out: //only stream available during boot is persist offset = firehose_chunk_tracepoint_try_reserve(fbc, stamp, - firehose_stream_persist, 0, publen, 0, NULL); + firehose_stream_persist, 0, publen, 0, NULL); if (offset <= 0) { (void)hw_atomic_add(&oslog_p_boot_dropped_msgcount, 1); return 0; } ft = firehose_chunk_tracepoint_begin(fbc, stamp, publen, - thread_tid(current_thread()), offset); + thread_tid(current_thread()), offset); memcpy(ft->ft_data, pubdata, publen); firehose_chunk_tracepoint_end(fbc, ft, ftid); (void)hw_atomic_add(&oslog_p_saved_msgcount, 1); @@ -496,8 +508,7 @@ out: __firehose_buffer_tracepoint_flush(ft, ftid); if (stream == firehose_stream_metadata) { (void)hw_atomic_add(&oslog_p_metadata_saved_msgcount, 1); - } - else { + } else { (void)hw_atomic_add(&oslog_p_saved_msgcount, 1); } return ftid.ftid_value; @@ -505,7 +516,7 @@ out: static oslog_stream_buf_entry_t oslog_stream_create_buf_entry(oslog_stream_link_type_t type, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void* pubdata, size_t publen) + uint64_t stamp, const void* pubdata, size_t publen) { oslog_stream_buf_entry_t m_entry = NULL; firehose_tracepoint_t ft = NULL; @@ -516,7 +527,7 @@ oslog_stream_create_buf_entry(oslog_stream_link_type_t type, firehose_tracepoint } m_entry_len = sizeof(struct oslog_stream_buf_entry_s) + - sizeof(struct firehose_tracepoint_s) + publen; + sizeof(struct firehose_tracepoint_s) + publen; m_entry = (oslog_stream_buf_entry_t) kalloc(m_entry_len); if (!m_entry) { return NULL; @@ -538,38 +549,38 @@ oslog_stream_create_buf_entry(oslog_stream_link_type_t type, firehose_tracepoint #ifdef KERNEL void firehose_trace_metadata(firehose_stream_t stream, firehose_tracepoint_id_u ftid, - uint64_t stamp, const void *pubdata, size_t publen) + uint64_t stamp, const void *pubdata, size_t publen) { oslog_stream_buf_entry_t m_entry = NULL; // If streaming mode is not on, only log the metadata // in the persistence buffer - lck_spin_lock(&oslog_stream_lock); + stream_lock(); if (!oslog_stream_open) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); goto finish; } - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); // Setup and write the stream metadata entry m_entry = oslog_stream_create_buf_entry(oslog_stream_link_type_metadata, ftid, - stamp, pubdata, publen); + stamp, pubdata, publen); if (!m_entry) { (void)hw_atomic_add(&oslog_s_error_count, 1); goto finish; } - lck_spin_lock(&oslog_stream_lock); + stream_lock(); if (!oslog_stream_open) { - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); kfree(m_entry, sizeof(struct oslog_stream_buf_entry_s) + - sizeof(struct firehose_tracepoint_s) + publen); + sizeof(struct firehose_tracepoint_s) + publen); goto finish; } oslog_s_metadata_msgcount++; oslog_streamwrite_metadata_locked(m_entry); - lck_spin_unlock(&oslog_stream_lock); + stream_unlock(); finish: _firehose_trace(stream, ftid, stamp, pubdata, publen); @@ -578,64 +589,66 @@ finish: firehose_tracepoint_id_t firehose_debug_trace(firehose_stream_t stream, firehose_tracepoint_id_t trace_id, - uint64_t timestamp, const char *format, const void *pubdata, size_t publen) + uint64_t timestamp, const char *format, const void *pubdata, size_t publen) { kprintf("[os_log stream 0x%x trace_id 0x%llx timestamp %llu format '%s' data %p len %lu]\n", - (unsigned int)stream, (unsigned long long)trace_id, timestamp, - format, pubdata, publen); + (unsigned int)stream, (unsigned long long)trace_id, timestamp, + format, 
pubdata, publen); size_t i; const unsigned char *cdata = (const unsigned char *)pubdata; - for (i=0; i < publen; i += 8) { + for (i = 0; i < publen; i += 8) { kprintf(">oslog 0x%08x: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n", - (unsigned int)i, - (i+0) < publen ? cdata[i+0] : 0, - (i+1) < publen ? cdata[i+1] : 0, - (i+2) < publen ? cdata[i+2] : 0, - (i+3) < publen ? cdata[i+3] : 0, - (i+4) < publen ? cdata[i+4] : 0, - (i+5) < publen ? cdata[i+5] : 0, - (i+6) < publen ? cdata[i+6] : 0, - (i+7) < publen ? cdata[i+7] : 0 - ); + (unsigned int)i, + (i + 0) < publen ? cdata[i + 0] : 0, + (i + 1) < publen ? cdata[i + 1] : 0, + (i + 2) < publen ? cdata[i + 2] : 0, + (i + 3) < publen ? cdata[i + 3] : 0, + (i + 4) < publen ? cdata[i + 4] : 0, + (i + 5) < publen ? cdata[i + 5] : 0, + (i + 6) < publen ? cdata[i + 6] : 0, + (i + 7) < publen ? cdata[i + 7] : 0 + ); } return trace_id; } void -__firehose_buffer_push_to_logd(firehose_buffer_t fb __unused, bool for_io __unused) { - oslogwakeup(); - return; +__firehose_buffer_push_to_logd(firehose_buffer_t fb __unused, bool for_io __unused) +{ + oslogwakeup(); + return; } void __firehose_allocate(vm_offset_t *addr, vm_size_t size __unused) { - firehose_chunk_t kernel_buffer = (firehose_chunk_t)kernel_firehose_addr; - - if (kernel_firehose_addr) { - *addr = kernel_firehose_addr; - } - else { - *addr = 0; - return; - } - // Now that we are done adding logs to this chunk, set the number of writers to 0 - // Without this, logd won't flush when the page is full - firehose_boot_chunk->fc_pos.fcp_refcnt = 0; - memcpy(&kernel_buffer[FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT - 1], (const void *)firehose_boot_chunk, FIREHOSE_CHUNK_SIZE); - return; + firehose_chunk_t kernel_buffer = (firehose_chunk_t)kernel_firehose_addr; + + if (kernel_firehose_addr) { + *addr = kernel_firehose_addr; + } else { + *addr = 0; + return; + } + // Now that we are done adding logs to this chunk, set the number of writers to 0 + // Without this, logd won't flush when the page is full + firehose_boot_chunk->fc_pos.fcp_refcnt = 0; + memcpy(&kernel_buffer[FIREHOSE_BUFFER_KERNEL_CHUNK_COUNT - 1], (const void *)firehose_boot_chunk, FIREHOSE_CHUNK_SIZE); + return; } // There isn't a lock held in this case. 
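/*
 * (Editor's illustrative aside, not part of this patch: the firehose
 * "critical region" takes no lock; it is implemented purely by disabling
 * and re-enabling preemption, so a caller is expected to bracket short,
 * non-blocking work on firehose buffer state like:
 *
 *     __firehose_critical_region_enter();
 *     ...touch firehose state, no blocking allowed...
 *     __firehose_critical_region_leave();
 *
 * A minimal sketch of the assumed usage, inferred from the enter/leave
 * pair defined below.)
 */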
void -__firehose_critical_region_enter(void) { - disable_preemption(); - return; +__firehose_critical_region_enter(void) +{ + disable_preemption(); + return; } void -__firehose_critical_region_leave(void) { - enable_preemption(); - return; +__firehose_critical_region_leave(void) +{ + enable_preemption(); + return; } #ifdef CONFIG_XNUPOST @@ -662,17 +675,17 @@ kern_return_t test_os_log_parallel(void); #define GENOSLOGHELPER(fname, ident, callout_f) \ void fname(uint32_t uniqid, uint64_t count) \ { \ - int32_t datalen = 0; \ - uint32_t checksum = 0; \ - char databuffer[256]; \ - T_LOG("Doing os_log of %llu TESTLOG msgs for fn " ident, count); \ - for (uint64_t i = 0; i < count; i++) \ - { \ - datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT(ident), uniqid, i + 1, count); \ - checksum = crc32(0, databuffer, datalen); \ - callout_f(OS_LOG_DEFAULT, TESTOSLOG(ident), checksum, uniqid, i + 1, count); \ - /*T_LOG(TESTOSLOG(ident), checksum, uniqid, i + 1, count);*/ \ - } \ + int32_t datalen = 0; \ + uint32_t checksum = 0; \ + char databuffer[256]; \ + T_LOG("Doing os_log of %llu TESTLOG msgs for fn " ident, count); \ + for (uint64_t i = 0; i < count; i++) \ + { \ + datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT(ident), uniqid, i + 1, count); \ + checksum = crc32(0, databuffer, datalen); \ + callout_f(OS_LOG_DEFAULT, TESTOSLOG(ident), checksum, uniqid, i + 1, count); \ + /*T_LOG(TESTOSLOG(ident), checksum, uniqid, i + 1, count);*/ \ + } \ } GENOSLOGHELPER(test_oslog_info_helper, "oslog_info_helper", os_log_info); @@ -681,217 +694,219 @@ GENOSLOGHELPER(test_oslog_debug_helper, "oslog_debug_helper", os_log_debug); GENOSLOGHELPER(test_oslog_error_helper, "oslog_error_helper", os_log_error); GENOSLOGHELPER(test_oslog_default_helper, "oslog_default_helper", os_log); -kern_return_t test_os_log() +kern_return_t +test_os_log() { - char databuffer[256]; - uint32_t uniqid = RandomULong(); - uint32_t match_count = 0; - uint32_t checksum = 0; - uint32_t total_msg = 0; - uint32_t saved_msg = 0; - uint32_t dropped_msg = 0; - int datalen = 0; - uint64_t a = mach_absolute_time(); - uint64_t seqno = 1; - uint64_t total_seqno = 2; - - os_log_t log_handle = os_log_create("com.apple.xnu.test.t1", "kpost"); - - T_ASSERT_EQ_PTR(&_os_log_default, log_handle, "os_log_create returns valid value."); - T_ASSERT_EQ_INT(TRUE, os_log_info_enabled(log_handle), "os_log_info is enabled"); - T_ASSERT_EQ_INT(TRUE, os_log_debug_enabled(log_handle), "os_log_debug is enabled"); - T_ASSERT_EQ_PTR(&_os_log_default, OS_LOG_DEFAULT, "ensure OS_LOG_DEFAULT is _os_log_default"); - - total_msg = oslog_p_total_msgcount; - saved_msg = oslog_p_saved_msgcount; - dropped_msg = oslog_p_dropped_msgcount; - T_LOG("oslog internal counters total %u , saved %u, dropped %u", total_msg, saved_msg, dropped_msg); - - T_LOG("Validating with uniqid %u u64 %llu", uniqid, a); - T_ASSERT_NE_UINT(0, uniqid, "random number should not be zero"); - T_ASSERT_NE_ULLONG(0, a, "absolute time should not be zero"); - - datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("printf_only"), uniqid, seqno, total_seqno); - checksum = crc32(0, databuffer, datalen); - printf(TESTOSLOG("printf_only") "mat%llu\n", checksum, uniqid, seqno, total_seqno, a); - - seqno += 1; - datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("printf_only"), uniqid, seqno, total_seqno); - checksum = crc32(0, databuffer, datalen); - printf(TESTOSLOG("printf_only") "mat%llu\n", checksum, uniqid, seqno, total_seqno, a); - - datalen = 
snprintf(databuffer, sizeof(databuffer), "kernel^0^test^printf_only#mat%llu", a); - match_count = find_pattern_in_buffer(databuffer, datalen, total_seqno); - T_EXPECT_EQ_UINT(match_count, 2, "verify printf_only goes to systemlog buffer"); - - uint32_t logging_config = atm_get_diagnostic_config(); - T_LOG("checking atm_diagnostic_config 0x%X", logging_config); - - if ((logging_config & ATM_TRACE_OFF) || (logging_config & ATM_TRACE_DISABLE)) - { - T_LOG("ATM_TRACE_OFF / ATM_TRACE_DISABLE is set. Would not see oslog messages. skipping the rest of test."); - return KERN_SUCCESS; - } + char databuffer[256]; + uint32_t uniqid = RandomULong(); + uint32_t match_count = 0; + uint32_t checksum = 0; + uint32_t total_msg = 0; + uint32_t saved_msg = 0; + uint32_t dropped_msg = 0; + int datalen = 0; + uint64_t a = mach_absolute_time(); + uint64_t seqno = 1; + uint64_t total_seqno = 2; + + os_log_t log_handle = os_log_create("com.apple.xnu.test.t1", "kpost"); + + T_ASSERT_EQ_PTR(&_os_log_default, log_handle, "os_log_create returns valid value."); + T_ASSERT_EQ_INT(TRUE, os_log_info_enabled(log_handle), "os_log_info is enabled"); + T_ASSERT_EQ_INT(TRUE, os_log_debug_enabled(log_handle), "os_log_debug is enabled"); + T_ASSERT_EQ_PTR(&_os_log_default, OS_LOG_DEFAULT, "ensure OS_LOG_DEFAULT is _os_log_default"); + + total_msg = oslog_p_total_msgcount; + saved_msg = oslog_p_saved_msgcount; + dropped_msg = oslog_p_dropped_msgcount; + T_LOG("oslog internal counters total %u , saved %u, dropped %u", total_msg, saved_msg, dropped_msg); + + T_LOG("Validating with uniqid %u u64 %llu", uniqid, a); + T_ASSERT_NE_UINT(0, uniqid, "random number should not be zero"); + T_ASSERT_NE_ULLONG(0, a, "absolute time should not be zero"); + + datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("printf_only"), uniqid, seqno, total_seqno); + checksum = crc32(0, databuffer, datalen); + printf(TESTOSLOG("printf_only") "mat%llu\n", checksum, uniqid, seqno, total_seqno, a); + + seqno += 1; + datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("printf_only"), uniqid, seqno, total_seqno); + checksum = crc32(0, databuffer, datalen); + printf(TESTOSLOG("printf_only") "mat%llu\n", checksum, uniqid, seqno, total_seqno, a); + + datalen = snprintf(databuffer, sizeof(databuffer), "kernel^0^test^printf_only#mat%llu", a); + match_count = find_pattern_in_buffer(databuffer, datalen, total_seqno); + T_EXPECT_EQ_UINT(match_count, 2, "verify printf_only goes to systemlog buffer"); + + uint32_t logging_config = atm_get_diagnostic_config(); + T_LOG("checking atm_diagnostic_config 0x%X", logging_config); + + if ((logging_config & ATM_TRACE_OFF) || (logging_config & ATM_TRACE_DISABLE)) { + T_LOG("ATM_TRACE_OFF / ATM_TRACE_DISABLE is set. Would not see oslog messages. 
Skipping the rest of test."); + return KERN_SUCCESS; + } - /* for enabled logging printfs should be saved in oslog as well */ - T_EXPECT_GE_UINT((oslog_p_total_msgcount - total_msg), 2, "atleast 2 msgs should be seen by oslog system"); + /* for enabled logging printfs should be saved in oslog as well */ + T_EXPECT_GE_UINT((oslog_p_total_msgcount - total_msg), 2, "at least 2 msgs should be seen by oslog system"); - a = mach_absolute_time(); - total_seqno = 1; - seqno = 1; - total_msg = oslog_p_total_msgcount; - saved_msg = oslog_p_saved_msgcount; - dropped_msg = oslog_p_dropped_msgcount; - datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("oslog_info"), uniqid, seqno, total_seqno); - checksum = crc32(0, databuffer, datalen); - os_log_info(log_handle, TESTOSLOG("oslog_info") "mat%llu", checksum, uniqid, seqno, total_seqno, a); - T_EXPECT_GE_UINT((oslog_p_total_msgcount - total_msg), 1, "total message count in buffer"); + a = mach_absolute_time(); + total_seqno = 1; + seqno = 1; + total_msg = oslog_p_total_msgcount; + saved_msg = oslog_p_saved_msgcount; + dropped_msg = oslog_p_dropped_msgcount; + datalen = snprintf(databuffer, sizeof(databuffer), TESTOSLOGFMT("oslog_info"), uniqid, seqno, total_seqno); + checksum = crc32(0, databuffer, datalen); + os_log_info(log_handle, TESTOSLOG("oslog_info") "mat%llu", checksum, uniqid, seqno, total_seqno, a); + T_EXPECT_GE_UINT((oslog_p_total_msgcount - total_msg), 1, "total message count in buffer"); - datalen = snprintf(databuffer, sizeof(databuffer), "kernel^0^test^oslog_info#mat%llu", a); - match_count = find_pattern_in_buffer(databuffer, datalen, total_seqno); - T_EXPECT_EQ_UINT(match_count, 1, "verify oslog_info does not go to systemlog buffer"); + datalen = snprintf(databuffer, sizeof(databuffer), "kernel^0^test^oslog_info#mat%llu", a); + match_count = find_pattern_in_buffer(databuffer, datalen, total_seqno); + T_EXPECT_EQ_UINT(match_count, 1, "verify oslog_info does not go to systemlog buffer"); - total_msg = oslog_p_total_msgcount; - test_oslog_info_helper(uniqid, 10); - T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_info_helper: Should have seen 10 msgs"); + total_msg = oslog_p_total_msgcount; + test_oslog_info_helper(uniqid, 10); + T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_info_helper: Should have seen 10 msgs"); - total_msg = oslog_p_total_msgcount; - test_oslog_debug_helper(uniqid, 10); - T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_debug_helper:Should have seen 10 msgs"); + total_msg = oslog_p_total_msgcount; + test_oslog_debug_helper(uniqid, 10); + T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_debug_helper: Should have seen 10 msgs"); - total_msg = oslog_p_total_msgcount; - test_oslog_error_helper(uniqid, 10); - T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_error_helper:Should have seen 10 msgs"); + total_msg = oslog_p_total_msgcount; + test_oslog_error_helper(uniqid, 10); + T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_error_helper: Should have seen 10 msgs"); - total_msg = oslog_p_total_msgcount; - test_oslog_default_helper(uniqid, 10); - T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_default_helper:Should have seen 10 msgs"); + total_msg = oslog_p_total_msgcount; + test_oslog_default_helper(uniqid, 10); + T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_default_helper: Should have seen 10 msgs"); - total_msg = oslog_p_total_msgcount; - 
test_oslog_fault_helper(uniqid, 10); - T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_fault_helper:Should have seen 10 msgs"); + total_msg = oslog_p_total_msgcount; + test_oslog_fault_helper(uniqid, 10); + T_EXPECT_GE_UINT(oslog_p_total_msgcount - total_msg, 10, "test_oslog_fault_helper: Should have seen 10 msgs"); - T_LOG("oslog internal counters total %u , saved %u, dropped %u", oslog_p_total_msgcount, oslog_p_saved_msgcount, - oslog_p_dropped_msgcount); + T_LOG("oslog internal counters total %u , saved %u, dropped %u", oslog_p_total_msgcount, oslog_p_saved_msgcount, + oslog_p_dropped_msgcount); - return KERN_SUCCESS; + return KERN_SUCCESS; } static uint32_t _test_log_loop_count = 0; -void _test_log_loop(void * arg __unused, wait_result_t wres __unused) +void +_test_log_loop(void * arg __unused, wait_result_t wres __unused) { - uint32_t uniqid = RandomULong(); - test_oslog_debug_helper(uniqid, 100); - (void)hw_atomic_add(&_test_log_loop_count, 100); + uint32_t uniqid = RandomULong(); + test_oslog_debug_helper(uniqid, 100); + (void)hw_atomic_add(&_test_log_loop_count, 100); } -kern_return_t test_os_log_parallel(void) +kern_return_t +test_os_log_parallel(void) { - thread_t thread[2]; - kern_return_t kr; - uint32_t uniqid = RandomULong(); + thread_t thread[2]; + kern_return_t kr; + uint32_t uniqid = RandomULong(); - printf("oslog internal counters total %u , saved %u, dropped %u", oslog_p_total_msgcount, oslog_p_saved_msgcount, - oslog_p_dropped_msgcount); + printf("oslog internal counters total %u , saved %u, dropped %u", oslog_p_total_msgcount, oslog_p_saved_msgcount, + oslog_p_dropped_msgcount); - kr = kernel_thread_start(_test_log_loop, NULL, &thread[0]); - T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully"); + kr = kernel_thread_start(_test_log_loop, NULL, &thread[0]); + T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully"); - kr = kernel_thread_start(_test_log_loop, NULL, &thread[1]); - T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully"); + kr = kernel_thread_start(_test_log_loop, NULL, &thread[1]); + T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully"); - test_oslog_info_helper(uniqid, 100); + test_oslog_info_helper(uniqid, 100); - /* wait until other thread has also finished */ - while (_test_log_loop_count < 200) - { - delay(1000); - } + /* wait until other thread has also finished */ + while (_test_log_loop_count < 200) { + delay(1000); + } - thread_deallocate(thread[0]); - thread_deallocate(thread[1]); + thread_deallocate(thread[0]); + thread_deallocate(thread[1]); - T_LOG("oslog internal counters total %u , saved %u, dropped %u", oslog_p_total_msgcount, oslog_p_saved_msgcount, - oslog_p_dropped_msgcount); - T_PASS("parallel_logging tests is now complete"); + T_LOG("oslog internal counters total %u , saved %u, dropped %u", oslog_p_total_msgcount, oslog_p_saved_msgcount, + oslog_p_dropped_msgcount); + T_PASS("parallel_logging test is now complete"); - return KERN_SUCCESS; + return KERN_SUCCESS; } -void test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t len) +void +test_oslog_handleOSLogCtl(int32_t * in, int32_t * out, int32_t len) { - if (!in || !out || len != 4) - return; - switch (in[0]) { - case 1: - { - /* send out counters */ - out[1] = oslog_p_total_msgcount; - out[2] = oslog_p_saved_msgcount; - out[3] = oslog_p_dropped_msgcount; - out[0] = KERN_SUCCESS; - break; - } - case 2: - { - /* mini stress run */ - out[0] = test_os_log_parallel(); 
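/*
 * (Editor's illustrative aside, not part of this patch:
 * test_oslog_handleOSLogCtl is driven through a 4-word in/out protocol,
 * where in[0] selects the operation and out[0] carries the status. A
 * hypothetical caller might look like:
 *
 *     int32_t in[4] = { 1, 0, 0, 0 };   // op 1: read counters
 *     int32_t out[4] = { 0 };
 *     test_oslog_handleOSLogCtl(in, out, 4);
 *     // on success: out[1]/out[2]/out[3] hold total/saved/dropped
 *
 * Sketch based on the switch in this function; the caller shown is
 * hypothetical.)
 */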
- break; - } - case 3: - { - /* drop msg tests */ - out[1] = RandomULong(); - out[0] = test_stresslog_dropmsg(out[1]); - break; - } - case 4: - { - /* invoke log helpers */ - uint32_t uniqid = in[3]; - int32_t msgcount = in[2]; - if (uniqid == 0 || msgcount == 0) - { - out[0] = KERN_INVALID_VALUE; - return; - } - - switch (in[1]) { - case OS_LOG_TYPE_INFO: test_oslog_info_helper(uniqid, msgcount); break; - case OS_LOG_TYPE_DEBUG: test_oslog_debug_helper(uniqid, msgcount); break; - case OS_LOG_TYPE_ERROR: test_oslog_error_helper(uniqid, msgcount); break; - case OS_LOG_TYPE_FAULT: test_oslog_fault_helper(uniqid, msgcount); break; - case OS_LOG_TYPE_DEFAULT: - default: test_oslog_default_helper(uniqid, msgcount); break; - } - out[0] = KERN_SUCCESS; - break; - /* end of case 4 */ - } - default: - { - out[0] = KERN_INVALID_VALUE; - break; - } - } - return; + if (!in || !out || len != 4) { + return; + } + switch (in[0]) { + case 1: + { + /* send out counters */ + out[1] = oslog_p_total_msgcount; + out[2] = oslog_p_saved_msgcount; + out[3] = oslog_p_dropped_msgcount; + out[0] = KERN_SUCCESS; + break; + } + case 2: + { + /* mini stress run */ + out[0] = test_os_log_parallel(); + break; + } + case 3: + { + /* drop msg tests */ + out[1] = RandomULong(); + out[0] = test_stresslog_dropmsg(out[1]); + break; + } + case 4: + { + /* invoke log helpers */ + uint32_t uniqid = in[3]; + int32_t msgcount = in[2]; + if (uniqid == 0 || msgcount == 0) { + out[0] = KERN_INVALID_VALUE; + return; + } + + switch (in[1]) { + case OS_LOG_TYPE_INFO: test_oslog_info_helper(uniqid, msgcount); break; + case OS_LOG_TYPE_DEBUG: test_oslog_debug_helper(uniqid, msgcount); break; + case OS_LOG_TYPE_ERROR: test_oslog_error_helper(uniqid, msgcount); break; + case OS_LOG_TYPE_FAULT: test_oslog_fault_helper(uniqid, msgcount); break; + case OS_LOG_TYPE_DEFAULT: + default: test_oslog_default_helper(uniqid, msgcount); break; + } + out[0] = KERN_SUCCESS; + break; + /* end of case 4 */ + } + default: + { + out[0] = KERN_INVALID_VALUE; + break; + } + } + return; } -kern_return_t test_stresslog_dropmsg(uint32_t uniqid) +kern_return_t +test_stresslog_dropmsg(uint32_t uniqid) { - uint32_t total, saved, dropped; - total = oslog_p_total_msgcount; - saved = oslog_p_saved_msgcount; - dropped = oslog_p_dropped_msgcount; - uniqid = RandomULong(); - test_oslog_debug_helper(uniqid, 100); - while ((oslog_p_dropped_msgcount - dropped) == 0) - { - test_oslog_debug_helper(uniqid, 100); - } - printf("test_stresslog_dropmsg: logged %u msgs, saved %u and caused a drop of %u msgs. \n", oslog_p_total_msgcount - total, - oslog_p_saved_msgcount - saved, oslog_p_dropped_msgcount - dropped); - return KERN_SUCCESS; + uint32_t total, saved, dropped; + total = oslog_p_total_msgcount; + saved = oslog_p_saved_msgcount; + dropped = oslog_p_dropped_msgcount; + uniqid = RandomULong(); + test_oslog_debug_helper(uniqid, 100); + while ((oslog_p_dropped_msgcount - dropped) == 0) { + test_oslog_debug_helper(uniqid, 100); + } + printf("test_stresslog_dropmsg: logged %u msgs, saved %u and caused a drop of %u msgs. \n", oslog_p_total_msgcount - total, + oslog_p_saved_msgcount - saved, oslog_p_dropped_msgcount - dropped); + return KERN_SUCCESS; } #endif diff --git a/libkern/os/log.h b/libkern/os/log.h index 1da91a8f3..8b58e484e 100644 --- a/libkern/os/log.h +++ b/libkern/os/log.h @@ -54,7 +54,10 @@ extern uint64_t startup_serial_num_procs; #endif /* XNU_KERNEL_PRIVATE */ OS_ALWAYS_INLINE static inline void _os_log_verify_format_str(__unused const char *msg, ...) 
__attribute__((format(os_log, 1, 2))); -OS_ALWAYS_INLINE static inline void _os_log_verify_format_str(__unused const char *msg, ...) { /* placeholder */ } +OS_ALWAYS_INLINE static inline void +_os_log_verify_format_str(__unused const char *msg, ...) /* placeholder */ +{ +} #if OS_OBJECT_USE_OBJC OS_OBJECT_DECL(os_log); @@ -77,7 +80,7 @@ typedef struct os_log_s *os_log_t; * Use this to log a message in accordance with current system settings. */ #define OS_LOG_DEFAULT OS_OBJECT_GLOBAL_OBJECT(os_log_t, _os_log_default) -__OSX_AVAILABLE_STARTING(__MAC_10_12,__IPHONE_10_0) +__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) OS_EXPORT struct os_log_s _os_log_default; @@ -105,11 +108,11 @@ struct os_log_s _os_log_default; * potentially more than one process, usually used by daemons and services. */ OS_ENUM(os_log_type, uint8_t, - OS_LOG_TYPE_DEFAULT = 0x00, - OS_LOG_TYPE_INFO = 0x01, - OS_LOG_TYPE_DEBUG = 0x02, - OS_LOG_TYPE_ERROR = 0x10, - OS_LOG_TYPE_FAULT = 0x11); + OS_LOG_TYPE_DEFAULT = 0x00, + OS_LOG_TYPE_INFO = 0x01, + OS_LOG_TYPE_DEBUG = 0x02, + OS_LOG_TYPE_ERROR = 0x10, + OS_LOG_TYPE_FAULT = 0x11); /*! * @function os_log_create @@ -124,7 +127,7 @@ OS_ENUM(os_log_type, uint8_t, * behavior for messages. * * A log object may customize logging system behavior for its messages by - * adding a configuration file in /Library/LogPreferences. Most options + * adding a configuration file in /Library/LogPreferences. Most options * accept 3 values: "Default", "Yes" or "No" as strings, where "Default" * signifies follow system behavior for the level of messages. * @@ -143,7 +146,7 @@ OS_ENUM(os_log_type, uint8_t, * Default * Persist * No - * TTL + * TTL * Default * * @@ -165,7 +168,7 @@ OS_ENUM(os_log_type, uint8_t, * * Persist * Yes - * TTL + * TTL * 2d * * @@ -198,7 +201,7 @@ OS_ENUM(os_log_type, uint8_t, * * A value will always be returned to allow for dynamic enablement. */ -__OSX_AVAILABLE_STARTING(__MAC_10_12,__IPHONE_10_0) +__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_OBJECT_RETURNS_RETAINED os_log_t os_log_create(const char *subsystem, const char *category); @@ -250,7 +253,7 @@ os_log_debug_enabled(os_log_t log); * Insert a log message into the Unified Logging and Tracing system. * * @discussion - * Insert a log message into the Unified Logging and Tracing system in + * Insert a log message into the Unified Logging and Tracing system in * accordance with the preferences specified by the provided log object. * These messages cannot be disabled and therefore always captured either * to memory or disk. @@ -287,7 +290,7 @@ os_log_debug_enabled(os_log_t log); * Insert a development log message into the Unified Logging and Tracing system. * * @discussion - * Insert a log message into the Unified Logging and Tracing system in + * Insert a log message into the Unified Logging and Tracing system in * accordance with the preferences specified by the provided log object. * * When an os_activity_id_t is present, the log message will also be scoped by @@ -393,7 +396,7 @@ os_log_debug_enabled(os_log_t log); * @discussion * Log a fault message issue into the Unified Logging and Tracing system * signifying a multi-process (i.e., system error) related issue, either - * due to interaction via IPC or some other. Faults will gather information + * due to interaction via IPC or some other. Faults will gather information * from the entire process chain and record it for later inspection. 
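 *
 * (Editor's illustrative aside, not part of this patch: a typical fault
 * call, with a hypothetical log handle `log` and error code `err`, might
 * be
 *
 *     os_log_fault(log, "IPC reply failed (err %d)", err);
 *
 * issued only for genuinely unexpected cross-process failures, since
 * gathering fault information is comparatively expensive.)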
* * When an os_activity_id_t is present, the log message will also be scoped by diff --git a/libkern/os/log_encode.h b/libkern/os/log_encode.h index d214bab21..4591be6dc 100644 --- a/libkern/os/log_encode.h +++ b/libkern/os/log_encode.h @@ -40,105 +40,105 @@ extern boolean_t doprnt_hide_pointers; static bool _encode_data(os_log_buffer_value_t content, const void *arg, uint16_t arg_len, os_log_buffer_context_t context) { - struct os_log_arginfo_s arginfo; - void *databuf; - - if (content->flags & OS_LOG_CONTENT_FLAG_PRIVATE) { - databuf = context->privdata + context->privdata_off; - arginfo.length = MIN(arg_len, (context->privdata_sz - context->privdata_off)); - arginfo.offset = context->privdata_off; - } else { - databuf = context->pubdata + context->pubdata_off; - arginfo.length = MIN(arg_len, (context->pubdata_sz - context->pubdata_off)); - arginfo.offset = context->pubdata_off; - } - - if (context->arg_content_sz > 0) { - arginfo.length = MIN(context->arg_content_sz, arginfo.length); - } - - memcpy(content->value, &arginfo, sizeof(arginfo)); - content->size = sizeof(arginfo); - - if (arginfo.length) { - if (content->type == OS_LOG_BUFFER_VALUE_TYPE_STRING + struct os_log_arginfo_s arginfo; + void *databuf; + + if (content->flags & OS_LOG_CONTENT_FLAG_PRIVATE) { + databuf = context->privdata + context->privdata_off; + arginfo.length = MIN(arg_len, (context->privdata_sz - context->privdata_off)); + arginfo.offset = context->privdata_off; + } else { + databuf = context->pubdata + context->pubdata_off; + arginfo.length = MIN(arg_len, (context->pubdata_sz - context->pubdata_off)); + arginfo.offset = context->pubdata_off; + } + + if (context->arg_content_sz > 0) { + arginfo.length = MIN(context->arg_content_sz, arginfo.length); + } + + memcpy(content->value, &arginfo, sizeof(arginfo)); + content->size = sizeof(arginfo); + + if (arginfo.length) { + if (content->type == OS_LOG_BUFFER_VALUE_TYPE_STRING #ifndef KERNEL - || content->type == OS_LOG_BUFFER_VALUE_TYPE_OBJECT + || content->type == OS_LOG_BUFFER_VALUE_TYPE_OBJECT #endif - ) { - strlcpy(databuf, arg, arginfo.length); - } else { - memcpy(databuf, arg, arginfo.length); - } - } - - if (content->flags & OS_LOG_CONTENT_FLAG_PRIVATE) { - context->privdata_off += arginfo.length; - } else { - context->pubdata_off += arginfo.length; - } - - context->content_off += sizeof(*content) + content->size; - context->arg_content_sz = 0; - - return true; + ) { + strlcpy(databuf, arg, arginfo.length); + } else { + memcpy(databuf, arg, arginfo.length); + } + } + + if (content->flags & OS_LOG_CONTENT_FLAG_PRIVATE) { + context->privdata_off += arginfo.length; + } else { + context->pubdata_off += arginfo.length; + } + + context->content_off += sizeof(*content) + content->size; + context->arg_content_sz = 0; + + return true; } #ifndef KERNEL static void _os_log_parse_annotated(char *annotated, const char **visibility, const char **library, const char **type) { - char *values[3] = { NULL }; - int cnt = 0; - int idx = 0; - - for (; cnt < 3;) { - char *token = strsep(&annotated, ", {}"); - if (token == NULL) { - break; - } - - if (*token == '\0') { - continue; - } - - values[cnt++] = token; - } - - if ((cnt > 0) && (!strcmp(values[0], "public") || !strcmp(values[0], "private"))) { - if (visibility != NULL) { - (*visibility) = values[0]; - } - - idx++; - } - - if (idx < cnt && (library != NULL) && (type != NULL)) { - char *decoder = values[idx]; - - for (cnt = 0; cnt < 3; ) { - char *token = strsep(&decoder, ": {}"); - if (token == NULL) { - break; - } - - if 
(*token == '\0') { - continue; - } - - values[cnt++] = token; - } - - if (cnt == 2) { - (*library) = values[0]; - (*type) = values[1]; - } - - if (cnt == 1) { - (*library) = "builtin"; - (*type) = values[0]; - } - } + char *values[3] = { NULL }; + int cnt = 0; + int idx = 0; + + for (; cnt < 3;) { + char *token = strsep(&annotated, ", {}"); + if (token == NULL) { + break; + } + + if (*token == '\0') { + continue; + } + + values[cnt++] = token; + } + + if ((cnt > 0) && (!strcmp(values[0], "public") || !strcmp(values[0], "private"))) { + if (visibility != NULL) { + (*visibility) = values[0]; + } + + idx++; + } + + if (idx < cnt && (library != NULL) && (type != NULL)) { + char *decoder = values[idx]; + + for (cnt = 0; cnt < 3;) { + char *token = strsep(&decoder, ": {}"); + if (token == NULL) { + break; + } + + if (*token == '\0') { + continue; + } + + values[cnt++] = token; + } + + if (cnt == 2) { + (*library) = values[0]; + (*type) = values[1]; + } + + if (cnt == 1) { + (*library) = "builtin"; + (*type) = values[0]; + } + } } #endif /* !KERNEL */ @@ -146,384 +146,384 @@ OS_ALWAYS_INLINE static inline bool _os_log_encode_arg(void *arg, uint16_t arg_len, os_log_value_type_t ctype, bool is_private, os_log_buffer_context_t context) { - os_log_buffer_value_t content = (os_log_buffer_value_t) &context->buffer->content[context->content_off]; - size_t content_sz = sizeof(*content) + arg_len; - char tempString[OS_LOG_BUFFER_MAX_SIZE] = {}; + os_log_buffer_value_t content = (os_log_buffer_value_t) &context->buffer->content[context->content_off]; + size_t content_sz = sizeof(*content) + arg_len; + char tempString[OS_LOG_BUFFER_MAX_SIZE] = {}; #ifndef KERNEL - bool obj_private = true; + bool obj_private = true; #endif #ifdef KERNEL - /* scrub kernel pointers */ - if (doprnt_hide_pointers && - ctype == OS_LOG_BUFFER_VALUE_TYPE_SCALAR && - arg_len >= sizeof(void *)) { - unsigned long long value = 0; - memcpy(&value, arg, arg_len); + /* scrub kernel pointers */ + if (doprnt_hide_pointers && + ctype == OS_LOG_BUFFER_VALUE_TYPE_SCALAR && + arg_len >= sizeof(void *)) { + unsigned long long value = 0; + memcpy(&value, arg, arg_len); #if __has_feature(ptrauth_calls) - /** - * Strip out the pointer authentication code before - * checking whether the pointer is a kernel address. - */ - value = (unsigned long long)VM_KERNEL_STRIP_PTR(value); + /** + * Strip out the pointer authentication code before + * checking whether the pointer is a kernel address. + */ + value = (unsigned long long)VM_KERNEL_STRIP_PTR(value); #endif /* __has_feature(ptrauth_calls) */ - if (value >= VM_MIN_KERNEL_AND_KEXT_ADDRESS && value <= VM_MAX_KERNEL_ADDRESS) { - is_private = true; - bzero(arg, arg_len); - } - } + if (value >= VM_MIN_KERNEL_AND_KEXT_ADDRESS && value <= VM_MAX_KERNEL_ADDRESS) { + is_private = true; + bzero(arg, arg_len); + } + } #endif - content->type = ctype; - content->flags = (is_private ? OS_LOG_CONTENT_FLAG_PRIVATE : 0); - + content->type = ctype; + content->flags = (is_private ? 
OS_LOG_CONTENT_FLAG_PRIVATE : 0); + #ifndef KERNEL - if (context->annotated != NULL) { - const char *visibility = NULL; - - _os_log_parse_annotated(context->annotated, &visibility, NULL, NULL); - if (visibility) { - if (!strcasecmp(visibility, "private")) { - content->flags |= OS_LOG_CONTENT_FLAG_PRIVATE; - } else if (!strcasecmp(visibility, "public")) { - content->flags &= ~OS_LOG_CONTENT_FLAG_PRIVATE; - } - } - - context->annotated = NULL; - } + if (context->annotated != NULL) { + const char *visibility = NULL; + + _os_log_parse_annotated(context->annotated, &visibility, NULL, NULL); + if (visibility) { + if (!strcasecmp(visibility, "private")) { + content->flags |= OS_LOG_CONTENT_FLAG_PRIVATE; + } else if (!strcasecmp(visibility, "public")) { + content->flags &= ~OS_LOG_CONTENT_FLAG_PRIVATE; + } + } + + context->annotated = NULL; + } #endif /* !KERNEL */ - - switch (ctype) { - case OS_LOG_BUFFER_VALUE_TYPE_COUNT: - case OS_LOG_BUFFER_VALUE_TYPE_SCALAR: - if (is_private) { - _encode_data(content, tempString, strlen(tempString) + 1, context); - } else { - if ((context->content_off + content_sz) > context->content_sz) { - return false; - } - - memcpy(content->value, arg, arg_len); - content->size = arg_len; - context->content_off += content_sz; - } - break; - - case OS_LOG_BUFFER_VALUE_TYPE_STRING: - context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; - if (_os_log_string_is_public(arg)) { - content->flags &= ~OS_LOG_CONTENT_FLAG_PRIVATE; - } - - _encode_data(content, arg, arg_len, context); - break; - + + switch (ctype) { + case OS_LOG_BUFFER_VALUE_TYPE_COUNT: + case OS_LOG_BUFFER_VALUE_TYPE_SCALAR: + if (is_private) { + _encode_data(content, tempString, strlen(tempString) + 1, context); + } else { + if ((context->content_off + content_sz) > context->content_sz) { + return false; + } + + memcpy(content->value, arg, arg_len); + content->size = arg_len; + context->content_off += content_sz; + } + break; + + case OS_LOG_BUFFER_VALUE_TYPE_STRING: + context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; + if (_os_log_string_is_public(arg)) { + content->flags &= ~OS_LOG_CONTENT_FLAG_PRIVATE; + } + + _encode_data(content, arg, arg_len, context); + break; + #ifndef KERNEL - case OS_LOG_BUFFER_VALUE_TYPE_POINTER: - context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; - _encode_data(content, arg, arg_len, context); - break; - - case OS_LOG_BUFFER_VALUE_TYPE_OBJECT: - context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; - if (!_NSCF2data(arg, tempString, sizeof(tempString), &obj_private)) { - tempString[0] = '\0'; - } - - if (!obj_private) { - content->flags &= ~OS_LOG_CONTENT_FLAG_PRIVATE; - } - - _encode_data(content, tempString, strlen(tempString) + 1, context); - break; + case OS_LOG_BUFFER_VALUE_TYPE_POINTER: + context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; + _encode_data(content, arg, arg_len, context); + break; + + case OS_LOG_BUFFER_VALUE_TYPE_OBJECT: + context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; + if (!_NSCF2data(arg, tempString, sizeof(tempString), &obj_private)) { + tempString[0] = '\0'; + } + + if (!obj_private) { + content->flags &= ~OS_LOG_CONTENT_FLAG_PRIVATE; + } + + _encode_data(content, tempString, strlen(tempString) + 1, context); + break; #endif /* !KERNEL */ - } - - if (content->flags & OS_LOG_CONTENT_FLAG_PRIVATE) { - context->buffer->flags |= OS_LOG_BUFFER_HAS_PRIVATE; - } - - context->arg_idx++; - - return true; + } + + if (content->flags & OS_LOG_CONTENT_FLAG_PRIVATE) { + context->buffer->flags |= OS_LOG_BUFFER_HAS_PRIVATE; + } + + 
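	/*
	 * (Editor's illustrative aside, not part of this patch: each encoded
	 * argument is laid out in buffer->content as a small header followed
	 * by its payload, per os_log_buffer_value_s in log_encode_types.h:
	 *
	 *     | flags: 4 bits | type: 4 bits | size: 1 byte | value[size] |
	 *
	 * so a scalar int consumes 2 + sizeof(int) bytes of content space.)
	 */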
context->arg_idx++; + + return true; } static bool _os_log_encode(const char *format, va_list args, int saved_errno, os_log_buffer_context_t context) { - const char *percent = strchr(format, '%'); + const char *percent = strchr(format, '%'); #ifndef KERNEL - char annotated[256]; + char annotated[256]; #endif - - while (percent != NULL) { - ++percent; - if (percent[0] != '%') { - struct os_log_format_value_s value; - int type = OST_INT; + + while (percent != NULL) { + ++percent; + if (percent[0] != '%') { + struct os_log_format_value_s value; + int type = OST_INT; #ifndef KERNEL - bool long_double = false; + bool long_double = false; #endif - int prec = 0; - char ch; - - for (bool done = false; !done; percent++) { - switch (ch = percent[0]) { - /* type of types or other */ - case 'l': // longer - type++; - break; - - case 'h': // shorter - type--; - break; - - case 'z': - type = OST_SIZE; - break; - - case 'j': - type = OST_INTMAX; - break; - - case 't': - type = OST_PTRDIFF; - break; - - case '.': // precision - if ((percent[1]) == '*') { - prec = va_arg(args, int); - _os_log_encode_arg(&prec, sizeof(prec), OS_LOG_BUFFER_VALUE_TYPE_COUNT, false, context); - percent++; - continue; - } else { - // we have to read the precision and do the right thing - const char *fmt = percent + 1; - prec = 0; - while (isdigit(ch = *fmt++)) { - prec = 10 * prec + (ch - '0'); - } - - if (prec > 1024) { - prec = 1024; - } - - _os_log_encode_arg(&prec, sizeof(prec), OS_LOG_BUFFER_VALUE_TYPE_COUNT, false, context); - } - break; - - case '-': // left-align - case '+': // force sign - case ' ': // prefix non-negative with space - case '#': // alternate - case '\'': // group by thousands - break; - - /* fixed types */ - case 'd': // integer - case 'i': // integer - case 'o': // octal - case 'u': // unsigned - case 'x': // hex - case 'X': // upper-hex - switch (type) { - case OST_CHAR: - value.type.ch = va_arg(args, int); - _os_log_encode_arg(&value.type.ch, sizeof(value.type.ch), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - break; - - case OST_SHORT: - value.type.s = va_arg(args, int); - _os_log_encode_arg(&value.type.s, sizeof(value.type.s), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - break; - - case OST_INT: - value.type.i = va_arg(args, int); - _os_log_encode_arg(&value.type.i, sizeof(value.type.i), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - break; - - case OST_LONG: - value.type.l = va_arg(args, long); - _os_log_encode_arg(&value.type.l, sizeof(value.type.l), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - break; - - case OST_LONGLONG: - value.type.ll = va_arg(args, long long); - _os_log_encode_arg(&value.type.ll, sizeof(value.type.ll), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - break; - - case OST_SIZE: - value.type.z = va_arg(args, size_t); - _os_log_encode_arg(&value.type.z, sizeof(value.type.z), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - break; - - case OST_INTMAX: - value.type.im = va_arg(args, intmax_t); - _os_log_encode_arg(&value.type.im, sizeof(value.type.im), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - break; - - case OST_PTRDIFF: - value.type.pd = va_arg(args, ptrdiff_t); - _os_log_encode_arg(&value.type.pd, sizeof(value.type.pd), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - break; - - default: - return false; - } - done = true; - break; - + int prec = 0; + char ch; + + for (bool done = false; !done; percent++) { + switch (ch = percent[0]) { + /* type of types or other */ + case 'l': // longer + type++; + break; + + case 'h': // shorter + 
type--; + break; + + case 'z': + type = OST_SIZE; + break; + + case 'j': + type = OST_INTMAX; + break; + + case 't': + type = OST_PTRDIFF; + break; + + case '.': // precision + if ((percent[1]) == '*') { + prec = va_arg(args, int); + _os_log_encode_arg(&prec, sizeof(prec), OS_LOG_BUFFER_VALUE_TYPE_COUNT, false, context); + percent++; + continue; + } else { + // we have to read the precision and do the right thing + const char *fmt = percent + 1; + prec = 0; + while (isdigit(ch = *fmt++)) { + prec = 10 * prec + (ch - '0'); + } + + if (prec > 1024) { + prec = 1024; + } + + _os_log_encode_arg(&prec, sizeof(prec), OS_LOG_BUFFER_VALUE_TYPE_COUNT, false, context); + } + break; + + case '-': // left-align + case '+': // force sign + case ' ': // prefix non-negative with space + case '#': // alternate + case '\'': // group by thousands + break; + + /* fixed types */ + case 'd': // integer + case 'i': // integer + case 'o': // octal + case 'u': // unsigned + case 'x': // hex + case 'X': // upper-hex + switch (type) { + case OST_CHAR: + value.type.ch = va_arg(args, int); + _os_log_encode_arg(&value.type.ch, sizeof(value.type.ch), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + break; + + case OST_SHORT: + value.type.s = va_arg(args, int); + _os_log_encode_arg(&value.type.s, sizeof(value.type.s), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + break; + + case OST_INT: + value.type.i = va_arg(args, int); + _os_log_encode_arg(&value.type.i, sizeof(value.type.i), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + break; + + case OST_LONG: + value.type.l = va_arg(args, long); + _os_log_encode_arg(&value.type.l, sizeof(value.type.l), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + break; + + case OST_LONGLONG: + value.type.ll = va_arg(args, long long); + _os_log_encode_arg(&value.type.ll, sizeof(value.type.ll), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + break; + + case OST_SIZE: + value.type.z = va_arg(args, size_t); + _os_log_encode_arg(&value.type.z, sizeof(value.type.z), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + break; + + case OST_INTMAX: + value.type.im = va_arg(args, intmax_t); + _os_log_encode_arg(&value.type.im, sizeof(value.type.im), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + break; + + case OST_PTRDIFF: + value.type.pd = va_arg(args, ptrdiff_t); + _os_log_encode_arg(&value.type.pd, sizeof(value.type.pd), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + break; + + default: + return false; + } + done = true; + break; + #ifndef KERNEL - case '{': - // we do not support this for shimmed code - if (context->shimmed) { - return false; - } - - for (const char *curr2 = percent + 1; (ch = (*curr2)) != NUL; curr2++) { - if (ch == '}') { - strlcpy(annotated, percent, MIN(curr2 - (percent + 1), sizeof(annotated))); - context->annotated = annotated; - percent = curr2; - break; - } - } - break; + case '{': + // we do not support this for shimmed code + if (context->shimmed) { + return false; + } + + for (const char *curr2 = percent + 1; (ch = (*curr2)) != NUL; curr2++) { + if (ch == '}') { + strlcpy(annotated, percent, MIN(curr2 - (percent + 1), sizeof(annotated))); + context->annotated = annotated; + percent = curr2; + break; + } + } + break; #endif /* !KERNEL */ - - case 'p': // pointer - value.type.p = va_arg(args, void *); - _os_log_encode_arg(&value.type.p, sizeof(value.type.p), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - done = true; - break; - + + case 'p': // pointer + value.type.p = va_arg(args, void *); + _os_log_encode_arg(&value.type.p, 
sizeof(value.type.p), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + done = true; + break; + #ifndef KERNEL - case 'P': // pointer data - if (context->shimmed) { // we do not support this for shimmed code - return false; - } - - context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; - value.type.p = va_arg(args, void *); - - // capture the string pointer to generate a symptom - if (context->log && context->log->generate_symptoms && context->arg_idx == 1 && value.type.pch && prec) { - context->symptom_ptr = value.type.p; - context->symptom_ptr_len = prec; - } - - _os_log_encode_arg(value.type.p, prec, OS_LOG_BUFFER_VALUE_TYPE_POINTER, false, context); - prec = 0; - done = true; - break; + case 'P': // pointer data + if (context->shimmed) { // we do not support this for shimmed code + return false; + } + + context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; + value.type.p = va_arg(args, void *); + + // capture the string pointer to generate a symptom + if (context->log && context->log->generate_symptoms && context->arg_idx == 1 && value.type.pch && prec) { + context->symptom_ptr = value.type.p; + context->symptom_ptr_len = prec; + } + + _os_log_encode_arg(value.type.p, prec, OS_LOG_BUFFER_VALUE_TYPE_POINTER, false, context); + prec = 0; + done = true; + break; #endif /* !KERNEL */ - + #ifndef KERNEL - case 'L': // long double - long_double = true; - break; - - case 'a': case 'A': case 'e': case 'E': // floating types - case 'f': case 'F': case 'g': case 'G': - if (long_double) { - value.type.ld = va_arg(args, long double); - _os_log_encode_arg(&value.type.ld, sizeof(value.type.ld), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - } else { - value.type.d = va_arg(args, double); - _os_log_encode_arg(&value.type.d, sizeof(value.type.d), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - } - done = true; - break; + case 'L': // long double + long_double = true; + break; + + case 'a': case 'A': case 'e': case 'E': // floating types + case 'f': case 'F': case 'g': case 'G': + if (long_double) { + value.type.ld = va_arg(args, long double); + _os_log_encode_arg(&value.type.ld, sizeof(value.type.ld), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + } else { + value.type.d = va_arg(args, double); + _os_log_encode_arg(&value.type.d, sizeof(value.type.d), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + } + done = true; + break; #endif /* !KERNEL */ - - case 'c': // char - value.type.ch = va_arg(args, int); - _os_log_encode_arg(&value.type.ch, sizeof(value.type.ch), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - done = true; - break; - + + case 'c': // char + value.type.ch = va_arg(args, int); + _os_log_encode_arg(&value.type.ch, sizeof(value.type.ch), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + done = true; + break; + #ifndef KERNEL - case 'C': // wide-char - value.type.wch = va_arg(args, wint_t); - _os_log_encode_arg(&value.type.wch, sizeof(value.type.wch), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - done = true; - break; + case 'C': // wide-char + value.type.wch = va_arg(args, wint_t); + _os_log_encode_arg(&value.type.wch, sizeof(value.type.wch), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + done = true; + break; #endif /* !KERNEL */ - - case 's': // string - value.type.pch = va_arg(args, char *); - if (!prec && value.type.pch != NULL) { - prec = (int) strlen(value.type.pch) + 1; - } - + + case 's': // string + value.type.pch = va_arg(args, char *); + if (!prec && value.type.pch != NULL) { + prec = (int) strlen(value.type.pch) + 1; + } + #ifndef KERNEL - 
// capture the string pointer to generate a symptom - if (context->log && context->log->generate_symptoms && context->arg_idx == 0 && value.type.pch) { - context->symptom_str = value.type.pch; - } + // capture the string pointer to generate a symptom + if (context->log && context->log->generate_symptoms && context->arg_idx == 0 && value.type.pch) { + context->symptom_str = value.type.pch; + } #endif - - context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; - _os_log_encode_arg(value.type.pch, prec, OS_LOG_BUFFER_VALUE_TYPE_STRING, false, context); - prec = 0; - done = true; - break; - + + context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; + _os_log_encode_arg(value.type.pch, prec, OS_LOG_BUFFER_VALUE_TYPE_STRING, false, context); + prec = 0; + done = true; + break; + #ifndef KERNEL - case 'S': // wide-string - value.type.pwch = va_arg(args, wchar_t *); - if (!prec && value.type.pwch != NULL) { - prec = (int) wcslen(value.type.pwch) + 1; - } - - context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; - _os_log_encode_arg(value.type.pwch, prec, OS_LOG_BUFFER_VALUE_TYPE_STRING, false, context); - prec = 0; - done = true; - break; + case 'S': // wide-string + value.type.pwch = va_arg(args, wchar_t *); + if (!prec && value.type.pwch != NULL) { + prec = (int) wcslen(value.type.pwch) + 1; + } + + context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; + _os_log_encode_arg(value.type.pwch, prec, OS_LOG_BUFFER_VALUE_TYPE_STRING, false, context); + prec = 0; + done = true; + break; #endif /* !KERNEL */ - + #ifndef KERNEL - case '@': // CFTypeRef aka NSObject * - context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; - _os_log_encode_arg(va_arg(args, void *), 0, OS_LOG_BUFFER_VALUE_TYPE_OBJECT, false, context); - done = true; - break; + case '@': // CFTypeRef aka NSObject * + context->buffer->flags |= OS_LOG_BUFFER_HAS_NON_SCALAR; + _os_log_encode_arg(va_arg(args, void *), 0, OS_LOG_BUFFER_VALUE_TYPE_OBJECT, false, context); + done = true; + break; #endif /* !KERNEL */ - - case 'm': - value.type.i = saved_errno; - _os_log_encode_arg(&value.type.i, sizeof(value.type.i), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); - done = true; - break; - - default: - if (isdigit(ch)) { // [0-9] - continue; - } - return false; - } - - if (done) { - percent = strchr(percent, '%'); // Find next format - break; - } - } - } else { - percent = strchr(percent+1, '%'); // Find next format after %% - } - } - - context->buffer->arg_cnt = context->arg_idx; - context->content_sz = context->content_off; - context->pubdata_sz = context->pubdata_off; - context->privdata_sz = context->privdata_off; - context->arg_idx = context->content_off = context->pubdata_off = context->privdata_off = 0; - - return true; + + case 'm': + value.type.i = saved_errno; + _os_log_encode_arg(&value.type.i, sizeof(value.type.i), OS_LOG_BUFFER_VALUE_TYPE_SCALAR, false, context); + done = true; + break; + + default: + if (isdigit(ch)) { // [0-9] + continue; + } + return false; + } + + if (done) { + percent = strchr(percent, '%'); // Find next format + break; + } + } + } else { + percent = strchr(percent + 1, '%'); // Find next format after %% + } + } + + context->buffer->arg_cnt = context->arg_idx; + context->content_sz = context->content_off; + context->pubdata_sz = context->pubdata_off; + context->privdata_sz = context->privdata_off; + context->arg_idx = context->content_off = context->pubdata_off = context->privdata_off = 0; + + return true; } #endif /* log_encode_h */ diff --git a/libkern/os/log_encode_types.h 
b/libkern/os/log_encode_types.h index ae14192c3..e07364752 100644 --- a/libkern/os/log_encode_types.h +++ b/libkern/os/log_encode_types.h @@ -39,63 +39,63 @@ #pragma mark - buffer support structures, enums OS_ENUM(os_log_value_type, uint8_t, - OS_LOG_BUFFER_VALUE_TYPE_SCALAR = 0, - OS_LOG_BUFFER_VALUE_TYPE_COUNT = 1, - OS_LOG_BUFFER_VALUE_TYPE_STRING = 2, + OS_LOG_BUFFER_VALUE_TYPE_SCALAR = 0, + OS_LOG_BUFFER_VALUE_TYPE_COUNT = 1, + OS_LOG_BUFFER_VALUE_TYPE_STRING = 2, #ifndef KERNEL - OS_LOG_BUFFER_VALUE_TYPE_POINTER = 3, - OS_LOG_BUFFER_VALUE_TYPE_OBJECT = 4, + OS_LOG_BUFFER_VALUE_TYPE_POINTER = 3, + OS_LOG_BUFFER_VALUE_TYPE_OBJECT = 4, #endif - ); + ); OS_ENUM(os_log_value_subtype, uint8_t, - OS_LOG_BUFFER_VALUE_SUBTYPE_NONE = 0, - OS_LOG_BUFFER_VALUE_SUBTYPE_INTEGER = 1, + OS_LOG_BUFFER_VALUE_SUBTYPE_NONE = 0, + OS_LOG_BUFFER_VALUE_SUBTYPE_INTEGER = 1, #ifndef KERNEL - OS_LOG_BUFFER_VALUE_SUBTYPE_FLOAT = 2, + OS_LOG_BUFFER_VALUE_SUBTYPE_FLOAT = 2, #endif - ); + ); enum os_log_int_types_t { - OST_CHAR = -2, - OST_SHORT = -1, - OST_INT = 0, - OST_LONG = 1, - OST_LONGLONG = 2, - OST_SIZE = 3, - OST_INTMAX = 4, - OST_PTRDIFF = 5, + OST_CHAR = -2, + OST_SHORT = -1, + OST_INT = 0, + OST_LONG = 1, + OST_LONGLONG = 2, + OST_SIZE = 3, + OST_INTMAX = 4, + OST_PTRDIFF = 5, }; union os_log_format_types_u { - uint16_t u16; - uint32_t u32; - uint64_t u64; - char ch; - short s; - int i; - void *p; - char *pch; + uint16_t u16; + uint32_t u32; + uint64_t u64; + char ch; + short s; + int i; + void *p; + char *pch; #ifndef KERNEL - wchar_t wch; - wchar_t *pwch; + wchar_t wch; + wchar_t *pwch; #endif - size_t z; - intmax_t im; - ptrdiff_t pd; - long l; - long long ll; + size_t z; + intmax_t im; + ptrdiff_t pd; + long l; + long long ll; #ifndef KERNEL - double d; - float f; - long double ld; + double d; + float f; + long double ld; #endif }; typedef struct os_log_format_value_s { - union os_log_format_types_u type; - os_log_value_type_t ctype; - uint16_t size; + union os_log_format_types_u type; + os_log_value_type_t ctype; + uint16_t size; } *os_log_format_value_t; #define OST_FORMAT_MAX_ARGS 48 @@ -109,10 +109,10 @@ typedef struct os_log_format_value_s { typedef struct os_log_buffer_value_s { #define OS_LOG_CONTENT_FLAG_PRIVATE 0x1 - uint8_t flags : 4; - os_log_value_type_t type : 4; - uint8_t size; - uint8_t value[]; + uint8_t flags : 4; + os_log_value_type_t type : 4; + uint8_t size; + uint8_t value[]; } *os_log_buffer_value_t; typedef struct os_log_buffer_s { @@ -123,48 +123,48 @@ typedef struct os_log_buffer_s { #else #define OS_LOG_BUFFER_MAX_SIZE 1024 #endif - uint8_t flags; - uint8_t arg_cnt; - uint8_t content[]; + uint8_t flags; + uint8_t arg_cnt; + uint8_t content[]; } *os_log_buffer_t; typedef struct os_log_buffer_context_s { - os_log_t log; - os_log_buffer_t buffer; - uint8_t *pubdata; - uint8_t *privdata; - - // composed string - char *comp; - size_t comp_off; - size_t comp_sz; - - // sizes and offsets - uint16_t content_off; // offset into buffer->content - uint16_t content_sz; // size not including the header - uint16_t pubdata_off; - uint16_t pubdata_sz; - uint16_t privdata_off; - uint16_t privdata_sz; - - uint8_t arg_idx; - - // if argument content was limited with %.* or %.# + os_log_t log; + os_log_buffer_t buffer; + uint8_t *pubdata; + uint8_t *privdata; + + // composed string + char *comp; + size_t comp_off; + size_t comp_sz; + + // sizes and offsets + uint16_t content_off; // offset into buffer->content + uint16_t content_sz; // size not including the header + uint16_t pubdata_off; + 
uint16_t pubdata_sz; + uint16_t privdata_off; + uint16_t privdata_sz; + + uint8_t arg_idx; + + // if argument content was limited with %.* or %.# #ifndef KERNEL - const char *symptom_str; - const void *symptom_ptr; - uint16_t symptom_ptr_len; - char *annotated; + const char *symptom_str; + const void *symptom_ptr; + uint16_t symptom_ptr_len; + char *annotated; #endif - int arg_content_sz; - bool need_size; - bool shimmed; + int arg_content_sz; + bool need_size; + bool shimmed; } *os_log_buffer_context_t; typedef struct os_log_arginfo_s { - uint16_t offset; - uint16_t length; + uint16_t offset; + uint16_t length; } *os_log_arginfo_t; /* Clients of these interfaces/structures may be expected to provide implementations of the following functions */ diff --git a/libkern/os/log_private.h b/libkern/os/log_private.h index 47660ede8..ce06763d4 100644 --- a/libkern/os/log_private.h +++ b/libkern/os/log_private.h @@ -64,7 +64,7 @@ __BEGIN_DECLS * va_list from variadic arguments. The caller must be the same binary * that generated the message and provided the format string. */ -__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) + __OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) OS_EXPORT OS_NOTHROW OS_LOG_NOTAILCALL void os_log_with_args(os_log_t oslog, os_log_type_t type, const char *format, va_list args, void *ret_addr); @@ -73,9 +73,9 @@ os_log_with_args(os_log_t oslog, os_log_type_t type, const char *format, va_list * @enum oslog_stream_link_type_t */ OS_ENUM(oslog_stream_link_type, uint8_t, - oslog_stream_link_type_log = 0x0, - oslog_stream_link_type_metadata = 0x1, -); + oslog_stream_link_type_log = 0x0, + oslog_stream_link_type_metadata = 0x1, + ); /*! * @typedef oslog_stream_buf_entry_t diff --git a/libkern/os/object.h b/libkern/os/object.h index a42ae8bd5..16cfea0f0 100644 --- a/libkern/os/object.h +++ b/libkern/os/object.h @@ -51,8 +51,8 @@ #ifndef OS_OBJECT_HAVE_OBJC_SUPPORT #if defined(__OBJC__) && defined(__OBJC2__) && !defined(__OBJC_GC__) && ( \ - __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_8 || \ - __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_6_0) + __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_8 || \ + __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_6_0) #define OS_OBJECT_HAVE_OBJC_SUPPORT 1 #else #define OS_OBJECT_HAVE_OBJC_SUPPORT 0 @@ -83,14 +83,14 @@ #endif #define OS_OBJECT_CLASS(name) OS_##name #define OS_OBJECT_DECL_IMPL(name, ...) \ - @protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \ - @end \ - typedef NSObject \ - * OS_OBJC_INDEPENDENT_CLASS name##_t + @protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \ + @end \ + typedef NSObject \ + * OS_OBJC_INDEPENDENT_CLASS name##_t #define OS_OBJECT_DECL(name, ...) \ - OS_OBJECT_DECL_IMPL(name, ) + OS_OBJECT_DECL_IMPL(name, ) #define OS_OBJECT_DECL_SUBCLASS(name, super) \ - OS_OBJECT_DECL_IMPL(name, ) + OS_OBJECT_DECL_IMPL(name, ) #if defined(__has_attribute) #if __has_attribute(ns_returns_retained) #define OS_OBJECT_RETURNS_RETAINED __attribute__((__ns_returns_retained__)) @@ -163,7 +163,7 @@ __BEGIN_DECLS * @result * The retained object. */ -__OSX_AVAILABLE_STARTING(__MAC_10_12,__IPHONE_10_0) + __OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) OS_EXPORT void* os_retain(void *object); @@ -185,7 +185,7 @@ os_retain(void *object); * @param object * The object to release. 
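A note on the retain/release pair documented here: os_retain() takes a +1 reference and returns its argument, and os_release() balances it, with the final release disposing of the object. A minimal usage sketch, assuming an os_object-backed pointer; publish() and finish() are hypothetical helper names, not APIs from this patch:

	#include <os/object.h>

	static void *
	publish(void *obj)
	{
		/* take a +1 reference before handing the object to
		 * another owner; os_retain() returns its argument */
		return os_retain(obj);
	}

	static void
	finish(void *obj)
	{
		/* balance the retain above; the last release disposes
		 * of the object */
		os_release(obj);
	}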
*/ -__OSX_AVAILABLE_STARTING(__MAC_10_12,__IPHONE_10_0) +__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) OS_EXPORT void os_release(void *object); diff --git a/libkern/os/object_private.h b/libkern/os/object_private.h index 0f2f01dff..5908a640b 100644 --- a/libkern/os/object_private.h +++ b/libkern/os/object_private.h @@ -66,24 +66,24 @@ #define _OS_OBJECT_GLOBAL_REFCNT INT_MAX #define _OS_OBJECT_HEADER(isa, ref_cnt, xref_cnt) \ - isa; /* must be pointer-sized */ \ - int volatile ref_cnt; \ - int volatile xref_cnt + isa; /* must be pointer-sized */ \ + int volatile ref_cnt; \ + int volatile xref_cnt #if OS_OBJECT_HAVE_OBJC_SUPPORT // Must match size of compiler-generated OBJC_CLASS structure rdar://10640168 #define _OS_OBJECT_CLASS_HEADER() \ - void *_os_obj_objc_class_t[5] + void *_os_obj_objc_class_t[5] #else #define _OS_OBJECT_CLASS_HEADER() \ - void (*_os_obj_xref_dispose)(_os_object_t); \ - void (*_os_obj_dispose)(_os_object_t) + void (*_os_obj_xref_dispose)(_os_object_t); \ + void (*_os_obj_dispose)(_os_object_t) #endif #define OS_OBJECT_CLASS(name) OS_##name #if OS_OBJECT_USE_OBJC -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) OS_OBJECT_EXPORT @interface OS_OBJECT_CLASS(object) : NSObject - (void)_xref_dispose; @@ -91,9 +91,9 @@ OS_OBJECT_EXPORT @end typedef OS_OBJECT_CLASS(object) *_os_object_t; #define _OS_OBJECT_DECL_SUBCLASS_INTERFACE(name, super) \ - @interface OS_OBJECT_CLASS(name) : OS_OBJECT_CLASS(super) \ - \ - @end + @interface OS_OBJECT_CLASS(name) : OS_OBJECT_CLASS(super) \ + \ + @end #else typedef struct _os_object_s *_os_object_t; #endif @@ -102,41 +102,41 @@ __BEGIN_DECLS #if !_OS_OBJECT_OBJC_ARC -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) + __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW _os_object_t _os_object_alloc(const void *cls, size_t size); -__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0) +__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0) OS_OBJECT_EXPORT OS_OBJECT_MALLOC OS_OBJECT_WARN_RESULT OS_OBJECT_NOTHROW _os_object_t _os_object_alloc_realized(const void *cls, size_t size); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW void _os_object_dealloc(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW _os_object_t _os_object_retain(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_11,__IPHONE_9_0) +__OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW _os_object_t _os_object_retain_with_resurrect(_os_object_t obj); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW void _os_object_release(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW _os_object_t _os_object_retain_internal(_os_object_t object); -__OSX_AVAILABLE_STARTING(__MAC_10_8,__IPHONE_6_0) +__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0) OS_OBJECT_EXPORT OS_OBJECT_NONNULL OS_OBJECT_NOTHROW void _os_object_release_internal(_os_object_t object); diff --git a/libkern/os/overflow.h b/libkern/os/overflow.h index 05ddbef39..abf04917a 100644 --- 
a/libkern/os/overflow.h +++ b/libkern/os/overflow.h @@ -52,8 +52,8 @@ __os_warn_unused(__const bool x) } #if __has_builtin(__builtin_add_overflow) && \ - __has_builtin(__builtin_sub_overflow) && \ - __has_builtin(__builtin_mul_overflow) + __has_builtin(__builtin_sub_overflow) && \ + __has_builtin(__builtin_mul_overflow) #define os_add_overflow(a, b, res) __os_warn_unused(__builtin_add_overflow((a), (b), (res))) #define os_sub_overflow(a, b, res) __os_warn_unused(__builtin_sub_overflow((a), (b), (res))) @@ -71,35 +71,35 @@ __os_warn_unused(__const bool x) #else #define __OS_TYPE_CHECK(x, y) do { \ _Static_assert(__builtin_types_compatible_p(__typeof(x),__typeof(y)), \ - "overflow arithmetic: incompatible types"); \ + "overflow arithmetic: incompatible types"); \ } while (0) #endif -#define __os_add_overflow_func(T,U,V) _Generic((T), \ - unsigned: __builtin_uadd_overflow, \ - unsigned long: __builtin_uaddl_overflow, \ - unsigned long long: __builtin_uaddll_overflow, \ - int: __builtin_sadd_overflow, \ - long: __builtin_saddl_overflow, \ - long long: __builtin_saddll_overflow \ +#define __os_add_overflow_func(T, U, V) _Generic((T),\ + unsigned: __builtin_uadd_overflow, \ + unsigned long: __builtin_uaddl_overflow, \ + unsigned long long: __builtin_uaddll_overflow, \ + int: __builtin_sadd_overflow, \ + long: __builtin_saddl_overflow, \ + long long: __builtin_saddll_overflow \ )(T,U,V) -#define __os_sub_overflow_func(T,U,V) _Generic((T), \ - unsigned: __builtin_usub_overflow, \ - unsigned long: __builtin_usubl_overflow, \ - unsigned long long: __builtin_usubll_overflow, \ - int: __builtin_ssub_overflow, \ - long: __builtin_ssubl_overflow, \ - long long: __builtin_ssubll_overflow \ +#define __os_sub_overflow_func(T, U, V) _Generic((T),\ + unsigned: __builtin_usub_overflow, \ + unsigned long: __builtin_usubl_overflow, \ + unsigned long long: __builtin_usubll_overflow, \ + int: __builtin_ssub_overflow, \ + long: __builtin_ssubl_overflow, \ + long long: __builtin_ssubll_overflow \ )(T,U,V) -#define __os_mul_overflow_func(T,U,V) _Generic((T), \ - unsigned: __builtin_umul_overflow, \ - unsigned long: __builtin_umull_overflow, \ - unsigned long long: __builtin_umulll_overflow, \ - int: __builtin_smul_overflow, \ - long: __builtin_smull_overflow, \ - long long: __builtin_smulll_overflow \ +#define __os_mul_overflow_func(T, U, V) _Generic((T),\ + unsigned: __builtin_umul_overflow, \ + unsigned long: __builtin_umull_overflow, \ + unsigned long long: __builtin_umulll_overflow, \ + int: __builtin_smul_overflow, \ + long: __builtin_smull_overflow, \ + long long: __builtin_smulll_overflow \ )(T,U,V) #define os_add_overflow(a, b, res) __os_warn_unused(__extension__({ \ diff --git a/libkern/os/reason_private.h b/libkern/os/reason_private.h index 56a68f1f1..a83940b07 100644 --- a/libkern/os/reason_private.h +++ b/libkern/os/reason_private.h @@ -35,10 +35,10 @@ /* Codes in the OS_REASON_LIBSYSTEM namespace */ OS_ENUM(os_reason_libsystem_code, uint64_t, - OS_REASON_LIBSYSTEM_CODE_WORKLOOP_OWNERSHIP_LEAK = 1, - OS_REASON_LIBSYSTEM_CODE_FAULT = 2, /* generated by os_log_fault */ - OS_REASON_LIBSYSTEM_CODE_SECINIT_INITIALIZER = 3, -); + OS_REASON_LIBSYSTEM_CODE_WORKLOOP_OWNERSHIP_LEAK = 1, + OS_REASON_LIBSYSTEM_CODE_FAULT = 2, /* generated by os_log_fault */ + OS_REASON_LIBSYSTEM_CODE_SECINIT_INITIALIZER = 3, + ); #ifndef KERNEL @@ -52,8 +52,8 @@ OS_ENUM(os_reason_libsystem_code, uint64_t, */ int os_fault_with_payload(uint32_t reason_namespace, uint64_t reason_code, - void *payload, uint32_t payload_size, const char 
*reason_string, - uint64_t reason_flags); + void *payload, uint32_t payload_size, const char *reason_string, + uint64_t reason_flags); #endif // !KERNEL diff --git a/libkern/os/refcnt.c b/libkern/os/refcnt.c index 539659869..67deb068f 100644 --- a/libkern/os/refcnt.c +++ b/libkern/os/refcnt.c @@ -33,30 +33,51 @@ ref_grp_name(struct os_refcnt __debug_only *rc) return ""; } +__attribute__((cold, noinline, not_tail_called, noreturn)) static void +os_ref_panic_underflow(struct os_refcnt *rc) +{ + panic("os_refcnt: underflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc)); + __builtin_unreachable(); +} + +static inline void os_ref_check_underflow(struct os_refcnt *rc, os_ref_count_t count) { if (__improbable(count == 0)) { - panic("os_refcnt: underflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc)); - __builtin_unreachable(); + os_ref_panic_underflow(rc); } } +__attribute__((cold, noinline, not_tail_called, noreturn)) static void +os_ref_panic_resurrection(struct os_refcnt *rc) +{ + panic("os_refcnt: used unsafely when zero (rc=%p, grp=%s)\n", rc, ref_grp_name(rc)); + __builtin_unreachable(); +} + +static inline void os_ref_assert_referenced(struct os_refcnt *rc, os_ref_count_t count) { if (__improbable(count == 0)) { - panic("os_refcnt: used unsafely when zero (rc=%p, grp=%s)\n", rc, ref_grp_name(rc)); - __builtin_unreachable(); + os_ref_panic_resurrection(rc); } } +__attribute__((cold, noinline, not_tail_called, noreturn)) static void +os_ref_panic_overflow(struct os_refcnt *rc) +{ + panic("os_refcnt: overflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc)); + __builtin_unreachable(); +} + +static inline void os_ref_check_overflow(struct os_refcnt *rc, os_ref_count_t count) { if (__improbable(count >= OS_REFCNT_MAX_COUNT)) { - panic("os_refcnt: overflow (rc=%p, grp=%s)\n", rc, ref_grp_name(rc)); - __builtin_unreachable(); + os_ref_panic_overflow(rc); } } @@ -113,17 +134,21 @@ ref_log_init(struct os_refgrp *grp) return; } + /* + * Enable refcount statistics if the rlog boot-arg is present, + * even when no specific group is logged. + */ + ref_debug_enable = true; + const char *g; while ((g = strsep(&refgrp, ",")) != NULL) { if (strcmp(g, grp->grp_name) == 0) { /* enable logging on this refgrp */ grp->grp_log = btlog_create(ref_log_nrecords, REFLOG_BTDEPTH, true); assert(grp->grp_log); - ref_debug_enable = true; return; } } - } /* @@ -210,8 +235,10 @@ os_ref_retain(struct os_refcnt *rc) os_ref_check_retain(rc, old); #if OS_REFCNT_DEBUG - ref_retain_group(rc->ref_group); - ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN); + if (__improbable(ref_debug_enable)) { + ref_retain_group(rc->ref_group); + ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN); + } #endif } @@ -227,11 +254,13 @@ os_ref_retain_try(struct os_refcnt *rc) os_ref_check_retain(rc, cur); - if (atomic_compare_exchange_weak_explicit(&rc->ref_count, &cur, cur+1, - memory_order_relaxed, memory_order_relaxed)) { + if (atomic_compare_exchange_weak_explicit(&rc->ref_count, &cur, cur + 1, + memory_order_relaxed, memory_order_relaxed)) { #if OS_REFCNT_DEBUG - ref_retain_group(rc->ref_group); - ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN); + if (__improbable(ref_debug_enable)) { + ref_retain_group(rc->ref_group); + ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN); + } #endif return true; } @@ -247,7 +276,9 @@ os_ref_release_explicit(struct os_refcnt *rc, memory_order release_order, memory * under us. 
*/ struct os_refgrp *grp = rc->ref_group; - ref_log_op(grp, (void *)rc, REFLOG_RELEASE); + if (__improbable(ref_debug_enable)) { + ref_log_op(grp, (void *)rc, REFLOG_RELEASE); + } #endif os_ref_count_t val = atomic_fetch_sub_explicit(&rc->ref_count, 1, release_order); @@ -255,12 +286,16 @@ os_ref_release_explicit(struct os_refcnt *rc, memory_order release_order, memory if (__improbable(--val == 0)) { atomic_load_explicit(&rc->ref_count, dealloc_order); #if OS_REFCNT_DEBUG - ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */ + if (__improbable(ref_debug_enable)) { + ref_log_drop(grp, (void *)rc); /* rc is only used as an identifier */ + } #endif } #if OS_REFCNT_DEBUG - ref_release_group(grp, !val); + if (__improbable(ref_debug_enable)) { + ref_release_group(grp, !val); + } #endif return val; @@ -269,30 +304,33 @@ os_ref_release_explicit(struct os_refcnt *rc, memory_order release_order, memory void os_ref_retain_locked(struct os_refcnt *rc) { - os_ref_count_t val = rc->ref_count; + os_ref_count_t val = atomic_load_explicit(&rc->ref_count, memory_order_relaxed); os_ref_check_retain(rc, val); - rc->ref_count = ++val; + atomic_store_explicit(&rc->ref_count, ++val, memory_order_relaxed); #if OS_REFCNT_DEBUG - ref_retain_group(rc->ref_group); - ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN); + if (__improbable(ref_debug_enable)) { + ref_retain_group(rc->ref_group); + ref_log_op(rc->ref_group, (void *)rc, REFLOG_RETAIN); + } #endif } os_ref_count_t os_ref_release_locked(struct os_refcnt *rc) { - os_ref_count_t val = rc->ref_count; + os_ref_count_t val = atomic_load_explicit(&rc->ref_count, memory_order_relaxed); os_ref_check_underflow(rc, val); - rc->ref_count = --val; + atomic_store_explicit(&rc->ref_count, --val, memory_order_relaxed); #if OS_REFCNT_DEBUG - ref_release_group(rc->ref_group, !val); - ref_log_op(rc->ref_group, (void *)rc, REFLOG_RELEASE); - if (val == 0) { - ref_log_drop(rc->ref_group, (void *)rc); + if (__improbable(ref_debug_enable)) { + ref_release_group(rc->ref_group, !val); + ref_log_op(rc->ref_group, (void *)rc, REFLOG_RELEASE); + if (val == 0) { + ref_log_drop(rc->ref_group, (void *)rc); + } } #endif return val; } - diff --git a/libkern/os/refcnt.h b/libkern/os/refcnt.h index 6148059ee..e306d3552 100644 --- a/libkern/os/refcnt.h +++ b/libkern/os/refcnt.h @@ -98,25 +98,25 @@ __BEGIN_DECLS */ #define os_ref_init(rc, grp) os_ref_init_count((rc), (grp), 1) void os_ref_init_count(struct os_refcnt *, struct os_refgrp *, os_ref_count_t count) - os_error_if(count == 0, "Reference count must be non-zero initialized"); +os_error_if(count == 0, "Reference count must be non-zero initialized"); #if OS_REFCNT_DEBUG # define os_refgrp_decl(qual, var, name, parent) \ qual struct os_refgrp __attribute__((section("__DATA,__refgrps"))) var = { \ - .grp_name = (name), \ - .grp_children = ATOMIC_VAR_INIT(0), \ - .grp_count = ATOMIC_VAR_INIT(0), \ - .grp_retain_total = ATOMIC_VAR_INIT(0), \ - .grp_release_total = ATOMIC_VAR_INIT(0), \ - .grp_parent = (parent), \ - .grp_log = NULL, \ + .grp_name = (name), \ + .grp_children = ATOMIC_VAR_INIT(0), \ + .grp_count = ATOMIC_VAR_INIT(0), \ + .grp_retain_total = ATOMIC_VAR_INIT(0), \ + .grp_release_total = ATOMIC_VAR_INIT(0), \ + .grp_parent = (parent), \ + .grp_log = NULL, \ } /* Create a default group based on the init() callsite if no explicit group * is provided. */ # define os_ref_init_count(rc, grp, count) ({ \ - os_refgrp_decl(static, __grp, __func__, NULL); \ - (os_ref_init_count)((rc), (grp) ? 
(grp) : &__grp, (count)); \ + os_refgrp_decl(static, __grp, __func__, NULL); \ + (os_ref_init_count)((rc), (grp) ? (grp) : &__grp, (count)); \ }) #else # define os_refgrp_decl(...) @@ -141,7 +141,7 @@ void os_ref_init_count(struct os_refcnt *, struct os_refgrp *, os_ref_count_t co void os_ref_retain(struct os_refcnt *); os_ref_count_t os_ref_release_explicit(struct os_refcnt *rc, - memory_order release_order, memory_order dealloc_order) OS_WARN_RESULT; + memory_order release_order, memory_order dealloc_order) OS_WARN_RESULT; static inline os_ref_count_t OS_WARN_RESULT os_ref_release(struct os_refcnt *rc) @@ -159,7 +159,7 @@ static inline void os_ref_release_live(struct os_refcnt *rc) { if (__improbable(os_ref_release_explicit(rc, - memory_order_release, memory_order_relaxed) == 0)) { + memory_order_release, memory_order_relaxed) == 0)) { panic("os_refcnt: unexpected release of final reference (rc=%p)\n", rc); __builtin_unreachable(); } diff --git a/libkern/os/trace.h b/libkern/os/trace.h index f658f035c..318c21c87 100644 --- a/libkern/os/trace.h +++ b/libkern/os/trace.h @@ -2,14 +2,14 @@ * Copyright (c) 2013-2015 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -46,7 +46,8 @@ extern void *__dso_handle; OS_ALWAYS_INLINE static inline void -_os_trace_verify_printf(const char *msg, ...) __attribute__((format(printf, 1, 2))) +_os_trace_verify_printf(const char *msg, ...) +__attribute__((format(printf, 1, 2))) { #pragma unused(msg) } @@ -57,10 +58,10 @@ _os_trace_verify_printf(const char *msg, ...) __attribute__((format(printf, 1, 2 #endif /* use old macros for anything less than iOS 10 and MacOS 10.12 */ -#if (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_10_0) \ - || (defined(__WATCH_OS_VERSION_MIN_REQUIRED) && __WATCH_OS_VERSION_MIN_REQUIRED < __WATCHOS_3_0) \ - || (defined(__TV_OS_VERSION_MIN_REQUIRED) && __TV_OS_VERSION_MIN_REQUIRED < __TVOS_10_0) \ - || (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12) +#if (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_10_0) \ + || (defined(__WATCH_OS_VERSION_MIN_REQUIRED) && __WATCH_OS_VERSION_MIN_REQUIRED < __WATCHOS_3_0) \ + || (defined(__TV_OS_VERSION_MIN_REQUIRED) && __TV_OS_VERSION_MIN_REQUIRED < __TVOS_10_0) \ + || (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_12) #define _os_trace_0(_l, _m, _t) __extension__({ \ _os_trace_verify_printf(_l); \ @@ -69,98 +70,98 @@ _os_trace_verify_printf(const char *msg, ...) 
__attribute__((format(printf, 1, 2 }) #define _os_trace_1(_l, _m, _t, _1) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ _os_trace_verify_printf(_l, _c1); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - uint8_t _s[1]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + uint8_t _s[1]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._cnt = 1, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._cnt = 1, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), NULL); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_2(_l, _m, _t, _1, _2) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ _os_trace_verify_printf(_l, _c1, _c2); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - uint8_t _s[2]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + uint8_t _s[2]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._cnt = 2, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._cnt = 2, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), NULL); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_3(_l, _m, _t, _1, _2, _3) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ _os_trace_verify_printf(_l, _c1, _c2, _c3); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - uint8_t _s[3]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + uint8_t _s[3]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._cnt = 3, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._cnt = 3, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), NULL); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_4(_l, _m, _t, _1, _2, _3, _4) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ const __typeof__(_4) _c4 = _4; \ _os_trace_verify_printf(_l, _c1, _c2, _c3, _c4); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - __typeof__(_c4) _f4; \ - uint8_t _s[4]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) 
_f3; \ + __typeof__(_c4) _f4; \ + uint8_t _s[4]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._f4 = _c4, ._s[3] = sizeof(_c4), \ - ._cnt = 4, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._f4 = _c4, ._s[3] = sizeof(_c4), \ + ._cnt = 4, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), NULL); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_5(_l, _m, _t, _1, _2, _3, _4, _5) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ @@ -168,29 +169,29 @@ _os_trace_verify_printf(const char *msg, ...) __attribute__((format(printf, 1, 2 const __typeof__(_5) _c5 = _5; \ _os_trace_verify_printf(_l, _c1, _c2, _c3, _c4, _c5); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - __typeof__(_c4) _f4; \ - __typeof__(_c5) _f5; \ - uint8_t _s[5]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + __typeof__(_c4) _f4; \ + __typeof__(_c5) _f5; \ + uint8_t _s[5]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._f4 = _c4, ._s[3] = sizeof(_c4), \ - ._f5 = _c5, ._s[4] = sizeof(_c5), \ - ._cnt = 5, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._f4 = _c4, ._s[3] = sizeof(_c4), \ + ._f5 = _c5, ._s[4] = sizeof(_c5), \ + ._cnt = 5, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), NULL); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_6(_l, _m, _t, _1, _2, _3, _4, _5, _6) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ @@ -199,31 +200,31 @@ _os_trace_verify_printf(const char *msg, ...) 
__attribute__((format(printf, 1, 2 const __typeof__(_6) _c6 = _6; \ _os_trace_verify_printf(_l, _c1, _c2, _c3, _c4, _c5, _c6); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - __typeof__(_c4) _f4; \ - __typeof__(_c5) _f5; \ - __typeof__(_c6) _f6; \ - uint8_t _s[6]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + __typeof__(_c4) _f4; \ + __typeof__(_c5) _f5; \ + __typeof__(_c6) _f6; \ + uint8_t _s[6]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._f4 = _c4, ._s[3] = sizeof(_c4), \ - ._f5 = _c5, ._s[4] = sizeof(_c5), \ - ._f6 = _c6, ._s[5] = sizeof(_c6), \ - ._cnt = 6, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._f4 = _c4, ._s[3] = sizeof(_c4), \ + ._f5 = _c5, ._s[4] = sizeof(_c5), \ + ._f6 = _c6, ._s[5] = sizeof(_c6), \ + ._cnt = 6, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), NULL); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_7(_l, _m, _t, _1, _2, _3, _4, _5, _6, _7) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ @@ -233,28 +234,28 @@ _os_trace_verify_printf(const char *msg, ...) __attribute__((format(printf, 1, 2 const __typeof__(_7) _c7 = _7; \ _os_trace_verify_printf(_l, _c1, _c2, _c3, _c4, _c5, _c6, _c7); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - __typeof__(_c4) _f4; \ - __typeof__(_c5) _f5; \ - __typeof__(_c6) _f6; \ - __typeof__(_c7) _f7; \ - uint8_t _s[7]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + __typeof__(_c4) _f4; \ + __typeof__(_c5) _f5; \ + __typeof__(_c6) _f6; \ + __typeof__(_c7) _f7; \ + uint8_t _s[7]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._f4 = _c4, ._s[3] = sizeof(_c4), \ - ._f5 = _c5, ._s[4] = sizeof(_c5), \ - ._f6 = _c6, ._s[5] = sizeof(_c6), \ - ._f7 = _c7, ._s[6] = sizeof(_c7), \ - ._cnt = 7, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._f4 = _c4, ._s[3] = sizeof(_c4), \ + ._f5 = _c5, ._s[4] = sizeof(_c5), \ + ._f6 = _c6, ._s[5] = sizeof(_c6), \ + ._f7 = _c7, ._s[6] = sizeof(_c7), \ + ._cnt = 7, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), NULL); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_with_payload_1(_l, _m, _t, _payload) __extension__({ \ @@ -264,98 +265,98 @@ _os_trace_verify_printf(const char *msg, ...) 
__attribute__((format(printf, 1, 2 }) #define _os_trace_with_payload_2(_l, _m, _t, _1, _payload) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ _os_trace_verify_printf(_l, _c1); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - uint8_t _s[1]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + uint8_t _s[1]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._cnt = 1, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._cnt = 1, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), _payload); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_with_payload_3(_l, _m, _t, _1, _2, _payload) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ _os_trace_verify_printf(_l, _c1, _c2); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - uint8_t _s[2]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + uint8_t _s[2]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._cnt = 2, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._cnt = 2, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), _payload); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_with_payload_4(_l, _m, _t, _1, _2, _3, _payload) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ _os_trace_verify_printf(_l, _c1, _c2, _c3); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - uint8_t _s[3]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + uint8_t _s[3]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._cnt = 3, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._cnt = 3, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), _payload); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_with_payload_5(_l, _m, _t, _1, _2, _3, _4, _payload) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ const __typeof__(_4) _c4 = _4; \ _os_trace_verify_printf(_l, _c1, _c2, _c3, _c4); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - __typeof__(_c4) _f4; \ - 
uint8_t _s[4]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + __typeof__(_c4) _f4; \ + uint8_t _s[4]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._f4 = _c4, ._s[3] = sizeof(_c4), \ - ._cnt = 4, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._f4 = _c4, ._s[3] = sizeof(_c4), \ + ._cnt = 4, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), _payload); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_with_payload_6(_l, _m, _t, _1, _2, _3, _4, _5, _payload) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ @@ -363,29 +364,29 @@ _os_trace_verify_printf(const char *msg, ...) __attribute__((format(printf, 1, 2 const __typeof__(_4) _c5 = _5; \ _os_trace_verify_printf(_l, _c1, _c2, _c3, _c4, _c5); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - __typeof__(_c4) _f4; \ - __typeof__(_c5) _f5; \ - uint8_t _s[5]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + __typeof__(_c4) _f4; \ + __typeof__(_c5) _f5; \ + uint8_t _s[5]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._f4 = _c4, ._s[3] = sizeof(_c4), \ - ._f5 = _c5, ._s[4] = sizeof(_c5), \ - ._cnt = 5, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._f4 = _c4, ._s[3] = sizeof(_c4), \ + ._f5 = _c5, ._s[4] = sizeof(_c5), \ + ._cnt = 5, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), _payload); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_with_payload_7(_l, _m, _t, _1, _2, _3, _4, _5, _6, _payload) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ @@ -394,31 +395,31 @@ _os_trace_verify_printf(const char *msg, ...) 
__attribute__((format(printf, 1, 2 const __typeof__(_6) _c6 = _6; \ _os_trace_verify_printf(_l, _c1, _c2, _c3, _c4, _c5, _c6); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - __typeof__(_c4) _f4; \ - __typeof__(_c5) _f5; \ - __typeof__(_c6) _f6; \ - uint8_t _s[6]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + __typeof__(_c4) _f4; \ + __typeof__(_c5) _f5; \ + __typeof__(_c6) _f6; \ + uint8_t _s[6]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._f4 = _c4, ._s[3] = sizeof(_c4), \ - ._f5 = _c5, ._s[4] = sizeof(_c5), \ - ._f6 = _c6, ._s[5] = sizeof(_c6), \ - ._cnt = 6, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._f4 = _c4, ._s[3] = sizeof(_c4), \ + ._f5 = _c5, ._s[4] = sizeof(_c5), \ + ._f6 = _c6, ._s[5] = sizeof(_c6), \ + ._cnt = 6, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), _payload); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define _os_trace_with_payload_8(_l, _m, _t, _1, _2, _3, _4, _5, _6, _7, _payload) __extension__({ \ - _Pragma("clang diagnostic push") \ - _Pragma("clang diagnostic ignored \"-Wpacked\"") \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Wpacked\"") \ const __typeof__(_1) _c1 = _1; \ const __typeof__(_2) _c2 = _2; \ const __typeof__(_3) _c3 = _3; \ @@ -428,28 +429,28 @@ _os_trace_verify_printf(const char *msg, ...) __attribute__((format(printf, 1, 2 const __typeof__(_7) _c7 = _7; \ _os_trace_verify_printf(_l, _c1, _c2, _c3, _c4, _c5, _c6, _c7); \ const struct __attribute__((packed)) { \ - __typeof__(_c1) _f1; \ - __typeof__(_c2) _f2; \ - __typeof__(_c3) _f3; \ - __typeof__(_c4) _f4; \ - __typeof__(_c5) _f5; \ - __typeof__(_c6) _f6; \ - __typeof__(_c7) _f7; \ - uint8_t _s[7]; \ - uint8_t _cnt; \ + __typeof__(_c1) _f1; \ + __typeof__(_c2) _f2; \ + __typeof__(_c3) _f3; \ + __typeof__(_c4) _f4; \ + __typeof__(_c5) _f5; \ + __typeof__(_c6) _f6; \ + __typeof__(_c7) _f7; \ + uint8_t _s[7]; \ + uint8_t _cnt; \ } _buf = { \ - ._f1 = _c1, ._s[0] = sizeof(_c1), \ - ._f2 = _c2, ._s[1] = sizeof(_c2), \ - ._f3 = _c3, ._s[2] = sizeof(_c3), \ - ._f4 = _c4, ._s[3] = sizeof(_c4), \ - ._f5 = _c5, ._s[4] = sizeof(_c5), \ - ._f6 = _c6, ._s[5] = sizeof(_c6), \ - ._f7 = _c7, ._s[6] = sizeof(_c7), \ - ._cnt = 7, \ + ._f1 = _c1, ._s[0] = sizeof(_c1), \ + ._f2 = _c2, ._s[1] = sizeof(_c2), \ + ._f3 = _c3, ._s[2] = sizeof(_c3), \ + ._f4 = _c4, ._s[3] = sizeof(_c4), \ + ._f5 = _c5, ._s[4] = sizeof(_c5), \ + ._f6 = _c6, ._s[5] = sizeof(_c6), \ + ._f7 = _c7, ._s[6] = sizeof(_c7), \ + ._cnt = 7, \ }; \ _os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), _payload); \ __asm__(""); /* avoid tailcall */ \ - _Pragma("clang diagnostic pop") \ + _Pragma("clang diagnostic pop") \ }) #define OS_TRACE_CALL(format, _m, _t, ...) __extension__({ \ @@ -608,9 +609,9 @@ _os_trace_verify_printf(const char *msg, ...) __attribute__((format(printf, 1, 2 * @typedef os_trace_payload_t * A block that populates an xpc_object_t of type XPC_TYPE_DICTIONARY to represent * complex data. This block will only be invoked under conditions where tools - * have attached to the process. The payload can be used to send arbitrary data - * via the trace call. 
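Each _os_trace_N and _os_trace_with_payload_N macro above encodes its captured arguments into a packed on-stack buffer: the argument values first, then one size byte per argument, then a count byte. A rough sketch of what the two-argument form builds, assuming two int arguments a and b (an illustration of the expansion, not code from the header):

	const struct __attribute__((packed)) {
		int     _f1;   /* first captured argument  */
		int     _f2;   /* second captured argument */
		uint8_t _s[2]; /* per-argument sizes       */
		uint8_t _cnt;  /* argument count           */
	} _buf = {
		._f1 = a, ._s[0] = sizeof(int),
		._f2 = b, ._s[1] = sizeof(int),
		._cnt = 2,
	};
	_os_trace_with_buffer(&__dso_handle, _m, _t, &_buf, sizeof(_buf), NULL);

The trailing __asm__("") in each macro exists only to defeat tail-call optimization, so that the caller's return address, which locates the tracepoint, stays meaningful.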
Tools may use the data to validate state for integration - * tests or provide other introspection services. No assumptions are made about + * have attached to the process. The payload can be used to send arbitrary data + * via the trace call. Tools may use the data to validate state for integration + * tests or provide other introspection services. No assumptions are made about * the format or structure of the data. */ typedef void (^os_trace_payload_t)(xpc_object_t xdict); @@ -643,10 +644,10 @@ typedef void (^os_trace_payload_t)(xpc_object_t xdict); }) -#if (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_10_0) \ - || (defined(__WATCH_OS_VERSION_MIN_REQUIRED) && __WATCH_OS_VERSION_MIN_REQUIRED >= __WATCHOS_3_0) \ - || (defined(__TV_OS_VERSION_MIN_REQUIRED) && __TV_OS_VERSION_MIN_REQUIRED >= __TVOS_10_0) \ - || (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_12) +#if (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= __IPHONE_10_0) \ + || (defined(__WATCH_OS_VERSION_MIN_REQUIRED) && __WATCH_OS_VERSION_MIN_REQUIRED >= __WATCHOS_3_0) \ + || (defined(__TV_OS_VERSION_MIN_REQUIRED) && __TV_OS_VERSION_MIN_REQUIRED >= __TVOS_10_0) \ + || (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_12) /*! * @function os_trace_info diff --git a/libkern/os/trace_internal.h b/libkern/os/trace_internal.h index dc40b5fa0..8073b0772 100644 --- a/libkern/os/trace_internal.h +++ b/libkern/os/trace_internal.h @@ -35,8 +35,8 @@ OS_ALWAYS_INLINE inline uint32_t _os_trace_offset(const void *dso, const void *addr, _firehose_tracepoint_flags_activity_t flags __unused) { - assert((uintptr_t)addr >= (uintptr_t)dso); - return (uint32_t) ((uintptr_t)addr - (uintptr_t)dso); + assert((uintptr_t)addr >= (uintptr_t)dso); + return (uint32_t) ((uintptr_t)addr - (uintptr_t)dso); } bool diff --git a/libkern/stack_protector.c b/libkern/stack_protector.c index 21c4d06ae..df42cdac3 100644 --- a/libkern/stack_protector.c +++ b/libkern/stack_protector.c @@ -2,7 +2,7 @@ * Copyright (c) 2008-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
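The _os_trace_offset() helper shown above records a tracepoint as its 32-bit distance from the image's Mach-O header (__dso_handle). A sketch of the computation, with illustrative variable names:

	/* 'dso' is the image header address and 'addr' a tracepoint
	 * inside that image; the delta is stable across ASLR slides */
	uint32_t off = (uint32_t)((uintptr_t)addr - (uintptr_t)dso);

Recording the offset rather than the raw pointer keeps trace records small and lets tools resolve the tracepoint later from the image's load address.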
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,6 +34,5 @@ unsigned long __stack_chk_guard = 0UL; void __stack_chk_fail(void) { - panic("Kernel stack memory corruption detected"); + panic("Kernel stack memory corruption detected"); } - diff --git a/libkern/stdio/scanf.c b/libkern/stdio/scanf.c index 82791cdde..6c1968cde 100644 --- a/libkern/stdio/scanf.c +++ b/libkern/stdio/scanf.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -69,7 +69,7 @@ static inline int isspace(char c) { - return (c == ' ' || c == '\t' || c == '\n' || c == '\12'); + return c == ' ' || c == '\t' || c == '\n' || c == '\12'; } #endif #include @@ -77,41 +77,41 @@ isspace(char c) #include #include -#define BUF 32 /* Maximum length of numeric string. */ +#define BUF 32 /* Maximum length of numeric string. */ /* * Flags used during conversion. */ -#define LONG 0x01 /* l: long or double */ -#define SHORT 0x04 /* h: short */ -#define SUPPRESS 0x08 /* *: suppress assignment */ -#define POINTER 0x10 /* p: void * (as hex) */ -#define NOSKIP 0x20 /* [ or c: do not skip blanks */ -#define LONGLONG 0x400 /* ll: long long (+ deprecated q: quad) */ -#define SHORTSHORT 0x4000 /* hh: char */ -#define UNSIGNED 0x8000 /* %[oupxX] conversions */ +#define LONG 0x01 /* l: long or double */ +#define SHORT 0x04 /* h: short */ +#define SUPPRESS 0x08 /* *: suppress assignment */ +#define POINTER 0x10 /* p: void * (as hex) */ +#define NOSKIP 0x20 /* [ or c: do not skip blanks */ +#define LONGLONG 0x400 /* ll: long long (+ deprecated q: quad) */ +#define SHORTSHORT 0x4000 /* hh: char */ +#define UNSIGNED 0x8000 /* %[oupxX] conversions */ /* * The following are used in numeric conversions only: * SIGNOK, NDIGITS, DPTOK, and EXPOK are for floating point; * SIGNOK, NDIGITS, PFXOK, and NZDIGITS are for integral. 
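To see how these flag bits compose, consider a "%hhx" directive; the modifier and conversion cases in the switch below produce, step by step (a trace of the logic, not code from the file):

	/* 'h' : flags |= SHORT                                   */
	/* 'h' : flags &= ~SHORT; flags |= SHORTSHORT             */
	/* 'x' : flags |= PFXOK | UNSIGNED; base = 16; c = CT_INT */
	/* the CT_INT scanner then adds SIGNOK | NDIGITS |        */
	/* NZDIGITS before consuming any input                    */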
*/ -#define SIGNOK 0x40 /* +/- is (still) legal */ -#define NDIGITS 0x80 /* no digits detected */ +#define SIGNOK 0x40 /* +/- is (still) legal */ +#define NDIGITS 0x80 /* no digits detected */ -#define DPTOK 0x100 /* (float) decimal point is still legal */ -#define EXPOK 0x200 /* (float) exponent (e+3, etc) still legal */ +#define DPTOK 0x100 /* (float) decimal point is still legal */ +#define EXPOK 0x200 /* (float) exponent (e+3, etc) still legal */ -#define PFXOK 0x100 /* 0x prefix is (still) legal */ -#define NZDIGITS 0x200 /* no zero digits detected */ +#define PFXOK 0x100 /* 0x prefix is (still) legal */ +#define NZDIGITS 0x200 /* no zero digits detected */ /* * Conversion types. */ -#define CT_CHAR 0 /* %c conversion */ -#define CT_CCL 1 /* %[...] conversion */ -#define CT_STRING 2 /* %s conversion */ -#define CT_INT 3 /* %[dioupxX] conversion */ +#define CT_CHAR 0 /* %c conversion */ +#define CT_CCL 1 /* %[...] conversion */ +#define CT_STRING 2 /* %s conversion */ +#define CT_INT 3 /* %[dioupxX] conversion */ static const u_char *__sccl(char *, const u_char *); @@ -120,11 +120,11 @@ sscanf(const char *ibuf, const char *fmt, ...) { va_list ap; int ret; - + va_start(ap, fmt); ret = vsscanf(ibuf, fmt, ap); va_end(ap); - return(ret); + return ret; } int @@ -132,33 +132,34 @@ vsscanf(const char *inp, char const *fmt0, va_list ap) { int inr; const u_char *fmt = (const u_char *)fmt0; - int c; /* character from format, or conversion */ - size_t width; /* field width, or 0 */ - char *p; /* points into all kinds of strings */ - int n; /* handy integer */ - int flags; /* flags as defined above */ - char *p0; /* saves original value of p when necessary */ - int nassigned; /* number of fields assigned */ - int nconversions; /* number of conversions */ - int nread; /* number of characters consumed from fp */ - int base; /* base argument to conversion function */ - char ccltab[256]; /* character class table for %[...] */ - char buf[BUF]; /* buffer for numeric conversions */ + int c; /* character from format, or conversion */ + size_t width; /* field width, or 0 */ + char *p; /* points into all kinds of strings */ + int n; /* handy integer */ + int flags; /* flags as defined above */ + char *p0; /* saves original value of p when necessary */ + int nassigned; /* number of fields assigned */ + int nconversions; /* number of conversions */ + int nread; /* number of characters consumed from fp */ + int base; /* base argument to conversion function */ + char ccltab[256]; /* character class table for %[...] */ + char buf[BUF]; /* buffer for numeric conversions */ /* `basefix' is used to avoid `if' tests in the integer scanner */ static short basefix[17] = - { 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; + { 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 }; inr = strlen(inp); - + nassigned = 0; nconversions = 0; nread = 0; - base = 0; /* XXX just to keep gcc happy */ + base = 0; /* XXX just to keep gcc happy */ for (;;) { c = *fmt++; - if (c == 0) - return (nassigned); + if (c == 0) { + return nassigned; + } if (isspace(c)) { while (inr > 0 && isspace(*inp)) { nread++; @@ -167,22 +168,25 @@ vsscanf(const char *inp, char const *fmt0, va_list ap) } continue; } - if (c != '%') + if (c != '%') { goto literal; + } width = 0; flags = 0; /* * switch on the format. continue if done; * break once format type is derived. 
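A small usage sketch for sscanf()/vsscanf(), whose conversion loop this is (illustrative values; sscanf() forwards to vsscanf(), which returns the number of fields assigned):

	int maj, min, rev;

	if (sscanf("10.4.2", "%d.%d.%d", &maj, &min, &rev) == 3) {
		/* full match: maj == 10, min == 4, rev == 2 */
	}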
*/ -again: c = *fmt++; +again: c = *fmt++; switch (c) { case '%': literal: - if (inr <= 0) + if (inr <= 0) { goto input_failure; - if (*inp != c) + } + if (*inp != c) { goto match_failure; + } inr--; inp++; nread++; @@ -195,18 +199,20 @@ literal: if (flags & LONG) { flags &= ~LONG; flags |= LONGLONG; - } else + } else { flags |= LONG; + } goto again; case 'q': - flags |= LONGLONG; /* not quite */ + flags |= LONGLONG; /* not quite */ goto again; case 'h': if (flags & SHORT) { flags &= ~SHORT; flags |= SHORTSHORT; - } else + } else { flags |= SHORT; + } goto again; case '0': case '1': case '2': case '3': case '4': @@ -241,7 +247,7 @@ literal: case 'X': case 'x': - flags |= PFXOK; /* enable 0x prefixing */ + flags |= PFXOK; /* enable 0x prefixing */ c = CT_INT; flags |= UNSIGNED; base = 16; @@ -262,7 +268,7 @@ literal: c = CT_CHAR; break; - case 'p': /* pointer format is like hex */ + case 'p': /* pointer format is like hex */ flags |= POINTER | PFXOK; c = CT_INT; flags |= UNSIGNED; @@ -271,26 +277,29 @@ literal: case 'n': nconversions++; - if (flags & SUPPRESS) /* ??? */ + if (flags & SUPPRESS) { /* ??? */ continue; - if (flags & SHORTSHORT) + } + if (flags & SHORTSHORT) { *va_arg(ap, char *) = nread; - else if (flags & SHORT) + } else if (flags & SHORT) { *va_arg(ap, short *) = nread; - else if (flags & LONG) + } else if (flags & LONG) { *va_arg(ap, long *) = nread; - else if (flags & LONGLONG) + } else if (flags & LONGLONG) { *va_arg(ap, long long *) = nread; - else + } else { *va_arg(ap, int *) = nread; + } continue; } /* * We have a conversion that requires input. */ - if (inr <= 0) + if (inr <= 0) { goto input_failure; + } /* * Consume leading white space, except for formats @@ -299,10 +308,11 @@ literal: if ((flags & NOSKIP) == 0) { while (isspace(*inp)) { nread++; - if (--inr > 0) + if (--inr > 0) { inp++; - else + } else { goto input_failure; + } } /* * Note that there is at least one character in @@ -315,11 +325,11 @@ literal: * Do the conversion. 
*/ switch (c) { - case CT_CHAR: /* scan arbitrary characters (sets NOSKIP) */ - if (width == 0) + if (width == 0) { width = 1; + } if (flags & SUPPRESS) { size_t sum = 0; for (;;) { @@ -327,8 +337,9 @@ literal: sum += n; width -= n; inp += n; - if (sum == 0) + if (sum == 0) { goto input_failure; + } break; } else { sum += width; @@ -350,8 +361,9 @@ literal: case CT_CCL: /* scan a (nonempty) character class (sets NOSKIP) */ - if (width == 0) - width = (size_t)~0; /* `infinity' */ + if (width == 0) { + width = (size_t)~0; /* `infinity' */ + } /* take only those things in the class */ if (flags & SUPPRESS) { n = 0; @@ -359,32 +371,38 @@ literal: n++; inr--; inp++; - if (--width == 0) + if (--width == 0) { break; + } if (inr <= 0) { - if (n == 0) + if (n == 0) { goto input_failure; + } break; } } - if (n == 0) + if (n == 0) { goto match_failure; + } } else { p0 = p = va_arg(ap, char *); while (ccltab[(unsigned char)*inp]) { inr--; *p++ = *inp++; - if (--width == 0) + if (--width == 0) { break; + } if (inr <= 0) { - if (p == p0) + if (p == p0) { goto input_failure; + } break; } } n = p - p0; - if (n == 0) + if (n == 0) { goto match_failure; + } *p = 0; nassigned++; } @@ -394,18 +412,21 @@ literal: case CT_STRING: /* like CCL, but zero-length string OK, & no NOSKIP */ - if (width == 0) + if (width == 0) { width = (size_t)~0; + } if (flags & SUPPRESS) { n = 0; while (!isspace(*inp)) { n++; inr--; inp++; - if (--width == 0) + if (--width == 0) { break; - if (inr <= 0) + } + if (inr <= 0) { break; + } } nread += n; } else { @@ -413,10 +434,12 @@ literal: while (!isspace(*inp)) { inr--; *p++ = *inp++; - if (--width == 0) + if (--width == 0) { break; - if (inr <= 0) + } + if (inr <= 0) { break; + } } *p = 0; nread += p - p0; @@ -428,12 +451,14 @@ literal: case CT_INT: /* scan an integer as if by the conversion function */ #ifdef hardway - if (width == 0 || width > sizeof(buf) - 1) + if (width == 0 || width > sizeof(buf) - 1) { width = sizeof(buf) - 1; + } #else /* size_t is unsigned, hence this optimisation */ - if (--width > sizeof(buf) - 2) + if (--width > sizeof(buf) - 2) { width = sizeof(buf) - 2; + } width++; #endif flags |= SIGNOK | NDIGITS | NZDIGITS; @@ -444,7 +469,6 @@ literal: * if we accept it as a part of number. */ switch (c) { - /* * The digit 0 is always legal, but is * special. For %i conversions, if no @@ -462,10 +486,11 @@ literal: base = 8; flags |= PFXOK; } - if (flags & NZDIGITS) - flags &= ~(SIGNOK|NZDIGITS|NDIGITS); - else - flags &= ~(SIGNOK|PFXOK|NDIGITS); + if (flags & NZDIGITS) { + flags &= ~(SIGNOK | NZDIGITS | NDIGITS); + } else { + flags &= ~(SIGNOK | PFXOK | NDIGITS); + } goto ok; /* 1 through 7 always legal */ @@ -478,8 +503,9 @@ literal: /* digits 8 and 9 ok iff decimal or hex */ case '8': case '9': base = basefix[base]; - if (base <= 8) - break; /* not legal here */ + if (base <= 8) { + break; /* not legal here */ + } flags &= ~(SIGNOK | PFXOK | NDIGITS); goto ok; @@ -489,8 +515,9 @@ literal: case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': /* no need to fix base here */ - if (base <= 10) - break; /* not legal here */ + if (base <= 10) { + break; /* not legal here */ + } flags &= ~(SIGNOK | PFXOK | NDIGITS); goto ok; @@ -505,7 +532,7 @@ literal: /* x ok iff flag still set & 2nd char */ case 'x': case 'X': if (flags & PFXOK && p == buf + 1) { - base = 16; /* if %i */ + base = 16; /* if %i */ flags &= ~PFXOK; goto ok; } @@ -517,15 +544,16 @@ literal: * for a number. Stop accumulating digits. 
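/*
 * [Illustrative sketch, not part of the patch.] The "base = basefix[base]"
 * lines above use the static table declared near the top of vsscanf() to
 * fold the undetermined base of %i (base 0) into decimal without a branch:
 * basefix[0] is 10 and basefix[n] is n for 1..16. After the lookup, a
 * single comparison decides whether '8' and '9' are legal digits:
 */
static const short basefix[17] =
    { 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };

static int
digit89_ok(int base)            /* is '8' or '9' a valid digit here? */
{
	/* of the bases this scanner uses (0, 8, 10, 16), false only for octal */
	return basefix[base] > 8;
}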
*/ break; - ok: +ok: /* * c is legal: store it and look at the next. */ *p++ = c; - if (--inr > 0) + if (--inr > 0) { inp++; - else - break; /* end of input */ + } else { + break; /* end of input */ + } } /* * If we had only a sign, it is no good; push @@ -550,35 +578,36 @@ literal: u_quad_t res; *p = 0; - if ((flags & UNSIGNED) == 0) - res = strtoq(buf, (char **)NULL, base); - else - res = strtouq(buf, (char **)NULL, base); - if (flags & POINTER) + if ((flags & UNSIGNED) == 0) { + res = strtoq(buf, (char **)NULL, base); + } else { + res = strtouq(buf, (char **)NULL, base); + } + if (flags & POINTER) { *va_arg(ap, void **) = - (void *)(uintptr_t)res; - else if (flags & SHORTSHORT) + (void *)(uintptr_t)res; + } else if (flags & SHORTSHORT) { *va_arg(ap, char *) = res; - else if (flags & SHORT) + } else if (flags & SHORT) { *va_arg(ap, short *) = res; - else if (flags & LONG) + } else if (flags & LONG) { *va_arg(ap, long *) = res; - else if (flags & LONGLONG) + } else if (flags & LONGLONG) { *va_arg(ap, long long *) = res; - else + } else { *va_arg(ap, int *) = res; + } nassigned++; } nread += p - buf; nconversions++; break; - } } input_failure: - return (nconversions != 0 ? nassigned : -1); + return nconversions != 0 ? nassigned : -1; match_failure: - return (nassigned); + return nassigned; } /* @@ -593,19 +622,19 @@ __sccl(char *tab, const u_char *fmt) int c, n, v; /* first `clear' the whole table */ - c = *fmt++; /* first char hat => negated scanset */ + c = *fmt++; /* first char hat => negated scanset */ if (c == '^') { - v = 1; /* default => accept */ - c = *fmt++; /* get new first char */ - } else - v = 0; /* default => reject */ - + v = 1; /* default => accept */ + c = *fmt++; /* get new first char */ + } else { + v = 0; /* default => reject */ + } /* XXX: Will not work if sizeof(tab*) > sizeof(char) */ (void) memset(tab, v, 256); - if (c == 0) - return (fmt - 1);/* format ended before closing ] */ - + if (c == 0) { + return fmt - 1;/* format ended before closing ] */ + } /* * Now set the entries corresponding to the actual scanset * to the opposite of the above. @@ -615,13 +644,12 @@ __sccl(char *tab, const u_char *fmt) */ v = 1 - v; for (;;) { - tab[c] = v; /* take character c */ + tab[c] = v; /* take character c */ doswitch: - n = *fmt++; /* and examine the next */ + n = *fmt++; /* and examine the next */ switch (n) { - - case 0: /* format ended too soon */ - return (fmt - 1); + case 0: /* format ended too soon */ + return fmt - 1; case '-': /* @@ -645,12 +673,12 @@ doswitch: n = *fmt; if (n == ']' || n < c) { c = '-'; - break; /* resume the for(;;) */ + break; /* resume the for(;;) */ } fmt++; /* fill in the range */ do { - tab[++c] = v; + tab[++c] = v; } while (c < n); c = n; /* @@ -660,10 +688,10 @@ doswitch: */ goto doswitch; - case ']': /* end of scanset */ - return (fmt); + case ']': /* end of scanset */ + return fmt; - default: /* just another character */ + default: /* just another character */ c = n; break; } diff --git a/libkern/uuid/uuid.c b/libkern/uuid/uuid.c index eec3f4943..6bdfcec9d 100644 --- a/libkern/uuid/uuid.c +++ b/libkern/uuid/uuid.c @@ -14,7 +14,7 @@ * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. 
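/*
 * [Illustrative sketch, not part of the patch.] Before the uuid.c hunks
 * begin: the __sccl() routine that closed out the scanf changes above
 * compiles a %[...] scanset into a 256-entry membership table -- a
 * leading '^' flips the accept/reject polarity, and a range like "a-z"
 * is filled only when the right endpoint is not ']' and does not sort
 * below the left one. Consuming such a table is then a straight scan
 * (scanset_span is a hypothetical helper, not in the patch):
 */
#include <stddef.h>

static size_t
scanset_span(const char *tab, const char *input)
{
	size_t n = 0;

	/* tab[c] nonzero means character c is in the scanset */
	while (tab[(unsigned char)input[n]] != 0) {
		n++;
	}
	return n;
}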
- * + * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF @@ -46,8 +46,9 @@ static void read_node(uint8_t *node) { #if NETWORKING - if (uuid_get_ethernet(node) == 0) + if (uuid_get_ethernet(node) == 0) { return; + } #endif /* NETWORKING */ read_random(node, 6); @@ -120,7 +121,7 @@ uuid_generate_time(uuid_t out) out[5] = (uint8_t)(time >> 32); out[6] = (uint8_t)(time >> 56); out[7] = (uint8_t)(time >> 48); - + out[6] = (out[6] & 0x0F) | 0x10; out[8] = (out[8] & 0x3F) | 0x80; } @@ -143,52 +144,52 @@ uuid_parse(const uuid_string_t in, uuid_t uu) int n = 0; sscanf(in, - "%2hhx%2hhx%2hhx%2hhx-" - "%2hhx%2hhx-" - "%2hhx%2hhx-" - "%2hhx%2hhx-" - "%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%n", - &uu[0], &uu[1], &uu[2], &uu[3], - &uu[4], &uu[5], - &uu[6], &uu[7], - &uu[8], &uu[9], - &uu[10], &uu[11], &uu[12], &uu[13], &uu[14], &uu[15], &n); - - return (n != 36 || in[n] != '\0' ? -1 : 0); + "%2hhx%2hhx%2hhx%2hhx-" + "%2hhx%2hhx-" + "%2hhx%2hhx-" + "%2hhx%2hhx-" + "%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%n", + &uu[0], &uu[1], &uu[2], &uu[3], + &uu[4], &uu[5], + &uu[6], &uu[7], + &uu[8], &uu[9], + &uu[10], &uu[11], &uu[12], &uu[13], &uu[14], &uu[15], &n); + + return n != 36 || in[n] != '\0' ? -1 : 0; } void uuid_unparse_lower(const uuid_t uu, uuid_string_t out) { snprintf(out, - sizeof(uuid_string_t), - "%02x%02x%02x%02x-" - "%02x%02x-" - "%02x%02x-" - "%02x%02x-" - "%02x%02x%02x%02x%02x%02x", - uu[0], uu[1], uu[2], uu[3], - uu[4], uu[5], - uu[6], uu[7], - uu[8], uu[9], - uu[10], uu[11], uu[12], uu[13], uu[14], uu[15]); + sizeof(uuid_string_t), + "%02x%02x%02x%02x-" + "%02x%02x-" + "%02x%02x-" + "%02x%02x-" + "%02x%02x%02x%02x%02x%02x", + uu[0], uu[1], uu[2], uu[3], + uu[4], uu[5], + uu[6], uu[7], + uu[8], uu[9], + uu[10], uu[11], uu[12], uu[13], uu[14], uu[15]); } void uuid_unparse_upper(const uuid_t uu, uuid_string_t out) { snprintf(out, - sizeof(uuid_string_t), - "%02X%02X%02X%02X-" - "%02X%02X-" - "%02X%02X-" - "%02X%02X-" - "%02X%02X%02X%02X%02X%02X", - uu[0], uu[1], uu[2], uu[3], - uu[4], uu[5], - uu[6], uu[7], - uu[8], uu[9], - uu[10], uu[11], uu[12], uu[13], uu[14], uu[15]); + sizeof(uuid_string_t), + "%02X%02X%02X%02X-" + "%02X%02X-" + "%02X%02X-" + "%02X%02X-" + "%02X%02X%02X%02X%02X%02X", + uu[0], uu[1], uu[2], uu[3], + uu[4], uu[5], + uu[6], uu[7], + uu[8], uu[9], + uu[10], uu[11], uu[12], uu[13], uu[14], uu[15]); } void diff --git a/libsa/bootstrap.cpp b/libsa/bootstrap.cpp index fecfc43db..8f33073a2 100644 --- a/libsa/bootstrap.cpp +++ b/libsa/bootstrap.cpp @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
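/*
 * [Illustrative sketch, not part of the patch.] Two details of the
 * uuid.c hunks above are easy to miss: uuid_generate_time() stamps
 * RFC 4122 metadata into the octets -- (out[6] & 0x0F) | 0x10 marks a
 * version-1 (time-based) UUID and (out[8] & 0x3F) | 0x80 marks the
 * IETF variant -- and uuid_parse() validates its input entirely through
 * sscanf by requiring %n to land at offset 36 with a NUL right behind
 * it. The same validity probe standalone, using the C library's sscanf
 * for illustration:
 */
#include <stdio.h>

static int
uuid_string_ok(const char *in)
{
	unsigned char uu[16];
	int n = 0;

	sscanf(in,
	    "%2hhx%2hhx%2hhx%2hhx-%2hhx%2hhx-%2hhx%2hhx-%2hhx%2hhx-"
	    "%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx%n",
	    &uu[0], &uu[1], &uu[2], &uu[3], &uu[4], &uu[5],
	    &uu[6], &uu[7], &uu[8], &uu[9], &uu[10], &uu[11],
	    &uu[12], &uu[13], &uu[14], &uu[15], &n);
	return n == 36 && in[n] == '\0';    /* exactly 36 chars, then NUL */
}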
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ extern "C" { @@ -63,8 +63,8 @@ extern "C" { * be linked against. *********************************************************************/ extern "C" { - extern void (*record_startup_extensions_function)(void); - extern void (*load_security_extensions_function)(void); +extern void (*record_startup_extensions_function)(void); +extern void (*load_security_extensions_function)(void); }; static void bootstrapRecordStartupExtensions(void); @@ -95,21 +95,21 @@ extern "C" bool IORamDiskBSDRoot(void); * all the boot kexts and have them load up. *********************************************************************/ static const char * sKernelComponentNames[] = { - // The kexts for these IDs must have a version matching 'osrelease'. - "com.apple.kernel", - "com.apple.kpi.bsd", - "com.apple.kpi.dsep", - "com.apple.kpi.iokit", - "com.apple.kpi.kasan", - "com.apple.kpi.libkern", - "com.apple.kpi.mach", - "com.apple.kpi.private", - "com.apple.kpi.unsupported", - "com.apple.iokit.IONVRAMFamily", - "com.apple.driver.AppleNMI", - "com.apple.iokit.IOSystemManagementFamily", - "com.apple.iokit.ApplePlatformFamily", - NULL + // The kexts for these IDs must have a version matching 'osrelease'. + "com.apple.kernel", + "com.apple.kpi.bsd", + "com.apple.kpi.dsep", + "com.apple.kpi.iokit", + "com.apple.kpi.kasan", + "com.apple.kpi.libkern", + "com.apple.kpi.mach", + "com.apple.kpi.private", + "com.apple.kpi.unsupported", + "com.apple.iokit.IONVRAMFamily", + "com.apple.driver.AppleNMI", + "com.apple.iokit.IOSystemManagementFamily", + "com.apple.iokit.ApplePlatformFamily", + NULL }; static int __whereIsAddr(vm_offset_t theAddr, unsigned long * segSizes, vm_offset_t *segAddrs, int segCount ); @@ -117,19 +117,19 @@ static int __whereIsAddr(vm_offset_t theAddr, unsigned long * segSizes, vm_offse #define PLK_SEGMENTS 12 static const char * plk_segNames[] = { - "__TEXT", - "__TEXT_EXEC", - "__DATA", - "__DATA_CONST", - "__LINKEDIT", - "__PRELINK_TEXT", - "__PLK_TEXT_EXEC", - "__PRELINK_DATA", - "__PLK_DATA_CONST", - "__PLK_LLVM_COV", - "__PLK_LINKEDIT", - "__PRELINK_INFO", - NULL + "__TEXT", + "__TEXT_EXEC", + "__DATA", + "__DATA_CONST", + "__LINKEDIT", + "__PRELINK_TEXT", + "__PLK_TEXT_EXEC", + "__PRELINK_DATA", + "__PLK_DATA_CONST", + "__PLK_LLVM_COV", + "__PLK_LINKEDIT", + "__PRELINK_INFO", + NULL }; #if PRAGMA_MARK @@ -143,25 +143,25 @@ static const char * plk_segNames[] = { * hide the instance through which we invoke the functions. 
*********************************************************************/ class KLDBootstrap { - friend void bootstrapRecordStartupExtensions(void); - friend void bootstrapLoadSecurityExtensions(void); + friend void bootstrapRecordStartupExtensions(void); + friend void bootstrapLoadSecurityExtensions(void); private: - void readStartupExtensions(void); - - void readPrelinkedExtensions( - kernel_section_t * prelinkInfoSect); - void readBooterExtensions(void); - - OSReturn loadKernelComponentKexts(void); - void loadKernelExternalComponents(void); - void readBuiltinPersonalities(void); - - void loadSecurityExtensions(void); - + void readStartupExtensions(void); + + void readPrelinkedExtensions( + kernel_section_t * prelinkInfoSect); + void readBooterExtensions(void); + + OSReturn loadKernelComponentKexts(void); + void loadKernelExternalComponents(void); + void readBuiltinPersonalities(void); + + void loadSecurityExtensions(void); + public: - KLDBootstrap(void); - ~KLDBootstrap(void); + KLDBootstrap(void); + ~KLDBootstrap(void); }; static KLDBootstrap sBootstrapObject; @@ -172,11 +172,11 @@ static KLDBootstrap sBootstrapObject; *********************************************************************/ KLDBootstrap::KLDBootstrap(void) { - if (this != &sBootstrapObject) { - panic("Attempt to access bootstrap segment."); - } - record_startup_extensions_function = &bootstrapRecordStartupExtensions; - load_security_extensions_function = &bootstrapLoadSecurityExtensions; + if (this != &sBootstrapObject) { + panic("Attempt to access bootstrap segment."); + } + record_startup_extensions_function = &bootstrapRecordStartupExtensions; + load_security_extensions_function = &bootstrapLoadSecurityExtensions; } /********************************************************************* @@ -185,13 +185,13 @@ KLDBootstrap::KLDBootstrap(void) *********************************************************************/ KLDBootstrap::~KLDBootstrap(void) { - if (this != &sBootstrapObject) { - panic("Attempt to access bootstrap segment."); - } + if (this != &sBootstrapObject) { + panic("Attempt to access bootstrap segment."); + } - record_startup_extensions_function = 0; - load_security_extensions_function = 0; + record_startup_extensions_function = 0; + load_security_extensions_function = 0; } /********************************************************************* @@ -199,360 +199,359 @@ KLDBootstrap::~KLDBootstrap(void) void KLDBootstrap::readStartupExtensions(void) { - kernel_section_t * prelinkInfoSect = NULL; // do not free - - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogGeneralFlag | kOSKextLogDirectoryScanFlag | - kOSKextLogKextBookkeepingFlag, - "Reading startup extensions."); - - /* If the prelink info segment has a nonzero size, we are prelinked - * and won't have any individual kexts or mkexts to read. - * Otherwise, we need to read kexts or the mkext from what the booter - * has handed us. 
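/*
 * [Illustrative sketch, not part of the patch.] The KLDBootstrap class
 * above exists so that C++ static construction of sBootstrapObject can
 * wire record_startup_extensions_function and
 * load_security_extensions_function into the kernel before any kext
 * processing runs; the "this != &sBootstrapObject" panic rejects any
 * second instance. The hook-installation idea, rendered in plain C
 * (names hypothetical):
 */
static void (*record_hook)(void);       /* consulted by the core kernel */

static void
bootstrap_record(void)
{
	/* ... scan for and register startup extensions ... */
}

static void
install_bootstrap_hooks(void)           /* run once, early in boot */
{
	record_hook = &bootstrap_record;
}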
- */ - prelinkInfoSect = getsectbyname(kPrelinkInfoSegment, kPrelinkInfoSection); - if (prelinkInfoSect->size) { - readPrelinkedExtensions(prelinkInfoSect); - } else { - readBooterExtensions(); - } - - loadKernelComponentKexts(); - loadKernelExternalComponents(); - readBuiltinPersonalities(); - OSKext::sendAllKextPersonalitiesToCatalog(); - - return; + kernel_section_t * prelinkInfoSect = NULL; // do not free + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogGeneralFlag | kOSKextLogDirectoryScanFlag | + kOSKextLogKextBookkeepingFlag, + "Reading startup extensions."); + + /* If the prelink info segment has a nonzero size, we are prelinked + * and won't have any individual kexts or mkexts to read. + * Otherwise, we need to read kexts or the mkext from what the booter + * has handed us. + */ + prelinkInfoSect = getsectbyname(kPrelinkInfoSegment, kPrelinkInfoSection); + if (prelinkInfoSect->size) { + readPrelinkedExtensions(prelinkInfoSect); + } else { + readBooterExtensions(); + } + + loadKernelComponentKexts(); + loadKernelExternalComponents(); + readBuiltinPersonalities(); + OSKext::sendAllKextPersonalitiesToCatalog(); + + return; } typedef struct kaslrPackedOffsets { - uint32_t count; /* number of offsets */ - uint32_t offsetsArray[]; /* offsets to slide */ + uint32_t count; /* number of offsets */ + uint32_t offsetsArray[]; /* offsets to slide */ } kaslrPackedOffsets; /********************************************************************* *********************************************************************/ void KLDBootstrap::readPrelinkedExtensions( - kernel_section_t * prelinkInfoSect) + kernel_section_t * prelinkInfoSect) { - OSArray * infoDictArray = NULL; // do not release - OSObject * parsedXML = NULL; // must release - OSDictionary * prelinkInfoDict = NULL; // do not release - OSString * errorString = NULL; // must release - OSKext * theKernel = NULL; // must release - OSData * kernelcacheUUID = NULL; // do not release + OSArray * infoDictArray = NULL;// do not release + OSObject * parsedXML = NULL;// must release + OSDictionary * prelinkInfoDict = NULL;// do not release + OSString * errorString = NULL;// must release + OSKext * theKernel = NULL;// must release + OSData * kernelcacheUUID = NULL;// do not release - kernel_segment_command_t * prelinkTextSegment = NULL; // see code - kernel_segment_command_t * prelinkInfoSegment = NULL; // see code + kernel_segment_command_t * prelinkTextSegment = NULL;// see code + kernel_segment_command_t * prelinkInfoSegment = NULL;// see code - /* We make some copies of data, but if anything fails we're basically - * going to fail the boot, so these won't be cleaned up on error. - */ - void * prelinkData = NULL; // see code - vm_size_t prelinkLength = 0; + /* We make some copies of data, but if anything fails we're basically + * going to fail the boot, so these won't be cleaned up on error. 
+ */ + void * prelinkData = NULL;// see code + vm_size_t prelinkLength = 0; - OSDictionary * infoDict = NULL; // do not release + OSDictionary * infoDict = NULL;// do not release - IORegistryEntry * registryRoot = NULL; // do not release - OSNumber * prelinkCountObj = NULL; // must release + IORegistryEntry * registryRoot = NULL;// do not release + OSNumber * prelinkCountObj = NULL;// must release - u_int i = 0; + u_int i = 0; #if NO_KEXTD - bool ramDiskBoot; - bool developerDevice; - bool dontLoad; + bool ramDiskBoot; + bool developerDevice; + bool dontLoad; #endif - OSData * kaslrOffsets = NULL; - unsigned long plk_segSizes[PLK_SEGMENTS]; - vm_offset_t plk_segAddrs[PLK_SEGMENTS]; - - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, - "Starting from prelinked kernel."); - - prelinkTextSegment = getsegbyname(kPrelinkTextSegment); - if (!prelinkTextSegment) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, - "Can't find prelinked kexts' text segment."); - goto finish; - } + OSData * kaslrOffsets = NULL; + unsigned long plk_segSizes[PLK_SEGMENTS]; + vm_offset_t plk_segAddrs[PLK_SEGMENTS]; + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, + "Starting from prelinked kernel."); + + prelinkTextSegment = getsegbyname(kPrelinkTextSegment); + if (!prelinkTextSegment) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, + "Can't find prelinked kexts' text segment."); + goto finish; + } #if KASLR_KEXT_DEBUG - unsigned long scratchSize; - vm_offset_t scratchAddr; - - IOLog("kaslr: prelinked kernel address info: \n"); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __TEXT \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __DATA \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __LINKEDIT \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __KLD \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __PRELINK_TEXT \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); - - scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &scratchSize); - IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __PRELINK_INFO \n", - (unsigned long)scratchAddr, - (unsigned long)(scratchAddr + scratchSize), - scratchSize); + unsigned long scratchSize; + vm_offset_t scratchAddr; + + IOLog("kaslr: prelinked kernel address info: \n"); + + scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__TEXT", 
&scratchSize); + IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __TEXT \n", + (unsigned long)scratchAddr, + (unsigned long)(scratchAddr + scratchSize), + scratchSize); + + scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__DATA", &scratchSize); + IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __DATA \n", + (unsigned long)scratchAddr, + (unsigned long)(scratchAddr + scratchSize), + scratchSize); + + scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__LINKEDIT", &scratchSize); + IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __LINKEDIT \n", + (unsigned long)scratchAddr, + (unsigned long)(scratchAddr + scratchSize), + scratchSize); + + scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__KLD", &scratchSize); + IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __KLD \n", + (unsigned long)scratchAddr, + (unsigned long)(scratchAddr + scratchSize), + scratchSize); + + scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_TEXT", &scratchSize); + IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __PRELINK_TEXT \n", + (unsigned long)scratchAddr, + (unsigned long)(scratchAddr + scratchSize), + scratchSize); + + scratchAddr = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, "__PRELINK_INFO", &scratchSize); + IOLog("kaslr: start 0x%lx end 0x%lx length %lu for __PRELINK_INFO \n", + (unsigned long)scratchAddr, + (unsigned long)(scratchAddr + scratchSize), + scratchSize); #endif - prelinkData = (void *) prelinkTextSegment->vmaddr; - prelinkLength = prelinkTextSegment->vmsize; - - /* build arrays of plk info for later use */ - const char ** segNamePtr; - - for (segNamePtr = &plk_segNames[0], i = 0; *segNamePtr && i < PLK_SEGMENTS; segNamePtr++, i++) { - plk_segSizes[i] = 0; - plk_segAddrs[i] = (vm_offset_t)getsegdatafromheader(&_mh_execute_header, *segNamePtr, &plk_segSizes[i]); - } - - - /* Unserialize the info dictionary from the prelink info section. - */ - parsedXML = OSUnserializeXML((const char *)prelinkInfoSect->addr, - &errorString); - if (parsedXML) { - prelinkInfoDict = OSDynamicCast(OSDictionary, parsedXML); - } - if (!prelinkInfoDict) { - const char * errorCString = "(unknown error)"; - - if (errorString && errorString->getCStringNoCopy()) { - errorCString = errorString->getCStringNoCopy(); - } else if (parsedXML) { - errorCString = "not a dictionary"; - } - OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, - "Error unserializing prelink plist: %s.", errorCString); - goto finish; - } + prelinkData = (void *) prelinkTextSegment->vmaddr; + prelinkLength = prelinkTextSegment->vmsize; + + /* build arrays of plk info for later use */ + const char ** segNamePtr; + + for (segNamePtr = &plk_segNames[0], i = 0; *segNamePtr && i < PLK_SEGMENTS; segNamePtr++, i++) { + plk_segSizes[i] = 0; + plk_segAddrs[i] = (vm_offset_t)getsegdatafromheader(&_mh_execute_header, *segNamePtr, &plk_segSizes[i]); + } + + + /* Unserialize the info dictionary from the prelink info section. 
+ */ + parsedXML = OSUnserializeXML((const char *)prelinkInfoSect->addr, + &errorString); + if (parsedXML) { + prelinkInfoDict = OSDynamicCast(OSDictionary, parsedXML); + } + if (!prelinkInfoDict) { + const char * errorCString = "(unknown error)"; + + if (errorString && errorString->getCStringNoCopy()) { + errorCString = errorString->getCStringNoCopy(); + } else if (parsedXML) { + errorCString = "not a dictionary"; + } + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "Error unserializing prelink plist: %s.", errorCString); + goto finish; + } #if NO_KEXTD - /* Check if we should keep developer kexts around. - * TODO: Check DeviceTree instead of a boot-arg - */ - developerDevice = true; - PE_parse_boot_argn("developer", &developerDevice, sizeof(developerDevice)); + /* Check if we should keep developer kexts around. + * TODO: Check DeviceTree instead of a boot-arg + */ + developerDevice = true; + PE_parse_boot_argn("developer", &developerDevice, sizeof(developerDevice)); - ramDiskBoot = IORamDiskBSDRoot(); + ramDiskBoot = IORamDiskBSDRoot(); #endif /* NO_KEXTD */ - /* Copy in the kernelcache UUID */ - kernelcacheUUID = OSDynamicCast(OSData, - prelinkInfoDict->getObject(kPrelinkInfoKCIDKey)); - if (kernelcacheUUID) { - if (kernelcacheUUID->getLength() != sizeof(kernelcache_uuid)) { - panic("kernelcacheUUID length is %d, expected %lu", kernelcacheUUID->getLength(), - sizeof(kernelcache_uuid)); - } else { - kernelcache_uuid_valid = TRUE; - memcpy((void *)&kernelcache_uuid, (const void *)kernelcacheUUID->getBytesNoCopy(), kernelcacheUUID->getLength()); - uuid_unparse_upper(kernelcache_uuid, kernelcache_uuid_string); - } - } - - infoDictArray = OSDynamicCast(OSArray, - prelinkInfoDict->getObject(kPrelinkInfoDictionaryKey)); - if (!infoDictArray) { - OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, - "The prelinked kernel has no kext info dictionaries"); - goto finish; - } - - /* kaslrOffsets are available use them to slide local relocations */ - kaslrOffsets = OSDynamicCast(OSData, - prelinkInfoDict->getObject(kPrelinkLinkKASLROffsetsKey)); - - /* Create dictionary of excluded kexts - */ + /* Copy in the kernelcache UUID */ + kernelcacheUUID = OSDynamicCast(OSData, + prelinkInfoDict->getObject(kPrelinkInfoKCIDKey)); + if (kernelcacheUUID) { + if (kernelcacheUUID->getLength() != sizeof(kernelcache_uuid)) { + panic("kernelcacheUUID length is %d, expected %lu", kernelcacheUUID->getLength(), + sizeof(kernelcache_uuid)); + } else { + kernelcache_uuid_valid = TRUE; + memcpy((void *)&kernelcache_uuid, (const void *)kernelcacheUUID->getBytesNoCopy(), kernelcacheUUID->getLength()); + uuid_unparse_upper(kernelcache_uuid, kernelcache_uuid_string); + } + } + + infoDictArray = OSDynamicCast(OSArray, + prelinkInfoDict->getObject(kPrelinkInfoDictionaryKey)); + if (!infoDictArray) { + OSKextLog(/* kext */ NULL, kOSKextLogErrorLevel | kOSKextLogArchiveFlag, + "The prelinked kernel has no kext info dictionaries"); + goto finish; + } + + /* kaslrOffsets are available use them to slide local relocations */ + kaslrOffsets = OSDynamicCast(OSData, + prelinkInfoDict->getObject(kPrelinkLinkKASLROffsetsKey)); + + /* Create dictionary of excluded kexts + */ #ifndef CONFIG_EMBEDDED - OSKext::createExcludeListFromPrelinkInfo(infoDictArray); + OSKext::createExcludeListFromPrelinkInfo(infoDictArray); #endif - /* Create OSKext objects for each info dictionary. 
- */ - for (i = 0; i < infoDictArray->getCount(); ++i) { - infoDict = OSDynamicCast(OSDictionary, infoDictArray->getObject(i)); - if (!infoDict) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, - "Can't find info dictionary for prelinked kext #%d.", i); - continue; - } + /* Create OSKext objects for each info dictionary. + */ + for (i = 0; i < infoDictArray->getCount(); ++i) { + infoDict = OSDynamicCast(OSDictionary, infoDictArray->getObject(i)); + if (!infoDict) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, + "Can't find info dictionary for prelinked kext #%d.", i); + continue; + } #if NO_KEXTD - dontLoad = false; - - /* If we're not on a developer device, skip and free developer kexts. - */ - if (developerDevice == false) { - OSBoolean *devOnlyBool = OSDynamicCast(OSBoolean, - infoDict->getObject(kOSBundleDeveloperOnlyKey)); - if (devOnlyBool == kOSBooleanTrue) { - dontLoad = true; - } - } - - /* Skip and free kexts that are only needed when booted from a ram disk. - */ - if (ramDiskBoot == false) { - OSBoolean *ramDiskOnlyBool = OSDynamicCast(OSBoolean, - infoDict->getObject(kOSBundleRamDiskOnlyKey)); - if (ramDiskOnlyBool == kOSBooleanTrue) { - dontLoad = true; - } - } - - if (dontLoad == true) { - OSString *bundleID = OSDynamicCast(OSString, - infoDict->getObject(kCFBundleIdentifierKey)); - if (bundleID) { - OSKextLog(NULL, kOSKextLogWarningLevel | kOSKextLogGeneralFlag, - "Kext %s not loading.", bundleID->getCStringNoCopy()); - } - - OSNumber *addressNum = OSDynamicCast(OSNumber, - infoDict->getObject(kPrelinkExecutableLoadKey)); - OSNumber *lengthNum = OSDynamicCast(OSNumber, - infoDict->getObject(kPrelinkExecutableSizeKey)); - if (addressNum && lengthNum) { + dontLoad = false; + + /* If we're not on a developer device, skip and free developer kexts. + */ + if (developerDevice == false) { + OSBoolean *devOnlyBool = OSDynamicCast(OSBoolean, + infoDict->getObject(kOSBundleDeveloperOnlyKey)); + if (devOnlyBool == kOSBooleanTrue) { + dontLoad = true; + } + } + + /* Skip and free kexts that are only needed when booted from a ram disk. 
+ */ + if (ramDiskBoot == false) { + OSBoolean *ramDiskOnlyBool = OSDynamicCast(OSBoolean, + infoDict->getObject(kOSBundleRamDiskOnlyKey)); + if (ramDiskOnlyBool == kOSBooleanTrue) { + dontLoad = true; + } + } + + if (dontLoad == true) { + OSString *bundleID = OSDynamicCast(OSString, + infoDict->getObject(kCFBundleIdentifierKey)); + if (bundleID) { + OSKextLog(NULL, kOSKextLogWarningLevel | kOSKextLogGeneralFlag, + "Kext %s not loading.", bundleID->getCStringNoCopy()); + } + + OSNumber *addressNum = OSDynamicCast(OSNumber, + infoDict->getObject(kPrelinkExecutableLoadKey)); + OSNumber *lengthNum = OSDynamicCast(OSNumber, + infoDict->getObject(kPrelinkExecutableSizeKey)); + if (addressNum && lengthNum) { #if __arm__ || __arm64__ - vm_offset_t data = ml_static_slide(addressNum->unsigned64BitValue()); - vm_size_t length = (vm_size_t) (lengthNum->unsigned32BitValue()); - ml_static_mfree(data, length); + vm_offset_t data = ml_static_slide(addressNum->unsigned64BitValue()); + vm_size_t length = (vm_size_t) (lengthNum->unsigned32BitValue()); + ml_static_mfree(data, length); #else #error Pick the right way to free prelinked data on this arch #endif - } + } - infoDictArray->removeObject(i--); - continue; - } + infoDictArray->removeObject(i--); + continue; + } #endif /* NO_KEXTD */ - /* Create the kext for the entry, then release it, because the - * kext system keeps them around until explicitly removed. - * Any creation/registration failures are already logged for us. - */ - OSKext * newKext = OSKext::withPrelinkedInfoDict(infoDict, (kaslrOffsets ? TRUE : FALSE)); - OSSafeReleaseNULL(newKext); - } - - /* slide kxld relocations */ - if (kaslrOffsets && vm_kernel_slide > 0) { - int slidKextAddrCount = 0; - int badSlideAddr = 0; - int badSlideTarget = 0; - - const kaslrPackedOffsets * myOffsets = NULL; - myOffsets = (const kaslrPackedOffsets *) kaslrOffsets->getBytesNoCopy(); - - for (uint32_t j = 0; j < myOffsets->count; j++) { - - uint64_t slideOffset = (uint64_t) myOffsets->offsetsArray[j]; - uintptr_t * slideAddr = (uintptr_t *) ((uint64_t)prelinkData + slideOffset); - int slideAddrSegIndex = -1; - int addrToSlideSegIndex = -1; - - slideAddrSegIndex = __whereIsAddr( (vm_offset_t)slideAddr, &plk_segSizes[0], &plk_segAddrs[0], PLK_SEGMENTS ); - if (slideAddrSegIndex >= 0) { - addrToSlideSegIndex = __whereIsAddr(ml_static_slide((vm_offset_t)(*slideAddr)), &plk_segSizes[0], &plk_segAddrs[0], PLK_SEGMENTS ); - if (addrToSlideSegIndex < 0) { - badSlideTarget++; - continue; - } - } - else { - badSlideAddr++; - continue; - } - - slidKextAddrCount++; - *slideAddr = ml_static_slide(*slideAddr); - } // for ... - - /* All kexts are now slid, set VM protections for them */ - OSKext::setAllVMAttributes(); - } - - /* Store the number of prelinked kexts in the registry so we can tell - * when the system has been started from a prelinked kernel. - */ - registryRoot = IORegistryEntry::getRegistryRoot(); - assert(registryRoot); - - prelinkCountObj = OSNumber::withNumber( - (unsigned long long)infoDictArray->getCount(), - 8 * sizeof(uint32_t)); - assert(prelinkCountObj); - if (prelinkCountObj) { - registryRoot->setProperty(kOSPrelinkKextCountKey, prelinkCountObj); - } - - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogGeneralFlag | kOSKextLogKextBookkeepingFlag | - kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, - "%u prelinked kexts", - infoDictArray->getCount()); + /* Create the kext for the entry, then release it, because the + * kext system keeps them around until explicitly removed. 
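/*
 * [Illustrative sketch, not part of the patch.] When a prelinked kext is
 * rejected above (developer-only or ramdisk-only), its entry is dropped
 * with infoDictArray->removeObject(i--): removal shifts later elements
 * down one slot, and decrementing i makes the loop's ++i re-examine the
 * slot that just received a new element. The same idiom over a plain
 * array:
 */
#include <stddef.h>

static size_t
remove_at(int *items, size_t count, size_t i)
{
	/* delete items[i], keeping the array packed */
	for (size_t j = i; j + 1 < count; j++) {
		items[j] = items[j + 1];
	}
	return count - 1;
}

/*
 * usage, mirroring the loop above (the unsigned wrap of i-- at zero is
 * undone by the loop's ++i before i is used again):
 *
 *	for (i = 0; i < count; ++i) {
 *		if (reject(items[i])) {
 *			count = remove_at(items, count, i--);
 *		}
 *	}
 */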
+ * Any creation/registration failures are already logged for us. + */ + OSKext * newKext = OSKext::withPrelinkedInfoDict(infoDict, (kaslrOffsets ? TRUE : FALSE)); + OSSafeReleaseNULL(newKext); + } + + /* slide kxld relocations */ + if (kaslrOffsets && vm_kernel_slide > 0) { + int slidKextAddrCount = 0; + int badSlideAddr = 0; + int badSlideTarget = 0; + + const kaslrPackedOffsets * myOffsets = NULL; + myOffsets = (const kaslrPackedOffsets *) kaslrOffsets->getBytesNoCopy(); + + for (uint32_t j = 0; j < myOffsets->count; j++) { + uint64_t slideOffset = (uint64_t) myOffsets->offsetsArray[j]; + uintptr_t * slideAddr = (uintptr_t *) ((uint64_t)prelinkData + slideOffset); + int slideAddrSegIndex = -1; + int addrToSlideSegIndex = -1; + + slideAddrSegIndex = __whereIsAddr((vm_offset_t)slideAddr, &plk_segSizes[0], &plk_segAddrs[0], PLK_SEGMENTS ); + if (slideAddrSegIndex >= 0) { + addrToSlideSegIndex = __whereIsAddr(ml_static_slide((vm_offset_t)(*slideAddr)), &plk_segSizes[0], &plk_segAddrs[0], PLK_SEGMENTS ); + if (addrToSlideSegIndex < 0) { + badSlideTarget++; + continue; + } + } else { + badSlideAddr++; + continue; + } + + slidKextAddrCount++; + *slideAddr = ml_static_slide(*slideAddr); + } // for ... + + /* All kexts are now slid, set VM protections for them */ + OSKext::setAllVMAttributes(); + } + + /* Store the number of prelinked kexts in the registry so we can tell + * when the system has been started from a prelinked kernel. + */ + registryRoot = IORegistryEntry::getRegistryRoot(); + assert(registryRoot); + + prelinkCountObj = OSNumber::withNumber( + (unsigned long long)infoDictArray->getCount(), + 8 * sizeof(uint32_t)); + assert(prelinkCountObj); + if (prelinkCountObj) { + registryRoot->setProperty(kOSPrelinkKextCountKey, prelinkCountObj); + } + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogGeneralFlag | kOSKextLogKextBookkeepingFlag | + kOSKextLogDirectoryScanFlag | kOSKextLogArchiveFlag, + "%u prelinked kexts", + infoDictArray->getCount()); #if CONFIG_KEXT_BASEMENT - /* On CONFIG_KEXT_BASEMENT systems, kexts are copied to their own - * special VM region during OSKext init time, so we can free the whole - * segment now. - */ - ml_static_mfree((vm_offset_t) prelinkData, prelinkLength); + /* On CONFIG_KEXT_BASEMENT systems, kexts are copied to their own + * special VM region during OSKext init time, so we can free the whole + * segment now. + */ + ml_static_mfree((vm_offset_t) prelinkData, prelinkLength); #endif /* __x86_64__ */ - /* Free the prelink info segment, we're done with it. - */ - prelinkInfoSegment = getsegbyname(kPrelinkInfoSegment); - if (prelinkInfoSegment) { - ml_static_mfree((vm_offset_t)prelinkInfoSegment->vmaddr, - (vm_size_t)prelinkInfoSegment->vmsize); - } + /* Free the prelink info segment, we're done with it. 
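/*
 * [Illustrative sketch, not part of the patch.] The slide loop above
 * walks a kaslrPackedOffsets blob -- a count followed by a flexible
 * array of 32-bit byte offsets into the prelinked text -- and rebases
 * the pointer-sized slot each offset names, after checking that both
 * the slot and its target fall inside known prelink segments. The core
 * of that rebase walk, with a plain additive slide standing in for
 * ml_static_slide():
 */
#include <stdint.h>

typedef struct packed_offsets {
	uint32_t count;                 /* number of offsets */
	uint32_t offsets[];             /* byte offsets of slots to slide */
} packed_offsets;

static void
slide_all(void *base, const packed_offsets *po, uintptr_t slide)
{
	for (uint32_t j = 0; j < po->count; j++) {
		uintptr_t *slot = (uintptr_t *)((char *)base + po->offsets[j]);
		*slot += slide;         /* stand-in for ml_static_slide() */
	}
}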
+ */ + prelinkInfoSegment = getsegbyname(kPrelinkInfoSegment); + if (prelinkInfoSegment) { + ml_static_mfree((vm_offset_t)prelinkInfoSegment->vmaddr, + (vm_size_t)prelinkInfoSegment->vmsize); + } finish: - OSSafeReleaseNULL(errorString); - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(theKernel); - OSSafeReleaseNULL(prelinkCountObj); - return; + OSSafeReleaseNULL(errorString); + OSSafeReleaseNULL(parsedXML); + OSSafeReleaseNULL(theKernel); + OSSafeReleaseNULL(prelinkCountObj); + return; } -static int __whereIsAddr(vm_offset_t theAddr, unsigned long * segSizes, vm_offset_t *segAddrs, int segCount) +static int +__whereIsAddr(vm_offset_t theAddr, unsigned long * segSizes, vm_offset_t *segAddrs, int segCount) { int i; @@ -574,147 +573,145 @@ static int __whereIsAddr(vm_offset_t theAddr, unsigned long * segSizes, vm_offse #define BOOTER_KEXT_PREFIX "Driver-" typedef struct _DeviceTreeBuffer { - uint32_t paddr; - uint32_t length; + uint32_t paddr; + uint32_t length; } _DeviceTreeBuffer; void KLDBootstrap::readBooterExtensions(void) { - IORegistryEntry * booterMemoryMap = NULL; // must release - OSDictionary * propertyDict = NULL; // must release - OSCollectionIterator * keyIterator = NULL; // must release - OSString * deviceTreeName = NULL; // do not release - - const _DeviceTreeBuffer * deviceTreeBuffer = NULL; // do not free - char * booterDataPtr = NULL; // do not free - OSData * booterData = NULL; // must release - - OSKext * aKext = NULL; // must release - - OSKextLog(/* kext */ NULL, - kOSKextLogProgressLevel | - kOSKextLogDirectoryScanFlag | kOSKextLogKextBookkeepingFlag, - "Reading startup extensions from booter memory."); - - booterMemoryMap = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane); - - if (!booterMemoryMap) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag | kOSKextLogDirectoryScanFlag, - "Can't read booter memory map."); - goto finish; - } - - propertyDict = booterMemoryMap->dictionaryWithProperties(); - if (!propertyDict) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogDirectoryScanFlag, - "Can't get property dictionary from memory map."); - goto finish; - } - - keyIterator = OSCollectionIterator::withCollection(propertyDict); - if (!keyIterator) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Can't allocate iterator for driver images."); - goto finish; - } - - /* Create dictionary of excluded kexts - */ + IORegistryEntry * booterMemoryMap = NULL;// must release + OSDictionary * propertyDict = NULL;// must release + OSCollectionIterator * keyIterator = NULL;// must release + OSString * deviceTreeName = NULL;// do not release + + const _DeviceTreeBuffer * deviceTreeBuffer = NULL;// do not free + char * booterDataPtr = NULL;// do not free + OSData * booterData = NULL;// must release + + OSKext * aKext = NULL;// must release + + OSKextLog(/* kext */ NULL, + kOSKextLogProgressLevel | + kOSKextLogDirectoryScanFlag | kOSKextLogKextBookkeepingFlag, + "Reading startup extensions from booter memory."); + + booterMemoryMap = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane); + + if (!booterMemoryMap) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag | kOSKextLogDirectoryScanFlag, + "Can't read booter memory map."); + goto finish; + } + + propertyDict = booterMemoryMap->dictionaryWithProperties(); + if (!propertyDict) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogDirectoryScanFlag, + "Can't get property dictionary from 
memory map."); + goto finish; + } + + keyIterator = OSCollectionIterator::withCollection(propertyDict); + if (!keyIterator) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Can't allocate iterator for driver images."); + goto finish; + } + + /* Create dictionary of excluded kexts + */ #ifndef CONFIG_EMBEDDED - OSKext::createExcludeListFromBooterData(propertyDict, keyIterator); + OSKext::createExcludeListFromBooterData(propertyDict, keyIterator); #endif - keyIterator->reset(); - - while ( ( deviceTreeName = - OSDynamicCast(OSString, keyIterator->getNextObject() ))) { - - const char * devTreeNameCString = deviceTreeName->getCStringNoCopy(); - OSData * deviceTreeEntry = OSDynamicCast(OSData, - propertyDict->getObject(deviceTreeName)); - - /* Clear out the booterData from the prior iteration. - */ - OSSafeReleaseNULL(booterData); - - /* If there is no entry for the name, we can't do much with it. */ - if (!deviceTreeEntry) { - continue; - } - - /* Make sure it is a kext */ - if (strncmp(devTreeNameCString, - BOOTER_KEXT_PREFIX, - CONST_STRLEN(BOOTER_KEXT_PREFIX))) { - continue; - } - - deviceTreeBuffer = (const _DeviceTreeBuffer *) - deviceTreeEntry->getBytesNoCopy(0, sizeof(deviceTreeBuffer)); - if (!deviceTreeBuffer) { - /* We can't get to the data, so we can't do anything, - * not even free it from physical memory (if it's there). - */ - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogDirectoryScanFlag, - "Device tree entry %s has NULL pointer.", - devTreeNameCString); - goto finish; // xxx - continue, panic? - } - - booterDataPtr = (char *)ml_static_ptovirt(deviceTreeBuffer->paddr); - if (!booterDataPtr) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogDirectoryScanFlag, - "Can't get virtual address for device tree entry %s.", - devTreeNameCString); - goto finish; - } - - /* Wrap the booter data buffer in an OSData and set a dealloc function - * so it will take care of the physical memory when freed. Kexts will - * retain the booterData for as long as they need it. Remove the entry - * from the booter memory map after this is done. - */ - booterData = OSData::withBytesNoCopy(booterDataPtr, - deviceTreeBuffer->length); - if (!booterData) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Error - Can't allocate OSData wrapper for device tree entry %s.", - devTreeNameCString); - goto finish; - } - booterData->setDeallocFunction(osdata_phys_free); - - /* Create the kext for the entry, then release it, because the - * kext system keeps them around until explicitly removed. - * Any creation/registration failures are already logged for us. - */ - OSKext * newKext = OSKext::withBooterData(deviceTreeName, booterData); - OSSafeReleaseNULL(newKext); - - booterMemoryMap->removeProperty(deviceTreeName); - - } /* while ( (deviceTreeName = OSDynamicCast(OSString, ...) ) ) */ + keyIterator->reset(); + + while ((deviceTreeName = + OSDynamicCast(OSString, keyIterator->getNextObject()))) { + const char * devTreeNameCString = deviceTreeName->getCStringNoCopy(); + OSData * deviceTreeEntry = OSDynamicCast(OSData, + propertyDict->getObject(deviceTreeName)); + + /* Clear out the booterData from the prior iteration. + */ + OSSafeReleaseNULL(booterData); + + /* If there is no entry for the name, we can't do much with it. 
*/ + if (!deviceTreeEntry) { + continue; + } + + /* Make sure it is a kext */ + if (strncmp(devTreeNameCString, + BOOTER_KEXT_PREFIX, + CONST_STRLEN(BOOTER_KEXT_PREFIX))) { + continue; + } + + deviceTreeBuffer = (const _DeviceTreeBuffer *) + deviceTreeEntry->getBytesNoCopy(0, sizeof(deviceTreeBuffer)); + if (!deviceTreeBuffer) { + /* We can't get to the data, so we can't do anything, + * not even free it from physical memory (if it's there). + */ + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogDirectoryScanFlag, + "Device tree entry %s has NULL pointer.", + devTreeNameCString); + goto finish; // xxx - continue, panic? + } + + booterDataPtr = (char *)ml_static_ptovirt(deviceTreeBuffer->paddr); + if (!booterDataPtr) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogDirectoryScanFlag, + "Can't get virtual address for device tree entry %s.", + devTreeNameCString); + goto finish; + } + + /* Wrap the booter data buffer in an OSData and set a dealloc function + * so it will take care of the physical memory when freed. Kexts will + * retain the booterData for as long as they need it. Remove the entry + * from the booter memory map after this is done. + */ + booterData = OSData::withBytesNoCopy(booterDataPtr, + deviceTreeBuffer->length); + if (!booterData) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Error - Can't allocate OSData wrapper for device tree entry %s.", + devTreeNameCString); + goto finish; + } + booterData->setDeallocFunction(osdata_phys_free); + + /* Create the kext for the entry, then release it, because the + * kext system keeps them around until explicitly removed. + * Any creation/registration failures are already logged for us. + */ + OSKext * newKext = OSKext::withBooterData(deviceTreeName, booterData); + OSSafeReleaseNULL(newKext); + + booterMemoryMap->removeProperty(deviceTreeName); + } /* while ( (deviceTreeName = OSDynamicCast(OSString, ...) ) ) */ finish: - OSSafeReleaseNULL(booterMemoryMap); - OSSafeReleaseNULL(propertyDict); - OSSafeReleaseNULL(keyIterator); - OSSafeReleaseNULL(booterData); - OSSafeReleaseNULL(aKext); - return; + OSSafeReleaseNULL(booterMemoryMap); + OSSafeReleaseNULL(propertyDict); + OSSafeReleaseNULL(keyIterator); + OSSafeReleaseNULL(booterData); + OSSafeReleaseNULL(aKext); + return; } /********************************************************************* @@ -724,65 +721,63 @@ finish: void KLDBootstrap::loadSecurityExtensions(void) { - OSDictionary * extensionsDict = NULL; // must release - OSCollectionIterator * keyIterator = NULL; // must release - OSString * bundleID = NULL; // don't release - OSKext * theKext = NULL; // don't release - OSBoolean * isSecurityKext = NULL; // don't release - - OSKextLog(/* kext */ NULL, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Loading security extensions."); - - extensionsDict = OSKext::copyKexts(); - if (!extensionsDict) { - return; - } - - keyIterator = OSCollectionIterator::withCollection(extensionsDict); - if (!keyIterator) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Failed to allocate iterator for security extensions."); - goto finish; - } - - while ((bundleID = OSDynamicCast(OSString, keyIterator->getNextObject()))) { - - const char * bundle_id = bundleID->getCStringNoCopy(); - - /* Skip extensions whose bundle IDs don't start with "com.apple.". 
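/*
 * [Illustrative sketch, not part of the patch.] The booter path above
 * deliberately wraps each Driver-* buffer with OSData::withBytesNoCopy()
 * plus a dealloc function (osdata_phys_free), so the physical pages are
 * returned exactly once -- when the last kext retaining the OSData drops
 * it. A rough C analogue of a no-copy buffer with an owner-supplied
 * release callback:
 */
#include <stddef.h>

typedef struct nocopy_buf {
	void    *bytes;                    /* borrowed, never copied */
	size_t   length;
	void   (*dealloc)(void *, size_t); /* e.g. return pages to the VM */
} nocopy_buf;

static void
nocopy_buf_release(nocopy_buf *b)
{
	if (b->dealloc != NULL) {
		b->dealloc(b->bytes, b->length);
	}
}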
- */ - if (!bundle_id || - (strncmp(bundle_id, COM_APPLE, CONST_STRLEN(COM_APPLE)) != 0)) { - - continue; - } - - theKext = OSDynamicCast(OSKext, extensionsDict->getObject(bundleID)); - if (!theKext) { - continue; - } - - isSecurityKext = OSDynamicCast(OSBoolean, - theKext->getPropertyForHostArch(kAppleSecurityExtensionKey)); - if (isSecurityKext && isSecurityKext->isTrue()) { - OSKextLog(/* kext */ NULL, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Loading security extension %s.", bundleID->getCStringNoCopy()); - OSKext::loadKextWithIdentifier(bundleID->getCStringNoCopy(), - /* allowDefer */ false); - } - } + OSDictionary * extensionsDict = NULL;// must release + OSCollectionIterator * keyIterator = NULL;// must release + OSString * bundleID = NULL;// don't release + OSKext * theKext = NULL;// don't release + OSBoolean * isSecurityKext = NULL;// don't release + + OSKextLog(/* kext */ NULL, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Loading security extensions."); + + extensionsDict = OSKext::copyKexts(); + if (!extensionsDict) { + return; + } + + keyIterator = OSCollectionIterator::withCollection(extensionsDict); + if (!keyIterator) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Failed to allocate iterator for security extensions."); + goto finish; + } + + while ((bundleID = OSDynamicCast(OSString, keyIterator->getNextObject()))) { + const char * bundle_id = bundleID->getCStringNoCopy(); + + /* Skip extensions whose bundle IDs don't start with "com.apple.". + */ + if (!bundle_id || + (strncmp(bundle_id, COM_APPLE, CONST_STRLEN(COM_APPLE)) != 0)) { + continue; + } + + theKext = OSDynamicCast(OSKext, extensionsDict->getObject(bundleID)); + if (!theKext) { + continue; + } + + isSecurityKext = OSDynamicCast(OSBoolean, + theKext->getPropertyForHostArch(kAppleSecurityExtensionKey)); + if (isSecurityKext && isSecurityKext->isTrue()) { + OSKextLog(/* kext */ NULL, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Loading security extension %s.", bundleID->getCStringNoCopy()); + OSKext::loadKextWithIdentifier(bundleID->getCStringNoCopy(), + /* allowDefer */ false); + } + } finish: - OSSafeReleaseNULL(keyIterator); - OSSafeReleaseNULL(extensionsDict); + OSSafeReleaseNULL(keyIterator); + OSSafeReleaseNULL(extensionsDict); - return; + return; } /********************************************************************* @@ -799,31 +794,29 @@ finish: OSReturn KLDBootstrap::loadKernelComponentKexts(void) { - OSReturn result = kOSReturnSuccess; // optimistic - OSKext * theKext = NULL; // must release - const char ** kextIDPtr = NULL; // do not release - - for (kextIDPtr = &sKernelComponentNames[0]; *kextIDPtr; kextIDPtr++) { - - OSSafeReleaseNULL(theKext); - theKext = OSKext::lookupKextWithIdentifier(*kextIDPtr); - - if (theKext) { - if (kOSReturnSuccess != OSKext::loadKextWithIdentifier( - *kextIDPtr, /* allowDefer */ false)) { - - // xxx - check KextBookkeeping, might be redundant - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogDirectoryScanFlag | kOSKextLogKextBookkeepingFlag, - "Failed to initialize kernel component %s.", *kextIDPtr); - result = kOSReturnError; - } - } - } - - OSSafeReleaseNULL(theKext); - return result; + OSReturn result = kOSReturnSuccess;// optimistic + OSKext * theKext = NULL; // must release + const char ** kextIDPtr = NULL; // do not release + + for (kextIDPtr = &sKernelComponentNames[0]; *kextIDPtr; kextIDPtr++) { + OSSafeReleaseNULL(theKext); + theKext = OSKext::lookupKextWithIdentifier(*kextIDPtr); + + if (theKext) { 
+ if (kOSReturnSuccess != OSKext::loadKextWithIdentifier( + *kextIDPtr, /* allowDefer */ false)) { + // xxx - check KextBookkeeping, might be redundant + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogDirectoryScanFlag | kOSKextLogKextBookkeepingFlag, + "Failed to initialize kernel component %s.", *kextIDPtr); + result = kOSReturnError; + } + } + } + + OSSafeReleaseNULL(theKext); + return result; } /********************************************************************* @@ -839,186 +832,183 @@ KLDBootstrap::loadKernelComponentKexts(void) void KLDBootstrap::loadKernelExternalComponents(void) { - OSDictionary * extensionsDict = NULL; // must release - OSCollectionIterator * keyIterator = NULL; // must release - OSString * bundleID = NULL; // don't release - OSKext * theKext = NULL; // don't release - OSBoolean * isKernelExternalComponent = NULL; // don't release - - OSKextLog(/* kext */ NULL, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Loading Kernel External Components."); - - extensionsDict = OSKext::copyKexts(); - if (!extensionsDict) { - return; - } - - keyIterator = OSCollectionIterator::withCollection(extensionsDict); - if (!keyIterator) { - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogGeneralFlag, - "Failed to allocate iterator for Kernel External Components."); - goto finish; - } - - while ((bundleID = OSDynamicCast(OSString, keyIterator->getNextObject()))) { - - const char * bundle_id = bundleID->getCStringNoCopy(); - - /* Skip extensions whose bundle IDs don't start with "com.apple.kec.". - */ - if (!bundle_id || - (strncmp(bundle_id, COM_APPLE_KEC, CONST_STRLEN(COM_APPLE_KEC)) != 0)) { - - continue; - } - - theKext = OSDynamicCast(OSKext, extensionsDict->getObject(bundleID)); - if (!theKext) { - continue; - } - - isKernelExternalComponent = OSDynamicCast(OSBoolean, - theKext->getPropertyForHostArch(kAppleKernelExternalComponentKey)); - if (isKernelExternalComponent && isKernelExternalComponent->isTrue()) { - OSKextLog(/* kext */ NULL, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Loading kernel external component %s.", bundleID->getCStringNoCopy()); - OSKext::loadKextWithIdentifier(bundleID->getCStringNoCopy(), - /* allowDefer */ false); - } - } + OSDictionary * extensionsDict = NULL;// must release + OSCollectionIterator * keyIterator = NULL;// must release + OSString * bundleID = NULL;// don't release + OSKext * theKext = NULL;// don't release + OSBoolean * isKernelExternalComponent = NULL;// don't release + + OSKextLog(/* kext */ NULL, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Loading Kernel External Components."); + + extensionsDict = OSKext::copyKexts(); + if (!extensionsDict) { + return; + } + + keyIterator = OSCollectionIterator::withCollection(extensionsDict); + if (!keyIterator) { + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogGeneralFlag, + "Failed to allocate iterator for Kernel External Components."); + goto finish; + } + + while ((bundleID = OSDynamicCast(OSString, keyIterator->getNextObject()))) { + const char * bundle_id = bundleID->getCStringNoCopy(); + + /* Skip extensions whose bundle IDs don't start with "com.apple.kec.". 
+ */ + if (!bundle_id || + (strncmp(bundle_id, COM_APPLE_KEC, CONST_STRLEN(COM_APPLE_KEC)) != 0)) { + continue; + } + + theKext = OSDynamicCast(OSKext, extensionsDict->getObject(bundleID)); + if (!theKext) { + continue; + } + + isKernelExternalComponent = OSDynamicCast(OSBoolean, + theKext->getPropertyForHostArch(kAppleKernelExternalComponentKey)); + if (isKernelExternalComponent && isKernelExternalComponent->isTrue()) { + OSKextLog(/* kext */ NULL, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Loading kernel external component %s.", bundleID->getCStringNoCopy()); + OSKext::loadKextWithIdentifier(bundleID->getCStringNoCopy(), + /* allowDefer */ false); + } + } finish: - OSSafeReleaseNULL(keyIterator); - OSSafeReleaseNULL(extensionsDict); + OSSafeReleaseNULL(keyIterator); + OSSafeReleaseNULL(extensionsDict); - return; + return; } /********************************************************************* - *********************************************************************/ +*********************************************************************/ void KLDBootstrap::readBuiltinPersonalities(void) { - OSObject * parsedXML = NULL; // must release - OSArray * builtinExtensions = NULL; // do not release - OSArray * allPersonalities = NULL; // must release - OSString * errorString = NULL; // must release - kernel_section_t * infosect = NULL; // do not free - OSCollectionIterator * personalitiesIterator = NULL; // must release - unsigned int count, i; - - OSKextLog(/* kext */ NULL, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Reading built-in kernel personalities for I/O Kit drivers."); - - /* Look in the __BUILTIN __info segment for an array of Info.plist - * entries. For each one, extract the personalities dictionary, add - * it to our array, then push them all (without matching) to - * the IOCatalogue. This can be used to augment the personalities - * in gIOKernelConfigTables, especially when linking entire kexts into - * the mach_kernel image. 
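/*
 * [Illustrative sketch, not part of the patch.] Both loaders above
 * select kexts by bundle-identifier prefix with
 * strncmp(id, PREFIX, CONST_STRLEN(PREFIX)). CONST_STRLEN is assumed
 * here to be the usual sizeof-minus-one trick over a string literal,
 * which the compiler folds to a constant, so no strlen() runs per
 * iteration:
 */
#include <string.h>

#define CONST_STRLEN(str) (sizeof(str) - 1)     /* assumed definition */
#define COM_APPLE "com.apple."

static int
is_apple_bundle(const char *bundle_id)
{
	return bundle_id != NULL &&
	       strncmp(bundle_id, COM_APPLE, CONST_STRLEN(COM_APPLE)) == 0;
}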
- */ - infosect = getsectbyname("__BUILTIN", "__info"); - if (!infosect) { - // this isn't fatal - goto finish; - } - - parsedXML = OSUnserializeXML((const char *) (uintptr_t)infosect->addr, - &errorString); - if (parsedXML) { - builtinExtensions = OSDynamicCast(OSArray, parsedXML); - } - if (!builtinExtensions) { - const char * errorCString = "(unknown error)"; - - if (errorString && errorString->getCStringNoCopy()) { - errorCString = errorString->getCStringNoCopy(); - } else if (parsedXML) { - errorCString = "not an array"; - } - OSKextLog(/* kext */ NULL, - kOSKextLogErrorLevel | - kOSKextLogLoadFlag, - "Error unserializing built-in personalities: %s.", errorCString); - goto finish; - } - - // estimate 3 personalities per Info.plist/kext - count = builtinExtensions->getCount(); - allPersonalities = OSArray::withCapacity(count * 3); - - for (i = 0; i < count; i++) { - OSDictionary * infoDict = NULL; // do not release - OSString * moduleName = NULL; // do not release - OSDictionary * personalities; // do not release - OSString * personalityName; // do not release - - OSSafeReleaseNULL(personalitiesIterator); - - infoDict = OSDynamicCast(OSDictionary, - builtinExtensions->getObject(i)); - if (!infoDict) { - continue; - } - - moduleName = OSDynamicCast(OSString, - infoDict->getObject(kCFBundleIdentifierKey)); - if (!moduleName) { - continue; - } - - OSKextLog(/* kext */ NULL, - kOSKextLogStepLevel | - kOSKextLogLoadFlag, - "Adding personalities for built-in driver %s:", - moduleName->getCStringNoCopy()); - - personalities = OSDynamicCast(OSDictionary, - infoDict->getObject("IOKitPersonalities")); - if (!personalities) { - continue; - } - - personalitiesIterator = OSCollectionIterator::withCollection(personalities); - if (!personalitiesIterator) { - continue; // xxx - well really, what can we do? should we panic? - } - - while ((personalityName = OSDynamicCast(OSString, - personalitiesIterator->getNextObject()))) { - - OSDictionary * personality = OSDynamicCast(OSDictionary, - personalities->getObject(personalityName)); - - OSKextLog(/* kext */ NULL, - kOSKextLogDetailLevel | - kOSKextLogLoadFlag, - "Adding built-in driver personality %s.", - personalityName->getCStringNoCopy()); - + OSObject * parsedXML = NULL;// must release + OSArray * builtinExtensions = NULL;// do not release + OSArray * allPersonalities = NULL;// must release + OSString * errorString = NULL;// must release + kernel_section_t * infosect = NULL;// do not free + OSCollectionIterator * personalitiesIterator = NULL;// must release + unsigned int count, i; + + OSKextLog(/* kext */ NULL, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Reading built-in kernel personalities for I/O Kit drivers."); + + /* Look in the __BUILTIN __info segment for an array of Info.plist + * entries. For each one, extract the personalities dictionary, add + * it to our array, then push them all (without matching) to + * the IOCatalogue. This can be used to augment the personalities + * in gIOKernelConfigTables, especially when linking entire kexts into + * the mach_kernel image. 
+ */ + infosect = getsectbyname("__BUILTIN", "__info"); + if (!infosect) { + // this isn't fatal + goto finish; + } + + parsedXML = OSUnserializeXML((const char *) (uintptr_t)infosect->addr, + &errorString); + if (parsedXML) { + builtinExtensions = OSDynamicCast(OSArray, parsedXML); + } + if (!builtinExtensions) { + const char * errorCString = "(unknown error)"; + + if (errorString && errorString->getCStringNoCopy()) { + errorCString = errorString->getCStringNoCopy(); + } else if (parsedXML) { + errorCString = "not an array"; + } + OSKextLog(/* kext */ NULL, + kOSKextLogErrorLevel | + kOSKextLogLoadFlag, + "Error unserializing built-in personalities: %s.", errorCString); + goto finish; + } + + // estimate 3 personalities per Info.plist/kext + count = builtinExtensions->getCount(); + allPersonalities = OSArray::withCapacity(count * 3); + + for (i = 0; i < count; i++) { + OSDictionary * infoDict = NULL;// do not release + OSString * moduleName = NULL;// do not release + OSDictionary * personalities;// do not release + OSString * personalityName;// do not release + + OSSafeReleaseNULL(personalitiesIterator); + + infoDict = OSDynamicCast(OSDictionary, + builtinExtensions->getObject(i)); + if (!infoDict) { + continue; + } + + moduleName = OSDynamicCast(OSString, + infoDict->getObject(kCFBundleIdentifierKey)); + if (!moduleName) { + continue; + } + + OSKextLog(/* kext */ NULL, + kOSKextLogStepLevel | + kOSKextLogLoadFlag, + "Adding personalities for built-in driver %s:", + moduleName->getCStringNoCopy()); + + personalities = OSDynamicCast(OSDictionary, + infoDict->getObject("IOKitPersonalities")); + if (!personalities) { + continue; + } + + personalitiesIterator = OSCollectionIterator::withCollection(personalities); + if (!personalitiesIterator) { + continue; // xxx - well really, what can we do? should we panic? 
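The readBuiltinPersonalities() routine being added here locates the __BUILTIN,__info Mach-O section with getsectbyname() and hands its bytes to OSUnserializeXML(). The same section-lookup mechanism is available to user programs; the sketch below is an illustrative user-space analogue built on getsectiondata() from <mach-o/getsect.h>, not xnu code, and the planted section contents are placeholders.

    /* User-space analogue of the kernel's getsectbyname() lookup:
     * find a named section in the running image and hand its bytes
     * to a parser (the kernel passes them to OSUnserializeXML()). */
    #include <mach-o/getsect.h>
    #include <mach-o/ldsyms.h>      /* _mh_execute_header */
    #include <stdint.h>
    #include <stdio.h>

    /* Plant placeholder data in a custom section of this binary. */
    __attribute__((used, section("__BUILTIN,__info")))
    static const char builtin_info[] = "<array></array>";

    int
    main(void)
    {
        unsigned long size = 0;
        uint8_t *data = getsectiondata(&_mh_execute_header,
            "__BUILTIN", "__info", &size);

        if (!data) {
            return 0;       /* as in the kernel code: not fatal */
        }
        printf("__BUILTIN,__info holds %lu bytes: %.*s\n",
            size, (int)size, (const char *)data);
        return 0;
    }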
+ } + + while ((personalityName = OSDynamicCast(OSString, + personalitiesIterator->getNextObject()))) { + OSDictionary * personality = OSDynamicCast(OSDictionary, + personalities->getObject(personalityName)); + + OSKextLog(/* kext */ NULL, + kOSKextLogDetailLevel | + kOSKextLogLoadFlag, + "Adding built-in driver personality %s.", + personalityName->getCStringNoCopy()); + if (personality && !personality->getObject(kCFBundleIdentifierKey)) { personality->setObject(kCFBundleIdentifierKey, moduleName); } - allPersonalities->setObject(personality); - } - } - - gIOCatalogue->addDrivers(allPersonalities, false); + allPersonalities->setObject(personality); + } + } + + gIOCatalogue->addDrivers(allPersonalities, false); finish: - OSSafeReleaseNULL(parsedXML); - OSSafeReleaseNULL(allPersonalities); - OSSafeReleaseNULL(errorString); - OSSafeReleaseNULL(personalitiesIterator); - return; + OSSafeReleaseNULL(parsedXML); + OSSafeReleaseNULL(allPersonalities); + OSSafeReleaseNULL(errorString); + OSSafeReleaseNULL(personalitiesIterator); + return; } #if PRAGMA_MARK @@ -1027,15 +1017,17 @@ finish: /********************************************************************* * Bootstrap Functions *********************************************************************/ -static void bootstrapRecordStartupExtensions(void) +static void +bootstrapRecordStartupExtensions(void) { - sBootstrapObject.readStartupExtensions(); - return; + sBootstrapObject.readStartupExtensions(); + return; } -static void bootstrapLoadSecurityExtensions(void) +static void +bootstrapLoadSecurityExtensions(void) { - sBootstrapObject.loadSecurityExtensions(); - return; + sBootstrapObject.loadSecurityExtensions(); + return; } diff --git a/libsa/lastkernelconstructor.c b/libsa/lastkernelconstructor.c index aeffadaba..7dbeedf72 100644 --- a/libsa/lastkernelconstructor.c +++ b/libsa/lastkernelconstructor.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
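Both routines above route every exit path through a finish: label and release objects with OSSafeReleaseNULL(), which tolerates NULL. For readers unfamiliar with the idiom, here is a minimal plain-C sketch of the same cleanup shape; the names and malloc() usage are illustrative, not from xnu.

    #include <stdlib.h>
    #include <string.h>

    int
    build_thing(char **out)
    {
        int   err = -1;
        char *a = NULL;
        char *b = NULL;

        a = malloc(16);
        if (!a) {
            goto finish;
        }
        b = malloc(16);
        if (!b) {
            goto finish;
        }

        strcpy(b, "result");
        *out = b;
        b = NULL;           /* ownership transferred; skip the free below */
        err = 0;

    finish:
        free(a);            /* free(NULL) is a no-op, like OSSafeReleaseNULL */
        free(b);
        return err;
    }

The payoff is a single audit point for releases, which is why the bootstrap code keeps this shape even in functions with many early exits.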
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,11 +30,11 @@ extern void iokit_post_constructor_init(void); static void last_kernel_constructor(void) __attribute__ ((constructor)); -static void last_kernel_constructor(void) +static void +last_kernel_constructor(void) { - iokit_post_constructor_init(); + iokit_post_constructor_init(); } __asm__(".globl _last_kernel_symbol"); __asm__(".zerofill __LAST, __last, _last_kernel_symbol, 0"); - diff --git a/libsa/lastkerneldataconst.c b/libsa/lastkerneldataconst.c index 580756f0f..f22e95e74 100644 --- a/libsa/lastkerneldataconst.c +++ b/libsa/lastkerneldataconst.c @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,11 +40,11 @@ #if defined(__arm64__) /* PAGE_SIZE on ARM64 is an expression derived from a non-const global variable */ -#define PAD_SIZE PAGE_MAX_SIZE +#define PAD_SIZE PAGE_MAX_SIZE #else -#define PAD_SIZE PAGE_SIZE +#define PAD_SIZE PAGE_SIZE #endif -static const uint8_t __attribute__((section("__DATA,__const"))) data_const_padding[PAD_SIZE] = {[0 ... PAD_SIZE-1] = 0xFF}; +static const uint8_t __attribute__((section("__DATA,__const"))) data_const_padding[PAD_SIZE] = {[0 ... 
PAD_SIZE - 1] = 0xFF}; const vm_offset_t __attribute__((section("__DATA,__data"))) _lastkerneldataconst = (vm_offset_t)&data_const_padding[0]; const vm_size_t __attribute__((section("__DATA,__data"))) _lastkerneldataconst_padsize = sizeof(data_const_padding); diff --git a/libsyscall/Libsyscall.xcconfig b/libsyscall/Libsyscall.xcconfig index 737c41068..7153bda3e 100644 --- a/libsyscall/Libsyscall.xcconfig +++ b/libsyscall/Libsyscall.xcconfig @@ -13,7 +13,7 @@ EXECUTABLE_PREFIX = libsystem_ PRODUCT_NAME = kernel ALWAYS_SEARCH_USER_PATHS = NO ORDER_FILE[sdk=iphoneos*] = $(SDKROOT)/$(APPLE_INTERNAL_DIR)/OrderFiles/libsystem_kernel.order -OTHER_CFLAGS = -fdollars-in-identifiers -no-cpp-precomp -fno-common -fno-stack-protector -momit-leaf-frame-pointer -DLIBSYSCALL_INTERFACE -D__DARWIN_VERS_1050=1 +OTHER_CFLAGS = -fdollars-in-identifiers -no-cpp-precomp -fno-common -fno-stack-protector -fno-stack-check -momit-leaf-frame-pointer -DLIBSYSCALL_INTERFACE -D__DARWIN_VERS_1050=1 OTHER_CFLAGS[sdk=macosx*] = $(inherited) -DSYSCALL_PRE1050 OTHER_CFLAGS[sdk=macosx*][arch=x86_64*] = $(inherited) -DNO_SYSCALL_LEGACY OTHER_CFLAGS[sdk=iphoneos*] = $(inherited) -DNO_SYSCALL_LEGACY diff --git a/libsyscall/custom/custom.s b/libsyscall/custom/custom.s index de2e0c6bd..4d8f04fed 100644 --- a/libsyscall/custom/custom.s +++ b/libsyscall/custom/custom.s @@ -96,7 +96,7 @@ __thread_set_tsd_base: .globl _i386_get_ldt ALIGN _i386_get_ldt: - movl $6,%eax + movl $SYSCALL_CONSTRUCT_MDEP(6), %eax MACHDEP_SYSCALL_TRAP jnb 2f movq %rax, %rdi @@ -107,7 +107,7 @@ _i386_get_ldt: .globl _i386_set_ldt ALIGN _i386_set_ldt: - movl $5,%eax + movl $SYSCALL_CONSTRUCT_MDEP(5), %eax MACHDEP_SYSCALL_TRAP jnb 2f movq %rax, %rdi diff --git a/libsyscall/custom/errno.c b/libsyscall/custom/errno.c index 86f754a65..88658cbe2 100644 --- a/libsyscall/custom/errno.c +++ b/libsyscall/custom/errno.c @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/mach/abort.h b/libsyscall/mach/abort.h index 5954a2187..d80b8d6c0 100644 --- a/libsyscall/mach/abort.h +++ b/libsyscall/mach/abort.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
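In the custom.s hunk above, the raw machine-dependent trap numbers 5 and 6 are replaced with SYSCALL_CONSTRUCT_MDEP(), which folds the syscall class into the high byte of the value loaded into %eax. The sketch below paraphrases the constants from osfmk/mach/i386/syscall_sw.h to show the arithmetic; verify the exact values against that header before relying on them.

    #include <stdio.h>

    /* Paraphrased from osfmk/mach/i386/syscall_sw.h. */
    #define SYSCALL_CLASS_SHIFT  24
    #define SYSCALL_NUMBER_MASK  ((1 << SYSCALL_CLASS_SHIFT) - 1)
    #define SYSCALL_CLASS_MDEP   3          /* machine-dependent class */

    #define SYSCALL_CONSTRUCT_MDEP(n) \
            ((SYSCALL_CLASS_MDEP << SYSCALL_CLASS_SHIFT) | \
             (SYSCALL_NUMBER_MASK & (n)))

    int
    main(void)
    {
        /* i386_get_ldt is machdep call 6; class 3 lands in the top byte. */
        printf("0x%08x\n", (unsigned)SYSCALL_CONSTRUCT_MDEP(6));  /* 0x03000006 */
        return 0;
    }

The same encoding shows up later in this patch: the `(1 << 24) | 70` test in libsyscall/mach/host.c matches a class-1 (Mach trap) number 70.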
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,7 +35,8 @@ extern int __kill(int pid, int signum, int posix); extern int __exit(int) __attribute__((noreturn)); static inline void __attribute__((noreturn)) -abort(void) { +abort(void) +{ (void)__kill(__getpid(), __SIGABRT, 0); __exit(1); } diff --git a/libsyscall/mach/clock_sleep.c b/libsyscall/mach/clock_sleep.c index 8cf83d094..fb0863d18 100644 --- a/libsyscall/mach/clock_sleep.c +++ b/libsyscall/mach/clock_sleep.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -32,9 +32,9 @@ kern_return_t clock_sleep(mach_port_t clock_name, - sleep_type_t clock_type, - mach_timespec_t sleep_time, - mach_timespec_t *wake_time) + sleep_type_t clock_type, + mach_timespec_t sleep_time, + mach_timespec_t *wake_time) { - return clock_sleep_trap(clock_name, clock_type, sleep_time.tv_sec, sleep_time.tv_nsec, wake_time); + return clock_sleep_trap(clock_name, clock_type, sleep_time.tv_sec, sleep_time.tv_nsec, wake_time); } diff --git a/libsyscall/mach/error_codes.c b/libsyscall/mach/error_codes.c index 085f468dc..4ec633dfa 100644 --- a/libsyscall/mach/error_codes.c +++ b/libsyscall/mach/error_codes.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
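clock_sleep() above is a thin wrapper that forwards its mach_timespec_t fields to clock_sleep_trap(). A hedged usage sketch follows; host_get_clock_service() and the clock constants are public Mach API, though the clock-service interface is deprecated on modern systems.

    #include <mach/mach.h>
    #include <mach/clock_types.h>
    #include <mach/mach_error.h>
    #include <stdio.h>

    int
    main(void)
    {
        clock_serv_t    clock;
        mach_timespec_t delay = { .tv_sec = 1, .tv_nsec = 0 };
        mach_timespec_t wake;

        /* Obtain the system clock port, then sleep 1s relative to now. */
        host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clock);
        kern_return_t kr = clock_sleep(clock, TIME_RELATIVE, delay, &wake);

        printf("clock_sleep: %s\n", mach_error_string(kr));
        mach_port_deallocate(mach_task_self(), clock);
        return 0;
    }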
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ @@ -72,7 +72,7 @@ #include "err_server.sub" #include "err_us.sub" -const struct error_system _mach_errors[err_max_system+1] = { +const struct error_system _mach_errors[err_max_system + 1] = { /* 0; err_kern */ { errlib_count(err_os_sub), @@ -150,7 +150,7 @@ const struct error_system _mach_errors[err_max_system+1] = { errlib_count(err_iokit_sub_map) }, - /* 0x39 */ errorlib_system_null, + /* 0x39 */ errorlib_system_null, /* 0x3a */ errorlib_system_null, /* 0x3b */ errorlib_system_null, /* 0x3c */ errorlib_system_null, /* 0x3d */ errorlib_system_null, /* 0x3e */ errorlib_system_null, /* 0x3f */ errorlib_system_null, diff --git a/libsyscall/mach/errorlib.h b/libsyscall/mach/errorlib.h index 931184372..f78f3d7a1 100644 --- a/libsyscall/mach/errorlib.h +++ b/libsyscall/mach/errorlib.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
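The _mach_errors[err_max_system + 1] table above is indexed by the system, subsystem, and code fields that <mach/error.h> packs into every mach_error_t: 6 bits of system, 12 of subsystem, 14 of code. A short sketch of taking one apart with the public accessor macros:

    #include <mach/error.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* An errno value wrapped as a Mach error (2 == ENOENT). */
        mach_error_t err = unix_err(2);

        printf("system %d, sub %d, code %d\n",
            err_get_system(err),        /* top 6 bits   */
            err_get_sub(err),           /* next 12 bits */
            err_get_code(err));         /* low 14 bits  */
        return 0;
    }

The err_sub_map_entry()/err_code_map_entry() helpers above build range tables for sparse subsystems, so lookups can skip unused spans instead of storing empty strings.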
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ @@ -68,47 +68,47 @@ #include #include -#define MACH_IPC_SEND_MOD (err_mach_ipc|err_sub(0)) -#define MACH_IPC_RCV_MOD (err_mach_ipc|err_sub(1)) -#define MACH_IPC_MIG_MOD (err_mach_ipc|err_sub(2)) +#define MACH_IPC_SEND_MOD (err_mach_ipc|err_sub(0)) +#define MACH_IPC_RCV_MOD (err_mach_ipc|err_sub(1)) +#define MACH_IPC_MIG_MOD (err_mach_ipc|err_sub(2)) -#define IPC_SEND_MOD (err_ipc|err_sub(0)) -#define IPC_RCV_MOD (err_ipc|err_sub(1)) -#define IPC_MIG_MOD (err_ipc|err_sub(2)) +#define IPC_SEND_MOD (err_ipc|err_sub(0)) +#define IPC_RCV_MOD (err_ipc|err_sub(1)) +#define IPC_MIG_MOD (err_ipc|err_sub(2)) -#define SERV_NETNAME_MOD (err_server|err_sub(0)) -#define SERV_ENV_MOD (err_server|err_sub(1)) -#define SERV_EXECD_MOD (err_server|err_sub(2)) +#define SERV_NETNAME_MOD (err_server|err_sub(0)) +#define SERV_ENV_MOD (err_server|err_sub(1)) +#define SERV_EXECD_MOD (err_server|err_sub(2)) -#define NO_SUCH_ERROR "unknown error code" +#define NO_SUCH_ERROR "unknown error code" struct error_sparse_map { - unsigned start; - unsigned end; + unsigned start; + unsigned end; }; #define err_sub_map_entry(start, end) { err_get_sub(start), err_get_sub(end) } #define err_code_map_entry(start, end) { err_get_code(start), err_get_code(end) } struct error_subsystem { - const char *subsys_name; - int max_code; - const char * const *codes; - const struct error_sparse_map *map_table; - int map_count; + const char *subsys_name; + int max_code; + const char * const *codes; + const struct error_sparse_map *map_table; + int map_count; }; -#define errorlib_system_null { 0, NULL, NULL, NULL, 0 } +#define errorlib_system_null { 0, NULL, NULL, NULL, 0 } struct error_system { - int max_sub; - const char *bad_sub; - const struct error_subsystem *subsystem; - const struct error_sparse_map *map_table; - int map_count; + int max_sub; + const char *bad_sub; + const struct error_subsystem *subsystem; + const struct error_sparse_map *map_table; + int map_count; }; -#define 
errorlib_sub_null { NULL, 0, NULL, NULL, 0 } +#define errorlib_sub_null { NULL, 0, NULL, NULL, 0 } -extern const struct error_system _mach_errors[err_max_system+1]; +extern const struct error_system _mach_errors[err_max_system + 1]; char *mach_error_string_int(mach_error_t, boolean_t *); -#define errlib_count(s) (sizeof(s)/sizeof(s[0])) +#define errlib_count(s) (sizeof(s)/sizeof(s[0])) diff --git a/libsyscall/mach/exc_catcher.c b/libsyscall/mach/exc_catcher.c index 9915eb2a0..a0d9f7165 100644 --- a/libsyscall/mach/exc_catcher.c +++ b/libsyscall/mach/exc_catcher.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -41,16 +41,16 @@ __private_extern__ kern_return_t internal_catch_exception_raise( - mach_port_t exception_port, - mach_port_t thread, - mach_port_t task, - exception_type_t exception, - exception_data_t code, - mach_msg_type_number_t codeCnt) + mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + exception_data_t code, + mach_msg_type_number_t codeCnt) { #if defined(__DYNAMIC__) static _libkernel_exc_raise_func_t exc_raise_func = (void*)-1; - + if (exc_raise_func == ((void*)-1)) { exc_raise_func = _dlsym(RTLD_DEFAULT, "catch_exception_raise"); } @@ -64,4 +64,3 @@ internal_catch_exception_raise( return catch_exception_raise(exception_port, thread, task, exception, code, codeCnt); #endif } - diff --git a/libsyscall/mach/exc_catcher.h b/libsyscall/mach/exc_catcher.h index a7db99753..ab1354bc1 100644 --- a/libsyscall/mach/exc_catcher.h +++ b/libsyscall/mach/exc_catcher.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
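internal_catch_exception_raise() above caches a one-time _dlsym(RTLD_DEFAULT, "catch_exception_raise") lookup in a static pointer seeded with (void *)-1, keeping "not yet resolved" distinct from "the hosting binary does not define it". Here is the same pattern with the public dlsym(); the hook name is illustrative.

    #include <dlfcn.h>
    #include <stdio.h>

    typedef void (*hook_fn_t)(void);

    static void
    call_user_hook(void)
    {
        /* (void *)-1 == never looked up; NULL == looked up, absent. */
        static hook_fn_t hook = (hook_fn_t)(void *)-1;

        if (hook == (hook_fn_t)(void *)-1) {
            hook = (hook_fn_t)dlsym(RTLD_DEFAULT, "my_user_hook");
        }
        if (hook == NULL) {
            /* exc_catcher.c aborts here: a catcher is mandatory. */
            fprintf(stderr, "my_user_hook not defined\n");
            return;
        }
        hook();
    }

    int
    main(void)
    {
        call_user_hook();
        return 0;
    }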
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,34 +31,34 @@ #include "_libkernel_init.h" -typedef kern_return_t (*_libkernel_exc_raise_func_t)(mach_port_t, - mach_port_t, - mach_port_t, - exception_type_t, - exception_data_t, - mach_msg_type_number_t); +typedef kern_return_t (*_libkernel_exc_raise_func_t)(mach_port_t, + mach_port_t, + mach_port_t, + exception_type_t, + exception_data_t, + mach_msg_type_number_t); -typedef kern_return_t (*_libkernel_exc_raise_state_func_t)(mach_port_t, - exception_type_t, - exception_data_t, - mach_msg_type_number_t, - int *, - thread_state_t, - mach_msg_type_number_t, - thread_state_t, - mach_msg_type_number_t *); +typedef kern_return_t (*_libkernel_exc_raise_state_func_t)(mach_port_t, + exception_type_t, + exception_data_t, + mach_msg_type_number_t, + int *, + thread_state_t, + mach_msg_type_number_t, + thread_state_t, + mach_msg_type_number_t *); -typedef kern_return_t (*_libkernel_exec_raise_state_identity_t)(mach_port_t, - mach_port_t, mach_port_t, - exception_type_t, - exception_data_t, - mach_msg_type_number_t, - int *, thread_state_t, - mach_msg_type_number_t, - thread_state_t, - mach_msg_type_number_t *); +typedef kern_return_t (*_libkernel_exec_raise_state_identity_t)(mach_port_t, + mach_port_t, mach_port_t, + exception_type_t, + exception_data_t, + mach_msg_type_number_t, + int *, thread_state_t, + mach_msg_type_number_t, + thread_state_t, + mach_msg_type_number_t *); -#define RTLD_DEFAULT ((void *) -2) +#define RTLD_DEFAULT ((void *) -2) extern void* (*_dlsym)(void*, const char*); #endif // __EXC_CATCHER_H diff --git a/libsyscall/mach/exc_catcher_state.c b/libsyscall/mach/exc_catcher_state.c index c6674d56c..deedf57d1 100644 --- a/libsyscall/mach/exc_catcher_state.c +++ b/libsyscall/mach/exc_catcher_state.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -42,19 +42,19 @@ __private_extern__ kern_return_t internal_catch_exception_raise_state( - mach_port_t exception_port, - exception_type_t exception, - exception_data_t code, - mach_msg_type_number_t codeCnt, - int *flavor, - thread_state_t old_state, - mach_msg_type_number_t old_stateCnt, - thread_state_t new_state, - mach_msg_type_number_t *new_stateCnt) + mach_port_t exception_port, + exception_type_t exception, + exception_data_t code, + mach_msg_type_number_t codeCnt, + int *flavor, + thread_state_t old_state, + mach_msg_type_number_t old_stateCnt, + thread_state_t new_state, + mach_msg_type_number_t *new_stateCnt) { #if defined(__DYNAMIC__) static _libkernel_exc_raise_state_func_t exc_raise_state_func = (void*)-1; - + if (exc_raise_state_func == ((void*)-1)) { exc_raise_state_func = _dlsym(RTLD_DEFAULT, "catch_exception_raise_state"); } @@ -68,4 +68,3 @@ internal_catch_exception_raise_state( return catch_exception_raise_state(exception_port, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt); #endif } - diff --git a/libsyscall/mach/exc_catcher_state_identity.c b/libsyscall/mach/exc_catcher_state_identity.c index b92f5892e..1eac28e6c 100644 --- a/libsyscall/mach/exc_catcher_state_identity.c +++ b/libsyscall/mach/exc_catcher_state_identity.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -42,24 +42,24 @@ __private_extern__ kern_return_t internal_catch_exception_raise_state_identity( - mach_port_t exception_port, - mach_port_t thread, - mach_port_t task, - exception_type_t exception, - exception_data_t code, - mach_msg_type_number_t codeCnt, - int *flavor, - thread_state_t old_state, - mach_msg_type_number_t old_stateCnt, - thread_state_t new_state, - mach_msg_type_number_t *new_stateCnt) + mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + exception_data_t code, + mach_msg_type_number_t codeCnt, + int *flavor, + thread_state_t old_state, + mach_msg_type_number_t old_stateCnt, + thread_state_t new_state, + mach_msg_type_number_t *new_stateCnt) { #if defined(__DYNAMIC__) static _libkernel_exec_raise_state_identity_t exc_raise_state_identity_func = (void*)-1; - + if (exc_raise_state_identity_func == ((void*)-1)) { exc_raise_state_identity_func = _dlsym(RTLD_DEFAULT, "catch_exception_raise_state_identity"); - } + } if (exc_raise_state_identity_func == 0) { /* The user hasn't defined catch_exception_raise in their binary */ abort(); @@ -70,4 +70,3 @@ internal_catch_exception_raise_state_identity( return catch_exception_raise_state_identity(exception_port, thread, task, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt); #endif } - diff --git a/libsyscall/mach/externs.h b/libsyscall/mach/externs.h index 765140455..398f64585 100644 --- a/libsyscall/mach/externs.h +++ b/libsyscall/mach/externs.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/libsyscall/mach/fprintf_stderr.c b/libsyscall/mach/fprintf_stderr.c index 4d92bfc1c..53f7b4cc3 100644 --- a/libsyscall/mach/fprintf_stderr.c +++ b/libsyscall/mach/fprintf_stderr.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ - * + * */ #include @@ -37,7 +37,7 @@ int (*vprintf_stderr_func)(const char *format, va_list ap); -#define __STDERR_FILENO 2 +#define __STDERR_FILENO 2 int write(int fd, const char* cbuf, int nbyte); /* This function allows the writing of a mach error message to an diff --git a/libsyscall/mach/host.c b/libsyscall/mach/host.c index 651d148d8..2aa1c8423 100644 --- a/libsyscall/mach/host.c +++ b/libsyscall/mach/host.c @@ -35,7 +35,7 @@ kern_return_t host_get_atm_diagnostic_flag(host_t host __unused, - uint32_t *diagnostic_flag) + uint32_t *diagnostic_flag) { volatile uint32_t *diagnostic_flag_address = (volatile uint32_t *)(uintptr_t)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG); *diagnostic_flag = *diagnostic_flag_address; @@ -44,7 +44,7 @@ host_get_atm_diagnostic_flag(host_t host __unused, kern_return_t host_get_multiuser_config_flags(host_t host __unused, - uint32_t *multiuser_flags) + uint32_t *multiuser_flags) { #if TARGET_OS_EMBEDDED volatile uint32_t *multiuser_flag_address = (volatile uint32_t *)(uintptr_t)(_COMM_PAGE_MULTIUSER_CONFIG); @@ -58,15 +58,16 @@ host_get_multiuser_config_flags(host_t host __unused, kern_return_t host_check_multiuser_mode(host_t host __unused, - uint32_t *multiuser_mode) + uint32_t *multiuser_mode) { #if TARGET_OS_EMBEDDED uint32_t multiuser_flags; kern_return_t kr; kr = host_get_multiuser_config_flags(host, &multiuser_flags); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } *multiuser_mode = (multiuser_flags & kIsMultiUserDevice) == kIsMultiUserDevice; return KERN_SUCCESS; #else @@ -77,15 +78,15 @@ host_check_multiuser_mode(host_t host __unused, extern kern_return_t _kernelrpc_host_create_mach_voucher(mach_port_name_t host, - mach_voucher_attr_raw_recipe_array_t recipes, - mach_voucher_attr_recipe_size_t recipesCnt, - mach_port_name_t *voucher); + mach_voucher_attr_raw_recipe_array_t recipes, + mach_voucher_attr_recipe_size_t recipesCnt, + mach_port_name_t *voucher); kern_return_t host_create_mach_voucher(mach_port_name_t host, - mach_voucher_attr_raw_recipe_array_t recipes, - mach_voucher_attr_recipe_size_t recipesCnt, - mach_port_name_t *voucher) + mach_voucher_attr_raw_recipe_array_t recipes, + mach_voucher_attr_recipe_size_t recipesCnt, + mach_port_name_t *voucher) { kern_return_t rv; @@ -93,16 +94,19 @@ host_create_mach_voucher(mach_port_name_t host, #ifdef __x86_64__ /* REMOVE once XBS kernel has new trap */ - if (rv == ((1 << 24) | 70)) /* see mach/i386/syscall_sw.h */ + if (rv == ((1 << 24) | 70)) { /* see mach/i386/syscall_sw.h */ rv = MACH_SEND_INVALID_DEST; + } #elif defined(__i386__) /* REMOVE once XBS kernel has new trap */ - if (rv == (kern_return_t)(-70)) + if (rv == (kern_return_t)(-70)) { rv = MACH_SEND_INVALID_DEST; + } #endif - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_host_create_mach_voucher(host, recipes, recipesCnt, voucher); + } return rv; } diff --git 
a/libsyscall/mach/mach/errorlib.h b/libsyscall/mach/mach/errorlib.h index a5b6daf32..88bdc138f 100644 --- a/libsyscall/mach/mach/errorlib.h +++ b/libsyscall/mach/mach/errorlib.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. 
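The host_create_mach_voucher() shim in the libsyscall/mach/host.c hunk above tries the fast trap first, normalizes the "kernel lacks this trap" results to MACH_SEND_INVALID_DEST, and only then falls back to the slower MIG routine. Below is a generic sketch of that fallback shape; both callees are hypothetical stubs, not real xnu entry points.

    #include <mach/mach.h>
    #include <stdio.h>

    /* Hypothetical fast path: pretend the running kernel lacks the trap. */
    static kern_return_t
    my_feature_trap(int arg)
    {
        (void)arg;
        return MACH_SEND_INVALID_DEST;
    }

    /* Hypothetical MIG fallback, always available. */
    static kern_return_t
    my_feature_mig(int arg)
    {
        (void)arg;
        return KERN_SUCCESS;
    }

    static kern_return_t
    my_feature(int arg)
    {
        kern_return_t kr = my_feature_trap(arg);

        if (kr == MACH_SEND_INVALID_DEST) {
            kr = my_feature_mig(arg);   /* slower, but works everywhere */
        }
        return kr;
    }

    int
    main(void)
    {
        printf("kr = %d\n", my_feature(0));
        return 0;
    }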
*/ @@ -65,37 +65,37 @@ #include -#define MACH_IPC_SEND_MOD (err_mach_ipc|err_sub(0)) -#define MACH_IPC_RCV_MOD (err_mach_ipc|err_sub(1)) -#define MACH_IPC_MIG_MOD (err_mach_ipc|err_sub(2)) +#define MACH_IPC_SEND_MOD (err_mach_ipc|err_sub(0)) +#define MACH_IPC_RCV_MOD (err_mach_ipc|err_sub(1)) +#define MACH_IPC_MIG_MOD (err_mach_ipc|err_sub(2)) -#define IPC_SEND_MOD (err_ipc|err_sub(0)) -#define IPC_RCV_MOD (err_ipc|err_sub(1)) -#define IPC_MIG_MOD (err_ipc|err_sub(2)) +#define IPC_SEND_MOD (err_ipc|err_sub(0)) +#define IPC_RCV_MOD (err_ipc|err_sub(1)) +#define IPC_MIG_MOD (err_ipc|err_sub(2)) -#define SERV_NETNAME_MOD (err_server|err_sub(0)) -#define SERV_ENV_MOD (err_server|err_sub(1)) -#define SERV_EXECD_MOD (err_server|err_sub(2)) +#define SERV_NETNAME_MOD (err_server|err_sub(0)) +#define SERV_ENV_MOD (err_server|err_sub(1)) +#define SERV_EXECD_MOD (err_server|err_sub(2)) -#define NO_SUCH_ERROR "unknown error code" +#define NO_SUCH_ERROR "unknown error code" struct error_subsystem { - const char *subsys_name; - int max_code; - const char * const *codes; + const char *subsys_name; + int max_code; + const char * const *codes; }; struct error_system { - int max_sub; - const char *bad_sub; - const struct error_subsystem *subsystem; + int max_sub; + const char *bad_sub; + const struct error_subsystem *subsystem; }; #include __BEGIN_DECLS -extern const struct error_system errors[err_max_system+1]; +extern const struct error_system errors[err_max_system + 1]; __END_DECLS -#define errlib_count(s) (sizeof(s)/sizeof(s[0])) +#define errlib_count(s) (sizeof(s)/sizeof(s[0])) diff --git a/libsyscall/mach/mach/mach.h b/libsyscall/mach/mach/mach.h index fbe13755a..9ebf5c8c7 100644 --- a/libsyscall/mach/mach/mach.h +++ b/libsyscall/mach/mach/mach.h @@ -2,7 +2,7 @@ * Copyright (c) 1999-2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -/* +/* * Includes all the types that a normal user * of Mach programs should need */ -#ifndef _MACH_H_ -#define _MACH_H_ +#ifndef _MACH_H_ +#define _MACH_H_ #define __MACH30__ #define MACH_IPC_FLAVOR UNTYPED @@ -70,7 +70,7 @@ #include #include -#include /* for compatibility only */ +#include /* for compatibility only */ #include #include @@ -82,66 +82,66 @@ __BEGIN_DECLS /* * Standard prototypes */ -extern void panic_init(mach_port_t); -extern void panic(const char *, ...); +extern void panic_init(mach_port_t); +extern void panic(const char *, ...); -extern void safe_gets(char *, - char *, - int); +extern void safe_gets(char *, + char *, + int); -extern void slot_name(cpu_type_t, - cpu_subtype_t, - char **, - char **); +extern void slot_name(cpu_type_t, + cpu_subtype_t, + char **, + char **); -extern void mig_reply_setup(mach_msg_header_t *, - mach_msg_header_t *); +extern void mig_reply_setup(mach_msg_header_t *, + mach_msg_header_t *); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern void mach_msg_destroy(mach_msg_header_t *); +extern void mach_msg_destroy(mach_msg_header_t *); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern mach_msg_return_t mach_msg_receive(mach_msg_header_t *); +extern mach_msg_return_t mach_msg_receive(mach_msg_header_t *); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern mach_msg_return_t mach_msg_send(mach_msg_header_t *); +extern mach_msg_return_t mach_msg_send(mach_msg_header_t *); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern mach_msg_return_t mach_msg_server_once(boolean_t (*) - (mach_msg_header_t *, - mach_msg_header_t *), - mach_msg_size_t, - mach_port_t, - mach_msg_options_t); +extern mach_msg_return_t mach_msg_server_once(boolean_t (*) + (mach_msg_header_t *, + mach_msg_header_t *), + mach_msg_size_t, + mach_port_t, + mach_msg_options_t); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern mach_msg_return_t mach_msg_server(boolean_t (*) - (mach_msg_header_t *, - mach_msg_header_t *), - mach_msg_size_t, - mach_port_t, - mach_msg_options_t); +extern mach_msg_return_t mach_msg_server(boolean_t (*) + (mach_msg_header_t *, + mach_msg_header_t *), + mach_msg_size_t, + mach_port_t, + mach_msg_options_t); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern mach_msg_return_t mach_msg_server_importance(boolean_t (*) - (mach_msg_header_t *, - mach_msg_header_t *), - mach_msg_size_t, - mach_port_t, - mach_msg_options_t); +extern mach_msg_return_t mach_msg_server_importance(boolean_t (*) + (mach_msg_header_t *, + mach_msg_header_t *), + mach_msg_size_t, + mach_port_t, + mach_msg_options_t); /* * Prototypes for compatibility */ -extern kern_return_t clock_get_res(mach_port_t, - clock_res_t *); -extern kern_return_t clock_set_res(mach_port_t, - clock_res_t); +extern kern_return_t clock_get_res(mach_port_t, + clock_res_t *); +extern kern_return_t clock_set_res(mach_port_t, + clock_res_t); -extern kern_return_t clock_sleep(mach_port_t, - int, - mach_timespec_t, - mach_timespec_t *); +extern kern_return_t clock_sleep(mach_port_t, + int, + mach_timespec_t, + mach_timespec_t *); /*! 
* @group voucher_mach_msg Prototypes @@ -242,4 +242,4 @@ extern void voucher_mach_msg_revert(voucher_mach_msg_state_t state); __END_DECLS -#endif /* _MACH_H_ */ +#endif /* _MACH_H_ */ diff --git a/libsyscall/mach/mach/mach_error.h b/libsyscall/mach/mach/mach_error.h index 5840bd575..538b5cb1b 100644 --- a/libsyscall/mach/mach/mach_error.h +++ b/libsyscall/mach/mach/mach_error.h @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,31 +22,31 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. 
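mach.h above declares the mach_msg_server() family, whose first argument is a MIG-style demux callback taking the request and reply headers. The skeleton below shows the expected shape under those declarations; a real demux is MIG-generated and dispatches on msgh_id, while this stub answers everything with MIG_BAD_ID. Treat it as a sketch, not a reference server: it blocks until one message arrives.

    #include <mach/mach.h>
    #include <mach/mig_errors.h>
    #include <stdio.h>

    /* Reject every request, the way a MIG demux signals "no such routine". */
    static boolean_t
    demux(mach_msg_header_t *request, mach_msg_header_t *reply)
    {
        mig_reply_setup(request, reply);    /* declared in mach.h above */
        ((mig_reply_error_t *)reply)->RetCode = MIG_BAD_ID;
        return FALSE;
    }

    int
    main(void)
    {
        mach_port_t port;

        mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
        /* Serve exactly one message on `port`, then return. */
        kern_return_t kr = mach_msg_server_once(demux, 4096, port, 0);
        printf("mach_msg_server_once: 0x%x\n", (unsigned)kr);
        return 0;
    }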
*/ @@ -59,35 +59,35 @@ * Definitions of routines in mach_error.c */ -#ifndef _MACH_ERROR_ -#define _MACH_ERROR_ 1 +#ifndef _MACH_ERROR_ +#define _MACH_ERROR_ 1 #include #include __BEGIN_DECLS -char *mach_error_string( +char *mach_error_string( /* * Returns a string appropriate to the error argument given */ mach_error_t error_value - ); + ); -void mach_error( +void mach_error( /* * Prints an appropriate message on the standard error stream */ - const char *str, - mach_error_t error_value - ); + const char *str, + mach_error_t error_value + ); -char *mach_error_type( +char *mach_error_type( /* * Returns a string with the error system, subsystem and code */ - mach_error_t error_value - ); + mach_error_t error_value + ); __END_DECLS -#endif /* _MACH_ERROR_ */ +#endif /* _MACH_ERROR_ */ diff --git a/libsyscall/mach/mach/mach_init.h b/libsyscall/mach/mach/mach_init.h index 85e8319ab..4d9d51f46 100644 --- a/libsyscall/mach/mach/mach_init.h +++ b/libsyscall/mach/mach/mach_init.h @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,31 +22,31 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987,1986 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -55,8 +55,8 @@ * Items provided by the Mach environment initialization. 
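mach_error.h above declares the three classic formatting helpers: mach_error_string(), mach_error(), and mach_error_type(). A short usage sketch that provokes a predictable failure and prints it:

    #include <mach/mach.h>
    #include <mach/mach_error.h>

    int
    main(void)
    {
        /* Deallocate a name that is almost certainly not a valid port. */
        kern_return_t kr = mach_port_deallocate(mach_task_self(), 0xDEAD);

        if (kr != KERN_SUCCESS) {
            /* Prints our prefix plus "(os/kern) invalid name" or similar. */
            mach_error("mach_port_deallocate:", kr);
        }
        return 0;
    }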
*/ -#ifndef _MACH_INIT_ -#define _MACH_INIT_ 1 +#ifndef _MACH_INIT_ +#define _MACH_INIT_ 1 #include #include @@ -73,9 +73,9 @@ extern mach_port_t mach_host_self(void); extern mach_port_t mach_thread_self(void); extern kern_return_t host_page_size(host_t, vm_size_t *); -extern mach_port_t mach_task_self_; -#define mach_task_self() mach_task_self_ -#define current_task() mach_task_self() +extern mach_port_t mach_task_self_; +#define mach_task_self() mach_task_self_ +#define current_task() mach_task_self() __END_DECLS #include @@ -85,18 +85,18 @@ __BEGIN_DECLS * Other important ports in the Mach user environment */ -extern mach_port_t bootstrap_port; +extern mach_port_t bootstrap_port; /* * Where these ports occur in the "mach_ports_register" * collection... only servers or the runtime library need know. */ -#define NAME_SERVER_SLOT 0 -#define ENVIRONMENT_SLOT 1 -#define SERVICE_SLOT 2 +#define NAME_SERVER_SLOT 0 +#define ENVIRONMENT_SLOT 1 +#define SERVICE_SLOT 2 -#define MACH_PORTS_SLOTS_USED 3 +#define MACH_PORTS_SLOTS_USED 3 /* * fprintf_stderr uses vprintf_stderr_func to produce @@ -107,4 +107,4 @@ extern int (*vprintf_stderr_func)(const char *format, va_list ap); __END_DECLS -#endif /* _MACH_INIT_ */ +#endif /* _MACH_INIT_ */ diff --git a/libsyscall/mach/mach/mach_interface.h b/libsyscall/mach/mach/mach_interface.h index b0f7a01c9..e6c6b7acf 100644 --- a/libsyscall/mach/mach/mach_interface.h +++ b/libsyscall/mach/mach/mach_interface.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,7 +36,7 @@ * wraps up all the new interface headers generated from * each of the new .defs resulting from that decomposition. 
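As the mach_init.h hunk above shows, mach_task_self() is only a macro that reads the cached mach_task_self_ port populated during process startup, and host_page_size() is an ordinary out-parameter call. A minimal sketch exercising both:

    #include <mach/mach.h>
    #include <stdio.h>

    int
    main(void)
    {
        vm_size_t page_size = 0;

        host_page_size(mach_host_self(), &page_size);
        printf("task port %u, page size %lu\n",
            mach_task_self(), (unsigned long)page_size);
        return 0;
    }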
*/ -#ifndef _MACH_INTERFACE_H_ +#ifndef _MACH_INTERFACE_H_ #define _MACH_INTERFACE_H_ #include diff --git a/libsyscall/mach/mach/mach_right_private.h b/libsyscall/mach/mach/mach_right_private.h index c8d9a6195..eb2277761 100644 --- a/libsyscall/mach/mach/mach_right_private.h +++ b/libsyscall/mach/mach/mach_right_private.h @@ -55,7 +55,7 @@ typedef struct _mach_right_recv { #define MACH_RIGHT_RECV_NULL (mach_right_recv_t{MACH_PORT_NULL}) #elif defined(__cplusplus) #define MACH_RIGHT_RECV_NULL \ - (mach_right_recv_t((mach_right_recv_t){MACH_PORT_NULL})) + (mach_right_recv_t((mach_right_recv_t){MACH_PORT_NULL})) #else #define MACH_RIGHT_RECV_NULL {MACH_PORT_NULL} #endif @@ -78,7 +78,7 @@ typedef struct _mach_right_send { #define MACH_RIGHT_SEND_NULL (mach_right_send_t{MACH_PORT_NULL}) #elif defined(__cplusplus) #define MACH_RIGHT_SEND_NULL \ - (mach_right_send_t((mach_right_send_t){MACH_PORT_NULL})) + (mach_right_send_t((mach_right_send_t){MACH_PORT_NULL})) #else #define MACH_RIGHT_SEND_NULL {MACH_PORT_NULL} #endif @@ -101,7 +101,7 @@ typedef struct _mach_right_send_once { #define MACH_RIGHT_SEND_ONCE_NULL (mach_right_send_once_t{MACH_PORT_NULL}) #elif defined(__cplusplus) #define MACH_RIGHT_SEND_ONCE_NULL \ - (mach_right_send_once_t((mach_right_send_once_t){MACH_PORT_NULL})) + (mach_right_send_once_t((mach_right_send_once_t){MACH_PORT_NULL})) #else #define MACH_RIGHT_SEND_ONCE_NULL {MACH_PORT_NULL} #endif @@ -209,9 +209,9 @@ mach_right_send_once_valid(mach_right_send_once_t mrso) * destruction. */ OS_ENUM(mach_right_flags, uint64_t, - MACH_RIGHT_RECV_FLAG_INIT = 0, - MACH_RIGHT_RECV_FLAG_UNGUARDED = (1 << 0), -); + MACH_RIGHT_RECV_FLAG_INIT = 0, + MACH_RIGHT_RECV_FLAG_UNGUARDED = (1 << 0), + ); /*! * @function mach_right_recv_construct @@ -244,7 +244,7 @@ OS_ENUM(mach_right_flags, uint64_t, OS_EXPORT OS_WARN_RESULT mach_right_recv_t mach_right_recv_construct(mach_right_flags_t flags, - mach_right_send_t *_Nullable sr, uintptr_t ctx); + mach_right_send_t *_Nullable sr, uintptr_t ctx); /*! * @function mach_right_recv_destruct @@ -280,7 +280,7 @@ mach_right_recv_construct(mach_right_flags_t flags, OS_EXPORT void mach_right_recv_destruct(mach_right_recv_t r, mach_right_send_t *_Nullable s, - uintptr_t ctx); + uintptr_t ctx); /*! * @function mach_right_send_create diff --git a/libsyscall/mach/mach/mach_sync_ipc.h b/libsyscall/mach/mach/mach_sync_ipc.h index 032e7acb1..972de51fa 100644 --- a/libsyscall/mach/mach/mach_sync_ipc.h +++ b/libsyscall/mach/mach/mach_sync_ipc.h @@ -2,7 +2,7 @@ * Copyright (c) 2018 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,37 +22,37 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifndef _MACH_SYNC_IPC_H_ -#define _MACH_SYNC_IPC_H_ +#ifndef _MACH_SYNC_IPC_H_ +#define _MACH_SYNC_IPC_H_ #include @@ -130,4 +130,4 @@ extern void mig_dealloc_special_reply_port(mach_port_t migport); __END_DECLS -#endif /* _MACH_SYNC_IPC_H_ */ +#endif /* _MACH_SYNC_IPC_H_ */ diff --git a/libsyscall/mach/mach/port_obj.h b/libsyscall/mach/mach/port_obj.h index d2b5f89a8..99165c745 100644 --- a/libsyscall/mach/mach/port_obj.h +++ b/libsyscall/mach/mach/port_obj.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -56,48 +56,48 @@ extern int port_obj_table_size; #ifndef PORT_OBJ_ASSERT -#define port_set_obj_value_type(pname, value, type) \ -do { \ - int ndx; \ - \ - if (!port_obj_table) \ - port_obj_init(port_obj_table_size); \ - ndx = MACH_PORT_INDEX(pname); \ - port_obj_table[ndx].pos_value = (value); \ - port_obj_table[ndx].pos_type = (type); \ +#define port_set_obj_value_type(pname, value, type) \ +do { \ + int ndx; \ + \ + if (!port_obj_table) \ + port_obj_init(port_obj_table_size); \ + ndx = MACH_PORT_INDEX(pname); \ + port_obj_table[ndx].pos_value = (value); \ + port_obj_table[ndx].pos_type = (type); \ } while (0) -#define port_get_obj_value(pname) \ +#define port_get_obj_value(pname) \ (port_obj_table[MACH_PORT_INDEX(pname)].pos_value) -#define port_get_obj_type(pname) \ +#define port_get_obj_type(pname) \ (port_obj_table[MACH_PORT_INDEX(pname)].pos_type) -#else /* PORT_OBJ_ASSERT */ - -#define port_set_obj_value_type(pname, value, type) \ -do { \ - int ndx; \ - \ - if (!port_obj_table) \ - port_obj_init(port_obj_table_size); \ - ndx = MACH_PORT_INDEX(pname); \ - assert(ndx > 0); \ - assert(ndx < port_obj_table_size); \ - port_obj_table[ndx].pos_value = (value); \ - port_obj_table[ndx].pos_type = (type); \ +#else /* PORT_OBJ_ASSERT */ + +#define port_set_obj_value_type(pname, value, type) \ +do { \ + int ndx; \ + \ + if (!port_obj_table) \ + port_obj_init(port_obj_table_size); \ + ndx = MACH_PORT_INDEX(pname); \ + assert(ndx > 0); \ + assert(ndx < port_obj_table_size); \ + port_obj_table[ndx].pos_value = (value); \ + port_obj_table[ndx].pos_type = (type); \ } while (0) -#define port_get_obj_value(pname) \ - ((MACH_PORT_INDEX(pname) < (unsigned)port_obj_table_size) ? \ - port_obj_table[MACH_PORT_INDEX(pname)].pos_value : \ +#define port_get_obj_value(pname) \ + ((MACH_PORT_INDEX(pname) < (unsigned)port_obj_table_size) ? \ + port_obj_table[MACH_PORT_INDEX(pname)].pos_value : \ (panic("port_get_obj_value: index too big"), (void *)-1)) -#define port_get_obj_type(pname) \ - ((MACH_PORT_INDEX(pname) < (unsigned)port_obj_table_size) ? \ - port_obj_table[MACH_PORT_INDEX(pname)].pos_type : \ +#define port_get_obj_type(pname) \ + ((MACH_PORT_INDEX(pname) < (unsigned)port_obj_table_size) ? \ + port_obj_table[MACH_PORT_INDEX(pname)].pos_type : \ (panic("port_get_obj_type: index too big"), -1)) -#endif /* PORT_OBJ_ASSERT */ +#endif /* PORT_OBJ_ASSERT */ -#endif /* PORT_OBJ_H */ +#endif /* PORT_OBJ_H */ diff --git a/libsyscall/mach/mach/sync.h b/libsyscall/mach/mach/sync.h index 0a567c244..b3057205a 100644 --- a/libsyscall/mach/mach/sync.h +++ b/libsyscall/mach/mach/sync.h @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
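The port_obj.h hunk above is whitespace-only, but the macros deserve unpacking: they index a lazily allocated, process-global table by MACH_PORT_INDEX(pname), and the PORT_OBJ_ASSERT variant bounds-checks that index before touching the table. A standalone sketch of the same pattern; the struct, globals, and function names here are stand-ins, not the real port_obj implementation:

    #include <mach/mach.h>
    #include <assert.h>
    #include <stdlib.h>

    /* Stand-ins mirroring port_obj.h's table (names hypothetical). */
    struct obj_entry {
            void *value;
            int   type;
    };

    static struct obj_entry *obj_table;
    static int obj_table_size = 1024;

    /* Equivalent of port_set_obj_value_type() with PORT_OBJ_ASSERT:
     * lazily create the table, then bounds-check the index derived
     * from the port name before storing into it. */
    static void
    obj_set(mach_port_t pname, void *value, int type)
    {
            int ndx;

            if (!obj_table) {
                    obj_table = calloc((size_t)obj_table_size,
                        sizeof(*obj_table));
            }
            ndx = (int)MACH_PORT_INDEX(pname);
            assert(ndx > 0);
            assert(ndx < obj_table_size);
            obj_table[ndx].value = value;
            obj_table[ndx].type = type;
    }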
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/libsyscall/mach/mach/thread_state.h b/libsyscall/mach/mach/thread_state.h index 67afb6835..77b492e10 100644 --- a/libsyscall/mach/mach/thread_state.h +++ b/libsyscall/mach/mach/thread_state.h @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _MACH_THREAD_STATE_H_ +#ifndef _MACH_THREAD_STATE_H_ #define _MACH_THREAD_STATE_H_ #include @@ -57,7 +57,7 @@ */ __API_AVAILABLE(macosx(10.14), ios(12.0), tvos(9.0), watchos(5.0)) kern_return_t thread_get_register_pointer_values(thread_t thread, - uintptr_t *sp, size_t *length, uintptr_t *values); + uintptr_t *sp, size_t *length, uintptr_t *values); #endif #endif /* _MACH_THREAD_STATE_H_ */ diff --git a/libsyscall/mach/mach/vm_page_size.h b/libsyscall/mach/mach/vm_page_size.h index fd1a92c73..26d7a7303 100644 --- a/libsyscall/mach/mach/vm_page_size.h +++ b/libsyscall/mach/mach/vm_page_size.h @@ -35,19 +35,19 @@ __BEGIN_DECLS - /* - * Globally interesting numbers. - * These macros assume vm_page_size is a power-of-2. - */ -extern vm_size_t vm_page_size; -extern vm_size_t vm_page_mask; -extern int vm_page_shift; +/* + * Globally interesting numbers. + * These macros assume vm_page_size is a power-of-2. + */ +extern vm_size_t vm_page_size; +extern vm_size_t vm_page_mask; +extern int vm_page_shift; /* * These macros assume vm_page_size is a power-of-2. */ -#define trunc_page(x) ((x) & (~(vm_page_size - 1))) -#define round_page(x) trunc_page((x) + (vm_page_size - 1)) +#define trunc_page(x) ((x) & (~(vm_page_size - 1))) +#define round_page(x) trunc_page((x) + (vm_page_size - 1)) /* * Page-size rounding macros for the fixed-width VM types. 
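The rounding macros above only work because vm_page_size is a power of two: masking with ~(vm_page_size - 1) clears the sub-page bits in a single AND, and round_page() is just trunc_page() of the value bumped by one page minus one. A small worked example; the 4 KiB figures in the comment are illustrative, the program prints whatever the host actually uses:

    #include <mach/mach.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uintptr_t x = 0x12345;
            /* With 4 KiB pages the mask is 0xFFF: trunc_page(0x12345)
             * is 0x12000 and round_page(0x12345) is 0x13000. On arm64,
             * where vm_page_size is 16 KiB, the same macros mask off
             * 14 bits instead. */
            printf("page %lu: trunc %#lx round %#lx\n",
                (unsigned long)vm_page_size,
                (unsigned long)trunc_page(x),
                (unsigned long)round_page(x));
            return 0;
    }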
@@ -56,9 +56,9 @@ extern int vm_page_shift; #define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + vm_page_mask) & ~((signed)vm_page_mask)) -extern vm_size_t vm_kernel_page_size __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); -extern vm_size_t vm_kernel_page_mask __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); -extern int vm_kernel_page_shift __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); +extern vm_size_t vm_kernel_page_size __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); +extern vm_size_t vm_kernel_page_mask __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); +extern int vm_kernel_page_shift __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); #define trunc_page_kernel(x) ((x) & (~vm_kernel_page_mask)) #define round_page_kernel(x) trunc_page_kernel((x) + vm_kernel_page_mask) diff --git a/libsyscall/mach/mach/vm_task.h b/libsyscall/mach/mach/vm_task.h index d2401ace0..3040cfc12 100644 --- a/libsyscall/mach/mach/vm_task.h +++ b/libsyscall/mach/mach/vm_task.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* ** This file contains compatibilty wrapper header for things that are -** generated from mach/vm_map.defs into mach/vm_map.h. -** +** generated from mach/vm_map.defs into mach/vm_map.h. +** ** This file will go away eventually - please switch. */ #include diff --git a/libsyscall/mach/mach_error.c b/libsyscall/mach/mach_error.c index 4b9542726..03efb272e 100644 --- a/libsyscall/mach/mach_error.c +++ b/libsyscall/mach/mach_error.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,37 +22,37 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* - * File: mach_error.c + * File: mach_error.c * Author: Douglas Orr, Carnegie Mellon University * Date: Mar 1988 * diff --git a/libsyscall/mach/mach_error_string.c b/libsyscall/mach/mach_error_string.c index 82dc4da99..cfa94a6b4 100644 --- a/libsyscall/mach/mach_error_string.c +++ b/libsyscall/mach/mach_error_string.c @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. 
- * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -68,25 +68,26 @@ static void do_compat(mach_error_t *); static void do_compat(mach_error_t *org_err) { - mach_error_t err = *org_err; + mach_error_t err = *org_err; - /* - * map old error numbers to - * to new error sys & subsystem + /* + * map old error numbers to + * to new error sys & subsystem */ - if ((-200 < err) && (err <= -100)) + if ((-200 < err) && (err <= -100)) { err = -(err + 100) | IPC_SEND_MOD; - else if ((-300 < err) && (err <= -200)) + } else if ((-300 < err) && (err <= -200)) { err = -(err + 200) | IPC_RCV_MOD; - else if ((-400 < err) && (err <= -300)) + } else if ((-400 < err) && (err <= -300)) { err = -(err + 300) | MACH_IPC_MIG_MOD; - else if ((1000 <= err) && (err < 1100)) + } else if ((1000 <= err) && (err < 1100)) { err = (err - 1000) | SERV_NETNAME_MOD; - else if ((1600 <= err) && (err < 1700)) + } else if ((1600 <= err) && (err < 1700)) { err = (err - 1600) | SERV_ENV_MOD; - else if ((27600 <= err) && (err < 27700)) + } else if ((27600 <= err) && (err < 27700)) { err = (err - 27600) | SERV_EXECD_MOD; + } *org_err = err; } @@ -123,11 +124,13 @@ mach_error_type(mach_error_t err) sys_p = &_mach_errors[system]; sub = err_get_sub(err); - if (system <= err_max_system && sys_p->map_table) + if (system <= err_max_system && sys_p->map_table) { sub = err_sparse_mapit(sub, sys_p->map_table, sys_p->map_count); + } - if (system > err_max_system || sub >= sys_p->max_sub) - return((char *)"(?/?)"); + if (system > err_max_system || sub >= sys_p->max_sub) { + return (char *)"(?/?)"; + } return (char *) (sys_p->subsystem[sub].subsys_name); } @@ -149,22 +152,26 @@ mach_error_string_int(mach_error_t err, boolean_t *diag) *diag = TRUE; - if (system > err_max_system) - return((char *)"(?/?) unknown error system"); - else if (sys_p->map_table) + if (system > err_max_system) { + return (char *)"(?/?) unknown error system"; + } else if (sys_p->map_table) { sub = err_sparse_mapit(sub, sys_p->map_table, sys_p->map_count); + } - if (sub >= sys_p->max_sub) - return((char *)sys_p->bad_sub); + if (sub >= sys_p->max_sub) { + return (char *)sys_p->bad_sub; + } sub_p = &sys_p->subsystem[sub]; - if (sub_p->map_table) + if (sub_p->map_table) { code = err_sparse_mapit(code, sub_p->map_table, sub_p->map_count); - if (code >= sub_p->max_code) - return ((char *)NO_SUCH_ERROR); + } + if (code >= sub_p->max_code) { + return (char *)NO_SUCH_ERROR; + } *diag = mach_error_full_diag; - return( (char *)sub_p->codes[code] ); + return (char *)sub_p->codes[code]; } char * diff --git a/libsyscall/mach/mach_init.c b/libsyscall/mach/mach_init.c index 338f7c95b..0c832bfbc 100644 --- a/libsyscall/mach/mach_init.c +++ b/libsyscall/mach/mach_init.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
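Beyond the brace-style churn, do_compat() in the mach_error_string.c hunk above is the compatibility shim that folds pre-modular error numbers into the sys/sub/code encoding; for instance an old send error in (-200, -100] becomes -(err + 100) OR'ed into the IPC_SEND_MOD module before the table walk. Most callers never see any of that and just want the string; a minimal consumer, where the bogus port name is arbitrary and only there to provoke an error:

    #include <mach/mach.h>
    #include <mach/mach_error.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* Deliberately fail: 0x44440003 is (almost certainly) not
             * a valid name in this task's IPC space. */
            kern_return_t kr = mach_port_deallocate(mach_task_self(),
                0x44440003);
            if (kr != KERN_SUCCESS) {
                    /* mach_error_string() performs the lookup shown
                     * above: system table -> subsystem -> code. */
                    fprintf(stderr, "deallocate: %s (0x%x)\n",
                        mach_error_string(kr), kr);
            }
            return 0;
    }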
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,31 +22,31 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -92,7 +92,7 @@ host_page_size(__unused host_t host, vm_size_t *out_page_size) return KERN_SUCCESS; } -/* +/* * mach_init() must be called explicitly in static executables (including dyld). * called by libSystem_initializer() in dynamic executables */ @@ -130,7 +130,7 @@ mach_init_doit(void) _task_reply_port = mach_reply_port(); if (vm_kernel_page_shift == 0) { -#ifdef _COMM_PAGE_KERNEL_PAGE_SHIFT +#ifdef _COMM_PAGE_KERNEL_PAGE_SHIFT vm_kernel_page_shift = *(uint8_t*) _COMM_PAGE_KERNEL_PAGE_SHIFT; vm_kernel_page_size = 1 << vm_kernel_page_shift; vm_kernel_page_mask = vm_kernel_page_size - 1; @@ -140,7 +140,7 @@ mach_init_doit(void) vm_kernel_page_shift = PAGE_SHIFT; #endif /* _COMM_PAGE_KERNEL_PAGE_SHIFT */ } - + if (vm_page_shift == 0) { #if defined(__arm64__) vm_page_shift = *(uint8_t*) _COMM_PAGE_USER_PAGE_SHIFT_64; diff --git a/libsyscall/mach/mach_legacy.c b/libsyscall/mach/mach_legacy.c index f425d78f8..3593a7f1b 100644 --- a/libsyscall/mach/mach_legacy.c +++ b/libsyscall/mach/mach_legacy.c @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
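mach_init_doit() in the hunk above seeds vm_kernel_page_shift (and, on arm64, vm_page_shift) from the comm page when one is present, then derives size and mask from the shift; the whole scheme rests on the power-of-two triple. A sketch of that derivation in isolation; the function name is hypothetical and the shift value of 14 is only illustrative (16 KiB pages, as on arm64):

    #include <mach/mach.h>
    #include <stdint.h>

    /* Mirror of the shift -> size -> mask derivation in
     * mach_init_doit(). */
    static void
    derive_page_globals(uint8_t shift, vm_size_t *size, vm_size_t *mask)
    {
            *size = (vm_size_t)1 << shift;
            *mask = *size - 1;      /* all ones below the page boundary */
    }

    /* e.g. derive_page_globals(14, &size, &mask) yields 16384 / 0x3FFF. */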
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/mach/mach_msg.c b/libsyscall/mach/mach_msg.c index 4b90d19e5..88c2583b7 100644 --- a/libsyscall/mach/mach_msg.c +++ b/libsyscall/mach/mach_msg.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,31 +22,31 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -66,7 +66,7 @@ extern int proc_importance_assertion_complete(uint64_t assertion_handle); #define MACH_MSG_TRAP(msg, opt, ssize, rsize, rname, to, not) \ mach_msg_trap((msg), (opt), (ssize), (rsize), (rname), (to), (not)) -#define LIBMACH_OPTIONS (MACH_SEND_INTERRUPT|MACH_RCV_INTERRUPT) +#define LIBMACH_OPTIONS (MACH_SEND_INTERRUPT|MACH_RCV_INTERRUPT) /* * Routine: mach_msg @@ -74,17 +74,17 @@ extern int proc_importance_assertion_complete(uint64_t assertion_handle); * Send and/or receive a message. 
If the message operation * is interrupted, and the user did not request an indication * of that fact, then restart the appropriate parts of the - * operation. + * operation. */ mach_msg_return_t mach_msg(msg, option, send_size, rcv_size, rcv_name, timeout, notify) - mach_msg_header_t *msg; - mach_msg_option_t option; - mach_msg_size_t send_size; - mach_msg_size_t rcv_size; - mach_port_t rcv_name; - mach_msg_timeout_t timeout; - mach_port_t notify; +mach_msg_header_t *msg; +mach_msg_option_t option; +mach_msg_size_t send_size; +mach_msg_size_t rcv_size; +mach_port_t rcv_name; +mach_msg_timeout_t timeout; +mach_port_t notify; { mach_msg_return_t mr; @@ -100,25 +100,30 @@ mach_msg(msg, option, send_size, rcv_size, rcv_name, timeout, notify) * the kernel's fast paths (when it checks the option value). */ - mr = MACH_MSG_TRAP(msg, option &~ LIBMACH_OPTIONS, - send_size, rcv_size, rcv_name, - timeout, notify); - if (mr == MACH_MSG_SUCCESS) + mr = MACH_MSG_TRAP(msg, option & ~LIBMACH_OPTIONS, + send_size, rcv_size, rcv_name, + timeout, notify); + if (mr == MACH_MSG_SUCCESS) { return MACH_MSG_SUCCESS; + } - if ((option & MACH_SEND_INTERRUPT) == 0) - while (mr == MACH_SEND_INTERRUPTED) + if ((option & MACH_SEND_INTERRUPT) == 0) { + while (mr == MACH_SEND_INTERRUPTED) { mr = MACH_MSG_TRAP(msg, - option &~ LIBMACH_OPTIONS, - send_size, rcv_size, rcv_name, - timeout, notify); + option & ~LIBMACH_OPTIONS, + send_size, rcv_size, rcv_name, + timeout, notify); + } + } - if ((option & MACH_RCV_INTERRUPT) == 0) - while (mr == MACH_RCV_INTERRUPTED) + if ((option & MACH_RCV_INTERRUPT) == 0) { + while (mr == MACH_RCV_INTERRUPTED) { mr = MACH_MSG_TRAP(msg, - option &~ (LIBMACH_OPTIONS|MACH_SEND_MSG), - 0, rcv_size, rcv_name, - timeout, notify); + option & ~(LIBMACH_OPTIONS | MACH_SEND_MSG), + 0, rcv_size, rcv_name, + timeout, notify); + } + } return mr; } @@ -129,7 +134,7 @@ mach_msg(msg, option, send_size, rcv_size, rcv_name, timeout, notify) * Send and/or receive a message. If the message operation * is interrupted, and the user did not request an indication * of that fact, then restart the appropriate parts of the - * operation. + * operation. * * Distinct send and receive buffers may be specified. If * no separate receive buffer is specified, the msg parameter @@ -140,17 +145,17 @@ mach_msg(msg, option, send_size, rcv_size, rcv_name, timeout, notify) * receiving of the message. */ mach_msg_return_t -mach_msg_overwrite(msg, option, send_size, rcv_limit, rcv_name, timeout, - notify, rcv_msg, rcv_scatter_size) - mach_msg_header_t *msg; - mach_msg_option_t option; - mach_msg_size_t send_size; - mach_msg_size_t rcv_limit; - mach_port_t rcv_name; - mach_msg_timeout_t timeout; - mach_port_t notify; - mach_msg_header_t *rcv_msg; - mach_msg_size_t rcv_scatter_size; +mach_msg_overwrite(msg, option, send_size, rcv_limit, rcv_name, timeout, + notify, rcv_msg, rcv_scatter_size) +mach_msg_header_t *msg; +mach_msg_option_t option; +mach_msg_size_t send_size; +mach_msg_size_t rcv_limit; +mach_port_t rcv_name; +mach_msg_timeout_t timeout; +mach_port_t notify; +mach_msg_header_t *rcv_msg; +mach_msg_size_t rcv_scatter_size; { mach_msg_return_t mr; @@ -166,25 +171,30 @@ mach_msg_overwrite(msg, option, send_size, rcv_limit, rcv_name, timeout, * the kernel's fast paths (when it checks the option value). 
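The reindented loops in mach_msg() above encode its contract: unless the caller asked for MACH_SEND_INTERRUPT / MACH_RCV_INTERRUPT, interrupted sends and receives are retried transparently, and a retried receive strips MACH_SEND_MSG so the message cannot be sent twice. A typical call site, a fire-and-forget send of an empty message (port setup elided; dest is assumed to be a valid send right):

    #include <mach/mach.h>

    kern_return_t
    send_empty(mach_port_t dest)
    {
            mach_msg_header_t msg = {
                    .msgh_bits        = MACH_MSGH_BITS(
                        MACH_MSG_TYPE_COPY_SEND, 0),
                    .msgh_size        = sizeof(msg),
                    .msgh_remote_port = dest,
                    .msgh_local_port  = MACH_PORT_NULL,
                    .msgh_id          = 0x1234,   /* arbitrary id */
            };
            /* No MACH_SEND_INTERRUPT in the options, so the wrapper
             * above restarts the send on MACH_SEND_INTERRUPTED. */
            return mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0,
                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
                       MACH_PORT_NULL);
    }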
*/ - mr = mach_msg_overwrite_trap(msg, option &~ LIBMACH_OPTIONS, - send_size, rcv_limit, rcv_name, - timeout, notify, rcv_msg, rcv_scatter_size); - if (mr == MACH_MSG_SUCCESS) + mr = mach_msg_overwrite_trap(msg, option & ~LIBMACH_OPTIONS, + send_size, rcv_limit, rcv_name, + timeout, notify, rcv_msg, rcv_scatter_size); + if (mr == MACH_MSG_SUCCESS) { return MACH_MSG_SUCCESS; + } - if ((option & MACH_SEND_INTERRUPT) == 0) - while (mr == MACH_SEND_INTERRUPTED) + if ((option & MACH_SEND_INTERRUPT) == 0) { + while (mr == MACH_SEND_INTERRUPTED) { mr = mach_msg_overwrite_trap(msg, - option &~ LIBMACH_OPTIONS, - send_size, rcv_limit, rcv_name, - timeout, notify, rcv_msg, rcv_scatter_size); + option & ~LIBMACH_OPTIONS, + send_size, rcv_limit, rcv_name, + timeout, notify, rcv_msg, rcv_scatter_size); + } + } - if ((option & MACH_RCV_INTERRUPT) == 0) - while (mr == MACH_RCV_INTERRUPTED) + if ((option & MACH_RCV_INTERRUPT) == 0) { + while (mr == MACH_RCV_INTERRUPTED) { mr = mach_msg_overwrite_trap(msg, - option &~ (LIBMACH_OPTIONS|MACH_SEND_MSG), - 0, rcv_limit, rcv_name, - timeout, notify, rcv_msg, rcv_scatter_size); + option & ~(LIBMACH_OPTIONS | MACH_SEND_MSG), + 0, rcv_limit, rcv_name, + timeout, notify, rcv_msg, rcv_scatter_size); + } + } return mr; } @@ -194,57 +204,60 @@ mach_msg_return_t mach_msg_send(mach_msg_header_t *msg) { return mach_msg(msg, MACH_SEND_MSG, - msg->msgh_size, 0, MACH_PORT_NULL, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + msg->msgh_size, 0, MACH_PORT_NULL, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); } mach_msg_return_t mach_msg_receive(mach_msg_header_t *msg) { return mach_msg(msg, MACH_RCV_MSG, - 0, msg->msgh_size, msg->msgh_local_port, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + 0, msg->msgh_size, msg->msgh_local_port, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); } static void mach_msg_destroy_port(mach_port_t port, mach_msg_type_name_t type) { - if (MACH_PORT_VALID(port)) switch (type) { - case MACH_MSG_TYPE_MOVE_SEND: - case MACH_MSG_TYPE_MOVE_SEND_ONCE: - /* destroy the send/send-once right */ - (void) mach_port_deallocate(mach_task_self_, port); - break; - - case MACH_MSG_TYPE_MOVE_RECEIVE: - /* destroy the receive right */ - (void) mach_port_mod_refs(mach_task_self_, port, - MACH_PORT_RIGHT_RECEIVE, -1); - break; - - case MACH_MSG_TYPE_MAKE_SEND: - /* create a send right and then destroy it */ - (void) mach_port_insert_right(mach_task_self_, port, - port, MACH_MSG_TYPE_MAKE_SEND); - (void) mach_port_deallocate(mach_task_self_, port); - break; - - case MACH_MSG_TYPE_MAKE_SEND_ONCE: - /* create a send-once right and then destroy it */ - (void) mach_port_extract_right(mach_task_self_, port, - MACH_MSG_TYPE_MAKE_SEND_ONCE, - &port, &type); - (void) mach_port_deallocate(mach_task_self_, port); - break; - } + if (MACH_PORT_VALID(port)) { + switch (type) { + case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MOVE_SEND_ONCE: + /* destroy the send/send-once right */ + (void) mach_port_deallocate(mach_task_self_, port); + break; + + case MACH_MSG_TYPE_MOVE_RECEIVE: + /* destroy the receive right */ + (void) mach_port_mod_refs(mach_task_self_, port, + MACH_PORT_RIGHT_RECEIVE, -1); + break; + + case MACH_MSG_TYPE_MAKE_SEND: + /* create a send right and then destroy it */ + (void) mach_port_insert_right(mach_task_self_, port, + port, MACH_MSG_TYPE_MAKE_SEND); + (void) mach_port_deallocate(mach_task_self_, port); + break; + + case MACH_MSG_TYPE_MAKE_SEND_ONCE: + /* create a send-once right and then destroy it */ + (void) mach_port_extract_right(mach_task_self_, port, + 
MACH_MSG_TYPE_MAKE_SEND_ONCE, + &port, &type); + (void) mach_port_deallocate(mach_task_self_, port); + break; + } + } } static void mach_msg_destroy_memory(vm_offset_t addr, vm_size_t size) { - if (size != 0) - (void) vm_deallocate(mach_task_self_, addr, size); + if (size != 0) { + (void) vm_deallocate(mach_task_self_, addr, size); + } } @@ -267,94 +280,92 @@ mach_msg_destroy_memory(vm_offset_t addr, vm_size_t size) void mach_msg_destroy(mach_msg_header_t *msg) { - mach_msg_bits_t mbits = msg->msgh_bits; - - /* - * The msgh_local_port field doesn't hold a port right. - * The receive operation consumes the destination port right. - */ - - mach_msg_destroy_port(msg->msgh_remote_port, MACH_MSGH_BITS_REMOTE(mbits)); - mach_msg_destroy_port(msg->msgh_voucher_port, MACH_MSGH_BITS_VOUCHER(mbits)); - - if (mbits & MACH_MSGH_BITS_COMPLEX) { - mach_msg_base_t *base; - mach_msg_type_number_t count, i; - mach_msg_descriptor_t *daddr; - - base = (mach_msg_base_t *) msg; - count = base->body.msgh_descriptor_count; - - daddr = (mach_msg_descriptor_t *) (base + 1); - for (i = 0; i < count; i++) { - - switch (daddr->type.type) { - - case MACH_MSG_PORT_DESCRIPTOR: { - mach_msg_port_descriptor_t *dsc; - - /* - * Destroy port rights carried in the message - */ - dsc = &daddr->port; - mach_msg_destroy_port(dsc->name, dsc->disposition); - daddr = (mach_msg_descriptor_t *)(dsc + 1); - break; - } - - case MACH_MSG_OOL_DESCRIPTOR: { - mach_msg_ool_descriptor_t *dsc; - - /* - * Destroy memory carried in the message - */ - dsc = &daddr->out_of_line; - if (dsc->deallocate) { - mach_msg_destroy_memory((vm_offset_t)dsc->address, - dsc->size); - } - daddr = (mach_msg_descriptor_t *)(dsc + 1); - break; - } - - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: { - mach_msg_ool_descriptor_t *dsc; - - /* - * Just skip it. - */ - dsc = &daddr->out_of_line; - daddr = (mach_msg_descriptor_t *)(dsc + 1); - break; - } - - case MACH_MSG_OOL_PORTS_DESCRIPTOR: { - mach_port_t *ports; - mach_msg_ool_ports_descriptor_t *dsc; - mach_msg_type_number_t j; - - /* - * Destroy port rights carried in the message - */ - dsc = &daddr->ool_ports; - ports = (mach_port_t *) dsc->address; - for (j = 0; j < dsc->count; j++, ports++) { - mach_msg_destroy_port(*ports, dsc->disposition); - } - - /* - * Destroy memory carried in the message - */ - if (dsc->deallocate) { - mach_msg_destroy_memory((vm_offset_t)dsc->address, - dsc->count * sizeof(mach_port_t)); - } - daddr = (mach_msg_descriptor_t *)(dsc + 1); - break; - } - } + mach_msg_bits_t mbits = msg->msgh_bits; + + /* + * The msgh_local_port field doesn't hold a port right. + * The receive operation consumes the destination port right. 
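mach_msg_destroy(), whose reindented body continues below, releases every port right and every out-of-line region carried by a received message that the caller decided not to answer; note the comment that msgh_local_port holds no right, since the receive already consumed the destination right. The pattern it exists for, sketched under the assumption that request points at a fully received message (the prototype ships alongside mach_msg(), via <mach/mach.h>):

    #include <mach/mach.h>

    /* After a failed dispatch, drop the request so the rights and
     * out-of-line memory it carried are released. */
    void
    drop_request(mach_msg_header_t *request, kern_return_t demux_result)
    {
            if (demux_result != KERN_SUCCESS) {
                    /* Clear the reply port first, as the server loops
                     * in this file do when the reply message already
                     * owns the send-once right; then destroy the rest. */
                    request->msgh_remote_port = MACH_PORT_NULL;
                    mach_msg_destroy(request);
            }
    }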
+ */ + + mach_msg_destroy_port(msg->msgh_remote_port, MACH_MSGH_BITS_REMOTE(mbits)); + mach_msg_destroy_port(msg->msgh_voucher_port, MACH_MSGH_BITS_VOUCHER(mbits)); + + if (mbits & MACH_MSGH_BITS_COMPLEX) { + mach_msg_base_t *base; + mach_msg_type_number_t count, i; + mach_msg_descriptor_t *daddr; + + base = (mach_msg_base_t *) msg; + count = base->body.msgh_descriptor_count; + + daddr = (mach_msg_descriptor_t *) (base + 1); + for (i = 0; i < count; i++) { + switch (daddr->type.type) { + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_port_descriptor_t *dsc; + + /* + * Destroy port rights carried in the message + */ + dsc = &daddr->port; + mach_msg_destroy_port(dsc->name, dsc->disposition); + daddr = (mach_msg_descriptor_t *)(dsc + 1); + break; + } + + case MACH_MSG_OOL_DESCRIPTOR: { + mach_msg_ool_descriptor_t *dsc; + + /* + * Destroy memory carried in the message + */ + dsc = &daddr->out_of_line; + if (dsc->deallocate) { + mach_msg_destroy_memory((vm_offset_t)dsc->address, + dsc->size); + } + daddr = (mach_msg_descriptor_t *)(dsc + 1); + break; + } + + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: { + mach_msg_ool_descriptor_t *dsc; + + /* + * Just skip it. + */ + dsc = &daddr->out_of_line; + daddr = (mach_msg_descriptor_t *)(dsc + 1); + break; + } + + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + mach_port_t *ports; + mach_msg_ool_ports_descriptor_t *dsc; + mach_msg_type_number_t j; + + /* + * Destroy port rights carried in the message + */ + dsc = &daddr->ool_ports; + ports = (mach_port_t *) dsc->address; + for (j = 0; j < dsc->count; j++, ports++) { + mach_msg_destroy_port(*ports, dsc->disposition); + } + + /* + * Destroy memory carried in the message + */ + if (dsc->deallocate) { + mach_msg_destroy_memory((vm_offset_t)dsc->address, + dsc->count * sizeof(mach_port_t)); + } + daddr = (mach_msg_descriptor_t *)(dsc + 1); + break; + } + } + } } - } } /* @@ -363,8 +374,8 @@ mach_msg_destroy(mach_msg_header_t *msg) * A simple generic server function. It allows more flexibility * than mach_msg_server by processing only one message request * and then returning to the user. Note that more in the way - * of error codes are returned to the user; specifically, any - * failing error from mach_msg calls will be returned + * of error codes are returned to the user; specifically, any + * failing error from mach_msg calls will be returned * (though errors from the demux routine or the routine it * calls will not be). */ @@ -385,66 +396,68 @@ mach_msg_server_once( mach_port_t self = mach_task_self_; voucher_mach_msg_state_t old_state = VOUCHER_MACH_MSG_STATE_UNCHANGED; - options &= ~(MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_VOUCHER); + options &= ~(MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_VOUCHER); trailer_alloc = REQUESTED_TRAILER_SIZE(options); request_alloc = (mach_msg_size_t)round_page(max_size + trailer_alloc); request_size = (options & MACH_RCV_LARGE) ? - request_alloc : max_size + trailer_alloc; + request_alloc : max_size + trailer_alloc; - reply_alloc = (mach_msg_size_t)round_page((options & MACH_SEND_TRAILER) ? - (max_size + MAX_TRAILER_SIZE) : - max_size); + reply_alloc = (mach_msg_size_t)round_page((options & MACH_SEND_TRAILER) ? 
+ (max_size + MAX_TRAILER_SIZE) : + max_size); kr = vm_allocate(self, - (vm_address_t *)&bufReply, - reply_alloc, - VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE); - if (kr != KERN_SUCCESS) - return kr; + (vm_address_t *)&bufReply, + reply_alloc, + VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE); + if (kr != KERN_SUCCESS) { + return kr; + } for (;;) { mach_msg_size_t new_request_alloc; kr = vm_allocate(self, - (vm_address_t *)&bufRequest, - request_alloc, - VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE); + (vm_address_t *)&bufRequest, + request_alloc, + VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE); if (kr != KERN_SUCCESS) { vm_deallocate(self, - (vm_address_t)bufReply, - reply_alloc); + (vm_address_t)bufReply, + reply_alloc); return kr; - } - - mr = mach_msg(&bufRequest->Head, MACH_RCV_MSG|MACH_RCV_VOUCHER|options, - 0, request_size, rcv_name, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); - - if (!((mr == MACH_RCV_TOO_LARGE) && (options & MACH_RCV_LARGE))) + } + + mr = mach_msg(&bufRequest->Head, MACH_RCV_MSG | MACH_RCV_VOUCHER | options, + 0, request_size, rcv_name, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + + if (!((mr == MACH_RCV_TOO_LARGE) && (options & MACH_RCV_LARGE))) { break; + } new_request_alloc = (mach_msg_size_t)round_page(bufRequest->Head.msgh_size + - trailer_alloc); + trailer_alloc); vm_deallocate(self, - (vm_address_t) bufRequest, - request_alloc); + (vm_address_t) bufRequest, + request_alloc); request_size = request_alloc = new_request_alloc; } if (mr == MACH_MSG_SUCCESS) { - /* we have a request message */ + /* we have a request message */ old_state = voucher_mach_msg_adopt(&bufRequest->Head); (void) (*demux)(&bufRequest->Head, &bufReply->Head); if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - if (bufReply->RetCode == MIG_NO_REPLY) + if (bufReply->RetCode == MIG_NO_REPLY) { bufReply->Head.msgh_remote_port = MACH_PORT_NULL; - else if ((bufReply->RetCode != KERN_SUCCESS) && - (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { + } else if ((bufReply->RetCode != KERN_SUCCESS) && + (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { /* destroy the request - but not the reply port */ bufRequest->Head.msgh_remote_port = MACH_PORT_NULL; mach_msg_destroy(&bufRequest->Head); @@ -461,33 +474,34 @@ mach_msg_server_once( * we only supply MACH_SEND_TIMEOUT when absolutely necessary. */ if (bufReply->Head.msgh_remote_port != MACH_PORT_NULL) { - mr = mach_msg(&bufReply->Head, - (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) == - MACH_MSG_TYPE_MOVE_SEND_ONCE) ? - MACH_SEND_MSG|options : - MACH_SEND_MSG|MACH_SEND_TIMEOUT|options, - bufReply->Head.msgh_size, 0, MACH_PORT_NULL, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); - + (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) == + MACH_MSG_TYPE_MOVE_SEND_ONCE) ? 
+ MACH_SEND_MSG | options : + MACH_SEND_MSG | MACH_SEND_TIMEOUT | options, + bufReply->Head.msgh_size, 0, MACH_PORT_NULL, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + if ((mr != MACH_SEND_INVALID_DEST) && - (mr != MACH_SEND_TIMED_OUT)) + (mr != MACH_SEND_TIMED_OUT)) { goto done_once; + } mr = MACH_MSG_SUCCESS; } - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) + if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { mach_msg_destroy(&bufReply->Head); + } } - done_once: +done_once: voucher_mach_msg_revert(old_state); (void)vm_deallocate(self, - (vm_address_t) bufRequest, - request_alloc); + (vm_address_t) bufRequest, + request_alloc); (void)vm_deallocate(self, - (vm_address_t) bufReply, - reply_alloc); + (vm_address_t) bufReply, + reply_alloc); return mr; } @@ -495,7 +509,7 @@ mach_msg_server_once( * Routine: mach_msg_server * Purpose: * A simple generic server function. Note that changes here - * should be considered for duplication above. + * should be considered for duplication above. */ mach_msg_return_t mach_msg_server( @@ -516,43 +530,44 @@ mach_msg_server( voucher_mach_msg_state_t old_state = VOUCHER_MACH_MSG_STATE_UNCHANGED; boolean_t buffers_swapped = FALSE; - options &= ~(MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_VOUCHER|MACH_RCV_OVERWRITE); + options &= ~(MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_VOUCHER | MACH_RCV_OVERWRITE); reply_alloc = (mach_msg_size_t)round_page((options & MACH_SEND_TRAILER) ? - (max_size + MAX_TRAILER_SIZE) : max_size); + (max_size + MAX_TRAILER_SIZE) : max_size); kr = vm_allocate(self, - (vm_address_t *)&bufReply, - reply_alloc, - VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE); - if (kr != KERN_SUCCESS) + (vm_address_t *)&bufReply, + reply_alloc, + VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE); + if (kr != KERN_SUCCESS) { return kr; + } request_alloc = 0; trailer_alloc = REQUESTED_TRAILER_SIZE(options); new_request_alloc = (mach_msg_size_t)round_page(max_size + trailer_alloc); request_size = (options & MACH_RCV_LARGE) ? 
- new_request_alloc : max_size + trailer_alloc; + new_request_alloc : max_size + trailer_alloc; for (;;) { if (request_alloc < new_request_alloc) { request_alloc = new_request_alloc; kr = vm_allocate(self, - (vm_address_t *)&bufRequest, - request_alloc, - VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE); + (vm_address_t *)&bufRequest, + request_alloc, + VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE); if (kr != KERN_SUCCESS) { vm_deallocate(self, - (vm_address_t)bufReply, - reply_alloc); + (vm_address_t)bufReply, + reply_alloc); return kr; } } - mr = mach_msg(&bufRequest->Head, MACH_RCV_MSG|MACH_RCV_VOUCHER|options, - 0, request_size, rcv_name, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + mr = mach_msg(&bufRequest->Head, MACH_RCV_MSG | MACH_RCV_VOUCHER | options, + 0, request_size, rcv_name, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); while (mr == MACH_MSG_SUCCESS) { /* we have another request message */ @@ -564,10 +579,10 @@ mach_msg_server( (void) (*demux)(&bufRequest->Head, &bufReply->Head); if (!(bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { - if (bufReply->RetCode == MIG_NO_REPLY) + if (bufReply->RetCode == MIG_NO_REPLY) { bufReply->Head.msgh_remote_port = MACH_PORT_NULL; - else if ((bufReply->RetCode != KERN_SUCCESS) && - (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { + } else if ((bufReply->RetCode != KERN_SUCCESS) && + (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { /* destroy the request - but not the reply port */ bufRequest->Head.msgh_remote_port = MACH_PORT_NULL; mach_msg_destroy(&bufRequest->Head); @@ -588,13 +603,13 @@ mach_msg_server( mig_reply_error_t *bufTemp; mr = mach_msg( - &bufReply->Head, - (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) == - MACH_MSG_TYPE_MOVE_SEND_ONCE) ? - MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_TIMEOUT|MACH_RCV_VOUCHER|options : - MACH_SEND_MSG|MACH_RCV_MSG|MACH_SEND_TIMEOUT|MACH_RCV_TIMEOUT|MACH_RCV_VOUCHER|options, - bufReply->Head.msgh_size, request_size, rcv_name, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + &bufReply->Head, + (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) == + MACH_MSG_TYPE_MOVE_SEND_ONCE) ? + MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_TIMEOUT | MACH_RCV_VOUCHER | options : + MACH_SEND_MSG | MACH_RCV_MSG | MACH_SEND_TIMEOUT | MACH_RCV_TIMEOUT | MACH_RCV_VOUCHER | options, + bufReply->Head.msgh_size, request_size, rcv_name, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); /* swap request and reply */ bufTemp = bufRequest; @@ -605,9 +620,9 @@ mach_msg_server( mr = mach_msg_overwrite( &bufReply->Head, (MACH_MSGH_BITS_REMOTE(bufReply->Head.msgh_bits) == - MACH_MSG_TYPE_MOVE_SEND_ONCE) ? - MACH_SEND_MSG|MACH_RCV_MSG|MACH_RCV_TIMEOUT|MACH_RCV_VOUCHER|options : - MACH_SEND_MSG|MACH_RCV_MSG|MACH_SEND_TIMEOUT|MACH_RCV_TIMEOUT|MACH_RCV_VOUCHER|options, + MACH_MSG_TYPE_MOVE_SEND_ONCE) ? + MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_TIMEOUT | MACH_RCV_VOUCHER | options : + MACH_SEND_MSG | MACH_RCV_MSG | MACH_SEND_TIMEOUT | MACH_RCV_TIMEOUT | MACH_RCV_VOUCHER | options, bufReply->Head.msgh_size, request_size, rcv_name, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL, &bufRequest->Head, 0); @@ -616,14 +631,13 @@ mach_msg_server( if ((mr != MACH_SEND_INVALID_DEST) && (mr != MACH_SEND_TIMED_OUT) && (mr != MACH_RCV_TIMED_OUT)) { - voucher_mach_msg_revert(old_state); old_state = VOUCHER_MACH_MSG_STATE_UNCHANGED; continue; } } - /* + /* * Need to destroy the reply msg in case if there was a send timeout or * invalid destination. 
The reply msg would be swapped with request msg * if buffers_swapped is true, thus destroy request msg instead of @@ -631,42 +645,42 @@ mach_msg_server( */ if (mr != MACH_RCV_TIMED_OUT) { if (buffers_swapped) { - if (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) + if (bufRequest->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { mach_msg_destroy(&bufRequest->Head); + } } else { - if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) + if (bufReply->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX) { mach_msg_destroy(&bufReply->Head); + } } } voucher_mach_msg_revert(old_state); old_state = VOUCHER_MACH_MSG_STATE_UNCHANGED; - mr = mach_msg(&bufRequest->Head, MACH_RCV_MSG|MACH_RCV_VOUCHER|options, - 0, request_size, rcv_name, - MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); - + mr = mach_msg(&bufRequest->Head, MACH_RCV_MSG | MACH_RCV_VOUCHER | options, + 0, request_size, rcv_name, + MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); } /* while (mr == MACH_MSG_SUCCESS) */ if ((mr == MACH_RCV_TOO_LARGE) && (options & MACH_RCV_LARGE)) { new_request_alloc = (mach_msg_size_t)round_page(bufRequest->Head.msgh_size + - trailer_alloc); + trailer_alloc); request_size = new_request_alloc; vm_deallocate(self, - (vm_address_t) bufRequest, - request_alloc); + (vm_address_t) bufRequest, + request_alloc); continue; } break; - } /* for(;;) */ (void)vm_deallocate(self, - (vm_address_t) bufRequest, - request_alloc); + (vm_address_t) bufRequest, + request_alloc); (void)vm_deallocate(self, - (vm_address_t) bufReply, - reply_alloc); + (vm_address_t) bufReply, + reply_alloc); return mr; } @@ -674,7 +688,7 @@ mach_msg_server( * Routine: mach_msg_server_importance * Purpose: * A simple generic server function which handles importance - * promotion assertions for adaptive daemons. + * promotion assertions for adaptive daemons. */ mach_msg_return_t mach_msg_server_importance( @@ -688,7 +702,7 @@ mach_msg_server_importance( kern_return_t mach_voucher_deallocate( - mach_voucher_t voucher) + mach_voucher_t voucher) { return mach_port_deallocate(mach_task_self(), voucher); } diff --git a/libsyscall/mach/mach_port.c b/libsyscall/mach/mach_port.c index 3219d7301..52f731b99 100644 --- a/libsyscall/mach/mach_port.c +++ b/libsyscall/mach/mach_port.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
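The mach_port.c hunks that follow all share one shape worth naming: try the fast kernel trap first, and fall back to the slower MIG RPC only when the trap layer answers MACH_SEND_INVALID_DEST, which is what comes back when the trap cannot service the call (for example, when task is not the caller's own IPC space). Condensed to its skeleton; the function name here is a stand-in, the real wrappers appear below:

    #include <mach/mach.h>

    kern_return_t
    port_allocate_sketch(ipc_space_t task, mach_port_right_t right,
        mach_port_name_t *name)
    {
            /* Fast path: direct trap into the kernel. */
            kern_return_t rv =
                _kernelrpc_mach_port_allocate_trap(task, right, name);

            /* Trap could not handle it; retry as a real MIG message. */
            if (rv == MACH_SEND_INVALID_DEST) {
                    rv = _kernelrpc_mach_port_allocate(task, right, name);
            }
            return rv;
    }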
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -44,9 +44,9 @@ mach_port_names( kern_return_t rv; rv = _kernelrpc_mach_port_names(task, names, namesCnt, types, - typesCnt); + typesCnt); - return (rv); + return rv; } kern_return_t @@ -59,7 +59,7 @@ mach_port_type( rv = _kernelrpc_mach_port_type(task, name, ptype); - return (rv); + return rv; } kern_return_t @@ -72,7 +72,7 @@ mach_port_rename( rv = _kernelrpc_mach_port_rename(task, old_name, new_name); - return (rv); + return rv; } kern_return_t @@ -85,7 +85,7 @@ mach_port_allocate_name( rv = _kernelrpc_mach_port_allocate_name(task, right, name); - return (rv); + return rv; } kern_return_t @@ -98,10 +98,11 @@ mach_port_allocate( rv = _kernelrpc_mach_port_allocate_trap(task, right, name); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_allocate(task, right, name); + } - return (rv); + return rv; } kern_return_t @@ -113,10 +114,11 @@ mach_port_destroy( rv = _kernelrpc_mach_port_destroy_trap(task, name); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_destroy(task, name); + } - return (rv); + return rv; } kern_return_t @@ -126,12 +128,13 @@ mach_port_deallocate( { kern_return_t rv; - rv = _kernelrpc_mach_port_deallocate_trap(task, name); + rv = _kernelrpc_mach_port_deallocate_trap(task, name); - if (rv == MACH_SEND_INVALID_DEST) - rv = _kernelrpc_mach_port_deallocate(task,name); + if (rv == MACH_SEND_INVALID_DEST) { + rv = _kernelrpc_mach_port_deallocate(task, name); + } - return (rv); + return rv; } kern_return_t @@ -145,7 +148,7 @@ mach_port_get_refs( rv = _kernelrpc_mach_port_get_refs(task, name, right, refs); - return (rv); + return rv; } kern_return_t @@ -157,32 +160,33 @@ mach_port_mod_refs( { kern_return_t rv; - rv = _kernelrpc_mach_port_mod_refs_trap(task, name, right, delta); + rv = _kernelrpc_mach_port_mod_refs_trap(task, name, right, delta); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_mod_refs(task, name, right, delta); + } - return (rv); + return rv; } kern_return_t mach_port_peek( - ipc_space_t task, - mach_port_name_t name, + ipc_space_t task, + mach_port_name_t name, mach_msg_trailer_type_t trailer_type, - mach_port_seqno_t *seqnop, - mach_msg_size_t *msg_sizep, - mach_msg_id_t *msg_idp, + mach_port_seqno_t *seqnop, + mach_msg_size_t *msg_sizep, + mach_msg_id_t *msg_idp, mach_msg_trailer_info_t trailer_infop, - mach_msg_type_number_t *trailer_sizep) + mach_msg_type_number_t *trailer_sizep) { kern_return_t rv; - rv = _kernelrpc_mach_port_peek(task, name, trailer_type, - seqnop, msg_sizep, msg_idp, - trailer_infop, trailer_sizep); + rv = _kernelrpc_mach_port_peek(task, name, trailer_type, + seqnop, msg_sizep, msg_idp, + trailer_infop, trailer_sizep); - return (rv); + return rv; } kern_return_t @@ -195,7 +199,7 @@ mach_port_set_mscount( rv = _kernelrpc_mach_port_set_mscount(task, name, mscount); - return (rv); + return rv; } kern_return_t @@ -208,9 +212,9 @@ mach_port_get_set_status( kern_return_t rv; rv = _kernelrpc_mach_port_get_set_status(task, name, members, - membersCnt); + membersCnt); - return (rv); + return rv; } kern_return_t @@ -223,10 +227,11 @@ mach_port_move_member( rv = _kernelrpc_mach_port_move_member_trap(task, member, after); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_move_member(task, member, after); + } - return (rv); + return rv; } kern_return_t @@ -242,9 +247,9 @@ 
mach_port_request_notification( kern_return_t rv; rv = _kernelrpc_mach_port_request_notification(task, name, msgid, - sync, notify, notifyPoly, previous); + sync, notify, notifyPoly, previous); - return (rv); + return rv; } kern_return_t @@ -256,13 +261,14 @@ mach_port_insert_right( { kern_return_t rv; - rv = _kernelrpc_mach_port_insert_right_trap(task, name, poly, polyPoly); + rv = _kernelrpc_mach_port_insert_right_trap(task, name, poly, polyPoly); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_insert_right(task, name, poly, polyPoly); + } - return (rv); + return rv; } kern_return_t @@ -276,9 +282,9 @@ mach_port_extract_right( kern_return_t rv; rv = _kernelrpc_mach_port_extract_right(task, name, msgt_name, - poly, polyPoly); + poly, polyPoly); - return (rv); + return rv; } kern_return_t @@ -291,7 +297,7 @@ mach_port_set_seqno( rv = _kernelrpc_mach_port_set_seqno(task, name, seqno); - return (rv); + return rv; } kern_return_t @@ -305,23 +311,26 @@ mach_port_get_attributes( kern_return_t rv; rv = _kernelrpc_mach_port_get_attributes_trap(task, name, flavor, - port_info_out, port_info_outCnt); + port_info_out, port_info_outCnt); #ifdef __x86_64__ /* REMOVE once XBS kernel has new trap */ - if (rv == ((1 << 24) | 40)) /* see mach/i386/syscall_sw.h */ + if (rv == ((1 << 24) | 40)) { /* see mach/i386/syscall_sw.h */ rv = MACH_SEND_INVALID_DEST; + } #elif defined(__i386__) /* REMOVE once XBS kernel has new trap */ - if (rv == (kern_return_t)(-40)) + if (rv == (kern_return_t)(-40)) { rv = MACH_SEND_INVALID_DEST; + } #endif - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_get_attributes(task, name, flavor, - port_info_out, port_info_outCnt); + port_info_out, port_info_outCnt); + } - return (rv); + return rv; } kern_return_t @@ -335,9 +344,9 @@ mach_port_set_attributes( kern_return_t rv; rv = _kernelrpc_mach_port_set_attributes(task, name, flavor, - port_info, port_infoCnt); + port_info, port_infoCnt); - return (rv); + return rv; } kern_return_t @@ -351,7 +360,7 @@ mach_port_allocate_qos( rv = _kernelrpc_mach_port_allocate_qos(task, right, qos, name); - return (rv); + return rv; } kern_return_t @@ -366,7 +375,7 @@ mach_port_allocate_full( rv = _kernelrpc_mach_port_allocate_full(task, right, proto, qos, name); - return (rv); + return rv; } kern_return_t @@ -378,7 +387,7 @@ task_set_port_space( rv = _kernelrpc_task_set_port_space(task, table_entries); - return (rv); + return rv; } kern_return_t @@ -391,7 +400,7 @@ mach_port_get_srights( rv = _kernelrpc_mach_port_get_srights(task, name, srights); - return (rv); + return rv; } kern_return_t @@ -406,9 +415,9 @@ mach_port_space_info( kern_return_t rv; rv = _kernelrpc_mach_port_space_info(task, space_info, table_info, - table_infoCnt, tree_info, tree_infoCnt); + table_infoCnt, tree_info, tree_infoCnt); - return (rv); + return rv; } kern_return_t @@ -420,7 +429,7 @@ mach_port_space_basic_info( rv = _kernelrpc_mach_port_space_basic_info(task, space_basic_info); - return (rv); + return rv; } static inline mach_port_t @@ -520,9 +529,9 @@ mach_port_dnrequest_info( kern_return_t rv; rv = _kernelrpc_mach_port_dnrequest_info(task, name, dnr_total, - dnr_used); + dnr_used); - return (rv); + return rv; } kern_return_t @@ -535,9 +544,9 @@ mach_port_kernel_object( kern_return_t rv; rv = _kernelrpc_mach_port_kernel_object(task, name, - object_type, object_addr); + object_type, object_addr); - return (rv); + return rv; } kern_return_t @@ -550,10 +559,11 @@ 
mach_port_insert_member( rv = _kernelrpc_mach_port_insert_member_trap(task, name, pset); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_insert_member(task, name, pset); + } - return (rv); + return rv; } kern_return_t @@ -564,12 +574,13 @@ mach_port_extract_member( { kern_return_t rv; - rv = _kernelrpc_mach_port_extract_member_trap(task, name, pset); + rv = _kernelrpc_mach_port_extract_member_trap(task, name, pset); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_extract_member(task, name, pset); + } - return (rv); + return rv; } kern_return_t @@ -587,7 +598,7 @@ mach_port_get_context( *context = (mach_port_context_t)wide_context; } - return (rv); + return rv; } kern_return_t @@ -600,7 +611,7 @@ mach_port_set_context( rv = _kernelrpc_mach_port_set_context(task, name, context); - return (rv); + return rv; } kern_return_t @@ -614,85 +625,86 @@ mach_port_kobject( rv = _kernelrpc_mach_port_kobject(task, name, object_type, object_addr); - return (rv); + return rv; } kern_return_t mach_port_construct( - ipc_space_t task, - mach_port_options_t *options, - mach_port_context_t context, - mach_port_name_t *name) + ipc_space_t task, + mach_port_options_t *options, + mach_port_context_t context, + mach_port_name_t *name) { kern_return_t rv; rv = _kernelrpc_mach_port_construct_trap(task, options, (uint64_t) context, name); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_construct(task, options, (uint64_t) context, name); + } - return (rv); + return rv; } kern_return_t mach_port_destruct( - ipc_space_t task, - mach_port_name_t name, - mach_port_delta_t srdelta, - mach_port_context_t guard) + ipc_space_t task, + mach_port_name_t name, + mach_port_delta_t srdelta, + mach_port_context_t guard) { kern_return_t rv; rv = _kernelrpc_mach_port_destruct_trap(task, name, srdelta, (uint64_t) guard); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_destruct(task, name, srdelta, (uint64_t) guard); + } - return (rv); - + return rv; } kern_return_t mach_port_guard( - ipc_space_t task, - mach_port_name_t name, - mach_port_context_t guard, - boolean_t strict) + ipc_space_t task, + mach_port_name_t name, + mach_port_context_t guard, + boolean_t strict) { kern_return_t rv; rv = _kernelrpc_mach_port_guard_trap(task, name, (uint64_t) guard, strict); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_guard(task, name, (uint64_t) guard, strict); + } - return (rv); - + return rv; } kern_return_t mach_port_unguard( - ipc_space_t task, - mach_port_name_t name, - mach_port_context_t guard) + ipc_space_t task, + mach_port_name_t name, + mach_port_context_t guard) { kern_return_t rv; rv = _kernelrpc_mach_port_unguard_trap(task, name, (uint64_t) guard); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_port_unguard(task, name, (uint64_t) guard); + } - return (rv); - + return rv; } extern kern_return_t _kernelrpc_mach_voucher_extract_attr_recipe( - mach_port_name_t voucher, - mach_voucher_attr_key_t key, - mach_voucher_attr_raw_recipe_t recipe, - mach_msg_type_number_t *recipe_size); + mach_port_name_t voucher, + mach_voucher_attr_key_t key, + mach_voucher_attr_raw_recipe_t recipe, + mach_msg_type_number_t *recipe_size); kern_return_t mach_voucher_extract_attr_recipe( @@ -705,8 +717,9 @@ mach_voucher_extract_attr_recipe( rv = 
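mach_port_construct()/mach_port_destruct() and the guard calls above widen the caller's mach_port_context_t to uint64_t before entering the trap, so the guard value crosses the boundary unchanged in 32-bit processes. A hedged usage sketch of the guarded-port lifecycle; the MPO_* flag names come from <mach/port.h>, but treat the combination as an assumption:

	#include <mach/mach.h>

	static void
	guarded_port_example(void)
	{
		mach_port_options_t opts = {
			.flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT,
		};
		mach_port_context_t guard = 0xfeedface; /* arbitrary guard value */
		mach_port_name_t name = MACH_PORT_NULL;

		if (mach_port_construct(mach_task_self(), &opts, guard,
		    &name) != KERN_SUCCESS) {
			return;
		}

		/* A wrong guard here raises a guard exception instead of
		 * returning; srdelta == 0 drops only the receive right. */
		(void)mach_port_destruct(mach_task_self(), name, 0, guard);
	}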
mach_voucher_extract_attr_recipe_trap(voucher, key, recipe, recipe_size); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_voucher_extract_attr_recipe(voucher, key, recipe, recipe_size); + } return rv; } @@ -714,8 +727,8 @@ mach_voucher_extract_attr_recipe( kern_return_t thread_destruct_special_reply_port( - mach_port_name_t port, - thread_destruct_special_reply_port_rights_t rights) + mach_port_name_t port, + thread_destruct_special_reply_port_rights_t rights) { switch (rights) { case THREAD_SPECIAL_REPLY_PORT_ALL: diff --git a/libsyscall/mach/mach_right.c b/libsyscall/mach/mach_right.c index c69133e3a..f1857fe65 100644 --- a/libsyscall/mach/mach_right.c +++ b/libsyscall/mach/mach_right.c @@ -34,15 +34,15 @@ #pragma mark Utilities #define _assert_mach(__op, __kr) \ do { \ - if (kr != KERN_SUCCESS) { \ - __builtin_trap(); \ - } \ + if (kr != KERN_SUCCESS) { \ + __builtin_trap(); \ + } \ } while (0) #pragma mark API mach_right_recv_t mach_right_recv_construct(mach_right_flags_t flags, - mach_right_send_t *_Nullable sr, uintptr_t ctx) + mach_right_send_t *_Nullable sr, uintptr_t ctx) { kern_return_t kr = KERN_FAILURE; mach_port_t p = MACH_PORT_NULL; @@ -72,7 +72,7 @@ mach_right_recv_construct(mach_right_flags_t flags, void mach_right_recv_destruct(mach_right_recv_t r, mach_right_send_t *s, - uintptr_t ctx) + uintptr_t ctx) { kern_return_t kr = KERN_FAILURE; mach_port_delta_t srd = 0; @@ -80,7 +80,7 @@ mach_right_recv_destruct(mach_right_recv_t r, mach_right_send_t *s, if (s) { if (r.mrr_name != s->mrs_name) { _os_set_crash_log_cause_and_message(s->mrs_name, - "api misuse: bad send right"); + "api misuse: bad send right"); __builtin_trap(); } @@ -97,7 +97,7 @@ mach_right_send_create(mach_right_recv_t r) kern_return_t kr = KERN_FAILURE; kr = mach_port_insert_right(mach_task_self(), r.mrr_name, r.mrr_name, - MACH_MSG_TYPE_MAKE_SEND); + MACH_MSG_TYPE_MAKE_SEND); _mach_assert("create send right", kr); return mach_right_send(r.mrr_name); @@ -110,7 +110,7 @@ mach_right_send_retain(mach_right_send_t s) mach_right_send_t rs = MACH_RIGHT_SEND_NULL; kr = mach_port_mod_refs(mach_task_self(), s.mrs_name, - MACH_PORT_RIGHT_SEND, 1); + MACH_PORT_RIGHT_SEND, 1); switch (kr) { case 0: rs = s; @@ -119,9 +119,9 @@ mach_right_send_retain(mach_right_send_t s) rs.mrs_name = MACH_PORT_DEAD; break; case KERN_INVALID_NAME: - // mach_port_mod_refs() will return success when given either - // MACH_PORT_DEAD or MACH_PORT_NULL with send or send-once right - // operations, so this is always fatal. + // mach_port_mod_refs() will return success when given either + // MACH_PORT_DEAD or MACH_PORT_NULL with send or send-once right + // operations, so this is always fatal. 
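One quirk worth noting in the _assert_mach() hunk above: the re-indent preserves the macro body exactly, and that body tests the caller's local kr rather than its __kr parameter (and never uses __op), so the macro only works inside functions whose status variable is literally named kr. A parameter-hygienic variant would look like this (a sketch, not what ships):

	#define _assert_mach_hygienic(__op, __kr) \
		do { \
			if ((__kr) != KERN_SUCCESS) { \
				__builtin_trap(); \
			} \
		} while (0)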
default: _mach_assert("retain send right", kr); } @@ -135,13 +135,13 @@ mach_right_send_release(mach_right_send_t s) kern_return_t kr = KERN_FAILURE; kr = mach_port_mod_refs(mach_task_self(), s.mrs_name, - MACH_PORT_RIGHT_SEND, -1); + MACH_PORT_RIGHT_SEND, -1); switch (kr) { case 0: break; case KERN_INVALID_RIGHT: kr = mach_port_mod_refs(mach_task_self(), s.mrs_name, - MACH_PORT_RIGHT_DEAD_NAME, -1); + MACH_PORT_RIGHT_DEAD_NAME, -1); _mach_assert("release dead name", kr); break; default: @@ -155,7 +155,7 @@ mach_right_send_once_create(mach_right_recv_t r) mach_msg_type_name_t right = 0; mach_port_t so = MACH_PORT_NULL; kern_return_t kr = mach_port_extract_right(mach_task_self(), r.mrr_name, - MACH_MSG_TYPE_MAKE_SEND_ONCE, &so, &right); + MACH_MSG_TYPE_MAKE_SEND_ONCE, &so, &right); _mach_assert("create send-once right", kr); return mach_right_send_once(so); @@ -167,13 +167,13 @@ mach_right_send_once_consume(mach_right_send_once_t so) kern_return_t kr = KERN_FAILURE; kr = mach_port_mod_refs(mach_task_self(), so.mrso_name, - MACH_PORT_RIGHT_SEND_ONCE, -1); + MACH_PORT_RIGHT_SEND_ONCE, -1); switch (kr) { case 0: break; case KERN_INVALID_RIGHT: kr = mach_port_mod_refs(mach_task_self(), so.mrso_name, - MACH_PORT_RIGHT_DEAD_NAME, -1); + MACH_PORT_RIGHT_DEAD_NAME, -1); _mach_assert("release dead name", kr); break; default: diff --git a/libsyscall/mach/mach_vm.c b/libsyscall/mach/mach_vm.c index 00bf511a2..fe89c6513 100644 --- a/libsyscall/mach/mach_vm.c +++ b/libsyscall/mach/mach_vm.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
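The release paths above encode the dead-name rule: when the receive right backing a send or send-once right dies, the name mutates into a dead name, so a failed decrement of the original right type is retried as a dead-name decrement. Extracted as a standalone sketch:

	/* Drop one send-right reference, tolerating a port that has
	 * died since the right was acquired. */
	kern_return_t kr = mach_port_mod_refs(mach_task_self(), name,
	    MACH_PORT_RIGHT_SEND, -1);
	if (kr == KERN_INVALID_RIGHT) {
		kr = mach_port_mod_refs(mach_task_self(), name,
		    MACH_PORT_RIGHT_DEAD_NAME, -1);
	}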
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -54,15 +54,16 @@ mach_vm_allocate( rv = _kernelrpc_mach_vm_allocate_trap(target, address, size, flags); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_vm_allocate(target, address, size, flags); + } if (__syscall_logger) { int userTagFlags = flags & VM_FLAGS_ALIAS_MASK; __syscall_logger(stack_logging_type_vm_allocate | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0); } - return (rv); + return rv; } kern_return_t @@ -79,10 +80,11 @@ mach_vm_deallocate( rv = _kernelrpc_mach_vm_deallocate_trap(target, address, size); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_vm_deallocate(target, address, size); + } - return (rv); + return rv; } kern_return_t @@ -96,13 +98,14 @@ mach_vm_protect( kern_return_t rv; rv = _kernelrpc_mach_vm_protect_trap(task, address, size, set_maximum, - new_protection); + new_protection); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_vm_protect(task, address, size, - set_maximum, new_protection); + set_maximum, new_protection); + } - return (rv); + return rv; } kern_return_t @@ -123,7 +126,7 @@ vm_allocate( *address = (vm_address_t)(mach_addr & ((vm_address_t)-1)); #endif - return (rv); + return rv; } kern_return_t @@ -136,7 +139,7 @@ vm_deallocate( rv = mach_vm_deallocate(task, address, size); - return (rv); + return rv; } kern_return_t @@ -151,7 +154,7 @@ vm_protect( rv = mach_vm_protect(task, address, size, set_maximum, new_protection); - return (rv); + return rv; } kern_return_t @@ -171,13 +174,15 @@ mach_vm_map( kern_return_t rv = MACH_SEND_INVALID_DEST; if (object == MEMORY_OBJECT_NULL && max_protection == VM_PROT_ALL && - inheritance == VM_INHERIT_DEFAULT) + inheritance == VM_INHERIT_DEFAULT) { rv = _kernelrpc_mach_vm_map_trap(target, address, size, mask, flags, - cur_protection); + cur_protection); + } - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_vm_map(target, address, size, mask, flags, object, - offset, copy, cur_protection, max_protection, inheritance); + offset, copy, cur_protection, max_protection, inheritance); + } if (__syscall_logger) { int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem; @@ -185,7 +190,7 @@ mach_vm_map( __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0); } - return (rv); + return rv; } kern_return_t @@ -200,13 +205,13 @@ mach_vm_remap( boolean_t copy, vm_prot_t *cur_protection, vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_inherit_t inheritance) { kern_return_t rv; rv = _kernelrpc_mach_vm_remap(target, address, size, mask, flags, - src_task, src_address, copy, cur_protection, max_protection, - inheritance); + src_task, src_address, copy, cur_protection, max_protection, + inheritance); if (__syscall_logger) { int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem; @@ -214,7 +219,7 @@ mach_vm_remap( __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0); } - return (rv); + return rv; } kern_return_t @@ -236,7 +241,7 @@ mach_vm_read( __syscall_logger(eventTypeFlags, (uintptr_t)mach_task_self(), (uintptr_t)*dataCnt, 0, *data, 0); } - return (rv); + return rv; } kern_return_t @@ -256,7 +261,7 @@ vm_map( kern_return_t rv; rv = _kernelrpc_vm_map(target, 
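Beyond the usual trap/MIG fallback, the VM wrappers above report successful calls to the optional __syscall_logger hook (malloc stack logging), forwarding the user tag carried in the low flag bits. A hedged usage sketch of an allocation such tooling can attribute:

	#include <mach/mach.h>
	#include <mach/vm_statistics.h>

	static kern_return_t
	tagged_page_example(void)
	{
		mach_vm_address_t addr = 0;
		kern_return_t kr;

		/* VM_MAKE_TAG() stores the tag in VM_FLAGS_ALIAS_MASK,
		 * which is exactly what the logger extracts above. */
		kr = mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
		    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_MALLOC));
		if (kr != KERN_SUCCESS) {
			return kr;
		}
		return mach_vm_deallocate(mach_task_self(), addr, vm_page_size);
	}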
address, size, mask, flags, object, - offset, copy, cur_protection, max_protection, inheritance); + offset, copy, cur_protection, max_protection, inheritance); if (__syscall_logger) { int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem; @@ -264,7 +269,7 @@ vm_map( __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0); } - return (rv); + return rv; } kern_return_t @@ -279,13 +284,13 @@ vm_remap( boolean_t copy, vm_prot_t *cur_protection, vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_inherit_t inheritance) { kern_return_t rv; rv = _kernelrpc_vm_remap(target, address, size, mask, flags, - src_task, src_address, copy, cur_protection, max_protection, - inheritance); + src_task, src_address, copy, cur_protection, max_protection, + inheritance); if (__syscall_logger) { int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem; @@ -293,7 +298,7 @@ vm_remap( __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0); } - return (rv); + return rv; } kern_return_t @@ -315,35 +320,36 @@ vm_read( __syscall_logger(eventTypeFlags, (uintptr_t)mach_task_self(), (uintptr_t)*dataCnt, 0, *data, 0); } - return (rv); + return rv; } kern_return_t mach_vm_purgable_control( - mach_port_name_t target, - mach_vm_offset_t address, - vm_purgable_t control, - int *state) + mach_port_name_t target, + mach_vm_offset_t address, + vm_purgable_t control, + int *state) { kern_return_t rv; rv = _kernelrpc_mach_vm_purgable_control_trap(target, address, control, state); - if (rv == MACH_SEND_INVALID_DEST) + if (rv == MACH_SEND_INVALID_DEST) { rv = _kernelrpc_mach_vm_purgable_control(target, address, control, state); + } - return (rv); + return rv; } kern_return_t vm_purgable_control( - mach_port_name_t task, - vm_offset_t address, - vm_purgable_t control, - int *state) + mach_port_name_t task, + vm_offset_t address, + vm_purgable_t control, + int *state) { return mach_vm_purgable_control(task, - (mach_vm_offset_t) address, - control, - state); + (mach_vm_offset_t) address, + control, + state); } diff --git a/libsyscall/mach/mig_allocate.c b/libsyscall/mach/mig_allocate.c index ed1288662..6d827ac8d 100644 --- a/libsyscall/mach/mig_allocate.c +++ b/libsyscall/mach/mig_allocate.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,32 +22,32 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
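mach_vm_purgable_control() above gets the same trap-first treatment, and vm_purgable_control() simply widens its vm_offset_t to mach_vm_offset_t and delegates. A hedged usage sketch, with constants from <mach/vm_purgable.h>; it assumes the region was allocated with VM_FLAGS_PURGABLE:

	#include <mach/mach.h>

	/* Returns TRUE if the kernel emptied the region while it was
	 * volatile. On SET_STATE, `state` returns the previous state. */
	static boolean_t
	was_purged_example(mach_vm_address_t addr)
	{
		int state = VM_PURGABLE_NONVOLATILE;
		kern_return_t kr = mach_vm_purgable_control(mach_task_self(),
		    addr, VM_PURGABLE_SET_STATE, &state);
		return kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY;
	}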
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,9 +60,10 @@ void mig_allocate(vm_address_t *addr_p, vm_size_t size) { if (vm_allocate(mach_task_self_, - addr_p, - size, - VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|TRUE) - != KERN_SUCCESS) + addr_p, + size, + VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE) + != KERN_SUCCESS) { *addr_p = 0; + } } diff --git a/libsyscall/mach/mig_deallocate.c b/libsyscall/mach/mig_deallocate.c index 0f406a43d..ff6ed10c9 100644 --- a/libsyscall/mach/mig_deallocate.c +++ b/libsyscall/mach/mig_deallocate.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,32 +22,32 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
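In the mig_allocate() hunk above, the flags argument VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE leans on TRUE being numerically 1, which is VM_FLAGS_ANYWHERE: the call both tags the buffer for memory-attribution tools and lets the kernel pick the address. Spelled with the named flag, the call is equivalent to:

	kr = vm_allocate(mach_task_self_, addr_p, size,
	    VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | VM_FLAGS_ANYWHERE);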
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,6 +60,6 @@ void mig_deallocate(vm_address_t addr, vm_size_t size) { (void)vm_deallocate(mach_task_self_, - addr, - size); + addr, + size); } diff --git a/libsyscall/mach/mig_reply_port.c b/libsyscall/mach/mig_reply_port.c index ee7e867cd..521b5d626 100644 --- a/libsyscall/mach/mig_reply_port.c +++ b/libsyscall/mach/mig_reply_port.c @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/mach/mig_reply_setup.c b/libsyscall/mach/mig_reply_setup.c index 96df01b1d..62511981a 100644 --- a/libsyscall/mach/mig_reply_setup.c +++ b/libsyscall/mach/mig_reply_setup.c @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,32 +22,32 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -68,11 +68,11 @@ void mig_reply_setup(mach_msg_header_t *request, mach_msg_header_t *reply) { -#define InP (request) -#define OutP ((mig_reply_error_t *) reply) +#define InP (request) +#define OutP ((mig_reply_error_t *) reply) OutP->Head.msgh_bits = - MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0); + MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0); OutP->Head.msgh_size = sizeof(mig_reply_error_t); OutP->Head.msgh_remote_port = InP->msgh_local_port; OutP->Head.msgh_local_port = MACH_PORT_NULL; diff --git a/libsyscall/mach/mig_strncpy.c b/libsyscall/mach/mig_strncpy.c index 3bc188adf..731340426 100644 --- a/libsyscall/mach/mig_strncpy.c +++ b/libsyscall/mach/mig_strncpy.c @@ -2,7 +2,7 @@ * Copyright (c) 1999-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,31 +22,31 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
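The mig_reply_setup() body above builds the reply header by moving the request's local-port disposition into the reply's remote slot. For the common case of a client that supplied a make-send-once reply port, the received disposition is MACH_MSG_TYPE_MOVE_SEND_ONCE, so the constructed header behaves like this illustrative expansion:

	/* Illustrative expansion for the send-once reply-port case. */
	reply->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0);
	reply->msgh_remote_port = request->msgh_local_port; /* consume it */
	reply->msgh_local_port  = MACH_PORT_NULL;  /* no reply to the reply */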
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -54,16 +54,16 @@ * mig_strncpy.c - by Joshua Block * * mig_strncpy -- Bounded string copy. Does what the library routine strncpy - * OUGHT to do: Copies the (null terminated) string in src into dest, a + * OUGHT to do: Copies the (null terminated) string in src into dest, a * buffer of length len. Assures that the copy is still null terminated * and doesn't overflow the buffer, truncating the copy if necessary. * * Parameters: - * + * * dest - Pointer to destination buffer. - * + * * src - Pointer to source string. - * + * * len - Length of destination buffer. * * Result: @@ -73,24 +73,24 @@ int mig_strncpy( - char *dest, - const char *src, - int len) + char *dest, + const char *src, + int len) { - int i; + int i; - if (len <= 0) { + if (len <= 0) { return 0; } - for (i = 1; i < len; i++) { + for (i = 1; i < len; i++) { if (!(*dest++ = *src++)) { return i; } } - *dest = '\0'; - return i; + *dest = '\0'; + return i; } /* @@ -114,9 +114,9 @@ mig_strncpy( */ int mig_strncpy_zerofill( - char *dest, - const char *src, - int len) + char *dest, + const char *src, + int len) { int i; boolean_t terminated = FALSE; diff --git a/libsyscall/mach/ms_thread_switch.c b/libsyscall/mach/ms_thread_switch.c index 2d1f16fc8..6b71c2eae 100644 --- a/libsyscall/mach/ms_thread_switch.c +++ b/libsyscall/mach/ms_thread_switch.c @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
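Per the doc comment above, mig_strncpy() always NUL-terminates and returns the number of bytes written including the terminator (capped at len), unlike strncpy(). A hedged usage sketch:

	char buf[8];
	int n;

	n = mig_strncpy(buf, "mach", sizeof(buf));
	/* buf == "mach", n == 5 (four characters plus the NUL) */

	n = mig_strncpy(buf, "a much longer string", sizeof(buf));
	/* truncated but still terminated: buf == "a much ", n == 8 */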
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,7 +61,7 @@ #include #include -extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t); // From pthread_internals.h +extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t); // From pthread_internals.h kern_return_t thread_switch( @@ -70,7 +70,7 @@ thread_switch( mach_msg_timeout_t option_time) { kern_return_t result; - + result = syscall_thread_switch(thread, option, option_time); return result; } diff --git a/libsyscall/mach/panic.c b/libsyscall/mach/panic.c index 7049b9561..ab4321572 100644 --- a/libsyscall/mach/panic.c +++ b/libsyscall/mach/panic.c @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -78,8 +78,8 @@ panic(const char *s, ...) 
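thread_switch() above is a one-line shim over the trap. A hedged usage sketch, with option names from <mach/thread_switch.h>:

	/* Depress this thread's priority for about 10 ms, handing the
	 * CPU to the hinted thread if it is runnable; MACH_PORT_NULL
	 * means "no particular thread". */
	kern_return_t kr = thread_switch(MACH_PORT_NULL,
	    SWITCH_OPTION_DEPRESS, 10);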
char buffer[1024]; int len = _mach_snprintf(buffer, sizeof(buffer), "panic: %s\n", s); write(__STDERR_FILENO, buffer, len); - -#define RB_DEBUGGER 0x1000 /* enter debugger NOW */ + +#define RB_DEBUGGER 0x1000 /* enter debugger NOW */ (void) host_reboot(master_host_port, RB_DEBUGGER); /* 4279008 - don't return */ diff --git a/libsyscall/mach/port_descriptions.c b/libsyscall/mach/port_descriptions.c index a5d8a93e1..035cf2237 100644 --- a/libsyscall/mach/port_descriptions.c +++ b/libsyscall/mach/port_descriptions.c @@ -71,7 +71,7 @@ mach_host_special_port_description(int port) [HOST_SYSPOLICYD_PORT] = "syspolicyd", }; _Static_assert(HOST_SYSPOLICYD_PORT == HOST_MAX_SPECIAL_PORT, - "all host special ports must have descriptions"); + "all host special ports must have descriptions"); return hsp_descs[port_index]; } @@ -96,7 +96,7 @@ mach_task_special_port_description(int port) [TASK_RESOURCE_NOTIFY_PORT] = "resource notify", }; _Static_assert(TASK_RESOURCE_NOTIFY_PORT == TASK_MAX_SPECIAL_PORT, - "all task special ports must have descriptions"); + "all task special ports must have descriptions"); return tsp_descs[port_index]; } @@ -152,7 +152,7 @@ mach_host_special_port_for_id(const char *id) }; return port_for_id_internal(id, hsp_ids, - sizeof(hsp_ids) / sizeof(hsp_ids[0])); + sizeof(hsp_ids) / sizeof(hsp_ids[0])); } int @@ -171,5 +171,5 @@ mach_task_special_port_for_id(const char *id) }; return port_for_id_internal(id, tsp_ids, - sizeof(tsp_ids) / sizeof(tsp_ids[0])); + sizeof(tsp_ids) / sizeof(tsp_ids[0])); } diff --git a/libsyscall/mach/port_obj.c b/libsyscall/mach/port_obj.c index 788adcee9..41a41a915 100644 --- a/libsyscall/mach/port_obj.c +++ b/libsyscall/mach/port_obj.c @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
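The port_descriptions.c hunks above pair a designated-initializer description table with a _Static_assert that the last known special port equals the table's final entry, so bumping HOST_MAX_SPECIAL_PORT without describing the new port fails at build time. The pattern, sketched with hypothetical names:

	enum { MY_FOO_PORT = 0, MY_BAR_PORT = 1, MY_MAX_PORT = MY_BAR_PORT };

	static const char *const my_descs[] = {
		[MY_FOO_PORT] = "foo",
		[MY_BAR_PORT] = "bar",
	};
	/* Fires when MY_MAX_PORT moves past the last described entry. */
	_Static_assert(MY_BAR_PORT == MY_MAX_PORT,
	    "all entries must have descriptions");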
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,7 +37,7 @@ #include #include -#define DEFAULT_TABLE_SIZE (64 * 1024) +#define DEFAULT_TABLE_SIZE (64 * 1024) struct port_obj_tentry *port_obj_table; int port_obj_table_size = DEFAULT_TABLE_SIZE; @@ -48,9 +48,10 @@ port_obj_init(int maxsize) kern_return_t kr; kr = vm_allocate(mach_task_self_, - (vm_offset_t *)&port_obj_table, - (vm_size_t)(maxsize * sizeof (*port_obj_table)), - TRUE); - if (kr != KERN_SUCCESS) + (vm_offset_t *)&port_obj_table, + (vm_size_t)(maxsize * sizeof(*port_obj_table)), + TRUE); + if (kr != KERN_SUCCESS) { panic("port_obj_init: can't vm_allocate"); + } } diff --git a/libsyscall/mach/semaphore.c b/libsyscall/mach/semaphore.c index 26a88594b..d5a359711 100644 --- a/libsyscall/mach/semaphore.c +++ b/libsyscall/mach/semaphore.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -36,48 +36,48 @@ kern_return_t semaphore_signal(mach_port_t signal_semaphore) { - return semaphore_signal_trap(signal_semaphore); + return semaphore_signal_trap(signal_semaphore); } kern_return_t semaphore_signal_all(mach_port_t signal_semaphore) { - return semaphore_signal_all_trap(signal_semaphore); + return semaphore_signal_all_trap(signal_semaphore); } kern_return_t semaphore_signal_thread(mach_port_t signal_semaphore, mach_port_t thread_act) { - return semaphore_signal_thread_trap(signal_semaphore, thread_act); + return semaphore_signal_thread_trap(signal_semaphore, thread_act); } kern_return_t semaphore_wait(mach_port_t wait_semaphore) { - return semaphore_wait_trap(wait_semaphore); + return semaphore_wait_trap(wait_semaphore); } kern_return_t semaphore_timedwait(mach_port_t wait_semaphore, mach_timespec_t wait_time) { - return semaphore_timedwait_trap(wait_semaphore, - wait_time.tv_sec, - wait_time.tv_nsec); + return semaphore_timedwait_trap(wait_semaphore, + wait_time.tv_sec, + wait_time.tv_nsec); } kern_return_t semaphore_wait_signal(mach_port_t wait_semaphore, mach_port_t signal_semaphore) { - return semaphore_wait_signal_trap(wait_semaphore, signal_semaphore); + return semaphore_wait_signal_trap(wait_semaphore, signal_semaphore); } kern_return_t semaphore_timedwait_signal(mach_port_t wait_semaphore, - mach_port_t signal_semaphore, - mach_timespec_t wait_time) + mach_port_t signal_semaphore, + mach_timespec_t wait_time) { - return semaphore_timedwait_signal_trap(wait_semaphore, - signal_semaphore, - wait_time.tv_sec, - wait_time.tv_nsec); + return semaphore_timedwait_signal_trap(wait_semaphore, + signal_semaphore, + wait_time.tv_sec, + wait_time.tv_nsec); } diff --git a/libsyscall/mach/servers/key_defs.h b/libsyscall/mach/servers/key_defs.h index 5d46904f3..7215c5f3a 100644 --- a/libsyscall/mach/servers/key_defs.h +++ b/libsyscall/mach/servers/key_defs.h @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies @@ -52,57 +52,57 @@ * */ -#ifndef _KEY_DEFS_ -#define _KEY_DEFS_ +#ifndef _KEY_DEFS_ +#define _KEY_DEFS_ /* * An encrytion key. 
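Every routine in the semaphore.c hunk above is a direct trap wrapper; creation and destruction stay on the MIG path (semaphore_create()/semaphore_destroy(), which do not appear in this file). A hedged usage sketch:

	#include <mach/mach.h>

	static void
	semaphore_example(void)
	{
		semaphore_t sem;
		if (semaphore_create(mach_task_self(), &sem,
		    SYNC_POLICY_FIFO, 0) != KERN_SUCCESS) {
			return;
		}

		(void)semaphore_signal(sem);    /* post one wakeup */

		mach_timespec_t ts = { .tv_sec = 1, .tv_nsec = 0 };
		/* Consumes the signal posted above; would return
		 * KERN_OPERATION_TIMED_OUT after ~1 s otherwise. */
		(void)semaphore_timedwait(sem, ts);

		(void)semaphore_destroy(mach_task_self(), sem);
	}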
*/ typedef union { - unsigned char key_bytes[16]; - unsigned long key_longs[4]; + unsigned char key_bytes[16]; + unsigned long key_longs[4]; } key_t, *key_ptr_t; -#define KEY_EQUAL(key1, key2) \ - ((key1.key_longs[0] == key2.key_longs[0]) \ - && (key1.key_longs[1] == key2.key_longs[1]) \ - && (key1.key_longs[2] == key2.key_longs[2]) \ +#define KEY_EQUAL(key1, key2) \ + ((key1.key_longs[0] == key2.key_longs[0]) \ + && (key1.key_longs[1] == key2.key_longs[1]) \ + && (key1.key_longs[2] == key2.key_longs[2]) \ && (key1.key_longs[3] == key2.key_longs[3])) -#define KEY_IS_NULL(key) \ - (((key).key_longs[0] == 0) && ((key).key_longs[1] == 0) \ +#define KEY_IS_NULL(key) \ + (((key).key_longs[0] == 0) && ((key).key_longs[1] == 0) \ && ((key).key_longs[2] == 0) && ((key).key_longs[3] == 0)) /* * Macros to convert keys between network and host byte order. */ -#define NTOH_KEY(key) { \ - (key).key_longs[0] = ntohl((key).key_longs[0]); \ - (key).key_longs[1] = ntohl((key).key_longs[1]); \ - (key).key_longs[2] = ntohl((key).key_longs[2]); \ - (key).key_longs[3] = ntohl((key).key_longs[3]); \ +#define NTOH_KEY(key) { \ + (key).key_longs[0] = ntohl((key).key_longs[0]); \ + (key).key_longs[1] = ntohl((key).key_longs[1]); \ + (key).key_longs[2] = ntohl((key).key_longs[2]); \ + (key).key_longs[3] = ntohl((key).key_longs[3]); \ } -#define HTON_KEY(key) { \ - (key).key_longs[0] = htonl((key).key_longs[0]); \ - (key).key_longs[1] = htonl((key).key_longs[1]); \ - (key).key_longs[2] = htonl((key).key_longs[2]); \ - (key).key_longs[3] = htonl((key).key_longs[3]); \ +#define HTON_KEY(key) { \ + (key).key_longs[0] = htonl((key).key_longs[0]); \ + (key).key_longs[1] = htonl((key).key_longs[1]); \ + (key).key_longs[2] = htonl((key).key_longs[2]); \ + (key).key_longs[3] = htonl((key).key_longs[3]); \ } /* * Structure used to transmit or store a token or a key. */ typedef union { - key_t si_key; - key_t si_token; + key_t si_key; + key_t si_token; } secure_info_t, *secure_info_ptr_t; /* * Security Level of ports and messages. */ -#define PORT_NOT_SECURE 0 -#define MESSAGE_NOT_SECURE 0 +#define PORT_NOT_SECURE 0 +#define MESSAGE_NOT_SECURE 0 -#endif /* _KEY_DEFS_ */ +#endif /* _KEY_DEFS_ */ diff --git a/libsyscall/mach/servers/ls_defs.h b/libsyscall/mach/servers/ls_defs.h index 5415727fb..9b4733341 100644 --- a/libsyscall/mach/servers/ls_defs.h +++ b/libsyscall/mach/servers/ls_defs.h @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
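The key macros above convert the 128-bit key long-by-long between network and host order; note that this legacy header's key_t predates and shadows the POSIX key_t from <sys/types.h>, so the two cannot be mixed in one translation unit. Illustrative use on a wire-format key:

	static void
	key_example(secure_info_t *info)
	{
		NTOH_KEY(info->si_key);          /* wire -> host order */
		if (KEY_IS_NULL(info->si_key)) {
			/* peer supplied no key */
		}
	}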
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * Copyright (c) 1988 Carnegie-Mellon University @@ -39,150 +39,150 @@ */ -#ifndef _LS_DEFS_ -#define _LS_DEFS_ +#ifndef _LS_DEFS_ +#define _LS_DEFS_ #include /* * Definition for a log record. */ -typedef struct { - long code; - long thread; - long a1; - long a2; - long a3; - long a4; - long a5; - long a6; +typedef struct { + long code; + long thread; + long a1; + long a2; + long a3; + long a4; + long a5; + long a6; } log_rec_t; -typedef log_rec_t *log_ptr_t; +typedef log_rec_t *log_ptr_t; /* * Statistics record. */ -typedef struct { - int datagram_pkts_sent; - int datagram_pkts_rcvd; - int srr_requests_sent; - int srr_bcasts_sent; - int srr_requests_rcvd; - int srr_bcasts_rcvd; - int srr_replies_sent; - int srr_replies_rcvd; - int srr_retries_sent; - int srr_retries_rcvd; - int srr_cfailures_sent; - int srr_cfailures_rcvd; - int deltat_dpkts_sent; - int deltat_acks_rcvd; - int deltat_dpkts_rcvd; - int deltat_acks_sent; - int deltat_oldpkts_rcvd; - int deltat_oospkts_rcvd; - int deltat_retries_sent; - int deltat_retries_rcvd; - int deltat_cfailures_sent; - int deltat_cfailures_rcvd; - int deltat_aborts_sent; - int deltat_aborts_rcvd; - int vmtp_requests_sent; - int vmtp_requests_rcvd; - int vmtp_replies_sent; - int vmtp_replies_rcvd; - int ipc_in_messages; - int ipc_out_messages; - int ipc_unblocks_sent; - int ipc_unblocks_rcvd; - int pc_requests_sent; - int pc_requests_rcvd; - int pc_replies_rcvd; - int pc_startups_rcvd; - int nn_requests_sent; - int nn_requests_rcvd; - int nn_replies_rcvd; - int po_ro_hints_sent; - int po_ro_hints_rcvd; - int po_token_requests_sent; - int po_token_requests_rcvd; - int po_token_replies_rcvd; - int po_xfer_requests_sent; - int po_xfer_requests_rcvd; - int po_xfer_replies_rcvd; - int po_deaths_sent; - int po_deaths_rcvd; - int ps_requests_sent; - int ps_requests_rcvd; - int ps_replies_rcvd; - int ps_auth_requests_sent; - int ps_auth_requests_rcvd; - int ps_auth_replies_rcvd; - int mallocs_or_vm_allocates; - int mem_allocs; - int mem_deallocs; - int mem_allocobjs; - int mem_deallocobjs; - int pkts_encrypted; - int pkts_decrypted; - int vmtp_segs_encrypted; - int vmtp_segs_decrypted; - int tcp_requests_sent; - int tcp_replies_sent; - int tcp_requests_rcvd; - int tcp_replies_rcvd; - int tcp_send; - int tcp_recv; - int tcp_connect; - int tcp_accept; - int tcp_close; +typedef struct { + int datagram_pkts_sent; + int datagram_pkts_rcvd; + int srr_requests_sent; + int srr_bcasts_sent; + int srr_requests_rcvd; + int srr_bcasts_rcvd; + int srr_replies_sent; + int srr_replies_rcvd; + int srr_retries_sent; + int srr_retries_rcvd; + int srr_cfailures_sent; + int srr_cfailures_rcvd; + int deltat_dpkts_sent; + int deltat_acks_rcvd; + int deltat_dpkts_rcvd; + int deltat_acks_sent; + int deltat_oldpkts_rcvd; + int deltat_oospkts_rcvd; + int deltat_retries_sent; + int deltat_retries_rcvd; + int deltat_cfailures_sent; + int deltat_cfailures_rcvd; + int deltat_aborts_sent; + int deltat_aborts_rcvd; + int vmtp_requests_sent; + int vmtp_requests_rcvd; + int vmtp_replies_sent; + int vmtp_replies_rcvd; + int ipc_in_messages; + int ipc_out_messages; + int ipc_unblocks_sent; + int ipc_unblocks_rcvd; + int pc_requests_sent; + int pc_requests_rcvd; + int pc_replies_rcvd; + int pc_startups_rcvd; + int nn_requests_sent; + int nn_requests_rcvd; + int nn_replies_rcvd; + int po_ro_hints_sent; + int po_ro_hints_rcvd; + int po_token_requests_sent; + int 
po_token_requests_rcvd; + int po_token_replies_rcvd; + int po_xfer_requests_sent; + int po_xfer_requests_rcvd; + int po_xfer_replies_rcvd; + int po_deaths_sent; + int po_deaths_rcvd; + int ps_requests_sent; + int ps_requests_rcvd; + int ps_replies_rcvd; + int ps_auth_requests_sent; + int ps_auth_requests_rcvd; + int ps_auth_replies_rcvd; + int mallocs_or_vm_allocates; + int mem_allocs; + int mem_deallocs; + int mem_allocobjs; + int mem_deallocobjs; + int pkts_encrypted; + int pkts_decrypted; + int vmtp_segs_encrypted; + int vmtp_segs_decrypted; + int tcp_requests_sent; + int tcp_replies_sent; + int tcp_requests_rcvd; + int tcp_replies_rcvd; + int tcp_send; + int tcp_recv; + int tcp_connect; + int tcp_accept; + int tcp_close; } stat_t; -typedef stat_t *stat_ptr_t; +typedef stat_t *stat_ptr_t; /* * Debugging flags record. */ -typedef struct { - int print_level; - int ipc_in; - int ipc_out; - int tracing; - int vmtp; - int netname; - int deltat; - int tcp; - int mem; +typedef struct { + int print_level; + int ipc_in; + int ipc_out; + int tracing; + int vmtp; + int netname; + int deltat; + int tcp; + int mem; } debug_t; -typedef debug_t *debug_ptr_t; +typedef debug_t *debug_ptr_t; /* * Parameters record. */ typedef struct { - int srr_max_tries; - int srr_retry_sec; - int srr_retry_usec; - int deltat_max_tries; - int deltat_retry_sec; - int deltat_retry_usec; - int deltat_msg_life; - int pc_checkup_interval; - int crypt_algorithm; - int transport_default; - int conf_network; - int conf_netport; - int timer_quantum; - int tcp_conn_steady; - int tcp_conn_opening; - int tcp_conn_max; - int compat; - int syslog; - int old_nmmonitor; + int srr_max_tries; + int srr_retry_sec; + int srr_retry_usec; + int deltat_max_tries; + int deltat_retry_sec; + int deltat_retry_usec; + int deltat_msg_life; + int pc_checkup_interval; + int crypt_algorithm; + int transport_default; + int conf_network; + int conf_netport; + int timer_quantum; + int tcp_conn_steady; + int tcp_conn_opening; + int tcp_conn_max; + int compat; + int syslog; + int old_nmmonitor; } param_t; typedef param_t *param_ptr_t; @@ -192,29 +192,29 @@ typedef param_t *param_ptr_t; * Port statistics record. */ typedef struct { - u_int port_id; - u_int alive; - u_int nport_id_high; - u_int nport_id_low; - u_int nport_receiver; - u_int nport_owner; - u_int messages_sent; - u_int messages_rcvd; - u_int send_rights_sent; - u_int send_rights_rcvd_sender; - u_int send_rights_rcvd_recown; - u_int rcv_rights_xferd; - u_int own_rights_xferd; - u_int all_rights_xferd; - u_int tokens_sent; - u_int tokens_requested; - u_int xfer_hints_sent; - u_int xfer_hints_rcvd; + u_int port_id; + u_int alive; + u_int nport_id_high; + u_int nport_id_low; + u_int nport_receiver; + u_int nport_owner; + u_int messages_sent; + u_int messages_rcvd; + u_int send_rights_sent; + u_int send_rights_rcvd_sender; + u_int send_rights_rcvd_recown; + u_int rcv_rights_xferd; + u_int own_rights_xferd; + u_int all_rights_xferd; + u_int tokens_sent; + u_int tokens_requested; + u_int xfer_hints_sent; + u_int xfer_hints_rcvd; } port_stat_t, *port_stat_ptr_t; -extern port_stat_ptr_t port_stat_cur; -extern port_stat_ptr_t port_stat_end; -extern struct mutex port_stat_lock; +extern port_stat_ptr_t port_stat_cur; +extern port_stat_ptr_t port_stat_end; +extern struct mutex port_stat_lock; /* @@ -223,16 +223,16 @@ extern struct mutex port_stat_lock; * XXX These must be faked, because we cannot include mem.h here * (mutual includes). 
*/ -typedef char *mem_class_ptr_t; -typedef char *mem_nam_ptr_t; -typedef int *mem_bucket_ptr_t; +typedef char *mem_class_ptr_t; +typedef char *mem_nam_ptr_t; +typedef int *mem_bucket_ptr_t; + - /* * Definitions for print_level. */ -#define LS_PRINT_NEVER 5 -#define LS_PRINT_LOG 3 -#define LS_PRINT_ALWAYS 0 +#define LS_PRINT_NEVER 5 +#define LS_PRINT_LOG 3 +#define LS_PRINT_ALWAYS 0 -#endif /* _LS_DEFS_ */ +#endif /* _LS_DEFS_ */ diff --git a/libsyscall/mach/servers/netname_defs.h b/libsyscall/mach/servers/netname_defs.h index 0c39ad41d..e5ac69e4b 100644 --- a/libsyscall/mach/servers/netname_defs.h +++ b/libsyscall/mach/servers/netname_defs.h @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies @@ -53,18 +53,18 @@ * */ -#ifndef _NETNAME_DEFS_ -#define _NETNAME_DEFS_ +#ifndef _NETNAME_DEFS_ +#define _NETNAME_DEFS_ -#define NETNAME_SUCCESS (0) -#define NETNAME_PENDING (-1) -#define NETNAME_NOT_YOURS (1000) -#define NAME_NOT_YOURS (1000) -#define NETNAME_NOT_CHECKED_IN (1001) -#define NAME_NOT_CHECKED_IN (1001) -#define NETNAME_NO_SUCH_HOST (1002) -#define NETNAME_HOST_NOT_FOUND (1003) -#define NETNAME_INVALID_PORT (1004) +#define NETNAME_SUCCESS (0) +#define NETNAME_PENDING (-1) +#define NETNAME_NOT_YOURS (1000) +#define NAME_NOT_YOURS (1000) +#define NETNAME_NOT_CHECKED_IN (1001) +#define NAME_NOT_CHECKED_IN (1001) +#define NETNAME_NO_SUCH_HOST (1002) +#define NETNAME_HOST_NOT_FOUND (1003) +#define NETNAME_INVALID_PORT (1004) typedef char netname_name_t[80]; diff --git a/libsyscall/mach/servers/nm_defs.h b/libsyscall/mach/servers/nm_defs.h index 80baa6ab2..8373a39c3 100644 --- a/libsyscall/mach/servers/nm_defs.h +++ b/libsyscall/mach/servers/nm_defs.h @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1987 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies @@ -57,28 +57,27 @@ * */ -#ifndef _NM_DEFS_ -#define _NM_DEFS_ +#ifndef _NM_DEFS_ +#define _NM_DEFS_ /* * netaddr_t is declared with the kernel files, * in . */ -#include +#include -#ifdef notdef -typedef unsigned long netaddr_t; -#endif /* notdef */ +#ifdef notdef +typedef unsigned long netaddr_t; +#endif /* notdef */ typedef union { - struct { - unsigned char ia_net_owner; - unsigned char ia_net_node_type; - unsigned char ia_host_high; - unsigned char ia_host_low; - } ia_bytes; - netaddr_t ia_netaddr; + struct { + unsigned char ia_net_owner; + unsigned char ia_net_node_type; + unsigned char ia_host_high; + unsigned char ia_host_low; + } ia_bytes; + netaddr_t ia_netaddr; } ip_addr_t; -#endif /* _NM_DEFS_ */ - +#endif /* _NM_DEFS_ */ diff --git a/libsyscall/mach/slot_name.c b/libsyscall/mach/slot_name.c index 180b97b45..fefeb9d50 100644 --- a/libsyscall/mach/slot_name.c +++ b/libsyscall/mach/slot_name.c @@ -2,7 +2,7 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -47,27 +47,31 @@ #include kern_return_t -msg_rpc(void) { +msg_rpc(void) +{ return KERN_FAILURE; } kern_return_t -msg_send(void) { +msg_send(void) +{ return KERN_FAILURE; } kern_return_t -msg_receive(void) { +msg_receive(void) +{ return KERN_FAILURE; } mach_port_t -task_self_(void) { +task_self_(void) +{ return mach_task_self(); } mach_port_t -host_self(void) { +host_self(void) +{ return mach_host_self(); } - diff --git a/libsyscall/mach/stack_logging_internal.h b/libsyscall/mach/stack_logging_internal.h index fdda28e0b..5dd5b7efe 100644 --- a/libsyscall/mach/stack_logging_internal.h +++ b/libsyscall/mach/stack_logging_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. 
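The ip_addr_t union at the end of nm_defs.h overlays four named bytes on a netaddr_t, letting the old netmsg server address individual fields without shifting and masking. Illustrative only; the byte positions are wire-format, not host-order arithmetic:

	ip_addr_t a;
	netaddr_t incoming = 0;                  /* stand-in for a wire value */

	a.ia_netaddr = incoming;                 /* packed address */
	unsigned char node = a.ia_bytes.ia_net_node_type;
	unsigned char low  = a.ia_bytes.ia_host_low;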
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- * 
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,13 +22,13 @@
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
- * 
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

// These declarations must match those in Libc's stack_logging.h

-#include <mach/vm_statistics.h>	// to get VM_FLAGS_ALIAS_MASK
+#include <mach/vm_statistics.h> // to get VM_FLAGS_ALIAS_MASK

#define stack_logging_type_vm_allocate 16 // mach_vm_allocate, mmap, mach_vm_map, mach_vm_remap, etc
#define stack_logging_type_vm_deallocate 32 // mach_vm_deallocate or munmap

diff --git a/libsyscall/mach/string.c b/libsyscall/mach/string.c
index 8dca9927c..fcb9e6fd2 100644
--- a/libsyscall/mach/string.c
+++ b/libsyscall/mach/string.c
@@ -2,7 +2,7 @@
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
- * 
+ *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -48,14 +48,14 @@ _mach_hex(char **buffer, int *length, unsigned long long n) { char buf[32]; char *cp = buf + sizeof(buf); - + if (n) { *--cp = '\0'; while (n) { *--cp = hex[n & 0xf]; n >>= 4; } - + int width = _mach_strlen(cp); while (width > 0 && length > 0) { *(*buffer)++ = *cp++; @@ -70,7 +70,7 @@ _mach_vsnprintf(char *buffer, int length, const char *fmt, va_list ap) { int width, max = length; char *out_ptr = buffer; - + // we only ever write n-1 bytes so we can put a \0 at the end length--; while (length > 0 && *fmt) { @@ -86,26 +86,27 @@ _mach_vsnprintf(char *buffer, int length, const char *fmt, va_list ap) // only going to support a specific subset of sprintf flags // namely %s, %x, with no padding modifiers switch (*fmt++) { - case 's': - { - char *cp = va_arg(ap, char*); - width = _mach_strlen(cp); - while (width > 0 && length > 0) { - *(out_ptr++) = *(cp++); - width--; - length--; - } - break; - } - case 'x': - { - _mach_hex(&out_ptr, &length, va_arg(ap, unsigned int)); - break; + case 's': + { + char *cp = va_arg(ap, char*); + width = _mach_strlen(cp); + while (width > 0 && length > 0) { + *(out_ptr++) = *(cp++); + width--; + length--; } + break; + } + case 'x': + { + _mach_hex(&out_ptr, &length, va_arg(ap, unsigned int)); + break; + } } } - if (max > 0) + if (max > 0) { *out_ptr = '\0'; + } return max - (length + 1); /* don't include the final NULL in the return value */ } diff --git a/libsyscall/mach/string.h b/libsyscall/mach/string.h index 7d668126a..833c6af05 100644 --- a/libsyscall/mach/string.h +++ b/libsyscall/mach/string.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/os/alloc_once.c b/libsyscall/os/alloc_once.c index 13632b5b9..a2bc41e52 100644 --- a/libsyscall/os/alloc_once.c +++ b/libsyscall/os/alloc_once.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
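The string.c changes above are indentation-only, but they make the formatter's contract easy to miss: _mach_vsnprintf supports exactly two conversions, %s and %x, with no width or padding modifiers, and returns the byte count excluding the terminating NUL. A hypothetical front end showing how such a formatter is typically driven (the _mach_snprintf name is illustrative, not part of Libsyscall's interface):

    #include <stdarg.h>

    /* Prototype matching the definition in the diff above. */
    int _mach_vsnprintf(char *buffer, int length, const char *fmt, va_list ap);

    static int
    _mach_snprintf(char *buf, int len, const char *fmt, ...)
    {
        va_list ap;
        va_start(ap, fmt);
        int n = _mach_vsnprintf(buf, len, fmt, ap);
        va_end(ap);
        return n; /* bytes written, not counting the trailing NUL */
    }

    /* e.g. _mach_snprintf(buf, sizeof(buf), "port %x in %s", port, name);
     * %d, %u, and padding flags are NOT supported by this formatter. */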
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ // Keep in sync with libplatform alloc_once.c -#define OS_ALLOC_ONCE_KEY_MAX 100 +#define OS_ALLOC_ONCE_KEY_MAX 100 struct _os_alloc_once_s { long once; diff --git a/libsyscall/os/thread_self_restrict.h b/libsyscall/os/thread_self_restrict.h index 153f516a0..226bbe4df 100644 --- a/libsyscall/os/thread_self_restrict.h +++ b/libsyscall/os/thread_self_restrict.h @@ -29,4 +29,3 @@ #ifndef OS_THREAD_SELF_RESTRICT_H #define OS_THREAD_SELF_RESTRICT_H #endif /* OS_THREAD_SELF_RESTRICT_H */ - diff --git a/libsyscall/wrappers/__commpage_gettimeofday.c b/libsyscall/wrappers/__commpage_gettimeofday.c index e1e7e001d..a763dfae4 100644 --- a/libsyscall/wrappers/__commpage_gettimeofday.c +++ b/libsyscall/wrappers/__commpage_gettimeofday.c @@ -2,14 +2,14 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -42,7 +42,7 @@ int __commpage_gettimeofday_internal(struct timeval *tp, uint64_t *tbr_out) { uint64_t now, over; - uint64_t delta,frac; + uint64_t delta, frac; uint64_t TimeStamp_tick; uint64_t TimeStamp_sec; uint64_t TimeStamp_frac; @@ -81,36 +81,39 @@ __commpage_gettimeofday_internal(struct timeval *tp, uint64_t *tbr_out) * This barrier prevents the reordering of the second read of gtod_TimeStamp_tick_p * w.r.t the values read just after mach_absolute_time is invoked. 
*/ -#if (__ARM_ARCH__ >= 7) - __asm__ volatile("dmb ishld" ::: "memory"); +#if (__ARM_ARCH__ >= 7) + __asm__ volatile ("dmb ishld" ::: "memory"); #endif } while (TimeStamp_tick != *gtod_TimeStamp_tick_p); - if (TimeStamp_tick == 0) - return(1); + if (TimeStamp_tick == 0) { + return 1; + } delta = now - TimeStamp_tick; /* If more than one second force a syscall */ - if (delta >= Ticks_per_sec) - return(1); + if (delta >= Ticks_per_sec) { + return 1; + } if (TimeStamp_sec > __LONG_MAX__) { - return(1); + return 1; } tp->tv_sec = (__darwin_time_t)TimeStamp_sec; over = multi_overflow(Tick_scale, delta); - if(over){ + if (over) { tp->tv_sec += over; } /* Sum scale*delta to TimeStamp_frac, if it overflows increment sec */ frac = TimeStamp_frac; frac += Tick_scale * delta; - if( TimeStamp_frac > frac ) + if (TimeStamp_frac > frac) { tp->tv_sec++; + } /* * Convert frac (64 bit frac of a sec) to usec @@ -122,5 +125,5 @@ __commpage_gettimeofday_internal(struct timeval *tp, uint64_t *tbr_out) *tbr_out = now; } - return(0); + return 0; } diff --git a/libsyscall/wrappers/_errno.h b/libsyscall/wrappers/_errno.h index 792e927c3..015f235ab 100644 --- a/libsyscall/wrappers/_errno.h +++ b/libsyscall/wrappers/_errno.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/_libc_funcptr.c b/libsyscall/wrappers/_libc_funcptr.c index 9c65ef3ff..30a31c68c 100644 --- a/libsyscall/wrappers/_libc_funcptr.c +++ b/libsyscall/wrappers/_libc_funcptr.c @@ -2,7 +2,7 @@ * Copyright (c) 2010-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
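The __commpage_gettimeofday_internal hunk above is style-only (braced single-statement ifs, return 0; for return(0);), but the arithmetic around it deserves one worked step: frac is a fraction of a second in 2^64 fixed point, so converting it to microseconds is a widening multiply by 10^6 followed by a 64-bit right shift. A self-contained sketch under that reading (frac_to_usec is a hypothetical helper, not the commpage code; __uint128_t is a Clang/GCC extension):

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC 1000000ULL

    /* frac is a fixed-point fraction of one second scaled by 2^64, so
     * usec = floor(frac * 10^6 / 2^64); the 128-bit intermediate keeps
     * the product exact. */
    static uint64_t
    frac_to_usec(uint64_t frac)
    {
        return (uint64_t)(((__uint128_t)frac * USEC_PER_SEC) >> 64);
    }

    int
    main(void)
    {
        uint64_t half_second = 1ULL << 63; /* 0.5 s in 2^64 fixed point */
        /* prints 500000 */
        printf("%llu\n", (unsigned long long)frac_to_usec(half_second));
        return 0;
    }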
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -58,9 +58,10 @@ void * reallocf(void *ptr, size_t size) { void *nptr = realloc(ptr, size); - if (!nptr && ptr) + if (!nptr && ptr) { free(ptr); - return (nptr); + } + return nptr; } __attribute__((visibility("hidden"))) @@ -72,14 +73,16 @@ _pthread_exit_if_canceled(int error) __attribute__((visibility("hidden"))) void -_pthread_set_self(void *ptr __attribute__((__unused__))) {} +_pthread_set_self(void *ptr __attribute__((__unused__))) +{ +} __attribute__((visibility("hidden"))) void _pthread_clear_qos_tsd(mach_port_t thread_port) { if (_libkernel_functions->version >= 3 && - _libkernel_functions->pthread_clear_qos_tsd) { + _libkernel_functions->pthread_clear_qos_tsd) { return _libkernel_functions->pthread_clear_qos_tsd(thread_port); } } @@ -89,7 +92,7 @@ _pthread_clear_qos_tsd(mach_port_t thread_port) */ static const struct _libkernel_string_functions - _libkernel_generic_string_functions = { + _libkernel_generic_string_functions = { .bzero = _libkernel_bzero, .memmove = _libkernel_memmove, .memset = _libkernel_memset, @@ -100,7 +103,7 @@ static const struct _libkernel_string_functions .strlen = _libkernel_strlen, }; static _libkernel_string_functions_t _libkernel_string_functions = - &_libkernel_generic_string_functions; + &_libkernel_generic_string_functions; kern_return_t __libkernel_platform_init(_libkernel_string_functions_t fns) @@ -240,9 +243,9 @@ strstr(const char *s, const char *find) */ static const struct _libkernel_voucher_functions - _libkernel_voucher_functions_empty; + _libkernel_voucher_functions_empty; static _libkernel_voucher_functions_t _libkernel_voucher_functions = - &_libkernel_voucher_functions_empty; + &_libkernel_voucher_functions_empty; kern_return_t __libkernel_voucher_init(_libkernel_voucher_functions_t fns) @@ -284,4 +287,3 @@ voucher_mach_msg_revert(voucher_mach_msg_state_t state) return _libkernel_voucher_functions->voucher_mach_msg_revert(state); } } - diff --git a/libsyscall/wrappers/_libkernel_init.c b/libsyscall/wrappers/_libkernel_init.c index 31e6cb47a..127d65efd 100644 --- a/libsyscall/wrappers/_libkernel_init.c +++ b/libsyscall/wrappers/_libkernel_init.c @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
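The _libc_funcptr.c hunk above only re-levels continuation lines, but the file's structure is worth restating: Libsyscall keeps a pointer to a default table of function pointers and lets Libc register a richer table at startup, with a version field guarding newer slots (as in the version >= 3 check before calling pthread_clear_qos_tsd). A reduced sketch of that indirection pattern (all names below are illustrative, not the libkernel ABI):

    #include <stddef.h>
    #include <string.h>

    struct string_fns {
        unsigned long version;
        void *(*memset_fn)(void *, int, size_t);
        size_t (*strlen_fn)(const char *);
    };

    /* Built-in fallbacks, used until a real table is registered. */
    static const struct string_fns generic_fns = {
        .version = 1,
        .memset_fn = memset,
        .strlen_fn = strlen,
    };
    static const struct string_fns *cur_fns = &generic_fns;

    void
    platform_init(const struct string_fns *fns)
    {
        if (fns && fns->version >= 1) {
            cur_fns = fns; /* caller hands in optimized versions */
        }
    }

    size_t
    my_strlen(const char *s)
    {
        return cur_fns->strlen_fn(s); /* always one indirection away */
    }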
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -49,9 +49,9 @@ _libkernel_functions_t _libkernel_functions; void __libkernel_init(_libkernel_functions_t fns, - const char *envp[] __attribute__((unused)), - const char *apple[] __attribute__((unused)), - const struct ProgramVars *vars __attribute__((unused))) + const char *envp[] __attribute__((unused)), + const char *apple[] __attribute__((unused)), + const struct ProgramVars *vars __attribute__((unused))) { _libkernel_functions = fns; if (fns->dlsym) { diff --git a/libsyscall/wrappers/_libkernel_init.h b/libsyscall/wrappers/_libkernel_init.h index b081ebc90..514afef02 100644 --- a/libsyscall/wrappers/_libkernel_init.h +++ b/libsyscall/wrappers/_libkernel_init.h @@ -2,7 +2,7 @@ * Copyright (c) 2010-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -103,7 +103,7 @@ typedef const struct _libkernel_voucher_functions { struct ProgramVars; /* forward reference */ void __libkernel_init(_libkernel_functions_t fns, const char *envp[], - const char *apple[], const struct ProgramVars *vars); + const char *apple[], const struct ProgramVars *vars); kern_return_t __libkernel_platform_init(_libkernel_string_functions_t fns); diff --git a/libsyscall/wrappers/cancelable/fcntl-base.c b/libsyscall/wrappers/cancelable/fcntl-base.c index fc98ea7ae..0b295ba10 100644 --- a/libsyscall/wrappers/cancelable/fcntl-base.c +++ b/libsyscall/wrappers/cancelable/fcntl-base.c @@ -2,14 +2,14 @@ * Copyright (c) 2004, 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -32,47 +32,47 @@ int __FCNTL(int, int, void *); int fcntl(int fd, int cmd, ...) { - va_list ap; + va_list ap; void *arg; va_start(ap, cmd); - switch(cmd) { - case F_GETLK: - case F_GETLKPID: - case F_SETLK: - case F_SETLKW: - case F_SETLKWTIMEOUT: + switch (cmd) { + case F_GETLK: + case F_GETLKPID: + case F_SETLK: + case F_SETLKW: + case F_SETLKWTIMEOUT: case F_OFD_GETLK: case F_OFD_GETLKPID: case F_OFD_SETLK: case F_OFD_SETLKW: case F_OFD_SETLKWTIMEOUT: - case F_PREALLOCATE: - case F_PUNCHHOLE: - case F_SETSIZE: - case F_RDADVISE: - case F_LOG2PHYS: - case F_LOG2PHYS_EXT: - case F_GETPATH: - case F_GETPATH_MTMINFO: - case F_GETCODEDIR: - case F_PATHPKG_CHECK: - case F_OPENFROM: - case F_UNLINKFROM: - case F_ADDSIGS: - case F_ADDFILESIGS: - case F_ADDFILESIGS_FOR_DYLD_SIM: - case F_ADDFILESIGS_RETURN: - case F_FINDSIGS: - case F_TRANSCODEKEY: - case F_TRIM_ACTIVE_FILE: - case F_CHECK_LV: + case F_PREALLOCATE: + case F_PUNCHHOLE: + case F_SETSIZE: + case F_RDADVISE: + case F_LOG2PHYS: + case F_LOG2PHYS_EXT: + case F_GETPATH: + case F_GETPATH_MTMINFO: + case F_GETCODEDIR: + case F_PATHPKG_CHECK: + case F_OPENFROM: + case F_UNLINKFROM: + case F_ADDSIGS: + case F_ADDFILESIGS: + case F_ADDFILESIGS_FOR_DYLD_SIM: + case F_ADDFILESIGS_RETURN: + case F_FINDSIGS: + case F_TRANSCODEKEY: + case F_TRIM_ACTIVE_FILE: + case F_CHECK_LV: arg = va_arg(ap, void *); break; - default: + default: arg = (void *)((unsigned long)va_arg(ap, int)); break; } va_end(ap); - return (__FCNTL(fd, cmd, arg)); + return __FCNTL(fd, cmd, arg); } diff --git a/libsyscall/wrappers/cancelable/fcntl-cancel.c b/libsyscall/wrappers/cancelable/fcntl-cancel.c index 3354657ed..5724fb279 100644 --- a/libsyscall/wrappers/cancelable/fcntl-cancel.c +++ b/libsyscall/wrappers/cancelable/fcntl-cancel.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,14 +17,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #if !defined(__i386__) #include -#define __FCNTL __fcntl +#define __FCNTL __fcntl #include "fcntl-base.c" diff --git a/libsyscall/wrappers/cancelable/fcntl.c b/libsyscall/wrappers/cancelable/fcntl.c index 830a79f5d..7afeed57f 100644 --- a/libsyscall/wrappers/cancelable/fcntl.c +++ b/libsyscall/wrappers/cancelable/fcntl.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. 
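The fcntl-base.c hunk above re-levels the case labels to xnu's switch style without changing behavior, but the switch itself encodes a real calling-convention point: commands whose third argument is a pointer must be fetched with va_arg(ap, void *), while integer commands are fetched as int and then widened, because reading a pointer-sized vararg as int would truncate on LP64. A reduced sketch of the same dispatch (F_EXAMPLE_PTR, F_EXAMPLE_INT, and real_syscall are made-up names):

    #include <stdarg.h>

    #define F_EXAMPLE_PTR 1 /* hypothetical: takes a struct pointer */
    #define F_EXAMPLE_INT 2 /* hypothetical: takes a plain int */

    extern int real_syscall(int fd, int cmd, void *arg);

    int
    variadic_wrapper(int fd, int cmd, ...)
    {
        va_list ap;
        void *arg;

        va_start(ap, cmd);
        switch (cmd) {
        case F_EXAMPLE_PTR:
            /* pointer-sized argument: must not be read as int */
            arg = va_arg(ap, void *);
            break;
        default:
            /* int argument, widened to pointer width for the syscall */
            arg = (void *)(unsigned long)va_arg(ap, int);
            break;
        }
        va_end(ap);
        return real_syscall(fd, cmd, arg);
    }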
All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -27,7 +27,7 @@ #define __DARWIN_NON_CANCELABLE 1 #include -#define __FCNTL __fcntl_nocancel +#define __FCNTL __fcntl_nocancel #include "fcntl-base.c" diff --git a/libsyscall/wrappers/cancelable/pselect-darwinext-cancel.c b/libsyscall/wrappers/cancelable/pselect-darwinext-cancel.c index 54ea91375..9593246cb 100644 --- a/libsyscall/wrappers/cancelable/pselect-darwinext-cancel.c +++ b/libsyscall/wrappers/cancelable/pselect-darwinext-cancel.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/cancelable/pselect-darwinext.c b/libsyscall/wrappers/cancelable/pselect-darwinext.c index 4bfb1b756..b0c47da06 100644 --- a/libsyscall/wrappers/cancelable/pselect-darwinext.c +++ b/libsyscall/wrappers/cancelable/pselect-darwinext.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/cancelable/select-cancel.c b/libsyscall/wrappers/cancelable/select-cancel.c index dba3fc291..dc9e8b047 100644 --- a/libsyscall/wrappers/cancelable/select-cancel.c +++ b/libsyscall/wrappers/cancelable/select-cancel.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/cancelable/select.c b/libsyscall/wrappers/cancelable/select.c index af06d655f..86d4916a3 100644 --- a/libsyscall/wrappers/cancelable/select.c +++ b/libsyscall/wrappers/cancelable/select.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/cancelable/sigsuspend-cancel.c b/libsyscall/wrappers/cancelable/sigsuspend-cancel.c index a7e7a320d..fd9a457d4 100644 --- a/libsyscall/wrappers/cancelable/sigsuspend-cancel.c +++ b/libsyscall/wrappers/cancelable/sigsuspend-cancel.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,10 +17,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ -#define __SIGSUSPEND __sigsuspend +#define __SIGSUSPEND __sigsuspend #include "../sigsuspend-base.c" diff --git a/libsyscall/wrappers/cancelable/sigsuspend.c b/libsyscall/wrappers/cancelable/sigsuspend.c index 2b1e2d877..2fed16f9f 100644 --- a/libsyscall/wrappers/cancelable/sigsuspend.c +++ b/libsyscall/wrappers/cancelable/sigsuspend.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,13 +17,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #undef __DARWIN_NON_CANCELABLE #define __DARWIN_NON_CANCELABLE 1 -#define __SIGSUSPEND __sigsuspend_nocancel +#define __SIGSUSPEND __sigsuspend_nocancel #include "../sigsuspend-base.c" diff --git a/libsyscall/wrappers/carbon_delete.c b/libsyscall/wrappers/carbon_delete.c index 8c2e49d68..fb94f6b5f 100644 --- a/libsyscall/wrappers/carbon_delete.c +++ b/libsyscall/wrappers/carbon_delete.c @@ -1,16 +1,15 @@ - /* * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -18,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -29,6 +28,8 @@ int __carbon_delete(const char *path) { int res = __delete(path); - if (res == 0) __inc_remove_counter(); + if (res == 0) { + __inc_remove_counter(); + } return res; } diff --git a/libsyscall/wrappers/clonefile.c b/libsyscall/wrappers/clonefile.c index 33b6beabd..809f85be9 100644 --- a/libsyscall/wrappers/clonefile.c +++ b/libsyscall/wrappers/clonefile.c @@ -27,5 +27,5 @@ int clonefile(const char *old, const char *new, uint32_t flags) { - return (clonefileat(AT_FDCWD, old, AT_FDCWD, new, flags)); + return clonefileat(AT_FDCWD, old, AT_FDCWD, new, flags); } diff --git a/libsyscall/wrappers/coalition.c b/libsyscall/wrappers/coalition.c index ecc36b1ec..33da1103d 100644 --- a/libsyscall/wrappers/coalition.c +++ b/libsyscall/wrappers/coalition.c @@ -2,14 +2,14 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. 
* * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -31,33 +31,39 @@ int __coalition(uint32_t operation, uint64_t *cid, uint32_t flags); int __coalition_info(uint32_t operation, uint64_t *cid, void *buffer, size_t *bufsize); -int coalition_create(uint64_t *cid_out, uint32_t flags) +int +coalition_create(uint64_t *cid_out, uint32_t flags) { return __coalition(COALITION_OP_CREATE, cid_out, flags); } -int coalition_terminate(uint64_t cid, uint32_t flags) +int +coalition_terminate(uint64_t cid, uint32_t flags) { return __coalition(COALITION_OP_TERMINATE, &cid, flags); } -int coalition_reap(uint64_t cid, uint32_t flags) +int +coalition_reap(uint64_t cid, uint32_t flags) { return __coalition(COALITION_OP_REAP, &cid, flags); } -int coalition_info_resource_usage(uint64_t cid, struct coalition_resource_usage *cru, size_t sz) +int +coalition_info_resource_usage(uint64_t cid, struct coalition_resource_usage *cru, size_t sz) { return __coalition_info(COALITION_INFO_RESOURCE_USAGE, &cid, cru, &sz); } -int coalition_info_set_name(uint64_t cid, const char *name, size_t size) +int +coalition_info_set_name(uint64_t cid, const char *name, size_t size) { return __coalition_info(COALITION_INFO_SET_NAME, &cid, (void *)name, &size); } -int coalition_info_set_efficiency(uint64_t cid, uint64_t flags) +int +coalition_info_set_efficiency(uint64_t cid, uint64_t flags) { - size_t size = sizeof(flags); - return __coalition_info(COALITION_INFO_SET_EFFICIENCY, &cid, (void *)&flags, &size); + size_t size = sizeof(flags); + return __coalition_info(COALITION_INFO_SET_EFFICIENCY, &cid, (void *)&flags, &size); } diff --git a/libsyscall/wrappers/csr.c b/libsyscall/wrappers/csr.c index c5944c507..f65407918 100644 --- a/libsyscall/wrappers/csr.c +++ b/libsyscall/wrappers/csr.c @@ -2,14 +2,14 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
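The coalition.c hunk above moves each return type onto its own line, matching xnu's function-definition style, with no behavioral change. For orientation, a sketch of how the touched wrappers are driven; the coalition syscalls are private SPI, and the flags value below is an assumption for illustration only:

    #include <stdint.h>
    #include <stdio.h>

    /* Prototypes as shown in the diff above. */
    int coalition_create(uint64_t *cid_out, uint32_t flags);
    int coalition_terminate(uint64_t cid, uint32_t flags);

    static void
    demo(void)
    {
        uint64_t cid = 0;

        /* flags = 0: default options (assumption for this sketch) */
        if (coalition_create(&cid, 0) != 0) {
            perror("coalition_create");
            return;
        }
        printf("created coalition %llu\n", (unsigned long long)cid);
        coalition_terminate(cid, 0);
    }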
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -27,12 +27,14 @@ /* Syscall entry point */ int __csrctl(csr_op_t op, void *buffer, size_t size); -int csr_check(csr_config_t mask) +int +csr_check(csr_config_t mask) { return __csrctl(CSR_SYSCALL_CHECK, &mask, sizeof(csr_config_t)); } -int csr_get_active_config(csr_config_t *config) +int +csr_get_active_config(csr_config_t *config) { return __csrctl(CSR_SYSCALL_GET_ACTIVE_CONFIG, config, sizeof(csr_config_t)); } diff --git a/libsyscall/wrappers/fs_snapshot.c b/libsyscall/wrappers/fs_snapshot.c index 944aaf513..88b567203 100644 --- a/libsyscall/wrappers/fs_snapshot.c +++ b/libsyscall/wrappers/fs_snapshot.c @@ -43,11 +43,11 @@ fs_snapshot_list(int dirfd, struct attrlist *alist, void *attrbuf, size_t bufsiz { if (flags != 0) { errno = EINVAL; - return (-1); + return -1; } - return (getattrlistbulk(dirfd, alist, attrbuf, bufsize, - FSOPT_LIST_SNAPSHOT)); + return getattrlistbulk(dirfd, alist, attrbuf, bufsize, + FSOPT_LIST_SNAPSHOT); } int @@ -65,19 +65,19 @@ fs_snapshot_rename(int dirfd, const char *old, const char *new, uint32_t flags) int fs_snapshot_revert(int dirfd, const char *name, uint32_t flags) { - return __fs_snapshot(SNAPSHOT_OP_REVERT, dirfd, name, NULL, NULL, flags); + return __fs_snapshot(SNAPSHOT_OP_REVERT, dirfd, name, NULL, NULL, flags); } int fs_snapshot_root(int dirfd, const char *name, uint32_t flags) { - return __fs_snapshot(SNAPSHOT_OP_ROOT, dirfd, name, NULL, NULL, flags); + return __fs_snapshot(SNAPSHOT_OP_ROOT, dirfd, name, NULL, NULL, flags); } int fs_snapshot_mount(int dirfd, const char *dir, const char *snapshot, uint32_t flags) { - return (__fs_snapshot(SNAPSHOT_OP_MOUNT, dirfd, snapshot, dir, - NULL, flags)); + return __fs_snapshot(SNAPSHOT_OP_MOUNT, dirfd, snapshot, dir, + NULL, flags); } diff --git a/libsyscall/wrappers/gethostuuid.c b/libsyscall/wrappers/gethostuuid.c index 691c4fa86..daa0ea30a 100644 --- a/libsyscall/wrappers/gethostuuid.c +++ b/libsyscall/wrappers/gethostuuid.c @@ -37,7 +37,7 @@ int gethostuuid(uuid_t uuid, const struct timespec *timeout) { int result; - + result = __gethostuuid(uuid, timeout, 0); if ((result == -1) && (errno == EPERM)) { if (_gethostuuid_callback) { @@ -47,7 +47,7 @@ gethostuuid(uuid_t uuid, const struct timespec *timeout) memset(uuid, 0x00, sizeof(*uuid)); } } - + return result; } @@ -61,7 +61,6 @@ _getprivatesystemidentifier(uuid_t uuid, const struct timespec *timeout) int _register_gethostuuid_callback(int (*new_callback)(uuid_t)) { - if (__sync_bool_compare_and_swap((void **)&_gethostuuid_callback, (void *)0, (void *)new_callback)) { return 0; } else { diff --git a/libsyscall/wrappers/getiopolicy_np.c b/libsyscall/wrappers/getiopolicy_np.c index 335717f2b..db097ad44 100644 --- a/libsyscall/wrappers/getiopolicy_np.c +++ b/libsyscall/wrappers/getiopolicy_np.c @@ -2,14 +2,14 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -34,7 +34,7 @@ getiopolicy_np(int iotype, int scope) struct _iopol_param_t iop_param; if ((iotype != IOPOL_TYPE_DISK && iotype != IOPOL_TYPE_VFS_ATIME_UPDATES) || - (scope != IOPOL_SCOPE_PROCESS && scope != IOPOL_SCOPE_THREAD)) { + (scope != IOPOL_SCOPE_PROCESS && scope != IOPOL_SCOPE_THREAD)) { errno = EINVAL; policy = -1; goto exit; @@ -51,7 +51,7 @@ getiopolicy_np(int iotype, int scope) policy = iop_param.iop_policy; - exit: +exit: return policy; } @@ -68,7 +68,7 @@ setiopolicy_np(int iotype, int scope, int policy) int rv = __iopolicysys(IOPOL_CMD_SET, &iop_param); if (rv == -2) { /* not an actual error but indication that __iopolicysys removed the thread QoS */ - _pthread_clear_qos_tsd(MACH_PORT_NULL); + _pthread_clear_qos_tsd(MACH_PORT_NULL); rv = 0; } diff --git a/libsyscall/wrappers/guarded_open_dprotected_np.c b/libsyscall/wrappers/guarded_open_dprotected_np.c index 152fd147d..fab15c364 100644 --- a/libsyscall/wrappers/guarded_open_dprotected_np.c +++ b/libsyscall/wrappers/guarded_open_dprotected_np.c @@ -2,14 +2,14 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -35,11 +35,11 @@ guarded_open_dprotected_np(const char *path, { int mode = 0; - if (flags & O_CREAT) { + if (flags & O_CREAT) { va_list ap; va_start(ap, dpflags); mode = va_arg(ap, int); va_end(ap); } - return (__guarded_open_dprotected_np(path, guard, guardflags, flags, dpclass, dpflags, mode)); + return __guarded_open_dprotected_np(path, guard, guardflags, flags, dpclass, dpflags, mode); } diff --git a/libsyscall/wrappers/guarded_open_np.c b/libsyscall/wrappers/guarded_open_np.c index 1322e0598..bd5c6a184 100644 --- a/libsyscall/wrappers/guarded_open_np.c +++ b/libsyscall/wrappers/guarded_open_np.c @@ -2,14 +2,14 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. 
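The getiopolicy_np.c hunk above fixes continuation indentation and moves the exit: label to column zero; behavior is unchanged. A short usage sketch of the public API involved, using constants declared in <sys/resource.h>:

    #include <sys/resource.h>
    #include <stdio.h>

    static void
    throttle_my_io(void)
    {
        /* Mark this process's disk I/O as lowest priority. */
        if (setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS,
            IOPOL_THROTTLE) != 0) {
            perror("setiopolicy_np");
            return;
        }
        printf("disk policy now %d\n",
            getiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS));
    }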
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -35,11 +35,11 @@ guarded_open_np(const char *path, { int mode = 0; - if (flags & O_CREAT) { + if (flags & O_CREAT) { va_list ap; va_start(ap, flags); mode = va_arg(ap, int); va_end(ap); } - return (__guarded_open_np(path, guard, guardflags, flags, mode)); + return __guarded_open_np(path, guard, guardflags, flags, mode); } diff --git a/libsyscall/wrappers/init_cpu_capabilities.c b/libsyscall/wrappers/init_cpu_capabilities.c index 70271d320..3feb3209d 100644 --- a/libsyscall/wrappers/init_cpu_capabilities.c +++ b/libsyscall/wrappers/init_cpu_capabilities.c @@ -2,14 +2,14 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,13 +17,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ -#define __APPLE_API_PRIVATE +#define __APPLE_API_PRIVATE #include -#undef __APPLE_API_PRIVATE +#undef __APPLE_API_PRIVATE #if defined(__i386__) || defined(__x86_64__) @@ -43,7 +43,7 @@ _init_cpu_capabilities( void ) extern int _get_cpu_capabilities(void); int _cpu_capabilities = 0; -int _cpu_has_altivec = 0; // DEPRECATED: use _cpu_capabilities instead +int _cpu_has_altivec = 0; // DEPRECATED: use _cpu_capabilities instead void _init_cpu_capabilities( void ) diff --git a/libsyscall/wrappers/ioctl.c b/libsyscall/wrappers/ioctl.c index a0f12a27e..71fa3e21b 100644 --- a/libsyscall/wrappers/ioctl.c +++ b/libsyscall/wrappers/ioctl.c @@ -2,14 +2,14 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -33,13 +33,13 @@ int __ioctl(int, unsigned long, void *); int ioctl(int d, unsigned long request, ...) { - va_list ap; + va_list ap; void *arg; va_start(ap, request); arg = va_arg(ap, void *); va_end(ap); - return (__ioctl(d, request, arg)); + return __ioctl(d, request, arg); } #endif diff --git a/libsyscall/wrappers/kdebug_trace.c b/libsyscall/wrappers/kdebug_trace.c index 8234f4582..e42794a49 100644 --- a/libsyscall/wrappers/kdebug_trace.c +++ b/libsyscall/wrappers/kdebug_trace.c @@ -36,12 +36,12 @@ extern int __kdebug_typefilter(void** addr, size_t* size); extern int __kdebug_trace64(uint32_t code, uint64_t arg1, uint64_t arg2, - uint64_t arg3, uint64_t arg4); + uint64_t arg3, uint64_t arg4); extern uint64_t __kdebug_trace_string(uint32_t debugid, uint64_t str_id, - const char *str); + const char *str); static int kdebug_signpost_internal(uint32_t debugid, uintptr_t arg1, - uintptr_t arg2, uintptr_t arg3, uintptr_t arg4); + uintptr_t arg2, uintptr_t arg3, uintptr_t arg4); /* * GENERAL API DESIGN NOTE! @@ -95,7 +95,7 @@ kdebug_is_enabled(uint32_t debugid) * Typefilter rules... * * If no typefilter is available (even if due to error), - * debugids are allowed. + * debugids are allowed. * * The typefilter will always allow DBG_TRACE; this is a kernel * invariant. There is no need for an explicit check here. @@ -118,7 +118,7 @@ kdebug_is_enabled(uint32_t debugid) int kdebug_trace(uint32_t debugid, uint64_t arg1, uint64_t arg2, uint64_t arg3, - uint64_t arg4) + uint64_t arg4) { if (!kdebug_is_enabled(debugid)) { return 0; diff --git a/libsyscall/wrappers/kill.c b/libsyscall/wrappers/kill.c index 74e3ca2fe..55bd7d3c8 100644 --- a/libsyscall/wrappers/kill.c +++ b/libsyscall/wrappers/kill.c @@ -2,14 +2,14 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -36,8 +36,8 @@ int kill(pid_t pid, int sig) { #if __DARWIN_UNIX03 - return(__kill(pid, sig, 1)); -#else /* !__DARWIN_UNIX03 */ - return(__kill(pid, sig, 0)); -#endif /* !__DARWIN_UNIX03 */ + return __kill(pid, sig, 1); +#else /* !__DARWIN_UNIX03 */ + return __kill(pid, sig, 0); +#endif /* !__DARWIN_UNIX03 */ } diff --git a/libsyscall/wrappers/legacy/accept.c b/libsyscall/wrappers/legacy/accept.c index 216b76685..b7e9d62d3 100644 --- a/libsyscall/wrappers/legacy/accept.c +++ b/libsyscall/wrappers/legacy/accept.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
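The kdebug_trace.c hunk above reflows prototypes and a comment. The API it belongs to hinges on the 32-bit debugid layout: an 8-bit class, an 8-bit subclass, a 14-bit code, and 2 function-qualifier bits. A sketch of packing one by hand (the class/subclass values in the usage comment are illustrative, not reserved assignments; <sys/kdebug.h> provides macros for this):

    #include <stdint.h>

    #define DBG_FUNC_START 1U
    #define DBG_FUNC_END   2U

    /* debugid layout: [31:24] class, [23:16] subclass, [15:2] code,
     * [1:0] function qualifier */
    static uint32_t
    make_debugid(uint32_t cls, uint32_t subcls, uint32_t code, uint32_t func)
    {
        return ((cls & 0xffU) << 24) | ((subcls & 0xffU) << 16) |
               ((code & 0x3fffU) << 2) | (func & 0x3U);
    }

    /* e.g. an interval event (0x31/0x01 are illustrative values):
     *   uint32_t id = make_debugid(0x31, 0x01, 7, DBG_FUNC_START);
     *   kdebug_trace(id, a, b, c, d);
     */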
You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -48,8 +48,9 @@ accept(int s, struct sockaddr *addr, socklen_t *addrlen) int ret = __accept_nocancel(s, addr, addrlen); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/bind.c b/libsyscall/wrappers/legacy/bind.c index f30281d61..ecf7a7e1f 100644 --- a/libsyscall/wrappers/legacy/bind.c +++ b/libsyscall/wrappers/legacy/bind.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -48,8 +48,9 @@ bind(int s, const struct sockaddr *name, socklen_t namelen) int ret = __bind(s, name, namelen); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/connect.c b/libsyscall/wrappers/legacy/connect.c index 39910566b..5941174c0 100644 --- a/libsyscall/wrappers/legacy/connect.c +++ b/libsyscall/wrappers/legacy/connect.c @@ -2,14 +2,14 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
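accept() above, together with bind(), connect(), getattrlist(), and the other legacy wrappers that follow, all repeat one idiom, now with xnu's mandatory braces: call the underlying syscall, then rewrite EOPNOTSUPP to ENOTSUP to preserve pre-UNIX03 error semantics. The pattern in isolation (legacy_call is a placeholder for any of those syscalls):

    #include <errno.h>

    extern int legacy_call(int fd); /* placeholder for __bind, __connect, ... */

    int
    legacy_wrapper(int fd)
    {
        int ret = legacy_call(fd);

        /* use ENOTSUP for legacy behavior */
        if (ret < 0 && errno == EOPNOTSUPP) {
            errno = ENOTSUP;
        }
        return ret;
    }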
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -48,8 +48,9 @@ connect(int s, const struct sockaddr *name, socklen_t namelen) int ret = __connect_nocancel(s, name, namelen); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/getattrlist.c b/libsyscall/wrappers/legacy/getattrlist.c index a0444a3da..4d2737862 100644 --- a/libsyscall/wrappers/legacy/getattrlist.c +++ b/libsyscall/wrappers/legacy/getattrlist.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -48,17 +48,18 @@ extern int __getattrlist(const char *, void *, void *, size_t, unsigned long); int #ifdef __LP64__ getattrlist(const char *path, void *attrList, void *attrBuf, - size_t attrBufSize, unsigned int options) + size_t attrBufSize, unsigned int options) #else /* !__LP64__ */ getattrlist(const char *path, void *attrList, void *attrBuf, - size_t attrBufSize, unsigned long options) + size_t attrBufSize, unsigned long options) #endif /* __LP64__ */ { int ret = __getattrlist(path, attrList, attrBuf, attrBufSize, options); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/getaudit.c b/libsyscall/wrappers/legacy/getaudit.c index 087053212..e3c8ee8d3 100644 --- a/libsyscall/wrappers/legacy/getaudit.c +++ b/libsyscall/wrappers/legacy/getaudit.c @@ -33,8 +33,9 @@ getaudit(struct auditinfo *ainfo) int err; auditinfo_addr_t aia; - if ((err = getaudit_addr(&aia, sizeof(aia))) != 0) - return (err); + if ((err = getaudit_addr(&aia, sizeof(aia))) != 0) { + return err; + } ainfo->ai_auid = aia.ai_auid; ainfo->ai_mask = aia.ai_mask; @@ -42,7 +43,7 @@ getaudit(struct auditinfo *ainfo) ainfo->ai_termid.machine = aia.ai_termid.at_addr[0]; ainfo->ai_asid = aia.ai_asid; - return (0); + return 0; } int @@ -53,8 +54,9 @@ setaudit(const struct auditinfo *ainfo) auditinfo_addr_t aia; /* Get the current ai_flags so they are preserved. */ - if ((err = getaudit_addr(&aia, sizeof(aia))) != 0) - return (err); + if ((err = getaudit_addr(&aia, sizeof(aia))) != 0) { + return err; + } aia.ai_auid = ai->ai_auid; aia.ai_mask = ai->ai_mask; @@ -63,11 +65,12 @@ setaudit(const struct auditinfo *ainfo) aia.ai_termid.at_addr[0] = ai->ai_termid.machine; aia.ai_asid = ai->ai_asid; - if ((err = setaudit_addr(&aia, sizeof(aia))) != 0) - return (err); + if ((err = setaudit_addr(&aia, sizeof(aia))) != 0) { + return err; + } /* The session ID may have been assigned by kernel so copy that back. 
*/ ai->ai_asid = aia.ai_asid; - return (0); + return 0; } diff --git a/libsyscall/wrappers/legacy/getpeername.c b/libsyscall/wrappers/legacy/getpeername.c index a5619ece0..0e0dc9ad9 100644 --- a/libsyscall/wrappers/legacy/getpeername.c +++ b/libsyscall/wrappers/legacy/getpeername.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -43,13 +43,14 @@ extern int __getpeername(int, struct sockaddr * __restrict, socklen_t * __restri */ int getpeername(int socket, struct sockaddr * __restrict address, - socklen_t * __restrict address_len) + socklen_t * __restrict address_len) { int ret = __getpeername(socket, address, address_len); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/getsockname.c b/libsyscall/wrappers/legacy/getsockname.c index 9a2a94cd5..54586a31b 100644 --- a/libsyscall/wrappers/legacy/getsockname.c +++ b/libsyscall/wrappers/legacy/getsockname.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -43,13 +43,14 @@ extern int __getsockname(int, struct sockaddr * __restrict, socklen_t * __restri */ int getsockname(int socket, struct sockaddr * __restrict address, - socklen_t * __restrict address_len) + socklen_t * __restrict address_len) { int ret = __getsockname(socket, address, address_len); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/kill.c b/libsyscall/wrappers/legacy/kill.c index 1f25079e9..6d28cce98 100644 --- a/libsyscall/wrappers/legacy/kill.c +++ b/libsyscall/wrappers/legacy/kill.c @@ -2,14 +2,14 @@ * Copyright (c) 2005 Apple Computer, Inc. 
All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/legacy/lchown.c b/libsyscall/wrappers/legacy/lchown.c index 05279fe4c..c57537c9c 100644 --- a/libsyscall/wrappers/legacy/lchown.c +++ b/libsyscall/wrappers/legacy/lchown.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -47,8 +47,9 @@ lchown(const char *path, uid_t owner, gid_t group) int ret = __lchown(path, owner, group); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/listen.c b/libsyscall/wrappers/legacy/listen.c index 0e21db52b..9c547957c 100644 --- a/libsyscall/wrappers/legacy/listen.c +++ b/libsyscall/wrappers/legacy/listen.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
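The getaudit()/setaudit() shim a few hunks back emulates the legacy struct auditinfo interface on top of getaudit_addr()/setaudit_addr(), copying fields between the two layouts and preserving ai_flags across a set. A hedged usage sketch of the legacy call (it may require privilege, so expect failures for ordinary users):

#include <stdio.h>
#include <bsm/audit.h>

int
main(void)
{
	struct auditinfo ai;

	/* legacy entry point; forwarded to getaudit_addr() internally */
	if (getaudit(&ai) != 0) {
		perror("getaudit");
		return 1;
	}
	printf("audit uid %u, session %u\n",
	    (unsigned)ai.ai_auid, (unsigned)ai.ai_asid);
	return 0;
}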
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -47,8 +47,9 @@ listen(int socket, int backlog) int ret = __listen(socket, backlog); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/mprotect.c b/libsyscall/wrappers/legacy/mprotect.c index 666b8974d..97f08f4da 100644 --- a/libsyscall/wrappers/legacy/mprotect.c +++ b/libsyscall/wrappers/legacy/mprotect.c @@ -2,14 +2,14 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -42,9 +42,9 @@ extern int __mprotect(void *, size_t, int); int mprotect(void *addr, size_t len, int prot) { - void *aligned_addr; - size_t offset; - int rv; + void *aligned_addr; + size_t offset; + int rv; /* * Page-align "addr" since the system now requires it diff --git a/libsyscall/wrappers/legacy/msync.c b/libsyscall/wrappers/legacy/msync.c index 7ba2a82c7..213d20d86 100644 --- a/libsyscall/wrappers/legacy/msync.c +++ b/libsyscall/wrappers/legacy/msync.c @@ -2,14 +2,14 @@ * Copyright (c) 2004, 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #ifndef NO_SYSCALL_LEGACY @@ -37,7 +37,7 @@ int __msync_nocancel(void *, size_t, int); int msync(void *addr, size_t len, int flags) { - size_t offset; + size_t offset; /* * Page-align "addr" since the system now requires it diff --git a/libsyscall/wrappers/legacy/munmap.c b/libsyscall/wrappers/legacy/munmap.c index fb9e726a7..30f00d820 100644 --- a/libsyscall/wrappers/legacy/munmap.c +++ b/libsyscall/wrappers/legacy/munmap.c @@ -2,14 +2,14 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. 
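The mprotect()/msync()/munmap() wrappers whose declarations are reindented above share one job: page-align the caller's address before trapping, because the kernel now rejects unaligned ranges. A rough sketch of that alignment arithmetic, assuming a mask-based round-down (the my_mprotect name is illustrative, not the shipped wrapper, which also carries its own error handling):

#include <stddef.h>
#include <stdint.h>
#include <unistd.h>

extern int __mprotect(void *, size_t, int);   /* raw stub, as in the diff */

int
my_mprotect(void *addr, size_t len, int prot)
{
	uintptr_t mask = (uintptr_t)getpagesize() - 1;     /* page size is a power of two */
	uintptr_t aligned_addr = (uintptr_t)addr & ~mask;  /* round down to page start */
	size_t offset = (size_t)((uintptr_t)addr - aligned_addr);

	/* grow len so the caller's original [addr, addr+len) range stays covered */
	return __mprotect((void *)aligned_addr, len + offset, prot);
}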
* * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -43,7 +43,7 @@ extern int __munmap(void *, size_t); int munmap(void *addr, size_t len) { - size_t offset; + size_t offset; if (len == 0) { /* diff --git a/libsyscall/wrappers/legacy/open.c b/libsyscall/wrappers/legacy/open.c index c11f4e919..8635b0c7f 100644 --- a/libsyscall/wrappers/legacy/open.c +++ b/libsyscall/wrappers/legacy/open.c @@ -2,14 +2,14 @@ * Copyright (c) 2005, 2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -41,14 +41,14 @@ open(const char *path, int flags, ...) { mode_t mode = 0; - if(flags & O_CREAT) { + if (flags & O_CREAT) { va_list ap; va_start(ap, flags); // compiler warns to pass int (not mode_t) to va_arg mode = va_arg(ap, int); va_end(ap); } - return(__open_nocancel(path, flags | O_NOCTTY, mode)); + return __open_nocancel(path, flags | O_NOCTTY, mode); } #endif /* NO_SYSCALL_LEGACY */ diff --git a/libsyscall/wrappers/legacy/recvfrom.c b/libsyscall/wrappers/legacy/recvfrom.c index 1f53ed8ba..d148a154c 100644 --- a/libsyscall/wrappers/legacy/recvfrom.c +++ b/libsyscall/wrappers/legacy/recvfrom.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
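One detail in the open() hunk above deserves a note: the optional mode is fetched with va_arg(ap, int), not va_arg(ap, mode_t). mode_t is narrower than int on Darwin, so default argument promotion passes it as int, and asking va_arg for the unpromoted type is what the quoted compiler warning is about. The shape of the idiom, with a hypothetical my_open name:

#include <stdarg.h>
#include <fcntl.h>

extern int __open_nocancel(const char *, int, mode_t);

int
my_open(const char *path, int flags, ...)
{
	mode_t mode = 0;

	if (flags & O_CREAT) {
		va_list ap;
		va_start(ap, flags);
		/* mode_t promotes to int through "...", so read it back as int */
		mode = (mode_t)va_arg(ap, int);
		va_end(ap);
	}
	/* the legacy variant also forces O_NOCTTY, per the diff */
	return __open_nocancel(path, flags | O_NOCTTY, mode);
}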
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -47,8 +47,9 @@ recvfrom(int s, void *buf, size_t len, int flags, struct sockaddr * __restrict f int ret = __recvfrom_nocancel(s, buf, len, flags, from, fromlen); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/recvmsg.c b/libsyscall/wrappers/legacy/recvmsg.c index dea590555..10bd154dd 100644 --- a/libsyscall/wrappers/legacy/recvmsg.c +++ b/libsyscall/wrappers/legacy/recvmsg.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -47,8 +47,9 @@ recvmsg(int s, struct msghdr *msg, int flags) int ret = __recvmsg_nocancel(s, msg, flags); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/select-pre1050.c b/libsyscall/wrappers/legacy/select-pre1050.c index 2b8bd8e17..9128ed0b0 100644 --- a/libsyscall/wrappers/legacy/select-pre1050.c +++ b/libsyscall/wrappers/legacy/select-pre1050.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -25,7 +25,7 @@ #undef __DARWIN_VERS_1050 #define __DARWIN_VERS_1050 0 -#define VARIANT_PRE1050 +#define VARIANT_PRE1050 #include "../select-base.c" diff --git a/libsyscall/wrappers/legacy/select.c b/libsyscall/wrappers/legacy/select.c index 2ababf8cc..564a09d8b 100644 --- a/libsyscall/wrappers/legacy/select.c +++ b/libsyscall/wrappers/legacy/select.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. 
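select-pre1050.c above also shows how these compatibility variants are built: a variant translation unit is nothing but a few #defines followed by #include of a shared base implementation (select-base.c here; sigsuspend-base.c appears the same way further down). Schematically, with hypothetical foo names:

/* foo-base.c -- one shared implementation, parameterized by macros */
int
foo(int arg)
{
	return __FOO(arg);   /* __FOO expands to the variant's syscall stub */
}

/* legacy/foo.c -- the non-cancelable legacy variant */
#define __FOO __foo_nocancel
#include "../foo-base.c"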
* * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/legacy/sendmsg.c b/libsyscall/wrappers/legacy/sendmsg.c index e337f2515..879f1a69a 100644 --- a/libsyscall/wrappers/legacy/sendmsg.c +++ b/libsyscall/wrappers/legacy/sendmsg.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -48,8 +48,9 @@ sendmsg(int s, const struct msghdr *msg, int flags) int ret = __sendmsg_nocancel(s, msg, flags); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/sendto.c b/libsyscall/wrappers/legacy/sendto.c index 095282119..c47565db7 100644 --- a/libsyscall/wrappers/legacy/sendto.c +++ b/libsyscall/wrappers/legacy/sendto.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -48,8 +48,9 @@ sendto(int s, const void *msg, size_t len, int flags, const struct sockaddr *to, int ret = __sendto_nocancel(s, msg, len, flags, to, tolen); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/setattrlist.c b/libsyscall/wrappers/legacy/setattrlist.c index d9e5a5e37..806471d12 100644 --- a/libsyscall/wrappers/legacy/setattrlist.c +++ b/libsyscall/wrappers/legacy/setattrlist.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -48,17 +48,18 @@ extern int __setattrlist(const char *, void *, void *, size_t, unsigned long); int #ifdef __LP64__ setattrlist(const char *path, void *attrList, void *attrBuf, - size_t attrBufSize, unsigned int options) + size_t attrBufSize, unsigned int options) #else /* !__LP64__ */ setattrlist(const char *path, void *attrList, void *attrBuf, - size_t attrBufSize, unsigned long options) + size_t attrBufSize, unsigned long options) #endif /* __LP64__ */ { int ret = __setattrlist(path, attrList, attrBuf, attrBufSize, options); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/legacy/sigsuspend.c b/libsyscall/wrappers/legacy/sigsuspend.c index 98ffc8c7f..dc5d5fb50 100644 --- a/libsyscall/wrappers/legacy/sigsuspend.c +++ b/libsyscall/wrappers/legacy/sigsuspend.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,14 +17,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
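The setattrlist() hunk above carries the same #ifdef __LP64__ split already seen in getattrlist(): the public options parameter is declared unsigned int under LP64 and unsigned long under ILP32, so it is 32 bits wide in either ABI, while the __setattrlist stub takes unsigned long unconditionally and the argument widens implicitly. Reduced to its shape (my_setattrlist is a stand-in name):

#include <stddef.h>

extern int __setattrlist(const char *, void *, void *, size_t, unsigned long);

int
#ifdef __LP64__
my_setattrlist(const char *path, void *attrList, void *attrBuf,
    size_t attrBufSize, unsigned int options)    /* 32-bit under LP64 */
#else
my_setattrlist(const char *path, void *attrList, void *attrBuf,
    size_t attrBufSize, unsigned long options)   /* 32-bit under ILP32 */
#endif
{
	/* widened implicitly to the stub's unsigned long */
	return __setattrlist(path, attrList, attrBuf, attrBufSize, options);
}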
- * + * * @APPLE_LICENSE_HEADER_END@ */ #ifndef NO_SYSCALL_LEGACY #define _NONSTD_SOURCE -#define __SIGSUSPEND __sigsuspend_nocancel +#define __SIGSUSPEND __sigsuspend_nocancel #include "../sigsuspend-base.c" diff --git a/libsyscall/wrappers/legacy/socketpair.c b/libsyscall/wrappers/legacy/socketpair.c index 8249814e8..cbd8732fc 100644 --- a/libsyscall/wrappers/legacy/socketpair.c +++ b/libsyscall/wrappers/legacy/socketpair.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -38,7 +38,7 @@ #include "_errno.h" -extern int __socketpair(int, int, int, int [2]); +extern int __socketpair(int, int, int, int[2]); /* * socketpair stub, legacy version @@ -49,8 +49,9 @@ socketpair(int domain, int type, int protocol, int socket_vector[2]) int ret = __socketpair(domain, type, protocol, socket_vector); /* use ENOTSUP for legacy behavior */ - if (ret < 0 && errno == EOPNOTSUPP) + if (ret < 0 && errno == EOPNOTSUPP) { errno = ENOTSUP; + } return ret; } diff --git a/libsyscall/wrappers/libproc/libproc.c b/libsyscall/wrappers/libproc/libproc.c index 5cc1f7258..4c6fc2356 100644 --- a/libsyscall/wrappers/libproc/libproc.c +++ b/libsyscall/wrappers/libproc/libproc.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2018 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -41,78 +41,84 @@ __private_extern__ int proc_setthreadname(void * buffer, int buffersize); int __process_policy(int scope, int action, int policy, int policy_subtype, proc_policy_attribute_t * attrp, pid_t target_pid, uint64_t target_threadid); int proc_rlimit_control(pid_t pid, int flavor, void *arg); -int -proc_listpids(uint32_t type, uint32_t typeinfo, void *buffer, int buffersize) +int +proc_listpids(uint32_t type, uint32_t typeinfo, void *buffer, int buffersize) { int retval; - + if ((type >= PROC_ALL_PIDS) || (type <= PROC_PPID_ONLY)) { - if ((retval = __proc_info(PROC_INFO_CALL_LISTPIDS, type, typeinfo,(uint64_t)0, buffer, buffersize)) == -1) - return(0); + if ((retval = __proc_info(PROC_INFO_CALL_LISTPIDS, type, typeinfo, (uint64_t)0, buffer, buffersize)) == -1) { + return 0; + } } else { errno = EINVAL; retval = 0; } - return(retval); + return retval; } -int +int proc_listallpids(void * buffer, int buffersize) { int numpids; numpids = proc_listpids(PROC_ALL_PIDS, (uint32_t)0, buffer, buffersize); - if (numpids == -1) - return(-1); - else - return(numpids/sizeof(int)); + if (numpids == -1) { + return -1; + } else { + return numpids / sizeof(int); + } } -int +int proc_listpgrppids(pid_t pgrpid, void * buffer, int buffersize) { int numpids; numpids = proc_listpids(PROC_PGRP_ONLY, (uint32_t)pgrpid, buffer, buffersize); - if (numpids == -1) - return(-1); - else - return(numpids/sizeof(int)); + if (numpids == -1) { + return -1; + } else { + return numpids / sizeof(int); + } } -int +int proc_listchildpids(pid_t ppid, void * buffer, int buffersize) { int numpids; numpids = proc_listpids(PROC_PPID_ONLY, (uint32_t)ppid, buffer, buffersize); - if (numpids == -1) - return(-1); - else - return(numpids/sizeof(int)); + if (numpids == -1) { + return -1; + } else { + return numpids / sizeof(int); + } } -int -proc_pidinfo(int pid, int flavor, uint64_t arg, void *buffer, int buffersize) +int +proc_pidinfo(int pid, int flavor, uint64_t arg, void *buffer, int buffersize) { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_PIDINFO, pid, flavor, arg, buffer, buffersize)) == -1) - return(0); - - return(retval); + if ((retval = __proc_info(PROC_INFO_CALL_PIDINFO, pid, flavor, arg, buffer, buffersize)) == -1) { + return 0; + } + + return retval; } -int +int proc_pidoriginatorinfo(int flavor, void *buffer, int buffersize) { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_PIDORIGINATORINFO, getpid(), flavor, 0, buffer, buffersize)) == -1) - return(0); - - return(retval); + if ((retval = __proc_info(PROC_INFO_CALL_PIDORIGINATORINFO, getpid(), flavor, 0, buffer, buffersize)) == -1) { + return 0; + } + + return retval; } int @@ -120,8 +126,9 @@ proc_listcoalitions(int flavor, int coaltype, void *buffer, int buffersize) { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_LISTCOALITIONS, flavor, coaltype, 0, buffer, buffersize)) == -1) + if ((retval = __proc_info(PROC_INFO_CALL_LISTCOALITIONS, flavor, coaltype, 0, buffer, buffersize)) == -1) { return 0; + } return retval; } @@ -129,7 +136,7 @@ proc_listcoalitions(int flavor, int coaltype, void *buffer, int buffersize) int proc_pid_rusage(int pid, int flavor, rusage_info_t *buffer) { - return (__proc_info(PROC_INFO_CALL_PIDRUSAGE, pid, flavor, 0, buffer, 0)); + return __proc_info(PROC_INFO_CALL_PIDRUSAGE, pid, flavor, 0, buffer, 0); } int @@ -145,18 +152,19 @@ proc_setthread_cpupercent(uint8_t percentage, uint32_t ms_refill) arg = ((ms_refill << 8) | percentage); - return (proc_rlimit_control(-1, 
RLIMIT_THREAD_CPULIMITS, (void *)(uintptr_t)arg)); + return proc_rlimit_control(-1, RLIMIT_THREAD_CPULIMITS, (void *)(uintptr_t)arg); } -int +int proc_pidfdinfo(int pid, int fd, int flavor, void * buffer, int buffersize) { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_PIDFDINFO, pid, flavor, (uint64_t)fd, buffer, buffersize)) == -1) - return(0); - - return (retval); + if ((retval = __proc_info(PROC_INFO_CALL_PIDFDINFO, pid, flavor, (uint64_t)fd, buffer, buffersize)) == -1) { + return 0; + } + + return retval; } @@ -165,9 +173,10 @@ proc_pidfileportinfo(int pid, uint32_t fileport, int flavor, void *buffer, int b { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_PIDFILEPORTINFO, pid, flavor, (uint64_t)fileport, buffer, buffersize)) == -1) - return (0); - return (retval); + if ((retval = __proc_info(PROC_INFO_CALL_PIDFILEPORTINFO, pid, flavor, (uint64_t)fileport, buffer, buffersize)) == -1) { + return 0; + } + return retval; } int @@ -185,7 +194,7 @@ proc_piddynkqueueinfo(int pid, int flavor, kqueue_id_t kq_id, void *buffer, int int proc_udata_info(int pid, int flavor, void *buffer, int buffersize) { - return (__proc_info(PROC_INFO_CALL_UDATA_INFO, pid, flavor, 0, buffer, buffersize)); + return __proc_info(PROC_INFO_CALL_UDATA_INFO, pid, flavor, 0, buffer, buffersize); } int @@ -193,11 +202,11 @@ proc_name(int pid, void * buffer, uint32_t buffersize) { int retval = 0, len; struct proc_bsdinfo pbsd; - - + + if (buffersize < sizeof(pbsd.pbi_name)) { errno = ENOMEM; - return(0); + return 0; } retval = proc_pidinfo(pid, PROC_PIDTBSDINFO, (uint64_t)0, &pbsd, sizeof(struct proc_bsdinfo)); @@ -208,27 +217,27 @@ proc_name(int pid, void * buffer, uint32_t buffersize) bcopy(&pbsd.pbi_comm, buffer, sizeof(pbsd.pbi_comm)); } len = (int)strlen(buffer); - return(len); + return len; } - return(0); + return 0; } -int +int proc_regionfilename(int pid, uint64_t address, void * buffer, uint32_t buffersize) { int retval; struct proc_regionwithpathinfo reginfo; - + if (buffersize < MAXPATHLEN) { errno = ENOMEM; - return(0); + return 0; } - + retval = proc_pidinfo(pid, PROC_PIDREGIONPATHINFO2, (uint64_t)address, &reginfo, sizeof(struct proc_regionwithpathinfo)); if (retval != -1) { - return ((int)(strlcpy(buffer, reginfo.prp_vip.vip_path, MAXPATHLEN))); + return (int)(strlcpy(buffer, reginfo.prp_vip.vip_path, MAXPATHLEN)); } - return(0); + return 0; } int @@ -236,9 +245,10 @@ proc_kmsgbuf(void * buffer, uint32_t buffersize) { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_KERNMSGBUF, 0, 0, (uint64_t)0, buffer, buffersize)) == -1) - return(0); - return (retval); + if ((retval = __proc_info(PROC_INFO_CALL_KERNMSGBUF, 0, 0, (uint64_t)0, buffer, buffersize)) == -1) { + return 0; + } + return retval; } int @@ -248,45 +258,48 @@ proc_pidpath(int pid, void * buffer, uint32_t buffersize) { if (buffersize < PROC_PIDPATHINFO_SIZE) { errno = ENOMEM; - return(0); + return 0; } - if (buffersize > PROC_PIDPATHINFO_MAXSIZE) { + if (buffersize > PROC_PIDPATHINFO_MAXSIZE) { errno = EOVERFLOW; - return(0); + return 0; } - retval = __proc_info(PROC_INFO_CALL_PIDINFO, pid, PROC_PIDPATHINFO, (uint64_t)0, buffer, buffersize); + retval = __proc_info(PROC_INFO_CALL_PIDINFO, pid, PROC_PIDPATHINFO, (uint64_t)0, buffer, buffersize); if (retval != -1) { len = (int)strlen(buffer); - return(len); + return len; } - return (0); + return 0; } -int +int proc_libversion(int *major, int * minor) { - - if (major != NULL) + if (major != NULL) { *major = 1; - if (minor != NULL) + } + if (minor != NULL) { *minor = 1; - return(0); + }
+ return 0; } int proc_setpcontrol(const int control) { - int retval ; + int retval; + + if (control < PROC_SETPC_NONE || control > PROC_SETPC_TERMINATE) { + return EINVAL; + } - if (control < PROC_SETPC_NONE || control > PROC_SETPC_TERMINATE) - return(EINVAL); + if ((retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_PCONTROL, (uint64_t)control, NULL, 0)) == -1) { + return errno; + } - if ((retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_PCONTROL, (uint64_t)control, NULL, 0)) == -1) - return(errno); - - return(0); + return 0; } @@ -295,12 +308,13 @@ proc_setthreadname(void * buffer, int buffersize) { int retval; - retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_THREADNAME, (uint64_t)0, buffer, buffersize); + retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_THREADNAME, (uint64_t)0, buffer, buffersize); - if (retval == -1) - return(errno); - else - return(0); + if (retval == -1) { + return errno; + } else { + return 0; + } } int @@ -309,7 +323,7 @@ proc_track_dirty(pid_t pid, uint32_t flags) if (__proc_info(PROC_INFO_CALL_DIRTYCONTROL, pid, PROC_DIRTYCONTROL_TRACK, flags, NULL, 0) == -1) { return errno; } - + return 0; } @@ -317,7 +331,7 @@ int proc_set_dirty(pid_t pid, bool dirty) { if (__proc_info(PROC_INFO_CALL_DIRTYCONTROL, pid, PROC_DIRTYCONTROL_SET, dirty, NULL, 0) == -1) { - return errno; + return errno; } return 0; @@ -327,16 +341,16 @@ int proc_get_dirty(pid_t pid, uint32_t *flags) { int retval; - + if (!flags) { return EINVAL; } - + retval = __proc_info(PROC_INFO_CALL_DIRTYCONTROL, pid, PROC_DIRTYCONTROL_GET, 0, NULL, 0); if (retval == -1) { - return errno; + return errno; } - + *flags = retval; return 0; @@ -346,7 +360,7 @@ int proc_clear_dirty(pid_t pid, uint32_t flags) { if (__proc_info(PROC_INFO_CALL_DIRTYCONTROL, pid, PROC_DIRTYCONTROL_CLEAR, flags, NULL, 0) == -1) { - return errno; + return errno; } return 0; @@ -356,18 +370,18 @@ int proc_terminate(pid_t pid, int *sig) { int retval; - + if (!sig) { return EINVAL; } - + retval = __proc_info(PROC_INFO_CALL_TERMINATE, pid, 0, 0, NULL, 0); if (retval == -1) { - return errno; + return errno; } - + *sig = retval; - + return 0; } @@ -381,18 +395,18 @@ proc_set_cpumon_params(pid_t pid, int percentage, int interval) { proc_policy_cpuusage_attr_t attr; - /* no argument validation ... - * task_set_cpuusage() ignores 0 values and squashes negative - * values into uint32_t. - */ + /* no argument validation ... + * task_set_cpuusage() ignores 0 values and squashes negative + * values into uint32_t. 
+ */ attr.ppattr_cpu_attr = PROC_POLICY_RSRCACT_NOTIFY_EXC; attr.ppattr_cpu_percentage = percentage; attr.ppattr_cpu_attr_interval = (uint64_t)interval; attr.ppattr_cpu_attr_deadline = 0; - return(__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_SET, PROC_POLICY_RESOURCE_USAGE, - PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, 0)); + return __process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_SET, PROC_POLICY_RESOURCE_USAGE, + PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, 0); } int @@ -402,7 +416,7 @@ proc_get_cpumon_params(pid_t pid, int *percentage, int *interval) int ret; ret = __process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_GET, PROC_POLICY_RESOURCE_USAGE, - PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, 0); + PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, 0); if ((ret == 0) && (attr.ppattr_cpu_attr == PROC_POLICY_RSRCACT_NOTIFY_EXC)) { *percentage = attr.ppattr_cpu_percentage; @@ -412,7 +426,7 @@ proc_get_cpumon_params(pid_t pid, int *percentage, int *interval) *interval = 0; } - return (ret); + return ret; } int @@ -425,18 +439,18 @@ proc_set_cpumon_defaults(pid_t pid) attr.ppattr_cpu_attr_interval = 0; attr.ppattr_cpu_attr_deadline = 0; - return(__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_SET, PROC_POLICY_RESOURCE_USAGE, - PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, 0)); + return __process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_SET, PROC_POLICY_RESOURCE_USAGE, + PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, 0); } int proc_resume_cpumon(pid_t pid) { return __process_policy(PROC_POLICY_SCOPE_PROCESS, - PROC_POLICY_ACTION_ENABLE, - PROC_POLICY_RESOURCE_USAGE, - PROC_POLICY_RUSAGE_CPU, - NULL, pid, 0); + PROC_POLICY_ACTION_ENABLE, + PROC_POLICY_RESOURCE_USAGE, + PROC_POLICY_RUSAGE_CPU, + NULL, pid, 0); } int @@ -449,8 +463,8 @@ proc_disable_cpumon(pid_t pid) attr.ppattr_cpu_attr_interval = 0; attr.ppattr_cpu_attr_deadline = 0; - return(__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_SET, PROC_POLICY_RESOURCE_USAGE, - PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, 0)); + return __process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_SET, PROC_POLICY_RESOURCE_USAGE, + PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, 0); } @@ -468,9 +482,9 @@ proc_set_cpumon_params_fatal(pid_t pid, int percentage, int interval) { int current_percentage = 0; int current_interval = 0; /* intervals are in seconds */ int ret = 0; - if ((percentage <= 0) || (interval <= 0)) { + if ((percentage <= 0) || (interval <= 0)) { errno = EINVAL; - return (-1); + return -1; } /* @@ -478,33 +492,32 @@ * already active. If either the percentage or the * interval is nonzero, then CPU monitoring is * already in use for this process. - * + * * XXX: need set...() and set..fatal() to behave similarly. * Currently, this check prevents 1st party apps (which get a * default non-fatal monitor) from getting a fatal monitor. */ (void)proc_get_cpumon_params(pid, &current_percentage, &current_interval); - if (current_percentage || current_interval) - { + if (current_percentage || current_interval) { /* * The CPU monitor appears to be active. * We choose not to disturb those settings.
*/ errno = EBUSY; - return (-1); + return -1; } - + if ((ret = proc_set_cpumon_params(pid, percentage, interval)) != 0) { /* Failed to activate the CPU monitor */ - return (ret); + return ret; } - + if ((ret = proc_rlimit_control(pid, RLIMIT_CPU_USAGE_MONITOR, (void *)(uintptr_t)CPUMON_MAKE_FATAL)) != 0) { /* Failed to set termination, back out the CPU monitor settings. */ (void)proc_disable_cpumon(pid); } - return (ret); + return ret; } int @@ -515,7 +528,7 @@ proc_set_wakemon_params(pid_t pid, int rate_hz, int flags __unused) params.wm_flags = WAKEMON_ENABLE; params.wm_rate = rate_hz; - return (proc_rlimit_control(pid, RLIMIT_WAKEUPS_MONITOR, &params)); + return proc_rlimit_control(pid, RLIMIT_WAKEUPS_MONITOR, &params); } #ifndef WAKEMON_GET_PARAMS @@ -532,13 +545,13 @@ proc_get_wakemon_params(pid_t pid, int *rate_hz, int *flags) params.wm_flags = WAKEMON_GET_PARAMS; if ((error = proc_rlimit_control(pid, RLIMIT_WAKEUPS_MONITOR, &params)) != 0) { - return (error); + return error; } *rate_hz = params.wm_rate; *flags = params.wm_flags; - return (0); + return 0; } int @@ -549,7 +562,7 @@ proc_set_wakemon_defaults(pid_t pid) params.wm_flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS; params.wm_rate = -1; - return (proc_rlimit_control(pid, RLIMIT_WAKEUPS_MONITOR, &params)); + return proc_rlimit_control(pid, RLIMIT_WAKEUPS_MONITOR, &params); } int @@ -560,25 +573,25 @@ proc_disable_wakemon(pid_t pid) params.wm_flags = WAKEMON_DISABLE; params.wm_rate = -1; - return (proc_rlimit_control(pid, RLIMIT_WAKEUPS_MONITOR, &params)); + return proc_rlimit_control(pid, RLIMIT_WAKEUPS_MONITOR, &params); } int proc_list_uptrs(int pid, uint64_t *buf, uint32_t bufsz) { return __proc_info(PROC_INFO_CALL_PIDINFO, pid, PROC_PIDLISTUPTRS, 0, - buf, bufsz); + buf, bufsz); } int proc_list_dynkqueueids(int pid, kqueue_id_t *buf, uint32_t bufsz) { return __proc_info(PROC_INFO_CALL_PIDINFO, pid, PROC_PIDLISTDYNKQUEUES, 0, - buf, bufsz); + buf, bufsz); } -int +int proc_setcpu_percentage(pid_t pid, int action, int percentage) { proc_policy_cpuusage_attr_t attr; @@ -586,25 +599,27 @@ proc_setcpu_percentage(pid_t pid, int action, int percentage) bzero(&attr, sizeof(proc_policy_cpuusage_attr_t)); attr.ppattr_cpu_attr = action; attr.ppattr_cpu_percentage = percentage; - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, (uint64_t)0) != -1) - return(0); - else - return(errno); + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, (uint64_t)0) != -1) { + return 0; + } else { + return errno; + } } int proc_reset_footprint_interval(pid_t pid) { - return (proc_rlimit_control(pid, RLIMIT_FOOTPRINT_INTERVAL, (void *)(uintptr_t)FOOTPRINT_INTERVAL_RESET)); + return proc_rlimit_control(pid, RLIMIT_FOOTPRINT_INTERVAL, (void *)(uintptr_t)FOOTPRINT_INTERVAL_RESET); } int proc_clear_cpulimits(pid_t pid) { - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_RESTORE, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, NULL, pid, (uint64_t)0) != -1) - return(0); - else - return(errno); + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_RESTORE, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, NULL, pid, (uint64_t)0) != -1) { + return 0; + } else { + return errno; + } } #if TARGET_OS_EMBEDDED @@ -617,11 +632,11 @@ proc_setcpu_deadline(pid_t pid, int action, uint64_t deadline) bzero(&attr,
sizeof(proc_policy_cpuusage_attr_t)); attr.ppattr_cpu_attr = action; attr.ppattr_cpu_attr_deadline = deadline; - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, (uint64_t)0) != -1) - return(0); - else - return(errno); - + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, (uint64_t)0) != -1) { + return 0; + } else { + return errno; + } } int @@ -633,10 +648,11 @@ proc_setcpu_percentage_withdeadline(pid_t pid, int action, int percentage, uint6 attr.ppattr_cpu_attr = action; attr.ppattr_cpu_percentage = percentage; attr.ppattr_cpu_attr_deadline = deadline; - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, (uint64_t)0) != -1) - return(0); - else - return(errno); + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, (uint64_t)0) != -1) { + return 0; + } else { + return errno; + } } int @@ -645,12 +661,13 @@ proc_appstate(int pid, int * appstatep) int state; if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_GET, PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_STATE, (proc_policy_attribute_t*)&state, pid, (uint64_t)0) != -1) { - if (appstatep != NULL) + if (appstatep != NULL) { *appstatep = state; - return(0); - } else - return(errno); - + } + return 0; + } else { + return errno; + } } int @@ -659,109 +676,113 @@ proc_setappstate(int pid, int appstate) int state = appstate; switch (state) { - case PROC_APPSTATE_NONE: - case PROC_APPSTATE_ACTIVE: - case PROC_APPSTATE_INACTIVE: - case PROC_APPSTATE_BACKGROUND: - case PROC_APPSTATE_NONUI: - break; - default: - return(EINVAL); - } - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_STATE, (proc_policy_attribute_t*)&state, pid, (uint64_t)0) != -1) - return(0); - else - return(errno); -} - -int + case PROC_APPSTATE_NONE: + case PROC_APPSTATE_ACTIVE: + case PROC_APPSTATE_INACTIVE: + case PROC_APPSTATE_BACKGROUND: + case PROC_APPSTATE_NONUI: + break; + default: + return EINVAL; + } + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_STATE, (proc_policy_attribute_t*)&state, pid, (uint64_t)0) != -1) { + return 0; + } else { + return errno; + } +} + +int proc_devstatusnotify(int devicestatus) { int state = devicestatus; switch (devicestatus) { - case PROC_DEVSTATUS_SHORTTERM: - case PROC_DEVSTATUS_LONGTERM: - break; - default: - return(EINVAL); + case PROC_DEVSTATUS_SHORTTERM: + case PROC_DEVSTATUS_LONGTERM: + break; + default: + return EINVAL; } if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_DEVSTATUS, (proc_policy_attribute_t*)&state, getpid(), (uint64_t)0) != -1) { - return(0); - } else - return(errno); - + return 0; + } else { + return errno; + } } int proc_pidbind(int pid, uint64_t threadid, int bind) { - int state = bind; + int state = bind; pid_t passpid = pid; switch (bind) { - case PROC_PIDBIND_CLEAR: - passpid = getpid(); /* ignore pid on clear */ - break; - case PROC_PIDBIND_SET: - break; - default: - return(EINVAL); + case PROC_PIDBIND_CLEAR: + passpid = getpid(); /* 
ignore pid on clear */ + break; + case PROC_PIDBIND_SET: + break; + default: + return EINVAL; + } + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_PIDBIND, (proc_policy_attribute_t*)&state, passpid, threadid) != -1) { + return 0; + } else { + return errno; } - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_APP_LIFECYCLE, PROC_POLICY_APPLIFE_PIDBIND, (proc_policy_attribute_t*)&state, passpid, threadid) != -1) - return(0); - else - return(errno); } int proc_can_use_foreground_hw(int pid, uint32_t *reason) { - return __proc_info(PROC_INFO_CALL_CANUSEFGHW, pid, 0, 0, reason, sizeof(*reason)); + return __proc_info(PROC_INFO_CALL_CANUSEFGHW, pid, 0, 0, reason, sizeof(*reason)); } #endif /* TARGET_OS_EMBEDDED */ /* Donate importance to adaptive processes from this process */ -int +int proc_donate_importance_boost() { int rval; #if TARGET_OS_EMBEDDED rval = __process_policy(PROC_POLICY_SCOPE_PROCESS, - PROC_POLICY_ACTION_ENABLE, - PROC_POLICY_APPTYPE, - PROC_POLICY_IOS_DONATEIMP, - NULL, getpid(), (uint64_t)0); + PROC_POLICY_ACTION_ENABLE, + PROC_POLICY_APPTYPE, + PROC_POLICY_IOS_DONATEIMP, + NULL, getpid(), (uint64_t)0); #else /* TARGET_OS_EMBEDDED */ rval = __process_policy(PROC_POLICY_SCOPE_PROCESS, - PROC_POLICY_ACTION_SET, - PROC_POLICY_BOOST, - PROC_POLICY_IMP_DONATION, - NULL, getpid(), 0); + PROC_POLICY_ACTION_SET, + PROC_POLICY_BOOST, + PROC_POLICY_IMP_DONATION, + NULL, getpid(), 0); #endif /* TARGET_OS_EMBEDDED */ - if (rval == 0) - return (0); - else - return (errno); + if (rval == 0) { + return 0; + } else { + return errno; + } } static __attribute__((noinline)) void -proc_importance_bad_assertion(char *reason) { - (void)reason; +proc_importance_bad_assertion(char *reason) +{ + (void)reason; } -/* +/* * Use the address of these variables as the token. This way, they can be * printed in the debugger as useful names. */ uint64_t important_boost_assertion_token = 0xfafafafafafafafa; uint64_t normal_boost_assertion_token = 0xfbfbfbfbfbfbfbfb; uint64_t non_boost_assertion_token = 0xfcfcfcfcfcfcfcfc; -uint64_t denap_boost_assertion_token = 0xfdfdfdfdfdfdfdfd; +uint64_t denap_boost_assertion_token = 0xfdfdfdfdfdfdfdfd; /* * Accept the boost on a message, or request another boost assertion @@ -774,48 +795,48 @@ uint64_t denap_boost_assertion_token = 0xfdfdfdfdfdfdfdfd; */ int proc_importance_assertion_begin_with_msg(mach_msg_header_t *msg, - __unused mach_msg_trailer_t *trailer, - uint64_t *assertion_token) + __unused mach_msg_trailer_t *trailer, + uint64_t *assertion_token) { int rval = 0; - if (assertion_token == NULL) - return (EINVAL); + if (assertion_token == NULL) { + return EINVAL; + } #define LEGACYBOOSTMASK (MACH_MSGH_BITS_VOUCHER_MASK | MACH_MSGH_BITS_RAISEIMP) #define LEGACYBOOSTED(m) (((m)->msgh_bits & LEGACYBOOSTMASK) == MACH_MSGH_BITS_RAISEIMP) /* Is this a legacy boosted message? */ if (LEGACYBOOSTED(msg)) { - - /* + /* * Have we accepted the implicit boost for this message yet? - * If we haven't accepted it yet, no need to call into kernel. + * If we haven't accepted it yet, no need to call into kernel. 
*/ if ((msg->msgh_bits & MACH_MSGH_BITS_IMPHOLDASRT) == 0) { msg->msgh_bits |= MACH_MSGH_BITS_IMPHOLDASRT; *assertion_token = (uint64_t) &important_boost_assertion_token; - return (0); + return 0; } /* Request an additional boost count */ rval = __process_policy(PROC_POLICY_SCOPE_PROCESS, - PROC_POLICY_ACTION_HOLD, - PROC_POLICY_BOOST, - PROC_POLICY_IMP_IMPORTANT, - NULL, getpid(), 0); + PROC_POLICY_ACTION_HOLD, + PROC_POLICY_BOOST, + PROC_POLICY_IMP_IMPORTANT, + NULL, getpid(), 0); if (rval == 0) { *assertion_token = (uint64_t) &important_boost_assertion_token; - return (0); + return 0; } else if (errno == EOVERFLOW) { proc_importance_bad_assertion("Attempted to take assertion while not boosted"); - return (errno); + return errno; } else { - return (errno); + return errno; } } - - return (EIO); + + return EIO; } @@ -828,26 +849,27 @@ proc_importance_assertion_complete(uint64_t assertion_token) { int rval = 0; - if (assertion_token == 0) - return (0); + if (assertion_token == 0) { + return 0; + } if (assertion_token == (uint64_t) &important_boost_assertion_token) { rval = __process_policy(PROC_POLICY_SCOPE_PROCESS, - PROC_POLICY_ACTION_DROP, - PROC_POLICY_BOOST, - PROC_POLICY_IMP_IMPORTANT, - NULL, getpid(), 0); + PROC_POLICY_ACTION_DROP, + PROC_POLICY_BOOST, + PROC_POLICY_IMP_IMPORTANT, + NULL, getpid(), 0); if (rval == 0) { - return (0); + return 0; } else if (errno == EOVERFLOW) { proc_importance_bad_assertion("Attempted to drop too many assertions"); - return (errno); + return errno; } else { - return (errno); + return errno; } } else { proc_importance_bad_assertion("Attempted to drop assertion with invalid token"); - return (EIO); + return EIO; } } @@ -860,7 +882,7 @@ proc_importance_assertion_complete(uint64_t assertion_token) */ int proc_denap_assertion_begin_with_msg(mach_msg_header_t *msg, - uint64_t *assertion_token) + uint64_t *assertion_token) { #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdeprecated-declarations" @@ -886,85 +908,88 @@ proc_denap_assertion_complete(uint64_t assertion_token) int proc_clear_vmpressure(pid_t pid) { - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_RESTORE, PROC_POLICY_RESOURCE_STARVATION, PROC_POLICY_RS_VIRTUALMEM, NULL, pid, (uint64_t)0) != -1) - return(0); - else - return(errno); + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_RESTORE, PROC_POLICY_RESOURCE_STARVATION, PROC_POLICY_RS_VIRTUALMEM, NULL, pid, (uint64_t)0) != -1) { + return 0; + } else { + return errno; + } } /* set the current process as one who can resume suspended processes due to low virtual memory. 
Need to be root */ -int +int proc_set_owner_vmpressure(void) { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_VMRSRCOWNER, (uint64_t)0, NULL, 0)) == -1) - return(errno); - - return(0); + if ((retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_VMRSRCOWNER, (uint64_t)0, NULL, 0)) == -1) { + return errno; + } + + return 0; } /* mark yourself to delay idle sleep on disk IO */ -int +int proc_set_delayidlesleep(void) { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_DELAYIDLESLEEP, (uint64_t)1, NULL, 0)) == -1) - return(errno); + if ((retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_DELAYIDLESLEEP, (uint64_t)1, NULL, 0)) == -1) { + return errno; + } - return(0); + return 0; } /* Reset yourself to delay idle sleep on disk IO, if already set */ -int +int proc_clear_delayidlesleep(void) { int retval; - if ((retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_DELAYIDLESLEEP, (uint64_t)0, NULL, 0)) == -1) - return(errno); + if ((retval = __proc_info(PROC_INFO_CALL_SETCONTROL, getpid(), PROC_SELFSET_DELAYIDLESLEEP, (uint64_t)0, NULL, 0)) == -1) { + return errno; + } - return(0); + return 0; } /* disable the launch time background policy and restore the process to default group */ -int +int proc_disable_apptype(pid_t pid, int apptype) { switch (apptype) { - case PROC_POLICY_OSX_APPTYPE_TAL: - case PROC_POLICY_OSX_APPTYPE_DASHCLIENT: - break; - default: - return(EINVAL); + case PROC_POLICY_OSX_APPTYPE_TAL: + case PROC_POLICY_OSX_APPTYPE_DASHCLIENT: + break; + default: + return EINVAL; } - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_DISABLE, PROC_POLICY_APPTYPE, apptype, NULL, pid, (uint64_t)0) != -1) - return(0); - else - return(errno); - + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_DISABLE, PROC_POLICY_APPTYPE, apptype, NULL, pid, (uint64_t)0) != -1) { + return 0; + } else { + return errno; + } } /* re-enable the launch time background policy if it had been disabled. */ -int +int proc_enable_apptype(pid_t pid, int apptype) { switch (apptype) { - case PROC_POLICY_OSX_APPTYPE_TAL: - case PROC_POLICY_OSX_APPTYPE_DASHCLIENT: - break; - default: - return(EINVAL); - + case PROC_POLICY_OSX_APPTYPE_TAL: + case PROC_POLICY_OSX_APPTYPE_DASHCLIENT: + break; + default: + return EINVAL; } - if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_ENABLE, PROC_POLICY_APPTYPE, apptype, NULL, pid, (uint64_t)0) != -1) - return(0); - else - return(errno); - + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_ENABLE, PROC_POLICY_APPTYPE, apptype, NULL, pid, (uint64_t)0) != -1) { + return 0; + } else { + return errno; + } } #if !TARGET_IPHONE_SIMULATOR @@ -978,6 +1003,3 @@ proc_suppress(__unused pid_t pid, __unused uint64_t *generation) #endif /* !TARGET_IPHONE_SIMULATOR */ #endif /* !TARGET_OS_EMBEDDED */ - - - diff --git a/libsyscall/wrappers/libproc/libproc.h b/libsyscall/wrappers/libproc/libproc.h index 61b69d9b6..053e039b7 100644 --- a/libsyscall/wrappers/libproc/libproc.h +++ b/libsyscall/wrappers/libproc/libproc.h @@ -2,14 +2,14 @@ * Copyright (c) 2006, 2007, 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License.
Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #ifndef _LIBPROC_H_ @@ -37,61 +37,61 @@ #include /* - * This header file contains private interfaces to obtain process information. + * This header file contains private interfaces to obtain process information. * These interfaces are subject to change in future releases. */ /*! - @define PROC_LISTPIDSPATH_PATH_IS_VOLUME - @discussion This flag indicates that all processes that hold open - file references on the volume associated with the specified - path should be returned. + * @define PROC_LISTPIDSPATH_PATH_IS_VOLUME + * @discussion This flag indicates that all processes that hold open + * file references on the volume associated with the specified + * path should be returned. */ -#define PROC_LISTPIDSPATH_PATH_IS_VOLUME 1 +#define PROC_LISTPIDSPATH_PATH_IS_VOLUME 1 /*! - @define PROC_LISTPIDSPATH_EXCLUDE_EVTONLY - @discussion This flag indicates that file references that were opened - with the O_EVTONLY flag should be excluded from the matching - criteria. + * @define PROC_LISTPIDSPATH_EXCLUDE_EVTONLY + * @discussion This flag indicates that file references that were opened + * with the O_EVTONLY flag should be excluded from the matching + * criteria. */ -#define PROC_LISTPIDSPATH_EXCLUDE_EVTONLY 2 +#define PROC_LISTPIDSPATH_EXCLUDE_EVTONLY 2 __BEGIN_DECLS /*! - @function proc_listpidspath - @discussion A function which will search through the current - processes looking for open file references which match - a specified path or volume. - @param type types of processes to be searched (see proc_listpids) - @param typeinfo adjunct information for type - @param path file or volume path - @param pathflags flags to control which files should be considered - during the process search. - @param buffer a C array of int-sized values to be filled with - process identifiers that hold an open file reference - matching the specified path or volume. Pass NULL to - obtain the minimum buffer size needed to hold the - currently active processes. - @param buffersize the size (in bytes) of the provided buffer. - @result the number of bytes of data returned in the provided buffer; - -1 if an error was encountered; + * @function proc_listpidspath + * @discussion A function which will search through the current + * processes looking for open file references which match + * a specified path or volume. + * @param type types of processes to be searched (see proc_listpids) + * @param typeinfo adjunct information for type + * @param path file or volume path + * @param pathflags flags to control which files should be considered + * during the process search. + * @param buffer a C array of int-sized values to be filled with + * process identifiers that hold an open file reference + * matching the specified path or volume. Pass NULL to + * obtain the minimum buffer size needed to hold the + * currently active processes. + * @param buffersize the size (in bytes) of the provided buffer. 
+ * @result the number of bytes of data returned in the provided buffer; + * -1 if an error was encountered; */ -int proc_listpidspath(uint32_t type, - uint32_t typeinfo, - const char *path, - uint32_t pathflags, - void *buffer, - int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int proc_listpidspath(uint32_t type, + uint32_t typeinfo, + const char *path, + uint32_t pathflags, + void *buffer, + int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); int proc_listpids(uint32_t type, uint32_t typeinfo, void *buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); int proc_listallpids(void * buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_1); int proc_listpgrppids(pid_t pgrpid, void * buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_1); int proc_listchildpids(pid_t ppid, void * buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_1); -int proc_pidinfo(int pid, int flavor, uint64_t arg, void *buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int proc_pidinfo(int pid, int flavor, uint64_t arg, void *buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); int proc_pidfdinfo(int pid, int fd, int flavor, void * buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); int proc_pidfileportinfo(int pid, uint32_t fileport, int flavor, void *buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); int proc_name(int pid, void * buffer, uint32_t buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); @@ -107,14 +107,14 @@ int proc_libversion(int *major, int * minor) __OSX_AVAILABLE_STARTING(__MAC_10_5 */ int proc_pid_rusage(int pid, int flavor, rusage_info_t *buffer) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); -/* - * A process can use the following api to set its own process control +/* + * A process can use the following api to set its own process control * state on resource starvation. The argument can have one of the PROC_SETPC_XX values */ -#define PROC_SETPC_NONE 0 -#define PROC_SETPC_THROTTLEMEM 1 -#define PROC_SETPC_SUSPEND 2 -#define PROC_SETPC_TERMINATE 3 +#define PROC_SETPC_NONE 0 +#define PROC_SETPC_THROTTLEMEM 1 +#define PROC_SETPC_SUSPEND 2 +#define PROC_SETPC_TERMINATE 3 int proc_setpcontrol(const int control) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); int proc_setpcontrol(const int control); @@ -142,7 +142,7 @@ int proc_list_uptrs(pid_t pid, uint64_t *buffer, uint32_t buffersize); int proc_list_dynkqueueids(int pid, kqueue_id_t *buf, uint32_t bufsz); int proc_piddynkqueueinfo(int pid, int flavor, kqueue_id_t kq_id, void *buffer, - int buffersize); + int buffersize); #endif /* PRIVATE */ int proc_udata_info(int pid, int flavor, void *buffer, int buffersize); diff --git a/libsyscall/wrappers/libproc/libproc_internal.h b/libsyscall/wrappers/libproc/libproc_internal.h index 513fda9ba..1f4bc60da 100644 --- a/libsyscall/wrappers/libproc/libproc_internal.h +++ b/libsyscall/wrappers/libproc/libproc_internal.h @@ -2,14 +2,14 @@ * Copyright (c) 2010-2018 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License.
Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #ifndef _LIBPROC_INTERNALH_ @@ -32,8 +32,8 @@ __BEGIN_DECLS /* CPU monitor action */ -#define PROC_SETCPU_ACTION_NONE 0 -#define PROC_SETCPU_ACTION_THROTTLE 1 +#define PROC_SETCPU_ACTION_NONE 0 +#define PROC_SETCPU_ACTION_THROTTLE 1 int proc_setcpu_percentage(pid_t pid, int action, int percentage) __OSX_AVAILABLE_STARTING(__MAC_10_12_2, __IPHONE_5_0); int proc_clear_cpulimits(pid_t pid) __OSX_AVAILABLE_STARTING(__MAC_10_12_2, __IPHONE_5_0); @@ -44,29 +44,29 @@ int proc_setthread_cpupercent(uint8_t percentage, uint32_t ms_refill) __OSX_AVAI #if TARGET_OS_EMBEDDED /* CPU monitor action, continued */ -#define PROC_SETCPU_ACTION_SUSPEND 2 -#define PROC_SETCPU_ACTION_TERMINATE 3 -#define PROC_SETCPU_ACTION_NOTIFY 4 +#define PROC_SETCPU_ACTION_SUSPEND 2 +#define PROC_SETCPU_ACTION_TERMINATE 3 +#define PROC_SETCPU_ACTION_NOTIFY 4 int proc_setcpu_deadline(pid_t pid, int action, uint64_t deadline) __OSX_AVAILABLE_STARTING(__MAC_NA, __IPHONE_5_0); int proc_setcpu_percentage_withdeadline(pid_t pid, int action, int percentage, uint64_t deadline) __OSX_AVAILABLE_STARTING(__MAC_NA, __IPHONE_5_0); -#define PROC_APPSTATE_NONE 0 -#define PROC_APPSTATE_ACTIVE 1 -#define PROC_APPSTATE_BACKGROUND 2 -#define PROC_APPSTATE_NONUI 3 -#define PROC_APPSTATE_INACTIVE 4 +#define PROC_APPSTATE_NONE 0 +#define PROC_APPSTATE_ACTIVE 1 +#define PROC_APPSTATE_BACKGROUND 2 +#define PROC_APPSTATE_NONUI 3 +#define PROC_APPSTATE_INACTIVE 4 int proc_setappstate(int pid, int appstate); int proc_appstate(int pid, int * appstatep); -#define PROC_DEVSTATUS_SHORTTERM 1 -#define PROC_DEVSTATUS_LONGTERM 2 +#define PROC_DEVSTATUS_SHORTTERM 1 +#define PROC_DEVSTATUS_LONGTERM 2 int proc_devstatusnotify(int devicestatus); -#define PROC_PIDBIND_CLEAR 0 -#define PROC_PIDBIND_SET 1 +#define PROC_PIDBIND_CLEAR 0 +#define PROC_PIDBIND_SET 1 int proc_pidbind(int pid, uint64_t threadid, int bind); /* @@ -105,7 +105,7 @@ int proc_clear_delayidlesleep(void); #define PROC_POLICY_OSX_APPTYPE_WIDGET 2 /* for dashboard client */ #define PROC_POLICY_OSX_APPTYPE_DASHCLIENT 2 /* rename to move away from widget */ -/* +/* * Resumes the backgrounded TAL or dashboard client. Only priv users can disable TAL apps. * Valid apptype are PROC_POLICY_OSX_APPTYPE_DASHCLIENT and PROC_POLICY_OSX_APPTYPE_TAL. * Returns 0 on success otherwise appropriate error code. 
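/*
 * A minimal sketch of pairing these two calls from a privileged caller
 * (hypothetical usage; `pid` names a TAL client and error handling is
 * elided):
 *
 *	if (proc_disable_apptype(pid, PROC_POLICY_OSX_APPTYPE_TAL) == 0) {
 *		... the app now runs outside the launch-time background policy ...
 *		proc_enable_apptype(pid, PROC_POLICY_OSX_APPTYPE_TAL);
 *	}
 */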
@@ -121,8 +121,8 @@ int proc_donate_importance_boost(void); /* DEPRECATED: supported for backward compatibility only */ /* check the message for an importance boost and take an assertion on it */ int proc_importance_assertion_begin_with_msg(mach_msg_header_t *msg, - mach_msg_trailer_t *trailer, - uint64_t *assertion_token) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_8, __MAC_10_10, __IPHONE_6_0, __IPHONE_8_0); + mach_msg_trailer_t *trailer, + uint64_t *assertion_token) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_8, __MAC_10_10, __IPHONE_6_0, __IPHONE_8_0); /* DEPRECATED: supported for backward compatibility only */ /* drop an assertion */ @@ -130,7 +130,7 @@ int proc_importance_assertion_complete(uint64_t assertion_handle); /* check the message for a App De-Nap boost and take an assertion on it */ int proc_denap_assertion_begin_with_msg(mach_msg_header_t *msg, - uint64_t *assertion_token); + uint64_t *assertion_token); /* drop a de-nap assertion */ int proc_denap_assertion_complete(uint64_t assertion_handle); @@ -156,7 +156,7 @@ int proc_reset_footprint_interval(pid_t pid) __OSX_AVAILABLE_STARTING(__MAC_10_1 int proc_trace_log(pid_t pid, uint64_t uniqueid) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); /* proc_info call to get the originator information */ -int proc_pidoriginatorinfo(int flavor, void *buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int proc_pidoriginatorinfo(int flavor, void *buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); int proc_listcoalitions(int flavor, int coaltype, void *buffer, int buffersize) __OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_8_3); @@ -173,4 +173,3 @@ int proc_suppress(pid_t pid, uint64_t *generation); __END_DECLS #endif /* _LIBPROC_INTERNALH_ */ - diff --git a/libsyscall/wrappers/libproc/proc_listpidspath.c b/libsyscall/wrappers/libproc/proc_listpidspath.c index c263c7f2c..e5812bcdc 100644 --- a/libsyscall/wrappers/libproc/proc_listpidspath.c +++ b/libsyscall/wrappers/libproc/proc_listpidspath.c @@ -2,14 +2,14 @@ * Copyright (c) 2007, 2008 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -32,26 +32,25 @@ typedef struct { // process IDs - int *pids; - int pids_count; - size_t pids_size; + int *pids; + int pids_count; + size_t pids_size; // threads - uint64_t *threads; - int thr_count; - size_t thr_size; + uint64_t *threads; + int thr_count; + size_t thr_size; // open file descriptors - struct proc_fdinfo *fds; - int fds_count; - size_t fds_size; + struct proc_fdinfo *fds; + int fds_count; + size_t fds_size; // file/volume of interest - struct stat match_stat; + struct stat match_stat; // flags - uint32_t flags; - + uint32_t flags; } fdOpenInfo, *fdOpenInfoRef; @@ -61,35 +60,36 @@ typedef struct { static fdOpenInfoRef check_init(const char *path, uint32_t flags) { - fdOpenInfoRef info; - int status; + fdOpenInfoRef info; + int status; info = malloc(sizeof(*info)); - if (!info) + if (!info) { return NULL; + } - info->pids = NULL; - info->pids_count = 0; - info->pids_size = 0; + info->pids = NULL; + info->pids_count = 0; + info->pids_size = 0; - info->threads = NULL; - info->thr_count = 0; - info->thr_size = 0; + info->threads = NULL; + info->thr_count = 0; + info->thr_size = 0; - info->fds = NULL; - info->fds_count = 0; - info->fds_size = 0; + info->fds = NULL; + info->fds_count = 0; + info->fds_size = 0; status = stat(path, &info->match_stat); if (status == -1) { goto fail; } - info->flags = flags; + info->flags = flags; return info; - fail : +fail: free(info); return NULL; @@ -165,9 +165,9 @@ check_file(fdOpenInfoRef info, struct vinfo_stat *sb) static int check_process_vnodes(fdOpenInfoRef info, int pid) { - int buf_used; - int status; - struct proc_vnodepathinfo vpi; + int buf_used; + int status; + struct proc_vnodepathinfo vpi; buf_used = proc_pidinfo(pid, PROC_PIDVNODEPATHINFO, 0, &vpi, sizeof(vpi)); if (buf_used <= 0) { @@ -211,12 +211,11 @@ check_process_vnodes(fdOpenInfoRef info, int pid) static int check_process_text(fdOpenInfoRef info, int pid) { - int status; - int buf_used; - struct proc_regionwithpathinfo rwpi; + int status; + int buf_used; + struct proc_regionwithpathinfo rwpi; if (info->flags & PROC_LISTPIDSPATH_PATH_IS_VOLUME) { - // ask for first memory region that matches mountpoint buf_used = proc_pidinfo(pid, PROC_PIDREGIONPATHINFO3, info->match_stat.st_dev, &rwpi, sizeof(rwpi)); if (buf_used <= 0) { @@ -229,16 +228,16 @@ check_process_text(fdOpenInfoRef info, int pid) // if we didn't get enough information return -1; } - + status = check_file(info, &rwpi.prp_vip.vip_vi.vi_stat); if (status != 0) { // if error or match return status; } } else { - uint64_t a = 0; - - while (1) { // for all memory regions + uint64_t a = 0; + + while (1) { // for all memory regions // processing next address buf_used = proc_pidinfo(pid, PROC_PIDREGIONPATHINFO2, a, &rwpi, sizeof(rwpi)); if (buf_used <= 0) { @@ -251,13 +250,13 @@ check_process_text(fdOpenInfoRef info, int pid) // if we didn't get enough information return -1; } - + status = check_file(info, &rwpi.prp_vip.vip_vi.vi_stat); if (status != 0) { // if error or match return status; } - + a = rwpi.prp_prinfo.pri_address + rwpi.prp_prinfo.pri_size; } } @@ -278,9 +277,9 @@ check_process_text(fdOpenInfoRef info, int pid) static int check_process_fds(fdOpenInfoRef info, int pid) { - int buf_used; - int i; - int status; + int buf_used; + int i; + int status; // get list of open file descriptors buf_used = proc_pidinfo(pid, PROC_PIDLISTFDS, 0, NULL, 0); @@ -322,46 +321,46 @@ check_process_fds(fdOpenInfoRef info, int pid) // iterate through each file descriptor for (i = 0; i < 
info->fds_count; i++) { - struct proc_fdinfo *fdp; + struct proc_fdinfo *fdp; fdp = &info->fds[i]; switch (fdp->proc_fdtype) { - case PROX_FDTYPE_VNODE : { - int buf_used; - struct vnode_fdinfo vi; - - buf_used = proc_pidfdinfo(pid, fdp->proc_fd, PROC_PIDFDVNODEINFO, &vi, sizeof(vi)); - if (buf_used <= 0) { - if (errno == ENOENT) { - /* - * The file descriptor's vnode may have been revoked. This is a - * bit of a hack, since an ENOENT error might not always mean the - * descriptor's vnode has been revoked. As the libproc API - * matures, this code may need to be revisited. - */ - continue; - } - return -1; - } else if (buf_used < sizeof(vi)) { - // if we didn't get enough information - return -1; - } + case PROX_FDTYPE_VNODE: { + int buf_used; + struct vnode_fdinfo vi; - if ((info->flags & PROC_LISTPIDSPATH_EXCLUDE_EVTONLY) && - (vi.pfi.fi_openflags & O_EVTONLY)) { - // if this file should be excluded + buf_used = proc_pidfdinfo(pid, fdp->proc_fd, PROC_PIDFDVNODEINFO, &vi, sizeof(vi)); + if (buf_used <= 0) { + if (errno == ENOENT) { + /* + * The file descriptor's vnode may have been revoked. This is a + * bit of a hack, since an ENOENT error might not always mean the + * descriptor's vnode has been revoked. As the libproc API + * matures, this code may need to be revisited. + */ continue; } + return -1; + } else if (buf_used < sizeof(vi)) { + // if we didn't get enough information + return -1; + } - status = check_file(info, &vi.pvi.vi_stat); - if (status != 0) { - // if error or match - return status; - } - break; + if ((info->flags & PROC_LISTPIDSPATH_EXCLUDE_EVTONLY) && + (vi.pfi.fi_openflags & O_EVTONLY)) { + // if this file should be excluded + continue; + } + + status = check_file(info, &vi.pvi.vi_stat); + if (status != 0) { + // if error or match + return status; } - default : - break; + break; + } + default: + break; } } @@ -381,9 +380,9 @@ check_process_fds(fdOpenInfoRef info, int pid) static int check_process_threads(fdOpenInfoRef info, int pid) { - int buf_used; - int status; - struct proc_taskallinfo tai; + int buf_used; + int status; + struct proc_taskallinfo tai; buf_used = proc_pidinfo(pid, PROC_PIDTASKALLINFO, 0, &tai, sizeof(tai)); if (buf_used <= 0) { @@ -399,7 +398,7 @@ check_process_threads(fdOpenInfoRef info, int pid) // check thread info if (tai.pbsd.pbi_flags & PROC_FLAG_THCWD) { - int i; + int i; // get list of threads buf_used = tai.ptinfo.pti_threadnum * sizeof(uint64_t); @@ -438,8 +437,8 @@ check_process_threads(fdOpenInfoRef info, int pid) // iterate through each thread for (i = 0; i < info->thr_count; i++) { - uint64_t thr = info->threads[i]; - struct proc_threadwithpathinfo tpi; + uint64_t thr = info->threads[i]; + struct proc_threadwithpathinfo tpi; buf_used = proc_pidinfo(pid, PROC_PIDTHREADPATHINFO, thr, &tpi, sizeof(tpi)); if (buf_used <= 0) { @@ -478,7 +477,7 @@ check_process_threads(fdOpenInfoRef info, int pid) static int check_process_phase1(fdOpenInfoRef info, int pid) { - int status; + int status; // check root and current working directory status = check_process_vnodes(info, pid); @@ -516,7 +515,7 @@ check_process_phase1(fdOpenInfoRef info, int pid) static int check_process_phase2(fdOpenInfoRef info, int pid) { - int status; + int status; // check process text (memory) status = check_process_text(info, pid); @@ -543,18 +542,18 @@ check_process_phase2(fdOpenInfoRef info, int pid) * that contains valid information. 
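 *
 * A minimal usage sketch (hypothetical caller and path; error handling
 * elided). Per the sizing convention, passing a NULL buffer first
 * returns the byte count needed for the currently active processes:
 *
 *	int nbytes = proc_listpidspath(PROC_ALL_PIDS, 0, "/Volumes/Data",
 *	    PROC_LISTPIDSPATH_PATH_IS_VOLUME, NULL, 0);
 *	int *pids = malloc(nbytes);
 *	nbytes = proc_listpidspath(PROC_ALL_PIDS, 0, "/Volumes/Data",
 *	    PROC_LISTPIDSPATH_PATH_IS_VOLUME, pids, nbytes);
 *	int npids = nbytes / (int)sizeof(int);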
*/ int -proc_listpidspath(uint32_t type, - uint32_t typeinfo, - const char *path, - uint32_t pathflags, - void *buffer, - int buffersize) +proc_listpidspath(uint32_t type, + uint32_t typeinfo, + const char *path, + uint32_t pathflags, + void *buffer, + int buffersize) { - int buf_used; - int *buf_next = (int *)buffer; - int i; - fdOpenInfoRef info; - int status = -1; + int buf_used; + int *buf_next = (int *)buffer; + int i; + fdOpenInfoRef info; + int status = -1; if (buffer == NULL) { // if this is a sizing request @@ -615,8 +614,8 @@ proc_listpidspath(uint32_t type, // iterate through each process buf_used = 0; for (i = info->pids_count - 1; i >= 0; i--) { - int pid; - int pstatus; + int pid; + int pstatus; pid = info->pids[i]; if (pid == 0) { @@ -646,8 +645,8 @@ proc_listpidspath(uint32_t type, // do a more expensive search if we still have buffer space for (i = info->pids_count - 1; i >= 0; i--) { - int pid; - int pstatus; + int pid; + int pstatus; pid = info->pids[i]; if (pid == 0) { @@ -671,7 +670,7 @@ proc_listpidspath(uint32_t type, status = buf_used; - done : +done: // cleanup check_free(info); diff --git a/libsyscall/wrappers/mach_approximate_time.c b/libsyscall/wrappers/mach_approximate_time.c index 1e0a4e472..cb199cf07 100644 --- a/libsyscall/wrappers/mach_approximate_time.c +++ b/libsyscall/wrappers/mach_approximate_time.c @@ -2,14 +2,14 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -27,12 +27,13 @@ extern uint64_t mach_absolute_time(void); #if defined(__arm64__) || defined(__x86_64__) -uint64_t mach_approximate_time(void) { +uint64_t +mach_approximate_time(void) +{ uint8_t supported = *((uint8_t *)_COMM_PAGE_APPROX_TIME_SUPPORTED); - if (supported) - { - return *((uint64_t *)_COMM_PAGE_APPROX_TIME); - } + if (supported) { + return *((uint64_t *)_COMM_PAGE_APPROX_TIME); + } return mach_absolute_time(); } diff --git a/libsyscall/wrappers/mach_boottime.c b/libsyscall/wrappers/mach_boottime.c index 23b22f93b..5028f3a65 100644 --- a/libsyscall/wrappers/mach_boottime.c +++ b/libsyscall/wrappers/mach_boottime.c @@ -2,14 +2,14 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include diff --git a/libsyscall/wrappers/mach_bridge_remote_time.c b/libsyscall/wrappers/mach_bridge_remote_time.c index cc56a81b2..e90846034 100644 --- a/libsyscall/wrappers/mach_bridge_remote_time.c +++ b/libsyscall/wrappers/mach_bridge_remote_time.c @@ -71,8 +71,8 @@ mach_bridge_remote_time(__unused uint64_t local_time) * This barrier prevents the second read of base_local_ts from being reordered * w.r.t the reads of other values in bt_params. */ - __asm__ volatile("dmb ishld" ::: "memory"); - } while(params.base_local_ts && (params.base_local_ts != commpage_bt_params_p->base_local_ts)); + __asm__ volatile ("dmb ishld" ::: "memory"); + } while (params.base_local_ts && (params.base_local_ts != commpage_bt_params_p->base_local_ts)); if (!local_time) { local_time = now; diff --git a/libsyscall/wrappers/mach_continuous_time.c b/libsyscall/wrappers/mach_continuous_time.c index 61b996de7..353ef0d87 100644 --- a/libsyscall/wrappers/mach_continuous_time.c +++ b/libsyscall/wrappers/mach_continuous_time.c @@ -2,14 +2,14 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -32,21 +32,22 @@ _mach_continuous_time_base(void) { #if !defined(__x86_64__) && !defined(__arm64__) // Deal with the lack of 64-bit loads on arm32 (see mach_approximate_time.s) - while(1) { + while (1) { volatile uint64_t *base_ptr = (volatile uint64_t*)_COMM_PAGE_CONT_TIMEBASE; uint64_t read1, read2; read1 = *base_ptr; #if defined(__arm__) - __asm__ volatile("dsb sy" ::: "memory"); + __asm__ volatile ("dsb sy" ::: "memory"); #elif defined(__i386__) - __asm__ volatile("lfence" ::: "memory"); + __asm__ volatile ("lfence" ::: "memory"); #else #error "unsupported arch" #endif read2 = *base_ptr; - if(__builtin_expect((read1 == read2), 1)) + if (__builtin_expect((read1 == read2), 1)) { return read1; + } } #else // 64-bit return *(volatile uint64_t*)_COMM_PAGE_CONT_TIMEBASE; @@ -61,7 +62,7 @@ _mach_continuous_hwclock(uint64_t *cont_time __unused) uint8_t cont_hwclock = *((uint8_t*)_COMM_PAGE_CONT_HWCLOCK); uint64_t timebase; if (cont_hwclock) { - __asm__ volatile("isb\n" "mrs %0, CNTPCT_EL0" : "=r"(timebase)); + __asm__ volatile ("isb\n" "mrs %0, CNTPCT_EL0" : "=r"(timebase)); *cont_time = timebase; return KERN_SUCCESS; } @@ -78,20 +79,24 @@ _mach_continuous_time(uint64_t* absolute_time, uint64_t* cont_time) volatile uint64_t absolute; do { - read1 = *base_ptr; - absolute = mach_absolute_time(); -#if defined(__arm__) || defined(__arm64__) - /* - * mach_absolute_time() contains an instruction barrier which will - * prevent the speculation of read2 above this point, so we don't - * need another barrier here. - */ + read1 = *base_ptr; + absolute = mach_absolute_time(); +#if defined(__arm__) || defined(__arm64__) + /* + * mach_absolute_time() contains an instruction barrier which will + * prevent the speculation of read2 above this point, so we don't + * need another barrier here. + */ #endif read2 = *base_ptr; } while (__builtin_expect((read1 != read2), 0)); - if (absolute_time) *absolute_time = absolute; - if (cont_time) *cont_time = absolute + read1; + if (absolute_time) { + *absolute_time = absolute; + } + if (cont_time) { + *cont_time = absolute + read1; + } return KERN_SUCCESS; } @@ -100,8 +105,9 @@ uint64_t mach_continuous_time(void) { uint64_t cont_time; - if (_mach_continuous_hwclock(&cont_time) != KERN_SUCCESS) + if (_mach_continuous_hwclock(&cont_time) != KERN_SUCCESS) { _mach_continuous_time(NULL, &cont_time); + } return cont_time; } @@ -111,7 +117,7 @@ mach_continuous_approximate_time(void) /* * No retry loop here because if we use a slightly too old timebase that's * okay, we are approximate time anyway. 
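 * (Contrast _mach_continuous_time_base() and _mach_continuous_time()
 * above, which do retry: a torn or stale read there would yield a wrong
 * absolute timestamp rather than a merely coarse one.)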
- */ + */ volatile register uint64_t time_base = _mach_continuous_time_base(); return time_base + mach_approximate_time(); } diff --git a/libsyscall/wrappers/mach_get_times.c b/libsyscall/wrappers/mach_get_times.c index b078c8eb8..f44b4b1eb 100644 --- a/libsyscall/wrappers/mach_get_times.c +++ b/libsyscall/wrappers/mach_get_times.c @@ -36,7 +36,8 @@ extern uint64_t _mach_continuous_time_base(void); extern int __gettimeofday_with_mach(struct timeval *, struct timezone *, uint64_t *); kern_return_t -mach_get_times(uint64_t* absolute_time, uint64_t* cont_time, struct timespec *tp) { +mach_get_times(uint64_t* absolute_time, uint64_t* cont_time, struct timespec *tp) +{ if (tp == NULL) { return _mach_continuous_time(absolute_time, cont_time); } @@ -75,8 +76,12 @@ mach_get_times(uint64_t* absolute_time, uint64_t* cont_time, struct timespec *tp continuous_time_base_post = _mach_continuous_time_base(); } while (__builtin_expect(continuous_time_base_prior != continuous_time_base_post, 0)); - if (absolute_time) *absolute_time = tbr; - if (cont_time) *cont_time = continuous_time_base_prior + tbr; + if (absolute_time) { + *absolute_time = tbr; + } + if (cont_time) { + *cont_time = continuous_time_base_prior + tbr; + } tp->tv_sec = tv.tv_sec; tp->tv_nsec = tv.tv_usec * NSEC_PER_USEC; diff --git a/libsyscall/wrappers/mach_timebase_info.c b/libsyscall/wrappers/mach_timebase_info.c index 80cd559ad..a56559d79 100644 --- a/libsyscall/wrappers/mach_timebase_info.c +++ b/libsyscall/wrappers/mach_timebase_info.c @@ -2,14 +2,14 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -26,20 +26,23 @@ extern kern_return_t mach_timebase_info_trap(mach_timebase_info_t info); kern_return_t -mach_timebase_info(mach_timebase_info_t info){ - static mach_timebase_info_data_t cached_info; +mach_timebase_info(mach_timebase_info_t info) +{ + static mach_timebase_info_data_t cached_info; /* * This is racy, but because it is safe to initialize twice we avoid a * barrier in the fast path by risking double initialization. 
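 *
 * Typical use converts mach_absolute_time() deltas to nanoseconds; a
 * hypothetical sketch (intermediate overflow ignored for brevity):
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t elapsed_ns = delta_ticks * tb.numer / tb.denom;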
*/ - if (cached_info.numer == 0 || cached_info.denom == 0){ - kern_return_t kr = mach_timebase_info_trap(&cached_info); - if (kr != KERN_SUCCESS) return kr; - } + if (cached_info.numer == 0 || cached_info.denom == 0) { + kern_return_t kr = mach_timebase_info_trap(&cached_info); + if (kr != KERN_SUCCESS) { + return kr; + } + } info->numer = cached_info.numer; info->denom = cached_info.denom; - return KERN_SUCCESS; + return KERN_SUCCESS; } diff --git a/libsyscall/wrappers/open_dprotected_np.c b/libsyscall/wrappers/open_dprotected_np.c index afd213d78..35a4d93bd 100644 --- a/libsyscall/wrappers/open_dprotected_np.c +++ b/libsyscall/wrappers/open_dprotected_np.c @@ -2,14 +2,14 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ #include @@ -27,15 +27,16 @@ int __open_dprotected_np(const char* path, int flags, int class, int dpflags, int mode); -int open_dprotected_np(const char *path, int flags, int class, int dpflags, ...) { +int +open_dprotected_np(const char *path, int flags, int class, int dpflags, ...) 
+{ int mode = 0; - if (flags & O_CREAT) { + if (flags & O_CREAT) { va_list ap; va_start(ap, dpflags); mode = va_arg(ap, int); va_end(ap); } - return (__open_dprotected_np(path, flags, class, dpflags, mode)); + return __open_dprotected_np(path, flags, class, dpflags, mode); } - diff --git a/libsyscall/wrappers/persona.c b/libsyscall/wrappers/persona.c index e167cc38f..67bee1546 100644 --- a/libsyscall/wrappers/persona.c +++ b/libsyscall/wrappers/persona.c @@ -31,19 +31,22 @@ /* syscall entry point */ int __persona(uint32_t operation, uint32_t flags, struct kpersona_info *info, uid_t *id, size_t *idlen); -int kpersona_alloc(struct kpersona_info *info, uid_t *id) +int +kpersona_alloc(struct kpersona_info *info, uid_t *id) { size_t idlen = 1; return __persona(PERSONA_OP_ALLOC, 0, info, id, &idlen); } -int kpersona_dealloc(uid_t id) +int +kpersona_dealloc(uid_t id) { size_t idlen = 1; return __persona(PERSONA_OP_DEALLOC, 0, NULL, &id, &idlen); } -int kpersona_get(uid_t *id) +int +kpersona_get(uid_t *id) { /* persona is a process-static identifier: cache it in a global */ static uid_t p_id = PERSONA_ID_NONE; @@ -51,27 +54,31 @@ int kpersona_get(uid_t *id) int ret = 0; size_t idlen = 1; ret = __persona(PERSONA_OP_GET, 0, NULL, &p_id, &idlen); - if (ret != 0) + if (ret != 0) { return ret; + } } *id = p_id; return 0; } -int kpersona_info(uid_t id, struct kpersona_info *info) +int +kpersona_info(uid_t id, struct kpersona_info *info) { size_t idlen = 1; return __persona(PERSONA_OP_INFO, 0, info, &id, &idlen); } -int kpersona_pidinfo(pid_t pid, struct kpersona_info *info) +int +kpersona_pidinfo(pid_t pid, struct kpersona_info *info) { size_t idlen = 1; uid_t id = (uid_t)pid; return __persona(PERSONA_OP_PIDINFO, 0, info, &id, &idlen); } -int kpersona_find(const char *name, uid_t uid, uid_t *id, size_t *idlen) +int +kpersona_find(const char *name, uid_t uid, uid_t *id, size_t *idlen) { int ret; struct kpersona_info kinfo; @@ -82,10 +89,12 @@ int kpersona_find(const char *name, uid_t uid, uid_t *id, size_t *idlen) kinfo.persona_ngroups = 0; kinfo.persona_groups[0] = 0; kinfo.persona_name[0] = 0; - if (name) + if (name) { strlcpy(kinfo.persona_name, name, sizeof(kinfo.persona_name)); + } ret = __persona(PERSONA_OP_FIND, 0, &kinfo, id, idlen); - if (ret < 0) + if (ret < 0) { return ret; + } return (int)(*idlen); } diff --git a/libsyscall/wrappers/pid_shutdown_networking.c b/libsyscall/wrappers/pid_shutdown_networking.c index 7d96044be..a48f0fe7d 100644 --- a/libsyscall/wrappers/pid_shutdown_networking.c +++ b/libsyscall/wrappers/pid_shutdown_networking.c @@ -2,14 +2,14 @@ * Copyright (c) 2017 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License.
- * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/posix_sem_obsolete.c b/libsyscall/wrappers/posix_sem_obsolete.c index 396a1c6ed..15c33549a 100644 --- a/libsyscall/wrappers/posix_sem_obsolete.c +++ b/libsyscall/wrappers/posix_sem_obsolete.c @@ -29,19 +29,22 @@ * syscalls.master. Instead, provide simple stubs here. */ -int sem_destroy(sem_t *s __unused) +int +sem_destroy(sem_t *s __unused) { errno = ENOSYS; return -1; } -int sem_getvalue(sem_t * __restrict __unused s, int * __restrict __unused x) +int +sem_getvalue(sem_t * __restrict __unused s, int * __restrict __unused x) { errno = ENOSYS; return -1; } -int sem_init(sem_t * __unused s, int __unused x, unsigned int __unused y) +int +sem_init(sem_t * __unused s, int __unused x, unsigned int __unused y) { errno = ENOSYS; return -1; diff --git a/libsyscall/wrappers/quota_obsolete.c b/libsyscall/wrappers/quota_obsolete.c index aa566c0c6..1aff8f182 100644 --- a/libsyscall/wrappers/quota_obsolete.c +++ b/libsyscall/wrappers/quota_obsolete.c @@ -34,12 +34,14 @@ extern int quota(void); extern int setquota(void); -int quota(void) +int +quota(void) { return kill(getpid(), SIGSYS); } -int setquota(void) +int +setquota(void) { return kill(getpid(), SIGSYS); } diff --git a/libsyscall/wrappers/reboot.c b/libsyscall/wrappers/reboot.c index c703ed4c3..f213958b7 100644 --- a/libsyscall/wrappers/reboot.c +++ b/libsyscall/wrappers/reboot.c @@ -2,14 +2,14 @@ * Copyright (c) 2017 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ diff --git a/libsyscall/wrappers/remove-counter.c b/libsyscall/wrappers/remove-counter.c index f1757a654..d2110f519 100644 --- a/libsyscall/wrappers/remove-counter.c +++ b/libsyscall/wrappers/remove-counter.c @@ -2,14 +2,14 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -33,7 +33,8 @@ static int32_t __remove_counter = 0; #endif __uint64_t -__get_remove_counter(void) { +__get_remove_counter(void) +{ #if defined(__arm__) && !defined(_ARM_ARCH_6) return __remove_counter; #else diff --git a/libsyscall/wrappers/rename.c b/libsyscall/wrappers/rename.c index a73c0ec82..e4210d861 100644 --- a/libsyscall/wrappers/rename.c +++ b/libsyscall/wrappers/rename.c @@ -2,14 +2,14 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -28,6 +28,8 @@ int rename(const char *old, const char *new) { int res = __rename(old, new); - if (res == 0) __inc_remove_counter(); + if (res == 0) { + __inc_remove_counter(); + } return res; } diff --git a/libsyscall/wrappers/renameat.c b/libsyscall/wrappers/renameat.c index 727760f67..512d34186 100644 --- a/libsyscall/wrappers/renameat.c +++ b/libsyscall/wrappers/renameat.c @@ -28,6 +28,8 @@ int renameat(int oldfd, const char *old, int newfd, const char *new) { int res = __renameat(oldfd, old, newfd, new); - if (res == 0) __inc_remove_counter(); + if (res == 0) { + __inc_remove_counter(); + } return res; } diff --git a/libsyscall/wrappers/renamex.c b/libsyscall/wrappers/renamex.c index 8bdfdcd0d..89898b190 100644 --- a/libsyscall/wrappers/renamex.c +++ b/libsyscall/wrappers/renamex.c @@ -30,7 +30,9 @@ int renameatx_np(int oldfd, const char *old, int newfd, const char *new, unsigned int flags) { int res = __renameatx_np(oldfd, old, newfd, new, flags); - if (res == 0) __inc_remove_counter(); + if (res == 0) { + __inc_remove_counter(); + } return res; } diff --git a/libsyscall/wrappers/rmdir.c b/libsyscall/wrappers/rmdir.c index 07bfb9588..8d6c663cd 100644 --- a/libsyscall/wrappers/rmdir.c +++ b/libsyscall/wrappers/rmdir.c @@ -2,14 +2,14 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -28,6 +28,8 @@ int rmdir(const char *path) { int res = __rmdir(path); - if (res == 0) __inc_remove_counter(); + if (res == 0) { + __inc_remove_counter(); + } return res; } diff --git a/libsyscall/wrappers/select-base.c b/libsyscall/wrappers/select-base.c index f688d6f36..1a6502241 100644 --- a/libsyscall/wrappers/select-base.c +++ b/libsyscall/wrappers/select-base.c @@ -2,14 +2,14 @@ * Copyright (c) 2005, 2007 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -39,17 +39,17 @@ #if defined(VARIANT_CANCELABLE) || defined(VARIANT_PRE1050) #if !defined(VARIANT_DARWIN_EXTSN) extern int __select(int, fd_set * __restrict, fd_set * __restrict, - fd_set * __restrict, struct timeval * __restrict); + fd_set * __restrict, struct timeval * __restrict); #endif int __pselect(int, fd_set * __restrict, fd_set * __restrict, - fd_set * __restrict, const struct timespec * __restrict, const sigset_t * __restrict); + fd_set * __restrict, const struct timespec * __restrict, const sigset_t * __restrict); #else /* !VARIANT_CANCELABLE && !VARIANT_PRE1050 */ #if !defined(VARIANT_DARWIN_EXTSN) int __select_nocancel(int, fd_set * __restrict, fd_set * __restrict, - fd_set * __restrict, struct timeval * __restrict); + fd_set * __restrict, struct timeval * __restrict); #endif int __pselect_nocancel(int, fd_set * __restrict, fd_set * __restrict, - fd_set * __restrict, const struct timespec * __restrict, const sigset_t * __restrict); + fd_set * __restrict, const struct timespec * __restrict, const sigset_t * __restrict); #endif /* VARIANT_CANCELABLE || VARIANT_PRE1050 */ #if !defined(VARIANT_DARWIN_EXTSN) @@ -60,13 +60,13 @@ int __pselect_nocancel(int, fd_set * __restrict, fd_set * __restrict, */ int select(int nfds, fd_set * __restrict readfds, fd_set * __restrict writefds, - fd_set * __restrict exceptfds, struct timeval * __restrict + fd_set * __restrict exceptfds, struct timeval * __restrict #if defined(VARIANT_LEGACY) || defined(VARIANT_PRE1050) - intimeout + intimeout #else /* !VARIANT_LEGACY && !VARIANT_PRE1050 */ - timeout + timeout #endif /* VARIANT_LEGACY || VARIANT_PRE1050 */ - ) + ) { #if defined(VARIANT_LEGACY) || defined(VARIANT_PRE1050) struct timeval tb, *timeout; @@ -104,8 +104,8 @@ select(int nfds, fd_set * __restrict readfds, fd_set * __restrict writefds, extern int __pthread_sigmask(int, const sigset_t *, sigset_t *); static int _pselect_emulated(int count, fd_set * __restrict rfds, fd_set * __restrict wfds, - fd_set * __restrict efds, const struct timespec * __restrict timo, - const sigset_t * __restrict mask) + fd_set * __restrict efds, const struct timespec * __restrict timo, + const sigset_t * __restrict mask) { sigset_t omask; 
struct timeval tvtimo, *tvp; @@ -121,8 +121,9 @@ _pselect_emulated(int count, fd_set * __restrict rfds, fd_set * __restrict wfds, if (mask != 0) { rv = __pthread_sigmask(SIG_SETMASK, mask, &omask); - if (rv != 0) + if (rv != 0) { return rv; + } } rv = select(count, rfds, wfds, efds, tvp); @@ -142,13 +143,13 @@ _pselect_emulated(int count, fd_set * __restrict rfds, fd_set * __restrict wfds, */ int pselect(int nfds, fd_set * __restrict readfds, fd_set * __restrict writefds, - fd_set * __restrict exceptfds, const struct timespec * __restrict + fd_set * __restrict exceptfds, const struct timespec * __restrict #if defined(VARIANT_LEGACY) || defined(VARIANT_PRE1050) - intimeout, + intimeout, #else /* !VARIANT_LEGACY && !VARIANT_PRE1050 */ - timeout, + timeout, #endif /* VARIANT_LEGACY || VARIANT_PRE1050 */ - const sigset_t * __restrict sigmask) + const sigset_t * __restrict sigmask) { int ret; #if defined(VARIANT_LEGACY) || defined(VARIANT_PRE1050) diff --git a/libsyscall/wrappers/sfi.c b/libsyscall/wrappers/sfi.c index 949f261b0..19bf2ce4c 100644 --- a/libsyscall/wrappers/sfi.c +++ b/libsyscall/wrappers/sfi.c @@ -26,32 +26,38 @@ #include #include -int system_set_sfi_window(uint64_t sfi_window_usec) +int +system_set_sfi_window(uint64_t sfi_window_usec) { return __sfi_ctl(SFI_CTL_OPERATION_SFI_SET_WINDOW, 0, sfi_window_usec, NULL); } -int system_get_sfi_window(uint64_t *sfi_window_usec) +int +system_get_sfi_window(uint64_t *sfi_window_usec) { return __sfi_ctl(SFI_CTL_OPERATION_SFI_GET_WINDOW, 0, 0, sfi_window_usec); } -int sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usec) +int +sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usec) { return __sfi_ctl(SFI_CTL_OPERATION_SET_CLASS_OFFTIME, class_id, offtime_usec, NULL); } -int sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_usec) +int +sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_usec) { return __sfi_ctl(SFI_CTL_OPERATION_GET_CLASS_OFFTIME, class_id, 0, offtime_usec); } -int sfi_process_set_flags(pid_t pid, uint32_t flags) +int +sfi_process_set_flags(pid_t pid, uint32_t flags) { return __sfi_pidctl(SFI_PIDCTL_OPERATION_PID_SET_FLAGS, pid, flags, NULL); } -int sfi_process_get_flags(pid_t pid, uint32_t *flags) +int +sfi_process_get_flags(pid_t pid, uint32_t *flags) { return __sfi_pidctl(SFI_PIDCTL_OPERATION_PID_GET_FLAGS, pid, 0, flags); } diff --git a/libsyscall/wrappers/sigsuspend-base.c b/libsyscall/wrappers/sigsuspend-base.c index 98f3fc3d0..3ff4819df 100644 --- a/libsyscall/wrappers/sigsuspend-base.c +++ b/libsyscall/wrappers/sigsuspend-base.c @@ -2,14 +2,14 @@ * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ /* @(#)sigsuspend.c 1.0 9/22/95 (c) 1995 NeXT */ @@ -28,14 +28,14 @@ int __SIGSUSPEND(const sigset_t); int -sigsuspend (const sigset_t *sigmask_p) +sigsuspend(const sigset_t *sigmask_p) { - sigset_t mask; + sigset_t mask; - if (sigmask_p) - mask = *sigmask_p; - else - sigemptyset(&mask); - return __SIGSUSPEND(mask); + if (sigmask_p) { + mask = *sigmask_p; + } else { + sigemptyset(&mask); + } + return __SIGSUSPEND(mask); } - diff --git a/libsyscall/wrappers/spawn/posix_spawn.c b/libsyscall/wrappers/spawn/posix_spawn.c index 20083809a..819863128 100644 --- a/libsyscall/wrappers/spawn/posix_spawn.c +++ b/libsyscall/wrappers/spawn/posix_spawn.c @@ -2,14 +2,14 @@ * Copyright (c) 2006-2012 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -33,7 +33,7 @@ #include #include #include -#include /* for OPEN_MAX, PATH_MAX */ +#include /* for OPEN_MAX, PATH_MAX */ #include #include #include @@ -70,12 +70,11 @@ int posix_spawnattr_init(posix_spawnattr_t *attr) { _posix_spawnattr_t *psattrp = (_posix_spawnattr_t *)attr; - int err = 0; + int err = 0; if ((*psattrp = (_posix_spawnattr_t)malloc(sizeof(struct _posix_spawnattr))) == NULL) { err = ENOMEM; } else { - /* * The default value of this attribute shall be as if no * flags were set @@ -92,11 +91,11 @@ posix_spawnattr_init(posix_spawnattr_t *attr) (*psattrp)->psa_sigmask = 0; /* The default value of this attribute shall be zero */ - (*psattrp)->psa_pgroup = 0; /* doesn't matter */ + (*psattrp)->psa_pgroup = 0; /* doesn't matter */ /* Default is no binary preferences, i.e. use normal grading */ - memset((*psattrp)->psa_binprefs, 0, - sizeof((*psattrp)->psa_binprefs)); + memset((*psattrp)->psa_binprefs, 0, + sizeof((*psattrp)->psa_binprefs)); /* Default is no port actions to take */ (*psattrp)->psa_ports = NULL; @@ -108,11 +107,11 @@ posix_spawnattr_init(posix_spawnattr_t *attr) (*psattrp)->psa_pcontrol = 0; /* - * Initializing the alignment paddings. + * Initializing the alignment paddings. 
*/ - (*psattrp)->short_padding = 0; - (*psattrp)->flags_padding = 0; + (*psattrp)->short_padding = 0; + (*psattrp)->flags_padding = 0; /* Default is no new apptype requested */ (*psattrp)->psa_apptype = POSIX_SPAWN_PROCESS_TYPE_DEFAULT; @@ -157,7 +156,7 @@ posix_spawnattr_init(posix_spawnattr_t *attr) (*psattrp)->psa_max_addr = 0; } - return (err); + return err; } @@ -190,8 +189,9 @@ posix_spawnattr_destroy(posix_spawnattr_t *attr) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; posix_spawn_destroyportactions_np(attr); @@ -201,7 +201,7 @@ posix_spawnattr_destroy(posix_spawnattr_t *attr) free(psattr); *attr = NULL; - return (0); + return 0; } @@ -226,13 +226,14 @@ posix_spawnattr_setflags(posix_spawnattr_t *attr, short flags) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_flags = flags; - return (0); + return 0; } @@ -260,17 +261,18 @@ posix_spawnattr_setflags(posix_spawnattr_t *attr, short flags) */ int posix_spawnattr_getflags(const posix_spawnattr_t * __restrict attr, - short * __restrict flagsp) + short * __restrict flagsp) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; *flagsp = psattr->psa_flags; - return (0); + return 0; } @@ -296,17 +298,18 @@ posix_spawnattr_getflags(const posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_getsigdefault(const posix_spawnattr_t * __restrict attr, - sigset_t * __restrict sigdefault) + sigset_t * __restrict sigdefault) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; *sigdefault = psattr->psa_sigdefault; - return (0); + return 0; } @@ -331,17 +334,18 @@ posix_spawnattr_getsigdefault(const posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_getpgroup(const posix_spawnattr_t * __restrict attr, - pid_t * __restrict pgroup) + pid_t * __restrict pgroup) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; *pgroup = psattr->psa_pgroup; - return (0); + return 0; } @@ -367,24 +371,25 @@ posix_spawnattr_getpgroup(const posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_getsigmask(const posix_spawnattr_t * __restrict attr, - sigset_t * __restrict sigmask) + sigset_t * __restrict sigmask) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; *sigmask = psattr->psa_sigmask; - return (0); + return 0; } /* * posix_spawnattr_getbinpref_np * - * Description: Obtain the value of the spawn binary preferences attribute from - * the spawn attributes object referenced by 'attr' and place the + * Description: Obtain the value of the spawn binary preferences attribute from + * the spawn attributes object referenced by 'attr' and place the * result into the memory referenced by 'pref'. 
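 *
 * Reached indirectly through the public attribute calls; a hypothetical
 * sketch of that flow (error handling elided):
 *
 *	posix_spawnattr_t attr;
 *	posix_spawnattr_init(&attr);
 *	posix_spawnattr_setspecialport_np(&attr, port, TASK_BOOTSTRAP_PORT);
 *	posix_spawn(&pid, path, NULL, &attr, argv, envp);
 *	posix_spawnattr_destroy(&attr);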
* * Parameters: attr The spawn attributes object whose @@ -394,30 +399,32 @@ posix_spawnattr_getsigmask(const posix_spawnattr_t * __restrict attr, * ocount The actual number copied * * Returns: 0 No binary preferences found - * > 0 The number of cpu types (less than - * count) copied over from 'attr'. + * > 0 The number of cpu types (less than + * count) copied over from 'attr'. * * Implicit Returns: - * *pref (modified) The binary preferences array + * *pref (modified) The binary preferences array * from the spawn attributes object */ int posix_spawnattr_getbinpref_np(const posix_spawnattr_t * __restrict attr, - size_t count, cpu_type_t *pref, size_t * __restrict ocount) + size_t count, cpu_type_t *pref, size_t * __restrict ocount) { _posix_spawnattr_t psattr; int i = 0; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; for (i = 0; i < count && i < 4; i++) { pref[i] = psattr->psa_binprefs[i]; } - if (ocount) + if (ocount) { *ocount = i; + } return 0; } @@ -444,17 +451,18 @@ posix_spawnattr_getbinpref_np(const posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_getpcontrol_np(const posix_spawnattr_t * __restrict attr, - int * __restrict pcontrol) + int * __restrict pcontrol) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; *pcontrol = psattr->psa_pcontrol; - return (0); + return 0; } /* @@ -479,17 +487,18 @@ posix_spawnattr_getpcontrol_np(const posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_getprocesstype_np(const posix_spawnattr_t * __restrict attr, - int * __restrict proctype) + int * __restrict proctype) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; *proctype = psattr->psa_apptype; - return (0); + return 0; } /* * posix_spawnattr_setsigdefault @@ -508,17 +517,18 @@ posix_spawnattr_getprocesstype_np(const posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_setsigdefault(posix_spawnattr_t * __restrict attr, - const sigset_t * __restrict sigdefault) + const sigset_t * __restrict sigdefault) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_sigdefault = *sigdefault; - return (0); + return 0; } @@ -541,13 +551,14 @@ posix_spawnattr_setpgroup(posix_spawnattr_t * attr, pid_t pgroup) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_pgroup = pgroup; - return (0); + return 0; } @@ -568,17 +579,18 @@ posix_spawnattr_setpgroup(posix_spawnattr_t * attr, pid_t pgroup) */ int posix_spawnattr_setsigmask(posix_spawnattr_t * __restrict attr, - const sigset_t * __restrict sigmask) + const sigset_t * __restrict sigmask) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_sigmask = *sigmask; - return (0); + return 0; } @@ -590,27 +602,28 @@ posix_spawnattr_setsigmask(posix_spawnattr_t * __restrict attr, * cpu_type_t array referenced by 'pref', size of 'count' * * Parameters: attr The spawn attributes object whose - * binary preferences are to be set - * count Size of the array pointed to by 'pref' - * pref 
cpu_type_t array of binary preferences + * binary preferences are to be set + * count Size of the array pointed to by 'pref' + * pref cpu_type_t array of binary preferences * ocount The actual number copied * * Returns: 0 No preferences copied - * > 0 Number of preferences copied + * > 0 Number of preferences copied * - * Note: The posix_spawnattr_t currently only holds four cpu_type_t's. - * If the caller provides more preferences than this limit, they - * will be ignored, as reflected in the return value. + * Note: The posix_spawnattr_t currently only holds four cpu_type_t's. + * If the caller provides more preferences than this limit, they + * will be ignored, as reflected in the return value. */ int posix_spawnattr_setbinpref_np(posix_spawnattr_t * __restrict attr, - size_t count, cpu_type_t *pref, size_t * __restrict ocount) + size_t count, cpu_type_t *pref, size_t * __restrict ocount) { _posix_spawnattr_t psattr; int i = 0; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; for (i = 0; i < count && i < 4; i++) { @@ -618,8 +631,9 @@ posix_spawnattr_setbinpref_np(posix_spawnattr_t * __restrict attr, } /* return number of binprefs copied over */ - if (ocount) + if (ocount) { *ocount = i; + } return 0; } @@ -640,17 +654,18 @@ posix_spawnattr_setbinpref_np(posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_setpcontrol_np(posix_spawnattr_t * __restrict attr, - const int pcontrol) + const int pcontrol) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_pcontrol = pcontrol; - return (0); + return 0; } @@ -670,23 +685,24 @@ posix_spawnattr_setpcontrol_np(posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_setprocesstype_np(posix_spawnattr_t * __restrict attr, - const int proctype) + const int proctype) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_apptype = proctype; - return (0); + return 0; } /* * posix_spawn_createportactions_np * Description: create a new posix_spawn_port_actions struct and link - * it into the posix_spawnattr. + * it into the posix_spawnattr. 
*/ static int posix_spawn_createportactions_np(posix_spawnattr_t *attr) @@ -694,14 +710,16 @@ posix_spawn_createportactions_np(posix_spawnattr_t *attr) _posix_spawnattr_t psattr; _posix_spawn_port_actions_t acts; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; acts = (_posix_spawn_port_actions_t)malloc(PS_PORT_ACTIONS_SIZE(2)); - if (acts == NULL) + if (acts == NULL) { return ENOMEM; - + } + acts->pspa_alloc = 2; acts->pspa_count = 0; @@ -711,34 +729,39 @@ posix_spawn_createportactions_np(posix_spawnattr_t *attr) /* * posix_spawn_growportactions_np - * Description: Enlarge the size of portactions if necessary + * Description: Enlarge the size of portactions if necessary */ static int posix_spawn_growportactions_np(posix_spawnattr_t *attr) { _posix_spawnattr_t psattr; - _posix_spawn_port_actions_t acts; + _posix_spawn_port_actions_t acts; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; - acts = psattr->psa_ports; - if (acts == NULL) + acts = psattr->psa_ports; + if (acts == NULL) { return EINVAL; - + } + /* Double number of port actions allocated for */ int newnum = 0; - if (os_mul_overflow(acts->pspa_alloc, 2, &newnum)) + if (os_mul_overflow(acts->pspa_alloc, 2, &newnum)) { return ENOMEM; + } size_t newsize = PS_PORT_ACTIONS_SIZE(newnum); - if (newsize == 0) + if (newsize == 0) { return ENOMEM; + } acts = realloc(acts, newsize); - if (acts == NULL) + if (acts == NULL) { return ENOMEM; - + } + acts->pspa_alloc = newnum; psattr->psa_ports = acts; return 0; @@ -752,16 +775,18 @@ static int posix_spawn_destroyportactions_np(posix_spawnattr_t *attr) { _posix_spawnattr_t psattr; - _posix_spawn_port_actions_t acts; + _posix_spawn_port_actions_t acts; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; - acts = psattr->psa_ports; - if (acts == NULL) + acts = psattr->psa_ports; + if (acts == NULL) { return EINVAL; - + } + free(acts); return 0; } @@ -776,13 +801,15 @@ posix_spawn_destroycoalition_info_np(posix_spawnattr_t *attr) _posix_spawnattr_t psattr; struct _posix_spawn_coalition_info *coal_info; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; coal_info = psattr->psa_coalition_info; - if (coal_info == NULL) + if (coal_info == NULL) { return EINVAL; + } psattr->psa_coalition_info = NULL; free(coal_info); @@ -799,13 +826,15 @@ posix_spawn_destroypersona_info_np(posix_spawnattr_t *attr) _posix_spawnattr_t psattr; struct _posix_spawn_persona_info *persona; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; persona = psattr->psa_persona_info; - if (persona == NULL) + if (persona == NULL) { return EINVAL; + } psattr->psa_persona_info = NULL; free(persona); @@ -857,22 +886,22 @@ posix_spawn_appendportaction_np(posix_spawnattr_t *attr, _ps_port_action_t *act) /* * posix_spawnattr_setspecialport_np * - * Description: Set a new value for a mach special port in the spawned task. + * Description: Set a new value for a mach special port in the spawned task. 
* * Parameters: attr The spawn attributes object for the - * new process - * new_port The new value for the special port - * which The particular port to be set - * (see task_set_special_port for details) + * new process + * new_port The new value for the special port + * which The particular port to be set + * (see task_set_special_port for details) * * Returns: 0 Success - * ENOMEM Couldn't allocate memory + * ENOMEM Couldn't allocate memory */ -int +int posix_spawnattr_setspecialport_np( - posix_spawnattr_t *attr, - mach_port_t new_port, - int which) + posix_spawnattr_t *attr, + mach_port_t new_port, + int which) { _ps_port_action_t action = { .port_type = PSPA_SPECIAL, @@ -888,23 +917,23 @@ posix_spawnattr_setspecialport_np( * Description: Set a new port for a set of exception ports in the spawned task. * * Parameters: attr The spawn attributes object for the - * new process - * mask A bitfield indicating which exceptions - * to associate the port with - * new_port The new value for the exception port - * behavior The default behavior for the port - * flavor The default flavor for the port - * (see task_set_exception_ports) + * new process + * mask A bitfield indicating which exceptions + * to associate the port with + * new_port The new value for the exception port + * behavior The default behavior for the port + * flavor The default flavor for the port + * (see task_set_exception_ports) * * Returns: 0 Success */ -int +int posix_spawnattr_setexceptionports_np( - posix_spawnattr_t *attr, - exception_mask_t mask, - mach_port_t new_port, - exception_behavior_t behavior, - thread_state_flavor_t flavor) + posix_spawnattr_t *attr, + exception_mask_t mask, + mach_port_t new_port, + exception_behavior_t behavior, + thread_state_flavor_t flavor) { _ps_port_action_t action = { .port_type = PSPA_EXCEPTION, @@ -924,15 +953,15 @@ posix_spawnattr_setexceptionports_np( * the new task. 
* * Parameters: attr The spawn attributes object for the - * new process - * au_sessionport The audit session send port right + * new process + * au_sessionport The audit session send port right * * Returns: 0 Success */ -int +int posix_spawnattr_setauditsessionport_np( - posix_spawnattr_t *attr, - mach_port_t au_sessionport) + posix_spawnattr_t *attr, + mach_port_t au_sessionport) { _ps_port_action_t action = { .port_type = PSPA_AU_SESSION, @@ -972,7 +1001,7 @@ int posix_spawn_file_actions_init(posix_spawn_file_actions_t *file_actions) { _posix_spawn_file_actions_t *psactsp = (_posix_spawn_file_actions_t *)file_actions; - int err = 0; + int err = 0; if ((*psactsp = (_posix_spawn_file_actions_t)malloc(PSF_ACTIONS_SIZE(PSF_ACTIONS_INIT_COUNT))) == NULL) { err = ENOMEM; @@ -981,7 +1010,7 @@ posix_spawn_file_actions_init(posix_spawn_file_actions_t *file_actions) (*psactsp)->psfa_act_count = 0; } - return (err); + return err; } @@ -1010,14 +1039,15 @@ posix_spawn_file_actions_destroy(posix_spawn_file_actions_t *file_actions) { _posix_spawn_file_actions_t psacts; - if (file_actions == NULL || *file_actions == NULL) + if (file_actions == NULL || *file_actions == NULL) { return EINVAL; + } psacts = *(_posix_spawn_file_actions_t *)file_actions; free(psacts); *file_actions = NULL; - return (0); + return 0; } @@ -1042,12 +1072,14 @@ static int _posix_spawn_file_actions_grow(_posix_spawn_file_actions_t *psactsp) { int newnum = 0; - if (os_mul_overflow((*psactsp)->psfa_act_alloc, 2, &newnum)) + if (os_mul_overflow((*psactsp)->psfa_act_alloc, 2, &newnum)) { return ENOMEM; + } size_t newsize = PSF_ACTIONS_SIZE(newnum); - if (newsize == 0) + if (newsize == 0) { return ENOMEM; + } /* * XXX may want to impose an administrative limit here; POSIX does @@ -1092,26 +1124,29 @@ _posix_spawn_file_actions_grow(_posix_spawn_file_actions_t *psactsp) */ int posix_spawn_file_actions_addopen( - posix_spawn_file_actions_t * __restrict file_actions, - int filedes, const char * __restrict path, int oflag, - mode_t mode) + posix_spawn_file_actions_t * __restrict file_actions, + int filedes, const char * __restrict path, int oflag, + mode_t mode) { _posix_spawn_file_actions_t *psactsp; _psfa_action_t *psfileact; - if (file_actions == NULL || *file_actions == NULL) + if (file_actions == NULL || *file_actions == NULL) { return EINVAL; + } psactsp = (_posix_spawn_file_actions_t *)file_actions; /* Range check; required by POSIX */ - if (filedes < 0 || filedes >= OPEN_MAX) - return (EBADF); + if (filedes < 0 || filedes >= OPEN_MAX) { + return EBADF; + } /* If we do not have enough slots, grow the structure */ if ((*psactsp)->psfa_act_count == (*psactsp)->psfa_act_alloc) { /* need to grow file actions structure */ - if (_posix_spawn_file_actions_grow(psactsp)) - return (ENOMEM); + if (_posix_spawn_file_actions_grow(psactsp)) { + return ENOMEM; + } } /* @@ -1125,7 +1160,7 @@ posix_spawn_file_actions_addopen( psfileact->psfaa_openargs.psfao_mode = mode; strlcpy(psfileact->psfaa_openargs.psfao_path, path, PATH_MAX); - return (0); + return 0; } @@ -1151,24 +1186,27 @@ posix_spawn_file_actions_addopen( */ int posix_spawn_file_actions_addclose(posix_spawn_file_actions_t *file_actions, - int filedes) + int filedes) { _posix_spawn_file_actions_t *psactsp; _psfa_action_t *psfileact; - if (file_actions == NULL || *file_actions == NULL) + if (file_actions == NULL || *file_actions == NULL) { return EINVAL; + } psactsp = (_posix_spawn_file_actions_t *)file_actions; /* Range check; required by POSIX */ - if (filedes < 0 || filedes >= 
OPEN_MAX) - return (EBADF); + if (filedes < 0 || filedes >= OPEN_MAX) { + return EBADF; + } /* If we do not have enough slots, grow the structure */ if ((*psactsp)->psfa_act_count == (*psactsp)->psfa_act_alloc) { /* need to grow file actions structure */ - if (_posix_spawn_file_actions_grow(psactsp)) - return (ENOMEM); + if (_posix_spawn_file_actions_grow(psactsp)) { + return ENOMEM; + } } /* @@ -1179,7 +1217,7 @@ posix_spawn_file_actions_addclose(posix_spawn_file_actions_t *file_actions, psfileact->psfaa_type = PSFA_CLOSE; psfileact->psfaa_filedes = filedes; - return (0); + return 0; } @@ -1207,25 +1245,28 @@ posix_spawn_file_actions_addclose(posix_spawn_file_actions_t *file_actions, */ int posix_spawn_file_actions_adddup2(posix_spawn_file_actions_t *file_actions, - int filedes, int newfiledes) + int filedes, int newfiledes) { _posix_spawn_file_actions_t *psactsp; _psfa_action_t *psfileact; - if (file_actions == NULL || *file_actions == NULL) + if (file_actions == NULL || *file_actions == NULL) { return EINVAL; + } psactsp = (_posix_spawn_file_actions_t *)file_actions; /* Range check; required by POSIX */ if (filedes < 0 || filedes >= OPEN_MAX || - newfiledes < 0 || newfiledes >= OPEN_MAX) - return (EBADF); + newfiledes < 0 || newfiledes >= OPEN_MAX) { + return EBADF; + } /* If we do not have enough slots, grow the structure */ if ((*psactsp)->psfa_act_count == (*psactsp)->psfa_act_alloc) { /* need to grow file actions structure */ - if (_posix_spawn_file_actions_grow(psactsp)) - return (ENOMEM); + if (_posix_spawn_file_actions_grow(psactsp)) { + return ENOMEM; + } } /* @@ -1237,7 +1278,7 @@ posix_spawn_file_actions_adddup2(posix_spawn_file_actions_t *file_actions, psfileact->psfaa_filedes = filedes; psfileact->psfaa_openargs.psfao_oflag = newfiledes; - return (0); + return 0; } /* @@ -1275,25 +1316,28 @@ posix_spawn_file_actions_adddup2(posix_spawn_file_actions_t *file_actions, */ int posix_spawn_file_actions_addinherit_np(posix_spawn_file_actions_t *file_actions, - int filedes) + int filedes) { _posix_spawn_file_actions_t *psactsp; _psfa_action_t *psfileact; - if (file_actions == NULL || *file_actions == NULL) - return (EINVAL); + if (file_actions == NULL || *file_actions == NULL) { + return EINVAL; + } psactsp = (_posix_spawn_file_actions_t *)file_actions; /* Range check; required by POSIX */ - if (filedes < 0 || filedes >= OPEN_MAX) - return (EBADF); + if (filedes < 0 || filedes >= OPEN_MAX) { + return EBADF; + } -#if defined(POSIX_SPAWN_CLOEXEC_DEFAULT) // TODO: delete this check +#if defined(POSIX_SPAWN_CLOEXEC_DEFAULT) // TODO: delete this check /* If we do not have enough slots, grow the structure */ if ((*psactsp)->psfa_act_count == (*psactsp)->psfa_act_alloc) { /* need to grow file actions structure */ - if (_posix_spawn_file_actions_grow(psactsp)) - return (ENOMEM); + if (_posix_spawn_file_actions_grow(psactsp)) { + return ENOMEM; + } } /* @@ -1304,47 +1348,49 @@ posix_spawn_file_actions_addinherit_np(posix_spawn_file_actions_t *file_actions, psfileact->psfaa_type = PSFA_INHERIT; psfileact->psfaa_filedes = filedes; #endif - return (0); + return 0; } int posix_spawnattr_setcpumonitor_default(posix_spawnattr_t * __restrict attr) { - return (posix_spawnattr_setcpumonitor(attr, PROC_POLICY_CPUMON_DEFAULTS, 0)); + return posix_spawnattr_setcpumonitor(attr, PROC_POLICY_CPUMON_DEFAULTS, 0); } int posix_spawnattr_setcpumonitor(posix_spawnattr_t * __restrict attr, - uint64_t percent, uint64_t interval) + uint64_t percent, uint64_t interval) { _posix_spawnattr_t psattr; - if (attr == 
NULL || *attr == NULL) - return (EINVAL); + if (attr == NULL || *attr == NULL) { + return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_cpumonitor_percent = percent; psattr->psa_cpumonitor_interval = interval; - return (0); + return 0; } int posix_spawnattr_getcpumonitor(posix_spawnattr_t * __restrict attr, - uint64_t *percent, uint64_t *interval) + uint64_t *percent, uint64_t *interval) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) - return (EINVAL); + if (attr == NULL || *attr == NULL) { + return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; *percent = psattr->psa_cpumonitor_percent; *interval = psattr->psa_cpumonitor_interval; - return (0); + return 0; } #if TARGET_OS_EMBEDDED @@ -1367,19 +1413,19 @@ posix_spawnattr_getcpumonitor(posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_setjetsam(posix_spawnattr_t * __restrict attr, - short flags, int priority, int memlimit) + short flags, int priority, int memlimit) { short flags_ext = flags; - if (flags & POSIX_SPAWN_JETSAM_MEMLIMIT_FATAL) { - flags_ext |= POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL; - flags_ext |= POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL; - } else { - flags_ext &= ~POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL; - flags_ext &= ~POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL; - } + if (flags & POSIX_SPAWN_JETSAM_MEMLIMIT_FATAL) { + flags_ext |= POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL; + flags_ext |= POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL; + } else { + flags_ext &= ~POSIX_SPAWN_JETSAM_MEMLIMIT_ACTIVE_FATAL; + flags_ext &= ~POSIX_SPAWN_JETSAM_MEMLIMIT_INACTIVE_FATAL; + } - return (posix_spawnattr_setjetsam_ext(attr, flags_ext, priority, memlimit, memlimit)); + return posix_spawnattr_setjetsam_ext(attr, flags_ext, priority, memlimit, memlimit); } #endif /* TARGET_OS_EMBEDDED */ @@ -1402,12 +1448,13 @@ posix_spawnattr_setjetsam(posix_spawnattr_t * __restrict attr, */ int posix_spawnattr_setjetsam_ext(posix_spawnattr_t * __restrict attr, - short flags, int priority, int memlimit_active, int memlimit_inactive) + short flags, int priority, int memlimit_active, int memlimit_inactive) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; @@ -1417,24 +1464,24 @@ posix_spawnattr_setjetsam_ext(posix_spawnattr_t * __restrict attr, psattr->psa_memlimit_active = memlimit_active; psattr->psa_memlimit_inactive = memlimit_inactive; - return (0); + return 0; } int posix_spawnattr_set_threadlimit_ext(posix_spawnattr_t * __restrict attr, - int thread_limit) + int thread_limit) { _posix_spawnattr_t psattr; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_thread_limit = thread_limit; - return (0); - + return 0; } @@ -1442,12 +1489,12 @@ posix_spawnattr_set_threadlimit_ext(posix_spawnattr_t * __restrict attr, * posix_spawnattr_set_importancewatch_port_np * * Description: Mark ports referred to by these rights - * to boost the new task instead of their current task + * to boost the new task instead of their current task * for the spawn attribute object referred to by 'attr'. * Ports must be valid at posix_spawn time. They will NOT be * consumed by the kernel, so they must be deallocated after the spawn returns. * (If you are SETEXEC-ing, they are cleaned up by the exec operation). - * + * * The maximum number of watch ports allowed is defined by POSIX_SPAWN_IMPORTANCE_PORT_COUNT. 
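 *
 * An illustrative call with hypothetical names (a sketch only; 'svc_port'
 * is assumed to be a send right the caller already holds, and 'attr' an
 * initialized posix_spawnattr_t):
 *
 *	mach_port_t watch_ports[] = { svc_port };
 *	int err = posix_spawnattr_set_importancewatch_port_np(
 *	    &attr, 1, watch_ports);
 *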
* * Parameters: count Number of ports in portarray @@ -1458,9 +1505,9 @@ posix_spawnattr_set_threadlimit_ext(posix_spawnattr_t * __restrict attr, * ENOMEM Insufficient memory exists to add to * the spawn port actions object. */ -int +int posix_spawnattr_set_importancewatch_port_np(posix_spawnattr_t * __restrict attr, - int count, mach_port_t portarray[]) + int count, mach_port_t portarray[]) { int err = 0, i; @@ -1489,31 +1536,35 @@ posix_spawnattr_macpolicyinfo_lookup(_posix_spawn_mac_policy_extensions_t psmx, { int i; - if (psmx == NULL) + if (psmx == NULL) { return NULL; - + } + for (i = 0; i < psmx->psmx_count; i++) { _ps_mac_policy_extension_t *extension = &psmx->psmx_extensions[i]; - if (strcmp(extension->policyname, policyname) == 0) + if (strcmp(extension->policyname, policyname) == 0) { return extension; + } } return NULL; } int posix_spawnattr_getmacpolicyinfo_np(const posix_spawnattr_t * __restrict attr, - const char *policyname, void **datap, size_t *datalenp) + const char *policyname, void **datap, size_t *datalenp) { _posix_spawnattr_t psattr; _ps_mac_policy_extension_t *extension; - if (attr == NULL || *attr == NULL || policyname == NULL || datap == NULL) + if (attr == NULL || *attr == NULL || policyname == NULL || datap == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; extension = posix_spawnattr_macpolicyinfo_lookup(psattr->psa_mac_extensions, policyname); - if (extension == NULL) + if (extension == NULL) { return ESRCH; + } *datap = (void *)(uintptr_t)extension->data; if (datalenp != NULL) { *datalenp = (size_t)extension->datalen; @@ -1523,14 +1574,15 @@ posix_spawnattr_getmacpolicyinfo_np(const posix_spawnattr_t * __restrict attr, int posix_spawnattr_setmacpolicyinfo_np(posix_spawnattr_t * __restrict attr, - const char *policyname, void *data, size_t datalen) + const char *policyname, void *data, size_t datalen) { _posix_spawnattr_t psattr; _posix_spawn_mac_policy_extensions_t psmx; _ps_mac_policy_extension_t *extension; - if (attr == NULL || *attr == NULL || policyname == NULL) + if (attr == NULL || *attr == NULL || policyname == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psmx = psattr->psa_mac_extensions; @@ -1539,24 +1591,26 @@ posix_spawnattr_setmacpolicyinfo_np(posix_spawnattr_t * __restrict attr, extension->data = (uintptr_t)data; extension->datalen = datalen; return 0; - } - else if (psmx == NULL) { + } else if (psmx == NULL) { psmx = psattr->psa_mac_extensions = malloc(PS_MAC_EXTENSIONS_SIZE(PS_MAC_EXTENSIONS_INIT_COUNT)); - if (psmx == NULL) + if (psmx == NULL) { return ENOMEM; + } psmx->psmx_alloc = PS_MAC_EXTENSIONS_INIT_COUNT; psmx->psmx_count = 0; - } - else if (psmx->psmx_count == psmx->psmx_alloc) { + } else if (psmx->psmx_count == psmx->psmx_alloc) { int newnum = 0; - if (os_mul_overflow(psmx->psmx_alloc, 2, &newnum)) + if (os_mul_overflow(psmx->psmx_alloc, 2, &newnum)) { return ENOMEM; + } size_t extsize = PS_MAC_EXTENSIONS_SIZE(newnum); - if (extsize == 0) + if (extsize == 0) { return ENOMEM; + } psmx = psattr->psa_mac_extensions = reallocf(psmx, extsize); - if (psmx == NULL) + if (psmx == NULL) { return ENOMEM; + } psmx->psmx_alloc = newnum; } extension = &psmx->psmx_extensions[psmx->psmx_count]; @@ -1567,8 +1621,9 @@ posix_spawnattr_setmacpolicyinfo_np(posix_spawnattr_t * __restrict attr, return 0; } -int posix_spawnattr_setcoalition_np(const posix_spawnattr_t * __restrict attr, - uint64_t coalitionid, int type, int role) +int +posix_spawnattr_setcoalition_np(const posix_spawnattr_t * __restrict attr, + uint64_t 
coalitionid, int type, int role) { _posix_spawnattr_t psattr; struct _posix_spawn_coalition_info *coal_info; @@ -1576,16 +1631,18 @@ int posix_spawnattr_setcoalition_np(const posix_spawnattr_t * __restrict attr, if (attr == NULL || *attr == NULL) { return EINVAL; } - if (type < 0 || type > COALITION_TYPE_MAX) + if (type < 0 || type > COALITION_TYPE_MAX) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; coal_info = psattr->psa_coalition_info; if (!coal_info) { coal_info = (struct _posix_spawn_coalition_info *)malloc(sizeof(*coal_info)); - if (!coal_info) + if (!coal_info) { return ENOMEM; + } memset(coal_info, 0, sizeof(*coal_info)); psattr->psa_coalition_info = coal_info; } @@ -1597,7 +1654,8 @@ int posix_spawnattr_setcoalition_np(const posix_spawnattr_t * __restrict attr, } -int posix_spawnattr_set_qos_clamp_np(const posix_spawnattr_t * __restrict attr, uint64_t qos_clamp) +int +posix_spawnattr_set_qos_clamp_np(const posix_spawnattr_t * __restrict attr, uint64_t qos_clamp) { _posix_spawnattr_t psattr; @@ -1605,8 +1663,9 @@ int posix_spawnattr_set_qos_clamp_np(const posix_spawnattr_t * __restrict attr, return EINVAL; } - if (qos_clamp >= POSIX_SPAWN_PROC_CLAMP_LAST) + if (qos_clamp >= POSIX_SPAWN_PROC_CLAMP_LAST) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; psattr->psa_qos_clamp = qos_clamp; @@ -1626,10 +1685,11 @@ posix_spawnattr_get_qos_clamp_np(const posix_spawnattr_t * __restrict attr, uint psattr = *(_posix_spawnattr_t *)attr; *qos_clampp = psattr->psa_qos_clamp; - return (0); + return 0; } -int posix_spawnattr_set_darwin_role_np(const posix_spawnattr_t * __restrict attr, uint64_t darwin_role) +int +posix_spawnattr_set_darwin_role_np(const posix_spawnattr_t * __restrict attr, uint64_t darwin_role) { _posix_spawnattr_t psattr; @@ -1655,7 +1715,7 @@ posix_spawnattr_get_darwin_role_np(const posix_spawnattr_t * __restrict attr, ui psattr = *(_posix_spawnattr_t *)attr; *darwin_rolep = psattr->psa_darwin_role; - return (0); + return 0; } @@ -1665,19 +1725,22 @@ posix_spawnattr_set_persona_np(const posix_spawnattr_t * __restrict attr, uid_t _posix_spawnattr_t psattr; struct _posix_spawn_persona_info *persona; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } - if (flags & ~POSIX_SPAWN_PERSONA_ALL_FLAGS) + if (flags & ~POSIX_SPAWN_PERSONA_ALL_FLAGS) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; persona = psattr->psa_persona_info; if (!persona) { persona = (struct _posix_spawn_persona_info *)malloc(sizeof(*persona)); - if (!persona) + if (!persona) { return ENOMEM; + } persona->pspi_uid = 0; persona->pspi_gid = 0; persona->pspi_ngroups = 0; @@ -1698,16 +1761,19 @@ posix_spawnattr_set_persona_uid_np(const posix_spawnattr_t * __restrict attr, ui _posix_spawnattr_t psattr; struct _posix_spawn_persona_info *persona; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; persona = psattr->psa_persona_info; - if (!persona) + if (!persona) { return EINVAL; + } - if (!(persona->pspi_flags & (POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE | POSIX_SPAWN_PERSONA_FLAGS_VERIFY))) + if (!(persona->pspi_flags & (POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE | POSIX_SPAWN_PERSONA_FLAGS_VERIFY))) { return EINVAL; + } persona->pspi_uid = uid; @@ -1722,16 +1788,19 @@ posix_spawnattr_set_persona_gid_np(const posix_spawnattr_t * __restrict attr, gi _posix_spawnattr_t psattr; struct _posix_spawn_persona_info *persona; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr 
== NULL) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; persona = psattr->psa_persona_info; - if (!persona) + if (!persona) { return EINVAL; + } - if (!(persona->pspi_flags & (POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE | POSIX_SPAWN_PERSONA_FLAGS_VERIFY))) + if (!(persona->pspi_flags & (POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE | POSIX_SPAWN_PERSONA_FLAGS_VERIFY))) { return EINVAL; + } persona->pspi_gid = gid; @@ -1746,26 +1815,32 @@ posix_spawnattr_set_persona_groups_np(const posix_spawnattr_t * __restrict attr, _posix_spawnattr_t psattr; struct _posix_spawn_persona_info *persona; - if (attr == NULL || *attr == NULL) + if (attr == NULL || *attr == NULL) { return EINVAL; + } - if (gidarray == NULL) + if (gidarray == NULL) { return EINVAL; + } - if (ngroups > NGROUPS || ngroups < 0) + if (ngroups > NGROUPS || ngroups < 0) { return EINVAL; + } psattr = *(_posix_spawnattr_t *)attr; persona = psattr->psa_persona_info; - if (!persona) + if (!persona) { return EINVAL; + } - if (!(persona->pspi_flags & (POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE | POSIX_SPAWN_PERSONA_FLAGS_VERIFY))) + if (!(persona->pspi_flags & (POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE | POSIX_SPAWN_PERSONA_FLAGS_VERIFY))) { return EINVAL; + } persona->pspi_ngroups = ngroups; - for (int i = 0; i < ngroups; i++) + for (int i = 0; i < ngroups; i++) { persona->pspi_groups[i] = gidarray[i]; + } persona->pspi_gmuid = gmuid; @@ -1819,14 +1894,14 @@ posix_spawnattr_set_max_addr_np(const posix_spawnattr_t * __restrict attr, uint6 * 0 or a -1, with the 'errno' variable being set. */ extern int __posix_spawn(pid_t * __restrict, const char * __restrict, - struct _posix_spawn_args_desc *, - char *const argv[ __restrict], char *const envp[ __restrict]); + struct _posix_spawn_args_desc *, + char *const argv[__restrict], char *const envp[__restrict]); int posix_spawn(pid_t * __restrict pid, const char * __restrict path, - const posix_spawn_file_actions_t *file_actions, - const posix_spawnattr_t * __restrict attrp, - char *const argv[ __restrict], char *const envp[ __restrict]) + const posix_spawn_file_actions_t *file_actions, + const posix_spawnattr_t * __restrict attrp, + char *const argv[__restrict], char *const envp[__restrict]) { int saveerrno = errno; int ret; @@ -1843,8 +1918,8 @@ posix_spawn(pid_t * __restrict pid, const char * __restrict path, * kernel efficiency, even though it would mean copying * the data in user space. 
*/ - if ((file_actions != NULL && (*file_actions != NULL) && (*(_posix_spawn_file_actions_t *)file_actions)->psfa_act_count > 0) || attrp != NULL) { - struct _posix_spawn_args_desc ad; + if ((file_actions != NULL && (*file_actions != NULL) && (*(_posix_spawn_file_actions_t *)file_actions)->psfa_act_count > 0) || attrp != NULL) { + struct _posix_spawn_args_desc ad; memset(&ad, 0, sizeof(ad)); if (attrp != NULL && *attrp != NULL) { @@ -1883,7 +1958,7 @@ posix_spawn(pid_t * __restrict pid, const char * __restrict path, } if (file_actions != NULL && *file_actions != NULL) { _posix_spawn_file_actions_t psactsp = - *(_posix_spawn_file_actions_t *)file_actions; + *(_posix_spawn_file_actions_t *)file_actions; if (psactsp->psfa_act_count > 0) { size_t fa_size = PSF_ACTIONS_SIZE(psactsp->psfa_act_count); @@ -1898,13 +1973,14 @@ posix_spawn(pid_t * __restrict pid, const char * __restrict path, } ret = __posix_spawn(pid, path, &ad, argv, envp); - } else + } else { ret = __posix_spawn(pid, path, NULL, argv, envp); + } out: - if (ret < 0) + if (ret < 0) { ret = errno; + } errno = saveerrno; return ret; } - diff --git a/libsyscall/wrappers/spawn/spawn.h b/libsyscall/wrappers/spawn/spawn.h index 663dd3ca5..7fa018967 100644 --- a/libsyscall/wrappers/spawn/spawn.h +++ b/libsyscall/wrappers/spawn/spawn.h @@ -2,14 +2,14 @@ * Copyright (c) 2006, 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,13 +17,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ -#ifndef _SPAWN_H_ -#define _SPAWN_H_ +#ifndef _SPAWN_H_ +#define _SPAWN_H_ /* * [SPN] Support for _POSIX_SPAWN @@ -58,86 +58,86 @@ __BEGIN_DECLS */ __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawn(pid_t * __restrict, const char * __restrict, - const posix_spawn_file_actions_t *, - const posix_spawnattr_t * __restrict, - char *const __argv[ __restrict], - char *const __envp[ __restrict]) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawn(pid_t * __restrict, const char * __restrict, + const posix_spawn_file_actions_t *, + const posix_spawnattr_t * __restrict, + char *const __argv[__restrict], + char *const __envp[__restrict]) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnp(pid_t * __restrict, const char * __restrict, - const posix_spawn_file_actions_t *, - const posix_spawnattr_t * __restrict, - char *const __argv[ __restrict], - char *const __envp[ __restrict]) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnp(pid_t * __restrict, const char * __restrict, + const posix_spawn_file_actions_t *, + const posix_spawnattr_t * __restrict, + char *const __argv[__restrict], + char *const __envp[__restrict]) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawn_file_actions_addclose(posix_spawn_file_actions_t *, int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawn_file_actions_addclose(posix_spawn_file_actions_t *, int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawn_file_actions_adddup2(posix_spawn_file_actions_t *, int, - int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawn_file_actions_adddup2(posix_spawn_file_actions_t *, int, + int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawn_file_actions_addopen( - posix_spawn_file_actions_t * __restrict, int, - const char * __restrict, int, mode_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawn_file_actions_addopen( + posix_spawn_file_actions_t * __restrict, int, + const char * __restrict, int, mode_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawn_file_actions_destroy(posix_spawn_file_actions_t *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawn_file_actions_destroy(posix_spawn_file_actions_t *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawn_file_actions_init(posix_spawn_file_actions_t *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawn_file_actions_init(posix_spawn_file_actions_t *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_destroy(posix_spawnattr_t *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_destroy(posix_spawnattr_t *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_getsigdefault(const posix_spawnattr_t * __restrict, - sigset_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_getsigdefault(const posix_spawnattr_t * __restrict, + sigset_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_getflags(const posix_spawnattr_t * __restrict, - short * 
__restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_getflags(const posix_spawnattr_t * __restrict, + short * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_getpgroup(const posix_spawnattr_t * __restrict, - pid_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_getpgroup(const posix_spawnattr_t * __restrict, + pid_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_getsigmask(const posix_spawnattr_t * __restrict, - sigset_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_getsigmask(const posix_spawnattr_t * __restrict, + sigset_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_init(posix_spawnattr_t *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_init(posix_spawnattr_t *) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_setsigdefault(posix_spawnattr_t * __restrict, - const sigset_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_setsigdefault(posix_spawnattr_t * __restrict, + const sigset_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_setflags(posix_spawnattr_t *, short) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_setflags(posix_spawnattr_t *, short) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_setpgroup(posix_spawnattr_t *, pid_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_setpgroup(posix_spawnattr_t *, pid_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_setsigmask(posix_spawnattr_t * __restrict, - const sigset_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_setsigmask(posix_spawnattr_t * __restrict, + const sigset_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); -#if 0 /* _POSIX_PRIORITY_SCHEDULING [PS] : not supported */ -int posix_spawnattr_setschedparam(posix_spawnattr_t * __restrict, - const struct sched_param * __restrict); -int posix_spawnattr_setschedpolicy(posix_spawnattr_t *, int); -int posix_spawnattr_getschedparam(const posix_spawnattr_t * __restrict, - struct sched_param * __restrict); -int posix_spawnattr_getschedpolicy(const posix_spawnattr_t * __restrict, - int * __restrict); -#endif /* 0 */ +#if 0 /* _POSIX_PRIORITY_SCHEDULING [PS] : not supported */ +int posix_spawnattr_setschedparam(posix_spawnattr_t * __restrict, + const struct sched_param * __restrict); +int posix_spawnattr_setschedpolicy(posix_spawnattr_t *, int); +int posix_spawnattr_getschedparam(const posix_spawnattr_t * __restrict, + struct sched_param * __restrict); +int posix_spawnattr_getschedpolicy(const posix_spawnattr_t * __restrict, + int * __restrict); +#endif /* 0 */ __END_DECLS -#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) +#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) /* * Darwin-specific extensions below */ @@ -150,31 +150,31 @@ __END_DECLS __BEGIN_DECLS __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_getbinpref_np(const posix_spawnattr_t * __restrict, - size_t, cpu_type_t 
*__restrict, size_t *__restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_getbinpref_np(const posix_spawnattr_t * __restrict, + size_t, cpu_type_t *__restrict, size_t *__restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_setauditsessionport_np(posix_spawnattr_t *__restrict, - mach_port_t) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); +int posix_spawnattr_setauditsessionport_np(posix_spawnattr_t * __restrict, + mach_port_t) __OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_setbinpref_np(posix_spawnattr_t * __restrict, - size_t, cpu_type_t *__restrict, size_t *__restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_setbinpref_np(posix_spawnattr_t * __restrict, + size_t, cpu_type_t *__restrict, size_t *__restrict) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_setexceptionports_np(posix_spawnattr_t *__restrict, - exception_mask_t, mach_port_t, - exception_behavior_t, thread_state_flavor_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_setexceptionports_np(posix_spawnattr_t * __restrict, + exception_mask_t, mach_port_t, + exception_behavior_t, thread_state_flavor_t) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawnattr_setspecialport_np(posix_spawnattr_t *__restrict, - mach_port_t, int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); +int posix_spawnattr_setspecialport_np(posix_spawnattr_t * __restrict, + mach_port_t, int) __OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0); __WATCHOS_PROHIBITED __TVOS_PROHIBITED -int posix_spawn_file_actions_addinherit_np(posix_spawn_file_actions_t *, - int) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); +int posix_spawn_file_actions_addinherit_np(posix_spawn_file_actions_t *, + int) __OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_4_3); __END_DECLS #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ -#endif /* _SPAWN_H_ */ +#endif /* _SPAWN_H_ */ diff --git a/libsyscall/wrappers/spawn/spawn_private.h b/libsyscall/wrappers/spawn/spawn_private.h index e0ea9d495..41878e746 100644 --- a/libsyscall/wrappers/spawn/spawn_private.h +++ b/libsyscall/wrappers/spawn/spawn_private.h @@ -36,30 +36,30 @@ int posix_spawnattr_setpcontrol_np(posix_spawnattr_t *, const int) __OSX_AVA int posix_spawnattr_getprocesstype_np(const posix_spawnattr_t * __restrict, int * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0); int posix_spawnattr_setprocesstype_np(posix_spawnattr_t *, const int) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0); -int posix_spawnattr_setcpumonitor(posix_spawnattr_t * __restrict, uint64_t, uint64_t) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0); -int posix_spawnattr_getcpumonitor(posix_spawnattr_t * __restrict, uint64_t *, uint64_t *) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0); -int posix_spawnattr_setcpumonitor_default(posix_spawnattr_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0); +int posix_spawnattr_setcpumonitor(posix_spawnattr_t * __restrict, uint64_t, uint64_t) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0); +int posix_spawnattr_getcpumonitor(posix_spawnattr_t * __restrict, uint64_t *, uint64_t *) __OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0); +int posix_spawnattr_setcpumonitor_default(posix_spawnattr_t * __restrict) 
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0); #if TARGET_OS_EMBEDDED int posix_spawnattr_setjetsam(posix_spawnattr_t * __restrict attr, - short flags, int priority, int memlimit) __OSX_AVAILABLE_STARTING(__MAC_NA, __IPHONE_5_0); + short flags, int priority, int memlimit) __OSX_AVAILABLE_STARTING(__MAC_NA, __IPHONE_5_0); #endif /* TARGET_OS_EMBEDDED */ int posix_spawnattr_setjetsam_ext(posix_spawnattr_t * __restrict attr, - short flags, int priority, int memlimit_active, int memlimit_inactive) __OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0); + short flags, int priority, int memlimit_active, int memlimit_inactive) __OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0); -int posix_spawnattr_set_threadlimit_ext(posix_spawnattr_t * __restrict attr, - int thread_limit); +int posix_spawnattr_set_threadlimit_ext(posix_spawnattr_t * __restrict attr, + int thread_limit); #define POSIX_SPAWN_IMPORTANCE_PORT_COUNT 32 -int posix_spawnattr_set_importancewatch_port_np(posix_spawnattr_t * __restrict attr, - int count, mach_port_t portarray[]) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0); +int posix_spawnattr_set_importancewatch_port_np(posix_spawnattr_t * __restrict attr, + int count, mach_port_t portarray[]) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_6_0); #define POSIX_SPAWN_MACPOLICYINFO_WITHSIZE 1 -int posix_spawnattr_getmacpolicyinfo_np(const posix_spawnattr_t * __restrict, const char *, void **, size_t *) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); -int posix_spawnattr_setmacpolicyinfo_np(posix_spawnattr_t * __restrict, const char *, void *, size_t) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); +int posix_spawnattr_getmacpolicyinfo_np(const posix_spawnattr_t * __restrict, const char *, void **, size_t *) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); +int posix_spawnattr_setmacpolicyinfo_np(posix_spawnattr_t * __restrict, const char *, void *, size_t) __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0); -int posix_spawnattr_setcoalition_np(const posix_spawnattr_t * __restrict, uint64_t, int, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); +int posix_spawnattr_setcoalition_np(const posix_spawnattr_t * __restrict, uint64_t, int, int) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); int posix_spawnattr_set_qos_clamp_np(const posix_spawnattr_t * __restrict, uint64_t) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); int posix_spawnattr_get_qos_clamp_np(const posix_spawnattr_t * __restrict, uint64_t * __restrict) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0); diff --git a/libsyscall/wrappers/stackshot.c b/libsyscall/wrappers/stackshot.c index 3a7489e02..d819d3470 100644 --- a/libsyscall/wrappers/stackshot.c +++ b/libsyscall/wrappers/stackshot.c @@ -136,14 +136,14 @@ stackshot_capture_with_config(stackshot_config_t *stackshot_config) } s_config = (stackshot_config_t *) stackshot_config; - if (s_config->sc_buffer != 0) { + if (s_config->sc_buffer != 0) { return EINVAL; } s_config->sc_out_buffer_addr = (uintptr_t)&s_config->sc_buffer; s_config->sc_out_size_addr = (uintptr_t)&s_config->sc_size; ret = __stack_snapshot_with_config(STACKSHOT_CONFIG_TYPE, (uintptr_t)s_config, sizeof(stackshot_config_t)); - + if (ret != 0) { ret = errno; s_config->sc_buffer = 0; @@ -171,7 +171,7 @@ stackshot_config_get_stackshot_buffer(stackshot_config_t *stackshot_config) } s_config = (stackshot_config_t *) stackshot_config; - return ((void *)s_config->sc_buffer); + return (void *)s_config->sc_buffer; } /* diff --git a/libsyscall/wrappers/string/index.c 
b/libsyscall/wrappers/string/index.c index fcbc147da..8863e815b 100644 --- a/libsyscall/wrappers/string/index.c +++ b/libsyscall/wrappers/string/index.c @@ -37,10 +37,12 @@ _libkernel_strchr(const char *p, int ch) c = ch; for (;; ++p) { - if (*p == c) - return ((char *)p); - if (*p == '\0') - return (NULL); + if (*p == c) { + return (char *)p; + } + if (*p == '\0') { + return NULL; + } } /* NOTREACHED */ } diff --git a/libsyscall/wrappers/string/memcpy.c b/libsyscall/wrappers/string/memcpy.c index 53d527b6a..28920058c 100644 --- a/libsyscall/wrappers/string/memcpy.c +++ b/libsyscall/wrappers/string/memcpy.c @@ -36,10 +36,10 @@ * sizeof(word) MUST BE A POWER OF TWO * SO THAT wmask BELOW IS ALL ONES */ -typedef int word; /* "word" used for optimal copy speed */ +typedef int word; /* "word" used for optimal copy speed */ -#define wsize sizeof(word) -#define wmask (wsize - 1) +#define wsize sizeof(word) +#define wmask (wsize - 1) /* * Copy a block of memory, handling overlap. @@ -48,35 +48,38 @@ typedef int word; /* "word" used for optimal copy speed */ */ __attribute__((visibility("hidden"))) -void * _libkernel_memmove(void *dst0, const void *src0, size_t length) +void * +_libkernel_memmove(void *dst0, const void *src0, size_t length) { char *dst = dst0; const char *src = src0; size_t t; - - if (length == 0 || dst == src) /* nothing to do */ + + if (length == 0 || dst == src) { /* nothing to do */ goto done; - + } + /* * Macros: loop-t-times; and loop-t-times, t>0 */ -#define TLOOP(s) if (t) TLOOP1(s) -#define TLOOP1(s) do { s; } while (--t) - +#define TLOOP(s) if (t) TLOOP1(s) +#define TLOOP1(s) do { s; } while (--t) + if ((unsigned long)dst < (unsigned long)src) { /* * Copy forward. */ - t = (uintptr_t)src; /* only need low bits */ + t = (uintptr_t)src; /* only need low bits */ if ((t | (uintptr_t)dst) & wmask) { /* * Try to align operands. This cannot be done * unless the low bits match. */ - if ((t ^ (uintptr_t)dst) & wmask || length < wsize) + if ((t ^ (uintptr_t)dst) & wmask || length < wsize) { t = length; - else + } else { t = wsize - (t & wmask); + } length -= t; TLOOP1(*dst++ = *src++); } @@ -97,10 +100,11 @@ void * _libkernel_memmove(void *dst0, const void *src0, size_t length) dst += length; t = (uintptr_t)src; if ((t | (uintptr_t)dst) & wmask) { - if ((t ^ (uintptr_t)dst) & wmask || length <= wsize) + if ((t ^ (uintptr_t)dst) & wmask || length <= wsize) { t = length; - else + } else { t &= wmask; + } length -= t; TLOOP1(*--dst = *--src); } @@ -110,7 +114,7 @@ void * _libkernel_memmove(void *dst0, const void *src0, size_t length) TLOOP(*--dst = *--src); } done: - return (dst0); + return dst0; } __attribute__((visibility("hidden"))) diff --git a/libsyscall/wrappers/string/memset.c b/libsyscall/wrappers/string/memset.c index 82c1eb0a9..f8fc25597 100644 --- a/libsyscall/wrappers/string/memset.c +++ b/libsyscall/wrappers/string/memset.c @@ -33,8 +33,8 @@ #include "strings.h" #include -#define wsize sizeof(u_int) -#define wmask (wsize - 1) +#define wsize sizeof(u_int) +#define wmask (wsize - 1) // n.b. this must be compiled with -fno-builtin or it might get optimized into // a recursive call to bzero. 
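The hunk below widens the fill byte into a full u_int before entering the
word-at-a-time store loop. A minimal standalone sketch of just that widening
cascade, assuming the usual 32-bit unsigned int (replicate_byte and main are
illustrative names, not part of the patch):

#include <limits.h>
#include <stdio.h>

/* Double the number of filled bytes at each step, mirroring the
 * (c << 8) | c cascade in _libkernel_memset below. */
static unsigned int
replicate_byte(unsigned char c0)
{
	unsigned int c = c0;
	c = (c << 8) | c;	/* low 16 bits filled */
#if UINT_MAX > 0xffff
	c = (c << 16) | c;	/* low 32 bits filled */
#endif
	return c;
}

int
main(void)
{
	/* Prints 0xabababab where unsigned int is 32 bits wide. */
	printf("0x%x\n", replicate_byte(0xab));
	return 0;
}

Once the word is built this way, the aligned middle of the buffer can be
filled one u_int at a time instead of byte by byte, which is the point of
the wsize/wmask machinery above.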
@@ -42,12 +42,12 @@ __attribute__((visibility("hidden"))) void _libkernel_bzero(void *dst0, size_t length) { - return (void)_libkernel_memset(dst0, 0, length); + return (void)_libkernel_memset(dst0, 0, length); } -#define RETURN return (dst0) -#define VAL c0 -#define WIDEVAL c +#define RETURN return (dst0) +#define VAL c0 +#define WIDEVAL c __attribute__((visibility("hidden"))) void * @@ -79,13 +79,13 @@ _libkernel_memset(void *dst0, int c0, size_t length) RETURN; } - if ((c = (u_char)c0) != 0) { /* Fill the word. */ - c = (c << 8) | c; /* u_int is 16 bits. */ + if ((c = (u_char)c0) != 0) { /* Fill the word. */ + c = (c << 8) | c; /* u_int is 16 bits. */ #if UINT_MAX > 0xffff - c = (c << 16) | c; /* u_int is 32 bits. */ + c = (c << 16) | c; /* u_int is 32 bits. */ #endif #if UINT_MAX > 0xffffffff - c = (c << 32) | c; /* u_int is 64 bits. */ + c = (c << 32) | c; /* u_int is 64 bits. */ #endif } /* Align destination by filling in bytes. */ @@ -106,9 +106,10 @@ _libkernel_memset(void *dst0, int c0, size_t length) /* Mop up trailing bytes, if any. */ t = length & wmask; - if (t != 0) + if (t != 0) { do { *dst++ = VAL; } while (--t != 0); + } RETURN; } diff --git a/libsyscall/wrappers/string/strcmp.c b/libsyscall/wrappers/string/strcmp.c index cfe403516..d28024703 100644 --- a/libsyscall/wrappers/string/strcmp.c +++ b/libsyscall/wrappers/string/strcmp.c @@ -39,8 +39,10 @@ __attribute__((visibility("hidden"))) int _libkernel_strcmp(const char *s1, const char *s2) { - while (*s1 == *s2++) - if (*s1++ == '\0') - return (0); - return (*(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1)); + while (*s1 == *s2++) { + if (*s1++ == '\0') { + return 0; + } + } + return *(const unsigned char *)s1 - *(const unsigned char *)(s2 - 1); } diff --git a/libsyscall/wrappers/string/strcpy.c b/libsyscall/wrappers/string/strcpy.c index e67282e06..695699d71 100644 --- a/libsyscall/wrappers/string/strcpy.c +++ b/libsyscall/wrappers/string/strcpy.c @@ -2,14 +2,14 @@ * Copyright (c) 2011 Apple, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -25,11 +25,12 @@ __attribute__((visibility("hidden"))) char * -_libkernel_strcpy(char * restrict dst, const char * restrict src) { +_libkernel_strcpy(char * restrict dst, const char * restrict src) +{ const size_t length = _libkernel_strlen(src); - // The stpcpy() and strcpy() functions copy the string src to dst - // (including the terminating '\0' character). - _libkernel_memmove(dst, src, length+1); - // The strcpy() and strncpy() functions return dst. - return dst; + // The stpcpy() and strcpy() functions copy the string src to dst + // (including the terminating '\0' character). 
+ _libkernel_memmove(dst, src, length + 1); + // The strcpy() and strncpy() functions return dst. + return dst; } diff --git a/libsyscall/wrappers/string/strings.h b/libsyscall/wrappers/string/strings.h index a8222044a..f2167a542 100644 --- a/libsyscall/wrappers/string/strings.h +++ b/libsyscall/wrappers/string/strings.h @@ -2,14 +2,14 @@ * Copyright (c) 2000, 2007, 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ /*- @@ -61,18 +61,18 @@ #include <_types.h> #include -void *memmove(void *, const void *, size_t); -void *memset(void *, int, size_t); -int strcmp(const char *, const char *); -char *strcpy(char *, const char *); -size_t strlen(const char *); -size_t strlcpy(char *, const char *, size_t); -char *strsep(char **, const char *); +void *memmove(void *, const void *, size_t); +void *memset(void *, int, size_t); +int strcmp(const char *, const char *); +char *strcpy(char *, const char *); +size_t strlen(const char *); +size_t strlcpy(char *, const char *, size_t); +char *strsep(char **, const char *); -void bcopy(const void *, void *, size_t); -void bzero(void *, size_t); -char *index(const char *, int); -char *strchr(const char *, int); +void bcopy(const void *, void *, size_t); +void bzero(void *, size_t); +char *index(const char *, int); +char *strchr(const char *, int); #include "string.h" diff --git a/libsyscall/wrappers/string/strlcpy.c b/libsyscall/wrappers/string/strlcpy.c index 1be4fe333..0da15ab7d 100644 --- a/libsyscall/wrappers/string/strlcpy.c +++ b/libsyscall/wrappers/string/strlcpy.c @@ -2,14 +2,14 @@ * Copyright (c) 2011 Apple, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -25,13 +25,14 @@ __attribute__((visibility("hidden"))) size_t -_libkernel_strlcpy(char * restrict dst, const char * restrict src, size_t maxlen) { - const size_t srclen = _libkernel_strlen(src); - if (srclen < maxlen) { - _libkernel_memmove(dst, src, srclen+1); - } else if (maxlen != 0) { - _libkernel_memmove(dst, src, maxlen-1); - dst[maxlen-1] = '\0'; - } - return srclen; +_libkernel_strlcpy(char * restrict dst, const char * restrict src, size_t maxlen) +{ + const size_t srclen = _libkernel_strlen(src); + if (srclen < maxlen) { + _libkernel_memmove(dst, src, srclen + 1); + } else if (maxlen != 0) { + _libkernel_memmove(dst, src, maxlen - 1); + dst[maxlen - 1] = '\0'; + } + return srclen; } diff --git a/libsyscall/wrappers/string/strlen.c b/libsyscall/wrappers/string/strlen.c index 9054ac39e..8a599d454 100644 --- a/libsyscall/wrappers/string/strlen.c +++ b/libsyscall/wrappers/string/strlen.c @@ -61,16 +61,16 @@ static const unsigned long mask80 = 0x8080808080808080; #error Unsupported word size #endif -#define LONGPTR_MASK (sizeof(long) - 1) +#define LONGPTR_MASK (sizeof(long) - 1) /* * Helper macro to return string length if we caught the zero * byte. */ -#define testbyte(x) \ - do { \ - if (p[x] == '\0') \ - return (p - str + x); \ +#define testbyte(x) \ + do { \ + if (p[x] == '\0') \ + return (p - str + x); \ } while (0) __attribute__((visibility("hidden"))) @@ -81,27 +81,29 @@ _libkernel_strlen(const char *str) const unsigned long *lp; /* Skip the first few bytes until we have an aligned p */ - for (p = str; (uintptr_t)p & LONGPTR_MASK; p++) - if (*p == '\0') - return (p - str); + for (p = str; (uintptr_t)p & LONGPTR_MASK; p++) { + if (*p == '\0') { + return p - str; + } + } /* Scan the rest of the string using word sized operation */ - for (lp = (const unsigned long *)p; ; lp++) - if ((*lp - mask01) & mask80) { - p = (const char *)(lp); - testbyte(0); - testbyte(1); - testbyte(2); - testbyte(3); + for (lp = (const unsigned long *)p;; lp++) { + if ((*lp - mask01) & mask80) { + p = (const char *)(lp); + testbyte(0); + testbyte(1); + testbyte(2); + testbyte(3); #if (LONG_BIT >= 64) - testbyte(4); - testbyte(5); - testbyte(6); - testbyte(7); + testbyte(4); + testbyte(5); + testbyte(6); + testbyte(7); #endif - } + } + } /* NOTREACHED */ - return (0); + return 0; } - diff --git a/libsyscall/wrappers/string/strsep.c b/libsyscall/wrappers/string/strsep.c index 029d0f3c8..f3aee7edf 100644 --- a/libsyscall/wrappers/string/strsep.c +++ b/libsyscall/wrappers/string/strsep.c @@ -49,19 +49,21 @@ strsep(char **stringp, const char *delim) int c, sc; char *tok; - if ((s = *stringp) == NULL) - return (NULL); + if ((s = *stringp) == NULL) { + return NULL; + } for (tok = s;;) { c = *s++; spanp = delim; do { if ((sc = *spanp++) == c) { - if (c == 0) + if (c == 0) { s = NULL; - else + } else { s[-1] = 0; + } *stringp = s; - return (tok); + return tok; } } while (sc != 0); } diff --git a/libsyscall/wrappers/terminate_with_reason.c b/libsyscall/wrappers/terminate_with_reason.c index 6277f4be0..52082bb68 100644 --- a/libsyscall/wrappers/terminate_with_reason.c +++ b/libsyscall/wrappers/terminate_with_reason.c @@ -28,38 +28,39 @@ /* System call entry points */ int __terminate_with_payload(int pid, uint32_t reason_namespace, uint64_t reason_code, - void *payload, uint32_t payload_size, const char *reason_string, - uint64_t reason_flags); + void *payload, uint32_t payload_size, const char *reason_string, + uint64_t reason_flags); void __abort_with_payload(uint32_t 
reason_namespace, uint64_t reason_code, - void *payload, uint32_t payload_size, const char *reason_string, - uint64_t reason_flags); + void *payload, uint32_t payload_size, const char *reason_string, + uint64_t reason_flags); static void abort_with_payload_wrapper_internal(uint32_t reason_namespace, uint64_t reason_code, - void *payload, uint32_t payload_size, const char *reason_string, - uint64_t reason_flags) __attribute__((noreturn)); + void *payload, uint32_t payload_size, const char *reason_string, + uint64_t reason_flags) __attribute__((noreturn)); /* System call wrappers */ int terminate_with_reason(int pid, uint32_t reason_namespace, uint64_t reason_code, - const char *reason_string, uint64_t reason_flags) + const char *reason_string, uint64_t reason_flags) { return __terminate_with_payload(pid, reason_namespace, reason_code, 0, 0, - reason_string, reason_flags); + reason_string, reason_flags); } int terminate_with_payload(int pid, uint32_t reason_namespace, uint64_t reason_code, - void *payload, uint32_t payload_size, - const char *reason_string, uint64_t reason_flags) + void *payload, uint32_t payload_size, + const char *reason_string, uint64_t reason_flags) { return __terminate_with_payload(pid, reason_namespace, reason_code, payload, - payload_size, reason_string, reason_flags); + payload_size, reason_string, reason_flags); } -static void abort_with_payload_wrapper_internal(uint32_t reason_namespace, uint64_t reason_code, - void *payload, uint32_t payload_size, const char *reason_string, - uint64_t reason_flags) +static void +abort_with_payload_wrapper_internal(uint32_t reason_namespace, uint64_t reason_code, + void *payload, uint32_t payload_size, const char *reason_string, + uint64_t reason_flags) { sigset_t unmask_signal; @@ -69,28 +70,27 @@ static void abort_with_payload_wrapper_internal(uint32_t reason_namespace, uint6 sigprocmask(SIG_UNBLOCK, &unmask_signal, NULL); __abort_with_payload(reason_namespace, reason_code, payload, payload_size, - reason_string, reason_flags); + reason_string, reason_flags); /* If sending a SIGABRT failed, we fall back to SIGKILL */ terminate_with_payload(getpid(), reason_namespace, reason_code, payload, payload_size, - reason_string, reason_flags | OS_REASON_FLAG_ABORT); + reason_string, reason_flags | OS_REASON_FLAG_ABORT); __builtin_unreachable(); } void abort_with_reason(uint32_t reason_namespace, uint64_t reason_code, const char *reason_string, - uint64_t reason_flags) + uint64_t reason_flags) { abort_with_payload_wrapper_internal(reason_namespace, reason_code, 0, 0, reason_string, reason_flags); } void abort_with_payload(uint32_t reason_namespace, uint64_t reason_code, void *payload, - uint32_t payload_size, const char *reason_string, - uint64_t reason_flags) + uint32_t payload_size, const char *reason_string, + uint64_t reason_flags) { abort_with_payload_wrapper_internal(reason_namespace, reason_code, payload, payload_size, - reason_string, reason_flags); + reason_string, reason_flags); } - diff --git a/libsyscall/wrappers/thread_register_state.c b/libsyscall/wrappers/thread_register_state.c index d2a9f3268..f061ed08d 100644 --- a/libsyscall/wrappers/thread_register_state.c +++ b/libsyscall/wrappers/thread_register_state.c @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -30,110 +30,120 @@ kern_return_t thread_get_register_pointer_values(thread_t thread, uintptr_t *sp, size_t *length, uintptr_t *values) { - if (!length) return KERN_INVALID_ARGUMENT; - if (*length > 0 && values == NULL) return KERN_INVALID_ARGUMENT; + if (!length) { + return KERN_INVALID_ARGUMENT; + } + if (*length > 0 && values == NULL) { + return KERN_INVALID_ARGUMENT; + } - size_t in_length = *length; - size_t out_length = 0; + size_t in_length = *length; + size_t out_length = 0; #if defined(__i386__) - i386_thread_state_t state = {}; - thread_state_flavor_t flavor = x86_THREAD_STATE32; - mach_msg_type_number_t count = i386_THREAD_STATE_COUNT; + i386_thread_state_t state = {}; + thread_state_flavor_t flavor = x86_THREAD_STATE32; + mach_msg_type_number_t count = i386_THREAD_STATE_COUNT; #elif defined(__x86_64__) - x86_thread_state64_t state = {}; - thread_state_flavor_t flavor = x86_THREAD_STATE64; - mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT; + x86_thread_state64_t state = {}; + thread_state_flavor_t flavor = x86_THREAD_STATE64; + mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT; #elif defined(__arm__) - arm_thread_state_t state = {}; - thread_state_flavor_t flavor = ARM_THREAD_STATE; - mach_msg_type_number_t count = ARM_THREAD_STATE_COUNT; + arm_thread_state_t state = {}; + thread_state_flavor_t flavor = ARM_THREAD_STATE; + mach_msg_type_number_t count = ARM_THREAD_STATE_COUNT; #elif defined(__arm64__) - arm_thread_state64_t state = {}; - thread_state_flavor_t flavor = ARM_THREAD_STATE64; - mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT; + arm_thread_state64_t state = {}; + thread_state_flavor_t flavor = ARM_THREAD_STATE64; + mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT; #else #error thread_get_register_pointer_values not defined for this architecture #endif - kern_return_t ret = thread_get_state(thread, flavor, (thread_state_t)&state, &count); - if (ret != KERN_SUCCESS){ - return ret; - } + kern_return_t ret = thread_get_state(thread, flavor, (thread_state_t)&state, &count); + if (ret != KERN_SUCCESS) { + return ret; + } - // If the provided pointer value is > PAGE_SIZE, add it to the output array - // if there's available space. (Values between 0 and PAGE_SIZE are the NULL page - // and not valid pointers.) + // If the provided pointer value is > PAGE_SIZE, add it to the output array + // if there's available space. (Values between 0 and PAGE_SIZE are the NULL page + // and not valid pointers.) 
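An aside on the _libkernel_strlen hunk earlier in this section, before the register-walking macro below: the word-sized loop relies on a classic zero-byte probe. A minimal sketch of that test, reusing the file's mask01/mask80 constants (the helper name is hypothetical):

    #include <stdbool.h>

    /* The subtraction borrows across any 0x00 byte and leaves its top bit
     * set; masking with mask80 keeps only those flipped top bits. Bytes
     * >= 0x80 can trip the probe without a NUL being present, which is
     * exactly why the testbyte() checks re-examine each byte before the
     * function returns. */
    static bool
    word_may_contain_nul(unsigned long v)
    {
        const unsigned long mask01 = (unsigned long)0x0101010101010101ULL; /* truncates cleanly on 32-bit long */
        const unsigned long mask80 = (unsigned long)0x8080808080808080ULL;
        return ((v - mask01) & mask80) != 0;
    }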
#define push_register_value(p) do { \ if ((uintptr_t)p > PAGE_SIZE) { \ - if (out_length < in_length && values) \ - values[out_length] = p; \ - out_length++; \ + if (out_length < in_length && values) \ + values[out_length] = p; \ + out_length++; \ } } while (0) #if defined(__i386__) - if (sp) *sp = state.__esp; + if (sp) { + *sp = state.__esp; + } - push_register_value(state.__eax); - push_register_value(state.__ebx); - push_register_value(state.__ecx); - push_register_value(state.__edx); - push_register_value(state.__edi); - push_register_value(state.__esi); - push_register_value(state.__ebp); + push_register_value(state.__eax); + push_register_value(state.__ebx); + push_register_value(state.__ecx); + push_register_value(state.__edx); + push_register_value(state.__edi); + push_register_value(state.__esi); + push_register_value(state.__ebp); #elif defined(__x86_64__) - if (sp) { - if (state.__rsp > 128) - *sp = state.__rsp - 128 /* redzone */; - else - *sp = 0; - } + if (sp) { + if (state.__rsp > 128) { + *sp = state.__rsp - 128 /* redzone */; + } else { + *sp = 0; + } + } - push_register_value(state.__rax); - push_register_value(state.__rbx); - push_register_value(state.__rcx); - push_register_value(state.__rdx); - push_register_value(state.__rdi); - push_register_value(state.__rbp); - push_register_value(state.__r8); - push_register_value(state.__r9); - push_register_value(state.__r10); - push_register_value(state.__r11); - push_register_value(state.__r12); - push_register_value(state.__r13); - push_register_value(state.__r14); - push_register_value(state.__r15); + push_register_value(state.__rax); + push_register_value(state.__rbx); + push_register_value(state.__rcx); + push_register_value(state.__rdx); + push_register_value(state.__rdi); + push_register_value(state.__rbp); + push_register_value(state.__r8); + push_register_value(state.__r9); + push_register_value(state.__r10); + push_register_value(state.__r11); + push_register_value(state.__r12); + push_register_value(state.__r13); + push_register_value(state.__r14); + push_register_value(state.__r15); #elif defined(__arm__) - if (sp) *sp = state.__sp; + if (sp) { + *sp = state.__sp; + } - push_register_value(state.__lr); + push_register_value(state.__lr); - for (int i = 0; i < 13; i++){ - push_register_value(state.__r[i]); - } + for (int i = 0; i < 13; i++) { + push_register_value(state.__r[i]); + } #elif defined(__arm64__) - if (sp) { - uintptr_t __sp = arm_thread_state64_get_sp(state); - if (__sp > 128) - *sp = __sp - 128 /* redzone */; - else - *sp = 0; - } + if (sp) { + uintptr_t __sp = arm_thread_state64_get_sp(state); + if (__sp > 128) { + *sp = __sp - 128 /* redzone */; + } else { + *sp = 0; + } + } - push_register_value(arm_thread_state64_get_lr(state)); + push_register_value(arm_thread_state64_get_lr(state)); - for (int i = 0; i < 29; i++){ - push_register_value(state.__x[i]); - } + for (int i = 0; i < 29; i++) { + push_register_value(state.__x[i]); + } #else #error thread_get_register_pointer_values not defined for this architecture #endif - *length = out_length; + *length = out_length; - if (in_length == 0 || out_length > in_length){ - return KERN_INSUFFICIENT_BUFFER_SIZE; - } + if (in_length == 0 || out_length > in_length) { + return KERN_INSUFFICIENT_BUFFER_SIZE; + } return KERN_SUCCESS; } diff --git a/libsyscall/wrappers/unix03/chmod.c b/libsyscall/wrappers/unix03/chmod.c index ca5077780..ef3805405 100644 --- a/libsyscall/wrappers/unix03/chmod.c +++ b/libsyscall/wrappers/unix03/chmod.c @@ -2,14 +2,14 @@ * Copyright (c) 
2005 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -42,20 +42,24 @@ chmod(const char *path, mode_t mode) { int res = __chmod(path, mode); - if (res >= 0 || errno != EPERM || (mode & (S_ISUID | S_ISGID)) == 0) + if (res >= 0 || errno != EPERM || (mode & (S_ISUID | S_ISGID)) == 0) { return res; + } if (mode & S_ISGID) { res = __chmod(path, mode ^ S_ISGID); - if (res >= 0 || errno != EPERM) + if (res >= 0 || errno != EPERM) { return res; + } } if (mode & S_ISUID) { res = __chmod(path, mode ^ S_ISUID); - if (res >= 0 || errno != EPERM) + if (res >= 0 || errno != EPERM) { return res; + } } - if ((mode & (S_ISUID | S_ISGID)) == (S_ISUID | S_ISGID)) + if ((mode & (S_ISUID | S_ISGID)) == (S_ISUID | S_ISGID)) { res = __chmod(path, mode ^ (S_ISUID | S_ISGID)); + } return res; } diff --git a/libsyscall/wrappers/unix03/fchmod.c b/libsyscall/wrappers/unix03/fchmod.c index 648c53edc..b3f81742a 100644 --- a/libsyscall/wrappers/unix03/fchmod.c +++ b/libsyscall/wrappers/unix03/fchmod.c @@ -2,14 +2,14 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
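Returning to thread_register_state.c just above: thread_get_register_pointer_values() follows the usual two-call sizing protocol, since the required slot count varies by architecture. A hedged usage sketch, assuming a valid (ideally suspended) thread port and the declaration shown in the wrapper above:

    uintptr_t sp;
    size_t count = 0;
    /* Length 0 with values == NULL passes validation; the call comes back
     * as KERN_INSUFFICIENT_BUFFER_SIZE with count set to the needed slots. */
    kern_return_t kr = thread_get_register_pointer_values(thread, &sp, &count, NULL);
    if (kr == KERN_INSUFFICIENT_BUFFER_SIZE) {
        uintptr_t values[32];   /* one slot per GPR; 32 is ample on all four arches */
        kr = thread_get_register_pointer_values(thread, &sp, &count, values);
        /* On success, values[0..count-1] hold register contents that look
         * like pointers (> PAGE_SIZE), and sp is the stack pointer with
         * any red zone already subtracted. */
    }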
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -42,20 +42,24 @@ fchmod(int fd, mode_t mode) { int res = __fchmod(fd, mode); - if (res >= 0 || errno != EPERM || (mode & (S_ISUID | S_ISGID)) == 0) + if (res >= 0 || errno != EPERM || (mode & (S_ISUID | S_ISGID)) == 0) { return res; + } if (mode & S_ISGID) { res = __fchmod(fd, mode ^ S_ISGID); - if (res >= 0 || errno != EPERM) + if (res >= 0 || errno != EPERM) { return res; + } } if (mode & S_ISUID) { res = __fchmod(fd, mode ^ S_ISUID); - if (res >= 0 || errno != EPERM) + if (res >= 0 || errno != EPERM) { return res; + } } - if ((mode & (S_ISUID | S_ISGID)) == (S_ISUID | S_ISGID)) + if ((mode & (S_ISUID | S_ISGID)) == (S_ISUID | S_ISGID)) { res = __fchmod(fd, mode ^ (S_ISUID | S_ISGID)); + } return res; } diff --git a/libsyscall/wrappers/unix03/getrlimit.c b/libsyscall/wrappers/unix03/getrlimit.c index ab38b7170..6832afb2c 100644 --- a/libsyscall/wrappers/unix03/getrlimit.c +++ b/libsyscall/wrappers/unix03/getrlimit.c @@ -2,14 +2,14 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -40,7 +40,7 @@ int getrlimit(int resource, struct rlimit *rlp) { resource |= _RLIMIT_POSIX_FLAG; - return(__getrlimit(resource, rlp)); + return __getrlimit(resource, rlp); } #endif /* __DARWIN_UNIX03 */ diff --git a/libsyscall/wrappers/unix03/mmap.c b/libsyscall/wrappers/unix03/mmap.c index 6d52fbe3a..203301e3f 100644 --- a/libsyscall/wrappers/unix03/mmap.c +++ b/libsyscall/wrappers/unix03/mmap.c @@ -2,14 +2,14 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
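chmod() and fchmod() above share the same UNIX03 fallback ladder: when the kernel refuses a mode containing S_ISUID or S_ISGID with EPERM, the wrapper retries with progressively fewer privileged bits instead of failing outright. A hedged illustration of the caller-visible effect (the path is a placeholder):

    #include <sys/stat.h>

    /* An unprivileged caller requesting 02755 on a file whose group it is
     * not a member of can still see success here: the wrapper's retry
     * drops S_ISGID and applies plain 0755. */
    int rc = chmod("/tmp/example", S_ISGID | 0755);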
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -44,7 +44,7 @@ mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off) { /* * Preemptory failures: - * + * * o off is not a multiple of the page size * o flags does not contain either MAP_PRIVATE or MAP_SHARED * o len is zero @@ -52,14 +52,14 @@ mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off) extern void cerror_nocancel(int); if ((off & PAGE_MASK) || (((flags & MAP_PRIVATE) != MAP_PRIVATE) && - ((flags & MAP_SHARED) != MAP_SHARED)) || + ((flags & MAP_SHARED) != MAP_SHARED)) || (len == 0)) { cerror_nocancel(EINVAL); - return(MAP_FAILED); + return MAP_FAILED; } void *ptr = __mmap(addr, len, prot, flags, fildes, off); - + if (__syscall_logger) { int stackLoggingFlags = stack_logging_type_vm_allocate; if (flags & MAP_ANON) { diff --git a/libsyscall/wrappers/unix03/munmap.c b/libsyscall/wrappers/unix03/munmap.c index 8dd5d9b57..e280c45e6 100644 --- a/libsyscall/wrappers/unix03/munmap.c +++ b/libsyscall/wrappers/unix03/munmap.c @@ -2,14 +2,14 @@ * Copyright (c) 2012 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -39,13 +39,13 @@ extern int __munmap(void *, size_t); int munmap(void *addr, size_t len) { - if (__syscall_logger) { - __syscall_logger(stack_logging_type_vm_deallocate, (uintptr_t)mach_task_self(), (uintptr_t)addr, len, 0, 0); - } - - int result = __munmap(addr, len); + if (__syscall_logger) { + __syscall_logger(stack_logging_type_vm_deallocate, (uintptr_t)mach_task_self(), (uintptr_t)addr, len, 0, 0); + } + + int result = __munmap(addr, len); - return result; + return result; } #endif /* __DARWIN_UNIX03 */ diff --git a/libsyscall/wrappers/unix03/setrlimit.c b/libsyscall/wrappers/unix03/setrlimit.c index ebc872deb..992b33795 100644 --- a/libsyscall/wrappers/unix03/setrlimit.c +++ b/libsyscall/wrappers/unix03/setrlimit.c @@ -2,14 +2,14 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
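The mmap() wrapper above screens out three argument shapes before entering the kernel, raising EINVAL through cerror_nocancel(). Hedged examples of each rejected call (fd and len are placeholders):

    mmap(NULL, 0,   PROT_READ, MAP_PRIVATE, fd, 0);    /* len == 0 */
    mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 123);  /* off not page-aligned */
    mmap(NULL, len, PROT_READ, MAP_ANON,    -1, 0);    /* neither MAP_PRIVATE nor MAP_SHARED */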
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -40,7 +40,7 @@ int setrlimit(int resource, const struct rlimit *rlp) { resource |= _RLIMIT_POSIX_FLAG; - return(__setrlimit(resource, rlp)); + return __setrlimit(resource, rlp); } #endif /* __DARWIN_UNIX03 */ diff --git a/libsyscall/wrappers/unlink.c b/libsyscall/wrappers/unlink.c index 8f2144a82..0e6cff1d6 100644 --- a/libsyscall/wrappers/unlink.c +++ b/libsyscall/wrappers/unlink.c @@ -2,14 +2,14 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -28,6 +28,8 @@ int unlink(const char *path) { int res = __unlink(path); - if (res == 0) __inc_remove_counter(); + if (res == 0) { + __inc_remove_counter(); + } return res; } diff --git a/libsyscall/wrappers/unlinkat.c b/libsyscall/wrappers/unlinkat.c index 265235199..3b87e9211 100644 --- a/libsyscall/wrappers/unlinkat.c +++ b/libsyscall/wrappers/unlinkat.c @@ -28,6 +28,8 @@ int unlinkat(int fd, const char *path, int flag) { int res = __unlinkat(fd, path, flag); - if (res == 0) __inc_remove_counter(); + if (res == 0) { + __inc_remove_counter(); + } return res; } diff --git a/libsyscall/wrappers/utimensat.c b/libsyscall/wrappers/utimensat.c index 6deaf45a3..f1601a630 100644 --- a/libsyscall/wrappers/utimensat.c +++ b/libsyscall/wrappers/utimensat.c @@ -2,14 +2,14 @@ * Copyright (c) 2006, 2017 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
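The getrlimit() (earlier) and setrlimit() (just above) wrappers rely on the same one-line conformance trick: OR-ing _RLIMIT_POSIX_FLAG into the resource number tells the kernel that the caller was compiled for UNIX03, so POSIX rather than legacy semantics apply to the shared syscall. A hedged illustration:

    #include <sys/resource.h>

    struct rlimit rl;
    /* Under __DARWIN_UNIX03 the kernel actually sees
     * RLIMIT_NOFILE | _RLIMIT_POSIX_FLAG for both calls. */
    if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
        rl.rlim_cur = rl.rlim_max;
        (void)setrlimit(RLIMIT_NOFILE, &rl);
    }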
- * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -43,15 +43,15 @@ static struct timespec times_now[2] = { */ static int prepare_times_array_and_attrs(struct timespec times_in[2], - struct timespec times_out[2], size_t *times_out_size) + struct timespec times_out[2], size_t *times_out_size) { if (times_in[0].tv_nsec == UTIME_OMIT && - times_in[1].tv_nsec == UTIME_OMIT) { + times_in[1].tv_nsec == UTIME_OMIT) { return 0; } if (times_in[0].tv_nsec == UTIME_NOW || - times_in[1].tv_nsec == UTIME_NOW) { + times_in[1].tv_nsec == UTIME_NOW) { struct timespec now = {}; { /* diff --git a/libsyscall/wrappers/work_interval.c b/libsyscall/wrappers/work_interval.c index 32b43da06..643c68173 100644 --- a/libsyscall/wrappers/work_interval.c +++ b/libsyscall/wrappers/work_interval.c @@ -58,7 +58,7 @@ work_interval_create(work_interval_t *interval_handle, uint32_t create_flags) }; ret = __work_interval_ctl(WORK_INTERVAL_OPERATION_CREATE2, 0, - &create_params, sizeof(create_params)); + &create_params, sizeof(create_params)); if (ret == -1) { return ret; } @@ -80,8 +80,8 @@ work_interval_create(work_interval_t *interval_handle, uint32_t create_flags) int work_interval_notify(work_interval_t interval_handle, uint64_t start, - uint64_t finish, uint64_t deadline, uint64_t next_start, - uint32_t notify_flags) + uint64_t finish, uint64_t deadline, uint64_t next_start, + uint32_t notify_flags) { int ret; uint64_t work_interval_id; @@ -102,16 +102,16 @@ work_interval_notify(work_interval_t interval_handle, uint64_t start, work_interval_id = interval_handle->work_interval_id; ret = __work_interval_ctl(WORK_INTERVAL_OPERATION_NOTIFY, work_interval_id, - ¬ification, sizeof(notification)); + ¬ification, sizeof(notification)); return ret; } int work_interval_notify_simple(work_interval_t interval_handle, uint64_t start, - uint64_t deadline, uint64_t next_start) + uint64_t deadline, uint64_t next_start) { return work_interval_notify(interval_handle, start, mach_absolute_time(), - deadline, next_start, 0); + deadline, next_start, 0); } @@ -160,7 +160,7 @@ work_interval_destroy(work_interval_t interval_handle) uint64_t work_interval_id = interval_handle->work_interval_id; int ret = __work_interval_ctl(WORK_INTERVAL_OPERATION_DESTROY, - work_interval_id, NULL, 0); + work_interval_id, NULL, 0); interval_handle->work_interval_id = 0; @@ -203,14 +203,14 @@ work_interval_join_port(mach_port_t port) } return __work_interval_ctl(WORK_INTERVAL_OPERATION_JOIN, - (uint64_t)port, NULL, 0); + (uint64_t)port, NULL, 0); } int work_interval_leave(void) { return __work_interval_ctl(WORK_INTERVAL_OPERATION_JOIN, - (uint64_t)MACH_PORT_NULL, NULL, 0); + (uint64_t)MACH_PORT_NULL, NULL, 0); } int @@ -236,7 +236,7 @@ work_interval_copy_port(work_interval_t interval_handle, mach_port_t *port) mach_port_t wi_port = interval_handle->wi_port; kern_return_t kr = mach_port_mod_refs(mach_task_self(), wi_port, - MACH_PORT_RIGHT_SEND, 1); + MACH_PORT_RIGHT_SEND, 1); if (kr != KERN_SUCCESS) { *port = MACH_PORT_NULL; @@ -248,7 +248,3 @@ work_interval_copy_port(work_interval_t interval_handle, mach_port_t *port) return 0; } - - - - diff --git a/makedefs/MakeInc.def b/makedefs/MakeInc.def index 73f7cdc57..de10f2053 100644 --- a/makedefs/MakeInc.def +++ b/makedefs/MakeInc.def @@ -164,8 +164,7 @@ WARNFLAGS_STD := \ WARNFLAGS_STD := $(WARNFLAGS_STD) \ -Wno-unknown-warning-option \ - -Wno-error=shadow-field \ - -Wno-error=cast-qual + -Wno-error=atomic-implicit-seq-cst CWARNFLAGS_STD = \ $(WARNFLAGS_STD) @@ -310,7 +309,16 @@ ifeq ($(KASAN),1) SAN=1 BUILD_LTO = 0 
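The work_interval wrappers earlier in this hunk all funnel through __work_interval_ctl(). A hedged lifecycle sketch using only the calls shown above (declarations come from the private <sys/work_interval.h>; the zero create-flags and the frame budget are assumptions):

    work_interval_t wi;
    uint64_t frame_budget = 1; /* placeholder: one frame, in mach_absolute_time() units */
    if (work_interval_create(&wi, 0) == 0) {
        uint64_t start = mach_absolute_time();
        /* ... do one frame of work ... */
        /* notify_simple() supplies "finish" as mach_absolute_time() itself */
        work_interval_notify_simple(wi, start, start + frame_budget, start + frame_budget);
        work_interval_destroy(wi);
    }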
KASAN_SHIFT_ARM64=0xdffffff800000000 -KASAN_SHIFT_X86_64=0xdffffe1000000000 +# +# To calculate the kasan shift, subtract the lowest KVA to sanitize, shifted right by 3 bits, +# from the base address of the kasan shadow area, (e.g. solve the following equation: +# SHIFT = {VA mapped by the first KASAN PML4 [Currently #494]} - (LOWEST_KVA >> 3) +# SHIFT = (0ULL - (512GiB * (512 - 494))) - (LOWEST_SAN_KVA >> 3) +# SHIFT = FFFFF70000000000 - ((0ULL - (512GiB * (512 - 496))) >> 3) [PML4 #496 is the first possible KVA] +# SHIFT = FFFFF70000000000 - (FFFFF80000000000 >> 3) +# SHIFT = DFFFF80000000000 +# ). +KASAN_SHIFT_X86_64=0xdffff80000000000 KASAN_SHIFT_X86_64H=$(KASAN_SHIFT_X86_64) KASAN_SHIFT=$($(addsuffix $(CURRENT_ARCH_CONFIG),KASAN_SHIFT_)) CFLAGS_GEN += -DKASAN=1 -DKASAN_SHIFT=$(KASAN_SHIFT) -fsanitize=address \ @@ -554,6 +562,8 @@ LDFLAGS_KERNEL_GENARM64 = \ -Wl,-sectalign,__DATA,__const,0x4000 \ -Wl,-rename_section,__DATA,__mod_init_func,__DATA_CONST,__mod_init_func \ -Wl,-rename_section,__DATA,__mod_term_func,__DATA_CONST,__mod_term_func \ + -Wl,-rename_section,__DATA,__auth_ptr,__DATA_CONST,__auth_ptr \ + -Wl,-rename_section,__DATA,__auth_got,__DATA_CONST,__auth_got \ -Wl,-rename_section,__DATA,__const,__DATA_CONST,__const \ -Wl,-rename_section,__TEXT,__text,__TEXT_EXEC,__text \ -Wl,-rename_section,__TEXT,__stubs,__TEXT_EXEC,__stubs \ diff --git a/makedefs/MakeInc.top b/makedefs/MakeInc.top index 6d9bcf146..3c8521dd6 100644 --- a/makedefs/MakeInc.top +++ b/makedefs/MakeInc.top @@ -558,7 +558,7 @@ else ifeq ($(RC_ProjectName),xnu_headers_Sim) install: installhdrs else -install: installhdrs install_textfiles install_config install_kernels install_aliases +install: installhdrs install_textfiles install_config install_kernels install_aliases checkstyle endif .PHONY: install_embedded install_release_embedded install_development_embedded install_desktop @@ -676,12 +676,19 @@ TAGS: cscope.files @rm -f cscope.files 2> /dev/null # -# Re-indent source code using xnu clang-format style +# Check or reformat source code for official xnu code style # -.PHONY: reindent +.PHONY: checkstyle restyle check_uncrustify uncrustify -reindent: - $(_v)$(SRCROOT)/tools/reindent.sh +# User-friendly aliases for those who prefer to remember the name of the tool. +check_uncrustify: checkstyle +uncrustify: restyle + +checkstyle: + ${_V}$(SRCROOT)/tools/uncrustify.sh + +restyle: + ${_V}$(SRCROOT)/tools/uncrustify.sh -f .PHONY: help diff --git a/osfmk/UserNotification/KUNCUserNotifications.c b/osfmk/UserNotification/KUNCUserNotifications.c index 710f6fa8e..1f56c2323 100644 --- a/osfmk/UserNotification/KUNCUserNotifications.c +++ b/osfmk/UserNotification/KUNCUserNotifications.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
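The new MakeInc.def comment above derives KASAN_SHIFT_X86_64 from the shadow-region base; the mapping itself is the standard ASan 8-to-1 scheme, shadow(addr) = (addr >> 3) + SHIFT. A worked check of the arithmetic, using only values quoted in that comment:

    /* One shadow byte covers eight bytes of kernel VA. */
    static inline unsigned long long
    kasan_shadow_for(unsigned long long addr)
    {
        return (addr >> 3) + 0xdffff80000000000ULL; /* KASAN_SHIFT_X86_64 */
    }

    /* The lowest sanitized KVA (first address of PML4 #496) lands exactly
     * at the base of the shadow area (PML4 #494):
     *   0xFFFFF80000000000 >> 3  == 0x1FFFFF0000000000
     *   + 0xDFFFF80000000000     == 0xFFFFF70000000000
     */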
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -54,26 +54,26 @@ */ struct UNDReply { - decl_lck_mtx_data(,lock) /* UNDReply lock */ - int userLandNotificationKey; - KUNCUserNotificationCallBack callback; - boolean_t inprogress; - ipc_port_t self_port; /* Our port */ + decl_lck_mtx_data(, lock) /* UNDReply lock */ + int userLandNotificationKey; + KUNCUserNotificationCallBack callback; + boolean_t inprogress; + ipc_port_t self_port; /* Our port */ }; -#define UNDReply_lock(reply) lck_mtx_lock(&reply->lock) -#define UNDReply_unlock(reply) lck_mtx_unlock(&reply->lock) +#define UNDReply_lock(reply) lck_mtx_lock(&reply->lock) +#define UNDReply_unlock(reply) lck_mtx_unlock(&reply->lock) extern lck_grp_t LockCompatGroup; /* forward declarations */ void UNDReply_deallocate( - UNDReplyRef reply); + UNDReplyRef reply); void UNDReply_deallocate( - UNDReplyRef reply) + UNDReplyRef reply) { ipc_port_t port; @@ -103,43 +103,45 @@ UNDServer_reference(void) static void UNDServer_deallocate( - UNDServerRef UNDServer) + UNDServerRef UNDServer) { - if (IP_VALID(UNDServer)) + if (IP_VALID(UNDServer)) { ipc_port_release_send(UNDServer); + } } -/* +/* * UND Mig Callbacks -*/ + */ kern_return_t -UNDAlertCompletedWithResult_rpc ( - UNDReplyRef reply, - int result, - xmlData_t keyRef, /* raw XML bytes */ +UNDAlertCompletedWithResult_rpc( + UNDReplyRef reply, + int result, + xmlData_t keyRef, /* raw XML bytes */ #ifdef KERNEL_CF - mach_msg_type_number_t keyLen) + mach_msg_type_number_t keyLen) #else - __unused mach_msg_type_number_t keyLen) + __unused mach_msg_type_number_t keyLen) #endif { #ifdef KERNEL_CF - CFStringRef xmlError = NULL; - CFDictionaryRef dict = NULL; + CFStringRef xmlError = NULL; + CFDictionaryRef dict = NULL; #else const void *dict = (const void *)keyRef; #endif - if (reply == UND_REPLY_NULL || !reply->inprogress) + if (reply == UND_REPLY_NULL || !reply->inprogress) { return KERN_INVALID_ARGUMENT; + } /* * JMM - No C vesion of the Unserialize code in-kernel * and no C type for a CFDictionary either. For now, * just pass the raw keyRef through. */ -#ifdef KERNEL_CF +#ifdef KERNEL_CF if (keyRef && keyLen) { dict = IOCFUnserialize(keyRef, NULL, NULL, &xmlError); } @@ -171,12 +173,13 @@ UNDAlertCompletedWithResult_rpc ( * to identify that request. 
*/ kern_return_t -UNDNotificationCreated_rpc ( - UNDReplyRef reply, - int userLandNotificationKey) +UNDNotificationCreated_rpc( + UNDReplyRef reply, + int userLandNotificationKey) { - if (reply == UND_REPLY_NULL) + if (reply == UND_REPLY_NULL) { return KERN_INVALID_ARGUMENT; + } UNDReply_lock(reply); if (reply->inprogress || reply->userLandNotificationKey != -1) { @@ -190,7 +193,7 @@ UNDNotificationCreated_rpc ( /* * KUNC Functions -*/ + */ KUNCUserNotificationID @@ -209,17 +212,17 @@ KUNCGetNotificationID(void) reply->userLandNotificationKey = -1; reply->inprogress = FALSE; ipc_kobject_set(reply->self_port, - (ipc_kobject_t)reply, - IKOT_UND_REPLY); + (ipc_kobject_t)reply, + IKOT_UND_REPLY); } } return (KUNCUserNotificationID) reply; } -kern_return_t KUNCExecute(char executionPath[1024], int uid, int gid) +kern_return_t +KUNCExecute(char executionPath[1024], int uid, int gid) { - UNDServerRef UNDServer; UNDServer = UNDServer_reference(); @@ -232,15 +235,17 @@ kern_return_t KUNCExecute(char executionPath[1024], int uid, int gid) return MACH_SEND_INVALID_DEST; } -kern_return_t KUNCUserNotificationCancel( +kern_return_t +KUNCUserNotificationCancel( KUNCUserNotificationID id) { UNDReplyRef reply = (UNDReplyRef)id; kern_return_t kr; int ulkey; - if (reply == UND_REPLY_NULL) + if (reply == UND_REPLY_NULL) { return KERN_INVALID_ARGUMENT; + } UNDReply_lock(reply); if (!reply->inprogress) { @@ -257,10 +262,11 @@ kern_return_t KUNCUserNotificationCancel( UNDServer = UNDServer_reference(); if (IP_VALID(UNDServer)) { - kr = UNDCancelNotification_rpc(UNDServer,ulkey); + kr = UNDCancelNotification_rpc(UNDServer, ulkey); UNDServer_deallocate(UNDServer); - } else + } else { kr = MACH_SEND_INVALID_DEST; + } } else { UNDReply_unlock(reply); kr = KERN_SUCCESS; @@ -271,14 +277,14 @@ kern_return_t KUNCUserNotificationCancel( kern_return_t KUNCUserNotificationDisplayNotice( - int noticeTimeout, - unsigned flags, - char *iconPath, - char *soundPath, - char *localizationPath, - char *alertHeader, - char *alertMessage, - char *defaultButtonTitle) + int noticeTimeout, + unsigned flags, + char *iconPath, + char *soundPath, + char *localizationPath, + char *alertHeader, + char *alertMessage, + char *defaultButtonTitle) { UNDServerRef UNDServer; @@ -286,14 +292,14 @@ KUNCUserNotificationDisplayNotice( if (IP_VALID(UNDServer)) { kern_return_t kr; kr = UNDDisplayNoticeSimple_rpc(UNDServer, - noticeTimeout, - flags, - iconPath, - soundPath, - localizationPath, - alertHeader, - alertMessage, - defaultButtonTitle); + noticeTimeout, + flags, + iconPath, + soundPath, + localizationPath, + alertHeader, + alertMessage, + defaultButtonTitle); UNDServer_deallocate(UNDServer); return kr; } @@ -302,35 +308,35 @@ KUNCUserNotificationDisplayNotice( kern_return_t KUNCUserNotificationDisplayAlert( - int alertTimeout, - unsigned flags, - char *iconPath, - char *soundPath, - char *localizationPath, - char *alertHeader, - char *alertMessage, - char *defaultButtonTitle, - char *alternateButtonTitle, - char *otherButtonTitle, - unsigned *responseFlags) + int alertTimeout, + unsigned flags, + char *iconPath, + char *soundPath, + char *localizationPath, + char *alertHeader, + char *alertMessage, + char *defaultButtonTitle, + char *alternateButtonTitle, + char *otherButtonTitle, + unsigned *responseFlags) { - UNDServerRef UNDServer; - + UNDServerRef UNDServer; + UNDServer = UNDServer_reference(); if (IP_VALID(UNDServer)) { - kern_return_t kr; + kern_return_t kr; kr = UNDDisplayAlertSimple_rpc(UNDServer, - alertTimeout, - flags, - iconPath, - 
soundPath, - localizationPath, - alertHeader, - alertMessage, - defaultButtonTitle, - alternateButtonTitle, - otherButtonTitle, - responseFlags); + alertTimeout, + flags, + iconPath, + soundPath, + localizationPath, + alertHeader, + alertMessage, + defaultButtonTitle, + alternateButtonTitle, + otherButtonTitle, + responseFlags); UNDServer_deallocate(UNDServer); return kr; } @@ -339,21 +345,22 @@ KUNCUserNotificationDisplayAlert( kern_return_t KUNCUserNotificationDisplayFromBundle( - KUNCUserNotificationID id, - char *bundlePath, - char *fileName, - char *fileExtension, - char *messageKey, - char *tokenString, + KUNCUserNotificationID id, + char *bundlePath, + char *fileName, + char *fileExtension, + char *messageKey, + char *tokenString, KUNCUserNotificationCallBack callback, - __unused int contextKey) + __unused int contextKey) { UNDReplyRef reply = (UNDReplyRef)id; UNDServerRef UNDServer; ipc_port_t reply_port; - if (reply == UND_REPLY_NULL) + if (reply == UND_REPLY_NULL) { return KERN_INVALID_ARGUMENT; + } UNDReply_lock(reply); if (reply->inprogress == TRUE || reply->userLandNotificationKey != -1) { UNDReply_unlock(reply); @@ -369,12 +376,12 @@ KUNCUserNotificationDisplayFromBundle( kern_return_t kr; kr = UNDDisplayCustomFromBundle_rpc(UNDServer, - reply_port, - bundlePath, - fileName, - fileExtension, - messageKey, - tokenString); + reply_port, + bundlePath, + fileName, + fileExtension, + messageKey, + tokenString); UNDServer_deallocate(UNDServer); return kr; } @@ -416,10 +423,10 @@ convert_port_to_UNDReply( kern_return_t host_set_UNDServer( - host_priv_t host_priv, - UNDServerRef server) + host_priv_t host_priv, + UNDServerRef server) { - return (host_set_user_notification_port(host_priv, server)); + return host_set_user_notification_port(host_priv, server); } /* @@ -429,7 +436,7 @@ host_set_UNDServer( kern_return_t host_get_UNDServer( host_priv_t host_priv, - UNDServerRef *serverp) + UNDServerRef *serverp) { - return (host_get_user_notification_port(host_priv, serverp)); + return host_get_user_notification_port(host_priv, serverp); } diff --git a/osfmk/UserNotification/KUNCUserNotifications.h b/osfmk/UserNotification/KUNCUserNotifications.h index 8b3e3f0bb..ea3c50b55 100644 --- a/osfmk/UserNotification/KUNCUserNotifications.h +++ b/osfmk/UserNotification/KUNCUserNotifications.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
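The KUNC entry points above are thin RPC trampolines into the user-space notification daemon. A hedged call sketch against the signature declared in the header below (the string values are illustrative, and NULL paths are assumed to select defaults):

    kern_return_t kr = KUNCUserNotificationDisplayNotice(
        0,                     /* noticeTimeout: no auto-dismiss */
        0,                     /* flags */
        NULL, NULL, NULL,      /* iconPath, soundPath, localizationPath */
        "Example Header",      /* alertHeader */
        "Something happened.", /* alertMessage */
        "OK");                 /* defaultButtonTitle */

Passing string literals through the char * parameters draws const warnings; the legacy prototype predates const-correctness, and the whole family is marked deprecated.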
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -43,14 +43,14 @@ __BEGIN_DECLS */ kern_return_t KUNCUserNotificationDisplayNotice( - int noticeTimeout, - unsigned flags, - char *iconPath, - char *soundPath, - char *localizationPath, - char *alertHeader, - char *alertMessage, - char *defaultButtonTitle) __attribute__((deprecated)); + int noticeTimeout, + unsigned flags, + char *iconPath, + char *soundPath, + char *localizationPath, + char *alertHeader, + char *alertMessage, + char *defaultButtonTitle) __attribute__((deprecated)); /* * ***BLOCKING*** alert call, returned int value corresponds to the @@ -58,36 +58,36 @@ KUNCUserNotificationDisplayNotice( */ kern_return_t KUNCUserNotificationDisplayAlert( - int alertTimeout, - unsigned flags, - char *iconPath, - char *soundPath, - char *localizationPath, - char *alertHeader, - char *alertMessage, - char *defaultButtonTitle, - char *alternateButtonTitle, - char *otherButtonTitle, - unsigned *responseFlags) __attribute__((deprecated)); + int alertTimeout, + unsigned flags, + char *iconPath, + char *soundPath, + char *localizationPath, + char *alertHeader, + char *alertMessage, + char *defaultButtonTitle, + char *alternateButtonTitle, + char *otherButtonTitle, + unsigned *responseFlags) __attribute__((deprecated)); /* * Execute a userland executable with the given path, user and type */ - -#define kOpenApplicationPath 0 /* essentially executes the path */ -#define kOpenPreferencePanel 1 /* runs the preferences with the foo.preference opened. foo.preference must exist in /System/Library/Preferences */ -#define kOpenApplication 2 /* essentially runs /usr/bin/open on the passed in application name */ - - -#define kOpenAppAsRoot 0 -#define kOpenAppAsConsoleUser 1 - + +#define kOpenApplicationPath 0 /* essentially executes the path */ +#define kOpenPreferencePanel 1 /* runs the preferences with the foo.preference opened. 
foo.preference must exist in /System/Library/Preferences */ +#define kOpenApplication 2 /* essentially runs /usr/bin/open on the passed in application name */ + + +#define kOpenAppAsRoot 0 +#define kOpenAppAsConsoleUser 1 + kern_return_t KUNCExecute( - char *executionPath, - int openAsUser, - int pathExecutionType) __attribute__((deprecated)); + char *executionPath, + int openAsUser, + int pathExecutionType) __attribute__((deprecated)); /* KUNC User Notification XML Keys @@ -101,30 +101,30 @@ KUNCExecute( * * Key Type * Header string (header displayed on dialog) - * corresponds to kCFUserNotificationAlertHeaderKey + * corresponds to kCFUserNotificationAlertHeaderKey * * Icon URL string (url of the icon to display) - * corresponds to kCFUserNotificationIconURLKey + * corresponds to kCFUserNotificationIconURLKey * * Sound URL string (url of the sound to play on display) - * corresponds to kCFUserNotificationSoundURLKey + * corresponds to kCFUserNotificationSoundURLKey * * Localization URL string (url of bundle to retrieve localization * info from, using Localizable.strings files) - * corresponds to kCFUserNotificationLocalizationURLKey + * corresponds to kCFUserNotificationLocalizationURLKey * * Message string (text of the message, can contain %@'s - * which are filled from tokenString passed in) - * corresponds to kCFUserNotificationAlertMessageKey + * which are filled from tokenString passed in) + * corresponds to kCFUserNotificationAlertMessageKey * - * OK Button Title string (title of the "main" button) - * corresponds to kCFUserNotificationDefaultButtonTitleKey + * OK Button Title string (title of the "main" button) + * corresponds to kCFUserNotificationDefaultButtonTitleKey * - * Alternate Button Title string (title of the "alternate" button, usually cancel) - * corresponds to kCFUserNotificationAlternateButtonTitleKey + * Alternate Button Title string (title of the "alternate" button, usually cancel) + * corresponds to kCFUserNotificationAlternateButtonTitleKey * - * Other Button Title string (title of the "other" button) - * corresponds to kCFUserNotificationOtherButtonTitleKey + * Other Button Title string (title of the "other" button) + * corresponds to kCFUserNotificationOtherButtonTitleKey * * Timeout string (numeric, int - seconds until the dialog * goes away on it's own) @@ -135,7 +135,7 @@ KUNCExecute( * have no buttons) * * Text Field Strings array of strings (each becomes a text field) - * corresponds to kCFUserNotificationTextFieldTitlesKey + * corresponds to kCFUserNotificationTextFieldTitlesKey * * Password Fields array of strings (numeric - each indicates a * pwd field) @@ -146,7 +146,7 @@ KUNCExecute( * Radio Button Strings array of strings (each becomes a radio button) * * Check Box Strings array of strings (each becomes a check box) - * corresponds to kCFUserNotificationCheckBoxTitlesKey + * corresponds to kCFUserNotificationCheckBoxTitlesKey * * Selected Radio string (numeric - which radio is selected) * @@ -169,7 +169,7 @@ KUNCExecute( * This WILL change soon to expect the CFBundleIdentifier instead of a bundle path * fileName * filename in bundle to retrive the xml from (i.e. 
"Messages") - * fileExtension + * fileExtension * if fileName has an extension, it goes here (i.e., "dict"); * messageKey * name of the xml key in the dictionary in the file to retrieve @@ -196,23 +196,23 @@ typedef uintptr_t KUNCUserNotificationID; */ enum { - kKUNCDefaultResponse = 0, - kKUNCAlternateResponse = 1, - kKUNCOtherResponse = 2, - kKUNCCancelResponse = 3 + kKUNCDefaultResponse = 0, + kKUNCAlternateResponse = 1, + kKUNCOtherResponse = 2, + kKUNCCancelResponse = 3 }; -#define KUNCCheckBoxChecked(i) (1 << (8 + i)) /* can be used for radio's too */ -#define KUNCPopUpSelection(n) (n << 24) +#define KUNCCheckBoxChecked(i) (1 << (8 + i)) /* can be used for radio's too */ +#define KUNCPopUpSelection(n) (n << 24) /* * Callback function for KUNCNotifications */ typedef void (*KUNCUserNotificationCallBack)( - int contextKey, - int responseFlags, - const void *xmlData); + int contextKey, + int responseFlags, + const void *xmlData); /* * Get a notification ID @@ -223,19 +223,19 @@ KUNCUserNotificationID KUNCGetNotificationID(void) __attribute__((deprecated)); kern_return_t KUNCUserNotificationDisplayFromBundle( - KUNCUserNotificationID notificationID, - char *bundleIdentifier, - char *fileName, - char *fileExtension, - char *messageKey, - char *tokenString, - KUNCUserNotificationCallBack callback, - int contextKey) __attribute__((deprecated)); + KUNCUserNotificationID notificationID, + char *bundleIdentifier, + char *fileName, + char *fileExtension, + char *messageKey, + char *tokenString, + KUNCUserNotificationCallBack callback, + int contextKey) __attribute__((deprecated)); kern_return_t KUNCUserNotificationCancel( - KUNCUserNotificationID notification) __attribute__((deprecated)); + KUNCUserNotificationID notification) __attribute__((deprecated)); __END_DECLS diff --git a/osfmk/UserNotification/UNDTypes.h b/osfmk/UserNotification/UNDTypes.h index 01d2960b8..49691e0ac 100644 --- a/osfmk/UserNotification/UNDTypes.h +++ b/osfmk/UserNotification/UNDTypes.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -73,9 +73,8 @@ typedef mach_port_t UNDReplyRef; #endif /* ! 
KERNEL_PRIVATE */ #define UND_REPLY_NULL ((UNDReplyRef)0) -#define XML_DATA_NULL ((xmlData_t)0) +#define XML_DATA_NULL ((xmlData_t)0) #endif /* __APPLE_API_PRIVATE */ -#endif /* __USERNOTIFICATION_UNDTPES_H */ - +#endif /* __USERNOTIFICATION_UNDTPES_H */ diff --git a/osfmk/arm/arch.h b/osfmk/arm/arch.h index 8c38de577..7b18742da 100644 --- a/osfmk/arm/arch.h +++ b/osfmk/arm/arch.h @@ -42,17 +42,17 @@ #endif #if defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) || \ - defined (_ARM_ARCH_6Z) || defined (_ARM_ARCH_6K) + defined (_ARM_ARCH_6Z) || defined (_ARM_ARCH_6K) #define _ARM_ARCH_6 #endif #if defined (_ARM_ARCH_6) || defined (__ARM_ARCH_5E__) || \ - defined (__ARM_ARCH_5TE__) || defined (__ARM_ARCH_5TEJ__) + defined (__ARM_ARCH_5TE__) || defined (__ARM_ARCH_5TEJ__) #define _ARM_ARCH_5E #endif #if defined (_ARM_ARCH_5E) || defined (__ARM_ARCH_5__) || \ - defined (__ARM_ARCH_5T__) + defined (__ARM_ARCH_5T__) #define _ARM_ARCH_5 #endif diff --git a/osfmk/arm/arm_init.c b/osfmk/arm/arm_init.c index b38086203..cf4906915 100644 --- a/osfmk/arm/arm_init.c +++ b/osfmk/arm/arm_init.c @@ -78,8 +78,8 @@ #include #endif /* MONOTONIC */ -extern void patch_low_glo(void); -extern int serial_init(void); +extern void patch_low_glo(void); +extern int serial_init(void); extern void sleep_token_buffer_init(void); extern vm_offset_t intstack_top; @@ -110,7 +110,7 @@ boot_args const_boot_args __attribute__((section("__DATA, __const"))); boot_args *BootArgs __attribute__((section("__DATA, __const"))); unsigned int arm_diag; -#ifdef APPLETYPHOON +#ifdef APPLETYPHOON static unsigned cpus_defeatures = 0x0; extern void cpu_defeatures_set(unsigned int); #endif @@ -121,15 +121,13 @@ extern volatile boolean_t arm64_stall_sleep; extern boolean_t force_immediate_debug_halt; -#define MIN_LOW_GLO_MASK (0x144) - /* * Forward definition */ void arm_init(boot_args * args); #if __arm64__ -unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */ +unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */ #endif /* __arm64__ */ @@ -156,15 +154,13 @@ rebase_chain(uintptr_t chainStartAddress, uint64_t stepMultiplier, uintptr_t bas // Add in the offset from the mach_header newValue += baseAddress; *(uint64_t*)address = newValue; - - } else - { + } else { // Regular pointer which needs to fit in 51-bits of value. // C++ RTTI uses the top bit, so we'll allow the whole top-byte // and the bottom 43-bits to be fit in to 51-bits. 
uint64_t top8Bits = value & 0x0007F80000000000ULL; uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL; - uint64_t targetValue = ( top8Bits << 13 ) | (((intptr_t)(bottom43Bits << 21) >> 21) & 0x00FFFFFFFFFFFFFF); + uint64_t targetValue = (top8Bits << 13) | (((intptr_t)(bottom43Bits << 21) >> 21) & 0x00FFFFFFFFFFFFFF); targetValue = targetValue + slide; *(uint64_t*)address = targetValue; } @@ -173,21 +169,22 @@ rebase_chain(uintptr_t chainStartAddress, uint64_t stepMultiplier, uintptr_t bas // The delta is bits [51..61] // And bit 62 is to tell us if we are a rebase (0) or bind (1) value &= ~(1ULL << 62); - delta = ( value & 0x3FF8000000000000 ) >> 51; + delta = (value & 0x3FF8000000000000) >> 51; address += delta * stepMultiplier; - } while ( delta != 0 ); + } while (delta != 0); } // Note, the following method should come from a header from dyld static bool rebase_threaded_starts(uint32_t *threadArrayStart, uint32_t *threadArrayEnd, - uintptr_t macho_header_addr, uintptr_t macho_header_vmaddr, size_t slide) + uintptr_t macho_header_addr, uintptr_t macho_header_vmaddr, size_t slide) { uint32_t threadStartsHeader = *threadArrayStart; uint64_t stepMultiplier = (threadStartsHeader & 1) == 1 ? 8 : 4; for (uint32_t* threadOffset = threadArrayStart + 1; threadOffset != threadArrayEnd; ++threadOffset) { - if (*threadOffset == 0xFFFFFFFF) + if (*threadOffset == 0xFFFFFFFF) { break; + } rebase_chain(macho_header_addr + *threadOffset, stepMultiplier, macho_header_vmaddr, slide); } return true; @@ -203,7 +200,7 @@ extern uint32_t __thread_starts_sect_end[] __asm("section$end$__TEXT$__thread_ void arm_init( - boot_args *args) + boot_args *args) { unsigned int maxmem; uint32_t memsize; @@ -211,14 +208,13 @@ arm_init( thread_t thread; processor_t my_master_proc; - // rebase and sign jops - if (&__thread_starts_sect_end[0] != &__thread_starts_sect_start[0]) - { + // rebase and sign jops + if (&__thread_starts_sect_end[0] != &__thread_starts_sect_start[0]) { uintptr_t mh = (uintptr_t) &_mh_execute_header; uintptr_t slide = mh - VM_KERNEL_LINK_ADDRESS; rebase_threaded_starts( &__thread_starts_sect_start[0], - &__thread_starts_sect_end[0], - mh, mh - slide, slide); + &__thread_starts_sect_end[0], + mh, mh - slide, slide); } /* If kernel integrity is supported, use a constant copy of the boot args. */ @@ -235,15 +231,16 @@ arm_init( { unsigned int tmp_16k = 0; -#ifdef XXXX +#ifdef XXXX /* * Select the advertised kernel page size; without the boot-arg * we default to the hardware page size for the current platform. */ - if (PE_parse_boot_argn("-vm16k", &tmp_16k, sizeof(tmp_16k))) + if (PE_parse_boot_argn("-vm16k", &tmp_16k, sizeof(tmp_16k))) { PAGE_SHIFT_CONST = PAGE_MAX_SHIFT; - else + } else { PAGE_SHIFT_CONST = ARM_PGSHIFT; + } #else /* * Select the advertised kernel page size; with the boot-arg @@ -252,7 +249,7 @@ arm_init( int radar_20804515 = 1; /* default: new mode */ PE_parse_boot_argn("radar_20804515", &radar_20804515, sizeof(radar_20804515)); if (radar_20804515) { - if (args->memSize > 1ULL*1024*1024*1024) { + if (args->memSize > 1ULL * 1024 * 1024 * 1024) { /* * arm64 device with > 1GB of RAM: * kernel uses 16KB pages. 
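rebase_chain() above walks dyld's threaded-rebase format: each 64-bit slot packs the rebase target into bits [50:0] as a top-8/bottom-43 split, the distance to the next fixup into bits [61:51] (in units of stepMultiplier), and a rebase/bind discriminator into bit 62. A hedged helper isolating just the target unpacking from the non-authenticated branch (names are illustrative; the bit manipulation mirrors the code above):

    #include <stdint.h>

    static uint64_t
    unpack_rebase_target(uint64_t value, uint64_t slide)
    {
        uint64_t top8     = value & 0x0007F80000000000ULL; /* bits [50:43] */
        uint64_t bottom43 = value & 0x000007FFFFFFFFFFULL; /* bits [42:0]  */
        /* Move the top byte up to bits [63:56] and replicate bit 42 across
         * bits [55:43], rebuilding a canonical pointer before sliding it. */
        uint64_t target = (top8 << 13) |
            (((int64_t)(bottom43 << 21) >> 21) & 0x00FFFFFFFFFFFFFFULL);
        return target + slide;
    }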
@@ -270,21 +267,23 @@ arm_init( page_shift_user32 = PAGE_MAX_SHIFT; } else { /* kernel page size: */ - if (PE_parse_boot_argn("-use_hwpagesize", &tmp_16k, sizeof(tmp_16k))) + if (PE_parse_boot_argn("-use_hwpagesize", &tmp_16k, sizeof(tmp_16k))) { PAGE_SHIFT_CONST = ARM_PGSHIFT; - else + } else { PAGE_SHIFT_CONST = PAGE_MAX_SHIFT; + } /* old mode: 32-bit apps see same page size as kernel */ page_shift_user32 = PAGE_SHIFT_CONST; } #endif -#ifdef APPLETYPHOON +#ifdef APPLETYPHOON if (PE_parse_boot_argn("cpus_defeatures", &cpus_defeatures, sizeof(cpus_defeatures))) { - if ((cpus_defeatures & 0xF) != 0) + if ((cpus_defeatures & 0xF) != 0) { cpu_defeatures_set(cpus_defeatures & 0xF); + } } #endif - } + } #endif ml_parse_cpu_topology(); @@ -293,45 +292,45 @@ arm_init( assert(master_cpu >= 0 && master_cpu <= ml_get_max_cpu_number()); BootCpuData.cpu_number = (unsigned short)master_cpu; -#if __arm__ +#if __arm__ BootCpuData.cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable; #endif - BootCpuData.intstack_top = (vm_offset_t) & intstack_top; + BootCpuData.intstack_top = (vm_offset_t) &intstack_top; BootCpuData.istackptr = BootCpuData.intstack_top; #if __arm64__ - BootCpuData.excepstack_top = (vm_offset_t) & excepstack_top; + BootCpuData.excepstack_top = (vm_offset_t) &excepstack_top; BootCpuData.excepstackptr = BootCpuData.excepstack_top; #else - BootCpuData.fiqstack_top = (vm_offset_t) & fiqstack_top; + BootCpuData.fiqstack_top = (vm_offset_t) &fiqstack_top; BootCpuData.fiqstackptr = BootCpuData.fiqstack_top; #endif BootCpuData.cpu_processor = cpu_processor_alloc(TRUE); BootCpuData.cpu_console_buf = (void *)NULL; CpuDataEntries[master_cpu].cpu_data_vaddr = &BootCpuData; CpuDataEntries[master_cpu].cpu_data_paddr = (void *)((uintptr_t)(args->physBase) - + ((uintptr_t)&BootCpuData - - (uintptr_t)(args->virtBase))); + + ((uintptr_t)&BootCpuData + - (uintptr_t)(args->virtBase))); thread_bootstrap(); thread = current_thread(); /* * Preemption is enabled for this thread so that it can lock mutexes without * tripping the preemption check. In reality scheduling is not enabled until - * this thread completes, and there are no other threads to switch to, so + * this thread completes, and there are no other threads to switch to, so * preemption level is not really meaningful for the bootstrap thread. 
*/ thread->machine.preemption_count = 0; thread->machine.CpuDatap = &BootCpuData; -#if __arm__ && __ARM_USER_PROTECT__ - { - unsigned int ttbr0_val, ttbr1_val, ttbcr_val; - __asm__ volatile("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val)); - __asm__ volatile("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val)); - __asm__ volatile("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val)); +#if __arm__ && __ARM_USER_PROTECT__ + { + unsigned int ttbr0_val, ttbr1_val, ttbcr_val; + __asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val)); + __asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val)); + __asm__ volatile ("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val)); thread->machine.uptw_ttb = ttbr0_val; thread->machine.kptw_ttb = ttbr1_val; thread->machine.uptw_ttc = ttbcr_val; - } + } #endif BootCpuData.cpu_processor->processor_data.kernel_timer = &thread->system_timer; BootCpuData.cpu_processor->processor_data.thread_timer = &thread->system_timer; @@ -349,14 +348,15 @@ arm_init( processor_bootstrap(); my_master_proc = master_processor; - (void)PE_parse_boot_argn("diag", &arm_diag, sizeof (arm_diag)); + (void)PE_parse_boot_argn("diag", &arm_diag, sizeof(arm_diag)); - if (PE_parse_boot_argn("maxmem", &maxmem, sizeof (maxmem))) - xmaxmem = (uint64_t) maxmem *(1024 * 1024); - else if (PE_get_default("hw.memsize", &memsize, sizeof (memsize))) + if (PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) { + xmaxmem = (uint64_t) maxmem * (1024 * 1024); + } else if (PE_get_default("hw.memsize", &memsize, sizeof(memsize))) { xmaxmem = (uint64_t) memsize; - else + } else { xmaxmem = 0; + } if (PE_parse_boot_argn("up_style_idle_exit", &up_style_idle_exit, sizeof(up_style_idle_exit))) { up_style_idle_exit = 1; @@ -365,8 +365,8 @@ arm_init( int wdt_boot_arg = 0; /* Disable if WDT is disabled or no_interrupt_mask_debug in boot-args */ if (PE_parse_boot_argn("no_interrupt_masked_debug", &interrupt_masked_debug, - sizeof(interrupt_masked_debug)) || (PE_parse_boot_argn("wdt", &wdt_boot_arg, - sizeof(wdt_boot_arg)) && (wdt_boot_arg == -1))) { + sizeof(interrupt_masked_debug)) || (PE_parse_boot_argn("wdt", &wdt_boot_arg, + sizeof(wdt_boot_arg)) && (wdt_boot_arg == -1))) { interrupt_masked_debug = 0; } @@ -385,8 +385,9 @@ arm_init( uint32_t debugmode; if (PE_parse_boot_argn("debug", &debugmode, sizeof(debugmode)) && - ((debugmode & MIN_LOW_GLO_MASK) == MIN_LOW_GLO_MASK)) + debugmode) { patch_low_glo(); + } printf_init(); panic_init(); @@ -407,17 +408,17 @@ arm_init( serialmode = 0; /* Assume normal keyboard and console */ if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) { /* Do we want a serial - * keyboard and/or - * console? */ + * keyboard and/or + * console? */ kprintf("Serial mode specified: %08X\n", serialmode); int force_sync = serialmode & SERIALMODE_SYNCDRAIN; if (force_sync || PE_parse_boot_argn("drain_uart_sync", &force_sync, sizeof(force_sync))) { if (force_sync) { serialmode |= SERIALMODE_SYNCDRAIN; kprintf( - "WARNING: Forcing uart driver to output synchronously." - "printf()s/IOLogs will impact kernel performance.\n" - "You are advised to avoid using 'drain_uart_sync' boot-arg.\n"); + "WARNING: Forcing uart driver to output synchronously." 
+ "printf()s/IOLogs will impact kernel performance.\n" + "You are advised to avoid using 'drain_uart_sync' boot-arg.\n"); } } } @@ -442,9 +443,10 @@ arm_init( cpu_machine_idle_init(TRUE); -#if (__ARM_ARCH__ == 7) - if (arm_diag & 0x8000) +#if (__ARM_ARCH__ == 7) + if (arm_diag & 0x8000) { set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC); + } #endif PE_init_platform(TRUE, &BootCpuData); @@ -475,7 +477,7 @@ arm_init( void arm_init_cpu( - cpu_data_t *cpu_data_ptr) + cpu_data_t *cpu_data_ptr) { #if __ARM_PAN_AVAILABLE__ __builtin_arm_wsr("pan", 1); @@ -499,13 +501,15 @@ arm_init_cpu( cpu_init(); -#if (__ARM_ARCH__ == 7) - if (arm_diag & 0x8000) +#if (__ARM_ARCH__ == 7) + if (arm_diag & 0x8000) { set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC); + } #endif -#ifdef APPLETYPHOON - if ((cpus_defeatures & (0xF << 4*cpu_data_ptr->cpu_number)) != 0) - cpu_defeatures_set((cpus_defeatures >> 4*cpu_data_ptr->cpu_number) & 0xF); +#ifdef APPLETYPHOON + if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) { + cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF); + } #endif /* Initialize the timebase before serial_init, as some serial * drivers use mach_absolute_time() to implement rate control @@ -554,7 +558,7 @@ arm_init_cpu( */ void __attribute__((noreturn)) arm_init_idle_cpu( - cpu_data_t *cpu_data_ptr) + cpu_data_t *cpu_data_ptr) { #if __ARM_PAN_AVAILABLE__ __builtin_arm_wsr("pan", 1); @@ -572,13 +576,15 @@ arm_init_idle_cpu( __builtin_arm_wsr("DAIFClr", DAIFSC_ASYNCF); #endif -#if (__ARM_ARCH__ == 7) - if (arm_diag & 0x8000) +#if (__ARM_ARCH__ == 7) + if (arm_diag & 0x8000) { set_mmu_control((get_mmu_control()) ^ SCTLR_PREDIC); + } #endif -#ifdef APPLETYPHOON - if ((cpus_defeatures & (0xF << 4*cpu_data_ptr->cpu_number)) != 0) - cpu_defeatures_set((cpus_defeatures >> 4*cpu_data_ptr->cpu_number) & 0xF); +#ifdef APPLETYPHOON + if ((cpus_defeatures & (0xF << 4 * cpu_data_ptr->cpu_number)) != 0) { + cpu_defeatures_set((cpus_defeatures >> 4 * cpu_data_ptr->cpu_number) & 0xF); + } #endif fiq_context_init(FALSE); diff --git a/osfmk/arm/arm_timer.c b/osfmk/arm/arm_timer.c index a38ff056d..da4a0c3b5 100644 --- a/osfmk/arm/arm_timer.c +++ b/osfmk/arm/arm_timer.c @@ -56,7 +56,7 @@ #include /* - * Event timer interrupt. + * Event timer interrupt. * * XXX a drawback of this implementation is that events serviced earlier must not set deadlines * that occur before the entire chain completes. @@ -71,8 +71,8 @@ timer_intr(__unused int inuser, __unused uint64_t iaddr) cpu_data_t *cpu_data_ptr; cpu_data_ptr = getCpuDatap(); - mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the event timer */ - abstime = mach_absolute_time(); /* Get the time now */ + mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the event timer */ + abstime = mach_absolute_time(); /* Get the time now */ /* is it time for an idle timer event? */ if ((cpu_data_ptr->idle_timer_deadline > 0) && (cpu_data_ptr->idle_timer_deadline <= abstime)) { @@ -88,17 +88,17 @@ timer_intr(__unused int inuser, __unused uint64_t iaddr) clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline); } - abstime = mach_absolute_time(); /* Get the time again since we ran a bit */ + abstime = mach_absolute_time(); /* Get the time again since we ran a bit */ } /* has a pending clock timer expired? */ - if (mytimer->deadline <= abstime) { /* Have we expired the - * deadline? 
*/ - mytimer->has_expired = TRUE; /* Remember that we popped */ - mytimer->deadline = EndOfAllTime; /* Set timer request to - * the end of all time - * in case we have no - * more events */ + if (mytimer->deadline <= abstime) { /* Have we expired the + * deadline? */ + mytimer->has_expired = TRUE; /* Remember that we popped */ + mytimer->deadline = EndOfAllTime; /* Set timer request to + * the end of all time + * in case we have no + * more events */ mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime); mytimer->has_expired = FALSE; abstime = mach_absolute_time(); /* Get the time again since we ran a bit */ @@ -120,18 +120,18 @@ timer_intr(__unused int inuser, __unused uint64_t iaddr) /* * Set the clock deadline */ -void +void timer_set_deadline(uint64_t deadline) { rtclock_timer_t *mytimer; spl_t s; cpu_data_t *cpu_data_ptr; - s = splclock(); /* no interruptions */ + s = splclock(); /* no interruptions */ cpu_data_ptr = getCpuDatap(); - mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the timer itself */ - mytimer->deadline = deadline; /* Set the new expiration time */ + mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the timer itself */ + mytimer->deadline = deadline; /* Set the new expiration time */ timer_resync_deadlines(); @@ -161,7 +161,7 @@ timer_resync_deadlines(void) { uint64_t deadline; rtclock_timer_t *mytimer; - spl_t s = splclock(); /* No interruptions please */ + spl_t s = splclock(); /* No interruptions please */ cpu_data_t *cpu_data_ptr; cpu_data_ptr = getCpuDatap(); @@ -169,19 +169,22 @@ timer_resync_deadlines(void) deadline = 0; /* if we have a clock timer set sooner, pop on that */ - mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the timer itself */ - if ((!mytimer->has_expired) && (mytimer->deadline > 0)) + mytimer = &cpu_data_ptr->rtclock_timer; /* Point to the timer itself */ + if ((!mytimer->has_expired) && (mytimer->deadline > 0)) { deadline = mytimer->deadline; + } /* if we have a idle timer event coming up, how about that? 
*/ if ((cpu_data_ptr->idle_timer_deadline > 0) - && (cpu_data_ptr->idle_timer_deadline < deadline)) + && (cpu_data_ptr->idle_timer_deadline < deadline)) { deadline = cpu_data_ptr->idle_timer_deadline; + } /* If we have the quantum timer setup, check that */ if ((cpu_data_ptr->quantum_timer_deadline > 0) - && (cpu_data_ptr->quantum_timer_deadline < deadline)) + && (cpu_data_ptr->quantum_timer_deadline < deadline)) { deadline = cpu_data_ptr->quantum_timer_deadline; + } if ((deadline == EndOfAllTime) || ((deadline > 0) && (cpu_data_ptr->rtcPop != deadline))) { @@ -189,8 +192,8 @@ timer_resync_deadlines(void) decr = setPop(deadline); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE, + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) | DBG_FUNC_NONE, decr, 2, 0, 0, 0); } splx(s); @@ -198,38 +201,41 @@ timer_resync_deadlines(void) boolean_t -timer_resort_threshold(__unused uint64_t skew) { - return FALSE; +timer_resort_threshold(__unused uint64_t skew) +{ + return FALSE; } mpqueue_head_t * timer_queue_assign( - uint64_t deadline) + uint64_t deadline) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); - mpqueue_head_t *queue; + cpu_data_t *cpu_data_ptr = getCpuDatap(); + mpqueue_head_t *queue; if (cpu_data_ptr->cpu_running) { queue = &cpu_data_ptr->rtclock_timer.queue; - if (deadline < cpu_data_ptr->rtclock_timer.deadline) + if (deadline < cpu_data_ptr->rtclock_timer.deadline) { timer_set_deadline(deadline); - } - else + } + } else { queue = &cpu_datap(master_cpu)->rtclock_timer.queue; + } - return (queue); + return queue; } void timer_queue_cancel( - mpqueue_head_t *queue, - uint64_t deadline, - uint64_t new_deadline) + mpqueue_head_t *queue, + uint64_t deadline, + uint64_t new_deadline) { if (queue == &getCpuDatap()->rtclock_timer.queue) { - if (deadline < new_deadline) + if (deadline < new_deadline) { timer_set_deadline(new_deadline); + } } } @@ -269,11 +275,12 @@ static timer_coalescing_priority_params_ns_t tcoal_prio_params_init = .timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC, .timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC, .latency_qos_scale = {3, 2, 1, -2, -15, -15}, - .latency_qos_ns_max ={1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC, - 75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC}, + .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC, + 75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC}, .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE}, }; -timer_coalescing_priority_params_ns_t * timer_call_get_priority_params(void) +timer_coalescing_priority_params_ns_t * +timer_call_get_priority_params(void) { return &tcoal_prio_params_init; } diff --git a/osfmk/arm/arm_vm_init.c b/osfmk/arm/arm_vm_init.c index ebdfe7735..5a2488458 100644 --- a/osfmk/arm/arm_vm_init.c +++ b/osfmk/arm/arm_vm_init.c @@ -82,7 +82,7 @@ vm_offset_t vm_prelink_edata; vm_offset_t vm_kernel_builtinkmod_text; vm_offset_t vm_kernel_builtinkmod_text_end; -unsigned long gVirtBase, gPhysBase, gPhysSize; /* Used by */ +unsigned long gVirtBase, gPhysBase, gPhysSize; /* Used by */ vm_offset_t mem_size; /* Size of actual physical memory present * minus any performance buffer and possibly @@ -155,12 +155,12 @@ extern vm_offset_t ExceptionVectorsBase; /* the code we want to load there */ vm_map_address_t phystokv(pmap_paddr_t pa) { - return (pa - gPhysBase + gVirtBase); + return pa - gPhysBase + gVirtBase; } static void -arm_vm_page_granular_helper(vm_offset_t start, 
vm_offset_t _end, vm_offset_t va, - int pte_prot_APX, int pte_prot_XN) +arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, + int pte_prot_APX, int pte_prot_XN) { if (va & ARM_TT_L1_PT_OFFMASK) { /* ragged edge hanging over a ARM_TT_L1_PT_SIZE boundary */ va &= (~ARM_TT_L1_PT_OFFMASK); @@ -172,8 +172,9 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, pa = va - gVirtBase + gPhysBase; - if (pa >= avail_end) + if (pa >= avail_end) { return; + } assert(_end >= va); @@ -186,24 +187,26 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, avail_start += ARM_PGBYTES; bzero(ppte, ARM_PGBYTES); - for (i = 0; i < 4; ++i) + for (i = 0; i < 4; ++i) { tte[i] = pa_to_tte(kvtophys((vm_offset_t)ppte) + (i * 0x400)) | ARM_TTE_TYPE_TABLE; + } } vm_offset_t len = _end - va; - if ((pa + len) > avail_end) + if ((pa + len) > avail_end) { _end -= (pa + len - avail_end); + } assert((start - gVirtBase + gPhysBase) >= gPhysBase); /* Apply the desired protections to the specified page range */ for (i = 0; i < (ARM_PGBYTES / sizeof(*ppte)); i++) { if (start <= va && va < _end) { - ptmp = pa | ARM_PTE_AF | ARM_PTE_SH | ARM_PTE_TYPE; ptmp = ptmp | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT); ptmp = ptmp | ARM_PTE_AP(pte_prot_APX); - if (pte_prot_XN) + if (pte_prot_XN) { ptmp = ptmp | ARM_PTE_NX; + } ppte[i] = ptmp; } @@ -215,8 +218,8 @@ arm_vm_page_granular_helper(vm_offset_t start, vm_offset_t _end, vm_offset_t va, } static void -arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, - int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule) +arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, + int tte_prot_XN, int pte_prot_APX, int pte_prot_XN, int force_page_granule) { vm_offset_t _end = start + size; vm_offset_t align_start = (start + ARM_TT_L1_PT_OFFMASK) & ~ARM_TT_L1_PT_OFFMASK; @@ -226,8 +229,8 @@ arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, while (align_start < align_end) { if (force_page_granule) { - arm_vm_page_granular_helper(align_start, align_end, align_start + 1, - pte_prot_APX, pte_prot_XN); + arm_vm_page_granular_helper(align_start, align_end, align_start + 1, + pte_prot_APX, pte_prot_XN); } else { tt_entry_t *tte = &cpu_tte[ttenum(align_start)]; for (int i = 0; i < 4; ++i) { @@ -235,8 +238,9 @@ arm_vm_page_granular_prot(vm_offset_t start, unsigned long size, tmplate = (tmplate & ~ARM_TTE_BLOCK_APMASK) | ARM_TTE_BLOCK_AP(pte_prot_APX); tmplate = (tmplate & ~ARM_TTE_BLOCK_NX_MASK); - if (tte_prot_XN) + if (tte_prot_XN) { tmplate = tmplate | ARM_TTE_BLOCK_NX; + } tte[i] = tmplate; } @@ -281,9 +285,9 @@ arm_vm_prot_init(boot_args * args) #endif /* * Enforce W^X protections on segments that have been identified so far. This will be - * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions() + * further refined for each KEXT's TEXT and DATA segments in readPrelinkedExtensions() */ - + /* * Protection on kernel text is loose here to allow shenanigans early on (e.g. copying exception vectors) * and storing an address into "error_buffer" (see arm_init.c) !?! 
@@ -313,7 +317,7 @@ arm_vm_prot_init(boot_args * args) arm_vm_page_granular_RWNX(segLASTB, segSizeLAST, FALSE); // __LAST may be empty, but we cannot assume this arm_vm_page_granular_RWNX(segPRELINKTEXTB, segSizePRELINKTEXT, TRUE); // Refined in OSKext::readPrelinkedExtensions arm_vm_page_granular_RWNX(segPRELINKTEXTB + segSizePRELINKTEXT, - end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary + end_kern - (segPRELINKTEXTB + segSizePRELINKTEXT), force_coarse_physmap); // PreLinkInfoDictionary arm_vm_page_granular_RWNX(end_kern, phystokv(args->topOfKernelData) - end_kern, force_coarse_physmap); // Device Tree, RAM Disk (if present), bootArgs, trust caches arm_vm_page_granular_RNX(segEXTRADATA, segSizeEXTRADATA, FALSE); // tighter trust cache protection arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData), ARM_PGBYTES * 8, FALSE); // boot_tte, cpu_tte @@ -331,7 +335,7 @@ arm_vm_prot_init(boot_args * args) /* Map the remainder of xnu owned memory. */ arm_vm_page_granular_RWNX(phystokv(args->topOfKernelData) + ARM_PGBYTES * 10, - static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */ + static_memory_end - (phystokv(args->topOfKernelData) + ARM_PGBYTES * 10), force_coarse_physmap); /* rest of physmem */ /* * Special case write protection for the mapping of ExceptionVectorsBase (EVB) at 0xFFFF0000. @@ -371,8 +375,8 @@ arm_vm_prot_finalize(boot_args * args) /* used in the chosen/memory-map node, populated by iBoot. */ typedef struct MemoryMapFileInfo { - vm_offset_t paddr; - size_t length; + vm_offset_t paddr; + size_t length; } MemoryMapFileInfo; @@ -393,10 +397,12 @@ arm_vm_init(uint64_t memory_size, boot_args * args) gPhysBase = args->physBase; gPhysSize = args->memSize; mem_size = args->memSize; - if ((memory_size != 0) && (mem_size > memory_size)) + if ((memory_size != 0) && (mem_size > memory_size)) { mem_size = memory_size; - if (mem_size > MEM_SIZE_MAX ) + } + if (mem_size > MEM_SIZE_MAX) { mem_size = MEM_SIZE_MAX; + } static_memory_end = gVirtBase + mem_size; /* Calculate the nubmer of ~256MB segments of memory */ @@ -424,18 +430,20 @@ arm_vm_init(uint64_t memory_size, boot_args * args) /* Hands off [gVirtBase, gVirtBase + gPhysSize) please. */ if (gPhysBase < gVirtBase) { - if (gPhysBase + gPhysSize > gVirtBase) + if (gPhysBase + gPhysSize > gVirtBase) { tte_limit = &cpu_tte[ttenum(gVirtBase)]; + } } else { - if (gPhysBase < gVirtBase + gPhysSize) + if (gPhysBase < gVirtBase + gPhysSize) { tte = &cpu_tte[ttenum(gVirtBase + gPhysSize)]; + } } while (tte < tte_limit) { - *tte = ARM_TTE_TYPE_FAULT; + *tte = ARM_TTE_TYPE_FAULT; tte++; } - + /* Skip 6 pages (four L1 + two L2 entries) */ avail_start = cpu_ttep + ARM_PGBYTES * 6; avail_end = gPhysBase + mem_size; @@ -482,7 +490,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) * Special handling for the __DATA,__const *section*. * A page of padding named lastkerneldataconst is at the end of the __DATA,__const * so we can safely truncate the size. __DATA,__const is also aligned, but - * just in case we will round that to a page, too. + * just in case we will round that to a page, too. 
*/ segDATA = getsegbynamefromheader(&_mh_execute_header, "__DATA"); sectDCONST = getsectbynamefromheader(&_mh_execute_header, "__DATA", "__const"); @@ -537,9 +545,9 @@ arm_vm_init(uint64_t memory_size, boot_args * args) unsigned int ttbr0_val, ttbr1_val, ttbcr_val; thread_t thread = current_thread(); - __asm__ volatile("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val)); - __asm__ volatile("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val)); - __asm__ volatile("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val)); + __asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val)); + __asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val)); + __asm__ volatile ("mrc p15,0,%0,c2,c0,2\n" : "=r"(ttbcr_val)); thread->machine.uptw_ttb = ttbr0_val; thread->machine.kptw_ttb = ttbr1_val; thread->machine.uptw_ttc = ttbcr_val; @@ -554,7 +562,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) sane_size = mem_size - (avail_start - gPhysBase); max_mem = mem_size; - vm_kernel_slide = gVirtBase-VM_KERNEL_LINK_ADDRESS; + vm_kernel_slide = gVirtBase - VM_KERNEL_LINK_ADDRESS; vm_kernel_stext = segTEXTB; vm_kernel_etext = segTEXTB + segSizeTEXT; vm_kernel_base = gVirtBase; @@ -564,7 +572,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) vm_kernel_slid_base = segTEXTB; vm_kernel_slid_top = vm_kext_top; - pmap_bootstrap((gVirtBase+MEM_SIZE_MAX+0x3FFFFF) & 0xFFC00000); + pmap_bootstrap((gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000); arm_vm_prot_init(args); @@ -576,7 +584,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args) off_end = (2 + (mem_segments * 3)) << 20; off_end += (unsigned int) round_page(args->Video.v_height * args->Video.v_rowBytes); - for (off = 0, va = (gVirtBase+MEM_SIZE_MAX+0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) { + for (off = 0, va = (gVirtBase + MEM_SIZE_MAX + 0x3FFFFF) & 0xFFC00000; off < off_end; off += ARM_TT_L1_PT_SIZE) { pt_entry_t *ptp; pmap_paddr_t ptp_phys; @@ -585,10 +593,10 @@ arm_vm_init(uint64_t memory_size, boot_args * args) avail_start += ARM_PGBYTES; pmap_init_pte_page(kernel_pmap, ptp, va + off, 2, TRUE); tte = &cpu_tte[ttenum(va + off)]; - *tte = pa_to_tte((ptp_phys )) | ARM_TTE_TYPE_TABLE;; - *(tte+1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE;; - *(tte+2) = pa_to_tte((ptp_phys + 0x800)) | ARM_TTE_TYPE_TABLE;; - *(tte+3) = pa_to_tte((ptp_phys + 0xC00)) | ARM_TTE_TYPE_TABLE;; + *tte = pa_to_tte((ptp_phys)) | ARM_TTE_TYPE_TABLE;; + *(tte + 1) = pa_to_tte((ptp_phys + 0x400)) | ARM_TTE_TYPE_TABLE;; + *(tte + 2) = pa_to_tte((ptp_phys + 0x800)) | ARM_TTE_TYPE_TABLE;; + *(tte + 3) = pa_to_tte((ptp_phys + 0xC00)) | ARM_TTE_TYPE_TABLE;; } avail_start = (avail_start + PAGE_MASK) & ~PAGE_MASK; @@ -596,4 +604,3 @@ arm_vm_init(uint64_t memory_size, boot_args * args) first_avail = avail_start; patch_low_glo_static_region(args->topOfKernelData, avail_start - args->topOfKernelData); } - diff --git a/osfmk/arm/atomic.h b/osfmk/arm/atomic.h index 8f83828ed..380286cde 100644 --- a/osfmk/arm/atomic.h +++ b/osfmk/arm/atomic.h @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,23 +33,23 @@ #include // Parameter for __builtin_arm_dmb -#define DMB_NSH 0x7 -#define DMB_ISHLD 0x9 -#define DMB_ISHST 0xa -#define DMB_ISH 0xb -#define DMB_SY 0xf +#define DMB_NSH 0x7 +#define DMB_ISHLD 0x9 +#define DMB_ISHST 0xa +#define DMB_ISH 0xb +#define DMB_SY 0xf // Parameter for __builtin_arm_dsb -#define DSB_NSH 0x7 -#define DSB_ISHLD 0x9 -#define DSB_ISHST 0xa -#define DSB_ISH 0xb -#define DSB_SY 0xf +#define DSB_NSH 0x7 +#define DSB_ISHLD 0x9 +#define DSB_ISHST 0xa +#define DSB_ISH 0xb +#define DSB_SY 0xf // Parameter for __builtin_arm_isb -#define ISB_SY 0xf +#define ISB_SY 0xf -#if __SMP__ +#if __SMP__ #define memory_order_consume_smp memory_order_consume #define memory_order_acquire_smp memory_order_acquire @@ -105,12 +105,12 @@ memory_order_has_release(enum memory_order ord) #ifdef ATOMIC_PRIVATE -#define clear_exclusive() __builtin_arm_clrex() +#define clear_exclusive() __builtin_arm_clrex() __unused static uint32_t load_exclusive32(uint32_t *target, enum memory_order ord) { - uint32_t value; + uint32_t value; #if __arm__ if (memory_order_has_release(ord)) { @@ -119,11 +119,12 @@ load_exclusive32(uint32_t *target, enum memory_order ord) } value = __builtin_arm_ldrex(target); #else - if (memory_order_has_acquire(ord)) - value = __builtin_arm_ldaex(target); // ldaxr - else - value = __builtin_arm_ldrex(target); // ldxr -#endif // __arm__ + if (memory_order_has_acquire(ord)) { + value = __builtin_arm_ldaex(target); // ldaxr + } else { + value = __builtin_arm_ldrex(target); // ldxr + } +#endif // __arm__ return value; } @@ -139,11 +140,12 @@ store_exclusive32(uint32_t *target, uint32_t value, enum memory_order ord) atomic_thread_fence(memory_order_acquire); } #else - if (memory_order_has_release(ord)) - err = __builtin_arm_stlex(value, target); // stlxr - else - err = __builtin_arm_strex(value, target); // stxr -#endif // __arm__ + if (memory_order_has_release(ord)) { + err = __builtin_arm_stlex(value, target); // stlxr + } else { + err = __builtin_arm_strex(value, target); // stxr + } +#endif // __arm__ return !err; } @@ -153,14 +155,30 @@ load_exclusive(uintptr_t *target, enum memory_order ord) #if !__LP64__ return load_exclusive32((uint32_t *)target, ord); #else - uintptr_t value; + uintptr_t value; + + if (memory_order_has_acquire(ord)) { + value = __builtin_arm_ldaex(target); // ldaxr + } else { + value = __builtin_arm_ldrex(target); // ldxr + } + return value; +#endif // __arm__ +} - if (memory_order_has_acquire(ord)) - value = __builtin_arm_ldaex(target); // ldaxr - else - value = __builtin_arm_ldrex(target); // ldxr +__unused static uint8_t +load_exclusive_acquire8(uint8_t *target) +{ + uint8_t value; +#if __arm__ + value 
= __builtin_arm_ldrex(target); + __c11_atomic_thread_fence(__ATOMIC_ACQUIRE); +#else + value = __builtin_arm_ldaex(target); // ldaxr + /* "Compiler barrier", no barrier instructions are emitted */ + atomic_signal_fence(memory_order_acquire); +#endif return value; -#endif // __arm__ } __unused static boolean_t @@ -171,20 +189,21 @@ store_exclusive(uintptr_t *target, uintptr_t value, enum memory_order ord) #else boolean_t err; - if (memory_order_has_release(ord)) - err = __builtin_arm_stlex(value, target); // stlxr - else - err = __builtin_arm_strex(value, target); // stxr + if (memory_order_has_release(ord)) { + err = __builtin_arm_stlex(value, target); // stlxr + } else { + err = __builtin_arm_strex(value, target); // stxr + } return !err; #endif } __unused static boolean_t atomic_compare_exchange(uintptr_t *target, uintptr_t oldval, uintptr_t newval, - enum memory_order orig_ord, boolean_t wait) + enum memory_order orig_ord, boolean_t wait) { - enum memory_order ord = orig_ord; - uintptr_t value; + enum memory_order ord = orig_ord; + uintptr_t value; #if __arm__ @@ -196,10 +215,11 @@ atomic_compare_exchange(uintptr_t *target, uintptr_t oldval, uintptr_t newval, do { value = load_exclusive(target, ord); if (value != oldval) { - if (wait) - wait_for_event(); // Wait with monitor held - else - clear_exclusive(); // Clear exclusive monitor + if (wait) { + wait_for_event(); // Wait with monitor held + } else { + clear_exclusive(); // Clear exclusive monitor + } return FALSE; } } while (!store_exclusive(target, newval, ord)); @@ -216,70 +236,70 @@ atomic_compare_exchange(uintptr_t *target, uintptr_t oldval, uintptr_t newval, #if __arm__ #undef os_atomic_rmw_loop #define os_atomic_rmw_loop(p, ov, nv, m, ...) ({ \ - boolean_t _result = FALSE; uint32_t _err = 0; \ - typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \ - for (;;) { \ - ov = __builtin_arm_ldrex(_p); \ - __VA_ARGS__; \ - if (!_err && memory_order_has_release(memory_order_##m)) { \ - /* only done for the first loop iteration */ \ - atomic_thread_fence(memory_order_release); \ - } \ - _err = __builtin_arm_strex(nv, _p); \ - if (__builtin_expect(!_err, 1)) { \ - if (memory_order_has_acquire(memory_order_##m)) { \ - atomic_thread_fence(memory_order_acquire); \ - } \ - _result = TRUE; \ - break; \ - } \ - } \ - _result; \ + boolean_t _result = FALSE; uint32_t _err = 0; \ + typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \ + for (;;) { \ + ov = __builtin_arm_ldrex(_p); \ + __VA_ARGS__; \ + if (!_err && memory_order_has_release(memory_order_##m)) { \ + /* only done for the first loop iteration */ \ + atomic_thread_fence(memory_order_release); \ + } \ + _err = __builtin_arm_strex(nv, _p); \ + if (__builtin_expect(!_err, 1)) { \ + if (memory_order_has_acquire(memory_order_##m)) { \ + atomic_thread_fence(memory_order_acquire); \ + } \ + _result = TRUE; \ + break; \ + } \ + } \ + _result; \ }) #undef os_atomic_rmw_loop_give_up #define os_atomic_rmw_loop_give_up(expr) \ - ({ __builtin_arm_clrex(); expr; __builtin_trap(); }) + ({ __builtin_arm_clrex(); expr; __builtin_trap(); }) #else #undef os_atomic_rmw_loop #define os_atomic_rmw_loop(p, ov, nv, m, ...) 
({ \ - boolean_t _result = FALSE; \ - typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \ - do { \ - if (memory_order_has_acquire(memory_order_##m)) { \ - ov = __builtin_arm_ldaex(_p); \ - } else { \ - ov = __builtin_arm_ldrex(_p); \ - } \ - __VA_ARGS__; \ - if (memory_order_has_release(memory_order_##m)) { \ - _result = !__builtin_arm_stlex(nv, _p); \ - } else { \ - _result = !__builtin_arm_strex(nv, _p); \ - } \ - } while (__builtin_expect(!_result, 0)); \ - _result; \ + boolean_t _result = FALSE; \ + typeof(atomic_load(p)) *_p = (typeof(atomic_load(p)) *)(p); \ + do { \ + if (memory_order_has_acquire(memory_order_##m)) { \ + ov = __builtin_arm_ldaex(_p); \ + } else { \ + ov = __builtin_arm_ldrex(_p); \ + } \ + __VA_ARGS__; \ + if (memory_order_has_release(memory_order_##m)) { \ + _result = !__builtin_arm_stlex(nv, _p); \ + } else { \ + _result = !__builtin_arm_strex(nv, _p); \ + } \ + } while (__builtin_expect(!_result, 0)); \ + _result; \ }) #undef os_atomic_rmw_loop_give_up #define os_atomic_rmw_loop_give_up(expr) \ - ({ __builtin_arm_clrex(); expr; __builtin_trap(); }) + ({ __builtin_arm_clrex(); expr; __builtin_trap(); }) #endif #undef os_atomic_force_dependency_on #if defined(__arm64__) #define os_atomic_force_dependency_on(p, e) ({ \ - unsigned long _v; \ - __asm__("and %x[_v], %x[_e], xzr" : [_v] "=r" (_v) : [_e] "r" (e)); \ - (typeof(*(p)) *)((char *)(p) + _v); \ + unsigned long _v; \ + __asm__("and %x[_v], %x[_e], xzr" : [_v] "=r" (_v) : [_e] "r" (e)); \ + (typeof(*(p)) *)((char *)(p) + _v); \ }) #else #define os_atomic_force_dependency_on(p, e) ({ \ - unsigned long _v; \ - __asm__("and %[_v], %[_e], #0" : [_v] "=r" (_v) : [_e] "r" (e)); \ - (typeof(*(p)) *)((char *)(p) + _v); \ + unsigned long _v; \ + __asm__("and %[_v], %[_e], #0" : [_v] "=r" (_v) : [_e] "r" (e)); \ + (typeof(*(p)) *)((char *)(p) + _v); \ }) #endif // defined(__arm64__) diff --git a/osfmk/arm/bsd_arm.c b/osfmk/arm/bsd_arm.c index 5845d01de..ef4fe3d84 100644 --- a/osfmk/arm/bsd_arm.c +++ b/osfmk/arm/bsd_arm.c @@ -25,7 +25,7 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef MACH_BSD +#ifdef MACH_BSD #include #include @@ -54,7 +54,7 @@ kern_return_t thread_setsinglestep(__unused thread_t thread, __unused int on) { - return (KERN_FAILURE); /* XXX TODO */ + return KERN_FAILURE; /* XXX TODO */ } #if CONFIG_DTRACE @@ -68,4 +68,4 @@ dtrace_get_cpu_int_stack_top(void) } #endif /* CONFIG_DTRACE */ -#endif /* MACH_BSD */ +#endif /* MACH_BSD */ diff --git a/osfmk/arm/caches.c b/osfmk/arm/caches.c index 91d489d08..f76a19edf 100644 --- a/osfmk/arm/caches.c +++ b/osfmk/arm/caches.c @@ -50,10 +50,10 @@ /* * dcache_incoherent_io_flush64() dcache_incoherent_io_store64() result info */ -#define LWOpDone 1 -#define BWOpDone 3 +#define LWOpDone 1 +#define BWOpDone 3 -#ifndef __ARM_COHERENT_IO__ +#ifndef __ARM_COHERENT_IO__ extern boolean_t up_style_idle_exit; @@ -63,38 +63,42 @@ flush_dcache( unsigned length, boolean_t phys) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); if (phys) { - pmap_paddr_t paddr; - vm_offset_t vaddr; + pmap_paddr_t paddr; + vm_offset_t vaddr; paddr = CAST_DOWN(pmap_paddr_t, addr); - if (!isphysmem(paddr)) + if (!isphysmem(paddr)) { return; + } vaddr = phystokv(paddr); - FlushPoC_DcacheRegion( (vm_offset_t) vaddr, length); + FlushPoC_DcacheRegion((vm_offset_t) vaddr, length); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheCleanFlushRegion, 
(unsigned int) paddr, length); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, length); + } return; } if (cpu_data_ptr->cpu_cache_dispatch == (cache_dispatch_t) NULL) { - FlushPoC_DcacheRegion( (vm_offset_t) addr, length); + FlushPoC_DcacheRegion((vm_offset_t) addr, length); } else { - addr64_t paddr; - uint32_t count; + addr64_t paddr; + uint32_t count; while (length > 0) { count = PAGE_SIZE - (addr & PAGE_MASK); - if (count > length) + if (count > length) { count = length; - FlushPoC_DcacheRegion( (vm_offset_t) addr, count); + } + FlushPoC_DcacheRegion((vm_offset_t) addr, count); paddr = kvtophys(addr); - if (paddr) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, count); + if (paddr) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, count); + } addr += count; length -= count; } @@ -108,40 +112,44 @@ clean_dcache( unsigned length, boolean_t phys) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); if (phys) { - pmap_paddr_t paddr; - vm_offset_t vaddr; + pmap_paddr_t paddr; + vm_offset_t vaddr; paddr = CAST_DOWN(pmap_paddr_t, addr); - if (!isphysmem(paddr)) + if (!isphysmem(paddr)) { return; + } vaddr = phystokv(paddr); - CleanPoC_DcacheRegion( (vm_offset_t) vaddr, length); + CleanPoC_DcacheRegion((vm_offset_t) vaddr, length); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, length); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, length); + } return; } - + if (cpu_data_ptr->cpu_cache_dispatch == (cache_dispatch_t) NULL) { - CleanPoC_DcacheRegion( (vm_offset_t) addr, length); + CleanPoC_DcacheRegion((vm_offset_t) addr, length); } else { - addr64_t paddr; - uint32_t count; + addr64_t paddr; + uint32_t count; while (length > 0) { count = PAGE_SIZE - (addr & PAGE_MASK); - if (count > length) + if (count > length) { count = length; - CleanPoC_DcacheRegion( (vm_offset_t) addr, count); + } + CleanPoC_DcacheRegion((vm_offset_t) addr, count); paddr = kvtophys(addr); - if (paddr) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, count); + if (paddr) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, count); + } addr += count; length -= count; } @@ -154,16 +162,17 @@ flush_dcache_syscall( vm_offset_t va, unsigned length) { - if ((cache_info()->c_bulksize_op !=0) && (length >= (cache_info()->c_bulksize_op))) { -#if __ARM_SMP__ && defined(ARMA7) + if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) { +#if __ARM_SMP__ && defined(ARMA7) cache_xcall(LWFlush); #else FlushPoC_Dcache(); - if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch) ( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL , 0x0UL); + if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)( getCpuDatap()->cpu_id, 
CacheCleanFlush, 0x0UL, 0x0UL); + } #endif } else { - FlushPoC_DcacheRegion( (vm_offset_t) va, length); + FlushPoC_DcacheRegion((vm_offset_t) va, length); } return; } @@ -179,23 +188,25 @@ dcache_incoherent_io_flush64( pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa); cpu_data_t *cpu_data_ptr = getCpuDatap(); - if ((cache_info()->c_bulksize_op !=0) && (remaining >= (cache_info()->c_bulksize_op))) { -#if __ARM_SMP__ && defined (ARMA7) + if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) { +#if __ARM_SMP__ && defined (ARMA7) cache_xcall(LWFlush); #else FlushPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL , 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); + } #endif *res = BWOpDone; } else { if (isphysmem(paddr)) { vaddr = phystokv(pa); { - FlushPoC_DcacheRegion( (vm_offset_t) vaddr, size); + FlushPoC_DcacheRegion((vm_offset_t) vaddr, size); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) pa, size); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) pa, size); + } } } else { /* slow path - pa isn't in the vtop region. Flush one page at a time via cpu_copywindows */ @@ -206,14 +217,15 @@ dcache_incoherent_io_flush64( while (size > 0) { count = PAGE_SIZE - (paddr & PAGE_MASK); - if (count > size) + if (count > size) { count = size; + } wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT)); - index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ|VM_PROT_WRITE, wimg_bits); + index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits); vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK); - CleanPoC_DcacheRegion( (vm_offset_t) vaddr, count); + CleanPoC_DcacheRegion((vm_offset_t) vaddr, count); pmap_unmap_cpu_windows_copy(index); @@ -246,25 +258,28 @@ dcache_incoherent_io_store64( } } - if ((cache_info()->c_bulksize_op !=0) && (remaining >= (cache_info()->c_bulksize_op))) { -#if __ARM_SMP__ && defined (ARMA7) + if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) { +#if __ARM_SMP__ && defined (ARMA7) cache_xcall(LWClean); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( cpu_data_ptr->cpu_id, CacheClean, 0x0UL , 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); + } #else CleanPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( cpu_data_ptr->cpu_id, CacheClean, 0x0UL , 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); + } #endif *res = BWOpDone; } else { if (isphysmem(paddr)) { vaddr = phystokv(pa); { - CleanPoC_DcacheRegion( (vm_offset_t) vaddr, size); + 
CleanPoC_DcacheRegion((vm_offset_t) vaddr, size); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) pa, size); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)(cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) pa, size); + } } } else { /* slow path - pa isn't in the vtop region. Flush one page at a time via cpu_copywindows */ @@ -275,14 +290,15 @@ dcache_incoherent_io_store64( while (size > 0) { count = PAGE_SIZE - (paddr & PAGE_MASK); - if (count > size) + if (count > size) { count = size; + } wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT)); - index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ|VM_PROT_WRITE, wimg_bits); + index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits); vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK); - CleanPoC_DcacheRegion( (vm_offset_t) vaddr, count); + CleanPoC_DcacheRegion((vm_offset_t) vaddr, count); pmap_unmap_cpu_windows_copy(index); @@ -300,9 +316,9 @@ dcache_incoherent_io_store64( void cache_sync_page( ppnum_t pp -) + ) { - pmap_paddr_t paddr = ptoa(pp); + pmap_paddr_t paddr = ptoa(pp); if (isphysmem(paddr)) { vm_offset_t vaddr = phystokv(paddr); @@ -325,60 +341,62 @@ platform_cache_init( { cache_info_t *cpuid_cache_info; unsigned int cache_size = 0x0UL; - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); cpuid_cache_info = cache_info(); if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL); + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL); - if ( cpuid_cache_info->c_l2size == 0x0 ) { - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize , (unsigned int)&cache_size); + if (cpuid_cache_info->c_l2size == 0x0) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size); cpuid_cache_info->c_l2size = cache_size; } } - } void platform_cache_flush( void) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); FlushPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL , 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); + } } void platform_cache_clean( void) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); CleanPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheClean, 0x0UL , 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL); + } } void platform_cache_shutdown( void) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); 
CleanPoC_Dcache(); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( - cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL , 0x0UL); + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch)( + cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL); + } } void @@ -388,14 +406,14 @@ platform_cache_disable(void) uint32_t sctlr_value = 0; /* Disable dcache allocation. */ - __asm__ volatile("mrc p15, 0, %0, c1, c0, 0" - : "=r"(sctlr_value)); + __asm__ volatile ("mrc p15, 0, %0, c1, c0, 0" + : "=r"(sctlr_value)); sctlr_value &= ~SCTLR_DCACHE; - __asm__ volatile("mcr p15, 0, %0, c1, c0, 0\n" - "isb" - :: "r"(sctlr_value)); + __asm__ volatile ("mcr p15, 0, %0, c1, c0, 0\n" + "isb" + :: "r"(sctlr_value)); #endif /* (__ARM_ARCH__ < 8) */ } @@ -403,7 +421,7 @@ void platform_cache_idle_enter( void) { -#if __ARM_SMP__ +#if __ARM_SMP__ platform_cache_disable(); /* @@ -412,15 +430,15 @@ platform_cache_idle_enter( * on CPU data that would normally be modified by other * CPUs. */ - if (up_style_idle_exit && (real_ncpus == 1)) + if (up_style_idle_exit && (real_ncpus == 1)) { CleanPoU_Dcache(); - else { + } else { FlushPoU_Dcache(); #if (__ARM_ARCH__ < 8) - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); cpu_data_ptr->cpu_CLW_active = 0; - __asm__ volatile("dmb ish"); + __asm__ volatile ("dmb ish"); cpu_data_ptr->cpu_CLWFlush_req = 0; cpu_data_ptr->cpu_CLWClean_req = 0; CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t)); @@ -430,24 +448,24 @@ platform_cache_idle_enter( CleanPoU_Dcache(); #endif -#if defined (__ARM_SMP__) && defined (ARMA7) +#if defined (__ARM_SMP__) && defined (ARMA7) uint32_t actlr_value = 0; /* Leave the coherency domain */ - __asm__ volatile("clrex\n" - "mrc p15, 0, %0, c1, c0, 1\n" - : "=r"(actlr_value)); + __asm__ volatile ("clrex\n" + "mrc p15, 0, %0, c1, c0, 1\n" + : "=r"(actlr_value)); actlr_value &= ~0x40; - __asm__ volatile("mcr p15, 0, %0, c1, c0, 1\n" - /* Ensures any pending fwd request gets serviced and ends up */ - "dsb\n" - /* Forces the processor to re-fetch, so any pending fwd request gets into the core */ - "isb\n" - /* Ensures the second possible pending fwd request ends up. */ - "dsb\n" - :: "r"(actlr_value)); + __asm__ volatile ("mcr p15, 0, %0, c1, c0, 1\n" + /* Ensures any pending fwd request gets serviced and ends up */ + "dsb\n" + /* Forces the processor to re-fetch, so any pending fwd request gets into the core */ + "isb\n" + /* Ensures the second possible pending fwd request ends up. */ + "dsb\n" + :: "r"(actlr_value)); #endif } @@ -473,27 +491,27 @@ platform_cache_idle_exit( } /* Rejoin the coherency domain */ - __asm__ volatile("mrc p15, 0, %0, c1, c0, 1\n" - : "=r"(actlr_value)); + __asm__ volatile ("mrc p15, 0, %0, c1, c0, 1\n" + : "=r"(actlr_value)); actlr_value |= 0x40; - __asm__ volatile("mcr p15, 0, %0, c1, c0, 1\n" - "isb\n" - :: "r"(actlr_value)); + __asm__ volatile ("mcr p15, 0, %0, c1, c0, 1\n" + "isb\n" + :: "r"(actlr_value)); #if __ARM_SMP__ uint32_t sctlr_value = 0; /* Enable dcache allocation. 
*/ - __asm__ volatile("mrc p15, 0, %0, c1, c0, 0\n" - : "=r"(sctlr_value)); + __asm__ volatile ("mrc p15, 0, %0, c1, c0, 0\n" + : "=r"(sctlr_value)); sctlr_value |= SCTLR_DCACHE; - __asm__ volatile("mcr p15, 0, %0, c1, c0, 0\n" - "isb" - :: "r"(sctlr_value)); + __asm__ volatile ("mcr p15, 0, %0, c1, c0, 0\n" + "isb" + :: "r"(sctlr_value)); getCpuDatap()->cpu_CLW_active = 1; #endif #endif @@ -501,13 +519,15 @@ platform_cache_idle_exit( boolean_t platform_cache_batch_wimg( - __unused unsigned int new_wimg, + __unused unsigned int new_wimg, __unused unsigned int size ) { - boolean_t do_cache_op = FALSE; + boolean_t do_cache_op = FALSE; - if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) do_cache_op = TRUE; + if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) { + do_cache_op = TRUE; + } return do_cache_op; } @@ -515,23 +535,24 @@ platform_cache_batch_wimg( void platform_cache_flush_wimg( __unused unsigned int new_wimg -) + ) { -#if __ARM_SMP__ && defined (ARMA7) +#if __ARM_SMP__ && defined (ARMA7) cache_xcall(LWFlush); #else FlushPoC_Dcache(); - if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) - ((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch) ( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL , 0x0UL); + if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) { + ((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch)( getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL); + } #endif } -#if __ARM_SMP__ && defined(ARMA7) +#if __ARM_SMP__ && defined(ARMA7) void cache_xcall_handler(unsigned int op) { - cpu_data_t *cdp; - uint64_t abstime; + cpu_data_t *cdp; + uint64_t abstime; cdp = getCpuDatap(); @@ -540,7 +561,7 @@ cache_xcall_handler(unsigned int op) abstime = ml_get_timebase(); cdp->cpu_CLWFlush_last = abstime; cdp->cpu_CLWClean_last = abstime; - } else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) { + } else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) { CleanPoU_Dcache(); abstime = ml_get_timebase(); cdp->cpu_CLWClean_last = abstime; @@ -551,87 +572,102 @@ cache_xcall_handler(unsigned int op) void cache_xcall(unsigned int op) { - boolean_t intr; - cpu_data_t *cdp; - cpu_data_t *target_cdp; - unsigned int cpu; - unsigned int signal; - uint64_t abstime; + boolean_t intr; + cpu_data_t *cdp; + cpu_data_t *target_cdp; + unsigned int cpu; + unsigned int signal; + uint64_t abstime; intr = ml_set_interrupts_enabled(FALSE); cdp = getCpuDatap(); abstime = ml_get_timebase(); - if (op == LWClean) + if (op == LWClean) { signal = SIGPLWClean; - else + } else { signal = SIGPLWFlush; + } - for (cpu=0; cpu < MAX_CPUS; cpu++) { - + for (cpu = 0; cpu < MAX_CPUS; cpu++) { target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if(target_cdp == (cpu_data_t *)NULL) + if (target_cdp == (cpu_data_t *)NULL) { break; + } - if (target_cdp->cpu_CLW_active == 0) + if (target_cdp->cpu_CLW_active == 0) { continue; + } - if (op == LWFlush) + if (op == LWFlush) { target_cdp->cpu_CLWFlush_req = abstime; - else if (op == LWClean) + } else if (op == LWClean) { target_cdp->cpu_CLWClean_req = abstime; - __asm__ volatile("dmb ish"); + } + __asm__ volatile ("dmb ish"); if (target_cdp->cpu_CLW_active == 0) { - if (op == LWFlush) + if (op == LWFlush) { target_cdp->cpu_CLWFlush_req = 0x0ULL; - else if (op == LWClean) + } else if (op == LWClean) { target_cdp->cpu_CLWClean_req = 0x0ULL; + } continue; } - if (target_cdp == cdp) + if (target_cdp == cdp) { continue; + } - 
if(KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) { - if (op == LWFlush) + if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) { + if (op == LWFlush) { target_cdp->cpu_CLWFlush_req = 0x0ULL; - else if (op == LWClean) + } else if (op == LWClean) { target_cdp->cpu_CLWClean_req = 0x0ULL; + } } - if (cpu == real_ncpus) + if (cpu == real_ncpus) { break; + } } - cache_xcall_handler (op); + cache_xcall_handler(op); (void) ml_set_interrupts_enabled(intr); - for (cpu=0; cpu < MAX_CPUS; cpu++) { - + for (cpu = 0; cpu < MAX_CPUS; cpu++) { target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if(target_cdp == (cpu_data_t *)NULL) + if (target_cdp == (cpu_data_t *)NULL) { break; + } - if (target_cdp == cdp) + if (target_cdp == cdp) { continue; + } - if (op == LWFlush) - while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime)); - else if (op == LWClean) - while ((target_cdp->cpu_CLWClean_req != 0x0ULL ) && (target_cdp->cpu_CLWClean_last < abstime)); + if (op == LWFlush) { + while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime)) { + ; + } + } else if (op == LWClean) { + while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime)) { + ; + } + } - if (cpu == real_ncpus) + if (cpu == real_ncpus) { break; + } } - if (op == LWFlush) + if (op == LWFlush) { FlushPoC_Dcache(); - else if (op == LWClean) + } else if (op == LWClean) { CleanPoC_Dcache(); + } } #endif -#else /* __ARM_COHERENT_IO__ */ +#else /* __ARM_COHERENT_IO__ */ void flush_dcache( @@ -639,7 +675,7 @@ flush_dcache( __unused unsigned length, __unused boolean_t phys) { - __asm__ volatile ("dsb sy"); + __asm__ volatile ("dsb sy"); } void @@ -648,7 +684,7 @@ clean_dcache( __unused unsigned length, __unused boolean_t phys) { - __asm__ volatile ("dsb sy"); + __asm__ volatile ("dsb sy"); } void @@ -656,7 +692,7 @@ flush_dcache_syscall( __unused vm_offset_t va, __unused unsigned length) { - __asm__ volatile ("dsb sy"); + __asm__ volatile ("dsb sy"); } void @@ -666,7 +702,7 @@ dcache_incoherent_io_flush64( __unused unsigned int remaining, __unused unsigned int *res) { - __asm__ volatile ("dsb sy"); + __asm__ volatile ("dsb sy"); *res = LWOpDone; return; } @@ -678,7 +714,7 @@ dcache_incoherent_io_store64( __unused unsigned int remaining, __unused unsigned int *res) { - __asm__ volatile ("dsb sy"); + __asm__ volatile ("dsb sy"); *res = LWOpDone; return; } @@ -686,9 +722,9 @@ dcache_incoherent_io_store64( void cache_sync_page( ppnum_t pp -) + ) { - pmap_paddr_t paddr = ptoa(pp); + pmap_paddr_t paddr = ptoa(pp); if (isphysmem(paddr)) { vm_offset_t vaddr = phystokv(paddr); @@ -698,7 +734,7 @@ cache_sync_page( #else InvalidatePoU_Icache(); #endif - } + } } void @@ -739,7 +775,7 @@ platform_cache_idle_exit( boolean_t platform_cache_batch_wimg( - __unused unsigned int new_wimg, + __unused unsigned int new_wimg, __unused unsigned int size ) { @@ -752,4 +788,4 @@ platform_cache_flush_wimg( { } -#endif /* __ARM_COHERENT_IO__ */ +#endif /* __ARM_COHERENT_IO__ */ diff --git a/osfmk/arm/caches_internal.h b/osfmk/arm/caches_internal.h index e8058a858..60a1d40b5 100644 --- a/osfmk/arm/caches_internal.h +++ b/osfmk/arm/caches_internal.h @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _ARM_CACHES_INTERNAL -#define _ARM_CACHES_INTERNAL 1 +#define _ARM_CACHES_INTERNAL 1 #include @@ -40,7 +40,7 @@ extern void flush_dcache64(addr64_t addr, unsigned count, int phys); extern void 
invalidate_icache(vm_offset_t addr, unsigned cnt, int phys); extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys); -#if __ARM_SMP__ && defined(ARMA7) +#if __ARM_SMP__ && defined(ARMA7) #define LWFlush 1 #define LWClean 2 extern void cache_xcall(unsigned int op); @@ -64,7 +64,7 @@ extern void CleanPoC_DcacheRegion(vm_offset_t va, unsigned length); /* * Always actually flushes the cache, even on platforms - * where AP caches are snooped by all agents. You + * where AP caches are snooped by all agents. You * probably don't need to use this. Intended for use in * panic save routine (where caches will be yanked by reset * and coherency doesn't help). @@ -77,7 +77,7 @@ extern void FlushPoC_Dcache(void); extern void FlushPoU_Dcache(void); extern void FlushPoC_DcacheRegion(vm_offset_t va, unsigned length); -#ifdef __arm__ +#ifdef __arm__ extern void invalidate_mmu_cache(void); extern void invalidate_mmu_dcache(void); extern void invalidate_mmu_dcache_region(vm_offset_t va, unsigned length); @@ -86,7 +86,7 @@ extern void invalidate_mmu_dcache_region(vm_offset_t va, unsigned length); extern void InvalidatePoU_Icache(void); extern void InvalidatePoU_IcacheRegion(vm_offset_t va, unsigned length); -extern void cache_sync_page(ppnum_t pp); +extern void cache_sync_page(ppnum_t pp); extern void platform_cache_init(void); extern void platform_cache_idle_enter(void); diff --git a/osfmk/arm/commpage/commpage.c b/osfmk/arm/commpage/commpage.c index 74aa72f31..a9b48ce7e 100644 --- a/osfmk/arm/commpage/commpage.c +++ b/osfmk/arm/commpage/commpage.c @@ -3,7 +3,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +12,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,7 +23,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -47,10 +47,12 @@ #include #include #include -#include /* for cpuid_info() & cache_info() */ +#include /* for cpuid_info() & cache_info() */ #include #include #include +#include +#include #include @@ -61,64 +63,65 @@ static void commpage_init_cpu_capabilities( void ); static int commpage_cpus( void ); -SECURITY_READ_ONLY_LATE(vm_address_t) commPagePtr=0; -SECURITY_READ_ONLY_LATE(vm_address_t) sharedpage_rw_addr = 0; -SECURITY_READ_ONLY_LATE(uint32_t) _cpu_capabilities = 0; +SECURITY_READ_ONLY_LATE(vm_address_t) commPagePtr = 0; +SECURITY_READ_ONLY_LATE(vm_address_t) sharedpage_rw_addr = 0; +SECURITY_READ_ONLY_LATE(uint32_t) _cpu_capabilities = 0; /* For sysctl access from BSD side */ -extern int gARMv81Atomics; -extern int gARMv8Crc32; +extern int gARMv81Atomics; +extern int gARMv8Crc32; void commpage_populate( void) { - uint16_t c2; + uint16_t c2; int cpufamily; sharedpage_rw_addr = pmap_create_sharedpage(); commPagePtr = (vm_address_t)_COMM_PAGE_BASE_ADDRESS; - *((uint16_t*)(_COMM_PAGE_VERSION+_COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION; + *((uint16_t*)(_COMM_PAGE_VERSION + _COMM_PAGE_RW_OFFSET)) = (uint16_t) _COMM_PAGE_THIS_VERSION; commpage_init_cpu_capabilities(); commpage_set_timestamp(0, 0, 0, 0, 0); - if (_cpu_capabilities & kCache32) + if (_cpu_capabilities & kCache32) { c2 = 32; - else if (_cpu_capabilities & kCache64) + } else if (_cpu_capabilities & kCache64) { c2 = 64; - else if (_cpu_capabilities & kCache128) + } else if (_cpu_capabilities & kCache128) { c2 = 128; - else + } else { c2 = 0; + } - *((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE+_COMM_PAGE_RW_OFFSET)) = c2; - *((uint32_t*)(_COMM_PAGE_SPIN_COUNT+_COMM_PAGE_RW_OFFSET)) = 1; + *((uint16_t*)(_COMM_PAGE_CACHE_LINESIZE + _COMM_PAGE_RW_OFFSET)) = c2; + *((uint32_t*)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = 1; commpage_update_active_cpus(); cpufamily = cpuid_get_cpufamily(); /* machine_info valid after ml_get_max_cpus() */ - *((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS+_COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max; - *((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS+_COMM_PAGE_RW_OFFSET))= (uint8_t) machine_info.logical_cpu_max; - *((uint64_t*)(_COMM_PAGE_MEMORY_SIZE+_COMM_PAGE_RW_OFFSET)) = machine_info.max_mem; - *((uint32_t*)(_COMM_PAGE_CPUFAMILY+_COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily; - *((uint32_t*)(_COMM_PAGE_DEV_FIRM+_COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL); - *((uint8_t*)(_COMM_PAGE_USER_TIMEBASE+_COMM_PAGE_RW_OFFSET)) = user_timebase_allowed(); - *((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK+_COMM_PAGE_RW_OFFSET)) = user_cont_hwclock_allowed(); - *((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT+_COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift; + *((uint8_t*)(_COMM_PAGE_PHYSICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.physical_cpu_max; + *((uint8_t*)(_COMM_PAGE_LOGICAL_CPUS + _COMM_PAGE_RW_OFFSET)) = (uint8_t) machine_info.logical_cpu_max; + *((uint64_t*)(_COMM_PAGE_MEMORY_SIZE + _COMM_PAGE_RW_OFFSET)) = machine_info.max_mem; + *((uint32_t*)(_COMM_PAGE_CPUFAMILY + _COMM_PAGE_RW_OFFSET)) = (uint32_t)cpufamily; + *((uint32_t*)(_COMM_PAGE_DEV_FIRM + _COMM_PAGE_RW_OFFSET)) = (uint32_t)PE_i_can_has_debugger(NULL); + *((uint8_t*)(_COMM_PAGE_USER_TIMEBASE + _COMM_PAGE_RW_OFFSET)) = user_timebase_allowed(); + *((uint8_t*)(_COMM_PAGE_CONT_HWCLOCK + _COMM_PAGE_RW_OFFSET)) = user_cont_hwclock_allowed(); + *((uint8_t*)(_COMM_PAGE_KERNEL_PAGE_SHIFT + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift; #if __arm64__ - 
*((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32; - *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT; + *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) page_shift_user32; + *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT; #elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS) /* enforce 16KB alignment for watch targets with new ABI */ - *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT; - *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT; + *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT; + *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) SIXTEENK_PAGE_SHIFT; #else /* __arm64__ */ - *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32+_COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT; - *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64+_COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT; + *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_32 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT; + *((uint8_t*)(_COMM_PAGE_USER_PAGE_SHIFT_64 + _COMM_PAGE_RW_OFFSET)) = (uint8_t) PAGE_SHIFT; #endif /* __arm64__ */ commpage_update_timebase(); @@ -129,15 +132,15 @@ commpage_populate( clock_get_boottime_microtime(&secs, µsecs); commpage_update_boottime(secs * USEC_PER_SEC + microsecs); - /* - * set commpage approximate time to zero for initialization. + /* + * set commpage approximate time to zero for initialization. * scheduler shall populate correct value before running user thread */ - *((uint64_t *)(_COMM_PAGE_APPROX_TIME+ _COMM_PAGE_RW_OFFSET)) = 0; + *((uint64_t *)(_COMM_PAGE_APPROX_TIME + _COMM_PAGE_RW_OFFSET)) = 0; #ifdef CONFIG_MACH_APPROXIMATE_TIME - *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED+_COMM_PAGE_RW_OFFSET)) = 1; + *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 1; #else - *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED+_COMM_PAGE_RW_OFFSET)) = 0; + *((uint8_t *)(_COMM_PAGE_APPROX_TIME_SUPPORTED + _COMM_PAGE_RW_OFFSET)) = 0; #endif commpage_update_kdebug_state(); @@ -146,41 +149,44 @@ commpage_populate( commpage_update_atm_diagnostic_config(atm_get_diagnostic_config()); #endif + + *((uint64_t*)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET)) = BT_RESET_SENTINEL_TS; } struct mu { - uint64_t m; // magic number - int32_t a; // add indicator - int32_t s; // shift amount + uint64_t m; // magic number + int32_t a; // add indicator + int32_t s; // shift amount }; void commpage_set_timestamp( - uint64_t tbr, - uint64_t secs, - uint64_t frac, - uint64_t scale, - uint64_t tick_per_sec) + uint64_t tbr, + uint64_t secs, + uint64_t frac, + uint64_t scale, + uint64_t tick_per_sec) { new_commpage_timeofday_data_t *commpage_timeofday_datap; - if (commPagePtr == 0) + if (commPagePtr == 0) { return; + } - commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA+_COMM_PAGE_RW_OFFSET); + commpage_timeofday_datap = (new_commpage_timeofday_data_t *)(_COMM_PAGE_NEWTIMEOFDAY_DATA + _COMM_PAGE_RW_OFFSET); commpage_timeofday_datap->TimeStamp_tick = 0x0ULL; -#if (__ARM_ARCH__ >= 7) - __asm__ volatile("dmb ish"); +#if (__ARM_ARCH__ >= 7) + __asm__ volatile ("dmb ish"); #endif commpage_timeofday_datap->TimeStamp_sec = secs; commpage_timeofday_datap->TimeStamp_frac = frac; commpage_timeofday_datap->Ticks_scale = scale; 
commpage_timeofday_datap->Ticks_per_sec = tick_per_sec; -#if (__ARM_ARCH__ >= 7) - __asm__ volatile("dmb ish"); +#if (__ARM_ARCH__ >= 7) + __asm__ volatile ("dmb ish"); #endif commpage_timeofday_datap->TimeStamp_tick = tbr; } @@ -191,11 +197,12 @@ commpage_set_timestamp( void commpage_set_memory_pressure( - unsigned int pressure ) + unsigned int pressure ) { - if (commPagePtr == 0) + if (commPagePtr == 0) { return; - *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE+_COMM_PAGE_RW_OFFSET)) = pressure; + } + *((uint32_t *)(_COMM_PAGE_MEMORY_PRESSURE + _COMM_PAGE_RW_OFFSET)) = pressure; } /* @@ -204,14 +211,16 @@ commpage_set_memory_pressure( void commpage_set_spin_count( - unsigned int count ) + unsigned int count ) { - if (count == 0) /* we test for 0 after decrement, not before */ - count = 1; + if (count == 0) { /* we test for 0 after decrement, not before */ + count = 1; + } - if (commPagePtr == 0) + if (commPagePtr == 0) { return; - *((uint32_t *)(_COMM_PAGE_SPIN_COUNT+_COMM_PAGE_RW_OFFSET)) = count; + } + *((uint32_t *)(_COMM_PAGE_SPIN_COUNT + _COMM_PAGE_RW_OFFSET)) = count; } /* @@ -222,12 +231,14 @@ commpage_cpus( void ) { int cpus; - cpus = ml_get_max_cpus(); // NB: this call can block + cpus = ml_get_max_cpus(); // NB: this call can block - if (cpus == 0) + if (cpus == 0) { panic("commpage cpus==0"); - if (cpus > 0xFF) + } + if (cpus > 0xFF) { cpus = 0xFF; + } return cpus; } @@ -252,41 +263,45 @@ commpage_init_cpu_capabilities( void ) ml_cpu_get_info(&cpu_info); switch (cpu_info.cache_line_size) { - case 128: - bits |= kCache128; - break; - case 64: - bits |= kCache64; - break; - case 32: - bits |= kCache32; - break; - default: - break; + case 128: + bits |= kCache128; + break; + case 64: + bits |= kCache64; + break; + case 32: + bits |= kCache32; + break; + default: + break; } cpus = commpage_cpus(); - if (cpus == 1) + if (cpus == 1) { bits |= kUP; + } bits |= (cpus << kNumCPUsShift); bits |= kFastThreadLocalStorage; // TPIDRURO for TLS -#if __ARM_VFP__ +#if __ARM_VFP__ bits |= kHasVfp; arm_mvfp_info_t *mvfp_info = arm_mvfp_info(); - if (mvfp_info->neon) + if (mvfp_info->neon) { bits |= kHasNeon; - if (mvfp_info->neon_hpfp) + } + if (mvfp_info->neon_hpfp) { bits |= kHasNeonHPFP; - if (mvfp_info->neon_fp16) + } + if (mvfp_info->neon_fp16) { bits |= kHasNeonFP16; + } #endif #if defined(__arm64__) bits |= kHasFMA; #endif -#if __ARM_ENABLE_WFE_ +#if __ARM_ENABLE_WFE_ #ifdef __arm64__ if (arm64_wfe_allowed()) { bits |= kHasEvent; @@ -295,7 +310,7 @@ commpage_init_cpu_capabilities( void ) bits |= kHasEvent; #endif #endif -#if __ARM_V8_CRYPTO_EXTENSIONS__ +#if __ARM_V8_CRYPTO_EXTENSIONS__ bits |= kHasARMv8Crypto; #endif #ifdef __arm64__ @@ -311,7 +326,7 @@ commpage_init_cpu_capabilities( void ) #endif _cpu_capabilities = bits; - *((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES+_COMM_PAGE_RW_OFFSET)) = _cpu_capabilities; + *((uint32_t *)(_COMM_PAGE_CPU_CAPABILITIES + _COMM_PAGE_RW_OFFSET)) = _cpu_capabilities; } /* @@ -320,9 +335,10 @@ commpage_init_cpu_capabilities( void ) void commpage_update_active_cpus(void) { - if (!commPagePtr) - return; - *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS+_COMM_PAGE_RW_OFFSET)) = processor_avail_count; + if (!commPagePtr) { + return; + } + *((uint8_t *)(_COMM_PAGE_ACTIVE_CPUS + _COMM_PAGE_RW_OFFSET)) = processor_avail_count; } /* @@ -332,7 +348,7 @@ void commpage_update_timebase(void) { if (commPagePtr) { - *((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET+_COMM_PAGE_RW_OFFSET)) = rtclock_base_abstime; + *((uint64_t*)(_COMM_PAGE_TIMEBASE_OFFSET + _COMM_PAGE_RW_OFFSET)) = 
rtclock_base_abstime; } } @@ -347,16 +363,18 @@ commpage_update_timebase(void) void commpage_update_kdebug_state(void) { - if (commPagePtr) - *((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE+_COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state(); + if (commPagePtr) { + *((volatile uint32_t*)(_COMM_PAGE_KDEBUG_ENABLE + _COMM_PAGE_RW_OFFSET)) = kdebug_commpage_state(); + } } /* Ditto for atm_diagnostic_config */ void commpage_update_atm_diagnostic_config(uint32_t diagnostic_config) { - if (commPagePtr) - *((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG+_COMM_PAGE_RW_OFFSET)) = diagnostic_config; + if (commPagePtr) { + *((volatile uint32_t*)(_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG + _COMM_PAGE_RW_OFFSET)) = diagnostic_config; + } } /* @@ -367,12 +385,13 @@ commpage_update_atm_diagnostic_config(uint32_t diagnostic_config) void commpage_update_multiuser_config(uint32_t multiuser_config) { - if (commPagePtr) - *((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG+_COMM_PAGE_RW_OFFSET)) = multiuser_config; + if (commPagePtr) { + *((volatile uint32_t *)(_COMM_PAGE_MULTIUSER_CONFIG + _COMM_PAGE_RW_OFFSET)) = multiuser_config; + } } /* - * update the commpage data for + * update the commpage data for * last known value of mach_absolute_time() */ @@ -385,14 +404,14 @@ commpage_update_mach_approximate_time(uint64_t abstime) if (commPagePtr) { saved_data = atomic_load_explicit((_Atomic uint64_t *)approx_time_base, - memory_order_relaxed); + memory_order_relaxed); if (saved_data < abstime) { /* ignoring the success/fail return value assuming that * if the value has been updated since we last read it, * "someone" has a newer timestamp than us and ours is * now invalid. */ - atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)approx_time_base, - &saved_data, abstime, memory_order_relaxed, memory_order_relaxed); + atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)approx_time_base, + &saved_data, abstime, memory_order_relaxed, memory_order_relaxed); } } #else @@ -401,7 +420,7 @@ commpage_update_mach_approximate_time(uint64_t abstime) } /* - * update the commpage data's total system sleep time for + * update the commpage data's total system sleep time for * userspace call to mach_continuous_time() */ void @@ -415,7 +434,7 @@ commpage_update_mach_continuous_time(uint64_t sleeptime) uint64_t old; do { old = *c_time_base; - } while(!OSCompareAndSwap64(old, sleeptime, c_time_base)); + } while (!OSCompareAndSwap64(old, sleeptime, c_time_base)); #endif /* __arm64__ */ } } @@ -439,6 +458,30 @@ commpage_update_boottime(uint64_t value) } } +/* + * set the commpage's remote time params for + * userspace call to mach_bridge_remote_time() + */ +void +commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts) +{ + if (commPagePtr) { +#ifdef __arm64__ + struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET); + paramsp->base_local_ts = 0; + __asm__ volatile ("dmb ish" ::: "memory"); + paramsp->rate = rate; + paramsp->base_remote_ts = base_remote_ts; + __asm__ volatile ("dmb ish" ::: "memory"); + paramsp->base_local_ts = base_local_ts; //This will act as a generation count +#else + (void)rate; + (void)base_local_ts; + (void)base_remote_ts; +#endif /* __arm64__ */ + } +} + /* * After this counter has incremented, all running CPUs are guaranteed to @@ -453,13 +496,14 @@ commpage_update_boottime(uint64_t value) uint64_t commpage_increment_cpu_quiescent_counter(void) { - if (!commPagePtr) + if (!commPagePtr) { return 0; + } uint64_t 
old_gen; _Atomic uint64_t *sched_gen = (_Atomic uint64_t *)(_COMM_PAGE_CPU_QUIESCENT_COUNTER + - _COMM_PAGE_RW_OFFSET); + _COMM_PAGE_RW_OFFSET); /* * On 32bit architectures, double-wide atomic load or stores are a CAS, * so the atomic increment is the most efficient way to increment the @@ -476,4 +520,3 @@ commpage_increment_cpu_quiescent_counter(void) #endif return old_gen; } - diff --git a/osfmk/arm/commpage/commpage.h b/osfmk/arm/commpage/commpage.h index d7f349c29..6eeb63799 100644 --- a/osfmk/arm/commpage/commpage.h +++ b/osfmk/arm/commpage/commpage.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,30 +22,30 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _ARM_COMMPAGE_H #define _ARM_COMMPAGE_H -#ifndef __ASSEMBLER__ +#ifndef __ASSEMBLER__ #include <stdint.h> #endif /* __ASSEMBLER__ */ -extern void commpage_set_timestamp(uint64_t tbr, uint64_t secs, uint64_t frac, uint64_t scale, uint64_t tick_per_sec); +extern void commpage_set_timestamp(uint64_t tbr, uint64_t secs, uint64_t frac, uint64_t scale, uint64_t tick_per_sec); #define commpage_disable_timestamp() commpage_set_timestamp( 0, 0, 0, 0, 0 ); -extern void commpage_set_memory_pressure( unsigned int pressure ); +extern void commpage_set_memory_pressure( unsigned int pressure ); extern void commpage_update_active_cpus(void); extern void commpage_set_spin_count(unsigned int count); -extern void commpage_update_timebase(void); -extern void commpage_update_mach_approximate_time(uint64_t); -extern void commpage_update_kdebug_state(void); -extern void commpage_update_atm_diagnostic_config(uint32_t); -extern void commpage_update_mach_continuous_time(uint64_t sleeptime); -extern void commpage_update_multiuser_config(uint32_t); +extern void commpage_update_timebase(void); +extern void commpage_update_mach_approximate_time(uint64_t); +extern void commpage_update_kdebug_state(void); +extern void commpage_update_atm_diagnostic_config(uint32_t); +extern void commpage_update_mach_continuous_time(uint64_t sleeptime); +extern void commpage_update_multiuser_config(uint32_t); extern void commpage_update_boottime(uint64_t boottime_usec); -extern void commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts); +extern void commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts); extern uint64_t commpage_increment_cpu_quiescent_counter(void); #endif /* _ARM_COMMPAGE_H */ diff --git a/osfmk/arm/commpage/commpage_sigs.h
b/osfmk/arm/commpage/commpage_sigs.h index d392097a1..ab17f5dac 100644 --- a/osfmk/arm/commpage/commpage_sigs.h +++ b/osfmk/arm/commpage/commpage_sigs.h @@ -32,7 +32,7 @@ (((x) >> 24) & 0x000000ff) #define BSWAP_32_OFFSET(x) \ - BSWAP_32(x + _COMM_PAGE_SIGS_OFFSET) + BSWAP_32(x + _COMM_PAGE_SIGS_OFFSET) #define COMMPAGE_SIGS_BEGIN \ .const_data ; \ @@ -45,13 +45,13 @@ _commpage_sigs_begin: _commpage_sigs_end: ; \ #define COMMPAGE_SIG_START(x) \ -.private_extern _commpage_sig ## x ; \ +.private_extern _commpage_sig ## x ; \ _commpage_sig ## x ## : ; \ .long BSWAP_32(0x14400000) ; \ .long BSWAP_32(0x00000001) ; \ - .asciz # x ; \ + .asciz # x ; \ .align 2 ; \ - .long BSWAP_32(0x14400000) + .long BSWAP_32(0x14400000) #define COMMPAGE_SIG_END(x) \ .long BSWAP_32(0x4e800020) ; \ @@ -64,20 +64,20 @@ _commpage_sig ## x ## : ; \ #define OBJCRTP_SIG_START(x) COMMPAGE_SIG_START(x) #define OBJCRTP_SIG_END(x) \ - .long BSWAP_32(0x14400000) ; \ - .long BSWAP_32(0x00000000) ; \ - .asciz # x ; \ - .align 2 ; \ - .long BSWAP_32(0x14400000) + .long BSWAP_32(0x14400000) ; \ + .long BSWAP_32(0x00000000) ; \ + .asciz # x ; \ + .align 2 ; \ + .long BSWAP_32(0x14400000) #define OBJCRTP_SIG_CALL_SUBJECT(x) \ - .long BSWAP_32(0x14400002) ; \ - .long BSWAP_32(0x00000000) ; \ - .long BSWAP_32(0x00040000) ; \ - .long BSWAP_32(0x00000000) ; \ - .asciz # x ; \ - .align 2 ; \ - .long BSWAP_32(0x14400002) + .long BSWAP_32(0x14400002) ; \ + .long BSWAP_32(0x00000000) ; \ + .long BSWAP_32(0x00040000) ; \ + .long BSWAP_32(0x00000000) ; \ + .asciz # x ; \ + .align 2 ; \ + .long BSWAP_32(0x14400002) #define ARG(n) \ ((((n * 2) + 6) << 20) + 4) diff --git a/osfmk/arm/cpu.c b/osfmk/arm/cpu.c index 49b5833cc..4109f698e 100644 --- a/osfmk/arm/cpu.c +++ b/osfmk/arm/cpu.c @@ -64,17 +64,17 @@ extern unsigned int start_cpu; unsigned int start_cpu_paddr; -extern boolean_t idle_enable; -extern unsigned int real_ncpus; -extern uint64_t wake_abstime; +extern boolean_t idle_enable; +extern unsigned int real_ncpus; +extern uint64_t wake_abstime; extern void* wfi_inst; unsigned wfi_fast = 1; unsigned patch_to_nop = 0xe1a00000; -void *LowExceptionVectorsAddr; -#define IOS_STATE (((vm_offset_t)LowExceptionVectorsAddr + 0x80)) -#define IOS_STATE_SIZE (0x08UL) +void *LowExceptionVectorsAddr; +#define IOS_STATE (((vm_offset_t)LowExceptionVectorsAddr + 0x80)) +#define IOS_STATE_SIZE (0x08UL) static const uint8_t suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'}; static const uint8_t running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'}; @@ -105,7 +105,6 @@ cpu_sleep(void) CleanPoC_Dcache(); PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id); - } _Atomic uint32_t cpu_idle_count = 0; @@ -118,22 +117,26 @@ void __attribute__((noreturn)) cpu_idle(void) { cpu_data_t *cpu_data_ptr = getCpuDatap(); - uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop; + uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop; - if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) + if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) { Idle_load_context(); - if (!SetIdlePop()) + } + if (!SetIdlePop()) { Idle_load_context(); + } lastPop = cpu_data_ptr->rtcPop; pmap_switch_user_ttb(kernel_pmap); cpu_data_ptr->cpu_active_thread = current_thread(); - if (cpu_data_ptr->cpu_user_debug) + if (cpu_data_ptr->cpu_user_debug) { arm_debug_set(NULL); + } cpu_data_ptr->cpu_user_debug = NULL; - if (cpu_data_ptr->cpu_idle_notify) - ((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks); + if 
(cpu_data_ptr->cpu_idle_notify) { + ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks); + } if (cpu_data_ptr->idle_timer_notify != 0) { if (new_idle_timeout_ticks == 0x0ULL) { @@ -144,8 +147,9 @@ cpu_idle(void) clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline); } timer_resync_deadlines(); - if (cpu_data_ptr->rtcPop != lastPop) + if (cpu_data_ptr->rtcPop != lastPop) { SetIdlePop(); + } } #if KPC @@ -167,7 +171,7 @@ cpu_idle(void) void cpu_idle_exit(boolean_t from_reset __unused) { - uint64_t new_idle_timeout_ticks = 0x0ULL; + uint64_t new_idle_timeout_ticks = 0x0ULL; cpu_data_t *cpu_data_ptr = getCpuDatap(); #if KPC @@ -177,8 +181,9 @@ cpu_idle_exit(boolean_t from_reset __unused) pmap_set_pmap(cpu_data_ptr->cpu_active_thread->map->pmap, current_thread()); - if (cpu_data_ptr->cpu_idle_notify) - ((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks); + if (cpu_data_ptr->cpu_idle_notify) { + ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks); + } if (cpu_data_ptr->idle_timer_notify != 0) { if (new_idle_timeout_ticks == 0x0ULL) { @@ -201,7 +206,6 @@ cpu_init(void) arm_cpu_info_t *cpu_info_p; if (cdp->cpu_type != CPU_TYPE_ARM) { - cdp->cpu_type = CPU_TYPE_ARM; timer_call_queue_init(&cdp->rtclock_timer.queue); @@ -231,10 +235,11 @@ cpu_init(void) break; case CPU_ARCH_ARMv5TE: case CPU_ARCH_ARMv5TEJ: - if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL) + if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL) { cdp->cpu_subtype = CPU_SUBTYPE_ARM_XSCALE; - else + } else { cdp->cpu_subtype = CPU_SUBTYPE_ARM_V5TEJ; + } break; case CPU_ARCH_ARMv6: cdp->cpu_subtype = CPU_SUBTYPE_ARM_V6; @@ -264,33 +269,34 @@ cpu_init(void) cdp->cpu_running = TRUE; cdp->cpu_sleep_token_last = cdp->cpu_sleep_token; cdp->cpu_sleep_token = 0x0UL; - } void cpu_stack_alloc(cpu_data_t *cpu_data_ptr) { - vm_offset_t irq_stack = 0; - vm_offset_t fiq_stack = 0; + vm_offset_t irq_stack = 0; + vm_offset_t fiq_stack = 0; kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack, - INTSTACK_SIZE + (2 * PAGE_SIZE), - PAGE_MASK, - KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT, - VM_KERN_MEMORY_STACK); - if (kr != KERN_SUCCESS) + INTSTACK_SIZE + (2 * PAGE_SIZE), + PAGE_MASK, + KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT, + VM_KERN_MEMORY_STACK); + if (kr != KERN_SUCCESS) { panic("Unable to allocate cpu interrupt stack\n"); + } cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE; cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top; kr = kernel_memory_allocate(kernel_map, &fiq_stack, - FIQSTACK_SIZE + (2 * PAGE_SIZE), - PAGE_MASK, - KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT, - VM_KERN_MEMORY_STACK); - if (kr != KERN_SUCCESS) + FIQSTACK_SIZE + (2 * PAGE_SIZE), + PAGE_MASK, + KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT, + VM_KERN_MEMORY_STACK); + if (kr != KERN_SUCCESS) { panic("Unable to allocate cpu exception stack\n"); + } cpu_data_ptr->fiqstack_top = fiq_stack + PAGE_SIZE + FIQSTACK_SIZE; cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top; @@ -299,12 +305,13 @@ cpu_stack_alloc(cpu_data_t *cpu_data_ptr) void cpu_data_free(cpu_data_t *cpu_data_ptr) { - if (cpu_data_ptr == &BootCpuData) - return; + if (cpu_data_ptr == &BootCpuData) { + return; + } cpu_processor_free( cpu_data_ptr->cpu_processor); - kfree( (void 
*)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE); - kfree( (void *)(cpu_data_ptr->fiqstack_top - FIQSTACK_SIZE), FIQSTACK_SIZE); + (kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE); + (kfree)((void *)(cpu_data_ptr->fiqstack_top - FIQSTACK_SIZE), FIQSTACK_SIZE); kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t)); } @@ -314,7 +321,7 @@ cpu_data_init(cpu_data_t *cpu_data_ptr) uint32_t i = 0; cpu_data_ptr->cpu_flags = 0; -#if __arm__ +#if __arm__ cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable; #endif cpu_data_ptr->interrupts_enabled = 0; @@ -360,7 +367,7 @@ cpu_data_init(cpu_data_t *cpu_data_ptr) cpu_data_ptr->cpu_xcall_p0 = NULL; cpu_data_ptr->cpu_xcall_p1 = NULL; -#if __ARM_SMP__ && defined(ARMA7) +#if __ARM_SMP__ && defined(ARMA7) cpu_data_ptr->cpu_CLWFlush_req = 0x0ULL; cpu_data_ptr->cpu_CLWFlush_last = 0x0ULL; cpu_data_ptr->cpu_CLWClean_req = 0x0ULL; @@ -392,7 +399,7 @@ cpu_data_register(cpu_data_t *cpu_data_ptr) cpu_data_ptr->cpu_number = cpu; CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr; - CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys( (vm_offset_t)cpu_data_ptr); + CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr); return KERN_SUCCESS; } @@ -405,18 +412,19 @@ cpu_start(int cpu) return KERN_SUCCESS; } else { #if __ARM_SMP__ - cpu_data_t *cpu_data_ptr; - thread_t first_thread; + cpu_data_t *cpu_data_ptr; + thread_t first_thread; cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr; cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr; cpu_data_ptr->cpu_pmap_cpu_data.cpu_user_pmap = NULL; - if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL) + if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL) { first_thread = cpu_data_ptr->cpu_processor->next_thread; - else + } else { first_thread = cpu_data_ptr->cpu_processor->idle_thread; + } cpu_data_ptr->cpu_active_thread = first_thread; first_thread->machine.CpuDatap = cpu_data_ptr; @@ -452,7 +460,7 @@ cpu_timebase_init(boolean_t from_boot __unused) cdp->cpu_base_timebase_low = rtclock_base_abstime_low; cdp->cpu_base_timebase_high = rtclock_base_abstime_high; #else - *((uint64_t *) & cdp->cpu_base_timebase_low) = rtclock_base_abstime; + *((uint64_t *) &cdp->cpu_base_timebase_low) = rtclock_base_abstime; #endif } @@ -464,31 +472,34 @@ ml_arm_sleep(void) cpu_data_t *cpu_data_ptr = getCpuDatap(); if (cpu_data_ptr == &BootCpuData) { - cpu_data_t *target_cdp; - unsigned int cpu; + cpu_data_t *target_cdp; + unsigned int cpu; - for (cpu=0; cpu < MAX_CPUS; cpu++) { + for (cpu = 0; cpu < MAX_CPUS; cpu++) { target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if(target_cdp == (cpu_data_t *)NULL) + if (target_cdp == (cpu_data_t *)NULL) { break; + } - if (target_cdp == cpu_data_ptr) + if (target_cdp == cpu_data_ptr) { continue; + } - while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH); + while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) { + ; + } } /* Now that the other cores have entered the sleep path, set * the abstime fixup we'll use when we resume.*/ rtclock_base_abstime = ml_get_timebase(); wake_abstime = rtclock_base_abstime; - } else { platform_cache_disable(); CleanPoU_Dcache(); } cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH; -#if __ARM_SMP__ && defined(ARMA7) +#if __ARM_SMP__ && defined(ARMA7) cpu_data_ptr->cpu_CLWFlush_req = 0; cpu_data_ptr->cpu_CLWClean_req = 0; __builtin_arm_dmb(DMB_ISH); @@ -498,8 +509,9 @@ ml_arm_sleep(void) platform_cache_disable(); 
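/*
 * Editor's note -- illustrative sketch, not part of this patch: ml_arm_sleep()
 * coordinates the cores with cpu_sleep_token as a one-way publish/spin
 * handshake. Every core stores ARM_CPU_ON_SLEEP_PATH into its own cpu_data on
 * the way down, and the boot core (in the loop earlier in this function)
 * busy-waits on each other registered core's token before it captures
 * rtclock_base_abstime for the wake fixup. Reduced to its two sides:
 */
#if 0   /* condensed from the surrounding code, for illustration only */
	/* every core, before it parks: */
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;

	/* boot core, per secondary, before saving the wake timebase: */
	while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) {
		;   /* spin until that core has reached the sleep path */
	}
#endif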
platform_cache_shutdown(); bcopy((const void *)suspend_signature, (void *)(IOS_STATE), IOS_STATE_SIZE); - } else + } else { CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t)); + } __builtin_arm_dsb(DSB_SY); while (TRUE) { @@ -512,32 +524,37 @@ ml_arm_sleep(void) void cpu_machine_idle_init(boolean_t from_boot) { - static const unsigned int *BootArgs_paddr = (unsigned int *)NULL; - static const unsigned int *CpuDataEntries_paddr = (unsigned int *)NULL; - static unsigned int resume_idle_cpu_paddr = (unsigned int )NULL; - cpu_data_t *cpu_data_ptr = getCpuDatap(); + static const unsigned int *BootArgs_paddr = (unsigned int *)NULL; + static const unsigned int *CpuDataEntries_paddr = (unsigned int *)NULL; + static unsigned int resume_idle_cpu_paddr = (unsigned int)NULL; + cpu_data_t *cpu_data_ptr = getCpuDatap(); if (from_boot) { unsigned int jtag = 0; unsigned int wfi; - if (PE_parse_boot_argn("jtag", &jtag, sizeof (jtag))) { - if (jtag != 0) + if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) { + if (jtag != 0) { idle_enable = FALSE; - else + } else { idle_enable = TRUE; - } else + } + } else { idle_enable = TRUE; + } - if (!PE_parse_boot_argn("wfi", &wfi, sizeof (wfi))) + if (!PE_parse_boot_argn("wfi", &wfi, sizeof(wfi))) { wfi = 1; + } - if (wfi == 0) + if (wfi == 0) { bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&patch_to_nop), - (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst), sizeof(unsigned)); - if (wfi == 2) + (addr64_t)ml_static_vtop((vm_offset_t)&wfi_inst), sizeof(unsigned)); + } + if (wfi == 2) { wfi_fast = 0; + } LowExceptionVectorsAddr = (void *)ml_io_map(ml_vtophys((vm_offset_t)gPhysBase), PAGE_SIZE); @@ -549,25 +566,25 @@ cpu_machine_idle_init(boolean_t from_boot) BootArgs_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)BootArgs); bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&BootArgs_paddr), - (addr64_t)((unsigned int)(gPhysBase) + - ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)), - 4); + (addr64_t)((unsigned int)(gPhysBase) + + ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)), + 4); CpuDataEntries_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)CpuDataEntries); bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&CpuDataEntries_paddr), - (addr64_t)((unsigned int)(gPhysBase) + - ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)), - 4); + (addr64_t)((unsigned int)(gPhysBase) + + ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)), + 4); CleanPoC_DcacheRegion((vm_offset_t) phystokv(gPhysBase), PAGE_SIZE); resume_idle_cpu_paddr = (unsigned int)ml_static_vtop((vm_offset_t)&resume_idle_cpu); - } if (cpu_data_ptr == &BootCpuData) { bcopy(((const void *)running_signature), (void *)(IOS_STATE), IOS_STATE_SIZE); - }; + } + ; cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr; clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE); @@ -576,9 +593,9 @@ cpu_machine_idle_init(boolean_t from_boot) void machine_track_platform_idle(boolean_t entry) { - if (entry) + if (entry) { (void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED); - else + } else { (void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED); + } } - diff --git a/osfmk/arm/cpu_capabilities.h b/osfmk/arm/cpu_capabilities.h index 32044d7d0..c43156ceb 100644 --- a/osfmk/arm/cpu_capabilities.h +++ b/osfmk/arm/cpu_capabilities.h @@ -25,57 +25,58 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ 
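/*
 * Editor's note -- illustrative sketch, not part of this patch: the header
 * below documents _get_cpu_capabilities() as the authoritative (and
 * Apple-internal) way for user mode to query processor features. Under the
 * bit definitions that follow (kHasNeon, kNumCPUs, kNumCPUsShift), a
 * hypothetical caller could pick a code path like this:
 */
#if 0   /* hypothetical usage, for illustration only */
	extern int _get_cpu_capabilities(void);

	static int
	want_parallel_neon_path(void)
	{
		int caps = _get_cpu_capabilities();
		int ncpus = (caps & kNumCPUs) >> kNumCPUsShift; /* same as _NumCPUs() */
		return (caps & kHasNeon) != 0 && ncpus > 1;
	}
#endif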
-#ifdef PRIVATE +#ifdef PRIVATE #ifndef _ARM_CPU_CAPABILITIES_H #define _ARM_CPU_CAPABILITIES_H -#ifndef __ASSEMBLER__ +#ifndef __ASSEMBLER__ #include <stdint.h> #include <mach/vm_types.h> #endif - + /* * This is the authoritative way to determine from user mode what * implementation-specific processor features are available. * This API only supported for Apple internal use. - * + * */ /* * Bit definitions for _cpu_capabilities: */ -#define kHasNeonFP16 0x00000008 // ARM v8.2 NEON FP16 supported -#define kCache32 0x00000010 // cache line size is 32 bytes -#define kCache64 0x00000020 // cache line size is 64 bytes -#define kCache128 0x00000040 // cache line size is 128 bytes -#define kFastThreadLocalStorage 0x00000080 // TLS ptr is kept in a user-mode-readable register -#define kHasNeon 0x00000100 // Advanced SIMD is supported -#define kHasNeonHPFP 0x00000200 // Advanced SIMD half-precision -#define kHasVfp 0x00000400 // VFP is supported -#define kHasEvent 0x00001000 // WFE/SVE and period event wakeup -#define kHasFMA 0x00002000 // Fused multiply add is supported -#define kUP 0x00008000 // set if (kNumCPUs == 1) -#define kNumCPUs 0x00FF0000 // number of CPUs (see _NumCPUs() below) -#define kHasARMv8Crypto 0x01000000 // Optional ARMv8 Crypto extensions -#define kHasARMv81Atomics 0x02000000 // ARMv8.1 Atomic instructions supported -#define kHasARMv8Crc32 0x04000000 // Optional ARMv8 crc32 instructions (required in ARMv8.1) - -#define kNumCPUsShift 16 // see _NumCPUs() below - - /* - * Bit definitions for multiuser_config: - */ -#define kIsMultiUserDevice 0x80000000 // this device is in multiuser mode -#define kMultiUserCurrentUserMask 0x7fffffff // the current user UID of the multiuser device - -#ifndef __ASSEMBLER__ +#define kHasNeonFP16 0x00000008 // ARM v8.2 NEON FP16 supported +#define kCache32 0x00000010 // cache line size is 32 bytes +#define kCache64 0x00000020 // cache line size is 64 bytes +#define kCache128 0x00000040 // cache line size is 128 bytes +#define kFastThreadLocalStorage 0x00000080 // TLS ptr is kept in a user-mode-readable register +#define kHasNeon 0x00000100 // Advanced SIMD is supported +#define kHasNeonHPFP 0x00000200 // Advanced SIMD half-precision +#define kHasVfp 0x00000400 // VFP is supported +#define kHasEvent 0x00001000 // WFE/SVE and period event wakeup +#define kHasFMA 0x00002000 // Fused multiply add is supported +#define kUP 0x00008000 // set if (kNumCPUs == 1) +#define kNumCPUs 0x00FF0000 // number of CPUs (see _NumCPUs() below) +#define kHasARMv8Crypto 0x01000000 // Optional ARMv8 Crypto extensions +#define kHasARMv81Atomics 0x02000000 // ARMv8.1 Atomic instructions supported +#define kHasARMv8Crc32 0x04000000 // Optional ARMv8 crc32 instructions (required in ARMv8.1) + +#define kNumCPUsShift 16 // see _NumCPUs() below + +/* + * Bit definitions for multiuser_config: + */ +#define kIsMultiUserDevice 0x80000000 // this device is in multiuser mode +#define kMultiUserCurrentUserMask 0x7fffffff // the current user UID of the multiuser device + +#ifndef __ASSEMBLER__ #include extern int _get_cpu_capabilities( void ); __inline static -int _NumCPUs( void ) +int +_NumCPUs( void ) { return (_get_cpu_capabilities() & kNumCPUs) >> kNumCPUsShift; } @@ -86,12 +87,12 @@ typedef struct { volatile uint32_t TimeStamp_usec; volatile uint32_t TimeBaseTicks_per_sec; volatile uint32_t TimeBaseTicks_per_usec; - volatile uint64_t TimeBase_magic; - volatile uint32_t TimeBase_add; - volatile uint32_t TimeBase_shift; + volatile uint64_t TimeBase_magic; + volatile uint32_t TimeBase_add; + volatile uint32_t
TimeBase_shift; } commpage_timeofday_data_t; -extern vm_address_t _get_commpage_priv_address(void); +extern vm_address_t _get_commpage_priv_address(void); #endif /* __ASSEMBLER__ */ @@ -102,111 +103,112 @@ extern vm_address_t _get_commpage_priv_address(void); #if defined(__LP64__) -#define _COMM_PAGE64_BASE_ADDRESS (0x0000000FFFFFC000ULL) /* In TTBR0 */ -#define _COMM_HIGH_PAGE64_BASE_ADDRESS (0xFFFFFFF0001FC000ULL) /* Just below the kernel, safely in TTBR1; only used for testing */ +#define _COMM_PAGE64_BASE_ADDRESS (0x0000000FFFFFC000ULL) /* In TTBR0 */ +#define _COMM_HIGH_PAGE64_BASE_ADDRESS (0xFFFFFFF0001FC000ULL) /* Just below the kernel, safely in TTBR1; only used for testing */ -#define _COMM_PAGE64_AREA_LENGTH (_COMM_PAGE32_AREA_LENGTH) -#define _COMM_PAGE64_AREA_USED (-1) +#define _COMM_PAGE64_AREA_LENGTH (_COMM_PAGE32_AREA_LENGTH) +#define _COMM_PAGE64_AREA_USED (-1) -#define _COMM_PAGE_PRIV(_addr_) ((_addr_) - (_COMM_PAGE_START_ADDRESS) + _get_commpage_priv_address()) +#define _COMM_PAGE_PRIV(_addr_) ((_addr_) - (_COMM_PAGE_START_ADDRESS) + _get_commpage_priv_address()) #ifdef KERNEL_PRIVATE -#define _COMM_PAGE_RW_OFFSET (0) -#define _COMM_PAGE_AREA_LENGTH (PAGE_SIZE) +#define _COMM_PAGE_RW_OFFSET (0) +#define _COMM_PAGE_AREA_LENGTH (PAGE_SIZE) -#define _COMM_PAGE_BASE_ADDRESS (_get_commpage_priv_address()) -#define _COMM_PAGE_START_ADDRESS (_get_commpage_priv_address()) +#define _COMM_PAGE_BASE_ADDRESS (_get_commpage_priv_address()) +#define _COMM_PAGE_START_ADDRESS (_get_commpage_priv_address()) #else /* KERNEL_PRIVATE */ -#define _COMM_PAGE_AREA_LENGTH (4096) +#define _COMM_PAGE_AREA_LENGTH (4096) -#define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE64_BASE_ADDRESS -#define _COMM_PAGE_START_ADDRESS _COMM_PAGE64_BASE_ADDRESS +#define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE64_BASE_ADDRESS +#define _COMM_PAGE_START_ADDRESS _COMM_PAGE64_BASE_ADDRESS #endif /* KERNEL_PRIVATE */ #else -#define _COMM_PAGE64_BASE_ADDRESS (-1) -#define _COMM_PAGE64_AREA_LENGTH (-1) -#define _COMM_PAGE64_AREA_USED (-1) +#define _COMM_PAGE64_BASE_ADDRESS (-1) +#define _COMM_PAGE64_AREA_LENGTH (-1) +#define _COMM_PAGE64_AREA_USED (-1) // macro to change a user comm page address to one that is accessible from privileged mode // this macro is stubbed as PAN is not available on AARCH32, // but this may still be required for compatibility -#define _COMM_PAGE_PRIV(_addr_) (_addr_) +#define _COMM_PAGE_PRIV(_addr_) (_addr_) #ifdef KERNEL_PRIVATE -#define _COMM_PAGE_RW_OFFSET (_get_commpage_priv_address()-_COMM_PAGE_BASE_ADDRESS) -#define _COMM_PAGE_AREA_LENGTH (PAGE_SIZE) +#define _COMM_PAGE_RW_OFFSET (_get_commpage_priv_address()-_COMM_PAGE_BASE_ADDRESS) +#define _COMM_PAGE_AREA_LENGTH (PAGE_SIZE) #else -#define _COMM_PAGE_AREA_LENGTH (4096) +#define _COMM_PAGE_AREA_LENGTH (4096) #endif -#define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE32_BASE_ADDRESS -#define _COMM_PAGE_START_ADDRESS _COMM_PAGE32_BASE_ADDRESS +#define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE32_BASE_ADDRESS +#define _COMM_PAGE_START_ADDRESS _COMM_PAGE32_BASE_ADDRESS #endif -#define _COMM_PAGE32_BASE_ADDRESS (0xFFFF4000) /* Must be outside of normal map bounds */ -#define _COMM_PAGE32_AREA_LENGTH (_COMM_PAGE_AREA_LENGTH) +#define _COMM_PAGE32_BASE_ADDRESS (0xFFFF4000) /* Must be outside of normal map bounds */ +#define _COMM_PAGE32_AREA_LENGTH (_COMM_PAGE_AREA_LENGTH) + +#define _COMM_PAGE_TEXT_START (-1) +#define _COMM_PAGE32_TEXT_START (-1) +#define _COMM_PAGE64_TEXT_START (-1) +#define _COMM_PAGE_PFZ_START_OFFSET (-1) +#define _COMM_PAGE_PFZ_END_OFFSET (-1) -#define 
_COMM_PAGE_TEXT_START (-1) -#define _COMM_PAGE32_TEXT_START (-1) -#define _COMM_PAGE64_TEXT_START (-1) -#define _COMM_PAGE_PFZ_START_OFFSET (-1) -#define _COMM_PAGE_PFZ_END_OFFSET (-1) +#define _COMM_PAGE32_OBJC_SIZE 0ULL +#define _COMM_PAGE32_OBJC_BASE 0ULL +#define _COMM_PAGE64_OBJC_SIZE 0ULL +#define _COMM_PAGE64_OBJC_BASE 0ULL -#define _COMM_PAGE32_OBJC_SIZE 0ULL -#define _COMM_PAGE32_OBJC_BASE 0ULL -#define _COMM_PAGE64_OBJC_SIZE 0ULL -#define _COMM_PAGE64_OBJC_BASE 0ULL - /* * data in the comm pages * apply _COMM_PAGE_PRIV macro to use these in privileged mode */ -#define _COMM_PAGE_SIGNATURE (_COMM_PAGE_START_ADDRESS+0x000) // first few bytes are a signature -#define _COMM_PAGE_VERSION (_COMM_PAGE_START_ADDRESS+0x01E) // 16-bit version# -#define _COMM_PAGE_THIS_VERSION 3 // version of the commarea format - -#define _COMM_PAGE_CPU_CAPABILITIES (_COMM_PAGE_START_ADDRESS+0x020) // uint32_t _cpu_capabilities -#define _COMM_PAGE_NCPUS (_COMM_PAGE_START_ADDRESS+0x022) // uint8_t number of configured CPUs -#define _COMM_PAGE_USER_PAGE_SHIFT_32 (_COMM_PAGE_START_ADDRESS+0x024) // VM page shift for 32-bit processes -#define _COMM_PAGE_USER_PAGE_SHIFT_64 (_COMM_PAGE_START_ADDRESS+0x025) // VM page shift for 64-bit processes -#define _COMM_PAGE_CACHE_LINESIZE (_COMM_PAGE_START_ADDRESS+0x026) // uint16_t cache line size -#define _COMM_PAGE_SCHED_GEN (_COMM_PAGE_START_ADDRESS+0x028) // uint32_t scheduler generation number (count of pre-emptions) -#define _COMM_PAGE_SPIN_COUNT (_COMM_PAGE_START_ADDRESS+0x02C) // uint32_t max spin count for mutex's -#define _COMM_PAGE_MEMORY_PRESSURE (_COMM_PAGE_START_ADDRESS+0x030) // uint32_t copy of vm_memory_pressure -#define _COMM_PAGE_ACTIVE_CPUS (_COMM_PAGE_START_ADDRESS+0x034) // uint8_t number of active CPUs (hw.activecpu) -#define _COMM_PAGE_PHYSICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x035) // uint8_t number of physical CPUs (hw.physicalcpu_max) -#define _COMM_PAGE_LOGICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x036) // uint8_t number of logical CPUs (hw.logicalcpu_max) -#define _COMM_PAGE_KERNEL_PAGE_SHIFT (_COMM_PAGE_START_ADDRESS+0x037) // uint8_t kernel vm page shift */ -#define _COMM_PAGE_MEMORY_SIZE (_COMM_PAGE_START_ADDRESS+0x038) // uint64_t max memory size */ -#define _COMM_PAGE_TIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x040) // used by gettimeofday(). Currently, sizeof(commpage_timeofday_data_t) = 40. 
A new struct is used on gettimeofday but space is reserved on the commpage for compatibility -#define _COMM_PAGE_CPUFAMILY (_COMM_PAGE_START_ADDRESS+0x080) // used by memcpy() resolver -#define _COMM_PAGE_DEV_FIRM (_COMM_PAGE_START_ADDRESS+0x084) // uint32_t handle on PE_i_can_has_debugger -#define _COMM_PAGE_TIMEBASE_OFFSET (_COMM_PAGE_START_ADDRESS+0x088) // uint64_t timebase offset for constructing mach_absolute_time() -#define _COMM_PAGE_USER_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x090) // uint8_t is userspace mach_absolute_time supported (can read the timebase) -#define _COMM_PAGE_CONT_HWCLOCK (_COMM_PAGE_START_ADDRESS+0x091) // uint8_t is always-on hardware clock present for mach_continuous_time() -#define _COMM_PAGE_UNUSED0 (_COMM_PAGE_START_ADDRESS+0x092) // 6 unused bytes -#define _COMM_PAGE_CONT_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x098) // uint64_t base for mach_continuous_time() -#define _COMM_PAGE_BOOTTIME_USEC (_COMM_PAGE_START_ADDRESS+0x0A0) // uint64_t boottime in microseconds +#define _COMM_PAGE_SIGNATURE (_COMM_PAGE_START_ADDRESS+0x000) // first few bytes are a signature +#define _COMM_PAGE_VERSION (_COMM_PAGE_START_ADDRESS+0x01E) // 16-bit version# +#define _COMM_PAGE_THIS_VERSION 3 // version of the commarea format + +#define _COMM_PAGE_CPU_CAPABILITIES (_COMM_PAGE_START_ADDRESS+0x020) // uint32_t _cpu_capabilities +#define _COMM_PAGE_NCPUS (_COMM_PAGE_START_ADDRESS+0x022) // uint8_t number of configured CPUs +#define _COMM_PAGE_USER_PAGE_SHIFT_32 (_COMM_PAGE_START_ADDRESS+0x024) // VM page shift for 32-bit processes +#define _COMM_PAGE_USER_PAGE_SHIFT_64 (_COMM_PAGE_START_ADDRESS+0x025) // VM page shift for 64-bit processes +#define _COMM_PAGE_CACHE_LINESIZE (_COMM_PAGE_START_ADDRESS+0x026) // uint16_t cache line size +#define _COMM_PAGE_SCHED_GEN (_COMM_PAGE_START_ADDRESS+0x028) // uint32_t scheduler generation number (count of pre-emptions) +#define _COMM_PAGE_SPIN_COUNT (_COMM_PAGE_START_ADDRESS+0x02C) // uint32_t max spin count for mutex's +#define _COMM_PAGE_MEMORY_PRESSURE (_COMM_PAGE_START_ADDRESS+0x030) // uint32_t copy of vm_memory_pressure +#define _COMM_PAGE_ACTIVE_CPUS (_COMM_PAGE_START_ADDRESS+0x034) // uint8_t number of active CPUs (hw.activecpu) +#define _COMM_PAGE_PHYSICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x035) // uint8_t number of physical CPUs (hw.physicalcpu_max) +#define _COMM_PAGE_LOGICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x036) // uint8_t number of logical CPUs (hw.logicalcpu_max) +#define _COMM_PAGE_KERNEL_PAGE_SHIFT (_COMM_PAGE_START_ADDRESS+0x037) // uint8_t kernel vm page shift */ +#define _COMM_PAGE_MEMORY_SIZE (_COMM_PAGE_START_ADDRESS+0x038) // uint64_t max memory size */ +#define _COMM_PAGE_TIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x040) // used by gettimeofday(). Currently, sizeof(commpage_timeofday_data_t) = 40. 
A new struct is used on gettimeofday but space is reserved on the commpage for compatibility +#define _COMM_PAGE_CPUFAMILY (_COMM_PAGE_START_ADDRESS+0x080) // used by memcpy() resolver +#define _COMM_PAGE_DEV_FIRM (_COMM_PAGE_START_ADDRESS+0x084) // uint32_t handle on PE_i_can_has_debugger +#define _COMM_PAGE_TIMEBASE_OFFSET (_COMM_PAGE_START_ADDRESS+0x088) // uint64_t timebase offset for constructing mach_absolute_time() +#define _COMM_PAGE_USER_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x090) // uint8_t is userspace mach_absolute_time supported (can read the timebase) +#define _COMM_PAGE_CONT_HWCLOCK (_COMM_PAGE_START_ADDRESS+0x091) // uint8_t is always-on hardware clock present for mach_continuous_time() +#define _COMM_PAGE_UNUSED0 (_COMM_PAGE_START_ADDRESS+0x092) // 6 unused bytes +#define _COMM_PAGE_CONT_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x098) // uint64_t base for mach_continuous_time() +#define _COMM_PAGE_BOOTTIME_USEC (_COMM_PAGE_START_ADDRESS+0x0A0) // uint64_t boottime in microseconds // aligning to 64byte for cacheline size -#define _COMM_PAGE_APPROX_TIME (_COMM_PAGE_START_ADDRESS+0x0C0) // uint64_t last known mach_absolute_time() -#define _COMM_PAGE_APPROX_TIME_SUPPORTED (_COMM_PAGE_START_ADDRESS+0x0C8) // uint8_t is mach_approximate_time supported -#define _COMM_PAGE_UNUSED1 (_COMM_PAGE_START_ADDRESS+0x0C9) // 55 unused bytes, align next mutable value to a separate cache line +#define _COMM_PAGE_APPROX_TIME (_COMM_PAGE_START_ADDRESS+0x0C0) // uint64_t last known mach_absolute_time() +#define _COMM_PAGE_APPROX_TIME_SUPPORTED (_COMM_PAGE_START_ADDRESS+0x0C8) // uint8_t is mach_approximate_time supported +#define _COMM_PAGE_UNUSED1 (_COMM_PAGE_START_ADDRESS+0x0C9) // 55 unused bytes, align next mutable value to a separate cache line -#define _COMM_PAGE_KDEBUG_ENABLE (_COMM_PAGE_START_ADDRESS+0x100) // uint32_t export kdebug status bits to userspace -#define _COMM_PAGE_ATM_DIAGNOSTIC_CONFIG (_COMM_PAGE_START_ADDRESS+0x104) // uint32_t export "atm_diagnostic_config" to userspace -#define _COMM_PAGE_MULTIUSER_CONFIG (_COMM_PAGE_START_ADDRESS+0x108) // uint32_t export "multiuser_config" to userspace +#define _COMM_PAGE_KDEBUG_ENABLE (_COMM_PAGE_START_ADDRESS+0x100) // uint32_t export kdebug status bits to userspace +#define _COMM_PAGE_ATM_DIAGNOSTIC_CONFIG (_COMM_PAGE_START_ADDRESS+0x104) // uint32_t export "atm_diagnostic_config" to userspace +#define _COMM_PAGE_MULTIUSER_CONFIG (_COMM_PAGE_START_ADDRESS+0x108) // uint32_t export "multiuser_config" to userspace -#define _COMM_PAGE_NEWTIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x120) // used by gettimeofday(). Currently, sizeof(new_commpage_timeofday_data_t) = 40. +#define _COMM_PAGE_NEWTIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x120) // used by gettimeofday(). Currently, sizeof(new_commpage_timeofday_data_t) = 40. +#define _COMM_PAGE_REMOTETIME_PARAMS (_COMM_PAGE_START_ADDRESS+0x148) // used by mach_bridge_remote_time(). 
Currently, sizeof(struct bt_params) = 24 // aligning to 128 bytes for cacheline/fabric size #define _COMM_PAGE_CPU_QUIESCENT_COUNTER (_COMM_PAGE_START_ADDRESS+0x180) // uint64_t, but reserve the whole 128 (0x80) bytes -#define _COMM_PAGE_END (_COMM_PAGE_START_ADDRESS+0x1000) // end of common page +#define _COMM_PAGE_END (_COMM_PAGE_START_ADDRESS+0x1000) // end of common page #endif /* _ARM_CPU_CAPABILITIES_H */ #endif /* PRIVATE */ diff --git a/osfmk/arm/cpu_common.c b/osfmk/arm/cpu_common.c index d976ce5c1..85f1cf13b 100644 --- a/osfmk/arm/cpu_common.c +++ b/osfmk/arm/cpu_common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -59,79 +59,106 @@ void kperf_signal_handler(unsigned int cpu_number); #endif +cpu_data_t BootCpuData; +cpu_data_entry_t CpuDataEntries[MAX_CPUS]; + struct processor BootProcessor; -unsigned int real_ncpus = 1; -boolean_t idle_enable = FALSE; -uint64_t wake_abstime=0x0ULL; +unsigned int real_ncpus = 1; +boolean_t idle_enable = FALSE; +uint64_t wake_abstime = 0x0ULL; cpu_data_t * cpu_datap(int cpu) { assert(cpu < MAX_CPUS); - return (CpuDataEntries[cpu].cpu_data_vaddr); + return CpuDataEntries[cpu].cpu_data_vaddr; } kern_return_t cpu_control(int slot_num, - processor_info_t info, - unsigned int count) + processor_info_t info, + unsigned int count) { printf("cpu_control(%d,%p,%d) not implemented\n", - slot_num, info, count); - return (KERN_FAILURE); + slot_num, info, count); + return KERN_FAILURE; } kern_return_t cpu_info_count(processor_flavor_t flavor, - unsigned int *count) + unsigned int *count) { - switch (flavor) { case PROCESSOR_CPU_STAT: *count = PROCESSOR_CPU_STAT_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; + + case PROCESSOR_CPU_STAT64: + *count = PROCESSOR_CPU_STAT64_COUNT; + return KERN_SUCCESS; default: *count = 0; - return (KERN_FAILURE); + return KERN_FAILURE; } } kern_return_t -cpu_info(processor_flavor_t flavor, - int slot_num, - processor_info_t info, - unsigned int *count) +cpu_info(processor_flavor_t flavor, int slot_num, processor_info_t info, + unsigned int *count) { + cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr; + switch (flavor) { case PROCESSOR_CPU_STAT: - { - processor_cpu_stat_t cpu_stat; - cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr; - - if (*count < PROCESSOR_CPU_STAT_COUNT) - return (KERN_FAILURE); - - cpu_stat = (processor_cpu_stat_t) info; - cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt; - cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt; - cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt; - cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt; - cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt; - cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt; - cpu_stat->vfp_shortv_cnt = 0; - cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt; - cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt; - - *count = PROCESSOR_CPU_STAT_COUNT; - - return (KERN_SUCCESS); + { + if (*count < PROCESSOR_CPU_STAT_COUNT) { + return KERN_FAILURE; + } + + processor_cpu_stat_t cpu_stat = (processor_cpu_stat_t)info; + cpu_stat->irq_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.irq_ex_cnt; + cpu_stat->ipi_cnt = (uint32_t)cpu_data_ptr->cpu_stat.ipi_cnt; + cpu_stat->timer_cnt = (uint32_t)cpu_data_ptr->cpu_stat.timer_cnt; + cpu_stat->undef_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.undef_ex_cnt; + cpu_stat->unaligned_cnt = 
(uint32_t)cpu_data_ptr->cpu_stat.unaligned_cnt; + cpu_stat->vfp_cnt = (uint32_t)cpu_data_ptr->cpu_stat.vfp_cnt; + cpu_stat->vfp_shortv_cnt = 0; + cpu_stat->data_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.data_ex_cnt; + cpu_stat->instr_ex_cnt = (uint32_t)cpu_data_ptr->cpu_stat.instr_ex_cnt; + + *count = PROCESSOR_CPU_STAT_COUNT; + + return KERN_SUCCESS; + } + + case PROCESSOR_CPU_STAT64: + { + if (*count < PROCESSOR_CPU_STAT64_COUNT) { + return KERN_FAILURE; } + processor_cpu_stat64_t cpu_stat = (processor_cpu_stat64_t)info; + cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt; + cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt; + cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt; + cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt; + cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt; + cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt; + cpu_stat->vfp_shortv_cnt = 0; + cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt; + cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt; + cpu_stat->pmi_cnt = cpu_data_ptr->cpu_stat.pmi_cnt; + + *count = PROCESSOR_CPU_STAT64_COUNT; + + return KERN_SUCCESS; + } + default: - return (KERN_FAILURE); + return KERN_FAILURE; } } @@ -140,8 +167,8 @@ cpu_info(processor_flavor_t flavor, * Function: */ void -cpu_doshutdown(void (*doshutdown) (processor_t), - processor_t processor) +cpu_doshutdown(void (*doshutdown)(processor_t), + processor_t processor) { doshutdown(processor); } @@ -153,9 +180,9 @@ cpu_doshutdown(void (*doshutdown) (processor_t), void cpu_idle_tickle(void) { - boolean_t intr; - cpu_data_t *cpu_data_ptr; - uint64_t new_idle_timeout_ticks = 0x0ULL; + boolean_t intr; + cpu_data_t *cpu_data_ptr; + uint64_t new_idle_timeout_ticks = 0x0ULL; intr = ml_set_interrupts_enabled(FALSE); cpu_data_ptr = getCpuDatap(); @@ -177,12 +204,12 @@ cpu_idle_tickle(void) static void cpu_handle_xcall(cpu_data_t *cpu_data_ptr) { - broadcastFunc xfunc; - void *xparam; + broadcastFunc xfunc; + void *xparam; __c11_atomic_thread_fence(memory_order_acquire_smp); /* Come back around if cpu_signal_internal is running on another CPU and has just - * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/ + * added SIGPxcall to the pending mask, but hasn't yet assigned the call params.*/ if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) { xfunc = cpu_data_ptr->cpu_xcall_p0; xparam = cpu_data_ptr->cpu_xcall_p1; @@ -192,21 +219,20 @@ cpu_handle_xcall(cpu_data_t *cpu_data_ptr) hw_atomic_and_noret(&cpu_data_ptr->cpu_signal, ~SIGPxcall); xfunc(xparam); } - } unsigned int cpu_broadcast_xcall(uint32_t *synch, - boolean_t self_xcall, - broadcastFunc func, - void *parm) + boolean_t self_xcall, + broadcastFunc func, + void *parm) { - boolean_t intr; - cpu_data_t *cpu_data_ptr; - cpu_data_t *target_cpu_datap; - unsigned int failsig; - int cpu; - int max_cpu; + boolean_t intr; + cpu_data_t *cpu_data_ptr; + cpu_data_t *target_cpu_datap; + unsigned int failsig; + int cpu; + int max_cpu; intr = ml_set_interrupts_enabled(FALSE); cpu_data_ptr = getCpuDatap(); @@ -219,13 +245,14 @@ cpu_broadcast_xcall(uint32_t *synch, } max_cpu = ml_get_max_cpu_number(); - for (cpu=0; cpu <= max_cpu; cpu++) { + for (cpu = 0; cpu <= max_cpu; cpu++) { target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) + if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) { continue; + } - if(KERN_SUCCESS != 
cpu_signal(target_cpu_datap, SIGPxcall, (void *)func, parm)) { + if (KERN_SUCCESS != cpu_signal(target_cpu_datap, SIGPxcall, (void *)func, parm)) { failsig++; } } @@ -238,55 +265,60 @@ cpu_broadcast_xcall(uint32_t *synch, (void) ml_set_interrupts_enabled(intr); if (synch != NULL) { - if (hw_atomic_sub(synch, (!self_xcall)? failsig+1 : failsig) == 0) + if (hw_atomic_sub(synch, (!self_xcall)? failsig + 1 : failsig) == 0) { clear_wait(current_thread(), THREAD_AWAKENED); - else + } else { thread_block(THREAD_CONTINUE_NULL); + } } - if (!self_xcall) - return (real_ncpus - failsig - 1); - else - return (real_ncpus - failsig); + if (!self_xcall) { + return real_ncpus - failsig - 1; + } else { + return real_ncpus - failsig; + } } kern_return_t cpu_xcall(int cpu_number, broadcastFunc func, void *param) { - cpu_data_t *target_cpu_datap; + cpu_data_t *target_cpu_datap; - if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) + if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number())) { return KERN_INVALID_ARGUMENT; + } - target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr; - if (target_cpu_datap == NULL) + target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr; + if (target_cpu_datap == NULL) { return KERN_INVALID_ARGUMENT; + } return cpu_signal(target_cpu_datap, SIGPxcall, (void*)func, param); } static kern_return_t cpu_signal_internal(cpu_data_t *target_proc, - unsigned int signal, - void *p0, - void *p1, - boolean_t defer) + unsigned int signal, + void *p0, + void *p1, + boolean_t defer) { - unsigned int Check_SIGPdisabled; - int current_signals; - Boolean swap_success; - boolean_t interruptible = ml_set_interrupts_enabled(FALSE); - cpu_data_t *current_proc = getCpuDatap(); + unsigned int Check_SIGPdisabled; + int current_signals; + Boolean swap_success; + boolean_t interruptible = ml_set_interrupts_enabled(FALSE); + cpu_data_t *current_proc = getCpuDatap(); /* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */ if (defer) { assert(signal == SIGPnop); } - if (current_proc != target_proc) + if (current_proc != target_proc) { Check_SIGPdisabled = SIGPdisabled; - else + } else { Check_SIGPdisabled = 0; + } if (signal == SIGPxcall) { do { @@ -301,14 +333,14 @@ cpu_signal_internal(cpu_data_t *target_proc, return KERN_FAILURE; } swap_success = OSCompareAndSwap(current_signals & (~SIGPxcall), current_signals | SIGPxcall, - &target_proc->cpu_signal); + &target_proc->cpu_signal); /* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn * be trying to xcall us. Since we have interrupts disabled that can deadlock, * so break the deadlock by draining pending xcalls. 
*/ - if (!swap_success && (current_proc->cpu_signal & SIGPxcall)) + if (!swap_success && (current_proc->cpu_signal & SIGPxcall)) { cpu_handle_xcall(current_proc); - + } } while (!swap_success); target_proc->cpu_xcall_p0 = p0; @@ -316,7 +348,7 @@ cpu_signal_internal(cpu_data_t *target_proc, } else { do { current_signals = target_proc->cpu_signal; - if ((Check_SIGPdisabled !=0 ) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) { + if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) { #if DEBUG || DEVELOPMENT target_proc->failed_signal = signal; OSIncrementAtomicLong(&target_proc->failed_signal_count); @@ -326,7 +358,7 @@ cpu_signal_internal(cpu_data_t *target_proc, } swap_success = OSCompareAndSwap(current_signals, current_signals | signal, - &target_proc->cpu_signal); + &target_proc->cpu_signal); } while (!swap_success); } @@ -347,14 +379,14 @@ cpu_signal_internal(cpu_data_t *target_proc, } ml_set_interrupts_enabled(interruptible); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t cpu_signal(cpu_data_t *target_proc, - unsigned int signal, - void *p0, - void *p1) + unsigned int signal, + void *p0, + void *p1) { return cpu_signal_internal(target_proc, signal, p0, p1, FALSE); } @@ -384,7 +416,7 @@ void cpu_signal_handler_internal(boolean_t disable_signal) { cpu_data_t *cpu_data_ptr = getCpuDatap(); - unsigned int cpu_signal; + unsigned int cpu_signal; cpu_data_ptr->cpu_stat.ipi_cnt++; @@ -394,10 +426,11 @@ cpu_signal_handler_internal(boolean_t disable_signal) cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0); - if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) + if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE)) { (void)hw_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled); - else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) + } else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE)) { (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdisabled); + } while (cpu_signal & ~SIGPdisabled) { if (cpu_signal & SIGPdec) { @@ -421,7 +454,7 @@ cpu_signal_handler_internal(boolean_t disable_signal) (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdebug); DebuggerXCall(cpu_data_ptr->cpu_int_state); } -#if __ARM_SMP__ && defined(ARMA7) +#if __ARM_SMP__ && defined(ARMA7) if (cpu_signal & SIGPLWFlush) { (void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWFlush); cache_xcall_handler(LWFlush); @@ -439,24 +472,33 @@ cpu_signal_handler_internal(boolean_t disable_signal) void cpu_exit_wait(int cpu) { - if ( cpu != master_cpu) { - cpu_data_t *cpu_data_ptr; + if (cpu != master_cpu) { + cpu_data_t *cpu_data_ptr; cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr; - while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {}; + while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) { + } + ; } } +boolean_t +cpu_can_exit(__unused int cpu) +{ + return TRUE; +} + void cpu_machine_init(void) { static boolean_t started = FALSE; - cpu_data_t *cpu_data_ptr; + cpu_data_t *cpu_data_ptr; cpu_data_ptr = getCpuDatap(); started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState); - if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) + if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { platform_cache_init(); + } PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started); cpu_data_ptr->cpu_flags |= StartedState; ml_init_interrupt(); @@ -467,12 +509,14 @@ cpu_processor_alloc(boolean_t is_boot_cpu) { 
processor_t proc; - if (is_boot_cpu) + if (is_boot_cpu) { return &BootProcessor; + } proc = kalloc(sizeof(*proc)); - if (!proc) + if (!proc) { return NULL; + } bzero((void *) proc, sizeof(*proc)); return proc; @@ -481,8 +525,9 @@ cpu_processor_alloc(boolean_t is_boot_cpu) void cpu_processor_free(processor_t proc) { - if (proc != NULL && proc != &BootProcessor) - kfree((void *) proc, sizeof(*proc)); + if (proc != NULL && proc != &BootProcessor) { + kfree(proc, sizeof(*proc)); + } } processor_t @@ -495,10 +540,11 @@ processor_t cpu_to_processor(int cpu) { cpu_data_t *cpu_data = cpu_datap(cpu); - if (cpu_data != NULL) + if (cpu_data != NULL) { return cpu_data->cpu_processor; - else + } else { return NULL; + } } cpu_data_t * @@ -518,13 +564,14 @@ processor_to_cpu_datap(processor_t processor) cpu_data_t * cpu_data_alloc(boolean_t is_boot_cpu) { - cpu_data_t *cpu_data_ptr = NULL; + cpu_data_t *cpu_data_ptr = NULL; - if (is_boot_cpu) + if (is_boot_cpu) { cpu_data_ptr = &BootCpuData; - else { - if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) + } else { + if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) { goto cpu_data_alloc_error; + } bzero((void *)cpu_data_ptr, sizeof(cpu_data_t)); @@ -532,8 +579,9 @@ cpu_data_alloc(boolean_t is_boot_cpu) } cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu); - if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) + if (cpu_data_ptr->cpu_processor == (struct processor *)NULL) { goto cpu_data_alloc_error; + } return cpu_data_ptr; @@ -545,49 +593,49 @@ cpu_data_alloc_error: ast_t * ast_pending(void) { - return (&getCpuDatap()->cpu_pending_ast); + return &getCpuDatap()->cpu_pending_ast; } cpu_type_t slot_type(int slot_num) { - return (cpu_datap(slot_num)->cpu_type); + return cpu_datap(slot_num)->cpu_type; } cpu_subtype_t slot_subtype(int slot_num) { - return (cpu_datap(slot_num)->cpu_subtype); + return cpu_datap(slot_num)->cpu_subtype; } cpu_threadtype_t slot_threadtype(int slot_num) { - return (cpu_datap(slot_num)->cpu_threadtype); + return cpu_datap(slot_num)->cpu_threadtype; } cpu_type_t cpu_type(void) { - return (getCpuDatap()->cpu_type); + return getCpuDatap()->cpu_type; } cpu_subtype_t cpu_subtype(void) { - return (getCpuDatap()->cpu_subtype); + return getCpuDatap()->cpu_subtype; } cpu_threadtype_t cpu_threadtype(void) { - return (getCpuDatap()->cpu_threadtype); + return getCpuDatap()->cpu_threadtype; } int cpu_number(void) { - return (getCpuDatap()->cpu_number); + return getCpuDatap()->cpu_number; } uint64_t @@ -595,4 +643,3 @@ ml_get_wake_timebase(void) { return wake_abstime; } - diff --git a/osfmk/arm/cpu_data.h b/osfmk/arm/cpu_data.h index 3b8c88854..b99f054e1 100644 --- a/osfmk/arm/cpu_data.h +++ b/osfmk/arm/cpu_data.h @@ -27,10 +27,10 @@ */ /* * @OSF_COPYRIGHT@ - * + * */ -#ifndef ARM_CPU_DATA +#ifndef ARM_CPU_DATA #define ARM_CPU_DATA #ifdef MACH_KERNEL_PRIVATE @@ -46,14 +46,15 @@ #include #include -#define current_thread() current_thread_fast() +#define current_thread() current_thread_fast() -static inline __pure2 thread_t current_thread_fast(void) +static inline __pure2 thread_t +current_thread_fast(void) { #if defined(__arm64__) return (thread_t)(__builtin_arm_rsr64("TPIDR_EL1")); #else - return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4)); // TPIDRPRW + return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4)); // TPIDRPRW #endif } @@ -72,31 +73,33 @@ static inline __pure2 thread_t 
current_thread_fast(void) * the window between the thread pointer update and the branch to * the new pc. */ -static inline thread_t current_thread_volatile(void) +static inline thread_t +current_thread_volatile(void) { /* The compiler treats rsr64 as const, which can allow - it to eliminate redundant calls, which we don't want here. - Thus we use volatile asm. The mrc used for arm32 should be - treated as volatile however. */ + * it to eliminate redundant calls, which we don't want here. + * Thus we use volatile asm. The mrc used for arm32 should be + * treated as volatile however. */ #if defined(__arm64__) thread_t result; - __asm__ volatile("mrs %0, TPIDR_EL1" : "=r" (result)); + __asm__ volatile ("mrs %0, TPIDR_EL1" : "=r" (result)); return result; #else - return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4)); // TPIDRPRW + return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4)); // TPIDRPRW #endif } #if defined(__arm64__) -static inline vm_offset_t exception_stack_pointer(void) +static inline vm_offset_t +exception_stack_pointer(void) { vm_offset_t result = 0; - __asm__ volatile( - "msr SPSel, #1 \n" - "mov %0, sp \n" - "msr SPSel, #0 \n" - : "=r" (result)); + __asm__ volatile ( + "msr SPSel, #1 \n" + "mov %0, sp \n" + "msr SPSel, #0 \n" + : "=r" (result)); return result; } @@ -104,16 +107,16 @@ static inline vm_offset_t exception_stack_pointer(void) #endif /* defined(__arm64__) */ #define getCpuDatap() current_thread()->machine.CpuDatap -#define current_cpu_datap() getCpuDatap() +#define current_cpu_datap() getCpuDatap() -extern int get_preemption_level(void); -extern void _enable_preemption_no_check(void); +extern int get_preemption_level(void); +extern void _enable_preemption_no_check(void); -#define enable_preemption_no_check() _enable_preemption_no_check() -#define mp_disable_preemption() _disable_preemption() -#define mp_enable_preemption() _enable_preemption() -#define mp_enable_preemption_no_check() _enable_preemption_no_check() +#define enable_preemption_no_check() _enable_preemption_no_check() +#define mp_disable_preemption() _disable_preemption() +#define mp_enable_preemption() _enable_preemption() +#define mp_enable_preemption_no_check() _enable_preemption_no_check() #endif /* MACH_KERNEL_PRIVATE */ -#endif /* ARM_CPU_DATA */ +#endif /* ARM_CPU_DATA */ diff --git a/osfmk/arm/cpu_data_internal.h b/osfmk/arm/cpu_data_internal.h index 29acbc1e8..ac6569f7c 100644 --- a/osfmk/arm/cpu_data_internal.h +++ b/osfmk/arm/cpu_data_internal.h @@ -27,10 +27,10 @@ */ /* * @OSF_COPYRIGHT@ - * + * */ -#ifndef ARM_CPU_DATA_INTERNAL +#ifndef ARM_CPU_DATA_INTERNAL #define ARM_CPU_DATA_INTERNAL #include @@ -47,43 +47,43 @@ #include #endif /* MONOTONIC */ -#define NSEC_PER_HZ (NSEC_PER_SEC / 100) +#define NSEC_PER_HZ (NSEC_PER_SEC / 100) typedef struct reset_handler_data { - vm_offset_t assist_reset_handler; /* Assist handler phys address */ - vm_offset_t cpu_data_entries; /* CpuDataEntries phys address */ + vm_offset_t assist_reset_handler; /* Assist handler phys address */ + vm_offset_t cpu_data_entries; /* CpuDataEntries phys address */ #if !__arm64__ - vm_offset_t boot_args; /* BootArgs phys address */ + vm_offset_t boot_args; /* BootArgs phys address */ #endif } reset_handler_data_t; -extern reset_handler_data_t ResetHandlerData; +extern reset_handler_data_t ResetHandlerData; #if __ARM_SMP__ #ifdef CPU_COUNT -#define MAX_CPUS CPU_COUNT +#define MAX_CPUS CPU_COUNT #else -#define MAX_CPUS 2 +#define MAX_CPUS 2 #endif #else -#define MAX_CPUS 1 +#define MAX_CPUS 1 #endif -#define CPUWINDOWS_MAX 4 
-#ifdef __arm__ -#define CPUWINDOWS_BASE 0xFFF00000UL +#define CPUWINDOWS_MAX 4 +#ifdef __arm__ +#define CPUWINDOWS_BASE 0xFFF00000UL #else -#define CPUWINDOWS_BASE_MASK 0xFFFFFFFFFFF00000UL -#define CPUWINDOWS_BASE (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) +#define CPUWINDOWS_BASE_MASK 0xFFFFFFFFFFF00000UL +#define CPUWINDOWS_BASE (VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) #endif -#define CPUWINDOWS_TOP (CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * PAGE_SIZE)) +#define CPUWINDOWS_TOP (CPUWINDOWS_BASE + (MAX_CPUS * CPUWINDOWS_MAX * PAGE_SIZE)) typedef struct cpu_data_entry { - void *cpu_data_paddr; /* Cpu data physical address */ - struct cpu_data *cpu_data_vaddr; /* Cpu data virtual address */ + void *cpu_data_paddr; /* Cpu data physical address */ + struct cpu_data *cpu_data_vaddr; /* Cpu data virtual address */ #if __arm__ - uint32_t cpu_data_offset_8; - uint32_t cpu_data_offset_12; + uint32_t cpu_data_offset_8; + uint32_t cpu_data_offset_12; #elif __arm64__ #else #error Check cpu_data_entry padding for this architecture @@ -92,229 +92,232 @@ typedef struct cpu_data_entry { typedef struct rtclock_timer { - mpqueue_head_t queue; - uint64_t deadline; - uint32_t is_set:1, - has_expired:1, - :0; + mpqueue_head_t queue; + uint64_t deadline; + uint32_t is_set:1, + has_expired:1, + :0; } rtclock_timer_t; -typedef struct { - uint32_t irq_ex_cnt; - uint32_t irq_ex_cnt_wake; - uint32_t ipi_cnt; - uint32_t ipi_cnt_wake; - uint32_t timer_cnt; - uint32_t timer_cnt_wake; - uint32_t undef_ex_cnt; - uint32_t unaligned_cnt; - uint32_t vfp_cnt; - uint32_t data_ex_cnt; - uint32_t instr_ex_cnt; +typedef struct { + /* + * The wake variants of these counters are reset to 0 when the CPU wakes. + */ + uint64_t irq_ex_cnt; + uint64_t irq_ex_cnt_wake; + uint64_t ipi_cnt; + uint64_t ipi_cnt_wake; + uint64_t timer_cnt; + uint64_t timer_cnt_wake; + uint64_t pmi_cnt; + uint64_t pmi_cnt_wake; + uint64_t undef_ex_cnt; + uint64_t unaligned_cnt; + uint64_t vfp_cnt; + uint64_t data_ex_cnt; + uint64_t instr_ex_cnt; } cpu_stat_t; -typedef struct cpu_data -{ - unsigned short cpu_number; - unsigned short cpu_flags; - vm_offset_t istackptr; - vm_offset_t intstack_top; +typedef struct cpu_data { + unsigned short cpu_number; + unsigned short cpu_flags; + vm_offset_t istackptr; + vm_offset_t intstack_top; #if __arm64__ - vm_offset_t excepstackptr; - vm_offset_t excepstack_top; - boolean_t cluster_master; + vm_offset_t excepstackptr; + vm_offset_t excepstack_top; + boolean_t cluster_master; #else - vm_offset_t fiqstackptr; - vm_offset_t fiqstack_top; + vm_offset_t fiqstackptr; + vm_offset_t fiqstack_top; #endif - boolean_t interrupts_enabled; - thread_t cpu_active_thread; - vm_offset_t cpu_active_stack; - unsigned int cpu_ident; - cpu_id_t cpu_id; - unsigned volatile int cpu_signal; + boolean_t interrupts_enabled; + thread_t cpu_active_thread; + vm_offset_t cpu_active_stack; + unsigned int cpu_ident; + cpu_id_t cpu_id; + unsigned volatile int cpu_signal; #if DEBUG || DEVELOPMENT - void *failed_xcall; - unsigned int failed_signal; - volatile long failed_signal_count; + void *failed_xcall; + unsigned int failed_signal; + volatile long failed_signal_count; #endif - void *cpu_cache_dispatch; - ast_t cpu_pending_ast; - struct processor *cpu_processor; - int cpu_type; - int cpu_subtype; - int cpu_threadtype; - int cpu_running; + void *cpu_cache_dispatch; + ast_t cpu_pending_ast; + struct processor *cpu_processor; + int cpu_type; + int cpu_subtype; + int cpu_threadtype; + int cpu_running; #ifdef __LP64__ - uint64_t 
cpu_base_timebase; - uint64_t cpu_timebase; + uint64_t cpu_base_timebase; + uint64_t cpu_timebase; #else union { - struct { - uint32_t low; - uint32_t high; - } split; - struct { - uint64_t val; - } raw; - } cbtb; -#define cpu_base_timebase_low cbtb.split.low -#define cpu_base_timebase_high cbtb.split.high + struct { + uint32_t low; + uint32_t high; + } split; + struct { + uint64_t val; + } raw; + } cbtb; +#define cpu_base_timebase_low cbtb.split.low +#define cpu_base_timebase_high cbtb.split.high union { - struct { - uint32_t low; - uint32_t high; - } split; - struct { - uint64_t val; - } raw; - } ctb; -#define cpu_timebase_low ctb.split.low -#define cpu_timebase_high ctb.split.high + struct { + uint32_t low; + uint32_t high; + } split; + struct { + uint64_t val; + } raw; + } ctb; +#define cpu_timebase_low ctb.split.low +#define cpu_timebase_high ctb.split.high #endif - uint32_t cpu_decrementer; - void *cpu_get_decrementer_func; - void *cpu_set_decrementer_func; - void *cpu_get_fiq_handler; + uint32_t cpu_decrementer; + void *cpu_get_decrementer_func; + void *cpu_set_decrementer_func; + void *cpu_get_fiq_handler; - void *cpu_tbd_hardware_addr; - void *cpu_tbd_hardware_val; + void *cpu_tbd_hardware_addr; + void *cpu_tbd_hardware_val; - void *cpu_console_buf; + void *cpu_console_buf; - void *cpu_idle_notify; - uint64_t cpu_idle_latency; - uint64_t cpu_idle_pop; + void *cpu_idle_notify; + uint64_t cpu_idle_latency; + uint64_t cpu_idle_pop; -#if __arm__ || __ARM_KERNEL_PROTECT__ - vm_offset_t cpu_exc_vectors; +#if __arm__ || __ARM_KERNEL_PROTECT__ + vm_offset_t cpu_exc_vectors; #endif /* __ARM_KERNEL_PROTECT__ */ - vm_offset_t cpu_reset_handler; - uint32_t cpu_reset_type; - uintptr_t cpu_reset_assist; - - void *cpu_int_state; - IOInterruptHandler interrupt_handler; - void *interrupt_nub; - unsigned int interrupt_source; - void *interrupt_target; - void *interrupt_refCon; - - void *idle_timer_notify; - void *idle_timer_refcon; - uint64_t idle_timer_deadline; - - uint64_t quantum_timer_deadline; - uint64_t rtcPop; - rtclock_timer_t rtclock_timer; - struct _rtclock_data_ *rtclock_datap; - - arm_debug_state_t *cpu_user_debug; /* Current debug state */ - vm_offset_t cpu_debug_interface_map; - - volatile int debugger_active; - - void *cpu_xcall_p0; - void *cpu_xcall_p1; - -#if __ARM_SMP__ && defined(ARMA7) - volatile uint32_t cpu_CLW_active; - volatile uint64_t cpu_CLWFlush_req; - volatile uint64_t cpu_CLWFlush_last; - volatile uint64_t cpu_CLWClean_req; - volatile uint64_t cpu_CLWClean_last; + vm_offset_t cpu_reset_handler; + uint32_t cpu_reset_type; + uintptr_t cpu_reset_assist; + + void *cpu_int_state; + IOInterruptHandler interrupt_handler; + void *interrupt_nub; + unsigned int interrupt_source; + void *interrupt_target; + void *interrupt_refCon; + + void *idle_timer_notify; + void *idle_timer_refcon; + uint64_t idle_timer_deadline; + + uint64_t quantum_timer_deadline; + uint64_t rtcPop; + rtclock_timer_t rtclock_timer; + struct _rtclock_data_ *rtclock_datap; + + arm_debug_state_t *cpu_user_debug; /* Current debug state */ + vm_offset_t cpu_debug_interface_map; + + volatile int debugger_active; + + void *cpu_xcall_p0; + void *cpu_xcall_p1; + +#if __ARM_SMP__ && defined(ARMA7) + volatile uint32_t cpu_CLW_active; + volatile uint64_t cpu_CLWFlush_req; + volatile uint64_t cpu_CLWFlush_last; + volatile uint64_t cpu_CLWClean_req; + volatile uint64_t cpu_CLWClean_last; #endif -#if __arm64__ - vm_offset_t coresight_base[CORESIGHT_REGIONS]; +#if __arm64__ + vm_offset_t coresight_base[CORESIGHT_REGIONS]; 
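On 32-bit ARM the struct above keeps the 64-bit timebase as two 32-bit halves (the cbtb and ctb unions with their low/high accessor macros). The conventional way to read such a pair without tearing is to re-check the high word after reading the low word; a minimal sketch of that retry loop, with hypothetical variables standing in for cpu_timebase_low/high:

#include <stdint.h>

static volatile uint32_t tb_low, tb_high;   /* hypothetical published halves */

static uint64_t
read_split_timebase(void)
{
	uint32_t hi, lo;
	do {
		hi = tb_high;
		lo = tb_low;             /* torn if the high half rolled over here... */
	} while (hi != tb_high);         /* ...so retry until the high half is stable */
	return ((uint64_t)hi << 32) | lo;
}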
#endif /* CCC ARMv8 registers */ - uint64_t cpu_regmap_paddr; + uint64_t cpu_regmap_paddr; - uint32_t cpu_phys_id; - uint32_t cpu_l2_access_penalty; - void *platform_error_handler; + uint32_t cpu_phys_id; + uint32_t cpu_l2_access_penalty; + void *platform_error_handler; - int cpu_mcount_off; + int cpu_mcount_off; - #define ARM_CPU_ON_SLEEP_PATH 0x50535553UL - volatile unsigned int cpu_sleep_token; - unsigned int cpu_sleep_token_last; + #define ARM_CPU_ON_SLEEP_PATH 0x50535553UL + volatile unsigned int cpu_sleep_token; + unsigned int cpu_sleep_token_last; - cpu_stat_t cpu_stat; + cpu_stat_t cpu_stat; - volatile int PAB_active; /* Tells the console if we are dumping backtraces */ + volatile int PAB_active; /* Tells the console if we are dumping backtraces */ #if KPC - /* double-buffered performance counter data */ - uint64_t *cpu_kpc_buf[2]; + /* double-buffered performance counter data */ + uint64_t *cpu_kpc_buf[2]; /* PMC shadow and reload value buffers */ - uint64_t *cpu_kpc_shadow; - uint64_t *cpu_kpc_reload; + uint64_t *cpu_kpc_shadow; + uint64_t *cpu_kpc_reload; #endif #if MONOTONIC - struct mt_cpu cpu_monotonic; + struct mt_cpu cpu_monotonic; #endif /* MONOTONIC */ - cluster_type_t cpu_cluster_type; - uint32_t cpu_cluster_id; - uint32_t cpu_l2_id; - uint32_t cpu_l2_size; - uint32_t cpu_l3_id; - uint32_t cpu_l3_size; - - struct pmap_cpu_data cpu_pmap_cpu_data; - dbgwrap_thread_state_t halt_state; + cluster_type_t cpu_cluster_type; + uint32_t cpu_cluster_id; + uint32_t cpu_l2_id; + uint32_t cpu_l2_size; + uint32_t cpu_l3_id; + uint32_t cpu_l3_size; + + struct pmap_cpu_data cpu_pmap_cpu_data; + dbgwrap_thread_state_t halt_state; enum { CPU_NOT_HALTED = 0, CPU_HALTED, CPU_HALTED_WITH_STATE - } halt_status; + } halt_status; } cpu_data_t; /* * cpu_flags */ -#define SleepState 0x0800 -#define StartedState 0x1000 +#define SleepState 0x0800 +#define StartedState 0x1000 -extern cpu_data_entry_t CpuDataEntries[MAX_CPUS]; -extern cpu_data_t BootCpuData; -extern boot_args *BootArgs; +extern cpu_data_entry_t CpuDataEntries[MAX_CPUS]; +extern cpu_data_t BootCpuData; +extern boot_args *BootArgs; #if __arm__ -extern unsigned int *ExceptionLowVectorsBase; -extern unsigned int *ExceptionVectorsTable; +extern unsigned int *ExceptionLowVectorsBase; +extern unsigned int *ExceptionVectorsTable; #elif __arm64__ -extern unsigned int LowResetVectorBase; -extern unsigned int LowResetVectorEnd; +extern unsigned int LowResetVectorBase; +extern unsigned int LowResetVectorEnd; #if WITH_CLASSIC_S2R -extern uint8_t SleepToken[8]; +extern uint8_t SleepToken[8]; #endif -extern unsigned int LowExceptionVectorBase; +extern unsigned int LowExceptionVectorBase; #else #error Unknown arch #endif -extern cpu_data_t *cpu_datap(int cpu); -extern cpu_data_t *cpu_data_alloc(boolean_t is_boot); -extern void cpu_stack_alloc(cpu_data_t*); -extern void cpu_data_init(cpu_data_t *cpu_data_ptr); -extern void cpu_data_free(cpu_data_t *cpu_data_ptr); -extern kern_return_t cpu_data_register(cpu_data_t *cpu_data_ptr); -extern cpu_data_t *processor_to_cpu_datap( processor_t processor); +extern cpu_data_t *cpu_datap(int cpu); +extern cpu_data_t *cpu_data_alloc(boolean_t is_boot); +extern void cpu_stack_alloc(cpu_data_t*); +extern void cpu_data_init(cpu_data_t *cpu_data_ptr); +extern void cpu_data_free(cpu_data_t *cpu_data_ptr); +extern kern_return_t cpu_data_register(cpu_data_t *cpu_data_ptr); +extern cpu_data_t *processor_to_cpu_datap( processor_t processor); #if __arm64__ -typedef struct sysreg_restore -{ - uint64_t tcr_el1; +typedef struct 
sysreg_restore { + uint64_t tcr_el1; } sysreg_restore_t; extern sysreg_restore_t sysreg_restore; #endif /* __arm64__ */ -#endif /* ARM_CPU_DATA_INTERNAL */ +#endif /* ARM_CPU_DATA_INTERNAL */ diff --git a/osfmk/arm/cpu_internal.h b/osfmk/arm/cpu_internal.h index 34b4bce72..f40941de5 100644 --- a/osfmk/arm/cpu_internal.h +++ b/osfmk/arm/cpu_internal.h @@ -28,50 +28,50 @@ /* * @OSF_COPYRIGHT@ */ -#ifndef _ARM_CPU_INTERNAL_H_ -#define _ARM_CPU_INTERNAL_H_ +#ifndef _ARM_CPU_INTERNAL_H_ +#define _ARM_CPU_INTERNAL_H_ #include #include -extern void cpu_bootstrap( - void); +extern void cpu_bootstrap( + void); -extern void cpu_init( - void); +extern void cpu_init( + void); -extern void cpu_timebase_init(boolean_t from_boot); +extern void cpu_timebase_init(boolean_t from_boot); -extern kern_return_t cpu_signal( - cpu_data_t *target, - unsigned int signal, - void *p0, - void *p1); +extern kern_return_t cpu_signal( + cpu_data_t *target, + unsigned int signal, + void *p0, + void *p1); -extern kern_return_t cpu_signal_deferred( - cpu_data_t *target); +extern kern_return_t cpu_signal_deferred( + cpu_data_t *target); -extern void cpu_signal_cancel( - cpu_data_t *target); +extern void cpu_signal_cancel( + cpu_data_t *target); -#define SIGPnop 0x00000000U /* Send IPI with no service */ -#define SIGPdec 0x00000001U /* Request decremeter service */ +#define SIGPnop 0x00000000U /* Send IPI with no service */ +#define SIGPdec 0x00000001U /* Request decremeter service */ /* 0x2U unused */ -#define SIGPxcall 0x00000004U /* Call a function on a processor */ -#define SIGPast 0x00000008U /* Request AST check */ -#define SIGPdebug 0x00000010U /* Request Debug call */ -#define SIGPLWFlush 0x00000020UL /* Request LWFlush call */ -#define SIGPLWClean 0x00000040UL /* Request LWClean call */ -#define SIGPkptimer 0x00000100U /* Request kperf timer */ +#define SIGPxcall 0x00000004U /* Call a function on a processor */ +#define SIGPast 0x00000008U /* Request AST check */ +#define SIGPdebug 0x00000010U /* Request Debug call */ +#define SIGPLWFlush 0x00000020UL /* Request LWFlush call */ +#define SIGPLWClean 0x00000040UL /* Request LWClean call */ +#define SIGPkptimer 0x00000100U /* Request kperf timer */ -#define SIGPdisabled 0x80000000U /* Signal disabled */ +#define SIGPdisabled 0x80000000U /* Signal disabled */ extern unsigned int real_ncpus; -#if defined(CONFIG_XNUPOST) && __arm64__ +#if defined(CONFIG_XNUPOST) && __arm64__ extern void arm64_ipi_test(void); #endif /* defined(CONFIG_XNUPOST) && __arm64__ */ -#endif /* _ARM_CPU_INTERNAL_H_ */ +#endif /* _ARM_CPU_INTERNAL_H_ */ diff --git a/osfmk/arm/cpu_number.h b/osfmk/arm/cpu_number.h index d781fa067..6e2b59cb8 100644 --- a/osfmk/arm/cpu_number.h +++ b/osfmk/arm/cpu_number.h @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,20 +61,20 @@ * Machine-dependent definitions for cpu identification. * */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _ARM_CPU_NUMBER_H_ -#define _ARM_CPU_NUMBER_H_ +#ifndef _ARM_CPU_NUMBER_H_ +#define _ARM_CPU_NUMBER_H_ #include __BEGIN_DECLS -extern int cpu_number(void); -extern int cpu_cluster_id(void); +extern int cpu_number(void); +extern int cpu_cluster_id(void); __END_DECLS -#endif /* _ARM_CPU_NUMBER_H_ */ +#endif /* _ARM_CPU_NUMBER_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/arm/cpuid.c b/osfmk/arm/cpuid.c index 22435e76b..147bfaa1d 100644 --- a/osfmk/arm/cpuid.c +++ b/osfmk/arm/cpuid.c @@ -43,13 +43,13 @@ typedef struct { uint32_t - Ctype1:3, /* 2:0 */ - Ctype2:3, /* 5:3 */ - Ctype3:3, /* 8:6 */ - Ctypes:15, /* 6:23 - Don't Care */ - LoC:3, /* 26-24 - Level of Coherency */ - LoU:3, /* 29:27 - Level of Unification */ - RAZ:2; /* 31:30 - Read-As-Zero */ + Ctype1:3, /* 2:0 */ + Ctype2:3, /* 5:3 */ + Ctype3:3, /* 8:6 */ + Ctypes:15, /* 6:23 - Don't Care */ + LoC:3, /* 26-24 - Level of Coherency */ + LoU:3, /* 29:27 - Level of Unification */ + RAZ:2; /* 31:30 - Read-As-Zero */ } arm_cache_clidr_t; typedef union { @@ -61,10 +61,10 @@ typedef union { typedef struct { uint32_t - LineSize:3, /* 2:0 - Number of words in cache line */ - Assoc:10, /* 12:3 - Associativity of cache */ - NumSets:15, /* 27:13 - Number of sets in cache */ - c_type:4; /* 31:28 - Cache type */ + LineSize:3, /* 2:0 - Number of words in cache line */ + Assoc:10, /* 12:3 - Associativity of cache */ + NumSets:15, /* 27:13 - Number of sets in cache */ + c_type:4; /* 31:28 - Cache type */ } arm_cache_ccsidr_t; @@ -85,11 +85,11 @@ void do_cpuid(void) { cpuid_cpu_info.value = machine_read_midr(); -#if (__ARM_ARCH__ == 8) +#if (__ARM_ARCH__ == 8) cpuid_cpu_info.arm_info.arm_arch = CPU_ARCH_ARMv8; -#elif (__ARM_ARCH__ == 7) +#elif (__ARM_ARCH__ == 7) #ifdef __ARM_SUB_ARCH__ cpuid_cpu_info.arm_info.arm_arch = __ARM_SUB_ARCH__; #else @@ -209,7 +209,8 @@ do_mvfpid(void) } arm_mvfp_info_t -*arm_mvfp_info(void) +* +arm_mvfp_info(void) { return machine_arm_mvfp_info(); } @@ -247,7 +248,7 @@ do_cacheid(void) cpuid_cache_info.c_type = CACHE_UNKNOWN; } - cpuid_cache_info.c_linesz = 4 * (1<<(arm_cache_ccsidr_info.bits.LineSize + 2)); + cpuid_cache_info.c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2)); cpuid_cache_info.c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1); /* I cache size */ @@ -259,7 +260,6 @@ do_cacheid(void) if ((arm_cache_clidr_info.bits.Ctype3 == 0x4) || (arm_cache_clidr_info.bits.Ctype2 == 0x4) || (arm_cache_clidr_info.bits.Ctype2 == 0x2)) { - if (arm_cache_clidr_info.bits.Ctype3 == 0x4) { /* Select L3 (LLC) if the SoC is new enough to have that. * This will be the second-level cache for the highest-performing ACC. 
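do_cacheid() above recovers the cache geometry from biased CCSIDR fields: the line size is 4 bytes shifted left by (LineSize + 2), while associativity and set count are stored minus one, so the total size is (NumSets + 1) * linesz * assoc. A self-contained check of that arithmetic using the same field layout (the values are illustrative, not read from hardware):

#include <stdint.h>
#include <stdio.h>

typedef struct {                         /* same layout as arm_cache_ccsidr_t */
	uint32_t LineSize:3, Assoc:10, NumSets:15, c_type:4;
} ccsidr_bits;

int
main(void)
{
	ccsidr_bits b = { .LineSize = 1, .Assoc = 3, .NumSets = 255 };
	uint32_t linesz = 4 * (1u << (b.LineSize + 2));   /* 32-byte lines */
	uint32_t assoc  = b.Assoc + 1;                    /* stored minus one: 4-way */
	uint32_t size   = (b.NumSets + 1) * linesz * assoc;
	printf("%u-byte lines, %u-way, %u bytes\n", linesz, assoc, size);
	return 0;                            /* 32-byte lines, 4-way, 32768 bytes */
}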
*/ @@ -270,7 +270,7 @@ do_cacheid(void) } arm_cache_ccsidr_info.value = machine_read_ccsidr(); - cpuid_cache_info.c_linesz = 4 * (1<<(arm_cache_ccsidr_info.bits.LineSize + 2)); + cpuid_cache_info.c_linesz = 4 * (1 << (arm_cache_ccsidr_info.bits.LineSize + 2)); cpuid_cache_info.c_assoc = (arm_cache_ccsidr_info.bits.Assoc + 1); cpuid_cache_info.c_l2size = (arm_cache_ccsidr_info.bits.NumSets + 1) * cpuid_cache_info.c_linesz * cpuid_cache_info.c_assoc; cpuid_cache_info.c_inner_cache_size = cpuid_cache_info.c_dsize; @@ -300,15 +300,15 @@ do_cacheid(void) } kprintf("%s() - %u bytes %s cache (I:%u D:%u (%s)), %u-way assoc, %u bytes/line\n", - __FUNCTION__, - cpuid_cache_info.c_dsize + cpuid_cache_info.c_isize, - ((cpuid_cache_info.c_type == CACHE_WRITE_BACK) ? "WB" : - (cpuid_cache_info.c_type == CACHE_WRITE_THROUGH ? "WT" : "Unknown")), - cpuid_cache_info.c_isize, - cpuid_cache_info.c_dsize, - (cpuid_cache_info.c_unified) ? "unified" : "separate", - cpuid_cache_info.c_assoc, - cpuid_cache_info.c_linesz); + __FUNCTION__, + cpuid_cache_info.c_dsize + cpuid_cache_info.c_isize, + ((cpuid_cache_info.c_type == CACHE_WRITE_BACK) ? "WB" : + (cpuid_cache_info.c_type == CACHE_WRITE_THROUGH ? "WT" : "Unknown")), + cpuid_cache_info.c_isize, + cpuid_cache_info.c_dsize, + (cpuid_cache_info.c_unified) ? "unified" : "separate", + cpuid_cache_info.c_assoc, + cpuid_cache_info.c_linesz); } cache_info_t * diff --git a/osfmk/arm/cpuid.h b/osfmk/arm/cpuid.h index bf642b6c5..bc6468a96 100644 --- a/osfmk/arm/cpuid.h +++ b/osfmk/arm/cpuid.h @@ -41,46 +41,46 @@ #include typedef struct { -uint32_t arm_rev : 4, /* 00:03 revision number */ - arm_part : 12, /* 04:15 primary part number */ - arm_arch : 4, /* 16:19 architecture */ - arm_variant : 4, /* 20:23 variant */ - arm_implementor : 8; /* 24:31 implementor (0x41) */ + uint32_t arm_rev : 4,/* 00:03 revision number */ + arm_part : 12, /* 04:15 primary part number */ + arm_arch : 4, /* 16:19 architecture */ + arm_variant : 4, /* 20:23 variant */ + arm_implementor : 8; /* 24:31 implementor (0x41) */ } arm_cpuid_bits_t; typedef union { - arm_cpuid_bits_t arm_info; /* ARM9xx, ARM11xx, and later processors */ - uint32_t value; + arm_cpuid_bits_t arm_info; /* ARM9xx, ARM11xx, and later processors */ + uint32_t value; } arm_cpu_info_t; /* Implementor codes */ -#define CPU_VID_ARM 0x41 // ARM Limited -#define CPU_VID_DEC 0x44 // Digital Equipment Corporation -#define CPU_VID_MOTOROLA 0x4D // Motorola - Freescale Semiconductor Inc. -#define CPU_VID_MARVELL 0x56 // Marvell Semiconductor Inc. -#define CPU_VID_INTEL 0x69 // Intel ARM parts. -#define CPU_VID_APPLE 0x61 // Apple Inc. +#define CPU_VID_ARM 0x41 // ARM Limited +#define CPU_VID_DEC 0x44 // Digital Equipment Corporation +#define CPU_VID_MOTOROLA 0x4D // Motorola - Freescale Semiconductor Inc. +#define CPU_VID_MARVELL 0x56 // Marvell Semiconductor Inc. +#define CPU_VID_INTEL 0x69 // Intel ARM parts. +#define CPU_VID_APPLE 0x61 // Apple Inc. /* ARM Architecture Codes */ -#define CPU_ARCH_ARMv4 0x1 /* ARMv4 */ -#define CPU_ARCH_ARMv4T 0x2 /* ARMv4 + Thumb */ -#define CPU_ARCH_ARMv5 0x3 /* ARMv5 */ -#define CPU_ARCH_ARMv5T 0x4 /* ARMv5 + Thumb */ -#define CPU_ARCH_ARMv5TE 0x5 /* ARMv5 + Thumb + Extensions(?) */ -#define CPU_ARCH_ARMv5TEJ 0x6 /* ARMv5 + Thumb + Extensions(?) + //Jazelle(?) 
XXX */ -#define CPU_ARCH_ARMv6 0x7 /* ARMv6 */ -#define CPU_ARCH_ARMv7 0x8 /* ARMv7 */ -#define CPU_ARCH_ARMv7f 0x9 /* ARMv7 for Cortex A9 */ -#define CPU_ARCH_ARMv7s 0xa /* ARMv7 for Swift */ -#define CPU_ARCH_ARMv7k 0xb /* ARMv7 for Cortex A7 */ +#define CPU_ARCH_ARMv4 0x1 /* ARMv4 */ +#define CPU_ARCH_ARMv4T 0x2 /* ARMv4 + Thumb */ +#define CPU_ARCH_ARMv5 0x3 /* ARMv5 */ +#define CPU_ARCH_ARMv5T 0x4 /* ARMv5 + Thumb */ +#define CPU_ARCH_ARMv5TE 0x5 /* ARMv5 + Thumb + Extensions(?) */ +#define CPU_ARCH_ARMv5TEJ 0x6 /* ARMv5 + Thumb + Extensions(?) + //Jazelle(?) XXX */ +#define CPU_ARCH_ARMv6 0x7 /* ARMv6 */ +#define CPU_ARCH_ARMv7 0x8 /* ARMv7 */ +#define CPU_ARCH_ARMv7f 0x9 /* ARMv7 for Cortex A9 */ +#define CPU_ARCH_ARMv7s 0xa /* ARMv7 for Swift */ +#define CPU_ARCH_ARMv7k 0xb /* ARMv7 for Cortex A7 */ -#define CPU_ARCH_ARMv8 0xc /* Subtype for CPU_TYPE_ARM64 */ +#define CPU_ARCH_ARMv8 0xc /* Subtype for CPU_TYPE_ARM64 */ /* special code indicating we need to look somewhere else for the architecture version */ -#define CPU_ARCH_EXTENDED 0xF +#define CPU_ARCH_EXTENDED 0xF /* ARM Part Numbers */ /* @@ -89,54 +89,54 @@ typedef union { */ /* ARM9 (ARMv4T architecture) */ -#define CPU_PART_920T 0x920 -#define CPU_PART_926EJS 0x926 /* ARM926EJ-S */ +#define CPU_PART_920T 0x920 +#define CPU_PART_926EJS 0x926 /* ARM926EJ-S */ /* ARM11 (ARMv6 architecture) */ -#define CPU_PART_1136JFS 0xB36 /* ARM1136JF-S or ARM1136J-S */ -#define CPU_PART_1176JZFS 0xB76 /* ARM1176JZF-S */ +#define CPU_PART_1136JFS 0xB36 /* ARM1136JF-S or ARM1136J-S */ +#define CPU_PART_1176JZFS 0xB76 /* ARM1176JZF-S */ /* G1 (ARMv7 architecture) */ -#define CPU_PART_CORTEXA5 0xC05 +#define CPU_PART_CORTEXA5 0xC05 /* M7 (ARMv7 architecture) */ -#define CPU_PART_CORTEXA7 0xC07 +#define CPU_PART_CORTEXA7 0xC07 /* H2 H3 (ARMv7 architecture) */ -#define CPU_PART_CORTEXA8 0xC08 +#define CPU_PART_CORTEXA8 0xC08 /* H4 (ARMv7 architecture) */ -#define CPU_PART_CORTEXA9 0xC09 +#define CPU_PART_CORTEXA9 0xC09 /* H5 (SWIFT architecture) */ -#define CPU_PART_SWIFT 0x0 +#define CPU_PART_SWIFT 0x0 /* H6 (ARMv8 architecture) */ -#define CPU_PART_CYCLONE 0x1 +#define CPU_PART_CYCLONE 0x1 /* H7 (ARMv8 architecture) */ -#define CPU_PART_TYPHOON 0x2 +#define CPU_PART_TYPHOON 0x2 /* H7G (ARMv8 architecture) */ -#define CPU_PART_TYPHOON_CAPRI 0x3 +#define CPU_PART_TYPHOON_CAPRI 0x3 /* H8 (ARMv8 architecture) */ -#define CPU_PART_TWISTER 0x4 +#define CPU_PART_TWISTER 0x4 /* H8G H8M (ARMv8 architecture) */ -#define CPU_PART_TWISTER_ELBA_MALTA 0x5 +#define CPU_PART_TWISTER_ELBA_MALTA 0x5 /* H9 (ARMv8 architecture) */ -#define CPU_PART_HURRICANE 0x6 +#define CPU_PART_HURRICANE 0x6 /* H9G (ARMv8 architecture) */ #define CPU_PART_HURRICANE_MYST 0x7 /* H10 p-Core (ARMv8 architecture) */ -#define CPU_PART_MONSOON 0x8 +#define CPU_PART_MONSOON 0x8 /* H10 e-Core (ARMv8 architecture) */ -#define CPU_PART_MISTRAL 0x9 +#define CPU_PART_MISTRAL 0x9 /* Cache type identification */ @@ -151,32 +151,32 @@ typedef enum { } cache_type_t; typedef struct { - boolean_t c_unified; /* unified I & D cache? */ - uint32_t c_isize; /* in Bytes (ARM caches can be 0.5 KB) */ - boolean_t c_i_ppage; /* protected page restriction for I cache - * (see B6-11 in ARM DDI 0100I document). */ - uint32_t c_dsize; /* in Bytes (ARM caches can be 0.5 KB) */ - boolean_t c_d_ppage; /* protected page restriction for I cache - * (see B6-11 in ARM DDI 0100I document). 
*/ - cache_type_t c_type; /* WB or WT */ - uint32_t c_linesz; /* number of bytes */ - uint32_t c_assoc; /* n-way associativity */ - uint32_t c_l2size; /* L2 size, if present */ - uint32_t c_bulksize_op; /* bulk operation size limit. 0 if disabled */ - uint32_t c_inner_cache_size; /* inner dache size */ + boolean_t c_unified; /* unified I & D cache? */ + uint32_t c_isize; /* in Bytes (ARM caches can be 0.5 KB) */ + boolean_t c_i_ppage; /* protected page restriction for I cache + * (see B6-11 in ARM DDI 0100I document). */ + uint32_t c_dsize; /* in Bytes (ARM caches can be 0.5 KB) */ + boolean_t c_d_ppage; /* protected page restriction for I cache + * (see B6-11 in ARM DDI 0100I document). */ + cache_type_t c_type; /* WB or WT */ + uint32_t c_linesz; /* number of bytes */ + uint32_t c_assoc; /* n-way associativity */ + uint32_t c_l2size; /* L2 size, if present */ + uint32_t c_bulksize_op;/* bulk operation size limit. 0 if disabled */ + uint32_t c_inner_cache_size; /* inner dache size */ } cache_info_t; typedef struct { uint32_t - RB:4, /* 3:0 - 32x64-bit media register bank supported: 0x2 */ - SP:4, /* 7:4 - Single precision supported in VFPv3: 0x2 */ - DP:4, /* 8:11 - Double precision supported in VFPv3: 0x2 */ - TE:4, /* 12-15 - Only untrapped exception handling can be selected: 0x0 */ - D:4, /* 19:16 - VFP hardware divide supported: 0x1 */ - SR:4, /* 23:20 - VFP hardware square root supported: 0x1 */ - SV:4, /* 27:24 - VFP short vector supported: 0x1 */ - RM:4; /* 31:28 - All VFP rounding modes supported: 0x1 */ + RB:4, /* 3:0 - 32x64-bit media register bank supported: 0x2 */ + SP:4, /* 7:4 - Single precision supported in VFPv3: 0x2 */ + DP:4, /* 8:11 - Double precision supported in VFPv3: 0x2 */ + TE:4, /* 12-15 - Only untrapped exception handling can be selected: 0x0 */ + D:4, /* 19:16 - VFP hardware divide supported: 0x1 */ + SR:4, /* 23:20 - VFP hardware square root supported: 0x1 */ + SV:4, /* 27:24 - VFP short vector supported: 0x1 */ + RM:4; /* 31:28 - All VFP rounding modes supported: 0x1 */ } arm_mvfr0_t; typedef union { @@ -187,13 +187,13 @@ typedef union { typedef struct { uint32_t - FZ:4, /* 3:0 - Full denormal arithmetic supported for VFP: 0x1 */ - DN:4, /* 7:4 - Propagation of NaN values supported for VFP: 0x1 */ - LS:4, /* 11:8 - Load/store instructions supported for NEON: 0x1 */ - I:4, /* 15:12 - Integer instructions supported for NEON: 0x1 */ - SP:4, /* 19:16 - Single precision floating-point instructions supported for NEON: 0x1 */ - HPFP:4, /* 23:20 - Half precision floating-point instructions supported */ - RSVP:8; /* 31:24 - Reserved */ + FZ:4, /* 3:0 - Full denormal arithmetic supported for VFP: 0x1 */ + DN:4, /* 7:4 - Propagation of NaN values supported for VFP: 0x1 */ + LS:4, /* 11:8 - Load/store instructions supported for NEON: 0x1 */ + I:4, /* 15:12 - Integer instructions supported for NEON: 0x1 */ + SP:4, /* 19:16 - Single precision floating-point instructions supported for NEON: 0x1 */ + HPFP:4, /* 23:20 - Half precision floating-point instructions supported */ + RSVP:8; /* 31:24 - Reserved */ } arm_mvfr1_t; typedef union { @@ -202,9 +202,9 @@ typedef union { } arm_mvfr1_info_t; typedef struct { - uint32_t neon; - uint32_t neon_hpfp; - uint32_t neon_fp16; + uint32_t neon; + uint32_t neon_hpfp; + uint32_t neon_fp16; } arm_mvfp_info_t; #ifdef __cplusplus diff --git a/osfmk/arm/data.s b/osfmk/arm/data.s index 1ffa7e5d4..917e68c2f 100644 --- a/osfmk/arm/data.s +++ b/osfmk/arm/data.s @@ -91,17 +91,6 @@ LEXT(kd_early_buffer) // space for kdebug's early event buffer 
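The arm_mvfr0_t/arm_mvfr1_t structs above are read through unions that overlay the raw 32-bit register value, so each 4-bit field reports one VFP/NEON capability. A small sketch of that decode, assuming the little-endian bitfield layout the kernel relies on; the raw value is illustrative, not captured from a device:

#include <stdint.h>

typedef struct {
	uint32_t RB:4, SP:4, DP:4, TE:4, D:4, SR:4, SV:4, RM:4;
} mvfr0_bits;

typedef union {
	mvfr0_bits bits;
	uint32_t   value;
} mvfr0_info;

static int
vfp_has_double_precision(uint32_t raw_mvfr0)
{
	mvfr0_info m = { .value = raw_mvfr0 };
	return m.bits.DP == 0x2;             /* 0x2 = double precision in VFPv3 */
}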
.space 16*1024,0 .section __DATA, __data // Aligned data - - .globl EXT(CpuDataEntries) - .align 12 // Page aligned -LEXT(CpuDataEntries) // Cpu Data Entry Array - .space (cdeSize_NUM*MAX_CPUS_NUM),0 // (filled with 0s) - - .globl EXT(BootCpuData) - .align 12 // Page aligned -LEXT(BootCpuData) // Per cpu data area - .space cdSize_NUM,0 // (filled with 0s) - .align 3 // unsigned long long aligned Section .globl EXT(RTClockData) LEXT(RTClockData) // Real Time clock area diff --git a/osfmk/arm/dbgwrap.c b/osfmk/arm/dbgwrap.c index 73aa8b658..3d548bade 100644 --- a/osfmk/arm/dbgwrap.c +++ b/osfmk/arm/dbgwrap.c @@ -46,8 +46,7 @@ ml_dbgwrap_halt_cpu(int cpu_index __unused, uint64_t timeout_ns __unused) } dbgwrap_status_t -ml_dbgwrap_halt_cpu_with_state(int cpu_index __unused, uint64_t timeout_ns __unused, dbgwrap_thread_state_t *state __unused) +ml_dbgwrap_halt_cpu_with_state(int cpu_index __unused, uint64_t timeout_ns __unused, dbgwrap_thread_state_t *state __unused) { return DBGWRAP_ERR_UNSUPPORTED; } - diff --git a/osfmk/arm/dbgwrap.h b/osfmk/arm/dbgwrap.h index a91fd5dc4..7acd056ef 100644 --- a/osfmk/arm/dbgwrap.h +++ b/osfmk/arm/dbgwrap.h @@ -52,20 +52,19 @@ typedef enum { } dbgwrap_status_t; static inline const char* -ml_dbgwrap_strerror(dbgwrap_status_t status) { +ml_dbgwrap_strerror(dbgwrap_status_t status) +{ switch (status) { - - case DBGWRAP_ERR_SELF_HALT: return "CPU attempted to halt itself"; - case DBGWRAP_ERR_UNSUPPORTED: return "halt not supported for this configuration"; - case DBGWRAP_ERR_INPROGRESS: return "halt in progress on another CPU"; - case DBGWRAP_ERR_INSTR_ERROR: return "instruction-stuffing failure"; - case DBGWRAP_ERR_INSTR_TIMEOUT: return "instruction-stuffing timeout"; - case DBGWRAP_ERR_HALT_TIMEOUT: return "halt ack timeout, CPU likely wedged"; - case DBGWRAP_SUCCESS: return "halt succeeded"; - case DBGWRAP_WARN_ALREADY_HALTED: return "CPU already halted"; - case DBGWRAP_WARN_CPU_OFFLINE: return "CPU offline"; - default: return "unrecognized status"; - + case DBGWRAP_ERR_SELF_HALT: return "CPU attempted to halt itself"; + case DBGWRAP_ERR_UNSUPPORTED: return "halt not supported for this configuration"; + case DBGWRAP_ERR_INPROGRESS: return "halt in progress on another CPU"; + case DBGWRAP_ERR_INSTR_ERROR: return "instruction-stuffing failure"; + case DBGWRAP_ERR_INSTR_TIMEOUT: return "instruction-stuffing timeout"; + case DBGWRAP_ERR_HALT_TIMEOUT: return "halt ack timeout, CPU likely wedged"; + case DBGWRAP_SUCCESS: return "halt succeeded"; + case DBGWRAP_WARN_ALREADY_HALTED: return "CPU already halted"; + case DBGWRAP_WARN_CPU_OFFLINE: return "CPU offline"; + default: return "unrecognized status"; } } @@ -75,7 +74,6 @@ dbgwrap_status_t ml_dbgwrap_wait_cpu_halted(int cpu_index, uint64_t timeout_ns); dbgwrap_status_t ml_dbgwrap_halt_cpu(int cpu_index, uint64_t timeout_ns); -dbgwrap_status_t ml_dbgwrap_halt_cpu_with_state(int cpu_index, uint64_t timeout_ns, dbgwrap_thread_state_t *state); +dbgwrap_status_t ml_dbgwrap_halt_cpu_with_state(int cpu_index, uint64_t timeout_ns, dbgwrap_thread_state_t *state); __END_DECLS - diff --git a/osfmk/arm/exception.h b/osfmk/arm/exception.h index bafe40b7d..ee18970ab 100644 --- a/osfmk/arm/exception.h +++ b/osfmk/arm/exception.h @@ -29,28 +29,28 @@ * @OSF_COPYRIGHT@ */ /* CMU_ENDHIST */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,8 +61,8 @@ /* * ARM Exception Info */ -#ifndef _ARM_EXCEPTION_H_ -#define _ARM_EXCEPTION_H_ +#ifndef _ARM_EXCEPTION_H_ +#define _ARM_EXCEPTION_H_ #define VECT_RESET 0x0 #define VECT_UNDEF_INST 0x4 @@ -74,4 +74,4 @@ /* can put actual code for FIQ here, avoiding extra fetch */ -#endif /* _ARM_EXCEPTION_H_ */ +#endif /* _ARM_EXCEPTION_H_ */ diff --git a/osfmk/arm/genassym.c b/osfmk/arm/genassym.c index 36e59b9c7..435383706 100644 --- a/osfmk/arm/genassym.c +++ b/osfmk/arm/genassym.c @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,6 +64,7 @@ #include #include #include +#include #include #include #include @@ -85,11 +86,6 @@ #include #include -#if CONFIG_DTRACE -#define NEED_DTRACE_DEFS -#include <../bsd/sys/lockstat.h> -#endif /* CONFIG_DTRACE */ - /* * genassym.c is used to produce an * assembly file which, intermingled with unuseful assembly code, @@ -103,60 +99,59 @@ * the values, but we cannot run anything on the target machine. 
*/ -#define DECLARE(SYM,VAL) \ +#define DECLARE(SYM, VAL) \ __asm("DEFINITION__define__" SYM ":\t .ascii \"%0\"" : : "n" ((u_int)(VAL))) -int main( - int argc, - char ** argv); +int main( + int argc, + char ** argv); int main( - int argc, - char **argv) + int argc, + char **argv) { + DECLARE("T_PREFETCH_ABT", T_PREFETCH_ABT); + DECLARE("T_DATA_ABT", T_DATA_ABT); - DECLARE("T_PREFETCH_ABT", T_PREFETCH_ABT); - DECLARE("T_DATA_ABT", T_DATA_ABT); - - DECLARE("AST_URGENT", AST_URGENT); - DECLARE("AST_PREEMPTION", AST_PREEMPTION); + DECLARE("AST_URGENT", AST_URGENT); + DECLARE("AST_PREEMPTION", AST_PREEMPTION); - DECLARE("TH_RECOVER", offsetof(struct thread, recover)); - DECLARE("TH_CONTINUATION", offsetof(struct thread, continuation)); - DECLARE("TH_KERNEL_STACK", offsetof(struct thread, kernel_stack)); - DECLARE("TH_KSTACKPTR", offsetof(struct thread, machine.kstackptr)); - DECLARE("TH_UTHREAD", offsetof(struct thread, uthread)); + DECLARE("TH_RECOVER", offsetof(struct thread, recover)); + DECLARE("TH_CONTINUATION", offsetof(struct thread, continuation)); + DECLARE("TH_KERNEL_STACK", offsetof(struct thread, kernel_stack)); + DECLARE("TH_KSTACKPTR", offsetof(struct thread, machine.kstackptr)); + DECLARE("TH_UTHREAD", offsetof(struct thread, uthread)); DECLARE("TASK_MACH_EXC_PORT", - offsetof(struct task, exc_actions[EXC_MACH_SYSCALL].port)); + offsetof(struct task, exc_actions[EXC_MACH_SYSCALL].port)); /* These fields are being added on demand */ - DECLARE("ACT_TASK", offsetof(struct thread, task)); - DECLARE("ACT_PCBDATA", offsetof(struct thread, machine.PcbData)); + DECLARE("ACT_TASK", offsetof(struct thread, task)); + DECLARE("ACT_PCBDATA", offsetof(struct thread, machine.PcbData)); #if __ARM_VFP__ - DECLARE("ACT_UVFP", offsetof(struct thread, machine.uVFPdata)); - DECLARE("ACT_KVFP", offsetof(struct thread, machine.kVFPdata)); + DECLARE("ACT_UVFP", offsetof(struct thread, machine.uVFPdata)); + DECLARE("ACT_KVFP", offsetof(struct thread, machine.kVFPdata)); #endif - DECLARE("TH_CTH_SELF", offsetof(struct thread, machine.cthread_self)); - DECLARE("TH_CTH_DATA", offsetof(struct thread, machine.cthread_data)); - DECLARE("ACT_PCBDATA_PC", offsetof(struct thread, machine.PcbData.pc)); - DECLARE("ACT_PCBDATA_R0", offsetof(struct thread, machine.PcbData.r[0])); - DECLARE("ACT_PREEMPT_CNT", offsetof(struct thread, machine.preemption_count)); - DECLARE("ACT_CPUDATAP", offsetof(struct thread, machine.CpuDatap)); - DECLARE("ACT_MAP", offsetof(struct thread, map)); + DECLARE("TH_CTH_SELF", offsetof(struct thread, machine.cthread_self)); + DECLARE("TH_CTH_DATA", offsetof(struct thread, machine.cthread_data)); + DECLARE("ACT_PCBDATA_PC", offsetof(struct thread, machine.PcbData.pc)); + DECLARE("ACT_PCBDATA_R0", offsetof(struct thread, machine.PcbData.r[0])); + DECLARE("ACT_PREEMPT_CNT", offsetof(struct thread, machine.preemption_count)); + DECLARE("ACT_CPUDATAP", offsetof(struct thread, machine.CpuDatap)); + DECLARE("ACT_MAP", offsetof(struct thread, map)); #if __ARM_USER_PROTECT__ DECLARE("ACT_UPTW_TTC", offsetof(struct thread, machine.uptw_ttc)); DECLARE("ACT_UPTW_TTB", offsetof(struct thread, machine.uptw_ttb)); DECLARE("ACT_KPTW_TTB", offsetof(struct thread, machine.kptw_ttb)); DECLARE("ACT_ASID", offsetof(struct thread, machine.asid)); #endif - DECLARE("ACT_DEBUGDATA", offsetof(struct thread, machine.DebugData)); - DECLARE("TH_IOTIER_OVERRIDE", offsetof(struct thread, iotier_override)); - DECLARE("TH_RWLOCK_CNT", offsetof(struct thread, rwlock_count)); - DECLARE("TH_SCHED_FLAGS", offsetof(struct 
thread, sched_flags)); - DECLARE("TH_SFLAG_RW_PROMOTED", TH_SFLAG_RW_PROMOTED); + DECLARE("ACT_DEBUGDATA", offsetof(struct thread, machine.DebugData)); + DECLARE("TH_IOTIER_OVERRIDE", offsetof(struct thread, iotier_override)); + DECLARE("TH_RWLOCK_CNT", offsetof(struct thread, rwlock_count)); + DECLARE("TH_SCHED_FLAGS", offsetof(struct thread, sched_flags)); + DECLARE("TH_SFLAG_RW_PROMOTED", TH_SFLAG_RW_PROMOTED); DECLARE("TH_MACH_SYSCALLS", offsetof(struct thread, syscalls_mach)); DECLARE("TH_UNIX_SYSCALLS", offsetof(struct thread, syscalls_unix)); @@ -165,9 +160,9 @@ main( DECLARE("MACH_TRAP_TABLE_COUNT", MACH_TRAP_TABLE_COUNT); DECLARE("MACH_TRAP_TABLE_ENTRY_SIZE", sizeof(mach_trap_t)); - DECLARE("MAP_PMAP", offsetof(struct _vm_map, pmap)); + DECLARE("MAP_PMAP", offsetof(struct _vm_map, pmap)); - DECLARE("SS_SIZE", sizeof(struct arm_saved_state)); + DECLARE("SS_SIZE", sizeof(struct arm_saved_state)); DECLARE("SS_LR", offsetof(struct arm_saved_state, lr)); DECLARE("SS_CPSR", offsetof(struct arm_saved_state, cpsr)); DECLARE("SS_PC", offsetof(struct arm_saved_state, pc)); @@ -186,8 +181,8 @@ main( DECLARE("VSS_FPEXC", offsetof(struct arm_vfpsaved_state, fpexc)); DECLARE("EXC_CTX_SIZE", sizeof(struct arm_saved_state) + - sizeof(struct arm_vfpsaved_state) + - VFPSAVE_ALIGN); + sizeof(struct arm_vfpsaved_state) + + VFPSAVE_ALIGN); DECLARE("VSS_ALIGN", VFPSAVE_ALIGN); #else DECLARE("EXC_CTX_SIZE", sizeof(struct arm_saved_state)); @@ -198,173 +193,173 @@ main( DECLARE("PGSHIFT", ARM_PGSHIFT); DECLARE("PGMASK", ARM_PGMASK); - DECLARE("VM_MIN_ADDRESS", VM_MIN_ADDRESS); - DECLARE("VM_MAX_ADDRESS", VM_MAX_ADDRESS); - DECLARE("KERNELBASE", VM_MIN_KERNEL_ADDRESS); - DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE); + DECLARE("VM_MIN_ADDRESS", VM_MIN_ADDRESS); + DECLARE("VM_MAX_ADDRESS", VM_MAX_ADDRESS); + DECLARE("KERNELBASE", VM_MIN_KERNEL_ADDRESS); + DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE); - DECLARE("KERN_INVALID_ADDRESS", KERN_INVALID_ADDRESS); + DECLARE("KERN_INVALID_ADDRESS", KERN_INVALID_ADDRESS); - DECLARE("MAX_CPUS", MAX_CPUS); + DECLARE("MAX_CPUS", MAX_CPUS); DECLARE("cdeSize", - sizeof(struct cpu_data_entry)); + sizeof(struct cpu_data_entry)); DECLARE("cdSize", - sizeof(struct cpu_data)); - - DECLARE("CPU_ACTIVE_THREAD", - offsetof(cpu_data_t, cpu_active_thread)); - DECLARE("CPU_ACTIVE_STACK", - offsetof(cpu_data_t, cpu_active_stack)); - DECLARE("CPU_ISTACKPTR", - offsetof(cpu_data_t, istackptr)); - DECLARE("CPU_INTSTACK_TOP", - offsetof(cpu_data_t, intstack_top)); - DECLARE("CPU_FIQSTACKPTR", - offsetof(cpu_data_t, fiqstackptr)); - DECLARE("CPU_FIQSTACK_TOP", - offsetof(cpu_data_t, fiqstack_top)); - DECLARE("CPU_NUMBER_GS", - offsetof(cpu_data_t,cpu_number)); - DECLARE("CPU_IDENT", - offsetof(cpu_data_t,cpu_ident)); - DECLARE("CPU_RUNNING", - offsetof(cpu_data_t,cpu_running)); - DECLARE("CPU_MCOUNT_OFF", - offsetof(cpu_data_t,cpu_mcount_off)); + sizeof(struct cpu_data)); + + DECLARE("CPU_ACTIVE_THREAD", + offsetof(cpu_data_t, cpu_active_thread)); + DECLARE("CPU_ACTIVE_STACK", + offsetof(cpu_data_t, cpu_active_stack)); + DECLARE("CPU_ISTACKPTR", + offsetof(cpu_data_t, istackptr)); + DECLARE("CPU_INTSTACK_TOP", + offsetof(cpu_data_t, intstack_top)); + DECLARE("CPU_FIQSTACKPTR", + offsetof(cpu_data_t, fiqstackptr)); + DECLARE("CPU_FIQSTACK_TOP", + offsetof(cpu_data_t, fiqstack_top)); + DECLARE("CPU_NUMBER_GS", + offsetof(cpu_data_t, cpu_number)); + DECLARE("CPU_IDENT", + offsetof(cpu_data_t, cpu_ident)); + DECLARE("CPU_RUNNING", + offsetof(cpu_data_t, cpu_running)); + 
DECLARE("CPU_MCOUNT_OFF", + offsetof(cpu_data_t, cpu_mcount_off)); DECLARE("CPU_PENDING_AST", - offsetof(cpu_data_t,cpu_pending_ast)); + offsetof(cpu_data_t, cpu_pending_ast)); DECLARE("CPU_PROCESSOR", - offsetof(cpu_data_t,cpu_processor)); + offsetof(cpu_data_t, cpu_processor)); DECLARE("CPU_CACHE_DISPATCH", - offsetof(cpu_data_t,cpu_cache_dispatch)); - DECLARE("CPU_BASE_TIMEBASE_LOW", - offsetof(cpu_data_t,cpu_base_timebase_low)); - DECLARE("CPU_BASE_TIMEBASE_HIGH", - offsetof(cpu_data_t,cpu_base_timebase_high)); - DECLARE("CPU_TIMEBASE_LOW", - offsetof(cpu_data_t,cpu_timebase_low)); - DECLARE("CPU_TIMEBASE_HIGH", - offsetof(cpu_data_t,cpu_timebase_high)); + offsetof(cpu_data_t, cpu_cache_dispatch)); + DECLARE("CPU_BASE_TIMEBASE_LOW", + offsetof(cpu_data_t, cpu_base_timebase_low)); + DECLARE("CPU_BASE_TIMEBASE_HIGH", + offsetof(cpu_data_t, cpu_base_timebase_high)); + DECLARE("CPU_TIMEBASE_LOW", + offsetof(cpu_data_t, cpu_timebase_low)); + DECLARE("CPU_TIMEBASE_HIGH", + offsetof(cpu_data_t, cpu_timebase_high)); DECLARE("CPU_DECREMENTER", - offsetof(cpu_data_t,cpu_decrementer)); + offsetof(cpu_data_t, cpu_decrementer)); DECLARE("CPU_GET_DECREMENTER_FUNC", - offsetof(cpu_data_t,cpu_get_decrementer_func)); + offsetof(cpu_data_t, cpu_get_decrementer_func)); DECLARE("CPU_SET_DECREMENTER_FUNC", - offsetof(cpu_data_t,cpu_set_decrementer_func)); + offsetof(cpu_data_t, cpu_set_decrementer_func)); DECLARE("CPU_GET_FIQ_HANDLER", - offsetof(cpu_data_t,cpu_get_fiq_handler)); + offsetof(cpu_data_t, cpu_get_fiq_handler)); DECLARE("CPU_TBD_HARDWARE_ADDR", - offsetof(cpu_data_t,cpu_tbd_hardware_addr)); + offsetof(cpu_data_t, cpu_tbd_hardware_addr)); DECLARE("CPU_TBD_HARDWARE_VAL", - offsetof(cpu_data_t,cpu_tbd_hardware_val)); + offsetof(cpu_data_t, cpu_tbd_hardware_val)); DECLARE("CPU_INT_STATE", - offsetof(cpu_data_t,cpu_int_state)); + offsetof(cpu_data_t, cpu_int_state)); DECLARE("INTERRUPT_HANDLER", - offsetof(cpu_data_t,interrupt_handler)); + offsetof(cpu_data_t, interrupt_handler)); DECLARE("INTERRUPT_TARGET", - offsetof(cpu_data_t,interrupt_target)); + offsetof(cpu_data_t, interrupt_target)); DECLARE("INTERRUPT_REFCON", - offsetof(cpu_data_t,interrupt_refCon)); + offsetof(cpu_data_t, interrupt_refCon)); DECLARE("INTERRUPT_NUB", - offsetof(cpu_data_t,interrupt_nub)); + offsetof(cpu_data_t, interrupt_nub)); DECLARE("INTERRUPT_SOURCE", - offsetof(cpu_data_t,interrupt_source)); + offsetof(cpu_data_t, interrupt_source)); DECLARE("CPU_USER_DEBUG", - offsetof(cpu_data_t, cpu_user_debug)); + offsetof(cpu_data_t, cpu_user_debug)); DECLARE("CPU_STAT_IRQ", - offsetof(cpu_data_t, cpu_stat.irq_ex_cnt)); + offsetof(cpu_data_t, cpu_stat.irq_ex_cnt)); DECLARE("CPU_STAT_IRQ_WAKE", - offsetof(cpu_data_t, cpu_stat.irq_ex_cnt_wake)); + offsetof(cpu_data_t, cpu_stat.irq_ex_cnt_wake)); DECLARE("CPU_RESET_HANDLER", - offsetof(cpu_data_t, cpu_reset_handler)); + offsetof(cpu_data_t, cpu_reset_handler)); DECLARE("CPU_RESET_ASSIST", - offsetof(cpu_data_t, cpu_reset_assist)); + offsetof(cpu_data_t, cpu_reset_assist)); DECLARE("RTCLOCK_DATAP", - offsetof(cpu_data_t, rtclock_datap)); -#ifdef __arm__ + offsetof(cpu_data_t, rtclock_datap)); +#ifdef __arm__ DECLARE("CPU_EXC_VECTORS", - offsetof(cpu_data_t, cpu_exc_vectors)); + offsetof(cpu_data_t, cpu_exc_vectors)); #endif DECLARE("RTCLOCKDataSize", - sizeof(rtclock_data_t)); + sizeof(rtclock_data_t)); DECLARE("RTCLOCK_ADJ_ABSTIME_LOW", - offsetof(rtclock_data_t, rtc_adj.abstime_val.low)); + offsetof(rtclock_data_t, rtc_adj.abstime_val.low)); DECLARE("RTCLOCK_ADJ_ABSTIME_HIGH", - 
offsetof(rtclock_data_t, rtc_adj.abstime_val.high)); + offsetof(rtclock_data_t, rtc_adj.abstime_val.high)); DECLARE("RTCLOCK_BASE_ABSTIME_LOW", - offsetof(rtclock_data_t, rtc_base.abstime_val.low)); + offsetof(rtclock_data_t, rtc_base.abstime_val.low)); DECLARE("RTCLOCK_BASE_ABSTIME_HIGH", - offsetof(rtclock_data_t, rtc_base.abstime_val.high)); + offsetof(rtclock_data_t, rtc_base.abstime_val.high)); DECLARE("RTCLOCK_TB_FUNC", - offsetof(rtclock_data_t, rtc_timebase_func)); + offsetof(rtclock_data_t, rtc_timebase_func)); DECLARE("RTCLOCK_TB_ADDR", - offsetof(rtclock_data_t, rtc_timebase_addr)); + offsetof(rtclock_data_t, rtc_timebase_addr)); DECLARE("RTCLOCK_TB_VAL", - offsetof(rtclock_data_t, rtc_timebase_val)); + offsetof(rtclock_data_t, rtc_timebase_val)); - DECLARE("SIGPdec", SIGPdec); + DECLARE("SIGPdec", SIGPdec); DECLARE("rhdSize", - sizeof(struct reset_handler_data)); + sizeof(struct reset_handler_data)); - DECLARE("CPU_DATA_ENTRIES", offsetof(struct reset_handler_data, cpu_data_entries)); - DECLARE("BOOT_ARGS", offsetof(struct reset_handler_data, boot_args)); - DECLARE("ASSIST_RESET_HANDLER", offsetof(struct reset_handler_data, assist_reset_handler)); + DECLARE("CPU_DATA_ENTRIES", offsetof(struct reset_handler_data, cpu_data_entries)); + DECLARE("BOOT_ARGS", offsetof(struct reset_handler_data, boot_args)); + DECLARE("ASSIST_RESET_HANDLER", offsetof(struct reset_handler_data, assist_reset_handler)); - DECLARE("CPU_DATA_PADDR", offsetof(struct cpu_data_entry, cpu_data_paddr)); + DECLARE("CPU_DATA_PADDR", offsetof(struct cpu_data_entry, cpu_data_paddr)); - DECLARE("INTSTACK_SIZE", INTSTACK_SIZE); - DECLARE("FIQSTACK_SIZE", FIQSTACK_SIZE); + DECLARE("INTSTACK_SIZE", INTSTACK_SIZE); + DECLARE("FIQSTACK_SIZE", FIQSTACK_SIZE); - DECLARE("PAGE_MAX_SIZE", PAGE_MAX_SIZE); + DECLARE("PAGE_MAX_SIZE", PAGE_MAX_SIZE); /* values from kern/timer.h */ DECLARE("TIMER_LOW", - offsetof(struct timer, low_bits)); + offsetof(struct timer, low_bits)); DECLARE("TIMER_HIGH", - offsetof(struct timer, high_bits)); + offsetof(struct timer, high_bits)); DECLARE("TIMER_HIGHCHK", - offsetof(struct timer, high_bits_check)); + offsetof(struct timer, high_bits_check)); DECLARE("TIMER_TSTAMP", - offsetof(struct timer, tstamp)); + offsetof(struct timer, tstamp)); DECLARE("THREAD_TIMER", - offsetof(struct processor, processor_data.thread_timer)); + offsetof(struct processor, processor_data.thread_timer)); DECLARE("KERNEL_TIMER", - offsetof(struct processor, processor_data.kernel_timer)); + offsetof(struct processor, processor_data.kernel_timer)); DECLARE("SYSTEM_STATE", - offsetof(struct processor, processor_data.system_state)); + offsetof(struct processor, processor_data.system_state)); DECLARE("USER_STATE", - offsetof(struct processor, processor_data.user_state)); + offsetof(struct processor, processor_data.user_state)); DECLARE("CURRENT_STATE", - offsetof(struct processor, processor_data.current_state)); + offsetof(struct processor, processor_data.current_state)); DECLARE("SYSTEM_TIMER", - offsetof(struct thread, system_timer)); + offsetof(struct thread, system_timer)); DECLARE("USER_TIMER", - offsetof(struct thread, user_timer)); + offsetof(struct thread, user_timer)); #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME DECLARE("PRECISE_USER_KERNEL_TIME", - offsetof(struct thread, precise_user_kernel_time)); + offsetof(struct thread, precise_user_kernel_time)); #endif DECLARE("BA_VIRT_BASE", - offsetof(struct boot_args, virtBase)); + offsetof(struct boot_args, virtBase)); DECLARE("BA_PHYS_BASE", - offsetof(struct boot_args, 
physBase)); + offsetof(struct boot_args, physBase)); DECLARE("BA_MEM_SIZE", - offsetof(struct boot_args, memSize)); + offsetof(struct boot_args, memSize)); DECLARE("BA_TOP_OF_KERNEL_DATA", - offsetof(struct boot_args, topOfKernelData)); + offsetof(struct boot_args, topOfKernelData)); DECLARE("ENTROPY_INDEX_PTR", - offsetof(entropy_data_t, index_ptr)); + offsetof(entropy_data_t, index_ptr)); DECLARE("ENTROPY_BUFFER", - offsetof(entropy_data_t, buffer)); + offsetof(entropy_data_t, buffer)); DECLARE("ENTROPY_DATA_SIZE", sizeof(struct entropy_data)); - return (0); + return 0; } diff --git a/osfmk/arm/hw_lock_types.h b/osfmk/arm/hw_lock_types.h index 342445c33..7baaaea89 100644 --- a/osfmk/arm/hw_lock_types.h +++ b/osfmk/arm/hw_lock_types.h @@ -33,41 +33,41 @@ * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifndef _ARM_HW_LOCK_TYPES_H_ -#define _ARM_HW_LOCK_TYPES_H_ +#ifndef _ARM_HW_LOCK_TYPES_H_ +#define _ARM_HW_LOCK_TYPES_H_ struct hslock { - uintptr_t lock_data; + uintptr_t lock_data; }; typedef struct hslock hw_lock_data_t, *hw_lock_t; -#define hw_lock_addr(hwl) (&((hwl).lock_data)) +#define hw_lock_addr(hwl) (&((hwl).lock_data)) -#endif /* _ARM_HW_LOCK_TYPES_H_ */ +#endif /* _ARM_HW_LOCK_TYPES_H_ */ diff --git a/osfmk/arm/io_map.c b/osfmk/arm/io_map.c index 2aa718001..bae84c780 100644 --- a/osfmk/arm/io_map.c +++ b/osfmk/arm/io_map.c @@ -31,23 +31,23 @@ /* * Mach Operating System Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright notice * and this permission notice appear in all copies of the software, * derivative works or modified versions, and any portions thereof, and that * both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science Carnegie Mellon University Pittsburgh PA * 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon the * rights to redistribute these changes. 
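The DECLARE() macro in genassym.c above never runs on the target: it embeds each constant as an immediate (the "n" constraint) into the generated assembly, where a build step scrapes the DEFINITION__define__ labels back out. The same trick in a standalone sketch, assuming you compile with -S and grep the label; the demo names are hypothetical:

#include <stddef.h>

struct demo { int a; long b; };

#define DECLARE(SYM, VAL) \
	__asm__("DEFINITION__define__" SYM ":\t .ascii \"%0\"" : : "n" ((unsigned)(VAL)))

int
main(void)
{
	/* Nothing executes on the target; the offset appears as text in the
	 * .s output next to DEFINITION__define__DEMO_B_OFFSET. */
	DECLARE("DEMO_B_OFFSET", offsetof(struct demo, b));
	return 0;
}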
*/ @@ -61,7 +61,7 @@ #include #include -extern vm_offset_t virtual_space_start; /* Next available kernel VA */ +extern vm_offset_t virtual_space_start; /* Next available kernel VA */ /* * Allocate and map memory for devices that may need to be mapped before @@ -78,35 +78,35 @@ io_map(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags) if (kernel_map == VM_MAP_NULL) { /* - * VM is not initialized. Grab memory. - */ + * VM is not initialized. Grab memory. + */ start = virtual_space_start; virtual_space_start += round_page(size); assert(flags == VM_WIMG_WCOMB || flags == VM_WIMG_IO); - if (flags == VM_WIMG_WCOMB) { + if (flags == VM_WIMG_WCOMB) { (void) pmap_map_bd_with_options(start, phys_addr, phys_addr + round_page(size), - VM_PROT_READ | VM_PROT_WRITE, PMAP_MAP_BD_WCOMB); + VM_PROT_READ | VM_PROT_WRITE, PMAP_MAP_BD_WCOMB); } else { (void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size), - VM_PROT_READ | VM_PROT_WRITE); + VM_PROT_READ | VM_PROT_WRITE); } } else { (void) kmem_alloc_pageable(kernel_map, &start, round_page(size), VM_KERN_MEMORY_IOKIT); (void) pmap_map(start, phys_addr, phys_addr + round_page(size), - VM_PROT_READ | VM_PROT_WRITE, flags); + VM_PROT_READ | VM_PROT_WRITE, flags); } #if KASAN kasan_notify_address(start + start_offset, size); #endif - return (start + start_offset); + return start + start_offset; } /* just wrap this since io_map handles it */ -vm_offset_t +vm_offset_t io_map_spec(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags) { - return (io_map(phys_addr, size, flags)); + return io_map(phys_addr, size, flags); } diff --git a/osfmk/arm/io_map_entries.h b/osfmk/arm/io_map_entries.h index 1a96d9764..4b97c77f5 100644 --- a/osfmk/arm/io_map_entries.h +++ b/osfmk/arm/io_map_entries.h @@ -28,22 +28,21 @@ /* * @OSF_COPYRIGHT@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _ARM_IO_MAP_ENTRIES #define _ARM_IO_MAP_ENTRIES #include -#ifdef __APPLE_API_PRIVATE -extern vm_offset_t io_map( - vm_map_offset_t phys_addr, - vm_size_t size, - unsigned int flags); +#ifdef __APPLE_API_PRIVATE +extern vm_offset_t io_map( + vm_map_offset_t phys_addr, + vm_size_t size, + unsigned int flags); extern vm_offset_t io_map_spec(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags); -#endif /* __APPLE_API_PRIVATE */ +#endif /* __APPLE_API_PRIVATE */ #endif /* _ARM_IO_MAP_ENTRIES */ -#endif /* KERNEL_PRIVATE */ - +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/arm/kpc_arm.c b/osfmk/arm/kpc_arm.c index 11a544584..b5c060a8a 100644 --- a/osfmk/arm/kpc_arm.c +++ b/osfmk/arm/kpc_arm.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -53,70 +53,70 @@ static uint32_t kpc_enabled_counters = 0; static int first_time = 1; /* Private */ - -static boolean_t + +static boolean_t enable_counter(uint32_t counter) { boolean_t enabled; uint32_t PMCNTENSET; /* Cycle counter is MSB; configurable counters reside in LSBs */ uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1)); - + /* Enabled? */ - __asm__ volatile("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET)); - + __asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (PMCNTENSET)); + enabled = (PMCNTENSET & mask); if (!enabled) { /* Counter interrupt enable (PMINTENSET) */ - __asm__ volatile("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask)); - + __asm__ volatile ("mcr p15, 0, %0, c9, c14, 1;" : : "r" (mask)); + /* Individual counter enable set (PMCNTENSET) */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask)); - - kpc_enabled_counters++; - + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (mask)); + + kpc_enabled_counters++; + /* 1st enabled counter? Set the master enable bit in PMCR */ if (kpc_enabled_counters == 1) { uint32_t PMCR = 1; - __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); } } return enabled; } -static boolean_t +static boolean_t disable_counter(uint32_t counter) { boolean_t enabled; uint32_t PMCNTENCLR; /* Cycle counter is MSB; configurable counters reside in LSBs */ uint32_t mask = (counter == 0) ? (1 << 31) : (1 << (counter - 1)); - + /* Enabled? */ - __asm__ volatile("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR)); - + __asm__ volatile ("mrc p15, 0, %0, c9, c12, 2;" : "=r" (PMCNTENCLR)); + enabled = (PMCNTENCLR & mask); if (enabled) { /* Individual counter enable clear (PMCNTENCLR) */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask)); - + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 2;" : : "r" (mask)); + /* Counter interrupt disable (PMINTENCLR) */ - __asm__ volatile("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask)); - - kpc_enabled_counters--; - + __asm__ volatile ("mcr p15, 0, %0, c9, c14, 2;" : : "r" (mask)); + + kpc_enabled_counters--; + /* Last enabled counter? Clear the master enable bit in PMCR */ if (kpc_enabled_counters == 0) { uint32_t PMCR = 0; - __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); } } - + return enabled; } -static uint64_t +static uint64_t read_counter(uint32_t counter) { uint32_t low = 0; @@ -124,47 +124,47 @@ read_counter(uint32_t counter) switch (counter) { case 0: /* Fixed counter */ - __asm__ volatile("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low)); + __asm__ volatile ("mrc p15, 0, %0, c9, c13, 0;" : "=r" (low)); break; case 1: case 2: case 3: case 4: /* Configurable. Set PMSELR... 
*/ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); /* ...then read PMXEVCNTR */ - __asm__ volatile("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low)); + __asm__ volatile ("mrc p15, 0, %0, c9, c13, 2;" : "=r" (low)); break; default: /* ??? */ - break; + break; } - + return (uint64_t)low; } static void -write_counter(uint32_t counter, uint64_t value) +write_counter(uint32_t counter, uint64_t value) { uint32_t low = value & 0xFFFFFFFF; switch (counter) { case 0: /* Fixed counter */ - __asm__ volatile("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low)); + __asm__ volatile ("mcr p15, 0, %0, c9, c13, 0;" : : "r" (low)); break; case 1: case 2: case 3: case 4: /* Configurable. Set PMSELR... */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); /* ...then write PMXEVCNTR */ - __asm__ volatile("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low)); + __asm__ volatile ("mcr p15, 0, %0, c9, c13, 2;" : : "r" (low)); break; default: /* ??? */ - break; + break; } } @@ -184,8 +184,8 @@ set_running_fixed(boolean_t on) int n = KPC_ARM_FIXED_COUNT; enabled = ml_set_interrupts_enabled(FALSE); - - for( i = 0; i < n; i++ ) { + + for (i = 0; i < n; i++) { if (on) { enable_counter(i); } else { @@ -203,10 +203,11 @@ set_running_configurable(uint64_t target_mask, uint64_t state_mask) boolean_t enabled; enabled = ml_set_interrupts_enabled(FALSE); - + for (uint32_t i = 0; i < cfg_count; ++i) { - if (((1ULL << i) & target_mask) == 0) + if (((1ULL << i) & target_mask) == 0) { continue; + } assert(kpc_controls_counter(offset + i)); if ((1ULL << i) & state_mask) { @@ -231,37 +232,37 @@ kpc_pmi_handler(cpu_id_t source) /* The pmi must be delivered to the CPU that generated it */ if (source != getCpuDatap()->interrupt_nub) { - panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub); + panic("pmi from IOCPU %p delivered to IOCPU %p", source, getCpuDatap()->interrupt_nub); } for (ctr = 0; - ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT); - ctr++) - { + ctr < (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT); + ctr++) { uint32_t PMOVSR; uint32_t mask; - - /* check the counter for overflow */ + + /* check the counter for overflow */ if (ctr == 0) { mask = 1 << 31; } else { mask = 1 << (ctr - 1); } - + /* read PMOVSR */ - __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR)); - + __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR)); + if (PMOVSR & mask) { extra = kpc_reload_counter(ctr); FIXED_SHADOW(ctr) - += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra; + += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* wrap */) + extra; - if (FIXED_ACTIONID(ctr)) + if (FIXED_ACTIONID(ctr)) { kpc_sample_kperf(FIXED_ACTIONID(ctr)); - + } + /* clear PMOVSR bit */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 3;" : : "r" (mask)); } } @@ -274,11 +275,12 @@ kpc_set_running_xcall( void *vstate ) struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate; assert(mp_config); - if (kpc_controls_fixed_counters()) + if (kpc_controls_fixed_counters()) { set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK); - + } + set_running_configurable(mp_config->cfg_target_mask, - mp_config->cfg_state_mask); + mp_config->cfg_state_mask); if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0) { thread_wakeup((event_t) &kpc_xcall_sync); @@ -295,9 
+297,9 @@ get_counter_config(uint32_t counter) /* Fixed counter accessed via top bit... */ counter = 31; /* Write PMSELR.SEL */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter)); /* Read PMXEVTYPER */ - __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : "=r" (config)); + __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : "=r" (config)); break; case 1: case 2: @@ -306,35 +308,35 @@ get_counter_config(uint32_t counter) /* Offset */ counter -= 1; /* Write PMSELR.SEL to select the configurable counter */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter)); /* Read PMXEVTYPER to get the config */ - __asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config)); + __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (config)); break; default: break; } - + return config; } static void set_counter_config(uint32_t counter, uint64_t config) -{ +{ switch (counter) { case 0: /* Write PMSELR.SEL */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31)); /* Write PMXEVTYPER */ - __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF)); + __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF)); break; case 1: case 2: case 3: case 4: /* Write PMSELR.SEL */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (counter - 1)); /* Write PMXEVTYPER */ - __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF)); + __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (config & 0xFFFFFFFF)); break; default: break; @@ -348,11 +350,11 @@ kpc_arch_init(void) { uint32_t PMCR; uint32_t event_counters; - + /* read PMOVSR and determine the number of event counters */ - __asm__ volatile("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR)); + __asm__ volatile ("mrc p15, 0, %0, c9, c12, 0;" : "=r" (PMCR)); event_counters = (PMCR >> 11) & 0x1F; - + assert(event_counters >= KPC_ARM_CONFIGURABLE_COUNT); } @@ -417,24 +419,25 @@ kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask) uint32_t PMOVSR; uint32_t mask; uint64_t ctr; - - if (((1ULL << i) & pmc_mask) == 0) + + if (((1ULL << i) & pmc_mask) == 0) { continue; + } ctr = read_counter(i + offset); /* check the counter for overflow */ mask = 1 << i; - + /* read PMOVSR */ - __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR)); + __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR)); if (PMOVSR & mask) { - ctr = CONFIGURABLE_SHADOW(i) + - (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) + - ctr; + ctr = CONFIGURABLE_SHADOW(i) + + (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) + + ctr; } else { ctr = CONFIGURABLE_SHADOW(i) + - (ctr - CONFIGURABLE_RELOAD(i)); + (ctr - CONFIGURABLE_RELOAD(i)); } *counterv++ = ctr; @@ -452,19 +455,19 @@ kpc_get_fixed_counters(uint64_t *counterv) /* check the counter for overflow */ mask = 1 << 31; - + /* read PMOVSR */ - __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR)); + __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (PMOVSR)); ctr = read_counter(0); if (PMOVSR & mask) { ctr = FIXED_SHADOW(0) + - (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) + - (ctr & 0xFFFFFFFF); + (kpc_fixed_max() - FIXED_RELOAD(0) + 1 /* Wrap */) + + (ctr & 0xFFFFFFFF); } else { ctr = FIXED_SHADOW(0) + 
- (ctr - FIXED_RELOAD(0)); + (ctr - FIXED_RELOAD(0)); } counterv[0] = ctr; @@ -491,24 +494,25 @@ kpc_set_running_arch(struct kpc_running_remote *mp_config) unsigned int cpu; assert(mp_config); - + if (first_time) { kprintf( "kpc: setting PMI handler\n" ); PE_cpu_perfmon_interrupt_install_handler(kpc_pmi_handler); - for (cpu = 0; cpu < real_ncpus; cpu++) + for (cpu = 0; cpu < real_ncpus; cpu++) { PE_cpu_perfmon_interrupt_enable(cpu_datap(cpu)->cpu_id, - TRUE); + TRUE); + } first_time = 0; } /* dispatch to all CPUs */ cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, - mp_config); + mp_config); kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask; kpc_running_classes = mp_config->classes; kpc_configured = 1; - + return 0; } @@ -519,32 +523,32 @@ save_regs(void) int cpuid = current_processor()->cpu_id; uint32_t PMCR = 0; - __asm__ volatile("dmb ish"); + __asm__ volatile ("dmb ish"); /* Clear master enable */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); /* Save individual enable state */ - __asm__ volatile("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid])); + __asm__ volatile ("mrc p15, 0, %0, c9, c12, 1;" : "=r" (saved_PMCNTENSET[cpuid])); /* Save PMOVSR */ - __asm__ volatile("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid])); + __asm__ volatile ("mrc p15, 0, %0, c9, c12, 3;" : "=r" (saved_PMOVSR[cpuid])); /* Select fixed counter with PMSELR.SEL */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31)); /* Read PMXEVTYPER */ - __asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0])); + __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][0])); /* Save configurable event selections */ for (i = 0; i < 4; i++) { /* Select counter with PMSELR.SEL */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i)); /* Read PMXEVTYPER */ - __asm__ volatile("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1])); + __asm__ volatile ("mrc p15, 0, %0, c9, c13, 1;" : "=r" (saved_PMXEVTYPER[cpuid][i + 1])); } /* Finally, save count for each counter */ - for (i=0; i < 5; i++) { + for (i = 0; i < 5; i++) { saved_counter[cpuid][i] = read_counter(i); } } @@ -555,55 +559,56 @@ restore_regs(void) int i; int cpuid = current_processor()->cpu_id; uint64_t extra; - uint32_t PMCR = 1; + uint32_t PMCR = 1; /* Restore counter values */ for (i = 0; i < 5; i++) { /* did we overflow? if so handle it now since we won't get a pmi */ uint32_t mask; - /* check the counter for overflow */ + /* check the counter for overflow */ if (i == 0) { mask = 1 << 31; } else { mask = 1 << (i - 1); } - + if (saved_PMOVSR[cpuid] & mask) { extra = kpc_reload_counter(i); - /* + /* * CONFIGURABLE_* directly follows FIXED, so we can simply * increment the index here. Although it's ugly. */ FIXED_SHADOW(i) - += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra; + += (kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + extra; - if (FIXED_ACTIONID(i)) + if (FIXED_ACTIONID(i)) { kpc_sample_kperf(FIXED_ACTIONID(i)); + } } else { write_counter(i, saved_counter[cpuid][i]); } } /* Restore configuration - first, the fixed... 
*/ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (31)); /* Write PMXEVTYPER */ - __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0])); - + __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][0])); + /* ...then the configurable */ for (i = 0; i < 4; i++) { /* Select counter with PMSELR.SEL */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 5;" : : "r" (i)); /* Write PMXEVTYPER */ - __asm__ volatile("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1])); + __asm__ volatile ("mcr p15, 0, %0, c9, c13, 1;" : : "r" (saved_PMXEVTYPER[cpuid][i + 1])); } /* Restore enable state */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid])); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 1;" : : "r" (saved_PMCNTENSET[cpuid])); /* Counter master re-enable */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 0;" : : "r" (PMCR)); } static void @@ -628,8 +633,9 @@ kpc_set_reload_xcall(void *vmp_config) /* set the new period */ count = kpc_fixed_count(); for (uint32_t i = 0; i < count; ++i) { - if (*new_period == 0) + if (*new_period == 0) { *new_period = kpc_fixed_max(); + } FIXED_RELOAD(i) = max - *new_period; /* reload the counter if possible */ kpc_reload_counter(i); @@ -652,10 +658,12 @@ kpc_set_reload_xcall(void *vmp_config) count = kpc_configurable_count(); for (uint32_t i = 0; i < count; ++i) { /* ignore the counter */ - if (((1ULL << i) & mp_config->pmc_mask) == 0) + if (((1ULL << i) & mp_config->pmc_mask) == 0) { continue; - if (*new_period == 0) + } + if (*new_period == 0) { *new_period = kpc_configurable_max(); + } CONFIGURABLE_RELOAD(i) = max - *new_period; /* reload the counter */ kpc_reload_counter(offset + i); @@ -666,8 +674,9 @@ kpc_set_reload_xcall(void *vmp_config) ml_set_interrupts_enabled(enabled); - if (hw_atomic_sub(&kpc_reload_sync, 1) == 0) + if (hw_atomic_sub(&kpc_reload_sync, 1) == 0) { thread_wakeup((event_t) &kpc_reload_sync); + } } @@ -686,12 +695,14 @@ int kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask) { uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count(); - + assert(configv); - for (uint32_t i = 0; i < cfg_count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < cfg_count; ++i) { + if ((1ULL << i) & pmc_mask) { *configv++ = get_counter_config(i + offset); + } + } return 0; } @@ -707,8 +718,9 @@ kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask) enabled = ml_set_interrupts_enabled(FALSE); for (uint32_t i = 0; i < cfg_count; ++i) { - if (((1ULL << i) & pmc_mask) == 0) + if (((1ULL << i) & pmc_mask) == 0) { continue; + } assert(kpc_controls_counter(i + offset)); set_counter_config(i + offset, *configv++); @@ -737,13 +749,14 @@ kpc_set_config_xcall(void *vmp_config) new_config += kpc_popcount(mp_config->pmc_mask); } - if (hw_atomic_sub(&kpc_config_sync, 1) == 0) + if (hw_atomic_sub(&kpc_config_sync, 1) == 0) { thread_wakeup((event_t) &kpc_config_sync); + } } int kpc_set_config_arch(struct kpc_config_remote *mp_config) -{ +{ /* dispatch to all CPUs */ cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config); @@ -752,18 +765,20 @@ kpc_set_config_arch(struct kpc_config_remote *mp_config) return 0; } -void +void kpc_idle(void) { - if (kpc_configured) + if (kpc_configured) { save_regs(); + } } 
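[Editor's note on kpc_set_reload_xcall() and the overflow fix-ups above: this is the standard up-counter trick. To take a PMI roughly every `period` events on a counter that overflows past `max`, the counter is preloaded with `reload = max - period`; each overflow then contributes `max - reload + 1` events (the `+ 1` being the final increment that wraps max to zero) to the 64-bit shadow total, plus whatever the freshly reloaded counter has counted since. restore_regs() applies the same fix-up for overflows latched in the saved PMOVSR, apparently because the PMI itself will not be redelivered once the counters are reprogrammed. A self-contained model of the bookkeeping; the 32-bit max is an assumption matching the ARMv7 event counters shown here:]

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t max = UINT32_MAX;   /* 32-bit PMU event counter */
        uint64_t period = 100000;          /* want a PMI every ~100k events */
        uint64_t reload = max - period;    /* what FIXED_RELOAD / CONFIGURABLE_RELOAD hold */
        uint64_t shadow = 0;               /* 64-bit running total */

        /* One overflow: the counter ran from `reload` up past `max` and wrapped. */
        uint64_t extra = 37;               /* events counted after the hardware reload */
        shadow += (max - reload + 1 /* wrap */) + extra;

        printf("wrap term = %llu (period + 1), shadow = %llu\n",
            (unsigned long long)(max - reload + 1),
            (unsigned long long)shadow);
        return 0;
    }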
-void -kpc_idle_exit(void) +void +kpc_idle_exit(void) { - if (kpc_configured) + if (kpc_configured) { restore_regs(); + } } static uint32_t kpc_xread_sync; @@ -771,7 +786,7 @@ static void kpc_get_curcpu_counters_xcall(void *args) { struct kpc_get_counters_remote *handler = args; - int offset=0, r=0; + int offset = 0, r = 0; assert(handler); assert(handler->buf); @@ -782,8 +797,9 @@ kpc_get_curcpu_counters_xcall(void *args) /* number of counters added by this CPU, needs to be atomic */ hw_atomic_add(&(handler->nb_counters), r); - if (hw_atomic_sub(&kpc_xread_sync, 1) == 0) + if (hw_atomic_sub(&kpc_xread_sync, 1) == 0) { thread_wakeup((event_t) &kpc_xread_sync); + } } int @@ -801,8 +817,9 @@ kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf) enabled = ml_set_interrupts_enabled(FALSE); - if (curcpu) + if (curcpu) { *curcpu = current_processor()->cpu_id; + } cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl); ml_set_interrupts_enabled(enabled); @@ -818,12 +835,12 @@ kpc_get_pmu_version(void) int kpc_set_sw_inc( uint32_t mask ) -{ +{ /* Only works with the configurable counters set to count the increment event (0x0) */ /* Write to PMSWINC */ - __asm__ volatile("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask)); - + __asm__ volatile ("mcr p15, 0, %0, c9, c12, 4;" : : "r" (mask)); + return 0; } @@ -933,14 +950,14 @@ kpc_set_config_arch(struct kpc_config_remote *mp_config __unused) return ENOTSUP; } -void +void kpc_idle(void) { // do nothing } -void -kpc_idle_exit(void) +void +kpc_idle_exit(void) { // do nothing } diff --git a/osfmk/arm/lock.h b/osfmk/arm/lock.h index 943a5f345..18b842009 100644 --- a/osfmk/arm/lock.h +++ b/osfmk/arm/lock.h @@ -33,39 +33,39 @@ * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _ARM_LOCK_H_ -#define _ARM_LOCK_H_ +#ifndef _ARM_LOCK_H_ +#define _ARM_LOCK_H_ #warning This header is deprecated. Use instead. 
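[Editor's note: several of the kpc entry points above (kpc_set_running_arch, kpc_set_config_arch, kpc_get_all_cpus_counters) share one synchronization idiom -- cpu_broadcast_xcall() fans the work out, every CPU runs the xcall and hw_atomic_sub()s a shared sync counter, and whichever CPU drives it to zero does the thread_wakeup(). A minimal userspace analogue with C11 atomics and pthreads; the names and the condition-variable wakeup are illustrative stand-ins, not xnu APIs:]

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NCPUS 4

    static atomic_uint sync_count;
    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
    static int done;

    /* Per-"CPU" work, like kpc_get_curcpu_counters_xcall(). */
    static void *xcall(void *arg)
    {
        (void)arg;
        /* ... read this CPU's counters here ... */
        if (atomic_fetch_sub(&sync_count, 1) == 1) {   /* old value 1 => we hit zero */
            pthread_mutex_lock(&m);
            done = 1;
            pthread_cond_signal(&c);                   /* thread_wakeup() analogue */
            pthread_mutex_unlock(&m);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NCPUS];
        atomic_store(&sync_count, NCPUS);
        for (int i = 0; i < NCPUS; i++)
            pthread_create(&t[i], NULL, xcall, NULL);
        pthread_mutex_lock(&m);
        while (!done)
            pthread_cond_wait(&c, &m);                 /* like assert_wait + block */
        pthread_mutex_unlock(&m);
        for (int i = 0; i < NCPUS; i++)
            pthread_join(t[i], NULL);
        puts("all CPUs reported");
        return 0;
    }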
-#endif /* _ARM_LOCK_H_ */ +#endif /* _ARM_LOCK_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/arm/locks.h b/osfmk/arm/locks.h index 16acddb90..41941a9d1 100644 --- a/osfmk/arm/locks.h +++ b/osfmk/arm/locks.h @@ -26,234 +26,234 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _ARM_LOCKS_H_ -#define _ARM_LOCKS_H_ +#ifndef _ARM_LOCKS_H_ +#define _ARM_LOCKS_H_ #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #endif -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE -extern unsigned int LcksOpts; +extern unsigned int LcksOpts; -#define enaLkDeb 0x00000001 /* Request debug in default attribute */ -#define enaLkStat 0x00000002 /* Request statistic in default attribute */ -#define disLkRWPrio 0x00000004 /* Disable RW lock priority promotion */ +#define enaLkDeb 0x00000001 /* Request debug in default attribute */ +#define enaLkStat 0x00000002 /* Request statistic in default attribute */ +#define disLkRWPrio 0x00000004 /* Disable RW lock priority promotion */ +#define enaLkTimeStat 0x00000008 /* Request time statistics in default attribute */ -#define disLkType 0x80000000 /* Disable type checking */ -#define disLktypeb 0 -#define disLkThread 0x40000000 /* Disable ownership checking */ -#define disLkThreadb 1 -#define enaLkExtStck 0x20000000 /* Enable extended backtrace */ -#define enaLkExtStckb 2 -#define disLkMyLck 0x10000000 /* Disable recursive lock dectection */ -#define disLkMyLckb 3 +#define disLkType 0x80000000 /* Disable type checking */ +#define disLktypeb 0 +#define disLkThread 0x40000000 /* Disable ownership checking */ +#define disLkThreadb 1 +#define enaLkExtStck 0x20000000 /* Enable extended backtrace */ +#define enaLkExtStckb 2 +#define disLkMyLck 0x10000000 /* Disable recursive lock dectection */ +#define disLkMyLckb 3 #endif -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE typedef struct { - struct hslock hwlock; - uintptr_t type; + struct hslock hwlock; + uintptr_t type; } lck_spin_t; #define lck_spin_data hwlock.lock_data -#define LCK_SPIN_TAG_DESTROYED 0xdead /* lock marked as Destroyed */ +#define LCK_SPIN_TAG_DESTROYED 0xdead /* lock marked as Destroyed */ -#define LCK_SPIN_TYPE 0x00000011 +#define LCK_SPIN_TYPE 0x00000011 #else -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE typedef struct { - uintptr_t opaque[2]; + uintptr_t opaque[2]; } lck_spin_t; #else -typedef struct __lck_spin_t__ lck_spin_t; -#endif // KERNEL_PRIVATE -#endif // MACH_KERNEL_PRIVATE +typedef struct __lck_spin_t__ lck_spin_t; +#endif // KERNEL_PRIVATE +#endif // MACH_KERNEL_PRIVATE -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE typedef struct _lck_mtx_ { union { - - uintptr_t lck_mtx_data; /* Thread pointer plus lock bits */ - uintptr_t lck_mtx_tag; /* Tag for type */ - }; /* arm: 4 arm64: 8 */ + uintptr_t lck_mtx_data; /* Thread pointer plus lock bits */ + uintptr_t lck_mtx_tag; /* Tag for type */ + }; /* arm: 4 arm64: 8 */ union { struct { - uint16_t lck_mtx_waiters;/* Number of waiters */ - uint8_t lck_mtx_pri; /* Priority to inherit */ - uint8_t lck_mtx_type; /* Type */ + uint16_t lck_mtx_waiters;/* Number of waiters */ + uint8_t lck_mtx_pri; /* Priority to inherit */ + uint8_t lck_mtx_type; /* Type */ }; struct { - struct _lck_mtx_ext_ *lck_mtx_ptr; /* Indirect pointer */ + struct _lck_mtx_ext_ *lck_mtx_ptr; /* Indirect pointer */ }; - }; /* arm: 4 arm64: 8 */ -} lck_mtx_t; /* arm: 8 arm64: 16 */ + }; /* arm: 4 arm64: 8 */ +} lck_mtx_t; /* arm: 8 arm64: 16 */ /* Shared between mutex and read-write locks */ -#define 
LCK_ILOCK_BIT 0 -#define ARM_LCK_WAITERS_BIT 1 -#define LCK_ILOCK (1 << LCK_ILOCK_BIT) -#define ARM_LCK_WAITERS (1 << ARM_LCK_WAITERS_BIT) +#define LCK_ILOCK_BIT 0 +#define ARM_LCK_WAITERS_BIT 1 +#define LCK_ILOCK (1 << LCK_ILOCK_BIT) +#define ARM_LCK_WAITERS (1 << ARM_LCK_WAITERS_BIT) -#define LCK_MTX_TYPE 0x22 /* lock type */ +#define LCK_MTX_TYPE 0x22 /* lock type */ -#define LCK_MTX_TAG_INDIRECT 0x00001007 /* lock marked as Indirect */ -#define LCK_MTX_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */ +#define LCK_MTX_TAG_INDIRECT 0x00001007 /* lock marked as Indirect */ +#define LCK_MTX_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */ -#define LCK_FRAMES_MAX 8 +#define LCK_FRAMES_MAX 8 -extern uint64_t MutexSpin; +extern uint64_t MutexSpin; typedef struct { - unsigned int type; - vm_offset_t stack[LCK_FRAMES_MAX]; - vm_offset_t thread; + unsigned int type; + vm_offset_t stack[LCK_FRAMES_MAX]; + vm_offset_t thread; } lck_mtx_deb_t; #define MUTEX_TAG 0x4d4d typedef struct { - unsigned int lck_mtx_stat_data; + unsigned int lck_mtx_stat_data; } lck_mtx_stat_t; typedef struct _lck_mtx_ext_ { - lck_mtx_t lck_mtx; /* arm: 12 arm64: 24 */ - struct _lck_grp_ *lck_mtx_grp; /* arm: 4 arm64: 8 */ - unsigned int lck_mtx_attr; /* arm: 4 arm64: 4 */ - lck_mtx_stat_t lck_mtx_stat; /* arm: 4 arm64: 4 */ - lck_mtx_deb_t lck_mtx_deb; /* arm: 40 arm64: 80 */ + lck_mtx_t lck_mtx; /* arm: 12 arm64: 24 */ + struct _lck_grp_ *lck_mtx_grp; /* arm: 4 arm64: 8 */ + unsigned int lck_mtx_attr; /* arm: 4 arm64: 4 */ + lck_mtx_stat_t lck_mtx_stat; /* arm: 4 arm64: 4 */ + lck_mtx_deb_t lck_mtx_deb; /* arm: 40 arm64: 80 */ } lck_mtx_ext_t; /* arm: 64 arm64: 120 */ -#define LCK_MTX_ATTR_DEBUG 0x1 -#define LCK_MTX_ATTR_DEBUGb 31 -#define LCK_MTX_ATTR_STAT 0x2 -#define LCK_MTX_ATTR_STATb 30 +#define LCK_MTX_ATTR_DEBUG 0x1 +#define LCK_MTX_ATTR_DEBUGb 31 +#define LCK_MTX_ATTR_STAT 0x2 +#define LCK_MTX_ATTR_STATb 30 #define LCK_MTX_EVENT(lck) ((event_t)(((unsigned int*)(lck))+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int)))) #define LCK_EVENT_TO_MUTEX(event) ((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t)-1)/sizeof(unsigned int)))) #else -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE typedef struct { - uintptr_t opaque[2]; + uintptr_t opaque[2]; } lck_mtx_t; typedef struct { #if defined(__arm64__) - unsigned long opaque[16]; + unsigned long opaque[16]; #else /* __arm__ */ - unsigned int opaque[16]; -#endif + unsigned int opaque[16]; +#endif } lck_mtx_ext_t; #else -typedef struct __lck_mtx_t__ lck_mtx_t; +typedef struct __lck_mtx_t__ lck_mtx_t; #endif #endif -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE typedef union { struct { - uint16_t shared_count; /* No. of shared granted request */ - uint16_t interlock: 1, /* Interlock */ - priv_excl: 1, /* priority for Writer */ - want_upgrade: 1, /* Read-to-write upgrade waiting */ - want_excl: 1, /* Writer is waiting, or locked for write */ - r_waiting: 1, /* Someone is sleeping on lock */ - w_waiting: 1, /* Writer is sleeping on lock */ - can_sleep: 1, /* Can attempts to lock go to sleep? */ - _pad2: 8, /* padding */ - tag_valid: 1; /* Field is actually a tag, not a bitfield */ + uint16_t shared_count; /* No. 
of shared granted request */ + uint16_t interlock: 1, /* Interlock */ + priv_excl: 1, /* priority for Writer */ + want_upgrade: 1, /* Read-to-write upgrade waiting */ + want_excl: 1, /* Writer is waiting, or locked for write */ + r_waiting: 1, /* Someone is sleeping on lock */ + w_waiting: 1, /* Writer is sleeping on lock */ + can_sleep: 1, /* Can attempts to lock go to sleep? */ + _pad2: 8, /* padding */ + tag_valid: 1; /* Field is actually a tag, not a bitfield */ #if __arm64__ - uint32_t _pad4; + uint32_t _pad4; #endif }; struct { - uint32_t data; /* Single word version of bitfields and shared count */ + uint32_t data; /* Single word version of bitfields and shared count */ #if __arm64__ - uint32_t lck_rw_pad4; + uint32_t lck_rw_pad4; #endif }; } lck_rw_word_t; typedef struct { - lck_rw_word_t word; - thread_t lck_rw_owner; -} lck_rw_t; /* arm: 8 arm64: 16 */ - -#define lck_rw_shared_count word.shared_count -#define lck_rw_interlock word.interlock -#define lck_rw_priv_excl word.priv_excl -#define lck_rw_want_upgrade word.want_upgrade -#define lck_rw_want_excl word.want_excl -#define lck_r_waiting word.r_waiting -#define lck_w_waiting word.w_waiting -#define lck_rw_can_sleep word.can_sleep -#define lck_rw_data word.data + lck_rw_word_t word; + thread_t lck_rw_owner; +} lck_rw_t; /* arm: 8 arm64: 16 */ + +#define lck_rw_shared_count word.shared_count +#define lck_rw_interlock word.interlock +#define lck_rw_priv_excl word.priv_excl +#define lck_rw_want_upgrade word.want_upgrade +#define lck_rw_want_excl word.want_excl +#define lck_r_waiting word.r_waiting +#define lck_w_waiting word.w_waiting +#define lck_rw_can_sleep word.can_sleep +#define lck_rw_data word.data // tag and data reference the same memory. When the tag_valid bit is set, // the data word should be treated as a tag instead of a bitfield. 
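[Editor's note on lck_rw_word_t above: the union overlays named bitfields with a single 32-bit `data` word, so the slow path can inspect fields by name while the fast path does whole-word atomic operations. A compressed standalone illustration of the same overlay; it assumes little-endian, LSB-first bitfield allocation (as clang on arm produces), and bitfield layout being compiler-dependent is exactly why the kernel also defines the explicit LCK_RW_*_BIT word masks that follow:]

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
        struct {
            uint16_t shared_count;          /* low 16 bits: reader count */
            uint16_t interlock : 1,
                     want_excl : 1;         /* ...remaining flag bits elided... */
        };
        uint32_t data;                      /* whole-word view for atomics */
    } rw_word_t;

    #define INTERLOCK_BIT 16
    #define INTERLOCK     (1u << INTERLOCK_BIT)

    int main(void)
    {
        rw_word_t w = { .data = 0 };
        w.data += 1;            /* take one shared (reader) reference via the word */
        w.data |= INTERLOCK;    /* grab the interlock via the word view */
        printf("readers=%u interlock=%u\n",
            (unsigned)w.shared_count, (unsigned)w.interlock);
        return 0;
    }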
-#define lck_rw_tag_valid word.tag_valid -#define lck_rw_tag word.data - -#define LCK_RW_SHARED_READER_OFFSET 0 -#define LCK_RW_INTERLOCK_BIT 16 -#define LCK_RW_PRIV_EXCL_BIT 17 -#define LCK_RW_WANT_UPGRADE_BIT 18 -#define LCK_RW_WANT_EXCL_BIT 19 -#define LCK_RW_R_WAITING_BIT 20 -#define LCK_RW_W_WAITING_BIT 21 -#define LCK_RW_CAN_SLEEP_BIT 22 +#define lck_rw_tag_valid word.tag_valid +#define lck_rw_tag word.data + +#define LCK_RW_SHARED_READER_OFFSET 0 +#define LCK_RW_INTERLOCK_BIT 16 +#define LCK_RW_PRIV_EXCL_BIT 17 +#define LCK_RW_WANT_UPGRADE_BIT 18 +#define LCK_RW_WANT_EXCL_BIT 19 +#define LCK_RW_R_WAITING_BIT 20 +#define LCK_RW_W_WAITING_BIT 21 +#define LCK_RW_CAN_SLEEP_BIT 22 // 23-30 -#define LCK_RW_TAG_VALID_BIT 31 +#define LCK_RW_TAG_VALID_BIT 31 -#define LCK_RW_INTERLOCK (1 << LCK_RW_INTERLOCK_BIT) -#define LCK_RW_R_WAITING (1 << LCK_RW_R_WAITING_BIT) -#define LCK_RW_W_WAITING (1 << LCK_RW_W_WAITING_BIT) -#define LCK_RW_WANT_UPGRADE (1 << LCK_RW_WANT_UPGRADE_BIT) -#define LCK_RW_WANT_EXCL (1 << LCK_RW_WANT_EXCL_BIT) -#define LCK_RW_TAG_VALID (1 << LCK_RW_TAG_VALID_BIT) -#define LCK_RW_PRIV_EXCL (1 << LCK_RW_PRIV_EXCL_BIT) -#define LCK_RW_SHARED_MASK (0xffff << LCK_RW_SHARED_READER_OFFSET) -#define LCK_RW_SHARED_READER (0x1 << LCK_RW_SHARED_READER_OFFSET) +#define LCK_RW_INTERLOCK (1 << LCK_RW_INTERLOCK_BIT) +#define LCK_RW_R_WAITING (1 << LCK_RW_R_WAITING_BIT) +#define LCK_RW_W_WAITING (1 << LCK_RW_W_WAITING_BIT) +#define LCK_RW_WANT_UPGRADE (1 << LCK_RW_WANT_UPGRADE_BIT) +#define LCK_RW_WANT_EXCL (1 << LCK_RW_WANT_EXCL_BIT) +#define LCK_RW_TAG_VALID (1 << LCK_RW_TAG_VALID_BIT) +#define LCK_RW_PRIV_EXCL (1 << LCK_RW_PRIV_EXCL_BIT) +#define LCK_RW_SHARED_MASK (0xffff << LCK_RW_SHARED_READER_OFFSET) +#define LCK_RW_SHARED_READER (0x1 << LCK_RW_SHARED_READER_OFFSET) -#define LCK_RW_TAG_DESTROYED ((LCK_RW_TAG_VALID | 0xdddddeadu)) /* lock marked as Destroyed */ +#define LCK_RW_TAG_DESTROYED ((LCK_RW_TAG_VALID | 0xdddddeadu)) /* lock marked as Destroyed */ -#define LCK_RW_WRITER_EVENT(lck) (event_t)((uintptr_t)(lck)+1) -#define LCK_RW_READER_EVENT(lck) (event_t)((uintptr_t)(lck)+2) -#define WRITE_EVENT_TO_RWLOCK(event) ((lck_rw_t *)((uintptr_t)(event)-1)) -#define READ_EVENT_TO_RWLOCK(event) ((lck_rw_t *)((uintptr_t)(event)-2)) +#define LCK_RW_WRITER_EVENT(lck) (event_t)((uintptr_t)(lck)+1) +#define LCK_RW_READER_EVENT(lck) (event_t)((uintptr_t)(lck)+2) +#define WRITE_EVENT_TO_RWLOCK(event) ((lck_rw_t *)((uintptr_t)(event)-1)) +#define READ_EVENT_TO_RWLOCK(event) ((lck_rw_t *)((uintptr_t)(event)-2)) #if __ARM_ENABLE_WFE_ -#define wait_for_event() __builtin_arm_wfe() +#define wait_for_event() __builtin_arm_wfe() #if __arm__ -#define set_event() do{__builtin_arm_dsb(DSB_ISHST);__builtin_arm_sev();}while(0) -#define LOCK_SNOOP_SPINS 4 +#define set_event() do{__builtin_arm_dsb(DSB_ISHST);__builtin_arm_sev();}while(0) +#define LOCK_SNOOP_SPINS 4 #else -#define set_event() do{}while(0) // arm64 sev is implicit in stlxr -#define LOCK_SNOOP_SPINS 0x300 +#define set_event() do{}while(0) // arm64 sev is implicit in stlxr +#define LOCK_SNOOP_SPINS 0x300 #endif #else -#define wait_for_event() __builtin_arm_clrex() -#define set_event() do{}while(0) -#define LOCK_SNOOP_SPINS 0x300 +#define wait_for_event() __builtin_arm_clrex() +#define set_event() do{}while(0) +#define LOCK_SNOOP_SPINS 0x300 #endif // __ARM_ENABLE_WFE_ #if LOCK_PRIVATE -#define LOCK_PANIC_TIMEOUT 0xc00000 // 12.5 m ticks = 250ms with 24MHz OSC +#define LOCK_PANIC_TIMEOUT 0xc00000 // 12.5 m ticks ~= 524ms with 24MHz OSC #define 
PLATFORM_LCK_ILOCK LCK_ILOCK @@ -262,12 +262,12 @@ typedef struct { * Lock state to thread pointer * Clear the bottom bits */ -#define LCK_MTX_STATE_TO_THREAD(s) (thread_t)(s & ~(LCK_ILOCK | ARM_LCK_WAITERS)) +#define LCK_MTX_STATE_TO_THREAD(s) (thread_t)(s & ~(LCK_ILOCK | ARM_LCK_WAITERS)) /* * Thread pointer to lock state * arm thread pointers are aligned such that the bottom two bits are clear */ -#define LCK_MTX_THREAD_TO_STATE(t) ((uintptr_t)t) +#define LCK_MTX_THREAD_TO_STATE(t) ((uintptr_t)t) /* * Thread pointer mask */ @@ -277,55 +277,59 @@ typedef struct { #define preemption_disabled_for_thread(t) (((volatile thread_t)t)->machine.preemption_count > 0) -__unused static void disable_interrupts_noread(void) +__unused static void +disable_interrupts_noread(void) { #if __arm__ __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ #else - __builtin_arm_wsr64("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF)); // Mask IRQ FIQ + __builtin_arm_wsr64("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF)); // Mask IRQ FIQ #endif } -__unused static inline long get_interrupts(void) +__unused static inline long +get_interrupts(void) { - long state; + long state; #if __arm__ - __asm__ volatile ("mrs %[state], cpsr" :[state] "=r" (state)); // Read cpsr + __asm__ volatile ("mrs %[state], cpsr" :[state] "=r" (state)); // Read cpsr #else - state = __builtin_arm_rsr64("DAIF"); // Read interrupt state + state = __builtin_arm_rsr64("DAIF"); // Read interrupt state #endif return state; } -__unused static inline long disable_interrupts(void) +__unused static inline long +disable_interrupts(void) { - long state; - - state = get_interrupts(); // Get previous state - disable_interrupts_noread(); // Disable + long state; + + state = get_interrupts(); // Get previous state + disable_interrupts_noread(); // Disable return state; } -__unused static inline void restore_interrupts(long state) +__unused static inline void +restore_interrupts(long state) { #if __arm__ __asm__ volatile ("msr cpsr, %[state]" :: [state] "r" (state) : "cc", "memory"); // Restore CPSR #elif __arm64__ - __builtin_arm_wsr64("DAIF", state); // Restore masks + __builtin_arm_wsr64("DAIF", state); // Restore masks #endif } #endif // LOCK_PRIVATE #else -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE typedef struct { - uintptr_t opaque[2]; + uintptr_t opaque[2]; } lck_rw_t; #else -typedef struct __lck_rw_t__ lck_rw_t; +typedef struct __lck_rw_t__ lck_rw_t; #endif #endif -#endif /* _ARM_LOCKS_H_ */ +#endif /* _ARM_LOCKS_H_ */ diff --git a/osfmk/arm/locks_arm.c b/osfmk/arm/locks_arm.c index b43f665db..5b6917ac3 100644 --- a/osfmk/arm/locks_arm.c +++ b/osfmk/arm/locks_arm.c @@ -31,23 +31,23 @@ /* * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie * Mellon University All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright notice * and this permission notice appear in all copies of the software, * derivative works or modified versions, and any portions thereof, and that * both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
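[Editor's note on the get_interrupts()/disable_interrupts()/restore_interrupts() helpers above: they implement the save-disable-restore discipline -- capture the current CPSR/DAIF mask, mask IRQ+FIQ, do the critical work, then restore exactly the prior state rather than unconditionally unmasking, which is what lets critical sections nest. A runnable userspace analogue of the same discipline using POSIX signal masks; sigprocmask stands in for the privileged register accesses:]

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        sigset_t all, saved;
        sigfillset(&all);

        /* disable_interrupts() analogue: block everything, remember prior state. */
        sigprocmask(SIG_BLOCK, &all, &saved);

        /* ... critical section: no async signal can interrupt us here ... */
        puts("in critical section");

        /* restore_interrupts(state) analogue: put the old mask back verbatim,
         * not "unblock all" -- restoring the saved state is what makes the
         * pattern compose when sections nest. */
        sigprocmask(SIG_SETMASK, &saved, NULL);
        return 0;
    }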
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science Carnegie Mellon University Pittsburgh PA * 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon the * rights to redistribute these changes. */ @@ -65,6 +65,7 @@ #include #include +#include #include #include #include @@ -83,30 +84,21 @@ #include -/* - * We need only enough declarations from the BSD-side to be able to - * test if our probe is active, and to call __dtrace_probe(). Setting - * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in. - */ -#if CONFIG_DTRACE -#define NEED_DTRACE_DEFS -#include <../bsd/sys/lockstat.h> - -#define DTRACE_RW_SHARED 0x0 //reader -#define DTRACE_RW_EXCL 0x1 //writer -#define DTRACE_NO_FLAG 0x0 //not applicable - -#endif /* CONFIG_DTRACE */ +#if CONFIG_DTRACE +#define DTRACE_RW_SHARED 0x0 //reader +#define DTRACE_RW_EXCL 0x1 //writer +#define DTRACE_NO_FLAG 0x0 //not applicable +#endif /* CONFIG_DTRACE */ -#define LCK_RW_LCK_EXCLUSIVE_CODE 0x100 -#define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101 -#define LCK_RW_LCK_SHARED_CODE 0x102 -#define LCK_RW_LCK_SH_TO_EX_CODE 0x103 -#define LCK_RW_LCK_SH_TO_EX1_CODE 0x104 -#define LCK_RW_LCK_EX_TO_SH_CODE 0x105 +#define LCK_RW_LCK_EXCLUSIVE_CODE 0x100 +#define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101 +#define LCK_RW_LCK_SHARED_CODE 0x102 +#define LCK_RW_LCK_SH_TO_EX_CODE 0x103 +#define LCK_RW_LCK_SH_TO_EX1_CODE 0x104 +#define LCK_RW_LCK_EX_TO_SH_CODE 0x105 -#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG) +#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG) // Panic in tests that check lock usage correctness // These are undesirable when in a panic or a debugger is runnning. @@ -114,6 +106,23 @@ unsigned int LcksOpts = 0; +#define ADAPTIVE_SPIN_ENABLE 0x1 + +#if __SMP__ +int lck_mtx_adaptive_spin_mode = ADAPTIVE_SPIN_ENABLE; +#else /* __SMP__ */ +int lck_mtx_adaptive_spin_mode = 0; +#endif /* __SMP__ */ + +#define SPINWAIT_OWNER_CHECK_COUNT 4 + +typedef enum { + SPINWAIT_ACQUIRED, /* Got the lock. */ + SPINWAIT_INTERLOCK, /* Got the interlock, no owner, but caller must finish acquiring the lock. */ + SPINWAIT_DID_SPIN, /* Got the interlock, spun, but failed to get the lock. */ + SPINWAIT_DID_NOT_SPIN, /* Got the interlock, did not spin. */ +} spinwait_result_t; + #if CONFIG_DTRACE && __SMP__ extern uint64_t dtrace_spin_threshold; #endif @@ -121,7 +130,7 @@ extern uint64_t dtrace_spin_threshold; /* Forwards */ -#if USLOCK_DEBUG +#if USLOCK_DEBUG /* * Perform simple lock checks. */ @@ -129,7 +138,7 @@ int uslock_check = 1; int max_lock_loops = 100000000; decl_simple_lock_data(extern, printf_lock) decl_simple_lock_data(extern, panic_lock) -#endif /* USLOCK_DEBUG */ +#endif /* USLOCK_DEBUG */ extern unsigned int not_in_kdp; @@ -139,35 +148,35 @@ extern unsigned int not_in_kdp; * is only used for debugging and statistics. */ typedef void *pc_t; -#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS) -#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS) +#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS) +#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS) -#ifdef lint +#ifdef lint /* * Eliminate lint complaints about unused local pc variables. 
*/ -#define OBTAIN_PC(pc,l) ++pc -#else /* lint */ -#define OBTAIN_PC(pc,l) -#endif /* lint */ +#define OBTAIN_PC(pc, l) ++pc +#else /* lint */ +#define OBTAIN_PC(pc, l) +#endif /* lint */ /* * Portable lock package implementation of usimple_locks. */ -#if USLOCK_DEBUG -#define USLDBG(stmt) stmt - void usld_lock_init(usimple_lock_t, unsigned short); - void usld_lock_pre(usimple_lock_t, pc_t); - void usld_lock_post(usimple_lock_t, pc_t); - void usld_unlock(usimple_lock_t, pc_t); - void usld_lock_try_pre(usimple_lock_t, pc_t); - void usld_lock_try_post(usimple_lock_t, pc_t); - int usld_lock_common_checks(usimple_lock_t, const char *); -#else /* USLOCK_DEBUG */ -#define USLDBG(stmt) -#endif /* USLOCK_DEBUG */ +#if USLOCK_DEBUG +#define USLDBG(stmt) stmt +void usld_lock_init(usimple_lock_t, unsigned short); +void usld_lock_pre(usimple_lock_t, pc_t); +void usld_lock_post(usimple_lock_t, pc_t); +void usld_unlock(usimple_lock_t, pc_t); +void usld_lock_try_pre(usimple_lock_t, pc_t); +void usld_lock_try_post(usimple_lock_t, pc_t); +int usld_lock_common_checks(usimple_lock_t, const char *); +#else /* USLOCK_DEBUG */ +#define USLDBG(stmt) +#endif /* USLOCK_DEBUG */ /* * Owner thread pointer when lock held in spin mode @@ -175,39 +184,39 @@ typedef void *pc_t; #define LCK_MTX_SPIN_TAG 0xfffffff0 -#define interlock_lock(lock) hw_lock_bit ((hw_lock_bit_t*)(&(lock)->lck_mtx_data), LCK_ILOCK_BIT) -#define interlock_try(lock) hw_lock_bit_try((hw_lock_bit_t*)(&(lock)->lck_mtx_data), LCK_ILOCK_BIT) -#define interlock_unlock(lock) hw_unlock_bit ((hw_lock_bit_t*)(&(lock)->lck_mtx_data), LCK_ILOCK_BIT) -#define lck_rw_ilk_lock(lock) hw_lock_bit ((hw_lock_bit_t*)(&(lock)->lck_rw_tag), LCK_RW_INTERLOCK_BIT) -#define lck_rw_ilk_unlock(lock) hw_unlock_bit((hw_lock_bit_t*)(&(lock)->lck_rw_tag), LCK_RW_INTERLOCK_BIT) +#define interlock_lock(lock) hw_lock_bit ((hw_lock_bit_t*)(&(lock)->lck_mtx_data), LCK_ILOCK_BIT, LCK_GRP_NULL) +#define interlock_try(lock) hw_lock_bit_try((hw_lock_bit_t*)(&(lock)->lck_mtx_data), LCK_ILOCK_BIT, LCK_GRP_NULL) +#define interlock_unlock(lock) hw_unlock_bit ((hw_lock_bit_t*)(&(lock)->lck_mtx_data), LCK_ILOCK_BIT) +#define lck_rw_ilk_lock(lock) hw_lock_bit ((hw_lock_bit_t*)(&(lock)->lck_rw_tag), LCK_RW_INTERLOCK_BIT, LCK_GRP_NULL) +#define lck_rw_ilk_unlock(lock) hw_unlock_bit((hw_lock_bit_t*)(&(lock)->lck_rw_tag), LCK_RW_INTERLOCK_BIT) -#define memory_barrier() __c11_atomic_thread_fence(memory_order_acq_rel_smp) -#define load_memory_barrier() __c11_atomic_thread_fence(memory_order_acquire_smp) -#define store_memory_barrier() __c11_atomic_thread_fence(memory_order_release_smp) +#define memory_barrier() __c11_atomic_thread_fence(memory_order_acq_rel_smp) +#define load_memory_barrier() __c11_atomic_thread_fence(memory_order_acquire_smp) +#define store_memory_barrier() __c11_atomic_thread_fence(memory_order_release_smp) // Enforce program order of loads and stores. 
#define ordered_load(target, type) \ - __c11_atomic_load((_Atomic type *)(target), memory_order_relaxed) + __c11_atomic_load((_Atomic type *)(target), memory_order_relaxed) #define ordered_store(target, type, value) \ - __c11_atomic_store((_Atomic type *)(target), value, memory_order_relaxed) + __c11_atomic_store((_Atomic type *)(target), value, memory_order_relaxed) -#define ordered_load_mtx(lock) ordered_load(&(lock)->lck_mtx_data, uintptr_t) -#define ordered_store_mtx(lock, value) ordered_store(&(lock)->lck_mtx_data, uintptr_t, (value)) -#define ordered_load_rw(lock) ordered_load(&(lock)->lck_rw_data, uint32_t) -#define ordered_store_rw(lock, value) ordered_store(&(lock)->lck_rw_data, uint32_t, (value)) -#define ordered_load_rw_owner(lock) ordered_load(&(lock)->lck_rw_owner, thread_t) -#define ordered_store_rw_owner(lock, value) ordered_store(&(lock)->lck_rw_owner, thread_t, (value)) -#define ordered_load_hw(lock) ordered_load(&(lock)->lock_data, uintptr_t) -#define ordered_store_hw(lock, value) ordered_store(&(lock)->lock_data, uintptr_t, (value)) -#define ordered_load_bit(lock) ordered_load((lock), uint32_t) -#define ordered_store_bit(lock, value) ordered_store((lock), uint32_t, (value)) +#define ordered_load_mtx(lock) ordered_load(&(lock)->lck_mtx_data, uintptr_t) +#define ordered_store_mtx(lock, value) ordered_store(&(lock)->lck_mtx_data, uintptr_t, (value)) +#define ordered_load_rw(lock) ordered_load(&(lock)->lck_rw_data, uint32_t) +#define ordered_store_rw(lock, value) ordered_store(&(lock)->lck_rw_data, uint32_t, (value)) +#define ordered_load_rw_owner(lock) ordered_load(&(lock)->lck_rw_owner, thread_t) +#define ordered_store_rw_owner(lock, value) ordered_store(&(lock)->lck_rw_owner, thread_t, (value)) +#define ordered_load_hw(lock) ordered_load(&(lock)->lock_data, uintptr_t) +#define ordered_store_hw(lock, value) ordered_store(&(lock)->lock_data, uintptr_t, (value)) +#define ordered_load_bit(lock) ordered_load((lock), uint32_t) +#define ordered_store_bit(lock, value) ordered_store((lock), uint32_t, (value)) // Prevent the compiler from reordering memory operations around this -#define compiler_memory_fence() __asm__ volatile ("" ::: "memory") +#define compiler_memory_fence() __asm__ volatile ("" ::: "memory") -#define LOCK_PANIC_TIMEOUT 0xc00000 -#define NOINLINE __attribute__((noinline)) +#define LOCK_PANIC_TIMEOUT 0xc00000 +#define NOINLINE __attribute__((noinline)) #if __arm__ @@ -218,8 +227,8 @@ typedef void *pc_t; #if __arm__ -#define enable_fiq() __asm__ volatile ("cpsie f" ::: "memory"); -#define enable_interrupts() __asm__ volatile ("cpsie if" ::: "memory"); +#define enable_fiq() __asm__ volatile ("cpsie f" ::: "memory"); +#define enable_interrupts() __asm__ volatile ("cpsie if" ::: "memory"); #endif /* @@ -247,7 +256,7 @@ static boolean_t lck_rw_grab(lck_rw_t *lock, int mode, boolean_t wait); static uint32_t atomic_exchange_begin32(uint32_t *target, uint32_t *previous, enum memory_order ord) { - uint32_t val; + uint32_t val; val = load_exclusive32(target, ord); *previous = val; @@ -257,7 +266,7 @@ atomic_exchange_begin32(uint32_t *target, uint32_t *previous, enum memory_order static boolean_t atomic_exchange_complete32(uint32_t *target, uint32_t previous, uint32_t newval, enum memory_order ord) { - (void)previous; // Previous not needed, monitor is held + (void)previous; // Previous not needed, monitor is held return store_exclusive32(target, newval, ord); } @@ -270,72 +279,78 @@ atomic_exchange_abort(void) static boolean_t atomic_test_and_set32(uint32_t *target, 
uint32_t test_mask, uint32_t set_mask, enum memory_order ord, boolean_t wait) { - uint32_t value, prev; + uint32_t value, prev; - for ( ; ; ) { + for (;;) { value = atomic_exchange_begin32(target, &prev, ord); if (value & test_mask) { - if (wait) - wait_for_event(); // Wait with monitor held - else - atomic_exchange_abort(); // Clear exclusive monitor + if (wait) { + wait_for_event(); // Wait with monitor held + } else { + atomic_exchange_abort(); // Clear exclusive monitor + } return FALSE; } value |= set_mask; - if (atomic_exchange_complete32(target, prev, value, ord)) + if (atomic_exchange_complete32(target, prev, value, ord)) { return TRUE; + } } } -void _disable_preemption(void) +void +_disable_preemption(void) { - thread_t thread = current_thread(); - unsigned int count; + thread_t thread = current_thread(); + unsigned int count; count = thread->machine.preemption_count + 1; ordered_store(&thread->machine.preemption_count, unsigned int, count); } -void _enable_preemption(void) +void +_enable_preemption(void) { - thread_t thread = current_thread(); - long state; - unsigned int count; + thread_t thread = current_thread(); + long state; + unsigned int count; #if __arm__ #define INTERRUPT_MASK PSR_IRQF -#else // __arm__ +#else // __arm__ #define INTERRUPT_MASK DAIF_IRQF -#endif // __arm__ +#endif // __arm__ count = thread->machine.preemption_count; - if (count == 0) - panic("Preemption count negative"); // Count will go negative when released + if (count == 0) { + panic("Preemption count negative"); // Count will go negative when released + } count--; - if (count > 0) - goto update_count; // Preemption is still disabled, just update - state = get_interrupts(); // Get interrupt state - if (state & INTERRUPT_MASK) - goto update_count; // Interrupts are already masked, can't take AST here - - disable_interrupts_noread(); // Disable interrupts + if (count > 0) { + goto update_count; // Preemption is still disabled, just update + } + state = get_interrupts(); // Get interrupt state + if (state & INTERRUPT_MASK) { + goto update_count; // Interrupts are already masked, can't take AST here + } + disable_interrupts_noread(); // Disable interrupts ordered_store(&thread->machine.preemption_count, unsigned int, count); if (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) { #if __arm__ #if __ARM_USER_PROTECT__ - uintptr_t up = arm_user_protect_begin(thread); -#endif // __ARM_USER_PROTECT__ + uintptr_t up = arm_user_protect_begin(thread); +#endif // __ARM_USER_PROTECT__ enable_fiq(); -#endif // __arm__ +#endif // __arm__ ast_taken_kernel(); // Handle urgent AST #if __arm__ #if __ARM_USER_PROTECT__ arm_user_protect_end(thread, up, TRUE); -#endif // __ARM_USER_PROTECT__ +#endif // __ARM_USER_PROTECT__ enable_interrupts(); - return; // Return early on arm only due to FIQ enabling -#endif // __arm__ + return; // Return early on arm only due to FIQ enabling +#endif // __arm__ } - restore_interrupts(state); // Enable interrupts + restore_interrupts(state); // Enable interrupts return; update_count: @@ -343,71 +358,73 @@ update_count: return; } -int get_preemption_level(void) +int +get_preemption_level(void) { return current_thread()->machine.preemption_count; } -/* Forward declarations for unexported functions that are used externally */ -void hw_lock_bit(hw_lock_bit_t *lock, unsigned int bit); -void hw_unlock_bit(hw_lock_bit_t *lock, unsigned int bit); - -#if __SMP__ +#if __SMP__ static unsigned int -hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout); 
+hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp)); #endif static inline unsigned int -hw_lock_bit_to_internal(hw_lock_bit_t *lock, unsigned int bit, uint32_t timeout) +hw_lock_bit_to_internal(hw_lock_bit_t *lock, unsigned int bit, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp)) { unsigned int success = 0; - uint32_t mask = (1 << bit); -#if !__SMP__ - uint32_t state; + uint32_t mask = (1 << bit); +#if !__SMP__ + uint32_t state; #endif -#if __SMP__ - if (__improbable(!atomic_test_and_set32(lock, mask, mask, memory_order_acquire, FALSE))) - success = hw_lock_bit_to_contended(lock, mask, timeout); - else +#if __SMP__ + if (__improbable(!atomic_test_and_set32(lock, mask, mask, memory_order_acquire, FALSE))) { + success = hw_lock_bit_to_contended(lock, mask, timeout LCK_GRP_ARG(grp)); + } else { success = 1; -#else // __SMP__ + } +#else // __SMP__ (void)timeout; state = ordered_load_bit(lock); if (!(mask & state)) { ordered_store_bit(lock, state | mask); success = 1; } -#endif // __SMP__ +#endif // __SMP__ -#if CONFIG_DTRACE - if (success) - LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, bit); -#endif + if (success) { + lck_grp_spin_update_held(lock LCK_GRP_ARG(grp)); + } return success; } -unsigned int -hw_lock_bit_to(hw_lock_bit_t *lock, unsigned int bit, uint32_t timeout) +unsigned +int +(hw_lock_bit_to)(hw_lock_bit_t * lock, unsigned int bit, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp)) { _disable_preemption(); - return hw_lock_bit_to_internal(lock, bit, timeout); + return hw_lock_bit_to_internal(lock, bit, timeout LCK_GRP_ARG(grp)); } -#if __SMP__ +#if __SMP__ static unsigned int NOINLINE -hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout) +hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout LCK_GRP_ARG(lck_grp_t *grp)) { - uint64_t end = 0; - int i; -#if CONFIG_DTRACE - uint64_t begin; - boolean_t dtrace_enabled = lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0; - if (__improbable(dtrace_enabled)) + uint64_t end = 0; + int i; +#if CONFIG_DTRACE || LOCK_STATS + uint64_t begin = 0; + boolean_t stat_enabled = lck_grp_spin_spin_enabled(lock LCK_GRP_ARG(grp)); +#endif /* CONFIG_DTRACE || LOCK_STATS */ + +#if LOCK_STATS || CONFIG_DTRACE + if (__improbable(stat_enabled)) { begin = mach_absolute_time(); -#endif - for ( ; ; ) { + } +#endif /* LOCK_STATS || CONFIG_DTRACE */ + for (;;) { for (i = 0; i < LOCK_SNOOP_SPINS; i++) { // Always load-exclusive before wfe // This grabs the monitor and wakes up on a release event @@ -415,30 +432,32 @@ hw_lock_bit_to_contended(hw_lock_bit_t *lock, uint32_t mask, uint32_t timeout) goto end; } } - if (end == 0) + if (end == 0) { end = ml_get_timebase() + timeout; - else if (ml_get_timebase() >= end) + } else if (ml_get_timebase() >= end) { break; + } } return 0; end: -#if CONFIG_DTRACE - if (__improbable(dtrace_enabled)) { - uint64_t spintime = mach_absolute_time() - begin; - if (spintime > dtrace_spin_threshold) - LOCKSTAT_RECORD2(LS_LCK_SPIN_LOCK_SPIN, lock, spintime, mask); +#if CONFIG_DTRACE || LOCK_STATS + if (__improbable(stat_enabled)) { + lck_grp_spin_update_spin(lock LCK_GRP_ARG(grp), mach_absolute_time() - begin); } -#endif + lck_grp_spin_update_miss(lock LCK_GRP_ARG(grp)); +#endif /* CONFIG_DTRACE || LCK_GRP_STAT */ + return 1; } -#endif // __SMP__ +#endif // __SMP__ void -hw_lock_bit(hw_lock_bit_t *lock, unsigned int bit) +(hw_lock_bit)(hw_lock_bit_t * lock, unsigned int bit LCK_GRP_ARG(lck_grp_t *grp)) { - if (hw_lock_bit_to(lock, bit, 
LOCK_PANIC_TIMEOUT)) + if (hw_lock_bit_to(lock, bit, LOCK_PANIC_TIMEOUT, LCK_GRP_PROBEARG(grp))) { return; -#if __SMP__ + } +#if __SMP__ panic("hw_lock_bit(): timed out (%p)", lock); #else panic("hw_lock_bit(): interlock held (%p)", lock); @@ -446,30 +465,33 @@ hw_lock_bit(hw_lock_bit_t *lock, unsigned int bit) } void -hw_lock_bit_nopreempt(hw_lock_bit_t *lock, unsigned int bit) +(hw_lock_bit_nopreempt)(hw_lock_bit_t * lock, unsigned int bit LCK_GRP_ARG(lck_grp_t *grp)) { - if (__improbable(get_preemption_level() == 0)) + if (__improbable(get_preemption_level() == 0)) { panic("Attempt to take no-preempt bitlock %p in preemptible context", lock); - if (hw_lock_bit_to_internal(lock, bit, LOCK_PANIC_TIMEOUT)) + } + if (hw_lock_bit_to_internal(lock, bit, LOCK_PANIC_TIMEOUT LCK_GRP_ARG(grp))) { return; -#if __SMP__ + } +#if __SMP__ panic("hw_lock_bit_nopreempt(): timed out (%p)", lock); #else panic("hw_lock_bit_nopreempt(): interlock held (%p)", lock); #endif } -unsigned int -hw_lock_bit_try(hw_lock_bit_t *lock, unsigned int bit) +unsigned +int +(hw_lock_bit_try)(hw_lock_bit_t * lock, unsigned int bit LCK_GRP_ARG(lck_grp_t *grp)) { - uint32_t mask = (1 << bit); -#if !__SMP__ - uint32_t state; + uint32_t mask = (1 << bit); +#if !__SMP__ + uint32_t state; #endif - boolean_t success = FALSE; + boolean_t success = FALSE; _disable_preemption(); -#if __SMP__ +#if __SMP__ // TODO: consider weak (non-looping) atomic test-and-set success = atomic_test_and_set32(lock, mask, mask, memory_order_acquire, FALSE); #else @@ -478,14 +500,14 @@ hw_lock_bit_try(hw_lock_bit_t *lock, unsigned int bit) ordered_store_bit(lock, state | mask); success = TRUE; } -#endif // __SMP__ - if (!success) +#endif // __SMP__ + if (!success) { _enable_preemption(); + } -#if CONFIG_DTRACE - if (success) - LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, bit); -#endif + if (success) { + lck_grp_spin_update_held(lock LCK_GRP_ARG(grp)); + } return success; } @@ -493,18 +515,18 @@ hw_lock_bit_try(hw_lock_bit_t *lock, unsigned int bit) static inline void hw_unlock_bit_internal(hw_lock_bit_t *lock, unsigned int bit) { - uint32_t mask = (1 << bit); -#if !__SMP__ - uint32_t state; + uint32_t mask = (1 << bit); +#if !__SMP__ + uint32_t state; #endif -#if __SMP__ +#if __SMP__ __c11_atomic_fetch_and((_Atomic uint32_t *)lock, ~mask, memory_order_release); set_event(); -#else // __SMP__ +#else // __SMP__ state = ordered_load_bit(lock); ordered_store_bit(lock, state & ~mask); -#endif // __SMP__ +#endif // __SMP__ #if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_SPIN_UNLOCK_RELEASE, lock, bit); #endif @@ -517,34 +539,62 @@ hw_unlock_bit_internal(hw_lock_bit_t *lock, unsigned int bit) * Decrement the preemption level. 
*/ void -hw_unlock_bit(hw_lock_bit_t *lock, unsigned int bit) +hw_unlock_bit(hw_lock_bit_t * lock, unsigned int bit) { hw_unlock_bit_internal(lock, bit); _enable_preemption(); } void -hw_unlock_bit_nopreempt(hw_lock_bit_t *lock, unsigned int bit) +hw_unlock_bit_nopreempt(hw_lock_bit_t * lock, unsigned int bit) { - if (__improbable(get_preemption_level() == 0)) + if (__improbable(get_preemption_level() == 0)) { panic("Attempt to release no-preempt bitlock %p in preemptible context", lock); + } hw_unlock_bit_internal(lock, bit); } +#if __SMP__ +static inline boolean_t +interlock_try_disable_interrupts( + lck_mtx_t *mutex, + boolean_t *istate) +{ + *istate = ml_set_interrupts_enabled(FALSE); + + if (interlock_try(mutex)) { + return 1; + } else { + ml_set_interrupts_enabled(*istate); + return 0; + } +} + +static inline void +interlock_unlock_enable_interrupts( + lck_mtx_t *mutex, + boolean_t istate) +{ + interlock_unlock(mutex); + ml_set_interrupts_enabled(istate); +} +#endif /* __SMP__ */ + /* * Routine: lck_spin_alloc_init */ lck_spin_t * lck_spin_alloc_init( - lck_grp_t * grp, - lck_attr_t * attr) + lck_grp_t * grp, + lck_attr_t * attr) { lck_spin_t *lck; - if ((lck = (lck_spin_t *) kalloc(sizeof(lck_spin_t))) != 0) + if ((lck = (lck_spin_t *) kalloc(sizeof(lck_spin_t))) != 0) { lck_spin_init(lck, grp, attr); + } - return (lck); + return lck; } /* @@ -552,11 +602,11 @@ lck_spin_alloc_init( */ void lck_spin_free( - lck_spin_t * lck, - lck_grp_t * grp) + lck_spin_t * lck, + lck_grp_t * grp) { lck_spin_destroy(lck, grp); - kfree((void *) lck, sizeof(lck_spin_t)); + kfree(lck, sizeof(lck_spin_t)); } /* @@ -564,9 +614,9 @@ lck_spin_free( */ void lck_spin_init( - lck_spin_t * lck, - lck_grp_t * grp, - __unused lck_attr_t * attr) + lck_spin_t * lck, + lck_grp_t * grp, + __unused lck_attr_t * attr) { hw_lock_init(&lck->hwlock); lck->type = LCK_SPIN_TYPE; @@ -593,11 +643,24 @@ arm_usimple_lock_init(simple_lock_t lck, __unused unsigned short initial_value) void lck_spin_lock(lck_spin_t *lock) { -#if DEVELOPMENT || DEBUG - if (lock->type != LCK_SPIN_TYPE) +#if DEVELOPMENT || DEBUG + if (lock->type != LCK_SPIN_TYPE) { + panic("Invalid spinlock %p", lock); + } +#endif // DEVELOPMENT || DEBUG + hw_lock_lock(&lock->hwlock, LCK_GRP_NULL); +} + +void +lck_spin_lock_grp(lck_spin_t *lock, lck_grp_t *grp) +{ +#pragma unused(grp) +#if DEVELOPMENT || DEBUG + if (lock->type != LCK_SPIN_TYPE) { panic("Invalid spinlock %p", lock); -#endif // DEVELOPMENT || DEBUG - hw_lock_lock(&lock->hwlock); + } +#endif // DEVELOPMENT || DEBUG + hw_lock_lock(&lock->hwlock, grp); } /* @@ -606,11 +669,24 @@ lck_spin_lock(lck_spin_t *lock) void lck_spin_lock_nopreempt(lck_spin_t *lock) { -#if DEVELOPMENT || DEBUG - if (lock->type != LCK_SPIN_TYPE) +#if DEVELOPMENT || DEBUG + if (lock->type != LCK_SPIN_TYPE) { + panic("Invalid spinlock %p", lock); + } +#endif // DEVELOPMENT || DEBUG + hw_lock_lock_nopreempt(&lock->hwlock, LCK_GRP_NULL); +} + +void +lck_spin_lock_nopreempt_grp(lck_spin_t *lock, lck_grp_t *grp) +{ +#pragma unused(grp) +#if DEVELOPMENT || DEBUG + if (lock->type != LCK_SPIN_TYPE) { panic("Invalid spinlock %p", lock); -#endif // DEVELOPMENT || DEBUG - hw_lock_lock_nopreempt(&lock->hwlock); + } +#endif // DEVELOPMENT || DEBUG + hw_lock_lock_nopreempt(&lock->hwlock, grp); } /* @@ -619,7 +695,14 @@ lck_spin_lock_nopreempt(lck_spin_t *lock) int lck_spin_try_lock(lck_spin_t *lock) { - return hw_lock_try(&lock->hwlock); + return hw_lock_try(&lock->hwlock, LCK_GRP_NULL); +} + +int +lck_spin_try_lock_grp(lck_spin_t *lock, 
lck_grp_t *grp) +{ +#pragma unused(grp) + return hw_lock_try(&lock->hwlock, grp); } /* @@ -628,7 +711,14 @@ lck_spin_try_lock(lck_spin_t *lock) int lck_spin_try_lock_nopreempt(lck_spin_t *lock) { - return hw_lock_try_nopreempt(&lock->hwlock); + return hw_lock_try_nopreempt(&lock->hwlock, LCK_GRP_NULL); +} + +int +lck_spin_try_lock_nopreempt_grp(lck_spin_t *lock, lck_grp_t *grp) +{ +#pragma unused(grp) + return hw_lock_try_nopreempt(&lock->hwlock, grp); } /* @@ -637,12 +727,14 @@ lck_spin_try_lock_nopreempt(lck_spin_t *lock) void lck_spin_unlock(lck_spin_t *lock) { -#if DEVELOPMENT || DEBUG - if ((LCK_MTX_STATE_TO_THREAD(lock->lck_spin_data) != current_thread()) && LOCK_CORRECTNESS_PANIC()) +#if DEVELOPMENT || DEBUG + if ((LCK_MTX_STATE_TO_THREAD(lock->lck_spin_data) != current_thread()) && LOCK_CORRECTNESS_PANIC()) { panic("Spinlock not owned by thread %p = %lx", lock, lock->lck_spin_data); - if (lock->type != LCK_SPIN_TYPE) + } + if (lock->type != LCK_SPIN_TYPE) { panic("Invalid spinlock type %p", lock); -#endif // DEVELOPMENT || DEBUG + } +#endif // DEVELOPMENT || DEBUG hw_lock_unlock(&lock->hwlock); } @@ -652,12 +744,14 @@ lck_spin_unlock(lck_spin_t *lock) void lck_spin_unlock_nopreempt(lck_spin_t *lock) { -#if DEVELOPMENT || DEBUG - if ((LCK_MTX_STATE_TO_THREAD(lock->lck_spin_data) != current_thread()) && LOCK_CORRECTNESS_PANIC()) +#if DEVELOPMENT || DEBUG + if ((LCK_MTX_STATE_TO_THREAD(lock->lck_spin_data) != current_thread()) && LOCK_CORRECTNESS_PANIC()) { panic("Spinlock not owned by thread %p = %lx", lock, lock->lck_spin_data); - if (lock->type != LCK_SPIN_TYPE) + } + if (lock->type != LCK_SPIN_TYPE) { panic("Invalid spinlock type %p", lock); -#endif // DEVELOPMENT || DEBUG + } +#endif // DEVELOPMENT || DEBUG hw_lock_unlock_nopreempt(&lock->hwlock); } @@ -666,11 +760,12 @@ lck_spin_unlock_nopreempt(lck_spin_t *lock) */ void lck_spin_destroy( - lck_spin_t * lck, - lck_grp_t * grp) + lck_spin_t * lck, + lck_grp_t * grp) { - if (lck->lck_spin_data == LCK_SPIN_TAG_DESTROYED) + if (lck->lck_spin_data == LCK_SPIN_TAG_DESTROYED) { return; + } lck->lck_spin_data = LCK_SPIN_TAG_DESTROYED; lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN); lck_grp_deallocate(grp); @@ -681,7 +776,8 @@ lck_spin_destroy( * NOT SAFE: To be used only by kernel debugger to avoid deadlock. */ boolean_t -kdp_lck_spin_is_acquired(lck_spin_t *lck) { +kdp_lck_spin_is_acquired(lck_spin_t *lck) +{ if (not_in_kdp) { panic("panic: spinlock acquired check done outside of kernel debugger"); } @@ -695,10 +791,10 @@ kdp_lck_spin_is_acquired(lck_spin_t *lck) { */ void usimple_lock_init( - usimple_lock_t l, - unsigned short tag) + usimple_lock_t l, + unsigned short tag) { -#ifndef MACHINE_SIMPLE_LOCK +#ifndef MACHINE_SIMPLE_LOCK USLDBG(usld_lock_init(l, tag)); hw_lock_init(&l->lck_spin_data); #else @@ -715,22 +811,24 @@ usimple_lock_init( * maintaining preemption state. 
*/ void -usimple_lock( - usimple_lock_t l) +(usimple_lock)( + usimple_lock_t l + LCK_GRP_ARG(lck_grp_t *grp)) { -#ifndef MACHINE_SIMPLE_LOCK +#ifndef MACHINE_SIMPLE_LOCK pc_t pc; OBTAIN_PC(pc, l); USLDBG(usld_lock_pre(l, pc)); - if (!hw_lock_to(&l->lck_spin_data, LockTimeOut)) /* Try to get the lock - * with a timeout */ + if (!hw_lock_to(&l->lck_spin_data, LockTimeOut, LCK_GRP_ARG(grp))) { /* Try to get the lock + * with a timeout */ panic("simple lock deadlock detection - l=%p, cpu=%d, ret=%p", &l, cpu_number(), pc); + } USLDBG(usld_lock_post(l, pc)); #else - simple_lock((simple_lock_t) l); + simple_lock((simple_lock_t) l, LCK_GRP_PROBEARG(grp)); #endif } @@ -745,10 +843,10 @@ extern void sync(void); * maintaining preemption state. */ void -usimple_unlock( - usimple_lock_t l) +(usimple_unlock)( + usimple_lock_t l) { -#ifndef MACHINE_SIMPLE_LOCK +#ifndef MACHINE_SIMPLE_LOCK pc_t pc; OBTAIN_PC(pc, l); @@ -756,7 +854,7 @@ usimple_unlock( sync(); hw_lock_unlock(&l->lck_spin_data); #else - simple_unlock((simple_lock_t) l); + simple_unlock((simple_lock_t)l); #endif } @@ -773,36 +871,38 @@ usimple_unlock( * behavior from the original assembly-language code, but * doesn't it make sense to log misses? XXX */ -unsigned int -usimple_lock_try( - usimple_lock_t l) +unsigned +int +(usimple_lock_try)( + usimple_lock_t l + LCK_GRP_ARG(lck_grp_t *grp)) { -#ifndef MACHINE_SIMPLE_LOCK +#ifndef MACHINE_SIMPLE_LOCK pc_t pc; unsigned int success; OBTAIN_PC(pc, l); USLDBG(usld_lock_try_pre(l, pc)); - if ((success = hw_lock_try(&l->lck_spin_data))) { + if ((success = hw_lock_try(&l->lck_spin_data LCK_GRP_ARG(grp)))) { USLDBG(usld_lock_try_post(l, pc)); } return success; #else - return (simple_lock_try((simple_lock_t) l)); + return simple_lock_try((simple_lock_t) l, grp); #endif } -#if USLOCK_DEBUG +#if USLOCK_DEBUG /* * States of a usimple_lock. The default when initializing * a usimple_lock is setting it up for debug checking. */ -#define USLOCK_CHECKED 0x0001 /* lock is being checked */ -#define USLOCK_TAKEN 0x0002 /* lock has been taken */ -#define USLOCK_INIT 0xBAA0 /* lock has been initialized */ -#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED) -#define USLOCK_CHECKING(l) (uslock_check && \ - ((l)->debug.state & USLOCK_CHECKED)) +#define USLOCK_CHECKED 0x0001 /* lock is being checked */ +#define USLOCK_TAKEN 0x0002 /* lock has been taken */ +#define USLOCK_INIT 0xBAA0 /* lock has been initialized */ +#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED) +#define USLOCK_CHECKING(l) (uslock_check && \ + ((l)->debug.state & USLOCK_CHECKED)) /* * Trace activities of a particularly interesting lock. @@ -816,11 +916,12 @@ void usl_trace(usimple_lock_t, int, pc_t, const char *); */ void usld_lock_init( - usimple_lock_t l, - __unused unsigned short tag) + usimple_lock_t l, + __unused unsigned short tag) { - if (l == USIMPLE_LOCK_NULL) + if (l == USIMPLE_LOCK_NULL) { panic("lock initialization: null lock pointer"); + } l->lock_type = USLOCK_TAG; l->debug.state = uslock_check ? 
USLOCK_INITIALIZED : 0; l->debug.lock_cpu = l->debug.unlock_cpu = 0; @@ -839,16 +940,19 @@ usld_lock_init( */ int usld_lock_common_checks( - usimple_lock_t l, - const char *caller) + usimple_lock_t l, + const char *caller) { - if (l == USIMPLE_LOCK_NULL) + if (l == USIMPLE_LOCK_NULL) { panic("%s: null lock pointer", caller); - if (l->lock_type != USLOCK_TAG) + } + if (l->lock_type != USLOCK_TAG) { panic("%s: 0x%x is not a usimple lock", caller, (integer_t) l); - if (!(l->debug.state & USLOCK_INIT)) + } + if (!(l->debug.state & USLOCK_INIT)) { panic("%s: 0x%x is not an initialized lock", - caller, (integer_t) l); + caller, (integer_t) l); + } return USLOCK_CHECKING(l); } @@ -860,14 +964,15 @@ usld_lock_common_checks( /* ARGSUSED */ void usld_lock_pre( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { const char *caller = "usimple_lock"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } /* * Note that we have a weird case where we are getting a lock when we are @@ -880,9 +985,9 @@ usld_lock_pre( if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread && l->debug.lock_thread == (void *) current_thread()) { printf("%s: lock 0x%x already locked (at %p) by", - caller, (integer_t) l, l->debug.lock_pc); + caller, (integer_t) l, l->debug.lock_pc); printf(" current thread %p (new attempt at pc %p)\n", - l->debug.lock_thread, pc); + l->debug.lock_thread, pc); panic("%s", caller); } mp_disable_preemption(); @@ -899,22 +1004,25 @@ */ void usld_lock_post( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { int mycpu; const char *caller = "successful usimple_lock"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } - if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) + if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) { panic("%s: lock 0x%x became uninitialized", - caller, (integer_t) l); - if ((l->debug.state & USLOCK_TAKEN)) + caller, (integer_t) l); + } + if ((l->debug.state & USLOCK_TAKEN)) { panic("%s: lock 0x%x became TAKEN by someone else", - caller, (integer_t) l); + caller, (integer_t) l); + } mycpu = cpu_number(); l->debug.lock_thread = (void *) current_thread(); @@ -936,27 +1044,30 @@ usld_lock_post( */ void usld_unlock( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { int mycpu; const char *caller = "usimple_unlock"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } mycpu = cpu_number(); - if (!(l->debug.state & USLOCK_TAKEN)) + if (!(l->debug.state & USLOCK_TAKEN)) { panic("%s: lock 0x%x hasn't been taken", - caller, (integer_t) l); - if (l->debug.lock_thread != (void *) current_thread()) + caller, (integer_t) l); + } + if (l->debug.lock_thread != (void *) current_thread()) { panic("%s: unlocking lock 0x%x, owned by thread %p", - caller, (integer_t) l, l->debug.lock_thread); + caller, (integer_t) l, l->debug.lock_thread); + } if (l->debug.lock_cpu != mycpu) { printf("%s: unlocking lock 0x%x on cpu 0x%x", - caller, (integer_t) l, mycpu); + caller, (integer_t) l, mycpu); printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu); panic("%s", caller); } @@ -978,13 +1089,14 @@ usld_unlock( */ void usld_lock_try_pre( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { const char *caller = "usimple_lock_try"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } mp_disable_preemption(); usl_trace(l, cpu_number(),
pc, caller); mp_enable_preemption(); @@ -1001,21 +1113,24 @@ usld_lock_try_pre( */ void usld_lock_try_post( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { int mycpu; const char *caller = "successful usimple_lock_try"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } - if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) + if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) { panic("%s: lock 0x%x became uninitialized", - caller, (integer_t) l); - if ((l->debug.state & USLOCK_TAKEN)) + caller, (integer_t) l); + } + if ((l->debug.state & USLOCK_TAKEN)) { panic("%s: lock 0x%x became TAKEN by someone else", - caller, (integer_t) l); + caller, (integer_t) l); + } mycpu = cpu_number(); l->debug.lock_thread = (void *) current_thread(); @@ -1038,10 +1153,10 @@ unsigned int lock_seq; void usl_trace( - usimple_lock_t l, - int mycpu, - pc_t pc, - const char *op_name) + usimple_lock_t l, + int mycpu, + pc_t pc, + const char *op_name) { if (traced_lock == l) { XPR(XPR_SLOCK, @@ -1053,7 +1168,7 @@ usl_trace( } -#endif /* USLOCK_DEBUG */ +#endif /* USLOCK_DEBUG */ /* * The C portion of the shared/exclusive locks package. @@ -1063,11 +1178,11 @@ usl_trace( * compute the deadline to spin against when * waiting for a change of state on a lck_rw_t */ -#if __SMP__ +#if __SMP__ static inline uint64_t lck_rw_deadline_for_spin(lck_rw_t *lck) { - lck_rw_word_t word; + lck_rw_word_t word; word.data = ordered_load_rw(lck); if (word.can_sleep) { @@ -1082,46 +1197,52 @@ lck_rw_deadline_for_spin(lck_rw_t *lck) * to be at 0, we'll not bother spinning since the latency for this to happen is * unpredictable... */ - return (mach_absolute_time()); + return mach_absolute_time(); } - return (mach_absolute_time() + MutexSpin); - } else - return (mach_absolute_time() + (100000LL * 1000000000LL)); + return mach_absolute_time() + MutexSpin; + } else { + return mach_absolute_time() + (100000LL * 1000000000LL); + } } -#endif // __SMP__ +#endif // __SMP__ static boolean_t lck_rw_drain_status(lck_rw_t *lock, uint32_t status_mask, boolean_t wait __unused) { -#if __SMP__ - uint64_t deadline = 0; - uint32_t data; +#if __SMP__ + uint64_t deadline = 0; + uint32_t data; - if (wait) + if (wait) { deadline = lck_rw_deadline_for_spin(lock); + } - for ( ; ; ) { + for (;;) { data = load_exclusive32(&lock->lck_rw_data, memory_order_acquire_smp); - if ((data & status_mask) == 0) + if ((data & status_mask) == 0) { break; - if (wait) + } + if (wait) { wait_for_event(); - else + } else { clear_exclusive(); - if (!wait || (mach_absolute_time() >= deadline)) + } + if (!wait || (mach_absolute_time() >= deadline)) { return FALSE; + } } clear_exclusive(); return TRUE; #else - uint32_t data; + uint32_t data; data = ordered_load_rw(lock); - if ((data & status_mask) == 0) + if ((data & status_mask) == 0) { return TRUE; - else + } else { return FALSE; -#endif // __SMP__ + } +#endif // __SMP__ } /* @@ -1131,13 +1252,13 @@ static inline void lck_rw_interlock_spin(lck_rw_t *lock) { #if __SMP__ - uint32_t data; + uint32_t data; - for ( ; ; ) { + for (;;) { data = load_exclusive32(&lock->lck_rw_data, memory_order_relaxed); - if (data & LCK_RW_INTERLOCK) + if (data & LCK_RW_INTERLOCK) { wait_for_event(); - else { + } else { clear_exclusive(); return; } @@ -1155,9 +1276,9 @@ lck_rw_interlock_spin(lck_rw_t *lock) static inline boolean_t lck_interlock_lock(lck_rw_t *lck) { - boolean_t istate; + boolean_t istate; - istate = ml_set_interrupts_enabled(FALSE); + istate = 
ml_set_interrupts_enabled(FALSE); lck_rw_ilk_lock(lck); return istate; } @@ -1170,24 +1291,25 @@ lck_interlock_unlock(lck_rw_t *lck, boolean_t istate) } -#define LCK_RW_GRAB_WANT 0 -#define LCK_RW_GRAB_SHARED 1 +#define LCK_RW_GRAB_WANT 0 +#define LCK_RW_GRAB_SHARED 1 static boolean_t lck_rw_grab(lck_rw_t *lock, int mode, boolean_t wait) { - uint64_t deadline = 0; - uint32_t data, prev; - boolean_t do_exch; + uint64_t deadline = 0; + uint32_t data, prev; + boolean_t do_exch; #if __SMP__ - if (wait) + if (wait) { deadline = lck_rw_deadline_for_spin(lock); + } #else - wait = FALSE; // Don't spin on UP systems + wait = FALSE; // Don't spin on UP systems #endif - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { atomic_exchange_abort(); @@ -1200,23 +1322,26 @@ lck_rw_grab(lck_rw_t *lock, int mode, boolean_t wait) data |= LCK_RW_WANT_EXCL; do_exch = TRUE; } - } else { // LCK_RW_GRAB_SHARED + } else { // LCK_RW_GRAB_SHARED if (((data & (LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE)) == 0) || - (((data & LCK_RW_SHARED_MASK)) && ((data & LCK_RW_PRIV_EXCL) == 0))) { + (((data & LCK_RW_SHARED_MASK)) && ((data & LCK_RW_PRIV_EXCL) == 0))) { data += LCK_RW_SHARED_READER; do_exch = TRUE; } } if (do_exch) { - if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) + if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) { return TRUE; + } } else { - if (wait) // Non-waiting + if (wait) { // Non-waiting wait_for_event(); - else + } else { atomic_exchange_abort(); - if (!wait || (mach_absolute_time() >= deadline)) + } + if (!wait || (mach_absolute_time() >= deadline)) { return FALSE; + } } } } @@ -1227,13 +1352,14 @@ lck_rw_grab(lck_rw_t *lock, int mode, boolean_t wait) */ lck_rw_t * lck_rw_alloc_init( - lck_grp_t *grp, - lck_attr_t *attr) + lck_grp_t *grp, + lck_attr_t *attr) { - lck_rw_t *lck; + lck_rw_t *lck; - if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0) + if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0) { lck_rw_init(lck, grp, attr); + } return lck; } @@ -1243,8 +1369,8 @@ lck_rw_alloc_init( */ void lck_rw_free( - lck_rw_t *lck, - lck_grp_t *grp) + lck_rw_t *lck, + lck_grp_t *grp) { lck_rw_destroy(lck, grp); kfree(lck, sizeof(lck_rw_t)); @@ -1255,16 +1381,18 @@ lck_rw_free( */ void lck_rw_init( - lck_rw_t *lck, - lck_grp_t *grp, - lck_attr_t *attr) + lck_rw_t *lck, + lck_grp_t *grp, + lck_attr_t *attr) { - if (attr == LCK_ATTR_NULL) + if (attr == LCK_ATTR_NULL) { attr = &LockDefaultLckAttr; + } memset(lck, 0, sizeof(lck_rw_t)); lck->lck_rw_can_sleep = TRUE; - if ((attr->lck_attr_val & LCK_ATTR_RW_SHARED_PRIORITY) == 0) + if ((attr->lck_attr_val & LCK_ATTR_RW_SHARED_PRIORITY) == 0) { lck->lck_rw_priv_excl = TRUE; + } lck_grp_reference(grp); lck_grp_lckcnt_incr(grp, LCK_TYPE_RW); @@ -1276,11 +1404,12 @@ lck_rw_init( */ void lck_rw_destroy( - lck_rw_t *lck, - lck_grp_t *grp) + lck_rw_t *lck, + lck_grp_t *grp) { - if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED) + if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED) { return; + } #if MACH_LDEBUG lck_rw_assert(lck, LCK_RW_ASSERT_NOTHELD); #endif @@ -1295,15 +1424,16 @@ lck_rw_destroy( */ void lck_rw_lock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type) + lck_rw_t *lck, + lck_rw_type_t lck_rw_type) { - if (lck_rw_type == LCK_RW_TYPE_SHARED) + if (lck_rw_type == LCK_RW_TYPE_SHARED) { lck_rw_lock_shared(lck); - else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) + } else if (lck_rw_type == 
LCK_RW_TYPE_EXCLUSIVE) { lck_rw_lock_exclusive(lck); - else + } else { panic("lck_rw_lock(): Invalid RW lock type: %x", lck_rw_type); + } } /* @@ -1312,17 +1442,18 @@ lck_rw_lock( void lck_rw_lock_exclusive(lck_rw_t *lock) { - thread_t thread = current_thread(); + thread_t thread = current_thread(); thread->rwlock_count++; if (atomic_test_and_set32(&lock->lck_rw_data, - (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK), - LCK_RW_WANT_EXCL, memory_order_acquire_smp, FALSE)) { -#if CONFIG_DTRACE + (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK), + LCK_RW_WANT_EXCL, memory_order_acquire_smp, FALSE)) { +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lock, DTRACE_RW_EXCL); -#endif /* CONFIG_DTRACE */ - } else +#endif /* CONFIG_DTRACE */ + } else { lck_rw_lock_exclusive_gen(lock); + } #if MACH_ASSERT thread_t owner = ordered_load_rw_owner(lock); assertf(owner == THREAD_NULL, "state=0x%x, owner=%p", ordered_load_rw(lock), owner); @@ -1336,10 +1467,10 @@ lck_rw_lock_exclusive(lck_rw_t *lock) void lck_rw_lock_shared(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; current_thread()->rwlock_count++; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); if (data & (LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK)) { atomic_exchange_abort(); @@ -1347,17 +1478,18 @@ lck_rw_lock_shared(lck_rw_t *lock) break; } data += LCK_RW_SHARED_READER; - if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) + if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) { break; + } cpu_pause(); } #if MACH_ASSERT thread_t owner = ordered_load_rw_owner(lock); assertf(owner == THREAD_NULL, "state=0x%x, owner=%p", ordered_load_rw(lock), owner); #endif -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, lock, DTRACE_RW_SHARED); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ return; } @@ -1367,9 +1499,9 @@ lck_rw_lock_shared(lck_rw_t *lock) boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { atomic_exchange_abort(); @@ -1378,29 +1510,33 @@ lck_rw_lock_shared_to_exclusive(lck_rw_t *lock) } if (data & LCK_RW_WANT_UPGRADE) { data -= LCK_RW_SHARED_READER; - if ((data & LCK_RW_SHARED_MASK) == 0) /* we were the last reader */ - data &= ~(LCK_RW_W_WAITING); /* so clear the wait indicator */ - if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) + if ((data & LCK_RW_SHARED_MASK) == 0) { /* we were the last reader */ + data &= ~(LCK_RW_W_WAITING); /* so clear the wait indicator */ + } + if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) { return lck_rw_lock_shared_to_exclusive_failure(lock, prev); + } } else { - data |= LCK_RW_WANT_UPGRADE; /* ask for WANT_UPGRADE */ - data -= LCK_RW_SHARED_READER; /* and shed our read count */ - if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) + data |= LCK_RW_WANT_UPGRADE; /* ask for WANT_UPGRADE */ + data -= LCK_RW_SHARED_READER; /* and shed our read count */ + if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) { break; + } } cpu_pause(); } - /* we now own the WANT_UPGRADE */ - if 
(data & LCK_RW_SHARED_MASK) /* check to see if all of the readers are drained */ - lck_rw_lock_shared_to_exclusive_success(lock); /* if not, we need to go wait */ + /* we now own the WANT_UPGRADE */ + if (data & LCK_RW_SHARED_MASK) { /* check to see if all of the readers are drained */ + lck_rw_lock_shared_to_exclusive_success(lock); /* if not, we need to go wait */ + } #if MACH_ASSERT thread_t owner = ordered_load_rw_owner(lock); assertf(owner == THREAD_NULL, "state=0x%x, owner=%p", ordered_load_rw(lock), owner); #endif ordered_store_rw_owner(lock, current_thread()); -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lock, 0); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ return TRUE; } @@ -1415,11 +1551,11 @@ lck_rw_lock_shared_to_exclusive(lck_rw_t *lock) */ static boolean_t lck_rw_lock_shared_to_exclusive_failure( - lck_rw_t *lck, - uint32_t prior_lock_state) + lck_rw_t *lck, + uint32_t prior_lock_state) { - thread_t thread = current_thread(); - uint32_t rwlock_count; + thread_t thread = current_thread(); + uint32_t rwlock_count; /* Check if dropping the lock means that we need to unpromote */ rwlock_count = thread->rwlock_count--; @@ -1429,7 +1565,7 @@ lck_rw_lock_shared_to_exclusive_failure( } #endif if ((prior_lock_state & LCK_RW_W_WAITING) && - ((prior_lock_state & LCK_RW_SHARED_MASK) == LCK_RW_SHARED_READER)) { + ((prior_lock_state & LCK_RW_SHARED_MASK) == LCK_RW_SHARED_READER)) { /* * Someone else has requested upgrade. * Since we've released the read lock, wake @@ -1444,9 +1580,9 @@ lck_rw_lock_shared_to_exclusive_failure( } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(lck), lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0); + VM_KERNEL_UNSLIDE_OR_PERM(lck), lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0); - return (FALSE); + return FALSE; } /* @@ -1459,26 +1595,25 @@ lck_rw_lock_shared_to_exclusive_failure( */ static boolean_t lck_rw_lock_shared_to_exclusive_success( - lck_rw_t *lock) -{ - __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lock); - int slept = 0; - lck_rw_word_t word; - wait_result_t res; - boolean_t istate; - boolean_t not_shared; - -#if CONFIG_DTRACE - uint64_t wait_interval = 0; - int readers_at_sleep = 0; - boolean_t dtrace_ls_initialized = FALSE; - boolean_t dtrace_rwl_shared_to_excl_spin, dtrace_rwl_shared_to_excl_block, dtrace_ls_enabled = FALSE; + lck_rw_t *lock) +{ + __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lock); + int slept = 0; + lck_rw_word_t word; + wait_result_t res; + boolean_t istate; + boolean_t not_shared; + +#if CONFIG_DTRACE + uint64_t wait_interval = 0; + int readers_at_sleep = 0; + boolean_t dtrace_ls_initialized = FALSE; + boolean_t dtrace_rwl_shared_to_excl_spin, dtrace_rwl_shared_to_excl_block, dtrace_ls_enabled = FALSE; #endif while (!lck_rw_drain_status(lock, LCK_RW_SHARED_MASK, FALSE)) { - word.data = ordered_load_rw(lock); -#if CONFIG_DTRACE +#if CONFIG_DTRACE if (dtrace_ls_initialized == FALSE) { dtrace_ls_initialized = TRUE; dtrace_rwl_shared_to_excl_spin = (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN] != 0); @@ -1496,15 +1631,16 @@ lck_rw_lock_shared_to_exclusive_success( #endif KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_SPIN_CODE) | DBG_FUNC_START, - trace_lck, word.shared_count, 0, 0, 0); + trace_lck, word.shared_count, 0, 0, 0); not_shared = lck_rw_drain_status(lock, LCK_RW_SHARED_MASK, TRUE); 
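/* This waiting drain spins, bounded by the deadline computed in lck_rw_deadline_for_spin(), before we consider blocking. */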
KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_SPIN_CODE) | DBG_FUNC_END, - trace_lck, lock->lck_rw_shared_count, 0, 0, 0); + trace_lck, lock->lck_rw_shared_count, 0, 0, 0); - if (not_shared) + if (not_shared) { break; + } /* * if we get here, the spin deadline in lck_rw_wait_on_status() @@ -1512,20 +1648,19 @@ lck_rw_lock_shared_to_exclusive_success( * check to see if we're allowed to do a thread_block */ if (word.can_sleep) { - istate = lck_interlock_lock(lock); - + word.data = ordered_load_rw(lock); if (word.shared_count != 0) { KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_WAIT_CODE) | DBG_FUNC_START, - trace_lck, word.shared_count, 0, 0, 0); + trace_lck, word.shared_count, 0, 0, 0); word.w_waiting = 1; ordered_store_rw(lock, word.data); thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockUpgrade); res = assert_wait(LCK_RW_WRITER_EVENT(lock), - THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); + THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); lck_interlock_unlock(lock, istate); if (res == THREAD_WAITING) { @@ -1533,29 +1668,29 @@ lck_rw_lock_shared_to_exclusive_success( slept++; } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_WAIT_CODE) | DBG_FUNC_END, - trace_lck, res, slept, 0, 0); + trace_lck, res, slept, 0, 0); } else { lck_interlock_unlock(lock, istate); break; } } } -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * We infer whether we took the sleep/spin path above by checking readers_at_sleep. */ if (dtrace_ls_enabled == TRUE) { if (slept == 0) { - LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lock, mach_absolute_time() - wait_interval, 0); + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lock, mach_absolute_time() - wait_interval, 0); } else { - LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lock, + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lock, mach_absolute_time() - wait_interval, 1, (readers_at_sleep == 0 ? 
1 : 0), readers_at_sleep); } } LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lock, 1); #endif - return (TRUE); + return TRUE; } @@ -1563,32 +1698,36 @@ lck_rw_lock_shared_to_exclusive_success( * Routine: lck_rw_lock_exclusive_to_shared */ -void lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) +void +lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; assertf(lock->lck_rw_owner == current_thread(), "state=0x%x, owner=%p", lock->lck_rw_data, lock->lck_rw_owner); ordered_store_rw_owner(lock, THREAD_NULL); - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_release_smp); if (data & LCK_RW_INTERLOCK) { #if __SMP__ atomic_exchange_abort(); - lck_rw_interlock_spin(lock); /* wait for interlock to clear */ + lck_rw_interlock_spin(lock); /* wait for interlock to clear */ continue; #else panic("lck_rw_lock_exclusive_to_shared(): Interlock locked (%p): %x", lock, data); #endif // __SMP__ } data += LCK_RW_SHARED_READER; - if (data & LCK_RW_WANT_UPGRADE) + if (data & LCK_RW_WANT_UPGRADE) { data &= ~(LCK_RW_WANT_UPGRADE); - else + } else { data &= ~(LCK_RW_WANT_EXCL); - if (!((prev & LCK_RW_W_WAITING) && (prev & LCK_RW_PRIV_EXCL))) + } + if (!((prev & LCK_RW_W_WAITING) && (prev & LCK_RW_PRIV_EXCL))) { data &= ~(LCK_RW_W_WAITING); - if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_release_smp)) + } + if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_release_smp)) { break; + } cpu_pause(); } return lck_rw_lock_exclusive_to_shared_gen(lock, prev); @@ -1596,7 +1735,7 @@ void lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) /* * Routine: lck_rw_lock_exclusive_to_shared_gen - * Function: + * Function: * Fast path has already dropped * our exclusive state and bumped lck_rw_shared_count * all we need to do here is determine if anyone @@ -1604,11 +1743,11 @@ void lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) */ static void lck_rw_lock_exclusive_to_shared_gen( - lck_rw_t *lck, - uint32_t prior_lock_state) + lck_rw_t *lck, + uint32_t prior_lock_state) { - __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck); - lck_rw_word_t fake_lck; + __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck); + lck_rw_word_t fake_lck; /* * prior_lock state is a snapshot of the 1st word of the @@ -1619,7 +1758,7 @@ lck_rw_lock_exclusive_to_shared_gen( fake_lck.data = prior_lock_state; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START, - trace_lck, fake_lck->want_excl, fake_lck->want_upgrade, 0, 0); + trace_lck, fake_lck->want_excl, fake_lck->want_upgrade, 0, 0); /* * don't wake up anyone waiting to take the lock exclusively @@ -1629,11 +1768,12 @@ lck_rw_lock_exclusive_to_shared_gen( * wake up any waiting readers if we don't have any writers waiting, * or the lock is NOT marked as rw_priv_excl (writers have privilege) */ - if (!(fake_lck.priv_excl && fake_lck.w_waiting) && fake_lck.r_waiting) + if (!(fake_lck.priv_excl && fake_lck.w_waiting) && fake_lck.r_waiting) { thread_wakeup(LCK_RW_READER_EVENT(lck)); + } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END, - trace_lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0); + trace_lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0); #if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, lck, 0); @@ -1646,15 +1786,16 @@ lck_rw_lock_exclusive_to_shared_gen( */ 
boolean_t lck_rw_try_lock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type) + lck_rw_t *lck, + lck_rw_type_t lck_rw_type) { - if (lck_rw_type == LCK_RW_TYPE_SHARED) + if (lck_rw_type == LCK_RW_TYPE_SHARED) { return lck_rw_try_lock_shared(lck); - else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) + } else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) { return lck_rw_try_lock_exclusive(lck); - else + } else { panic("lck_rw_try_lock(): Invalid rw lock type: %x", lck_rw_type); + } return FALSE; } @@ -1662,11 +1803,12 @@ lck_rw_try_lock( * Routine: lck_rw_try_lock_shared */ -boolean_t lck_rw_try_lock_shared(lck_rw_t *lock) +boolean_t +lck_rw_try_lock_shared(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { #if __SMP__ @@ -1679,11 +1821,12 @@ boolean_t lck_rw_try_lock_shared(lck_rw_t *lock) } if (data & (LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE)) { atomic_exchange_abort(); - return FALSE; /* lock is busy */ + return FALSE; /* lock is busy */ } - data += LCK_RW_SHARED_READER; /* Increment reader refcount */ - if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) + data += LCK_RW_SHARED_READER; /* Increment reader refcount */ + if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) { break; + } cpu_pause(); } #if MACH_ASSERT @@ -1691,9 +1834,9 @@ boolean_t lck_rw_try_lock_shared(lck_rw_t *lock) assertf(owner == THREAD_NULL, "state=0x%x, owner=%p", ordered_load_rw(lock), owner); #endif current_thread()->rwlock_count++; -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, lock, DTRACE_RW_SHARED); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ return TRUE; } @@ -1702,12 +1845,13 @@ boolean_t lck_rw_try_lock_shared(lck_rw_t *lock) * Routine: lck_rw_try_lock_exclusive */ -boolean_t lck_rw_try_lock_exclusive(lck_rw_t *lock) +boolean_t +lck_rw_try_lock_exclusive(lck_rw_t *lock) { - uint32_t data, prev; - thread_t thread; + uint32_t data, prev; + thread_t thread; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { #if __SMP__ @@ -1723,8 +1867,9 @@ boolean_t lck_rw_try_lock_exclusive(lck_rw_t *lock) return FALSE; } data |= LCK_RW_WANT_EXCL; - if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) + if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_acquire_smp)) { break; + } cpu_pause(); } thread = current_thread(); @@ -1734,9 +1879,9 @@ boolean_t lck_rw_try_lock_exclusive(lck_rw_t *lock) assertf(owner == THREAD_NULL, "state=0x%x, owner=%p", ordered_load_rw(lock), owner); #endif ordered_store_rw_owner(lock, thread); -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, lock, DTRACE_RW_EXCL); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ return TRUE; } @@ -1746,15 +1891,16 @@ boolean_t lck_rw_try_lock_exclusive(lck_rw_t *lock) */ void lck_rw_unlock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type) + lck_rw_t *lck, + lck_rw_type_t lck_rw_type) { - if (lck_rw_type == LCK_RW_TYPE_SHARED) + if (lck_rw_type == LCK_RW_TYPE_SHARED) { lck_rw_unlock_shared(lck); - else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) + } else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) { lck_rw_unlock_exclusive(lck); - else + } else { panic("lck_rw_unlock(): Invalid RW lock type: %d", 
lck_rw_type); + } } @@ -1763,16 +1909,17 @@ lck_rw_unlock( */ void lck_rw_unlock_shared( - lck_rw_t *lck) + lck_rw_t *lck) { - lck_rw_type_t ret; + lck_rw_type_t ret; assertf(lck->lck_rw_owner == THREAD_NULL, "state=0x%x, owner=%p", lck->lck_rw_data, lck->lck_rw_owner); assertf(lck->lck_rw_shared_count > 0, "shared_count=0x%x", lck->lck_rw_shared_count); ret = lck_rw_done(lck); - if (ret != LCK_RW_TYPE_SHARED) + if (ret != LCK_RW_TYPE_SHARED) { panic("lck_rw_unlock_shared(): lock %p held in mode: %d", lck, ret); + } } @@ -1781,15 +1928,16 @@ lck_rw_unlock_shared( */ void lck_rw_unlock_exclusive( - lck_rw_t *lck) + lck_rw_t *lck) { - lck_rw_type_t ret; + lck_rw_type_t ret; assertf(lck->lck_rw_owner == current_thread(), "state=0x%x, owner=%p", lck->lck_rw_data, lck->lck_rw_owner); ret = lck_rw_done(lck); - if (ret != LCK_RW_TYPE_EXCLUSIVE) + if (ret != LCK_RW_TYPE_EXCLUSIVE) { panic("lck_rw_unlock_exclusive(): lock %p held in mode: %d", lck, ret); + } } @@ -1798,19 +1946,19 @@ lck_rw_unlock_exclusive( */ static void lck_rw_lock_exclusive_gen( - lck_rw_t *lock) + lck_rw_t *lock) { - __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lock); - lck_rw_word_t word; - int slept = 0; - boolean_t gotlock = 0; - boolean_t not_shared_or_upgrade = 0; - wait_result_t res = 0; - boolean_t istate; + __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lock); + lck_rw_word_t word; + int slept = 0; + boolean_t gotlock = 0; + boolean_t not_shared_or_upgrade = 0; + wait_result_t res = 0; + boolean_t istate; -#if CONFIG_DTRACE +#if CONFIG_DTRACE boolean_t dtrace_ls_initialized = FALSE; - boolean_t dtrace_rwl_excl_spin, dtrace_rwl_excl_block, dtrace_ls_enabled= FALSE; + boolean_t dtrace_rwl_excl_spin, dtrace_rwl_excl_block, dtrace_ls_enabled = FALSE; uint64_t wait_interval = 0; int readers_at_sleep = 0; #endif @@ -1819,8 +1967,7 @@ lck_rw_lock_exclusive_gen( * Try to acquire the lck_rw_want_excl bit. */ while (!lck_rw_grab(lock, LCK_RW_GRAB_WANT, FALSE)) { - -#if CONFIG_DTRACE +#if CONFIG_DTRACE if (dtrace_ls_initialized == FALSE) { dtrace_ls_initialized = TRUE; dtrace_rwl_excl_spin = (lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] != 0); @@ -1843,8 +1990,9 @@ lck_rw_lock_exclusive_gen( KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_END, trace_lck, 0, 0, gotlock, 0); - if (gotlock) + if (gotlock) { break; + } /* * if we get here, the deadline has expired w/o us * being able to grab the lock exclusively @@ -1852,12 +2000,10 @@ lck_rw_lock_exclusive_gen( */ word.data = ordered_load_rw(lock); if (word.can_sleep) { - istate = lck_interlock_lock(lock); word.data = ordered_load_rw(lock); if (word.want_excl) { - KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0); word.w_waiting = 1; @@ -1865,7 +2011,7 @@ lck_rw_lock_exclusive_gen( thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockWrite); res = assert_wait(LCK_RW_WRITER_EVENT(lock), - THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); + THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); lck_interlock_unlock(lock, istate); if (res == THREAD_WAITING) { @@ -1885,8 +2031,7 @@ lck_rw_lock_exclusive_gen( * Wait for readers (and upgrades) to finish... */ while (!lck_rw_drain_status(lock, LCK_RW_SHARED_MASK | LCK_RW_WANT_UPGRADE, FALSE)) { - -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * Either sleeping or spinning is happening, start * a timing of our delay interval now. 
If we set it @@ -1915,8 +2060,9 @@ lck_rw_lock_exclusive_gen( KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_END, trace_lck, 0, 0, not_shared_or_upgrade, 0); - if (not_shared_or_upgrade) + if (not_shared_or_upgrade) { break; + } /* * if we get here, the deadline has expired w/o us * being able to grab the lock exclusively @@ -1924,7 +2070,6 @@ lck_rw_lock_exclusive_gen( */ word.data = ordered_load_rw(lock); if (word.can_sleep) { - istate = lck_interlock_lock(lock); word.data = ordered_load_rw(lock); @@ -1936,7 +2081,7 @@ lck_rw_lock_exclusive_gen( thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockWrite); res = assert_wait(LCK_RW_WRITER_EVENT(lock), - THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); + THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); lck_interlock_unlock(lock, istate); if (res == THREAD_WAITING) { @@ -1956,7 +2101,7 @@ lck_rw_lock_exclusive_gen( } } -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * Decide what latencies we suffered that are Dtrace events. * If we have set wait_interval, then we either spun or slept. @@ -1968,7 +2113,7 @@ lck_rw_lock_exclusive_gen( */ if (dtrace_ls_enabled == TRUE) { if (slept == 0) { - LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_EXCL_SPIN, lock, + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_SPIN, lock, mach_absolute_time() - wait_interval, 1); } else { /* @@ -1977,27 +2122,28 @@ lck_rw_lock_exclusive_gen( * Notice that above we recorded this before we dropped * the interlock so the count is accurate. */ - LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_EXCL_BLOCK, lock, + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_BLOCK, lock, mach_absolute_time() - wait_interval, 1, (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep); } } LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lock, 1); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ } /* * Routine: lck_rw_done */ -lck_rw_type_t lck_rw_done(lck_rw_t *lock) +lck_rw_type_t +lck_rw_done(lck_rw_t *lock) { - uint32_t data, prev; - boolean_t once = FALSE; + uint32_t data, prev; + boolean_t once = FALSE; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->lck_rw_data, &prev, memory_order_release_smp); - if (data & LCK_RW_INTERLOCK) { /* wait for interlock to clear */ + if (data & LCK_RW_INTERLOCK) { /* wait for interlock to clear */ #if __SMP__ atomic_exchange_abort(); lck_rw_interlock_spin(lock); @@ -2006,19 +2152,21 @@ lck_rw_type_t lck_rw_done(lck_rw_t *lock) panic("lck_rw_done(): Interlock locked (%p): %x", lock, data); #endif // __SMP__ } - if (data & LCK_RW_SHARED_MASK) { /* lock is held shared */ + if (data & LCK_RW_SHARED_MASK) { /* lock is held shared */ assertf(lock->lck_rw_owner == THREAD_NULL, "state=0x%x, owner=%p", lock->lck_rw_data, lock->lck_rw_owner); data -= LCK_RW_SHARED_READER; - if ((data & LCK_RW_SHARED_MASK) == 0) /* if reader count has now gone to 0, check for waiters */ + if ((data & LCK_RW_SHARED_MASK) == 0) { /* if reader count has now gone to 0, check for waiters */ goto check_waiters; - } else { /* if reader count == 0, must be exclusive lock */ + } + } else { /* if reader count == 0, must be exclusive lock */ if (data & LCK_RW_WANT_UPGRADE) { data &= ~(LCK_RW_WANT_UPGRADE); } else { - if (data & LCK_RW_WANT_EXCL) + if (data & LCK_RW_WANT_EXCL) { data &= ~(LCK_RW_WANT_EXCL); - else /* lock is not 'owned', panic */ + } else { /* lock is not 'owned', panic */ panic("Releasing non-exclusive RW lock without a reader refcount!"); + } } if (!once) { // Only check for holder and clear it once @@ -2036,13 +2184,16 @@ check_waiters: */ if (prev & LCK_RW_W_WAITING) { 
data &= ~(LCK_RW_W_WAITING); - if ((prev & LCK_RW_PRIV_EXCL) == 0) + if ((prev & LCK_RW_PRIV_EXCL) == 0) { data &= ~(LCK_RW_R_WAITING); - } else + } + } else { data &= ~(LCK_RW_R_WAITING); + } } - if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_release_smp)) + if (atomic_exchange_complete32(&lock->lck_rw_data, prev, data, memory_order_release_smp)) { break; + } cpu_pause(); } return lck_rw_done_gen(lock, prev); @@ -2053,13 +2204,13 @@ check_waiters: * * called from the assembly language wrapper... * prior_lock_state is the value in the 1st - * word of the lock at the time of a successful + * word of the lock at the time of a successful * atomic compare and exchange with the new value... - * it represents the state of the lock before we + * it represents the state of the lock before we * decremented the rw_shared_count or cleared either - * rw_want_upgrade or rw_want_write and + * rw_want_upgrade or rw_want_write and * the lck_x_waiting bits... since the wrapper - * routine has already changed the state atomically, + * routine has already changed the state atomically, * we just need to decide if we should * wake up anyone and what value to return... we do * this by examining the state of the lock before @@ -2067,13 +2218,13 @@ check_waiters: */ static lck_rw_type_t lck_rw_done_gen( - lck_rw_t *lck, - uint32_t prior_lock_state) + lck_rw_t *lck, + uint32_t prior_lock_state) { - lck_rw_word_t fake_lck; - lck_rw_type_t lock_type; - thread_t thread; - uint32_t rwlock_count; + lck_rw_word_t fake_lck; + lck_rw_type_t lock_type; + thread_t thread; + uint32_t rwlock_count; /* * prior_lock state is a snapshot of the 1st word of the @@ -2084,23 +2235,27 @@ lck_rw_done_gen( fake_lck.data = prior_lock_state; if (fake_lck.shared_count <= 1) { - if (fake_lck.w_waiting) + if (fake_lck.w_waiting) { thread_wakeup(LCK_RW_WRITER_EVENT(lck)); + } - if (!(fake_lck.priv_excl && fake_lck.w_waiting) && fake_lck.r_waiting) + if (!(fake_lck.priv_excl && fake_lck.w_waiting) && fake_lck.r_waiting) { thread_wakeup(LCK_RW_READER_EVENT(lck)); + } } - if (fake_lck.shared_count) + if (fake_lck.shared_count) { lock_type = LCK_RW_TYPE_SHARED; - else + } else { lock_type = LCK_RW_TYPE_EXCLUSIVE; + } /* Check if dropping the lock means that we need to unpromote */ thread = current_thread(); rwlock_count = thread->rwlock_count--; #if MACH_LDEBUG - if (rwlock_count == 0) + if (rwlock_count == 0) { panic("rw lock count underflow for thread %p", thread); + } #endif if ((rwlock_count == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { /* sched_flags checked without lock, but will be rechecked while clearing */ @@ -2121,25 +2276,24 @@ lck_rw_done_gen( */ static void lck_rw_lock_shared_gen( - lck_rw_t *lck) + lck_rw_t *lck) { - __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck); - lck_rw_word_t word; - boolean_t gotlock = 0; - int slept = 0; - wait_result_t res = 0; - boolean_t istate; + __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lck); + lck_rw_word_t word; + boolean_t gotlock = 0; + int slept = 0; + wait_result_t res = 0; + boolean_t istate; -#if CONFIG_DTRACE +#if CONFIG_DTRACE uint64_t wait_interval = 0; int readers_at_sleep = 0; boolean_t dtrace_ls_initialized = FALSE; boolean_t dtrace_rwl_shared_spin, dtrace_rwl_shared_block, dtrace_ls_enabled = FALSE; #endif /* CONFIG_DTRACE */ - while ( !lck_rw_grab(lck, LCK_RW_GRAB_SHARED, FALSE)) { - -#if CONFIG_DTRACE + while (!lck_rw_grab(lck, LCK_RW_GRAB_SHARED, FALSE)) { +#if CONFIG_DTRACE if 
(dtrace_ls_initialized == FALSE) { dtrace_ls_initialized = TRUE; dtrace_rwl_shared_spin = (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_SPIN] != 0); @@ -2157,37 +2311,36 @@ lck_rw_lock_shared_gen( #endif KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_SPIN_CODE) | DBG_FUNC_START, - trace_lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, 0, 0); + trace_lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, 0, 0); gotlock = lck_rw_grab(lck, LCK_RW_GRAB_SHARED, TRUE); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_SPIN_CODE) | DBG_FUNC_END, - trace_lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, gotlock, 0); + trace_lck, lck->lck_rw_want_excl, lck->lck_rw_want_upgrade, gotlock, 0); - if (gotlock) + if (gotlock) { break; + } /* * if we get here, the deadline has expired w/o us * being able to grab the lock for read * check to see if we're allowed to do a thread_block */ if (lck->lck_rw_can_sleep) { - istate = lck_interlock_lock(lck); word.data = ordered_load_rw(lck); if ((word.want_excl || word.want_upgrade) && ((word.shared_count == 0) || word.priv_excl)) { - KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_WAIT_CODE) | DBG_FUNC_START, - trace_lck, word.want_excl, word.want_upgrade, 0, 0); + trace_lck, word.want_excl, word.want_upgrade, 0, 0); word.r_waiting = 1; ordered_store_rw(lck, word.data); thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockRead); res = assert_wait(LCK_RW_READER_EVENT(lck), - THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); + THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); lck_interlock_unlock(lck, istate); if (res == THREAD_WAITING) { @@ -2195,7 +2348,7 @@ lck_rw_lock_shared_gen( slept++; } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_WAIT_CODE) | DBG_FUNC_END, - trace_lck, res, slept, 0, 0); + trace_lck, res, slept, 0, 0); } else { word.shared_count++; ordered_store_rw(lck, word.data); @@ -2205,25 +2358,25 @@ lck_rw_lock_shared_gen( } } -#if CONFIG_DTRACE +#if CONFIG_DTRACE if (dtrace_ls_enabled == TRUE) { if (slept == 0) { - LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0); + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0); } else { - LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_BLOCK, lck, + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_BLOCK, lck, mach_absolute_time() - wait_interval, 0, (readers_at_sleep == 0 ? 
1 : 0), readers_at_sleep); } } LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, lck, 0); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ } void lck_rw_assert( - lck_rw_t *lck, - unsigned int type) + lck_rw_t *lck, + unsigned int type) { switch (type) { case LCK_RW_ASSERT_SHARED: @@ -2234,22 +2387,23 @@ lck_rw_assert( break; case LCK_RW_ASSERT_EXCLUSIVE: if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) && - (lck->lck_rw_shared_count == 0) && + (lck->lck_rw_shared_count == 0) && (lck->lck_rw_owner == current_thread())) { return; } break; case LCK_RW_ASSERT_HELD: - if (lck->lck_rw_shared_count != 0) - return; // Held shared + if (lck->lck_rw_shared_count != 0) { + return; // Held shared + } if ((lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) && (lck->lck_rw_owner == current_thread())) { - return; // Held exclusive + return; // Held exclusive } break; case LCK_RW_ASSERT_NOTHELD: if ((lck->lck_rw_shared_count == 0) && - !(lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) && + !(lck->lck_rw_want_excl || lck->lck_rw_want_upgrade) && (lck->lck_rw_owner == THREAD_NULL)) { return; } @@ -2266,7 +2420,8 @@ lck_rw_assert( * NOT SAFE: To be used only by kernel debugger to avoid deadlock. */ boolean_t -kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) { +kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) +{ if (not_in_kdp) { panic("panic: rw lock exclusive check done outside of kernel debugger"); } @@ -2282,26 +2437,27 @@ kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) { * Forward declaration */ -void +void lck_mtx_ext_init( - lck_mtx_ext_t * lck, - lck_grp_t * grp, - lck_attr_t * attr); + lck_mtx_ext_t * lck, + lck_grp_t * grp, + lck_attr_t * attr); /* * Routine: lck_mtx_alloc_init */ lck_mtx_t * lck_mtx_alloc_init( - lck_grp_t * grp, - lck_attr_t * attr) + lck_grp_t * grp, + lck_attr_t * attr) { lck_mtx_t *lck; - if ((lck = (lck_mtx_t *) kalloc(sizeof(lck_mtx_t))) != 0) + if ((lck = (lck_mtx_t *) kalloc(sizeof(lck_mtx_t))) != 0) { lck_mtx_init(lck, grp, attr); + } - return (lck); + return lck; } /* @@ -2309,11 +2465,11 @@ lck_mtx_alloc_init( */ void lck_mtx_free( - lck_mtx_t * lck, - lck_grp_t * grp) + lck_mtx_t * lck, + lck_grp_t * grp) { lck_mtx_destroy(lck, grp); - kfree((void *) lck, sizeof(lck_mtx_t)); + kfree(lck, sizeof(lck_mtx_t)); } /* @@ -2321,21 +2477,22 @@ lck_mtx_free( */ void lck_mtx_init( - lck_mtx_t * lck, - lck_grp_t * grp, - lck_attr_t * attr) + lck_mtx_t * lck, + lck_grp_t * grp, + lck_attr_t * attr) { -#ifdef BER_XXX +#ifdef BER_XXX lck_mtx_ext_t *lck_ext; #endif lck_attr_t *lck_attr; - if (attr != LCK_ATTR_NULL) + if (attr != LCK_ATTR_NULL) { lck_attr = attr; - else + } else { lck_attr = &LockDefaultLckAttr; + } -#ifdef BER_XXX +#ifdef BER_XXX if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) { if ((lck_ext = (lck_mtx_ext_t *) kalloc(sizeof(lck_mtx_ext_t))) != 0) { lck_mtx_ext_init(lck_ext, grp, lck_attr); @@ -2346,7 +2503,7 @@ lck_mtx_init( } else #endif { - lck->lck_mtx_ptr = NULL; // Clear any padding in the union fields below + lck->lck_mtx_ptr = NULL; // Clear any padding in the union fields below lck->lck_mtx_waiters = 0; lck->lck_mtx_pri = 0; lck->lck_mtx_type = LCK_MTX_TYPE; @@ -2361,17 +2518,18 @@ lck_mtx_init( */ void lck_mtx_init_ext( - lck_mtx_t * lck, - lck_mtx_ext_t * lck_ext, - lck_grp_t * grp, - lck_attr_t * attr) + lck_mtx_t * lck, + lck_mtx_ext_t * lck_ext, + lck_grp_t * grp, + lck_attr_t * attr) { lck_attr_t *lck_attr; - if (attr != LCK_ATTR_NULL) + if (attr != LCK_ATTR_NULL) { lck_attr = attr; - else + } else { lck_attr = 
&LockDefaultLckAttr; + } if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) { lck_mtx_ext_init(lck_ext, grp, lck_attr); @@ -2393,9 +2551,9 @@ lck_mtx_init_ext( */ void lck_mtx_ext_init( - lck_mtx_ext_t * lck, - lck_grp_t * grp, - lck_attr_t * attr) + lck_mtx_ext_t * lck, + lck_grp_t * grp, + lck_attr_t * attr) { bzero((void *) lck, sizeof(lck_mtx_ext_t)); @@ -2407,8 +2565,9 @@ lck_mtx_ext_init( } lck->lck_mtx_grp = grp; - if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT) + if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT) { lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT; + } } /* The slow versions */ @@ -2416,6 +2575,9 @@ static void lck_mtx_lock_contended(lck_mtx_t *lock, thread_t thread, boolean_t i static boolean_t lck_mtx_try_lock_contended(lck_mtx_t *lock, thread_t thread); static void lck_mtx_unlock_contended(lck_mtx_t *lock, thread_t thread, boolean_t interlocked); +/* The adaptive spin function */ +static spinwait_result_t lck_mtx_lock_contended_spinwait_arm(lck_mtx_t *lock, thread_t thread, boolean_t interlocked); + /* * Routine: lck_mtx_verify * @@ -2424,12 +2586,14 @@ static void lck_mtx_unlock_contended(lck_mtx_t *lock, thread_t thread, boolean_t static inline void lck_mtx_verify(lck_mtx_t *lock) { - if (lock->lck_mtx_type != LCK_MTX_TYPE) + if (lock->lck_mtx_type != LCK_MTX_TYPE) { panic("Invalid mutex %p", lock); -#if DEVELOPMENT || DEBUG - if (lock->lck_mtx_tag == LCK_MTX_TAG_DESTROYED) + } +#if DEVELOPMENT || DEBUG + if (lock->lck_mtx_tag == LCK_MTX_TAG_DESTROYED) { panic("Mutex destroyed %p", lock); -#endif /* DEVELOPMENT || DEBUG */ + } +#endif /* DEVELOPMENT || DEBUG */ } /* @@ -2441,11 +2605,12 @@ lck_mtx_verify(lck_mtx_t *lock) static inline void lck_mtx_check_preemption(lck_mtx_t *lock) { -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG int pl = get_preemption_level(); - if (pl != 0) + if (pl != 0) { panic("Attempt to take mutex with preemption disabled. Lock=%p, level=%d", lock, pl); + } #else (void)lock; #endif @@ -2457,14 +2622,14 @@ lck_mtx_check_preemption(lck_mtx_t *lock) void lck_mtx_lock(lck_mtx_t *lock) { - thread_t thread; + thread_t thread; lck_mtx_verify(lock); lck_mtx_check_preemption(lock); thread = current_thread(); if (atomic_compare_exchange(&lock->lck_mtx_data, 0, LCK_MTX_THREAD_TO_STATE(thread), - memory_order_acquire_smp, FALSE)) { -#if CONFIG_DTRACE + memory_order_acquire_smp, FALSE)) { +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, lock, 0); #endif /* CONFIG_DTRACE */ return; @@ -2473,79 +2638,254 @@ lck_mtx_lock(lck_mtx_t *lock) } /* - This is the slow version of mutex locking. + * This is the slow version of mutex locking. 
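+ * It is entered once the fast-path compare-exchange in lck_mtx_lock() has found the mutex owned; each iteration of the loop below first tries the adaptive-spin helper before blocking in lck_mtx_lock_wait().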
*/ static void NOINLINE lck_mtx_lock_contended(lck_mtx_t *lock, thread_t thread, boolean_t interlocked) { - thread_t holding_thread; - uintptr_t state; - int waiters; - - if (interlocked) - goto interlock_held; - - /* TODO: short-duration spin for on-core contention */ + thread_t holding_thread; + uintptr_t state; + int waiters = 0; + spinwait_result_t sw_res; /* Loop waiting until I see that the mutex is unowned */ - for ( ; ; ) { - interlock_lock(lock); -interlock_held: + for (;;) { + sw_res = lck_mtx_lock_contended_spinwait_arm(lock, thread, interlocked); + interlocked = FALSE; + + switch (sw_res) { + case SPINWAIT_ACQUIRED: + goto done; + case SPINWAIT_INTERLOCK: + goto set_owner; + default: + break; + } + state = ordered_load_mtx(lock); holding_thread = LCK_MTX_STATE_TO_THREAD(state); - if (holding_thread == NULL) + if (holding_thread == NULL) { break; + } ordered_store_mtx(lock, (state | LCK_ILOCK | ARM_LCK_WAITERS)); // Set waiters bit and wait lck_mtx_lock_wait(lock, holding_thread); /* returns interlock unlocked */ } +set_owner: /* Hooray, I'm the new owner! */ - waiters = lck_mtx_lock_acquire(lock); + state = ordered_load_mtx(lock); + + if (state & ARM_LCK_WAITERS) { + /* Skip lck_mtx_lock_acquire if there are no waiters. */ + waiters = lck_mtx_lock_acquire(lock); + } + state = LCK_MTX_THREAD_TO_STATE(thread); - if (waiters != 0) + if (waiters != 0) { state |= ARM_LCK_WAITERS; + } #if __SMP__ - state |= LCK_ILOCK; // Preserve interlock - ordered_store_mtx(lock, state); // Set ownership - interlock_unlock(lock); // Release interlock, enable preemption + state |= LCK_ILOCK; // Preserve interlock + ordered_store_mtx(lock, state); // Set ownership + interlock_unlock(lock); // Release interlock, enable preemption #else - ordered_store_mtx(lock, state); // Set ownership + ordered_store_mtx(lock, state); // Set ownership enable_preemption(); #endif + +done: load_memory_barrier(); -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, lock, 0); #endif /* CONFIG_DTRACE */ } +/* + * Routine: lck_mtx_lock_contended_spinwait_arm + * + * Invoked trying to acquire a mutex when there is contention but + * the holder is running on another processor. We spin for up to a maximum + * time waiting for the lock to be released.
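+ * The spin is abandoned early if the holder is observed to be off-core or idle, and SPINWAIT_INTERLOCK is returned if the mutex is found unowned while the interlock is held, letting the caller take ownership directly.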
+ */ +static spinwait_result_t +lck_mtx_lock_contended_spinwait_arm(lck_mtx_t *lock, thread_t thread, boolean_t interlocked) +{ + int has_interlock = (int)interlocked; +#if __SMP__ + __kdebug_only uintptr_t trace_lck = VM_KERNEL_UNSLIDE_OR_PERM(lock); + thread_t holder; + uint64_t overall_deadline; + uint64_t check_owner_deadline; + uint64_t cur_time; + spinwait_result_t retval = SPINWAIT_DID_SPIN; + int loopcount = 0; + uintptr_t state; + boolean_t istate; + + if (__improbable(!(lck_mtx_adaptive_spin_mode & ADAPTIVE_SPIN_ENABLE))) { + if (!has_interlock) { + interlock_lock(lock); + } + + return SPINWAIT_DID_NOT_SPIN; + } + + state = ordered_load_mtx(lock); + + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_START, + trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(LCK_MTX_STATE_TO_THREAD(state)), lock->lck_mtx_waiters, 0, 0); + + cur_time = mach_absolute_time(); + overall_deadline = cur_time + MutexSpin; + check_owner_deadline = cur_time; + + if (has_interlock) { + istate = ml_get_interrupts_enabled(); + } + + /* Snoop the lock state */ + state = ordered_load_mtx(lock); + + /* + * Spin while: + * - mutex is locked, and + * - it's locked as a spin lock, and + * - owner is running on another processor, and + * - owner (processor) is not idling, and + * - we haven't spun for long enough. + */ + do { + if (!(state & LCK_ILOCK) || has_interlock) { + if (!has_interlock) { + has_interlock = interlock_try_disable_interrupts(lock, &istate); + } + + if (has_interlock) { + state = ordered_load_mtx(lock); + holder = LCK_MTX_STATE_TO_THREAD(state); + + if (holder == NULL) { + retval = SPINWAIT_INTERLOCK; + + if (istate) { + ml_set_interrupts_enabled(istate); + } + + break; + } + + if (!(holder->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU) || + (holder->state & TH_IDLE)) { + if (loopcount == 0) { + retval = SPINWAIT_DID_NOT_SPIN; + } + + if (istate) { + ml_set_interrupts_enabled(istate); + } + + break; + } + + interlock_unlock_enable_interrupts(lock, istate); + has_interlock = 0; + } + } + + cur_time = mach_absolute_time(); + + if (cur_time >= overall_deadline) { + break; + } + + check_owner_deadline = cur_time + (MutexSpin / SPINWAIT_OWNER_CHECK_COUNT); + + if (cur_time < check_owner_deadline) { + machine_delay_until(check_owner_deadline - cur_time, check_owner_deadline); + } + + /* Snoop the lock state */ + state = ordered_load_mtx(lock); + + if (state == 0) { + /* Try to grab the lock. */ + if (os_atomic_cmpxchg(&lock->lck_mtx_data, + 0, LCK_MTX_THREAD_TO_STATE(thread), acquire)) { + retval = SPINWAIT_ACQUIRED; + break; + } + } + + loopcount++; + } while (TRUE); + +#if CONFIG_DTRACE + /* + * We've already kept a count via overall_deadline of how long we spun. + * If dtrace is active, then we compute backwards to decide how + * long we spun. + * + * Note that we record a different probe id depending on whether + * this is a direct or indirect mutex. This allows us to + * penalize only lock groups that have debug/stats enabled + * with dtrace processing if desired. + */ + if (__probable(lock->lck_mtx_tag != LCK_MTX_TAG_INDIRECT)) { + LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN, lock, + mach_absolute_time() - (overall_deadline - MutexSpin)); + } else { + LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_SPIN, lock, + mach_absolute_time() - (overall_deadline - MutexSpin)); + } + /* The lockstat acquire event is recorded by the caller. 
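+ * Subtracting MutexSpin from overall_deadline recovers the time at which the spin began, so no separate start timestamp is needed.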
*/ +#endif + + state = ordered_load_mtx(lock); + + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_END, + trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(LCK_MTX_STATE_TO_THREAD(state)), lock->lck_mtx_waiters, retval, 0); +#else /* __SMP__ */ + /* Spinwaiting is not useful on UP systems. */ +#pragma unused(lock, thread) + int retval = SPINWAIT_DID_NOT_SPIN; +#endif /* __SMP__ */ + if ((!has_interlock) && (retval != SPINWAIT_ACQUIRED)) { + /* We must own either the lock or the interlock on return. */ + interlock_lock(lock); + } + + return retval; +} + /* * Common code for mutex locking as spinlock */ static inline void lck_mtx_lock_spin_internal(lck_mtx_t *lock, boolean_t allow_held_as_mutex) { - uintptr_t state; + uintptr_t state; interlock_lock(lock); state = ordered_load_mtx(lock); if (LCK_MTX_STATE_TO_THREAD(state)) { - if (allow_held_as_mutex) + if (allow_held_as_mutex) { lck_mtx_lock_contended(lock, current_thread(), TRUE); - else + } else { // "Always" variants can never block. If the lock is held and blocking is not allowed // then someone is mixing always and non-always calls on the same lock, which is // forbidden. panic("Attempting to block on a lock taken as spin-always %p", lock); + } return; } - state &= ARM_LCK_WAITERS; // Preserve waiters bit - state |= (LCK_MTX_SPIN_TAG | LCK_ILOCK); // Add spin tag and maintain interlock + state &= ARM_LCK_WAITERS; // Preserve waiters bit + state |= (LCK_MTX_SPIN_TAG | LCK_ILOCK); // Add spin tag and maintain interlock ordered_store_mtx(lock, state); load_memory_barrier(); -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0); #endif /* CONFIG_DTRACE */ } @@ -2575,12 +2915,12 @@ lck_mtx_lock_spin_always(lck_mtx_t *lock) boolean_t lck_mtx_try_lock(lck_mtx_t *lock) { - thread_t thread = current_thread(); + thread_t thread = current_thread(); lck_mtx_verify(lock); if (atomic_compare_exchange(&lock->lck_mtx_data, 0, LCK_MTX_THREAD_TO_STATE(thread), - memory_order_acquire_smp, FALSE)) { -#if CONFIG_DTRACE + memory_order_acquire_smp, FALSE)) { +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, lock, 0); #endif /* CONFIG_DTRACE */ return TRUE; @@ -2591,11 +2931,11 @@ lck_mtx_try_lock(lck_mtx_t *lock) static boolean_t NOINLINE lck_mtx_try_lock_contended(lck_mtx_t *lock, thread_t thread) { - thread_t holding_thread; - uintptr_t state; - int waiters; + thread_t holding_thread; + uintptr_t state; + int waiters; -#if __SMP__ +#if __SMP__ interlock_lock(lock); state = ordered_load_mtx(lock); holding_thread = LCK_MTX_STATE_TO_THREAD(state); @@ -2606,8 +2946,9 @@ lck_mtx_try_lock_contended(lck_mtx_t *lock, thread_t thread) #else disable_preemption_for_thread(thread); state = ordered_load_mtx(lock); - if (state & LCK_ILOCK) + if (state & LCK_ILOCK) { panic("Unexpected interlock set (%p)", lock); + } holding_thread = LCK_MTX_STATE_TO_THREAD(state); if (holding_thread) { enable_preemption(); @@ -2615,17 +2956,18 @@ lck_mtx_try_lock_contended(lck_mtx_t *lock, thread_t thread) } state |= LCK_ILOCK; ordered_store_mtx(lock, state); -#endif // __SMP__ +#endif // __SMP__ waiters = lck_mtx_lock_acquire(lock); state = LCK_MTX_THREAD_TO_STATE(thread); - if (waiters != 0) + if (waiters != 0) { state |= ARM_LCK_WAITERS; + } #if __SMP__ - state |= LCK_ILOCK; // Preserve interlock - ordered_store_mtx(lock, state); // Set ownership - interlock_unlock(lock); // Release interlock, enable preemption + state |= LCK_ILOCK; // Preserve interlock + ordered_store_mtx(lock, state); // Set ownership + 
interlock_unlock(lock); // Release interlock, enable preemption #else - ordered_store_mtx(lock, state); // Set ownership + ordered_store_mtx(lock, state); // Set ownership enable_preemption(); #endif load_memory_barrier(); @@ -2635,28 +2977,30 @@ lck_mtx_try_lock_contended(lck_mtx_t *lock, thread_t thread) static inline boolean_t lck_mtx_try_lock_spin_internal(lck_mtx_t *lock, boolean_t allow_held_as_mutex) { - uintptr_t state; + uintptr_t state; - if (!interlock_try(lock)) + if (!interlock_try(lock)) { return FALSE; + } state = ordered_load_mtx(lock); - if(LCK_MTX_STATE_TO_THREAD(state)) { + if (LCK_MTX_STATE_TO_THREAD(state)) { // Lock is held as mutex - if (allow_held_as_mutex) + if (allow_held_as_mutex) { interlock_unlock(lock); - else + } else { // "Always" variants can never block. If the lock is held as a normal mutex // then someone is mixing always and non-always calls on the same lock, which is // forbidden. panic("Spin-mutex held as full mutex %p", lock); + } return FALSE; } - state &= ARM_LCK_WAITERS; // Preserve waiters bit - state |= (LCK_MTX_SPIN_TAG | LCK_ILOCK); // Add spin tag and maintain interlock + state &= ARM_LCK_WAITERS; // Preserve waiters bit + state |= (LCK_MTX_SPIN_TAG | LCK_ILOCK); // Add spin tag and maintain interlock ordered_store_mtx(lock, state); load_memory_barrier(); -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0); #endif /* CONFIG_DTRACE */ return TRUE; @@ -2688,22 +3032,23 @@ lck_mtx_try_lock_spin_always(lck_mtx_t *lock) void lck_mtx_unlock(lck_mtx_t *lock) { - thread_t thread = current_thread(); - uintptr_t state; - boolean_t ilk_held = FALSE; + thread_t thread = current_thread(); + uintptr_t state; + boolean_t ilk_held = FALSE; lck_mtx_verify(lock); state = ordered_load_mtx(lock); if (state & LCK_ILOCK) { - if(LCK_MTX_STATE_TO_THREAD(state) == (thread_t)LCK_MTX_SPIN_TAG) - ilk_held = TRUE; // Interlock is held by (presumably) this thread + if (LCK_MTX_STATE_TO_THREAD(state) == (thread_t)LCK_MTX_SPIN_TAG) { + ilk_held = TRUE; // Interlock is held by (presumably) this thread + } goto slow_case; } // Locked as a mutex if (atomic_compare_exchange(&lock->lck_mtx_data, LCK_MTX_THREAD_TO_STATE(thread), 0, - memory_order_release_smp, FALSE)) { -#if CONFIG_DTRACE + memory_order_release_smp, FALSE)) { +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lock, 0); #endif /* CONFIG_DTRACE */ return; @@ -2715,23 +3060,26 @@ slow_case: static void NOINLINE lck_mtx_unlock_contended(lck_mtx_t *lock, thread_t thread, boolean_t ilk_held) { - uintptr_t state; + uintptr_t state; if (ilk_held) { state = ordered_load_mtx(lock); } else { -#if __SMP__ +#if __SMP__ interlock_lock(lock); state = ordered_load_mtx(lock); - if (thread != LCK_MTX_STATE_TO_THREAD(state)) + if (thread != LCK_MTX_STATE_TO_THREAD(state)) { panic("lck_mtx_unlock(): Attempt to release lock not owned by thread (%p)", lock); + } #else disable_preemption_for_thread(thread); state = ordered_load_mtx(lock); - if (state & LCK_ILOCK) + if (state & LCK_ILOCK) { panic("lck_mtx_unlock(): Unexpected interlock set (%p)", lock); - if (thread != LCK_MTX_STATE_TO_THREAD(state)) + } + if (thread != LCK_MTX_STATE_TO_THREAD(state)) { panic("lck_mtx_unlock(): Attempt to release lock not owned by thread (%p)", lock); + } state |= LCK_ILOCK; ordered_store_mtx(lock, state); #endif @@ -2739,7 +3087,7 @@ lck_mtx_unlock_contended(lck_mtx_t *lock, thread_t thread, boolean_t ilk_held) lck_mtx_unlock_wakeup(lock, thread); state = ordered_load_mtx(lock); } else { - 
assertf(lock->lck_mtx_pri == 0, "pri=0x%x", lock->lck_mtx_pri); + assertf(lock->lck_mtx_pri == 0, "pri=0x%x", lock->lck_mtx_pri); } } state &= ARM_LCK_WAITERS; /* Clear state, retain waiters bit */ @@ -2752,7 +3100,7 @@ lck_mtx_unlock_contended(lck_mtx_t *lock, thread_t thread, boolean_t ilk_held) enable_preemption(); #endif -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, lock, 0); #endif /* CONFIG_DTRACE */ } @@ -2763,24 +3111,27 @@ lck_mtx_unlock_contended(lck_mtx_t *lock, thread_t thread, boolean_t ilk_held) void lck_mtx_assert(lck_mtx_t *lock, unsigned int type) { - thread_t thread, holder; - uintptr_t state; + thread_t thread, holder; + uintptr_t state; state = ordered_load_mtx(lock); holder = LCK_MTX_STATE_TO_THREAD(state); if (holder == (thread_t)LCK_MTX_SPIN_TAG) { - // Lock is held in spin mode, owner is unknown. - return; // Punt + // Lock is held in spin mode, owner is unknown. + return; // Punt } thread = current_thread(); if (type == LCK_MTX_ASSERT_OWNED) { - if (thread != holder) + if (thread != holder) { panic("lck_mtx_assert(): mutex (%p) owned", lock); + } } else if (type == LCK_MTX_ASSERT_NOTOWNED) { - if (thread == holder) + if (thread == holder) { panic("lck_mtx_assert(): mutex (%p) not owned", lock); - } else + } + } else { panic("lck_mtx_assert(): invalid arg (%u)", type); + } } /* @@ -2801,27 +3152,30 @@ lck_mtx_ilk_unlock(lck_mtx_t *lock) void lck_mtx_convert_spin(lck_mtx_t *lock) { - thread_t thread = current_thread(); - uintptr_t state; - int waiters; + thread_t thread = current_thread(); + uintptr_t state; + int waiters; state = ordered_load_mtx(lock); - if (LCK_MTX_STATE_TO_THREAD(state) == thread) - return; // Already owned as mutex, return - if ((state & LCK_ILOCK) == 0 || (LCK_MTX_STATE_TO_THREAD(state) != (thread_t)LCK_MTX_SPIN_TAG)) + if (LCK_MTX_STATE_TO_THREAD(state) == thread) { + return; // Already owned as mutex, return + } + if ((state & LCK_ILOCK) == 0 || (LCK_MTX_STATE_TO_THREAD(state) != (thread_t)LCK_MTX_SPIN_TAG)) { panic("lck_mtx_convert_spin: Not held as spinlock (%p)", lock); - state &= ~(LCK_MTX_THREAD_MASK); // Clear the spin tag + } + state &= ~(LCK_MTX_THREAD_MASK); // Clear the spin tag ordered_store_mtx(lock, state); - waiters = lck_mtx_lock_acquire(lock); // Acquire to manage priority boosts + waiters = lck_mtx_lock_acquire(lock); // Acquire to manage priority boosts state = LCK_MTX_THREAD_TO_STATE(thread); - if (waiters != 0) + if (waiters != 0) { state |= ARM_LCK_WAITERS; + } #if __SMP__ state |= LCK_ILOCK; - ordered_store_mtx(lock, state); // Set ownership - interlock_unlock(lock); // Release interlock, enable preemption + ordered_store_mtx(lock, state); // Set ownership + interlock_unlock(lock); // Release interlock, enable preemption #else - ordered_store_mtx(lock, state); // Set ownership + ordered_store_mtx(lock, state); // Set ownership enable_preemption(); #endif } @@ -2832,13 +3186,15 @@ lck_mtx_convert_spin(lck_mtx_t *lock) */ void lck_mtx_destroy( - lck_mtx_t * lck, - lck_grp_t * grp) + lck_mtx_t * lck, + lck_grp_t * grp) { - if (lck->lck_mtx_type != LCK_MTX_TYPE) + if (lck->lck_mtx_type != LCK_MTX_TYPE) { panic("Destroying invalid mutex %p", lck); - if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED) + } + if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED) { panic("Destroying previously destroyed lock %p", lck); + } lck_mtx_assert(lck, LCK_MTX_ASSERT_NOTOWNED); lck->lck_mtx_tag = LCK_MTX_TAG_DESTROYED; lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX); @@ -2852,39 +3208,46 @@ lck_mtx_destroy( void 
lck_spin_assert(lck_spin_t *lock, unsigned int type) { - thread_t thread, holder; - uintptr_t state; + thread_t thread, holder; + uintptr_t state; - if (lock->type != LCK_SPIN_TYPE) + if (lock->type != LCK_SPIN_TYPE) { panic("Invalid spinlock %p", lock); + } state = lock->lck_spin_data; holder = (thread_t)(state & ~LCK_ILOCK); thread = current_thread(); if (type == LCK_ASSERT_OWNED) { - if (holder == 0) + if (holder == 0) { panic("Lock not owned %p = %lx", lock, state); - if (holder != thread) + } + if (holder != thread) { panic("Lock not owned by current thread %p = %lx", lock, state); - if ((state & LCK_ILOCK) == 0) + } + if ((state & LCK_ILOCK) == 0) { panic("Lock bit not set %p = %lx", lock, state); + } } else if (type == LCK_ASSERT_NOTOWNED) { if (holder != 0) { - if (holder == thread) + if (holder == thread) { panic("Lock owned by current thread %p = %lx", lock, state); - else + } else { panic("Lock %p owned by thread %p", lock, holder); + } } - if (state & LCK_ILOCK) + if (state & LCK_ILOCK) { panic("Lock bit set %p = %lx", lock, state); - } else + } + } else { panic("lck_spin_assert(): invalid arg (%u)", type); + } } boolean_t lck_rw_lock_yield_shared(lck_rw_t *lck, boolean_t force_yield) { - lck_rw_word_t word; + lck_rw_word_t word; lck_rw_assert(lck, LCK_RW_ASSERT_SHARED); @@ -2906,16 +3269,18 @@ lck_rw_lock_yield_shared(lck_rw_t *lck, boolean_t force_yield) boolean_t kdp_lck_mtx_lock_spin_is_acquired(lck_mtx_t *lck) { - uintptr_t state; + uintptr_t state; if (not_in_kdp) { panic("panic: spinlock acquired check done outside of kernel debugger"); } state = ordered_load_mtx(lck); - if (state == LCK_MTX_TAG_DESTROYED) + if (state == LCK_MTX_TAG_DESTROYED) { return FALSE; - if (LCK_MTX_STATE_TO_THREAD(state) || (state & LCK_ILOCK)) + } + if (LCK_MTX_STATE_TO_THREAD(state) || (state & LCK_ILOCK)) { return TRUE; + } return FALSE; } @@ -2938,18 +3303,18 @@ kdp_lck_mtx_find_owner(__unused struct waitq * waitq, event64_t event, thread_wa void kdp_rwlck_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo) { - lck_rw_t *rwlck = NULL; - switch(waitinfo->wait_type) { - case kThreadWaitKernelRWLockRead: - rwlck = READ_EVENT_TO_RWLOCK(event); - break; - case kThreadWaitKernelRWLockWrite: - case kThreadWaitKernelRWLockUpgrade: - rwlck = WRITE_EVENT_TO_RWLOCK(event); - break; - default: - panic("%s was called with an invalid blocking type", __FUNCTION__); - break; + lck_rw_t *rwlck = NULL; + switch (waitinfo->wait_type) { + case kThreadWaitKernelRWLockRead: + rwlck = READ_EVENT_TO_RWLOCK(event); + break; + case kThreadWaitKernelRWLockWrite: + case kThreadWaitKernelRWLockUpgrade: + rwlck = WRITE_EVENT_TO_RWLOCK(event); + break; + default: + panic("%s was called with an invalid blocking type", __FUNCTION__); + break; } waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(rwlck); waitinfo->owner = thread_tid(rwlck->lck_rw_owner); diff --git a/osfmk/arm/loose_ends.c b/osfmk/arm/loose_ends.c index 35999a7f7..883f8a1ea 100644 --- a/osfmk/arm/loose_ends.c +++ b/osfmk/arm/loose_ends.c @@ -70,8 +70,8 @@ bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes) wimg_bits_dst = pmap_cache_attributes(pn_dst); if (mmu_kvtop_wpreflight(phystokv((pmap_paddr_t) dst)) && - ((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) && - ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) { + ((wimg_bits_src & VM_WIMG_MASK) == VM_WIMG_DEFAULT) && + ((wimg_bits_dst & VM_WIMG_MASK) == VM_WIMG_DEFAULT)) { /* Fast path - dst is writable and both source and destination have default attributes */ 
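/*
 * (Illustrative aside, not part of the original source: VM_WIMG_DEFAULT
 * marks ordinary cacheable memory, which is covered by the kernel's
 * static physical aperture, so the fast path is a plain copy on
 * phystokv() addresses:
 *
 *     bcopy((char *)phystokv((pmap_paddr_t) src),
 *         (char *)phystokv((pmap_paddr_t) dst), bytes);
 *
 * The pmap_map_cpu_windows_copy() path further down is only needed when
 * either page carries non-default cache attributes, e.g. device memory.)
 */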
bcopy((char *)phystokv((pmap_paddr_t) src), (char *)phystokv((pmap_paddr_t) dst), bytes); return; @@ -80,17 +80,18 @@ bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes) src_offset = src & PAGE_MASK; dst_offset = dst & PAGE_MASK; - if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE) + if ((src_offset + bytes) > PAGE_SIZE || (dst_offset + bytes) > PAGE_SIZE) { panic("bcopy extends beyond copy windows"); + } mp_disable_preemption(); cpu_num = cpu_number(); src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src); - dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ|VM_PROT_WRITE, wimg_bits_dst); + dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst); - bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index)+src_offset), - (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index)+dst_offset), - bytes); + bcopy((char *)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset), + (char *)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset), + bytes); pmap_unmap_cpu_windows_copy(src_index); pmap_unmap_cpu_windows_copy(dst_index); @@ -123,8 +124,9 @@ bzero_phys(addr64_t src, vm_size_t bytes) vm_offset_t offset = src & PAGE_MASK; uint32_t count = PAGE_SIZE - offset; - if (count > bytes) + if (count > bytes) { count = bytes; + } unsigned int index = pmap_map_cpu_windows_copy(src >> PAGE_SHIFT, VM_PROT_READ | VM_PROT_WRITE, wimg_bits); @@ -162,18 +164,18 @@ ml_phys_read_data(pmap_paddr_t paddr, int size) copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);; switch (size) { - case 1: - s1 = *(volatile unsigned char *)(copywindow_vaddr); - result = s1; - break; - case 2: - s2 = *(volatile unsigned short *)(copywindow_vaddr); - result = s2; - break; - case 4: - default: - result = *(volatile unsigned int *)(copywindow_vaddr); - break; + case 1: + s1 = *(volatile unsigned char *)(copywindow_vaddr); + result = s1; + break; + case 2: + s2 = *(volatile unsigned short *)(copywindow_vaddr); + result = s2; + break; + case 4: + default: + result = *(volatile unsigned int *)(copywindow_vaddr); + break; } pmap_unmap_cpu_windows_copy(index); @@ -195,7 +197,7 @@ ml_phys_read_long_long(pmap_paddr_t paddr) index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits); result = *(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index) - | ((uint32_t)paddr & PAGE_MASK)); + | ((uint32_t)paddr & PAGE_MASK)); pmap_unmap_cpu_windows_copy(index); mp_enable_preemption(); @@ -203,54 +205,64 @@ ml_phys_read_long_long(pmap_paddr_t paddr) return result; } -unsigned int ml_phys_read( vm_offset_t paddr) +unsigned int +ml_phys_read( vm_offset_t paddr) { - return ml_phys_read_data((pmap_paddr_t)paddr, 4); + return ml_phys_read_data((pmap_paddr_t)paddr, 4); } -unsigned int ml_phys_read_word(vm_offset_t paddr) { - - return ml_phys_read_data((pmap_paddr_t)paddr, 4); +unsigned int +ml_phys_read_word(vm_offset_t paddr) +{ + return ml_phys_read_data((pmap_paddr_t)paddr, 4); } -unsigned int ml_phys_read_64(addr64_t paddr64) +unsigned int +ml_phys_read_64(addr64_t paddr64) { - return ml_phys_read_data((pmap_paddr_t)paddr64, 4); + return ml_phys_read_data((pmap_paddr_t)paddr64, 4); } -unsigned int ml_phys_read_word_64(addr64_t paddr64) +unsigned int +ml_phys_read_word_64(addr64_t paddr64) { - return ml_phys_read_data((pmap_paddr_t)paddr64, 4); + return ml_phys_read_data((pmap_paddr_t)paddr64, 4); } -unsigned int ml_phys_read_half(vm_offset_t paddr) +unsigned int 
+ml_phys_read_half(vm_offset_t paddr) { - return ml_phys_read_data((pmap_paddr_t)paddr, 2); + return ml_phys_read_data((pmap_paddr_t)paddr, 2); } -unsigned int ml_phys_read_half_64(addr64_t paddr64) +unsigned int +ml_phys_read_half_64(addr64_t paddr64) { - return ml_phys_read_data((pmap_paddr_t)paddr64, 2); + return ml_phys_read_data((pmap_paddr_t)paddr64, 2); } -unsigned int ml_phys_read_byte(vm_offset_t paddr) +unsigned int +ml_phys_read_byte(vm_offset_t paddr) { - return ml_phys_read_data((pmap_paddr_t)paddr, 1); + return ml_phys_read_data((pmap_paddr_t)paddr, 1); } -unsigned int ml_phys_read_byte_64(addr64_t paddr64) +unsigned int +ml_phys_read_byte_64(addr64_t paddr64) { - return ml_phys_read_data((pmap_paddr_t)paddr64, 1); + return ml_phys_read_data((pmap_paddr_t)paddr64, 1); } -unsigned long long ml_phys_read_double(vm_offset_t paddr) +unsigned long long +ml_phys_read_double(vm_offset_t paddr) { - return ml_phys_read_long_long((pmap_paddr_t)paddr); + return ml_phys_read_long_long((pmap_paddr_t)paddr); } -unsigned long long ml_phys_read_double_64(addr64_t paddr64) +unsigned long long +ml_phys_read_double_64(addr64_t paddr64) { - return ml_phys_read_long_long((pmap_paddr_t)paddr64); + return ml_phys_read_long_long((pmap_paddr_t)paddr64); } @@ -269,20 +281,20 @@ ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size) mp_disable_preemption(); wimg_bits = pmap_cache_attributes(pn); - index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ|VM_PROT_WRITE, wimg_bits); + index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits); copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t) paddr & PAGE_MASK); switch (size) { - case 1: - *(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data; - break; - case 2: - *(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data; - break; - case 4: - default: - *(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data; - break; + case 1: + *(volatile unsigned char *)(copywindow_vaddr) = (unsigned char)data; + break; + case 2: + *(volatile unsigned short *)(copywindow_vaddr) = (unsigned short)data; + break; + case 4: + default: + *(volatile unsigned int *)(copywindow_vaddr) = (uint32_t)data; + break; } pmap_unmap_cpu_windows_copy(index); @@ -298,10 +310,10 @@ ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data) mp_disable_preemption(); wimg_bits = pmap_cache_attributes(pn); - index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ|VM_PROT_WRITE, wimg_bits); + index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits); *(volatile unsigned long long *)(pmap_cpu_windows_copy_addr(cpu_number(), index) - | ((uint32_t)paddr & PAGE_MASK)) = data; + | ((uint32_t)paddr & PAGE_MASK)) = data; pmap_unmap_cpu_windows_copy(index); mp_enable_preemption(); @@ -309,54 +321,64 @@ ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data) -void ml_phys_write_byte(vm_offset_t paddr, unsigned int data) +void +ml_phys_write_byte(vm_offset_t paddr, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 1); + ml_phys_write_data((pmap_paddr_t)paddr, data, 1); } -void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_byte_64(addr64_t paddr64, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 1); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 1); } -void ml_phys_write_half(vm_offset_t paddr, unsigned int data) +void +ml_phys_write_half(vm_offset_t paddr, unsigned int data) { - 
ml_phys_write_data((pmap_paddr_t)paddr, data, 2); + ml_phys_write_data((pmap_paddr_t)paddr, data, 2); } -void ml_phys_write_half_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_half_64(addr64_t paddr64, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 2); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 2); } -void ml_phys_write(vm_offset_t paddr, unsigned int data) +void +ml_phys_write(vm_offset_t paddr, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 4); + ml_phys_write_data((pmap_paddr_t)paddr, data, 4); } -void ml_phys_write_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_64(addr64_t paddr64, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); } -void ml_phys_write_word(vm_offset_t paddr, unsigned int data) +void +ml_phys_write_word(vm_offset_t paddr, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 4); + ml_phys_write_data((pmap_paddr_t)paddr, data, 4); } -void ml_phys_write_word_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_word_64(addr64_t paddr64, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); } -void ml_phys_write_double(vm_offset_t paddr, unsigned long long data) +void +ml_phys_write_double(vm_offset_t paddr, unsigned long long data) { - ml_phys_write_long_long((pmap_paddr_t)paddr, data); + ml_phys_write_long_long((pmap_paddr_t)paddr, data); } -void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data) +void +ml_phys_write_double_64(addr64_t paddr64, unsigned long long data) { - ml_phys_write_long_long((pmap_paddr_t)paddr64, data); + ml_phys_write_long_long((pmap_paddr_t)paddr64, data); } @@ -395,15 +417,18 @@ ffsbit(int *s) { int offset; - for (offset = 0; !*s; offset += INT_SIZE, ++s); + for (offset = 0; !*s; offset += INT_SIZE, ++s) { + ; + } return offset + __builtin_ctz(*s); } int ffs(unsigned int mask) { - if (mask == 0) + if (mask == 0) { return 0; + } /* * NOTE: cannot use __builtin_ffs because it generates a call to @@ -415,8 +440,9 @@ ffs(unsigned int mask) int ffsll(unsigned long long mask) { - if (mask == 0) + if (mask == 0) { return 0; + } /* * NOTE: cannot use __builtin_ffsll because it generates a call to @@ -431,37 +457,41 @@ ffsll(unsigned long long mask) int fls(unsigned int mask) { - if (mask == 0) + if (mask == 0) { return 0; + } - return (sizeof (mask) << 3) - __builtin_clz(mask); + return (sizeof(mask) << 3) - __builtin_clz(mask); } int flsll(unsigned long long mask) { - if (mask == 0) + if (mask == 0) { return 0; + } - return (sizeof (mask) << 3) - __builtin_clzll(mask); + return (sizeof(mask) << 3) - __builtin_clzll(mask); } -int +int bcmp( - const void *pa, - const void *pb, - size_t len) + const void *pa, + const void *pb, + size_t len) { const char *a = (const char *) pa; const char *b = (const char *) pb; - if (len == 0) + if (len == 0) { return 0; + } - do - if (*a++ != *b++) + do{ + if (*a++ != *b++) { break; - while (--len); + } + } while (--len); return len; } @@ -473,55 +503,61 @@ memcmp(const void *s1, const void *s2, size_t n) const unsigned char *p1 = s1, *p2 = s2; do { - if (*p1++ != *p2++) - return (*--p1 - *--p2); + if (*p1++ != *p2++) { + return *--p1 - *--p2; + } } while (--n != 0); } - return (0); + return 0; } kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which) { kern_return_t retval = KERN_SUCCESS; - void *from, *to; - unsigned int 
from_wimg_bits, to_wimg_bits; + void *from, *to; + unsigned int from_wimg_bits, to_wimg_bits; from = CAST_DOWN(void *, source); to = CAST_DOWN(void *, sink); - if ((which & (cppvPsrc | cppvPsnk)) == 0) /* Make sure that only - * one is virtual */ - panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */ - - if (which & cppvPsrc) + if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only + * one is virtual */ + panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */ + } + if (which & cppvPsrc) { from = (void *)phystokv((pmap_paddr_t)from); - if (which & cppvPsnk) + } + if (which & cppvPsnk) { to = (void *)phystokv((pmap_paddr_t)to); + } - if ((which & (cppvPsrc | cppvKmap)) == 0) /* Source is virtual in - * current map */ + if ((which & (cppvPsrc | cppvKmap)) == 0) { /* Source is virtual in + * current map */ retval = copyin((user_addr_t) from, to, size); - else if ((which & (cppvPsnk | cppvKmap)) == 0) /* Sink is virtual in - * current map */ + } else if ((which & (cppvPsnk | cppvKmap)) == 0) { /* Sink is virtual in + * current map */ retval = copyout(from, (user_addr_t) to, size); - else /* both addresses are physical or kernel map */ + } else { /* both addresses are physical or kernel map */ bcopy(from, to, size); + } if (which & cppvFsrc) { flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc)); } else if (which & cppvPsrc) { from_wimg_bits = pmap_cache_attributes(source >> PAGE_SHIFT); - if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU)) + if ((from_wimg_bits != VM_WIMG_COPYBACK) && (from_wimg_bits != VM_WIMG_WTHRU)) { flush_dcache64(source, size, TRUE); + } } if (which & cppvFsnk) { flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk)); - } else if (which & cppvPsnk) { + } else if (which & cppvPsnk) { to_wimg_bits = pmap_cache_attributes(sink >> PAGE_SHIFT); - if (to_wimg_bits != VM_WIMG_COPYBACK) + if (to_wimg_bits != VM_WIMG_COPYBACK) { flush_dcache64(sink, size, TRUE); + } } return retval; } @@ -552,38 +588,41 @@ copy_validate(const user_addr_t user_addr, if (__improbable(kernel_addr < VM_MIN_KERNEL_ADDRESS || kernel_addr > VM_MAX_KERNEL_ADDRESS || kernel_addr_last < kernel_addr || - kernel_addr_last > VM_MAX_KERNEL_ADDRESS)) + kernel_addr_last > VM_MAX_KERNEL_ADDRESS)) { panic("%s(%p, %p, %u) - kaddr not in kernel", __func__, (void *)user_addr, (void *)kernel_addr, nbytes); + } user_addr_t user_addr_last = user_addr + nbytes; if (__improbable((user_addr_last < user_addr) || ((user_addr + nbytes) > vm_map_max(current_thread()->map)) || - (user_addr < vm_map_min(current_thread()->map)))) - return (EFAULT); + (user_addr < vm_map_min(current_thread()->map)))) { + return EFAULT; + } - if (__improbable(nbytes > copysize_limit_panic)) + if (__improbable(nbytes > copysize_limit_panic)) { panic("%s(%p, %p, %u) - transfer too large", __func__, (void *)user_addr, (void *)kernel_addr, nbytes); + } - return (0); + return 0; } int copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes) { - return (copy_validate(ua, ka, nbytes)); + return copy_validate(ua, ka, nbytes); } int copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes) { - return (copy_validate(ua, ka, nbytes)); + return copy_validate(ua, ka, nbytes); } #if MACH_ASSERT -extern int copyinframe(vm_address_t fp, char *frame); +extern int copyinframe(vm_address_t fp, char *frame); /* * Machine-dependent routine to fill in an array with up to callstack_max @@ -591,34 +630,37 @@ extern int copyinframe(vm_address_t 
fp, char *frame); */ void machine_callstack( - uintptr_t * buf, - vm_size_t callstack_max) + uintptr_t * buf, + vm_size_t callstack_max) { /* Captures the USER call stack */ - uint32_t i=0; + uint32_t i = 0; uint32_t frame[2]; struct arm_saved_state* state = find_user_regs(current_thread()); if (!state) { - while (i<callstack_max) + while (i < callstack_max) { buf[i++] = 0; + } } else { buf[i++] = (uintptr_t)state->pc; frame[0] = state->r[7]; - while (i<callstack_max && frame[0] != 0) { - if (copyinframe(frame[0], (char*) frame)) + while (i < callstack_max && frame[0] != 0) { + if (copyinframe(frame[0], (char*) frame)) { break; + } buf[i++] = (uintptr_t)frame[1]; } - while (i<callstack_max) + while (i < callstack_max) { buf[i++] = 0; + } } } #endif /* MACH_ASSERT */ void ml_thread_policy( __unused thread_t thread, __unused unsigned policy_id, __unused unsigned policy_info) { - // <rdar://problem/7141284>: Reduce print noise - // kprintf("ml_thread_policy() unimplemented\n"); + // <rdar://problem/7141284>: Reduce print noise + // kprintf("ml_thread_policy() unimplemented\n"); } #if !MACH_KDP diff --git a/osfmk/arm/lowglobals.h b/osfmk/arm/lowglobals.h index 447a28b9a..a63fe6f5b 100644 --- a/osfmk/arm/lowglobals.h +++ b/osfmk/arm/lowglobals.h @@ -28,44 +28,44 @@ /* * Header files for the Low Memory Globals (lg) */ -#ifndef _LOW_MEMORY_GLOBALS_H_ -#define _LOW_MEMORY_GLOBALS_H_ +#ifndef _LOW_MEMORY_GLOBALS_H_ +#define _LOW_MEMORY_GLOBALS_H_ #ifndef __arm__ -#error Wrong architecture - this file is meant for arm +#error Wrong architecture - this file is meant for arm #endif -#define LOWGLO_LAYOUT_MAGIC 0xC0DEC0DE +#define LOWGLO_LAYOUT_MAGIC 0xC0DEC0DE -#pragma pack(4) /* Make sure the structure stays as we defined it */ +#pragma pack(4) /* Make sure the structure stays as we defined it */ typedef struct lowglo { - unsigned char lgVerCode[8]; /* 0xffff1000 System verification code */ - uint32_t lgZero[2]; /* 0xffff1008 Double constant 0 */ - uint32_t lgStext; /* 0xffff1010 Start of kernel text */ - uint32_t lgRsv014[2]; /* 0xffff1014 Reserved */ - uint32_t lgVersion; /* 0xffff101C Pointer to kernel version string */ - uint32_t lgRsv020[216]; /* 0xffff1020 Reserved */ - uint32_t lgKmodptr; /* 0xffff1380 Pointer to kmod, debugging aid */ - uint32_t lgTransOff; /* 0xffff1384 Pointer to kdp_trans_off, debugging aid */ - uint32_t lgRsv388[3]; /* 0xffff1388 Reserved */ - uint32_t lgOSVersion; /* 0xffff1394 Pointer to OS version string */ - uint32_t lgRsv398; /* 0xffff1398 Reserved */ - uint32_t lgRebootFlag; /* 0xffff139C Pointer to debugger reboot trigger */ - uint32_t lgManualPktAddr; /* 0xffff13A0 Pointer to manual packet structure */ - uint32_t lgRsv3A4; /* 0xffff13A4 Reserved */ - uint32_t lgPmapMemQ; /* 0xffff13A8 Pointer to PMAP memory queue */ - uint32_t lgPmapMemPageOffset;/* 0xffff13AC Offset of physical page member in vm_page_with_ppnum_t */ - uint32_t lgPmapMemChainOffset;/*0xffff13B0 Offset of listq in vm_page_t or vm_page_with_ppnum_t */ - uint32_t lgStaticAddr; /* 0xffff13B4 Static allocation address */ - uint32_t lgStaticSize; /* 0xffff13B8 Static allocation size */ - uint32_t lgLayoutMajorVersion; /* 0xffff13BC Lowglo layout major version */ - uint32_t lgLayoutMagic; /* 0xffff13C0 Magic value evaluated to determine if lgLayoutVersion is valid */ - uint32_t lgPmapMemStartAddr; /* 0xffff13C4 Pointer to start of vm_page_t array */ - uint32_t lgPmapMemEndAddr; /* 0xffff13C8 Pointer to end of vm_page_t array */ - uint32_t lgPmapMemPagesize; /* 0xffff13CC size of vm_page_t */ - uint32_t lgPmapMemFirstppnum; /* 0xffff13D0 physical page number of the first vm_page_t in the array */ - uint32_t lgLayoutMinorVersion; /* 0xffff13D4 Lowglo layout minor version */ - uint32_t lgPageShift; /* 0xffff13D8 Number of shifts from page number to size */ + unsigned char lgVerCode[8]; /* 0xffff1000 System verification code */ + uint32_t lgZero[2]; /* 0xffff1008 Double constant 0 */ + uint32_t lgStext; /* 0xffff1010 Start of kernel text */ + uint32_t lgRsv014[2]; /* 0xffff1014 Reserved */ + uint32_t lgVersion; /* 0xffff101C Pointer to kernel version string */ + uint32_t 
lgRsv020[216]; /* 0xffff1020 Reserved */ + uint32_t lgKmodptr; /* 0xffff1380 Pointer to kmod, debugging aid */ + uint32_t lgTransOff; /* 0xffff1384 Pointer to kdp_trans_off, debugging aid */ + uint32_t lgRsv388[3]; /* 0xffff1388 Reserved */ + uint32_t lgOSVersion; /* 0xffff1394 Pointer to OS version string */ + uint32_t lgRsv398; /* 0xffff1398 Reserved */ + uint32_t lgRebootFlag; /* 0xffff139C Pointer to debugger reboot trigger */ + uint32_t lgManualPktAddr; /* 0xffff13A0 Pointer to manual packet structure */ + uint32_t lgRsv3A4; /* 0xffff13A4 Reserved */ + uint32_t lgPmapMemQ; /* 0xffff13A8 Pointer to PMAP memory queue */ + uint32_t lgPmapMemPageOffset;/* 0xffff13AC Offset of physical page member in vm_page_with_ppnum_t */ + uint32_t lgPmapMemChainOffset;/*0xffff13B0 Offset of listq in vm_page_t or vm_page_with_ppnum_t */ + uint32_t lgStaticAddr; /* 0xffff13B4 Static allocation address */ + uint32_t lgStaticSize; /* 0xffff13B8 Static allocation size */ + uint32_t lgLayoutMajorVersion; /* 0xffff13BC Lowglo layout major version */ + uint32_t lgLayoutMagic; /* 0xffff13C0 Magic value evaluated to determine if lgLayoutVersion is valid */ + uint32_t lgPmapMemStartAddr; /* 0xffff13C4 Pointer to start of vm_page_t array */ + uint32_t lgPmapMemEndAddr; /* 0xffff13C8 Pointer to end of vm_page_t array */ + uint32_t lgPmapMemPagesize; /* 0xffff13CC size of vm_page_t */ + uint32_t lgPmapMemFirstppnum; /* 0xffff13D0 physical page number of the first vm_page_t in the array */ + uint32_t lgLayoutMinorVersion; /* 0xffff13D4 Lowglo layout minor version */ + uint32_t lgPageShift; /* 0xffff13D8 Number of shifts from page number to size */ } lowglo; #pragma pack() diff --git a/osfmk/arm/lowmem_vectors.c b/osfmk/arm/lowmem_vectors.c index eb6bcdf4f..77fe49fdd 100644 --- a/osfmk/arm/lowmem_vectors.c +++ b/osfmk/arm/lowmem_vectors.c @@ -33,16 +33,16 @@ #include extern vm_offset_t vm_kernel_stext; -extern void *version; -extern void *kmod; -extern void *kdp_trans_off; -extern void *osversion; -extern void *flag_kdp_trigger_reboot; -extern void *manual_pkt; -extern struct vm_object pmap_object_store; /* store pt pages */ +extern void *version; +extern void *kmod; +extern void *kdp_trans_off; +extern void *osversion; +extern void *flag_kdp_trigger_reboot; +extern void *manual_pkt; +extern struct vm_object pmap_object_store; /* store pt pages */ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { - .lgVerCode = { 'O','c','t','o','p','u','s',' ' }, + .lgVerCode = { 'O', 'c', 't', 'o', 'p', 'u', 's', ' ' }, // Increment the major version for changes that break the current Astris // usage of lowGlo values // Increment the minor version for changes that provide additonal info/function @@ -58,7 +58,7 @@ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { #endif .lgOSVersion = (uint32_t)&osversion, #if MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING - .lgRebootFlag = (uint32_t)&flag_kdp_trigger_reboot, + .lgRebootFlag = (uint32_t)&flag_kdp_trigger_reboot, .lgManualPktAddr = (uint32_t)&manual_pkt, #endif .lgPmapMemQ = (uint32_t)&(pmap_object_store.memq), @@ -71,19 +71,22 @@ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { .lgPmapMemFirstppnum = -1 }; -void patch_low_glo(void) +void +patch_low_glo(void) { lowGlo.lgStext = (uint32_t)vm_kernel_stext; } -void patch_low_glo_static_region(uint32_t address, uint32_t size) +void +patch_low_glo_static_region(uint32_t address, uint32_t size) { lowGlo.lgStaticAddr = address; lowGlo.lgStaticSize = size; } -void patch_low_glo_vm_page_info(void * start_addr, void * 
end_addr, uint32_t first_ppnum) +void +patch_low_glo_vm_page_info(void * start_addr, void * end_addr, uint32_t first_ppnum) { lowGlo.lgPmapMemStartAddr = (uint32_t)start_addr; lowGlo.lgPmapMemEndAddr = (uint32_t)end_addr; diff --git a/osfmk/arm/machdep_call.c b/osfmk/arm/machdep_call.c index b77c3c7ce..408c882e7 100644 --- a/osfmk/arm/machdep_call.c +++ b/osfmk/arm/machdep_call.c @@ -48,11 +48,11 @@ extern kern_return_t kern_invalid(void); uintptr_t get_tpidrro(void) { - uintptr_t uthread; + uintptr_t uthread; #if __arm__ - uthread = __builtin_arm_mrc(15, 0, 13, 0, 3); // TPIDRURO + uthread = __builtin_arm_mrc(15, 0, 13, 0, 3); // TPIDRURO #else - __asm__ volatile("mrs %0, TPIDRRO_EL0" : "=r" (uthread)); + __asm__ volatile ("mrs %0, TPIDRRO_EL0" : "=r" (uthread)); #endif return uthread; } @@ -61,9 +61,9 @@ void set_tpidrro(uintptr_t uthread) { #if __arm__ - __builtin_arm_mcr(15, 0, uthread, 13, 0, 3); // TPIDRURO + __builtin_arm_mcr(15, 0, uthread, 13, 0, 3); // TPIDRURO #else - __asm__ volatile("msr TPIDRRO_EL0, %0" : : "r" (uthread)); + __asm__ volatile ("msr TPIDRRO_EL0, %0" : : "r" (uthread)); #endif } @@ -76,17 +76,16 @@ thread_set_cthread_self(vm_address_t self) vm_address_t thread_get_cthread_self(void) { - uintptr_t self; + uintptr_t self; self = get_tpidrro(); #if __arm__ self &= ~3; assert( self == current_thread()->machine.cthread_self); - return ((kern_return_t) current_thread()->machine.cthread_self); + return (kern_return_t) current_thread()->machine.cthread_self; #else self &= MACHDEP_CTHREAD_MASK; assert( self == current_thread()->machine.cthread_self); return self; #endif } - diff --git a/osfmk/arm/machdep_call.h b/osfmk/arm/machdep_call.h index 1f7f1606f..cb0e5c105 100644 --- a/osfmk/arm/machdep_call.h +++ b/osfmk/arm/machdep_call.h @@ -37,29 +37,28 @@ */ typedef union { - kern_return_t (*args_0)(void); - kern_return_t (*args_1)(vm_address_t); - kern_return_t (*args_2)(vm_address_t,vm_address_t); - kern_return_t (*args_3)(vm_address_t,vm_address_t,vm_address_t); - kern_return_t (*args_4)(vm_address_t, vm_address_t,vm_address_t,vm_address_t); - kern_return_t (*args_var)(vm_address_t,...); + kern_return_t (*args_0)(void); + kern_return_t (*args_1)(vm_address_t); + kern_return_t (*args_2)(vm_address_t, vm_address_t); + kern_return_t (*args_3)(vm_address_t, vm_address_t, vm_address_t); + kern_return_t (*args_4)(vm_address_t, vm_address_t, vm_address_t, vm_address_t); + kern_return_t (*args_var)(vm_address_t, ...); } machdep_call_routine_t; -#define MACHDEP_CALL_ROUTINE(func,args) \ +#define MACHDEP_CALL_ROUTINE(func, args) \ { { .args_ ## args = func }, args } typedef struct { - machdep_call_routine_t routine; - int nargs; + machdep_call_routine_t routine; + int nargs; } machdep_call_t; -extern const machdep_call_t machdep_call_table[]; -extern int machdep_call_count; +extern const machdep_call_t machdep_call_table[]; +extern int machdep_call_count; -extern vm_address_t thread_get_cthread_self(void); -extern kern_return_t thread_set_cthread_self(vm_address_t); +extern vm_address_t thread_get_cthread_self(void); +extern kern_return_t thread_set_cthread_self(vm_address_t); // Read and write raw TPIDRURO / TPIDRRO_EL0 -uintptr_t get_tpidrro(void); -void set_tpidrro(uintptr_t); - +uintptr_t get_tpidrro(void); +void set_tpidrro(uintptr_t); diff --git a/osfmk/arm/machine_cpu.h b/osfmk/arm/machine_cpu.h index d6584de3f..e4dc9a923 100644 --- a/osfmk/arm/machine_cpu.h +++ b/osfmk/arm/machine_cpu.h @@ -55,6 +55,6 @@ extern void arm_init_idle_cpu(cpu_data_t *args); extern void 
init_cpu_timebase(boolean_t enable_fiq); -#define cpu_pause() do {} while (0) /* Not for this architecture */ +#define cpu_pause() do {} while (0) /* Not for this architecture */ #endif /* _ARM_MACHINE_CPU_H_ */ diff --git a/osfmk/arm/machine_cpuid.c b/osfmk/arm/machine_cpuid.c index ac54a0be5..b79253632 100644 --- a/osfmk/arm/machine_cpuid.c +++ b/osfmk/arm/machine_cpuid.c @@ -38,10 +38,10 @@ uint32_t machine_read_midr(void) { #if __arm__ - uint32_t midr = __builtin_arm_mrc(15,0,0,0,0); + uint32_t midr = __builtin_arm_mrc(15, 0, 0, 0, 0); #else uint64_t midr; - __asm__ volatile("mrs %0, MIDR_EL1" : "=r" (midr)); + __asm__ volatile ("mrs %0, MIDR_EL1" : "=r" (midr)); #endif return (uint32_t)midr; } @@ -50,10 +50,10 @@ uint32_t machine_read_clidr(void) { #if __arm__ - uint32_t clidr = __builtin_arm_mrc(15,1,0,0,1); + uint32_t clidr = __builtin_arm_mrc(15, 1, 0, 0, 1); #else uint64_t clidr; - __asm__ volatile("mrs %0, CLIDR_EL1" : "=r" (clidr)); + __asm__ volatile ("mrs %0, CLIDR_EL1" : "=r" (clidr)); #endif return (uint32_t)clidr; } @@ -62,10 +62,10 @@ uint32_t machine_read_ccsidr(void) { #if __arm__ - uint32_t ccsidr = __builtin_arm_mrc(15,1,0,0,0); + uint32_t ccsidr = __builtin_arm_mrc(15, 1, 0, 0, 0); #else uint64_t ccsidr; - __asm__ volatile("mrs %0, CCSIDR_EL1" : "=r" (ccsidr)); + __asm__ volatile ("mrs %0, CCSIDR_EL1" : "=r" (ccsidr)); #endif return (uint32_t)ccsidr; } @@ -75,7 +75,7 @@ arm_isa_feat1_reg machine_read_isa_feat1(void) { arm_isa_feat1_reg isa; - isa.value = __builtin_arm_mrc(15,0,0,2,1); + isa.value = __builtin_arm_mrc(15, 0, 0, 2, 1); return isa; } #endif // __arm__ @@ -85,10 +85,10 @@ machine_write_csselr(csselr_cache_level level, csselr_cache_type type) { #if __arm__ uint32_t csselr = (level | type); - __builtin_arm_mcr(15,2,csselr,0,0,0); + __builtin_arm_mcr(15, 2, csselr, 0, 0, 0); #else uint64_t csselr = (level | type); - __asm__ volatile("msr CSSELR_EL1, %0" : : "r" (csselr)); + __asm__ volatile ("msr CSSELR_EL1, %0" : : "r" (csselr)); #endif __builtin_arm_isb(ISB_SY); } @@ -101,23 +101,23 @@ machine_do_debugid(void) arm_debug_dbgdidr dbgdidr; /* read CPUID ID_DFR0 */ - id_dfr0.value = __builtin_arm_mrc(15,0,0,1,2); + id_dfr0.value = __builtin_arm_mrc(15, 0, 0, 1, 2); /* read DBGDIDR */ - dbgdidr.value = __builtin_arm_mrc(14,0,0,0,0); + dbgdidr.value = __builtin_arm_mrc(14, 0, 0, 0, 0); cpuid_debug_info.coprocessor_core_debug = id_dfr0.debug_feature.coprocessor_core_debug != 0; cpuid_debug_info.memory_mapped_core_debug = (id_dfr0.debug_feature.memory_mapped_core_debug != 0) && (getCpuDatap()->cpu_debug_interface_map != 0); if (cpuid_debug_info.coprocessor_core_debug || cpuid_debug_info.memory_mapped_core_debug) { - cpuid_debug_info.num_watchpoint_pairs = dbgdidr.debug_id.wrps + 1; - cpuid_debug_info.num_breakpoint_pairs = dbgdidr.debug_id.brps + 1; + cpuid_debug_info.num_watchpoint_pairs = dbgdidr.debug_id.wrps + 1; + cpuid_debug_info.num_breakpoint_pairs = dbgdidr.debug_id.brps + 1; } #else arm_cpuid_id_aa64dfr0_el1 id_dfr0; /* read ID_AA64DFR0_EL1 */ - __asm__ volatile("mrs %0, ID_AA64DFR0_EL1" : "=r"(id_dfr0.value)); + __asm__ volatile ("mrs %0, ID_AA64DFR0_EL1" : "=r"(id_dfr0.value)); if (id_dfr0.debug_feature.debug_arch_version) { cpuid_debug_info.num_watchpoint_pairs = id_dfr0.debug_feature.wrps + 1; @@ -136,11 +136,11 @@ void machine_do_mvfpid() { #if __arm__ - arm_mvfr0_info_t arm_mvfr0_info; - arm_mvfr1_info_t arm_mvfr1_info; + arm_mvfr0_info_t arm_mvfr0_info; + arm_mvfr1_info_t arm_mvfr1_info; - __asm__ volatile("vmrs %0, 
mvfr0":"=r"(arm_mvfr0_info.value)); - __asm__ volatile("vmrs %0, mvfr1":"=r"(arm_mvfr1_info.value)); + __asm__ volatile ("vmrs %0, mvfr0" :"=r"(arm_mvfr0_info.value)); + __asm__ volatile ("vmrs %0, mvfr1" :"=r"(arm_mvfr1_info.value)); cpuid_mvfp_info.neon = arm_mvfr1_info.bits.SP; cpuid_mvfp_info.neon_hpfp = arm_mvfr1_info.bits.HPFP; @@ -148,7 +148,6 @@ machine_do_mvfpid() cpuid_mvfp_info.neon = 1; cpuid_mvfp_info.neon_hpfp = 1; #endif /* __arm__ */ - } arm_mvfp_info_t * @@ -156,4 +155,3 @@ machine_arm_mvfp_info(void) { return &cpuid_mvfp_info; } - diff --git a/osfmk/arm/machine_cpuid.h b/osfmk/arm/machine_cpuid.h index e50ac9302..7396332fe 100644 --- a/osfmk/arm/machine_cpuid.h +++ b/osfmk/arm/machine_cpuid.h @@ -31,29 +31,28 @@ /* CPU feature identification */ typedef struct { -uint32_t arm_32bit_isa : 4, - arm_thumb_ver : 4, - arm_jazelle : 4, - arm_thumb2 : 4, - reserved : 16; + uint32_t arm_32bit_isa : 4, + arm_thumb_ver : 4, + arm_jazelle : 4, + arm_thumb2 : 4, + reserved : 16; } arm_feature_bits_t; typedef union { - arm_feature_bits_t field; - uint32_t value; + arm_feature_bits_t field; + uint32_t value; } arm_feature0_reg_t; // Register 0, subtype 21: Instruction Set Features #1 -typedef struct -{ - uint32_t endianness_support : 4; - uint32_t exception_1_support : 4; - uint32_t exception_2_support : 4; - uint32_t sign_zero_ext_support : 4; - uint32_t if_then_support : 4; - uint32_t immediate_support : 4; - uint32_t interworking_support : 4; - uint32_t jazelle_support : 4; +typedef struct{ + uint32_t endianness_support : 4; + uint32_t exception_1_support : 4; + uint32_t exception_2_support : 4; + uint32_t sign_zero_ext_support : 4; + uint32_t if_then_support : 4; + uint32_t immediate_support : 4; + uint32_t interworking_support : 4; + uint32_t jazelle_support : 4; } syscp_ID_instructions_feat_1_reg; @@ -69,12 +68,12 @@ arm_isa_feat1_reg machine_read_isa_feat1(void); /* ID_DFR0 */ typedef union { struct { - uint32_t coprocessor_core_debug : 4, - coprocessor_secure_debug : 4, - memory_mapped_core_debug : 4, - coprocessor_trace_debug : 4, - memory_mapped_trace_debug : 4, - microcontroller_debug : 4; + uint32_t coprocessor_core_debug : 4, + coprocessor_secure_debug : 4, + memory_mapped_core_debug : 4, + coprocessor_trace_debug : 4, + memory_mapped_trace_debug : 4, + microcontroller_debug : 4; } debug_feature; uint32_t value; } arm_cpuid_id_dfr0; @@ -82,26 +81,26 @@ typedef union { /* DBGDIDR */ typedef union { struct { - uint32_t revision : 4, - variant : 4, - : 4, - se_imp : 1, - pcsr_imp : 1, - nsuhd_imp : 1, - : 1, - version : 4, - ctx_cmps : 4, - brps : 4, - wrps : 4; + uint32_t revision : 4, + variant : 4, + : 4, + se_imp : 1, + pcsr_imp : 1, + nsuhd_imp : 1, + : 1, + version : 4, + ctx_cmps : 4, + brps : 4, + wrps : 4; } debug_id; uint32_t value; } arm_debug_dbgdidr; typedef struct { - boolean_t memory_mapped_core_debug; - boolean_t coprocessor_core_debug; - uint32_t num_watchpoint_pairs; - uint32_t num_breakpoint_pairs; + boolean_t memory_mapped_core_debug; + boolean_t coprocessor_core_debug; + uint32_t num_watchpoint_pairs; + uint32_t num_breakpoint_pairs; } arm_debug_info_t; #endif /* _ARM_MACHINE_CPUID_H_ */ diff --git a/osfmk/arm/machine_kpc.h b/osfmk/arm/machine_kpc.h index ec7aed315..39245b47d 100644 --- a/osfmk/arm/machine_kpc.h +++ b/osfmk/arm/machine_kpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_ARM_KPC_H @@ -30,17 +30,17 @@ #ifdef ARMA7 -#define KPC_ARM_FIXED_COUNT 1 -#define KPC_ARM_CONFIGURABLE_COUNT 4 +#define KPC_ARM_FIXED_COUNT 1 +#define KPC_ARM_CONFIGURABLE_COUNT 4 -#define KPC_ARM_TOTAL_COUNT (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT) +#define KPC_ARM_TOTAL_COUNT (KPC_ARM_FIXED_COUNT + KPC_ARM_CONFIGURABLE_COUNT) #define KPC_ARM_COUNTER_WIDTH 32 #else -#define KPC_ARM_FIXED_COUNT 2 -#define KPC_ARM_CONFIGURABLE_COUNT 6 +#define KPC_ARM_FIXED_COUNT 2 +#define KPC_ARM_CONFIGURABLE_COUNT 6 #define KPC_ARM_COUNTER_WIDTH 39 #define KPC_ARM_COUNTER_MASK ((1ull << KPC_ARM_COUNTER_WIDTH) - 1) diff --git a/osfmk/arm/machine_routines.c b/osfmk/arm/machine_routines.c index 94fc76bf4..f201ddcc8 100644 --- a/osfmk/arm/machine_routines.c +++ b/osfmk/arm/machine_routines.c @@ -49,6 +49,7 @@ #include #include #include +#include #include @@ -64,6 +65,7 @@ static unsigned int avail_cpus = 0; uint32_t LockTimeOut; uint32_t LockTimeOutUsec; +uint64_t TLockTimeOut; uint64_t MutexSpin; boolean_t is_clock_configured = FALSE; @@ -77,12 +79,12 @@ machine_startup(__unused boot_args * args) { int boot_arg; - PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert)); + PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert)); - if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) { default_preemption_rate = boot_arg; } - if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) { default_bg_preemption_rate = boot_arg; } @@ -97,10 +99,10 @@ machine_startup(__unused boot_args * args) char * machine_boot_info( - __unused char *buf, - __unused vm_size_t size) + __unused char *buf, + __unused vm_size_t size) { - return (PE_boot_args()); + return PE_boot_args(); } void @@ -115,15 +117,16 @@ machine_init(void) debug_log_init(); clock_config(); is_clock_configured = TRUE; - if (debug_enabled) + if (debug_enabled) { pmap_map_globals(); + } } -void +void slave_machine_init(__unused void *param) { - cpu_machine_init(); /* Initialize the processor */ - clock_init(); /* Init the clock */ + cpu_machine_init(); /* Initialize the processor */ + clock_init(); /* Init the clock */ } /* @@ -132,11 +135,11 @@ slave_machine_init(__unused void *param) */ thread_t machine_processor_shutdown( - __unused thread_t thread, - void (*doshutdown) (processor_t), - 
processor_t processor) + __unused thread_t thread, + void (*doshutdown)(processor_t), + processor_t processor) { - return (Shutdown_context(doshutdown, processor)); + return Shutdown_context(doshutdown, processor); } /* @@ -153,8 +156,9 @@ ml_init_max_cpus(unsigned int max_cpus) machine_info.max_cpus = max_cpus; machine_info.physical_cpu_max = max_cpus; machine_info.logical_cpu_max = max_cpus; - if (max_cpus_initialized == MAX_CPUS_WAIT) - thread_wakeup((event_t) & max_cpus_initialized); + if (max_cpus_initialized == MAX_CPUS_WAIT) { + thread_wakeup((event_t) &max_cpus_initialized); + } max_cpus_initialized = MAX_CPUS_SET; } (void) ml_set_interrupts_enabled(current_state); @@ -172,11 +176,11 @@ ml_get_max_cpus(void) current_state = ml_set_interrupts_enabled(FALSE); if (max_cpus_initialized != MAX_CPUS_SET) { max_cpus_initialized = MAX_CPUS_WAIT; - assert_wait((event_t) & max_cpus_initialized, THREAD_UNINT); + assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT); (void) thread_block(THREAD_CONTINUE_NULL); } (void) ml_set_interrupts_enabled(current_state); - return (machine_info.max_cpus); + return machine_info.max_cpus; } /* @@ -188,22 +192,25 @@ ml_init_lock_timeout(void) { uint64_t abstime; uint64_t mtxspin; - uint64_t default_timeout_ns = NSEC_PER_SEC>>2; + uint64_t default_timeout_ns = NSEC_PER_SEC >> 2; uint32_t slto; - if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto))) + if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) { default_timeout_ns = slto * NSEC_PER_USEC; + } nanoseconds_to_absolutetime(default_timeout_ns, &abstime); - LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC); + LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC); LockTimeOut = (uint32_t)abstime; + TLockTimeOut = LockTimeOut; - if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) { - if (mtxspin > USEC_PER_SEC>>4) - mtxspin = USEC_PER_SEC>>4; - nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime); + if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) { + if (mtxspin > USEC_PER_SEC >> 4) { + mtxspin = USEC_PER_SEC >> 4; + } + nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime); } else { - nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime); + nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime); } MutexSpin = abstime; } @@ -226,11 +233,11 @@ ml_cpu_up(void) void ml_cpu_down(void) { - cpu_data_t *cpu_data_ptr; + cpu_data_t *cpu_data_ptr; hw_atomic_sub(&machine_info.physical_cpu, 1); hw_atomic_sub(&machine_info.logical_cpu, 1); - + /* * If we want to deal with outstanding IPIs, we need to * do relatively early in the processor_doshutdown path, @@ -277,34 +284,34 @@ ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info) unsigned int ml_get_machine_mem(void) { - return (machine_info.memory_size); + return machine_info.memory_size; } /* Return max offset */ vm_map_offset_t ml_get_max_offset( - boolean_t is64, + boolean_t is64, unsigned int option) { - unsigned int pmap_max_offset_option = 0; + unsigned int pmap_max_offset_option = 0; switch (option) { case MACHINE_MAX_OFFSET_DEFAULT: pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEFAULT; - break; - case MACHINE_MAX_OFFSET_MIN: + break; + case MACHINE_MAX_OFFSET_MIN: pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MIN; - break; - case MACHINE_MAX_OFFSET_MAX: + break; + case MACHINE_MAX_OFFSET_MAX: pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_MAX; - break; - case MACHINE_MAX_OFFSET_DEVICE: + break; + case MACHINE_MAX_OFFSET_DEVICE: pmap_max_offset_option = ARM_PMAP_MAX_OFFSET_DEVICE; - break; - default: + 
break; + default: panic("ml_get_max_offset(): Illegal option 0x%x\n", option); - break; - } + break; + } return pmap_max_offset(is64, pmap_max_offset_option); } @@ -316,11 +323,11 @@ ml_wants_panic_trap_to_debugger(void) void ml_panic_trap_to_debugger(__unused const char *panic_format_str, - __unused va_list *panic_args, - __unused unsigned int reason, - __unused void *ctx, - __unused uint64_t panic_options_mask, - __unused unsigned long panic_caller) + __unused va_list *panic_args, + __unused unsigned int reason, + __unused void *ctx, + __unused uint64_t panic_options_mask, + __unused unsigned long panic_caller) { return; } @@ -336,7 +343,9 @@ halt_all_cpus(boolean_t reboot) printf("CPU halted\n"); PEHaltRestart(kPEHaltCPU); } - while (1); + while (1) { + ; + } } __attribute__((noreturn)) @@ -352,7 +361,7 @@ halt_cpu(void) */ void machine_signal_idle( - processor_t processor) + processor_t processor) { cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); @@ -360,7 +369,7 @@ machine_signal_idle( void machine_signal_idle_deferred( - processor_t processor) + processor_t processor) { cpu_signal_deferred(processor_to_cpu_datap(processor)); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); @@ -368,7 +377,7 @@ machine_signal_idle_deferred( void machine_signal_idle_cancel( - processor_t processor) + processor_t processor) { cpu_signal_cancel(processor_to_cpu_datap(processor)); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); @@ -378,13 +387,13 @@ machine_signal_idle_cancel( * Routine: ml_install_interrupt_handler * Function: Initialize Interrupt Handler */ -void +void ml_install_interrupt_handler( - void *nub, - int source, - void *target, - IOInterruptHandler handler, - void *refCon) + void *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) { cpu_data_t *cpu_data_ptr; boolean_t current_state; @@ -408,7 +417,7 @@ ml_install_interrupt_handler( * Routine: ml_init_interrupt * Function: Initialize Interrupts */ -void +void ml_init_interrupt(void) { } @@ -417,11 +426,12 @@ ml_init_interrupt(void) * Routine: ml_init_timebase * Function: register and setup Timebase, Decremeter services */ -void ml_init_timebase( - void *args, - tbd_ops_t tbd_funcs, - vm_offset_t int_address, - vm_offset_t int_value) +void +ml_init_timebase( + void *args, + tbd_ops_t tbd_funcs, + vm_offset_t int_address, + vm_offset_t int_value) { cpu_data_t *cpu_data_ptr; @@ -456,16 +466,17 @@ ml_parse_cpu_topology(void) assert(err == kSuccess); while (kSuccess == DTIterateEntries(&iter, &child)) { - #if MACH_ASSERT unsigned int propSize; void *prop = NULL; if (avail_cpus == 0) { - if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) + if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) { panic("unable to retrieve state for cpu %u", avail_cpus); + } - if (strncmp((char*)prop, "running", propSize) != 0) + if (strncmp((char*)prop, "running", propSize) != 0) { panic("cpu 0 has not been marked as running!"); + } } assert(kSuccess == DTGetProperty(child, "reg", &prop, &propSize)); assert(avail_cpus == *((uint32_t*)prop)); @@ -475,11 +486,13 @@ ml_parse_cpu_topology(void) cpu_boot_arg = avail_cpus; if (PE_parse_boot_argn("cpus", &cpu_boot_arg, sizeof(cpu_boot_arg)) && - (avail_cpus > cpu_boot_arg)) + (avail_cpus 
> cpu_boot_arg)) { avail_cpus = cpu_boot_arg; + } - if (avail_cpus == 0) + if (avail_cpus == 0) { panic("No cpus found!"); + } } unsigned int @@ -513,10 +526,9 @@ ml_get_max_cpu_number(void) } kern_return_t -ml_processor_register( - ml_processor_info_t * in_processor_info, - processor_t * processor_out, - ipi_handler_t * ipi_handler) +ml_processor_register(ml_processor_info_t *in_processor_info, + processor_t * processor_out, ipi_handler_t *ipi_handler_out, + perfmon_interrupt_handler_func *pmi_handler_out) { cpu_data_t *this_cpu_datap; boolean_t is_boot_cpu; @@ -536,8 +548,9 @@ ml_processor_register( /* Fail the registration if the number of CPUs has been limited by boot-arg. */ if ((in_processor_info->phys_id >= avail_cpus) || - (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) + (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number())) { return KERN_FAILURE; + } if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) { is_boot_cpu = FALSE; @@ -551,12 +564,14 @@ ml_processor_register( this_cpu_datap->cpu_id = in_processor_info->cpu_id; this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu); - if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) + if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) { goto processor_register_error; + } if (!is_boot_cpu) { - if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) + if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) { goto processor_register_error; + } } this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle; @@ -574,7 +589,7 @@ ml_processor_register( if (!is_boot_cpu) { processor_init((struct processor *)this_cpu_datap->cpu_processor, - this_cpu_datap->cpu_number, processor_pset(master_processor)); + this_cpu_datap->cpu_number, processor_pset(master_processor)); if (this_cpu_datap->cpu_l2_access_penalty) { /* @@ -584,22 +599,26 @@ ml_processor_register( * preferentially. 
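 *
 * (Illustrative aside, not part of the original source: the
 * processor_set_primary() call below makes this core a secondary of
 * master_processor, so the scheduler dispatches onto the cores with
 * cheaper L2 access first and falls back to this core only when the
 * primaries are busy.)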
*/ processor_set_primary(this_cpu_datap->cpu_processor, - master_processor); + master_processor); } } *processor_out = this_cpu_datap->cpu_processor; - *ipi_handler = cpu_signal_handler; - if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) + *ipi_handler_out = cpu_signal_handler; + *pmi_handler_out = NULL; + if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) { *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle; + } #if KPC - if (kpc_register_cpu(this_cpu_datap) != TRUE) + if (kpc_register_cpu(this_cpu_datap) != TRUE) { goto processor_register_error; + } #endif - if (!is_boot_cpu) + if (!is_boot_cpu) { early_random_cpu_init(this_cpu_datap->cpu_number); + } return KERN_SUCCESS; @@ -607,15 +626,16 @@ processor_register_error: #if KPC kpc_unregister_cpu(this_cpu_datap); #endif - if (!is_boot_cpu) + if (!is_boot_cpu) { cpu_data_free(this_cpu_datap); + } return KERN_FAILURE; } void ml_init_arm_debug_interface( - void * in_cpu_datap, - vm_offset_t virt_address) + void * in_cpu_datap, + vm_offset_t virt_address) { ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address; do_debugid(); @@ -627,7 +647,7 @@ ml_init_arm_debug_interface( */ void init_ast_check( - __unused processor_t processor) + __unused processor_t processor) { } @@ -637,7 +657,7 @@ init_ast_check( */ void cause_ast_check( - processor_t processor) + processor_t processor) { if (current_processor() != processor) { cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL); @@ -647,7 +667,9 @@ cause_ast_check( extern uint32_t cpu_idle_count; -void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) { +void +ml_get_power_state(boolean_t *icp, boolean_t *pidlep) +{ *icp = ml_at_interrupt_context(); *pidlep = (cpu_idle_count == real_ncpus); } @@ -656,59 +678,60 @@ void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) { * Routine: ml_cause_interrupt * Function: Generate a fake interrupt */ -void +void ml_cause_interrupt(void) { - return; /* BS_XXX */ + return; /* BS_XXX */ } /* Map memory map IO space */ vm_offset_t ml_io_map( - vm_offset_t phys_addr, - vm_size_t size) + vm_offset_t phys_addr, + vm_size_t size) { - return (io_map(phys_addr, size, VM_WIMG_IO)); + return io_map(phys_addr, size, VM_WIMG_IO); } vm_offset_t ml_io_map_wcomb( - vm_offset_t phys_addr, - vm_size_t size) + vm_offset_t phys_addr, + vm_size_t size) { - return (io_map(phys_addr, size, VM_WIMG_WCOMB)); + return io_map(phys_addr, size, VM_WIMG_WCOMB); } /* boot memory allocation */ -vm_offset_t +vm_offset_t ml_static_malloc( - __unused vm_size_t size) + __unused vm_size_t size) { - return ((vm_offset_t) NULL); + return (vm_offset_t) NULL; } vm_map_address_t ml_map_high_window( - vm_offset_t phys_addr, - vm_size_t len) + vm_offset_t phys_addr, + vm_size_t len) { return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE); } vm_offset_t ml_static_ptovirt( - vm_offset_t paddr) + vm_offset_t paddr) { return phystokv(paddr); } vm_offset_t ml_static_vtop( - vm_offset_t vaddr) + vm_offset_t vaddr) { - if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize) + if (((vm_address_t)(vaddr) - gVirtBase) >= gPhysSize) { panic("ml_static_ptovirt(): illegal vaddr: %p\n", (void*)vaddr); - return ((vm_address_t)(vaddr) - gVirtBase + gPhysBase); + } + return (vm_address_t)(vaddr) - gVirtBase + gPhysBase; } vm_offset_t @@ -737,8 +760,9 @@ ml_static_protect( ppnum_t ppn; kern_return_t result = KERN_SUCCESS; - if (vaddr < VM_MIN_KERNEL_ADDRESS) + if (vaddr < VM_MIN_KERNEL_ADDRESS) { return 
KERN_FAILURE; + } assert((vaddr & (ARM_PGBYTES - 1)) == 0); /* must be page aligned */ @@ -761,8 +785,8 @@ ml_static_protect( } for (vaddr_cur = vaddr; - vaddr_cur < ((vaddr + size) & ~ARM_PGMASK); - vaddr_cur += ARM_PGBYTES) { + vaddr_cur < ((vaddr + size) & ~ARM_PGMASK); + vaddr_cur += ARM_PGBYTES) { ppn = pmap_find_phys(kernel_pmap, vaddr_cur); if (ppn != (vm_offset_t) NULL) { tt_entry_t *ttp = &kernel_pmap->tte[ttenum(vaddr_cur)]; @@ -793,8 +817,9 @@ ml_static_protect( } } - if (vaddr_cur > vaddr) + if (vaddr_cur > vaddr) { flush_mmu_tlb_region(vaddr, (vm_size_t)(vaddr_cur - vaddr)); + } return result; } @@ -805,22 +830,23 @@ ml_static_protect( */ void ml_static_mfree( - vm_offset_t vaddr, - vm_size_t size) + vm_offset_t vaddr, + vm_size_t size) { vm_offset_t vaddr_cur; ppnum_t ppn; uint32_t freed_pages = 0; /* It is acceptable (if bad) to fail to free. */ - if (vaddr < VM_MIN_KERNEL_ADDRESS) + if (vaddr < VM_MIN_KERNEL_ADDRESS) { return; + } - assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */ + assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */ for (vaddr_cur = vaddr; - vaddr_cur < trunc_page_32(vaddr + size); - vaddr_cur += PAGE_SIZE) { + vaddr_cur < trunc_page_32(vaddr + size); + vaddr_cur += PAGE_SIZE) { ppn = pmap_find_phys(kernel_pmap, vaddr_cur); if (ppn != (vm_offset_t) NULL) { /* @@ -847,7 +873,7 @@ ml_static_mfree( vm_page_wire_count -= freed_pages; vm_page_wire_count_initial -= freed_pages; vm_page_unlock_queues(); -#if DEBUG +#if DEBUG kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn); #endif } @@ -867,25 +893,30 @@ ml_vtophys(vm_offset_t vaddr) * assumed to be wired; e.g., no attempt is made to guarantee that the * translations obtained remain valid for the duration of the copy process. */ -vm_size_t +vm_size_t ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size) { addr64_t cur_phys_dst, cur_phys_src; uint32_t count, nbytes = 0; while (size > 0) { - if (!(cur_phys_src = kvtophys(virtsrc))) + if (!(cur_phys_src = kvtophys(virtsrc))) { break; - if (!(cur_phys_dst = kvtophys(virtdst))) + } + if (!(cur_phys_dst = kvtophys(virtdst))) { break; + } if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) || - !pmap_valid_address(trunc_page_64(cur_phys_src))) + !pmap_valid_address(trunc_page_64(cur_phys_src))) { break; + } count = PAGE_SIZE - (cur_phys_src & PAGE_MASK); - if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) + if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) { count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK); - if (count > size) + } + if (count > size) { count = size; + } bcopy_phys(cur_phys_src, cur_phys_dst, count); @@ -908,20 +939,24 @@ ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size) * FALSE otherwise. 
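 *
 * Illustrative sketch, not part of this change: a debugger-path caller
 * would typically pair the two nofault helpers, e.g.
 *
 *	if (ml_validate_nofault(src, len)) {
 *		(void) ml_nofault_copy(src, (vm_offset_t)dst, len);
 *	}
 *
 * where 'src', 'dst' and 'len' are hypothetical wired kernel mappings
 * and a byte count.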
*/ -boolean_t ml_validate_nofault( +boolean_t +ml_validate_nofault( vm_offset_t virtsrc, vm_size_t size) { addr64_t cur_phys_src; uint32_t count; while (size > 0) { - if (!(cur_phys_src = kvtophys(virtsrc))) + if (!(cur_phys_src = kvtophys(virtsrc))) { return FALSE; - if (!pmap_valid_address(trunc_page_64(cur_phys_src))) + } + if (!pmap_valid_address(trunc_page_64(cur_phys_src))) { return FALSE; + } count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); - if (count > size) + if (count > size) { count = (uint32_t)size; + } virtsrc += count; size -= count; @@ -946,11 +981,11 @@ active_rt_threads(__unused boolean_t active) } void -thread_tell_urgency(__unused int urgency, - __unused uint64_t rt_period, - __unused uint64_t rt_deadline, - __unused uint64_t sched_latency, - __unused thread_t nthread) +thread_tell_urgency(__unused thread_urgency_t urgency, + __unused uint64_t rt_period, + __unused uint64_t rt_deadline, + __unused uint64_t sched_latency, + __unused thread_t nthread) { } @@ -962,15 +997,17 @@ machine_run_count(__unused uint32_t count) processor_t machine_choose_processor(__unused processor_set_t pset, processor_t processor) { - return (processor); + return processor; } -boolean_t machine_timeout_suspended(void) { +boolean_t +machine_timeout_suspended(void) +{ return FALSE; } -kern_return_t -ml_interrupt_prewarm(__unused uint64_t deadline) +kern_return_t +ml_interrupt_prewarm(__unused uint64_t deadline) { return KERN_FAILURE; } @@ -1009,29 +1046,38 @@ ml_delay_should_spin(uint64_t interval) } } -void ml_delay_on_yield(void) {} +void +ml_delay_on_yield(void) +{ +} -boolean_t ml_thread_is64bit(thread_t thread) +boolean_t +ml_thread_is64bit(thread_t thread) { - return (thread_is_64bit_addr(thread)); + return thread_is_64bit_addr(thread); } -void ml_timer_evaluate(void) { +void +ml_timer_evaluate(void) +{ } boolean_t -ml_timer_forced_evaluation(void) { +ml_timer_forced_evaluation(void) +{ return FALSE; } uint64_t -ml_energy_stat(__unused thread_t t) { +ml_energy_stat(__unused thread_t t) +{ return 0; } void -ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) { +ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) +{ #if CONFIG_EMBEDDED /* * For now: update the resource coalition stats of the @@ -1042,7 +1088,8 @@ ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) { } uint64_t -ml_gpu_stat(__unused thread_t t) { +ml_gpu_stat(__unused thread_t t) +{ return 0; } @@ -1051,7 +1098,9 @@ static void timer_state_event(boolean_t switch_to_kernel) { thread_t thread = current_thread(); - if (!thread->precise_user_kernel_time) return; + if (!thread->precise_user_kernel_time) { + return; + } processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data; uint64_t now = ml_get_timebase(); @@ -1078,6 +1127,15 @@ timer_state_event_kernel_to_user(void) } #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ +uint32_t +get_arm_cpu_version(void) +{ + uint32_t value = machine_read_midr(); + + /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. 
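+	 *
+	 * (Illustrative note, not part of the original change: with the
+	 * architectural MIDR layout, variant in bits [23:20] and revision in
+	 * bits [3:0], a variant of 0x2 and a revision of 0x1 compose to 0x21.)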
*/ + return ((value & MIDR_REV_MASK) >> MIDR_REV_SHIFT) | ((value & MIDR_VAR_MASK) >> (MIDR_VAR_SHIFT - 4)); +} + boolean_t user_cont_hwclock_allowed(void) { @@ -1117,27 +1175,28 @@ current_thread(void) uintptr_t arm_user_protect_begin(thread_t thread) { - uintptr_t ttbr0, asid = 0; // kernel asid + uintptr_t ttbr0, asid = 0; // kernel asid - ttbr0 = __builtin_arm_mrc(15,0,2,0,0); // Get TTBR0 - if (ttbr0 != thread->machine.kptw_ttb) { - __builtin_arm_mcr(15,0,thread->machine.kptw_ttb,2,0,0); // Set TTBR0 - __builtin_arm_mcr(15,0,asid,13,0,1); // Set CONTEXTIDR - __builtin_arm_isb(ISB_SY); - } - return ttbr0; + ttbr0 = __builtin_arm_mrc(15, 0, 2, 0, 0); // Get TTBR0 + if (ttbr0 != thread->machine.kptw_ttb) { + __builtin_arm_mcr(15, 0, thread->machine.kptw_ttb, 2, 0, 0); // Set TTBR0 + __builtin_arm_mcr(15, 0, asid, 13, 0, 1); // Set CONTEXTIDR + __builtin_arm_isb(ISB_SY); + } + return ttbr0; } void arm_user_protect_end(thread_t thread, uintptr_t ttbr0, boolean_t disable_interrupts) { - if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) { - if (disable_interrupts) - __asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ - __builtin_arm_mcr(15,0,thread->machine.uptw_ttb,2,0,0); // Set TTBR0 - __builtin_arm_mcr(15,0,thread->machine.asid,13,0,1); // Set CONTEXTIDR with thread asid - __builtin_arm_dsb(DSB_ISH); - __builtin_arm_isb(ISB_SY); - } + if ((ttbr0 != thread->machine.kptw_ttb) && (thread->machine.uptw_ttb != thread->machine.kptw_ttb)) { + if (disable_interrupts) { + __asm__ volatile ("cpsid if" ::: "memory"); // Disable FIQ/IRQ + } + __builtin_arm_mcr(15, 0, thread->machine.uptw_ttb, 2, 0, 0); // Set TTBR0 + __builtin_arm_mcr(15, 0, thread->machine.asid, 13, 0, 1); // Set CONTEXTIDR with thread asid + __builtin_arm_dsb(DSB_ISH); + __builtin_arm_isb(ISB_SY); + } } #endif // __ARM_USER_PROTECT__ diff --git a/osfmk/arm/machine_routines.h b/osfmk/arm/machine_routines.h index d5a77dd9b..545403eee 100644 --- a/osfmk/arm/machine_routines.h +++ b/osfmk/arm/machine_routines.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2015 Apple Inc. All rights reserved. + * Copyright (c) 2007-2019 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -29,8 +29,8 @@ * @OSF_COPYRIGHT@ */ -#ifndef _ARM_MACHINE_ROUTINES_H_ -#define _ARM_MACHINE_ROUTINES_H_ +#ifndef _ARM_MACHINE_ROUTINES_H_ +#define _ARM_MACHINE_ROUTINES_H_ #include #include @@ -60,6 +60,7 @@ boolean_t ml_get_interrupts_enabled(void); /* Set Interrupts Enabled */ boolean_t ml_set_interrupts_enabled(boolean_t enable); +boolean_t ml_early_set_interrupts_enabled(boolean_t enable); /* Check if running at interrupt context */ boolean_t ml_at_interrupt_context(void); @@ -76,6 +77,7 @@ void ml_check_interrupts_disabled_duration(thread_t thread); #endif #ifdef XNU_KERNEL_PRIVATE +extern bool ml_snoop_thread_is_on_core(thread_t thread); extern boolean_t ml_is_quiescing(void); extern void ml_set_is_quiescing(boolean_t); extern uint64_t ml_get_booter_memory_size(void); @@ -88,18 +90,18 @@ typedef void (*time_base_enable_t)(cpu_id_t cpu_id, boolean_t enable); typedef void (*cache_dispatch_t)(cpu_id_t cpu_id, unsigned int select, unsigned int param0, unsigned int param1); #endif -#define CacheConfig 0x00000000UL -#define CacheControl 0x00000001UL -#define CacheClean 0x00000002UL -#define CacheCleanRegion 0x00000003UL -#define CacheCleanFlush 0x00000004UL -#define CacheCleanFlushRegion 0x00000005UL -#define CacheShutdown 0x00000006UL +#define CacheConfig 0x00000000UL +#define CacheControl 0x00000001UL +#define CacheClean 0x00000002UL +#define CacheCleanRegion 0x00000003UL +#define CacheCleanFlush 0x00000004UL +#define CacheCleanFlushRegion 0x00000005UL +#define CacheShutdown 0x00000006UL -#define CacheControlEnable 0x00000000UL +#define CacheControlEnable 0x00000000UL -#define CacheConfigCCSIDR 0x00000001UL -#define CacheConfigSize 0x00000100UL +#define CacheConfigCCSIDR 0x00000001UL +#define CacheConfigSize 0x00000100UL /* Type for the Processor Idle function */ typedef void (*processor_idle_t)(cpu_id_t cpu_id, boolean_t enter, uint64_t *new_timeout_ticks); @@ -120,73 +122,70 @@ typedef void (*lockdown_handler_t)(void *); typedef void (*platform_error_handler_t)(void *refcon, vm_offset_t fault_addr); /* - * The exception callback (ex_cb) module allows kernel drivers to - * register and receive callbacks for exceptions, and indicate + * The exception callback (ex_cb) module allows kernel drivers to + * register and receive callbacks for exceptions, and indicate * actions to be taken by the platform kernel - * Currently this is supported for ARM64 but extending support for ARM32 + * Currently this is supported for ARM64 but extending support for ARM32 * should be straightforward */ /* Supported exception classes for callbacks */ -typedef enum -{ +typedef enum{ EXCB_CLASS_ILLEGAL_INSTR_SET, #ifdef CONFIG_XNUPOST EXCB_CLASS_TEST1, EXCB_CLASS_TEST2, EXCB_CLASS_TEST3, #endif - EXCB_CLASS_MAX // this must be last + EXCB_CLASS_MAX // this must be last } ex_cb_class_t; /* Actions indicated by callbacks to be taken by platform kernel */ -typedef enum -{ - EXCB_ACTION_RERUN, // re-run the faulting instruction - EXCB_ACTION_NONE, // continue normal exception handling +typedef enum{ + EXCB_ACTION_RERUN, // re-run the faulting instruction + EXCB_ACTION_NONE, // continue normal exception handling #ifdef CONFIG_XNUPOST EXCB_ACTION_TEST_FAIL, #endif } ex_cb_action_t; -/* - * Exception state - * We cannot use a private kernel data structure such as arm_saved_state_t - * The CPSR and ESR are not clobbered when the callback function is invoked so +/* + * Exception state + * We cannot use a private kernel data structure such as arm_saved_state_t + * The CPSR 
and ESR are not clobbered when the callback function is invoked so * those registers can be examined by the callback function; * the same is done in the platform error handlers */ -typedef struct -{ +typedef struct{ vm_offset_t far; } ex_cb_state_t; /* callback type definition */ typedef ex_cb_action_t (*ex_cb_t) ( - ex_cb_class_t cb_class, - void *refcon,// provided at registration - const ex_cb_state_t *state // exception state + ex_cb_class_t cb_class, + void *refcon,// provided at registration + const ex_cb_state_t *state // exception state ); -/* - * Callback registration - * Currently we support only one registered callback per class but +/* + * Callback registration + * Currently we support only one registered callback per class but * it should be possible to support more callbacks */ kern_return_t ex_cb_register( - ex_cb_class_t cb_class, - ex_cb_t cb, - void *refcon ); + ex_cb_class_t cb_class, + ex_cb_t cb, + void *refcon ); /* * Called internally by platform kernel to invoke the registered callback for class */ ex_cb_action_t ex_cb_invoke( - ex_cb_class_t cb_class, - vm_offset_t far); + ex_cb_class_t cb_class, + vm_offset_t far); void ml_parse_cpu_topology(void); @@ -201,14 +200,14 @@ int ml_get_max_cpu_number(void); /* Struct for ml_cpu_get_info */ struct ml_cpu_info { - unsigned long vector_unit; - unsigned long cache_line_size; - unsigned long l1_icache_size; - unsigned long l1_dcache_size; - unsigned long l2_settings; - unsigned long l2_cache_size; - unsigned long l3_settings; - unsigned long l3_cache_size; + unsigned long vector_unit; + unsigned long cache_line_size; + unsigned long l1_icache_size; + unsigned long l1_dcache_size; + unsigned long l2_settings; + unsigned long l2_cache_size; + unsigned long l3_settings; + unsigned long l3_cache_size; }; typedef struct ml_cpu_info ml_cpu_info_t; @@ -220,48 +219,69 @@ cluster_type_t ml_get_boot_cluster(void); /* Struct for ml_processor_register */ struct ml_processor_info { - cpu_id_t cpu_id; - vm_offset_t start_paddr; - boolean_t supports_nap; - void *platform_cache_dispatch; - time_base_enable_t time_base_enable; - processor_idle_t processor_idle; - idle_tickle_t *idle_tickle; - idle_timer_t idle_timer; - void *idle_timer_refcon; - vm_offset_t powergate_stub_addr; - uint32_t powergate_stub_length; - uint32_t powergate_latency; - platform_error_handler_t platform_error_handler; - uint64_t regmap_paddr; - uint32_t phys_id; - uint32_t log_id; - uint32_t l2_access_penalty; - uint32_t cluster_id; - cluster_type_t cluster_type; - uint32_t l2_cache_id; - uint32_t l2_cache_size; - uint32_t l3_cache_id; - uint32_t l3_cache_size; + cpu_id_t cpu_id; + vm_offset_t start_paddr; + boolean_t supports_nap; + void *platform_cache_dispatch; + time_base_enable_t time_base_enable; + processor_idle_t processor_idle; + idle_tickle_t *idle_tickle; + idle_timer_t idle_timer; + void *idle_timer_refcon; + vm_offset_t powergate_stub_addr; + uint32_t powergate_stub_length; + uint32_t powergate_latency; + platform_error_handler_t platform_error_handler; + uint64_t regmap_paddr; + uint32_t phys_id; + uint32_t log_id; + uint32_t l2_access_penalty; + uint32_t cluster_id; + cluster_type_t cluster_type; + uint32_t l2_cache_id; + uint32_t l2_cache_size; + uint32_t l3_cache_id; + uint32_t l3_cache_size; }; typedef struct ml_processor_info ml_processor_info_t; -#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) /* Struct for ml_init_timebase */ struct tbd_ops { - void 
(*tbd_fiq_handler)(void); - uint32_t (*tbd_get_decrementer)(void); - void (*tbd_set_decrementer)(uint32_t dec_value); + void (*tbd_fiq_handler)(void); + uint32_t (*tbd_get_decrementer)(void); + void (*tbd_set_decrementer)(uint32_t dec_value); }; typedef struct tbd_ops *tbd_ops_t; typedef struct tbd_ops tbd_ops_data_t; #endif -/* Register a processor */ -kern_return_t ml_processor_register( - ml_processor_info_t *ml_processor_info, - processor_t *processor, - ipi_handler_t *ipi_handler); +/*! + * @function ml_processor_register + * + * @abstract callback from platform kext to register processor + * + * @discussion This function is called by the platform kext when a processor is + * being registered. This is called while running on the CPU itself, as part of + * its initialization. + * + * @param ml_processor_info provides machine-specific information about the + * processor to xnu. + * + * @param processor is set as an out-parameter to an opaque handle that should + * be used by the platform kext when referring to this processor in the future. + * + * @param ipi_handler is set as an out-parameter to the function that should be + * registered as the IPI handler. + * + * @param pmi_handler is set as an out-parameter to the function that should be + * registered as the PMI handler. + * + * @returns KERN_SUCCESS on success and an error code, otherwise. + */ +kern_return_t ml_processor_register(ml_processor_info_t *ml_processor_info, + processor_t *processor, ipi_handler_t *ipi_handler, + perfmon_interrupt_handler_func *pmi_handler); /* Register a lockdown handler */ kern_return_t ml_lockdown_handler_register(lockdown_handler_t, void *); @@ -274,27 +294,27 @@ boolean_t ml_wants_panic_trap_to_debugger(void); /* Machine layer routine for intercepting panics */ void ml_panic_trap_to_debugger(const char *panic_format_str, - va_list *panic_args, - unsigned int reason, - void *ctx, - uint64_t panic_options_mask, - unsigned long panic_caller); + va_list *panic_args, + unsigned int reason, + void *ctx, + uint64_t panic_options_mask, + unsigned long panic_caller); #endif /* XNU_KERNEL_PRIVATE */ /* Initialize Interrupts */ void ml_install_interrupt_handler( - void *nub, - int source, - void *target, - IOInterruptHandler handler, - void *refCon); + void *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon); vm_offset_t -ml_static_vtop( + ml_static_vtop( vm_offset_t); vm_offset_t -ml_static_ptovirt( + ml_static_ptovirt( vm_offset_t); vm_offset_t ml_static_slide( @@ -346,6 +366,12 @@ unsigned int ml_io_read16(uintptr_t iovaddr); unsigned int ml_io_read32(uintptr_t iovaddr); unsigned long long ml_io_read64(uintptr_t iovaddr); +extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size); +extern void ml_io_write8(uintptr_t vaddr, uint8_t val); +extern void ml_io_write16(uintptr_t vaddr, uint16_t val); +extern void ml_io_write32(uintptr_t vaddr, uint32_t val); +extern void ml_io_write64(uintptr_t vaddr, uint64_t val); + /* Read physical address double word */ unsigned long long ml_phys_read_double( vm_offset_t paddr); @@ -386,9 +412,9 @@ void ml_static_mfree( kern_return_t ml_static_protect( - vm_offset_t start, - vm_size_t size, - vm_prot_t new_prot); + vm_offset_t start, + vm_size_t size, + vm_prot_t new_prot); /* virtual to physical on wired pages */ vm_offset_t ml_vtophys( @@ -400,24 +426,24 @@ void ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info); #endif /* __APPLE_API_UNSTABLE */ #ifdef __APPLE_API_PRIVATE -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE vm_size_t 
ml_nofault_copy( - vm_offset_t virtsrc, - vm_offset_t virtdst, + vm_offset_t virtsrc, + vm_offset_t virtdst, vm_size_t size); boolean_t ml_validate_nofault( vm_offset_t virtsrc, vm_size_t size); #endif /* XNU_KERNEL_PRIVATE */ -#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) /* IO memory map services */ /* Map memory map IO space */ vm_offset_t ml_io_map( - vm_offset_t phys_addr, + vm_offset_t phys_addr, vm_size_t size); vm_offset_t ml_io_map_wcomb( - vm_offset_t phys_addr, + vm_offset_t phys_addr, vm_size_t size); void ml_get_bouncepool_info( @@ -425,18 +451,18 @@ void ml_get_bouncepool_info( vm_size_t *size); vm_map_address_t ml_map_high_window( - vm_offset_t phys_addr, - vm_size_t len); + vm_offset_t phys_addr, + vm_size_t len); /* boot memory allocation */ vm_offset_t ml_static_malloc( vm_size_t size); void ml_init_timebase( - void *args, - tbd_ops_t tbd_funcs, - vm_offset_t int_address, - vm_offset_t int_value); + void *args, + tbd_ops_t tbd_funcs, + vm_offset_t int_address, + vm_offset_t int_value); uint64_t ml_get_timebase(void); @@ -472,12 +498,12 @@ void ml_init_arm_debug_interface(void *args, vm_offset_t virt_address); /* These calls are only valid if __ARM_USER_PROTECT__ is defined */ uintptr_t arm_user_protect_begin( - thread_t thread); + thread_t thread); void arm_user_protect_end( - thread_t thread, - uintptr_t up, - boolean_t disable_interrupts); + thread_t thread, + uintptr_t up, + boolean_t disable_interrupts); #endif /* PEXPERT_KERNEL_PRIVATE || MACH_KERNEL_PRIVATE */ @@ -493,10 +519,10 @@ void ml_thread_policy( unsigned policy_id, unsigned policy_info); -#define MACHINE_GROUP 0x00000001 -#define MACHINE_NETWORK_GROUP 0x10000000 -#define MACHINE_NETWORK_WORKLOOP 0x00000001 -#define MACHINE_NETWORK_NETISR 0x00000002 +#define MACHINE_GROUP 0x00000001 +#define MACHINE_NETWORK_GROUP 0x10000000 +#define MACHINE_NETWORK_WORKLOOP 0x00000001 +#define MACHINE_NETWORK_NETISR 0x00000002 /* Initialize the maximum number of CPUs */ void ml_init_max_cpus( @@ -512,17 +538,17 @@ unsigned int ml_get_machine_mem(void); #ifdef XNU_KERNEL_PRIVATE /* Return max offset */ vm_map_offset_t ml_get_max_offset( - boolean_t is64, + boolean_t is64, unsigned int option); -#define MACHINE_MAX_OFFSET_DEFAULT 0x01 -#define MACHINE_MAX_OFFSET_MIN 0x02 -#define MACHINE_MAX_OFFSET_MAX 0x04 -#define MACHINE_MAX_OFFSET_DEVICE 0x08 +#define MACHINE_MAX_OFFSET_DEFAULT 0x01 +#define MACHINE_MAX_OFFSET_MIN 0x02 +#define MACHINE_MAX_OFFSET_MAX 0x04 +#define MACHINE_MAX_OFFSET_DEVICE 0x08 #endif -extern void ml_cpu_up(void); -extern void ml_cpu_down(void); -extern void ml_arm_sleep(void); +extern void ml_cpu_up(void); +extern void ml_cpu_down(void); +extern void ml_arm_sleep(void); extern uint64_t ml_get_wake_timebase(void); extern uint64_t ml_get_conttime_wake_time(void); @@ -540,27 +566,27 @@ extern kern_return_t ml_interrupt_prewarm(uint64_t deadline); vm_offset_t ml_stack_remaining(void); #ifdef MACH_KERNEL_PRIVATE -uint32_t get_fpscr(void); -void set_fpscr(uint32_t); +uint32_t get_fpscr(void); +void set_fpscr(uint32_t); #ifdef __arm64__ unsigned long update_mdscr(unsigned long clear, unsigned long set); #endif /* __arm64__ */ -extern void init_vfp(void); -extern boolean_t get_vfp_enabled(void); -extern void arm_debug_set_cp14(arm_debug_state_t *debug_state); -extern void fiq_context_init(boolean_t enable_fiq); -extern void fiq_context_bootstrap(boolean_t enable_fiq); +extern void init_vfp(void); +extern boolean_t 
get_vfp_enabled(void); +extern void arm_debug_set_cp14(arm_debug_state_t *debug_state); +extern void fiq_context_init(boolean_t enable_fiq); +extern void fiq_context_bootstrap(boolean_t enable_fiq); -extern void reenable_async_aborts(void); -extern void cpu_idle_wfi(boolean_t wfi_fast); +extern void reenable_async_aborts(void); +extern void cpu_idle_wfi(boolean_t wfi_fast); #ifdef MONITOR -#define MONITOR_SET_ENTRY 0x800 /* Set kernel entry point from monitor */ -#define MONITOR_LOCKDOWN 0x801 /* Enforce kernel text/rodata integrity */ -unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1, - uintptr_t arg2, uintptr_t arg3); +#define MONITOR_SET_ENTRY 0x800 /* Set kernel entry point from monitor */ +#define MONITOR_LOCKDOWN 0x801 /* Enforce kernel text/rodata integrity */ +unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1, + uintptr_t arg2, uintptr_t arg3); #endif /* MONITOR */ #if defined(KERNEL_INTEGRITY_KTRR) @@ -573,11 +599,11 @@ extern void set_vbar_el1(uint64_t); #endif /* __ARM_KERNEL_PROTECT__ */ #endif /* MACH_KERNEL_PRIVATE */ -extern uint32_t arm_debug_read_dscr(void); +extern uint32_t arm_debug_read_dscr(void); -extern int set_be_bit(void); -extern int clr_be_bit(void); -extern int be_tracing(void); +extern int set_be_bit(void); +extern int clr_be_bit(void); +extern int be_tracing(void); typedef void (*broadcastFunc) (void *); unsigned int cpu_broadcast_xcall(uint32_t *, boolean_t, broadcastFunc, void *); @@ -596,57 +622,57 @@ void cpu_qos_update_register(cpu_qos_update_t); #endif /* __arm64__ */ struct going_on_core { - uint64_t thread_id; - uint16_t qos_class; - uint16_t urgency; /* XCPM compatibility */ - uint32_t is_32_bit : 1; /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */ - uint32_t is_kernel_thread : 1; - uint64_t thread_group_id; - void *thread_group_data; - uint64_t scheduling_latency; /* absolute time between when thread was made runnable and this ctx switch */ - uint64_t start_time; - uint64_t scheduling_latency_at_same_basepri; - uint32_t energy_estimate_nj; /* return: In nanojoules */ - /* smaller of the time between last change to base priority and ctx switch and scheduling_latency */ + uint64_t thread_id; + uint16_t qos_class; + uint16_t urgency; /* XCPM compatibility */ + uint32_t is_32_bit : 1; /* uses 32-bit ISA/register state in userspace (which may differ from address space size) */ + uint32_t is_kernel_thread : 1; + uint64_t thread_group_id; + void *thread_group_data; + uint64_t scheduling_latency; /* absolute time between when thread was made runnable and this ctx switch */ + uint64_t start_time; + uint64_t scheduling_latency_at_same_basepri; + uint32_t energy_estimate_nj; /* return: In nanojoules */ + /* smaller of the time between last change to base priority and ctx switch and scheduling_latency */ }; typedef struct going_on_core *going_on_core_t; struct going_off_core { - uint64_t thread_id; - uint32_t energy_estimate_nj; /* return: In nanojoules */ - uint32_t reserved; - uint64_t end_time; - uint64_t thread_group_id; - void *thread_group_data; + uint64_t thread_id; + uint32_t energy_estimate_nj; /* return: In nanojoules */ + uint32_t reserved; + uint64_t end_time; + uint64_t thread_group_id; + void *thread_group_data; }; typedef struct going_off_core *going_off_core_t; struct thread_group_data { - uint64_t thread_group_id; - void *thread_group_data; - uint32_t thread_group_size; - uint32_t thread_group_flags; + uint64_t thread_group_id; + void *thread_group_data; + uint32_t 
thread_group_size; + uint32_t thread_group_flags; }; typedef struct thread_group_data *thread_group_data_t; struct perfcontrol_max_runnable_latency { - uint64_t max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */]; + uint64_t max_scheduling_latencies[4 /* THREAD_URGENCY_MAX */]; }; typedef struct perfcontrol_max_runnable_latency *perfcontrol_max_runnable_latency_t; struct perfcontrol_work_interval { - uint64_t thread_id; - uint16_t qos_class; - uint16_t urgency; - uint32_t flags; // notify - uint64_t work_interval_id; - uint64_t start; - uint64_t finish; - uint64_t deadline; - uint64_t next_start; - uint64_t thread_group_id; - void *thread_group_data; - uint32_t create_flags; + uint64_t thread_id; + uint16_t qos_class; + uint16_t urgency; + uint32_t flags; // notify + uint64_t work_interval_id; + uint64_t start; + uint64_t finish; + uint64_t deadline; + uint64_t next_start; + uint64_t thread_group_id; + void *thread_group_data; + uint32_t create_flags; }; typedef struct perfcontrol_work_interval *perfcontrol_work_interval_t; @@ -657,27 +683,27 @@ typedef enum { } work_interval_ctl_t; struct perfcontrol_work_interval_instance { - work_interval_ctl_t ctl; - uint32_t create_flags; - uint64_t complexity; - uint64_t thread_id; - uint64_t work_interval_id; - uint64_t instance_id; /* out: start, in: update/finish */ - uint64_t start; - uint64_t finish; - uint64_t deadline; - uint64_t thread_group_id; - void *thread_group_data; + work_interval_ctl_t ctl; + uint32_t create_flags; + uint64_t complexity; + uint64_t thread_id; + uint64_t work_interval_id; + uint64_t instance_id; /* out: start, in: update/finish */ + uint64_t start; + uint64_t finish; + uint64_t deadline; + uint64_t thread_group_id; + void *thread_group_data; }; typedef struct perfcontrol_work_interval_instance *perfcontrol_work_interval_instance_t; -/* - * Structure to export per-CPU counters as part of the CLPC callout. - * Contains only the fixed CPU counters (instructions and cycles); CLPC - * would call back into XNU to get the configurable counters if needed. +/* + * Structure to export per-CPU counters as part of the CLPC callout. + * Contains only the fixed CPU counters (instructions and cycles); CLPC + * would call back into XNU to get the configurable counters if needed. */ struct perfcontrol_cpu_counters { - uint64_t instructions; + uint64_t instructions; uint64_t cycles; }; @@ -687,7 +713,7 @@ struct perfcontrol_cpu_counters { struct perfcontrol_thread_data { /* * Energy estimate (return value) - * The field is populated by CLPC and used to update the + * The field is populated by CLPC and used to update the * energy estimate of the thread */ uint32_t energy_estimate_nj; @@ -697,11 +723,11 @@ struct perfcontrol_thread_data { uint64_t thread_id; /* Thread Group ID */ uint64_t thread_group_id; - /* - * Scheduling latency for threads at the same base priority. - * Calculated by the scheduler and passed into CLPC. The field is - * populated only in the thread_data structure for the thread - * going on-core. + /* + * Scheduling latency for threads at the same base priority. + * Calculated by the scheduler and passed into CLPC. The field is + * populated only in the thread_data structure for the thread + * going on-core. 
*/ uint64_t scheduling_latency_at_same_basepri; /* Thread Group data pointer */ @@ -794,7 +820,7 @@ typedef void (*sched_perfcontrol_deadline_passed_t)(uint64_t deadline); /* * Context Switch Callout - * + * * Parameters: * event - The perfcontrol_event for this callout * cpu_id - The CPU doing the context switch @@ -866,13 +892,15 @@ extern void sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t c * Non-recommended cores can still be used to field interrupts or run bound threads. * This should be called with interrupts enabled and no scheduler locks held. */ -#define ALL_CORES_RECOMMENDED (~(uint32_t)0) +#define ALL_CORES_RECOMMENDED (~(uint32_t)0) extern void sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores); extern void sched_perfcontrol_thread_group_recommend(void *data, cluster_type_t recommendation); extern void sched_override_recommended_cores_for_sleep(void); extern void sched_restore_recommended_cores_after_sleep(void); +extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores); + /* * Update the deadline after which sched_perfcontrol_deadline_passed will be called. * Returns TRUE if it successfully canceled a previously set callback, @@ -886,23 +914,23 @@ extern void sched_restore_recommended_cores_after_sleep(void); extern boolean_t sched_perfcontrol_update_callback_deadline(uint64_t deadline); typedef enum perfcontrol_callout_type { - PERFCONTROL_CALLOUT_ON_CORE, - PERFCONTROL_CALLOUT_OFF_CORE, - PERFCONTROL_CALLOUT_CONTEXT, - PERFCONTROL_CALLOUT_STATE_UPDATE, - /* Add other callout types here */ - PERFCONTROL_CALLOUT_MAX + PERFCONTROL_CALLOUT_ON_CORE, + PERFCONTROL_CALLOUT_OFF_CORE, + PERFCONTROL_CALLOUT_CONTEXT, + PERFCONTROL_CALLOUT_STATE_UPDATE, + /* Add other callout types here */ + PERFCONTROL_CALLOUT_MAX } perfcontrol_callout_type_t; typedef enum perfcontrol_callout_stat { - PERFCONTROL_STAT_INSTRS, - PERFCONTROL_STAT_CYCLES, - /* Add other stat types here */ - PERFCONTROL_STAT_MAX + PERFCONTROL_STAT_INSTRS, + PERFCONTROL_STAT_CYCLES, + /* Add other stat types here */ + PERFCONTROL_STAT_MAX } perfcontrol_callout_stat_t; uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type, - perfcontrol_callout_stat_t stat); + perfcontrol_callout_stat_t stat); #endif /* KERNEL_PRIVATE */ @@ -910,6 +938,7 @@ uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type, boolean_t machine_timeout_suspended(void); void ml_get_power_state(boolean_t *, boolean_t *); +uint32_t get_arm_cpu_version(void); boolean_t user_cont_hwclock_allowed(void); boolean_t user_timebase_allowed(void); boolean_t ml_thread_is64bit(thread_t thread); diff --git a/osfmk/arm/machine_routines_common.c b/osfmk/arm/machine_routines_common.c index 0c6da73f7..2cb596872 100644 --- a/osfmk/arm/machine_routines_common.c +++ b/osfmk/arm/machine_routines_common.c @@ -76,20 +76,20 @@ sched_perfcontrol_thread_group_default(thread_group_data_t data __unused) { } -static void +static void sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused) { } static void sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused, - perfcontrol_work_interval_t work_interval __unused) + perfcontrol_work_interval_t work_interval __unused) { } static void sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused, - perfcontrol_work_interval_instance_t instance __unused) + perfcontrol_work_interval_instance_t instance __unused) { } @@ -100,9 +100,9 @@ 
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline) static void sched_perfcontrol_csw_default( - __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp, - __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore, - __unused struct perfcontrol_thread_data *oncore, + __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp, + __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore, + __unused struct perfcontrol_thread_data *oncore, __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused) { } @@ -139,7 +139,6 @@ sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, un if (callbacks) { - if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) { if (callbacks->work_interval_ctl != NULL) { sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl; @@ -193,7 +192,7 @@ sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, un } else { sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default; } - + if (callbacks->work_interval_notify != NULL) { sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify; } else { @@ -217,9 +216,9 @@ sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, un static void -machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data, - thread_t thread, - uint64_t same_pri_latency) +machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data, + thread_t thread, + uint64_t same_pri_latency) { bzero(data, sizeof(struct perfcontrol_thread_data)); data->perfctl_class = thread_get_perfcontrol_class(thread); @@ -246,45 +245,50 @@ static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX]; #if MONOTONIC static inline -bool perfcontrol_callout_counters_begin(uint64_t *counters) +bool +perfcontrol_callout_counters_begin(uint64_t *counters) { - if (!perfcontrol_callout_stats_enabled) - return false; - mt_fixed_counts(counters); - return true; + if (!perfcontrol_callout_stats_enabled) { + return false; + } + mt_fixed_counts(counters); + return true; } static inline -void perfcontrol_callout_counters_end(uint64_t *start_counters, - perfcontrol_callout_type_t type) +void +perfcontrol_callout_counters_end(uint64_t *start_counters, + perfcontrol_callout_type_t type) { - uint64_t end_counters[MT_CORE_NFIXED]; - mt_fixed_counts(end_counters); - atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES], - end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed); + uint64_t end_counters[MT_CORE_NFIXED]; + mt_fixed_counts(end_counters); + atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES], + end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed); #ifdef MT_CORE_INSTRS - atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS], - end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed); + atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS], + end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed); #endif /* defined(MT_CORE_INSTRS) */ - atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed); + atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed); } #endif /* MONOTONIC */ 
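/*
 * Illustrative note, not part of this change: with
 * perfcontrol_callout_stats_enabled set, a KERNEL_PRIVATE consumer can
 * read back the running averages that the begin/end helpers above
 * accumulate, via the function below, e.g.
 *
 *	uint64_t avg_cycles = perfcontrol_callout_stat_avg(
 *	    PERFCONTROL_CALLOUT_ON_CORE, PERFCONTROL_STAT_CYCLES);
 */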
-uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type, - perfcontrol_callout_stat_t stat) +uint64_t +perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type, + perfcontrol_callout_stat_t stat) { - if (!perfcontrol_callout_stats_enabled) - return 0; - return (perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type]); + if (!perfcontrol_callout_stats_enabled) { + return 0; + } + return perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type]; } void machine_switch_perfcontrol_context(perfcontrol_event event, - uint64_t timestamp, - uint32_t flags, - uint64_t new_thread_same_pri_latency, - thread_t old, - thread_t new) + uint64_t timestamp, + uint32_t flags, + uint64_t new_thread_same_pri_latency, + thread_t old, + thread_t new) { if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) { perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old); @@ -298,7 +302,7 @@ machine_switch_perfcontrol_context(perfcontrol_event event, struct perfcontrol_thread_data offcore, oncore; machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0); machine_switch_populate_perfcontrol_thread_data(&oncore, new, - new_thread_same_pri_latency); + new_thread_same_pri_latency); machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters); #if MONOTONIC @@ -306,9 +310,11 @@ machine_switch_perfcontrol_context(perfcontrol_event event, bool ctrs_enabled = perfcontrol_callout_counters_begin(counters); #endif /* MONOTONIC */ sched_perfcontrol_csw(event, cpu_id, timestamp, flags, - &offcore, &oncore, &cpu_counters, NULL); + &offcore, &oncore, &cpu_counters, NULL); #if MONOTONIC - if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT); + if (ctrs_enabled) { + perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT); + } #endif /* MONOTONIC */ #if __arm64__ @@ -320,12 +326,13 @@ machine_switch_perfcontrol_context(perfcontrol_event event, void machine_switch_perfcontrol_state_update(perfcontrol_event event, - uint64_t timestamp, - uint32_t flags, - thread_t thread) + uint64_t timestamp, + uint32_t flags, + thread_t thread) { - if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) + if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) { return; + } uint32_t cpu_id = (uint32_t)cpu_number(); struct perfcontrol_thread_data data; machine_switch_populate_perfcontrol_thread_data(&data, thread, 0); @@ -334,10 +341,12 @@ machine_switch_perfcontrol_state_update(perfcontrol_event event, uint64_t counters[MT_CORE_NFIXED]; bool ctrs_enabled = perfcontrol_callout_counters_begin(counters); #endif /* MONOTONIC */ - sched_perfcontrol_state_update(event, cpu_id, timestamp, flags, - &data, NULL); + sched_perfcontrol_state_update(event, cpu_id, timestamp, flags, + &data, NULL); #if MONOTONIC - if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE); + if (ctrs_enabled) { + perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE); + } #endif /* MONOTONIC */ #if __arm64__ @@ -347,14 +356,14 @@ machine_switch_perfcontrol_state_update(perfcontrol_event event, void machine_thread_going_on_core(thread_t new_thread, - int urgency, - uint64_t sched_latency, - uint64_t same_pri_latency, - uint64_t timestamp) + thread_urgency_t urgency, + uint64_t sched_latency, + uint64_t same_pri_latency, + uint64_t timestamp) { - - if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) + if (sched_perfcontrol_oncore == 
sched_perfcontrol_oncore_default) { return; + } struct going_on_core on_core; perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread); @@ -374,7 +383,9 @@ machine_thread_going_on_core(thread_t new_thread, #endif /* MONOTONIC */ sched_perfcontrol_oncore(state, &on_core); #if MONOTONIC - if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE); + if (ctrs_enabled) { + perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE); + } #endif /* MONOTONIC */ #if __arm64__ @@ -383,10 +394,12 @@ machine_thread_going_on_core(thread_t new_thread, } void -machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch) +machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, + uint64_t last_dispatch, __unused boolean_t thread_runnable) { - if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) + if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) { return; + } struct going_off_core off_core; perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread); @@ -400,7 +413,9 @@ machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, #endif /* MONOTONIC */ sched_perfcontrol_offcore(state, &off_core, thread_terminating); #if MONOTONIC - if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE); + if (ctrs_enabled) { + perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE); + } #endif /* MONOTONIC */ #if __arm64__ @@ -411,11 +426,12 @@ machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, void machine_max_runnable_latency(uint64_t bg_max_latency, - uint64_t default_max_latency, - uint64_t realtime_max_latency) + uint64_t default_max_latency, + uint64_t realtime_max_latency) { - if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) + if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) { return; + } struct perfcontrol_max_runnable_latency latencies = { .max_scheduling_latencies = { [THREAD_URGENCY_NONE] = 0, @@ -430,10 +446,11 @@ machine_max_runnable_latency(uint64_t bg_max_latency, void machine_work_interval_notify(thread_t thread, - struct kern_work_interval_args* kwi_args) + struct kern_work_interval_args* kwi_args) { - if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) + if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) { return; + } perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread); struct perfcontrol_work_interval work_interval = { .thread_id = thread->thread_id, @@ -454,15 +471,16 @@ machine_work_interval_notify(thread_t thread, void machine_perfcontrol_deadline_passed(uint64_t deadline) { - if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) + if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) { sched_perfcontrol_deadline_passed(deadline); + } } #if INTERRUPT_MASKED_DEBUG /* * ml_spin_debug_reset() * Reset the timestamp on a thread that has been unscheduled - * to avoid false alarms. Alarm will go off if interrupts are held + * to avoid false alarms. Alarm will go off if interrupts are held * disabled for too long, starting from now. 
*/ void @@ -509,10 +527,10 @@ ml_check_interrupts_disabled_duration(thread_t thread) #ifndef KASAN /* - * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the - * mechanism enabled so that KASAN can catch any bugs in the mechanism itself. - */ - panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer)/timebase.denom)); + * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the + * mechanism enabled so that KASAN can catch any bugs in the mechanism itself. + */ + panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom)); #endif } } @@ -525,8 +543,8 @@ ml_check_interrupts_disabled_duration(thread_t thread) boolean_t ml_set_interrupts_enabled(boolean_t enable) { - thread_t thread; - uint64_t state; + thread_t thread; + uint64_t state; #if __arm__ #define INTERRUPT_MASK PSR_IRQF @@ -543,7 +561,7 @@ ml_set_interrupts_enabled(boolean_t enable) ml_check_interrupts_disabled_duration(thread); thread->machine.intmask_timestamp = 0; } -#endif // INTERRUPT_MASKED_DEBUG +#endif // INTERRUPT_MASKED_DEBUG if (get_preemption_level() == 0) { thread = current_thread(); while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) { @@ -574,7 +592,13 @@ ml_set_interrupts_enabled(boolean_t enable) } #endif } - return ((state & INTERRUPT_MASK) == 0); + return (state & INTERRUPT_MASK) == 0; +} + +boolean_t +ml_early_set_interrupts_enabled(boolean_t enable) +{ + return ml_set_interrupts_enabled(enable); } /* @@ -593,10 +617,10 @@ ml_at_interrupt_context(void) * up checking the interrupt state of a different CPU, resulting in a false * positive. But if interrupts are disabled, we also know we cannot be * preempted. */ - return (!ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL)); + return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL); } -vm_offset_t +vm_offset_t ml_stack_remaining(void) { uintptr_t local = (uintptr_t) &local; @@ -608,38 +632,43 @@ ml_stack_remaining(void) * something has gone horribly wrong. 
*/ intstack_top_ptr = getCpuDatap()->intstack_top; if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) { - return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE)); + return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE); } else { - return (local - current_thread()->kernel_stack); + return local - current_thread()->kernel_stack; } } static boolean_t ml_quiescing; -void ml_set_is_quiescing(boolean_t quiescing) +void +ml_set_is_quiescing(boolean_t quiescing) { assert(FALSE == ml_get_interrupts_enabled()); ml_quiescing = quiescing; } -boolean_t ml_is_quiescing(void) +boolean_t +ml_is_quiescing(void) { assert(FALSE == ml_get_interrupts_enabled()); - return (ml_quiescing); + return ml_quiescing; } -uint64_t ml_get_booter_memory_size(void) +uint64_t +ml_get_booter_memory_size(void) { uint64_t size; - uint64_t roundsize = 512*1024*1024ULL; + uint64_t roundsize = 512 * 1024 * 1024ULL; size = BootArgs->memSizeActual; if (!size) { size = BootArgs->memSize; - if (size < (2 * roundsize)) roundsize >>= 1; + if (size < (2 * roundsize)) { + roundsize >>= 1; + } size = (size + roundsize - 1) & ~(roundsize - 1); size -= BootArgs->memSize; } - return (size); + return size; } uint64_t @@ -651,7 +680,7 @@ ml_get_abstime_offset(void) uint64_t ml_get_conttime_offset(void) { - return (rtclock_base_abstime + mach_absolutetime_asleep); + return rtclock_base_abstime + mach_absolutetime_asleep; } uint64_t @@ -668,3 +697,24 @@ ml_get_conttime_wake_time(void) return ml_get_conttime_offset(); } +/* + * ml_snoop_thread_is_on_core(thread_t thread) + * Check if the given thread is currently on core. This function does not take + * locks, disable preemption, or otherwise guarantee synchronization. The + * result should be considered advisory. + */ +bool +ml_snoop_thread_is_on_core(thread_t thread) +{ + unsigned int cur_cpu_num = 0; + + for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) { + if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) { + if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) { + return true; + } + } + } + + return false; +} diff --git a/osfmk/arm/machine_task.c b/osfmk/arm/machine_task.c index 517f4fa1a..a8c2c0248 100644 --- a/osfmk/arm/machine_task.c +++ b/osfmk/arm/machine_task.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -62,77 +62,77 @@ extern zone_t ads_zone; kern_return_t machine_task_set_state( - task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count) + task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count) { switch (flavor) { case ARM_DEBUG_STATE: { arm_debug_state_t *tstate = (arm_debug_state_t *) state; - + if (state_count != ARM_DEBUG_STATE_COUNT) { return KERN_INVALID_ARGUMENT; } - + if (task->task_debug == NULL) { task->task_debug = zalloc(ads_zone); - if (task->task_debug == NULL) + if (task->task_debug == NULL) { return KERN_FAILURE; + } } - + copy_debug_state(tstate, (arm_debug_state_t*) task->task_debug, FALSE); - + return KERN_SUCCESS; } - case THREAD_STATE_NONE: /* Using this flavor to clear task_debug */ + case THREAD_STATE_NONE: /* Using this flavor to clear task_debug */ { if (task->task_debug != NULL) { zfree(ads_zone, task->task_debug); task->task_debug = NULL; - + return KERN_SUCCESS; } return KERN_FAILURE; } default: - { + { return KERN_INVALID_ARGUMENT; - } + } } return KERN_FAILURE; } -kern_return_t -machine_task_get_state(task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t *state_count) +kern_return_t +machine_task_get_state(task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t *state_count) { switch (flavor) { case ARM_DEBUG_STATE: { arm_debug_state_t *tstate = (arm_debug_state_t *) state; - + if (*state_count != ARM_DEBUG_STATE_COUNT) { return KERN_INVALID_ARGUMENT; } - + if (task->task_debug == NULL) { - bzero(state, sizeof(*tstate)); + bzero(state, sizeof(*tstate)); } else { copy_debug_state((arm_debug_state_t*) task->task_debug, tstate, FALSE); /* FALSE OR TRUE doesn't matter since we are ignoring it for arm */ - } - + } + return KERN_SUCCESS; } default: - { + { return KERN_INVALID_ARGUMENT; - } - + } } return KERN_FAILURE; } @@ -147,15 +147,15 @@ machine_task_terminate(task_t task) if (task_debug != NULL) { task->task_debug = NULL; zfree(ads_zone, task_debug); - } + } } } kern_return_t machine_thread_inherit_taskwide( - thread_t thread, - task_t parent_task) + thread_t thread, + task_t parent_task) { if (parent_task->task_debug) { int flavor; @@ -173,7 +173,7 @@ machine_thread_inherit_taskwide( void machine_task_init(__unused task_t new_task, - __unused task_t parent_task, - __unused boolean_t memory_inherit) -{ + __unused task_t parent_task, + __unused boolean_t 
memory_inherit) +{ } diff --git a/osfmk/arm/machlimits.h b/osfmk/arm/machlimits.h index 0ab749b6b..0443af7d2 100644 --- a/osfmk/arm/machlimits.h +++ b/osfmk/arm/machlimits.h @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:41 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,9 +38,9 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.2.1 1996/12/09 16:55:05 stephen - * nmklinux_1.0b3_shared into pmk1.1 - * New file based on hp_pa - * [1996/12/09 11:09:22 stephen] + * nmklinux_1.0b3_shared into pmk1.1 + * New file based on hp_pa + * [1996/12/09 11:09:22 stephen] * * $EndLog$ */ @@ -65,34 +65,34 @@ #ifndef _MACH_MACHLIMITS_H_ #define _MACH_MACHLIMITS_H_ -#define CHAR_BIT 8 /* number of bits in a char */ +#define CHAR_BIT 8 /* number of bits in a char */ -#define SCHAR_MAX 127 /* max value for a signed char */ -#define SCHAR_MIN (-128) /* min value for a signed char */ +#define SCHAR_MAX 127 /* max value for a signed char */ +#define SCHAR_MIN (-128) /* min value for a signed char */ -#define UCHAR_MAX 255U /* max value for an unsigned char */ -#define CHAR_MAX 127 /* max value for a char */ -#define CHAR_MIN (-128) /* min value for a char */ +#define UCHAR_MAX 255U /* max value for an unsigned char */ +#define CHAR_MAX 127 /* max value for a char */ +#define CHAR_MIN (-128) /* min value for a char */ -#define USHRT_MAX 65535U /* max value for an unsigned short */ -#define SHRT_MAX 32767 /* max value for a short */ -#define SHRT_MIN (-32768) /* min value for a short */ +#define USHRT_MAX 65535U /* max value for an unsigned short */ +#define SHRT_MAX 32767 /* max value for a short */ +#define SHRT_MIN (-32768) /* min value for a short */ -#define UINT_MAX 0xFFFFFFFFU /* max value for an unsigned int */ -#define INT_MAX 2147483647 /* max value for an int */ -#define INT_MIN (-2147483647-1) /* min value for an int */ +#define UINT_MAX 0xFFFFFFFFU /* max value for an unsigned int */ +#define INT_MAX 2147483647 /* max value for an int */ +#define INT_MIN (-2147483647-1) /* min value for an int */ #ifdef __LP64__ -#define ULONG_MAX 0xffffffffffffffffUL /* max unsigned long */ -#define LONG_MAX 0x7fffffffffffffffL /* max signed long */ -#define LONG_MIN (-0x7fffffffffffffffL-1)/* min signed long */ +#define ULONG_MAX 0xffffffffffffffffUL /* max unsigned long */ +#define LONG_MAX 0x7fffffffffffffffL /* max signed long */ +#define LONG_MIN (-0x7fffffffffffffffL-1)/* min signed long */ #else /* !__LP64__ */ -#define ULONG_MAX 0xffffffffUL /* max value for an unsigned long */ -#define LONG_MAX 2147483647L /* max value for a long */ -#define LONG_MIN (-2147483647L-1) /* min value for a long */ +#define ULONG_MAX 0xffffffffUL /* max value for an unsigned long */ +#define LONG_MAX 2147483647L /* max value for a long */ +#define LONG_MIN (-2147483647L-1) /* min value for a long */ #endif /* __LP64__ */ /* Must be at least two, for internationalization (NLS/KJI) */ -#define MB_LEN_MAX 4 /* multibyte characters */ +#define MB_LEN_MAX 4 /* multibyte characters */ #endif /* _MACH_MACHLIMITS_H_ */ diff --git a/osfmk/arm/machparam.h b/osfmk/arm/machparam.h index 4d5ec30de..a1138274c 100644 --- a/osfmk/arm/machparam.h +++ b/osfmk/arm/machparam.h @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,4 +61,3 @@ * * SPLs are true functions on i386, defined elsewhere. */ - diff --git a/osfmk/arm/misc_protos.h b/osfmk/arm/misc_protos.h index a36edabcb..ca995fb37 100644 --- a/osfmk/arm/misc_protos.h +++ b/osfmk/arm/misc_protos.h @@ -29,8 +29,8 @@ * @OSF_COPYRIGHT@ */ -#ifndef _ARM_MISC_PROTOS_H_ -#define _ARM_MISC_PROTOS_H_ +#ifndef _ARM_MISC_PROTOS_H_ +#define _ARM_MISC_PROTOS_H_ #include @@ -71,11 +71,11 @@ extern void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsign extern void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res); #if defined(__arm__) -extern void copy_debug_state(arm_debug_state_t *src, arm_debug_state_t *target, __unused boolean_t all); +extern void copy_debug_state(arm_debug_state_t * src, arm_debug_state_t *target, __unused boolean_t all); #elif defined(__arm64__) -extern void copy_legacy_debug_state(arm_legacy_debug_state_t *src, arm_legacy_debug_state_t *target, __unused boolean_t all); -extern void copy_debug_state32(arm_debug_state32_t *src, arm_debug_state32_t *target, __unused boolean_t all); -extern void copy_debug_state64(arm_debug_state64_t *src, arm_debug_state64_t *target, __unused boolean_t all); +extern void copy_legacy_debug_state(arm_legacy_debug_state_t * src, arm_legacy_debug_state_t *target, __unused boolean_t all); +extern void copy_debug_state32(arm_debug_state32_t * src, arm_debug_state32_t *target, __unused boolean_t all); +extern void copy_debug_state64(arm_debug_state64_t * src, arm_debug_state64_t *target, __unused boolean_t all); extern boolean_t debug_legacy_state_is_valid(arm_legacy_debug_state_t *ds); extern boolean_t debug_state_is_valid32(arm_debug_state32_t *ds); @@ -85,9 +85,9 @@ extern int copyio_check_user_addr(user_addr_t user_addr, vm_size_t nbytes); /* Top-Byte-Ignore */ extern boolean_t user_tbi; -#define TBI_MASK 0xff00000000000000 -#define user_tbi_enabled() (user_tbi) -#define tbi_clear(addr) ((addr) & ~(TBI_MASK)) +#define TBI_MASK 0xff00000000000000 +#define user_tbi_enabled() (user_tbi) +#define tbi_clear(addr) ((addr) & ~(TBI_MASK)) #else #error Unknown architecture. 
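The misc_protos.h hunk above reformats the Top-Byte-Ignore (TBI) helpers
without changing their behavior: tbi_clear() simply masks off the tag
byte that ARMv8 TBI lets userspace stash in bits [63:56] of a pointer.
A minimal host-side sanity check of that masking (illustrative only; the
constants are copied from the hunk, while the ULL suffix and main() are
additions for a standalone build, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	#define TBI_MASK        0xff00000000000000ULL
	#define tbi_clear(addr) ((addr) & ~(TBI_MASK))

	int
	main(void)
	{
		/* A tagged pointer: tag 0x3f stashed in the top byte. */
		uint64_t tagged = 0x3f00000012345678ULL;

		/* tbi_clear() strips the tag, leaving the untagged address. */
		assert(tbi_clear(tagged) == 0x0000000012345678ULL);
		return 0;
	}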
diff --git a/osfmk/arm/model_dep.c b/osfmk/arm/model_dep.c index 49503dfeb..f178db28e 100644 --- a/osfmk/arm/model_dep.c +++ b/osfmk/arm/model_dep.c @@ -68,7 +68,7 @@ #include #include #include -#include /* for btop */ +#include /* for btop */ #include #include @@ -80,51 +80,65 @@ #include #if MACH_KDP -void kdp_trap(unsigned int, struct arm_saved_state *); +void kdp_trap(unsigned int, struct arm_saved_state *); #endif -extern kern_return_t do_stackshot(void *); -extern void kdp_snapshot_preflight(int pid, void *tracebuf, - uint32_t tracebuf_size, uint32_t flags, - kcdata_descriptor_t data_p, - boolean_t enable_faulting); -extern int kdp_stack_snapshot_bytes_traced(void); +extern kern_return_t do_stackshot(void *); +extern void kdp_snapshot_preflight(int pid, void *tracebuf, + uint32_t tracebuf_size, uint32_t flags, + kcdata_descriptor_t data_p, + boolean_t enable_faulting); +extern int kdp_stack_snapshot_bytes_traced(void); /* * Increment the PANICLOG_VERSION if you change the format of the panic * log in any way. */ -#define PANICLOG_VERSION 11 +#define PANICLOG_VERSION 13 static struct kcdata_descriptor kc_panic_data; extern char firmware_version[]; -extern volatile uint32_t debug_enabled; +extern volatile uint32_t debug_enabled; extern unsigned int not_in_kdp; -extern int copyinframe(vm_address_t fp, uint32_t * frame); -extern void kdp_callouts(kdp_event_t event); +extern int copyinframe(vm_address_t fp, uint32_t * frame); +extern void kdp_callouts(kdp_event_t event); /* #include */ #define MAXCOMLEN 16 -extern int proc_pid(void *p); -extern void proc_name_kdp(task_t, char *, int); +extern int proc_pid(void *p); +extern void proc_name_kdp(task_t, char *, int); + +/* + * Make sure there's enough space to include the relevant bits in the format required + * within the space allocated for the panic version string in the panic header. 
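The format string defined just below truncates each component to 14 characters, so the rendered 'Product Version (OS Version)' string is at most 14 + 2 + 14 + 1 characters plus a terminating NUL, i.e. 32 bytes. A standalone sketch of that bound; the destination field size and the version strings are illustrative, not the actual panic header layout:

#include <stdio.h>

#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)"

int
main(void)
{
        char eph_os_version[32]; /* worst case: 14 + " (" + 14 + ")" + NUL */

        snprintf(eph_os_version, sizeof(eph_os_version),
            PANIC_HEADER_VERSION_FMT_STR, "12.1.1", "16C5050a");
        puts(eph_os_version); /* prints: 12.1.1 (16C5050a) */
        return 0;
}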
+ * The format required by OSAnalytics/DumpPanic is 'Product Version (OS Version)' + */ +#define PANIC_HEADER_VERSION_FMT_STR "%.14s (%.14s)" + +extern const char version[]; +extern char osversion[]; +extern char osproductversion[]; + +#if defined(XNU_TARGET_OS_BRIDGE) +extern char macosproductversion[]; +extern char macosversion[]; +#endif -extern const char version[]; -extern char osversion[]; extern uint8_t gPlatformECID[8]; extern uint32_t gPlatformMemoryID; -extern uint64_t last_hwaccess_thread; +extern uint64_t last_hwaccess_thread; /*Choosing the size for gTargetTypeBuffer as 8 and size for gModelTypeBuffer as 32 - since the target name and model name typically doesn't exceed this size */ + * since the target name and model name typically doesn't exceed this size */ extern char gTargetTypeBuffer[8]; extern char gModelTypeBuffer[32]; -decl_simple_lock_data(extern,clock_lock) -extern struct timeval gIOLastSleepTime; -extern struct timeval gIOLastWakeTime; -extern boolean_t is_clock_configured; +decl_simple_lock_data(extern, clock_lock) +extern struct timeval gIOLastSleepTime; +extern struct timeval gIOLastWakeTime; +extern boolean_t is_clock_configured; extern boolean_t kernelcache_uuid_valid; extern uuid_t kernelcache_uuid; @@ -138,7 +152,7 @@ extern uuid_t kernelcache_uuid; #define DEBUG_ACK_TIMEOUT ((uint64_t) 10000000) /* Forward functions definitions */ -void panic_display_times(void) ; +void panic_display_times(void); void panic_print_symbol_name(vm_address_t search); @@ -160,22 +174,22 @@ uint64_t PE_smc_stashed_x86_prev_power_transitions = UINT64_MAX; uint32_t PE_pcie_stashed_link_state = UINT32_MAX; #endif - -// Convenient macros to easily validate one or more pointers if + +// Convenient macros to easily validate one or more pointers if // they have defined types #define VALIDATE_PTR(ptr) \ validate_ptr((vm_offset_t)(ptr), sizeof(*(ptr)), #ptr) #define VALIDATE_PTR_2(ptr0, ptr1) \ - VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1) - + VALIDATE_PTR(ptr0) && VALIDATE_PTR(ptr1) + #define VALIDATE_PTR_3(ptr0, ptr1, ptr2) \ VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR(ptr2) #define VALIDATE_PTR_4(ptr0, ptr1, ptr2, ptr3) \ VALIDATE_PTR_2(ptr0, ptr1) && VALIDATE_PTR_2(ptr2, ptr3) -#define GET_MACRO(_1,_2,_3,_4,NAME,...) NAME +#define GET_MACRO(_1, _2, _3, _4, NAME, ...) NAME #define VALIDATE_PTR_LIST(...) 
GET_MACRO(__VA_ARGS__, VALIDATE_PTR_4, VALIDATE_PTR_3, VALIDATE_PTR_2, VALIDATE_PTR)(__VA_ARGS__) @@ -183,7 +197,8 @@ uint32_t PE_pcie_stashed_link_state = UINT32_MAX; * Evaluate if a pointer is valid * Print a message if pointer is invalid */ -static boolean_t validate_ptr( +static boolean_t +validate_ptr( vm_offset_t ptr, vm_size_t size, const char * ptr_name) { if (ptr) { @@ -191,7 +206,7 @@ static boolean_t validate_ptr( return TRUE; } else { paniclog_append_noflush("Invalid %s pointer: %p size: %d\n", - ptr_name, (void *)ptr, (int)size); + ptr_name, (void *)ptr, (int)size); return FALSE; } } else { @@ -205,32 +220,36 @@ static boolean_t validate_ptr( */ static void print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, - boolean_t is_64_bit) + boolean_t is_64_bit) { - int i = 0; - addr64_t lr; - addr64_t fp; - addr64_t fp_for_ppn; - ppnum_t ppn; - boolean_t dump_kernel_stack; + int i = 0; + addr64_t lr; + addr64_t fp; + addr64_t fp_for_ppn; + ppnum_t ppn; + boolean_t dump_kernel_stack; fp = topfp; fp_for_ppn = 0; ppn = (ppnum_t)NULL; - if (fp >= VM_MIN_KERNEL_ADDRESS) + if (fp >= VM_MIN_KERNEL_ADDRESS) { dump_kernel_stack = TRUE; - else + } else { dump_kernel_stack = FALSE; + } do { - if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) + if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) { break; - if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) + } + if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) { break; - if ((!dump_kernel_stack) && (fp >=VM_MIN_KERNEL_ADDRESS)) + } + if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) { break; - + } + /* * Check to see if current address will result in a different * ppn than previously computed (to avoid recomputation) via @@ -291,16 +310,14 @@ extern void panic_print_vnodes(void); static void do_print_all_backtraces( - const char *message) + const char *message) { - int logversion = PANICLOG_VERSION; + int logversion = PANICLOG_VERSION; thread_t cur_thread = current_thread(); - uintptr_t cur_fp; + uintptr_t cur_fp; task_t task; - int i; - size_t index; int print_vnodes = 0; - const char *nohilite_thread_marker="\t"; + const char *nohilite_thread_marker = "\t"; /* end_marker_bytes set to 200 for printing END marker + stackshot summary info always */ int bytes_traced = 0, bytes_remaining = 0, end_marker_bytes = 200; @@ -309,26 +326,27 @@ do_print_all_backtraces( char *stackshot_begin_loc = NULL; #if defined(__arm__) - __asm__ volatile("mov %0, r7":"=r"(cur_fp)); + __asm__ volatile ("mov %0, r7":"=r"(cur_fp)); #elif defined(__arm64__) - __asm__ volatile("add %0, xzr, fp":"=r"(cur_fp)); + __asm__ volatile ("add %0, xzr, fp":"=r"(cur_fp)); #else #error Unknown architecture. #endif - if (panic_bt_depth != 0) + if (panic_bt_depth != 0) { return; + } panic_bt_depth++; /* Truncate panic string to 1200 bytes -- WDT log can be ~1100 bytes */ paniclog_append_noflush("Debugger message: %.1200s\n", message); if (debug_enabled) { paniclog_append_noflush("Device: %s\n", - ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet"); + ('\0' != gTargetTypeBuffer[0]) ? gTargetTypeBuffer : "Not set yet"); paniclog_append_noflush("Hardware Model: %s\n", - ('\0' != gModelTypeBuffer[0]) ? gModelTypeBuffer:"Not set yet"); + ('\0' != gModelTypeBuffer[0]) ? 
gModelTypeBuffer:"Not set yet"); paniclog_append_noflush("ECID: %02X%02X%02X%02X%02X%02X%02X%02X\n", gPlatformECID[7], - gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3], - gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]); + gPlatformECID[6], gPlatformECID[5], gPlatformECID[4], gPlatformECID[3], + gPlatformECID[2], gPlatformECID[1], gPlatformECID[0]); if (last_hwaccess_thread) { paniclog_append_noflush("AppleHWAccess Thread: 0x%llx\n", last_hwaccess_thread); } @@ -336,12 +354,16 @@ do_print_all_backtraces( } paniclog_append_noflush("Memory ID: 0x%x\n", gPlatformMemoryID); paniclog_append_noflush("OS version: %.256s\n", - ('\0' != osversion[0]) ? osversion : "Not set yet"); + ('\0' != osversion[0]) ? osversion : "Not set yet"); +#if defined(XNU_TARGET_OS_BRIDGE) + paniclog_append_noflush("macOS version: %.256s\n", + ('\0' != macosversion[0]) ? macosversion : "Not set"); +#endif paniclog_append_noflush("Kernel version: %.512s\n", version); if (kernelcache_uuid_valid) { paniclog_append_noflush("KernelCache UUID: "); - for (index = 0; index < sizeof(uuid_t); index++) { + for (size_t index = 0; index < sizeof(uuid_t); index++) { paniclog_append_noflush("%02X", kernelcache_uuid[index]); } paniclog_append_noflush("\n"); @@ -388,6 +410,14 @@ do_print_all_backtraces( paniclog_append_noflush("not available\n"); } #endif + if (panic_data_buffers != NULL) { + paniclog_append_noflush("%s data: ", panic_data_buffers->producer_name); + uint8_t *panic_buffer_data = (uint8_t *) panic_data_buffers->buf; + for (int i = 0; i < panic_data_buffers->len; i++) { + paniclog_append_noflush("%02X", panic_buffer_data[i]); + } + paniclog_append_noflush("\n"); + } paniclog_append_noflush("Paniclog version: %d\n", logversion); panic_display_kernel_aslr(); @@ -409,24 +439,23 @@ do_print_all_backtraces( // Just print threads with high CPU usage for WDT timeouts if (strncmp(message, "WDT timeout", 11) == 0) { - thread_t top_runnable[5] = {0}; - thread_t thread; - int total_cpu_usage = 0; + thread_t top_runnable[5] = {0}; + thread_t thread; + int total_cpu_usage = 0; print_vnodes = 1; - + for (thread = (thread_t)queue_first(&threads); - VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread); - thread = (thread_t)queue_next(&thread->threads)) { - + VALIDATE_PTR(thread) && !queue_end(&threads, (queue_entry_t)thread); + thread = (thread_t)queue_next(&thread->threads)) { total_cpu_usage += thread->cpu_usage; - + // Look for the 5 runnable threads with highest priority if (thread->state & TH_RUN) { - int k; - thread_t comparison_thread = thread; - + int k; + thread_t comparison_thread = thread; + for (k = 0; k < TOP_RUNNABLE_LIMIT; k++) { if (top_runnable[k] == 0) { top_runnable[k] = comparison_thread; @@ -439,36 +468,33 @@ do_print_all_backtraces( } // loop through highest priority runnable threads } // Check if thread is runnable } // Loop through all threads - + // Print the relevant info for each thread identified paniclog_append_noflush("Total cpu_usage: %d\n", total_cpu_usage); paniclog_append_noflush("Thread task pri cpu_usage\n"); - for (i = 0; i < TOP_RUNNABLE_LIMIT; i++) { - + for (int i = 0; i < TOP_RUNNABLE_LIMIT; i++) { if (top_runnable[i] && VALIDATE_PTR(top_runnable[i]->task) && - validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) { - + validate_ptr((vm_offset_t)top_runnable[i]->task->bsd_info, 1, "bsd_info")) { char name[MAXCOMLEN + 1]; proc_name_kdp(top_runnable[i]->task, name, sizeof(name)); paniclog_append_noflush("%p %s %d %d\n", - 
top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage); - } + top_runnable[i], name, top_runnable[i]->sched_pri, top_runnable[i]->cpu_usage); + } } // Loop through highest priority runnable threads paniclog_append_noflush("\n"); } // Check if message is "WDT timeout" - // print current task info + // print current task info if (VALIDATE_PTR_LIST(cur_thread, cur_thread->task)) { - task = cur_thread->task; if (VALIDATE_PTR_LIST(task->map, task->map->pmap)) { paniclog_append_noflush("Panicked task %p: %d pages, %d threads: ", - task, task->map->pmap->stats.resident_count, task->thread_count); + task, task->map->pmap->stats.resident_count, task->thread_count); } else { paniclog_append_noflush("Panicked task %p: %d threads: ", - task, task->thread_count); + task, task->thread_count); } if (validate_ptr((vm_offset_t)task->bsd_info, 1, "bsd_info")) { @@ -485,7 +511,7 @@ do_print_all_backtraces( if (cur_fp < VM_MAX_KERNEL_ADDRESS) { paniclog_append_noflush("Panicked thread: %p, backtrace: 0x%llx, tid: %llu\n", - cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread)); + cur_thread, (addr64_t)cur_fp, thread_tid(cur_thread)); #if __LP64__ print_one_backtrace(kernel_pmap, cur_fp, nohilite_thread_marker, TRUE); #else @@ -493,11 +519,22 @@ do_print_all_backtraces( #endif } else { paniclog_append_noflush("Could not print panicked thread backtrace:" - "frame pointer outside kernel vm.\n"); + "frame pointer outside kernel vm.\n"); } paniclog_append_noflush("\n"); panic_info->eph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->eph_panic_log_offset; + /* set the os version data in the panic header in the format 'Product Version (OS Version)' (only if they have been set) */ + if ((osversion[0] != '\0') && (osproductversion[0] != '\0')) { + snprintf((char *)&panic_info->eph_os_version, sizeof(panic_info->eph_os_version), PANIC_HEADER_VERSION_FMT_STR, + osproductversion, osversion); + } +#if defined(XNU_TARGET_OS_BRIDGE) + if ((macosversion[0] != '\0') && (macosproductversion[0] != '\0')) { + snprintf((char *)&panic_info->eph_macos_version, sizeof(panic_info->eph_macos_version), PANIC_HEADER_VERSION_FMT_STR, + macosproductversion, macosversion); + } +#endif if (debug_ack_timeout_count) { panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_STACKSHOT_FAILED_DEBUGGERSYNC; @@ -514,13 +551,13 @@ do_print_all_backtraces( bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base); err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)debug_buf_ptr, - KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes, - KCFLAG_USE_MEMCOPY); + KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining - end_marker_bytes, + KCFLAG_USE_MEMCOPY); if (err == KERN_SUCCESS) { kdp_snapshot_preflight(-1, stackshot_begin_loc, bytes_remaining - end_marker_bytes, - (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | - STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | - STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); + (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | + STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | + STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); err = do_stackshot(NULL); bytes_traced = kdp_stack_snapshot_bytes_traced(); if (bytes_traced > 0 && !err) { @@ -557,8 +594,9 @@ do_print_all_backtraces( 
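The stackshot setup just above hands the remainder of the panic buffer to the kcdata writer but holds back end_marker_bytes (200) so the trailing END marker and stackshot summary always fit, however much the snapshot consumes. A user-space sketch of that carve-out pattern; write_snapshot and the buffer sizes are stand-ins, not the kernel's kcdata/kdp API:

#include <stdio.h>
#include <string.h>

#define END_MARKER_BYTES 200 /* reserved for the END marker + summary */

/* stand-in snapshot writer; returns bytes written, or -1 if it won't fit */
static int
write_snapshot(char *buf, size_t cap)
{
        static const char payload[] = "kcdata: stackshot bytes ...";

        if (sizeof(payload) > cap) {
                return -1;
        }
        memcpy(buf, payload, sizeof(payload));
        return (int)sizeof(payload);
}

int
main(void)
{
        char debug_buf[4096];
        int traced = write_snapshot(debug_buf, sizeof(debug_buf) - END_MARKER_BYTES);

        if (traced > 0) {
                /* the reserved tail is guaranteed to be free for the marker */
                snprintf(debug_buf + traced, END_MARKER_BYTES,
                    "** Stackshot Succeeded: %d bytes traced **", traced);
                puts(debug_buf + traced);
        } else {
                puts("** Stackshot Incomplete **");
        }
        return 0;
}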
assert(panic_info->eph_other_log_offset != 0); - if (print_vnodes != 0) + if (print_vnodes != 0) { panic_print_vnodes(); + } panic_bt_depth--; } @@ -567,7 +605,7 @@ do_print_all_backtraces( * Entry to print_all_backtraces is serialized by the debugger lock */ static void -print_all_backtraces(const char *message) +print_all_backtraces(const char *message) { unsigned int initial_not_in_kdp = not_in_kdp; @@ -597,15 +635,16 @@ panic_display_times() return; } - if ((is_clock_configured) && (simple_lock_try(&clock_lock))) { - clock_sec_t secs, boot_secs; - clock_usec_t usecs, boot_usecs; + if ((is_clock_configured) && (simple_lock_try(&clock_lock, LCK_GRP_NULL))) { + clock_sec_t secs, boot_secs; + clock_usec_t usecs, boot_usecs; simple_unlock(&clock_lock); clock_get_calendar_microtime(&secs, &usecs); clock_get_boottime_microtime(&boot_secs, &boot_usecs); + paniclog_append_noflush("mach_absolute_time: 0x%llx\n", mach_absolute_time()); paniclog_append_noflush("Epoch Time: sec usec\n"); paniclog_append_noflush(" Boot : 0x%08x 0x%08x\n", (unsigned int)boot_secs, (unsigned int)boot_usecs); paniclog_append_noflush(" Sleep : 0x%08x 0x%08x\n", (unsigned int)gIOLastSleepTime.tv_sec, (unsigned int)gIOLastSleepTime.tv_usec); @@ -614,7 +653,8 @@ panic_display_times() } } -void panic_print_symbol_name(vm_address_t search) +void +panic_print_symbol_name(vm_address_t search) { #pragma unused(search) // empty stub. Really only used on x86_64. @@ -625,7 +665,6 @@ void SavePanicInfo( const char *message, __unused void *panic_data, __unused uint64_t panic_options) { - /* This should be initialized by the time we get here */ assert(panic_info->eph_panic_log_offset != 0); @@ -654,8 +693,9 @@ SavePanicInfo( PE_sync_panic_buffers(); // extra precaution; panic path likely isn't reliable if we're here } - if (PanicInfoSaved || (debug_buf_size == 0)) + if (PanicInfoSaved || (debug_buf_size == 0)) { return; + } PanicInfoSaved = TRUE; @@ -680,8 +720,9 @@ paniclog_flush() unsigned int panicbuf_length = 0; panicbuf_length = (unsigned int)(debug_buf_ptr - gPanicBase); - if (!panicbuf_length) + if (!panicbuf_length) { return; + } /* * Updates the log length of the last part of the panic log. @@ -726,13 +767,14 @@ DebuggerXCallEnter( uint64_t max_mabs_time, current_mabs_time; int cpu; int max_cpu; - cpu_data_t *target_cpu_datap; - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *target_cpu_datap; + cpu_data_t *cpu_data_ptr = getCpuDatap(); /* Check for nested debugger entry. 
*/ cpu_data_ptr->debugger_active++; - if (cpu_data_ptr->debugger_active != 1) + if (cpu_data_ptr->debugger_active != 1) { return KERN_SUCCESS; + } /* * If debugger_sync is not 0, someone responded excessively late to the last @@ -759,17 +801,19 @@ DebuggerXCallEnter( max_cpu = ml_get_max_cpu_number(); boolean_t immediate_halt = FALSE; - if (proceed_on_sync_failure && force_immediate_debug_halt) - immediate_halt = TRUE; + if (proceed_on_sync_failure && force_immediate_debug_halt) { + immediate_halt = TRUE; + } if (!immediate_halt) { - for (cpu=0; cpu <= max_cpu; cpu++) { + for (cpu = 0; cpu <= max_cpu; cpu++) { target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) + if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) { continue; + } - if(KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) { + if (KERN_SUCCESS == cpu_signal(target_cpu_datap, SIGPdebug, (void *)NULL, NULL)) { (void)hw_atomic_add(&debugger_sync, 1); } else { cpu_signal_failed = true; @@ -789,9 +833,9 @@ DebuggerXCallEnter( * all other CPUs have either responded or are spinning in a context that is * debugger safe. */ - while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time)) + while ((debugger_sync != 0) && (current_mabs_time < max_mabs_time)) { current_mabs_time = mach_absolute_time(); - + } } if (cpu_signal_failed && !proceed_on_sync_failure) { @@ -803,46 +847,53 @@ DebuggerXCallEnter( * but will be sufficient to let the other core respond. */ __builtin_arm_dmb(DMB_ISH); - for (cpu=0; cpu <= max_cpu; cpu++) { + for (cpu = 0; cpu <= max_cpu; cpu++) { target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) + if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr)) { continue; - if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) + } + if (!(target_cpu_datap->cpu_signal & SIGPdebug) && !immediate_halt) { continue; + } if (proceed_on_sync_failure) { paniclog_append_noflush("Attempting to forcibly halt cpu %d\n", cpu); dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu(cpu, 0); - if (halt_status < 0) + if (halt_status < 0) { paniclog_append_noflush("cpu %d failed to halt with error %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status)); - else { - if (halt_status > 0) + } else { + if (halt_status > 0) { paniclog_append_noflush("cpu %d halted with warning %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status)); - else + } else { paniclog_append_noflush("cpu %d successfully halted\n", cpu); + } target_cpu_datap->halt_status = CPU_HALTED; } - } else + } else { kprintf("Debugger synch pending on cpu %d\n", cpu); + } } if (proceed_on_sync_failure) { for (cpu = 0; cpu <= max_cpu; cpu++) { target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr) || - (target_cpu_datap->halt_status == CPU_NOT_HALTED)) + (target_cpu_datap->halt_status == CPU_NOT_HALTED)) { continue; + } dbgwrap_status_t halt_status = ml_dbgwrap_halt_cpu_with_state(cpu, NSEC_PER_SEC, &target_cpu_datap->halt_state); - if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) + if ((halt_status < 0) || (halt_status == DBGWRAP_WARN_CPU_OFFLINE)) { paniclog_append_noflush("Unable to obtain state for cpu %d with status %d: %s\n", cpu, halt_status, ml_dbgwrap_strerror(halt_status)); - else + } else { 
target_cpu_datap->halt_status = CPU_HALTED_WITH_STATE; + } } - if (immediate_halt) + if (immediate_halt) { paniclog_append_noflush("Immediate halt requested on all cores\n"); - else + } else { paniclog_append_noflush("Debugger synchronization timed out; waited %llu nanoseconds\n", DEBUG_ACK_TIMEOUT); + } debug_ack_timeout_count++; return KERN_SUCCESS; } else { @@ -865,11 +916,12 @@ void DebuggerXCallReturn( void) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); cpu_data_ptr->debugger_active--; - if (cpu_data_ptr->debugger_active != 0) + if (cpu_data_ptr->debugger_active != 0) { return; + } mp_kdp_trap = 0; debugger_sync = 0; @@ -880,11 +932,11 @@ DebuggerXCallReturn( void DebuggerXCall( - void *ctx) + void *ctx) { - boolean_t save_context = FALSE; - vm_offset_t kstackptr = 0; - arm_saved_state_t *regs = (arm_saved_state_t *) ctx; + boolean_t save_context = FALSE; + vm_offset_t kstackptr = 0; + arm_saved_state_t *regs = (arm_saved_state_t *) ctx; if (regs != NULL) { #if defined(__arm64__) @@ -900,7 +952,6 @@ DebuggerXCall( if (save_context) { /* Save the interrupted context before acknowledging the signal */ *state = *regs; - } else if (regs) { /* zero old state so machine_trace_thread knows not to backtrace it */ set_saved_state_fp(state, 0); @@ -911,7 +962,9 @@ DebuggerXCall( (void)hw_atomic_sub(&debugger_sync, 1); __builtin_arm_dmb(DMB_ISH); - while (mp_kdp_trap); + while (mp_kdp_trap) { + ; + } /* Any cleanup for our pushed context should go here */ } @@ -919,10 +972,10 @@ DebuggerXCall( void DebuggerCall( - unsigned int reason, - void *ctx) + unsigned int reason, + void *ctx) { -#if !MACH_KDP +#if !MACH_KDP #pragma unused(reason,ctx) #endif /* !MACH_KDP */ @@ -930,11 +983,9 @@ DebuggerCall( alternate_debugger_enter(); #endif -#if MACH_KDP +#if MACH_KDP kdp_trap(reason, (struct arm_saved_state *)ctx); #else /* TODO: decide what to do if no debugger config */ #endif } - - diff --git a/osfmk/arm/monotonic_arm.c b/osfmk/arm/monotonic_arm.c index 5526c515b..8ed24d4eb 100644 --- a/osfmk/arm/monotonic_arm.c +++ b/osfmk/arm/monotonic_arm.c @@ -27,6 +27,7 @@ */ #include +#include #include bool mt_core_supported = false; @@ -51,7 +52,7 @@ mt_cur_cpu(void) int mt_microstackshot_start_arch(__unused uint64_t period) { - return 1; + return ENOTSUP; } struct mt_device mt_devices[0]; diff --git a/osfmk/arm/pal_routines.c b/osfmk/arm/pal_routines.c index 535e3cc80..540733a0f 100644 --- a/osfmk/arm/pal_routines.c +++ b/osfmk/arm/pal_routines.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
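One small but deliberate change earlier in this stretch of the patch: mt_microstackshot_start_arch in osfmk/arm/monotonic_arm.c now returns ENOTSUP instead of a bare 1, so "no microstackshot support on this architecture" surfaces as a meaningful errno rather than an anonymous failure code. A sketch of why the convention helps callers; the function here is a stand-in for the arch hook, not the kernel interface itself:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in arch hook: this core has no microstackshot counter support */
static int
microstackshot_start_arch(unsigned long long period)
{
        (void)period;
        return ENOTSUP; /* errno value, not a bare "1" */
}

int
main(void)
{
        int err = microstackshot_start_arch(1000000ULL);

        if (err != 0) {
                /* the caller can now report exactly why starting failed */
                fprintf(stderr, "microstackshot: %s\n", strerror(err));
                return 1;
        }
        return 0;
}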
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/arm/pal_routines.h b/osfmk/arm/pal_routines.h index f100f6e99..c9056284e 100644 --- a/osfmk/arm/pal_routines.h +++ b/osfmk/arm/pal_routines.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _ARM_PAL_ROUTINES_H @@ -48,15 +48,15 @@ extern void pal_serial_putc(char a); extern void pal_serial_putc_nocr(char a); extern int pal_serial_getc(void); -#define panic_display_pal_info() do { } while(0) -#define pal_kernel_announce() do { } while(0) +#define panic_display_pal_info() do { } while(0) +#define pal_kernel_announce() do { } while(0) -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ /* Allows us to set a property on the IOResources object. Unused on ARM. */ -static inline void -pal_get_resource_property(const char **property_name, - int *property_value) +static inline void +pal_get_resource_property(const char **property_name, + int *property_value) { *property_name = 0; (void) property_value; diff --git a/osfmk/arm/pcb.c b/osfmk/arm/pcb.c index 60510d8b2..2ec9f9dcb 100644 --- a/osfmk/arm/pcb.c +++ b/osfmk/arm/pcb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Apple Inc. All rights reserved. + * Copyright (c) 2007-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -102,6 +102,12 @@ machine_switch_context( new->machine.CpuDatap = cpu_data_ptr; +#if __SMP__ + /* TODO: Should this be ordered? */ + old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU; + new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; +#endif /* __SMP__ */ + machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new); retval = Switch_context(old, continuation, new); assert(retval != NULL); @@ -270,6 +276,13 @@ machine_stack_handoff( pmap_set_pmap(new->map->pmap, new); new->machine.CpuDatap = cpu_data_ptr; + +#if __SMP__ + /* TODO: Should this be ordered? 
*/ + old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU; + new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; +#endif /* __SMP__ */ + machine_set_current_thread(new); thread_initialize_kernel_state(new); @@ -405,3 +418,14 @@ machine_thread_set_tsd_base( return KERN_SUCCESS; } + +void +machine_tecs(__unused thread_t thr) +{ +} + +int +machine_csv(__unused cpuvn_e cve) +{ + return 0; +} diff --git a/osfmk/arm/pmap.c b/osfmk/arm/pmap.c index 754b84930..8f33cff28 100644 --- a/osfmk/arm/pmap.c +++ b/osfmk/arm/pmap.c @@ -74,7 +74,7 @@ #include #include -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) #include #include #if CONFIG_PGTRACE @@ -99,10 +99,10 @@ extern int pmap_ledgers_panic; extern int pmap_ledgers_panic_leeway; int pmap_stats_assert = 1; -#define PMAP_STATS_ASSERTF(cond, pmap, fmt, ...) \ - MACRO_BEGIN \ +#define PMAP_STATS_ASSERTF(cond, pmap, fmt, ...) \ + MACRO_BEGIN \ if (pmap_stats_assert && (pmap)->pmap_stats_assert) \ - assertf(cond, fmt, ##__VA_ARGS__); \ + assertf(cond, fmt, ##__VA_ARGS__); \ MACRO_END #else /* MACH_ASSERT */ #define PMAP_STATS_ASSERTF(cond, pmap, fmt, ...) @@ -127,13 +127,13 @@ int panic_on_unsigned_execute = 0; /* Virtual memory region for early allocation */ -#if (__ARM_VMSA__ == 7) -#define VREGION1_START (VM_HIGH_KERNEL_WINDOW & ~ARM_TT_L1_PT_OFFMASK) +#if (__ARM_VMSA__ == 7) +#define VREGION1_START (VM_HIGH_KERNEL_WINDOW & ~ARM_TT_L1_PT_OFFMASK) #else -#define VREGION1_HIGH_WINDOW (PE_EARLY_BOOT_VA) -#define VREGION1_START ((VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - VREGION1_HIGH_WINDOW) +#define VREGION1_HIGH_WINDOW (PE_EARLY_BOOT_VA) +#define VREGION1_START ((VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - VREGION1_HIGH_WINDOW) #endif -#define VREGION1_SIZE (trunc_page(VM_MAX_KERNEL_ADDRESS - (VREGION1_START))) +#define VREGION1_SIZE (trunc_page(VM_MAX_KERNEL_ADDRESS - (VREGION1_START))) extern unsigned int not_in_kdp; @@ -142,8 +142,8 @@ extern vm_offset_t first_avail; extern pmap_paddr_t avail_start; extern pmap_paddr_t avail_end; -extern vm_offset_t virtual_space_start; /* Next available kernel VA */ -extern vm_offset_t virtual_space_end; /* End of kernel address space */ +extern vm_offset_t virtual_space_start; /* Next available kernel VA */ +extern vm_offset_t virtual_space_end; /* End of kernel address space */ extern vm_offset_t static_memory_end; extern int hard_maxproc; @@ -174,85 +174,85 @@ const uint64_t arm64_root_pgtable_num_ttes = 0; struct pmap kernel_pmap_store MARK_AS_PMAP_DATA; SECURITY_READ_ONLY_LATE(pmap_t) kernel_pmap = &kernel_pmap_store; -struct vm_object pmap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); /* store pt pages */ +struct vm_object pmap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); /* store pt pages */ vm_object_t pmap_object = &pmap_object_store; -static struct zone *pmap_zone; /* zone of pmap structures */ +static struct zone *pmap_zone; /* zone of pmap structures */ decl_simple_lock_data(, pmaps_lock MARK_AS_PMAP_DATA) -unsigned int pmap_stamp MARK_AS_PMAP_DATA; -queue_head_t map_pmap_list MARK_AS_PMAP_DATA; +unsigned int pmap_stamp MARK_AS_PMAP_DATA; +queue_head_t map_pmap_list MARK_AS_PMAP_DATA; decl_simple_lock_data(, pt_pages_lock MARK_AS_PMAP_DATA) -queue_head_t pt_page_list MARK_AS_PMAP_DATA; /* pt page ptd entries list */ +queue_head_t pt_page_list MARK_AS_PMAP_DATA; /* pt page ptd entries list */ decl_simple_lock_data(, pmap_pages_lock MARK_AS_PMAP_DATA) typedef struct page_free_entry { - struct page_free_entry *next; + struct 
page_free_entry *next; } page_free_entry_t; -#define PAGE_FREE_ENTRY_NULL ((page_free_entry_t *) 0) +#define PAGE_FREE_ENTRY_NULL ((page_free_entry_t *) 0) -page_free_entry_t *pmap_pages_reclaim_list MARK_AS_PMAP_DATA; /* Reclaimed pt page list */ -unsigned int pmap_pages_request_count MARK_AS_PMAP_DATA; /* Pending requests to reclaim pt page */ -unsigned long long pmap_pages_request_acum MARK_AS_PMAP_DATA; +page_free_entry_t *pmap_pages_reclaim_list MARK_AS_PMAP_DATA; /* Reclaimed pt page list */ +unsigned int pmap_pages_request_count MARK_AS_PMAP_DATA; /* Pending requests to reclaim pt page */ +unsigned long long pmap_pages_request_acum MARK_AS_PMAP_DATA; typedef struct tt_free_entry { - struct tt_free_entry *next; + struct tt_free_entry *next; } tt_free_entry_t; -#define TT_FREE_ENTRY_NULL ((tt_free_entry_t *) 0) +#define TT_FREE_ENTRY_NULL ((tt_free_entry_t *) 0) -tt_free_entry_t *free_page_size_tt_list MARK_AS_PMAP_DATA; -unsigned int free_page_size_tt_count MARK_AS_PMAP_DATA; -unsigned int free_page_size_tt_max MARK_AS_PMAP_DATA; -#define FREE_PAGE_SIZE_TT_MAX 4 -tt_free_entry_t *free_two_page_size_tt_list MARK_AS_PMAP_DATA; -unsigned int free_two_page_size_tt_count MARK_AS_PMAP_DATA; -unsigned int free_two_page_size_tt_max MARK_AS_PMAP_DATA; -#define FREE_TWO_PAGE_SIZE_TT_MAX 4 -tt_free_entry_t *free_tt_list MARK_AS_PMAP_DATA; -unsigned int free_tt_count MARK_AS_PMAP_DATA; -unsigned int free_tt_max MARK_AS_PMAP_DATA; +tt_free_entry_t *free_page_size_tt_list MARK_AS_PMAP_DATA; +unsigned int free_page_size_tt_count MARK_AS_PMAP_DATA; +unsigned int free_page_size_tt_max MARK_AS_PMAP_DATA; +#define FREE_PAGE_SIZE_TT_MAX 4 +tt_free_entry_t *free_two_page_size_tt_list MARK_AS_PMAP_DATA; +unsigned int free_two_page_size_tt_count MARK_AS_PMAP_DATA; +unsigned int free_two_page_size_tt_max MARK_AS_PMAP_DATA; +#define FREE_TWO_PAGE_SIZE_TT_MAX 4 +tt_free_entry_t *free_tt_list MARK_AS_PMAP_DATA; +unsigned int free_tt_count MARK_AS_PMAP_DATA; +unsigned int free_tt_max MARK_AS_PMAP_DATA; -#define TT_FREE_ENTRY_NULL ((tt_free_entry_t *) 0) +#define TT_FREE_ENTRY_NULL ((tt_free_entry_t *) 0) boolean_t pmap_gc_allowed MARK_AS_PMAP_DATA = TRUE; boolean_t pmap_gc_forced MARK_AS_PMAP_DATA = FALSE; boolean_t pmap_gc_allowed_by_time_throttle = TRUE; -unsigned int inuse_user_ttepages_count MARK_AS_PMAP_DATA = 0; /* non-root, non-leaf user pagetable pages, in units of PAGE_SIZE */ -unsigned int inuse_user_ptepages_count MARK_AS_PMAP_DATA = 0; /* leaf user pagetable pages, in units of PAGE_SIZE */ -unsigned int inuse_user_tteroot_count MARK_AS_PMAP_DATA = 0; /* root user pagetables, in units of PMAP_ROOT_ALLOC_SIZE */ +unsigned int inuse_user_ttepages_count MARK_AS_PMAP_DATA = 0; /* non-root, non-leaf user pagetable pages, in units of PAGE_SIZE */ +unsigned int inuse_user_ptepages_count MARK_AS_PMAP_DATA = 0; /* leaf user pagetable pages, in units of PAGE_SIZE */ +unsigned int inuse_user_tteroot_count MARK_AS_PMAP_DATA = 0; /* root user pagetables, in units of PMAP_ROOT_ALLOC_SIZE */ unsigned int inuse_kernel_ttepages_count MARK_AS_PMAP_DATA = 0; /* non-root, non-leaf kernel pagetable pages, in units of PAGE_SIZE */ unsigned int inuse_kernel_ptepages_count MARK_AS_PMAP_DATA = 0; /* leaf kernel pagetable pages, in units of PAGE_SIZE */ -unsigned int inuse_kernel_tteroot_count MARK_AS_PMAP_DATA = 0; /* root kernel pagetables, in units of PMAP_ROOT_ALLOC_SIZE */ -unsigned int inuse_pmap_pages_count = 0; /* debugging */ +unsigned int inuse_kernel_tteroot_count MARK_AS_PMAP_DATA = 0; /* root kernel pagetables, 
in units of PMAP_ROOT_ALLOC_SIZE */ +unsigned int inuse_pmap_pages_count = 0; /* debugging */ SECURITY_READ_ONLY_LATE(tt_entry_t *) invalid_tte = 0; SECURITY_READ_ONLY_LATE(pmap_paddr_t) invalid_ttep = 0; -SECURITY_READ_ONLY_LATE(tt_entry_t *) cpu_tte = 0; /* set by arm_vm_init() - keep out of bss */ -SECURITY_READ_ONLY_LATE(pmap_paddr_t) cpu_ttep = 0; /* set by arm_vm_init() - phys tte addr */ +SECURITY_READ_ONLY_LATE(tt_entry_t *) cpu_tte = 0; /* set by arm_vm_init() - keep out of bss */ +SECURITY_READ_ONLY_LATE(pmap_paddr_t) cpu_ttep = 0; /* set by arm_vm_init() - phys tte addr */ #if DEVELOPMENT || DEBUG -int nx_enabled = 1; /* enable no-execute protection */ -int allow_data_exec = 0; /* No apps may execute data */ -int allow_stack_exec = 0; /* No apps may execute from the stack */ +int nx_enabled = 1; /* enable no-execute protection */ +int allow_data_exec = 0; /* No apps may execute data */ +int allow_stack_exec = 0; /* No apps may execute from the stack */ #else /* DEVELOPMENT || DEBUG */ -const int nx_enabled = 1; /* enable no-execute protection */ -const int allow_data_exec = 0; /* No apps may execute data */ -const int allow_stack_exec = 0; /* No apps may execute from the stack */ +const int nx_enabled = 1; /* enable no-execute protection */ +const int allow_data_exec = 0; /* No apps may execute data */ +const int allow_stack_exec = 0; /* No apps may execute from the stack */ #endif /* DEVELOPMENT || DEBUG */ /* * pv_entry_t - structure to track the active mappings for a given page */ typedef struct pv_entry { - struct pv_entry *pve_next; /* next alias */ - pt_entry_t *pve_ptep; /* page table entry */ + struct pv_entry *pve_next; /* next alias */ + pt_entry_t *pve_ptep; /* page table entry */ #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) /* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers * are 32-bit: @@ -264,7 +264,7 @@ typedef struct pv_entry { } pv_entry_t; #endif -#define PV_ENTRY_NULL ((pv_entry_t *) 0) +#define PV_ENTRY_NULL ((pv_entry_t *) 0) /* * PMAP LEDGERS: @@ -273,122 +273,124 @@ typedef struct pv_entry { * These macros set, clear and test for this marker and extract the actual * value of the "pve_next" pointer. */ -#define PVE_NEXT_ALTACCT ((uintptr_t) 0x1) +#define PVE_NEXT_ALTACCT ((uintptr_t) 0x1) #define PVE_NEXT_SET_ALTACCT(pve_next_p) \ *(pve_next_p) = (struct pv_entry *) (((uintptr_t) *(pve_next_p)) | \ - PVE_NEXT_ALTACCT) + PVE_NEXT_ALTACCT) #define PVE_NEXT_CLR_ALTACCT(pve_next_p) \ *(pve_next_p) = (struct pv_entry *) (((uintptr_t) *(pve_next_p)) & \ - ~PVE_NEXT_ALTACCT) -#define PVE_NEXT_IS_ALTACCT(pve_next) \ + ~PVE_NEXT_ALTACCT) +#define PVE_NEXT_IS_ALTACCT(pve_next) \ ((((uintptr_t) (pve_next)) & PVE_NEXT_ALTACCT) ? 
TRUE : FALSE) #define PVE_NEXT_PTR(pve_next) \ ((struct pv_entry *)(((uintptr_t) (pve_next)) & \ - ~PVE_NEXT_ALTACCT)) + ~PVE_NEXT_ALTACCT)) #if MACH_ASSERT static void pmap_check_ledgers(pmap_t pmap); #else -static inline void pmap_check_ledgers(__unused pmap_t pmap) {} +static inline void +pmap_check_ledgers(__unused pmap_t pmap) +{ +} #endif /* MACH_ASSERT */ -SECURITY_READ_ONLY_LATE(pv_entry_t **) pv_head_table; /* array of pv entry pointers */ +SECURITY_READ_ONLY_LATE(pv_entry_t * *) pv_head_table; /* array of pv entry pointers */ -pv_entry_t *pv_free_list MARK_AS_PMAP_DATA; -pv_entry_t *pv_kern_free_list MARK_AS_PMAP_DATA; -decl_simple_lock_data(,pv_free_list_lock MARK_AS_PMAP_DATA) -decl_simple_lock_data(,pv_kern_free_list_lock MARK_AS_PMAP_DATA) +pv_entry_t *pv_free_list MARK_AS_PMAP_DATA; +pv_entry_t *pv_kern_free_list MARK_AS_PMAP_DATA; +decl_simple_lock_data(, pv_free_list_lock MARK_AS_PMAP_DATA) +decl_simple_lock_data(, pv_kern_free_list_lock MARK_AS_PMAP_DATA) -decl_simple_lock_data(,phys_backup_lock) +decl_simple_lock_data(, phys_backup_lock) /* * pt_desc - structure to keep info on page assigned to page tables */ #if (__ARM_VMSA__ == 7) -#define PT_INDEX_MAX 1 +#define PT_INDEX_MAX 1 #else #if (ARM_PGSHIFT == 14) -#define PT_INDEX_MAX 1 +#define PT_INDEX_MAX 1 #else -#define PT_INDEX_MAX 4 +#define PT_INDEX_MAX 4 #endif #endif -#define PT_DESC_REFCOUNT 0x4000U -#define PT_DESC_IOMMU_REFCOUNT 0x8000U +#define PT_DESC_REFCOUNT 0x4000U +#define PT_DESC_IOMMU_REFCOUNT 0x8000U typedef struct pt_desc { - queue_chain_t pt_page; + queue_chain_t pt_page; struct { /* * For non-leaf pagetables, should always be PT_DESC_REFCOUNT * For leaf pagetables, should reflect the number of non-empty PTEs * For IOMMU pages, should always be PT_DESC_IOMMU_REFCOUNT */ - unsigned short refcnt; + unsigned short refcnt; /* * For non-leaf pagetables, should be 0 * For leaf pagetables, should reflect the number of wired entries * For IOMMU pages, may optionally reflect a driver-defined refcount (IOMMU operations are implicitly wired) */ - unsigned short wiredcnt; + unsigned short wiredcnt; } pt_cnt[PT_INDEX_MAX]; union { - struct pmap *pmap; + struct pmap *pmap; }; struct { - vm_offset_t va; + vm_offset_t va; } pt_map[PT_INDEX_MAX]; } pt_desc_t; -#define PTD_ENTRY_NULL ((pt_desc_t *) 0) +#define PTD_ENTRY_NULL ((pt_desc_t *) 0) SECURITY_READ_ONLY_LATE(pt_desc_t *) ptd_root_table; -pt_desc_t *ptd_free_list MARK_AS_PMAP_DATA = PTD_ENTRY_NULL; +pt_desc_t *ptd_free_list MARK_AS_PMAP_DATA = PTD_ENTRY_NULL; SECURITY_READ_ONLY_LATE(boolean_t) ptd_preboot = TRUE; -unsigned int ptd_free_count MARK_AS_PMAP_DATA = 0; -decl_simple_lock_data(,ptd_free_list_lock MARK_AS_PMAP_DATA) +unsigned int ptd_free_count MARK_AS_PMAP_DATA = 0; +decl_simple_lock_data(, ptd_free_list_lock MARK_AS_PMAP_DATA) /* * physical page attribute */ -typedef u_int16_t pp_attr_t; +typedef u_int16_t pp_attr_t; -#define PP_ATTR_WIMG_MASK 0x003F -#define PP_ATTR_WIMG(x) ((x) & PP_ATTR_WIMG_MASK) +#define PP_ATTR_WIMG_MASK 0x003F +#define PP_ATTR_WIMG(x) ((x) & PP_ATTR_WIMG_MASK) -#define PP_ATTR_REFERENCED 0x0040 -#define PP_ATTR_MODIFIED 0x0080 +#define PP_ATTR_REFERENCED 0x0040 +#define PP_ATTR_MODIFIED 0x0080 -#define PP_ATTR_INTERNAL 0x0100 -#define PP_ATTR_REUSABLE 0x0200 -#define PP_ATTR_ALTACCT 0x0400 -#define PP_ATTR_NOENCRYPT 0x0800 +#define PP_ATTR_INTERNAL 0x0100 +#define PP_ATTR_REUSABLE 0x0200 +#define PP_ATTR_ALTACCT 0x0400 +#define PP_ATTR_NOENCRYPT 0x0800 -#define PP_ATTR_REFFAULT 0x1000 -#define PP_ATTR_MODFAULT 0x2000 +#define 
PP_ATTR_REFFAULT 0x1000 +#define PP_ATTR_MODFAULT 0x2000 -SECURITY_READ_ONLY_LATE(pp_attr_t*) pp_attr_table; +SECURITY_READ_ONLY_LATE(pp_attr_t*) pp_attr_table; -typedef struct pmap_io_range -{ +typedef struct pmap_io_range { uint64_t addr; uint32_t len; uint32_t wimg; // treated as pp_attr_t } __attribute__((packed)) pmap_io_range_t; -SECURITY_READ_ONLY_LATE(pmap_io_range_t*) io_attr_table; +SECURITY_READ_ONLY_LATE(pmap_io_range_t*) io_attr_table; -SECURITY_READ_ONLY_LATE(pmap_paddr_t) vm_first_phys = (pmap_paddr_t) 0; -SECURITY_READ_ONLY_LATE(pmap_paddr_t) vm_last_phys = (pmap_paddr_t) 0; +SECURITY_READ_ONLY_LATE(pmap_paddr_t) vm_first_phys = (pmap_paddr_t) 0; +SECURITY_READ_ONLY_LATE(pmap_paddr_t) vm_last_phys = (pmap_paddr_t) 0; -SECURITY_READ_ONLY_LATE(pmap_paddr_t) io_rgn_start = 0; -SECURITY_READ_ONLY_LATE(pmap_paddr_t) io_rgn_end = 0; -SECURITY_READ_ONLY_LATE(unsigned int) num_io_rgns = 0; +SECURITY_READ_ONLY_LATE(pmap_paddr_t) io_rgn_start = 0; +SECURITY_READ_ONLY_LATE(pmap_paddr_t) io_rgn_end = 0; +SECURITY_READ_ONLY_LATE(unsigned int) num_io_rgns = 0; -SECURITY_READ_ONLY_LATE(boolean_t) pmap_initialized = FALSE; /* Has pmap_init completed? */ +SECURITY_READ_ONLY_LATE(boolean_t) pmap_initialized = FALSE; /* Has pmap_init completed? */ SECURITY_READ_ONLY_LATE(uint64_t) pmap_nesting_size_min; SECURITY_READ_ONLY_LATE(uint64_t) pmap_nesting_size_max; @@ -401,160 +403,160 @@ SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm64_pmap_max_offset_default = 0x0; /* free address spaces (1 means free) */ static uint32_t asid_bitmap[MAX_ASID / (sizeof(uint32_t) * NBBY)] MARK_AS_PMAP_DATA; -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #endif -#define pa_index(pa) \ +#define pa_index(pa) \ (atop((pa) - vm_first_phys)) -#define pai_to_pvh(pai) \ +#define pai_to_pvh(pai) \ (&pv_head_table[pai]) -#define pa_valid(x) \ +#define pa_valid(x) \ ((x) >= vm_first_phys && (x) < vm_last_phys) /* PTE Define Macros */ -#define pte_is_wired(pte) \ +#define pte_is_wired(pte) \ (((pte) & ARM_PTE_WIRED_MASK) == ARM_PTE_WIRED) -#define pte_set_wired(ptep, wired) \ - do { \ - SInt16 *ptd_wiredcnt_ptr; \ - ptd_wiredcnt_ptr = (SInt16 *)&(ptep_get_ptd(ptep)->pt_cnt[ARM_PT_DESC_INDEX(ptep)].wiredcnt); \ - if (wired) { \ - *ptep |= ARM_PTE_WIRED; \ - OSAddAtomic16(1, ptd_wiredcnt_ptr); \ - } else { \ - *ptep &= ~ARM_PTE_WIRED; \ - OSAddAtomic16(-1, ptd_wiredcnt_ptr); \ - } \ +#define pte_set_wired(ptep, wired) \ + do { \ + SInt16 *ptd_wiredcnt_ptr; \ + ptd_wiredcnt_ptr = (SInt16 *)&(ptep_get_ptd(ptep)->pt_cnt[ARM_PT_DESC_INDEX(ptep)].wiredcnt); \ + if (wired) { \ + *ptep |= ARM_PTE_WIRED; \ + OSAddAtomic16(1, ptd_wiredcnt_ptr); \ + } else { \ + *ptep &= ~ARM_PTE_WIRED; \ + OSAddAtomic16(-1, ptd_wiredcnt_ptr); \ + } \ } while(0) -#define pte_is_ffr(pte) \ +#define pte_was_writeable(pte) \ (((pte) & ARM_PTE_WRITEABLE) == ARM_PTE_WRITEABLE) -#define pte_set_ffr(pte, ffr) \ - do { \ - if (ffr) { \ - pte |= ARM_PTE_WRITEABLE; \ - } else { \ - pte &= ~ARM_PTE_WRITEABLE; \ - } \ +#define pte_set_was_writeable(pte, was_writeable) \ + do { \ + if ((was_writeable)) { \ + (pte) |= ARM_PTE_WRITEABLE; \ + } else { \ + (pte) &= ~ARM_PTE_WRITEABLE; \ + } \ } while(0) /* PVE Define Macros */ -#define pve_next(pve) \ +#define pve_next(pve) \ ((pve)->pve_next) -#define pve_link_field(pve) \ +#define pve_link_field(pve) \ (&pve_next(pve)) -#define pve_link(pp, e) \ +#define pve_link(pp, e) \ ((pve_next(e) = pve_next(pp)), (pve_next(pp) = (e))) -#define pve_unlink(pp, e) \ +#define 
pve_unlink(pp, e) \ (pve_next(pp) = pve_next(e)) /* bits held in the ptep pointer field */ -#define pve_get_ptep(pve) \ +#define pve_get_ptep(pve) \ ((pve)->pve_ptep) -#define pve_set_ptep(pve, ptep_new) \ - do { \ - (pve)->pve_ptep = (ptep_new); \ +#define pve_set_ptep(pve, ptep_new) \ + do { \ + (pve)->pve_ptep = (ptep_new); \ } while (0) /* PTEP Define Macros */ -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) -#define ARM_PT_DESC_INDEX_MASK 0x00000 -#define ARM_PT_DESC_INDEX_SHIFT 0 +#define ARM_PT_DESC_INDEX_MASK 0x00000 +#define ARM_PT_DESC_INDEX_SHIFT 0 - /* - * mask for page descriptor index: 4MB per page table - */ -#define ARM_TT_PT_INDEX_MASK 0xfffU /* mask for page descriptor index: 4MB per page table */ +/* + * mask for page descriptor index: 4MB per page table + */ +#define ARM_TT_PT_INDEX_MASK 0xfffU /* mask for page descriptor index: 4MB per page table */ - /* - * Shift value used for reconstructing the virtual address for a PTE. - */ -#define ARM_TT_PT_ADDR_SHIFT (10U) +/* + * Shift value used for reconstructing the virtual address for a PTE. + */ +#define ARM_TT_PT_ADDR_SHIFT (10U) -#define ptep_get_va(ptep) \ +#define ptep_get_va(ptep) \ ((((pt_desc_t *) (pvh_list(pai_to_pvh(pa_index(ml_static_vtop((((vm_offset_t)(ptep) & ~0xFFF))))))))->pt_map[ARM_PT_DESC_INDEX(ptep)].va)+ ((((unsigned)(ptep)) & ARM_TT_PT_INDEX_MASK)<pmap)) +#define ptep_get_pmap(ptep) \ + ((((pt_desc_t *) (pvh_list(pai_to_pvh(pa_index(ml_static_vtop((((vm_offset_t)(ptep) & ~0xFFF))))))))->pmap)) #else #if (ARM_PGSHIFT == 12) -#define ARM_PT_DESC_INDEX_MASK ((PAGE_SHIFT_CONST == ARM_PGSHIFT )? 0x00000ULL : 0x03000ULL) -#define ARM_PT_DESC_INDEX_SHIFT ((PAGE_SHIFT_CONST == ARM_PGSHIFT )? 0 : 12) - /* - * mask for page descriptor index: 2MB per page table - */ -#define ARM_TT_PT_INDEX_MASK (0x0fffULL) - /* - * Shift value used for reconstructing the virtual address for a PTE. - */ -#define ARM_TT_PT_ADDR_SHIFT (9ULL) +#define ARM_PT_DESC_INDEX_MASK ((PAGE_SHIFT_CONST == ARM_PGSHIFT )? 0x00000ULL : 0x03000ULL) +#define ARM_PT_DESC_INDEX_SHIFT ((PAGE_SHIFT_CONST == ARM_PGSHIFT )? 0 : 12) +/* + * mask for page descriptor index: 2MB per page table + */ +#define ARM_TT_PT_INDEX_MASK (0x0fffULL) +/* + * Shift value used for reconstructing the virtual address for a PTE. + */ +#define ARM_TT_PT_ADDR_SHIFT (9ULL) - /* TODO: Give this a better name/documentation than "other" */ -#define ARM_TT_PT_OTHER_MASK (0x0fffULL) +/* TODO: Give this a better name/documentation than "other" */ +#define ARM_TT_PT_OTHER_MASK (0x0fffULL) #else -#define ARM_PT_DESC_INDEX_MASK (0x00000) -#define ARM_PT_DESC_INDEX_SHIFT (0) - /* - * mask for page descriptor index: 32MB per page table - */ -#define ARM_TT_PT_INDEX_MASK (0x3fffULL) - /* - * Shift value used for reconstructing the virtual address for a PTE. - */ -#define ARM_TT_PT_ADDR_SHIFT (11ULL) +#define ARM_PT_DESC_INDEX_MASK (0x00000) +#define ARM_PT_DESC_INDEX_SHIFT (0) +/* + * mask for page descriptor index: 32MB per page table + */ +#define ARM_TT_PT_INDEX_MASK (0x3fffULL) +/* + * Shift value used for reconstructing the virtual address for a PTE. 
+ */ +#define ARM_TT_PT_ADDR_SHIFT (11ULL) - /* TODO: Give this a better name/documentation than "other" */ -#define ARM_TT_PT_OTHER_MASK (0x3fffULL) +/* TODO: Give this a better name/documentation than "other" */ +#define ARM_TT_PT_OTHER_MASK (0x3fffULL) #endif -#define ARM_PT_DESC_INDEX(ptep) \ +#define ARM_PT_DESC_INDEX(ptep) \ (((unsigned)(ptep) & ARM_PT_DESC_INDEX_MASK) >> ARM_PT_DESC_INDEX_SHIFT) -#define ptep_get_va(ptep) \ - ((((pt_desc_t *) (pvh_list(pai_to_pvh(pa_index(ml_static_vtop((((vm_offset_t)(ptep) & ~ARM_TT_PT_OTHER_MASK))))))))->pt_map[ARM_PT_DESC_INDEX(ptep)].va)+ ((((unsigned)(ptep)) & ARM_TT_PT_INDEX_MASK)<pt_map[ARM_PT_DESC_INDEX(ptep)].va)+ ((((unsigned)(ptep)) & ARM_TT_PT_INDEX_MASK)<pmap)) +#define ptep_get_pmap(ptep) \ + ((((pt_desc_t *) (pvh_list(pai_to_pvh(pa_index(ml_static_vtop((((vm_offset_t)(ptep) & ~ARM_TT_PT_OTHER_MASK))))))))->pmap)) #endif -#define ARM_PT_DESC_INDEX(ptep) \ +#define ARM_PT_DESC_INDEX(ptep) \ (((unsigned)(ptep) & ARM_PT_DESC_INDEX_MASK) >> ARM_PT_DESC_INDEX_SHIFT) -#define ptep_get_ptd(ptep) \ +#define ptep_get_ptd(ptep) \ ((struct pt_desc *)(pvh_list(pai_to_pvh(pa_index(ml_static_vtop((vm_offset_t)(ptep))))))) /* PVH Define Macros */ /* pvhead type */ -#define PVH_TYPE_NULL 0x0UL -#define PVH_TYPE_PVEP 0x1UL -#define PVH_TYPE_PTEP 0x2UL -#define PVH_TYPE_PTDP 0x3UL +#define PVH_TYPE_NULL 0x0UL +#define PVH_TYPE_PVEP 0x1UL +#define PVH_TYPE_PTEP 0x2UL +#define PVH_TYPE_PTDP 0x3UL #define PVH_TYPE_MASK (0x3UL) -#ifdef __arm64__ +#ifdef __arm64__ #define PVH_FLAG_IOMMU 0x4UL #define PVH_FLAG_IOMMU_TABLE (1ULL << 63) @@ -573,117 +575,117 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #endif -#define PVH_LIST_MASK (~PVH_TYPE_MASK) +#define PVH_LIST_MASK (~PVH_TYPE_MASK) -#define pvh_test_type(h, b) \ +#define pvh_test_type(h, b) \ ((*(vm_offset_t *)(h) & (PVH_TYPE_MASK)) == (b)) -#define pvh_ptep(h) \ +#define pvh_ptep(h) \ ((pt_entry_t *)((*(vm_offset_t *)(h) & PVH_LIST_MASK) | PVH_HIGH_FLAGS)) -#define pvh_list(h) \ +#define pvh_list(h) \ ((pv_entry_t *)((*(vm_offset_t *)(h) & PVH_LIST_MASK) | PVH_HIGH_FLAGS)) -#define pvh_get_flags(h) \ +#define pvh_get_flags(h) \ (*(vm_offset_t *)(h) & PVH_HIGH_FLAGS) -#define pvh_set_flags(h, f) \ - do { \ - __c11_atomic_store((_Atomic vm_offset_t *)(h), (*(vm_offset_t *)(h) & ~PVH_HIGH_FLAGS) | (f), \ - memory_order_relaxed); \ +#define pvh_set_flags(h, f) \ + do { \ + __c11_atomic_store((_Atomic vm_offset_t *)(h), (*(vm_offset_t *)(h) & ~PVH_HIGH_FLAGS) | (f), \ + memory_order_relaxed); \ } while (0) -#define pvh_update_head(h, e, t) \ - do { \ - assert(*(vm_offset_t *)(h) & PVH_FLAG_LOCK); \ - __c11_atomic_store((_Atomic vm_offset_t *)(h), (vm_offset_t)(e) | (t) | PVH_FLAG_LOCK, \ - memory_order_relaxed); \ +#define pvh_update_head(h, e, t) \ + do { \ + assert(*(vm_offset_t *)(h) & PVH_FLAG_LOCK); \ + __c11_atomic_store((_Atomic vm_offset_t *)(h), (vm_offset_t)(e) | (t) | PVH_FLAG_LOCK, \ + memory_order_relaxed); \ } while (0) -#define pvh_update_head_unlocked(h, e, t) \ - do { \ - assert(!(*(vm_offset_t *)(h) & PVH_FLAG_LOCK)); \ - *(vm_offset_t *)(h) = ((vm_offset_t)(e) | (t)) & ~PVH_FLAG_LOCK; \ +#define pvh_update_head_unlocked(h, e, t) \ + do { \ + assert(!(*(vm_offset_t *)(h) & PVH_FLAG_LOCK)); \ + *(vm_offset_t *)(h) = ((vm_offset_t)(e) | (t)) & ~PVH_FLAG_LOCK; \ } while (0) -#define pvh_add(h, e) \ - do { \ - assert(!pvh_test_type((h), PVH_TYPE_PTEP)); \ - pve_next(e) = pvh_list(h); \ - pvh_update_head((h), (e), PVH_TYPE_PVEP); \ +#define pvh_add(h, e) \ + do { \ + 
assert(!pvh_test_type((h), PVH_TYPE_PTEP)); \ + pve_next(e) = pvh_list(h); \ + pvh_update_head((h), (e), PVH_TYPE_PVEP); \ } while (0) -#define pvh_remove(h, p, e) \ - do { \ - assert(!PVE_NEXT_IS_ALTACCT(pve_next((e)))); \ - if ((p) == (h)) { \ - if (PVE_NEXT_PTR(pve_next((e))) == PV_ENTRY_NULL) { \ - pvh_update_head((h), PV_ENTRY_NULL, PVH_TYPE_NULL); \ - } else { \ - pvh_update_head((h), PVE_NEXT_PTR(pve_next((e))), PVH_TYPE_PVEP); \ - } \ - } else { \ - /* \ - * PMAP LEDGERS: \ - * preserve the "alternate accounting" bit \ - * when updating "p" (the previous entry's \ - * "pve_next"). \ - */ \ - boolean_t __is_altacct; \ - __is_altacct = PVE_NEXT_IS_ALTACCT(*(p)); \ - *(p) = PVE_NEXT_PTR(pve_next((e))); \ - if (__is_altacct) { \ - PVE_NEXT_SET_ALTACCT((p)); \ - } else { \ - PVE_NEXT_CLR_ALTACCT((p)); \ - } \ - } \ +#define pvh_remove(h, p, e) \ + do { \ + assert(!PVE_NEXT_IS_ALTACCT(pve_next((e)))); \ + if ((p) == (h)) { \ + if (PVE_NEXT_PTR(pve_next((e))) == PV_ENTRY_NULL) { \ + pvh_update_head((h), PV_ENTRY_NULL, PVH_TYPE_NULL); \ + } else { \ + pvh_update_head((h), PVE_NEXT_PTR(pve_next((e))), PVH_TYPE_PVEP); \ + } \ + } else { \ + /* \ + * PMAP LEDGERS: \ + * preserve the "alternate accounting" bit \ + * when updating "p" (the previous entry's \ + * "pve_next"). \ + */ \ + boolean_t __is_altacct; \ + __is_altacct = PVE_NEXT_IS_ALTACCT(*(p)); \ + *(p) = PVE_NEXT_PTR(pve_next((e))); \ + if (__is_altacct) { \ + PVE_NEXT_SET_ALTACCT((p)); \ + } else { \ + PVE_NEXT_CLR_ALTACCT((p)); \ + } \ + } \ } while (0) /* PPATTR Define Macros */ -#define ppattr_set_bits(h, b) \ - do { \ - while (!OSCompareAndSwap16(*(pp_attr_t *)(h), *(pp_attr_t *)(h) | (b), (pp_attr_t *)(h))); \ +#define ppattr_set_bits(h, b) \ + do { \ + while (!OSCompareAndSwap16(*(pp_attr_t *)(h), *(pp_attr_t *)(h) | (b), (pp_attr_t *)(h))); \ } while (0) -#define ppattr_clear_bits(h, b) \ - do { \ - while (!OSCompareAndSwap16(*(pp_attr_t *)(h), *(pp_attr_t *)(h) & ~(b), (pp_attr_t *)(h))); \ +#define ppattr_clear_bits(h, b) \ + do { \ + while (!OSCompareAndSwap16(*(pp_attr_t *)(h), *(pp_attr_t *)(h) & ~(b), (pp_attr_t *)(h))); \ } while (0) -#define ppattr_test_bits(h, b) \ +#define ppattr_test_bits(h, b) \ ((*(pp_attr_t *)(h) & (b)) == (b)) -#define pa_set_bits(x, b) \ - do { \ - if (pa_valid(x)) \ - ppattr_set_bits(&pp_attr_table[pa_index(x)], \ - (b)); \ +#define pa_set_bits(x, b) \ + do { \ + if (pa_valid(x)) \ + ppattr_set_bits(&pp_attr_table[pa_index(x)], \ + (b)); \ } while (0) -#define pa_test_bits(x, b) \ +#define pa_test_bits(x, b) \ (pa_valid(x) ? ppattr_test_bits(&pp_attr_table[pa_index(x)],\ - (b)) : FALSE) + (b)) : FALSE) -#define pa_clear_bits(x, b) \ - do { \ - if (pa_valid(x)) \ - ppattr_clear_bits(&pp_attr_table[pa_index(x)], \ - (b)); \ +#define pa_clear_bits(x, b) \ + do { \ + if (pa_valid(x)) \ + ppattr_clear_bits(&pp_attr_table[pa_index(x)], \ + (b)); \ } while (0) -#define pa_set_modify(x) \ +#define pa_set_modify(x) \ pa_set_bits(x, PP_ATTR_MODIFIED) -#define pa_clear_modify(x) \ +#define pa_clear_modify(x) \ pa_clear_bits(x, PP_ATTR_MODIFIED) -#define pa_set_reference(x) \ +#define pa_set_reference(x) \ pa_set_bits(x, PP_ATTR_REFERENCED) -#define pa_clear_reference(x) \ +#define pa_clear_reference(x) \ pa_clear_bits(x, PP_ATTR_REFERENCED) @@ -701,21 +703,21 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #define CLR_REUSABLE_PAGE(pai) \ ppattr_clear_bits(&pp_attr_table[pai], PP_ATTR_REUSABLE) -#define IS_ALTACCT_PAGE(pai, pve_p) \ - (((pve_p) == NULL) \ - ? 
ppattr_test_bits(&pp_attr_table[pai], PP_ATTR_ALTACCT) \ +#define IS_ALTACCT_PAGE(pai, pve_p) \ + (((pve_p) == NULL) \ + ? ppattr_test_bits(&pp_attr_table[pai], PP_ATTR_ALTACCT) \ : PVE_NEXT_IS_ALTACCT(pve_next((pve_p)))) -#define SET_ALTACCT_PAGE(pai, pve_p) \ - if ((pve_p) == NULL) { \ - ppattr_set_bits(&pp_attr_table[pai], PP_ATTR_ALTACCT); \ - } else { \ - PVE_NEXT_SET_ALTACCT(&pve_next((pve_p))); \ +#define SET_ALTACCT_PAGE(pai, pve_p) \ + if ((pve_p) == NULL) { \ + ppattr_set_bits(&pp_attr_table[pai], PP_ATTR_ALTACCT); \ + } else { \ + PVE_NEXT_SET_ALTACCT(&pve_next((pve_p))); \ } -#define CLR_ALTACCT_PAGE(pai, pve_p) \ - if ((pve_p) == NULL) { \ - ppattr_clear_bits(&pp_attr_table[pai], PP_ATTR_ALTACCT); \ - } else { \ - PVE_NEXT_CLR_ALTACCT(&pve_next((pve_p))); \ +#define CLR_ALTACCT_PAGE(pai, pve_p) \ + if ((pve_p) == NULL) { \ + ppattr_clear_bits(&pp_attr_table[pai], PP_ATTR_ALTACCT); \ + } else { \ + PVE_NEXT_CLR_ALTACCT(&pve_next((pve_p))); \ } #define IS_REFFAULT_PAGE(pai) \ @@ -732,30 +734,30 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #define CLR_MODFAULT_PAGE(pai) \ ppattr_clear_bits(&pp_attr_table[pai], PP_ATTR_MODFAULT) -#define tte_get_ptd(tte) \ +#define tte_get_ptd(tte) \ ((struct pt_desc *)(pvh_list(pai_to_pvh(pa_index((vm_offset_t)((tte) & ~PAGE_MASK)))))) -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) -#define tte_index(pmap, addr) \ +#define tte_index(pmap, addr) \ ttenum((addr)) #else -#define tt0_index(pmap, addr) \ +#define tt0_index(pmap, addr) \ (((addr) & ARM_TT_L0_INDEX_MASK) >> ARM_TT_L0_SHIFT) -#define tt1_index(pmap, addr) \ +#define tt1_index(pmap, addr) \ (((addr) & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT) -#define tt2_index(pmap, addr) \ +#define tt2_index(pmap, addr) \ (((addr) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT) -#define tt3_index(pmap, addr) \ +#define tt3_index(pmap, addr) \ (((addr) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT) -#define tte_index(pmap, addr) \ +#define tte_index(pmap, addr) \ (((addr) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT) #endif @@ -764,21 +766,23 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; * Lock on pmap system */ -#define PMAP_LOCK_INIT(pmap) { \ - simple_lock_init(&(pmap)->lock, 0); \ - } +lck_grp_t pmap_lck_grp; + +#define PMAP_LOCK_INIT(pmap) { \ + simple_lock_init(&(pmap)->lock, 0); \ + } -#define PMAP_LOCK(pmap) { \ - pmap_simple_lock(&(pmap)->lock); \ +#define PMAP_LOCK(pmap) { \ + pmap_simple_lock(&(pmap)->lock); \ } -#define PMAP_UNLOCK(pmap) { \ - pmap_simple_unlock(&(pmap)->lock); \ +#define PMAP_UNLOCK(pmap) { \ + pmap_simple_unlock(&(pmap)->lock); \ } #if MACH_ASSERT -#define PMAP_ASSERT_LOCKED(pmap) { \ - simple_lock_assert(&(pmap)->lock, LCK_ASSERT_OWNED); \ +#define PMAP_ASSERT_LOCKED(pmap) { \ + simple_lock_assert(&(pmap)->lock, LCK_ASSERT_OWNED); \ } #else #define PMAP_ASSERT_LOCKED(pmap) @@ -790,95 +794,95 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #define PVH_LOCK_WORD 0 #endif -#define ASSERT_PVH_LOCKED(index) \ - do { \ - assert((vm_offset_t)(pv_head_table[index]) & PVH_FLAG_LOCK); \ +#define ASSERT_PVH_LOCKED(index) \ + do { \ + assert((vm_offset_t)(pv_head_table[index]) & PVH_FLAG_LOCK); \ } while (0) -#define LOCK_PVH(index) \ - do { \ - pmap_lock_bit((uint32_t*)(&pv_head_table[index]) + PVH_LOCK_WORD, PVH_LOCK_BIT - (PVH_LOCK_WORD * 32)); \ +#define LOCK_PVH(index) \ + do { \ + pmap_lock_bit((uint32_t*)(&pv_head_table[index]) + PVH_LOCK_WORD, PVH_LOCK_BIT - (PVH_LOCK_WORD * 32)); \ } while (0) -#define UNLOCK_PVH(index) \ - do { \ - ASSERT_PVH_LOCKED(index); \ - 
pmap_unlock_bit((uint32_t*)(&pv_head_table[index]) + PVH_LOCK_WORD, PVH_LOCK_BIT - (PVH_LOCK_WORD * 32)); \ +#define UNLOCK_PVH(index) \ + do { \ + ASSERT_PVH_LOCKED(index); \ + pmap_unlock_bit((uint32_t*)(&pv_head_table[index]) + PVH_LOCK_WORD, PVH_LOCK_BIT - (PVH_LOCK_WORD * 32)); \ } while (0) -#define PMAP_UPDATE_TLBS(pmap, s, e) { \ - flush_mmu_tlb_region_asid_async(s, (unsigned)(e - s), pmap); \ - sync_tlb_flush(); \ +#define PMAP_UPDATE_TLBS(pmap, s, e) { \ + flush_mmu_tlb_region_asid_async(s, (unsigned)(e - s), pmap); \ + sync_tlb_flush(); \ } -#ifdef __ARM_L1_PTW__ +#ifdef __ARM_L1_PTW__ -#define FLUSH_PTE_RANGE(spte, epte) \ +#define FLUSH_PTE_RANGE(spte, epte) \ __builtin_arm_dmb(DMB_ISH); -#define FLUSH_PTE(pte_p) \ +#define FLUSH_PTE(pte_p) \ __builtin_arm_dmb(DMB_ISH); -#define FLUSH_PTE_STRONG(pte_p) \ +#define FLUSH_PTE_STRONG(pte_p) \ __builtin_arm_dsb(DSB_ISH); -#define FLUSH_PTE_RANGE_STRONG(spte, epte) \ +#define FLUSH_PTE_RANGE_STRONG(spte, epte) \ __builtin_arm_dsb(DSB_ISH); #else /* __ARM_L1_PTW */ -#define FLUSH_PTE_RANGE(spte, epte) \ - CleanPoU_DcacheRegion((vm_offset_t)spte, \ - (vm_offset_t)epte - (vm_offset_t)spte); - -#define FLUSH_PTE(pte_p) \ - __unreachable_ok_push \ - if (TEST_PAGE_RATIO_4) \ - FLUSH_PTE_RANGE((pte_p), (pte_p) + 4); \ - else \ - FLUSH_PTE_RANGE((pte_p), (pte_p) + 1); \ - CleanPoU_DcacheRegion((vm_offset_t)pte_p, sizeof(pt_entry_t)); \ +#define FLUSH_PTE_RANGE(spte, epte) \ + CleanPoU_DcacheRegion((vm_offset_t)spte, \ + (vm_offset_t)epte - (vm_offset_t)spte); + +#define FLUSH_PTE(pte_p) \ + __unreachable_ok_push \ + if (TEST_PAGE_RATIO_4) \ + FLUSH_PTE_RANGE((pte_p), (pte_p) + 4); \ + else \ + FLUSH_PTE_RANGE((pte_p), (pte_p) + 1); \ + CleanPoU_DcacheRegion((vm_offset_t)pte_p, sizeof(pt_entry_t)); \ __unreachable_ok_pop -#define FLUSH_PTE_STRONG(pte_p) FLUSH_PTE(pte_p) +#define FLUSH_PTE_STRONG(pte_p) FLUSH_PTE(pte_p) #define FLUSH_PTE_RANGE_STRONG(spte, epte) FLUSH_PTE_RANGE(spte, epte) #endif /* !defined(__ARM_L1_PTW) */ -#define WRITE_PTE_FAST(pte_p, pte_entry) \ - __unreachable_ok_push \ - if (TEST_PAGE_RATIO_4) { \ - if (((unsigned)(pte_p)) & 0x1f) \ - panic("WRITE_PTE\n"); \ - if (((pte_entry) & ~ARM_PTE_COMPRESSED_MASK) == ARM_PTE_EMPTY) { \ - *(pte_p) = (pte_entry); \ - *((pte_p)+1) = (pte_entry); \ - *((pte_p)+2) = (pte_entry); \ - *((pte_p)+3) = (pte_entry); \ - } else { \ - *(pte_p) = (pte_entry); \ - *((pte_p)+1) = (pte_entry) | 0x1000; \ - *((pte_p)+2) = (pte_entry) | 0x2000; \ - *((pte_p)+3) = (pte_entry) | 0x3000; \ - } \ - } else { \ - *(pte_p) = (pte_entry); \ - } \ +#define WRITE_PTE_FAST(pte_p, pte_entry) \ + __unreachable_ok_push \ + if (TEST_PAGE_RATIO_4) { \ + if (((unsigned)(pte_p)) & 0x1f) \ + panic("WRITE_PTE\n"); \ + if (((pte_entry) & ~ARM_PTE_COMPRESSED_MASK) == ARM_PTE_EMPTY) { \ + *(pte_p) = (pte_entry); \ + *((pte_p)+1) = (pte_entry); \ + *((pte_p)+2) = (pte_entry); \ + *((pte_p)+3) = (pte_entry); \ + } else { \ + *(pte_p) = (pte_entry); \ + *((pte_p)+1) = (pte_entry) | 0x1000; \ + *((pte_p)+2) = (pte_entry) | 0x2000; \ + *((pte_p)+3) = (pte_entry) | 0x3000; \ + } \ + } else { \ + *(pte_p) = (pte_entry); \ + } \ __unreachable_ok_pop -#define WRITE_PTE(pte_p, pte_entry) \ - WRITE_PTE_FAST(pte_p, pte_entry); \ +#define WRITE_PTE(pte_p, pte_entry) \ + WRITE_PTE_FAST(pte_p, pte_entry); \ FLUSH_PTE(pte_p); -#define WRITE_PTE_STRONG(pte_p, pte_entry) \ - WRITE_PTE_FAST(pte_p, pte_entry); \ +#define WRITE_PTE_STRONG(pte_p, pte_entry) \ + WRITE_PTE_FAST(pte_p, pte_entry); \ FLUSH_PTE_STRONG(pte_p); /* * Other useful 
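LOCK_PVH()/UNLOCK_PVH() above take a spin lock that lives inside the pv_head_table entry itself: pmap_lock_bit() spins until it wins the PVH_FLAG_LOCK bit, leaving the rest of the word (the encoded list head and type bits) untouched. A user-space sketch of such an embedded bit lock; the bit position is an assumption, since the real PVH_LOCK_BIT is configuration-dependent:

```c
#include <stdatomic.h>
#include <stdint.h>

#define PVH_FLAG_LOCK_SKETCH (1ULL << 61)   /* assumed lock-bit position */

/* Spin until the lock bit transitions 0 -> 1 in this word; the remaining
 * bits keep holding the packed PV list head, as in pv_head_table. */
static void
lock_pvh_sketch(_Atomic uint64_t *pvh)
{
	for (;;) {
		uint64_t prev = atomic_fetch_or_explicit(pvh,
		    PVH_FLAG_LOCK_SKETCH, memory_order_acquire);
		if (!(prev & PVH_FLAG_LOCK_SKETCH)) {
			return;     /* we set the bit: lock acquired */
		}
		/* contended; a kernel implementation would pause/WFE here */
	}
}

static void
unlock_pvh_sketch(_Atomic uint64_t *pvh)
{
	atomic_fetch_and_explicit(pvh, ~PVH_FLAG_LOCK_SKETCH,
	    memory_order_release);
}
```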
macros. */ -#define current_pmap() \ +#define current_pmap() \ (vm_map_pmap(current_thread()->map)) @@ -889,7 +893,7 @@ SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap; #if DEVELOPMENT || DEBUG -/* +/* * Trace levels are controlled by a bitmask in which each * level can be enabled/disabled by the (1<<level) bit. [...] */ -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) static inline tt_entry_t *pmap_tt1e( - pmap_t, vm_map_address_t); + pmap_t, vm_map_address_t); static inline tt_entry_t *pmap_tt2e( - pmap_t, vm_map_address_t); + pmap_t, vm_map_address_t); static inline pt_entry_t *pmap_tt3e( - pmap_t, vm_map_address_t); + pmap_t, vm_map_address_t); static void pmap_unmap_sharedpage( - pmap_t pmap); + pmap_t pmap); static boolean_t - pmap_is_64bit(pmap_t); +pmap_is_64bit(pmap_t); #endif static inline tt_entry_t *pmap_tte( - pmap_t, vm_map_address_t); + pmap_t, vm_map_address_t); static inline pt_entry_t *pmap_pte( - pmap_t, vm_map_address_t); + pmap_t, vm_map_address_t); static void pmap_update_cache_attributes_locked( - ppnum_t, unsigned); + ppnum_t, unsigned); boolean_t arm_clear_fast_fault( - ppnum_t ppnum, - vm_prot_t fault_type); + ppnum_t ppnum, + vm_prot_t fault_type); -static pmap_paddr_t pmap_pages_reclaim( - void); +static pmap_paddr_t pmap_pages_reclaim( + void); static kern_return_t pmap_pages_alloc( - pmap_paddr_t *pa, - unsigned size, - unsigned option); + pmap_paddr_t *pa, + unsigned size, + unsigned option); -#define PMAP_PAGES_ALLOCATE_NOWAIT 0x1 -#define PMAP_PAGES_RECLAIM_NOWAIT 0x2 +#define PMAP_PAGES_ALLOCATE_NOWAIT 0x1 +#define PMAP_PAGES_RECLAIM_NOWAIT 0x2 static void pmap_pages_free( - pmap_paddr_t pa, - unsigned size); + pmap_paddr_t pa, + unsigned size); static void pmap_pin_kernel_pages(vm_offset_t kva, size_t nbytes); @@ -1068,222 +1072,227 @@ static void pmap_trim_subord(pmap_t subord); static __return_type __function_name##_internal __function_args; PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -arm_fast_fault, (pmap_t pmap, - vm_map_address_t va, - vm_prot_t fault_type, - boolean_t from_user), ARM_FAST_FAULT_INDEX); + kern_return_t, + arm_fast_fault, (pmap_t pmap, + vm_map_address_t va, + vm_prot_t fault_type, + boolean_t from_user), ARM_FAST_FAULT_INDEX); PMAP_SUPPORT_PROTOTYPES( -boolean_t, -arm_force_fast_fault, (ppnum_t ppnum, - vm_prot_t allow_mode, - int options), ARM_FORCE_FAST_FAULT_INDEX); + boolean_t, + arm_force_fast_fault, (ppnum_t ppnum, + vm_prot_t allow_mode, + int options), ARM_FORCE_FAST_FAULT_INDEX); PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -mapping_free_prime, (void), MAPPING_FREE_PRIME_INDEX); + kern_return_t, + mapping_free_prime, (void), MAPPING_FREE_PRIME_INDEX); PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -mapping_replenish, (void), MAPPING_REPLENISH_INDEX); + kern_return_t, + mapping_replenish, (void), MAPPING_REPLENISH_INDEX); PMAP_SUPPORT_PROTOTYPES( -boolean_t, -pmap_batch_set_cache_attributes, (ppnum_t pn, - unsigned int cacheattr, - unsigned int page_cnt, - unsigned int page_index, - boolean_t doit, - unsigned int *res), PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX); + boolean_t, + pmap_batch_set_cache_attributes, (ppnum_t pn, + unsigned int cacheattr, + unsigned int page_cnt, + unsigned int page_index, + boolean_t doit, + unsigned int *res), PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_change_wiring, (pmap_t pmap, - vm_map_address_t v, - boolean_t wired), PMAP_CHANGE_WIRING_INDEX); + void, + pmap_change_wiring, (pmap_t pmap, + vm_map_address_t v, + boolean_t wired), PMAP_CHANGE_WIRING_INDEX); PMAP_SUPPORT_PROTOTYPES( -pmap_t, -pmap_create, (ledger_t
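PMAP_SUPPORT_PROTOTYPES() stamps out a static __function_name##_internal prototype from each declaration; the index argument is presumably consumed by a guarded dispatch table in configurations not visible in this hunk. A minimal reproduction of the token-pasting pattern, with hypothetical names:

```c
/* One invocation declares the worker that backs the public entry point;
 * the index parameter is accepted but unused in this visible expansion. */
#define SUPPORT_PROTOTYPES_SKETCH(__return_type, __function_name, __function_args, __function_index) \
	static __return_type __function_name##_internal __function_args

SUPPORT_PROTOTYPES_SKETCH(
	int,
	demo_op, (int arg),
	DEMO_OP_INDEX);
/* expands to: static int demo_op_internal(int arg); */

static int
demo_op_internal(int arg)
{
	return arg + 1;     /* hypothetical body, for completeness */
}
```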
ledger, - vm_map_size_t size, - boolean_t is_64bit), PMAP_CREATE_INDEX); + pmap_t, + pmap_create, (ledger_t ledger, + vm_map_size_t size, + boolean_t is_64bit), PMAP_CREATE_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_destroy, (pmap_t pmap), PMAP_DESTROY_INDEX); + void, + pmap_destroy, (pmap_t pmap), PMAP_DESTROY_INDEX); PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -pmap_enter_options, (pmap_t pmap, - vm_map_address_t v, - ppnum_t pn, - vm_prot_t prot, - vm_prot_t fault_type, - unsigned int flags, - boolean_t wired, - unsigned int options), PMAP_ENTER_OPTIONS_INDEX); + kern_return_t, + pmap_enter_options, (pmap_t pmap, + vm_map_address_t v, + ppnum_t pn, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired, + unsigned int options), PMAP_ENTER_OPTIONS_INDEX); PMAP_SUPPORT_PROTOTYPES( -vm_offset_t, -pmap_extract, (pmap_t pmap, - vm_map_address_t va), PMAP_EXTRACT_INDEX); + vm_offset_t, + pmap_extract, (pmap_t pmap, + vm_map_address_t va), PMAP_EXTRACT_INDEX); PMAP_SUPPORT_PROTOTYPES( -ppnum_t, -pmap_find_phys, (pmap_t pmap, - addr64_t va), PMAP_FIND_PHYS_INDEX); + ppnum_t, + pmap_find_phys, (pmap_t pmap, + addr64_t va), PMAP_FIND_PHYS_INDEX); #if (__ARM_VMSA__ > 7) PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -pmap_insert_sharedpage, (pmap_t pmap), PMAP_INSERT_SHAREDPAGE_INDEX); + kern_return_t, + pmap_insert_sharedpage, (pmap_t pmap), PMAP_INSERT_SHAREDPAGE_INDEX); #endif PMAP_SUPPORT_PROTOTYPES( -boolean_t, -pmap_is_empty, (pmap_t pmap, - vm_map_offset_t va_start, - vm_map_offset_t va_end), PMAP_IS_EMPTY_INDEX); + boolean_t, + pmap_is_empty, (pmap_t pmap, + vm_map_offset_t va_start, + vm_map_offset_t va_end), PMAP_IS_EMPTY_INDEX); PMAP_SUPPORT_PROTOTYPES( -unsigned int, -pmap_map_cpu_windows_copy, (ppnum_t pn, - vm_prot_t prot, - unsigned int wimg_bits), PMAP_MAP_CPU_WINDOWS_COPY_INDEX); + unsigned int, + pmap_map_cpu_windows_copy, (ppnum_t pn, + vm_prot_t prot, + unsigned int wimg_bits), PMAP_MAP_CPU_WINDOWS_COPY_INDEX); + +PMAP_SUPPORT_PROTOTYPES( + kern_return_t, + pmap_nest, (pmap_t grand, + pmap_t subord, + addr64_t vstart, + addr64_t nstart, + uint64_t size), PMAP_NEST_INDEX); PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -pmap_nest, (pmap_t grand, - pmap_t subord, - addr64_t vstart, - addr64_t nstart, - uint64_t size), PMAP_NEST_INDEX); + void, + pmap_page_protect_options, (ppnum_t ppnum, + vm_prot_t prot, + unsigned int options), PMAP_PAGE_PROTECT_OPTIONS_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_page_protect_options, (ppnum_t ppnum, - vm_prot_t prot, - unsigned int options), PMAP_PAGE_PROTECT_OPTIONS_INDEX); + void, + pmap_protect_options, (pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + vm_prot_t prot, + unsigned int options, + void *args), PMAP_PROTECT_OPTIONS_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_protect_options, (pmap_t pmap, - vm_map_address_t start, - vm_map_address_t end, - vm_prot_t prot, - unsigned int options, - void *args), PMAP_PROTECT_OPTIONS_INDEX); + kern_return_t, + pmap_query_page_info, (pmap_t pmap, + vm_map_offset_t va, + int *disp_p), PMAP_QUERY_PAGE_INFO_INDEX); PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -pmap_query_page_info, (pmap_t pmap, - vm_map_offset_t va, - int *disp_p), PMAP_QUERY_PAGE_INFO_INDEX); + mach_vm_size_t, + pmap_query_resident, (pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + mach_vm_size_t * compressed_bytes_p), PMAP_QUERY_RESIDENT_INDEX); PMAP_SUPPORT_PROTOTYPES( -mach_vm_size_t, -pmap_query_resident, (pmap_t pmap, - vm_map_address_t start, - vm_map_address_t end, - 
mach_vm_size_t *compressed_bytes_p), PMAP_QUERY_RESIDENT_INDEX); + void, + pmap_reference, (pmap_t pmap), PMAP_REFERENCE_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_reference, (pmap_t pmap), PMAP_REFERENCE_INDEX); + int, + pmap_remove_options, (pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + int options), PMAP_REMOVE_OPTIONS_INDEX); PMAP_SUPPORT_PROTOTYPES( -int, -pmap_remove_options, (pmap_t pmap, - vm_map_address_t start, - vm_map_address_t end, - int options), PMAP_REMOVE_OPTIONS_INDEX); + kern_return_t, + pmap_return, (boolean_t do_panic, + boolean_t do_recurse), PMAP_RETURN_INDEX); PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -pmap_return, (boolean_t do_panic, - boolean_t do_recurse), PMAP_RETURN_INDEX); + void, + pmap_set_cache_attributes, (ppnum_t pn, + unsigned int cacheattr), PMAP_SET_CACHE_ATTRIBUTES_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_set_cache_attributes, (ppnum_t pn, - unsigned int cacheattr), PMAP_SET_CACHE_ATTRIBUTES_INDEX); + void, + pmap_update_compressor_page, (ppnum_t pn, + unsigned int prev_cacheattr, unsigned int new_cacheattr), PMAP_UPDATE_COMPRESSOR_PAGE_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_set_nested, (pmap_t pmap), PMAP_SET_NESTED_INDEX); + void, + pmap_set_nested, (pmap_t pmap), PMAP_SET_NESTED_INDEX); #if MACH_ASSERT PMAP_SUPPORT_PROTOTYPES( -void, -pmap_set_process, (pmap_t pmap, - int pid, - char *procname), PMAP_SET_PROCESS_INDEX); + void, + pmap_set_process, (pmap_t pmap, + int pid, + char *procname), PMAP_SET_PROCESS_INDEX); #endif PMAP_SUPPORT_PROTOTYPES( -void, -pmap_unmap_cpu_windows_copy, (unsigned int index), PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX); + void, + pmap_unmap_cpu_windows_copy, (unsigned int index), PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX); PMAP_SUPPORT_PROTOTYPES( -kern_return_t, -pmap_unnest_options, (pmap_t grand, - addr64_t vaddr, - uint64_t size, - unsigned int option), PMAP_UNNEST_OPTIONS_INDEX); + kern_return_t, + pmap_unnest_options, (pmap_t grand, + addr64_t vaddr, + uint64_t size, + unsigned int option), PMAP_UNNEST_OPTIONS_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -phys_attribute_set, (ppnum_t pn, - unsigned int bits), PHYS_ATTRIBUTE_SET_INDEX); + void, + phys_attribute_set, (ppnum_t pn, + unsigned int bits), PHYS_ATTRIBUTE_SET_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -phys_attribute_clear, (ppnum_t pn, - unsigned int bits, - int options, - void *arg), PHYS_ATTRIBUTE_CLEAR_INDEX); + void, + phys_attribute_clear, (ppnum_t pn, + unsigned int bits, + int options, + void *arg), PHYS_ATTRIBUTE_CLEAR_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_switch, (pmap_t pmap), PMAP_SWITCH_INDEX); + void, + pmap_switch, (pmap_t pmap), PMAP_SWITCH_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_switch_user_ttb, (pmap_t pmap), PMAP_SWITCH_USER_TTB_INDEX); + void, + pmap_switch_user_ttb, (pmap_t pmap), PMAP_SWITCH_USER_TTB_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_clear_user_ttb, (void), PMAP_CLEAR_USER_TTB_INDEX); + void, + pmap_clear_user_ttb, (void), PMAP_CLEAR_USER_TTB_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_set_jit_entitled, (pmap_t pmap), PMAP_SET_JIT_ENTITLED_INDEX); + void, + pmap_set_jit_entitled, (pmap_t pmap), PMAP_SET_JIT_ENTITLED_INDEX); PMAP_SUPPORT_PROTOTYPES( -void, -pmap_trim, (pmap_t grand, - pmap_t subord, - addr64_t vstart, - addr64_t nstart, - uint64_t size), PMAP_TRIM_INDEX); + void, + pmap_trim, (pmap_t grand, + pmap_t subord, + addr64_t vstart, + addr64_t nstart, + uint64_t size), PMAP_TRIM_INDEX); -void pmap_footprint_suspend(vm_map_t map, - boolean_t suspend); +void pmap_footprint_suspend(vm_map_t 
map, + boolean_t suspend); PMAP_SUPPORT_PROTOTYPES( void, pmap_footprint_suspend, (vm_map_t map, - boolean_t suspend), + boolean_t suspend), PMAP_FOOTPRINT_SUSPEND_INDEX); @@ -1291,25 +1300,25 @@ PMAP_SUPPORT_PROTOTYPES( boolean_t pgtrace_enabled = 0; typedef struct { - queue_chain_t chain; - - /* - pmap - pmap for below addresses - ova - original va page address - cva - clone va addresses for pre, target and post pages - cva_spte - clone saved ptes - range - trace range in this map - cloned - has been cloned or not - */ - pmap_t pmap; - vm_map_offset_t ova; - vm_map_offset_t cva[3]; - pt_entry_t cva_spte[3]; - struct { - pmap_paddr_t start; - pmap_paddr_t end; - } range; - bool cloned; + queue_chain_t chain; + + /* + * pmap - pmap for below addresses + * ova - original va page address + * cva - clone va addresses for pre, target and post pages + * cva_spte - clone saved ptes + * range - trace range in this map + * cloned - has been cloned or not + */ + pmap_t pmap; + vm_map_offset_t ova; + vm_map_offset_t cva[3]; + pt_entry_t cva_spte[3]; + struct { + pmap_paddr_t start; + pmap_paddr_t end; + } range; + bool cloned; } pmap_pgtrace_map_t; static void pmap_pgtrace_init(void); @@ -1318,7 +1327,7 @@ static void pmap_pgtrace_remove_clone(pmap_t pmap, pmap_paddr_t pa_page, vm_map_ static void pmap_pgtrace_remove_all_clone(pmap_paddr_t pa); #endif -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) /* * The low global vector page is mapped at a fixed alias. * Since the page size is 16k for H8 and newer we map the globals to a 16k @@ -1327,13 +1336,13 @@ static void pmap_pgtrace_remove_all_clone(pmap_paddr_t pa); * we leave H6 and H7 where they were. */ #if (ARM_PGSHIFT == 14) -#define LOWGLOBAL_ALIAS (LOW_GLOBAL_BASE_ADDRESS + 0x4000) +#define LOWGLOBAL_ALIAS (LOW_GLOBAL_BASE_ADDRESS + 0x4000) #else -#define LOWGLOBAL_ALIAS (LOW_GLOBAL_BASE_ADDRESS + 0x2000) +#define LOWGLOBAL_ALIAS (LOW_GLOBAL_BASE_ADDRESS + 0x2000) #endif #else -#define LOWGLOBAL_ALIAS (0xFFFF1000) +#define LOWGLOBAL_ALIAS (0xFFFF1000) #endif long long alloc_tteroot_count __attribute__((aligned(8))) MARK_AS_PMAP_DATA = 0LL; @@ -1341,7 +1350,7 @@ long long alloc_ttepages_count __attribute__((aligned(8))) MARK_AS_PMAP_DATA = 0 long long alloc_ptepages_count __attribute__((aligned(8))) MARK_AS_PMAP_DATA = 0LL; long long alloc_pmap_pages_count __attribute__((aligned(8))) = 0LL; -int pt_fake_zone_index = -1; /* index of pmap fake zone */ +int pt_fake_zone_index = -1; /* index of pmap fake zone */ @@ -1385,9 +1394,9 @@ pmap_paddr_t pmap_pages_reclaim( void) { - boolean_t found_page; - unsigned i; - pt_desc_t *ptdp; + boolean_t found_page; + unsigned i; + pt_desc_t *ptdp; /* @@ -1409,15 +1418,14 @@ pmap_pages_reclaim( pmap_pages_request_acum++; while (1) { - if (pmap_pages_reclaim_list != (page_free_entry_t *)NULL) { - page_free_entry_t *page_entry; + page_free_entry_t *page_entry; page_entry = pmap_pages_reclaim_list; pmap_pages_reclaim_list = pmap_pages_reclaim_list->next; pmap_simple_unlock(&pmap_pages_lock); - return((pmap_paddr_t)ml_static_vtop((vm_offset_t)page_entry)); + return (pmap_paddr_t)ml_static_vtop((vm_offset_t)page_entry); } pmap_simple_unlock(&pmap_pages_lock); @@ -1429,12 +1437,11 @@ pmap_pages_reclaim( while (!queue_end(&pt_page_list, (queue_entry_t)ptdp)) { if ((ptdp->pmap->nested == FALSE) && (pmap_simple_lock_try(&ptdp->pmap->lock))) { - assert(ptdp->pmap != kernel_pmap); unsigned refcnt_acc = 0; unsigned wiredcnt_acc = 0; - for (i = 0 ; i < PT_INDEX_MAX ; i++) { + for (i = 0; i < PT_INDEX_MAX; i++) { if 
(ptdp->pt_cnt[i].refcnt == PT_DESC_REFCOUNT) { /* Do not attempt to free a page that contains an L2 table */ refcnt_acc = 0; @@ -1457,38 +1464,38 @@ pmap_pages_reclaim( if (!found_page) { panic("pmap_pages_reclaim(): No eligible page in pt_page_list\n"); } else { - int remove_count = 0; - vm_map_address_t va; - pmap_t pmap; - pt_entry_t *bpte, *epte; - pt_entry_t *pte_p; - tt_entry_t *tte_p; - uint32_t rmv_spte=0; + int remove_count = 0; + vm_map_address_t va; + pmap_t pmap; + pt_entry_t *bpte, *epte; + pt_entry_t *pte_p; + tt_entry_t *tte_p; + uint32_t rmv_spte = 0; pmap_simple_unlock(&pt_pages_lock); pmap = ptdp->pmap; PMAP_ASSERT_LOCKED(pmap); // pmap lock should be held from loop above - for (i = 0 ; i < PT_INDEX_MAX ; i++) { + for (i = 0; i < PT_INDEX_MAX; i++) { va = ptdp->pt_map[i].va; /* If the VA is bogus, this may represent an unallocated region * or one which is in transition (already being freed or expanded). * Don't try to remove mappings here. */ - if (va == (vm_offset_t)-1) + if (va == (vm_offset_t)-1) { continue; + } tte_p = pmap_tte(pmap, va); if ((tte_p != (tt_entry_t *) NULL) && ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE)) { - -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) pte_p = (pt_entry_t *) ttetokv(*tte_p); bpte = &pte_p[ptenum(va)]; - epte = bpte + PAGE_SIZE/sizeof(pt_entry_t); + epte = bpte + PAGE_SIZE / sizeof(pt_entry_t); #else pte_p = (pt_entry_t *) ttetokv(*tte_p); bpte = &pte_p[tt3_index(pmap, va)]; - epte = bpte + PAGE_SIZE/sizeof(pt_entry_t); + epte = bpte + PAGE_SIZE / sizeof(pt_entry_t); #endif /* * Use PMAP_OPTIONS_REMOVE to clear any @@ -1504,22 +1511,23 @@ pmap_pages_reclaim( remove_count += pmap_remove_range_options( pmap, va, bpte, epte, &rmv_spte, PMAP_OPTIONS_REMOVE); - if (ptdp->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt != 0) + if (ptdp->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt != 0) { panic("pmap_pages_reclaim(): ptdp %p, count %d\n", ptdp, ptdp->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt); -#if (__ARM_VMSA__ == 7) + } +#if (__ARM_VMSA__ == 7) pmap_tte_deallocate(pmap, tte_p, PMAP_TT_L1_LEVEL); flush_mmu_tlb_entry_async((va & ~ARM_TT_L1_PT_OFFMASK) | (pmap->asid & 0xff)); flush_mmu_tlb_entry_async(((va & ~ARM_TT_L1_PT_OFFMASK) + ARM_TT_L1_SIZE) | (pmap->asid & 0xff)); - flush_mmu_tlb_entry_async(((va & ~ARM_TT_L1_PT_OFFMASK) + 2*ARM_TT_L1_SIZE)| (pmap->asid & 0xff)); - flush_mmu_tlb_entry_async(((va & ~ARM_TT_L1_PT_OFFMASK) + 3*ARM_TT_L1_SIZE)| (pmap->asid & 0xff)); + flush_mmu_tlb_entry_async(((va & ~ARM_TT_L1_PT_OFFMASK) + 2 * ARM_TT_L1_SIZE) | (pmap->asid & 0xff)); + flush_mmu_tlb_entry_async(((va & ~ARM_TT_L1_PT_OFFMASK) + 3 * ARM_TT_L1_SIZE) | (pmap->asid & 0xff)); #else pmap_tte_deallocate(pmap, tte_p, PMAP_TT_L2_LEVEL); flush_mmu_tlb_entry_async(tlbi_addr(va & ~ARM_TT_L2_OFFMASK) | tlbi_asid(pmap->asid)); #endif if (remove_count > 0) { -#if (__ARM_VMSA__ == 7) - flush_mmu_tlb_region_asid_async(va, 4*ARM_TT_L1_SIZE, pmap); +#if (__ARM_VMSA__ == 7) + flush_mmu_tlb_region_asid_async(va, 4 * ARM_TT_L1_SIZE, pmap); #else flush_mmu_tlb_region_asid_async(va, ARM_TT_L2_SIZE, pmap); #endif @@ -1537,20 +1545,20 @@ pmap_pages_reclaim( static kern_return_t pmap_pages_alloc( - pmap_paddr_t *pa, - unsigned size, - unsigned option) + pmap_paddr_t *pa, + unsigned size, + unsigned option) { vm_page_t m = VM_PAGE_NULL, m_prev; - if(option & PMAP_PAGES_RECLAIM_NOWAIT) { + if (option & PMAP_PAGES_RECLAIM_NOWAIT) { assert(size == PAGE_SIZE); *pa = pmap_pages_reclaim(); return KERN_SUCCESS; } if (size == PAGE_SIZE) { while ((m = vm_page_grab()) == 
VM_PAGE_NULL) { - if(option & PMAP_PAGES_ALLOCATE_NOWAIT) { + if (option & PMAP_PAGES_ALLOCATE_NOWAIT) { return KERN_RESOURCE_SHORTAGE; } @@ -1560,10 +1568,11 @@ pmap_pages_alloc( vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE); vm_page_unlock_queues(); } - if (size == 2*PAGE_SIZE) { + if (size == 2 * PAGE_SIZE) { while (cpm_allocate(size, &m, 0, 1, TRUE, 0) != KERN_SUCCESS) { - if(option & PMAP_PAGES_ALLOCATE_NOWAIT) + if (option & PMAP_PAGES_ALLOCATE_NOWAIT) { return KERN_RESOURCE_SHORTAGE; + } VM_PAGE_WAIT(); } @@ -1580,8 +1589,8 @@ pmap_pages_alloc( } vm_object_unlock(pmap_object); - OSAddAtomic(size>>PAGE_SHIFT, &inuse_pmap_pages_count); - OSAddAtomic64(size>>PAGE_SHIFT, &alloc_pmap_pages_count); + OSAddAtomic(size >> PAGE_SHIFT, &inuse_pmap_pages_count); + OSAddAtomic64(size >> PAGE_SHIFT, &alloc_pmap_pages_count); return KERN_SUCCESS; } @@ -1589,13 +1598,13 @@ pmap_pages_alloc( static void pmap_pages_free( - pmap_paddr_t pa, - unsigned size) + pmap_paddr_t pa, + unsigned size) { pmap_simple_lock(&pmap_pages_lock); if (pmap_pages_request_count != 0) { - page_free_entry_t *page_entry; + page_free_entry_t *page_entry; pmap_pages_request_count--; page_entry = (page_free_entry_t *)phystokv(pa); @@ -1609,9 +1618,9 @@ pmap_pages_free( pmap_simple_unlock(&pmap_pages_lock); vm_page_t m; - pmap_paddr_t pa_max; + pmap_paddr_t pa_max; - OSAddAtomic(-(size>>PAGE_SHIFT), &inuse_pmap_pages_count); + OSAddAtomic(-(size >> PAGE_SHIFT), &inuse_pmap_pages_count); for (pa_max = pa + size; pa < pa_max; pa = pa + PAGE_SIZE) { vm_object_lock(pmap_object); @@ -1642,8 +1651,8 @@ PMAP_ZINFO_PFREE( static inline void pmap_tt_ledger_credit( - pmap_t pmap, - vm_size_t size) + pmap_t pmap, + vm_size_t size) { if (pmap != kernel_pmap) { pmap_ledger_credit(pmap, task_ledgers.phys_footprint, size); @@ -1653,8 +1662,8 @@ pmap_tt_ledger_credit( static inline void pmap_tt_ledger_debit( - pmap_t pmap, - vm_size_t size) + pmap_t pmap, + vm_size_t size) { if (pmap != kernel_pmap) { pmap_ledger_debit(pmap, task_ledgers.phys_footprint, size); @@ -1701,7 +1710,7 @@ alloc_asid( assert(((asid_bitmap_index * sizeof(uint32_t) * NBBY + temp) % ARM_MAX_ASID) != 1); #endif /* __ARM_KERNEL_PROTECT__ */ - return (asid_bitmap_index * sizeof(uint32_t) * NBBY + temp); + return asid_bitmap_index * sizeof(uint32_t) * NBBY + temp; } } pmap_simple_unlock(&pmaps_lock); @@ -1753,7 +1762,7 @@ uint32_t pv_alloc_chunk MARK_AS_PMAP_DATA; uint32_t pv_kern_alloc_chunk MARK_AS_PMAP_DATA; thread_t mapping_replenish_thread; -event_t mapping_replenish_event; +event_t mapping_replenish_event; event_t pmap_user_pv_throttle_event; volatile uint32_t mappingrecurse = 0; @@ -1778,12 +1787,12 @@ pv_init( pv_kern_free_count = 0x0U; } -static inline void PV_ALLOC(pv_entry_t **pv_ep); -static inline void PV_KERN_ALLOC(pv_entry_t **pv_e); -static inline void PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt); -static inline void PV_KERN_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt); +static inline void PV_ALLOC(pv_entry_t **pv_ep); +static inline void PV_KERN_ALLOC(pv_entry_t **pv_e); +static inline void PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt); +static inline void PV_KERN_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt); -static inline void pmap_pv_throttle(pmap_t p); +static inline void pmap_pv_throttle(pmap_t p); static boolean_t pv_alloc( @@ -1791,28 +1800,28 @@ pv_alloc( unsigned int pai, pv_entry_t **pvepp) { - if (pmap != NULL) + if (pmap != NULL) { PMAP_ASSERT_LOCKED(pmap); + } ASSERT_PVH_LOCKED(pai); 
PV_ALLOC(pvepp); if (PV_ENTRY_NULL == *pvepp) { - if ((pmap == NULL) || (kernel_pmap == pmap)) { - PV_KERN_ALLOC(pvepp); if (PV_ENTRY_NULL == *pvepp) { - pv_entry_t *pv_e; - pv_entry_t *pv_eh; - pv_entry_t *pv_et; - int pv_cnt; - unsigned j; + pv_entry_t *pv_e; + pv_entry_t *pv_eh; + pv_entry_t *pv_et; + int pv_cnt; + unsigned j; pmap_paddr_t pa; - kern_return_t ret; + kern_return_t ret; UNLOCK_PVH(pai); - if (pmap != NULL) + if (pmap != NULL) { PMAP_UNLOCK(pmap); + } ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT); @@ -1822,9 +1831,9 @@ pv_alloc( if (ret != KERN_SUCCESS) { panic("%s: failed to alloc page for kernel, ret=%d, " - "pmap=%p, pai=%u, pvepp=%p", - __FUNCTION__, ret, - pmap, pai, pvepp); + "pmap=%p, pai=%u, pvepp=%p", + __FUNCTION__, ret, + pmap, pai, pvepp); } pv_page_count++; @@ -1835,18 +1844,20 @@ pv_alloc( *pvepp = pv_e; pv_e++; - for (j = 1; j < (PAGE_SIZE/sizeof(pv_entry_t)) ; j++) { + for (j = 1; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { pv_e->pve_next = pv_eh; pv_eh = pv_e; - if (pv_et == PV_ENTRY_NULL) + if (pv_et == PV_ENTRY_NULL) { pv_et = pv_e; + } pv_cnt++; pv_e++; } PV_KERN_FREE_LIST(pv_eh, pv_et, pv_cnt); - if (pmap != NULL) + if (pmap != NULL) { PMAP_LOCK(pmap); + } LOCK_PVH(pai); return FALSE; } @@ -1855,21 +1866,21 @@ pv_alloc( PMAP_UNLOCK(pmap); pmap_pv_throttle(pmap); { - pv_entry_t *pv_e; - pv_entry_t *pv_eh; - pv_entry_t *pv_et; - int pv_cnt; - unsigned j; + pv_entry_t *pv_e; + pv_entry_t *pv_eh; + pv_entry_t *pv_et; + int pv_cnt; + unsigned j; pmap_paddr_t pa; - kern_return_t ret; + kern_return_t ret; ret = pmap_pages_alloc(&pa, PAGE_SIZE, 0); if (ret != KERN_SUCCESS) { panic("%s: failed to alloc page, ret=%d, " - "pmap=%p, pai=%u, pvepp=%p", - __FUNCTION__, ret, - pmap, pai, pvepp); + "pmap=%p, pai=%u, pvepp=%p", + __FUNCTION__, ret, + pmap, pai, pvepp); } pv_page_count++; @@ -1880,12 +1891,13 @@ pv_alloc( *pvepp = pv_e; pv_e++; - for (j = 1; j < (PAGE_SIZE/sizeof(pv_entry_t)) ; j++) { + for (j = 1; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { pv_e->pve_next = pv_eh; pv_eh = pv_e; - if (pv_et == PV_ENTRY_NULL) + if (pv_et == PV_ENTRY_NULL) { pv_et = pv_e; + } pv_cnt++; pv_e++; } @@ -1920,12 +1932,15 @@ static inline void pv_water_mark_check(void) { if ((pv_free_count < pv_low_water_mark) || (pv_kern_free_count < pv_kern_low_water_mark)) { - if (!mappingrecurse && hw_compare_and_store(0,1, &mappingrecurse)) + if (!mappingrecurse && hw_compare_and_store(0, 1, &mappingrecurse)) { thread_wakeup(&mapping_replenish_event); + } } } -static inline void PV_ALLOC(pv_entry_t **pv_ep) { +static inline void +PV_ALLOC(pv_entry_t **pv_ep) +{ assert(*pv_ep == PV_ENTRY_NULL); pmap_simple_lock(&pv_free_list_lock); /* @@ -1941,7 +1956,9 @@ static inline void PV_ALLOC(pv_entry_t **pv_ep) { pmap_simple_unlock(&pv_free_list_lock); } -static inline void PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt) { +static inline void +PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt) +{ pmap_simple_lock(&pv_free_list_lock); pv_et->pve_next = (pv_entry_t *)pv_free_list; pv_free_list = pv_eh; @@ -1949,7 +1966,9 @@ static inline void PV_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt pmap_simple_unlock(&pv_free_list_lock); } -static inline void PV_KERN_ALLOC(pv_entry_t **pv_e) { +static inline void +PV_KERN_ALLOC(pv_entry_t **pv_e) +{ assert(*pv_e == PV_ENTRY_NULL); pmap_simple_lock(&pv_kern_free_list_lock); @@ -1963,7 +1982,9 @@ static inline void PV_KERN_ALLOC(pv_entry_t **pv_e) { pmap_simple_unlock(&pv_kern_free_list_lock); } 
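Every refill path in pv_alloc() and mapping_replenish() repeats one idiom: carve a freshly allocated page into pv_entry_t nodes, chain them while remembering head (pv_eh) and tail (pv_et), then splice the whole chain onto a free list in O(1) via PV_FREE_LIST()/PV_KERN_FREE_LIST(). A sketch of the carve step (in pv_alloc() the first node goes straight to the caller, so its loop starts at 1; this sketch chains the whole page):

```c
#include <stddef.h>

typedef struct pv_entry_sketch {
	struct pv_entry_sketch *pve_next;
} pv_entry_sketch_t;

#define PAGE_SIZE_SKETCH 4096

/* Thread every entry that fits in the page onto a local chain, tracking head
 * and tail so the chain can later be spliced onto a free list in O(1):
 *   tail->pve_next = free_list; free_list = head; free_count += count; */
static pv_entry_sketch_t *
carve_page_sketch(void *page, pv_entry_sketch_t **tail_out, int *count_out)
{
	pv_entry_sketch_t *pv_e = page;
	pv_entry_sketch_t *pv_eh = NULL;    /* chain head */
	pv_entry_sketch_t *pv_et = NULL;    /* chain tail */
	int pv_cnt = 0;

	for (size_t j = 0; j < PAGE_SIZE_SKETCH / sizeof(pv_entry_sketch_t); j++) {
		pv_e->pve_next = pv_eh;
		pv_eh = pv_e;
		if (pv_et == NULL) {
			pv_et = pv_e;       /* first node pushed becomes the tail */
		}
		pv_cnt++;
		pv_e++;
	}
	*tail_out = pv_et;
	*count_out = pv_cnt;
	return pv_eh;
}
```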
-static inline void PV_KERN_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt) { +static inline void +PV_KERN_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int pv_cnt) +{ pmap_simple_lock(&pv_kern_free_list_lock); pv_et->pve_next = pv_kern_free_list; pv_kern_free_list = pv_eh; @@ -1971,7 +1992,9 @@ static inline void PV_KERN_FREE_LIST(pv_entry_t *pv_eh, pv_entry_t *pv_et, int p pmap_simple_unlock(&pv_kern_free_list_lock); } -static inline void pmap_pv_throttle(__unused pmap_t p) { +static inline void +pmap_pv_throttle(__unused pmap_t p) +{ assert(p != kernel_pmap); /* Apply throttle on non-kernel mappings */ if (pv_kern_free_count < (pv_kern_low_water_mark / 2)) { @@ -2045,12 +2068,13 @@ mapping_free_prime_internal(void) pv_e = (pv_entry_t *)phystokv(pa); - for (j = 0; j < (PAGE_SIZE/sizeof(pv_entry_t)) ; j++) { + for (j = 0; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { pv_e->pve_next = pv_eh; pv_eh = pv_e; - if (pv_et == PV_ENTRY_NULL) + if (pv_et == PV_ENTRY_NULL) { pv_et = pv_e; + } pv_cnt++; pv_e++; } @@ -2075,7 +2099,6 @@ mapping_free_prime_internal(void) } while (pv_cnt < needed_pv_cnt) { - ret = pmap_pages_alloc(&pa, PAGE_SIZE, alloc_options); assert(ret == KERN_SUCCESS); @@ -2083,12 +2106,13 @@ mapping_free_prime_internal(void) pv_e = (pv_entry_t *)phystokv(pa); - for (j = 0; j < (PAGE_SIZE/sizeof(pv_entry_t)) ; j++) { + for (j = 0; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { pv_e->pve_next = pv_eh; pv_eh = pv_e; - if (pv_et == PV_ENTRY_NULL) + if (pv_et == PV_ENTRY_NULL) { pv_et = pv_e; + } pv_cnt++; pv_e++; } @@ -2116,7 +2140,9 @@ mapping_free_prime(void) void mapping_replenish(void); -void mapping_adjust(void) { +void +mapping_adjust(void) +{ kern_return_t mres; mres = kernel_thread_start_priority((thread_continue_t)mapping_replenish, NULL, MAXPRI_KERNEL, &mapping_replenish_thread); @@ -2151,12 +2177,13 @@ mapping_replenish_internal(void) pv_e = (pv_entry_t *)phystokv(pa); - for (j = 0; j < (PAGE_SIZE/sizeof(pv_entry_t)) ; j++) { + for (j = 0; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { pv_e->pve_next = pv_eh; pv_eh = pv_e; - if (pv_et == PV_ENTRY_NULL) + if (pv_et == PV_ENTRY_NULL) { pv_et = pv_e; + } pv_cnt++; pv_e++; } @@ -2175,12 +2202,13 @@ mapping_replenish_internal(void) pv_e = (pv_entry_t *)phystokv(pa); - for (j = 0; j < (PAGE_SIZE/sizeof(pv_entry_t)) ; j++) { + for (j = 0; j < (PAGE_SIZE / sizeof(pv_entry_t)); j++) { pv_e->pve_next = pv_eh; pv_eh = pv_e; - if (pv_et == PV_ENTRY_NULL) + if (pv_et == PV_ENTRY_NULL) { pv_et = pv_e; + } pv_cnt++; pv_e++; } @@ -2222,8 +2250,9 @@ mapping_replenish(void) /* Check if the kernel pool has been depleted since the * first pass, to reduce refill latency. 
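mapping_replenish() is a classic kernel worker loop: refill the pools, re-check the kernel pool to reduce refill latency, then clear the mappingrecurse latch and block on mapping_replenish_event. On the producer side, pv_water_mark_check() wakes it only when it wins the 0-to-1 transition of the latch, so a storm of allocators produces a single wakeup. A pthread analog of that gating, with the refill itself stubbed (everything here is a user-space approximation of assert_wait()/thread_block()/thread_wakeup()):

```c
#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t replenish_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  replenish_cv  = PTHREAD_COND_INITIALIZER;
static atomic_int      recurse_latch;       /* analog of mappingrecurse */
static atomic_int      free_count;
static const int       low_water_mark = 32;

/* Producer side: only the thread that wins the 0->1 CAS signals, the way
 * hw_compare_and_store(0, 1, &mappingrecurse) gates thread_wakeup(). */
static void
water_mark_check_sketch(void)
{
	int expected = 0;
	if (atomic_load(&free_count) < low_water_mark &&
	    atomic_compare_exchange_strong(&recurse_latch, &expected, 1)) {
		pthread_mutex_lock(&replenish_mtx);
		pthread_cond_signal(&replenish_cv);
		pthread_mutex_unlock(&replenish_mtx);
	}
}

/* Consumer side: refill, re-check to reduce refill latency, then clear the
 * latch and block until the next wakeup. */
static void *
replenish_thread_sketch(void *arg)
{
	(void)arg;
	for (;;) {
		atomic_fetch_add(&free_count, 64);  /* stand-in for a pool refill */
		if (atomic_load(&free_count) < low_water_mark) {
			continue;                   /* depleted again already */
		}
		pthread_mutex_lock(&replenish_mtx);
		atomic_store(&recurse_latch, 0);
		pthread_cond_wait(&replenish_cv, &replenish_mtx);
		pthread_mutex_unlock(&replenish_mtx);
	}
	return NULL;
}
```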
*/ - if (pv_kern_free_count < pv_kern_low_water_mark) + if (pv_kern_free_count < pv_kern_low_water_mark) { continue; + } /* Block sans continuation to avoid yielding kernel stack */ assert_wait(&mapping_replenish_event, THREAD_UNINT); mappingrecurse = 0; @@ -2249,39 +2278,44 @@ ptd_bootstrap( ptd_preboot = FALSE; } -static pt_desc_t -*ptd_alloc_unlinked(void) +static pt_desc_t* +ptd_alloc_unlinked(bool reclaim) { - pt_desc_t *ptdp; - unsigned i; + pt_desc_t *ptdp; + unsigned i; - if (!ptd_preboot) + if (!ptd_preboot) { pmap_simple_lock(&ptd_free_list_lock); + } if (ptd_free_count == 0) { unsigned int ptd_cnt; - pt_desc_t *ptdp_next; + pt_desc_t *ptdp_next; if (ptd_preboot) { ptdp = (pt_desc_t *)avail_start; avail_start += ARM_PGBYTES; ptdp_next = ptdp; - ptd_cnt = ARM_PGBYTES/sizeof(pt_desc_t); + ptd_cnt = ARM_PGBYTES / sizeof(pt_desc_t); } else { pmap_paddr_t pa; - kern_return_t ret; + kern_return_t ret; pmap_simple_unlock(&ptd_free_list_lock); if (pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT) != KERN_SUCCESS) { - ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_RECLAIM_NOWAIT); - assert(ret == KERN_SUCCESS); + if (reclaim) { + ret = pmap_pages_alloc(&pa, PAGE_SIZE, PMAP_PAGES_RECLAIM_NOWAIT); + assert(ret == KERN_SUCCESS); + } else { + return NULL; + } } ptdp = (pt_desc_t *)phystokv(pa); pmap_simple_lock(&ptd_free_list_lock); ptdp_next = ptdp; - ptd_cnt = PAGE_SIZE/sizeof(pt_desc_t); + ptd_cnt = PAGE_SIZE / sizeof(pt_desc_t); } while (ptd_cnt != 0) { @@ -2300,26 +2334,31 @@ static pt_desc_t panic("out of ptd entry\n"); } - if (!ptd_preboot) + if (!ptd_preboot) { pmap_simple_unlock(&ptd_free_list_lock); + } ptdp->pt_page.next = NULL; ptdp->pt_page.prev = NULL; ptdp->pmap = NULL; - for (i = 0 ; i < PT_INDEX_MAX ; i++) { + for (i = 0; i < PT_INDEX_MAX; i++) { ptdp->pt_map[i].va = (vm_offset_t)-1; ptdp->pt_cnt[i].refcnt = 0; ptdp->pt_cnt[i].wiredcnt = 0; } - return(ptdp); + return ptdp; } static inline pt_desc_t* -ptd_alloc(pmap_t pmap) +ptd_alloc(pmap_t pmap, bool reclaim) { - pt_desc_t *ptdp = ptd_alloc_unlinked(); + pt_desc_t *ptdp = ptd_alloc_unlinked(reclaim); + + if (ptdp == NULL) { + return NULL; + } ptdp->pmap = pmap; if (pmap != kernel_pmap) { @@ -2337,7 +2376,7 @@ ptd_alloc(pmap_t pmap) static void ptd_deallocate(pt_desc_t *ptdp) { - pmap_t pmap = ptdp->pmap; + pmap_t pmap = ptdp->pmap; if (ptd_preboot) { panic("ptd_deallocate(): early boot\n"); @@ -2353,8 +2392,9 @@ ptd_deallocate(pt_desc_t *ptdp) ptd_free_list = (pt_desc_t *)ptdp; ptd_free_count++; pmap_simple_unlock(&ptd_free_list_lock); - if (pmap != NULL) + if (pmap != NULL) { pmap_tt_ledger_debit(pmap, sizeof(*ptdp)); + } } static void @@ -2365,20 +2405,23 @@ ptd_init( unsigned int level, pt_entry_t *pte_p) { - if (ptdp->pmap != pmap) + if (ptdp->pmap != pmap) { panic("ptd_init(): pmap mismatch\n"); + } -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) assert(level == 2); ptdp->pt_map[ARM_PT_DESC_INDEX(pte_p)].va = (vm_offset_t) va & ~(ARM_TT_L1_PT_OFFMASK); #else - if (level == 3) + if (level == 3) { ptdp->pt_map[ARM_PT_DESC_INDEX(pte_p)].va = (vm_offset_t) va & ~ARM_TT_L2_OFFMASK; - else if (level == 2) + } else if (level == 2) { ptdp->pt_map[ARM_PT_DESC_INDEX(pte_p)].va = (vm_offset_t) va & ~ARM_TT_L1_OFFMASK; + } #endif - if (level < PMAP_TT_MAX_LEVEL) + if (level < PMAP_TT_MAX_LEVEL) { ptdp->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt = PT_DESC_REFCOUNT; + } } @@ -2389,7 +2432,7 @@ pmap_valid_address( return pa_valid(addr); } -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) /* * Given an offset and a map, 
compute the address of the @@ -2397,11 +2440,12 @@ pmap_valid_address( */ static inline tt_entry_t * pmap_tte(pmap_t pmap, - vm_map_address_t addr) + vm_map_address_t addr) { - if (!(tte_index(pmap, addr) < pmap->tte_index_max)) + if (!(tte_index(pmap, addr) < pmap->tte_index_max)) { return (tt_entry_t *)NULL; - return (&pmap->tte[tte_index(pmap, addr)]); + } + return &pmap->tte[tte_index(pmap, addr)]; } @@ -2414,25 +2458,28 @@ pmap_tte(pmap_t pmap, */ static inline pt_entry_t * pmap_pte( - pmap_t pmap, - vm_map_address_t addr) + pmap_t pmap, + vm_map_address_t addr) { pt_entry_t *ptp; tt_entry_t *ttp; tt_entry_t tte; ttp = pmap_tte(pmap, addr); - if (ttp == (tt_entry_t *)NULL) - return (PT_ENTRY_NULL); + if (ttp == (tt_entry_t *)NULL) { + return PT_ENTRY_NULL; + } tte = *ttp; #if MACH_ASSERT - if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) + if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) { panic("Attempt to demote L1 block: pmap=%p, va=0x%llx, tte=0x%llx\n", pmap, (uint64_t)addr, (uint64_t)tte); + } #endif - if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) - return (PT_ENTRY_NULL); + if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) { + return PT_ENTRY_NULL; + } ptp = (pt_entry_t *) ttetokv(tte) + ptenum(addr); - return (ptp); + return ptp; } #else @@ -2443,15 +2490,15 @@ pmap_pte( */ static inline tt_entry_t * pmap_tt1e(pmap_t pmap, - vm_map_address_t addr) + vm_map_address_t addr) { /* Level 0 currently unused */ #if __ARM64_TWO_LEVEL_PMAP__ #pragma unused(pmap, addr) panic("pmap_tt1e called on a two level pmap"); - return (NULL); + return NULL; #else - return (&pmap->tte[tt1_index(pmap, addr)]); + return &pmap->tte[tt1_index(pmap, addr)]; #endif } @@ -2461,10 +2508,10 @@ pmap_tt1e(pmap_t pmap, */ static inline tt_entry_t * pmap_tt2e(pmap_t pmap, - vm_map_address_t addr) + vm_map_address_t addr) { #if __ARM64_TWO_LEVEL_PMAP__ - return (&pmap->tte[tt2_index(pmap, addr)]); + return &pmap->tte[tt2_index(pmap, addr)]; #else tt_entry_t *ttp; tt_entry_t tte; @@ -2472,14 +2519,16 @@ pmap_tt2e(pmap_t pmap, ttp = pmap_tt1e(pmap, addr); tte = *ttp; #if MACH_ASSERT - if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID)) + if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID)) { panic("Attempt to demote L1 block (?!): pmap=%p, va=0x%llx, tte=0x%llx\n", pmap, (uint64_t)addr, (uint64_t)tte); + } #endif - if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) - return (PT_ENTRY_NULL); + if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) { + return PT_ENTRY_NULL; + } ttp = &((tt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt2_index(pmap, addr)]; - return ((tt_entry_t *)ttp); + return (tt_entry_t *)ttp; #endif } @@ -2490,30 +2539,32 @@ pmap_tt2e(pmap_t pmap, */ static inline pt_entry_t * pmap_tt3e( - pmap_t pmap, - vm_map_address_t addr) + pmap_t pmap, + vm_map_address_t addr) { pt_entry_t *ptp; tt_entry_t *ttp; tt_entry_t tte; ttp = pmap_tt2e(pmap, addr); - if (ttp == PT_ENTRY_NULL) - return (PT_ENTRY_NULL); + if (ttp == PT_ENTRY_NULL) { + return PT_ENTRY_NULL; + } tte = *ttp; #if MACH_ASSERT - if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID)) + if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID)) { panic("Attempt to demote L2 block: pmap=%p, va=0x%llx, tte=0x%llx\n", pmap, (uint64_t)addr, (uint64_t)tte); + } #endif if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != 
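pmap_tte()/pmap_pte() and the pmap_tt2e()/pmap_tt3e() helpers above all take the same step: load the current level's entry, verify the type bits say "table", then index into the next-level table reached through the entry's physical address. A sketch of one such step, with an assumed AArch64-style descriptor encoding and phystokv() modeled as the identity:

```c
#include <stddef.h>
#include <stdint.h>

typedef uint64_t tt_entry_t;

/* Assumed descriptor encoding: bit 0 = valid, bit 1 = table (vs. block). */
#define TTE_VALID_SK      0x1ULL
#define TTE_TYPE_TABLE_SK 0x2ULL
#define TTE_TABLE_MASK_SK 0x0000fffffffff000ULL  /* next-level table address bits */
#define TT_INDEX_MASK_SK  0x1ffULL               /* 9 index bits per level */

/* One step of the pmap_tt2e()/pmap_tt3e() walk: returns a pointer into the
 * next-level table, or NULL for a fault/block entry (where the real code
 * either returns PT_ENTRY_NULL or panics under MACH_ASSERT). */
static tt_entry_t *
walk_one_level_sketch(tt_entry_t tte, uint64_t va, unsigned next_shift)
{
	if ((tte & (TTE_TYPE_TABLE_SK | TTE_VALID_SK)) !=
	    (TTE_TYPE_TABLE_SK | TTE_VALID_SK)) {
		return NULL;            /* no next level behind this entry */
	}
	/* phystokv() is identity here; the kernel maps the PA to a VA. */
	tt_entry_t *next_table = (tt_entry_t *)(uintptr_t)(tte & TTE_TABLE_MASK_SK);
	return &next_table[(va >> next_shift) & TT_INDEX_MASK_SK];
}
```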
(ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) { - return (PT_ENTRY_NULL); + return PT_ENTRY_NULL; } /* Get third-level (4KB) entry */ ptp = &(((pt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt3_index(pmap, addr)]); - return (ptp); + return ptp; } @@ -2522,16 +2573,16 @@ pmap_tte( pmap_t pmap, vm_map_address_t addr) { - return(pmap_tt2e(pmap, addr)); + return pmap_tt2e(pmap, addr); } static inline pt_entry_t * pmap_pte( - pmap_t pmap, - vm_map_address_t addr) + pmap_t pmap, + vm_map_address_t addr) { - return(pmap_tt3e(pmap, addr)); + return pmap_tt3e(pmap, addr); } #endif @@ -2546,11 +2597,11 @@ pmap_pte( */ vm_map_address_t pmap_map( - vm_map_address_t virt, - vm_offset_t start, - vm_offset_t end, - vm_prot_t prot, - unsigned int flags) + vm_map_address_t virt, + vm_offset_t start, + vm_offset_t end, + vm_prot_t prot, + unsigned int flags) { kern_return_t kr; vm_size_t ps; @@ -2558,39 +2609,39 @@ pmap_map( ps = PAGE_SIZE; while (start < end) { kr = pmap_enter(kernel_pmap, virt, (ppnum_t)atop(start), - prot, VM_PROT_NONE, flags, FALSE); + prot, VM_PROT_NONE, flags, FALSE); if (kr != KERN_SUCCESS) { panic("%s: failed pmap_enter, " - "virt=%p, start_addr=%p, end_addr=%p, prot=%#x, flags=%#x", - __FUNCTION__, - (void *) virt, (void *) start, (void *) end, prot, flags); + "virt=%p, start_addr=%p, end_addr=%p, prot=%#x, flags=%#x", + __FUNCTION__, + (void *) virt, (void *) start, (void *) end, prot, flags); } virt += ps; start += ps; } - return (virt); + return virt; } vm_map_address_t pmap_map_bd_with_options( - vm_map_address_t virt, - vm_offset_t start, - vm_offset_t end, - vm_prot_t prot, - int32_t options) + vm_map_address_t virt, + vm_offset_t start, + vm_offset_t end, + vm_prot_t prot, + int32_t options) { pt_entry_t tmplate; pt_entry_t *ptep; vm_map_address_t vaddr; vm_offset_t paddr; - pt_entry_t mem_attr; + pt_entry_t mem_attr; switch (options & PMAP_MAP_BD_MASK) { case PMAP_MAP_BD_WCOMB: mem_attr = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITECOMB); -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) mem_attr |= ARM_PTE_SH(SH_OUTER_MEMORY); #else mem_attr |= ARM_PTE_SH; @@ -2605,7 +2656,7 @@ pmap_map_bd_with_options( } tmplate = pa_to_pte(start) | ARM_PTE_AP((prot & VM_PROT_WRITE) ? AP_RWNA : AP_RONA) | - mem_attr | ARM_PTE_TYPE | ARM_PTE_NX | ARM_PTE_PNX | ARM_PTE_AF; + mem_attr | ARM_PTE_TYPE | ARM_PTE_NX | ARM_PTE_PNX | ARM_PTE_AF; #if __ARM_KERNEL_PROTECT__ tmplate |= ARM_PTE_NG; #endif /* __ARM_KERNEL_PROTECT__ */ @@ -2613,7 +2664,6 @@ pmap_map_bd_with_options( vaddr = virt; paddr = start; while (paddr < end) { - ptep = pmap_pte(kernel_pmap, vaddr); if (ptep == PT_ENTRY_NULL) { panic("pmap_map_bd"); @@ -2626,10 +2676,11 @@ pmap_map_bd_with_options( paddr += PAGE_SIZE; } - if (end >= start) + if (end >= start) { flush_mmu_tlb_region(virt, (unsigned)(end - start)); + } - return (vaddr); + return vaddr; } /* @@ -2646,15 +2697,15 @@ pmap_map_bd( vm_prot_t prot) { pt_entry_t tmplate; - pt_entry_t *ptep; + pt_entry_t *ptep; vm_map_address_t vaddr; - vm_offset_t paddr; + vm_offset_t paddr; /* not cacheable and not buffered */ tmplate = pa_to_pte(start) - | ARM_PTE_TYPE | ARM_PTE_AF | ARM_PTE_NX | ARM_PTE_PNX - | ARM_PTE_AP((prot & VM_PROT_WRITE) ? AP_RWNA : AP_RONA) - | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE); + | ARM_PTE_TYPE | ARM_PTE_AF | ARM_PTE_NX | ARM_PTE_PNX + | ARM_PTE_AP((prot & VM_PROT_WRITE) ? 
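pmap_map() above advances the VA and PA cursors a page at a time, panics if any pmap_enter() fails, and returns the first unused virtual address so successive calls can be chained. The same loop shape, with the enter call stubbed:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ_SKETCH 4096u

/* Stand-in for pmap_enter(kernel_pmap, va, atop(pa), prot, ...). */
static int
enter_one_page(uint64_t va, uint64_t pa)
{
	(void)va; (void)pa;
	return 0;   /* 0 = KERN_SUCCESS */
}

/* Mirrors pmap_map(): map [start, end) at virt and return the next free VA. */
static uint64_t
map_range_sketch(uint64_t virt, uint64_t start, uint64_t end)
{
	while (start < end) {
		if (enter_one_page(virt, start) != 0) {
			fprintf(stderr, "map_range_sketch: enter failed at va=0x%llx\n",
			    (unsigned long long)virt);
			abort();            /* pmap_map() panics here */
		}
		virt += PAGE_SZ_SKETCH;
		start += PAGE_SZ_SKETCH;
	}
	return virt;                        /* callers chain from this VA */
}
```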
AP_RWNA : AP_RONA) + | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE); #if __ARM_KERNEL_PROTECT__ tmplate |= ARM_PTE_NG; #endif /* __ARM_KERNEL_PROTECT__ */ @@ -2662,7 +2713,6 @@ pmap_map_bd( vaddr = virt; paddr = start; while (paddr < end) { - ptep = pmap_pte(kernel_pmap, vaddr); if (ptep == PT_ENTRY_NULL) { panic("pmap_map_bd"); @@ -2675,10 +2725,11 @@ pmap_map_bd( paddr += PAGE_SIZE; } - if (end >= start) + if (end >= start) { flush_mmu_tlb_region(virt, (unsigned)(end - start)); + } - return (vaddr); + return vaddr; } /* @@ -2695,17 +2746,17 @@ pmap_map_high_window_bd( vm_size_t len, vm_prot_t prot) { - pt_entry_t *ptep, pte; + pt_entry_t *ptep, pte; #if (__ARM_VMSA__ == 7) - vm_map_address_t va_start = VM_HIGH_KERNEL_WINDOW; - vm_map_address_t va_max = VM_MAX_KERNEL_ADDRESS; + vm_map_address_t va_start = VM_HIGH_KERNEL_WINDOW; + vm_map_address_t va_max = VM_MAX_KERNEL_ADDRESS; #else - vm_map_address_t va_start = VREGION1_START; - vm_map_address_t va_max = VREGION1_START + VREGION1_SIZE; + vm_map_address_t va_start = VREGION1_START; + vm_map_address_t va_max = VREGION1_START + VREGION1_SIZE; #endif - vm_map_address_t va_end; - vm_map_address_t va; - vm_size_t offset; + vm_map_address_t va_end; + vm_map_address_t va; + vm_size_t offset; offset = pa_start & PAGE_MASK; pa_start -= offset; @@ -2716,11 +2767,12 @@ pmap_map_high_window_bd( } scan: - for ( ; va_start < va_max; va_start += PAGE_SIZE) { + for (; va_start < va_max; va_start += PAGE_SIZE) { ptep = pmap_pte(kernel_pmap, va_start); assert(!ARM_PTE_IS_COMPRESSED(*ptep)); - if (*ptep == ARM_PTE_TYPE_FAULT) + if (*ptep == ARM_PTE_TYPE_FAULT) { break; + } } if (va_start > va_max) { panic("pmap_map_high_window_bd: insufficient pages\n"); @@ -2738,10 +2790,10 @@ scan: for (va = va_start; va < va_end; va += PAGE_SIZE, pa_start += PAGE_SIZE) { ptep = pmap_pte(kernel_pmap, va); pte = pa_to_pte(pa_start) - | ARM_PTE_TYPE | ARM_PTE_AF | ARM_PTE_NX | ARM_PTE_PNX - | ARM_PTE_AP((prot & VM_PROT_WRITE) ? AP_RWNA : AP_RONA) - | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT); -#if (__ARM_VMSA__ > 7) + | ARM_PTE_TYPE | ARM_PTE_AF | ARM_PTE_NX | ARM_PTE_PNX + | ARM_PTE_AP((prot & VM_PROT_WRITE) ? 
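pmap_map_high_window_bd() scans a fixed kernel VA window for a run of npages empty PTEs, restarting past the first conflict (the "goto scan" in the original). The same first-fit run search, over a boolean occupancy array standing in for "is this PTE ARM_PTE_TYPE_FAULT":

```c
#include <stdbool.h>
#include <stddef.h>

/* First-fit scan: find the first free slot, verify the whole run, and on a
 * conflict resume the outer scan just past the conflicting slot. Returns the
 * run's start index, or -1 where the original panics ("insufficient pages"). */
static ptrdiff_t
find_va_run_sketch(const bool *used, size_t window_slots, size_t npages)
{
	size_t start = 0;
	while (start + npages <= window_slots) {
		if (used[start]) {
			start++;
			continue;
		}
		size_t i = 1;
		while (i < npages && !used[start + i]) {
			i++;
		}
		if (i == npages) {
			return (ptrdiff_t)start;    /* found a large-enough run */
		}
		start = start + i + 1;              /* skip past the conflict */
	}
	return -1;
}
```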
AP_RWNA : AP_RONA) + | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT); +#if (__ARM_VMSA__ > 7) pte |= ARM_PTE_SH(SH_OUTER_MEMORY); #else pte |= ARM_PTE_SH; @@ -2767,42 +2819,51 @@ pmap_compute_io_rgns(void) pmap_io_range_t *ranges; uint64_t rgn_end; void *prop = NULL; - int err; + int err; unsigned int prop_size; - err = DTLookupEntry(NULL, "/defaults", &entry); - assert(err == kSuccess); + err = DTLookupEntry(NULL, "/defaults", &entry); + assert(err == kSuccess); - if (kSuccess != DTGetProperty(entry, "pmap-io-ranges", &prop, &prop_size)) + if (kSuccess != DTGetProperty(entry, "pmap-io-ranges", &prop, &prop_size)) { return 0; + } ranges = prop; for (unsigned int i = 0; i < (prop_size / sizeof(*ranges)); ++i) { - if (ranges[i].addr & PAGE_MASK) + if (ranges[i].addr & PAGE_MASK) { panic("pmap I/O region %u addr 0x%llx is not page-aligned", i, ranges[i].addr); - if (ranges[i].len & PAGE_MASK) + } + if (ranges[i].len & PAGE_MASK) { panic("pmap I/O region %u length 0x%x is not page-aligned", i, ranges[i].len); - if (os_add_overflow(ranges[i].addr, ranges[i].len, &rgn_end)) + } + if (os_add_overflow(ranges[i].addr, ranges[i].len, &rgn_end)) { panic("pmap I/O region %u addr 0x%llx length 0x%x wraps around", i, ranges[i].addr, ranges[i].len); - if ((i == 0) || (ranges[i].addr < io_rgn_start)) + } + if ((i == 0) || (ranges[i].addr < io_rgn_start)) { io_rgn_start = ranges[i].addr; - if ((i == 0) || (rgn_end > io_rgn_end)) + } + if ((i == 0) || (rgn_end > io_rgn_end)) { io_rgn_end = rgn_end; + } ++num_io_rgns; } - if (io_rgn_start & PAGE_MASK) + if (io_rgn_start & PAGE_MASK) { panic("pmap I/O region start is not page-aligned!\n"); + } - if (io_rgn_end & PAGE_MASK) + if (io_rgn_end & PAGE_MASK) { panic("pmap I/O region end is not page-aligned!\n"); + } if (((io_rgn_start <= gPhysBase) && (io_rgn_end > gPhysBase)) || ((io_rgn_start < avail_end) && (io_rgn_end >= avail_end)) || - ((io_rgn_start > gPhysBase) && (io_rgn_end < avail_end))) + ((io_rgn_start > gPhysBase) && (io_rgn_end < avail_end))) { panic("pmap I/O region overlaps physical memory!\n"); + } - return (num_io_rgns * sizeof(*ranges)); + return num_io_rgns * sizeof(*ranges); } /* @@ -2820,12 +2881,13 @@ cmp_io_rgns(const void *a, const void *b) { const pmap_io_range_t *range_a = a; const pmap_io_range_t *range_b = b; - if ((range_b->addr + range_b->len) <= range_a->addr) + if ((range_b->addr + range_b->len) <= range_a->addr) { return 1; - else if ((range_a->addr + range_a->len) <= range_b->addr) + } else if ((range_a->addr + range_a->len) <= range_b->addr) { return -1; - else + } else { return 0; + } } static void @@ -2834,11 +2896,12 @@ pmap_load_io_rgns(void) DTEntry entry; pmap_io_range_t *ranges; void *prop = NULL; - int err; + int err; unsigned int prop_size; - if (num_io_rgns == 0) + if (num_io_rgns == 0) { return; + } err = DTLookupEntry(NULL, "/defaults", &entry); assert(err == kSuccess); @@ -2847,8 +2910,9 @@ pmap_load_io_rgns(void) assert(err == kSuccess); ranges = prop; - for (unsigned int i = 0; i < (prop_size / sizeof(*ranges)); ++i) + for (unsigned int i = 0; i < (prop_size / sizeof(*ranges)); ++i) { io_attr_table[i] = ranges[i]; + } qsort(io_attr_table, num_io_rgns, sizeof(*ranges), cmp_io_rgns); } @@ -2879,7 +2943,7 @@ pmap_get_arm64_prot( tt_type = tte & ARM_TTE_TYPE_MASK; - if(tt_type == ARM_TTE_TYPE_BLOCK) { + if (tt_type == ARM_TTE_TYPE_BLOCK) { return prot | (tte & ARM_TTE_BLOCK_NX) | (tte & ARM_TTE_BLOCK_PNX) | (tte & ARM_TTE_BLOCK_APMASK) | ARM_TTE_VALID; } @@ -2927,7 +2991,7 @@ pmap_get_arm64_prot( * physical-to-virtual 
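pmap_compute_io_rgns() above rejects any device-tree range whose endpoints are not page-aligned or whose end address wraps; os_add_overflow() does the wrap check. A sketch of the same validation, with the GCC/Clang __builtin_add_overflow() standing in for the kernel helper and an assumed 4KB page mask:

```c
#include <stdbool.h>
#include <stdint.h>

#define PAGE_MASK_SKETCH 0xfffULL   /* assumed 4KB pages */

/* True iff the range is page-aligned at both ends and addr + len does not
 * wrap; on success *end_out holds the exclusive end, as rgn_end does. */
static bool
io_range_valid_sketch(uint64_t addr, uint32_t len, uint64_t *end_out)
{
	if (addr & PAGE_MASK_SKETCH) {
		return false;           /* start not page-aligned */
	}
	if (len & PAGE_MASK_SKETCH) {
		return false;           /* length not page-aligned */
	}
	if (__builtin_add_overflow(addr, (uint64_t)len, end_out)) {
		return false;           /* addr + len wraps around */
	}
	return true;
}
```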
translation lookup tables for the * physical memory to be managed (between avail_start and * avail_end). - + * * Map the kernel's code and data, and allocate the system page table. * Page_size must already be set. * @@ -2942,18 +3006,20 @@ void pmap_bootstrap( vm_offset_t vstart) { - pmap_paddr_t pmap_struct_start; + pmap_paddr_t pmap_struct_start; vm_size_t pv_head_size; - vm_size_t ptd_root_table_size; + vm_size_t ptd_root_table_size; vm_size_t pp_attr_table_size; - vm_size_t io_attr_table_size; + vm_size_t io_attr_table_size; unsigned int npages; unsigned int i; - vm_map_offset_t maxoffset; + vm_map_offset_t maxoffset; + + lck_grp_init(&pmap_lck_grp, "pmap", LCK_GRP_ATTR_NULL); #if DEVELOPMENT || DEBUG - if (PE_parse_boot_argn("pmap_trace", &pmap_trace_mask, sizeof (pmap_trace_mask))) { + if (PE_parse_boot_argn("pmap_trace", &pmap_trace_mask, sizeof(pmap_trace_mask))) { kprintf("Kernel traces for pmap operations enabled\n"); } #endif @@ -2973,7 +3039,7 @@ pmap_bootstrap( kernel_pmap->ref_count = 1; kernel_pmap->gc_status = 0; kernel_pmap->nx_enabled = TRUE; -#ifdef __arm64__ +#ifdef __arm64__ kernel_pmap->is_64bit = TRUE; #else kernel_pmap->is_64bit = FALSE; @@ -2987,14 +3053,14 @@ pmap_bootstrap( kernel_pmap->nested_region_asid_bitmap_size = 0x0UL; #if (__ARM_VMSA__ == 7) - kernel_pmap->tte_index_max = 4*NTTES; + kernel_pmap->tte_index_max = 4 * NTTES; #else kernel_pmap->tte_index_max = (ARM_PGBYTES / sizeof(tt_entry_t)); #endif kernel_pmap->prev_tte = (tt_entry_t *) NULL; PMAP_LOCK_INIT(kernel_pmap); -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) simple_lock_init(&kernel_pmap->tt1_lock, 0); kernel_pmap->cpu_ref = 0; #endif @@ -3005,11 +3071,8 @@ pmap_bootstrap( npages = (unsigned int)atop(mem_size); pp_attr_table_size = npages * sizeof(pp_attr_t); pv_head_size = round_page(sizeof(pv_entry_t *) * npages); -#if (__ARM_VMSA__ == 7) - ptd_root_table_size = sizeof(pt_desc_t) * (1<<((mem_size>>30)+12)); -#else - ptd_root_table_size = sizeof(pt_desc_t) * (1<<((mem_size>>30)+13)); -#endif + // allocate enough initial PTDs to map twice the available physical memory + ptd_root_table_size = sizeof(pt_desc_t) * (mem_size / ((PAGE_SIZE / sizeof(pt_entry_t)) * ARM_PGBYTES)) * 2; pmap_struct_start = avail_start; @@ -3025,7 +3088,7 @@ pmap_bootstrap( memset((char *)phystokv(pmap_struct_start), 0, avail_start - pmap_struct_start); pmap_load_io_rgns(); - ptd_bootstrap(ptd_root_table, (unsigned int)(ptd_root_table_size/sizeof(pt_desc_t))); + ptd_bootstrap(ptd_root_table, (unsigned int)(ptd_root_table_size / sizeof(pt_desc_t))); pmap_cpu_data_array_init(); @@ -3057,8 +3120,9 @@ pmap_bootstrap( virtual_space_end = VM_MAX_KERNEL_ADDRESS; /* mark all the address spaces in use */ - for (i = 0; i < MAX_ASID / (sizeof(uint32_t) * NBBY); i++) + for (i = 0; i < MAX_ASID / (sizeof(uint32_t) * NBBY); i++) { asid_bitmap[i] = 0xffffffff; + } /* * The kernel gets ASID 0, and all aliases of it. 
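The new ptd_root_table_size computation replaces the old power-of-two heuristic with explicit arithmetic: one page-table page holds PAGE_SIZE / sizeof(pt_entry_t) entries, each mapping ARM_PGBYTES, and the descriptor count is sized to cover physical memory twice over, per the comment. A worked example with assumed 16KB-page constants (sizeof(pt_desc_t) is also assumed):

```c
#include <stdint.h>
#include <stdio.h>

#define ARM_PGBYTES_SKETCH  (16 * 1024)   /* 16KB pages and page tables */
#define PTE_SIZE_SKETCH     8             /* 8-byte pt_entry_t */
#define PT_DESC_SIZE_SKETCH 64            /* assumed sizeof(pt_desc_t) */

int
main(void)
{
	uint64_t mem_size = 4ULL << 30;   /* 4 GB of managed physical memory */

	/* One leaf table holds 16384/8 = 2048 PTEs, each mapping a 16KB page,
	 * so each page-table page spans 2048 * 16KB = 32 MB. */
	uint64_t bytes_per_pt_page =
	    (ARM_PGBYTES_SKETCH / PTE_SIZE_SKETCH) * (uint64_t)ARM_PGBYTES_SKETCH;

	/* Descriptors to map all of memory once, then doubled, matching
	 * "map twice the available physical memory". */
	uint64_t ptd_count = (mem_size / bytes_per_pt_page) * 2;
	uint64_t table_bytes = ptd_count * PT_DESC_SIZE_SKETCH;

	/* 4GB / 32MB = 128; doubled = 256 descriptors -> 16KB of pt_desc_t. */
	printf("%llu PTDs, %llu bytes of descriptor table\n",
	    (unsigned long long)ptd_count, (unsigned long long)table_bytes);
	return 0;
}
```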
This is @@ -3079,25 +3143,25 @@ pmap_bootstrap( kernel_pmap->vasid = 0; - if (PE_parse_boot_argn("arm_maxoffset", &maxoffset, sizeof (maxoffset))) { + if (PE_parse_boot_argn("arm_maxoffset", &maxoffset, sizeof(maxoffset))) { maxoffset = trunc_page(maxoffset); if ((maxoffset >= pmap_max_offset(FALSE, ARM_PMAP_MAX_OFFSET_MIN)) && (maxoffset <= pmap_max_offset(FALSE, ARM_PMAP_MAX_OFFSET_MAX))) { - arm_pmap_max_offset_default = maxoffset; + arm_pmap_max_offset_default = maxoffset; } } #if defined(__arm64__) - if (PE_parse_boot_argn("arm64_maxoffset", &maxoffset, sizeof (maxoffset))) { + if (PE_parse_boot_argn("arm64_maxoffset", &maxoffset, sizeof(maxoffset))) { maxoffset = trunc_page(maxoffset); if ((maxoffset >= pmap_max_offset(TRUE, ARM_PMAP_MAX_OFFSET_MIN)) && (maxoffset <= pmap_max_offset(TRUE, ARM_PMAP_MAX_OFFSET_MAX))) { - arm64_pmap_max_offset_default = maxoffset; + arm64_pmap_max_offset_default = maxoffset; } } #endif #if DEVELOPMENT || DEBUG - PE_parse_boot_argn("panic_on_unsigned_execute", &panic_on_unsigned_execute, sizeof (panic_on_unsigned_execute)); + PE_parse_boot_argn("panic_on_unsigned_execute", &panic_on_unsigned_execute, sizeof(panic_on_unsigned_execute)); #endif /* DEVELOPMENT || DEBUG */ pmap_nesting_size_min = ARM_NESTING_SIZE_MIN; @@ -3108,11 +3172,11 @@ pmap_bootstrap( #if MACH_ASSERT PE_parse_boot_argn("pmap_stats_assert", - &pmap_stats_assert, - sizeof (pmap_stats_assert)); + &pmap_stats_assert, + sizeof(pmap_stats_assert)); PE_parse_boot_argn("vm_footprint_suspend_allowed", - &vm_footprint_suspend_allowed, - sizeof (vm_footprint_suspend_allowed)); + &vm_footprint_suspend_allowed, + sizeof(vm_footprint_suspend_allowed)); #endif /* MACH_ASSERT */ #if KASAN @@ -3124,9 +3188,9 @@ pmap_bootstrap( void pmap_virtual_space( - vm_offset_t *startp, - vm_offset_t *endp -) + vm_offset_t *startp, + vm_offset_t *endp + ) { *startp = virtual_space_start; *endp = virtual_space_end; @@ -3138,10 +3202,10 @@ pmap_virtual_region( unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size -) + ) { - boolean_t ret = FALSE; -#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__ + boolean_t ret = FALSE; +#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__ if (region_select == 0) { /* * In this config, the bootstrap mappings should occupy their own L2 @@ -3149,7 +3213,7 @@ pmap_virtual_region( * TTEs and PTEs in their own pages allows us to lock down those pages, * while allowing the rest of the kernel address range to be remapped. */ -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) *startp = LOW_GLOBAL_BASE_ADDRESS & ~ARM_TT_L2_OFFMASK; #else #error Unsupported configuration @@ -3164,17 +3228,17 @@ pmap_virtual_region( #endif if (region_select == 0) { -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) *startp = gVirtBase & 0xFFC00000; - *size = ((virtual_space_start-(gVirtBase & 0xFFC00000)) + ~0xFFC00000) & 0xFFC00000; + *size = ((virtual_space_start - (gVirtBase & 0xFFC00000)) + ~0xFFC00000) & 0xFFC00000; #else /* Round to avoid overlapping with the V=P area; round to at least the L2 block size. 
*/ if (!TEST_PAGE_SIZE_4K) { *startp = gVirtBase & 0xFFFFFFFFFE000000; - *size = ((virtual_space_start-(gVirtBase & 0xFFFFFFFFFE000000)) + ~0xFFFFFFFFFE000000) & 0xFFFFFFFFFE000000; + *size = ((virtual_space_start - (gVirtBase & 0xFFFFFFFFFE000000)) + ~0xFFFFFFFFFE000000) & 0xFFFFFFFFFE000000; } else { *startp = gVirtBase & 0xFFFFFFFFFF800000; - *size = ((virtual_space_start-(gVirtBase & 0xFFFFFFFFFF800000)) + ~0xFFFFFFFFFF800000) & 0xFFFFFFFFFF800000; + *size = ((virtual_space_start - (gVirtBase & 0xFFFFFFFFFF800000)) + ~0xFFFFFFFFFF800000) & 0xFFFFFFFFFF800000; } #endif ret = TRUE; @@ -3184,7 +3248,7 @@ pmap_virtual_region( *size = VREGION1_SIZE; ret = TRUE; } -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) /* We need to reserve a range that is at least the size of an L2 block mapping for the low globals */ if (!TEST_PAGE_SIZE_4K) { low_global_vr_mask = 0xFFFFFFFFFE000000; @@ -3194,7 +3258,7 @@ pmap_virtual_region( low_global_vr_size = 0x800000; } - if (((gVirtBase & low_global_vr_mask) != LOW_GLOBAL_BASE_ADDRESS) && (region_select == 2)) { + if (((gVirtBase & low_global_vr_mask) != LOW_GLOBAL_BASE_ADDRESS) && (region_select == 2)) { *startp = LOW_GLOBAL_BASE_ADDRESS; *size = low_global_vr_size; ret = TRUE; @@ -3284,24 +3348,39 @@ pmap_init( #endif } +void +pmap_pv_fixup(__unused vm_offset_t start, __unused vm_size_t length) +{ +} + boolean_t pmap_verify_free( ppnum_t ppnum) { - pv_entry_t **pv_h; + pv_entry_t **pv_h; int pai; pmap_paddr_t phys = ptoa(ppnum); assert(phys != vm_page_fictitious_addr); - if (!pa_valid(phys)) - return (FALSE); + if (!pa_valid(phys)) { + return FALSE; + } pai = (int)pa_index(phys); pv_h = pai_to_pvh(pai); - return (pvh_test_type(pv_h, PVH_TYPE_NULL)); + return pvh_test_type(pv_h, PVH_TYPE_NULL); +} + +#if MACH_ASSERT +void +pmap_assert_free(ppnum_t ppnum) +{ + assertf(pmap_verify_free(ppnum), "page = 0x%x", ppnum); + (void)ppnum; } +#endif /* @@ -3315,8 +3394,8 @@ pmap_zone_init( * Create the zone of physical maps * and the physical-to-virtual entries. */ - pmap_zone = zinit((vm_size_t) sizeof(struct pmap), (vm_size_t) sizeof(struct pmap)*256, - PAGE_SIZE, "pmap"); + pmap_zone = zinit((vm_size_t) sizeof(struct pmap), (vm_size_t) sizeof(struct pmap) * 256, + PAGE_SIZE, "pmap"); } @@ -3324,8 +3403,8 @@ void pmap_ledger_alloc_init(size_t size) { panic("%s: unsupported, " - "size=%lu", - __func__, size); + "size=%lu", + __func__, size); } ledger_t @@ -3334,7 +3413,7 @@ pmap_ledger_alloc(void) ledger_t retval = NULL; panic("%s: unsupported", - __func__); + __func__); return retval; } @@ -3343,8 +3422,8 @@ void pmap_ledger_free(ledger_t ledger) { panic("%s: unsupported, " - "ledger=%p", - __func__, ledger); + "ledger=%p", + __func__, ledger); } /* @@ -3372,7 +3451,7 @@ pmap_create_internal( * A software use-only map doesn't even need a pmap. */ if (size != 0) { - return (PMAP_NULL); + return PMAP_NULL; } @@ -3380,8 +3459,9 @@ pmap_create_internal( * Allocate a pmap struct from the pmap_zone. Then allocate * the translation table of the right size for the pmap. 
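The region math in pmap_virtual_region() rounds the base down and the size up with a single alignment mask: 0xFFC00000 gives 4MB (L1 section) granularity in the __ARM_VMSA__ == 7 case, and the wider arm64 masks give L2-block granularity. The round-up trick is adding ~mask (granule minus one) before masking, shown here for the 32-bit case:

```c
#include <stdint.h>

#define VR_MASK 0xFFC00000u     /* 4MB granularity, as in the VMSA7 branch */

static uint32_t
vr_base_sketch(uint32_t virt_base)
{
	return virt_base & VR_MASK;                 /* round the base down */
}

static uint32_t
vr_size_sketch(uint32_t space_start, uint32_t virt_base)
{
	/* (end - base) rounded up: ~VR_MASK == granule - 1 == 0x3FFFFF. */
	return ((space_start - (virt_base & VR_MASK)) + ~VR_MASK) & VR_MASK;
}
/* e.g. base 0x80502000 -> 0x80400000; end 0x80A00000 -> size 0x800000. */
```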
*/ - if ((p = (pmap_t) zalloc(pmap_zone)) == PMAP_NULL) - return (PMAP_NULL); + if ((p = (pmap_t) zalloc(pmap_zone)) == PMAP_NULL) { + return PMAP_NULL; + } if (is_64bit) { p->min = MACH_VM_MIN_ADDRESS; @@ -3407,7 +3487,7 @@ pmap_create_internal( p->ledger = ledger; PMAP_LOCK_INIT(p); -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) simple_lock_init(&p->tt1_lock, 0); p->cpu_ref = 0; #endif @@ -3427,8 +3507,9 @@ pmap_create_internal( p->prev_tte = (tt_entry_t *) NULL; /* nullify the translation table */ - for (i = 0; i < p->tte_index_max; i++) + for (i = 0; i < p->tte_index_max; i++) { p->tte[i] = ARM_TTE_TYPE_FAULT; + } FLUSH_PTE_RANGE(p->tte, p->tte + p->tte_index_max); @@ -3453,7 +3534,7 @@ pmap_create_internal( #if MACH_ASSERT p->pmap_stats_assert = TRUE; p->pmap_pid = 0; - strlcpy(p->pmap_procname, "", sizeof (p->pmap_procname)); + strlcpy(p->pmap_procname, "", sizeof(p->pmap_procname)); #endif /* MACH_ASSERT */ #if DEVELOPMENT || DEBUG p->footprint_was_suspended = FALSE; @@ -3463,7 +3544,7 @@ pmap_create_internal( queue_enter(&map_pmap_list, p, pmap_t, pmaps); pmap_simple_unlock(&pmaps_lock); - return (p); + return p; } pmap_t @@ -3504,7 +3585,7 @@ pmap_set_process_internal( VALIDATE_PMAP(pmap); pmap->pmap_pid = pid; - strlcpy(pmap->pmap_procname, procname, sizeof (pmap->pmap_procname)); + strlcpy(pmap->pmap_procname, procname, sizeof(pmap->pmap_procname)); if (pmap_ledgers_panic_leeway) { /* * XXX FBDP @@ -3519,17 +3600,17 @@ pmap_set_process_internal( */ pmap->pmap_stats_assert = FALSE; ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.phys_footprint); + task_ledgers.phys_footprint); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.internal); + task_ledgers.internal); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.internal_compressed); + task_ledgers.internal_compressed); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.iokit_mapped); + task_ledgers.iokit_mapped); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.alternate_accounting); + task_ledgers.alternate_accounting); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.alternate_accounting_compressed); + task_ledgers.alternate_accounting_compressed); } #endif /* MACH_ASSERT */ } @@ -3557,112 +3638,112 @@ pmap_set_process( */ struct { - uint64_t num_pmaps_checked; - - int phys_footprint_over; - ledger_amount_t phys_footprint_over_total; - ledger_amount_t phys_footprint_over_max; - int phys_footprint_under; - ledger_amount_t phys_footprint_under_total; - ledger_amount_t phys_footprint_under_max; - - int internal_over; - ledger_amount_t internal_over_total; - ledger_amount_t internal_over_max; - int internal_under; - ledger_amount_t internal_under_total; - ledger_amount_t internal_under_max; - - int internal_compressed_over; - ledger_amount_t internal_compressed_over_total; - ledger_amount_t internal_compressed_over_max; - int internal_compressed_under; - ledger_amount_t internal_compressed_under_total; - ledger_amount_t internal_compressed_under_max; - - int iokit_mapped_over; - ledger_amount_t iokit_mapped_over_total; - ledger_amount_t iokit_mapped_over_max; - int iokit_mapped_under; - ledger_amount_t iokit_mapped_under_total; - ledger_amount_t iokit_mapped_under_max; - - int alternate_accounting_over; - ledger_amount_t alternate_accounting_over_total; - ledger_amount_t alternate_accounting_over_max; - int alternate_accounting_under; - ledger_amount_t alternate_accounting_under_total; - ledger_amount_t alternate_accounting_under_max; - - int 
alternate_accounting_compressed_over; - ledger_amount_t alternate_accounting_compressed_over_total; - ledger_amount_t alternate_accounting_compressed_over_max; - int alternate_accounting_compressed_under; - ledger_amount_t alternate_accounting_compressed_under_total; - ledger_amount_t alternate_accounting_compressed_under_max; - - int page_table_over; - ledger_amount_t page_table_over_total; - ledger_amount_t page_table_over_max; - int page_table_under; - ledger_amount_t page_table_under_total; - ledger_amount_t page_table_under_max; - - int purgeable_volatile_over; - ledger_amount_t purgeable_volatile_over_total; - ledger_amount_t purgeable_volatile_over_max; - int purgeable_volatile_under; - ledger_amount_t purgeable_volatile_under_total; - ledger_amount_t purgeable_volatile_under_max; - - int purgeable_nonvolatile_over; - ledger_amount_t purgeable_nonvolatile_over_total; - ledger_amount_t purgeable_nonvolatile_over_max; - int purgeable_nonvolatile_under; - ledger_amount_t purgeable_nonvolatile_under_total; - ledger_amount_t purgeable_nonvolatile_under_max; - - int purgeable_volatile_compressed_over; - ledger_amount_t purgeable_volatile_compressed_over_total; - ledger_amount_t purgeable_volatile_compressed_over_max; - int purgeable_volatile_compressed_under; - ledger_amount_t purgeable_volatile_compressed_under_total; - ledger_amount_t purgeable_volatile_compressed_under_max; - - int purgeable_nonvolatile_compressed_over; - ledger_amount_t purgeable_nonvolatile_compressed_over_total; - ledger_amount_t purgeable_nonvolatile_compressed_over_max; - int purgeable_nonvolatile_compressed_under; - ledger_amount_t purgeable_nonvolatile_compressed_under_total; - ledger_amount_t purgeable_nonvolatile_compressed_under_max; - - int network_volatile_over; - ledger_amount_t network_volatile_over_total; - ledger_amount_t network_volatile_over_max; - int network_volatile_under; - ledger_amount_t network_volatile_under_total; - ledger_amount_t network_volatile_under_max; - - int network_nonvolatile_over; - ledger_amount_t network_nonvolatile_over_total; - ledger_amount_t network_nonvolatile_over_max; - int network_nonvolatile_under; - ledger_amount_t network_nonvolatile_under_total; - ledger_amount_t network_nonvolatile_under_max; - - int network_volatile_compressed_over; - ledger_amount_t network_volatile_compressed_over_total; - ledger_amount_t network_volatile_compressed_over_max; - int network_volatile_compressed_under; - ledger_amount_t network_volatile_compressed_under_total; - ledger_amount_t network_volatile_compressed_under_max; - - int network_nonvolatile_compressed_over; - ledger_amount_t network_nonvolatile_compressed_over_total; - ledger_amount_t network_nonvolatile_compressed_over_max; - int network_nonvolatile_compressed_under; - ledger_amount_t network_nonvolatile_compressed_under_total; - ledger_amount_t network_nonvolatile_compressed_under_max; + uint64_t num_pmaps_checked; + + int phys_footprint_over; + ledger_amount_t phys_footprint_over_total; + ledger_amount_t phys_footprint_over_max; + int phys_footprint_under; + ledger_amount_t phys_footprint_under_total; + ledger_amount_t phys_footprint_under_max; + + int internal_over; + ledger_amount_t internal_over_total; + ledger_amount_t internal_over_max; + int internal_under; + ledger_amount_t internal_under_total; + ledger_amount_t internal_under_max; + + int internal_compressed_over; + ledger_amount_t internal_compressed_over_total; + ledger_amount_t internal_compressed_over_max; + int internal_compressed_under; + ledger_amount_t 
internal_compressed_under_total; + ledger_amount_t internal_compressed_under_max; + + int iokit_mapped_over; + ledger_amount_t iokit_mapped_over_total; + ledger_amount_t iokit_mapped_over_max; + int iokit_mapped_under; + ledger_amount_t iokit_mapped_under_total; + ledger_amount_t iokit_mapped_under_max; + + int alternate_accounting_over; + ledger_amount_t alternate_accounting_over_total; + ledger_amount_t alternate_accounting_over_max; + int alternate_accounting_under; + ledger_amount_t alternate_accounting_under_total; + ledger_amount_t alternate_accounting_under_max; + + int alternate_accounting_compressed_over; + ledger_amount_t alternate_accounting_compressed_over_total; + ledger_amount_t alternate_accounting_compressed_over_max; + int alternate_accounting_compressed_under; + ledger_amount_t alternate_accounting_compressed_under_total; + ledger_amount_t alternate_accounting_compressed_under_max; + + int page_table_over; + ledger_amount_t page_table_over_total; + ledger_amount_t page_table_over_max; + int page_table_under; + ledger_amount_t page_table_under_total; + ledger_amount_t page_table_under_max; + + int purgeable_volatile_over; + ledger_amount_t purgeable_volatile_over_total; + ledger_amount_t purgeable_volatile_over_max; + int purgeable_volatile_under; + ledger_amount_t purgeable_volatile_under_total; + ledger_amount_t purgeable_volatile_under_max; + + int purgeable_nonvolatile_over; + ledger_amount_t purgeable_nonvolatile_over_total; + ledger_amount_t purgeable_nonvolatile_over_max; + int purgeable_nonvolatile_under; + ledger_amount_t purgeable_nonvolatile_under_total; + ledger_amount_t purgeable_nonvolatile_under_max; + + int purgeable_volatile_compressed_over; + ledger_amount_t purgeable_volatile_compressed_over_total; + ledger_amount_t purgeable_volatile_compressed_over_max; + int purgeable_volatile_compressed_under; + ledger_amount_t purgeable_volatile_compressed_under_total; + ledger_amount_t purgeable_volatile_compressed_under_max; + + int purgeable_nonvolatile_compressed_over; + ledger_amount_t purgeable_nonvolatile_compressed_over_total; + ledger_amount_t purgeable_nonvolatile_compressed_over_max; + int purgeable_nonvolatile_compressed_under; + ledger_amount_t purgeable_nonvolatile_compressed_under_total; + ledger_amount_t purgeable_nonvolatile_compressed_under_max; + + int network_volatile_over; + ledger_amount_t network_volatile_over_total; + ledger_amount_t network_volatile_over_max; + int network_volatile_under; + ledger_amount_t network_volatile_under_total; + ledger_amount_t network_volatile_under_max; + + int network_nonvolatile_over; + ledger_amount_t network_nonvolatile_over_total; + ledger_amount_t network_nonvolatile_over_max; + int network_nonvolatile_under; + ledger_amount_t network_nonvolatile_under_total; + ledger_amount_t network_nonvolatile_under_max; + + int network_volatile_compressed_over; + ledger_amount_t network_volatile_compressed_over_total; + ledger_amount_t network_volatile_compressed_over_max; + int network_volatile_compressed_under; + ledger_amount_t network_volatile_compressed_under_total; + ledger_amount_t network_volatile_compressed_under_max; + + int network_nonvolatile_compressed_over; + ledger_amount_t network_nonvolatile_compressed_over_total; + ledger_amount_t network_nonvolatile_compressed_over_max; + int network_nonvolatile_compressed_under; + ledger_amount_t network_nonvolatile_compressed_under_total; + ledger_amount_t network_nonvolatile_compressed_under_max; } pmap_ledgers_drift; #endif /* MACH_ASSERT */ @@ -3675,37 +3756,39 @@ 
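/*
 * Each ledger tracked in pmap_ledgers_drift gets an over/under triple:
 * an event count, a running total, and a worst case. A sketch of how a
 * checker might fold one ledger's final balance into the record
 * (hypothetical helper; the bookkeeping happens when a pmap is
 * destroyed holding a nonzero balance):
 */
static void
ledger_drift_note(ledger_amount_t bal,
    int *cnt_over, ledger_amount_t *tot_over, ledger_amount_t *max_over,
    int *cnt_under, ledger_amount_t *tot_under, ledger_amount_t *max_under)
{
	if (bal > 0) {
		(*cnt_over)++;
		*tot_over += bal;
		if (bal > *max_over) {
			*max_over = bal;
		}
	} else if (bal < 0) {
		(*cnt_under)++;
		*tot_under += bal;        /* totals accumulate the deficit */
		if (bal < *max_under) {
			*max_under = bal; /* most negative balance seen    */
		}
	}
}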
MARK_AS_PMAP_TEXT static void pmap_destroy_internal( pmap_t pmap) { - if (pmap == PMAP_NULL) + if (pmap == PMAP_NULL) { return; + } VALIDATE_PMAP(pmap); int32_t ref_count = __c11_atomic_fetch_sub(&pmap->ref_count, 1, memory_order_relaxed) - 1; - if (ref_count > 0) + if (ref_count > 0) { return; - else if (ref_count < 0) + } else if (ref_count < 0) { panic("pmap %p: refcount underflow", pmap); - else if (pmap == kernel_pmap) + } else if (pmap == kernel_pmap) { panic("pmap %p: attempt to destroy kernel pmap", pmap); + } #if (__ARM_VMSA__ == 7) pt_entry_t *ttep; - unsigned int i; + unsigned int i; pmap_simple_lock(&pmaps_lock); while (pmap->gc_status & PMAP_GC_INFLIGHT) { pmap->gc_status |= PMAP_GC_WAIT; - assert_wait((event_t) & pmap->gc_status, THREAD_UNINT); + assert_wait((event_t) &pmap->gc_status, THREAD_UNINT); pmap_simple_unlock(&pmaps_lock); - (void) thread_block(THREAD_CONTINUE_NULL); + (void) thread_block(THREAD_CONTINUE_NULL); pmap_simple_lock(&pmaps_lock); - } queue_remove(&map_pmap_list, pmap, pmap_t, pmaps); pmap_simple_unlock(&pmaps_lock); - if (pmap->cpu_ref != 0) + if (pmap->cpu_ref != 0) { panic("pmap_destroy(%p): cpu_ref = %u", pmap, pmap->cpu_ref); + } pmap_trim_self(pmap); @@ -3723,7 +3806,7 @@ pmap_destroy_internal( PMAP_UNLOCK(pmap); if (pmap->tte) { - pmap_tt1_deallocate(pmap, pmap->tte, pmap->tte_index_max*sizeof(tt_entry_t), 0); + pmap_tt1_deallocate(pmap, pmap->tte, pmap->tte_index_max * sizeof(tt_entry_t), 0); pmap->tte = (tt_entry_t *) NULL; pmap->ttep = 0; pmap->tte_index_max = 0; @@ -3740,12 +3823,13 @@ pmap_destroy_internal( pmap_check_ledgers(pmap); - if (pmap->nested_region_asid_bitmap) - kfree(pmap->nested_region_asid_bitmap, pmap->nested_region_asid_bitmap_size*sizeof(unsigned int)); + if (pmap->nested_region_asid_bitmap) { + kfree(pmap->nested_region_asid_bitmap, pmap->nested_region_asid_bitmap_size * sizeof(unsigned int)); + } zfree(pmap_zone, pmap); #else /* __ARM_VMSA__ == 7 */ pt_entry_t *ttep; - pmap_paddr_t pa; + pmap_paddr_t pa; vm_map_address_t c; pmap_unmap_sharedpage(pmap); @@ -3753,7 +3837,7 @@ pmap_destroy_internal( pmap_simple_lock(&pmaps_lock); while (pmap->gc_status & PMAP_GC_INFLIGHT) { pmap->gc_status |= PMAP_GC_WAIT; - assert_wait((event_t) & pmap->gc_status, THREAD_UNINT); + assert_wait((event_t) &pmap->gc_status, THREAD_UNINT); pmap_simple_unlock(&pmaps_lock); (void) thread_block(THREAD_CONTINUE_NULL); pmap_simple_lock(&pmaps_lock); @@ -3797,7 +3881,7 @@ pmap_destroy_internal( free_asid(pmap->vasid); if (pmap->nested_region_asid_bitmap) { - kfree(pmap->nested_region_asid_bitmap, pmap->nested_region_asid_bitmap_size*sizeof(unsigned int)); + kfree(pmap->nested_region_asid_bitmap, pmap->nested_region_asid_bitmap_size * sizeof(unsigned int)); } pmap_check_ledgers(pmap); @@ -3847,62 +3931,66 @@ pmap_reference( static tt_entry_t * pmap_tt1_allocate( - pmap_t pmap, - vm_size_t size, - unsigned option) + pmap_t pmap, + vm_size_t size, + unsigned option) { - tt_entry_t *tt1; - tt_free_entry_t *tt1_free; - pmap_paddr_t pa; - vm_address_t va; - vm_address_t va_end; - kern_return_t ret; + tt_entry_t *tt1; + tt_free_entry_t *tt1_free; + pmap_paddr_t pa; + vm_address_t va; + vm_address_t va_end; + kern_return_t ret; pmap_simple_lock(&pmaps_lock); if ((size == PAGE_SIZE) && (free_page_size_tt_count != 0)) { - free_page_size_tt_count--; - tt1 = (tt_entry_t *)free_page_size_tt_list; - free_page_size_tt_list = ((tt_free_entry_t *)tt1)->next; - pmap_simple_unlock(&pmaps_lock); - pmap_tt_ledger_credit(pmap, size); - return (tt_entry_t *)tt1; - }; - if 
((size == 2*PAGE_SIZE) && (free_two_page_size_tt_count != 0)) { - free_two_page_size_tt_count--; - tt1 = (tt_entry_t *)free_two_page_size_tt_list; - free_two_page_size_tt_list = ((tt_free_entry_t *)tt1)->next; - pmap_simple_unlock(&pmaps_lock); - pmap_tt_ledger_credit(pmap, size); - return (tt_entry_t *)tt1; - }; + free_page_size_tt_count--; + tt1 = (tt_entry_t *)free_page_size_tt_list; + free_page_size_tt_list = ((tt_free_entry_t *)tt1)->next; + pmap_simple_unlock(&pmaps_lock); + pmap_tt_ledger_credit(pmap, size); + return (tt_entry_t *)tt1; + } + ; + if ((size == 2 * PAGE_SIZE) && (free_two_page_size_tt_count != 0)) { + free_two_page_size_tt_count--; + tt1 = (tt_entry_t *)free_two_page_size_tt_list; + free_two_page_size_tt_list = ((tt_free_entry_t *)tt1)->next; + pmap_simple_unlock(&pmaps_lock); + pmap_tt_ledger_credit(pmap, size); + return (tt_entry_t *)tt1; + } + ; if (free_tt_count != 0) { - free_tt_count--; - tt1 = (tt_entry_t *)free_tt_list; - free_tt_list = (tt_free_entry_t *)((tt_free_entry_t *)tt1)->next; - pmap_simple_unlock(&pmaps_lock); - pmap_tt_ledger_credit(pmap, size); - return (tt_entry_t *)tt1; + free_tt_count--; + tt1 = (tt_entry_t *)free_tt_list; + free_tt_list = (tt_free_entry_t *)((tt_free_entry_t *)tt1)->next; + pmap_simple_unlock(&pmaps_lock); + pmap_tt_ledger_credit(pmap, size); + return (tt_entry_t *)tt1; } pmap_simple_unlock(&pmaps_lock); ret = pmap_pages_alloc(&pa, (unsigned)((size < PAGE_SIZE)? PAGE_SIZE : size), ((option & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)); - if(ret == KERN_RESOURCE_SHORTAGE) + if (ret == KERN_RESOURCE_SHORTAGE) { return (tt_entry_t *)0; + } if (size < PAGE_SIZE) { pmap_simple_lock(&pmaps_lock); - for (va_end = phystokv(pa) + PAGE_SIZE, va = phystokv(pa) + size; va < va_end; va = va+size) { + for (va_end = phystokv(pa) + PAGE_SIZE, va = phystokv(pa) + size; va < va_end; va = va + size) { tt1_free = (tt_free_entry_t *)va; tt1_free->next = free_tt_list; free_tt_list = tt1_free; free_tt_count++; } - if (free_tt_count > free_tt_max) + if (free_tt_count > free_tt_max) { free_tt_max = free_tt_count; + } pmap_simple_unlock(&pmaps_lock); } @@ -3923,45 +4011,49 @@ pmap_tt1_deallocate( vm_size_t size, unsigned option) { - tt_free_entry_t *tt_entry; + tt_free_entry_t *tt_entry; tt_entry = (tt_free_entry_t *)tt; - if (not_in_kdp) + if (not_in_kdp) { pmap_simple_lock(&pmaps_lock); + } - if (size < PAGE_SIZE) { + if (size < PAGE_SIZE) { free_tt_count++; - if (free_tt_count > free_tt_max) + if (free_tt_count > free_tt_max) { free_tt_max = free_tt_count; + } tt_entry->next = free_tt_list; free_tt_list = tt_entry; } if (size == PAGE_SIZE) { free_page_size_tt_count++; - if (free_page_size_tt_count > free_page_size_tt_max) + if (free_page_size_tt_count > free_page_size_tt_max) { free_page_size_tt_max = free_page_size_tt_count; + } tt_entry->next = free_page_size_tt_list; free_page_size_tt_list = tt_entry; } - if (size == 2*PAGE_SIZE) { + if (size == 2 * PAGE_SIZE) { free_two_page_size_tt_count++; - if (free_two_page_size_tt_count > free_two_page_size_tt_max) + if (free_two_page_size_tt_count > free_two_page_size_tt_max) { free_two_page_size_tt_max = free_two_page_size_tt_count; + } tt_entry->next = free_two_page_size_tt_list; free_two_page_size_tt_list = tt_entry; } if ((option & PMAP_TT_DEALLOCATE_NOBLOCK) || (!not_in_kdp)) { - if (not_in_kdp) + if (not_in_kdp) { pmap_simple_unlock(&pmaps_lock); + } pmap_tt_ledger_debit(pmap, size); return; } while (free_page_size_tt_count > FREE_PAGE_SIZE_TT_MAX) { - free_page_size_tt_count--; tt = 
(tt_entry_t *)free_page_size_tt_list; free_page_size_tt_list = ((tt_free_entry_t *)tt)->next; @@ -3982,7 +4074,7 @@ pmap_tt1_deallocate( pmap_simple_unlock(&pmaps_lock); - pmap_pages_free(ml_static_vtop((vm_offset_t)tt), 2*PAGE_SIZE); + pmap_pages_free(ml_static_vtop((vm_offset_t)tt), 2 * PAGE_SIZE); OSAddAtomic(-2 * (int32_t)(PAGE_SIZE / PMAP_ROOT_ALLOC_SIZE), (pmap == kernel_pmap ? &inuse_kernel_tteroot_count : &inuse_user_tteroot_count)); @@ -4003,7 +4095,7 @@ pmap_tt_allocate( *ttp = NULL; PMAP_LOCK(pmap); - if ((tt_free_entry_t *)pmap->tt_entry_free != NULL) { + if ((tt_free_entry_t *)pmap->tt_entry_free != NULL) { tt_free_entry_t *tt_free_next; tt_free_next = ((tt_free_entry_t *)pmap->tt_entry_free)->next; @@ -4013,13 +4105,21 @@ pmap_tt_allocate( PMAP_UNLOCK(pmap); if (*ttp == NULL) { - pt_desc_t *ptdp; + pt_desc_t *ptdp; /* * Allocate a VM page for the level x page table entries. */ while (pmap_pages_alloc(&pa, PAGE_SIZE, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) { - if(options & PMAP_OPTIONS_NOWAIT) { + if (options & PMAP_OPTIONS_NOWAIT) { + return KERN_RESOURCE_SHORTAGE; + } + VM_PAGE_WAIT(); + } + + while ((ptdp = ptd_alloc(pmap, false)) == NULL) { + if (options & PMAP_OPTIONS_NOWAIT) { + pmap_pages_free(pa, PAGE_SIZE); return KERN_RESOURCE_SHORTAGE; } VM_PAGE_WAIT(); @@ -4037,17 +4137,16 @@ pmap_tt_allocate( PMAP_ZINFO_PALLOC(pmap, PAGE_SIZE); - ptdp = ptd_alloc(pmap); pvh_update_head_unlocked(pai_to_pvh(pa_index(pa)), ptdp, PVH_TYPE_PTDP); __unreachable_ok_push if (TEST_PAGE_RATIO_4) { - vm_address_t va; - vm_address_t va_end; + vm_address_t va; + vm_address_t va_end; PMAP_LOCK(pmap); - for (va_end = phystokv(pa) + PAGE_SIZE, va = phystokv(pa) + ARM_PGBYTES; va < va_end; va = va+ARM_PGBYTES) { + for (va_end = phystokv(pa) + PAGE_SIZE, va = phystokv(pa) + ARM_PGBYTES; va < va_end; va = va + ARM_PGBYTES) { ((tt_free_entry_t *)va)->next = (tt_free_entry_t *)pmap->tt_entry_free; pmap->tt_entry_free = (tt_entry_t *)va; } @@ -4072,7 +4171,7 @@ pmap_tt_deallocate( pt_desc_t *ptdp; unsigned pt_acc_cnt; unsigned i, max_pt_index = PAGE_RATIO; - vm_offset_t free_page=0; + vm_offset_t free_page = 0; PMAP_LOCK(pmap); @@ -4080,16 +4179,19 @@ pmap_tt_deallocate( ptdp->pt_map[ARM_PT_DESC_INDEX(ttp)].va = (vm_offset_t)-1; - if ((level < PMAP_TT_MAX_LEVEL) && (ptdp->pt_cnt[ARM_PT_DESC_INDEX(ttp)].refcnt == PT_DESC_REFCOUNT)) + if ((level < PMAP_TT_MAX_LEVEL) && (ptdp->pt_cnt[ARM_PT_DESC_INDEX(ttp)].refcnt == PT_DESC_REFCOUNT)) { ptdp->pt_cnt[ARM_PT_DESC_INDEX(ttp)].refcnt = 0; + } - if (ptdp->pt_cnt[ARM_PT_DESC_INDEX(ttp)].refcnt != 0) + if (ptdp->pt_cnt[ARM_PT_DESC_INDEX(ttp)].refcnt != 0) { panic("pmap_tt_deallocate(): ptdp %p, count %d\n", ptdp, ptdp->pt_cnt[ARM_PT_DESC_INDEX(ttp)].refcnt); + } ptdp->pt_cnt[ARM_PT_DESC_INDEX(ttp)].refcnt = 0; - for (i = 0, pt_acc_cnt = 0 ; i < max_pt_index ; i++) + for (i = 0, pt_acc_cnt = 0; i < max_pt_index; i++) { pt_acc_cnt += ptdp->pt_cnt[i].refcnt; + } if (pt_acc_cnt == 0) { tt_free_entry_t *tt_free_list = (tt_free_entry_t *)&pmap->tt_entry_free; @@ -4134,14 +4236,14 @@ pmap_tt_deallocate( PMAP_UNLOCK(pmap); if (free_page != 0) { - ptd_deallocate(ptep_get_ptd((vm_offset_t)free_page)); *(pt_desc_t **)pai_to_pvh(pa_index(ml_static_vtop(free_page))) = NULL; pmap_pages_free(ml_static_vtop(free_page), PAGE_SIZE); - if (level < PMAP_TT_MAX_LEVEL) + if (level < PMAP_TT_MAX_LEVEL) { OSAddAtomic(-1, (pmap == kernel_pmap ? 
&inuse_kernel_ttepages_count : &inuse_user_ttepages_count)); - else + } else { OSAddAtomic(-1, (pmap == kernel_pmap ? &inuse_kernel_ptepages_count : &inuse_user_ptepages_count)); + } PMAP_ZINFO_PFREE(pmap, PAGE_SIZE); pmap_tt_ledger_debit(pmap, PAGE_SIZE); } @@ -4159,18 +4261,19 @@ pmap_tte_remove( panic("pmap_tte_deallocate(): null tt_entry ttep==%p\n", ttep); } - if (((level+1) == PMAP_TT_MAX_LEVEL) && (tte_get_ptd(tte)->pt_cnt[ARM_PT_DESC_INDEX(ttetokv(*ttep))].refcnt != 0)) { + if (((level + 1) == PMAP_TT_MAX_LEVEL) && (tte_get_ptd(tte)->pt_cnt[ARM_PT_DESC_INDEX(ttetokv(*ttep))].refcnt != 0)) { panic("pmap_tte_deallocate(): pmap=%p ttep=%p ptd=%p refcnt=0x%x \n", pmap, ttep, - tte_get_ptd(tte), (tte_get_ptd(tte)->pt_cnt[ARM_PT_DESC_INDEX(ttetokv(*ttep))].refcnt)); + tte_get_ptd(tte), (tte_get_ptd(tte)->pt_cnt[ARM_PT_DESC_INDEX(ttetokv(*ttep))].refcnt)); } -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) { tt_entry_t *ttep_4M = (tt_entry_t *) ((vm_offset_t)ttep & 0xFFFFFFF0); unsigned i; - for (i = 0; i<4; i++, ttep_4M++) + for (i = 0; i < 4; i++, ttep_4M++) { *ttep_4M = (tt_entry_t) 0; + } FLUSH_PTE_RANGE_STRONG(ttep_4M - 4, ttep_4M); } #else @@ -4195,25 +4298,25 @@ pmap_tte_deallocate( #if MACH_ASSERT if (tte_get_ptd(tte)->pmap != pmap) { panic("pmap_tte_deallocate(): ptd=%p ptd->pmap=%p pmap=%p \n", - tte_get_ptd(tte), tte_get_ptd(tte)->pmap, pmap); + tte_get_ptd(tte), tte_get_ptd(tte)->pmap, pmap); } #endif pmap_tte_remove(pmap, ttep, level); if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { -#if MACH_ASSERT +#if MACH_ASSERT { - pt_entry_t *pte_p = ((pt_entry_t *) (ttetokv(tte) & ~ARM_PGMASK)); - unsigned i; + pt_entry_t *pte_p = ((pt_entry_t *) (ttetokv(tte) & ~ARM_PGMASK)); + unsigned i; - for (i = 0; i < (ARM_PGBYTES / sizeof(*pte_p)); i++,pte_p++) { + for (i = 0; i < (ARM_PGBYTES / sizeof(*pte_p)); i++, pte_p++) { if (ARM_PTE_IS_COMPRESSED(*pte_p)) { panic("pmap_tte_deallocate: tte=0x%llx pmap=%p, pte_p=%p, pte=0x%llx compressed\n", - (uint64_t)tte, pmap, pte_p, (uint64_t)(*pte_p)); + (uint64_t)tte, pmap, pte_p, (uint64_t)(*pte_p)); } else if (((*pte_p) & ARM_PTE_TYPE_MASK) != ARM_PTE_TYPE_FAULT) { panic("pmap_tte_deallocate: tte=0x%llx pmap=%p, pte_p=%p, pte=0x%llx\n", - (uint64_t)tte, pmap, pte_p, (uint64_t)(*pte_p)); + (uint64_t)tte, pmap, pte_p, (uint64_t)(*pte_p)); } } } @@ -4223,7 +4326,7 @@ pmap_tte_deallocate( /* Clear any page offset: we mean to free the whole page, but armv7 TTEs may only be * aligned on 1K boundaries. We clear the surrounding "chunk" of 4 TTEs above. 
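 *
 * Aside: pmap_tt1_allocate/pmap_tt1_deallocate above keep sub-page
 * translation tables on intrusive free lists, reusing a freed table's
 * own storage as the list node. Minimal sketch (hypothetical names):
 *
 *     typedef struct free_entry { struct free_entry *next; } free_entry_t;
 *
 *     static void
 *     free_list_push(free_entry_t **head, void *chunk)
 *     {
 *         free_entry_t *e = (free_entry_t *)chunk; // chunk is the node
 *         e->next = *head;
 *         *head = e;
 *     }
 *
 *     static void *
 *     free_list_pop(free_entry_t **head)
 *     {
 *         free_entry_t *e = *head;
 *         if (e != NULL) {
 *             *head = e->next;
 *         }
 *         return e;
 *     }
 *
 * This is how free_tt_list, free_page_size_tt_list and
 * free_two_page_size_tt_list are chained under pmaps_lock.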
*/ pa = tte_to_pa(tte) & ~ARM_PGMASK; - pmap_tt_deallocate(pmap, (tt_entry_t *) phystokv(pa), level+1); + pmap_tt_deallocate(pmap, (tt_entry_t *) phystokv(pa), level + 1); PMAP_LOCK(pmap); } } @@ -4251,7 +4354,7 @@ pmap_remove_range( uint32_t *rmv_cnt) { return pmap_remove_range_options(pmap, va, bpte, epte, rmv_cnt, - PMAP_OPTIONS_REMOVE); + PMAP_OPTIONS_REMOVE); } @@ -4274,19 +4377,21 @@ pmap_set_ptov_ap(unsigned int pai __unused, unsigned int ap __unused, boolean_t pt_entry_t *pte_p = pmap_pte(kernel_pmap, kva); pt_entry_t tmplate = *pte_p; - if ((tmplate & ARM_PTE_APMASK) == ARM_PTE_AP(ap)) + if ((tmplate & ARM_PTE_APMASK) == ARM_PTE_AP(ap)) { return; + } tmplate = (tmplate & ~ARM_PTE_APMASK) | ARM_PTE_AP(ap); #if (__ARM_VMSA__ > 7) if (tmplate & ARM_PTE_HINT_MASK) { panic("%s: physical aperture PTE %p has hint bit set, va=%p, pte=0x%llx", - __func__, pte_p, (void *)kva, tmplate); + __func__, pte_p, (void *)kva, tmplate); } #endif WRITE_PTE_STRONG(pte_p, tmplate); flush_mmu_tlb_region_asid_async(kva, PAGE_SIZE, kernel_pmap); - if (!flush_tlb_async) + if (!flush_tlb_async) { sync_tlb_flush(); + } #endif } @@ -4311,8 +4416,9 @@ pmap_remove_pv( if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { - if (__builtin_expect((cpte != pvh_ptep(pv_h)), 0)) + if (__builtin_expect((cpte != pvh_ptep(pv_h)), 0)) { panic("%s: cpte=%p does not match pv_h=%p (%p), pai=0x%x\n", __func__, cpte, pv_h, pvh_ptep(pv_h), pai); + } if (IS_ALTACCT_PAGE(pai, PV_ENTRY_NULL)) { assert(IS_INTERNAL_PAGE(pai)); (*num_internal)++; @@ -4329,18 +4435,18 @@ pmap_remove_pv( } pvh_update_head(pv_h, PV_ENTRY_NULL, PVH_TYPE_NULL); } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { - pve_pp = pv_h; pve_p = pvh_list(pv_h); while (pve_p != PV_ENTRY_NULL && - (pve_get_ptep(pve_p) != cpte)) { + (pve_get_ptep(pve_p) != cpte)) { pve_pp = pve_link_field(pve_p); pve_p = PVE_NEXT_PTR(pve_next(pve_p)); } - if (__builtin_expect((pve_p == PV_ENTRY_NULL), 0)) + if (__builtin_expect((pve_p == PV_ENTRY_NULL), 0)) { panic("%s: cpte=%p (pai=0x%x) not in pv_h=%p\n", __func__, cpte, pai, pv_h); + } #if MACH_ASSERT if ((pmap != NULL) && (kern_feature_override(KF_PMAPV_OVRD) == FALSE)) { @@ -4372,16 +4478,18 @@ pmap_remove_pv( pvh_remove(pv_h, pve_pp, pve_p); pv_free(pve_p); - if (!pvh_test_type(pv_h, PVH_TYPE_NULL)) + if (!pvh_test_type(pv_h, PVH_TYPE_NULL)) { pvh_set_flags(pv_h, pvh_flags); + } } else { panic("%s: unexpected PV head %p, cpte=%p pmap=%p pv_h=%p pai=0x%x", - __func__, *pv_h, cpte, pmap, pv_h, pai); + __func__, *pv_h, cpte, pmap, pv_h, pai); } #ifdef PVH_FLAG_EXEC - if ((pvh_flags & PVH_FLAG_EXEC) && pvh_test_type(pv_h, PVH_TYPE_NULL)) + if ((pvh_flags & PVH_FLAG_EXEC) && pvh_test_type(pv_h, PVH_TYPE_NULL)) { pmap_set_ptov_ap(pai, AP_RWNA, FALSE); + } #endif } @@ -4399,9 +4507,9 @@ pmap_remove_range_options( int num_pte_changed; int pai = 0; pmap_paddr_t pa; - int num_external, num_internal, num_reusable; - int num_alt_internal; - uint64_t num_compressed, num_alt_compressed; + int num_external, num_internal, num_reusable; + int num_alt_internal; + uint64_t num_compressed, num_alt_compressed; PMAP_ASSERT_LOCKED(pmap); @@ -4416,15 +4524,16 @@ pmap_remove_range_options( num_alt_compressed = 0; for (cpte = bpte; cpte < epte; - cpte += PAGE_SIZE/ARM_PGBYTES, va += PAGE_SIZE) { + cpte += PAGE_SIZE / ARM_PGBYTES, va += PAGE_SIZE) { pt_entry_t spte; - boolean_t managed=FALSE; + boolean_t managed = FALSE; spte = *cpte; #if CONFIG_PGTRACE - if (pgtrace_enabled) + if (pgtrace_enabled) { pmap_pgtrace_remove_clone(pmap, pte_to_pa(spte), va); + } #endif 
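/*
 * The pv_h heads used by pmap_remove_pv above are tagged pointers: the
 * entry type lives in the low bits, so a singly-mapped page points
 * straight at its PTE and only multi-mapped pages pay for a pv_entry_t
 * chain. Sketch of the scheme (tag values hypothetical):
 */
#define PVH_SK_TYPE_MASK 0x3UL
#define PVH_SK_NULL      0x0UL  /* no mappings                  */
#define PVH_SK_PTEP      0x1UL  /* head is the lone PTE pointer */
#define PVH_SK_PVEP      0x2UL  /* head is a pv_entry_t chain   */

static inline boolean_t
pvh_sk_test_type(const uintptr_t *pv_h, uintptr_t type)
{
	return (*pv_h & PVH_SK_TYPE_MASK) == type;
}

static inline void *
pvh_sk_ptr(const uintptr_t *pv_h)
{
	return (void *)(*pv_h & ~PVH_SK_TYPE_MASK);  /* strip the tag */
}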
while (!managed) { @@ -4451,8 +4560,9 @@ pmap_remove_range_options( * our "compressed" markers, * so let's update it here. */ - if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(cpte)->pt_cnt[ARM_PT_DESC_INDEX(cpte)].refcnt)) <= 0) + if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(cpte)->pt_cnt[ARM_PT_DESC_INDEX(cpte)].refcnt)) <= 0) { panic("pmap_remove_range_options: over-release of ptdp %p for pte %p\n", ptep_get_ptd(cpte), cpte); + } spte = *cpte; } /* @@ -4470,7 +4580,7 @@ pmap_remove_range_options( spte = *cpte; pa = pte_to_pa(spte); if (pai == (int)pa_index(pa)) { - managed =TRUE; + managed = TRUE; break; // Leave pai locked as we will unlock it after we free the PV entry } UNLOCK_PVH(pai); @@ -4492,7 +4602,7 @@ pmap_remove_range_options( #if MACH_ASSERT if (managed && (pmap != kernel_pmap) && (ptep_get_va(cpte) != va)) { panic("pmap_remove_range_options(): cpte=%p ptd=%p pte=0x%llx va=0x%llx\n", - cpte, ptep_get_ptd(cpte), (uint64_t)*cpte, (uint64_t)va); + cpte, ptep_get_ptd(cpte), (uint64_t)*cpte, (uint64_t)va); } #endif WRITE_PTE_FAST(cpte, ARM_PTE_TYPE_FAULT); @@ -4502,9 +4612,12 @@ pmap_remove_range_options( if ((spte != ARM_PTE_TYPE_FAULT) && (pmap != kernel_pmap)) { assert(!ARM_PTE_IS_COMPRESSED(spte)); - if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(cpte)->pt_cnt[ARM_PT_DESC_INDEX(cpte)].refcnt)) <= 0) + if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(cpte)->pt_cnt[ARM_PT_DESC_INDEX(cpte)].refcnt)) <= 0) { panic("pmap_remove_range_options: over-release of ptdp %p for pte %p\n", ptep_get_ptd(cpte), cpte); - if(rmv_cnt) (*rmv_cnt)++; + } + if (rmv_cnt) { + (*rmv_cnt)++; + } } if (pte_is_wired(spte)) { @@ -4514,8 +4627,9 @@ pmap_remove_range_options( /* * if not managed, we're done */ - if (!managed) + if (!managed) { continue; + } /* * find and remove the mapping from the chain for this * physical address. @@ -4537,82 +4651,86 @@ pmap_remove_range_options( /* sanity checks... */ #if MACH_ASSERT if (pmap->stats.internal < num_internal) { - if ((! pmap_stats_assert || - ! 
pmap->pmap_stats_assert)) { + if ((!pmap_stats_assert || + !pmap->pmap_stats_assert)) { printf("%d[%s] pmap_remove_range_options(%p,0x%llx,%p,%p,0x%x): num_internal=%d num_removed=%d num_unwired=%d num_external=%d num_reusable=%d num_compressed=%lld num_alt_internal=%d num_alt_compressed=%lld num_pte_changed=%d stats.internal=%d stats.reusable=%d\n", - pmap->pmap_pid, - pmap->pmap_procname, - pmap, - (uint64_t) va, - bpte, - epte, - options, - num_internal, - num_removed, - num_unwired, - num_external, - num_reusable, - num_compressed, - num_alt_internal, - num_alt_compressed, - num_pte_changed, - pmap->stats.internal, - pmap->stats.reusable); + pmap->pmap_pid, + pmap->pmap_procname, + pmap, + (uint64_t) va, + bpte, + epte, + options, + num_internal, + num_removed, + num_unwired, + num_external, + num_reusable, + num_compressed, + num_alt_internal, + num_alt_compressed, + num_pte_changed, + pmap->stats.internal, + pmap->stats.reusable); } else { panic("%d[%s] pmap_remove_range_options(%p,0x%llx,%p,%p,0x%x): num_internal=%d num_removed=%d num_unwired=%d num_external=%d num_reusable=%d num_compressed=%lld num_alt_internal=%d num_alt_compressed=%lld num_pte_changed=%d stats.internal=%d stats.reusable=%d", - pmap->pmap_pid, - pmap->pmap_procname, - pmap, - (uint64_t) va, - bpte, - epte, - options, - num_internal, - num_removed, - num_unwired, - num_external, - num_reusable, - num_compressed, - num_alt_internal, - num_alt_compressed, - num_pte_changed, - pmap->stats.internal, - pmap->stats.reusable); + pmap->pmap_pid, + pmap->pmap_procname, + pmap, + (uint64_t) va, + bpte, + epte, + options, + num_internal, + num_removed, + num_unwired, + num_external, + num_reusable, + num_compressed, + num_alt_internal, + num_alt_compressed, + num_pte_changed, + pmap->stats.internal, + pmap->stats.reusable); } } #endif /* MACH_ASSERT */ PMAP_STATS_ASSERTF(pmap->stats.external >= num_external, - pmap, - "pmap=%p num_external=%d stats.external=%d", - pmap, num_external, pmap->stats.external); + pmap, + "pmap=%p num_external=%d stats.external=%d", + pmap, num_external, pmap->stats.external); PMAP_STATS_ASSERTF(pmap->stats.internal >= num_internal, - pmap, - "pmap=%p num_internal=%d stats.internal=%d num_reusable=%d stats.reusable=%d", - pmap, - num_internal, pmap->stats.internal, - num_reusable, pmap->stats.reusable); + pmap, + "pmap=%p num_internal=%d stats.internal=%d num_reusable=%d stats.reusable=%d", + pmap, + num_internal, pmap->stats.internal, + num_reusable, pmap->stats.reusable); PMAP_STATS_ASSERTF(pmap->stats.reusable >= num_reusable, - pmap, - "pmap=%p num_internal=%d stats.internal=%d num_reusable=%d stats.reusable=%d", - pmap, - num_internal, pmap->stats.internal, - num_reusable, pmap->stats.reusable); + pmap, + "pmap=%p num_internal=%d stats.internal=%d num_reusable=%d stats.reusable=%d", + pmap, + num_internal, pmap->stats.internal, + num_reusable, pmap->stats.reusable); PMAP_STATS_ASSERTF(pmap->stats.compressed >= num_compressed, - pmap, - "pmap=%p num_compressed=%lld num_alt_compressed=%lld stats.compressed=%lld", - pmap, num_compressed, num_alt_compressed, - pmap->stats.compressed); + pmap, + "pmap=%p num_compressed=%lld num_alt_compressed=%lld stats.compressed=%lld", + pmap, num_compressed, num_alt_compressed, + pmap->stats.compressed); /* update pmap stats... 
*/ OSAddAtomic(-num_unwired, (SInt32 *) &pmap->stats.wired_count); - if (num_external) + if (num_external) { OSAddAtomic(-num_external, &pmap->stats.external); - if (num_internal) + } + if (num_internal) { OSAddAtomic(-num_internal, &pmap->stats.internal); - if (num_reusable) + } + if (num_reusable) { OSAddAtomic(-num_reusable, &pmap->stats.reusable); - if (num_compressed) + } + if (num_compressed) { OSAddAtomic64(-num_compressed, &pmap->stats.compressed); + } /* ... and ledgers */ pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired)); pmap_ledger_debit(pmap, task_ledgers.internal, machine_ptob(num_internal)); @@ -4621,15 +4739,16 @@ pmap_remove_range_options( pmap_ledger_debit(pmap, task_ledgers.internal_compressed, machine_ptob(num_compressed)); /* make needed adjustments to phys_footprint */ pmap_ledger_debit(pmap, task_ledgers.phys_footprint, - machine_ptob((num_internal - - num_alt_internal) + - (num_compressed - - num_alt_compressed))); + machine_ptob((num_internal - + num_alt_internal) + + (num_compressed - + num_alt_compressed))); } /* flush the ptable entries we have written */ - if (num_pte_changed > 0) + if (num_pte_changed > 0) { FLUSH_PTE_RANGE_STRONG(bpte, epte); + } return num_pte_changed; } @@ -4662,10 +4781,11 @@ pmap_remove_options_internal( pt_entry_t *bpte, *epte; pt_entry_t *pte_p; tt_entry_t *tte_p; - uint32_t rmv_spte=0; + uint32_t rmv_spte = 0; - if (__improbable(end < start)) + if (__improbable(end < start)) { panic("%s: invalid address range %p, %p", __func__, (void*)start, (void*)end); + } VALIDATE_PMAP(pmap); PMAP_LOCK(pmap); @@ -4682,9 +4802,9 @@ pmap_remove_options_internal( epte = bpte + ((end - start) >> ARM_TT_LEAF_SHIFT); remove_count += pmap_remove_range_options(pmap, start, bpte, epte, - &rmv_spte, options); + &rmv_spte, options); -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) if (rmv_spte && (ptep_get_ptd(pte_p)->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt == 0) && (pmap != kernel_pmap) && (pmap->nested == FALSE)) { pmap_tte_deallocate(pmap, tte_p, PMAP_TT_L1_LEVEL); @@ -4692,7 +4812,7 @@ pmap_remove_options_internal( } #else if (rmv_spte && (ptep_get_ptd(pte_p)->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt == 0) && - (pmap != kernel_pmap) && (pmap->nested == FALSE)) { + (pmap != kernel_pmap) && (pmap->nested == FALSE)) { pmap_tte_deallocate(pmap, tte_p, PMAP_TT_L2_LEVEL); flush_mmu_tlb_entry(tlbi_addr(start & ~ARM_TT_L2_OFFMASK) | tlbi_asid(pmap->asid)); } @@ -4714,21 +4834,22 @@ pmap_remove_options( int remove_count = 0; vm_map_address_t va; - if (pmap == PMAP_NULL) + if (pmap == PMAP_NULL) { return; + } PMAP_TRACE(2, PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(start), - VM_KERNEL_ADDRHIDE(end)); + VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(start), + VM_KERNEL_ADDRHIDE(end)); #if MACH_ASSERT - if ((start|end) & PAGE_MASK) { + if ((start | end) & PAGE_MASK) { panic("pmap_remove_options() pmap %p start 0x%llx end 0x%llx\n", - pmap, (uint64_t)start, (uint64_t)end); + pmap, (uint64_t)start, (uint64_t)end); } if ((end < start) || (start < pmap->min) || (end > pmap->max)) { panic("pmap_remove_options(): invalid address range, pmap=%p, start=0x%llx, end=0x%llx\n", - pmap, (uint64_t)start, (uint64_t)end); + pmap, (uint64_t)start, (uint64_t)end); } #endif @@ -4739,21 +4860,23 @@ pmap_remove_options( while (va < end) { vm_map_address_t l; -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) l = ((va + ARM_TT_L1_SIZE) & ~ARM_TT_L1_OFFMASK); #else l = ((va + ARM_TT_L2_SIZE) & ~ARM_TT_L2_OFFMASK); #endif - 
if (l > end) + if (l > end) { l = end; + } remove_count += pmap_remove_options_internal(pmap, va, l, options); va = l; } - if (remove_count > 0) + if (remove_count > 0) { PMAP_UPDATE_TLBS(pmap, start, end); + } PMAP_TRACE(2, PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END); } @@ -4773,10 +4896,10 @@ pmap_remove_some_phys( void pmap_set_pmap( pmap_t pmap, -#if !__ARM_USER_PROTECT__ +#if !__ARM_USER_PROTECT__ __unused #endif - thread_t thread) + thread_t thread) { pmap_switch(pmap); #if __ARM_USER_PROTECT__ @@ -4785,7 +4908,7 @@ pmap_set_pmap( thread->machine.uptw_ttb = ((unsigned int) pmap->ttep) | TTBR_SETUP; } else { thread->machine.uptw_ttc = 1; \ - thread->machine.uptw_ttb = ((unsigned int) pmap->ttep ) | TTBR_SETUP; + thread->machine.uptw_ttb = ((unsigned int) pmap->ttep) | TTBR_SETUP; } thread->machine.asid = pmap->asid; #endif @@ -4810,9 +4933,10 @@ pmap_switch_internal( uint32_t last_asid_high_bits, asid_high_bits; boolean_t do_asid_flush = FALSE; -#if (__ARM_VMSA__ == 7) - if (not_in_kdp) +#if (__ARM_VMSA__ == 7) + if (not_in_kdp) { pmap_simple_lock(&pmap->tt1_lock); + } #else pmap_t last_nested_pmap = cpu_data_ptr->cpu_nested_pmap; #endif @@ -4837,21 +4961,23 @@ pmap_switch_internal( pmap_switch_user_ttb_internal(pmap); -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) /* If we're switching to a different nested pmap (i.e. shared region), we'll need * to flush the userspace mappings for that region. Those mappings are global * and will not be protected by the ASID. It should also be cheaper to flush the * entire local TLB rather than to do a broadcast MMU flush by VA region. */ - if ((pmap != kernel_pmap) && (last_nested_pmap != NULL) && (pmap->nested_pmap != last_nested_pmap)) + if ((pmap != kernel_pmap) && (last_nested_pmap != NULL) && (pmap->nested_pmap != last_nested_pmap)) { flush_core_tlb(); - else + } else #endif - if (do_asid_flush) + if (do_asid_flush) { pmap_flush_core_tlb_asid(pmap); + } -#if (__ARM_VMSA__ == 7) - if (not_in_kdp) +#if (__ARM_VMSA__ == 7) + if (not_in_kdp) { pmap_simple_unlock(&pmap->tt1_lock); + } #endif } @@ -4898,8 +5024,8 @@ pmap_page_protect_options_internal( int pai; boolean_t remove; boolean_t set_NX; - boolean_t tlb_flush_needed = FALSE; - unsigned int pvh_cnt = 0; + boolean_t tlb_flush_needed = FALSE; + unsigned int pvh_cnt = 0; assert(ppnum != vm_page_fictitious_addr); @@ -4913,7 +5039,7 @@ pmap_page_protect_options_internal( */ switch (prot) { case VM_PROT_ALL: - return; /* nothing to do */ + return; /* nothing to do */ case VM_PROT_READ: case VM_PROT_READ | VM_PROT_EXECUTE: remove = FALSE; @@ -4938,7 +5064,7 @@ pmap_page_protect_options_internal( new_pte_p = PT_ENTRY_NULL; if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { pte_p = pvh_ptep(pv_h); - } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { + } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { pve_p = pvh_list(pv_h); pveh_p = pve_p; } @@ -4949,15 +5075,16 @@ pmap_page_protect_options_internal( pt_entry_t tmplate; boolean_t update = FALSE; - if (pve_p != PV_ENTRY_NULL) + if (pve_p != PV_ENTRY_NULL) { pte_p = pve_get_ptep(pve_p); + } #ifdef PVH_FLAG_IOMMU if ((vm_offset_t)pte_p & PVH_FLAG_IOMMU) { if (remove) { if (options & PMAP_OPTIONS_COMPRESSOR) { panic("pmap_page_protect: attempt to compress ppnum 0x%x owned by iommu 0x%llx, pve_p=%p", - ppnum, (uint64_t)pte_p & ~PVH_FLAG_IOMMU, pve_p); + ppnum, (uint64_t)pte_p & ~PVH_FLAG_IOMMU, pve_p); } if (pve_p != PV_ENTRY_NULL) { pv_entry_t *temp_pve_p = PVE_NEXT_PTR(pve_next(pve_p)); @@ -4980,11 +5107,10 @@ pmap_page_protect_options_internal( if (pte_p == 
PT_ENTRY_NULL) { panic("pmap_page_protect: pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, va=0x%llx ppnum: 0x%x\n", - pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)va, ppnum); + pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)va, ppnum); } else if ((pmap == NULL) || (atop(pte_to_pa(*pte_p)) != ppnum)) { #if MACH_ASSERT if (kern_feature_override(KF_PMAPV_OVRD) == FALSE) { - pv_entry_t *check_pve_p = pveh_p; while (check_pve_p != PV_ENTRY_NULL) { if ((check_pve_p != pve_p) && (pve_get_ptep(check_pve_p) == pte_p)) { @@ -5004,9 +5130,9 @@ pmap_page_protect_options_internal( #else if ((prot & VM_PROT_EXECUTE)) #endif - set_NX = FALSE; - else + { set_NX = FALSE;} else { set_NX = TRUE; + } /* Remove the mapping if new protection is NONE */ if (remove) { @@ -5044,8 +5170,9 @@ pmap_page_protect_options_internal( if ((*pte_p != ARM_PTE_TYPE_FAULT) && tmplate == ARM_PTE_TYPE_FAULT && (pmap != kernel_pmap)) { - if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(pte_p)->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt)) <= 0) + if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_ptd(pte_p)->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt)) <= 0) { panic("pmap_page_protect_options(): over-release of ptdp %p for pte %p\n", ptep_get_ptd(pte_p), pte_p); + } } if (*pte_p != tmplate) { @@ -5100,7 +5227,6 @@ pmap_page_protect_options_internal( * we free this pv_entry. */ CLR_ALTACCT_PAGE(pai, pve_p); - } else if (IS_REUSABLE_PAGE(pai)) { assert(IS_INTERNAL_PAGE(pai)); if (options & PMAP_OPTIONS_COMPRESSOR) { @@ -5108,7 +5234,6 @@ pmap_page_protect_options_internal( /* was not in footprint, but is now */ pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); } - } else if (IS_INTERNAL_PAGE(pai)) { pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE); @@ -5137,20 +5262,20 @@ pmap_page_protect_options_internal( if (pve_p != PV_ENTRY_NULL) { assert(pve_next(pve_p) == PVE_NEXT_PTR(pve_next(pve_p))); } - } else { pt_entry_t spte; spte = *pte_p; - if (pmap == kernel_pmap) + if (pmap == kernel_pmap) { tmplate = ((spte & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA)); - else + } else { tmplate = ((spte & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RORO)); + } - pte_set_ffr(tmplate, 0); + pte_set_was_writeable(tmplate, false); -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) if (set_NX) { tmplate |= ARM_PTE_NX; } else { @@ -5166,9 +5291,9 @@ pmap_page_protect_options_internal( #endif } #else - if (set_NX) + if (set_NX) { tmplate |= ARM_PTE_NX | ARM_PTE_PNX; - else { + } else { /* * While the naive implementation of this would serve to add execute * permission, this is not how the VM uses this interface, or how @@ -5204,7 +5329,7 @@ pmap_page_protect_options_internal( } #ifdef PVH_FLAG_IOMMU - protect_skip_pve: +protect_skip_pve: #endif pte_p = PT_ENTRY_NULL; pvet_p = pve_p; @@ -5218,11 +5343,13 @@ pmap_page_protect_options_internal( } #ifdef PVH_FLAG_EXEC - if (remove && (pvh_get_flags(pv_h) & PVH_FLAG_EXEC)) + if (remove && (pvh_get_flags(pv_h) & PVH_FLAG_EXEC)) { pmap_set_ptov_ap(pai, AP_RWNA, tlb_flush_needed); + } #endif - if (tlb_flush_needed) + if (tlb_flush_needed) { sync_tlb_flush(); + } /* if we removed a bunch of entries, take care of them now */ if (remove) { @@ -5256,14 +5383,15 @@ pmap_page_protect_options( assert(ppnum != vm_page_fictitious_addr); /* Only work with managed pages. */ - if (!pa_valid(phys)) + if (!pa_valid(phys)) { return; + } /* * Determine the new protection. 
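 *
 * Both this wrapper and pmap_page_protect_options_internal classify
 * the request the same way; restated as a sketch:
 *
 *     switch (prot) {
 *     case VM_PROT_ALL:
 *         return;                         // nothing to do
 *     case VM_PROT_READ:
 *     case VM_PROT_READ | VM_PROT_EXECUTE:
 *         remove = FALSE;                 // downgrade to read-only
 *         break;
 *     default:
 *         remove = TRUE;                  // strip every mapping
 *         break;
 *     }
 *
 * Only a request that still permits reading keeps the mappings.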
*/ if (prot == VM_PROT_ALL) { - return; /* nothing to do */ + return; /* nothing to do */ } PMAP_TRACE(2, PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START, ppnum, prot); @@ -5277,7 +5405,8 @@ pmap_page_protect_options( * Indicates if the pmap layer enforces some additional restrictions on the * given set of protections. */ -bool pmap_has_prot_policy(__unused vm_prot_t prot) +bool +pmap_has_prot_policy(__unused vm_prot_t prot) { return FALSE; } @@ -5299,7 +5428,8 @@ pmap_protect( } MARK_AS_PMAP_TEXT static void -pmap_protect_options_internal(pmap_t pmap, +pmap_protect_options_internal( + pmap_t pmap, vm_map_address_t start, vm_map_address_t end, vm_prot_t prot, @@ -5313,14 +5443,15 @@ pmap_protect_options_internal(pmap_t pmap, #if (__ARM_VMSA__ > 7) boolean_t set_XO = FALSE; #endif - boolean_t should_have_removed = FALSE; + boolean_t should_have_removed = FALSE; -#ifndef __ARM_IC_NOALIAS_ICACHE__ - boolean_t InvalidatePoU_Icache_Done = FALSE; +#ifndef __ARM_IC_NOALIAS_ICACHE__ + boolean_t InvalidatePoU_Icache_Done = FALSE; #endif - if (__improbable(end < start)) + if (__improbable(end < start)) { panic("%s called with bogus range: %p, %p", __func__, (void*)start, (void*)end); + } #if DEVELOPMENT || DEBUG if (options & PMAP_OPTIONS_PROTECT_IMMEDIATE) { @@ -5342,7 +5473,7 @@ pmap_protect_options_internal(pmap_t pmap, break; case VM_PROT_READ | VM_PROT_WRITE: case VM_PROT_ALL: - return; /* nothing to do */ + return; /* nothing to do */ default: should_have_removed = TRUE; } @@ -5350,9 +5481,9 @@ pmap_protect_options_internal(pmap_t pmap, if (should_have_removed) { panic("%s: should have been a remove operation, " - "pmap=%p, start=%p, end=%p, prot=%#x, options=%#x, args=%p", - __FUNCTION__, - pmap, (void *)start, (void *)end, prot, options, args); + "pmap=%p, start=%p, end=%p, prot=%#x, options=%#x, args=%p", + __FUNCTION__, + pmap, (void *)start, (void *)end, prot, options, args); } #if DEVELOPMENT || DEBUG @@ -5377,8 +5508,8 @@ pmap_protect_options_internal(pmap_t pmap, pte_p = bpte_p; for (pte_p = bpte_p; - pte_p < epte_p; - pte_p += PAGE_SIZE/ARM_PGBYTES) { + pte_p < epte_p; + pte_p += PAGE_SIZE / ARM_PGBYTES) { pt_entry_t spte; #if DEVELOPMENT || DEBUG boolean_t force_write = FALSE; @@ -5391,9 +5522,9 @@ pmap_protect_options_internal(pmap_t pmap, continue; } - pmap_paddr_t pa; - int pai=0; - boolean_t managed=FALSE; + pmap_paddr_t pa; + int pai = 0; + boolean_t managed = FALSE; while (!managed) { /* @@ -5403,14 +5534,15 @@ pmap_protect_options_internal(pmap_t pmap, */ // assert(!ARM_PTE_IS_COMPRESSED(spte)); pa = pte_to_pa(spte); - if (!pa_valid(pa)) + if (!pa_valid(pa)) { break; + } pai = (int)pa_index(pa); LOCK_PVH(pai); spte = *pte_p; pa = pte_to_pa(spte); if (pai == (int)pa_index(pa)) { - managed =TRUE; + managed = TRUE; break; // Leave the PVH locked as we will unlock it after we free the PTE } UNLOCK_PVH(pai); @@ -5455,16 +5587,16 @@ pmap_protect_options_internal(pmap_t pmap, * not allowed to increase * access permissions. */ -#if (__ARM_VMSA__ == 7) - if (set_NX) +#if (__ARM_VMSA__ == 7) + if (set_NX) { tmplate |= ARM_PTE_NX; - else { + } else { /* do NOT clear "NX"! */ } #else - if (set_NX) + if (set_NX) { tmplate |= ARM_PTE_NX | ARM_PTE_PNX; - else { + } else { if (pmap == kernel_pmap) { /* * TODO: Run CS/Monitor checks here; @@ -5524,12 +5656,12 @@ pmap_protect_options_internal(pmap_t pmap, #endif /* We do not expect to write fast fault the entry. */ - pte_set_ffr(tmplate, 0); + pte_set_was_writeable(tmplate, false); /* TODO: Doesn't this need to worry about PNX? 
*/ if (((spte & ARM_PTE_NX) == ARM_PTE_NX) && (prot & VM_PROT_EXECUTE)) { CleanPoU_DcacheRegion((vm_offset_t) phystokv(pa), PAGE_SIZE); -#ifdef __ARM_IC_NOALIAS_ICACHE__ +#ifdef __ARM_IC_NOALIAS_ICACHE__ InvalidatePoU_IcacheRegion((vm_offset_t) phystokv(pa), PAGE_SIZE); #else if (!InvalidatePoU_Icache_Done) { @@ -5565,9 +5697,9 @@ pmap_protect_options( { vm_map_address_t l, beg; - if ((b|e) & PAGE_MASK) { + if ((b | e) & PAGE_MASK) { panic("pmap_protect_options() pmap %p start 0x%llx end 0x%llx\n", - pmap, (uint64_t)b, (uint64_t)e); + pmap, (uint64_t)b, (uint64_t)e); } #if DEVELOPMENT || DEBUG @@ -5587,7 +5719,7 @@ pmap_protect_options( break; case VM_PROT_READ | VM_PROT_WRITE: case VM_PROT_ALL: - return; /* nothing to do */ + return; /* nothing to do */ default: pmap_remove_options(pmap, b, e, options); return; @@ -5595,16 +5727,17 @@ pmap_protect_options( } PMAP_TRACE(2, PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(b), - VM_KERNEL_ADDRHIDE(e)); + VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(b), + VM_KERNEL_ADDRHIDE(e)); beg = b; while (beg < e) { l = ((beg + ARM_TT_TWIG_SIZE) & ~ARM_TT_TWIG_OFFMASK); - if (l > e) + if (l > e) { l = e; + } pmap_protect_options_internal(pmap, beg, l, prot, options, args); @@ -5638,9 +5771,9 @@ pmap_map_block( * removing the mappings is correct. */ panic("%s: failed pmap_enter, " - "pmap=%p, va=%#llx, pa=%u, size=%u, prot=%#x, flags=%#x", - __FUNCTION__, - pmap, va, pa, size, prot, flags); + "pmap=%p, va=%#llx, pa=%u, size=%u, prot=%#x, flags=%#x", + __FUNCTION__, + pmap, va, pa, size, prot, flags); pmap_remove(pmap, original_va, va - original_va); return kr; @@ -5680,11 +5813,11 @@ pmap_enter( } -static inline void pmap_enter_pte(pmap_t pmap, pt_entry_t *pte_p, pt_entry_t pte, vm_map_address_t v) +static inline void +pmap_enter_pte(pmap_t pmap, pt_entry_t *pte_p, pt_entry_t pte, vm_map_address_t v) { - if (pmap != kernel_pmap && ((pte & ARM_PTE_WIRED) != (*pte_p & ARM_PTE_WIRED))) - { - SInt16 *ptd_wiredcnt_ptr = (SInt16 *)&(ptep_get_ptd(pte_p)->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].wiredcnt); + if (pmap != kernel_pmap && ((pte & ARM_PTE_WIRED) != (*pte_p & ARM_PTE_WIRED))) { + SInt16 *ptd_wiredcnt_ptr = (SInt16 *)&(ptep_get_ptd(pte_p)->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].wiredcnt); if (pte & ARM_PTE_WIRED) { OSAddAtomic16(1, ptd_wiredcnt_ptr); pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE); @@ -5713,49 +5846,49 @@ wimg_to_pte(unsigned int wimg) pt_entry_t pte; switch (wimg & (VM_WIMG_MASK)) { - case VM_WIMG_IO: - case VM_WIMG_RT: - pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE); - pte |= ARM_PTE_NX | ARM_PTE_PNX; - break; - case VM_WIMG_POSTED: - pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED); - pte |= ARM_PTE_NX | ARM_PTE_PNX; - break; - case VM_WIMG_WCOMB: - pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITECOMB); - pte |= ARM_PTE_NX | ARM_PTE_PNX; - break; - case VM_WIMG_WTHRU: - pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITETHRU); -#if (__ARM_VMSA__ > 7) - pte |= ARM_PTE_SH(SH_OUTER_MEMORY); + case VM_WIMG_IO: + case VM_WIMG_RT: + pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE); + pte |= ARM_PTE_NX | ARM_PTE_PNX; + break; + case VM_WIMG_POSTED: + pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED); + pte |= ARM_PTE_NX | ARM_PTE_PNX; + break; + case VM_WIMG_WCOMB: + pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITECOMB); + pte |= ARM_PTE_NX | ARM_PTE_PNX; + break; + case VM_WIMG_WTHRU: + pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITETHRU); +#if (__ARM_VMSA__ > 7) + pte |= ARM_PTE_SH(SH_OUTER_MEMORY); #else - pte |= ARM_PTE_SH; + 
pte |= ARM_PTE_SH; #endif - break; - case VM_WIMG_COPYBACK: - pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK); -#if (__ARM_VMSA__ > 7) - pte |= ARM_PTE_SH(SH_OUTER_MEMORY); + break; + case VM_WIMG_COPYBACK: + pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK); +#if (__ARM_VMSA__ > 7) + pte |= ARM_PTE_SH(SH_OUTER_MEMORY); #else - pte |= ARM_PTE_SH; + pte |= ARM_PTE_SH; #endif - break; - case VM_WIMG_INNERWBACK: - pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_INNERWRITEBACK); -#if (__ARM_VMSA__ > 7) - pte |= ARM_PTE_SH(SH_INNER_MEMORY); + break; + case VM_WIMG_INNERWBACK: + pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_INNERWRITEBACK); +#if (__ARM_VMSA__ > 7) + pte |= ARM_PTE_SH(SH_INNER_MEMORY); #else - pte |= ARM_PTE_SH; + pte |= ARM_PTE_SH; #endif - break; - default: - pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT); -#if (__ARM_VMSA__ > 7) - pte |= ARM_PTE_SH(SH_OUTER_MEMORY); + break; + default: + pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT); +#if (__ARM_VMSA__ > 7) + pte |= ARM_PTE_SH(SH_OUTER_MEMORY); #else - pte |= ARM_PTE_SH; + pte |= ARM_PTE_SH; #endif } @@ -5792,8 +5925,9 @@ pmap_enter_pv( * is recycled. An IOMMU mapping of a freed/recycled page is * considered a security violation & potential DMA corruption path.*/ first_cpu_mapping = ((pmap != NULL) && !(pvh_flags & PVH_FLAG_CPU)); - if (first_cpu_mapping) + if (first_cpu_mapping) { pvh_flags |= PVH_FLAG_CPU; + } #else first_cpu_mapping = pvh_test_type(pv_h, PVH_TYPE_NULL); #endif @@ -5811,11 +5945,11 @@ pmap_enter_pv( CLR_REUSABLE_PAGE(pai); } } - if (pvh_test_type(pv_h, PVH_TYPE_NULL)) { + if (pvh_test_type(pv_h, PVH_TYPE_NULL)) { pvh_update_head(pv_h, pte_p, PVH_TYPE_PTEP); if (pmap != NULL && pmap != kernel_pmap && ((options & PMAP_OPTIONS_ALT_ACCT) || - PMAP_FOOTPRINT_SUSPENDED(pmap)) && + PMAP_FOOTPRINT_SUSPENDED(pmap)) && IS_INTERNAL_PAGE(pai)) { /* * Make a note to ourselves that this mapping is using alternative @@ -5832,15 +5966,16 @@ pmap_enter_pv( } } else { if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { - pt_entry_t *pte1_p; + pt_entry_t *pte1_p; /* * convert pvh list from PVH_TYPE_PTEP to PVH_TYPE_PVEP */ pte1_p = pvh_ptep(pv_h); pvh_set_flags(pv_h, pvh_flags); - if((*pve_p == PV_ENTRY_NULL) && (!pv_alloc(pmap, pai, pve_p))) + if ((*pve_p == PV_ENTRY_NULL) && (!pv_alloc(pmap, pai, pve_p))) { return FALSE; + } pve_set_ptep(*pve_p, pte1_p); (*pve_p)->pve_next = PV_ENTRY_NULL; @@ -5857,15 +5992,16 @@ pmap_enter_pv( *pve_p = PV_ENTRY_NULL; } else if (!pvh_test_type(pv_h, PVH_TYPE_PVEP)) { panic("%s: unexpected PV head %p, pte_p=%p pmap=%p pv_h=%p", - __func__, *pv_h, pte_p, pmap, pv_h); + __func__, *pv_h, pte_p, pmap, pv_h); } /* * Set up pv_entry for this new mapping and then * add it to the list for this physical page. 
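 *
 * Note the FALSE returns in pmap_enter_pv: pv_alloc() may have to drop
 * the PVH lock to refill the pv_entry pools, so the function reports
 * failure and lets the caller retry with the lock re-taken.
 * Caller-side shape (simplified from pmap_enter_options_internal):
 *
 *     Pmap_enter_loop:
 *         // recheck wimg/pte state: it may change while unlocked
 *         if (!pmap_enter_pv(pmap, pte_p, pai, options, &pve_p, &is_altacct))
 *             goto Pmap_enter_loop;
 *
 * which is why the cache-attribute recheck sits right after the
 * Pmap_enter_loop label below.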
*/ pvh_set_flags(pv_h, pvh_flags); - if((*pve_p == PV_ENTRY_NULL) && (!pv_alloc(pmap, pai, pve_p))) + if ((*pve_p == PV_ENTRY_NULL) && (!pv_alloc(pmap, pai, pve_p))) { return FALSE; + } pve_set_ptep(*pve_p, pte_p); (*pve_p)->pve_next = PV_ENTRY_NULL; @@ -5874,7 +6010,7 @@ pmap_enter_pv( if (pmap != NULL && pmap != kernel_pmap && ((options & PMAP_OPTIONS_ALT_ACCT) || - PMAP_FOOTPRINT_SUSPENDED(pmap)) && + PMAP_FOOTPRINT_SUSPENDED(pmap)) && IS_INTERNAL_PAGE(pai)) { /* * Make a note to ourselves that this @@ -5898,7 +6034,7 @@ pmap_enter_pv( pvh_set_flags(pv_h, pvh_flags); return TRUE; -} +} MARK_AS_PMAP_TEXT static kern_return_t pmap_enter_options_internal( @@ -5928,7 +6064,7 @@ pmap_enter_options_internal( if ((v) & PAGE_MASK) { panic("pmap_enter_options() pmap %p v 0x%llx\n", - pmap, (uint64_t)v); + pmap, (uint64_t)v); } if ((prot & VM_PROT_EXECUTE) && (prot & VM_PROT_WRITE) && (pmap == kernel_pmap)) { @@ -5940,9 +6076,9 @@ pmap_enter_options_internal( #else if ((prot & VM_PROT_EXECUTE)) #endif - set_NX = FALSE; - else + { set_NX = FALSE;} else { set_NX = TRUE; + } #if (__ARM_VMSA__ > 7) if (prot == VM_PROT_EXECUTE) { @@ -5971,8 +6107,9 @@ pmap_enter_options_internal( kr = pmap_expand(pmap, v, options, PMAP_TT_MAX_LEVEL); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } PMAP_LOCK(pmap); } @@ -5996,7 +6133,7 @@ Pmap_enter_retry: /* one less "compressed" */ OSAddAtomic64(-1, &pmap->stats.compressed); pmap_ledger_debit(pmap, task_ledgers.internal_compressed, - PAGE_SIZE); + PAGE_SIZE); was_compressed = TRUE; if (spte & ARM_PTE_COMPRESSED_ALT) { @@ -6030,16 +6167,18 @@ Pmap_enter_retry: * wired memory statistics for user pmaps, but kernel PTEs are assumed * to be wired in nearly all cases. For VM layer functionality, the wired * count in vm_page_t is sufficient. 
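 *
 * On the "compressed" accounting at the top of Pmap_enter_retry: a
 * compressed marker is a PTE that is invalid to the MMU but carries
 * software bits recording that the page went to the compressor.
 * Sketch, with hypothetical bit assignments:
 *
 *     #define SK_PTE_COMPRESSED     (1ULL << 62) // page in compressor
 *     #define SK_PTE_COMPRESSED_ALT (1ULL << 61) // alt-accounted page
 *
 *     // hardware sees a fault; software sees where the page went
 *     is_compressed = ARM_PTE_IS_COMPRESSED(spte);
 *
 * Re-entering such a page debits stats.compressed and the
 * internal_compressed ledger, as above.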
*/ - if (wired && pmap != kernel_pmap) + if (wired && pmap != kernel_pmap) { pte |= ARM_PTE_WIRED; + } -#if (__ARM_VMSA__ == 7) - if (set_NX) +#if (__ARM_VMSA__ == 7) + if (set_NX) { pte |= ARM_PTE_NX; + } #else - if (set_NX) + if (set_NX) { pte |= ARM_PTE_NX | ARM_PTE_PNX; - else { + } else { if (pmap == kernel_pmap) { pte |= ARM_PTE_NX; } else { @@ -6059,78 +6198,79 @@ Pmap_enter_retry: pte |= ARM_PTE_AP(AP_RONA); pa_set_bits(pa, PP_ATTR_REFERENCED); } -#if (__ARM_VMSA__ == 7) - if ((_COMM_PAGE_BASE_ADDRESS <= v) && (v < _COMM_PAGE_BASE_ADDRESS + _COMM_PAGE_AREA_LENGTH)) +#if (__ARM_VMSA__ == 7) + if ((_COMM_PAGE_BASE_ADDRESS <= v) && (v < _COMM_PAGE_BASE_ADDRESS + _COMM_PAGE_AREA_LENGTH)) { pte = (pte & ~(ARM_PTE_APMASK)) | ARM_PTE_AP(AP_RORO); + } #endif } else { if (!(pmap->nested)) { pte |= ARM_PTE_NG; } else if ((pmap->nested_region_asid_bitmap) - && (v >= pmap->nested_region_subord_addr) - && (v < (pmap->nested_region_subord_addr+pmap->nested_region_size))) { - + && (v >= pmap->nested_region_subord_addr) + && (v < (pmap->nested_region_subord_addr + pmap->nested_region_size))) { unsigned int index = (unsigned int)((v - pmap->nested_region_subord_addr) >> ARM_TT_TWIG_SHIFT); if ((pmap->nested_region_asid_bitmap) - && testbit(index, (int *)pmap->nested_region_asid_bitmap)) + && testbit(index, (int *)pmap->nested_region_asid_bitmap)) { pte |= ARM_PTE_NG; + } } #if MACH_ASSERT if (pmap->nested_pmap != NULL) { vm_map_address_t nest_vaddr; - pt_entry_t *nest_pte_p; + pt_entry_t *nest_pte_p; nest_vaddr = v - pmap->nested_region_grand_addr + pmap->nested_region_subord_addr; if ((nest_vaddr >= pmap->nested_region_subord_addr) - && (nest_vaddr < (pmap->nested_region_subord_addr+pmap->nested_region_size)) - && ((nest_pte_p = pmap_pte(pmap->nested_pmap, nest_vaddr)) != PT_ENTRY_NULL) - && (*nest_pte_p != ARM_PTE_TYPE_FAULT) - && (!ARM_PTE_IS_COMPRESSED(*nest_pte_p)) - && (((*nest_pte_p) & ARM_PTE_NG) != ARM_PTE_NG)) { + && (nest_vaddr < (pmap->nested_region_subord_addr + pmap->nested_region_size)) + && ((nest_pte_p = pmap_pte(pmap->nested_pmap, nest_vaddr)) != PT_ENTRY_NULL) + && (*nest_pte_p != ARM_PTE_TYPE_FAULT) + && (!ARM_PTE_IS_COMPRESSED(*nest_pte_p)) + && (((*nest_pte_p) & ARM_PTE_NG) != ARM_PTE_NG)) { unsigned int index = (unsigned int)((v - pmap->nested_region_subord_addr) >> ARM_TT_TWIG_SHIFT); if ((pmap->nested_pmap->nested_region_asid_bitmap) - && !testbit(index, (int *)pmap->nested_pmap->nested_region_asid_bitmap)) { - + && !testbit(index, (int *)pmap->nested_pmap->nested_region_asid_bitmap)) { panic("pmap_enter(): Global attribute conflict nest_pte_p=%p pmap=%p v=0x%llx spte=0x%llx \n", - nest_pte_p, pmap, (uint64_t)v, (uint64_t)*nest_pte_p); + nest_pte_p, pmap, (uint64_t)v, (uint64_t)*nest_pte_p); } } - } #endif if (prot & VM_PROT_WRITE) { - if (pa_valid(pa) && (!pa_test_bits(pa, PP_ATTR_MODIFIED))) { if (fault_type & VM_PROT_WRITE) { - if (set_XO) + if (set_XO) { pte |= ARM_PTE_AP(AP_RWNA); - else + } else { pte |= ARM_PTE_AP(AP_RWRW); + } pa_set_bits(pa, PP_ATTR_REFERENCED | PP_ATTR_MODIFIED); } else { - if (set_XO) + if (set_XO) { pte |= ARM_PTE_AP(AP_RONA); - else + } else { pte |= ARM_PTE_AP(AP_RORO); + } pa_set_bits(pa, PP_ATTR_REFERENCED); - pte_set_ffr(pte, 1); + pte_set_was_writeable(pte, true); } } else { - if (set_XO) + if (set_XO) { pte |= ARM_PTE_AP(AP_RWNA); - else + } else { pte |= ARM_PTE_AP(AP_RWRW); + } pa_set_bits(pa, PP_ATTR_REFERENCED); } } else { - - if (set_XO) + if (set_XO) { pte |= ARM_PTE_AP(AP_RONA); - else + } else { pte |= ARM_PTE_AP(AP_RORO); + } 
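/*
 * The AP selection above implements lazy modified-bit tracking: a page
 * mapped writable but entered on a non-write fault goes in read-only
 * with the software "was writeable" flag (pte_set_was_writeable, the
 * rename of pte_set_ffr in this change), so the first store can trap
 * and let the fast-fault path set PP_ATTR_MODIFIED and upgrade the
 * PTE. In sketch form:
 *
 *     if (fault_type & VM_PROT_WRITE) {
 *         pte |= ARM_PTE_AP(AP_RWRW);        // writing now: dirty
 *     } else {
 *         pte |= ARM_PTE_AP(AP_RORO);        // read-only for now
 *         pte_set_was_writeable(pte, true);  // upgrade on write fault
 *     }
 */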
pa_set_bits(pa, PP_ATTR_REFERENCED); } } @@ -6165,12 +6305,13 @@ Pmap_enter_retry: pai = (int)pa_index(pa); LOCK_PVH(pai); - + Pmap_enter_loop: - if ((flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT))) + if ((flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT))) { wimg_bits = (flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT)); - else + } else { wimg_bits = pmap_cache_attributes(pn); + } /* We may be retrying this operation after dropping the PVH lock. * Cache attributes for the physical page may have changed while the lock @@ -6200,8 +6341,9 @@ Pmap_enter_loop: UNLOCK_PVH(pai); goto Pmap_enter_retry; } - if (!pmap_enter_pv(pmap, pte_p, pai, options, &pve_p, &is_altacct)) + if (!pmap_enter_pv(pmap, pte_p, pai, options, &pve_p, &is_altacct)) { goto Pmap_enter_loop; + } pmap_enter_pte(pmap, pte_p, pte, v); @@ -6254,25 +6396,26 @@ Pmap_enter_loop: * touch phys_footprint here. */ pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); - } else { + } else { pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); } } } OSAddAtomic(1, (SInt32 *) &pmap->stats.resident_count); - if (pmap->stats.resident_count > pmap->stats.resident_max) + if (pmap->stats.resident_count > pmap->stats.resident_max) { pmap->stats.resident_max = pmap->stats.resident_count; + } } else { - if (prot & VM_PROT_EXECUTE) { kr = KERN_FAILURE; goto Pmap_enter_cleanup; } wimg_bits = pmap_cache_attributes(pn); - if ((flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT))) + if ((flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT))) { wimg_bits = (wimg_bits & (~VM_WIMG_MASK)) | (flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT)); + } pte |= wimg_to_pte(wimg_bits); @@ -6285,8 +6428,9 @@ Pmap_enter_cleanup: if (refcnt != NULL) { assert(refcnt_updated); - if (OSAddAtomic16(-1, (volatile int16_t*)refcnt) <= 0) + if (OSAddAtomic16(-1, (volatile int16_t*)refcnt) <= 0) { panic("pmap_enter(): over-release of ptdp %p for pte %p\n", ptep_get_ptd(pte_p), pte_p); + } } Pmap_enter_return: @@ -6295,16 +6439,18 @@ Pmap_enter_return: if (pgtrace_enabled) { // Clone and invalidate original mapping if eligible for (int i = 0; i < PAGE_RATIO; i++) { - pmap_pgtrace_enter_clone(pmap, v + ARM_PGBYTES*i, 0, 0); + pmap_pgtrace_enter_clone(pmap, v + ARM_PGBYTES * i, 0, 0); } } #endif - if (pve_p != PV_ENTRY_NULL) + if (pve_p != PV_ENTRY_NULL) { pv_free(pve_p); + } - if (wiredcnt_updated && (OSAddAtomic16(-1, (volatile int16_t*)wiredcnt) <= 0)) + if (wiredcnt_updated && (OSAddAtomic16(-1, (volatile int16_t*)wiredcnt) <= 0)) { panic("pmap_enter(): over-unwire of ptdp %p for pte %p\n", ptep_get_ptd(pte_p), pte_p); + } PMAP_UNLOCK(pmap); @@ -6321,12 +6467,12 @@ pmap_enter_options( unsigned int flags, boolean_t wired, unsigned int options, - __unused void *arg) + __unused void *arg) { kern_return_t kr = KERN_FAILURE; PMAP_TRACE(2, PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v), pn, prot); + VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v), pn, prot); kr = pmap_enter_options_internal(pmap, v, pn, prot, fault_type, flags, wired, options); pv_water_mark_check(); @@ -6365,22 +6511,24 @@ pmap_change_wiring_internal( pte_p = pmap_pte(pmap, v); assert(pte_p != PT_ENTRY_NULL); pa = pte_to_pa(*pte_p); - if (pa_valid(pa)) + if (pa_valid(pa)) { LOCK_PVH((int)pa_index(pa)); + } if (wired && !pte_is_wired(*pte_p)) { pte_set_wired(pte_p, wired); OSAddAtomic(+1, (SInt32 *) &pmap->stats.wired_count); pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE); } else if (!wired && pte_is_wired(*pte_p)) { - 
PMAP_STATS_ASSERTF(pmap->stats.wired_count >= 1, pmap, "stats.wired_count %d", pmap->stats.wired_count); + PMAP_STATS_ASSERTF(pmap->stats.wired_count >= 1, pmap, "stats.wired_count %d", pmap->stats.wired_count); pte_set_wired(pte_p, wired); OSAddAtomic(-1, (SInt32 *) &pmap->stats.wired_count); pmap_ledger_debit(pmap, task_ledgers.wired_mem, PAGE_SIZE); } - if (pa_valid(pa)) + if (pa_valid(pa)) { UNLOCK_PVH((int)pa_index(pa)); + } PMAP_UNLOCK(pmap); } @@ -6399,7 +6547,7 @@ pmap_find_phys_internal( pmap_t pmap, addr64_t va) { - ppnum_t ppn=0; + ppnum_t ppn = 0; VALIDATE_PMAP(pmap); @@ -6421,14 +6569,17 @@ pmap_find_phys( pmap_t pmap, addr64_t va) { - pmap_paddr_t pa=0; + pmap_paddr_t pa = 0; - if (pmap == kernel_pmap) + if (pmap == kernel_pmap) { pa = mmu_kvtop(va); - else if ((current_thread()->map) && (pmap == vm_map_pmap(current_thread()->map))) + } else if ((current_thread()->map) && (pmap == vm_map_pmap(current_thread()->map))) { pa = mmu_uvtop(va); + } - if (pa) return (ppnum_t)(pa >> PAGE_SHIFT); + if (pa) { + return (ppnum_t)(pa >> PAGE_SHIFT); + } if (not_in_kdp) { return pmap_find_phys_internal(pmap, va); @@ -6444,12 +6595,15 @@ kvtophys( pmap_paddr_t pa; pa = mmu_kvtop(va); - if (pa) return pa; + if (pa) { + return pa; + } pa = ((pmap_paddr_t)pmap_vtophys(kernel_pmap, va)) << PAGE_SHIFT; - if (pa) + if (pa) { pa |= (va & PAGE_MASK); + } - return ((pmap_paddr_t)pa); + return (pmap_paddr_t)pa; } ppnum_t @@ -6461,14 +6615,15 @@ pmap_vtophys( return 0; } -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) tt_entry_t *tte_p, tte; pt_entry_t *pte_p; ppnum_t ppn; tte_p = pmap_tte(pmap, va); - if (tte_p == (tt_entry_t *) NULL) + if (tte_p == (tt_entry_t *) NULL) { return (ppnum_t) 0; + } tte = *tte_p; if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { @@ -6478,20 +6633,22 @@ pmap_vtophys( if (ppn != 0 && ARM_PTE_IS_COMPRESSED(*pte_p)) { panic("pmap_vtophys(%p,0x%llx): compressed pte_p=%p 0x%llx with ppn=0x%x\n", - pmap, va, pte_p, (uint64_t) (*pte_p), ppn); + pmap, va, pte_p, (uint64_t) (*pte_p), ppn); } #endif /* DEVELOPMENT || DEBUG */ - } else if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) - if ((tte & ARM_TTE_BLOCK_SUPER) == ARM_TTE_BLOCK_SUPER) + } else if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) { + if ((tte & ARM_TTE_BLOCK_SUPER) == ARM_TTE_BLOCK_SUPER) { ppn = (ppnum_t) atop(suptte_to_pa(tte) | (va & ARM_TT_L1_SUPER_OFFMASK)); - else + } else { ppn = (ppnum_t) atop(sectte_to_pa(tte) | (va & ARM_TT_L1_BLOCK_OFFMASK)); - else + } + } else { ppn = 0; + } #else - tt_entry_t *ttp; - tt_entry_t tte; - ppnum_t ppn=0; + tt_entry_t *ttp; + tt_entry_t tte; + ppnum_t ppn = 0; /* Level 0 currently unused */ @@ -6503,20 +6660,22 @@ pmap_vtophys( /* Get first-level (1GB) entry */ ttp = pmap_tt1e(pmap, va); tte = *ttp; - if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) - return (ppn); + if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) { + return ppn; + } tte = ((tt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt2_index(pmap, va)]; #endif - if ((tte & ARM_TTE_VALID) != (ARM_TTE_VALID)) - return (ppn); + if ((tte & ARM_TTE_VALID) != (ARM_TTE_VALID)) { + return ppn; + } if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) { - ppn = (ppnum_t) atop((tte & ARM_TTE_BLOCK_L2_MASK)| (va & ARM_TT_L2_OFFMASK)); - return(ppn); + ppn = (ppnum_t) atop((tte & ARM_TTE_BLOCK_L2_MASK) | (va & ARM_TT_L2_OFFMASK)); + return ppn; } tte = ((tt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt3_index(pmap, va)]; - ppn = 
(ppnum_t) atop((tte & ARM_PTE_MASK)| (va & ARM_TT_L3_OFFMASK)); + ppn = (ppnum_t) atop((tte & ARM_PTE_MASK) | (va & ARM_TT_L3_OFFMASK)); #endif return ppn; @@ -6527,8 +6686,8 @@ pmap_extract_internal( pmap_t pmap, vm_map_address_t va) { - pmap_paddr_t pa=0; - ppnum_t ppn=0; + pmap_paddr_t pa = 0; + ppnum_t ppn = 0; if (pmap == NULL) { return 0; @@ -6540,8 +6699,9 @@ pmap_extract_internal( ppn = pmap_vtophys(pmap, va); - if (ppn != 0) - pa = ptoa(ppn)| ((va) & PAGE_MASK); + if (ppn != 0) { + pa = ptoa(ppn) | ((va) & PAGE_MASK); + } PMAP_UNLOCK(pmap); @@ -6560,14 +6720,17 @@ pmap_extract( pmap_t pmap, vm_map_address_t va) { - pmap_paddr_t pa=0; + pmap_paddr_t pa = 0; - if (pmap == kernel_pmap) + if (pmap == kernel_pmap) { pa = mmu_kvtop(va); - else if (pmap == vm_map_pmap(current_thread()->map)) + } else if (pmap == vm_map_pmap(current_thread()->map)) { pa = mmu_uvtop(va); + } - if (pa) return pa; + if (pa) { + return pa; + } return pmap_extract_internal(pmap, va); } @@ -6595,7 +6758,7 @@ pmap_init_pte_page( * on 4KB hardware, we may already have allocated a page table descriptor for a * bootstrap request, so we check for an existing PTD here. */ - ptdp = ptd_alloc(pmap); + ptdp = ptd_alloc(pmap, true); pvh_update_head_unlocked(pvh, ptdp, PVH_TYPE_PTDP); } else { panic("pmap_init_pte_page(): pte_p %p", pte_p); @@ -6631,53 +6794,57 @@ pmap_expand( unsigned int options, unsigned int level) { -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) vm_offset_t pa; - tt_entry_t *tte_p; - tt_entry_t *tt_p; - unsigned int i; + tt_entry_t *tte_p; + tt_entry_t *tt_p; + unsigned int i; while (tte_index(pmap, v) >= pmap->tte_index_max) { - tte_p = pmap_tt1_allocate(pmap, 2*ARM_PGBYTES, ((options & PMAP_OPTIONS_NOWAIT)? PMAP_TT_ALLOCATE_NOWAIT : 0)); - if (tte_p == (tt_entry_t *)0) + tte_p = pmap_tt1_allocate(pmap, 2 * ARM_PGBYTES, ((options & PMAP_OPTIONS_NOWAIT)? 
PMAP_TT_ALLOCATE_NOWAIT : 0)); + if (tte_p == (tt_entry_t *)0) { return KERN_RESOURCE_SHORTAGE; + } PMAP_LOCK(pmap); - if (pmap->tte_index_max > NTTES) { - pmap_tt1_deallocate(pmap, tte_p, 2*ARM_PGBYTES, PMAP_TT_DEALLOCATE_NOBLOCK); + if (pmap->tte_index_max > NTTES) { + pmap_tt1_deallocate(pmap, tte_p, 2 * ARM_PGBYTES, PMAP_TT_DEALLOCATE_NOBLOCK); PMAP_UNLOCK(pmap); break; } pmap_simple_lock(&pmap->tt1_lock); - for (i = 0; i < pmap->tte_index_max; i++) + for (i = 0; i < pmap->tte_index_max; i++) { tte_p[i] = pmap->tte[i]; - for (i = NTTES; i < 2*NTTES; i++) + } + for (i = NTTES; i < 2 * NTTES; i++) { tte_p[i] = ARM_TTE_TYPE_FAULT; + } pmap->prev_tte = pmap->tte; pmap->tte = tte_p; pmap->ttep = ml_static_vtop((vm_offset_t)pmap->tte); - FLUSH_PTE_RANGE(pmap->tte, pmap->tte + (2*NTTES)); + FLUSH_PTE_RANGE(pmap->tte, pmap->tte + (2 * NTTES)); - pmap->tte_index_max = 2*NTTES; + pmap->tte_index_max = 2 * NTTES; pmap->stamp = hw_atomic_add(&pmap_stamp, 1); - for (i = 0; i < NTTES; i++) + for (i = 0; i < NTTES; i++) { pmap->prev_tte[i] = ARM_TTE_TYPE_FAULT; + } FLUSH_PTE_RANGE(pmap->prev_tte, pmap->prev_tte + NTTES); pmap_simple_unlock(&pmap->tt1_lock); PMAP_UNLOCK(pmap); pmap_set_pmap(pmap, current_thread()); - } - if (level == 1) - return (KERN_SUCCESS); + if (level == 1) { + return KERN_SUCCESS; + } { tt_entry_t *tte_next_p; @@ -6686,10 +6853,10 @@ pmap_expand( pa = 0; if (pmap_pte(pmap, v) != PT_ENTRY_NULL) { PMAP_UNLOCK(pmap); - return (KERN_SUCCESS); + return KERN_SUCCESS; } tte_p = &pmap->tte[ttenum(v & ~ARM_TT_L1_PT_OFFMASK)]; - for (i = 0, tte_next_p = tte_p; i<4; i++) { + for (i = 0, tte_next_p = tte_p; i < 4; i++) { if (tte_to_pa(*tte_next_p)) { pa = tte_to_pa(*tte_next_p); break; @@ -6704,7 +6871,7 @@ pmap_expand( PMAP_TRACE(3, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~ARM_TT_L1_OFFMASK), VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE), *tte_p); PMAP_UNLOCK(pmap); - return (KERN_SUCCESS); + return KERN_SUCCESS; } PMAP_UNLOCK(pmap); } @@ -6716,7 +6883,7 @@ pmap_expand( * Allocate a VM page for the level 2 page table entries. */ while (pmap_tt_allocate(pmap, &tt_p, PMAP_TT_L2_LEVEL, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) { - if(options & PMAP_OPTIONS_NOWAIT) { + if (options & PMAP_OPTIONS_NOWAIT) { return KERN_RESOURCE_SHORTAGE; } VM_PAGE_WAIT(); @@ -6729,18 +6896,18 @@ pmap_expand( if (pmap_pte(pmap, v) == PT_ENTRY_NULL) { tt_entry_t *tte_next_p; - pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, PMAP_TT_L2_LEVEL, FALSE); + pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, PMAP_TT_L2_LEVEL, FALSE); pa = kvtophys((vm_offset_t)tt_p); #ifndef __ARM_L1_PTW__ CleanPoU_DcacheRegion((vm_offset_t) phystokv(pa), PAGE_SIZE); #endif tte_p = &pmap->tte[ttenum(v)]; - for (i = 0, tte_next_p = tte_p; i<4; i++) { + for (i = 0, tte_next_p = tte_p; i < 4; i++) { *tte_next_p = pa_to_tte(pa) | ARM_TTE_TYPE_TABLE; PMAP_TRACE(3, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_PT_OFFMASK) + (i * ARM_TT_L1_SIZE)), VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_PT_OFFMASK) + ((i + 1) * ARM_TT_L1_SIZE)), *tte_p); tte_next_p++; - pa = pa +0x400; + pa = pa + 0x400; } FLUSH_PTE_RANGE(tte_p, tte_p + 4); @@ -6753,31 +6920,30 @@ pmap_expand( tt_p = (tt_entry_t *)NULL; } } - return (KERN_SUCCESS); + return KERN_SUCCESS; #else - pmap_paddr_t pa; + pmap_paddr_t pa; #if __ARM64_TWO_LEVEL_PMAP__ /* If we are using a two level page table, we'll start at L2. 
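[Editor's note, not part of the patch.] For context, pmap_expand()'s arm64 path below walks from the root toward the requested level, allocating and installing a next-level table wherever the twig entry is still invalid, with the PMAP_OPTIONS_NOWAIT early-out and a re-check after retaking the lock. A userspace sketch of the same walk, assuming a 4K granule with 9 index bits per level; tt_node, TTE_VALID, and the shift math are stand-ins, and allocation-failure handling is omitted.

#include <stdint.h>
#include <stdlib.h>

typedef uint64_t tte_t;
#define TTE_VALID 1ull

struct tt_node { tte_t slot[512]; }; // hypothetical table page

// Ensure a table exists for va at every level above `level`,
// allocating and installing one where the entry is still invalid.
static struct tt_node *
expand(struct tt_node *root, uint64_t va, unsigned level)
{
    struct tt_node *tt = root;
    for (unsigned l = 1; l < level; l++) {
        unsigned idx = (va >> (39 - 9 * l)) & 0x1ff; // L1 = bits 38:30, L2 = 29:21
        if (!(tt->slot[idx] & TTE_VALID)) {
            struct tt_node *n = calloc(1, sizeof *n);
            tt->slot[idx] = (uintptr_t)n | TTE_VALID; // install next level
        }
        tt = (struct tt_node *)(uintptr_t)(tt->slot[idx] & ~TTE_VALID);
    }
    return tt; // table that will hold the leaf entry for va
}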
*/ - unsigned int ttlevel = 2; + unsigned int ttlevel = 2; #else /* Otherwise, we start at L1 (we use 3 levels by default). */ - unsigned int ttlevel = 1; + unsigned int ttlevel = 1; #endif - tt_entry_t *tte_p; - tt_entry_t *tt_p; + tt_entry_t *tte_p; + tt_entry_t *tt_p; pa = 0x0ULL; tt_p = (tt_entry_t *)NULL; for (; ttlevel < level; ttlevel++) { - PMAP_LOCK(pmap); if (ttlevel == 1) { if ((pmap_tt2e(pmap, v) == PT_ENTRY_NULL)) { PMAP_UNLOCK(pmap); while (pmap_tt_allocate(pmap, &tt_p, PMAP_TT_L2_LEVEL, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) { - if(options & PMAP_OPTIONS_NOWAIT) { + if (options & PMAP_OPTIONS_NOWAIT) { return KERN_RESOURCE_SHORTAGE; } VM_PAGE_WAIT(); @@ -6789,31 +6955,31 @@ pmap_expand( tte_p = pmap_tt1e( pmap, v); *tte_p = (pa & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID; PMAP_TRACE(3, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~ARM_TT_L1_OFFMASK), - VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE), *tte_p); + VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE), *tte_p); pa = 0x0ULL; tt_p = (tt_entry_t *)NULL; - if ((pmap == kernel_pmap) && (VM_MIN_KERNEL_ADDRESS < 0x00000000FFFFFFFFULL)) - current_pmap()->tte[v>>ARM_TT_L1_SHIFT] = kernel_pmap->tte[v>>ARM_TT_L1_SHIFT]; + if ((pmap == kernel_pmap) && (VM_MIN_KERNEL_ADDRESS < 0x00000000FFFFFFFFULL)) { + current_pmap()->tte[v >> ARM_TT_L1_SHIFT] = kernel_pmap->tte[v >> ARM_TT_L1_SHIFT]; + } } - } } else if (ttlevel == 2) { if (pmap_tt3e(pmap, v) == PT_ENTRY_NULL) { PMAP_UNLOCK(pmap); while (pmap_tt_allocate(pmap, &tt_p, PMAP_TT_L3_LEVEL, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) { - if(options & PMAP_OPTIONS_NOWAIT) { + if (options & PMAP_OPTIONS_NOWAIT) { return KERN_RESOURCE_SHORTAGE; } VM_PAGE_WAIT(); } PMAP_LOCK(pmap); if ((pmap_tt3e(pmap, v) == PT_ENTRY_NULL)) { - pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v , PMAP_TT_L3_LEVEL, FALSE); + pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, PMAP_TT_L3_LEVEL, FALSE); pa = kvtophys((vm_offset_t)tt_p); tte_p = pmap_tt2e( pmap, v); *tte_p = (pa & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID; PMAP_TRACE(3, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~ARM_TT_L2_OFFMASK), - VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L2_OFFMASK) + ARM_TT_L2_SIZE), *tte_p); + VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L2_OFFMASK) + ARM_TT_L2_SIZE), *tte_p); pa = 0x0ULL; tt_p = (tt_entry_t *)NULL; } @@ -6823,12 +6989,12 @@ pmap_expand( PMAP_UNLOCK(pmap); if (tt_p != (tt_entry_t *)NULL) { - pmap_tt_deallocate(pmap, tt_p, ttlevel+1); + pmap_tt_deallocate(pmap, tt_p, ttlevel + 1); tt_p = (tt_entry_t *)NULL; } } - return (KERN_SUCCESS); + return KERN_SUCCESS; #endif } @@ -6844,8 +7010,9 @@ pmap_expand( void pmap_collect(pmap_t pmap) { - if (pmap == PMAP_NULL) + if (pmap == PMAP_NULL) { return; + } #if 0 PMAP_LOCK(pmap); @@ -6861,7 +7028,7 @@ pmap_collect(pmap_t pmap) /* * Routine: pmap_gc * Function: - * Pmap garbage collection + * Pmap garbage collection * Called by the pageout daemon when pages are scarce. 
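[Editor's note, not part of the patch.] pmap_gc() below relies on a small handshake: PMAP_GC_INFLIGHT marks a pmap as being collected so a concurrent pmap_destroy() knows to wait, PMAP_GC_WAIT asks the collector to wake that waiter, and the pmaps list lock is dropped around the expensive pmap_collect() call. A compressed pthread model of the walk; struct pm, collect(), and the flag names are stand-ins, and it is simplified in that the real loop also pins the successor element before waking waiters.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define GC_INFLIGHT 1u
#define GC_WAIT     2u

struct pm { struct pm *next; unsigned gc_status; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void collect(struct pm *p) { (void)p; } // expensive work, lock dropped

static void
gc_walk(struct pm *head)
{
    pthread_mutex_lock(&list_lock);
    for (struct pm *p = head; p != NULL; ) {
        p->gc_status |= GC_INFLIGHT;      // claim the element
        pthread_mutex_unlock(&list_lock); // drop lock for the work
        collect(p);
        pthread_mutex_lock(&list_lock);
        bool waiters = (p->gc_status & GC_WAIT) != 0;
        p->gc_status &= ~(GC_INFLIGHT | GC_WAIT);
        struct pm *next = p->next;
        if (waiters) {
            // kernel analogue: thread_wakeup(&p->gc_status)
        }
        p = next;
    }
    pthread_mutex_unlock(&list_lock);
}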
* */ @@ -6869,32 +7036,34 @@ void pmap_gc( void) { - pmap_t pmap, pmap_next; - boolean_t gc_wait; + pmap_t pmap, pmap_next; + boolean_t gc_wait; if (pmap_gc_allowed && (pmap_gc_allowed_by_time_throttle || - pmap_gc_forced)) { + pmap_gc_forced)) { pmap_gc_forced = FALSE; pmap_gc_allowed_by_time_throttle = FALSE; pmap_simple_lock(&pmaps_lock); pmap = CAST_DOWN_EXPLICIT(pmap_t, queue_first(&map_pmap_list)); while (!queue_end(&map_pmap_list, (queue_entry_t)pmap)) { - if (!(pmap->gc_status & PMAP_GC_INFLIGHT)) + if (!(pmap->gc_status & PMAP_GC_INFLIGHT)) { pmap->gc_status |= PMAP_GC_INFLIGHT; + } pmap_simple_unlock(&pmaps_lock); pmap_collect(pmap); pmap_simple_lock(&pmaps_lock); gc_wait = (pmap->gc_status & PMAP_GC_WAIT); - pmap->gc_status &= ~(PMAP_GC_INFLIGHT|PMAP_GC_WAIT); + pmap->gc_status &= ~(PMAP_GC_INFLIGHT | PMAP_GC_WAIT); pmap_next = CAST_DOWN_EXPLICIT(pmap_t, queue_next(&pmap->pmaps)); if (gc_wait) { - if (!queue_end(&map_pmap_list, (queue_entry_t)pmap_next)) + if (!queue_end(&map_pmap_list, (queue_entry_t)pmap_next)) { pmap_next->gc_status |= PMAP_GC_INFLIGHT; + } pmap_simple_unlock(&pmaps_lock); - thread_wakeup((event_t) & pmap->gc_status); + thread_wakeup((event_t) &pmap->gc_status); pmap_simple_lock(&pmaps_lock); } pmap = pmap_next; @@ -6940,8 +7109,9 @@ pmap_attribute_cache_sync( { if (size > PAGE_SIZE) { panic("pmap_attribute_cache_sync size: 0x%llx\n", (uint64_t)size); - } else + } else { cache_sync_page(pp); + } return KERN_SUCCESS; } @@ -6982,10 +7152,11 @@ coredumpok( pt_entry_t spte; pte_p = pmap_pte(map->pmap, va); - if (0 == pte_p) + if (0 == pte_p) { return FALSE; + } spte = *pte_p; - return ((spte & ARM_PTE_ATTRINDXMASK) == ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)); + return (spte & ARM_PTE_ATTRINDXMASK) == ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT); } #endif @@ -6999,8 +7170,9 @@ fillPage( addr = (unsigned int *) phystokv(ptoa(pn)); count = PAGE_SIZE / sizeof(unsigned int); - while (count--) + while (count--) { *addr++ = fill; + } } extern void mapping_set_mod(ppnum_t pn); @@ -7032,10 +7204,10 @@ mapping_set_ref( */ MARK_AS_PMAP_TEXT static void phys_attribute_clear_internal( - ppnum_t pn, - unsigned int bits, - int options, - void *arg) + ppnum_t pn, + unsigned int bits, + int options, + void *arg) { pmap_paddr_t pa = ptoa(pn); vm_prot_t allow_mode = VM_PROT_ALL; @@ -7045,15 +7217,15 @@ phys_attribute_clear_internal( (options & PMAP_OPTIONS_NOFLUSH) && (arg == NULL)) { panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p): " - "should not clear 'modified' without flushing TLBs\n", - pn, bits, options, arg); + "should not clear 'modified' without flushing TLBs\n", + pn, bits, options, arg); } assert(pn != vm_page_fictitious_addr); if (options & PMAP_OPTIONS_CLEAR_WRITE) { assert(bits == PP_ATTR_MODIFIED); - + pmap_page_protect_options_internal(pn, (VM_PROT_ALL & ~VM_PROT_WRITE), 0); /* * We short circuit this case; it should not need to @@ -7065,10 +7237,12 @@ phys_attribute_clear_internal( pa_clear_bits(pa, bits); return; } - if (bits & PP_ATTR_REFERENCED) + if (bits & PP_ATTR_REFERENCED) { allow_mode &= ~(VM_PROT_READ | VM_PROT_EXECUTE); - if (bits & PP_ATTR_MODIFIED) + } + if (bits & PP_ATTR_MODIFIED) { allow_mode &= ~VM_PROT_WRITE; + } if (bits == PP_ATTR_NOENCRYPT) { /* @@ -7080,17 +7254,18 @@ phys_attribute_clear_internal( return; } - if (arm_force_fast_fault_internal(pn, allow_mode, options)) + if (arm_force_fast_fault_internal(pn, allow_mode, options)) { pa_clear_bits(pa, bits); + } return; } static void phys_attribute_clear( - ppnum_t pn, - unsigned int bits, - int 
options, - void *arg) + ppnum_t pn, + unsigned int bits, + int options, + void *arg) { /* * Do we really want this tracepoint? It will be extremely chatty. @@ -7227,8 +7402,8 @@ unsigned int pmap_get_refmod( ppnum_t pn) { - return (((phys_attribute_test(pn, PP_ATTR_MODIFIED)) ? VM_MEM_MODIFIED : 0) - | ((phys_attribute_test(pn, PP_ATTR_REFERENCED)) ? VM_MEM_REFERENCED : 0)); + return ((phys_attribute_test(pn, PP_ATTR_MODIFIED)) ? VM_MEM_MODIFIED : 0) + | ((phys_attribute_test(pn, PP_ATTR_REFERENCED)) ? VM_MEM_REFERENCED : 0); } /* @@ -7238,15 +7413,15 @@ pmap_get_refmod( */ void pmap_clear_refmod_options( - ppnum_t pn, - unsigned int mask, - unsigned int options, - void *arg) + ppnum_t pn, + unsigned int mask, + unsigned int options, + void *arg) { unsigned int bits; bits = ((mask & VM_MEM_MODIFIED) ? PP_ATTR_MODIFIED : 0) | - ((mask & VM_MEM_REFERENCED) ? PP_ATTR_REFERENCED : 0); + ((mask & VM_MEM_REFERENCED) ? PP_ATTR_REFERENCED : 0); phys_attribute_clear(pn, bits, options, arg); } @@ -7291,7 +7466,7 @@ pmap_disconnect_options( pmap_page_protect_options(pn, 0, options, arg); /* return ref/chg status */ - return (pmap_get_refmod(pn)); + return pmap_get_refmod(pn); } /* @@ -7307,17 +7482,21 @@ unsigned int pmap_disconnect( ppnum_t pn) { - pmap_page_protect(pn, 0); /* disconnect the page */ - return (pmap_get_refmod(pn)); /* return ref/chg status */ + pmap_page_protect(pn, 0); /* disconnect the page */ + return pmap_get_refmod(pn); /* return ref/chg status */ } boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last) { - if (ptoa(first) >= vm_last_phys) return (FALSE); - if (ptoa(last) < vm_first_phys) return (FALSE); + if (ptoa(first) >= vm_last_phys) { + return FALSE; + } + if (ptoa(last) < vm_first_phys) { + return FALSE; + } - return (TRUE); + return TRUE; } /* @@ -7335,7 +7514,9 @@ pmap_is_noencrypt( #if DEVELOPMENT || DEBUG boolean_t result = FALSE; - if (!pa_valid(ptoa(pn))) return FALSE; + if (!pa_valid(ptoa(pn))) { + return FALSE; + } result = (phys_attribute_test(pn, PP_ATTR_NOENCRYPT)); @@ -7351,7 +7532,9 @@ pmap_set_noencrypt( ppnum_t pn) { #if DEVELOPMENT || DEBUG - if (!pa_valid(ptoa(pn))) return; + if (!pa_valid(ptoa(pn))) { + return; + } phys_attribute_set(pn, PP_ATTR_NOENCRYPT); #else @@ -7364,7 +7547,9 @@ pmap_clear_noencrypt( ppnum_t pn) { #if DEVELOPMENT || DEBUG - if (!pa_valid(ptoa(pn))) return; + if (!pa_valid(ptoa(pn))) { + return; + } phys_attribute_clear(pn, PP_ATTR_NOENCRYPT, 0, NULL); #else @@ -7377,13 +7562,13 @@ void pmap_lock_phys_page(ppnum_t pn) { int pai; - pmap_paddr_t phys = ptoa(pn); + pmap_paddr_t phys = ptoa(pn); if (pa_valid(phys)) { pai = (int)pa_index(phys); LOCK_PVH(pai); } else - simple_lock(&phys_backup_lock); + { simple_lock(&phys_backup_lock, LCK_GRP_NULL);} } @@ -7391,13 +7576,13 @@ void pmap_unlock_phys_page(ppnum_t pn) { int pai; - pmap_paddr_t phys = ptoa(pn); + pmap_paddr_t phys = ptoa(pn); if (pa_valid(phys)) { pai = (int)pa_index(phys); UNLOCK_PVH(pai); } else - simple_unlock(&phys_backup_lock); + { simple_unlock(&phys_backup_lock);} } MARK_AS_PMAP_TEXT static void @@ -7405,20 +7590,20 @@ pmap_switch_user_ttb_internal( pmap_t pmap) { VALIDATE_PMAP(pmap); - pmap_cpu_data_t *cpu_data_ptr; + pmap_cpu_data_t *cpu_data_ptr; cpu_data_ptr = pmap_get_cpu_data(); -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) if ((cpu_data_ptr->cpu_user_pmap != PMAP_NULL) && (cpu_data_ptr->cpu_user_pmap != kernel_pmap)) { - unsigned int c; + unsigned int c; c = hw_atomic_sub((volatile uint32_t *)&cpu_data_ptr->cpu_user_pmap->cpu_ref, 1); if ((c == 0) && 
(cpu_data_ptr->cpu_user_pmap->prev_tte != 0)) { /* We saved off the old 1-page tt1 in pmap_expand() in case other cores were still using it. * Now that the user pmap's cpu_ref is 0, we should be able to safely free it.*/ - tt_entry_t *tt_entry; + tt_entry_t *tt_entry; tt_entry = cpu_data_ptr->cpu_user_pmap->prev_tte; cpu_data_ptr->cpu_user_pmap->prev_tte = (tt_entry_t *) NULL; @@ -7429,11 +7614,11 @@ pmap_switch_user_ttb_internal( cpu_data_ptr->cpu_user_pmap_stamp = pmap->stamp; (void) hw_atomic_add((volatile uint32_t *)&pmap->cpu_ref, 1); -#if MACH_ASSERT && __ARM_USER_PROTECT__ +#if MACH_ASSERT && __ARM_USER_PROTECT__ { unsigned int ttbr0_val, ttbr1_val; - __asm__ volatile("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val)); - __asm__ volatile("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val)); + __asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val)); + __asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val)); if (ttbr0_val != ttbr1_val) { panic("Misaligned ttbr0 %08X\n", ttbr0_val); } @@ -7441,7 +7626,7 @@ pmap_switch_user_ttb_internal( #endif if (pmap->tte_index_max == NTTES) { /* Setting TTBCR.N for TTBR0 TTBR1 boundary at 0x40000000 */ - __asm__ volatile("mcr p15,0,%0,c2,c0,2" : : "r"(2)); + __asm__ volatile ("mcr p15,0,%0,c2,c0,2" : : "r"(2)); __builtin_arm_isb(ISB_SY); #if !__ARM_USER_PROTECT__ set_mmu_ttb(pmap->ttep); @@ -7451,9 +7636,9 @@ pmap_switch_user_ttb_internal( set_mmu_ttb(pmap->ttep); #endif /* Setting TTBCR.N for TTBR0 TTBR1 boundary at 0x80000000 */ - __asm__ volatile("mcr p15,0,%0,c2,c0,2" : : "r"(1)); + __asm__ volatile ("mcr p15,0,%0,c2,c0,2" : : "r"(1)); __builtin_arm_isb(ISB_SY); -#if MACH_ASSERT && __ARM_USER_PROTECT__ +#if MACH_ASSERT && __ARM_USER_PROTECT__ if (pmap->ttep & 0x1000) { panic("Misaligned ttbr0 %08X\n", pmap->ttep); } @@ -7466,13 +7651,14 @@ pmap_switch_user_ttb_internal( #else /* (__ARM_VMSA__ == 7) */ - if (pmap != kernel_pmap) - cpu_data_ptr->cpu_nested_pmap = pmap->nested_pmap; + if (pmap != kernel_pmap) { + cpu_data_ptr->cpu_nested_pmap = pmap->nested_pmap; + } if (pmap == kernel_pmap) { pmap_clear_user_ttb_internal(); } else { - set_mmu_ttb((pmap->ttep & TTBR_BADDR_MASK)|(((uint64_t)pmap->asid) << TTBR_ASID_SHIFT)); + set_mmu_ttb((pmap->ttep & TTBR_BADDR_MASK) | (((uint64_t)pmap->asid) << TTBR_ASID_SHIFT)); } #endif } @@ -7512,9 +7698,9 @@ pmap_clear_user_ttb(void) */ MARK_AS_PMAP_TEXT static boolean_t arm_force_fast_fault_internal( - ppnum_t ppnum, - vm_prot_t allow_mode, - int options) + ppnum_t ppnum, + vm_prot_t allow_mode, + int options) { pmap_paddr_t phys = ptoa(ppnum); pv_entry_t *pve_p; @@ -7530,7 +7716,7 @@ arm_force_fast_fault_internal( assert(ppnum != vm_page_fictitious_addr); if (!pa_valid(phys)) { - return FALSE; /* Not a managed page. */ + return FALSE; /* Not a managed page. 
*/ } result = TRUE; @@ -7542,9 +7728,9 @@ arm_force_fast_fault_internal( pte_p = PT_ENTRY_NULL; pve_p = PV_ENTRY_NULL; - if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { + if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { pte_p = pvh_ptep(pv_h); - } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { + } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { pve_p = pvh_list(pv_h); } @@ -7558,8 +7744,9 @@ arm_force_fast_fault_internal( pmap_t pmap; boolean_t update_pte; - if (pve_p != PV_ENTRY_NULL) + if (pve_p != PV_ENTRY_NULL) { pte_p = pve_get_ptep(pve_p); + } if (pte_p == PT_ENTRY_NULL) { panic("pte_p is NULL: pve_p=%p ppnum=0x%x\n", pve_p, ppnum); @@ -7567,7 +7754,7 @@ arm_force_fast_fault_internal( #ifdef PVH_FLAG_IOMMU if ((vm_offset_t)pte_p & PVH_FLAG_IOMMU) { goto fff_skip_pve; - } + } #endif if (*pte_p == ARM_PTE_EMPTY) { panic("pte is NULL: pte_p=%p ppnum=0x%x\n", pte_p, ppnum); @@ -7601,16 +7788,18 @@ arm_force_fast_fault_internal( if (pmap == kernel_pmap) { if ((tmplate & ARM_PTE_APMASK) == ARM_PTE_AP(AP_RWNA)) { tmplate = ((tmplate & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA)); + pte_set_was_writeable(tmplate, true); + update_pte = TRUE; + mod_fault = TRUE; } } else { if ((tmplate & ARM_PTE_APMASK) == ARM_PTE_AP(AP_RWRW)) { tmplate = ((tmplate & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RORO)); + pte_set_was_writeable(tmplate, true); + update_pte = TRUE; + mod_fault = TRUE; } } - - pte_set_ffr(tmplate, 1); - update_pte = TRUE; - mod_fault = TRUE; } @@ -7633,9 +7822,9 @@ arm_force_fast_fault_internal( * "alternate accounting" mappings. */ } else if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) && - is_reusable && - is_internal && - pmap != kernel_pmap) { + is_reusable && + is_internal && + pmap != kernel_pmap) { /* one less "reusable" */ PMAP_STATS_ASSERTF(pmap->stats.reusable > 0, pmap, "stats.reusable %d", pmap->stats.reusable); OSAddAtomic(-1, &pmap->stats.reusable); @@ -7658,9 +7847,9 @@ arm_force_fast_fault_internal( arm_clear_fast_fault(ppnum, VM_PROT_WRITE); } } else if ((options & PMAP_OPTIONS_SET_REUSABLE) && - !is_reusable && - is_internal && - pmap != kernel_pmap) { + !is_reusable && + is_internal && + pmap != kernel_pmap) { /* one more "reusable" */ OSAddAtomic(+1, &pmap->stats.reusable); PMAP_STATS_PEAK(pmap->stats.reusable); @@ -7675,15 +7864,17 @@ arm_force_fast_fault_internal( } #ifdef PVH_FLAG_IOMMU - fff_skip_pve: +fff_skip_pve: #endif pte_p = PT_ENTRY_NULL; - if (pve_p != PV_ENTRY_NULL) + if (pve_p != PV_ENTRY_NULL) { pve_p = PVE_NEXT_PTR(pve_next(pve_p)); + } } - if (tlb_flush_needed) + if (tlb_flush_needed) { sync_tlb_flush(); + } /* update global "reusable" status for this page */ if (is_internal) { @@ -7691,7 +7882,7 @@ arm_force_fast_fault_internal( is_reusable) { CLR_REUSABLE_PAGE(pai); } else if ((options & PMAP_OPTIONS_SET_REUSABLE) && - !is_reusable) { + !is_reusable) { SET_REUSABLE_PAGE(pai); } } @@ -7709,17 +7900,17 @@ arm_force_fast_fault_internal( boolean_t arm_force_fast_fault( - ppnum_t ppnum, - vm_prot_t allow_mode, - int options, - __unused void *arg) + ppnum_t ppnum, + vm_prot_t allow_mode, + int options, + __unused void *arg) { pmap_paddr_t phys = ptoa(ppnum); assert(ppnum != vm_page_fictitious_addr); if (!pa_valid(phys)) { - return FALSE; /* Not a managed page. */ + return FALSE; /* Not a managed page. 
*/ } return arm_force_fast_fault_internal(ppnum, allow_mode, options); @@ -7742,13 +7933,13 @@ arm_clear_fast_fault( pt_entry_t *pte_p; int pai; boolean_t result; - boolean_t tlb_flush_needed = FALSE; + boolean_t tlb_flush_needed = FALSE; pv_entry_t **pv_h; assert(ppnum != vm_page_fictitious_addr); if (!pa_valid(pa)) { - return FALSE; /* Not a managed page. */ + return FALSE; /* Not a managed page. */ } result = FALSE; @@ -7758,20 +7949,21 @@ arm_clear_fast_fault( pte_p = PT_ENTRY_NULL; pve_p = PV_ENTRY_NULL; - if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { + if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { pte_p = pvh_ptep(pv_h); - } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { + } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { pve_p = pvh_list(pv_h); } while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) { vm_map_address_t va; - pt_entry_t spte; + pt_entry_t spte; pt_entry_t tmplate; pmap_t pmap; - if (pve_p != PV_ENTRY_NULL) + if (pve_p != PV_ENTRY_NULL) { pte_p = pve_get_ptep(pve_p); + } if (pte_p == PT_ENTRY_NULL) { panic("pte_p is NULL: pve_p=%p ppnum=0x%x\n", pve_p, ppnum); @@ -7779,7 +7971,7 @@ arm_clear_fast_fault( #ifdef PVH_FLAG_IOMMU if ((vm_offset_t)pte_p & PVH_FLAG_IOMMU) { goto cff_skip_pve; - } + } #endif if (*pte_p == ARM_PTE_EMPTY) { panic("pte is NULL: pte_p=%p ppnum=0x%x\n", pte_p, ppnum); @@ -7793,19 +7985,19 @@ arm_clear_fast_fault( spte = *pte_p; tmplate = spte; - if ((fault_type & VM_PROT_WRITE) && (pte_is_ffr(spte))) { + if ((fault_type & VM_PROT_WRITE) && (pte_was_writeable(spte))) { { - if (pmap == kernel_pmap) + if (pmap == kernel_pmap) { tmplate = ((spte & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RWNA)); - else + } else { tmplate = ((spte & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RWRW)); + } } tmplate |= ARM_PTE_AF; - pte_set_ffr(tmplate, 0); + pte_set_was_writeable(tmplate, false); pa_set_bits(pa, PP_ATTR_REFERENCED | PP_ATTR_MODIFIED); - } else if ((fault_type & VM_PROT_READ) && ((spte & ARM_PTE_AF) != ARM_PTE_AF)) { tmplate = spte | ARM_PTE_AF; @@ -7828,14 +8020,16 @@ arm_clear_fast_fault( } #ifdef PVH_FLAG_IOMMU - cff_skip_pve: +cff_skip_pve: #endif pte_p = PT_ENTRY_NULL; - if (pve_p != PV_ENTRY_NULL) + if (pve_p != PV_ENTRY_NULL) { pve_p = PVE_NEXT_PTR(pve_next(pve_p)); + } } - if (tlb_flush_needed) + if (tlb_flush_needed) { sync_tlb_flush(); + } return result; } @@ -7889,7 +8083,7 @@ arm_fast_fault_internal( if (!pa_valid(pa)) { PMAP_UNLOCK(pmap); - return result; + return result; } pai = (int)pa_index(pa); LOCK_PVH(pai); @@ -7914,11 +8108,11 @@ arm_fast_fault_internal( if (IS_REFFAULT_PAGE(pai)) { CLR_REFFAULT_PAGE(pai); } - if ( (fault_type & VM_PROT_WRITE) && IS_MODFAULT_PAGE(pai)) { + if ((fault_type & VM_PROT_WRITE) && IS_MODFAULT_PAGE(pai)) { CLR_MODFAULT_PAGE(pai); } - if (arm_clear_fast_fault((ppnum_t)atop(pa),fault_type)) { + if (arm_clear_fast_fault((ppnum_t)atop(pa), fault_type)) { /* * Should this preserve KERN_PROTECTION_FAILURE? 
The * cost of not doing so is a another fault in a case @@ -7942,14 +8136,15 @@ arm_fast_fault( { kern_return_t result = KERN_FAILURE; - if (va < pmap->min || va >= pmap->max) + if (va < pmap->min || va >= pmap->max) { return result; + } PMAP_TRACE(3, PMAP_CODE(PMAP__FAST_FAULT) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(va), fault_type, - from_user); + VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(va), fault_type, + from_user); -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) if (pmap != kernel_pmap) { pmap_cpu_data_t *cpu_data_ptr = pmap_get_cpu_data(); pmap_t cur_pmap; @@ -7985,8 +8180,8 @@ pmap_copy_page( ppnum_t pdst) { bcopy_phys((addr64_t) (ptoa(psrc)), - (addr64_t) (ptoa(pdst)), - PAGE_SIZE); + (addr64_t) (ptoa(pdst)), + PAGE_SIZE); } @@ -8002,8 +8197,8 @@ pmap_copy_part_page( vm_size_t len) { bcopy_phys((addr64_t) (ptoa(psrc) + src_offset), - (addr64_t) (ptoa(pdst) + dst_offset), - len); + (addr64_t) (ptoa(pdst) + dst_offset), + len); } @@ -8047,7 +8242,7 @@ void pmap_map_globals( void) { - pt_entry_t *ptep, pte; + pt_entry_t *ptep, pte; ptep = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS); assert(ptep != PT_ENTRY_NULL); @@ -8058,43 +8253,45 @@ pmap_map_globals( pte |= ARM_PTE_NG; #endif /* __ARM_KERNEL_PROTECT__ */ pte |= ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK); -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) pte |= ARM_PTE_SH(SH_OUTER_MEMORY); #else pte |= ARM_PTE_SH; #endif *ptep = pte; - FLUSH_PTE_RANGE(ptep,(ptep+1)); + FLUSH_PTE_RANGE(ptep, (ptep + 1)); PMAP_UPDATE_TLBS(kernel_pmap, LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE); } vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index) { - if (__improbable(index >= CPUWINDOWS_MAX)) + if (__improbable(index >= CPUWINDOWS_MAX)) { panic("%s: invalid index %u", __func__, index); + } return (vm_offset_t)(CPUWINDOWS_BASE + (PAGE_SIZE * ((CPUWINDOWS_MAX * cpu_num) + index))); } MARK_AS_PMAP_TEXT static unsigned int pmap_map_cpu_windows_copy_internal( - ppnum_t pn, + ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits) { - pt_entry_t *ptep = NULL, pte; - unsigned int cpu_num; - unsigned int i; - vm_offset_t cpu_copywindow_vaddr = 0; + pt_entry_t *ptep = NULL, pte; + unsigned int cpu_num; + unsigned int i; + vm_offset_t cpu_copywindow_vaddr = 0; cpu_num = pmap_get_cpu_data()->cpu_number; - for (i = 0; icpu_number; @@ -8206,9 +8403,9 @@ pmap_trim_range( if (__improbable(end < start)) { panic("%s: invalid address range, " - "pmap=%p, start=%p, end=%p", - __func__, - pmap, (void*)start, (void*)end); + "pmap=%p, start=%p, end=%p", + __func__, + pmap, (void*)start, (void*)end); } nested_region_start = pmap->nested ? pmap->nested_region_subord_addr : pmap->nested_region_subord_addr; @@ -8216,9 +8413,9 @@ pmap_trim_range( if (__improbable((start < nested_region_start) || (end > nested_region_end))) { panic("%s: range outside nested region %p-%p, " - "pmap=%p, start=%p, end=%p", - __func__, (void *)nested_region_start, (void *)nested_region_end, - pmap, (void*)start, (void*)end); + "pmap=%p, start=%p, end=%p", + __func__, (void *)nested_region_start, (void *)nested_region_end, + pmap, (void*)start, (void*)end); } /* Contract the range to TT page boundaries. */ @@ -8262,7 +8459,7 @@ pmap_trim_range( } #else if ((ptep_get_ptd(pte_p)->pt_cnt[ARM_PT_DESC_INDEX(pte_p)].refcnt == 0) && - (pmap != kernel_pmap)) { + (pmap != kernel_pmap)) { if (pmap->nested == TRUE) { /* Deallocate for the nested map. 
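[Editor's note, not part of the patch.] pmap_trim_range() may only free page tables that lie entirely inside the trimmed span, which is why the hunk above first contracts [start, end) inward to twig boundaries: round start up, round end down. A checkable sketch of that contraction, assuming 2MB L2 twigs; TWIG_SIZE and contract() are stand-ins for ARM_TT_TWIG_SIZE and the inline math.

#include <stdint.h>
#include <assert.h>

#define TWIG_SIZE 0x200000ull          // assumption: 2MB L2 twig
#define TWIG_MASK (TWIG_SIZE - 1)

// Contract [start, end) inward to whole-twig boundaries.
static void
contract(uint64_t *start, uint64_t *end)
{
    *start = (*start + TWIG_MASK) & ~TWIG_MASK; // round start up
    *end &= ~TWIG_MASK;                         // round end down
    if (*end < *start)
        *end = *start;                          // no twig wholly contained
}

int main(void)
{
    uint64_t s = 0x100000, e = 0x500000;        // 1MB .. 5MB
    contract(&s, &e);
    assert(s == 0x200000 && e == 0x400000);     // only [2MB, 4MB) is freeable
    return 0;
}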
*/ pmap_tte_deallocate(pmap, tte_p, PMAP_TT_L2_LEVEL); @@ -8361,14 +8558,14 @@ pmap_trim_internal( if (__improbable(os_add_overflow(vstart, size, &vend))) { panic("%s: grand addr wraps around, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, (void*)nstart, size); } if (__improbable(os_add_overflow(nstart, size, &nend))) { panic("%s: nested addr wraps around, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, (void*)nstart, size); } VALIDATE_PMAP(grand); @@ -8378,33 +8575,33 @@ pmap_trim_internal( if (!subord->nested) { panic("%s: subord is not nestable, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, (void*)nstart, size); } if (grand->nested) { panic("%s: grand is nestable, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, (void*)nstart, size); } if (grand->nested_pmap != subord) { panic("%s: grand->nested != subord, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, (void*)nstart, size); } if (size != 0) { if ((vstart < grand->nested_region_grand_addr) || (vend > (grand->nested_region_grand_addr + grand->nested_region_size))) { panic("%s: grand range not in nested region, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, (void*)nstart, size); } if ((nstart < grand->nested_region_grand_addr) || (nend > (grand->nested_region_grand_addr + grand->nested_region_size))) { panic("%s: subord range not in nested region, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, (void*)nstart, size); } } @@ -8436,8 +8633,8 @@ pmap_trim_internal( if (__improbable(os_add_overflow(subord->nested_region_true_end, adjust_offmask, &subord->nested_region_true_end))) { panic("%s: padded true end wraps around, " - "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", - __func__, grand, subord, (void*)vstart, (void*)nstart, size); + "grand=%p, subord=%p, vstart=%p, nstart=%p, size=%#llx", + __func__, grand, subord, (void*)vstart, (void*)nstart, size); } subord->nested_region_true_end &= ~adjust_offmask; @@ -8467,7 +8664,8 @@ pmap_trim_internal( pmap_trim_subord(subord); } -MARK_AS_PMAP_TEXT static void pmap_trim_self(pmap_t pmap) +MARK_AS_PMAP_TEXT static void +pmap_trim_self(pmap_t pmap) { if (pmap->nested_has_no_bounds_ref && pmap->nested_pmap) { /* If we have a no bounds ref, we need to drop it. 
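[Editor's note, not part of the patch.] The os_add_overflow() checks sprinkled through these hunks all guard the same idiom: round an address up to an alignment boundary without letting the addition wrap. A minimal helper in that spirit; round_up_checked() is an illustrative name, not a kernel API.

#include <stdbool.h>
#include <stdint.h>

// Round x up to (mask + 1) alignment, reporting failure instead of
// silently wrapping, as the callers above do before panicking.
static bool
round_up_checked(uint64_t x, uint64_t mask, uint64_t *out)
{
    if (x > UINT64_MAX - mask)
        return false; // x + mask would overflow
    *out = (x + mask) & ~mask;
    return true;
}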
*/ @@ -8558,41 +8756,44 @@ pmap_nest_internal( tt_entry_t *gtte_p; unsigned int i; unsigned int num_tte; - unsigned int nested_region_asid_bitmap_size; - unsigned int* nested_region_asid_bitmap; + unsigned int nested_region_asid_bitmap_size; + unsigned int* nested_region_asid_bitmap; int expand_options = 0; addr64_t vend, nend; - if (__improbable(os_add_overflow(vstart, size, &vend))) + if (__improbable(os_add_overflow(vstart, size, &vend))) { panic("%s: %p grand addr wraps around: 0x%llx + 0x%llx", __func__, grand, vstart, size); - if (__improbable(os_add_overflow(nstart, size, &nend))) + } + if (__improbable(os_add_overflow(nstart, size, &nend))) { panic("%s: %p nested addr wraps around: 0x%llx + 0x%llx", __func__, subord, nstart, size); + } VALIDATE_PMAP(grand); VALIDATE_PMAP(subord); -#if (__ARM_VMSA__ == 7) - if (((size|vstart|nstart) & ARM_TT_L1_PT_OFFMASK) != 0x0ULL) { - return KERN_INVALID_VALUE; /* Nest 4MB region */ +#if (__ARM_VMSA__ == 7) + if (((size | vstart | nstart) & ARM_TT_L1_PT_OFFMASK) != 0x0ULL) { + return KERN_INVALID_VALUE; /* Nest 4MB region */ } #else - if (((size|vstart|nstart) & (ARM_TT_L2_OFFMASK)) != 0x0ULL) { + if (((size | vstart | nstart) & (ARM_TT_L2_OFFMASK)) != 0x0ULL) { panic("pmap_nest() pmap %p unaligned nesting request 0x%llx, 0x%llx, 0x%llx\n", grand, vstart, nstart, size); } #endif - if (!subord->nested) + if (!subord->nested) { panic("%s: subordinate pmap %p is not nestable", __func__, subord); + } if ((grand->nested_pmap != PMAP_NULL) && (grand->nested_pmap != subord)) { panic("pmap_nest() pmap %p has a nested pmap\n", grand); } if (subord->nested_region_asid_bitmap == NULL) { - nested_region_asid_bitmap_size = (unsigned int)(size>>ARM_TT_TWIG_SHIFT)/(sizeof(unsigned int)*NBBY); + nested_region_asid_bitmap_size = (unsigned int)(size >> ARM_TT_TWIG_SHIFT) / (sizeof(unsigned int) * NBBY); - nested_region_asid_bitmap = kalloc(nested_region_asid_bitmap_size*sizeof(unsigned int)); - bzero(nested_region_asid_bitmap, nested_region_asid_bitmap_size*sizeof(unsigned int)); + nested_region_asid_bitmap = kalloc(nested_region_asid_bitmap_size * sizeof(unsigned int)); + bzero(nested_region_asid_bitmap, nested_region_asid_bitmap_size * sizeof(unsigned int)); PMAP_LOCK(subord); if (subord->nested_region_asid_bitmap == NULL) { @@ -8604,25 +8805,25 @@ pmap_nest_internal( } PMAP_UNLOCK(subord); if (nested_region_asid_bitmap != NULL) { - kfree(nested_region_asid_bitmap, nested_region_asid_bitmap_size*sizeof(unsigned int)); + kfree(nested_region_asid_bitmap, nested_region_asid_bitmap_size * sizeof(unsigned int)); } } if ((subord->nested_region_subord_addr + subord->nested_region_size) < nend) { - uint64_t new_size; - unsigned int new_nested_region_asid_bitmap_size; - unsigned int* new_nested_region_asid_bitmap; + uint64_t new_size; + unsigned int new_nested_region_asid_bitmap_size; + unsigned int* new_nested_region_asid_bitmap; nested_region_asid_bitmap = NULL; nested_region_asid_bitmap_size = 0; new_size = nend - subord->nested_region_subord_addr; /* We explicitly add 1 to the bitmap allocation size in order to avoid issues with truncation. 
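[Editor's note, not part of the patch.] A worked example of the sizing that the "+ 1" comment in pmap_nest_internal() below defends: the nested-region ASID bitmap keeps one bit per twig, and integer division truncates a partial final word, so the allocation pads by one word. Assuming 2MB twigs and 32-bit unsigned ints; asid_bitmap_words() is a stand-in for the inline expression.

#include <stdint.h>
#include <limits.h>
#include <stdio.h>

#define TWIG_SHIFT 21 // assumption: 2MB twigs, i.e. ARM_TT_TWIG_SHIFT on arm64

// One bit per twig, packed into unsigned ints, padded against truncation.
static unsigned
asid_bitmap_words(uint64_t region_size)
{
    uint64_t bits = region_size >> TWIG_SHIFT;
    return (unsigned)(bits / (sizeof(unsigned) * CHAR_BIT)) + 1;
}

int main(void)
{
    // 1GB region -> 512 twigs -> 16 full 32-bit words, 17 with the pad.
    printf("%u\n", asid_bitmap_words(1ull << 30));
    return 0;
}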
*/ - new_nested_region_asid_bitmap_size = (unsigned int)((new_size>>ARM_TT_TWIG_SHIFT)/(sizeof(unsigned int)*NBBY)) + 1; + new_nested_region_asid_bitmap_size = (unsigned int)((new_size >> ARM_TT_TWIG_SHIFT) / (sizeof(unsigned int) * NBBY)) + 1; - new_nested_region_asid_bitmap = kalloc(new_nested_region_asid_bitmap_size*sizeof(unsigned int)); + new_nested_region_asid_bitmap = kalloc(new_nested_region_asid_bitmap_size * sizeof(unsigned int)); PMAP_LOCK(subord); if (subord->nested_region_size < new_size) { - bzero(new_nested_region_asid_bitmap, new_nested_region_asid_bitmap_size*sizeof(unsigned int)); + bzero(new_nested_region_asid_bitmap, new_nested_region_asid_bitmap_size * sizeof(unsigned int)); bcopy(subord->nested_region_asid_bitmap, new_nested_region_asid_bitmap, subord->nested_region_asid_bitmap_size); nested_region_asid_bitmap_size = subord->nested_region_asid_bitmap_size; nested_region_asid_bitmap = subord->nested_region_asid_bitmap; @@ -8633,9 +8834,9 @@ pmap_nest_internal( } PMAP_UNLOCK(subord); if (nested_region_asid_bitmap != NULL) - kfree(nested_region_asid_bitmap, nested_region_asid_bitmap_size*sizeof(unsigned int)); + { kfree(nested_region_asid_bitmap, nested_region_asid_bitmap_size * sizeof(unsigned int));} if (new_nested_region_asid_bitmap != NULL) - kfree(new_nested_region_asid_bitmap, new_nested_region_asid_bitmap_size*sizeof(unsigned int)); + { kfree(new_nested_region_asid_bitmap, new_nested_region_asid_bitmap_size * sizeof(unsigned int));} } PMAP_LOCK(subord); @@ -8657,13 +8858,12 @@ pmap_nest_internal( } else { if ((grand->nested_region_grand_addr > vstart)) { panic("pmap_nest() pmap %p : attempt to nest outside the nested region\n", grand); - } - else if ((grand->nested_region_grand_addr + grand->nested_region_size) < vend) { + } else if ((grand->nested_region_grand_addr + grand->nested_region_size) < vend) { grand->nested_region_size = (mach_vm_offset_t)(vstart - grand->nested_region_grand_addr + size); } } -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) nvaddr = (vm_map_offset_t) nstart; vaddr = (vm_map_offset_t) vstart; num_tte = size >> ARM_TT_L1_SHIFT; @@ -8743,7 +8943,7 @@ expand_next: vaddr = (vm_map_offset_t) vstart; -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) for (i = 0; i < num_tte; i++) { if (((subord->nested_region_true_start) > nvaddr) || ((subord->nested_region_true_end) <= nvaddr)) { goto nest_next; @@ -8790,7 +8990,7 @@ done: stte_p = pmap_tte(grand, vstart); FLUSH_PTE_RANGE_STRONG(stte_p, stte_p + num_tte); -#if (__ARM_VMSA__ > 7) +#if (__ARM_VMSA__ > 7) /* * check for overflow on LP64 arch */ @@ -8802,7 +9002,8 @@ done: return kr; } -kern_return_t pmap_nest( +kern_return_t +pmap_nest( pmap_t grand, pmap_t subord, addr64_t vstart, @@ -8812,8 +9013,8 @@ kern_return_t pmap_nest( kern_return_t kr = KERN_FAILURE; PMAP_TRACE(2, PMAP_CODE(PMAP__NEST) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(subord), - VM_KERNEL_ADDRHIDE(vstart)); + VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(subord), + VM_KERNEL_ADDRHIDE(vstart)); kr = pmap_nest_internal(grand, subord, vstart, nstart, size); @@ -8837,7 +9038,7 @@ pmap_unnest( addr64_t vaddr, uint64_t size) { - return(pmap_unnest_options(grand, vaddr, size, 0)); + return pmap_unnest_options(grand, vaddr, size, 0); } MARK_AS_PMAP_TEXT static kern_return_t @@ -8857,37 +9058,39 @@ pmap_unnest_options_internal( unsigned int i; addr64_t vend; - if (__improbable(os_add_overflow(vaddr, size, &vend))) + if (__improbable(os_add_overflow(vaddr, size, &vend))) { panic("%s: %p vaddr wraps around: 0x%llx 
+ 0x%llx", __func__, grand, vaddr, size); + } VALIDATE_PMAP(grand); -#if (__ARM_VMSA__ == 7) - if (((size|vaddr) & ARM_TT_L1_PT_OFFMASK) != 0x0ULL) { +#if (__ARM_VMSA__ == 7) + if (((size | vaddr) & ARM_TT_L1_PT_OFFMASK) != 0x0ULL) { panic("pmap_unnest(): unaligned request\n"); } #else - if (((size|vaddr) & ARM_TT_L2_OFFMASK) != 0x0ULL) { - panic("pmap_unnest(): unaligned request\n"); + if (((size | vaddr) & ARM_TT_L2_OFFMASK) != 0x0ULL) { + panic("pmap_unnest(): unaligned request\n"); } #endif - if ((option & PMAP_UNNEST_CLEAN) == 0) - { - if (grand->nested_pmap == NULL) + if ((option & PMAP_UNNEST_CLEAN) == 0) { + if (grand->nested_pmap == NULL) { panic("%s: %p has no nested pmap", __func__, grand); + } - if ((vaddr < grand->nested_region_grand_addr) || (vend > (grand->nested_region_grand_addr + grand->nested_region_size))) + if ((vaddr < grand->nested_region_grand_addr) || (vend > (grand->nested_region_grand_addr + grand->nested_region_size))) { panic("%s: %p: unnest request to region not-fully-nested region [%p, %p)", __func__, grand, (void*)vaddr, (void*)vend); + } PMAP_LOCK(grand->nested_pmap); - start = vaddr - grand->nested_region_grand_addr + grand->nested_region_subord_addr ; + start = vaddr - grand->nested_region_grand_addr + grand->nested_region_subord_addr; start_index = (unsigned int)((vaddr - grand->nested_region_grand_addr) >> ARM_TT_TWIG_SHIFT); max_index = (unsigned int)(start_index + (size >> ARM_TT_TWIG_SHIFT)); num_tte = (unsigned int)(size >> ARM_TT_TWIG_SHIFT); - for (current_index = start_index, addr = start; current_index < max_index; current_index++, addr += ARM_TT_TWIG_SIZE) { + for (current_index = start_index, addr = start; current_index < max_index; current_index++, addr += ARM_TT_TWIG_SIZE) { pt_entry_t *bpte, *epte, *cpte; if (addr < grand->nested_pmap->nested_region_true_start) { @@ -8901,43 +9104,41 @@ pmap_unnest_options_internal( } bpte = pmap_pte(grand->nested_pmap, addr); - epte = bpte + (ARM_TT_LEAF_INDEX_MASK>>ARM_TT_LEAF_SHIFT); + epte = bpte + (ARM_TT_LEAF_INDEX_MASK >> ARM_TT_LEAF_SHIFT); - if(!testbit(current_index, (int *)grand->nested_pmap->nested_region_asid_bitmap)) { + if (!testbit(current_index, (int *)grand->nested_pmap->nested_region_asid_bitmap)) { setbit(current_index, (int *)grand->nested_pmap->nested_region_asid_bitmap); for (cpte = bpte; cpte <= epte; cpte++) { - pmap_paddr_t pa; - int pai=0; - boolean_t managed=FALSE; + pmap_paddr_t pa; + int pai = 0; + boolean_t managed = FALSE; pt_entry_t spte; if ((*cpte != ARM_PTE_TYPE_FAULT) && (!ARM_PTE_IS_COMPRESSED(*cpte))) { - spte = *cpte; while (!managed) { pa = pte_to_pa(spte); - if (!pa_valid(pa)) + if (!pa_valid(pa)) { break; + } pai = (int)pa_index(pa); LOCK_PVH(pai); spte = *cpte; pa = pte_to_pa(spte); if (pai == (int)pa_index(pa)) { - managed =TRUE; + managed = TRUE; break; // Leave the PVH locked as we'll unlock it after we update the PTE } UNLOCK_PVH(pai); } if (((spte & ARM_PTE_NG) != ARM_PTE_NG)) { - WRITE_PTE_FAST(cpte, (spte | ARM_PTE_NG)); } - if (managed) - { + if (managed) { ASSERT_PVH_LOCKED(pai); UNLOCK_PVH(pai); } @@ -8945,7 +9146,7 @@ pmap_unnest_options_internal( } } - FLUSH_PTE_RANGE_STRONG(bpte, epte); + FLUSH_PTE_RANGE_STRONG(bpte, epte); flush_mmu_tlb_region_asid_async(start, (unsigned)size, grand->nested_pmap); } @@ -8998,7 +9199,7 @@ pmap_unnest_options( kern_return_t kr = KERN_FAILURE; PMAP_TRACE(2, PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(vaddr)); + VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(vaddr)); kr = 
pmap_unnest_options_internal(grand, vaddr, size, option); @@ -9066,8 +9267,8 @@ pt_fake_zone_info( * an ARM small page (4K). */ -#define ARM_FULL_TLB_FLUSH_THRESHOLD 64 -#define ARM64_FULL_TLB_FLUSH_THRESHOLD 256 +#define ARM_FULL_TLB_FLUSH_THRESHOLD 64 +#define ARM64_FULL_TLB_FLUSH_THRESHOLD 256 static void flush_mmu_tlb_region_asid_async( @@ -9075,26 +9276,28 @@ flush_mmu_tlb_region_asid_async( unsigned length, pmap_t pmap) { -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) vm_offset_t end = va + length; - uint32_t asid; + uint32_t asid; asid = pmap->asid; if (length / ARM_SMALL_PAGE_SIZE > ARM_FULL_TLB_FLUSH_THRESHOLD) { - boolean_t flush_all = FALSE; + boolean_t flush_all = FALSE; - if ((asid == 0) || (pmap->nested == TRUE)) + if ((asid == 0) || (pmap->nested == TRUE)) { flush_all = TRUE; - if (flush_all) + } + if (flush_all) { flush_mmu_tlb_async(); - else + } else { flush_mmu_tlb_asid_async(asid); + } return; } if (pmap->nested == TRUE) { -#if !__ARM_MP_EXT__ +#if !__ARM_MP_EXT__ flush_mmu_tlb(); #else va = arm_trunc_page(va); @@ -9109,20 +9312,22 @@ flush_mmu_tlb_region_asid_async( flush_mmu_tlb_entries_async(va, end); #else - vm_offset_t end = va + length; - uint32_t asid; + vm_offset_t end = va + length; + uint32_t asid; asid = pmap->asid; if ((length >> ARM_TT_L3_SHIFT) > ARM64_FULL_TLB_FLUSH_THRESHOLD) { boolean_t flush_all = FALSE; - if ((asid == 0) || (pmap->nested == TRUE)) + if ((asid == 0) || (pmap->nested == TRUE)) { flush_all = TRUE; - if (flush_all) + } + if (flush_all) { flush_mmu_tlb_async(); - else + } else { flush_mmu_tlb_asid_async((uint64_t)asid << TLBI_ASID_SHIFT); + } return; } va = tlbi_asid(asid) | tlbi_addr(va); @@ -9155,17 +9360,19 @@ pmap_find_io_attr(pmap_paddr_t paddr) for (;;) { unsigned int middle = (begin + end) / 2; int cmp = cmp_io_rgns(&find_range, &io_attr_table[middle]); - if (cmp == 0) + if (cmp == 0) { return io_attr_table[middle].wimg; - else if (begin == end) + } else if (begin == end) { break; - else if (cmp > 0) + } else if (cmp > 0) { begin = middle + 1; - else + } else { end = middle; - }; + } + } + ; - return (VM_WIMG_IO); + return VM_WIMG_IO; } unsigned int @@ -9173,39 +9380,103 @@ pmap_cache_attributes( ppnum_t pn) { pmap_paddr_t paddr; - int pai; - unsigned int result; - pp_attr_t pp_attr_current; + int pai; + unsigned int result; + pp_attr_t pp_attr_current; paddr = ptoa(pn); - if ((paddr >= io_rgn_start) && (paddr < io_rgn_end)) + if ((paddr >= io_rgn_start) && (paddr < io_rgn_end)) { return pmap_find_io_attr(paddr); + } if (!pmap_initialized) { - if ((paddr >= gPhysBase) && (paddr < gPhysBase+gPhysSize)) - return (VM_WIMG_DEFAULT); - else - return (VM_WIMG_IO); + if ((paddr >= gPhysBase) && (paddr < gPhysBase + gPhysSize)) { + return VM_WIMG_DEFAULT; + } else { + return VM_WIMG_IO; + } } - if (!pa_valid(paddr)) - return (VM_WIMG_IO); + if (!pa_valid(paddr)) { + return VM_WIMG_IO; + } result = VM_WIMG_DEFAULT; pai = (int)pa_index(paddr); pp_attr_current = pp_attr_table[pai]; - if (pp_attr_current & PP_ATTR_WIMG_MASK) + if (pp_attr_current & PP_ATTR_WIMG_MASK) { result = pp_attr_current & PP_ATTR_WIMG_MASK; + } return result; } +MARK_AS_PMAP_TEXT static void +pmap_sync_wimg(ppnum_t pn, unsigned int wimg_bits_prev, unsigned int wimg_bits_new) +{ + if ((wimg_bits_prev != wimg_bits_new) + && ((wimg_bits_prev == VM_WIMG_COPYBACK) + || ((wimg_bits_prev == VM_WIMG_INNERWBACK) + && (wimg_bits_new != VM_WIMG_COPYBACK)) + || ((wimg_bits_prev == VM_WIMG_WTHRU) + && ((wimg_bits_new != VM_WIMG_COPYBACK) || (wimg_bits_new != 
VM_WIMG_INNERWBACK))))) { + pmap_sync_page_attributes_phys(pn); + } + + if ((wimg_bits_new == VM_WIMG_RT) && (wimg_bits_prev != VM_WIMG_RT)) { + pmap_force_dcache_clean(phystokv(ptoa(pn)), PAGE_SIZE); + } +} + +MARK_AS_PMAP_TEXT static __unused void +pmap_update_compressor_page_internal(ppnum_t pn, unsigned int prev_cacheattr, unsigned int new_cacheattr) +{ + pmap_paddr_t paddr = ptoa(pn); + int pai = (int)pa_index(paddr); + + if (__improbable(!pa_valid(paddr))) { + panic("%s called on non-managed page 0x%08x", __func__, pn); + } + + LOCK_PVH(pai); + + + pmap_update_cache_attributes_locked(pn, new_cacheattr); + + UNLOCK_PVH(pai); + + pmap_sync_wimg(pn, prev_cacheattr & VM_WIMG_MASK, new_cacheattr & VM_WIMG_MASK); +} + +void * +pmap_map_compressor_page(ppnum_t pn) +{ +#if __ARM_PTE_PHYSMAP__ + unsigned int cacheattr = pmap_cache_attributes(pn) & VM_WIMG_MASK; + if (cacheattr != VM_WIMG_DEFAULT) { + pmap_update_compressor_page_internal(pn, cacheattr, VM_WIMG_DEFAULT); + } +#endif + return (void*)phystokv(ptoa(pn)); +} + +void +pmap_unmap_compressor_page(ppnum_t pn __unused, void *kva __unused) +{ +#if __ARM_PTE_PHYSMAP__ + unsigned int cacheattr = pmap_cache_attributes(pn) & VM_WIMG_MASK; + if (cacheattr != VM_WIMG_DEFAULT) { + pmap_update_compressor_page_internal(pn, VM_WIMG_DEFAULT, cacheattr); + } +#endif +} + MARK_AS_PMAP_TEXT static boolean_t pmap_batch_set_cache_attributes_internal( - ppnum_t pn, + ppnum_t pn, unsigned int cacheattr, unsigned int page_cnt, unsigned int page_index, @@ -9213,19 +9484,20 @@ pmap_batch_set_cache_attributes_internal( unsigned int *res) { pmap_paddr_t paddr; - int pai; - pp_attr_t pp_attr_current; - pp_attr_t pp_attr_template; - unsigned int wimg_bits_prev, wimg_bits_new; + int pai; + pp_attr_t pp_attr_current; + pp_attr_t pp_attr_template; + unsigned int wimg_bits_prev, wimg_bits_new; - if (cacheattr & VM_WIMG_USE_DEFAULT) + if (cacheattr & VM_WIMG_USE_DEFAULT) { cacheattr = VM_WIMG_DEFAULT; + } - if ((doit == FALSE) && (*res == 0)) { + if ((doit == FALSE) && (*res == 0)) { pmap_pin_kernel_pages((vm_offset_t)res, sizeof(*res)); *res = page_cnt; pmap_unpin_kernel_pages((vm_offset_t)res, sizeof(*res)); - if (platform_cache_batch_wimg(cacheattr & (VM_WIMG_MASK), page_cnt< 7) if (tmplate & ARM_PTE_HINT_MASK) { panic("%s: physical aperture PTE %p has hint bit set, va=%p, pte=0x%llx", - __FUNCTION__, pte_p, (void *)kva, tmplate); + __FUNCTION__, pte_p, (void *)kva, tmplate); } #endif WRITE_PTE_STRONG(pte_p, tmplate); @@ -9423,7 +9695,7 @@ pmap_update_cache_attributes_locked( pve_p = PV_ENTRY_NULL; if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) { pte_p = pvh_ptep(pv_h); - } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { + } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { pve_p = pvh_list(pv_h); pte_p = PT_ENTRY_NULL; } @@ -9432,11 +9704,13 @@ pmap_update_cache_attributes_locked( vm_map_address_t va; pmap_t pmap; - if (pve_p != PV_ENTRY_NULL) + if (pve_p != PV_ENTRY_NULL) { pte_p = pve_get_ptep(pve_p); + } #ifdef PVH_FLAG_IOMMU - if ((vm_offset_t)pte_p & PVH_FLAG_IOMMU) + if ((vm_offset_t)pte_p & PVH_FLAG_IOMMU) { goto cache_skip_pve; + } #endif pmap = ptep_get_pmap(pte_p); va = ptep_get_va(pte_p); @@ -9450,18 +9724,19 @@ pmap_update_cache_attributes_locked( tlb_flush_needed = TRUE; #ifdef PVH_FLAG_IOMMU - cache_skip_pve: +cache_skip_pve: #endif pte_p = PT_ENTRY_NULL; - if (pve_p != PV_ENTRY_NULL) + if (pve_p != PV_ENTRY_NULL) { pve_p = PVE_NEXT_PTR(pve_next(pve_p)); - + } } - if (tlb_flush_needed) + if (tlb_flush_needed) { sync_tlb_flush(); + } } -#if (__ARM_VMSA__ == 7) 
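[Editor's note, not part of the patch.] The new pmap_map_compressor_page()/pmap_unmap_compressor_page() pair above enforces a simple contract on configurations with a physical aperture: force the page cacheable (VM_WIMG_DEFAULT) before the compressor touches it through the aperture, and restore the prior attribute on unmap. A toy model of that contract; the real code re-derives the attribute on unmap rather than threading it through, and every name here is a stand-in.

#include <stdbool.h>

enum wimg { WIMG_DEFAULT, WIMG_IO, WIMG_RT }; // stand-in attribute values

struct page { enum wimg attr; };

static void set_cache_attr(struct page *p, enum wimg w) { p->attr = w; }

// Map for the compressor: force a cacheable mapping, remembering
// what to restore.
static enum wimg
compressor_map(struct page *p)
{
    enum wimg prev = p->attr;
    if (prev != WIMG_DEFAULT)
        set_cache_attr(p, WIMG_DEFAULT);
    return prev;
}

static void
compressor_unmap(struct page *p, enum wimg prev)
{
    if (prev != WIMG_DEFAULT)
        set_cache_attr(p, prev); // undo only if we changed it
}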
+#if (__ARM_VMSA__ == 7) vm_map_address_t pmap_create_sharedpage( void) @@ -9475,8 +9750,7 @@ pmap_create_sharedpage( kr = pmap_enter(kernel_pmap, _COMM_PAGE_BASE_ADDRESS, atop(pa), VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE); assert(kr == KERN_SUCCESS); - return((vm_map_address_t)phystokv(pa)); - + return (vm_map_address_t)phystokv(pa); } #else static void @@ -9499,14 +9773,14 @@ pmap_update_tt3e( /* Note absence of non-global bit */ #define PMAP_COMM_PAGE_PTE_TEMPLATE (ARM_PTE_TYPE_VALID \ - | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) \ - | ARM_PTE_SH(SH_INNER_MEMORY) | ARM_PTE_NX \ - | ARM_PTE_PNX | ARM_PTE_AP(AP_RORO) | ARM_PTE_AF) + | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) \ + | ARM_PTE_SH(SH_INNER_MEMORY) | ARM_PTE_NX \ + | ARM_PTE_PNX | ARM_PTE_AP(AP_RORO) | ARM_PTE_AF) vm_map_address_t pmap_create_sharedpage( - void -) + void + ) { kern_return_t kr; pmap_paddr_t pa = 0; @@ -9562,7 +9836,7 @@ pmap_create_sharedpage( pmap_update_tt3e(sharedpage_pmap, _COMM_PAGE32_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE); /* For manipulation in kernel, go straight to physical page */ - return ((vm_map_address_t)phystokv(pa)); + return (vm_map_address_t)phystokv(pa); } /* @@ -9741,7 +10015,7 @@ static boolean_t pmap_is_64bit( pmap_t pmap) { - return (pmap->is_64bit); + return pmap->is_64bit; } #endif @@ -9751,7 +10025,8 @@ pmap_is_64bit( */ boolean_t pmap_valid_page( - ppnum_t pn) { + ppnum_t pn) +{ return pa_valid(ptoa(pn)); } @@ -9774,7 +10049,7 @@ pmap_is_empty_internal( PMAP_LOCK(pmap); } -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) if (tte_index(pmap, va_end) >= pmap->tte_index_max) { if ((pmap != kernel_pmap) && (not_in_kdp)) { PMAP_UNLOCK(pmap); @@ -9786,16 +10061,17 @@ pmap_is_empty_internal( tte_p = pmap_tte(pmap, block_start); while (block_start < va_end) { block_end = (block_start + ARM_TT_L1_SIZE) & ~(ARM_TT_L1_OFFMASK); - if (block_end > va_end) + if (block_end > va_end) { block_end = va_end; + } if ((*tte_p & ARM_TTE_TYPE_MASK) != 0) { - vm_map_offset_t offset; + vm_map_offset_t offset; ppnum_t phys_page = 0; for (offset = block_start; - offset < block_end; - offset += ARM_PGBYTES) { + offset < block_end; + offset += ARM_PGBYTES) { // This does a pmap_find_phys() lookup but assumes lock is held phys_page = pmap_vtophys(pmap, offset); if (phys_page) { @@ -9818,13 +10094,13 @@ pmap_is_empty_internal( pt_entry_t *pte_p; block_end = (block_start + ARM_TT_L2_SIZE) & ~ARM_TT_L2_OFFMASK; - if (block_end > va_end) + if (block_end > va_end) { block_end = va_end; + } tte_p = pmap_tt2e(pmap, block_start); if ((tte_p != PT_ENTRY_NULL) - && ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE)) { - + && ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE)) { pte_p = (pt_entry_t *) ttetokv(*tte_p); bpte_p = &pte_p[tt3_index(pmap, block_start)]; epte_p = bpte_p + (((block_end - block_start) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT); @@ -9837,7 +10113,7 @@ pmap_is_empty_internal( return FALSE; } } - } + } block_start = block_end; } #endif @@ -9858,17 +10134,19 @@ pmap_is_empty( return pmap_is_empty_internal(pmap, va_start, va_end); } -vm_map_offset_t pmap_max_offset( - boolean_t is64, - unsigned int option) +vm_map_offset_t +pmap_max_offset( + boolean_t is64, + unsigned int option) { return (is64) ? 
pmap_max_64bit_offset(option) : pmap_max_32bit_offset(option); } -vm_map_offset_t pmap_max_64bit_offset( +vm_map_offset_t +pmap_max_64bit_offset( __unused unsigned int option) { - vm_map_offset_t max_offset_ret = 0; + vm_map_offset_t max_offset_ret = 0; #if defined(__arm64__) const vm_map_offset_t min_max_offset = SHARED_REGION_BASE_ARM64 + SHARED_REGION_SIZE_ARM64 + 0x20000000; // end of shared region + 512MB for various purposes @@ -9908,10 +10186,11 @@ vm_map_offset_t pmap_max_64bit_offset( return max_offset_ret; } -vm_map_offset_t pmap_max_32bit_offset( +vm_map_offset_t +pmap_max_32bit_offset( unsigned int option) { - vm_map_offset_t max_offset_ret = 0; + vm_map_offset_t max_offset_ret = 0; if (option == ARM_PMAP_MAX_OFFSET_DEFAULT) { max_offset_ret = arm_pmap_max_offset_default; @@ -9944,16 +10223,19 @@ vm_map_offset_t pmap_max_32bit_offset( extern kern_return_t dtrace_copyio_preflight(addr64_t); extern kern_return_t dtrace_copyio_postflight(addr64_t); -kern_return_t dtrace_copyio_preflight( +kern_return_t +dtrace_copyio_preflight( __unused addr64_t va) { - if (current_map() == kernel_map) + if (current_map() == kernel_map) { return KERN_FAILURE; - else + } else { return KERN_SUCCESS; + } } -kern_return_t dtrace_copyio_postflight( +kern_return_t +dtrace_copyio_postflight( __unused addr64_t va) { return KERN_SUCCESS; @@ -9988,17 +10270,17 @@ pmap_unpin_kernel_pages(vm_offset_t kva __unused, size_t nbytes __unused) -#define PMAP_RESIDENT_INVALID ((mach_vm_size_t)-1) +#define PMAP_RESIDENT_INVALID ((mach_vm_size_t)-1) MARK_AS_PMAP_TEXT static mach_vm_size_t pmap_query_resident_internal( - pmap_t pmap, - vm_map_address_t start, - vm_map_address_t end, - mach_vm_size_t *compressed_bytes_p) + pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + mach_vm_size_t *compressed_bytes_p) { - mach_vm_size_t resident_bytes = 0; - mach_vm_size_t compressed_bytes = 0; + mach_vm_size_t resident_bytes = 0; + mach_vm_size_t compressed_bytes = 0; pt_entry_t *bpte, *epte; pt_entry_t *pte_p; @@ -10011,11 +10293,13 @@ pmap_query_resident_internal( VALIDATE_PMAP(pmap); /* Ensure that this request is valid, and addresses exactly one TTE. 
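[Editor's note, not part of the patch.] pmap_query_resident() below never lets the locked helper span two page tables: it clips each call at the next twig boundary, which is exactly the `l = ((va + ARM_TT_TWIG_SIZE) & ~ARM_TT_TWIG_OFFMASK)` step. The same loop shape in isolation, assuming 2MB twigs; query_fn stands in for pmap_query_resident_internal().

#include <stdint.h>

#define TWIG_SIZE    (1ull << 21)   // assumption: 2MB twig
#define TWIG_OFFMASK (TWIG_SIZE - 1)

typedef uint64_t (*query_fn)(uint64_t start, uint64_t end);

// Clip the walk at each twig boundary so the locked helper never
// spans two page tables.
static uint64_t
query_range(uint64_t va, uint64_t end, query_fn f)
{
    uint64_t total = 0;
    while (va < end) {
        uint64_t l = (va + TWIG_SIZE) & ~TWIG_OFFMASK;
        if (l > end)
            l = end;
        total += f(va, l);
        va = l;
    }
    return total;
}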
*/ - if (__improbable((start % ARM_PGBYTES) || (end % ARM_PGBYTES))) + if (__improbable((start % ARM_PGBYTES) || (end % ARM_PGBYTES))) { panic("%s: address range %p, %p not page-aligned", __func__, (void*)start, (void*)end); + } - if (__improbable((end < start) || ((end - start) > (PTE_PGENTRIES * ARM_PGBYTES)))) + if (__improbable((end < start) || ((end - start) > (PTE_PGENTRIES * ARM_PGBYTES)))) { panic("%s: invalid address range %p, %p", __func__, (void*)start, (void*)end); + } PMAP_LOCK(pmap); tte_p = pmap_tte(pmap, start); @@ -10024,8 +10308,7 @@ pmap_query_resident_internal( return PMAP_RESIDENT_INVALID; } if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) { - -#if (__ARM_VMSA__ == 7) +#if (__ARM_VMSA__ == 7) pte_p = (pt_entry_t *) ttetokv(*tte_p); bpte = &pte_p[ptenum(start)]; epte = bpte + atop(end - start); @@ -10056,14 +10339,14 @@ pmap_query_resident_internal( mach_vm_size_t pmap_query_resident( - pmap_t pmap, - vm_map_address_t start, - vm_map_address_t end, - mach_vm_size_t *compressed_bytes_p) + pmap_t pmap, + vm_map_address_t start, + vm_map_address_t end, + mach_vm_size_t *compressed_bytes_p) { - mach_vm_size_t total_resident_bytes; - mach_vm_size_t compressed_bytes; - vm_map_address_t va; + mach_vm_size_t total_resident_bytes; + mach_vm_size_t compressed_bytes; + vm_map_address_t va; if (pmap == PMAP_NULL) { @@ -10077,8 +10360,8 @@ pmap_query_resident( compressed_bytes = 0; PMAP_TRACE(3, PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(start), - VM_KERNEL_ADDRHIDE(end)); + VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(start), + VM_KERNEL_ADDRHIDE(end)); va = start; while (va < end) { @@ -10087,11 +10370,13 @@ pmap_query_resident( l = ((va + ARM_TT_TWIG_SIZE) & ~ARM_TT_TWIG_OFFMASK); - if (l > end) + if (l > end) { l = end; + } resident_bytes = pmap_query_resident_internal(pmap, va, l, compressed_bytes_p); - if (resident_bytes == PMAP_RESIDENT_INVALID) + if (resident_bytes == PMAP_RESIDENT_INVALID) { break; + } total_resident_bytes += resident_bytes; @@ -10103,7 +10388,7 @@ pmap_query_resident( } PMAP_TRACE(3, PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_END, - total_resident_bytes); + total_resident_bytes); return total_resident_bytes; } @@ -10113,10 +10398,10 @@ static void pmap_check_ledgers( pmap_t pmap) { - ledger_amount_t bal; - int pid; - char *procname; - boolean_t do_panic; + ledger_amount_t bal; + int pid; + char *procname; + boolean_t do_panic; if (pmap->pmap_pid == 0) { /* @@ -10140,40 +10425,40 @@ pmap_check_ledgers( pmap_ledgers_drift.num_pmaps_checked++; -#define LEDGER_CHECK_BALANCE(__LEDGER) \ -MACRO_BEGIN \ - int panic_on_negative = TRUE; \ - ledger_get_balance(pmap->ledger, \ - task_ledgers.__LEDGER, \ - &bal); \ - ledger_get_panic_on_negative(pmap->ledger, \ - task_ledgers.__LEDGER, \ - &panic_on_negative); \ - if (bal != 0) { \ - if (panic_on_negative || \ - (pmap_ledgers_panic && \ - pmap_ledgers_panic_leeway > 0 && \ - (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) || \ - bal < (-pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \ - do_panic = TRUE; \ - } \ - printf("LEDGER BALANCE proc %d (%s) " \ - "\"%s\" = %lld\n", \ - pid, procname, #__LEDGER, bal); \ - if (bal > 0) { \ - pmap_ledgers_drift.__LEDGER##_over++; \ - pmap_ledgers_drift.__LEDGER##_over_total += bal; \ - if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \ - pmap_ledgers_drift.__LEDGER##_over_max = bal; \ - } \ - } else if (bal < 0) { \ - pmap_ledgers_drift.__LEDGER##_under++; \ - pmap_ledgers_drift.__LEDGER##_under_total += bal; \ - if (bal 
< pmap_ledgers_drift.__LEDGER##_under_max) { \ - pmap_ledgers_drift.__LEDGER##_under_max = bal; \ - } \ - } \ - } \ +#define LEDGER_CHECK_BALANCE(__LEDGER) \ +MACRO_BEGIN \ + int panic_on_negative = TRUE; \ + ledger_get_balance(pmap->ledger, \ + task_ledgers.__LEDGER, \ + &bal); \ + ledger_get_panic_on_negative(pmap->ledger, \ + task_ledgers.__LEDGER, \ + &panic_on_negative); \ + if (bal != 0) { \ + if (panic_on_negative || \ + (pmap_ledgers_panic && \ + pmap_ledgers_panic_leeway > 0 && \ + (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) || \ + bal < (-pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \ + do_panic = TRUE; \ + } \ + printf("LEDGER BALANCE proc %d (%s) " \ + "\"%s\" = %lld\n", \ + pid, procname, #__LEDGER, bal); \ + if (bal > 0) { \ + pmap_ledgers_drift.__LEDGER##_over++; \ + pmap_ledgers_drift.__LEDGER##_over_total += bal; \ + if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \ + pmap_ledgers_drift.__LEDGER##_over_max = bal; \ + } \ + } else if (bal < 0) { \ + pmap_ledgers_drift.__LEDGER##_under++; \ + pmap_ledgers_drift.__LEDGER##_under_total += bal; \ + if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \ + pmap_ledgers_drift.__LEDGER##_under_max = bal; \ + } \ + } \ + } \ MACRO_END LEDGER_CHECK_BALANCE(phys_footprint); @@ -10195,10 +10480,10 @@ MACRO_END if (do_panic) { if (pmap_ledgers_panic) { panic("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n", - pmap, pid, procname); + pmap, pid, procname); } else { printf("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n", - pmap, pid, procname); + pmap, pid, procname); } } @@ -10214,933 +10499,947 @@ MACRO_END } #endif /* MACH_ASSERT */ -void pmap_advise_pagezero_range(__unused pmap_t p, __unused uint64_t a) { +void +pmap_advise_pagezero_range(__unused pmap_t p, __unused uint64_t a) +{ } #if CONFIG_PGTRACE #define PROF_START uint64_t t, nanot;\ - t = mach_absolute_time(); + t = mach_absolute_time(); #define PROF_END absolutetime_to_nanoseconds(mach_absolute_time()-t, &nanot);\ - kprintf("%s: took %llu ns\n", __func__, nanot); + kprintf("%s: took %llu ns\n", __func__, nanot); #define PMAP_PGTRACE_LOCK(p) \ do { \ - *(p) = ml_set_interrupts_enabled(false); \ - if (simple_lock_try(&(pmap_pgtrace.lock))) break; \ - ml_set_interrupts_enabled(*(p)); \ + *(p) = ml_set_interrupts_enabled(false); \ + if (simple_lock_try(&(pmap_pgtrace.lock), LCK_GRP_NULL)) break; \ + ml_set_interrupts_enabled(*(p)); \ } while (true) #define PMAP_PGTRACE_UNLOCK(p) \ do { \ - simple_unlock(&(pmap_pgtrace.lock)); \ - ml_set_interrupts_enabled(*(p)); \ + simple_unlock(&(pmap_pgtrace.lock)); \ + ml_set_interrupts_enabled(*(p)); \ } while (0) #define PGTRACE_WRITE_PTE(pte_p, pte_entry) \ do { \ - *(pte_p) = (pte_entry); \ - FLUSH_PTE(pte_p); \ + *(pte_p) = (pte_entry); \ + FLUSH_PTE(pte_p); \ } while (0) #define PGTRACE_MAX_MAP 16 // maximum supported va to same pa typedef enum { - UNDEFINED, - PA_UNDEFINED, - VA_UNDEFINED, - DEFINED + UNDEFINED, + PA_UNDEFINED, + VA_UNDEFINED, + DEFINED } pmap_pgtrace_page_state_t; typedef struct { - queue_chain_t chain; - - /* - pa - pa - maps - list of va maps to upper pa - map_pool - map pool - map_waste - waste can - state - state - */ - pmap_paddr_t pa; - queue_head_t maps; - queue_head_t map_pool; - queue_head_t map_waste; - pmap_pgtrace_page_state_t state; + queue_chain_t chain; + + /* + * pa - pa + * maps - list of va maps to upper pa + * map_pool - map pool + * map_waste - waste can + * state - state + */ + pmap_paddr_t pa; + queue_head_t maps; + queue_head_t map_pool; + queue_head_t map_waste; + 
pmap_pgtrace_page_state_t state; } pmap_pgtrace_page_t; static struct { - /* - pages - list of tracing page info - */ - queue_head_t pages; - decl_simple_lock_data(, lock); + /* + * pages - list of tracing page info + */ + queue_head_t pages; + decl_simple_lock_data(, lock); } pmap_pgtrace = {}; -static void pmap_pgtrace_init(void) +static void +pmap_pgtrace_init(void) { - queue_init(&(pmap_pgtrace.pages)); - simple_lock_init(&(pmap_pgtrace.lock), 0); + queue_init(&(pmap_pgtrace.pages)); + simple_lock_init(&(pmap_pgtrace.lock), 0); - boolean_t enabled; + boolean_t enabled; - if (PE_parse_boot_argn("pgtrace", &enabled, sizeof(enabled))) { - pgtrace_enabled = enabled; - } + if (PE_parse_boot_argn("pgtrace", &enabled, sizeof(enabled))) { + pgtrace_enabled = enabled; + } } // find a page with given pa - pmap_pgtrace should be locked -inline static pmap_pgtrace_page_t *pmap_pgtrace_find_page(pmap_paddr_t pa) +inline static pmap_pgtrace_page_t * +pmap_pgtrace_find_page(pmap_paddr_t pa) { - queue_head_t *q = &(pmap_pgtrace.pages); - pmap_pgtrace_page_t *p; + queue_head_t *q = &(pmap_pgtrace.pages); + pmap_pgtrace_page_t *p; - queue_iterate(q, p, pmap_pgtrace_page_t *, chain) { - if (p->state == UNDEFINED) { - continue; - } - if (p->state == PA_UNDEFINED) { - continue; - } - if (p->pa == pa) { - return p; - } - } + queue_iterate(q, p, pmap_pgtrace_page_t *, chain) { + if (p->state == UNDEFINED) { + continue; + } + if (p->state == PA_UNDEFINED) { + continue; + } + if (p->pa == pa) { + return p; + } + } - return NULL; + return NULL; } // enter clone of given pmap, va page and range - pmap should be locked -static bool pmap_pgtrace_enter_clone(pmap_t pmap, vm_map_offset_t va_page, vm_map_offset_t start, vm_map_offset_t end) -{ - bool ints; - queue_head_t *q = &(pmap_pgtrace.pages); - pmap_paddr_t pa_page; - pt_entry_t *ptep, *cptep; - pmap_pgtrace_page_t *p; - bool found = false; - - PMAP_ASSERT_LOCKED(pmap); - assert(va_page == arm_trunc_page(va_page)); - - PMAP_PGTRACE_LOCK(&ints); - - ptep = pmap_pte(pmap, va_page); - - // target pte should exist - if (!ptep || !(*ptep & ARM_PTE_TYPE_VALID)) { - PMAP_PGTRACE_UNLOCK(&ints); - return false; - } - - queue_head_t *mapq; - queue_head_t *mappool; - pmap_pgtrace_map_t *map = NULL; - - pa_page = pte_to_pa(*ptep); - - // find if we have a page info defined for this - queue_iterate(q, p, pmap_pgtrace_page_t *, chain) { - mapq = &(p->maps); - mappool = &(p->map_pool); - - switch (p->state) { - case PA_UNDEFINED: - queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { - if (map->cloned == false && map->pmap == pmap && map->ova == va_page) { - p->pa = pa_page; - map->range.start = start; - map->range.end = end; - found = true; - break; - } - } - break; - - case VA_UNDEFINED: - if (p->pa != pa_page) { - break; - } - queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { - if (map->cloned == false) { - map->pmap = pmap; - map->ova = va_page; - map->range.start = start; - map->range.end = end; - found = true; - break; - } - } - break; - - case DEFINED: - if (p->pa != pa_page) { - break; - } - queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { - if (map->cloned == true && map->pmap == pmap && map->ova == va_page) { - kprintf("%s: skip existing mapping at va=%llx\n", __func__, va_page); - break; - } else if (map->cloned == true && map->pmap == kernel_pmap && map->cva[1] == va_page) { - kprintf("%s: skip clone mapping at va=%llx\n", __func__, va_page); - break; - } else if (map->cloned == false && map->pmap == pmap && map->ova == va_page) { - // range should 
be already defined as well - found = true; - break; - } - } - break; - - default: - panic("invalid state p->state=%x\n", p->state); - } - - if (found == true) { - break; - } - } - - // do not clone if no page info found - if (found == false) { - PMAP_PGTRACE_UNLOCK(&ints); - return false; - } - - // copy pre, target and post ptes to clone ptes - for (int i = 0; i < 3; i++) { - ptep = pmap_pte(pmap, va_page + (i-1)*ARM_PGBYTES); - cptep = pmap_pte(kernel_pmap, map->cva[i]); - assert(cptep != NULL); - if (ptep == NULL) { - PGTRACE_WRITE_PTE(cptep, (pt_entry_t)NULL); - } else { - PGTRACE_WRITE_PTE(cptep, *ptep); - } - PMAP_UPDATE_TLBS(kernel_pmap, map->cva[i], map->cva[i]+ARM_PGBYTES); - } - - // get ptes for original and clone - ptep = pmap_pte(pmap, va_page); - cptep = pmap_pte(kernel_pmap, map->cva[1]); - - // invalidate original pte and mark it as a pgtrace page - PGTRACE_WRITE_PTE(ptep, (*ptep | ARM_PTE_PGTRACE) & ~ARM_PTE_TYPE_VALID); - PMAP_UPDATE_TLBS(pmap, map->ova, map->ova+ARM_PGBYTES); - - map->cloned = true; - p->state = DEFINED; - - kprintf("%s: pa_page=%llx va_page=%llx cva[1]=%llx pmap=%p ptep=%p cptep=%p\n", __func__, pa_page, va_page, map->cva[1], pmap, ptep, cptep); - - PMAP_PGTRACE_UNLOCK(&ints); - - return true; +static bool +pmap_pgtrace_enter_clone(pmap_t pmap, vm_map_offset_t va_page, vm_map_offset_t start, vm_map_offset_t end) +{ + bool ints; + queue_head_t *q = &(pmap_pgtrace.pages); + pmap_paddr_t pa_page; + pt_entry_t *ptep, *cptep; + pmap_pgtrace_page_t *p; + bool found = false; + + PMAP_ASSERT_LOCKED(pmap); + assert(va_page == arm_trunc_page(va_page)); + + PMAP_PGTRACE_LOCK(&ints); + + ptep = pmap_pte(pmap, va_page); + + // target pte should exist + if (!ptep || !(*ptep & ARM_PTE_TYPE_VALID)) { + PMAP_PGTRACE_UNLOCK(&ints); + return false; + } + + queue_head_t *mapq; + queue_head_t *mappool; + pmap_pgtrace_map_t *map = NULL; + + pa_page = pte_to_pa(*ptep); + + // find if we have a page info defined for this + queue_iterate(q, p, pmap_pgtrace_page_t *, chain) { + mapq = &(p->maps); + mappool = &(p->map_pool); + + switch (p->state) { + case PA_UNDEFINED: + queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { + if (map->cloned == false && map->pmap == pmap && map->ova == va_page) { + p->pa = pa_page; + map->range.start = start; + map->range.end = end; + found = true; + break; + } + } + break; + + case VA_UNDEFINED: + if (p->pa != pa_page) { + break; + } + queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { + if (map->cloned == false) { + map->pmap = pmap; + map->ova = va_page; + map->range.start = start; + map->range.end = end; + found = true; + break; + } + } + break; + + case DEFINED: + if (p->pa != pa_page) { + break; + } + queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { + if (map->cloned == true && map->pmap == pmap && map->ova == va_page) { + kprintf("%s: skip existing mapping at va=%llx\n", __func__, va_page); + break; + } else if (map->cloned == true && map->pmap == kernel_pmap && map->cva[1] == va_page) { + kprintf("%s: skip clone mapping at va=%llx\n", __func__, va_page); + break; + } else if (map->cloned == false && map->pmap == pmap && map->ova == va_page) { + // range should be already defined as well + found = true; + break; + } + } + break; + + default: + panic("invalid state p->state=%x\n", p->state); + } + + if (found == true) { + break; + } + } + + // do not clone if no page info found + if (found == false) { + PMAP_PGTRACE_UNLOCK(&ints); + return false; + } + + // copy pre, target and post ptes to clone ptes + for (int i = 0; i < 3; 
i++) { + ptep = pmap_pte(pmap, va_page + (i - 1) * ARM_PGBYTES); + cptep = pmap_pte(kernel_pmap, map->cva[i]); + assert(cptep != NULL); + if (ptep == NULL) { + PGTRACE_WRITE_PTE(cptep, (pt_entry_t)NULL); + } else { + PGTRACE_WRITE_PTE(cptep, *ptep); + } + PMAP_UPDATE_TLBS(kernel_pmap, map->cva[i], map->cva[i] + ARM_PGBYTES); + } + + // get ptes for original and clone + ptep = pmap_pte(pmap, va_page); + cptep = pmap_pte(kernel_pmap, map->cva[1]); + + // invalidate original pte and mark it as a pgtrace page + PGTRACE_WRITE_PTE(ptep, (*ptep | ARM_PTE_PGTRACE) & ~ARM_PTE_TYPE_VALID); + PMAP_UPDATE_TLBS(pmap, map->ova, map->ova + ARM_PGBYTES); + + map->cloned = true; + p->state = DEFINED; + + kprintf("%s: pa_page=%llx va_page=%llx cva[1]=%llx pmap=%p ptep=%p cptep=%p\n", __func__, pa_page, va_page, map->cva[1], pmap, ptep, cptep); + + PMAP_PGTRACE_UNLOCK(&ints); + + return true; } // This function removes trace bit and validate pte if applicable. Pmap must be locked. -static void pmap_pgtrace_remove_clone(pmap_t pmap, pmap_paddr_t pa, vm_map_offset_t va) -{ - bool ints, found = false; - pmap_pgtrace_page_t *p; - pt_entry_t *ptep; - - PMAP_PGTRACE_LOCK(&ints); - - // we must have this page info - p = pmap_pgtrace_find_page(pa); - if (p == NULL) { - goto unlock_exit; - } - - // find matching map - queue_head_t *mapq = &(p->maps); - queue_head_t *mappool = &(p->map_pool); - pmap_pgtrace_map_t *map; - - queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { - if (map->pmap == pmap && map->ova == va) { - found = true; - break; - } - } - - if (!found) { - goto unlock_exit; - } - - if (map->cloned == true) { - // Restore back the pte to original state - ptep = pmap_pte(pmap, map->ova); - assert(ptep); - PGTRACE_WRITE_PTE(ptep, *ptep | ARM_PTE_TYPE_VALID); - PMAP_UPDATE_TLBS(pmap, va, va+ARM_PGBYTES); - - // revert clone pages - for (int i = 0; i < 3; i++) { - ptep = pmap_pte(kernel_pmap, map->cva[i]); - assert(ptep != NULL); - PGTRACE_WRITE_PTE(ptep, map->cva_spte[i]); - PMAP_UPDATE_TLBS(kernel_pmap, map->cva[i], map->cva[i]+ARM_PGBYTES); - } - } - - queue_remove(mapq, map, pmap_pgtrace_map_t *, chain); - map->pmap = NULL; - map->ova = (vm_map_offset_t)NULL; - map->cloned = false; - queue_enter_first(mappool, map, pmap_pgtrace_map_t *, chain); - - kprintf("%s: p=%p pa=%llx va=%llx\n", __func__, p, pa, va); +static void +pmap_pgtrace_remove_clone(pmap_t pmap, pmap_paddr_t pa, vm_map_offset_t va) +{ + bool ints, found = false; + pmap_pgtrace_page_t *p; + pt_entry_t *ptep; + + PMAP_PGTRACE_LOCK(&ints); + + // we must have this page info + p = pmap_pgtrace_find_page(pa); + if (p == NULL) { + goto unlock_exit; + } + + // find matching map + queue_head_t *mapq = &(p->maps); + queue_head_t *mappool = &(p->map_pool); + pmap_pgtrace_map_t *map; + + queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { + if (map->pmap == pmap && map->ova == va) { + found = true; + break; + } + } + + if (!found) { + goto unlock_exit; + } + + if (map->cloned == true) { + // Restore back the pte to original state + ptep = pmap_pte(pmap, map->ova); + assert(ptep); + PGTRACE_WRITE_PTE(ptep, *ptep | ARM_PTE_TYPE_VALID); + PMAP_UPDATE_TLBS(pmap, va, va + ARM_PGBYTES); + + // revert clone pages + for (int i = 0; i < 3; i++) { + ptep = pmap_pte(kernel_pmap, map->cva[i]); + assert(ptep != NULL); + PGTRACE_WRITE_PTE(ptep, map->cva_spte[i]); + PMAP_UPDATE_TLBS(kernel_pmap, map->cva[i], map->cva[i] + ARM_PGBYTES); + } + } + + queue_remove(mapq, map, pmap_pgtrace_map_t *, chain); + map->pmap = NULL; + map->ova = (vm_map_offset_t)NULL; + 
map->cloned = false; + queue_enter_first(mappool, map, pmap_pgtrace_map_t *, chain); + + kprintf("%s: p=%p pa=%llx va=%llx\n", __func__, p, pa, va); unlock_exit: - PMAP_PGTRACE_UNLOCK(&ints); + PMAP_PGTRACE_UNLOCK(&ints); } // remove all clones of given pa - pmap must be locked -static void pmap_pgtrace_remove_all_clone(pmap_paddr_t pa) +static void +pmap_pgtrace_remove_all_clone(pmap_paddr_t pa) { - bool ints; - pmap_pgtrace_page_t *p; - pt_entry_t *ptep; + bool ints; + pmap_pgtrace_page_t *p; + pt_entry_t *ptep; - PMAP_PGTRACE_LOCK(&ints); + PMAP_PGTRACE_LOCK(&ints); - // we must have this page info - p = pmap_pgtrace_find_page(pa); - if (p == NULL) { - PMAP_PGTRACE_UNLOCK(&ints); - return; - } + // we must have this page info + p = pmap_pgtrace_find_page(pa); + if (p == NULL) { + PMAP_PGTRACE_UNLOCK(&ints); + return; + } - queue_head_t *mapq = &(p->maps); - queue_head_t *mappool = &(p->map_pool); - queue_head_t *mapwaste = &(p->map_waste); - pmap_pgtrace_map_t *map; + queue_head_t *mapq = &(p->maps); + queue_head_t *mappool = &(p->map_pool); + queue_head_t *mapwaste = &(p->map_waste); + pmap_pgtrace_map_t *map; - // move maps to waste - while (!queue_empty(mapq)) { - queue_remove_first(mapq, map, pmap_pgtrace_map_t *, chain); - queue_enter_first(mapwaste, map, pmap_pgtrace_map_t*, chain); - } + // move maps to waste + while (!queue_empty(mapq)) { + queue_remove_first(mapq, map, pmap_pgtrace_map_t *, chain); + queue_enter_first(mapwaste, map, pmap_pgtrace_map_t*, chain); + } - PMAP_PGTRACE_UNLOCK(&ints); + PMAP_PGTRACE_UNLOCK(&ints); - // sanitize maps in waste - queue_iterate(mapwaste, map, pmap_pgtrace_map_t *, chain) { - if (map->cloned == true) { - PMAP_LOCK(map->pmap); + // sanitize maps in waste + queue_iterate(mapwaste, map, pmap_pgtrace_map_t *, chain) { + if (map->cloned == true) { + PMAP_LOCK(map->pmap); - // restore back original pte - ptep = pmap_pte(map->pmap, map->ova); - assert(ptep); - PGTRACE_WRITE_PTE(ptep, *ptep | ARM_PTE_TYPE_VALID); - PMAP_UPDATE_TLBS(map->pmap, map->ova, map->ova+ARM_PGBYTES); + // restore back original pte + ptep = pmap_pte(map->pmap, map->ova); + assert(ptep); + PGTRACE_WRITE_PTE(ptep, *ptep | ARM_PTE_TYPE_VALID); + PMAP_UPDATE_TLBS(map->pmap, map->ova, map->ova + ARM_PGBYTES); - // revert clone ptes - for (int i = 0; i < 3; i++) { - ptep = pmap_pte(kernel_pmap, map->cva[i]); - assert(ptep != NULL); - PGTRACE_WRITE_PTE(ptep, map->cva_spte[i]); - PMAP_UPDATE_TLBS(kernel_pmap, map->cva[i], map->cva[i]+ARM_PGBYTES); - } + // revert clone ptes + for (int i = 0; i < 3; i++) { + ptep = pmap_pte(kernel_pmap, map->cva[i]); + assert(ptep != NULL); + PGTRACE_WRITE_PTE(ptep, map->cva_spte[i]); + PMAP_UPDATE_TLBS(kernel_pmap, map->cva[i], map->cva[i] + ARM_PGBYTES); + } - PMAP_UNLOCK(map->pmap); - } + PMAP_UNLOCK(map->pmap); + } - map->pmap = NULL; - map->ova = (vm_map_offset_t)NULL; - map->cloned = false; - } + map->pmap = NULL; + map->ova = (vm_map_offset_t)NULL; + map->cloned = false; + } - PMAP_PGTRACE_LOCK(&ints); + PMAP_PGTRACE_LOCK(&ints); - // recycle maps back to map_pool - while (!queue_empty(mapwaste)) { - queue_remove_first(mapwaste, map, pmap_pgtrace_map_t *, chain); - queue_enter_first(mappool, map, pmap_pgtrace_map_t*, chain); - } + // recycle maps back to map_pool + while (!queue_empty(mapwaste)) { + queue_remove_first(mapwaste, map, pmap_pgtrace_map_t *, chain); + queue_enter_first(mappool, map, pmap_pgtrace_map_t*, chain); + } - PMAP_PGTRACE_UNLOCK(&ints); + PMAP_PGTRACE_UNLOCK(&ints); } -inline static void 
pmap_pgtrace_get_search_space(pmap_t pmap, vm_map_offset_t *startp, vm_map_offset_t *endp) +inline static void +pmap_pgtrace_get_search_space(pmap_t pmap, vm_map_offset_t *startp, vm_map_offset_t *endp) { - uint64_t tsz; - vm_map_offset_t end; + uint64_t tsz; + vm_map_offset_t end; - if (pmap == kernel_pmap) { - tsz = (get_tcr() >> TCR_T1SZ_SHIFT) & TCR_TSZ_MASK; - *startp = MAX(VM_MIN_KERNEL_ADDRESS, (UINT64_MAX >> (64-tsz)) << (64-tsz)); - *endp = VM_MAX_KERNEL_ADDRESS; - } else { - tsz = (get_tcr() >> TCR_T0SZ_SHIFT) & TCR_TSZ_MASK; - if (tsz == 64) { - end = 0; - } else { - end = ((uint64_t)1 << (64-tsz)) - 1; - } + if (pmap == kernel_pmap) { + tsz = (get_tcr() >> TCR_T1SZ_SHIFT) & TCR_TSZ_MASK; + *startp = MAX(VM_MIN_KERNEL_ADDRESS, (UINT64_MAX >> (64 - tsz)) << (64 - tsz)); + *endp = VM_MAX_KERNEL_ADDRESS; + } else { + tsz = (get_tcr() >> TCR_T0SZ_SHIFT) & TCR_TSZ_MASK; + if (tsz == 64) { + end = 0; + } else { + end = ((uint64_t)1 << (64 - tsz)) - 1; + } - *startp = 0; - *endp = end; - } + *startp = 0; + *endp = end; + } - assert(*endp > *startp); + assert(*endp > *startp); - return; + return; } // has pa mapped in given pmap? then clone it -static uint64_t pmap_pgtrace_clone_from_pa(pmap_t pmap, pmap_paddr_t pa, vm_map_offset_t start_offset, vm_map_offset_t end_offset) { - uint64_t ret = 0; - vm_map_offset_t min, max; - vm_map_offset_t cur_page, end_page; - pt_entry_t *ptep; - tt_entry_t *ttep; - tt_entry_t tte; - - pmap_pgtrace_get_search_space(pmap, &min, &max); - - cur_page = arm_trunc_page(min); - end_page = arm_trunc_page(max); - while (cur_page <= end_page) { - vm_map_offset_t add = 0; - - PMAP_LOCK(pmap); - - // skip uninterested space - if (pmap == kernel_pmap && - ((vm_kernel_base <= cur_page && cur_page < vm_kernel_top) || - (vm_kext_base <= cur_page && cur_page < vm_kext_top))) { - add = ARM_PGBYTES; - goto unlock_continue; - } +static uint64_t +pmap_pgtrace_clone_from_pa(pmap_t pmap, pmap_paddr_t pa, vm_map_offset_t start_offset, vm_map_offset_t end_offset) +{ + uint64_t ret = 0; + vm_map_offset_t min, max; + vm_map_offset_t cur_page, end_page; + pt_entry_t *ptep; + tt_entry_t *ttep; + tt_entry_t tte; + + pmap_pgtrace_get_search_space(pmap, &min, &max); + + cur_page = arm_trunc_page(min); + end_page = arm_trunc_page(max); + while (cur_page <= end_page) { + vm_map_offset_t add = 0; + + PMAP_LOCK(pmap); + + // skip uninterested space + if (pmap == kernel_pmap && + ((vm_kernel_base <= cur_page && cur_page < vm_kernel_top) || + (vm_kext_base <= cur_page && cur_page < vm_kext_top))) { + add = ARM_PGBYTES; + goto unlock_continue; + } #if __ARM64_TWO_LEVEL_PMAP__ - // check whether we can skip l2 - ttep = pmap_tt2e(pmap, cur_page); - assert(ttep); - tte = *ttep; + // check whether we can skip l2 + ttep = pmap_tt2e(pmap, cur_page); + assert(ttep); + tte = *ttep; #else - // check whether we can skip l1 - ttep = pmap_tt1e(pmap, cur_page); - assert(ttep); - tte = *ttep; - if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) { - add = ARM_TT_L1_SIZE; - goto unlock_continue; - } - - // how about l2 - tte = ((tt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt2_index(pmap, cur_page)]; -#endif - if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) { - add = ARM_TT_L2_SIZE; - goto unlock_continue; - } - - // ptep finally - ptep = &(((pt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt3_index(pmap, cur_page)]); - if (ptep == PT_ENTRY_NULL) { - add = ARM_TT_L3_SIZE; - goto unlock_continue; - } - - if 
(arm_trunc_page(pa) == pte_to_pa(*ptep)) { - if (pmap_pgtrace_enter_clone(pmap, cur_page, start_offset, end_offset) == true) { - ret++; - } - } - - add = ARM_PGBYTES; + // check whether we can skip l1 + ttep = pmap_tt1e(pmap, cur_page); + assert(ttep); + tte = *ttep; + if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) { + add = ARM_TT_L1_SIZE; + goto unlock_continue; + } + + // how about l2 + tte = ((tt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt2_index(pmap, cur_page)]; +#endif + if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) { + add = ARM_TT_L2_SIZE; + goto unlock_continue; + } + + // ptep finally + ptep = &(((pt_entry_t*) phystokv(tte & ARM_TTE_TABLE_MASK))[tt3_index(pmap, cur_page)]); + if (ptep == PT_ENTRY_NULL) { + add = ARM_TT_L3_SIZE; + goto unlock_continue; + } + + if (arm_trunc_page(pa) == pte_to_pa(*ptep)) { + if (pmap_pgtrace_enter_clone(pmap, cur_page, start_offset, end_offset) == true) { + ret++; + } + } + + add = ARM_PGBYTES; unlock_continue: - PMAP_UNLOCK(pmap); + PMAP_UNLOCK(pmap); - //overflow - if (cur_page + add < cur_page) { - break; - } + //overflow + if (cur_page + add < cur_page) { + break; + } - cur_page += add; - } + cur_page += add; + } - return ret; + return ret; } // search pv table and clone vas of given pa -static uint64_t pmap_pgtrace_clone_from_pvtable(pmap_paddr_t pa, vm_map_offset_t start_offset, vm_map_offset_t end_offset) +static uint64_t +pmap_pgtrace_clone_from_pvtable(pmap_paddr_t pa, vm_map_offset_t start_offset, vm_map_offset_t end_offset) { - uint64_t ret = 0; - unsigned long pai; - pv_entry_t **pvh; - pt_entry_t *ptep; - pmap_t pmap; - - typedef struct { - queue_chain_t chain; - pmap_t pmap; - vm_map_offset_t va; - } pmap_va_t; + uint64_t ret = 0; + unsigned long pai; + pv_entry_t **pvh; + pt_entry_t *ptep; + pmap_t pmap; - queue_head_t pmapvaq; - pmap_va_t *pmapva; + typedef struct { + queue_chain_t chain; + pmap_t pmap; + vm_map_offset_t va; + } pmap_va_t; - queue_init(&pmapvaq); + queue_head_t pmapvaq; + pmap_va_t *pmapva; - pai = pa_index(pa); - LOCK_PVH(pai); - pvh = pai_to_pvh(pai); + queue_init(&pmapvaq); - // collect pmap/va pair from pvh - if (pvh_test_type(pvh, PVH_TYPE_PTEP)) { - ptep = pvh_ptep(pvh); - pmap = ptep_get_pmap(ptep); + pai = pa_index(pa); + LOCK_PVH(pai); + pvh = pai_to_pvh(pai); - pmapva = (pmap_va_t *)kalloc(sizeof(pmap_va_t)); - pmapva->pmap = pmap; - pmapva->va = ptep_get_va(ptep); + // collect pmap/va pair from pvh + if (pvh_test_type(pvh, PVH_TYPE_PTEP)) { + ptep = pvh_ptep(pvh); + pmap = ptep_get_pmap(ptep); - queue_enter_first(&pmapvaq, pmapva, pmap_va_t *, chain); + pmapva = (pmap_va_t *)kalloc(sizeof(pmap_va_t)); + pmapva->pmap = pmap; + pmapva->va = ptep_get_va(ptep); - } else if (pvh_test_type(pvh, PVH_TYPE_PVEP)) { - pv_entry_t *pvep; + queue_enter_first(&pmapvaq, pmapva, pmap_va_t *, chain); + } else if (pvh_test_type(pvh, PVH_TYPE_PVEP)) { + pv_entry_t *pvep; - pvep = pvh_list(pvh); - while (pvep) { - ptep = pve_get_ptep(pvep); - pmap = ptep_get_pmap(ptep); + pvep = pvh_list(pvh); + while (pvep) { + ptep = pve_get_ptep(pvep); + pmap = ptep_get_pmap(ptep); - pmapva = (pmap_va_t *)kalloc(sizeof(pmap_va_t)); - pmapva->pmap = pmap; - pmapva->va = ptep_get_va(ptep); + pmapva = (pmap_va_t *)kalloc(sizeof(pmap_va_t)); + pmapva->pmap = pmap; + pmapva->va = ptep_get_va(ptep); - queue_enter_first(&pmapvaq, pmapva, pmap_va_t *, chain); + queue_enter_first(&pmapvaq, pmapva, pmap_va_t *, chain); - pvep = PVE_NEXT_PTR(pve_next(pvep)); - 
} - } + pvep = PVE_NEXT_PTR(pve_next(pvep)); + } + } - UNLOCK_PVH(pai); + UNLOCK_PVH(pai); - // clone them while making sure mapping still exists - queue_iterate(&pmapvaq, pmapva, pmap_va_t *, chain) { - PMAP_LOCK(pmapva->pmap); - ptep = pmap_pte(pmapva->pmap, pmapva->va); - if (pte_to_pa(*ptep) == pa) { - if (pmap_pgtrace_enter_clone(pmapva->pmap, pmapva->va, start_offset, end_offset) == true) { - ret++; - } - } - PMAP_UNLOCK(pmapva->pmap); + // clone them while making sure mapping still exists + queue_iterate(&pmapvaq, pmapva, pmap_va_t *, chain) { + PMAP_LOCK(pmapva->pmap); + ptep = pmap_pte(pmapva->pmap, pmapva->va); + if (pte_to_pa(*ptep) == pa) { + if (pmap_pgtrace_enter_clone(pmapva->pmap, pmapva->va, start_offset, end_offset) == true) { + ret++; + } + } + PMAP_UNLOCK(pmapva->pmap); - kfree(pmapva, sizeof(pmap_va_t)); - } + kfree(pmapva, sizeof(pmap_va_t)); + } - return ret; + return ret; } // allocate a page info -static pmap_pgtrace_page_t *pmap_pgtrace_alloc_page(void) -{ - pmap_pgtrace_page_t *p; - queue_head_t *mapq; - queue_head_t *mappool; - queue_head_t *mapwaste; - pmap_pgtrace_map_t *map; - - p = kalloc(sizeof(pmap_pgtrace_page_t)); - assert(p); - - p->state = UNDEFINED; - - mapq = &(p->maps); - mappool = &(p->map_pool); - mapwaste = &(p->map_waste); - queue_init(mapq); - queue_init(mappool); - queue_init(mapwaste); - - for (int i = 0; i < PGTRACE_MAX_MAP; i++) { - vm_map_offset_t newcva; - pt_entry_t *cptep; - kern_return_t kr; - vm_map_entry_t entry; - - // get a clone va - vm_object_reference(kernel_object); - kr = vm_map_find_space(kernel_map, &newcva, vm_map_round_page(3*ARM_PGBYTES, PAGE_MASK), 0, 0, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_DIAG, &entry); - if (kr != KERN_SUCCESS) { - panic("%s VM couldn't find any space kr=%d\n", __func__, kr); - } - VME_OBJECT_SET(entry, kernel_object); - VME_OFFSET_SET(entry, newcva); - vm_map_unlock(kernel_map); - - // fill default clone page info and add to pool - map = kalloc(sizeof(pmap_pgtrace_map_t)); - for (int j = 0; j < 3; j ++) { - vm_map_offset_t addr = newcva + j * ARM_PGBYTES; - - // pre-expand pmap while preemption enabled - kr = pmap_expand(kernel_pmap, addr, 0, PMAP_TT_MAX_LEVEL); - if (kr != KERN_SUCCESS) { - panic("%s: pmap_expand(kernel_pmap, addr=%llx) returns kr=%d\n", __func__, addr, kr); - } - - cptep = pmap_pte(kernel_pmap, addr); - assert(cptep != NULL); - - map->cva[j] = addr; - map->cva_spte[j] = *cptep; - } - map->range.start = map->range.end = 0; - map->cloned = false; - queue_enter_first(mappool, map, pmap_pgtrace_map_t *, chain); - } - - return p; +static pmap_pgtrace_page_t * +pmap_pgtrace_alloc_page(void) +{ + pmap_pgtrace_page_t *p; + queue_head_t *mapq; + queue_head_t *mappool; + queue_head_t *mapwaste; + pmap_pgtrace_map_t *map; + + p = kalloc(sizeof(pmap_pgtrace_page_t)); + assert(p); + + p->state = UNDEFINED; + + mapq = &(p->maps); + mappool = &(p->map_pool); + mapwaste = &(p->map_waste); + queue_init(mapq); + queue_init(mappool); + queue_init(mapwaste); + + for (int i = 0; i < PGTRACE_MAX_MAP; i++) { + vm_map_offset_t newcva; + pt_entry_t *cptep; + kern_return_t kr; + vm_map_entry_t entry; + + // get a clone va + vm_object_reference(kernel_object); + kr = vm_map_find_space(kernel_map, &newcva, vm_map_round_page(3 * ARM_PGBYTES, PAGE_MASK), 0, 0, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_DIAG, &entry); + if (kr != KERN_SUCCESS) { + panic("%s VM couldn't find any space kr=%d\n", __func__, kr); + } + VME_OBJECT_SET(entry, kernel_object); + VME_OFFSET_SET(entry, newcva); + 
vm_map_unlock(kernel_map); + + // fill default clone page info and add to pool + map = kalloc(sizeof(pmap_pgtrace_map_t)); + for (int j = 0; j < 3; j++) { + vm_map_offset_t addr = newcva + j * ARM_PGBYTES; + + // pre-expand pmap while preemption enabled + kr = pmap_expand(kernel_pmap, addr, 0, PMAP_TT_MAX_LEVEL); + if (kr != KERN_SUCCESS) { + panic("%s: pmap_expand(kernel_pmap, addr=%llx) returns kr=%d\n", __func__, addr, kr); + } + + cptep = pmap_pte(kernel_pmap, addr); + assert(cptep != NULL); + + map->cva[j] = addr; + map->cva_spte[j] = *cptep; + } + map->range.start = map->range.end = 0; + map->cloned = false; + queue_enter_first(mappool, map, pmap_pgtrace_map_t *, chain); + } + + return p; } // free a page info -static void pmap_pgtrace_free_page(pmap_pgtrace_page_t *p) +static void +pmap_pgtrace_free_page(pmap_pgtrace_page_t *p) { - queue_head_t *mapq; - queue_head_t *mappool; - queue_head_t *mapwaste; - pmap_pgtrace_map_t *map; + queue_head_t *mapq; + queue_head_t *mappool; + queue_head_t *mapwaste; + pmap_pgtrace_map_t *map; - assert(p); + assert(p); - mapq = &(p->maps); - mappool = &(p->map_pool); - mapwaste = &(p->map_waste); + mapq = &(p->maps); + mappool = &(p->map_pool); + mapwaste = &(p->map_waste); - while (!queue_empty(mapq)) { - queue_remove_first(mapq, map, pmap_pgtrace_map_t *, chain); - kfree(map, sizeof(pmap_pgtrace_map_t)); - } + while (!queue_empty(mapq)) { + queue_remove_first(mapq, map, pmap_pgtrace_map_t *, chain); + kfree(map, sizeof(pmap_pgtrace_map_t)); + } - while (!queue_empty(mappool)) { - queue_remove_first(mappool, map, pmap_pgtrace_map_t *, chain); - kfree(map, sizeof(pmap_pgtrace_map_t)); - } + while (!queue_empty(mappool)) { + queue_remove_first(mappool, map, pmap_pgtrace_map_t *, chain); + kfree(map, sizeof(pmap_pgtrace_map_t)); + } - while (!queue_empty(mapwaste)) { - queue_remove_first(mapwaste, map, pmap_pgtrace_map_t *, chain); - kfree(map, sizeof(pmap_pgtrace_map_t)); - } + while (!queue_empty(mapwaste)) { + queue_remove_first(mapwaste, map, pmap_pgtrace_map_t *, chain); + kfree(map, sizeof(pmap_pgtrace_map_t)); + } - kfree(p, sizeof(pmap_pgtrace_page_t)); + kfree(p, sizeof(pmap_pgtrace_page_t)); } // construct page infos with the given address range -int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end) -{ - int ret = 0; - pt_entry_t *ptep; - queue_head_t *q = &(pmap_pgtrace.pages); - bool ints; - vm_map_offset_t cur_page, end_page; - - if (start > end) { - kprintf("%s: invalid start=%llx > end=%llx\n", __func__, start, end); - return -1; - } - - PROF_START - - // add each page in given range - cur_page = arm_trunc_page(start); - end_page = arm_trunc_page(end); - while (cur_page <= end_page) { - pmap_paddr_t pa_page = 0; - uint64_t num_cloned = 0; - pmap_pgtrace_page_t *p = NULL, *newp; - bool free_newp = true; - pmap_pgtrace_page_state_t state; - - // do all allocations outside of spinlocks - newp = pmap_pgtrace_alloc_page(); - - // keep lock orders in pmap, kernel_pmap and pgtrace lock - if (pmap != NULL) { - PMAP_LOCK(pmap); - } - if (pmap != kernel_pmap) { - PMAP_LOCK(kernel_pmap); - } - - // addresses are physical if pmap is null - if (pmap == NULL) { - ptep = NULL; - pa_page = cur_page; - state = VA_UNDEFINED; - } else { - ptep = pmap_pte(pmap, cur_page); - if (ptep != NULL) { - pa_page = pte_to_pa(*ptep); - state = DEFINED; - } else { - state = PA_UNDEFINED; - } - } - - // search if we have a page info already - PMAP_PGTRACE_LOCK(&ints); - if (state != PA_UNDEFINED) { - p = pmap_pgtrace_find_page(pa_page); - } 
- - // add pre-allocated page info if nothing found - if (p == NULL) { - queue_enter_first(q, newp, pmap_pgtrace_page_t *, chain); - p = newp; - free_newp = false; - } - - // now p points what we want - p->state = state; - - queue_head_t *mapq = &(p->maps); - queue_head_t *mappool = &(p->map_pool); - pmap_pgtrace_map_t *map; - vm_map_offset_t start_offset, end_offset; - - // calculate trace offsets in the page - if (cur_page > start) { - start_offset = 0; - } else { - start_offset = start-cur_page; - } - if (cur_page == end_page) { - end_offset = end-end_page; - } else { - end_offset = ARM_PGBYTES-1; - } - - kprintf("%s: pmap=%p cur_page=%llx ptep=%p state=%d start_offset=%llx end_offset=%llx\n", __func__, pmap, cur_page, ptep, state, start_offset, end_offset); - - // fill map info - assert(!queue_empty(mappool)); - queue_remove_first(mappool, map, pmap_pgtrace_map_t *, chain); - if (p->state == PA_UNDEFINED) { - map->pmap = pmap; - map->ova = cur_page; - map->range.start = start_offset; - map->range.end = end_offset; - } else if (p->state == VA_UNDEFINED) { - p->pa = pa_page; - map->range.start = start_offset; - map->range.end = end_offset; - } else if (p->state == DEFINED) { - p->pa = pa_page; - map->pmap = pmap; - map->ova = cur_page; - map->range.start = start_offset; - map->range.end = end_offset; - } else { - panic("invalid p->state=%d\n", p->state); - } - - // not cloned yet - map->cloned = false; - queue_enter(mapq, map, pmap_pgtrace_map_t *, chain); - - // unlock locks - PMAP_PGTRACE_UNLOCK(&ints); - if (pmap != kernel_pmap) { - PMAP_UNLOCK(kernel_pmap); - } - if (pmap != NULL) { - PMAP_UNLOCK(pmap); - } - - // now clone it - if (pa_valid(pa_page)) { - num_cloned = pmap_pgtrace_clone_from_pvtable(pa_page, start_offset, end_offset); - } - if (pmap == NULL) { - num_cloned += pmap_pgtrace_clone_from_pa(kernel_pmap, pa_page, start_offset, end_offset); - } else { - num_cloned += pmap_pgtrace_clone_from_pa(pmap, pa_page, start_offset, end_offset); - } - - // free pre-allocations if we didn't add it to the q - if (free_newp) { - pmap_pgtrace_free_page(newp); - } - - if (num_cloned == 0) { - kprintf("%s: no mapping found for pa_page=%llx but will be added when a page entered\n", __func__, pa_page); - } - - ret += num_cloned; - - // overflow - if (cur_page + ARM_PGBYTES < cur_page) { - break; - } else { - cur_page += ARM_PGBYTES; - } - } - - PROF_END - - return ret; +int +pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end) +{ + int ret = 0; + pt_entry_t *ptep; + queue_head_t *q = &(pmap_pgtrace.pages); + bool ints; + vm_map_offset_t cur_page, end_page; + + if (start > end) { + kprintf("%s: invalid start=%llx > end=%llx\n", __func__, start, end); + return -1; + } + + PROF_START + + // add each page in given range + cur_page = arm_trunc_page(start); + end_page = arm_trunc_page(end); + while (cur_page <= end_page) { + pmap_paddr_t pa_page = 0; + uint64_t num_cloned = 0; + pmap_pgtrace_page_t *p = NULL, *newp; + bool free_newp = true; + pmap_pgtrace_page_state_t state; + + // do all allocations outside of spinlocks + newp = pmap_pgtrace_alloc_page(); + + // keep lock orders in pmap, kernel_pmap and pgtrace lock + if (pmap != NULL) { + PMAP_LOCK(pmap); + } + if (pmap != kernel_pmap) { + PMAP_LOCK(kernel_pmap); + } + + // addresses are physical if pmap is null + if (pmap == NULL) { + ptep = NULL; + pa_page = cur_page; + state = VA_UNDEFINED; + } else { + ptep = pmap_pte(pmap, cur_page); + if (ptep != NULL) { + pa_page = pte_to_pa(*ptep); + state = DEFINED; + } else { + 
state = PA_UNDEFINED; + } + } + + // search if we have a page info already + PMAP_PGTRACE_LOCK(&ints); + if (state != PA_UNDEFINED) { + p = pmap_pgtrace_find_page(pa_page); + } + + // add pre-allocated page info if nothing found + if (p == NULL) { + queue_enter_first(q, newp, pmap_pgtrace_page_t *, chain); + p = newp; + free_newp = false; + } + + // now p points what we want + p->state = state; + + queue_head_t *mapq = &(p->maps); + queue_head_t *mappool = &(p->map_pool); + pmap_pgtrace_map_t *map; + vm_map_offset_t start_offset, end_offset; + + // calculate trace offsets in the page + if (cur_page > start) { + start_offset = 0; + } else { + start_offset = start - cur_page; + } + if (cur_page == end_page) { + end_offset = end - end_page; + } else { + end_offset = ARM_PGBYTES - 1; + } + + kprintf("%s: pmap=%p cur_page=%llx ptep=%p state=%d start_offset=%llx end_offset=%llx\n", __func__, pmap, cur_page, ptep, state, start_offset, end_offset); + + // fill map info + assert(!queue_empty(mappool)); + queue_remove_first(mappool, map, pmap_pgtrace_map_t *, chain); + if (p->state == PA_UNDEFINED) { + map->pmap = pmap; + map->ova = cur_page; + map->range.start = start_offset; + map->range.end = end_offset; + } else if (p->state == VA_UNDEFINED) { + p->pa = pa_page; + map->range.start = start_offset; + map->range.end = end_offset; + } else if (p->state == DEFINED) { + p->pa = pa_page; + map->pmap = pmap; + map->ova = cur_page; + map->range.start = start_offset; + map->range.end = end_offset; + } else { + panic("invalid p->state=%d\n", p->state); + } + + // not cloned yet + map->cloned = false; + queue_enter(mapq, map, pmap_pgtrace_map_t *, chain); + + // unlock locks + PMAP_PGTRACE_UNLOCK(&ints); + if (pmap != kernel_pmap) { + PMAP_UNLOCK(kernel_pmap); + } + if (pmap != NULL) { + PMAP_UNLOCK(pmap); + } + + // now clone it + if (pa_valid(pa_page)) { + num_cloned = pmap_pgtrace_clone_from_pvtable(pa_page, start_offset, end_offset); + } + if (pmap == NULL) { + num_cloned += pmap_pgtrace_clone_from_pa(kernel_pmap, pa_page, start_offset, end_offset); + } else { + num_cloned += pmap_pgtrace_clone_from_pa(pmap, pa_page, start_offset, end_offset); + } + + // free pre-allocations if we didn't add it to the q + if (free_newp) { + pmap_pgtrace_free_page(newp); + } + + if (num_cloned == 0) { + kprintf("%s: no mapping found for pa_page=%llx but will be added when a page entered\n", __func__, pa_page); + } + + ret += num_cloned; + + // overflow + if (cur_page + ARM_PGBYTES < cur_page) { + break; + } else { + cur_page += ARM_PGBYTES; + } + } + + PROF_END + + return ret; } // delete page infos for given address range -int pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end) -{ - int ret = 0; - bool ints; - queue_head_t *q = &(pmap_pgtrace.pages); - pmap_pgtrace_page_t *p; - vm_map_offset_t cur_page, end_page; - - kprintf("%s start=%llx end=%llx\n", __func__, start, end); - - PROF_START - - pt_entry_t *ptep; - pmap_paddr_t pa_page; - - // remove page info from start to end - cur_page = arm_trunc_page(start); - end_page = arm_trunc_page(end); - while (cur_page <= end_page) { - p = NULL; - - if (pmap == NULL) { - pa_page = cur_page; - } else { - PMAP_LOCK(pmap); - ptep = pmap_pte(pmap, cur_page); - if (ptep == NULL) { - PMAP_UNLOCK(pmap); - goto cont; - } - pa_page = pte_to_pa(*ptep); - PMAP_UNLOCK(pmap); - } - - // remove all clones and validate - pmap_pgtrace_remove_all_clone(pa_page); - - // find page info and delete - PMAP_PGTRACE_LOCK(&ints); - p = pmap_pgtrace_find_page(pa_page); - 
if (p != NULL) { - queue_remove(q, p, pmap_pgtrace_page_t *, chain); - ret++; - } - PMAP_PGTRACE_UNLOCK(&ints); - - // free outside of locks - if (p != NULL) { - pmap_pgtrace_free_page(p); - } +int +pmap_pgtrace_delete_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end) +{ + int ret = 0; + bool ints; + queue_head_t *q = &(pmap_pgtrace.pages); + pmap_pgtrace_page_t *p; + vm_map_offset_t cur_page, end_page; + + kprintf("%s start=%llx end=%llx\n", __func__, start, end); + + PROF_START + + pt_entry_t *ptep; + pmap_paddr_t pa_page; + + // remove page info from start to end + cur_page = arm_trunc_page(start); + end_page = arm_trunc_page(end); + while (cur_page <= end_page) { + p = NULL; + + if (pmap == NULL) { + pa_page = cur_page; + } else { + PMAP_LOCK(pmap); + ptep = pmap_pte(pmap, cur_page); + if (ptep == NULL) { + PMAP_UNLOCK(pmap); + goto cont; + } + pa_page = pte_to_pa(*ptep); + PMAP_UNLOCK(pmap); + } + + // remove all clones and validate + pmap_pgtrace_remove_all_clone(pa_page); + + // find page info and delete + PMAP_PGTRACE_LOCK(&ints); + p = pmap_pgtrace_find_page(pa_page); + if (p != NULL) { + queue_remove(q, p, pmap_pgtrace_page_t *, chain); + ret++; + } + PMAP_PGTRACE_UNLOCK(&ints); + + // free outside of locks + if (p != NULL) { + pmap_pgtrace_free_page(p); + } cont: - // overflow - if (cur_page + ARM_PGBYTES < cur_page) { - break; - } else { - cur_page += ARM_PGBYTES; - } - } + // overflow + if (cur_page + ARM_PGBYTES < cur_page) { + break; + } else { + cur_page += ARM_PGBYTES; + } + } - PROF_END + PROF_END - return ret; + return ret; } -kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss) +kern_return_t +pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_state_t *ss) { - pt_entry_t *ptep; - pgtrace_run_result_t res; - pmap_pgtrace_page_t *p; - bool ints, found = false; - pmap_paddr_t pa; - - // Quick check if we are interested - ptep = pmap_pte(pmap, va); - if (!ptep || !(*ptep & ARM_PTE_PGTRACE)) { - return KERN_FAILURE; - } + pt_entry_t *ptep; + pgtrace_run_result_t res; + pmap_pgtrace_page_t *p; + bool ints, found = false; + pmap_paddr_t pa; - PMAP_PGTRACE_LOCK(&ints); + // Quick check if we are interested + ptep = pmap_pte(pmap, va); + if (!ptep || !(*ptep & ARM_PTE_PGTRACE)) { + return KERN_FAILURE; + } - // Check again since access is serialized - ptep = pmap_pte(pmap, va); - if (!ptep || !(*ptep & ARM_PTE_PGTRACE)) { - PMAP_PGTRACE_UNLOCK(&ints); - return KERN_FAILURE; + PMAP_PGTRACE_LOCK(&ints); - } else if ((*ptep & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE_VALID) { - // Somehow this cpu's tlb has not updated - kprintf("%s Somehow this cpu's tlb has not updated?\n", __func__); - PMAP_UPDATE_TLBS(pmap, va, va+ARM_PGBYTES); + // Check again since access is serialized + ptep = pmap_pte(pmap, va); + if (!ptep || !(*ptep & ARM_PTE_PGTRACE)) { + PMAP_PGTRACE_UNLOCK(&ints); + return KERN_FAILURE; + } else if ((*ptep & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE_VALID) { + // Somehow this cpu's tlb has not updated + kprintf("%s Somehow this cpu's tlb has not updated?\n", __func__); + PMAP_UPDATE_TLBS(pmap, va, va + ARM_PGBYTES); - PMAP_PGTRACE_UNLOCK(&ints); - return KERN_SUCCESS; - } + PMAP_PGTRACE_UNLOCK(&ints); + return KERN_SUCCESS; + } - // Find if this pa is what we are tracing - pa = pte_to_pa(*ptep); + // Find if this pa is what we are tracing + pa = pte_to_pa(*ptep); - p = pmap_pgtrace_find_page(arm_trunc_page(pa)); - if (p == NULL) { - panic("%s Can't find va=%llx pa=%llx from tracing pages\n", __func__, va, pa); - } + p = 
pmap_pgtrace_find_page(arm_trunc_page(pa)); + if (p == NULL) { + panic("%s Can't find va=%llx pa=%llx from tracing pages\n", __func__, va, pa); + } - // find if pmap and va are also matching - queue_head_t *mapq = &(p->maps); - queue_head_t *mapwaste = &(p->map_waste); - pmap_pgtrace_map_t *map; + // find if pmap and va are also matching + queue_head_t *mapq = &(p->maps); + queue_head_t *mapwaste = &(p->map_waste); + pmap_pgtrace_map_t *map; - queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { - if (map->pmap == pmap && map->ova == arm_trunc_page(va)) { - found = true; - break; - } - } + queue_iterate(mapq, map, pmap_pgtrace_map_t *, chain) { + if (map->pmap == pmap && map->ova == arm_trunc_page(va)) { + found = true; + break; + } + } - // if not found, search map waste as they are still valid - if (!found) { - queue_iterate(mapwaste, map, pmap_pgtrace_map_t *, chain) { - if (map->pmap == pmap && map->ova == arm_trunc_page(va)) { - found = true; - break; - } - } - } + // if not found, search map waste as they are still valid + if (!found) { + queue_iterate(mapwaste, map, pmap_pgtrace_map_t *, chain) { + if (map->pmap == pmap && map->ova == arm_trunc_page(va)) { + found = true; + break; + } + } + } - if (!found) { - panic("%s Can't find va=%llx pa=%llx from tracing pages\n", __func__, va, pa); - } + if (!found) { + panic("%s Can't find va=%llx pa=%llx from tracing pages\n", __func__, va, pa); + } - // Decode and run it on the clone map - bzero(&res, sizeof(res)); - pgtrace_decode_and_run(*(uint32_t *)get_saved_state_pc(ss), // instruction - va, map->cva, // fault va and clone page vas - ss, &res); + // Decode and run it on the clone map + bzero(&res, sizeof(res)); + pgtrace_decode_and_run(*(uint32_t *)get_saved_state_pc(ss), // instruction + va, map->cva, // fault va and clone page vas + ss, &res); - // write a log if in range - vm_map_offset_t offset = va - map->ova; - if (map->range.start <= offset && offset <= map->range.end) { - pgtrace_write_log(res); - } + // write a log if in range + vm_map_offset_t offset = va - map->ova; + if (map->range.start <= offset && offset <= map->range.end) { + pgtrace_write_log(res); + } - PMAP_PGTRACE_UNLOCK(&ints); + PMAP_PGTRACE_UNLOCK(&ints); - // Return to next instruction - set_saved_state_pc(ss, get_saved_state_pc(ss) + sizeof(uint32_t)); + // Return to next instruction + set_saved_state_pc(ss, get_saved_state_pc(ss) + sizeof(uint32_t)); - return KERN_SUCCESS; + return KERN_SUCCESS; } #endif @@ -11152,7 +11451,7 @@ pmap_enforces_execute_only( pmap_t pmap) { #if (__ARM_VMSA__ > 7) - return (pmap != kernel_pmap); + return pmap != kernel_pmap; #else return FALSE; #endif @@ -11174,9 +11473,9 @@ pmap_set_jit_entitled( MARK_AS_PMAP_TEXT static kern_return_t pmap_query_page_info_internal( - pmap_t pmap, - vm_map_offset_t va, - int *disp_p) + pmap_t pmap, + vm_map_offset_t va, + int *disp_p) { pmap_paddr_t pa; int disp; @@ -11221,7 +11520,7 @@ pmap_query_page_info_internal( if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) { pve_p = pvh_list(pv_h); while (pve_p != PV_ENTRY_NULL && - pve_get_ptep(pve_p) != pte) { + pve_get_ptep(pve_p) != pte) { pve_p = PVE_NEXT_PTR(pve_next(pve_p)); } } @@ -11245,9 +11544,9 @@ done: kern_return_t pmap_query_page_info( - pmap_t pmap, - vm_map_offset_t va, - int *disp_p) + pmap_t pmap, + vm_map_offset_t va, + int *disp_p) { return pmap_query_page_info_internal(pmap, va, disp_p); } @@ -11269,8 +11568,8 @@ pmap_return(boolean_t do_panic, boolean_t do_recurse) MARK_AS_PMAP_TEXT static void pmap_footprint_suspend_internal( - vm_map_t 
map, - boolean_t suspend) + vm_map_t map, + boolean_t suspend) { #if DEVELOPMENT || DEBUG if (suspend) { @@ -11313,17 +11612,17 @@ struct page_table_dump_header { }; struct page_table_level_info page_table_levels[] = - { { ARM_TT_L0_SIZE, ARM_TT_L0_OFFMASK, ARM_TT_L0_SHIFT, ARM_TT_L0_INDEX_MASK, ARM_TTE_VALID, ARM_TTE_TYPE_MASK, ARM_TTE_TYPE_BLOCK }, - { ARM_TT_L1_SIZE, ARM_TT_L1_OFFMASK, ARM_TT_L1_SHIFT, ARM_TT_L1_INDEX_MASK, ARM_TTE_VALID, ARM_TTE_TYPE_MASK, ARM_TTE_TYPE_BLOCK }, - { ARM_TT_L2_SIZE, ARM_TT_L2_OFFMASK, ARM_TT_L2_SHIFT, ARM_TT_L2_INDEX_MASK, ARM_TTE_VALID, ARM_TTE_TYPE_MASK, ARM_TTE_TYPE_BLOCK }, - { ARM_TT_L3_SIZE, ARM_TT_L3_OFFMASK, ARM_TT_L3_SHIFT, ARM_TT_L3_INDEX_MASK, ARM_PTE_TYPE_VALID, ARM_PTE_TYPE_MASK, ARM_TTE_TYPE_L3BLOCK } }; +{ { ARM_TT_L0_SIZE, ARM_TT_L0_OFFMASK, ARM_TT_L0_SHIFT, ARM_TT_L0_INDEX_MASK, ARM_TTE_VALID, ARM_TTE_TYPE_MASK, ARM_TTE_TYPE_BLOCK }, + { ARM_TT_L1_SIZE, ARM_TT_L1_OFFMASK, ARM_TT_L1_SHIFT, ARM_TT_L1_INDEX_MASK, ARM_TTE_VALID, ARM_TTE_TYPE_MASK, ARM_TTE_TYPE_BLOCK }, + { ARM_TT_L2_SIZE, ARM_TT_L2_OFFMASK, ARM_TT_L2_SHIFT, ARM_TT_L2_INDEX_MASK, ARM_TTE_VALID, ARM_TTE_TYPE_MASK, ARM_TTE_TYPE_BLOCK }, + { ARM_TT_L3_SIZE, ARM_TT_L3_OFFMASK, ARM_TT_L3_SHIFT, ARM_TT_L3_INDEX_MASK, ARM_PTE_TYPE_VALID, ARM_PTE_TYPE_MASK, ARM_TTE_TYPE_L3BLOCK } }; static size_t pmap_dump_page_tables_recurse(const tt_entry_t *ttp, - unsigned int cur_level, - uint64_t start_va, - void *bufp, - void *buf_end) + unsigned int cur_level, + uint64_t start_va, + void *bufp, + void *buf_end) { size_t bytes_used = 0; uint64_t num_entries = ARM_PGBYTES / sizeof(*ttp); @@ -11332,8 +11631,9 @@ pmap_dump_page_tables_recurse(const tt_entry_t *ttp, uint64_t type_mask = page_table_levels[cur_level].type_mask; uint64_t type_block = page_table_levels[cur_level].type_block; - if (cur_level == arm64_root_pgtable_level) + if (cur_level == arm64_root_pgtable_level) { num_entries = arm64_root_pgtable_num_ttes; + } uint64_t tt_size = num_entries * sizeof(tt_entry_t); const tt_entry_t *tt_end = &ttp[num_entries]; @@ -11364,9 +11664,9 @@ pmap_dump_page_tables_recurse(const tt_entry_t *ttp, } else { if (cur_level >= PMAP_TT_MAX_LEVEL) { panic("%s: corrupt entry %#llx at %p, " - "ttp=%p, cur_level=%u, bufp=%p, buf_end=%p", - __FUNCTION__, tte, ttep, - ttp, cur_level, bufp, buf_end); + "ttp=%p, cur_level=%u, bufp=%p, buf_end=%p", + __FUNCTION__, tte, ttep, + ttp, cur_level, bufp, buf_end); } const tt_entry_t *next_tt = (const tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK); @@ -11387,8 +11687,9 @@ pmap_dump_page_tables_recurse(const tt_entry_t *ttp, size_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end) { - if (not_in_kdp) + if (not_in_kdp) { panic("pmap_dump_page_tables must only be called from kernel debugger context"); + } return pmap_dump_page_tables_recurse(pmap->tte, arm64_root_pgtable_level, pmap->min, bufp, buf_end); } @@ -11401,4 +11702,3 @@ pmap_dump_page_tables(pmap_t pmap __unused, void *bufp __unused, void *buf_end _ } #endif /* !defined(__arm64__) */ - diff --git a/osfmk/arm/pmap.h b/osfmk/arm/pmap.h index 88d89086c..50464cd10 100644 --- a/osfmk/arm/pmap.h +++ b/osfmk/arm/pmap.h @@ -27,7 +27,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _ARM_PMAP_H_ -#define _ARM_PMAP_H_ 1 +#define _ARM_PMAP_H_ 1 #include @@ -54,15 +54,15 @@ * For __ARM_KERNEL_PROTECT__, we need twice as many ASIDs to support having * unique EL0 and EL1 ASIDs for each pmap. 
*/ -#define ASID_SHIFT (12) /* Shift for the maximum virtual ASID value (2048)*/ +#define ASID_SHIFT (12) /* Shift for the maximum virtual ASID value (2048)*/ #else /* __ARM_KERNEL_PROTECT__ */ -#define ASID_SHIFT (11) /* Shift for the maximum virtual ASID value (2048) */ +#define ASID_SHIFT (11) /* Shift for the maximum virtual ASID value (2048) */ #endif /* __ARM_KERNEL_PROTECT__ */ -#define MAX_ASID (1 << ASID_SHIFT) /* Max supported ASIDs (can be virtual) */ -#define ARM_ASID_SHIFT (8) /* Shift for the maximum ARM ASID value (256) */ -#define ARM_MAX_ASID (1 << ARM_ASID_SHIFT) /* Max ASIDs supported by the hardware */ -#define ASID_VIRT_BITS (ASID_SHIFT - ARM_ASID_SHIFT) /* The number of virtual bits in a virtaul ASID */ -#define NBBY 8 +#define MAX_ASID (1 << ASID_SHIFT) /* Max supported ASIDs (can be virtual) */ +#define ARM_ASID_SHIFT (8) /* Shift for the maximum ARM ASID value (256) */ +#define ARM_MAX_ASID (1 << ARM_ASID_SHIFT) /* Max ASIDs supported by the hardware */ +#define ASID_VIRT_BITS (ASID_SHIFT - ARM_ASID_SHIFT) /* The number of virtual bits in a virtaul ASID */ +#define NBBY 8 struct pmap_cpu_data { #if defined(__arm64__) @@ -116,19 +116,19 @@ typedef struct pmap_cpu_data pmap_cpu_data_t; #define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8) -typedef uint64_t tt_entry_t; /* translation table entry type */ -#define TT_ENTRY_NULL ((tt_entry_t *) 0) +typedef uint64_t tt_entry_t; /* translation table entry type */ +#define TT_ENTRY_NULL ((tt_entry_t *) 0) -typedef uint64_t pt_entry_t; /* page table entry type */ -#define PT_ENTRY_NULL ((pt_entry_t *) 0) +typedef uint64_t pt_entry_t; /* page table entry type */ +#define PT_ENTRY_NULL ((pt_entry_t *) 0) #elif defined(__arm__) -typedef uint32_t tt_entry_t; /* translation table entry type */ -#define PT_ENTRY_NULL ((pt_entry_t *) 0) +typedef uint32_t tt_entry_t; /* translation table entry type */ +#define PT_ENTRY_NULL ((pt_entry_t *) 0) -typedef uint32_t pt_entry_t; /* page table entry type */ -#define TT_ENTRY_NULL ((tt_entry_t *) 0) +typedef uint32_t pt_entry_t; /* page table entry type */ +#define TT_ENTRY_NULL ((tt_entry_t *) 0) #else #error unknown arch @@ -136,7 +136,7 @@ typedef uint32_t pt_entry_t; /* page table entry type */ /* superpages */ -#define SUPERPAGE_NBASEPAGES 1 /* No superpages support */ +#define SUPERPAGE_NBASEPAGES 1 /* No superpages support */ /* * Convert addresses to pages and vice versa. @@ -163,15 +163,15 @@ typedef uint32_t pt_entry_t; /* page table entry type */ * the hardware page size), we will need to determine what the page * ratio is. 
*/ -#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT) -#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4) +#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT) +#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4) #if (__ARM_VMSA__ <= 7) -#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t)) -#define NPTES ((ARM_PGBYTES/4) /sizeof(pt_entry_t)) +#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t)) +#define NPTES ((ARM_PGBYTES/4) /sizeof(pt_entry_t)) #else -#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t)) -#define NPTES (ARM_PGBYTES / sizeof(pt_entry_t)) +#define NTTES (ARM_PGBYTES / sizeof(tt_entry_t)) +#define NPTES (ARM_PGBYTES / sizeof(pt_entry_t)) #endif extern void sync_tlb_flush(void); @@ -227,57 +227,57 @@ extern void set_context_id(uint32_t); #endif extern pmap_paddr_t get_mmu_ttb(void); -extern pmap_paddr_t mmu_kvtop(vm_offset_t va); -extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va); -extern pmap_paddr_t mmu_uvtop(vm_offset_t va); +extern pmap_paddr_t mmu_kvtop(vm_offset_t va); +extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va); +extern pmap_paddr_t mmu_uvtop(vm_offset_t va); #if (__ARM_VMSA__ <= 7) /* Convert address offset to translation table index */ -#define ttenum(a) ((a) >> ARM_TT_L1_SHIFT) +#define ttenum(a) ((a) >> ARM_TT_L1_SHIFT) /* Convert translation table index to user virtual address */ -#define tteitova(a) ((a) << ARM_TT_L1_SHIFT) +#define tteitova(a) ((a) << ARM_TT_L1_SHIFT) -#define pa_to_suptte(a) ((a) & ARM_TTE_SUPER_L1_MASK) -#define suptte_to_pa(p) ((p) & ARM_TTE_SUPER_L1_MASK) +#define pa_to_suptte(a) ((a) & ARM_TTE_SUPER_L1_MASK) +#define suptte_to_pa(p) ((p) & ARM_TTE_SUPER_L1_MASK) -#define pa_to_sectte(a) ((a) & ARM_TTE_BLOCK_L1_MASK) -#define sectte_to_pa(p) ((p) & ARM_TTE_BLOCK_L1_MASK) +#define pa_to_sectte(a) ((a) & ARM_TTE_BLOCK_L1_MASK) +#define sectte_to_pa(p) ((p) & ARM_TTE_BLOCK_L1_MASK) -#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK) -#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK) +#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK) +#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK) -#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK) -#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK) -#define pte_increment_pa(p) ((p) += ptoa(1)) +#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK) +#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK) +#define pte_increment_pa(p) ((p) += ptoa(1)) -#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE/0x1000)*4*ARM_TT_L1_SIZE) -#define ARM_NESTING_SIZE_MAX ((256*ARM_TT_L1_SIZE)) +#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE/0x1000)*4*ARM_TT_L1_SIZE) +#define ARM_NESTING_SIZE_MAX ((256*ARM_TT_L1_SIZE)) #else /* Convert address offset to translation table index */ -#define ttel0num(a) ((a & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT) -#define ttel1num(a) ((a & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT) -#define ttel2num(a) ((a & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT) +#define ttel0num(a) ((a & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT) +#define ttel1num(a) ((a & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT) +#define ttel2num(a) ((a & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT) -#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK) -#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK) +#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK) +#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK) -#define pa_to_pte(a) ((a) & ARM_PTE_MASK) -#define pte_to_pa(p) ((p) & ARM_PTE_MASK) -#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT) -#define pte_increment_pa(p) ((p) += ptoa(1)) +#define pa_to_pte(a) ((a) & ARM_PTE_MASK) +#define pte_to_pa(p) ((p) & ARM_PTE_MASK) +#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> 
ARM_PTE_APSHIFT) +#define pte_increment_pa(p) ((p) += ptoa(1)) -#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE/ARM_PGBYTES)*ARM_TT_L2_SIZE) -#define ARM_NESTING_SIZE_MAX (0x0000000010000000ULL) +#define ARM_NESTING_SIZE_MIN ((PAGE_SIZE/ARM_PGBYTES)*ARM_TT_L2_SIZE) +#define ARM_NESTING_SIZE_MAX (0x0000000010000000ULL) -#define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE)) +#define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE)) -#endif /* __ARM_VMSA__ <= 7 */ +#endif /* __ARM_VMSA__ <= 7 */ -#define PMAP_GC_INFLIGHT 1 -#define PMAP_GC_WAIT 2 +#define PMAP_GC_INFLIGHT 1 +#define PMAP_GC_WAIT 2 #if DEVELOPMENT || DEBUG #define pmap_cs_log(msg, args...) printf("PMAP_CS: " msg "\n", args) @@ -298,53 +298,53 @@ extern pmap_paddr_t mmu_uvtop(vm_offset_t va); #define ptetokv(a) (phystokv(pte_to_pa(a))) struct pmap { - tt_entry_t *tte; /* translation table entries */ - pmap_paddr_t ttep; /* translation table physical */ - vm_map_address_t min; /* min address in pmap */ - vm_map_address_t max; /* max address in pmap */ - ledger_t ledger; /* ledger tracking phys mappings */ - decl_simple_lock_data(,lock) /* lock on map */ - struct pmap_statistics stats; /* map statistics */ - queue_chain_t pmaps; /* global list of pmaps */ - tt_entry_t *tt_entry_free; /* free translation table entries */ - tt_entry_t *prev_tte; /* previous translation table */ - struct pmap *nested_pmap; /* nested pmap */ - vm_map_address_t nested_region_grand_addr; - vm_map_address_t nested_region_subord_addr; - vm_map_offset_t nested_region_size; - vm_map_offset_t nested_region_true_start; - vm_map_offset_t nested_region_true_end; - unsigned int *nested_region_asid_bitmap; + tt_entry_t *tte; /* translation table entries */ + pmap_paddr_t ttep; /* translation table physical */ + vm_map_address_t min; /* min address in pmap */ + vm_map_address_t max; /* max address in pmap */ + ledger_t ledger; /* ledger tracking phys mappings */ + decl_simple_lock_data(, lock) /* lock on map */ + struct pmap_statistics stats; /* map statistics */ + queue_chain_t pmaps; /* global list of pmaps */ + tt_entry_t *tt_entry_free; /* free translation table entries */ + tt_entry_t *prev_tte; /* previous translation table */ + struct pmap *nested_pmap; /* nested pmap */ + vm_map_address_t nested_region_grand_addr; + vm_map_address_t nested_region_subord_addr; + vm_map_offset_t nested_region_size; + vm_map_offset_t nested_region_true_start; + vm_map_offset_t nested_region_true_end; + unsigned int *nested_region_asid_bitmap; #if (__ARM_VMSA__ <= 7) - decl_simple_lock_data(,tt1_lock) /* lock on tt1 */ - unsigned int cpu_ref; /* number of cpus using pmap */ + decl_simple_lock_data(, tt1_lock) /* lock on tt1 */ + unsigned int cpu_ref; /* number of cpus using pmap */ #endif - unsigned int asid; /* address space id */ - unsigned int vasid; /* Virtual address space id */ - unsigned int stamp; /* creation stamp */ - _Atomic int32_t ref_count; /* pmap reference count */ - unsigned int gc_status; /* gc status */ - unsigned int nested_region_asid_bitmap_size; - unsigned int tte_index_max; /* max tte index in translation table entries */ - uint32_t nested_no_bounds_refcnt;/* number of pmaps that nested this pmap without bounds set */ + unsigned int asid; /* address space id */ + unsigned int vasid; /* Virtual address space id */ + unsigned int stamp; /* creation stamp */ + _Atomic int32_t ref_count; /* pmap reference count */ + unsigned int gc_status; /* gc status */ + unsigned int nested_region_asid_bitmap_size; + unsigned int tte_index_max; /* 
max tte index in translation table entries */ + uint32_t nested_no_bounds_refcnt;/* number of pmaps that nested this pmap without bounds set */ #if MACH_ASSERT - int pmap_pid; - char pmap_procname[17]; - bool pmap_stats_assert; + int pmap_pid; + char pmap_procname[17]; + bool pmap_stats_assert; #endif /* MACH_ASSERT */ #if DEVELOPMENT || DEBUG - bool footprint_suspended; - bool footprint_was_suspended; + bool footprint_suspended; + bool footprint_was_suspended; #endif /* DEVELOPMENT || DEBUG */ - bool nx_enabled; /* no execute */ - bool nested; /* is nested */ - bool is_64bit; /* is 64bit */ - bool nested_has_no_bounds_ref; /* nested a pmap when the bounds were not set */ - bool nested_bounds_set; /* The nesting bounds have been set */ + bool nx_enabled; /* no execute */ + bool nested; /* is nested */ + bool is_64bit; /* is 64bit */ + bool nested_has_no_bounds_ref; /* nested a pmap when the bounds were not set */ + bool nested_bounds_set; /* The nesting bounds have been set */ }; /* typedef struct pmap *pmap_t; */ @@ -354,25 +354,25 @@ struct pmap { /* * WIMG control */ -#define VM_MEM_INNER 0x10 -#define VM_MEM_RT 0x10 // intentionally alias VM_MEM_INNER; will be used with mutually exclusive caching policies -#define VM_MEM_EARLY_ACK 0x20 - -#define VM_WIMG_DEFAULT (VM_MEM_COHERENT) -#define VM_WIMG_COPYBACK (VM_MEM_COHERENT) -#define VM_WIMG_INNERWBACK (VM_MEM_COHERENT | VM_MEM_INNER) -#define VM_WIMG_IO (VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED) -#define VM_WIMG_POSTED (VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED | VM_MEM_EARLY_ACK) -#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED) -#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT) -#define VM_WIMG_RT (VM_WIMG_IO | VM_MEM_RT) +#define VM_MEM_INNER 0x10 +#define VM_MEM_RT 0x10 // intentionally alias VM_MEM_INNER; will be used with mutually exclusive caching policies +#define VM_MEM_EARLY_ACK 0x20 + +#define VM_WIMG_DEFAULT (VM_MEM_COHERENT) +#define VM_WIMG_COPYBACK (VM_MEM_COHERENT) +#define VM_WIMG_INNERWBACK (VM_MEM_COHERENT | VM_MEM_INNER) +#define VM_WIMG_IO (VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED) +#define VM_WIMG_POSTED (VM_MEM_COHERENT | VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED | VM_MEM_EARLY_ACK) +#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED) +#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT) +#define VM_WIMG_RT (VM_WIMG_IO | VM_MEM_RT) #if VM_DEBUG extern int pmap_list_resident_pages( - pmap_t pmap, - vm_offset_t *listp, - int space - ); + pmap_t pmap, + vm_offset_t *listp, + int space + ); #else /* #if VM_DEBUG */ #define pmap_list_resident_pages(pmap, listp, space) (0) #endif /* #if VM_DEBUG */ @@ -380,7 +380,7 @@ extern int pmap_list_resident_pages( extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied); /* globals shared between arm_vm_init and pmap */ -extern tt_entry_t *cpu_tte; /* first CPUs translation table (shared with kernel pmap) */ +extern tt_entry_t *cpu_tte; /* first CPUs translation table (shared with kernel pmap) */ extern pmap_paddr_t cpu_ttep; /* physical translation table addr */ #if __arm64__ @@ -389,7 +389,7 @@ extern void *ropagetable_end; #endif #if __arm64__ -extern tt_entry_t *invalid_tte; /* global invalid translation table */ +extern tt_entry_t *invalid_tte; /* global invalid translation table */ extern pmap_paddr_t invalid_ttep; /* physical invalid translation table addr */ #endif @@ -401,11 +401,11 @@ extern 
pmap_paddr_t invalid_ttep; /* physical invalid translation table addr */ extern void pmap_switch_user_ttb(pmap_t pmap); extern void pmap_clear_user_ttb(void); extern void pmap_bootstrap(vm_offset_t); -extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t); +extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t); extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va); extern void pmap_set_pmap(pmap_t pmap, thread_t thread); extern void pmap_collect(pmap_t pmap); -extern void pmap_gc(void); +extern void pmap_gc(void); #if defined(__arm64__) extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va); #endif @@ -414,44 +414,44 @@ extern vm_offset_t pmap_extract(pmap_t pmap, vm_map_offset_t va); * Interfaces implemented as macros. */ -#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ - th->map = new_map; \ - pmap_set_pmap(vm_map_pmap(new_map), th); \ +#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ + th->map = new_map; \ + pmap_set_pmap(vm_map_pmap(new_map), th); \ } -#define pmap_kernel() \ +#define pmap_kernel() \ (kernel_pmap) -#define pmap_compressed(pmap) \ +#define pmap_compressed(pmap) \ ((pmap)->stats.compressed) -#define pmap_resident_count(pmap) \ +#define pmap_resident_count(pmap) \ ((pmap)->stats.resident_count) -#define pmap_resident_max(pmap) \ +#define pmap_resident_max(pmap) \ ((pmap)->stats.resident_max) #define MACRO_NOOP -#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) \ +#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) \ MACRO_NOOP -#define pmap_pageable(pmap, start, end, pageable) \ +#define pmap_pageable(pmap, start, end, pageable) \ MACRO_NOOP -#define pmap_kernel_va(VA) \ +#define pmap_kernel_va(VA) \ (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS)) -#define pmap_attribute(pmap,addr,size,attr,value) \ +#define pmap_attribute(pmap, addr, size, attr, value) \ (KERN_INVALID_ADDRESS) -#define copyinmsg(from, to, cnt) \ +#define copyinmsg(from, to, cnt) \ copyin(from, to, cnt) -#define copyoutmsg(from, to, cnt) \ +#define copyoutmsg(from, to, cnt) \ copyout(from, to, cnt) -extern pmap_paddr_t kvtophys(vm_offset_t va); +extern pmap_paddr_t kvtophys(vm_offset_t va); extern vm_map_address_t phystokv(pmap_paddr_t pa); extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len); @@ -460,10 +460,10 @@ extern vm_map_address_t pmap_map_high_window_bd( vm_offset_t pa, vm_size_t len, extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags); extern void pmap_map_globals(void); -#define PMAP_MAP_BD_DEVICE 0x1 -#define PMAP_MAP_BD_WCOMB 0x2 -#define PMAP_MAP_BD_POSTED 0x3 -#define PMAP_MAP_BD_MASK 0x3 +#define PMAP_MAP_BD_DEVICE 0x1 +#define PMAP_MAP_BD_WCOMB 0x2 +#define PMAP_MAP_BD_POSTED 0x3 +#define PMAP_MAP_BD_MASK 0x3 extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options); extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot); @@ -482,19 +482,19 @@ extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsign extern void pmap_unmap_cpu_windows_copy(unsigned int index); extern void pt_fake_zone_init(int); -extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *, - uint64_t *, int *, int *, int *); +extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *, + uint64_t *, int *, int *, int *); extern boolean_t pmap_valid_page(ppnum_t 
pn); #define MACHINE_PMAP_IS_EMPTY 1 extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end); -#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01 -#define ARM_PMAP_MAX_OFFSET_MIN 0x02 -#define ARM_PMAP_MAX_OFFSET_MAX 0x04 -#define ARM_PMAP_MAX_OFFSET_DEVICE 0x08 -#define ARM_PMAP_MAX_OFFSET_JUMBO 0x10 +#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01 +#define ARM_PMAP_MAX_OFFSET_MIN 0x02 +#define ARM_PMAP_MAX_OFFSET_MAX 0x04 +#define ARM_PMAP_MAX_OFFSET_DEVICE 0x08 +#define ARM_PMAP_MAX_OFFSET_JUMBO 0x10 extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option); @@ -545,6 +545,7 @@ boolean_t pmap_enforces_execute_only(pmap_t pmap); #define PMAP_SET_JIT_ENTITLED_INDEX 36 +#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 57 #define PMAP_TRIM_INDEX 64 #define PMAP_LEDGER_ALLOC_INIT_INDEX 65 #define PMAP_LEDGER_ALLOC_INDEX 66 @@ -573,12 +574,14 @@ extern pmap_cpu_data_t * pmap_get_cpu_data(void); extern kern_return_t pmap_return(boolean_t do_panic, boolean_t do_recurse); -#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz) -#define pmap_simple_lock(l) simple_lock(l) -#define pmap_simple_unlock(l) simple_unlock(l) -#define pmap_simple_lock_try(l) simple_lock_try(l) -#define pmap_lock_bit(l, i) hw_lock_bit(l, i) -#define pmap_unlock_bit(l, i) hw_unlock_bit(l, i) +extern lck_grp_t pmap_lck_grp; + +#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz) +#define pmap_simple_lock(l) simple_lock(l, &pmap_lck_grp) +#define pmap_simple_unlock(l) simple_unlock(l) +#define pmap_simple_lock_try(l) simple_lock_try(l, &pmap_lck_grp) +#define pmap_lock_bit(l, i) hw_lock_bit(l, i, &pmap_lck_grp) +#define pmap_unlock_bit(l, i) hw_unlock_bit(l, i) #endif /* #ifndef ASSEMBLER */ diff --git a/osfmk/arm/pmap_public.h b/osfmk/arm/pmap_public.h index 98393ccd5..8b5b385a5 100644 --- a/osfmk/arm/pmap_public.h +++ b/osfmk/arm/pmap_public.h @@ -36,9 +36,9 @@ __BEGIN_DECLS #if defined(__arm64__) -typedef uint64_t pmap_paddr_t; /* physical address (not ppnum_t) */ +typedef uint64_t pmap_paddr_t; /* physical address (not ppnum_t) */ #else -typedef uint32_t pmap_paddr_t; /* physical address (not ppnum_t) */ +typedef uint32_t pmap_paddr_t; /* physical address (not ppnum_t) */ #endif diff --git a/osfmk/arm/proc_reg.h b/osfmk/arm/proc_reg.h index 5d170e88d..45536d43f 100644 --- a/osfmk/arm/proc_reg.h +++ b/osfmk/arm/proc_reg.h @@ -29,28 +29,28 @@ * @OSF_COPYRIGHT@ */ /* CMU_ENDHIST */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
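Aside from whitespace, the functional change in the pmap locking hunk above is that the wrappers now pass a dedicated lock group (pmap_lck_grp) down to simple_lock()/hw_lock_bit(), so contended acquisitions can be attributed to the pmap subsystem. A minimal userspace sketch of that pattern, using pthreads and hypothetical names (lock_grp, grp_lock) in place of lck_grp_t and simple_lock():

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for lck_grp_t: a name plus a contention counter. */
typedef struct {
    const char *name;
    atomic_ulong contended;
} lock_grp;

static lock_grp pmap_grp = { "pmap", 0 };

/* Analog of simple_lock(l, grp): charge contention to the group. */
static void grp_lock(pthread_mutex_t *l, lock_grp *grp)
{
    if (pthread_mutex_trylock(l) != 0) {       /* lock already held */
        atomic_fetch_add(&grp->contended, 1);  /* attribute the wait */
        pthread_mutex_lock(l);                 /* then block for it */
    }
}

/* Wrappers in the style of pmap_simple_lock()/pmap_simple_unlock(). */
#define pmap_simple_lock(l)   grp_lock((l), &pmap_grp)
#define pmap_simple_unlock(l) pthread_mutex_unlock(l)

int main(void)
{
    pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    pmap_simple_lock(&m);
    pmap_simple_unlock(&m);
    printf("%s contended %lu times\n", pmap_grp.name,
        atomic_load(&pmap_grp.contended));
    return 0;
}

One group per subsystem keeps the per-call cost at a single extra pointer argument while still yielding usable contention statistics.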
*/ @@ -61,8 +61,8 @@ /* * Processor registers for ARM */ -#ifndef _ARM_PROC_REG_H_ -#define _ARM_PROC_REG_H_ +#ifndef _ARM_PROC_REG_H_ +#define _ARM_PROC_REG_H_ #if defined (__arm64__) #include @@ -78,103 +78,103 @@ #if defined(__XNU_UP__) #define __ARM_SMP__ 0 #else -#define __ARM_SMP__ 1 +#define __ARM_SMP__ 1 /* For SMP kernels, force physical aperture to be mapped at PTE level so that its mappings * can be updated to reflect cache attribute changes on alias mappings. This prevents * prefetched physical aperture cachelines from becoming dirty in L1 due to a write to * an uncached alias mapping on the same core. Subsequent uncached writes from another * core may not snoop this line, and the dirty line may end up being evicted later to * effectively overwrite the uncached writes from other cores. */ -#define __ARM_PTE_PHYSMAP__ 1 +#define __ARM_PTE_PHYSMAP__ 1 #endif /* __ARMA7_SMP__ controls whether we are consistent with the A7 MP_CORE spec; needed because entities other than * the xnu-managed processors may need to snoop our cache operations. */ -#define __ARMA7_SMP__ 1 +#define __ARMA7_SMP__ 1 #define __ARM_COHERENT_CACHE__ 1 -#define __ARM_L1_PTW__ 1 +#define __ARM_L1_PTW__ 1 #define __ARM_DEBUG__ 7 #define __ARM_USER_PROTECT__ 1 -#define __ARM_TIME_TIMEBASE_ONLY__ 1 +#define __ARM_TIME_TIMEBASE_ONLY__ 1 #elif defined (APPLECYCLONE) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 +#define __ARM_ARCH__ 8 +#define __ARM_VMSA__ 8 +#define __ARM_SMP__ 1 +#define __ARM_VFP__ 4 #define __ARM_COHERENT_CACHE__ 1 #define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 +#define __ARM_IC_NOALIAS_ICACHE__ 1 #define __ARM_L1_PTW__ 1 -#define __ARM_DEBUG__ 7 +#define __ARM_DEBUG__ 7 #define __ARM_ENABLE_SWAP__ 1 #define __ARM_V8_CRYPTO_EXTENSIONS__ 1 #define __ARM64_PMAP_SUBPAGE_L1__ 1 #define __ARM_KERNEL_PROTECT__ 1 #elif defined (APPLETYPHOON) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 +#define __ARM_ARCH__ 8 +#define __ARM_VMSA__ 8 +#define __ARM_SMP__ 1 +#define __ARM_VFP__ 4 #define __ARM_COHERENT_CACHE__ 1 #define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 +#define __ARM_IC_NOALIAS_ICACHE__ 1 #define __ARM_L1_PTW__ 1 -#define __ARM_DEBUG__ 7 +#define __ARM_DEBUG__ 7 #define __ARM_ENABLE_SWAP__ 1 #define __ARM_V8_CRYPTO_EXTENSIONS__ 1 #define __ARM64_PMAP_SUBPAGE_L1__ 1 #define __ARM_KERNEL_PROTECT__ 1 #elif defined (APPLETWISTER) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 +#define __ARM_ARCH__ 8 +#define __ARM_VMSA__ 8 +#define __ARM_SMP__ 1 +#define __ARM_VFP__ 4 #define __ARM_COHERENT_CACHE__ 1 #define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 +#define __ARM_IC_NOALIAS_ICACHE__ 1 #define __ARM_L1_PTW__ 1 -#define __ARM_DEBUG__ 7 +#define __ARM_DEBUG__ 7 #define __ARM_ENABLE_SWAP__ 1 #define __ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM_16K_PG__ 1 +#define __ARM_16K_PG__ 1 #define __ARM64_PMAP_SUBPAGE_L1__ 1 #define __ARM_KERNEL_PROTECT__ 1 #elif defined (APPLEHURRICANE) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 +#define __ARM_ARCH__ 8 +#define __ARM_VMSA__ 8 +#define __ARM_SMP__ 1 +#define __ARM_VFP__ 4 #define __ARM_COHERENT_CACHE__ 1 #define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 +#define __ARM_IC_NOALIAS_ICACHE__ 1 #define __ARM_L1_PTW__ 1 -#define __ARM_DEBUG__ 7 +#define __ARM_DEBUG__ 7 #define __ARM_ENABLE_SWAP__ 1 #define 
__ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM_16K_PG__ 1 +#define __ARM_16K_PG__ 1 #define __ARM64_PMAP_SUBPAGE_L1__ 1 #define __ARM_KERNEL_PROTECT__ 1 #define __ARM_GLOBAL_SLEEP_BIT__ 1 #define __ARM_PAN_AVAILABLE__ 1 #elif defined (APPLEMONSOON) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_AMP__ 1 -#define __ARM_VFP__ 4 +#define __ARM_ARCH__ 8 +#define __ARM_VMSA__ 8 +#define __ARM_SMP__ 1 +#define __ARM_AMP__ 1 +#define __ARM_VFP__ 4 #define __ARM_COHERENT_CACHE__ 1 #define __ARM_COHERENT_IO__ 1 -#define __ARM_IC_NOALIAS_ICACHE__ 1 +#define __ARM_IC_NOALIAS_ICACHE__ 1 #define __ARM_L1_PTW__ 1 -#define __ARM_DEBUG__ 7 +#define __ARM_DEBUG__ 7 #define __ARM_ENABLE_SWAP__ 1 #define __ARM_V8_CRYPTO_EXTENSIONS__ 1 -#define __ARM_16K_PG__ 1 +#define __ARM_16K_PG__ 1 #define __ARM64_PMAP_SUBPAGE_L1__ 1 #define __ARM_KERNEL_PROTECT__ 1 #define __ARM_GLOBAL_SLEEP_BIT__ 1 @@ -185,13 +185,13 @@ #define __ARM_CLUSTER_COUNT__ 2 #elif defined (BCM2837) -#define __ARM_ARCH__ 8 -#define __ARM_VMSA__ 8 -#define __ARM_SMP__ 1 -#define __ARM_VFP__ 4 +#define __ARM_ARCH__ 8 +#define __ARM_VMSA__ 8 +#define __ARM_SMP__ 1 +#define __ARM_VFP__ 4 #define __ARM_COHERENT_CACHE__ 1 #define __ARM_L1_PTW__ 1 -#define __ARM_DEBUG__ 7 +#define __ARM_DEBUG__ 7 #define __ARM64_PMAP_SUBPAGE_L1__ 1 #else #error processor not supported @@ -217,10 +217,10 @@ #define CONFIG_THREAD_GROUPS 0 -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -#if __ARM_VFP__ -#define ARM_VFP_DEBUG 0 +#if __ARM_VFP__ +#define ARM_VFP_DEBUG 0 #endif #endif @@ -239,109 +239,109 @@ * +-----------------------------------------------------------+ */ -/* - * Flags +/* + * Flags */ -#define PSR_NF 0x80000000 /* Negative/Less than */ -#define PSR_ZF 0x40000000 /* Zero */ -#define PSR_CF 0x20000000 /* Carry/Borrow/Extend */ -#define PSR_VF 0x10000000 /* Overflow */ -#define PSR_QF 0x08000000 /* saturation flag (QADD ARMv5) */ +#define PSR_NF 0x80000000 /* Negative/Less than */ +#define PSR_ZF 0x40000000 /* Zero */ +#define PSR_CF 0x20000000 /* Carry/Borrow/Extend */ +#define PSR_VF 0x10000000 /* Overflow */ +#define PSR_QF 0x08000000 /* saturation flag (QADD ARMv5) */ /* * Modified execution mode flags */ -#define PSR_JF 0x01000000 /* Jazelle flag (BXJ ARMv5) */ -#define PSR_EF 0x00000200 /* mixed-endian flag (SETEND ARMv6) */ -#define PSR_AF 0x00000100 /* precise abort flag (ARMv6) */ -#define PSR_TF 0x00000020 /* thumb flag (BX ARMv4T) */ -#define PSR_TFb 5 /* thumb flag (BX ARMv4T) */ +#define PSR_JF 0x01000000 /* Jazelle flag (BXJ ARMv5) */ +#define PSR_EF 0x00000200 /* mixed-endian flag (SETEND ARMv6) */ +#define PSR_AF 0x00000100 /* precise abort flag (ARMv6) */ +#define PSR_TF 0x00000020 /* thumb flag (BX ARMv4T) */ +#define PSR_TFb 5 /* thumb flag (BX ARMv4T) */ /* * Interrupts */ -#define PSR_IRQFb 7 /* IRQ : 0 = IRQ enable */ -#define PSR_IRQF 0x00000080 /* IRQ : 0 = IRQ enable */ -#define PSR_FIQF 0x00000040 /* FIQ : 0 = FIQ enable */ +#define PSR_IRQFb 7 /* IRQ : 0 = IRQ enable */ +#define PSR_IRQF 0x00000080 /* IRQ : 0 = IRQ enable */ +#define PSR_FIQF 0x00000040 /* FIQ : 0 = FIQ enable */ /* * CPU mode */ -#define PSR_USER_MODE 0x00000010 /* User mode */ -#define PSR_FIQ_MODE 0x00000011 /* FIQ mode */ -#define PSR_IRQ_MODE 0x00000012 /* IRQ mode */ -#define PSR_SVC_MODE 0x00000013 /* Supervisor mode */ -#define PSR_ABT_MODE 0x00000017 /* Abort mode */ -#define PSR_UND_MODE 0x0000001B /* Undefined mode */ +#define PSR_USER_MODE 0x00000010 /* User mode */ +#define PSR_FIQ_MODE 0x00000011 /* FIQ 
mode */ +#define PSR_IRQ_MODE 0x00000012 /* IRQ mode */ +#define PSR_SVC_MODE 0x00000013 /* Supervisor mode */ +#define PSR_ABT_MODE 0x00000017 /* Abort mode */ +#define PSR_UND_MODE 0x0000001B /* Undefined mode */ -#define PSR_MODE_MASK 0x0000001F -#define PSR_IS_KERNEL(psr) (((psr) & PSR_MODE_MASK) != PSR_USER_MODE) -#define PSR_IS_USER(psr) (((psr) & PSR_MODE_MASK) == PSR_USER_MODE) +#define PSR_MODE_MASK 0x0000001F +#define PSR_IS_KERNEL(psr) (((psr) & PSR_MODE_MASK) != PSR_USER_MODE) +#define PSR_IS_USER(psr) (((psr) & PSR_MODE_MASK) == PSR_USER_MODE) -#define PSR_USERDFLT PSR_USER_MODE -#define PSR_USER_MASK (PSR_AF | PSR_IRQF | PSR_FIQF | PSR_MODE_MASK) -#define PSR_USER_SET PSR_USER_MODE +#define PSR_USERDFLT PSR_USER_MODE +#define PSR_USER_MASK (PSR_AF | PSR_IRQF | PSR_FIQF | PSR_MODE_MASK) +#define PSR_USER_SET PSR_USER_MODE -#define PSR_INTMASK PSR_IRQF /* Interrupt disable */ +#define PSR_INTMASK PSR_IRQF /* Interrupt disable */ /* * FPEXC: Floating-Point Exception Register */ -#define FPEXC_EX 0x80000000 /* Exception status */ -#define FPEXC_EX_BIT 31 -#define FPEXC_EN 0x40000000 /* VFP : 1 = EN enable */ -#define FPEXC_EN_BIT 30 +#define FPEXC_EX 0x80000000 /* Exception status */ +#define FPEXC_EX_BIT 31 +#define FPEXC_EN 0x40000000 /* VFP : 1 = EN enable */ +#define FPEXC_EN_BIT 30 /* * FPSCR: Floating-point Status and Control Register */ -#define FPSCR_DN 0x02000000 /* Default NaN */ -#define FPSCR_FZ 0x01000000 /* Flush to zero */ +#define FPSCR_DN 0x02000000 /* Default NaN */ +#define FPSCR_FZ 0x01000000 /* Flush to zero */ -#define FPSCR_DEFAULT FPSCR_DN | FPSCR_FZ +#define FPSCR_DEFAULT FPSCR_DN | FPSCR_FZ /* - * FSR registers + * FSR registers * * IFSR: Instruction Fault Status Register * DFSR: Data Fault Status Register */ -#define FSR_ALIGN 0x00000001 /* Alignment */ -#define FSR_DEBUG 0x00000002 /* Debug (watch/break) */ -#define FSR_ICFAULT 0x00000004 /* Fault on instruction cache maintenance */ -#define FSR_SFAULT 0x00000005 /* Translation Section */ -#define FSR_PFAULT 0x00000007 /* Translation Page */ -#define FSR_SACCESS 0x00000003 /* Section access */ -#define FSR_PACCESS 0x00000006 /* Page Access */ -#define FSR_SDOM 0x00000009 /* Domain Section */ -#define FSR_PDOM 0x0000000B /* Domain Page */ -#define FSR_SPERM 0x0000000D /* Permission Section */ -#define FSR_PPERM 0x0000000F /* Permission Page */ -#define FSR_EXT 0x00001000 /* External (Implementation Defined Classification) */ - -#define FSR_MASK 0x0000040F /* Valid bits */ -#define FSR_ALIGN_MASK 0x0000040D /* Valid bits to check align */ - -#define DFSR_WRITE 0x00000800 /* write data abort fault */ +#define FSR_ALIGN 0x00000001 /* Alignment */ +#define FSR_DEBUG 0x00000002 /* Debug (watch/break) */ +#define FSR_ICFAULT 0x00000004 /* Fault on instruction cache maintenance */ +#define FSR_SFAULT 0x00000005 /* Translation Section */ +#define FSR_PFAULT 0x00000007 /* Translation Page */ +#define FSR_SACCESS 0x00000003 /* Section access */ +#define FSR_PACCESS 0x00000006 /* Page Access */ +#define FSR_SDOM 0x00000009 /* Domain Section */ +#define FSR_PDOM 0x0000000B /* Domain Page */ +#define FSR_SPERM 0x0000000D /* Permission Section */ +#define FSR_PPERM 0x0000000F /* Permission Page */ +#define FSR_EXT 0x00001000 /* External (Implementation Defined Classification) */ + +#define FSR_MASK 0x0000040F /* Valid bits */ +#define FSR_ALIGN_MASK 0x0000040D /* Valid bits to check align */ + +#define DFSR_WRITE 0x00000800 /* write data abort fault */ #if defined (ARMA7) || defined (APPLE_ARM64_ARCH_FAMILY) || 
defined (BCM2837) -#define TEST_FSR_VMFAULT(status) \ - (((status) == FSR_PFAULT) \ - || ((status) == FSR_PPERM) \ - || ((status) == FSR_SFAULT) \ - || ((status) == FSR_SPERM) \ - || ((status) == FSR_ICFAULT) \ - || ((status) == FSR_SACCESS) \ - || ((status) == FSR_PACCESS)) +#define TEST_FSR_VMFAULT(status) \ + (((status) == FSR_PFAULT) \ + || ((status) == FSR_PPERM) \ + || ((status) == FSR_SFAULT) \ + || ((status) == FSR_SPERM) \ + || ((status) == FSR_ICFAULT) \ + || ((status) == FSR_SACCESS) \ + || ((status) == FSR_PACCESS)) -#define TEST_FSR_TRANSLATION_FAULT(status) \ - (((status) == FSR_SFAULT) \ - || ((status) == FSR_PFAULT)) +#define TEST_FSR_TRANSLATION_FAULT(status) \ + (((status) == FSR_SFAULT) \ + || ((status) == FSR_PFAULT)) #else @@ -368,89 +368,89 @@ #define MMU_SWAY (MMU_CSIZE - MMU_NWAY) /* set size 1<>1)&0x1)<>1)&0x1)<>2)&0x1)<>2)&0x1)<> PTE_SHIFT) /* number of ptes per page */ +#define PTE_SHIFT 2 /* shift width of a pte (sizeof(pte) == (1 << PTE_SHIFT)) */ +#define PTE_PGENTRIES (1024 >> PTE_SHIFT) /* number of ptes per page */ -#define ARM_PTE_EMPTY 0x00000000 /* unasigned - invalid entry */ +#define ARM_PTE_EMPTY 0x00000000 /* unasigned - invalid entry */ /* markers for (invalid) PTE for a page sent to compressor */ -#define ARM_PTE_COMPRESSED ARM_PTE_TEX1 /* compressed... */ -#define ARM_PTE_COMPRESSED_ALT ARM_PTE_TEX2 /* ... and was "alt_acct" */ -#define ARM_PTE_COMPRESSED_MASK (ARM_PTE_COMPRESSED | ARM_PTE_COMPRESSED_ALT) -#define ARM_PTE_IS_COMPRESSED(x) \ - ((((x) & 0x3) == 0) && /* PTE is not valid... */ \ - ((x) & ARM_PTE_COMPRESSED) && /* ...has "compressed" marker" */ \ +#define ARM_PTE_COMPRESSED ARM_PTE_TEX1 /* compressed... */ +#define ARM_PTE_COMPRESSED_ALT ARM_PTE_TEX2 /* ... and was "alt_acct" */ +#define ARM_PTE_COMPRESSED_MASK (ARM_PTE_COMPRESSED | ARM_PTE_COMPRESSED_ALT) +#define ARM_PTE_IS_COMPRESSED(x) \ + ((((x) & 0x3) == 0) && /* PTE is not valid... 
*/ \ + ((x) & ARM_PTE_COMPRESSED) && /* ...has "compressed" marker" */ \ ((!((x) & ~ARM_PTE_COMPRESSED_MASK)) || /* ...no other bits */ \ (panic("compressed PTE %p 0x%x has extra bits 0x%x: corrupted?", \ - &(x), (x), (x) & ~ARM_PTE_COMPRESSED_MASK), FALSE))) + &(x), (x), (x) & ~ARM_PTE_COMPRESSED_MASK), FALSE))) -#define ARM_PTE_TYPE_FAULT 0x00000000 /* fault entry type */ -#define ARM_PTE_TYPE 0x00000002 /* small page entry type */ -#define ARM_PTE_TYPE_MASK 0x00000002 /* mask to get pte type */ +#define ARM_PTE_TYPE_FAULT 0x00000000 /* fault entry type */ +#define ARM_PTE_TYPE 0x00000002 /* small page entry type */ +#define ARM_PTE_TYPE_MASK 0x00000002 /* mask to get pte type */ -#define ARM_PTE_NG_MASK 0x00000800 /* mask to determine notGlobal bit */ -#define ARM_PTE_NG 0x00000800 /* value for a per-process mapping */ +#define ARM_PTE_NG_MASK 0x00000800 /* mask to determine notGlobal bit */ +#define ARM_PTE_NG 0x00000800 /* value for a per-process mapping */ -#define ARM_PTE_SHSHIFT 10 -#define ARM_PTE_SHMASK 0x00000400 /* shared (SMP) mapping mask */ -#define ARM_PTE_SH 0x00000400 /* shared (SMP) mapping */ +#define ARM_PTE_SHSHIFT 10 +#define ARM_PTE_SHMASK 0x00000400 /* shared (SMP) mapping mask */ +#define ARM_PTE_SH 0x00000400 /* shared (SMP) mapping */ -#define ARM_PTE_CBSHIFT 2 -#define ARM_PTE_CB(x) ((x)<>1)&0x1)<>1)&0x1)<>2)&0x1)<>2)&0x1)<timebase_den < 1 || freq->timebase_den > 4 || - freq->timebase_num < freq->timebase_den) + freq->timebase_num < freq->timebase_den) { panic("rtclock timebase_callback: invalid constant %ld / %ld", - freq->timebase_num, freq->timebase_den); + freq->timebase_num, freq->timebase_den); + } denom = freq->timebase_num; numer = freq->timebase_den * NSEC_PER_SEC; @@ -152,10 +155,10 @@ rtclock_init(void) cdp = getCpuDatap(); abstime = mach_absolute_time(); - cdp->rtcPop = EndOfAllTime; /* Init Pop time */ - timer_resync_deadlines(); /* Start the timers going */ + cdp->rtcPop = EndOfAllTime; /* Init Pop time */ + timer_resync_deadlines(); /* Start the timers going */ - return (1); + return 1; } uint64_t @@ -189,7 +192,7 @@ mach_absolute_time(void) old_absolute_time = s_last_absolute_time; #if __arm64__ - __asm__ volatile("dsb ld" ::: "memory"); + __asm__ volatile ("dsb ld" ::: "memory"); #else OSSynchronizeIO(); // See osfmk/arm64/rtclock.c #endif @@ -231,14 +234,14 @@ mach_approximate_time(void) void clock_get_system_microtime(clock_sec_t * secs, - clock_usec_t * microsecs) + clock_usec_t * microsecs) { absolutetime_to_microtime(mach_absolute_time(), secs, microsecs); } void clock_get_system_nanotime(clock_sec_t * secs, - clock_nsec_t * nanosecs) + clock_nsec_t * nanosecs) { uint64_t abstime; uint64_t t64; @@ -252,10 +255,10 @@ clock_get_system_nanotime(clock_sec_t * secs, void clock_gettimeofday_set_commpage(uint64_t abstime, - uint64_t sec, - uint64_t frac, - uint64_t scale, - uint64_t tick_per_sec) + uint64_t sec, + uint64_t frac, + uint64_t scale, + uint64_t tick_per_sec) { commpage_set_timestamp(abstime, sec, frac, scale, tick_per_sec); } @@ -289,10 +292,11 @@ rtclock_intr(__unused unsigned int is_user_context) abstime = mach_absolute_time(); if (cdp->cpu_idle_pop != 0x0ULL) { - if (( cdp->rtcPop-abstime) < cdp->cpu_idle_latency) { + if ((cdp->rtcPop - abstime) < cdp->cpu_idle_latency) { cdp->cpu_idle_pop = 0x0ULL; - while (abstime < cdp->rtcPop) + while (abstime < cdp->rtcPop) { abstime = mach_absolute_time(); + } } else { ClearIdlePop(FALSE); } @@ -313,9 +317,9 @@ rtclock_intr(__unused unsigned int is_user_context) if (abstime >= cdp->rtcPop) { 
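Earlier in this hunk, timebase_callback() inverts the reported timebase: denom = timebase_num and numer = timebase_den * NSEC_PER_SEC, which gives the usual scaling ns = ticks * numer / denom. A sketch of that conversion; the helper name and the __uint128_t intermediate are assumptions for illustration, not xnu's internal conversion path:

#include <stdint.h>
#include <stdio.h>

/* Scale timebase ticks to nanoseconds using the (numer, denom) pair
 * derived above (numer = timebase_den * NSEC_PER_SEC, denom =
 * timebase_num). A 128-bit intermediate avoids overflow for large
 * tick counts. */
static uint64_t ticks_to_ns(uint64_t ticks, uint64_t numer, uint64_t denom)
{
    return (uint64_t)(((__uint128_t)ticks * numer) / denom);
}

int main(void)
{
    /* Example: a 24 MHz timebase (timebase_num = 24000000,
     * timebase_den = 1) yields numer = NSEC_PER_SEC, denom = 24000000,
     * so 24000000 ticks -> 1000000000 ns. */
    printf("%llu\n", (unsigned long long)
        ticks_to_ns(24000000ULL, 1000000000ULL, 24000000ULL));
    return 0;
}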
/* Log the interrupt service latency (-ve value expected by tool) */ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE, - -(abstime - cdp->rtcPop), - user_mode ? pc : VM_KERNEL_UNSLIDE(pc), user_mode, 0, 0); + MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) | DBG_FUNC_NONE, + -(abstime - cdp->rtcPop), + user_mode ? pc : VM_KERNEL_UNSLIDE(pc), user_mode, 0, 0); } /* call the generic etimer */ @@ -324,13 +328,13 @@ rtclock_intr(__unused unsigned int is_user_context) static int deadline_to_decrementer(uint64_t deadline, - uint64_t now) + uint64_t now) { uint64_t delt; - if (deadline <= now) + if (deadline <= now) { return DECREMENTER_MIN; - else { + } else { delt = deadline - now; return (delt >= (DECREMENTER_MAX + 1)) ? DECREMENTER_MAX : ((delt >= (DECREMENTER_MIN + 1)) ? (int)delt : DECREMENTER_MIN); @@ -355,7 +359,7 @@ setPop(uint64_t time) ml_set_decrementer((uint32_t) delay_time); - return (delay_time); + return delay_time; } /* @@ -373,8 +377,9 @@ SetIdlePop(void) current_time = mach_absolute_time(); if (((cdp->rtcPop < current_time) || - (cdp->rtcPop - current_time) < cdp->cpu_idle_latency)) + (cdp->rtcPop - current_time) < cdp->cpu_idle_latency)) { return FALSE; + } time = cdp->rtcPop - cdp->cpu_idle_latency; @@ -390,7 +395,7 @@ SetIdlePop(void) */ void ClearIdlePop( - boolean_t wfi) + boolean_t wfi) { #if !__arm64__ #pragma unused(wfi) @@ -418,8 +423,8 @@ ClearIdlePop( void absolutetime_to_microtime(uint64_t abstime, - clock_sec_t * secs, - clock_usec_t * microsecs) + clock_sec_t * secs, + clock_usec_t * microsecs) { uint64_t t64; @@ -431,7 +436,7 @@ absolutetime_to_microtime(uint64_t abstime, void absolutetime_to_nanoseconds(uint64_t abstime, - uint64_t * result) + uint64_t * result) { uint64_t t64; @@ -442,7 +447,7 @@ absolutetime_to_nanoseconds(uint64_t abstime, void nanoseconds_to_absolutetime(uint64_t nanosecs, - uint64_t * result) + uint64_t * result) { uint64_t t64; @@ -453,17 +458,17 @@ nanoseconds_to_absolutetime(uint64_t nanosecs, void nanotime_to_absolutetime(clock_sec_t secs, - clock_nsec_t nanosecs, - uint64_t * result) + clock_nsec_t nanosecs, + uint64_t * result) { *result = ((uint64_t) secs * rtclock_sec_divisor) + - ((uint64_t) nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC; + ((uint64_t) nanosecs * rtclock_sec_divisor) / NSEC_PER_SEC; } void clock_interval_to_absolutetime_interval(uint32_t interval, - uint32_t scale_factor, - uint64_t * result) + uint32_t scale_factor, + uint64_t * result) { uint64_t nanosecs = (uint64_t) interval * scale_factor; uint64_t t64; @@ -475,13 +480,13 @@ clock_interval_to_absolutetime_interval(uint32_t interval, void machine_delay_until(uint64_t interval, - uint64_t deadline) + uint64_t deadline) { #pragma unused(interval) uint64_t now; do { -#if __ARM_ENABLE_WFE_ +#if __ARM_ENABLE_WFE_ #if __arm64__ if (arm64_wfe_allowed()) #endif /* __arm64__ */ diff --git a/osfmk/arm/rtclock.h b/osfmk/arm/rtclock.h index fb051b2ab..10205a56f 100644 --- a/osfmk/arm/rtclock.h +++ b/osfmk/arm/rtclock.h @@ -40,51 +40,50 @@ #include #include -#define EndOfAllTime 0xFFFFFFFFFFFFFFFFULL -#define DECREMENTER_MAX 0x7FFFFFFFUL -#define DECREMENTER_MIN 0xAUL +#define EndOfAllTime 0xFFFFFFFFFFFFFFFFULL +#define DECREMENTER_MAX 0x7FFFFFFFUL +#define DECREMENTER_MIN 0xAUL typedef struct _rtclock_data_ { - uint32_t rtc_sec_divisor; - uint32_t rtc_usec_divisor; - mach_timebase_info_data_t rtc_timebase_const; - union { - uint64_t abstime; + uint32_t rtc_sec_divisor; + uint32_t rtc_usec_divisor; + mach_timebase_info_data_t 
rtc_timebase_const; + union { + uint64_t abstime; struct { - uint32_t low; - uint32_t high; + uint32_t low; + uint32_t high; } abstime_val; - } rtc_base; - union { - uint64_t abstime; + } rtc_base; + union { + uint64_t abstime; struct { - uint32_t low; - uint32_t high; + uint32_t low; + uint32_t high; } abstime_val; - } rtc_adj; - tbd_ops_data_t rtc_timebase_func; + } rtc_adj; + tbd_ops_data_t rtc_timebase_func; /* Only needed for AIC manipulation */ - vm_offset_t rtc_timebase_addr; - vm_offset_t rtc_timebase_val; - + vm_offset_t rtc_timebase_addr; + vm_offset_t rtc_timebase_val; } rtclock_data_t; -extern rtclock_data_t RTClockData; -#define rtclock_sec_divisor RTClockData.rtc_sec_divisor -#define rtclock_usec_divisor RTClockData.rtc_usec_divisor -#define rtclock_timebase_const RTClockData.rtc_timebase_const -#define rtclock_base_abstime RTClockData.rtc_base.abstime -#define rtclock_base_abstime_low RTClockData.rtc_base.abstime_val.low -#define rtclock_base_abstime_high RTClockData.rtc_base.abstime_val.high -#define rtclock_adj_abstime RTClockData.rtc_adj.abstime -#define rtclock_adj_abstime_low RTClockData.rtc_adj.abstime_val.low -#define rtclock_adj_abstime_high RTClockData.rtc_adj.abstime_val.high -#define rtclock_timebase_func RTClockData.rtc_timebase_func +extern rtclock_data_t RTClockData; +#define rtclock_sec_divisor RTClockData.rtc_sec_divisor +#define rtclock_usec_divisor RTClockData.rtc_usec_divisor +#define rtclock_timebase_const RTClockData.rtc_timebase_const +#define rtclock_base_abstime RTClockData.rtc_base.abstime +#define rtclock_base_abstime_low RTClockData.rtc_base.abstime_val.low +#define rtclock_base_abstime_high RTClockData.rtc_base.abstime_val.high +#define rtclock_adj_abstime RTClockData.rtc_adj.abstime +#define rtclock_adj_abstime_low RTClockData.rtc_adj.abstime_val.low +#define rtclock_adj_abstime_high RTClockData.rtc_adj.abstime_val.high +#define rtclock_timebase_func RTClockData.rtc_timebase_func /* Only needed for AIC manipulation */ -#define rtclock_timebase_addr RTClockData.rtc_timebase_addr -#define rtclock_timebase_val RTClockData.rtc_timebase_val +#define rtclock_timebase_addr RTClockData.rtc_timebase_addr +#define rtclock_timebase_val RTClockData.rtc_timebase_val extern uint64_t arm_timer_slop_max; diff --git a/osfmk/arm/sched_param.h b/osfmk/arm/sched_param.h index a3d20dc47..36383678b 100644 --- a/osfmk/arm/sched_param.h +++ b/osfmk/arm/sched_param.h @@ -28,29 +28,29 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
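rtclock_data_t above deliberately keeps rtc_base and rtc_adj as unions of a 64-bit abstime with explicit 32-bit low/high halves. On 32-bit ARM a plain 64-bit load of such a field can tear; the classic way to read a split counter consistently is the high-low-high retry loop sketched below. Whether these particular fields are read this way is not shown in this hunk, so treat the helper as an illustration only:

#include <stdint.h>

/* Tear-free read of a 64-bit value stored as two 32-bit halves
 * (cf. the abstime_val.low/high union members above). */
struct split64 {
    volatile uint32_t low;
    volatile uint32_t high;
};

static uint64_t read_split64(const struct split64 *v)
{
    uint32_t hi, lo;
    do {
        hi = v->high;
        lo = v->low;
    } while (hi != v->high);   /* 'high' moved: halves were torn, retry */
    return ((uint64_t)hi << 32) | lo;
}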
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,7 +61,7 @@ * Scheduler parameters. */ -#ifndef _ARM_SCHED_PARAM_H_ -#define _ARM_SCHED_PARAM_H_ +#ifndef _ARM_SCHED_PARAM_H_ +#define _ARM_SCHED_PARAM_H_ #endif /* _ARM_SCHED_PARAM_H_ */ diff --git a/osfmk/arm/setjmp.h b/osfmk/arm/setjmp.h index a3a2f5ead..dfed26d48 100644 --- a/osfmk/arm/setjmp.h +++ b/osfmk/arm/setjmp.h @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -59,11 +59,11 @@ /* * Setjmp/longjmp buffer for ARM. */ -#ifndef _ARM_SETJMP_H_ -#define _ARM_SETJMP_H_ +#ifndef _ARM_SETJMP_H_ +#define _ARM_SETJMP_H_ -typedef struct jmp_buf { - int jmp_buf[28]; +typedef struct jmp_buf { + int jmp_buf[28]; } jmp_buf_t; -#endif /* _ARM_SETJMP_H_ */ +#endif /* _ARM_SETJMP_H_ */ diff --git a/osfmk/arm/simple_lock.h b/osfmk/arm/simple_lock.h index f1fac9bee..3f4d5c91a 100644 --- a/osfmk/arm/simple_lock.h +++ b/osfmk/arm/simple_lock.h @@ -28,41 +28,42 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _ARM_SIMPLE_LOCK_TYPES_H_ -#define _ARM_SIMPLE_LOCK_TYPES_H_ +#ifndef _ARM_SIMPLE_LOCK_TYPES_H_ +#define _ARM_SIMPLE_LOCK_TYPES_H_ -#ifdef KERNEL_PRIVATE +#include + +#ifdef KERNEL_PRIVATE #include #include - #include #ifdef MACH_KERNEL_PRIVATE #include @@ -74,50 +75,79 @@ typedef uint32_t hw_lock_bit_t; -extern void hw_lock_bit( - hw_lock_bit_t *, - unsigned int); +#if LOCK_STATS +extern void hw_lock_bit( + hw_lock_bit_t *, + unsigned int, + lck_grp_t*); -extern void hw_lock_bit_nopreempt( - hw_lock_bit_t *, - unsigned int); +extern void hw_lock_bit_nopreempt( + hw_lock_bit_t *, + unsigned int, + lck_grp_t*); -extern void hw_unlock_bit( - hw_lock_bit_t *, - unsigned int); +extern unsigned int hw_lock_bit_try( + hw_lock_bit_t *, + unsigned int, + lck_grp_t*); + +extern unsigned int hw_lock_bit_to( + hw_lock_bit_t *, + unsigned int, + uint32_t, + lck_grp_t*); -extern void hw_unlock_bit_nopreempt( - hw_lock_bit_t *, - unsigned int); +#else +extern void hw_lock_bit( + hw_lock_bit_t *, + unsigned int); +#define hw_lock_bit(lck, bit, grp) hw_lock_bit(lck, bit) + +extern void hw_lock_bit_nopreempt( + hw_lock_bit_t *, + unsigned int); +#define hw_lock_bit_nopreempt(lck, bit, grp) hw_lock_bit_nopreempt(lck, bit) extern unsigned int hw_lock_bit_try( - hw_lock_bit_t *, - unsigned int); + hw_lock_bit_t *, + unsigned int); +#define hw_lock_bit_try(lck, bit, grp) hw_lock_bit_try(lck, bit) extern unsigned int hw_lock_bit_to( - hw_lock_bit_t *, - unsigned int, - uint32_t); + hw_lock_bit_t *, + unsigned int, + uint32_t); +#define hw_lock_bit_to(lck, bit, timeout, grp) hw_lock_bit_to(lck, bit, timeout) + +#endif /* LOCK_STATS */ + +extern void hw_unlock_bit( + hw_lock_bit_t *, + unsigned int); -#define hw_lock_bit_held(l,b) (((*(l))&(1< #define __SMP__ __ARM_SMP__ #define __AMP__ __ARM_AMP__ -#endif /* _ARM_SMP_H_ */ +#endif /* _ARM_SMP_H_ */ diff --git a/osfmk/arm/status_shared.c b/osfmk/arm/status_shared.c index 42d7a1235..b0a389990 100644 --- a/osfmk/arm/status_shared.c +++ b/osfmk/arm/status_shared.c @@ -48,8 +48,9 @@ saved_state_to_thread_state32(const arm_saved_state_t *saved_state, arm_thread_s ts32->sp = (uint32_t)get_saved_state_sp(saved_state); ts32->pc = (uint32_t)get_saved_state_pc(saved_state); ts32->cpsr = get_saved_state_cpsr(saved_state); - for (i = 0; i < 13; i++) + for (i = 0; i < 13; i++) { ts32->r[i] = (uint32_t)get_saved_state_reg(saved_state, i); + } } /* @@ -70,12 +71,11 @@ thread_state32_to_saved_state(const arm_thread_state32_t *ts32, arm_saved_state_ set_saved_state_cpsr(saved_state, (ts32->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_32); #elif defined(__arm__) set_saved_state_cpsr(saved_state, (ts32->cpsr & ~PSR_USER_MASK) | (ts32->cpsr & PSR_USER_MASK)); -#else +#else #error Unknown architecture. 
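The simple_lock.h hunk above adds a lck_grp_t * parameter to hw_lock_bit() and friends when LOCK_STATS is set, and otherwise defines same-named function-like macros that simply drop the group argument. The trick is legal because a function-like macro is not re-expanded inside its own replacement list, so every call site can pass the group unconditionally. A self-contained toy version of the pattern (the lck_grp_t stand-in and the printf body are assumptions):

#include <stdio.h>

typedef struct { const char *name; } lck_grp_t;   /* toy stand-in */

#if LOCK_STATS
void hw_lock_bit(unsigned *lck, unsigned bit, lck_grp_t *grp)
{
    printf("lock bit %u for group %s\n", bit, grp->name);
    *lck |= 1u << bit;
}
#else
void hw_lock_bit(unsigned *lck, unsigned bit)
{
    *lck |= 1u << bit;
}
/* Same-named macro: swallows 'grp' at every call site. The name is
 * not re-expanded inside its own replacement, so this does not recurse. */
#define hw_lock_bit(lck, bit, grp) hw_lock_bit(lck, bit)
#endif

int main(void)
{
    unsigned word = 0;
    lck_grp_t grp = { "demo" };
    hw_lock_bit(&word, 3, &grp);   /* compiles with or without stats */
    return word == (1u << 3) ? 0 : 1;
}

Build once with -DLOCK_STATS=1 and once without; the call in main() compiles unchanged either way.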
#endif - for (i = 0; i < 13; i++) + for (i = 0; i < 13; i++) { set_saved_state_reg(saved_state, i, ts32->r[i]); + } } - - diff --git a/osfmk/arm/strlcpy.c b/osfmk/arm/strlcpy.c index e06f4173b..6823d8517 100644 --- a/osfmk/arm/strlcpy.c +++ b/osfmk/arm/strlcpy.c @@ -30,13 +30,14 @@ #undef strlcpy size_t -strlcpy(char * dst, const char * src, size_t maxlen) { - const size_t srclen = strlen(src); - if (srclen + 1 < maxlen) { - memcpy(dst, src, srclen + 1); - } else if (maxlen != 0) { - memcpy(dst, src, maxlen - 1); - dst[maxlen-1] = '\0'; - } - return srclen; +strlcpy(char * dst, const char * src, size_t maxlen) +{ + const size_t srclen = strlen(src); + if (srclen + 1 < maxlen) { + memcpy(dst, src, srclen + 1); + } else if (maxlen != 0) { + memcpy(dst, src, maxlen - 1); + dst[maxlen - 1] = '\0'; + } + return srclen; } diff --git a/osfmk/arm/strncpy.c b/osfmk/arm/strncpy.c index 5ee1847a6..4774589fb 100644 --- a/osfmk/arm/strncpy.c +++ b/osfmk/arm/strncpy.c @@ -30,13 +30,14 @@ #undef strncpy char * -strncpy(char * dst, const char * src, size_t maxlen) { - const size_t srclen = strnlen(src, maxlen); - if (srclen < maxlen) { - memcpy(dst, src, srclen); - memset(dst+srclen, 0, maxlen - srclen); - } else { - memcpy(dst, src, maxlen); - } - return dst; +strncpy(char * dst, const char * src, size_t maxlen) +{ + const size_t srclen = strnlen(src, maxlen); + if (srclen < maxlen) { + memcpy(dst, src, srclen); + memset(dst + srclen, 0, maxlen - srclen); + } else { + memcpy(dst, src, maxlen); + } + return dst; } diff --git a/osfmk/arm/task.h b/osfmk/arm/task.h index e545bd36c..2558ed3bb 100644 --- a/osfmk/arm/task.h +++ b/osfmk/arm/task.h @@ -29,28 +29,28 @@ * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,6 +60,4 @@ */ #define MACHINE_TASK \ - void* task_debug; - - + void* task_debug; diff --git a/osfmk/arm/thread.h b/osfmk/arm/thread.h index e270512a2..46a603dcc 100644 --- a/osfmk/arm/thread.h +++ b/osfmk/arm/thread.h @@ -28,35 +28,35 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _ARM_THREAD_H_ +#ifndef _ARM_THREAD_H_ #define _ARM_THREAD_H_ #include @@ -64,16 +64,16 @@ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include #endif #if __ARM_VFP__ -#define VFPSAVE_ALIGN 16 -#define VFPSAVE_ATTRIB __attribute__ ((aligned (VFPSAVE_ALIGN))) -#define THREAD_ALIGN VFPSAVE_ALIGN +#define VFPSAVE_ALIGN 16 +#define VFPSAVE_ATTRIB __attribute__ ((aligned (VFPSAVE_ALIGN))) +#define THREAD_ALIGN VFPSAVE_ALIGN /* * vector floating point saved state @@ -86,7 +86,7 @@ struct arm_vfpsaved_state { #endif struct perfcontrol_state { - uint64_t opaque[8] __attribute__((aligned(8))); + uint64_t opaque[8] __attribute__((aligned(8))); }; /* @@ -94,7 +94,7 @@ struct perfcontrol_state { */ extern unsigned int _MachineStateCount[]; -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #if __arm64__ typedef arm_context_t machine_thread_kernel_state; #else @@ -104,98 +104,104 @@ typedef struct arm_saved_state machine_thread_kernel_state; struct machine_thread { #if __arm64__ - arm_context_t *contextData; /* allocated user context */ - arm_saved_state_t *upcb; /* pointer to user GPR state */ - arm_neon_saved_state_t *uNeon; /* pointer to user VFP state */ + arm_context_t *contextData; /* allocated user context */ + arm_saved_state_t *upcb; /* pointer to user GPR state */ + arm_neon_saved_state_t *uNeon; /* pointer to user VFP state */ #elif __arm__ - struct arm_saved_state PcbData; + struct arm_saved_state PcbData; #if __ARM_VFP__ - struct arm_vfpsaved_state uVFPdata VFPSAVE_ATTRIB; - struct arm_vfpsaved_state kVFPdata VFPSAVE_ATTRIB; + struct arm_vfpsaved_state uVFPdata VFPSAVE_ATTRIB; + struct arm_vfpsaved_state kVFPdata VFPSAVE_ATTRIB; #endif /* __ARM_VFP__ */ #else #error Unknown arch #endif #if __ARM_USER_PROTECT__ - unsigned int uptw_ttc; - unsigned int uptw_ttb; - unsigned int kptw_ttb; - unsigned int asid; + unsigned int uptw_ttc; + unsigned int uptw_ttb; + unsigned int kptw_ttb; + unsigned int asid; #endif - vm_offset_t kstackptr; /* top of kernel stack */ - struct cpu_data *CpuDatap; /* current per cpu data */ - unsigned int preemption_count; /* preemption count */ + vm_offset_t kstackptr; /* top of kernel stack */ + struct cpu_data *CpuDatap; /* current per cpu data */ + unsigned int preemption_count; /* preemption count */ + +#if __ARM_SMP__ +#define MACHINE_THREAD_FLAGS_ON_CPU (0x1) + + uint8_t machine_thread_flags; +#endif /* __ARM_SMP__ */ arm_debug_state_t *DebugData; - mach_vm_address_t cthread_self; /* for use of cthread package */ - mach_vm_address_t cthread_data; /* for use of cthread package */ + mach_vm_address_t cthread_self; /* for use of cthread package */ + mach_vm_address_t cthread_data; /* for use of cthread package */ - struct perfcontrol_state perfctrl_state; + struct perfcontrol_state perfctrl_state; #if __arm64__ - uint64_t energy_estimate_nj; + uint64_t energy_estimate_nj; #endif #if INTERRUPT_MASKED_DEBUG - uint64_t intmask_timestamp; /* timestamp of when interrupts were masked */ + uint64_t intmask_timestamp; /* timestamp of when 
interrupts were masked */ #endif }; #endif -extern struct arm_saved_state *get_user_regs(thread_t); -extern struct arm_saved_state *find_user_regs(thread_t); -extern struct arm_saved_state *find_kern_regs(thread_t); -extern struct arm_vfpsaved_state *find_user_vfp(thread_t); +extern struct arm_saved_state *get_user_regs(thread_t); +extern struct arm_saved_state *find_user_regs(thread_t); +extern struct arm_saved_state *find_kern_regs(thread_t); +extern struct arm_vfpsaved_state *find_user_vfp(thread_t); #if defined(__arm__) -extern arm_debug_state_t *find_debug_state(thread_t); +extern arm_debug_state_t *find_debug_state(thread_t); #elif defined(__arm64__) -extern arm_debug_state32_t *find_debug_state32(thread_t); -extern arm_debug_state64_t *find_debug_state64(thread_t); -extern arm_neon_saved_state_t *get_user_neon_regs(thread_t); +extern arm_debug_state32_t *find_debug_state32(thread_t); +extern arm_debug_state64_t *find_debug_state64(thread_t); +extern arm_neon_saved_state_t *get_user_neon_regs(thread_t); #else #error unknown arch #endif #define FIND_PERFCONTROL_STATE(th) (&th->machine.perfctrl_state) -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #if __ARM_VFP__ extern void vfp_state_initialize(struct arm_vfpsaved_state *vfp_state); -extern void vfp_save(struct arm_vfpsaved_state *vfp_ss); -extern void vfp_load(struct arm_vfpsaved_state *vfp_ss); -extern void toss_live_vfp(void *vfp_fc); +extern void vfp_save(struct arm_vfpsaved_state *vfp_ss); +extern void vfp_load(struct arm_vfpsaved_state *vfp_ss); +extern void toss_live_vfp(void *vfp_fc); #endif /* __ARM_VFP__ */ -extern void arm_debug_set(arm_debug_state_t *debug_state); +extern void arm_debug_set(arm_debug_state_t *debug_state); #if defined(__arm64__) -extern void arm_debug_set32(arm_debug_state_t *debug_state); -extern void arm_debug_set64(arm_debug_state_t *debug_state); +extern void arm_debug_set32(arm_debug_state_t *debug_state); +extern void arm_debug_set64(arm_debug_state_t *debug_state); kern_return_t handle_get_arm_thread_state( - thread_state_t tstate, - mach_msg_type_number_t * count, - const arm_saved_state_t *saved_state); + thread_state_t tstate, + mach_msg_type_number_t * count, + const arm_saved_state_t *saved_state); kern_return_t handle_get_arm32_thread_state( - thread_state_t tstate, - mach_msg_type_number_t * count, - const arm_saved_state_t *saved_state); + thread_state_t tstate, + mach_msg_type_number_t * count, + const arm_saved_state_t *saved_state); kern_return_t handle_get_arm64_thread_state( - thread_state_t tstate, - mach_msg_type_number_t * count, - const arm_saved_state_t *saved_state); + thread_state_t tstate, + mach_msg_type_number_t * count, + const arm_saved_state_t *saved_state); kern_return_t handle_set_arm_thread_state( - const thread_state_t tstate, - mach_msg_type_number_t count, - arm_saved_state_t *saved_state); + const thread_state_t tstate, + mach_msg_type_number_t count, + arm_saved_state_t *saved_state); kern_return_t handle_set_arm32_thread_state( - const thread_state_t tstate, - mach_msg_type_number_t count, - arm_saved_state_t *saved_state); + const thread_state_t tstate, + mach_msg_type_number_t count, + arm_saved_state_t *saved_state); kern_return_t handle_set_arm64_thread_state( - const thread_state_t tstate, - mach_msg_type_number_t count, - arm_saved_state_t *saved_state); + const thread_state_t tstate, + mach_msg_type_number_t count, + arm_saved_state_t *saved_state); #endif #endif /* MACH_KERNEL_PRIVATE */ @@ -207,7 +213,7 @@ extern void act_thread_cfree(void *ctx); 
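The machine_thread hunk above adds a machine_thread_flags byte guarded by __ARM_SMP__, with MACHINE_THREAD_FLAGS_ON_CPU (0x1) as its only defined bit. The diff defines just the bit; accessors such as the ones below are hypothetical, shown only to illustrate the usual set/clear/test idiom for a flags byte:

#include <stdint.h>
#include <stdbool.h>

#define MACHINE_THREAD_FLAGS_ON_CPU (0x1)

/* Hypothetical accessors; not present in the diff. */
static inline void thread_set_on_cpu(uint8_t *flags)
{
    *flags |= MACHINE_THREAD_FLAGS_ON_CPU;
}

static inline void thread_clear_on_cpu(uint8_t *flags)
{
    *flags &= (uint8_t)~MACHINE_THREAD_FLAGS_ON_CPU;
}

static inline bool thread_is_on_cpu(uint8_t flags)
{
    return (flags & MACHINE_THREAD_FLAGS_ON_CPU) != 0;
}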
* Return address of the function that called current function, given * address of the first parameter of current function. */ -#define GET_RETURN_PC(addr) (((vm_offset_t *)0)) +#define GET_RETURN_PC(addr) (((vm_offset_t *)0)) /* * Defining this indicates that MD code will supply an exception() @@ -216,4 +222,4 @@ extern void act_thread_cfree(void *ctx); */ #define MACHINE_FAST_EXCEPTION 1 -#endif /* _ARM_THREAD_H_ */ +#endif /* _ARM_THREAD_H_ */ diff --git a/osfmk/arm/trap.c b/osfmk/arm/trap.c index 6c1f35727..2605951b2 100644 --- a/osfmk/arm/trap.c +++ b/osfmk/arm/trap.c @@ -75,7 +75,7 @@ extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs, unsigned int ins extern boolean_t dtrace_tally_fault(user_addr_t); /* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions - over from that file. Need to keep these in sync! */ + * over from that file. Need to keep these in sync! */ #define FASTTRAP_ARM_INSTR 0xe7ffdefc #define FASTTRAP_THUMB_INSTR 0xdefc @@ -86,23 +86,23 @@ extern boolean_t dtrace_tally_fault(user_addr_t); perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */ #endif -#define COPYIN(dst, src, size) \ - ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \ - copyin_kern(dst, src, size) \ - : \ - copyin(dst, src, size) +#define COPYIN(dst, src, size) \ + ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \ + copyin_kern(dst, src, size) \ + : \ + copyin(dst, src, size) -#define COPYOUT(src, dst, size) \ - ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \ - copyout_kern(src, dst, size) \ - : \ - copyout(src, dst, size) +#define COPYOUT(src, dst, size) \ + ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) ? \ + copyout_kern(src, dst, size) \ + : \ + copyout(src, dst, size) /* Second-level exception handlers forward declarations */ void sleh_undef(struct arm_saved_state *, struct arm_vfpsaved_state *); void sleh_abort(struct arm_saved_state *, int); static kern_return_t sleh_alignment(struct arm_saved_state *); -static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs); +static void panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *regs); int sleh_alignment_count = 0; int trap_on_alignment_fault = 0; @@ -127,8 +127,9 @@ sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __u getCpuDatap()->cpu_stat.undef_ex_cnt++; /* Inherit the interrupt masks from previous */ - if (!(regs->cpsr & PSR_INTMASK)) + if (!(regs->cpsr & PSR_INTMASK)) { ml_set_interrupts_enabled(TRUE); + } #if CONFIG_DTRACE if (tempDTraceTrapHook) { @@ -145,24 +146,28 @@ sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __u if (regs->cpsr & PSR_TF) { uint16_t instr = 0; - if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) + if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) { goto exit; + } if (instr == FASTTRAP_THUMB_INSTR || instr == FASTTRAP_THUMB_RET_INSTR) { - if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) + if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) { /* If it succeeds, we are done... 
*/ goto exit; + } } } else { uint32_t instr = 0; - if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) + if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) { goto exit; + } if (instr == FASTTRAP_ARM_INSTR || instr == FASTTRAP_ARM_RET_INSTR) { - if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) + if (dtrace_user_probe(regs, instr) == KERN_SUCCESS) { /* If it succeeds, we are done... */ goto exit; + } } } } @@ -172,25 +177,28 @@ sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __u if (regs->cpsr & PSR_TF) { unsigned short instr = 0; - if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) + if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) { goto exit; + } if (IS_THUMB32(instr)) { unsigned int instr32; - instr32 = (instr<<16); + instr32 = (instr << 16); - if(COPYIN((user_addr_t)(((unsigned short *) (regs->pc))+1), (char *)&instr,(vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) + if (COPYIN((user_addr_t)(((unsigned short *) (regs->pc)) + 1), (char *)&instr, (vm_size_t)(sizeof(unsigned short))) != KERN_SUCCESS) { goto exit; + } instr32 |= instr; code[1] = instr32; -#if __ARM_VFP__ +#if __ARM_VFP__ if (IS_THUMB_VFP(instr32)) { /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */ - if (!get_vfp_enabled()) + if (!get_vfp_enabled()) { panic("VFP was disabled (thumb); VFP should always be enabled"); + } } #endif } else { @@ -205,15 +213,17 @@ sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __u } else { uint32_t instr = 0; - if(COPYIN((user_addr_t)(regs->pc), (char *)&instr,(vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) + if (COPYIN((user_addr_t)(regs->pc), (char *)&instr, (vm_size_t)(sizeof(uint32_t))) != KERN_SUCCESS) { goto exit; + } code[1] = instr; -#if __ARM_VFP__ +#if __ARM_VFP__ if (IS_ARM_VFP(instr)) { /* We no longer manage FPEXC beyond bootstrap, so verify that VFP is still enabled. */ - if (!get_vfp_enabled()) + if (!get_vfp_enabled()) { panic("VFP was disabled (arm); VFP should always be enabled"); + } } #endif @@ -224,7 +234,7 @@ sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __u } if (!((regs->cpsr & PSR_MODE_MASK) == PSR_USER_MODE)) { - boolean_t intr; + boolean_t intr; intr = ml_set_interrupts_enabled(FALSE); @@ -242,15 +252,15 @@ sleh_undef(struct arm_saved_state * regs, struct arm_vfpsaved_state * vfp_ss __u panic_with_thread_kernel_state("undefined kernel instruction", regs); (void) ml_set_interrupts_enabled(intr); - } else { exception_triage(exception, code, codeCnt); /* NOTREACHED */ } exit: - if (recover) + if (recover) { thread->recover = recover; + } } /* @@ -261,8 +271,8 @@ exit: void sleh_abort(struct arm_saved_state * regs, int type) { - int status; - int debug_status=0; + int status; + int debug_status = 0; int spsr; int exc; mach_exception_data_type_t codes[2]; @@ -273,7 +283,7 @@ sleh_abort(struct arm_saved_state * regs, int type) kern_return_t result; vm_offset_t recover; thread_t thread = current_thread(); - boolean_t intr; + boolean_t intr; recover = thread->recover; thread->recover = 0; @@ -285,10 +295,10 @@ sleh_abort(struct arm_saved_state * regs, int type) * Allow a platform-level error handler to decode it. 
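When the undefined-instruction handler sees a Thumb pc, it fetches one halfword and, only if IS_THUMB32() flags a 32-bit Thumb-2 encoding, fetches and merges the second halfword, exactly as in the instr32 assembly above. A minimal sketch of that merge; fetch16 is a hypothetical stand-in for the handler's COPYIN:

#include <stdint.h>

/* Mirrors the header's IS_THUMB32(): a leading 0b111xx halfword with
 * xx != 00 is the first half of a 32-bit Thumb-2 instruction. */
#define IS_THUMB32(op) ((((op) & 0xE000) == 0xE000) && (((op) & 0x1800) != 0x0000))

typedef uint16_t (*fetch16_fn)(uintptr_t addr);

/* High halfword first, low halfword second, matching
 * "instr32 = (instr << 16); ... instr32 |= instr" in sleh_undef(). */
static uint32_t
fetch_thumb_instr(uintptr_t pc, fetch16_fn fetch16)
{
	uint16_t hw = fetch16(pc);

	if (!IS_THUMB32(hw)) {
		return hw;                              /* 16-bit encoding */
	}
	return ((uint32_t)hw << 16) | fetch16(pc + 2);
}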
*/ if ((regs->fsr) & FSR_EXT) { - cpu_data_t *cdp = getCpuDatap(); + cpu_data_t *cdp = getCpuDatap(); if (cdp->platform_error_handler != (platform_error_handler_t) NULL) { - (*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, 0); + (*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, 0); /* If a platform error handler is registered, expect it to panic, not fall through */ panic("Unexpected return from platform_error_handler"); } @@ -315,12 +325,14 @@ sleh_abort(struct arm_saved_state * regs, int type) fault_type = VM_PROT_READ | VM_PROT_EXECUTE; } - if (status == FSR_DEBUG) - debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK; + if (status == FSR_DEBUG) { + debug_status = arm_debug_read_dscr() & ARM_DBGDSCR_MOE_MASK; + } /* Inherit the interrupt masks from previous */ - if (!(spsr & PSR_INTMASK)) + if (!(spsr & PSR_INTMASK)) { ml_set_interrupts_enabled(TRUE); + } if (type == T_DATA_ABT) { /* @@ -338,11 +350,13 @@ sleh_abort(struct arm_saved_state * regs, int type) if (!(regs->cpsr & PSR_TF)) { unsigned int ins = 0; - if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) + if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) { goto exit; + } - if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins)) + if (arm_mcr_cp15(ins) || arm_mcrr_cp15(ins)) { fault_type = VM_PROT_READ; + } } } else { fault_type = VM_PROT_READ; @@ -354,11 +368,13 @@ sleh_abort(struct arm_saved_state * regs, int type) if (!(regs->cpsr & PSR_TF)) { unsigned int ins = 0; - if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) + if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) { goto exit; + } - if ((ins & ARM_SWP_MASK) == ARM_SWP) + if ((ins & ARM_SWP_MASK) == ARM_SWP) { fault_type = VM_PROT_WRITE; + } } } } @@ -377,7 +393,6 @@ sleh_abort(struct arm_saved_state * regs, int type) } if ((type == T_PREFETCH_ABT) || (status == FSR_DEBUG)) { - intr = ml_set_interrupts_enabled(FALSE); if (status == FSR_DEBUG) { DebuggerCall(EXC_BREAKPOINT, regs); @@ -387,11 +402,9 @@ sleh_abort(struct arm_saved_state * regs, int type) panic_with_thread_kernel_state("prefetch abort in kernel mode", regs); (void) ml_set_interrupts_enabled(intr); - } else if (TEST_FSR_VMFAULT(status)) { - #if CONFIG_DTRACE - if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ + if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */ /* Point to next instruction */ regs->pc += ((regs->cpsr & PSR_TF) && !IS_THUMB32(*((uint16_t*) (regs->pc)))) ? 2 : 4; @@ -407,25 +420,27 @@ sleh_abort(struct arm_saved_state * regs, int type) } #endif - if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL) + if (VM_KERNEL_ADDRESS(vaddr) || thread == THREAD_NULL) { map = kernel_map; - else + } else { map = thread->map; + } if (!TEST_FSR_TRANSLATION_FAULT(status)) { /* check to see if it is just a pmap ref/modify fault */ result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE); - if (result == KERN_SUCCESS) + if (result == KERN_SUCCESS) { goto exit; + } } /* * We have to "fault" the page in. */ result = vm_fault(map, fault_addr, - fault_type, - FALSE /* change_wiring */, VM_KERN_MEMORY_NONE, - (map == kernel_map) ? 
THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0); + fault_type, + FALSE /* change_wiring */, VM_KERN_MEMORY_NONE, + (map == kernel_map) ? THREAD_UNINT : THREAD_ABORTSAFE, NULL, 0); if (result == KERN_SUCCESS) { goto exit; @@ -452,22 +467,21 @@ sleh_abort(struct arm_saved_state * regs, int type) goto exit; } - } intr = ml_set_interrupts_enabled(FALSE); panic_plain("kernel abort type %d: fault_type=0x%x, fault_addr=0x%x\n" - "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n" - "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n" - "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n" - "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n" - "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n", - type, fault_type, fault_addr, - regs->r[0], regs->r[1], regs->r[2], regs->r[3], - regs->r[4], regs->r[5], regs->r[6], regs->r[7], - regs->r[8], regs->r[9], regs->r[10], regs->r[11], - regs->r[12], regs->sp, regs->lr, regs->pc, - regs->cpsr, regs->fsr, regs->far); + "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n" + "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n" + "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n" + "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n" + "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n", + type, fault_type, fault_addr, + regs->r[0], regs->r[1], regs->r[2], regs->r[3], + regs->r[4], regs->r[5], regs->r[6], regs->r[7], + regs->r[8], regs->r[9], regs->r[10], regs->r[11], + regs->r[12], regs->sp, regs->lr, regs->pc, + regs->cpsr, regs->fsr, regs->far); (void) ml_set_interrupts_enabled(intr); @@ -479,7 +493,7 @@ sleh_abort(struct arm_saved_state * regs, int type) map = thread->map; #if CONFIG_DTRACE - if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ + if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ if (dtrace_tally_fault(fault_addr)) { /* Should a user mode fault under dtrace be ignored? */ if (recover) { regs->pc = recover; @@ -506,16 +520,17 @@ sleh_abort(struct arm_saved_state * regs, int type) if (!TEST_FSR_TRANSLATION_FAULT(status)) { /* check to see if it is just a pmap ref/modify fault */ result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, TRUE); - if (result == KERN_SUCCESS) + if (result == KERN_SUCCESS) { goto exception_return; + } } /* * We have to "fault" the page in. 
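Note the two-tier fault strategy here: for anything other than a translation fault, arm_fast_fault() first tries a cheap pmap reference/modify fixup, and the expensive vm_fault() page-in runs only if that fails. A hedged sketch of the control flow, with stub routines standing in for the pmap and VM layers:

#include <stdbool.h>

typedef enum { KR_SUCCESS, KR_FAILURE } kr_t;

/* Stand-in for arm_fast_fault(): succeeds only when the abort is pure
 * reference/modify bookkeeping that a pmap update can clear. */
static kr_t
fast_fault_fixup(bool just_ref_modify)
{
	return just_ref_modify ? KR_SUCCESS : KR_FAILURE;
}

/* Stand-in for vm_fault(): assume the pager can bring the page in. */
static kr_t
full_vm_fault(void)
{
	return KR_SUCCESS;
}

static kr_t
handle_data_abort(bool is_translation_fault, bool just_ref_modify)
{
	/* Cheap path first, exactly as sleh_abort() orders it. */
	if (!is_translation_fault &&
	    fast_fault_fixup(just_ref_modify) == KR_SUCCESS) {
		return KR_SUCCESS;
	}
	return full_vm_fault();     /* "fault" the page in */
}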
*/ result = vm_fault(map, fault_addr, fault_type, - FALSE /* change_wiring */, VM_KERN_MEMORY_NONE, - THREAD_ABORTSAFE, NULL, 0); + FALSE /* change_wiring */, VM_KERN_MEMORY_NONE, + THREAD_ABORTSAFE, NULL, 0); if (result == KERN_SUCCESS || result == KERN_ABORTED) { goto exception_return; } @@ -543,14 +558,16 @@ sleh_abort(struct arm_saved_state * regs, int type) /* NOTREACHED */ exception_return: - if (recover) + if (recover) { thread->recover = recover; + } thread_exception_return(); /* NOTREACHED */ exit: - if (recover) + if (recover) { thread->recover = recover; + } return; } @@ -578,33 +595,37 @@ sleh_alignment(struct arm_saved_state * regs) getCpuDatap()->cpu_stat.unaligned_cnt++; /* Do not try to emulate in modified execution states */ - if (regs->cpsr & (PSR_EF | PSR_JF)) + if (regs->cpsr & (PSR_EF | PSR_JF)) { return KERN_NOT_SUPPORTED; + } /* Disallow emulation of kernel instructions */ - if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) + if ((regs->cpsr & PSR_MODE_MASK) != PSR_USER_MODE) { return KERN_NOT_SUPPORTED; - + } + #define ALIGN_THRESHOLD 1024 if ((sleh_alignment_count++ & (ALIGN_THRESHOLD - 1)) == - (ALIGN_THRESHOLD - 1)) + (ALIGN_THRESHOLD - 1)) { kprintf("sleh_alignment: %d more alignment faults: %d total\n", - ALIGN_THRESHOLD, sleh_alignment_count); + ALIGN_THRESHOLD, sleh_alignment_count); + } if ((trap_on_alignment_fault != 0) - && (sleh_alignment_count % trap_on_alignment_fault == 0)) + && (sleh_alignment_count % trap_on_alignment_fault == 0)) { return KERN_NOT_SUPPORTED; + } status = regs->fsr; paddr = regs->far; if (regs->cpsr & PSR_TF) { - unsigned short ins16 = 0; + unsigned short ins16 = 0; /* Get aborted instruction */ -#if __ARM_SMP__ || __ARM_USER_PROTECT__ - if(COPYIN((user_addr_t)(regs->pc), (char *)&ins16,(vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) { +#if __ARM_SMP__ || __ARM_USER_PROTECT__ + if (COPYIN((user_addr_t)(regs->pc), (char *)&ins16, (vm_size_t)(sizeof(uint16_t))) != KERN_SUCCESS) { /* Failed to fetch instruction, return success to re-drive the exception */ return KERN_SUCCESS; } @@ -620,18 +641,21 @@ sleh_alignment(struct arm_saved_state * regs) */ reg_list = ins16 & 0xff; - if (reg_list == 0) + if (reg_list == 0) { return KERN_NOT_SUPPORTED; + } if (((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) || ((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA)) { base_index = (ins16 >> 8) & 0x7; ins = 0xE8800000 | (base_index << 16) | reg_list; - if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) + if ((ins16 & THUMB_STR_1_MASK) == THUMB_LDMIA) { ins |= (1 << 20); + } if (((ins16 & THUMB_STR_1_MASK) == THUMB_STMIA) || - !(reg_list & (1 << base_index))) + !(reg_list & (1 << base_index))) { ins |= (1 << 21); + } } else if ((ins16 & THUMB_PUSH_MASK) == THUMB_POP) { unsigned int r = (ins16 >> 8) & 1; ins = 0xE8BD0000 | (r << 15) | reg_list; @@ -643,8 +667,8 @@ sleh_alignment(struct arm_saved_state * regs) } } else { /* Get aborted instruction */ -#if __ARM_SMP__ || __ARM_USER_PROTECT__ - if(COPYIN((user_addr_t)(regs->pc), (char *)&ins,(vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) { +#if __ARM_SMP__ || __ARM_USER_PROTECT__ + if (COPYIN((user_addr_t)(regs->pc), (char *)&ins, (vm_size_t)(sizeof(unsigned int))) != KERN_SUCCESS) { /* Failed to fetch instruction, return success to re-drive the exception */ return KERN_SUCCESS; } @@ -654,8 +678,9 @@ sleh_alignment(struct arm_saved_state * regs) } /* Don't try to emulate unconditional instructions */ - if ((ins & 0xF0000000) == 0xF0000000) + if ((ins & 0xF0000000) == 0xF0000000) { return KERN_NOT_SUPPORTED; + } pre = 
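ALIGN_THRESHOLD above is a power of two, so the handler rate-limits its kprintf() with a mask instead of a modulo: (count++ & (N - 1)) == (N - 1) is true exactly once every N faults. A standalone illustration of the idiom:

#include <stdio.h>

#define ALIGN_THRESHOLD 1024        /* must be a power of two */

static int fault_count;

static void
note_alignment_fault(void)
{
	/* Fires on faults 1024, 2048, ... without dividing. */
	if ((fault_count++ & (ALIGN_THRESHOLD - 1)) == (ALIGN_THRESHOLD - 1)) {
		printf("%d more alignment faults: %d total\n",
		    ALIGN_THRESHOLD, fault_count);
	}
}

int
main(void)
{
	for (int i = 0; i < 3000; i++) {
		note_alignment_fault();
	}
	return 0;
}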
(ins >> 24) & 1; up = (ins >> 23) & 1; @@ -663,36 +688,37 @@ sleh_alignment(struct arm_saved_state * regs) write_back = (ins >> 21) & 1; base_index = (ins >> 16) & 0xf; - if ((ins & ARM_BLK_MASK) == ARM_STM) { /* STM or LDM */ + if ((ins & ARM_BLK_MASK) == ARM_STM) { /* STM or LDM */ int reg_count = 0; int waddr; for (rd_index = 0; rd_index < 16; rd_index++) { - if (reg_list & (1 << rd_index)) + if (reg_list & (1 << rd_index)) { reg_count++; + } } paddr = regs->r[base_index]; switch (ins & (ARM_POST_INDEXING | ARM_INCREMENT)) { - /* Increment after */ + /* Increment after */ case ARM_INCREMENT: waddr = paddr + reg_count * 4; break; - /* Increment before */ + /* Increment before */ case ARM_POST_INDEXING | ARM_INCREMENT: waddr = paddr + reg_count * 4; paddr += 4; break; - /* Decrement after */ + /* Decrement after */ case 0: waddr = paddr - reg_count * 4; paddr = waddr + 4; break; - /* Decrement before */ + /* Decrement before */ case ARM_POST_INDEXING: waddr = paddr - reg_count * 4; paddr = waddr; @@ -706,13 +732,15 @@ sleh_alignment(struct arm_saved_state * regs) if (reg_list & (1 << rd_index)) { src = ®s->r[rd_index]; - if ((ins & (1 << 20)) == 0) /* STM */ + if ((ins & (1 << 20)) == 0) { /* STM */ rc = COPYOUT(src, paddr, 4); - else /* LDM */ + } else { /* LDM */ rc = COPYIN(paddr, src, 4); + } - if (rc != KERN_SUCCESS) + if (rc != KERN_SUCCESS) { break; + } paddr += 4; } @@ -724,19 +752,21 @@ sleh_alignment(struct arm_saved_state * regs) } if (rc == KERN_SUCCESS) { - if (regs->cpsr & PSR_TF) + if (regs->cpsr & PSR_TF) { regs->pc += 2; - else + } else { regs->pc += 4; + } - if (write_back) + if (write_back) { regs->r[base_index] = paddr; + } } - return (rc); + return rc; } -#ifndef NO_KDEBUG +#ifndef NO_KDEBUG /* XXX quell warnings */ void syscall_trace(struct arm_saved_state * regs); void syscall_trace_exit(unsigned int, unsigned int); @@ -748,66 +778,67 @@ void interrupt_trace_exit(void); /* called from the fleh_swi handler, if TRACE_SYSCALL is enabled */ void syscall_trace( - struct arm_saved_state * regs) + struct arm_saved_state * regs) { kprintf("syscall: %d\n", regs->r[12]); } void syscall_trace_exit( - unsigned int r0, - unsigned int r1) + unsigned int r0, + unsigned int r1) { kprintf("syscall exit: 0x%x 0x%x\n", r0, r1); } void mach_syscall_trace( - struct arm_saved_state * regs, - unsigned int call_number) + struct arm_saved_state * regs, + unsigned int call_number) { int i, argc; int kdarg[3] = {0, 0, 0}; argc = mach_trap_table[call_number].mach_trap_arg_count; - if (argc > 3) + if (argc > 3) { argc = 3; + } - for (i = 0; i < argc; i++) + for (i = 0; i < argc; i++) { kdarg[i] = (int) regs->r[i]; + } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, - kdarg[0], kdarg[1], kdarg[2], 0, 0); - + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, + kdarg[0], kdarg[1], kdarg[2], 0, 0); } void mach_syscall_trace_exit( - unsigned int retval, - unsigned int call_number) + unsigned int retval, + unsigned int call_number) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END, - retval, 0, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END, + retval, 0, 0, 0, 0); } void interrupt_trace( - struct arm_saved_state * regs) + struct arm_saved_state * regs) { -#define UMODE(rp) (((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) +#define UMODE(rp) (((rp)->cpsr & PSR_MODE_MASK) == PSR_USER_MODE) KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - 
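The four cases above are the classic LDM/STM addressing modes: the P bit (named ARM_POST_INDEXING in this header) selects before/after, the U bit (ARM_INCREMENT) selects up/down, and the first transfer address follows from the register count. A condensed sketch of the same decode; ARM_P_BIT and ARM_U_BIT are illustrative aliases:

#include <stdint.h>

#define ARM_P_BIT (1u << 24)    /* pre-index: "before" forms    */
#define ARM_U_BIT (1u << 23)    /* increment (up) vs. decrement */

/* First transfer address of an ARM LDM/STM, given the base register
 * value and the number of registers in the list. */
static uint32_t
ldm_stm_start(uint32_t ins, uint32_t base, int reg_count)
{
	switch (ins & (ARM_P_BIT | ARM_U_BIT)) {
	case ARM_U_BIT:                    /* IA: increment after  */
		return base;
	case ARM_P_BIT | ARM_U_BIT:        /* IB: increment before */
		return base + 4;
	case 0:                            /* DA: decrement after  */
		return base - 4 * reg_count + 4;
	default:                           /* DB: decrement before */
		return base - 4 * reg_count;
	}
}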
MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, - 0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc), - UMODE(regs), 0, 0); + MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, + 0, UMODE(regs) ? regs->pc : VM_KERNEL_UNSLIDE(regs->pc), + UMODE(regs), 0, 0); } void interrupt_trace_exit( - void) + void) { #if KPERF kperf_interrupt(); @@ -826,20 +857,19 @@ interrupt_stats(void) SCHED_STATS_INTERRUPT(current_processor()); } -static void +static void panic_with_thread_kernel_state(const char *msg, struct arm_saved_state *regs) { panic_plain("%s (saved state:%p)\n" - "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n" - "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n" - "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n" - "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n" - "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n", - msg, regs, - regs->r[0], regs->r[1], regs->r[2], regs->r[3], - regs->r[4], regs->r[5], regs->r[6], regs->r[7], - regs->r[8], regs->r[9], regs->r[10], regs->r[11], - regs->r[12], regs->sp, regs->lr, regs->pc, - regs->cpsr, regs->fsr, regs->far); - + "r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x\n" + "r4: 0x%08x r5: 0x%08x r6: 0x%08x r7: 0x%08x\n" + "r8: 0x%08x r9: 0x%08x r10: 0x%08x r11: 0x%08x\n" + "r12: 0x%08x sp: 0x%08x lr: 0x%08x pc: 0x%08x\n" + "cpsr: 0x%08x fsr: 0x%08x far: 0x%08x\n", + msg, regs, + regs->r[0], regs->r[1], regs->r[2], regs->r[3], + regs->r[4], regs->r[5], regs->r[6], regs->r[7], + regs->r[8], regs->r[9], regs->r[10], regs->r[11], + regs->r[12], regs->sp, regs->lr, regs->pc, + regs->cpsr, regs->fsr, regs->far); } diff --git a/osfmk/arm/trap.h b/osfmk/arm/trap.h index b1d02a39c..d3a07cb10 100644 --- a/osfmk/arm/trap.h +++ b/osfmk/arm/trap.h @@ -28,36 +28,36 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _ARM_TRAP_H_ -#define _ARM_TRAP_H_ +#ifndef _ARM_TRAP_H_ +#define _ARM_TRAP_H_ /* * Hardware trap vectors for ARM. @@ -70,63 +70,63 @@ #define T_DATA_ABT 4 #define T_IRQ 6 #define T_FIQ 7 -#define T_PMU 8 +#define T_PMU 8 #define TRAP_NAMES "reset", "undefined instruction", "software interrupt", \ - "prefetch abort", "data abort", "irq interrupt", \ - "fast interrupt", "perfmon" + "prefetch abort", "data abort", "irq interrupt", \ + "fast interrupt", "perfmon" /* * Page-fault trap codes. 
*/ -#define T_PF_PROT 0x1 /* protection violation */ -#define T_PF_WRITE 0x2 /* write access */ -#define T_PF_USER 0x4 /* from user state */ +#define T_PF_PROT 0x1 /* protection violation */ +#define T_PF_WRITE 0x2 /* write access */ +#define T_PF_USER 0x4 /* from user state */ #if !defined(ASSEMBLER) && defined(MACH_KERNEL) #include -#define GDB_TRAP_INSTR1 0xe7ffdefe -#define GDB_TRAP_INSTR2 0xe7ffdeff +#define GDB_TRAP_INSTR1 0xe7ffdefe +#define GDB_TRAP_INSTR2 0xe7ffdeff -#define ARM_GDB_INSTR1 GDB_TRAP_INSTR1 -#define ARM_GDB_INSTR2 GDB_TRAP_INSTR2 +#define ARM_GDB_INSTR1 GDB_TRAP_INSTR1 +#define ARM_GDB_INSTR2 GDB_TRAP_INSTR2 -#define IS_ARM_GDB_TRAP(op) \ +#define IS_ARM_GDB_TRAP(op) \ (((op) == ARM_GDB_INSTR1) || ((op) == ARM_GDB_INSTR2)) -#define THUMB_GDB_INSTR1 (GDB_TRAP_INSTR1 & 0xFFFF) -#define THUMB_GDB_INSTR2 (GDB_TRAP_INSTR2 & 0xFFFF) +#define THUMB_GDB_INSTR1 (GDB_TRAP_INSTR1 & 0xFFFF) +#define THUMB_GDB_INSTR2 (GDB_TRAP_INSTR2 & 0xFFFF) -#define IS_THUMB_GDB_TRAP(op) \ +#define IS_THUMB_GDB_TRAP(op) \ (((op) == THUMB_GDB_INSTR1) || ((op) == THUMB_GDB_INSTR2)) -#define ARM_STR 0x04000000 /* STR */ -#define ARM_STRH 0x000000B0 /* STRH */ -#define ARM_STRH_MASK 0x0E1000F0 /* STRH MASK */ -#define ARM_SDX_MASK 0x0C100000 /* SINGLE DATA TRANSFER */ -#define ARM_SNGL_DX_MASK 0x0C000000 /* SINGLE DATA TRANSFER MASK */ -#define ARM_SDX 0x04000000 +#define ARM_STR 0x04000000 /* STR */ +#define ARM_STRH 0x000000B0 /* STRH */ +#define ARM_STRH_MASK 0x0E1000F0 /* STRH MASK */ +#define ARM_SDX_MASK 0x0C100000 /* SINGLE DATA TRANSFER */ +#define ARM_SNGL_DX_MASK 0x0C000000 /* SINGLE DATA TRANSFER MASK */ +#define ARM_SDX 0x04000000 -#define ARM_STM 0x08000000 /* STM */ -#define ARM_BDX_MASK 0x0E100000 /* BLOCK DATA TRANSFER */ -#define ARM_BLK_MASK 0x0E000000 /* BLOCK DATA TRANSFER */ -#define ARM_BDX 0x08000000 /* BLOCK DATA TRANSFER */ +#define ARM_STM 0x08000000 /* STM */ +#define ARM_BDX_MASK 0x0E100000 /* BLOCK DATA TRANSFER */ +#define ARM_BLK_MASK 0x0E000000 /* BLOCK DATA TRANSFER */ +#define ARM_BDX 0x08000000 /* BLOCK DATA TRANSFER */ -#define ARM_WRITE_BACK 0x00200000 -#define ARM_BASE_REG 0x000F0000 -#define ARM_INCREMENT 0x00800000 +#define ARM_WRITE_BACK 0x00200000 +#define ARM_BASE_REG 0x000F0000 +#define ARM_INCREMENT 0x00800000 -#define ARM_STC 0x0C000000 /* STC */ -#define ARM_CDX_MASK ARM_BDX_MASK /* COPROCESSOR DATA TRANSFER */ -#define ARM_CBLK_MASK ARM_BLK_MASK -#define ARM_CDX 0x0C000000 /* COPROCESSOR DATA TRANSFER */ +#define ARM_STC 0x0C000000 /* STC */ +#define ARM_CDX_MASK ARM_BDX_MASK /* COPROCESSOR DATA TRANSFER */ +#define ARM_CBLK_MASK ARM_BLK_MASK +#define ARM_CDX 0x0C000000 /* COPROCESSOR DATA TRANSFER */ -#define ARM_SWP 0x01000090 /* SWP */ -#define ARM_SWP_MASK 0x0FB00FF0 /* SWP */ +#define ARM_SWP 0x01000090 /* SWP */ +#define ARM_SWP_MASK 0x0FB00FF0 /* SWP */ #define ARM_POST_INDEXING 0x01000000 #define ARM_IMMEDIATE 0x02000000 @@ -135,108 +135,108 @@ #define ARM_ASR 2 #define ARM_ROR 3 -#define MCR_MASK 0x0F100F10 -#define MCR_CP15 0x0E000F10 -#define MCRR_MASK 0x0FF00F00 -#define MCRR_CP15 0x0C400F00 +#define MCR_MASK 0x0F100F10 +#define MCR_CP15 0x0E000F10 +#define MCRR_MASK 0x0FF00F00 +#define MCRR_CP15 0x0C400F00 -#define arm_mcr_cp15(op) (((op)&MCR_MASK) == 0x0E000F10) -#define arm_mcrr_cp15(op) (((op)&0x0FF00F00) == 0x0C400F00) +#define arm_mcr_cp15(op) (((op)&MCR_MASK) == 0x0E000F10) +#define arm_mcrr_cp15(op) (((op)&0x0FF00F00) == 0x0C400F00) -#define IS_THUMB32(op) ( \ +#define IS_THUMB32(op) ( \ (((op) & 0xE000) == 0xE000) && (((op) & 
0x1800) != 0x0000)) -#define THUMB_LDR_1_MASK 0x8800 /* (1) forms of LD* instructions */ -#define THUMB_STR_1_MASK 0xF800 /* (1) forms of ST* instructions */ -#define THUMB_STR_2_MASK 0xFE00 /* (2) forms of ST* instructions */ -#define THUMB_STR_3_MASK 0xF800 /* (3) forms of ST* instructions */ -#define THUMB_PUSH_MASK 0xFE00 /* PUSH instruction */ - -#define THUMB_LDRH_1 0x8800 /* LDRH(1) */ -#define THUMB_STMIA 0xC000 /* STMIA */ -#define THUMB_STR_1 0x6000 /* STR(1) */ -#define THUMB_STR_2 0x5000 /* STR(2) */ -#define THUMB_STR_3 0x9000 /* STR(3) */ -#define THUMB_STRB_1 0x7000 /* STRB(1) */ -#define THUMB_STRB_2 0x5400 /* STRB(2) */ -#define THUMB_STRH_1 0x8000 /* STRH(1) */ -#define THUMB_STRH_2 0x5200 /* STRH(2) */ -#define THUMB_PUSH 0xB400 /* PUSH */ -#define THUMB_LDMIA 0xC800 /* LDMIA */ -#define THUMB_POP 0xBC00 /* POP */ +#define THUMB_LDR_1_MASK 0x8800 /* (1) forms of LD* instructions */ +#define THUMB_STR_1_MASK 0xF800 /* (1) forms of ST* instructions */ +#define THUMB_STR_2_MASK 0xFE00 /* (2) forms of ST* instructions */ +#define THUMB_STR_3_MASK 0xF800 /* (3) forms of ST* instructions */ +#define THUMB_PUSH_MASK 0xFE00 /* PUSH instruction */ + +#define THUMB_LDRH_1 0x8800 /* LDRH(1) */ +#define THUMB_STMIA 0xC000 /* STMIA */ +#define THUMB_STR_1 0x6000 /* STR(1) */ +#define THUMB_STR_2 0x5000 /* STR(2) */ +#define THUMB_STR_3 0x9000 /* STR(3) */ +#define THUMB_STRB_1 0x7000 /* STRB(1) */ +#define THUMB_STRB_2 0x5400 /* STRB(2) */ +#define THUMB_STRH_1 0x8000 /* STRH(1) */ +#define THUMB_STRH_2 0x5200 /* STRH(2) */ +#define THUMB_PUSH 0xB400 /* PUSH */ +#define THUMB_LDMIA 0xC800 /* LDMIA */ +#define THUMB_POP 0xBC00 /* POP */ /* * Shifts, masks, and other values for load/store multiple decoding; largely needed for * supporting misaligned accesses. 
*/ -#define THUMB_STR_1_BASE_OFFSET 8 /* Offset of the base register field */ -#define THUMB_PUSH_EXTRA_OFFSET 8 /* Offset of the "extra" register field */ -#define ARM_STM_BASE_OFFSET 16 /* Offset of the base register field */ -#define ARM_STM_LOAD_OFFSET 20 /* Offset of the load flag */ -#define ARM_STM_WBACK_OFFSET 21 /* Offset of the writeback flag */ -#define ARM_STM_INCR_OFFSET 23 /* Offset of the increment flag */ -#define ARM_STM_BEFORE_OFFSET 24 /* Offset of the pre-index flag */ -#define ARM_REG_LIST_LR_OFFSET 14 /* Offset of LR in the register list */ -#define ARM_REG_LIST_PC_OFFSET 15 /* Offset of PC in the register list */ - -#define THUMB_STR_REG_LIST_MASK 0x000000FF /* Offset of the reg list is 0 */ -#define THUMB_STR_1_BASE_MASK 0x00000700 -#define THUMB_PUSH_EXTRA_MASK 0x00000100 -#define ARM_STM_REG_LIST_MASK 0x0000FFFF /* Offset of the reg list is 0 */ -#define ARM_STM_BASE_MASK 0x000F0000 -#define ARM_STM_LOAD_MASK 0x00100000 -#define ARM_STM_WBACK_MASK 0x00200000 -#define ARM_STM_INCR_MASK 0x00800000 -#define ARM_STM_BEFORE_MASK 0x01000000 -#define ARM_COND_MASK 0xF0000000 /* Mask for the condition code */ - -#define ARM_COND_UNCOND 0xF0000000 /* Instruction does not support condition codes */ - -#define ARM_SIMD_MASK0 0xFE000000 -#define ARM_SIMD_CODE0 0xF2000000 - -#define ARM_VFP_MASK0 0x0F000E10 -#define ARM_VFP_CODE0 0x0E000A00 - -#define ARM_SIMD_VFP_MASK0 0x0E000E00 -#define ARM_SIMD_VFP_CODE0 0x0C000A00 -#define ARM_SIMD_VFP_MASK1 0xFF100000 -#define ARM_SIMD_VFP_CODE1 0xF4000000 -#define ARM_SIMD_VFP_MASK2 0x0F000E10 -#define ARM_SIMD_VFP_CODE2 0x0E000A10 -#define ARM_SIMD_VFP_MASK3 0x0FE00E00 -#define ARM_SIMD_VFP_CODE3 0x0C400A00 - -#define IS_ARM_VFP(op) ( \ - (((op) & ARM_SIMD_MASK0) == ARM_SIMD_CODE0) \ - ||(((op) & ARM_VFP_MASK0) == ARM_VFP_CODE0) \ - ||(((op) & ARM_SIMD_VFP_MASK0) == ARM_SIMD_VFP_CODE0) \ - ||(((op) & ARM_SIMD_VFP_MASK1) == ARM_SIMD_VFP_CODE1) \ - ||(((op) & ARM_SIMD_VFP_MASK2) == ARM_SIMD_VFP_CODE2) \ +#define THUMB_STR_1_BASE_OFFSET 8 /* Offset of the base register field */ +#define THUMB_PUSH_EXTRA_OFFSET 8 /* Offset of the "extra" register field */ +#define ARM_STM_BASE_OFFSET 16 /* Offset of the base register field */ +#define ARM_STM_LOAD_OFFSET 20 /* Offset of the load flag */ +#define ARM_STM_WBACK_OFFSET 21 /* Offset of the writeback flag */ +#define ARM_STM_INCR_OFFSET 23 /* Offset of the increment flag */ +#define ARM_STM_BEFORE_OFFSET 24 /* Offset of the pre-index flag */ +#define ARM_REG_LIST_LR_OFFSET 14 /* Offset of LR in the register list */ +#define ARM_REG_LIST_PC_OFFSET 15 /* Offset of PC in the register list */ + +#define THUMB_STR_REG_LIST_MASK 0x000000FF /* Offset of the reg list is 0 */ +#define THUMB_STR_1_BASE_MASK 0x00000700 +#define THUMB_PUSH_EXTRA_MASK 0x00000100 +#define ARM_STM_REG_LIST_MASK 0x0000FFFF /* Offset of the reg list is 0 */ +#define ARM_STM_BASE_MASK 0x000F0000 +#define ARM_STM_LOAD_MASK 0x00100000 +#define ARM_STM_WBACK_MASK 0x00200000 +#define ARM_STM_INCR_MASK 0x00800000 +#define ARM_STM_BEFORE_MASK 0x01000000 +#define ARM_COND_MASK 0xF0000000 /* Mask for the condition code */ + +#define ARM_COND_UNCOND 0xF0000000 /* Instruction does not support condition codes */ + +#define ARM_SIMD_MASK0 0xFE000000 +#define ARM_SIMD_CODE0 0xF2000000 + +#define ARM_VFP_MASK0 0x0F000E10 +#define ARM_VFP_CODE0 0x0E000A00 + +#define ARM_SIMD_VFP_MASK0 0x0E000E00 +#define ARM_SIMD_VFP_CODE0 0x0C000A00 +#define ARM_SIMD_VFP_MASK1 0xFF100000 +#define ARM_SIMD_VFP_CODE1 0xF4000000 +#define ARM_SIMD_VFP_MASK2 
0x0F000E10 +#define ARM_SIMD_VFP_CODE2 0x0E000A10 +#define ARM_SIMD_VFP_MASK3 0x0FE00E00 +#define ARM_SIMD_VFP_CODE3 0x0C400A00 + +#define IS_ARM_VFP(op) ( \ + (((op) & ARM_SIMD_MASK0) == ARM_SIMD_CODE0) \ + ||(((op) & ARM_VFP_MASK0) == ARM_VFP_CODE0) \ + ||(((op) & ARM_SIMD_VFP_MASK0) == ARM_SIMD_VFP_CODE0) \ + ||(((op) & ARM_SIMD_VFP_MASK1) == ARM_SIMD_VFP_CODE1) \ + ||(((op) & ARM_SIMD_VFP_MASK2) == ARM_SIMD_VFP_CODE2) \ || (((op) & ARM_SIMD_VFP_MASK3) == ARM_SIMD_VFP_CODE3)) -#define THUMB_SIMD_MASK0 0xEF000000 -#define THUMB_SIMD_CODE0 0xEF000000 - -#define THUMB_VFP_MASK0 0xEF000E10 -#define THUMB_VFP_CODE0 0xEE000A00 - -#define THUMB_SIMD_VFP_MASK0 0xEE000E00 -#define THUMB_SIMD_VFP_CODE0 0xEC000A00 -#define THUMB_SIMD_VFP_MASK1 0xFF100000 -#define THUMB_SIMD_VFP_CODE1 0xF9000000 -#define THUMB_SIMD_VFP_MASK2 0xEF000E10 -#define THUMB_SIMD_VFP_CODE2 0xEE000A10 -#define THUMB_SIMD_VFP_MASK3 0xEFE00E00 -#define THUMB_SIMD_VFP_CODE3 0xEC400A00 - -#define IS_THUMB_VFP(op) ( \ - (((op) & THUMB_SIMD_MASK0) == THUMB_SIMD_CODE0 ) \ - || (((op) & THUMB_VFP_MASK0) == THUMB_VFP_CODE0 ) \ - || (((op) & THUMB_SIMD_VFP_MASK0) == THUMB_SIMD_VFP_CODE0 ) \ - || (((op) & THUMB_SIMD_VFP_MASK1) == THUMB_SIMD_VFP_CODE1 ) \ - || (((op) & THUMB_SIMD_VFP_MASK2) == THUMB_SIMD_VFP_CODE2 ) \ +#define THUMB_SIMD_MASK0 0xEF000000 +#define THUMB_SIMD_CODE0 0xEF000000 + +#define THUMB_VFP_MASK0 0xEF000E10 +#define THUMB_VFP_CODE0 0xEE000A00 + +#define THUMB_SIMD_VFP_MASK0 0xEE000E00 +#define THUMB_SIMD_VFP_CODE0 0xEC000A00 +#define THUMB_SIMD_VFP_MASK1 0xFF100000 +#define THUMB_SIMD_VFP_CODE1 0xF9000000 +#define THUMB_SIMD_VFP_MASK2 0xEF000E10 +#define THUMB_SIMD_VFP_CODE2 0xEE000A10 +#define THUMB_SIMD_VFP_MASK3 0xEFE00E00 +#define THUMB_SIMD_VFP_CODE3 0xEC400A00 + +#define IS_THUMB_VFP(op) ( \ + (((op) & THUMB_SIMD_MASK0) == THUMB_SIMD_CODE0 ) \ + || (((op) & THUMB_VFP_MASK0) == THUMB_VFP_CODE0 ) \ + || (((op) & THUMB_SIMD_VFP_MASK0) == THUMB_SIMD_VFP_CODE0 ) \ + || (((op) & THUMB_SIMD_VFP_MASK1) == THUMB_SIMD_VFP_CODE1 ) \ + || (((op) & THUMB_SIMD_VFP_MASK2) == THUMB_SIMD_VFP_CODE2 ) \ || (((op) & THUMB_SIMD_VFP_MASK3) == THUMB_SIMD_VFP_CODE3)) extern boolean_t arm_force_fast_fault(ppnum_t, vm_prot_t, int, void *); @@ -245,14 +245,14 @@ extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, boolean /* * Determines if the aborted instruction is read or write operation */ -#define arm_fault_type(op,spsr,vaddr) \ +#define arm_fault_type(op, spsr, vaddr) \ (((((op)&ARM_CDX_MASK) == ARM_STC) || \ (((op)&ARM_STRH_MASK) == ARM_STRH) || \ (((op)&ARM_BDX_MASK) == ARM_STM) || \ (((op)&ARM_SDX_MASK) == ARM_STR)) ? \ - (VM_PROT_WRITE|VM_PROT_READ) : (VM_PROT_READ)) - -#define thumb_fault_type(op,spsr,vaddr) \ + (VM_PROT_WRITE|VM_PROT_READ) : (VM_PROT_READ)) + +#define thumb_fault_type(op, spsr, vaddr) \ (((((op)&THUMB_STR_1_MASK) == THUMB_STMIA) || \ (((op)&THUMB_STR_1_MASK) == THUMB_STR_1) || \ (((op)&THUMB_STR_2_MASK) == THUMB_STR_2) || \ @@ -262,14 +262,14 @@ extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, boolean (((op)&THUMB_STR_1_MASK) == THUMB_STRH_1) || \ (((op)&THUMB_STR_2_MASK) == THUMB_STRH_2) || \ (((op)&THUMB_PUSH_MASK) == THUMB_PUSH)) ? 
\ - (VM_PROT_WRITE|VM_PROT_READ) : (VM_PROT_READ)) + (VM_PROT_WRITE|VM_PROT_READ) : (VM_PROT_READ)) typedef kern_return_t (*perfCallback)( - int trapno, - struct arm_saved_state *ss, - uintptr_t *, - int); + int trapno, + struct arm_saved_state *ss, + uintptr_t *, + int); -#endif /* !ASSEMBLER && MACH_KERNEL */ +#endif /* !ASSEMBLER && MACH_KERNEL */ -#endif /* _ARM_TRAP_H_ */ +#endif /* _ARM_TRAP_H_ */ diff --git a/osfmk/arm/vm_tuning.h b/osfmk/arm/vm_tuning.h index 728de1775..b0d3c0e5a 100644 --- a/osfmk/arm/vm_tuning.h +++ b/osfmk/arm/vm_tuning.h @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,7 +61,7 @@ * VM tuning parameters for arm (without reference bits). */ -#ifndef _ARM_VM_TUNING_H_ -#define _ARM_VM_TUNING_H_ +#ifndef _ARM_VM_TUNING_H_ +#define _ARM_VM_TUNING_H_ -#endif /* _ARM_VM_TUNING_H_ */ +#endif /* _ARM_VM_TUNING_H_ */ diff --git a/osfmk/arm/xpr.h b/osfmk/arm/xpr.h index b6151ddca..82904b1b0 100644 --- a/osfmk/arm/xpr.h +++ b/osfmk/arm/xpr.h @@ -33,4 +33,4 @@ * Machine dependent module for the XPR tracing facility. */ -#define XPR_TIMESTAMP (0) +#define XPR_TIMESTAMP (0) diff --git a/osfmk/arm64/Makefile b/osfmk/arm64/Makefile index 4498b9ff0..f7ec7555e 100644 --- a/osfmk/arm64/Makefile +++ b/osfmk/arm64/Makefile @@ -21,10 +21,9 @@ INSTALL_MD_LIST = INSTALL_KF_MD_LIST = $(ARM_HEADER_FILES) -INSTALL_KF_MD_LCL_LIST = machine_kpc.h monotonic.h pgtrace.h $(ARM_HEADER_FILES) - -EXPORT_MD_LIST = machine_cpuid.h machine_kpc.h monotonic.h proc_reg.h pgtrace.h asm.h +INSTALL_KF_MD_LCL_LIST = machine_kpc.h machine_remote_time.h monotonic.h pgtrace.h $(ARM_HEADER_FILES) +EXPORT_MD_LIST = machine_cpuid.h machine_kpc.h machine_remote_time.h monotonic.h proc_reg.h pgtrace.h asm.h EXPORT_MD_DIR = arm64 diff --git a/osfmk/arm64/alternate_debugger.c b/osfmk/arm64/alternate_debugger.c index 0fa80fe36..9216d276e 100644 --- a/osfmk/arm64/alternate_debugger.c +++ b/osfmk/arm64/alternate_debugger.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
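arm_fault_type() and thumb_fault_type() classify the aborted opcode purely with (op & MASK) == PATTERN tests: if any store encoding matches, the fault needs write permission, otherwise read suffices. The same pattern in function form, reusing two of the ARM masks from this header (the *_STORE names are illustrative; the header calls the patterns ARM_STR and ARM_STM):

#include <stdint.h>
#include <stdbool.h>

#define ARM_SDX_MASK  0x0C100000u   /* single data transfer + L bit */
#define ARM_SDX_STORE 0x04000000u   /* L bit clear: STR/STRB        */
#define ARM_BDX_MASK  0x0E100000u   /* block data transfer + L bit  */
#define ARM_BDX_STORE 0x08000000u   /* L bit clear: STM             */

/* True when the aborted ARM opcode stores, i.e. the fault needs
 * VM_PROT_WRITE on top of VM_PROT_READ. */
static bool
arm_opcode_is_store(uint32_t op)
{
	return ((op & ARM_SDX_MASK) == ARM_SDX_STORE) ||
	       ((op & ARM_BDX_MASK) == ARM_BDX_STORE);
}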
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,38 +22,38 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #if ALTERNATE_DEBUGGER /* - -The alternate debugger feature is enabled by setting the boot arg "alternate_debugger_init" -to the size of memory that should be set aside for the debugger. The boot arg -"alternate_debugger_init_pages" is used to allocate more vmpages that the alternate debugger -may use to do additional VA->PA mappings. The boot-arg "alternate_debugger_pause_for_load_at_boot" -will halt the system so that the debugger can be loaded early in the boot cycle -- once the -alternate debugger code is loaded, a register must be set to a 1 to continue the boot process. - -Here's an example: -nvram boot-arg="alternate_debugger_init=0x800000 alternate_debugger_init_pages=0x8000 alternate_debugger_pause_for_load_at_boot=1" - -The low memory global lgAltDebugger will contain the address of the allocated memory for -the alternate debugger. On arm64, the address of this low memory global is 0xffffff8000002048. - -At any point after the low memory global is non-zero, Astris may be used to halt the cpu -and load the alternate debugger: - -If no alternate debugger is given, but alternate_debugger_init has been specified, and the -kernel debugger is entered, the string ">MT<" is printed and normal processing continues. - -Anytime the alternate debugger is entered, the osversion string is modified to start with "ALT" -so that panic reports can clearly indicated that some kernel poking may have occurred, and -the panic should be weighted accordingly. - -*/ + * + * The alternate debugger feature is enabled by setting the boot arg "alternate_debugger_init" + * to the size of memory that should be set aside for the debugger. The boot arg + * "alternate_debugger_init_pages" is used to allocate more vmpages that the alternate debugger + * may use to do additional VA->PA mappings. The boot-arg "alternate_debugger_pause_for_load_at_boot" + * will halt the system so that the debugger can be loaded early in the boot cycle -- once the + * alternate debugger code is loaded, a register must be set to a 1 to continue the boot process. + * + * Here's an example: + * nvram boot-arg="alternate_debugger_init=0x800000 alternate_debugger_init_pages=0x8000 alternate_debugger_pause_for_load_at_boot=1" + * + * The low memory global lgAltDebugger will contain the address of the allocated memory for + * the alternate debugger. On arm64, the address of this low memory global is 0xffffff8000002048. + * + * At any point after the low memory global is non-zero, Astris may be used to halt the cpu + * and load the alternate debugger: + * + * If no alternate debugger is given, but alternate_debugger_init has been specified, and the + * kernel debugger is entered, the string ">MT<" is printed and normal processing continues. 
+ * + * Anytime the alternate debugger is entered, the osversion string is modified to start with "ALT" + * so that panic reports can clearly indicated that some kernel poking may have occurred, and + * the panic should be weighted accordingly. + * + */ #include @@ -78,14 +78,15 @@ static mach_vm_size_t alt_pages_size; typedef void (*t_putc_fn)(char c); typedef void (*t_call_altdbg_fn)(mach_vm_size_t size, mach_vm_address_t pages, mach_vm_size_t pages_size, t_putc_fn putc_address ); -// used as a temporary alternate debugger until another is loaded +// used as a temporary alternate debugger until another is loaded extern void alternate_debugger_just_return(__unused mach_vm_size_t size, __unused mach_vm_address_t pages, __unused mach_vm_size_t pages_size, t_putc_fn putc_address); extern void *alternate_debugger_just_return_end; // public entry to the alternate debugger -void alternate_debugger_enter(void) +void +alternate_debugger_enter(void) { - if ( alt_code != 0 ) { + if (alt_code != 0) { disable_preemption(); printf("########## Going to call ALTERNATE DEBUGGER\n"); @@ -95,10 +96,10 @@ void alternate_debugger_enter(void) flush_dcache(alt_code, (unsigned int)alt_size, 0); // set the code to execute - pmap_protect(kernel_map->pmap, alt_code, alt_code+alt_size, VM_PROT_READ|VM_PROT_EXECUTE); + pmap_protect(kernel_map->pmap, alt_code, alt_code + alt_size, VM_PROT_READ | VM_PROT_EXECUTE); // black-spot the OS version for any panic reports that occur because of entering the alternate debugger - if ( *osversion ) { + if (*osversion) { memcpy(osversion, "ALT", 3); // Version set, stomp on the begining of it } else { strncpy(osversion, "ALT - Version Not Set Yet", OSVERSIZE); @@ -113,11 +114,11 @@ void alternate_debugger_enter(void) } // public entry to check boot args and init accordingly -void alternate_debugger_init(void) +void +alternate_debugger_init(void) { // use the alternate debugger - if( PE_parse_boot_argn("alternate_debugger_init", (void*)&alt_size, sizeof(alt_size)) ) - { + if (PE_parse_boot_argn("alternate_debugger_init", (void*)&alt_size, sizeof(alt_size))) { vm_offset_t alt_va = 0; kprintf("########## ALTERNATE_DEBUGGER\n"); @@ -125,27 +126,25 @@ void alternate_debugger_init(void) PE_parse_boot_argn("alternate_debugger_init_pages", (void*)&alt_pages_size, sizeof(alt_pages_size)); alt_size = vm_map_round_page(alt_size, - VM_MAP_PAGE_MASK(kernel_map)); + VM_MAP_PAGE_MASK(kernel_map)); alt_pages_size = vm_map_round_page(alt_pages_size, - VM_MAP_PAGE_MASK(kernel_map)); + VM_MAP_PAGE_MASK(kernel_map)); kern_return_t kr = KERN_SUCCESS; kr = kmem_alloc_contig(kernel_map, &alt_va, alt_size, VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_LOMEM, VM_KERN_MEMORY_DIAG); - if( kr != KERN_SUCCESS) - { + if (kr != KERN_SUCCESS) { kprintf("########## ALTERNATE_DEBUGGER FAILED kmem_alloc_contig with %d\n", kr); alt_va = 0; - } - else { - if ( alt_pages_size ) { + } else { + if (alt_pages_size) { alt_pages = (vm_offset_t) kalloc((vm_size_t) alt_pages_size); } } kprintf("########## Initializing ALTERNATE DEBUGGER : [alloc size 0x%llx @0x%lx] [pages_size 0x%llx @0x%llx] -- lowmem pointer at %p\n", - alt_size, alt_va, alt_pages_size, alt_pages, &lowGlo.lgAltDebugger ); + alt_size, alt_va, alt_pages_size, alt_pages, &lowGlo.lgAltDebugger ); - if ( alt_va ) { + if (alt_va) { uintptr_t just_return_size = (uintptr_t)&alternate_debugger_just_return_end - (uintptr_t)&alternate_debugger_just_return; assert(just_return_size <= alt_size); // alt_size is page-rounded, just_return_size 
should be much less than a page. // install a simple return vector @@ -157,13 +156,13 @@ void alternate_debugger_init(void) #if 1 // DEBUG for BRING-UP testing unsigned int alt_init_test; - if(PE_parse_boot_argn("alternate_debugger_pause_for_load_at_boot", &alt_init_test, sizeof(alt_init_test)) ) { - + if (PE_parse_boot_argn("alternate_debugger_pause_for_load_at_boot", &alt_init_test, sizeof(alt_init_test))) { // debug!! kprintf("########## Waiting for ALTERNATE DEBUGGER to load (in file %s).... to continue, set register to 1", __FILE__ ); volatile int ii = 0; - while(!ii) + while (!ii) { ; + } kprintf("\n"); alternate_debugger_enter(); } diff --git a/osfmk/arm64/alternate_debugger.h b/osfmk/arm64/alternate_debugger.h index 22be4c09e..9dd630b34 100644 --- a/osfmk/arm64/alternate_debugger.h +++ b/osfmk/arm64/alternate_debugger.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _ALTERNATE_DEBUGGER_H_ @@ -42,4 +42,3 @@ __END_DECLS #endif /* ALTERNATE_DEBUGGER */ #endif /* _ALTERNATE_DEBUGGER_H_ */ - diff --git a/osfmk/arm64/bsd_arm64.c b/osfmk/arm64/bsd_arm64.c index 726b12bd3..f40b6bfca 100644 --- a/osfmk/arm64/bsd_arm64.c +++ b/osfmk/arm64/bsd_arm64.c @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef MACH_BSD +#ifdef MACH_BSD #include #include @@ -71,7 +71,7 @@ struct mach_call_args { }; static void -arm_set_mach_syscall_ret(struct arm_saved_state *state, int retval) +arm_set_mach_syscall_ret(struct arm_saved_state *state, int retval) { if (is_saved_state32(state)) { saved_state32(state)->r[0] = retval; @@ -88,17 +88,17 @@ arm_get_mach_syscall_args(struct arm_saved_state *state, struct mach_call_args * if (is_saved_state32(state)) { /* The trap table entry defines the number of 32-bit words to be copied in from userspace. 
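Everything in alternate_debugger_init() hangs off PE_parse_boot_argn(), which looks a named argument up on the boot command line and copies its value into a caller-supplied buffer. A toy user-space approximation of that contract (the kernel version also honors the destination size and non-numeric values):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for PE_parse_boot_argn(): find "name=value" on a
 * command line and parse the numeric value.  Deliberately naive:
 * strstr() could match a suffix of a longer argument name. */
static bool
parse_boot_argn(const char *cmdline, const char *name, uint64_t *out)
{
	const char *p = strstr(cmdline, name);

	if (p == NULL || p[strlen(name)] != '=') {
		return false;
	}
	*out = strtoull(p + strlen(name) + 1, NULL, 0);  /* base 0: 0x.. ok */
	return true;
}

int
main(void)
{
	const char *args = "alternate_debugger_init=0x800000 wfi=1";
	uint64_t alt_size = 0;

	parse_boot_argn(args, "alternate_debugger_init", &alt_size);
	return alt_size == 0x800000 ? 0 : 1;
}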
*/ reg_count = trapp->mach_trap_u32_words; - - /* - * We get 7 contiguous words; r0-r6, hop over r7 - * (frame pointer), optionally r8 + + /* + * We get 7 contiguous words; r0-r6, hop over r7 + * (frame pointer), optionally r8 */ if (reg_count <= 7) { bcopy((char*)saved_state32(state), (char*)dest, sizeof(uint32_t) * reg_count); } else if (reg_count <= 9) { bcopy((char*)saved_state32(state), (char*)dest, sizeof(uint32_t) * 7); - bcopy((char*)&saved_state32(state)->r[8], ((char*)dest) + sizeof(uint32_t) * 7, - reg_count - 7); + bcopy((char*)&saved_state32(state)->r[8], ((char*)dest) + sizeof(uint32_t) * 7, + reg_count - 7); } else { panic("Trap with %d words of args? We only support 9.", reg_count); } @@ -108,7 +108,7 @@ arm_get_mach_syscall_args(struct arm_saved_state *state, struct mach_call_args * #else #error U32 mach traps on ARM64 kernel requires munging #endif - } else { + } else { assert(is_saved_state64(state)); bcopy((char*)saved_state64(state), (char*)dest, trapp->mach_trap_arg_count * sizeof(uint64_t)); } @@ -119,7 +119,7 @@ arm_get_mach_syscall_args(struct arm_saved_state *state, struct mach_call_args * kern_return_t thread_setsinglestep(__unused thread_t thread, __unused int on) { - return (KERN_FAILURE); /* XXX TODO */ + return KERN_FAILURE; /* XXX TODO */ } #if CONFIG_DTRACE @@ -160,7 +160,7 @@ mach_syscall(struct arm_saved_state *state) DEBUG_KPRINT_SYSCALL_MACH( "mach_syscall: code=%d(%s) (pid %d, tid %lld)\n", - call_number, mach_syscall_name_table[call_number], + call_number, mach_syscall_name_table[call_number], proc_pid(current_proc()), thread_tid(current_thread())); #if DEBUG_TRACE @@ -188,17 +188,17 @@ mach_syscall(struct arm_saved_state *state) } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, - args.arg1, args.arg2, args.arg3, args.arg4, 0); + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, + args.arg1, args.arg2, args.arg3, args.arg4, 0); retval = mach_call(&args); DEBUG_KPRINT_SYSCALL_MACH("mach_syscall: retval=0x%x (pid %d, tid %lld)\n", retval, - proc_pid(current_proc()), thread_tid(current_thread())); + proc_pid(current_proc()), thread_tid(current_thread())); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END, - retval, 0, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END, + retval, 0, 0, 0, 0); arm_set_mach_syscall_ret(state, retval); diff --git a/osfmk/arm64/copyio.c b/osfmk/arm64/copyio.c index 599353c49..031e5a396 100644 --- a/osfmk/arm64/copyio.c +++ b/osfmk/arm64/copyio.c @@ -72,7 +72,7 @@ user_access_disable(void) static int copyio(copyio_type_t copytype, const char *src, char *dst, - vm_size_t nbytes, vm_size_t *lencopied) + vm_size_t nbytes, vm_size_t *lencopied) { int result = 0; vm_size_t bytes_copied = 0; @@ -81,11 +81,13 @@ copyio(copyio_type_t copytype, const char *src, char *dst, /* Reject TBI addresses */ if (copytype == COPYIO_OUT) { - if ((uintptr_t)dst & TBI_MASK) + if ((uintptr_t)dst & TBI_MASK) { return EINVAL; + } } else { - if ((uintptr_t)src & TBI_MASK) + if ((uintptr_t)src & TBI_MASK) { return EINVAL; + } } if (__probable(copyio_zalloc_check)) { @@ -94,10 +96,12 @@ copyio(copyio_type_t copytype, const char *src, char *dst, } else if (copytype == COPYIO_OUT) { kernel_addr = (void*)(uintptr_t)src; } - if (kernel_addr) + if (kernel_addr) { kernel_buf_size = zone_element_size(kernel_addr, NULL); - if (__improbable(kernel_buf_size && kernel_buf_size < nbytes)) + } + if 
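The TBI_MASK test above rejects tagged user pointers before any copy: with ARMv8 Top Byte Ignore the MMU would silently strip the tag, so copyio() refuses such addresses up front with EINVAL. A sketch of the check; the 0xFF00... value is an assumption here, the real TBI_MASK is defined elsewhere in the tree:

#include <stdint.h>
#include <errno.h>

/* Assumed value: the top byte of a 64-bit VA, which ARMv8 TBI ignores. */
#define TBI_MASK 0xFF00000000000000ull

/* Refuse tagged pointers before copying, as copyio() does. */
static int
reject_tbi(uint64_t user_ptr)
{
	return (user_ptr & TBI_MASK) ? EINVAL : 0;
}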
(__improbable(kernel_buf_size && kernel_buf_size < nbytes)) { panic("copyio: kernel buffer %p has size %lu < nbytes %lu", kernel_addr, kernel_buf_size, nbytes); + } } #if KASAN @@ -161,16 +165,20 @@ copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes) { int result; - if (nbytes == 0) + if (nbytes == 0) { return 0; + } result = copyin_validate(user_addr, (uintptr_t)kernel_addr, nbytes); - if (result) return result; + if (result) { + return result; + } - if (current_thread()->map->pmap == kernel_pmap) + if (current_thread()->map->pmap == kernel_pmap) { return copyin_kern(user_addr, kernel_addr, nbytes); - else + } else { return copyio(COPYIO_IN, (const char *)(uintptr_t)user_addr, kernel_addr, nbytes, NULL); + } } /* @@ -181,19 +189,22 @@ copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes) int copyin_word(const user_addr_t user_addr, uint64_t *kernel_addr, vm_size_t nbytes) { - int result; + int result; /* Verify sizes */ - if ((nbytes != 4) && (nbytes != 8)) + if ((nbytes != 4) && (nbytes != 8)) { return EINVAL; + } /* Test alignment */ - if (user_addr & (nbytes - 1)) + if (user_addr & (nbytes - 1)) { return EINVAL; + } result = copyin_validate(user_addr, (uintptr_t)kernel_addr, nbytes); - if (result) + if (result) { return result; + } return copyio(COPYIO_IN_WORD, (const char *)user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL); } @@ -204,12 +215,15 @@ copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_s int result; *lencopied = 0; - if (nbytes == 0) + if (nbytes == 0) { return ENAMETOOLONG; + } result = copyin_validate(user_addr, (uintptr_t)kernel_addr, nbytes); - if (result) return result; + if (result) { + return result; + } return copyio(COPYIO_INSTR, (const char *)(uintptr_t)user_addr, kernel_addr, nbytes, lencopied); } @@ -219,16 +233,20 @@ copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes) { int result; - if (nbytes == 0) + if (nbytes == 0) { return 0; + } result = copyout_validate((uintptr_t)kernel_addr, user_addr, nbytes); - if (result) return result; + if (result) { + return result; + } - if (current_thread()->map->pmap == kernel_pmap) + if (current_thread()->map->pmap == kernel_pmap) { return copyout_kern(kernel_addr, user_addr, nbytes); - else + } else { return copyio(COPYIO_OUT, kernel_addr, (char *)(uintptr_t)user_addr, nbytes, NULL); + } } @@ -247,39 +265,41 @@ const int copysize_limit_panic = (64 * 1024 * 1024); */ static int copy_validate(const user_addr_t user_addr, - uintptr_t kernel_addr, vm_size_t nbytes) + uintptr_t kernel_addr, vm_size_t nbytes) { uintptr_t kernel_addr_last = kernel_addr + nbytes; if (__improbable(kernel_addr < VM_MIN_KERNEL_ADDRESS || kernel_addr > VM_MAX_KERNEL_ADDRESS || kernel_addr_last < kernel_addr || - kernel_addr_last > VM_MAX_KERNEL_ADDRESS)) + kernel_addr_last > VM_MAX_KERNEL_ADDRESS)) { panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__, - (void *)user_addr, (void *)kernel_addr, nbytes); + (void *)user_addr, (void *)kernel_addr, nbytes); + } user_addr_t user_addr_last = user_addr + nbytes; if (__improbable((user_addr_last < user_addr) || ((user_addr + nbytes) > vm_map_max(current_thread()->map)) || - (user_addr < vm_map_min(current_thread()->map)))) - return (EFAULT); + (user_addr < vm_map_min(current_thread()->map)))) { + return EFAULT; + } - if (__improbable(nbytes > copysize_limit_panic)) + if (__improbable(nbytes > copysize_limit_panic)) { panic("%s(%p, %p, %lu) - transfer too large", __func__, - (void *)user_addr, (void 
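copy_validate() checks both ends of the transfer, and the subtle part is the wrap test: kernel_addr_last < kernel_addr catches nbytes pushing the end address past 2^64. Distilled into one predicate:

#include <stdint.h>
#include <stdbool.h>

/* Does [addr, addr + nbytes) stay inside [lo, hi] without the end
 * wrapping?  "last < addr" is the overflow check copy_validate()
 * relies on for both the kernel and user ranges. */
static bool
range_ok(uint64_t addr, uint64_t nbytes, uint64_t lo, uint64_t hi)
{
	uint64_t last = addr + nbytes;

	return addr >= lo && last >= addr && last <= hi;
}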
*)kernel_addr, nbytes); + (void *)user_addr, (void *)kernel_addr, nbytes); + } - return (0); + return 0; } int copyin_validate(const user_addr_t ua, uintptr_t ka, vm_size_t nbytes) { - return (copy_validate(ua, ka, nbytes)); + return copy_validate(ua, ka, nbytes); } int copyout_validate(uintptr_t ka, const user_addr_t ua, vm_size_t nbytes) { - return (copy_validate(ua, ka, nbytes)); + return copy_validate(ua, ka, nbytes); } - diff --git a/osfmk/arm64/cpu.c b/osfmk/arm64/cpu.c index 4d1300d75..483d4673b 100644 --- a/osfmk/arm64/cpu.c +++ b/osfmk/arm64/cpu.c @@ -67,8 +67,8 @@ #include #endif /* MONOTONIC */ -extern boolean_t idle_enable; -extern uint64_t wake_abstime; +extern boolean_t idle_enable; +extern uint64_t wake_abstime; #if WITH_CLASSIC_S2R void sleep_token_buffer_init(void); @@ -137,7 +137,9 @@ static boolean_t coresight_debug_enabled = FALSE; #if defined(CONFIG_XNUPOST) void arm64_ipi_test_callback(void *); -void arm64_ipi_test_callback(void *parm) { +void +arm64_ipi_test_callback(void *parm) +{ volatile uint64_t *ipi_test_data = parm; cpu_data_t *cpu_data; @@ -148,7 +150,9 @@ void arm64_ipi_test_callback(void *parm) { uint64_t arm64_ipi_test_data[MAX_CPUS]; -void arm64_ipi_test() { +void +arm64_ipi_test() +{ volatile uint64_t *ipi_test_data; uint32_t timeout_ms = 100; uint64_t then, now, delta; @@ -167,28 +171,28 @@ void arm64_ipi_test() { ipi_test_data = &arm64_ipi_test_data[i]; *ipi_test_data = ~i; kern_return_t error = cpu_xcall((int)i, (void *)arm64_ipi_test_callback, (void *)(uintptr_t)ipi_test_data); - if (error != KERN_SUCCESS) + if (error != KERN_SUCCESS) { panic("CPU %d was unable to IPI CPU %u: error %d", current_cpu_number, i, error); + } then = mach_absolute_time(); while (*ipi_test_data != i) { now = mach_absolute_time(); - absolutetime_to_nanoseconds(now-then, &delta); + absolutetime_to_nanoseconds(now - then, &delta); if ((delta / NSEC_PER_MSEC) > timeout_ms) { panic("CPU %d tried to IPI CPU %d but didn't get correct response within %dms, respose: %llx", current_cpu_number, i, timeout_ms, *ipi_test_data); } } } - } #endif /* defined(CONFIG_XNUPOST) */ static void configure_coresight_registers(cpu_data_t *cdp) { - uint64_t addr; - int i; + uint64_t addr; + int i; assert(cdp); @@ -206,11 +210,13 @@ configure_coresight_registers(cpu_data_t *cdp) * need the kernel to unlock CTI, so it is safer * to avoid doing the access. 
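arm64_ipi_test() pairs each cpu_xcall() with a bounded poll: it spins on the acknowledgement word and converts the elapsed mach_absolute_time() delta to milliseconds, panicking past timeout_ms. The same shape in portable C, with clock_gettime() standing in for mach_absolute_time():

#include <stdint.h>
#include <time.h>

static uint64_t
now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Spin until *ack takes the expected value or timeout_ms elapses;
 * the kernel version panics where this returns -1. */
static int
wait_for_ack(volatile uint64_t *ack, uint64_t expected, uint64_t timeout_ms)
{
	uint64_t then = now_ns();

	while (*ack != expected) {
		if ((now_ns() - then) / 1000000ull > timeout_ms) {
			return -1;
		}
	}
	return 0;
}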
*/ - if (i == CORESIGHT_CTI) + if (i == CORESIGHT_CTI) { continue; + } /* Skip debug-only registers on production chips */ - if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) + if (((i == CORESIGHT_ED) || (i == CORESIGHT_UTT)) && !coresight_debug_enabled) { continue; + } if (!cdp->coresight_base[i]) { addr = cdp->cpu_regmap_paddr + CORESIGHT_OFFSET(i); @@ -225,8 +231,9 @@ configure_coresight_registers(cpu_data_t *cdp) } } /* Unlock EDLAR, CTILAR, PMLAR */ - if (i != CORESIGHT_UTT) + if (i != CORESIGHT_UTT) { *(volatile uint32_t *)(cdp->coresight_base[i] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY; + } } } } @@ -265,7 +272,6 @@ cpu_sleep(void) CleanPoC_Dcache(); PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id); - } /* @@ -276,22 +282,26 @@ void __attribute__((noreturn)) cpu_idle(void) { cpu_data_t *cpu_data_ptr = getCpuDatap(); - uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop; + uint64_t new_idle_timeout_ticks = 0x0ULL, lastPop; - if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) + if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled)) { Idle_load_context(); - if (!SetIdlePop()) + } + if (!SetIdlePop()) { Idle_load_context(); + } lastPop = cpu_data_ptr->rtcPop; pmap_switch_user_ttb(kernel_pmap); cpu_data_ptr->cpu_active_thread = current_thread(); - if (cpu_data_ptr->cpu_user_debug) + if (cpu_data_ptr->cpu_user_debug) { arm_debug_set(NULL); + } cpu_data_ptr->cpu_user_debug = NULL; - if (cpu_data_ptr->cpu_idle_notify) - ((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks); + if (cpu_data_ptr->cpu_idle_notify) { + ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks); + } if (cpu_data_ptr->idle_timer_notify != 0) { if (new_idle_timeout_ticks == 0x0ULL) { @@ -302,8 +312,9 @@ cpu_idle(void) clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline); } timer_resync_deadlines(); - if (cpu_data_ptr->rtcPop != lastPop) + if (cpu_data_ptr->rtcPop != lastPop) { SetIdlePop(); + } } #if KPC @@ -375,14 +386,15 @@ cpu_idle(void) void cpu_idle_exit(boolean_t from_reset) { - uint64_t new_idle_timeout_ticks = 0x0ULL; + uint64_t new_idle_timeout_ticks = 0x0ULL; cpu_data_t *cpu_data_ptr = getCpuDatap(); assert(exception_stack_pointer() != 0); /* Back from WFI, unlock OSLAR and EDLAR. 
*/ - if (from_reset) + if (from_reset) { configure_coresight_registers(cpu_data_ptr); + } #if KPC kpc_idle_exit(); @@ -394,8 +406,9 @@ cpu_idle_exit(boolean_t from_reset) pmap_switch_user_ttb(cpu_data_ptr->cpu_active_thread->map->pmap); - if (cpu_data_ptr->cpu_idle_notify) - ((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks); + if (cpu_data_ptr->cpu_idle_notify) { + ((processor_idle_t) cpu_data_ptr->cpu_idle_notify)(cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks); + } if (cpu_data_ptr->idle_timer_notify != 0) { if (new_idle_timeout_ticks == 0x0ULL) { @@ -420,7 +433,6 @@ cpu_init(void) assert(exception_stack_pointer() != 0); if (cdp->cpu_type != CPU_TYPE_ARM64) { - cdp->cpu_type = CPU_TYPE_ARM64; timer_call_queue_init(&cdp->rtclock_timer.queue); @@ -459,6 +471,7 @@ cpu_init(void) cdp->cpu_stat.irq_ex_cnt_wake = 0; cdp->cpu_stat.ipi_cnt_wake = 0; cdp->cpu_stat.timer_cnt_wake = 0; + cdp->cpu_stat.pmi_cnt_wake = 0; cdp->cpu_running = TRUE; cdp->cpu_sleep_token_last = cdp->cpu_sleep_token; cdp->cpu_sleep_token = 0x0UL; @@ -473,27 +486,29 @@ cpu_init(void) void cpu_stack_alloc(cpu_data_t *cpu_data_ptr) { - vm_offset_t irq_stack = 0; - vm_offset_t exc_stack = 0; + vm_offset_t irq_stack = 0; + vm_offset_t exc_stack = 0; kern_return_t kr = kernel_memory_allocate(kernel_map, &irq_stack, - INTSTACK_SIZE + (2 * PAGE_SIZE), - PAGE_MASK, - KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT, - VM_KERN_MEMORY_STACK); - if (kr != KERN_SUCCESS) + INTSTACK_SIZE + (2 * PAGE_SIZE), + PAGE_MASK, + KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT, + VM_KERN_MEMORY_STACK); + if (kr != KERN_SUCCESS) { panic("Unable to allocate cpu interrupt stack\n"); + } cpu_data_ptr->intstack_top = irq_stack + PAGE_SIZE + INTSTACK_SIZE; cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top; kr = kernel_memory_allocate(kernel_map, &exc_stack, - EXCEPSTACK_SIZE + (2 * PAGE_SIZE), - PAGE_MASK, - KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT, - VM_KERN_MEMORY_STACK); - if (kr != KERN_SUCCESS) + EXCEPSTACK_SIZE + (2 * PAGE_SIZE), + PAGE_MASK, + KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT, + VM_KERN_MEMORY_STACK); + if (kr != KERN_SUCCESS) { panic("Unable to allocate cpu exception stack\n"); + } cpu_data_ptr->excepstack_top = exc_stack + PAGE_SIZE + EXCEPSTACK_SIZE; cpu_data_ptr->excepstackptr = cpu_data_ptr->excepstack_top; @@ -502,12 +517,13 @@ cpu_stack_alloc(cpu_data_t *cpu_data_ptr) void cpu_data_free(cpu_data_t *cpu_data_ptr) { - if (cpu_data_ptr == &BootCpuData) - return; + if (cpu_data_ptr == &BootCpuData) { + return; + } cpu_processor_free( cpu_data_ptr->cpu_processor); - kfree( (void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE); - kfree( (void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE); + (kfree)((void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE); + (kfree)((void *)(cpu_data_ptr->excepstack_top - EXCEPSTACK_SIZE), EXCEPSTACK_SIZE); kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t)); } @@ -583,7 +599,7 @@ cpu_data_init(cpu_data_t *cpu_data_ptr) kern_return_t cpu_data_register(cpu_data_t *cpu_data_ptr) { - int cpu = cpu_data_ptr->cpu_number; + int cpu = cpu_data_ptr->cpu_number; #if KASAN for (int i = 0; i < CPUWINDOWS_MAX; i++) { @@ -592,9 +608,8 @@ cpu_data_register(cpu_data_t *cpu_data_ptr) #endif CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr; - CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys( (vm_offset_t)cpu_data_ptr); + 
CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys((vm_offset_t)cpu_data_ptr); return KERN_SUCCESS; - } @@ -615,10 +630,11 @@ cpu_start(int cpu) cpu_data_ptr->cpu_pmap_cpu_data.cpu_nested_pmap = NULL; - if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL) + if (cpu_data_ptr->cpu_processor->next_thread != THREAD_NULL) { first_thread = cpu_data_ptr->cpu_processor->next_thread; - else + } else { first_thread = cpu_data_ptr->cpu_processor->idle_thread; + } cpu_data_ptr->cpu_active_thread = first_thread; first_thread->machine.CpuDatap = cpu_data_ptr; @@ -669,28 +685,31 @@ cpu_timebase_init(boolean_t from_boot) int cpu_cluster_id(void) { - return (getCpuDatap()->cpu_cluster_id); + return getCpuDatap()->cpu_cluster_id; } __attribute__((noreturn)) void ml_arm_sleep(void) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); + cpu_data_t *cpu_data_ptr = getCpuDatap(); if (cpu_data_ptr == &BootCpuData) { cpu_data_t *target_cdp; - int cpu; - int max_cpu; + int cpu; + int max_cpu; max_cpu = ml_get_max_cpu_number(); - for (cpu=0; cpu <= max_cpu; cpu++) { + for (cpu = 0; cpu <= max_cpu; cpu++) { target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) + if ((target_cdp == NULL) || (target_cdp == cpu_data_ptr)) { continue; + } - while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH); + while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH) { + ; + } } /* @@ -712,11 +731,10 @@ ml_arm_sleep(void) // do not go through SecureROM/iBoot on the warm boot path. The // reconfig engine script brings the CPU out of reset at the kernel's // reset vector which points to the warm boot initialization code. - if(sleepTokenBuffer != (vm_offset_t) NULL) { + if (sleepTokenBuffer != (vm_offset_t) NULL) { platform_cache_shutdown(); bcopy((const void *)suspend_signature, (void *)sleepTokenBuffer, sizeof(SleepToken)); - } - else { + } else { panic("No sleep token buffer"); } #endif @@ -728,8 +746,8 @@ ml_arm_sleep(void) #endif /* Architectural debug state: - * Grab debug lock EDLAR and clear bit 0 in EDPRCR, - * tell debugger to not prevent power gating. + * Grab debug lock EDLAR and clear bit 0 in EDPRCR, + * tell debugger to not prevent power gating. */ if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) { *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY; @@ -761,8 +779,8 @@ ml_arm_sleep(void) CleanPoU_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t)); /* Architectural debug state: - * Grab debug lock EDLAR and clear bit 0 in EDPRCR, - * tell debugger to not prevent power gating. + * Grab debug lock EDLAR and clear bit 0 in EDPRCR, + * tell debugger to not prevent power gating.
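A sketch, using C11 atomics and placeholder constants, of the sleep-token rendezvous in ml_arm_sleep() above: the boot CPU spins until every other CPU has published ARM_CPU_ON_SLEEP_PATH in its per-CPU token before tearing down shared state.

#include <stdatomic.h>

#define MODEL_SLEEP_TOKEN 0x534c50u   /* assumed stand-in for ARM_CPU_ON_SLEEP_PATH */
#define MODEL_MAX_CPUS    6           /* assumed CPU count */

static _Atomic unsigned model_sleep_tokens[MODEL_MAX_CPUS];

static void model_wait_for_secondaries(int boot_cpu)
{
    for (int cpu = 0; cpu < MODEL_MAX_CPUS; cpu++) {
        if (cpu == boot_cpu) {
            continue;
        }
        /* each secondary stores its token just before entering WFI */
        while (atomic_load_explicit(&model_sleep_tokens[cpu],
            memory_order_acquire) != MODEL_SLEEP_TOKEN) {
            ;   /* spin, matching the empty-body loop in the patch */
        }
    }
}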
*/ if (cpu_data_ptr->coresight_base[CORESIGHT_ED]) { *(volatile uint32_t *)(cpu_data_ptr->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY; @@ -777,34 +795,36 @@ ml_arm_sleep(void) void cpu_machine_idle_init(boolean_t from_boot) { - static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL; - cpu_data_t *cpu_data_ptr = getCpuDatap(); + static vm_address_t resume_idle_cpu_paddr = (vm_address_t)NULL; + cpu_data_t *cpu_data_ptr = getCpuDatap(); if (from_boot) { - unsigned long jtag = 0; - int wfi_tmp = 1; - uint32_t production = 1; - DTEntry entry; + unsigned long jtag = 0; + int wfi_tmp = 1; + uint32_t production = 1; + DTEntry entry; - if (PE_parse_boot_argn("jtag", &jtag, sizeof (jtag))) { - if (jtag != 0) + if (PE_parse_boot_argn("jtag", &jtag, sizeof(jtag))) { + if (jtag != 0) { idle_enable = FALSE; - else + } else { idle_enable = TRUE; - } else + } + } else { idle_enable = TRUE; + } - PE_parse_boot_argn("wfi", &wfi_tmp, sizeof (wfi_tmp)); + PE_parse_boot_argn("wfi", &wfi_tmp, sizeof(wfi_tmp)); // bits 7..0 give the wfi type switch (wfi_tmp & 0xff) { - case 0 : + case 0: // disable wfi wfi = 0; break; #if DEVELOPMENT || DEBUG - case 2 : + case 2: // wfi overhead simulation // 31..16 - wfi delay in us // 15..8 - flags @@ -815,8 +835,8 @@ cpu_machine_idle_init(boolean_t from_boot) break; #endif /* DEVELOPMENT || DEBUG */ - case 1 : - default : + case 1: + default: // do nothing break; } @@ -832,12 +852,14 @@ cpu_machine_idle_init(boolean_t from_boot) // Determine if we are on production or debug chip if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) { - unsigned int size; - void *prop; + unsigned int size; + void *prop; - if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size)) - if (size == 4) + if (kSuccess == DTGetProperty(entry, "effective-production-status-ap", &prop, &size)) { + if (size == 4) { bcopy(prop, &production, size); + } + } } if (!production) { #if defined(APPLE_ARM64_ARCH_FAMILY) @@ -855,15 +877,15 @@ cpu_machine_idle_init(boolean_t from_boot) static addr64_t SleepToken_low_paddr = (addr64_t)NULL; if (sleepTokenBuffer != (vm_offset_t) NULL) { SleepToken_low_paddr = ml_vtophys(sleepTokenBuffer); - } - else { + } else { panic("No sleep token buffer"); } bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)running_signature), - SleepToken_low_paddr, sizeof(SleepToken)); + SleepToken_low_paddr, sizeof(SleepToken)); flush_dcache((vm_offset_t)SleepToken, sizeof(SleepToken), TRUE); - }; + } + ; #endif cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr; @@ -875,32 +897,34 @@ _Atomic uint32_t cpu_idle_count = 0; void machine_track_platform_idle(boolean_t entry) { - if (entry) + if (entry) { (void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED); - else + } else { (void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED); + } } #if WITH_CLASSIC_S2R void sleep_token_buffer_init(void) { - cpu_data_t *cpu_data_ptr = getCpuDatap(); - DTEntry entry; - size_t size; - void **prop; + cpu_data_t *cpu_data_ptr = getCpuDatap(); + DTEntry entry; + size_t size; + void **prop; if ((cpu_data_ptr == &BootCpuData) && (sleepTokenBuffer == (vm_offset_t) NULL)) { /* Find the stpage node in the device tree */ - if (kSuccess != DTLookupEntry(0, "stram", &entry)) + if (kSuccess != DTLookupEntry(0, "stram", &entry)) { return; + } - if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size)) + if (kSuccess != DTGetProperty(entry, "reg", (void **)&prop, (unsigned int *)&size)) { return; + }
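A sketch of the "wfi" boot-arg layout the comments above describe: bits 7..0 select the WFI mode (0 disabled, 1 default, 2 simulated overhead on DEVELOPMENT/DEBUG kernels); for mode 2 the comment assigns flags to bits 15..8 and a microsecond delay to bits 31..16. The struct and helper below are illustrative only.

#include <stdint.h>

struct model_wfi_cfg {
    unsigned mode;      /* bits 7..0  */
    unsigned flags;     /* bits 15..8 */
    unsigned delay_us;  /* bits 31..16 */
};

static struct model_wfi_cfg model_decode_wfi(uint32_t wfi_bootarg)
{
    struct model_wfi_cfg cfg;

    cfg.mode     = wfi_bootarg & 0xff;
    cfg.flags    = (wfi_bootarg >> 8) & 0xff;
    cfg.delay_us = wfi_bootarg >> 16;
    return cfg;
}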
/* Map the page into the kernel space */ sleepTokenBuffer = ml_io_map(((vm_offset_t *)prop)[0], ((vm_size_t *)prop)[1]); } } #endif - diff --git a/osfmk/arm64/dbgwrap.c b/osfmk/arm64/dbgwrap.c index 282432450..666efc2d3 100644 --- a/osfmk/arm64/dbgwrap.c +++ b/osfmk/arm64/dbgwrap.c @@ -32,24 +32,24 @@ #include #include -#define DBGWRAP_REG_OFFSET 0 -#define DBGWRAP_DBGHALT (1ULL << 31) -#define DBGWRAP_DBGACK (1ULL << 28) - -#define EDDTRRX_REG_OFFSET 0x80 -#define EDITR_REG_OFFSET 0x84 -#define EDSCR_REG_OFFSET 0x88 -#define EDSCR_TXFULL (1ULL << 29) -#define EDSCR_ITE (1ULL << 24) -#define EDSCR_MA (1ULL << 20) -#define EDSCR_ERR (1ULL << 6) -#define EDDTRTX_REG_OFFSET 0x8C -#define EDRCR_REG_OFFSET 0x90 -#define EDRCR_CSE (1ULL << 2) -#define EDPRSR_REG_OFFSET 0x314 -#define EDPRSR_OSLK (1ULL << 5) - -#define MAX_EDITR_RETRIES 16 +#define DBGWRAP_REG_OFFSET 0 +#define DBGWRAP_DBGHALT (1ULL << 31) +#define DBGWRAP_DBGACK (1ULL << 28) + +#define EDDTRRX_REG_OFFSET 0x80 +#define EDITR_REG_OFFSET 0x84 +#define EDSCR_REG_OFFSET 0x88 +#define EDSCR_TXFULL (1ULL << 29) +#define EDSCR_ITE (1ULL << 24) +#define EDSCR_MA (1ULL << 20) +#define EDSCR_ERR (1ULL << 6) +#define EDDTRTX_REG_OFFSET 0x8C +#define EDRCR_REG_OFFSET 0x90 +#define EDRCR_CSE (1ULL << 2) +#define EDPRSR_REG_OFFSET 0x314 +#define EDPRSR_OSLK (1ULL << 5) + +#define MAX_EDITR_RETRIES 16 /* Older SoCs require 32-bit accesses for DBGWRAP; * newer ones require 64-bit accesses. */ @@ -60,29 +60,31 @@ typedef uint64_t dbgwrap_reg_t; #endif #if DEVELOPMENT || DEBUG -#define MAX_STUFFED_INSTRS 64 +#define MAX_STUFFED_INSTRS 64 uint32_t stuffed_instrs[MAX_STUFFED_INSTRS]; volatile uint32_t stuffed_instr_count = 0; #endif -static volatile uint32_t halt_from_cpu = (uint32_t)-1; +static volatile uint32_t halt_from_cpu = (uint32_t)-1; boolean_t ml_dbgwrap_cpu_is_halted(int cpu_index) { cpu_data_t *cdp = cpu_datap(cpu_index); - if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) + if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) { return FALSE; + } - return ((*(volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET) & DBGWRAP_DBGACK) != 0); + return (*(volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET) & DBGWRAP_DBGACK) != 0; } dbgwrap_status_t ml_dbgwrap_wait_cpu_halted(int cpu_index, uint64_t timeout_ns) { cpu_data_t *cdp = cpu_datap(cpu_index); - if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) + if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) { return DBGWRAP_ERR_UNSUPPORTED; + } volatile dbgwrap_reg_t *dbgWrapReg = (volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET); @@ -90,8 +92,9 @@ ml_dbgwrap_wait_cpu_halted(int cpu_index, uint64_t timeout_ns) nanoseconds_to_absolutetime(timeout_ns, &interval); uint64_t deadline = mach_absolute_time() + interval; while (!(*dbgWrapReg & DBGWRAP_DBGACK)) { - if (mach_absolute_time() > deadline) - return DBGWRAP_ERR_HALT_TIMEOUT; + if (mach_absolute_time() > deadline) { + return DBGWRAP_ERR_HALT_TIMEOUT; + } } return DBGWRAP_SUCCESS; @@ -101,23 +104,27 @@ dbgwrap_status_t ml_dbgwrap_halt_cpu(int cpu_index, uint64_t timeout_ns) { cpu_data_t *cdp = cpu_datap(cpu_index); - if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) + if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_UTT] == 0)) { return DBGWRAP_ERR_UNSUPPORTED; + } /* Only one cpu is allowed to initiate the halt sequence, to prevent cpus from cross-halting * each other. 
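A sketch of the bounded-poll idiom ml_dbgwrap_wait_cpu_halted() uses above: convert the nanosecond budget into an absolute deadline once, then poll the status bit until it sets or the clock passes the deadline. now_abs() and ns_to_abs() are assumed stand-ins for mach_absolute_time() and nanoseconds_to_absolutetime().

#include <stdint.h>

extern uint64_t now_abs(void);            /* assumed clock source */
extern uint64_t ns_to_abs(uint64_t ns);   /* assumed unit conversion */

static int model_poll_until_set(volatile const uint64_t *reg, uint64_t bit,
    uint64_t timeout_ns)
{
    uint64_t deadline = now_abs() + ns_to_abs(timeout_ns);

    while (!(*reg & bit)) {
        if (now_abs() > deadline) {
            return -1;   /* analogue of DBGWRAP_ERR_HALT_TIMEOUT */
        }
    }
    return 0;            /* analogue of DBGWRAP_SUCCESS */
}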
The first cpu to request a halt may then halt any and all other cpus besides itself. */ int curcpu = cpu_number(); - if (cpu_index == curcpu) + if (cpu_index == curcpu) { return DBGWRAP_ERR_SELF_HALT; + } if (!hw_compare_and_store((uint32_t)-1, (unsigned int)curcpu, &halt_from_cpu) && - (halt_from_cpu != (uint32_t)curcpu)) + (halt_from_cpu != (uint32_t)curcpu)) { return DBGWRAP_ERR_INPROGRESS; + } volatile dbgwrap_reg_t *dbgWrapReg = (volatile dbgwrap_reg_t *)(cdp->coresight_base[CORESIGHT_UTT] + DBGWRAP_REG_OFFSET); - if (ml_dbgwrap_cpu_is_halted(cpu_index)) + if (ml_dbgwrap_cpu_is_halted(cpu_index)) { return DBGWRAP_WARN_ALREADY_HALTED; + } /* Clear all other writable bits besides dbgHalt; none of the power-down or reset bits must be set. */ *dbgWrapReg = DBGWRAP_DBGHALT; @@ -125,16 +132,17 @@ ml_dbgwrap_halt_cpu(int cpu_index, uint64_t timeout_ns) if (timeout_ns != 0) { dbgwrap_status_t stat = ml_dbgwrap_wait_cpu_halted(cpu_index, timeout_ns); return stat; - } - else + } else { return DBGWRAP_SUCCESS; + } } static void ml_dbgwrap_stuff_instr(cpu_data_t *cdp, uint32_t instr, uint64_t timeout_ns, dbgwrap_status_t *status) { - if (*status < 0) + if (*status < 0) { return; + } volatile uint32_t *editr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDITR_REG_OFFSET); volatile uint32_t *edscr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDSCR_REG_OFFSET); @@ -159,18 +167,21 @@ ml_dbgwrap_stuff_instr(cpu_data_t *cdp, uint32_t instr, uint64_t timeout_ns, dbg *status = DBGWRAP_ERR_INSTR_TIMEOUT; return; } - if (edscr_val & EDSCR_ERR) + if (edscr_val & EDSCR_ERR) { break; + } } if (edscr_val & EDSCR_ERR) { /* If memory access mode was enabled by a debugger, clear it. - * This will cause ERR to be set on any attempt to use EDITR. */ - if (edscr_val & EDSCR_MA) + * This will cause ERR to be set on any attempt to use EDITR. */ + if (edscr_val & EDSCR_MA) { *edscr = edscr_val & ~EDSCR_MA; + } *edrcr = EDRCR_CSE; ++retries; - } else + } else { break; + } } while (retries < MAX_EDITR_RETRIES); if (retries >= MAX_EDITR_RETRIES) { @@ -182,14 +193,15 @@ ml_dbgwrap_stuff_instr(cpu_data_t *cdp, uint32_t instr, uint64_t timeout_ns, dbg static uint64_t ml_dbgwrap_read_dtr(cpu_data_t *cdp, uint64_t timeout_ns, dbgwrap_status_t *status) { - if (*status < 0) + if (*status < 0) { return 0; + } uint64_t interval; nanoseconds_to_absolutetime(timeout_ns, &interval); uint64_t deadline = mach_absolute_time() + interval; - /* Per armv8 debug spec, writes to DBGDTR_EL0 on target cpu will set EDSCR.TXFull, + /* Per armv8 debug spec, writes to DBGDTR_EL0 on target cpu will set EDSCR.TXFull, * with bits 63:32 available in EDDTRRX and bits 31:0 available in EDDTRTX.
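A sketch of the single-initiator gate in ml_dbgwrap_halt_cpu() above, modeling hw_compare_and_store() with a C11 compare-exchange: the first CPU to swap the (uint32_t)-1 sentinel for its own number owns the halt sequence, the owner may proceed again on re-entry, and every other CPU is turned away with an in-progress error.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t model_halt_owner = (uint32_t)-1;

static int model_claim_halt(uint32_t curcpu)
{
    uint32_t expected = (uint32_t)-1;

    if (!atomic_compare_exchange_strong(&model_halt_owner, &expected, curcpu) &&
        expected != curcpu) {
        return -1;   /* analogue of DBGWRAP_ERR_INPROGRESS */
    }
    return 0;        /* this CPU now owns the halt sequence */
}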
*/ volatile uint32_t *edscr = (volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + EDSCR_REG_OFFSET); @@ -207,15 +219,16 @@ ml_dbgwrap_read_dtr(cpu_data_t *cdp, uint64_t timeout_ns, dbgwrap_status_t *stat uint32_t dtrrx = *((volatile uint32_t*)(cdp->coresight_base[CORESIGHT_ED] + EDDTRRX_REG_OFFSET)); uint32_t dtrtx = *((volatile uint32_t*)(cdp->coresight_base[CORESIGHT_ED] + EDDTRTX_REG_OFFSET)); - return (((uint64_t)dtrrx << 32) | dtrtx); + return ((uint64_t)dtrrx << 32) | dtrtx; } dbgwrap_status_t ml_dbgwrap_halt_cpu_with_state(int cpu_index, uint64_t timeout_ns, dbgwrap_thread_state_t *state) { cpu_data_t *cdp = cpu_datap(cpu_index); - if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_ED] == 0)) + if ((cdp == NULL) || (cdp->coresight_base[CORESIGHT_ED] == 0)) { return DBGWRAP_ERR_UNSUPPORTED; + } /* Ensure memory-mapped coresight registers can be written */ *((volatile uint32_t *)(cdp->coresight_base[CORESIGHT_ED] + ARM_DEBUG_OFFSET_DBGLAR)) = ARM_DBG_LOCK_ACCESS_KEY; @@ -252,7 +265,7 @@ ml_dbgwrap_halt_cpu_with_state(int cpu_index, uint64_t timeout_ns, dbgwrap_threa * rather than sp when used as the transfer operand there. Instead, load sp into a GPR * we've already saved off and then store that register in the DTR. I've chosen x18 * as the temporary GPR since it's reserved by the arm64 ABI and unused by xnu, so overwriting - * it poses the least risk of causing trouble for external debuggers. */ + * it poses the least risk of causing trouble for external debuggers. */ instr = (0x91U << 24) | (31 << 5) | 18; // mov x18, sp ml_dbgwrap_stuff_instr(cdp, instr, timeout_ns, &status); @@ -279,5 +292,3 @@ ml_dbgwrap_halt_cpu_with_state(int cpu_index, uint64_t timeout_ns, dbgwrap_threa return status; } - - diff --git a/osfmk/arm64/genassym.c b/osfmk/arm64/genassym.c index c9755bde8..faf7f8843 100644 --- a/osfmk/arm64/genassym.c +++ b/osfmk/arm64/genassym.c @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -86,11 +86,6 @@ #include #include -#if CONFIG_DTRACE -#define NEED_DTRACE_DEFS -#include <../bsd/sys/lockstat.h> -#endif /* CONFIG_DTRACE */ - /* * genassym.c is used to produce an * assembly file which, intermingled with unuseful assembly code, @@ -104,52 +99,51 @@ * the values, but we cannot run anything on the target machine. 
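A note on the instruction-stuffing code in dbgwrap.c above: the value (0x91U << 24) | (31 << 5) | 18 hand-assembles "mov x18, sp", which in AArch64 is the alias of "add x18, sp, #0" (register number 31 selects SP in this encoding). A tiny illustrative encoder makes the bit layout explicit; the helper name is invented.

#include <assert.h>
#include <stdint.h>

static uint32_t model_encode_add_imm0(unsigned rd, unsigned rn)
{
    /* 0x91 = 64-bit ADD (immediate) opcode byte; shift and imm12 both zero */
    return (0x91u << 24) | (rn << 5) | rd;
}

static void model_encode_check(void)
{
    assert(model_encode_add_imm0(18, 31) == 0x910003F2u);  /* mov x18, sp */
}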
*/ -#define DECLARE(SYM,VAL) \ +#define DECLARE(SYM, VAL) \ __asm("DEFINITION__define__" SYM ":\t .ascii \"%0\"" : : "n" ((u_long)(VAL))) -int main( - int argc, - char ** argv); +int main( + int argc, + char ** argv); int main( - int argc, - char **argv) + int argc, + char **argv) { + DECLARE("T_PREFETCH_ABT", T_PREFETCH_ABT); + DECLARE("T_DATA_ABT", T_DATA_ABT); - DECLARE("T_PREFETCH_ABT", T_PREFETCH_ABT); - DECLARE("T_DATA_ABT", T_DATA_ABT); - - DECLARE("AST_URGENT", AST_URGENT); - DECLARE("AST_PREEMPTION", AST_PREEMPTION); + DECLARE("AST_URGENT", AST_URGENT); + DECLARE("AST_PREEMPTION", AST_PREEMPTION); - DECLARE("TH_RECOVER", offsetof(struct thread, recover)); - DECLARE("TH_CONTINUATION", offsetof(struct thread, continuation)); - DECLARE("TH_KERNEL_STACK", offsetof(struct thread, kernel_stack)); + DECLARE("TH_RECOVER", offsetof(struct thread, recover)); + DECLARE("TH_CONTINUATION", offsetof(struct thread, continuation)); + DECLARE("TH_KERNEL_STACK", offsetof(struct thread, kernel_stack)); DECLARE("TH_KSTACKPTR", offsetof(struct thread, machine.kstackptr)); - DECLARE("THREAD_UTHREAD", offsetof(struct thread, uthread)); + DECLARE("THREAD_UTHREAD", offsetof(struct thread, uthread)); DECLARE("TASK_MACH_EXC_PORT", - offsetof(struct task, exc_actions[EXC_MACH_SYSCALL].port)); + offsetof(struct task, exc_actions[EXC_MACH_SYSCALL].port)); /* These fields are being added on demand */ - DECLARE("ACT_TASK", offsetof(struct thread, task)); + DECLARE("ACT_TASK", offsetof(struct thread, task)); DECLARE("ACT_CONTEXT", offsetof(struct thread, machine.contextData)); - DECLARE("ACT_UPCB", offsetof(struct thread, machine.upcb)); + DECLARE("ACT_UPCB", offsetof(struct thread, machine.upcb)); // DECLARE("ACT_PCBDATA", offsetof(struct thread, machine.contextData.ss)); DECLARE("ACT_UNEON", offsetof(struct thread, machine.uNeon)); // DECLARE("ACT_NEONDATA", offsetof(struct thread, machine.contextData.ns)); - DECLARE("TH_CTH_SELF", offsetof(struct thread, machine.cthread_self)); - DECLARE("TH_CTH_DATA", offsetof(struct thread, machine.cthread_data)); - DECLARE("ACT_PREEMPT_CNT", offsetof(struct thread, machine.preemption_count)); - DECLARE("ACT_CPUDATAP", offsetof(struct thread, machine.CpuDatap)); - DECLARE("ACT_MAP", offsetof(struct thread, map)); - DECLARE("ACT_DEBUGDATA", offsetof(struct thread, machine.DebugData)); - DECLARE("TH_IOTIER_OVERRIDE", offsetof(struct thread, iotier_override)); - DECLARE("TH_RWLOCK_CNT", offsetof(struct thread, rwlock_count)); - DECLARE("TH_SCHED_FLAGS", offsetof(struct thread, sched_flags)); - DECLARE("TH_SFLAG_RW_PROMOTED_BIT", TH_SFLAG_RW_PROMOTED_BIT); + DECLARE("TH_CTH_SELF", offsetof(struct thread, machine.cthread_self)); + DECLARE("TH_CTH_DATA", offsetof(struct thread, machine.cthread_data)); + DECLARE("ACT_PREEMPT_CNT", offsetof(struct thread, machine.preemption_count)); + DECLARE("ACT_CPUDATAP", offsetof(struct thread, machine.CpuDatap)); + DECLARE("ACT_MAP", offsetof(struct thread, map)); + DECLARE("ACT_DEBUGDATA", offsetof(struct thread, machine.DebugData)); + DECLARE("TH_IOTIER_OVERRIDE", offsetof(struct thread, iotier_override)); + DECLARE("TH_RWLOCK_CNT", offsetof(struct thread, rwlock_count)); + DECLARE("TH_SCHED_FLAGS", offsetof(struct thread, sched_flags)); + DECLARE("TH_SFLAG_RW_PROMOTED_BIT", TH_SFLAG_RW_PROMOTED_BIT); DECLARE("TH_MACH_SYSCALLS", offsetof(struct thread, syscalls_mach)); DECLARE("TH_UNIX_SYSCALLS", offsetof(struct thread, syscalls_unix)); @@ -158,7 +152,7 @@ main( DECLARE("MACH_TRAP_TABLE_COUNT", MACH_TRAP_TABLE_COUNT); 
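A standalone sketch of the genassym technique described above: DECLARE() is never executed; it plants each constant as an immediate operand in the compiler's assembly output, and the build greps the DEFINITION__define__ markers out of that output to produce an offsets header for assembly sources. Compile this with "cc -S" to see the effect; the struct and names here are invented.

#include <stddef.h>

struct model_thread {
    void *recover;
    void *continuation;
    unsigned long kstackptr;
};

#define MODEL_DECLARE(SYM, VAL) \
    __asm("DEFINITION__define__" SYM ":\t .ascii \"%0\"" : : "n" ((unsigned long)(VAL)))

void model_genassym(void)
{
    MODEL_DECLARE("MODEL_TH_RECOVER",   offsetof(struct model_thread, recover));
    MODEL_DECLARE("MODEL_TH_KSTACKPTR", offsetof(struct model_thread, kstackptr));
}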
DECLARE("MACH_TRAP_TABLE_ENTRY_SIZE", sizeof(mach_trap_t)); - DECLARE("MAP_PMAP", offsetof(struct _vm_map, pmap)); + DECLARE("MAP_PMAP", offsetof(struct _vm_map, pmap)); DECLARE("ARM_CONTEXT_SIZE", sizeof(arm_context_t)); @@ -260,177 +254,177 @@ main( DECLARE("PGSHIFT", ARM_PGSHIFT); DECLARE("PGMASK", ARM_PGMASK); - DECLARE("VM_MIN_ADDRESS", VM_MIN_ADDRESS); - DECLARE("VM_MAX_ADDRESS", VM_MAX_ADDRESS); - DECLARE("VM_MIN_KERNEL_ADDRESS", VM_MIN_KERNEL_ADDRESS); - DECLARE("VM_MAX_KERNEL_ADDRESS", VM_MAX_KERNEL_ADDRESS); - DECLARE("KERNELBASE", VM_MIN_KERNEL_ADDRESS); - DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE); - DECLARE("TBI_MASK", TBI_MASK); + DECLARE("VM_MIN_ADDRESS", VM_MIN_ADDRESS); + DECLARE("VM_MAX_ADDRESS", VM_MAX_ADDRESS); + DECLARE("VM_MIN_KERNEL_ADDRESS", VM_MIN_KERNEL_ADDRESS); + DECLARE("VM_MAX_KERNEL_ADDRESS", VM_MAX_KERNEL_ADDRESS); + DECLARE("KERNELBASE", VM_MIN_KERNEL_ADDRESS); + DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE); + DECLARE("TBI_MASK", TBI_MASK); - DECLARE("KERN_INVALID_ADDRESS", KERN_INVALID_ADDRESS); + DECLARE("KERN_INVALID_ADDRESS", KERN_INVALID_ADDRESS); - DECLARE("MAX_CPUS", MAX_CPUS); + DECLARE("MAX_CPUS", MAX_CPUS); DECLARE("cdeSize", - sizeof(struct cpu_data_entry)); + sizeof(struct cpu_data_entry)); DECLARE("cdSize", - sizeof(struct cpu_data)); - - DECLARE("CPU_ACTIVE_THREAD", - offsetof(cpu_data_t, cpu_active_thread)); - DECLARE("CPU_ACTIVE_STACK", - offsetof(cpu_data_t, cpu_active_stack)); - DECLARE("CPU_ISTACKPTR", - offsetof(cpu_data_t, istackptr)); - DECLARE("CPU_INTSTACK_TOP", - offsetof(cpu_data_t, intstack_top)); - DECLARE("CPU_EXCEPSTACKPTR", - offsetof(cpu_data_t, excepstackptr)); - DECLARE("CPU_EXCEPSTACK_TOP", - offsetof(cpu_data_t, excepstack_top)); + sizeof(struct cpu_data)); + + DECLARE("CPU_ACTIVE_THREAD", + offsetof(cpu_data_t, cpu_active_thread)); + DECLARE("CPU_ACTIVE_STACK", + offsetof(cpu_data_t, cpu_active_stack)); + DECLARE("CPU_ISTACKPTR", + offsetof(cpu_data_t, istackptr)); + DECLARE("CPU_INTSTACK_TOP", + offsetof(cpu_data_t, intstack_top)); + DECLARE("CPU_EXCEPSTACKPTR", + offsetof(cpu_data_t, excepstackptr)); + DECLARE("CPU_EXCEPSTACK_TOP", + offsetof(cpu_data_t, excepstack_top)); #if __ARM_KERNEL_PROTECT__ DECLARE("CPU_EXC_VECTORS", - offsetof(cpu_data_t, cpu_exc_vectors)); + offsetof(cpu_data_t, cpu_exc_vectors)); #endif /* __ARM_KERNEL_PROTECT__ */ - DECLARE("CPU_NUMBER_GS", - offsetof(cpu_data_t,cpu_number)); - DECLARE("CPU_IDENT", - offsetof(cpu_data_t,cpu_ident)); - DECLARE("CPU_RUNNING", - offsetof(cpu_data_t,cpu_running)); - DECLARE("CPU_MCOUNT_OFF", - offsetof(cpu_data_t,cpu_mcount_off)); + DECLARE("CPU_NUMBER_GS", + offsetof(cpu_data_t, cpu_number)); + DECLARE("CPU_IDENT", + offsetof(cpu_data_t, cpu_ident)); + DECLARE("CPU_RUNNING", + offsetof(cpu_data_t, cpu_running)); + DECLARE("CPU_MCOUNT_OFF", + offsetof(cpu_data_t, cpu_mcount_off)); DECLARE("CPU_PENDING_AST", - offsetof(cpu_data_t,cpu_pending_ast)); + offsetof(cpu_data_t, cpu_pending_ast)); DECLARE("CPU_PROCESSOR", - offsetof(cpu_data_t,cpu_processor)); + offsetof(cpu_data_t, cpu_processor)); DECLARE("CPU_CACHE_DISPATCH", - offsetof(cpu_data_t,cpu_cache_dispatch)); - DECLARE("CPU_BASE_TIMEBASE", - offsetof(cpu_data_t,cpu_base_timebase)); + offsetof(cpu_data_t, cpu_cache_dispatch)); + DECLARE("CPU_BASE_TIMEBASE", + offsetof(cpu_data_t, cpu_base_timebase)); DECLARE("CPU_DECREMENTER", - offsetof(cpu_data_t,cpu_decrementer)); + offsetof(cpu_data_t, cpu_decrementer)); DECLARE("CPU_GET_DECREMENTER_FUNC", - offsetof(cpu_data_t,cpu_get_decrementer_func)); + 
offsetof(cpu_data_t, cpu_get_decrementer_func)); DECLARE("CPU_SET_DECREMENTER_FUNC", - offsetof(cpu_data_t,cpu_set_decrementer_func)); + offsetof(cpu_data_t, cpu_set_decrementer_func)); DECLARE("CPU_GET_FIQ_HANDLER", - offsetof(cpu_data_t,cpu_get_fiq_handler)); + offsetof(cpu_data_t, cpu_get_fiq_handler)); DECLARE("CPU_TBD_HARDWARE_ADDR", - offsetof(cpu_data_t,cpu_tbd_hardware_addr)); + offsetof(cpu_data_t, cpu_tbd_hardware_addr)); DECLARE("CPU_TBD_HARDWARE_VAL", - offsetof(cpu_data_t,cpu_tbd_hardware_val)); + offsetof(cpu_data_t, cpu_tbd_hardware_val)); DECLARE("CPU_INT_STATE", - offsetof(cpu_data_t,cpu_int_state)); + offsetof(cpu_data_t, cpu_int_state)); DECLARE("INTERRUPT_HANDLER", - offsetof(cpu_data_t,interrupt_handler)); + offsetof(cpu_data_t, interrupt_handler)); DECLARE("INTERRUPT_TARGET", - offsetof(cpu_data_t,interrupt_target)); + offsetof(cpu_data_t, interrupt_target)); DECLARE("INTERRUPT_REFCON", - offsetof(cpu_data_t,interrupt_refCon)); + offsetof(cpu_data_t, interrupt_refCon)); DECLARE("INTERRUPT_NUB", - offsetof(cpu_data_t,interrupt_nub)); + offsetof(cpu_data_t, interrupt_nub)); DECLARE("INTERRUPT_SOURCE", - offsetof(cpu_data_t,interrupt_source)); + offsetof(cpu_data_t, interrupt_source)); DECLARE("CPU_USER_DEBUG", - offsetof(cpu_data_t, cpu_user_debug)); + offsetof(cpu_data_t, cpu_user_debug)); DECLARE("CPU_STAT_IRQ", - offsetof(cpu_data_t, cpu_stat.irq_ex_cnt)); + offsetof(cpu_data_t, cpu_stat.irq_ex_cnt)); DECLARE("CPU_STAT_IRQ_WAKE", - offsetof(cpu_data_t, cpu_stat.irq_ex_cnt_wake)); + offsetof(cpu_data_t, cpu_stat.irq_ex_cnt_wake)); DECLARE("CPU_RESET_HANDLER", - offsetof(cpu_data_t, cpu_reset_handler)); + offsetof(cpu_data_t, cpu_reset_handler)); DECLARE("CPU_RESET_ASSIST", - offsetof(cpu_data_t, cpu_reset_assist)); + offsetof(cpu_data_t, cpu_reset_assist)); DECLARE("CPU_REGMAP_PADDR", - offsetof(cpu_data_t, cpu_regmap_paddr)); + offsetof(cpu_data_t, cpu_regmap_paddr)); DECLARE("CPU_PHYS_ID", - offsetof(cpu_data_t, cpu_phys_id)); + offsetof(cpu_data_t, cpu_phys_id)); DECLARE("RTCLOCK_DATAP", - offsetof(cpu_data_t, rtclock_datap)); + offsetof(cpu_data_t, rtclock_datap)); DECLARE("CLUSTER_MASTER", - offsetof(cpu_data_t, cluster_master)); + offsetof(cpu_data_t, cluster_master)); DECLARE("RTCLOCKDataSize", - sizeof(rtclock_data_t)); + sizeof(rtclock_data_t)); DECLARE("RTCLOCK_ADJ_ABSTIME_LOW", - offsetof(rtclock_data_t, rtc_adj.abstime_val.low)); + offsetof(rtclock_data_t, rtc_adj.abstime_val.low)); DECLARE("RTCLOCK_ADJ_ABSTIME_HIGH", - offsetof(rtclock_data_t, rtc_adj.abstime_val.high)); + offsetof(rtclock_data_t, rtc_adj.abstime_val.high)); DECLARE("RTCLOCK_BASE_ABSTIME_LOW", - offsetof(rtclock_data_t, rtc_base.abstime_val.low)); + offsetof(rtclock_data_t, rtc_base.abstime_val.low)); DECLARE("RTCLOCK_BASE_ABSTIME_HIGH", - offsetof(rtclock_data_t, rtc_base.abstime_val.high)); + offsetof(rtclock_data_t, rtc_base.abstime_val.high)); - DECLARE("SIGPdec", SIGPdec); + DECLARE("SIGPdec", SIGPdec); DECLARE("rhdSize", - sizeof(struct reset_handler_data)); + sizeof(struct reset_handler_data)); #if WITH_CLASSIC_S2R || !__arm64__ DECLARE("stSize", - sizeof(SleepToken)); + sizeof(SleepToken)); #endif - DECLARE("CPU_DATA_ENTRIES", offsetof(struct reset_handler_data, cpu_data_entries)); - DECLARE("ASSIST_RESET_HANDLER", offsetof(struct reset_handler_data, assist_reset_handler)); + DECLARE("CPU_DATA_ENTRIES", offsetof(struct reset_handler_data, cpu_data_entries)); + DECLARE("ASSIST_RESET_HANDLER", offsetof(struct reset_handler_data, assist_reset_handler)); - DECLARE("CPU_DATA_PADDR", 
offsetof(struct cpu_data_entry, cpu_data_paddr)); + DECLARE("CPU_DATA_PADDR", offsetof(struct cpu_data_entry, cpu_data_paddr)); - DECLARE("INTSTACK_SIZE", INTSTACK_SIZE); - DECLARE("EXCEPSTACK_SIZE", EXCEPSTACK_SIZE); + DECLARE("INTSTACK_SIZE", INTSTACK_SIZE); + DECLARE("EXCEPSTACK_SIZE", EXCEPSTACK_SIZE); - DECLARE("PAGE_MAX_SIZE", PAGE_MAX_SIZE); + DECLARE("PAGE_MAX_SIZE", PAGE_MAX_SIZE); DECLARE("TIMER_TSTAMP", - offsetof(struct timer, tstamp)); + offsetof(struct timer, tstamp)); DECLARE("THREAD_TIMER", - offsetof(struct processor, processor_data.thread_timer)); + offsetof(struct processor, processor_data.thread_timer)); DECLARE("KERNEL_TIMER", - offsetof(struct processor, processor_data.kernel_timer)); + offsetof(struct processor, processor_data.kernel_timer)); DECLARE("SYSTEM_STATE", - offsetof(struct processor, processor_data.system_state)); + offsetof(struct processor, processor_data.system_state)); DECLARE("USER_STATE", - offsetof(struct processor, processor_data.user_state)); + offsetof(struct processor, processor_data.user_state)); DECLARE("CURRENT_STATE", - offsetof(struct processor, processor_data.current_state)); + offsetof(struct processor, processor_data.current_state)); DECLARE("SYSTEM_TIMER", - offsetof(struct thread, system_timer)); + offsetof(struct thread, system_timer)); DECLARE("USER_TIMER", - offsetof(struct thread, user_timer)); + offsetof(struct thread, user_timer)); #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME DECLARE("PRECISE_USER_KERNEL_TIME", - offsetof(struct thread, precise_user_kernel_time)); + offsetof(struct thread, precise_user_kernel_time)); #endif DECLARE("BA_VIRT_BASE", - offsetof(struct boot_args, virtBase)); + offsetof(struct boot_args, virtBase)); DECLARE("BA_PHYS_BASE", - offsetof(struct boot_args, physBase)); + offsetof(struct boot_args, physBase)); DECLARE("BA_MEM_SIZE", - offsetof(struct boot_args, memSize)); + offsetof(struct boot_args, memSize)); DECLARE("BA_TOP_OF_KERNEL_DATA", - offsetof(struct boot_args, topOfKernelData)); + offsetof(struct boot_args, topOfKernelData)); DECLARE("BA_DEVICE_TREE", - offsetof(struct boot_args, deviceTreeP)); + offsetof(struct boot_args, deviceTreeP)); DECLARE("BA_DEVICE_TREE_LENGTH", - offsetof(struct boot_args, deviceTreeLength)); + offsetof(struct boot_args, deviceTreeLength)); DECLARE("BA_BOOT_FLAGS", - offsetof(struct boot_args, bootFlags)); + offsetof(struct boot_args, bootFlags)); DECLARE("ENTROPY_INDEX_PTR", - offsetof(entropy_data_t, index_ptr)); + offsetof(entropy_data_t, index_ptr)); DECLARE("ENTROPY_BUFFER", - offsetof(entropy_data_t, buffer)); + offsetof(entropy_data_t, buffer)); DECLARE("ENTROPY_DATA_SIZE", sizeof(struct entropy_data)); DECLARE("SR_RESTORE_TCR_EL1", offsetof(struct sysreg_restore, tcr_el1)); - return (0); + return 0; } diff --git a/osfmk/arm64/kpc.c b/osfmk/arm64/kpc.c index 660df3676..25a328da8 100644 --- a/osfmk/arm64/kpc.c +++ b/osfmk/arm64/kpc.c @@ -50,7 +50,7 @@ void kpc_pmi_handler(unsigned int ctr); #define PMCR_PMC_8_9_OFFSET (32) #define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET) #define PMCR_PMC_SHIFT(PMC) (((PMC) <= 7) ? (PMC) : \ - PMCR_PMC_8_9_SHIFT(PMC)) + PMCR_PMC_8_9_SHIFT(PMC)) /* * PMCR0 controls enabling, interrupts, and overflow of performance counters. 
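A worked example of the PMCR bit mapping defined just below in kpc.c: enable bits for PMCs 0..7 occupy bits 0..7, while PMCs 8 and 9 are displaced up to bits 32..33 via PMCR_PMC_8_9_OFFSET. The macros are copied from the patch; the demo function is illustrative.

#include <assert.h>
#include <stdint.h>

#define PMCR_PMC_8_9_OFFSET   (32)
#define PMCR_PMC_8_9_SHIFT(P) (((P) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(P)     (((P) <= 7) ? (P) : PMCR_PMC_8_9_SHIFT(P))

static void model_pmcr_shift_demo(void)
{
    assert(PMCR_PMC_SHIFT(5) == 5);                       /* low counters map 1:1 */
    assert(PMCR_PMC_SHIFT(8) == 32);                      /* PMC8 jumps to bit 32 */
    assert((1ULL << PMCR_PMC_SHIFT(9)) == (1ULL << 33));  /* PMC9 lands at bit 33 */
}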
@@ -133,9 +133,9 @@ void kpc_pmi_handler(unsigned int ctr); #endif #define PMCR1_EL_ALL_ENABLE_MASK(PMC) (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \ - PMCR1_EL0_A64_ENABLE_MASK(PMC) | \ - PMCR1_EL1_A64_ENABLE_MASK(PMC) | \ - PMCR1_EL3_A64_ENABLE_MASK(PMC)) + PMCR1_EL0_A64_ENABLE_MASK(PMC) | \ + PMCR1_EL1_A64_ENABLE_MASK(PMC) | \ + PMCR1_EL3_A64_ENABLE_MASK(PMC)) #define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC)) /* PMESR0 and PMESR1 are event selection registers */ @@ -208,8 +208,8 @@ void kpc_pmi_handler(unsigned int ctr); */ #define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V)) #define SREG_READ(SR) ({ uint64_t VAL; \ - __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \ - VAL; }) + __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \ + VAL; }) /* * Configuration registers that can be controlled by RAWPMU: @@ -340,7 +340,8 @@ config_in_whitelist(kpc_config_t cfg) } #ifdef KPC_DEBUG -static void dump_regs(void) +static void +dump_regs(void) { uint64_t val; kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR0)); @@ -468,19 +469,19 @@ static uint64_t read_counter(uint32_t counter) { switch (counter) { - // case 0: return SREG_READ(SREG_PMC0); - // case 1: return SREG_READ(SREG_PMC1); - case 2: return SREG_READ(SREG_PMC2); - case 3: return SREG_READ(SREG_PMC3); - case 4: return SREG_READ(SREG_PMC4); - case 5: return SREG_READ(SREG_PMC5); - case 6: return SREG_READ(SREG_PMC6); - case 7: return SREG_READ(SREG_PMC7); + // case 0: return SREG_READ(SREG_PMC0); + // case 1: return SREG_READ(SREG_PMC1); + case 2: return SREG_READ(SREG_PMC2); + case 3: return SREG_READ(SREG_PMC3); + case 4: return SREG_READ(SREG_PMC4); + case 5: return SREG_READ(SREG_PMC5); + case 6: return SREG_READ(SREG_PMC6); + case 7: return SREG_READ(SREG_PMC7); #if (KPC_ARM64_CONFIGURABLE_COUNT > 6) - case 8: return SREG_READ(SREG_PMC8); - case 9: return SREG_READ(SREG_PMC9); + case 8: return SREG_READ(SREG_PMC8); + case 9: return SREG_READ(SREG_PMC9); #endif - default: return 0; + default: return 0; } } @@ -488,19 +489,19 @@ static void write_counter(uint32_t counter, uint64_t value) { switch (counter) { - // case 0: SREG_WRITE(SREG_PMC0, value); break; - // case 1: SREG_WRITE(SREG_PMC1, value); break; - case 2: SREG_WRITE(SREG_PMC2, value); break; - case 3: SREG_WRITE(SREG_PMC3, value); break; - case 4: SREG_WRITE(SREG_PMC4, value); break; - case 5: SREG_WRITE(SREG_PMC5, value); break; - case 6: SREG_WRITE(SREG_PMC6, value); break; - case 7: SREG_WRITE(SREG_PMC7, value); break; + // case 0: SREG_WRITE(SREG_PMC0, value); break; + // case 1: SREG_WRITE(SREG_PMC1, value); break; + case 2: SREG_WRITE(SREG_PMC2, value); break; + case 3: SREG_WRITE(SREG_PMC3, value); break; + case 4: SREG_WRITE(SREG_PMC4, value); break; + case 5: SREG_WRITE(SREG_PMC5, value); break; + case 6: SREG_WRITE(SREG_PMC6, value); break; + case 7: SREG_WRITE(SREG_PMC7, value); break; #if (KPC_ARM64_CONFIGURABLE_COUNT > 6) - case 8: SREG_WRITE(SREG_PMC8, value); break; - case 9: SREG_WRITE(SREG_PMC9, value); break; + case 8: SREG_WRITE(SREG_PMC8, value); break; + case 9: SREG_WRITE(SREG_PMC9, value); break; #endif - default: break; + default: break; } } @@ -553,7 +554,7 @@ save_regs(void) { int cpuid = cpu_number(); - __asm__ volatile("dmb ish"); + __asm__ volatile ("dmb ish"); assert(ml_get_interrupts_enabled() == FALSE); @@ -602,24 +603,24 @@ get_counter_config(uint32_t counter) uint64_t pmesr; switch (counter) { - case 2: /* FALLTHROUGH */ - case 3: /* FALLTHROUGH */ - case 4: /* FALLTHROUGH */ - case 5: - pmesr = 
PMESR_EVT_DECODE(SREG_READ(SREG_PMESR0), counter, 2); - break; - case 6: /* FALLTHROUGH */ - case 7: + case 2: /* FALLTHROUGH */ + case 3: /* FALLTHROUGH */ + case 4: /* FALLTHROUGH */ + case 5: + pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR0), counter, 2); + break; + case 6: /* FALLTHROUGH */ + case 7: #if (KPC_ARM64_CONFIGURABLE_COUNT > 6) - /* FALLTHROUGH */ - case 8: /* FALLTHROUGH */ - case 9: + /* FALLTHROUGH */ + case 8: /* FALLTHROUGH */ + case 9: #endif - pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR1), counter, 6); - break; - default: - pmesr = 0; - break; + pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR1), counter, 6); + break; + default: + pmesr = 0; + break; } kpc_config_t config = pmesr; @@ -654,32 +655,32 @@ set_counter_config(uint32_t counter, uint64_t config) uint64_t pmesr = 0; switch (counter) { - case 2: /* FALLTHROUGH */ - case 3: /* FALLTHROUGH */ - case 4: /* FALLTHROUGH */ - case 5: - pmesr = SREG_READ(SREG_PMESR0); - pmesr &= PMESR_EVT_CLEAR(counter, 2); - pmesr |= PMESR_EVT_ENCODE(config, counter, 2); - SREG_WRITE(SREG_PMESR0, pmesr); - saved_PMESR[cpuid][0] = pmesr; - break; - - case 6: /* FALLTHROUGH */ - case 7: + case 2: /* FALLTHROUGH */ + case 3: /* FALLTHROUGH */ + case 4: /* FALLTHROUGH */ + case 5: + pmesr = SREG_READ(SREG_PMESR0); + pmesr &= PMESR_EVT_CLEAR(counter, 2); + pmesr |= PMESR_EVT_ENCODE(config, counter, 2); + SREG_WRITE(SREG_PMESR0, pmesr); + saved_PMESR[cpuid][0] = pmesr; + break; + + case 6: /* FALLTHROUGH */ + case 7: #if KPC_ARM64_CONFIGURABLE_COUNT > 6 - /* FALLTHROUGH */ - case 8: /* FALLTHROUGH */ - case 9: + /* FALLTHROUGH */ + case 8: /* FALLTHROUGH */ + case 9: #endif - pmesr = SREG_READ(SREG_PMESR1); - pmesr &= PMESR_EVT_CLEAR(counter, 6); - pmesr |= PMESR_EVT_ENCODE(config, counter, 6); - SREG_WRITE(SREG_PMESR1, pmesr); - saved_PMESR[cpuid][1] = pmesr; - break; - default: - break; + pmesr = SREG_READ(SREG_PMESR1); + pmesr &= PMESR_EVT_CLEAR(counter, 6); + pmesr |= PMESR_EVT_ENCODE(config, counter, 6); + SREG_WRITE(SREG_PMESR1, pmesr); + saved_PMESR[cpuid][1] = pmesr; + break; + default: + break; } set_modes(counter, config); @@ -758,8 +759,9 @@ set_running_configurable(uint64_t target_mask, uint64_t state_mask) enabled = ml_set_interrupts_enabled(FALSE); for (uint32_t i = 0; i < cfg_count; ++i) { - if (((1ULL << i) & target_mask) == 0) + if (((1ULL << i) & target_mask) == 0) { continue; + } assert(kpc_controls_counter(offset + i)); if ((1ULL << i) & state_mask) { @@ -780,10 +782,11 @@ kpc_set_running_xcall( void *vstate ) assert(mp_config); set_running_configurable(mp_config->cfg_target_mask, - mp_config->cfg_state_mask); + mp_config->cfg_state_mask); - if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0) + if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0) { thread_wakeup((event_t) &kpc_xcall_sync); + } } static uint32_t kpc_xread_sync; @@ -854,17 +857,18 @@ kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask) assert(counterv); for (uint32_t i = 0; i < cfg_count; ++i) { - if (((1ULL << i) & pmc_mask) == 0) + if (((1ULL << i) & pmc_mask) == 0) { continue; + } ctr = read_counter(i + offset); if (ctr & KPC_ARM64_COUNTER_OVF_MASK) { ctr = CONFIGURABLE_SHADOW(i) + - (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) + - (ctr & KPC_ARM64_COUNTER_MASK); + (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) + + (ctr & KPC_ARM64_COUNTER_MASK); } else { ctr = CONFIGURABLE_SHADOW(i) + - (ctr - CONFIGURABLE_RELOAD(i)); + (ctr - CONFIGURABLE_RELOAD(i)); } *counterv++ = ctr; @@ -880,9 +884,11 @@ 
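A sketch of the sampling arithmetic visible above: kpc_set_reload_xcall() programs a counter to (max - period) so it overflows, and raises a PMI, after exactly `period` events, and kpc_get_configurable_counters() rebuilds the running total from the shadow value plus the distance travelled since the reload, adding one extra count for the wrap itself. The 47-bit width below is an assumption for illustration; the real width comes from KPC_ARM64_COUNTER_MASK.

#include <stdint.h>

#define MODEL_CTR_MAX ((1ULL << 47) - 1)   /* assumed configurable-counter maximum */

static uint64_t model_reload_for_period(uint64_t period)
{
    if (period == 0) {
        period = MODEL_CTR_MAX;            /* 0 selects the maximum period */
    }
    return MODEL_CTR_MAX - period;         /* overflows after `period` events */
}

static uint64_t model_accumulate(uint64_t shadow, uint64_t reload,
    uint64_t raw, int overflowed)
{
    /* mirrors the two branches in kpc_get_configurable_counters() */
    if (overflowed) {
        return shadow + (MODEL_CTR_MAX - reload + 1 /* wrap */) + raw;
    }
    return shadow + (raw - reload);
}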
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask) assert(configv); - for (uint32_t i = 0; i < cfg_count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < cfg_count; ++i) { + if ((1ULL << i) & pmc_mask) { *configv++ = get_counter_config(i + offset); + } + } return 0; } @@ -897,8 +903,9 @@ kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask) enabled = ml_set_interrupts_enabled(FALSE); for (uint32_t i = 0; i < cfg_count; ++i) { - if (((1ULL << i) & pmc_mask) == 0) + if (((1ULL << i) & pmc_mask) == 0) { continue; + } assert(kpc_controls_counter(i + offset)); set_counter_config(i + offset, *configv++); @@ -932,8 +939,9 @@ kpc_set_config_xcall(void *vmp_config) new_config += RAWPMU_CONFIG_COUNT; } - if (hw_atomic_sub(&kpc_config_sync, 1) == 0) + if (hw_atomic_sub(&kpc_config_sync, 1) == 0) { thread_wakeup((event_t) &kpc_config_sync); + } } static uint64_t @@ -986,10 +994,12 @@ kpc_set_reload_xcall(void *vmp_config) count = kpc_configurable_count(); for (uint32_t i = 0; i < count; ++i) { /* ignore the counter */ - if (((1ULL << i) & mp_config->pmc_mask) == 0) + if (((1ULL << i) & mp_config->pmc_mask) == 0) { continue; - if (*new_period == 0) + } + if (*new_period == 0) { *new_period = kpc_configurable_max(); + } CONFIGURABLE_RELOAD(i) = max - *new_period; /* reload the counter */ kpc_reload_counter(offset + i); @@ -1000,8 +1010,9 @@ kpc_set_reload_xcall(void *vmp_config) ml_set_interrupts_enabled(enabled); - if (hw_atomic_sub(&kpc_reload_sync, 1) == 0) + if (hw_atomic_sub(&kpc_reload_sync, 1) == 0) { thread_wakeup((event_t) &kpc_reload_sync); + } } void @@ -1073,7 +1084,7 @@ kpc_set_config_arch(struct kpc_config_remote *mp_config) return 0; } -void +void kpc_idle(void) { if (kpc_configured) { @@ -1081,8 +1092,8 @@ kpc_idle(void) } } -void -kpc_idle_exit(void) +void +kpc_idle_exit(void) { if (kpc_configured) { restore_regs(); diff --git a/osfmk/arm64/loose_ends.c b/osfmk/arm64/loose_ends.c index e211448c7..00aae153f 100644 --- a/osfmk/arm64/loose_ends.c +++ b/osfmk/arm64/loose_ends.c @@ -88,8 +88,9 @@ bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags) #if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__ count = PAGE_SIZE - src_offset; wimg_bits_src = pmap_cache_attributes(pn_src); - if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) + if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) { use_copy_window_src = TRUE; + } #else if (use_copy_window_src) { wimg_bits_src = pmap_cache_attributes(pn_src); @@ -104,8 +105,9 @@ bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags) #if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__ count2 = PAGE_SIZE - dst_offset; wimg_bits_dst = pmap_cache_attributes(pn_dst); - if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) + if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) { use_copy_window_dst = TRUE; + } #else if (use_copy_window_dst) { wimg_bits_dst = pmap_cache_attributes(pn_dst); @@ -139,24 +141,30 @@ bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags) tmp_dst = (char*)dst; } - if (count > count2) + if (count > count2) { count = count2; - if (count > bytes) + } + if (count > bytes) { count = bytes; + } - if (BCOPY_PHYS_SRC_IS_USER(flags)) + if (BCOPY_PHYS_SRC_IS_USER(flags)) { res = copyin((user_addr_t)src, tmp_dst, count); - else if (BCOPY_PHYS_DST_IS_USER(flags)) + } else if (BCOPY_PHYS_DST_IS_USER(flags)) { res = copyout(tmp_src, (user_addr_t)dst, count); - else + } else { bcopy(tmp_src, tmp_dst, count); + } - if 
(use_copy_window_src) + if (use_copy_window_src) { pmap_unmap_cpu_windows_copy(src_index); - if (use_copy_window_dst) + } + if (use_copy_window_dst) { pmap_unmap_cpu_windows_copy(dst_index); - if (use_copy_window_src || use_copy_window_dst) + } + if (use_copy_window_src || use_copy_window_dst) { mp_enable_preemption(); + } src += count; dst += count; @@ -197,8 +205,9 @@ bzero_phys(addr64_t src, vm_size_t bytes) #if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__ count = PAGE_SIZE - offset; wimg_bits = pmap_cache_attributes(pn); - if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) + if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) { use_copy_window = TRUE; + } #else if (use_copy_window) { wimg_bits = pmap_cache_attributes(pn); @@ -216,8 +225,9 @@ bzero_phys(addr64_t src, vm_size_t bytes) buf = (char *)phystokv_range((pmap_paddr_t)src, &count); } - if (count > bytes) + if (count > bytes) { count = bytes; + } bzero(buf, count); @@ -243,15 +253,16 @@ ml_phys_read_data(pmap_paddr_t paddr, int size) unsigned int index; unsigned int wimg_bits; ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT); - ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT); + ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT); unsigned long long result = 0; vm_offset_t copywindow_vaddr = 0; unsigned char s1; unsigned short s2; unsigned int s4; - if (__improbable(pn_end != pn)) + if (__improbable(pn_end != pn)) { panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr); + } #if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__ if (pmap_valid_address(paddr)) { @@ -273,7 +284,7 @@ ml_phys_read_data(pmap_paddr_t paddr, int size) break; default: panic("Invalid size %d for ml_phys_read_data\n", size); - break; + break; } return result; } @@ -285,25 +296,24 @@ ml_phys_read_data(pmap_paddr_t paddr, int size) copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK); switch (size) { - case 1: - s1 = *(volatile unsigned char *)copywindow_vaddr; - result = s1; - break; - case 2: - s2 = *(volatile unsigned short *)copywindow_vaddr; - result = s2; - break; - case 4: - s4 = *(volatile unsigned int *)copywindow_vaddr; - result = s4; - break; - case 8: - result = *(volatile unsigned long long*)copywindow_vaddr; - break; - default: - panic("Invalid size %d for ml_phys_read_data\n", size); - break; - + case 1: + s1 = *(volatile unsigned char *)copywindow_vaddr; + result = s1; + break; + case 2: + s2 = *(volatile unsigned short *)copywindow_vaddr; + result = s2; + break; + case 4: + s4 = *(volatile unsigned int *)copywindow_vaddr; + result = s4; + break; + case 8: + result = *(volatile unsigned long long*)copywindow_vaddr; + break; + default: + panic("Invalid size %d for ml_phys_read_data\n", size); + break; } pmap_unmap_cpu_windows_copy(index); @@ -312,54 +322,64 @@ ml_phys_read_data(pmap_paddr_t paddr, int size) return result; } -unsigned int ml_phys_read( vm_offset_t paddr) +unsigned int +ml_phys_read( vm_offset_t paddr) { - return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4); + return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4); } -unsigned int ml_phys_read_word(vm_offset_t paddr) { - - return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4); +unsigned int +ml_phys_read_word(vm_offset_t paddr) +{ + return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4); } -unsigned int ml_phys_read_64(addr64_t paddr64) +unsigned int +ml_phys_read_64(addr64_t paddr64) { - return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4); 
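A sketch of the page-span guard used by ml_phys_read_data() above (and by ml_phys_write_data() further down): a copy window maps exactly one physical page, so the access panics unless the first and last byte fall in the same page frame. The page shift is an assumption for illustration.

#include <stdbool.h>
#include <stdint.h>

#define MODEL_PAGE_SHIFT 14   /* assumed 16 KB pages */

static bool model_spans_page(uint64_t paddr, unsigned size)
{
    uint64_t pn_first = paddr >> MODEL_PAGE_SHIFT;
    uint64_t pn_last  = (paddr + size - 1) >> MODEL_PAGE_SHIFT;

    return pn_first != pn_last;   /* true would trigger the panic above */
}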
+ return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4); } -unsigned int ml_phys_read_word_64(addr64_t paddr64) +unsigned int +ml_phys_read_word_64(addr64_t paddr64) { - return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4); + return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4); } -unsigned int ml_phys_read_half(vm_offset_t paddr) +unsigned int +ml_phys_read_half(vm_offset_t paddr) { - return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2); + return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2); } -unsigned int ml_phys_read_half_64(addr64_t paddr64) +unsigned int +ml_phys_read_half_64(addr64_t paddr64) { - return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2); + return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2); } -unsigned int ml_phys_read_byte(vm_offset_t paddr) +unsigned int +ml_phys_read_byte(vm_offset_t paddr) { - return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1); + return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1); } -unsigned int ml_phys_read_byte_64(addr64_t paddr64) +unsigned int +ml_phys_read_byte_64(addr64_t paddr64) { - return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1); + return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1); } -unsigned long long ml_phys_read_double(vm_offset_t paddr) +unsigned long long +ml_phys_read_double(vm_offset_t paddr) { - return ml_phys_read_data((pmap_paddr_t)paddr, 8); + return ml_phys_read_data((pmap_paddr_t)paddr, 8); } -unsigned long long ml_phys_read_double_64(addr64_t paddr64) +unsigned long long +ml_phys_read_double_64(addr64_t paddr64) { - return ml_phys_read_data((pmap_paddr_t)paddr64, 8); + return ml_phys_read_data((pmap_paddr_t)paddr64, 8); } @@ -374,11 +394,12 @@ ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size) unsigned int index; unsigned int wimg_bits; ppnum_t pn = (ppnum_t)(paddr >> PAGE_SHIFT); - ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT); + ppnum_t pn_end = (ppnum_t)((paddr + size - 1) >> PAGE_SHIFT); vm_offset_t copywindow_vaddr = 0; - if (__improbable(pn_end != pn)) + if (__improbable(pn_end != pn)) { panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr); + } #if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__ if (pmap_valid_address(paddr)) { @@ -403,83 +424,93 @@ ml_phys_write_data(pmap_paddr_t paddr, unsigned long long data, int size) mp_disable_preemption(); wimg_bits = pmap_cache_attributes(pn); - index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ|VM_PROT_WRITE, wimg_bits); + index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits); copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK); switch (size) { - case 1: - *(volatile unsigned char *)(copywindow_vaddr) = - (unsigned char)data; - break; - case 2: - *(volatile unsigned short *)(copywindow_vaddr) = - (unsigned short)data; - break; - case 4: - *(volatile unsigned int *)(copywindow_vaddr) = - (uint32_t)data; - break; - case 8: - *(volatile unsigned long long *)(copywindow_vaddr) = - (unsigned long long)data; - break; - default: - panic("Invalid size %d for ml_phys_write_data\n", size); - break; + case 1: + *(volatile unsigned char *)(copywindow_vaddr) = + (unsigned char)data; + break; + case 2: + *(volatile unsigned short *)(copywindow_vaddr) = + (unsigned short)data; + break; + case 4: + *(volatile unsigned int *)(copywindow_vaddr) = + (uint32_t)data; + break; + case 8: + *(volatile unsigned long long 
*)(copywindow_vaddr) = + (unsigned long long)data; + break; + default: + panic("Invalid size %d for ml_phys_write_data\n", size); + break; } pmap_unmap_cpu_windows_copy(index); mp_enable_preemption(); } -void ml_phys_write_byte(vm_offset_t paddr, unsigned int data) +void +ml_phys_write_byte(vm_offset_t paddr, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 1); + ml_phys_write_data((pmap_paddr_t)paddr, data, 1); } -void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_byte_64(addr64_t paddr64, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 1); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 1); } -void ml_phys_write_half(vm_offset_t paddr, unsigned int data) +void +ml_phys_write_half(vm_offset_t paddr, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 2); + ml_phys_write_data((pmap_paddr_t)paddr, data, 2); } -void ml_phys_write_half_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_half_64(addr64_t paddr64, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 2); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 2); } -void ml_phys_write(vm_offset_t paddr, unsigned int data) +void +ml_phys_write(vm_offset_t paddr, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 4); + ml_phys_write_data((pmap_paddr_t)paddr, data, 4); } -void ml_phys_write_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_64(addr64_t paddr64, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); } -void ml_phys_write_word(vm_offset_t paddr, unsigned int data) +void +ml_phys_write_word(vm_offset_t paddr, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 4); + ml_phys_write_data((pmap_paddr_t)paddr, data, 4); } -void ml_phys_write_word_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_word_64(addr64_t paddr64, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); } -void ml_phys_write_double(vm_offset_t paddr, unsigned long long data) +void +ml_phys_write_double(vm_offset_t paddr, unsigned long long data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 8); + ml_phys_write_data((pmap_paddr_t)paddr, data, 8); } -void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data) +void +ml_phys_write_double_64(addr64_t paddr64, unsigned long long data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 8); + ml_phys_write_data((pmap_paddr_t)paddr64, data, 8); } @@ -518,15 +549,18 @@ ffsbit(int *s) { int offset; - for (offset = 0; !*s; offset += INT_SIZE, ++s); + for (offset = 0; !*s; offset += INT_SIZE, ++s) { + ; + } return offset + __builtin_ctz(*s); } int ffs(unsigned int mask) { - if (mask == 0) + if (mask == 0) { return 0; + } /* * NOTE: cannot use __builtin_ffs because it generates a call to @@ -538,8 +572,9 @@ ffs(unsigned int mask) int ffsll(unsigned long long mask) { - if (mask == 0) + if (mask == 0) { return 0; + } /* * NOTE: cannot use __builtin_ffsll because it generates a call to @@ -554,48 +589,53 @@ ffsll(unsigned long long mask) int fls(unsigned int mask) { - if (mask == 0) + if (mask == 0) { return 0; + } - return (sizeof (mask) << 3) - __builtin_clz(mask); + return (sizeof(mask) << 3) - __builtin_clz(mask); } int flsll(unsigned long long mask) { - if (mask == 0) + if (mask == 0) { return 0; + } - return (sizeof (mask) << 3) - __builtin_clzll(mask); + return (sizeof(mask) << 
3) - __builtin_clzll(mask); } #undef bcmp -int +int bcmp( - const void *pa, - const void *pb, - size_t len) + const void *pa, + const void *pb, + size_t len) { const char *a = (const char *) pa; const char *b = (const char *) pb; - if (len == 0) + if (len == 0) { return 0; + } - do - if (*a++ != *b++) + do{ + if (*a++ != *b++) { break; - while (--len); + } + } while (--len); /* * Check for the overflow case but continue to handle the non-overflow * case the same way just in case someone is using the return value * as more than zero/non-zero */ - if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) + if ((len & 0xFFFFFFFF00000000ULL) && !(len & 0x00000000FFFFFFFFULL)) { return 0xFFFFFFFFL; - else + } else { return (int)len; + } } #undef memcmp @@ -606,27 +646,31 @@ memcmp(const void *s1, const void *s2, size_t n) const unsigned char *p1 = s1, *p2 = s2; do { - if (*p1++ != *p2++) - return (*--p1 - *--p2); + if (*p1++ != *p2++) { + return *--p1 - *--p2; + } } while (--n != 0); } - return (0); + return 0; } kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which) { - if ((which & (cppvPsrc | cppvPsnk)) == 0) /* Make sure that only one is virtual */ + if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only one is virtual */ panic("%s: no more than 1 parameter may be virtual", __func__); + } kern_return_t res = bcopy_phys_internal(source, sink, size, which); #ifndef __ARM_COHERENT_IO__ - if (which & cppvFsrc) - flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc)); + if (which & cppvFsrc) { + flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc)); + } - if (which & cppvFsnk) - flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk)); + if (which & cppvFsnk) { + flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk)); + } #endif return res; @@ -642,45 +686,48 @@ extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit); */ void machine_callstack( - uintptr_t * buf, - vm_size_t callstack_max) + uintptr_t * buf, + vm_size_t callstack_max) { /* Captures the USER call stack */ - uint32_t i=0; + uint32_t i = 0; struct arm_saved_state *state = find_user_regs(current_thread()); if (!state) { - while (i: Reduce print noise - // kprintf("ml_thread_policy() unimplemented\n"); + // : Reduce print noise + // kprintf("ml_thread_policy() unimplemented\n"); } void -panic_unimplemented() +panic_unimplemented() { panic("Not yet implemented."); } @@ -728,7 +775,7 @@ panic_unimplemented() void abort(void); void -abort() +abort() { panic("Abort."); } @@ -741,4 +788,3 @@ kdp_register_callout(kdp_callout_fn_t fn, void *arg) #pragma unused(fn,arg) } #endif - diff --git a/osfmk/arm64/lowglobals.h b/osfmk/arm64/lowglobals.h index c4d0c3df0..12aab8548 100644 --- a/osfmk/arm64/lowglobals.h +++ b/osfmk/arm64/lowglobals.h @@ -28,8 +28,8 @@ /* * Header files for the Low Memory Globals (lg) */ -#ifndef _LOW_MEMORY_GLOBALS_H_ -#define _LOW_MEMORY_GLOBALS_H_ +#ifndef _LOW_MEMORY_GLOBALS_H_ +#define _LOW_MEMORY_GLOBALS_H_ #include #include @@ -37,44 +37,44 @@ #include #ifndef __arm64__ -#error Wrong architecture - this file is meant for arm64 +#error Wrong architecture - this file is meant for arm64 #endif -#define LOWGLO_LAYOUT_MAGIC 0xC0DEC0DE +#define LOWGLO_LAYOUT_MAGIC 0xC0DEC0DE /* * This structure is bound to lowmem_vectors.c. Make sure changes here are * reflected there as well. 
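A note on the bit-scan helpers earlier in loose_ends.c: ffs() and fls() avoid __builtin_ffs()/__builtin_ffsll(), which can lower to a libc call of the same name, and use the count-zeros builtins instead, with an explicit zero check because __builtin_ctz/__builtin_clz are undefined for 0. Equivalent standalone versions:

#include <assert.h>
#include <limits.h>

static int model_ffs(unsigned int mask)
{
    return mask ? 1 + __builtin_ctz(mask) : 0;   /* 1-based index of lowest set bit */
}

static int model_fls(unsigned int mask)
{
    /* (sizeof(mask) << 3) in the patch is the type's bit width, i.e. 32 here */
    return mask ? (int)(sizeof(mask) * CHAR_BIT) - __builtin_clz(mask) : 0;
}

static void model_bitscan_demo(void)
{
    assert(model_ffs(0x18) == 4);   /* lowest set bit is bit 3, 1-based 4 */
    assert(model_fls(0x18) == 5);   /* highest set bit is bit 4, 1-based 5 */
}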
*/ -#pragma pack(8) /* Make sure the structure stays as we defined it */ +#pragma pack(8) /* Make sure the structure stays as we defined it */ typedef struct lowglo { - unsigned char lgVerCode[8]; /* 0xffffff8000002000 System verification code */ - uint64_t lgZero; /* 0xffffff8000002008 Constant 0 */ - uint64_t lgStext; /* 0xffffff8000002010 Start of kernel text */ - uint64_t lgVersion; /* 0xffffff8000002018 Pointer to kernel version string */ - uint64_t lgOSVersion; /* 0xffffff8000002020 Pointer to OS version string */ - uint64_t lgKmodptr; /* 0xffffff8000002028 Pointer to kmod, debugging aid */ - uint64_t lgTransOff; /* 0xffffff8000002030 Pointer to kdp_trans_off, debugging aid */ - uint64_t lgRebootFlag; /* 0xffffff8000002038 Pointer to debugger reboot trigger */ - uint64_t lgManualPktAddr; /* 0xffffff8000002040 Pointer to manual packet structure */ - uint64_t lgAltDebugger; /* 0xffffff8000002048 Pointer to reserved space for alternate kernel debugger */ - uint64_t lgPmapMemQ; /* 0xffffff8000002050 Pointer to PMAP memory queue */ - uint64_t lgPmapMemPageOffset;/* 0xffffff8000002058 Offset of physical page member in vm_page_t or vm_page_with_ppnum_t */ - uint64_t lgPmapMemChainOffset;/*0xffffff8000002060 Offset of listq in vm_page_t or vm_page_with_ppnum_t */ - uint64_t lgStaticAddr; /* 0xffffff8000002068 Static allocation address */ - uint64_t lgStaticSize; /* 0xffffff8000002070 Static allocation size */ - uint64_t lgLayoutMajorVersion; /* 0xffffff8000002078 Lowglo major layout version */ - uint64_t lgLayoutMagic; /* 0xffffff8000002080 Magic value evaluated to determine if lgLayoutVersion is valid */ - uint64_t lgPmapMemStartAddr; /* 0xffffff8000002088 Pointer to start of vm_page_t array */ - uint64_t lgPmapMemEndAddr; /* 0xffffff8000002090 Pointer to end of vm_page_t array */ - uint64_t lgPmapMemPagesize; /* 0xffffff8000002098 size of vm_page_t */ - uint64_t lgPmapMemFromArrayMask; /* 0xffffff80000020A0 Mask to indicate page is from vm_page_t array */ - uint64_t lgPmapMemFirstppnum; /* 0xffffff80000020A8 physical page number of the first vm_page_t in the array */ - uint64_t lgPmapMemPackedShift; /* 0xffffff80000020B0 alignment of packed pointer */ - uint64_t lgPmapMemPackedBaseAddr;/* 0xffffff80000020B8 base address of that packed pointers are relative to */ - uint64_t lgLayoutMinorVersion; /* 0xffffff80000020C0 Lowglo minor layout version */ - uint64_t lgPageShift; /* 0xffffff80000020C8 number of shifts from page number to size */ + unsigned char lgVerCode[8]; /* 0xffffff8000002000 System verification code */ + uint64_t lgZero; /* 0xffffff8000002008 Constant 0 */ + uint64_t lgStext; /* 0xffffff8000002010 Start of kernel text */ + uint64_t lgVersion; /* 0xffffff8000002018 Pointer to kernel version string */ + uint64_t lgOSVersion; /* 0xffffff8000002020 Pointer to OS version string */ + uint64_t lgKmodptr; /* 0xffffff8000002028 Pointer to kmod, debugging aid */ + uint64_t lgTransOff; /* 0xffffff8000002030 Pointer to kdp_trans_off, debugging aid */ + uint64_t lgRebootFlag; /* 0xffffff8000002038 Pointer to debugger reboot trigger */ + uint64_t lgManualPktAddr; /* 0xffffff8000002040 Pointer to manual packet structure */ + uint64_t lgAltDebugger; /* 0xffffff8000002048 Pointer to reserved space for alternate kernel debugger */ + uint64_t lgPmapMemQ; /* 0xffffff8000002050 Pointer to PMAP memory queue */ + uint64_t lgPmapMemPageOffset;/* 0xffffff8000002058 Offset of physical page member in vm_page_t or vm_page_with_ppnum_t */ + uint64_t lgPmapMemChainOffset;/*0xffffff8000002060 Offset of listq 
in vm_page_t or vm_page_with_ppnum_t */ + uint64_t lgStaticAddr; /* 0xffffff8000002068 Static allocation address */ + uint64_t lgStaticSize; /* 0xffffff8000002070 Static allocation size */ + uint64_t lgLayoutMajorVersion; /* 0xffffff8000002078 Lowglo major layout version */ + uint64_t lgLayoutMagic; /* 0xffffff8000002080 Magic value evaluated to determine if lgLayoutVersion is valid */ + uint64_t lgPmapMemStartAddr; /* 0xffffff8000002088 Pointer to start of vm_page_t array */ + uint64_t lgPmapMemEndAddr; /* 0xffffff8000002090 Pointer to end of vm_page_t array */ + uint64_t lgPmapMemPagesize; /* 0xffffff8000002098 size of vm_page_t */ + uint64_t lgPmapMemFromArrayMask; /* 0xffffff80000020A0 Mask to indicate page is from vm_page_t array */ + uint64_t lgPmapMemFirstppnum; /* 0xffffff80000020A8 physical page number of the first vm_page_t in the array */ + uint64_t lgPmapMemPackedShift; /* 0xffffff80000020B0 alignment of packed pointer */ + uint64_t lgPmapMemPackedBaseAddr;/* 0xffffff80000020B8 base address of that packed pointers are relative to */ + uint64_t lgLayoutMinorVersion; /* 0xffffff80000020C0 Lowglo minor layout version */ + uint64_t lgPageShift; /* 0xffffff80000020C8 number of shifts from page number to size */ } lowglo; #pragma pack() diff --git a/osfmk/arm64/lowmem_vectors.c b/osfmk/arm64/lowmem_vectors.c index 6cd326a30..2b22f1a6f 100644 --- a/osfmk/arm64/lowmem_vectors.c +++ b/osfmk/arm64/lowmem_vectors.c @@ -38,13 +38,13 @@ */ extern vm_offset_t vm_kernel_stext; -extern void *version; -extern void *kmod; -extern void *kdp_trans_off; -extern void *osversion; -extern void *flag_kdp_trigger_reboot; -extern void *manual_pkt; -extern struct vm_object pmap_object_store; /* store pt pages */ +extern void *version; +extern void *kmod; +extern void *kdp_trans_off; +extern void *osversion; +extern void *flag_kdp_trigger_reboot; +extern void *manual_pkt; +extern struct vm_object pmap_object_store; /* store pt pages */ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { // Increment the major version for changes that break the current Astris @@ -54,7 +54,7 @@ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { .lgLayoutMajorVersion = 3, .lgLayoutMinorVersion = 0, .lgLayoutMagic = LOWGLO_LAYOUT_MAGIC, - .lgVerCode = { 'K','r','a','k','e','n',' ',' ' }, + .lgVerCode = { 'K', 'r', 'a', 'k', 'e', 'n', ' ', ' ' }, .lgZero = 0, .lgStext = 0, // To be filled in below .lgVersion = (uint64_t) &version, @@ -78,19 +78,22 @@ lowglo lowGlo __attribute__ ((aligned(PAGE_MAX_SIZE))) = { .lgPageShift = ARM_PGSHIFT }; -void patch_low_glo(void) +void +patch_low_glo(void) { lowGlo.lgStext = (uint64_t)vm_kernel_stext; } -void patch_low_glo_static_region(uint64_t address, uint64_t size) +void +patch_low_glo_static_region(uint64_t address, uint64_t size) { lowGlo.lgStaticAddr = address; lowGlo.lgStaticSize = size; } -void patch_low_glo_vm_page_info(void * start_addr, void * end_addr, uint32_t first_ppnum) +void +patch_low_glo_vm_page_info(void * start_addr, void * end_addr, uint32_t first_ppnum) { lowGlo.lgPmapMemStartAddr = (uint64_t)start_addr; lowGlo.lgPmapMemEndAddr = (uint64_t)end_addr; diff --git a/osfmk/arm64/machine_cpuid.h b/osfmk/arm64/machine_cpuid.h index 63eff5f7b..2e91848e0 100644 --- a/osfmk/arm64/machine_cpuid.h +++ b/osfmk/arm64/machine_cpuid.h @@ -29,19 +29,19 @@ #define _ARM64_MACHINE_CPUID_H_ typedef struct { -uint64_t el0_not_implemented : 1, - el0_aarch64_only : 1, - el0_aarch32_and_64 : 1, - el1_not_implemented : 1, - el1_aarch64_only : 1, - el1_aarch32_and_64 : 1, - 
el2_not_implemented : 1, - el2_aarch64_only : 1, - el2_aarch32_and_64 : 1, - el3_not_implemented : 1, - el3_aarch64_only : 1, - el3_aarch32_and_64 : 1, - reserved : 52; + uint64_t el0_not_implemented : 1, + el0_aarch64_only : 1, + el0_aarch32_and_64 : 1, + el1_not_implemented : 1, + el1_aarch64_only : 1, + el1_aarch32_and_64 : 1, + el2_not_implemented : 1, + el2_aarch64_only : 1, + el2_aarch32_and_64 : 1, + el3_not_implemented : 1, + el3_aarch64_only : 1, + el3_aarch32_and_64 : 1, + reserved : 52; } arm_feature_bits_t; /* Debug identification */ @@ -49,15 +49,15 @@ uint64_t el0_not_implemented : 1, /* ID_AA64DFR0_EL1 */ typedef union { struct { - uint64_t debug_arch_version : 4, - trace_extn_version : 4, - perf_extn_version : 4, - brps : 4, - reserved0 : 4, - wrps : 4, - reserved1 : 4, - ctx_cmps : 4, - reserved32 : 32; + uint64_t debug_arch_version : 4, + trace_extn_version : 4, + perf_extn_version : 4, + brps : 4, + reserved0 : 4, + wrps : 4, + reserved1 : 4, + ctx_cmps : 4, + reserved32 : 32; } debug_feature; uint64_t value; } arm_cpuid_id_aa64dfr0_el1; diff --git a/osfmk/arm64/machine_kpc.h b/osfmk/arm64/machine_kpc.h index 7f1b0b640..3afcbb6fe 100644 --- a/osfmk/arm64/machine_kpc.h +++ b/osfmk/arm64/machine_kpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_ARM64_KPC_H diff --git a/osfmk/arm64/machine_machdep.h b/osfmk/arm64/machine_machdep.h index cc8470ba3..7b65fcd69 100644 --- a/osfmk/arm64/machine_machdep.h +++ b/osfmk/arm64/machine_machdep.h @@ -35,8 +35,8 @@ * NOTE: Keep this in sync with libsyscall/os/tsd.h, specifically _os_cpu_number() */ -#define MACHDEP_CTHREAD_ALIGNMENT (1 << 3) -#define MACHDEP_CPUNUM_MASK (MACHDEP_CTHREAD_ALIGNMENT - 1) -#define MACHDEP_CTHREAD_MASK (~MACHDEP_CPUNUM_MASK) +#define MACHDEP_CTHREAD_ALIGNMENT (1 << 3) +#define MACHDEP_CPUNUM_MASK (MACHDEP_CTHREAD_ALIGNMENT - 1) +#define MACHDEP_CTHREAD_MASK (~MACHDEP_CPUNUM_MASK) #endif /* _MACHDEP_INTERNAL_H_ */ diff --git a/osfmk/arm64/machine_remote_time.c b/osfmk/arm64/machine_remote_time.c new file mode 100644 index 000000000..095ec6dab --- /dev/null +++ b/osfmk/arm64/machine_remote_time.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2017 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +lck_spin_t *bt_spin_lock = NULL; +_Atomic uint32_t bt_init_flag = 0; + +extern lck_spin_t *ts_conversion_lock; +extern void mach_bridge_add_timestamp(uint64_t remote_timestamp, uint64_t local_timestamp); +extern void bt_calibration_thread_start(void); + +void +mach_bridge_init_timestamp(void) +{ + /* This function should be called only once by the driver + * implementing the interrupt handler for receiving timestamps */ + if (bt_init_flag) { + assert(!bt_init_flag); + return; + } + + /* Initialize the locks */ + static lck_grp_t *bt_lck_grp = NULL; + + bt_lck_grp = lck_grp_alloc_init("bridgetimestamp", LCK_GRP_ATTR_NULL); + bt_spin_lock = lck_spin_alloc_init(bt_lck_grp, NULL); + ts_conversion_lock = lck_spin_alloc_init(bt_lck_grp, NULL); + + atomic_store(&bt_init_flag, 1); + + /* Start the kernel thread only after all the locks have been initialized */ + bt_calibration_thread_start(); +} + +/* + * Conditions: Should be called from primary interrupt context + */ +void +mach_bridge_recv_timestamps(uint64_t remoteTimestamp, uint64_t localTimestamp) +{ + assert(ml_at_interrupt_context() == TRUE); + + /* Ensure the locks have been initialized */ + if (!bt_init_flag) { + assert(bt_init_flag != 0); + return; + } + + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_RCV_TS), localTimestamp, remoteTimestamp); + + lck_spin_lock(bt_spin_lock); + mach_bridge_add_timestamp(remoteTimestamp, localTimestamp); + lck_spin_unlock(bt_spin_lock); + + return; +} diff --git a/osfmk/arm64/machine_remote_time.h b/osfmk/arm64/machine_remote_time.h new file mode 100644 index 000000000..ee4db2f3b --- /dev/null +++ b/osfmk/arm64/machine_remote_time.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2017 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. 
+ * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#ifndef MACHINE_ARM64_REMOTE_TIME_H +#define MACHINE_ARM64_REMOTE_TIME_H + +#include +#include + +__BEGIN_DECLS +void mach_bridge_recv_timestamps(uint64_t bridgeTimestamp, uint64_t localTimestamp); +void mach_bridge_init_timestamp(void); +__END_DECLS + +#endif /* MACHINE_ARM64_REMOTE_TIME_H */ diff --git a/osfmk/arm64/machine_routines.c b/osfmk/arm64/machine_routines.c index bf9633ed5..b426e7eb6 100644 --- a/osfmk/arm64/machine_routines.c +++ b/osfmk/arm64/machine_routines.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -69,10 +70,11 @@ static int max_cpus_initialized = 0; uint32_t LockTimeOut; uint32_t LockTimeOutUsec; +uint64_t TLockTimeOut; uint64_t MutexSpin; boolean_t is_clock_configured = FALSE; -uint32_t yield_delay_us = 42; /* Less than cpu_idle_latency to ensure ml_delay_should_spin is true */ +uint32_t yield_delay_us = 0; /* Must be less than cpu_idle_latency to ensure ml_delay_should_spin is true */ extern int mach_assert; extern volatile uint32_t debug_enabled; @@ -93,7 +95,7 @@ SECURITY_READ_ONLY_LATE(static int) boot_cpu = -1; SECURITY_READ_ONLY_LATE(static int) max_cpu_number = 0; SECURITY_READ_ONLY_LATE(cluster_type_t) boot_cluster = CLUSTER_TYPE_SMP; -SECURITY_READ_ONLY_LATE(static uint32_t) fiq_eventi = UINT32_MAX; +SECURITY_READ_ONLY_LATE(static uint32_t) fiq_eventi = UINT32_MAX; lockdown_handler_t lockdown_handler; void *lockdown_this; @@ -106,107 +108,115 @@ void ml_lockdown_run_handler(void); uint32_t get_arm_cpu_version(void); -void ml_cpu_signal(unsigned int cpu_id __unused) +void +ml_cpu_signal(unsigned int cpu_id __unused) { panic("Platform does not support ACC Fast IPI"); } -void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs) { +void +ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs) +{ (void)nanosecs; panic("Platform does not support ACC Fast IPI"); } -uint64_t ml_cpu_signal_deferred_get_timer() { +uint64_t +ml_cpu_signal_deferred_get_timer() +{ return 0; } -void ml_cpu_signal_deferred(unsigned int cpu_id __unused) +void +ml_cpu_signal_deferred(unsigned int cpu_id __unused) { panic("Platform does not support ACC Fast IPI deferral"); } -void ml_cpu_signal_retract(unsigned int cpu_id __unused) +void +ml_cpu_signal_retract(unsigned int cpu_id __unused) { panic("Platform does not support ACC Fast IPI retraction"); } -void machine_idle(void) +void +machine_idle(void) { __asm__ volatile ("msr DAIFSet, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF)); Idle_context(); __asm__ volatile ("msr DAIFClr, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF)); } -void init_vfp(void) +void +init_vfp(void) { return; } -boolean_t get_vfp_enabled(void) +boolean_t +get_vfp_enabled(void) { return TRUE; } -void OSSynchronizeIO(void) +void +OSSynchronizeIO(void) { __builtin_arm_dsb(DSB_SY); } -uint64_t get_aux_control(void) +uint64_t +get_aux_control(void) { - uint64_t value; + uint64_t value; 
MRS(value, "ACTLR_EL1"); return value; } -uint64_t get_mmu_control(void) +uint64_t +get_mmu_control(void) { - uint64_t value; + uint64_t value; MRS(value, "SCTLR_EL1"); return value; } -uint64_t get_tcr(void) +uint64_t +get_tcr(void) { - uint64_t value; + uint64_t value; MRS(value, "TCR_EL1"); return value; } -boolean_t ml_get_interrupts_enabled(void) +boolean_t +ml_get_interrupts_enabled(void) { - uint64_t value; + uint64_t value; MRS(value, "DAIF"); - if (value & DAIF_IRQF) + if (value & DAIF_IRQF) { return FALSE; + } return TRUE; } -pmap_paddr_t get_mmu_ttb(void) +pmap_paddr_t +get_mmu_ttb(void) { - pmap_paddr_t value; + pmap_paddr_t value; MRS(value, "TTBR0_EL1"); return value; } -static uint32_t get_midr_el1(void) -{ - uint64_t value; - - MRS(value, "MIDR_EL1"); - - /* This is a 32-bit register. */ - return (uint32_t) value; -} - -uint32_t get_arm_cpu_version(void) +uint32_t +get_arm_cpu_version(void) { - uint32_t value = get_midr_el1(); + uint32_t value = machine_read_midr(); /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */ return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4)); @@ -218,7 +228,8 @@ uint32_t get_arm_cpu_version(void) * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0) * as a continuous time source (e.g. from mach_continuous_time) */ -boolean_t user_cont_hwclock_allowed(void) +boolean_t +user_cont_hwclock_allowed(void) { return FALSE; } @@ -228,12 +239,14 @@ boolean_t user_cont_hwclock_allowed(void) * * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0). */ -boolean_t user_timebase_allowed(void) +boolean_t +user_timebase_allowed(void) { return TRUE; } -boolean_t arm64_wfe_allowed(void) +boolean_t +arm64_wfe_allowed(void) { return TRUE; } @@ -249,9 +262,9 @@ static void assert_amcc_cache_disabled(void); static void lock_amcc(void); static void lock_mmu(uint64_t begin, uint64_t end); -void rorgn_stash_range(void) +void +rorgn_stash_range(void) { - #if DEVELOPMENT || DEBUG boolean_t rorgn_disable = FALSE; @@ -301,7 +314,9 @@ void rorgn_stash_range(void) #endif /* defined (KERNEL_INTEGRITY_KTRR) */ } -static void assert_unlocked() { +static void +assert_unlocked() +{ uint64_t ktrr_lock = 0; uint32_t rorgn_lock = 0; @@ -317,7 +332,9 @@ static void assert_unlocked() { assert(!rorgn_lock); } -static void lock_amcc() { +static void +lock_amcc() +{ #if defined(KERNEL_INTEGRITY_KTRR) rRORGNLOCK = 1; __builtin_arm_isb(ISB_SY); @@ -326,13 +343,14 @@ static void lock_amcc() { #endif } -static void lock_mmu(uint64_t begin, uint64_t end) { - +static void +lock_mmu(uint64_t begin, uint64_t end) +{ #if defined(KERNEL_INTEGRITY_KTRR) __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin); __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end); - __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL); + __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL); /* flush TLB */ @@ -342,10 +360,11 @@ static void lock_mmu(uint64_t begin, uint64_t end) { #else #error KERNEL_INTEGRITY config error #endif - } -static void assert_amcc_cache_disabled() { +static void +assert_amcc_cache_disabled() +{ #if defined(KERNEL_INTEGRITY_KTRR) assert((rMCCGEN & 1) == 0); /* assert M$ disabled or LLC clean will be unreliable */ #else @@ -361,7 +380,8 @@ static void assert_amcc_cache_disabled() { * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in * start.s:start_cpu() for subsequent wake/resume of all cores */ -void rorgn_lockdown(void) 
+void +rorgn_lockdown(void) { vm_offset_t ktrr_begin, ktrr_end; unsigned long last_segsz; @@ -409,7 +429,7 @@ void rorgn_lockdown(void) assert_amcc_cache_disabled(); CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin), - (unsigned)((ktrr_end + last_segsz) - ktrr_begin + AMCC_PGMASK)); + (unsigned)((ktrr_end + last_segsz) - ktrr_begin + AMCC_PGMASK)); lock_amcc(); @@ -431,16 +451,16 @@ machine_startup(__unused boot_args * args) int boot_arg; - PE_parse_boot_argn("assert", &mach_assert, sizeof (mach_assert)); + PE_parse_boot_argn("assert", &mach_assert, sizeof(mach_assert)); - if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) { default_preemption_rate = boot_arg; } - if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("bg_preempt", &boot_arg, sizeof(boot_arg))) { default_bg_preemption_rate = boot_arg; } - PE_parse_boot_argn("yield_delay_us", &yield_delay_us, sizeof (yield_delay_us)); + PE_parse_boot_argn("yield_delay_us", &yield_delay_us, sizeof(yield_delay_us)); machine_conf(); @@ -451,7 +471,8 @@ machine_startup(__unused boot_args * args) /* NOTREACHED */ } -void machine_lockdown_preflight(void) +void +machine_lockdown_preflight(void) { #if CONFIG_KERNEL_INTEGRITY @@ -462,7 +483,8 @@ void machine_lockdown_preflight(void) #endif } -void machine_lockdown(void) +void +machine_lockdown(void) { #if CONFIG_KERNEL_INTEGRITY #if KERNEL_INTEGRITY_WT @@ -495,10 +517,10 @@ void machine_lockdown(void) char * machine_boot_info( - __unused char *buf, - __unused vm_size_t size) + __unused char *buf, + __unused vm_size_t size) { - return (PE_boot_args()); + return PE_boot_args(); } void @@ -516,15 +538,16 @@ machine_init(void) debug_log_init(); clock_config(); is_clock_configured = TRUE; - if (debug_enabled) + if (debug_enabled) { pmap_map_globals(); + } } void slave_machine_init(__unused void *param) { - cpu_machine_init(); /* Initialize the processor */ - clock_init(); /* Init the clock */ + cpu_machine_init(); /* Initialize the processor */ + clock_init(); /* Init the clock */ } /* @@ -533,11 +556,11 @@ slave_machine_init(__unused void *param) */ thread_t machine_processor_shutdown( - __unused thread_t thread, - void (*doshutdown) (processor_t), - processor_t processor) + __unused thread_t thread, + void (*doshutdown)(processor_t), + processor_t processor) { - return (Shutdown_context(doshutdown, processor)); + return Shutdown_context(doshutdown, processor); } /* @@ -554,8 +577,9 @@ ml_init_max_cpus(unsigned int max_cpus) machine_info.max_cpus = max_cpus; machine_info.physical_cpu_max = max_cpus; machine_info.logical_cpu_max = max_cpus; - if (max_cpus_initialized == MAX_CPUS_WAIT) - thread_wakeup((event_t) & max_cpus_initialized); + if (max_cpus_initialized == MAX_CPUS_WAIT) { + thread_wakeup((event_t) &max_cpus_initialized); + } max_cpus_initialized = MAX_CPUS_SET; } (void) ml_set_interrupts_enabled(current_state); @@ -573,11 +597,11 @@ ml_get_max_cpus(void) current_state = ml_set_interrupts_enabled(FALSE); if (max_cpus_initialized != MAX_CPUS_SET) { max_cpus_initialized = MAX_CPUS_WAIT; - assert_wait((event_t) & max_cpus_initialized, THREAD_UNINT); + assert_wait((event_t) &max_cpus_initialized, THREAD_UNINT); (void) thread_block(THREAD_CONTINUE_NULL); } (void) ml_set_interrupts_enabled(current_state); - return (machine_info.max_cpus); + return machine_info.max_cpus; } /* @@ -589,22 +613,31 @@ ml_init_lock_timeout(void) { uint64_t abstime; uint64_t mtxspin; - uint64_t 
default_timeout_ns = NSEC_PER_SEC>>2; + uint64_t default_timeout_ns = NSEC_PER_SEC >> 2; uint32_t slto; - if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto))) + if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) { default_timeout_ns = slto * NSEC_PER_USEC; + } nanoseconds_to_absolutetime(default_timeout_ns, &abstime); - LockTimeOutUsec = (uint32_t)(abstime / NSEC_PER_USEC); + LockTimeOutUsec = (uint32_t) (default_timeout_ns / NSEC_PER_USEC); LockTimeOut = (uint32_t)abstime; - if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) { - if (mtxspin > USEC_PER_SEC>>4) - mtxspin = USEC_PER_SEC>>4; - nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime); + if (PE_parse_boot_argn("tlto_us", &slto, sizeof(slto))) { + nanoseconds_to_absolutetime(slto * NSEC_PER_USEC, &abstime); + TLockTimeOut = abstime; + } else { + TLockTimeOut = LockTimeOut >> 1; + } + + if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) { + if (mtxspin > USEC_PER_SEC >> 4) { + mtxspin = USEC_PER_SEC >> 4; + } + nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime); } else { - nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime); + nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime); } MutexSpin = abstime; } @@ -627,7 +660,7 @@ ml_cpu_up(void) void ml_cpu_down(void) { - cpu_data_t *cpu_data_ptr; + cpu_data_t *cpu_data_ptr; hw_atomic_sub(&machine_info.physical_cpu, 1); hw_atomic_sub(&machine_info.logical_cpu, 1); @@ -677,7 +710,7 @@ ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info) unsigned int ml_get_machine_mem(void) { - return (machine_info.memory_size); + return machine_info.memory_size; } __attribute__((noreturn)) @@ -691,7 +724,9 @@ halt_all_cpus(boolean_t reboot) printf("CPU halted\n"); PEHaltRestart(kPEHaltCPU); } - while (1); + while (1) { + ; + } } __attribute__((noreturn)) @@ -707,7 +742,7 @@ halt_cpu(void) */ void machine_signal_idle( - processor_t processor) + processor_t processor) { cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); @@ -715,7 +750,7 @@ machine_signal_idle( void machine_signal_idle_deferred( - processor_t processor) + processor_t processor) { cpu_signal_deferred(processor_to_cpu_datap(processor)); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); @@ -723,7 +758,7 @@ machine_signal_idle_deferred( void machine_signal_idle_cancel( - processor_t processor) + processor_t processor) { cpu_signal_cancel(processor_to_cpu_datap(processor)); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); @@ -733,13 +768,13 @@ machine_signal_idle_cancel( * Routine: ml_install_interrupt_handler * Function: Initialize Interrupt Handler */ -void +void ml_install_interrupt_handler( - void *nub, - int source, - void *target, - IOInterruptHandler handler, - void *refCon) + void *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon) { cpu_data_t *cpu_data_ptr; boolean_t current_state; @@ -772,9 +807,10 @@ ml_init_interrupt(void) * Routine: ml_init_timebase * Function: register and setup Timebase, Decremeter services */ -void ml_init_timebase( - void *args, - tbd_ops_t tbd_funcs, +void +ml_init_timebase( + void *args, + tbd_ops_t tbd_funcs, vm_offset_t int_address, vm_offset_t int_value __unused) { @@ -812,37 +848,44 @@ ml_parse_cpu_topology(void) void *prop = NULL; int cpu_id = 
avail_cpus++; - if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize)) + if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize)) { cpu_id = *((int32_t*)prop); + } assert(cpu_id < MAX_CPUS); assert(cpu_phys_ids[cpu_id] == (uint32_t)-1); if (boot_cpu == -1) { - if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) + if (kSuccess != DTGetProperty(child, "state", &prop, &propSize)) { panic("unable to retrieve state for cpu %d", cpu_id); + } if (strncmp((char*)prop, "running", propSize) == 0) { boot_cpu = cpu_id; } } - if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize)) + if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize)) { panic("unable to retrieve physical ID for cpu %d", cpu_id); + } cpu_phys_ids[cpu_id] = *((uint32_t*)prop); - if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg))) + if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg))) { max_cpu_number = cpu_id; + } } - if (avail_cpus > cpu_boot_arg) + if (avail_cpus > cpu_boot_arg) { avail_cpus = cpu_boot_arg; + } - if (avail_cpus == 0) + if (avail_cpus == 0) { panic("No cpus found!"); + } - if (boot_cpu == -1) + if (boot_cpu == -1) { panic("unable to determine boot cpu!"); + } /* * Set TPIDRRO_EL0 to indicate the correct cpu number, as we may @@ -877,8 +920,9 @@ int ml_get_cpu_number(uint32_t phys_id) { for (int log_id = 0; log_id <= ml_get_max_cpu_number(); ++log_id) { - if (cpu_phys_ids[log_id] == phys_id) + if (cpu_phys_ids[log_id] == phys_id) { return log_id; + } } return -1; } @@ -890,65 +934,70 @@ ml_get_max_cpu_number(void) } -void ml_lockdown_init() { - lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL); - assert(lockdown_handler_grp != NULL); +void +ml_lockdown_init() +{ + lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL); + assert(lockdown_handler_grp != NULL); - lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL); + lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL); } kern_return_t ml_lockdown_handler_register(lockdown_handler_t f, void *this) { - if (lockdown_handler || !f) { - return KERN_FAILURE; - } + if (lockdown_handler || !f) { + return KERN_FAILURE; + } - lck_mtx_lock(&lockdown_handler_lck); - lockdown_handler = f; - lockdown_this = this; + lck_mtx_lock(&lockdown_handler_lck); + lockdown_handler = f; + lockdown_this = this; #if !(defined(KERNEL_INTEGRITY_KTRR)) - lockdown_done=1; - lockdown_handler(this); + lockdown_done = 1; + lockdown_handler(this); #else - if (lockdown_done) { - lockdown_handler(this); - } + if (lockdown_done) { + lockdown_handler(this); + } #endif - lck_mtx_unlock(&lockdown_handler_lck); + lck_mtx_unlock(&lockdown_handler_lck); - return KERN_SUCCESS; + return KERN_SUCCESS; } -void ml_lockdown_run_handler() { - lck_mtx_lock(&lockdown_handler_lck); - assert(!lockdown_done); +void +ml_lockdown_run_handler() +{ + lck_mtx_lock(&lockdown_handler_lck); + assert(!lockdown_done); - lockdown_done = 1; - if (lockdown_handler) { - lockdown_handler(lockdown_this); - } - lck_mtx_unlock(&lockdown_handler_lck); + lockdown_done = 1; + if (lockdown_handler) { + lockdown_handler(lockdown_this); + } + lck_mtx_unlock(&lockdown_handler_lck); } kern_return_t -ml_processor_register( - ml_processor_info_t * in_processor_info, - processor_t * processor_out, - ipi_handler_t * ipi_handler) +ml_processor_register(ml_processor_info_t *in_processor_info, + processor_t *processor_out, ipi_handler_t *ipi_handler_out, + perfmon_interrupt_handler_func 
*pmi_handler_out) { cpu_data_t *this_cpu_datap; processor_set_t pset; boolean_t is_boot_cpu; static unsigned int reg_cpu_count = 0; - if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()) + if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()) { return KERN_FAILURE; + } - if ((unsigned int)OSIncrementAtomic((SInt32*)&reg_cpu_count) >= avail_cpus) + if ((unsigned int)OSIncrementAtomic((SInt32*)&reg_cpu_count) >= avail_cpus) { return KERN_FAILURE; + } if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) { is_boot_cpu = FALSE; @@ -964,14 +1013,16 @@ ml_processor_register( this_cpu_datap->cpu_id = in_processor_info->cpu_id; this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu); - if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) + if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) { goto processor_register_error; + } if (!is_boot_cpu) { this_cpu_datap->cpu_number = in_processor_info->log_id; - if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) + if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) { goto processor_register_error; + } } this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle; @@ -1002,7 +1053,7 @@ ml_processor_register( if (!is_boot_cpu) { processor_init((struct processor *)this_cpu_datap->cpu_processor, - this_cpu_datap->cpu_number, pset); + this_cpu_datap->cpu_number, pset); if (this_cpu_datap->cpu_l2_access_penalty) { /* @@ -1012,19 +1063,26 @@ ml_processor_register( * preferentially. */ processor_set_primary(this_cpu_datap->cpu_processor, - master_processor); + master_processor); } } *processor_out = this_cpu_datap->cpu_processor; - *ipi_handler = cpu_signal_handler; - if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) + *ipi_handler_out = cpu_signal_handler; +#if CPMU_AIC_PMI && MONOTONIC + *pmi_handler_out = mt_cpmu_aic_pmi; +#else + *pmi_handler_out = NULL; +#endif /* CPMU_AIC_PMI && MONOTONIC */ + if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) { *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle; + } #if KPC - if (kpc_register_cpu(this_cpu_datap) != TRUE) + if (kpc_register_cpu(this_cpu_datap) != TRUE) { goto processor_register_error; -#endif + } +#endif /* KPC */ if (!is_boot_cpu) { early_random_cpu_init(this_cpu_datap->cpu_number); @@ -1037,17 +1095,18 @@ ml_processor_register( processor_register_error: #if KPC kpc_unregister_cpu(this_cpu_datap); -#endif +#endif /* KPC */ - if (!is_boot_cpu) + if (!is_boot_cpu) { cpu_data_free(this_cpu_datap); + } return KERN_FAILURE; } void ml_init_arm_debug_interface( - void * in_cpu_datap, - vm_offset_t virt_address) + void * in_cpu_datap, + vm_offset_t virt_address) { ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address; do_debugid(); @@ -1059,7 +1118,7 @@ ml_init_arm_debug_interface( */ void init_ast_check( - __unused processor_t processor) + __unused processor_t processor) { } @@ -1069,7 +1128,7 @@ init_ast_check( */ void cause_ast_check( - processor_t processor) + processor_t processor) { if (current_processor() != processor) { cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL); @@ -1079,7 +1138,9 @@ cause_ast_check( extern uint32_t cpu_idle_count; -void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) { +void +ml_get_power_state(boolean_t *icp, boolean_t *pidlep) +{ *icp = ml_at_interrupt_context(); *pidlep = (cpu_idle_count == real_ncpus); } @@ -1091,45 +1152,45 @@ void
ml_cause_interrupt(void) { - return; /* BS_XXX */ + return; /* BS_XXX */ } /* Map memory map IO space */ vm_offset_t ml_io_map( - vm_offset_t phys_addr, - vm_size_t size) + vm_offset_t phys_addr, + vm_size_t size) { - return (io_map(phys_addr, size, VM_WIMG_IO)); + return io_map(phys_addr, size, VM_WIMG_IO); } vm_offset_t ml_io_map_wcomb( - vm_offset_t phys_addr, - vm_size_t size) + vm_offset_t phys_addr, + vm_size_t size) { - return (io_map(phys_addr, size, VM_WIMG_WCOMB)); + return io_map(phys_addr, size, VM_WIMG_WCOMB); } /* boot memory allocation */ vm_offset_t ml_static_malloc( - __unused vm_size_t size) + __unused vm_size_t size) { - return ((vm_offset_t) NULL); + return (vm_offset_t) NULL; } vm_map_address_t ml_map_high_window( - vm_offset_t phys_addr, - vm_size_t len) + vm_offset_t phys_addr, + vm_size_t len) { return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE); } vm_offset_t ml_static_ptovirt( - vm_offset_t paddr) + vm_offset_t paddr) { return phystokv(paddr); } @@ -1145,7 +1206,7 @@ vm_offset_t ml_static_unslide( vm_offset_t vaddr) { - return (ml_static_vtop(vaddr) - gPhysBase + gVirtBase - vm_kernel_slide) ; + return ml_static_vtop(vaddr) - gPhysBase + gVirtBase - vm_kernel_slide; } extern tt_entry_t *arm_kva_to_tte(vm_offset_t va); @@ -1159,7 +1220,7 @@ ml_static_protect( pt_entry_t arm_prot = 0; pt_entry_t arm_block_prot = 0; vm_offset_t vaddr_cur; - ppnum_t ppn; + ppnum_t ppn; kern_return_t result = KERN_SUCCESS; if (vaddr < VM_MIN_KERNEL_ADDRESS) { @@ -1191,13 +1252,13 @@ ml_static_protect( } for (vaddr_cur = vaddr; - vaddr_cur < trunc_page_64(vaddr + size); - vaddr_cur += PAGE_SIZE) { + vaddr_cur < trunc_page_64(vaddr + size); + vaddr_cur += PAGE_SIZE) { ppn = pmap_find_phys(kernel_pmap, vaddr_cur); if (ppn != (vm_offset_t) NULL) { - tt_entry_t *tte2; - pt_entry_t *pte_p; - pt_entry_t ptmp; + tt_entry_t *tte2; + pt_entry_t *pte_p; + pt_entry_t ptmp; tte2 = arm_kva_to_tte(vaddr_cur); @@ -1232,11 +1293,11 @@ ml_static_protect( __unreachable_ok_push if (TEST_PAGE_RATIO_4) { { - unsigned int i; - pt_entry_t *ptep_iter; + unsigned int i; + pt_entry_t *ptep_iter; ptep_iter = pte_p; - for (i=0; i<4; i++, ptep_iter++) { + for (i = 0; i < 4; i++, ptep_iter++) { /* Note that there is a hole in the HINT sanity checking here. */ ptmp = *ptep_iter; @@ -1248,7 +1309,7 @@ ml_static_protect( } } #ifndef __ARM_L1_PTW__ - FlushPoC_DcacheRegion( trunc_page_32(pte_p), 4*sizeof(*pte_p)); + FlushPoC_DcacheRegion( trunc_page_32(pte_p), 4 * sizeof(*pte_p)); #endif } else { ptmp = *pte_p; @@ -1282,23 +1343,23 @@ ml_static_protect( */ void ml_static_mfree( - vm_offset_t vaddr, - vm_size_t size) + vm_offset_t vaddr, + vm_size_t size) { vm_offset_t vaddr_cur; ppnum_t ppn; uint32_t freed_pages = 0; /* It is acceptable (if bad) to fail to free. 
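 * Pages that cannot be released simply stay wired. The loop below walks
 * the range one page at a time and hands each backing page back to the
 * VM layer; note the vm_page_wire_count adjustments and the DEBUG
 * kprintf reporting how many pages were released.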
*/ - if (vaddr < VM_MIN_KERNEL_ADDRESS) + if (vaddr < VM_MIN_KERNEL_ADDRESS) { return; + } - assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */ + assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */ for (vaddr_cur = vaddr; - vaddr_cur < trunc_page_64(vaddr + size); - vaddr_cur += PAGE_SIZE) { - + vaddr_cur < trunc_page_64(vaddr + size); + vaddr_cur += PAGE_SIZE) { ppn = pmap_find_phys(kernel_pmap, vaddr_cur); if (ppn != (vm_offset_t) NULL) { /* @@ -1327,7 +1388,7 @@ ml_static_mfree( vm_page_wire_count -= freed_pages; vm_page_wire_count_initial -= freed_pages; vm_page_unlock_queues(); -#if DEBUG +#if DEBUG kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn); #endif } @@ -1351,21 +1412,26 @@ vm_size_t ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size) { addr64_t cur_phys_dst, cur_phys_src; - vm_size_t count, nbytes = 0; + vm_size_t count, nbytes = 0; while (size > 0) { - if (!(cur_phys_src = kvtophys(virtsrc))) + if (!(cur_phys_src = kvtophys(virtsrc))) { break; - if (!(cur_phys_dst = kvtophys(virtdst))) + } + if (!(cur_phys_dst = kvtophys(virtdst))) { break; + } if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) || - !pmap_valid_address(trunc_page_64(cur_phys_src))) + !pmap_valid_address(trunc_page_64(cur_phys_src))) { break; + } count = PAGE_SIZE - (cur_phys_src & PAGE_MASK); - if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) + if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) { count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK); - if (count > size) + } + if (count > size) { count = size; + } bcopy_phys(cur_phys_src, cur_phys_dst, count); @@ -1388,20 +1454,24 @@ ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size) * FALSE otherwise. 
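 * The walk mirrors ml_nofault_copy above: each page in the range must
 * translate through kvtophys and pass pmap_valid_address before the
 * range is considered safe to touch without faulting.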
*/ -boolean_t ml_validate_nofault( +boolean_t +ml_validate_nofault( vm_offset_t virtsrc, vm_size_t size) { addr64_t cur_phys_src; uint32_t count; while (size > 0) { - if (!(cur_phys_src = kvtophys(virtsrc))) + if (!(cur_phys_src = kvtophys(virtsrc))) { return FALSE; - if (!pmap_valid_address(trunc_page_64(cur_phys_src))) + } + if (!pmap_valid_address(trunc_page_64(cur_phys_src))) { return FALSE; + } count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); - if (count > size) + if (count > size) { count = (uint32_t)size; + } virtsrc += count; size -= count; @@ -1422,13 +1492,17 @@ active_rt_threads(__unused boolean_t active) { } -static void cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2) { +static void +cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2) +{ return; } cpu_qos_update_t cpu_qos_update = cpu_qos_cb_default; -void cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb) { +void +cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb) +{ if (cpu_qos_cb != NULL) { cpu_qos_update = cpu_qos_cb; } else { @@ -1437,13 +1511,13 @@ void cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb) { } void -thread_tell_urgency(int urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread) +thread_tell_urgency(thread_urgency_t urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread) { - SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0); + SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0); - cpu_qos_update(urgency, rt_period, rt_deadline); + cpu_qos_update((int)urgency, rt_period, rt_deadline); - SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0); + SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0); } void @@ -1454,7 +1528,7 @@ machine_run_count(__unused uint32_t count) processor_t machine_choose_processor(__unused processor_set_t pset, processor_t processor) { - return (processor); + return processor; } #if KASAN @@ -1489,7 +1563,9 @@ ml_stack_size(void) } #endif -boolean_t machine_timeout_suspended(void) { +boolean_t +machine_timeout_suspended(void) +{ return FALSE; } @@ -1505,28 +1581,29 @@ ml_interrupt_prewarm(__unused uint64_t deadline) void ml_set_decrementer(uint32_t dec_value) { - cpu_data_t *cdp = getCpuDatap(); + cpu_data_t *cdp = getCpuDatap(); assert(ml_get_interrupts_enabled() == FALSE); cdp->cpu_decrementer = dec_value; - if (cdp->cpu_set_decrementer_func) { + if (cdp->cpu_set_decrementer_func) { ((void (*)(uint32_t))cdp->cpu_set_decrementer_func)(dec_value); } else { - __asm__ volatile("msr CNTP_TVAL_EL0, %0" : : "r"((uint64_t)dec_value)); + __asm__ volatile ("msr CNTP_TVAL_EL0, %0" : : "r"((uint64_t)dec_value)); } } -uint64_t ml_get_hwclock() +uint64_t +ml_get_hwclock() { uint64_t timebase; // ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2 // "Reads of CNTPCT[_EL0] can occur speculatively and out of order relative // to other instructions executed on the same processor." 
- __asm__ volatile("isb\n" - "mrs %0, CNTPCT_EL0" - : "=r"(timebase)); + __asm__ volatile ("isb\n" + "mrs %0, CNTPCT_EL0" + : "=r"(timebase)); return timebase; } @@ -1534,7 +1611,7 @@ uint64_t ml_get_hwclock() uint64_t ml_get_timebase() { - return (ml_get_hwclock() + getCpuDatap()->cpu_base_timebase); + return ml_get_hwclock() + getCpuDatap()->cpu_base_timebase; } uint32_t @@ -1550,7 +1627,7 @@ ml_get_decrementer() } else { uint64_t wide_val; - __asm__ volatile("mrs %0, CNTP_TVAL_EL0" : "=r"(wide_val)); + __asm__ volatile ("mrs %0, CNTP_TVAL_EL0" : "=r"(wide_val)); dec = (uint32_t)wide_val; assert(wide_val == (uint64_t)dec); } @@ -1563,7 +1640,7 @@ ml_get_timer_pending() { uint64_t cntp_ctl; - __asm__ volatile("mrs %0, CNTP_CTL_EL0" : "=r"(cntp_ctl)); + __asm__ volatile ("mrs %0, CNTP_CTL_EL0" : "=r"(cntp_ctl)); return ((cntp_ctl & CNTP_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE; } @@ -1593,11 +1670,17 @@ cache_trap_recover() { vm_map_address_t fault_addr; - __asm__ volatile("mrs %0, FAR_EL1" : "=r"(fault_addr)); + __asm__ volatile ("mrs %0, FAR_EL1" : "=r"(fault_addr)); cache_trap_error(current_thread(), fault_addr); } +static void +set_cache_trap_recover(thread_t thread) +{ + thread->recover = (vm_address_t)cache_trap_recover; +} + static void dcache_flush_trap(vm_map_address_t start, vm_map_size_t size) { @@ -1620,8 +1703,7 @@ dcache_flush_trap(vm_map_address_t start, vm_map_size_t size) cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1)); } - /* Set recovery function */ - thread->recover = (vm_address_t)cache_trap_recover; + set_cache_trap_recover(thread); /* * We're coherent on Apple ARM64 CPUs, so this could be a nop. However, @@ -1658,8 +1740,7 @@ icache_invalidate_trap(vm_map_address_t start, vm_map_size_t size) cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1)); } - /* Set recovery function */ - thread->recover = (vm_address_t)cache_trap_recover; + set_cache_trap_recover(thread); CleanPoU_DcacheRegion(start, (uint32_t) size); @@ -1766,8 +1847,8 @@ fiq_context_bootstrap(boolean_t enable_fiq) { #if defined(APPLE_ARM64_ARCH_FAMILY) || defined(BCM2837) /* Could fill in our own ops here, if we needed them */ - uint64_t ticks_per_sec, ticks_per_event, events_per_sec; - uint32_t bit_index; + uint64_t ticks_per_sec, ticks_per_event, events_per_sec; + uint32_t bit_index; ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz; #if defined(ARM_BOARD_WFE_TIMEOUT_NS) @@ -1780,8 +1861,9 @@ fiq_context_bootstrap(boolean_t enable_fiq) bit_index = flsll(ticks_per_event) - 1; /* Highest bit set */ /* Round up to power of two */ - if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) + if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) { bit_index++; + } /* * The timer can only trigger on rising or falling edge, @@ -1789,8 +1871,9 @@ fiq_context_bootstrap(boolean_t enable_fiq) * do need to adjust which bit we are interested in to * account for this. 
*/ - if (bit_index != 0) + if (bit_index != 0) { bit_index--; + } fiq_eventi = bit_index; #else @@ -1815,31 +1898,43 @@ ml_delay_should_spin(uint64_t interval) } } -void -ml_delay_on_yield(void) +boolean_t +ml_thread_is64bit(thread_t thread) { + return thread_is_64bit_addr(thread); } -boolean_t ml_thread_is64bit(thread_t thread) { - return (thread_is_64bit_addr(thread)); +void +ml_delay_on_yield(void) +{ +#if DEVELOPMENT || DEBUG + if (yield_delay_us) { + delay(yield_delay_us); + } +#endif } -void ml_timer_evaluate(void) { +void +ml_timer_evaluate(void) +{ } boolean_t -ml_timer_forced_evaluation(void) { +ml_timer_forced_evaluation(void) +{ return FALSE; } uint64_t -ml_energy_stat(thread_t t) { +ml_energy_stat(thread_t t) +{ return t->machine.energy_estimate_nj; } void -ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) { +ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) +{ #if CONFIG_EMBEDDED /* * For now: update the resource coalition stats of the @@ -1850,7 +1945,8 @@ ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) { } uint64_t -ml_gpu_stat(__unused thread_t t) { +ml_gpu_stat(__unused thread_t t) +{ return 0; } @@ -1859,7 +1955,9 @@ static void timer_state_event(boolean_t switch_to_kernel) { thread_t thread = current_thread(); - if (!thread->precise_user_kernel_time) return; + if (!thread->precise_user_kernel_time) { + return; + } processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data; uint64_t now = ml_get_timebase(); @@ -1905,10 +2003,9 @@ current_thread(void) return current_thread_fast(); } -typedef struct -{ - ex_cb_t cb; - void *refcon; +typedef struct{ + ex_cb_t cb; + void *refcon; } ex_cb_info_t; @@ -1919,20 +2016,19 @@ ex_cb_info_t ex_cb_info[EXCB_CLASS_MAX]; * Currently we support only one registered callback per class but * it should be possible to support more callbacks */ -kern_return_t ex_cb_register( - ex_cb_class_t cb_class, - ex_cb_t cb, - void *refcon) +kern_return_t +ex_cb_register( + ex_cb_class_t cb_class, + ex_cb_t cb, + void *refcon) { ex_cb_info_t *pInfo = &ex_cb_info[cb_class]; - if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX)) - { + if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX)) { return KERN_INVALID_VALUE; } - if (NULL == pInfo->cb) - { + if (NULL == pInfo->cb) { pInfo->cb = cb; pInfo->refcon = refcon; return KERN_SUCCESS; @@ -1943,20 +2039,19 @@ kern_return_t ex_cb_register( /* * Called internally by platform kernel to invoke the registered callback for class */ -ex_cb_action_t ex_cb_invoke( - ex_cb_class_t cb_class, - vm_offset_t far) +ex_cb_action_t +ex_cb_invoke( + ex_cb_class_t cb_class, + vm_offset_t far) { ex_cb_info_t *pInfo = &ex_cb_info[cb_class]; ex_cb_state_t state = {far}; - if (cb_class >= EXCB_CLASS_MAX) - { + if (cb_class >= EXCB_CLASS_MAX) { panic("Invalid exception callback class 0x%x\n", cb_class); } - if (pInfo->cb) - { + if (pInfo->cb) { return pInfo->cb(cb_class, pInfo->refcon, &state); } return EXCB_ACTION_NONE; diff --git a/osfmk/arm64/machine_routines_asm.s b/osfmk/arm64/machine_routines_asm.s index 8f51e2a22..08756dc8d 100644 --- a/osfmk/arm64/machine_routines_asm.s +++ b/osfmk/arm64/machine_routines_asm.s @@ -681,10 +681,11 @@ L_mmu_kvtop_wpreflight_invalid: */ .macro SET_RECOVERY_HANDLER mrs $0, TPIDR_EL1 // Load thread pointer - ldr $1, [$0, TH_RECOVER] // Save previous recovery handler adrp $2, $3@page // Load the recovery handler address add $2, $2, $3@pageoff - str $2, [$0, TH_RECOVER] // Set new recovery handler + + ldr $1, [$0, TH_RECOVER] // Save previous recovery handler + str $2, [$0, TH_RECOVER] // Set 
new signed recovery handler .endmacro /* @@ -823,7 +824,9 @@ LEXT(_bcopyinstr) adr x4, Lcopyinstr_error // Get address for recover mrs x10, TPIDR_EL1 // Get thread pointer ldr x11, [x10, TH_RECOVER] // Save previous recover + str x4, [x10, TH_RECOVER] // Store new recover + mov x4, #0 // x4 - total bytes copied Lcopyinstr_loop: ldrb w5, [x0], #1 // Load a byte from the user source diff --git a/osfmk/arm64/machine_task.c b/osfmk/arm64/machine_task.c index a07df7007..6c6429d4f 100644 --- a/osfmk/arm64/machine_task.c +++ b/osfmk/arm64/machine_task.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -62,149 +62,151 @@ extern zone_t ads_zone; kern_return_t machine_task_set_state( - task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count) + task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count) { switch (flavor) { case ARM_DEBUG_STATE: { arm_legacy_debug_state_t *tstate = (arm_legacy_debug_state_t *) state; if (task_has_64Bit_data(task) || - (state_count != ARM_LEGACY_DEBUG_STATE_COUNT) || - (!debug_legacy_state_is_valid(tstate))) { + (state_count != ARM_LEGACY_DEBUG_STATE_COUNT) || + (!debug_legacy_state_is_valid(tstate))) { return KERN_INVALID_ARGUMENT; } - + if (task->task_debug == NULL) { task->task_debug = zalloc(ads_zone); - if (task->task_debug == NULL) + if (task->task_debug == NULL) { return KERN_FAILURE; + } } copy_legacy_debug_state(tstate, (arm_legacy_debug_state_t *) task->task_debug, FALSE); /* FALSE OR TRUE doesn't matter since we are ignoring it for arm */ - + return KERN_SUCCESS; } case ARM_DEBUG_STATE32: { arm_debug_state32_t *tstate = (arm_debug_state32_t *) state; if (task_has_64Bit_data(task) || - (state_count != ARM_DEBUG_STATE32_COUNT) || - (!debug_state_is_valid32(tstate))) { + (state_count != ARM_DEBUG_STATE32_COUNT) || + (!debug_state_is_valid32(tstate))) { return KERN_INVALID_ARGUMENT; } - + if (task->task_debug == NULL) { task->task_debug = zalloc(ads_zone); - if (task->task_debug == NULL) + if (task->task_debug == NULL) { return KERN_FAILURE; + } } copy_debug_state32(tstate, (arm_debug_state32_t *) task->task_debug, FALSE); /* FALSE OR TRUE doesn't matter since we are ignoring it for arm */ - + return KERN_SUCCESS; } case ARM_DEBUG_STATE64: { arm_debug_state64_t *tstate = (arm_debug_state64_t *) state; - + if ((!task_has_64Bit_data(task)) || - (state_count != ARM_DEBUG_STATE64_COUNT) || - (!debug_state_is_valid64(tstate))) { + (state_count != ARM_DEBUG_STATE64_COUNT) || + (!debug_state_is_valid64(tstate))) { return KERN_INVALID_ARGUMENT; } - + if (task->task_debug == NULL) { task->task_debug = zalloc(ads_zone); - if (task->task_debug == NULL) + if (task->task_debug == NULL) { return KERN_FAILURE; + } } copy_debug_state64(tstate, (arm_debug_state64_t *) task->task_debug, FALSE); /* FALSE OR TRUE doesn't matter since we are ignoring it for arm */ - + return KERN_SUCCESS; } - case THREAD_STATE_NONE: /* Using this flavor to clear task_debug */ + case THREAD_STATE_NONE: /* Using this flavor to clear task_debug */ { if (task->task_debug != NULL) { zfree(ads_zone, task->task_debug); task->task_debug = NULL; - + return KERN_SUCCESS; } return KERN_FAILURE; } default: - { + { return KERN_INVALID_ARGUMENT; - } + } } return KERN_FAILURE; } -kern_return_t -machine_task_get_state(task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t *state_count) +kern_return_t +machine_task_get_state(task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t *state_count) { switch (flavor) { case ARM_DEBUG_STATE: { arm_legacy_debug_state_t *tstate = (arm_legacy_debug_state_t *) state; - + if (task_has_64Bit_data(task) || (*state_count != ARM_LEGACY_DEBUG_STATE_COUNT)) { return KERN_INVALID_ARGUMENT; } - + if (task->task_debug == NULL) { - bzero(state, sizeof(*tstate)); + bzero(state, sizeof(*tstate)); } else { copy_legacy_debug_state((arm_legacy_debug_state_t*) task->task_debug, tstate, FALSE); /* FALSE OR TRUE doesn't matter since we are ignoring it for arm */ - } - + } + return KERN_SUCCESS; } case ARM_DEBUG_STATE32: { arm_debug_state32_t *tstate = (arm_debug_state32_t *) 
state; - + if (task_has_64Bit_data(task) || (*state_count != ARM_DEBUG_STATE32_COUNT)) { return KERN_INVALID_ARGUMENT; } - + if (task->task_debug == NULL) { - bzero(state, sizeof(*tstate)); + bzero(state, sizeof(*tstate)); } else { copy_debug_state32((arm_debug_state32_t*) task->task_debug, tstate, FALSE); /* FALSE OR TRUE doesn't matter since we are ignoring it for arm */ - } - + } + return KERN_SUCCESS; } case ARM_DEBUG_STATE64: { arm_debug_state64_t *tstate = (arm_debug_state64_t *) state; - + if ((!task_has_64Bit_data(task)) || (*state_count != ARM_DEBUG_STATE64_COUNT)) { return KERN_INVALID_ARGUMENT; } - + if (task->task_debug == NULL) { - bzero(state, sizeof(*tstate)); + bzero(state, sizeof(*tstate)); } else { copy_debug_state64((arm_debug_state64_t*) task->task_debug, tstate, FALSE); /* FALSE OR TRUE doesn't matter since we are ignoring it for arm */ - } - + } + return KERN_SUCCESS; } default: - { + { return KERN_INVALID_ARGUMENT; - } - + } } return KERN_FAILURE; } @@ -219,15 +221,15 @@ machine_task_terminate(task_t task) if (task_debug != NULL) { task->task_debug = NULL; zfree(ads_zone, task_debug); - } + } } } kern_return_t machine_thread_inherit_taskwide( - thread_t thread, - task_t parent_task) + thread_t thread, + task_t parent_task) { if (parent_task->task_debug) { int flavor; @@ -245,7 +247,7 @@ machine_thread_inherit_taskwide( void machine_task_init(__unused task_t new_task, - __unused task_t parent_task, - __unused boolean_t memory_inherit) -{ + __unused task_t parent_task, + __unused boolean_t memory_inherit) +{ } diff --git a/osfmk/arm64/monotonic.h b/osfmk/arm64/monotonic.h index ec10b1981..992d501db 100644 --- a/osfmk/arm64/monotonic.h +++ b/osfmk/arm64/monotonic.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -28,10 +28,20 @@ #ifndef ARM64_MONOTONIC_H #define ARM64_MONOTONIC_H +#include + +#if MONOTONIC + #include #if KERNEL_PRIVATE +__BEGIN_DECLS + +#if MONOTONIC && !CPMU_AIC_PMI +#define MONOTONIC_FIQ 1 +#endif /* MONOTONIC && !CPMU_AIC_PMI */ + #include #define MT_NDEVS 1 @@ -41,29 +51,52 @@ #define MT_CORE_NFIXED 2 #define MT_CORE_MAXVAL ((UINT64_C(1) << 48) - 1) +__END_DECLS + #endif /* KERNEL_PRIVATE */ #if MACH_KERNEL_PRIVATE #include -#define PMSR "s3_1_c15_c13_0" -#define PMSR_PMI(REG) ((REG) & ((1 << CORE_NCTRS) - 1)) +__BEGIN_DECLS + +#define PMCR0 "s3_1_c15_c0_0" + +/* set by hardware if a PMI was delivered */ +#define PMCR0_PMAI (UINT64_C(1) << 11) +#define PMCR0_PMI(REG) ((REG) & PMCR0_PMAI) static inline bool -mt_pmi_pending(uint64_t * restrict pmsr, uint64_t * restrict upmsr) +mt_pmi_pending(uint64_t * restrict pmcr0_out, + uint64_t * restrict upmsr_out) { - *pmsr = __builtin_arm_rsr64(PMSR); - bool pmi = PMSR_PMI(*pmsr); - -#pragma unused(upmsr) + uint64_t pmcr0 = __builtin_arm_rsr64(PMCR0); + bool pmi = PMCR0_PMI(pmcr0); + if (pmi) { + /* + * Acknowledge the PMI by clearing the pmai bit. 
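+ * PMCR0_PMAI is documented above as set by hardware when a PMI is
+ * delivered; writing it back as zero should clear the pending
+ * indication so the PMI is not re-raised the moment interrupts are
+ * unmasked, while leaving the other pmcr0 bits as they were read.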
+ */ + __builtin_arm_wsr64(PMCR0, pmcr0 & ~PMCR0_PMAI); + } + *pmcr0_out = pmcr0; + +#pragma unused(upmsr_out) return pmi; } -void mt_fiq(void *cpu, uint64_t pmsr, uint64_t upmsr); +void mt_fiq(void *cpu, uint64_t pmcr0, uint64_t upmsr); + +#if CPMU_AIC_PMI +void mt_cpmu_aic_pmi(void *source); +#endif /* CPMU_AIC_PMI */ + +__END_DECLS #endif /* MACH_KERNEL_PRIVATE */ +#endif /* MONOTONIC */ + #endif /* !defined(ARM64_MONOTONIC_H) */ diff --git a/osfmk/arm64/monotonic_arm64.c b/osfmk/arm64/monotonic_arm64.c index 321205db8..878c87973 100644 --- a/osfmk/arm64/monotonic_arm64.c +++ b/osfmk/arm64/monotonic_arm64.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -32,15 +32,38 @@ #include #include /* panic */ #include +#include #include /* CHAR_BIT */ +#include +#include +#include /* DTFindEntry */ +#include #include #include #include #include #include -#include -#include /* DTFindEntry */ -#include + +/* + * Ensure that control registers read back what was written under MACH_ASSERT + * kernels. + * + * A static inline function cannot be used due to passing the register through + * the builtin -- it requires a constant string as its first argument, since + * MSRs registers are encoded as an immediate in the instruction. + */ +#if MACH_ASSERT +#define CTRL_REG_SET(reg, val) do { \ + __builtin_arm_wsr64((reg), (val)); \ + uint64_t __check_reg = __builtin_arm_rsr64((reg)); \ + if (__check_reg != (val)) { \ + panic("value written to %s was not read back (wrote %llx, read %llx)", \ + #reg, (val), __check_reg); \ + } \ +} while (0) +#else /* MACH_ASSERT */ +#define CTRL_REG_SET(reg, val) __builtin_arm_wsr64((reg), (val)) +#endif /* MACH_ASSERT */ #pragma mark core counters @@ -92,8 +115,6 @@ bool mt_core_supported = true; * other features. 
*/ -#define PMCR0 "s3_1_c15_c0_0" - #define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR)) #define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS)) /* how interrupts are delivered on a PMI */ @@ -105,9 +126,13 @@ enum { PMCR0_INTGEN_FIQ = 4, }; #define PMCR0_INTGEN_SET(INT) ((uint64_t)(INT) << 8) + +#if CPMU_AIC_PMI +#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_AIC) +#else /* CPMU_AIC_PMI */ #define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ) -/* set by hardware if a PMI was delivered */ -#define PMCR0_PMAI (UINT64_C(1) << 11) +#endif /* !CPMU_AIC_PMI */ + #define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (12 + CTR_POS(CTR))) /* fixed counters are always counting */ #define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS)) @@ -138,7 +163,7 @@ enum { #define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR))) #endif #define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \ - PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR)) + PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR)) /* fixed counters always count in all modes */ #define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS)) @@ -165,12 +190,9 @@ core_init_execution_modes(void) #define PMCR3 "s3_1_c15_c3_0" #define PMCR4 "s3_1_c15_c4_0" -#define PMSR_OVF(CTR) (1ULL << (CTR)) +#define PMSR "s3_1_c15_c13_0" -void -mt_early_init(void) -{ -} +#define PMSR_OVF(CTR) (1ULL << (CTR)) static int core_init(__unused mt_device_t dev) @@ -218,11 +240,20 @@ mt_core_set_snap(unsigned int ctr, uint64_t count) static void core_set_enabled(void) { - uint64_t pmcr0; - - pmcr0 = __builtin_arm_rsr64(PMCR0); + uint64_t pmcr0 = __builtin_arm_rsr64(PMCR0); pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN; + pmcr0 &= ~PMCR0_PMAI; __builtin_arm_wsr64(PMCR0, pmcr0); +#if MACH_ASSERT + /* + * Only check for the values that were ORed in. 
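+ * A full-equality check (what CTRL_REG_SET does) could race with the
+ * hardware re-asserting PMCR0_PMAI on a new PMI, so only the bits that
+ * were just ORed in are verified.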
+ */ + uint64_t pmcr0_check = __builtin_arm_rsr64(PMCR0); + if (!(pmcr0_check & (PMCR0_INIT | PMCR0_FIXED_EN))) { + panic("monotonic: hardware ignored enable (read %llx)", + pmcr0_check); + } +#endif /* MACH_ASSERT */ } static void @@ -234,11 +265,11 @@ core_idle(__unused cpu_data_t *cpu) #if DEBUG uint64_t pmcr0 = __builtin_arm_rsr64(PMCR0); if ((pmcr0 & PMCR0_FIXED_EN) == 0) { - panic("monotonic: counters disabled while idling, pmcr0 = 0x%llx\n", pmcr0); + panic("monotonic: counters disabled before idling, pmcr0 = 0x%llx\n", pmcr0); } uint64_t pmcr1 = __builtin_arm_rsr64(PMCR1); if ((pmcr1 & PMCR1_INIT) == 0) { - panic("monotonic: counter modes disabled while idling, pmcr1 = 0x%llx\n", pmcr1); + panic("monotonic: counter modes disabled before idling, pmcr1 = 0x%llx\n", pmcr1); } #endif /* DEBUG */ @@ -262,7 +293,6 @@ mt_cpu_idle(cpu_data_t *cpu) void mt_cpu_run(cpu_data_t *cpu) { - uint64_t pmcr0; struct mt_cpu *mtc; assert(cpu != NULL); @@ -277,9 +307,7 @@ mt_cpu_run(cpu_data_t *cpu) /* re-enable the counters */ core_init_execution_modes(); - pmcr0 = __builtin_arm_rsr64(PMCR0); - pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN; - __builtin_arm_wsr64(PMCR0, pmcr0); + core_set_enabled(); } void @@ -305,12 +333,30 @@ mt_wake_per_core(void) } static void -mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmsr) +mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmcr0) { assert(cpu != NULL); assert(ml_get_interrupts_enabled() == FALSE); - (void)atomic_fetch_add_explicit(&mt_pmis, 1, memory_order_relaxed); + os_atomic_inc(&mt_pmis, relaxed); + cpu->cpu_stat.pmi_cnt++; + cpu->cpu_stat.pmi_cnt_wake++; + +#if MONOTONIC_DEBUG + if (!PMCR0_PMI(pmcr0)) { + kprintf("monotonic: mt_cpu_pmi but no PMI (PMCR0 = %#llx)\n", + pmcr0); + } +#else /* MONOTONIC_DEBUG */ +#pragma unused(pmcr0) +#endif /* !MONOTONIC_DEBUG */ + + uint64_t pmsr = __builtin_arm_rsr64(PMSR); + +#if MONOTONIC_DEBUG + kprintf("monotonic: cpu = %d, PMSR = 0x%llx, PMCR0 = 0x%llx", + cpu_number(), pmsr, pmcr0); +#endif /* MONOTONIC_DEBUG */ /* * monotonic handles any fixed counter PMIs. 
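 * Each overflowed fixed counter shows up as a set bit in PMSR (see
 * PMSR_OVF); its snapshot is reloaded from mt_core_reset_values before
 * the counters are re-enabled by core_set_enabled().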
@@ -332,7 +378,7 @@ mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmsr) user_mode = PSR64_IS_USER(get_saved_state_cpsr(state)); } KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1), - mt_microstackshot_ctr, user_mode); + mt_microstackshot_ctr, user_mode); mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx); } } @@ -347,13 +393,35 @@ mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmsr) } } +#if MACH_ASSERT + pmsr = __builtin_arm_rsr64(PMSR); + assert(pmsr == 0); +#endif /* MACH_ASSERT */ + core_set_enabled(); } +#if CPMU_AIC_PMI void -mt_fiq(void *cpu, uint64_t pmsr, uint64_t upmsr) +mt_cpmu_aic_pmi(cpu_id_t source) { - mt_cpu_pmi(cpu, pmsr); + struct cpu_data *curcpu = getCpuDatap(); + if (source != curcpu->interrupt_nub) { + panic("monotonic: PMI from IOCPU %p delivered to %p", source, + curcpu->interrupt_nub); + } + mt_cpu_pmi(curcpu, __builtin_arm_rsr64(PMCR0)); +} +#endif /* CPMU_AIC_PMI */ + +void +mt_fiq(void *cpu, uint64_t pmcr0, uint64_t upmsr) +{ +#if CPMU_AIC_PMI +#pragma unused(cpu, pmcr0) +#else /* CPMU_AIC_PMI */ + mt_cpu_pmi(cpu, pmcr0); +#endif /* !CPMU_AIC_PMI */ #pragma unused(upmsr) } @@ -384,9 +452,15 @@ mt_microstackshot_start_remote(__unused void *arg) int mt_microstackshot_start_arch(uint64_t period) { - mt_core_reset_values[mt_microstackshot_ctr] = CTR_MAX - period; + uint64_t reset_value = 0; + int ovf = os_sub_overflow(CTR_MAX, period, &reset_value); + if (ovf) { + return ERANGE; + } + + mt_core_reset_values[mt_microstackshot_ctr] = reset_value; cpu_broadcast_xcall(&mt_xc_sync, TRUE, mt_microstackshot_start_remote, - mt_microstackshot_start_remote /* cannot pass NULL */); + mt_microstackshot_start_remote /* cannot pass NULL */); return 0; } @@ -400,5 +474,5 @@ struct mt_device mt_devices[] = { }; static_assert( - (sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS, - "MT_NDEVS macro should be same as the length of mt_devices"); + (sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS, + "MT_NDEVS macro should be same as the length of mt_devices"); diff --git a/osfmk/arm64/pcb.c b/osfmk/arm64/pcb.c index d8809b38f..5904e612f 100644 --- a/osfmk/arm64/pcb.c +++ b/osfmk/arm64/pcb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2016 Apple Inc. All rights reserved. + * Copyright (c) 2007-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -118,6 +118,10 @@ machine_switch_context( new->machine.CpuDatap = cpu_data_ptr; + /* TODO: Should this be ordered? */ + old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU; + new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; + machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new); retval = Switch_context(old, continuation, new); @@ -345,6 +349,11 @@ machine_stack_handoff( pmap_switch(new_pmap); new->machine.CpuDatap = cpu_data_ptr; + + /* TODO: Should this be ordered? 
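+ * (That is: does anything outside the context-switch path read
+ * machine_thread_flags concurrently, or can these plain read-modify-write
+ * updates never be observed mid-switch?)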
*/ + old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU; + new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; + machine_set_current_thread(new); thread_initialize_kernel_state(new); @@ -887,3 +896,14 @@ machine_thread_set_tsd_base( return KERN_SUCCESS; } + +void +machine_tecs(__unused thread_t thr) +{ +} + +int +machine_csv(__unused cpuvn_e cve) +{ + return 0; +} diff --git a/osfmk/arm64/pgtrace.c b/osfmk/arm64/pgtrace.c index 8bb15a959..d13c8415b 100644 --- a/osfmk/arm64/pgtrace.c +++ b/osfmk/arm64/pgtrace.c @@ -35,11 +35,11 @@ #include typedef struct { - queue_chain_t chain; + queue_chain_t chain; - pmap_t pmap; - vm_offset_t start; - vm_offset_t end; + pmap_t pmap; + vm_offset_t start; + vm_offset_t end; } probe_t; #if CONFIG_PGTRACE_NONKEXT @@ -58,351 +58,365 @@ typedef struct { typedef uint8_t RWLOCK; typedef struct { - uint64_t id; - pgtrace_run_result_t res; - void *stack[PGTRACE_STACK_DEPTH]; + uint64_t id; + pgtrace_run_result_t res; + void *stack[PGTRACE_STACK_DEPTH]; } log_t; //-------------------------------------------- // Statics // static struct { - log_t *logs; // Protect - uint32_t size; // Protect - uint64_t rdidx, wridx; // Protect - decl_simple_lock_data(, loglock); - - uint64_t id; - uint32_t option; - uint32_t enabled; - uint32_t bytes; - - queue_head_t probes; // Protect - - lck_grp_t *lock_grp; - lck_grp_attr_t *lock_grp_attr; - lck_attr_t *lock_attr; - lck_mtx_t probelock; + log_t *logs; // Protect + uint32_t size; // Protect + uint64_t rdidx, wridx; // Protect + decl_simple_lock_data(, loglock); + + uint64_t id; + uint32_t option; + uint32_t enabled; + uint32_t bytes; + + queue_head_t probes; // Protect + + lck_grp_t *lock_grp; + lck_grp_attr_t *lock_grp_attr; + lck_attr_t *lock_attr; + lck_mtx_t probelock; } pgtrace = {}; //-------------------------------------------- // Globals // -void pgtrace_init(void) +void +pgtrace_init(void) { - simple_lock_init(&pgtrace.loglock, 0); + simple_lock_init(&pgtrace.loglock, 0); - pgtrace.lock_attr = lck_attr_alloc_init(); - pgtrace.lock_grp_attr = lck_grp_attr_alloc_init(); - pgtrace.lock_grp = lck_grp_alloc_init("pgtrace_lock", pgtrace.lock_grp_attr); + pgtrace.lock_attr = lck_attr_alloc_init(); + pgtrace.lock_grp_attr = lck_grp_attr_alloc_init(); + pgtrace.lock_grp = lck_grp_alloc_init("pgtrace_lock", pgtrace.lock_grp_attr); - lck_mtx_init(&pgtrace.probelock, pgtrace.lock_grp, pgtrace.lock_attr); + lck_mtx_init(&pgtrace.probelock, pgtrace.lock_grp, pgtrace.lock_attr); - queue_init(&pgtrace.probes); + queue_init(&pgtrace.probes); - pgtrace.size = RBUF_DEFAULT_SIZE; - pgtrace.logs = kalloc(RBUF_DEFAULT_SIZE * sizeof(log_t)); + pgtrace.size = RBUF_DEFAULT_SIZE; + pgtrace.logs = kalloc(RBUF_DEFAULT_SIZE * sizeof(log_t)); } - -void pgtrace_clear_probe(void) + +void +pgtrace_clear_probe(void) { - probe_t *p, *next; - queue_head_t *q = &pgtrace.probes; + probe_t *p, *next; + queue_head_t *q = &pgtrace.probes; - lck_mtx_lock(&pgtrace.probelock); + lck_mtx_lock(&pgtrace.probelock); - p = (probe_t *)queue_first(q); - while (!queue_end(q, (queue_entry_t)p)) { - next = (probe_t *)queue_next(&(p->chain)); + p = (probe_t *)queue_first(q); + while (!queue_end(q, (queue_entry_t)p)) { + next = (probe_t *)queue_next(&(p->chain)); - queue_remove(q, p, probe_t *, chain); - kfree(p, sizeof(probe_t)); + queue_remove(q, p, probe_t *, chain); + kfree(p, sizeof(probe_t)); - p = next; - } + p = next; + } - lck_mtx_unlock(&pgtrace.probelock); + lck_mtx_unlock(&pgtrace.probelock); - return; + return; } -int 
pgtrace_add_probe(thread_t thread, vm_offset_t start, vm_offset_t end) +int +pgtrace_add_probe(thread_t thread, vm_offset_t start, vm_offset_t end) { - probe_t *p; - queue_head_t *q = &pgtrace.probes; - - if (start > end) { - kprintf("%s Invalid start=%lx end=%lx\n", __func__, start, end); - return -1; - } - - p = kalloc(sizeof(probe_t)); - p->start = start; - p->end = end; - if (thread == NULL) { - p->pmap = NULL; - } else { - p->pmap = vm_map_pmap(thread->map); - } - - lck_mtx_lock(&pgtrace.probelock); - queue_enter(q, p, probe_t *, chain); - lck_mtx_unlock(&pgtrace.probelock); - - return 0; + probe_t *p; + queue_head_t *q = &pgtrace.probes; + + if (start > end) { + kprintf("%s Invalid start=%lx end=%lx\n", __func__, start, end); + return -1; + } + + p = kalloc(sizeof(probe_t)); + p->start = start; + p->end = end; + if (thread == NULL) { + p->pmap = NULL; + } else { + p->pmap = vm_map_pmap(thread->map); + } + + lck_mtx_lock(&pgtrace.probelock); + queue_enter(q, p, probe_t *, chain); + lck_mtx_unlock(&pgtrace.probelock); + + return 0; } -void pgtrace_start(void) +void +pgtrace_start(void) { - probe_t *p; - queue_head_t *q = &pgtrace.probes; + probe_t *p; + queue_head_t *q = &pgtrace.probes; - kprintf("%s\n", __func__); + kprintf("%s\n", __func__); - if (pgtrace.enabled) { - return; - } + if (pgtrace.enabled) { + return; + } - pgtrace.enabled = 1; + pgtrace.enabled = 1; - lck_mtx_lock(&pgtrace.probelock); + lck_mtx_lock(&pgtrace.probelock); - queue_iterate(q, p, probe_t *, chain) { - pmap_pgtrace_add_page(p->pmap, p->start, p->end); - } + queue_iterate(q, p, probe_t *, chain) { + pmap_pgtrace_add_page(p->pmap, p->start, p->end); + } - lck_mtx_unlock(&pgtrace.probelock); + lck_mtx_unlock(&pgtrace.probelock); - return; + return; } -void pgtrace_stop(void) +void +pgtrace_stop(void) { - probe_t *p; - queue_head_t *q = &pgtrace.probes; + probe_t *p; + queue_head_t *q = &pgtrace.probes; - kprintf("%s\n", __func__); + kprintf("%s\n", __func__); - lck_mtx_lock(&pgtrace.probelock); + lck_mtx_lock(&pgtrace.probelock); - queue_iterate(q, p, probe_t *, chain) { - pmap_pgtrace_delete_page(p->pmap, p->start, p->end); - } + queue_iterate(q, p, probe_t *, chain) { + pmap_pgtrace_delete_page(p->pmap, p->start, p->end); + } - lck_mtx_unlock(&pgtrace.probelock); + lck_mtx_unlock(&pgtrace.probelock); - pgtrace.enabled = 0; + pgtrace.enabled = 0; } -uint32_t pgtrace_get_size(void) +uint32_t +pgtrace_get_size(void) { - return pgtrace.size; + return pgtrace.size; } -bool pgtrace_set_size(uint32_t size) +bool +pgtrace_set_size(uint32_t size) { - log_t *old_buf, *new_buf; - uint32_t old_size, new_size = 1; - - // round up to next power of 2 - while (size > new_size) { - new_size <<= 1; - if (new_size > 0x100000) { - // over million entries - kprintf("%s: size=%x new_size=%x is too big\n", __func__, size, new_size); - return false; - } - } - - new_buf = kalloc(new_size * sizeof(log_t)); - if (new_buf == NULL) { - kprintf("%s: can't allocate new_size=%x\n entries", __func__, new_size); - return false; - } - - pgtrace_stop(); - - simple_lock(&pgtrace.loglock); - old_buf = pgtrace.logs; - old_size = pgtrace.size; - pgtrace.logs = new_buf; - pgtrace.size = new_size; - pgtrace.rdidx = pgtrace.wridx = 0; - simple_unlock(&pgtrace.loglock); - - if (old_buf) { - kfree(old_buf, old_size * sizeof(log_t)); - } - - return true; + log_t *old_buf, *new_buf; + uint32_t old_size, new_size = 1; + + // round up to next power of 2 + while (size > new_size) { + new_size <<= 1; + if (new_size > 0x100000) { + // over million entries + 
kprintf("%s: size=%x new_size=%x is too big\n", __func__, size, new_size); + return false; + } + } + + new_buf = kalloc(new_size * sizeof(log_t)); + if (new_buf == NULL) { + kprintf("%s: can't allocate new_size=%x\n entries", __func__, new_size); + return false; + } + + pgtrace_stop(); + + simple_lock(&pgtrace.loglock); + old_buf = pgtrace.logs; + old_size = pgtrace.size; + pgtrace.logs = new_buf; + pgtrace.size = new_size; + pgtrace.rdidx = pgtrace.wridx = 0; + simple_unlock(&pgtrace.loglock); + + if (old_buf) { + kfree(old_buf, old_size * sizeof(log_t)); + } + + return true; } -void pgtrace_clear_trace(void) +void +pgtrace_clear_trace(void) { - simple_lock(&pgtrace.loglock); - pgtrace.rdidx = pgtrace.wridx = 0; - simple_unlock(&pgtrace.loglock); + simple_lock(&pgtrace.loglock); + pgtrace.rdidx = pgtrace.wridx = 0; + simple_unlock(&pgtrace.loglock); } -boolean_t pgtrace_active(void) +boolean_t +pgtrace_active(void) { - return (pgtrace.enabled > 0); + return pgtrace.enabled > 0; } -uint32_t pgtrace_get_option(void) +uint32_t +pgtrace_get_option(void) { - return pgtrace.option; + return pgtrace.option; } -void pgtrace_set_option(uint32_t option) +void +pgtrace_set_option(uint32_t option) { - pgtrace.option = option; + pgtrace.option = option; } // pgtrace_write_log() is in interrupt disabled context -void pgtrace_write_log(pgtrace_run_result_t res) +void +pgtrace_write_log(pgtrace_run_result_t res) { - uint8_t i; - log_t log = {}; - const char *rwmap[] = { "R", "W", "PREFETCH" }; + uint8_t i; + log_t log = {}; + const char *rwmap[] = { "R", "W", "PREFETCH" }; + + log.id = pgtrace.id++; + log.res = res; - log.id = pgtrace.id++; - log.res = res; + if (pgtrace.option & PGTRACE_OPTION_KPRINTF) { + char msg[MSG_MAX]; + char *p; - if (pgtrace.option & PGTRACE_OPTION_KPRINTF) { - char msg[MSG_MAX]; - char *p; + p = msg; - p = msg; + snprintf(p, MSG_MAX, "%llu %s ", res.rr_time, rwmap[res.rr_rw]); + p += strlen(p); - snprintf(p, MSG_MAX, "%llu %s ", res.rr_time, rwmap[res.rr_rw]); - p += strlen(p); + for (i = 0; i < res.rr_num; i++) { + snprintf(p, MSG_MAX - (p - msg), "%lx=%llx ", res.rr_addrdata[i].ad_addr, res.rr_addrdata[i].ad_data); + p += strlen(p); + } - for (i = 0; i < res.rr_num; i++) { - snprintf(p, MSG_MAX-(p-msg), "%lx=%llx ", res.rr_addrdata[i].ad_addr, res.rr_addrdata[i].ad_data); - p += strlen(p); - } + kprintf("%s %s\n", __func__, msg); + } - kprintf("%s %s\n", __func__, msg); - } - - if (pgtrace.option & PGTRACE_OPTION_STACK) { - OSBacktrace(log.stack, PGTRACE_STACK_DEPTH); - } + if (pgtrace.option & PGTRACE_OPTION_STACK) { + OSBacktrace(log.stack, PGTRACE_STACK_DEPTH); + } - pgtrace.bytes += sizeof(log); + pgtrace.bytes += sizeof(log); - simple_lock(&pgtrace.loglock); + simple_lock(&pgtrace.loglock); - pgtrace.logs[RBUF_IDX(pgtrace.wridx, pgtrace.size-1)] = log; + pgtrace.logs[RBUF_IDX(pgtrace.wridx, pgtrace.size - 1)] = log; - // Advance rdidx if ring is full - if (RBUF_IDX(pgtrace.wridx, pgtrace.size-1) == RBUF_IDX(pgtrace.rdidx, pgtrace.size-1) && - (pgtrace.wridx != pgtrace.rdidx)) { - pgtrace.rdidx++; - } - pgtrace.wridx++; + // Advance rdidx if ring is full + if (RBUF_IDX(pgtrace.wridx, pgtrace.size - 1) == RBUF_IDX(pgtrace.rdidx, pgtrace.size - 1) && + (pgtrace.wridx != pgtrace.rdidx)) { + pgtrace.rdidx++; + } + pgtrace.wridx++; - // Signal if ring was empty - if (pgtrace.wridx == (pgtrace.rdidx + 1)) { - thread_wakeup(pgtrace.logs); - } + // Signal if ring was empty + if (pgtrace.wridx == (pgtrace.rdidx + 1)) { + thread_wakeup(pgtrace.logs); + } - 
simple_unlock(&pgtrace.loglock); + simple_unlock(&pgtrace.loglock); - return; + return; } // pgtrace_read_log() is in user thread -int64_t pgtrace_read_log(uint8_t *buf, uint32_t size) +int64_t +pgtrace_read_log(uint8_t *buf, uint32_t size) { - int total, front, back; - boolean_t ints; - wait_result_t wr; + int total, front, back; + boolean_t ints; + wait_result_t wr; - if (pgtrace.enabled == FALSE) { - return -EINVAL; - } + if (pgtrace.enabled == FALSE) { + return -EINVAL; + } - total = size / sizeof(log_t); + total = size / sizeof(log_t); - // Check if buf is too small - if (buf && total == 0) { - return -EINVAL; - } + // Check if buf is too small + if (buf && total == 0) { + return -EINVAL; + } - ints = ml_set_interrupts_enabled(FALSE); - simple_lock(&pgtrace.loglock); + ints = ml_set_interrupts_enabled(FALSE); + simple_lock(&pgtrace.loglock); - // Wait if ring is empty - if (pgtrace.rdidx == pgtrace.wridx) { - assert_wait(pgtrace.logs, THREAD_ABORTSAFE); + // Wait if ring is empty + if (pgtrace.rdidx == pgtrace.wridx) { + assert_wait(pgtrace.logs, THREAD_ABORTSAFE); - simple_unlock(&pgtrace.loglock); - ml_set_interrupts_enabled(ints); + simple_unlock(&pgtrace.loglock); + ml_set_interrupts_enabled(ints); - wr = thread_block(NULL); - if (wr != THREAD_AWAKENED) { - return -EINTR; - } + wr = thread_block(NULL); + if (wr != THREAD_AWAKENED) { + return -EINTR; + } - ints = ml_set_interrupts_enabled(FALSE); - simple_lock(&pgtrace.loglock); - } + ints = ml_set_interrupts_enabled(FALSE); + simple_lock(&pgtrace.loglock); + } - // Trim the size - if ((pgtrace.rdidx + total) > pgtrace.wridx) { - total = (int)(pgtrace.wridx - pgtrace.rdidx); - } + // Trim the size + if ((pgtrace.rdidx + total) > pgtrace.wridx) { + total = (int)(pgtrace.wridx - pgtrace.rdidx); + } - // Copy front - if ((RBUF_IDX(pgtrace.rdidx, pgtrace.size-1) + total) >= pgtrace.size) { - front = pgtrace.size - RBUF_IDX(pgtrace.rdidx, pgtrace.size-1); - } else { - front = total; - } + // Copy front + if ((RBUF_IDX(pgtrace.rdidx, pgtrace.size - 1) + total) >= pgtrace.size) { + front = pgtrace.size - RBUF_IDX(pgtrace.rdidx, pgtrace.size - 1); + } else { + front = total; + } - memcpy(buf, &(pgtrace.logs[RBUF_IDX(pgtrace.rdidx, pgtrace.size-1)]), front*sizeof(log_t)); + memcpy(buf, &(pgtrace.logs[RBUF_IDX(pgtrace.rdidx, pgtrace.size - 1)]), front * sizeof(log_t)); - // Copy back if any - back = total-front; - if (back) { - buf += front * sizeof(log_t); - memcpy(buf, pgtrace.logs, back*sizeof(log_t)); - } + // Copy back if any + back = total - front; + if (back) { + buf += front * sizeof(log_t); + memcpy(buf, pgtrace.logs, back * sizeof(log_t)); + } - pgtrace.rdidx += total; + pgtrace.rdidx += total; - simple_unlock(&pgtrace.loglock); - ml_set_interrupts_enabled(ints); + simple_unlock(&pgtrace.loglock); + ml_set_interrupts_enabled(ints); - return total*sizeof(log_t); + return total * sizeof(log_t); } -int pgtrace_get_stats(pgtrace_stats_t *stats) +int +pgtrace_get_stats(pgtrace_stats_t *stats) { - if (!stats) { - return -1; - } + if (!stats) { + return -1; + } - stats->stat_logger.sl_bytes = pgtrace.bytes; - pgtrace_decoder_get_stats(stats); + stats->stat_logger.sl_bytes = pgtrace.bytes; + pgtrace_decoder_get_stats(stats); - return 0; + return 0; } #else // CONFIG_PGTRACE_NONKEXT static struct { - bool active; - decoder_t *decoder; - logger_t *logger; - queue_head_t probes; - - lck_grp_t *lock_grp; - lck_grp_attr_t *lock_grp_attr; - lck_attr_t *lock_attr; - lck_mtx_t probelock; + bool active; + decoder_t *decoder; + logger_t *logger; + 
queue_head_t probes; + + lck_grp_t *lock_grp; + lck_grp_attr_t *lock_grp_attr; + lck_attr_t *lock_attr; + lck_mtx_t probelock; } pgtrace = {}; //------------------------------------ @@ -410,38 +424,40 @@ static struct { // - pgtrace_decode_and_run // - pgtrace_write_log //------------------------------------ -int pgtrace_decode_and_run(uint32_t inst, vm_offset_t fva, vm_map_offset_t *cva_page, arm_saved_state_t *ss, pgtrace_run_result_t *res) +int +pgtrace_decode_and_run(uint32_t inst, vm_offset_t fva, vm_map_offset_t *cva_page, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - vm_offset_t pa, cva; - pgtrace_instruction_info_t info; - vm_offset_t cva_front_page = cva_page[0]; - vm_offset_t cva_cur_page = cva_page[1]; - - pgtrace.decoder->decode(inst, ss, &info); - - if (info.addr == fva) { - cva = cva_cur_page + (fva & ARM_PGMASK); - } else { - // which means a front page is not a tracing page - cva = cva_front_page + (fva & ARM_PGMASK); - } - - pa = mmu_kvtop(cva); - if (!pa) { - panic("%s: invalid address cva=%lx fva=%lx info.addr=%lx inst=%x", __func__, cva, fva, info.addr, inst); - } - - absolutetime_to_nanoseconds(mach_absolute_time(), &res->rr_time); - - pgtrace.decoder->run(inst, pa, cva, ss, res); - - return 0; + vm_offset_t pa, cva; + pgtrace_instruction_info_t info; + vm_offset_t cva_front_page = cva_page[0]; + vm_offset_t cva_cur_page = cva_page[1]; + + pgtrace.decoder->decode(inst, ss, &info); + + if (info.addr == fva) { + cva = cva_cur_page + (fva & ARM_PGMASK); + } else { + // which means a front page is not a tracing page + cva = cva_front_page + (fva & ARM_PGMASK); + } + + pa = mmu_kvtop(cva); + if (!pa) { + panic("%s: invalid address cva=%lx fva=%lx info.addr=%lx inst=%x", __func__, cva, fva, info.addr, inst); + } + + absolutetime_to_nanoseconds(mach_absolute_time(), &res->rr_time); + + pgtrace.decoder->run(inst, pa, cva, ss, res); + + return 0; } -int pgtrace_write_log(pgtrace_run_result_t res) +int +pgtrace_write_log(pgtrace_run_result_t res) { - pgtrace.logger->write(res); - return 0; + pgtrace.logger->write(res); + return 0; } //------------------------------------ @@ -453,128 +469,134 @@ int pgtrace_write_log(pgtrace_run_result_t res) // - pgtrace_stop // - pgtrace_active //------------------------------------ -int pgtrace_init(decoder_t *decoder, logger_t *logger) +int +pgtrace_init(decoder_t *decoder, logger_t *logger) { - kprintf("%s decoder=%p logger=%p\n", __func__, decoder, logger); + kprintf("%s decoder=%p logger=%p\n", __func__, decoder, logger); - assert(decoder && logger); + assert(decoder && logger); - if (decoder->magic != 0xfeedface || logger->magic != 0xfeedface || - strcmp(decoder->arch, "arm64") != 0 || strcmp(logger->arch, "arm64") != 0) { - kprintf("%s:wrong decoder/logger magic=%llx/%llx arch=%s/%s", __func__, decoder->magic, logger->magic, decoder->arch, logger->arch); - return EINVAL; - } + if (decoder->magic != 0xfeedface || logger->magic != 0xfeedface || + strcmp(decoder->arch, "arm64") != 0 || strcmp(logger->arch, "arm64") != 0) { + kprintf("%s:wrong decoder/logger magic=%llx/%llx arch=%s/%s", __func__, decoder->magic, logger->magic, decoder->arch, logger->arch); + return EINVAL; + } - pgtrace.lock_attr = lck_attr_alloc_init(); - pgtrace.lock_grp_attr = lck_grp_attr_alloc_init(); - pgtrace.lock_grp = lck_grp_alloc_init("pgtrace_lock", pgtrace.lock_grp_attr); + pgtrace.lock_attr = lck_attr_alloc_init(); + pgtrace.lock_grp_attr = lck_grp_attr_alloc_init(); + pgtrace.lock_grp = lck_grp_alloc_init("pgtrace_lock", pgtrace.lock_grp_attr); - 
lck_mtx_init(&pgtrace.probelock, pgtrace.lock_grp, pgtrace.lock_attr); + lck_mtx_init(&pgtrace.probelock, pgtrace.lock_grp, pgtrace.lock_attr); - queue_init(&pgtrace.probes); - pgtrace.decoder = decoder; - pgtrace.logger = logger; + queue_init(&pgtrace.probes); + pgtrace.decoder = decoder; + pgtrace.logger = logger; - return 0; + return 0; } - -int pgtrace_add_probe(thread_t thread, vm_offset_t start, vm_offset_t end) + +int +pgtrace_add_probe(thread_t thread, vm_offset_t start, vm_offset_t end) { - probe_t *p; - queue_head_t *q = &pgtrace.probes; - - kprintf("%s start=%lx end=%lx\n", __func__, start, end); - - if (start > end) { - kprintf("%s Invalid start=%lx end=%lx\n", __func__, start, end); - return -1; - } - - p = kalloc(sizeof(probe_t)); - p->start = start; - p->end = end; - if (thread == NULL) { - p->pmap = NULL; - } else { - p->pmap = vm_map_pmap(thread->map); - } - - lck_mtx_lock(&pgtrace.probelock); - queue_enter(q, p, probe_t *, chain); - lck_mtx_unlock(&pgtrace.probelock); - - return 0; + probe_t *p; + queue_head_t *q = &pgtrace.probes; + + kprintf("%s start=%lx end=%lx\n", __func__, start, end); + + if (start > end) { + kprintf("%s Invalid start=%lx end=%lx\n", __func__, start, end); + return -1; + } + + p = kalloc(sizeof(probe_t)); + p->start = start; + p->end = end; + if (thread == NULL) { + p->pmap = NULL; + } else { + p->pmap = vm_map_pmap(thread->map); + } + + lck_mtx_lock(&pgtrace.probelock); + queue_enter(q, p, probe_t *, chain); + lck_mtx_unlock(&pgtrace.probelock); + + return 0; } -void pgtrace_clear_probe(void) +void +pgtrace_clear_probe(void) { - probe_t *p, *next; - queue_head_t *q = &pgtrace.probes; + probe_t *p, *next; + queue_head_t *q = &pgtrace.probes; - kprintf("%s\n", __func__); + kprintf("%s\n", __func__); - lck_mtx_lock(&pgtrace.probelock); + lck_mtx_lock(&pgtrace.probelock); - p = (probe_t *)queue_first(q); - while (!queue_end(q, (queue_entry_t)p)) { - next = (probe_t *)queue_next(&(p->chain)); + p = (probe_t *)queue_first(q); + while (!queue_end(q, (queue_entry_t)p)) { + next = (probe_t *)queue_next(&(p->chain)); - queue_remove(q, p, probe_t *, chain); - kfree(p, sizeof(probe_t)); + queue_remove(q, p, probe_t *, chain); + kfree(p, sizeof(probe_t)); - p = next; - } + p = next; + } - lck_mtx_unlock(&pgtrace.probelock); + lck_mtx_unlock(&pgtrace.probelock); - return; + return; } -void pgtrace_start(void) +void +pgtrace_start(void) { - probe_t *p; - queue_head_t *q = &pgtrace.probes; + probe_t *p; + queue_head_t *q = &pgtrace.probes; - kprintf("%s\n", __func__); + kprintf("%s\n", __func__); - if (pgtrace.active == true) { - return; - } + if (pgtrace.active == true) { + return; + } - pgtrace.active = true; + pgtrace.active = true; - lck_mtx_lock(&pgtrace.probelock); + lck_mtx_lock(&pgtrace.probelock); - queue_iterate(q, p, probe_t *, chain) { - pmap_pgtrace_add_page(p->pmap, p->start, p->end); - } + queue_iterate(q, p, probe_t *, chain) { + pmap_pgtrace_add_page(p->pmap, p->start, p->end); + } - lck_mtx_unlock(&pgtrace.probelock); + lck_mtx_unlock(&pgtrace.probelock); - return; + return; } -void pgtrace_stop(void) +void +pgtrace_stop(void) { - probe_t *p; - queue_head_t *q = &pgtrace.probes; + probe_t *p; + queue_head_t *q = &pgtrace.probes; - kprintf("%s\n", __func__); + kprintf("%s\n", __func__); - lck_mtx_lock(&pgtrace.probelock); + lck_mtx_lock(&pgtrace.probelock); - queue_iterate(q, p, probe_t *, chain) { - pmap_pgtrace_delete_page(p->pmap, p->start, p->end); - } + queue_iterate(q, p, probe_t *, chain) { + pmap_pgtrace_delete_page(p->pmap, p->start, 
p->end); + } - lck_mtx_unlock(&pgtrace.probelock); + lck_mtx_unlock(&pgtrace.probelock); - pgtrace.active = false; + pgtrace.active = false; } -bool pgtrace_active(void) +bool +pgtrace_active(void) { - return pgtrace.active; + return pgtrace.active; } #endif // CONFIG_PGTRACE_NONKEXT #else @@ -585,10 +607,28 @@ extern void pgtrace_clear_probe(void); extern void pgtrace_add_probe(void); extern void pgtrace_init(void); extern void pgtrace_active(void); -void pgtrace_stop(void) {} -void pgtrace_start(void) {} -void pgtrace_clear_probe(void) {} -void pgtrace_add_probe(void) {} -void pgtrace_init(void) {} -void pgtrace_active(void) {} +void +pgtrace_stop(void) +{ +} +void +pgtrace_start(void) +{ +} +void +pgtrace_clear_probe(void) +{ +} +void +pgtrace_add_probe(void) +{ +} +void +pgtrace_init(void) +{ +} +void +pgtrace_active(void) +{ +} #endif diff --git a/osfmk/arm64/pgtrace.h b/osfmk/arm64/pgtrace.h index bbee25de9..06c2ca198 100644 --- a/osfmk/arm64/pgtrace.h +++ b/osfmk/arm64/pgtrace.h @@ -37,21 +37,21 @@ #define PGTRACE_STACK_DEPTH 8 typedef enum { - PGTRACE_RW_LOAD, - PGTRACE_RW_STORE, - PGTRACE_RW_PREFETCH + PGTRACE_RW_LOAD, + PGTRACE_RW_STORE, + PGTRACE_RW_PREFETCH } pgtrace_rw_t; typedef struct { - vm_offset_t ad_addr; - uint64_t ad_data; + vm_offset_t ad_addr; + uint64_t ad_data; } pgtrace_addr_data_t; typedef struct { - uint64_t rr_time; - pgtrace_rw_t rr_rw; - uint8_t rr_num; - pgtrace_addr_data_t rr_addrdata[RR_NUM_MAX]; + uint64_t rr_time; + pgtrace_rw_t rr_rw; + uint8_t rr_num; + pgtrace_addr_data_t rr_addrdata[RR_NUM_MAX]; } pgtrace_run_result_t; #ifdef CONFIG_PGTRACE_NONKEXT @@ -61,35 +61,35 @@ typedef struct { #define PGTRACE_OPTION_SPIN 0x4 typedef struct { - struct { - uint32_t sl_bytes; - } stat_logger; - - struct { - uint64_t sd_ldr; - uint64_t sd_str; - uint64_t sd_ldrs; - uint64_t sd_ldtr; - uint64_t sd_sttr; - uint64_t sd_ldtrs; - uint64_t sd_ldp; - uint64_t sd_stp; - uint64_t sd_ldpsw; - uint64_t sd_prfm; - - uint64_t sd_c335; - uint64_t sd_c336; - uint64_t sd_c337; - uint64_t sd_c338; - uint64_t sd_c339; - uint64_t sd_c3310; - uint64_t sd_c3311; - uint64_t sd_c3312; - uint64_t sd_c3313; - uint64_t sd_c3314; - uint64_t sd_c3315; - uint64_t sd_c3316; - } stat_decoder; + struct { + uint32_t sl_bytes; + } stat_logger; + + struct { + uint64_t sd_ldr; + uint64_t sd_str; + uint64_t sd_ldrs; + uint64_t sd_ldtr; + uint64_t sd_sttr; + uint64_t sd_ldtrs; + uint64_t sd_ldp; + uint64_t sd_stp; + uint64_t sd_ldpsw; + uint64_t sd_prfm; + + uint64_t sd_c335; + uint64_t sd_c336; + uint64_t sd_c337; + uint64_t sd_c338; + uint64_t sd_c339; + uint64_t sd_c3310; + uint64_t sd_c3311; + uint64_t sd_c3312; + uint64_t sd_c3313; + uint64_t sd_c3314; + uint64_t sd_c3315; + uint64_t sd_c3316; + } stat_decoder; } pgtrace_stats_t; void pgtrace_init(void); @@ -112,14 +112,14 @@ int pgtrace_get_stats(pgtrace_stats_t *stats); extern "C" { #endif typedef struct { - vm_offset_t addr; - uint64_t bytes; + vm_offset_t addr; + uint64_t bytes; } pgtrace_instruction_info_t; -typedef struct { - uint64_t id; - pgtrace_run_result_t res; - void *stack[PGTRACE_STACK_DEPTH]; +typedef struct { + uint64_t id; + pgtrace_run_result_t res; + void *stack[PGTRACE_STACK_DEPTH]; } log_t; typedef int (*run_func_t)(uint32_t inst, vm_offset_t pa, vm_offset_t va, void *ss, pgtrace_run_result_t *res); @@ -127,18 +127,18 @@ typedef bool (*decode_func_t)(uint32_t inst, void *ss, pgtrace_instruction_info_ typedef void (*write_func_t)(pgtrace_run_result_t res); typedef struct { - uint64_t magic; - char *arch; - char *desc; - 
decode_func_t decode; - run_func_t run; + uint64_t magic; + char *arch; + char *desc; + decode_func_t decode; + run_func_t run; } decoder_t; typedef struct { - uint64_t magic; - char *arch; - char *desc; - write_func_t write; + uint64_t magic; + char *arch; + char *desc; + write_func_t write; } logger_t; //------------------------------------ diff --git a/osfmk/arm64/pgtrace_decoder.c b/osfmk/arm64/pgtrace_decoder.c index 98471cbd6..9a39a4454 100644 --- a/osfmk/arm64/pgtrace_decoder.c +++ b/osfmk/arm64/pgtrace_decoder.c @@ -41,7 +41,7 @@ #define INLINE __attribute__((noinline)) #else #define INLINE inline -#endif +#endif #define BITS(v, msb, lsb) ((v) << (31-msb) >> (31-msb) >> (lsb)) #define READ_GPR_X(ss, n, v) { \ @@ -73,17 +73,17 @@ typedef int (*run_t)(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res); typedef struct { - vm_offset_t addr; - uint64_t bytes; + vm_offset_t addr; + uint64_t bytes; } instruction_info_t; typedef bool (*get_info_t)(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info); typedef struct { - uint32_t mask; - uint32_t value; - run_t run; - get_info_t get_info; + uint32_t mask; + uint32_t value; + run_t run; + get_info_t get_info; } type_entry_t; //------------------------------------------------------------------- @@ -118,1434 +118,1677 @@ static bool get_info_c3316(uint32_t inst, arm_saved_state_t *ss, instruction_inf // Table from ARM DDI 0487A.a C3.3 static type_entry_t typetbl[] = { - { 0x3f000000, 0x08000000, run_c336, get_info_c336 }, // Load/store exclusive - { 0x3b000000, 0x18000000, run_c335, get_info_c335 }, // Load register (literal) - { 0x3b800000, 0x28000000, run_c337, get_info_c337 }, // Load/store no-allocate pair (offset) - { 0x3b800000, 0x28800000, run_c3315, get_info_c3315 }, // Load/store register pair (post-indexed) - { 0x3b800000, 0x29000000, run_c3314, get_info_c3314 }, // Load/store register pair (offset) - { 0x3b800000, 0x29800000, run_c3316, get_info_c3316 }, // Load/store register pair (pre-indexed) - { 0x3b200c00, 0x38000000, run_c3312, get_info_c3312 }, // Load/store register (unscaled immediate) - { 0x3b200c00, 0x38000400, run_c338, get_info_c338 }, // Load/store register (immediate post-indexed) - { 0x3b200c00, 0x38000800, run_c3311, get_info_c3311 }, // Load/store register (unprivileged) - { 0x3b200c00, 0x38000c00, run_c339, get_info_c339 }, // Load/store register (immediate pre-indexed) - { 0x3b200c00, 0x38200800, run_c3310, get_info_c3310 }, // Load/store register (register offset) - { 0x3b000000, 0x39000000, run_c3313, get_info_c3313 }, // Load/store register (unsigned immediate) - - { 0xbfbf0000, 0x0c000000, run_simd, get_info_simd }, // AdvSIMD load/store multiple structures - { 0xbfa00000, 0x0c800000, run_simd, get_info_simd }, // AdvSIMD load/store multiple structures (post-indexed) - { 0xbf980000, 0x0d000000, run_simd, get_info_simd }, // AdvSIMD load/store single structure - { 0xbf800000, 0x0d800000, run_simd, get_info_simd } // AdvSIMD load/store single structure (post-indexed) + { 0x3f000000, 0x08000000, run_c336, get_info_c336 }, // Load/store exclusive + { 0x3b000000, 0x18000000, run_c335, get_info_c335 }, // Load register (literal) + { 0x3b800000, 0x28000000, run_c337, get_info_c337 }, // Load/store no-allocate pair (offset) + { 0x3b800000, 0x28800000, run_c3315, get_info_c3315 }, // Load/store register pair (post-indexed) + { 0x3b800000, 0x29000000, run_c3314, get_info_c3314 }, // Load/store register pair (offset) + { 0x3b800000, 0x29800000, run_c3316, 
get_info_c3316 }, // Load/store register pair (pre-indexed) + { 0x3b200c00, 0x38000000, run_c3312, get_info_c3312 }, // Load/store register (unscaled immediate) + { 0x3b200c00, 0x38000400, run_c338, get_info_c338 }, // Load/store register (immediate post-indexed) + { 0x3b200c00, 0x38000800, run_c3311, get_info_c3311 }, // Load/store register (unprivileged) + { 0x3b200c00, 0x38000c00, run_c339, get_info_c339 }, // Load/store register (immediate pre-indexed) + { 0x3b200c00, 0x38200800, run_c3310, get_info_c3310 }, // Load/store register (register offset) + { 0x3b000000, 0x39000000, run_c3313, get_info_c3313 }, // Load/store register (unsigned immediate) + + { 0xbfbf0000, 0x0c000000, run_simd, get_info_simd }, // AdvSIMD load/store multiple structures + { 0xbfa00000, 0x0c800000, run_simd, get_info_simd }, // AdvSIMD load/store multiple structures (post-indexed) + { 0xbf980000, 0x0d000000, run_simd, get_info_simd }, // AdvSIMD load/store single structure + { 0xbf800000, 0x0d800000, run_simd, get_info_simd } // AdvSIMD load/store single structure (post-indexed) }; static pgtrace_stats_t stats; -INLINE static void do_str(uint8_t size, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_str(uint8_t size, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt; - uint64_t xt; - - res->rr_rw = PGTRACE_RW_STORE; - - if (size == 8) { - READ_GPR_X(ss, Rt, xt); - res->rr_addrdata[0].ad_data = xt; - } else { - READ_GPR_W(ss, Rt, wt); - res->rr_addrdata[0].ad_data = wt; - } - - if (size == 1) __asm__ volatile("strb %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); - else if (size == 2) __asm__ volatile("strh %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); - else if (size == 4) __asm__ volatile("str %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); - else if (size == 8) __asm__ volatile("str %x[xt], [%[va]]\n" :: [xt] "r"(xt), [va] "r"(va)); - else panic("%s Invalid size %d\n", __func__, size); - - stats.stat_decoder.sd_str++; + uint32_t wt; + uint64_t xt; + + res->rr_rw = PGTRACE_RW_STORE; + + if (size == 8) { + READ_GPR_X(ss, Rt, xt); + res->rr_addrdata[0].ad_data = xt; + } else { + READ_GPR_W(ss, Rt, wt); + res->rr_addrdata[0].ad_data = wt; + } + + if (size == 1) { + __asm__ volatile ("strb %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); + } else if (size == 2) { + __asm__ volatile ("strh %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); + } else if (size == 4) { + __asm__ volatile ("str %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); + } else if (size == 8) { + __asm__ volatile ("str %x[xt], [%[va]]\n" :: [xt] "r"(xt), [va] "r"(va)); + } else { + panic("%s Invalid size %d\n", __func__, size); + } + + stats.stat_decoder.sd_str++; } -INLINE static void do_ldr(uint8_t size, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_ldr(uint8_t size, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt; - uint64_t xt; - - res->rr_rw = PGTRACE_RW_LOAD; - - if (size == 1) __asm__ volatile("ldrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 2) __asm__ volatile("ldrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 4) __asm__ volatile("ldr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 8) __asm__ volatile("ldr %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - else panic("%s Invalid size %d\n", __func__, size); - - if (size == 8) { - WRITE_GPR_X(ss, Rt, 
xt); - res->rr_addrdata[0].ad_data = xt; - } else { - WRITE_GPR_W(ss, Rt, wt); - res->rr_addrdata[0].ad_data = wt; - } - - stats.stat_decoder.sd_ldr++; + uint32_t wt; + uint64_t xt; + + res->rr_rw = PGTRACE_RW_LOAD; + + if (size == 1) { + __asm__ volatile ("ldrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 2) { + __asm__ volatile ("ldrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 4) { + __asm__ volatile ("ldr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 8) { + __asm__ volatile ("ldr %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + } else { + panic("%s Invalid size %d\n", __func__, size); + } + + if (size == 8) { + WRITE_GPR_X(ss, Rt, xt); + res->rr_addrdata[0].ad_data = xt; + } else { + WRITE_GPR_W(ss, Rt, wt); + res->rr_addrdata[0].ad_data = wt; + } + + stats.stat_decoder.sd_ldr++; } -INLINE static void do_stp(uint8_t size, uint8_t Rt, uint8_t Rt2, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_stp(uint8_t size, uint8_t Rt, uint8_t Rt2, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt1, wt2; - uint64_t xt1, xt2; - - if (size == 4) { - READ_GPR_W(ss, Rt, wt1); - READ_GPR_W(ss, Rt2, wt2); - __asm__ volatile("stp %w[wt1], %w[wt2], [%[va]]\n" :: [wt1] "r"(wt1), [wt2] "r"(wt2), [va] "r"(va)); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[1].ad_addr = va+sizeof(wt1); - res->rr_addrdata[0].ad_data = wt1; - res->rr_addrdata[1].ad_data = wt2; - } else if (size == 8) { - READ_GPR_X(ss, Rt, xt1); - READ_GPR_X(ss, Rt2, xt2); - __asm__ volatile("stp %x[xt1], %x[xt2], [%[va]]\n" :: [xt1] "r"(xt1), [xt2] "r"(xt2), [va] "r"(va)); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[1].ad_addr = va+sizeof(xt1); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[1].ad_data = xt2; - } else panic("%s Invalid size %d\n", __func__, size); - - stats.stat_decoder.sd_stp++; + uint32_t wt1, wt2; + uint64_t xt1, xt2; + + if (size == 4) { + READ_GPR_W(ss, Rt, wt1); + READ_GPR_W(ss, Rt2, wt2); + __asm__ volatile ("stp %w[wt1], %w[wt2], [%[va]]\n" :: [wt1] "r"(wt1), [wt2] "r"(wt2), [va] "r"(va)); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[1].ad_addr = va + sizeof(wt1); + res->rr_addrdata[0].ad_data = wt1; + res->rr_addrdata[1].ad_data = wt2; + } else if (size == 8) { + READ_GPR_X(ss, Rt, xt1); + READ_GPR_X(ss, Rt2, xt2); + __asm__ volatile ("stp %x[xt1], %x[xt2], [%[va]]\n" :: [xt1] "r"(xt1), [xt2] "r"(xt2), [va] "r"(va)); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[1].ad_addr = va + sizeof(xt1); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[1].ad_data = xt2; + } else { + panic("%s Invalid size %d\n", __func__, size); + } + + stats.stat_decoder.sd_stp++; } -INLINE static void do_ldp(uint8_t size, uint8_t Rt, uint8_t Rt2, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_ldp(uint8_t size, uint8_t Rt, uint8_t Rt2, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt1, wt2; - uint64_t xt1, xt2; - - if (size == 4) { - __asm__ volatile("ldp %w[wt1], %w[wt2], [%[va]]\n" : [wt1] "=r"(wt1), [wt2] "=r"(wt2) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt1); - WRITE_GPR_W(ss, Rt2, wt2); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[1].ad_addr = va+sizeof(wt1); - res->rr_addrdata[0].ad_data = wt1; - res->rr_addrdata[1].ad_data = wt2; - } else if (size == 8) { - __asm__ volatile("ldp %x[xt1], %x[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); 
- WRITE_GPR_X(ss, Rt, xt1); - WRITE_GPR_X(ss, Rt2, xt2); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[1].ad_addr = va+sizeof(xt1); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[1].ad_data = xt2; - } else panic("%s Invalid size %d\n", __func__, size); - - stats.stat_decoder.sd_ldp++; + uint32_t wt1, wt2; + uint64_t xt1, xt2; + + if (size == 4) { + __asm__ volatile ("ldp %w[wt1], %w[wt2], [%[va]]\n" : [wt1] "=r"(wt1), [wt2] "=r"(wt2) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt1); + WRITE_GPR_W(ss, Rt2, wt2); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[1].ad_addr = va + sizeof(wt1); + res->rr_addrdata[0].ad_data = wt1; + res->rr_addrdata[1].ad_data = wt2; + } else if (size == 8) { + __asm__ volatile ("ldp %x[xt1], %x[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); + WRITE_GPR_X(ss, Rt, xt1); + WRITE_GPR_X(ss, Rt2, xt2); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[1].ad_addr = va + sizeof(xt1); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[1].ad_data = xt2; + } else { + panic("%s Invalid size %d\n", __func__, size); + } + + stats.stat_decoder.sd_ldp++; } -INLINE static void do_ldpsw(uint8_t Rt, uint8_t Rt2, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_ldpsw(uint8_t Rt, uint8_t Rt2, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint64_t xt1, xt2; + uint64_t xt1, xt2; - __asm__ volatile("ldpsw %x[xt1], %x[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); - WRITE_GPR_X(ss, Rt, xt1); - WRITE_GPR_X(ss, Rt2, xt2); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[1].ad_addr = va+sizeof(uint32_t); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[1].ad_data = xt2; + __asm__ volatile ("ldpsw %x[xt1], %x[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); + WRITE_GPR_X(ss, Rt, xt1); + WRITE_GPR_X(ss, Rt2, xt2); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[1].ad_addr = va + sizeof(uint32_t); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[1].ad_data = xt2; - stats.stat_decoder.sd_ldpsw++; + stats.stat_decoder.sd_ldpsw++; } -INLINE static void do_ldrs(uint8_t size, uint8_t extsize, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_ldrs(uint8_t size, uint8_t extsize, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt; - uint64_t xt; - - res->rr_rw = PGTRACE_RW_LOAD; - - if (size == 1 && extsize == 4) __asm__ volatile("ldrsb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 1 && extsize == 8) __asm__ volatile("ldrsb %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - else if (size == 2 && extsize == 4) __asm__ volatile("ldrsh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 2 && extsize == 8) __asm__ volatile("ldrsh %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - else if (size == 4 && extsize == 8) __asm__ volatile("ldrsw %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - else panic("%s Invalid size %d extsize=%d\n", __func__, size, extsize); - - if (extsize == 8) { - WRITE_GPR_X(ss, Rt, xt); - res->rr_addrdata[0].ad_data = xt; - } else { - WRITE_GPR_W(ss, Rt, wt); - res->rr_addrdata[0].ad_data = wt; - } - - stats.stat_decoder.sd_ldrs++; + uint32_t wt; + uint64_t xt; + + res->rr_rw = PGTRACE_RW_LOAD; + + if (size == 1 && extsize == 4) { + __asm__ volatile ("ldrsb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 1 && extsize == 8) { + __asm__ volatile 
("ldrsb %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + } else if (size == 2 && extsize == 4) { + __asm__ volatile ("ldrsh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 2 && extsize == 8) { + __asm__ volatile ("ldrsh %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + } else if (size == 4 && extsize == 8) { + __asm__ volatile ("ldrsw %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + } else { + panic("%s Invalid size %d extsize=%d\n", __func__, size, extsize); + } + + if (extsize == 8) { + WRITE_GPR_X(ss, Rt, xt); + res->rr_addrdata[0].ad_data = xt; + } else { + WRITE_GPR_W(ss, Rt, wt); + res->rr_addrdata[0].ad_data = wt; + } + + stats.stat_decoder.sd_ldrs++; } -INLINE static void do_ldtrs(uint8_t size, uint8_t extsize, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_ldtrs(uint8_t size, uint8_t extsize, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt; - uint64_t xt; - - res->rr_rw = PGTRACE_RW_LOAD; - - if (size == 1 && extsize == 4) __asm__ volatile("ldtrsb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 1 && extsize == 8) __asm__ volatile("ldtrsb %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - else if (size == 2 && extsize == 4) __asm__ volatile("ldtrsh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 2 && extsize == 8) __asm__ volatile("ldtrsh %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - else if (size == 4 && extsize == 8) __asm__ volatile("ldtrsw %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - else panic("%s Invalid size %d extsize=%d\n", __func__, size, extsize); - - if (extsize == 8) { - WRITE_GPR_X(ss, Rt, xt); - res->rr_addrdata[0].ad_data = xt; - } else { - WRITE_GPR_W(ss, Rt, wt); - res->rr_addrdata[0].ad_data = wt; - } - - stats.stat_decoder.sd_ldtrs++; + uint32_t wt; + uint64_t xt; + + res->rr_rw = PGTRACE_RW_LOAD; + + if (size == 1 && extsize == 4) { + __asm__ volatile ("ldtrsb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 1 && extsize == 8) { + __asm__ volatile ("ldtrsb %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + } else if (size == 2 && extsize == 4) { + __asm__ volatile ("ldtrsh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 2 && extsize == 8) { + __asm__ volatile ("ldtrsh %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + } else if (size == 4 && extsize == 8) { + __asm__ volatile ("ldtrsw %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + } else { + panic("%s Invalid size %d extsize=%d\n", __func__, size, extsize); + } + + if (extsize == 8) { + WRITE_GPR_X(ss, Rt, xt); + res->rr_addrdata[0].ad_data = xt; + } else { + WRITE_GPR_W(ss, Rt, wt); + res->rr_addrdata[0].ad_data = wt; + } + + stats.stat_decoder.sd_ldtrs++; } -INLINE static void do_ldtr(uint8_t size, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_ldtr(uint8_t size, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt; - uint64_t xt; - - res->rr_rw = PGTRACE_RW_LOAD; - - if (size == 1) __asm__ volatile("ldtrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 2) __asm__ volatile("ldtrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 4) __asm__ volatile("ldtr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - else if (size == 8) __asm__ volatile("ldtr %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - else 
panic("%s Invalid size %d\n", __func__, size); - - if (size == 8) { - WRITE_GPR_X(ss, Rt, xt); - res->rr_addrdata[0].ad_data = xt; - } else { - WRITE_GPR_W(ss, Rt, wt); - res->rr_addrdata[0].ad_data = wt; - } - - stats.stat_decoder.sd_ldtr++; + uint32_t wt; + uint64_t xt; + + res->rr_rw = PGTRACE_RW_LOAD; + + if (size == 1) { + __asm__ volatile ("ldtrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 2) { + __asm__ volatile ("ldtrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 4) { + __asm__ volatile ("ldtr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + } else if (size == 8) { + __asm__ volatile ("ldtr %x[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + } else { + panic("%s Invalid size %d\n", __func__, size); + } + + if (size == 8) { + WRITE_GPR_X(ss, Rt, xt); + res->rr_addrdata[0].ad_data = xt; + } else { + WRITE_GPR_W(ss, Rt, wt); + res->rr_addrdata[0].ad_data = wt; + } + + stats.stat_decoder.sd_ldtr++; } -INLINE static void do_sttr(uint8_t size, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +INLINE static void +do_sttr(uint8_t size, uint8_t Rt, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt; - uint64_t xt; - - res->rr_rw = PGTRACE_RW_STORE; - - if (size == 8) { - READ_GPR_X(ss, Rt, xt); - res->rr_addrdata[0].ad_data = xt; - } else { - READ_GPR_W(ss, Rt, wt); - res->rr_addrdata[0].ad_data = wt; - } - - if (size == 1) __asm__ volatile("sttrb %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); - else if (size == 2) __asm__ volatile("sttrh %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); - else if (size == 4) __asm__ volatile("sttr %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); - else if (size == 8) __asm__ volatile("sttr %x[xt], [%[va]]\n" :: [xt] "r"(xt), [va] "r"(va)); - else panic("%s Invalid size %d\n", __func__, size); - - stats.stat_decoder.sd_sttr++; + uint32_t wt; + uint64_t xt; + + res->rr_rw = PGTRACE_RW_STORE; + + if (size == 8) { + READ_GPR_X(ss, Rt, xt); + res->rr_addrdata[0].ad_data = xt; + } else { + READ_GPR_W(ss, Rt, wt); + res->rr_addrdata[0].ad_data = wt; + } + + if (size == 1) { + __asm__ volatile ("sttrb %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); + } else if (size == 2) { + __asm__ volatile ("sttrh %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); + } else if (size == 4) { + __asm__ volatile ("sttr %w[wt], [%[va]]\n" :: [wt] "r"(wt), [va] "r"(va)); + } else if (size == 8) { + __asm__ volatile ("sttr %x[xt], [%[va]]\n" :: [xt] "r"(xt), [va] "r"(va)); + } else { + panic("%s Invalid size %d\n", __func__, size); + } + + stats.stat_decoder.sd_sttr++; } -INLINE static void do_prfm(uint8_t Rt, vm_offset_t va, pgtrace_run_result_t *res) +INLINE static void +do_prfm(uint8_t Rt, vm_offset_t va, pgtrace_run_result_t *res) { - if (Rt == 0) __asm__ volatile("prfm pldl1keep, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 1) __asm__ volatile("prfm pldl1strm, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 2) __asm__ volatile("prfm pldl2keep, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 3) __asm__ volatile("prfm pldl2strm, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 4) __asm__ volatile("prfm pldl3keep, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 5) __asm__ volatile("prfm pldl3strm, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 6) __asm__ volatile("prfm #6, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 7) __asm__ volatile("prfm #7, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 8) __asm__ volatile("prfm #8, [%[va]]\n" : : [va] "r"(va)); 
- else if (Rt == 9) __asm__ volatile("prfm #9, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 10) __asm__ volatile("prfm #10, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 11) __asm__ volatile("prfm #11, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 12) __asm__ volatile("prfm #12, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 13) __asm__ volatile("prfm #13, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 14) __asm__ volatile("prfm #14, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 15) __asm__ volatile("prfm #15, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 16) __asm__ volatile("prfm pstl1keep, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 17) __asm__ volatile("prfm pstl1strm, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 18) __asm__ volatile("prfm pstl2keep, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 19) __asm__ volatile("prfm pstl2strm, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 20) __asm__ volatile("prfm pstl3keep, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 21) __asm__ volatile("prfm pstl3strm, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 22) __asm__ volatile("prfm #22, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 23) __asm__ volatile("prfm #23, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 24) __asm__ volatile("prfm #24, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 25) __asm__ volatile("prfm #25, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 26) __asm__ volatile("prfm #26, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 27) __asm__ volatile("prfm #27, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 28) __asm__ volatile("prfm #28, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 29) __asm__ volatile("prfm #29, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 30) __asm__ volatile("prfm #30, [%[va]]\n" : : [va] "r"(va)); - else if (Rt == 31) __asm__ volatile("prfm #31, [%[va]]\n" : : [va] "r"(va)); - else panic("%s Invalid Rt %d\n", __func__, Rt); - - res->rr_num = 0; - res->rr_rw = PGTRACE_RW_PREFETCH; - - stats.stat_decoder.sd_prfm++; + if (Rt == 0) { + __asm__ volatile ("prfm pldl1keep, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 1) { + __asm__ volatile ("prfm pldl1strm, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 2) { + __asm__ volatile ("prfm pldl2keep, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 3) { + __asm__ volatile ("prfm pldl2strm, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 4) { + __asm__ volatile ("prfm pldl3keep, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 5) { + __asm__ volatile ("prfm pldl3strm, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 6) { + __asm__ volatile ("prfm #6, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 7) { + __asm__ volatile ("prfm #7, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 8) { + __asm__ volatile ("prfm #8, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 9) { + __asm__ volatile ("prfm #9, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 10) { + __asm__ volatile ("prfm #10, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 11) { + __asm__ volatile ("prfm #11, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 12) { + __asm__ volatile ("prfm #12, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 13) { + __asm__ volatile ("prfm #13, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 14) { + __asm__ volatile ("prfm #14, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 15) { + __asm__ volatile ("prfm #15, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 16) { + __asm__ volatile ("prfm pstl1keep, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 17) { + __asm__ volatile ("prfm pstl1strm, [%[va]]\n" : : 
[va] "r"(va)); + } else if (Rt == 18) { + __asm__ volatile ("prfm pstl2keep, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 19) { + __asm__ volatile ("prfm pstl2strm, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 20) { + __asm__ volatile ("prfm pstl3keep, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 21) { + __asm__ volatile ("prfm pstl3strm, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 22) { + __asm__ volatile ("prfm #22, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 23) { + __asm__ volatile ("prfm #23, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 24) { + __asm__ volatile ("prfm #24, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 25) { + __asm__ volatile ("prfm #25, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 26) { + __asm__ volatile ("prfm #26, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 27) { + __asm__ volatile ("prfm #27, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 28) { + __asm__ volatile ("prfm #28, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 29) { + __asm__ volatile ("prfm #29, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 30) { + __asm__ volatile ("prfm #30, [%[va]]\n" : : [va] "r"(va)); + } else if (Rt == 31) { + __asm__ volatile ("prfm #31, [%[va]]\n" : : [va] "r"(va)); + } else { + panic("%s Invalid Rt %d\n", __func__, Rt); + } + + res->rr_num = 0; + res->rr_rw = PGTRACE_RW_PREFETCH; + + stats.stat_decoder.sd_prfm++; } #define CANNOTDECODE(msg, inst) do {\ panic("%s: " msg " inst=%x not supported yet\n", __func__, inst);\ } while (0) -static int run_simd(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_simd(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { #pragma unused(pa,va,ss,res) - CANNOTDECODE("simd", inst); - return 0; + CANNOTDECODE("simd", inst); + return 0; } -static int run_c335(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c335(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t opc = BITS(inst, 31, 30), - v = BITS(inst, 26, 26), - Rt = BITS(inst, 4, 0); - uint8_t fields = (opc << 1) | v; - - res->rr_num = 1; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_ldr(4, Rt, va, ss, res); - else if ((fields == 1) || - (fields == 3) || - (fields == 5)) CANNOTDECODE("simd", inst); - else if (fields == 2) do_ldr(8, Rt, va, ss, res); - else if (fields == 4) do_ldrs(4, 8, Rt, va, ss, res); - else if (fields == 6) do_prfm(Rt, va, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c335++; - - return 0; + uint32_t opc = BITS(inst, 31, 30), + v = BITS(inst, 26, 26), + Rt = BITS(inst, 4, 0); + uint8_t fields = (opc << 1) | v; + + res->rr_num = 1; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_ldr(4, Rt, va, ss, res); + } else if ((fields == 1) || + (fields == 3) || + (fields == 5)) { + CANNOTDECODE("simd", inst); + } else if (fields == 2) { + do_ldr(8, Rt, va, ss, res); + } else if (fields == 4) { + do_ldrs(4, 8, Rt, va, ss, res); + } else if (fields == 6) { + do_prfm(Rt, va, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c335++; + + return 0; } -static int run_c336(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c336(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t ws, wt, wt1, wt2; - uint64_t xt, 
xt1, xt2; - uint32_t size = BITS(inst, 31, 30), - o2 = BITS(inst, 23, 23), - L = BITS(inst, 22, 22), - o1 = BITS(inst, 21, 21), - Rs = BITS(inst, 20, 16), - o0 = BITS(inst, 15, 15), - Rt2 = BITS(inst, 14, 10), - Rt = BITS(inst, 4, 0); - uint8_t fields = (size << 4) | (o2 << 3) | (L << 2) | (o1 << 1) | o0; - - kprintf("%s Load/store exclusive on device memory???n", __func__); - - res->rr_num = 1; - res->rr_addrdata[0].ad_addr = pa; - - switch (fields) { - case 0: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stxrb %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 1: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stlxrb %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 4: - __asm__ volatile("ldxrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 5: - __asm__ volatile("ldaxrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 9: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stlrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 0xd: - __asm__ volatile("ldarb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x10: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stxrh %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x11: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stlxrh %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x14: - __asm__ volatile("ldxrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x15: - __asm__ volatile("ldaxrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x19: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stlrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x1d: - __asm__ volatile("ldarh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x20: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stxr %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x21: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stlxr %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x22: - READ_GPR_W(ss, Rt, wt1); - READ_GPR_W(ss, Rt2, wt2); - __asm__ 
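/*
 * run_c336 replays exclusives against the trace mapping, hence the warning
 * above: the exclusive monitor gives no useful guarantee on device memory.
 * For reference, a minimal user-space sketch of what the status register
 * written by stxr (ws in this function) means: 0 is success, nonzero means
 * the exclusive reservation was lost and the sequence must retry.
 */
#include <stdint.h>

static inline uint32_t atomic_inc32(uint32_t *p)
{
    uint32_t old, st;
    do {
        __asm__ volatile ("ldxr %w[old], [%[p]]"
            : [old] "=r"(old) : [p] "r"(p) : "memory");
        uint32_t nv = old + 1;
        __asm__ volatile ("stxr %w[st], %w[nv], [%[p]]"
            : [st] "=&r"(st) : [nv] "r"(nv), [p] "r"(p) : "memory");
    } while (st != 0);
    return old;
}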
volatile("stxp %w[ws], %w[wt1], %w[wt2], [%[va]]\n" : [ws] "=r"(ws) : [wt1] "r"(wt1), [wt2] "r"(wt2), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = va; - res->rr_addrdata[1].ad_addr = va+sizeof(wt1); - res->rr_addrdata[0].ad_data = wt1; - res->rr_addrdata[1].ad_data = wt2; - break; - case 0x23: - READ_GPR_W(ss, Rt, wt1); - READ_GPR_W(ss, Rt2, wt2); - __asm__ volatile("stlxp %w[ws], %w[wt1], %w[wt2], [%[va]]\n" : [ws] "=r"(ws) : [wt1] "r"(wt1), [wt2] "r"(wt2), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = va; - res->rr_addrdata[1].ad_addr = va+sizeof(wt1); - res->rr_addrdata[0].ad_data = wt1; - res->rr_addrdata[1].ad_data = wt2; - break; - case 0x24: - __asm__ volatile("ldxr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x25: - __asm__ volatile("ldaxr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x26: - __asm__ volatile("ldxp %w[wt1], %w[wt2], [%[va]]\n" : [wt1] "=r"(wt1), [wt2] "=r"(wt2) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt1); - WRITE_GPR_W(ss, Rt2, wt2); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = va; - res->rr_addrdata[1].ad_addr = va+sizeof(wt1); - res->rr_addrdata[0].ad_data = wt1; - res->rr_addrdata[1].ad_data = wt2; - break; - case 0x27: - __asm__ volatile("ldaxp %w[wt1], %w[wt2], [%[va]]\n" : [wt1] "=r"(wt1), [wt2] "=r"(wt2) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt1); - WRITE_GPR_W(ss, Rt2, wt2); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = va; - res->rr_addrdata[1].ad_addr = va+sizeof(wt1); - res->rr_addrdata[0].ad_data = wt1; - res->rr_addrdata[1].ad_data = wt2; - break; - case 0x29: - READ_GPR_W(ss, Rt, wt); - __asm__ volatile("stlr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x2d: - __asm__ volatile("ldar %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = wt; - break; - case 0x30: - READ_GPR_X(ss, Rt, xt); - __asm__ volatile("stxr %w[ws], %[xt], [%[va]]\n" : [ws] "=r"(ws) : [xt] "r"(xt), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = xt; - break; - case 0x31: - READ_GPR_X(ss, Rt, xt); - __asm__ volatile("stlxr %w[ws], %[xt], [%[va]]\n" : [ws] "=r"(ws) : [xt] "r"(xt), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = xt; - break; - case 0x32: - READ_GPR_X(ss, Rt, xt1); - READ_GPR_X(ss, Rt2, xt2); - __asm__ volatile("stxp %w[ws], %[xt1], %[xt2], [%[va]]\n" : [ws] "=r"(ws) : [xt1] "r"(xt1), [xt2] "r"(xt2), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = va; - res->rr_addrdata[1].ad_addr = va+sizeof(xt1); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[1].ad_data = xt2; - break; - case 0x33: - READ_GPR_X(ss, Rt, xt1); - READ_GPR_X(ss, Rt2, xt2); - __asm__ volatile("stlxp %w[ws], %[xt1], %[xt2], [%[va]]\n" : [ws] "=r"(ws) : [xt1] "r"(xt1), [xt2] "r"(xt2), [va] "r"(va)); - WRITE_GPR_W(ss, Rs, ws); - res->rr_rw = PGTRACE_RW_STORE; - 
res->rr_num = 2; - res->rr_addrdata[0].ad_addr = va; - res->rr_addrdata[1].ad_addr = va+sizeof(xt1); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[1].ad_data = xt2; - break; - case 0x34: - __asm__ volatile("ldxr %[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - WRITE_GPR_X(ss, Rt, xt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = xt; - break; - case 0x35: - __asm__ volatile("ldaxr %[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - WRITE_GPR_X(ss, Rt, xt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = xt; - break; - case 0x36: - __asm__ volatile("ldxp %[xt1], %[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); - WRITE_GPR_X(ss, Rt, xt1); - WRITE_GPR_X(ss, Rt2, xt2); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = va; - res->rr_addrdata[1].ad_addr = va+sizeof(xt1); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[0].ad_data = xt2; - break; - case 0x37: - __asm__ volatile("ldaxp %[xt1], %[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); - WRITE_GPR_X(ss, Rt, xt1); - WRITE_GPR_X(ss, Rt2, xt2); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = va; - res->rr_addrdata[1].ad_addr = va+sizeof(xt1); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[0].ad_data = xt2; - break; - case 0x39: - READ_GPR_X(ss, Rt, xt); - __asm__ volatile("stlr %[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_addrdata[0].ad_data = xt; - break; - case 0x3d: - __asm__ volatile("ldar %[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); - WRITE_GPR_X(ss, Rt, xt); - res->rr_rw = PGTRACE_RW_LOAD; - res->rr_addrdata[0].ad_data = xt; - break; - default: - CANNOTDECODE("unknown", inst); - } - - stats.stat_decoder.sd_c336++; - - return 0; + uint32_t ws, wt, wt1, wt2; + uint64_t xt, xt1, xt2; + uint32_t size = BITS(inst, 31, 30), + o2 = BITS(inst, 23, 23), + L = BITS(inst, 22, 22), + o1 = BITS(inst, 21, 21), + Rs = BITS(inst, 20, 16), + o0 = BITS(inst, 15, 15), + Rt2 = BITS(inst, 14, 10), + Rt = BITS(inst, 4, 0); + uint8_t fields = (size << 4) | (o2 << 3) | (L << 2) | (o1 << 1) | o0; + + kprintf("%s Load/store exclusive on device memory???\n", __func__); + + res->rr_num = 1; + res->rr_addrdata[0].ad_addr = pa; + + switch (fields) { + case 0: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stxrb %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 1: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stlxrb %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 4: + __asm__ volatile ("ldxrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = wt; + break; + case 5: + __asm__ volatile ("ldaxrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = wt; + break; + case 9: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stlrb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 0xd: + __asm__ volatile ("ldarb %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD;
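/*
 * The byte cases above include the acquire/release flavors (stlxrb, ldaxrb,
 * stlrb, ldarb), and the decoder is careful to replay the same flavor it
 * trapped. In portable code that ordering is what C11 release/acquire
 * atomics provide; on AArch64 the pair below typically compiles to exactly
 * these instructions. The flag variable is an illustrative stand-in:
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint8_t flag;

static inline void publish(void)
{
    atomic_store_explicit(&flag, 1, memory_order_release);    /* stlrb */
}

static inline uint8_t consume(void)
{
    return atomic_load_explicit(&flag, memory_order_acquire); /* ldarb */
}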
+ res->rr_addrdata[0].ad_data = wt; + break; + case 0x10: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stxrh %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x11: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stlxrh %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x14: + __asm__ volatile ("ldxrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x15: + __asm__ volatile ("ldaxrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x19: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stlrh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x1d: + __asm__ volatile ("ldarh %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x20: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stxr %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x21: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stlxr %w[ws], %w[wt], [%[va]]\n" : [ws] "=r"(ws) : [wt] "r"(wt), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x22: + READ_GPR_W(ss, Rt, wt1); + READ_GPR_W(ss, Rt2, wt2); + __asm__ volatile ("stxp %w[ws], %w[wt1], %w[wt2], [%[va]]\n" : [ws] "=r"(ws) : [wt1] "r"(wt1), [wt2] "r"(wt2), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = va; + res->rr_addrdata[1].ad_addr = va + sizeof(wt1); + res->rr_addrdata[0].ad_data = wt1; + res->rr_addrdata[1].ad_data = wt2; + break; + case 0x23: + READ_GPR_W(ss, Rt, wt1); + READ_GPR_W(ss, Rt2, wt2); + __asm__ volatile ("stlxp %w[ws], %w[wt1], %w[wt2], [%[va]]\n" : [ws] "=r"(ws) : [wt1] "r"(wt1), [wt2] "r"(wt2), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = va; + res->rr_addrdata[1].ad_addr = va + sizeof(wt1); + res->rr_addrdata[0].ad_data = wt1; + res->rr_addrdata[1].ad_data = wt2; + break; + case 0x24: + __asm__ volatile ("ldxr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x25: + __asm__ volatile ("ldaxr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x26: + __asm__ volatile ("ldxp %w[wt1], %w[wt2], [%[va]]\n" : [wt1] "=r"(wt1), [wt2] "=r"(wt2) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt1); + WRITE_GPR_W(ss, Rt2, wt2); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = va; + res->rr_addrdata[1].ad_addr = va + sizeof(wt1); + res->rr_addrdata[0].ad_data = wt1; + res->rr_addrdata[1].ad_data = wt2; + break; + case 0x27: + __asm__ volatile ("ldaxp %w[wt1], %w[wt2], 
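/*
 * The pair cases (stxp/stlxp/ldxp/ldaxp) record two address/data slots:
 * rr_num becomes 2 and the second slot sits one register width above the
 * first. A sketch of that logging convention; the struct mirrors the
 * ad_addr/ad_data usage in these functions, but the real layout lives in
 * pgtrace.h:
 */
#include <stdint.h>

typedef struct {
    uint64_t ad_addr;
    uint64_t ad_data;
} toy_addrdata_t;

static void record_pair(toy_addrdata_t ad[2], uint64_t va,
    uint64_t lo, uint64_t hi, unsigned width)
{
    ad[0].ad_addr = va;            /* first element of the pair */
    ad[0].ad_data = lo;
    ad[1].ad_addr = va + width;    /* second element: va + sizeof(reg) */
    ad[1].ad_data = hi;
}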
[%[va]]\n" : [wt1] "=r"(wt1), [wt2] "=r"(wt2) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt1); + WRITE_GPR_W(ss, Rt2, wt2); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = va; + res->rr_addrdata[1].ad_addr = va + sizeof(wt1); + res->rr_addrdata[0].ad_data = wt1; + res->rr_addrdata[1].ad_data = wt2; + break; + case 0x29: + READ_GPR_W(ss, Rt, wt); + __asm__ volatile ("stlr %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x2d: + __asm__ volatile ("ldar %w[wt], [%[va]]\n" : [wt] "=r"(wt) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = wt; + break; + case 0x30: + READ_GPR_X(ss, Rt, xt); + __asm__ volatile ("stxr %w[ws], %[xt], [%[va]]\n" : [ws] "=r"(ws) : [xt] "r"(xt), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = xt; + break; + case 0x31: + READ_GPR_X(ss, Rt, xt); + __asm__ volatile ("stlxr %w[ws], %[xt], [%[va]]\n" : [ws] "=r"(ws) : [xt] "r"(xt), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = xt; + break; + case 0x32: + READ_GPR_X(ss, Rt, xt1); + READ_GPR_X(ss, Rt2, xt2); + __asm__ volatile ("stxp %w[ws], %[xt1], %[xt2], [%[va]]\n" : [ws] "=r"(ws) : [xt1] "r"(xt1), [xt2] "r"(xt2), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = va; + res->rr_addrdata[1].ad_addr = va + sizeof(xt1); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[1].ad_data = xt2; + break; + case 0x33: + READ_GPR_X(ss, Rt, xt1); + READ_GPR_X(ss, Rt2, xt2); + __asm__ volatile ("stlxp %w[ws], %[xt1], %[xt2], [%[va]]\n" : [ws] "=r"(ws) : [xt1] "r"(xt1), [xt2] "r"(xt2), [va] "r"(va)); + WRITE_GPR_W(ss, Rs, ws); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = va; + res->rr_addrdata[1].ad_addr = va + sizeof(xt1); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[1].ad_data = xt2; + break; + case 0x34: + __asm__ volatile ("ldxr %[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + WRITE_GPR_X(ss, Rt, xt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = xt; + break; + case 0x35: + __asm__ volatile ("ldaxr %[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + WRITE_GPR_X(ss, Rt, xt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = xt; + break; + case 0x36: + __asm__ volatile ("ldxp %[xt1], %[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); + WRITE_GPR_X(ss, Rt, xt1); + WRITE_GPR_X(ss, Rt2, xt2); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = va; + res->rr_addrdata[1].ad_addr = va + sizeof(xt1); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[0].ad_data = xt2; + break; + case 0x37: + __asm__ volatile ("ldaxp %[xt1], %[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); + WRITE_GPR_X(ss, Rt, xt1); + WRITE_GPR_X(ss, Rt2, xt2); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = va; + res->rr_addrdata[1].ad_addr = va + sizeof(xt1); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[0].ad_data = xt2; + break; + case 0x39: + READ_GPR_X(ss, Rt, xt); + __asm__ volatile ("stlr %[xt], [%[va]]\n" : [xt] "=r"(xt) : [va] "r"(va)); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_addrdata[0].ad_data = xt; + break; + case 0x3d: + __asm__ volatile ("ldar %[xt], [%[va]]\n" : [xt] "=r"(xt) : 
[va] "r"(va)); + WRITE_GPR_X(ss, Rt, xt); + res->rr_rw = PGTRACE_RW_LOAD; + res->rr_addrdata[0].ad_data = xt; + break; + default: + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c336++; + + return 0; } -static int run_c337(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c337(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t wt1, wt2; - uint64_t xt1, xt2; - uint32_t opc = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - L = BITS(inst, 22, 22), - Rt = BITS(inst, 4, 0), - Rt2 = BITS(inst, 14, 10); - uint8_t fields = (opc << 2) | (V << 1) | L; - - switch (fields) { - case 0: - READ_GPR_W(ss, Rt, wt1); - READ_GPR_W(ss, Rt2, wt2); - __asm__ volatile("stnp %w[wt1], %w[wt2], [%[va]]\n" :: [wt1] "r"(wt1), [wt2] "r"(wt2), [va] "r"(va)); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = pa; - res->rr_addrdata[1].ad_addr = pa+sizeof(wt1); - res->rr_addrdata[0].ad_data = wt1; - res->rr_addrdata[1].ad_data = wt2; - break; - case 1: - __asm__ volatile("ldnp %w[wt1], %w[wt2], [%[va]]\n" : [wt1] "=r"(wt1), [wt2] "=r"(wt2) : [va] "r"(va)); - WRITE_GPR_W(ss, Rt, wt1); - WRITE_GPR_W(ss, Rt2, wt2); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = pa; - res->rr_addrdata[1].ad_addr = pa+sizeof(wt1); - res->rr_addrdata[0].ad_data = wt1; - res->rr_addrdata[1].ad_data = wt2; - break; - case 2: - case 3: - case 6: - case 7: - case 10: - case 11: - CANNOTDECODE("simd", inst); - case 8: - READ_GPR_X(ss, Rt, xt1); - READ_GPR_X(ss, Rt2, xt2); - __asm__ volatile("stnp %x[xt1], %x[xt2], [%[va]]\n" :: [xt1] "r"(xt1), [xt2] "r"(xt2), [va] "r"(va)); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = pa; - res->rr_addrdata[1].ad_addr = pa+sizeof(xt1); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[1].ad_data = xt2; - break; - case 9: - __asm__ volatile("ldnp %x[xt1], %x[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); - WRITE_GPR_X(ss, Rt, xt1); - WRITE_GPR_X(ss, Rt2, xt2); - res->rr_rw = PGTRACE_RW_STORE; - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = pa; - res->rr_addrdata[1].ad_addr = pa+sizeof(xt1); - res->rr_addrdata[0].ad_data = xt1; - res->rr_addrdata[1].ad_data = xt2; - break; - default: - CANNOTDECODE("simd", inst); - } - - stats.stat_decoder.sd_c337++; - - return 0; + uint32_t wt1, wt2; + uint64_t xt1, xt2; + uint32_t opc = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + L = BITS(inst, 22, 22), + Rt = BITS(inst, 4, 0), + Rt2 = BITS(inst, 14, 10); + uint8_t fields = (opc << 2) | (V << 1) | L; + + switch (fields) { + case 0: + READ_GPR_W(ss, Rt, wt1); + READ_GPR_W(ss, Rt2, wt2); + __asm__ volatile ("stnp %w[wt1], %w[wt2], [%[va]]\n" :: [wt1] "r"(wt1), [wt2] "r"(wt2), [va] "r"(va)); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = pa; + res->rr_addrdata[1].ad_addr = pa + sizeof(wt1); + res->rr_addrdata[0].ad_data = wt1; + res->rr_addrdata[1].ad_data = wt2; + break; + case 1: + __asm__ volatile ("ldnp %w[wt1], %w[wt2], [%[va]]\n" : [wt1] "=r"(wt1), [wt2] "=r"(wt2) : [va] "r"(va)); + WRITE_GPR_W(ss, Rt, wt1); + WRITE_GPR_W(ss, Rt2, wt2); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = pa; + res->rr_addrdata[1].ad_addr = pa + sizeof(wt1); + res->rr_addrdata[0].ad_data = wt1; + res->rr_addrdata[1].ad_data = wt2; + break; + case 2: + case 3: + case 6: + case 7: + case 10: + case 11: + 
CANNOTDECODE("simd", inst); + case 8: + READ_GPR_X(ss, Rt, xt1); + READ_GPR_X(ss, Rt2, xt2); + __asm__ volatile ("stnp %x[xt1], %x[xt2], [%[va]]\n" :: [xt1] "r"(xt1), [xt2] "r"(xt2), [va] "r"(va)); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = pa; + res->rr_addrdata[1].ad_addr = pa + sizeof(xt1); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[1].ad_data = xt2; + break; + case 9: + __asm__ volatile ("ldnp %x[xt1], %x[xt2], [%[va]]\n" : [xt1] "=r"(xt1), [xt2] "=r"(xt2) : [va] "r"(va)); + WRITE_GPR_X(ss, Rt, xt1); + WRITE_GPR_X(ss, Rt2, xt2); + res->rr_rw = PGTRACE_RW_STORE; + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = pa; + res->rr_addrdata[1].ad_addr = pa + sizeof(xt1); + res->rr_addrdata[0].ad_data = xt1; + res->rr_addrdata[1].ad_data = xt2; + break; + default: + CANNOTDECODE("simd", inst); + } + + stats.stat_decoder.sd_c337++; + + return 0; } -static int run_c338(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c338(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t size = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - opc = BITS(inst, 23, 22), - Rt = BITS(inst, 4, 0); - uint8_t fields = (size << 3) | (V << 2) | opc; - - res->rr_num = 1; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_str(1, Rt, va, ss, res); - else if (fields == 1) do_ldr(1, Rt, va, ss, res); - else if (fields == 2) do_ldrs(1, 8, Rt, va, ss, res); - else if (fields == 3) do_ldrs(1, 4, Rt, va, ss, res); - else if ((fields == 4) || - (fields == 5) || - (fields == 6) || - (fields == 7) || - (fields == 12) || - (fields == 13) || - (fields == 0x14) || - (fields == 0x15) || - (fields == 0x1c) || - (fields == 0x1d)) CANNOTDECODE("simd", inst); - else if (fields == 8) do_str(2, Rt, va, ss, res); - else if (fields == 9) do_ldr(2, Rt, va, ss, res); - else if (fields == 10) do_ldrs(2, 8, Rt, va, ss, res); - else if (fields == 11) do_ldrs(2, 4, Rt, va, ss, res); - else if (fields == 0x10) do_str(4, Rt, va, ss, res); - else if (fields == 0x11) do_ldr(4, Rt, va, ss, res); - else if (fields == 0x12) do_ldrs(4, 8, Rt, va, ss, res); - else if (fields == 0x18) do_str(8, Rt, va, ss, res); - else if (fields == 0x19) do_ldr(8, Rt, va, ss, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c338++; - - return 0; + uint32_t size = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + opc = BITS(inst, 23, 22), + Rt = BITS(inst, 4, 0); + uint8_t fields = (size << 3) | (V << 2) | opc; + + res->rr_num = 1; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_str(1, Rt, va, ss, res); + } else if (fields == 1) { + do_ldr(1, Rt, va, ss, res); + } else if (fields == 2) { + do_ldrs(1, 8, Rt, va, ss, res); + } else if (fields == 3) { + do_ldrs(1, 4, Rt, va, ss, res); + } else if ((fields == 4) || + (fields == 5) || + (fields == 6) || + (fields == 7) || + (fields == 12) || + (fields == 13) || + (fields == 0x14) || + (fields == 0x15) || + (fields == 0x1c) || + (fields == 0x1d)) { + CANNOTDECODE("simd", inst); + } else if (fields == 8) { + do_str(2, Rt, va, ss, res); + } else if (fields == 9) { + do_ldr(2, Rt, va, ss, res); + } else if (fields == 10) { + do_ldrs(2, 8, Rt, va, ss, res); + } else if (fields == 11) { + do_ldrs(2, 4, Rt, va, ss, res); + } else if (fields == 0x10) { + do_str(4, Rt, va, ss, res); + } else if (fields == 0x11) { + do_ldr(4, Rt, va, ss, res); + } else if (fields == 0x12) { + do_ldrs(4, 8, Rt, va, ss, res); + } else if 
(fields == 0x18) { + do_str(8, Rt, va, ss, res); + } else if (fields == 0x19) { + do_ldr(8, Rt, va, ss, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c338++; + + return 0; } -static int run_c339(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c339(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t size = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - opc = BITS(inst, 23, 22), - Rt = BITS(inst, 4, 0); - uint8_t fields = (size << 3) | (V << 2) | opc; - - res->rr_num = 1; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_str(1, Rt, va, ss, res); - else if (fields == 1) do_ldr(1, Rt, va, ss, res); - else if (fields == 2) do_ldrs(1, 8, Rt, va, ss, res); - else if (fields == 3) do_ldrs(1, 4, Rt, va, ss, res); - else if ((fields == 4) || - (fields == 5) || - (fields == 6) || - (fields == 7) || - (fields == 12) || - (fields == 13) || - (fields == 0x14) || - (fields == 0x15) || - (fields == 0x1c) || - (fields == 0x1d)) CANNOTDECODE("simd", inst); - else if (fields == 8) do_str(2, Rt, va, ss, res); - else if (fields == 9) do_ldr(2, Rt, va, ss, res); - else if (fields == 10) do_ldrs(2, 8, Rt, va, ss, res); - else if (fields == 11) do_ldrs(2, 4, Rt, va, ss, res); - else if (fields == 0x10) do_str(4, Rt, va, ss, res); - else if (fields == 0x11) do_ldr(4, Rt, va, ss, res); - else if (fields == 0x12) do_ldrs(4, 8, Rt, va, ss, res); - else if (fields == 0x18) do_str(8, Rt, va, ss, res); - else if (fields == 0x19) do_ldr(8, Rt, va, ss, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c339++; - - return 0; + uint32_t size = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + opc = BITS(inst, 23, 22), + Rt = BITS(inst, 4, 0); + uint8_t fields = (size << 3) | (V << 2) | opc; + + res->rr_num = 1; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_str(1, Rt, va, ss, res); + } else if (fields == 1) { + do_ldr(1, Rt, va, ss, res); + } else if (fields == 2) { + do_ldrs(1, 8, Rt, va, ss, res); + } else if (fields == 3) { + do_ldrs(1, 4, Rt, va, ss, res); + } else if ((fields == 4) || + (fields == 5) || + (fields == 6) || + (fields == 7) || + (fields == 12) || + (fields == 13) || + (fields == 0x14) || + (fields == 0x15) || + (fields == 0x1c) || + (fields == 0x1d)) { + CANNOTDECODE("simd", inst); + } else if (fields == 8) { + do_str(2, Rt, va, ss, res); + } else if (fields == 9) { + do_ldr(2, Rt, va, ss, res); + } else if (fields == 10) { + do_ldrs(2, 8, Rt, va, ss, res); + } else if (fields == 11) { + do_ldrs(2, 4, Rt, va, ss, res); + } else if (fields == 0x10) { + do_str(4, Rt, va, ss, res); + } else if (fields == 0x11) { + do_ldr(4, Rt, va, ss, res); + } else if (fields == 0x12) { + do_ldrs(4, 8, Rt, va, ss, res); + } else if (fields == 0x18) { + do_str(8, Rt, va, ss, res); + } else if (fields == 0x19) { + do_ldr(8, Rt, va, ss, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c339++; + + return 0; } -static int run_c3310(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c3310(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t size = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - opc = BITS(inst, 23, 22), - Rt = BITS(inst, 4, 0); - uint8_t fields = (size << 3) | (V << 2) | opc; - - res->rr_num = 1; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_str(1, 
Rt, va, ss, res); - else if (fields == 1) do_ldr(1, Rt, va, ss, res); - else if (fields == 2) do_ldrs(1, 8, Rt, va, ss, res); - else if (fields == 3) do_ldrs(1, 4, Rt, va, ss, res); - else if ((fields == 4) || - (fields == 5) || - (fields == 6) || - (fields == 7) || - (fields == 12) || - (fields == 13) || - (fields == 0x14) || - (fields == 0x15) || - (fields == 0x1c) || - (fields == 0x1d)) CANNOTDECODE("simd", inst); - else if (fields == 8) do_str(2, Rt, va, ss, res); - else if (fields == 9) do_ldr(2, Rt, va, ss, res); - else if (fields == 10) do_ldrs(2, 8, Rt, va, ss, res); - else if (fields == 11) do_ldrs(2, 4, Rt, va, ss, res); - else if (fields == 0x10) do_str(4, Rt, va, ss, res); - else if (fields == 0x11) do_ldr(4, Rt, va, ss, res); - else if (fields == 0x12) do_ldrs(4, 8, Rt, va, ss, res); - else if (fields == 0x18) do_str(8, Rt, va, ss, res); - else if (fields == 0x19) do_ldr(8, Rt, va, ss, res); - else if (fields == 0x1a) do_prfm(Rt, va, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c3310++; - - return 0; + uint32_t size = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + opc = BITS(inst, 23, 22), + Rt = BITS(inst, 4, 0); + uint8_t fields = (size << 3) | (V << 2) | opc; + + res->rr_num = 1; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_str(1, Rt, va, ss, res); + } else if (fields == 1) { + do_ldr(1, Rt, va, ss, res); + } else if (fields == 2) { + do_ldrs(1, 8, Rt, va, ss, res); + } else if (fields == 3) { + do_ldrs(1, 4, Rt, va, ss, res); + } else if ((fields == 4) || + (fields == 5) || + (fields == 6) || + (fields == 7) || + (fields == 12) || + (fields == 13) || + (fields == 0x14) || + (fields == 0x15) || + (fields == 0x1c) || + (fields == 0x1d)) { + CANNOTDECODE("simd", inst); + } else if (fields == 8) { + do_str(2, Rt, va, ss, res); + } else if (fields == 9) { + do_ldr(2, Rt, va, ss, res); + } else if (fields == 10) { + do_ldrs(2, 8, Rt, va, ss, res); + } else if (fields == 11) { + do_ldrs(2, 4, Rt, va, ss, res); + } else if (fields == 0x10) { + do_str(4, Rt, va, ss, res); + } else if (fields == 0x11) { + do_ldr(4, Rt, va, ss, res); + } else if (fields == 0x12) { + do_ldrs(4, 8, Rt, va, ss, res); + } else if (fields == 0x18) { + do_str(8, Rt, va, ss, res); + } else if (fields == 0x19) { + do_ldr(8, Rt, va, ss, res); + } else if (fields == 0x1a) { + do_prfm(Rt, va, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c3310++; + + return 0; } -static int run_c3311(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c3311(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t size = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - opc = BITS(inst, 23, 22), - Rt = BITS(inst, 4, 0); - uint8_t fields = (size << 3) | (V << 2) | opc; - - res->rr_num = 1; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_sttr(1, Rt, va, ss, res); - else if (fields == 1) do_ldtr(1, Rt, va, ss, res); - else if (fields == 2) do_ldtrs(1, 8, Rt, va, ss, res); - else if (fields == 3) do_ldtrs(1, 4, Rt, va, ss, res); - else if (fields == 8) do_sttr(2, Rt, va, ss, res); - else if (fields == 9) do_ldtr(2, Rt, va, ss, res); - else if (fields == 10) do_ldtrs(2, 8, Rt, va, ss, res); - else if (fields == 11) do_ldtrs(2, 4, Rt, va, ss, res); - else if (fields == 0x10) do_sttr(4, Rt, va, ss, res); - else if (fields == 0x11) do_ldtr(4, Rt, va, ss, res); - else if (fields == 0x12) do_ldtrs(4, 8, Rt, va, ss, res); - else if (fields 
== 0x18) do_sttr(8, Rt, va, ss, res); - else if (fields == 0x19) do_ldtr(8, Rt, va, ss, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c3311++; - - return 0; + uint32_t size = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + opc = BITS(inst, 23, 22), + Rt = BITS(inst, 4, 0); + uint8_t fields = (size << 3) | (V << 2) | opc; + + res->rr_num = 1; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_sttr(1, Rt, va, ss, res); + } else if (fields == 1) { + do_ldtr(1, Rt, va, ss, res); + } else if (fields == 2) { + do_ldtrs(1, 8, Rt, va, ss, res); + } else if (fields == 3) { + do_ldtrs(1, 4, Rt, va, ss, res); + } else if (fields == 8) { + do_sttr(2, Rt, va, ss, res); + } else if (fields == 9) { + do_ldtr(2, Rt, va, ss, res); + } else if (fields == 10) { + do_ldtrs(2, 8, Rt, va, ss, res); + } else if (fields == 11) { + do_ldtrs(2, 4, Rt, va, ss, res); + } else if (fields == 0x10) { + do_sttr(4, Rt, va, ss, res); + } else if (fields == 0x11) { + do_ldtr(4, Rt, va, ss, res); + } else if (fields == 0x12) { + do_ldtrs(4, 8, Rt, va, ss, res); + } else if (fields == 0x18) { + do_sttr(8, Rt, va, ss, res); + } else if (fields == 0x19) { + do_ldtr(8, Rt, va, ss, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c3311++; + + return 0; } -static int run_c3312(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c3312(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t size = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - opc = BITS(inst, 23, 22), - Rt = BITS(inst, 4, 0); - uint8_t fields = (size << 3) | (V << 2) | opc; - - res->rr_num = 1; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_str(1, Rt, va, ss, res); - else if (fields == 1) do_ldr(1, Rt, va, ss, res); - else if (fields == 2) do_ldrs(1, 8, Rt, va, ss, res); - else if (fields == 3) do_ldrs(1, 4, Rt, va, ss, res); - else if ((fields == 4) || - (fields == 5) || - (fields == 6) || - (fields == 7) || - (fields == 12) || - (fields == 13) || - (fields == 0x14) || - (fields == 0x15) || - (fields == 0x1c) || - (fields == 0x1d)) CANNOTDECODE("simd", inst); - else if (fields == 8) do_str(2, Rt, va, ss, res); - else if (fields == 9) do_ldr(2, Rt, va, ss, res); - else if (fields == 10) do_ldrs(2, 8, Rt, va, ss, res); - else if (fields == 11) do_ldrs(2, 4, Rt, va, ss, res); - else if (fields == 0x10) do_str(4, Rt, va, ss, res); - else if (fields == 0x11) do_ldr(4, Rt, va, ss, res); - else if (fields == 0x12) do_ldrs(4, 8, Rt, va, ss, res); - else if (fields == 0x18) do_str(8, Rt, va, ss, res); - else if (fields == 0x19) do_ldr(8, Rt, va, ss, res); - else if (fields == 0x1a) do_prfm(Rt, va, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c3312++; - - return 0; + uint32_t size = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + opc = BITS(inst, 23, 22), + Rt = BITS(inst, 4, 0); + uint8_t fields = (size << 3) | (V << 2) | opc; + + res->rr_num = 1; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_str(1, Rt, va, ss, res); + } else if (fields == 1) { + do_ldr(1, Rt, va, ss, res); + } else if (fields == 2) { + do_ldrs(1, 8, Rt, va, ss, res); + } else if (fields == 3) { + do_ldrs(1, 4, Rt, va, ss, res); + } else if ((fields == 4) || + (fields == 5) || + (fields == 6) || + (fields == 7) || + (fields == 12) || + (fields == 13) || + (fields == 0x14) || + (fields == 0x15) || + (fields == 0x1c) || + (fields == 0x1d)) { + CANNOTDECODE("simd", inst); 
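/*
 * run_c3311 above replays the "unprivileged" variants: at EL1, LDTR and
 * STTR perform the access with EL0 permission checks, which is why they get
 * their own do_sttr/do_ldtr/do_ldtrs helpers rather than reusing the
 * ordinary forms. A minimal sketch of the two instruction forms:
 */
#include <stdint.h>

static inline uint32_t unpriv_load32(const void *p)
{
    uint32_t v;
    __asm__ volatile ("ldtr %w[v], [%[p]]"
        : [v] "=r"(v) : [p] "r"(p) : "memory");
    return v;
}

static inline void unpriv_store32(void *p, uint32_t v)
{
    __asm__ volatile ("sttr %w[v], [%[p]]"
        : : [v] "r"(v), [p] "r"(p) : "memory");
}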
+ } else if (fields == 8) { + do_str(2, Rt, va, ss, res); + } else if (fields == 9) { + do_ldr(2, Rt, va, ss, res); + } else if (fields == 10) { + do_ldrs(2, 8, Rt, va, ss, res); + } else if (fields == 11) { + do_ldrs(2, 4, Rt, va, ss, res); + } else if (fields == 0x10) { + do_str(4, Rt, va, ss, res); + } else if (fields == 0x11) { + do_ldr(4, Rt, va, ss, res); + } else if (fields == 0x12) { + do_ldrs(4, 8, Rt, va, ss, res); + } else if (fields == 0x18) { + do_str(8, Rt, va, ss, res); + } else if (fields == 0x19) { + do_ldr(8, Rt, va, ss, res); + } else if (fields == 0x1a) { + do_prfm(Rt, va, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c3312++; + + return 0; } -static int run_c3313(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c3313(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t size = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - opc = BITS(inst, 23, 22), - Rt = BITS(inst, 4, 0); - uint8_t fields = (size << 3) | (V << 2) | opc; - - res->rr_num = 1; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_str(1, Rt, va, ss, res); - else if (fields == 1) do_ldr(1, Rt, va, ss, res); - else if (fields == 2) do_ldrs(1, 8, Rt, va, ss, res); - else if (fields == 3) do_ldrs(1, 4, Rt, va, ss, res); - else if ((fields == 4) || - (fields == 5) || - (fields == 6) || - (fields == 7) || - (fields == 12) || - (fields == 13) || - (fields == 0x14) || - (fields == 0x15) || - (fields == 0x1c) || - (fields == 0x1d)) CANNOTDECODE("simd", inst); - else if (fields == 8) do_str(2, Rt, va, ss, res); - else if (fields == 9) do_ldr(2, Rt, va, ss, res); - else if (fields == 10) do_ldrs(2, 8, Rt, va, ss, res); - else if (fields == 11) do_ldrs(2, 4, Rt, va, ss, res); - else if (fields == 0x10) do_str(4, Rt, va, ss, res); - else if (fields == 0x11) do_ldr(4, Rt, va, ss, res); - else if (fields == 0x12) do_ldrs(4, 8, Rt, va, ss, res); - else if (fields == 0x18) do_str(8, Rt, va, ss, res); - else if (fields == 0x19) do_ldr(8, Rt, va, ss, res); - else if (fields == 0x1a) do_prfm(Rt, va, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c3313++; - - return 0; + uint32_t size = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + opc = BITS(inst, 23, 22), + Rt = BITS(inst, 4, 0); + uint8_t fields = (size << 3) | (V << 2) | opc; + + res->rr_num = 1; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_str(1, Rt, va, ss, res); + } else if (fields == 1) { + do_ldr(1, Rt, va, ss, res); + } else if (fields == 2) { + do_ldrs(1, 8, Rt, va, ss, res); + } else if (fields == 3) { + do_ldrs(1, 4, Rt, va, ss, res); + } else if ((fields == 4) || + (fields == 5) || + (fields == 6) || + (fields == 7) || + (fields == 12) || + (fields == 13) || + (fields == 0x14) || + (fields == 0x15) || + (fields == 0x1c) || + (fields == 0x1d)) { + CANNOTDECODE("simd", inst); + } else if (fields == 8) { + do_str(2, Rt, va, ss, res); + } else if (fields == 9) { + do_ldr(2, Rt, va, ss, res); + } else if (fields == 10) { + do_ldrs(2, 8, Rt, va, ss, res); + } else if (fields == 11) { + do_ldrs(2, 4, Rt, va, ss, res); + } else if (fields == 0x10) { + do_str(4, Rt, va, ss, res); + } else if (fields == 0x11) { + do_ldr(4, Rt, va, ss, res); + } else if (fields == 0x12) { + do_ldrs(4, 8, Rt, va, ss, res); + } else if (fields == 0x18) { + do_str(8, Rt, va, ss, res); + } else if (fields == 0x19) { + do_ldr(8, Rt, va, ss, res); + } else if (fields == 0x1a) { + do_prfm(Rt, 
va, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c3313++; + + return 0; } -static int run_c3314(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c3314(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t opc = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - L = BITS(inst, 22, 22), - Rt = BITS(inst, 4, 0), - Rt2 = BITS(inst, 14, 10); - uint8_t fields = (opc << 2) | (V << 1) | L; - - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_stp(4, Rt, Rt2, va, ss, res); - else if (fields == 1) do_ldp(4, Rt, Rt2, va, ss, res); - else if ((fields == 2) || - (fields == 3) || - (fields == 6) || - (fields == 7) || - (fields == 10) || - (fields == 11)) CANNOTDECODE("simd", inst); - else if (fields == 5) do_ldpsw(Rt, Rt2, va, ss, res); - else if (fields == 8) do_stp(8, Rt, Rt2, va, ss, res); - else if (fields == 9) do_ldp(8, Rt, Rt2, va, ss, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c3314++; - - return 0; + uint32_t opc = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + L = BITS(inst, 22, 22), + Rt = BITS(inst, 4, 0), + Rt2 = BITS(inst, 14, 10); + uint8_t fields = (opc << 2) | (V << 1) | L; + + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_stp(4, Rt, Rt2, va, ss, res); + } else if (fields == 1) { + do_ldp(4, Rt, Rt2, va, ss, res); + } else if ((fields == 2) || + (fields == 3) || + (fields == 6) || + (fields == 7) || + (fields == 10) || + (fields == 11)) { + CANNOTDECODE("simd", inst); + } else if (fields == 5) { + do_ldpsw(Rt, Rt2, va, ss, res); + } else if (fields == 8) { + do_stp(8, Rt, Rt2, va, ss, res); + } else if (fields == 9) { + do_ldp(8, Rt, Rt2, va, ss, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c3314++; + + return 0; } -static int run_c3315(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c3315(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t opc = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - L = BITS(inst, 22, 22), - Rt = BITS(inst, 4, 0), - Rt2 = BITS(inst, 14, 10); - uint8_t fields = (opc << 2) | (V << 1) | L; - - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_stp(4, Rt, Rt2, va, ss, res); - else if (fields == 1) do_ldp(4, Rt, Rt2, va, ss, res); - else if ((fields == 2) || - (fields == 3) || - (fields == 6) || - (fields == 7) || - (fields == 10) || - (fields == 11)) CANNOTDECODE("simd", inst); - else if (fields == 5) do_ldpsw(Rt, Rt2, va, ss, res); - else if (fields == 8) do_stp(8, Rt, Rt2, va, ss, res); - else if (fields == 9) do_ldp(8, Rt, Rt2, va, ss, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c3315++; - - return 0; + uint32_t opc = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + L = BITS(inst, 22, 22), + Rt = BITS(inst, 4, 0), + Rt2 = BITS(inst, 14, 10); + uint8_t fields = (opc << 2) | (V << 1) | L; + + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_stp(4, Rt, Rt2, va, ss, res); + } else if (fields == 1) { + do_ldp(4, Rt, Rt2, va, ss, res); + } else if ((fields == 2) || + (fields == 3) || + (fields == 6) || + (fields == 7) || + (fields == 10) || + (fields == 11)) { + CANNOTDECODE("simd", inst); + } else if (fields == 5) { + do_ldpsw(Rt, Rt2, va, ss, res); + } else if (fields == 8) { + do_stp(8, 
Rt, Rt2, va, ss, res); + } else if (fields == 9) { + do_ldp(8, Rt, Rt2, va, ss, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c3315++; + + return 0; } -static int run_c3316(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) +static int +run_c3316(uint32_t inst, vm_offset_t pa, vm_offset_t va, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint32_t opc = BITS(inst, 31, 30), - V = BITS(inst, 26, 26), - L = BITS(inst, 22, 22), - Rt = BITS(inst, 4, 0), - Rt2 = BITS(inst, 14, 10); - uint8_t fields = (opc << 2) | (V << 1) | L; - - res->rr_num = 2; - res->rr_addrdata[0].ad_addr = pa; - - if (fields == 0) do_stp(4, Rt, Rt2, va, ss, res); - else if (fields == 1) do_ldp(4, Rt, Rt2, va, ss, res); - else if ((fields == 2) || - (fields == 3) || - (fields == 6) || - (fields == 7) || - (fields == 10) || - (fields == 11)) CANNOTDECODE("simd", inst); - else if (fields == 5) do_ldpsw(Rt, Rt2, va, ss, res); - else if (fields == 8) do_stp(8, Rt, Rt2, va, ss, res); - else if (fields == 9) do_ldp(8, Rt, Rt2, va, ss, res); - else CANNOTDECODE("unknown", inst); - - stats.stat_decoder.sd_c3316++; - - return 0; + uint32_t opc = BITS(inst, 31, 30), + V = BITS(inst, 26, 26), + L = BITS(inst, 22, 22), + Rt = BITS(inst, 4, 0), + Rt2 = BITS(inst, 14, 10); + uint8_t fields = (opc << 2) | (V << 1) | L; + + res->rr_num = 2; + res->rr_addrdata[0].ad_addr = pa; + + if (fields == 0) { + do_stp(4, Rt, Rt2, va, ss, res); + } else if (fields == 1) { + do_ldp(4, Rt, Rt2, va, ss, res); + } else if ((fields == 2) || + (fields == 3) || + (fields == 6) || + (fields == 7) || + (fields == 10) || + (fields == 11)) { + CANNOTDECODE("simd", inst); + } else if (fields == 5) { + do_ldpsw(Rt, Rt2, va, ss, res); + } else if (fields == 8) { + do_stp(8, Rt, Rt2, va, ss, res); + } else if (fields == 9) { + do_ldp(8, Rt, Rt2, va, ss, res); + } else { + CANNOTDECODE("unknown", inst); + } + + stats.stat_decoder.sd_c3316++; + + return 0; } -static bool get_info_simd(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_simd(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { #pragma unused(inst, ss, info) - CANNOTDECODE("simd", inst); - return false; + CANNOTDECODE("simd", inst); + return false; } // load register (literal) -static bool get_info_c335(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c335(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t opc = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t imm19 = BITS(inst, 23, 5); - uint32_t fields = (opc << 1) | V; - uint8_t scale; - - if (__builtin_expect(fields > 6, false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - assert(fields <= 6); - - if (V == 1) { - scale = 2 + opc; - } else { - switch (opc) { - case 0 ... 1: - scale = 2 + opc; - break; - case 2: - scale = 2; - break; - default: - CANNOTDECODE("invalid", inst); - return false; - } - } - - info->bytes = 1 << scale; - info->addr = ss->ss_64.pc + (SIGN_EXTEND_64(imm19, 19) << 2); - - return true; + uint32_t opc = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t imm19 = BITS(inst, 23, 5); + uint32_t fields = (opc << 1) | V; + uint8_t scale; + + if (__builtin_expect(fields > 6, false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + assert(fields <= 6); + + if (V == 1) { + scale = 2 + opc; + } else { + switch (opc) { + case 0 ... 
1: + scale = 2 + opc; + break; + case 2: + scale = 2; + break; + default: + CANNOTDECODE("invalid", inst); + return false; + } + } + + info->bytes = 1 << scale; + info->addr = ss->ss_64.pc + (SIGN_EXTEND_64(imm19, 19) << 2); + + return true; } // load/store exclusive -static bool get_info_c336(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c336(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t size = BITS(inst, 31, 30); - uint32_t o2 = BITS(inst, 23, 23); - uint32_t L = BITS(inst, 22, 22); - uint32_t o1 = BITS(inst, 21, 21); - uint32_t o0 = BITS(inst, 15, 15); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (size << 4) | (o2 << 3) | (L << 2) | (o1 << 1) | o0; - - if (__builtin_expect((2 <= fields && fields <= 3) || - (6 <= fields && fields <= 8) || - (10 <= fields && fields <= 12) || - (14 <= fields && fields <= 15) || - (18 <= fields && fields <= 19) || - (22 <= fields && fields <= 24) || - (26 <= fields && fields <= 28) || - (30 <= fields && fields <= 31) || - (40 == fields) || - (42 <= fields && fields <= 44) || - (46 <= fields && fields <= 47) || - (56 == fields) || - (58 <= fields && fields <= 60) || - (62 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - info->bytes = (1 << size) << o1; - info->addr = ss->ss_64.x[Rn]; - - return true; + uint32_t size = BITS(inst, 31, 30); + uint32_t o2 = BITS(inst, 23, 23); + uint32_t L = BITS(inst, 22, 22); + uint32_t o1 = BITS(inst, 21, 21); + uint32_t o0 = BITS(inst, 15, 15); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (size << 4) | (o2 << 3) | (L << 2) | (o1 << 1) | o0; + + if (__builtin_expect((2 <= fields && fields <= 3) || + (6 <= fields && fields <= 8) || + (10 <= fields && fields <= 12) || + (14 <= fields && fields <= 15) || + (18 <= fields && fields <= 19) || + (22 <= fields && fields <= 24) || + (26 <= fields && fields <= 28) || + (30 <= fields && fields <= 31) || + (40 == fields) || + (42 <= fields && fields <= 44) || + (46 <= fields && fields <= 47) || + (56 == fields) || + (58 <= fields && fields <= 60) || + (62 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + info->bytes = (1 << size) << o1; + info->addr = ss->ss_64.x[Rn]; + + return true; } // load/store no-allocate pair (offset) -bool get_info_c337(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +bool +get_info_c337(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t opc = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t L = BITS(inst, 22, 22); - uint32_t imm7 = BITS(inst, 21, 15); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (opc << 2) | (V << 1) | L; - uint8_t scale; - - if (__builtin_expect((4 <= fields && fields <= 5) || - (12 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = opc + 2; - } else { - scale = BITS(opc, 1, 1) + 2; - } - - // double since it's pair - info->bytes = 2 * (1 << scale); - info->addr = ss->ss_64.x[Rn] + (SIGN_EXTEND_64(imm7, 7) << scale); - - return true; + uint32_t opc = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t L = BITS(inst, 22, 22); + uint32_t imm7 = BITS(inst, 21, 15); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (opc << 2) | (V << 1) | L; + uint8_t scale; + + if (__builtin_expect((4 <= fields && fields <= 5) || + (12 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + if (V == 1) { + scale = opc + 2; + } else { + scale = BITS(opc, 1, 1) 
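/*
 * get_info_c335 above computes the target of a load-literal: the imm19
 * word offset is sign-extended, scaled by 4, and added to the PC. The same
 * arithmetic as a standalone sketch (the >> on a signed value assumes the
 * usual arithmetic-shift behavior of GCC/Clang):
 */
#include <stdint.h>

static inline uint64_t literal_target(uint64_t pc, uint32_t inst)
{
    uint64_t imm19 = (inst >> 5) & 0x7ffff;        /* bits 23:5 */
    int64_t  off = (int64_t)(imm19 << 45) >> 45;   /* sign-extend 19 bits */
    return pc + ((uint64_t)off << 2);              /* word offset */
}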
+ 2; + } + + // double since it's pair + info->bytes = 2 * (1 << scale); + info->addr = ss->ss_64.x[Rn] + (SIGN_EXTEND_64(imm7, 7) << scale); + + return true; } // load/store reigster (immediate post-indexed) -static bool get_info_c338(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c338(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t size = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t opc = BITS(inst, 23, 22); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (size << 3) | (V << 2) | opc; - uint8_t scale; - - if (__builtin_expect((14 <= fields && fields <= 15) || - (19 == fields) || - (22 <= fields && fields <= 23) || - (26 <= fields && fields <= 27) || - (30 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = BITS(opc, 1, 1) << 2 | size; - } else { - scale = size; - } - - info->bytes = 1 << scale; - // post-indexed - info->addr = ss->ss_64.x[Rn]; - - return true; + uint32_t size = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t opc = BITS(inst, 23, 22); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (size << 3) | (V << 2) | opc; + uint8_t scale; + + if (__builtin_expect((14 <= fields && fields <= 15) || + (19 == fields) || + (22 <= fields && fields <= 23) || + (26 <= fields && fields <= 27) || + (30 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + if (V == 1) { + scale = BITS(opc, 1, 1) << 2 | size; + } else { + scale = size; + } + + info->bytes = 1 << scale; + // post-indexed + info->addr = ss->ss_64.x[Rn]; + + return true; } // load/store register (immediate pre-indexed) -static bool get_info_c339(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c339(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t size = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t opc = BITS(inst, 23, 22); - uint32_t imm9 = BITS(inst, 20, 12); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (size << 3) | (V << 2) | opc; - uint8_t scale; - - if (__builtin_expect((14 <= fields && fields <= 15) || - (19 == fields) || - (22 <= fields && fields <= 23) || - (26 <= fields && fields <= 27) || - (30 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = BITS(opc, 1, 1) << 2 | size; - } else { - scale = size; - } - - info->bytes = 1 << scale; - info->addr = ss->ss_64.x[Rn] + SIGN_EXTEND_64(imm9, 9); - - return true; + uint32_t size = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t opc = BITS(inst, 23, 22); + uint32_t imm9 = BITS(inst, 20, 12); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (size << 3) | (V << 2) | opc; + uint8_t scale; + + if (__builtin_expect((14 <= fields && fields <= 15) || + (19 == fields) || + (22 <= fields && fields <= 23) || + (26 <= fields && fields <= 27) || + (30 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + if (V == 1) { + scale = BITS(opc, 1, 1) << 2 | size; + } else { + scale = size; + } + + info->bytes = 1 << scale; + info->addr = ss->ss_64.x[Rn] + SIGN_EXTEND_64(imm9, 9); + + return true; } // load/store register (register offset) -static bool get_info_c3310(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c3310(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t size = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t opc = 
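/*
 * The get_info_* routines lean on SIGN_EXTEND_64 and ZERO_EXTEND_64 for
 * their immediate arithmetic. A plausible, self-contained shape for those
 * helpers (illustrative; the kernel's macros are defined elsewhere):
 */
#include <stdint.h>

static inline int64_t sign_extend_64(uint64_t v, unsigned width)
{
    uint64_t sign;
    if (width >= 64) {
        return (int64_t)v;
    }
    sign = 1ULL << (width - 1);
    v &= (1ULL << width) - 1;            /* keep the low 'width' bits */
    return (int64_t)((v ^ sign) - sign); /* flip-and-subtract sign trick */
}

static inline uint64_t zero_extend_64(uint64_t v, unsigned width)
{
    return (width >= 64) ? v : (v & ((1ULL << width) - 1));
}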
BITS(inst, 23, 22); - uint32_t Rm = BITS(inst, 20, 16); - uint32_t option = BITS(inst, 15, 13); - uint32_t S = BITS(inst, 12, 12); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (size << 3) | (V << 2) | opc; - uint32_t scale; - - if (__builtin_expect((14 <= fields && fields <= 15) || - (19 == fields) || - (22 <= fields && fields <= 23) || - (27 == fields) || - (30 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = BITS(opc, 1, 1) | size; - } else { - scale = size; - } - - info->bytes = 1 << scale; - - uint64_t m = ss->ss_64.x[Rm]; - uint8_t shift = (S == 1 ? scale : 0); - - switch (option) { - case 0 ... 3: - info->addr = ss->ss_64.x[Rn] + (ZERO_EXTEND_64(m, 8 << option) << shift); - break; - case 4 ... 7: - info->addr = ss->ss_64.x[Rn] + (SIGN_EXTEND_64(m, 8 << BITS(option, 1, 0)) << shift); - break; - default: - CANNOTDECODE("invalid", inst); - return false; - } - - return true; + uint32_t size = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t opc = BITS(inst, 23, 22); + uint32_t Rm = BITS(inst, 20, 16); + uint32_t option = BITS(inst, 15, 13); + uint32_t S = BITS(inst, 12, 12); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (size << 3) | (V << 2) | opc; + uint32_t scale; + + if (__builtin_expect((14 <= fields && fields <= 15) || + (19 == fields) || + (22 <= fields && fields <= 23) || + (27 == fields) || + (30 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + if (V == 1) { + scale = BITS(opc, 1, 1) | size; + } else { + scale = size; + } + + info->bytes = 1 << scale; + + uint64_t m = ss->ss_64.x[Rm]; + uint8_t shift = (S == 1 ? scale : 0); + + switch (option) { + case 0 ... 3: + info->addr = ss->ss_64.x[Rn] + (ZERO_EXTEND_64(m, 8 << option) << shift); + break; + case 4 ... 
7: + info->addr = ss->ss_64.x[Rn] + (SIGN_EXTEND_64(m, 8 << BITS(option, 1, 0)) << shift); + break; + default: + CANNOTDECODE("invalid", inst); + return false; + } + + return true; } // load/store register (unprivileged) -static bool get_info_c3311(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c3311(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t size = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t opc = BITS(inst, 23, 22); - uint32_t imm9 = BITS(inst, 20, 12); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (size << 3) | (V << 2) | opc; - - if (__builtin_expect((4 <= fields && fields <= 7) || - (12 <= fields && fields <= 15) || - (19 <= fields && fields <= 23) || - (26 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - info->bytes = 1 << size; - info->addr = ss->ss_64.x[Rn] + SIGN_EXTEND_64(imm9, 9); - - return true; + uint32_t size = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t opc = BITS(inst, 23, 22); + uint32_t imm9 = BITS(inst, 20, 12); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (size << 3) | (V << 2) | opc; + + if (__builtin_expect((4 <= fields && fields <= 7) || + (12 <= fields && fields <= 15) || + (19 <= fields && fields <= 23) || + (26 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + info->bytes = 1 << size; + info->addr = ss->ss_64.x[Rn] + SIGN_EXTEND_64(imm9, 9); + + return true; } // load/store register (unscaled immediate) -static bool get_info_c3312(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c3312(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t size = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t opc = BITS(inst, 23, 22); - uint32_t imm9 = BITS(inst, 20, 12); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (size << 3) | (V << 2) | opc; - uint32_t scale; - - if (__builtin_expect((14 <= fields && fields <= 15) || - (19 == fields) || - (22 <= fields && fields <= 23) || - (27 == fields) || - (30 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = BITS(opc, 1, 1) << 2 | size; - } else { - scale = size; - } - - info->bytes = 1 < scale; - info->addr = ss->ss_64.x[Rn] + SIGN_EXTEND_64(imm9, 9); - - return true; + uint32_t size = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t opc = BITS(inst, 23, 22); + uint32_t imm9 = BITS(inst, 20, 12); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (size << 3) | (V << 2) | opc; + uint32_t scale; + + if (__builtin_expect((14 <= fields && fields <= 15) || + (19 == fields) || + (22 <= fields && fields <= 23) || + (27 == fields) || + (30 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + if (V == 1) { + scale = BITS(opc, 1, 1) << 2 | size; + } else { + scale = size; + } + + info->bytes = 1 << scale; + info->addr = ss->ss_64.x[Rn] + SIGN_EXTEND_64(imm9, 9); + + return true; } // load/store register (unsigned immediate) -bool get_info_c3313(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +bool +get_info_c3313(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t size = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t opc = BITS(inst, 23, 22); - uint32_t imm12 = BITS(inst, 21, 10); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (size << 3) | (V << 2) | opc; - uint32_t scale; - - if (__builtin_expect((14 <=
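/*
 * get_info_c3310 above decodes the register-offset form: the Xm/Wm offset
 * is zero- or sign-extended according to 'option' and shifted by the access
 * scale when S is set. The same computation as one function, reusing the
 * sign_extend_64/zero_extend_64 helpers sketched earlier:
 */
#include <stdint.h>

static uint64_t regoff_target(uint64_t xn, uint64_t xm,
    uint32_t option, uint32_t s, uint32_t scale)
{
    unsigned shift = s ? scale : 0;
    unsigned width = 8u << (option & 3);   /* 8/16/32/64-bit source */

    if (option & 4) {                      /* SXTB/SXTH/SXTW/SXTX */
        return xn + (((uint64_t)sign_extend_64(xm, width)) << shift);
    }
    return xn + (zero_extend_64(xm, width) << shift);  /* UXTB..UXTX, LSL */
}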
fields && fields <= 15) || - (19 == fields) || - (22 <= fields && fields <= 23) || - (27 == fields) || - (30 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = BITS(opc, 1, 1) << 2 | size; - } else { - scale = size; - } - - info->bytes = 1 << scale; - info->addr = ss->ss_64.x[Rn] + (ZERO_EXTEND_64(imm12, 12) << scale); - - return true; + uint32_t size = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t opc = BITS(inst, 23, 22); + uint32_t imm12 = BITS(inst, 21, 10); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (size << 3) | (V << 2) | opc; + uint32_t scale; + + if (__builtin_expect((14 <= fields && fields <= 15) || + (19 == fields) || + (22 <= fields && fields <= 23) || + (27 == fields) || + (30 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + if (V == 1) { + scale = BITS(opc, 1, 1) << 2 | size; + } else { + scale = size; + } + + info->bytes = 1 << scale; + info->addr = ss->ss_64.x[Rn] + (ZERO_EXTEND_64(imm12, 12) << scale); + + return true; } // load/store register pair (offset) -static bool get_info_c3314(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c3314(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t opc = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t L = BITS(inst, 22, 22); - uint32_t imm7 = BITS(inst, 21, 15); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (opc << 2) | (V << 1) | L; - uint8_t scale = 2 + (opc >> 1); - - if (__builtin_expect((4 == fields) || - (12 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = 2 + opc; - } else { - scale = 2 + BITS(opc, 1, 1); - } - - info->bytes = 2 * (1 << scale); - info->addr = ss->ss_64.x[Rn] + (SIGN_EXTEND_64(imm7, 7) << scale); - - return true; + uint32_t opc = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t L = BITS(inst, 22, 22); + uint32_t imm7 = BITS(inst, 21, 15); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (opc << 2) | (V << 1) | L; + uint8_t scale = 2 + (opc >> 1); + + if (__builtin_expect((4 == fields) || + (12 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + if (V == 1) { + scale = 2 + opc; + } else { + scale = 2 + BITS(opc, 1, 1); + } + + info->bytes = 2 * (1 << scale); + info->addr = ss->ss_64.x[Rn] + (SIGN_EXTEND_64(imm7, 7) << scale); + + return true; } // load/store register pair (post-indexed) -static bool get_info_c3315(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c3315(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t opc = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t L = BITS(inst, 22, 22); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (opc << 2) | (V << 1) | L; - uint8_t scale = 2 + (opc >> 1); - - if (__builtin_expect((4 == fields) || - (12 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = 2 + opc; - } else { - scale = 2 + BITS(opc, 1, 1); - } - - info->bytes = 2 * (1 << scale); - // post-indexed - info->addr = ss->ss_64.x[Rn]; - - return true; + uint32_t opc = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t L = BITS(inst, 22, 22); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (opc << 2) | (V << 1) | L; + uint8_t scale = 2 + (opc >> 1); + + if (__builtin_expect((4 == fields) || + (12 <= fields), false)) { + CANNOTDECODE("invalid", inst); + 
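The get_info_* decoders above are built from a handful of bit-twiddling helpers — BITS, ZERO_EXTEND_64, SIGN_EXTEND_64 — whose definitions live elsewhere in pgtrace_decoder.c. A minimal sketch of what they must do, inferred from the call sites here (illustrative, not the kernel's actual macros; field widths below 32 are assumed for BITS):

#include <stdint.h>

/* Extract bits msb..lsb of x, right-justified. */
#define BITS(x, msb, lsb) (((x) >> (lsb)) & ((1u << ((msb) - (lsb) + 1)) - 1u))

/* Keep the low w bits of v, clearing the rest; w == 64 means pass-through
 * (the register-offset form uses width 8 << option, which reaches 64). */
#define ZERO_EXTEND_64(v, w) \
	(((w) >= 64) ? (uint64_t)(v) : ((uint64_t)(v) & ((1ULL << (w)) - 1ULL)))

/* Keep the low w bits of v and replicate bit w-1 upward. */
static inline uint64_t
sign_extend_64(uint64_t v, unsigned w)
{
	if (w >= 64) {
		return v;               /* SXTX: full width, nothing to extend */
	}
	uint64_t sign = 1ULL << (w - 1);
	v &= (1ULL << w) - 1ULL;
	return (v ^ sign) - sign;       /* branch-free sign extension */
}

With those in hand, the register-offset form reads straight out of the ARM ARM: option<2:0> selects the extend applied to Xm (0-3 unsigned at width 8 << option, 4-7 signed at width 8 << option<1:0>) and S scales the offset by the access size. For example, LDR X1, [X2, W3, SXTW #3] decodes as option == 6, S == 1, scale == 3, giving addr = X2 + (sign_extend_64(W3, 32) << 3).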
return false; + } + + if (V == 1) { + scale = 2 + opc; + } else { + scale = 2 + BITS(opc, 1, 1); + } + + info->bytes = 2 * (1 << scale); + // post-indexed + info->addr = ss->ss_64.x[Rn]; + + return true; } // load/store register pair (pre-indexed) -static bool get_info_c3316(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) +static bool +get_info_c3316(uint32_t inst, arm_saved_state_t *ss, instruction_info_t *info) { - uint32_t opc = BITS(inst, 31, 30); - uint32_t V = BITS(inst, 26, 26); - uint32_t L = BITS(inst, 22, 22); - uint32_t imm7 = BITS(inst, 21, 15); - uint32_t Rn = BITS(inst, 9, 5); - uint32_t fields = (opc << 2) | (V << 1) | L; - uint8_t scale = 2 + (opc >> 1); - - if (__builtin_expect((4 == fields) || - (12 <= fields), false)) { - CANNOTDECODE("invalid", inst); - return false; - } - - if (V == 1) { - scale = 2 + opc; - } else { - scale = 2 + BITS(opc, 1, 1); - } - - info->bytes = 2 * (1 << scale); - info->addr = ss->ss_64.x[Rn] + (SIGN_EXTEND_64(imm7, 7) << scale); - - return true; + uint32_t opc = BITS(inst, 31, 30); + uint32_t V = BITS(inst, 26, 26); + uint32_t L = BITS(inst, 22, 22); + uint32_t imm7 = BITS(inst, 21, 15); + uint32_t Rn = BITS(inst, 9, 5); + uint32_t fields = (opc << 2) | (V << 1) | L; + uint8_t scale = 2 + (opc >> 1); + + if (__builtin_expect((4 == fields) || + (12 <= fields), false)) { + CANNOTDECODE("invalid", inst); + return false; + } + + if (V == 1) { + scale = 2 + opc; + } else { + scale = 2 + BITS(opc, 1, 1); + } + + info->bytes = 2 * (1 << scale); + info->addr = ss->ss_64.x[Rn] + (SIGN_EXTEND_64(imm7, 7) << scale); + + return true; } //------------------------------------------------------------------- // Globals // -int pgtrace_decode_and_run(uint32_t inst, vm_offset_t fva, vm_map_offset_t *cva_page, arm_saved_state_t *ss, pgtrace_run_result_t *res) +int +pgtrace_decode_and_run(uint32_t inst, vm_offset_t fva, vm_map_offset_t *cva_page, arm_saved_state_t *ss, pgtrace_run_result_t *res) { - uint8_t len = sizeof(typetbl)/sizeof(type_entry_t); - run_t run = NULL; - get_info_t get_info = NULL; - vm_offset_t pa, cva; - vm_offset_t cva_front_page = cva_page[0]; - vm_offset_t cva_cur_page = cva_page[1]; - instruction_info_t info; - - for (uint8_t i = 0; i < len; i++) { - if ((typetbl[i].mask & inst) == typetbl[i].value) { - run = typetbl[i].run; - get_info = typetbl[i].get_info; - break; - } - } - - assert(run != NULL && get_info != NULL); - - get_info(inst, ss, &info); - - if (info.addr == fva) { - cva = cva_cur_page + (fva & ARM_PGMASK); - } else { - // which means a front page is not a tracing page - cva = cva_front_page + (fva & ARM_PGMASK); - } - - pa = mmu_kvtop(cva); - if (!pa) { - panic("%s: invalid address cva=%lx fva=%lx info.addr=%lx inst=%x", __func__, cva, fva, info.addr, inst); - } - - absolutetime_to_nanoseconds(mach_absolute_time(), &res->rr_time); - run(inst, pa, cva, ss, res); - - return 0; + uint8_t len = sizeof(typetbl) / sizeof(type_entry_t); + run_t run = NULL; + get_info_t get_info = NULL; + vm_offset_t pa, cva; + vm_offset_t cva_front_page = cva_page[0]; + vm_offset_t cva_cur_page = cva_page[1]; + instruction_info_t info; + + for (uint8_t i = 0; i < len; i++) { + if ((typetbl[i].mask & inst) == typetbl[i].value) { + run = typetbl[i].run; + get_info = typetbl[i].get_info; + break; + } + } + + assert(run != NULL && get_info != NULL); + + get_info(inst, ss, &info); + + if (info.addr == fva) { + cva = cva_cur_page + (fva & ARM_PGMASK); + } else { + // which means a front page is not a tracing page + cva = cva_front_page + 
(fva & ARM_PGMASK); + } + + pa = mmu_kvtop(cva); + if (!pa) { + panic("%s: invalid address cva=%lx fva=%lx info.addr=%lx inst=%x", __func__, cva, fva, info.addr, inst); + } + + absolutetime_to_nanoseconds(mach_absolute_time(), &res->rr_time); + run(inst, pa, cva, ss, res); + + return 0; } -void pgtrace_decoder_get_stats(pgtrace_stats_t *s) +void +pgtrace_decoder_get_stats(pgtrace_stats_t *s) { - memcpy((void *)&(s->stat_decoder), &(stats.stat_decoder), sizeof(stats.stat_decoder)); + memcpy((void *)&(s->stat_decoder), &(stats.stat_decoder), sizeof(stats.stat_decoder)); } #endif diff --git a/osfmk/arm64/pgtrace_decoder.h b/osfmk/arm64/pgtrace_decoder.h index e5c4b5c77..7e7268817 100644 --- a/osfmk/arm64/pgtrace_decoder.h +++ b/osfmk/arm64/pgtrace_decoder.h @@ -37,4 +37,3 @@ int pgtrace_decode_and_run(uint32_t inst, vm_offset_t va, vm_map_offset_t *cva, arm_saved_state_t *ss, pgtrace_run_result_t *res); void pgtrace_decoder_get_stats(pgtrace_stats_t *stats); #endif - diff --git a/osfmk/arm64/platform_tests.c b/osfmk/arm64/platform_tests.c index 96a59cac2..5f6e474fb 100644 --- a/osfmk/arm64/platform_tests.c +++ b/osfmk/arm64/platform_tests.c @@ -31,23 +31,23 @@ /* * Mach Operating System Copyright (c) 1991,1990,1989,1988,1987 Carnegie * Mellon University All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright notice * and this permission notice appear in all copies of the software, * derivative works or modified versions, and any portions thereof, and that * both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. * CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES * WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science Carnegie Mellon University Pittsburgh PA * 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon the * rights to redistribute these changes. */ @@ -68,12 +68,12 @@ #include #include -#if MACH_KDB +#if MACH_KDB #include #include #include #include -#endif /* MACH_KDB */ +#endif /* MACH_KDB */ #include #include @@ -97,12 +97,12 @@ volatile char pan_fault_value = 0; #include #define LOCK_TEST_ITERATIONS 50 -static hw_lock_data_t lt_hw_lock; -static lck_spin_t lt_lck_spin_t; -static lck_mtx_t lt_mtx; -static lck_rw_t lt_rwlock; +static hw_lock_data_t lt_hw_lock; +static lck_spin_t lt_lck_spin_t; +static lck_mtx_t lt_mtx; +static lck_rw_t lt_rwlock; static volatile uint32_t lt_counter = 0; -static volatile int lt_spinvolatile; +static volatile int lt_spinvolatile; static volatile uint32_t lt_max_holders = 0; static volatile uint32_t lt_upgrade_holders = 0; static volatile uint32_t lt_max_upgrade_holders = 0; @@ -112,40 +112,40 @@ static volatile uint32_t lt_target_done_threads; static volatile uint32_t lt_cpu_bind_id = 0; static void -lt_note_another_blocking_lock_holder() +lt_note_another_blocking_lock_holder() { - hw_lock_lock(<_hw_lock); + hw_lock_lock(<_hw_lock, LCK_GRP_NULL); lt_num_holders++; lt_max_holders = (lt_max_holders < lt_num_holders) ? 
lt_num_holders : lt_max_holders; hw_lock_unlock(<_hw_lock); } static void -lt_note_blocking_lock_release() +lt_note_blocking_lock_release() { - hw_lock_lock(<_hw_lock); + hw_lock_lock(<_hw_lock, LCK_GRP_NULL); lt_num_holders--; hw_lock_unlock(<_hw_lock); } static void -lt_spin_a_little_bit() +lt_spin_a_little_bit() { uint32_t i; - + for (i = 0; i < 10000; i++) { lt_spinvolatile++; } } static void -lt_sleep_a_little_bit() +lt_sleep_a_little_bit() { delay(100); } static void -lt_grab_mutex() +lt_grab_mutex() { lck_mtx_lock(<_mtx); lt_note_another_blocking_lock_holder(); @@ -158,13 +158,14 @@ lt_grab_mutex() static void lt_grab_mutex_with_try() { - while(0 == lck_mtx_try_lock(<_mtx)); + while (0 == lck_mtx_try_lock(<_mtx)) { + ; + } lt_note_another_blocking_lock_holder(); lt_sleep_a_little_bit(); lt_counter++; lt_note_blocking_lock_release(); lck_mtx_unlock(<_mtx); - } static void @@ -181,7 +182,7 @@ lt_grab_rw_exclusive() static void lt_grab_rw_exclusive_with_try() { - while(0 == lck_rw_try_lock_exclusive(<_rwlock)) { + while (0 == lck_rw_try_lock_exclusive(<_rwlock)) { lt_sleep_a_little_bit(); } @@ -193,37 +194,37 @@ lt_grab_rw_exclusive_with_try() } /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) -static void -lt_grab_rw_shared() -{ - lck_rw_lock_shared(<_rwlock); - lt_counter++; - - lt_note_another_blocking_lock_holder(); - lt_sleep_a_little_bit(); - lt_note_blocking_lock_release(); - - lck_rw_done(<_rwlock); -} -*/ + * static void + * lt_grab_rw_shared() + * { + * lck_rw_lock_shared(<_rwlock); + * lt_counter++; + * + * lt_note_another_blocking_lock_holder(); + * lt_sleep_a_little_bit(); + * lt_note_blocking_lock_release(); + * + * lck_rw_done(<_rwlock); + * } + */ /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) -static void -lt_grab_rw_shared_with_try() -{ - while(0 == lck_rw_try_lock_shared(<_rwlock)); - lt_counter++; - - lt_note_another_blocking_lock_holder(); - lt_sleep_a_little_bit(); - lt_note_blocking_lock_release(); - - lck_rw_done(<_rwlock); -} -*/ + * static void + * lt_grab_rw_shared_with_try() + * { + * while(0 == lck_rw_try_lock_shared(<_rwlock)); + * lt_counter++; + * + * lt_note_another_blocking_lock_holder(); + * lt_sleep_a_little_bit(); + * lt_note_blocking_lock_release(); + * + * lck_rw_done(<_rwlock); + * } + */ static void -lt_upgrade_downgrade_rw() +lt_upgrade_downgrade_rw() { boolean_t upgraded, success; @@ -235,7 +236,7 @@ lt_upgrade_downgrade_rw() lt_note_another_blocking_lock_holder(); lt_sleep_a_little_bit(); lt_note_blocking_lock_release(); - + upgraded = lck_rw_lock_shared_to_exclusive(<_rwlock); if (!upgraded) { success = lck_rw_try_lock_exclusive(<_rwlock); @@ -254,7 +255,7 @@ lt_upgrade_downgrade_rw() lt_sleep_a_little_bit(); lt_upgrade_holders--; - + lck_rw_lock_exclusive_to_shared(<_rwlock); lt_spin_a_little_bit(); @@ -273,7 +274,7 @@ lt_stress_hw_lock() kprintf("%s>cpu %d starting\n", __FUNCTION__, cpuid); - hw_lock_lock(<_hw_lock); + hw_lock_lock(<_hw_lock, LCK_GRP_NULL); lt_counter++; local_counter++; hw_lock_unlock(<_hw_lock); @@ -285,14 +286,12 @@ lt_stress_hw_lock() kprintf("%s>cpu %d started\n", __FUNCTION__, cpuid); while (lt_counter < limit) { - spl_t s = splsched(); - hw_lock_lock(<_hw_lock); + hw_lock_lock(<_hw_lock, LCK_GRP_NULL); if (lt_counter < limit) { lt_counter++; local_counter++; } hw_lock_unlock(<_hw_lock); - splx(s); } lt_stress_local_counters[cpuid] = local_counter; @@ -301,9 +300,9 @@ lt_stress_hw_lock() } static void -lt_grab_hw_lock() +lt_grab_hw_lock() { - hw_lock_lock(<_hw_lock); + 
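Throughout this hunk the raw spin-lock calls gain a lock-group argument: hw_lock_lock, hw_lock_try, and hw_lock_to now take an lck_grp_t *, with LCK_GRP_NULL passed when no group is attributed, presumably so contention statistics can be tied to a lock group. A minimal caller under that assumption (the example_* names are illustrative):

static hw_lock_data_t example_lock;
static volatile uint32_t example_counter;

static void
example_locked_increment(void)
{
	/* the group argument is new in this release; LCK_GRP_NULL opts out
	 * of per-group accounting */
	hw_lock_lock(&example_lock, LCK_GRP_NULL);
	example_counter++;
	hw_lock_unlock(&example_lock);  /* unlock keeps its old signature */
}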
hw_lock_lock(<_hw_lock, LCK_GRP_NULL); lt_counter++; lt_spin_a_little_bit(); hw_lock_unlock(<_hw_lock); @@ -312,7 +311,9 @@ lt_grab_hw_lock() static void lt_grab_hw_lock_with_try() { - while(0 == hw_lock_try(<_hw_lock)); + while (0 == hw_lock_try(<_hw_lock, LCK_GRP_NULL)) { + ; + } lt_counter++; lt_spin_a_little_bit(); hw_lock_unlock(<_hw_lock); @@ -321,15 +322,16 @@ lt_grab_hw_lock_with_try() static void lt_grab_hw_lock_with_to() { - while(0 == hw_lock_to(<_hw_lock, LockTimeOut)) + while (0 == hw_lock_to(<_hw_lock, LockTimeOut, LCK_GRP_NULL)) { mp_enable_preemption(); + } lt_counter++; lt_spin_a_little_bit(); hw_lock_unlock(<_hw_lock); } static void -lt_grab_spin_lock() +lt_grab_spin_lock() { lck_spin_lock(<_lck_spin_t); lt_counter++; @@ -338,9 +340,11 @@ lt_grab_spin_lock() } static void -lt_grab_spin_lock_with_try() +lt_grab_spin_lock_with_try() { - while(0 == lck_spin_try_lock(<_lck_spin_t)); + while (0 == lck_spin_try_lock(<_lck_spin_t)) { + ; + } lt_counter++; lt_spin_a_little_bit(); lck_spin_unlock(<_lck_spin_t); @@ -372,7 +376,7 @@ lt_trylock_hw_lock_with_to() lt_sleep_a_little_bit(); OSMemoryBarrier(); } - lt_thread_lock_success = hw_lock_to(<_hw_lock, 100); + lt_thread_lock_success = hw_lock_to(<_hw_lock, 100, LCK_GRP_NULL); OSMemoryBarrier(); mp_enable_preemption(); } @@ -392,7 +396,7 @@ lt_trylock_spin_try_lock() static void lt_trylock_thread(void *arg, wait_result_t wres __unused) { - void (*func)(void) = (void(*)(void))arg; + void (*func)(void) = (void (*)(void))arg; func(); @@ -426,10 +430,10 @@ lt_wait_for_lock_test_threads() static kern_return_t lt_test_trylocks() { - boolean_t success; + boolean_t success; extern unsigned int real_ncpus; - - /* + + /* * First mtx try lock succeeds, second fails. */ success = lck_mtx_try_lock(<_mtx); @@ -447,7 +451,7 @@ lt_test_trylocks() lck_mtx_unlock(<_mtx); /* - * Two shared try locks on a previously unheld rwlock suceed, and a + * Two shared try locks on a previously unheld rwlock suceed, and a * subsequent exclusive attempt fails. */ success = lck_rw_try_lock_shared(<_rwlock); @@ -493,17 +497,17 @@ lt_test_trylocks() T_ASSERT_NULL(success, "After regular exclusive grab, exclusive trylock should not succeed"); lck_rw_done(<_rwlock); - /* + /* * First spin lock attempts succeed, second attempts fail. 
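lt_test_trylocks encodes the trylock contract in its assertions: a try-lock returns nonzero on success, shared rwlock grabs can stack, and any existing holder defeats an exclusive trylock. The same contract as a standalone sketch, using the KPIs exercised above (example_rw is assumed initialized via lck_rw_init elsewhere):

static lck_rw_t example_rw;

static void
example_rw_trylock_contract(void)
{
	boolean_t s1 = lck_rw_try_lock_shared(&example_rw);    /* succeeds: unheld */
	boolean_t s2 = lck_rw_try_lock_shared(&example_rw);    /* succeeds: shared stacks */
	boolean_t ex = lck_rw_try_lock_exclusive(&example_rw); /* fails: shared holders exist */

	assert(s1 && s2 && !ex);

	lck_rw_done(&example_rw);  /* one done per successful grab */
	lck_rw_done(&example_rw);
}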
*/ - success = hw_lock_try(<_hw_lock); + success = hw_lock_try(<_hw_lock, LCK_GRP_NULL); T_ASSERT_NOTNULL(success, "First spin lock attempts should succeed"); - success = hw_lock_try(<_hw_lock); + success = hw_lock_try(<_hw_lock, LCK_GRP_NULL); T_ASSERT_NULL(success, "Second attempt to spin lock should fail"); hw_lock_unlock(<_hw_lock); - - hw_lock_lock(<_hw_lock); - success = hw_lock_try(<_hw_lock); + + hw_lock_lock(<_hw_lock, LCK_GRP_NULL); + success = hw_lock_try(<_hw_lock, LCK_GRP_NULL); T_ASSERT_NULL(success, "After taking spin lock, trylock attempt should fail"); hw_lock_unlock(<_hw_lock); @@ -513,7 +517,7 @@ lt_test_trylocks() lt_target_done_threads = 1; OSMemoryBarrier(); lt_start_trylock_thread(lt_trylock_hw_lock_with_to); - success = hw_lock_to(<_hw_lock, 100); + success = hw_lock_to(<_hw_lock, 100, LCK_GRP_NULL); T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed"); if (real_ncpus == 1) { mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */ @@ -532,7 +536,7 @@ lt_test_trylocks() lt_target_done_threads = 1; OSMemoryBarrier(); lt_start_trylock_thread(lt_trylock_hw_lock_with_to); - hw_lock_lock(<_hw_lock); + hw_lock_lock(<_hw_lock, LCK_GRP_NULL); if (real_ncpus == 1) { mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */ } @@ -571,9 +575,9 @@ lt_test_trylocks() } static void -lt_thread(void *arg, wait_result_t wres __unused) +lt_thread(void *arg, wait_result_t wres __unused) { - void (*func)(void) = (void(*)(void)) arg; + void (*func)(void) = (void (*)(void))arg; uint32_t i; for (i = 0; i < LOCK_TEST_ITERATIONS; i++) { @@ -584,9 +588,9 @@ lt_thread(void *arg, wait_result_t wres __unused) } static void -lt_bound_thread(void *arg, wait_result_t wres __unused) +lt_bound_thread(void *arg, wait_result_t wres __unused) { - void (*func)(void) = (void(*)(void)) arg; + void (*func)(void) = (void (*)(void))arg; int cpuid = OSIncrementAtomic((volatile SInt32 *)<_cpu_bind_id); @@ -695,14 +699,14 @@ lt_test_locks() /* Uncontended shared rwlock */ /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) - T_LOG("Running uncontended shared rwlock test."); - lt_reset(); - lt_target_done_threads = 1; - lt_start_lock_thread(lt_grab_rw_shared); - lt_wait_for_lock_test_threads(); - T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); - T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); - */ + * T_LOG("Running uncontended shared rwlock test."); + * lt_reset(); + * lt_target_done_threads = 1; + * lt_start_lock_thread(lt_grab_rw_shared); + * lt_wait_for_lock_test_threads(); + * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); + * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); + */ /* Contended exclusive rwlock */ T_LOG("Running contended exclusive rwlock test."); @@ -717,29 +721,29 @@ lt_test_locks() /* One shared, two exclusive */ /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) - T_LOG("Running test with one shared and two exclusive rw lock threads."); - lt_reset(); - lt_target_done_threads = 3; - lt_start_lock_thread(lt_grab_rw_shared); - lt_start_lock_thread(lt_grab_rw_exclusive); - lt_start_lock_thread(lt_grab_rw_exclusive); - lt_wait_for_lock_test_threads(); - T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); - T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); - */ + * T_LOG("Running test with one shared and two exclusive rw lock threads."); + * lt_reset(); + * lt_target_done_threads = 3; + * 
lt_start_lock_thread(lt_grab_rw_shared); + * lt_start_lock_thread(lt_grab_rw_exclusive); + * lt_start_lock_thread(lt_grab_rw_exclusive); + * lt_wait_for_lock_test_threads(); + * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); + * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); + */ /* Four shared */ /* Disabled until lt_grab_rw_shared() is fixed (rdar://30685840) - T_LOG("Running test with four shared holders."); - lt_reset(); - lt_target_done_threads = 4; - lt_start_lock_thread(lt_grab_rw_shared); - lt_start_lock_thread(lt_grab_rw_shared); - lt_start_lock_thread(lt_grab_rw_shared); - lt_start_lock_thread(lt_grab_rw_shared); - lt_wait_for_lock_test_threads(); - T_EXPECT_LE_UINT(lt_max_holders, 4, NULL); - */ + * T_LOG("Running test with four shared holders."); + * lt_reset(); + * lt_target_done_threads = 4; + * lt_start_lock_thread(lt_grab_rw_shared); + * lt_start_lock_thread(lt_grab_rw_shared); + * lt_start_lock_thread(lt_grab_rw_shared); + * lt_start_lock_thread(lt_grab_rw_shared); + * lt_wait_for_lock_test_threads(); + * T_EXPECT_LE_UINT(lt_max_holders, 4, NULL); + */ /* Three doing upgrades and downgrades */ T_LOG("Running test with threads upgrading and downgrading."); @@ -764,14 +768,14 @@ lt_test_locks() /* Uncontended - shared trylocks */ /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) - T_LOG("Running test with single thread doing shared rwlock trylocks."); - lt_reset(); - lt_target_done_threads = 1; - lt_start_lock_thread(lt_grab_rw_shared_with_try); - lt_wait_for_lock_test_threads(); - T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); - T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); - */ + * T_LOG("Running test with single thread doing shared rwlock trylocks."); + * lt_reset(); + * lt_target_done_threads = 1; + * lt_start_lock_thread(lt_grab_rw_shared_with_try); + * lt_wait_for_lock_test_threads(); + * T_EXPECT_EQ_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); + * T_EXPECT_EQ_UINT(lt_max_holders, 1, NULL); + */ /* Three doing exclusive trylocks */ T_LOG("Running test with threads doing exclusive rwlock trylocks."); @@ -786,30 +790,30 @@ lt_test_locks() /* Three doing shared trylocks */ /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) - T_LOG("Running test with threads doing shared rwlock trylocks."); - lt_reset(); - lt_target_done_threads = 3; - lt_start_lock_thread(lt_grab_rw_shared_with_try); - lt_start_lock_thread(lt_grab_rw_shared_with_try); - lt_start_lock_thread(lt_grab_rw_shared_with_try); - lt_wait_for_lock_test_threads(); - T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); - T_EXPECT_LE_UINT(lt_max_holders, 3, NULL); - */ + * T_LOG("Running test with threads doing shared rwlock trylocks."); + * lt_reset(); + * lt_target_done_threads = 3; + * lt_start_lock_thread(lt_grab_rw_shared_with_try); + * lt_start_lock_thread(lt_grab_rw_shared_with_try); + * lt_start_lock_thread(lt_grab_rw_shared_with_try); + * lt_wait_for_lock_test_threads(); + * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); + * T_EXPECT_LE_UINT(lt_max_holders, 3, NULL); + */ /* Three doing various trylocks */ /* Disabled until lt_grab_rw_shared_with_try() is fixed (rdar://30685840) - T_LOG("Running test with threads doing mixed rwlock trylocks."); - lt_reset(); - lt_target_done_threads = 4; - lt_start_lock_thread(lt_grab_rw_shared_with_try); - lt_start_lock_thread(lt_grab_rw_shared_with_try); - 
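The munger test table further below drives a related set of checks: each munge_* routine widens a packed block of 32-bit syscall arguments in place to 64-bit slots, where 'w' words zero-extend (hence MT_W_VAL's zero top half), 's' words sign-extend (MT_S_VAL), and 'l' longs are assembled from two back-to-back words (MT_L_VAL). A simplified munge_wl under those assumptions — the kernel's real mungers are hand-optimized, and the two-word order follows the 32-bit ABI (low word first is assumed here, matching little-endian arm64):

static void
example_munge_wl(void *args)
{
	uint32_t *in = (uint32_t *)args;   /* 3 input words: w, l-low, l-high */
	uint64_t *out = (uint64_t *)args;  /* 2 output slots, widened in place */

	/* read everything before the 64-bit stores start clobbering inputs */
	uint64_t l = ((uint64_t)in[2] << 32) | in[1];
	uint32_t w = in[0];

	out[1] = l;            /* 'l': two back-to-back words (MT_L_VAL) */
	out[0] = (uint64_t)w;  /* 'w': zero-extended (MT_W_VAL) */
}

Run against the table's fill pattern (every input word 0xfeedbeef), this yields out[0] == MT_W_VAL and out[1] == MT_L_VAL, matching the {MT_FUNC(munge_wl), 3, 2, ...} row.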
lt_start_lock_thread(lt_grab_rw_exclusive_with_try); - lt_start_lock_thread(lt_grab_rw_exclusive_with_try); - lt_wait_for_lock_test_threads(); - T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); - T_EXPECT_LE_UINT(lt_max_holders, 2, NULL); - */ + * T_LOG("Running test with threads doing mixed rwlock trylocks."); + * lt_reset(); + * lt_target_done_threads = 4; + * lt_start_lock_thread(lt_grab_rw_shared_with_try); + * lt_start_lock_thread(lt_grab_rw_shared_with_try); + * lt_start_lock_thread(lt_grab_rw_exclusive_with_try); + * lt_start_lock_thread(lt_grab_rw_exclusive_with_try); + * lt_wait_for_lock_test_threads(); + * T_EXPECT_LE_UINT(lt_counter, LOCK_TEST_ITERATIONS * lt_target_done_threads, NULL); + * T_EXPECT_LE_UINT(lt_max_holders, 2, NULL); + */ /* HW locks */ T_LOG("Running test with hw_lock_lock()"); @@ -888,68 +892,68 @@ lt_test_locks() return KERN_SUCCESS; } -#define MT_MAX_ARGS 8 -#define MT_INITIAL_VALUE 0xfeedbeef -#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */ -#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */ -#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */ +#define MT_MAX_ARGS 8 +#define MT_INITIAL_VALUE 0xfeedbeef +#define MT_W_VAL (0x00000000feedbeefULL) /* Drop in zeros */ +#define MT_S_VAL (0xfffffffffeedbeefULL) /* High bit is 1, so sign-extends as negative */ +#define MT_L_VAL (((uint64_t)MT_INITIAL_VALUE) | (((uint64_t)MT_INITIAL_VALUE) << 32)) /* Two back-to-back */ typedef void (*sy_munge_t)(void*); #define MT_FUNC(x) #x, x struct munger_test { - const char *mt_name; - sy_munge_t mt_func; - uint32_t mt_in_words; - uint32_t mt_nout; - uint64_t mt_expected[MT_MAX_ARGS]; + const char *mt_name; + sy_munge_t mt_func; + uint32_t mt_in_words; + uint32_t mt_nout; + uint64_t mt_expected[MT_MAX_ARGS]; } munger_tests[] = { - {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}}, - {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, 
MT_W_VAL}}, - {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}}, - {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}}, - {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, - {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}}, - {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}}, - {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, - {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, - {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}} + {MT_FUNC(munge_w), 1, 1, {MT_W_VAL}}, + {MT_FUNC(munge_ww), 2, 2, {MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_www), 3, 3, {MT_W_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwww), 4, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwww), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwwww), 6, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwwwww), 7, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwwwwww), 8, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wl), 3, 2, {MT_W_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wwl), 4, 3, {MT_W_VAL, MT_W_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wwlll), 8, 5, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wlw), 4, 3, {MT_W_VAL, MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wlwwwll), 10, 7, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wlwwwllw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wlwwlwlw), 11, 8, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wll), 5, 3, {MT_W_VAL, MT_L_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wlll), 7, 4, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wllwwll), 11, 7, {MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wwwlw), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwlww), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwl), 5, 4, {MT_W_VAL, MT_W_VAL, MT_W_VAL, 
MT_L_VAL}}, + {MT_FUNC(munge_wwwwlw), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwwl), 6, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wwwwwl), 7, 6, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wwwwwlww), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwwwllw), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwwwlll), 11, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wwwwwwl), 8, 7, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wwwwwwlw), 9, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wwwwwwll), 10, 8, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wsw), 3, 3, {MT_W_VAL, MT_S_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wws), 3, 3, {MT_W_VAL, MT_W_VAL, MT_S_VAL}}, + {MT_FUNC(munge_wwwsw), 5, 5, {MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_S_VAL, MT_W_VAL}}, + {MT_FUNC(munge_llllll), 12, 6, {MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL, MT_L_VAL}}, + {MT_FUNC(munge_l), 2, 1, {MT_L_VAL}}, + {MT_FUNC(munge_lw), 3, 2, {MT_L_VAL, MT_W_VAL}}, + {MT_FUNC(munge_lwww), 5, 4, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_lwwwwwww), 9, 8, {MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL}}, + {MT_FUNC(munge_wlwwwl), 8, 6, {MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}}, + {MT_FUNC(munge_wwlwwwl), 9, 7, {MT_W_VAL, MT_W_VAL, MT_L_VAL, MT_W_VAL, MT_W_VAL, MT_W_VAL, MT_L_VAL}} }; #define MT_TEST_COUNT (sizeof(munger_tests) / sizeof(struct munger_test)) static void -mt_reset(uint32_t in_words, size_t total_size, uint32_t *data) +mt_reset(uint32_t in_words, size_t total_size, uint32_t *data) { uint32_t i; @@ -990,29 +994,28 @@ mt_test_mungers() } /* Exception Callback Test */ -static ex_cb_action_t excb_test_action( - ex_cb_class_t cb_class, - void *refcon, - const ex_cb_state_t *state +static ex_cb_action_t +excb_test_action( + ex_cb_class_t cb_class, + void *refcon, + const ex_cb_state_t *state ) { ex_cb_state_t *context = (ex_cb_state_t *)refcon; - if ((NULL == refcon) || (NULL == state)) - { + if ((NULL == refcon) || (NULL == state)) { return EXCB_ACTION_TEST_FAIL; } context->far = state->far; - switch (cb_class) - { - case EXCB_CLASS_TEST1: - return EXCB_ACTION_RERUN; - case EXCB_CLASS_TEST2: - return EXCB_ACTION_NONE; - default: - return EXCB_ACTION_TEST_FAIL; + switch (cb_class) { + case EXCB_CLASS_TEST1: + return EXCB_ACTION_RERUN; + case EXCB_CLASS_TEST2: + return EXCB_ACTION_NONE; + default: + return EXCB_ACTION_TEST_FAIL; } } @@ -1028,7 +1031,7 @@ ex_cb_test() ex_cb_action_t action; T_LOG("Testing Exception Callback."); - + T_LOG("Running registration test."); kr = ex_cb_register(EXCB_CLASS_TEST1, &excb_test_action, &test_context_1); @@ -1072,7 +1075,7 @@ arm64_pan_test() pan_fault_value = 0xDE; // convert priv_addr to one that is accessible from user mode pan_test_addr = priv_addr + _COMM_HIGH_PAGE64_BASE_ADDRESS - - _COMM_PAGE_START_ADDRESS; + _COMM_PAGE_START_ADDRESS; // Below should trigger a PAN exception as pan_test_addr is accessible // in user mode @@ -1122,4 +1125,3 @@ arm64_munger_test() return 0; } - diff --git a/osfmk/arm64/proc_reg.h b/osfmk/arm64/proc_reg.h index 914cce974..ee13e1844 100644 --- 
a/osfmk/arm64/proc_reg.h +++ b/osfmk/arm64/proc_reg.h @@ -105,93 +105,93 @@ * M Mode field */ -#define PSR64_NZCV_SHIFT 28 -#define PSR64_NZCV_MASK (1 << PSR64_NZCV_SHIFT) +#define PSR64_NZCV_SHIFT 28 +#define PSR64_NZCV_MASK (1 << PSR64_NZCV_SHIFT) -#define PSR64_N_SHIFT 31 -#define PSR64_N (1 << PSR64_N_SHIFT) +#define PSR64_N_SHIFT 31 +#define PSR64_N (1 << PSR64_N_SHIFT) -#define PSR64_Z_SHIFT 30 -#define PSR64_Z (1 << PSR64_Z_SHIFT) +#define PSR64_Z_SHIFT 30 +#define PSR64_Z (1 << PSR64_Z_SHIFT) -#define PSR64_C_SHIFT 29 -#define PSR64_C (1 << PSR64_C_SHIFT) +#define PSR64_C_SHIFT 29 +#define PSR64_C (1 << PSR64_C_SHIFT) -#define PSR64_V_SHIFT 28 -#define PSR64_V (1 << PSR64_V_SHIFT) +#define PSR64_V_SHIFT 28 +#define PSR64_V (1 << PSR64_V_SHIFT) -#define PSR64_PAN_SHIFT 22 -#define PSR64_PAN (1 << PSR64_PAN_SHIFT) +#define PSR64_PAN_SHIFT 22 +#define PSR64_PAN (1 << PSR64_PAN_SHIFT) -#define PSR64_SS_SHIFT 21 -#define PSR64_SS (1 << PSR64_SS_SHIFT) +#define PSR64_SS_SHIFT 21 +#define PSR64_SS (1 << PSR64_SS_SHIFT) -#define PSR64_IL_SHIFT 20 -#define PSR64_IL (1 << PSR64_IL_SHIFT) +#define PSR64_IL_SHIFT 20 +#define PSR64_IL (1 << PSR64_IL_SHIFT) /* * msr DAIF, Xn and mrs Xn, DAIF transfer into * and out of bits 9:6 */ -#define DAIF_DEBUG_SHIFT 9 -#define DAIF_DEBUGF (1 << DAIF_DEBUG_SHIFT) +#define DAIF_DEBUG_SHIFT 9 +#define DAIF_DEBUGF (1 << DAIF_DEBUG_SHIFT) -#define DAIF_ASYNC_SHIFT 8 -#define DAIF_ASYNCF (1 << DAIF_ASYNC_SHIFT) +#define DAIF_ASYNC_SHIFT 8 +#define DAIF_ASYNCF (1 << DAIF_ASYNC_SHIFT) -#define DAIF_IRQF_SHIFT 7 -#define DAIF_IRQF (1 << DAIF_IRQF_SHIFT) +#define DAIF_IRQF_SHIFT 7 +#define DAIF_IRQF (1 << DAIF_IRQF_SHIFT) -#define DAIF_FIQF_SHIFT 6 -#define DAIF_FIQF (1 << DAIF_FIQF_SHIFT) +#define DAIF_FIQF_SHIFT 6 +#define DAIF_FIQF (1 << DAIF_FIQF_SHIFT) -#define DAIF_ALL (DAIF_DEBUGF | DAIF_ASYNCF | DAIF_IRQF | DAIF_FIQF) -#define DAIF_STANDARD_DISABLE (DAIF_ASYNCF | DAIF_IRQF | DAIF_FIQF) +#define DAIF_ALL (DAIF_DEBUGF | DAIF_ASYNCF | DAIF_IRQF | DAIF_FIQF) +#define DAIF_STANDARD_DISABLE (DAIF_ASYNCF | DAIF_IRQF | DAIF_FIQF) -#define SPSR_INTERRUPTS_ENABLED(x) (!(x & DAIF_FIQF)) +#define SPSR_INTERRUPTS_ENABLED(x) (!(x & DAIF_FIQF)) /* * msr DAIFSet, Xn, and msr DAIFClr, Xn transfer * from bits 3:0. */ -#define DAIFSC_DEBUGF (1 << 3) -#define DAIFSC_ASYNCF (1 << 2) -#define DAIFSC_IRQF (1 << 1) -#define DAIFSC_FIQF (1 << 0) -#define DAIFSC_ALL (DAIFSC_DEBUGF | DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) -#define DAIFSC_STANDARD_DISABLE (DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) +#define DAIFSC_DEBUGF (1 << 3) +#define DAIFSC_ASYNCF (1 << 2) +#define DAIFSC_IRQF (1 << 1) +#define DAIFSC_FIQF (1 << 0) +#define DAIFSC_ALL (DAIFSC_DEBUGF | DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) +#define DAIFSC_STANDARD_DISABLE (DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) /* * ARM64_TODO: unify with ARM? 
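The two DAIF encodings above differ only by position: mrs/msr DAIF moves the mask bits through 9:6, while msr DAIFSet/DAIFClr takes the same four flags in 3:0, so each DAIF_* value is its DAIFSC_* counterpart shifted left by six. Compile-time checks make the relationship explicit (a sketch; the placement of such asserts is illustrative):

_Static_assert(DAIF_DEBUGF == (DAIFSC_DEBUGF << 6), "DAIF view = DAIFSC view << 6");
_Static_assert(DAIF_ASYNCF == (DAIFSC_ASYNCF << 6), "DAIF view = DAIFSC view << 6");
_Static_assert(DAIF_IRQF   == (DAIFSC_IRQF   << 6), "DAIF view = DAIFSC view << 6");
_Static_assert(DAIF_FIQF   == (DAIFSC_FIQF   << 6), "DAIF view = DAIFSC view << 6");

/* Note that SPSR_INTERRUPTS_ENABLED keys off the F bit alone: in XNU's usage,
 * if FIQs are unmasked then IRQs are too. */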
*/ -#define PSR64_CF 0x20000000 /* Carry/Borrow/Extend */ +#define PSR64_CF 0x20000000 /* Carry/Borrow/Extend */ -#define PSR64_MODE_MASK 0x1F +#define PSR64_MODE_MASK 0x1F -#define PSR64_MODE_USER32_THUMB 0x20 +#define PSR64_MODE_USER32_THUMB 0x20 -#define PSR64_MODE_RW_SHIFT 4 -#define PSR64_MODE_RW_64 0 -#define PSR64_MODE_RW_32 (0x1 << PSR64_MODE_RW_SHIFT) +#define PSR64_MODE_RW_SHIFT 4 +#define PSR64_MODE_RW_64 0 +#define PSR64_MODE_RW_32 (0x1 << PSR64_MODE_RW_SHIFT) -#define PSR64_MODE_EL_SHIFT 2 -#define PSR64_MODE_EL_MASK (0x3 << PSR64_MODE_EL_SHIFT) -#define PSR64_MODE_EL3 (0x3 << PSR64_MODE_EL_SHIFT) -#define PSR64_MODE_EL1 (0x1 << PSR64_MODE_EL_SHIFT) -#define PSR64_MODE_EL0 0 +#define PSR64_MODE_EL_SHIFT 2 +#define PSR64_MODE_EL_MASK (0x3 << PSR64_MODE_EL_SHIFT) +#define PSR64_MODE_EL3 (0x3 << PSR64_MODE_EL_SHIFT) +#define PSR64_MODE_EL1 (0x1 << PSR64_MODE_EL_SHIFT) +#define PSR64_MODE_EL0 0 -#define PSR64_MODE_SPX 0x1 -#define PSR64_MODE_SP0 0 +#define PSR64_MODE_SPX 0x1 +#define PSR64_MODE_SP0 0 -#define PSR64_USER32_DEFAULT (PSR64_MODE_RW_32 | PSR64_MODE_EL0 | PSR64_MODE_SP0) -#define PSR64_USER64_DEFAULT (PSR64_MODE_RW_64 | PSR64_MODE_EL0 | PSR64_MODE_SP0) -#define PSR64_KERNEL_DEFAULT (DAIF_STANDARD_DISABLE | PSR64_MODE_RW_64 | PSR64_MODE_EL1 | PSR64_MODE_SP0) +#define PSR64_USER32_DEFAULT (PSR64_MODE_RW_32 | PSR64_MODE_EL0 | PSR64_MODE_SP0) +#define PSR64_USER64_DEFAULT (PSR64_MODE_RW_64 | PSR64_MODE_EL0 | PSR64_MODE_SP0) +#define PSR64_KERNEL_DEFAULT (DAIF_STANDARD_DISABLE | PSR64_MODE_RW_64 | PSR64_MODE_EL1 | PSR64_MODE_SP0) -#define PSR64_IS_KERNEL(x) ((x & PSR64_MODE_EL_MASK) == PSR64_MODE_EL1) -#define PSR64_IS_USER(x) ((x & PSR64_MODE_EL_MASK) == PSR64_MODE_EL0) +#define PSR64_IS_KERNEL(x) ((x & PSR64_MODE_EL_MASK) == PSR64_MODE_EL1) +#define PSR64_IS_USER(x) ((x & PSR64_MODE_EL_MASK) == PSR64_MODE_EL0) -#define PSR64_IS_USER32(x) (PSR64_IS_USER(x) && (x & PSR64_MODE_RW_32)) -#define PSR64_IS_USER64(x) (PSR64_IS_USER(x) && !(x & PSR64_MODE_RW_32)) +#define PSR64_IS_USER32(x) (PSR64_IS_USER(x) && (x & PSR64_MODE_RW_32)) +#define PSR64_IS_USER64(x) (PSR64_IS_USER(x) && !(x & PSR64_MODE_RW_32)) @@ -199,82 +199,82 @@ * System Control Register (SCTLR) */ -#define SCTLR_RESERVED ((3 << 28) | (1 << 22) | (1 << 20) | (1 << 11)) +#define SCTLR_RESERVED ((3 << 28) | (1 << 22) | (1 << 20) | (1 << 11)) // 26 UCI User Cache Instructions -#define SCTLR_UCI_ENABLED (1 << 26) +#define SCTLR_UCI_ENABLED (1 << 26) // 25 EE Exception Endianness -#define SCTLR_EE_BIG_ENDIAN (1 << 25) +#define SCTLR_EE_BIG_ENDIAN (1 << 25) // 24 E0E EL0 Endianness -#define SCTLR_E0E_BIG_ENDIAN (1 << 24) +#define SCTLR_E0E_BIG_ENDIAN (1 << 24) // 23 SPAN Set PAN -#define SCTLR_PAN_UNCHANGED (1 << 23) +#define SCTLR_PAN_UNCHANGED (1 << 23) // 22 RES1 1 // 21 RES0 0 // 20 RES1 1 // 19 WXN Writeable implies eXecute Never -#define SCTLR_WXN_ENABLED (1 << 19) +#define SCTLR_WXN_ENABLED (1 << 19) // 18 nTWE Not trap WFE from EL0 -#define SCTLR_nTWE_WFE_ENABLED (1 << 18) +#define SCTLR_nTWE_WFE_ENABLED (1 << 18) // 17 RES0 0 // 16 nTWI Not trap WFI from EL0 -#define SCTRL_nTWI_WFI_ENABLED (1 << 16) +#define SCTRL_nTWI_WFI_ENABLED (1 << 16) // 15 UCT User Cache Type register (CTR_EL0) -#define SCTLR_UCT_ENABLED (1 << 15) +#define SCTLR_UCT_ENABLED (1 << 15) // 14 DZE User Data Cache Zero (DC ZVA) -#define SCTLR_DZE_ENABLED (1 << 14) +#define SCTLR_DZE_ENABLED (1 << 14) // 13 PACDB_ENABLED AddPACDB and AuthDB functions enabled -#define SCTLR_PACDB_ENABLED (1 << 13) +#define SCTLR_PACDB_ENABLED (1 << 13) // 12 I 
Instruction cache enable -#define SCTLR_I_ENABLED (1 << 12) +#define SCTLR_I_ENABLED (1 << 12) // 11 RES1 1 // 10 RES0 0 // 9 UMA User Mask Access -#define SCTLR_UMA_ENABLED (1 << 9) +#define SCTLR_UMA_ENABLED (1 << 9) // 8 SED SETEND Disable -#define SCTLR_SED_DISABLED (1 << 8) +#define SCTLR_SED_DISABLED (1 << 8) // 7 ITD IT Disable -#define SCTLR_ITD_DISABLED (1 << 7) +#define SCTLR_ITD_DISABLED (1 << 7) // 6 RES0 0 // 5 CP15BEN CP15 Barrier ENable -#define SCTLR_CP15BEN_ENABLED (1 << 5) +#define SCTLR_CP15BEN_ENABLED (1 << 5) // 4 SA0 Stack Alignment check for EL0 -#define SCTLR_SA0_ENABLED (1 << 4) +#define SCTLR_SA0_ENABLED (1 << 4) // 3 SA Stack Alignment check -#define SCTLR_SA_ENABLED (1 << 3) +#define SCTLR_SA_ENABLED (1 << 3) // 2 C Cache enable -#define SCTLR_C_ENABLED (1 << 2) +#define SCTLR_C_ENABLED (1 << 2) // 1 A Alignment check -#define SCTLR_A_ENABLED (1 << 1) +#define SCTLR_A_ENABLED (1 << 1) // 0 M MMU enable -#define SCTLR_M_ENABLED (1 << 0) +#define SCTLR_M_ENABLED (1 << 0) -#define SCTLR_EL1_DEFAULT (SCTLR_RESERVED | SCTLR_UCI_ENABLED | SCTLR_nTWE_WFE_ENABLED | SCTLR_DZE_ENABLED | \ - SCTLR_I_ENABLED | SCTLR_SED_DISABLED | SCTLR_CP15BEN_ENABLED | \ - SCTLR_SA0_ENABLED | SCTLR_SA_ENABLED | SCTLR_C_ENABLED | SCTLR_M_ENABLED) +#define SCTLR_EL1_DEFAULT (SCTLR_RESERVED | SCTLR_UCI_ENABLED | SCTLR_nTWE_WFE_ENABLED | SCTLR_DZE_ENABLED | \ + SCTLR_I_ENABLED | SCTLR_SED_DISABLED | SCTLR_CP15BEN_ENABLED | \ + SCTLR_SA0_ENABLED | SCTLR_SA_ENABLED | SCTLR_C_ENABLED | SCTLR_M_ENABLED) /* * Coprocessor Access Control Register (CPACR) @@ -288,12 +288,12 @@ * TTA Trace trap * FPEN Floating point enable */ -#define CPACR_TTA_SHIFT 28 -#define CPACR_TTA (1 << CPACR_TTA_SHIFT) +#define CPACR_TTA_SHIFT 28 +#define CPACR_TTA (1 << CPACR_TTA_SHIFT) -#define CPACR_FPEN_SHIFT 20 -#define CPACR_FPEN_EL0_TRAP (0x1 << CPACR_FPEN_SHIFT) -#define CPACR_FPEN_ENABLE (0x3 << CPACR_FPEN_SHIFT) +#define CPACR_FPEN_SHIFT 20 +#define CPACR_FPEN_EL0_TRAP (0x1 << CPACR_FPEN_SHIFT) +#define CPACR_FPEN_ENABLE (0x3 << CPACR_FPEN_SHIFT) /* * FPSR: Floating Point Status Register @@ -304,37 +304,37 @@ * +--+--+--+--+--+-------------------+---+--+---+---+---+---+---+ */ -#define FPSR_N_SHIFT 31 -#define FPSR_Z_SHIFT 30 -#define FPSR_C_SHIFT 29 -#define FPSR_V_SHIFT 28 -#define FPSR_QC_SHIFT 27 -#define FPSR_IDC_SHIFT 7 -#define FPSR_IXC_SHIFT 4 -#define FPSR_UFC_SHIFT 3 -#define FPSR_OFC_SHIFT 2 -#define FPSR_DZC_SHIFT 1 -#define FPSR_IOC_SHIFT 0 -#define FPSR_N (1 << FPSR_N_SHIFT) -#define FPSR_Z (1 << FPSR_Z_SHIFT) -#define FPSR_C (1 << FPSR_C_SHIFT) -#define FPSR_V (1 << FPSR_V_SHIFT) -#define FPSR_QC (1 << FPSR_QC_SHIFT) -#define FPSR_IDC (1 << FPSR_IDC_SHIFT) -#define FPSR_IXC (1 << FPSR_IXC_SHIFT) -#define FPSR_UFC (1 << FPSR_UFC_SHIFT) -#define FPSR_OFC (1 << FPSR_OFC_SHIFT) -#define FPSR_DZC (1 << FPSR_DZC_SHIFT) -#define FPSR_IOC (1 << FPSR_IOC_SHIFT) +#define FPSR_N_SHIFT 31 +#define FPSR_Z_SHIFT 30 +#define FPSR_C_SHIFT 29 +#define FPSR_V_SHIFT 28 +#define FPSR_QC_SHIFT 27 +#define FPSR_IDC_SHIFT 7 +#define FPSR_IXC_SHIFT 4 +#define FPSR_UFC_SHIFT 3 +#define FPSR_OFC_SHIFT 2 +#define FPSR_DZC_SHIFT 1 +#define FPSR_IOC_SHIFT 0 +#define FPSR_N (1 << FPSR_N_SHIFT) +#define FPSR_Z (1 << FPSR_Z_SHIFT) +#define FPSR_C (1 << FPSR_C_SHIFT) +#define FPSR_V (1 << FPSR_V_SHIFT) +#define FPSR_QC (1 << FPSR_QC_SHIFT) +#define FPSR_IDC (1 << FPSR_IDC_SHIFT) +#define FPSR_IXC (1 << FPSR_IXC_SHIFT) +#define FPSR_UFC (1 << FPSR_UFC_SHIFT) +#define FPSR_OFC (1 << FPSR_OFC_SHIFT) +#define FPSR_DZC (1 << 
FPSR_DZC_SHIFT) +#define FPSR_IOC (1 << FPSR_IOC_SHIFT) /* * A mask for all for all of the bits that are not RAZ for FPSR; this * is primarily for converting between a 32-bit view of NEON state * (FPSCR) and a 64-bit view of NEON state (FPSR, FPCR). */ -#define FPSR_MASK (FPSR_N | FPSR_Z | FPSR_C | FPSR_V | FPSR_QC | \ - FPSR_IDC | FPSR_IXC | FPSR_UFC | FPSR_OFC | \ - FPSR_DZC | FPSR_IOC) +#define FPSR_MASK (FPSR_N | FPSR_Z | FPSR_C | FPSR_V | FPSR_QC | \ + FPSR_IDC | FPSR_IXC | FPSR_UFC | FPSR_OFC | \ + FPSR_DZC | FPSR_IOC) /* * FPCR: Floating Point Control Register @@ -345,41 +345,41 @@ * +-----+---+--+--+-----+------+--+---+---+--+---+---+---+---+---+--------+ */ -#define FPCR_AHP_SHIFT 26 -#define FPCR_DN_SHIFT 25 -#define FPCR_FZ_SHIFT 24 -#define FPCR_RMODE_SHIFT 22 -#define FPCR_STRIDE_SHIFT 20 -#define FPCR_LEN_SHIFT 16 -#define FPCR_IDE_SHIFT 15 -#define FPCR_IXE_SHIFT 12 -#define FPCR_UFE_SHIFT 11 -#define FPCR_OFE_SHIFT 10 -#define FPCR_DZE_SHIFT 9 -#define FPCR_IOE_SHIFT 8 -#define FPCR_AHP (1 << FPCR_AHP_SHIFT) -#define FPCR_DN (1 << FPCR_DN_SHIFT) -#define FPCR_FZ (1 << FPCR_FZ_SHIFT) -#define FPCR_RMODE (0x3 << FPCR_RMODE_SHIFT) -#define FPCR_STRIDE (0x3 << FPCR_STRIDE_SHIFT) -#define FPCR_LEN (0x7 << FPCR_LEN_SHIFT) -#define FPCR_IDE (1 << FPCR_IDE_SHIFT) -#define FPCR_IXE (1 << FPCR_IXE_SHIFT) -#define FPCR_UFE (1 << FPCR_UFE_SHIFT) -#define FPCR_OFE (1 << FPCR_OFE_SHIFT) -#define FPCR_DZE (1 << FPCR_DZE_SHIFT) -#define FPCR_IOE (1 << FPCR_IOE_SHIFT) -#define FPCR_DEFAULT (FPCR_DN) -#define FPCR_DEFAULT_32 (FPCR_DN|FPCR_FZ) +#define FPCR_AHP_SHIFT 26 +#define FPCR_DN_SHIFT 25 +#define FPCR_FZ_SHIFT 24 +#define FPCR_RMODE_SHIFT 22 +#define FPCR_STRIDE_SHIFT 20 +#define FPCR_LEN_SHIFT 16 +#define FPCR_IDE_SHIFT 15 +#define FPCR_IXE_SHIFT 12 +#define FPCR_UFE_SHIFT 11 +#define FPCR_OFE_SHIFT 10 +#define FPCR_DZE_SHIFT 9 +#define FPCR_IOE_SHIFT 8 +#define FPCR_AHP (1 << FPCR_AHP_SHIFT) +#define FPCR_DN (1 << FPCR_DN_SHIFT) +#define FPCR_FZ (1 << FPCR_FZ_SHIFT) +#define FPCR_RMODE (0x3 << FPCR_RMODE_SHIFT) +#define FPCR_STRIDE (0x3 << FPCR_STRIDE_SHIFT) +#define FPCR_LEN (0x7 << FPCR_LEN_SHIFT) +#define FPCR_IDE (1 << FPCR_IDE_SHIFT) +#define FPCR_IXE (1 << FPCR_IXE_SHIFT) +#define FPCR_UFE (1 << FPCR_UFE_SHIFT) +#define FPCR_OFE (1 << FPCR_OFE_SHIFT) +#define FPCR_DZE (1 << FPCR_DZE_SHIFT) +#define FPCR_IOE (1 << FPCR_IOE_SHIFT) +#define FPCR_DEFAULT (FPCR_DN) +#define FPCR_DEFAULT_32 (FPCR_DN|FPCR_FZ) /* * A mask for all for all of the bits that are not RAZ for FPCR; this * is primarily for converting between a 32-bit view of NEON state * (FPSCR) and a 64-bit view of NEON state (FPSR, FPCR). 
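Because FPSR_MASK covers only the flag bits (31:27 and 7:0) while FPCR_MASK covers only the control field (26:8), the two masks are disjoint, and the conversion the comments describe is a straight merge/split. A sketch of both directions, assuming the mask definitions above:

static inline uint32_t
example_fpscr_from_neon64(uint64_t fpsr, uint64_t fpcr)
{
	/* the masks are disjoint, so the 32-bit FPSCR view is their union */
	return (uint32_t)((fpsr & FPSR_MASK) | (fpcr & FPCR_MASK));
}

static inline void
example_neon64_from_fpscr(uint32_t fpscr, uint64_t *fpsr, uint64_t *fpcr)
{
	*fpsr = fpscr & FPSR_MASK;  /* status: cc flags + cumulative exceptions */
	*fpcr = fpscr & FPCR_MASK;  /* control: rounding, flush-to-zero, traps */
}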
*/ -#define FPCR_MASK (FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE | \ - FPCR_STRIDE | FPCR_LEN | FPCR_IDE | FPCR_IXE | \ - FPCR_UFE | FPCR_OFE | FPCR_DZE | FPCR_IOE) +#define FPCR_MASK (FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE | \ + FPCR_STRIDE | FPCR_LEN | FPCR_IDE | FPCR_IXE | \ + FPCR_UFE | FPCR_OFE | FPCR_DZE | FPCR_IOE) /* * Translation Control Register (TCR) @@ -392,7 +392,7 @@ * +------+----+----+--+-+-----+-+---+-----+-----+-----+----+--+------+-+---+-----+-----+-----+----+-+----+ * * Current (with 16KB granule support): - * + * * 63 39 38 37 36 34 32 30 29 28 27 26 25 24 23 22 21 16 14 13 12 11 10 9 8 7 5 0 * +------+----+----+--+-+-----+-----+-----+-----+-----+----+--+------+-----+-----+-----+-----+----+-+----+ * | zero |TBI1|TBI0|AS|z| IPS | TG1 | SH1 |ORGN1|IRGN1|EPD1|A1| T1SZ | TG0 | SH0 |ORGN0|IRGN0|EPD0|z|T0SZ| @@ -416,84 +416,84 @@ * T0SZ Virtual address size for TTBR0 */ -#define TCR_T0SZ_SHIFT 0ULL -#define TCR_TSZ_BITS 6ULL -#define TCR_TSZ_MASK ((1ULL << TCR_TSZ_BITS) - 1ULL) +#define TCR_T0SZ_SHIFT 0ULL +#define TCR_TSZ_BITS 6ULL +#define TCR_TSZ_MASK ((1ULL << TCR_TSZ_BITS) - 1ULL) -#define TCR_IRGN0_SHIFT 8ULL -#define TCR_IRGN0_DISABLED (0ULL << TCR_IRGN0_SHIFT) -#define TCR_IRGN0_WRITEBACK (1ULL << TCR_IRGN0_SHIFT) -#define TCR_IRGN0_WRITETHRU (2ULL << TCR_IRGN0_SHIFT) -#define TCR_IRGN0_WRITEBACKNO (3ULL << TCR_IRGN0_SHIFT) +#define TCR_IRGN0_SHIFT 8ULL +#define TCR_IRGN0_DISABLED (0ULL << TCR_IRGN0_SHIFT) +#define TCR_IRGN0_WRITEBACK (1ULL << TCR_IRGN0_SHIFT) +#define TCR_IRGN0_WRITETHRU (2ULL << TCR_IRGN0_SHIFT) +#define TCR_IRGN0_WRITEBACKNO (3ULL << TCR_IRGN0_SHIFT) -#define TCR_ORGN0_SHIFT 10ULL -#define TCR_ORGN0_DISABLED (0ULL << TCR_ORGN0_SHIFT) -#define TCR_ORGN0_WRITEBACK (1ULL << TCR_ORGN0_SHIFT) -#define TCR_ORGN0_WRITETHRU (2ULL << TCR_ORGN0_SHIFT) -#define TCR_ORGN0_WRITEBACKNO (3ULL << TCR_ORGN0_SHIFT) +#define TCR_ORGN0_SHIFT 10ULL +#define TCR_ORGN0_DISABLED (0ULL << TCR_ORGN0_SHIFT) +#define TCR_ORGN0_WRITEBACK (1ULL << TCR_ORGN0_SHIFT) +#define TCR_ORGN0_WRITETHRU (2ULL << TCR_ORGN0_SHIFT) +#define TCR_ORGN0_WRITEBACKNO (3ULL << TCR_ORGN0_SHIFT) -#define TCR_SH0_SHIFT 12ULL -#define TCR_SH0_NONE (0ULL << TCR_SH0_SHIFT) -#define TCR_SH0_OUTER (2ULL << TCR_SH0_SHIFT) -#define TCR_SH0_INNER (3ULL << TCR_SH0_SHIFT) +#define TCR_SH0_SHIFT 12ULL +#define TCR_SH0_NONE (0ULL << TCR_SH0_SHIFT) +#define TCR_SH0_OUTER (2ULL << TCR_SH0_SHIFT) +#define TCR_SH0_INNER (3ULL << TCR_SH0_SHIFT) -#define TCR_TG0_GRANULE_SHIFT (14ULL) +#define TCR_TG0_GRANULE_SHIFT (14ULL) -#define TCR_TG0_GRANULE_4KB (0ULL << TCR_TG0_GRANULE_SHIFT) -#define TCR_TG0_GRANULE_64KB (1ULL << TCR_TG0_GRANULE_SHIFT) -#define TCR_TG0_GRANULE_16KB (2ULL << TCR_TG0_GRANULE_SHIFT) +#define TCR_TG0_GRANULE_4KB (0ULL << TCR_TG0_GRANULE_SHIFT) +#define TCR_TG0_GRANULE_64KB (1ULL << TCR_TG0_GRANULE_SHIFT) +#define TCR_TG0_GRANULE_16KB (2ULL << TCR_TG0_GRANULE_SHIFT) #if __ARM_16K_PG__ -#define TCR_TG0_GRANULE_SIZE (TCR_TG0_GRANULE_16KB) +#define TCR_TG0_GRANULE_SIZE (TCR_TG0_GRANULE_16KB) #else -#define TCR_TG0_GRANULE_SIZE (TCR_TG0_GRANULE_4KB) +#define TCR_TG0_GRANULE_SIZE (TCR_TG0_GRANULE_4KB) #endif -#define TCR_T1SZ_SHIFT 16ULL +#define TCR_T1SZ_SHIFT 16ULL -#define TCR_A1_ASID1 (1ULL << 22ULL) -#define TCR_EPD1_TTBR1_DISABLED (1ULL << 23ULL) +#define TCR_A1_ASID1 (1ULL << 22ULL) +#define TCR_EPD1_TTBR1_DISABLED (1ULL << 23ULL) -#define TCR_IRGN1_SHIFT 24ULL -#define TCR_IRGN1_DISABLED (0ULL << TCR_IRGN1_SHIFT) -#define TCR_IRGN1_WRITEBACK (1ULL << TCR_IRGN1_SHIFT) -#define 
TCR_IRGN1_WRITETHRU (2ULL << TCR_IRGN1_SHIFT) -#define TCR_IRGN1_WRITEBACKNO (3ULL << TCR_IRGN1_SHIFT) +#define TCR_IRGN1_SHIFT 24ULL +#define TCR_IRGN1_DISABLED (0ULL << TCR_IRGN1_SHIFT) +#define TCR_IRGN1_WRITEBACK (1ULL << TCR_IRGN1_SHIFT) +#define TCR_IRGN1_WRITETHRU (2ULL << TCR_IRGN1_SHIFT) +#define TCR_IRGN1_WRITEBACKNO (3ULL << TCR_IRGN1_SHIFT) -#define TCR_ORGN1_SHIFT 26ULL -#define TCR_ORGN1_DISABLED (0ULL << TCR_ORGN1_SHIFT) -#define TCR_ORGN1_WRITEBACK (1ULL << TCR_ORGN1_SHIFT) -#define TCR_ORGN1_WRITETHRU (2ULL << TCR_ORGN1_SHIFT) -#define TCR_ORGN1_WRITEBACKNO (3ULL << TCR_ORGN1_SHIFT) +#define TCR_ORGN1_SHIFT 26ULL +#define TCR_ORGN1_DISABLED (0ULL << TCR_ORGN1_SHIFT) +#define TCR_ORGN1_WRITEBACK (1ULL << TCR_ORGN1_SHIFT) +#define TCR_ORGN1_WRITETHRU (2ULL << TCR_ORGN1_SHIFT) +#define TCR_ORGN1_WRITEBACKNO (3ULL << TCR_ORGN1_SHIFT) -#define TCR_SH1_SHIFT 28ULL -#define TCR_SH1_NONE (0ULL << TCR_SH1_SHIFT) -#define TCR_SH1_OUTER (2ULL << TCR_SH1_SHIFT) -#define TCR_SH1_INNER (3ULL << TCR_SH1_SHIFT) +#define TCR_SH1_SHIFT 28ULL +#define TCR_SH1_NONE (0ULL << TCR_SH1_SHIFT) +#define TCR_SH1_OUTER (2ULL << TCR_SH1_SHIFT) +#define TCR_SH1_INNER (3ULL << TCR_SH1_SHIFT) -#define TCR_TG1_GRANULE_SHIFT 30ULL +#define TCR_TG1_GRANULE_SHIFT 30ULL -#define TCR_TG1_GRANULE_16KB (1ULL << TCR_TG1_GRANULE_SHIFT) -#define TCR_TG1_GRANULE_4KB (2ULL << TCR_TG1_GRANULE_SHIFT) -#define TCR_TG1_GRANULE_64KB (3ULL << TCR_TG1_GRANULE_SHIFT) +#define TCR_TG1_GRANULE_16KB (1ULL << TCR_TG1_GRANULE_SHIFT) +#define TCR_TG1_GRANULE_4KB (2ULL << TCR_TG1_GRANULE_SHIFT) +#define TCR_TG1_GRANULE_64KB (3ULL << TCR_TG1_GRANULE_SHIFT) #if __ARM_16K_PG__ -#define TCR_TG1_GRANULE_SIZE (TCR_TG1_GRANULE_16KB) +#define TCR_TG1_GRANULE_SIZE (TCR_TG1_GRANULE_16KB) #else -#define TCR_TG1_GRANULE_SIZE (TCR_TG1_GRANULE_4KB) +#define TCR_TG1_GRANULE_SIZE (TCR_TG1_GRANULE_4KB) #endif -#define TCR_IPS_SHIFT 32ULL -#define TCR_IPS_32BITS (0ULL << TCR_IPS_SHIFT) -#define TCR_IPS_36BITS (1ULL << TCR_IPS_SHIFT) -#define TCR_IPS_40BITS (2ULL << TCR_IPS_SHIFT) -#define TCR_IPS_42BITS (3ULL << TCR_IPS_SHIFT) -#define TCR_IPS_44BITS (4ULL << TCR_IPS_SHIFT) -#define TCR_IPS_48BITS (5ULL << TCR_IPS_SHIFT) +#define TCR_IPS_SHIFT 32ULL +#define TCR_IPS_32BITS (0ULL << TCR_IPS_SHIFT) +#define TCR_IPS_36BITS (1ULL << TCR_IPS_SHIFT) +#define TCR_IPS_40BITS (2ULL << TCR_IPS_SHIFT) +#define TCR_IPS_42BITS (3ULL << TCR_IPS_SHIFT) +#define TCR_IPS_44BITS (4ULL << TCR_IPS_SHIFT) +#define TCR_IPS_48BITS (5ULL << TCR_IPS_SHIFT) -#define TCR_AS_16BIT_ASID (1ULL << 36) -#define TCR_TBI0_TOPBYTE_IGNORED (1ULL << 37) -#define TCR_TBI1_TOPBYTE_IGNORED (1ULL << 38) +#define TCR_AS_16BIT_ASID (1ULL << 36) +#define TCR_TBI0_TOPBYTE_IGNORED (1ULL << 37) +#define TCR_TBI1_TOPBYTE_IGNORED (1ULL << 38) /* * Multiprocessor Affinity Register (MPIDR_EL1) @@ -501,7 +501,7 @@ * +64-----------------------------31+30+29-25+24+23-16+15-8+7--0+ * |000000000000000000000000000000001| U|00000|MT| Aff2|Aff1|Aff0| * +---------------------------------+--+-----+--+-----+----+----+ - * + * * where * U Uniprocessor * MT Multi-threading at lowest affinity level @@ -509,11 +509,11 @@ * Aff1 Cluster ID * Aff0 CPU ID */ -#define MPIDR_AFF0_MASK 0xFF -#define MPIDR_AFF1_MASK 0xFF00 -#define MPIDR_AFF1_SHIFT 8 -#define MPIDR_AFF2_MASK 0xFF0000 -#define MPIDR_AFF2_SHIFT 16 +#define MPIDR_AFF0_MASK 0xFF +#define MPIDR_AFF1_MASK 0xFF00 +#define MPIDR_AFF1_SHIFT 8 +#define MPIDR_AFF2_MASK 0xFF0000 +#define MPIDR_AFF2_SHIFT 16 /* * We currently use a 3 level page table (rather than the 
full 4 @@ -545,53 +545,53 @@ #endif /* __ARM_KERNEL_PROTECT__ */ #ifdef __ARM_16K_PG__ #if __ARM64_TWO_LEVEL_PMAP__ -#define T0SZ_BOOT 28ULL +#define T0SZ_BOOT 28ULL #elif __ARM64_PMAP_SUBPAGE_L1__ -#define T0SZ_BOOT 25ULL +#define T0SZ_BOOT 25ULL #else /* __ARM64_TWO_LEVEL_PMAP__ */ -#define T0SZ_BOOT 17ULL +#define T0SZ_BOOT 17ULL #endif /* __ARM64_TWO_LEVEL_PMAP__ */ #else /* __ARM_16K_PG__ */ #if __ARM64_PMAP_SUBPAGE_L1__ -#define T0SZ_BOOT 26ULL +#define T0SZ_BOOT 26ULL #else /* __ARM64_PMAP_SUBPAGE_L1__ */ -#define T0SZ_BOOT 25ULL +#define T0SZ_BOOT 25ULL #endif /* __ARM64_PMAP_SUBPAGE_L1__ */ #endif /* __ARM_16K_PG__ */ #if defined(APPLE_ARM64_ARCH_FAMILY) /* T0SZ must be the same as T1SZ */ -#define T1SZ_BOOT T0SZ_BOOT +#define T1SZ_BOOT T0SZ_BOOT #else /* defined(APPLE_ARM64_ARCH_FAMILY) */ #ifdef __ARM_16K_PG__ #if __ARM64_TWO_LEVEL_PMAP__ -#define T1SZ_BOOT 28ULL +#define T1SZ_BOOT 28ULL #elif __ARM64_PMAP_SUBPAGE_L1__ -#define T1SZ_BOOT 25ULL +#define T1SZ_BOOT 25ULL #else /* __ARM64_TWO_LEVEL_PMAP__ */ -#define T1SZ_BOOT 17ULL +#define T1SZ_BOOT 17ULL #endif /* __ARM64_TWO_LEVEL_PMAP__ */ #else /* __ARM_16K_PG__ */ #if __ARM64_PMAP_SUBPAGE_L1__ -#define T1SZ_BOOT 26ULL +#define T1SZ_BOOT 26ULL #else /* __ARM64_PMAP_SUBPAGE_L1__ */ -#define T1SZ_BOOT 25ULL +#define T1SZ_BOOT 25ULL #endif /*__ARM64_PMAP_SUBPAGE_L1__*/ #endif /* __ARM_16K_PG__ */ #endif /* defined(APPLE_ARM64_ARCH_FAMILY) */ -#define TCR_EL1_BASE (TCR_IPS_40BITS | \ - TCR_SH0_OUTER | TCR_ORGN0_WRITEBACK | TCR_IRGN0_WRITEBACK | (T0SZ_BOOT << TCR_T0SZ_SHIFT) | (TCR_TG0_GRANULE_SIZE) |\ - TCR_SH1_OUTER | TCR_ORGN1_WRITEBACK | TCR_IRGN1_WRITEBACK | (TCR_TG1_GRANULE_SIZE)) +#define TCR_EL1_BASE (TCR_IPS_40BITS | \ + TCR_SH0_OUTER | TCR_ORGN0_WRITEBACK | TCR_IRGN0_WRITEBACK | (T0SZ_BOOT << TCR_T0SZ_SHIFT) | (TCR_TG0_GRANULE_SIZE) |\ + TCR_SH1_OUTER | TCR_ORGN1_WRITEBACK | TCR_IRGN1_WRITEBACK | (TCR_TG1_GRANULE_SIZE)) #if __ARM_KERNEL_PROTECT__ -#define TCR_EL1_BOOT (TCR_EL1_BASE | \ - (T1SZ_BOOT << TCR_T1SZ_SHIFT) | TCR_TBI0_TOPBYTE_IGNORED) -#define T1SZ_USER (T1SZ_BOOT + 1) -#define TCR_EL1_USER (TCR_EL1_BASE | (T1SZ_USER << TCR_T1SZ_SHIFT) | TCR_TBI0_TOPBYTE_IGNORED) +#define TCR_EL1_BOOT (TCR_EL1_BASE | \ + (T1SZ_BOOT << TCR_T1SZ_SHIFT) | TCR_TBI0_TOPBYTE_IGNORED) +#define T1SZ_USER (T1SZ_BOOT + 1) +#define TCR_EL1_USER (TCR_EL1_BASE | (T1SZ_USER << TCR_T1SZ_SHIFT) | TCR_TBI0_TOPBYTE_IGNORED) #else -#define TCR_EL1_BOOT (TCR_EL1_BASE | \ - (T1SZ_BOOT << TCR_T1SZ_SHIFT)) +#define TCR_EL1_BOOT (TCR_EL1_BASE | \ + (T1SZ_BOOT << TCR_T1SZ_SHIFT)) #endif /* __ARM_KERNEL_PROTECT__ */ /* @@ -603,10 +603,10 @@ * +--------+------------------+------+ * */ -#define TTBR_ASID_SHIFT 48 -#define TTBR_ASID_MASK 0xffff000000000000 +#define TTBR_ASID_SHIFT 48 +#define TTBR_ASID_MASK 0xffff000000000000 -#define TTBR_BADDR_MASK 0x0000ffffffffffff +#define TTBR_BADDR_MASK 0x0000ffffffffffff /* * Memory Attribute Indirection Register @@ -618,44 +618,44 @@ * */ -#define MAIR_ATTR_SHIFT(x) (8*(x)) +#define MAIR_ATTR_SHIFT(x) (8*(x)) /* Strongly ordered or device memory attributes */ -#define MAIR_OUTER_STRONGLY_ORDERED 0x0 -#define MAIR_OUTER_DEVICE 0x0 +#define MAIR_OUTER_STRONGLY_ORDERED 0x0 +#define MAIR_OUTER_DEVICE 0x0 -#define MAIR_INNER_STRONGLY_ORDERED 0x0 -#define MAIR_INNER_DEVICE 0x4 +#define MAIR_INNER_STRONGLY_ORDERED 0x0 +#define MAIR_INNER_DEVICE 0x4 /* Normal memory attributes */ -#define MAIR_OUTER_NON_CACHEABLE 0x40 -#define MAIR_OUTER_WRITE_THROUGH 0x80 -#define MAIR_OUTER_WRITE_BACK 0xc0 +#define 
MAIR_OUTER_NON_CACHEABLE 0x40 +#define MAIR_OUTER_WRITE_THROUGH 0x80 +#define MAIR_OUTER_WRITE_BACK 0xc0 -#define MAIR_INNER_NON_CACHEABLE 0x4 -#define MAIR_INNER_WRITE_THROUGH 0x8 -#define MAIR_INNER_WRITE_BACK 0xc +#define MAIR_INNER_NON_CACHEABLE 0x4 +#define MAIR_INNER_WRITE_THROUGH 0x8 +#define MAIR_INNER_WRITE_BACK 0xc /* Allocate policy for cacheable memory */ -#define MAIR_OUTER_WRITE_ALLOCATE 0x10 -#define MAIR_OUTER_READ_ALLOCATE 0x20 +#define MAIR_OUTER_WRITE_ALLOCATE 0x10 +#define MAIR_OUTER_READ_ALLOCATE 0x20 -#define MAIR_INNER_WRITE_ALLOCATE 0x1 -#define MAIR_INNER_READ_ALLOCATE 0x2 +#define MAIR_INNER_WRITE_ALLOCATE 0x1 +#define MAIR_INNER_READ_ALLOCATE 0x2 /* Memory Atribute Encoding */ /* Device memory types: - G (gathering): multiple reads/writes can be combined - R (reordering): reads or writes may reach device out of program order - E (early-acknowledge): writes may return immediately (e.g. PCIe posted writes) -*/ -#define MAIR_DISABLE 0x00 /* Device Memory, nGnRnE (strongly ordered) */ -#define MAIR_POSTED 0x04 /* Device Memory, nGnRE (strongly ordered, posted writes) */ -#define MAIR_WRITECOMB 0x44 /* Normal Memory, Outer Non-Cacheable, Inner Non-Cacheable */ -#define MAIR_WRITETHRU 0xBB /* Normal Memory, Outer Write-through, Inner Write-through */ -#define MAIR_WRITEBACK 0xFF /* Normal Memory, Outer Write-back, Inner Write-back */ -#define MAIR_INNERWRITEBACK 0x4F /* Normal Memory, Outer Non-Cacheable, Inner Write-back */ + * G (gathering): multiple reads/writes can be combined + * R (reordering): reads or writes may reach device out of program order + * E (early-acknowledge): writes may return immediately (e.g. PCIe posted writes) + */ +#define MAIR_DISABLE 0x00 /* Device Memory, nGnRnE (strongly ordered) */ +#define MAIR_POSTED 0x04 /* Device Memory, nGnRE (strongly ordered, posted writes) */ +#define MAIR_WRITECOMB 0x44 /* Normal Memory, Outer Non-Cacheable, Inner Non-Cacheable */ +#define MAIR_WRITETHRU 0xBB /* Normal Memory, Outer Write-through, Inner Write-through */ +#define MAIR_WRITEBACK 0xFF /* Normal Memory, Outer Write-back, Inner Write-back */ +#define MAIR_INNERWRITEBACK 0x4F /* Normal Memory, Outer Non-Cacheable, Inner Write-back */ /* @@ -666,37 +666,37 @@ /* * Memory Attribute Index */ -#define CACHE_ATTRINDX_WRITEBACK 0x0 /* cache enabled, buffer enabled */ -#define CACHE_ATTRINDX_WRITECOMB 0x1 /* no cache, buffered writes */ -#define CACHE_ATTRINDX_WRITETHRU 0x2 /* cache enabled, buffer disabled */ -#define CACHE_ATTRINDX_DISABLE 0x3 /* no cache, no buffer */ -#define CACHE_ATTRINDX_INNERWRITEBACK 0x4 /* inner cache enabled, buffer enabled, write allocate */ -#define CACHE_ATTRINDX_POSTED 0x5 /* no cache, no buffer, posted writes */ -#define CACHE_ATTRINDX_DEFAULT CACHE_ATTRINDX_WRITEBACK +#define CACHE_ATTRINDX_WRITEBACK 0x0 /* cache enabled, buffer enabled */ +#define CACHE_ATTRINDX_WRITECOMB 0x1 /* no cache, buffered writes */ +#define CACHE_ATTRINDX_WRITETHRU 0x2 /* cache enabled, buffer disabled */ +#define CACHE_ATTRINDX_DISABLE 0x3 /* no cache, no buffer */ +#define CACHE_ATTRINDX_INNERWRITEBACK 0x4 /* inner cache enabled, buffer enabled, write allocate */ +#define CACHE_ATTRINDX_POSTED 0x5 /* no cache, no buffer, posted writes */ +#define CACHE_ATTRINDX_DEFAULT CACHE_ATTRINDX_WRITEBACK /* - * Access protection bit values (TTEs and PTEs) + * Access protection bit values (TTEs and PTEs) */ -#define AP_RWNA 0x0 /* priv=read-write, user=no-access */ -#define AP_RWRW 0x1 /* priv=read-write, user=read-write */ -#define AP_RONA 0x2 /* 
priv=read-only, user=no-access */ -#define AP_RORO 0x3 /* priv=read-only, user=read-only */ -#define AP_MASK 0x3 /* mask to find ap bits */ +#define AP_RWNA 0x0 /* priv=read-write, user=no-access */ +#define AP_RWRW 0x1 /* priv=read-write, user=read-write */ +#define AP_RONA 0x2 /* priv=read-only, user=no-access */ +#define AP_RORO 0x3 /* priv=read-only, user=read-only */ +#define AP_MASK 0x3 /* mask to find ap bits */ /* * Shareability attributes */ -#define SH_NONE 0x0 /* Non shareable */ -#define SH_NONE 0x0 /* Device shareable */ -#define SH_DEVICE 0x2 /* Normal memory Inner non shareable - Outer non shareable */ -#define SH_OUTER_MEMORY 0x2 /* Normal memory Inner shareable - Outer shareable */ -#define SH_INNER_MEMORY 0x3 /* Normal memory Inner shareable - Outer non shareable */ +#define SH_NONE 0x0 /* Non shareable */ +#define SH_NONE 0x0 /* Device shareable */ +#define SH_DEVICE 0x2 /* Normal memory Inner non shareable - Outer non shareable */ +#define SH_OUTER_MEMORY 0x2 /* Normal memory Inner shareable - Outer shareable */ +#define SH_INNER_MEMORY 0x3 /* Normal memory Inner shareable - Outer non shareable */ /* * ARM Page Granule */ -#ifdef __ARM_16K_PG__ +#ifdef __ARM_16K_PG__ #define ARM_PGSHIFT 14 #else #define ARM_PGSHIFT 12 @@ -720,15 +720,15 @@ */ #ifdef __ARM_16K_PG__ -#define ARM_TT_L0_SIZE 0x0000800000000000ULL /* size of area covered by a tte */ -#define ARM_TT_L0_OFFMASK 0x00007fffffffffffULL /* offset within an L0 entry */ -#define ARM_TT_L0_SHIFT 47 /* page descriptor shift */ -#define ARM_TT_L0_INDEX_MASK 0x0000800000000000ULL /* mask for getting index in L0 table from virtual address */ +#define ARM_TT_L0_SIZE 0x0000800000000000ULL /* size of area covered by a tte */ +#define ARM_TT_L0_OFFMASK 0x00007fffffffffffULL /* offset within an L0 entry */ +#define ARM_TT_L0_SHIFT 47 /* page descriptor shift */ +#define ARM_TT_L0_INDEX_MASK 0x0000800000000000ULL /* mask for getting index in L0 table from virtual address */ #else -#define ARM_TT_L0_SIZE 0x0000008000000000ULL /* size of area covered by a tte */ -#define ARM_TT_L0_OFFMASK 0x0000007fffffffffULL /* offset within an L0 entry */ -#define ARM_TT_L0_SHIFT 39 /* page descriptor shift */ -#define ARM_TT_L0_INDEX_MASK 0x0000ff8000000000ULL /* mask for getting index in L0 table from virtual address */ +#define ARM_TT_L0_SIZE 0x0000008000000000ULL /* size of area covered by a tte */ +#define ARM_TT_L0_OFFMASK 0x0000007fffffffffULL /* offset within an L0 entry */ +#define ARM_TT_L0_SHIFT 39 /* page descriptor shift */ +#define ARM_TT_L0_INDEX_MASK 0x0000ff8000000000ULL /* mask for getting index in L0 table from virtual address */ #endif /* @@ -746,24 +746,24 @@ */ #ifdef __ARM_16K_PG__ -#define ARM_TT_L1_SIZE 0x0000001000000000ULL /* size of area covered by a tte */ -#define ARM_TT_L1_OFFMASK 0x0000000fffffffffULL /* offset within an L1 entry */ -#define ARM_TT_L1_SHIFT 36 /* page descriptor shift */ +#define ARM_TT_L1_SIZE 0x0000001000000000ULL /* size of area covered by a tte */ +#define ARM_TT_L1_OFFMASK 0x0000000fffffffffULL /* offset within an L1 entry */ +#define ARM_TT_L1_SHIFT 36 /* page descriptor shift */ #ifdef __ARM64_PMAP_SUBPAGE_L1__ /* This config supports 512GB per TTBR. 
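(Editor's aside, not from the original patch: the L1 index masks defined
   on either side of this #ifdef are consumed as
       l1_index = (va & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT;
   to select the level-1 table slot for a virtual address.)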
*/ -#define ARM_TT_L1_INDEX_MASK 0x0000007000000000ULL /* mask for getting index into L1 table from virtual address */ +#define ARM_TT_L1_INDEX_MASK 0x0000007000000000ULL /* mask for getting index into L1 table from virtual address */ #else /* __ARM64_PMAP_SUBPAGE_L1__ */ -#define ARM_TT_L1_INDEX_MASK 0x00007ff000000000ULL /* mask for getting index into L1 table from virtual address */ +#define ARM_TT_L1_INDEX_MASK 0x00007ff000000000ULL /* mask for getting index into L1 table from virtual address */ #endif /* __ARM64_PMAP_SUBPAGE_L1__ */ #else /* __ARM_16K_PG__ */ -#define ARM_TT_L1_SIZE 0x0000000040000000ULL /* size of area covered by a tte */ -#define ARM_TT_L1_OFFMASK 0x000000003fffffffULL /* offset within an L1 entry */ -#define ARM_TT_L1_SHIFT 30 /* page descriptor shift */ +#define ARM_TT_L1_SIZE 0x0000000040000000ULL /* size of area covered by a tte */ +#define ARM_TT_L1_OFFMASK 0x000000003fffffffULL /* offset within an L1 entry */ +#define ARM_TT_L1_SHIFT 30 /* page descriptor shift */ #ifdef __ARM64_PMAP_SUBPAGE_L1__ /* This config supports 256GB per TTBR. */ -#define ARM_TT_L1_INDEX_MASK 0x0000003fc0000000ULL /* mask for getting index into L1 table from virtual address */ +#define ARM_TT_L1_INDEX_MASK 0x0000003fc0000000ULL /* mask for getting index into L1 table from virtual address */ #else /* __ARM64_PMAP_SUBPAGE_L1__ */ -#define ARM_TT_L1_INDEX_MASK 0x0000007fc0000000ULL /* mask for getting index into L1 table from virtual address */ +#define ARM_TT_L1_INDEX_MASK 0x0000007fc0000000ULL /* mask for getting index into L1 table from virtual address */ #endif /* __ARM64_PMAP_SUBPAGE_L1__ */ #endif @@ -791,15 +791,15 @@ */ #ifdef __ARM_16K_PG__ -#define ARM_TT_L2_SIZE 0x0000000002000000ULL /* size of area covered by a tte */ -#define ARM_TT_L2_OFFMASK 0x0000000001ffffffULL /* offset within an L2 entry */ -#define ARM_TT_L2_SHIFT 25 /* page descriptor shift */ -#define ARM_TT_L2_INDEX_MASK 0x0000000ffe000000ULL /* mask for getting index in L2 table from virtual address */ +#define ARM_TT_L2_SIZE 0x0000000002000000ULL /* size of area covered by a tte */ +#define ARM_TT_L2_OFFMASK 0x0000000001ffffffULL /* offset within an L2 entry */ +#define ARM_TT_L2_SHIFT 25 /* page descriptor shift */ +#define ARM_TT_L2_INDEX_MASK 0x0000000ffe000000ULL /* mask for getting index in L2 table from virtual address */ #else -#define ARM_TT_L2_SIZE 0x0000000000200000ULL /* size of area covered by a tte */ -#define ARM_TT_L2_OFFMASK 0x00000000001fffffULL /* offset within an L2 entry */ -#define ARM_TT_L2_SHIFT 21 /* page descriptor shift */ -#define ARM_TT_L2_INDEX_MASK 0x000000003fe00000ULL /* mask for getting index in L2 table from virtual address */ +#define ARM_TT_L2_SIZE 0x0000000000200000ULL /* size of area covered by a tte */ +#define ARM_TT_L2_OFFMASK 0x00000000001fffffULL /* offset within an L2 entry */ +#define ARM_TT_L2_SHIFT 21 /* page descriptor shift */ +#define ARM_TT_L2_INDEX_MASK 0x000000003fe00000ULL /* mask for getting index in L2 table from virtual address */ #endif /* @@ -817,15 +817,15 @@ */ #ifdef __ARM_16K_PG__ -#define ARM_TT_L3_SIZE 0x0000000000004000ULL /* size of area covered by a tte */ -#define ARM_TT_L3_OFFMASK 0x0000000000003fffULL /* offset within L3 PTE */ -#define ARM_TT_L3_SHIFT 14 /* page descriptor shift */ -#define ARM_TT_L3_INDEX_MASK 0x0000000001ffc000ULL /* mask for page descriptor index */ +#define ARM_TT_L3_SIZE 0x0000000000004000ULL /* size of area covered by a tte */ +#define ARM_TT_L3_OFFMASK 0x0000000000003fffULL /* offset within L3 PTE */ +#define 
ARM_TT_L3_SHIFT 14 /* page descriptor shift */ +#define ARM_TT_L3_INDEX_MASK 0x0000000001ffc000ULL /* mask for page descriptor index */ #else -#define ARM_TT_L3_SIZE 0x0000000000001000ULL /* size of area covered by a tte */ -#define ARM_TT_L3_OFFMASK 0x0000000000000fffULL /* offset within L3 PTE */ -#define ARM_TT_L3_SHIFT 12 /* page descriptor shift */ -#define ARM_TT_L3_INDEX_MASK 0x00000000001ff000ULL /* mask for page descriptor index */ +#define ARM_TT_L3_SIZE 0x0000000000001000ULL /* size of area covered by a tte */ +#define ARM_TT_L3_OFFMASK 0x0000000000000fffULL /* offset within L3 PTE */ +#define ARM_TT_L3_SHIFT 12 /* page descriptor shift */ +#define ARM_TT_L3_INDEX_MASK 0x00000000001ff000ULL /* mask for page descriptor index */ #endif /* @@ -836,26 +836,26 @@ * * My apologies to any botanists who may be reading this. */ -#define ARM_TT_LEAF_SIZE ARM_TT_L3_SIZE -#define ARM_TT_LEAF_OFFMASK ARM_TT_L3_OFFMASK -#define ARM_TT_LEAF_SHIFT ARM_TT_L3_SHIFT -#define ARM_TT_LEAF_INDEX_MASK ARM_TT_L3_INDEX_MASK +#define ARM_TT_LEAF_SIZE ARM_TT_L3_SIZE +#define ARM_TT_LEAF_OFFMASK ARM_TT_L3_OFFMASK +#define ARM_TT_LEAF_SHIFT ARM_TT_L3_SHIFT +#define ARM_TT_LEAF_INDEX_MASK ARM_TT_L3_INDEX_MASK -#define ARM_TT_TWIG_SIZE ARM_TT_L2_SIZE -#define ARM_TT_TWIG_OFFMASK ARM_TT_L2_OFFMASK -#define ARM_TT_TWIG_SHIFT ARM_TT_L2_SHIFT -#define ARM_TT_TWIG_INDEX_MASK ARM_TT_L2_INDEX_MASK +#define ARM_TT_TWIG_SIZE ARM_TT_L2_SIZE +#define ARM_TT_TWIG_OFFMASK ARM_TT_L2_OFFMASK +#define ARM_TT_TWIG_SHIFT ARM_TT_L2_SHIFT +#define ARM_TT_TWIG_INDEX_MASK ARM_TT_L2_INDEX_MASK #if __ARM64_TWO_LEVEL_PMAP__ -#define ARM_TT_ROOT_SIZE ARM_TT_L2_SIZE -#define ARM_TT_ROOT_OFFMASK ARM_TT_L2_OFFMASK -#define ARM_TT_ROOT_SHIFT ARM_TT_L2_SHIFT -#define ARM_TT_ROOT_INDEX_MASK ARM_TT_L2_INDEX_MASK +#define ARM_TT_ROOT_SIZE ARM_TT_L2_SIZE +#define ARM_TT_ROOT_OFFMASK ARM_TT_L2_OFFMASK +#define ARM_TT_ROOT_SHIFT ARM_TT_L2_SHIFT +#define ARM_TT_ROOT_INDEX_MASK ARM_TT_L2_INDEX_MASK #else -#define ARM_TT_ROOT_SIZE ARM_TT_L1_SIZE -#define ARM_TT_ROOT_OFFMASK ARM_TT_L1_OFFMASK -#define ARM_TT_ROOT_SHIFT ARM_TT_L1_SHIFT -#define ARM_TT_ROOT_INDEX_MASK ARM_TT_L1_INDEX_MASK +#define ARM_TT_ROOT_SIZE ARM_TT_L1_SIZE +#define ARM_TT_ROOT_OFFMASK ARM_TT_L1_OFFMASK +#define ARM_TT_ROOT_SHIFT ARM_TT_L1_SHIFT +#define ARM_TT_ROOT_INDEX_MASK ARM_TT_L1_INDEX_MASK #endif /* @@ -925,7 +925,7 @@ * +-----+------+--+---+----+------+----------------------+------+--+--+----+----+--+-------+-+-+ * | ign |sw use|XN|PXN|HINT| zero | OutputAddress[47:25] | zero |nG|AF| SH | AP |NS|AttrIdx|0|V| * +-----+------+--+---+----+------+----------------------+------+--+--+----+----+--+-------+-+-+ - * + * * where: * 'nG' notGlobal bit * 'SH' Shareability field @@ -937,106 +937,106 @@ * 'AttrIdx' Memory Attribute Index */ -#define TTE_SHIFT 3 /* shift width of a tte (sizeof(tte) == (1 << TTE_SHIFT)) */ +#define TTE_SHIFT 3 /* shift width of a tte (sizeof(tte) == (1 << TTE_SHIFT)) */ #ifdef __ARM_16K_PG__ -#define TTE_PGENTRIES (16384 >> TTE_SHIFT) /* number of ttes per page */ +#define TTE_PGENTRIES (16384 >> TTE_SHIFT) /* number of ttes per page */ #else -#define TTE_PGENTRIES (4096 >> TTE_SHIFT) /* number of ttes per page */ +#define TTE_PGENTRIES (4096 >> TTE_SHIFT) /* number of ttes per page */ #endif -#define ARM_TTE_MAX (TTE_PGENTRIES) +#define ARM_TTE_MAX (TTE_PGENTRIES) -#define ARM_TTE_EMPTY 0x0000000000000000ULL /* unasigned - invalid entry */ -#define ARM_TTE_TYPE_FAULT 0x0000000000000000ULL /* unasigned - invalid entry */ +#define ARM_TTE_EMPTY 
0x0000000000000000ULL /* unasigned - invalid entry */ +#define ARM_TTE_TYPE_FAULT 0x0000000000000000ULL /* unasigned - invalid entry */ -#define ARM_TTE_VALID 0x0000000000000001ULL /* valid entry */ +#define ARM_TTE_VALID 0x0000000000000001ULL /* valid entry */ -#define ARM_TTE_TYPE_MASK 0x0000000000000002ULL /* mask for extracting the type */ -#define ARM_TTE_TYPE_TABLE 0x0000000000000002ULL /* page table type */ -#define ARM_TTE_TYPE_BLOCK 0x0000000000000000ULL /* block entry type */ -#define ARM_TTE_TYPE_L3BLOCK 0x0000000000000002ULL -#define ARM_TTE_TYPE_MASK 0x0000000000000002ULL /* mask for extracting the type */ +#define ARM_TTE_TYPE_MASK 0x0000000000000002ULL /* mask for extracting the type */ +#define ARM_TTE_TYPE_TABLE 0x0000000000000002ULL /* page table type */ +#define ARM_TTE_TYPE_BLOCK 0x0000000000000000ULL /* block entry type */ +#define ARM_TTE_TYPE_L3BLOCK 0x0000000000000002ULL +#define ARM_TTE_TYPE_MASK 0x0000000000000002ULL /* mask for extracting the type */ #ifdef __ARM_16K_PG__ /* Note that L0/L1 block entries are disallowed for the 16KB granule size; what are we doing with these? */ -#define ARM_TTE_BLOCK_SHIFT 12 /* entry shift for a 16KB L3 TTE entry */ -#define ARM_TTE_BLOCK_L0_SHIFT ARM_TT_L0_SHIFT /* block shift for 128TB section */ -#define ARM_TTE_BLOCK_L1_MASK 0x0000fff000000000ULL /* mask to extract phys address from L1 block entry */ -#define ARM_TTE_BLOCK_L1_SHIFT ARM_TT_L1_SHIFT /* block shift for 64GB section */ -#define ARM_TTE_BLOCK_L2_MASK 0x0000fffffe000000ULL /* mask to extract phys address from Level 2 Translation Block entry */ -#define ARM_TTE_BLOCK_L2_SHIFT ARM_TT_L2_SHIFT /* block shift for 32MB section */ +#define ARM_TTE_BLOCK_SHIFT 12 /* entry shift for a 16KB L3 TTE entry */ +#define ARM_TTE_BLOCK_L0_SHIFT ARM_TT_L0_SHIFT /* block shift for 128TB section */ +#define ARM_TTE_BLOCK_L1_MASK 0x0000fff000000000ULL /* mask to extract phys address from L1 block entry */ +#define ARM_TTE_BLOCK_L1_SHIFT ARM_TT_L1_SHIFT /* block shift for 64GB section */ +#define ARM_TTE_BLOCK_L2_MASK 0x0000fffffe000000ULL /* mask to extract phys address from Level 2 Translation Block entry */ +#define ARM_TTE_BLOCK_L2_SHIFT ARM_TT_L2_SHIFT /* block shift for 32MB section */ #else -#define ARM_TTE_BLOCK_SHIFT 12 /* entry shift for a 4KB L3 TTE entry */ -#define ARM_TTE_BLOCK_L0_SHIFT ARM_TT_L0_SHIFT /* block shift for 2048GB section */ -#define ARM_TTE_BLOCK_L1_MASK 0x0000ffffc0000000ULL /* mask to extract phys address from L1 block entry */ -#define ARM_TTE_BLOCK_L1_SHIFT ARM_TT_L1_SHIFT /* block shift for 1GB section */ -#define ARM_TTE_BLOCK_L2_MASK 0x0000ffffffe00000ULL /* mask to extract phys address from Level 2 Translation Block entry */ -#define ARM_TTE_BLOCK_L2_SHIFT ARM_TT_L2_SHIFT /* block shift for 2MB section */ +#define ARM_TTE_BLOCK_SHIFT 12 /* entry shift for a 4KB L3 TTE entry */ +#define ARM_TTE_BLOCK_L0_SHIFT ARM_TT_L0_SHIFT /* block shift for 2048GB section */ +#define ARM_TTE_BLOCK_L1_MASK 0x0000ffffc0000000ULL /* mask to extract phys address from L1 block entry */ +#define ARM_TTE_BLOCK_L1_SHIFT ARM_TT_L1_SHIFT /* block shift for 1GB section */ +#define ARM_TTE_BLOCK_L2_MASK 0x0000ffffffe00000ULL /* mask to extract phys address from Level 2 Translation Block entry */ +#define ARM_TTE_BLOCK_L2_SHIFT ARM_TT_L2_SHIFT /* block shift for 2MB section */ #endif -#define ARM_TTE_BLOCK_APSHIFT 6 -#define ARM_TTE_BLOCK_AP(x) ((x)<> PTE_SHIFT) /* number of ptes per page */ +#define PTE_PGENTRIES (16384 >> PTE_SHIFT) /* number of ptes per page */ #else 
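/*
 * Editor's sketch, not part of the original patch: the ARM_TTE_VALID,
 * ARM_TTE_TYPE_* and ARM_TTE_BLOCK_L2_MASK macros above are typically
 * combined as below to decode an L2 entry. The helper name is
 * illustrative only; uint64_t stands in for the kernel's tt_entry_t.
 */
static inline uint64_t
tte_l2_block_pa(uint64_t tte)
{
	if ((tte & ARM_TTE_VALID) == 0) {
		return 0;       /* ARM_TTE_TYPE_FAULT: empty/invalid entry */
	}
	if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) {
		return 0;       /* points to a next-level table, not a block */
	}
	/* block entry: mask off attribute bits to recover the physical base */
	return tte & ARM_TTE_BLOCK_L2_MASK;
}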
-#define PTE_PGENTRIES (4096 >> PTE_SHIFT) /* number of ptes per page */ +#define PTE_PGENTRIES (4096 >> PTE_SHIFT) /* number of ptes per page */ #endif -#define ARM_PTE_EMPTY 0x0000000000000000ULL /* unasigned - invalid entry */ +#define ARM_PTE_EMPTY 0x0000000000000000ULL /* unasigned - invalid entry */ /* markers for (invalid) PTE for a page sent to compressor */ -#define ARM_PTE_COMPRESSED 0x8000000000000000ULL /* compressed... */ -#define ARM_PTE_COMPRESSED_ALT 0x4000000000000000ULL /* ... and was "alt_acct" */ -#define ARM_PTE_COMPRESSED_MASK 0xC000000000000000ULL -#define ARM_PTE_IS_COMPRESSED(x) \ - ((((x) & 0x3) == 0) && /* PTE is not valid... */ \ - ((x) & ARM_PTE_COMPRESSED) && /* ...has "compressed" marker" */ \ +#define ARM_PTE_COMPRESSED 0x8000000000000000ULL /* compressed... */ +#define ARM_PTE_COMPRESSED_ALT 0x4000000000000000ULL /* ... and was "alt_acct" */ +#define ARM_PTE_COMPRESSED_MASK 0xC000000000000000ULL +#define ARM_PTE_IS_COMPRESSED(x) \ + ((((x) & 0x3) == 0) && /* PTE is not valid... */ \ + ((x) & ARM_PTE_COMPRESSED) && /* ...has "compressed" marker" */ \ ((!((x) & ~ARM_PTE_COMPRESSED_MASK)) || /* ...no other bits */ \ (panic("compressed PTE %p 0x%llx has extra bits 0x%llx: corrupted?", \ - &(x), (x), (x) & ~ARM_PTE_COMPRESSED_MASK), FALSE))) + &(x), (x), (x) & ~ARM_PTE_COMPRESSED_MASK), FALSE))) -#define ARM_PTE_TYPE 0x0000000000000003ULL /* valid L3 entry: includes bit #1 (counterintuitively) */ -#define ARM_PTE_TYPE_VALID 0x0000000000000003ULL /* valid L3 entry: includes bit #1 (counterintuitively) */ -#define ARM_PTE_TYPE_FAULT 0x0000000000000000ULL /* invalid L3 entry */ -#define ARM_PTE_TYPE_MASK 0x0000000000000002ULL /* mask to get pte type */ +#define ARM_PTE_TYPE 0x0000000000000003ULL /* valid L3 entry: includes bit #1 (counterintuitively) */ +#define ARM_PTE_TYPE_VALID 0x0000000000000003ULL /* valid L3 entry: includes bit #1 (counterintuitively) */ +#define ARM_PTE_TYPE_FAULT 0x0000000000000000ULL /* invalid L3 entry */ +#define ARM_PTE_TYPE_MASK 0x0000000000000002ULL /* mask to get pte type */ #ifdef __ARM_16K_PG__ /* TODO: What does the shift mean here? 
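(Editor's aside, not from the original patch: whatever the answer, the
   page masks below are what PTE-to-physical conversions use, e.g.
       pa = (pte & ARM_PTE_PAGE_MASK) | (va & ARM_TT_L3_OFFMASK);
   once the type bits have been checked.)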
*/ -#define ARM_PTE_PAGE_MASK 0x0000FFFFFFFFC000ULL /* mask for 16KB page */ +#define ARM_PTE_PAGE_MASK 0x0000FFFFFFFFC000ULL /* mask for 16KB page */ #else -#define ARM_PTE_PAGE_MASK 0x0000FFFFFFFFF000ULL /* mask for 4KB page */ -#define ARM_PTE_PAGE_SHIFT 12 /* page shift for 4KB page */ +#define ARM_PTE_PAGE_MASK 0x0000FFFFFFFFF000ULL /* mask for 4KB page */ +#define ARM_PTE_PAGE_SHIFT 12 /* page shift for 4KB page */ #endif -#define ARM_PTE_AP(x) ((x) << 6) /* access protections */ -#define ARM_PTE_APMASK (0x3ULL << 6) /* mask access protections */ -#define ARM_PTE_EXTRACT_AP(x) (((x) >> 6) & 0x3ULL) /* extract access protections from PTE */ +#define ARM_PTE_AP(x) ((x) << 6) /* access protections */ +#define ARM_PTE_APMASK (0x3ULL << 6) /* mask access protections */ +#define ARM_PTE_EXTRACT_AP(x) (((x) >> 6) & 0x3ULL) /* extract access protections from PTE */ -#define ARM_PTE_ATTRINDX(x) ((x) << 2) /* memory attributes index */ -#define ARM_PTE_ATTRINDXMASK (0x7ULL << 2) /* mask memory attributes index */ +#define ARM_PTE_ATTRINDX(x) ((x) << 2) /* memory attributes index */ +#define ARM_PTE_ATTRINDXMASK (0x7ULL << 2) /* mask memory attributes index */ -#define ARM_PTE_SH(x) ((x) << 8) /* access shared */ -#define ARM_PTE_SHMASK (0x3ULL << 8) /* mask access shared */ +#define ARM_PTE_SH(x) ((x) << 8) /* access shared */ +#define ARM_PTE_SHMASK (0x3ULL << 8) /* mask access shared */ -#define ARM_PTE_AF 0x0000000000000400ULL /* value for access */ -#define ARM_PTE_AFMASK 0x0000000000000400ULL /* access mask */ +#define ARM_PTE_AF 0x0000000000000400ULL /* value for access */ +#define ARM_PTE_AFMASK 0x0000000000000400ULL /* access mask */ -#define ARM_PTE_NG 0x0000000000000800ULL /* value for a global mapping */ -#define ARM_PTE_NG_MASK 0x0000000000000800ULL /* notGlobal mapping mask */ +#define ARM_PTE_NG 0x0000000000000800ULL /* value for a global mapping */ +#define ARM_PTE_NG_MASK 0x0000000000000800ULL /* notGlobal mapping mask */ -#define ARM_PTE_NS 0x0000000000000020ULL /* value for a secure mapping */ -#define ARM_PTE_NS_MASK 0x0000000000000020ULL /* notSecure mapping mask */ +#define ARM_PTE_NS 0x0000000000000020ULL /* value for a secure mapping */ +#define ARM_PTE_NS_MASK 0x0000000000000020ULL /* notSecure mapping mask */ -#define ARM_PTE_HINT 0x0010000000000000ULL /* value for contiguous entries hint */ -#define ARM_PTE_HINT_MASK 0x0010000000000000ULL /* mask for contiguous entries hint */ +#define ARM_PTE_HINT 0x0010000000000000ULL /* value for contiguous entries hint */ +#define ARM_PTE_HINT_MASK 0x0010000000000000ULL /* mask for contiguous entries hint */ #if __ARM_16K_PG__ -#define ARM_PTE_HINT_ENTRIES 128ULL /* number of entries the hint covers */ -#define ARM_PTE_HINT_ENTRIES_SHIFT 7ULL /* shift to construct the number of entries */ -#define ARM_PTE_HINT_ADDR_MASK 0x0000FFFFFFE00000ULL /* mask to extract the starting hint address */ -#define ARM_PTE_HINT_ADDR_SHIFT 21 /* shift for the hint address */ -#define ARM_KVA_HINT_ADDR_MASK 0xFFFFFFFFFFE00000ULL /* mask to extract the starting hint address */ +#define ARM_PTE_HINT_ENTRIES 128ULL /* number of entries the hint covers */ +#define ARM_PTE_HINT_ENTRIES_SHIFT 7ULL /* shift to construct the number of entries */ +#define ARM_PTE_HINT_ADDR_MASK 0x0000FFFFFFE00000ULL /* mask to extract the starting hint address */ +#define ARM_PTE_HINT_ADDR_SHIFT 21 /* shift for the hint address */ +#define ARM_KVA_HINT_ADDR_MASK 0xFFFFFFFFFFE00000ULL /* mask to extract the starting hint address */ #else -#define ARM_PTE_HINT_ENTRIES 16ULL /* 
number of entries the hint covers */ -#define ARM_PTE_HINT_ENTRIES_SHIFT 4ULL /* shift to construct the number of entries */ -#define ARM_PTE_HINT_ADDR_MASK 0x0000FFFFFFFF0000ULL /* mask to extract the starting hint address */ -#define ARM_PTE_HINT_ADDR_SHIFT 16 /* shift for the hint address */ -#define ARM_KVA_HINT_ADDR_MASK 0xFFFFFFFFFFFF0000ULL /* mask to extract the starting hint address */ +#define ARM_PTE_HINT_ENTRIES 16ULL /* number of entries the hint covers */ +#define ARM_PTE_HINT_ENTRIES_SHIFT 4ULL /* shift to construct the number of entries */ +#define ARM_PTE_HINT_ADDR_MASK 0x0000FFFFFFFF0000ULL /* mask to extract the starting hint address */ +#define ARM_PTE_HINT_ADDR_SHIFT 16 /* shift for the hint address */ +#define ARM_KVA_HINT_ADDR_MASK 0xFFFFFFFFFFFF0000ULL /* mask to extract the starting hint address */ #endif -#define ARM_PTE_PNX 0x0020000000000000ULL /* value for privilege no execute bit */ -#define ARM_PTE_PNXMASK 0x0020000000000000ULL /* privilege no execute mask */ +#define ARM_PTE_PNX 0x0020000000000000ULL /* value for privilege no execute bit */ +#define ARM_PTE_PNXMASK 0x0020000000000000ULL /* privilege no execute mask */ -#define ARM_PTE_NX 0x0040000000000000ULL /* value for no execute bit */ -#define ARM_PTE_NXMASK 0x0040000000000000ULL /* no execute mask */ +#define ARM_PTE_NX 0x0040000000000000ULL /* value for no execute bit */ +#define ARM_PTE_NXMASK 0x0040000000000000ULL /* no execute mask */ -#define ARM_PTE_WIRED 0x0080000000000000ULL /* value for software wired bit */ -#define ARM_PTE_WIRED_MASK 0x0080000000000000ULL /* software wired mask */ +#define ARM_PTE_WIRED 0x0080000000000000ULL /* value for software wired bit */ +#define ARM_PTE_WIRED_MASK 0x0080000000000000ULL /* software wired mask */ -#define ARM_PTE_WRITEABLE 0x0100000000000000ULL /* value for software writeable bit */ -#define ARM_PTE_WRITEABLE_MASK 0x0100000000000000ULL /* software writeable mask */ +#define ARM_PTE_WRITEABLE 0x0100000000000000ULL /* value for software writeable bit */ +#define ARM_PTE_WRITEABLE_MASK 0x0100000000000000ULL /* software writeable mask */ #if CONFIG_PGTRACE #define ARM_PTE_PGTRACE 0x0200000000000000ULL /* value for software trace bit */ #define ARM_PTE_PGTRACE_MASK 0x0200000000000000ULL /* software trace mask */ #endif -#define ARM_PTE_BOOT_PAGE_BASE (ARM_PTE_TYPE_VALID | ARM_PTE_SH(SH_OUTER_MEMORY) \ - | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) | ARM_PTE_AF) +#define ARM_PTE_BOOT_PAGE_BASE (ARM_PTE_TYPE_VALID | ARM_PTE_SH(SH_OUTER_MEMORY) \ + | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) | ARM_PTE_AF) #if __ARM_KERNEL_PROTECT__ -#define ARM_PTE_BOOT_PAGE (ARM_PTE_BOOT_PAGE_BASE | ARM_PTE_NG) +#define ARM_PTE_BOOT_PAGE (ARM_PTE_BOOT_PAGE_BASE | ARM_PTE_NG) #else /* __ARM_KERNEL_PROTECT__ */ -#define ARM_PTE_BOOT_PAGE (ARM_PTE_BOOT_PAGE_BASE) +#define ARM_PTE_BOOT_PAGE (ARM_PTE_BOOT_PAGE_BASE) #endif /* __ARM_KERNEL_PROTECT__ */ /* @@ -1212,78 +1212,78 @@ * Note: The ISS can have many forms. These are defined separately below. 
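 * (Editor's illustration, not from the original patch: a synchronous
 *  exception handler typically splits the register with the accessors
 *  defined just below, e.g.
 *
 *      esr_exception_class_t class = ESR_EC(esr);   // bits [31:26]
 *      uint32_t              iss   = ESR_ISS(esr);  // bits [24:0]
 *
 *  switching on 'class' before giving 'iss' its class-specific
 *  interpretation.)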
*/ -#define ESR_EC_SHIFT 26 -#define ESR_EC_MASK (0x3F << ESR_EC_SHIFT) -#define ESR_EC(x) ((x & ESR_EC_MASK) >> ESR_EC_SHIFT) +#define ESR_EC_SHIFT 26 +#define ESR_EC_MASK (0x3F << ESR_EC_SHIFT) +#define ESR_EC(x) ((x & ESR_EC_MASK) >> ESR_EC_SHIFT) -#define ESR_IL_SHIFT 25 -#define ESR_IL (1 << ESR_IL_SHIFT) +#define ESR_IL_SHIFT 25 +#define ESR_IL (1 << ESR_IL_SHIFT) -#define ESR_INSTR_IS_2BYTES(x) (!(x & ESR_IL)) +#define ESR_INSTR_IS_2BYTES(x) (!(x & ESR_IL)) -#define ESR_ISS_MASK 0x01FFFFFF -#define ESR_ISS(x) (x & ESR_ISS_MASK) +#define ESR_ISS_MASK 0x01FFFFFF +#define ESR_ISS(x) (x & ESR_ISS_MASK) #ifdef __ASSEMBLER__ /* Define only the classes we need to test in the exception vectors. */ -#define ESR_EC_IABORT_EL1 0x21 -#define ESR_EC_DABORT_EL1 0x25 -#define ESR_EC_SP_ALIGN 0x26 +#define ESR_EC_IABORT_EL1 0x21 +#define ESR_EC_DABORT_EL1 0x25 +#define ESR_EC_SP_ALIGN 0x26 #else typedef enum { - ESR_EC_UNCATEGORIZED = 0x00, - ESR_EC_WFI_WFE = 0x01, - ESR_EC_MCR_MRC_CP15_TRAP = 0x03, - ESR_EC_MCRR_MRRC_CP15_TRAP = 0x04, - ESR_EC_MCR_MRC_CP14_TRAP = 0x05, - ESR_EC_LDC_STC_CP14_TRAP = 0x06, - ESR_EC_TRAP_SIMD_FP = 0x07, - ESR_EC_MCRR_MRRC_CP14_TRAP = 0x0c, - ESR_EC_ILLEGAL_INSTR_SET = 0x0e, - ESR_EC_SVC_32 = 0x11, - ESR_EC_SVC_64 = 0x15, - ESR_EC_MSR_TRAP = 0x18, - ESR_EC_IABORT_EL0 = 0x20, - ESR_EC_IABORT_EL1 = 0x21, - ESR_EC_PC_ALIGN = 0x22, - ESR_EC_DABORT_EL0 = 0x24, - ESR_EC_DABORT_EL1 = 0x25, - ESR_EC_SP_ALIGN = 0x26, - ESR_EC_FLOATING_POINT_32 = 0x28, - ESR_EC_FLOATING_POINT_64 = 0x2C, - ESR_EC_BKPT_REG_MATCH_EL0 = 0x30, // Breakpoint Debug event taken to the EL from a lower EL. - ESR_EC_BKPT_REG_MATCH_EL1 = 0x31, // Breakpoint Debug event taken to the EL from the EL. - ESR_EC_SW_STEP_DEBUG_EL0 = 0x32, // Software Step Debug event taken to the EL from a lower EL. - ESR_EC_SW_STEP_DEBUG_EL1 = 0x33, // Software Step Debug event taken to the EL from the EL. - ESR_EC_WATCHPT_MATCH_EL0 = 0x34, // Watchpoint Debug event taken to the EL from a lower EL. - ESR_EC_WATCHPT_MATCH_EL1 = 0x35, // Watchpoint Debug event taken to the EL from the EL. - ESR_EC_BKPT_AARCH32 = 0x38, - ESR_EC_BRK_AARCH64 = 0x3C + ESR_EC_UNCATEGORIZED = 0x00, + ESR_EC_WFI_WFE = 0x01, + ESR_EC_MCR_MRC_CP15_TRAP = 0x03, + ESR_EC_MCRR_MRRC_CP15_TRAP = 0x04, + ESR_EC_MCR_MRC_CP14_TRAP = 0x05, + ESR_EC_LDC_STC_CP14_TRAP = 0x06, + ESR_EC_TRAP_SIMD_FP = 0x07, + ESR_EC_MCRR_MRRC_CP14_TRAP = 0x0c, + ESR_EC_ILLEGAL_INSTR_SET = 0x0e, + ESR_EC_SVC_32 = 0x11, + ESR_EC_SVC_64 = 0x15, + ESR_EC_MSR_TRAP = 0x18, + ESR_EC_IABORT_EL0 = 0x20, + ESR_EC_IABORT_EL1 = 0x21, + ESR_EC_PC_ALIGN = 0x22, + ESR_EC_DABORT_EL0 = 0x24, + ESR_EC_DABORT_EL1 = 0x25, + ESR_EC_SP_ALIGN = 0x26, + ESR_EC_FLOATING_POINT_32 = 0x28, + ESR_EC_FLOATING_POINT_64 = 0x2C, + ESR_EC_BKPT_REG_MATCH_EL0 = 0x30, // Breakpoint Debug event taken to the EL from a lower EL. + ESR_EC_BKPT_REG_MATCH_EL1 = 0x31, // Breakpoint Debug event taken to the EL from the EL. + ESR_EC_SW_STEP_DEBUG_EL0 = 0x32, // Software Step Debug event taken to the EL from a lower EL. + ESR_EC_SW_STEP_DEBUG_EL1 = 0x33, // Software Step Debug event taken to the EL from the EL. + ESR_EC_WATCHPT_MATCH_EL0 = 0x34, // Watchpoint Debug event taken to the EL from a lower EL. + ESR_EC_WATCHPT_MATCH_EL1 = 0x35, // Watchpoint Debug event taken to the EL from the EL. 
+ ESR_EC_BKPT_AARCH32 = 0x38, + ESR_EC_BRK_AARCH64 = 0x3C } esr_exception_class_t; typedef enum { - FSC_TRANSLATION_FAULT_L0 = 0x04, - FSC_TRANSLATION_FAULT_L1 = 0x05, - FSC_TRANSLATION_FAULT_L2 = 0x06, - FSC_TRANSLATION_FAULT_L3 = 0x07, - FSC_ACCESS_FLAG_FAULT_L1 = 0x09, - FSC_ACCESS_FLAG_FAULT_L2 = 0x0A, - FSC_ACCESS_FLAG_FAULT_L3 = 0x0B, - FSC_PERMISSION_FAULT_L1 = 0x0D, - FSC_PERMISSION_FAULT_L2 = 0x0E, - FSC_PERMISSION_FAULT_L3 = 0x0F, - FSC_SYNC_EXT_ABORT = 0x10, - FSC_ASYNC_EXT_ABORT = 0x11, - FSC_SYNC_EXT_ABORT_TT_L1 = 0x15, - FSC_SYNC_EXT_ABORT_TT_L2 = 0x16, - FSC_SYNC_EXT_ABORT_TT_L3 = 0x17, - FSC_SYNC_PARITY = 0x18, - FSC_ASYNC_PARITY = 0x19, - FSC_SYNC_PARITY_TT_L1 = 0x1D, - FSC_SYNC_PARITY_TT_L2 = 0x1E, - FSC_SYNC_PARITY_TT_L3 = 0x1F, - FSC_ALIGNMENT_FAULT = 0x21, - FSC_DEBUG_FAULT = 0x22 + FSC_TRANSLATION_FAULT_L0 = 0x04, + FSC_TRANSLATION_FAULT_L1 = 0x05, + FSC_TRANSLATION_FAULT_L2 = 0x06, + FSC_TRANSLATION_FAULT_L3 = 0x07, + FSC_ACCESS_FLAG_FAULT_L1 = 0x09, + FSC_ACCESS_FLAG_FAULT_L2 = 0x0A, + FSC_ACCESS_FLAG_FAULT_L3 = 0x0B, + FSC_PERMISSION_FAULT_L1 = 0x0D, + FSC_PERMISSION_FAULT_L2 = 0x0E, + FSC_PERMISSION_FAULT_L3 = 0x0F, + FSC_SYNC_EXT_ABORT = 0x10, + FSC_ASYNC_EXT_ABORT = 0x11, + FSC_SYNC_EXT_ABORT_TT_L1 = 0x15, + FSC_SYNC_EXT_ABORT_TT_L2 = 0x16, + FSC_SYNC_EXT_ABORT_TT_L3 = 0x17, + FSC_SYNC_PARITY = 0x18, + FSC_ASYNC_PARITY = 0x19, + FSC_SYNC_PARITY_TT_L1 = 0x1D, + FSC_SYNC_PARITY_TT_L2 = 0x1E, + FSC_SYNC_PARITY_TT_L3 = 0x1F, + FSC_ALIGNMENT_FAULT = 0x21, + FSC_DEBUG_FAULT = 0x22 } fault_status_t; #endif /* ASSEMBLER */ @@ -1300,14 +1300,14 @@ typedef enum { * IFSC Instruction Fault Status Code */ -#define ISS_SSDE_ISV_SHIFT 24 -#define ISS_SSDE_ISV (0x1 << ISS_SSDE_ISV_SHIFT) +#define ISS_SSDE_ISV_SHIFT 24 +#define ISS_SSDE_ISV (0x1 << ISS_SSDE_ISV_SHIFT) -#define ISS_SSDE_EX_SHIFT 6 -#define ISS_SSDE_EX (0x1 << ISS_SSDE_EX_SHIFT) +#define ISS_SSDE_EX_SHIFT 6 +#define ISS_SSDE_EX (0x1 << ISS_SSDE_EX_SHIFT) -#define ISS_SSDE_FSC_MASK 0x3F -#define ISS_SSDE_FSC(x) (x & ISS_SSDE_FSC_MASK) +#define ISS_SSDE_FSC_MASK 0x3F +#define ISS_SSDE_FSC(x) (x & ISS_SSDE_FSC_MASK) /* * Instruction Abort ISS (EL1) @@ -1321,11 +1321,11 @@ typedef enum { * IFSC Instruction Fault Status Code */ -#define ISS_IA_EA_SHIFT 9 -#define ISS_IA_EA (0x1 << ISS_IA_EA_SHIFT) +#define ISS_IA_EA_SHIFT 9 +#define ISS_IA_EA (0x1 << ISS_IA_EA_SHIFT) -#define ISS_IA_FSC_MASK 0x3F -#define ISS_IA_FSC(x) (x & ISS_IA_FSC_MASK) +#define ISS_IA_FSC_MASK 0x3F +#define ISS_IA_FSC(x) (x & ISS_IA_FSC_MASK) /* @@ -1342,54 +1342,54 @@ typedef enum { * WnR Write not Read * DFSC Data Fault Status Code */ -#define ISS_DA_EA_SHIFT 9 -#define ISS_DA_EA (0x1 << ISS_DA_EA_SHIFT) +#define ISS_DA_EA_SHIFT 9 +#define ISS_DA_EA (0x1 << ISS_DA_EA_SHIFT) -#define ISS_DA_CM_SHIFT 8 -#define ISS_DA_CM (0x1 << ISS_DA_CM_SHIFT) +#define ISS_DA_CM_SHIFT 8 +#define ISS_DA_CM (0x1 << ISS_DA_CM_SHIFT) -#define ISS_DA_WNR_SHIFT 6 -#define ISS_DA_WNR (0x1 << ISS_DA_WNR_SHIFT) +#define ISS_DA_WNR_SHIFT 6 +#define ISS_DA_WNR (0x1 << ISS_DA_WNR_SHIFT) -#define ISS_DA_FSC_MASK 0x3F -#define ISS_DA_FSC(x) (x & ISS_DA_FSC_MASK) +#define ISS_DA_FSC_MASK 0x3F +#define ISS_DA_FSC(x) (x & ISS_DA_FSC_MASK) /* * Physical Address Register (EL1) */ -#define PAR_F_SHIFT 0 -#define PAR_F (0x1 << PAR_F_SHIFT) +#define PAR_F_SHIFT 0 +#define PAR_F (0x1 << PAR_F_SHIFT) -#define PLATFORM_SYSCALL_TRAP_NO 0x80000000 +#define PLATFORM_SYSCALL_TRAP_NO 0x80000000 -#define ARM64_SYSCALL_CODE_REG_NUM (16) +#define ARM64_SYSCALL_CODE_REG_NUM (16) -#define 
ARM64_CLINE_SHIFT 6 +#define ARM64_CLINE_SHIFT 6 #if defined(APPLE_ARM64_ARCH_FAMILY) -#define L2CERRSTS_DATSBEESV (1ULL << 2) /* L2C data single bit ECC error */ -#define L2CERRSTS_DATDBEESV (1ULL << 4) /* L2C data double bit ECC error */ +#define L2CERRSTS_DATSBEESV (1ULL << 2) /* L2C data single bit ECC error */ +#define L2CERRSTS_DATDBEESV (1ULL << 4) /* L2C data double bit ECC error */ #endif /* * Timer definitions. */ -#define CNTKCTL_EL1_PL0PTEN (0x1 << 9) /* 1: EL0 access to physical timer regs permitted */ -#define CNTKCTL_EL1_PL0VTEN (0x1 << 8) /* 1: EL0 access to virtual timer regs permitted */ -#define CNTKCTL_EL1_EVENTI_MASK (0x000000f0) /* Mask for bits describing which bit to use for triggering event stream */ -#define CNTKCTL_EL1_EVENTI_SHIFT (0x4) /* Shift for same */ -#define CNTKCTL_EL1_EVENTDIR (0x1 << 3) /* 1: one-to-zero transition of specified bit causes event */ -#define CNTKCTL_EL1_EVNTEN (0x1 << 2) /* 1: enable event stream */ -#define CNTKCTL_EL1_PL0VCTEN (0x1 << 1) /* 1: EL0 access to physical timebase + frequency reg enabled */ -#define CNTKCTL_EL1_PL0PCTEN (0x1 << 0) /* 1: EL0 access to virtual timebase + frequency reg enabled */ - -#define CNTV_CTL_EL0_ISTATUS (0x1 << 2) /* (read only): whether interrupt asserted */ -#define CNTV_CTL_EL0_IMASKED (0x1 << 1) /* 1: interrupt masked */ -#define CNTV_CTL_EL0_ENABLE (0x1 << 0) /* 1: virtual timer enabled */ - -#define CNTP_CTL_EL0_ISTATUS CNTV_CTL_EL0_ISTATUS -#define CNTP_CTL_EL0_IMASKED CNTV_CTL_EL0_IMASKED -#define CNTP_CTL_EL0_ENABLE CNTV_CTL_EL0_ENABLE +#define CNTKCTL_EL1_PL0PTEN (0x1 << 9) /* 1: EL0 access to physical timer regs permitted */ +#define CNTKCTL_EL1_PL0VTEN (0x1 << 8) /* 1: EL0 access to virtual timer regs permitted */ +#define CNTKCTL_EL1_EVENTI_MASK (0x000000f0) /* Mask for bits describing which bit to use for triggering event stream */ +#define CNTKCTL_EL1_EVENTI_SHIFT (0x4) /* Shift for same */ +#define CNTKCTL_EL1_EVENTDIR (0x1 << 3) /* 1: one-to-zero transition of specified bit causes event */ +#define CNTKCTL_EL1_EVNTEN (0x1 << 2) /* 1: enable event stream */ +#define CNTKCTL_EL1_PL0VCTEN (0x1 << 1) /* 1: EL0 access to physical timebase + frequency reg enabled */ +#define CNTKCTL_EL1_PL0PCTEN (0x1 << 0) /* 1: EL0 access to virtual timebase + frequency reg enabled */ + +#define CNTV_CTL_EL0_ISTATUS (0x1 << 2) /* (read only): whether interrupt asserted */ +#define CNTV_CTL_EL0_IMASKED (0x1 << 1) /* 1: interrupt masked */ +#define CNTV_CTL_EL0_ENABLE (0x1 << 0) /* 1: virtual timer enabled */ + +#define CNTP_CTL_EL0_ISTATUS CNTV_CTL_EL0_ISTATUS +#define CNTP_CTL_EL0_IMASKED CNTV_CTL_EL0_IMASKED +#define CNTP_CTL_EL0_ENABLE CNTV_CTL_EL0_ENABLE /* * At present all other uses of ARM_DBG_* are shared bit compatibly with the 32bit definitons. 
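/*
 * Editor's sketch, not part of the original patch: one plausible use of
 * the CNTKCTL_EL1 bits defined above, via the same clang system-register
 * builtins this patch uses elsewhere (e.g. __builtin_arm_rsr64 in
 * sleh.c). The function name is illustrative only.
 */
static inline void
enable_el0_virtual_timer_access(void)
{
	uint64_t cntkctl = __builtin_arm_rsr64("CNTKCTL_EL1");
	cntkctl |= CNTKCTL_EL1_PL0VTEN;   /* EL0 may program the virtual timer */
	cntkctl |= CNTKCTL_EL1_PL0VCTEN;  /* EL0 timebase access (see the bit comments above) */
	__builtin_arm_wsr64("CNTKCTL_EL1", cntkctl);
}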
@@ -1397,28 +1397,28 @@ typedef enum { */ #define ARM_DBG_VR_ADDRESS_MASK64 0xFFFFFFFFFFFFFFFCull /* BVR & WVR */ -#define MIDR_EL1_REV_SHIFT 0 -#define MIDR_EL1_REV_MASK (0xf << MIDR_EL1_REV_SHIFT) -#define MIDR_EL1_PNUM_SHIFT 4 -#define MIDR_EL1_PNUM_MASK (0xfff << MIDR_EL1_PNUM_SHIFT) -#define MIDR_EL1_ARCH_SHIFT 16 -#define MIDR_EL1_ARCH_MASK (0xf << MIDR_EL1_ARCH_SHIFT) -#define MIDR_EL1_VAR_SHIFT 20 -#define MIDR_EL1_VAR_MASK (0xf << MIDR_EL1_VAR_SHIFT) -#define MIDR_EL1_IMP_SHIFT 24 -#define MIDR_EL1_IMP_MASK (0xff << MIDR_EL1_IMP_SHIFT) +#define MIDR_EL1_REV_SHIFT 0 +#define MIDR_EL1_REV_MASK (0xf << MIDR_EL1_REV_SHIFT) +#define MIDR_EL1_PNUM_SHIFT 4 +#define MIDR_EL1_PNUM_MASK (0xfff << MIDR_EL1_PNUM_SHIFT) +#define MIDR_EL1_ARCH_SHIFT 16 +#define MIDR_EL1_ARCH_MASK (0xf << MIDR_EL1_ARCH_SHIFT) +#define MIDR_EL1_VAR_SHIFT 20 +#define MIDR_EL1_VAR_MASK (0xf << MIDR_EL1_VAR_SHIFT) +#define MIDR_EL1_IMP_SHIFT 24 +#define MIDR_EL1_IMP_MASK (0xff << MIDR_EL1_IMP_SHIFT) /* * CoreSight debug registers */ -#define CORESIGHT_ED 0 -#define CORESIGHT_CTI 1 -#define CORESIGHT_PMU 2 -#define CORESIGHT_UTT 3 /* Not truly a coresight thing, but at a fixed convenient location right after the coresight region */ +#define CORESIGHT_ED 0 +#define CORESIGHT_CTI 1 +#define CORESIGHT_PMU 2 +#define CORESIGHT_UTT 3 /* Not truly a coresight thing, but at a fixed convenient location right after the coresight region */ -#define CORESIGHT_OFFSET(x) ((x) * 0x10000) -#define CORESIGHT_REGIONS 4 -#define CORESIGHT_SIZE 0x1000 +#define CORESIGHT_OFFSET(x) ((x) * 0x10000) +#define CORESIGHT_REGIONS 4 +#define CORESIGHT_SIZE 0x1000 /* @@ -1430,49 +1430,50 @@ typedef enum { * +----------+--------+------+------+------+-----+------+ */ -#define ID_AA64ISAR0_EL1_ATOMIC_OFFSET 20 -#define ID_AA64ISAR0_EL1_ATOMIC_MASK (0xfull << ID_AA64ISAR0_EL1_ATOMIC_OFFSET) -#define ID_AA64ISAR0_EL1_ATOMIC_8_1 (2ull << ID_AA64ISAR0_EL1_ATOMIC_OFFSET) +#define ID_AA64ISAR0_EL1_ATOMIC_OFFSET 20 +#define ID_AA64ISAR0_EL1_ATOMIC_MASK (0xfull << ID_AA64ISAR0_EL1_ATOMIC_OFFSET) +#define ID_AA64ISAR0_EL1_ATOMIC_8_1 (2ull << ID_AA64ISAR0_EL1_ATOMIC_OFFSET) -#define ID_AA64ISAR0_EL1_CRC32_OFFSET 16 -#define ID_AA64ISAR0_EL1_CRC32_MASK (0xfull << ID_AA64ISAR0_EL1_CRC32_OFFSET) -#define ID_AA64ISAR0_EL1_CRC32_EN (1ull << ID_AA64ISAR0_EL1_CRC32_OFFSET) +#define ID_AA64ISAR0_EL1_CRC32_OFFSET 16 +#define ID_AA64ISAR0_EL1_CRC32_MASK (0xfull << ID_AA64ISAR0_EL1_CRC32_OFFSET) +#define ID_AA64ISAR0_EL1_CRC32_EN (1ull << ID_AA64ISAR0_EL1_CRC32_OFFSET) -#define ID_AA64ISAR0_EL1_SHA2_OFFSET 12 -#define ID_AA64ISAR0_EL1_SHA2_MASK (0xfull << ID_AA64ISAR0_EL1_SHA2_OFFSET) -#define ID_AA64ISAR0_EL1_SHA2_EN (1ull << ID_AA64ISAR0_EL1_SHA2_OFFSET) +#define ID_AA64ISAR0_EL1_SHA2_OFFSET 12 +#define ID_AA64ISAR0_EL1_SHA2_MASK (0xfull << ID_AA64ISAR0_EL1_SHA2_OFFSET) +#define ID_AA64ISAR0_EL1_SHA2_EN (1ull << ID_AA64ISAR0_EL1_SHA2_OFFSET) -#define ID_AA64ISAR0_EL1_SHA1_OFFSET 8 -#define ID_AA64ISAR0_EL1_SHA1_MASK (0xfull << ID_AA64ISAR0_EL1_SHA1_OFFSET) -#define ID_AA64ISAR0_EL1_SHA1_EN (1ull << ID_AA64ISAR0_EL1_SHA1_OFFSET) +#define ID_AA64ISAR0_EL1_SHA1_OFFSET 8 +#define ID_AA64ISAR0_EL1_SHA1_MASK (0xfull << ID_AA64ISAR0_EL1_SHA1_OFFSET) +#define ID_AA64ISAR0_EL1_SHA1_EN (1ull << ID_AA64ISAR0_EL1_SHA1_OFFSET) + +#define ID_AA64ISAR0_EL1_AES_OFFSET 4 +#define ID_AA64ISAR0_EL1_AES_MASK (0xfull << ID_AA64ISAR0_EL1_AES_OFFSET) +#define ID_AA64ISAR0_EL1_AES_EN (1ull << ID_AA64ISAR0_EL1_AES_OFFSET) +#define ID_AA64ISAR0_EL1_AES_PMULL_EN (2ull << 
ID_AA64ISAR0_EL1_AES_OFFSET) -#define ID_AA64ISAR0_EL1_AES_OFFSET 4 -#define ID_AA64ISAR0_EL1_AES_MASK (0xfull << ID_AA64ISAR0_EL1_AES_OFFSET) -#define ID_AA64ISAR0_EL1_AES_EN (1ull << ID_AA64ISAR0_EL1_AES_OFFSET) -#define ID_AA64ISAR0_EL1_AES_PMULL_EN (2ull << ID_AA64ISAR0_EL1_AES_OFFSET) #ifdef __ASSEMBLER__ -/* +/* * Compute CPU version: * Version is constructed as [4 bits of MIDR variant]:[4 bits of MIDR revision] * * Where the "variant" is the major number and the "revision" is the minor number. * * For example: - * Cyclone A0 is variant 0, revision 0, i.e. 0. + * Cyclone A0 is variant 0, revision 0, i.e. 0. * Cyclone B0 is variant 1, revision 0, i.e. 0x10 * $0 - register to place value in */ -.macro GET_MIDR_CPU_VERSION - mrs $0, MIDR_EL1 // Read MIDR_EL1 for CPUID - bfi $0, $0, #(MIDR_EL1_VAR_SHIFT - 4), #4 // move bits 3:0 (revision) to 19:16 (below variant) to get values adjacent - ubfx $0, $0, #(MIDR_EL1_VAR_SHIFT - 4), #8 // And extract the concatenated bitstring to beginning of register +.macro GET_MIDR_CPU_VERSION +mrs $0, MIDR_EL1 // Read MIDR_EL1 for CPUID +bfi $0, $0, #(MIDR_EL1_VAR_SHIFT - 4), #4 // move bits 3:0 (revision) to 19:16 (below variant) to get values adjacent +ubfx $0, $0, #(MIDR_EL1_VAR_SHIFT - 4), #8 // And extract the concatenated bitstring to beginning of register .endmacro -/* +/* * To apply a workaround for CPU versions less than a given value * (e.g. earlier than when a fix arrived) * @@ -1481,12 +1482,12 @@ typedef enum { * $2 - label to branch to (at end of workaround) */ .macro SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL - GET_MIDR_CPU_VERSION $0 - cmp $0, $1 - b.pl $2 // Unsigned "greater or equal" +GET_MIDR_CPU_VERSION $0 +cmp $0, $1 +b.pl $2 // Unsigned "greater or equal" .endmacro -/* +/* * To apply a workaround for CPU versions greater than a given value * (e.g. starting when a bug was introduced) * @@ -1495,15 +1496,15 @@ typedef enum { * $2 - label to branch to (at end of workaround) */ .macro SKIP_IF_CPU_VERSION_LESS_THAN - GET_MIDR_CPU_VERSION $0 - cmp $0, $1 - b.mi $2 // Unsigned "strictly less than" +GET_MIDR_CPU_VERSION $0 +cmp $0, $1 +b.mi $2 // Unsigned "strictly less than" .endmacro #endif /* __ASSEMBLER__ */ -#define MSR(reg,src) __asm__ volatile ("msr " reg ", %0" :: "r" (src)) -#define MRS(dest,reg) __asm__ volatile ("mrs %0, " reg : "=r" (dest)) +#define MSR(reg, src) __asm__ volatile ("msr " reg ", %0" :: "r" (src)) +#define MRS(dest, reg) __asm__ volatile ("mrs %0, " reg : "=r" (dest)) #endif /* _ARM64_PROC_REG_H_ */ diff --git a/osfmk/arm64/sleh.c b/osfmk/arm64/sleh.c index 4ace02086..2ad70b7c8 100644 --- a/osfmk/arm64/sleh.c +++ b/osfmk/arm64/sleh.c @@ -79,17 +79,17 @@ assert(TEST_CONTEXT32_SANITY(context) || TEST_CONTEXT64_SANITY(context)) -#define COPYIN(src, dst, size) \ +#define COPYIN(src, dst, size) \ (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? \ - copyin_kern(src, dst, size) \ - : \ - copyin(src, dst, size) + copyin_kern(src, dst, size) \ + : \ + copyin(src, dst, size) -#define COPYOUT(src, dst, size) \ +#define COPYOUT(src, dst, size) \ (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) ? 
\ - copyout_kern(src, dst, size) \ - : \ - copyout(src, dst, size) + copyout_kern(src, dst, size) \ + : \ + copyout(src, dst, size) // Below is for concatenating a string param to a string literal #define STR1(x) #x @@ -118,7 +118,7 @@ extern kern_return_t arm_fast_fault(pmap_t, vm_map_address_t, vm_prot_t, boolean static void handle_uncategorized(arm_saved_state_t *, boolean_t); static void handle_breakpoint(arm_saved_state_t *); -typedef void(*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *); +typedef void (*abort_inspector_t)(uint32_t, fault_status_t *, vm_prot_t *); static void inspect_instruction_abort(uint32_t, fault_status_t *, vm_prot_t *); static void inspect_data_abort(uint32_t, fault_status_t *, vm_prot_t *); @@ -126,7 +126,7 @@ static int is_vm_fault(fault_status_t); static int is_translation_fault(fault_status_t); static int is_alignment_fault(fault_status_t); -typedef void(*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); +typedef void (*abort_handler_t)(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); static void handle_user_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); static void handle_kernel_abort(arm_saved_state_t *, uint32_t, vm_offset_t, fault_status_t, vm_prot_t, vm_offset_t); @@ -151,7 +151,7 @@ struct proc; extern void unix_syscall(struct arm_saved_state * regs, thread_t thread_act, - struct uthread * uthread, struct proc * proc); + struct uthread * uthread, struct proc * proc); extern void mach_syscall(struct arm_saved_state*); @@ -161,7 +161,7 @@ extern kern_return_t dtrace_user_probe(arm_saved_state_t* regs); extern boolean_t dtrace_tally_fault(user_addr_t); /* Traps for userland processing. Can't include bsd/sys/fasttrap_isa.h, so copy and paste the trap instructions - over from that file. Need to keep these in sync! */ + * over from that file. Need to keep these in sync! */ #define FASTTRAP_ARM32_INSTR 0xe7ffdefc #define FASTTRAP_THUMB32_INSTR 0xdefc #define FASTTRAP_ARM64_INSTR 0xe7eeee7e @@ -188,15 +188,15 @@ extern volatile char pan_fault_value; #endif #if defined(APPLECYCLONE) -#define CPU_NAME "Cyclone" +#define CPU_NAME "Cyclone" #elif defined(APPLETYPHOON) -#define CPU_NAME "Typhoon" +#define CPU_NAME "Typhoon" #elif defined(APPLETWISTER) -#define CPU_NAME "Twister" +#define CPU_NAME "Twister" #elif defined(APPLEHURRICANE) -#define CPU_NAME "Hurricane" +#define CPU_NAME "Hurricane" #else -#define CPU_NAME "Unknown" +#define CPU_NAME "Unknown" #endif #if (CONFIG_KERNEL_INTEGRITY && defined(KERNEL_INTEGRITY_WT)) @@ -220,8 +220,8 @@ extern vm_offset_t static_memory_end; static inline unsigned __ror(unsigned value, unsigned shift) { - return (((unsigned)(value) >> (unsigned)(shift)) | - (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift))); + return ((unsigned)(value) >> (unsigned)(shift)) | + (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift)); } static void @@ -240,12 +240,12 @@ arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_o fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS)); panic_plain("Unhandled " CPU_NAME - " implementation specific error. 
state=%p esr=%#x far=%p\n" - "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n" - "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n", - state, esr, (void *)far, - (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts, - (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf); + " implementation specific error. state=%p esr=%#x far=%p\n" + "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n" + "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n", + state, esr, (void *)far, + (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts, + (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf); #elif defined(HAS_MIGSTS) uint64_t l2c_err_sts, l2c_err_adr, l2c_err_inf, mpidr, migsts; @@ -260,12 +260,12 @@ arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_o fed_err_sts = __builtin_arm_rsr64(STR(ARM64_REG_FED_ERR_STS)); panic_plain("Unhandled " CPU_NAME - " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n" - "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n" - "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n", - state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts, - (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts, - (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf); + " implementation specific error. state=%p esr=%#x far=%p p-core?%d migsts=%p\n" + "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n" + "\tl2c_err_sts:%p, l2c_err_adr:%p, l2c_err_inf:%p\n", + state, esr, (void *)far, !!(mpidr & MPIDR_PNE), (void *)migsts, + (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts, + (void *)l2c_err_sts, (void *)l2c_err_adr, (void *)l2c_err_inf); #else // !defined(NO_ECORE) && !defined(HAS_MIGSTS) uint64_t llc_err_sts, llc_err_adr, llc_err_inf, mpidr; @@ -286,12 +286,12 @@ arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_o llc_err_inf = __builtin_arm_rsr64(STR(ARM64_REG_L2C_ERR_INF)); panic_plain("Unhandled " CPU_NAME - " implementation specific error. state=%p esr=%#x far=%p p-core?%d\n" - "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n" - "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n", - state, esr, (void *)far, !!(mpidr & MPIDR_PNE), - (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts, - (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf); + " implementation specific error. 
state=%p esr=%#x far=%p p-core?%d\n" + "\tlsu_err_sts:%p, fed_err_sts:%p, mmu_err_sts:%p\n" + "\tllc_err_sts:%p, llc_err_adr:%p, llc_err_inf:%p\n", + state, esr, (void *)far, !!(mpidr & MPIDR_PNE), + (void *)lsu_err_sts, (void *)fed_err_sts, (void *)mmu_err_sts, + (void *)llc_err_sts, (void *)llc_err_adr, (void *)llc_err_inf); #endif #else // !defined(APPLE_ARM64_ARCH_FAMILY) #pragma unused (state, esr, far) @@ -303,7 +303,8 @@ arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_o #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-parameter" static void -kernel_integrity_error_handler(uint32_t esr, vm_offset_t far) { +kernel_integrity_error_handler(uint32_t esr, vm_offset_t far) +{ #if defined(KERNEL_INTEGRITY_WT) #if (DEVELOPMENT || DEBUG) if (ESR_WT_SERROR(esr)) { @@ -320,13 +321,13 @@ kernel_integrity_error_handler(uint32_t esr, vm_offset_t far) { panic_plain("Kernel integrity, software request."); case WT_REASON_PT_INVALID: panic_plain("Kernel integrity, encountered invalid TTE/PTE while " - "walking 0x%016lx.", far); + "walking 0x%016lx.", far); case WT_REASON_PT_VIOLATION: panic_plain("Kernel integrity, violation in mapping 0x%016lx.", - far); + far); case WT_REASON_REG_VIOLATION: panic_plain("Kernel integrity, violation in system register %d.", - (unsigned) far); + (unsigned) far); default: panic_plain("Kernel integrity, unknown (esr=0x%08x).", esr); } @@ -344,16 +345,17 @@ kernel_integrity_error_handler(uint32_t esr, vm_offset_t far) { static void arm64_platform_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far) { - cpu_data_t *cdp = getCpuDatap(); + cpu_data_t *cdp = getCpuDatap(); #if CONFIG_KERNEL_INTEGRITY kernel_integrity_error_handler(esr, far); #endif - if (cdp->platform_error_handler != (platform_error_handler_t) NULL) - (*(platform_error_handler_t)cdp->platform_error_handler) (cdp->cpu_id, far); - else + if (cdp->platform_error_handler != (platform_error_handler_t) NULL) { + (*(platform_error_handler_t)cdp->platform_error_handler)(cdp->cpu_id, far); + } else { arm64_implementation_specific_error(state, esr, far); + } } void @@ -365,40 +367,41 @@ panic_with_thread_kernel_state(const char *msg, arm_saved_state_t *ss) arm_saved_state64_t *state = saved_state64(ss); panic_plain("%s (saved state: %p%s)\n" - "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n" - "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n" - "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n" - "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n" - "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n" - "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n" - "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n" - "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n" - "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n", - msg, ss, (ss_valid ? 
"" : " INVALID"), - state->x[0], state->x[1], state->x[2], state->x[3], - state->x[4], state->x[5], state->x[6], state->x[7], - state->x[8], state->x[9], state->x[10], state->x[11], - state->x[12], state->x[13], state->x[14], state->x[15], - state->x[16], state->x[17], state->x[18], state->x[19], - state->x[20], state->x[21], state->x[22], state->x[23], - state->x[24], state->x[25], state->x[26], state->x[27], - state->x[28], state->fp, state->lr, state->sp, - state->pc, state->cpsr, state->esr, state->far); + "\t x0: 0x%016llx x1: 0x%016llx x2: 0x%016llx x3: 0x%016llx\n" + "\t x4: 0x%016llx x5: 0x%016llx x6: 0x%016llx x7: 0x%016llx\n" + "\t x8: 0x%016llx x9: 0x%016llx x10: 0x%016llx x11: 0x%016llx\n" + "\t x12: 0x%016llx x13: 0x%016llx x14: 0x%016llx x15: 0x%016llx\n" + "\t x16: 0x%016llx x17: 0x%016llx x18: 0x%016llx x19: 0x%016llx\n" + "\t x20: 0x%016llx x21: 0x%016llx x22: 0x%016llx x23: 0x%016llx\n" + "\t x24: 0x%016llx x25: 0x%016llx x26: 0x%016llx x27: 0x%016llx\n" + "\t x28: 0x%016llx fp: 0x%016llx lr: 0x%016llx sp: 0x%016llx\n" + "\t pc: 0x%016llx cpsr: 0x%08x esr: 0x%08x far: 0x%016llx\n", + msg, ss, (ss_valid ? "" : " INVALID"), + state->x[0], state->x[1], state->x[2], state->x[3], + state->x[4], state->x[5], state->x[6], state->x[7], + state->x[8], state->x[9], state->x[10], state->x[11], + state->x[12], state->x[13], state->x[14], state->x[15], + state->x[16], state->x[17], state->x[18], state->x[19], + state->x[20], state->x[21], state->x[22], state->x[23], + state->x[24], state->x[25], state->x[26], state->x[27], + state->x[28], state->fp, state->lr, state->sp, + state->pc, state->cpsr, state->esr, state->far); } void sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unused) { - esr_exception_class_t class = ESR_EC(esr); - arm_saved_state_t *state = &context->ss; + esr_exception_class_t class = ESR_EC(esr); + arm_saved_state_t *state = &context->ss; switch (class) { case ESR_EC_UNCATEGORIZED: { uint32_t instr = *((uint32_t*)get_saved_state_pc(state)); - if (IS_ARM_GDB_TRAP(instr)) + if (IS_ARM_GDB_TRAP(instr)) { DebuggerCall(EXC_BREAKPOINT, state); + } // Intentionally fall through to panic if we return from the debugger } default: @@ -409,22 +412,23 @@ sleh_synchronous_sp1(arm_context_t *context, uint32_t esr, vm_offset_t far __unu void sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) { - esr_exception_class_t class = ESR_EC(esr); - arm_saved_state_t *state = &context->ss; - vm_offset_t recover = 0; - thread_t thread = current_thread(); + esr_exception_class_t class = ESR_EC(esr); + arm_saved_state_t *state = &context->ss; + vm_offset_t recover = 0, recover_saved = 0; + thread_t thread = current_thread(); ASSERT_CONTEXT_SANITY(context); /* Don't run exception handler with recover handler set in case of double fault */ if (thread->recover) { - recover = thread->recover; + recover = recover_saved = thread->recover; thread->recover = (vm_offset_t)NULL; } /* Inherit the interrupt masks from previous context */ - if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) + if (SPSR_INTERRUPTS_ENABLED(get_saved_state_cpsr(state))) { ml_set_interrupts_enabled(TRUE); + } switch (class) { case ESR_EC_SVC_64: @@ -448,7 +452,7 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) assert(0); /* Unreachable */ case ESR_EC_IABORT_EL1: - + panic_with_thread_kernel_state("Kernel instruction fetch abort", state); case ESR_EC_PC_ALIGN: @@ -483,9 +487,10 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t 
far) case ESR_EC_BRK_AARCH64: if (PSR64_IS_KERNEL(get_saved_state_cpsr(state))) { - kprintf("Breakpoint instruction exception from kernel. Hanging here (by design).\n"); - for (;;); + for (;;) { + ; + } __unreachable_ok_push DebuggerCall(EXC_BREAKPOINT, &context->ss); @@ -502,14 +507,16 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) assert(0); /* Unreachable */ } panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p", - class, state, class, esr, (void *)far); + class, state, class, esr, (void *)far); assert(0); /* Unreachable */ break; case ESR_EC_BKPT_REG_MATCH_EL1: if (!PE_i_can_has_debugger(NULL) && FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) { kprintf("Hardware Breakpoint Debug exception from kernel. Hanging here (by design).\n"); - for (;;); + for (;;) { + ; + } __unreachable_ok_push DebuggerCall(EXC_BREAKPOINT, &context->ss); @@ -517,7 +524,7 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) __unreachable_ok_pop } panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p", - class, state, class, esr, (void *)far); + class, state, class, esr, (void *)far); assert(0); /* Unreachable */ break; @@ -527,14 +534,16 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) assert(0); /* Unreachable */ } panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p", - class, state, class, esr, (void *)far); + class, state, class, esr, (void *)far); assert(0); /* Unreachable */ break; case ESR_EC_SW_STEP_DEBUG_EL1: if (!PE_i_can_has_debugger(NULL) && FSC_DEBUG_FAULT == ISS_SSDE_FSC(esr)) { kprintf("Software Step Debug exception from kernel. Hanging here (by design).\n"); - for (;;); + for (;;) { + ; + } __unreachable_ok_push DebuggerCall(EXC_BREAKPOINT, &context->ss); @@ -542,7 +551,7 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) __unreachable_ok_pop } panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p", - class, state, class, esr, (void *)far); + class, state, class, esr, (void *)far); assert(0); /* Unreachable */ break; @@ -552,7 +561,7 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) assert(0); /* Unreachable */ } panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p", - class, state, class, esr, (void *)far); + class, state, class, esr, (void *)far); assert(0); /* Unreachable */ break; @@ -566,7 +575,7 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) break; /* return to first level handler */ } panic("Unsupported Class %u event code. state=%p class=%u esr=%u far=%p", - class, state, class, esr, (void *)far); + class, state, class, esr, (void *)far); assert(0); /* Unreachable */ break; @@ -576,11 +585,11 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) break; case ESR_EC_ILLEGAL_INSTR_SET: - if (EXCB_ACTION_RERUN != - ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) { + if (EXCB_ACTION_RERUN != + ex_cb_invoke(EXCB_CLASS_ILLEGAL_INSTR_SET, far)) { // instruction is not re-executed panic("Illegal instruction set exception. 
state=%p class=%u esr=%u far=%p spsr=0x%x", - state, class, esr, (void *)far, get_saved_state_cpsr(state)); + state, class, esr, (void *)far, get_saved_state_cpsr(state)); assert(0); } // must clear this fault in PSR to re-run @@ -599,18 +608,19 @@ sleh_synchronous(arm_context_t *context, uint32_t esr, vm_offset_t far) case ESR_EC_WFI_WFE: // Use of WFI or WFE instruction when they have been disabled for EL0 handle_wf_trap(state); - assert(0); /* Unreachable */ + assert(0); /* Unreachable */ break; default: panic("Unsupported synchronous exception. state=%p class=%u esr=%u far=%p", - state, class, esr, (void *)far); + state, class, esr, (void *)far); assert(0); /* Unreachable */ break; } - if (recover) - thread->recover = recover; + if (recover_saved) { + thread->recover = recover_saved; + } } /* @@ -645,8 +655,9 @@ handle_uncategorized(arm_saved_state_t *state, boolean_t instrLen2) * instr. */ if (instr == FASTTRAP_ARM64_INSTR || instr == FASTTRAP_ARM64_RET_INSTR) { - if (dtrace_user_probe(state) == KERN_SUCCESS) + if (dtrace_user_probe(state) == KERN_SUCCESS) { return; + } } } else if (PSR64_IS_USER32(get_saved_state_cpsr(state))) { /* @@ -738,9 +749,9 @@ handle_uncategorized(arm_saved_state_t *state, boolean_t instrLen2) static void handle_breakpoint(arm_saved_state_t *state) { - exception_type_t exception = EXC_BREAKPOINT; - mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT}; - mach_msg_type_number_t numcodes = 2; + exception_type_t exception = EXC_BREAKPOINT; + mach_exception_data_type_t codes[2] = {EXC_ARM_BREAKPOINT}; + mach_msg_type_number_t numcodes = 2; codes[1] = get_saved_state_pc(state); exception_triage(exception, codes, numcodes); @@ -750,9 +761,9 @@ handle_breakpoint(arm_saved_state_t *state) static void handle_watchpoint(vm_offset_t fault_addr) { - exception_type_t exception = EXC_BREAKPOINT; - mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG}; - mach_msg_type_number_t numcodes = 2; + exception_type_t exception = EXC_BREAKPOINT; + mach_exception_data_type_t codes[2] = {EXC_ARM_DA_DEBUG}; + mach_msg_type_number_t numcodes = 2; codes[1] = fault_addr; exception_triage(exception, codes, numcodes); @@ -761,10 +772,10 @@ handle_watchpoint(vm_offset_t fault_addr) static void handle_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, vm_offset_t recover, - abort_inspector_t inspect_abort, abort_handler_t handler) + abort_inspector_t inspect_abort, abort_handler_t handler) { - fault_status_t fault_code; - vm_prot_t fault_type; + fault_status_t fault_code; + vm_prot_t fault_type; inspect_abort(ESR_ISS(esr), &fault_code, &fault_type); handler(state, esr, fault_addr, fault_code, fault_type, recover); @@ -929,7 +940,7 @@ is_permission_fault(fault_status_t status) static int is_alignment_fault(fault_status_t status) { - return (status == FSC_ALIGNMENT_FAULT); + return status == FSC_ALIGNMENT_FAULT; } static int @@ -949,18 +960,19 @@ is_parity_error(fault_status_t status) static void handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, - fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover) + fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover) { - exception_type_t exc = EXC_BAD_ACCESS; - mach_exception_data_type_t codes[2]; - mach_msg_type_number_t numcodes = 2; - thread_t thread = current_thread(); + exception_type_t exc = EXC_BAD_ACCESS; + mach_exception_data_type_t codes[2]; + mach_msg_type_number_t numcodes = 2; + thread_t thread = current_thread(); (void)esr; (void)state; - if 
(ml_at_interrupt_context()) + if (ml_at_interrupt_context()) { panic_with_thread_kernel_state("Apparently on interrupt stack when taking user abort!\n", state); + } thread->iotier_override = THROTTLE_LEVEL_NONE; /* Reset IO tier override before handling abort from userspace */ @@ -971,11 +983,12 @@ handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr assert(map != kernel_map); - if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled()) - vm_fault_addr = tbi_clear(fault_addr); + if (!(fault_type & VM_PROT_EXECUTE) && user_tbi_enabled()) { + vm_fault_addr = tbi_clear(fault_addr); + } #if CONFIG_DTRACE - if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ + if (thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ if (dtrace_tally_fault(vm_fault_addr)) { /* Should a user mode fault under dtrace be ignored? */ if (recover) { set_saved_state_pc(state, recover); @@ -1000,7 +1013,9 @@ handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr if (pgtrace_enabled) { /* Check to see if trace bit is set */ result = pmap_pgtrace_fault(map->pmap, fault_addr, state); - if (result == KERN_SUCCESS) return; + if (result == KERN_SUCCESS) { + return; + } } #endif @@ -1010,12 +1025,11 @@ handle_user_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr result = arm_fast_fault(map->pmap, trunc_page(vm_fault_addr), fault_type, TRUE); } if (result != KERN_SUCCESS) { - { /* We have to fault the page in */ result = vm_fault(map, vm_fault_addr, fault_type, - /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE, - /* caller_pmap */ NULL, /* caller_pmap_addr */ 0); + /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE, + /* caller_pmap */ NULL, /* caller_pmap_addr */ 0); } } if (result == KERN_SUCCESS || result == KERN_ABORTED) { @@ -1075,13 +1089,13 @@ is_pan_fault(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, fau static void handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_addr, - fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover) + fault_status_t fault_code, vm_prot_t fault_type, vm_offset_t recover) { - thread_t thread = current_thread(); + thread_t thread = current_thread(); (void)esr; #if CONFIG_DTRACE - if (is_vm_fault(fault_code) && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ + if (is_vm_fault(fault_code) && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ if (dtrace_tally_fault(fault_addr)) { /* Should a fault under dtrace be ignored? */ /* * Point to next instruction, or recovery handler if set. 
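
Both abort paths in this region lean on the per-thread recover field: code that touches fault-prone memory installs a recovery target before the access, and the abort handlers redirect the saved pc there instead of panicking. The change to sleh_synchronous() earlier in this patch also stashes that target in a local recover_saved and clears thread->recover while the handler runs, so a nested (double) fault cannot re-enter the same recovery path; the stashed copy is restored on the way out. The userspace sketch below illustrates only that save/clear/restore contract; it is not xnu code, and thread_t_sketch, sleh_synchronous_sketch, and the 0x1234 label address are hypothetical stand-ins.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	uintptr_t recover;      /* recovery-routine address, 0 if none */
} thread_t_sketch;

static void
sleh_synchronous_sketch(thread_t_sketch *thread)
{
	uintptr_t recover = 0, recover_saved = 0;

	/* Double-fault guard: detach the recovery target so a fault
	 * taken while handling this one cannot bounce back through
	 * the same handler. */
	if (thread->recover) {
		recover = recover_saved = thread->recover;
		thread->recover = 0;
	}

	/* ... dispatch on the exception class here; a recoverable
	 * fault would redirect the saved pc to recover ... */
	(void)recover;

	/* Restore the stashed target on exit, mirroring the
	 * "if (recover_saved) { thread->recover = recover_saved; }"
	 * epilogue this patch introduces. */
	if (recover_saved) {
		thread->recover = recover_saved;
	}
}

int
main(void)
{
	thread_t_sketch t = { .recover = 0x1234 }; /* hypothetical label */

	sleh_synchronous_sketch(&t);
	printf("recover intact after handling: %#lx\n",
	    (unsigned long)t.recover);
	return 0;
}

Carrying a separate recover_saved lets intermediate code clear or repurpose the working recover value without losing the original target for the final restore.
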
@@ -1102,8 +1116,9 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad #endif #if !CONFIG_PGTRACE /* This will be moved next to pgtrace fault evaluation */ - if (ml_at_interrupt_context()) + if (ml_at_interrupt_context()) { panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state); + } #endif if (is_vm_fault(fault_code)) { @@ -1132,7 +1147,7 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad #endif if (fault_addr >= gVirtBase && fault_addr < static_memory_end) { - panic_with_thread_kernel_state("Unexpected fault in kernel static region\n",state); + panic_with_thread_kernel_state("Unexpected fault in kernel static region\n", state); } if (VM_KERNEL_ADDRESS(fault_addr) || thread == THREAD_NULL) { @@ -1147,30 +1162,36 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad if (pgtrace_enabled) { /* Check to see if trace bit is set */ result = pmap_pgtrace_fault(map->pmap, fault_addr, state); - if (result == KERN_SUCCESS) return; + if (result == KERN_SUCCESS) { + return; + } } - if (ml_at_interrupt_context()) + if (ml_at_interrupt_context()) { panic_with_thread_kernel_state("Unexpected abort while on interrupt stack.", state); + } #endif /* check to see if it is just a pmap ref/modify fault */ if (!is_translation_fault(fault_code)) { result = arm_fast_fault(map->pmap, trunc_page(fault_addr), fault_type, FALSE); - if (result == KERN_SUCCESS) return; + if (result == KERN_SUCCESS) { + return; + } } - if (result != KERN_PROTECTION_FAILURE) - { + if (result != KERN_PROTECTION_FAILURE) { /* * We have to "fault" the page in. */ result = vm_fault(map, fault_addr, fault_type, - /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible, - /* caller_pmap */ NULL, /* caller_pmap_addr */ 0); + /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, interruptible, + /* caller_pmap */ NULL, /* caller_pmap_addr */ 0); } - if (result == KERN_SUCCESS) return; + if (result == KERN_SUCCESS) { + return; + } /* * If we have a recover handler, invoke it now. @@ -1183,18 +1204,18 @@ handle_kernel_abort(arm_saved_state_t *state, uint32_t esr, vm_offset_t fault_ad #if __ARM_PAN_AVAILABLE__ if (is_pan_fault(state, esr, fault_addr, fault_code)) { #ifdef CONFIG_XNUPOST - if ((pan_test_addr != 0) && (fault_addr == pan_test_addr)) - { + if ((pan_test_addr != 0) && (fault_addr == pan_test_addr)) { ++pan_exception_level; // read the user-accessible value to make sure // pan is enabled and produces a 2nd fault from // the exception handler - if (pan_exception_level == 1) - pan_fault_value = *(char *)pan_test_addr; + if (pan_exception_level == 1) { + pan_fault_value = *(char *)pan_test_addr; + } // this fault address is used for PAN test // disable PAN and rerun set_saved_state_cpsr(state, - get_saved_state_cpsr(state) & (~PSR64_PAN)); + get_saved_state_cpsr(state) & (~PSR64_PAN)); return; } #endif @@ -1366,9 +1387,9 @@ sleh_irq(arm_saved_state_t *state) /* Run the registered interrupt handler. */ cdp->interrupt_handler(cdp->interrupt_target, - cdp->interrupt_refCon, - cdp->interrupt_nub, - cdp->interrupt_source); + cdp->interrupt_refCon, + cdp->interrupt_nub, + cdp->interrupt_source); /* We use interrupt timing as an entropy source. 
*/ timestamp = ml_get_timebase(); @@ -1395,8 +1416,9 @@ sleh_irq(arm_saved_state_t *state) sleh_interrupt_handler_epilogue(); #if DEVELOPMENT || DEBUG - if (preemption_level != get_preemption_level()) + if (preemption_level != get_preemption_level()) { panic("irq handler %p changed preemption level from %d to %d", cdp->interrupt_handler, preemption_level, get_preemption_level()); + } #endif } @@ -1407,26 +1429,27 @@ sleh_fiq(arm_saved_state_t *state) #if DEVELOPMENT || DEBUG int preemption_level = get_preemption_level(); #endif -#if MONOTONIC - uint64_t pmsr = 0, upmsr = 0; -#endif /* MONOTONIC */ -#if MONOTONIC - if (mt_pmi_pending(&pmsr, &upmsr)) { +#if MONOTONIC_FIQ + uint64_t pmcr0 = 0, upmsr = 0; +#endif /* MONOTONIC_FIQ */ + +#if MONOTONIC_FIQ + if (mt_pmi_pending(&pmcr0, &upmsr)) { type = DBG_INTR_TYPE_PMI; } else -#endif /* MONOTONIC */ +#endif /* MONOTONIC_FIQ */ if (ml_get_timer_pending()) { type = DBG_INTR_TYPE_TIMER; } sleh_interrupt_handler_prologue(state, type); -#if MONOTONIC +#if MONOTONIC_FIQ if (type == DBG_INTR_TYPE_PMI) { - mt_fiq(getCpuDatap(), pmsr, upmsr); + mt_fiq(getCpuDatap(), pmcr0, upmsr); } else -#endif /* MONOTONIC */ +#endif /* MONOTONIC_FIQ */ { /* * We don't know that this is a timer, but we don't have insight into @@ -1447,15 +1470,16 @@ sleh_fiq(arm_saved_state_t *state) sleh_interrupt_handler_epilogue(); #if DEVELOPMENT || DEBUG - if (preemption_level != get_preemption_level()) + if (preemption_level != get_preemption_level()) { panic("fiq type %u changed preemption level from %d to %d", type, preemption_level, get_preemption_level()); + } #endif } void sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far) { - arm_saved_state_t *state = &context->ss; + arm_saved_state_t *state = &context->ss; #if DEVELOPMENT || DEBUG int preemption_level = get_preemption_level(); #endif @@ -1463,19 +1487,20 @@ sleh_serror(arm_context_t *context, uint32_t esr, vm_offset_t far) ASSERT_CONTEXT_SANITY(context); arm64_platform_error(state, esr, far); #if DEVELOPMENT || DEBUG - if (preemption_level != get_preemption_level()) + if (preemption_level != get_preemption_level()) { panic("serror changed preemption level from %d to %d", preemption_level, get_preemption_level()); + } #endif } void mach_syscall_trace_exit( - unsigned int retval, - unsigned int call_number) + unsigned int retval, + unsigned int call_number) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END, - retval, 0, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END, + retval, 0, 0, 0, 0); } __attribute__((noreturn)) @@ -1507,7 +1532,7 @@ thread_syscall_return(kern_return_t error) void syscall_trace( - struct arm_saved_state * regs __unused) + struct arm_saved_state * regs __unused) { /* kprintf("syscall: %d\n", saved_state64(regs)->x[16]); */ } @@ -1518,10 +1543,10 @@ sleh_interrupt_handler_prologue(arm_saved_state_t *state, unsigned int type) uint64_t is_user = PSR64_IS_USER(get_saved_state_cpsr(state)); uint64_t pc = is_user ? 
get_saved_state_pc(state) : - VM_KERNEL_UNSLIDE(get_saved_state_pc(state)); + VM_KERNEL_UNSLIDE(get_saved_state_pc(state)); KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, - 0, pc, is_user, type); + 0, pc, is_user, type); #if CONFIG_TELEMETRY if (telemetry_needs_record) { @@ -1554,4 +1579,3 @@ sleh_invalid_stack(arm_context_t *context, uint32_t esr __unused, vm_offset_t fa panic_with_thread_kernel_state("Invalid kernel stack pointer (probable corruption).", &context->ss); } - diff --git a/osfmk/arm64/status.c b/osfmk/arm64/status.c index ff0429e36..5a69eabc4 100644 --- a/osfmk/arm64/status.c +++ b/osfmk/arm64/status.c @@ -35,17 +35,15 @@ #include #include -struct arm_vfpv2_state -{ - __uint32_t __r[32]; - __uint32_t __fpscr; - +struct arm_vfpv2_state { + __uint32_t __r[32]; + __uint32_t __fpscr; }; typedef struct arm_vfpv2_state arm_vfpv2_state_t; #define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \ - (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t))) + (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t))) /* * Forward definitions @@ -58,7 +56,7 @@ void thread_set_parent(thread_t parent, int pid); */ /* __private_extern__ */ unsigned int _MachineStateCount[] = { - /* FLAVOR_LIST */ 0, + /* FLAVOR_LIST */ 0, ARM_UNIFIED_THREAD_STATE_COUNT, ARM_VFP_STATE_COUNT, ARM_EXCEPTION_STATE_COUNT, @@ -102,8 +100,9 @@ saved_state_to_thread_state64(const arm_saved_state_t *saved_state, arm_thread_s ts64->sp = get_saved_state_sp(saved_state); ts64->pc = get_saved_state_pc(saved_state); ts64->cpsr = get_saved_state_cpsr(saved_state); - for (i = 0; i < 29; i++) + for (i = 0; i < 29; i++) { ts64->x[i] = get_saved_state_reg(saved_state, i); + } } /* @@ -121,21 +120,24 @@ thread_state64_to_saved_state(const arm_thread_state64_t *ts64, arm_saved_state_ set_saved_state_sp(saved_state, ts64->sp); set_saved_state_pc(saved_state, ts64->pc); set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64); - for (i = 0; i < 29; i++) + for (i = 0; i < 29; i++) { set_saved_state_reg(saved_state, i, ts64->x[i]); + } } #endif kern_return_t handle_get_arm32_thread_state( - thread_state_t tstate, - mach_msg_type_number_t * count, - const arm_saved_state_t *saved_state) + thread_state_t tstate, + mach_msg_type_number_t * count, + const arm_saved_state_t *saved_state) { - if (*count < ARM_THREAD_STATE32_COUNT) - return (KERN_INVALID_ARGUMENT); - if (!is_saved_state32(saved_state)) - return (KERN_INVALID_ARGUMENT); + if (*count < ARM_THREAD_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (!is_saved_state32(saved_state)) { + return KERN_INVALID_ARGUMENT; + } (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate); *count = ARM_THREAD_STATE32_COUNT; @@ -144,14 +146,16 @@ handle_get_arm32_thread_state( kern_return_t handle_get_arm64_thread_state( - thread_state_t tstate, - mach_msg_type_number_t * count, - const arm_saved_state_t *saved_state) + thread_state_t tstate, + mach_msg_type_number_t * count, + const arm_saved_state_t *saved_state) { - if (*count < ARM_THREAD_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); - if (!is_saved_state64(saved_state)) - return (KERN_INVALID_ARGUMENT); + if (*count < ARM_THREAD_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (!is_saved_state64(saved_state)) { + return KERN_INVALID_ARGUMENT; + } (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate); *count = ARM_THREAD_STATE64_COUNT; @@ -161,9 +165,9 @@ handle_get_arm64_thread_state( kern_return_t handle_get_arm_thread_state( - 
thread_state_t tstate, - mach_msg_type_number_t * count, - const arm_saved_state_t *saved_state) + thread_state_t tstate, + mach_msg_type_number_t * count, + const arm_saved_state_t *saved_state) { /* In an arm64 world, this flavor can be used to retrieve the thread * state of a 32-bit or 64-bit thread into a unified structure, but we @@ -189,17 +193,18 @@ handle_get_arm_thread_state( (void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state)); } *count = ARM_UNIFIED_THREAD_STATE_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t handle_set_arm32_thread_state( - const thread_state_t tstate, - mach_msg_type_number_t count, - arm_saved_state_t *saved_state) + const thread_state_t tstate, + mach_msg_type_number_t count, + arm_saved_state_t *saved_state) { - if (count != ARM_THREAD_STATE32_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count != ARM_THREAD_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state); return KERN_SUCCESS; @@ -207,12 +212,13 @@ handle_set_arm32_thread_state( kern_return_t handle_set_arm64_thread_state( - const thread_state_t tstate, - mach_msg_type_number_t count, - arm_saved_state_t *saved_state) + const thread_state_t tstate, + mach_msg_type_number_t count, + arm_saved_state_t *saved_state) { - if (count != ARM_THREAD_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count != ARM_THREAD_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state); return KERN_SUCCESS; @@ -221,9 +227,9 @@ handle_set_arm64_thread_state( kern_return_t handle_set_arm_thread_state( - const thread_state_t tstate, - mach_msg_type_number_t count, - arm_saved_state_t *saved_state) + const thread_state_t tstate, + mach_msg_type_number_t count, + arm_saved_state_t *saved_state) { /* In an arm64 world, this flavor can be used to set the thread state of a * 32-bit or 64-bit thread from a unified structure, but we need to support @@ -232,7 +238,7 @@ handle_set_arm_thread_state( */ if (count < ARM_UNIFIED_THREAD_STATE_COUNT) { if (!is_saved_state32(saved_state)) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } return handle_set_arm32_thread_state(tstate, count, saved_state); } @@ -241,19 +247,19 @@ handle_set_arm_thread_state( #if __arm64__ if (is_thread_state64(unified_state)) { if (!is_saved_state64(saved_state)) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } (void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state); } else #endif { if (!is_saved_state32(saved_state)) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } (void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -262,10 +268,10 @@ handle_set_arm_thread_state( kern_return_t machine_thread_state_convert_to_user( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t *count) + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) { // No conversion to userspace representation on this platform (void)thread; (void)flavor; (void)tstate; (void)count; @@ -278,10 +284,10 @@ machine_thread_state_convert_to_user( kern_return_t machine_thread_state_convert_from_user( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t count) + thread_t 
thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t count) { // No conversion from userspace representation on this platform (void)thread; (void)flavor; (void)tstate; (void)count; @@ -294,8 +300,8 @@ machine_thread_state_convert_from_user( kern_return_t machine_thread_siguctx_pointer_convert_to_user( - __assert_only thread_t thread, - user_addr_t *uctxp) + __assert_only thread_t thread, + user_addr_t *uctxp) { // No conversion to userspace representation on this platform (void)thread; (void)uctxp; @@ -308,9 +314,9 @@ machine_thread_siguctx_pointer_convert_to_user( kern_return_t machine_thread_function_pointers_convert_from_user( - __assert_only thread_t thread, - user_addr_t *fptrs, - uint32_t count) + __assert_only thread_t thread, + user_addr_t *fptrs, + uint32_t count) { // No conversion from userspace representation on this platform (void)thread; (void)fptrs; (void)count; @@ -323,15 +329,16 @@ machine_thread_function_pointers_convert_from_user( */ kern_return_t machine_thread_get_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t * count) + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t * count) { switch (flavor) { case THREAD_STATE_FLAVOR_LIST: - if (*count < 4) - return (KERN_INVALID_ARGUMENT); + if (*count < 4) { + return KERN_INVALID_ARGUMENT; + } tstate[0] = ARM_THREAD_STATE; tstate[1] = ARM_VFP_STATE; @@ -341,8 +348,9 @@ machine_thread_get_state( break; case THREAD_STATE_FLAVOR_LIST_NEW: - if (*count < 4) - return (KERN_INVALID_ARGUMENT); + if (*count < 4) { + return KERN_INVALID_ARGUMENT; + } tstate[0] = ARM_THREAD_STATE; tstate[1] = ARM_VFP_STATE; @@ -354,166 +362,191 @@ machine_thread_get_state( case ARM_THREAD_STATE: { kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb); - if (rn) return rn; + if (rn) { + return rn; + } break; } case ARM_THREAD_STATE32: { - if (thread_is_64bit_data(thread)) + if (thread_is_64bit_data(thread)) { return KERN_INVALID_ARGUMENT; + } kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb); - if (rn) return rn; + if (rn) { + return rn; + } break; } #if __arm64__ case ARM_THREAD_STATE64: { - if (!thread_is_64bit_data(thread)) + if (!thread_is_64bit_data(thread)) { return KERN_INVALID_ARGUMENT; + } kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb); - if (rn) return rn; + if (rn) { + return rn; + } break; } #endif case ARM_EXCEPTION_STATE:{ - struct arm_exception_state *state; - struct arm_saved_state32 *saved_state; + struct arm_exception_state *state; + struct arm_saved_state32 *saved_state; - if (*count < ARM_EXCEPTION_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (*count < ARM_EXCEPTION_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } - state = (struct arm_exception_state *) tstate; - saved_state = saved_state32(thread->machine.upcb); + state = (struct arm_exception_state *) tstate; + saved_state = saved_state32(thread->machine.upcb); - state->exception = saved_state->exception; - state->fsr = saved_state->esr; - state->far = saved_state->far; + state->exception = saved_state->exception; + state->fsr = saved_state->esr; + state->far = saved_state->far; - *count = ARM_EXCEPTION_STATE_COUNT; - break; - } + *count = ARM_EXCEPTION_STATE_COUNT; + break; + } case 
ARM_EXCEPTION_STATE64:{ - struct arm_exception_state64 *state; - struct arm_saved_state64 *saved_state; + struct arm_exception_state64 *state; + struct arm_saved_state64 *saved_state; + + if (*count < ARM_EXCEPTION_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (!thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } - if (*count < ARM_EXCEPTION_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); - if (!thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + state = (struct arm_exception_state64 *) tstate; + saved_state = saved_state64(thread->machine.upcb); - state = (struct arm_exception_state64 *) tstate; - saved_state = saved_state64(thread->machine.upcb); + state->exception = saved_state->exception; + state->far = saved_state->far; + state->esr = saved_state->esr; - state->exception = saved_state->exception; - state->far = saved_state->far; - state->esr = saved_state->esr; + *count = ARM_EXCEPTION_STATE64_COUNT; + break; + } + case ARM_DEBUG_STATE:{ + arm_legacy_debug_state_t *state; + arm_debug_state32_t *thread_state; - *count = ARM_EXCEPTION_STATE64_COUNT; - break; + if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; } - case ARM_DEBUG_STATE:{ - arm_legacy_debug_state_t *state; - arm_debug_state32_t *thread_state; - - if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); - - state = (arm_legacy_debug_state_t *) tstate; - thread_state = find_debug_state32(thread); - - if (thread_state == NULL) - bzero(state, sizeof(arm_legacy_debug_state_t)); - else - bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t)); - - *count = ARM_LEGACY_DEBUG_STATE_COUNT; - break; + + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } + + state = (arm_legacy_debug_state_t *) tstate; + thread_state = find_debug_state32(thread); + + if (thread_state == NULL) { + bzero(state, sizeof(arm_legacy_debug_state_t)); + } else { + bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t)); } + + *count = ARM_LEGACY_DEBUG_STATE_COUNT; + break; + } case ARM_DEBUG_STATE32:{ - arm_debug_state32_t *state; - arm_debug_state32_t *thread_state; - - if (*count < ARM_DEBUG_STATE32_COUNT) - return (KERN_INVALID_ARGUMENT); - - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); - - state = (arm_debug_state32_t *) tstate; - thread_state = find_debug_state32(thread); - - if (thread_state == NULL) - bzero(state, sizeof(arm_debug_state32_t)); - else - bcopy(thread_state, state, sizeof(arm_debug_state32_t)); - - *count = ARM_DEBUG_STATE32_COUNT; - break; + arm_debug_state32_t *state; + arm_debug_state32_t *thread_state; + + if (*count < ARM_DEBUG_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; } + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } + + state = (arm_debug_state32_t *) tstate; + thread_state = find_debug_state32(thread); + + if (thread_state == NULL) { + bzero(state, sizeof(arm_debug_state32_t)); + } else { + bcopy(thread_state, state, sizeof(arm_debug_state32_t)); + } + + *count = ARM_DEBUG_STATE32_COUNT; + break; + } + case ARM_DEBUG_STATE64:{ - arm_debug_state64_t *state; - arm_debug_state64_t *thread_state; - - if (*count < ARM_DEBUG_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); - - if (!thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); - - state = (arm_debug_state64_t *) tstate; - thread_state = find_debug_state64(thread); - - if (thread_state == NULL) - bzero(state, 
sizeof(arm_debug_state64_t)); - else - bcopy(thread_state, state, sizeof(arm_debug_state64_t)); - - *count = ARM_DEBUG_STATE64_COUNT; - break; + arm_debug_state64_t *state; + arm_debug_state64_t *thread_state; + + if (*count < ARM_DEBUG_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; } + if (!thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } + + state = (arm_debug_state64_t *) tstate; + thread_state = find_debug_state64(thread); + + if (thread_state == NULL) { + bzero(state, sizeof(arm_debug_state64_t)); + } else { + bcopy(thread_state, state, sizeof(arm_debug_state64_t)); + } + + *count = ARM_DEBUG_STATE64_COUNT; + break; + } + case ARM_VFP_STATE:{ - struct arm_vfp_state *state; - arm_neon_saved_state32_t *thread_state; - unsigned int max; - - if (*count < ARM_VFP_STATE_COUNT) { - if (*count < ARM_VFPV2_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - else - *count = ARM_VFPV2_STATE_COUNT; + struct arm_vfp_state *state; + arm_neon_saved_state32_t *thread_state; + unsigned int max; + + if (*count < ARM_VFP_STATE_COUNT) { + if (*count < ARM_VFPV2_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } else { + *count = ARM_VFPV2_STATE_COUNT; } + } - if (*count == ARM_VFPV2_STATE_COUNT) - max = 32; - else - max = 64; + if (*count == ARM_VFPV2_STATE_COUNT) { + max = 32; + } else { + max = 64; + } - state = (struct arm_vfp_state *) tstate; - thread_state = neon_state32(thread->machine.uNeon); - /* ARM64 TODO: set fpsr and fpcr from state->fpscr */ + state = (struct arm_vfp_state *) tstate; + thread_state = neon_state32(thread->machine.uNeon); + /* ARM64 TODO: set fpsr and fpcr from state->fpscr */ - bcopy(thread_state, state, (max + 1)*sizeof(uint32_t)); - *count = (max + 1); - break; - } + bcopy(thread_state, state, (max + 1) * sizeof(uint32_t)); + *count = (max + 1); + break; + } case ARM_NEON_STATE:{ arm_neon_state_t *state; arm_neon_saved_state32_t *thread_state; - if (*count < ARM_NEON_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*count < ARM_NEON_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } state = (arm_neon_state_t *)tstate; thread_state = neon_state32(thread->machine.uNeon); @@ -523,18 +556,19 @@ machine_thread_get_state( *count = ARM_NEON_STATE_COUNT; break; - - } + } case ARM_NEON_STATE64:{ arm_neon_state64_t *state; arm_neon_saved_state64_t *thread_state; - if (*count < ARM_NEON_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*count < ARM_NEON_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (!thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } state = (arm_neon_state64_t *)tstate; thread_state = neon_state64(thread->machine.uNeon); @@ -545,13 +579,12 @@ machine_thread_get_state( *count = ARM_NEON_STATE64_COUNT; break; - - } + } default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -561,42 +594,49 @@ machine_thread_get_state( */ kern_return_t machine_thread_get_kern_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t * count) + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t * count) { /* * This works only for an interrupted kernel thread */ - if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) + if (thread != 
current_thread() || getCpuDatap()->cpu_int_state == NULL) { return KERN_FAILURE; + } switch (flavor) { case ARM_THREAD_STATE: { kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state); - if (rn) return rn; + if (rn) { + return rn; + } break; } case ARM_THREAD_STATE32: { kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state); - if (rn) return rn; + if (rn) { + return rn; + } break; } #if __arm64__ case ARM_THREAD_STATE64: { kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state); - if (rn) return rn; + if (rn) { + return rn; + } break; } #endif default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } void @@ -635,329 +675,351 @@ extern long long arm_debug_get(void); */ kern_return_t machine_thread_set_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t count) + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t count) { kern_return_t rn; switch (flavor) { case ARM_THREAD_STATE: rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb); - if (rn) return rn; + if (rn) { + return rn; + } break; case ARM_THREAD_STATE32: - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb); - if (rn) return rn; + if (rn) { + return rn; + } break; #if __arm64__ case ARM_THREAD_STATE64: - if (!thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb); - if (rn) return rn; + if (rn) { + return rn; + } break; #endif case ARM_EXCEPTION_STATE:{ - - if (count != ARM_EXCEPTION_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); - - break; + if (count != ARM_EXCEPTION_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; } + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } + + break; + } case ARM_EXCEPTION_STATE64:{ + if (count != ARM_EXCEPTION_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (!thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } - if (count != ARM_EXCEPTION_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); - if (!thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + break; + } + case ARM_DEBUG_STATE: + { + arm_legacy_debug_state_t *state; + boolean_t enabled = FALSE; + unsigned int i; - break; + if (count != ARM_LEGACY_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; } - case ARM_DEBUG_STATE: - { - arm_legacy_debug_state_t *state; - boolean_t enabled = FALSE; - unsigned int i; - if (count != ARM_LEGACY_DEBUG_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + state = (arm_legacy_debug_state_t *) tstate; - state = (arm_legacy_debug_state_t *) tstate; + for (i = 0; i < 16; i++) { + /* do not allow context IDs to be set */ + if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) + || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED) + || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) + || ((state->wcr[i] & 
ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) { + return KERN_PROTECTION_FAILURE; + } + if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) + || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) { + enabled = TRUE; + } + } - for (i = 0; i < 16; i++) { - /* do not allow context IDs to be set */ - if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) - || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED) - || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) - || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) { - return KERN_PROTECTION_FAILURE; - } - if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) - || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) { - enabled = TRUE; - } + + if (!enabled) { + arm_debug_state32_t *thread_state = find_debug_state32(thread); + if (thread_state != NULL) { + void *pTmp = thread->machine.DebugData; + thread->machine.DebugData = NULL; + zfree(ads_zone, pTmp); } - - - if (!enabled) { - arm_debug_state32_t *thread_state = find_debug_state32(thread); - if (thread_state != NULL) { - void *pTmp = thread->machine.DebugData; - thread->machine.DebugData = NULL; - zfree(ads_zone, pTmp); - } - } else { - arm_debug_state32_t *thread_state = find_debug_state32(thread); - if (thread_state == NULL) { - thread->machine.DebugData = zalloc(ads_zone); - bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); - thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32; - thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT; - thread_state = find_debug_state32(thread); - } - assert(NULL != thread_state); - - for (i = 0; i < 16; i++) { - /* set appropriate privilege; mask out unknown bits */ - thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK - | ARM_DBGBCR_MATCH_MASK - | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK - | ARM_DBG_CR_ENABLE_MASK)) - | ARM_DBGBCR_TYPE_IVA - | ARM_DBG_CR_LINKED_UNLINKED - | ARM_DBG_CR_SECURITY_STATE_BOTH - | ARM_DBG_CR_MODE_CONTROL_USER; - thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK; - thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK - | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK - | ARM_DBGWCR_ACCESS_CONTROL_MASK - | ARM_DBG_CR_ENABLE_MASK)) - | ARM_DBG_CR_LINKED_UNLINKED - | ARM_DBG_CR_SECURITY_STATE_BOTH - | ARM_DBG_CR_MODE_CONTROL_USER; - thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK; - } - - thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping. 
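
The ARM_DEBUG_STATE handlers being re-indented here all share one validate-then-sanitize shape: user-supplied bcr/wcr control words are first rejected outright if they select kernel-only features (linked breakpoints and context-ID matching draw KERN_PROTECTION_FAILURE), and accepted words are then masked to a whitelist of caller-controllable bits and OR'd with fixed policy bits that pin the breakpoint to unlinked, user-mode, address-match operation. A compilable sketch of that shape follows; the bit positions and names are placeholders, not the real ARM_DBG_* constants.

#include <assert.h>
#include <stdint.h>

/* Placeholder layout for a 32-bit debug control word. */
#define DBG_ENABLE        (1u << 0)     /* caller-controllable */
#define DBG_BYTE_SEL_MASK (0xFu << 5)   /* caller-controllable */
#define DBG_MODE_USER     (1u << 1)     /* fixed policy bit    */
#define DBG_TYPE_LINKED   (1u << 20)    /* kernel-only feature */

#define DBG_USER_SETTABLE (DBG_ENABLE | DBG_BYTE_SEL_MASK)

/* Returns the sanitized word, or -1 if the request must be refused
 * (the kernel analog returns KERN_PROTECTION_FAILURE instead). */
static int64_t
sanitize_dbg_cr(uint32_t user_cr)
{
	/* Step 1: reject features userspace may never select. */
	if (user_cr & DBG_TYPE_LINKED) {
		return -1;
	}
	/* Step 2: keep only whitelisted bits, then force the fixed
	 * policy bits ("set appropriate privilege; mask out unknown
	 * bits" in the code above). */
	return (int64_t)((user_cr & DBG_USER_SETTABLE) | DBG_MODE_USER);
}

int
main(void)
{
	/* A linked-breakpoint request is refused outright. */
	assert(sanitize_dbg_cr(DBG_TYPE_LINKED | DBG_ENABLE) == -1);

	/* A plain enable request keeps its bits and gains the policy. */
	int64_t cr = sanitize_dbg_cr(DBG_ENABLE | (0x3u << 5));
	assert(cr != -1);
	assert(((uint32_t)cr & DBG_MODE_USER) && ((uint32_t)cr & DBG_ENABLE));
	return 0;
}

Rejecting before masking matters: silently stripping a kernel-only bit would leave the caller believing a linked breakpoint was armed, whereas the explicit failure mirrors the early KERN_PROTECTION_FAILURE returns in the code above.
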
+ } else { + arm_debug_state32_t *thread_state = find_debug_state32(thread); + if (thread_state == NULL) { + thread->machine.DebugData = zalloc(ads_zone); + bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); + thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32; + thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT; + thread_state = find_debug_state32(thread); } - - if (thread == current_thread()) { - arm_debug_set32(thread->machine.DebugData); + assert(NULL != thread_state); + + for (i = 0; i < 16; i++) { + /* set appropriate privilege; mask out unknown bits */ + thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK + | ARM_DBGBCR_MATCH_MASK + | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK + | ARM_DBG_CR_ENABLE_MASK)) + | ARM_DBGBCR_TYPE_IVA + | ARM_DBG_CR_LINKED_UNLINKED + | ARM_DBG_CR_SECURITY_STATE_BOTH + | ARM_DBG_CR_MODE_CONTROL_USER; + thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK; + thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK + | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK + | ARM_DBGWCR_ACCESS_CONTROL_MASK + | ARM_DBG_CR_ENABLE_MASK)) + | ARM_DBG_CR_LINKED_UNLINKED + | ARM_DBG_CR_SECURITY_STATE_BOTH + | ARM_DBG_CR_MODE_CONTROL_USER; + thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK; } - - break; + + thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping. } + + if (thread == current_thread()) { + arm_debug_set32(thread->machine.DebugData); + } + + break; + } case ARM_DEBUG_STATE32: /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */ - { - arm_debug_state32_t *state; - boolean_t enabled = FALSE; - unsigned int i; + { + arm_debug_state32_t *state; + boolean_t enabled = FALSE; + unsigned int i; - if (count != ARM_DEBUG_STATE32_COUNT) - return (KERN_INVALID_ARGUMENT); - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (count != ARM_DEBUG_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } + + state = (arm_debug_state32_t *) tstate; - state = (arm_debug_state32_t *) tstate; + if (state->mdscr_el1 & 0x1) { + enabled = TRUE; + } - if (state->mdscr_el1 & 0x1) + for (i = 0; i < 16; i++) { + /* do not allow context IDs to be set */ + if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) + || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED) + || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) + || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) { + return KERN_PROTECTION_FAILURE; + } + if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) + || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) { enabled = TRUE; + } + } - for (i = 0; i < 16; i++) { - /* do not allow context IDs to be set */ - if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) - || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED) - || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) - || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) { - return KERN_PROTECTION_FAILURE; - } - if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) - || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) { - enabled = TRUE; - } + if (!enabled) { + arm_debug_state32_t *thread_state = find_debug_state32(thread); + if (thread_state != 
NULL) { + void *pTmp = thread->machine.DebugData; + thread->machine.DebugData = NULL; + zfree(ads_zone, pTmp); + } + } else { + arm_debug_state32_t *thread_state = find_debug_state32(thread); + if (thread_state == NULL) { + thread->machine.DebugData = zalloc(ads_zone); + bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); + thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32; + thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT; + thread_state = find_debug_state32(thread); } - - if (!enabled) { - arm_debug_state32_t *thread_state = find_debug_state32(thread); - if (thread_state != NULL) { - void *pTmp = thread->machine.DebugData; - thread->machine.DebugData = NULL; - zfree(ads_zone, pTmp); - } + assert(NULL != thread_state); + + if (state->mdscr_el1 & 0x1) { + thread_state->mdscr_el1 |= 0x1; } else { - arm_debug_state32_t *thread_state = find_debug_state32(thread); - if (thread_state == NULL) { - thread->machine.DebugData = zalloc(ads_zone); - bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); - thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32; - thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT; - thread_state = find_debug_state32(thread); - } - assert(NULL != thread_state); - - if (state->mdscr_el1 & 0x1) - thread_state->mdscr_el1 |= 0x1; - else - thread_state->mdscr_el1 &= ~0x1; - - for (i = 0; i < 16; i++) { - /* set appropriate privilege; mask out unknown bits */ - thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK - | ARM_DBGBCR_MATCH_MASK - | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK - | ARM_DBG_CR_ENABLE_MASK)) - | ARM_DBGBCR_TYPE_IVA - | ARM_DBG_CR_LINKED_UNLINKED - | ARM_DBG_CR_SECURITY_STATE_BOTH - | ARM_DBG_CR_MODE_CONTROL_USER; - thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK; - thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK - | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK - | ARM_DBGWCR_ACCESS_CONTROL_MASK - | ARM_DBG_CR_ENABLE_MASK)) - | ARM_DBG_CR_LINKED_UNLINKED - | ARM_DBG_CR_SECURITY_STATE_BOTH - | ARM_DBG_CR_MODE_CONTROL_USER; - thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK; - } - + thread_state->mdscr_el1 &= ~0x1; } - - if (thread == current_thread()) { - arm_debug_set32(thread->machine.DebugData); + + for (i = 0; i < 16; i++) { + /* set appropriate privilege; mask out unknown bits */ + thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK + | ARM_DBGBCR_MATCH_MASK + | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK + | ARM_DBG_CR_ENABLE_MASK)) + | ARM_DBGBCR_TYPE_IVA + | ARM_DBG_CR_LINKED_UNLINKED + | ARM_DBG_CR_SECURITY_STATE_BOTH + | ARM_DBG_CR_MODE_CONTROL_USER; + thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK; + thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK + | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK + | ARM_DBGWCR_ACCESS_CONTROL_MASK + | ARM_DBG_CR_ENABLE_MASK)) + | ARM_DBG_CR_LINKED_UNLINKED + | ARM_DBG_CR_SECURITY_STATE_BOTH + | ARM_DBG_CR_MODE_CONTROL_USER; + thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK; } - - break; } + if (thread == current_thread()) { + arm_debug_set32(thread->machine.DebugData); + } + + break; + } + case ARM_DEBUG_STATE64: - { - arm_debug_state64_t *state; - boolean_t enabled = FALSE; - unsigned int i; + { + arm_debug_state64_t *state; + boolean_t enabled = FALSE; + unsigned int i; - if (count != ARM_DEBUG_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); - if (!thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (count != 
ARM_DEBUG_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (!thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } - state = (arm_debug_state64_t *) tstate; + state = (arm_debug_state64_t *) tstate; + + if (state->mdscr_el1 & 0x1) { + enabled = TRUE; + } - if (state->mdscr_el1 & 0x1) + for (i = 0; i < 16; i++) { + /* do not allow context IDs to be set */ + if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) + || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED) + || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) { + return KERN_PROTECTION_FAILURE; + } + if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) + || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) { enabled = TRUE; + } + } - for (i = 0; i < 16; i++) { - /* do not allow context IDs to be set */ - if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA) - || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED) - || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) { - return KERN_PROTECTION_FAILURE; - } - if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) - || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) { - enabled = TRUE; - } + if (!enabled) { + arm_debug_state64_t *thread_state = find_debug_state64(thread); + if (thread_state != NULL) { + void *pTmp = thread->machine.DebugData; + thread->machine.DebugData = NULL; + zfree(ads_zone, pTmp); } + } else { + arm_debug_state64_t *thread_state = find_debug_state64(thread); + if (thread_state == NULL) { + thread->machine.DebugData = zalloc(ads_zone); + bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); + thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64; + thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT; + thread_state = find_debug_state64(thread); + } + assert(NULL != thread_state); - if (!enabled) { - arm_debug_state64_t *thread_state = find_debug_state64(thread); - if (thread_state != NULL) { - void *pTmp = thread->machine.DebugData; - thread->machine.DebugData = NULL; - zfree(ads_zone, pTmp); - } + if (state->mdscr_el1 & 0x1) { + thread_state->mdscr_el1 |= 0x1; } else { - arm_debug_state64_t *thread_state = find_debug_state64(thread); - if (thread_state == NULL) { - thread->machine.DebugData = zalloc(ads_zone); - bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); - thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64; - thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT; - thread_state = find_debug_state64(thread); - } - assert(NULL != thread_state); - - if (state->mdscr_el1 & 0x1) - thread_state->mdscr_el1 |= 0x1; - else - thread_state->mdscr_el1 &= ~0x1; - - for (i = 0; i < 16; i++) { - /* set appropriate privilege; mask out unknown bits */ - thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */ - | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */ - | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK - | ARM_DBG_CR_ENABLE_MASK)) - | ARM_DBGBCR_TYPE_IVA - | ARM_DBG_CR_LINKED_UNLINKED - | ARM_DBG_CR_SECURITY_STATE_BOTH - | ARM_DBG_CR_MODE_CONTROL_USER; - thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64; - thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK - | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK - | ARM_DBGWCR_ACCESS_CONTROL_MASK - | ARM_DBG_CR_ENABLE_MASK)) - | ARM_DBG_CR_LINKED_UNLINKED - 
| ARM_DBG_CR_SECURITY_STATE_BOTH - | ARM_DBG_CR_MODE_CONTROL_USER; - thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64; - } - + thread_state->mdscr_el1 &= ~0x1; } - - if (thread == current_thread()) { - arm_debug_set64(thread->machine.DebugData); + + for (i = 0; i < 16; i++) { + /* set appropriate privilege; mask out unknown bits */ + thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */ + | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */ + | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK + | ARM_DBG_CR_ENABLE_MASK)) + | ARM_DBGBCR_TYPE_IVA + | ARM_DBG_CR_LINKED_UNLINKED + | ARM_DBG_CR_SECURITY_STATE_BOTH + | ARM_DBG_CR_MODE_CONTROL_USER; + thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64; + thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK + | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK + | ARM_DBGWCR_ACCESS_CONTROL_MASK + | ARM_DBG_CR_ENABLE_MASK)) + | ARM_DBG_CR_LINKED_UNLINKED + | ARM_DBG_CR_SECURITY_STATE_BOTH + | ARM_DBG_CR_MODE_CONTROL_USER; + thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64; } - - break; } + if (thread == current_thread()) { + arm_debug_set64(thread->machine.DebugData); + } + + break; + } + case ARM_VFP_STATE:{ - struct arm_vfp_state *state; - arm_neon_saved_state32_t *thread_state; - unsigned int max; + struct arm_vfp_state *state; + arm_neon_saved_state32_t *thread_state; + unsigned int max; - if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (count == ARM_VFPV2_STATE_COUNT) - max = 32; - else - max = 64; + if (count == ARM_VFPV2_STATE_COUNT) { + max = 32; + } else { + max = 64; + } - state = (struct arm_vfp_state *) tstate; - thread_state = neon_state32(thread->machine.uNeon); - /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */ + state = (struct arm_vfp_state *) tstate; + thread_state = neon_state32(thread->machine.uNeon); + /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */ - bcopy(state, thread_state, (max + 1)*sizeof(uint32_t)); + bcopy(state, thread_state, (max + 1) * sizeof(uint32_t)); - thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32; - thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT; - break; - } + thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32; + thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT; + break; + } case ARM_NEON_STATE:{ arm_neon_state_t *state; arm_neon_saved_state32_t *thread_state; - if (count != ARM_NEON_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count != ARM_NEON_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (thread_is_64bit_data(thread)) { + return KERN_INVALID_ARGUMENT; + } state = (arm_neon_state_t *)tstate; thread_state = neon_state32(thread->machine.uNeon); @@ -968,18 +1030,19 @@ machine_thread_set_state( thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32; thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT; break; - - } + } case ARM_NEON_STATE64:{ arm_neon_state64_t *state; arm_neon_saved_state64_t *thread_state; - if (count != ARM_NEON_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count != ARM_NEON_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (!thread_is_64bit_data(thread)) - return (KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_data(thread)) { + return 
KERN_INVALID_ARGUMENT; + } state = (arm_neon_state64_t *)tstate; thread_state = neon_state64(thread->machine.uNeon); @@ -990,13 +1053,12 @@ machine_thread_set_state( thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64; thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT; break; - - } + } default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1005,13 +1067,13 @@ machine_thread_set_state( */ kern_return_t machine_thread_state_initialize( - thread_t thread) + thread_t thread) { arm_context_t *context = thread->machine.contextData; - /* + /* * Should always be set up later. For a kernel thread, we don't care - * about this state. For a user thread, we'll set the state up in + * about this state. For a user thread, we'll set the state up in * setup_wqthread, bsdthread_create, load_main(), or load_unixthread(). */ @@ -1038,9 +1100,9 @@ machine_thread_state_initialize( */ kern_return_t machine_thread_dup( - thread_t self, - thread_t target, - __unused boolean_t is_corpse) + thread_t self, + thread_t target, + __unused boolean_t is_corpse) { struct arm_saved_state *self_saved_state; struct arm_saved_state *target_saved_state; @@ -1052,7 +1114,7 @@ machine_thread_dup( target_saved_state = target->machine.upcb; bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state)); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1061,16 +1123,16 @@ machine_thread_dup( */ struct arm_saved_state * get_user_regs( - thread_t thread) + thread_t thread) { - return (thread->machine.upcb); + return thread->machine.upcb; } arm_neon_saved_state_t * get_user_neon_regs( - thread_t thread) + thread_t thread) { - return (thread->machine.uNeon); + return thread->machine.uNeon; } /* @@ -1079,9 +1141,9 @@ get_user_neon_regs( */ struct arm_saved_state * find_user_regs( - thread_t thread) + thread_t thread) { - return (thread->machine.upcb); + return thread->machine.upcb; } /* @@ -1090,36 +1152,38 @@ find_user_regs( */ struct arm_saved_state * find_kern_regs( - thread_t thread) + thread_t thread) { /* - * This works only for an interrupted kernel thread - */ - if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) - return ((struct arm_saved_state *) NULL); - else - return (getCpuDatap()->cpu_int_state); - + * This works only for an interrupted kernel thread + */ + if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) { + return (struct arm_saved_state *) NULL; + } else { + return getCpuDatap()->cpu_int_state; + } } arm_debug_state32_t * find_debug_state32( - thread_t thread) + thread_t thread) { - if (thread && thread->machine.DebugData) + if (thread && thread->machine.DebugData) { return &(thread->machine.DebugData->uds.ds32); - else + } else { return NULL; + } } arm_debug_state64_t * find_debug_state64( - thread_t thread) + thread_t thread) { - if (thread && thread->machine.DebugData) + if (thread && thread->machine.DebugData) { return &(thread->machine.DebugData->uds.ds64); - else + } else { return NULL; + } } /* @@ -1128,14 +1192,14 @@ find_debug_state64( */ kern_return_t thread_userstack( - __unused thread_t thread, - int flavor, - thread_state_t tstate, - unsigned int count, - mach_vm_offset_t * user_stack, - int *customstack, - boolean_t is_64bit_data -) + __unused thread_t thread, + int flavor, + thread_state_t tstate, + unsigned int count, + mach_vm_offset_t * user_stack, + int *customstack, + boolean_t is_64bit_data + ) { register_t sp; @@ -1156,38 +1220,44 @@ 
thread_userstack( /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */ case ARM_THREAD_STATE32: - if (count != ARM_THREAD_STATE32_COUNT) - return (KERN_INVALID_ARGUMENT); - if (is_64bit_data) - return (KERN_INVALID_ARGUMENT); + if (count != ARM_THREAD_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (is_64bit_data) { + return KERN_INVALID_ARGUMENT; + } sp = ((arm_thread_state32_t *)tstate)->sp; break; #if __arm64__ case ARM_THREAD_STATE64: - if (count != ARM_THREAD_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); - if (!is_64bit_data) - return (KERN_INVALID_ARGUMENT); + if (count != ARM_THREAD_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + if (!is_64bit_data) { + return KERN_INVALID_ARGUMENT; + } sp = ((arm_thread_state32_t *)tstate)->sp; break; #endif default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } if (sp) { *user_stack = CAST_USER_ADDR_T(sp); - if (customstack) + if (customstack) { *customstack = 1; + } } else { *user_stack = CAST_USER_ADDR_T(USRSTACK64); - if (customstack) + if (customstack) { *customstack = 0; + } } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1207,7 +1277,7 @@ thread_userstackdefault( *default_user_stack = USRSTACK; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1267,53 +1337,53 @@ thread_setentrypoint(thread_t thread, mach_vm_offset_t entry) */ kern_return_t thread_entrypoint( - __unused thread_t thread, - int flavor, - thread_state_t tstate, - unsigned int count __unused, - mach_vm_offset_t * entry_point -) + __unused thread_t thread, + int flavor, + thread_state_t tstate, + unsigned int count __unused, + mach_vm_offset_t * entry_point + ) { switch (flavor) { case ARM_THREAD_STATE: - { - struct arm_thread_state *state; + { + struct arm_thread_state *state; - state = (struct arm_thread_state *) tstate; + state = (struct arm_thread_state *) tstate; - /* - * If a valid entry point is specified, use it. - */ - if (state->pc) { - *entry_point = CAST_USER_ADDR_T(state->pc); - } else { - *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS); - } + /* + * If a valid entry point is specified, use it. + */ + if (state->pc) { + *entry_point = CAST_USER_ADDR_T(state->pc); + } else { + *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS); } - break; + } + break; case ARM_THREAD_STATE64: - { - struct arm_thread_state64 *state; - - state = (struct arm_thread_state64*) tstate; + { + struct arm_thread_state64 *state; - /* - * If a valid entry point is specified, use it. - */ - if (state->pc) { - *entry_point = CAST_USER_ADDR_T(state->pc); - } else { - *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS); - } + state = (struct arm_thread_state64*) tstate; - break; + /* + * If a valid entry point is specified, use it. 
+ */ + if (state->pc) { + *entry_point = CAST_USER_ADDR_T(state->pc); + } else { + *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS); } + + break; + } default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -1323,8 +1393,8 @@ thread_entrypoint( */ void thread_set_child( - thread_t child, - int pid) + thread_t child, + int pid) { struct arm_saved_state *child_state; @@ -1341,8 +1411,8 @@ thread_set_child( */ void thread_set_parent( - thread_t parent, - int pid) + thread_t parent, + int pid) { struct arm_saved_state *parent_state; @@ -1373,36 +1443,37 @@ act_thread_csave(void) thread_t thread = current_thread(); ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context)); - if (ic == (struct arm_act_context *) NULL) - return ((void *) 0); + if (ic == (struct arm_act_context *) NULL) { + return (void *) 0; + } val = ARM_UNIFIED_THREAD_STATE_COUNT; kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val); if (kret != KERN_SUCCESS) { kfree(ic, sizeof(struct arm_act_context)); - return ((void *) 0); + return (void *) 0; } #if __ARM_VFP__ if (thread_is_64bit_data(thread)) { val = ARM_NEON_STATE64_COUNT; kret = machine_thread_get_state(thread, - ARM_NEON_STATE64, - (thread_state_t) & ic->ns, - &val); + ARM_NEON_STATE64, + (thread_state_t) &ic->ns, + &val); } else { val = ARM_NEON_STATE_COUNT; kret = machine_thread_get_state(thread, - ARM_NEON_STATE, - (thread_state_t) & ic->ns, - &val); + ARM_NEON_STATE, + (thread_state_t) &ic->ns, + &val); } if (kret != KERN_SUCCESS) { kfree(ic, sizeof(struct arm_act_context)); - return ((void *) 0); + return (void *) 0; } #endif - return (ic); + return ic; } /* @@ -1417,27 +1488,30 @@ act_thread_catt(void *ctx) thread_t thread = current_thread(); ic = (struct arm_act_context *) ctx; - if (ic == (struct arm_act_context *) NULL) + if (ic == (struct arm_act_context *) NULL) { return; + } kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT); - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { goto out; + } #if __ARM_VFP__ if (thread_is_64bit_data(thread)) { kret = machine_thread_set_state(thread, - ARM_NEON_STATE64, - (thread_state_t) & ic->ns, - ARM_NEON_STATE64_COUNT); + ARM_NEON_STATE64, + (thread_state_t) &ic->ns, + ARM_NEON_STATE64_COUNT); } else { kret = machine_thread_set_state(thread, - ARM_NEON_STATE, - (thread_state_t) & ic->ns, - ARM_NEON_STATE_COUNT); + ARM_NEON_STATE, + (thread_state_t) &ic->ns, + ARM_NEON_STATE_COUNT); } - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { goto out; + } #endif out: kfree(ic, sizeof(struct arm_act_context)); @@ -1447,7 +1521,7 @@ out: * Routine: act_thread_catt * */ -void +void act_thread_cfree(void *ctx) { kfree(ctx, sizeof(struct arm_act_context)); @@ -1460,7 +1534,7 @@ thread_set_wq_state32(thread_t thread, thread_state_t tstate) struct arm_saved_state *saved_state; struct arm_saved_state32 *saved_state_32; thread_t curth = current_thread(); - spl_t s=0; + spl_t s = 0; assert(!thread_is_64bit_data(thread)); @@ -1497,7 +1571,7 @@ thread_set_wq_state64(thread_t thread, thread_state_t tstate) struct arm_saved_state *saved_state; struct arm_saved_state64 *saved_state_64; thread_t curth = current_thread(); - spl_t s=0; + spl_t s = 0; assert(thread_is_64bit_data(thread)); diff --git a/osfmk/atm/atm.c b/osfmk/atm/atm.c index 0c90d585b..69fc9f738 100644 --- a/osfmk/atm/atm.c +++ b/osfmk/atm/atm.c @@ -79,7 +79,7 @@ static atm_value_t 
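/*
 * act_thread_csave()/act_thread_catt() in the hunks above follow a strict
 * ownership protocol: the save path frees its context on any partial
 * failure, and the attach path consumes (frees) the context whether or not
 * restoring state succeeds. A small user-space sketch of that protocol;
 * get_state()/set_state() are hypothetical stand-ins for the
 * machine_thread_*_state() calls.
 */
#include <stdlib.h>
#include <string.h>

struct sketch_ctx { long regs[32]; };

static int get_state(struct sketch_ctx *c) { memset(c, 0, sizeof(*c)); return 0; }
static int set_state(const struct sketch_ctx *c) { (void)c; return 0; }

void *
sketch_csave(void)
{
	struct sketch_ctx *c = malloc(sizeof(*c));

	if (c == NULL) {
		return NULL;
	}
	if (get_state(c) != 0) {
		free(c);        /* never leak a half-built context */
		return NULL;
	}
	return c;
}

void
sketch_catt(void *ctx)
{
	struct sketch_ctx *c = ctx;

	if (c == NULL) {
		return;
	}
	(void)set_state(c);     /* success or failure, fall through */
	free(c);                /* the context is consumed either way */
}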
get_atm_value_from_aid(aid_t aid) __unused; static void atm_value_get_ref(atm_value_t atm_value); static kern_return_t atm_listener_insert(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard); static void atm_listener_delete_all(atm_value_t atm_value); -static atm_task_descriptor_t atm_task_descriptor_alloc_init(mach_port_t trace_buffer,uint64_t buffer_size, __assert_only task_t task); +static atm_task_descriptor_t atm_task_descriptor_alloc_init(mach_port_t trace_buffer, uint64_t buffer_size, __assert_only task_t task); static void atm_descriptor_get_reference(atm_task_descriptor_t task_descriptor); static void atm_task_descriptor_dealloc(atm_task_descriptor_t task_descriptor); static kern_return_t atm_value_unregister(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard); @@ -140,7 +140,7 @@ struct ipc_voucher_attr_manager atm_manager = { .ivam_release_value = atm_release_value, .ivam_get_value = atm_get_value, .ivam_extract_content = atm_extract_content, - .ivam_command = atm_command, + .ivam_command = atm_command, .ivam_release = atm_release, .ivam_flags = IVAM_FLAGS_NONE, }; @@ -149,9 +149,9 @@ struct ipc_voucher_attr_manager atm_manager = { decl_lck_mtx_data(, atm_descriptors_list_lock); decl_lck_mtx_data(, atm_values_list_lock); -lck_grp_t atm_dev_lock_grp; -lck_attr_t atm_dev_lock_attr; -lck_grp_attr_t atm_dev_lock_grp_attr; +lck_grp_t atm_dev_lock_grp; +lck_attr_t atm_dev_lock_attr; +lck_grp_attr_t atm_dev_lock_grp_attr; #endif extern vm_map_t kernel_map; @@ -168,9 +168,9 @@ mach_atm_subaid_t global_subaid; /* * Lock group attributes for atm sub system. */ -lck_grp_t atm_lock_grp; -lck_attr_t atm_lock_attr; -lck_grp_attr_t atm_lock_grp_attr; +lck_grp_t atm_lock_grp; +lck_attr_t atm_lock_attr; +lck_grp_attr_t atm_lock_grp_attr; /* * Global that is set by diagnosticd and readable by userspace @@ -190,32 +190,32 @@ atm_init() char temp_buf[20]; /* Disable atm if disable_atm present in device-tree properties or in boot-args */ - if ((PE_get_default("kern.disable_atm", temp_buf, sizeof(temp_buf))) || + if ((PE_get_default("kern.disable_atm", temp_buf, sizeof(temp_buf))) || (PE_parse_boot_argn("-disable_atm", temp_buf, sizeof(temp_buf)))) { disable_atm = TRUE; } if (!PE_parse_boot_argn("atm_diagnostic_config", &atm_diagnostic_config, sizeof(atm_diagnostic_config))) { - if (!PE_get_default("kern.atm_diagnostic_config", &atm_diagnostic_config, sizeof(atm_diagnostic_config))) { + if (!PE_get_default("kern.atm_diagnostic_config", &atm_diagnostic_config, sizeof(atm_diagnostic_config))) { atm_diagnostic_config = 0; } } /* setup zones for descriptors, values and link objects */ atm_value_zone = zinit(sizeof(struct atm_value), - MAX_ATM_VALUES * sizeof(struct atm_value), - sizeof(struct atm_value), - "atm_values"); + MAX_ATM_VALUES * sizeof(struct atm_value), + sizeof(struct atm_value), + "atm_values"); atm_descriptors_zone = zinit(sizeof(struct atm_task_descriptor), - MAX_ATM_VALUES * sizeof(struct atm_task_descriptor), - sizeof(struct atm_task_descriptor), - "atm_task_descriptors"); + MAX_ATM_VALUES * sizeof(struct atm_task_descriptor), + sizeof(struct atm_task_descriptor), + "atm_task_descriptors"); atm_link_objects_zone = zinit(sizeof(struct atm_link_object), - MAX_ATM_VALUES * sizeof(struct atm_link_object), - sizeof(struct atm_link_object), - "atm_link_objects"); + MAX_ATM_VALUES * sizeof(struct atm_link_object), + sizeof(struct atm_link_object), + "atm_link_objects"); /* Initialize atm lock group and lock attributes. 
*/ lck_grp_attr_setdefault(&atm_lock_grp_attr); @@ -241,15 +241,16 @@ atm_init() /* Register the atm manager with the Vouchers sub system. */ kr = ipc_register_well_known_mach_voucher_attr_manager( - &atm_manager, - 0, - MACH_VOUCHER_ATTR_KEY_ATM, - &voucher_attr_control); - if (kr != KERN_SUCCESS ) + &atm_manager, + 0, + MACH_VOUCHER_ATTR_KEY_ATM, + &voucher_attr_control); + if (kr != KERN_SUCCESS) { panic("ATM subsystem initialization failed"); + } kprintf("ATM subsystem is initialized\n"); - return ; + return; } @@ -266,10 +267,10 @@ atm_init() */ kern_return_t atm_release_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_t value, + mach_voucher_attr_value_reference_t sync) { atm_value_t atm_value = ATM_VALUE_NULL; @@ -298,16 +299,16 @@ atm_release_value( */ kern_return_t atm_get_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t prev_values, - mach_msg_type_number_t __assert_only prev_value_count, + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_recipe_command_t command, + mach_voucher_attr_value_handle_array_t prev_values, + mach_msg_type_number_t __assert_only prev_value_count, mach_voucher_attr_content_t __unused recipe, mach_voucher_attr_content_size_t __unused recipe_size, mach_voucher_attr_value_handle_t *out_value, mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher) + ipc_voucher_t *out_value_voucher) { atm_value_t atm_value = ATM_VALUE_NULL; mach_voucher_attr_value_handle_t atm_handle; @@ -325,19 +326,20 @@ atm_get_value( *out_value_voucher = IPC_VOUCHER_NULL; *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE; - if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE)) + if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE)) { return KERN_NOT_SUPPORTED; + } switch (command) { - case MACH_VOUCHER_ATTR_ATM_REGISTER: for (i = 0; i < prev_value_count; i++) { atm_handle = prev_values[i]; atm_value = HANDLE_TO_ATM_VALUE(atm_handle); - if (atm_value == VAM_DEFAULT_VALUE) + if (atm_value == VAM_DEFAULT_VALUE) { continue; + } if (recipe_size != sizeof(atm_guard_t)) { kr = KERN_INVALID_ARGUMENT; @@ -347,7 +349,7 @@ atm_get_value( task = current_task(); task_descriptor = task->atm_context; - + kr = atm_value_register(atm_value, task_descriptor, guard); if (kr != KERN_SUCCESS) { break; @@ -374,14 +376,14 @@ atm_get_value( kr = KERN_INVALID_ARGUMENT; break; } - + /* Allocate a new atm value. 
*/ atm_value = atm_value_alloc_init(aid); if (atm_value == ATM_VALUE_NULL) { kr = KERN_RESOURCE_SHORTAGE; break; } -redrive: +redrive: kr = atm_value_hash_table_insert(atm_value); if (kr != KERN_SUCCESS) { if (recipe_size == 0) { @@ -417,7 +419,7 @@ atm_extract_content( ipc_voucher_attr_manager_t __assert_only manager, mach_voucher_attr_key_t __assert_only key, mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, + mach_msg_type_number_t value_count, mach_voucher_attr_recipe_command_t *out_command, mach_voucher_attr_content_t out_recipe, mach_voucher_attr_content_size_t *in_out_recipe_size) @@ -429,13 +431,14 @@ atm_extract_content( assert(MACH_VOUCHER_ATTR_KEY_ATM == key); assert(manager == &atm_manager); - for (i = 0; i < value_count; i++) { + for (i = 0; i < value_count && *in_out_recipe_size > 0; i++) { atm_handle = values[i]; atm_value = HANDLE_TO_ATM_VALUE(atm_handle); - if (atm_value == VAM_DEFAULT_VALUE) + if (atm_value == VAM_DEFAULT_VALUE) { continue; + } - if (( sizeof(aid_t)) > *in_out_recipe_size) { + if ((sizeof(aid_t)) > *in_out_recipe_size) { *in_out_recipe_size = 0; return KERN_NO_SPACE; } @@ -454,18 +457,18 @@ atm_extract_content( * Routine: atm_command * Purpose: Execute a command against a set of ATM values. * Returns: KERN_SUCCESS: On successful execution of command. - KERN_FAILURE: On failure. + * KERN_FAILURE: On failure. */ kern_return_t atm_command( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, - mach_voucher_attr_command_t command, - mach_voucher_attr_content_t in_content, + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_array_t values, + mach_msg_type_number_t value_count, + mach_voucher_attr_command_t command, + mach_voucher_attr_content_t in_content, mach_voucher_attr_content_size_t in_content_size, - mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_t out_content, mach_voucher_attr_content_size_t *out_content_size) { assert(MACH_VOUCHER_ATTR_KEY_ATM == key); @@ -479,21 +482,23 @@ atm_command( task_t task; kern_return_t kr = KERN_SUCCESS; atm_guard_t guard; - + switch (command) { case ATM_ACTION_COLLECT: - /* Fall through */ + /* Fall through */ case ATM_ACTION_LOGFAIL: return KERN_NOT_SUPPORTED; case ATM_FIND_MIN_SUB_AID: - if ((in_content_size/sizeof(aid_t)) > (*out_content_size/sizeof(mach_atm_subaid_t))) + if ((in_content_size / sizeof(aid_t)) > (*out_content_size / sizeof(mach_atm_subaid_t))) { return KERN_FAILURE; + } aid_array_count = in_content_size / sizeof(aid_t); - if (aid_array_count > AID_ARRAY_COUNT_MAX) + if (aid_array_count > AID_ARRAY_COUNT_MAX) { return KERN_FAILURE; + } subaid_array = (mach_atm_subaid_t *) (void *) out_content; for (i = 0; i < aid_array_count; i++) { @@ -510,8 +515,9 @@ atm_command( /* find the first non-default atm_value */ for (i = 0; i < value_count; i++) { atm_value = HANDLE_TO_ATM_VALUE(values[i]); - if (atm_value != VAM_DEFAULT_VALUE) + if (atm_value != VAM_DEFAULT_VALUE) { break; + } } /* if we are not able to find any atm values @@ -520,7 +526,7 @@ atm_command( if (atm_value == NULL) { return KERN_FAILURE; } - if (in_content == NULL || in_content_size != sizeof(atm_guard_t)){ + if (in_content == NULL || in_content_size != sizeof(atm_guard_t)) { return KERN_INVALID_ARGUMENT; } @@ -535,8 +541,9 @@ atm_command( case ATM_ACTION_REGISTER: for (i = 0; i < 
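/*
 * The `redrive:` path above in atm_get_value() is optimistic insertion:
 * allocate a value first, try to insert it into the hash table, and on a
 * KERN_NAME_EXISTS race either adopt the winner or retry. A simplified
 * pthread sketch of the same idiom; all names here are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

struct node { long key; struct node *next; };

static struct node *table_head;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Insert n unless its key is taken; return whichever node is resident. */
static struct node *
insert_or_existing(struct node *n)
{
	struct node *it;

	pthread_mutex_lock(&table_lock);
	for (it = table_head; it != NULL; it = it->next) {
		if (it->key == n->key) {
			pthread_mutex_unlock(&table_lock);
			return it;      /* lost the race to another thread */
		}
	}
	n->next = table_head;
	table_head = n;
	pthread_mutex_unlock(&table_lock);
	return n;
}

struct node *
sketch_get_value(long key)
{
	struct node *n, *resident;

	if ((n = calloc(1, sizeof(*n))) == NULL) {
		return NULL;
	}
	n->key = key;
	resident = insert_or_existing(n);
	if (resident != n) {
		free(n);        /* adopt the existing value instead */
	}
	return resident;
}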
value_count; i++) { atm_value = HANDLE_TO_ATM_VALUE(values[i]); - if (atm_value != VAM_DEFAULT_VALUE) + if (atm_value != VAM_DEFAULT_VALUE) { break; + } } /* if we are not able to find any atm values * in stack then this call was made in error @@ -544,7 +551,7 @@ atm_command( if (atm_value == NULL) { return KERN_FAILURE; } - if (in_content == NULL || in_content_size != sizeof(atm_guard_t)){ + if (in_content == NULL || in_content_size != sizeof(atm_guard_t)) { return KERN_INVALID_ARGUMENT; } @@ -557,8 +564,9 @@ atm_command( break; case ATM_ACTION_GETSUBAID: - if (out_content == NULL || *out_content_size != sizeof(mach_atm_subaid_t)) + if (out_content == NULL || *out_content_size != sizeof(mach_atm_subaid_t)) { return KERN_FAILURE; + } next_subaid = get_subaid(); memcpy(out_content, &next_subaid, sizeof(mach_atm_subaid_t)); @@ -575,7 +583,7 @@ atm_command( void atm_release( - ipc_voucher_attr_manager_t __assert_only manager) + ipc_voucher_attr_manager_t __assert_only manager) { assert(manager == &atm_manager); } @@ -593,8 +601,9 @@ atm_value_alloc_init(aid_t aid) atm_value_t new_atm_value = ATM_VALUE_NULL; new_atm_value = (atm_value_t) zalloc(atm_value_zone); - if (new_atm_value == ATM_VALUE_NULL) + if (new_atm_value == ATM_VALUE_NULL) { panic("Ran out of ATM values structure.\n\n"); + } new_atm_value->aid = aid; queue_init(&new_atm_value->listeners); @@ -713,7 +722,7 @@ atm_value_hash_table_insert(atm_value_t new_atm_value) * aid found. return error. */ lck_mtx_unlock(&hash_list_head->hash_list_lock); - return (KERN_NAME_EXISTS); + return KERN_NAME_EXISTS; } } @@ -773,7 +782,7 @@ get_atm_value_from_aid(aid_t aid) */ atm_value_get_ref(next); lck_mtx_unlock(&hash_list_head->hash_list_lock); - return (next); + return next; } } lck_mtx_unlock(&hash_list_head->hash_list_lock); @@ -801,9 +810,9 @@ atm_value_get_ref(atm_value_t atm_value) */ static kern_return_t atm_listener_insert( - atm_value_t atm_value, - atm_task_descriptor_t task_descriptor, - atm_guard_t guard) + atm_value_t atm_value, + atm_task_descriptor_t task_descriptor, + atm_guard_t guard) { atm_link_object_t new_link_object; atm_link_object_t next, elem; @@ -842,8 +851,9 @@ atm_listener_insert( continue; } - if (element_found) + if (element_found) { continue; + } if (elem->descriptor == task_descriptor) { /* Increment reference count on Link object. 
*/ @@ -853,8 +863,7 @@ atm_listener_insert( elem->guard = guard; element_found = TRUE; KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_GETVALUE_INFO, (ATM_VALUE_REPLACED))) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, 0, 0); - + VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, 0, 0); } } @@ -865,7 +874,7 @@ atm_listener_insert( zfree(atm_link_objects_zone, new_link_object); } else { KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_GETVALUE_INFO, (ATM_VALUE_ADDED))) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, 0, 0); + VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, 0, 0); queue_enter(&atm_value->listeners, new_link_object, atm_link_object_t, listeners_element); atm_listener_count_incr_internal(atm_value); @@ -873,7 +882,7 @@ atm_listener_insert( } /* Free the link objects */ - while(!queue_empty(&free_listeners)) { + while (!queue_empty(&free_listeners)) { queue_remove_first(&free_listeners, next, atm_link_object_t, listeners_element); /* Deallocate the link object */ @@ -881,7 +890,7 @@ atm_listener_insert( } KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_SUBAID_INFO, (ATM_LINK_LIST_TRIM))) | DBG_FUNC_NONE, - listener_count, freed_count, dead_but_not_freed, VM_KERNEL_ADDRPERM(atm_value), 1); + listener_count, freed_count, dead_but_not_freed, VM_KERNEL_ADDRPERM(atm_value), 1); return KERN_SUCCESS; } @@ -897,7 +906,7 @@ atm_listener_delete_all(atm_value_t atm_value) { atm_link_object_t next; - while(!queue_empty(&atm_value->listeners)) { + while (!queue_empty(&atm_value->listeners)) { queue_remove_first(&atm_value->listeners, next, atm_link_object_t, listeners_element); /* Deallocate the link object */ @@ -935,14 +944,14 @@ atm_listener_delete( if (elem->descriptor == task_descriptor) { if (elem->guard == guard) { KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_UNREGISTER_INFO, - (ATM_VALUE_UNREGISTERED))) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, elem->reference_count, 0); + (ATM_VALUE_UNREGISTERED))) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, guard, elem->reference_count, 0); elem->guard = 0; kr = KERN_SUCCESS; } else { KERNEL_DEBUG_CONSTANT((ATM_CODE(ATM_UNREGISTER_INFO, - (ATM_VALUE_DIFF_MAILBOX))) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, elem->guard, elem->reference_count, 0); + (ATM_VALUE_DIFF_MAILBOX))) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM(atm_value), atm_value->aid, elem->guard, elem->reference_count, 0); kr = KERN_INVALID_VALUE; } if (0 == atm_link_object_release_internal(elem)) { @@ -955,9 +964,9 @@ atm_listener_delete( } lck_mtx_unlock(&atm_value->listener_lock); - while(!queue_empty(&free_listeners)) { + while (!queue_empty(&free_listeners)) { queue_remove_first(&free_listeners, next, atm_link_object_t, listeners_element); - + /* Deallocate the link object */ atm_link_dealloc(next); } @@ -973,9 +982,9 @@ atm_listener_delete( */ static atm_task_descriptor_t atm_task_descriptor_alloc_init( - mach_port_t trace_buffer, - uint64_t buffer_size, - task_t __assert_only task) + mach_port_t trace_buffer, + uint64_t buffer_size, + task_t __assert_only task) { atm_task_descriptor_t new_task_descriptor; @@ -1071,19 +1080,21 @@ atm_link_dealloc(atm_link_object_t link_object) */ kern_return_t atm_register_trace_memory( - task_t task, - uint64_t trace_buffer_address, - uint64_t buffer_size) + task_t task, + uint64_t trace_buffer_address, + uint64_t buffer_size) { atm_task_descriptor_t task_descriptor; mach_port_t trace_buffer = MACH_PORT_NULL; kern_return_t kr = KERN_SUCCESS; - if (disable_atm || 
(atm_get_diagnostic_config() & ATM_TRACE_DISABLE)) + if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE)) { return KERN_NOT_SUPPORTED; + } - if (task != current_task()) + if (task != current_task()) { return KERN_INVALID_ARGUMENT; + } if (task->atm_context != NULL || (void *)trace_buffer_address == NULL @@ -1096,13 +1107,14 @@ atm_register_trace_memory( vm_map_t map = current_map(); memory_object_size_t mo_size = (memory_object_size_t) buffer_size; kr = mach_make_memory_entry_64(map, - &mo_size, - (mach_vm_offset_t)trace_buffer_address, - VM_PROT_READ, - &trace_buffer, - NULL); - if (kr != KERN_SUCCESS) + &mo_size, + (mach_vm_offset_t)trace_buffer_address, + VM_PROT_READ, + &trace_buffer, + NULL); + if (kr != KERN_SUCCESS) { return kr; + } task_descriptor = atm_task_descriptor_alloc_init(trace_buffer, buffer_size, task); if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL) { @@ -1136,8 +1148,9 @@ extern uint32_t atm_diagnostic_config; /* Proxied to commpage for fast user acce kern_return_t atm_set_diagnostic_config(uint32_t diagnostic_config) { - if (disable_atm) + if (disable_atm) { return KERN_NOT_SUPPORTED; + } atm_diagnostic_config = diagnostic_config; commpage_update_atm_diagnostic_config(atm_diagnostic_config); @@ -1173,9 +1186,10 @@ atm_value_unregister( { kern_return_t kr; - if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL) + if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL) { return KERN_INVALID_TASK; - + } + kr = atm_listener_delete(atm_value, task_descriptor, guard); return kr; } @@ -1196,8 +1210,9 @@ atm_value_register( { kern_return_t kr; - if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL) + if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL) { return KERN_INVALID_TASK; + } kr = atm_listener_insert(atm_value, task_descriptor, guard); return kr; diff --git a/osfmk/atm/atm_internal.h b/osfmk/atm/atm_internal.h index bd5719e45..ea1cbce7c 100644 --- a/osfmk/atm/atm_internal.h +++ b/osfmk/atm/atm_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2012-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,7 +33,7 @@ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -49,8 +49,8 @@ typedef mach_voucher_attr_value_handle_t atm_voucher_id_t; struct atm_task_descriptor { - decl_lck_mtx_data(,lock) /* lock to protect reference count */ - mach_port_t trace_buffer; /* named memory entry registered by user */ + decl_lck_mtx_data(, lock) /* lock to protect reference count */ + mach_port_t trace_buffer; /* named memory entry registered by user */ uint64_t trace_buffer_size; /* size of the trace_buffer registered */ uint32_t reference_count; uint8_t flags; @@ -60,10 +60,10 @@ struct atm_task_descriptor { #endif }; -#define atm_task_desc_reference_internal(elem) \ +#define atm_task_desc_reference_internal(elem) \ (hw_atomic_add(&(elem)->reference_count, 1)) -#define atm_task_desc_release_internal(elem) \ +#define atm_task_desc_release_internal(elem) \ (hw_atomic_sub(&(elem)->reference_count, 1)) typedef struct atm_task_descriptor *atm_task_descriptor_t; @@ -72,29 +72,29 @@ typedef struct atm_task_descriptor *atm_task_descriptor_t; struct atm_value { aid_t aid; /* activity id */ queue_head_t listeners; /* List of listeners who register for this activity */ - decl_lck_mtx_data( ,listener_lock) /* Lock to protect listener list */ + decl_lck_mtx_data(, listener_lock) /* Lock to protect listener list */ queue_chain_t vid_hash_elt; /* Next hash element in the global hash table */ #if DEVELOPMENT || DEBUG queue_chain_t value_elt; /* global chain of all values */ #endif uint32_t sync; /* Made ref count given to voucher sub system. */ uint32_t listener_count; /* Number of Listerners listening on the value. */ - uint32_t reference_count; /* use count on the atm value, 1 taken by the global hash table */ + uint32_t reference_count; /* use count on the atm value, 1 taken by the global hash table */ }; -#define atm_value_reference_internal(elem) \ +#define atm_value_reference_internal(elem) \ (hw_atomic_add(&(elem)->reference_count, 1)) -#define atm_value_release_internal(elem) \ +#define atm_value_release_internal(elem) \ (hw_atomic_sub(&(elem)->reference_count, 1)) -#define atm_listener_count_incr_internal(elem) \ +#define atm_listener_count_incr_internal(elem) \ (hw_atomic_add(&(elem)->listener_count, 1)) -#define atm_listener_count_decr_internal(elem) \ +#define atm_listener_count_decr_internal(elem) \ (hw_atomic_sub(&(elem)->listener_count, 1)) -#define atm_sync_reference_internal(elem) \ +#define atm_sync_reference_internal(elem) \ (hw_atomic_add(&(elem)->sync, 1)) typedef struct atm_value *atm_value_t; @@ -105,29 +105,29 @@ typedef struct atm_value *atm_value_t; struct atm_link_object { atm_task_descriptor_t descriptor; - queue_chain_t listeners_element; /* Head is atm_value->listeners. */ - atm_guard_t guard; /* Guard registered by the user for an activity. */ + queue_chain_t listeners_element; /* Head is atm_value->listeners. */ + atm_guard_t guard; /* Guard registered by the user for an activity. 
*/ uint32_t reference_count; /* Refernece count for link object */ }; typedef struct atm_link_object *atm_link_object_t; -#define atm_link_object_reference_internal(elem) \ +#define atm_link_object_reference_internal(elem) \ (hw_atomic_add(&(elem)->reference_count, 1)) -#define atm_link_object_release_internal(elem) \ +#define atm_link_object_release_internal(elem) \ (hw_atomic_sub(&(elem)->reference_count, 1)) struct atm_value_hash { - queue_head_t hash_list; - decl_lck_mtx_data(, hash_list_lock) /* lock to protect bucket list. */ + queue_head_t hash_list; + decl_lck_mtx_data(, hash_list_lock) /* lock to protect bucket list. */ }; typedef struct atm_value_hash *atm_value_hash_t; void atm_init(void); void atm_task_descriptor_destroy(atm_task_descriptor_t task_descriptor); -kern_return_t atm_register_trace_memory(task_t task, uint64_t trace_buffer_address, uint64_t buffer_size); +kern_return_t atm_register_trace_memory(task_t task, uint64_t trace_buffer_address, uint64_t buffer_size); kern_return_t atm_send_proc_inspect_notification(task_t task, int32_t traced_pid, uint64_t traced_uniqueid); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/atm/atm_types.h b/osfmk/atm/atm_types.h index 2169e6c0e..6008a71ab 100644 --- a/osfmk/atm/atm_types.h +++ b/osfmk/atm/atm_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2012-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
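/*
 * The reference-count macros in atm_internal.h above wrap hw_atomic_add()/
 * hw_atomic_sub(), which return the updated value; that return value is what
 * lets callers act on the 1 -> 0 transition without holding a lock. A C11
 * user-space equivalent, with hypothetical names:
 */
#include <stdatomic.h>
#include <stdlib.h>

struct refobj {
	atomic_uint reference_count;
	/* ... payload ... */
};

static inline unsigned int
refobj_retain(struct refobj *o)
{
	/* C11 fetch_add returns the old value; +1 mirrors hw_atomic_add(). */
	return atomic_fetch_add(&o->reference_count, 1) + 1;
}

static inline void
refobj_release(struct refobj *o)
{
	if (atomic_fetch_sub(&o->reference_count, 1) - 1 == 0) {
		free(o);        /* this call dropped the last reference */
	}
}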
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,16 +32,16 @@ #include #include -#define MACH_VOUCHER_ATTR_ATM_NULL ((mach_voucher_attr_recipe_command_t)501) -#define MACH_VOUCHER_ATTR_ATM_CREATE ((mach_voucher_attr_recipe_command_t)510) -#define MACH_VOUCHER_ATTR_ATM_REGISTER ((mach_voucher_attr_recipe_command_t)511) +#define MACH_VOUCHER_ATTR_ATM_NULL ((mach_voucher_attr_recipe_command_t)501) +#define MACH_VOUCHER_ATTR_ATM_CREATE ((mach_voucher_attr_recipe_command_t)510) +#define MACH_VOUCHER_ATTR_ATM_REGISTER ((mach_voucher_attr_recipe_command_t)511) typedef uint32_t atm_action_t; -#define ATM_ACTION_DISCARD 0x1 -#define ATM_ACTION_COLLECT 0x2 -#define ATM_ACTION_LOGFAIL 0x3 -#define ATM_FIND_MIN_SUB_AID 0x4 -#define ATM_ACTION_UNREGISTER 0x5 +#define ATM_ACTION_DISCARD 0x1 +#define ATM_ACTION_COLLECT 0x2 +#define ATM_ACTION_LOGFAIL 0x3 +#define ATM_FIND_MIN_SUB_AID 0x4 +#define ATM_ACTION_UNREGISTER 0x5 #define ATM_ACTION_REGISTER 0x6 #define ATM_ACTION_GETSUBAID 0x7 @@ -55,7 +55,7 @@ typedef uint64_t mailbox_offset_t; typedef uint64_t atm_aid_t; typedef uint32_t atm_subaid32_t; -typedef uint64_t mach_atm_subaid_t; /* Used for mach based apis. */ +typedef uint64_t mach_atm_subaid_t; /* Used for mach based apis. */ typedef uint64_t atm_mailbox_offset_t; @@ -63,10 +63,9 @@ typedef mach_port_t atm_memory_descriptor_t; typedef atm_memory_descriptor_t *atm_memory_descriptor_array_t; typedef uint64_t *atm_memory_size_array_t; -#define ATM_SUBAID32_MAX (UINT32_MAX) -#define ATM_TRACE_DISABLE (0x0100) /* OS_TRACE_MODE_DISABLE - Do not initialize the new logging*/ -#define ATM_TRACE_OFF (0x0400) /* OS_TRACE_MODE_OFF - Don't drop log messages to new log buffers */ -#define ATM_ENABLE_LEGACY_LOGGING (0x20000000) /* OS_TRACE_SYSTEMMODE_LEGACY_LOGGING - Enable legacy logging */ +#define ATM_SUBAID32_MAX (UINT32_MAX) +#define ATM_TRACE_DISABLE (0x0100) /* OS_TRACE_MODE_DISABLE - Do not initialize the new logging*/ +#define ATM_TRACE_OFF (0x0400) /* OS_TRACE_MODE_OFF - Don't drop log messages to new log buffers */ +#define ATM_ENABLE_LEGACY_LOGGING (0x20000000) /* OS_TRACE_SYSTEMMODE_LEGACY_LOGGING - Enable legacy logging */ #endif /* _ATM_ATM_TYPES_H_ */ - diff --git a/osfmk/bank/bank.c b/osfmk/bank/bank.c index 4b183e9b5..ef4d2977d 100644 --- a/osfmk/bank/bank.c +++ b/osfmk/bank/bank.c @@ -70,12 +70,12 @@ struct _bank_ledger_indices bank_ledgers = { -1, -1 }; static bank_task_t bank_task_alloc_init(task_t task); static bank_account_t bank_account_alloc_init(bank_task_t bank_holder, bank_task_t bank_merchant, - bank_task_t bank_secureoriginator, bank_task_t bank_proximateprocess, struct thread_group* banktg); + bank_task_t bank_secureoriginator, bank_task_t bank_proximateprocess, struct thread_group* banktg); static bank_task_t get_bank_task_context(task_t task, boolean_t initialize); static void bank_task_dealloc(bank_task_t bank_task, mach_voucher_attr_value_reference_t sync); static kern_return_t bank_account_dealloc_with_sync(bank_account_t bank_account, mach_voucher_attr_value_reference_t sync); static void bank_rollup_chit_to_tasks(ledger_t bill, ledger_t bank_holder_ledger, ledger_t bank_merchant_ledger, - int bank_holder_pid, int bank_merchant_pid); + int bank_holder_pid, int bank_merchant_pid); static ledger_t bank_get_bank_task_ledger_with_ref(bank_task_t bank_task); static void bank_destroy_bank_task_ledger(bank_task_t bank_task); static void init_bank_ledgers(void); @@ -89,11 +89,11 @@ static lck_spin_t g_bank_task_lock_data; /* lock to protect task->bank_contex 
lck_spin_init(&g_bank_task_lock_data, &bank_lock_grp, &bank_lock_attr) #define global_bank_task_lock_destroy() \ lck_spin_destroy(&g_bank_task_lock_data, &bank_lock_grp) -#define global_bank_task_lock() \ - lck_spin_lock(&g_bank_task_lock_data) -#define global_bank_task_lock_try() \ - lck_spin_try_lock(&g_bank_task_lock_data) -#define global_bank_task_unlock() \ +#define global_bank_task_lock() \ + lck_spin_lock_grp(&g_bank_task_lock_data, &bank_lock_grp) +#define global_bank_task_lock_try() \ + lck_spin_try_lock_grp(&g_bank_task_lock_data, &bank_lock_grp) +#define global_bank_task_unlock() \ lck_spin_unlock(&g_bank_task_lock_data) extern uint64_t proc_uniqueid(void *p); @@ -159,7 +159,7 @@ struct ipc_voucher_attr_manager bank_manager = { .ivam_release_value = bank_release_value, .ivam_get_value = bank_get_value, .ivam_extract_content = bank_extract_content, - .ivam_command = bank_command, + .ivam_command = bank_command, .ivam_release = bank_release, .ivam_flags = (IVAM_FLAGS_SUPPORT_SEND_PREPROCESS | IVAM_FLAGS_SUPPORT_RECEIVE_POSTPROCESS), }; @@ -169,17 +169,17 @@ struct ipc_voucher_attr_manager bank_manager = { decl_lck_mtx_data(, bank_tasks_list_lock); decl_lck_mtx_data(, bank_accounts_list_lock); -lck_grp_t bank_dev_lock_grp; -lck_attr_t bank_dev_lock_attr; -lck_grp_attr_t bank_dev_lock_grp_attr; +lck_grp_t bank_dev_lock_grp; +lck_attr_t bank_dev_lock_attr; +lck_grp_attr_t bank_dev_lock_grp_attr; #endif /* * Lock group attributes for bank sub system. */ -lck_grp_t bank_lock_grp; -lck_attr_t bank_lock_attr; -lck_grp_attr_t bank_lock_grp_attr; +lck_grp_t bank_lock_grp; +lck_attr_t bank_lock_attr; +lck_grp_attr_t bank_lock_grp_attr; /* * Routine: bank_init @@ -192,14 +192,14 @@ bank_init() kern_return_t kr = KERN_SUCCESS; /* setup zones for bank_task and bank_account objects */ bank_task_zone = zinit(sizeof(struct bank_task), - MAX_BANK_TASK * sizeof(struct bank_task), - sizeof(struct bank_task), - "bank_task"); + MAX_BANK_TASK * sizeof(struct bank_task), + sizeof(struct bank_task), + "bank_task"); bank_account_zone = zinit(sizeof(struct bank_account), - MAX_BANK_ACCOUNT * sizeof(struct bank_account), - sizeof(struct bank_account), - "bank_account"); + MAX_BANK_ACCOUNT * sizeof(struct bank_account), + sizeof(struct bank_account), + "bank_account"); init_bank_ledgers(); @@ -224,15 +224,16 @@ bank_init() /* Register the bank manager with the Vouchers sub system. 
*/ kr = ipc_register_well_known_mach_voucher_attr_manager( - &bank_manager, - 0, - MACH_VOUCHER_ATTR_KEY_BANK, - &bank_voucher_attr_control); - if (kr != KERN_SUCCESS ) + &bank_manager, + 0, + MACH_VOUCHER_ATTR_KEY_BANK, + &bank_voucher_attr_control); + if (kr != KERN_SUCCESS) { panic("BANK subsystem initialization failed"); + } kprintf("BANK subsystem is initialized\n"); - return ; + return; } @@ -249,10 +250,10 @@ bank_init() */ kern_return_t bank_release_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_t value, + mach_voucher_attr_value_reference_t sync) { bank_task_t bank_task = BANK_TASK_NULL; bank_element_t bank_element = BANK_ELEMENT_NULL; @@ -305,16 +306,16 @@ bank_release_value( */ kern_return_t bank_get_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t prev_values, - mach_msg_type_number_t prev_value_count, + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_recipe_command_t command, + mach_voucher_attr_value_handle_array_t prev_values, + mach_msg_type_number_t prev_value_count, mach_voucher_attr_content_t __unused recipe, mach_voucher_attr_content_size_t __unused recipe_size, mach_voucher_attr_value_handle_t *out_value, mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher) + ipc_voucher_t *out_value_voucher) { bank_task_t bank_task = BANK_TASK_NULL; bank_task_t bank_holder = BANK_TASK_NULL; @@ -339,7 +340,6 @@ bank_get_value( *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE; switch (command) { - case MACH_VOUCHER_ATTR_BANK_CREATE: /* Return the default task value instead of bank task */ @@ -354,8 +354,9 @@ bank_get_value( bank_element = HANDLE_TO_BANK_ELEMENT(bank_handle); /* Should not have received default task value from an IPC */ - if (bank_element == BANK_DEFAULT_VALUE || bank_element == BANK_DEFAULT_TASK_VALUE) + if (bank_element == BANK_DEFAULT_VALUE || bank_element == BANK_DEFAULT_TASK_VALUE) { continue; + } task = current_task(); if (bank_element->be_type == BANK_TASK) { @@ -369,14 +370,14 @@ bank_get_value( bank_secureoriginator = old_bank_account->ba_secureoriginator; bank_proximateprocess = old_bank_account->ba_proximateprocess; thread_group = bank_get_bank_account_thread_group(old_bank_account); - } else { panic("Bogus bank type: %d passed in get_value\n", bank_element->be_type); } bank_merchant = get_bank_task_context(task, FALSE); - if (bank_merchant == BANK_TASK_NULL) + if (bank_merchant == BANK_TASK_NULL) { return KERN_RESOURCE_SHORTAGE; + } cur_thread_group = bank_get_bank_task_thread_group(bank_merchant); @@ -386,20 +387,21 @@ bank_get_value( } /* Check if trying to redeem for self task, return the default bank task */ - if (bank_holder == bank_merchant && - bank_holder == bank_secureoriginator && - bank_holder == bank_proximateprocess && - thread_group == cur_thread_group) { + if (bank_holder == bank_merchant && + bank_holder == bank_secureoriginator && + bank_holder == bank_proximateprocess && + thread_group == cur_thread_group) { *out_value = BANK_ELEMENT_TO_HANDLE(BANK_DEFAULT_TASK_VALUE); *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_PERSIST; return kr; } 
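/*
 * bank_get_value() above moves pointers in and out of opaque voucher handles
 * (HANDLE_TO_BANK_ELEMENT / BANK_ELEMENT_TO_HANDLE) and reserves small
 * integers as sentinels for the default values, which is why every loop
 * filters them out before dereferencing. A sketch of that encoding; the
 * sentinel values and names are illustrative only.
 */
#include <stdint.h>
#include <stddef.h>

typedef uintptr_t handle_t;

#define SKETCH_DEFAULT_VALUE      ((handle_t)0) /* below any allocation */
#define SKETCH_DEFAULT_TASK_VALUE ((handle_t)1)

struct element { int type; };

static inline handle_t
elem_to_handle(struct element *e)
{
	return (handle_t)e;
}

static inline struct element *
handle_to_elem(handle_t h)
{
	/* Callers must test for the sentinels before dereferencing. */
	if (h == SKETCH_DEFAULT_VALUE || h == SKETCH_DEFAULT_TASK_VALUE) {
		return NULL;
	}
	return (struct element *)h;
}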
bank_account = bank_account_alloc_init(bank_holder, bank_merchant, - bank_secureoriginator, bank_proximateprocess, - thread_group); - if (bank_account == BANK_ACCOUNT_NULL) + bank_secureoriginator, bank_proximateprocess, + thread_group); + if (bank_account == BANK_ACCOUNT_NULL) { return KERN_RESOURCE_SHORTAGE; + } *out_value = BANK_ELEMENT_TO_HANDLE(bank_account); return kr; @@ -414,8 +416,9 @@ bank_get_value( bank_handle = prev_values[i]; bank_element = HANDLE_TO_BANK_ELEMENT(bank_handle); - if (bank_element == BANK_DEFAULT_VALUE) + if (bank_element == BANK_DEFAULT_VALUE) { continue; + } task = current_task(); if (bank_element == BANK_DEFAULT_TASK_VALUE) { @@ -436,8 +439,9 @@ bank_get_value( } bank_merchant = get_bank_task_context(task, FALSE); - if (bank_merchant == BANK_TASK_NULL) + if (bank_merchant == BANK_TASK_NULL) { return KERN_RESOURCE_SHORTAGE; + } cur_thread_group = bank_get_bank_task_thread_group(bank_merchant); @@ -447,19 +451,18 @@ bank_get_value( */ if (bank_merchant->bt_hasentitlement == 0) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (BANK_CODE(BANK_ACCOUNT_INFO, (BANK_SECURE_ORIGINATOR_CHANGED))) | DBG_FUNC_NONE, - bank_secureoriginator->bt_pid, bank_merchant->bt_pid, 0, 0, 0); + (BANK_CODE(BANK_ACCOUNT_INFO, (BANK_SECURE_ORIGINATOR_CHANGED))) | DBG_FUNC_NONE, + bank_secureoriginator->bt_pid, bank_merchant->bt_pid, 0, 0, 0); bank_secureoriginator = bank_merchant; } bank_proximateprocess = bank_merchant; /* Check if trying to redeem for self task, return the bank task */ - if (bank_holder == bank_merchant && - bank_holder == bank_secureoriginator && - bank_holder == bank_proximateprocess && - thread_group == cur_thread_group) { - + if (bank_holder == bank_merchant && + bank_holder == bank_secureoriginator && + bank_holder == bank_proximateprocess && + thread_group == cur_thread_group) { lck_mtx_lock(&bank_holder->bt_acc_to_pay_lock); bank_task_made_reference(bank_holder); if (bank_holder->bt_voucher_ref == 0) { @@ -473,10 +476,11 @@ bank_get_value( return kr; } bank_account = bank_account_alloc_init(bank_holder, bank_merchant, - bank_secureoriginator, bank_proximateprocess, - thread_group); - if (bank_account == BANK_ACCOUNT_NULL) + bank_secureoriginator, bank_proximateprocess, + thread_group); + if (bank_account == BANK_ACCOUNT_NULL) { return KERN_RESOURCE_SHORTAGE; + } *out_value = BANK_ELEMENT_TO_HANDLE(bank_account); return kr; @@ -491,8 +495,9 @@ bank_get_value( bank_handle = prev_values[i]; bank_element = HANDLE_TO_BANK_ELEMENT(bank_handle); - if (bank_element == BANK_DEFAULT_VALUE) + if (bank_element == BANK_DEFAULT_VALUE) { continue; + } task = current_task(); if (bank_element == BANK_DEFAULT_TASK_VALUE) { @@ -505,7 +510,6 @@ bank_get_value( panic("Found a bank task in MACH_VOUCHER_ATTR_REDEEM: %p", bank_task); return kr; - } else if (bank_element->be_type == BANK_ACCOUNT) { bank_account = CAST_TO_BANK_ACCOUNT(bank_element); bank_merchant = bank_account->ba_merchant; @@ -545,7 +549,7 @@ bank_extract_content( ipc_voucher_attr_manager_t __assert_only manager, mach_voucher_attr_key_t __assert_only key, mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, + mach_msg_type_number_t value_count, mach_voucher_attr_recipe_command_t *out_command, mach_voucher_attr_content_t out_recipe, mach_voucher_attr_content_size_t *in_out_recipe_size) @@ -560,11 +564,12 @@ bank_extract_content( assert(MACH_VOUCHER_ATTR_KEY_BANK == key); assert(manager == &bank_manager); - for (i = 0; i < value_count; i++) { + for (i = 0; i < value_count && 
*in_out_recipe_size > 0; i++) { bank_handle = values[i]; bank_element = HANDLE_TO_BANK_ELEMENT(bank_handle); - if (bank_element == BANK_DEFAULT_VALUE) + if (bank_element == BANK_DEFAULT_VALUE) { continue; + } if (bank_element == BANK_DEFAULT_TASK_VALUE) { bank_element = CAST_TO_BANK_ELEMENT(get_bank_task_context(current_task(), FALSE)); @@ -577,18 +582,18 @@ bank_extract_content( if (bank_element->be_type == BANK_TASK) { bank_task = CAST_TO_BANK_TASK(bank_element); - snprintf(buf, MACH_VOUCHER_BANK_CONTENT_SIZE, - " Bank Context for a pid %d\n", bank_task->bt_pid); + snprintf(buf, MACH_VOUCHER_BANK_CONTENT_SIZE, + " Bank Context for a pid %d\n", bank_task->bt_pid); } else if (bank_element->be_type == BANK_ACCOUNT) { bank_account = CAST_TO_BANK_ACCOUNT(bank_element); snprintf(buf, MACH_VOUCHER_BANK_CONTENT_SIZE, - " Bank Account linking holder pid %d with merchant pid %d, originator PID/persona: %d, %u and proximate PID/persona: %d, %u\n", - bank_account->ba_holder->bt_pid, - bank_account->ba_merchant->bt_pid, - bank_account->ba_secureoriginator->bt_pid, - bank_account->ba_secureoriginator->bt_persona_id, - bank_account->ba_proximateprocess->bt_pid, - bank_account->ba_proximateprocess->bt_persona_id); + " Bank Account linking holder pid %d with merchant pid %d, originator PID/persona: %d, %u and proximate PID/persona: %d, %u\n", + bank_account->ba_holder->bt_pid, + bank_account->ba_merchant->bt_pid, + bank_account->ba_secureoriginator->bt_pid, + bank_account->ba_secureoriginator->bt_persona_id, + bank_account->ba_proximateprocess->bt_pid, + bank_account->ba_proximateprocess->bt_persona_id); } else { panic("Bogus bank type: %d passed in get_value\n", bank_element->be_type); } @@ -607,18 +612,18 @@ bank_extract_content( * Routine: bank_command * Purpose: Execute a command against a set of ATM values. * Returns: KERN_SUCCESS: On successful execution of command. - KERN_FAILURE: On failure. + * KERN_FAILURE: On failure. 
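/*
 * One behavioral change worth noting above: both atm_extract_content() and
 * bank_extract_content() gain an "&& *in_out_recipe_size > 0" loop guard, so
 * iteration stops as soon as the recipe buffer is exhausted rather than
 * relying only on the per-item size check. A sketch of the bounded-append
 * pattern those loops implement; the names are hypothetical.
 */
#include <string.h>

/* Append src (with its NUL) at *used; refuse cleanly when it cannot fit. */
static int
bounded_append(char *out, size_t cap, size_t *used, const char *src)
{
	size_t need = strlen(src) + 1;

	if (*used >= cap || need > cap - *used) {
		return -1;      /* analogue of KERN_NO_SPACE */
	}
	memcpy(out + *used, src, need);
	*used += need - 1;      /* next append overwrites the NUL */
	return 0;
}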
*/ kern_return_t bank_command( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t __unused values, - mach_msg_type_number_t __unused value_count, - mach_voucher_attr_command_t __unused command, - mach_voucher_attr_content_t __unused in_content, + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_array_t __unused values, + mach_msg_type_number_t __unused value_count, + mach_voucher_attr_command_t __unused command, + mach_voucher_attr_content_t __unused in_content, mach_voucher_attr_content_size_t __unused in_content_size, - mach_voucher_attr_content_t __unused out_content, + mach_voucher_attr_content_t __unused out_content, mach_voucher_attr_content_size_t __unused *out_content_size) { bank_task_t bank_task = BANK_TASK_NULL; @@ -645,8 +650,9 @@ bank_command( for (i = 0; i < value_count; i++) { bank_handle = values[i]; bank_element = HANDLE_TO_BANK_ELEMENT(bank_handle); - if (bank_element == BANK_DEFAULT_VALUE) + if (bank_element == BANK_DEFAULT_VALUE) { continue; + } if (bank_element == BANK_DEFAULT_TASK_VALUE) { bank_element = CAST_TO_BANK_ELEMENT(get_bank_task_context(current_task(), FALSE)); @@ -679,8 +685,9 @@ bank_command( for (i = 0; i < value_count; i++) { bank_handle = values[i]; bank_element = HANDLE_TO_BANK_ELEMENT(bank_handle); - if (bank_element == BANK_DEFAULT_VALUE) + if (bank_element == BANK_DEFAULT_VALUE) { continue; + } if (bank_element == BANK_DEFAULT_TASK_VALUE) { bank_element = CAST_TO_BANK_ELEMENT(get_bank_task_context(current_task(), FALSE)); @@ -716,7 +723,7 @@ bank_command( void bank_release( - ipc_voucher_attr_manager_t __assert_only manager) + ipc_voucher_attr_manager_t __assert_only manager) { assert(manager == &bank_manager); } @@ -733,7 +740,7 @@ bank_release( * Returns: bank_task_t on Success. * BANK_TASK_NULL: on Failure. * Notes: Leaves the task and ledger blank and has only 1 ref, - needs to take 1 extra ref after the task field is initialized. + * needs to take 1 extra ref after the task field is initialized. 
*/ static bank_task_t bank_task_alloc_init(task_t task) @@ -741,8 +748,9 @@ bank_task_alloc_init(task_t task) bank_task_t new_bank_task; new_bank_task = (bank_task_t) zalloc(bank_task_zone); - if (new_bank_task == BANK_TASK_NULL) + if (new_bank_task == BANK_TASK_NULL) { return BANK_TASK_NULL; + } new_bank_task->bt_type = BANK_TASK; new_bank_task->bt_voucher_ref = 0; @@ -774,7 +782,7 @@ bank_task_alloc_init(task_t task) queue_enter(&bank_tasks_list, new_bank_task, bank_task_t, bt_global_elt); lck_mtx_unlock(&bank_tasks_list_lock); #endif - return (new_bank_task); + return new_bank_task; } /* @@ -816,8 +824,9 @@ bank_account_alloc_init( boolean_t entry_found = FALSE; ledger_t new_ledger = ledger_instantiate(bank_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES); - if (new_ledger == LEDGER_NULL) + if (new_ledger == LEDGER_NULL) { return BANK_ACCOUNT_NULL; + } ledger_entry_setactive(new_ledger, bank_ledgers.cpu_time); ledger_entry_setactive(new_ledger, bank_ledgers.energy); @@ -843,8 +852,9 @@ bank_account_alloc_init( if (bank_account->ba_merchant != bank_merchant || bank_account->ba_secureoriginator != bank_secureoriginator || bank_account->ba_proximateprocess != bank_proximateprocess || - bank_get_bank_account_thread_group(bank_account) != thread_group) + bank_get_bank_account_thread_group(bank_account) != thread_group) { continue; + } entry_found = TRUE; /* Take a made ref, since this value would be returned to voucher system. */ @@ -853,10 +863,9 @@ bank_account_alloc_init( } if (!entry_found) { - /* Create a linkage between the holder and the merchant task, Grab both the list locks before adding it to the list. */ lck_mtx_lock(&bank_merchant->bt_acc_to_charge_lock); - + /* Add the account entry into Accounts need to pay account link list. */ queue_enter(&bank_holder->bt_accounts_to_pay, new_bank_account, bank_account_t, ba_next_acc_to_pay); @@ -873,7 +882,7 @@ bank_account_alloc_init( zfree(bank_account_zone, new_bank_account); return bank_account; } - + bank_task_reference(bank_holder); bank_task_reference(bank_merchant); bank_task_reference(bank_secureoriginator); @@ -886,7 +895,7 @@ bank_account_alloc_init( lck_mtx_unlock(&bank_accounts_list_lock); #endif - return (new_bank_account); + return new_bank_account; } /* @@ -898,14 +907,14 @@ bank_account_alloc_init( */ static bank_task_t get_bank_task_context - (task_t task, - boolean_t initialize) +(task_t task, + boolean_t initialize) { bank_task_t bank_task; if (task->bank_context || !initialize) { assert(task->bank_context != NULL); - return (task->bank_context); + return task->bank_context; } bank_task = bank_task_alloc_init(task); @@ -914,9 +923,10 @@ get_bank_task_context task_lock(task); if (task->bank_context) { task_unlock(task); - if (bank_task != BANK_TASK_NULL) + if (bank_task != BANK_TASK_NULL) { bank_task_dealloc(bank_task, 1); - return (task->bank_context); + } + return task->bank_context; } else if (bank_task == BANK_TASK_NULL) { task_unlock(task); return BANK_TASK_NULL; @@ -934,10 +944,10 @@ get_bank_task_context global_bank_task_unlock(); task_unlock(task); - - return (bank_task); + + return bank_task; } - + /* * Routine: bank_task_dealloc * Purpose: Drops the reference on bank task. 
@@ -950,8 +960,9 @@ bank_task_dealloc( { assert(bank_task->bt_refs >= 0); - if (bank_task_release_num(bank_task, sync) > (int)sync) + if (bank_task_release_num(bank_task, sync) > (int)sync) { return; + } assert(bank_task->bt_refs == 0); assert(queue_empty(&bank_task->bt_accounts_to_pay)); @@ -1004,11 +1015,12 @@ bank_account_dealloc_with_sync( } return KERN_FAILURE; } - + bank_account_made_release_num(bank_account, sync); - if (bank_account_release_num(bank_account, 1) > 1) + if (bank_account_release_num(bank_account, 1) > 1) { panic("Releasing a non zero ref bank account %p\n", bank_account); + } /* Grab both the acc to pay and acc to charge locks */ @@ -1016,14 +1028,14 @@ bank_account_dealloc_with_sync( /* No need to take ledger reference for bank_holder ledger since bt_acc_to_pay_lock is locked */ bank_rollup_chit_to_tasks(bank_account->ba_bill, bank_holder->bt_ledger, bank_merchant_ledger, - bank_holder->bt_pid, bank_merchant->bt_pid); + bank_holder->bt_pid, bank_merchant->bt_pid); /* Remove the account entry from Accounts need to pay account link list. */ queue_remove(&bank_holder->bt_accounts_to_pay, bank_account, bank_account_t, ba_next_acc_to_pay); - + /* Remove the account entry from Accounts need to charge account link list. */ queue_remove(&bank_merchant->bt_accounts_to_charge, bank_account, bank_account_t, ba_next_acc_to_charge); - + lck_mtx_unlock(&bank_merchant->bt_acc_to_charge_lock); lck_mtx_unlock(&bank_holder->bt_acc_to_pay_lock); @@ -1043,7 +1055,7 @@ bank_account_dealloc_with_sync( queue_remove(&bank_accounts_list, bank_account, bank_account_t, ba_global_elt); lck_mtx_unlock(&bank_accounts_list_lock); #endif - + zfree(bank_account_zone, bank_account); return KERN_SUCCESS; } @@ -1065,14 +1077,15 @@ bank_rollup_chit_to_tasks( ledger_amount_t debit; kern_return_t ret; - if (bank_holder_ledger == bank_merchant_ledger) + if (bank_holder_ledger == bank_merchant_ledger) { return; + } ret = ledger_get_entries(bill, bank_ledgers.cpu_time, &credit, &debit); if (ret == KERN_SUCCESS) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (BANK_CODE(BANK_ACCOUNT_INFO, (BANK_SETTLE_CPU_TIME))) | DBG_FUNC_NONE, - bank_merchant_pid, bank_holder_pid, credit, debit, 0); + (BANK_CODE(BANK_ACCOUNT_INFO, (BANK_SETTLE_CPU_TIME))) | DBG_FUNC_NONE, + bank_merchant_pid, bank_holder_pid, credit, debit, 0); if (bank_holder_ledger) { ledger_credit(bank_holder_ledger, task_ledgers.cpu_time_billed_to_me, credit); @@ -1088,8 +1101,8 @@ bank_rollup_chit_to_tasks( ret = ledger_get_entries(bill, bank_ledgers.energy, &credit, &debit); if (ret == KERN_SUCCESS) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (BANK_CODE(BANK_ACCOUNT_INFO, (BANK_SETTLE_ENERGY))) | DBG_FUNC_NONE, - bank_merchant_pid, bank_holder_pid, credit, debit, 0); + (BANK_CODE(BANK_ACCOUNT_INFO, (BANK_SETTLE_ENERGY))) | DBG_FUNC_NONE, + bank_merchant_pid, bank_holder_pid, credit, debit, 0); if (bank_holder_ledger) { ledger_credit(bank_holder_ledger, task_ledgers.energy_billed_to_me, credit); @@ -1142,14 +1155,16 @@ bank_task_initialize(task_t task) * Returns: None. 
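/*
 * get_bank_task_context() a little above uses allocate-then-recheck
 * initialization: build the context without the task lock held, take the
 * lock, and discard our copy if another thread installed one first. A
 * pthread sketch of the idiom; unlike the kernel, which can do an unlocked
 * fast-path read safely, this sketch only inspects the pointer under the
 * lock.
 */
#include <pthread.h>
#include <stdlib.h>

struct ctx { int state; };

static struct ctx *task_ctx;    /* stand-in for task->bank_context */
static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

struct ctx *
sketch_get_ctx(void)
{
	struct ctx *fresh = calloc(1, sizeof(*fresh));  /* alloc unlocked */
	struct ctx *cur;

	pthread_mutex_lock(&ctx_lock);
	cur = task_ctx;
	if (cur == NULL && fresh != NULL) {
		task_ctx = fresh;       /* we won the race: install ours */
		cur = fresh;
		fresh = NULL;
	}
	pthread_mutex_unlock(&ctx_lock);

	free(fresh);    /* no-op when installed; frees the loser's copy */
	return cur;     /* NULL only if allocation failed and none installed */
}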
*/ static void -init_bank_ledgers(void) { +init_bank_ledgers(void) +{ ledger_template_t t; int idx; - + assert(bank_ledger_template == NULL); - if ((t = ledger_template_create("Bank ledger")) == NULL) + if ((t = ledger_template_create("Bank ledger")) == NULL) { panic("couldn't create bank ledger template"); + } if ((idx = ledger_entry_add(t, "cpu_time", "sched", "ns")) < 0) { panic("couldn't create cpu_time entry for bank ledger template"); @@ -1168,7 +1183,7 @@ init_bank_ledgers(void) { /* Routine: bank_billed_balance_safe * Purpose: Walk through all the bank accounts billed to me by other tasks and get the current billing balance. * Called from another task. It takes global bank task lock to make sure the bank context is - not deallocated while accesing it. + * not deallocated while accesing it. * Returns: cpu balance and energy balance in out paremeters. */ void @@ -1194,12 +1209,12 @@ bank_billed_balance_safe(task_t task, uint64_t *cpu_time, uint64_t *energy) bank_task_dealloc(bank_task, 1); } else { kr = ledger_get_entries(task->ledger, task_ledgers.cpu_time_billed_to_me, - &credit, &debit); + &credit, &debit); if (kr == KERN_SUCCESS) { cpu_balance = credit - debit; } kr = ledger_get_entries(task->ledger, task_ledgers.energy_billed_to_me, - &credit, &debit); + &credit, &debit); if (kr == KERN_SUCCESS) { energy_balance = credit - debit; } @@ -1228,7 +1243,7 @@ bank_billed_balance(bank_task_t bank_task, uint64_t *cpu_time, uint64_t *energy) *energy = 0; return; } - + lck_mtx_lock(&bank_task->bt_acc_to_pay_lock); /* bt_acc_to_pay_lock locked, no need to take ledger reference for bt_ledger */ @@ -1275,7 +1290,7 @@ bank_billed_balance(bank_task_t bank_task, uint64_t *cpu_time, uint64_t *energy) /* Routine: bank_serviced_balance_safe * Purpose: Walk through the bank accounts billed to other tasks by me and get the current balance to be charged. * Called from another task. It takes global bank task lock to make sure the bank context is - not deallocated while accesing it. + * not deallocated while accesing it. * Returns: cpu balance and energy balance in out paremeters. */ void @@ -1301,13 +1316,13 @@ bank_serviced_balance_safe(task_t task, uint64_t *cpu_time, uint64_t *energy) bank_task_dealloc(bank_task, 1); } else { kr = ledger_get_entries(task->ledger, task_ledgers.cpu_time_billed_to_others, - &credit, &debit); + &credit, &debit); if (kr == KERN_SUCCESS) { cpu_balance = credit - debit; } kr = ledger_get_entries(task->ledger, task_ledgers.energy_billed_to_others, - &credit, &debit); + &credit, &debit); if (kr == KERN_SUCCESS) { energy_balance = credit - debit; } @@ -1404,18 +1419,21 @@ bank_get_voucher_bank_account(ipc_voucher_t voucher) val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED; kr = mach_voucher_attr_control_get_values(bank_voucher_attr_control, - voucher, - vals, - &val_count); + voucher, + vals, + &val_count); - if (kr != KERN_SUCCESS || val_count == 0) + if (kr != KERN_SUCCESS || val_count == 0) { return BANK_ACCOUNT_NULL; + } bank_element = HANDLE_TO_BANK_ELEMENT(vals[0]); - if (bank_element == BANK_DEFAULT_VALUE) + if (bank_element == BANK_DEFAULT_VALUE) { return BANK_ACCOUNT_NULL; - if (bank_element == BANK_DEFAULT_TASK_VALUE) + } + if (bank_element == BANK_DEFAULT_TASK_VALUE) { bank_element = CAST_TO_BANK_ELEMENT(get_bank_task_context(current_task(), FALSE)); + } if (bank_element->be_type == BANK_TASK) { return BANK_ACCOUNT_NULL; @@ -1432,8 +1450,8 @@ bank_get_voucher_bank_account(ipc_voucher_t voucher) * voucher to App's thread group when it gets auto redeemed by the App). 
*/ if (bank_account->ba_holder != bank_account->ba_merchant || - bank_get_bank_account_thread_group(bank_account) != - bank_get_bank_task_thread_group(bank_account->ba_merchant)) { + bank_get_bank_account_thread_group(bank_account) != + bank_get_bank_task_thread_group(bank_account->ba_merchant)) { return bank_account; } else { return BANK_ACCOUNT_NULL; @@ -1492,10 +1510,11 @@ bank_get_bank_account_ledger(bank_account_t bank_account) ledger_t bankledger = LEDGER_NULL; if (bank_account != BANK_ACCOUNT_NULL && - bank_account->ba_holder != bank_account->ba_merchant) + bank_account->ba_holder != bank_account->ba_merchant) { bankledger = bank_account->ba_bill; + } - return (bankledger); + return bankledger; } /* @@ -1508,7 +1527,7 @@ bank_get_bank_task_thread_group(bank_task_t bank_task __unused) struct thread_group *banktg = NULL; - return (banktg); + return banktg; } /* @@ -1521,7 +1540,7 @@ bank_get_bank_account_thread_group(bank_account_t bank_account __unused) struct thread_group *banktg = NULL; - return (banktg); + return banktg; } /* @@ -1545,7 +1564,7 @@ bank_get_bank_ledger_and_thread_group( /* Return NULL thread group if voucher has current task's thread group */ if (thread_group == bank_get_bank_task_thread_group( - get_bank_task_context(current_task(), FALSE))) { + get_bank_task_context(current_task(), FALSE))) { thread_group = NULL; } *banktg = thread_group; @@ -1561,16 +1580,17 @@ bank_get_bank_ledger_and_thread_group( void bank_swap_thread_bank_ledger(thread_t thread __unused, ledger_t new_ledger __unused) { - spl_t s; - processor_t processor; + spl_t s; + processor_t processor; ledger_t old_ledger = thread->t_bankledger; - int64_t ctime, effective_ledger_time_consumed = 0; + int64_t ctime, effective_ledger_time_consumed = 0; int64_t remainder = 0, consumed = 0; int64_t effective_energy_consumed = 0; uint64_t thread_energy; - - if (old_ledger == LEDGER_NULL && new_ledger == LEDGER_NULL) + + if (old_ledger == LEDGER_NULL && new_ledger == LEDGER_NULL) { return; + } assert((thread == current_thread() || thread->started == 0)); @@ -1595,9 +1615,10 @@ bank_swap_thread_bank_ledger(thread_t thread __unused, ledger_t new_ledger __unu ctime = mach_absolute_time(); processor = thread->last_processor; if (processor != NULL) { - if ((int64_t)processor->quantum_end > ctime) + if ((int64_t)processor->quantum_end > ctime) { remainder = (int64_t)processor->quantum_end - ctime; - + } + consumed = thread->quantum_remaining - remainder; effective_ledger_time_consumed = consumed - thread->t_deduct_bank_ledger_time; } @@ -1606,7 +1627,7 @@ bank_swap_thread_bank_ledger(thread_t thread __unused, ledger_t new_ledger __unu thread_energy = ml_energy_stat(thread); effective_energy_consumed = - thread_energy - thread->t_deduct_bank_ledger_energy; + thread_energy - thread->t_deduct_bank_ledger_energy; assert(effective_energy_consumed >= 0); thread->t_deduct_bank_ledger_energy = thread_energy; @@ -1614,14 +1635,13 @@ bank_swap_thread_bank_ledger(thread_t thread __unused, ledger_t new_ledger __unu thread_unlock(thread); splx(s); - + if (old_ledger != LEDGER_NULL) { ledger_credit(old_ledger, - bank_ledgers.cpu_time, - effective_ledger_time_consumed); + bank_ledgers.cpu_time, + effective_ledger_time_consumed); ledger_credit(old_ledger, - bank_ledgers.energy, - effective_energy_consumed); + bank_ledgers.energy, + effective_energy_consumed); } } - diff --git a/osfmk/bank/bank_internal.h b/osfmk/bank/bank_internal.h index e3b3480e2..f20d09950 100644 --- a/osfmk/bank/bank_internal.h +++ b/osfmk/bank/bank_internal.h 
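/*
 * bank_swap_thread_bank_ledger(), the last function of bank.c above, settles
 * mid-quantum CPU time when a thread changes vouchers: whatever the thread
 * consumed in the current quantum beyond what was already deducted is
 * credited to the outgoing ledger, and the deduction baseline advances. A
 * sketch of just that arithmetic, as a simplified model with hypothetical
 * names:
 */
#include <stdint.h>
#include <stddef.h>

struct sketch_ledger { int64_t cpu_time; };

static void
settle_on_swap(struct sketch_ledger *old_ledger, int64_t quantum_remaining,
    int64_t quantum_end, int64_t now, int64_t *deducted)
{
	int64_t remainder = 0, consumed, effective;

	if (quantum_end > now) {
		remainder = quantum_end - now;  /* quantum still in flight */
	}
	consumed = quantum_remaining - remainder;
	effective = consumed - *deducted;       /* not yet billed anywhere */

	if (old_ledger != NULL) {
		old_ledger->cpu_time += effective;
	}
	*deducted = consumed;   /* the incoming ledger starts from here */
}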
@@ -2,7 +2,7 @@ * Copyright (c) 2012-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,7 +32,7 @@ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -52,7 +52,7 @@ typedef mach_voucher_attr_value_handle_t bank_handle_t; struct bank_element { unsigned int be_type:31, /* Type of element */ - be_voucher_ref:1; /* Voucher system holds a ref */ + be_voucher_ref:1; /* Voucher system holds a ref */ int be_refs; /* Ref count */ unsigned int be_made; /* Made refs for voucher, Actual ref is only taken for voucher ref transition (0 to 1) */ #if DEVELOPMENT || DEBUG @@ -69,8 +69,8 @@ struct bank_task { ledger_t bt_ledger; /* Ledger of the customer task */ queue_head_t bt_accounts_to_pay; /* List of accounts worked for me and need to pay */ queue_head_t bt_accounts_to_charge; /* List of accounts I did work and need to charge */ - decl_lck_mtx_data(, bt_acc_to_pay_lock) /* Lock to protect accounts to pay list */ - decl_lck_mtx_data(, bt_acc_to_charge_lock) /* Lock to protect accounts to charge list */ + decl_lck_mtx_data(, bt_acc_to_pay_lock) /* Lock to protect accounts to pay list */ + decl_lck_mtx_data(, bt_acc_to_charge_lock) /* Lock to protect accounts to charge list */ uint8_t bt_hasentitlement; /* If the secure persona entitlement is set on the task */ #if DEVELOPMENT || DEBUG queue_chain_t bt_global_elt; /* Element on the global bank task chain */ @@ -98,23 +98,23 @@ struct bank_task { typedef struct bank_task * bank_task_t; #define BANK_TASK_NULL ((bank_task_t) 0) -#define bank_task_reference(elem) \ - (OSAddAtomic(1, &(elem)->bt_refs)) +#define bank_task_reference(elem) \ + (OSAddAtomic(1, &(elem)->bt_refs)) -#define bank_task_release(elem) \ - (OSAddAtomic(-1, &(elem)->bt_refs)) +#define bank_task_release(elem) \ + (OSAddAtomic(-1, &(elem)->bt_refs)) -#define bank_task_release_num(elem, num) \ - (OSAddAtomic(-(num), &(elem)->bt_refs)) +#define bank_task_release_num(elem, num) \ + (OSAddAtomic(-(num), &(elem)->bt_refs)) -#define bank_task_made_reference(elem) \ - (hw_atomic_add(&(elem)->bt_made, 1) - 1) +#define bank_task_made_reference(elem) \ + (hw_atomic_add(&(elem)->bt_made, 1) - 1) -#define bank_task_made_release(elem) \ - (hw_atomic_sub(&(elem)->bt_made, 1) + 1) +#define bank_task_made_release(elem) \ + (hw_atomic_sub(&(elem)->bt_made, 1) + 1) -#define bank_task_made_release_num(elem, num) \ - (hw_atomic_sub(&(elem)->bt_made, (num)) + (num)) +#define bank_task_made_release_num(elem, num) 
\ + (hw_atomic_sub(&(elem)->bt_made, (num)) + (num)) struct bank_account { @@ -143,23 +143,23 @@ struct bank_account { typedef struct bank_account * bank_account_t; #define BANK_ACCOUNT_NULL ((bank_account_t) 0) -#define bank_account_reference(elem) \ - (OSAddAtomic(1, &(elem)->ba_refs)) +#define bank_account_reference(elem) \ + (OSAddAtomic(1, &(elem)->ba_refs)) -#define bank_account_release(elem) \ - (OSAddAtomic(-1, &(elem)->ba_refs)) +#define bank_account_release(elem) \ + (OSAddAtomic(-1, &(elem)->ba_refs)) -#define bank_account_release_num(elem, num) \ - (OSAddAtomic(-(num), &(elem)->ba_refs)) +#define bank_account_release_num(elem, num) \ + (OSAddAtomic(-(num), &(elem)->ba_refs)) -#define bank_account_made_reference(elem) \ - (hw_atomic_add(&(elem)->ba_made, 1) - 1) +#define bank_account_made_reference(elem) \ + (hw_atomic_add(&(elem)->ba_made, 1) - 1) -#define bank_account_made_release(elem) \ - (hw_atomic_sub(&(elem)->ba_made, 1) + 1) +#define bank_account_made_release(elem) \ + (hw_atomic_sub(&(elem)->ba_made, 1) + 1) -#define bank_account_made_release_num(elem, num) \ - (hw_atomic_sub(&(elem)->ba_made, (num)) + (num)) +#define bank_account_made_release_num(elem, num) \ + (hw_atomic_sub(&(elem)->ba_made, (num)) + (num)) struct _bank_ledger_indices { int cpu_time; @@ -176,7 +176,7 @@ extern void bank_billed_balance(bank_task_t bank_task, uint64_t *cpu_time, uint6 extern void bank_serviced_balance_safe(task_t task, uint64_t *cpu_time, uint64_t *energy); extern void bank_serviced_balance(bank_task_t bank_task, uint64_t *cpu_time, uint64_t *energy); extern kern_return_t bank_get_bank_ledger_and_thread_group(ipc_voucher_t voucher, - ledger_t *bankledger, struct thread_group **banktg); + ledger_t *bankledger, struct thread_group **banktg); extern void bank_swap_thread_bank_ledger(thread_t thread, ledger_t ledger); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/bank/bank_types.h b/osfmk/bank/bank_types.h index 17ec0e151..51c40830d 100644 --- a/osfmk/bank/bank_types.h +++ b/osfmk/bank/bank_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2012-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,8 +32,8 @@ #include #include -#define MACH_VOUCHER_ATTR_BANK_NULL ((mach_voucher_attr_recipe_command_t)601) -#define MACH_VOUCHER_ATTR_BANK_CREATE ((mach_voucher_attr_recipe_command_t)610) +#define MACH_VOUCHER_ATTR_BANK_NULL ((mach_voucher_attr_recipe_command_t)601) +#define MACH_VOUCHER_ATTR_BANK_CREATE ((mach_voucher_attr_recipe_command_t)610) #define MACH_VOUCHER_BANK_CONTENT_SIZE (500) diff --git a/osfmk/conf/files b/osfmk/conf/files index 342d24d83..d7a06429a 100644 --- a/osfmk/conf/files +++ b/osfmk/conf/files @@ -140,6 +140,7 @@ osfmk/kern/ecc_logging.c optional config_ecc_logging osfmk/kern/ktrace_background_notify.c standard osfmk/kern/ledger.c standard osfmk/kern/locks.c standard +osfmk/kern/tlock.c standard osfmk/kern/ltable.c standard osfmk/kern/machine.c standard osfmk/kern/mach_node.c standard @@ -189,6 +190,7 @@ osfmk/kern/zcache.c optional config_zcache osfmk/kern/gzalloc.c optional config_gzalloc osfmk/kern/bsd_kern.c optional mach_bsd osfmk/kern/hibernate.c optional hibernation +osfmk/kern/remote_time.c standard osfmk/kern/memset_s.c standard osfmk/kern/copyout_shim.c optional copyout_shim diff --git a/osfmk/conf/files.arm64 b/osfmk/conf/files.arm64 index 68611422f..239a78423 100644 --- a/osfmk/conf/files.arm64 +++ b/osfmk/conf/files.arm64 @@ -87,3 +87,4 @@ osfmk/corecrypto/ccn/src/ccn_set.c standard osfmk/arm64/pgtrace.c standard osfmk/arm64/pgtrace_decoder.c optional config_pgtrace_nonkext +osfmk/arm64/machine_remote_time.c optional config_mach_bridge_recv_time diff --git a/osfmk/conf/files.x86_64 b/osfmk/conf/files.x86_64 index a696fc0ac..dfe06058d 100644 --- a/osfmk/conf/files.x86_64 +++ b/osfmk/conf/files.x86_64 @@ -122,3 +122,4 @@ osfmk/i386/startup64.c standard osfmk/x86_64/idt64.s standard osfmk/i386/panic_hooks.c standard +osfmk/x86_64/machine_remote_time.c optional config_mach_bridge_send_time diff --git a/osfmk/console/art/scalegear.c b/osfmk/console/art/scalegear.c index 3dbc5ad0a..9e5334997 100644 --- a/osfmk/console/art/scalegear.c +++ b/osfmk/console/art/scalegear.c @@ -34,47 +34,52 @@ #include "../../../pexpert/pexpert/GearImage.h" -int main(int argc, char * argv[]) +int +main(int argc, char * argv[]) { - vImage_Buffer vs; - vImage_Buffer vd; - vImage_Error verr; - uint32_t i, data32; - uint8_t data8; + vImage_Buffer vs; + vImage_Buffer vd; + vImage_Error verr; + uint32_t i, data32; + uint8_t data8; - vs.width = kGearWidth * 2; - vs.height = kGearHeight * 2 * kGearFrames; - vs.rowBytes = vs.width * sizeof(uint32_t); - vs.data = malloc(vs.height * vs.rowBytes); + vs.width = kGearWidth * 2; + vs.height = kGearHeight * 2 * kGearFrames; + vs.rowBytes = vs.width * sizeof(uint32_t); + vs.data = malloc(vs.height * vs.rowBytes); - vd.width = 1.5 * vs.width; - vd.height = 1.5 * vs.height; - vd.rowBytes = vd.width * sizeof(uint32_t); - vd.data = malloc(vd.height * vd.rowBytes); + vd.width = 1.5 * vs.width; + vd.height = 1.5 * vs.height; + vd.rowBytes = vd.width * sizeof(uint32_t); + vd.data = malloc(vd.height * vd.rowBytes); - for (i = 0; i < vs.width * vs.height; i++) - { - data32 = gGearPict2x[i]; - data32 = (0xFF000000 | (data32 << 16) | (data32 << 8) | data32); - ((uint32_t *)vs.data)[i] = data32; - } + for (i = 0; i < vs.width * vs.height; i++) { + data32 = gGearPict2x[i]; + data32 = (0xFF000000 | (data32 << 16) | (data32 << 8) | data32); + ((uint32_t *)vs.data)[i] = data32; + } - verr = vImageScale_ARGB8888(&vs, &vd, NULL, kvImageHighQualityResampling); + verr = vImageScale_ARGB8888(&vs, &vd, NULL, 
kvImageHighQualityResampling); - if (kvImageNoError != verr) exit(1); + if (kvImageNoError != verr) { + exit(1); + } - printf("const unsigned char gGearPict3x[9*kGearFrames*kGearWidth*kGearHeight] = {"); + printf("const unsigned char gGearPict3x[9*kGearFrames*kGearWidth*kGearHeight] = {"); - for (i = 0; i < vd.width * vd.height; i++) - { - data32 = ((uint32_t *)vd.data)[i]; - data8 = (0xFF & data32); - if (data32 != (0xFF000000 | (data8 << 16) | (data8 << 8) | data8)) exit(1); + for (i = 0; i < vd.width * vd.height; i++) { + data32 = ((uint32_t *)vd.data)[i]; + data8 = (0xFF & data32); + if (data32 != (0xFF000000 | (data8 << 16) | (data8 << 8) | data8)) { + exit(1); + } - if (0 == (15 & i)) printf("\n "); - printf("0x%02x,", data8); - } - printf("\n};\n"); + if (0 == (15 & i)) { + printf("\n "); + } + printf("0x%02x,", data8); + } + printf("\n};\n"); - exit(0); + exit(0); } diff --git a/osfmk/console/iso_font.c b/osfmk/console/iso_font.c index e82fb3ff9..f8e6c7996 100644 --- a/osfmk/console/iso_font.c +++ b/osfmk/console/iso_font.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -44,263 +44,263 @@ * adjusted 'E' 'F' '#' */ -unsigned char iso_font[256*16] = { -/* 0 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 1 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 2 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 3 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 4 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 5 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 6 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 7 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 8 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 9 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 10 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 11 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 12 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 13 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 14 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 15 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 16 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 17 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 18 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 19 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 20 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 21 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 22 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 23 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 24 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 25 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 26 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 27 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 28 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 29 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 30 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 31 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 32 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 33 */ 0x00,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, -/* 34 */ 0x00,0x00,0x6c,0x6c,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 35 */ 0x00,0x00,0x00,0x36,0x36,0x7f,0x36,0x36,0x7f,0x36,0x36,0x00,0x00,0x00,0x00,0x00, -/* 36 */ 0x00,0x08,0x08,0x3e,0x6b,0x0b,0x0b,0x3e,0x68,0x68,0x6b,0x3e,0x08,0x08,0x00,0x00, -/* 37 */ 
0x00,0x00,0x00,0x33,0x13,0x18,0x08,0x0c,0x04,0x06,0x32,0x33,0x00,0x00,0x00,0x00, -/* 38 */ 0x00,0x00,0x1c,0x36,0x36,0x1c,0x6c,0x3e,0x33,0x33,0x7b,0xce,0x00,0x00,0x00,0x00, -/* 39 */ 0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 40 */ 0x00,0x00,0x30,0x18,0x18,0x0c,0x0c,0x0c,0x0c,0x0c,0x18,0x18,0x30,0x00,0x00,0x00, -/* 41 */ 0x00,0x00,0x0c,0x18,0x18,0x30,0x30,0x30,0x30,0x30,0x18,0x18,0x0c,0x00,0x00,0x00, -/* 42 */ 0x00,0x00,0x00,0x00,0x00,0x36,0x1c,0x7f,0x1c,0x36,0x00,0x00,0x00,0x00,0x00,0x00, -/* 43 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00, -/* 44 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00, -/* 45 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 46 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, -/* 47 */ 0x00,0x00,0x60,0x20,0x30,0x10,0x18,0x08,0x0c,0x04,0x06,0x02,0x03,0x00,0x00,0x00, -/* 48 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x6b,0x6b,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 49 */ 0x00,0x00,0x18,0x1e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 50 */ 0x00,0x00,0x3e,0x63,0x60,0x60,0x30,0x18,0x0c,0x06,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 51 */ 0x00,0x00,0x3e,0x63,0x60,0x60,0x3c,0x60,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 52 */ 0x00,0x00,0x30,0x38,0x3c,0x36,0x33,0x7f,0x30,0x30,0x30,0x30,0x00,0x00,0x00,0x00, -/* 53 */ 0x00,0x00,0x7f,0x03,0x03,0x3f,0x60,0x60,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 54 */ 0x00,0x00,0x3c,0x06,0x03,0x03,0x3f,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 55 */ 0x00,0x00,0x7f,0x60,0x30,0x30,0x18,0x18,0x18,0x0c,0x0c,0x0c,0x00,0x00,0x00,0x00, -/* 56 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x3e,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 57 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x7e,0x60,0x60,0x60,0x30,0x1e,0x00,0x00,0x00,0x00, -/* 58 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00, -/* 59 */ 0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x18,0x18,0x0c,0x00,0x00,0x00, -/* 60 */ 0x00,0x00,0x60,0x30,0x18,0x0c,0x06,0x06,0x0c,0x18,0x30,0x60,0x00,0x00,0x00,0x00, -/* 61 */ 0x00,0x00,0x00,0x00,0x00,0x7e,0x00,0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 62 */ 0x00,0x00,0x06,0x0c,0x18,0x30,0x60,0x60,0x30,0x18,0x0c,0x06,0x00,0x00,0x00,0x00, -/* 63 */ 0x00,0x00,0x3e,0x63,0x60,0x30,0x30,0x18,0x18,0x00,0x18,0x18,0x00,0x00,0x00,0x00, -/* 64 */ 0x00,0x00,0x3c,0x66,0x73,0x7b,0x6b,0x6b,0x7b,0x33,0x06,0x3c,0x00,0x00,0x00,0x00, -/* 65 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 66 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x3f,0x63,0x63,0x63,0x63,0x3f,0x00,0x00,0x00,0x00, -/* 67 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x03,0x03,0x03,0x66,0x3c,0x00,0x00,0x00,0x00, -/* 68 */ 0x00,0x00,0x1f,0x33,0x63,0x63,0x63,0x63,0x63,0x63,0x33,0x1f,0x00,0x00,0x00,0x00, -/* 69 */ 0x00,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 70 */ 0x00,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, -/* 71 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x73,0x63,0x63,0x66,0x7c,0x00,0x00,0x00,0x00, -/* 72 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 73 */ 0x00,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 74 */ 0x00,0x00,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x33,0x1e,0x00,0x00,0x00,0x00, -/* 75 */ 0x00,0x00,0x63,0x33,0x1b,0x0f,0x07,0x07,0x0f,0x1b,0x33,0x63,0x00,0x00,0x00,0x00, -/* 76 */ 
0x00,0x00,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 77 */ 0x00,0x00,0x63,0x63,0x77,0x7f,0x7f,0x6b,0x6b,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 78 */ 0x00,0x00,0x63,0x63,0x67,0x6f,0x6f,0x7b,0x7b,0x73,0x63,0x63,0x00,0x00,0x00,0x00, -/* 79 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 80 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x63,0x3f,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, -/* 81 */ 0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x6f,0x7b,0x3e,0x30,0x60,0x00,0x00, -/* 82 */ 0x00,0x00,0x3f,0x63,0x63,0x63,0x63,0x3f,0x1b,0x33,0x63,0x63,0x00,0x00,0x00,0x00, -/* 83 */ 0x00,0x00,0x3e,0x63,0x03,0x03,0x0e,0x38,0x60,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 84 */ 0x00,0x00,0x7e,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 85 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 86 */ 0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x36,0x36,0x1c,0x1c,0x08,0x00,0x00,0x00,0x00, -/* 87 */ 0x00,0x00,0x63,0x63,0x6b,0x6b,0x6b,0x6b,0x7f,0x36,0x36,0x36,0x00,0x00,0x00,0x00, -/* 88 */ 0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x36,0x36,0x63,0x63,0x00,0x00,0x00,0x00, -/* 89 */ 0x00,0x00,0xc3,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 90 */ 0x00,0x00,0x7f,0x30,0x30,0x18,0x18,0x0c,0x0c,0x06,0x06,0x7f,0x00,0x00,0x00,0x00, -/* 91 */ 0x00,0x00,0x3c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x3c,0x00,0x00,0x00,0x00, -/* 92 */ 0x00,0x00,0x03,0x02,0x06,0x04,0x0c,0x08,0x18,0x10,0x30,0x20,0x60,0x00,0x00,0x00, -/* 93 */ 0x00,0x00,0x3c,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x3c,0x00,0x00,0x00,0x00, -/* 94 */ 0x00,0x08,0x1c,0x36,0x63,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 95 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x00,0x00, -/* 96 */ 0x00,0x00,0x0c,0x0c,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 97 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 98 */ 0x00,0x00,0x03,0x03,0x03,0x3b,0x67,0x63,0x63,0x63,0x67,0x3b,0x00,0x00,0x00,0x00, -/* 99 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x03,0x03,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 100 */ 0x00,0x00,0x60,0x60,0x60,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 101 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 102 */ 0x00,0x00,0x3c,0x66,0x06,0x1f,0x06,0x06,0x06,0x06,0x06,0x06,0x00,0x00,0x00,0x00, -/* 103 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x60,0x63,0x3e,0x00, -/* 104 */ 0x00,0x00,0x03,0x03,0x03,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 105 */ 0x00,0x00,0x0c,0x0c,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 106 */ 0x00,0x00,0x30,0x30,0x00,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x30,0x33,0x1e,0x00, -/* 107 */ 0x00,0x00,0x03,0x03,0x03,0x63,0x33,0x1b,0x0f,0x1f,0x33,0x63,0x00,0x00,0x00,0x00, -/* 108 */ 0x00,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 109 */ 0x00,0x00,0x00,0x00,0x00,0x35,0x6b,0x6b,0x6b,0x6b,0x6b,0x6b,0x00,0x00,0x00,0x00, -/* 110 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 111 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 112 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x63,0x63,0x63,0x67,0x3b,0x03,0x03,0x03,0x00, -/* 113 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0x73,0x63,0x63,0x63,0x73,0x6e,0x60,0xe0,0x60,0x00, -/* 114 */ 0x00,0x00,0x00,0x00,0x00,0x3b,0x67,0x03,0x03,0x03,0x03,0x03,0x00,0x00,0x00,0x00, 
-/* 115 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x0e,0x38,0x60,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 116 */ 0x00,0x00,0x00,0x0c,0x0c,0x3e,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 117 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 118 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x08,0x00,0x00,0x00,0x00, -/* 119 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x6b,0x6b,0x6b,0x3e,0x36,0x36,0x00,0x00,0x00,0x00, -/* 120 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x36,0x1c,0x1c,0x1c,0x36,0x63,0x00,0x00,0x00,0x00, -/* 121 */ 0x00,0x00,0x00,0x00,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00, -/* 122 */ 0x00,0x00,0x00,0x00,0x00,0x7f,0x60,0x30,0x18,0x0c,0x06,0x7f,0x00,0x00,0x00,0x00, -/* 123 */ 0x00,0x00,0x70,0x18,0x18,0x18,0x18,0x0e,0x18,0x18,0x18,0x18,0x70,0x00,0x00,0x00, -/* 124 */ 0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00,0x00, -/* 125 */ 0x00,0x00,0x0e,0x18,0x18,0x18,0x18,0x70,0x18,0x18,0x18,0x18,0x0e,0x00,0x00,0x00, -/* 126 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x6e,0x3b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 127 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 128 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 129 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 130 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 131 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 132 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 133 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 134 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 135 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 136 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 137 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 138 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 139 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 140 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 141 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 142 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 143 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 144 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 145 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 146 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 147 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 148 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 149 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 150 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 151 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 152 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 153 */ 
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 154 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 155 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 156 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 157 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 158 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 159 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 160 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 161 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x00,0x00, -/* 162 */ 0x00,0x00,0x00,0x08,0x08,0x3e,0x6b,0x0b,0x0b,0x0b,0x6b,0x3e,0x08,0x08,0x00,0x00, -/* 163 */ 0x00,0x00,0x1c,0x36,0x06,0x06,0x1f,0x06,0x06,0x07,0x6f,0x3b,0x00,0x00,0x00,0x00, -/* 164 */ 0x00,0x00,0x00,0x00,0x66,0x3c,0x66,0x66,0x66,0x3c,0x66,0x00,0x00,0x00,0x00,0x00, -/* 165 */ 0x00,0x00,0xc3,0xc3,0x66,0x66,0x3c,0x7e,0x18,0x7e,0x18,0x18,0x00,0x00,0x00,0x00, -/* 166 */ 0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x00,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 167 */ 0x00,0x3c,0x66,0x0c,0x1e,0x33,0x63,0x66,0x3c,0x18,0x33,0x1e,0x00,0x00,0x00,0x00, -/* 168 */ 0x00,0x00,0x36,0x36,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 169 */ 0x00,0x00,0x3c,0x42,0x99,0xa5,0x85,0xa5,0x99,0x42,0x3c,0x00,0x00,0x00,0x00,0x00, -/* 170 */ 0x00,0x1e,0x30,0x3e,0x33,0x3b,0x36,0x00,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 171 */ 0x00,0x00,0x00,0x00,0x00,0x6c,0x36,0x1b,0x1b,0x36,0x6c,0x00,0x00,0x00,0x00,0x00, -/* 172 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x7f,0x60,0x60,0x60,0x00,0x00,0x00,0x00,0x00,0x00, -/* 173 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x3c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 174 */ 0x00,0x00,0x3c,0x42,0x9d,0xa5,0x9d,0xa5,0xa5,0x42,0x3c,0x00,0x00,0x00,0x00,0x00, -/* 175 */ 0x00,0x7e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 176 */ 0x00,0x00,0x1c,0x36,0x36,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 177 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x7e,0x18,0x18,0x00,0x7e,0x00,0x00,0x00,0x00,0x00, -/* 178 */ 0x00,0x1e,0x33,0x18,0x0c,0x06,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 179 */ 0x00,0x1e,0x33,0x18,0x30,0x33,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 180 */ 0x00,0x30,0x18,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 181 */ 0x00,0x00,0x00,0x00,0x00,0x66,0x66,0x66,0x66,0x66,0x76,0x6e,0x06,0x06,0x03,0x00, -/* 182 */ 0x00,0x00,0x7e,0x2f,0x2f,0x2f,0x2e,0x28,0x28,0x28,0x28,0x28,0x00,0x00,0x00,0x00, -/* 183 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 184 */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x18,0x30,0x1e,0x00, -/* 185 */ 0x00,0x0c,0x0e,0x0c,0x0c,0x0c,0x1e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 186 */ 0x00,0x1e,0x33,0x33,0x33,0x33,0x1e,0x00,0x3f,0x00,0x00,0x00,0x00,0x00,0x00,0x00, -/* 187 */ 0x00,0x00,0x00,0x00,0x00,0x1b,0x36,0x6c,0x6c,0x36,0x1b,0x00,0x00,0x00,0x00,0x00, -/* 188 */ 0x00,0x10,0x1c,0x18,0x18,0x18,0x00,0x7f,0x00,0x18,0x1c,0x1a,0x3e,0x18,0x00,0x00, -/* 189 */ 0x00,0x10,0x1c,0x18,0x18,0x18,0x00,0x7f,0x00,0x1c,0x36,0x18,0x0c,0x3e,0x00,0x00, -/* 190 */ 0x00,0x1c,0x36,0x18,0x36,0x1c,0x00,0x7f,0x00,0x18,0x1c,0x1a,0x3e,0x18,0x00,0x00, -/* 191 */ 
0x00,0x00,0x00,0x00,0x0c,0x0c,0x00,0x0c,0x0c,0x06,0x06,0x03,0x63,0x3e,0x00,0x00, -/* 192 */ 0x0c,0x18,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 193 */ 0x18,0x0c,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 194 */ 0x08,0x14,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 195 */ 0x6e,0x3b,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 196 */ 0x36,0x00,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 197 */ 0x1c,0x36,0x3e,0x63,0x63,0x63,0x7f,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 198 */ 0x00,0x00,0xfe,0x33,0x33,0x33,0xff,0x33,0x33,0x33,0x33,0xf3,0x00,0x00,0x00,0x00, -/* 199 */ 0x00,0x00,0x3c,0x66,0x03,0x03,0x03,0x03,0x03,0x03,0x66,0x3c,0x18,0x30,0x1e,0x00, -/* 200 */ 0x0c,0x18,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 201 */ 0x18,0x0c,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 202 */ 0x08,0x14,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 203 */ 0x36,0x00,0x7f,0x03,0x03,0x03,0x3f,0x03,0x03,0x03,0x03,0x7f,0x00,0x00,0x00,0x00, -/* 204 */ 0x0c,0x18,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 205 */ 0x30,0x18,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 206 */ 0x18,0x24,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 207 */ 0x66,0x00,0x3c,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x18,0x3c,0x00,0x00,0x00,0x00, -/* 208 */ 0x00,0x00,0x1e,0x36,0x66,0x66,0x6f,0x66,0x66,0x66,0x36,0x1e,0x00,0x00,0x00,0x00, -/* 209 */ 0x6e,0x3b,0x63,0x63,0x67,0x6f,0x6f,0x7b,0x7b,0x73,0x63,0x63,0x00,0x00,0x00,0x00, -/* 210 */ 0x06,0x0c,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 211 */ 0x30,0x18,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 212 */ 0x08,0x14,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 213 */ 0x6e,0x3b,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 214 */ 0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 215 */ 0x00,0x00,0x00,0x00,0x00,0x66,0x3c,0x18,0x3c,0x66,0x00,0x00,0x00,0x00,0x00,0x00, -/* 216 */ 0x00,0x20,0x3e,0x73,0x73,0x6b,0x6b,0x6b,0x6b,0x67,0x67,0x3e,0x02,0x00,0x00,0x00, -/* 217 */ 0x0c,0x18,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 218 */ 0x18,0x0c,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 219 */ 0x08,0x14,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 220 */ 0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 221 */ 0x30,0x18,0xc3,0xc3,0x66,0x66,0x3c,0x3c,0x18,0x18,0x18,0x18,0x00,0x00,0x00,0x00, -/* 222 */ 0x00,0x00,0x0f,0x06,0x3e,0x66,0x66,0x66,0x66,0x3e,0x06,0x0f,0x00,0x00,0x00,0x00, -/* 223 */ 0x00,0x00,0x1e,0x33,0x33,0x1b,0x33,0x63,0x63,0x63,0x63,0x3b,0x00,0x00,0x00,0x00, -/* 224 */ 0x00,0x0c,0x18,0x30,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 225 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 226 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 227 */ 0x00,0x00,0x6e,0x3b,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 228 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 229 */ 
0x00,0x1c,0x36,0x1c,0x00,0x3e,0x60,0x7e,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 230 */ 0x00,0x00,0x00,0x00,0x00,0x6e,0xdb,0xd8,0xfe,0x1b,0xdb,0x76,0x00,0x00,0x00,0x00, -/* 231 */ 0x00,0x00,0x00,0x00,0x00,0x3e,0x63,0x03,0x03,0x03,0x63,0x3e,0x18,0x30,0x1e,0x00, -/* 232 */ 0x00,0x0c,0x18,0x30,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 233 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 234 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 235 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x63,0x63,0x7f,0x03,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 236 */ 0x00,0x06,0x0c,0x18,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 237 */ 0x00,0x18,0x0c,0x06,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 238 */ 0x00,0x08,0x1c,0x36,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 239 */ 0x00,0x00,0x36,0x36,0x00,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x38,0x00,0x00,0x00,0x00, -/* 240 */ 0x00,0x00,0x2c,0x18,0x34,0x60,0x7c,0x66,0x66,0x66,0x66,0x3c,0x00,0x00,0x00,0x00, -/* 241 */ 0x00,0x00,0x6e,0x3b,0x00,0x3b,0x67,0x63,0x63,0x63,0x63,0x63,0x00,0x00,0x00,0x00, -/* 242 */ 0x00,0x06,0x0c,0x18,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 243 */ 0x00,0x30,0x18,0x0c,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 244 */ 0x00,0x08,0x1c,0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 245 */ 0x00,0x00,0x6e,0x3b,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 246 */ 0x00,0x00,0x36,0x36,0x00,0x3e,0x63,0x63,0x63,0x63,0x63,0x3e,0x00,0x00,0x00,0x00, -/* 247 */ 0x00,0x00,0x00,0x00,0x18,0x18,0x00,0x7e,0x00,0x18,0x18,0x00,0x00,0x00,0x00,0x00, -/* 248 */ 0x00,0x00,0x00,0x00,0x20,0x3e,0x73,0x6b,0x6b,0x6b,0x67,0x3e,0x02,0x00,0x00,0x00, -/* 249 */ 0x00,0x06,0x0c,0x18,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 250 */ 0x00,0x30,0x18,0x0c,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 251 */ 0x00,0x08,0x1c,0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 252 */ 0x00,0x00,0x36,0x36,0x00,0x63,0x63,0x63,0x63,0x63,0x73,0x6e,0x00,0x00,0x00,0x00, -/* 253 */ 0x00,0x30,0x18,0x0c,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00, -/* 254 */ 0x00,0x00,0x0f,0x06,0x06,0x3e,0x66,0x66,0x66,0x66,0x66,0x3e,0x06,0x06,0x0f,0x00, -/* 255 */ 0x00,0x00,0x36,0x36,0x00,0x63,0x63,0x36,0x36,0x1c,0x1c,0x0c,0x0c,0x06,0x03,0x00 +unsigned char iso_font[256 * 16] = { +/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 1 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 2 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 3 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 4 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 5 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 6 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 8 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 9 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, +/* 10 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 11 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 12 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 13 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 14 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 15 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 16 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 18 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 19 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 20 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 21 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 22 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 23 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 24 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 25 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 26 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 28 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 29 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 30 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 31 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 32 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 33 */ 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 34 */ 0x00, 0x00, 0x6c, 0x6c, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 35 */ 0x00, 0x00, 0x00, 0x36, 0x36, 0x7f, 0x36, 0x36, 0x7f, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 36 */ 0x00, 0x08, 0x08, 0x3e, 0x6b, 0x0b, 0x0b, 0x3e, 0x68, 0x68, 0x6b, 0x3e, 0x08, 0x08, 0x00, 0x00, +/* 37 */ 0x00, 0x00, 0x00, 0x33, 0x13, 0x18, 0x08, 0x0c, 0x04, 0x06, 0x32, 0x33, 0x00, 0x00, 0x00, 0x00, +/* 38 */ 0x00, 0x00, 0x1c, 0x36, 0x36, 0x1c, 0x6c, 0x3e, 0x33, 0x33, 0x7b, 0xce, 0x00, 0x00, 0x00, 0x00, +/* 39 */ 0x00, 0x00, 0x18, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 40 */ 0x00, 0x00, 0x30, 0x18, 0x18, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, +/* 41 */ 0x00, 0x00, 0x0c, 0x18, 0x18, 0x30, 0x30, 0x30, 0x30, 0x30, 0x18, 0x18, 0x0c, 0x00, 0x00, 0x00, +/* 42 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x1c, 0x7f, 0x1c, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 43 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 
0x18, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 44 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x0c, 0x00, 0x00, 0x00, +/* 45 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 46 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 47 */ 0x00, 0x00, 0x60, 0x20, 0x30, 0x10, 0x18, 0x08, 0x0c, 0x04, 0x06, 0x02, 0x03, 0x00, 0x00, 0x00, +/* 48 */ 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x6b, 0x6b, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 49 */ 0x00, 0x00, 0x18, 0x1e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 50 */ 0x00, 0x00, 0x3e, 0x63, 0x60, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x03, 0x7f, 0x00, 0x00, 0x00, 0x00, +/* 51 */ 0x00, 0x00, 0x3e, 0x63, 0x60, 0x60, 0x3c, 0x60, 0x60, 0x60, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 52 */ 0x00, 0x00, 0x30, 0x38, 0x3c, 0x36, 0x33, 0x7f, 0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, +/* 53 */ 0x00, 0x00, 0x7f, 0x03, 0x03, 0x3f, 0x60, 0x60, 0x60, 0x60, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 54 */ 0x00, 0x00, 0x3c, 0x06, 0x03, 0x03, 0x3f, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 55 */ 0x00, 0x00, 0x7f, 0x60, 0x30, 0x30, 0x18, 0x18, 0x18, 0x0c, 0x0c, 0x0c, 0x00, 0x00, 0x00, 0x00, +/* 56 */ 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 57 */ 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x7e, 0x60, 0x60, 0x60, 0x30, 0x1e, 0x00, 0x00, 0x00, 0x00, +/* 58 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 59 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x0c, 0x00, 0x00, 0x00, +/* 60 */ 0x00, 0x00, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, +/* 61 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 62 */ 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x00, 0x00, 0x00, 0x00, +/* 63 */ 0x00, 0x00, 0x3e, 0x63, 0x60, 0x30, 0x30, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 64 */ 0x00, 0x00, 0x3c, 0x66, 0x73, 0x7b, 0x6b, 0x6b, 0x7b, 0x33, 0x06, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 65 */ 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 66 */ 0x00, 0x00, 0x3f, 0x63, 0x63, 0x63, 0x3f, 0x63, 0x63, 0x63, 0x63, 0x3f, 0x00, 0x00, 0x00, 0x00, +/* 67 */ 0x00, 0x00, 0x3c, 0x66, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 68 */ 0x00, 0x00, 0x1f, 0x33, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x33, 0x1f, 0x00, 0x00, 0x00, 0x00, +/* 69 */ 0x00, 0x00, 0x7f, 0x03, 0x03, 0x03, 0x3f, 0x03, 0x03, 0x03, 0x03, 0x7f, 0x00, 0x00, 0x00, 0x00, +/* 70 */ 0x00, 0x00, 0x7f, 0x03, 0x03, 0x03, 0x3f, 0x03, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, +/* 71 */ 0x00, 0x00, 0x3c, 0x66, 0x03, 0x03, 0x03, 0x73, 0x63, 0x63, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00, +/* 72 */ 0x00, 0x00, 0x63, 0x63, 0x63, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 73 */ 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 74 */ 0x00, 0x00, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x33, 0x1e, 0x00, 0x00, 0x00, 0x00, +/* 75 */ 0x00, 0x00, 0x63, 0x33, 0x1b, 0x0f, 0x07, 0x07, 0x0f, 0x1b, 0x33, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 76 */ 0x00, 0x00, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x7f, 0x00, 0x00, 0x00, 
0x00, +/* 77 */ 0x00, 0x00, 0x63, 0x63, 0x77, 0x7f, 0x7f, 0x6b, 0x6b, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 78 */ 0x00, 0x00, 0x63, 0x63, 0x67, 0x6f, 0x6f, 0x7b, 0x7b, 0x73, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 79 */ 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 80 */ 0x00, 0x00, 0x3f, 0x63, 0x63, 0x63, 0x63, 0x3f, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, +/* 81 */ 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x6f, 0x7b, 0x3e, 0x30, 0x60, 0x00, 0x00, +/* 82 */ 0x00, 0x00, 0x3f, 0x63, 0x63, 0x63, 0x63, 0x3f, 0x1b, 0x33, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 83 */ 0x00, 0x00, 0x3e, 0x63, 0x03, 0x03, 0x0e, 0x38, 0x60, 0x60, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 84 */ 0x00, 0x00, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 85 */ 0x00, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 86 */ 0x00, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, 0x36, 0x36, 0x1c, 0x1c, 0x08, 0x00, 0x00, 0x00, 0x00, +/* 87 */ 0x00, 0x00, 0x63, 0x63, 0x6b, 0x6b, 0x6b, 0x6b, 0x7f, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, +/* 88 */ 0x00, 0x00, 0x63, 0x63, 0x36, 0x36, 0x1c, 0x1c, 0x36, 0x36, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 89 */ 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x66, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 90 */ 0x00, 0x00, 0x7f, 0x30, 0x30, 0x18, 0x18, 0x0c, 0x0c, 0x06, 0x06, 0x7f, 0x00, 0x00, 0x00, 0x00, +/* 91 */ 0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 92 */ 0x00, 0x00, 0x03, 0x02, 0x06, 0x04, 0x0c, 0x08, 0x18, 0x10, 0x30, 0x20, 0x60, 0x00, 0x00, 0x00, +/* 93 */ 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 94 */ 0x00, 0x08, 0x1c, 0x36, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 95 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, +/* 96 */ 0x00, 0x00, 0x0c, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x60, 0x7e, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 98 */ 0x00, 0x00, 0x03, 0x03, 0x03, 0x3b, 0x67, 0x63, 0x63, 0x63, 0x67, 0x3b, 0x00, 0x00, 0x00, 0x00, +/* 99 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x63, 0x03, 0x03, 0x03, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 100 */ 0x00, 0x00, 0x60, 0x60, 0x60, 0x6e, 0x73, 0x63, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 101 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x63, 0x63, 0x7f, 0x03, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 102 */ 0x00, 0x00, 0x3c, 0x66, 0x06, 0x1f, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, +/* 103 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x73, 0x63, 0x63, 0x63, 0x73, 0x6e, 0x60, 0x63, 0x3e, 0x00, +/* 104 */ 0x00, 0x00, 0x03, 0x03, 0x03, 0x3b, 0x67, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 105 */ 0x00, 0x00, 0x0c, 0x0c, 0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x38, 0x00, 0x00, 0x00, 0x00, +/* 106 */ 0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x33, 0x1e, 0x00, +/* 107 */ 0x00, 0x00, 0x03, 0x03, 0x03, 0x63, 0x33, 0x1b, 0x0f, 0x1f, 0x33, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 108 */ 0x00, 0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x38, 0x00, 0x00, 0x00, 0x00, +/* 109 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x6b, 0x00, 0x00, 0x00, 0x00, +/* 110 */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x3b, 0x67, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 111 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 112 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x67, 0x63, 0x63, 0x63, 0x67, 0x3b, 0x03, 0x03, 0x03, 0x00, +/* 113 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x73, 0x63, 0x63, 0x63, 0x73, 0x6e, 0x60, 0xe0, 0x60, 0x00, +/* 114 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x67, 0x03, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, +/* 115 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x63, 0x0e, 0x38, 0x60, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 116 */ 0x00, 0x00, 0x00, 0x0c, 0x0c, 0x3e, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x38, 0x00, 0x00, 0x00, 0x00, +/* 117 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 118 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x63, 0x36, 0x36, 0x1c, 0x1c, 0x08, 0x00, 0x00, 0x00, 0x00, +/* 119 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x6b, 0x6b, 0x6b, 0x3e, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, +/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x36, 0x1c, 0x1c, 0x1c, 0x36, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 121 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x63, 0x36, 0x36, 0x1c, 0x1c, 0x0c, 0x0c, 0x06, 0x03, 0x00, +/* 122 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x7f, 0x00, 0x00, 0x00, 0x00, +/* 123 */ 0x00, 0x00, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, +/* 124 */ 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, +/* 125 */ 0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e, 0x00, 0x00, 0x00, +/* 126 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 127 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 129 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 130 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 131 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 132 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 133 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 134 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 135 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 137 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 138 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 139 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 140 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 141 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 142 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 143 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 144 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 145 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 146 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 147 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 148 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 149 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 150 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 151 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 153 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 154 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 155 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 156 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 157 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 158 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 159 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 160 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 161 */ 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, +/* 162 */ 0x00, 0x00, 0x00, 0x08, 0x08, 0x3e, 0x6b, 0x0b, 0x0b, 0x0b, 0x6b, 0x3e, 0x08, 0x08, 0x00, 0x00, +/* 163 */ 0x00, 0x00, 0x1c, 0x36, 0x06, 0x06, 0x1f, 0x06, 0x06, 0x07, 0x6f, 0x3b, 0x00, 0x00, 0x00, 0x00, +/* 164 */ 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0x66, 0x66, 0x66, 0x3c, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 165 */ 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x66, 0x3c, 0x7e, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 166 */ 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 167 */ 0x00, 0x3c, 0x66, 0x0c, 0x1e, 0x33, 0x63, 0x66, 0x3c, 0x18, 0x33, 0x1e, 0x00, 0x00, 0x00, 0x00, +/* 168 */ 0x00, 0x00, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 169 */ 0x00, 0x00, 0x3c, 0x42, 0x99, 0xa5, 0x85, 0xa5, 0x99, 0x42, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 170 */ 0x00, 0x1e, 0x30, 0x3e, 0x33, 0x3b, 0x36, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 171 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x36, 0x1b, 0x1b, 0x36, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 172 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x60, 0x60, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 173 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 174 */ 0x00, 0x00, 0x3c, 0x42, 0x9d, 0xa5, 0x9d, 0xa5, 0xa5, 0x42, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 175 */ 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 176 */ 0x00, 0x00, 0x1c, 0x36, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, +/* 177 */ 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 178 */ 0x00, 0x1e, 0x33, 0x18, 0x0c, 0x06, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 179 */ 0x00, 0x1e, 0x33, 0x18, 0x30, 0x33, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 180 */ 0x00, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 181 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x76, 0x6e, 0x06, 0x06, 0x03, 0x00, +/* 182 */ 0x00, 0x00, 0x7e, 0x2f, 0x2f, 0x2f, 0x2e, 0x28, 0x28, 0x28, 0x28, 0x28, 0x00, 0x00, 0x00, 0x00, +/* 183 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 184 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x1e, 0x00, +/* 185 */ 0x00, 0x0c, 0x0e, 0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 186 */ 0x00, 0x1e, 0x33, 0x33, 0x33, 0x33, 0x1e, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 187 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x36, 0x6c, 0x6c, 0x36, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 188 */ 0x00, 0x10, 0x1c, 0x18, 0x18, 0x18, 0x00, 0x7f, 0x00, 0x18, 0x1c, 0x1a, 0x3e, 0x18, 0x00, 0x00, +/* 189 */ 0x00, 0x10, 0x1c, 0x18, 0x18, 0x18, 0x00, 0x7f, 0x00, 0x1c, 0x36, 0x18, 0x0c, 0x3e, 0x00, 0x00, +/* 190 */ 0x00, 0x1c, 0x36, 0x18, 0x36, 0x1c, 0x00, 0x7f, 0x00, 0x18, 0x1c, 0x1a, 0x3e, 0x18, 0x00, 0x00, +/* 191 */ 0x00, 0x00, 0x00, 0x00, 0x0c, 0x0c, 0x00, 0x0c, 0x0c, 0x06, 0x06, 0x03, 0x63, 0x3e, 0x00, 0x00, +/* 192 */ 0x0c, 0x18, 0x3e, 0x63, 0x63, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 193 */ 0x18, 0x0c, 0x3e, 0x63, 0x63, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 194 */ 0x08, 0x14, 0x3e, 0x63, 0x63, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 195 */ 0x6e, 0x3b, 0x3e, 0x63, 0x63, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 196 */ 0x36, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 197 */ 0x1c, 0x36, 0x3e, 0x63, 0x63, 0x63, 0x7f, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 198 */ 0x00, 0x00, 0xfe, 0x33, 0x33, 0x33, 0xff, 0x33, 0x33, 0x33, 0x33, 0xf3, 0x00, 0x00, 0x00, 0x00, +/* 199 */ 0x00, 0x00, 0x3c, 0x66, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x66, 0x3c, 0x18, 0x30, 0x1e, 0x00, +/* 200 */ 0x0c, 0x18, 0x7f, 0x03, 0x03, 0x03, 0x3f, 0x03, 0x03, 0x03, 0x03, 0x7f, 0x00, 0x00, 0x00, 0x00, +/* 201 */ 0x18, 0x0c, 0x7f, 0x03, 0x03, 0x03, 0x3f, 0x03, 0x03, 0x03, 0x03, 0x7f, 0x00, 0x00, 0x00, 0x00, +/* 202 */ 0x08, 0x14, 0x7f, 0x03, 0x03, 0x03, 0x3f, 0x03, 0x03, 0x03, 0x03, 0x7f, 0x00, 0x00, 0x00, 0x00, +/* 203 */ 0x36, 0x00, 0x7f, 0x03, 0x03, 0x03, 0x3f, 0x03, 0x03, 0x03, 0x03, 0x7f, 0x00, 0x00, 0x00, 0x00, +/* 204 */ 0x0c, 0x18, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 205 */ 0x30, 0x18, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 206 */ 0x18, 0x24, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 207 */ 0x66, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 208 */ 0x00, 0x00, 0x1e, 0x36, 0x66, 0x66, 0x6f, 0x66, 0x66, 0x66, 0x36, 0x1e, 0x00, 0x00, 0x00, 0x00, +/* 209 */ 0x6e, 0x3b, 0x63, 0x63, 0x67, 0x6f, 0x6f, 0x7b, 0x7b, 0x73, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 
210 */ 0x06, 0x0c, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 211 */ 0x30, 0x18, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 212 */ 0x08, 0x14, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 213 */ 0x6e, 0x3b, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 214 */ 0x36, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 215 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 216 */ 0x00, 0x20, 0x3e, 0x73, 0x73, 0x6b, 0x6b, 0x6b, 0x6b, 0x67, 0x67, 0x3e, 0x02, 0x00, 0x00, 0x00, +/* 217 */ 0x0c, 0x18, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 218 */ 0x18, 0x0c, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 219 */ 0x08, 0x14, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 220 */ 0x36, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 221 */ 0x30, 0x18, 0xc3, 0xc3, 0x66, 0x66, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, +/* 222 */ 0x00, 0x00, 0x0f, 0x06, 0x3e, 0x66, 0x66, 0x66, 0x66, 0x3e, 0x06, 0x0f, 0x00, 0x00, 0x00, 0x00, +/* 223 */ 0x00, 0x00, 0x1e, 0x33, 0x33, 0x1b, 0x33, 0x63, 0x63, 0x63, 0x63, 0x3b, 0x00, 0x00, 0x00, 0x00, +/* 224 */ 0x00, 0x0c, 0x18, 0x30, 0x00, 0x3e, 0x60, 0x7e, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 225 */ 0x00, 0x30, 0x18, 0x0c, 0x00, 0x3e, 0x60, 0x7e, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 226 */ 0x00, 0x08, 0x1c, 0x36, 0x00, 0x3e, 0x60, 0x7e, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 227 */ 0x00, 0x00, 0x6e, 0x3b, 0x00, 0x3e, 0x60, 0x7e, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 228 */ 0x00, 0x00, 0x36, 0x36, 0x00, 0x3e, 0x60, 0x7e, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 229 */ 0x00, 0x1c, 0x36, 0x1c, 0x00, 0x3e, 0x60, 0x7e, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 230 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0xdb, 0xd8, 0xfe, 0x1b, 0xdb, 0x76, 0x00, 0x00, 0x00, 0x00, +/* 231 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x63, 0x03, 0x03, 0x03, 0x63, 0x3e, 0x18, 0x30, 0x1e, 0x00, +/* 232 */ 0x00, 0x0c, 0x18, 0x30, 0x00, 0x3e, 0x63, 0x63, 0x7f, 0x03, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 233 */ 0x00, 0x30, 0x18, 0x0c, 0x00, 0x3e, 0x63, 0x63, 0x7f, 0x03, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 234 */ 0x00, 0x08, 0x1c, 0x36, 0x00, 0x3e, 0x63, 0x63, 0x7f, 0x03, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 235 */ 0x00, 0x00, 0x36, 0x36, 0x00, 0x3e, 0x63, 0x63, 0x7f, 0x03, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 236 */ 0x00, 0x06, 0x0c, 0x18, 0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x38, 0x00, 0x00, 0x00, 0x00, +/* 237 */ 0x00, 0x18, 0x0c, 0x06, 0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x38, 0x00, 0x00, 0x00, 0x00, +/* 238 */ 0x00, 0x08, 0x1c, 0x36, 0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x38, 0x00, 0x00, 0x00, 0x00, +/* 239 */ 0x00, 0x00, 0x36, 0x36, 0x00, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x38, 0x00, 0x00, 0x00, 0x00, +/* 240 */ 0x00, 0x00, 0x2c, 0x18, 0x34, 0x60, 0x7c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, +/* 241 */ 0x00, 0x00, 0x6e, 0x3b, 0x00, 0x3b, 0x67, 0x63, 0x63, 0x63, 0x63, 0x63, 0x00, 0x00, 0x00, 0x00, +/* 242 */ 0x00, 0x06, 0x0c, 0x18, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 243 */ 0x00, 0x30, 
0x18, 0x0c, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 244 */ 0x00, 0x08, 0x1c, 0x36, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 245 */ 0x00, 0x00, 0x6e, 0x3b, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 246 */ 0x00, 0x00, 0x36, 0x36, 0x00, 0x3e, 0x63, 0x63, 0x63, 0x63, 0x63, 0x3e, 0x00, 0x00, 0x00, 0x00, +/* 247 */ 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, +/* 248 */ 0x00, 0x00, 0x00, 0x00, 0x20, 0x3e, 0x73, 0x6b, 0x6b, 0x6b, 0x67, 0x3e, 0x02, 0x00, 0x00, 0x00, +/* 249 */ 0x00, 0x06, 0x0c, 0x18, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 250 */ 0x00, 0x30, 0x18, 0x0c, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 251 */ 0x00, 0x08, 0x1c, 0x36, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 252 */ 0x00, 0x00, 0x36, 0x36, 0x00, 0x63, 0x63, 0x63, 0x63, 0x63, 0x73, 0x6e, 0x00, 0x00, 0x00, 0x00, +/* 253 */ 0x00, 0x30, 0x18, 0x0c, 0x00, 0x63, 0x63, 0x36, 0x36, 0x1c, 0x1c, 0x0c, 0x0c, 0x06, 0x03, 0x00, +/* 254 */ 0x00, 0x00, 0x0f, 0x06, 0x06, 0x3e, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3e, 0x06, 0x06, 0x0f, 0x00, +/* 255 */ 0x00, 0x00, 0x36, 0x36, 0x00, 0x63, 0x63, 0x36, 0x36, 0x1c, 0x1c, 0x0c, 0x0c, 0x06, 0x03, 0x00 }; #define ISO_CHAR_MIN 0x00 diff --git a/osfmk/console/progress_meter_data.c b/osfmk/console/progress_meter_data.c index a5ed2560a..d48a1c3e1 100644 --- a/osfmk/console/progress_meter_data.c +++ b/osfmk/console/progress_meter_data.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,378 +31,386 @@ #define kProgressBarWidth (234) static const unsigned char progressmeter_leftcap1x[2][9 * 18] = { -{ - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xdc,0xc6,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2, - 0xdd,0xcc,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xc6,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xc6,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xdd,0xcc,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xfe,0xdc,0xc6,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -}, -{ - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfa,0x8b,0x41,0x33,0x33,0x33,0x33,0x33,0x33, - 0x8c,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0x41,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0x41,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0x8c,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xfa,0x8b,0x41,0x33,0x33,0x33,0x33,0x33,0x33, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -}}; + { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xdc, 0xc6, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, + 0xdd, 0xcc, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xc6, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xc6, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xdd, 0xcc, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xfe, 0xdc, 0xc6, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0x8b, 0x41, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0x8c, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0x41, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0x41, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0x8c, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 
0xfa, 0x8b, 0x41, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } +}; static const unsigned char progressmeter_leftcap2x[2][4 * 9 * 18] = { -{ - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xe5,0xcf,0xc5,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2, - 0xff,0xf9,0xcd,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2, - 0xfe,0xcd,0xc2,0xc8,0xd2,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xe5,0xc2,0xc8,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xcf,0xc2,0xd1,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xc3,0xc2,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xc3,0xc2,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xcf,0xc2,0xd2,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xe5,0xc2,0xc7,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xfd,0xcd,0xc2,0xc8,0xd2,0xd8,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xff,0xf9,0xcd,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2, - 0xff,0xff,0xfe,0xe4,0xce,0xc5,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -}, -{ - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfa,0xa7,0x60,0x3d,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xff,0xe9,0x59,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xfa,0x59,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xa8,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0x60,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0x38,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0x38,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0x60,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xa7,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xf9,0x59,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xff,0xe9,0x59,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xff,0xff,0xfa,0xa6,0x5d,0x3c,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -}}; + { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xe5, 0xcf, 0xc5, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, + 0xff, 0xf9, 0xcd, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, + 0xfe, 0xcd, 0xc2, 0xc8, 0xd2, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xe5, 0xc2, 0xc8, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xcf, 0xc2, 0xd1, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xc3, 0xc2, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xc3, 0xc2, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xcf, 0xc2, 0xd2, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xe5, 0xc2, 0xc7, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xfd, 0xcd, 0xc2, 0xc8, 0xd2, 0xd8, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xff, 0xf9, 0xcd, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, + 0xff, 0xff, 0xfe, 0xe4, 0xce, 0xc5, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfa, 0xa7, 0x60, 0x3d, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xff, 0xe9, 0x59, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xfa, 0x59, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xa8, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0x60, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0x38, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0x38, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0x60, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xa7, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xf9, 0x59, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xff, 0xe9, 0x59, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xff, 0xff, 0xfa, 0xa6, 0x5d, 0x3c, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } +}; static const unsigned char progressmeter_middle1x[2][1 * 18] = { -{ - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xc2, - 0xd9, - 0xd9, - 0xd9, - 0xd9, - 0xc2, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, -}, -{ - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, -}}; + { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xc2, + 0xd9, + 0xd9, + 0xd9, + 0xd9, + 0xc2, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + }, + { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + } +}; static const unsigned char progressmeter_middle2x[2][2 * 1 * 18] = { -{ - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xc2, - 0xc2, - 0xd9, - 0xd9, - 0xd9, - 0xd9, - 0xd9, - 0xd9, - 0xd9, - 0xd9, - 0xc2, - 0xc2, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, -}, -{ - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0x33, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, - 0xff, -}}; + { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xc2, + 0xc2, + 0xd9, + 0xd9, + 0xd9, + 0xd9, + 0xd9, + 0xd9, + 0xd9, + 0xd9, + 0xc2, + 0xc2, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + }, + { + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0x33, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + } +}; static const unsigned char progressmeter_rightcap1x[2][9 * 18] = { -{ - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc6,0xdc,0xfe, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xcc,0xde, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xc6, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xc6, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xcc,0xde, - 0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc6,0xdc,0xfe, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -}, -{ - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0x33,0x33,0x33,0x33,0x33,0x33,0x41,0x8b,0xfa, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x92, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x41, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x41, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x91, - 0x33,0x33,0x33,0x33,0x33,0x33,0x41,0x8b,0xfa, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -}}; + { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc6, 0xdc, 0xfe, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xcc, 0xde, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xc6, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xc6, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xcc, 0xde, + 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc6, 0xdc, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x41, 0x8b, 0xfa, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x92, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x41, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x41, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x91, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x41, 0x8b, 0xfa, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } +}; static const unsigned char progressmeter_rightcap2x[2][4 * 9 * 18] = { -{ - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc5,0xcf,0xe5,0xfe,0xff,0xff, - 0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xcd,0xf9,0xff, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xd2,0xc8,0xc2,0xcd,0xfd, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xc7,0xc2,0xe5, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd1,0xc2,0xcf, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xc2,0xc4, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xc2,0xc5, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd2,0xc2,0xcf, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xc7,0xc2,0xe5, - 0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd9,0xd8,0xd2,0xc7,0xc2,0xcd,0xfd, - 0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xcd,0xf9,0xff, - 0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc2,0xc5,0xce,0xe4,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -}, -{ - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x3d,0x60,0xa7,0xfa,0xff,0xff, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x59,0xe9,0xff, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x59,0xf9, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0xa8, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x60, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x39, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x3d, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x60, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0xa7, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x58,0xf9, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x59,0xe9,0xff, - 0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x33,0x3c,0x5d,0xa6,0xfa,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, -}}; + { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc5, 0xcf, 0xe5, 0xfe, 0xff, 0xff, + 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xcd, 0xf9, 0xff, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xd2, 0xc8, 0xc2, 0xcd, 0xfd, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xc7, 0xc2, 0xe5, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd1, 0xc2, 0xcf, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xc2, 0xc4, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xc2, 0xc5, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd2, 0xc2, 0xcf, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xc7, 0xc2, 0xe5, + 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, 0xd8, 0xd2, 0xc7, 0xc2, 0xcd, 0xfd, + 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xcd, 0xf9, 0xff, + 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc2, 0xc5, 0xce, 0xe4, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x3d, 0x60, 0xa7, 0xfa, 0xff, 0xff, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x59, 0xe9, 0xff, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x59, 0xf9, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0xa8, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x60, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x39, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x3d, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x60, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0xa7, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x58, 0xf9, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x59, 0xe9, 0xff, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x33, 0x3c, 0x5d, 0xa6, 0xfa, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + } +}; static const unsigned char * -progressmeter_leftcap[2][2] = { + progressmeter_leftcap[2][2] = { { &progressmeter_leftcap1x[0][0], &progressmeter_leftcap1x[1][0] }, - { &progressmeter_leftcap2x[0][0], &progressmeter_leftcap2x[1][0] } }; + { &progressmeter_leftcap2x[0][0], &progressmeter_leftcap2x[1][0] } +}; static const unsigned char * -progressmeter_middle[2][2] = { + progressmeter_middle[2][2] = { { &progressmeter_middle1x[0][0], &progressmeter_middle1x[1][0] }, - { &progressmeter_middle2x[0][0], &progressmeter_middle2x[1][0] } }; + { &progressmeter_middle2x[0][0], &progressmeter_middle2x[1][0] } +}; static const unsigned char * -progressmeter_rightcap[2][2] = { + progressmeter_rightcap[2][2] = { { &progressmeter_rightcap1x[0][0], &progressmeter_rightcap1x[1][0] }, - { &progressmeter_rightcap2x[0][0], &progressmeter_rightcap2x[1][0] } }; - + { &progressmeter_rightcap2x[0][0], &progressmeter_rightcap2x[1][0] } +}; diff --git a/osfmk/console/serial_console.c b/osfmk/console/serial_console.c index 2a74280b8..160ab8fb1 100644 --- a/osfmk/console/serial_console.c +++ b/osfmk/console/serial_console.c @@ -122,12 +122,12 @@ extern void serial_putc(char); static void _serial_putc(int, int, int); SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = { - { - .putc = _serial_putc, .getc = _serial_getc, - }, - { - .putc = vcputc, .getc = vcgetc, - }, + { + .putc = _serial_putc, .getc = _serial_getc, + }, + { + .putc = vcputc, .getc = vcgetc, + }, }; SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]); @@ -156,8 +156,9 @@ console_restore_interrupts_state(boolean_t state) * take the panic when it reenables interrupts. * Hopefully one day this is fixed so that this workaround is unnecessary. 
*/ - if (state == TRUE) + if (state == TRUE) { ml_spin_debug_clear_self(); + } #endif /* INTERRUPT_MASKED_DEBUG */ ml_set_interrupts_enabled(state); } @@ -175,8 +176,9 @@ console_init(void) int ret, i; uint32_t * p; - if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len)) + if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len)) { return; + } assert(console_ring.len > 0); @@ -211,8 +213,9 @@ console_cpu_alloc(__unused boolean_t boot_processor) /* select the next slot from the per cpu buffers at end of console_ring.buffer */ for (i = 0; i < MAX_CPU_SLOTS; i++) { p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t))); - if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p)) + if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p)) { break; + } } assert(i < MAX_CPU_SLOTS); @@ -233,8 +236,9 @@ console_cpu_free(void * buf) { assert((uintptr_t)buf > (uintptr_t)console_ring.buffer); assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE); - if (buf != NULL) + if (buf != NULL) { *(uint32_t *)buf = CPU_BUF_FREE_HEX; + } } static inline int @@ -249,8 +253,9 @@ console_ring_put(char ch) if (console_ring.used < console_ring.len) { console_ring.used++; *console_ring.write_ptr++ = ch; - if (console_ring.write_ptr - console_ring.buffer == console_ring.len) + if (console_ring.write_ptr - console_ring.buffer == console_ring.len) { console_ring.write_ptr = console_ring.buffer; + } return TRUE; } else { return FALSE; @@ -287,7 +292,7 @@ _cnputs(char * c, int size) #endif mp_disable_preemption(); - if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks)) { + if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks, LCK_GRP_NULL)) { /* If we timed out on the lock, and we're in the debugger, * copy lock data for debugging and break the lock. */ @@ -297,17 +302,18 @@ _cnputs(char * c, int size) /* Since hw_lock_to takes a pre-emption count...*/ mp_enable_preemption(); hw_lock_init(&cnputc_lock); - hw_lock_lock(&cnputc_lock); + hw_lock_lock(&cnputc_lock, LCK_GRP_NULL); } else { panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock, - _shadow_lock.lock_data, current_thread()); + _shadow_lock.lock_data, current_thread()); } } while (size-- > 0) { cons_ops[cons_ops_index].putc(0, 0, *c); - if (*c == '\n') + if (*c == '\n') { cons_ops[cons_ops_index].putc(0, 0, '\r'); + } c++; } @@ -322,7 +328,8 @@ cnputc_unbuffered(char c) } -void cnputcusr(char c) +void +cnputcusr(char c) { cnputsusr(&c, 1); } @@ -330,7 +337,6 @@ void cnputcusr(char c) void cnputsusr(char *s, int size) { - if (size > 1) { console_write(s, size); return; @@ -377,15 +383,16 @@ console_ring_try_empty(void) do { #ifdef __x86_64__ - if (handle_tlb_flushes) + if (handle_tlb_flushes) { handle_pending_TLB_flushes(); + } #endif /* __x86_64__ */ /* * Try to get the read lock on the ring buffer to empty it. * If this fails someone else is already emptying... */ - if (!simple_lock_try(&console_ring.read_lock)) { + if (!simple_lock_try(&console_ring.read_lock, LCK_GRP_NULL)) { /* * If multiple cores are spinning trying to empty the buffer, * we may suffer lock starvation (get the read lock, but @@ -402,15 +409,16 @@ console_ring_try_empty(void) /* Indicate that we're in the process of writing a block of data to the console. 
*/ (void)hw_atomic_add(&console_output, 1); - simple_lock_try_lock_loop(&console_ring.write_lock); + simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); /* try small chunk at a time, so we allow writes from other cpus into the buffer */ nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE); /* account for data to be read before wrap around */ size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr); - if (nchars_out > size_before_wrap) + if (nchars_out > size_before_wrap) { nchars_out = size_before_wrap; + } if (nchars_out > 0) { _cnputs(console_ring.read_ptr, nchars_out); @@ -433,9 +441,9 @@ console_ring_try_empty(void) * for far too long, break out. Except in panic/suspend cases * where we should clear out full buffer. */ - if (!kernel_debugger_entry_count && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE)) + if (!kernel_debugger_entry_count && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE)) { break; - + } } while (nchars_out > 0); } @@ -460,13 +468,14 @@ console_write(char * str, int size) int chunk_size = size; int i = 0; - if (size > console_ring.len) + if (size > console_ring.len) { chunk_size = CPU_CONS_BUF_SIZE; + } while (size > 0) { boolean_t state = ml_set_interrupts_enabled(FALSE); - simple_lock_try_lock_loop(&console_ring.write_lock); + simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); while (chunk_size > console_ring_space()) { simple_unlock(&console_ring.write_lock); console_restore_interrupts_state(state); @@ -474,11 +483,12 @@ console_write(char * str, int size) console_ring_try_empty(); state = ml_set_interrupts_enabled(FALSE); - simple_lock_try_lock_loop(&console_ring.write_lock); + simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); } - for (i = 0; i < chunk_size; i++) + for (i = 0; i < chunk_size; i++) { console_ring_put(str[i]); + } str = &str[i]; size -= chunk_size; @@ -536,7 +546,7 @@ restart: * it. */ if (needs_print && !cpu_buffer_put(cbp, c)) { - simple_lock_try_lock_loop(&console_ring.write_lock); + simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); if (cpu_buffer_size(cbp) > console_ring_space()) { simple_unlock(&console_ring.write_lock); @@ -547,8 +557,9 @@ restart: goto restart; } - for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) + for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) { console_ring_put(*cp); + } cbp->buf_ptr = cbp->buf_base; simple_unlock(&console_ring.write_lock); @@ -564,7 +575,7 @@ restart: } /* We printed a newline, time to flush the CPU buffer to the global buffer */ - simple_lock_try_lock_loop(&console_ring.write_lock); + simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL); /* * Is there enough space in the shared ring buffer? 
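Editor's note: the serial_console.c hunks above share one mechanical change beyond the brace-style cleanup — every lock-taking call (hw_lock_to, hw_lock_lock, simple_lock_try, simple_lock_try_lock_loop) gains a trailing lck_grp_t argument, passed as LCK_GRP_NULL here since no lock group is attributed, while the unlock paths are untouched. The fragment below is a minimal sketch of that new calling convention, mirroring the VCPUTC_LOCK_LOCK pattern in the video_console.c hunk later in this patch; it uses kernel-only xnu APIs and will not build outside the kernel tree, and the demo_* names are hypothetical, not part of the patch.

/*
 * Illustrative sketch (not part of the patch): spinlock calls after this
 * change carry an lck_grp_t * for attribution; LCK_GRP_NULL opts out.
 */
static hw_lock_data_t demo_lock;        /* hypothetical lock */

static void
demo_critical_section(uint64_t demo_timeout_ticks)
{
	/* pre-patch form was: hw_lock_to(&demo_lock, demo_timeout_ticks) */
	if (!hw_lock_to(&demo_lock, demo_timeout_ticks, LCK_GRP_NULL)) {
		panic("demo_critical_section: lock acquire timeout");
	}
	/* ... work under the lock ... */
	hw_lock_unlock(&demo_lock);     /* unlock side is unchanged */
}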
@@ -583,8 +594,9 @@ restart: goto restart; } - for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) + for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) { console_ring_put(*cp); + } cbp->buf_ptr = cbp->buf_base; simple_unlock(&console_ring.write_lock); @@ -646,10 +658,11 @@ vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean { char c; - if (0 == (*PE_poll_input)(0, &c)) + if (0 == (*PE_poll_input)(0, &c)) { return c; - else + } else { return 0; + } } #ifdef CONFIG_XNUPOST @@ -814,8 +827,9 @@ console_serial_test(void) T_LOG("Using console_write call repeatedly for 100 iterations"); for (i = 0; i < 100; i++) { console_write(&buffer[0], 14); - if ((i % 6) == 0) + if ((i % 6) == 0) { printf("\n"); + } } printf("\n"); diff --git a/osfmk/console/serial_general.c b/osfmk/console/serial_general.c index 686564ed3..fb2d0d6c5 100644 --- a/osfmk/console/serial_general.c +++ b/osfmk/console/serial_general.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,9 +39,9 @@ #include #include -extern void cons_cinput(char ch); /* The BSD routine that gets characters */ +extern void cons_cinput(char ch); /* The BSD routine that gets characters */ -SECURITY_READ_ONLY_LATE(unsigned int) serialmode; /* Serial mode keyboard and console control */ +SECURITY_READ_ONLY_LATE(unsigned int) serialmode; /* Serial mode keyboard and console control */ /* * This routine will start a thread that polls the serial port, listening for @@ -51,16 +51,18 @@ SECURITY_READ_ONLY_LATE(unsigned int) serialmode; /* Serial mode keyboard and void serial_keyboard_init(void) { - kern_return_t result; - thread_t thread; + kern_return_t result; + thread_t thread; - if(!(serialmode & SERIALMODE_INPUT)) /* Leave if we do not want a serial console */ + if (!(serialmode & SERIALMODE_INPUT)) { /* Leave if we do not want a serial console */ return; + } kprintf("Serial keyboard started\n"); result = kernel_thread_start_priority((thread_continue_t)serial_keyboard_start, NULL, MAXPRI_KERNEL, &thread); - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { panic("serial_keyboard_init"); + } thread_deallocate(thread); } @@ -79,24 +81,25 @@ serial_keyboard_poll(void) int chr; uint64_t next; - while(1) { - chr = _serial_getc(0, 1, 0, 1); /* Get a character if there is one */ - if(chr < 0) /* The serial buffer is empty */ + while (1) { + chr = _serial_getc(0, 1, 0, 1); /* Get a character if there is one */ + if (chr < 0) { /* The serial buffer is empty */ break; - cons_cinput((char)chr); /* Buffer up the character */ + } + cons_cinput((char)chr); /* Buffer up the character */ } - clock_interval_to_deadline(16, 1000000, &next); /* Get time of pop */ + clock_interval_to_deadline(16, 1000000, &next); /* Get time of pop */ - assert_wait_deadline((event_t)serial_keyboard_poll, THREAD_UNINT, next); /* Show we are "waiting" */ - thread_block((thread_continue_t)serial_keyboard_poll); /* Wait for it */ + assert_wait_deadline((event_t)serial_keyboard_poll, THREAD_UNINT, next); /* Show we are "waiting" */ + thread_block((thread_continue_t)serial_keyboard_poll); /* Wait for it */ panic("serial_keyboard_poll: Shouldn't never ever get here...\n"); } boolean_t console_is_serial(void) { - return (cons_ops_index == SERIAL_CONS_OPS); + return cons_ops_index == SERIAL_CONS_OPS; } int @@ -116,8 +119,8 @@ switch_to_serial_console(void) } /* The switch_to_{video,serial,kgdb}_console functions return a cookie that - can be used to restore the console to whatever it was before, in the - same way that splwhatever() and splx() work. */ + * can be used to restore the console to whatever it was before, in the + * same way that splwhatever() and splx() work. 
*/ void switch_to_old_console(int old_console) { @@ -127,20 +130,24 @@ switch_to_old_console(int old_console) if ((ops >= nconsops) && !squawked) { squawked = TRUE; printf("switch_to_old_console: unknown ops %d\n", ops); - } else + } else { cons_ops_index = ops; + } } void console_printbuf_state_init(struct console_printbuf_state * data, int write_on_newline, int can_block) { - if (data == NULL) + if (data == NULL) { return; + } bzero(data, sizeof(struct console_printbuf_state)); - if (write_on_newline) + if (write_on_newline) { data->flags |= CONS_PB_WRITE_NEWLINE; - if (can_block) + } + if (can_block) { data->flags |= CONS_PB_CANBLOCK; + } } void @@ -176,7 +183,8 @@ console_printbuf_putc(int ch, void * arg) } void -console_printbuf_clear(struct console_printbuf_state * info) { +console_printbuf_clear(struct console_printbuf_state * info) +{ if (info->pos != 0) { console_write(info->str, info->pos); } @@ -184,4 +192,3 @@ console_printbuf_clear(struct console_printbuf_state * info) { info->str[info->pos] = '\0'; info->total = 0; } - diff --git a/osfmk/console/serial_protos.h b/osfmk/console/serial_protos.h index 722f528f8..68f4a21e7 100644 --- a/osfmk/console/serial_protos.h +++ b/osfmk/console/serial_protos.h @@ -2,7 +2,7 @@ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
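switch_to_old_console and the reflowed comment above it describe a splx()-style contract: each switch_to_*_console call returns a cookie naming the previous console, and the caller later hands it back. The idiom in miniature, with the same out-of-range check the kernel performs (names local to this sketch):

static unsigned int cur_console;             /* plays the role of cons_ops_index */

/* Switch consoles; the return value is a cookie for the previous one. */
static int console_switch(unsigned int new_index)
{
    int old = (int)cur_console;
    cur_console = new_index;
    return old;
}

/* Consume the cookie, like splx() consumes an spl level. */
static void console_restore(int cookie, unsigned int nconsops)
{
    if ((unsigned int)cookie < nconsops) {   /* ignore corrupt cookies, as above */
        cur_console = (unsigned int)cookie;
    }
}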
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -63,8 +63,8 @@ void console_init(void); int _serial_getc(int unit, int line, boolean_t wait, boolean_t raw); struct console_ops { - void (*putc)(int, int, int); - int (*getc)(int, int, boolean_t, boolean_t); + void (*putc)(int, int, int); + int (*getc)(int, int, boolean_t, boolean_t); }; boolean_t console_is_serial(void); diff --git a/osfmk/console/video_console.c b/osfmk/console/video_console.c index 52d956da3..2034f1e51 100644 --- a/osfmk/console/video_console.c +++ b/osfmk/console/video_console.c @@ -161,7 +161,7 @@ MACRO_END #define VCPUTC_LOCK_LOCK() \ MACRO_BEGIN \ boolean_t istate = ml_get_interrupts_enabled(); \ - while (!simple_lock_try(&vcputc_lock)) \ + while (!simple_lock_try(&vcputc_lock, LCK_GRP_NULL)) \ { \ if (!istate) \ handle_pending_TLB_flushes(); \ @@ -171,7 +171,7 @@ MACRO_END #define VCPUTC_LOCK_UNLOCK() \ MACRO_BEGIN \ - simple_unlock(&vcputc_lock); \ + simple_unlock(&vcputc_lock); \ MACRO_END #else static hw_lock_data_t vcputc_lock; @@ -183,7 +183,7 @@ MACRO_END #define VCPUTC_LOCK_LOCK() \ MACRO_BEGIN \ - if (!hw_lock_to(&vcputc_lock, ~0U))\ + if (!hw_lock_to(&vcputc_lock, ~0U, LCK_GRP_NULL))\ { \ panic("VCPUTC_LOCK_LOCK"); \ } \ @@ -191,7 +191,7 @@ MACRO_END #define VCPUTC_LOCK_UNLOCK() \ MACRO_BEGIN \ - hw_lock_unlock(&vcputc_lock); \ + hw_lock_unlock(&vcputc_lock); \ MACRO_END #endif @@ -1896,38 +1896,39 @@ enum { kDataAlpha = 0x40, kDataBack = 0x80, kDataRotate = 0x03, - kDataRotate0 = 0, - kDataRotate90 = 1, - kDataRotate180 = 2, - kDataRotate270 = 3 }; static void vc_blit_rect(int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, void * backBuffer, unsigned int flags); static void vc_blit_rect_8(int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, unsigned char * backBuffer, unsigned int flags); static void vc_blit_rect_16(int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, unsigned short * backBuffer, unsigned int flags); static void vc_blit_rect_32(int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, unsigned int * backBuffer, unsigned int flags); static void vc_blit_rect_30(int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, unsigned int * backBuffer, @@ -1939,6 +1940,7 @@ static void vc_progressmeter_task( void * arg0, void * arg ); static void vc_blit_rect(int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, void * backBuffer, @@ -1951,16 +1953,16 @@ static void vc_blit_rect(int x, int y, int bx, switch( vinfo.v_depth) { case 8: if( vc_clut8 == vc_clut) - vc_blit_rect_8( x, y, bx, width, height, sourceRow, backRow, dataPtr, (unsigned char *) backBuffer, flags ); + vc_blit_rect_8( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned char *) backBuffer, flags ); break; case 16: - vc_blit_rect_16( x, y, bx, width, height, sourceRow, backRow, dataPtr, (unsigned short *) backBuffer, flags ); + vc_blit_rect_16( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned short *) backBuffer, flags ); break; case 32: - 
vc_blit_rect_32( x, y, bx, width, height, sourceRow, backRow, dataPtr, (unsigned int *) backBuffer, flags ); + vc_blit_rect_32( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned int *) backBuffer, flags ); break; case 30: - vc_blit_rect_30( x, y, bx, width, height, sourceRow, backRow, dataPtr, (unsigned int *) backBuffer, flags ); + vc_blit_rect_30( x, y, bx, width, height, sourceWidth, sourceHeight, sourceRow, backRow, dataPtr, (unsigned int *) backBuffer, flags ); break; } } @@ -1968,6 +1970,7 @@ static void vc_blit_rect(int x, int y, int bx, static void vc_blit_rect_8(int x, int y, __unused int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, __unused int backRow, const unsigned char * dataPtr, __unused unsigned char * backBuffer, @@ -1979,15 +1982,15 @@ vc_blit_rect_8(int x, int y, __unused int bx, int sx, sy, a, b, c, d; int scale = 0x10000; - a = vc_rotate_matr[kDataRotate & flags][0][0] * scale; - b = vc_rotate_matr[kDataRotate & flags][0][1] * scale; + a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; + b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; - sx = ((a + b) < 0) ? ((width * scale) - 0x8000) : 0; - sy = ((c + d) < 0) ? ((height * scale) - 0x8000) : 0; + + sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; + sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; - else if (1 == sourceRow) a = 0; dst = (volatile unsigned short *) (vinfo.v_baseaddr + (y * vinfo.v_rowbytes) + @@ -2041,6 +2044,7 @@ vc_blit_rect_8(int x, int y, __unused int bx, static void vc_blit_rect_16( int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, unsigned short * backPtr, @@ -2052,15 +2056,15 @@ static void vc_blit_rect_16( int x, int y, int bx, int sx, sy, a, b, c, d; int scale = 0x10000; - a = vc_rotate_matr[kDataRotate & flags][0][0] * scale; - b = vc_rotate_matr[kDataRotate & flags][0][1] * scale; + a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; + b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; - sx = ((a + b) < 0) ? ((width * scale) - 0x8000) : 0; - sy = ((c + d) < 0) ? ((height * scale) - 0x8000) : 0; + + sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; + sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; - else if (1 == sourceRow) a = 0; if (backPtr) backPtr += bx; @@ -2107,6 +2111,7 @@ static void vc_blit_rect_16( int x, int y, int bx, static void vc_blit_rect_32(int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, unsigned int * backPtr, @@ -2118,16 +2123,16 @@ static void vc_blit_rect_32(int x, int y, int bx, int sx, sy, a, b, c, d; int scale = 0x10000; - a = vc_rotate_matr[kDataRotate & flags][0][0] * scale; - b = vc_rotate_matr[kDataRotate & flags][0][1] * scale; + a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; + b = (sourceRow == 1) ? 
0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; - sx = ((a + b) < 0) ? ((width * scale) - 0x8000) : 0; - sy = ((c + d) < 0) ? ((height * scale) - 0x8000) : 0; - if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; - else if (1 == sourceRow) a = 0; + sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; + sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; + if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; + if (backPtr) backPtr += bx; dst = (volatile unsigned int *) (vinfo.v_baseaddr + @@ -2171,6 +2176,7 @@ static void vc_blit_rect_32(int x, int y, int bx, static void vc_blit_rect_30(int x, int y, int bx, int width, int height, + int sourceWidth, int sourceHeight, int sourceRow, int backRow, const unsigned char * dataPtr, unsigned int * backPtr, @@ -2183,16 +2189,16 @@ static void vc_blit_rect_30(int x, int y, int bx, int sx, sy, a, b, c, d; int scale = 0x10000; - a = vc_rotate_matr[kDataRotate & flags][0][0] * scale; - b = vc_rotate_matr[kDataRotate & flags][0][1] * scale; + a = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][0] * scale; + b = (sourceRow == 1) ? 0 : vc_rotate_matr[kDataRotate & flags][0][1] * scale; c = vc_rotate_matr[kDataRotate & flags][1][0] * scale; d = vc_rotate_matr[kDataRotate & flags][1][1] * scale; - sx = ((a + b) < 0) ? ((width * scale) - 0x8000) : 0; - sy = ((c + d) < 0) ? ((height * scale) - 0x8000) : 0; - if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; - else if (1 == sourceRow) a = 0; + sx = ((a + b) < 0) ? ((sourceWidth * scale) - 0x8000) : 0; + sy = ((c + d) < 0) ? ((sourceHeight * scale) - 0x8000) : 0; + if (!sourceRow) data = (unsigned int)(uintptr_t)dataPtr; + if (backPtr) backPtr += bx; dst = (volatile unsigned int *) (vinfo.v_baseaddr + @@ -2243,7 +2249,7 @@ static void vc_clean_boot_graphics(void) vc_progress_set(FALSE, 0); const unsigned char * color = (typeof(color))(uintptr_t)(vc_progress_white ? 
0x00000000 : 0xBFBFBFBF); - vc_blit_rect(0, 0, 0, vinfo.v_width, vinfo.v_height, 0, 0, color, NULL, 0); + vc_blit_rect(0, 0, 0, vinfo.v_width, vinfo.v_height, vinfo.v_width, vinfo.v_height, 0, 0, color, NULL, 0); #endif } @@ -2409,7 +2415,7 @@ void vc_display_icon( vc_progress_element * desc, x += ((vinfo.v_width - width) / 2); y += ((vinfo.v_height - height) / 2); } - vc_blit_rect( x, y, 0, width, height, width, 0, data, NULL, kDataIndexed ); + vc_blit_rect( x, y, 0, width, height, width, height, width, 0, data, NULL, kDataIndexed ); } } @@ -2472,7 +2478,7 @@ vc_progress_set(boolean_t enable, uint32_t vc_delay) if (enable) internal_enable_progressmeter(kProgressMeterKernel); s = splhigh(); - simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); if( vc_progress_enable != enable) { vc_progress_enable = enable; @@ -2540,7 +2546,7 @@ vc_progress_set(boolean_t enable, uint32_t vc_delay) } s = splhigh(); - simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); if( vc_progress_enable != enable) { vc_progress_enable = enable; @@ -2597,7 +2603,7 @@ vc_progressmeter_task(__unused void *arg0, __unused void *arg) uint64_t interval; s = splhigh(); - simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); if (vc_progressmeter_enable) { uint32_t pos = (vc_progressmeter_count >> 13); @@ -2634,7 +2640,7 @@ vc_progress_task(__unused void *arg0, __unused void *arg) const unsigned char * data; s = splhigh(); - simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); if( vc_progress_enable) do { @@ -2652,19 +2658,19 @@ vc_progress_task(__unused void *arg0, __unused void *arg) if (kVCUsePosition & vc_progress_options.options) { /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */ switch (3 & vinfo.v_rotate) { - case 0: + case kDataRotate0: x_pos = vc_progress_options.x_pos; y_pos = vc_progress_options.y_pos; break; - case 2: + case kDataRotate180: x_pos = 0xFFFFFFFF - vc_progress_options.x_pos; y_pos = 0xFFFFFFFF - vc_progress_options.y_pos; break; - case 1: + case kDataRotate90: x_pos = 0xFFFFFFFF - vc_progress_options.y_pos; y_pos = vc_progress_options.x_pos; break; - case 3: + case kDataRotate270: x_pos = vc_progress_options.y_pos; y_pos = 0xFFFFFFFF - vc_progress_options.x_pos; break; @@ -2688,7 +2694,7 @@ vc_progress_task(__unused void *arg0, __unused void *arg) data += vc_progress_count * width * height; vc_blit_rect( x, y, 0, - width, height, width, width, + width, height, width, height, width, width, data, vc_saveunder, kDataAlpha | (vc_progress_angle & kDataRotate) @@ -2734,7 +2740,7 @@ gc_pause( boolean_t pause, boolean_t graphics_now ) VCPUTC_LOCK_UNLOCK( ); - simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); if (pause) { @@ -2872,7 +2878,7 @@ initialize_screen(PE_Video * boot_vinfo, unsigned int op) /* Update the vinfo structure atomically with respect to the vc_progress task if running */ if (vc_progress) { - simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); vinfo = new_vinfo; simple_unlock(&vc_progress_lock); } @@ -3015,7 +3021,7 @@ initialize_screen(PE_Video * boot_vinfo, unsigned int op) internal_enable_progressmeter(kProgressMeterKernel); s = splhigh(); - simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); vc_progressmeter_drawn = 0; internal_set_progressmeter(vc_progressmeter_range(vc_progressmeter_count >> 13)); @@ -3086,72 +3092,85 @@ vcattach(void) #if !CONFIG_EMBEDDED -// redraw progress meter 
between pixels x1, x2, position at x3 +// redraw progress meter between pixels start, end, position at pos, +// options (including rotation) passed in flags static void -vc_draw_progress_meter(unsigned int flags, int x1, int x2, int x3) -{ - const unsigned char * data; - int x, w; - int ox, oy; - int endCapPos; - int onoff; +vc_draw_progress_meter(unsigned int flags, int start, int end, int pos) +{ + const unsigned char *data; + int i, width, bx, srcRow, backRow; + int rectX, rectY, rectW, rectH; + int endCapPos, endCapStart; + int barWidth = kProgressBarWidth * vc_uiscale; + int barHeight = kProgressBarHeight * vc_uiscale; + int capWidth = kProgressBarCapWidth * vc_uiscale; // 1 rounded fill, 0 square end int style = (0 == (2 & vc_progress_withmeter)); + // 1 white, 0 greyed out + int onoff; - ox = ((vinfo.v_width - (kProgressBarWidth * vc_uiscale)) / 2); - oy = vinfo.v_height - (vinfo.v_height / 3) - ((kProgressBarHeight * vc_uiscale) / 2); - - if (kDataBack == flags) - { - // restore back bits - vc_blit_rect(ox + x1, oy, x1, - x2, (kProgressBarHeight * vc_uiscale), 0, (kProgressBarWidth * vc_uiscale), - NULL, vc_progressmeter_backbuffer, flags); - return; - } - - for (x = x1; x < x2; x += w) + for (i = start; i < end; i += width) { - onoff = (x < x3); - endCapPos = ((style && onoff) ? x3 : (kProgressBarWidth * vc_uiscale)); - if (x < (kProgressBarCapWidth * vc_uiscale)) - { - if (x2 < (kProgressBarCapWidth * vc_uiscale)) - w = x2 - x; - else - w = (kProgressBarCapWidth * vc_uiscale) - x; - data = progressmeter_leftcap[vc_uiscale >= 2][onoff]; - data += x; - vc_blit_rect(ox + x, oy, x, w, - (kProgressBarHeight * vc_uiscale), - (kProgressBarCapWidth * vc_uiscale), - (kProgressBarWidth * vc_uiscale), - data, vc_progressmeter_backbuffer, flags); - } - else if (x < (endCapPos - (kProgressBarCapWidth * vc_uiscale))) - { - if (x2 < (endCapPos - (kProgressBarCapWidth * vc_uiscale))) - w = x2 - x; - else - w = (endCapPos - (kProgressBarCapWidth * vc_uiscale)) - x; - data = progressmeter_middle[vc_uiscale >= 2][onoff]; - vc_blit_rect(ox + x, oy, x, w, - (kProgressBarHeight * vc_uiscale), - 1, - (kProgressBarWidth * vc_uiscale), - data, vc_progressmeter_backbuffer, flags); - } - else - { - w = endCapPos - x; - data = progressmeter_rightcap[vc_uiscale >= 2][onoff]; - data += x - (endCapPos - (kProgressBarCapWidth * vc_uiscale)); - vc_blit_rect(ox + x, oy, x, w, - (kProgressBarHeight * vc_uiscale), - (kProgressBarCapWidth * vc_uiscale), - (kProgressBarWidth * vc_uiscale), - data, vc_progressmeter_backbuffer, flags); + onoff = (i < pos); + endCapPos = ((style && onoff) ? pos : barWidth); + endCapStart = endCapPos - capWidth; + if (flags & kDataBack) { // restore back bits + width = end; // loop done after this iteration + data = NULL; + srcRow = 0; + } else if (i < capWidth) { // drawing the left cap + width = (end < capWidth) ? (end - i) : (capWidth - i); + data = progressmeter_leftcap[vc_uiscale >= 2][onoff]; + data += i; + srcRow = capWidth; + } else if (i < endCapStart) { // drawing the middle + width = (end < endCapStart) ? 
(end - i) : (endCapStart - i); + data = progressmeter_middle[vc_uiscale >= 2][onoff]; + srcRow = 1; + } else { // drawing the right cap + width = endCapPos - i; + data = progressmeter_rightcap[vc_uiscale >= 2][onoff]; + data += i - endCapStart; + srcRow = capWidth; + } + + switch (flags & kDataRotate) { + case kDataRotate90: // left middle, bar goes down + rectW = barHeight; + rectH = width; + rectX = ((vinfo.v_width / 3) - (barHeight / 2)); + rectY = ((vinfo.v_height - barWidth) / 2) + i; + bx = i * barHeight; + backRow = barHeight; + break; + case kDataRotate180: // middle upper, bar goes left + rectW = width; + rectH = barHeight; + rectX = ((vinfo.v_width - barWidth) / 2) + barWidth - width - i; + rectY = (vinfo.v_height / 3) - (barHeight / 2); + bx = barWidth - width - i; + backRow = barWidth; + break; + case kDataRotate270: // right middle, bar goes up + rectW = barHeight; + rectH = width; + rectX = (vinfo.v_width - (vinfo.v_width / 3) - (barHeight / 2)); + rectY = ((vinfo.v_height - barWidth) / 2) + barWidth - width - i; + bx = (barWidth - width - i) * barHeight; + backRow = barHeight; + break; + default: + case kDataRotate0: // middle lower, bar goes right + rectW = width; + rectH = barHeight; + rectX = ((vinfo.v_width - barWidth) / 2) + i; + rectY = vinfo.v_height - (vinfo.v_height / 3) - (barHeight / 2); + bx = i; + backRow = barWidth; + break; } + vc_blit_rect(rectX, rectY, bx, rectW, rectH, width, barHeight, + srcRow, backRow, data, vc_progressmeter_backbuffer, flags); } } @@ -3163,6 +3182,7 @@ internal_enable_progressmeter(int new_value) spl_t s; void * new_buffer; boolean_t stashBackbuffer; + int flags = vinfo.v_rotate; stashBackbuffer = FALSE; new_buffer = NULL; @@ -3173,7 +3193,7 @@ internal_enable_progressmeter(int new_value) } s = splhigh(); - simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); if (kProgressMeterUser == new_value) { @@ -3188,7 +3208,7 @@ internal_enable_progressmeter(int new_value) if (kProgressMeterOff == vc_progressmeter_enable) { vc_progressmeter_backbuffer = new_buffer; - vc_draw_progress_meter(kDataAlpha | kSave, 0, (kProgressBarWidth * vc_uiscale), 0); + vc_draw_progress_meter(kDataAlpha | kSave | flags, 0, (kProgressBarWidth * vc_uiscale), 0); new_buffer = NULL; vc_progressmeter_drawn = 0; } @@ -3198,7 +3218,7 @@ internal_enable_progressmeter(int new_value) { if (kProgressMeterUser == vc_progressmeter_enable) { - vc_draw_progress_meter(kDataBack, 0, (kProgressBarWidth * vc_uiscale), vc_progressmeter_drawn); + vc_draw_progress_meter(kDataBack | flags, 0, (kProgressBarWidth * vc_uiscale), vc_progressmeter_drawn); } else stashBackbuffer = TRUE; new_buffer = vc_progressmeter_backbuffer; @@ -3229,6 +3249,7 @@ internal_set_progressmeter(int new_value) int capRedraw; // 1 rounded fill, 0 square end int style = (0 == (2 & vc_progress_withmeter)); + int flags = kDataAlpha | vinfo.v_rotate; if ((new_value < 0) || (new_value > kProgressMeterMax)) return; @@ -3244,11 +3265,11 @@ internal_set_progressmeter(int new_value) { x1 = capRedraw; if (x1 > vc_progressmeter_drawn) x1 = vc_progressmeter_drawn; - vc_draw_progress_meter(kDataAlpha, vc_progressmeter_drawn - x1, x3, x3); + vc_draw_progress_meter(flags, vc_progressmeter_drawn - x1, x3, x3); } else { - vc_draw_progress_meter(kDataAlpha, x3 - capRedraw, vc_progressmeter_drawn, x3); + vc_draw_progress_meter(flags, x3 - capRedraw, vc_progressmeter_drawn, x3); } vc_progressmeter_drawn = x3; } @@ -3273,7 +3294,7 @@ vc_set_progressmeter(int new_value) spl_t s; s = splhigh(); - 
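The rewritten vc_draw_progress_meter walks the bar as three source textures: a left cap, a one-pixel-wide middle column that gets stretched (srcRow = 1), and a right cap, with everything left of pos drawn lit and the rest greyed. A reduced sketch of just that segmentation, assuming the square-end style so the right cap always sits at the bar's end, and with hypothetical texture tables in place of the progressmeter_* bitmaps:

/* Pick the texture for position i and return the width of the run it covers. */
static int next_segment(int i, int end, int pos, int capWidth, int barWidth,
                        const unsigned char **tex)
{
    extern const unsigned char *leftcap_tex[2], *middle_tex[2], *rightcap_tex[2];
    int onoff = (i < pos);                   /* 1 = lit, 0 = greyed */
    int endCapStart = barWidth - capWidth;
    int stop;

    if (i < capWidth) {                      /* left cap: source x tracks i */
        *tex = leftcap_tex[onoff] + i;
        stop = (end < capWidth) ? end : capWidth;
    } else if (i < endCapStart) {            /* middle: one column, stretched */
        *tex = middle_tex[onoff];
        stop = (end < endCapStart) ? end : endCapStart;
    } else {                                 /* right cap */
        *tex = rightcap_tex[onoff] + (i - endCapStart);
        stop = barWidth;
    }
    return stop - i;                         /* caller advances i by this much */
}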
simple_lock(&vc_progress_lock); + simple_lock(&vc_progress_lock, LCK_GRP_NULL); if (vc_progressmeter_enable && (kProgressMeterKernel != vc_progressmeter_enable)) { diff --git a/osfmk/console/video_console.h b/osfmk/console/video_console.h index 025c51817..f9c03eee1 100644 --- a/osfmk/console/video_console.h +++ b/osfmk/console/video_console.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -46,21 +46,27 @@ extern "C" { #define kVCSysctlProgressMeterEnable "kern.progressmeterenable" #define kVCSysctlProgressMeter "kern.progressmeter" -enum -{ - kVCDarkReboot = 0x00000001, - kVCAcquireImmediate = 0x00000002, - kVCUsePosition = 0x00000004, - kVCDarkBackground = 0x00000008, - kVCLightBackground = 0x00000010, +enum{ + kVCDarkReboot = 0x00000001, + kVCAcquireImmediate = 0x00000002, + kVCUsePosition = 0x00000004, + kVCDarkBackground = 0x00000008, + kVCLightBackground = 0x00000010, +}; + +enum { + kDataRotate0 = 0, + kDataRotate90 = 1, + kDataRotate180 = 2, + kDataRotate270 = 3 }; struct vc_progress_user_options { - uint32_t options; - // fractional position of middle of spinner 0 (0.0) - 0xFFFFFFFF (1.0) - uint32_t x_pos; - uint32_t y_pos; - uint32_t resv[8]; + uint32_t options; + // fractional position of middle of spinner 0 (0.0) - 0xFFFFFFFF (1.0) + uint32_t x_pos; + uint32_t y_pos; + uint32_t resv[8]; }; typedef struct vc_progress_user_options vc_progress_user_options; @@ -69,69 +75,68 @@ typedef struct vc_progress_user_options vc_progress_user_options; void vcputc(int, int, int); -int vcgetc( int l, - int u, - boolean_t wait, - boolean_t raw ); - -void video_scroll_up( void *start, - void *end, - void *dest ); - -void video_scroll_down( void *start, /* HIGH addr */ - void *end, /* LOW addr */ - void *dest ); /* HIGH addr */ - -struct vc_info -{ - unsigned int v_height; /* pixels */ - unsigned int v_width; /* pixels */ - unsigned int v_depth; - unsigned int v_rowbytes; - unsigned long v_baseaddr; - unsigned int v_type; - char v_name[32]; - uint64_t v_physaddr; - unsigned int v_rows; /* characters */ - unsigned int v_columns; /* characters */ - unsigned int v_rowscanbytes; /* Actualy number of bytes used for display per row*/ - unsigned int v_scale; - unsigned int v_rotate; - unsigned int v_reserved[3]; +int vcgetc( int l, + int u, + boolean_t wait, + boolean_t raw ); + +void video_scroll_up( void *start, + void *end, + void *dest ); + +void video_scroll_down( void *start, /* HIGH addr */ 
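With kDataRotate0..270 now shared through video_console.h, and the blitters above threading sourceWidth/sourceHeight through so the start offsets sx/sy come from the source dimensions rather than the destination's, it may help to see what the 2x2 matrix walk computes. The kernel's fixed-point a,b,c,d stepping is the incremental form of this plain destination-to-source index map; the sketch below handles byte pixels only and is not the kernel's blitter:

/* rot: 0 = 0 deg, 1 = 90 CW, 2 = 180, 3 = 270 CW, mirroring kDataRotate0..270. */
static void rotate_blit_8(const unsigned char *src, int sw, int sh,
                          unsigned char *dst, int rot)
{
    int dw = (rot & 1) ? sh : sw;            /* 90/270 swap the axes */
    int dh = (rot & 1) ? sw : sh;

    for (int dy = 0; dy < dh; dy++) {
        for (int dx = 0; dx < dw; dx++) {
            int sx, sy;
            switch (rot & 3) {
            case 1:  sx = dy;          sy = sh - 1 - dx; break;  /* 90 CW  */
            case 2:  sx = sw - 1 - dx; sy = sh - 1 - dy; break;  /* 180    */
            case 3:  sx = sw - 1 - dy; sy = dx;          break;  /* 270 CW */
            default: sx = dx;          sy = dy;          break;  /* 0      */
            }
            dst[dy * dw + dx] = src[sy * sw + sx];
        }
    }
}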
+ void *end, /* LOW addr */ + void *dest ); /* HIGH addr */ + +struct vc_info { + unsigned int v_height; /* pixels */ + unsigned int v_width; /* pixels */ + unsigned int v_depth; + unsigned int v_rowbytes; + unsigned long v_baseaddr; + unsigned int v_type; + char v_name[32]; + uint64_t v_physaddr; + unsigned int v_rows; /* characters */ + unsigned int v_columns; /* characters */ + unsigned int v_rowscanbytes; /* Actualy number of bytes used for display per row*/ + unsigned int v_scale; + unsigned int v_rotate; + unsigned int v_reserved[3]; }; struct vc_progress_element { - unsigned int version; - unsigned int flags; - unsigned int time; - unsigned char count; - unsigned char res[3]; - int width; - int height; - int dx; - int dy; - int transparent; - unsigned int res2[3]; + unsigned int version; + unsigned int flags; + unsigned int time; + unsigned char count; + unsigned char res[3]; + int width; + int height; + int dx; + int dy; + int transparent; + unsigned int res2[3]; }; typedef struct vc_progress_element vc_progress_element; extern struct vc_progress_user_options vc_user_options; void vc_progress_initialize( vc_progress_element * desc, - const unsigned char * data1x, - const unsigned char * data2x, - const unsigned char * data3x, - const unsigned char * clut ); + const unsigned char * data1x, + const unsigned char * data2x, + const unsigned char * data3x, + const unsigned char * clut ); void vc_progress_set(boolean_t enable, uint32_t vc_delay); void vc_display_icon( vc_progress_element * desc, const unsigned char * data ); -int vc_display_lzss_icon(uint32_t dst_x, uint32_t dst_y, - uint32_t image_width, uint32_t image_height, - const uint8_t *compressed_image, - uint32_t compressed_size, - const uint8_t *clut); +int vc_display_lzss_icon(uint32_t dst_x, uint32_t dst_y, + uint32_t image_width, uint32_t image_height, + const uint8_t *compressed_image, + uint32_t compressed_size, + const uint8_t *clut); #if !CONFIG_EMBEDDED diff --git a/osfmk/console/video_scroll.c b/osfmk/console/video_scroll.c index cdfef7d9f..439787daa 100644 --- a/osfmk/console/video_scroll.c +++ b/osfmk/console/video_scroll.c @@ -35,9 +35,10 @@ video_scroll_up(void * start, void * end, void * dest) bcopy(start, dest, ((char *)end - (char *)start) << 2); } -void video_scroll_down(void * start, /* HIGH addr */ - void * end, /* LOW addr */ - void * dest) /* HIGH addr */ +void +video_scroll_down(void * start, /* HIGH addr */ + void * end, /* LOW addr */ + void * dest) /* HIGH addr */ { bcopy(end, dest, ((char *)start - (char *)end) << 2); } diff --git a/osfmk/corecrypto/cc/src/cc_clear.c b/osfmk/corecrypto/cc/src/cc_clear.c index 5fff15bba..1733f9a53 100644 --- a/osfmk/corecrypto/cc/src/cc_clear.c +++ b/osfmk/corecrypto/cc/src/cc_clear.c @@ -37,31 +37,35 @@ //rdar://problem/26986552 -#if ( CC_HAS_MEMSET_S == 1 ) && (defined( __STDC_WANT_LIB_EXT1__ ) && ( __STDC_WANT_LIB_EXT1__ == 1 ) ) -void cc_clear(size_t len, void *dst) +#if (CC_HAS_MEMSET_S == 1) && (defined(__STDC_WANT_LIB_EXT1__) && (__STDC_WANT_LIB_EXT1__ == 1)) +void +cc_clear(size_t len, void *dst) { - FIPSPOST_TRACE_EVENT; - memset_s(dst,len,0,len); + FIPSPOST_TRACE_EVENT; + memset_s(dst, len, 0, len); } #elif defined(_WIN32) && !defined(__clang__) //Clang with Microsoft CodeGen, doesn't support SecureZeroMemory #include -static void cc_clear(size_t len, void *dst) +static void +cc_clear(size_t len, void *dst) { - SecureZeroMemory(dst, len); + SecureZeroMemory(dst, len); } #else -void cc_clear(size_t len, void *dst) +void +cc_clear(size_t len, void *dst) { - 
FIPSPOST_TRACE_EVENT; - volatile char *vptr = (volatile char *)dst; - while (len--) - *vptr++ = '\0'; + FIPSPOST_TRACE_EVENT; + volatile char *vptr = (volatile char *)dst; + while (len--) { + *vptr++ = '\0'; + } } #endif /* This is an altarnative for clang that should work - void cc_clear(size_t len, void *dst) __attribute__ ((optnone)) - { - cc_zero(len,dst); - } -*/ + * void cc_clear(size_t len, void *dst) __attribute__ ((optnone)) + * { + * cc_zero(len,dst); + * } + */ diff --git a/osfmk/corecrypto/cc/src/cc_cmp_safe.c b/osfmk/corecrypto/cc/src/cc_cmp_safe.c index 7a33dff61..ee9efab11 100644 --- a/osfmk/corecrypto/cc/src/cc_cmp_safe.c +++ b/osfmk/corecrypto/cc/src/cc_cmp_safe.c @@ -34,16 +34,16 @@ #include -int cc_cmp_safe (size_t num, const void * ptr1, const void * ptr2) +int +cc_cmp_safe(size_t num, const void * ptr1, const void * ptr2) { - size_t i; - const uint8_t *s=(const uint8_t *)ptr1; - const uint8_t *t=(const uint8_t *)ptr2; - uint8_t flag=((num<=0)?1:0); // If 0 return an error - for (i=0;i -void cc_try_abort(const char * msg CC_UNUSED , ...) +void +cc_try_abort(const char * msg CC_UNUSED, ...) { - panic("%s", msg); + panic("%s", msg); } #elif CC_USE_SEPROM || CC_USE_S3 || CC_BASEBAND || CC_EFI || CC_IBOOT || CC_RTKIT || CC_RTKITROM -void cc_try_abort(const char * msg CC_UNUSED, ...) +void +cc_try_abort(const char * msg CC_UNUSED, ...) { - //Do nothing and return because we don't have panic() in those - //environments. Make sure you return error, when using cc_try_abort() in above environments + //Do nothing and return because we don't have panic() in those + //environments. Make sure you return error, when using cc_try_abort() in above environments } #else #include -void cc_try_abort(const char * msg CC_UNUSED, ...) +void +cc_try_abort(const char * msg CC_UNUSED, ...) { - abort(); + abort(); } #endif diff --git a/osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c b/osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c index 47486191c..5757bd413 100644 --- a/osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c +++ b/osfmk/corecrypto/ccdbrg/src/ccdrbg_nisthmac.c @@ -44,56 +44,56 @@ // /* - This HMAC DBRG is described in: - - SP 800-90 A Rev. 1 (2nd Draft) - DRAFT Recommendation for Random Number Generation Using Deterministic Random Bit Generators - April 2014 - - - See in particular - - 10.1.2 HMAC_DRBG (p 45) - - B.2 HMAC_DRBGExample (p 83) - - We support maximum security strength of 256 bits - Note that the example in B.2 is very limited, refer to §10.1.2 for more + * This HMAC DBRG is described in: + * + * SP 800-90 A Rev. 1 (2nd Draft) + * DRAFT Recommendation for Random Number Generation Using Deterministic Random Bit Generators + * April 2014 + * + * + * See in particular + * - 10.1.2 HMAC_DRBG (p 45) + * - B.2 HMAC_DRBGExample (p 83) + * + * We support maximum security strength of 256 bits + * Note that the example in B.2 is very limited, refer to §10.1.2 for more */ /* - The Get_entropy_input function is specified in pseudocode in [SP 800-90C] for various RBG constructions; - however, in general, the function has the following meaning: - Get_entropy_input: A function that is used to obtain entropy input. The function call is: - (status, entropy_input) = Get_entropy_input (min_entropy, min_ length, max_ length, prediction_resistance_request), - which requests a string of bits (entropy_input) with at least min_entropy bits of entropy. The length for the string - shall be equal to or greater than min_length bits, and less than or equal to max_length bits. 
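Two corecrypto hygiene idioms are being restyled in these hunks: cc_clear's wipe through a volatile pointer, which the optimizer may not elide the way it can a plain memset on a buffer about to go out of scope, and cc_cmp_safe's branch-free comparison, whose running time does not depend on where the buffers first differ. Restated compactly outside corecrypto:

#include <stdint.h>
#include <stddef.h>

/* Zeroization the compiler cannot drop: every store is through a volatile lvalue. */
static void secure_wipe(void *dst, size_t len)
{
    volatile unsigned char *p = (volatile unsigned char *)dst;
    while (len--) {
        *p++ = 0;
    }
}

/* Constant-time equality: OR-accumulate the byte XORs, test only at the end.
 * As in cc_cmp_safe, zero-length input is reported as a mismatch. */
static int ct_compare(size_t n, const void *a, const void *b)
{
    const uint8_t *s = (const uint8_t *)a;
    const uint8_t *t = (const uint8_t *)b;
    uint8_t flag = (n == 0);
    for (size_t i = 0; i < n; i++) {
        flag |= s[i] ^ t[i];
    }
    return flag != 0;                        /* 0 means equal */
}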
The - prediction_resistance_request parameter indicates whether or not prediction resistance is to be provided during the request - (i.e., whether fresh entropy is required). A status code is also returned from the function. + * The Get_entropy_input function is specified in pseudocode in [SP 800-90C] for various RBG constructions; + * however, in general, the function has the following meaning: + * Get_entropy_input: A function that is used to obtain entropy input. The function call is: + * (status, entropy_input) = Get_entropy_input (min_entropy, min_ length, max_ length, prediction_resistance_request), + * which requests a string of bits (entropy_input) with at least min_entropy bits of entropy. The length for the string + * shall be equal to or greater than min_length bits, and less than or equal to max_length bits. The + * prediction_resistance_request parameter indicates whether or not prediction resistance is to be provided during the request + * (i.e., whether fresh entropy is required). A status code is also returned from the function. */ /* - Check the validity of the input parameters. - 1. If (requested_instantiation_security_strength > 256), then Return (“Invalid - requested_instantiation_security_strength”, −1). - 2. If (len (personalization_string) > 160), then Return (“Personalization_string - too long”, −1) - Comment: Set the security_strength to one of the valid security strengths. - 3. If (requested_security_strength ≤ 112), then security_strength = 112 Else (requested_ security_strength ≤ 128), then security_strength = 128 Else (requested_ security_strength ≤ 192), then security_strength = 192 Else security_strength = 256. - Comment: Get the entropy_input and the nonce. - 4. min_entropy = 1.5 × security_strength. - 5. (status, entropy_input) = Get_entropy_input (min_entropy, 1000). - 6. If (status ≠ “Success”), then Return (status, −1). + * Check the validity of the input parameters. + * 1. If (requested_instantiation_security_strength > 256), then Return (“Invalid + * requested_instantiation_security_strength”, −1). + * 2. If (len (personalization_string) > 160), then Return (“Personalization_string + * too long”, −1) + * Comment: Set the security_strength to one of the valid security strengths. + * 3. If (requested_security_strength ≤ 112), then security_strength = 112 Else (requested_ security_strength ≤ 128), then security_strength = 128 Else (requested_ security_strength ≤ 192), then security_strength = 192 Else security_strength = 256. + * Comment: Get the entropy_input and the nonce. + * 4. min_entropy = 1.5 × security_strength. + * 5. (status, entropy_input) = Get_entropy_input (min_entropy, 1000). + * 6. If (status ≠ “Success”), then Return (status, −1). */ /* - 1. highest_supported_security_strength = 256. - 2. Output block (outlen) = 256 bits. - 3. Required minimum entropy for the entropy input at instantiation = 3/2 security_strength (this includes the entropy required for the nonce). - 4. Seed length (seedlen) = 440 bits. - 5. Maximum number of bits per request (max_number_of_bits_per_request) = 7500 - bits. - 6. Reseed_interval (reseed_ interval) = 10,000 requests. - 7. Maximum length of the personalization string (max_personalization_string_length) = 160 bits. - 8. Maximum length of the entropy input (max _length) = 1000 bits. + * 1. highest_supported_security_strength = 256. + * 2. Output block (outlen) = 256 bits. + * 3. 
Required minimum entropy for the entropy input at instantiation = 3/2 security_strength (this includes the entropy required for the nonce). + * 4. Seed length (seedlen) = 440 bits. + * 5. Maximum number of bits per request (max_number_of_bits_per_request) = 7500 + * bits. + * 6. Reseed_interval (reseed_ interval) = 10,000 requests. + * 7. Maximum length of the personalization string (max_personalization_string_length) = 160 bits. + * 8. Maximum length of the entropy input (max _length) = 1000 bits. */ // @@ -106,15 +106,15 @@ #define MIN_REQ_ENTROPY(di) ((di)->output_size/2) struct ccdrbg_nisthmac_state { - const struct ccdrbg_nisthmac_custom *custom; //ccdrbg_nisthmac_state does not need to store ccdrbg_info. ccdrbg_nisthmac_custom is sufficient - size_t bytesLeft; - uint64_t reseed_counter; // the reseed counter should be able to hole 2^^48. size_t might be smaller than 48 bits - size_t vsize; - size_t keysize; - uint8_t v[2*NH_MAX_OUTPUT_BLOCK_SIZE]; - uint8_t *vptr; - uint8_t *nextvptr; - uint8_t key[NH_MAX_KEY_SIZE]; + const struct ccdrbg_nisthmac_custom *custom; //ccdrbg_nisthmac_state does not need to store ccdrbg_info. ccdrbg_nisthmac_custom is sufficient + size_t bytesLeft; + uint64_t reseed_counter; // the reseed counter should be able to hole 2^^48. size_t might be smaller than 48 bits + size_t vsize; + size_t keysize; + uint8_t v[2 * NH_MAX_OUTPUT_BLOCK_SIZE]; + uint8_t *vptr; + uint8_t *nextvptr; + uint8_t key[NH_MAX_KEY_SIZE]; }; #define DRBG_NISTHMAC_DEBUG 0 @@ -123,10 +123,12 @@ struct ccdrbg_nisthmac_state { #if DRBG_NISTHMAC_DEBUG #include "cc_debug.h" -static void dumpState(const char *label, struct ccdrbg_nisthmac_state *state) { - //cc_print(label, state->vsize, state->nextvptr); - cc_print(label, state->vsize, state->vptr); - cc_print(label, state->keysize, state->key); +static void +dumpState(const char *label, struct ccdrbg_nisthmac_state *state) +{ + //cc_print(label, state->vsize, state->nextvptr); + cc_print(label, state->vsize, state->vptr); + cc_print(label, state->keysize, state->key); } #endif @@ -134,168 +136,183 @@ static void dumpState(const char *label, struct ccdrbg_nisthmac_state *state) { static void done(struct ccdrbg_state *drbg); /* - NIST SP 800-90A, Rev. 1 HMAC_DRBG April 2014, p 46 - - HMAC_DRBG_Update (provided_data, K, V): - 1. provided_data: The data to be used. - 2. K: The current value of Key. - 3. V: The current value of V. - Output: - 1. K: The new value for Key. - 2. V: The new value for V. - - HMAC_DRBG Update Process: - - 1. K = HMAC (K, V || 0x00 || provided_data). - 2. V=HMAC(K,V). - 3. If (provided_data = Null), then return K and V. - 4. K = HMAC (K, V || 0x01 || provided_data). - 5. V=HMAC(K,V). - 6. Return K and V. + * NIST SP 800-90A, Rev. 1 HMAC_DRBG April 2014, p 46 + * + * HMAC_DRBG_Update (provided_data, K, V): + * 1. provided_data: The data to be used. + * 2. K: The current value of Key. + * 3. V: The current value of V. + * Output: + * 1. K: The new value for Key. + * 2. V: The new value for V. + * + * HMAC_DRBG Update Process: + * + * 1. K = HMAC (K, V || 0x00 || provided_data). + * 2. V=HMAC(K,V). + * 3. If (provided_data = Null), then return K and V. + * 4. K = HMAC (K, V || 0x01 || provided_data). + * 5. V=HMAC(K,V). + * 6. Return K and V. 
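The update step restated in the comment above, K = HMAC(K, V || 0x00 || data), V = HMAC(K, V), then a second pass with 0x01 only when data was supplied, is compact enough to write against any one-shot HMAC. A sketch for the SHA-256 case (outlen = 32) using OpenSSL's HMAC() in place of corecrypto's cchmac; the 256-byte bound on provided data exists only to keep the sketch allocation-free:

#include <openssl/hmac.h>
#include <openssl/evp.h>
#include <stdint.h>
#include <string.h>

static void drbg_update(uint8_t K[32], uint8_t V[32], const uint8_t *data, size_t dlen)
{
    uint8_t buf[32 + 1 + 256];               /* V || sep || provided_data */

    if (dlen > 256) {
        return;                              /* sketch-only size cap */
    }
    for (uint8_t sep = 0; sep <= 1; sep++) {
        memcpy(buf, V, 32);
        buf[32] = sep;                       /* 0x00 on the first pass, 0x01 on the second */
        if (dlen) {
            memcpy(buf + 33, data, dlen);
        }
        HMAC(EVP_sha256(), K, 32, buf, 33 + dlen, K, NULL);  /* K = HMAC(K, V||sep||data) */
        HMAC(EVP_sha256(), K, 32, V, 32, V, NULL);           /* V = HMAC(K, V)            */
        if (dlen == 0) {
            break;                           /* step 3: no provided_data, return K and V */
        }
    }
}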
*/ // was: size_t providedDataLength, const void *providedData /* - To handle the case where we have three strings that are concatenated, - we pass in three (ptr, len) pairs + * To handle the case where we have three strings that are concatenated, + * we pass in three (ptr, len) pairs */ -static int hmac_dbrg_update(struct ccdrbg_state *drbg, - size_t daLen, const void *da, - size_t dbLen, const void *db, - size_t dcLen, const void *dc - ) +static int +hmac_dbrg_update(struct ccdrbg_state *drbg, + size_t daLen, const void *da, + size_t dbLen, const void *db, + size_t dcLen, const void *dc + ) { - int rc=CCDRBG_STATUS_ERROR; - struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; - const struct ccdigest_info *di = state->custom->di; - - const unsigned char cZero = 0x00; - const unsigned char cOne = 0x01; - - cchmac_ctx_decl(di->state_size, di->block_size, ctx); - cchmac_init(di, ctx, state->keysize, state->key); - - // 1. K = HMAC (K, V || 0x00 || provided_data). - cchmac_update(di, ctx, state->vsize, state->vptr); - cchmac_update(di, ctx, 1, &cZero); - if (da && daLen) cchmac_update(di, ctx, daLen, da); - if (db && dbLen) cchmac_update(di, ctx, dbLen, db); - if (dc && dcLen) cchmac_update(di, ctx, dcLen, dc); - cchmac_final(di, ctx, state->key); - - // One parameter must be non-empty, or return - if (((da && daLen) || (db && dbLen) || (dc && dcLen))) { - // 2. V=HMAC(K,V). - cchmac(di, state->keysize, state->key, state->vsize, state->vptr, state->vptr); - // 4. K = HMAC (K, V || 0x01 || provided_data). - cchmac_init(di, ctx, state->keysize, state->key); - cchmac_update(di, ctx, state->vsize, state->vptr); - cchmac_update(di, ctx, 1, &cOne); - if (da && daLen) cchmac_update(di, ctx, daLen, da); - if (db && dbLen) cchmac_update(di, ctx, dbLen, db); - if (dc && dcLen) cchmac_update(di, ctx, dcLen, dc); - cchmac_final(di, ctx, state->key); - } - // If additional data 5. V=HMAC(K,V) - // If no addtional data, this is step 2. V=HMAC(K,V). - state->bytesLeft = 0; - - // FIPS 140-2 4.9.2 Conditional Tests - // "the first n-bit block generated after power-up, initialization, or reset shall not be used, but shall be saved for comparison with the next n-bit block to be generated" - // Generate the first block and the second block. Compare for FIPS and discard the first block - // We keep the second block as the first set of data to be returned - cchmac(di, state->keysize, state->key, state->vsize, state->vptr, state->vptr); // First block - cchmac(di, state->keysize, state->key, state->vsize, state->vptr, state->nextvptr); // First to be returned - if (0==cc_cmp_safe(state->vsize, state->vptr, state->nextvptr)) { - //The world as we know it has come to an end - //the DRBG data structure is zeroized. subsequent calls to - //DRBG ends up in NULL dereferencing and/or unpredictable state. - //catastrophic error in SP 800-90A - done(drbg); - rc=CCDRBG_STATUS_ABORT; - cc_try_abort(NULL); - goto errOut; - } - rc=CCDRBG_STATUS_OK; + int rc = CCDRBG_STATUS_ERROR; + struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; + const struct ccdigest_info *di = state->custom->di; + + const unsigned char cZero = 0x00; + const unsigned char cOne = 0x01; + + cchmac_ctx_decl(di->state_size, di->block_size, ctx); + cchmac_init(di, ctx, state->keysize, state->key); + + // 1. K = HMAC (K, V || 0x00 || provided_data). 
+ cchmac_update(di, ctx, state->vsize, state->vptr); + cchmac_update(di, ctx, 1, &cZero); + if (da && daLen) { + cchmac_update(di, ctx, daLen, da); + } + if (db && dbLen) { + cchmac_update(di, ctx, dbLen, db); + } + if (dc && dcLen) { + cchmac_update(di, ctx, dcLen, dc); + } + cchmac_final(di, ctx, state->key); + + // One parameter must be non-empty, or return + if (((da && daLen) || (db && dbLen) || (dc && dcLen))) { + // 2. V=HMAC(K,V). + cchmac(di, state->keysize, state->key, state->vsize, state->vptr, state->vptr); + // 4. K = HMAC (K, V || 0x01 || provided_data). + cchmac_init(di, ctx, state->keysize, state->key); + cchmac_update(di, ctx, state->vsize, state->vptr); + cchmac_update(di, ctx, 1, &cOne); + if (da && daLen) { + cchmac_update(di, ctx, daLen, da); + } + if (db && dbLen) { + cchmac_update(di, ctx, dbLen, db); + } + if (dc && dcLen) { + cchmac_update(di, ctx, dcLen, dc); + } + cchmac_final(di, ctx, state->key); + } + // If additional data 5. V=HMAC(K,V) + // If no addtional data, this is step 2. V=HMAC(K,V). + state->bytesLeft = 0; + + // FIPS 140-2 4.9.2 Conditional Tests + // "the first n-bit block generated after power-up, initialization, or reset shall not be used, but shall be saved for comparison with the next n-bit block to be generated" + // Generate the first block and the second block. Compare for FIPS and discard the first block + // We keep the second block as the first set of data to be returned + cchmac(di, state->keysize, state->key, state->vsize, state->vptr, state->vptr); // First block + cchmac(di, state->keysize, state->key, state->vsize, state->vptr, state->nextvptr); // First to be returned + if (0 == cc_cmp_safe(state->vsize, state->vptr, state->nextvptr)) { + //The world as we know it has come to an end + //the DRBG data structure is zeroized. subsequent calls to + //DRBG ends up in NULL dereferencing and/or unpredictable state. + //catastrophic error in SP 800-90A + done(drbg); + rc = CCDRBG_STATUS_ABORT; + cc_try_abort(NULL); + goto errOut; + } + rc = CCDRBG_STATUS_OK; errOut: - return rc; + return rc; } //make sure state is initialized, before calling this function -static int validate_inputs(struct ccdrbg_nisthmac_state *state, - size_t entropyLength, - size_t additionalInputLength, - size_t psLength) +static int +validate_inputs(struct ccdrbg_nisthmac_state *state, + size_t entropyLength, + size_t additionalInputLength, + size_t psLength) { - int rc; - const struct ccdrbg_nisthmac_custom *custom=state->custom; - const struct ccdigest_info *di = custom->di; + int rc; + const struct ccdrbg_nisthmac_custom *custom = state->custom; + const struct ccdigest_info *di = custom->di; - rc =CCDRBG_STATUS_ERROR; - //buffer size checks - cc_require (di->output_size<=sizeof(state->v)/2, end); //digest size too long - cc_require (di->output_size<=sizeof(state->key), end); //digest size too long + rc = CCDRBG_STATUS_ERROR; + //buffer size checks + cc_require(di->output_size <= sizeof(state->v) / 2, end); //digest size too long + cc_require(di->output_size <= sizeof(state->key), end); //digest size too long - //NIST SP800 compliance checks - //the following maximum checks are redundant if long is 32 bits. + //NIST SP800 compliance checks + //the following maximum checks are redundant if long is 32 bits. 
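validate_inputs above leans on corecrypto's cc_require(), a branch-to-label macro: stage the error code you would report, test the condition, and fall through to a single exit on failure. The shape of the idiom, under a hypothetical macro name:

#include <stddef.h>

#define REQUIRE(cond, label) do { if (!(cond)) { goto label; } } while (0)

static int validate(size_t len, size_t max_len)
{
    int rc = -1;                             /* staged error for the next check */
    REQUIRE(len <= max_len, end);            /* on failure, rc is already set   */
    rc = 0;                                  /* every check passed              */
end:
    return rc;
}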
- rc=CCDRBG_STATUS_PARAM_ERROR; - cc_require (psLength <= CCDRBG_MAX_PSINPUT_SIZE, end); //personalization string too long - cc_require (entropyLength <= CCDRBG_MAX_ENTROPY_SIZE, end); //supplied too much entropy - cc_require (additionalInputLength <= CCDRBG_MAX_ADDITIONALINPUT_SIZE, end); //additional input too long - cc_require (entropyLength >= MIN_REQ_ENTROPY(di), end); //supplied too litle entropy + rc = CCDRBG_STATUS_PARAM_ERROR; + cc_require(psLength <= CCDRBG_MAX_PSINPUT_SIZE, end); //personalization string too long + cc_require(entropyLength <= CCDRBG_MAX_ENTROPY_SIZE, end); //supplied too much entropy + cc_require(additionalInputLength <= CCDRBG_MAX_ADDITIONALINPUT_SIZE, end); //additional input too long + cc_require(entropyLength >= MIN_REQ_ENTROPY(di), end); //supplied too litle entropy - cc_require(di->output_size<=NH_MAX_OUTPUT_BLOCK_SIZE, end); //the requested security strength is not supported + cc_require(di->output_size <= NH_MAX_OUTPUT_BLOCK_SIZE, end); //the requested security strength is not supported - rc=CCDRBG_STATUS_OK; + rc = CCDRBG_STATUS_OK; end: - return rc; + return rc; } /* - NIST SP 800-90A, Rev. 1 April 2014 B.2.2, p 84 - - HMAC_DRBG_Instantiate_algorithm (...): - Input: bitstring (entropy_input, personalization_string). - Output: bitstring (V, Key), integer reseed_counter. - - Process: - 1. seed_material = entropy_input || personalization_string. - 2. Set Key to outlen bits of zeros. - 3. Set V to outlen/8 bytes of 0x01. - 4. (Key, V) = HMAC_DRBG_Update (seed_material, Key, V). - 5. reseed_counter = 1. - 6. Return (V, Key, reseed_counter). + * NIST SP 800-90A, Rev. 1 April 2014 B.2.2, p 84 + * + * HMAC_DRBG_Instantiate_algorithm (...): + * Input: bitstring (entropy_input, personalization_string). + * Output: bitstring (V, Key), integer reseed_counter. + * + * Process: + * 1. seed_material = entropy_input || personalization_string. + * 2. Set Key to outlen bits of zeros. + * 3. Set V to outlen/8 bytes of 0x01. + * 4. (Key, V) = HMAC_DRBG_Update (seed_material, Key, V). + * 5. reseed_counter = 1. + * 6. Return (V, Key, reseed_counter). */ // This version does not do memory allocation //SP800-90 A: Required minimum entropy for instantiate and reseed=security_strength -static int hmac_dbrg_instantiate_algorithm(struct ccdrbg_state *drbg, - size_t entropyLength, const void *entropy, - size_t nonceLength, const void *nonce, - size_t psLength, const void *ps) +static int +hmac_dbrg_instantiate_algorithm(struct ccdrbg_state *drbg, + size_t entropyLength, const void *entropy, + size_t nonceLength, const void *nonce, + size_t psLength, const void *ps) { - // TODO: The NIST code passes nonce (i.e. HMAC key) to generate, but cc interface isn't set up that way - struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; + // TODO: The NIST code passes nonce (i.e. HMAC key) to generate, but cc interface isn't set up that way + struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; - // 1. seed_material = entropy_input || nonce || personalization_string. + // 1. seed_material = entropy_input || nonce || personalization_string. - // 2. Set Key to outlen bits of zeros. - cc_zero(state->keysize, state->key); + // 2. Set Key to outlen bits of zeros. + cc_zero(state->keysize, state->key); - // 3. Set V to outlen/8 bytes of 0x01. - CC_MEMSET(state->vptr, 0x01, state->vsize); + // 3. Set V to outlen/8 bytes of 0x01. + CC_MEMSET(state->vptr, 0x01, state->vsize); - // 4. (Key, V) = HMAC_DRBG_Update (seed_material, Key, V). 
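Instantiation, per the B.2.2 process quoted above, is fixed starting values plus one update over the concatenated seed material. Continuing the OpenSSL-based sketch from drbg_update(), with entropy || nonce || personalization pre-concatenated by the caller for brevity:

/* seed = entropy_input || nonce || personalization_string (step 1). */
static uint64_t drbg_instantiate(uint8_t K[32], uint8_t V[32],
                                 const uint8_t *seed, size_t slen)
{
    memset(K, 0x00, 32);                     /* 2. Key = outlen bits of zeros    */
    memset(V, 0x01, 32);                     /* 3. V   = outlen/8 bytes of 0x01  */
    drbg_update(K, V, seed, slen);           /* 4. HMAC_DRBG_Update(seed, K, V)  */
    return 1;                                /* 5. reseed_counter = 1            */
}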
- hmac_dbrg_update(drbg, entropyLength, entropy, nonceLength, nonce, psLength, ps); + // 4. (Key, V) = HMAC_DRBG_Update (seed_material, Key, V). + hmac_dbrg_update(drbg, entropyLength, entropy, nonceLength, nonce, psLength, ps); - // 5. reseed_counter = 1. - state->reseed_counter = 1; + // 5. reseed_counter = 1. + state->reseed_counter = 1; - return CCDRBG_STATUS_OK; + return CCDRBG_STATUS_OK; } // In NIST terminology, the nonce is the HMAC key and ps is the personalization string @@ -303,200 +320,207 @@ static int hmac_dbrg_instantiate_algorithm(struct ccdrbg_state *drbg, // min_entropy = NH_REQUIRED_MIN_ENTROPY(security_strength) // bytes of entropy -static int init(const struct ccdrbg_info *info, struct ccdrbg_state *drbg, - size_t entropyLength, const void* entropy, - size_t nonceLength, const void* nonce, - size_t psLength, const void* ps) +static int +init(const struct ccdrbg_info *info, struct ccdrbg_state *drbg, + size_t entropyLength, const void* entropy, + size_t nonceLength, const void* nonce, + size_t psLength, const void* ps) { - struct ccdrbg_nisthmac_state *state=(struct ccdrbg_nisthmac_state *)drbg; - state->bytesLeft = 0; - state->custom = info->custom; //we only need to get the custom parameter from the info structure. - - int rc = validate_inputs(state , entropyLength, 0, psLength); - if(rc!=CCDRBG_STATUS_OK){ - //clear everything if cannot initialize. The idea is that if the caller doesn't check the output of init() and init() fails, - //the system crashes by NULL dereferencing after a call to generate, rather than generating bad random numbers. - done(drbg); - return rc; - } - - const struct ccdigest_info *di = state->custom->di; - state->vsize = di->output_size; - state->keysize = di->output_size; - state->vptr=state->v; - state->nextvptr=state->v+state->vsize; - - // 7. (V, Key, reseed_counter) = HMAC_DRBG_Instantiate_algorithm (entropy_input, personalization_string). - hmac_dbrg_instantiate_algorithm(drbg, entropyLength, entropy, nonceLength, nonce, psLength, ps); + struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; + state->bytesLeft = 0; + state->custom = info->custom; //we only need to get the custom parameter from the info structure. + + int rc = validate_inputs(state, entropyLength, 0, psLength); + if (rc != CCDRBG_STATUS_OK) { + //clear everything if cannot initialize. The idea is that if the caller doesn't check the output of init() and init() fails, + //the system crashes by NULL dereferencing after a call to generate, rather than generating bad random numbers. + done(drbg); + return rc; + } + + const struct ccdigest_info *di = state->custom->di; + state->vsize = di->output_size; + state->keysize = di->output_size; + state->vptr = state->v; + state->nextvptr = state->v + state->vsize; + + // 7. (V, Key, reseed_counter) = HMAC_DRBG_Instantiate_algorithm (entropy_input, personalization_string). + hmac_dbrg_instantiate_algorithm(drbg, entropyLength, entropy, nonceLength, nonce, psLength, ps); #if DRBG_NISTHMAC_DEBUG - dumpState("Init: ", state); + dumpState("Init: ", state); #endif - return CCDRBG_STATUS_OK; - + return CCDRBG_STATUS_OK; } /* - 10.1.2.4 Reseeding an HMAC_DRBG Instantiation - Notes for the reseed function specified in Section 9.2: - The reseeding of an HMAC_DRBG instantiation requires a call to the Reseed_function specified in Section 9.2. - Process step 6 of that function calls the reseed algorithm specified in this section. The values for min_length - are provided in Table 2 of Section 10.1. 
- - The reseed algorithm: - Let HMAC_DRBG_Update be the function specified in Section 10.1.2.2. The following process or its equivalent - shall be used as the reseed algorithm for this DRBG mechanism (see step 6 of the reseed process in Section 9.2): - - HMAC_DRBG_Reseed_algorithm (working_state, entropy_input, additional_input): - 1. working_state: The current values for V, Key and reseed_counter (see Section 10.1.2.1). - 2. entropy_input: The string of bits obtained from the source of entropy input. - 3. additional_input: The additional input string received from the consuming application. - Note that the length of the additional_input string may be zero. - - Output: - 1. new_working_state: The new values for V, Key and reseed_counter. HMAC_DRBG Reseed Process: - 1. seed_material = entropy_input || additional_input. - 2. (Key, V) = HMAC_DRBG_Update (seed_material, Key, V). 3. reseed_counter = 1. - 4. Return V, Key and reseed_counter as the new_working_state. + * 10.1.2.4 Reseeding an HMAC_DRBG Instantiation + * Notes for the reseed function specified in Section 9.2: + * The reseeding of an HMAC_DRBG instantiation requires a call to the Reseed_function specified in Section 9.2. + * Process step 6 of that function calls the reseed algorithm specified in this section. The values for min_length + * are provided in Table 2 of Section 10.1. + * + * The reseed algorithm: + * Let HMAC_DRBG_Update be the function specified in Section 10.1.2.2. The following process or its equivalent + * shall be used as the reseed algorithm for this DRBG mechanism (see step 6 of the reseed process in Section 9.2): + * + * HMAC_DRBG_Reseed_algorithm (working_state, entropy_input, additional_input): + * 1. working_state: The current values for V, Key and reseed_counter (see Section 10.1.2.1). + * 2. entropy_input: The string of bits obtained from the source of entropy input. + * 3. additional_input: The additional input string received from the consuming application. + * Note that the length of the additional_input string may be zero. + * + * Output: + * 1. new_working_state: The new values for V, Key and reseed_counter. HMAC_DRBG Reseed Process: + * 1. seed_material = entropy_input || additional_input. + * 2. (Key, V) = HMAC_DRBG_Update (seed_material, Key, V). 3. reseed_counter = 1. + * 4. Return V, Key and reseed_counter as the new_working_state. */ static int reseed(struct ccdrbg_state *drbg, - size_t entropyLength, const void *entropy, - size_t additionalLength, const void *additional) + size_t entropyLength, const void *entropy, + size_t additionalLength, const void *additional) { + struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; + int rc = validate_inputs(state, entropyLength, additionalLength, 0); + if (rc != CCDRBG_STATUS_OK) { + return rc; + } - struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; - int rc = validate_inputs(state, entropyLength, additionalLength, 0); - if(rc!=CCDRBG_STATUS_OK) return rc; - - int rx = hmac_dbrg_update(drbg, entropyLength, entropy, additionalLength, additional, 0, NULL); - state->reseed_counter = 1; + int rx = hmac_dbrg_update(drbg, entropyLength, entropy, additionalLength, additional, 0, NULL); + state->reseed_counter = 1; #if DRBG_NISTHMAC_DEBUG - dumpState("Reseed: ", state); + dumpState("Reseed: ", state); #endif - return rx; + return rx; } /* - HMAC_DRBG_Generate_algorithm: - Input: bitstring (V, Key), integer (reseed_counter, requested_number_of_bits). 
- Output: string status, bitstring (pseudorandom_bits, V, Key), integer reseed_counter. - - Process: - 1. If (reseed_counter ≥ 10,000), then Return (“Reseed required”, Null, V, Key, reseed_counter). - 2. temp = Null. - 3. While (len (temp) < requested_no_of_bits) do: - 3.1 V = HMAC (Key, V). - 3.2 temp = temp || V. - 4. pseudorandom_bits = Leftmost (requested_no_of_bits) of temp. - 5. (Key, V) = HMAC_DRBG_Update (Null, Key, V). - 6. reseed_counter = reseed_counter + 1. - 7. Return (“Success”, pseudorandom_bits, V, Key, reseed_counter). + * HMAC_DRBG_Generate_algorithm: + * Input: bitstring (V, Key), integer (reseed_counter, requested_number_of_bits). + * Output: string status, bitstring (pseudorandom_bits, V, Key), integer reseed_counter. + * + * Process: + * 1. If (reseed_counter ≥ 10,000), then Return (“Reseed required”, Null, V, Key, reseed_counter). + * 2. temp = Null. + * 3. While (len (temp) < requested_no_of_bits) do: + * 3.1 V = HMAC (Key, V). + * 3.2 temp = temp || V. + * 4. pseudorandom_bits = Leftmost (requested_no_of_bits) of temp. + * 5. (Key, V) = HMAC_DRBG_Update (Null, Key, V). + * 6. reseed_counter = reseed_counter + 1. + * 7. Return (“Success”, pseudorandom_bits, V, Key, reseed_counter). */ -static int validate_gen_params(uint64_t reseed_counter, size_t dataOutLength, size_t additionalLength) - +static int +validate_gen_params(uint64_t reseed_counter, size_t dataOutLength, size_t additionalLength) { - int rc=CCDRBG_STATUS_PARAM_ERROR; + int rc = CCDRBG_STATUS_PARAM_ERROR; - // Zero byte in one request is a valid use-case (21208820) - cc_require (dataOutLength <= CCDRBG_MAX_REQUEST_SIZE, end); //Requested too many bytes in one request - cc_require (additionalLength<=CCDRBG_MAX_ADDITIONALINPUT_SIZE, end); //Additional input too long + // Zero byte in one request is a valid use-case (21208820) + cc_require(dataOutLength <= CCDRBG_MAX_REQUEST_SIZE, end); //Requested too many bytes in one request + cc_require(additionalLength <= CCDRBG_MAX_ADDITIONALINPUT_SIZE, end); //Additional input too long - // 1. If (reseed_counter > 2^^48), then Return (“Reseed required”, Null, V, Key, reseed_counter). - rc = CCDRBG_STATUS_NEED_RESEED; - cc_require (reseed_counter <= CCDRBG_RESEED_INTERVAL, end); //Reseed required + // 1. If (reseed_counter > 2^^48), then Return (“Reseed required”, Null, V, Key, reseed_counter). + rc = CCDRBG_STATUS_NEED_RESEED; + cc_require(reseed_counter <= CCDRBG_RESEED_INTERVAL, end); //Reseed required - rc=CCDRBG_STATUS_OK; + rc = CCDRBG_STATUS_OK; end: - return rc; + return rc; } -static int generate(struct ccdrbg_state *drbg, size_t dataOutLength, void *dataOut, - size_t additionalLength, const void *additional) +static int +generate(struct ccdrbg_state *drbg, size_t dataOutLength, void *dataOut, + size_t additionalLength, const void *additional) { - struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; - const struct ccdrbg_nisthmac_custom *custom = state->custom; - const struct ccdigest_info *di = custom->di; - - int rc = validate_gen_params(state->reseed_counter, dataOutLength, additional==NULL?0:additionalLength); - if(rc!=CCDRBG_STATUS_OK) return rc; - - // 2. If additional_input ≠ Null, then (Key, V) = HMAC_DRBG_Update (additional_input, Key, V). - if (additional && additionalLength) - hmac_dbrg_update(drbg, additionalLength, additional, 0, NULL, 0, NULL); - - // hmac_dbrg_generate_algorithm - char *outPtr = (char *) dataOut; - while (dataOutLength > 0) { - if (!state->bytesLeft) { - // 5. V=HMAC(K,V). 
- cchmac(di, state->keysize, state->key, state->vsize, state->nextvptr, state->vptr); // Won't be returned - // FIPS 140-2 4.9.2 Conditional Tests - // "Each subsequent generation of an n-bit block shall be compared with the previously generated block. The test shall fail if any two compared n-bit blocks are equal." - if (0==cc_cmp_safe(state->vsize, state->vptr, state->nextvptr)) { - //The world as we know it has come to an end - //the DRBG data structure is zeroized. subsequent calls to - //DRBG ends up in NULL dereferencing and/or unpredictable state. - //catastrophic error in SP 800-90A - done(drbg); - rc=CCDRBG_STATUS_ABORT; - cc_try_abort(NULL); - goto errOut; - } - CC_SWAP(state->nextvptr, state->vptr); - state->bytesLeft = state->vsize; + struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg; + const struct ccdrbg_nisthmac_custom *custom = state->custom; + const struct ccdigest_info *di = custom->di; + + int rc = validate_gen_params(state->reseed_counter, dataOutLength, additional == NULL?0:additionalLength); + if (rc != CCDRBG_STATUS_OK) { + return rc; + } + + // 2. If additional_input ≠ Null, then (Key, V) = HMAC_DRBG_Update (additional_input, Key, V). + if (additional && additionalLength) { + hmac_dbrg_update(drbg, additionalLength, additional, 0, NULL, 0, NULL); + } + + // hmac_dbrg_generate_algorithm + char *outPtr = (char *) dataOut; + while (dataOutLength > 0) { + if (!state->bytesLeft) { + // 5. V=HMAC(K,V). + cchmac(di, state->keysize, state->key, state->vsize, state->nextvptr, state->vptr); // Won't be returned + // FIPS 140-2 4.9.2 Conditional Tests + // "Each subsequent generation of an n-bit block shall be compared with the previously generated block. The test shall fail if any two compared n-bit blocks are equal." + if (0 == cc_cmp_safe(state->vsize, state->vptr, state->nextvptr)) { + //The world as we know it has come to an end + //the DRBG data structure is zeroized. subsequent calls to + //DRBG ends up in NULL dereferencing and/or unpredictable state. + //catastrophic error in SP 800-90A + done(drbg); + rc = CCDRBG_STATUS_ABORT; + cc_try_abort(NULL); + goto errOut; + } + CC_SWAP(state->nextvptr, state->vptr); + state->bytesLeft = state->vsize; #if DRBG_NISTHMAC_DEBUG - cc_print("generate blk: ", state->vsize, state->vptr); + cc_print("generate blk: ", state->vsize, state->vptr); #endif - } - size_t outLength = dataOutLength > state->bytesLeft ? state->bytesLeft : dataOutLength; - CC_MEMCPY(outPtr, state->vptr, outLength); - state->bytesLeft -= outLength; - outPtr += outLength; - dataOutLength -= outLength; - } + } + size_t outLength = dataOutLength > state->bytesLeft ? state->bytesLeft : dataOutLength; + CC_MEMCPY(outPtr, state->vptr, outLength); + state->bytesLeft -= outLength; + outPtr += outLength; + dataOutLength -= outLength; + } - // 6. (Key, V) = HMAC_DRBG_Update (additional_input, Key, V). - hmac_dbrg_update(drbg, additionalLength, additional, 0, NULL, 0, NULL); + // 6. (Key, V) = HMAC_DRBG_Update (additional_input, Key, V). + hmac_dbrg_update(drbg, additionalLength, additional, 0, NULL, 0, NULL); - // 7. reseed_counter = reseed_counter + 1. - state->reseed_counter++; + // 7. reseed_counter = reseed_counter + 1. 
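+	// (Once reseed_counter exceeds CCDRBG_RESEED_INTERVAL, validate_gen_params()
+	// above fails the next request with CCDRBG_STATUS_NEED_RESEED, so this
+	// counter is what eventually forces the consuming application to reseed.)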
+	state->reseed_counter++;
 #if DRBG_NISTHMAC_DEBUG
-    dumpState("generate end: ", state);
-    cc_print("generate end nxt: ", state->vsize, state->nextvptr);
+	dumpState("generate end: ", state);
+	cc_print("generate end nxt: ", state->vsize, state->nextvptr);
 #endif
-    rc=CCDRBG_STATUS_OK;
+	rc = CCDRBG_STATUS_OK;
 errOut:
-    return rc;
+	return rc;
 }

-static void done(struct ccdrbg_state *drbg)
+static void
+done(struct ccdrbg_state *drbg)
 {
-    struct ccdrbg_nisthmac_state *state=(struct ccdrbg_nisthmac_state *)drbg;
-    cc_clear(sizeof(struct ccdrbg_nisthmac_state), state); //clear v, key as well as internal variables
+	struct ccdrbg_nisthmac_state *state = (struct ccdrbg_nisthmac_state *)drbg;
+	cc_clear(sizeof(struct ccdrbg_nisthmac_state), state); //clear v, key as well as internal variables
 }

 struct ccdrbg_info ccdrbg_nisthmac_info = {
-    .size = sizeof(struct ccdrbg_nisthmac_state) + sizeof(struct ccdrbg_nisthmac_custom),
-    .init = init,
-    .reseed = reseed,
-    .generate = generate,
-    .done = done,
-    .custom = NULL
+	.size = sizeof(struct ccdrbg_nisthmac_state) + sizeof(struct ccdrbg_nisthmac_custom),
+	.init = init,
+	.reseed = reseed,
+	.generate = generate,
+	.done = done,
+	.custom = NULL
 };

 /* This initializes an info object with the right options */
-void ccdrbg_factory_nisthmac(struct ccdrbg_info *info, const struct ccdrbg_nisthmac_custom *custom)
+void
+ccdrbg_factory_nisthmac(struct ccdrbg_info *info, const struct ccdrbg_nisthmac_custom *custom)
 {
-    info->size = sizeof(struct ccdrbg_nisthmac_state) + sizeof(struct ccdrbg_nisthmac_custom);
-    info->init = init;
-    info->generate = generate;
-    info->reseed = reseed;
-    info->done = done;
-    info->custom = custom;
+	info->size = sizeof(struct ccdrbg_nisthmac_state) + sizeof(struct ccdrbg_nisthmac_custom);
+	info->init = init;
+	info->generate = generate;
+	info->reseed = reseed;
+	info->done = done;
+	info->custom = custom;
 };
diff --git a/osfmk/corecrypto/ccdigest/src/ccdigest_init.c b/osfmk/corecrypto/ccdigest/src/ccdigest_init.c
index 0ddab7476..9dc776366 100644
--- a/osfmk/corecrypto/ccdigest/src/ccdigest_init.c
+++ b/osfmk/corecrypto/ccdigest/src/ccdigest_init.c
@@ -35,8 +35,10 @@
 #include
 #include

-void ccdigest_init(const struct ccdigest_info *di, ccdigest_ctx_t ctx) {
-    ccdigest_copy_state(di, ccdigest_state_ccn(di, ctx), di->initial_state);
-    ccdigest_nbits(di, ctx) = 0;
-    ccdigest_num(di, ctx) = 0;
+void
+ccdigest_init(const struct ccdigest_info *di, ccdigest_ctx_t ctx)
+{
+	ccdigest_copy_state(di, ccdigest_state_ccn(di, ctx), di->initial_state);
+	ccdigest_nbits(di, ctx) = 0;
+	ccdigest_num(di, ctx) = 0;
 }
diff --git a/osfmk/corecrypto/ccdigest/src/ccdigest_update.c b/osfmk/corecrypto/ccdigest/src/ccdigest_update.c
index 089928f99..94b29a172 100644
--- a/osfmk/corecrypto/ccdigest/src/ccdigest_update.c
+++ b/osfmk/corecrypto/ccdigest/src/ccdigest_update.c
@@ -35,43 +35,46 @@
 #include
 #include

-void ccdigest_update(const struct ccdigest_info *di, ccdigest_ctx_t ctx,
-                     size_t len, const void *data) {
-    const char * data_ptr = data;
-    size_t nblocks, nbytes;
+void
+ccdigest_update(const struct ccdigest_info *di, ccdigest_ctx_t ctx,
+    size_t len, const void *data)
+{
+	const char * data_ptr = data;
+	size_t nblocks, nbytes;

-    while (len > 0) {
-        if (ccdigest_num(di, ctx) == 0 && len > di->block_size) {
-            //low-end processors are slow on division
-            if(di->block_size == 1<<6 ){ //sha256
-                nblocks = len >> 6;
-                nbytes = len & 0xFFFFffC0;
-            }else if(di->block_size == 1<<7 ){ //sha512
-                nblocks = len >> 7;
-                nbytes = len & 0xFFFFff80;
-            }else
-            {
-                nblocks = len / di->block_size;
-                nbytes = nblocks * di->block_size;
-            }
+	while (len > 0) {
+		if (ccdigest_num(di, ctx) == 0 && len > di->block_size) {
+			//low-end processors are slow on division
+			if (di->block_size == 1 << 6) { //sha256
+				nblocks = len >> 6;
+				nbytes = len & 0xFFFFffC0;
+			} else if (di->block_size == 1 << 7) { //sha512
+				nblocks = len >> 7;
+				nbytes = len & 0xFFFFff80;
+			} else {
+				nblocks = len / di->block_size;
+				nbytes = nblocks * di->block_size;
+			}

-            di->compress(ccdigest_state(di, ctx), nblocks, data_ptr);
-            len -= nbytes;
-            data_ptr += nbytes;
-            ccdigest_nbits(di, ctx) += nbytes * 8;
-        } else {
-            size_t n = di->block_size - ccdigest_num(di, ctx);
-            if (len < n)
-                n = len;
-            CC_MEMCPY(ccdigest_data(di, ctx) + ccdigest_num(di, ctx), data_ptr, n);
-            /* typecast: less than block size, will always fit into an int */
-            ccdigest_num(di, ctx) += (unsigned int)n;
-            len -= n;
-            data_ptr += n;
-            if (ccdigest_num(di, ctx) == di->block_size) {
-                di->compress(ccdigest_state(di, ctx), 1, ccdigest_data(di, ctx));
-                ccdigest_nbits(di, ctx) += ccdigest_num(di, ctx) * 8;
-                ccdigest_num(di, ctx) = 0;
-            }
-        }
-    }
+			di->compress(ccdigest_state(di, ctx), nblocks, data_ptr);
+			len -= nbytes;
+			data_ptr += nbytes;
+			ccdigest_nbits(di, ctx) += nbytes * 8;
+		} else {
+			size_t n = di->block_size - ccdigest_num(di, ctx);
+			if (len < n) {
+				n = len;
+			}
+			CC_MEMCPY(ccdigest_data(di, ctx) + ccdigest_num(di, ctx), data_ptr, n);
+			/* typecast: less than block size, will always fit into an int */
+			ccdigest_num(di, ctx) += (unsigned int)n;
+			len -= n;
+			data_ptr += n;
+			if (ccdigest_num(di, ctx) == di->block_size) {
+				di->compress(ccdigest_state(di, ctx), 1, ccdigest_data(di, ctx));
+				ccdigest_nbits(di, ctx) += ccdigest_num(di, ctx) * 8;
+				ccdigest_num(di, ctx) = 0;
+			}
+		}
+	}
 }
diff --git a/osfmk/corecrypto/cchmac/src/cchmac.c b/osfmk/corecrypto/cchmac/src/cchmac.c
index dbd6a3454..050f39838 100644
--- a/osfmk/corecrypto/cchmac/src/cchmac.c
+++ b/osfmk/corecrypto/cchmac/src/cchmac.c
@@ -36,14 +36,16 @@
 #include "corecrypto/fipspost_trace.h"

-void cchmac(const struct ccdigest_info *di,
-            size_t key_len, const void *key,
-            size_t data_len, const void *data, unsigned char *mac) {
-    FIPSPOST_TRACE_EVENT;
+void
+cchmac(const struct ccdigest_info *di,
+    size_t key_len, const void *key,
+    size_t data_len, const void *data, unsigned char *mac)
+{
+	FIPSPOST_TRACE_EVENT;

-    cchmac_di_decl(di, hc);
-    cchmac_init(di, hc, key_len, key);
-    cchmac_update(di, hc, data_len, data);
-    cchmac_final(di, hc, mac);
+	cchmac_di_decl(di, hc);
+	cchmac_init(di, hc, key_len, key);
+	cchmac_update(di, hc, data_len, data);
+	cchmac_final(di, hc, mac);
 	cchmac_di_clear(di, hc);
 }
diff --git a/osfmk/corecrypto/cchmac/src/cchmac_final.c b/osfmk/corecrypto/cchmac/src/cchmac_final.c
index cbd7db453..dc72c7adb 100644
--- a/osfmk/corecrypto/cchmac/src/cchmac_final.c
+++ b/osfmk/corecrypto/cchmac/src/cchmac_final.c
@@ -36,18 +36,19 @@
 #include
 #include

-void cchmac_final(const struct ccdigest_info *di, cchmac_ctx_t hc,
-                  unsigned char *mac) {
+void
+cchmac_final(const struct ccdigest_info *di, cchmac_ctx_t hc,
+    unsigned char *mac)
+{
+	// Finalize the inner state of the data being HMAC'd, i.e., H((key \oplus ipad) || m)
+	ccdigest_final(di, cchmac_digest_ctx(di, hc), cchmac_data(di, hc));

-    // Finalize the inner state of the data being HMAC'd, i.e., H((key \oplus ipad) || m)
-    ccdigest_final(di, cchmac_digest_ctx(di, hc), cchmac_data(di, hc));
+	// Set the HMAC output size based on the digest algorithm
+	cchmac_num(di, hc) = (unsigned int)di->output_size; /* typecast: output size will always fit in an unsigned int */
+	cchmac_nbits(di, hc) = di->block_size * 8;

-    // Set the HMAC output size based on the digest algorithm
-    cchmac_num(di, hc) = (unsigned int)di->output_size; /* typecast: output size will always fit in an unsigned int */
-    cchmac_nbits(di, hc) = di->block_size * 8;
-
-    // Copy the pre-computed compress(key \oplus opad) back to digest state,
-    // and then run through the digest once more to finish the HMAC
-    ccdigest_copy_state(di, cchmac_istate32(di, hc), cchmac_ostate32(di, hc));
-    ccdigest_final(di, cchmac_digest_ctx(di, hc), mac);
+	// Copy the pre-computed compress(key \oplus opad) back to digest state,
+	// and then run through the digest once more to finish the HMAC
+	ccdigest_copy_state(di, cchmac_istate32(di, hc), cchmac_ostate32(di, hc));
+	ccdigest_final(di, cchmac_digest_ctx(di, hc), mac);
 }
diff --git a/osfmk/corecrypto/cchmac/src/cchmac_init.c b/osfmk/corecrypto/cchmac/src/cchmac_init.c
index e276fe1ba..4eba5b23a 100644
--- a/osfmk/corecrypto/cchmac/src/cchmac_init.c
+++ b/osfmk/corecrypto/cchmac/src/cchmac_init.c
@@ -37,48 +37,50 @@
 #include

 /* The HMAC_<DIG> transform looks like:
-    <DIG> (K XOR opad || <DIG> (K XOR ipad || text))
-    Where K is an n-byte key
-    ipad is the byte 0x36 repeated 64 times.
-    opad is the byte 0x5c repeated 64 times.
-    text is the data being protected.
+ * <DIG> (K XOR opad || <DIG> (K XOR ipad || text))
+ * Where K is an n-byte key
+ * ipad is the byte 0x36 repeated 64 times.
+ * opad is the byte 0x5c repeated 64 times.
+ * text is the data being protected.
 */

-void cchmac_init(const struct ccdigest_info *di, cchmac_ctx_t hc,
-                 size_t key_len, const void *key_data) {
-    const unsigned char *key = key_data;
+void
+cchmac_init(const struct ccdigest_info *di, cchmac_ctx_t hc,
+    size_t key_len, const void *key_data)
+{
+	const unsigned char *key = key_data;

-    /* Set cchmac_data(di, hc) to key ^ opad. */
-    size_t byte = 0;
+	/* Set cchmac_data(di, hc) to key ^ opad. */
+	size_t byte = 0;
 	if (key_len <= di->block_size) {
-        for (;byte < key_len; ++byte) {
-            cchmac_data(di, hc)[byte] = key[byte] ^ 0x5c;
-        }
-    } else {
-        /* Key is longer than di->block_size, reset it to key=digest(key) */
-        ccdigest_init(di, cchmac_digest_ctx(di, hc));
-        ccdigest_update(di, cchmac_digest_ctx(di, hc), key_len, key);
-        ccdigest_final(di, cchmac_digest_ctx(di, hc), cchmac_data(di, hc));
-        key_len = di->output_size;
-        for (;byte < key_len; ++byte) {
-            cchmac_data(di, hc)[byte] ^= 0x5c;
-        }
-    }
-    /* Fill remainder of cchmac_data(di, hc) with opad. */
+		for (; byte < key_len; ++byte) {
+			cchmac_data(di, hc)[byte] = key[byte] ^ 0x5c;
+		}
+	} else {
+		/* Key is longer than di->block_size, reset it to key=digest(key) */
+		ccdigest_init(di, cchmac_digest_ctx(di, hc));
+		ccdigest_update(di, cchmac_digest_ctx(di, hc), key_len, key);
+		ccdigest_final(di, cchmac_digest_ctx(di, hc), cchmac_data(di, hc));
+		key_len = di->output_size;
+		for (; byte < key_len; ++byte) {
+			cchmac_data(di, hc)[byte] ^= 0x5c;
+		}
+	}
+	/* Fill remainder of cchmac_data(di, hc) with opad. */
 	if (key_len < di->block_size) {
 		CC_MEMSET(cchmac_data(di, hc) + key_len, 0x5c, di->block_size - key_len);
 	}

-    /* Set cchmac_ostate32(di, hc) to the state of the first round of the
-       outer digest. */
-    ccdigest_copy_state(di, cchmac_ostate32(di, hc), di->initial_state);
-    di->compress(cchmac_ostate(di, hc), 1, cchmac_data(di, hc));
+	/* Set cchmac_ostate32(di, hc) to the state of the first round of the
+	 * outer digest.
*/ + ccdigest_copy_state(di, cchmac_ostate32(di, hc), di->initial_state); + di->compress(cchmac_ostate(di, hc), 1, cchmac_data(di, hc)); - /* Set cchmac_data(di, hc) to key ^ ipad. */ - for (byte = 0; byte < di->block_size; ++byte) { - cchmac_data(di, hc)[byte] ^= (0x5c ^ 0x36); - } - ccdigest_copy_state(di, cchmac_istate32(di, hc), di->initial_state); - di->compress(cchmac_istate(di, hc), 1, cchmac_data(di, hc)); - cchmac_num(di, hc) = 0; - cchmac_nbits(di, hc) = di->block_size * 8; + /* Set cchmac_data(di, hc) to key ^ ipad. */ + for (byte = 0; byte < di->block_size; ++byte) { + cchmac_data(di, hc)[byte] ^= (0x5c ^ 0x36); + } + ccdigest_copy_state(di, cchmac_istate32(di, hc), di->initial_state); + di->compress(cchmac_istate(di, hc), 1, cchmac_data(di, hc)); + cchmac_num(di, hc) = 0; + cchmac_nbits(di, hc) = di->block_size * 8; } diff --git a/osfmk/corecrypto/cchmac/src/cchmac_update.c b/osfmk/corecrypto/cchmac/src/cchmac_update.c index a2c768896..649694c11 100644 --- a/osfmk/corecrypto/cchmac/src/cchmac_update.c +++ b/osfmk/corecrypto/cchmac/src/cchmac_update.c @@ -34,7 +34,9 @@ #include -void cchmac_update(const struct ccdigest_info *di, cchmac_ctx_t hc, - size_t data_len, const void *data) { - ccdigest_update(di, cchmac_digest_ctx(di, hc), data_len, data); +void +cchmac_update(const struct ccdigest_info *di, cchmac_ctx_t hc, + size_t data_len, const void *data) +{ + ccdigest_update(di, cchmac_digest_ctx(di, hc), data_len, data); } diff --git a/osfmk/corecrypto/ccn/src/ccn_set.c b/osfmk/corecrypto/ccn/src/ccn_set.c index e288733f6..4cd06a506 100644 --- a/osfmk/corecrypto/ccn/src/ccn_set.c +++ b/osfmk/corecrypto/ccn/src/ccn_set.c @@ -36,8 +36,9 @@ #include #if !CCN_SET_ASM -void ccn_set(cc_size n, cc_unit *r, const cc_unit *s) +void +ccn_set(cc_size n, cc_unit *r, const cc_unit *s) { - CC_MEMMOVE(r, s, ccn_sizeof_n(n)); + CC_MEMMOVE(r, s, ccn_sizeof_n(n)); } #endif diff --git a/osfmk/corecrypto/ccsha1/src/ccdigest_final_64be.c b/osfmk/corecrypto/ccsha1/src/ccdigest_final_64be.c index be6acaa1a..8b30793d5 100644 --- a/osfmk/corecrypto/ccsha1/src/ccdigest_final_64be.c +++ b/osfmk/corecrypto/ccsha1/src/ccdigest_final_64be.c @@ -37,31 +37,33 @@ #include "ccdigest_internal.h" /* This can be used for SHA1, SHA256 and SHA224 */ -void ccdigest_final_64be(const struct ccdigest_info *di, ccdigest_ctx_t ctx, - unsigned char *digest) { - ccdigest_nbits(di, ctx) += ccdigest_num(di, ctx) * 8; - ccdigest_data(di, ctx)[ccdigest_num(di, ctx)++] = 0x80; +void +ccdigest_final_64be(const struct ccdigest_info *di, ccdigest_ctx_t ctx, + unsigned char *digest) +{ + ccdigest_nbits(di, ctx) += ccdigest_num(di, ctx) * 8; + ccdigest_data(di, ctx)[ccdigest_num(di, ctx)++] = 0x80; - /* If we don't have at least 8 bytes (for the length) left we need to add - a second block. */ - if (ccdigest_num(di, ctx) > 64 - 8) { - while (ccdigest_num(di, ctx) < 64) { - ccdigest_data(di, ctx)[ccdigest_num(di, ctx)++] = 0; - } - di->compress(ccdigest_state(di, ctx), 1, ccdigest_data(di, ctx)); - ccdigest_num(di, ctx) = 0; - } + /* If we don't have at least 8 bytes (for the length) left we need to add + * a second block. 
*/
+	if (ccdigest_num(di, ctx) > 64 - 8) {
+		while (ccdigest_num(di, ctx) < 64) {
+			ccdigest_data(di, ctx)[ccdigest_num(di, ctx)++] = 0;
+		}
+		di->compress(ccdigest_state(di, ctx), 1, ccdigest_data(di, ctx));
+		ccdigest_num(di, ctx) = 0;
+	}

-    /* pad up to block_size minus 8 with 0s */
-    while (ccdigest_num(di, ctx) < 64 - 8) {
-        ccdigest_data(di, ctx)[ccdigest_num(di, ctx)++] = 0;
-    }
+	/* pad up to block_size minus 8 with 0s */
+	while (ccdigest_num(di, ctx) < 64 - 8) {
+		ccdigest_data(di, ctx)[ccdigest_num(di, ctx)++] = 0;
+	}

-    CC_STORE64_BE(ccdigest_nbits(di, ctx), ccdigest_data(di, ctx) + 64 - 8);
-    di->compress(ccdigest_state(di, ctx), 1, ccdigest_data(di, ctx));
+	CC_STORE64_BE(ccdigest_nbits(di, ctx), ccdigest_data(di, ctx) + 64 - 8);
+	di->compress(ccdigest_state(di, ctx), 1, ccdigest_data(di, ctx));

-    /* copy output */
-    for (unsigned int i = 0; i < di->output_size / 4; i++) {
-        CC_STORE32_BE(ccdigest_state_u32(di, ctx)[i], digest+(4*i));
-    }
+	/* copy output */
+	for (unsigned int i = 0; i < di->output_size / 4; i++) {
+		CC_STORE32_BE(ccdigest_state_u32(di, ctx)[i], digest + (4 * i));
+	}
 }
diff --git a/osfmk/corecrypto/ccsha1/src/ccdigest_internal.h b/osfmk/corecrypto/ccsha1/src/ccdigest_internal.h
index bc3921e2e..59c6acc04 100644
--- a/osfmk/corecrypto/ccsha1/src/ccdigest_internal.h
+++ b/osfmk/corecrypto/ccsha1/src/ccdigest_internal.h
@@ -38,10 +38,10 @@
 #include

 void ccdigest_final_common(const struct ccdigest_info *di,
-                           ccdigest_ctx_t ctx, void *digest);
+    ccdigest_ctx_t ctx, void *digest);
 void ccdigest_final_64be(const struct ccdigest_info *di, ccdigest_ctx_t,
-                         unsigned char *digest);
+    unsigned char *digest);
 void ccdigest_final_64le(const struct ccdigest_info *di, ccdigest_ctx_t,
-                         unsigned char *digest);
+    unsigned char *digest);

 #endif /* _CORECRYPTO_CCDIGEST_INTERNAL_H_ */
diff --git a/osfmk/corecrypto/ccsha1/src/ccsha1_eay.c b/osfmk/corecrypto/ccsha1/src/ccsha1_eay.c
index a28e38f9f..22941eb91 100644
--- a/osfmk/corecrypto/ccsha1/src/ccsha1_eay.c
+++ b/osfmk/corecrypto/ccsha1/src/ccsha1_eay.c
@@ -88,21 +88,21 @@

 #ifndef SHA_LONG_LOG2
-#define SHA_LONG_LOG2 2 /* default to 32 bits */
+#define SHA_LONG_LOG2   2       /* default to 32 bits */
 #endif

 #define ROTATE(b, n) CC_ROLc(b, n)

-#define Xupdate(a,ix,ia,ib,ic,id) ( (a)=(ia^ib^ic^id), \
-                                    ix=(a)=ROTATE((a),1) \
-                                  )
+#define Xupdate(a, ix, ia, ib, ic, id) ( (a)=(ia^ib^ic^id),\
+	ix=(a)=ROTATE((a),1) \
+	)

 #define MD32_REG_T uint32_t

 #define HOST_c2l(data, l) CC_LOAD32_BE(l, data); data+=4;

-#define K_00_19 0x5a827999
+#define K_00_19 0x5a827999
 #define K_20_39 0x6ed9eba1
 #define K_40_59 0x8f1bbcdc
 #define K_60_79 0xca62c1d6
@@ -114,36 +114,36 @@
 * I've just become aware of another tweak to be made, again from Wei Dai,
 * in F_40_59, (x&a)|(y&a) -> (x|y)&a
 */
-#define F_00_19(b,c,d) ((((c) ^ (d)) & (b)) ^ (d))
-#define F_20_39(b,c,d) ((b) ^ (c) ^ (d))
-#define F_40_59(b,c,d) (((b) & (c)) | (((b)|(c)) & (d)))
-#define F_60_79(b,c,d) F_20_39(b,c,d)
+#define F_00_19(b, c, d) ((((c) ^ (d)) & (b)) ^ (d))
+#define F_20_39(b, c, d) ((b) ^ (c) ^ (d))
+#define F_40_59(b, c, d) (((b) & (c)) | (((b)|(c)) & (d)))
+#define F_60_79(b, c, d) F_20_39(b,c,d)

-#define BODY_00_15(i,a,b,c,d,e,f,xi) \
+#define BODY_00_15(i, a, b, c, d, e, f, xi) \
 	(f)=xi+(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
 	(b)=ROTATE((b),30);

-#define BODY_16_19(i,a,b,c,d,e,f,xi,xa,xb,xc,xd) \
+#define BODY_16_19(i, a, b, c, d, e, f, xi, xa, xb, xc, xd) \
 	Xupdate(f,xi,xa,xb,xc,xd); \
 	(f)+=(e)+K_00_19+ROTATE((a),5)+F_00_19((b),(c),(d)); \
 	(b)=ROTATE((b),30);
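/*
 * Editorial aside, not part of the original patch: the F_00_19 and F_40_59
 * forms above are strength-reduced versions of SHA-1's textbook "choose" and
 * "majority" functions, per the Wei Dai notes in the comment. A minimal
 * standalone sketch (plain C, no corecrypto dependencies) that checks both
 * identities bit-by-bit:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t ch(uint32_t b, uint32_t c, uint32_t d)  { return (b & c) | (~b & d); }
static uint32_t maj(uint32_t b, uint32_t c, uint32_t d) { return (b & c) | (b & d) | (c & d); }

int main(void)
{
	/* The expressions are purely bitwise, so checking every one-bit input suffices. */
	for (uint32_t b = 0; b <= 1; b++) {
		for (uint32_t c = 0; c <= 1; c++) {
			for (uint32_t d = 0; d <= 1; d++) {
				assert((((c ^ d) & b) ^ d) == (ch(b, c, d) & 1));  /* F_00_19 */
				assert(((b & c) | ((b | c) & d)) == maj(b, c, d)); /* F_40_59 */
			}
		}
	}
	return 0;
}
/*
 * Each rewritten form needs one fewer boolean operation per round than the
 * textbook definition, which is why this implementation uses them.
 */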
-#define BODY_20_31(i,a,b,c,d,e,f,xi,xa,xb,xc,xd) \
+#define BODY_20_31(i, a, b, c, d, e, f, xi, xa, xb, xc, xd) \
 	Xupdate(f,xi,xa,xb,xc,xd); \
 	(f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
 	(b)=ROTATE((b),30);

-#define BODY_32_39(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+#define BODY_32_39(i, a, b, c, d, e, f, xa, xb, xc, xd) \
 	Xupdate(f,xa,xa,xb,xc,xd); \
 	(f)+=(e)+K_20_39+ROTATE((a),5)+F_20_39((b),(c),(d)); \
 	(b)=ROTATE((b),30);

-#define BODY_40_59(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+#define BODY_40_59(i, a, b, c, d, e, f, xa, xb, xc, xd) \
 	Xupdate(f,xa,xa,xb,xc,xd); \
 	(f)+=(e)+K_40_59+ROTATE((a),5)+F_40_59((b),(c),(d)); \
 	(b)=ROTATE((b),30);

-#define BODY_60_79(i,a,b,c,d,e,f,xa,xb,xc,xd) \
+#define BODY_60_79(i, a, b, c, d, e, f, xa, xb, xc, xd) \
 	Xupdate(f,xa,xa,xb,xc,xd); \
 	(f)=xa+(e)+K_60_79+ROTATE((a),5)+F_60_79((b),(c),(d)); \
 	(b)=ROTATE((b),30);
@@ -153,157 +153,157 @@
 #endif

 #ifndef MD32_XARRAY
- /*
- * Originally X was an array. As it's automatic it's natural
- * to expect RISC compiler to accommodate at least part of it in
- * the register bank, isn't it? Unfortunately not all compilers
- * "find" this expectation reasonable:-( In order to make such
- * compilers generate better code I replace X[] with a bunch of
- * X0, X1, etc. See the function body below...
- *
- */
-# define X(i) XX##i
+/*
+ * Originally X was an array. As it's automatic it's natural
+ * to expect RISC compiler to accommodate at least part of it in
+ * the register bank, isn't it? Unfortunately not all compilers
+ * "find" this expectation reasonable:-( In order to make such
+ * compilers generate better code I replace X[] with a bunch of
+ * X0, X1, etc. See the function body below...
+ *
+ */
+# define X(i) XX##i
 #else
- /*
- * However! Some compilers (most notably HP C) get overwhelmed by
- * that many local variables so that we have to have the way to
- * fall down to the original behavior.
- */
-# define X(i) XX[i]
+/*
+ * However! Some compilers (most notably HP C) get overwhelmed by
+ * that many local variables so that we have to have the way to
+ * fall down to the original behavior.
+ */ +# define X(i) XX[i] #endif -static void sha1_compress(ccdigest_state_t s, size_t num, const void *buf) +static void +sha1_compress(ccdigest_state_t s, size_t num, const void *buf) { - const unsigned char *data=buf; - register uint32_t A,B,C,D,E,T,l; + const unsigned char *data = buf; + register uint32_t A, B, C, D, E, T, l; #ifndef MD32_XARRAY uint32_t XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7, - XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15; + XX8, XX9, XX10, XX11, XX12, XX13, XX14, XX15; #else uint32_t XX[16]; #endif - uint32_t *state=ccdigest_u32(s); - - A=state[0]; - B=state[1]; - C=state[2]; - D=state[3]; - E=state[4]; - - for (;;) - { - - HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l; - BODY_00_15( 0,A,B,C,D,E,T,X( 0)); HOST_c2l(data,l); X( 2)=l; - BODY_00_15( 1,T,A,B,C,D,E,X( 1)); HOST_c2l(data,l); X( 3)=l; - BODY_00_15( 2,E,T,A,B,C,D,X( 2)); HOST_c2l(data,l); X( 4)=l; - BODY_00_15( 3,D,E,T,A,B,C,X( 3)); HOST_c2l(data,l); X( 5)=l; - BODY_00_15( 4,C,D,E,T,A,B,X( 4)); HOST_c2l(data,l); X( 6)=l; - BODY_00_15( 5,B,C,D,E,T,A,X( 5)); HOST_c2l(data,l); X( 7)=l; - BODY_00_15( 6,A,B,C,D,E,T,X( 6)); HOST_c2l(data,l); X( 8)=l; - BODY_00_15( 7,T,A,B,C,D,E,X( 7)); HOST_c2l(data,l); X( 9)=l; - BODY_00_15( 8,E,T,A,B,C,D,X( 8)); HOST_c2l(data,l); X(10)=l; - BODY_00_15( 9,D,E,T,A,B,C,X( 9)); HOST_c2l(data,l); X(11)=l; - BODY_00_15(10,C,D,E,T,A,B,X(10)); HOST_c2l(data,l); X(12)=l; - BODY_00_15(11,B,C,D,E,T,A,X(11)); HOST_c2l(data,l); X(13)=l; - BODY_00_15(12,A,B,C,D,E,T,X(12)); HOST_c2l(data,l); X(14)=l; - BODY_00_15(13,T,A,B,C,D,E,X(13)); HOST_c2l(data,l); X(15)=l; - BODY_00_15(14,E,T,A,B,C,D,X(14)); - BODY_00_15(15,D,E,T,A,B,C,X(15)); - - BODY_16_19(16,C,D,E,T,A,B,X( 0),X( 0),X( 2),X( 8),X(13)); - BODY_16_19(17,B,C,D,E,T,A,X( 1),X( 1),X( 3),X( 9),X(14)); - BODY_16_19(18,A,B,C,D,E,T,X( 2),X( 2),X( 4),X(10),X(15)); - BODY_16_19(19,T,A,B,C,D,E,X( 3),X( 3),X( 5),X(11),X( 0)); - - BODY_20_31(20,E,T,A,B,C,D,X( 4),X( 4),X( 6),X(12),X( 1)); - BODY_20_31(21,D,E,T,A,B,C,X( 5),X( 5),X( 7),X(13),X( 2)); - BODY_20_31(22,C,D,E,T,A,B,X( 6),X( 6),X( 8),X(14),X( 3)); - BODY_20_31(23,B,C,D,E,T,A,X( 7),X( 7),X( 9),X(15),X( 4)); - BODY_20_31(24,A,B,C,D,E,T,X( 8),X( 8),X(10),X( 0),X( 5)); - BODY_20_31(25,T,A,B,C,D,E,X( 9),X( 9),X(11),X( 1),X( 6)); - BODY_20_31(26,E,T,A,B,C,D,X(10),X(10),X(12),X( 2),X( 7)); - BODY_20_31(27,D,E,T,A,B,C,X(11),X(11),X(13),X( 3),X( 8)); - BODY_20_31(28,C,D,E,T,A,B,X(12),X(12),X(14),X( 4),X( 9)); - BODY_20_31(29,B,C,D,E,T,A,X(13),X(13),X(15),X( 5),X(10)); - BODY_20_31(30,A,B,C,D,E,T,X(14),X(14),X( 0),X( 6),X(11)); - BODY_20_31(31,T,A,B,C,D,E,X(15),X(15),X( 1),X( 7),X(12)); - - BODY_32_39(32,E,T,A,B,C,D,X( 0),X( 2),X( 8),X(13)); - BODY_32_39(33,D,E,T,A,B,C,X( 1),X( 3),X( 9),X(14)); - BODY_32_39(34,C,D,E,T,A,B,X( 2),X( 4),X(10),X(15)); - BODY_32_39(35,B,C,D,E,T,A,X( 3),X( 5),X(11),X( 0)); - BODY_32_39(36,A,B,C,D,E,T,X( 4),X( 6),X(12),X( 1)); - BODY_32_39(37,T,A,B,C,D,E,X( 5),X( 7),X(13),X( 2)); - BODY_32_39(38,E,T,A,B,C,D,X( 6),X( 8),X(14),X( 3)); - BODY_32_39(39,D,E,T,A,B,C,X( 7),X( 9),X(15),X( 4)); - - BODY_40_59(40,C,D,E,T,A,B,X( 8),X(10),X( 0),X( 5)); - BODY_40_59(41,B,C,D,E,T,A,X( 9),X(11),X( 1),X( 6)); - BODY_40_59(42,A,B,C,D,E,T,X(10),X(12),X( 2),X( 7)); - BODY_40_59(43,T,A,B,C,D,E,X(11),X(13),X( 3),X( 8)); - BODY_40_59(44,E,T,A,B,C,D,X(12),X(14),X( 4),X( 9)); - BODY_40_59(45,D,E,T,A,B,C,X(13),X(15),X( 5),X(10)); - BODY_40_59(46,C,D,E,T,A,B,X(14),X( 0),X( 6),X(11)); - BODY_40_59(47,B,C,D,E,T,A,X(15),X( 1),X( 7),X(12)); - BODY_40_59(48,A,B,C,D,E,T,X( 0),X( 2),X( 8),X(13)); - 
BODY_40_59(49,T,A,B,C,D,E,X( 1),X( 3),X( 9),X(14)); - BODY_40_59(50,E,T,A,B,C,D,X( 2),X( 4),X(10),X(15)); - BODY_40_59(51,D,E,T,A,B,C,X( 3),X( 5),X(11),X( 0)); - BODY_40_59(52,C,D,E,T,A,B,X( 4),X( 6),X(12),X( 1)); - BODY_40_59(53,B,C,D,E,T,A,X( 5),X( 7),X(13),X( 2)); - BODY_40_59(54,A,B,C,D,E,T,X( 6),X( 8),X(14),X( 3)); - BODY_40_59(55,T,A,B,C,D,E,X( 7),X( 9),X(15),X( 4)); - BODY_40_59(56,E,T,A,B,C,D,X( 8),X(10),X( 0),X( 5)); - BODY_40_59(57,D,E,T,A,B,C,X( 9),X(11),X( 1),X( 6)); - BODY_40_59(58,C,D,E,T,A,B,X(10),X(12),X( 2),X( 7)); - BODY_40_59(59,B,C,D,E,T,A,X(11),X(13),X( 3),X( 8)); - - BODY_60_79(60,A,B,C,D,E,T,X(12),X(14),X( 4),X( 9)); - BODY_60_79(61,T,A,B,C,D,E,X(13),X(15),X( 5),X(10)); - BODY_60_79(62,E,T,A,B,C,D,X(14),X( 0),X( 6),X(11)); - BODY_60_79(63,D,E,T,A,B,C,X(15),X( 1),X( 7),X(12)); - BODY_60_79(64,C,D,E,T,A,B,X( 0),X( 2),X( 8),X(13)); - BODY_60_79(65,B,C,D,E,T,A,X( 1),X( 3),X( 9),X(14)); - BODY_60_79(66,A,B,C,D,E,T,X( 2),X( 4),X(10),X(15)); - BODY_60_79(67,T,A,B,C,D,E,X( 3),X( 5),X(11),X( 0)); - BODY_60_79(68,E,T,A,B,C,D,X( 4),X( 6),X(12),X( 1)); - BODY_60_79(69,D,E,T,A,B,C,X( 5),X( 7),X(13),X( 2)); - BODY_60_79(70,C,D,E,T,A,B,X( 6),X( 8),X(14),X( 3)); - BODY_60_79(71,B,C,D,E,T,A,X( 7),X( 9),X(15),X( 4)); - BODY_60_79(72,A,B,C,D,E,T,X( 8),X(10),X( 0),X( 5)); - BODY_60_79(73,T,A,B,C,D,E,X( 9),X(11),X( 1),X( 6)); - BODY_60_79(74,E,T,A,B,C,D,X(10),X(12),X( 2),X( 7)); - BODY_60_79(75,D,E,T,A,B,C,X(11),X(13),X( 3),X( 8)); - BODY_60_79(76,C,D,E,T,A,B,X(12),X(14),X( 4),X( 9)); - BODY_60_79(77,B,C,D,E,T,A,X(13),X(15),X( 5),X(10)); - BODY_60_79(78,A,B,C,D,E,T,X(14),X( 0),X( 6),X(11)); - BODY_60_79(79,T,A,B,C,D,E,X(15),X( 1),X( 7),X(12)); - - state[0]=(state[0]+E)&0xffffffff; - state[1]=(state[1]+T)&0xffffffff; - state[2]=(state[2]+A)&0xffffffff; - state[3]=(state[3]+B)&0xffffffff; - state[4]=(state[4]+C)&0xffffffff; - - if (--num <= 0) break; - - A=state[0]; - B=state[1]; - C=state[2]; - D=state[3]; - E=state[4]; - + uint32_t *state = ccdigest_u32(s); + + A = state[0]; + B = state[1]; + C = state[2]; + D = state[3]; + E = state[4]; + + for (;;) { + HOST_c2l(data, l); X( 0) = l; HOST_c2l(data, l); X( 1) = l; + BODY_00_15( 0, A, B, C, D, E, T, X( 0)); HOST_c2l(data, l); X( 2) = l; + BODY_00_15( 1, T, A, B, C, D, E, X( 1)); HOST_c2l(data, l); X( 3) = l; + BODY_00_15( 2, E, T, A, B, C, D, X( 2)); HOST_c2l(data, l); X( 4) = l; + BODY_00_15( 3, D, E, T, A, B, C, X( 3)); HOST_c2l(data, l); X( 5) = l; + BODY_00_15( 4, C, D, E, T, A, B, X( 4)); HOST_c2l(data, l); X( 6) = l; + BODY_00_15( 5, B, C, D, E, T, A, X( 5)); HOST_c2l(data, l); X( 7) = l; + BODY_00_15( 6, A, B, C, D, E, T, X( 6)); HOST_c2l(data, l); X( 8) = l; + BODY_00_15( 7, T, A, B, C, D, E, X( 7)); HOST_c2l(data, l); X( 9) = l; + BODY_00_15( 8, E, T, A, B, C, D, X( 8)); HOST_c2l(data, l); X(10) = l; + BODY_00_15( 9, D, E, T, A, B, C, X( 9)); HOST_c2l(data, l); X(11) = l; + BODY_00_15(10, C, D, E, T, A, B, X(10)); HOST_c2l(data, l); X(12) = l; + BODY_00_15(11, B, C, D, E, T, A, X(11)); HOST_c2l(data, l); X(13) = l; + BODY_00_15(12, A, B, C, D, E, T, X(12)); HOST_c2l(data, l); X(14) = l; + BODY_00_15(13, T, A, B, C, D, E, X(13)); HOST_c2l(data, l); X(15) = l; + BODY_00_15(14, E, T, A, B, C, D, X(14)); + BODY_00_15(15, D, E, T, A, B, C, X(15)); + + BODY_16_19(16, C, D, E, T, A, B, X( 0), X( 0), X( 2), X( 8), X(13)); + BODY_16_19(17, B, C, D, E, T, A, X( 1), X( 1), X( 3), X( 9), X(14)); + BODY_16_19(18, A, B, C, D, E, T, X( 2), X( 2), X( 4), X(10), X(15)); + BODY_16_19(19, T, A, B, C, D, E, X( 3), X( 3), X( 5), X(11), X( 0)); + + 
BODY_20_31(20, E, T, A, B, C, D, X( 4), X( 4), X( 6), X(12), X( 1)); + BODY_20_31(21, D, E, T, A, B, C, X( 5), X( 5), X( 7), X(13), X( 2)); + BODY_20_31(22, C, D, E, T, A, B, X( 6), X( 6), X( 8), X(14), X( 3)); + BODY_20_31(23, B, C, D, E, T, A, X( 7), X( 7), X( 9), X(15), X( 4)); + BODY_20_31(24, A, B, C, D, E, T, X( 8), X( 8), X(10), X( 0), X( 5)); + BODY_20_31(25, T, A, B, C, D, E, X( 9), X( 9), X(11), X( 1), X( 6)); + BODY_20_31(26, E, T, A, B, C, D, X(10), X(10), X(12), X( 2), X( 7)); + BODY_20_31(27, D, E, T, A, B, C, X(11), X(11), X(13), X( 3), X( 8)); + BODY_20_31(28, C, D, E, T, A, B, X(12), X(12), X(14), X( 4), X( 9)); + BODY_20_31(29, B, C, D, E, T, A, X(13), X(13), X(15), X( 5), X(10)); + BODY_20_31(30, A, B, C, D, E, T, X(14), X(14), X( 0), X( 6), X(11)); + BODY_20_31(31, T, A, B, C, D, E, X(15), X(15), X( 1), X( 7), X(12)); + + BODY_32_39(32, E, T, A, B, C, D, X( 0), X( 2), X( 8), X(13)); + BODY_32_39(33, D, E, T, A, B, C, X( 1), X( 3), X( 9), X(14)); + BODY_32_39(34, C, D, E, T, A, B, X( 2), X( 4), X(10), X(15)); + BODY_32_39(35, B, C, D, E, T, A, X( 3), X( 5), X(11), X( 0)); + BODY_32_39(36, A, B, C, D, E, T, X( 4), X( 6), X(12), X( 1)); + BODY_32_39(37, T, A, B, C, D, E, X( 5), X( 7), X(13), X( 2)); + BODY_32_39(38, E, T, A, B, C, D, X( 6), X( 8), X(14), X( 3)); + BODY_32_39(39, D, E, T, A, B, C, X( 7), X( 9), X(15), X( 4)); + + BODY_40_59(40, C, D, E, T, A, B, X( 8), X(10), X( 0), X( 5)); + BODY_40_59(41, B, C, D, E, T, A, X( 9), X(11), X( 1), X( 6)); + BODY_40_59(42, A, B, C, D, E, T, X(10), X(12), X( 2), X( 7)); + BODY_40_59(43, T, A, B, C, D, E, X(11), X(13), X( 3), X( 8)); + BODY_40_59(44, E, T, A, B, C, D, X(12), X(14), X( 4), X( 9)); + BODY_40_59(45, D, E, T, A, B, C, X(13), X(15), X( 5), X(10)); + BODY_40_59(46, C, D, E, T, A, B, X(14), X( 0), X( 6), X(11)); + BODY_40_59(47, B, C, D, E, T, A, X(15), X( 1), X( 7), X(12)); + BODY_40_59(48, A, B, C, D, E, T, X( 0), X( 2), X( 8), X(13)); + BODY_40_59(49, T, A, B, C, D, E, X( 1), X( 3), X( 9), X(14)); + BODY_40_59(50, E, T, A, B, C, D, X( 2), X( 4), X(10), X(15)); + BODY_40_59(51, D, E, T, A, B, C, X( 3), X( 5), X(11), X( 0)); + BODY_40_59(52, C, D, E, T, A, B, X( 4), X( 6), X(12), X( 1)); + BODY_40_59(53, B, C, D, E, T, A, X( 5), X( 7), X(13), X( 2)); + BODY_40_59(54, A, B, C, D, E, T, X( 6), X( 8), X(14), X( 3)); + BODY_40_59(55, T, A, B, C, D, E, X( 7), X( 9), X(15), X( 4)); + BODY_40_59(56, E, T, A, B, C, D, X( 8), X(10), X( 0), X( 5)); + BODY_40_59(57, D, E, T, A, B, C, X( 9), X(11), X( 1), X( 6)); + BODY_40_59(58, C, D, E, T, A, B, X(10), X(12), X( 2), X( 7)); + BODY_40_59(59, B, C, D, E, T, A, X(11), X(13), X( 3), X( 8)); + + BODY_60_79(60, A, B, C, D, E, T, X(12), X(14), X( 4), X( 9)); + BODY_60_79(61, T, A, B, C, D, E, X(13), X(15), X( 5), X(10)); + BODY_60_79(62, E, T, A, B, C, D, X(14), X( 0), X( 6), X(11)); + BODY_60_79(63, D, E, T, A, B, C, X(15), X( 1), X( 7), X(12)); + BODY_60_79(64, C, D, E, T, A, B, X( 0), X( 2), X( 8), X(13)); + BODY_60_79(65, B, C, D, E, T, A, X( 1), X( 3), X( 9), X(14)); + BODY_60_79(66, A, B, C, D, E, T, X( 2), X( 4), X(10), X(15)); + BODY_60_79(67, T, A, B, C, D, E, X( 3), X( 5), X(11), X( 0)); + BODY_60_79(68, E, T, A, B, C, D, X( 4), X( 6), X(12), X( 1)); + BODY_60_79(69, D, E, T, A, B, C, X( 5), X( 7), X(13), X( 2)); + BODY_60_79(70, C, D, E, T, A, B, X( 6), X( 8), X(14), X( 3)); + BODY_60_79(71, B, C, D, E, T, A, X( 7), X( 9), X(15), X( 4)); + BODY_60_79(72, A, B, C, D, E, T, X( 8), X(10), X( 0), X( 5)); + BODY_60_79(73, T, A, B, C, D, E, X( 9), X(11), X( 1), X( 6)); + 
BODY_60_79(74, E, T, A, B, C, D, X(10), X(12), X( 2), X( 7)); + BODY_60_79(75, D, E, T, A, B, C, X(11), X(13), X( 3), X( 8)); + BODY_60_79(76, C, D, E, T, A, B, X(12), X(14), X( 4), X( 9)); + BODY_60_79(77, B, C, D, E, T, A, X(13), X(15), X( 5), X(10)); + BODY_60_79(78, A, B, C, D, E, T, X(14), X( 0), X( 6), X(11)); + BODY_60_79(79, T, A, B, C, D, E, X(15), X( 1), X( 7), X(12)); + + state[0] = (state[0] + E) & 0xffffffff; + state[1] = (state[1] + T) & 0xffffffff; + state[2] = (state[2] + A) & 0xffffffff; + state[3] = (state[3] + B) & 0xffffffff; + state[4] = (state[4] + C) & 0xffffffff; + + if (--num <= 0) { + break; + } + + A = state[0]; + B = state[1]; + C = state[2]; + D = state[3]; + E = state[4]; } } const struct ccdigest_info ccsha1_eay_di = { - .output_size = CCSHA1_OUTPUT_SIZE, - .state_size = CCSHA1_STATE_SIZE, - .block_size = CCSHA1_BLOCK_SIZE, - .oid_size = ccoid_sha1_len, - .oid = CC_DIGEST_OID_SHA1, - .initial_state = ccsha1_initial_state, - .compress = sha1_compress, - .final = ccdigest_final_64be, + .output_size = CCSHA1_OUTPUT_SIZE, + .state_size = CCSHA1_STATE_SIZE, + .block_size = CCSHA1_BLOCK_SIZE, + .oid_size = ccoid_sha1_len, + .oid = CC_DIGEST_OID_SHA1, + .initial_state = ccsha1_initial_state, + .compress = sha1_compress, + .final = ccdigest_final_64be, }; diff --git a/osfmk/corecrypto/ccsha1/src/ccsha1_initial_state.c b/osfmk/corecrypto/ccsha1/src/ccsha1_initial_state.c index 63fb74fe1..f72ecfd9a 100644 --- a/osfmk/corecrypto/ccsha1/src/ccsha1_initial_state.c +++ b/osfmk/corecrypto/ccsha1/src/ccsha1_initial_state.c @@ -36,9 +36,9 @@ #include const uint32_t ccsha1_initial_state[5] = { - 0x67452301, - 0xefcdab89, - 0x98badcfe, - 0x10325476, - 0xc3d2e1f0 + 0x67452301, + 0xefcdab89, + 0x98badcfe, + 0x10325476, + 0xc3d2e1f0 }; diff --git a/osfmk/corecrypto/ccsha2/src/ccdigest_internal.h b/osfmk/corecrypto/ccsha2/src/ccdigest_internal.h index bc3921e2e..59c6acc04 100644 --- a/osfmk/corecrypto/ccsha2/src/ccdigest_internal.h +++ b/osfmk/corecrypto/ccsha2/src/ccdigest_internal.h @@ -38,10 +38,10 @@ #include void ccdigest_final_common(const struct ccdigest_info *di, - ccdigest_ctx_t ctx, void *digest); + ccdigest_ctx_t ctx, void *digest); void ccdigest_final_64be(const struct ccdigest_info *di, ccdigest_ctx_t, - unsigned char *digest); + unsigned char *digest); void ccdigest_final_64le(const struct ccdigest_info *di, ccdigest_ctx_t, - unsigned char *digest); + unsigned char *digest); #endif /* _CORECRYPTO_CCDIGEST_INTERNAL_H_ */ diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_K.c b/osfmk/corecrypto/ccsha2/src/ccsha256_K.c index ed300d1b6..ee04cd1e5 100644 --- a/osfmk/corecrypto/ccsha2/src/ccsha256_K.c +++ b/osfmk/corecrypto/ccsha2/src/ccsha256_K.c @@ -37,17 +37,17 @@ /* the K array */ const uint32_t ccsha256_K[64] CC_ALIGNED(16) = { - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, - 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, - 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, - 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, - 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, - 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, - 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, - 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, - 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, - 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, - 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, - 
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, + 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, + 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, + 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, + 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, + 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, + 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, + 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, + 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, + 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_di.c b/osfmk/corecrypto/ccsha2/src/ccsha256_di.c index c702b9736..d31a9402c 100644 --- a/osfmk/corecrypto/ccsha2/src/ccsha256_di.c +++ b/osfmk/corecrypto/ccsha2/src/ccsha256_di.c @@ -38,26 +38,28 @@ #include "corecrypto/fipspost_trace.h" -const struct ccdigest_info *ccsha256_di(void) +const struct ccdigest_info * +ccsha256_di(void) { - FIPSPOST_TRACE_EVENT; + FIPSPOST_TRACE_EVENT; #if CCSHA2_VNG_INTEL #if defined (__x86_64__) - if (CC_HAS_AVX512_AND_IN_KERNEL()) - return &ccsha256_vng_intel_SupplementalSSE3_di; - else - return ( (CC_HAS_AVX2() ? &ccsha256_vng_intel_AVX2_di : - ( (CC_HAS_AVX1() ? &ccsha256_vng_intel_AVX1_di : - &ccsha256_vng_intel_SupplementalSSE3_di ) ) ) ) ; + if (CC_HAS_AVX512_AND_IN_KERNEL()) { + return &ccsha256_vng_intel_SupplementalSSE3_di; + } else { + return CC_HAS_AVX2() ? &ccsha256_vng_intel_AVX2_di : + ((CC_HAS_AVX1() ? &ccsha256_vng_intel_AVX1_di : + &ccsha256_vng_intel_SupplementalSSE3_di)); + } #else - return &ccsha256_vng_intel_SupplementalSSE3_di; + return &ccsha256_vng_intel_SupplementalSSE3_di; #endif #elif CCSHA2_VNG_ARMV7NEON - return &ccsha256_vng_armv7neon_di; + return &ccsha256_vng_armv7neon_di; #elif CCSHA256_ARMV6M_ASM - return &ccsha256_v6m_di; + return &ccsha256_v6m_di; #else - return &ccsha256_ltc_di; + return &ccsha256_ltc_di; #endif } diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_initial_state.c b/osfmk/corecrypto/ccsha2/src/ccsha256_initial_state.c index 591f1d999..2ede4284b 100644 --- a/osfmk/corecrypto/ccsha2/src/ccsha256_initial_state.c +++ b/osfmk/corecrypto/ccsha2/src/ccsha256_initial_state.c @@ -35,12 +35,12 @@ #include "ccsha2_internal.h" const uint32_t ccsha256_initial_state[8] = { - 0x6A09E667, - 0xBB67AE85, - 0x3C6EF372, - 0xA54FF53A, - 0x510E527F, - 0x9B05688C, - 0x1F83D9AB, - 0x5BE0CD19 + 0x6A09E667, + 0xBB67AE85, + 0x3C6EF372, + 0xA54FF53A, + 0x510E527F, + 0x9B05688C, + 0x1F83D9AB, + 0x5BE0CD19 }; diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_compress.c b/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_compress.c index fb301b446..6c84aa4d4 100644 --- a/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_compress.c +++ b/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_compress.c @@ -53,8 +53,8 @@ #if !CC_KERNEL || !CC_USE_ASM // Various logical functions -#define Ch(x,y,z) (z ^ (x & (y ^ z))) -#define Maj(x,y,z) (((x | y) & z) | (x & y)) +#define Ch(x, y, z) (z ^ (x & (y ^ z))) +#define Maj(x, y, z) (((x | y) & z) | (x & y)) #define S(x, n) ror((x),(n)) #define R(x, n) ((x)>>(n)) @@ -87,70 +87,70 @@ #endif // the round function -#define RND(a,b,c,d,e,f,g,h,i) \ +#define RND(a, b, c, d, e, f, g, h, i) \ t0 = h + Sigma1(e) + Ch(e, f, g) + ccsha256_K[i] + W[i]; \ t1 = 
Sigma0(a) + Maj(a, b, c); \ d += t0; \ h = t0 + t1; // compress 512-bits -void ccsha256_ltc_compress(ccdigest_state_t state, size_t nblocks, const void *in) +void +ccsha256_ltc_compress(ccdigest_state_t state, size_t nblocks, const void *in) { - uint32_t W[64], t0, t1; - uint32_t S0,S1,S2,S3,S4,S5,S6,S7; - int i; - uint32_t *s = ccdigest_u32(state); + uint32_t W[64], t0, t1; + uint32_t S0, S1, S2, S3, S4, S5, S6, S7; + int i; + uint32_t *s = ccdigest_u32(state); #if CC_HANDLE_UNALIGNED_DATA - const unsigned char *buf = in; + const unsigned char *buf = in; #else - const uint32_t *buf = in; + const uint32_t *buf = in; #endif - while(nblocks--) { - - // schedule W 0..15 - set_W(0); set_W(1); set_W(2); set_W(3); set_W(4); set_W(5); set_W(6); set_W(7); - set_W(8); set_W(9); set_W(10);set_W(11);set_W(12);set_W(13);set_W(14);set_W(15); - - // schedule W 16..63 - for (i = 16; i < 64; i++) { - W[i] = Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]; - } - - // copy state into S - S0= s[0]; - S1= s[1]; - S2= s[2]; - S3= s[3]; - S4= s[4]; - S5= s[5]; - S6= s[6]; - S7= s[7]; - - // Compress - for (i = 0; i < 64; i += 8) { - RND(S0,S1,S2,S3,S4,S5,S6,S7,i+0); - RND(S7,S0,S1,S2,S3,S4,S5,S6,i+1); - RND(S6,S7,S0,S1,S2,S3,S4,S5,i+2); - RND(S5,S6,S7,S0,S1,S2,S3,S4,i+3); - RND(S4,S5,S6,S7,S0,S1,S2,S3,i+4); - RND(S3,S4,S5,S6,S7,S0,S1,S2,i+5); - RND(S2,S3,S4,S5,S6,S7,S0,S1,i+6); - RND(S1,S2,S3,S4,S5,S6,S7,S0,i+7); - } - - // feedback - s[0] += S0; - s[1] += S1; - s[2] += S2; - s[3] += S3; - s[4] += S4; - s[5] += S5; - s[6] += S6; - s[7] += S7; - - buf+=CCSHA256_BLOCK_SIZE/sizeof(buf[0]); - } + while (nblocks--) { + // schedule W 0..15 + set_W(0); set_W(1); set_W(2); set_W(3); set_W(4); set_W(5); set_W(6); set_W(7); + set_W(8); set_W(9); set_W(10); set_W(11); set_W(12); set_W(13); set_W(14); set_W(15); + + // schedule W 16..63 + for (i = 16; i < 64; i++) { + W[i] = Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]; + } + + // copy state into S + S0 = s[0]; + S1 = s[1]; + S2 = s[2]; + S3 = s[3]; + S4 = s[4]; + S5 = s[5]; + S6 = s[6]; + S7 = s[7]; + + // Compress + for (i = 0; i < 64; i += 8) { + RND(S0, S1, S2, S3, S4, S5, S6, S7, i + 0); + RND(S7, S0, S1, S2, S3, S4, S5, S6, i + 1); + RND(S6, S7, S0, S1, S2, S3, S4, S5, i + 2); + RND(S5, S6, S7, S0, S1, S2, S3, S4, i + 3); + RND(S4, S5, S6, S7, S0, S1, S2, S3, i + 4); + RND(S3, S4, S5, S6, S7, S0, S1, S2, i + 5); + RND(S2, S3, S4, S5, S6, S7, S0, S1, i + 6); + RND(S1, S2, S3, S4, S5, S6, S7, S0, i + 7); + } + + // feedback + s[0] += S0; + s[1] += S1; + s[2] += S2; + s[3] += S3; + s[4] += S4; + s[5] += S5; + s[6] += S6; + s[7] += S7; + + buf += CCSHA256_BLOCK_SIZE / sizeof(buf[0]); + } } #endif diff --git a/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_di.c b/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_di.c index 7b9aef1ce..93a14e449 100644 --- a/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_di.c +++ b/osfmk/corecrypto/ccsha2/src/ccsha256_ltc_di.c @@ -40,14 +40,14 @@ #if !CC_KERNEL || !CC_USE_ASM const struct ccdigest_info ccsha256_ltc_di = { - .output_size = CCSHA256_OUTPUT_SIZE, - .state_size = CCSHA256_STATE_SIZE, - .block_size = CCSHA256_BLOCK_SIZE, - .oid_size = ccoid_sha256_len, - .oid = CC_DIGEST_OID_SHA256, - .initial_state = ccsha256_initial_state, - .compress = ccsha256_ltc_compress, - .final = ccdigest_final_64be, + .output_size = CCSHA256_OUTPUT_SIZE, + .state_size = CCSHA256_STATE_SIZE, + .block_size = CCSHA256_BLOCK_SIZE, + .oid_size = ccoid_sha256_len, + .oid = CC_DIGEST_OID_SHA256, + .initial_state = ccsha256_initial_state, + .compress = 
ccsha256_ltc_compress,
+	.final = ccdigest_final_64be,
 };

 #endif
diff --git a/osfmk/corpses/corpse.c b/osfmk/corpses/corpse.c
index 2941e2125..73448a1f5 100644
--- a/osfmk/corpses/corpse.c
+++ b/osfmk/corpses/corpse.c
@@ -30,17 +30,17 @@
 /*
  * Corpses Overview
  * ================
- * 
+ *
  * A corpse is the state of a process past the point of its death. This means that the process has
  * completed all its termination operations like releasing file descriptors, mach ports, sockets and
  * other constructs used to identify a process. To all other processes this mimics the behavior as if
  * the process has died and is no longer available by any means.
- * 
+ *
  * Why do we need Corpses?
  * -----------------------
  * For crash inspection we need to inspect the state and data that is associated with the process so that
  * crash reporting infrastructure can build backtraces, find leaks etc. For example, a crash
  * report can only be built from the state of the already-dead process.
- * 
+ *
  * Corpses functionality in kernel
  * ===============================
  * The corpse functionality is an extension of existing exception reporting mechanisms we have. The
@@ -49,7 +49,7 @@
  * notification the exception is not handled, then the process begins the death operations and during
  * proc_prepareexit, we decide to create a corpse for inspection. Following is a sample run-through
  * of events and data shuffling that happens when corpses are enabled.
- * 
+ *
  * * a process causes an exception during normal execution of threads.
  * * The exception generated by either mach (e.g. GUARDED_MACHPORT) or bsd (e.g. SIGABRT, GUARDED_FD
  * etc) side is passed through the exception_triage() function to follow the thread -> task -> host
@@ -78,8 +78,8 @@
  * inspection flag set are just bounced to another holding queue (crashed_threads_queue).
  * Only after the corpse notification are these pulled out from the holding queue and enqueued
  * back to the termination queue
- * 
- * 
+ *
+ *
  * Corpse info format
  * ==================
  * The kernel (task_mark_corpse()) makes a vm allocation in the dead task's vm space (with tag
@@ -88,7 +88,7 @@
  * * bsd proc exit path may write down pid, parent pid, number of file descriptors etc
  * * mach side may append data regarding ledger usage, memory stats etc
  * See detailed info about the memory structure and format in kern_cdata.h documentation.
- * 
+ *
  * Configuring Corpses functionality
  * =================================
  * boot-arg: -no_corpses disables corpse generation. This can be added/removed without affecting
@@ -98,7 +98,7 @@
  * by system.
  * CORPSEINFO_ALLOCATION_SIZE: is the default size of the vm allocation. If in the future there is much more
  * data to be put in, then please re-tune this parameter.
- * 
+ *
  * Debugging/Visibility
  * ====================
  * * lldbmacros for thread and task summary are updated to show "C" flag for corpse task/threads.
@@ -106,13 +106,13 @@
  * and holding queue (dumpcrashed_thread_queue).
  * * In case corpse creation is disabled or ignored, the system log is updated with
  * printf data and the reason.
- * 
+ *
  * Limitations of Corpses
  * ======================
  * Holding memory for inspection creates vm pressure, which might not be desirable
  * on low memory devices. There is a limit on the number of corpses inspected at a time, which is
  * marked by TOTAL_CORPSES_ALLOWED.
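 * For example, if many processes crash back to back, corpse creation simply
 * stops once TOTAL_CORPSES_ALLOWED corpses are in flight (see the
 * inflight_corpses gating in task_crashinfo_get_ref()/task_crashinfo_release_ref()
 * below) and resumes as corpses are inspected and released.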
- * + * */ @@ -158,19 +158,25 @@ int exc_via_corpse_forking = 1; /* bootarg to generate corpse for fatal high memory watermark violation */ int corpse_for_fatal_memkill = 1; -#ifdef __arm__ -static inline int IS_64BIT_PROCESS(__unused void *p) { return 0; } +#ifdef __arm__ +static inline int +IS_64BIT_PROCESS(__unused void *p) +{ + return 0; +} #else extern int IS_64BIT_PROCESS(void *); #endif /* __arm__ */ extern void gather_populate_corpse_crashinfo(void *p, task_t task, - mach_exception_data_type_t code, mach_exception_data_type_t subcode, - uint64_t *udata_buffer, int num_udata, void *reason); + mach_exception_data_type_t code, mach_exception_data_type_t subcode, + uint64_t *udata_buffer, int num_udata, void *reason); extern void *proc_find(int pid); extern int proc_rele(void *p); -void corpses_init(){ +void +corpses_init() +{ char temp_buf[20]; int exc_corpse_forking; int fatal_memkill; @@ -189,7 +195,8 @@ void corpses_init(){ * Routine: corpses_enabled * returns FALSE if not enabled */ -boolean_t corpses_enabled() +boolean_t +corpses_enabled() { return corpse_enabled_config; } @@ -229,8 +236,8 @@ task_crashinfo_get_ref(uint16_t kcd_u_flags) // this reloads the value in oldgate if (atomic_compare_exchange_strong_explicit(&inflight_corpses, - &oldgate.value, newgate.value, memory_order_relaxed, - memory_order_relaxed)) { + &oldgate.value, newgate.value, memory_order_relaxed, + memory_order_relaxed)) { return KERN_SUCCESS; } } @@ -260,8 +267,8 @@ task_crashinfo_release_ref(uint16_t kcd_u_flags) } // this reloads the value in oldgate if (atomic_compare_exchange_strong_explicit(&inflight_corpses, - &oldgate.value, newgate.value, memory_order_relaxed, - memory_order_relaxed)) { + &oldgate.value, newgate.value, memory_order_relaxed, + memory_order_relaxed)) { return KERN_SUCCESS; } } @@ -270,7 +277,7 @@ task_crashinfo_release_ref(uint16_t kcd_u_flags) kcdata_descriptor_t task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size, - uint32_t kc_u_flags, unsigned kc_flags) + uint32_t kc_u_flags, unsigned kc_flags) { kcdata_descriptor_t kcdata; @@ -281,7 +288,7 @@ task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size, } kcdata = kcdata_memory_alloc_init(crash_data_p, TASK_CRASHINFO_BEGIN, size, - kc_flags); + kc_flags); if (kcdata) { kcdata->kcd_user_flags = kc_u_flags; } else if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) { @@ -312,10 +319,11 @@ task_crashinfo_destroy(kcdata_descriptor_t data) * returns: crash info data attached to task. 
* NULL if task is null or has no corpse info */ -kcdata_descriptor_t task_get_corpseinfo(task_t task) +kcdata_descriptor_t +task_get_corpseinfo(task_t task) { kcdata_descriptor_t retval = NULL; - if (task != NULL){ + if (task != NULL) { retval = task->corpse_info; } return retval; @@ -363,17 +371,17 @@ task_purge_all_corpses(void) /* Iterate through all the corpse tasks and clear all map entries */ queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) { vm_map_remove(task->map, - task->map->min_offset, - task->map->max_offset, - /* - * Final cleanup: - * + no unnesting - * + remove immutable mappings - * + allow gaps in the range - */ - (VM_MAP_REMOVE_NO_UNNESTING | - VM_MAP_REMOVE_IMMUTABLE | - VM_MAP_REMOVE_GAPS_OK)); + task->map->min_offset, + task->map->max_offset, + /* + * Final cleanup: + * + no unnesting + * + remove immutable mappings + * + allow gaps in the range + */ + (VM_MAP_REMOVE_NO_UNNESTING | + VM_MAP_REMOVE_IMMUTABLE | + VM_MAP_REMOVE_GAPS_OK)); } lck_mtx_unlock(&tasks_corpse_lock); @@ -478,7 +486,7 @@ task_enqueue_exception_with_corpse( /* Generate a corpse for the given task, will return with a ref on corpse task */ kr = task_generate_corpse_internal(task, &new_task, &thread, - etype, code[0], code[1], reason); + etype, code[0], code[1], reason); if (kr == KERN_SUCCESS) { if (thread == THREAD_NULL) { return KERN_FAILURE; @@ -529,7 +537,7 @@ task_generate_corpse_internal( #if CONFIG_MACF struct label *label = NULL; #endif - + if (!corpses_enabled()) { return KERN_NOT_SUPPORTED; } @@ -553,10 +561,10 @@ task_generate_corpse_internal( is_64bit_addr = IS_64BIT_PROCESS(p); is_64bit_data = (task == TASK_NULL) ? is_64bit_addr : task_get_64bit_data(task); t_flags = TF_CORPSE_FORK | - TF_PENDING_CORPSE | - TF_CORPSE | - (is_64bit_addr ? TF_64B_ADDR : TF_NONE) | - (is_64bit_data ? TF_64B_DATA : TF_NONE); + TF_PENDING_CORPSE | + TF_CORPSE | + (is_64bit_addr ? TF_64B_ADDR : TF_NONE) | + (is_64bit_data ? TF_64B_DATA : TF_NONE); #if CONFIG_MACF /* Create the corpse label credentials from the process. 
*/ @@ -565,29 +573,29 @@ task_generate_corpse_internal( /* Create a task for corpse */ kr = task_create_internal(task, - NULL, - TRUE, - is_64bit_addr, - is_64bit_data, - t_flags, - TPF_NONE, - &new_task); + NULL, + TRUE, + is_64bit_addr, + is_64bit_data, + t_flags, + TPF_NONE, + &new_task); if (kr != KERN_SUCCESS) { goto error_task_generate_corpse; } /* Create and copy threads from task, returns a ref to thread */ kr = task_duplicate_map_and_threads(task, p, new_task, &thread, - &udata_buffer, &size, &num_udata); + &udata_buffer, &size, &num_udata); if (kr != KERN_SUCCESS) { goto error_task_generate_corpse; } kr = task_collect_crash_info(new_task, #if CONFIG_MACF - label, + label, #endif - TRUE); + TRUE); if (kr != KERN_SUCCESS) { goto error_task_generate_corpse; } @@ -607,7 +615,7 @@ task_generate_corpse_internal( /* Populate the corpse blob, use the proc struct of task instead of corpse task */ gather_populate_corpse_crashinfo(p, new_task, - code, subcode, udata_buffer, num_udata, reason); + code, subcode, udata_buffer, num_udata, reason); /* Add it to global corpse task list */ task_add_to_corpse_task_list(new_task); @@ -621,7 +629,7 @@ error_task_generate_corpse: mac_exc_free_label(label); } #endif - + /* Release the proc reference */ if (p != NULL) { proc_rele(p); @@ -722,7 +730,7 @@ task_map_corpse_info_64( } corpse_info_kernel = kcdata_memory_get_begin_addr(corpse_task->corpse_info); kr = mach_vm_allocate_kernel(task->map, &crash_data_ptr, size, - VM_FLAGS_ANYWHERE, VM_MEMORY_CORPSEINFO); + VM_FLAGS_ANYWHERE, VM_MEMORY_CORPSEINFO); if (kr != KERN_SUCCESS) { return kr; } diff --git a/osfmk/corpses/task_corpse.h b/osfmk/corpses/task_corpse.h index 120aeb97c..a264a60a9 100644 --- a/osfmk/corpses/task_corpse.h +++ b/osfmk/corpses/task_corpse.h @@ -34,16 +34,16 @@ #include #include -typedef struct kcdata_item *task_crashinfo_item_t; +typedef struct kcdata_item *task_crashinfo_item_t; /* Deprecated: use the KCDATA_* macros for all future use */ -#define CRASHINFO_ITEM_TYPE(item) KCDATA_ITEM_TYPE(item) -#define CRASHINFO_ITEM_SIZE(item) KCDATA_ITEM_SIZE(item) -#define CRASHINFO_ITEM_DATA_PTR(item) KCDATA_ITEM_DATA_PTR(item) +#define CRASHINFO_ITEM_TYPE(item) KCDATA_ITEM_TYPE(item) +#define CRASHINFO_ITEM_SIZE(item) KCDATA_ITEM_SIZE(item) +#define CRASHINFO_ITEM_DATA_PTR(item) KCDATA_ITEM_DATA_PTR(item) #define CRASHINFO_ITEM_NEXT_HEADER(item) KCDATA_ITEM_NEXT_HEADER(item) -#define CRASHINFO_ITEM_FOREACH(head) KCDATA_ITEM_FOREACH(head) +#define CRASHINFO_ITEM_FOREACH(head) KCDATA_ITEM_FOREACH(head) #ifndef KERNEL @@ -69,8 +69,8 @@ extern kern_return_t task_deliver_crash_notification(task_t, thread_t, exception extern kcdata_descriptor_t task_get_corpseinfo(task_t task); extern kcdata_descriptor_t task_crashinfo_alloc_init( - mach_vm_address_t crash_data_p, - unsigned size, uint32_t kc_u_flags, unsigned kc_flags); + mach_vm_address_t crash_data_p, + unsigned size, uint32_t kc_u_flags, unsigned kc_flags); extern kern_return_t task_crashinfo_destroy(kcdata_descriptor_t data); extern void corpses_init(void); @@ -79,24 +79,24 @@ extern unsigned long total_corpses_count(void); extern boolean_t corpses_enabled(void); extern kern_return_t task_generate_corpse_internal( - task_t task, - task_t *corpse_task, - thread_t *thread, - exception_type_t etype, - mach_exception_data_type_t code, - mach_exception_data_type_t subcode, - void *reason); + task_t task, + task_t *corpse_task, + thread_t *thread, + exception_type_t etype, + mach_exception_data_type_t code, + mach_exception_data_type_t subcode, + 
void *reason); extern void task_clear_corpse(task_t task); extern kern_return_t task_duplicate_map_and_threads( - task_t task, - void *p, - task_t new_task, - thread_t *thread, - uint64_t **udata_buffer, - int *size, - int *num_udata); + task_t task, + void *p, + task_t new_task, + thread_t *thread, + uint64_t **udata_buffer, + int *size, + int *num_udata); extern kern_return_t task_enqueue_exception_with_corpse( task_t task, diff --git a/osfmk/default_pager/default_pager_types.h b/osfmk/default_pager/default_pager_types.h index 8ad6fedd4..3e8fd8924 100644 --- a/osfmk/default_pager/default_pager_types.h +++ b/osfmk/default_pager/default_pager_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ -#ifndef _MACH_DEFAULT_PAGER_TYPES_H_ +#ifndef _MACH_DEFAULT_PAGER_TYPES_H_ #define _MACH_DEFAULT_PAGER_TYPES_H_ #include @@ -41,16 +41,16 @@ #include #include -#define HI_WAT_ALERT 0x01 -#define LO_WAT_ALERT 0x02 -#define SWAP_ENCRYPT_ON 0x04 -#define SWAP_ENCRYPT_OFF 0x08 -#define SWAP_COMPACT_DISABLE 0x10 -#define SWAP_COMPACT_ENABLE 0x20 -#define PROC_RESUME 0x40 -#define SWAP_FILE_CREATION_ERROR 0x80 -#define USE_EMERGENCY_SWAP_FILE_FIRST 0x100 +#define HI_WAT_ALERT 0x01 +#define LO_WAT_ALERT 0x02 +#define SWAP_ENCRYPT_ON 0x04 +#define SWAP_ENCRYPT_OFF 0x08 +#define SWAP_COMPACT_DISABLE 0x10 +#define SWAP_COMPACT_ENABLE 0x20 +#define PROC_RESUME 0x40 +#define SWAP_FILE_CREATION_ERROR 0x80 +#define USE_EMERGENCY_SWAP_FILE_FIRST 0x100 #endif /* __APPLE_API_UNSTABLE */ -#endif /* _MACH_DEFAULT_PAGER_TYPES_H_ */ +#endif /* _MACH_DEFAULT_PAGER_TYPES_H_ */ diff --git a/osfmk/device/device_init.c b/osfmk/device/device_init.c index 8bf42dbfa..c0a92c6e7 100644 --- a/osfmk/device/device_init.c +++ b/osfmk/device/device_init.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -57,9 +57,9 @@ */ /* * Author: David B. Golub, Carnegie Mellon University - * Date: 8/89 + * Date: 8/89 * - * Initialize device service as part of kernel task. + * Initialize device service as part of kernel task. */ #include @@ -79,7 +79,7 @@ #include #include -ipc_port_t master_device_port; +ipc_port_t master_device_port; void *master_device_kobject; lck_grp_attr_t * dev_lck_grp_attr; @@ -91,16 +91,17 @@ void device_service_create(void) { master_device_port = ipc_port_alloc_kernel(); - if (master_device_port == IP_NULL) - panic("can't allocate master device port"); + if (master_device_port == IP_NULL) { + panic("can't allocate master device port"); + } ipc_kobject_set(master_device_port, (ipc_kobject_t)&master_device_kobject, IKOT_MASTER_DEVICE); kernel_set_special_port(host_priv_self(), HOST_IO_MASTER_PORT, - ipc_port_make_send(master_device_port)); + ipc_port_make_send(master_device_port)); /* allocate device lock group attribute and group */ - dev_lck_grp_attr= lck_grp_attr_alloc_init(); - dev_lck_grp = lck_grp_alloc_init("device", dev_lck_grp_attr); + dev_lck_grp_attr = lck_grp_attr_alloc_init(); + dev_lck_grp = lck_grp_alloc_init("device", dev_lck_grp_attr); /* Allocate device lock attribute */ dev_lck_attr = lck_attr_alloc_init(); diff --git a/osfmk/device/device_port.h b/osfmk/device/device_port.h index a2445bab1..625eabe0a 100644 --- a/osfmk/device/device_port.h +++ b/osfmk/device/device_port.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -57,25 +57,25 @@ */ /* * Author: David B. Golub, Carnegie Mellon University - * Date: 8/89 + * Date: 8/89 */ -#ifndef _DEVICE_DEVICE_PORT_H_ -#define _DEVICE_DEVICE_PORT_H_ +#ifndef _DEVICE_DEVICE_PORT_H_ +#define _DEVICE_DEVICE_PORT_H_ #include /* * Master privileged I/O object for this host */ -extern mach_port_t master_device_port; +extern mach_port_t master_device_port; -#define DEVICE_PAGER_GUARDED 0x1 -#define DEVICE_PAGER_COHERENT 0x2 -#define DEVICE_PAGER_CACHE_INHIB 0x4 -#define DEVICE_PAGER_WRITE_THROUGH 0x8 -#define DEVICE_PAGER_EARLY_ACK 0x20 -#define DEVICE_PAGER_CONTIGUOUS 0x100 -#define DEVICE_PAGER_NOPHYSCACHE 0x200 +#define DEVICE_PAGER_GUARDED 0x1 +#define DEVICE_PAGER_COHERENT 0x2 +#define DEVICE_PAGER_CACHE_INHIB 0x4 +#define DEVICE_PAGER_WRITE_THROUGH 0x8 +#define DEVICE_PAGER_EARLY_ACK 0x20 +#define DEVICE_PAGER_CONTIGUOUS 0x100 +#define DEVICE_PAGER_NOPHYSCACHE 0x200 -#endif /* _DEVICE_DEVICE_PORT_H_ */ +#endif /* _DEVICE_DEVICE_PORT_H_ */ diff --git a/osfmk/device/device_types.h b/osfmk/device/device_types.h index fb2562055..28a649ce8 100644 --- a/osfmk/device/device_types.h +++ b/osfmk/device/device_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -57,11 +57,11 @@ */ /* * Author: David B. Golub, Carnegie Mellon University - * Date: 3/89 + * Date: 3/89 */ -#ifndef DEVICE_TYPES_H -#define DEVICE_TYPES_H +#ifndef DEVICE_TYPES_H +#define DEVICE_TYPES_H /* * Types for device interface. @@ -72,14 +72,14 @@ #include #if PRIVATE -#define IOKIT_SERVER_VERSION 20150715 +#define IOKIT_SERVER_VERSION 20150715 #endif /* * IO buffer - out-of-line array of characters. */ -typedef char * io_buf_ptr_t; +typedef char * io_buf_ptr_t; /* * Some types for IOKit. 
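The typedef ladder in the next hunk is width-sensitive: in-kernel and LP64 user builds make io_user_scalar_t and io_user_reference_t 64-bit, while 32-bit user builds keep int and natural_t so existing MIG message layouts stay binary-compatible. A minimal sketch of the same selection logic, assuming a hosted C compiler; the demo_* names are hypothetical stand-ins, not part of this header:

    #include <assert.h>
    #include <stdint.h>

    #if defined(KERNEL) || defined(__LP64__)
    typedef uint64_t demo_io_user_scalar_t;        /* 64-bit in-kernel and for LP64 user */
    typedef uint64_t demo_io_user_reference_t;
    #else
    typedef int demo_io_user_scalar_t;             /* legacy 32-bit user ABI */
    typedef unsigned int demo_io_user_reference_t; /* stand-in for natural_t (assumed 32-bit) */
    #endif

    int main(void)
    {
        /* every variant carries 16 in-band scalars and 8 async-reference slots */
        demo_io_user_scalar_t scalars[16];
        demo_io_user_reference_t refs[8];
    #if defined(KERNEL) || defined(__LP64__)
        assert(sizeof(scalars[0]) == 8 && sizeof(refs[0]) == 8);
    #else
        assert(sizeof(scalars[0]) == 4 && sizeof(refs[0]) == 4);
    #endif
        return 0;
    }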
@@ -88,33 +88,33 @@ typedef char * io_buf_ptr_t; #ifdef IOKIT /* must match device_types.defs */ -typedef char io_name_t[128]; -typedef char io_string_t[512]; -typedef char io_string_inband_t[4096]; -typedef char io_struct_inband_t[4096]; +typedef char io_name_t[128]; +typedef char io_string_t[512]; +typedef char io_string_inband_t[4096]; +typedef char io_struct_inband_t[4096]; #if KERNEL -typedef uint64_t io_user_scalar_t; -typedef uint64_t io_user_reference_t; -typedef int io_scalar_inband_t[16]; +typedef uint64_t io_user_scalar_t; +typedef uint64_t io_user_reference_t; +typedef int io_scalar_inband_t[16]; // must be the same type as OSAsyncReference -typedef natural_t io_async_ref_t[8]; -typedef io_user_scalar_t io_scalar_inband64_t[16]; -typedef io_user_reference_t io_async_ref64_t[8]; +typedef natural_t io_async_ref_t[8]; +typedef io_user_scalar_t io_scalar_inband64_t[16]; +typedef io_user_reference_t io_async_ref64_t[8]; #elif __LP64__ -typedef uint64_t io_user_scalar_t; -typedef uint64_t io_user_reference_t; -typedef io_user_scalar_t io_scalar_inband_t[16]; -typedef io_user_reference_t io_async_ref_t[8]; -typedef io_user_scalar_t io_scalar_inband64_t[16]; -typedef io_user_reference_t io_async_ref64_t[8]; +typedef uint64_t io_user_scalar_t; +typedef uint64_t io_user_reference_t; +typedef io_user_scalar_t io_scalar_inband_t[16]; +typedef io_user_reference_t io_async_ref_t[8]; +typedef io_user_scalar_t io_scalar_inband64_t[16]; +typedef io_user_reference_t io_async_ref64_t[8]; #else -typedef int io_user_scalar_t; -typedef natural_t io_user_reference_t; -typedef io_user_scalar_t io_scalar_inband_t[16]; -typedef io_user_reference_t io_async_ref_t[8]; -typedef uint64_t io_scalar_inband64_t[16]; -typedef uint64_t io_async_ref64_t[8]; +typedef int io_user_scalar_t; +typedef natural_t io_user_reference_t; +typedef io_user_scalar_t io_scalar_inband_t[16]; +typedef io_user_reference_t io_async_ref_t[8]; +typedef uint64_t io_scalar_inband64_t[16]; +typedef uint64_t io_async_ref64_t[8]; #endif // __LP64__ #ifdef MACH_KERNEL_PRIVATE @@ -122,8 +122,8 @@ typedef uint64_t io_async_ref64_t[8]; typedef struct IOObject * io_object_t; typedef io_object_t io_connect_t; -extern void iokit_remove_reference( io_object_t obj ); -extern void iokit_remove_connect_reference( io_object_t obj ); +extern void iokit_remove_reference( io_object_t obj ); +extern void iokit_remove_connect_reference( io_object_t obj ); extern io_object_t iokit_lookup_object_port( ipc_port_t port ); extern io_connect_t iokit_lookup_connect_port( ipc_port_t port ); @@ -135,14 +135,13 @@ extern boolean_t iokit_notify( mach_msg_header_t *msg ); #else -#ifndef __IOKIT_PORTS_DEFINED__ +#ifndef __IOKIT_PORTS_DEFINED__ #define __IOKIT_PORTS_DEFINED__ -typedef mach_port_t io_object_t; -#endif /* __IOKIT_PORTS_DEFINED__ */ +typedef mach_port_t io_object_t; +#endif /* __IOKIT_PORTS_DEFINED__ */ #endif /* MACH_KERNEL */ #endif /* IOKIT */ -#endif /* DEVICE_TYPES_H */ - +#endif /* DEVICE_TYPES_H */ diff --git a/osfmk/device/iokit_rpc.c b/osfmk/device/iokit_rpc.c index 75650da7b..487eee336 100644 --- a/osfmk/device/iokit_rpc.c +++ b/osfmk/device/iokit_rpc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -35,7 +35,7 @@ //#include #include -#include /* spl definitions */ +#include /* spl definitions */ #include #include @@ -78,36 +78,37 @@ static io_object_t iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type) { - io_object_t obj; + io_object_t obj; - if (!IP_VALID(port)) - return (NULL); + if (!IP_VALID(port)) { + return NULL; + } iokit_lock_port(port); if (ip_active(port) && (ip_kotype(port) == type)) { - obj = (io_object_t) port->ip_kobject; - iokit_add_reference( obj, type ); + obj = (io_object_t) port->ip_kobject; + iokit_add_reference( obj, type ); + } else { + obj = NULL; } - else - obj = NULL; iokit_unlock_port(port); - return( obj ); + return obj; } MIGEXTERN io_object_t iokit_lookup_object_port( - ipc_port_t port) + ipc_port_t port) { - return (iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT)); + return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT); } MIGEXTERN io_object_t iokit_lookup_connect_port( - ipc_port_t port) + ipc_port_t port) { - return (iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT)); + return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT); } static io_object_t @@ -144,31 +145,31 @@ iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_t EXTERN io_object_t iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task) { - return (iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space)); + return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space); } EXTERN io_object_t iokit_lookup_connect_ref_current_task(mach_port_name_t name) { - return (iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space())); + return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space()); } EXTERN void iokit_retain_port( ipc_port_t port ) { - ipc_port_reference( port ); + ipc_port_reference( port ); } EXTERN void iokit_release_port( ipc_port_t port ) { - ipc_port_release( port ); + ipc_port_release( port ); } EXTERN void iokit_release_port_send( ipc_port_t port ) { - ipc_port_release_send( port ); + ipc_port_release_send( port ); } extern lck_mtx_t iokit_obj_to_port_binding_lock; @@ -176,13 +177,13 @@ extern lck_mtx_t iokit_obj_to_port_binding_lock; EXTERN void iokit_lock_port( __unused ipc_port_t port ) { - lck_mtx_lock(&iokit_obj_to_port_binding_lock); + lck_mtx_lock(&iokit_obj_to_port_binding_lock); } EXTERN void iokit_unlock_port( __unused ipc_port_t port ) { - lck_mtx_unlock(&iokit_obj_to_port_binding_lock); + lck_mtx_unlock(&iokit_obj_to_port_binding_lock); } /* @@ -193,36 +194,38 @@ iokit_unlock_port( __unused ipc_port_t port ) static ipc_port_t 
iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type) { - ipc_port_t port; - ipc_port_t sendPort; + ipc_port_t port; + ipc_port_t sendPort; - if( obj == NULL) - return IP_NULL; + if (obj == NULL) { + return IP_NULL; + } - port = iokit_port_for_object( obj, type ); - if( port) { - sendPort = ipc_port_make_send( port); - iokit_release_port( port ); - } else - sendPort = IP_NULL; + port = iokit_port_for_object( obj, type ); + if (port) { + sendPort = ipc_port_make_send( port); + iokit_release_port( port ); + } else { + sendPort = IP_NULL; + } - iokit_remove_reference( obj ); + iokit_remove_reference( obj ); - return( sendPort); + return sendPort; } MIGEXTERN ipc_port_t iokit_make_object_port( - io_object_t obj ) + io_object_t obj ) { - return (iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT)); + return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT); } MIGEXTERN ipc_port_t iokit_make_connect_port( - io_object_t obj ) + io_object_t obj ) { - return (iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT)); + return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT); } int gIOKitPortCount; @@ -230,149 +233,150 @@ int gIOKitPortCount; EXTERN ipc_port_t iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type ) { - ipc_port_t notify; - ipc_port_t port; - - do { - - /* Allocate port, keeping a reference for it. */ - port = ipc_port_alloc_kernel(); - if( port == IP_NULL) - continue; - - /* set kobject & type */ - ipc_kobject_set( port, (ipc_kobject_t) obj, type); + ipc_port_t notify; + ipc_port_t port; + + do { + /* Allocate port, keeping a reference for it. */ + port = ipc_port_alloc_kernel(); + if (port == IP_NULL) { + continue; + } - /* Request no-senders notifications on the port. */ - ip_lock( port); - notify = ipc_port_make_sonce_locked( port); - ipc_port_nsrequest( port, 1, notify, ¬ify); - /* port unlocked */ - assert( notify == IP_NULL); - gIOKitPortCount++; + /* set kobject & type */ + ipc_kobject_set( port, (ipc_kobject_t) obj, type); - } while( FALSE); + /* Request no-senders notifications on the port. 
*/ + ip_lock( port); + notify = ipc_port_make_sonce_locked( port); + ipc_port_nsrequest( port, 1, notify, ¬ify); + /* port unlocked */ + assert( notify == IP_NULL); + gIOKitPortCount++; + } while (FALSE); - return( port ); + return port; } EXTERN kern_return_t iokit_destroy_object_port( ipc_port_t port ) { - - iokit_lock_port(port); - ipc_kobject_set( port, IKO_NULL, IKOT_NONE); + iokit_lock_port(port); + ipc_kobject_set( port, IKO_NULL, IKOT_NONE); // iokit_remove_reference( obj ); - iokit_unlock_port(port); - ipc_port_dealloc_kernel( port); - gIOKitPortCount--; + iokit_unlock_port(port); + ipc_port_dealloc_kernel( port); + gIOKitPortCount--; - return( KERN_SUCCESS); + return KERN_SUCCESS; } EXTERN kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type ) { - iokit_lock_port(port); - ipc_kobject_set( port, (ipc_kobject_t) obj, type); - iokit_unlock_port(port); + iokit_lock_port(port); + ipc_kobject_set( port, (ipc_kobject_t) obj, type); + iokit_unlock_port(port); - return( KERN_SUCCESS); + return KERN_SUCCESS; } EXTERN mach_port_name_t iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type ) { - ipc_port_t port; - ipc_port_t sendPort; - mach_port_name_t name = 0; - - if( obj == NULL) - return MACH_PORT_NULL; - - port = iokit_port_for_object( obj, type ); - if( port) { - sendPort = ipc_port_make_send( port); - iokit_release_port( port ); - } else - sendPort = IP_NULL; - - if (IP_VALID( sendPort )) { - kern_return_t kr; - kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort, - MACH_MSG_TYPE_PORT_SEND, TRUE, &name); - if ( kr != KERN_SUCCESS) { - ipc_port_release_send( sendPort ); - name = MACH_PORT_NULL; + ipc_port_t port; + ipc_port_t sendPort; + mach_port_name_t name = 0; + + if (obj == NULL) { + return MACH_PORT_NULL; } - } else if ( sendPort == IP_NULL) - name = MACH_PORT_NULL; - else if ( sendPort == IP_DEAD) - name = MACH_PORT_DEAD; - return( name ); + port = iokit_port_for_object( obj, type ); + if (port) { + sendPort = ipc_port_make_send( port); + iokit_release_port( port ); + } else { + sendPort = IP_NULL; + } + + if (IP_VALID( sendPort )) { + kern_return_t kr; + kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort, + MACH_MSG_TYPE_PORT_SEND, TRUE, &name); + if (kr != KERN_SUCCESS) { + ipc_port_release_send( sendPort ); + name = MACH_PORT_NULL; + } + } else if (sendPort == IP_NULL) { + name = MACH_PORT_NULL; + } else if (sendPort == IP_DEAD) { + name = MACH_PORT_DEAD; + } + + return name; } EXTERN kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta ) { - return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta )); + return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ); } /* * Handle the No-More_Senders notification generated from a device port destroy. * Since there are no longer any tasks which hold a send right to this device - * port a NMS notification has been generated. + * port a NMS notification has been generated. */ static void iokit_no_senders( mach_no_senders_notification_t * notification ) { - ipc_port_t port; - io_object_t obj = NULL; - ipc_kobject_type_t type = IKOT_NONE; - ipc_port_t notify; - - port = (ipc_port_t) notification->not_header.msgh_remote_port; - - // convert a port to io_object_t. 
- if( IP_VALID(port)) { - iokit_lock_port(port); - if( ip_active(port)) { - obj = (io_object_t) port->ip_kobject; - type = ip_kotype( port ); - if( (IKOT_IOKIT_OBJECT == type) - || (IKOT_IOKIT_CONNECT == type) - || (IKOT_IOKIT_IDENT == type)) - iokit_add_reference( obj, IKOT_IOKIT_OBJECT ); - else - obj = NULL; - } - iokit_unlock_port(port); + ipc_port_t port; + io_object_t obj = NULL; + ipc_kobject_type_t type = IKOT_NONE; + ipc_port_t notify; - if( obj ) { + port = (ipc_port_t) notification->not_header.msgh_remote_port; - mach_port_mscount_t mscount = notification->not_count; - - if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount )) - { - /* Re-request no-senders notifications on the port (if still active) */ - ip_lock(port); + // convert a port to io_object_t. + if (IP_VALID(port)) { + iokit_lock_port(port); if (ip_active(port)) { - notify = ipc_port_make_sonce_locked(port); - ipc_port_nsrequest( port, mscount + 1, notify, ¬ify); - /* port unlocked */ - if ( notify != IP_NULL) - ipc_port_release_sonce(notify); - } else { - ip_unlock(port); + obj = (io_object_t) port->ip_kobject; + type = ip_kotype( port ); + if ((IKOT_IOKIT_OBJECT == type) + || (IKOT_IOKIT_CONNECT == type) + || (IKOT_IOKIT_IDENT == type)) { + iokit_add_reference( obj, IKOT_IOKIT_OBJECT ); + } else { + obj = NULL; + } } - } - iokit_remove_reference( obj ); - } - } + iokit_unlock_port(port); + + if (obj) { + mach_port_mscount_t mscount = notification->not_count; + + if (KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount )) { + /* Re-request no-senders notifications on the port (if still active) */ + ip_lock(port); + if (ip_active(port)) { + notify = ipc_port_make_sonce_locked(port); + ipc_port_nsrequest( port, mscount + 1, notify, ¬ify); + /* port unlocked */ + if (notify != IP_NULL) { + ipc_port_release_sonce(notify); + } + } else { + ip_unlock(port); + } + } + iokit_remove_reference( obj ); + } + } } @@ -380,167 +384,170 @@ EXTERN boolean_t iokit_notify( mach_msg_header_t * msg ) { - switch (msg->msgh_id) { - case MACH_NOTIFY_NO_SENDERS: - iokit_no_senders((mach_no_senders_notification_t *) msg); - return TRUE; - - case MACH_NOTIFY_PORT_DELETED: - case MACH_NOTIFY_PORT_DESTROYED: - case MACH_NOTIFY_SEND_ONCE: - case MACH_NOTIFY_DEAD_NAME: - default: - printf("iokit_notify: strange notification %d\n", msg->msgh_id); - return FALSE; - } + switch (msg->msgh_id) { + case MACH_NOTIFY_NO_SENDERS: + iokit_no_senders((mach_no_senders_notification_t *) msg); + return TRUE; + + case MACH_NOTIFY_PORT_DELETED: + case MACH_NOTIFY_PORT_DESTROYED: + case MACH_NOTIFY_SEND_ONCE: + case MACH_NOTIFY_DEAD_NAME: + default: + printf("iokit_notify: strange notification %d\n", msg->msgh_id); + return FALSE; + } } /* need to create a pmap function to generalize */ -unsigned int IODefaultCacheBits(addr64_t pa) +unsigned int +IODefaultCacheBits(addr64_t pa) { - return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT))); + return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)); } -kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa, - mach_vm_size_t length, unsigned int options) +kern_return_t +IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa, + mach_vm_size_t length, unsigned int options) { - vm_prot_t prot; - unsigned int flags; - ppnum_t pagenum; - pmap_t pmap = map->pmap; + vm_prot_t prot; + unsigned int flags; + ppnum_t pagenum; + pmap_t pmap = map->pmap; - prot = (options & kIOMapReadOnly) - ? 
VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE); + prot = (options & kIOMapReadOnly) + ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE); - pagenum = (ppnum_t)atop_64(pa); - - switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */ + pagenum = (ppnum_t)atop_64(pa); + switch (options & kIOMapCacheMask) { /* What cache mode do we need? */ case kIOMapDefaultCache: default: - flags = IODefaultCacheBits(pa); - break; + flags = IODefaultCacheBits(pa); + break; case kIOMapInhibitCache: - flags = VM_WIMG_IO; - break; + flags = VM_WIMG_IO; + break; case kIOMapWriteThruCache: - flags = VM_WIMG_WTHRU; - break; + flags = VM_WIMG_WTHRU; + break; case kIOMapWriteCombineCache: - flags = VM_WIMG_WCOMB; - break; + flags = VM_WIMG_WCOMB; + break; case kIOMapCopybackCache: - flags = VM_WIMG_COPYBACK; - break; + flags = VM_WIMG_COPYBACK; + break; case kIOMapCopybackInnerCache: - flags = VM_WIMG_INNERWBACK; - break; + flags = VM_WIMG_INNERWBACK; + break; case kIOMapPostedWrite: - flags = VM_WIMG_POSTED; - break; - } + flags = VM_WIMG_POSTED; + break; + } - pmap_set_cache_attributes(pagenum, flags); + pmap_set_cache_attributes(pagenum, flags); - vm_map_set_cache_attr(map, (vm_map_offset_t)va); + vm_map_set_cache_attr(map, (vm_map_offset_t)va); - // Set up a block mapped area - return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0); + // Set up a block mapped area + return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0); } -kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length) +kern_return_t +IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length) { - pmap_t pmap = map->pmap; + pmap_t pmap = map->pmap; - pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length)); + pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length)); - return( KERN_SUCCESS ); + return KERN_SUCCESS; } -kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va, - mach_vm_size_t __unused length, unsigned int __unused options) +kern_return_t +IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va, + mach_vm_size_t __unused length, unsigned int __unused options) { - mach_vm_size_t off; - vm_prot_t prot; - unsigned int flags; - pmap_t pmap = map->pmap; - pmap_flush_context pmap_flush_context_storage; - boolean_t delayed_pmap_flush = FALSE; - - prot = (options & kIOMapReadOnly) - ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE); - - switch (options & kIOMapCacheMask) - { + mach_vm_size_t off; + vm_prot_t prot; + unsigned int flags; + pmap_t pmap = map->pmap; + pmap_flush_context pmap_flush_context_storage; + boolean_t delayed_pmap_flush = FALSE; + + prot = (options & kIOMapReadOnly) + ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE); + + switch (options & kIOMapCacheMask) { // what cache mode do we need? 
case kIOMapDefaultCache: default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; case kIOMapInhibitCache: - flags = VM_WIMG_IO; - break; + flags = VM_WIMG_IO; + break; case kIOMapWriteThruCache: - flags = VM_WIMG_WTHRU; - break; + flags = VM_WIMG_WTHRU; + break; case kIOMapWriteCombineCache: - flags = VM_WIMG_WCOMB; - break; + flags = VM_WIMG_WCOMB; + break; case kIOMapCopybackCache: - flags = VM_WIMG_COPYBACK; - break; + flags = VM_WIMG_COPYBACK; + break; case kIOMapCopybackInnerCache: - flags = VM_WIMG_INNERWBACK; - break; + flags = VM_WIMG_INNERWBACK; + break; case kIOMapPostedWrite: - flags = VM_WIMG_POSTED; - break; - } - - pmap_flush_context_init(&pmap_flush_context_storage); - delayed_pmap_flush = FALSE; - - // enter each page's physical address in the target map - for (off = 0; off < length; off += page_size) - { - ppnum_t ppnum = pmap_find_phys(pmap, va + off); - if (ppnum) { - pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE, - PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); - delayed_pmap_flush = TRUE; + flags = VM_WIMG_POSTED; + break; } - } - if (delayed_pmap_flush == TRUE) - pmap_flush(&pmap_flush_context_storage); - return (KERN_SUCCESS); + pmap_flush_context_init(&pmap_flush_context_storage); + delayed_pmap_flush = FALSE; + + // enter each page's physical address in the target map + for (off = 0; off < length; off += page_size) { + ppnum_t ppnum = pmap_find_phys(pmap, va + off); + if (ppnum) { + pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE, + PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); + delayed_pmap_flush = TRUE; + } + } + if (delayed_pmap_flush == TRUE) { + pmap_flush(&pmap_flush_context_storage); + } + + return KERN_SUCCESS; } -ppnum_t IOGetLastPageNumber(void) +ppnum_t +IOGetLastPageNumber(void) { #if __i386__ || __x86_64__ - ppnum_t lastPage, highest = 0; + ppnum_t lastPage, highest = 0; unsigned int idx; - for (idx = 0; idx < pmap_memory_region_count; idx++) - { + for (idx = 0; idx < pmap_memory_region_count; idx++) { lastPage = pmap_memory_regions[idx].end - 1; - if (lastPage > highest) + if (lastPage > highest) { highest = lastPage; + } } - return (highest); + return highest; #elif __arm__ || __arm64__ return 0; #else @@ -550,7 +557,8 @@ ppnum_t IOGetLastPageNumber(void) void IOGetTime( mach_timespec_t * clock_time); -void IOGetTime( mach_timespec_t * clock_time) +void +IOGetTime( mach_timespec_t * clock_time) { clock_sec_t sec; clock_nsec_t nsec; @@ -558,4 +566,3 @@ void IOGetTime( mach_timespec_t * clock_time) clock_time->tv_sec = (typeof(clock_time->tv_sec))sec; clock_time->tv_nsec = nsec; } - diff --git a/osfmk/device/subrs.c b/osfmk/device/subrs.c index 711ded8cf..56b25cb7d 100644 --- a/osfmk/device/subrs.c +++ b/osfmk/device/subrs.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,34 +30,34 @@ */ /* *(C)UNIX System Laboratories, Inc. all or some portions of this file are - *derived from material licensed to the University of California by - *American Telephone and Telegraph Co. or UNIX System Laboratories, - *Inc. and are reproduced herein with the permission of UNIX System - *Laboratories, Inc. + * derived from material licensed to the University of California by + * American Telephone and Telegraph Co. or UNIX System Laboratories, + * Inc. and are reproduced herein with the permission of UNIX System + * Laboratories, Inc. */ -/* +/* * Mach Operating System * Copyright (c) 1993,1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -139,7 +139,7 @@ #include /* String routines, from CMU */ -#ifdef strcpy +#ifdef strcpy #undef strcmp #undef strncmp #undef strcpy @@ -171,21 +171,22 @@ int strcmp( - const char *s1, - const char *s2) + const char *s1, + const char *s2) { - unsigned int a, b; - - do { - a = *s1++; - b = *s2++; - if (a != b) - return a-b; /* includes case when - 'a' is zero and 'b' is not zero - or vice versa */ + unsigned int a, b; + + do { + a = *s1++; + b = *s2++; + if (a != b) { + return a - b; /* includes case when + * 'a' is zero and 'b' is not zero + * or vice versa */ + } } while (a != '\0'); - return 0; /* both are zero */ + return 0; /* both are zero */ } /* @@ -200,25 +201,27 @@ strcmp( // ARM64 implementation in ../arm64/strncmp.s int strncmp( - const char *s1, - const char *s2, - size_t n) + const char *s1, + const char *s2, + size_t n) { - unsigned int a, b; - - while (n != 0) { - a = *s1++; - b = *s2++; - if (a != b) - return a-b; /* includes case when - 'a' is zero and 'b' is not zero - or vice versa */ - if (a == '\0') - return 0; /* both are zero */ - n--; + unsigned int a, b; + + while (n != 0) { + a = *s1++; + b = *s2++; + if (a != b) { + return a - b; /* includes case when + * 'a' is zero and 'b' is not zero + * or vice versa */ + } + if (a == '\0') { + return 0; /* both are zero */ + } + n--; } - return 0; + return 0; } #endif // #ifndef __arm__ @@ -229,39 +232,44 @@ strncmp( static int tolower(unsigned char ch) { - if (ch >= 'A' && ch <= 'Z') - ch = 'a' + (ch - 'A'); + if (ch >= 'A' && ch <= 'Z') { + ch = 'a' + (ch - 'A'); + } - return ch; + return ch; } int strcasecmp(const char *s1, const char *s2) { - const unsigned char *us1 = (const u_char *)s1, - *us2 = (const u_char *)s2; + const unsigned char *us1 = (const u_char *)s1, + *us2 = (const u_char *)s2; - while (tolower(*us1) == tolower(*us2++)) - if (*us1++ == '\0') - return (0); - return (tolower(*us1) - tolower(*--us2)); + while (tolower(*us1) == tolower(*us2++)) { + if (*us1++ == '\0') { + return 0; + } + } + return tolower(*us1) - tolower(*--us2); } int strncasecmp(const char *s1, const char *s2, size_t n) { - if (n != 0) { - const unsigned char *us1 = (const u_char *)s1, - *us2 = (const u_char *)s2; + if (n != 0) { + const unsigned char *us1 = (const u_char *)s1, + *us2 = (const u_char *)s2; - do { - if (tolower(*us1) != tolower(*us2++)) - return (tolower(*us1) - tolower(*--us2)); - if (*us1++ == '\0') - break; - } while (--n != 0); - } - return (0); + do { + if (tolower(*us1) != tolower(*us2++)) { + return tolower(*us1) - tolower(*--us2); + } + if (*us1++ == '\0') { + break; + } + } while (--n != 0); + } + return 0; } char * @@ -303,21 +311,22 @@ strrchr(const char *s, int c) * strcpy copies the contents of the string "from" including * the null terminator to the string "to". A pointer to "to" * is returned. - * Deprecation Warning: + * Deprecation Warning: * strcpy() is being deprecated. Please use strlcpy() instead. 
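For callers migrating off strcpy() as the warning above suggests: strlcpy() (reflowed further below) returns strlen(src), so truncation is detected by comparing the return value against the destination size. A short usage sketch, assuming a platform such as Darwin that ships the BSD strlcpy():

    #include <stdio.h>
    #include <string.h>   /* strlcpy() lives here on Darwin/BSD */

    int main(void)
    {
        char buf[8];
        size_t n = strlcpy(buf, "a long source string", sizeof(buf));
        if (n >= sizeof(buf)) {
            /* strlcpy() returns strlen(src); >= the buffer size means it was cut */
            printf("truncated to \"%s\" (needed %zu bytes)\n", buf, n + 1);
        }
        return 0;
    }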
*/ #if !CONFIG_EMBEDDED char * strcpy( - char *to, - const char *from) + char *to, + const char *from) { - char *ret = to; + char *ret = to; - while ((*to++ = *from++) != '\0') - continue; + while ((*to++ = *from++) != '\0') { + continue; + } - return ret; + return ret; } #endif @@ -335,20 +344,23 @@ strcpy( #undef strncpy char * strncpy( - char *s1, + char *s1, const char *s2, size_t n) { - char *os1 = s1; - unsigned long i; - - for (i = 0; i < n;) - if ((*s1++ = *s2++) == '\0') - for (i++; i < n; i++) - *s1++ = '\0'; - else - i++; - return (os1); + char *os1 = s1; + unsigned long i; + + for (i = 0; i < n;) { + if ((*s1++ = *s2++) == '\0') { + for (i++; i < n; i++) { + *s1++ = '\0'; + } + } else { + i++; + } + } + return os1; } #endif // #ifndef __arm__ @@ -364,12 +376,13 @@ strncpy( int atoi(const char *cp) { - int number; + int number; - for (number = 0; ('0' <= *cp) && (*cp <= '9'); cp++) - number = (number * 10) + (*cp - '0'); + for (number = 0; ('0' <= *cp) && (*cp <= '9'); cp++) { + number = (number * 10) + (*cp - '0'); + } - return( number ); + return number; } /* @@ -386,43 +399,45 @@ atoi(const char *cp) int atoi_term( - char *p, /* IN */ - char **t) /* OUT */ + char *p, /* IN */ + char **t) /* OUT */ { - int n; - int f; - - n = 0; - f = 0; - for(;;p++) { - switch(*p) { - case ' ': - case '\t': - continue; - case '-': - f++; - case '+': - p++; - } - break; - } - while(*p >= '0' && *p <= '9') - n = n*10 + *p++ - '0'; - - /* return pointer to terminating character */ - if ( t ) - *t = p; - - return(f? -n: n); + int n; + int f; + + n = 0; + f = 0; + for (;; p++) { + switch (*p) { + case ' ': + case '\t': + continue; + case '-': + f++; + case '+': + p++; + } + break; + } + while (*p >= '0' && *p <= '9') { + n = n * 10 + *p++ - '0'; + } + + /* return pointer to terminating character */ + if (t) { + *t = p; + } + + return f? -n: n; } /* * Does the same thing as strlen, except only looks up - * to max chars inside the buffer. - * Taken from archive/kern-stuff/sbf_machine.c in - * seatbelt. + * to max chars inside the buffer. + * Taken from archive/kern-stuff/sbf_machine.c in + * seatbelt. * inputs: - * s string whose length is to be measured + * s string whose length is to be measured * max maximum length of string to search for null * outputs: * length of s or max; whichever is smaller @@ -433,10 +448,12 @@ atoi_term( // ARM64 implementation in ../arm64/strnlen.s #undef strnlen size_t -strnlen(const char *s, size_t max) { +strnlen(const char *s, size_t max) +{ const char *es = s + max, *p = s; - while(*p && p != es) + while (*p && p != es) { p++; + } return p - s; } @@ -454,32 +471,31 @@ strnlen(const char *s, size_t max) { char * itoa( - int num, - char *str) + int num, + char *str) { - char digits[11]; - char *dp; - char *cp = str; - - if (num == 0) { - *cp++ = '0'; - } - else { - dp = digits; - while (num) { - *dp++ = '0' + num % 10; - num /= 10; - } - while (dp != digits) { - *cp++ = *--dp; - } - } - *cp++ = '\0'; + char digits[11]; + char *dp; + char *cp = str; + + if (num == 0) { + *cp++ = '0'; + } else { + dp = digits; + while (num) { + *dp++ = '0' + num % 10; + num /= 10; + } + while (dp != digits) { + *cp++ = *--dp; + } + } + *cp++ = '\0'; return str; } -/* +/* * Deprecation Warning: * strcat() is being deprecated. Please use strlcat() instead. 
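The strlcat() replacement recommended above follows the same convention: its return value is the total length it tried to build (destination length plus source length), so a result >= the buffer size signals truncation while the destination stays NUL-terminated. A hedged usage sketch under the same Darwin/BSD assumption:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char path[16] = "/dev";
        size_t need = strlcat(path, "/disk0s2", sizeof(path));
        if (need >= sizeof(path)) {
            /* truncated: 'need' is the full length strlcat() tried to build */
            printf("need %zu bytes, have %zu\n", need + 1, sizeof(path));
        } else {
            printf("path = %s\n", path);  /* "/dev/disk0s2" */
        }
        return 0;
    }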
*/ @@ -491,11 +507,13 @@ strcat( { char *old = dest; - while (*dest) + while (*dest) { ++dest; - while ((*dest++ = *src++)) + } + while ((*dest++ = *src++)) { ; - return (old); + } + return old; } #endif @@ -516,13 +534,15 @@ strlcat(char *dst, const char *src, size_t siz) size_t dlen; /* Find the end of dst and adjust bytes left but don't go past end */ - while (n-- != 0 && *d != '\0') + while (n-- != 0 && *d != '\0') { d++; + } dlen = d - dst; n = siz - dlen; - if (n == 0) - return(dlen + strlen(s)); + if (n == 0) { + return dlen + strlen(s); + } while (*s != '\0') { if (n != 1) { *d++ = *s; @@ -532,7 +552,7 @@ strlcat(char *dst, const char *src, size_t siz) } *d = '\0'; - return(dlen + (s - src)); /* count does not include NUL */ + return dlen + (s - src); /* count does not include NUL */ } /* @@ -554,20 +574,23 @@ strlcpy(char *dst, const char *src, size_t siz) /* Copy as many bytes as will fit */ if (n != 0 && --n != 0) { do { - if ((*d++ = *s++) == 0) + if ((*d++ = *s++) == 0) { break; + } } while (--n != 0); } /* Not enough room in dst, add NUL and traverse rest of src */ if (n == 0) { - if (siz != 0) - *d = '\0'; /* NUL-terminate dst */ - while (*s++) + if (siz != 0) { + *d = '\0'; /* NUL-terminate dst */ + } + while (*s++) { ; + } } - return(s - src - 1); /* count does not include NUL */ + return s - src - 1; /* count does not include NUL */ } #endif @@ -581,7 +604,7 @@ strlcpy(char *dst, const char *src, size_t siz) * * Parameters: string String to be duplicated * type type of memory to be allocated (normally - * M_TEMP) + * M_TEMP) * * Returns: char * A pointer to the newly allocated string with * duplicated contents in it. @@ -602,14 +625,15 @@ char * STRDUP(const char *string, int type) { size_t len; - char *copy; + char *copy; len = strlen(string) + 1; MALLOC(copy, char *, len, type, M_WAITOK); - if (copy == NULL) - return (NULL); + if (copy == NULL) { + return NULL; + } bcopy(string, copy, len); - return (copy); + return copy; } /* @@ -621,31 +645,34 @@ strprefix(const char *s1, const char *s2) int c; while ((c = *s2++) != '\0') { - if (c != *s1++) - return (0); + if (c != *s1++) { + return 0; + } } - return (1); + return 1; } char * strnstr(char *s, const char *find, size_t slen) { - char c, sc; - size_t len; - - if ((c = *find++) != '\0') { - len = strlen(find); - do { - do { - if ((sc = *s++) == '\0' || slen-- < 1) - return (NULL); - } while (sc != c); - if (len > slen) - return (NULL); - } while (strncmp(s, find, len) != 0); - s--; - } - return (s); + char c, sc; + size_t len; + + if ((c = *find++) != '\0') { + len = strlen(find); + do { + do { + if ((sc = *s++) == '\0' || slen-- < 1) { + return NULL; + } + } while (sc != c); + if (len > slen) { + return NULL; + } + } while (strncmp(s, find, len) != 0); + s--; + } + return s; } void * __memcpy_chk(void *dst, void const *src, size_t s, size_t chk_size); @@ -653,93 +680,98 @@ void * __memmove_chk(void *dst, void const *src, size_t s, size_t chk_size); void * __memset_chk(void *dst, int c, size_t s, size_t chk_size); size_t __strlcpy_chk(char *dst, char const *src, size_t s, size_t chk_size); size_t __strlcat_chk(char *dst, char const *src, size_t s, size_t chk_size); -char * __strncpy_chk (char *restrict dst, char *restrict src, size_t len, size_t chk_size); -char * __strncat_chk (char *restrict dst, const char *restrict src, size_t len, size_t chk_size); +char * __strncpy_chk(char *restrict dst, char *restrict src, size_t len, size_t chk_size); +char * __strncat_chk(char *restrict dst, const char *restrict src, size_t 
len, size_t chk_size); char * __strcpy_chk(char *restrict dst, const char *restrict src, size_t chk_size); -char * __strcat_chk (char *restrict dst, const char *restrict src, size_t chk_size); +char * __strcat_chk(char *restrict dst, const char *restrict src, size_t chk_size); void * __memcpy_chk(void *dst, void const *src, size_t s, size_t chk_size) { - if (__improbable(chk_size < s)) - panic("__memcpy_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, s); - return memcpy(dst, src, s); + if (__improbable(chk_size < s)) { + panic("__memcpy_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, s); + } + return memcpy(dst, src, s); } void * __memmove_chk(void *dst, void const *src, size_t s, size_t chk_size) { - if (__improbable(chk_size < s)) - panic("__memmove_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, s); - return memmove(dst, src, s); + if (__improbable(chk_size < s)) { + panic("__memmove_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, s); + } + return memmove(dst, src, s); } void * __memset_chk(void *dst, int c, size_t s, size_t chk_size) { - if (__improbable(chk_size < s)) - panic("__memset_chk object size check failed: dst %p, c %c, (%zu < %zu)", dst, c, chk_size, s); - return memset(dst, c, s); + if (__improbable(chk_size < s)) { + panic("__memset_chk object size check failed: dst %p, c %c, (%zu < %zu)", dst, c, chk_size, s); + } + return memset(dst, c, s); } size_t __strlcat_chk(char *dst, char const *src, size_t s, size_t chk_size) { - if (__improbable(chk_size < s)) - panic("__strlcat_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, s); - return strlcat(dst, src, s); + if (__improbable(chk_size < s)) { + panic("__strlcat_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, s); + } + return strlcat(dst, src, s); } size_t __strlcpy_chk(char *dst, char const *src, size_t s, size_t chk_size) { - if (__improbable(chk_size < s)) - panic("__strlcpy_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, s); - return strlcpy(dst, src, s); + if (__improbable(chk_size < s)) { + panic("__strlcpy_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, s); + } + return strlcpy(dst, src, s); } char * -__strncpy_chk (char *restrict dst, char *restrict src, - size_t len, size_t chk_size) +__strncpy_chk(char *restrict dst, char *restrict src, + size_t len, size_t chk_size) { - if (__improbable(chk_size < len)) { - panic("__strncpy_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, len); - } - return strncpy(dst, src, len); + if (__improbable(chk_size < len)) { + panic("__strncpy_chk object size check failed: dst %p, src %p, (%zu < %zu)", dst, src, chk_size, len); + } + return strncpy(dst, src, len); } char * -__strncat_chk (char *restrict dst, const char *restrict src, - size_t len, size_t chk_size) +__strncat_chk(char *restrict dst, const char *restrict src, + size_t len, size_t chk_size) { - size_t len1 = strlen(dst); - size_t len2 = strnlen(src, len); - if (__improbable (chk_size < len1 + len2 + 1)) { - panic("__strncat_chk object size check failed: dst %p, src %p, (%zu < %zu + %zu + 1)", dst, src, chk_size, len1, len2); - } - return strncat(dst, src, len); + size_t len1 = strlen(dst); + size_t len2 = strnlen(src, len); + if (__improbable(chk_size < len1 + len2 + 1)) { + panic("__strncat_chk object size check failed: dst %p, src %p, (%zu < 
%zu + %zu + 1)", dst, src, chk_size, len1, len2); + } + return strncat(dst, src, len); } char * -__strcpy_chk (char *restrict dst, const char *restrict src, size_t chk_size) +__strcpy_chk(char *restrict dst, const char *restrict src, size_t chk_size) { - size_t len = strlen(src); - if (__improbable (chk_size < len + 1)) { - panic("__strcpy_chk object size check failed: dst %p, src %p, (%zu < %zu + 1)", dst, src, chk_size, len); - } - memcpy(dst, src, len+1); - return dst; + size_t len = strlen(src); + if (__improbable(chk_size < len + 1)) { + panic("__strcpy_chk object size check failed: dst %p, src %p, (%zu < %zu + 1)", dst, src, chk_size, len); + } + memcpy(dst, src, len + 1); + return dst; } char * -__strcat_chk (char *restrict dst, const char *restrict src, size_t chk_size) +__strcat_chk(char *restrict dst, const char *restrict src, size_t chk_size) { - size_t len1 = strlen(dst); - size_t len2 = strlen(src); - size_t required_len = len1 + len2 + 1; - if (__improbable (chk_size < required_len)) { - panic("__strcat_chk object size check failed: dst %p, src %p, (%zu < %zu + %zu + 1)", dst, src, chk_size, len1, len2); - } - memcpy(dst + len1, src, len2 + 1); - return dst; + size_t len1 = strlen(dst); + size_t len2 = strlen(src); + size_t required_len = len1 + len2 + 1; + if (__improbable(chk_size < required_len)) { + panic("__strcat_chk object size check failed: dst %p, src %p, (%zu < %zu + %zu + 1)", dst, src, chk_size, len1, len2); + } + memcpy(dst + len1, src, len2 + 1); + return dst; } diff --git a/osfmk/gssd/gssd_mach_types.h b/osfmk/gssd/gssd_mach_types.h index e1ba9a829..d6468d645 100644 --- a/osfmk/gssd/gssd_mach_types.h +++ b/osfmk/gssd/gssd_mach_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2006, 2008, 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -42,14 +42,14 @@ typedef enum gssd_mechtype { typedef enum gssd_nametype { GSSD_STRING_NAME = 0, - GSSD_EXPORT, + GSSD_EXPORT, GSSD_ANONYMOUS, GSSD_HOSTBASED, GSSD_USER, GSSD_MACHINE_UID, GSSD_STRING_UID, GSSD_KRB5_PRINCIPAL, - GSSD_KRB5_REFERRAL, + GSSD_KRB5_REFERRAL, GSSD_NTLM_PRINCIPAL, GSSD_NTLM_BLOB, GSSD_UUID @@ -64,24 +64,24 @@ typedef uint64_t gssd_cred; typedef int32_t *gssd_etype_list; /* The following need to correspond to GSS_C_*_FLAG in gssapi.h */ -#define GSSD_DELEG_FLAG 1 -#define GSSD_MUTUAL_FLAG 2 -#define GSSD_REPLAY_FLAG 4 -#define GSSD_SEQUENCE_FLAG 8 -#define GSSD_CONF_FLAG 16 -#define GSSD_INTEG_FLAG 32 -#define GSSD_ANON_FLAG 64 -#define GSSD_PROT_FLAG 128 -#define GSSD_TRANS_FLAG 256 -#define GSSD_DELEG_POLICY_FLAG 32768 +#define GSSD_DELEG_FLAG 1 +#define GSSD_MUTUAL_FLAG 2 +#define GSSD_REPLAY_FLAG 4 +#define GSSD_SEQUENCE_FLAG 8 +#define GSSD_CONF_FLAG 16 +#define GSSD_INTEG_FLAG 32 +#define GSSD_ANON_FLAG 64 +#define GSSD_PROT_FLAG 128 +#define GSSD_TRANS_FLAG 256 +#define GSSD_DELEG_POLICY_FLAG 32768 -#define GSSD_NO_DEFAULT 1 // Only use the supplied principal, do not fallback to the default. -#define GSSD_NO_CANON 2 // Don't canononicalize host names -#define GSSD_HOME_ACCESS_OK 4 // OK to access home directory -#define GSSD_GUEST_ONLY 8 // NTLM Server is forcing guest access -#define GSSD_RESTART 16 // Destroy the supplied context and start over -#define GSSD_NFS_1DES 64 // Only get single DES session keys -#define GSSD_WIN2K_HACK 128 // Hack for Win2K -#define GSSD_LUCID_CONTEXT 256 // Export Lucid context +#define GSSD_NO_DEFAULT 1 // Only use the supplied principal, do not fallback to the default. +#define GSSD_NO_CANON 2 // Don't canononicalize host names +#define GSSD_HOME_ACCESS_OK 4 // OK to access home directory +#define GSSD_GUEST_ONLY 8 // NTLM Server is forcing guest access +#define GSSD_RESTART 16 // Destroy the supplied context and start over +#define GSSD_NFS_1DES 64 // Only get single DES session keys +#define GSSD_WIN2K_HACK 128 // Hack for Win2K +#define GSSD_LUCID_CONTEXT 256 // Export Lucid context #endif /* _GSSD_MACH_TYPES_H_ */ diff --git a/osfmk/i386/AT386/model_dep.c b/osfmk/i386/AT386/model_dep.c index 3976f1f6b..19781ce3b 100644 --- a/osfmk/i386/AT386/model_dep.c +++ b/osfmk/i386/AT386/model_dep.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -81,6 +81,7 @@ #include #include #include +#include #include #include #include @@ -92,7 +93,7 @@ #include #include #include -#include /* mp_rendezvous_break_lock */ +#include /* mp_rendezvous_break_lock */ #include #include #include @@ -127,8 +128,8 @@ #include #include -#if DEBUG || DEVELOPMENT -#define DPRINTF(x...) kprintf(x) +#if DEBUG || DEVELOPMENT +#define DPRINTF(x...) kprintf(x) #else #define DPRINTF(x...) #endif @@ -138,7 +139,7 @@ #endif #ifndef ROUNDDOWN -#define ROUNDDOWN(x,y) (((x)/(y))*(y)) +#define ROUNDDOWN(x, y) (((x)/(y))*(y)) #endif static void machine_conf(void); @@ -146,20 +147,20 @@ void panic_print_symbol_name(vm_address_t search); void RecordPanicStackshot(void); typedef enum paniclog_flush_type { - kPaniclogFlushBase = 1, /* Flush the initial log and paniclog header */ - kPaniclogFlushStackshot = 2, /* Flush only the stackshot data, then flush the header */ - kPaniclogFlushOtherLog = 3 /* Flush the other log, then flush the header */ + kPaniclogFlushBase = 1,/* Flush the initial log and paniclog header */ + kPaniclogFlushStackshot = 2,/* Flush only the stackshot data, then flush the header */ + kPaniclogFlushOtherLog = 3/* Flush the other log, then flush the header */ } paniclog_flush_type_t; void paniclog_flush_internal(paniclog_flush_type_t variant); -extern const char version[]; -extern char osversion[]; -extern int max_unsafe_quanta; -extern int max_poll_quanta; -extern unsigned int panic_is_inited; +extern const char version[]; +extern char osversion[]; +extern int max_unsafe_quanta; +extern int max_poll_quanta; +extern unsigned int panic_is_inited; -extern int proc_pid(void *p); +extern int proc_pid(void *p); /* Definitions for frame pointers */ #define FP_ALIGNMENT_MASK ((uint32_t)(0x3)) @@ -168,34 +169,34 @@ extern int proc_pid(void *p); #define FP_MAX_NUM_TO_EVALUATE (50) volatile int pbtcpu = -1; -hw_lock_data_t pbtlock; /* backtrace print lock */ +hw_lock_data_t pbtlock; /* backtrace print lock */ uint32_t pbtcnt = 0; volatile int panic_double_fault_cpu = -1; -#define PRINT_ARGS_FROM_STACK_FRAME 0 +#define PRINT_ARGS_FROM_STACK_FRAME 0 typedef struct _cframe_t { - struct _cframe_t *prev; - uintptr_t caller; + struct _cframe_t *prev; + uintptr_t caller; #if PRINT_ARGS_FROM_STACK_FRAME - unsigned args[0]; + unsigned args[0]; #endif } cframe_t; static unsigned panic_io_port; -static unsigned commit_paniclog_to_nvram; +static unsigned 
commit_paniclog_to_nvram; boolean_t coprocessor_paniclog_flush = FALSE; struct kcdata_descriptor kc_panic_data; static boolean_t begun_panic_stackshot = FALSE; -extern kern_return_t do_stackshot(void *); +extern kern_return_t do_stackshot(void *); -extern void kdp_snapshot_preflight(int pid, void *tracebuf, - uint32_t tracebuf_size, uint32_t flags, - kcdata_descriptor_t data_p, - boolean_t enable_faulting); -extern int kdp_stack_snapshot_bytes_traced(void); +extern void kdp_snapshot_preflight(int pid, void *tracebuf, + uint32_t tracebuf_size, uint32_t flags, + kcdata_descriptor_t data_p, + boolean_t enable_faulting); +extern int kdp_stack_snapshot_bytes_traced(void); #if DEVELOPMENT || DEBUG vm_offset_t panic_stackshot_buf = 0; @@ -207,35 +208,39 @@ size_t panic_stackshot_len = 0; */ void print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, - boolean_t is_64_bit) + boolean_t is_64_bit) { - int i = 0; - addr64_t lr; - addr64_t fp; - addr64_t fp_for_ppn; - ppnum_t ppn; - boolean_t dump_kernel_stack; + int i = 0; + addr64_t lr; + addr64_t fp; + addr64_t fp_for_ppn; + ppnum_t ppn; + boolean_t dump_kernel_stack; fp = topfp; fp_for_ppn = 0; ppn = (ppnum_t)NULL; - if (fp >= VM_MIN_KERNEL_ADDRESS) + if (fp >= VM_MIN_KERNEL_ADDRESS) { dump_kernel_stack = TRUE; - else + } else { dump_kernel_stack = FALSE; + } do { - if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) + if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0)) { break; - if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) + } + if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS))) { break; - if ((!dump_kernel_stack) && (fp >=VM_MIN_KERNEL_ADDRESS)) + } + if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS)) { break; - - /* Check to see if current address will result in a different - ppn than previously computed (to avoid recomputation) via - (addr) ^ fp_for_ppn) >> PAGE_SHIFT) */ + } + + /* Check to see if current address will result in a different + * ppn than previously computed (to avoid recomputation) via + * (addr) ^ fp_for_ppn) >> PAGE_SHIFT) */ if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) { ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET); @@ -284,41 +289,44 @@ print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, void machine_startup(void) { - int boot_arg; + int boot_arg; #if 0 - if( PE_get_hotkey( kPEControlKey )) - halt_in_debugger = halt_in_debugger ? 0 : 1; + if (PE_get_hotkey( kPEControlKey )) { + halt_in_debugger = halt_in_debugger ? 0 : 1; + } #endif - if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof (commit_paniclog_to_nvram))) + if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof(commit_paniclog_to_nvram))) { commit_paniclog_to_nvram = 1; + } /* * Entering the debugger will put the CPUs into a "safe" * power mode. 
*/ - if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof (boot_arg))) - pmsafe_debug = boot_arg; + if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof(boot_arg))) { + pmsafe_debug = boot_arg; + } - hw_lock_init(&pbtlock); /* initialize print backtrace lock */ + hw_lock_init(&pbtlock); /* initialize print backtrace lock */ - if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("preempt", &boot_arg, sizeof(boot_arg))) { default_preemption_rate = boot_arg; } - if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof(boot_arg))) { max_unsafe_quanta = boot_arg; } - if (PE_parse_boot_argn("poll", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("poll", &boot_arg, sizeof(boot_arg))) { max_poll_quanta = boot_arg; } - if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("yield", &boot_arg, sizeof(boot_arg))) { sched_poll_yield_shift = boot_arg; } /* The I/O port to issue a read from, in the event of a panic. Useful for * triggering logic analyzers. */ - if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof (boot_arg))) { + if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof(boot_arg))) { /*I/O ports range from 0 through 0xFFFF */ panic_io_port = boot_arg & 0xffff; } @@ -347,143 +355,141 @@ extern void *gPEEFISystemTable; static void efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table) { - EFI_RUNTIME_SERVICES_64 *runtime; - uint32_t hdr_cksum; - uint32_t cksum; - - DPRINTF("Processing 64-bit EFI tables at %p\n", system_table); - do { - DPRINTF("Header:\n"); - DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature); - DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision); - DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize); - DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32); - DPRINTF("RuntimeServices: 0x%016llx\n", system_table->RuntimeServices); - if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) { - kprintf("Bad EFI system table signature\n"); - break; - } - // Verify signature of the system table - hdr_cksum = system_table->Hdr.CRC32; - system_table->Hdr.CRC32 = 0; - cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize); - - DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); - system_table->Hdr.CRC32 = hdr_cksum; - if (cksum != hdr_cksum) { - kprintf("Bad EFI system table checksum\n"); - break; - } - - gPEEFISystemTable = system_table; - - if(system_table->RuntimeServices == 0) { - kprintf("No runtime table present\n"); - break; - } - DPRINTF("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices); - // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel. 
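[Editor's note] efi_set_tables_64() above validates each EFI table by recomputing its header CRC: the stored checksum was produced with the CRC32 field zeroed, so the code zeroes the field, checksums HeaderSize bytes, and restores the field whether or not the comparison passes. A hedged user-space sketch of that idiom, using zlib's crc32() as a stand-in for the kernel's (link with -lz) and a header struct following the UEFI EFI_TABLE_HEADER layout:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>
	#include <zlib.h>	/* user-space stand-in for the kernel's crc32() */

	/* EFI_TABLE_HEADER layout per the UEFI spec */
	typedef struct {
		uint64_t Signature;
		uint32_t Revision;
		uint32_t HeaderSize;	/* covers the whole table, not just this header */
		uint32_t CRC32;
		uint32_t Reserved;
	} efi_table_header_t;

	/*
	 * Zero the CRC field, checksum HeaderSize bytes, restore the
	 * field either way -- the same sequence the kernel performs.
	 */
	static int
	efi_header_crc_ok(efi_table_header_t *hdr)
	{
		uint32_t saved = hdr->CRC32;
		uint32_t cksum;

		hdr->CRC32 = 0;
		cksum = (uint32_t)crc32(0L, (const Bytef *)hdr, hdr->HeaderSize);
		hdr->CRC32 = saved;
		return cksum == saved;
	}

	int
	main(void)
	{
		efi_table_header_t h;

		memset(&h, 0, sizeof(h));
		h.Signature  = 0x5453595320494249ULL;	/* "IBI SYST" */
		h.HeaderSize = sizeof(h);		/* toy table: header only */
		h.CRC32      = (uint32_t)crc32(0L, (const Bytef *)&h, h.HeaderSize);

		assert(efi_header_crc_ok(&h));
		h.Revision = 1;				/* corrupt a covered byte */
		assert(!efi_header_crc_ok(&h));
		return 0;
	}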
- runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices; - DPRINTF("Checking runtime services table %p\n", runtime); - if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) { - kprintf("Bad EFI runtime table signature\n"); - break; - } - - // Verify signature of runtime services table - hdr_cksum = runtime->Hdr.CRC32; - runtime->Hdr.CRC32 = 0; - cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize); - - DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); - runtime->Hdr.CRC32 = hdr_cksum; - if (cksum != hdr_cksum) { - kprintf("Bad EFI runtime table checksum\n"); - break; - } + EFI_RUNTIME_SERVICES_64 *runtime; + uint32_t hdr_cksum; + uint32_t cksum; + + DPRINTF("Processing 64-bit EFI tables at %p\n", system_table); + do { + DPRINTF("Header:\n"); + DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature); + DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision); + DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize); + DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32); + DPRINTF("RuntimeServices: 0x%016llx\n", system_table->RuntimeServices); + if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) { + kprintf("Bad EFI system table signature\n"); + break; + } + // Verify signature of the system table + hdr_cksum = system_table->Hdr.CRC32; + system_table->Hdr.CRC32 = 0; + cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize); + + DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); + system_table->Hdr.CRC32 = hdr_cksum; + if (cksum != hdr_cksum) { + kprintf("Bad EFI system table checksum\n"); + break; + } + + gPEEFISystemTable = system_table; + + if (system_table->RuntimeServices == 0) { + kprintf("No runtime table present\n"); + break; + } + DPRINTF("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices); + // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel. 
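[Editor's note] The DPRINTF() calls that pervade these EFI routines compile to nothing outside DEBUG/DEVELOPMENT builds: the macro swallows its arguments entirely, per the definition near the top of this file. A sketch of that build-gated variadic macro, using the same GNU `x...` named-variadic syntax the kernel uses:

	#include <stdio.h>

	#define MY_DEBUG 1	/* stand-in for DEBUG || DEVELOPMENT */

	/*
	 * In debug builds DPRINTF forwards to the real printer; otherwise
	 * it expands to nothing, so the call costs nothing at run time.
	 * The arguments are still parsed, so they must stay compilable in
	 * both configurations.
	 */
	#if MY_DEBUG
	#define DPRINTF(x...) printf(x)
	#else
	#define DPRINTF(x...)
	#endif

	int
	main(void)
	{
		DPRINTF("HeaderSize: 0x%08x\n", 24u);	/* silent when MY_DEBUG is 0 */
		return 0;
	}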
+ runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices; + DPRINTF("Checking runtime services table %p\n", runtime); + if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) { + kprintf("Bad EFI runtime table signature\n"); + break; + } + + // Verify signature of runtime services table + hdr_cksum = runtime->Hdr.CRC32; + runtime->Hdr.CRC32 = 0; + cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize); + + DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); + runtime->Hdr.CRC32 = hdr_cksum; + if (cksum != hdr_cksum) { + kprintf("Bad EFI runtime table checksum\n"); + break; + } - gPEEFIRuntimeServices = runtime; - } - while (FALSE); + gPEEFIRuntimeServices = runtime; + }while (FALSE); } static void efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table) { - EFI_RUNTIME_SERVICES_32 *runtime; - uint32_t hdr_cksum; - uint32_t cksum; - - DPRINTF("Processing 32-bit EFI tables at %p\n", system_table); - do { - DPRINTF("Header:\n"); - DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature); - DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision); - DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize); - DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32); - DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices); - if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) { - kprintf("Bad EFI system table signature\n"); - break; - } - // Verify signature of the system table - hdr_cksum = system_table->Hdr.CRC32; - system_table->Hdr.CRC32 = 0; - DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize); - cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize); - - DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); - system_table->Hdr.CRC32 = hdr_cksum; - if (cksum != hdr_cksum) { - kprintf("Bad EFI system table checksum\n"); - break; - } - - gPEEFISystemTable = system_table; - - if(system_table->RuntimeServices == 0) { - kprintf("No runtime table present\n"); - break; - } - DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices); - // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel. 
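[Editor's note] Both efi_set_tables variants wrap their bodies in do { ... } while (FALSE): a single-pass loop whose `break` statements give every failed validation a common exit point, a structured alternative to goto-based error handling. A small sketch of the pattern:

	#include <stdio.h>

	/*
	 * Each early failure breaks straight past the rest of the body;
	 * code after the loop runs whether or not a check failed.
	 */
	static int
	validate(int sig_ok, int crc_ok)
	{
		int valid = 0;

		do {
			if (!sig_ok) {
				printf("bad signature\n");
				break;	/* skips the remaining checks */
			}
			if (!crc_ok) {
				printf("bad checksum\n");
				break;
			}
			valid = 1;	/* reached only if every check passed */
		} while (0);

		return valid;
	}

	int
	main(void)
	{
		printf("%d %d %d\n", validate(0, 1), validate(1, 0), validate(1, 1));
		return 0;
	}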
- // For a 64-bit kernel, booter provides a virtual address mod 4G - runtime = (EFI_RUNTIME_SERVICES_32 *) - (system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS); - DPRINTF("Runtime table addressed at %p\n", runtime); - if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) { - kprintf("Bad EFI runtime table signature\n"); - break; - } - - // Verify signature of runtime services table - hdr_cksum = runtime->Hdr.CRC32; - runtime->Hdr.CRC32 = 0; - cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize); - - DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); - runtime->Hdr.CRC32 = hdr_cksum; - if (cksum != hdr_cksum) { - kprintf("Bad EFI runtime table checksum\n"); - break; - } + EFI_RUNTIME_SERVICES_32 *runtime; + uint32_t hdr_cksum; + uint32_t cksum; + + DPRINTF("Processing 32-bit EFI tables at %p\n", system_table); + do { + DPRINTF("Header:\n"); + DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature); + DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision); + DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize); + DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32); + DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices); + if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) { + kprintf("Bad EFI system table signature\n"); + break; + } + // Verify signature of the system table + hdr_cksum = system_table->Hdr.CRC32; + system_table->Hdr.CRC32 = 0; + DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize); + cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize); + + DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); + system_table->Hdr.CRC32 = hdr_cksum; + if (cksum != hdr_cksum) { + kprintf("Bad EFI system table checksum\n"); + break; + } + + gPEEFISystemTable = system_table; + + if (system_table->RuntimeServices == 0) { + kprintf("No runtime table present\n"); + break; + } + DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices); + // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel. 
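[Editor's note] The `RuntimeServices | VM_MIN_KERNEL_ADDRESS` fixup above rebases a booter-supplied "virtual address mod 4G" into the 64-bit kernel's high shadow mapping: because the kernel base has only high bits set, the OR preserves the low 32 bits and leaves an already-high address unchanged. A sketch, assuming the x86_64 xnu base value shown below:

	#include <assert.h>
	#include <stdint.h>

	/* assumption: the x86_64 kernel base this code path relies on */
	#define VM_MIN_KERNEL_ADDRESS 0xFFFFFF8000000000ULL

	/*
	 * OR-ing the base into a low 32-bit address rebases it into the
	 * high kernel alias; a pointer already in the kernel range is a
	 * no-op because those high bits are already set.
	 */
	static uint64_t
	shadow_map(uint64_t vm_addr)
	{
		if (vm_addr < VM_MIN_KERNEL_ADDRESS) {
			vm_addr |= VM_MIN_KERNEL_ADDRESS;
		}
		return vm_addr;
	}

	int
	main(void)
	{
		assert(shadow_map(0x7FE45000ULL) == 0xFFFFFF807FE45000ULL);
		assert(shadow_map(0xFFFFFF807FE45000ULL) == 0xFFFFFF807FE45000ULL);
		return 0;
	}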
+ // For a 64-bit kernel, booter provides a virtual address mod 4G + runtime = (EFI_RUNTIME_SERVICES_32 *) + (system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS); + DPRINTF("Runtime table addressed at %p\n", runtime); + if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) { + kprintf("Bad EFI runtime table signature\n"); + break; + } + + // Verify signature of runtime services table + hdr_cksum = runtime->Hdr.CRC32; + runtime->Hdr.CRC32 = 0; + cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize); - DPRINTF("Runtime functions\n"); - DPRINTF(" GetTime : 0x%x\n", runtime->GetTime); - DPRINTF(" SetTime : 0x%x\n", runtime->SetTime); - DPRINTF(" GetWakeupTime : 0x%x\n", runtime->GetWakeupTime); - DPRINTF(" SetWakeupTime : 0x%x\n", runtime->SetWakeupTime); - DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime->SetVirtualAddressMap); - DPRINTF(" ConvertPointer : 0x%x\n", runtime->ConvertPointer); - DPRINTF(" GetVariable : 0x%x\n", runtime->GetVariable); - DPRINTF(" GetNextVariableName : 0x%x\n", runtime->GetNextVariableName); - DPRINTF(" SetVariable : 0x%x\n", runtime->SetVariable); - DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount); - DPRINTF(" ResetSystem : 0x%x\n", runtime->ResetSystem); - - gPEEFIRuntimeServices = runtime; - } - while (FALSE); + DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum); + runtime->Hdr.CRC32 = hdr_cksum; + if (cksum != hdr_cksum) { + kprintf("Bad EFI runtime table checksum\n"); + break; + } + + DPRINTF("Runtime functions\n"); + DPRINTF(" GetTime : 0x%x\n", runtime->GetTime); + DPRINTF(" SetTime : 0x%x\n", runtime->SetTime); + DPRINTF(" GetWakeupTime : 0x%x\n", runtime->GetWakeupTime); + DPRINTF(" SetWakeupTime : 0x%x\n", runtime->SetWakeupTime); + DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime->SetVirtualAddressMap); + DPRINTF(" ConvertPointer : 0x%x\n", runtime->ConvertPointer); + DPRINTF(" GetVariable : 0x%x\n", runtime->GetVariable); + DPRINTF(" GetNextVariableName : 0x%x\n", runtime->GetNextVariableName); + DPRINTF(" SetVariable : 0x%x\n", runtime->SetVariable); + DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount); + DPRINTF(" ResetSystem : 0x%x\n", runtime->ResetSystem); + + gPEEFIRuntimeServices = runtime; + }while (FALSE); } @@ -491,162 +497,161 @@ efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table) static void efi_init(void) { - boot_args *args = (boot_args *)PE_state.bootArgs; - - kprintf("Initializing EFI runtime services\n"); - - do - { - vm_offset_t vm_size, vm_addr; - vm_map_offset_t phys_addr; - EfiMemoryRange *mptr; - unsigned int msize, mcount; - unsigned int i; - - msize = args->MemoryMapDescriptorSize; - mcount = args->MemoryMapSize / msize; - - DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n", - args->kaddr, args->ksize); - DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n", - args->efiSystemTable, - (void *) ml_static_ptovirt(args->efiSystemTable)); - DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n", - args->efiRuntimeServicesPageStart); - DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n", - args->efiRuntimeServicesPageCount); - DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n", - args->efiRuntimeServicesVirtualPageStart); - mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap); - for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { - if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) ) { - vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages); - vm_addr = 
(vm_offset_t) mptr->VirtualStart; - /* For K64 on EFI32, shadow-map into high KVA */ - if (vm_addr < VM_MIN_KERNEL_ADDRESS) - vm_addr |= VM_MIN_KERNEL_ADDRESS; - phys_addr = (vm_map_offset_t) mptr->PhysicalStart; - DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n", - mptr->Type, - (void *) (uintptr_t) phys_addr, - (void *) (uintptr_t) mptr->VirtualStart, - (void *) vm_addr, - (void *) vm_size); - pmap_map_bd(vm_addr, phys_addr, phys_addr + round_page(vm_size), - (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE, - (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT); - } - } + boot_args *args = (boot_args *)PE_state.bootArgs; + + kprintf("Initializing EFI runtime services\n"); + + do{ + vm_offset_t vm_size, vm_addr; + vm_map_offset_t phys_addr; + EfiMemoryRange *mptr; + unsigned int msize, mcount; + unsigned int i; + + msize = args->MemoryMapDescriptorSize; + mcount = args->MemoryMapSize / msize; + + DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n", + args->kaddr, args->ksize); + DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n", + args->efiSystemTable, + (void *) ml_static_ptovirt(args->efiSystemTable)); + DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n", + args->efiRuntimeServicesPageStart); + DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n", + args->efiRuntimeServicesPageCount); + DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n", + args->efiRuntimeServicesVirtualPageStart); + mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap); + for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { + if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME)) { + vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages); + vm_addr = (vm_offset_t) mptr->VirtualStart; + /* For K64 on EFI32, shadow-map into high KVA */ + if (vm_addr < VM_MIN_KERNEL_ADDRESS) { + vm_addr |= VM_MIN_KERNEL_ADDRESS; + } + phys_addr = (vm_map_offset_t) mptr->PhysicalStart; + DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n", + mptr->Type, + (void *) (uintptr_t) phys_addr, + (void *) (uintptr_t) mptr->VirtualStart, + (void *) vm_addr, + (void *) vm_size); + pmap_map_bd(vm_addr, phys_addr, phys_addr + round_page(vm_size), + (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ | VM_PROT_WRITE, + (mptr->Type == EfiMemoryMappedIO) ? 
VM_WIMG_IO : VM_WIMG_USE_DEFAULT); + } + } - if (args->Version != kBootArgsVersion2) - panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision); + if (args->Version != kBootArgsVersion2) { + panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision); + } - DPRINTF("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode); - if (args->efiMode == kBootArgsEfiMode64) { - efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable)); - } else { - efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable)); - } - } - while (FALSE); + DPRINTF("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode); + if (args->efiMode == kBootArgsEfiMode64) { + efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable)); + } else { + efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable)); + } + }while (FALSE); - return; + return; } -/* Returns TRUE if a page belongs to the EFI Runtime Services (code or data) */ +/* Returns TRUE if a page belongs to the EFI Runtime Services (code or data) */ boolean_t -efi_valid_page(ppnum_t ppn) +efi_valid_page(ppnum_t ppn) { - boot_args *args = (boot_args *)PE_state.bootArgs; - ppnum_t pstart = args->efiRuntimeServicesPageStart; - ppnum_t pend = pstart + args->efiRuntimeServicesPageCount; + boot_args *args = (boot_args *)PE_state.bootArgs; + ppnum_t pstart = args->efiRuntimeServicesPageStart; + ppnum_t pend = pstart + args->efiRuntimeServicesPageCount; - return pstart <= ppn && ppn < pend; + return pstart <= ppn && ppn < pend; } /* Remap EFI runtime areas. */ void hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_offset) { - boot_args *args = (boot_args *)PE_state.bootArgs; - - kprintf("Reinitializing EFI runtime services\n"); - - do - { - vm_offset_t vm_size, vm_addr; - vm_map_offset_t phys_addr; - EfiMemoryRange *mptr; - unsigned int msize, mcount; - unsigned int i; - - gPEEFISystemTable = 0; - gPEEFIRuntimeServices = 0; - - system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart); - - kprintf("Old system table 0x%x, new 0x%x\n", - (uint32_t)args->efiSystemTable, system_table_offset); - - args->efiSystemTable = system_table_offset; - - kprintf("Old map:\n"); - msize = args->MemoryMapDescriptorSize; - mcount = args->MemoryMapSize / msize; - mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap); - for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { - if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) { - - vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages); - vm_addr = (vm_offset_t) mptr->VirtualStart; - /* K64 on EFI32 */ - if (vm_addr < VM_MIN_KERNEL_ADDRESS) - vm_addr |= VM_MIN_KERNEL_ADDRESS; - phys_addr = (vm_map_offset_t) mptr->PhysicalStart; - - kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages); - } - } - - pmap_remove(kernel_pmap, i386_ptob(args->efiRuntimeServicesPageStart), - i386_ptob(args->efiRuntimeServicesPageStart + args->efiRuntimeServicesPageCount)); + boot_args *args = (boot_args *)PE_state.bootArgs; + + kprintf("Reinitializing EFI runtime services\n"); + + do{ + vm_offset_t vm_size, vm_addr; + vm_map_offset_t phys_addr; + EfiMemoryRange *mptr; + unsigned int msize, mcount; + unsigned int i; + + gPEEFISystemTable = 0; + gPEEFIRuntimeServices = 0; + + system_table_offset += 
ptoa_32(args->efiRuntimeServicesPageStart); + + kprintf("Old system table 0x%x, new 0x%x\n", + (uint32_t)args->efiSystemTable, system_table_offset); + + args->efiSystemTable = system_table_offset; + + kprintf("Old map:\n"); + msize = args->MemoryMapDescriptorSize; + mcount = args->MemoryMapSize / msize; + mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap); + for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { + if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) { + vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages); + vm_addr = (vm_offset_t) mptr->VirtualStart; + /* K64 on EFI32 */ + if (vm_addr < VM_MIN_KERNEL_ADDRESS) { + vm_addr |= VM_MIN_KERNEL_ADDRESS; + } + phys_addr = (vm_map_offset_t) mptr->PhysicalStart; - kprintf("New map:\n"); - msize = args->MemoryMapDescriptorSize; - mcount = (unsigned int )(map_size / msize); - mptr = map; - for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { - if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) { + kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages); + } + } - vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages); - vm_addr = (vm_offset_t) mptr->VirtualStart; - if (vm_addr < VM_MIN_KERNEL_ADDRESS) - vm_addr |= VM_MIN_KERNEL_ADDRESS; - phys_addr = (vm_map_offset_t) mptr->PhysicalStart; + pmap_remove(kernel_pmap, i386_ptob(args->efiRuntimeServicesPageStart), + i386_ptob(args->efiRuntimeServicesPageStart + args->efiRuntimeServicesPageCount)); + + kprintf("New map:\n"); + msize = args->MemoryMapDescriptorSize; + mcount = (unsigned int)(map_size / msize); + mptr = map; + for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { + if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) { + vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages); + vm_addr = (vm_offset_t) mptr->VirtualStart; + if (vm_addr < VM_MIN_KERNEL_ADDRESS) { + vm_addr |= VM_MIN_KERNEL_ADDRESS; + } + phys_addr = (vm_map_offset_t) mptr->PhysicalStart; - kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages); + kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages); - pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size), - (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE, - (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT); - } - } + pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size), + (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ | VM_PROT_WRITE, + (mptr->Type == EfiMemoryMappedIO) ? 
VM_WIMG_IO : VM_WIMG_USE_DEFAULT); + } + } - if (args->Version != kBootArgsVersion2) - panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision); + if (args->Version != kBootArgsVersion2) { + panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision); + } - kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode); - if (args->efiMode == kBootArgsEfiMode64) { - efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable)); - } else { - efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable)); - } - } - while (FALSE); + kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode); + if (args->efiMode == kBootArgsEfiMode64) { + efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable)); + } else { + efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable)); + } + }while (FALSE); - kprintf("Done reinitializing EFI runtime services\n"); + kprintf("Done reinitializing EFI runtime services\n"); - return; + return; } /* @@ -658,8 +663,8 @@ machine_init(void) /* Now with VM up, switch to dynamically allocated cpu data */ cpu_data_realloc(); - /* Ensure panic buffer is initialized. */ - debug_log_init(); + /* Ensure panic buffer is initialized. */ + debug_log_init(); /* * Display CPU identification @@ -668,10 +673,10 @@ machine_init(void) cpuid_feature_display("CPU features"); cpuid_extfeature_display("CPU extended features"); - /* - * Initialize EFI runtime services. - */ - efi_init(); + /* + * Initialize EFI runtime services. + */ + efi_init(); smp_init(); @@ -728,18 +733,22 @@ halt_all_cpus(boolean_t reboot) printf("CPU halted\n"); PEHaltRestart( kPEHaltCPU ); } - while(1); + while (1) { + ; + } } - + /* Issue an I/O port read if one has been requested - this is an event logic * analyzers can use as a trigger point. 
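[Editor's note] The comment above describes panic_io_port_read(), which reads a configurable I/O port purely for its externally visible bus cycle: a logic analyzer can trigger on the access, and the value read is discarded. A sketch of the inb() primitive such a read relies on (x86 inline assembly; executing IN requires ring 0 or explicitly granted I/O privilege, e.g. ioperm(2) on Linux):

	#include <stdint.h>

	/*
	 * Classic x86 port input: IN reads one byte from the port in DX
	 * (or an immediate) into AL. The "Nd" constraint lets the
	 * compiler encode ports below 256 as an immediate, otherwise DX.
	 */
	static inline uint8_t
	inb(uint16_t port)
	{
		uint8_t data;

		__asm__ volatile ("inb %1, %0" : "=a" (data) : "Nd" (port));
		return data;
	}

	/* The panic path discards the value; the bus cycle is the event. */
	static inline void
	trigger_probe(uint16_t port)
	{
		(void)inb(port);
	}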
*/ void -panic_io_port_read(void) { - if (panic_io_port) +panic_io_port_read(void) +{ + if (panic_io_port) { (void)inb(panic_io_port); + } } /* For use with the MP rendezvous mechanism @@ -792,7 +801,7 @@ RecordPanicStackshot() bytes_remaining = debug_buf_size - (unsigned int)((uintptr_t)stackshot_begin_loc - (uintptr_t)debug_buf_base); err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)stackshot_begin_loc, - KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining, KCFLAG_USE_MEMCOPY); + KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining, KCFLAG_USE_MEMCOPY); if (err != KERN_SUCCESS) { panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR; panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); @@ -801,9 +810,9 @@ RecordPanicStackshot() } kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, bytes_remaining, - (STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | - STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | - STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); + (STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | + STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | + STACKSHOT_NO_IO_STATS | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); err = do_stackshot(NULL); bytes_traced = (int) kdp_stack_snapshot_bytes_traced(); bytes_used = (int) kcdata_memory_get_used_bytes(&kc_panic_data); @@ -820,7 +829,7 @@ RecordPanicStackshot() memset(stackshot_begin_loc, '\0', bytes_used); err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)stackshot_begin_loc, - KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining, KCFLAG_USE_MEMCOPY); + KCDATA_BUFFER_BEGIN_STACKSHOT, bytes_remaining, KCFLAG_USE_MEMCOPY); if (err != KERN_SUCCESS) { panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_STACKSHOT_FAILED_ERROR; panic_info->mph_other_log_offset = PE_get_offset_into_panic_region(debug_buf_ptr); @@ -829,8 +838,8 @@ RecordPanicStackshot() } kdp_snapshot_preflight(-1, (void *) stackshot_begin_loc, bytes_remaining, (STACKSHOT_KCDATA_FORMAT | - STACKSHOT_NO_IO_STATS | STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY | - STACKSHOT_FROM_PANIC | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); + STACKSHOT_NO_IO_STATS | STACKSHOT_SAVE_KEXT_LOADINFO | STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY | + STACKSHOT_FROM_PANIC | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); err = do_stackshot(NULL); bytes_traced = (int) kdp_stack_snapshot_bytes_traced(); bytes_used = (int) kcdata_memory_get_used_bytes(&kc_panic_data); @@ -884,15 +893,15 @@ RecordPanicStackshot() } err = kcdata_memory_static_init(&kc_panic_data, (mach_vm_address_t)panic_stackshot_buf, KCDATA_BUFFER_BEGIN_STACKSHOT, - PANIC_STACKSHOT_BUFSIZE, KCFLAG_USE_MEMCOPY); + PANIC_STACKSHOT_BUFSIZE, KCFLAG_USE_MEMCOPY); if (err != KERN_SUCCESS) { kdb_printf("Failed to initialize kcdata buffer for file backed panic stackshot, skipping ...\n"); return; } kdp_snapshot_preflight(-1, (void *) panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, (STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT | - STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_NO_IO_STATS - | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); + STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_FROM_PANIC | STACKSHOT_NO_IO_STATS + | STACKSHOT_THREAD_WAITINFO), &kc_panic_data, 0); 
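[Editor's note] Returning to the EFI map walks in efi_init() and hibernate_newruntime_map() above: both step through the memory map by args->MemoryMapDescriptorSize rather than by sizeof the descriptor struct, because firmware may report descriptors larger than the layout the kernel compiles against, and stepping by sizeof() would drift off the real entry boundaries after the first entry. A sketch with a hypothetical trimmed-down descriptor:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define EFI_MEMORY_RUNTIME 0x8000000000000000ULL

	/* hypothetical reduced descriptor, for illustration only */
	typedef struct {
		uint32_t Type;
		uint64_t PhysicalStart;
		uint64_t VirtualStart;
		uint64_t NumberOfPages;
		uint64_t Attribute;
	} efi_memory_range_t;

	/* Advance by the firmware-reported stride, never by sizeof(). */
	static void
	walk_map(void *map, size_t map_size, size_t desc_size)
	{
		unsigned int mcount = (unsigned int)(map_size / desc_size);
		efi_memory_range_t *mptr = map;

		for (unsigned int i = 0; i < mcount;
		    i++, mptr = (efi_memory_range_t *)((uint8_t *)mptr + desc_size)) {
			if (mptr->Attribute & EFI_MEMORY_RUNTIME) {
				printf("runtime range: phys 0x%llx, %llu pages\n",
				    (unsigned long long)mptr->PhysicalStart,
				    (unsigned long long)mptr->NumberOfPages);
			}
		}
	}

	int
	main(void)
	{
		/* two descriptors, each with 8 trailing pad bytes, as firmware might report */
		size_t desc_size = sizeof(efi_memory_range_t) + 8;
		uint8_t buf[2 * (sizeof(efi_memory_range_t) + 8)];
		efi_memory_range_t r = { 7, 0x100000, 0, 16, EFI_MEMORY_RUNTIME };

		memset(buf, 0, sizeof(buf));
		memcpy(buf, &r, sizeof(r));
		walk_map(buf, sizeof(buf), desc_size);
		return 0;
	}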
err = do_stackshot(NULL); bytes_traced = (int) kdp_stack_snapshot_bytes_traced(); if (bytes_traced > 0 && !err) { @@ -949,7 +958,7 @@ SavePanicInfo( } if (stackptr == NULL) { - __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr)); + __asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr)); } /* Print backtrace - callee is internally synchronized */ @@ -966,7 +975,7 @@ SavePanicInfo( if (PE_get_offset_into_panic_region(debug_buf_ptr) < panic_info->mph_panic_log_offset) { kdb_printf("Invalid panic log offset found (not properly initialized?): debug_buf_ptr : 0x%p, panic_info: 0x%p mph_panic_log_offset: 0x%x\n", - debug_buf_ptr, panic_info, panic_info->mph_panic_log_offset); + debug_buf_ptr, panic_info, panic_info->mph_panic_log_offset); panic_info->mph_panic_log_len = 0; } else { panic_info->mph_panic_log_len = PE_get_offset_into_panic_region(debug_buf_ptr) - panic_info->mph_panic_log_offset; @@ -987,7 +996,8 @@ SavePanicInfo( } } -void paniclog_flush_internal(paniclog_flush_type_t variant) +void +paniclog_flush_internal(paniclog_flush_type_t variant) { /* Update the other log offset if we've opened the other log */ if (panic_info->mph_other_log_offset != 0) { @@ -1064,7 +1074,7 @@ void paniclog_flush_internal(paniclog_flush_type_t variant) * Don't include the macOS panic header (for co-processor systems only) */ bufpos = packA(debug_buf_base, (unsigned int) (debug_buf_ptr - debug_buf_base), - debug_buf_size); + debug_buf_size); /* * If compression was successful, use the compressed length */ @@ -1088,7 +1098,7 @@ void paniclog_flush_internal(paniclog_flush_type_t variant) */ kprintf("Attempting to commit panic log to NVRAM\n"); pi_size = PESavePanicInfo((unsigned char *)debug_buf_base, - (uint32_t)pi_size ); + (uint32_t)pi_size ); set_cr0(cr0); /* @@ -1112,7 +1122,7 @@ paniclog_flush() char * machine_boot_info(char *buf, __unused vm_size_t size) { - *buf ='\0'; + *buf = '\0'; return buf; } @@ -1123,72 +1133,76 @@ machine_boot_info(char *buf, __unused vm_size_t size) static int panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search, const char *module_name) { - kernel_nlist_t *sym = NULL; - struct load_command *cmd; - kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL; - struct symtab_command *orig_st = NULL; - unsigned int i; - char *strings, *bestsym = NULL; - vm_address_t bestaddr = 0, diff, curdiff; - - /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */ - - cmd = (struct load_command *) &mh[1]; - for (i = 0; i < mh->ncmds; i++) { - if (cmd->cmd == LC_SEGMENT_KERNEL) { - kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd; - - if (strncmp(SEG_TEXT, orig_sg->segname, - sizeof(orig_sg->segname)) == 0) - orig_ts = orig_sg; - else if (strncmp(SEG_LINKEDIT, orig_sg->segname, - sizeof(orig_sg->segname)) == 0) - orig_le = orig_sg; - else if (strncmp("", orig_sg->segname, - sizeof(orig_sg->segname)) == 0) - orig_ts = orig_sg; /* pre-Lion i386 kexts have a single unnamed segment */ - } - else if (cmd->cmd == LC_SYMTAB) - orig_st = (struct symtab_command *) cmd; - - cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize); - } - - if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL)) - return 0; - - if ((search < orig_ts->vmaddr) || - (search >= orig_ts->vmaddr + orig_ts->vmsize)) { - /* search out of range for this mach header */ - return 0; - } - - sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff); - strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - 
orig_le->fileoff); - diff = search; - - for (i = 0; i < orig_st->nsyms; i++) { - if (sym[i].n_type & N_STAB) continue; - - if (sym[i].n_value <= search) { - curdiff = search - (vm_address_t)sym[i].n_value; - if (curdiff < diff) { - diff = curdiff; - bestaddr = sym[i].n_value; - bestsym = strings + sym[i].n_un.n_strx; - } - } - } - - if (bestsym != NULL) { - if (diff != 0) { - paniclog_append_noflush("%s : %s + 0x%lx", module_name, bestsym, (unsigned long)diff); - } else { - paniclog_append_noflush("%s : %s", module_name, bestsym); - } - return 1; - } - return 0; + kernel_nlist_t *sym = NULL; + struct load_command *cmd; + kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL; + struct symtab_command *orig_st = NULL; + unsigned int i; + char *strings, *bestsym = NULL; + vm_address_t bestaddr = 0, diff, curdiff; + + /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */ + + cmd = (struct load_command *) &mh[1]; + for (i = 0; i < mh->ncmds; i++) { + if (cmd->cmd == LC_SEGMENT_KERNEL) { + kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd; + + if (strncmp(SEG_TEXT, orig_sg->segname, + sizeof(orig_sg->segname)) == 0) { + orig_ts = orig_sg; + } else if (strncmp(SEG_LINKEDIT, orig_sg->segname, + sizeof(orig_sg->segname)) == 0) { + orig_le = orig_sg; + } else if (strncmp("", orig_sg->segname, + sizeof(orig_sg->segname)) == 0) { + orig_ts = orig_sg; /* pre-Lion i386 kexts have a single unnamed segment */ + } + } else if (cmd->cmd == LC_SYMTAB) { + orig_st = (struct symtab_command *) cmd; + } + + cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize); + } + + if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL)) { + return 0; + } + + if ((search < orig_ts->vmaddr) || + (search >= orig_ts->vmaddr + orig_ts->vmsize)) { + /* search out of range for this mach header */ + return 0; + } + + sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff); + strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff); + diff = search; + + for (i = 0; i < orig_st->nsyms; i++) { + if (sym[i].n_type & N_STAB) { + continue; + } + + if (sym[i].n_value <= search) { + curdiff = search - (vm_address_t)sym[i].n_value; + if (curdiff < diff) { + diff = curdiff; + bestaddr = sym[i].n_value; + bestsym = strings + sym[i].n_un.n_strx; + } + } + } + + if (bestsym != NULL) { + if (diff != 0) { + paniclog_append_noflush("%s : %s + 0x%lx", module_name, bestsym, (unsigned long)diff); + } else { + paniclog_append_noflush("%s : %s", module_name, bestsym); + } + return 1; + } + return 0; } extern kmod_info_t * kmod; /* the list of modules */ @@ -1196,33 +1210,33 @@ extern kmod_info_t * kmod; /* the list of modules */ static void panic_print_kmod_symbol_name(vm_address_t search) { - u_int i; - - if (gLoadedKextSummaries == NULL) - return; - for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { - OSKextLoadedKextSummary *summary = gLoadedKextSummaries->summaries + i; - - if ((search >= summary->address) && - (search < (summary->address + summary->size))) - { - kernel_mach_header_t *header = (kernel_mach_header_t *)(uintptr_t) summary->address; - if (panic_print_macho_symbol_name(header, search, summary->name) == 0) { - paniclog_append_noflush("%s + %llu", summary->name, (unsigned long)search - summary->address); - } - break; - } - } + u_int i; + + if (gLoadedKextSummaries == NULL) { + return; + } + for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) { + OSKextLoadedKextSummary *summary = 
gLoadedKextSummaries->summaries + i; + + if ((search >= summary->address) && + (search < (summary->address + summary->size))) { + kernel_mach_header_t *header = (kernel_mach_header_t *)(uintptr_t) summary->address; + if (panic_print_macho_symbol_name(header, search, summary->name) == 0) { + paniclog_append_noflush("%s + %llu", summary->name, (unsigned long)search - summary->address); + } + break; + } + } } void panic_print_symbol_name(vm_address_t search) { - /* try searching in the kernel */ - if (panic_print_macho_symbol_name(&_mh_execute_header, search, "mach_kernel") == 0) { - /* that failed, now try to search for the right kext */ - panic_print_kmod_symbol_name(search); - } + /* try searching in the kernel */ + if (panic_print_macho_symbol_name(&_mh_execute_header, search, "mach_kernel") == 0) { + /* that failed, now try to search for the right kext */ + panic_print_kmod_symbol_name(search); + } } /* Generate a backtrace, given a frame pointer - this routine @@ -1236,7 +1250,7 @@ panic_print_symbol_name(vm_address_t search) void panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs) { - cframe_t *frame = (cframe_t *)_frame; + cframe_t *frame = (cframe_t *)_frame; vm_offset_t raddrs[DUMPFRAMES]; vm_offset_t PC = 0; int frame_index; @@ -1246,12 +1260,12 @@ panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdu int cn = cpu_number(); boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers; - if(pbtcpu != cn) { + if (pbtcpu != cn) { hw_atomic_add(&pbtcnt, 1); /* Spin on print backtrace lock, which serializes output * Continue anyway if a timeout occurs. */ - hw_lock_to(&pbtlock, ~0U); + hw_lock_to(&pbtlock, ~0U, LCK_GRP_NULL); pbtcpu = cn; } @@ -1263,41 +1277,42 @@ panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdu panic_check_hook(); - PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms)); + PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms)); if (msg != NULL) { paniclog_append_noflush("%s", msg); } if ((regdump == TRUE) && (regs != NULL)) { - x86_saved_state64_t *ss64p = saved_state64(regs); + x86_saved_state64_t *ss64p = saved_state64(regs); paniclog_append_noflush( - "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n" - "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n" - "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" - "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" - "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n", - ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx, - ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi, - ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11, - ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15, - ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs, - ss64p->isf.ss); + "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n" + "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n" + "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" + "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" + "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n", + ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx, + ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi, + ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11, + ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15, + ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs, + ss64p->isf.ss); PC = ss64p->isf.rip; } paniclog_append_noflush("Backtrace (CPU %d), " #if 
PRINT_ARGS_FROM_STACK_FRAME - "Frame : Return Address (4 potential args on stack)\n", cn); + "Frame : Return Address (4 potential args on stack)\n", cn); #else - "Frame : Return Address\n", cn); + "Frame : Return Address\n", cn); #endif for (frame_index = 0; frame_index < nframes; frame_index++) { vm_offset_t curframep = (vm_offset_t) frame; - if (!curframep) + if (!curframep) { break; + } if (curframep & 0x3) { paniclog_append_noflush("Unaligned frame\n"); @@ -1311,14 +1326,16 @@ panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdu } paniclog_append_noflush("%p : 0x%lx ", frame, frame->caller); - if (frame_index < DUMPFRAMES) + if (frame_index < DUMPFRAMES) { raddrs[frame_index] = frame->caller; + } #if PRINT_ARGS_FROM_STACK_FRAME - if (kvtophys((vm_offset_t)&(frame->args[3]))) + if (kvtophys((vm_offset_t)&(frame->args[3]))) { paniclog_append_noflush("(0x%x 0x%x 0x%x 0x%x) ", frame->args[0], frame->args[1], frame->args[2], frame->args[3]); + } #endif /* Display address-symbol translation only if the "keepsyms" @@ -1326,32 +1343,36 @@ panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdu * This routine is potentially unsafe; also, function * boundary identification is unreliable after a strip -x. */ - if (keepsyms) + if (keepsyms) { panic_print_symbol_name((vm_address_t)frame->caller); - + } + paniclog_append_noflush("\n"); frame = frame->prev; } - if (frame_index >= nframes) + if (frame_index >= nframes) { paniclog_append_noflush("\tBacktrace continues...\n"); + } goto out; invalid: - paniclog_append_noflush("Backtrace terminated-invalid frame pointer %p\n",frame); + paniclog_append_noflush("Backtrace terminated-invalid frame pointer %p\n", frame); out: /* Identify kernel modules in the backtrace and display their * load addresses and dependencies. This routine should walk * the kmod list safely. */ - if (frame_index) + if (frame_index) { kmod_panic_dump((vm_offset_t *)&raddrs[0], frame_index); + } - if (PC != 0) + if (PC != 0) { kmod_panic_dump(&PC, 1); + } panic_display_system_configuration(FALSE); @@ -1366,51 +1387,53 @@ out: * Timeout and continue after PBT_TIMEOUT_CYCLES. 
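[Editor's note] panic_print_macho_symbol_name() above symbolicates an address with a nearest-preceding-symbol scan: among symbols whose n_value does not exceed the search address, keep the one with the smallest distance. Seeding the running minimum with the search address itself means a symbol at address 0 can never match, and a null result cleanly signals "no symbol". A reduced sketch of the scan:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	typedef struct {
		uint64_t value;
		const char *name;
	} sym_t;

	/*
	 * Keep the symbol with the smallest nonnegative distance below
	 * the search address; diff starts at `search`, so address-0
	 * symbols never win and NULL means no match.
	 */
	static const sym_t *
	nearest_symbol(const sym_t *syms, size_t nsyms, uint64_t search,
	    uint64_t *offset_out)
	{
		const sym_t *best = NULL;
		uint64_t diff = search;

		for (size_t i = 0; i < nsyms; i++) {
			if (syms[i].value <= search) {
				uint64_t curdiff = search - syms[i].value;
				if (curdiff < diff) {
					diff = curdiff;
					best = &syms[i];
				}
			}
		}
		if (best != NULL) {
			*offset_out = diff;
		}
		return best;
	}

	int
	main(void)
	{
		sym_t tab[] = {
			{ 0x1000, "_start" }, { 0x1400, "helper" }, { 0x2000, "main" },
		};
		uint64_t off;
		const sym_t *s = nearest_symbol(tab, 3, 0x1432, &off);

		assert(s && off == 0x32 && s->name[0] == 'h');	/* helper + 0x32 */
		return 0;
	}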
*/ bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES; - while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout)); + while (*ppbtcnt && (rdtsc64() < bt_tsc_timeout)) { + ; + } } static boolean_t debug_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size) { - size_t rem = size; - char *kvaddr = dest; - - while (rem) { - ppnum_t upn = pmap_find_phys(p, uaddr); - uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK); - uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr); - uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK); - uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK); - size_t cur_size = (uint32_t) MIN(src_rem, dst_rem); - cur_size = MIN(cur_size, rem); - - if (upn && pmap_valid_page(upn) && phys_dest) { - bcopy_phys(phys_src, phys_dest, cur_size); - } - else - break; - uaddr += cur_size; - kvaddr += cur_size; - rem -= cur_size; - } - return (rem == 0); + size_t rem = size; + char *kvaddr = dest; + + while (rem) { + ppnum_t upn = pmap_find_phys(p, uaddr); + uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK); + uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr); + uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK); + uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK); + size_t cur_size = (uint32_t) MIN(src_rem, dst_rem); + cur_size = MIN(cur_size, rem); + + if (upn && pmap_valid_page(upn) && phys_dest) { + bcopy_phys(phys_src, phys_dest, cur_size); + } else { + break; + } + uaddr += cur_size; + kvaddr += cur_size; + rem -= cur_size; + } + return rem == 0; } void print_threads_registers(thread_t thread) { x86_saved_state_t *savestate; - + savestate = get_user_regs(thread); paniclog_append_noflush( "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n" - "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n" - "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" + "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n" + "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n", savestate->ss_64.rax, savestate->ss_64.rbx, savestate->ss_64.rcx, savestate->ss_64.rdx, savestate->ss_64.isf.rsp, savestate->ss_64.rbp, savestate->ss_64.rsi, savestate->ss_64.rdi, - savestate->ss_64.r8, savestate->ss_64.r9, savestate->ss_64.r10, savestate->ss_64.r11, + savestate->ss_64.r8, savestate->ss_64.r9, savestate->ss_64.r10, savestate->ss_64.r11, savestate->ss_64.r12, savestate->ss_64.r13, savestate->ss_64.r14, savestate->ss_64.r15, savestate->ss_64.isf.rflags, savestate->ss_64.isf.rip, savestate->ss_64.isf.cs, savestate->ss_64.isf.ss); @@ -1419,16 +1442,15 @@ print_threads_registers(thread_t thread) void print_tasks_user_threads(task_t task) { - thread_t thread = current_thread(); + thread_t thread = current_thread(); x86_saved_state_t *savestate; - pmap_t pmap = 0; - uint64_t rbp; - const char *cur_marker = 0; + pmap_t pmap = 0; + uint64_t rbp; + const char *cur_marker = 0; int j; - - for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count; - ++j, thread = (thread_t) queue_next(&thread->task_threads)) { + for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count; + ++j, thread = (thread_t) queue_next(&thread->task_threads)) { paniclog_append_noflush("Thread %d: %p\n", j, thread); pmap = get_task_pmap(task); savestate = get_user_regs(thread); @@ -1442,13 +1464,12 @@ print_tasks_user_threads(task_t task) void print_thread_num_that_crashed(task_t task) { - 
thread_t c_thread = current_thread(); - thread_t thread; + thread_t c_thread = current_thread(); + thread_t thread; int j; - - for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count; - ++j, thread = (thread_t) queue_next(&thread->task_threads)) { + for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count; + ++j, thread = (thread_t) queue_next(&thread->task_threads)) { if (c_thread == thread) { paniclog_append_noflush("\nThread %d crashed\n", j); break; @@ -1458,22 +1479,23 @@ print_thread_num_that_crashed(task_t task) #define PANICLOG_UUID_BUF_SIZE 256 -void print_uuid_info(task_t task) +void +print_uuid_info(task_t task) { - uint32_t uuid_info_count = 0; - mach_vm_address_t uuid_info_addr = 0; - boolean_t have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map))); - boolean_t have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap))); - int task_pid = pid_from_task(task); - char uuidbuf[PANICLOG_UUID_BUF_SIZE] = {0}; - char *uuidbufptr = uuidbuf; - uint32_t k; + uint32_t uuid_info_count = 0; + mach_vm_address_t uuid_info_addr = 0; + boolean_t have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map))); + boolean_t have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap))); + int task_pid = pid_from_task(task); + char uuidbuf[PANICLOG_UUID_BUF_SIZE] = {0}; + char *uuidbufptr = uuidbuf; + uint32_t k; if (have_pmap && task->active && task_pid > 0) { /* Read dyld_all_image_infos struct from task memory to get UUID array count & location */ struct user64_dyld_all_image_infos task_image_infos; if (debug_copyin(task->map->pmap, task->all_image_info_addr, - &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) { + &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) { uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount; uuid_info_addr = task_image_infos.uuidArray; } @@ -1494,18 +1516,18 @@ void print_uuid_info(task_t task) uint32_t uuid_image_count = 0; char *current_uuid_buffer = NULL; /* Copy in the UUID info array. It may be nonresident, in which case just fix up nloadinfos to 0 */ - + paniclog_append_noflush("\nuuid info:\n"); while (uuid_array_size) { if (uuid_array_size <= PANICLOG_UUID_BUF_SIZE) { uuid_copy_size = uuid_array_size; - uuid_image_count = uuid_array_size/uuid_info_size; + uuid_image_count = uuid_array_size / uuid_info_size; } else { - uuid_image_count = PANICLOG_UUID_BUF_SIZE/uuid_info_size; + uuid_image_count = PANICLOG_UUID_BUF_SIZE / uuid_info_size; uuid_copy_size = uuid_image_count * uuid_info_size; } if (have_pmap && !debug_copyin(task->map->pmap, uuid_info_addr, uuidbufptr, - uuid_copy_size)) { + uuid_copy_size)) { paniclog_append_noflush("Error!! 
Failed to copy UUID info for task %p pid %d\n", task, task_pid); uuid_image_count = 0; break; @@ -1518,8 +1540,8 @@ void print_uuid_info(task_t task) current_uuid_buffer += sizeof(uint64_t); uint8_t *uuid = (uint8_t *)current_uuid_buffer; paniclog_append_noflush("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n", - uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8], - uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]); + uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8], + uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]); current_uuid_buffer += 16; } bzero(&uuidbuf, sizeof(uuidbuf)); @@ -1530,30 +1552,31 @@ void print_uuid_info(task_t task) } } -void print_launchd_info(void) +void +print_launchd_info(void) { - task_t task = current_task(); - thread_t thread = current_thread(); - volatile uint32_t *ppbtcnt = &pbtcnt; - uint64_t bt_tsc_timeout; - int cn = cpu_number(); + task_t task = current_task(); + thread_t thread = current_thread(); + volatile uint32_t *ppbtcnt = &pbtcnt; + uint64_t bt_tsc_timeout; + int cn = cpu_number(); - if(pbtcpu != cn) { + if (pbtcpu != cn) { hw_atomic_add(&pbtcnt, 1); /* Spin on print backtrace lock, which serializes output * Continue anyway if a timeout occurs. */ - hw_lock_to(&pbtlock, ~0U); + hw_lock_to(&pbtlock, ~0U, LCK_GRP_NULL); pbtcpu = cn; } - + print_uuid_info(task); print_thread_num_that_crashed(task); print_threads_registers(thread); print_tasks_user_threads(task); panic_display_system_configuration(TRUE); - + /* Release print backtrace lock, to permit other callers in the * event of panics on multiple processors. */ @@ -1563,6 +1586,7 @@ void print_launchd_info(void) * Timeout and continue after PBT_TIMEOUT_CYCLES. */ bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES; - while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout)); - + while (*ppbtcnt && (rdtsc64() < bt_tsc_timeout)) { + ; + } } diff --git a/osfmk/i386/Diagnostics.c b/osfmk/i386/Diagnostics.c index 90eeb4192..4f1360216 100644 --- a/osfmk/i386/Diagnostics.c +++ b/osfmk/i386/Diagnostics.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
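[Editor's note] debug_copyin() above copies through physical addresses, so every bcopy_phys() must stay inside a single page on both the source and destination side; the per-iteration bound is the minimum of the bytes left in each page and in the request. The UUID copy loop applies the same idea at buffer granularity. A sketch of the chunking arithmetic, assuming 4 KiB pages:

	#include <assert.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096u		/* assumption: 4 KiB pages */
	#define PAGE_MASK (PAGE_SIZE - 1)
	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	/*
	 * Largest chunk that crosses no page boundary on either side and
	 * does not overrun the request -- the bound debug_copyin() feeds
	 * to bcopy_phys() on each iteration.
	 */
	static size_t
	next_chunk(uint64_t src, uint64_t dst, size_t rem)
	{
		size_t src_rem = PAGE_SIZE - (src & PAGE_MASK);
		size_t dst_rem = PAGE_SIZE - (dst & PAGE_MASK);

		return MIN(MIN(src_rem, dst_rem), rem);
	}

	int
	main(void)
	{
		/* source 8 bytes from its page end, destination mid-page: 8 wins */
		assert(next_chunk(0x1ff8, 0x5100, 4096) == 8);
		/* both page-aligned: a full page, capped by the request */
		assert(next_chunk(0x2000, 0x6000, 100) == 100);
		return 0;
	}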
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -83,7 +83,7 @@ diagWork dgWork; uint64_t lastRuptClear = 0ULL; -boolean_t diag_pmc_enabled = FALSE; +boolean_t diag_pmc_enabled = FALSE; void cpu_powerstats(void *); typedef struct { @@ -98,7 +98,7 @@ typedef struct { uint64_t cpu_insns; uint64_t cpu_ucc; uint64_t cpu_urc; -#if DIAG_ALL_PMCS +#if DIAG_ALL_PMCS uint64_t gpmcs[4]; #endif /* DIAG_ALL_PMCS */ } core_energy_stat_t; @@ -125,15 +125,15 @@ typedef struct { } pkg_energy_statistics_t; -int +int diagCall64(x86_saved_state_t * state) { - uint64_t curpos, i, j; - uint64_t selector, data; - uint64_t currNap, durNap; - x86_saved_state64_t *regs; - boolean_t diagflag; - uint32_t rval = 0; + uint64_t curpos, i, j; + uint64_t selector, data; + uint64_t currNap, durNap; + x86_saved_state64_t *regs; + boolean_t diagflag; + uint32_t rval = 0; assert(is_saved_state64(state)); regs = saved_state64(state); @@ -141,46 +141,47 @@ diagCall64(x86_saved_state_t * state) diagflag = ((dgWork.dgFlags & enaDiagSCs) != 0); selector = regs->rdi; - switch (selector) { /* Select the routine */ - case dgRuptStat: /* Suck Interruption statistics */ + switch (selector) { /* Select the routine */ + case dgRuptStat: /* Suck Interruption statistics */ (void) ml_set_interrupts_enabled(TRUE); data = regs->rsi; /* Get the number of processors */ if (data == 0) { /* If no location is specified for data, clear all - * counts - */ - for (i = 0; i < real_ncpus; i++) { /* Cycle through - * processors */ - for (j = 0; j < 256; j++) + * counts + */ + for (i = 0; i < real_ncpus; i++) { /* Cycle through + * processors */ + for (j = 0; j < 256; j++) { cpu_data_ptr[i]->cpu_hwIntCnt[j] = 0; + } } - lastRuptClear = mach_absolute_time(); /* Get the time of clear */ - rval = 1; /* Normal return */ + lastRuptClear = mach_absolute_time(); /* Get the time of clear */ + rval = 1; /* Normal return */ (void) ml_set_interrupts_enabled(FALSE); break; } - (void) copyout((char *) &real_ncpus, data, sizeof(real_ncpus)); /* Copy out number of - * processors */ - currNap = mach_absolute_time(); /* Get the time now */ - durNap = currNap - lastRuptClear; /* Get the last interval - * duration */ - if (durNap == 0) - durNap = 1; /* This is a very short time, make it - * bigger */ - - curpos = data + sizeof(real_ncpus); /* Point to the next - * available spot */ - - for (i = 0; i < real_ncpus; i++) { /* Move 'em all out */ - (void) copyout((char *) &durNap, curpos, 8); /* Copy out the time - * since last clear */ - (void) copyout((char *) &cpu_data_ptr[i]->cpu_hwIntCnt, curpos + 8, 256 * sizeof(uint32_t)); /* Copy out interrupt - * data for this - * processor */ - curpos = curpos + (256 * sizeof(uint32_t) + 8); /* Point to next out put - * slot */ + (void) copyout((char *) &real_ncpus, data, sizeof(real_ncpus)); /* Copy out number of + * processors */ + currNap = mach_absolute_time(); /* Get the time now */ + durNap = currNap - lastRuptClear; /* Get the last interval + * duration */ + if (durNap == 0) { + durNap = 1; /* This is a very short time, make it + * bigger */ + } + curpos = data + sizeof(real_ncpus); /* Point to the next + * available spot */ + + for (i = 0; i < real_ncpus; i++) { /* Move 'em all out */ + (void) copyout((char *) &durNap, curpos, 8); /* Copy out the time + * since last clear */ + (void) copyout((char *) &cpu_data_ptr[i]->cpu_hwIntCnt, curpos + 8, 256 * sizeof(uint32_t)); /* Copy out interrupt + * data for this + * processor */ + curpos = curpos + (256 * sizeof(uint32_t) + 8); /* Point to next out put + * slot */ } rval 
= 1; (void) ml_set_interrupts_enabled(FALSE); @@ -266,7 +267,7 @@ diagCall64(x86_saved_state_t * state) curpos = regs->rsi + sizeof(pkes); mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_powerstats, NULL); - + for (i = 0; i < real_ncpus; i++) { (void) ml_set_interrupts_enabled(FALSE); @@ -298,10 +299,10 @@ diagCall64(x86_saved_state_t * state) rval = 1; (void) ml_set_interrupts_enabled(FALSE); } - break; - case dgEnaPMC: - { - boolean_t enable = TRUE; + break; + case dgEnaPMC: + { + boolean_t enable = TRUE; uint32_t cpuinfo[4]; /* Require architectural PMC v2 or higher, corresponding to * Merom+, or equivalent virtualised facility. @@ -311,10 +312,10 @@ diagCall64(x86_saved_state_t * state) mp_cpus_call(CPUMASK_ALL, ASYNC, cpu_pmc_control, &enable); diag_pmc_enabled = TRUE; } - rval = 1; - } - break; -#if DEVELOPMENT || DEBUG + rval = 1; + } + break; +#if DEVELOPMENT || DEBUG case dgGzallocTest: { (void) ml_set_interrupts_enabled(TRUE); @@ -329,17 +330,18 @@ diagCall64(x86_saved_state_t * state) #endif #if DEVELOPMENT || DEBUG - case dgPermCheck: + case dgPermCheck: { (void) ml_set_interrupts_enabled(TRUE); - if (diagflag) + if (diagflag) { rval = pmap_permissions_verify(kernel_pmap, kernel_map, 0, ~0ULL); + } (void) ml_set_interrupts_enabled(FALSE); } - break; + break; #endif /* DEVELOPMENT || DEBUG */ - default: /* Handle invalid ones */ - rval = 0; /* Return an exception */ + default: /* Handle invalid ones */ + rval = 0; /* Return an exception */ } regs->rax = rval; @@ -348,7 +350,9 @@ diagCall64(x86_saved_state_t * state) return rval; } -void cpu_powerstats(__unused void *arg) { +void +cpu_powerstats(__unused void *arg) +{ cpu_data_t *cdp = current_cpu_datap(); __unused int cnum = cdp->cpu_number; uint32_t cl = 0, ch = 0, mpl = 0, mph = 0, apl = 0, aph = 0; @@ -395,16 +399,17 @@ void cpu_powerstats(__unused void *arg) { } } -void cpu_pmc_control(void *enablep) { +void +cpu_pmc_control(void *enablep) +{ #if !MONOTONIC boolean_t enable = *(boolean_t *)enablep; - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); if (enable) { wrmsr64(0x38F, 0x70000000FULL); wrmsr64(0x38D, 0x333); set_cr4(get_cr4() | CR4_PCE); - } else { wrmsr64(0x38F, 0); wrmsr64(0x38D, 0); diff --git a/osfmk/i386/Diagnostics.h b/osfmk/i386/Diagnostics.h index 133ddc463..c0496a40a 100644 --- a/osfmk/i386/Diagnostics.h +++ b/osfmk/i386/Diagnostics.h @@ -2,7 +2,7 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
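[Editor's note] The dgRuptStat handler above copies out a fixed layout: the CPU count, then for each CPU an 8-byte "time since last clear" followed by 256 32-bit interrupt counters, matching the curpos advancement of 256 * sizeof(uint32_t) + 8. A sketch of the sizing arithmetic a caller would use (assuming real_ncpus is an unsigned int, as in this file; not a supported interface):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Per-CPU record: 8-byte interval duration + 256 x uint32_t
	 * interrupt counts = 1032 bytes, preceded by one copy of the
	 * CPU count.
	 */
	static size_t
	rupt_stat_buf_size(unsigned int ncpus)
	{
		size_t per_cpu = sizeof(uint64_t) + 256 * sizeof(uint32_t);

		return sizeof(unsigned int) + (size_t)ncpus * per_cpu;
	}

	int
	main(void)
	{
		printf("8 CPUs need %zu bytes\n", rupt_stat_buf_size(8));	/* 4 + 8 * 1032 */
		return 0;
	}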
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,7 +36,7 @@ * Here are the Diagnostic interface interfaces * Lovingly crafted by Bill Angell using traditional methods */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _DIAGNOSTICS_H_ #define _DIAGNOSTICS_H_ @@ -71,15 +71,14 @@ int diagCall64(x86_saved_state_t *regs); #define dgAcntg 20 #define dgKlra 21 #define dgEnaPMC 22 -#define dgWar 23 +#define dgWar 23 #define dgNapStat 24 #define dgRuptStat 25 -#define dgPermCheck 26 +#define dgPermCheck 26 -typedef struct diagWork { /* Diagnostic work area */ - - unsigned int dgLock; /* Lock if needed */ - unsigned int dgFlags; /* Flags */ +typedef struct diagWork { /* Diagnostic work area */ + unsigned int dgLock; /* Lock if needed */ + unsigned int dgFlags; /* Flags */ #define enaExpTrace 0x00000001 #define enaUsrFCall 0x00000002 #define enaUsrPhyMp 0x00000004 @@ -88,14 +87,13 @@ typedef struct diagWork { /* Diagnostic work area */ #define enaDiagEM 0x00000020 #define enaDiagTrap 0x00000040 #define enaNotifyEM 0x00000080 - + unsigned int dgMisc0; unsigned int dgMisc1; unsigned int dgMisc2; unsigned int dgMisc3; unsigned int dgMisc4; unsigned int dgMisc5; - } diagWork; extern diagWork dgWork; @@ -109,11 +107,12 @@ extern diagWork dgWork; #define GPMC2 (2) #define GPMC3 (3) -static inline uint64_t read_pmc(uint32_t counter) +static inline uint64_t +read_pmc(uint32_t counter) { uint32_t lo = 0, hi = 0; - __asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter)); - return ((((uint64_t)hi) << 32) | ((uint64_t)lo)); + __asm__ volatile ("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter)); + return (((uint64_t)hi) << 32) | ((uint64_t)lo); } #endif /* _DIAGNOSTICS_H_ */ diff --git a/osfmk/i386/acpi.c b/osfmk/i386/acpi.c index 5a991c597..10ab92123 100644 --- a/osfmk/i386/acpi.c +++ b/osfmk/i386/acpi.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
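[Editor's note] read_pmc() above issues RDPMC, which returns its 64-bit count split across EDX:EAX; note the explicit uint64_t cast before the shift, since shifting a 32-bit value left by 32 is undefined behavior in C. A minimal sketch of the recombination:

	#include <assert.h>
	#include <stdint.h>

	/*
	 * RDPMC (like RDTSC and RDMSR) delivers EDX:EAX halves. Widen the
	 * high half before shifting: `hi << 32` on a 32-bit operand is
	 * undefined behavior.
	 */
	static inline uint64_t
	combine_hi_lo(uint32_t hi, uint32_t lo)
	{
		return ((uint64_t)hi << 32) | (uint64_t)lo;
	}

	int
	main(void)
	{
		assert(combine_hi_lo(0x1, 0x2) == 0x100000002ULL);
		assert(combine_hi_lo(0xFFFFFFFFu, 0) == 0xFFFFFFFF00000000ULL);
		return 0;
	}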
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -73,14 +73,14 @@ #endif /* MONOTONIC */ #if CONFIG_SLEEP -extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon); -extern void acpi_wake_prot(void); +extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon); +extern void acpi_wake_prot(void); #endif extern kern_return_t IOCPURunPlatformQuiesceActions(void); extern kern_return_t IOCPURunPlatformActiveActions(void); extern kern_return_t IOCPURunPlatformHaltRestartActions(uint32_t message); -extern void fpinit(void); +extern void fpinit(void); vm_offset_t acpi_install_wake_handler(void) @@ -95,11 +95,11 @@ acpi_install_wake_handler(void) #if CONFIG_SLEEP -unsigned int save_kdebug_enable = 0; -static uint64_t acpi_sleep_abstime; -static uint64_t acpi_idle_abstime; -static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime; -boolean_t deep_idle_rebase = TRUE; +unsigned int save_kdebug_enable = 0; +static uint64_t acpi_sleep_abstime; +static uint64_t acpi_idle_abstime; +static uint64_t acpi_wake_abstime, acpi_wake_postrebase_abstime; +boolean_t deep_idle_rebase = TRUE; #if HIBERNATION struct acpi_hibernate_callback_data { @@ -114,35 +114,32 @@ acpi_hibernate(void *refcon) uint32_t mode; acpi_hibernate_callback_data_t *data = - (acpi_hibernate_callback_data_t *)refcon; + (acpi_hibernate_callback_data_t *)refcon; - if (current_cpu_datap()->cpu_hibernate) - { + if (current_cpu_datap()->cpu_hibernate) { mode = hibernate_write_image(); - if( mode == kIOHibernatePostWriteHalt ) - { + if (mode == kIOHibernatePostWriteHalt) { // off HIBLOG("power off\n"); IOCPURunPlatformHaltRestartActions(kPEHaltCPU); - if (PE_halt_restart) (*PE_halt_restart)(kPEHaltCPU); - } - else if( mode == kIOHibernatePostWriteRestart ) - { + if (PE_halt_restart) { + (*PE_halt_restart)(kPEHaltCPU); + } + } else if (mode == kIOHibernatePostWriteRestart) { // restart HIBLOG("restart\n"); IOCPURunPlatformHaltRestartActions(kPERestartCPU); - if (PE_halt_restart) (*PE_halt_restart)(kPERestartCPU); - } - else - { + if (PE_halt_restart) { + (*PE_halt_restart)(kPERestartCPU); + } + } else { // sleep HIBLOG("sleep\n"); - + // should we come back via regular wake, set the state in memory. 
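After hibernate_write_image() returns, the acpi_hibernate() path above picks one of three exits: power off, restart, or fall through into the normal sleep sequence. A condensed restatement of that policy, assuming the kIOHibernatePostWrite* constants used above; halt_machine() and restart_machine() are hypothetical stand-ins for the IOCPURunPlatformHaltRestartActions()/PE_halt_restart() pairs.

static void
post_image_write_action(uint32_t mode)
{
	switch (mode) {
	case kIOHibernatePostWriteHalt:
		halt_machine();         /* hypothetical: image written, power off */
		break;
	case kIOHibernatePostWriteRestart:
		restart_machine();      /* hypothetical: image written, reboot */
		break;
	default:
		break;                  /* normal case: continue into sleep */
	}
}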
- cpu_datap(0)->cpu_hibernate = 0; + cpu_datap(0)->cpu_hibernate = 0; } - } #if CONFIG_VMX @@ -161,8 +158,8 @@ acpi_hibernate(void *refcon) #endif /* HIBERNATION */ #endif /* CONFIG_SLEEP */ -extern void slave_pstart(void); -extern void hibernate_rebuild_vm_structs(void); +extern void slave_pstart(void); +extern void hibernate_rebuild_vm_structs(void); extern unsigned int wake_nkdbufs; extern unsigned int trace_wrap; @@ -175,25 +172,27 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) #endif boolean_t did_hibernate; cpu_data_t *cdp = current_cpu_datap(); - unsigned int cpu; - kern_return_t rc; - unsigned int my_cpu; - uint64_t start; - uint64_t elapsed = 0; - uint64_t elapsed_trace_start = 0; + unsigned int cpu; + kern_return_t rc; + unsigned int my_cpu; + uint64_t start; + uint64_t elapsed = 0; + uint64_t elapsed_trace_start = 0; my_cpu = cpu_number(); kprintf("acpi_sleep_kernel hib=%d, cpu=%d\n", cdp->cpu_hibernate, - my_cpu); + my_cpu); /* Get all CPUs to be in the "off" state */ for (cpu = 0; cpu < real_ncpus; cpu += 1) { - if (cpu == my_cpu) + if (cpu == my_cpu) { continue; + } rc = pmCPUExitHaltToOff(cpu); - if (rc != KERN_SUCCESS) + if (rc != KERN_SUCCESS) { panic("Error %d trying to transition CPU %d to OFF", - rc, cpu); + rc, cpu); + } } /* shutdown local APIC before passing control to firmware */ @@ -219,7 +218,7 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) /* * Enable FPU/SIMD unit for potential hibernate acceleration */ - clear_ts(); + clear_ts(); KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_START); @@ -261,15 +260,15 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) * for compatibility with firewire kprintf. */ - if (FALSE == disable_serial_output) + if (FALSE == disable_serial_output) { pal_serial_init(); + } #if HIBERNATION if (current_cpu_datap()->cpu_hibernate) { did_hibernate = TRUE; - } else -#endif +#endif { did_hibernate = FALSE; } @@ -296,7 +295,7 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) #endif #if CONFIG_VMX - /* + /* * Restore VT mode */ vmx_resume(did_hibernate); @@ -310,8 +309,9 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) /* re-enable and re-init local apic (prior to starting timers) */ - if (lapic_probe()) + if (lapic_probe()) { lapic_configure(); + } #if KASAN /* @@ -356,7 +356,7 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) #endif /* HIBERNATION */ KDBG(IOKDBG_CODE(DBG_HIBERNATE, 0) | DBG_FUNC_END, start, elapsed, - elapsed_trace_start, acpi_wake_abstime); + elapsed_trace_start, acpi_wake_abstime); /* Restore power management register state */ pmCPUMarkRunning(current_cpu_datap()); @@ -374,7 +374,7 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) #if CONFIG_SLEEP /* Because we don't save the bootstrap page, and we share it - * between sleep and mp slave init, we need to recreate it + * between sleep and mp slave init, we need to recreate it * after coming back from sleep or hibernate */ install_real_mode_bootstrap(slave_pstart); #endif @@ -395,10 +395,10 @@ acpi_sleep_kernel(acpi_sleep_callback func, void *refcon) void acpi_idle_kernel(acpi_sleep_callback func, void *refcon) { - boolean_t istate = ml_get_interrupts_enabled(); - + boolean_t istate = ml_get_interrupts_enabled(); + kprintf("acpi_idle_kernel, cpu=%d, interrupts %s\n", - cpu_number(), istate ? "enabled" : "disabled"); + cpu_number(), istate ? 
"enabled" : "disabled"); assert(cpu_number() == master_cpu); @@ -410,16 +410,16 @@ acpi_idle_kernel(acpi_sleep_callback func, void *refcon) ml_set_interrupts_enabled(FALSE); } - if (current_cpu_datap()->cpu_hibernate) { - /* Call hibernate_write_image() to put disk to low power state */ - hibernate_write_image(); - cpu_datap(0)->cpu_hibernate = 0; - } + if (current_cpu_datap()->cpu_hibernate) { + /* Call hibernate_write_image() to put disk to low power state */ + hibernate_write_image(); + cpu_datap(0)->cpu_hibernate = 0; + } /* * Call back to caller to indicate that interrupts will remain * disabled while we deep idle, wake and return. - */ + */ IOCPURunPlatformQuiesceActions(); func(refcon); @@ -450,7 +450,7 @@ acpi_idle_kernel(acpi_sleep_callback func, void *refcon) * Get wakeup time relative to the TSC which has progressed. * Then rebase nanotime to reflect time not progressing over sleep * - unless overriden so that tracing can occur during deep_idle. - */ + */ acpi_wake_abstime = mach_absolute_time(); if (deep_idle_rebase) { rtc_sleep_wakeup(acpi_idle_abstime); @@ -462,8 +462,8 @@ acpi_idle_kernel(acpi_sleep_callback func, void *refcon) KERNEL_DEBUG_CONSTANT( MACHDBG_CODE(DBG_MACH_SCHED, MACH_DEEP_IDLE) | DBG_FUNC_END, acpi_wake_abstime, acpi_wake_abstime - acpi_idle_abstime, 0, 0, 0); - - /* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */ + + /* Like S3 sleep, turn on tracing if trace_wake boot-arg is present */ if (kdebug_enable == 0) { if (wake_nkdbufs) { __kdebug_only uint64_t start = mach_absolute_time(); @@ -492,24 +492,25 @@ install_real_mode_bootstrap(void *prot_entry) * mode and then jumping to the common startup, _start(). */ bcopy_phys(kvtophys((vm_offset_t) real_mode_bootstrap_base), - (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET, - real_mode_bootstrap_end-real_mode_bootstrap_base); + (addr64_t) REAL_MODE_BOOTSTRAP_OFFSET, + real_mode_bootstrap_end - real_mode_bootstrap_base); /* * Set the location at the base of the stack to point to the * common startup entry. */ ml_phys_write_word( - PROT_MODE_START+REAL_MODE_BOOTSTRAP_OFFSET, + PROT_MODE_START + REAL_MODE_BOOTSTRAP_OFFSET, (unsigned int)kvtophys((vm_offset_t)prot_entry)); - + /* Flush caches */ __asm__("wbinvd"); } boolean_t -ml_recent_wake(void) { +ml_recent_wake(void) +{ uint64_t ctime = mach_absolute_time(); assert(ctime > acpi_wake_postrebase_abstime); - return ((ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC); + return (ctime - acpi_wake_postrebase_abstime) < 5 * NSEC_PER_SEC; } diff --git a/osfmk/i386/acpi.h b/osfmk/i386/acpi.h index ed8fdd08e..5d911b089 100644 --- a/osfmk/i386/acpi.h +++ b/osfmk/i386/acpi.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -42,12 +42,12 @@ #define PROT_MODE_START 0x800 #define REAL_MODE_BOOTSTRAP_OFFSET 0x2000 -#ifndef ASSEMBLER +#ifndef ASSEMBLER typedef void (*acpi_sleep_callback)(void * refcon); extern vm_offset_t acpi_install_wake_handler(void); -extern void acpi_sleep_kernel(acpi_sleep_callback func, void * refcon); -extern void acpi_idle_kernel(acpi_sleep_callback func, void * refcon); +extern void acpi_sleep_kernel(acpi_sleep_callback func, void * refcon); +extern void acpi_idle_kernel(acpi_sleep_callback func, void * refcon); void install_real_mode_bootstrap(void *prot_entry); -#endif /* ASSEMBLER */ +#endif /* ASSEMBLER */ #endif /* !_I386_ACPI_H_ */ diff --git a/osfmk/i386/apic.h b/osfmk/i386/apic.h index 971e1d092..b8c6d572d 100644 --- a/osfmk/i386/apic.h +++ b/osfmk/i386/apic.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,40 +22,39 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
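The apic.h hunk below realigns the I/O APIC register constants. As context for those masks: each redirection table entry combines an interrupt vector with delivery-mode, destination, polarity, trigger, and mask bits. A hedged sketch of composing the low word of one entry from the IOA_R_R_* constants defined in the hunk that follows; the choice of a level-triggered, active-low, initially masked entry is only an example.

static uint32_t
make_ioapic_redirection(uint8_t vector)
{
	uint32_t entry = vector & IOA_R_R_VECTOR_MASK;  /* interrupt vector */

	entry |= IOA_R_R_DM_FIXED;      /* fixed delivery mode */
	entry |= IOA_R_R_IP_PLRITY_LOW; /* active-low input polarity */
	entry |= IOA_R_R_TM_LEVEL;      /* level-triggered */
	entry |= IOA_R_R_MASKED;        /* leave masked until explicitly unmasked */
	return entry;
}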
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ #ifndef _I386_APIC_H_ #define _I386_APIC_H_ -#define IOAPIC_START 0xFEC00000 -#define IOAPIC_SIZE 0x00000020 +#define IOAPIC_START 0xFEC00000 +#define IOAPIC_SIZE 0x00000020 -#define IOAPIC_RSELECT 0x00000000 -#define IOAPIC_RWINDOW 0x00000010 -#define IOA_R_ID 0x00 -#define IOA_R_ID_SHIFT 24 -#define IOA_R_VERSION 0x01 -#define IOA_R_VERSION_MASK 0xFF -#define IOA_R_VERSION_ME_SHIFT 16 -#define IOA_R_VERSION_ME_MASK 0xFF -#define IOA_R_REDIRECTION 0x10 -#define IOA_R_R_VECTOR_MASK 0x000FF -#define IOA_R_R_DM_MASK 0x00700 -#define IOA_R_R_DM_FIXED 0x00000 -#define IOA_R_R_DM_LOWEST 0x00100 -#define IOA_R_R_DM_NMI 0x00400 -#define IOA_R_R_DM_RESET 0x00500 -#define IOA_R_R_DM_EXTINT 0x00700 -#define IOA_R_R_DEST_LOGICAL 0x00800 -#define IOA_R_R_DS_PENDING 0x01000 -#define IOA_R_R_IP_PLRITY_LOW 0x02000 -#define IOA_R_R_TM_LEVEL 0x08000 -#define IOA_R_R_MASKED 0x10000 +#define IOAPIC_RSELECT 0x00000000 +#define IOAPIC_RWINDOW 0x00000010 +#define IOA_R_ID 0x00 +#define IOA_R_ID_SHIFT 24 +#define IOA_R_VERSION 0x01 +#define IOA_R_VERSION_MASK 0xFF +#define IOA_R_VERSION_ME_SHIFT 16 +#define IOA_R_VERSION_ME_MASK 0xFF +#define IOA_R_REDIRECTION 0x10 +#define IOA_R_R_VECTOR_MASK 0x000FF +#define IOA_R_R_DM_MASK 0x00700 +#define IOA_R_R_DM_FIXED 0x00000 +#define IOA_R_R_DM_LOWEST 0x00100 +#define IOA_R_R_DM_NMI 0x00400 +#define IOA_R_R_DM_RESET 0x00500 +#define IOA_R_R_DM_EXTINT 0x00700 +#define IOA_R_R_DEST_LOGICAL 0x00800 +#define IOA_R_R_DS_PENDING 0x01000 +#define IOA_R_R_IP_PLRITY_LOW 0x02000 +#define IOA_R_R_TM_LEVEL 0x08000 +#define IOA_R_R_MASKED 0x10000 #endif /* _I386_APIC_H_ */ - diff --git a/osfmk/i386/arch_types.h b/osfmk/i386/arch_types.h index 832a3096f..85ef99901 100644 --- a/osfmk/i386/arch_types.h +++ b/osfmk/i386/arch_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ #ifndef _MACHINE_ARCH_TYPES_H_ diff --git a/osfmk/i386/atomic.h b/osfmk/i386/atomic.h index 940e5fcf2..75ce5c5a9 100644 --- a/osfmk/i386/atomic.h +++ b/osfmk/i386/atomic.h @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,7 +31,7 @@ #include -#if __SMP__ +#if __SMP__ #define memory_order_consume_smp memory_order_consume #define memory_order_acquire_smp memory_order_acquire @@ -53,7 +53,7 @@ static inline boolean_t atomic_compare_exchange(uintptr_t *target, uintptr_t oldval, uintptr_t newval, - enum memory_order ord, boolean_t wait) + enum memory_order ord, boolean_t wait) { (void)wait; return __c11_atomic_compare_exchange_strong((_Atomic uintptr_t *)target, &oldval, newval, ord, memory_order_relaxed); @@ -61,7 +61,7 @@ atomic_compare_exchange(uintptr_t *target, uintptr_t oldval, uintptr_t newval, static inline boolean_t atomic_compare_exchange32(uint32_t *target, uint32_t oldval, uint32_t newval, - enum memory_order ord, boolean_t wait) + enum memory_order ord, boolean_t wait) { (void)wait; return __c11_atomic_compare_exchange_strong((_Atomic uint32_t *)target, &oldval, newval, ord, memory_order_relaxed); diff --git a/osfmk/i386/bit_routines.h b/osfmk/i386/bit_routines.h index 2154ec763..f3b279ae4 100644 --- a/osfmk/i386/bit_routines.h +++ b/osfmk/i386/bit_routines.h @@ -106,15 +106,6 @@ : \ "r" (bit), "m" (*(volatile int *)(l))); -static inline char xchgb(volatile char * cp, char new) -{ - char old = new; - - __asm__ volatile (" xchgb %0,%2" : - "=q" (old) : - "0" (new), "m" (*(volatile char *)cp) : "memory"); - return (old); -} static inline void atomic_incl(volatile long * p, long delta) { @@ -124,22 +115,6 @@ static inline void atomic_incl(volatile long * p, long delta) "r" (delta), "m" (*(volatile long *)p)); } -static inline void atomic_incs(volatile short * p, short delta) -{ - __asm__ volatile (" lock \n \ - addw %0,%1" : \ - : \ - "q" (delta), "m" (*(volatile short *)p)); -} - -static inline void atomic_incb(volatile char * p, char delta) -{ - __asm__ volatile (" lock \n \ - addb %0,%1" : \ - : \ - "q" (delta), "m" (*(volatile char *)p)); -} - static inline void atomic_decl(volatile long * p, long delta) { __asm__ volatile (" lock \n \ @@ -159,53 +134,6 @@ static inline int atomic_decl_and_test(volatile long * p, long delta) : "r" (delta), "m" (*(volatile long *)p)); return ret; } - -static inline void atomic_decs(volatile short * p, short delta) -{ - __asm__ volatile (" lock \n \ - subw %0,%1" : \ - : \ - "q" (delta), "m" (*(volatile short *)p)); -} - -static inline void atomic_decb(volatile char * p, char delta) -{ - __asm__ volatile (" lock \n \ - subb %0,%1" : \ - : \ - "q" (delta), "m" (*(volatile char *)p)); -} - -static inline long atomic_getl(const volatile long * p) -{ - return (*p); -} - -static inline short atomic_gets(const volatile short * p) -{ - return (*p); -} - -static inline char atomic_getb(const volatile char * p) -{ - return (*p); 
-} - -static inline void atomic_setl(volatile long * p, long value) -{ - *p = value; -} - -static inline void atomic_sets(volatile short * p, short value) -{ - *p = value; -} - -static inline void atomic_setb(volatile char * p, char value) -{ - *p = value; -} - #endif /* MACH_KERNEL_PRIVATE */ #endif /* _I386_BIT_ROUTINES_H_ */ diff --git a/osfmk/i386/bsd_i386.c b/osfmk/i386/bsd_i386.c index 805cbc1de..a1d3b4965 100644 --- a/osfmk/i386/bsd_i386.c +++ b/osfmk/i386/bsd_i386.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef MACH_BSD +#ifdef MACH_BSD #include #include @@ -69,7 +69,7 @@ #include <../bsd/sys/sysent.h> #ifdef MACH_BSD -extern void mach_kauth_cred_uthread_update(void); +extern void mach_kauth_cred_uthread_update(void); extern void throttle_lowpri_io(int); #endif @@ -89,60 +89,67 @@ unsigned int get_msr_rbits(void); */ kern_return_t thread_userstack( - __unused thread_t thread, - int flavor, - thread_state_t tstate, - __unused unsigned int count, - mach_vm_offset_t *user_stack, - int *customstack, - __unused boolean_t is64bit -) + __unused thread_t thread, + int flavor, + thread_state_t tstate, + __unused unsigned int count, + mach_vm_offset_t *user_stack, + int *customstack, + __unused boolean_t is64bit + ) { - if (customstack) + if (customstack) { *customstack = 0; + } switch (flavor) { case x86_THREAD_STATE32: - { - x86_thread_state32_t *state25; + { + x86_thread_state32_t *state25; - state25 = (x86_thread_state32_t *) tstate; + state25 = (x86_thread_state32_t *) tstate; - if (state25->esp) { - *user_stack = state25->esp; - if (customstack) - *customstack = 1; - } else { - *user_stack = VM_USRSTACK32; - if (customstack) - *customstack = 0; + if (state25->esp) { + *user_stack = state25->esp; + if (customstack) { + *customstack = 1; + } + } else { + *user_stack = VM_USRSTACK32; + if (customstack) { + *customstack = 0; } - break; } + break; + } + case x86_THREAD_FULL_STATE64: + /* FALL THROUGH */ case x86_THREAD_STATE64: - { - x86_thread_state64_t *state25; + { + x86_thread_state64_t *state25; - state25 = (x86_thread_state64_t *) tstate; + state25 = (x86_thread_state64_t *) tstate; - if (state25->rsp) { - *user_stack = state25->rsp; - if (customstack) - *customstack = 1; - } else { - *user_stack = VM_USRSTACK64; - if (customstack) - *customstack = 0; + if (state25->rsp) { + *user_stack = state25->rsp; + if (customstack) { + *customstack = 1; + } + } else { + *user_stack = 
VM_USRSTACK64; + if (customstack) { + *customstack = 0; } - break; } + break; + } default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -161,47 +168,48 @@ thread_userstackdefault( } else { *default_user_stack = VM_USRSTACK32; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t thread_entrypoint( - __unused thread_t thread, - int flavor, - thread_state_t tstate, - __unused unsigned int count, - mach_vm_offset_t *entry_point -) -{ + __unused thread_t thread, + int flavor, + thread_state_t tstate, + __unused unsigned int count, + mach_vm_offset_t *entry_point + ) +{ /* * Set a default. */ - if (*entry_point == 0) + if (*entry_point == 0) { *entry_point = VM_MIN_ADDRESS; + } switch (flavor) { case x86_THREAD_STATE32: - { - x86_thread_state32_t *state25; + { + x86_thread_state32_t *state25; - state25 = (i386_thread_state_t *) tstate; - *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS; - break; - } + state25 = (i386_thread_state_t *) tstate; + *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS; + break; + } case x86_THREAD_STATE64: - { - x86_thread_state64_t *state25; + { + x86_thread_state64_t *state25; - state25 = (x86_thread_state64_t *) tstate; - *entry_point = state25->rip ? state25->rip: VM_MIN_ADDRESS64; - break; - } + state25 = (x86_thread_state64_t *) tstate; + *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64; + break; + } } - return (KERN_SUCCESS); + return KERN_SUCCESS; } -/* +/* * FIXME - thread_set_child */ @@ -212,7 +220,7 @@ thread_set_child(thread_t child, int pid) pal_register_cache_state(child, DIRTY); if (thread_is_64bit_addr(child)) { - x86_saved_state64_t *iss64; + x86_saved_state64_t *iss64; iss64 = USER_REGS64(child); @@ -220,7 +228,7 @@ thread_set_child(thread_t child, int pid) iss64->rdx = 1; iss64->isf.rflags &= ~EFL_CF; } else { - x86_saved_state32_t *iss32; + x86_saved_state32_t *iss32; iss32 = USER_REGS32(child); @@ -242,15 +250,15 @@ __attribute__((noreturn)) void machdep_syscall(x86_saved_state_t *state) { - int args[machdep_call_count]; - int trapno; - int nargs; - const machdep_call_t *entry; - x86_saved_state32_t *regs; + int args[machdep_call_count]; + int trapno; + int nargs; + const machdep_call_t *entry; + x86_saved_state32_t *regs; assert(is_saved_state32(state)); regs = saved_state32(state); - + trapno = regs->eax; #if DEBUG_TRACE kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno); @@ -269,8 +277,8 @@ machdep_syscall(x86_saved_state_t *state) nargs = entry->nargs; if (nargs != 0) { - if (copyin((user_addr_t) regs->uesp + sizeof (int), - (char *) args, (nargs * sizeof (int)))) { + if (copyin((user_addr_t) regs->uesp + sizeof(int), + (char *) args, (nargs * sizeof(int)))) { regs->eax = KERN_INVALID_ADDRESS; thread_exception_return(); @@ -285,19 +293,19 @@ machdep_syscall(x86_saved_state_t *state) regs->eax = (*entry->routine.args_1)(args[0]); break; case 2: - regs->eax = (*entry->routine.args_2)(args[0],args[1]); + regs->eax = (*entry->routine.args_2)(args[0], args[1]); break; case 3: - if (!entry->bsd_style) - regs->eax = (*entry->routine.args_3)(args[0],args[1],args[2]); - else { - int error; - uint32_t rval; + if (!entry->bsd_style) { + regs->eax = (*entry->routine.args_3)(args[0], args[1], args[2]); + } else { + int error; + uint32_t rval; error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]); if (error) { regs->eax = error; - regs->efl |= EFL_CF; /* carry bit */ + regs->efl |= EFL_CF; /* carry bit */ } 
else { regs->eax = rval; regs->efl &= ~EFL_CF; @@ -330,13 +338,13 @@ __attribute__((noreturn)) void machdep_syscall64(x86_saved_state_t *state) { - int trapno; - const machdep_call_t *entry; - x86_saved_state64_t *regs; + int trapno; + const machdep_call_t *entry; + x86_saved_state64_t *regs; assert(is_saved_state64(state)); regs = saved_state64(state); - + trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK); DEBUG_KPRINT_SYSCALL_MDEP( @@ -360,6 +368,23 @@ machdep_syscall64(x86_saved_state_t *state) case 2: regs->rax = (*entry->routine.args64_2)(regs->rdi, regs->rsi); break; + case 3: + if (!entry->bsd_style) { + regs->rax = (*entry->routine.args64_3)(regs->rdi, regs->rsi, regs->rdx); + } else { + int error; + uint32_t rval; + + error = (*entry->routine.args64_bsd_3)(&rval, regs->rdi, regs->rsi, regs->rdx); + if (error) { + regs->rax = (uint64_t)error; + regs->isf.rflags |= EFL_CF; /* carry bit */ + } else { + regs->rax = rval; + regs->isf.rflags &= ~(uint64_t)EFL_CF; + } + } + break; default: panic("machdep_syscall64: too many args"); } @@ -378,7 +403,7 @@ machdep_syscall64(x86_saved_state_t *state) /* NOTREACHED */ } -#endif /* MACH_BSD */ +#endif /* MACH_BSD */ typedef kern_return_t (*mach_call_t)(void *); @@ -402,8 +427,9 @@ mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap static kern_return_t mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp) { - if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof (int))) + if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof(int))) { return KERN_INVALID_ARGUMENT; + } #if CONFIG_REQUIRES_U32_MUNGING trapp->mach_trap_arg_munge32(args); #else @@ -426,7 +452,7 @@ mach_call_munger(x86_saved_state_t *state) mach_call_t mach_call; kern_return_t retval; struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - x86_saved_state32_t *regs; + x86_saved_state32_t *regs; struct uthread *ut = get_bsdthread_info(current_thread()); uthread_reset_proc_refcount(ut); @@ -458,7 +484,7 @@ mach_call_munger(x86_saved_state_t *state) argc = mach_trap_table[call_number].mach_trap_arg_count; if (argc) { - retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]); + retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]); if (retval != KERN_SUCCESS) { regs->eax = retval; @@ -475,16 +501,16 @@ mach_call_munger(x86_saved_state_t *state) #endif KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, - args.arg1, args.arg2, args.arg3, args.arg4, 0); + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, + args.arg1, args.arg2, args.arg3, args.arg4, 0); retval = mach_call(&args); DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END, - retval, 0, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END, + retval, 0, 0, 0, 0); regs->eax = retval; @@ -517,7 +543,7 @@ mach_call_munger64(x86_saved_state_t *state) int argc; mach_call_t mach_call; struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }; - x86_saved_state64_t *regs; + x86_saved_state64_t *regs; struct uthread *ut = get_bsdthread_info(current_thread()); uthread_reset_proc_refcount(ut); @@ -531,18 +557,18 @@ mach_call_munger64(x86_saved_state_t *state) "mach_call_munger64: code=%d(%s)\n", call_number, 
mach_syscall_name_table[call_number]); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_START, - regs->rdi, regs->rsi, regs->rdx, regs->r10, 0); - + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START, + regs->rdi, regs->rsi, regs->rdx, regs->r10, 0); + if (call_number < 0 || call_number >= mach_trap_count) { - i386_exception(EXC_SYSCALL, regs->rax, 1); + i386_exception(EXC_SYSCALL, regs->rax, 1); /* NOTREACHED */ } mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function; if (mach_call == (mach_call_t)kern_invalid) { - i386_exception(EXC_SYSCALL, regs->rax, 1); + i386_exception(EXC_SYSCALL, regs->rax, 1); /* NOTREACHED */ } argc = mach_trap_table[call_number].mach_trap_arg_count; @@ -552,14 +578,14 @@ mach_call_munger64(x86_saved_state_t *state) memcpy(&args.arg1, ®s->rdi, args_in_regs * sizeof(syscall_arg_t)); if (argc > 6) { - int copyin_count; + int copyin_count; assert(argc <= 9); copyin_count = (argc - 6) * (int)sizeof(syscall_arg_t); - if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) { - regs->rax = KERN_INVALID_ARGUMENT; - + if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&args.arg7, copyin_count)) { + regs->rax = KERN_INVALID_ARGUMENT; + thread_exception_return(); /* NOTREACHED */ } @@ -571,12 +597,12 @@ mach_call_munger64(x86_saved_state_t *state) #endif regs->rax = (uint64_t)mach_call((void *)&args); - + DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END, - regs->rax, 0, 0, 0, 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END, + regs->rax, 0, 0, 0, 0); #if DEBUG || DEVELOPMENT kern_allocation_name_t @@ -605,18 +631,18 @@ mach_call_munger64(x86_saved_state_t *state) */ void thread_setuserstack( - thread_t thread, - mach_vm_address_t user_stack) + thread_t thread, + mach_vm_address_t user_stack) { pal_register_cache_state(thread, DIRTY); if (thread_is_64bit_addr(thread)) { - x86_saved_state64_t *iss64; + x86_saved_state64_t *iss64; iss64 = USER_REGS64(thread); iss64->isf.rsp = (uint64_t)user_stack; } else { - x86_saved_state32_t *iss32; + x86_saved_state32_t *iss32; iss32 = USER_REGS32(thread); @@ -632,12 +658,12 @@ thread_setuserstack( */ uint64_t thread_adjuserstack( - thread_t thread, - int adjust) + thread_t thread, + int adjust) { pal_register_cache_state(thread, DIRTY); if (thread_is_64bit_addr(thread)) { - x86_saved_state64_t *iss64; + x86_saved_state64_t *iss64; iss64 = USER_REGS64(thread); @@ -645,7 +671,7 @@ thread_adjuserstack( return iss64->isf.rsp; } else { - x86_saved_state32_t *iss32; + x86_saved_state32_t *iss32; iss32 = USER_REGS32(thread); @@ -666,13 +692,13 @@ thread_setentrypoint(thread_t thread, mach_vm_address_t entry) { pal_register_cache_state(thread, DIRTY); if (thread_is_64bit_addr(thread)) { - x86_saved_state64_t *iss64; + x86_saved_state64_t *iss64; iss64 = USER_REGS64(thread); iss64->isf.rip = (uint64_t)entry; } else { - x86_saved_state32_t *iss32; + x86_saved_state32_t *iss32; iss32 = USER_REGS32(thread); @@ -686,37 +712,39 @@ thread_setsinglestep(thread_t thread, int on) { pal_register_cache_state(thread, DIRTY); if (thread_is_64bit_addr(thread)) { - x86_saved_state64_t *iss64; + x86_saved_state64_t *iss64; iss64 = USER_REGS64(thread); - if (on) + if (on) { iss64->isf.rflags 
|= EFL_TF; - else + } else { iss64->isf.rflags &= ~EFL_TF; + } } else { - x86_saved_state32_t *iss32; + x86_saved_state32_t *iss32; iss32 = USER_REGS32(thread); if (on) { iss32->efl |= EFL_TF; /* Ensure IRET */ - if (iss32->cs == SYSENTER_CS) + if (iss32->cs == SYSENTER_CS) { iss32->cs = SYSENTER_TF_CS; - } - else + } + } else { iss32->efl &= ~EFL_TF; + } } - - return (KERN_SUCCESS); + + return KERN_SUCCESS; } void * get_user_regs(thread_t th) { pal_register_cache_state(th, DIRTY); - return(USER_STATE(th)); + return USER_STATE(th); } void * @@ -734,11 +762,10 @@ x86_saved_state_t *find_kern_regs(thread_t); x86_saved_state_t * find_kern_regs(thread_t thread) { - if (thread == current_thread() && - NULL != current_cpu_datap()->cpu_int_state && - !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state && - current_cpu_datap()->cpu_interrupt_level == 1)) { - + if (thread == current_thread() && + NULL != current_cpu_datap()->cpu_int_state && + !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state && + current_cpu_datap()->cpu_interrupt_level == 1)) { return current_cpu_datap()->cpu_int_state; } else { return NULL; diff --git a/osfmk/i386/bsd_i386_native.c b/osfmk/i386/bsd_i386_native.c index 6b8e1d124..541ec6a72 100644 --- a/osfmk/i386/bsd_i386_native.c +++ b/osfmk/i386/bsd_i386_native.c @@ -2,7 +2,7 @@ * Copyright (c) 2010-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -74,22 +74,22 @@ */ kern_return_t machine_thread_dup( - thread_t parent, - thread_t child, - __unused boolean_t is_corpse -) + thread_t parent, + thread_t child, + __unused boolean_t is_corpse + ) { - - pcb_t parent_pcb = THREAD_TO_PCB(parent); - pcb_t child_pcb = THREAD_TO_PCB(child); + pcb_t parent_pcb = THREAD_TO_PCB(parent); + pcb_t child_pcb = THREAD_TO_PCB(child); /* * Copy over the x86_saved_state registers */ - if (thread_is_64bit_addr(parent)) + if (thread_is_64bit_addr(parent)) { bcopy(USER_REGS64(parent), USER_REGS64(child), sizeof(x86_saved_state64_t)); - else + } else { bcopy(USER_REGS32(parent), USER_REGS32(child), sizeof(x86_saved_state32_t)); + } /* * Check to see if parent is using floating point @@ -97,26 +97,27 @@ machine_thread_dup( */ fpu_dup_fxstate(parent, child); -#ifdef MACH_BSD +#ifdef MACH_BSD /* * Copy the parent's cthread id and USER_CTHREAD descriptor, if 32-bit. 
*/ child_pcb->cthread_self = parent_pcb->cthread_self; - if (!thread_is_64bit_addr(parent)) + if (!thread_is_64bit_addr(parent)) { child_pcb->cthread_desc = parent_pcb->cthread_desc; + } /* * FIXME - should a user specified LDT, TSS and V86 info * be duplicated as well?? - probably not. */ // duplicate any use LDT entry that was set I think this is appropriate. - if (parent_pcb->uldt_selector!= 0) { - child_pcb->uldt_selector = parent_pcb->uldt_selector; + if (parent_pcb->uldt_selector != 0) { + child_pcb->uldt_selector = parent_pcb->uldt_selector; child_pcb->uldt_desc = parent_pcb->uldt_desc; } #endif - return (KERN_SUCCESS); + return KERN_SUCCESS; } void thread_set_parent(thread_t parent, int pid); @@ -127,7 +128,7 @@ thread_set_parent(thread_t parent, int pid) pal_register_cache_state(parent, DIRTY); if (thread_is_64bit_addr(parent)) { - x86_saved_state64_t *iss64; + x86_saved_state64_t *iss64; iss64 = USER_REGS64(parent); @@ -135,7 +136,7 @@ thread_set_parent(thread_t parent, int pid) iss64->rdx = 0; iss64->isf.rflags &= ~EFL_CF; } else { - x86_saved_state32_t *iss32; + x86_saved_state32_t *iss32; iss32 = USER_REGS32(parent); @@ -150,7 +151,7 @@ thread_set_parent(thread_t parent, int pid) * current thread to the given thread ID; fast version for 32-bit processes * * Parameters: self Thread ID to set - * + * * Returns: 0 Success * !0 Not success */ @@ -158,15 +159,15 @@ kern_return_t thread_fast_set_cthread_self(uint32_t self) { machine_thread_set_tsd_base(current_thread(), self); - return (USER_CTHREAD); /* N.B.: not a kern_return_t! */ + return USER_CTHREAD; /* N.B.: not a kern_return_t! */ } /* * thread_fast_set_cthread_self64: Sets the machine kernel thread ID of the - * current thread to the given thread ID; fast version for 64-bit processes + * current thread to the given thread ID; fast version for 64-bit processes * * Parameters: self Thread ID - * + * * Returns: 0 Success * !0 Not success */ @@ -174,7 +175,7 @@ kern_return_t thread_fast_set_cthread_self64(uint64_t self) { machine_thread_set_tsd_base(current_thread(), self); - return (USER_CTHREAD); /* N.B.: not a kern_return_t! */ + return USER_CTHREAD; /* N.B.: not a kern_return_t! */ } /* @@ -211,11 +212,12 @@ thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags) pcb_t pcb; struct fake_descriptor temp; - if (flags != 0) - return -1; // flags not supported - if (size > 0xFFFFF) - return -1; // size too big, 1 meg is the limit - + if (flags != 0) { + return -1; // flags not supported + } + if (size > 0xFFFFF) { + return -1; // size too big, 1 meg is the limit + } mp_disable_preemption(); // create a "fake" descriptor so we can use fix_desc() @@ -226,14 +228,14 @@ thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags) temp.offset = address; temp.lim_or_seg = size; temp.size_or_wdct = SZ_32; - temp.access = ACC_P|ACC_PL_U|ACC_DATA_W; + temp.access = ACC_P | ACC_PL_U | ACC_DATA_W; // turn this into a real descriptor - fix_desc(&temp,1); + fix_desc(&temp, 1); // set up our data in the pcb pcb->uldt_desc = *(struct real_descriptor*)&temp; - pcb->uldt_selector = USER_SETTABLE; // set the selector value + pcb->uldt_selector = USER_SETTABLE; // set the selector value // now set it up in the current table... *ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor*)&temp; diff --git a/osfmk/i386/commpage/commpage.c b/osfmk/i386/commpage/commpage.c index 8d93e9456..67f1ea084 100644 --- a/osfmk/i386/commpage/commpage.c +++ b/osfmk/i386/commpage/commpage.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2003-2010 Apple Inc. 
All rights reserved. + * Copyright (c) 2003-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -76,85 +76,88 @@ #endif /* the lists of commpage routines are in commpage_asm.s */ -extern commpage_descriptor* commpage_32_routines[]; -extern commpage_descriptor* commpage_64_routines[]; +extern commpage_descriptor* commpage_32_routines[]; +extern commpage_descriptor* commpage_64_routines[]; -extern vm_map_t commpage32_map; // the shared submap, set up in vm init -extern vm_map_t commpage64_map; // the shared submap, set up in vm init -extern vm_map_t commpage_text32_map; // the shared submap, set up in vm init -extern vm_map_t commpage_text64_map; // the shared submap, set up in vm init +extern vm_map_t commpage32_map; // the shared submap, set up in vm init +extern vm_map_t commpage64_map; // the shared submap, set up in vm init +extern vm_map_t commpage_text32_map; // the shared submap, set up in vm init +extern vm_map_t commpage_text64_map; // the shared submap, set up in vm init -char *commPagePtr32 = NULL; // virtual addr in kernel map of 32-bit commpage -char *commPagePtr64 = NULL; // ...and of 64-bit commpage -char *commPageTextPtr32 = NULL; // virtual addr in kernel map of 32-bit commpage -char *commPageTextPtr64 = NULL; // ...and of 64-bit commpage +char *commPagePtr32 = NULL; // virtual addr in kernel map of 32-bit commpage +char *commPagePtr64 = NULL; // ...and of 64-bit commpage +char *commPageTextPtr32 = NULL; // virtual addr in kernel map of 32-bit commpage +char *commPageTextPtr64 = NULL; // ...and of 64-bit commpage uint64_t _cpu_capabilities = 0; // define the capability vector typedef uint32_t commpage_address_t; -static commpage_address_t next; // next available address in comm page +static commpage_address_t next; // next available address in comm page -static char *commPagePtr; // virtual addr in kernel map of commpage we are working on -static commpage_address_t commPageBaseOffset; // subtract from 32-bit runtime address to get offset in virtual commpage in kernel map +static char *commPagePtr; // virtual addr in kernel map of commpage we are working on +static commpage_address_t commPageBaseOffset; // subtract from 32-bit runtime address to get offset in virtual commpage in kernel map -static commpage_time_data *time_data32 = NULL; -static commpage_time_data *time_data64 = NULL; +static commpage_time_data *time_data32 = NULL; +static commpage_time_data *time_data64 = NULL; static new_commpage_timeofday_data_t 
*gtod_time_data32 = NULL; static new_commpage_timeofday_data_t *gtod_time_data64 = NULL; -decl_simple_lock_data(static,commpage_active_cpus_lock); +decl_simple_lock_data(static, commpage_active_cpus_lock); /* Allocate the commpage and add to the shared submap created by vm: - * 1. allocate a page in the kernel map (RW) + * 1. allocate a page in the kernel map (RW) * 2. wire it down * 3. make a memory entry out of it * 4. map that entry into the shared comm region map (R-only) */ static void* -commpage_allocate( - vm_map_t submap, // commpage32_map or commpage_map64 - size_t area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED - vm_prot_t uperm) +commpage_allocate( + vm_map_t submap, // commpage32_map or commpage_map64 + size_t area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED + vm_prot_t uperm) { - vm_offset_t kernel_addr = 0; // address of commpage in kernel map - vm_offset_t zero = 0; - vm_size_t size = area_used; // size actually populated - vm_map_entry_t entry; - ipc_port_t handle; - kern_return_t kr; + vm_offset_t kernel_addr = 0; // address of commpage in kernel map + vm_offset_t zero = 0; + vm_size_t size = area_used; // size actually populated + vm_map_entry_t entry; + ipc_port_t handle; + kern_return_t kr; vm_map_kernel_flags_t vmk_flags; - if (submap == NULL) + if (submap == NULL) { panic("commpage submap is null"); + } kr = vm_map_kernel(kernel_map, - &kernel_addr, - area_used, - 0, - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_OSFMK, - NULL, - 0, - FALSE, - VM_PROT_ALL, - VM_PROT_ALL, - VM_INHERIT_NONE); - if (kr != KERN_SUCCESS) + &kernel_addr, + area_used, + 0, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_OSFMK, + NULL, + 0, + FALSE, + VM_PROT_ALL, + VM_PROT_ALL, + VM_INHERIT_NONE); + if (kr != KERN_SUCCESS) { panic("cannot allocate commpage %d", kr); + } if ((kr = vm_map_wire_kernel(kernel_map, - kernel_addr, - kernel_addr+area_used, - VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, - FALSE))) + kernel_addr, + kernel_addr + area_used, + VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, + FALSE))) { panic("cannot wire commpage: %d", kr); + } - /* + /* * Now that the object is created and wired into the kernel map, mark it so that no delay * copy-on-write will ever be performed on it as a result of mapping it into user-space. * If such a delayed copy ever occurred, we could remove the kernel's wired mapping - and @@ -162,17 +165,19 @@ commpage_allocate( * * JMM - What we really need is a way to create it like this in the first place. */ - if (!(kr = vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map)) + if (!(kr = vm_map_lookup_entry( kernel_map, vm_map_trunc_page(kernel_addr, VM_MAP_PAGE_MASK(kernel_map)), &entry) || entry->is_sub_map)) { panic("cannot find commpage entry %d", kr); + } VME_OBJECT(entry)->copy_strategy = MEMORY_OBJECT_COPY_NONE; - if ((kr = mach_make_memory_entry( kernel_map, // target map - &size, // size - kernel_addr, // offset (address in kernel map) - uperm, // protections as specified - &handle, // this is the object handle we get - NULL ))) // parent_entry (what is this?) + if ((kr = mach_make_memory_entry( kernel_map, // target map + &size, // size + kernel_addr, // offset (address in kernel map) + uperm, // protections as specified + &handle, // this is the object handle we get + NULL ))) { // parent_entry (what is this?) 
panic("cannot make entry for commpage %d", kr); + } vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; if (uperm == (VM_PROT_READ | VM_PROT_EXECUTE)) { @@ -185,21 +190,22 @@ commpage_allocate( } kr = vm_map_64_kernel( - submap, // target map (shared submap) - &zero, // address (map into 1st page in submap) - area_used, // size - 0, // mask - VM_FLAGS_FIXED, // flags (it must be 1st page in submap) + submap, // target map (shared submap) + &zero, // address (map into 1st page in submap) + area_used, // size + 0, // mask + VM_FLAGS_FIXED, // flags (it must be 1st page in submap) vmk_flags, VM_KERN_MEMORY_NONE, - handle, // port is the memory entry we just made - 0, // offset (map 1st page in memory entry) - FALSE, // copy - uperm, // cur_protection (R-only in user map) - uperm, // max_protection - VM_INHERIT_SHARE); // inheritance - if (kr != KERN_SUCCESS) + handle, // port is the memory entry we just made + 0, // offset (map 1st page in memory entry) + FALSE, // copy + uperm, // cur_protection (R-only in user map) + uperm, // max_protection + VM_INHERIT_SHARE); // inheritance + if (kr != KERN_SUCCESS) { panic("cannot map commpage %d", kr); + } ipc_port_release(handle); /* Make the kernel mapping non-executable. This cannot be done @@ -207,7 +213,7 @@ commpage_allocate( * cannot handle disjoint permissions at this time. */ kr = vm_protect(kernel_map, kernel_addr, area_used, FALSE, VM_PROT_READ | VM_PROT_WRITE); - assert (kr == KERN_SUCCESS); + assert(kr == KERN_SUCCESS); return (void*)(intptr_t)kernel_addr; // return address in kernel map } @@ -216,9 +222,23 @@ commpage_allocate( static void* commpage_addr_of( - commpage_address_t addr_at_runtime ) + commpage_address_t addr_at_runtime ) { - return (void*) ((uintptr_t)commPagePtr + (addr_at_runtime - commPageBaseOffset)); + return (void*) ((uintptr_t)commPagePtr + (addr_at_runtime - commPageBaseOffset)); +} + +/* + * Calculate address of data within 32- and 64-bit commpages (not to be used with commpage + * text). + */ +static void* +commpage_specific_addr_of(char *commPageBase, commpage_address_t addr_at_runtime) +{ + /* + * Note that the base address (_COMM_PAGE32_BASE_ADDRESS) is the same for + * 32- and 64-bit commpages + */ + return (void*) ((uintptr_t)commPageBase + (addr_at_runtime - _COMM_PAGE32_BASE_ADDRESS)); } /* Determine number of CPUs on this system. 
We cannot rely on @@ -231,10 +251,12 @@ commpage_cpus( void ) cpus = ml_get_max_cpus(); // NB: this call can block - if (cpus == 0) + if (cpus == 0) { panic("commpage cpus==0"); - if (cpus > 0xFF) + } + if (cpus > 0xFF) { cpus = 0xFF; + } return cpus; } @@ -250,116 +272,117 @@ commpage_init_cpu_capabilities( void ) bits = 0; ml_cpu_get_info(&cpu_info); - + switch (cpu_info.vector_unit) { - case 9: - bits |= kHasAVX1_0; - /* fall thru */ - case 8: - bits |= kHasSSE4_2; - /* fall thru */ - case 7: - bits |= kHasSSE4_1; - /* fall thru */ - case 6: - bits |= kHasSupplementalSSE3; - /* fall thru */ - case 5: - bits |= kHasSSE3; - /* fall thru */ - case 4: - bits |= kHasSSE2; - /* fall thru */ - case 3: - bits |= kHasSSE; - /* fall thru */ - case 2: - bits |= kHasMMX; - default: - break; + case 9: + bits |= kHasAVX1_0; + /* fall thru */ + case 8: + bits |= kHasSSE4_2; + /* fall thru */ + case 7: + bits |= kHasSSE4_1; + /* fall thru */ + case 6: + bits |= kHasSupplementalSSE3; + /* fall thru */ + case 5: + bits |= kHasSSE3; + /* fall thru */ + case 4: + bits |= kHasSSE2; + /* fall thru */ + case 3: + bits |= kHasSSE; + /* fall thru */ + case 2: + bits |= kHasMMX; + default: + break; } switch (cpu_info.cache_line_size) { - case 128: - bits |= kCache128; - break; - case 64: - bits |= kCache64; - break; - case 32: - bits |= kCache32; - break; - default: - break; - } - cpus = commpage_cpus(); // how many CPUs do we have + case 128: + bits |= kCache128; + break; + case 64: + bits |= kCache64; + break; + case 32: + bits |= kCache32; + break; + default: + break; + } + cpus = commpage_cpus(); // how many CPUs do we have bits |= (cpus << kNumCPUsShift); - bits |= kFastThreadLocalStorage; // we use %gs for TLS + bits |= kFastThreadLocalStorage; // we use %gs for TLS #define setif(_bits, _bit, _condition) \ if (_condition) _bits |= _bit - setif(bits, kUP, cpus == 1); - setif(bits, k64Bit, cpu_mode_is64bit()); - setif(bits, kSlow, tscFreq <= SLOW_TSC_THRESHOLD); - - setif(bits, kHasAES, cpuid_features() & - CPUID_FEATURE_AES); - setif(bits, kHasF16C, cpuid_features() & - CPUID_FEATURE_F16C); - setif(bits, kHasRDRAND, cpuid_features() & - CPUID_FEATURE_RDRAND); - setif(bits, kHasFMA, cpuid_features() & - CPUID_FEATURE_FMA); - - setif(bits, kHasBMI1, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_BMI1); - setif(bits, kHasBMI2, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_BMI2); - setif(bits, kHasRTM, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_RTM); - setif(bits, kHasHLE, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_HLE); - setif(bits, kHasAVX2_0, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_AVX2); - setif(bits, kHasRDSEED, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_RDSEED); - setif(bits, kHasADX, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_ADX); - -#if 0 /* The kernel doesn't support MPX or SGX */ - setif(bits, kHasMPX, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_MPX); - setif(bits, kHasSGX, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_SGX); + setif(bits, kUP, cpus == 1); + setif(bits, k64Bit, cpu_mode_is64bit()); + setif(bits, kSlow, tscFreq <= SLOW_TSC_THRESHOLD); + + setif(bits, kHasAES, cpuid_features() & + CPUID_FEATURE_AES); + setif(bits, kHasF16C, cpuid_features() & + CPUID_FEATURE_F16C); + setif(bits, kHasRDRAND, cpuid_features() & + CPUID_FEATURE_RDRAND); + setif(bits, kHasFMA, cpuid_features() & + CPUID_FEATURE_FMA); + + setif(bits, kHasBMI1, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_BMI1); + setif(bits, kHasBMI2, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_BMI2); + /* 
Do not advertise RTM and HLE if the TSX FORCE ABORT WA is required */ + if (cpuid_wa_required(CPU_INTEL_TSXFA) & CWA_OFF) { + setif(bits, kHasRTM, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_RTM); + setif(bits, kHasHLE, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_HLE); + } + setif(bits, kHasAVX2_0, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_AVX2); + setif(bits, kHasRDSEED, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_RDSEED); + setif(bits, kHasADX, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_ADX); + +#if 0 /* The kernel doesn't support MPX or SGX */ + setif(bits, kHasMPX, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_MPX); + setif(bits, kHasSGX, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_SGX); #endif -#if !defined(RC_HIDE_XNU_J137) if (ml_fpu_avx512_enabled()) { - setif(bits, kHasAVX512F, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_AVX512F); - setif(bits, kHasAVX512CD, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_AVX512CD); - setif(bits, kHasAVX512DQ, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_AVX512DQ); - setif(bits, kHasAVX512BW, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_AVX512BW); - setif(bits, kHasAVX512VL, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_AVX512VL); + setif(bits, kHasAVX512F, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_AVX512F); + setif(bits, kHasAVX512CD, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_AVX512CD); + setif(bits, kHasAVX512DQ, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_AVX512DQ); + setif(bits, kHasAVX512BW, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_AVX512BW); + setif(bits, kHasAVX512VL, cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_AVX512VL); setif(bits, kHasAVX512IFMA, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_AVX512IFMA); + CPUID_LEAF7_FEATURE_AVX512IFMA); setif(bits, kHasAVX512VBMI, cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_AVX512VBMI); + CPUID_LEAF7_FEATURE_AVX512VBMI); } -#endif /* not RC_HIDE_XNU_J137 */ uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE); setif(bits, kHasENFSTRG, (misc_enable & 1ULL) && - (cpuid_leaf7_features() & - CPUID_LEAF7_FEATURE_ERMS)); - - _cpu_capabilities = bits; // set kernel version for use by drivers etc + (cpuid_leaf7_features() & + CPUID_LEAF7_FEATURE_ERMS)); + + _cpu_capabilities = bits; // set kernel version for use by drivers etc } /* initialize the approx_time_supported flag and set the approx time to 0. @@ -376,13 +399,13 @@ commpage_mach_approximate_time_init(void) #else supported = 0; #endif - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_BASE_ADDRESS); *(boolean_t *)cp = supported; } - + cp = commPagePtr64; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_APPROX_TIME_SUPPORTED - _COMM_PAGE32_START_ADDRESS); *(boolean_t *)cp = supported; } @@ -414,83 +437,106 @@ _get_cpu_capabilities(void) static void commpage_stuff( - commpage_address_t address, - const void *source, - int length ) -{ - void *dest = commpage_addr_of(address); - - if (address < next) - panic("commpage overlap at address 0x%p, 0x%x < 0x%x", dest, address, next); - - bcopy(source,dest,length); - - next = address + length; + commpage_address_t address, + const void *source, + int length ) +{ + void *dest = commpage_addr_of(address); + + if (address < next) { + panic("commpage overlap at address 0x%p, 0x%x < 0x%x", dest, address, next); + } + + bcopy(source, dest, length); + + next = address + length; +} + +/* + * Updates both the 32-bit and 64-bit commpages with the new data. 
+ */ +static void +commpage_update(commpage_address_t address, const void *source, int length) +{ + void *dest = commpage_specific_addr_of(commPagePtr32, address); + bcopy(source, dest, length); + + dest = commpage_specific_addr_of(commPagePtr64, address); + bcopy(source, dest, length); +} + +void +commpage_post_ucode_update(void) +{ + commpage_init_cpu_capabilities(); + commpage_update(_COMM_PAGE_CPU_CAPABILITIES64, &_cpu_capabilities, sizeof(_cpu_capabilities)); + commpage_update(_COMM_PAGE_CPU_CAPABILITIES, &_cpu_capabilities, sizeof(uint32_t)); } /* Copy a routine into comm page if it matches running machine. */ static void commpage_stuff_routine( - commpage_descriptor *rd ) + commpage_descriptor *rd ) { - commpage_stuff(rd->commpage_address,rd->code_address,rd->code_length); + commpage_stuff(rd->commpage_address, rd->code_address, rd->code_length); } /* Fill in the 32- or 64-bit commpage. Called once for each. */ static void -commpage_populate_one( - vm_map_t submap, // commpage32_map or compage64_map - char ** kernAddressPtr, // &commPagePtr32 or &commPagePtr64 - size_t area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED - commpage_address_t base_offset, // will become commPageBaseOffset - commpage_time_data** time_data, // &time_data32 or &time_data64 +commpage_populate_one( + vm_map_t submap, // commpage32_map or compage64_map + char ** kernAddressPtr, // &commPagePtr32 or &commPagePtr64 + size_t area_used, // _COMM_PAGE32_AREA_USED or _COMM_PAGE64_AREA_USED + commpage_address_t base_offset, // will become commPageBaseOffset + commpage_time_data** time_data, // &time_data32 or &time_data64 new_commpage_timeofday_data_t** gtod_time_data, // >od_time_data32 or >od_time_data64 - const char* signature, // "commpage 32-bit" or "commpage 64-bit" - vm_prot_t uperm) + const char* signature, // "commpage 32-bit" or "commpage 64-bit" + vm_prot_t uperm) { - uint8_t c1; - uint16_t c2; - int c4; - uint64_t c8; - uint32_t cfamily; + uint8_t c1; + uint16_t c2; + int c4; + uint64_t c8; + uint32_t cfamily; short version = _COMM_PAGE_THIS_VERSION; next = 0; commPagePtr = (char *)commpage_allocate( submap, (vm_size_t) area_used, uperm ); - *kernAddressPtr = commPagePtr; // save address either in commPagePtr32 or 64 + *kernAddressPtr = commPagePtr; // save address either in commPagePtr32 or 64 commPageBaseOffset = base_offset; *time_data = commpage_addr_of( _COMM_PAGE_TIME_DATA_START ); *gtod_time_data = commpage_addr_of( _COMM_PAGE_NEWTIMEOFDAY_DATA ); /* Stuff in the constants. We move things into the comm page in strictly - * ascending order, so we can check for overlap and panic if so. - * Note: the 32-bit cpu_capabilities vector is retained in addition to - * the expanded 64-bit vector. - */ - commpage_stuff(_COMM_PAGE_SIGNATURE,signature,(int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature))); - commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64,&_cpu_capabilities,sizeof(_cpu_capabilities)); - commpage_stuff(_COMM_PAGE_VERSION,&version,sizeof(short)); - commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES,&_cpu_capabilities,sizeof(uint32_t)); + * ascending order, so we can check for overlap and panic if so. + * Note: the 32-bit cpu_capabilities vector is retained in addition to + * the expanded 64-bit vector. 
+ */ + commpage_stuff(_COMM_PAGE_SIGNATURE, signature, (int)MIN(_COMM_PAGE_SIGNATURELEN, strlen(signature))); + commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES64, &_cpu_capabilities, sizeof(_cpu_capabilities)); + commpage_stuff(_COMM_PAGE_VERSION, &version, sizeof(short)); + commpage_stuff(_COMM_PAGE_CPU_CAPABILITIES, &_cpu_capabilities, sizeof(uint32_t)); c2 = 32; // default - if (_cpu_capabilities & kCache64) + if (_cpu_capabilities & kCache64) { c2 = 64; - else if (_cpu_capabilities & kCache128) + } else if (_cpu_capabilities & kCache128) { c2 = 128; - commpage_stuff(_COMM_PAGE_CACHE_LINESIZE,&c2,2); + } + commpage_stuff(_COMM_PAGE_CACHE_LINESIZE, &c2, 2); c4 = MP_SPIN_TRIES; - commpage_stuff(_COMM_PAGE_SPIN_COUNT,&c4,4); + commpage_stuff(_COMM_PAGE_SPIN_COUNT, &c4, 4); /* machine_info valid after ml_get_max_cpus() */ c1 = machine_info.physical_cpu_max; - commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS,&c1,1); + commpage_stuff(_COMM_PAGE_PHYSICAL_CPUS, &c1, 1); c1 = machine_info.logical_cpu_max; - commpage_stuff(_COMM_PAGE_LOGICAL_CPUS,&c1,1); + commpage_stuff(_COMM_PAGE_LOGICAL_CPUS, &c1, 1); c8 = ml_cpu_cache_size(0); commpage_stuff(_COMM_PAGE_MEMORY_SIZE, &c8, 8); @@ -498,9 +544,9 @@ commpage_populate_one( cfamily = cpuid_info()->cpuid_cpufamily; commpage_stuff(_COMM_PAGE_CPUFAMILY, &cfamily, 4); - if (next > _COMM_PAGE_END) + if (next > _COMM_PAGE_END) { panic("commpage overflow: next = 0x%08x, commPagePtr = 0x%p", next, commPagePtr); - + } } @@ -509,40 +555,40 @@ commpage_populate_one( * * See the top of this file for a list of what you have to do to add * a new routine to the commpage. - */ + */ void commpage_populate( void ) { commpage_init_cpu_capabilities(); - - commpage_populate_one( commpage32_map, - &commPagePtr32, - _COMM_PAGE32_AREA_USED, - _COMM_PAGE32_BASE_ADDRESS, - &time_data32, - &gtod_time_data32, - "commpage 32-bit", - VM_PROT_READ); + + commpage_populate_one( commpage32_map, + &commPagePtr32, + _COMM_PAGE32_AREA_USED, + _COMM_PAGE32_BASE_ADDRESS, + &time_data32, + &gtod_time_data32, + "commpage 32-bit", + VM_PROT_READ); #ifndef __LP64__ - pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS, - _COMM_PAGE32_AREA_USED/INTEL_PGBYTES); -#endif - time_data64 = time_data32; /* if no 64-bit commpage, point to 32-bit */ + pmap_commpage32_init((vm_offset_t) commPagePtr32, _COMM_PAGE32_BASE_ADDRESS, + _COMM_PAGE32_AREA_USED / INTEL_PGBYTES); +#endif + time_data64 = time_data32; /* if no 64-bit commpage, point to 32-bit */ gtod_time_data64 = gtod_time_data32; if (_cpu_capabilities & k64Bit) { - commpage_populate_one( commpage64_map, - &commPagePtr64, - _COMM_PAGE64_AREA_USED, - _COMM_PAGE32_START_ADDRESS, /* commpage address are relative to 32-bit commpage placement */ - &time_data64, - &gtod_time_data64, - "commpage 64-bit", - VM_PROT_READ); + commpage_populate_one( commpage64_map, + &commPagePtr64, + _COMM_PAGE64_AREA_USED, + _COMM_PAGE32_START_ADDRESS, /* commpage address are relative to 32-bit commpage placement */ + &time_data64, + &gtod_time_data64, + "commpage 64-bit", + VM_PROT_READ); #ifndef __LP64__ - pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS, - _COMM_PAGE64_AREA_USED/INTEL_PGBYTES); + pmap_commpage64_init((vm_offset_t) commPagePtr64, _COMM_PAGE64_BASE_ADDRESS, + _COMM_PAGE64_AREA_USED / INTEL_PGBYTES); #endif } @@ -559,55 +605,57 @@ commpage_populate( void ) #endif } -/* Fill in the common routines during kernel initialization.
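The panic on next > _COMM_PAGE_END above is the second half of the discipline that commpage_stuff() starts: every datum is placed at a strictly ascending offset, so both overlap between neighbouring fields and overflow of the page are caught at boot instead of silently corrupting user-visible data. A condensed, self-contained sketch of that discipline (names and the abort() stand-in are illustrative; the kernel panics):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    static uint32_t next_off;   /* one past the last byte placed so far */

    static void
    stuff_checked(char *page, uint32_t page_len, uint32_t off,
        const void *src, uint32_t len)
    {
        if (off < next_off || off + len > page_len) {
            abort();            /* overlap or overflow */
        }
        memcpy(page + off, src, len);
        next_off = off + len;
    }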
* This is called before user-mode code is running. */ -void commpage_text_populate( void ){ +void +commpage_text_populate( void ) +{ commpage_descriptor **rd; - + next = 0; commPagePtr = (char *) commpage_allocate(commpage_text32_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE); commPageTextPtr32 = commPagePtr; - + char *cptr = commPagePtr; - int i=0; - for(; i< _COMM_PAGE_TEXT_AREA_USED; i++){ - cptr[i]=0xCC; + int i = 0; + for (; i < _COMM_PAGE_TEXT_AREA_USED; i++) { + cptr[i] = 0xCC; } - + commPageBaseOffset = _COMM_PAGE_TEXT_START; for (rd = commpage_32_routines; *rd != NULL; rd++) { commpage_stuff_routine(*rd); } #ifndef __LP64__ - pmap_commpage32_init((vm_offset_t) commPageTextPtr32, _COMM_PAGE_TEXT_START, - _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES); -#endif + pmap_commpage32_init((vm_offset_t) commPageTextPtr32, _COMM_PAGE_TEXT_START, + _COMM_PAGE_TEXT_AREA_USED / INTEL_PGBYTES); +#endif if (_cpu_capabilities & k64Bit) { next = 0; commPagePtr = (char *) commpage_allocate(commpage_text64_map, (vm_size_t) _COMM_PAGE_TEXT_AREA_USED, VM_PROT_READ | VM_PROT_EXECUTE); commPageTextPtr64 = commPagePtr; - cptr=commPagePtr; - for(i=0; i<_COMM_PAGE_TEXT_AREA_USED; i++){ - cptr[i]=0xCC; + cptr = commPagePtr; + for (i = 0; i < _COMM_PAGE_TEXT_AREA_USED; i++) { + cptr[i] = 0xCC; } - for (rd = commpage_64_routines; *rd !=NULL; rd++) { + for (rd = commpage_64_routines; *rd != NULL; rd++) { commpage_stuff_routine(*rd); } #ifndef __LP64__ - pmap_commpage64_init((vm_offset_t) commPageTextPtr64, _COMM_PAGE_TEXT_START, - _COMM_PAGE_TEXT_AREA_USED/INTEL_PGBYTES); -#endif + pmap_commpage64_init((vm_offset_t) commPageTextPtr64, _COMM_PAGE_TEXT_START, + _COMM_PAGE_TEXT_AREA_USED / INTEL_PGBYTES); +#endif } - if (next > _COMM_PAGE_TEXT_END) - panic("commpage text overflow: next=0x%08x, commPagePtr=%p", next, commPagePtr); - + if (next > _COMM_PAGE_TEXT_END) { + panic("commpage text overflow: next=0x%08x, commPagePtr=%p", next, commPagePtr); + } } /* Update commpage nanotime information. @@ -617,51 +665,56 @@ void commpage_text_populate( void ){ void commpage_set_nanotime( - uint64_t tsc_base, - uint64_t ns_base, - uint32_t scale, - uint32_t shift ) + uint64_t tsc_base, + uint64_t ns_base, + uint32_t scale, + uint32_t shift ) { - commpage_time_data *p32 = time_data32; - commpage_time_data *p64 = time_data64; - static uint32_t generation = 0; - uint32_t next_gen; - - if (p32 == NULL) /* have commpages been allocated yet? */ + commpage_time_data *p32 = time_data32; + commpage_time_data *p64 = time_data64; + static uint32_t generation = 0; + uint32_t next_gen; + + if (p32 == NULL) { /* have commpages been allocated yet? 
*/ return; - - if ( generation != p32->nt_generation ) - panic("nanotime trouble 1"); /* possibly not serialized */ - if ( ns_base < p32->nt_ns_base ) + } + + if (generation != p32->nt_generation) { + panic("nanotime trouble 1"); /* possibly not serialized */ + } + if (ns_base < p32->nt_ns_base) { panic("nanotime trouble 2"); - if ((shift != 0) && ((_cpu_capabilities & kSlow)==0) ) + } + if ((shift != 0) && ((_cpu_capabilities & kSlow) == 0)) { panic("nanotime trouble 3"); - + } + next_gen = ++generation; - if (next_gen == 0) + if (next_gen == 0) { next_gen = ++generation; - - p32->nt_generation = 0; /* mark invalid, so commpage won't try to use it */ + } + + p32->nt_generation = 0; /* mark invalid, so commpage won't try to use it */ p64->nt_generation = 0; - + p32->nt_tsc_base = tsc_base; p64->nt_tsc_base = tsc_base; - + p32->nt_ns_base = ns_base; p64->nt_ns_base = ns_base; - + p32->nt_scale = scale; p64->nt_scale = scale; - + p32->nt_shift = shift; p64->nt_shift = shift; - - p32->nt_generation = next_gen; /* mark data as valid */ + + p32->nt_generation = next_gen; /* mark data as valid */ p64->nt_generation = next_gen; } /* Update commpage gettimeofday() information. As with nanotime(), we interleave - * updates to the 32- and 64-bit commpage, in order to keep time more nearly in sync + * updates to the 32- and 64-bit commpage, in order to keep time more nearly in sync * between the two environments. * * This routine must be serializeed by some external means, ie a lock. @@ -669,15 +722,15 @@ commpage_set_nanotime( void commpage_set_timestamp( - uint64_t abstime, - uint64_t sec, - uint64_t frac, - uint64_t scale, - uint64_t tick_per_sec) + uint64_t abstime, + uint64_t sec, + uint64_t frac, + uint64_t scale, + uint64_t tick_per_sec) { - new_commpage_timeofday_data_t *p32 = gtod_time_data32; - new_commpage_timeofday_data_t *p64 = gtod_time_data64; - + new_commpage_timeofday_data_t *p32 = gtod_time_data32; + new_commpage_timeofday_data_t *p64 = gtod_time_data64; + p32->TimeStamp_tick = 0x0ULL; p64->TimeStamp_tick = 0x0ULL; @@ -701,25 +754,24 @@ commpage_set_timestamp( void commpage_set_memory_pressure( - unsigned int pressure ) + unsigned int pressure ) { - char *cp; + char *cp; uint32_t *ip; - + cp = commPagePtr32; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_BASE_ADDRESS); ip = (uint32_t*) (void *) cp; *ip = (uint32_t) pressure; } - + cp = commPagePtr64; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_MEMORY_PRESSURE - _COMM_PAGE32_START_ADDRESS); ip = (uint32_t*) (void *) cp; *ip = (uint32_t) pressure; } - } @@ -727,53 +779,54 @@ commpage_set_memory_pressure( void commpage_set_spin_count( - unsigned int count ) + unsigned int count ) { - char *cp; + char *cp; uint32_t *ip; - - if (count == 0) /* we test for 0 after decrement, not before */ - count = 1; - + + if (count == 0) { /* we test for 0 after decrement, not before */ + count = 1; + } + cp = commPagePtr32; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_BASE_ADDRESS); ip = (uint32_t*) (void *) cp; *ip = (uint32_t) count; } - + cp = commPagePtr64; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_SPIN_COUNT - _COMM_PAGE32_START_ADDRESS); ip = (uint32_t*) (void *) cp; *ip = (uint32_t) count; } - } /* Updated every time a logical CPU goes offline/online */ void commpage_update_active_cpus(void) { - char *cp; + char *cp; volatile uint8_t *ip; - + /* At least 32-bit commpage must be initialized */ - if (!commPagePtr32) + if (!commPagePtr32) { return; + } - simple_lock(&commpage_active_cpus_lock); 
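The nt_generation handling in commpage_set_nanotime() above is the writer half of a lock-free sequence protocol: the generation is forced to 0 (invalid) before the fields change and set to a fresh non-zero value afterwards, with 0 skipped on wrap. The user-mode nanotime reader (shipped in libsystem, not part of this patch) is conceptually the retry loop below; it uses the commpage_time_data typedef from commpage.h later in this patch, the conversion arithmetic is omitted, and the function name is illustrative:

    /* Take a consistent snapshot of the nanotime fields under the
     * generation protocol; spins while a writer is mid-update. */
    static void
    nanotime_snapshot(commpage_time_data *p,
        uint64_t *tsc_base, uint64_t *ns_base,
        uint32_t *scale, uint32_t *shift)
    {
        uint32_t gen;

        for (;;) {
            gen = p->nt_generation;
            if (gen == 0) {
                continue;       /* writer in progress, data invalid */
            }
            *tsc_base = p->nt_tsc_base;
            *ns_base  = p->nt_ns_base;
            *scale    = p->nt_scale;
            *shift    = p->nt_shift;
            if (gen == p->nt_generation) {
                return;         /* no writer intervened; snapshot good */
            }
        }
    }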
+ simple_lock(&commpage_active_cpus_lock, LCK_GRP_NULL); cp = commPagePtr32; cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_BASE_ADDRESS); ip = (volatile uint8_t*) cp; - *ip = (uint8_t) processor_avail_count; - + *ip = (uint8_t) processor_avail_count_user; + cp = commPagePtr64; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_ACTIVE_CPUS - _COMM_PAGE32_START_ADDRESS); ip = (volatile uint8_t*) cp; - *ip = (uint8_t) processor_avail_count; + *ip = (uint8_t) processor_avail_count_user; } simple_unlock(&commpage_active_cpus_lock); @@ -823,7 +876,7 @@ commpage_update_atm_diagnostic_config(uint32_t diagnostic_config) } cp = commPagePtr64; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_ATM_DIAGNOSTIC_CONFIG - _COMM_PAGE32_START_ADDRESS); saved_data_ptr = (volatile uint32_t *)cp; *saved_data_ptr = diagnostic_config; @@ -840,9 +893,9 @@ commpage_update_mach_approximate_time(uint64_t abstime) #ifdef CONFIG_MACH_APPROXIMATE_TIME uint64_t saved_data; char *cp; - + cp = commPagePtr32; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS); saved_data = atomic_load_explicit((_Atomic uint64_t *)(uintptr_t)cp, memory_order_relaxed); if (saved_data < abstime) { @@ -850,12 +903,12 @@ commpage_update_mach_approximate_time(uint64_t abstime) * if the value has been updated since we last read it, * "someone" has a newer timestamp than us and ours is * now invalid. */ - atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)(uintptr_t)cp, - &saved_data, abstime, memory_order_relaxed, memory_order_relaxed); + atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)(uintptr_t)cp, + &saved_data, abstime, memory_order_relaxed, memory_order_relaxed); } } cp = commPagePtr64; - if ( cp ) { + if (cp) { cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS); saved_data = atomic_load_explicit((_Atomic uint64_t *)(uintptr_t)cp, memory_order_relaxed); if (saved_data < abstime) { @@ -863,8 +916,8 @@ commpage_update_mach_approximate_time(uint64_t abstime) * if the value has been updated since we last read it, * "someone" has a newer timestamp than us and ours is * now invalid. 
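The approximate-time update above is a single-shot "monotonic max" publish: the stored timestamp only ever moves forward, and losing the compare-exchange race is benign because it means another CPU already stored a value at least as new. The idiom in isolation, with C11 atomics (a sketch, not the kernel's code):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Advance *slot to 'now' only if that moves it forward; a failed
     * CAS means a newer value is already published, so keep it. */
    static void
    publish_monotonic_max(_Atomic uint64_t *slot, uint64_t now)
    {
        uint64_t seen = atomic_load_explicit(slot, memory_order_relaxed);

        if (seen < now) {
            atomic_compare_exchange_strong_explicit(slot, &seen, now,
                memory_order_relaxed, memory_order_relaxed);
        }
    }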
*/ - atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)(uintptr_t)cp, - &saved_data, abstime, memory_order_relaxed, memory_order_relaxed); + atomic_compare_exchange_strong_explicit((_Atomic uint64_t *)(uintptr_t)cp, + &saved_data, abstime, memory_order_relaxed, memory_order_relaxed); } } #else @@ -881,7 +934,7 @@ commpage_update_mach_continuous_time(uint64_t sleeptime) cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS); *(uint64_t *)cp = sleeptime; } - + cp = commPagePtr64; if (cp) { cp += (_COMM_PAGE_CONT_TIMEBASE - _COMM_PAGE32_START_ADDRESS); @@ -915,22 +968,21 @@ extern user64_addr_t commpage_text64_location; uint32_t commpage_is_in_pfz32(uint32_t addr32) { - if ( (addr32 >= (commpage_text32_location + _COMM_TEXT_PFZ_START_OFFSET)) - && (addr32 < (commpage_text32_location+_COMM_TEXT_PFZ_END_OFFSET))) { + if ((addr32 >= (commpage_text32_location + _COMM_TEXT_PFZ_START_OFFSET)) + && (addr32 < (commpage_text32_location + _COMM_TEXT_PFZ_END_OFFSET))) { return 1; - } - else + } else { return 0; + } } uint32_t commpage_is_in_pfz64(addr64_t addr64) { - if ( (addr64 >= (commpage_text64_location + _COMM_TEXT_PFZ_START_OFFSET)) - && (addr64 < (commpage_text64_location + _COMM_TEXT_PFZ_END_OFFSET))) { + if ((addr64 >= (commpage_text64_location + _COMM_TEXT_PFZ_START_OFFSET)) + && (addr64 < (commpage_text64_location + _COMM_TEXT_PFZ_END_OFFSET))) { return 1; - } - else + } else { return 0; + } } - diff --git a/osfmk/i386/commpage/commpage.h b/osfmk/i386/commpage/commpage.h index e4a2c7ac3..2dc782686 100644 --- a/osfmk/i386/commpage/commpage.h +++ b/osfmk/i386/commpage/commpage.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2003-2012 Apple Inc. All rights reserved. + * Copyright (c) 2003-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _I386_COMMPAGE_H #define _I386_COMMPAGE_H -#ifndef __ASSEMBLER__ +#ifndef __ASSEMBLER__ #include #include #include @@ -44,7 +44,7 @@ * At present spinlocks do not use _COMM_PAGE_SPIN_COUNT. * They use MP_SPIN_TRIES directly. */ -#define MP_SPIN_TRIES 1000 +#define MP_SPIN_TRIES 1000 /* The following macro is used to generate the 64-bit commpage address for a given @@ -52,10 +52,10 @@ * the 64-bit commpage. Since the kernel can be a 32-bit object, cpu_capabilities.h * only defines the 32-bit address. 
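commpage_is_in_pfz32()/commpage_is_in_pfz64() above are plain half-open range tests (start <= addr < end) against the slid commpage text. A hedged example of the intended caller, deciding whether preemption must be deferred because the interrupted user PC sits inside the preemption-free zone; the caller is illustrative, only the predicate and types come from this patch:

    /* Defer a preemption AST while user code executes inside the PFZ;
     * the PFZ return path delivers the deferred work on the way out. */
    static boolean_t
    should_defer_preemption(addr64_t user_rip)
    {
        return commpage_is_in_pfz64(user_rip) ? TRUE : FALSE;
    }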
*/ -#define _COMM_PAGE_32_TO_64( ADDRESS ) ( ADDRESS + _COMM_PAGE64_START_ADDRESS - _COMM_PAGE32_START_ADDRESS ) +#define _COMM_PAGE_32_TO_64( ADDRESS ) ( ADDRESS + _COMM_PAGE64_START_ADDRESS - _COMM_PAGE32_START_ADDRESS ) -#ifdef __ASSEMBLER__ +#ifdef __ASSEMBLER__ #define COMMPAGE_DESCRIPTOR_NAME(label) _commpage_ ## label @@ -63,19 +63,19 @@ #define COMMPAGE_DESCRIPTOR_REFERENCE(label) \ .quad COMMPAGE_DESCRIPTOR_NAME(label) -#define COMMPAGE_FUNCTION_START(label,codetype,alignment) \ +#define COMMPAGE_FUNCTION_START(label, codetype, alignment) \ .text ;\ .code ## codetype ;\ .align alignment, 0x90 ;\ L ## label ## : -#define COMMPAGE_DESCRIPTOR(label,address) \ +#define COMMPAGE_DESCRIPTOR(label, address) \ L ## label ## _end: ;\ .set L ## label ## _size, L ## label ## _end - L ## label ;\ .const_data ;\ .private_extern COMMPAGE_DESCRIPTOR_NAME(label) ;\ COMMPAGE_DESCRIPTOR_NAME(label) ## : ;\ - COMMPAGE_DESCRIPTOR_FIELD_POINTER L ## label ;\ + COMMPAGE_DESCRIPTOR_FIELD_POINTER L ## label ;\ .long L ## label ## _size ;\ .long address ;\ .text @@ -96,22 +96,22 @@ COMMPAGE_DESCRIPTOR_NAME(label) ## : ;\ * start = the label at the start of the code for this func * This is admitedly ugly and fragile. Is there a better way? */ -#define COMMPAGE_CALL(target,from,start) \ +#define COMMPAGE_CALL(target, from, start) \ COMMPAGE_CALL_INTERNAL(target,from,start,__LINE__) -#define COMMPAGE_CALL_INTERNAL(target,from,start,unique) \ +#define COMMPAGE_CALL_INTERNAL(target, from, start, unique) \ .byte 0xe8 ;\ .set UNIQUEID(unique), L ## start - . + target - from - 4 ;\ .long UNIQUEID(unique) -#define UNIQUEID(name) L ## name +#define UNIQUEID(name) L ## name /* COMMPAGE_JMP(target,from,start) * * This macro perform a jump to another commpage routine. * Used to return from the PFZ by jumping via a return outside the PFZ. */ -#define COMMPAGE_JMP(target,from,start) \ +#define COMMPAGE_JMP(target, from, start) \ jmp L ## start - from + target #else /* __ASSEMBLER__ */ @@ -120,47 +120,48 @@ COMMPAGE_DESCRIPTOR_NAME(label) ## : ;\ * Note that the COMMPAGE_DESCRIPTOR macro (above), used in * assembly language, must agree with this. */ - -typedef struct commpage_descriptor { - void *code_address; // address of code - uint32_t code_length; // length in bytes - uint32_t commpage_address; // put at this address (_COMM_PAGE_BCOPY etc) + +typedef struct commpage_descriptor { + void *code_address; // address of code + uint32_t code_length; // length in bytes + uint32_t commpage_address; // put at this address (_COMM_PAGE_BCOPY etc) } commpage_descriptor; /* Warning: following structure must match the layout of the commpage. 
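COMMPAGE_CALL above hand-assembles an x86 near call, opcode 0xE8 followed by a signed 32-bit displacement, because the routines run at their commpage addresses rather than where the assembler laid them out; the displacement is measured from the end of the 5-byte instruction, which is where the "- 4" after the opcode byte comes from. The same arithmetic as a C sketch (helper name illustrative):

    #include <stdint.h>
    #include <string.h>

    /* Emit CALL rel32 into the buffer at 'at'. 'at_runtime' and
     * 'target_runtime' are the addresses the code will execute at. */
    static void
    emit_near_call(uint8_t *at, uint64_t at_runtime, uint64_t target_runtime)
    {
        /* rel32 is relative to the instruction after the CALL. */
        int32_t rel32 = (int32_t)(target_runtime - (at_runtime + 5));

        at[0] = 0xE8;
        memcpy(at + 1, &rel32, sizeof(rel32));
    }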
*/ /* This is the data starting at _COMM_PAGE_TIME_DATA_START, ie for nanotime() and gettimeofday() */ -typedef volatile struct commpage_time_data { - uint64_t nt_tsc_base; // _COMM_PAGE_NT_TSC_BASE - uint32_t nt_scale; // _COMM_PAGE_NT_SCALE - uint32_t nt_shift; // _COMM_PAGE_NT_SHIFT - uint64_t nt_ns_base; // _COMM_PAGE_NT_NS_BASE - uint32_t nt_generation; // _COMM_PAGE_NT_GENERATION - uint32_t gtod_generation; // _COMM_PAGE_GTOD_GENERATION - uint64_t gtod_ns_base; // _COMM_PAGE_GTOD_NS_BASE - uint64_t gtod_sec_base; // _COMM_PAGE_GTOD_SEC_BASE +typedef volatile struct commpage_time_data { + uint64_t nt_tsc_base; // _COMM_PAGE_NT_TSC_BASE + uint32_t nt_scale; // _COMM_PAGE_NT_SCALE + uint32_t nt_shift; // _COMM_PAGE_NT_SHIFT + uint64_t nt_ns_base; // _COMM_PAGE_NT_NS_BASE + uint32_t nt_generation; // _COMM_PAGE_NT_GENERATION + uint32_t gtod_generation; // _COMM_PAGE_GTOD_GENERATION + uint64_t gtod_ns_base; // _COMM_PAGE_GTOD_NS_BASE + uint64_t gtod_sec_base; // _COMM_PAGE_GTOD_SEC_BASE } commpage_time_data; -extern char *commPagePtr32; // virt address of 32-bit commpage in kernel map -extern char *commPagePtr64; // ...and of 64-bit commpage +extern char *commPagePtr32; // virt address of 32-bit commpage in kernel map +extern char *commPagePtr64; // ...and of 64-bit commpage -extern void commpage_set_timestamp(uint64_t abstime, uint64_t sec, uint64_t frac, uint64_t scale, uint64_t tick_per_sec); +extern void commpage_set_timestamp(uint64_t abstime, uint64_t sec, uint64_t frac, uint64_t scale, uint64_t tick_per_sec); #define commpage_disable_timestamp() commpage_set_timestamp( 0, 0, 0, 0, 0 ); -extern void commpage_set_nanotime(uint64_t tsc_base, uint64_t ns_base, uint32_t scale, uint32_t shift); -extern void commpage_set_memory_pressure(unsigned int pressure); -extern void commpage_set_spin_count(unsigned int count); -extern void commpage_sched_gen_inc(void); -extern void commpage_update_active_cpus(void); -extern void commpage_update_mach_approximate_time(uint64_t abstime); -extern void commpage_update_mach_continuous_time(uint64_t sleeptime); -extern void commpage_update_boottime(uint64_t boottime_usec); -extern void commpage_update_kdebug_state(void); -extern void commpage_update_atm_diagnostic_config(uint32_t); - -extern uint32_t commpage_is_in_pfz32(uint32_t); -extern uint32_t commpage_is_in_pfz64(addr64_t); - -#endif /* __ASSEMBLER__ */ +extern void commpage_set_nanotime(uint64_t tsc_base, uint64_t ns_base, uint32_t scale, uint32_t shift); +extern void commpage_set_memory_pressure(unsigned int pressure); +extern void commpage_set_spin_count(unsigned int count); +extern void commpage_sched_gen_inc(void); +extern void commpage_update_active_cpus(void); +extern void commpage_update_mach_approximate_time(uint64_t abstime); +extern void commpage_update_mach_continuous_time(uint64_t sleeptime); +extern void commpage_update_boottime(uint64_t boottime_usec); +extern void commpage_update_kdebug_state(void); +extern void commpage_update_atm_diagnostic_config(uint32_t); +extern void commpage_post_ucode_update(void); + +extern uint32_t commpage_is_in_pfz32(uint32_t); +extern uint32_t commpage_is_in_pfz64(addr64_t); + +#endif /* __ASSEMBLER__ */ #endif /* _I386_COMMPAGE_H */ diff --git a/osfmk/i386/cpu.c b/osfmk/i386/cpu.c index 84cf06615..517c229c8 100644 --- a/osfmk/i386/cpu.c +++ b/osfmk/i386/cpu.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. 
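The "must match the layout of the commpage" warning on the commpage_time_data typedef above is checkable at compile time against the _NT_*/_GTOD_* byte offsets that cpu_capabilities.h defines (reproduced later in this patch: 0, 8, 12, 16, 24, 28, 32, 40). A sketch using C11 _Static_assert; the kernel itself does not carry these asserts:

    #include <stddef.h>

    _Static_assert(offsetof(struct commpage_time_data, nt_tsc_base) == 0, "_NT_TSC_BASE");
    _Static_assert(offsetof(struct commpage_time_data, nt_scale) == 8, "_NT_SCALE");
    _Static_assert(offsetof(struct commpage_time_data, nt_shift) == 12, "_NT_SHIFT");
    _Static_assert(offsetof(struct commpage_time_data, nt_ns_base) == 16, "_NT_NS_BASE");
    _Static_assert(offsetof(struct commpage_time_data, nt_generation) == 24, "_NT_GENERATION");
    _Static_assert(offsetof(struct commpage_time_data, gtod_generation) == 28, "_GTOD_GENERATION");
    _Static_assert(offsetof(struct commpage_time_data, gtod_ns_base) == 32, "_GTOD_NS_BASE");
    _Static_assert(offsetof(struct commpage_time_data, gtod_sec_base) == 40, "_GTOD_SEC_BASE");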
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,6 +33,7 @@ #include #include +#include #include #include #include @@ -48,47 +49,49 @@ #include #include -struct processor processor_master; +const char *processor_to_datastring(const char *prefix, processor_t target_processor); + +struct processor processor_master; /*ARGSUSED*/ kern_return_t cpu_control( - int slot_num, - processor_info_t info, - unsigned int count) + int slot_num, + processor_info_t info, + unsigned int count) { printf("cpu_control(%d,%p,%d) not implemented\n", - slot_num, info, count); - return (KERN_FAILURE); + slot_num, info, count); + return KERN_FAILURE; } /*ARGSUSED*/ kern_return_t cpu_info_count( - __unused processor_flavor_t flavor, - unsigned int *count) + __unused processor_flavor_t flavor, + unsigned int *count) { *count = 0; - return (KERN_FAILURE); + return KERN_FAILURE; } /*ARGSUSED*/ kern_return_t cpu_info( - processor_flavor_t flavor, - int slot_num, - processor_info_t info, - unsigned int *count) + processor_flavor_t flavor, + int slot_num, + processor_info_t info, + unsigned int *count) { printf("cpu_info(%d,%d,%p,%p) not implemented\n", - flavor, slot_num, info, count); - return (KERN_FAILURE); + flavor, slot_num, info, count); + return KERN_FAILURE; } void cpu_sleep(void) { - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); PE_cpu_machine_quiesce(cdp->cpu_id); @@ -98,7 +101,7 @@ cpu_sleep(void) void cpu_init(void) { - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); timer_call_queue_init(&cdp->rtclock_timer.queue); cdp->rtclock_timer.deadline = EndOfAllTime; @@ -113,7 +116,7 @@ kern_return_t cpu_start( int cpu) { - kern_return_t ret; + kern_return_t ret; if (cpu == cpu_number()) { cpu_machine_init(); @@ -134,19 +137,20 @@ cpu_start( ret = intel_startCPU(cpu); } - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { kprintf("cpu: cpu_start(%d) returning failure!\n", cpu); + } - return(ret); + return ret; } void cpu_exit_wait( int cpu) { - cpu_data_t *cdp = cpu_datap(cpu); - boolean_t intrs_enabled; - uint64_t tsc_timeout; + cpu_data_t *cdp = cpu_datap(cpu); + boolean_t intrs_enabled; + uint64_t tsc_timeout; /* * Wait until the CPU indicates that it has stopped. 
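cpu_exit_wait(), whose timeout loop follows, spins on a TSC-denominated deadline rather than a timer because it runs late in CPU teardown, and it drops x86_topo_lock and briefly re-enables interrupts on each iteration so the departing CPU can still make progress. The timeout idiom in isolation (predicate and budget are illustrative; rdtsc64() is the kernel helper used in the loop):

    #include <stdbool.h>
    #include <stdint.h>

    extern uint64_t rdtsc64(void);      /* raw timestamp counter */

    /* Spin until done() holds or a TSC tick budget expires. */
    static bool
    spin_with_tsc_timeout(bool (*done)(void), uint64_t tsc_budget)
    {
        uint64_t tsc_timeout = rdtsc64() + tsc_budget;

        while (!done()) {
            if (rdtsc64() > tsc_timeout) {
                return false;           /* caller panics, as cpu_exit_wait() does */
            }
        }
        return true;
    }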
@@ -159,15 +163,16 @@ cpu_exit_wait( /* Set a generous timeout of several seconds (in TSC ticks) */ tsc_timeout = rdtsc64() + (10ULL * 1000 * 1000 * 1000); while ((cdp->lcpu.state != LCPU_HALT) - && (cdp->lcpu.state != LCPU_OFF) - && !cdp->lcpu.stopped) { - simple_unlock(&x86_topo_lock); - ml_set_interrupts_enabled(intrs_enabled); - cpu_pause(); - if (rdtsc64() > tsc_timeout) - panic("cpu_exit_wait(%d) timeout", cpu); - ml_set_interrupts_enabled(FALSE); - mp_safe_spin_lock(&x86_topo_lock); + && (cdp->lcpu.state != LCPU_OFF) + && !cdp->lcpu.stopped) { + simple_unlock(&x86_topo_lock); + ml_set_interrupts_enabled(intrs_enabled); + cpu_pause(); + if (rdtsc64() > tsc_timeout) { + panic("cpu_exit_wait(%d) timeout", cpu); + } + ml_set_interrupts_enabled(FALSE); + mp_safe_spin_lock(&x86_topo_lock); } simple_unlock(&x86_topo_lock); ml_set_interrupts_enabled(intrs_enabled); @@ -177,7 +182,7 @@ void cpu_machine_init( void) { - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); PE_cpu_machine_init(cdp->cpu_id, !cdp->cpu_boot_complete); cdp->cpu_boot_complete = TRUE; @@ -193,15 +198,17 @@ cpu_machine_init( processor_t cpu_processor_alloc(boolean_t is_boot_cpu) { - int ret; - processor_t proc; + int ret; + processor_t proc; - if (is_boot_cpu) + if (is_boot_cpu) { return &processor_master; + } ret = kmem_alloc(kernel_map, (vm_offset_t *) &proc, sizeof(*proc), VM_KERN_MEMORY_OSFMK); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return NULL; + } bzero((void *) proc, sizeof(*proc)); return proc; @@ -210,8 +217,9 @@ cpu_processor_alloc(boolean_t is_boot_cpu) void cpu_processor_free(processor_t proc) { - if (proc != NULL && proc != &processor_master) - kfree((void *) proc, sizeof(*proc)); + if (proc != NULL && proc != &processor_master) { + kfree(proc, sizeof(*proc)); + } } processor_t @@ -222,7 +230,7 @@ current_processor(void) processor_t cpu_to_processor( - int cpu) + int cpu) { return cpu_datap(cpu)->cpu_processor; } @@ -230,44 +238,75 @@ cpu_to_processor( ast_t * ast_pending(void) { - return (&current_cpu_datap()->cpu_pending_ast); + return &current_cpu_datap()->cpu_pending_ast; } cpu_type_t slot_type( - int slot_num) + int slot_num) { - return (cpu_datap(slot_num)->cpu_type); + return cpu_datap(slot_num)->cpu_type; } cpu_subtype_t slot_subtype( - int slot_num) + int slot_num) { - return (cpu_datap(slot_num)->cpu_subtype); + return cpu_datap(slot_num)->cpu_subtype; } cpu_threadtype_t slot_threadtype( - int slot_num) + int slot_num) { - return (cpu_datap(slot_num)->cpu_threadtype); + return cpu_datap(slot_num)->cpu_threadtype; } cpu_type_t cpu_type(void) { - return (current_cpu_datap()->cpu_type); + return current_cpu_datap()->cpu_type; } cpu_subtype_t cpu_subtype(void) { - return (current_cpu_datap()->cpu_subtype); + return current_cpu_datap()->cpu_subtype; } cpu_threadtype_t cpu_threadtype(void) { - return (current_cpu_datap()->cpu_threadtype); + return current_cpu_datap()->cpu_threadtype; +} + +const char * +processor_to_datastring(const char *prefix, processor_t target_processor) +{ + static char printBuf[256]; + uint32_t cpu_num = target_processor->cpu_id; + + cpu_data_t *cpup = cpu_datap(cpu_num); + thread_t act; + + act = ml_validate_nofault((vm_offset_t)cpup->cpu_active_thread, + sizeof(struct thread)) ? cpup->cpu_active_thread : NULL; + + snprintf(printBuf, sizeof(printBuf), + "%s: tCPU %u (%d) [tid=0x%llx(bp=%d sp=%d) s=0x%x ps=0x%x cpa=0x%x spa=0x%llx pl=%d il=%d r=%d]", + prefix, + cpu_num, + target_processor->state, + act ? act->thread_id : ~0ULL, + act ?
act->base_pri : -1, + act ? act->sched_pri : -1, + cpup->cpu_signals, + cpup->cpu_prior_signals, + cpup->cpu_pending_ast, + target_processor->processor_set->pending_AST_URGENT_cpu_mask, + cpup->cpu_preemption_level, + cpup->cpu_interrupt_level, + cpup->cpu_running); + + return (const char *)&printBuf[0]; } diff --git a/osfmk/i386/cpu_affinity.h b/osfmk/i386/cpu_affinity.h index d65614a90..0ecd9bd49 100644 --- a/osfmk/i386/cpu_affinity.h +++ b/osfmk/i386/cpu_affinity.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL_PRIVATE @@ -31,18 +31,17 @@ #include -typedef struct x86_affinity_set -{ - struct x86_affinity_set *next; /* Forward link */ - struct x86_cpu_cache *cache; /* The L2 cache concerned */ - processor_set_t pset; /* The processor set container */ - uint32_t num; /* Logical id */ +typedef struct x86_affinity_set { + struct x86_affinity_set *next;/* Forward link */ + struct x86_cpu_cache *cache;/* The L2 cache concerned */ + processor_set_t pset;/* The processor set container */ + uint32_t num;/* Logical id */ } x86_affinity_set_t; -extern x86_affinity_set_t *x86_affinities; /* root of all affinities */ +extern x86_affinity_set_t *x86_affinities; /* root of all affinities */ -extern int ml_get_max_affinity_sets(void); -extern processor_set_t ml_affinity_to_pset(uint32_t affinity_num); +extern int ml_get_max_affinity_sets(void); +extern processor_set_t ml_affinity_to_pset(uint32_t affinity_num); #endif /* _I386_CPU_AFFINITY_H_ */ #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/i386/cpu_capabilities.h b/osfmk/i386/cpu_capabilities.h index 5caa412da..991bf1b34 100644 --- a/osfmk/i386/cpu_capabilities.h +++ b/osfmk/i386/cpu_capabilities.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,69 +22,69 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef PRIVATE +#ifdef PRIVATE #ifndef _I386_CPU_CAPABILITIES_H #define _I386_CPU_CAPABILITIES_H -#ifndef __ASSEMBLER__ +#ifndef __ASSEMBLER__ #include #endif - + /* * This API only supported for Apple internal use. */ /* Bit definitions for _cpu_capabilities: */ -#define kHasMMX 0x00000001 -#define kHasSSE 0x00000002 -#define kHasSSE2 0x00000004 -#define kHasSSE3 0x00000008 -#define kCache32 0x00000010 /* cache line size is 32 bytes */ -#define kCache64 0x00000020 -#define kCache128 0x00000040 -#define kFastThreadLocalStorage 0x00000080 /* TLS ptr is kept in a user-mode-readable register */ -#define kHasSupplementalSSE3 0x00000100 -#define k64Bit 0x00000200 /* processor supports EM64T (not what mode you're running in) */ -#define kHasSSE4_1 0x00000400 -#define kHasSSE4_2 0x00000800 -#define kHasAES 0x00001000 -#define kInOrderPipeline 0x00002000 -#define kSlow 0x00004000 /* tsc < nanosecond */ -#define kUP 0x00008000 /* set if (kNumCPUs == 1) */ -#define kNumCPUs 0x00FF0000 /* number of CPUs (see _NumCPUs() below) */ -#define kNumCPUsShift 16 -#define kHasAVX1_0 0x01000000 -#define kHasRDRAND 0x02000000 -#define kHasF16C 0x04000000 -#define kHasENFSTRG 0x08000000 -#define kHasFMA 0x10000000 -#define kHasAVX2_0 0x20000000 -#define kHasBMI1 0x40000000 -#define kHasBMI2 0x80000000 -/* Extending into 64-bits from here: */ -#define kHasRTM 0x0000000100000000ULL -#define kHasHLE 0x0000000200000000ULL -#define kHasRDSEED 0x0000000800000000ULL -#define kHasADX 0x0000000400000000ULL -#define kHasMPX 0x0000001000000000ULL -#define kHasSGX 0x0000002000000000ULL +#define kHasMMX 0x00000001 +#define kHasSSE 0x00000002 +#define kHasSSE2 0x00000004 +#define kHasSSE3 0x00000008 +#define kCache32 0x00000010 /* cache line size is 32 bytes */ +#define kCache64 0x00000020 +#define kCache128 0x00000040 +#define kFastThreadLocalStorage 0x00000080 /* TLS ptr is kept in a user-mode-readable register */ +#define kHasSupplementalSSE3 0x00000100 +#define k64Bit 0x00000200 /* processor supports EM64T (not what mode you're running in) */ +#define kHasSSE4_1 0x00000400 +#define kHasSSE4_2 0x00000800 +#define kHasAES 0x00001000 +#define kInOrderPipeline 0x00002000 +#define kSlow 0x00004000 /* tsc < nanosecond */ +#define kUP 0x00008000 /* set if (kNumCPUs == 1) */ +#define kNumCPUs 0x00FF0000 /* number of CPUs (see _NumCPUs() below) */ +#define kNumCPUsShift 16 +#define kHasAVX1_0 0x01000000 +#define kHasRDRAND 0x02000000 +#define kHasF16C 0x04000000 +#define kHasENFSTRG 0x08000000 +#define kHasFMA 0x10000000 +#define kHasAVX2_0 0x20000000 +#define kHasBMI1 0x40000000 +#define kHasBMI2 0x80000000 +/* Extending into 64-bits from here: */ +#define kHasRTM 0x0000000100000000ULL +#define kHasHLE 0x0000000200000000ULL +#define kHasRDSEED 0x0000000800000000ULL +#define kHasADX 0x0000000400000000ULL +#define kHasMPX 0x0000001000000000ULL +#define kHasSGX 0x0000002000000000ULL #if !defined(RC_HIDE_XNU_J137) -#define kHasAVX512F 0x0000004000000000ULL -#define kHasAVX512CD 0x0000008000000000ULL -#define kHasAVX512DQ 0x0000010000000000ULL -#define kHasAVX512BW 
0x0000020000000000ULL -#define kHasAVX512IFMA 0x0000040000000000ULL -#define kHasAVX512VBMI 0x0000080000000000ULL -#define kHasAVX512VL 0x0000100000000000ULL +#define kHasAVX512F 0x0000004000000000ULL +#define kHasAVX512CD 0x0000008000000000ULL +#define kHasAVX512DQ 0x0000010000000000ULL +#define kHasAVX512BW 0x0000020000000000ULL +#define kHasAVX512IFMA 0x0000040000000000ULL +#define kHasAVX512VBMI 0x0000080000000000ULL +#define kHasAVX512VL 0x0000100000000000ULL #endif /* not RC_HIDE_XNU_J137 */ -#ifndef __ASSEMBLER__ +#ifndef __ASSEMBLER__ #include #include @@ -93,7 +93,8 @@ extern uint64_t _get_cpu_capabilities( void ); __END_DECLS __inline static -int _NumCPUs( void ) +int +_NumCPUs( void ) { return (int) (_get_cpu_capabilities() & kNumCPUs) >> kNumCPUsShift; } @@ -105,7 +106,7 @@ int _NumCPUs( void ) * the 64-bit commpage. Since the kernel can be a 32-bit object, cpu_capabilities.h * only defines the 32-bit address. */ -#define _COMM_PAGE_32_TO_64( ADDRESS ) ( ADDRESS + _COMM_PAGE64_START_ADDRESS - _COMM_PAGE32_START_ADDRESS ) +#define _COMM_PAGE_32_TO_64( ADDRESS ) ( ADDRESS + _COMM_PAGE64_START_ADDRESS - _COMM_PAGE32_START_ADDRESS ) /* @@ -117,27 +118,27 @@ int _NumCPUs( void ) * * Because Mach VM cannot map the last page of an address space, we don't use it. */ - -#define _COMM_PAGE32_AREA_LENGTH ( 1 * 4096 ) /* reserved length of entire comm area */ -#define _COMM_PAGE32_BASE_ADDRESS ( 0xffff0000 ) /* base address of allocated memory */ -#define _COMM_PAGE32_START_ADDRESS ( _COMM_PAGE32_BASE_ADDRESS ) /* address traditional commpage code starts on */ -#define _COMM_PAGE32_AREA_USED ( 1 * 4096 ) /* this is the amt actually allocated */ -#define _COMM_PAGE32_SIGS_OFFSET 0x8000 /* offset to routine signatures */ - -#define _COMM_PAGE64_AREA_LENGTH ( 1 * 4096 ) /* reserved length of entire comm area (2MB) */ + +#define _COMM_PAGE32_AREA_LENGTH ( 1 * 4096 ) /* reserved length of entire comm area */ +#define _COMM_PAGE32_BASE_ADDRESS ( 0xffff0000 ) /* base address of allocated memory */ +#define _COMM_PAGE32_START_ADDRESS ( _COMM_PAGE32_BASE_ADDRESS ) /* address traditional commpage code starts on */ +#define _COMM_PAGE32_AREA_USED ( 1 * 4096 ) /* this is the amt actually allocated */ +#define _COMM_PAGE32_SIGS_OFFSET 0x8000 /* offset to routine signatures */ + +#define _COMM_PAGE64_AREA_LENGTH ( 1 * 4096 ) /* reserved length of entire comm area (2MB) */ #ifdef __ASSEMBLER__ -#define _COMM_PAGE64_BASE_ADDRESS ( 0x00007fffffe00000 ) /* base address of allocated memory */ +#define _COMM_PAGE64_BASE_ADDRESS ( 0x00007fffffe00000 ) /* base address of allocated memory */ #else /* __ASSEMBLER__ */ -#define _COMM_PAGE64_BASE_ADDRESS ( 0x00007fffffe00000ULL ) /* base address of allocated memory */ +#define _COMM_PAGE64_BASE_ADDRESS ( 0x00007fffffe00000ULL ) /* base address of allocated memory */ #endif /* __ASSEMBLER__ */ -#define _COMM_PAGE64_START_ADDRESS ( _COMM_PAGE64_BASE_ADDRESS ) /* address traditional commpage code starts on */ -#define _COMM_PAGE64_AREA_USED ( 1 * 4096 ) /* this is the amt actually populated */ +#define _COMM_PAGE64_START_ADDRESS ( _COMM_PAGE64_BASE_ADDRESS ) /* address traditional commpage code starts on */ +#define _COMM_PAGE64_AREA_USED ( 1 * 4096 ) /* this is the amt actually populated */ /* no need for an Objective-C area on Intel */ -#define _COMM_PAGE32_OBJC_SIZE 0ULL -#define _COMM_PAGE32_OBJC_BASE 0ULL -#define _COMM_PAGE64_OBJC_SIZE 0ULL -#define _COMM_PAGE64_OBJC_BASE 0ULL +#define _COMM_PAGE32_OBJC_SIZE 0ULL +#define _COMM_PAGE32_OBJC_BASE 0ULL +#define 
_COMM_PAGE64_OBJC_SIZE 0ULL +#define _COMM_PAGE64_OBJC_BASE 0ULL #ifdef KERNEL_PRIVATE @@ -145,157 +146,158 @@ int _NumCPUs( void ) * assuming they are a part of the 32-bit commpage. They may * be mapped somewhere else, especially for the 64-bit commpage. */ -#define _COMM_PAGE_START_ADDRESS _COMM_PAGE32_START_ADDRESS -#define _COMM_PAGE_SIGS_OFFSET _COMM_PAGE32_SIGS_OFFSET +#define _COMM_PAGE_START_ADDRESS _COMM_PAGE32_START_ADDRESS +#define _COMM_PAGE_SIGS_OFFSET _COMM_PAGE32_SIGS_OFFSET #else /* !KERNEL_PRIVATE */ #if defined(__i386__) -#define _COMM_PAGE_AREA_LENGTH _COMM_PAGE32_AREA_LENGTH -#define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE32_BASE_ADDRESS -#define _COMM_PAGE_START_ADDRESS _COMM_PAGE32_START_ADDRESS -#define _COMM_PAGE_AREA_USED _COMM_PAGE32_AREA_USED -#define _COMM_PAGE_SIGS_OFFSET _COMM_PAGE32_SIGS_OFFSET +#define _COMM_PAGE_AREA_LENGTH _COMM_PAGE32_AREA_LENGTH +#define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE32_BASE_ADDRESS +#define _COMM_PAGE_START_ADDRESS _COMM_PAGE32_START_ADDRESS +#define _COMM_PAGE_AREA_USED _COMM_PAGE32_AREA_USED +#define _COMM_PAGE_SIGS_OFFSET _COMM_PAGE32_SIGS_OFFSET #elif defined(__x86_64__) -#define _COMM_PAGE_AREA_LENGTH _COMM_PAGE64_AREA_LENGTH -#define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE64_BASE_ADDRESS -#define _COMM_PAGE_START_ADDRESS _COMM_PAGE64_START_ADDRESS -#define _COMM_PAGE_AREA_USED _COMM_PAGE64_AREA_USED +#define _COMM_PAGE_AREA_LENGTH _COMM_PAGE64_AREA_LENGTH +#define _COMM_PAGE_BASE_ADDRESS _COMM_PAGE64_BASE_ADDRESS +#define _COMM_PAGE_START_ADDRESS _COMM_PAGE64_START_ADDRESS +#define _COMM_PAGE_AREA_USED _COMM_PAGE64_AREA_USED #else #error architecture not supported #endif - + #endif /* !KERNEL_PRIVATE */ /* data in the comm page */ - -#define _COMM_PAGE_SIGNATURE (_COMM_PAGE_START_ADDRESS+0x000) /* first 16 bytes are a signature */ -#define _COMM_PAGE_SIGNATURELEN (0x10) -#define _COMM_PAGE_CPU_CAPABILITIES64 (_COMM_PAGE_START_ADDRESS+0x010) /* uint64_t _cpu_capabilities */ -#define _COMM_PAGE_UNUSED (_COMM_PAGE_START_ADDRESS+0x018) /* 6 unused bytes */ -#define _COMM_PAGE_VERSION (_COMM_PAGE_START_ADDRESS+0x01E) /* 16-bit version# */ -#define _COMM_PAGE_THIS_VERSION 13 /* in ver 13, _COMM_PAGE_NT_SHIFT defaults to 0 (was 32) */ - -#define _COMM_PAGE_CPU_CAPABILITIES (_COMM_PAGE_START_ADDRESS+0x020) /* uint32_t _cpu_capabilities (retained for compatibility) */ -#define _COMM_PAGE_NCPUS (_COMM_PAGE_START_ADDRESS+0x022) /* uint8_t number of configured CPUs (hw.logicalcpu at boot time) */ -#define _COMM_PAGE_UNUSED0 (_COMM_PAGE_START_ADDRESS+0x024) /* 2 unused bytes, previouly reserved for expansion of cpu_capabilities */ -#define _COMM_PAGE_CACHE_LINESIZE (_COMM_PAGE_START_ADDRESS+0x026) /* uint16_t cache line size */ - -#define _COMM_PAGE_SCHED_GEN (_COMM_PAGE_START_ADDRESS+0x028) /* uint32_t scheduler generation number (count of pre-emptions) */ -#define _COMM_PAGE_MEMORY_PRESSURE (_COMM_PAGE_START_ADDRESS+0x02c) /* uint32_t copy of vm_memory_pressure */ -#define _COMM_PAGE_SPIN_COUNT (_COMM_PAGE_START_ADDRESS+0x030) /* uint32_t max spin count for mutex's */ - -#define _COMM_PAGE_ACTIVE_CPUS (_COMM_PAGE_START_ADDRESS+0x034) /* uint8_t number of active CPUs (hw.activecpu) */ -#define _COMM_PAGE_PHYSICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x035) /* uint8_t number of physical CPUs (hw.physicalcpu_max) */ -#define _COMM_PAGE_LOGICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x036) /* uint8_t number of logical CPUs (hw.logicalcpu_max) */ -#define _COMM_PAGE_UNUSED1 (_COMM_PAGE_START_ADDRESS+0x037) /* 1 unused bytes */ -#define _COMM_PAGE_MEMORY_SIZE 
(_COMM_PAGE_START_ADDRESS+0x038) /* uint64_t max memory size */ - -#define _COMM_PAGE_CPUFAMILY (_COMM_PAGE_START_ADDRESS+0x040) /* uint32_t hw.cpufamily, x86*/ -#define _COMM_PAGE_KDEBUG_ENABLE (_COMM_PAGE_START_ADDRESS+0x044) /* uint32_t export "kdebug_enable" to userspace */ -#define _COMM_PAGE_ATM_DIAGNOSTIC_CONFIG (_COMM_PAGE_START_ADDRESS+0x48) /* uint32_t export "atm_diagnostic_config" to userspace */ - -#define _COMM_PAGE_UNUSED2 (_COMM_PAGE_START_ADDRESS+0x04C) /* [0x4C,0x50) unused */ - -#define _COMM_PAGE_TIME_DATA_START (_COMM_PAGE_START_ADDRESS+0x050) /* base of offsets below (_NT_SCALE etc) */ -#define _COMM_PAGE_NT_TSC_BASE (_COMM_PAGE_START_ADDRESS+0x050) /* used by nanotime() */ -#define _COMM_PAGE_NT_SCALE (_COMM_PAGE_START_ADDRESS+0x058) /* used by nanotime() */ -#define _COMM_PAGE_NT_SHIFT (_COMM_PAGE_START_ADDRESS+0x05c) /* used by nanotime() */ -#define _COMM_PAGE_NT_NS_BASE (_COMM_PAGE_START_ADDRESS+0x060) /* used by nanotime() */ -#define _COMM_PAGE_NT_GENERATION (_COMM_PAGE_START_ADDRESS+0x068) /* used by nanotime() */ -#define _COMM_PAGE_GTOD_GENERATION (_COMM_PAGE_START_ADDRESS+0x06c) /* used by gettimeofday() */ -#define _COMM_PAGE_GTOD_NS_BASE (_COMM_PAGE_START_ADDRESS+0x070) /* used by gettimeofday() */ -#define _COMM_PAGE_GTOD_SEC_BASE (_COMM_PAGE_START_ADDRESS+0x078) /* used by gettimeofday() */ - + +#define _COMM_PAGE_SIGNATURE (_COMM_PAGE_START_ADDRESS+0x000) /* first 16 bytes are a signature */ +#define _COMM_PAGE_SIGNATURELEN (0x10) +#define _COMM_PAGE_CPU_CAPABILITIES64 (_COMM_PAGE_START_ADDRESS+0x010) /* uint64_t _cpu_capabilities */ +#define _COMM_PAGE_UNUSED (_COMM_PAGE_START_ADDRESS+0x018) /* 6 unused bytes */ +#define _COMM_PAGE_VERSION (_COMM_PAGE_START_ADDRESS+0x01E) /* 16-bit version# */ +#define _COMM_PAGE_THIS_VERSION 13 /* in ver 13, _COMM_PAGE_NT_SHIFT defaults to 0 (was 32) */ + +#define _COMM_PAGE_CPU_CAPABILITIES (_COMM_PAGE_START_ADDRESS+0x020) /* uint32_t _cpu_capabilities (retained for compatibility) */ +#define _COMM_PAGE_NCPUS (_COMM_PAGE_START_ADDRESS+0x022) /* uint8_t number of configured CPUs (hw.logicalcpu at boot time) */ +#define _COMM_PAGE_UNUSED0 (_COMM_PAGE_START_ADDRESS+0x024) /* 2 unused bytes, previouly reserved for expansion of cpu_capabilities */ +#define _COMM_PAGE_CACHE_LINESIZE (_COMM_PAGE_START_ADDRESS+0x026) /* uint16_t cache line size */ + +#define _COMM_PAGE_SCHED_GEN (_COMM_PAGE_START_ADDRESS+0x028) /* uint32_t scheduler generation number (count of pre-emptions) */ +#define _COMM_PAGE_MEMORY_PRESSURE (_COMM_PAGE_START_ADDRESS+0x02c) /* uint32_t copy of vm_memory_pressure */ +#define _COMM_PAGE_SPIN_COUNT (_COMM_PAGE_START_ADDRESS+0x030) /* uint32_t max spin count for mutex's */ + +#define _COMM_PAGE_ACTIVE_CPUS (_COMM_PAGE_START_ADDRESS+0x034) /* uint8_t number of active CPUs (hw.activecpu) */ +#define _COMM_PAGE_PHYSICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x035) /* uint8_t number of physical CPUs (hw.physicalcpu_max) */ +#define _COMM_PAGE_LOGICAL_CPUS (_COMM_PAGE_START_ADDRESS+0x036) /* uint8_t number of logical CPUs (hw.logicalcpu_max) */ +#define _COMM_PAGE_UNUSED1 (_COMM_PAGE_START_ADDRESS+0x037) /* 1 unused bytes */ +#define _COMM_PAGE_MEMORY_SIZE (_COMM_PAGE_START_ADDRESS+0x038) /* uint64_t max memory size */ + +#define _COMM_PAGE_CPUFAMILY (_COMM_PAGE_START_ADDRESS+0x040) /* uint32_t hw.cpufamily, x86*/ +#define _COMM_PAGE_KDEBUG_ENABLE (_COMM_PAGE_START_ADDRESS+0x044) /* uint32_t export "kdebug_enable" to userspace */ +#define _COMM_PAGE_ATM_DIAGNOSTIC_CONFIG (_COMM_PAGE_START_ADDRESS+0x48) /* uint32_t 
export "atm_diagnostic_config" to userspace */ + +#define _COMM_PAGE_UNUSED2 (_COMM_PAGE_START_ADDRESS+0x04C) /* [0x4C,0x50) unused */ + +#define _COMM_PAGE_TIME_DATA_START (_COMM_PAGE_START_ADDRESS+0x050) /* base of offsets below (_NT_SCALE etc) */ +#define _COMM_PAGE_NT_TSC_BASE (_COMM_PAGE_START_ADDRESS+0x050) /* used by nanotime() */ +#define _COMM_PAGE_NT_SCALE (_COMM_PAGE_START_ADDRESS+0x058) /* used by nanotime() */ +#define _COMM_PAGE_NT_SHIFT (_COMM_PAGE_START_ADDRESS+0x05c) /* used by nanotime() */ +#define _COMM_PAGE_NT_NS_BASE (_COMM_PAGE_START_ADDRESS+0x060) /* used by nanotime() */ +#define _COMM_PAGE_NT_GENERATION (_COMM_PAGE_START_ADDRESS+0x068) /* used by nanotime() */ +#define _COMM_PAGE_GTOD_GENERATION (_COMM_PAGE_START_ADDRESS+0x06c) /* used by gettimeofday() */ +#define _COMM_PAGE_GTOD_NS_BASE (_COMM_PAGE_START_ADDRESS+0x070) /* used by gettimeofday() */ +#define _COMM_PAGE_GTOD_SEC_BASE (_COMM_PAGE_START_ADDRESS+0x078) /* used by gettimeofday() */ + /* NOTE: APPROX_TIME must be aligned to 64-byte cache line size: */ -#define _COMM_PAGE_APPROX_TIME (_COMM_PAGE_START_ADDRESS+0x080) /* used by mach_approximate_time() */ -#define _COMM_PAGE_APPROX_TIME_SUPPORTED (_COMM_PAGE_START_ADDRESS+0x088) /* used by mach_approximate_time() */ +#define _COMM_PAGE_APPROX_TIME (_COMM_PAGE_START_ADDRESS+0x080) /* used by mach_approximate_time() */ +#define _COMM_PAGE_APPROX_TIME_SUPPORTED (_COMM_PAGE_START_ADDRESS+0x088) /* used by mach_approximate_time() */ /* Align following entries to next cache line */ -#define _COMM_PAGE_CONT_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x0C0) /* used by mach_continuous_time() */ -#define _COMM_PAGE_BOOTTIME_USEC (_COMM_PAGE_START_ADDRESS+0x0C8) /* uint64_t boottime */ -#define _COMM_PAGE_NEWTIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x0D0) /* used by gettimeofday(). Currently, sizeof(new_commpage_timeofday_data_t) = 40 */ +#define _COMM_PAGE_CONT_TIMEBASE (_COMM_PAGE_START_ADDRESS+0x0C0) /* used by mach_continuous_time() */ +#define _COMM_PAGE_BOOTTIME_USEC (_COMM_PAGE_START_ADDRESS+0x0C8) /* uint64_t boottime */ +#define _COMM_PAGE_NEWTIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x0D0) /* used by gettimeofday(). Currently, sizeof(new_commpage_timeofday_data_t) = 40 */ -#define _COMM_PAGE_END (_COMM_PAGE_START_ADDRESS+0xfff) /* end of common page */ +#define _COMM_PAGE_END (_COMM_PAGE_START_ADDRESS+0xfff) /* end of common page */ /* Warning: kernel commpage.h has a matching c typedef for the following. They must be kept in sync. */ /* These offsets are from _COMM_PAGE_TIME_DATA_START */ -#define _NT_TSC_BASE 0 -#define _NT_SCALE 8 -#define _NT_SHIFT 12 -#define _NT_NS_BASE 16 -#define _NT_GENERATION 24 -#define _GTOD_GENERATION 28 -#define _GTOD_NS_BASE 32 -#define _GTOD_SEC_BASE 40 - - /* jump table (jmp to this address, which may be a branch to the actual code somewhere else) */ - /* When new jump table entries are added, corresponding symbols should be added below */ - /* New slots should be allocated with at least 16-byte alignment. 
Some like bcopy require */ - /* 32-byte alignment, and should be aligned as such in the assembly source before they are relocated */ -#define _COMM_PAGE_TEXT_START (_COMM_PAGE_START_ADDRESS+0x1000) +#define _NT_TSC_BASE 0 +#define _NT_SCALE 8 +#define _NT_SHIFT 12 +#define _NT_NS_BASE 16 +#define _NT_GENERATION 24 +#define _GTOD_GENERATION 28 +#define _GTOD_NS_BASE 32 +#define _GTOD_SEC_BASE 40 + +/* jump table (jmp to this address, which may be a branch to the actual code somewhere else) */ +/* When new jump table entries are added, corresponding symbols should be added below */ +/* New slots should be allocated with at least 16-byte alignment. Some like bcopy require */ +/* 32-byte alignment, and should be aligned as such in the assembly source before they are relocated */ +#define _COMM_PAGE_TEXT_START (_COMM_PAGE_START_ADDRESS+0x1000) #define _COMM_PAGE32_TEXT_START (_COMM_PAGE32_BASE_ADDRESS+0x1000) /* start of text section */ -#define _COMM_PAGE64_TEXT_START (_COMM_PAGE64_BASE_ADDRESS+0x1000) -#define _COMM_PAGE_TEXT_AREA_USED ( 1 * 4096 ) -#define _COMM_PAGE_TEXT_AREA_LENGTH ( 1 * 4096 ) -#define _PFZ32_SLIDE_RANGE ( 14 ) /* pages between 0xfffff000 and _COMM_PAGE32_TEXT_START */ -#define _PFZ64_SLIDE_RANGE ( 510 ) /* pages between 0x00007ffffffff000 and _COMM_PAGE64_TEXT_START */ +#define _COMM_PAGE64_TEXT_START (_COMM_PAGE64_BASE_ADDRESS+0x1000) +#define _COMM_PAGE_TEXT_AREA_USED ( 1 * 4096 ) +#define _COMM_PAGE_TEXT_AREA_LENGTH ( 1 * 4096 ) +#define _PFZ32_SLIDE_RANGE ( 14 ) /* pages between 0xfffff000 and _COMM_PAGE32_TEXT_START */ +#define _PFZ64_SLIDE_RANGE ( 510 ) /* pages between 0x00007ffffffff000 and _COMM_PAGE64_TEXT_START */ -/* setup start offset in the commpage text region for each jump table entry +/* setup start offset in the commpage text region for each jump table entry * the Comm Page Offset is shortened to _COMM_TEXT_[label]_OFFSET */ -#define _COMM_TEXT_PREEMPT_OFFSET (0x5a0) /* called from withing pfz */ -#define _COMM_TEXT_BACKOFF_OFFSET (0x600) /* called from PFZ */ -#define _COMM_TEXT_RET_OFFSET (0x680) /* called from PFZ */ -#define _COMM_TEXT_PFZ_START_OFFSET (0xc00) /* offset for Preemption Free Zone */ -#define _COMM_TEXT_PFZ_ENQUEUE_OFFSET (0xc00) /* internal FIFO enqueue */ -#define _COMM_TEXT_PFZ_DEQUEUE_OFFSET (0xc80) /* internal FIFO dequeue */ -#define _COMM_TEXT_UNUSED_OFFSET (0xd00) /* end of routines in text page */ -#define _COMM_TEXT_PFZ_END_OFFSET (0xd00) /* offset for end of PFZ */ +#define _COMM_TEXT_PREEMPT_OFFSET (0x5a0) /* called from withing pfz */ +#define _COMM_TEXT_BACKOFF_OFFSET (0x600) /* called from PFZ */ +#define _COMM_TEXT_RET_OFFSET (0x680) /* called from PFZ */ +#define _COMM_TEXT_PFZ_START_OFFSET (0xc00) /* offset for Preemption Free Zone */ +#define _COMM_TEXT_PFZ_ENQUEUE_OFFSET (0xc00) /* internal FIFO enqueue */ +#define _COMM_TEXT_PFZ_DEQUEUE_OFFSET (0xc80) /* internal FIFO dequeue */ +#define _COMM_TEXT_UNUSED_OFFSET (0xd00) /* end of routines in text page */ +#define _COMM_TEXT_PFZ_END_OFFSET (0xd00) /* offset for end of PFZ */ -#define _COMM_PAGE_PREEMPT (_COMM_PAGE_TEXT_START+_COMM_TEXT_PREEMPT_OFFSET) -#define _COMM_PAGE_BACKOFF (_COMM_PAGE_TEXT_START+_COMM_TEXT_BACKOFF_OFFSET) -#define _COMM_PAGE_RET (_COMM_PAGE_TEXT_START+_COMM_TEXT_RET_OFFSET) +#define _COMM_PAGE_PREEMPT (_COMM_PAGE_TEXT_START+_COMM_TEXT_PREEMPT_OFFSET) +#define _COMM_PAGE_BACKOFF (_COMM_PAGE_TEXT_START+_COMM_TEXT_BACKOFF_OFFSET) +#define _COMM_PAGE_RET (_COMM_PAGE_TEXT_START+_COMM_TEXT_RET_OFFSET) -#define _COMM_PAGE_PFZ_START 
(_COMM_PAGE_TEXT_START+_COMM_PAGE_PFZ_START_OFFSET) +#define _COMM_PAGE_PFZ_START (_COMM_PAGE_TEXT_START+_COMM_PAGE_PFZ_START_OFFSET) -#define _COMM_PAGE_PFZ_ENQUEUE (_COMM_PAGE_TEXT_START+_COMM_TEXT_PFZ_ENQUEUE_OFFSET) -#define _COMM_PAGE_PFZ_DEQUEUE (_COMM_PAGE_TEXT_START+_COMM_TEXT_PFZ_DEQUEUE_OFFSET) +#define _COMM_PAGE_PFZ_ENQUEUE (_COMM_PAGE_TEXT_START+_COMM_TEXT_PFZ_ENQUEUE_OFFSET) +#define _COMM_PAGE_PFZ_DEQUEUE (_COMM_PAGE_TEXT_START+_COMM_TEXT_PFZ_DEQUEUE_OFFSET) -#define _COMM_PAGE_UNUSED6 (_COMM_PAGE_TEXT_START+_COMM_TEXT_UNUSED_OFFSET) -#define _COMM_PAGE_PFZ_END (_COMM_PAGE_TEXT_START+_COMM_TEXT_PFZ_END_OFFSET) -#define _COMM_PAGE_TEXT_END (_COMM_PAGE_TEXT_START+_COMM_TEXT_PFZ_END_OFFSET) /* end of common text page */ +#define _COMM_PAGE_UNUSED6 (_COMM_PAGE_TEXT_START+_COMM_TEXT_UNUSED_OFFSET) +#define _COMM_PAGE_PFZ_END (_COMM_PAGE_TEXT_START+_COMM_TEXT_PFZ_END_OFFSET) +#define _COMM_PAGE_TEXT_END (_COMM_PAGE_TEXT_START+_COMM_TEXT_PFZ_END_OFFSET) /* end of common text page */ /* _COMM_PAGE_COMPARE_AND_SWAP{32,64}B are not used on x86 and are * maintained here for source compatability. These will be removed at * some point, so don't go relying on them. */ -#define _COMM_PAGE_COMPARE_AND_SWAP32B (_COMM_PAGE_START_ADDRESS+0xf80) /* compare-and-swap word w barrier */ -#define _COMM_PAGE_COMPARE_AND_SWAP64B (_COMM_PAGE_START_ADDRESS+0xfc0) /* compare-and-swap doubleword w barrier */ +#define _COMM_PAGE_COMPARE_AND_SWAP32B (_COMM_PAGE_START_ADDRESS+0xf80) /* compare-and-swap word w barrier */ +#define _COMM_PAGE_COMPARE_AND_SWAP64B (_COMM_PAGE_START_ADDRESS+0xfc0) /* compare-and-swap doubleword w barrier */ #ifdef __ASSEMBLER__ #ifdef __COMM_PAGE_SYMBOLS -#define CREATE_COMM_PAGE_SYMBOL(symbol_name, symbol_address) \ - .org (symbol_address - (_COMM_PAGE_START_ADDRESS & 0xFFFFE000)) ;\ +#define CREATE_COMM_PAGE_SYMBOL(symbol_name, symbol_address) \ + .org (symbol_address - (_COMM_PAGE_START_ADDRESS & 0xFFFFE000)) ;\ symbol_name: nop - .text /* Required to make a well behaved symbol file */ +.text +/* Required to make a well behaved symbol file */ - CREATE_COMM_PAGE_SYMBOL(___preempt, _COMM_PAGE_PREEMPT) - CREATE_COMM_PAGE_SYMBOL(___backoff, _COMM_PAGE_BACKOFF) - CREATE_COMM_PAGE_SYMBOL(___pfz_enqueue, _COMM_PAGE_PFZ_ENQUEUE) - CREATE_COMM_PAGE_SYMBOL(___pfz_dequeue, _COMM_PAGE_PFZ_DEQUEUE) - CREATE_COMM_PAGE_SYMBOL(___end_comm_page, _COMM_PAGE_END) +CREATE_COMM_PAGE_SYMBOL(___preempt, _COMM_PAGE_PREEMPT) +CREATE_COMM_PAGE_SYMBOL(___backoff, _COMM_PAGE_BACKOFF) +CREATE_COMM_PAGE_SYMBOL(___pfz_enqueue, _COMM_PAGE_PFZ_ENQUEUE) +CREATE_COMM_PAGE_SYMBOL(___pfz_dequeue, _COMM_PAGE_PFZ_DEQUEUE) +CREATE_COMM_PAGE_SYMBOL(___end_comm_page, _COMM_PAGE_END) - .data /* Required to make a well behaved symbol file */ - .long 0 /* Required to make a well behaved symbol file */ +.data /* Required to make a well behaved symbol file */ +.long 0 /* Required to make a well behaved symbol file */ #endif /* __COMM_PAGE_SYMBOLS */ #endif /* __ASSEMBLER__ */ diff --git a/osfmk/i386/cpu_data.h b/osfmk/i386/cpu_data.h index 4201068f4..da7919cda 100644 --- a/osfmk/i386/cpu_data.h +++ b/osfmk/i386/cpu_data.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,21 +22,22 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ -#ifndef I386_CPU_DATA +#ifndef I386_CPU_DATA #define I386_CPU_DATA #include #include #include +#include #include #include #include @@ -48,6 +49,7 @@ #include #include #include +#include #if CONFIG_VMX #include @@ -71,29 +73,29 @@ struct prngContext; * Data structures embedded in per-cpu data: */ typedef struct rtclock_timer { - mpqueue_head_t queue; - uint64_t deadline; - uint64_t when_set; - boolean_t has_expired; + mpqueue_head_t queue; + uint64_t deadline; + uint64_t when_set; + boolean_t has_expired; } rtclock_timer_t; typedef struct { /* The 'u' suffixed fields store the double-mapped descriptor addresses */ - struct x86_64_tss *cdi_ktssu; - struct x86_64_tss *cdi_ktssb; - x86_64_desc_register_t cdi_gdtu; - x86_64_desc_register_t cdi_gdtb; - x86_64_desc_register_t cdi_idtu; - x86_64_desc_register_t cdi_idtb; - struct fake_descriptor *cdi_ldtu; - struct fake_descriptor *cdi_ldtb; - vm_offset_t cdi_sstku; - vm_offset_t cdi_sstkb; + struct x86_64_tss *cdi_ktssu; + struct x86_64_tss *cdi_ktssb; + x86_64_desc_register_t cdi_gdtu; + x86_64_desc_register_t cdi_gdtb; + x86_64_desc_register_t cdi_idtu; + x86_64_desc_register_t cdi_idtb; + struct real_descriptor *cdi_ldtu; + struct real_descriptor *cdi_ldtb; + vm_offset_t cdi_sstku; + vm_offset_t cdi_sstkb; } cpu_desc_index_t; typedef enum { - TASK_MAP_32BIT, /* 32-bit user, compatibility mode */ - TASK_MAP_64BIT, /* 64-bit user thread, shared space */ + TASK_MAP_32BIT, /* 32-bit user, compatibility mode */ + TASK_MAP_64BIT, /* 64-bit user thread, shared space */ } task_map_t; @@ -104,24 +106,56 @@ typedef enum { * before loading this address into rsp. 
*/ typedef struct { - addr64_t cu_isf; /* thread->pcb->iss.isf */ - uint64_t cu_tmp; /* temporary scratch */ - addr64_t cu_user_gs_base; + addr64_t cu_isf; /* thread->pcb->iss.isf */ + uint64_t cu_tmp; /* temporary scratch */ + addr64_t cu_user_gs_base; } cpu_uber_t; -typedef uint16_t pcid_t; -typedef uint8_t pcid_ref_t; +typedef uint16_t pcid_t; +typedef uint8_t pcid_ref_t; #define CPU_RTIME_BINS (12) #define CPU_ITIME_BINS (CPU_RTIME_BINS) -#define MAXPLFRAMES (16) +#define MAX_TRACE_BTFRAMES (16) typedef struct { boolean_t pltype; int plevel; - uint64_t plbt[MAXPLFRAMES]; + uint64_t plbt[MAX_TRACE_BTFRAMES]; } plrecord_t; +typedef enum { + IOTRACE_PHYS_READ = 1, + IOTRACE_PHYS_WRITE, + IOTRACE_IO_READ, + IOTRACE_IO_WRITE, + IOTRACE_PORTIO_READ, + IOTRACE_PORTIO_WRITE +} iotrace_type_e; + +typedef struct { + iotrace_type_e iotype; + int size; + uint64_t vaddr; + uint64_t paddr; + uint64_t val; + uint64_t start_time_abs; + uint64_t duration; + uint64_t backtrace[MAX_TRACE_BTFRAMES]; +} iotrace_entry_t; + +#if DEVELOPMENT || DEBUG +#define DEFAULT_IOTRACE_ENTRIES_PER_CPU (64) +#define IOTRACE_MAX_ENTRIES_PER_CPU (256) +extern volatile int mmiotrace_enabled; +extern int iotrace_generators; +extern int iotrace_entries_per_cpu; +extern int *iotrace_next; +extern iotrace_entry_t **iotrace_ring; + +extern void init_iotrace_bufs(int cpucnt, int entries_per_cpu); +#endif /* DEVELOPMENT || DEBUG */ + /* * Per-cpu data. * @@ -129,45 +163,44 @@ typedef struct { * current_cpu_datap() macro. For speed, the %gs segment is based here, and * using this, inlines provides single-instruction access to frequently used * members - such as get_cpu_number()/cpu_number(), and get_active_thread()/ - * current_thread(). - * + * current_thread(). + * * Cpu data owned by another processor can be accessed using the * cpu_datap(cpu_number) macro which uses the cpu_data_ptr[] array of per-cpu * pointers. 
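The iotrace machinery introduced above keeps one fixed-size ring per CPU: iotrace_next[cpu] names the next slot to be overwritten, so once the ring has wrapped, the oldest record sits at that index and the newest immediately before it. A hedged consumer sketch against the externs declared above (DEVELOPMENT || DEBUG kernels only; the dump helper is hypothetical, and it assumes init_iotrace_bufs() hands out zero-filled rings):

/* Hypothetical: walk one CPU's iotrace ring, oldest record first. */
static void
iotrace_dump_cpu(int cpu)
{
        iotrace_entry_t *ring = iotrace_ring[cpu];
        int next = iotrace_next[cpu];   /* next slot to be overwritten */
        int i;

        for (i = 0; i < iotrace_entries_per_cpu; i++) {
                /* (next + i) mod ring size visits entries in age order. */
                iotrace_entry_t *e = &ring[(next + i) % iotrace_entries_per_cpu];
                if (e->start_time_abs == 0) {
                        continue;       /* never written (assumes zeroed rings) */
                }
                kprintf("cpu %d: type %d vaddr 0x%llx paddr 0x%llx size %d "
                    "val 0x%llx dur %llu\n", cpu, e->iotype, e->vaddr,
                    e->paddr, e->size, e->val, e->duration);
        }
}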
*/ typedef struct { - pcid_t cpu_pcid_free_hint; -#define PMAP_PCID_MAX_PCID (0x800) - pcid_ref_t cpu_pcid_refcounts[PMAP_PCID_MAX_PCID]; - pmap_t cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID]; + pcid_t cpu_pcid_free_hint; +#define PMAP_PCID_MAX_PCID (0x800) + pcid_ref_t cpu_pcid_refcounts[PMAP_PCID_MAX_PCID]; + pmap_t cpu_pcid_last_pmap_dispatched[PMAP_PCID_MAX_PCID]; } pcid_cdata_t; -typedef struct cpu_data -{ - struct pal_cpu_data cpu_pal_data; /* PAL-specific data */ -#define cpu_pd cpu_pal_data /* convenience alias */ - struct cpu_data *cpu_this; /* pointer to myself */ - thread_t cpu_active_thread; - thread_t cpu_nthread; - volatile int cpu_preemption_level; - int cpu_number; /* Logical CPU */ - void *cpu_int_state; /* interrupt state */ - vm_offset_t cpu_active_stack; /* kernel stack base */ - vm_offset_t cpu_kernel_stack; /* kernel stack top */ - vm_offset_t cpu_int_stack_top; - int cpu_interrupt_level; - volatile int cpu_signals; /* IPI events */ - volatile int cpu_prior_signals; /* Last set of events, - * debugging - */ - ast_t cpu_pending_ast; - volatile int cpu_running; +typedef struct cpu_data { + struct pal_cpu_data cpu_pal_data; /* PAL-specific data */ +#define cpu_pd cpu_pal_data /* convenience alias */ + struct cpu_data *cpu_this; /* pointer to myself */ + thread_t cpu_active_thread; + thread_t cpu_nthread; + volatile int cpu_preemption_level; + int cpu_number; /* Logical CPU */ + void *cpu_int_state; /* interrupt state */ + vm_offset_t cpu_active_stack; /* kernel stack base */ + vm_offset_t cpu_kernel_stack; /* kernel stack top */ + vm_offset_t cpu_int_stack_top; + int cpu_interrupt_level; + volatile int cpu_signals; /* IPI events */ + volatile int cpu_prior_signals; /* Last set of events, + * debugging + */ + ast_t cpu_pending_ast; + volatile int cpu_running; #if !MONOTONIC - boolean_t cpu_fixed_pmcs_enabled; + boolean_t cpu_fixed_pmcs_enabled; #endif /* !MONOTONIC */ - rtclock_timer_t rtclock_timer; - uint64_t quantum_timer_deadline; - volatile addr64_t cpu_active_cr3 __attribute((aligned(64))); + rtclock_timer_t rtclock_timer; + uint64_t quantum_timer_deadline; + volatile addr64_t cpu_active_cr3 __attribute((aligned(64))); union { volatile uint32_t cpu_tlb_invalid; struct { @@ -175,41 +208,56 @@ typedef struct cpu_data volatile uint16_t cpu_tlb_invalid_global; }; }; - volatile task_map_t cpu_task_map; - volatile addr64_t cpu_task_cr3; - addr64_t cpu_kernel_cr3; + uint64_t cpu_ip_desc[2]; + volatile task_map_t cpu_task_map; + volatile addr64_t cpu_task_cr3; + addr64_t cpu_kernel_cr3; volatile addr64_t cpu_ucr3; - boolean_t cpu_pagezero_mapped; - cpu_uber_t cpu_uber; + volatile addr64_t cpu_shadowtask_cr3; + boolean_t cpu_pagezero_mapped; + cpu_uber_t cpu_uber; /* Double-mapped per-CPU exception stack address */ - uintptr_t cd_estack; - int cpu_xstate; + uintptr_t cd_estack; + int cpu_xstate; + int cpu_curtask_has_ldt; + int cpu_curthread_do_segchk; /* Address of shadowed, partially mirrored CPU data structures located * in the double mapped PML4 */ - void *cd_shadow; - struct processor *cpu_processor; + void *cd_shadow; + union { + volatile uint32_t cpu_tlb_invalid_count; + struct { + volatile uint16_t cpu_tlb_invalid_local_count; + volatile uint16_t cpu_tlb_invalid_global_count; + }; + }; + + uint16_t cpu_tlb_gen_counts_local[MAX_CPUS]; + uint16_t cpu_tlb_gen_counts_global[MAX_CPUS]; + + struct processor *cpu_processor; #if NCOPY_WINDOWS > 0 - struct cpu_pmap *cpu_pmap; + struct cpu_pmap *cpu_pmap; #endif - struct real_descriptor *cpu_ldtp; - struct cpu_desc_table 
*cpu_desc_tablep; - cpu_desc_index_t cpu_desc_index; - int cpu_ldt; + struct real_descriptor *cpu_ldtp; + struct cpu_desc_table *cpu_desc_tablep; + cpu_desc_index_t cpu_desc_index; + int cpu_ldt; #if NCOPY_WINDOWS > 0 - vm_offset_t cpu_copywindow_base; - uint64_t *cpu_copywindow_pdp; + vm_offset_t cpu_copywindow_base; + uint64_t *cpu_copywindow_pdp; - vm_offset_t cpu_physwindow_base; - uint64_t *cpu_physwindow_ptep; + vm_offset_t cpu_physwindow_base; + uint64_t *cpu_physwindow_ptep; #endif #define HWINTCNT_SIZE 256 - uint32_t cpu_hwIntCnt[HWINTCNT_SIZE]; /* Interrupt counts */ - uint64_t cpu_hwIntpexits[HWINTCNT_SIZE]; - uint64_t cpu_dr7; /* debug control register */ - uint64_t cpu_int_event_time; /* intr entry/exit time */ - pal_rtc_nanotime_t *cpu_nanotime; /* Nanotime info */ + uint32_t cpu_hwIntCnt[HWINTCNT_SIZE]; /* Interrupt counts */ + uint64_t cpu_hwIntpexits[HWINTCNT_SIZE]; + uint64_t cpu_dr7; /* debug control register */ + uint64_t cpu_int_event_time; /* intr entry/exit time */ + pal_rtc_nanotime_t *cpu_nanotime; /* Nanotime info */ #if KPC /* double-buffered performance counter data */ uint64_t *cpu_kpc_buf[2]; @@ -220,142 +268,142 @@ typedef struct cpu_data #if MONOTONIC struct mt_cpu cpu_monotonic; #endif /* MONOTONIC */ - uint32_t cpu_pmap_pcid_enabled; - pcid_t cpu_active_pcid; - pcid_t cpu_last_pcid; - pcid_t cpu_kernel_pcid; - volatile pcid_ref_t *cpu_pmap_pcid_coherentp; - volatile pcid_ref_t *cpu_pmap_pcid_coherentp_kernel; - pcid_cdata_t *cpu_pcid_data; -#ifdef PCID_STATS - uint64_t cpu_pmap_pcid_flushes; - uint64_t cpu_pmap_pcid_preserves; + uint32_t cpu_pmap_pcid_enabled; + pcid_t cpu_active_pcid; + pcid_t cpu_last_pcid; + pcid_t cpu_kernel_pcid; + volatile pcid_ref_t *cpu_pmap_pcid_coherentp; + volatile pcid_ref_t *cpu_pmap_pcid_coherentp_kernel; + pcid_cdata_t *cpu_pcid_data; +#ifdef PCID_STATS + uint64_t cpu_pmap_pcid_flushes; + uint64_t cpu_pmap_pcid_preserves; #endif - uint64_t cpu_aperf; - uint64_t cpu_mperf; - uint64_t cpu_c3res; - uint64_t cpu_c6res; - uint64_t cpu_c7res; - uint64_t cpu_itime_total; - uint64_t cpu_rtime_total; - uint64_t cpu_ixtime; + uint64_t cpu_aperf; + uint64_t cpu_mperf; + uint64_t cpu_c3res; + uint64_t cpu_c6res; + uint64_t cpu_c7res; + uint64_t cpu_itime_total; + uint64_t cpu_rtime_total; + uint64_t cpu_ixtime; uint64_t cpu_idle_exits; - uint64_t cpu_rtimes[CPU_RTIME_BINS]; - uint64_t cpu_itimes[CPU_ITIME_BINS]; + uint64_t cpu_rtimes[CPU_RTIME_BINS]; + uint64_t cpu_itimes[CPU_ITIME_BINS]; #if !MONOTONIC - uint64_t cpu_cur_insns; - uint64_t cpu_cur_ucc; - uint64_t cpu_cur_urc; + uint64_t cpu_cur_insns; + uint64_t cpu_cur_ucc; + uint64_t cpu_cur_urc; #endif /* !MONOTONIC */ - uint64_t cpu_gpmcs[4]; + uint64_t cpu_gpmcs[4]; uint64_t cpu_max_observed_int_latency; int cpu_max_observed_int_latency_vector; - volatile boolean_t cpu_NMI_acknowledged; - uint64_t debugger_entry_time; - uint64_t debugger_ipi_time; + volatile boolean_t cpu_NMI_acknowledged; + uint64_t debugger_entry_time; + uint64_t debugger_ipi_time; /* A separate nested interrupt stack flag, to account * for non-nested interrupts arriving while on the interrupt stack * Currently only occurs when AICPM enables interrupts on the * interrupt stack during processor offlining. 
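One structural note on the two anonymous unions in the structure above: overlaying a 32-bit word on a pair of 16-bit fields lets callers test or update the local and global TLB-invalidation flags (and, in the new second union, the invalidation counts) with a single aligned access instead of two. A sketch of the idiom with field names from the patch; the helpers themselves are illustrative, not xnu's:

/* Illustrative only; cdp points at a cpu_data_t as declared above. */
static inline void
tlb_flags_set_both(cpu_data_t *cdp)
{
        /* x86 is little-endian: the low half is cpu_tlb_invalid_local and
         * the high half cpu_tlb_invalid_global, so one 32-bit store
         * raises both flags at once. */
        cdp->cpu_tlb_invalid = 0x00010001u;
}

static inline boolean_t
tlb_flags_any(cpu_data_t *cdp)
{
        /* One 32-bit load tests both halves. */
        return cdp->cpu_tlb_invalid != 0;
}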
*/ - uint32_t cpu_nested_istack; - uint32_t cpu_nested_istack_events; - x86_saved_state64_t *cpu_fatal_trap_state; - x86_saved_state64_t *cpu_post_fatal_trap_state; + uint32_t cpu_nested_istack; + uint32_t cpu_nested_istack_events; + x86_saved_state64_t *cpu_fatal_trap_state; + x86_saved_state64_t *cpu_post_fatal_trap_state; #if CONFIG_VMX - vmx_cpu_t cpu_vmx; /* wonderful world of virtualization */ + vmx_cpu_t cpu_vmx; /* wonderful world of virtualization */ #endif #if CONFIG_MCA - struct mca_state *cpu_mca_state; /* State at MC fault */ + struct mca_state *cpu_mca_state; /* State at MC fault */ #endif - int cpu_type; - int cpu_subtype; - int cpu_threadtype; - boolean_t cpu_iflag; - boolean_t cpu_boot_complete; - int cpu_hibernate; + int cpu_type; + int cpu_subtype; + int cpu_threadtype; + boolean_t cpu_iflag; + boolean_t cpu_boot_complete; + int cpu_hibernate; #define MAX_PREEMPTION_RECORDS (8) -#if DEVELOPMENT || DEBUG - int cpu_plri; - plrecord_t plrecords[MAX_PREEMPTION_RECORDS]; +#if DEVELOPMENT || DEBUG + int cpu_plri; + plrecord_t plrecords[MAX_PREEMPTION_RECORDS]; #endif - void *cpu_console_buf; - struct x86_lcpu lcpu; - int cpu_phys_number; /* Physical CPU */ - cpu_id_t cpu_id; /* Platform Expert */ + void *cpu_console_buf; + struct x86_lcpu lcpu; + int cpu_phys_number; /* Physical CPU */ + cpu_id_t cpu_id; /* Platform Expert */ #if DEBUG - uint64_t cpu_entry_cr3; - uint64_t cpu_exit_cr3; - uint64_t cpu_pcid_last_cr3; + uint64_t cpu_entry_cr3; + uint64_t cpu_exit_cr3; + uint64_t cpu_pcid_last_cr3; #endif - boolean_t cpu_rendezvous_in_progress; + boolean_t cpu_rendezvous_in_progress; } cpu_data_t; -extern cpu_data_t *cpu_data_ptr[]; +extern cpu_data_t *cpu_data_ptr[]; /* Macro to generate inline bodies to retrieve per-cpu data fields. */ #if defined(__clang__) #define GS_RELATIVE volatile __attribute__((address_space(256))) #ifndef offsetof -#define offsetof(TYPE,MEMBER) __builtin_offsetof(TYPE,MEMBER) +#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE,MEMBER) #endif -#define CPU_DATA_GET(member,type) \ - cpu_data_t GS_RELATIVE *cpu_data = \ - (cpu_data_t GS_RELATIVE *)0UL; \ - type ret; \ - ret = cpu_data->member; \ +#define CPU_DATA_GET(member, type) \ + cpu_data_t GS_RELATIVE *cpu_data = \ + (cpu_data_t GS_RELATIVE *)0UL; \ + type ret; \ + ret = cpu_data->member; \ return ret; -#define CPU_DATA_GET_INDEX(member,index,type) \ - cpu_data_t GS_RELATIVE *cpu_data = \ - (cpu_data_t GS_RELATIVE *)0UL; \ - type ret; \ - ret = cpu_data->member[index]; \ +#define CPU_DATA_GET_INDEX(member, index, type) \ + cpu_data_t GS_RELATIVE *cpu_data = \ + (cpu_data_t GS_RELATIVE *)0UL; \ + type ret; \ + ret = cpu_data->member[index]; \ return ret; -#define CPU_DATA_SET(member,value) \ - cpu_data_t GS_RELATIVE *cpu_data = \ - (cpu_data_t GS_RELATIVE *)0UL; \ +#define CPU_DATA_SET(member, value) \ + cpu_data_t GS_RELATIVE *cpu_data = \ + (cpu_data_t GS_RELATIVE *)0UL; \ cpu_data->member = value; -#define CPU_DATA_XCHG(member,value,type) \ - cpu_data_t GS_RELATIVE *cpu_data = \ - (cpu_data_t GS_RELATIVE *)0UL; \ - type ret; \ - ret = cpu_data->member; \ - cpu_data->member = value; \ +#define CPU_DATA_XCHG(member, value, type) \ + cpu_data_t GS_RELATIVE *cpu_data = \ + (cpu_data_t GS_RELATIVE *)0UL; \ + type ret; \ + ret = cpu_data->member; \ + cpu_data->member = value; \ return ret; #else /* !defined(__clang__) */ #ifndef offsetof -#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif /* offsetof */ -#define 
CPU_DATA_GET(member,type) \ - type ret; \ - __asm__ volatile ("mov %%gs:%P1,%0" \ - : "=r" (ret) \ - : "i" (offsetof(cpu_data_t,member))); \ +#define CPU_DATA_GET(member, type) \ + type ret; \ + __asm__ volatile ("mov %%gs:%P1,%0" \ + : "=r" (ret) \ + : "i" (offsetof(cpu_data_t,member))); \ return ret; -#define CPU_DATA_GET_INDEX(member,index,type) \ - type ret; \ - __asm__ volatile ("mov %%gs:(%1),%0" \ - : "=r" (ret) \ - : "r" (offsetof(cpu_data_t,member[index]))); \ +#define CPU_DATA_GET_INDEX(member, index, type) \ + type ret; \ + __asm__ volatile ("mov %%gs:(%1),%0" \ + : "=r" (ret) \ + : "r" (offsetof(cpu_data_t,member[index]))); \ return ret; -#define CPU_DATA_SET(member,value) \ - __asm__ volatile ("mov %0,%%gs:%P1" \ - : \ - : "r" (value), "i" (offsetof(cpu_data_t,member))); +#define CPU_DATA_SET(member, value) \ + __asm__ volatile ("mov %0,%%gs:%P1" \ + : \ + : "r" (value), "i" (offsetof(cpu_data_t,member))); -#define CPU_DATA_XCHG(member,value,type) \ - type ret; \ - __asm__ volatile ("xchg %0,%%gs:%P1" \ - : "=r" (ret) \ - : "i" (offsetof(cpu_data_t,member)), "0" (value)); \ +#define CPU_DATA_XCHG(member, value, type) \ + type ret; \ + __asm__ volatile ("xchg %0,%%gs:%P1" \ + : "=r" (ret) \ + : "i" (offsetof(cpu_data_t,member)), "0" (value)); \ return ret; #endif /* !defined(__clang__) */ @@ -385,44 +433,45 @@ extern cpu_data_t *cpu_data_ptr[]; static inline thread_t get_active_thread_volatile(void) { - CPU_DATA_GET(cpu_active_thread,thread_t) + CPU_DATA_GET(cpu_active_thread, thread_t) } static inline __pure2 thread_t get_active_thread(void) { - CPU_DATA_GET(cpu_active_thread,thread_t) + CPU_DATA_GET(cpu_active_thread, thread_t) } -#define current_thread_fast() get_active_thread() -#define current_thread_volatile() get_active_thread_volatile() -#define current_thread() current_thread_fast() +#define current_thread_fast() get_active_thread() +#define current_thread_volatile() get_active_thread_volatile() +#define current_thread() current_thread_fast() -#define cpu_mode_is64bit() TRUE +#define cpu_mode_is64bit() TRUE static inline int get_preemption_level(void) { - CPU_DATA_GET(cpu_preemption_level,int) + CPU_DATA_GET(cpu_preemption_level, int) } static inline int get_interrupt_level(void) { - CPU_DATA_GET(cpu_interrupt_level,int) + CPU_DATA_GET(cpu_interrupt_level, int) } static inline int get_cpu_number(void) { - CPU_DATA_GET(cpu_number,int) + CPU_DATA_GET(cpu_number, int) } static inline int get_cpu_phys_number(void) { - CPU_DATA_GET(cpu_phys_number,int) + CPU_DATA_GET(cpu_phys_number, int) } static inline cpu_data_t * -current_cpu_datap(void) { +current_cpu_datap(void) +{ CPU_DATA_GET(cpu_this, cpu_data_t *); } @@ -439,55 +488,23 @@ current_cpu_datap(void) { * The bounds check currently doesn't account for non-default thread stack sizes. 
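The two CPU_DATA_GET families above generate the same code. Under clang, __attribute__((address_space(256))) maps dereferences onto the %gs segment (on x86-64 clang, address space 256 is %gs and 257 is %fs), so reading a member through a null-based pointer compiles to the single gs-relative mov that the GCC-style asm writes out explicitly. A standalone sketch with a toy stand-in for cpu_data_t:

/* Toy stand-in; the real layout is the cpu_data_t defined above. */
typedef struct {
        void *cpu_this;
        int   cpu_preemption_level;
        int   cpu_number;
} toy_cpu_data_t;

#define TOY_GS_RELATIVE volatile __attribute__((address_space(256)))

static inline int
toy_get_cpu_number(void)
{
        /* A zero-based %gs pointer: the load below assembles to
         * movl %gs:12, %eax (12 == offsetof(toy_cpu_data_t, cpu_number)). */
        toy_cpu_data_t TOY_GS_RELATIVE *cpu_data =
            (toy_cpu_data_t TOY_GS_RELATIVE *)0UL;
        return cpu_data->cpu_number;
}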
*/ #if DEVELOPMENT || DEBUG -static inline void pltrace_bt(uint64_t *rets, int maxframes, uint64_t stacklo, uint64_t stackhi) { - uint64_t *cfp = (uint64_t *) __builtin_frame_address(0); - int plbtf; - - assert(stacklo !=0 && stackhi !=0); - - for (plbtf = 0; plbtf < maxframes; plbtf++) { - if (((uint64_t)cfp == 0) || (((uint64_t)cfp < stacklo) || ((uint64_t)cfp > stackhi))) { - rets[plbtf] = 0; - continue; - } - rets[plbtf] = *(cfp + 1); - cfp = (uint64_t *) (*cfp); - } -} - - -extern uint32_t low_intstack[]; /* bottom */ -extern uint32_t low_eintstack[]; /* top */ -extern char mp_slave_stack[PAGE_SIZE]; - -static inline void pltrace_internal(boolean_t enable) { - cpu_data_t *cdata = current_cpu_datap(); - int cpli = cdata->cpu_preemption_level; - int cplrecord = cdata->cpu_plri; - uint64_t kstackb, kstackt, *plbts; - - assert(cpli >= 0); - - cdata->plrecords[cplrecord].pltype = enable; - cdata->plrecords[cplrecord].plevel = cpli; - - plbts = &cdata->plrecords[cplrecord].plbt[0]; +static inline void +rbtrace_bt(uint64_t *rets, int maxframes, cpu_data_t *cdata) +{ + extern uint32_t low_intstack[]; /* bottom */ + extern uint32_t low_eintstack[]; /* top */ + extern char mp_slave_stack[]; - cplrecord++; + uint64_t kstackb, kstackt; - if (cplrecord >= MAX_PREEMPTION_RECORDS) { - cplrecord = 0; - } - - cdata->cpu_plri = cplrecord; /* Obtain the 'current' program counter, initial backtrace * element. This will also indicate if we were unable to * trace further up the stack for some reason */ - __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" - : "=m" (plbts[0]) - : - : "rax"); + __asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" + : "=m" (rets[0]) + : + : "rax"); thread_t cplthread = cdata->cpu_active_thread; @@ -502,31 +519,103 @@ static inline void pltrace_internal(boolean_t enable) { kstackt = kstackb + KERNEL_STACK_SIZE; if (csp < kstackb || csp > kstackt) { kstackt = cdata->cpu_kernel_stack; - kstackb = kstackb - KERNEL_STACK_SIZE; + kstackb = kstackt - KERNEL_STACK_SIZE; if (csp < kstackb || csp > kstackt) { kstackt = cdata->cpu_int_stack_top; kstackb = kstackt - INTSTACK_SIZE; if (csp < kstackb || csp > kstackt) { kstackt = (uintptr_t)low_eintstack; - kstackb = (uintptr_t)low_eintstack - INTSTACK_SIZE; + kstackb = kstackt - INTSTACK_SIZE; if (csp < kstackb || csp > kstackt) { kstackb = (uintptr_t) mp_slave_stack; - kstackt = (uintptr_t) mp_slave_stack + PAGE_SIZE; + kstackt = kstackb + PAGE_SIZE; + } else { + kstackb = 0; + kstackt = 0; } } } } - if (kstackb) { - pltrace_bt(&plbts[1], MAXPLFRAMES - 1, kstackb, kstackt); + if (__probable(kstackb && kstackt)) { + uint64_t *cfp = (uint64_t *) __builtin_frame_address(0); + int rbbtf; + + for (rbbtf = 1; rbbtf < maxframes; rbbtf++) { + if (((uint64_t)cfp == 0) || (((uint64_t)cfp < kstackb) || ((uint64_t)cfp > kstackt))) { + rets[rbbtf] = 0; + continue; + } + rets[rbbtf] = *(cfp + 1); + cfp = (uint64_t *) (*cfp); + } } } } +static inline void +pltrace_internal(boolean_t enable) +{ + cpu_data_t *cdata = current_cpu_datap(); + int cpli = cdata->cpu_preemption_level; + int cplrecord = cdata->cpu_plri; + uint64_t *plbts; + + assert(cpli >= 0); + + cdata->plrecords[cplrecord].pltype = enable; + cdata->plrecords[cplrecord].plevel = cpli; + + plbts = &cdata->plrecords[cplrecord].plbt[0]; + + cplrecord++; + + if (cplrecord >= MAX_PREEMPTION_RECORDS) { + cplrecord = 0; + } + + cdata->cpu_plri = cplrecord; + + rbtrace_bt(plbts, MAX_TRACE_BTFRAMES - 1, cdata); +} + extern int plctrace_enabled; + +static inline void 
+iotrace(iotrace_type_e type, uint64_t vaddr, uint64_t paddr, int size, uint64_t val, + uint64_t sabs, uint64_t duration) +{ + cpu_data_t *cdata; + int cpu_num, nextidx; + iotrace_entry_t *cur_iotrace_ring; + + if (__improbable(mmiotrace_enabled == 0 || iotrace_generators == 0)) { + return; + } + + cdata = current_cpu_datap(); + cpu_num = cdata->cpu_number; + nextidx = iotrace_next[cpu_num]; + cur_iotrace_ring = iotrace_ring[cpu_num]; + + cur_iotrace_ring[nextidx].iotype = type; + cur_iotrace_ring[nextidx].vaddr = vaddr; + cur_iotrace_ring[nextidx].paddr = paddr; + cur_iotrace_ring[nextidx].size = size; + cur_iotrace_ring[nextidx].val = val; + cur_iotrace_ring[nextidx].start_time_abs = sabs; + cur_iotrace_ring[nextidx].duration = duration; + + iotrace_next[cpu_num] = ((nextidx + 1) >= iotrace_entries_per_cpu) ? 0 : (nextidx + 1); + + rbtrace_bt(&cur_iotrace_ring[nextidx].backtrace[0], + MAX_TRACE_BTFRAMES - 1, cdata); +} #endif /* DEVELOPMENT || DEBUG */ -static inline void pltrace(boolean_t plenable) { +static inline void +pltrace(boolean_t plenable) +{ #if DEVELOPMENT || DEBUG if (__improbable(plctrace_enabled != 0)) { pltrace_internal(plenable); @@ -537,7 +626,8 @@ static inline void pltrace(boolean_t plenable) { } static inline void -disable_preemption_internal(void) { +disable_preemption_internal(void) +{ assert(get_preemption_level() >= 0); #if defined(__clang__) @@ -545,28 +635,30 @@ disable_preemption_internal(void) { cpu_data->cpu_preemption_level++; #else __asm__ volatile ("incl %%gs:%P0" - : - : "i" (offsetof(cpu_data_t, cpu_preemption_level))); + : + : "i" (offsetof(cpu_data_t, cpu_preemption_level))); #endif pltrace(FALSE); } static inline void -enable_preemption_internal(void) { +enable_preemption_internal(void) +{ assert(get_preemption_level() > 0); pltrace(TRUE); #if defined(__clang__) cpu_data_t GS_RELATIVE *cpu_data = (cpu_data_t GS_RELATIVE *)0UL; - if (0 == --cpu_data->cpu_preemption_level) + if (0 == --cpu_data->cpu_preemption_level) { kernel_preempt_check(); + } #else __asm__ volatile ("decl %%gs:%P0 \n\t" - "jne 1f \n\t" - "call _kernel_preempt_check \n\t" - "1:" - : /* no outputs */ - : "i" (offsetof(cpu_data_t, cpu_preemption_level)) - : "eax", "ecx", "edx", "cc", "memory"); + "jne 1f \n\t" + "call _kernel_preempt_check \n\t" + "1:" + : /* no outputs */ + : "i" (offsetof(cpu_data_t, cpu_preemption_level)) + : "eax", "ecx", "edx", "cc", "memory"); #endif } @@ -581,14 +673,15 @@ enable_preemption_no_check(void) cpu_data->cpu_preemption_level--; #else __asm__ volatile ("decl %%gs:%P0" - : /* no outputs */ - : "i" (offsetof(cpu_data_t, cpu_preemption_level)) - : "cc", "memory"); + : /* no outputs */ + : "i" (offsetof(cpu_data_t, cpu_preemption_level)) + : "cc", "memory"); #endif } static inline void -_enable_preemption_no_check(void) { +_enable_preemption_no_check(void) +{ enable_preemption_no_check(); } @@ -611,17 +704,20 @@ mp_enable_preemption(void) } static inline void -_mp_enable_preemption(void) { +_mp_enable_preemption(void) +{ enable_preemption_internal(); } static inline void -mp_enable_preemption_no_check(void) { +mp_enable_preemption_no_check(void) +{ enable_preemption_no_check(); } static inline void -_mp_enable_preemption_no_check(void) { +_mp_enable_preemption_no_check(void) +{ enable_preemption_no_check(); } @@ -632,18 +728,21 @@ _mp_enable_preemption_no_check(void) { #endif static inline cpu_data_t * -cpu_datap(int cpu) { +cpu_datap(int cpu) +{ return cpu_data_ptr[cpu]; } static inline int -cpu_is_running(int cpu) { - return ((cpu_datap(cpu) != NULL) && 
(cpu_datap(cpu)->cpu_running)); +cpu_is_running(int cpu) +{ + return (cpu_datap(cpu) != NULL) && (cpu_datap(cpu)->cpu_running); } #ifdef MACH_KERNEL_PRIVATE static inline cpu_data_t * -cpu_shadowp(int cpu) { +cpu_shadowp(int cpu) +{ return cpu_data_ptr[cpu]->cd_shadow; } @@ -651,4 +750,4 @@ cpu_shadowp(int cpu) { extern cpu_data_t *cpu_data_alloc(boolean_t is_boot_cpu); extern void cpu_data_realloc(void); -#endif /* I386_CPU_DATA */ +#endif /* I386_CPU_DATA */ diff --git a/osfmk/i386/cpu_number.h b/osfmk/i386/cpu_number.h index b0348fce6..d58c4e982 100644 --- a/osfmk/i386/cpu_number.h +++ b/osfmk/i386/cpu_number.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,23 +61,22 @@ * Machine-dependent definitions for cpu identification. * */ -#ifndef _I386_CPU_NUMBER_H_ -#define _I386_CPU_NUMBER_H_ +#ifndef _I386_CPU_NUMBER_H_ +#define _I386_CPU_NUMBER_H_ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE /* Use a function to do this less directly. 
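Taken together, the preemption inlines above implement cheap counted nesting: each disable is a single %gs-relative incl of cpu_preemption_level, and only the enable that brings the count back to zero calls kernel_preempt_check(). A usage sketch; the wrapper function is hypothetical:

/* Hypothetical caller illustrating the nesting discipline. */
static void
touch_this_cpus_data(void)
{
        disable_preemption_internal();  /* level 0 -> 1 */
        cpu_data_t *cdp = current_cpu_datap();

        disable_preemption_internal();  /* nested, 1 -> 2: just an incl */
        uint32_t n = cdp->cpu_hwIntCnt[0x20];   /* count for vector 0x20;
                                                 * safe: we cannot migrate */
        (void)n;
        enable_preemption_internal();   /* 2 -> 1: no preemption check */

        enable_preemption_internal();   /* 1 -> 0: kernel_preempt_check() runs */
}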
*/ -extern int cpu_number(void); +extern int cpu_number(void); #ifdef MACH_KERNEL_PRIVATE #include /* Get the cpu number directly from the pre-processor data area */ -#define cpu_number() get_cpu_number() - -#endif /* !MACH_KERNEL_PRIVATE */ +#define cpu_number() get_cpu_number() -#endif /* KERNEL_PRIVATE */ +#endif /* !MACH_KERNEL_PRIVATE */ -#endif /* _I386_CPU_NUMBER_H_ */ +#endif /* KERNEL_PRIVATE */ +#endif /* _I386_CPU_NUMBER_H_ */ diff --git a/osfmk/i386/cpu_threads.c b/osfmk/i386/cpu_threads.c index 92f34b8fd..890a6aa1f 100644 --- a/osfmk/i386/cpu_threads.c +++ b/osfmk/i386/cpu_threads.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include #include +#include #include #include #include @@ -39,224 +40,228 @@ #include #endif /* MONOTONIC */ -#define DIVISOR_GUARD(denom) \ - if ((denom) == 0) { \ - kprintf("%s: %d Zero divisor: " #denom, \ - __FILE__, __LINE__); \ +#define DIVISOR_GUARD(denom) \ + if ((denom) == 0) { \ + kprintf("%s: %d Zero divisor: " #denom, \ + __FILE__, __LINE__); \ } static void debug_topology_print(void); -boolean_t topo_dbg = FALSE; +boolean_t topo_dbg = FALSE; -x86_pkg_t *x86_pkgs = NULL; -uint32_t num_Lx_caches[MAX_CACHE_DEPTH] = { 0 }; +x86_pkg_t *x86_pkgs = NULL; +uint32_t num_Lx_caches[MAX_CACHE_DEPTH] = { 0 }; -static x86_pkg_t *free_pkgs = NULL; -static x86_die_t *free_dies = NULL; -static x86_core_t *free_cores = NULL; -static uint32_t num_dies = 0; +static x86_pkg_t *free_pkgs = NULL; +static x86_die_t *free_dies = NULL; +static x86_core_t *free_cores = NULL; +static uint32_t num_dies = 0; -static x86_cpu_cache_t *x86_caches = NULL; -static uint32_t num_caches = 0; +static x86_cpu_cache_t *x86_caches = NULL; +static uint32_t num_caches = 0; -static boolean_t topoParmsInited = FALSE; -x86_topology_parameters_t topoParms; +static boolean_t topoParmsInited = FALSE; +x86_topology_parameters_t topoParms; decl_simple_lock_data(, x86_topo_lock); - + static struct cpu_cache { - int level; int type; -} cpu_caches [LCACHE_MAX] = { - [L1D] = { 1, CPU_CACHE_TYPE_DATA }, - [L1I] = { 1, CPU_CACHE_TYPE_INST }, - [L2U] = { 2, CPU_CACHE_TYPE_UNIF }, - [L3U] = { 3, CPU_CACHE_TYPE_UNIF }, + int level; int type; +} cpu_caches[LCACHE_MAX] = { + [L1D] = { 1, CPU_CACHE_TYPE_DATA }, + [L1I] = { 1, CPU_CACHE_TYPE_INST }, + [L2U] = { 2, CPU_CACHE_TYPE_UNIF }, + [L3U] = { 3, CPU_CACHE_TYPE_UNIF }, }; static boolean_t cpu_is_hyperthreaded(void) { - i386_cpu_info_t *cpuinfo; + 
i386_cpu_info_t *cpuinfo; - cpuinfo = cpuid_info(); - return(cpuinfo->thread_count > cpuinfo->core_count); + cpuinfo = cpuid_info(); + return cpuinfo->thread_count > cpuinfo->core_count; } static x86_cpu_cache_t * x86_cache_alloc(void) { - x86_cpu_cache_t *cache; - int i; - - if (x86_caches == NULL) { - cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *))); - if (cache == NULL) - return(NULL); - } else { - cache = x86_caches; - x86_caches = cache->next; - cache->next = NULL; - } + x86_cpu_cache_t *cache; + int i; + + if (x86_caches == NULL) { + cache = kalloc(sizeof(x86_cpu_cache_t) + (MAX_CPUS * sizeof(x86_lcpu_t *))); + if (cache == NULL) { + return NULL; + } + } else { + cache = x86_caches; + x86_caches = cache->next; + cache->next = NULL; + } - bzero(cache, sizeof(x86_cpu_cache_t)); - cache->next = NULL; - cache->maxcpus = MAX_CPUS; - for (i = 0; i < cache->maxcpus; i += 1) { - cache->cpus[i] = NULL; - } + bzero(cache, sizeof(x86_cpu_cache_t)); + cache->next = NULL; + cache->maxcpus = MAX_CPUS; + for (i = 0; i < cache->maxcpus; i += 1) { + cache->cpus[i] = NULL; + } - num_caches += 1; + num_caches += 1; - return(cache); + return cache; } - + static void x86_LLC_info(void) { - int cache_level = 0; - uint32_t nCPUsSharing = 1; - i386_cpu_info_t *cpuinfo; - struct cpu_cache *cachep; - int i; + int cache_level = 0; + uint32_t nCPUsSharing = 1; + i386_cpu_info_t *cpuinfo; + struct cpu_cache *cachep; + int i; - cpuinfo = cpuid_info(); + cpuinfo = cpuid_info(); - for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) { + for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) { + if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0) { + continue; + } - if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0) - continue; + /* + * Only worry about it if it's a deeper level than + * what we've seen before. + */ + if (cachep->level > cache_level) { + cache_level = cachep->level; + + /* + * Save the number of CPUs sharing this cache. + */ + nCPUsSharing = cpuinfo->cache_sharing[i]; + } + } /* - * Only worry about it if it's a deeper level than - * what we've seen before. + * Make the level of the LLC be 0 based. */ - if (cachep->level > cache_level) { - cache_level = cachep->level; + topoParms.LLCDepth = cache_level - 1; - /* - * Save the number of CPUs sharing this cache. - */ - nCPUsSharing = cpuinfo->cache_sharing[i]; + /* + * nCPUsSharing represents the *maximum* number of cores or + * logical CPUs sharing the cache. + */ + topoParms.maxSharingLLC = nCPUsSharing; + + topoParms.nCoresSharingLLC = nCPUsSharing / (cpuinfo->thread_count / + cpuinfo->core_count); + topoParms.nLCPUsSharingLLC = nCPUsSharing; + + /* + * nCPUsSharing may not be the number of *active* cores or + * threads that are sharing the cache. + */ + if (nCPUsSharing > cpuinfo->core_count) { + topoParms.nCoresSharingLLC = cpuinfo->core_count; + } + if (nCPUsSharing > cpuinfo->thread_count) { + topoParms.nLCPUsSharingLLC = cpuinfo->thread_count; } - } - - /* - * Make the level of the LLC be 0 based. - */ - topoParms.LLCDepth = cache_level - 1; - - /* - * nCPUsSharing represents the *maximum* number of cores or - * logical CPUs sharing the cache. - */ - topoParms.maxSharingLLC = nCPUsSharing; - - topoParms.nCoresSharingLLC = nCPUsSharing / (cpuinfo->thread_count / - cpuinfo->core_count); - topoParms.nLCPUsSharingLLC = nCPUsSharing; - - /* - * nCPUsSharing may not be the number of *active* cores or - * threads that are sharing the cache. 
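A worked example of the clamping just above, for a hypothetical quad-core hyperthreaded package whose CPUID leaf reports the L3 as shared by 16 logical CPUs (CPUID commonly reports the maximum addressable count rather than the populated one):

static void
llc_clamp_example(void)
{
        /* Hypothetical inputs, illustration only. */
        unsigned thread_count = 8, core_count = 4;
        unsigned nCPUsSharing = 16;     /* raw cache_sharing[] for the L3 */

        unsigned nCoresSharingLLC = nCPUsSharing / (thread_count / core_count);
        unsigned nLCPUsSharingLLC = nCPUsSharing;       /* 8 and 16 so far */

        if (nCPUsSharing > core_count) {
                nCoresSharingLLC = core_count;          /* clamped to 4 */
        }
        if (nCPUsSharing > thread_count) {
                nLCPUsSharingLLC = thread_count;        /* clamped to 8 */
        }
        (void)nCoresSharingLLC;
        (void)nLCPUsSharingLLC;
}

So the topology ends up with 4 active cores and 8 active logical CPUs on the LLC, not the 16 the raw leaf suggested.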
- */ - if (nCPUsSharing > cpuinfo->core_count) - topoParms.nCoresSharingLLC = cpuinfo->core_count; - if (nCPUsSharing > cpuinfo->thread_count) - topoParms.nLCPUsSharingLLC = cpuinfo->thread_count; } static void initTopoParms(void) { - i386_cpu_info_t *cpuinfo; - - topoParms.stable = FALSE; - - cpuinfo = cpuid_info(); - - PE_parse_boot_argn("-topo", &topo_dbg, sizeof(topo_dbg)); - - /* - * We need to start with getting the LLC information correct. - */ - x86_LLC_info(); - - /* - * Compute the number of threads (logical CPUs) per core. - */ - DIVISOR_GUARD(cpuinfo->core_count); - topoParms.nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count; - DIVISOR_GUARD(cpuinfo->cpuid_cores_per_package); - topoParms.nPThreadsPerCore = cpuinfo->cpuid_logical_per_package / cpuinfo->cpuid_cores_per_package; - - /* - * Compute the number of dies per package. - */ - DIVISOR_GUARD(topoParms.nCoresSharingLLC); - topoParms.nLDiesPerPackage = cpuinfo->core_count / topoParms.nCoresSharingLLC; - DIVISOR_GUARD(topoParms.nPThreadsPerCore); - DIVISOR_GUARD(topoParms.maxSharingLLC / topoParms.nPThreadsPerCore); - topoParms.nPDiesPerPackage = cpuinfo->cpuid_cores_per_package / (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore); - - - /* - * Compute the number of cores per die. - */ - topoParms.nLCoresPerDie = topoParms.nCoresSharingLLC; - topoParms.nPCoresPerDie = (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore); - - /* - * Compute the number of threads per die. - */ - topoParms.nLThreadsPerDie = topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie; - topoParms.nPThreadsPerDie = topoParms.nPThreadsPerCore * topoParms.nPCoresPerDie; - - /* - * Compute the number of cores per package. - */ - topoParms.nLCoresPerPackage = topoParms.nLCoresPerDie * topoParms.nLDiesPerPackage; - topoParms.nPCoresPerPackage = topoParms.nPCoresPerDie * topoParms.nPDiesPerPackage; - - /* - * Compute the number of threads per package. 
- */ - topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage; - topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage; - - TOPO_DBG("\nCache Topology Parameters:\n"); - TOPO_DBG("\tLLC Depth: %d\n", topoParms.LLCDepth); - TOPO_DBG("\tCores Sharing LLC: %d\n", topoParms.nCoresSharingLLC); - TOPO_DBG("\tThreads Sharing LLC: %d\n", topoParms.nLCPUsSharingLLC); - TOPO_DBG("\tmax Sharing of LLC: %d\n", topoParms.maxSharingLLC); - - TOPO_DBG("\nLogical Topology Parameters:\n"); - TOPO_DBG("\tThreads per Core: %d\n", topoParms.nLThreadsPerCore); - TOPO_DBG("\tCores per Die: %d\n", topoParms.nLCoresPerDie); - TOPO_DBG("\tThreads per Die: %d\n", topoParms.nLThreadsPerDie); - TOPO_DBG("\tDies per Package: %d\n", topoParms.nLDiesPerPackage); - TOPO_DBG("\tCores per Package: %d\n", topoParms.nLCoresPerPackage); - TOPO_DBG("\tThreads per Package: %d\n", topoParms.nLThreadsPerPackage); - - TOPO_DBG("\nPhysical Topology Parameters:\n"); - TOPO_DBG("\tThreads per Core: %d\n", topoParms.nPThreadsPerCore); - TOPO_DBG("\tCores per Die: %d\n", topoParms.nPCoresPerDie); - TOPO_DBG("\tThreads per Die: %d\n", topoParms.nPThreadsPerDie); - TOPO_DBG("\tDies per Package: %d\n", topoParms.nPDiesPerPackage); - TOPO_DBG("\tCores per Package: %d\n", topoParms.nPCoresPerPackage); - TOPO_DBG("\tThreads per Package: %d\n", topoParms.nPThreadsPerPackage); - - topoParmsInited = TRUE; + i386_cpu_info_t *cpuinfo; + + topoParms.stable = FALSE; + + cpuinfo = cpuid_info(); + + PE_parse_boot_argn("-topo", &topo_dbg, sizeof(topo_dbg)); + + /* + * We need to start with getting the LLC information correct. + */ + x86_LLC_info(); + + /* + * Compute the number of threads (logical CPUs) per core. + */ + DIVISOR_GUARD(cpuinfo->core_count); + topoParms.nLThreadsPerCore = cpuinfo->thread_count / cpuinfo->core_count; + DIVISOR_GUARD(cpuinfo->cpuid_cores_per_package); + topoParms.nPThreadsPerCore = cpuinfo->cpuid_logical_per_package / cpuinfo->cpuid_cores_per_package; + + /* + * Compute the number of dies per package. + */ + DIVISOR_GUARD(topoParms.nCoresSharingLLC); + topoParms.nLDiesPerPackage = cpuinfo->core_count / topoParms.nCoresSharingLLC; + DIVISOR_GUARD(topoParms.nPThreadsPerCore); + DIVISOR_GUARD(topoParms.maxSharingLLC / topoParms.nPThreadsPerCore); + topoParms.nPDiesPerPackage = cpuinfo->cpuid_cores_per_package / (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore); + + + /* + * Compute the number of cores per die. + */ + topoParms.nLCoresPerDie = topoParms.nCoresSharingLLC; + topoParms.nPCoresPerDie = (topoParms.maxSharingLLC / topoParms.nPThreadsPerCore); + + /* + * Compute the number of threads per die. + */ + topoParms.nLThreadsPerDie = topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie; + topoParms.nPThreadsPerDie = topoParms.nPThreadsPerCore * topoParms.nPCoresPerDie; + + /* + * Compute the number of cores per package. + */ + topoParms.nLCoresPerPackage = topoParms.nLCoresPerDie * topoParms.nLDiesPerPackage; + topoParms.nPCoresPerPackage = topoParms.nPCoresPerDie * topoParms.nPDiesPerPackage; + + /* + * Compute the number of threads per package. 
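To make the divisions in initTopoParms() concrete (including the per-package products computed just below), take a hypothetical single-die, quad-core, hyperthreaded package whose L3 sharing resolved to 8 logical CPUs: thread_count = 8, core_count = 4, cpuid_logical_per_package = 8, cpuid_cores_per_package = 4, maxSharingLLC = 8, nCoresSharingLLC = 4. Every parameter then falls out as:

    nLThreadsPerCore    = 8 / 4       = 2
    nPThreadsPerCore    = 8 / 4       = 2
    nLDiesPerPackage    = 4 / 4       = 1
    nPDiesPerPackage    = 4 / (8 / 2) = 1
    nLCoresPerDie       = 4  (= nCoresSharingLLC)
    nPCoresPerDie       = 8 / 2       = 4
    nLThreadsPerDie     = 2 * 4       = 8
    nPThreadsPerDie     = 2 * 4       = 8
    nLCoresPerPackage   = 4 * 1       = 4
    nPCoresPerPackage   = 4 * 1       = 4
    nLThreadsPerPackage = 2 * 4       = 8
    nPThreadsPerPackage = 2 * 4       = 8

The logical and physical views agree here because every core is active; they diverge when cores are disabled or CPUID over-reports sharing.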
+ */ + topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage; + topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage; + + TOPO_DBG("\nCache Topology Parameters:\n"); + TOPO_DBG("\tLLC Depth: %d\n", topoParms.LLCDepth); + TOPO_DBG("\tCores Sharing LLC: %d\n", topoParms.nCoresSharingLLC); + TOPO_DBG("\tThreads Sharing LLC: %d\n", topoParms.nLCPUsSharingLLC); + TOPO_DBG("\tmax Sharing of LLC: %d\n", topoParms.maxSharingLLC); + + TOPO_DBG("\nLogical Topology Parameters:\n"); + TOPO_DBG("\tThreads per Core: %d\n", topoParms.nLThreadsPerCore); + TOPO_DBG("\tCores per Die: %d\n", topoParms.nLCoresPerDie); + TOPO_DBG("\tThreads per Die: %d\n", topoParms.nLThreadsPerDie); + TOPO_DBG("\tDies per Package: %d\n", topoParms.nLDiesPerPackage); + TOPO_DBG("\tCores per Package: %d\n", topoParms.nLCoresPerPackage); + TOPO_DBG("\tThreads per Package: %d\n", topoParms.nLThreadsPerPackage); + + TOPO_DBG("\nPhysical Topology Parameters:\n"); + TOPO_DBG("\tThreads per Core: %d\n", topoParms.nPThreadsPerCore); + TOPO_DBG("\tCores per Die: %d\n", topoParms.nPCoresPerDie); + TOPO_DBG("\tThreads per Die: %d\n", topoParms.nPThreadsPerDie); + TOPO_DBG("\tDies per Package: %d\n", topoParms.nPDiesPerPackage); + TOPO_DBG("\tCores per Package: %d\n", topoParms.nPCoresPerPackage); + TOPO_DBG("\tThreads per Package: %d\n", topoParms.nPThreadsPerPackage); + + topoParmsInited = TRUE; } static void x86_cache_free(x86_cpu_cache_t *cache) { - num_caches -= 1; - if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH) - num_Lx_caches[cache->level - 1] -= 1; - cache->next = x86_caches; - x86_caches = cache; + num_caches -= 1; + if (cache->level > 0 && cache->level <= MAX_CACHE_DEPTH) { + num_Lx_caches[cache->level - 1] -= 1; + } + cache->next = x86_caches; + x86_caches = cache; } /* @@ -267,707 +272,719 @@ x86_cache_free(x86_cpu_cache_t *cache) static x86_cpu_cache_t * x86_cache_list(void) { - x86_cpu_cache_t *root = NULL; - x86_cpu_cache_t *cur = NULL; - x86_cpu_cache_t *last = NULL; - struct cpu_cache *cachep; - int i; - - /* - * Cons up a list driven not by CPUID leaf 4 (deterministic cache params) - * but by the table above plus parameters already cracked from cpuid... - */ - for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) { - - if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0) - continue; - - cur = x86_cache_alloc(); - if (cur == NULL) - break; - - cur->type = cachep->type; - cur->level = cachep->level; - cur->nlcpus = 0; - cur->maxcpus = cpuid_info()->cache_sharing[i]; - cur->partitions = cpuid_info()->cache_partitions[i]; - cur->cache_size = cpuid_info()->cache_size[i]; - cur->line_size = cpuid_info()->cache_linesize; - - if (last == NULL) { - root = cur; - last = cur; - } else { - last->next = cur; - last = cur; + x86_cpu_cache_t *root = NULL; + x86_cpu_cache_t *cur = NULL; + x86_cpu_cache_t *last = NULL; + struct cpu_cache *cachep; + int i; + + /* + * Cons up a list driven not by CPUID leaf 4 (deterministic cache params) + * but by the table above plus parameters already cracked from cpuid... 
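For the hypothetical package used in the examples above, the static cpu_caches[] table plus the cpuid-derived arrays would cons up a four-node list (sizes hypothetical, for illustration):

    L1D: level 1, CPU_CACHE_TYPE_DATA,  32 KB, cache_sharing = 2
    L1I: level 1, CPU_CACHE_TYPE_INST,  32 KB, cache_sharing = 2
    L2U: level 2, CPU_CACHE_TYPE_UNIF, 256 KB, cache_sharing = 2
    L3U: level 3, CPU_CACHE_TYPE_UNIF,   8 MB, cache_sharing = 8

Any slot whose cache_size[] is zero is skipped, so a part without, say, an L3 simply yields a shorter list.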
+ */ + for (i = 0, cachep = &cpu_caches[0]; i < LCACHE_MAX; i++, cachep++) { + if (cachep->type == 0 || cpuid_info()->cache_size[i] == 0) { + continue; + } + + cur = x86_cache_alloc(); + if (cur == NULL) { + break; + } + + cur->type = cachep->type; + cur->level = cachep->level; + cur->nlcpus = 0; + cur->maxcpus = cpuid_info()->cache_sharing[i]; + cur->partitions = cpuid_info()->cache_partitions[i]; + cur->cache_size = cpuid_info()->cache_size[i]; + cur->line_size = cpuid_info()->cache_linesize; + + if (last == NULL) { + root = cur; + last = cur; + } else { + last->next = cur; + last = cur; + } + num_Lx_caches[cur->level - 1] += 1; } - num_Lx_caches[cur->level - 1] += 1; - } - return root; + return root; } static x86_cpu_cache_t * x86_match_cache(x86_cpu_cache_t *list, x86_cpu_cache_t *matcher) { - x86_cpu_cache_t *cur_cache; - - cur_cache = list; - while (cur_cache != NULL) { - if (cur_cache->maxcpus == matcher->maxcpus - && cur_cache->type == matcher->type - && cur_cache->level == matcher->level - && cur_cache->partitions == matcher->partitions - && cur_cache->line_size == matcher->line_size - && cur_cache->cache_size == matcher->cache_size) - break; - - cur_cache = cur_cache->next; - } - - return(cur_cache); + x86_cpu_cache_t *cur_cache; + + cur_cache = list; + while (cur_cache != NULL) { + if (cur_cache->maxcpus == matcher->maxcpus + && cur_cache->type == matcher->type + && cur_cache->level == matcher->level + && cur_cache->partitions == matcher->partitions + && cur_cache->line_size == matcher->line_size + && cur_cache->cache_size == matcher->cache_size) { + break; + } + + cur_cache = cur_cache->next; + } + + return cur_cache; } static void x86_lcpu_init(int cpu) { - cpu_data_t *cpup; - x86_lcpu_t *lcpu; - int i; - - cpup = cpu_datap(cpu); - - lcpu = &cpup->lcpu; - lcpu->lcpu = lcpu; - lcpu->cpu = cpup; - lcpu->next_in_core = NULL; - lcpu->next_in_die = NULL; - lcpu->next_in_pkg = NULL; - lcpu->core = NULL; - lcpu->die = NULL; - lcpu->package = NULL; - lcpu->cpu_num = cpu; - lcpu->lnum = cpu; - lcpu->pnum = cpup->cpu_phys_number; - lcpu->state = LCPU_OFF; - for (i = 0; i < MAX_CACHE_DEPTH; i += 1) - lcpu->caches[i] = NULL; + cpu_data_t *cpup; + x86_lcpu_t *lcpu; + int i; + + cpup = cpu_datap(cpu); + + lcpu = &cpup->lcpu; + lcpu->lcpu = lcpu; + lcpu->cpu = cpup; + lcpu->next_in_core = NULL; + lcpu->next_in_die = NULL; + lcpu->next_in_pkg = NULL; + lcpu->core = NULL; + lcpu->die = NULL; + lcpu->package = NULL; + lcpu->cpu_num = cpu; + lcpu->lnum = cpu; + lcpu->pnum = cpup->cpu_phys_number; + lcpu->state = LCPU_OFF; + for (i = 0; i < MAX_CACHE_DEPTH; i += 1) { + lcpu->caches[i] = NULL; + } } static x86_core_t * x86_core_alloc(int cpu) { - x86_core_t *core; - cpu_data_t *cpup; + x86_core_t *core; + cpu_data_t *cpup; - cpup = cpu_datap(cpu); + cpup = cpu_datap(cpu); - mp_safe_spin_lock(&x86_topo_lock); - if (free_cores != NULL) { - core = free_cores; - free_cores = core->next_in_die; - core->next_in_die = NULL; - simple_unlock(&x86_topo_lock); - } else { - simple_unlock(&x86_topo_lock); - core = kalloc(sizeof(x86_core_t)); - if (core == NULL) - panic("x86_core_alloc() kalloc of x86_core_t failed!\n"); - } + mp_safe_spin_lock(&x86_topo_lock); + if (free_cores != NULL) { + core = free_cores; + free_cores = core->next_in_die; + core->next_in_die = NULL; + simple_unlock(&x86_topo_lock); + } else { + simple_unlock(&x86_topo_lock); + core = kalloc(sizeof(x86_core_t)); + if (core == NULL) { + panic("x86_core_alloc() kalloc of x86_core_t failed!\n"); + } + } - bzero((void *) core, 
sizeof(x86_core_t)); + bzero((void *) core, sizeof(x86_core_t)); - core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore; - core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage; + core->pcore_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore; + core->lcore_num = core->pcore_num % topoParms.nPCoresPerPackage; - core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY - | X86CORE_FL_HALTED | X86CORE_FL_IDLE; + core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY + | X86CORE_FL_HALTED | X86CORE_FL_IDLE; - return(core); + return core; } static void x86_core_free(x86_core_t *core) { - mp_safe_spin_lock(&x86_topo_lock); - core->next_in_die = free_cores; - free_cores = core; - simple_unlock(&x86_topo_lock); + mp_safe_spin_lock(&x86_topo_lock); + core->next_in_die = free_cores; + free_cores = core; + simple_unlock(&x86_topo_lock); } static x86_pkg_t * x86_package_find(int cpu) { - x86_pkg_t *pkg; - cpu_data_t *cpup; - uint32_t pkg_num; + x86_pkg_t *pkg; + cpu_data_t *cpup; + uint32_t pkg_num; - cpup = cpu_datap(cpu); + cpup = cpu_datap(cpu); - pkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage; + pkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage; - pkg = x86_pkgs; - while (pkg != NULL) { - if (pkg->ppkg_num == pkg_num) - break; - pkg = pkg->next; - } + pkg = x86_pkgs; + while (pkg != NULL) { + if (pkg->ppkg_num == pkg_num) { + break; + } + pkg = pkg->next; + } - return(pkg); + return pkg; } - + static x86_die_t * x86_die_find(int cpu) { - x86_die_t *die; - x86_pkg_t *pkg; - cpu_data_t *cpup; - uint32_t die_num; + x86_die_t *die; + x86_pkg_t *pkg; + cpu_data_t *cpup; + uint32_t die_num; - cpup = cpu_datap(cpu); + cpup = cpu_datap(cpu); - die_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie; + die_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie; - pkg = x86_package_find(cpu); - if (pkg == NULL) - return(NULL); + pkg = x86_package_find(cpu); + if (pkg == NULL) { + return NULL; + } - die = pkg->dies; - while (die != NULL) { - if (die->pdie_num == die_num) - break; - die = die->next_in_pkg; - } + die = pkg->dies; + while (die != NULL) { + if (die->pdie_num == die_num) { + break; + } + die = die->next_in_pkg; + } - return(die); + return die; } static x86_core_t * x86_core_find(int cpu) { - x86_core_t *core; - x86_die_t *die; - cpu_data_t *cpup; - uint32_t core_num; + x86_core_t *core; + x86_die_t *die; + cpu_data_t *cpup; + uint32_t core_num; - cpup = cpu_datap(cpu); + cpup = cpu_datap(cpu); - core_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore; + core_num = cpup->cpu_phys_number / topoParms.nPThreadsPerCore; - die = x86_die_find(cpu); - if (die == NULL) - return(NULL); + die = x86_die_find(cpu); + if (die == NULL) { + return NULL; + } - core = die->cores; - while (core != NULL) { - if (core->pcore_num == core_num) - break; - core = core->next_in_die; - } + core = die->cores; + while (core != NULL) { + if (core->pcore_num == core_num) { + break; + } + core = core->next_in_die; + } - return(core); + return core; } - + void x86_set_logical_topology(x86_lcpu_t *lcpu, int pnum, int lnum) { - x86_core_t *core = lcpu->core; - x86_die_t *die = lcpu->die; - x86_pkg_t *pkg = lcpu->package; - - assert(core != NULL); - assert(die != NULL); - assert(pkg != NULL); + x86_core_t *core = lcpu->core; + x86_die_t *die = lcpu->die; + x86_pkg_t *pkg = lcpu->package; - lcpu->cpu_num = lnum; - lcpu->pnum = pnum; - lcpu->master = (lnum == master_cpu); - lcpu->primary = (lnum % topoParms.nLThreadsPerPackage) == 0; + assert(core != NULL); + 
assert(die != NULL); + assert(pkg != NULL); - lcpu->lnum = lnum % topoParms.nLThreadsPerCore; + lcpu->cpu_num = lnum; + lcpu->pnum = pnum; + lcpu->master = (lnum == master_cpu); + lcpu->primary = (lnum % topoParms.nLThreadsPerPackage) == 0; - core->pcore_num = lnum / topoParms.nLThreadsPerCore; - core->lcore_num = core->pcore_num % topoParms.nLCoresPerDie; + lcpu->lnum = lnum % topoParms.nLThreadsPerCore; - die->pdie_num = lnum / (topoParms.nLThreadsPerCore*topoParms.nLCoresPerDie); - die->ldie_num = die->pdie_num % topoParms.nLDiesPerPackage; + core->pcore_num = lnum / topoParms.nLThreadsPerCore; + core->lcore_num = core->pcore_num % topoParms.nLCoresPerDie; - pkg->ppkg_num = lnum / topoParms.nLThreadsPerPackage; - pkg->lpkg_num = pkg->ppkg_num; + die->pdie_num = lnum / (topoParms.nLThreadsPerCore * topoParms.nLCoresPerDie); + die->ldie_num = die->pdie_num % topoParms.nLDiesPerPackage; + pkg->ppkg_num = lnum / topoParms.nLThreadsPerPackage; + pkg->lpkg_num = pkg->ppkg_num; } static x86_die_t * x86_die_alloc(int cpu) { - x86_die_t *die; - cpu_data_t *cpup; + x86_die_t *die; + cpu_data_t *cpup; - cpup = cpu_datap(cpu); + cpup = cpu_datap(cpu); - mp_safe_spin_lock(&x86_topo_lock); - if (free_dies != NULL) { - die = free_dies; - free_dies = die->next_in_pkg; - die->next_in_pkg = NULL; - simple_unlock(&x86_topo_lock); - } else { - simple_unlock(&x86_topo_lock); - die = kalloc(sizeof(x86_die_t)); - if (die == NULL) - panic("x86_die_alloc() kalloc of x86_die_t failed!\n"); - } + mp_safe_spin_lock(&x86_topo_lock); + if (free_dies != NULL) { + die = free_dies; + free_dies = die->next_in_pkg; + die->next_in_pkg = NULL; + simple_unlock(&x86_topo_lock); + } else { + simple_unlock(&x86_topo_lock); + die = kalloc(sizeof(x86_die_t)); + if (die == NULL) { + panic("x86_die_alloc() kalloc of x86_die_t failed!\n"); + } + } - bzero((void *) die, sizeof(x86_die_t)); + bzero((void *) die, sizeof(x86_die_t)); - die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie; + die->pdie_num = cpup->cpu_phys_number / topoParms.nPThreadsPerDie; - die->ldie_num = num_dies; - atomic_incl((long *) &num_dies, 1); + die->ldie_num = num_dies; + atomic_incl((long *) &num_dies, 1); - die->flags = X86DIE_FL_PRESENT; - return(die); + die->flags = X86DIE_FL_PRESENT; + return die; } static void x86_die_free(x86_die_t *die) { - mp_safe_spin_lock(&x86_topo_lock); - die->next_in_pkg = free_dies; - free_dies = die; - atomic_decl((long *) &num_dies, 1); - simple_unlock(&x86_topo_lock); + mp_safe_spin_lock(&x86_topo_lock); + die->next_in_pkg = free_dies; + free_dies = die; + atomic_decl((long *) &num_dies, 1); + simple_unlock(&x86_topo_lock); } static x86_pkg_t * x86_package_alloc(int cpu) { - x86_pkg_t *pkg; - cpu_data_t *cpup; + x86_pkg_t *pkg; + cpu_data_t *cpup; - cpup = cpu_datap(cpu); + cpup = cpu_datap(cpu); - mp_safe_spin_lock(&x86_topo_lock); - if (free_pkgs != NULL) { - pkg = free_pkgs; - free_pkgs = pkg->next; - pkg->next = NULL; - simple_unlock(&x86_topo_lock); - } else { - simple_unlock(&x86_topo_lock); - pkg = kalloc(sizeof(x86_pkg_t)); - if (pkg == NULL) - panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n"); - } + mp_safe_spin_lock(&x86_topo_lock); + if (free_pkgs != NULL) { + pkg = free_pkgs; + free_pkgs = pkg->next; + pkg->next = NULL; + simple_unlock(&x86_topo_lock); + } else { + simple_unlock(&x86_topo_lock); + pkg = kalloc(sizeof(x86_pkg_t)); + if (pkg == NULL) { + panic("x86_package_alloc() kalloc of x86_pkg_t failed!\n"); + } + } - bzero((void *) pkg, sizeof(x86_pkg_t)); + bzero((void *) pkg, 
sizeof(x86_pkg_t)); - pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage; + pkg->ppkg_num = cpup->cpu_phys_number / topoParms.nPThreadsPerPackage; - pkg->lpkg_num = topoParms.nPackages; - atomic_incl((long *) &topoParms.nPackages, 1); + pkg->lpkg_num = topoParms.nPackages; + atomic_incl((long *) &topoParms.nPackages, 1); - pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY; - return(pkg); + pkg->flags = X86PKG_FL_PRESENT | X86PKG_FL_READY; + return pkg; } static void x86_package_free(x86_pkg_t *pkg) { - mp_safe_spin_lock(&x86_topo_lock); - pkg->next = free_pkgs; - free_pkgs = pkg; - atomic_decl((long *) &topoParms.nPackages, 1); - simple_unlock(&x86_topo_lock); + mp_safe_spin_lock(&x86_topo_lock); + pkg->next = free_pkgs; + free_pkgs = pkg; + atomic_decl((long *) &topoParms.nPackages, 1); + simple_unlock(&x86_topo_lock); } static void x86_cache_add_lcpu(x86_cpu_cache_t *cache, x86_lcpu_t *lcpu) { - x86_cpu_cache_t *cur_cache; - int i; - - /* - * Put the new CPU into the list of the cache. - */ - cur_cache = lcpu->caches[cache->level - 1]; - lcpu->caches[cache->level - 1] = cache; - cache->next = cur_cache; - cache->nlcpus += 1; - for (i = 0; i < cache->nlcpus; i += 1) { - if (cache->cpus[i] == NULL) { - cache->cpus[i] = lcpu; - break; + x86_cpu_cache_t *cur_cache; + int i; + + /* + * Put the new CPU into the list of the cache. + */ + cur_cache = lcpu->caches[cache->level - 1]; + lcpu->caches[cache->level - 1] = cache; + cache->next = cur_cache; + cache->nlcpus += 1; + for (i = 0; i < cache->nlcpus; i += 1) { + if (cache->cpus[i] == NULL) { + cache->cpus[i] = lcpu; + break; + } } - } } static void x86_lcpu_add_caches(x86_lcpu_t *lcpu) { - x86_cpu_cache_t *list; - x86_cpu_cache_t *cur; - x86_cpu_cache_t *match; - x86_die_t *die; - x86_core_t *core; - x86_lcpu_t *cur_lcpu; - uint32_t level; - boolean_t found = FALSE; - - assert(lcpu != NULL); - - /* - * Add the cache data to the topology. - */ - list = x86_cache_list(); + x86_cpu_cache_t *list; + x86_cpu_cache_t *cur; + x86_cpu_cache_t *match; + x86_die_t *die; + x86_core_t *core; + x86_lcpu_t *cur_lcpu; + uint32_t level; + boolean_t found = FALSE; - mp_safe_spin_lock(&x86_topo_lock); + assert(lcpu != NULL); - while (list != NULL) { /* - * Remove the cache from the front of the list. + * Add the cache data to the topology. */ - cur = list; - list = cur->next; - cur->next = NULL; - level = cur->level - 1; + list = x86_cache_list(); - /* - * If the cache isn't shared then just put it where it - * belongs. - */ - if (cur->maxcpus == 1) { - x86_cache_add_lcpu(cur, lcpu); - continue; - } + mp_safe_spin_lock(&x86_topo_lock); - /* - * We'll assume that all of the caches at a particular level - * have the same sharing. So if we have a cache already at - * this level, we'll just skip looking for the match. - */ - if (lcpu->caches[level] != NULL) { - x86_cache_free(cur); - continue; - } + while (list != NULL) { + /* + * Remove the cache from the front of the list. + */ + cur = list; + list = cur->next; + cur->next = NULL; + level = cur->level - 1; - /* - * This is a shared cache, so we have to figure out if - * this is the first time we've seen this cache. We do - * this by searching through the topology and seeing if - * this cache is already described. - * - * Assume that L{LLC-1} are all at the core level and that - * LLC is shared at the die level. - */ - if (level < topoParms.LLCDepth) { - /* - * Shared at the core. - */ - core = lcpu->core; - cur_lcpu = core->lcpus; - while (cur_lcpu != NULL) { /* - * Skip ourselves. 
+ * If the cache isn't shared then just put it where it + * belongs. */ - if (cur_lcpu == lcpu) { - cur_lcpu = cur_lcpu->next_in_core; - continue; + if (cur->maxcpus == 1) { + x86_cache_add_lcpu(cur, lcpu); + continue; } /* - * If there's a cache on this logical CPU, - * then use that one. + * We'll assume that all of the caches at a particular level + * have the same sharing. So if we have a cache already at + * this level, we'll just skip looking for the match. */ - match = x86_match_cache(cur_lcpu->caches[level], cur); - if (match != NULL) { - x86_cache_free(cur); - x86_cache_add_lcpu(match, lcpu); - found = TRUE; - break; + if (lcpu->caches[level] != NULL) { + x86_cache_free(cur); + continue; } - cur_lcpu = cur_lcpu->next_in_core; - } - } else { - /* - * Shared at the die. - */ - die = lcpu->die; - cur_lcpu = die->lcpus; - while (cur_lcpu != NULL) { /* - * Skip ourselves. + * This is a shared cache, so we have to figure out if + * this is the first time we've seen this cache. We do + * this by searching through the topology and seeing if + * this cache is already described. + * + * Assume that L{LLC-1} are all at the core level and that + * LLC is shared at the die level. */ - if (cur_lcpu == lcpu) { - cur_lcpu = cur_lcpu->next_in_die; - continue; + if (level < topoParms.LLCDepth) { + /* + * Shared at the core. + */ + core = lcpu->core; + cur_lcpu = core->lcpus; + while (cur_lcpu != NULL) { + /* + * Skip ourselves. + */ + if (cur_lcpu == lcpu) { + cur_lcpu = cur_lcpu->next_in_core; + continue; + } + + /* + * If there's a cache on this logical CPU, + * then use that one. + */ + match = x86_match_cache(cur_lcpu->caches[level], cur); + if (match != NULL) { + x86_cache_free(cur); + x86_cache_add_lcpu(match, lcpu); + found = TRUE; + break; + } + + cur_lcpu = cur_lcpu->next_in_core; + } + } else { + /* + * Shared at the die. + */ + die = lcpu->die; + cur_lcpu = die->lcpus; + while (cur_lcpu != NULL) { + /* + * Skip ourselves. + */ + if (cur_lcpu == lcpu) { + cur_lcpu = cur_lcpu->next_in_die; + continue; + } + + /* + * If there's a cache on this logical CPU, + * then use that one. + */ + match = x86_match_cache(cur_lcpu->caches[level], cur); + if (match != NULL) { + x86_cache_free(cur); + x86_cache_add_lcpu(match, lcpu); + found = TRUE; + break; + } + + cur_lcpu = cur_lcpu->next_in_die; + } } /* - * If there's a cache on this logical CPU, - * then use that one. + * If a shared cache wasn't found, then this logical CPU must + * be the first one encountered. */ - match = x86_match_cache(cur_lcpu->caches[level], cur); - if (match != NULL) { - x86_cache_free(cur); - x86_cache_add_lcpu(match, lcpu); - found = TRUE; - break; + if (!found) { + x86_cache_add_lcpu(cur, lcpu); } - - cur_lcpu = cur_lcpu->next_in_die; - } } - /* - * If a shared cache wasn't found, then this logical CPU must - * be the first one encountered. 
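The search above is easiest to follow with a concrete arrival order (hypothetical, continuing the package sketched earlier; note that x86_lcpu_add_caches() runs after the lcpu is already linked into its core and die, so the sibling scans can see it). When CPU 0 comes up first, nothing matches and its candidates are simply installed; when its hyperthread twin CPU 1 arrives:

    cur = L1D (maxcpus 2) -> caches[0] empty -> scan core->lcpus:
                             CPU 0 holds a matching L1D -> free cur,
                             attach CPU 1 to the existing node
    cur = L1I (level 1)   -> caches[0] already set by the L1D ->
                             freed via the "same sharing per level" shortcut
    cur = L2U (maxcpus 2) -> matched against CPU 0's L2 at the core scan
    cur = L3U (level 3)   -> level - 1 == LLCDepth -> scanned at the die
                             instead, matched against CPU 0's L3

So a shared node is allocated once, by the first CPU to encounter it, and every later sibling is folded into the node's cpus[] array.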
- */ - if (!found) { - x86_cache_add_lcpu(cur, lcpu); - } - } - - simple_unlock(&x86_topo_lock); + simple_unlock(&x86_topo_lock); } static void x86_core_add_lcpu(x86_core_t *core, x86_lcpu_t *lcpu) { - assert(core != NULL); - assert(lcpu != NULL); + assert(core != NULL); + assert(lcpu != NULL); - mp_safe_spin_lock(&x86_topo_lock); + mp_safe_spin_lock(&x86_topo_lock); - lcpu->next_in_core = core->lcpus; - lcpu->core = core; - core->lcpus = lcpu; - core->num_lcpus += 1; - simple_unlock(&x86_topo_lock); + lcpu->next_in_core = core->lcpus; + lcpu->core = core; + core->lcpus = lcpu; + core->num_lcpus += 1; + simple_unlock(&x86_topo_lock); } static void x86_die_add_lcpu(x86_die_t *die, x86_lcpu_t *lcpu) { - assert(die != NULL); - assert(lcpu != NULL); - - lcpu->next_in_die = die->lcpus; - lcpu->die = die; - die->lcpus = lcpu; + assert(die != NULL); + assert(lcpu != NULL); + + lcpu->next_in_die = die->lcpus; + lcpu->die = die; + die->lcpus = lcpu; } static void x86_die_add_core(x86_die_t *die, x86_core_t *core) { - assert(die != NULL); - assert(core != NULL); + assert(die != NULL); + assert(core != NULL); - core->next_in_die = die->cores; - core->die = die; - die->cores = core; - die->num_cores += 1; + core->next_in_die = die->cores; + core->die = die; + die->cores = core; + die->num_cores += 1; } - static void +static void x86_package_add_lcpu(x86_pkg_t *pkg, x86_lcpu_t *lcpu) { - assert(pkg != NULL); - assert(lcpu != NULL); + assert(pkg != NULL); + assert(lcpu != NULL); - lcpu->next_in_pkg = pkg->lcpus; - lcpu->package = pkg; - pkg->lcpus = lcpu; + lcpu->next_in_pkg = pkg->lcpus; + lcpu->package = pkg; + pkg->lcpus = lcpu; } static void x86_package_add_core(x86_pkg_t *pkg, x86_core_t *core) { - assert(pkg != NULL); - assert(core != NULL); + assert(pkg != NULL); + assert(core != NULL); - core->next_in_pkg = pkg->cores; - core->package = pkg; - pkg->cores = core; + core->next_in_pkg = pkg->cores; + core->package = pkg; + pkg->cores = core; } static void x86_package_add_die(x86_pkg_t *pkg, x86_die_t *die) { - assert(pkg != NULL); - assert(die != NULL); + assert(pkg != NULL); + assert(die != NULL); - die->next_in_pkg = pkg->dies; - die->package = pkg; - pkg->dies = die; - pkg->num_dies += 1; + die->next_in_pkg = pkg->dies; + die->package = pkg; + pkg->dies = die; + pkg->num_dies += 1; } void * cpu_thread_alloc(int cpu) { - x86_core_t *core = NULL; - x86_die_t *die = NULL; - x86_pkg_t *pkg = NULL; - cpu_data_t *cpup; - uint32_t phys_cpu; - - /* - * Only allow one to manipulate the topology at a time. - */ - mp_safe_spin_lock(&x86_topo_lock); - - /* - * Make sure all of the topology parameters have been initialized. - */ - if (!topoParmsInited) - initTopoParms(); - - cpup = cpu_datap(cpu); - - phys_cpu = cpup->cpu_phys_number; - - x86_lcpu_init(cpu); - - /* - * Assume that all cpus have the same features. - */ - if (cpu_is_hyperthreaded()) { - cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT; - } else { - cpup->cpu_threadtype = CPU_THREADTYPE_NONE; - } - - /* - * Get the package that the logical CPU is in. - */ - do { - pkg = x86_package_find(cpu); - if (pkg == NULL) { - /* - * Package structure hasn't been created yet, do it now. - */ - simple_unlock(&x86_topo_lock); - pkg = x86_package_alloc(cpu); - mp_safe_spin_lock(&x86_topo_lock); - if (x86_package_find(cpu) != NULL) { - x86_package_free(pkg); - continue; - } - - /* - * Add the new package to the global list of packages. 
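The shared-cache discovery in x86_lcpu_add_caches() boils down to one pattern: walk the sibling logical CPUs at the right scope (the core for levels below the LLC, the die for the LLC itself), reuse an equivalent cache record if a sibling already holds one, and otherwise treat this CPU as the first to report the cache. A condensed sketch of that walk; sibling_first(), sibling_next() and cache_equivalent() are hypothetical stand-ins for the next_in_core/next_in_die links and x86_match_cache():

	/* Sketch only: the helpers below are hypothetical stand-ins. */
	static x86_cpu_cache_t *
	find_shared_cache(x86_lcpu_t *me, x86_cpu_cache_t *cand, uint32_t level)
	{
		x86_lcpu_t *sib;

		for (sib = sibling_first(me); sib != NULL; sib = sibling_next(sib)) {
			if (sib == me) {
				continue;       /* skip ourselves */
			}
			x86_cpu_cache_t *match = cache_equivalent(sib->caches[level], cand);
			if (match != NULL) {
				return match;   /* a sibling already described this cache */
			}
		}
		return NULL;                    /* first CPU to encounter this cache */
	}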
- */ - pkg->next = x86_pkgs; - x86_pkgs = pkg; - } - } while (pkg == NULL); + x86_core_t *core = NULL; + x86_die_t *die = NULL; + x86_pkg_t *pkg = NULL; + cpu_data_t *cpup; + uint32_t phys_cpu; - /* - * Get the die that the logical CPU is in. - */ - do { - die = x86_die_find(cpu); - if (die == NULL) { - /* - * Die structure hasn't been created yet, do it now. - */ - simple_unlock(&x86_topo_lock); - die = x86_die_alloc(cpu); - mp_safe_spin_lock(&x86_topo_lock); - if (x86_die_find(cpu) != NULL) { - x86_die_free(die); - continue; - } - - /* - * Add the die to the package. - */ - x86_package_add_die(pkg, die); + /* + * Only allow one to manipulate the topology at a time. + */ + mp_safe_spin_lock(&x86_topo_lock); + + /* + * Make sure all of the topology parameters have been initialized. + */ + if (!topoParmsInited) { + initTopoParms(); } - } while (die == NULL); - - /* - * Get the core for this logical CPU. - */ - do { - core = x86_core_find(cpu); - if (core == NULL) { - /* - * Allocate the core structure now. - */ - simple_unlock(&x86_topo_lock); - core = x86_core_alloc(cpu); - mp_safe_spin_lock(&x86_topo_lock); - if (x86_core_find(cpu) != NULL) { - x86_core_free(core); - continue; - } - - /* - * Add the core to the die & package. - */ - x86_die_add_core(die, core); - x86_package_add_core(pkg, core); - machine_info.physical_cpu_max += 1; + + cpup = cpu_datap(cpu); + + phys_cpu = cpup->cpu_phys_number; + + x86_lcpu_init(cpu); + + /* + * Assume that all cpus have the same features. + */ + if (cpu_is_hyperthreaded()) { + cpup->cpu_threadtype = CPU_THREADTYPE_INTEL_HTT; + } else { + cpup->cpu_threadtype = CPU_THREADTYPE_NONE; } - } while (core == NULL); - - - /* - * Done manipulating the topology, so others can get in. - */ - machine_info.logical_cpu_max += 1; - simple_unlock(&x86_topo_lock); - - /* - * Add the logical CPU to the other topology structures. - */ - x86_core_add_lcpu(core, &cpup->lcpu); - x86_die_add_lcpu(core->die, &cpup->lcpu); - x86_package_add_lcpu(core->package, &cpup->lcpu); - x86_lcpu_add_caches(&cpup->lcpu); - - return (void *) core; + + /* + * Get the package that the logical CPU is in. + */ + do { + pkg = x86_package_find(cpu); + if (pkg == NULL) { + /* + * Package structure hasn't been created yet, do it now. + */ + simple_unlock(&x86_topo_lock); + pkg = x86_package_alloc(cpu); + mp_safe_spin_lock(&x86_topo_lock); + if (x86_package_find(cpu) != NULL) { + x86_package_free(pkg); + continue; + } + + /* + * Add the new package to the global list of packages. + */ + pkg->next = x86_pkgs; + x86_pkgs = pkg; + } + } while (pkg == NULL); + + /* + * Get the die that the logical CPU is in. + */ + do { + die = x86_die_find(cpu); + if (die == NULL) { + /* + * Die structure hasn't been created yet, do it now. + */ + simple_unlock(&x86_topo_lock); + die = x86_die_alloc(cpu); + mp_safe_spin_lock(&x86_topo_lock); + if (x86_die_find(cpu) != NULL) { + x86_die_free(die); + continue; + } + + /* + * Add the die to the package. + */ + x86_package_add_die(pkg, die); + } + } while (die == NULL); + + /* + * Get the core for this logical CPU. + */ + do { + core = x86_core_find(cpu); + if (core == NULL) { + /* + * Allocate the core structure now. + */ + simple_unlock(&x86_topo_lock); + core = x86_core_alloc(cpu); + mp_safe_spin_lock(&x86_topo_lock); + if (x86_core_find(cpu) != NULL) { + x86_core_free(core); + continue; + } + + /* + * Add the core to the die & package. 
+ */ + x86_die_add_core(die, core); + x86_package_add_core(pkg, core); + machine_info.physical_cpu_max += 1; + } + } while (core == NULL); + + + /* + * Done manipulating the topology, so others can get in. + */ + machine_info.logical_cpu_max += 1; + simple_unlock(&x86_topo_lock); + + /* + * Add the logical CPU to the other topology structures. + */ + x86_core_add_lcpu(core, &cpup->lcpu); + x86_die_add_lcpu(core->die, &cpup->lcpu); + x86_package_add_lcpu(core->package, &cpup->lcpu); + x86_lcpu_add_caches(&cpup->lcpu); + + return (void *) core; } void cpu_thread_init(void) { - int my_cpu = get_cpu_number(); - cpu_data_t *cpup = current_cpu_datap(); - x86_core_t *core; - static int initialized = 0; - - /* - * If we're the boot processor, we do all of the initialization of - * the CPU topology infrastructure. - */ - if (my_cpu == master_cpu && !initialized) { - simple_lock_init(&x86_topo_lock, 0); + int my_cpu = get_cpu_number(); + cpu_data_t *cpup = current_cpu_datap(); + x86_core_t *core; + static int initialized = 0; + + /* + * If we're the boot processor, we do all of the initialization of + * the CPU topology infrastructure. + */ + if (my_cpu == master_cpu && !initialized) { + simple_lock_init(&x86_topo_lock, 0); + + /* + * Put this logical CPU into the physical CPU topology. + */ + cpup->lcpu.core = cpu_thread_alloc(my_cpu); + + initialized = 1; + } /* - * Put this logical CPU into the physical CPU topology. + * Do the CPU accounting. */ - cpup->lcpu.core = cpu_thread_alloc(my_cpu); - - initialized = 1; - } - - /* - * Do the CPU accounting. - */ - core = cpup->lcpu.core; - mp_safe_spin_lock(&x86_topo_lock); - machine_info.logical_cpu += 1; - if (core->active_lcpus == 0) - machine_info.physical_cpu += 1; - core->active_lcpus += 1; - simple_unlock(&x86_topo_lock); - - pmCPUMarkRunning(cpup); - timer_resync_deadlines(); + core = cpup->lcpu.core; + mp_safe_spin_lock(&x86_topo_lock); + machine_info.logical_cpu += 1; + if (core->active_lcpus == 0) { + machine_info.physical_cpu += 1; + } + core->active_lcpus += 1; + simple_unlock(&x86_topo_lock); + + pmCPUMarkRunning(cpup); + timer_resync_deadlines(); } /* @@ -978,26 +995,27 @@ __attribute__((noreturn)) void cpu_thread_halt(void) { - x86_core_t *core; - cpu_data_t *cpup = current_cpu_datap(); - - mp_safe_spin_lock(&x86_topo_lock); - machine_info.logical_cpu -= 1; - core = cpup->lcpu.core; - core->active_lcpus -= 1; - if (core->active_lcpus == 0) - machine_info.physical_cpu -= 1; - simple_unlock(&x86_topo_lock); - - /* - * Let the power management code determine the best way to "stop" - * the processor. - */ - ml_set_interrupts_enabled(FALSE); - while (1) { - pmCPUHalt(PM_HALT_NORMAL); - } - /* NOT REACHED */ + x86_core_t *core; + cpu_data_t *cpup = current_cpu_datap(); + + mp_safe_spin_lock(&x86_topo_lock); + machine_info.logical_cpu -= 1; + core = cpup->lcpu.core; + core->active_lcpus -= 1; + if (core->active_lcpus == 0) { + machine_info.physical_cpu -= 1; + } + simple_unlock(&x86_topo_lock); + + /* + * Let the power management code determine the best way to "stop" + * the processor. 
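Each of the three do/while loops in cpu_thread_alloc() above uses the same idiom: look up the structure under x86_topo_lock, and if it is missing, drop the spinlock (the allocator may block), allocate, retake the lock, and re-check in case another CPU created the structure in the meantime. A minimal sketch of the idiom, with find_obj()/alloc_obj()/free_obj()/publish_obj() as hypothetical stand-ins for the x86_package/die/core helper pairs:

	obj_t *obj;

	do {
		obj = find_obj(cpu);                    /* lookup under x86_topo_lock */
		if (obj == NULL) {
			simple_unlock(&x86_topo_lock);  /* allocation may block */
			obj = alloc_obj(cpu);
			mp_safe_spin_lock(&x86_topo_lock);
			if (find_obj(cpu) != NULL) {
				free_obj(obj);          /* lost the race; discard ours */
				obj = NULL;             /* and retry the lookup */
				continue;
			}
			publish_obj(obj);               /* e.g. link onto x86_pkgs */
		}
	} while (obj == NULL);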
+ */ + ml_set_interrupts_enabled(FALSE); + while (1) { + pmCPUHalt(PM_HALT_NORMAL); + } + /* NOT REACHED */ } /* @@ -1007,169 +1025,189 @@ cpu_thread_halt(void) void x86_validate_topology(void) { - x86_pkg_t *pkg; - x86_die_t *die; - x86_core_t *core; - x86_lcpu_t *lcpu; - uint32_t nDies; - uint32_t nCores; - uint32_t nCPUs; - - if (topo_dbg) - debug_topology_print(); - - /* - * Called after processors are registered but before non-boot processors - * are started: - * - real_ncpus: number of registered processors driven from MADT - * - max_ncpus: max number of processors that will be started - */ - nCPUs = topoParms.nPackages * topoParms.nLThreadsPerPackage; - if (nCPUs != real_ncpus) - panic("x86_validate_topology() %d threads but %d registered from MADT", - nCPUs, real_ncpus); - - pkg = x86_pkgs; - while (pkg != NULL) { - /* - * Make sure that the package has the correct number of dies. - */ - nDies = 0; - die = pkg->dies; - while (die != NULL) { - if (die->package == NULL) - panic("Die(%d)->package is NULL", - die->pdie_num); - if (die->package != pkg) - panic("Die %d points to package %d, should be %d", - die->pdie_num, die->package->lpkg_num, pkg->lpkg_num); - - TOPO_DBG("Die(%d)->package %d\n", - die->pdie_num, pkg->lpkg_num); - - /* - * Make sure that the die has the correct number of cores. - */ - TOPO_DBG("Die(%d)->cores: ", die->pdie_num); - nCores = 0; - core = die->cores; - while (core != NULL) { - if (core->die == NULL) - panic("Core(%d)->die is NULL", - core->pcore_num); - if (core->die != die) - panic("Core %d points to die %d, should be %d", - core->pcore_num, core->die->pdie_num, die->pdie_num); - nCores += 1; - TOPO_DBG("%d ", core->pcore_num); - core = core->next_in_die; - } - TOPO_DBG("\n"); - - if (nCores != topoParms.nLCoresPerDie) - panic("Should have %d Cores, but only found %d for Die %d", - topoParms.nLCoresPerDie, nCores, die->pdie_num); - - /* - * Make sure that the die has the correct number of CPUs. - */ - TOPO_DBG("Die(%d)->lcpus: ", die->pdie_num); - nCPUs = 0; - lcpu = die->lcpus; - while (lcpu != NULL) { - if (lcpu->die == NULL) - panic("CPU(%d)->die is NULL", - lcpu->cpu_num); - if (lcpu->die != die) - panic("CPU %d points to die %d, should be %d", - lcpu->cpu_num, lcpu->die->pdie_num, die->pdie_num); - nCPUs += 1; - TOPO_DBG("%d ", lcpu->cpu_num); - lcpu = lcpu->next_in_die; - } - TOPO_DBG("\n"); - - if (nCPUs != topoParms.nLThreadsPerDie) - panic("Should have %d Threads, but only found %d for Die %d", - topoParms.nLThreadsPerDie, nCPUs, die->pdie_num); - - nDies += 1; - die = die->next_in_pkg; + x86_pkg_t *pkg; + x86_die_t *die; + x86_core_t *core; + x86_lcpu_t *lcpu; + uint32_t nDies; + uint32_t nCores; + uint32_t nCPUs; + + if (topo_dbg) { + debug_topology_print(); } - if (nDies != topoParms.nLDiesPerPackage) - panic("Should have %d Dies, but only found %d for package %d", - topoParms.nLDiesPerPackage, nDies, pkg->lpkg_num); - /* - * Make sure that the package has the correct number of cores. 
+ * Called after processors are registered but before non-boot processors + * are started: + * - real_ncpus: number of registered processors driven from MADT + * - max_ncpus: max number of processors that will be started */ - nCores = 0; - core = pkg->cores; - while (core != NULL) { - if (core->package == NULL) - panic("Core(%d)->package is NULL", - core->pcore_num); - if (core->package != pkg) - panic("Core %d points to package %d, should be %d", - core->pcore_num, core->package->lpkg_num, pkg->lpkg_num); - TOPO_DBG("Core(%d)->package %d\n", - core->pcore_num, pkg->lpkg_num); - - /* - * Make sure that the core has the correct number of CPUs. - */ - nCPUs = 0; - lcpu = core->lcpus; - TOPO_DBG("Core(%d)->lcpus: ", core->pcore_num); - while (lcpu != NULL) { - if (lcpu->core == NULL) - panic("CPU(%d)->core is NULL", - lcpu->cpu_num); - if (lcpu->core != core) - panic("CPU %d points to core %d, should be %d", - lcpu->cpu_num, lcpu->core->pcore_num, core->pcore_num); - TOPO_DBG("%d ", lcpu->cpu_num); - nCPUs += 1; - lcpu = lcpu->next_in_core; - } - TOPO_DBG("\n"); - - if (nCPUs != topoParms.nLThreadsPerCore) - panic("Should have %d Threads, but only found %d for Core %d", - topoParms.nLThreadsPerCore, nCPUs, core->pcore_num); - nCores += 1; - core = core->next_in_pkg; + nCPUs = topoParms.nPackages * topoParms.nLThreadsPerPackage; + if (nCPUs != real_ncpus) { + panic("x86_validate_topology() %d threads but %d registered from MADT", + nCPUs, real_ncpus); } - if (nCores != topoParms.nLCoresPerPackage) - panic("Should have %d Cores, but only found %d for package %d", - topoParms.nLCoresPerPackage, nCores, pkg->lpkg_num); + pkg = x86_pkgs; + while (pkg != NULL) { + /* + * Make sure that the package has the correct number of dies. + */ + nDies = 0; + die = pkg->dies; + while (die != NULL) { + if (die->package == NULL) { + panic("Die(%d)->package is NULL", + die->pdie_num); + } + if (die->package != pkg) { + panic("Die %d points to package %d, should be %d", + die->pdie_num, die->package->lpkg_num, pkg->lpkg_num); + } + + TOPO_DBG("Die(%d)->package %d\n", + die->pdie_num, pkg->lpkg_num); + + /* + * Make sure that the die has the correct number of cores. + */ + TOPO_DBG("Die(%d)->cores: ", die->pdie_num); + nCores = 0; + core = die->cores; + while (core != NULL) { + if (core->die == NULL) { + panic("Core(%d)->die is NULL", + core->pcore_num); + } + if (core->die != die) { + panic("Core %d points to die %d, should be %d", + core->pcore_num, core->die->pdie_num, die->pdie_num); + } + nCores += 1; + TOPO_DBG("%d ", core->pcore_num); + core = core->next_in_die; + } + TOPO_DBG("\n"); + + if (nCores != topoParms.nLCoresPerDie) { + panic("Should have %d Cores, but only found %d for Die %d", + topoParms.nLCoresPerDie, nCores, die->pdie_num); + } + + /* + * Make sure that the die has the correct number of CPUs. + */ + TOPO_DBG("Die(%d)->lcpus: ", die->pdie_num); + nCPUs = 0; + lcpu = die->lcpus; + while (lcpu != NULL) { + if (lcpu->die == NULL) { + panic("CPU(%d)->die is NULL", + lcpu->cpu_num); + } + if (lcpu->die != die) { + panic("CPU %d points to die %d, should be %d", + lcpu->cpu_num, lcpu->die->pdie_num, die->pdie_num); + } + nCPUs += 1; + TOPO_DBG("%d ", lcpu->cpu_num); + lcpu = lcpu->next_in_die; + } + TOPO_DBG("\n"); + + if (nCPUs != topoParms.nLThreadsPerDie) { + panic("Should have %d Threads, but only found %d for Die %d", + topoParms.nLThreadsPerDie, nCPUs, die->pdie_num); + } + + nDies += 1; + die = die->next_in_pkg; + } - /* - * Make sure that the package has the correct number of CPUs. 
- */ - nCPUs = 0; - lcpu = pkg->lcpus; - while (lcpu != NULL) { - if (lcpu->package == NULL) - panic("CPU(%d)->package is NULL", - lcpu->cpu_num); - if (lcpu->package != pkg) - panic("CPU %d points to package %d, should be %d", - lcpu->cpu_num, lcpu->package->lpkg_num, pkg->lpkg_num); - TOPO_DBG("CPU(%d)->package %d\n", - lcpu->cpu_num, pkg->lpkg_num); - nCPUs += 1; - lcpu = lcpu->next_in_pkg; - } + if (nDies != topoParms.nLDiesPerPackage) { + panic("Should have %d Dies, but only found %d for package %d", + topoParms.nLDiesPerPackage, nDies, pkg->lpkg_num); + } - if (nCPUs != topoParms.nLThreadsPerPackage) - panic("Should have %d Threads, but only found %d for package %d", - topoParms.nLThreadsPerPackage, nCPUs, pkg->lpkg_num); + /* + * Make sure that the package has the correct number of cores. + */ + nCores = 0; + core = pkg->cores; + while (core != NULL) { + if (core->package == NULL) { + panic("Core(%d)->package is NULL", + core->pcore_num); + } + if (core->package != pkg) { + panic("Core %d points to package %d, should be %d", + core->pcore_num, core->package->lpkg_num, pkg->lpkg_num); + } + TOPO_DBG("Core(%d)->package %d\n", + core->pcore_num, pkg->lpkg_num); + + /* + * Make sure that the core has the correct number of CPUs. + */ + nCPUs = 0; + lcpu = core->lcpus; + TOPO_DBG("Core(%d)->lcpus: ", core->pcore_num); + while (lcpu != NULL) { + if (lcpu->core == NULL) { + panic("CPU(%d)->core is NULL", + lcpu->cpu_num); + } + if (lcpu->core != core) { + panic("CPU %d points to core %d, should be %d", + lcpu->cpu_num, lcpu->core->pcore_num, core->pcore_num); + } + TOPO_DBG("%d ", lcpu->cpu_num); + nCPUs += 1; + lcpu = lcpu->next_in_core; + } + TOPO_DBG("\n"); + + if (nCPUs != topoParms.nLThreadsPerCore) { + panic("Should have %d Threads, but only found %d for Core %d", + topoParms.nLThreadsPerCore, nCPUs, core->pcore_num); + } + nCores += 1; + core = core->next_in_pkg; + } - pkg = pkg->next; - } + if (nCores != topoParms.nLCoresPerPackage) { + panic("Should have %d Cores, but only found %d for package %d", + topoParms.nLCoresPerPackage, nCores, pkg->lpkg_num); + } + + /* + * Make sure that the package has the correct number of CPUs. 
+ */ + nCPUs = 0; + lcpu = pkg->lcpus; + while (lcpu != NULL) { + if (lcpu->package == NULL) { + panic("CPU(%d)->package is NULL", + lcpu->cpu_num); + } + if (lcpu->package != pkg) { + panic("CPU %d points to package %d, should be %d", + lcpu->cpu_num, lcpu->package->lpkg_num, pkg->lpkg_num); + } + TOPO_DBG("CPU(%d)->package %d\n", + lcpu->cpu_num, pkg->lpkg_num); + nCPUs += 1; + lcpu = lcpu->next_in_pkg; + } + + if (nCPUs != topoParms.nLThreadsPerPackage) { + panic("Should have %d Threads, but only found %d for package %d", + topoParms.nLThreadsPerPackage, nCPUs, pkg->lpkg_num); + } + + pkg = pkg->next; + } } /* @@ -1178,53 +1216,56 @@ x86_validate_topology(void) static void debug_topology_print(void) { - x86_pkg_t *pkg; - x86_die_t *die; - x86_core_t *core; - x86_lcpu_t *cpu; - - pkg = x86_pkgs; - while (pkg != NULL) { - kprintf("Package:\n"); - kprintf(" Physical: %d\n", pkg->ppkg_num); - kprintf(" Logical: %d\n", pkg->lpkg_num); - - die = pkg->dies; - while (die != NULL) { - kprintf(" Die:\n"); - kprintf(" Physical: %d\n", die->pdie_num); - kprintf(" Logical: %d\n", die->ldie_num); - - core = die->cores; - while (core != NULL) { - kprintf(" Core:\n"); - kprintf(" Physical: %d\n", core->pcore_num); - kprintf(" Logical: %d\n", core->lcore_num); - - cpu = core->lcpus; - while (cpu != NULL) { - kprintf(" LCPU:\n"); - kprintf(" CPU #: %d\n", cpu->cpu_num); - kprintf(" Physical: %d\n", cpu->pnum); - kprintf(" Logical: %d\n", cpu->lnum); - kprintf(" Flags: "); - if (cpu->master) - kprintf("MASTER "); - if (cpu->primary) - kprintf("PRIMARY"); - if (!cpu->master && !cpu->primary) - kprintf("(NONE)"); - kprintf("\n"); - - cpu = cpu->next_in_core; + x86_pkg_t *pkg; + x86_die_t *die; + x86_core_t *core; + x86_lcpu_t *cpu; + + pkg = x86_pkgs; + while (pkg != NULL) { + kprintf("Package:\n"); + kprintf(" Physical: %d\n", pkg->ppkg_num); + kprintf(" Logical: %d\n", pkg->lpkg_num); + + die = pkg->dies; + while (die != NULL) { + kprintf(" Die:\n"); + kprintf(" Physical: %d\n", die->pdie_num); + kprintf(" Logical: %d\n", die->ldie_num); + + core = die->cores; + while (core != NULL) { + kprintf(" Core:\n"); + kprintf(" Physical: %d\n", core->pcore_num); + kprintf(" Logical: %d\n", core->lcore_num); + + cpu = core->lcpus; + while (cpu != NULL) { + kprintf(" LCPU:\n"); + kprintf(" CPU #: %d\n", cpu->cpu_num); + kprintf(" Physical: %d\n", cpu->pnum); + kprintf(" Logical: %d\n", cpu->lnum); + kprintf(" Flags: "); + if (cpu->master) { + kprintf("MASTER "); + } + if (cpu->primary) { + kprintf("PRIMARY"); + } + if (!cpu->master && !cpu->primary) { + kprintf("(NONE)"); + } + kprintf("\n"); + + cpu = cpu->next_in_core; + } + + core = core->next_in_die; + } + + die = die->next_in_pkg; } - core = core->next_in_die; - } - - die = die->next_in_pkg; + pkg = pkg->next; } - - pkg = pkg->next; - } } diff --git a/osfmk/i386/cpu_threads.h b/osfmk/i386/cpu_threads.h index ff028e02a..5b60e5e02 100644 --- a/osfmk/i386/cpu_threads.h +++ b/osfmk/i386/cpu_threads.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
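All of the panics in x86_validate_topology() enforce the same multiplicative invariants between levels of the hierarchy. A worked example under assumed values, for a hypothetical single-package, single-die, quad-core part with two threads per core:

	/* Hypothetical topology parameters, for illustration only. */
	uint32_t nPackages        = 1;
	uint32_t nLDiesPerPackage = 1;
	uint32_t nLCoresPerDie    = 4;
	uint32_t nLThreadsPerCore = 2;

	uint32_t nLThreadsPerDie     = nLCoresPerDie * nLThreadsPerCore;        /* 8 */
	uint32_t nLThreadsPerPackage = nLDiesPerPackage * nLThreadsPerDie;      /* 8 */

	/* The per-level list walks must reproduce these products, and the
	 * grand total must match the processors registered from the MADT: */
	assert(nPackages * nLThreadsPerPackage == real_ncpus);                  /* 8 */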
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _I386_CPU_THREADS_H_ @@ -36,34 +36,34 @@ * These are defined here rather than in cpu_topology.h so as to keep * cpu_topology.h from having a dependency on cpu_data.h. */ -#define CPU_THREAD_MASK 0x00000001 -#define cpu_to_core_lapic(cpu) (cpu_to_lapic[cpu] & ~CPU_THREAD_MASK) -#define cpu_to_core_cpu(cpu) (lapic_to_cpu[cpu_to_core_lapic(cpu)]) -#define cpu_to_logical_cpu(cpu) (cpu_to_lapic[cpu] & CPU_THREAD_MASK) -#define cpu_is_core_cpu(cpu) (cpu_to_logical_cpu(cpu) == 0) +#define CPU_THREAD_MASK 0x00000001 +#define cpu_to_core_lapic(cpu) (cpu_to_lapic[cpu] & ~CPU_THREAD_MASK) +#define cpu_to_core_cpu(cpu) (lapic_to_cpu[cpu_to_core_lapic(cpu)]) +#define cpu_to_logical_cpu(cpu) (cpu_to_lapic[cpu] & CPU_THREAD_MASK) +#define cpu_is_core_cpu(cpu) (cpu_to_logical_cpu(cpu) == 0) -#define _cpu_to_lcpu(cpu) (&cpu_datap(cpu)->lcpu) -#define _cpu_to_core(cpu) (_cpu_to_lcpu(cpu)->core) -#define _cpu_to_package(cpu) (_cpu_to_core(cpu)->package) +#define _cpu_to_lcpu(cpu) (&cpu_datap(cpu)->lcpu) +#define _cpu_to_core(cpu) (_cpu_to_lcpu(cpu)->core) +#define _cpu_to_package(cpu) (_cpu_to_core(cpu)->package) -#define cpu_to_lcpu(cpu) ((cpu_datap(cpu) != NULL) ? _cpu_to_lcpu(cpu) : NULL) -#define cpu_to_core(cpu) ((cpu_to_lcpu(cpu) != NULL) ? _cpu_to_lcpu(cpu)->core : NULL) -#define cpu_to_die(cpu) ((cpu_to_lcpu(cpu) != NULL) ? _cpu_to_lcpu(cpu)->die : NULL) -#define cpu_to_package(cpu) ((cpu_to_lcpu(cpu) != NULL) ? _cpu_to_lcpu(cpu)->package : NULL) +#define cpu_to_lcpu(cpu) ((cpu_datap(cpu) != NULL) ? _cpu_to_lcpu(cpu) : NULL) +#define cpu_to_core(cpu) ((cpu_to_lcpu(cpu) != NULL) ? _cpu_to_lcpu(cpu)->core : NULL) +#define cpu_to_die(cpu) ((cpu_to_lcpu(cpu) != NULL) ? _cpu_to_lcpu(cpu)->die : NULL) +#define cpu_to_package(cpu) ((cpu_to_lcpu(cpu) != NULL) ? 
_cpu_to_lcpu(cpu)->package : NULL) /* Fast access: */ -#define x86_lcpu() (&current_cpu_datap()->lcpu) -#define x86_core() (x86_lcpu()->core) -#define x86_die() (x86_lcpu()->die) -#define x86_package() (x86_lcpu()->package) +#define x86_lcpu() (&current_cpu_datap()->lcpu) +#define x86_core() (x86_lcpu()->core) +#define x86_die() (x86_lcpu()->die) +#define x86_package() (x86_lcpu()->package) -#define cpu_is_same_core(cpu1,cpu2) (cpu_to_core(cpu1) == cpu_to_core(cpu2)) -#define cpu_is_same_die(cpu1,cpu2) (cpu_to_die(cpu1) == cpu_to_die(cpu2)) -#define cpu_is_same_package(cpu1,cpu2) (cpu_to_package(cpu1) == cpu_to_package(cpu2)) -#define cpus_share_cache(cpu1,cpu2,_cl) (cpu_to_lcpu(cpu1)->caches[_cl] == cpu_to_lcpu(cpu2)->caches[_cl]) +#define cpu_is_same_core(cpu1, cpu2) (cpu_to_core(cpu1) == cpu_to_core(cpu2)) +#define cpu_is_same_die(cpu1, cpu2) (cpu_to_die(cpu1) == cpu_to_die(cpu2)) +#define cpu_is_same_package(cpu1, cpu2) (cpu_to_package(cpu1) == cpu_to_package(cpu2)) +#define cpus_share_cache(cpu1, cpu2, _cl) (cpu_to_lcpu(cpu1)->caches[_cl] == cpu_to_lcpu(cpu2)->caches[_cl]) /* always take the x86_topo_lock with mp_safe_spin_lock */ -boolean_t mp_safe_spin_lock(usimple_lock_t lock); +boolean_t mp_safe_spin_lock(usimple_lock_t lock); extern decl_simple_lock_data(, x86_topo_lock); extern void *cpu_thread_alloc(int); @@ -73,13 +73,13 @@ extern void cpu_thread_halt(void); extern void x86_set_logical_topology(x86_lcpu_t *lcpu, int pnum, int lnum); extern void x86_validate_topology(void); -extern x86_topology_parameters_t topoParms; +extern x86_topology_parameters_t topoParms; -extern boolean_t topo_dbg; -#define TOPO_DBG(x...) \ - do { \ - if (topo_dbg) \ - kprintf(x); \ - } while (0) \ +extern boolean_t topo_dbg; +#define TOPO_DBG(x...) \ + do { \ + if (topo_dbg) \ + kprintf(x); \ + } while (0) \ #endif /* _I386_CPU_THREADS_H_ */ diff --git a/osfmk/i386/cpu_topology.c b/osfmk/i386/cpu_topology.c index a4a460893..37ce39b2a 100644 --- a/osfmk/i386/cpu_topology.c +++ b/osfmk/i386/cpu_topology.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,25 +40,33 @@ #include __private_extern__ void qsort( - void * array, - size_t nmembers, - size_t member_size, - int (*)(const void *, const void *)); + void * array, + size_t nmembers, + size_t member_size, + int (*)(const void *, const void *)); static int lapicid_cmp(const void *x, const void *y); static x86_affinity_set_t *find_cache_affinity(x86_cpu_cache_t *L2_cachep); -x86_affinity_set_t *x86_affinities = NULL; -static int x86_affinity_count = 0; +x86_affinity_set_t *x86_affinities = NULL; +static int x86_affinity_count = 0; extern cpu_data_t cpshadows[]; + +#if DEVELOPMENT || DEBUG +void iotrace_init(int ncpus); +#endif /* DEVELOPMENT || DEBUG */ + + /* Re-sort double-mapped CPU data shadows after topology discovery sorts the * primary CPU data structures by physical/APIC CPU ID. */ -static void cpu_shadow_sort(int ncpus) { +static void +cpu_shadow_sort(int ncpus) +{ for (int i = 0; i < ncpus; i++) { - cpu_data_t *cpup = cpu_datap(i); - ptrdiff_t coff = cpup - cpu_datap(0); + cpu_data_t *cpup = cpu_datap(i); + ptrdiff_t coff = cpup - cpu_datap(0); cpup->cd_shadow = &cpshadows[coff]; } @@ -66,7 +74,7 @@ static void cpu_shadow_sort(int ncpus) { /* * cpu_topology_sort() is called after all processors have been registered but - * before any non-boot processor id started. We establish canonical logical + * before any non-boot processor is started. We establish canonical logical * processor numbering - logical cpus must be contiguous, zero-based and * assigned in physical (local apic id) order. This step is required because * the discovery/registration order is non-deterministic - cores are registered @@ -76,9 +84,9 @@ static void cpu_shadow_sort(int ncpus) { void cpu_topology_sort(int ncpus) { - int i; - boolean_t istate; - processor_t lprim = NULL; + int i; + boolean_t istate; + processor_t lprim = NULL; assert(machine_info.physical_cpu == 1); assert(machine_info.logical_cpu == 1); @@ -91,11 +99,11 @@ cpu_topology_sort(int ncpus) if (topo_dbg) { TOPO_DBG("cpu_topology_start() %d cpu%s registered\n", - ncpus, (ncpus > 1) ? "s" : ""); + ncpus, (ncpus > 1) ? "s" : ""); for (i = 0; i < ncpus; i++) { - cpu_data_t *cpup = cpu_datap(i); + cpu_data_t *cpup = cpu_datap(i); TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n", - i, (void *) cpup, cpup->cpu_phys_number); + i, (void *) cpup, cpup->cpu_phys_number); } } @@ -105,16 +113,16 @@ cpu_topology_sort(int ncpus) */ if (ncpus > 1) { qsort((void *) &cpu_data_ptr[1], - ncpus - 1, - sizeof(cpu_data_t *), - lapicid_cmp); + ncpus - 1, + sizeof(cpu_data_t *), + lapicid_cmp); } if (topo_dbg) { TOPO_DBG("cpu_topology_start() after sorting:\n"); for (i = 0; i < ncpus; i++) { - cpu_data_t *cpup = cpu_datap(i); + cpu_data_t *cpup = cpu_datap(i); TOPO_DBG("\tcpu_data[%d]:%p local apic 0x%x\n", - i, (void *) cpup, cpup->cpu_phys_number); + i, (void *) cpup, cpup->cpu_phys_number); } } @@ -122,13 +130,13 @@ cpu_topology_sort(int ncpus) * Finalize logical numbers and map kept by the lapic code. 
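The canonical renumbering is nothing more than sorting the cpu_data pointer array (from slot 1, so the boot CPU keeps logical id 0) by local APIC ID and then rewriting each cpu_number to its resulting index. A self-contained sketch of the same idea, using a hypothetical demo_cpu_t in place of cpu_data_t:

	typedef struct { int cpu_number; unsigned int cpu_phys_number; } demo_cpu_t;

	static int
	by_apic_id(const void *x, const void *y)
	{
		const demo_cpu_t *a = *(const demo_cpu_t *const *)x;
		const demo_cpu_t *b = *(const demo_cpu_t *const *)y;

		return (a->cpu_phys_number > b->cpu_phys_number) -
		       (a->cpu_phys_number < b->cpu_phys_number);
	}

	/* qsort(&cpus[1], ncpus - 1, sizeof(demo_cpu_t *), by_apic_id);
	 * for (i = 0; i < ncpus; i++) cpus[i]->cpu_number = i;
	 * Logical ids are now contiguous, zero-based, and in APIC order. */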
*/ for (i = 0; i < ncpus; i++) { - cpu_data_t *cpup = cpu_datap(i); + cpu_data_t *cpup = cpu_datap(i); if (cpup->cpu_number != i) { kprintf("cpu_datap(%d):%p local apic id 0x%x " - "remapped from %d\n", - i, cpup, cpup->cpu_phys_number, - cpup->cpu_number); + "remapped from %d\n", + i, cpup, cpup->cpu_phys_number, + cpup->cpu_number); } cpup->cpu_number = i; lapic_cpu_map(cpup->cpu_phys_number, i); @@ -141,6 +149,10 @@ cpu_topology_sort(int ncpus) ml_set_interrupts_enabled(istate); TOPO_DBG("cpu_topology_start() LLC is L%d\n", topoParms.LLCDepth + 1); +#if DEVELOPMENT || DEBUG + iotrace_init(ncpus); +#endif /* DEVELOPMENT || DEBUG */ + /* * Let the CPU Power Management know that the topology is stable. */ @@ -154,40 +166,44 @@ cpu_topology_sort(int ncpus) */ TOPO_DBG("cpu_topology_start() creating affinity sets:\n"); for (i = 0; i < ncpus; i++) { - cpu_data_t *cpup = cpu_datap(i); - x86_lcpu_t *lcpup = cpu_to_lcpu(i); - x86_cpu_cache_t *LLC_cachep; - x86_affinity_set_t *aset; + cpu_data_t *cpup = cpu_datap(i); + x86_lcpu_t *lcpup = cpu_to_lcpu(i); + x86_cpu_cache_t *LLC_cachep; + x86_affinity_set_t *aset; LLC_cachep = lcpup->caches[topoParms.LLCDepth]; assert(LLC_cachep->type == CPU_CACHE_TYPE_UNIF); - aset = find_cache_affinity(LLC_cachep); + aset = find_cache_affinity(LLC_cachep); if (aset == NULL) { aset = (x86_affinity_set_t *) kalloc(sizeof(*aset)); - if (aset == NULL) + if (aset == NULL) { panic("cpu_topology_start() failed aset alloc"); + } aset->next = x86_affinities; x86_affinities = aset; aset->num = x86_affinity_count++; aset->cache = LLC_cachep; aset->pset = (i == master_cpu) ? - processor_pset(master_processor) : - pset_create(pset_node_root()); - if (aset->pset == PROCESSOR_SET_NULL) + processor_pset(master_processor) : + pset_create(pset_node_root()); + if (aset->pset == PROCESSOR_SET_NULL) { panic("cpu_topology_start: pset_create"); + } TOPO_DBG("\tnew set %p(%d) pset %p for cache %p\n", - aset, aset->num, aset->pset, aset->cache); + aset, aset->num, aset->pset, aset->cache); } TOPO_DBG("\tprocessor_init set %p(%d) lcpup %p(%d) cpu %p processor %p\n", - aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor); + aset, aset->num, lcpup, lcpup->cpu_num, cpup, cpup->cpu_processor); - if (i != master_cpu) + if (i != master_cpu) { processor_init(cpup->cpu_processor, i, aset->pset); + } if (lcpup->core->num_lcpus > 1) { - if (lcpup->lnum == 0) + if (lcpup->lnum == 0) { lprim = cpup->cpu_processor; + } processor_set_primary(cpup->cpu_processor, lprim); } @@ -200,46 +216,48 @@ cpu_topology_sort(int ncpus) kern_return_t cpu_topology_start_cpu( int cpunum ) { - int ncpus = machine_info.max_cpus; - int i = cpunum; + int ncpus = machine_info.max_cpus; + int i = cpunum; /* Decide whether to start a CPU, and actually start it */ TOPO_DBG("cpu_topology_start() processor_start():\n"); - if( i < ncpus) - { + if (i < ncpus) { TOPO_DBG("\tlcpu %d\n", cpu_datap(i)->cpu_number); - processor_start(cpu_datap(i)->cpu_processor); + processor_start(cpu_datap(i)->cpu_processor); return KERN_SUCCESS; + } else { + return KERN_FAILURE; } - else - return KERN_FAILURE; } static int lapicid_cmp(const void *x, const void *y) { - cpu_data_t *cpu_x = *((cpu_data_t **)(uintptr_t)x); - cpu_data_t *cpu_y = *((cpu_data_t **)(uintptr_t)y); + cpu_data_t *cpu_x = *((cpu_data_t **)(uintptr_t)x); + cpu_data_t *cpu_y = *((cpu_data_t **)(uintptr_t)y); TOPO_DBG("lapicid_cmp(%p,%p) (%d,%d)\n", - x, y, cpu_x->cpu_phys_number, cpu_y->cpu_phys_number); - if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number) + x, y, 
cpu_x->cpu_phys_number, cpu_y->cpu_phys_number); + if (cpu_x->cpu_phys_number < cpu_y->cpu_phys_number) { return -1; - if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number) + } + if (cpu_x->cpu_phys_number == cpu_y->cpu_phys_number) { return 0; + } return 1; } static x86_affinity_set_t * find_cache_affinity(x86_cpu_cache_t *l2_cachep) { - x86_affinity_set_t *aset; + x86_affinity_set_t *aset; for (aset = x86_affinities; aset != NULL; aset = aset->next) { - if (l2_cachep == aset->cache) + if (l2_cachep == aset->cache) { break; + } } - return aset; + return aset; } int @@ -249,13 +267,14 @@ ml_get_max_affinity_sets(void) } processor_set_t -ml_affinity_to_pset(uint32_t affinity_num) +ml_affinity_to_pset(uint32_t affinity_num) { - x86_affinity_set_t *aset; + x86_affinity_set_t *aset; for (aset = x86_affinities; aset != NULL; aset = aset->next) { - if (affinity_num == aset->num) + if (affinity_num == aset->num) { break; + } } return (aset == NULL) ? PROCESSOR_SET_NULL : aset->pset; } @@ -263,12 +282,12 @@ ml_affinity_to_pset(uint32_t affinity_num) uint64_t ml_cpu_cache_size(unsigned int level) { - x86_cpu_cache_t *cachep; + x86_cpu_cache_t *cachep; if (level == 0) { return machine_info.max_mem; - } else if ( 1 <= level && level <= MAX_CACHE_DEPTH) { - cachep = current_cpu_datap()->lcpu.caches[level-1]; + } else if (1 <= level && level <= MAX_CACHE_DEPTH) { + cachep = current_cpu_datap()->lcpu.caches[level - 1]; return cachep ? cachep->cache_size : 0; } else { return 0; @@ -278,15 +297,84 @@ ml_cpu_cache_size(unsigned int level) uint64_t ml_cpu_cache_sharing(unsigned int level) { - x86_cpu_cache_t *cachep; + x86_cpu_cache_t *cachep; if (level == 0) { return machine_info.max_cpus; - } else if ( 1 <= level && level <= MAX_CACHE_DEPTH) { - cachep = current_cpu_datap()->lcpu.caches[level-1]; + } else if (1 <= level && level <= MAX_CACHE_DEPTH) { + cachep = current_cpu_datap()->lcpu.caches[level - 1]; return cachep ? 
cachep->nlcpus : 0; } else { return 0; } } +#if DEVELOPMENT || DEBUG +volatile int mmiotrace_enabled = 1; +int iotrace_generators = 0; +int iotrace_entries_per_cpu = 0; +int *iotrace_next; +iotrace_entry_t **iotrace_ring; + +void +init_iotrace_bufs(int cpucnt, int entries_per_cpu) +{ + int i; + + iotrace_next = kalloc_tag(cpucnt * sizeof(int), VM_KERN_MEMORY_DIAG); + if (__improbable(iotrace_next == NULL)) { + iotrace_generators = 0; + return; + } else { + bzero(iotrace_next, cpucnt * sizeof(int)); + } + + iotrace_ring = kalloc_tag(cpucnt * sizeof(iotrace_entry_t *), VM_KERN_MEMORY_DIAG); + if (__improbable(iotrace_ring == NULL)) { + kfree(iotrace_next, cpucnt * sizeof(int)); + iotrace_generators = 0; + return; + } + for (i = 0; i < cpucnt; i++) { + iotrace_ring[i] = kalloc_tag(entries_per_cpu * sizeof(iotrace_entry_t), VM_KERN_MEMORY_DIAG); + if (__improbable(iotrace_ring[i] == NULL)) { + kfree(iotrace_next, cpucnt * sizeof(int)); + iotrace_next = NULL; + for (int j = 0; j < i; j++) { + kfree(iotrace_ring[j], entries_per_cpu * sizeof(iotrace_entry_t)); + } + kfree(iotrace_ring, cpucnt * sizeof(iotrace_entry_t *)); + iotrace_ring = NULL; + return; + } + bzero(iotrace_ring[i], entries_per_cpu * sizeof(iotrace_entry_t)); + } + + iotrace_entries_per_cpu = entries_per_cpu; + iotrace_generators = cpucnt; +} + +void +iotrace_init(int ncpus) +{ + int iot, epc; + int entries_per_cpu; + + if (PE_parse_boot_argn("iotrace", &iot, sizeof(iot))) { + mmiotrace_enabled = iot; + } + + if (mmiotrace_enabled == 0) { + return; + } + + if (PE_parse_boot_argn("iotrace_epc", &epc, sizeof(epc)) && + epc >= 1 && epc <= IOTRACE_MAX_ENTRIES_PER_CPU) { + entries_per_cpu = epc; + } else { + entries_per_cpu = DEFAULT_IOTRACE_ENTRIES_PER_CPU; + } + + init_iotrace_bufs(ncpus, entries_per_cpu); +} +#endif /* DEVELOPMENT || DEBUG */ diff --git a/osfmk/i386/cpu_topology.h b/osfmk/i386/cpu_topology.h index 715a25420..3e5567331 100644 --- a/osfmk/i386/cpu_topology.h +++ b/osfmk/i386/cpu_topology.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL_PRIVATE @@ -42,30 +42,29 @@ /* * Cache structure that can be used to identify the cache hierarchy. 
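Once init_iotrace_bufs() above has run, recording is lock-free per CPU: each CPU owns iotrace_ring[cpu] and advances its private iotrace_next[cpu] cursor, wrapping at iotrace_entries_per_cpu. A sketch of what a recording path could look like; the iotrace_entry_t field names here are assumptions, not taken from this patch:

	static inline void
	iotrace_record(int cpu, uint64_t vaddr, uint64_t val)
	{
		int slot;

		if (__improbable(iotrace_generators == 0)) {
			return;         /* tracing disabled or buffers not allocated */
		}
		slot = iotrace_next[cpu];
		iotrace_ring[cpu][slot].vaddr = vaddr;  /* assumed field name */
		iotrace_ring[cpu][slot].val = val;      /* assumed field name */
		iotrace_next[cpu] = (slot + 1) % iotrace_entries_per_cpu;
	}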
*/ -typedef struct x86_cpu_cache -{ - struct x86_cpu_cache *next; /* next cache at this level/lcpu */ - struct x86_die *die; /* die containing this cache (only for LLC) */ - uint8_t maxcpus; /* maximum # of cpus that can share */ - uint8_t nlcpus; /* # of logical cpus sharing this cache */ - uint8_t type; /* type of cache */ - uint8_t level; /* level of cache */ - uint16_t ways; /* # of ways in cache */ - uint16_t partitions; /* # of partitions in cache */ - uint16_t line_size; /* size of a cache line */ - uint32_t cache_size; /* total size of cache */ - struct x86_lcpu *cpus[0]; /* cpus sharing this cache */ +typedef struct x86_cpu_cache { + struct x86_cpu_cache *next; /* next cache at this level/lcpu */ + struct x86_die *die; /* die containing this cache (only for LLC) */ + uint8_t maxcpus; /* maximum # of cpus that can share */ + uint8_t nlcpus; /* # of logical cpus sharing this cache */ + uint8_t type; /* type of cache */ + uint8_t level; /* level of cache */ + uint16_t ways; /* # of ways in cache */ + uint16_t partitions; /* # of partitions in cache */ + uint16_t line_size; /* size of a cache line */ + uint32_t cache_size; /* total size of cache */ + struct x86_lcpu *cpus[0]; /* cpus sharing this cache */ } x86_cpu_cache_t; -#define CPU_CACHE_TYPE_DATA 1 /* data cache */ -#define CPU_CACHE_TYPE_INST 2 /* instruction cache */ -#define CPU_CACHE_TYPE_UNIF 3 /* unified cache */ +#define CPU_CACHE_TYPE_DATA 1 /* data cache */ +#define CPU_CACHE_TYPE_INST 2 /* instruction cache */ +#define CPU_CACHE_TYPE_UNIF 3 /* unified cache */ -#define CPU_CACHE_DEPTH_L1 0 -#define CPU_CACHE_DEPTH_L2 1 -#define CPU_CACHE_DEPTH_L3 2 +#define CPU_CACHE_DEPTH_L1 0 +#define CPU_CACHE_DEPTH_L2 1 +#define CPU_CACHE_DEPTH_L3 2 -#define MAX_CACHE_DEPTH 3 /* deepest cache */ +#define MAX_CACHE_DEPTH 3 /* deepest cache */ struct pmc; struct cpu_data; @@ -97,14 +96,13 @@ struct mca_state; * In normal system operation, CPUs will usually be transitioning between * LCPU_IDLE and LCPU_RUN. */ -typedef enum lcpu_state -{ - LCPU_OFF = 0, /* 0 so the right thing happens on boot */ - LCPU_HALT = 1, - LCPU_NONSCHED = 2, - LCPU_PAUSE = 3, - LCPU_IDLE = 4, - LCPU_RUN = 5, +typedef enum lcpu_state { + LCPU_OFF = 0,/* 0 so the right thing happens on boot */ + LCPU_HALT = 1, + LCPU_NONSCHED = 2, + LCPU_PAUSE = 3, + LCPU_IDLE = 4, + LCPU_RUN = 5, } lcpu_state_t; /* @@ -123,121 +121,116 @@ typedef enum lcpu_state * The logical CPU structure contains a third number which is the CPU number. * This number is identical to the CPU number used in other parts of the kernel. 
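On a hyperthreaded part the three numbering spaces described above genuinely diverge; for a hypothetical one-package, two-core, four-thread topology the mapping works out as:

	/*
	 *  cpu_num (kernel)   pnum (APIC id)   lnum (in core)   core
	 *  ----------------   --------------   --------------   ----
	 *         0                 0                0            0
	 *         1                 1                1            0
	 *         2                 2                0            1
	 *         3                 3                1            1
	 *
	 * so cpu_is_same_core(0, 1) holds while cpu_is_same_core(1, 2) does not.
	 */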
*/ -typedef struct x86_lcpu -{ - struct x86_lcpu *next_in_core; /* next logical cpu in core */ - struct x86_lcpu *next_in_die; /* next logical cpu in die */ - struct x86_lcpu *next_in_pkg; /* next logical cpu in package */ - struct x86_lcpu *lcpu; /* pointer back to self */ - struct x86_core *core; /* core containing the logical cpu */ - struct x86_die *die; /* die containing the logical cpu */ - struct x86_pkg *package; /* package containing the logical cpu */ - struct cpu_data *cpu; /* cpu_data structure */ - uint32_t flags; - uint32_t cpu_num; /* cpu number */ - uint32_t lnum; /* logical cpu number (within core) */ - uint32_t pnum; /* physical cpu number */ - boolean_t master; /* logical cpu is the master (boot) CPU */ - boolean_t primary; /* logical cpu is primary CPU in package */ - volatile lcpu_state_t state; /* state of the logical CPU */ - volatile boolean_t stopped; /* used to indicate that the CPU has "stopped" */ - uint64_t rtcPop; /* next timer pop programmed */ - uint64_t rtcDeadline; /* next etimer-requested deadline */ - x86_cpu_cache_t *caches[MAX_CACHE_DEPTH]; - void *pmStats; /* Power management stats for lcpu */ - void *pmState; /* Power management state for lcpu */ +typedef struct x86_lcpu { + struct x86_lcpu *next_in_core;/* next logical cpu in core */ + struct x86_lcpu *next_in_die;/* next logical cpu in die */ + struct x86_lcpu *next_in_pkg;/* next logical cpu in package */ + struct x86_lcpu *lcpu; /* pointer back to self */ + struct x86_core *core; /* core containing the logical cpu */ + struct x86_die *die; /* die containing the logical cpu */ + struct x86_pkg *package; /* package containing the logical cpu */ + struct cpu_data *cpu; /* cpu_data structure */ + uint32_t flags; + uint32_t cpu_num; /* cpu number */ + uint32_t lnum; /* logical cpu number (within core) */ + uint32_t pnum; /* physical cpu number */ + boolean_t master; /* logical cpu is the master (boot) CPU */ + boolean_t primary; /* logical cpu is primary CPU in package */ + volatile lcpu_state_t state;/* state of the logical CPU */ + volatile boolean_t stopped; /* used to indicate that the CPU has "stopped" */ + uint64_t rtcPop; /* next timer pop programmed */ + uint64_t rtcDeadline;/* next etimer-requested deadline */ + x86_cpu_cache_t *caches[MAX_CACHE_DEPTH]; + void *pmStats; /* Power management stats for lcpu */ + void *pmState; /* Power management state for lcpu */ } x86_lcpu_t; -#define X86CORE_FL_PRESENT 0x80000000 /* core is present */ -#define X86CORE_FL_READY 0x40000000 /* core struct is init'd */ -#define X86CORE_FL_HAS_HPET 0x10000000 /* core has HPET assigned */ -#define X86CORE_FL_HALTED 0x00008000 /* core is halted */ -#define X86CORE_FL_IDLE 0x00004000 /* core is idle */ - -typedef struct x86_core -{ - struct x86_core *next_in_die; /* next core in die */ - struct x86_core *next_in_pkg; /* next core in package */ - struct x86_die *die; /* die containing the core */ - struct x86_pkg *package; /* package containing core */ - struct x86_lcpu *lcpus; /* list of logical cpus in core */ - uint32_t flags; - uint32_t lcore_num; /* logical core # (unique within die) */ - uint32_t pcore_num; /* physical core # (globally unique) */ - uint32_t num_lcpus; /* Number of logical cpus */ - uint32_t active_lcpus; /* Number of {running, idle} cpus */ - void *pmStats; /* Power management stats for core */ - void *pmState; /* Power management state for core */ +#define X86CORE_FL_PRESENT 0x80000000 /* core is present */ +#define X86CORE_FL_READY 0x40000000 /* core struct is init'd */ +#define 
X86CORE_FL_HAS_HPET 0x10000000 /* core has HPET assigned */ +#define X86CORE_FL_HALTED 0x00008000 /* core is halted */ +#define X86CORE_FL_IDLE 0x00004000 /* core is idle */ + +typedef struct x86_core { + struct x86_core *next_in_die;/* next core in die */ + struct x86_core *next_in_pkg;/* next core in package */ + struct x86_die *die; /* die containing the core */ + struct x86_pkg *package; /* package containing core */ + struct x86_lcpu *lcpus; /* list of logical cpus in core */ + uint32_t flags; + uint32_t lcore_num; /* logical core # (unique within die) */ + uint32_t pcore_num; /* physical core # (globally unique) */ + uint32_t num_lcpus; /* Number of logical cpus */ + uint32_t active_lcpus;/* Number of {running, idle} cpus */ + void *pmStats; /* Power management stats for core */ + void *pmState; /* Power management state for core */ } x86_core_t; -#define X86DIE_FL_PRESENT 0x80000000 /* die is present */ -#define X86DIE_FL_READY 0x40000000 /* die struct is init'd */ - -typedef struct x86_die -{ - struct x86_die *next_in_pkg; /* next die in package */ - struct x86_lcpu *lcpus; /* list of lcpus in die */ - struct x86_core *cores; /* list of cores in die */ - struct x86_pkg *package; /* package containing the die */ - uint32_t flags; - uint32_t ldie_num; /* logical die # (unique to package) */ - uint32_t pdie_num; /* physical die # (globally unique) */ - uint32_t num_cores; /* Number of cores in die */ - x86_cpu_cache_t *LLC; /* LLC contained in this die */ - void *pmStats; /* Power Management stats for die */ - void *pmState; /* Power Management state for die */ +#define X86DIE_FL_PRESENT 0x80000000 /* die is present */ +#define X86DIE_FL_READY 0x40000000 /* die struct is init'd */ + +typedef struct x86_die { + struct x86_die *next_in_pkg;/* next die in package */ + struct x86_lcpu *lcpus; /* list of lcpus in die */ + struct x86_core *cores; /* list of cores in die */ + struct x86_pkg *package; /* package containing the die */ + uint32_t flags; + uint32_t ldie_num; /* logical die # (unique to package) */ + uint32_t pdie_num; /* physical die # (globally unique) */ + uint32_t num_cores; /* Number of cores in die */ + x86_cpu_cache_t *LLC; /* LLC contained in this die */ + void *pmStats; /* Power Management stats for die */ + void *pmState; /* Power Management state for die */ } x86_die_t; -#define X86PKG_FL_PRESENT 0x80000000 /* package is present */ -#define X86PKG_FL_READY 0x40000000 /* package struct init'd */ -#define X86PKG_FL_HAS_HPET 0x10000000 /* package has HPET assigned */ -#define X86PKG_FL_HALTED 0x00008000 /* package is halted */ -#define X86PKG_FL_IDLE 0x00004000 /* package is idle */ - -typedef struct x86_pkg -{ - struct x86_pkg *next; /* next package */ - struct x86_lcpu *lcpus; /* list of logical cpus in package */ - struct x86_core *cores; /* list of cores in package */ - struct x86_die *dies; /* list of dies in package */ - uint32_t flags; - uint32_t lpkg_num; /* logical package # */ - uint32_t ppkg_num; /* physical package # */ - uint32_t num_dies; /* number of dies in package */ - void *pmStats; /* Power Management stats for package*/ - void *pmState; /* Power Management state for package*/ - struct mca_state *mca_state; /* MCA state for memory errors */ - uint64_t package_idle_exits; - uint32_t num_idle; +#define X86PKG_FL_PRESENT 0x80000000 /* package is present */ +#define X86PKG_FL_READY 0x40000000 /* package struct init'd */ +#define X86PKG_FL_HAS_HPET 0x10000000 /* package has HPET assigned */ +#define X86PKG_FL_HALTED 0x00008000 /* package is halted */ 
+#define X86PKG_FL_IDLE 0x00004000 /* package is idle */ + +typedef struct x86_pkg { + struct x86_pkg *next; /* next package */ + struct x86_lcpu *lcpus; /* list of logical cpus in package */ + struct x86_core *cores; /* list of cores in package */ + struct x86_die *dies; /* list of dies in package */ + uint32_t flags; + uint32_t lpkg_num; /* logical package # */ + uint32_t ppkg_num; /* physical package # */ + uint32_t num_dies; /* number of dies in package */ + void *pmStats; /* Power Management stats for package*/ + void *pmState; /* Power Management state for package*/ + struct mca_state *mca_state; /* MCA state for memory errors */ + uint64_t package_idle_exits; + uint32_t num_idle; } x86_pkg_t; -extern x86_pkg_t *x86_pkgs; /* root of all CPU packages */ - -typedef struct x86_topology_parameters -{ - uint32_t LLCDepth; - uint32_t nCoresSharingLLC; - uint32_t nLCPUsSharingLLC; - uint32_t maxSharingLLC; - uint32_t nLThreadsPerCore; - uint32_t nPThreadsPerCore; - uint32_t nLCoresPerDie; - uint32_t nPCoresPerDie; - uint32_t nLDiesPerPackage; - uint32_t nPDiesPerPackage; - uint32_t nLThreadsPerDie; - uint32_t nPThreadsPerDie; - uint32_t nLThreadsPerPackage; - uint32_t nPThreadsPerPackage; - uint32_t nLCoresPerPackage; - uint32_t nPCoresPerPackage; - uint32_t nPackages; - boolean_t stable; +extern x86_pkg_t *x86_pkgs; /* root of all CPU packages */ + +typedef struct x86_topology_parameters { + uint32_t LLCDepth; + uint32_t nCoresSharingLLC; + uint32_t nLCPUsSharingLLC; + uint32_t maxSharingLLC; + uint32_t nLThreadsPerCore; + uint32_t nPThreadsPerCore; + uint32_t nLCoresPerDie; + uint32_t nPCoresPerDie; + uint32_t nLDiesPerPackage; + uint32_t nPDiesPerPackage; + uint32_t nLThreadsPerDie; + uint32_t nPThreadsPerDie; + uint32_t nLThreadsPerPackage; + uint32_t nPThreadsPerPackage; + uint32_t nLCoresPerPackage; + uint32_t nPCoresPerPackage; + uint32_t nPackages; + boolean_t stable; } x86_topology_parameters_t; /* Called after cpu discovery */ -extern void cpu_topology_sort(int ncpus); -extern kern_return_t cpu_topology_start_cpu(int cpunum); +extern void cpu_topology_sort(int ncpus); +extern kern_return_t cpu_topology_start_cpu(int cpunum); #endif /* _I386_CPU_TOPOLOGY_H_ */ diff --git a/osfmk/i386/cpuid.c b/osfmk/i386/cpuid.c index 0ebd786b5..75da80e5f 100644 --- a/osfmk/i386/cpuid.c +++ b/osfmk/i386/cpuid.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -31,73 +31,72 @@ #include #include +#include #include -static boolean_t cpuid_dbg +int force_tecs_at_idle; +int tecs_mode_supported; + +static boolean_t cpuid_dbg #if DEBUG - = TRUE; + = TRUE; #else - = FALSE; + = FALSE; #endif -#define DBG(x...) \ - do { \ - if (cpuid_dbg) \ - kprintf(x); \ - } while (0) \ +#define DBG(x...) \ + do { \ + if (cpuid_dbg) \ + kprintf(x); \ + } while (0) \ -#define min(a,b) ((a) < (b) ? (a) : (b)) -#define quad(hi,lo) (((uint64_t)(hi)) << 32 | (lo)) - -/* Only for 32bit values */ -#define bit32(n) (1U << (n)) -#define bitmask32(h,l) ((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1)) -#define bitfield32(x,h,l) ((((x) & bitmask32(h,l)) >> l)) +#define min(a, b) ((a) < (b) ? (a) : (b)) +#define quad(hi, lo) (((uint64_t)(hi)) << 32 | (lo)) /* * Leaf 2 cache descriptor encodings. */ typedef enum { - _NULL_, /* NULL (empty) descriptor */ - CACHE, /* Cache */ - TLB, /* TLB */ - STLB, /* Shared second-level unified TLB */ - PREFETCH /* Prefetch size */ + _NULL_, /* NULL (empty) descriptor */ + CACHE, /* Cache */ + TLB, /* TLB */ + STLB, /* Shared second-level unified TLB */ + PREFETCH /* Prefetch size */ } cpuid_leaf2_desc_type_t; typedef enum { - NA, /* Not Applicable */ - FULLY, /* Fully-associative */ - TRACE, /* Trace Cache (P4 only) */ - INST, /* Instruction TLB */ - DATA, /* Data TLB */ - DATA0, /* Data TLB, 1st level */ - DATA1, /* Data TLB, 2nd level */ - L1, /* L1 (unified) cache */ - L1_INST, /* L1 Instruction cache */ - L1_DATA, /* L1 Data cache */ - L2, /* L2 (unified) cache */ - L3, /* L3 (unified) cache */ - L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */ - L3_2LINESECTOR, /* L3(unified) cache with 2 lines per sector */ - SMALL, /* Small page TLB */ - LARGE, /* Large page TLB */ - BOTH /* Small and Large page TLB */ + NA, /* Not Applicable */ + FULLY, /* Fully-associative */ + TRACE, /* Trace Cache (P4 only) */ + INST, /* Instruction TLB */ + DATA, /* Data TLB */ + DATA0, /* Data TLB, 1st level */ + DATA1, /* Data TLB, 2nd level */ + L1, /* L1 (unified) cache */ + L1_INST, /* L1 Instruction cache */ + L1_DATA, /* L1 Data cache */ + L2, /* L2 (unified) cache */ + L3, /* L3 (unified) cache */ + L2_2LINESECTOR, /* L2 (unified) cache with 2 lines per sector */ + L3_2LINESECTOR, /* L3(unified) cache with 2 lines per sector */ + SMALL, /* Small page TLB */ + LARGE, /* Large page TLB */ + BOTH /* Small and Large page TLB */ } cpuid_leaf2_qualifier_t; typedef struct cpuid_cache_descriptor { - uint8_t value; /* descriptor code */ - uint8_t type; /* cpuid_leaf2_desc_type_t */ - uint8_t level; /* level of cache/TLB hierarchy */ - uint8_t ways; /* wayness of cache */ - uint16_t size; /* cachesize or TLB pagesize */ - uint16_t entries; /* number of TLB entries or linesize */ + uint8_t value; /* descriptor code */ + uint8_t type; /* cpuid_leaf2_desc_type_t */ + uint8_t level; /* level of cache/TLB hierarchy */ + uint8_t ways; /* wayness of cache */ + uint16_t size; /* cachesize or TLB pagesize */ + uint16_t entries; /* number of TLB entries or linesize */ } cpuid_cache_descriptor_t; /* - * These multipliers are used to encode 1*K .. 
64*M in a 16 bit size field */ -#define K (1) -#define M (1024) +#define K (1) +#define M (1024) /* * Intel cache descriptor table: @@ -106,121 +105,125 @@ static cpuid_cache_descriptor_t intel_cpuid_leaf2_descriptor_table[] = { // ------------------------------------------------------- // value type level ways size entries // ------------------------------------------------------- - { 0x00, _NULL_, NA, NA, NA, NA }, - { 0x01, TLB, INST, 4, SMALL, 32 }, - { 0x02, TLB, INST, FULLY, LARGE, 2 }, - { 0x03, TLB, DATA, 4, SMALL, 64 }, - { 0x04, TLB, DATA, 4, LARGE, 8 }, - { 0x05, TLB, DATA1, 4, LARGE, 32 }, - { 0x06, CACHE, L1_INST, 4, 8*K, 32 }, - { 0x08, CACHE, L1_INST, 4, 16*K, 32 }, - { 0x09, CACHE, L1_INST, 4, 32*K, 64 }, - { 0x0A, CACHE, L1_DATA, 2, 8*K, 32 }, - { 0x0B, TLB, INST, 4, LARGE, 4 }, - { 0x0C, CACHE, L1_DATA, 4, 16*K, 32 }, - { 0x0D, CACHE, L1_DATA, 4, 16*K, 64 }, - { 0x0E, CACHE, L1_DATA, 6, 24*K, 64 }, - { 0x21, CACHE, L2, 8, 256*K, 64 }, - { 0x22, CACHE, L3_2LINESECTOR, 4, 512*K, 64 }, - { 0x23, CACHE, L3_2LINESECTOR, 8, 1*M, 64 }, - { 0x25, CACHE, L3_2LINESECTOR, 8, 2*M, 64 }, - { 0x29, CACHE, L3_2LINESECTOR, 8, 4*M, 64 }, - { 0x2C, CACHE, L1_DATA, 8, 32*K, 64 }, - { 0x30, CACHE, L1_INST, 8, 32*K, 64 }, - { 0x40, CACHE, L2, NA, 0, NA }, - { 0x41, CACHE, L2, 4, 128*K, 32 }, - { 0x42, CACHE, L2, 4, 256*K, 32 }, - { 0x43, CACHE, L2, 4, 512*K, 32 }, - { 0x44, CACHE, L2, 4, 1*M, 32 }, - { 0x45, CACHE, L2, 4, 2*M, 32 }, - { 0x46, CACHE, L3, 4, 4*M, 64 }, - { 0x47, CACHE, L3, 8, 8*M, 64 }, - { 0x48, CACHE, L2, 12, 3*M, 64 }, - { 0x49, CACHE, L2, 16, 4*M, 64 }, - { 0x4A, CACHE, L3, 12, 6*M, 64 }, - { 0x4B, CACHE, L3, 16, 8*M, 64 }, - { 0x4C, CACHE, L3, 12, 12*M, 64 }, - { 0x4D, CACHE, L3, 16, 16*M, 64 }, - { 0x4E, CACHE, L2, 24, 6*M, 64 }, - { 0x4F, TLB, INST, NA, SMALL, 32 }, - { 0x50, TLB, INST, NA, BOTH, 64 }, - { 0x51, TLB, INST, NA, BOTH, 128 }, - { 0x52, TLB, INST, NA, BOTH, 256 }, - { 0x55, TLB, INST, FULLY, BOTH, 7 }, - { 0x56, TLB, DATA0, 4, LARGE, 16 }, - { 0x57, TLB, DATA0, 4, SMALL, 16 }, - { 0x59, TLB, DATA0, FULLY, SMALL, 16 }, - { 0x5A, TLB, DATA0, 4, LARGE, 32 }, - { 0x5B, TLB, DATA, NA, BOTH, 64 }, - { 0x5C, TLB, DATA, NA, BOTH, 128 }, - { 0x5D, TLB, DATA, NA, BOTH, 256 }, - { 0x60, CACHE, L1, 16*K, 8, 64 }, - { 0x61, CACHE, L1, 4, 8*K, 64 }, - { 0x62, CACHE, L1, 4, 16*K, 64 }, - { 0x63, CACHE, L1, 4, 32*K, 64 }, - { 0x70, CACHE, TRACE, 8, 12*K, NA }, - { 0x71, CACHE, TRACE, 8, 16*K, NA }, - { 0x72, CACHE, TRACE, 8, 32*K, NA }, - { 0x76, TLB, INST, NA, BOTH, 8 }, - { 0x78, CACHE, L2, 4, 1*M, 64 }, - { 0x79, CACHE, L2_2LINESECTOR, 8, 128*K, 64 }, - { 0x7A, CACHE, L2_2LINESECTOR, 8, 256*K, 64 }, - { 0x7B, CACHE, L2_2LINESECTOR, 8, 512*K, 64 }, - { 0x7C, CACHE, L2_2LINESECTOR, 8, 1*M, 64 }, - { 0x7D, CACHE, L2, 8, 2*M, 64 }, - { 0x7F, CACHE, L2, 2, 512*K, 64 }, - { 0x80, CACHE, L2, 8, 512*K, 64 }, - { 0x82, CACHE, L2, 8, 256*K, 32 }, - { 0x83, CACHE, L2, 8, 512*K, 32 }, - { 0x84, CACHE, L2, 8, 1*M, 32 }, - { 0x85, CACHE, L2, 8, 2*M, 32 }, - { 0x86, CACHE, L2, 4, 512*K, 64 }, - { 0x87, CACHE, L2, 8, 1*M, 64 }, - { 0xB0, TLB, INST, 4, SMALL, 128 }, - { 0xB1, TLB, INST, 4, LARGE, 8 }, - { 0xB2, TLB, INST, 4, SMALL, 64 }, - { 0xB3, TLB, DATA, 4, SMALL, 128 }, - { 0xB4, TLB, DATA1, 4, SMALL, 256 }, - { 0xB5, TLB, DATA1, 8, SMALL, 64 }, - { 0xB6, TLB, DATA1, 8, SMALL, 128 }, - { 0xBA, TLB, DATA1, 4, BOTH, 64 }, - { 0xC1, STLB, DATA1, 8, SMALL, 1024}, - { 0xCA, STLB, DATA1, 4, SMALL, 512 }, - { 0xD0, CACHE, L3, 4, 512*K, 64 }, - { 0xD1, CACHE, L3, 4, 1*M, 64 }, - { 0xD2, 
CACHE, L3, 4, 2*M, 64 }, - { 0xD3, CACHE, L3, 4, 4*M, 64 }, - { 0xD4, CACHE, L3, 4, 8*M, 64 }, - { 0xD6, CACHE, L3, 8, 1*M, 64 }, - { 0xD7, CACHE, L3, 8, 2*M, 64 }, - { 0xD8, CACHE, L3, 8, 4*M, 64 }, - { 0xD9, CACHE, L3, 8, 8*M, 64 }, - { 0xDA, CACHE, L3, 8, 12*M, 64 }, - { 0xDC, CACHE, L3, 12, 1536*K, 64 }, - { 0xDD, CACHE, L3, 12, 3*M, 64 }, - { 0xDE, CACHE, L3, 12, 6*M, 64 }, - { 0xDF, CACHE, L3, 12, 12*M, 64 }, - { 0xE0, CACHE, L3, 12, 18*M, 64 }, - { 0xE2, CACHE, L3, 16, 2*M, 64 }, - { 0xE3, CACHE, L3, 16, 4*M, 64 }, - { 0xE4, CACHE, L3, 16, 8*M, 64 }, - { 0xE5, CACHE, L3, 16, 16*M, 64 }, - { 0xE6, CACHE, L3, 16, 24*M, 64 }, - { 0xF0, PREFETCH, NA, NA, 64, NA }, - { 0xF1, PREFETCH, NA, NA, 128, NA }, - { 0xFF, CACHE, NA, NA, 0, NA } + { 0x00, _NULL_, NA, NA, NA, NA }, + { 0x01, TLB, INST, 4, SMALL, 32 }, + { 0x02, TLB, INST, FULLY, LARGE, 2 }, + { 0x03, TLB, DATA, 4, SMALL, 64 }, + { 0x04, TLB, DATA, 4, LARGE, 8 }, + { 0x05, TLB, DATA1, 4, LARGE, 32 }, + { 0x06, CACHE, L1_INST, 4, 8 * K, 32 }, + { 0x08, CACHE, L1_INST, 4, 16 * K, 32 }, + { 0x09, CACHE, L1_INST, 4, 32 * K, 64 }, + { 0x0A, CACHE, L1_DATA, 2, 8 * K, 32 }, + { 0x0B, TLB, INST, 4, LARGE, 4 }, + { 0x0C, CACHE, L1_DATA, 4, 16 * K, 32 }, + { 0x0D, CACHE, L1_DATA, 4, 16 * K, 64 }, + { 0x0E, CACHE, L1_DATA, 6, 24 * K, 64 }, + { 0x21, CACHE, L2, 8, 256 * K, 64 }, + { 0x22, CACHE, L3_2LINESECTOR, 4, 512 * K, 64 }, + { 0x23, CACHE, L3_2LINESECTOR, 8, 1 * M, 64 }, + { 0x25, CACHE, L3_2LINESECTOR, 8, 2 * M, 64 }, + { 0x29, CACHE, L3_2LINESECTOR, 8, 4 * M, 64 }, + { 0x2C, CACHE, L1_DATA, 8, 32 * K, 64 }, + { 0x30, CACHE, L1_INST, 8, 32 * K, 64 }, + { 0x40, CACHE, L2, NA, 0, NA }, + { 0x41, CACHE, L2, 4, 128 * K, 32 }, + { 0x42, CACHE, L2, 4, 256 * K, 32 }, + { 0x43, CACHE, L2, 4, 512 * K, 32 }, + { 0x44, CACHE, L2, 4, 1 * M, 32 }, + { 0x45, CACHE, L2, 4, 2 * M, 32 }, + { 0x46, CACHE, L3, 4, 4 * M, 64 }, + { 0x47, CACHE, L3, 8, 8 * M, 64 }, + { 0x48, CACHE, L2, 12, 3 * M, 64 }, + { 0x49, CACHE, L2, 16, 4 * M, 64 }, + { 0x4A, CACHE, L3, 12, 6 * M, 64 }, + { 0x4B, CACHE, L3, 16, 8 * M, 64 }, + { 0x4C, CACHE, L3, 12, 12 * M, 64 }, + { 0x4D, CACHE, L3, 16, 16 * M, 64 }, + { 0x4E, CACHE, L2, 24, 6 * M, 64 }, + { 0x4F, TLB, INST, NA, SMALL, 32 }, + { 0x50, TLB, INST, NA, BOTH, 64 }, + { 0x51, TLB, INST, NA, BOTH, 128 }, + { 0x52, TLB, INST, NA, BOTH, 256 }, + { 0x55, TLB, INST, FULLY, BOTH, 7 }, + { 0x56, TLB, DATA0, 4, LARGE, 16 }, + { 0x57, TLB, DATA0, 4, SMALL, 16 }, + { 0x59, TLB, DATA0, FULLY, SMALL, 16 }, + { 0x5A, TLB, DATA0, 4, LARGE, 32 }, + { 0x5B, TLB, DATA, NA, BOTH, 64 }, + { 0x5C, TLB, DATA, NA, BOTH, 128 }, + { 0x5D, TLB, DATA, NA, BOTH, 256 }, + { 0x60, CACHE, L1, 16 * K, 8, 64 }, + { 0x61, CACHE, L1, 4, 8 * K, 64 }, + { 0x62, CACHE, L1, 4, 16 * K, 64 }, + { 0x63, CACHE, L1, 4, 32 * K, 64 }, + { 0x70, CACHE, TRACE, 8, 12 * K, NA }, + { 0x71, CACHE, TRACE, 8, 16 * K, NA }, + { 0x72, CACHE, TRACE, 8, 32 * K, NA }, + { 0x76, TLB, INST, NA, BOTH, 8 }, + { 0x78, CACHE, L2, 4, 1 * M, 64 }, + { 0x79, CACHE, L2_2LINESECTOR, 8, 128 * K, 64 }, + { 0x7A, CACHE, L2_2LINESECTOR, 8, 256 * K, 64 }, + { 0x7B, CACHE, L2_2LINESECTOR, 8, 512 * K, 64 }, + { 0x7C, CACHE, L2_2LINESECTOR, 8, 1 * M, 64 }, + { 0x7D, CACHE, L2, 8, 2 * M, 64 }, + { 0x7F, CACHE, L2, 2, 512 * K, 64 }, + { 0x80, CACHE, L2, 8, 512 * K, 64 }, + { 0x82, CACHE, L2, 8, 256 * K, 32 }, + { 0x83, CACHE, L2, 8, 512 * K, 32 }, + { 0x84, CACHE, L2, 8, 1 * M, 32 }, + { 0x85, CACHE, L2, 8, 2 * M, 32 }, + { 0x86, CACHE, L2, 4, 512 * K, 64 }, + { 0x87, CACHE, L2, 8, 1 * M, 64 }, + { 
0xB0, TLB, INST, 4, SMALL, 128 }, + { 0xB1, TLB, INST, 4, LARGE, 8 }, + { 0xB2, TLB, INST, 4, SMALL, 64 }, + { 0xB3, TLB, DATA, 4, SMALL, 128 }, + { 0xB4, TLB, DATA1, 4, SMALL, 256 }, + { 0xB5, TLB, DATA1, 8, SMALL, 64 }, + { 0xB6, TLB, DATA1, 8, SMALL, 128 }, + { 0xBA, TLB, DATA1, 4, BOTH, 64 }, + { 0xC1, STLB, DATA1, 8, SMALL, 1024}, + { 0xCA, STLB, DATA1, 4, SMALL, 512 }, + { 0xD0, CACHE, L3, 4, 512 * K, 64 }, + { 0xD1, CACHE, L3, 4, 1 * M, 64 }, + { 0xD2, CACHE, L3, 4, 2 * M, 64 }, + { 0xD3, CACHE, L3, 4, 4 * M, 64 }, + { 0xD4, CACHE, L3, 4, 8 * M, 64 }, + { 0xD6, CACHE, L3, 8, 1 * M, 64 }, + { 0xD7, CACHE, L3, 8, 2 * M, 64 }, + { 0xD8, CACHE, L3, 8, 4 * M, 64 }, + { 0xD9, CACHE, L3, 8, 8 * M, 64 }, + { 0xDA, CACHE, L3, 8, 12 * M, 64 }, + { 0xDC, CACHE, L3, 12, 1536 * K, 64 }, + { 0xDD, CACHE, L3, 12, 3 * M, 64 }, + { 0xDE, CACHE, L3, 12, 6 * M, 64 }, + { 0xDF, CACHE, L3, 12, 12 * M, 64 }, + { 0xE0, CACHE, L3, 12, 18 * M, 64 }, + { 0xE2, CACHE, L3, 16, 2 * M, 64 }, + { 0xE3, CACHE, L3, 16, 4 * M, 64 }, + { 0xE4, CACHE, L3, 16, 8 * M, 64 }, + { 0xE5, CACHE, L3, 16, 16 * M, 64 }, + { 0xE6, CACHE, L3, 16, 24 * M, 64 }, + { 0xF0, PREFETCH, NA, NA, 64, NA }, + { 0xF1, PREFETCH, NA, NA, 128, NA }, + { 0xFF, CACHE, NA, NA, 0, NA } }; -#define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \ - sizeof(cpuid_cache_descriptor_t)) +#define INTEL_LEAF2_DESC_NUM (sizeof(intel_cpuid_leaf2_descriptor_table) / \ + sizeof(cpuid_cache_descriptor_t)) + +static void do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave); static inline cpuid_cache_descriptor_t * cpuid_leaf2_find(uint8_t value) { - unsigned int i; + unsigned int i; - for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++) - if (intel_cpuid_leaf2_descriptor_table[i].value == value) + for (i = 0; i < INTEL_LEAF2_DESC_NUM; i++) { + if (intel_cpuid_leaf2_descriptor_table[i].value == value) { return &intel_cpuid_leaf2_descriptor_table[i]; + } + } return NULL; } @@ -228,55 +231,112 @@ cpuid_leaf2_find(uint8_t value) * CPU identification routines. */ -static i386_cpu_info_t cpuid_cpu_info; -static i386_cpu_info_t *cpuid_cpu_infop = NULL; +static i386_cpu_info_t cpuid_cpu_info; +static i386_cpu_info_t *cpuid_cpu_infop = NULL; -static void cpuid_fn(uint32_t selector, uint32_t *result) +static void +cpuid_fn(uint32_t selector, uint32_t *result) { do_cpuid(selector, result); DBG("cpuid_fn(0x%08x) eax:0x%08x ebx:0x%08x ecx:0x%08x edx:0x%08x\n", - selector, result[0], result[1], result[2], result[3]); + selector, result[0], result[1], result[2], result[3]); } static const char *cache_type_str[LCACHE_MAX] = { "Lnone", "L1I", "L1D", "L2U", "L3U" }; +static void +do_cwas(i386_cpu_info_t *cpuinfo, boolean_t on_slave) +{ + extern int force_thread_policy_tecs; + + /* + * Workaround for reclaiming perf counter 3 due to TSX memory ordering erratum. + * This workaround does not support being forcibly set (since an MSR must be + * enumerated, lest we #GP when forced to access it.) + * When RTM_FORCE_FORCE is enabled all RTM transactions on the logical CPU will + * forcefully abort, but the general purpose counter 3 will report correct values. 
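+ * (Illustrative aside, not in the original source: since CWA_ON == 2 and
+ * CWA_FORCE_ON == 3 share bit 1 -- see cwa_classifier_e in cpuid.h -- a
+ * caller that accepted either answer could test the result as a mask,
+ *
+ *     if (cpuid_wa_required(CPU_INTEL_TSXFA) & CWA_ON) { ... }
+ *
+ * but the check below intentionally compares == CWA_ON, because this
+ * workaround must never be forced on: the MSR has to be enumerated before
+ * it is safe to touch.)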
+ */ + if (cpuid_wa_required(CPU_INTEL_TSXFA) == CWA_ON) { + wrmsr64(MSR_IA32_TSX_FORCE_ABORT, + rdmsr64(MSR_IA32_TSX_FORCE_ABORT) | MSR_IA32_TSXFA_RTM_FORCE_ABORT); + } + + if (on_slave) { + return; + } + + switch (cpuid_wa_required(CPU_INTEL_SEGCHK)) { + case CWA_FORCE_ON: + force_thread_policy_tecs = 1; + + /* If hyperthreaded, enable idle workaround */ + if (cpuinfo->thread_count > cpuinfo->core_count) { + force_tecs_at_idle = 1; + } + + /*FALLTHROUGH*/ + case CWA_ON: + tecs_mode_supported = 1; + break; + + case CWA_FORCE_OFF: + case CWA_OFF: + tecs_mode_supported = 0; + force_tecs_at_idle = 0; + force_thread_policy_tecs = 0; + break; + + default: + break; + } +} + +void +cpuid_do_was(void) +{ + do_cwas(cpuid_info(), TRUE); +} + /* this function is Intel-specific */ static void cpuid_set_cache_info( i386_cpu_info_t * info_p ) { - uint32_t cpuid_result[4]; - uint32_t reg[4]; - uint32_t index; - uint32_t linesizes[LCACHE_MAX]; - unsigned int i; - unsigned int j; - boolean_t cpuid_deterministic_supported = FALSE; + uint32_t cpuid_result[4]; + uint32_t reg[4]; + uint32_t index; + uint32_t linesizes[LCACHE_MAX]; + unsigned int i; + unsigned int j; + boolean_t cpuid_deterministic_supported = FALSE; DBG("cpuid_set_cache_info(%p)\n", info_p); - bzero( linesizes, sizeof(linesizes) ); + bzero( linesizes, sizeof(linesizes)); /* Get processor cache descriptor info using leaf 2. We don't use * this internally, but must publish it for KEXTs. */ cpuid_fn(2, cpuid_result); for (j = 0; j < 4; j++) { - if ((cpuid_result[j] >> 31) == 1) /* bit31 is validity */ + if ((cpuid_result[j] >> 31) == 1) { /* bit31 is validity */ continue; + } ((uint32_t *) info_p->cache_info)[j] = cpuid_result[j]; } /* first byte gives number of cpuid calls to get all descriptors */ for (i = 1; i < info_p->cache_info[0]; i++) { - if (i*16 > sizeof(info_p->cache_info)) + if (i * 16 > sizeof(info_p->cache_info)) { break; + } cpuid_fn(2, cpuid_result); for (j = 0; j < 4; j++) { - if ((cpuid_result[j] >> 31) == 1) + if ((cpuid_result[j] >> 31) == 1) { continue; - ((uint32_t *) info_p->cache_info)[4*i+j] = - cpuid_result[j]; + } + ((uint32_t *) info_p->cache_info)[4 * i + j] = + cpuid_result[j]; } } @@ -286,62 +346,64 @@ cpuid_set_cache_info( i386_cpu_info_t * info_p ) * Loop over each cache on the processor. 
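 * A minimal standalone sketch of the same walk, assuming only the cpuid()
 * helper and the bitfield32() macro from cpuid.h (sketch not part of the
 * original source):
 *
 *     for (uint32_t idx = 0; ; idx++) {
 *         uint32_t r[4] = { 4, 0, idx, 0 };   // leaf 4, subleaf idx in %ecx
 *         cpuid(r);
 *         if (bitfield32(r[0], 4, 0) == 0)    // EAX[4:0] == 0: no more caches
 *             break;
 *         // EAX[7:5] gives the cache level; EBX/ECX give the geometry
 *     }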
*/ cpuid_fn(0, cpuid_result); - if (cpuid_result[eax] >= 4) + if (cpuid_result[eax] >= 4) { cpuid_deterministic_supported = TRUE; + } for (index = 0; cpuid_deterministic_supported; index++) { - cache_type_t type = Lnone; - uint32_t cache_type; - uint32_t cache_level; - uint32_t cache_sharing; - uint32_t cache_linesize; - uint32_t cache_sets; - uint32_t cache_associativity; - uint32_t cache_size; - uint32_t cache_partitions; - uint32_t colors; - - reg[eax] = 4; /* cpuid request 4 */ - reg[ecx] = index; /* index starting at 0 */ + cache_type_t type = Lnone; + uint32_t cache_type; + uint32_t cache_level; + uint32_t cache_sharing; + uint32_t cache_linesize; + uint32_t cache_sets; + uint32_t cache_associativity; + uint32_t cache_size; + uint32_t cache_partitions; + uint32_t colors; + + reg[eax] = 4; /* cpuid request 4 */ + reg[ecx] = index; /* index starting at 0 */ cpuid(reg); DBG("cpuid(4) index=%d eax=0x%x\n", index, reg[eax]); cache_type = bitfield32(reg[eax], 4, 0); - if (cache_type == 0) - break; /* no more caches */ - cache_level = bitfield32(reg[eax], 7, 5); - cache_sharing = bitfield32(reg[eax], 25, 14) + 1; - info_p->cpuid_cores_per_package - = bitfield32(reg[eax], 31, 26) + 1; - cache_linesize = bitfield32(reg[ebx], 11, 0) + 1; - cache_partitions = bitfield32(reg[ebx], 21, 12) + 1; - cache_associativity = bitfield32(reg[ebx], 31, 22) + 1; - cache_sets = bitfield32(reg[ecx], 31, 0) + 1; - + if (cache_type == 0) { + break; /* no more caches */ + } + cache_level = bitfield32(reg[eax], 7, 5); + cache_sharing = bitfield32(reg[eax], 25, 14) + 1; + info_p->cpuid_cores_per_package + = bitfield32(reg[eax], 31, 26) + 1; + cache_linesize = bitfield32(reg[ebx], 11, 0) + 1; + cache_partitions = bitfield32(reg[ebx], 21, 12) + 1; + cache_associativity = bitfield32(reg[ebx], 31, 22) + 1; + cache_sets = bitfield32(reg[ecx], 31, 0) + 1; + /* Map type/levels returned by CPUID into cache_type_t */ switch (cache_level) { case 1: type = cache_type == 1 ? L1D : - cache_type == 2 ? L1I : - Lnone; + cache_type == 2 ? L1I : + Lnone; break; case 2: type = cache_type == 3 ? L2U : - Lnone; + Lnone; break; case 3: type = cache_type == 3 ? L3U : - Lnone; + Lnone; break; default: type = Lnone; } - + /* The total size of a cache is: * ( linesize * sets * associativity * partitions ) */ if (type != Lnone) { cache_size = cache_linesize * cache_sets * - cache_associativity * cache_partitions; + cache_associativity * cache_partitions; info_p->cache_size[type] = cache_size; info_p->cache_sharing[type] = cache_sharing; info_p->cache_partitions[type] = cache_partitions; @@ -359,14 +421,16 @@ cpuid_set_cache_info( i386_cpu_info_t * info_p ) * CPUID.0x80000006 -- this leaf is more * accurate */ - if (type == L2U) + if (type == L2U) { info_p->cpuid_cache_L2_associativity = cache_associativity; - /* - * Adjust #sets to account for the N CBos - * This is because addresses are hashed across CBos - */ - if (type == L3U && info_p->core_count) - cache_sets = cache_sets / info_p->core_count; + } + /* + * Adjust #sets to account for the N CBos + * This is because addresses are hashed across CBos + */ + if (type == L3U && info_p->core_count) { + cache_sets = cache_sets / info_p->core_count; + } /* Compute the number of page colors for this cache, * which is: @@ -389,14 +453,15 @@ cpuid_set_cache_info( i386_cpu_info_t * info_p ) * The color is those bits in (set+offset) not covered * by the page offset. 
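 * Worked example (not from the original comment): a 64-byte-line L3 with
 * 8192 sets spans 64 * 8192 = 512KiB of set+offset addressing, so
 * (64 * 8192) >> 12 = 128 page colors with 4KiB pages.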
*/ - colors = ( cache_linesize * cache_sets ) >> 12; - - if ( colors > vm_cache_geometry_colors ) + colors = (cache_linesize * cache_sets) >> 12; + + if (colors > vm_cache_geometry_colors) { vm_cache_geometry_colors = colors; + } } - } + } DBG(" vm_cache_geometry_colors: %d\n", vm_cache_geometry_colors); - + /* * If deterministic cache parameters are not available, use * something else @@ -418,16 +483,18 @@ cpuid_set_cache_info( i386_cpu_info_t * info_p ) DBG(" linesizes[L2U] : %d\n", info_p->cpuid_cache_linesize); } - + /* * What linesize to publish? We use the L2 linesize if any, * else the L1D. */ - if ( linesizes[L2U] ) + if (linesizes[L2U]) { info_p->cache_linesize = linesizes[L2U]; - else if (linesizes[L1D]) + } else if (linesizes[L1D]) { info_p->cache_linesize = linesizes[L1D]; - else panic("no linesize"); + } else { + panic("no linesize"); + } DBG(" cache_linesize : %d\n", info_p->cache_linesize); /* @@ -435,15 +502,16 @@ cpuid_set_cache_info( i386_cpu_info_t * info_p ) */ DBG(" %ld leaf2 descriptors:\n", sizeof(info_p->cache_info)); for (i = 1; i < sizeof(info_p->cache_info); i++) { - cpuid_cache_descriptor_t *descp; - int id; - int level; - int page; + cpuid_cache_descriptor_t *descp; + int id; + int level; + int page; DBG(" 0x%02x", info_p->cache_info[i]); descp = cpuid_leaf2_find(info_p->cache_info[i]); - if (descp == NULL) + if (descp == NULL) { continue; + } switch (descp->type) { case TLB: @@ -481,8 +549,8 @@ cpuid_set_cache_info( i386_cpu_info_t * info_p ) static void cpuid_set_generic_info(i386_cpu_info_t *info_p) { - uint32_t reg[4]; - char str[128], *p; + uint32_t reg[4]; + char str[128], *p; DBG("cpuid_set_generic_info(%p)\n", info_p); @@ -511,28 +579,30 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) cpuid_fn(0x80000004, reg); bcopy((char *)reg, &str[32], 16); for (p = str; *p != '\0'; p++) { - if (*p != ' ') break; + if (*p != ' ') { + break; + } } strlcpy(info_p->cpuid_brand_string, - p, sizeof(info_p->cpuid_brand_string)); - - if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN, - min(sizeof(info_p->cpuid_brand_string), - strlen(CPUID_STRING_UNKNOWN) + 1))) { - /* - * This string means we have a firmware-programmable brand string, - * and the firmware couldn't figure out what sort of CPU we have. - */ - info_p->cpuid_brand_string[0] = '\0'; - } + p, sizeof(info_p->cpuid_brand_string)); + + if (!strncmp(info_p->cpuid_brand_string, CPUID_STRING_UNKNOWN, + min(sizeof(info_p->cpuid_brand_string), + strlen(CPUID_STRING_UNKNOWN) + 1))) { + /* + * This string means we have a firmware-programmable brand string, + * and the firmware couldn't figure out what sort of CPU we have. + */ + info_p->cpuid_brand_string[0] = '\0'; + } } - + /* Get cache and addressing info. */ if (info_p->cpuid_max_ext >= 0x80000006) { uint32_t assoc; cpuid_fn(0x80000006, reg); info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0); - assoc = bitfield32(reg[ecx],15,12); + assoc = bitfield32(reg[ecx], 15, 12); /* * L2 associativity is encoded, though in an insufficiently * descriptive fashion, e.g. 24-way is mapped to 16-way. @@ -540,19 +610,20 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) * Overwritten by associativity as determined via CPUID.4 * if available. 
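 * For reference (summarizing the decode below, not in the original text):
 * ECX[15:12] of leaf 0x80000006 maps 0x6 -> 8-way and 0x8 -> 16-way, and
 * 0xF ("fully associative") is widened to the 0xFFFF sentinel; a 24-way
 * cache therefore reports as 16-way here until CPUID.4 data, when present,
 * overwrites it.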
*/ - if (assoc == 6) + if (assoc == 6) { assoc = 8; - else if (assoc == 8) + } else if (assoc == 8) { assoc = 16; - else if (assoc == 0xF) + } else if (assoc == 0xF) { assoc = 0xFFFF; + } info_p->cpuid_cache_L2_associativity = assoc; - info_p->cpuid_cache_size = bitfield32(reg[ecx],31,16); + info_p->cpuid_cache_size = bitfield32(reg[ecx], 31, 16); cpuid_fn(0x80000008, reg); info_p->cpuid_address_bits_physical = - bitfield32(reg[eax], 7, 0); + bitfield32(reg[eax], 7, 0); info_p->cpuid_address_bits_virtual = - bitfield32(reg[eax],15, 8); + bitfield32(reg[eax], 15, 8); } /* @@ -563,36 +634,39 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) wrmsr64(MSR_IA32_BIOS_SIGN_ID, 0); cpuid_fn(1, reg); info_p->cpuid_microcode_version = - (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32); + (uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32); info_p->cpuid_signature = reg[eax]; - info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0); - info_p->cpuid_model = bitfield32(reg[eax], 7, 4); - info_p->cpuid_family = bitfield32(reg[eax], 11, 8); + info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0); + info_p->cpuid_model = bitfield32(reg[eax], 7, 4); + info_p->cpuid_family = bitfield32(reg[eax], 11, 8); info_p->cpuid_type = bitfield32(reg[eax], 13, 12); info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16); info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20); - info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0); + info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0); info_p->cpuid_features = quad(reg[ecx], reg[edx]); /* Get "processor flag"; necessary for microcode update matching */ - info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID)>> 50) & 0x7; + info_p->cpuid_processor_flag = (rdmsr64(MSR_IA32_PLATFORM_ID) >> 50) & 0x7; /* Fold extensions into family/model */ - if (info_p->cpuid_family == 0x0f) + if (info_p->cpuid_family == 0x0f) { info_p->cpuid_family += info_p->cpuid_extfamily; - if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06) + } + if (info_p->cpuid_family == 0x0f || info_p->cpuid_family == 0x06) { info_p->cpuid_model += (info_p->cpuid_extmodel << 4); + } - if (info_p->cpuid_features & CPUID_FEATURE_HTT) + if (info_p->cpuid_features & CPUID_FEATURE_HTT) { info_p->cpuid_logical_per_package = - bitfield32(reg[ebx], 23, 16); - else + bitfield32(reg[ebx], 23, 16); + } else { info_p->cpuid_logical_per_package = 1; + } if (info_p->cpuid_max_ext >= 0x80000001) { cpuid_fn(0x80000001, reg); info_p->cpuid_extfeatures = - quad(reg[ecx], reg[edx]); + quad(reg[ecx], reg[edx]); } DBG(" max_basic : %d\n", info_p->cpuid_max_basic); @@ -614,15 +688,15 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) /* Fold in the Invariant TSC feature bit, if present */ if (info_p->cpuid_max_ext >= 0x80000007) { - cpuid_fn(0x80000007, reg); + cpuid_fn(0x80000007, reg); info_p->cpuid_extfeatures |= - reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI; + reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI; DBG(" extfeatures : 0x%016llx\n", info_p->cpuid_extfeatures); } if (info_p->cpuid_max_basic >= 0x5) { - cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf; + cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf; /* * Extract the Monitor/Mwait Leaf info: @@ -642,22 +716,22 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) } if (info_p->cpuid_max_basic >= 0x6) { - cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf; + cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf; /* * The thermal and Power Leaf: */ cpuid_fn(6, reg); - ctp->sensor = bitfield32(reg[eax], 0, 0); + ctp->sensor = bitfield32(reg[eax], 0, 0); 
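	/*
	 * Each flag below is one CPUID.6 bit extracted with
	 * bitfield32(x, h, l) == ((x & bitmask32(h, l)) >> l); e.g. with
	 * reg[eax] == 0x6, bitfield32(0x6, 1, 1) == 1 (dynamic acceleration)
	 * and bitfield32(0x6, 0, 0) == 0 (no digital sensor). (Illustrative
	 * values, not taken from the original source.)
	 */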
ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1); ctp->invariant_APIC_timer = bitfield32(reg[eax], 2, 2); ctp->core_power_limits = bitfield32(reg[eax], 4, 4); ctp->fine_grain_clock_mod = bitfield32(reg[eax], 5, 5); ctp->package_thermal_intr = bitfield32(reg[eax], 6, 6); - ctp->thresholds = bitfield32(reg[ebx], 3, 0); - ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0); - ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1); - ctp->energy_policy = bitfield32(reg[ecx], 3, 3); + ctp->thresholds = bitfield32(reg[ebx], 3, 0); + ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0); + ctp->hardware_feedback = bitfield32(reg[ecx], 1, 1); + ctp->energy_policy = bitfield32(reg[ecx], 3, 3); info_p->cpuid_thermal_leafp = ctp; DBG(" Thermal/Power Leaf:\n"); @@ -674,19 +748,19 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) } if (info_p->cpuid_max_basic >= 0xa) { - cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf; + cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf; /* * Architectural Performance Monitoring Leaf: */ cpuid_fn(0xa, reg); - capp->version = bitfield32(reg[eax], 7, 0); - capp->number = bitfield32(reg[eax], 15, 8); - capp->width = bitfield32(reg[eax], 23, 16); + capp->version = bitfield32(reg[eax], 7, 0); + capp->number = bitfield32(reg[eax], 15, 8); + capp->width = bitfield32(reg[eax], 23, 16); capp->events_number = bitfield32(reg[eax], 31, 24); - capp->events = reg[ebx]; - capp->fixed_number = bitfield32(reg[edx], 4, 0); - capp->fixed_width = bitfield32(reg[edx], 12, 5); + capp->events = reg[ebx]; + capp->fixed_number = bitfield32(reg[edx], 4, 0); + capp->fixed_width = bitfield32(reg[edx], 12, 5); info_p->cpuid_arch_perf_leafp = capp; DBG(" Architectural Performance Monitoring Leaf:\n"); @@ -700,7 +774,7 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) } if (info_p->cpuid_max_basic >= 0xd) { - cpuid_xsave_leaf_t *xsp; + cpuid_xsave_leaf_t *xsp; /* * XSAVE Features: */ @@ -724,7 +798,6 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) DBG(" EBX : 0x%x\n", xsp->extended_state[ebx]); DBG(" ECX : 0x%x\n", xsp->extended_state[ecx]); DBG(" EDX : 0x%x\n", xsp->extended_state[edx]); - } if (info_p->cpuid_model >= CPUID_MODEL_IVYBRIDGE) { @@ -733,10 +806,12 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p) */ cpuid_fn(0x7, reg); info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]); + info_p->cpuid_leaf7_extfeatures = reg[edx]; DBG(" Feature Leaf7:\n"); DBG(" EBX : 0x%x\n", reg[ebx]); DBG(" ECX : 0x%x\n", reg[ecx]); + DBG(" EDX : 0x%x\n", reg[edx]); } if (info_p->cpuid_max_basic >= 0x15) { @@ -802,10 +877,10 @@ cpuid_set_cpufamily(i386_cpu_info_t *info_p) #endif cpufamily = CPUFAMILY_INTEL_SKYLAKE; break; - case CPUID_MODEL_KABYLAKE: - case CPUID_MODEL_KABYLAKE_DT: - cpufamily = CPUFAMILY_INTEL_KABYLAKE; - break; + case CPUID_MODEL_KABYLAKE: + case CPUID_MODEL_KABYLAKE_DT: + cpufamily = CPUFAMILY_INTEL_KABYLAKE; + break; } break; } @@ -821,22 +896,23 @@ cpuid_set_cpufamily(i386_cpu_info_t *info_p) void cpuid_set_info(void) { - i386_cpu_info_t *info_p = &cpuid_cpu_info; - boolean_t enable_x86_64h = TRUE; + i386_cpu_info_t *info_p = &cpuid_cpu_info; + boolean_t enable_x86_64h = TRUE; cpuid_set_generic_info(info_p); /* verify we are running on a supported CPU */ if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor, - min(strlen(CPUID_STRING_UNKNOWN) + 1, - sizeof(info_p->cpuid_vendor)))) || - (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN)) + min(strlen(CPUID_STRING_UNKNOWN) + 1, + sizeof(info_p->cpuid_vendor)))) || + (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN)) { 
panic("Unsupported CPU"); + } info_p->cpuid_cpu_type = CPU_TYPE_X86; if (!PE_parse_boot_argn("-enable_x86_64h", &enable_x86_64h, sizeof(enable_x86_64h))) { - boolean_t disable_x86_64h = FALSE; + boolean_t disable_x86_64h = FALSE; if (PE_parse_boot_argn("-disable_x86_64h", &disable_x86_64h, sizeof(disable_x86_64h))) { enable_x86_64h = FALSE; @@ -853,8 +929,9 @@ cpuid_set_info(void) } /* cpuid_set_cache_info must be invoked after set_generic_info */ - if (info_p->cpuid_cpufamily == CPUFAMILY_INTEL_PENRYN) + if (info_p->cpuid_cpufamily == CPUFAMILY_INTEL_PENRYN) { cpuid_set_cache_info(info_p); + } /* * Find the number of enabled cores and threads @@ -868,26 +945,28 @@ cpuid_set_info(void) case CPUFAMILY_INTEL_WESTMERE: { uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT); info_p->core_count = bitfield32((uint32_t)msr, 19, 16); - info_p->thread_count = bitfield32((uint32_t)msr, 15, 0); + info_p->thread_count = bitfield32((uint32_t)msr, 15, 0); break; - } + } default: { uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT); - if (msr == 0) + if (msr == 0) { /* Provide a non-zero default for some VMMs */ msr = (1 << 16) + 1; + } info_p->core_count = bitfield32((uint32_t)msr, 31, 16); - info_p->thread_count = bitfield32((uint32_t)msr, 15, 0); + info_p->thread_count = bitfield32((uint32_t)msr, 15, 0); break; - } + } } if (info_p->core_count == 0) { info_p->core_count = info_p->cpuid_cores_per_package; info_p->thread_count = info_p->cpuid_logical_per_package; } - if (info_p->cpuid_cpufamily != CPUFAMILY_INTEL_PENRYN) + if (info_p->cpuid_cpufamily != CPUFAMILY_INTEL_PENRYN) { cpuid_set_cache_info(info_p); + } DBG("cpuid_set_info():\n"); DBG(" core_count : %d\n", info_p->core_count); @@ -896,135 +975,164 @@ cpuid_set_info(void) DBG(" cpu_subtype: 0x%08x\n", info_p->cpuid_cpu_subtype); info_p->cpuid_model_string = ""; /* deprecated */ + + do_cwas(info_p, FALSE); } static struct table { - uint64_t mask; - const char *name; + uint64_t mask; + const char *name; } feature_map[] = { - {CPUID_FEATURE_FPU, "FPU"}, - {CPUID_FEATURE_VME, "VME"}, - {CPUID_FEATURE_DE, "DE"}, - {CPUID_FEATURE_PSE, "PSE"}, - {CPUID_FEATURE_TSC, "TSC"}, - {CPUID_FEATURE_MSR, "MSR"}, - {CPUID_FEATURE_PAE, "PAE"}, - {CPUID_FEATURE_MCE, "MCE"}, - {CPUID_FEATURE_CX8, "CX8"}, - {CPUID_FEATURE_APIC, "APIC"}, - {CPUID_FEATURE_SEP, "SEP"}, - {CPUID_FEATURE_MTRR, "MTRR"}, - {CPUID_FEATURE_PGE, "PGE"}, - {CPUID_FEATURE_MCA, "MCA"}, - {CPUID_FEATURE_CMOV, "CMOV"}, - {CPUID_FEATURE_PAT, "PAT"}, - {CPUID_FEATURE_PSE36, "PSE36"}, - {CPUID_FEATURE_PSN, "PSN"}, - {CPUID_FEATURE_CLFSH, "CLFSH"}, - {CPUID_FEATURE_DS, "DS"}, - {CPUID_FEATURE_ACPI, "ACPI"}, - {CPUID_FEATURE_MMX, "MMX"}, - {CPUID_FEATURE_FXSR, "FXSR"}, - {CPUID_FEATURE_SSE, "SSE"}, - {CPUID_FEATURE_SSE2, "SSE2"}, - {CPUID_FEATURE_SS, "SS"}, - {CPUID_FEATURE_HTT, "HTT"}, - {CPUID_FEATURE_TM, "TM"}, - {CPUID_FEATURE_PBE, "PBE"}, - {CPUID_FEATURE_SSE3, "SSE3"}, + {CPUID_FEATURE_FPU, "FPU"}, + {CPUID_FEATURE_VME, "VME"}, + {CPUID_FEATURE_DE, "DE"}, + {CPUID_FEATURE_PSE, "PSE"}, + {CPUID_FEATURE_TSC, "TSC"}, + {CPUID_FEATURE_MSR, "MSR"}, + {CPUID_FEATURE_PAE, "PAE"}, + {CPUID_FEATURE_MCE, "MCE"}, + {CPUID_FEATURE_CX8, "CX8"}, + {CPUID_FEATURE_APIC, "APIC"}, + {CPUID_FEATURE_SEP, "SEP"}, + {CPUID_FEATURE_MTRR, "MTRR"}, + {CPUID_FEATURE_PGE, "PGE"}, + {CPUID_FEATURE_MCA, "MCA"}, + {CPUID_FEATURE_CMOV, "CMOV"}, + {CPUID_FEATURE_PAT, "PAT"}, + {CPUID_FEATURE_PSE36, "PSE36"}, + {CPUID_FEATURE_PSN, "PSN"}, + {CPUID_FEATURE_CLFSH, "CLFSH"}, + {CPUID_FEATURE_DS, "DS"}, + {CPUID_FEATURE_ACPI, "ACPI"}, + 
{CPUID_FEATURE_MMX, "MMX"}, + {CPUID_FEATURE_FXSR, "FXSR"}, + {CPUID_FEATURE_SSE, "SSE"}, + {CPUID_FEATURE_SSE2, "SSE2"}, + {CPUID_FEATURE_SS, "SS"}, + {CPUID_FEATURE_HTT, "HTT"}, + {CPUID_FEATURE_TM, "TM"}, + {CPUID_FEATURE_PBE, "PBE"}, + {CPUID_FEATURE_SSE3, "SSE3"}, {CPUID_FEATURE_PCLMULQDQ, "PCLMULQDQ"}, - {CPUID_FEATURE_DTES64, "DTES64"}, - {CPUID_FEATURE_MONITOR, "MON"}, - {CPUID_FEATURE_DSCPL, "DSCPL"}, - {CPUID_FEATURE_VMX, "VMX"}, - {CPUID_FEATURE_SMX, "SMX"}, - {CPUID_FEATURE_EST, "EST"}, - {CPUID_FEATURE_TM2, "TM2"}, - {CPUID_FEATURE_SSSE3, "SSSE3"}, - {CPUID_FEATURE_CID, "CID"}, - {CPUID_FEATURE_FMA, "FMA"}, - {CPUID_FEATURE_CX16, "CX16"}, - {CPUID_FEATURE_xTPR, "TPR"}, - {CPUID_FEATURE_PDCM, "PDCM"}, - {CPUID_FEATURE_SSE4_1, "SSE4.1"}, - {CPUID_FEATURE_SSE4_2, "SSE4.2"}, - {CPUID_FEATURE_x2APIC, "x2APIC"}, - {CPUID_FEATURE_MOVBE, "MOVBE"}, - {CPUID_FEATURE_POPCNT, "POPCNT"}, - {CPUID_FEATURE_AES, "AES"}, - {CPUID_FEATURE_VMM, "VMM"}, - {CPUID_FEATURE_PCID, "PCID"}, - {CPUID_FEATURE_XSAVE, "XSAVE"}, - {CPUID_FEATURE_OSXSAVE, "OSXSAVE"}, - {CPUID_FEATURE_SEGLIM64, "SEGLIM64"}, - {CPUID_FEATURE_TSCTMR, "TSCTMR"}, - {CPUID_FEATURE_AVX1_0, "AVX1.0"}, - {CPUID_FEATURE_RDRAND, "RDRAND"}, - {CPUID_FEATURE_F16C, "F16C"}, + {CPUID_FEATURE_DTES64, "DTES64"}, + {CPUID_FEATURE_MONITOR, "MON"}, + {CPUID_FEATURE_DSCPL, "DSCPL"}, + {CPUID_FEATURE_VMX, "VMX"}, + {CPUID_FEATURE_SMX, "SMX"}, + {CPUID_FEATURE_EST, "EST"}, + {CPUID_FEATURE_TM2, "TM2"}, + {CPUID_FEATURE_SSSE3, "SSSE3"}, + {CPUID_FEATURE_CID, "CID"}, + {CPUID_FEATURE_FMA, "FMA"}, + {CPUID_FEATURE_CX16, "CX16"}, + {CPUID_FEATURE_xTPR, "TPR"}, + {CPUID_FEATURE_PDCM, "PDCM"}, + {CPUID_FEATURE_SSE4_1, "SSE4.1"}, + {CPUID_FEATURE_SSE4_2, "SSE4.2"}, + {CPUID_FEATURE_x2APIC, "x2APIC"}, + {CPUID_FEATURE_MOVBE, "MOVBE"}, + {CPUID_FEATURE_POPCNT, "POPCNT"}, + {CPUID_FEATURE_AES, "AES"}, + {CPUID_FEATURE_VMM, "VMM"}, + {CPUID_FEATURE_PCID, "PCID"}, + {CPUID_FEATURE_XSAVE, "XSAVE"}, + {CPUID_FEATURE_OSXSAVE, "OSXSAVE"}, + {CPUID_FEATURE_SEGLIM64, "SEGLIM64"}, + {CPUID_FEATURE_TSCTMR, "TSCTMR"}, + {CPUID_FEATURE_AVX1_0, "AVX1.0"}, + {CPUID_FEATURE_RDRAND, "RDRAND"}, + {CPUID_FEATURE_F16C, "F16C"}, {0, 0} }, -extfeature_map[] = { + extfeature_map[] = { {CPUID_EXTFEATURE_SYSCALL, "SYSCALL"}, - {CPUID_EXTFEATURE_XD, "XD"}, + {CPUID_EXTFEATURE_XD, "XD"}, {CPUID_EXTFEATURE_1GBPAGE, "1GBPAGE"}, - {CPUID_EXTFEATURE_EM64T, "EM64T"}, - {CPUID_EXTFEATURE_LAHF, "LAHF"}, - {CPUID_EXTFEATURE_LZCNT, "LZCNT"}, + {CPUID_EXTFEATURE_EM64T, "EM64T"}, + {CPUID_EXTFEATURE_LAHF, "LAHF"}, + {CPUID_EXTFEATURE_LZCNT, "LZCNT"}, {CPUID_EXTFEATURE_PREFETCHW, "PREFETCHW"}, - {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"}, - {CPUID_EXTFEATURE_TSCI, "TSCI"}, + {CPUID_EXTFEATURE_RDTSCP, "RDTSCP"}, + {CPUID_EXTFEATURE_TSCI, "TSCI"}, {0, 0} - }, -leaf7_feature_map[] = { - {CPUID_LEAF7_FEATURE_SMEP, "SMEP"}, - {CPUID_LEAF7_FEATURE_ERMS, "ERMS"}, + leaf7_feature_map[] = { {CPUID_LEAF7_FEATURE_RDWRFSGS, "RDWRFSGS"}, - {CPUID_LEAF7_FEATURE_TSCOFF, "TSC_THREAD_OFFSET"}, - {CPUID_LEAF7_FEATURE_BMI1, "BMI1"}, - {CPUID_LEAF7_FEATURE_HLE, "HLE"}, - {CPUID_LEAF7_FEATURE_AVX2, "AVX2"}, - {CPUID_LEAF7_FEATURE_BMI2, "BMI2"}, - {CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"}, - {CPUID_LEAF7_FEATURE_RTM, "RTM"}, - {CPUID_LEAF7_FEATURE_SMAP, "SMAP"}, - {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"}, - {CPUID_LEAF7_FEATURE_ADX, "ADX"}, - {CPUID_LEAF7_FEATURE_IPT, "IPT"}, -#if !defined(RC_HIDE_XNU_J137) - {CPUID_LEAF7_FEATURE_AVX512F, "AVX512F"}, - {CPUID_LEAF7_FEATURE_AVX512CD, "AVX512CD"}, + {CPUID_LEAF7_FEATURE_TSCOFF, 
"TSC_THREAD_OFFSET"}, + {CPUID_LEAF7_FEATURE_SGX, "SGX"}, + {CPUID_LEAF7_FEATURE_BMI1, "BMI1"}, + {CPUID_LEAF7_FEATURE_HLE, "HLE"}, + {CPUID_LEAF7_FEATURE_AVX2, "AVX2"}, + {CPUID_LEAF7_FEATURE_FDPEO, "FDPEO"}, + {CPUID_LEAF7_FEATURE_SMEP, "SMEP"}, + {CPUID_LEAF7_FEATURE_BMI2, "BMI2"}, + {CPUID_LEAF7_FEATURE_ERMS, "ERMS"}, + {CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"}, + {CPUID_LEAF7_FEATURE_RTM, "RTM"}, + {CPUID_LEAF7_FEATURE_PQM, "PQM"}, + {CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"}, + {CPUID_LEAF7_FEATURE_MPX, "MPX"}, + {CPUID_LEAF7_FEATURE_PQE, "PQE"}, + {CPUID_LEAF7_FEATURE_AVX512F, "AVX512F"}, {CPUID_LEAF7_FEATURE_AVX512DQ, "AVX512DQ"}, + {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"}, + {CPUID_LEAF7_FEATURE_ADX, "ADX"}, + {CPUID_LEAF7_FEATURE_SMAP, "SMAP"}, + {CPUID_LEAF7_FEATURE_AVX512IFMA, "AVX512IFMA"}, + {CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"}, + {CPUID_LEAF7_FEATURE_CLWB, "CLWB"}, + {CPUID_LEAF7_FEATURE_IPT, "IPT"}, + {CPUID_LEAF7_FEATURE_AVX512CD, "AVX512CD"}, + {CPUID_LEAF7_FEATURE_SHA, "SHA"}, {CPUID_LEAF7_FEATURE_AVX512BW, "AVX512BW"}, {CPUID_LEAF7_FEATURE_AVX512VL, "AVX512VL"}, - {CPUID_LEAF7_FEATURE_AVX512IFMA, "AVX512IFMA"}, + {CPUID_LEAF7_FEATURE_PREFETCHWT1, "PREFETCHWT1"}, {CPUID_LEAF7_FEATURE_AVX512VBMI, "AVX512VBMI"}, -#endif /* not RC_HIDE_XNU_J137 */ - {CPUID_LEAF7_FEATURE_SGX, "SGX"}, - {CPUID_LEAF7_FEATURE_PQM, "PQM"}, - {CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"}, - {CPUID_LEAF7_FEATURE_MPX, "MPX"}, - {CPUID_LEAF7_FEATURE_PQE, "PQE"}, - {CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"}, - {CPUID_LEAF7_FEATURE_SHA, "SHA"}, + {CPUID_LEAF7_FEATURE_UMIP, "UMIP"}, + {CPUID_LEAF7_FEATURE_PKU, "PKU"}, + {CPUID_LEAF7_FEATURE_OSPKE, "OSPKE"}, + {CPUID_LEAF7_FEATURE_WAITPKG, "WAITPKG"}, + {CPUID_LEAF7_FEATURE_GFNI, "GFNI"}, + {CPUID_LEAF7_FEATURE_AVX512VPCDQ, "AVX512VPCDQ"}, + {CPUID_LEAF7_FEATURE_RDPID, "RDPID"}, + {CPUID_LEAF7_FEATURE_CLDEMOTE, "CLDEMOTE"}, + {CPUID_LEAF7_FEATURE_MOVDIRI, "MOVDIRI"}, + {CPUID_LEAF7_FEATURE_MOVDIRI64B, "MOVDIRI64B"}, + {CPUID_LEAF7_FEATURE_SGXLC, "SGXLC"}, + {0, 0} +}, + leaf7_extfeature_map[] = { + { CPUID_LEAF7_EXTFEATURE_AVX5124VNNIW, "AVX5124VNNIW" }, + { CPUID_LEAF7_EXTFEATURE_AVX5124FMAPS, "AVX5124FMAPS" }, + { CPUID_LEAF7_EXTFEATURE_MDCLEAR, "MDCLEAR" }, + { CPUID_LEAF7_EXTFEATURE_TSXFA, "TSXFA" }, + { CPUID_LEAF7_EXTFEATURE_IBRS, "IBRS" }, + { CPUID_LEAF7_EXTFEATURE_STIBP, "STIBP" }, + { CPUID_LEAF7_EXTFEATURE_L1DF, "L1DF" }, + { CPUID_LEAF7_EXTFEATURE_ACAPMSR, "ACAPMSR" }, + { CPUID_LEAF7_EXTFEATURE_CCAPMSR, "CCAPMSR" }, + { CPUID_LEAF7_EXTFEATURE_SSBD, "SSBD" }, {0, 0} }; static char * cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len) { - size_t len = 0; - char *p = buf; - int i; + size_t len = 0; + char *p = buf; + int i; for (i = 0; map[i].mask != 0; i++) { - if ((bits & map[i].mask) == 0) + if ((bits & map[i].mask) == 0) { continue; - if (len && ((size_t) (p - buf) < (buf_len - 1))) + } + if (len && ((size_t) (p - buf) < (buf_len - 1))) { *p++ = ' '; - len = min(strlen(map[i].name), (size_t)((buf_len-1)-(p-buf))); - if (len == 0) + } + len = min(strlen(map[i].name), (size_t)((buf_len - 1) - (p - buf))); + if (len == 0) { break; + } bcopy(map[i].name, p, len); p += len; } @@ -1032,7 +1140,7 @@ cpuid_get_names(struct table *map, uint64_t bits, char *buf, unsigned buf_len) return buf; } -i386_cpu_info_t * +i386_cpu_info_t * cpuid_info(void) { /* Set-up the cpuid_info stucture lazily */ @@ -1047,62 +1155,73 @@ cpuid_info(void) char * cpuid_get_feature_names(uint64_t features, char *buf, unsigned buf_len) { - return 
cpuid_get_names(feature_map, features, buf, buf_len); + return cpuid_get_names(feature_map, features, buf, buf_len); } char * cpuid_get_extfeature_names(uint64_t extfeatures, char *buf, unsigned buf_len) { - return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len); + return cpuid_get_names(extfeature_map, extfeatures, buf, buf_len); } char * cpuid_get_leaf7_feature_names(uint64_t features, char *buf, unsigned buf_len) { - return cpuid_get_names(leaf7_feature_map, features, buf, buf_len); + return cpuid_get_names(leaf7_feature_map, features, buf, buf_len); +} + +char * +cpuid_get_leaf7_extfeature_names(uint64_t features, char *buf, unsigned buf_len) +{ + return cpuid_get_names(leaf7_extfeature_map, features, buf, buf_len); } void cpuid_feature_display( - const char *header) + const char *header) { - char buf[320]; + char buf[320]; kprintf("%s: %s", header, - cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf))); - if (cpuid_leaf7_features()) + cpuid_get_feature_names(cpuid_features(), buf, sizeof(buf))); + if (cpuid_leaf7_features()) { kprintf(" %s", cpuid_get_leaf7_feature_names( - cpuid_leaf7_features(), buf, sizeof(buf))); + cpuid_leaf7_features(), buf, sizeof(buf))); + } + if (cpuid_leaf7_extfeatures()) { + kprintf(" %s", cpuid_get_leaf7_extfeature_names( + cpuid_leaf7_extfeatures(), buf, sizeof(buf))); + } kprintf("\n"); if (cpuid_features() & CPUID_FEATURE_HTT) { -#define s_if_plural(n) ((n > 1) ? "s" : "") +#define s_if_plural(n) ((n > 1) ? "s" : "") kprintf(" HTT: %d core%s per package;" - " %d logical cpu%s per package\n", - cpuid_cpu_infop->cpuid_cores_per_package, - s_if_plural(cpuid_cpu_infop->cpuid_cores_per_package), - cpuid_cpu_infop->cpuid_logical_per_package, - s_if_plural(cpuid_cpu_infop->cpuid_logical_per_package)); + " %d logical cpu%s per package\n", + cpuid_cpu_infop->cpuid_cores_per_package, + s_if_plural(cpuid_cpu_infop->cpuid_cores_per_package), + cpuid_cpu_infop->cpuid_logical_per_package, + s_if_plural(cpuid_cpu_infop->cpuid_logical_per_package)); } } void cpuid_extfeature_display( - const char *header) + const char *header) { - char buf[256]; + char buf[256]; kprintf("%s: %s\n", header, - cpuid_get_extfeature_names(cpuid_extfeatures(), - buf, sizeof(buf))); + cpuid_get_extfeature_names(cpuid_extfeatures(), + buf, sizeof(buf))); } void cpuid_cpu_display( - const char *header) + const char *header) { - if (cpuid_cpu_infop->cpuid_brand_string[0] != '\0') { - kprintf("%s: %s\n", header, cpuid_cpu_infop->cpuid_brand_string); - } + if (cpuid_cpu_infop->cpuid_brand_string[0] != '\0') { + kprintf("%s: %s\n", header, cpuid_cpu_infop->cpuid_brand_string); + } } unsigned int @@ -1133,22 +1252,22 @@ uint64_t cpuid_features(void) { static int checked = 0; - char fpu_arg[20] = { 0 }; + char fpu_arg[20] = { 0 }; (void) cpuid_info(); if (!checked) { - /* check for boot-time fpu limitations */ - if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof (fpu_arg))) { - printf("limiting fpu features to: %s\n", fpu_arg); - if (!strncmp("387", fpu_arg, sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) { - printf("no sse or sse2\n"); - cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR); - } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) { - printf("no sse2\n"); - cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE2); - } + /* check for boot-time fpu limitations */ + if (PE_parse_boot_argn("_fpu", &fpu_arg[0], sizeof(fpu_arg))) { + printf("limiting fpu features to: %s\n", fpu_arg); + if (!strncmp("387", fpu_arg, 
sizeof("387")) || !strncmp("mmx", fpu_arg, sizeof("mmx"))) { + printf("no sse or sse2\n"); + cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE | CPUID_FEATURE_SSE2 | CPUID_FEATURE_FXSR); + } else if (!strncmp("sse", fpu_arg, sizeof("sse"))) { + printf("no sse2\n"); + cpuid_cpu_infop->cpuid_features &= ~(CPUID_FEATURE_SSE2); } - checked = 1; + } + checked = 1; } return cpuid_cpu_infop->cpuid_features; } @@ -1158,26 +1277,33 @@ cpuid_extfeatures(void) { return cpuid_info()->cpuid_extfeatures; } - + uint64_t cpuid_leaf7_features(void) { return cpuid_info()->cpuid_leaf7_features; } -static i386_vmm_info_t *_cpuid_vmm_infop = NULL; -static i386_vmm_info_t _cpuid_vmm_info; +uint64_t +cpuid_leaf7_extfeatures(void) +{ + return cpuid_info()->cpuid_leaf7_extfeatures; +} + +static i386_vmm_info_t *_cpuid_vmm_infop = NULL; +static i386_vmm_info_t _cpuid_vmm_info; static void cpuid_init_vmm_info(i386_vmm_info_t *info_p) { - uint32_t reg[4]; - uint32_t max_vmm_leaf; + uint32_t reg[4]; + uint32_t max_vmm_leaf; bzero(info_p, sizeof(*info_p)); - if (!cpuid_vmm_present()) + if (!cpuid_vmm_present()) { return; + } DBG("cpuid_init_vmm_info(%p)\n", info_p); @@ -1202,7 +1328,7 @@ cpuid_init_vmm_info(i386_vmm_info_t *info_p) /* VMM generic leaves: https://lkml.org/lkml/2008/10/1/246 */ if (max_vmm_leaf >= 0x40000010) { cpuid_fn(0x40000010, reg); - + info_p->cpuid_vmm_tsc_frequency = reg[eax]; info_p->cpuid_vmm_bus_frequency = reg[ebx]; } @@ -1235,3 +1361,71 @@ cpuid_vmm_family(void) return cpuid_vmm_info()->cpuid_vmm_family; } +cwa_classifier_e +cpuid_wa_required(cpu_wa_e wa) +{ + static uint64_t bootarg_cpu_wa_enables = 0; + static uint64_t bootarg_cpu_wa_disables = 0; + static int bootargs_overrides_processed = 0; + i386_cpu_info_t *info_p = &cpuid_cpu_info; + + if (!bootargs_overrides_processed) { + if (!PE_parse_boot_argn("cwae", &bootarg_cpu_wa_enables, sizeof(bootarg_cpu_wa_enables))) { + bootarg_cpu_wa_enables = 0; + } + + if (!PE_parse_boot_argn("cwad", &bootarg_cpu_wa_disables, sizeof(bootarg_cpu_wa_disables))) { + bootarg_cpu_wa_disables = 0; + } + bootargs_overrides_processed = 1; + } + + if (bootarg_cpu_wa_enables & (1 << wa)) { + return CWA_FORCE_ON; + } + + if (bootarg_cpu_wa_disables & (1 << wa)) { + return CWA_FORCE_OFF; + } + + switch (wa) { + case CPU_INTEL_SEGCHK: + /* First, check to see if this CPU requires the workaround */ + if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_ACAPMSR) != 0) { + /* We have ARCHCAP, so check it for either RDCL_NO or MDS_NO */ + uint64_t archcap_msr = rdmsr64(MSR_IA32_ARCH_CAPABILITIES); + if ((archcap_msr & (MSR_IA32_ARCH_CAPABILITIES_RDCL_NO | MSR_IA32_ARCH_CAPABILITIES_MDS_NO)) != 0) { + /* Workaround not needed */ + return CWA_OFF; + } + } + + if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_MDCLEAR) != 0) { + return CWA_ON; + } + + /* + * If the CPU supports the ARCHCAP MSR and neither the RDCL_NO bit nor the MDS_NO + * bit are set, OR the CPU does not support the ARCHCAP MSR and the CPU does + * not enumerate the presence of the enhanced VERW instruction, report + * that the workaround should not be enabled. + */ + break; + + case CPU_INTEL_TSXFA: + /* + * If this CPU supports RTM and supports FORCE_ABORT, return that + * the workaround should be enabled. 
+ */ + if ((info_p->cpuid_leaf7_extfeatures & CPUID_LEAF7_EXTFEATURE_TSXFA) != 0 && + (info_p->cpuid_leaf7_features & CPUID_LEAF7_FEATURE_RTM) != 0) { + return CWA_ON; + } + break; + + default: + break; + } + + return CWA_OFF; +} diff --git a/osfmk/i386/cpuid.h b/osfmk/i386/cpuid.h index f8e8e24c2..3af0e20ef 100644 --- a/osfmk/i386/cpuid.h +++ b/osfmk/i386/cpuid.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,22 +39,28 @@ #include +#if defined(MACH_KERNEL_PRIVATE) && !defined(ASSEMBLER) +#include +#include +#include +#endif + #ifdef __APPLE_API_PRIVATE -#define CPUID_VID_INTEL "GenuineIntel" -#define CPUID_VID_AMD "AuthenticAMD" +#define CPUID_VID_INTEL "GenuineIntel" +#define CPUID_VID_AMD "AuthenticAMD" -#define CPUID_VMM_ID_VMWARE "VMwareVMware" -#define CPUID_VMM_ID_PARALLELS "Parallels\0\0\0" +#define CPUID_VMM_ID_VMWARE "VMwareVMware" +#define CPUID_VMM_ID_PARALLELS "Parallels\0\0\0" #define CPUID_STRING_UNKNOWN "Unknown CPU Typ" -#define _Bit(n) (1ULL << n) -#define _HBit(n) (1ULL << ((n)+32)) +#define _Bit(n) (1ULL << n) +#define _HBit(n) (1ULL << ((n)+32)) /* * The CPUID_FEATURE_XXX values define 64-bit values - * returned in %ecx:%edx to a CPUID request with %eax of 1: + * returned in %ecx:%edx to a CPUID request with %eax of 1: */ #define CPUID_FEATURE_FPU _Bit(0) /* Floating point unit on-chip */ #define CPUID_FEATURE_VME _Bit(1) /* Virtual Mode Extension */ @@ -85,7 +91,7 @@ #define CPUID_FEATURE_HTT _Bit(28) /* Hyper-Threading Technology */ #define CPUID_FEATURE_TM _Bit(29) /* Thermal Monitor (TM1) */ #define CPUID_FEATURE_PBE _Bit(31) /* Pend Break Enable */ - + #define CPUID_FEATURE_SSE3 _HBit(0) /* Streaming SIMD extensions 3 */ #define CPUID_FEATURE_PCLMULQDQ _HBit(1) /* PCLMULQDQ instruction */ #define CPUID_FEATURE_DTES64 _HBit(2) /* 64-bit DS layout */ @@ -114,70 +120,93 @@ #define CPUID_FEATURE_AES _HBit(25) /* AES instructions */ #define CPUID_FEATURE_XSAVE _HBit(26) /* XSAVE instructions */ #define CPUID_FEATURE_OSXSAVE _HBit(27) /* XGETBV/XSETBV instructions */ -#define CPUID_FEATURE_AVX1_0 _HBit(28) /* AVX 1.0 instructions */ -#define CPUID_FEATURE_F16C _HBit(29) /* Float16 convert instructions */ -#define CPUID_FEATURE_RDRAND _HBit(30) /* RDRAND instruction */ +#define CPUID_FEATURE_AVX1_0 _HBit(28) /* AVX 1.0 instructions */ +#define CPUID_FEATURE_F16C _HBit(29) /* Float16 
convert instructions */ +#define CPUID_FEATURE_RDRAND _HBit(30) /* RDRAND instruction */ #define CPUID_FEATURE_VMM _HBit(31) /* VMM (Hypervisor) present */ /* * Leaf 7, subleaf 0 additional features. * Bits returned in %ebx:%ecx to a CPUID request with {%eax,%ecx} of (0x7,0x0}: */ -#define CPUID_LEAF7_FEATURE_RDWRFSGS _Bit(0) /* FS/GS base read/write */ -#define CPUID_LEAF7_FEATURE_TSCOFF _Bit(1) /* TSC thread offset */ -#define CPUID_LEAF7_FEATURE_BMI1 _Bit(3) /* Bit Manipulation Instrs, set 1 */ -#define CPUID_LEAF7_FEATURE_HLE _Bit(4) /* Hardware Lock Elision*/ -#define CPUID_LEAF7_FEATURE_AVX2 _Bit(5) /* AVX2 Instructions */ -#define CPUID_LEAF7_FEATURE_SMEP _Bit(7) /* Supervisor Mode Execute Protect */ -#define CPUID_LEAF7_FEATURE_BMI2 _Bit(8) /* Bit Manipulation Instrs, set 2 */ -#define CPUID_LEAF7_FEATURE_ERMS _Bit(9) /* Enhanced Rep Movsb/Stosb */ -#define CPUID_LEAF7_FEATURE_INVPCID _Bit(10) /* INVPCID intruction, TDB */ -#define CPUID_LEAF7_FEATURE_RTM _Bit(11) /* RTM */ -#define CPUID_LEAF7_FEATURE_RDSEED _Bit(18) /* RDSEED Instruction */ -#define CPUID_LEAF7_FEATURE_ADX _Bit(19) /* ADX Instructions */ -#define CPUID_LEAF7_FEATURE_SMAP _Bit(20) /* Supervisor Mode Access Protect */ -#define CPUID_LEAF7_FEATURE_SGX _Bit(2) /* Software Guard eXtensions */ -#define CPUID_LEAF7_FEATURE_PQM _Bit(12) /* Platform Qos Monitoring */ -#define CPUID_LEAF7_FEATURE_FPU_CSDS _Bit(13) /* FPU CS/DS deprecation */ -#define CPUID_LEAF7_FEATURE_MPX _Bit(14) /* Memory Protection eXtensions */ -#define CPUID_LEAF7_FEATURE_PQE _Bit(15) /* Platform Qos Enforcement */ -#define CPUID_LEAF7_FEATURE_CLFSOPT _Bit(23) /* CLFSOPT */ -#define CPUID_LEAF7_FEATURE_IPT _Bit(25) /* Intel Processor Trace */ -#define CPUID_LEAF7_FEATURE_SHA _Bit(29) /* SHA instructions */ -#if !defined(RC_HIDE_XNU_J137) -#define CPUID_LEAF7_FEATURE_AVX512F _Bit(16) /* AVX512F instructions */ -#define CPUID_LEAF7_FEATURE_AVX512DQ _Bit(17) /* AVX512DQ instructions */ -#define CPUID_LEAF7_FEATURE_AVX512IFMA _Bit(21) /* AVX512IFMA instructions */ -#define CPUID_LEAF7_FEATURE_AVX512CD _Bit(28) /* AVX512CD instructions */ -#define CPUID_LEAF7_FEATURE_AVX512BW _Bit(30) /* AVX512BW instructions */ -#define CPUID_LEAF7_FEATURE_AVX512VL _Bit(31) /* AVX512VL instructions */ -#endif /* not RC_HIDE_XNU_J137 */ +#define CPUID_LEAF7_FEATURE_RDWRFSGS _Bit(0) /* FS/GS base read/write */ +#define CPUID_LEAF7_FEATURE_TSCOFF _Bit(1) /* TSC thread offset */ +#define CPUID_LEAF7_FEATURE_SGX _Bit(2) /* Software Guard eXtensions */ +#define CPUID_LEAF7_FEATURE_BMI1 _Bit(3) /* Bit Manipulation Instrs, set 1 */ +#define CPUID_LEAF7_FEATURE_HLE _Bit(4) /* Hardware Lock Elision*/ +#define CPUID_LEAF7_FEATURE_AVX2 _Bit(5) /* AVX2 Instructions */ +#define CPUID_LEAF7_FEATURE_FDPEO _Bit(6) /* x87 FPU Data Pointer updated only on x87 exceptions */ +#define CPUID_LEAF7_FEATURE_SMEP _Bit(7) /* Supervisor Mode Execute Protect */ +#define CPUID_LEAF7_FEATURE_BMI2 _Bit(8) /* Bit Manipulation Instrs, set 2 */ +#define CPUID_LEAF7_FEATURE_ERMS _Bit(9) /* Enhanced Rep Movsb/Stosb */ +#define CPUID_LEAF7_FEATURE_INVPCID _Bit(10) /* INVPCID intruction, TDB */ +#define CPUID_LEAF7_FEATURE_RTM _Bit(11) /* RTM */ +#define CPUID_LEAF7_FEATURE_PQM _Bit(12) /* Platform Qos Monitoring */ +#define CPUID_LEAF7_FEATURE_FPU_CSDS _Bit(13) /* FPU CS/DS deprecation */ +#define CPUID_LEAF7_FEATURE_MPX _Bit(14) /* Memory Protection eXtensions */ +#define CPUID_LEAF7_FEATURE_PQE _Bit(15) /* Platform Qos Enforcement */ +#define CPUID_LEAF7_FEATURE_AVX512F _Bit(16) /* AVX512F instructions */ 
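+/*
+ * (Packing note, summarizing cpuid.c rather than the original header text:
+ * cpuid_leaf7_features is assembled as quad(%ecx, %ebx), so the _Bit(n)
+ * values here are %ebx bits and the _HBit(n) values further down are %ecx
+ * bits shifted into the high word, e.g. _HBit(1) == 1ULL << 33 for
+ * AVX512VBMI.)
+ */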
+#define CPUID_LEAF7_FEATURE_AVX512DQ _Bit(17) /* AVX512DQ instructions */ +#define CPUID_LEAF7_FEATURE_RDSEED _Bit(18) /* RDSEED Instruction */ +#define CPUID_LEAF7_FEATURE_ADX _Bit(19) /* ADX Instructions */ +#define CPUID_LEAF7_FEATURE_SMAP _Bit(20) /* Supervisor Mode Access Protect */ +#define CPUID_LEAF7_FEATURE_AVX512IFMA _Bit(21) /* AVX512IFMA instructions */ +#define CPUID_LEAF7_FEATURE_CLFSOPT _Bit(23) /* CLFSOPT */ +#define CPUID_LEAF7_FEATURE_CLWB _Bit(24) /* CLWB */ +#define CPUID_LEAF7_FEATURE_IPT _Bit(25) /* Intel Processor Trace */ +#define CPUID_LEAF7_FEATURE_AVX512CD _Bit(28) /* AVX512CD instructions */ +#define CPUID_LEAF7_FEATURE_SHA _Bit(29) /* SHA instructions */ +#define CPUID_LEAF7_FEATURE_AVX512BW _Bit(30) /* AVX512BW instructions */ +#define CPUID_LEAF7_FEATURE_AVX512VL _Bit(31) /* AVX512VL instructions */ #define CPUID_LEAF7_FEATURE_PREFETCHWT1 _HBit(0)/* Prefetch Write/T1 hint */ -#if !defined(RC_HIDE_XNU_J137) #define CPUID_LEAF7_FEATURE_AVX512VBMI _HBit(1)/* AVX512VBMI instructions */ -#endif /* not RC_HIDE_XNU_J137 */ +#define CPUID_LEAF7_FEATURE_UMIP _HBit(2) /* User Mode Instruction Prevention */ +#define CPUID_LEAF7_FEATURE_PKU _HBit(3) /* Protection Keys for Usermode */ +#define CPUID_LEAF7_FEATURE_OSPKE _HBit(4) /* OS has enabled PKE */ +#define CPUID_LEAF7_FEATURE_WAITPKG _HBit(5) /* WAITPKG instructions */ +#define CPUID_LEAF7_FEATURE_GFNI _HBit(8) /* Galois Field New Instructions */ +#define CPUID_LEAF7_FEATURE_AVX512VPCDQ _HBit(14) /* AVX512 VPOPCNTDQ instruction */ +#define CPUID_LEAF7_FEATURE_RDPID _HBit(22) /* RDPID and IA32_TSC_AUX */ +#define CPUID_LEAF7_FEATURE_CLDEMOTE _HBit(25) /* Cache line demote */ +#define CPUID_LEAF7_FEATURE_MOVDIRI _HBit(27) /* MOVDIRI instruction */ +#define CPUID_LEAF7_FEATURE_MOVDIRI64B _HBit(28) /* MOVDIRI64B instruction */ +#define CPUID_LEAF7_FEATURE_SGXLC _HBit(30) /* SGX Launch Configuration */ + +/* + * Values in EDX returned by CPUID Leaf 7, subleaf 0 + */ +#define CPUID_LEAF7_EXTFEATURE_AVX5124VNNIW _Bit(2) /* AVX512_4VNNIW */ +#define CPUID_LEAF7_EXTFEATURE_AVX5124FMAPS _Bit(3) /* AVX512_4FMAPS */ +#define CPUID_LEAF7_EXTFEATURE_MDCLEAR _Bit(10) /* Overloaded VERW / L1D_FLUSH */ +#define CPUID_LEAF7_EXTFEATURE_TSXFA _Bit(13) /* TSX RTM_FORCE_ABORT MSR */ +#define CPUID_LEAF7_EXTFEATURE_IBRS _Bit(26) /* IBRS / IBPB */ +#define CPUID_LEAF7_EXTFEATURE_STIBP _Bit(27) /* Single Thread Indirect Branch Predictors */ +#define CPUID_LEAF7_EXTFEATURE_L1DF _Bit(28) /* L1D_FLUSH MSR */ +#define CPUID_LEAF7_EXTFEATURE_ACAPMSR _Bit(29) /* ARCH_CAP MSR */ +#define CPUID_LEAF7_EXTFEATURE_CCAPMSR _Bit(30) /* CORE_CAP MSR */ +#define CPUID_LEAF7_EXTFEATURE_SSBD _Bit(31) /* Speculative Store Bypass Disable */ /* * The CPUID_EXTFEATURE_XXX values define 64-bit values - * returned in %ecx:%edx to a CPUID request with %eax of 0x80000001: + * returned in %ecx:%edx to a CPUID request with %eax of 0x80000001: */ -#define CPUID_EXTFEATURE_SYSCALL _Bit(11) /* SYSCALL/sysret */ -#define CPUID_EXTFEATURE_XD _Bit(20) /* eXecute Disable */ +#define CPUID_EXTFEATURE_SYSCALL _Bit(11) /* SYSCALL/sysret */ +#define CPUID_EXTFEATURE_XD _Bit(20) /* eXecute Disable */ -#define CPUID_EXTFEATURE_1GBPAGE _Bit(26) /* 1GB pages */ -#define CPUID_EXTFEATURE_RDTSCP _Bit(27) /* RDTSCP */ -#define CPUID_EXTFEATURE_EM64T _Bit(29) /* Extended Mem 64 Technology */ +#define CPUID_EXTFEATURE_1GBPAGE _Bit(26) /* 1GB pages */ +#define CPUID_EXTFEATURE_RDTSCP _Bit(27) /* RDTSCP */ +#define CPUID_EXTFEATURE_EM64T _Bit(29) /* Extended Mem 64 Technology */ -#define 
CPUID_EXTFEATURE_LAHF _HBit(0) /* LAFH/SAHF instructions */ -#define CPUID_EXTFEATURE_LZCNT _HBit(5) /* LZCNT instruction */ -#define CPUID_EXTFEATURE_PREFETCHW _HBit(8) /* PREFETCHW instruction */ +#define CPUID_EXTFEATURE_LAHF _HBit(0) /* LAFH/SAHF instructions */ +#define CPUID_EXTFEATURE_LZCNT _HBit(5) /* LZCNT instruction */ +#define CPUID_EXTFEATURE_PREFETCHW _HBit(8) /* PREFETCHW instruction */ /* * The CPUID_EXTFEATURE_XXX values define 64-bit values - * returned in %ecx:%edx to a CPUID request with %eax of 0x80000007: + * returned in %ecx:%edx to a CPUID request with %eax of 0x80000007: */ -#define CPUID_EXTFEATURE_TSCI _Bit(8) /* TSC Invariant */ +#define CPUID_EXTFEATURE_TSCI _Bit(8) /* TSC Invariant */ /* * CPUID_X86_64_H_FEATURE_SUBSET and CPUID_X86_64_H_LEAF7_FEATURE_SUBSET @@ -185,60 +214,63 @@ * is eligible to run the "x86_64h" "Haswell feature subset" slice. */ #define CPUID_X86_64_H_FEATURE_SUBSET ( CPUID_FEATURE_FMA | \ - CPUID_FEATURE_SSE4_2 | \ - CPUID_FEATURE_MOVBE | \ - CPUID_FEATURE_POPCNT | \ - CPUID_FEATURE_AVX1_0 \ - ) + CPUID_FEATURE_SSE4_2 | \ + CPUID_FEATURE_MOVBE | \ + CPUID_FEATURE_POPCNT | \ + CPUID_FEATURE_AVX1_0 \ + ) #define CPUID_X86_64_H_EXTFEATURE_SUBSET ( CPUID_EXTFEATURE_LZCNT \ - ) + ) #define CPUID_X86_64_H_LEAF7_FEATURE_SUBSET ( CPUID_LEAF7_FEATURE_BMI1 | \ - CPUID_LEAF7_FEATURE_AVX2 | \ - CPUID_LEAF7_FEATURE_BMI2 \ - ) - -#define CPUID_CACHE_SIZE 16 /* Number of descriptor values */ - -#define CPUID_MWAIT_EXTENSION _Bit(0) /* enumeration of WMAIT extensions */ -#define CPUID_MWAIT_BREAK _Bit(1) /* interrupts are break events */ - -#define CPUID_MODEL_PENRYN 0x17 -#define CPUID_MODEL_NEHALEM 0x1A -#define CPUID_MODEL_FIELDS 0x1E /* Lynnfield, Clarksfield */ -#define CPUID_MODEL_DALES 0x1F /* Havendale, Auburndale */ -#define CPUID_MODEL_NEHALEM_EX 0x2E -#define CPUID_MODEL_DALES_32NM 0x25 /* Clarkdale, Arrandale */ -#define CPUID_MODEL_WESTMERE 0x2C /* Gulftown, Westmere-EP/-WS */ -#define CPUID_MODEL_WESTMERE_EX 0x2F -#define CPUID_MODEL_SANDYBRIDGE 0x2A -#define CPUID_MODEL_JAKETOWN 0x2D -#define CPUID_MODEL_IVYBRIDGE 0x3A -#define CPUID_MODEL_IVYBRIDGE_EP 0x3E -#define CPUID_MODEL_CRYSTALWELL 0x46 -#define CPUID_MODEL_HASWELL 0x3C -#define CPUID_MODEL_HASWELL_EP 0x3F -#define CPUID_MODEL_HASWELL_ULT 0x45 -#define CPUID_MODEL_BROADWELL 0x3D -#define CPUID_MODEL_BROADWELL_ULX 0x3D -#define CPUID_MODEL_BROADWELL_ULT 0x3D -#define CPUID_MODEL_BRYSTALWELL 0x47 -#define CPUID_MODEL_SKYLAKE 0x4E -#define CPUID_MODEL_SKYLAKE_ULT 0x4E -#define CPUID_MODEL_SKYLAKE_ULX 0x4E -#define CPUID_MODEL_SKYLAKE_DT 0x5E + CPUID_LEAF7_FEATURE_AVX2 | \ + CPUID_LEAF7_FEATURE_BMI2 \ + ) + +#define CPUID_CACHE_SIZE 16 /* Number of descriptor values */ + +#define CPUID_MWAIT_EXTENSION _Bit(0) /* enumeration of WMAIT extensions */ +#define CPUID_MWAIT_BREAK _Bit(1) /* interrupts are break events */ + +#define CPUID_MODEL_PENRYN 0x17 +#define CPUID_MODEL_NEHALEM 0x1A +#define CPUID_MODEL_FIELDS 0x1E /* Lynnfield, Clarksfield */ +#define CPUID_MODEL_DALES 0x1F /* Havendale, Auburndale */ +#define CPUID_MODEL_NEHALEM_EX 0x2E +#define CPUID_MODEL_DALES_32NM 0x25 /* Clarkdale, Arrandale */ +#define CPUID_MODEL_WESTMERE 0x2C /* Gulftown, Westmere-EP/-WS */ +#define CPUID_MODEL_WESTMERE_EX 0x2F +#define CPUID_MODEL_SANDYBRIDGE 0x2A +#define CPUID_MODEL_JAKETOWN 0x2D +#define CPUID_MODEL_IVYBRIDGE 0x3A +#define CPUID_MODEL_IVYBRIDGE_EP 0x3E +#define CPUID_MODEL_CRYSTALWELL 0x46 +#define CPUID_MODEL_HASWELL 0x3C +#define CPUID_MODEL_HASWELL_EP 0x3F +#define 
CPUID_MODEL_HASWELL_ULT 0x45 +#define CPUID_MODEL_BROADWELL 0x3D +#define CPUID_MODEL_BROADWELL_ULX 0x3D +#define CPUID_MODEL_BROADWELL_ULT 0x3D +#define CPUID_MODEL_BRYSTALWELL 0x47 +#define CPUID_MODEL_SKYLAKE 0x4E +#define CPUID_MODEL_SKYLAKE_ULT 0x4E +#define CPUID_MODEL_SKYLAKE_ULX 0x4E +#define CPUID_MODEL_SKYLAKE_DT 0x5E #if !defined(RC_HIDE_XNU_J137) -#define CPUID_MODEL_SKYLAKE_W 0x55 +#define CPUID_MODEL_SKYLAKE_W 0x55 +#define PLATID_XEON_SP_1 0x00 +#define PLATID_XEON_SP_2 0x07 +#define PLATID_MAYBE_XEON_SP 0x01 #endif /* not RC_HIDE_XNU_J137 */ #define CPUID_MODEL_KABYLAKE 0x8E #define CPUID_MODEL_KABYLAKE_ULT 0x8E #define CPUID_MODEL_KABYLAKE_ULX 0x8E #define CPUID_MODEL_KABYLAKE_DT 0x9E -#define CPUID_VMM_FAMILY_UNKNOWN 0x0 -#define CPUID_VMM_FAMILY_VMWARE 0x1 -#define CPUID_VMM_FAMILY_PARALLELS 0x2 +#define CPUID_VMM_FAMILY_UNKNOWN 0x0 +#define CPUID_VMM_FAMILY_VMWARE 0x1 +#define CPUID_VMM_FAMILY_PARALLELS 0x2 #ifndef ASSEMBLER #include @@ -252,185 +284,226 @@ static inline void cpuid(uint32_t *data) { __asm__ volatile ("cpuid" - : "=a" (data[eax]), - "=b" (data[ebx]), - "=c" (data[ecx]), - "=d" (data[edx]) - : "a" (data[eax]), - "b" (data[ebx]), - "c" (data[ecx]), - "d" (data[edx])); + : "=a" (data[eax]), + "=b" (data[ebx]), + "=c" (data[ecx]), + "=d" (data[edx]) + : "a" (data[eax]), + "b" (data[ebx]), + "c" (data[ecx]), + "d" (data[edx])); } static inline void do_cpuid(uint32_t selector, uint32_t *data) { __asm__ volatile ("cpuid" - : "=a" (data[0]), - "=b" (data[1]), - "=c" (data[2]), - "=d" (data[3]) - : "a"(selector), - "b" (0), - "c" (0), - "d" (0)); + : "=a" (data[0]), + "=b" (data[1]), + "=c" (data[2]), + "=d" (data[3]) + : "a"(selector), + "b" (0), + "c" (0), + "d" (0)); } /* * Cache ID descriptor structure, used to parse CPUID leaf 2. * Note: not used in kernel. 
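 * Entries are intended to be built with the CACHE_DESC() macro below, which
 * keeps the description string only under KERNEL. A plausible entry for
 * leaf-2 descriptor 0x06 -- values taken from the descriptor table in
 * cpuid.c; the example itself is not in the original header -- would be:
 *
 *     CACHE_DESC(0x06, L1I, 8 * 1024, 32, "L1 instruction cache, 8K, 4-way")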
*/ -typedef enum { Lnone, L1I, L1D, L2U, L3U, LCACHE_MAX } cache_type_t ; +typedef enum { Lnone, L1I, L1D, L2U, L3U, LCACHE_MAX } cache_type_t; typedef struct { - unsigned char value; /* Descriptor value */ - cache_type_t type; /* Cache type */ - unsigned int size; /* Cache size */ - unsigned int linesize; /* Cache line size */ + unsigned char value; /* Descriptor value */ + cache_type_t type; /* Cache type */ + unsigned int size; /* Cache size */ + unsigned int linesize; /* Cache line size */ #ifdef KERNEL - const char *description; /* Cache description */ + const char *description; /* Cache description */ #endif /* KERNEL */ -} cpuid_cache_desc_t; +} cpuid_cache_desc_t; #ifdef KERNEL -#define CACHE_DESC(value,type,size,linesize,text) \ +#define CACHE_DESC(value, type, size, linesize, text) \ { value, type, size, linesize, text } #else -#define CACHE_DESC(value,type,size,linesize,text) \ +#define CACHE_DESC(value, type, size, linesize, text) \ { value, type, size, linesize } #endif /* KERNEL */ /* Monitor/mwait Leaf: */ typedef struct { - uint32_t linesize_min; - uint32_t linesize_max; - uint32_t extensions; - uint32_t sub_Cstates; + uint32_t linesize_min; + uint32_t linesize_max; + uint32_t extensions; + uint32_t sub_Cstates; } cpuid_mwait_leaf_t; /* Thermal and Power Management Leaf: */ typedef struct { - boolean_t sensor; - boolean_t dynamic_acceleration; - boolean_t invariant_APIC_timer; - boolean_t core_power_limits; - boolean_t fine_grain_clock_mod; - boolean_t package_thermal_intr; - uint32_t thresholds; - boolean_t ACNT_MCNT; - boolean_t hardware_feedback; - boolean_t energy_policy; + boolean_t sensor; + boolean_t dynamic_acceleration; + boolean_t invariant_APIC_timer; + boolean_t core_power_limits; + boolean_t fine_grain_clock_mod; + boolean_t package_thermal_intr; + uint32_t thresholds; + boolean_t ACNT_MCNT; + boolean_t hardware_feedback; + boolean_t energy_policy; } cpuid_thermal_leaf_t; /* XSAVE Feature Leaf: */ typedef struct { - uint32_t extended_state[4]; /* eax .. edx */ + uint32_t extended_state[4]; /* eax .. 
edx */ } cpuid_xsave_leaf_t; /* Architectural Performance Monitoring Leaf: */ typedef struct { - uint8_t version; - uint8_t number; - uint8_t width; - uint8_t events_number; - uint32_t events; - uint8_t fixed_number; - uint8_t fixed_width; + uint8_t version; + uint8_t number; + uint8_t width; + uint8_t events_number; + uint32_t events; + uint8_t fixed_number; + uint8_t fixed_width; } cpuid_arch_perf_leaf_t; /* The TSC to Core Crystal (RefCLK) Clock Information leaf */ typedef struct { - uint32_t numerator; - uint32_t denominator; + uint32_t numerator; + uint32_t denominator; } cpuid_tsc_leaf_t; /* Physical CPU info - this is exported out of the kernel (kexts), so be wary of changes */ typedef struct { - char cpuid_vendor[16]; - char cpuid_brand_string[48]; - const char *cpuid_model_string; - - cpu_type_t cpuid_type; /* this is *not* a cpu_type_t in our */ - uint8_t cpuid_family; - uint8_t cpuid_model; - uint8_t cpuid_extmodel; - uint8_t cpuid_extfamily; - uint8_t cpuid_stepping; - uint64_t cpuid_features; - uint64_t cpuid_extfeatures; - uint32_t cpuid_signature; - uint8_t cpuid_brand; - uint8_t cpuid_processor_flag; - - uint32_t cache_size[LCACHE_MAX]; - uint32_t cache_linesize; - - uint8_t cache_info[64]; /* list of cache descriptors */ - - uint32_t cpuid_cores_per_package; - uint32_t cpuid_logical_per_package; - uint32_t cache_sharing[LCACHE_MAX]; - uint32_t cache_partitions[LCACHE_MAX]; - - cpu_type_t cpuid_cpu_type; /* */ - cpu_subtype_t cpuid_cpu_subtype; /* */ + char cpuid_vendor[16]; + char cpuid_brand_string[48]; + const char *cpuid_model_string; + + cpu_type_t cpuid_type; /* this is *not* a cpu_type_t in our */ + uint8_t cpuid_family; + uint8_t cpuid_model; + uint8_t cpuid_extmodel; + uint8_t cpuid_extfamily; + uint8_t cpuid_stepping; + uint64_t cpuid_features; + uint64_t cpuid_extfeatures; + uint32_t cpuid_signature; + uint8_t cpuid_brand; + uint8_t cpuid_processor_flag; + + uint32_t cache_size[LCACHE_MAX]; + uint32_t cache_linesize; + + uint8_t cache_info[64]; /* list of cache descriptors */ + + uint32_t cpuid_cores_per_package; + uint32_t cpuid_logical_per_package; + uint32_t cache_sharing[LCACHE_MAX]; + uint32_t cache_partitions[LCACHE_MAX]; + + cpu_type_t cpuid_cpu_type; /* */ + cpu_subtype_t cpuid_cpu_subtype; /* */ /* Per-vendor info */ - cpuid_mwait_leaf_t cpuid_mwait_leaf; -#define cpuid_mwait_linesize_max cpuid_mwait_leaf.linesize_max -#define cpuid_mwait_linesize_min cpuid_mwait_leaf.linesize_min -#define cpuid_mwait_extensions cpuid_mwait_leaf.extensions -#define cpuid_mwait_sub_Cstates cpuid_mwait_leaf.sub_Cstates - cpuid_thermal_leaf_t cpuid_thermal_leaf; - cpuid_arch_perf_leaf_t cpuid_arch_perf_leaf; - uint32_t unused[4]; /* cpuid_xsave_leaf */ + cpuid_mwait_leaf_t cpuid_mwait_leaf; +#define cpuid_mwait_linesize_max cpuid_mwait_leaf.linesize_max +#define cpuid_mwait_linesize_min cpuid_mwait_leaf.linesize_min +#define cpuid_mwait_extensions cpuid_mwait_leaf.extensions +#define cpuid_mwait_sub_Cstates cpuid_mwait_leaf.sub_Cstates + cpuid_thermal_leaf_t cpuid_thermal_leaf; + cpuid_arch_perf_leaf_t cpuid_arch_perf_leaf; + uint32_t unused[4]; /* cpuid_xsave_leaf */ /* Cache details: */ - uint32_t cpuid_cache_linesize; - uint32_t cpuid_cache_L2_associativity; - uint32_t cpuid_cache_size; + uint32_t cpuid_cache_linesize; + uint32_t cpuid_cache_L2_associativity; + uint32_t cpuid_cache_size; /* Virtual and physical address aize: */ - uint32_t cpuid_address_bits_physical; - uint32_t cpuid_address_bits_virtual; + uint32_t cpuid_address_bits_physical; + uint32_t 
cpuid_address_bits_virtual; - uint32_t cpuid_microcode_version; + uint32_t cpuid_microcode_version; /* Numbers of tlbs per processor [i|d, small|large, level0|level1] */ - uint32_t cpuid_tlb[2][2][2]; - #define TLB_INST 0 - #define TLB_DATA 1 - #define TLB_SMALL 0 - #define TLB_LARGE 1 - uint32_t cpuid_stlb; + uint32_t cpuid_tlb[2][2][2]; + #define TLB_INST 0 + #define TLB_DATA 1 + #define TLB_SMALL 0 + #define TLB_LARGE 1 + uint32_t cpuid_stlb; - uint32_t core_count; - uint32_t thread_count; + uint32_t core_count; + uint32_t thread_count; /* Max leaf ids available from CPUID */ - uint32_t cpuid_max_basic; - uint32_t cpuid_max_ext; + uint32_t cpuid_max_basic; + uint32_t cpuid_max_ext; /* Family-specific info links */ - uint32_t cpuid_cpufamily; - cpuid_mwait_leaf_t *cpuid_mwait_leafp; - cpuid_thermal_leaf_t *cpuid_thermal_leafp; - cpuid_arch_perf_leaf_t *cpuid_arch_perf_leafp; - cpuid_xsave_leaf_t *cpuid_xsave_leafp; - uint64_t cpuid_leaf7_features; - cpuid_tsc_leaf_t cpuid_tsc_leaf; - cpuid_xsave_leaf_t cpuid_xsave_leaf[2]; + uint32_t cpuid_cpufamily; + cpuid_mwait_leaf_t *cpuid_mwait_leafp; + cpuid_thermal_leaf_t *cpuid_thermal_leafp; + cpuid_arch_perf_leaf_t *cpuid_arch_perf_leafp; + cpuid_xsave_leaf_t *cpuid_xsave_leafp; + uint64_t cpuid_leaf7_features; + uint64_t cpuid_leaf7_extfeatures; + cpuid_tsc_leaf_t cpuid_tsc_leaf; + cpuid_xsave_leaf_t cpuid_xsave_leaf[2]; } i386_cpu_info_t; -#ifdef MACH_KERNEL_PRIVATE +#if defined(MACH_KERNEL_PRIVATE) && !defined(ASSEMBLER) +/* Only for 32bit values */ +#define bit32(n) (1U << (n)) +#define bitmask32(h, l) ((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1)) +#define bitfield32(x, h, l) ((((x) & bitmask32(h,l)) >> l)) + typedef struct { - char cpuid_vmm_vendor[16]; - uint32_t cpuid_vmm_family; - uint32_t cpuid_vmm_bus_frequency; - uint32_t cpuid_vmm_tsc_frequency; + char cpuid_vmm_vendor[16]; + uint32_t cpuid_vmm_family; + uint32_t cpuid_vmm_bus_frequency; + uint32_t cpuid_vmm_tsc_frequency; } i386_vmm_info_t; -#endif + +typedef enum { + CPU_INTEL_SEGCHK = 1, + CPU_INTEL_TSXFA +} cpu_wa_e; + +typedef enum { + CWA_ON = 2, + CWA_FORCE_ON = 3, /* FORCE_ON shares bit 1 so consumers can test that for ON */ + CWA_OFF = 4, + CWA_FORCE_OFF = 5 /* Similarly for FORCE_OFF sharing bit 2 */ +} cwa_classifier_e; + +static inline int +is_xeon_sp(uint8_t platid) +{ + if (platid == PLATID_XEON_SP_1 || platid == PLATID_XEON_SP_2) { + return 1; + } + if (platid != PLATID_MAYBE_XEON_SP) { + return 0; + } + boolean_t intrs = ml_set_interrupts_enabled(FALSE); + outl(cfgAdr, XeonCapID5); + uint32_t cap5reg = inl(cfgDat); + ml_set_interrupts_enabled(intrs); + /* Read from PCI config space 1:30:3:0x98 [bits 13:9] */ + if (bitfield32(cap5reg, 13, 9) == 3) { + return 1; + } + return 0; +} + +extern int force_tecs_at_idle; + +#endif /* defined(MACH_KERNEL_PRIVATE) && !defined(ASSEMBLER) */ #ifdef __cplusplus extern "C" { @@ -439,28 +512,32 @@ extern "C" { /* * External declarations */ -extern cpu_type_t cpuid_cputype(void); -extern cpu_subtype_t cpuid_cpusubtype(void); -extern void cpuid_cpu_display(const char *); -extern void cpuid_feature_display(const char *); -extern void cpuid_extfeature_display(const char *); -extern char * cpuid_get_feature_names(uint64_t, char *, unsigned); -extern char * cpuid_get_extfeature_names(uint64_t, char *, unsigned); -extern char * cpuid_get_leaf7_feature_names(uint64_t, char *, unsigned); - -extern uint64_t cpuid_features(void); -extern uint64_t cpuid_extfeatures(void); -extern uint64_t cpuid_leaf7_features(void); -extern uint32_t 
cpuid_family(void); -extern uint32_t cpuid_cpufamily(void); - -extern i386_cpu_info_t *cpuid_info(void); -extern void cpuid_set_info(void); +extern cpu_type_t cpuid_cputype(void); +extern cpu_subtype_t cpuid_cpusubtype(void); +extern void cpuid_cpu_display(const char *); +extern void cpuid_feature_display(const char *); +extern void cpuid_extfeature_display(const char *); +extern char * cpuid_get_feature_names(uint64_t, char *, unsigned); +extern char * cpuid_get_extfeature_names(uint64_t, char *, unsigned); +extern char * cpuid_get_leaf7_feature_names(uint64_t, char *, unsigned); +extern char * cpuid_get_leaf7_extfeature_names(uint64_t, char *, unsigned); + +extern uint64_t cpuid_features(void); +extern uint64_t cpuid_extfeatures(void); +extern uint64_t cpuid_leaf7_features(void); +extern uint64_t cpuid_leaf7_extfeatures(void); +extern uint32_t cpuid_family(void); +extern uint32_t cpuid_cpufamily(void); + +extern i386_cpu_info_t *cpuid_info(void); +extern void cpuid_set_info(void); #ifdef MACH_KERNEL_PRIVATE -extern boolean_t cpuid_vmm_present(void); -extern i386_vmm_info_t *cpuid_vmm_info(void); -extern uint32_t cpuid_vmm_family(void); +extern boolean_t cpuid_vmm_present(void); +extern i386_vmm_info_t *cpuid_vmm_info(void); +extern uint32_t cpuid_vmm_family(void); +extern cwa_classifier_e cpuid_wa_required(cpu_wa_e wa); +extern void cpuid_do_was(void); #endif #ifdef __cplusplus diff --git a/osfmk/i386/eflags.h b/osfmk/i386/eflags.h index 9dc64783e..1ac8b7268 100644 --- a/osfmk/i386/eflags.h +++ b/osfmk/i386/eflags.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,73 +22,73 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
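
/*
 * [Editorial aside -- not part of the patch] bitfield32(x, h, l), defined
 * above for the Xeon SP probe, extracts bits h..l of a 32-bit value.  A
 * worked example matching is_xeon_sp(), which tests bits 13:9 of the
 * capability register for the value 3; the demo_* names are hypothetical:
 */
#include <assert.h>
#include <stdint.h>

#define demo_bit32(n)            (1U << (n))
#define demo_bitmask32(h, l)     ((demo_bit32(h) | (demo_bit32(h) - 1)) & ~(demo_bit32(l) - 1))
#define demo_bitfield32(x, h, l) (((x) & demo_bitmask32(h, l)) >> (l))

int
main(void)
{
	uint32_t cap5reg = 3U << 9;		/* value 3 in bits 13:9 */

	assert(demo_bitmask32(13, 9) == 0x3e00U);
	assert(demo_bitfield32(cap5reg, 13, 9) == 3U);
	return 0;
}
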
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _I386_EFLAGS_H_ -#define _I386_EFLAGS_H_ +#ifndef _I386_EFLAGS_H_ +#define _I386_EFLAGS_H_ /* * i386 flags register */ #ifndef EFL_CF -#define EFL_CF 0x00000001 /* carry */ -#define EFL_PF 0x00000004 /* parity of low 8 bits */ -#define EFL_AF 0x00000010 /* carry out of bit 3 */ -#define EFL_ZF 0x00000040 /* zero */ -#define EFL_SF 0x00000080 /* sign */ -#define EFL_TF 0x00000100 /* trace trap */ -#define EFL_IF 0x00000200 /* interrupt enable */ -#define EFL_DF 0x00000400 /* direction */ -#define EFL_OF 0x00000800 /* overflow */ -#define EFL_IOPL 0x00003000 /* IO privilege level: */ -#define EFL_IOPL_KERNEL 0x00000000 /* kernel */ -#define EFL_IOPL_USER 0x00003000 /* user */ -#define EFL_NT 0x00004000 /* nested task */ -#define EFL_RF 0x00010000 /* resume without tracing */ -#define EFL_VM 0x00020000 /* virtual 8086 mode */ -#define EFL_AC 0x00040000 /* alignment check */ -#define EFL_VIF 0x00080000 /* virtual interrupt flag */ -#define EFL_VIP 0x00100000 /* virtual interrupt pending */ -#define EFL_ID 0x00200000 /* cpuID instruction */ +#define EFL_CF 0x00000001 /* carry */ +#define EFL_PF 0x00000004 /* parity of low 8 bits */ +#define EFL_AF 0x00000010 /* carry out of bit 3 */ +#define EFL_ZF 0x00000040 /* zero */ +#define EFL_SF 0x00000080 /* sign */ +#define EFL_TF 0x00000100 /* trace trap */ +#define EFL_IF 0x00000200 /* interrupt enable */ +#define EFL_DF 0x00000400 /* direction */ +#define EFL_OF 0x00000800 /* overflow */ +#define EFL_IOPL 0x00003000 /* IO privilege level: */ +#define EFL_IOPL_KERNEL 0x00000000 /* kernel */ +#define EFL_IOPL_USER 0x00003000 /* user */ +#define EFL_NT 0x00004000 /* nested task */ +#define EFL_RF 0x00010000 /* resume without tracing */ +#define EFL_VM 0x00020000 /* virtual 8086 mode */ +#define EFL_AC 0x00040000 /* alignment check */ +#define EFL_VIF 0x00080000 /* virtual interrupt flag */ +#define EFL_VIP 0x00100000 /* virtual interrupt pending */ +#define EFL_ID 0x00200000 /* cpuID instruction */ #endif -#define EFL_CLR 0xfff88028 -#define EFL_SET 0x00000002 +#define EFL_CLR 0xfff88028 +#define EFL_SET 0x00000002 -#define EFL_USER_SET (EFL_IF) -#define EFL_USER_CLEAR (EFL_IOPL|EFL_NT|EFL_RF) +#define EFL_USER_SET (EFL_IF) +#define EFL_USER_CLEAR (EFL_IOPL|EFL_NT|EFL_RF) -#endif /* _I386_EFLAGS_H_ */ +#endif /* _I386_EFLAGS_H_ */ diff --git a/osfmk/i386/endian.h b/osfmk/i386/endian.h index d9bed4649..a83ba637b 100644 --- a/osfmk/i386/endian.h +++ b/osfmk/i386/endian.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
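
/*
 * [Editorial aside -- not part of the patch] The EFL_ID bit defined in the
 * eflags.h hunk above is the classic CPUID-detection bit: if software can
 * toggle it, the CPUID instruction exists.  A 64-bit user-space sketch (on
 * x86-64 the test is always true; the push/pop idiom is illustration only):
 */
#include <stdint.h>

#define DEMO_EFL_ID 0x00200000ULL	/* cpuID instruction */

static inline uint64_t
demo_get_eflags(void)
{
	uint64_t f;
	__asm__ volatile ("pushfq; popq %0" : "=r" (f));
	return f;
}

static inline void
demo_set_eflags(uint64_t f)
{
	__asm__ volatile ("pushq %0; popfq" : : "r" (f) : "cc");
}

static int
demo_has_cpuid(void)
{
	uint64_t orig = demo_get_eflags();
	int toggled;

	demo_set_eflags(orig ^ DEMO_EFL_ID);
	toggled = ((demo_get_eflags() ^ orig) & DEMO_EFL_ID) != 0;
	demo_set_eflags(orig);		/* restore the original flags */
	return toggled;
}
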
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ #ifndef _MACHINE_ENDIAN_H_ @@ -37,12 +37,12 @@ * Definitions for byte order, * according to byte significance from low address to high. */ -#define LITTLE_ENDIAN 1234 /* least-significant byte first (vax) */ -#define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ -#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp) */ +#define LITTLE_ENDIAN 1234 /* least-significant byte first (vax) */ +#define BIG_ENDIAN 4321 /* most-significant byte first (IBM, net) */ +#define PDP_ENDIAN 3412 /* LSB first in word, MSW first in long (pdp) */ -#define BYTE_ORDER LITTLE_ENDIAN /* byte order on i386 */ -#define ENDIAN LITTLE +#define BYTE_ORDER LITTLE_ENDIAN /* byte order on i386 */ +#define ENDIAN LITTLE /* * Macros for network/external number representation conversion. @@ -50,22 +50,22 @@ */ #if !defined(ntohs) -static __inline__ unsigned short ntohs(unsigned short); +static __inline__ unsigned short ntohs(unsigned short); static __inline__ unsigned short ntohs(unsigned short w_int) { - return ((w_int << 8) | (w_int >> 8)); + return (w_int << 8) | (w_int >> 8); } #endif #if !defined(htons) -unsigned short htons(unsigned short); -#define htons ntohs +unsigned short htons(unsigned short); +#define htons ntohs #endif #if !defined(ntohl) -static __inline__ unsigned long ntohl(unsigned long); +static __inline__ unsigned long ntohl(unsigned long); static __inline__ unsigned long ntohl(unsigned long value) @@ -74,20 +74,20 @@ ntohl(unsigned long value) return (unsigned long)__builtin_bswap32((unsigned int)value); #else unsigned long l = value; - __asm__ volatile("bswap %0" : "=r" (l) : "0" (l)); + __asm__ volatile ("bswap %0" : "=r" (l) : "0" (l)); return l; #endif } #endif #if !defined(htonl) -unsigned long htonl(unsigned long); -#define htonl ntohl +unsigned long htonl(unsigned long); +#define htonl ntohl #endif -#define NTOHL(x) (x) = ntohl((unsigned long)x) -#define NTOHS(x) (x) = ntohs((unsigned short)x) -#define HTONL(x) (x) = htonl((unsigned long)x) -#define HTONS(x) (x) = htons((unsigned short)x) +#define NTOHL(x) (x) = ntohl((unsigned long)x) +#define NTOHS(x) (x) = ntohs((unsigned short)x) +#define HTONL(x) (x) = htonl((unsigned long)x) +#define HTONS(x) (x) = htons((unsigned short)x) #endif diff --git a/osfmk/i386/exec.h b/osfmk/i386/exec.h index 2de2ea0cb..98bd28fb8 100644 --- a/osfmk/i386/exec.h +++ b/osfmk/i386/exec.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
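
/*
 * [Editorial aside -- not part of the patch] On little-endian i386/x86-64
 * the ntohs()/ntohl() inlines in the endian.h hunk above reduce to plain
 * byte swaps.  A quick self-check of the shift/or form and the bswap form
 * against known values (__builtin_bswap32 is the GCC/Clang builtin):
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint16_t w = 0x1234;
	uint32_t l = 0x12345678;

	/* The shift/or form used by ntohs() above: */
	assert((uint16_t)((w << 8) | (w >> 8)) == 0x3412);
	/* The bswap-instruction form used by ntohl() above: */
	assert(__builtin_bswap32(l) == 0x78563412U);
	return 0;
}
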
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,27 +60,26 @@ * a.out.gnu.h file. */ -#ifndef _EXEC_ -#define _EXEC_ 1 +#ifndef _EXEC_ +#define _EXEC_ 1 /* * Header prepended to each a.out file. */ -struct exec -{ -#ifdef sun - unsigned short a_machtype; /* machine type */ - unsigned short a_info; /* Use macros N_MAGIC, etc for access */ -#else /* sun */ - unsigned long a_info; /* Use macros N_MAGIC, etc for access */ -#endif /* sun */ - unsigned long a_text; /* length of text, in bytes */ - unsigned long a_data; /* length of data, in bytes */ - unsigned long a_bss; /* length of uninitialized data area for file, in bytes */ - unsigned long a_syms; /* length of symbol table data in file, in bytes */ - unsigned long a_entry; /* start address */ - unsigned long a_trsize; /* length of relocation info for text, in bytes */ - unsigned long a_drsize; /* length of relocation info for data, in bytes */ +struct exec { +#ifdef sun + unsigned short a_machtype;/* machine type */ + unsigned short a_info; /* Use macros N_MAGIC, etc for access */ +#else /* sun */ + unsigned long a_info; /* Use macros N_MAGIC, etc for access */ +#endif /* sun */ + unsigned long a_text; /* length of text, in bytes */ + unsigned long a_data; /* length of data, in bytes */ + unsigned long a_bss; /* length of uninitialized data area for file, in bytes */ + unsigned long a_syms; /* length of symbol table data in file, in bytes */ + unsigned long a_entry; /* start address */ + unsigned long a_trsize; /* length of relocation info for text, in bytes */ + unsigned long a_drsize; /* length of relocation info for data, in bytes */ }; /* Code indicating object file or impure executable. 
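
/*
 * [Editorial aside -- not part of the patch] Sketch of consuming the struct
 * exec defined above: read the header and inspect the magic carried in the
 * low 16 bits of a_info (0407, OMAGIC, is the historical "impure executable"
 * value the comment refers to).  demo_exec mirrors the non-sun layout above;
 * error handling is minimal:
 */
#include <stdio.h>

#define DEMO_OMAGIC 0407	/* object file / impure executable */

struct demo_exec {
	unsigned long a_info;
	unsigned long a_text, a_data, a_bss;
	unsigned long a_syms, a_entry, a_trsize, a_drsize;
};

int
main(int argc, char **argv)
{
	struct demo_exec ex;
	FILE *f;

	if (argc < 2 || (f = fopen(argv[1], "rb")) == NULL) {
		return 1;
	}
	if (fread(&ex, sizeof(ex), 1, f) == 1) {
		printf("magic 0%lo text %lu data %lu entry 0x%lx\n",
		    ex.a_info & 0xffff, ex.a_text, ex.a_data, ex.a_entry);
		if ((ex.a_info & 0xffff) == DEMO_OMAGIC) {
			printf("OMAGIC: object file / impure executable\n");
		}
	}
	fclose(f);
	return 0;
}
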
*/ @@ -93,9 +92,9 @@ struct exec #ifdef sun /* Sun machine types */ -#define M_OLDSUN2 0 /* old sun-2 executable files */ -#define M_68010 1 /* runs on either 68010 or 68020 */ -#define M_68020 2 /* runs only on 68020 */ +#define M_OLDSUN2 0 /* old sun-2 executable files */ +#define M_68010 1 /* runs on either 68010 or 68020 */ +#define M_68020 2 /* runs only on 68020 */ #endif /* sun */ -#endif /* _EXEC_ */ +#endif /* _EXEC_ */ diff --git a/osfmk/i386/fpu.c b/osfmk/i386/fpu.c index c0883c821..0ac53d48c 100644 --- a/osfmk/i386/fpu.c +++ b/osfmk/i386/fpu.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1992-1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
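
/*
 * [Editorial aside -- not part of the patch] The fxsave()/fxrstor() wrappers
 * that follow in fpu.c operate on a 512-byte save area that FXSAVE requires
 * to be 16-byte aligned; XNU over-aligns its save areas to 64 bytes so the
 * same buffers also suit XSAVE.  A stand-alone sketch (fx_area_t is a
 * hypothetical name):
 */
#include <stdint.h>

typedef struct {
	uint8_t bytes[512];	/* FXSAVE area is architecturally 512 bytes */
} __attribute__((aligned(64))) fx_area_t;

static inline void
demo_fxsave(fx_area_t *a)
{
	__asm__ __volatile__ ("fxsave %0" : "=m" (*a));
}

static inline void
demo_fxrstor(const fx_area_t *a)
{
	__asm__ __volatile__ ("fxrstor %0" : : "m" (*a));
}
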
*/ @@ -76,81 +76,89 @@ #include #include -xstate_t fpu_capability = UNDEFINED; /* extended state capability */ -xstate_t fpu_default = UNDEFINED; /* default extended state */ +xstate_t fpu_capability = UNDEFINED; /* extended state capability */ +xstate_t fpu_default = UNDEFINED; /* default extended state */ -#define ALIGNED(addr,size) (((uintptr_t)(addr)&((size)-1))==0) +#define ALIGNED(addr, size) (((uintptr_t)(addr)&((size)-1))==0) /* Forward */ -extern void fpinit(void); -extern void fp_save( - thread_t thr_act); -extern void fp_load( - thread_t thr_act); +extern void fpinit(void); +extern void fp_save( + thread_t thr_act); +extern void fp_load( + thread_t thr_act); static void configure_mxcsr_capability_mask(x86_ext_thread_state_t *fps); static xstate_t thread_xstate(thread_t); -x86_ext_thread_state_t initial_fp_state __attribute((aligned(64))); -x86_ext_thread_state_t default_avx512_state __attribute((aligned(64))); -x86_ext_thread_state_t default_avx_state __attribute((aligned(64))); -x86_ext_thread_state_t default_fx_state __attribute((aligned(64))); +x86_ext_thread_state_t initial_fp_state __attribute((aligned(64))); +x86_ext_thread_state_t default_avx512_state __attribute((aligned(64))); +x86_ext_thread_state_t default_avx_state __attribute((aligned(64))); +x86_ext_thread_state_t default_fx_state __attribute((aligned(64))); /* Global MXCSR capability bitmask */ static unsigned int mxcsr_capability_mask; -#define fninit() \ +#define fninit() \ __asm__ volatile("fninit") -#define fnstcw(control) \ +#define fnstcw(control) \ __asm__("fnstcw %0" : "=m" (*(unsigned short *)(control))) -#define fldcw(control) \ +#define fldcw(control) \ __asm__ volatile("fldcw %0" : : "m" (*(unsigned short *) &(control)) ) -#define fnclex() \ +#define fnclex() \ __asm__ volatile("fnclex") -#define fnsave(state) \ +#define fnsave(state) \ __asm__ volatile("fnsave %0" : "=m" (*state)) -#define frstor(state) \ +#define frstor(state) \ __asm__ volatile("frstor %0" : : "m" (state)) #define fwait() \ - __asm__("fwait"); + __asm__("fwait"); -static inline void fxrstor(struct x86_fx_thread_state *a) { - __asm__ __volatile__("fxrstor %0" :: "m" (*a)); +static inline void +fxrstor(struct x86_fx_thread_state *a) +{ + __asm__ __volatile__ ("fxrstor %0" :: "m" (*a)); } -static inline void fxsave(struct x86_fx_thread_state *a) { - __asm__ __volatile__("fxsave %0" : "=m" (*a)); +static inline void +fxsave(struct x86_fx_thread_state *a) +{ + __asm__ __volatile__ ("fxsave %0" : "=m" (*a)); } -static inline void fxrstor64(struct x86_fx_thread_state *a) { - __asm__ __volatile__("fxrstor64 %0" :: "m" (*a)); +static inline void +fxrstor64(struct x86_fx_thread_state *a) +{ + __asm__ __volatile__ ("fxrstor64 %0" :: "m" (*a)); } -static inline void fxsave64(struct x86_fx_thread_state *a) { - __asm__ __volatile__("fxsave64 %0" : "=m" (*a)); +static inline void +fxsave64(struct x86_fx_thread_state *a) +{ + __asm__ __volatile__ ("fxsave64 %0" : "=m" (*a)); } #if !defined(RC_HIDE_XNU_J137) -#define IS_VALID_XSTATE(x) ((x) == FP || (x) == AVX || (x) == AVX512) +#define IS_VALID_XSTATE(x) ((x) == FP || (x) == AVX || (x) == AVX512) #else -#define IS_VALID_XSTATE(x) ((x) == FP || (x) == AVX) +#define IS_VALID_XSTATE(x) ((x) == FP || (x) == AVX) #endif -zone_t ifps_zone[] = { +zone_t ifps_zone[] = { [FP] = NULL, [AVX] = NULL, #if !defined(RC_HIDE_XNU_J137) [AVX512] = NULL #endif }; -static uint32_t fp_state_size[] = { +static uint32_t fp_state_size[] = { [FP] = sizeof(struct x86_fx_thread_state), [AVX] = sizeof(struct 
x86_avx_thread_state), #if !defined(RC_HIDE_XNU_J137) @@ -200,46 +208,56 @@ static const char *xstate_name[] = { #else #define fpu_YMM_capable (fpu_capability == AVX) #endif -static uint32_t cpuid_reevaluated = 0; +static uint32_t cpuid_reevaluated = 0; static void fpu_store_registers(void *, boolean_t); static void fpu_load_registers(void *); #if !defined(RC_HIDE_XNU_J137) static const uint32_t xstate_xmask[] = { - [FP] = FP_XMASK, - [AVX] = AVX_XMASK, - [AVX512] = AVX512_XMASK + [FP] = FP_XMASK, + [AVX] = AVX_XMASK, + [AVX512] = AVX512_XMASK }; #else static const uint32_t xstate_xmask[] = { - [FP] = FP_XMASK, - [AVX] = AVX_XMASK, + [FP] = FP_XMASK, + [AVX] = AVX_XMASK, }; #endif -static inline void xsave(struct x86_fx_thread_state *a, uint32_t rfbm) { - __asm__ __volatile__("xsave %0" :"=m" (*a) : "a"(rfbm), "d"(0)); +static inline void +xsave(struct x86_fx_thread_state *a, uint32_t rfbm) +{ + __asm__ __volatile__ ("xsave %0" :"=m" (*a) : "a"(rfbm), "d"(0)); } -static inline void xsave64(struct x86_fx_thread_state *a, uint32_t rfbm) { - __asm__ __volatile__("xsave64 %0" :"=m" (*a) : "a"(rfbm), "d"(0)); +static inline void +xsave64(struct x86_fx_thread_state *a, uint32_t rfbm) +{ + __asm__ __volatile__ ("xsave64 %0" :"=m" (*a) : "a"(rfbm), "d"(0)); } -static inline void xrstor(struct x86_fx_thread_state *a, uint32_t rfbm) { - __asm__ __volatile__("xrstor %0" :: "m" (*a), "a"(rfbm), "d"(0)); +static inline void +xrstor(struct x86_fx_thread_state *a, uint32_t rfbm) +{ + __asm__ __volatile__ ("xrstor %0" :: "m" (*a), "a"(rfbm), "d"(0)); } -static inline void xrstor64(struct x86_fx_thread_state *a, uint32_t rfbm) { - __asm__ __volatile__("xrstor64 %0" :: "m" (*a), "a"(rfbm), "d"(0)); +static inline void +xrstor64(struct x86_fx_thread_state *a, uint32_t rfbm) +{ + __asm__ __volatile__ ("xrstor64 %0" :: "m" (*a), "a"(rfbm), "d"(0)); } #if !defined(RC_HIDE_XNU_J137) -__unused static inline void vzeroupper(void) { - __asm__ __volatile__("vzeroupper" ::); +__unused static inline void +vzeroupper(void) +{ + __asm__ __volatile__ ("vzeroupper" ::); } -static boolean_t fpu_thread_promote_avx512(thread_t); /* Forward */ +static boolean_t fpu_thread_promote_avx512(thread_t); /* Forward */ /* * Define a wrapper for bcopy to defeat destination size checka. @@ -251,16 +269,18 @@ static boolean_t fpu_thread_promote_avx512(thread_t); /* Forward */ * bcopy_nockch(src,&dst->fpu_ymmh0,8*sizeof(_STRUCT_XMM_REG)); * without the compiler throwing a __builtin__memmove_chk error. */ -static inline void bcopy_nochk(void *_src, void *_dst, size_t _len) { +static inline void +bcopy_nochk(void *_src, void *_dst, size_t _len) +{ bcopy(_src, _dst, _len); -} +} /* * Furthermore, make compile-time asserts that no padding creeps into structures * for which we're doing this. */ -#define ASSERT_PACKED(t, m1, m2, n, mt) \ -extern char assert_packed_ ## t ## _ ## m1 ## _ ## m2 \ +#define ASSERT_PACKED(t, m1, m2, n, mt) \ +extern char assert_packed_ ## t ## _ ## m1 ## _ ## m2 \ [(offsetof(t,m2) - offsetof(t,m1) == (n - 1)*sizeof(mt)) ? 1 : -1] ASSERT_PACKED(x86_avx_state32_t, fpu_ymmh0, fpu_ymmh7, 8, _STRUCT_XMM_REG); @@ -278,7 +298,7 @@ ASSERT_PACKED(x86_avx512_state64_t, fpu_zmm16, fpu_zmm31, 16, _STRUCT_ZMM_REG); #if defined(DEBUG_AVX512) -#define DBG(x...) kprintf("DBG: " x) +#define DBG(x...) 
kprintf("DBG: " x) typedef struct { uint8_t byte[8]; } opmask_t; typedef struct { uint8_t byte[16]; } xmm_t; @@ -288,7 +308,7 @@ typedef struct { uint8_t byte[64]; } zmm_t; static void DBG_AVX512_STATE(struct x86_avx512_thread_state *sp) { - int i, j; + int i, j; xmm_t *xmm = (xmm_t *) &sp->fp.fx_XMM_reg; xmm_t *ymmh = (xmm_t *) &sp->x_YMM_Hi128; ymm_t *zmmh = (ymm_t *) &sp->x_ZMM_Hi256; @@ -306,24 +326,29 @@ DBG_AVX512_STATE(struct x86_avx512_thread_state *sp) /* Print all ZMM registers */ for (i = 0; i < 16; i++) { kprintf("zmm%d:\t0x", i); - for (j = 0; j < 16; j++) + for (j = 0; j < 16; j++) { kprintf("%02x", xmm[i].byte[j]); - for (j = 0; j < 16; j++) + } + for (j = 0; j < 16; j++) { kprintf("%02x", ymmh[i].byte[j]); - for (j = 0; j < 32; j++) + } + for (j = 0; j < 32; j++) { kprintf("%02x", zmmh[i].byte[j]); + } kprintf("\n"); } for (i = 0; i < 16; i++) { - kprintf("zmm%d:\t0x", 16+i); - for (j = 0; j < 64; j++) + kprintf("zmm%d:\t0x", 16 + i); + for (j = 0; j < 64; j++) { kprintf("%02x", zmm[i].byte[j]); + } kprintf("\n"); } for (i = 0; i < 8; i++) { kprintf("k%d:\t0x", i); - for (j = 0; j < 8; j++) + for (j = 0; j < 8; j++) { kprintf("%02x", k[i].byte[j]); + } kprintf("\n"); } @@ -331,7 +356,7 @@ DBG_AVX512_STATE(struct x86_avx512_thread_state *sp) kprintf("xcomp_bv: 0x%016llx\n", sp->_xh.xcomp_bv); } #else -#define DBG(x...) +#define DBG(x...) static void DBG_AVX512_STATE(__unused struct x86_avx512_thread_state *sp) { @@ -341,13 +366,13 @@ DBG_AVX512_STATE(__unused struct x86_avx512_thread_state *sp) #endif -#if DEBUG +#if DEBUG static inline unsigned short fnstsw(void) { unsigned short status; - __asm__ volatile("fnstsw %0" : "=ma" (status)); - return(status); + __asm__ volatile ("fnstsw %0" : "=ma" (status)); + return status; } #endif @@ -372,17 +397,18 @@ configure_mxcsr_capability_mask(x86_ext_thread_state_t *fps) mxcsr_capability_mask = fps->fx.fx_MXCSR_MASK; /* Set default mask value if necessary */ - if (mxcsr_capability_mask == 0) + if (mxcsr_capability_mask == 0) { mxcsr_capability_mask = 0xffbf; - + } + /* Clear vector register store */ - bzero(&fps->fx.fx_XMM_reg[0][0], sizeof(fps->fx.fx_XMM_reg)); + bzero(&fps->fx.fx_XMM_reg[0][0], sizeof(fps->fx.fx_XMM_reg)); bzero(fps->avx.x_YMM_Hi128, sizeof(fps->avx.x_YMM_Hi128)); #if !defined(RC_HIDE_XNU_J137) if (fpu_ZMM_capable) { bzero(fps->avx512.x_ZMM_Hi256, sizeof(fps->avx512.x_ZMM_Hi256)); - bzero(fps->avx512.x_Hi16_ZMM, sizeof(fps->avx512.x_Hi16_ZMM)); - bzero(fps->avx512.x_Opmask, sizeof(fps->avx512.x_Opmask)); + bzero(fps->avx512.x_Hi16_ZMM, sizeof(fps->avx512.x_Hi16_ZMM)); + bzero(fps->avx512.x_Opmask, sizeof(fps->avx512.x_Opmask)); } #endif @@ -415,21 +441,21 @@ int fpsimd_fault_popc = 0; void init_fpu(void) { -#if DEBUG - unsigned short status; - unsigned short control; +#if DEBUG + unsigned short status; + unsigned short control; #endif /* * Check for FPU by initializing it, * then trying to read the correct bit patterns from * the control and status registers. 
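
/*
 * [Editorial aside -- not part of the patch] After FNINIT the x87 control
 * word is architecturally 0x037F (all exceptions masked, extended precision,
 * round-to-nearest) and the status word is clear, which is exactly what the
 * DEBUG assert in init_fpu() below relies on.  A stand-alone check:
 */
#include <assert.h>

int
main(void)
{
	unsigned short status, control;

	__asm__ volatile ("fninit");
	__asm__ volatile ("fnstsw %0" : "=m" (status));
	__asm__ ("fnstcw %0" : "=m" (control));
	assert((status & 0xff) == 0);
	assert((control & 0x103f) == 0x3f);	/* low bits of 0x037F */
	return 0;
}
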
*/ - set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | CR0_NE); /* allow use of FPU */ + set_cr0((get_cr0() & ~(CR0_EM | CR0_TS)) | CR0_NE); /* allow use of FPU */ fninit(); -#if DEBUG +#if DEBUG status = fnstsw(); fnstcw(&control); - + assert(((status & 0xff) == 0) && ((control & 0x103f) == 0x3f)); #endif /* Advertise SSE support */ @@ -439,8 +465,9 @@ init_fpu(void) if (cpuid_features() & CPUID_FEATURE_SSE) { set_cr4(get_cr4() | CR4_OSXMM); } - } else + } else { panic("fpu is not FP_FXSR"); + } fpu_capability = fpu_default = FP; @@ -452,11 +479,11 @@ init_fpu(void) if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_AVX512F) { PE_parse_boot_argn("avx512", &is_avx512_enabled, sizeof(boolean_t)); kprintf("AVX512 supported %s\n", - is_avx512_enabled ? "and enabled" : "but disabled"); + is_avx512_enabled ? "and enabled" : "but disabled"); } } #endif - + /* Configure the XSAVE context mechanism if the processor supports * AVX/YMM registers */ @@ -472,8 +499,9 @@ init_fpu(void) set_cr4(get_cr4() | CR4_OSXSAVE); xsetbv(0, AVX512_XMASK); /* Re-evaluate CPUID, once, to reflect OSXSAVE */ - if (OSCompareAndSwap(0, 1, &cpuid_reevaluated)) + if (OSCompareAndSwap(0, 1, &cpuid_reevaluated)) { cpuid_set_info(); + } /* Verify that now selected state can be accommodated */ assert(xs0p->extended_state[ebx] == fp_state_size[AVX512]); /* @@ -492,17 +520,19 @@ init_fpu(void) set_cr4(get_cr4() | CR4_OSXSAVE); xsetbv(0, AVX_XMASK); /* Re-evaluate CPUID, once, to reflect OSXSAVE */ - if (OSCompareAndSwap(0, 1, &cpuid_reevaluated)) + if (OSCompareAndSwap(0, 1, &cpuid_reevaluated)) { cpuid_set_info(); + } /* Verify that now selected state can be accommodated */ assert(xs0p->extended_state[ebx] == fp_state_size[AVX]); } } - if (cpu_number() == master_cpu) + if (cpu_number() == master_cpu) { kprintf("fpu_state: %s, state_size: %d\n", - xstate_name[fpu_capability], - fp_state_size[fpu_capability]); + xstate_name[fpu_capability], + fp_state_size[fpu_capability]); + } fpinit(); current_cpu_datap()->cpu_xstate = fpu_default; @@ -525,12 +555,12 @@ fp_state_alloc(xstate_t xs) assert(ifps_zone[xs] != NULL); ifps = zalloc(ifps_zone[xs]); -#if DEBUG - if (!(ALIGNED(ifps,64))) { +#if DEBUG + if (!(ALIGNED(ifps, 64))) { panic("fp_state_alloc: %p, %u, %p, %u", - ifps, (unsigned) ifps_zone[xs]->elem_size, - (void *) ifps_zone[xs]->free_elements, - (unsigned) ifps_zone[xs]->alloc_size); + ifps, (unsigned) ifps_zone[xs]->elem_size, + (void *) ifps_zone[xs]->free_elements, + (unsigned) ifps_zone[xs]->alloc_size); } #endif bzero(ifps, fp_state_size[xs]); @@ -545,64 +575,73 @@ fp_state_free(void *ifps, xstate_t xs) zfree(ifps_zone[xs], ifps); } -void clear_fpu(void) +void +clear_fpu(void) { set_ts(); } -static void fpu_load_registers(void *fstate) { +static void +fpu_load_registers(void *fstate) +{ struct x86_fx_thread_state *ifps = fstate; fp_save_layout_t layout = ifps->fp_save_layout; - assert(current_task() == NULL || \ - (thread_is_64bit_addr(current_thread()) ? \ - (layout == FXSAVE64 || layout == XSAVE64) : \ - (layout == FXSAVE32 || layout == XSAVE32))); + assert(current_task() == NULL || \ + (thread_is_64bit_addr(current_thread()) ? 
\ + (layout == FXSAVE64 || layout == XSAVE64) : \ + (layout == FXSAVE32 || layout == XSAVE32))); assert(ALIGNED(ifps, 64)); assert(ml_get_interrupts_enabled() == FALSE); -#if DEBUG +#if DEBUG if (layout == XSAVE32 || layout == XSAVE64) { struct x86_avx_thread_state *iavx = fstate; unsigned i; /* Verify reserved bits in the XSAVE header*/ - if (iavx->_xh.xstate_bv & ~xstate_xmask[current_xstate()]) + if (iavx->_xh.xstate_bv & ~xstate_xmask[current_xstate()]) { panic("iavx->_xh.xstate_bv: 0x%llx", iavx->_xh.xstate_bv); - for (i = 0; i < sizeof(iavx->_xh.xhrsvd); i++) - if (iavx->_xh.xhrsvd[i]) + } + for (i = 0; i < sizeof(iavx->_xh.xhrsvd); i++) { + if (iavx->_xh.xhrsvd[i]) { panic("Reserved bit set"); + } + } } if (fpu_YMM_capable) { - if (layout != XSAVE32 && layout != XSAVE64) + if (layout != XSAVE32 && layout != XSAVE64) { panic("Inappropriate layout: %u\n", layout); + } } -#endif /* DEBUG */ +#endif /* DEBUG */ switch (layout) { - case FXSAVE64: + case FXSAVE64: fxrstor64(ifps); break; - case FXSAVE32: + case FXSAVE32: fxrstor(ifps); break; - case XSAVE64: + case XSAVE64: xrstor64(ifps, xstate_xmask[current_xstate()]); break; - case XSAVE32: + case XSAVE32: xrstor(ifps, xstate_xmask[current_xstate()]); break; - default: + default: panic("fpu_load_registers() bad layout: %d\n", layout); } } -static void fpu_store_registers(void *fstate, boolean_t is64) { +static void +fpu_store_registers(void *fstate, boolean_t is64) +{ struct x86_fx_thread_state *ifps = fstate; assert(ALIGNED(ifps, 64)); xstate_t xs = current_xstate(); switch (xs) { - case FP: + case FP: if (is64) { fxsave64(fstate); ifps->fp_save_layout = FXSAVE64; @@ -611,9 +650,9 @@ static void fpu_store_registers(void *fstate, boolean_t is64) { ifps->fp_save_layout = FXSAVE32; } break; - case AVX: + case AVX: #if !defined(RC_HIDE_XNU_J137) - case AVX512: + case AVX512: #endif if (is64) { xsave64(ifps, xstate_xmask[xs]); @@ -623,7 +662,7 @@ static void fpu_store_registers(void *fstate, boolean_t is64) { ifps->fp_save_layout = XSAVE32; } break; - default: + default: panic("fpu_store_registers() bad xstate: %d\n", xs); } } @@ -635,18 +674,19 @@ static void fpu_store_registers(void *fstate, boolean_t is64) { void fpu_module_init(void) { - if (!IS_VALID_XSTATE(fpu_default)) + if (!IS_VALID_XSTATE(fpu_default)) { panic("fpu_module_init: invalid extended state %u\n", - fpu_default); + fpu_default); + } /* We explicitly choose an allocation size of 13 pages = 64 * 832 * to eliminate waste for the 832 byte sized * AVX XSAVE register save area. */ ifps_zone[fpu_default] = zinit(fp_state_size[fpu_default], - thread_max * fp_state_size[fpu_default], - 64 * fp_state_size[fpu_default], - "x86 fpsave state"); + thread_max * fp_state_size[fpu_default], + 64 * fp_state_size[fpu_default], + "x86 fpsave state"); /* To maintain the required alignment, disable * zone debugging for this zone as that appends @@ -661,9 +701,9 @@ fpu_module_init(void) */ if (fpu_capability == AVX512) { ifps_zone[AVX512] = zinit(fp_state_size[AVX512], - thread_max * fp_state_size[AVX512], - 32 * fp_state_size[AVX512], - "x86 avx512 save state"); + thread_max * fp_state_size[AVX512], + 32 * fp_state_size[AVX512], + "x86 avx512 save state"); zone_change(ifps_zone[AVX512], Z_ALIGNMENT_REQUIRED, TRUE); } #endif @@ -681,13 +721,13 @@ fpu_module_init(void) void fpu_switch_context(thread_t old, thread_t new) { - struct x86_fx_thread_state *ifps; + struct x86_fx_thread_state *ifps; cpu_data_t *cdp = current_cpu_datap(); xstate_t new_xstate = new ? 
thread_xstate(new) : fpu_default; assert(ml_get_interrupts_enabled() == FALSE); ifps = (old)->machine.ifps; -#if DEBUG +#if DEBUG if (ifps && ((ifps->fp_valid != FALSE) && (ifps->fp_valid != TRUE))) { panic("ifps->fp_valid: %u\n", ifps->fp_valid); } @@ -716,9 +756,9 @@ fpu_switch_context(thread_t old, thread_t new) } assertf(fpu_YMM_capable ? (xgetbv(XCR0) == xstate_xmask[cdp->cpu_xstate]) : TRUE, "XCR0 mismatch: 0x%llx 0x%x 0x%x", xgetbv(XCR0), cdp->cpu_xstate, xstate_xmask[cdp->cpu_xstate]); - if (new_xstate != cdp->cpu_xstate) { + if (new_xstate != (xstate_t) cdp->cpu_xstate) { DBG("fpu_switch_context(%p,%p) new xstate: %s\n", - old, new, xstate_name[new_xstate]); + old, new, xstate_name[new_xstate]); xsetbv(0, xstate_xmask[new_xstate]); cdp->cpu_xstate = new_xstate; } @@ -733,45 +773,49 @@ fpu_switch_context(thread_t old, thread_t new) void fpu_free(thread_t thread, void *fps) { - pcb_t pcb = THREAD_TO_PCB(thread); - + pcb_t pcb = THREAD_TO_PCB(thread); + fp_state_free(fps, pcb->xstate); pcb->xstate = UNDEFINED; } /* - * Set the floating-point state for a thread based - * on the FXSave formatted data. This is basically - * the same as fpu_set_state except it uses the - * expanded data structure. + * Set the floating-point state for a thread based + * on the FXSave formatted data. This is basically + * the same as fpu_set_state except it uses the + * expanded data structure. * If the thread is not the current thread, it is * not running (held). Locking needed against * concurrent fpu_set_state or fpu_get_state. */ kern_return_t fpu_set_fxstate( - thread_t thr_act, - thread_state_t tstate, + thread_t thr_act, + thread_state_t tstate, thread_flavor_t f) { - struct x86_fx_thread_state *ifps; - struct x86_fx_thread_state *new_ifps; - x86_float_state64_t *state; - pcb_t pcb; - boolean_t old_valid, fresh_state = FALSE; + struct x86_fx_thread_state *ifps; + struct x86_fx_thread_state *new_ifps; + x86_float_state64_t *state; + pcb_t pcb; + boolean_t old_valid, fresh_state = FALSE; - if (fpu_capability == UNDEFINED) + if (fpu_capability == UNDEFINED) { return KERN_FAILURE; + } if ((f == x86_AVX_STATE32 || f == x86_AVX_STATE64) && - fpu_capability < AVX) + fpu_capability < AVX) { return KERN_FAILURE; + } #if !defined(RC_HIDE_XNU_J137) if ((f == x86_AVX512_STATE32 || f == x86_AVX512_STATE64) && - thread_xstate(thr_act) == AVX) - if (!fpu_thread_promote_avx512(thr_act)) + thread_xstate(thr_act) == AVX) { + if (!fpu_thread_promote_avx512(thr_act)) { return KERN_FAILURE; + } + } #endif state = (x86_float_state64_t *)tstate; @@ -784,7 +828,7 @@ fpu_set_fxstate( * new FPU state is 'invalid'. * Deallocate the fp state if it exists. */ - simple_lock(&pcb->lock); + simple_lock(&pcb->lock, LCK_GRP_NULL); ifps = pcb->ifps; pcb->ifps = 0; @@ -799,8 +843,8 @@ fpu_set_fxstate( * Valid incoming state. Allocate the fp state if there is none. 
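
/*
 * [Editorial aside -- not part of the patch] The Retry: sequence below
 * allocates the save area with the PCB simple lock dropped, retakes the
 * lock, re-checks, and frees the allocation if another thread won the race.
 * The same shape with a pthread mutex; demo_state_t and the globals are
 * hypothetical:
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct { int dummy; } demo_state_t;

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static demo_state_t *demo_ptr;

static demo_state_t *
demo_get_state(void)
{
	demo_state_t *fresh = NULL;

	pthread_mutex_lock(&demo_lock);
	while (demo_ptr == NULL) {
		if (fresh != NULL) {
			demo_ptr = fresh;	/* install our allocation */
			fresh = NULL;
			break;
		}
		/* Never allocate while holding the lock; re-check after. */
		pthread_mutex_unlock(&demo_lock);
		fresh = calloc(1, sizeof(*fresh));	/* NULL check elided */
		pthread_mutex_lock(&demo_lock);
	}
	pthread_mutex_unlock(&demo_lock);
	free(fresh);	/* non-NULL only if we lost the race */
	return demo_ptr;
}
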
*/ new_ifps = 0; - Retry: - simple_lock(&pcb->lock); +Retry: + simple_lock(&pcb->lock, LCK_GRP_NULL); ifps = pcb->ifps; if (ifps == 0) { @@ -822,7 +866,7 @@ fpu_set_fxstate( old_valid = ifps->fp_valid; -#if DEBUG || DEVELOPMENT +#if DEBUG || DEVELOPMENT if ((fresh_state == FALSE) && (old_valid == FALSE) && (thr_act != current_thread())) { panic("fpu_set_fxstate inconsistency, thread: %p not stopped", thr_act); } @@ -837,13 +881,20 @@ fpu_set_fxstate( bcopy((char *)&state->fpu_fcw, (char *)ifps, fp_state_size[FP]); switch (thread_xstate(thr_act)) { - case UNDEFINED: + case UNDEFINED_FULL: + case FP_FULL: + case AVX_FULL: + case AVX512_FULL: + panic("fpu_set_fxstate() INVALID xstate: 0x%x", thread_xstate(thr_act)); + break; + + case UNDEFINED: panic("fpu_set_fxstate() UNDEFINED xstate"); break; - case FP: + case FP: ifps->fp_save_layout = thread_is_64bit_addr(thr_act) ? FXSAVE64 : FXSAVE32; break; - case AVX: { + case AVX: { struct x86_avx_thread_state *iavx = (void *) ifps; x86_avx_state64_t *xs = (x86_avx_state64_t *) state; @@ -862,9 +913,9 @@ fpu_set_fxstate( iavx->_xh.xstate_bv = (XFEM_SSE | XFEM_X87); } break; - } + } #if !defined(RC_HIDE_XNU_J137) - case AVX512: { + case AVX512: { struct x86_avx512_thread_state *iavx = (void *) ifps; union { thread_state_t ts; @@ -880,28 +931,28 @@ fpu_set_fxstate( iavx->_xh.xcomp_bv = 0; switch (f) { - case x86_AVX512_STATE32: - bcopy_nochk(&xs.s32->fpu_k0, iavx->x_Opmask, 8 * sizeof(_STRUCT_OPMASK_REG)); - bcopy_nochk(&xs.s32->fpu_zmmh0, iavx->x_ZMM_Hi256, 8 * sizeof(_STRUCT_YMM_REG)); - bcopy_nochk(&xs.s32->fpu_ymmh0, iavx->x_YMM_Hi128, 8 * sizeof(_STRUCT_XMM_REG)); + case x86_AVX512_STATE32: + bcopy_nochk(&xs.s32->fpu_k0, iavx->x_Opmask, 8 * sizeof(_STRUCT_OPMASK_REG)); + bcopy_nochk(&xs.s32->fpu_zmmh0, iavx->x_ZMM_Hi256, 8 * sizeof(_STRUCT_YMM_REG)); + bcopy_nochk(&xs.s32->fpu_ymmh0, iavx->x_YMM_Hi128, 8 * sizeof(_STRUCT_XMM_REG)); DBG_AVX512_STATE(iavx); break; - case x86_AVX_STATE32: - bcopy_nochk(&xs.s32->fpu_ymmh0, iavx->x_YMM_Hi128, 8 * sizeof(_STRUCT_XMM_REG)); + case x86_AVX_STATE32: + bcopy_nochk(&xs.s32->fpu_ymmh0, iavx->x_YMM_Hi128, 8 * sizeof(_STRUCT_XMM_REG)); break; - case x86_AVX512_STATE64: - bcopy_nochk(&xs.s64->fpu_k0, iavx->x_Opmask, 8 * sizeof(_STRUCT_OPMASK_REG)); - bcopy_nochk(&xs.s64->fpu_zmm16, iavx->x_Hi16_ZMM, 16 * sizeof(_STRUCT_ZMM_REG)); + case x86_AVX512_STATE64: + bcopy_nochk(&xs.s64->fpu_k0, iavx->x_Opmask, 8 * sizeof(_STRUCT_OPMASK_REG)); + bcopy_nochk(&xs.s64->fpu_zmm16, iavx->x_Hi16_ZMM, 16 * sizeof(_STRUCT_ZMM_REG)); bcopy_nochk(&xs.s64->fpu_zmmh0, iavx->x_ZMM_Hi256, 16 * sizeof(_STRUCT_YMM_REG)); bcopy_nochk(&xs.s64->fpu_ymmh0, iavx->x_YMM_Hi128, 16 * sizeof(_STRUCT_XMM_REG)); DBG_AVX512_STATE(iavx); break; - case x86_AVX_STATE64: + case x86_AVX_STATE64: bcopy_nochk(&xs.s64->fpu_ymmh0, iavx->x_YMM_Hi128, 16 * sizeof(_STRUCT_XMM_REG)); break; } break; - } + } #endif } @@ -911,16 +962,18 @@ fpu_set_fxstate( boolean_t istate = ml_set_interrupts_enabled(FALSE); ifps->fp_valid = TRUE; /* If altering the current thread's state, disable FPU */ - if (thr_act == current_thread()) + if (thr_act == current_thread()) { set_ts(); + } ml_set_interrupts_enabled(istate); } simple_unlock(&pcb->lock); - if (new_ifps != 0) + if (new_ifps != 0) { fp_state_free(new_ifps, thread_xstate(thr_act)); + } } return KERN_SUCCESS; } @@ -933,26 +986,29 @@ fpu_set_fxstate( */ kern_return_t fpu_get_fxstate( - thread_t thr_act, - thread_state_t tstate, + thread_t thr_act, + thread_state_t tstate, thread_flavor_t f) { - struct 
x86_fx_thread_state *ifps; - x86_float_state64_t *state; - kern_return_t ret = KERN_FAILURE; - pcb_t pcb; + struct x86_fx_thread_state *ifps; + x86_float_state64_t *state; + kern_return_t ret = KERN_FAILURE; + pcb_t pcb; - if (fpu_capability == UNDEFINED) + if (fpu_capability == UNDEFINED) { return KERN_FAILURE; + } if ((f == x86_AVX_STATE32 || f == x86_AVX_STATE64) && - fpu_capability < AVX) + fpu_capability < AVX) { return KERN_FAILURE; + } #if !defined(RC_HIDE_XNU_J137) if ((f == x86_AVX512_STATE32 || f == x86_AVX512_STATE64) && - thread_xstate(thr_act) != AVX512) + thread_xstate(thr_act) != AVX512) { return KERN_FAILURE; + } #endif state = (x86_float_state64_t *)tstate; @@ -960,7 +1016,7 @@ fpu_get_fxstate( assert(thr_act != THREAD_NULL); pcb = THREAD_TO_PCB(thr_act); - simple_lock(&pcb->lock); + simple_lock(&pcb->lock, LCK_GRP_NULL); ifps = pcb->ifps; if (ifps == 0) { @@ -980,7 +1036,7 @@ fpu_get_fxstate( * If the live fpu state belongs to our target */ if (thr_act == current_thread()) { - boolean_t intr; + boolean_t intr; intr = ml_set_interrupts_enabled(FALSE); @@ -991,14 +1047,21 @@ fpu_get_fxstate( (void)ml_set_interrupts_enabled(intr); } if (ifps->fp_valid) { - bcopy((char *)ifps, (char *)&state->fpu_fcw, fp_state_size[FP]); + bcopy((char *)ifps, (char *)&state->fpu_fcw, fp_state_size[FP]); switch (thread_xstate(thr_act)) { - case UNDEFINED: + case UNDEFINED_FULL: + case FP_FULL: + case AVX_FULL: + case AVX512_FULL: + panic("fpu_get_fxstate() INVALID xstate: 0x%x", thread_xstate(thr_act)); + break; + + case UNDEFINED: panic("fpu_get_fxstate() UNDEFINED xstate"); break; - case FP: - break; /* already done */ - case AVX: { + case FP: + break; /* already done */ + case AVX: { struct x86_avx_thread_state *iavx = (void *) ifps; x86_avx_state64_t *xs = (x86_avx_state64_t *) state; if (f == x86_AVX_STATE32) { @@ -1007,9 +1070,9 @@ fpu_get_fxstate( bcopy_nochk(iavx->x_YMM_Hi128, &xs->fpu_ymmh0, 16 * sizeof(_STRUCT_XMM_REG)); } break; - } + } #if !defined(RC_HIDE_XNU_J137) - case AVX512: { + case AVX512: { struct x86_avx512_thread_state *iavx = (void *) ifps; union { thread_state_t ts; @@ -1017,28 +1080,28 @@ fpu_get_fxstate( x86_avx512_state64_t *s64; } xs = { .ts = tstate }; switch (f) { - case x86_AVX512_STATE32: - bcopy_nochk(iavx->x_Opmask, &xs.s32->fpu_k0, 8 * sizeof(_STRUCT_OPMASK_REG)); + case x86_AVX512_STATE32: + bcopy_nochk(iavx->x_Opmask, &xs.s32->fpu_k0, 8 * sizeof(_STRUCT_OPMASK_REG)); bcopy_nochk(iavx->x_ZMM_Hi256, &xs.s32->fpu_zmmh0, 8 * sizeof(_STRUCT_YMM_REG)); bcopy_nochk(iavx->x_YMM_Hi128, &xs.s32->fpu_ymmh0, 8 * sizeof(_STRUCT_XMM_REG)); DBG_AVX512_STATE(iavx); break; - case x86_AVX_STATE32: + case x86_AVX_STATE32: bcopy_nochk(iavx->x_YMM_Hi128, &xs.s32->fpu_ymmh0, 8 * sizeof(_STRUCT_XMM_REG)); break; - case x86_AVX512_STATE64: - bcopy_nochk(iavx->x_Opmask, &xs.s64->fpu_k0, 8 * sizeof(_STRUCT_OPMASK_REG)); - bcopy_nochk(iavx->x_Hi16_ZMM, &xs.s64->fpu_zmm16, 16 * sizeof(_STRUCT_ZMM_REG)); + case x86_AVX512_STATE64: + bcopy_nochk(iavx->x_Opmask, &xs.s64->fpu_k0, 8 * sizeof(_STRUCT_OPMASK_REG)); + bcopy_nochk(iavx->x_Hi16_ZMM, &xs.s64->fpu_zmm16, 16 * sizeof(_STRUCT_ZMM_REG)); bcopy_nochk(iavx->x_ZMM_Hi256, &xs.s64->fpu_zmmh0, 16 * sizeof(_STRUCT_YMM_REG)); bcopy_nochk(iavx->x_YMM_Hi128, &xs.s64->fpu_ymmh0, 16 * sizeof(_STRUCT_XMM_REG)); DBG_AVX512_STATE(iavx); break; - case x86_AVX_STATE64: + case x86_AVX_STATE64: bcopy_nochk(iavx->x_YMM_Hi128, &xs.s64->fpu_ymmh0, 16 * sizeof(_STRUCT_XMM_REG)); break; } break; - } + } #endif } @@ -1060,29 +1123,31 @@ fpu_get_fxstate( 
void fpu_dup_fxstate( - thread_t parent, - thread_t child) + thread_t parent, + thread_t child) { struct x86_fx_thread_state *new_ifps = NULL; - boolean_t intr; - pcb_t ppcb; - xstate_t xstate = thread_xstate(parent); + boolean_t intr; + pcb_t ppcb; + xstate_t xstate = thread_xstate(parent); ppcb = THREAD_TO_PCB(parent); - if (ppcb->ifps == NULL) - return; + if (ppcb->ifps == NULL) { + return; + } - if (child->machine.ifps) - panic("fpu_dup_fxstate: child's ifps non-null"); + if (child->machine.ifps) { + panic("fpu_dup_fxstate: child's ifps non-null"); + } new_ifps = fp_state_alloc(xstate); - simple_lock(&ppcb->lock); + simple_lock(&ppcb->lock, LCK_GRP_NULL); if (ppcb->ifps != NULL) { struct x86_fx_thread_state *ifps = ppcb->ifps; - /* + /* * Make sure we`ve got the latest fp state info */ if (current_thread() == parent) { @@ -1099,8 +1164,8 @@ fpu_dup_fxstate( child->machine.ifps = new_ifps; child->machine.xstate = xstate; bcopy((char *)(ppcb->ifps), - (char *)(child->machine.ifps), - fp_state_size[xstate]); + (char *)(child->machine.ifps), + fp_state_size[xstate]); /* Mark the new fp saved state as non-live. */ /* Temporarily disabled: radar 4647827 @@ -1117,8 +1182,9 @@ fpu_dup_fxstate( } simple_unlock(&ppcb->lock); - if (new_ifps != NULL) - fp_state_free(new_ifps, xstate); + if (new_ifps != NULL) { + fp_state_free(new_ifps, xstate); + } } /* @@ -1128,7 +1194,8 @@ fpu_dup_fxstate( */ void -fpinit(void) { +fpinit(void) +{ boolean_t istate = ml_set_interrupts_enabled(FALSE); clear_ts(); fninit(); @@ -1137,18 +1204,18 @@ fpinit(void) { * non-DEBUG, as dirtying the x87 control word may slow down * xsave/xrstor and affect energy use. */ - unsigned short control, control2; + unsigned short control, control2; fnstcw(&control); control2 = control; - control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */ - control |= (FPC_PC_64 | /* Set precision */ - FPC_RC_RN | /* round-to-nearest */ - FPC_ZE | /* Suppress zero-divide */ - FPC_OE | /* and overflow */ - FPC_UE | /* underflow */ - FPC_IE | /* Allow NaNQs and +-INF */ - FPC_DE | /* Allow denorms as operands */ - FPC_PE); /* No trap for precision loss */ + control &= ~(FPC_PC | FPC_RC); /* Clear precision & rounding control */ + control |= (FPC_PC_64 | /* Set precision */ + FPC_RC_RN | /* round-to-nearest */ + FPC_ZE | /* Suppress zero-divide */ + FPC_OE | /* and overflow */ + FPC_UE | /* underflow */ + FPC_IE | /* Allow NaNQs and +-INF */ + FPC_DE | /* Allow denorms as operands */ + FPC_PE); /* No trap for precision loss */ assert(control == control2); fldcw(control); #endif @@ -1171,29 +1238,29 @@ uint64_t x86_isr_fp_simd_use; void fpnoextflt(void) { - boolean_t intr; - thread_t thr_act; - pcb_t pcb; + boolean_t intr; + thread_t thr_act; + pcb_t pcb; struct x86_fx_thread_state *ifps = 0; - xstate_t xstate = current_xstate(); + xstate_t xstate = current_xstate(); thr_act = current_thread(); pcb = THREAD_TO_PCB(thr_act); if (pcb->ifps == 0 && !get_interrupt_level()) { - ifps = fp_state_alloc(xstate); + ifps = fp_state_alloc(xstate); bcopy((char *)&initial_fp_state, (char *)ifps, fp_state_size[xstate]); if (!thread_is_64bit_addr(thr_act)) { ifps->fp_save_layout = fpu_YMM_capable ? XSAVE32 : FXSAVE32; - } - else + } else { ifps->fp_save_layout = fpu_YMM_capable ? 
XSAVE64 : FXSAVE64; + } ifps->fp_valid = TRUE; } intr = ml_set_interrupts_enabled(FALSE); - clear_ts(); /* Enable FPU use */ + clear_ts(); /* Enable FPU use */ if (__improbable(get_interrupt_level())) { /* Track number of #DNA traps at interrupt context, @@ -1209,9 +1276,9 @@ fpnoextflt(void) } fpinit(); } else { - if (pcb->ifps == 0) { - pcb->ifps = ifps; - pcb->xstate = xstate; + if (pcb->ifps == 0) { + pcb->ifps = ifps; + pcb->xstate = xstate; ifps = 0; } /* @@ -1221,8 +1288,9 @@ fpnoextflt(void) } (void)ml_set_interrupts_enabled(intr); - if (ifps) - fp_state_free(ifps, xstate); + if (ifps) { + fp_state_free(ifps, xstate); + } } /* @@ -1233,25 +1301,27 @@ fpnoextflt(void) void fpextovrflt(void) { - thread_t thr_act = current_thread(); - pcb_t pcb; + thread_t thr_act = current_thread(); + pcb_t pcb; struct x86_fx_thread_state *ifps; - boolean_t intr; - xstate_t xstate = current_xstate(); + boolean_t intr; + xstate_t xstate = current_xstate(); intr = ml_set_interrupts_enabled(FALSE); - if (get_interrupt_level()) + if (get_interrupt_level()) { panic("FPU segment overrun exception at interrupt context\n"); - if (current_task() == kernel_task) + } + if (current_task() == kernel_task) { panic("FPU segment overrun exception in kernel thread context\n"); + } /* * This is a non-recoverable error. * Invalidate the thread`s FPU state. */ pcb = THREAD_TO_PCB(thr_act); - simple_lock(&pcb->lock); + simple_lock(&pcb->lock, LCK_GRP_NULL); ifps = pcb->ifps; pcb->ifps = 0; simple_unlock(&pcb->lock); @@ -1269,13 +1339,14 @@ fpextovrflt(void) (void)ml_set_interrupts_enabled(intr); - if (ifps) - fp_state_free(ifps, xstate); + if (ifps) { + fp_state_free(ifps, xstate); + } /* * Raise exception. */ - i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0); + i386_exception(EXC_BAD_ACCESS, VM_PROT_READ | VM_PROT_EXECUTE, 0); /*NOTREACHED*/ } @@ -1288,16 +1359,18 @@ extern void fpxlog(int, uint32_t, uint32_t, uint32_t); void fpexterrflt(void) { - thread_t thr_act = current_thread(); + thread_t thr_act = current_thread(); struct x86_fx_thread_state *ifps = thr_act->machine.ifps; - boolean_t intr; + boolean_t intr; intr = ml_set_interrupts_enabled(FALSE); - if (get_interrupt_level()) + if (get_interrupt_level()) { panic("FPU error exception at interrupt context\n"); - if (current_task() == kernel_task) + } + if (current_task() == kernel_task) { panic("FPU error exception in kernel thread context\n"); + } /* * Save the FPU state and turn off the FPU. @@ -1317,8 +1390,8 @@ fpexterrflt(void) * since thread is running. 
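
/*
 * [Editorial aside -- not part of the patch] The mask/xcpt computation in
 * fpSSEexterrflt() just below leans on the MXCSR layout: exception flags
 * live in bits 0-5 and their mask bits in bits 7-12, so shifting right by 7
 * lines the masks up with the flags.  A worked example:
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	/* IE (bit 0) and DE (bit 1) pending; IM (bit 7) masks IE only. */
	uint32_t mxcsr = (1U << 0) | (1U << 1) | (1U << 7);
	uint32_t mask  = (mxcsr >> 7) & 0x3f;		/* IM..PM down to bits 0-5 */
	uint32_t xcpt  = ~mask & (mxcsr & 0x3f);	/* pending and unmasked   */

	assert(mask == 0x01);	/* only invalid-op is masked */
	assert(xcpt == 0x02);	/* only denormal will fault  */
	return 0;
}
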
*/ i386_exception(EXC_ARITHMETIC, - EXC_I386_EXTERR, - ifps->fx_status); + EXC_I386_EXTERR, + ifps->fx_status); /*NOTREACHED*/ } @@ -1335,7 +1408,7 @@ fpexterrflt(void) void fp_save( - thread_t thr_act) + thread_t thr_act) { pcb_t pcb = THREAD_TO_PCB(thr_act); struct x86_fx_thread_state *ifps = pcb->ifps; @@ -1357,16 +1430,16 @@ fp_save( void fp_load( - thread_t thr_act) + thread_t thr_act) { pcb_t pcb = THREAD_TO_PCB(thr_act); struct x86_fx_thread_state *ifps = pcb->ifps; assert(ifps); -#if DEBUG +#if DEBUG if (ifps->fp_valid != FALSE && ifps->fp_valid != TRUE) { panic("fp_load() invalid fp_valid: %u, fp_save_layout: %u\n", - ifps->fp_valid, ifps->fp_save_layout); + ifps->fp_valid, ifps->fp_save_layout); } #endif @@ -1375,7 +1448,7 @@ fp_load( } else { fpu_load_registers(ifps); } - ifps->fp_valid = FALSE; /* in FPU */ + ifps->fp_valid = FALSE; /* in FPU */ } /* @@ -1386,16 +1459,18 @@ fp_load( void fpSSEexterrflt(void) { - thread_t thr_act = current_thread(); + thread_t thr_act = current_thread(); struct x86_fx_thread_state *ifps = thr_act->machine.ifps; - boolean_t intr; + boolean_t intr; intr = ml_set_interrupts_enabled(FALSE); - if (get_interrupt_level()) + if (get_interrupt_level()) { panic("SSE exception at interrupt context\n"); - if (current_task() == kernel_task) + } + if (current_task() == kernel_task) { panic("SSE exception in kernel thread context\n"); + } /* * Save the FPU state and turn off the FPU. @@ -1409,14 +1484,14 @@ fpSSEexterrflt(void) * since thread is running. */ const uint32_t mask = (ifps->fx_MXCSR >> 7) & - (FPC_IM | FPC_DM | FPC_ZM | FPC_OM | FPC_UE | FPC_PE); + (FPC_IM | FPC_DM | FPC_ZM | FPC_OM | FPC_UE | FPC_PE); const uint32_t xcpt = ~mask & (ifps->fx_MXCSR & - (FPS_IE | FPS_DE | FPS_ZE | FPS_OE | FPS_UE | FPS_PE)); + (FPS_IE | FPS_DE | FPS_ZE | FPS_OE | FPS_UE | FPS_PE)); fpxlog(EXC_I386_SSEEXTERR, ifps->fx_MXCSR, ifps->fx_MXCSR, xcpt); i386_exception(EXC_ARITHMETIC, - EXC_I386_SSEEXTERR, - ifps->fx_MXCSR); + EXC_I386_SSEEXTERR, + ifps->fx_MXCSR); /*NOTREACHED*/ } @@ -1431,14 +1506,14 @@ fpSSEexterrflt(void) static void fpu_savearea_promote_avx512(thread_t thread) { - struct x86_avx_thread_state *ifps = NULL; - struct x86_avx512_thread_state *ifps512 = NULL; - pcb_t pcb = THREAD_TO_PCB(thread); - boolean_t do_avx512_alloc = FALSE; + struct x86_avx_thread_state *ifps = NULL; + struct x86_avx512_thread_state *ifps512 = NULL; + pcb_t pcb = THREAD_TO_PCB(thread); + boolean_t do_avx512_alloc = FALSE; DBG("fpu_upgrade_savearea(%p)\n", thread); - simple_lock(&pcb->lock); + simple_lock(&pcb->lock, LCK_GRP_NULL); ifps = pcb->ifps; if (ifps == NULL) { @@ -1462,9 +1537,9 @@ fpu_savearea_promote_avx512(thread_t thread) ifps512 = fp_state_alloc(AVX512); } - simple_lock(&pcb->lock); + simple_lock(&pcb->lock, LCK_GRP_NULL); if (thread == current_thread()) { - boolean_t intr; + boolean_t intr; intr = ml_set_interrupts_enabled(FALSE); @@ -1506,12 +1581,14 @@ fpu_savearea_promote_avx512(thread_t thread) boolean_t fpu_thread_promote_avx512(thread_t thread) { - task_t task = current_task(); + task_t task = current_task(); - if (thread != current_thread()) + if (thread != current_thread()) { return FALSE; - if (!ml_fpu_avx512_enabled()) + } + if (!ml_fpu_avx512_enabled()) { return FALSE; + } fpu_savearea_promote_avx512(thread); @@ -1534,9 +1611,9 @@ fpu_thread_promote_avx512(thread_t thread) void fpUDflt(user_addr_t rip) { - uint8_t instruction_prefix; - boolean_t is_AVX512_instruction = FALSE; - user_addr_t original_rip = rip; + uint8_t instruction_prefix; + boolean_t 
is_AVX512_instruction = FALSE; + user_addr_t original_rip = rip; do { /* TODO: as an optimisation, copy up to the lesser of the * next page boundary or maximal prefix length in one pass @@ -1546,31 +1623,31 @@ fpUDflt(user_addr_t rip) return; } DBG("fpUDflt(0x%016llx) prefix: 0x%x\n", - rip, instruction_prefix); + rip, instruction_prefix); /* TODO: determine more specifically which prefixes * are sane possibilities for AVX512 insns */ switch (instruction_prefix) { - case 0x2E: /* CS segment override */ - case 0x36: /* SS segment override */ - case 0x3E: /* DS segment override */ - case 0x26: /* ES segment override */ - case 0x64: /* FS segment override */ - case 0x65: /* GS segment override */ - case 0x66: /* Operand-size override */ - case 0x67: /* address-size override */ + case 0x2E: /* CS segment override */ + case 0x36: /* SS segment override */ + case 0x3E: /* DS segment override */ + case 0x26: /* ES segment override */ + case 0x64: /* FS segment override */ + case 0x65: /* GS segment override */ + case 0x66: /* Operand-size override */ + case 0x67: /* address-size override */ /* Skip optional prefixes */ rip++; if ((rip - original_rip) > MAX_X86_INSN_LENGTH) { return; } break; - case 0x62: /* EVEX */ - case 0xC5: /* VEX 2-byte */ - case 0xC4: /* VEX 3-byte */ + case 0x62: /* EVEX */ + case 0xC5: /* VEX 2-byte */ + case 0xC4: /* VEX 3-byte */ is_AVX512_instruction = TRUE; break; - default: + default: return; } } while (!is_AVX512_instruction); @@ -1580,8 +1657,9 @@ fpUDflt(user_addr_t rip) /* * Fail if this machine doesn't support AVX512 */ - if (fpu_capability != AVX512) + if (fpu_capability != AVX512) { return; + } assert(xgetbv(XCR0) == AVX_XMASK); @@ -1594,50 +1672,55 @@ fpUDflt(user_addr_t rip) #endif /* !defined(RC_HIDE_XNU_J137) */ void -fp_setvalid(boolean_t value) { - thread_t thr_act = current_thread(); +fp_setvalid(boolean_t value) +{ + thread_t thr_act = current_thread(); struct x86_fx_thread_state *ifps = thr_act->machine.ifps; if (ifps) { - ifps->fp_valid = value; + ifps->fp_valid = value; if (value == TRUE) { boolean_t istate = ml_set_interrupts_enabled(FALSE); - clear_fpu(); + clear_fpu(); ml_set_interrupts_enabled(istate); } } } boolean_t -ml_fpu_avx_enabled(void) { - return (fpu_capability >= AVX); +ml_fpu_avx_enabled(void) +{ + return fpu_capability >= AVX; } #if !defined(RC_HIDE_XNU_J137) boolean_t -ml_fpu_avx512_enabled(void) { - return (fpu_capability == AVX512); +ml_fpu_avx512_enabled(void) +{ + return fpu_capability == AVX512; } #endif static xstate_t task_xstate(task_t task) { - if (task == TASK_NULL) + if (task == TASK_NULL) { return fpu_default; - else + } else { return task->xstate; + } } static xstate_t thread_xstate(thread_t thread) { xstate_t xs = THREAD_TO_PCB(thread)->xstate; - if (xs == UNDEFINED) + if (xs == UNDEFINED) { return task_xstate(thread->task); - else + } else { return xs; + } } xstate_t @@ -1666,7 +1749,9 @@ fpu_switch_addrmode(thread_t thread, boolean_t is_64bit) mp_enable_preemption(); } -static inline uint32_t fpsimd_pop(uintptr_t ins, int sz) { +static inline uint32_t +fpsimd_pop(uintptr_t ins, int sz) +{ uint32_t rv = 0; @@ -1697,9 +1782,12 @@ static inline uint32_t fpsimd_pop(uintptr_t ins, int sz) { return rv; } -uint32_t thread_fpsimd_hash(thread_t ft) { - if (fpsimd_fault_popc == 0) +uint32_t +thread_fpsimd_hash(thread_t ft) +{ + if (fpsimd_fault_popc == 0) { return 0; + } uint32_t prv = 0; boolean_t istate = ml_set_interrupts_enabled(FALSE); diff --git a/osfmk/i386/fpu.h b/osfmk/i386/fpu.h index 0ed1dda80..7042cea10 100644 --- 
a/osfmk/i386/fpu.h +++ b/osfmk/i386/fpu.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,40 +22,40 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifndef _I386_FPU_H_ -#define _I386_FPU_H_ +#ifndef _I386_FPU_H_ +#define _I386_FPU_H_ /* * Macro definitions for routines to manipulate the @@ -70,70 +70,84 @@ #define AVX_XMASK ((uint32_t) (XFEM_X87 | XFEM_SSE | XFEM_YMM)) #define AVX512_XMASK ((uint32_t) (XFEM_X87 | XFEM_SSE | XFEM_YMM | XFEM_ZMM)) -typedef enum { - FXSAVE32 = 1, - FXSAVE64 = 2, - XSAVE32 = 3, - XSAVE64 = 4, - FP_UNUSED = 5 - } fp_save_layout_t; +typedef enum { + FXSAVE32 = 1, + FXSAVE64 = 2, + XSAVE32 = 3, + XSAVE64 = 4, + FP_UNUSED = 5 +} fp_save_layout_t; +#define STATE64_FULL 0x10 typedef enum { UNDEFINED, FP, AVX, - AVX512 + AVX512, + /* + * The following states are never associated with a thread or task. + * They are used for array declarations of data used during signal dispatch, + * but these values are never assigned to threads' (or tasks') xstate members. 
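+ *
+ * STATE64_FULL (0x10) is a single flag bit OR-ed onto the base value
+ * rather than a new enumerator range, so the underlying xstate is
+ * recovered by masking, e.g.:
+ *
+ *     AVX_FULL == (AVX | STATE64_FULL)
+ *     (AVX_FULL & ~STATE64_FULL) == AVX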
+ */ + UNDEFINED_FULL = UNDEFINED | STATE64_FULL, + FP_FULL = FP | STATE64_FULL, + AVX_FULL = AVX | STATE64_FULL, + AVX512_FULL = AVX512 | STATE64_FULL, } xstate_t; -static inline uint64_t xgetbv(uint32_t c) { - uint32_t mask_hi, mask_lo; - __asm__ __volatile__("xgetbv" : "=a"(mask_lo), "=d"(mask_hi) : "c" (c)); - return ((uint64_t) mask_hi<<32) + (uint64_t) mask_lo; +static inline uint64_t +xgetbv(uint32_t c) +{ + uint32_t mask_hi, mask_lo; + __asm__ __volatile__ ("xgetbv" : "=a"(mask_lo), "=d"(mask_hi) : "c" (c)); + return ((uint64_t) mask_hi << 32) + (uint64_t) mask_lo; } -static inline void xsetbv(uint32_t mask_hi, uint32_t mask_lo) { - __asm__ __volatile__("xsetbv" :: "a"(mask_lo), "d"(mask_hi), "c" (XCR0)); +static inline void +xsetbv(uint32_t mask_hi, uint32_t mask_lo) +{ + __asm__ __volatile__ ("xsetbv" :: "a"(mask_lo), "d"(mask_hi), "c" (XCR0)); } -extern void init_fpu(void); -extern void fpu_module_init(void); -extern void fpu_free( - thread_t thr_act, - void *fps); -extern kern_return_t fpu_set_fxstate( - thread_t thr_act, - thread_state_t state, - thread_flavor_t f); -extern kern_return_t fpu_get_fxstate( - thread_t thr_act, - thread_state_t state, - thread_flavor_t f); -extern void fpu_dup_fxstate( - thread_t parent, - thread_t child); -extern void fpnoextflt(void); -extern void fpextovrflt(void); -extern void fpexterrflt(void); -extern void fpSSEexterrflt(void); -extern void fpflush(thread_t); -extern void fp_setvalid(boolean_t); +extern void init_fpu(void); +extern void fpu_module_init(void); +extern void fpu_free( + thread_t thr_act, + void *fps); +extern kern_return_t fpu_set_fxstate( + thread_t thr_act, + thread_state_t state, + thread_flavor_t f); +extern kern_return_t fpu_get_fxstate( + thread_t thr_act, + thread_state_t state, + thread_flavor_t f); +extern void fpu_dup_fxstate( + thread_t parent, + thread_t child); +extern void fpnoextflt(void); +extern void fpextovrflt(void); +extern void fpexterrflt(void); +extern void fpSSEexterrflt(void); +extern void fpflush(thread_t); +extern void fp_setvalid(boolean_t); -extern void clear_fpu(void); -extern void fpu_switch_context( - thread_t old, - thread_t new); -extern void fpu_switch_addrmode( - thread_t thread, - boolean_t is_64bit); +extern void clear_fpu(void); +extern void fpu_switch_context( + thread_t old, + thread_t new); +extern void fpu_switch_addrmode( + thread_t thread, + boolean_t is_64bit); -extern xstate_t fpu_default; -extern xstate_t fpu_capability; -extern xstate_t current_xstate(void); -extern void fpUDflt(user_addr_t rip); +extern xstate_t fpu_default; +extern xstate_t fpu_capability; +extern xstate_t current_xstate(void); +extern void fpUDflt(user_addr_t rip); #ifdef MACH_KERNEL_PRIVATE -extern uint32_t thread_fpsimd_hash(thread_t); +extern uint32_t thread_fpsimd_hash(thread_t); extern void vzeroall(void); extern void xmmzeroall(void); extern void avx512_zero(void); #endif /* MKP */ -#endif /* _I386_FPU_H_ */ +#endif /* _I386_FPU_H_ */ diff --git a/osfmk/i386/gdt.c b/osfmk/i386/gdt.c index 04937648b..c774093a1 100644 --- a/osfmk/i386/gdt.c +++ b/osfmk/i386/gdt.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ /* CMU_ENDHIST */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -63,48 +63,48 @@ #include struct real_descriptor master_gdt[GDTSZ] - __attribute__((section("__HIB,__desc"))) - __attribute__((aligned(PAGE_SIZE))) = { - [SEL_TO_INDEX(KERNEL32_CS)] = MAKE_REAL_DESCRIPTOR( /* kernel 32-bit code */ +__attribute__((section("__HIB,__desc"))) +__attribute__((aligned(PAGE_SIZE))) = { + [SEL_TO_INDEX(KERNEL32_CS)] = MAKE_REAL_DESCRIPTOR( /* kernel 32-bit code */ 0, 0xfffff, - SZ_32|SZ_G, - ACC_P|ACC_PL_K|ACC_CODE_R - ), - [SEL_TO_INDEX(KERNEL_DS)] = MAKE_REAL_DESCRIPTOR( /* kernel data */ + SZ_32 | SZ_G, + ACC_P | ACC_PL_K | ACC_CODE_R + ), + [SEL_TO_INDEX(KERNEL_DS)] = MAKE_REAL_DESCRIPTOR( /* kernel data */ 0, 0xfffff, - SZ_32|SZ_G, - ACC_P|ACC_PL_K|ACC_DATA_W - ), - [SEL_TO_INDEX(KERNEL64_CS)] = MAKE_REAL_DESCRIPTOR( /* kernel 64-bit code */ + SZ_32 | SZ_G, + ACC_P | ACC_PL_K | ACC_DATA_W + ), + [SEL_TO_INDEX(KERNEL64_CS)] = MAKE_REAL_DESCRIPTOR( /* kernel 64-bit code */ 0, 0xfffff, - SZ_64|SZ_G, - ACC_P|ACC_PL_K|ACC_CODE_R - ), - [SEL_TO_INDEX(KERNEL64_SS)] = MAKE_REAL_DESCRIPTOR( /* kernel 64-bit syscall stack */ + SZ_64 | SZ_G, + ACC_P | ACC_PL_K | ACC_CODE_R + ), + [SEL_TO_INDEX(KERNEL64_SS)] = MAKE_REAL_DESCRIPTOR( /* kernel 64-bit syscall stack */ 0, 0xfffff, - SZ_32|SZ_G, - ACC_P|ACC_PL_K|ACC_DATA_W - ), - [SEL_TO_INDEX(USER_CS)] = MAKE_REAL_DESCRIPTOR( /* 32-bit user code segment */ + SZ_32 | SZ_G, + ACC_P | ACC_PL_K | ACC_DATA_W + ), + [SEL_TO_INDEX(USER_CS)] = MAKE_REAL_DESCRIPTOR( /* 32-bit user code segment */ 0, 0xfffff, - SZ_32|SZ_G, - ACC_P|ACC_PL_U|ACC_CODE_R - ), - [SEL_TO_INDEX(USER_DS)] = MAKE_REAL_DESCRIPTOR( /* 32-bit user data segment */ + SZ_32 | SZ_G, + ACC_P | ACC_PL_U | ACC_CODE_R + ), + [SEL_TO_INDEX(USER_DS)] = MAKE_REAL_DESCRIPTOR( /* 32-bit user data segment */ 0, 0xfffff, - SZ_32|SZ_G, - ACC_P|ACC_PL_U|ACC_DATA_W - ), - [SEL_TO_INDEX(USER64_CS)] = MAKE_REAL_DESCRIPTOR( /* user 64-bit code segment */ + SZ_32 | SZ_G, + ACC_P | ACC_PL_U | ACC_DATA_W + ), + [SEL_TO_INDEX(USER64_CS)] = MAKE_REAL_DESCRIPTOR( /* user 64-bit code segment */ 0, 0xfffff, - SZ_64|SZ_G, - ACC_P|ACC_PL_U|ACC_CODE_R - ), + SZ_64 | SZ_G, + ACC_P | ACC_PL_U | ACC_CODE_R + ), }; diff --git a/osfmk/i386/genassym.c b/osfmk/i386/genassym.c index f6812c6f0..f2e340ab7 100644 --- a/osfmk/i386/genassym.c +++ b/osfmk/i386/genassym.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -81,16 +81,12 @@ #include #include #include +#include #include #include #include #include -#if CONFIG_DTRACE -#define NEED_DTRACE_DEFS -#include <../bsd/sys/lockstat.h> -#endif - /* * genassym.c is used to produce an * assembly file which, intermingled with unuseful assembly code, @@ -104,309 +100,310 @@ * the values, but we cannot run anything on the target machine. */ -#define DECLARE(SYM,VAL) \ +#define DECLARE(SYM, VAL) \ __asm("DEFINITION__define__" SYM ":\t .ascii \"%0\"" : : "n" ((u_int)(VAL))) -int main( - int argc, - char ** argv); +#define DECLAREULL(SYM, VAL) \ + __asm("DEFINITION__define__" SYM ":\t .ascii \"%0\"" : : "n" ((unsigned long long)(VAL))) + +int main( + int argc, + char ** argv); int main( - int argc, - char **argv) + int argc, + char **argv) { + DECLARE("AST_URGENT", AST_URGENT); + DECLARE("AST_BSD", AST_BSD); - DECLARE("AST_URGENT", AST_URGENT); - DECLARE("AST_BSD", AST_BSD); - - DECLARE("MAX_CPUS", MAX_CPUS); + DECLARE("MAX_CPUS", MAX_CPUS); /* Simple Lock structure */ - DECLARE("SLOCK_ILK", offsetof(usimple_lock_data_t, interlock)); -#if MACH_LDEBUG - DECLARE("SLOCK_TYPE", offsetof(usimple_lock_data_t, lock_type)); - DECLARE("SLOCK_PC", offsetof(usimple_lock_data_t, debug.lock_pc)); - DECLARE("SLOCK_THREAD", offsetof(usimple_lock_data_t, debug.lock_thread)); - DECLARE("SLOCK_DURATIONH",offsetof(usimple_lock_data_t, debug.duration[0])); - DECLARE("SLOCK_DURATIONL",offsetof(usimple_lock_data_t, debug.duration[1])); - DECLARE("USLOCK_TAG", USLOCK_TAG); -#endif /* MACH_LDEBUG */ + DECLARE("SLOCK_ILK", offsetof(usimple_lock_data_t, interlock)); +#if MACH_LDEBUG + DECLARE("SLOCK_TYPE", offsetof(usimple_lock_data_t, lock_type)); + DECLARE("SLOCK_PC", offsetof(usimple_lock_data_t, debug.lock_pc)); + DECLARE("SLOCK_THREAD", offsetof(usimple_lock_data_t, debug.lock_thread)); + DECLARE("SLOCK_DURATIONH", offsetof(usimple_lock_data_t, debug.duration[0])); + DECLARE("SLOCK_DURATIONL", offsetof(usimple_lock_data_t, debug.duration[1])); + DECLARE("USLOCK_TAG", USLOCK_TAG); +#endif /* MACH_LDEBUG */ /* Mutex structure */ DECLARE("MUTEX_OWNER", offsetof(lck_mtx_t, lck_mtx_owner)); - DECLARE("MUTEX_PTR", offsetof(lck_mtx_t, lck_mtx_ptr)); + DECLARE("MUTEX_PTR", offsetof(lck_mtx_t, lck_mtx_ptr)); DECLARE("MUTEX_STATE", offsetof(lck_mtx_t, lck_mtx_state)); - DECLARE("MUTEX_IND", LCK_MTX_TAG_INDIRECT); - DECLARE("MUTEX_ASSERT_OWNED", LCK_MTX_ASSERT_OWNED); - 
DECLARE("MUTEX_ASSERT_NOTOWNED",LCK_MTX_ASSERT_NOTOWNED); - DECLARE("GRP_MTX_STAT_UTIL", offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt)); - DECLARE("GRP_MTX_STAT_MISS", offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt)); - DECLARE("GRP_MTX_STAT_WAIT", offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt)); - + DECLARE("MUTEX_IND", LCK_MTX_TAG_INDIRECT); + DECLARE("MUTEX_ASSERT_OWNED", LCK_MTX_ASSERT_OWNED); + DECLARE("MUTEX_ASSERT_NOTOWNED", LCK_MTX_ASSERT_NOTOWNED); + /* x86 only */ DECLARE("MUTEX_DESTROYED", LCK_MTX_TAG_DESTROYED); /* Per-mutex statistic element */ - DECLARE("MTX_ACQ_TSC", offsetof(lck_mtx_ext_t, lck_mtx_stat)); + DECLARE("MTX_ACQ_TSC", offsetof(lck_mtx_ext_t, lck_mtx_stat)); - /* Mutex group statistics elements */ - DECLARE("MUTEX_GRP", offsetof(lck_mtx_ext_t, lck_mtx_grp)); - - /* - * The use of this field is somewhat at variance with the alias. - */ - DECLARE("GRP_MTX_STAT_DIRECT_WAIT", offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt)); - - DECLARE("GRP_MTX_STAT_HELD_MAX", offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max)); /* Reader writer lock types */ - DECLARE("RW_SHARED", LCK_RW_TYPE_SHARED); - DECLARE("RW_EXCL", LCK_RW_TYPE_EXCLUSIVE); + DECLARE("RW_SHARED", LCK_RW_TYPE_SHARED); + DECLARE("RW_EXCL", LCK_RW_TYPE_EXCLUSIVE); - DECLARE("TH_RECOVER", offsetof(struct thread, recover)); - DECLARE("TH_CONTINUATION", offsetof(struct thread, continuation)); - DECLARE("TH_KERNEL_STACK", offsetof(struct thread, kernel_stack)); - DECLARE("TH_MUTEX_COUNT", offsetof(struct thread, mutex_count)); + DECLARE("TH_RECOVER", offsetof(struct thread, recover)); + DECLARE("TH_CONTINUATION", offsetof(struct thread, continuation)); + DECLARE("TH_KERNEL_STACK", offsetof(struct thread, kernel_stack)); + DECLARE("TH_MUTEX_COUNT", offsetof(struct thread, mutex_count)); DECLARE("TH_WAS_PROMOTED_ON_WAKEUP", offsetof(struct thread, was_promoted_on_wakeup)); - DECLARE("TH_IOTIER_OVERRIDE", offsetof(struct thread, iotier_override)); + DECLARE("TH_IOTIER_OVERRIDE", offsetof(struct thread, iotier_override)); + + DECLARE("TH_SYSCALLS_MACH", offsetof(struct thread, syscalls_mach)); + DECLARE("TH_SYSCALLS_UNIX", offsetof(struct thread, syscalls_unix)); - DECLARE("TH_SYSCALLS_MACH", offsetof(struct thread, syscalls_mach)); - DECLARE("TH_SYSCALLS_UNIX", offsetof(struct thread, syscalls_unix)); + DECLARE("TH_CTH_SELF", offsetof(struct thread, machine.cthread_self)); - DECLARE("TASK_VTIMERS", offsetof(struct task, vtimers)); + DECLARE("TASK_VTIMERS", offsetof(struct task, vtimers)); /* These fields are being added on demand */ - DECLARE("TH_TASK", offsetof(struct thread, task)); - DECLARE("TH_AST", offsetof(struct thread, ast)); - DECLARE("TH_MAP", offsetof(struct thread, map)); - DECLARE("TH_SPF", offsetof(struct thread, machine.specFlags)); - DECLARE("TH_PCB_ISS", offsetof(struct thread, machine.iss)); - DECLARE("TH_PCB_IDS", offsetof(struct thread, machine.ids)); - DECLARE("TH_PCB_FPS", offsetof(struct thread, machine.ifps)); + DECLARE("TH_TASK", offsetof(struct thread, task)); + DECLARE("TH_AST", offsetof(struct thread, ast)); + DECLARE("TH_MAP", offsetof(struct thread, map)); + DECLARE("TH_SPF", offsetof(struct thread, machine.specFlags)); + DECLARE("TH_PCB_ISS", offsetof(struct thread, machine.iss)); + DECLARE("TH_PCB_IDS", offsetof(struct thread, machine.ids)); + DECLARE("TH_PCB_FPS", offsetof(struct thread, machine.ifps)); #if NCOPY_WINDOWS > 0 DECLARE("TH_COPYIO_STATE", offsetof(struct 
thread, machine.copyio_state)); DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN); #endif - DECLARE("TH_RWLOCK_COUNT", offsetof(struct thread, rwlock_count)); + DECLARE("TH_RWLOCK_COUNT", offsetof(struct thread, rwlock_count)); - DECLARE("MAP_PMAP", offsetof(struct _vm_map, pmap)); + DECLARE("MAP_PMAP", offsetof(struct _vm_map, pmap)); -#define IEL_SIZE (sizeof(struct i386_exception_link *)) - DECLARE("IKS_SIZE", sizeof(struct thread_kernel_state)); +#define IEL_SIZE (sizeof(struct i386_exception_link *)) + DECLARE("IKS_SIZE", sizeof(struct thread_kernel_state)); /* * KSS_* are offsets from the top of the kernel stack (cpu_kernel_stack) */ - DECLARE("KSS_RBX", offsetof(struct thread_kernel_state, machine.k_rbx)); - DECLARE("KSS_RSP", offsetof(struct thread_kernel_state, machine.k_rsp)); - DECLARE("KSS_RBP", offsetof(struct thread_kernel_state, machine.k_rbp)); - DECLARE("KSS_R12", offsetof(struct thread_kernel_state, machine.k_r12)); - DECLARE("KSS_R13", offsetof(struct thread_kernel_state, machine.k_r13)); - DECLARE("KSS_R14", offsetof(struct thread_kernel_state, machine.k_r14)); - DECLARE("KSS_R15", offsetof(struct thread_kernel_state, machine.k_r15)); - DECLARE("KSS_RIP", offsetof(struct thread_kernel_state, machine.k_rip)); - - DECLARE("DS_DR0", offsetof(struct x86_debug_state32, dr0)); - DECLARE("DS_DR1", offsetof(struct x86_debug_state32, dr1)); - DECLARE("DS_DR2", offsetof(struct x86_debug_state32, dr2)); - DECLARE("DS_DR3", offsetof(struct x86_debug_state32, dr3)); - DECLARE("DS_DR4", offsetof(struct x86_debug_state32, dr4)); - DECLARE("DS_DR5", offsetof(struct x86_debug_state32, dr5)); - DECLARE("DS_DR6", offsetof(struct x86_debug_state32, dr6)); - DECLARE("DS_DR7", offsetof(struct x86_debug_state32, dr7)); - - DECLARE("DS64_DR0", offsetof(struct x86_debug_state64, dr0)); - DECLARE("DS64_DR1", offsetof(struct x86_debug_state64, dr1)); - DECLARE("DS64_DR2", offsetof(struct x86_debug_state64, dr2)); - DECLARE("DS64_DR3", offsetof(struct x86_debug_state64, dr3)); - DECLARE("DS64_DR4", offsetof(struct x86_debug_state64, dr4)); - DECLARE("DS64_DR5", offsetof(struct x86_debug_state64, dr5)); - DECLARE("DS64_DR6", offsetof(struct x86_debug_state64, dr6)); - DECLARE("DS64_DR7", offsetof(struct x86_debug_state64, dr7)); - - DECLARE("FP_VALID", offsetof(struct x86_fx_thread_state,fp_valid)); - - DECLARE("SS_FLAVOR", offsetof(x86_saved_state_t, flavor)); - DECLARE("SS_32", x86_SAVED_STATE32); - DECLARE("SS_64", x86_SAVED_STATE64); + DECLARE("KSS_RBX", offsetof(struct thread_kernel_state, machine.k_rbx)); + DECLARE("KSS_RSP", offsetof(struct thread_kernel_state, machine.k_rsp)); + DECLARE("KSS_RBP", offsetof(struct thread_kernel_state, machine.k_rbp)); + DECLARE("KSS_R12", offsetof(struct thread_kernel_state, machine.k_r12)); + DECLARE("KSS_R13", offsetof(struct thread_kernel_state, machine.k_r13)); + DECLARE("KSS_R14", offsetof(struct thread_kernel_state, machine.k_r14)); + DECLARE("KSS_R15", offsetof(struct thread_kernel_state, machine.k_r15)); + DECLARE("KSS_RIP", offsetof(struct thread_kernel_state, machine.k_rip)); + + DECLARE("DS_DR0", offsetof(struct x86_debug_state32, dr0)); + DECLARE("DS_DR1", offsetof(struct x86_debug_state32, dr1)); + DECLARE("DS_DR2", offsetof(struct x86_debug_state32, dr2)); + DECLARE("DS_DR3", offsetof(struct x86_debug_state32, dr3)); + DECLARE("DS_DR4", offsetof(struct x86_debug_state32, dr4)); + DECLARE("DS_DR5", offsetof(struct x86_debug_state32, dr5)); + DECLARE("DS_DR6", offsetof(struct x86_debug_state32, dr6)); + DECLARE("DS_DR7", offsetof(struct x86_debug_state32, 
dr7)); + + DECLARE("DS64_DR0", offsetof(struct x86_debug_state64, dr0)); + DECLARE("DS64_DR1", offsetof(struct x86_debug_state64, dr1)); + DECLARE("DS64_DR2", offsetof(struct x86_debug_state64, dr2)); + DECLARE("DS64_DR3", offsetof(struct x86_debug_state64, dr3)); + DECLARE("DS64_DR4", offsetof(struct x86_debug_state64, dr4)); + DECLARE("DS64_DR5", offsetof(struct x86_debug_state64, dr5)); + DECLARE("DS64_DR6", offsetof(struct x86_debug_state64, dr6)); + DECLARE("DS64_DR7", offsetof(struct x86_debug_state64, dr7)); + + DECLARE("FP_VALID", offsetof(struct x86_fx_thread_state, fp_valid)); + + DECLARE("SS_FLAVOR", offsetof(x86_saved_state_t, flavor)); + DECLARE("SS_32", x86_SAVED_STATE32); + DECLARE("SS_64", x86_SAVED_STATE64); #define R_(x) offsetof(x86_saved_state_t, ss_32.x) - DECLARE("R32_CS", R_(cs)); - DECLARE("R32_SS", R_(ss)); - DECLARE("R32_DS", R_(ds)); - DECLARE("R32_ES", R_(es)); - DECLARE("R32_FS", R_(fs)); - DECLARE("R32_GS", R_(gs)); - DECLARE("R32_UESP", R_(uesp)); - DECLARE("R32_EBP", R_(ebp)); - DECLARE("R32_EAX", R_(eax)); - DECLARE("R32_EBX", R_(ebx)); - DECLARE("R32_ECX", R_(ecx)); - DECLARE("R32_EDX", R_(edx)); - DECLARE("R32_ESI", R_(esi)); - DECLARE("R32_EDI", R_(edi)); - DECLARE("R32_TRAPNO", R_(trapno)); - DECLARE("R32_ERR", R_(err)); - DECLARE("R32_EFLAGS", R_(efl)); - DECLARE("R32_EIP", R_(eip)); - DECLARE("R32_CR2", R_(cr2)); - DECLARE("ISS32_SIZE", sizeof (x86_saved_state32_t)); + DECLARE("R32_CS", R_(cs)); + DECLARE("R32_SS", R_(ss)); + DECLARE("R32_DS", R_(ds)); + DECLARE("R32_ES", R_(es)); + DECLARE("R32_FS", R_(fs)); + DECLARE("R32_GS", R_(gs)); + DECLARE("R32_UESP", R_(uesp)); + DECLARE("R32_EBP", R_(ebp)); + DECLARE("R32_EAX", R_(eax)); + DECLARE("R32_EBX", R_(ebx)); + DECLARE("R32_ECX", R_(ecx)); + DECLARE("R32_EDX", R_(edx)); + DECLARE("R32_ESI", R_(esi)); + DECLARE("R32_EDI", R_(edi)); + DECLARE("R32_TRAPNO", R_(trapno)); + DECLARE("R32_ERR", R_(err)); + DECLARE("R32_EFLAGS", R_(efl)); + DECLARE("R32_EIP", R_(eip)); + DECLARE("R32_CR2", R_(cr2)); + DECLARE("ISS32_SIZE", sizeof(x86_saved_state32_t)); #define R64_(x) offsetof(x86_saved_state_t, ss_64.x) - DECLARE("R64_FS", R64_(fs)); - DECLARE("R64_GS", R64_(gs)); - DECLARE("R64_R8", R64_(r8)); - DECLARE("R64_R9", R64_(r9)); - DECLARE("R64_R10", R64_(r10)); - DECLARE("R64_R11", R64_(r11)); - DECLARE("R64_R12", R64_(r12)); - DECLARE("R64_R13", R64_(r13)); - DECLARE("R64_R14", R64_(r14)); - DECLARE("R64_R15", R64_(r15)); - DECLARE("R64_RBP", R64_(rbp)); - DECLARE("R64_RAX", R64_(rax)); - DECLARE("R64_RBX", R64_(rbx)); - DECLARE("R64_RCX", R64_(rcx)); - DECLARE("R64_RDX", R64_(rdx)); - DECLARE("R64_RSI", R64_(rsi)); - DECLARE("R64_RDI", R64_(rdi)); - DECLARE("R64_CS", R64_(isf.cs)); - DECLARE("R64_SS", R64_(isf.ss)); - DECLARE("R64_RSP", R64_(isf.rsp)); - DECLARE("R64_TRAPNO", R64_(isf.trapno)); - DECLARE("R64_TRAPFN", R64_(isf.trapfn)); - DECLARE("R64_ERR", R64_(isf.err)); - DECLARE("R64_RFLAGS", R64_(isf.rflags)); - DECLARE("R64_RIP", R64_(isf.rip)); - DECLARE("R64_CR2", R64_(cr2)); - DECLARE("ISS64_OFFSET", R64_(isf)); - DECLARE("ISS64_SIZE", sizeof (x86_saved_state64_t)); + DECLARE("R64_DS", R64_(ds)); + DECLARE("R64_ES", R64_(es)); + DECLARE("R64_FS", R64_(fs)); + DECLARE("R64_GS", R64_(gs)); + DECLARE("R64_R8", R64_(r8)); + DECLARE("R64_R9", R64_(r9)); + DECLARE("R64_R10", R64_(r10)); + DECLARE("R64_R11", R64_(r11)); + DECLARE("R64_R12", R64_(r12)); + DECLARE("R64_R13", R64_(r13)); + DECLARE("R64_R14", R64_(r14)); + DECLARE("R64_R15", R64_(r15)); + DECLARE("R64_RBP", R64_(rbp)); + DECLARE("R64_RAX", 
R64_(rax)); + DECLARE("R64_RBX", R64_(rbx)); + DECLARE("R64_RCX", R64_(rcx)); + DECLARE("R64_RDX", R64_(rdx)); + DECLARE("R64_RSI", R64_(rsi)); + DECLARE("R64_RDI", R64_(rdi)); + DECLARE("R64_CS", R64_(isf.cs)); + DECLARE("R64_SS", R64_(isf.ss)); + DECLARE("R64_RSP", R64_(isf.rsp)); + DECLARE("R64_TRAPNO", R64_(isf.trapno)); + DECLARE("R64_TRAPFN", R64_(isf.trapfn)); + DECLARE("R64_ERR", R64_(isf.err)); + DECLARE("R64_RFLAGS", R64_(isf.rflags)); + DECLARE("R64_RIP", R64_(isf.rip)); + DECLARE("R64_CR2", R64_(cr2)); + DECLARE("ISS64_OFFSET", R64_(isf)); + DECLARE("ISS64_SIZE", sizeof(x86_saved_state64_t)); #define ISF64_(x) offsetof(x86_64_intr_stack_frame_t, x) - DECLARE("ISF64_TRAPNO", ISF64_(trapno)); - DECLARE("ISF64_TRAPFN", ISF64_(trapfn)); - DECLARE("ISF64_ERR", ISF64_(err)); - DECLARE("ISF64_RIP", ISF64_(rip)); - DECLARE("ISF64_CS", ISF64_(cs)); - DECLARE("ISF64_RFLAGS", ISF64_(rflags)); - DECLARE("ISF64_RSP", ISF64_(rsp)); - DECLARE("ISF64_SS", ISF64_(ss)); - DECLARE("ISF64_SIZE", sizeof(x86_64_intr_stack_frame_t)); - - DECLARE("NBPG", I386_PGBYTES); - DECLARE("PAGE_SIZE", I386_PGBYTES); - DECLARE("PAGE_MASK", I386_PGBYTES-1); - DECLARE("PAGE_SHIFT", 12); - DECLARE("NKPT", NKPT); - DECLARE("VM_MIN_ADDRESS", VM_MIN_ADDRESS); - DECLARE("VM_MAX_ADDRESS", VM_MAX_ADDRESS); - DECLARE("KERNELBASE", VM_MIN_KERNEL_ADDRESS); - DECLARE("LINEAR_KERNELBASE", LINEAR_KERNEL_ADDRESS); - DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE); - - DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS", _COMM_PAGE32_BASE_ADDRESS); - DECLARE("ASM_COMM_PAGE32_START_ADDRESS", _COMM_PAGE32_START_ADDRESS); - DECLARE("ASM_COMM_PAGE_SCHED_GEN", _COMM_PAGE_SCHED_GEN); + DECLARE("ISF64_TRAPNO", ISF64_(trapno)); + DECLARE("ISF64_TRAPFN", ISF64_(trapfn)); + DECLARE("ISF64_ERR", ISF64_(err)); + DECLARE("ISF64_RIP", ISF64_(rip)); + DECLARE("ISF64_CS", ISF64_(cs)); + DECLARE("ISF64_RFLAGS", ISF64_(rflags)); + DECLARE("ISF64_RSP", ISF64_(rsp)); + DECLARE("ISF64_SS", ISF64_(ss)); + DECLARE("ISF64_SIZE", sizeof(x86_64_intr_stack_frame_t)); + + DECLARE("NBPG", I386_PGBYTES); + DECLARE("PAGE_SIZE", I386_PGBYTES); + DECLARE("PAGE_MASK", I386_PGBYTES - 1); + DECLARE("PAGE_SHIFT", 12); + DECLARE("NKPT", NKPT); + DECLARE("VM_MIN_ADDRESS", VM_MIN_ADDRESS); + DECLARE("VM_MAX_ADDRESS", VM_MAX_ADDRESS); + DECLARE("KERNELBASE", VM_MIN_KERNEL_ADDRESS); + DECLARE("LINEAR_KERNELBASE", LINEAR_KERNEL_ADDRESS); + DECLARE("KERNEL_STACK_SIZE", KERNEL_STACK_SIZE); + + DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS", _COMM_PAGE32_BASE_ADDRESS); + DECLARE("ASM_COMM_PAGE32_START_ADDRESS", _COMM_PAGE32_START_ADDRESS); + DECLARE("ASM_COMM_PAGE_SCHED_GEN", _COMM_PAGE_SCHED_GEN); DECLARE("KERNEL_PML4_INDEX", KERNEL_PML4_INDEX); - DECLARE("IDTSZ", IDTSZ); - DECLARE("GDTSZ", GDTSZ); - - DECLARE("KERNEL_DS", KERNEL_DS); - DECLARE("USER_CS", USER_CS); - DECLARE("USER_DS", USER_DS); - DECLARE("USER_CTHREAD", USER_CTHREAD); - DECLARE("KERNEL32_CS", KERNEL32_CS); - DECLARE("KERNEL64_CS", KERNEL64_CS); - DECLARE("USER64_CS", USER64_CS); - DECLARE("KERNEL_TSS", KERNEL_TSS); - DECLARE("KERNEL_LDT", KERNEL_LDT); - DECLARE("SYSENTER_CS", SYSENTER_CS); - DECLARE("SYSENTER_TF_CS",SYSENTER_TF_CS); - DECLARE("SYSENTER_DS", SYSENTER_DS); - DECLARE("SYSCALL_CS", SYSCALL_CS); - - DECLARE("CPU_THIS", - offsetof(cpu_data_t, cpu_this)); - DECLARE("CPU_ACTIVE_THREAD", - offsetof(cpu_data_t, cpu_active_thread)); - DECLARE("CPU_ACTIVE_STACK", - offsetof(cpu_data_t, cpu_active_stack)); - DECLARE("CPU_KERNEL_STACK", - offsetof(cpu_data_t, cpu_kernel_stack)); - DECLARE("CPU_INT_STACK_TOP", - 
offsetof(cpu_data_t, cpu_int_stack_top)); - DECLARE("CPU_PREEMPTION_LEVEL", - offsetof(cpu_data_t, cpu_preemption_level)); - DECLARE("CPU_HIBERNATE", - offsetof(cpu_data_t, cpu_hibernate)); - DECLARE("CPU_INTERRUPT_LEVEL", - offsetof(cpu_data_t, cpu_interrupt_level)); + DECLAREULL("KERNEL_BASE", KERNEL_BASE); + DECLARE("IDTSZ", IDTSZ); + DECLARE("GDTSZ", GDTSZ); + + DECLARE("KERNEL_DS", KERNEL_DS); + DECLARE("USER_CS", USER_CS); + DECLARE("USER_DS", USER_DS); + DECLARE("USER_CTHREAD", USER_CTHREAD); + DECLARE("KERNEL32_CS", KERNEL32_CS); + DECLARE("KERNEL64_CS", KERNEL64_CS); + DECLARE("USER64_CS", USER64_CS); + DECLARE("KERNEL_TSS", KERNEL_TSS); + DECLARE("KERNEL_LDT", KERNEL_LDT); + DECLARE("SYSENTER_CS", SYSENTER_CS); + DECLARE("SYSENTER_TF_CS", SYSENTER_TF_CS); + DECLARE("SYSENTER_DS", SYSENTER_DS); + DECLARE("SYSCALL_CS", SYSCALL_CS); + + DECLARE("CPU_THIS", + offsetof(cpu_data_t, cpu_this)); + DECLARE("CPU_ACTIVE_THREAD", + offsetof(cpu_data_t, cpu_active_thread)); + DECLARE("CPU_ACTIVE_STACK", + offsetof(cpu_data_t, cpu_active_stack)); + DECLARE("CPU_KERNEL_STACK", + offsetof(cpu_data_t, cpu_kernel_stack)); + DECLARE("CPU_INT_STACK_TOP", + offsetof(cpu_data_t, cpu_int_stack_top)); + DECLARE("CPU_PREEMPTION_LEVEL", + offsetof(cpu_data_t, cpu_preemption_level)); + DECLARE("CPU_HIBERNATE", + offsetof(cpu_data_t, cpu_hibernate)); + DECLARE("CPU_INTERRUPT_LEVEL", + offsetof(cpu_data_t, cpu_interrupt_level)); + DECLARE("CPU_NEED_SEGCHK", + offsetof(cpu_data_t, cpu_curthread_do_segchk)); DECLARE("CPU_NESTED_ISTACK", offsetof(cpu_data_t, cpu_nested_istack)); - DECLARE("CPU_NUMBER_GS", - offsetof(cpu_data_t,cpu_number)); - DECLARE("CPU_RUNNING", - offsetof(cpu_data_t,cpu_running)); + DECLARE("CPU_NUMBER_GS", + offsetof(cpu_data_t, cpu_number)); + DECLARE("CPU_RUNNING", + offsetof(cpu_data_t, cpu_running)); DECLARE("CPU_PENDING_AST", - offsetof(cpu_data_t,cpu_pending_ast)); + offsetof(cpu_data_t, cpu_pending_ast)); DECLARE("CPU_PROCESSOR", - offsetof(cpu_data_t,cpu_processor)); - DECLARE("CPU_INT_STATE", - offsetof(cpu_data_t, cpu_int_state)); - DECLARE("CPU_INT_EVENT_TIME", - offsetof(cpu_data_t, cpu_int_event_time)); - - DECLARE("CPU_TASK_CR3", - offsetof(cpu_data_t, cpu_task_cr3)); - DECLARE("CPU_ACTIVE_CR3", - offsetof(cpu_data_t, cpu_active_cr3)); - DECLARE("CPU_KERNEL_CR3", - offsetof(cpu_data_t, cpu_kernel_cr3)); + offsetof(cpu_data_t, cpu_processor)); + DECLARE("CPU_INT_STATE", + offsetof(cpu_data_t, cpu_int_state)); + DECLARE("CPU_INT_EVENT_TIME", + offsetof(cpu_data_t, cpu_int_event_time)); + + DECLARE("CPU_TASK_CR3", + offsetof(cpu_data_t, cpu_task_cr3)); + DECLARE("CPU_SHADOWTASK_CR3", + offsetof(cpu_data_t, cpu_shadowtask_cr3)); + DECLARE("CPU_ACTIVE_CR3", + offsetof(cpu_data_t, cpu_active_cr3)); + DECLARE("CPU_KERNEL_CR3", + offsetof(cpu_data_t, cpu_kernel_cr3)); DECLARE("CPU_UCR3", - offsetof(cpu_data_t, cpu_ucr3)); -#if DEBUG + offsetof(cpu_data_t, cpu_ucr3)); + DECLARE("CPU_IP_DESC", + offsetof(cpu_data_t, cpu_ip_desc)); +#if DEBUG DECLARE("CPU_ENTRY_CR3", - offsetof(cpu_data_t, cpu_entry_cr3)); + offsetof(cpu_data_t, cpu_entry_cr3)); DECLARE("CPU_EXIT_CR3", - offsetof(cpu_data_t, cpu_exit_cr3)); + offsetof(cpu_data_t, cpu_exit_cr3)); #endif DECLARE("CPU_TLB_INVALID", - offsetof(cpu_data_t, cpu_tlb_invalid)); + offsetof(cpu_data_t, cpu_tlb_invalid)); DECLARE("CPU_PAGEZERO_MAPPED", - offsetof(cpu_data_t, cpu_pagezero_mapped)); + offsetof(cpu_data_t, cpu_pagezero_mapped)); + DECLARE("CPU_CURTASK_HAS_LDT", + offsetof(cpu_data_t, cpu_curtask_has_ldt)); DECLARE("CPU_TASK_MAP", - 
offsetof(cpu_data_t, cpu_task_map)); - DECLARE("TASK_MAP_32BIT", TASK_MAP_32BIT); - DECLARE("TASK_MAP_64BIT", TASK_MAP_64BIT); - DECLARE("CPU_UBER_USER_GS_BASE", - offsetof(cpu_data_t, cpu_uber.cu_user_gs_base)); + offsetof(cpu_data_t, cpu_task_map)); + DECLARE("TASK_MAP_32BIT", TASK_MAP_32BIT); + DECLARE("TASK_MAP_64BIT", TASK_MAP_64BIT); DECLARE("CPU_UBER_ISF", - offsetof(cpu_data_t, cpu_uber.cu_isf)); + offsetof(cpu_data_t, cpu_uber.cu_isf)); DECLARE("CPU_UBER_TMP", - offsetof(cpu_data_t, cpu_uber.cu_tmp)); + offsetof(cpu_data_t, cpu_uber.cu_tmp)); DECLARE("CPU_NANOTIME", - offsetof(cpu_data_t, cpu_nanotime)); + offsetof(cpu_data_t, cpu_nanotime)); DECLARE("CPU_DR7", - offsetof(cpu_data_t, cpu_dr7)); + offsetof(cpu_data_t, cpu_dr7)); - DECLARE("hwIntCnt", offsetof(cpu_data_t,cpu_hwIntCnt)); + DECLARE("hwIntCnt", offsetof(cpu_data_t, cpu_hwIntCnt)); DECLARE("CPU_ACTIVE_PCID", - offsetof(cpu_data_t, cpu_active_pcid)); + offsetof(cpu_data_t, cpu_active_pcid)); DECLARE("CPU_KERNEL_PCID", - offsetof(cpu_data_t, cpu_kernel_pcid)); + offsetof(cpu_data_t, cpu_kernel_pcid)); DECLARE("CPU_PCID_COHERENTP", - offsetof(cpu_data_t, cpu_pmap_pcid_coherentp)); + offsetof(cpu_data_t, cpu_pmap_pcid_coherentp)); DECLARE("CPU_PCID_COHERENTP_KERNEL", - offsetof(cpu_data_t, cpu_pmap_pcid_coherentp_kernel)); + offsetof(cpu_data_t, cpu_pmap_pcid_coherentp_kernel)); DECLARE("CPU_PMAP_PCID_ENABLED", offsetof(cpu_data_t, cpu_pmap_pcid_enabled)); -#ifdef PCID_STATS +#ifdef PCID_STATS DECLARE("CPU_PMAP_USER_RETS", offsetof(cpu_data_t, cpu_pmap_user_rets)); DECLARE("CPU_PMAP_PCID_PRESERVES", @@ -417,108 +414,90 @@ main( DECLARE("CPU_TLB_INVALID_LOCAL", offsetof(cpu_data_t, cpu_tlb_invalid_local)); DECLARE("CPU_TLB_INVALID_GLOBAL", - offsetof(cpu_data_t, cpu_tlb_invalid_global)); + offsetof(cpu_data_t, cpu_tlb_invalid_global)); DECLARE("CPU_ESTACK", - offsetof(cpu_data_t, cd_estack)); + offsetof(cpu_data_t, cd_estack)); DECLARE("CPU_DSHADOW", - offsetof(cpu_data_t, cd_shadow)); - - DECLARE("enaExpTrace", enaExpTrace); - DECLARE("enaUsrFCall", enaUsrFCall); - DECLARE("enaUsrPhyMp", enaUsrPhyMp); - DECLARE("enaDiagSCs", enaDiagSCs); - DECLARE("enaDiagEM", enaDiagEM); - DECLARE("enaNotifyEM", enaNotifyEM); - DECLARE("dgLock", offsetof(struct diagWork, dgLock)); - DECLARE("dgFlags", offsetof(struct diagWork, dgFlags)); - DECLARE("dgMisc1", offsetof(struct diagWork, dgMisc1)); - DECLARE("dgMisc2", offsetof(struct diagWork, dgMisc2)); - DECLARE("dgMisc3", offsetof(struct diagWork, dgMisc3)); - DECLARE("dgMisc4", offsetof(struct diagWork, dgMisc4)); - DECLARE("dgMisc5", offsetof(struct diagWork, dgMisc5)); - - DECLARE("TSS_ESP0", offsetof(struct i386_tss, esp0)); - DECLARE("TSS_SS0", offsetof(struct i386_tss, ss0)); - DECLARE("TSS_LDT", offsetof(struct i386_tss, ldt)); - DECLARE("TSS_PDBR", offsetof(struct i386_tss, cr3)); - DECLARE("TSS_LINK", offsetof(struct i386_tss, back_link)); - - DECLARE("K_TASK_GATE", ACC_P|ACC_PL_K|ACC_TASK_GATE); - DECLARE("K_TRAP_GATE", ACC_P|ACC_PL_K|ACC_TRAP_GATE); - DECLARE("U_TRAP_GATE", ACC_P|ACC_PL_U|ACC_TRAP_GATE); - DECLARE("K_INTR_GATE", ACC_P|ACC_PL_K|ACC_INTR_GATE); - DECLARE("U_INTR_GATE", ACC_P|ACC_PL_U|ACC_INTR_GATE); - DECLARE("K_TSS", ACC_P|ACC_PL_K|ACC_TSS); + offsetof(cpu_data_t, cd_shadow)); + + DECLARE("enaExpTrace", enaExpTrace); + DECLARE("enaUsrFCall", enaUsrFCall); + DECLARE("enaUsrPhyMp", enaUsrPhyMp); + DECLARE("enaDiagSCs", enaDiagSCs); + DECLARE("enaDiagEM", enaDiagEM); + DECLARE("enaNotifyEM", enaNotifyEM); + DECLARE("dgLock", offsetof(struct diagWork, dgLock)); + 
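/*
 * Roughly, DECLARE("TH_TASK", offsetof(struct thread, task)) expands to
 *
 *     __asm("DEFINITION__define__TH_TASK:\t .ascii \"%0\""
 *         : : "n" ((u_int)(offsetof(struct thread, task))));
 *
 * The compiler folds the constant into the .ascii operand of this
 * never-executed assembly, and the build extracts the
 * DEFINITION__define__ markers from the generated .s file to produce
 * the offset definitions consumed by the assembly sources.
 */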
DECLARE("dgFlags", offsetof(struct diagWork, dgFlags)); + DECLARE("dgMisc1", offsetof(struct diagWork, dgMisc1)); + DECLARE("dgMisc2", offsetof(struct diagWork, dgMisc2)); + DECLARE("dgMisc3", offsetof(struct diagWork, dgMisc3)); + DECLARE("dgMisc4", offsetof(struct diagWork, dgMisc4)); + DECLARE("dgMisc5", offsetof(struct diagWork, dgMisc5)); + + DECLARE("TSS_ESP0", offsetof(struct i386_tss, esp0)); + DECLARE("TSS_SS0", offsetof(struct i386_tss, ss0)); + DECLARE("TSS_LDT", offsetof(struct i386_tss, ldt)); + DECLARE("TSS_PDBR", offsetof(struct i386_tss, cr3)); + DECLARE("TSS_LINK", offsetof(struct i386_tss, back_link)); + + DECLARE("K_TASK_GATE", ACC_P | ACC_PL_K | ACC_TASK_GATE); + DECLARE("K_TRAP_GATE", ACC_P | ACC_PL_K | ACC_TRAP_GATE); + DECLARE("U_TRAP_GATE", ACC_P | ACC_PL_U | ACC_TRAP_GATE); + DECLARE("K_INTR_GATE", ACC_P | ACC_PL_K | ACC_INTR_GATE); + DECLARE("U_INTR_GATE", ACC_P | ACC_PL_U | ACC_INTR_GATE); + DECLARE("K_TSS", ACC_P | ACC_PL_K | ACC_TSS); /* * usimple_lock fields */ - DECLARE("USL_INTERLOCK", offsetof(usimple_lock_data_t, interlock)); + DECLARE("USL_INTERLOCK", offsetof(usimple_lock_data_t, interlock)); - DECLARE("INTSTACK_SIZE", INTSTACK_SIZE); + DECLARE("INTSTACK_SIZE", INTSTACK_SIZE); DECLARE("KADDR", offsetof(struct boot_args, kaddr)); DECLARE("KSIZE", offsetof(struct boot_args, ksize)); DECLARE("MEMORYMAP", offsetof(struct boot_args, MemoryMap)); DECLARE("DEVICETREEP", offsetof(struct boot_args, deviceTreeP)); DECLARE("RNT_TSC_BASE", - offsetof(pal_rtc_nanotime_t, tsc_base)); + offsetof(pal_rtc_nanotime_t, tsc_base)); DECLARE("RNT_NS_BASE", - offsetof(pal_rtc_nanotime_t, ns_base)); + offsetof(pal_rtc_nanotime_t, ns_base)); DECLARE("RNT_SCALE", - offsetof(pal_rtc_nanotime_t, scale)); + offsetof(pal_rtc_nanotime_t, scale)); DECLARE("RNT_SHIFT", - offsetof(pal_rtc_nanotime_t, shift)); + offsetof(pal_rtc_nanotime_t, shift)); DECLARE("RNT_GENERATION", - offsetof(pal_rtc_nanotime_t, generation)); + offsetof(pal_rtc_nanotime_t, generation)); /* values from kern/timer.h */ #ifdef __LP64__ DECLARE("TIMER_ALL", offsetof(struct timer, all_bits)); #else - DECLARE("TIMER_LOW", offsetof(struct timer, low_bits)); - DECLARE("TIMER_HIGH", offsetof(struct timer, high_bits)); - DECLARE("TIMER_HIGHCHK", offsetof(struct timer, high_bits_check)); + DECLARE("TIMER_LOW", offsetof(struct timer, low_bits)); + DECLARE("TIMER_HIGH", offsetof(struct timer, high_bits)); + DECLARE("TIMER_HIGHCHK", offsetof(struct timer, high_bits_check)); #endif DECLARE("TIMER_TSTAMP", - offsetof(struct timer, tstamp)); + offsetof(struct timer, tstamp)); DECLARE("THREAD_TIMER", - offsetof(struct processor, processor_data.thread_timer)); + offsetof(struct processor, processor_data.thread_timer)); DECLARE("KERNEL_TIMER", - offsetof(struct processor, processor_data.kernel_timer)); + offsetof(struct processor, processor_data.kernel_timer)); DECLARE("SYSTEM_TIMER", - offsetof(struct thread, system_timer)); + offsetof(struct thread, system_timer)); DECLARE("USER_TIMER", - offsetof(struct thread, user_timer)); + offsetof(struct thread, user_timer)); DECLARE("SYSTEM_STATE", - offsetof(struct processor, processor_data.system_state)); + offsetof(struct processor, processor_data.system_state)); DECLARE("USER_STATE", - offsetof(struct processor, processor_data.user_state)); + offsetof(struct processor, processor_data.user_state)); DECLARE("IDLE_STATE", - offsetof(struct processor, processor_data.idle_state)); + offsetof(struct processor, processor_data.idle_state)); DECLARE("CURRENT_STATE", - offsetof(struct processor, 
processor_data.current_state)); + offsetof(struct processor, processor_data.current_state)); DECLARE("OnProc", OnProc); - -#if CONFIG_DTRACE - DECLARE("LS_LCK_MTX_LOCK_ACQUIRE", LS_LCK_MTX_LOCK_ACQUIRE); - DECLARE("LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE", LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE); - DECLARE("LS_LCK_MTX_UNLOCK_RELEASE", LS_LCK_MTX_UNLOCK_RELEASE); - DECLARE("LS_LCK_MTX_TRY_LOCK_ACQUIRE", LS_LCK_MTX_TRY_LOCK_ACQUIRE); - DECLARE("LS_LCK_RW_LOCK_SHARED_ACQUIRE", LS_LCK_RW_LOCK_SHARED_ACQUIRE); - DECLARE("LS_LCK_RW_DONE_RELEASE", LS_LCK_RW_DONE_RELEASE); - DECLARE("LS_LCK_MTX_EXT_LOCK_ACQUIRE", LS_LCK_MTX_EXT_LOCK_ACQUIRE); - DECLARE("LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE", LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE); - DECLARE("LS_LCK_MTX_EXT_UNLOCK_RELEASE", LS_LCK_MTX_EXT_UNLOCK_RELEASE); - DECLARE("LS_LCK_RW_LOCK_EXCL_ACQUIRE", LS_LCK_RW_LOCK_EXCL_ACQUIRE); - DECLARE("LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE", LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE); - DECLARE("LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE", LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE); - DECLARE("LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE", LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE); - DECLARE("LS_LCK_MTX_LOCK_SPIN_ACQUIRE", LS_LCK_MTX_LOCK_SPIN_ACQUIRE); -#endif - - return (0); + return 0; } diff --git a/osfmk/i386/hibernate_i386.c b/osfmk/i386/hibernate_i386.c index 21322b751..d88bb1897 100644 --- a/osfmk/i386/hibernate_i386.c +++ b/osfmk/i386/hibernate_i386.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -47,201 +47,209 @@ extern ppnum_t max_ppnum; -#define MAX_BANKS 32 +#define MAX_BANKS 32 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ hibernate_page_list_t * hibernate_page_list_allocate(boolean_t log) { - ppnum_t base, num; - vm_size_t size; - uint32_t bank, num_banks; - uint32_t pages, page_count; - hibernate_page_list_t * list; - hibernate_bitmap_t * bitmap; - - EfiMemoryRange * mptr; - uint32_t mcount, msize, i; - hibernate_bitmap_t dram_ranges[MAX_BANKS]; - boot_args * args = (boot_args *) PE_state.bootArgs; - uint32_t non_os_pagecount; - ppnum_t pnmax = max_ppnum; - - mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap); - if (args->MemoryMapDescriptorSize == 0) - panic("Invalid memory map descriptor size"); - msize = args->MemoryMapDescriptorSize; - mcount = args->MemoryMapSize / msize; + ppnum_t base, num; + vm_size_t size; + uint32_t bank, num_banks; + uint32_t pages, page_count; + hibernate_page_list_t * list; + hibernate_bitmap_t * bitmap; + + EfiMemoryRange * mptr; + uint32_t mcount, msize, i; + hibernate_bitmap_t dram_ranges[MAX_BANKS]; + boot_args * args = (boot_args *) PE_state.bootArgs; + uint32_t non_os_pagecount; + ppnum_t pnmax = max_ppnum; + + mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap); + if (args->MemoryMapDescriptorSize == 0) { + panic("Invalid memory map descriptor size"); + } + msize = args->MemoryMapDescriptorSize; + mcount = args->MemoryMapSize / msize; #if KASAN - /* adjust max page number to include stolen memory */ - if (atop(shadow_ptop) > pnmax) { - pnmax = (ppnum_t)atop(shadow_ptop); - } + /* adjust max page number to include stolen memory */ + if (atop(shadow_ptop) > pnmax) { + pnmax = (ppnum_t)atop(shadow_ptop); + } #endif - num_banks = 0; - non_os_pagecount = 0; - for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) - { - base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT); - num = (ppnum_t) mptr->NumberOfPages; + num_banks = 0; + non_os_pagecount = 0; + for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { + base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT); + num = (ppnum_t) mptr->NumberOfPages; #if KASAN - if (i == shadow_stolen_idx) { - /* - * Add all stolen pages to the bitmap. Later we will prune the unused - * pages. - */ - num += shadow_pages_total; - } + if (i == shadow_stolen_idx) { + /* + * Add all stolen pages to the bitmap. Later we will prune the unused + * pages. 
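+ * shadow_pages_total is the number of frames stolen for the KASAN
+ * shadow; folding it into this range keeps those frames covered by the
+ * hibernation bitmap even though the EFI map entry itself does not
+ * describe them.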
+ */ + num += shadow_pages_total; + } #endif - if (base > pnmax) - continue; - if ((base + num - 1) > pnmax) - num = pnmax - base + 1; - if (!num) - continue; - - switch (mptr->Type) - { - // any kind of dram - case kEfiACPIMemoryNVS: - case kEfiPalCode: - non_os_pagecount += num; - - // OS used dram - case kEfiLoaderCode: - case kEfiLoaderData: - case kEfiBootServicesCode: - case kEfiBootServicesData: - case kEfiConventionalMemory: - - for (bank = 0; bank < num_banks; bank++) - { - if (dram_ranges[bank].first_page <= base) + if (base > pnmax) { continue; - if ((base + num) == dram_ranges[bank].first_page) - { - dram_ranges[bank].first_page = base; - num = 0; - } - break; } - if (!num) break; - - if (bank && (base == (1 + dram_ranges[bank - 1].last_page))) - bank--; - else - { - num_banks++; - if (num_banks >= MAX_BANKS) break; - bcopy(&dram_ranges[bank], - &dram_ranges[bank + 1], - (num_banks - bank - 1) * sizeof(hibernate_bitmap_t)); - dram_ranges[bank].first_page = base; + if ((base + num - 1) > pnmax) { + num = pnmax - base + 1; + } + if (!num) { + continue; } - dram_ranges[bank].last_page = base + num - 1; - break; - - // runtime services will be restarted, so no save - case kEfiRuntimeServicesCode: - case kEfiRuntimeServicesData: - // contents are volatile once the platform expert starts - case kEfiACPIReclaimMemory: - // non dram - case kEfiReservedMemoryType: - case kEfiUnusableMemory: - case kEfiMemoryMappedIO: - case kEfiMemoryMappedIOPortSpace: - default: - break; + + switch (mptr->Type) { + // any kind of dram + case kEfiACPIMemoryNVS: + case kEfiPalCode: + non_os_pagecount += num; + + // OS used dram + case kEfiLoaderCode: + case kEfiLoaderData: + case kEfiBootServicesCode: + case kEfiBootServicesData: + case kEfiConventionalMemory: + + for (bank = 0; bank < num_banks; bank++) { + if (dram_ranges[bank].first_page <= base) { + continue; + } + if ((base + num) == dram_ranges[bank].first_page) { + dram_ranges[bank].first_page = base; + num = 0; + } + break; + } + if (!num) { + break; + } + + if (bank && (base == (1 + dram_ranges[bank - 1].last_page))) { + bank--; + } else { + num_banks++; + if (num_banks >= MAX_BANKS) { + break; + } + bcopy(&dram_ranges[bank], + &dram_ranges[bank + 1], + (num_banks - bank - 1) * sizeof(hibernate_bitmap_t)); + dram_ranges[bank].first_page = base; + } + dram_ranges[bank].last_page = base + num - 1; + break; + + // runtime services will be restarted, so no save + case kEfiRuntimeServicesCode: + case kEfiRuntimeServicesData: + // contents are volatile once the platform expert starts + case kEfiACPIReclaimMemory: + // non dram + case kEfiReservedMemoryType: + case kEfiUnusableMemory: + case kEfiMemoryMappedIO: + case kEfiMemoryMappedIOPortSpace: + default: + break; + } + } + + if (num_banks >= MAX_BANKS) { + return NULL; } - } - - if (num_banks >= MAX_BANKS) - return (NULL); - - // size the hibernation bitmap - - size = sizeof(hibernate_page_list_t); - page_count = 0; - for (bank = 0; bank < num_banks; bank++) { - pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page; - page_count += pages; - size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t); - } - - list = (hibernate_page_list_t *)kalloc(size); - if (!list) - return (list); - - list->list_size = (uint32_t)size; - list->page_count = page_count; - list->bank_count = num_banks; - - // convert to hibernation bitmap. 
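/*
 * One bit per physical page, packed into 32-bit words: ((pages + 31) >> 5)
 * rounds each bank's page count up to whole words, so a 100-page bank,
 * for example, needs (100 + 31) >> 5 = 4 words, i.e. 16 bytes of bitmap.
 */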
- - bitmap = &list->bank_bitmap[0]; - for (bank = 0; bank < num_banks; bank++) - { - bitmap->first_page = dram_ranges[bank].first_page; - bitmap->last_page = dram_ranges[bank].last_page; - bitmap->bitmapwords = (bitmap->last_page + 1 - - bitmap->first_page + 31) >> 5; - if (log) kprintf("hib bank[%d]: 0x%x000 end 0x%xfff\n", - bank, bitmap->first_page, bitmap->last_page); - bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; - } - if (log) printf("efi pagecount %d\n", non_os_pagecount); - - return (list); + + // size the hibernation bitmap + + size = sizeof(hibernate_page_list_t); + page_count = 0; + for (bank = 0; bank < num_banks; bank++) { + pages = dram_ranges[bank].last_page + 1 - dram_ranges[bank].first_page; + page_count += pages; + size += sizeof(hibernate_bitmap_t) + ((pages + 31) >> 5) * sizeof(uint32_t); + } + + list = (hibernate_page_list_t *)kalloc(size); + if (!list) { + return list; + } + + list->list_size = (uint32_t)size; + list->page_count = page_count; + list->bank_count = num_banks; + + // convert to hibernation bitmap. + + bitmap = &list->bank_bitmap[0]; + for (bank = 0; bank < num_banks; bank++) { + bitmap->first_page = dram_ranges[bank].first_page; + bitmap->last_page = dram_ranges[bank].last_page; + bitmap->bitmapwords = (bitmap->last_page + 1 + - bitmap->first_page + 31) >> 5; + if (log) { + kprintf("hib bank[%d]: 0x%x000 end 0x%xfff\n", + bank, bitmap->first_page, bitmap->last_page); + } + bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; + } + if (log) { + printf("efi pagecount %d\n", non_os_pagecount); + } + + return list; } // mark pages not to be saved, but available for scratch usage during restore void hibernate_page_list_setall_machine( __unused hibernate_page_list_t * page_list, - __unused hibernate_page_list_t * page_list_wired, - __unused boolean_t preflight, - __unused uint32_t * pagesOut) + __unused hibernate_page_list_t * page_list_wired, + __unused boolean_t preflight, + __unused uint32_t * pagesOut) { } // mark pages not to be saved and not for scratch usage during restore void hibernate_page_list_set_volatile( hibernate_page_list_t * page_list, - hibernate_page_list_t * page_list_wired, - uint32_t * pagesOut) + hibernate_page_list_t * page_list_wired, + uint32_t * pagesOut) { - boot_args * args = (boot_args *) PE_state.bootArgs; + boot_args * args = (boot_args *) PE_state.bootArgs; - if (args->efiRuntimeServicesPageStart) - { - hibernate_set_page_state(page_list, page_list_wired, - args->efiRuntimeServicesPageStart, args->efiRuntimeServicesPageCount, + if (args->efiRuntimeServicesPageStart) { + hibernate_set_page_state(page_list, page_list_wired, + args->efiRuntimeServicesPageStart, args->efiRuntimeServicesPageCount, kIOHibernatePageStateFree); - *pagesOut -= args->efiRuntimeServicesPageCount; - } + *pagesOut -= args->efiRuntimeServicesPageCount; + } } -kern_return_t +kern_return_t hibernate_processor_setup(IOHibernateImageHeader * header) { - boot_args * args = (boot_args *) PE_state.bootArgs; + boot_args * args = (boot_args *) PE_state.bootArgs; - cpu_datap(0)->cpu_hibernate = 1; - header->processorFlags = 0; + cpu_datap(0)->cpu_hibernate = 1; + header->processorFlags = 0; - header->runtimePages = args->efiRuntimeServicesPageStart; - header->runtimePageCount = args->efiRuntimeServicesPageCount; - header->runtimeVirtualPages = args->efiRuntimeServicesVirtualPageStart; - header->performanceDataStart = args->performanceDataStart; - header->performanceDataSize = args->performanceDataSize; + header->runtimePages = 
args->efiRuntimeServicesPageStart; + header->runtimePageCount = args->efiRuntimeServicesPageCount; + header->runtimeVirtualPages = args->efiRuntimeServicesVirtualPageStart; + header->performanceDataStart = args->performanceDataStart; + header->performanceDataSize = args->performanceDataSize; - return (KERN_SUCCESS); + return KERN_SUCCESS; } static boolean_t hibernate_vm_locks_safe; @@ -249,18 +257,20 @@ static boolean_t hibernate_vm_locks_safe; void hibernate_vm_lock(void) { - if (current_cpu_datap()->cpu_hibernate) { - hibernate_vm_lock_queues(); - hibernate_vm_locks_safe = TRUE; - } + if (current_cpu_datap()->cpu_hibernate) { + hibernate_vm_lock_queues(); + hibernate_vm_locks_safe = TRUE; + } } void hibernate_vm_unlock(void) { - assert(FALSE == ml_get_interrupts_enabled()); - if (current_cpu_datap()->cpu_hibernate) hibernate_vm_unlock_queues(); - ml_set_is_quiescing(TRUE); + assert(FALSE == ml_get_interrupts_enabled()); + if (current_cpu_datap()->cpu_hibernate) { + hibernate_vm_unlock_queues(); + } + ml_set_is_quiescing(TRUE); } // ACPI calls hibernate_vm_lock(), interrupt disable, hibernate_vm_unlock() on sleep, @@ -270,14 +280,14 @@ hibernate_vm_unlock(void) void hibernate_vm_lock_end(void) { - assert(FALSE == ml_get_interrupts_enabled()); - hibernate_vm_locks_safe = FALSE; - ml_set_is_quiescing(FALSE); + assert(FALSE == ml_get_interrupts_enabled()); + hibernate_vm_locks_safe = FALSE; + ml_set_is_quiescing(FALSE); } boolean_t hibernate_vm_locks_are_safe(void) { - assert(FALSE == ml_get_interrupts_enabled()); - return (hibernate_vm_locks_safe); + assert(FALSE == ml_get_interrupts_enabled()); + return hibernate_vm_locks_safe; } diff --git a/osfmk/i386/hibernate_restore.c b/osfmk/i386/hibernate_restore.c index 2e4c69e79..f8b0bb195 100644 --- a/osfmk/i386/hibernate_restore.c +++ b/osfmk/i386/hibernate_restore.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -33,19 +33,20 @@ extern pd_entry_t BootPTD[2048]; -// src is virtually mapped, not page aligned, +// src is virtually mapped, not page aligned, // dst is a physical 4k page aligned ptr, len is one 4K page // src & dst will not overlap -uintptr_t +uintptr_t hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, uint32_t procFlags) { (void)procFlags; uint64_t * d; uint64_t * s; - if (src == 0) + if (src == 0) { return (uintptr_t)dst; + } d = (uint64_t *)pal_hib_map(DEST_COPY_AREA, dst); s = (uint64_t *) (uintptr_t)src; @@ -61,43 +62,44 @@ void hibprintf(const char *fmt, ...); uintptr_t pal_hib_map(uintptr_t virt, uint64_t phys) { - uintptr_t index; + uintptr_t index; - switch (virt) - { + switch (virt) { case DEST_COPY_AREA: case SRC_COPY_AREA: case COPY_PAGE_AREA: case BITMAP_AREA: case IMAGE_AREA: case IMAGE2_AREA: - break; + break; default: - asm("cli;hlt;"); - break; - } - if (phys < IMAGE2_AREA) - { - // first 4Gb is all mapped, - // and do not expect source areas to cross 4Gb - return (phys); - } - index = (virt >> I386_LPGSHIFT); - virt += (uintptr_t)(phys & I386_LPGMASK); - phys = ((phys & ~((uint64_t)I386_LPGMASK)) | INTEL_PTE_PS | INTEL_PTE_VALID | INTEL_PTE_WRITE); - if (phys == BootPTD[index]) return (virt); - BootPTD[index] = phys; - invlpg(virt); - BootPTD[index + 1] = (phys + I386_LPGBYTES); - invlpg(virt + I386_LPGBYTES); + asm("cli;hlt;"); + break; + } + if (phys < IMAGE2_AREA) { + // first 4Gb is all mapped, + // and do not expect source areas to cross 4Gb + return phys; + } + index = (virt >> I386_LPGSHIFT); + virt += (uintptr_t)(phys & I386_LPGMASK); + phys = ((phys & ~((uint64_t)I386_LPGMASK)) | INTEL_PTE_PS | INTEL_PTE_VALID | INTEL_PTE_WRITE); + if (phys == BootPTD[index]) { + return virt; + } + BootPTD[index] = phys; + invlpg(virt); + BootPTD[index + 1] = (phys + I386_LPGBYTES); + invlpg(virt + I386_LPGBYTES); - return (virt); + return virt; } -void hibernateRestorePALState(uint32_t *arg) +void +hibernateRestorePALState(uint32_t *arg) { - (void)arg; + (void)arg; } void diff --git a/osfmk/i386/hpet.c b/osfmk/i386/hpet.c index f8fd9832d..8b9f03b2e 100644 --- a/osfmk/i386/hpet.c +++ b/osfmk/i386/hpet.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
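pal_hib_map above services a fixed set of restore-time windows (DEST_COPY_AREA, BITMAP_AREA, and so on) by installing 2 MB large-page translations into BootPTD on demand: physical addresses below IMAGE2_AREA are already identity-mapped and returned as-is, otherwise the 2 MB-aligned address is written as a PS|VALID|WRITE entry and the sub-page offset is re-added to the window's VA. A sketch of just the address arithmetic; the shift and mask constants here are assumptions consistent with 2 MB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define LPGSHIFT  21                        /* 2 MB large pages */
    #define LPGMASK   ((1ULL << LPGSHIFT) - 1)
    #define PTE_VALID (1ULL << 0)
    #define PTE_WRITE (1ULL << 1)
    #define PTE_PS    (1ULL << 7)

    int
    main(void)
    {
        uint64_t virt = 0x1FC00000ULL;    /* hypothetical copy-window VA */
        uint64_t phys = 0x23456789AULL;   /* arbitrary >4 GB source address */

        uint64_t index = virt >> LPGSHIFT;           /* BootPTD slot */
        uint64_t va    = virt + (phys & LPGMASK);    /* keep sub-2MB offset */
        uint64_t pte   = (phys & ~LPGMASK) | PTE_PS | PTE_VALID | PTE_WRITE;

        /* The kernel writes BootPTD[index] and BootPTD[index + 1]
         * (phys + 2 MB, since a 4 KB copy can straddle the boundary),
         * then invlpg()s both windows. */
        printf("slot %llu <- PTE 0x%llx, access via VA 0x%llx\n",
            (unsigned long long)index, (unsigned long long)pte,
            (unsigned long long)va);
        return 0;
    }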
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -66,11 +66,11 @@ #define Tera (kilo * Giga) #define Peta (kilo * Tera) -vm_offset_t hpetArea = 0; -uint32_t hpetAreap = 0; +vm_offset_t hpetArea = 0; +uint32_t hpetAreap = 0; uint64_t hpetFemto = 0; uint64_t hpetFreq = 0; -uint64_t hpetCvt = 0; /* (TAKE OUT LATER) */ +uint64_t hpetCvt = 0; /* (TAKE OUT LATER) */ uint64_t hpetCvtt2n = 0; uint64_t hpetCvtn2t = 0; uint64_t tsc2hpet = 0; @@ -78,27 +78,27 @@ uint64_t hpet2tsc = 0; uint64_t bus2hpet = 0; uint64_t hpet2bus = 0; -vm_offset_t rcbaArea = 0; -uint32_t rcbaAreap = 0; +vm_offset_t rcbaArea = 0; +uint32_t rcbaAreap = 0; static int (*hpet_req)(uint32_t apicid, void *arg, hpetRequest_t *hpet) = NULL; static void *hpet_arg = NULL; #if DEBUG -#define DBG(x...) kprintf("DBG: " x) +#define DBG(x...) kprintf("DBG: " x) #else #define DBG(x...) #endif int hpet_register_callback(int (*hpet_reqst)(uint32_t apicid, - void *arg, - hpetRequest_t *hpet), - void *arg) + void *arg, + hpetRequest_t *hpet), + void *arg) { - hpet_req = hpet_reqst; - hpet_arg = arg; - return(0); + hpet_req = hpet_reqst; + hpet_arg = arg; + return 0; } /* @@ -109,78 +109,78 @@ hpet_register_callback(int (*hpet_reqst)(uint32_t apicid, int hpet_request(uint32_t cpu) { - hpetRequest_t hpetReq; - int rc; - x86_lcpu_t *lcpu; - x86_core_t *core; - x86_pkg_t *pkg; - boolean_t enabled; - - if (hpet_req == NULL) { - return(-1); - } - - /* - * Deal with the case where the CPU # passed in is past the - * value specified in cpus=n in boot-args. - */ - if (cpu >= real_ncpus) { + hpetRequest_t hpetReq; + int rc; + x86_lcpu_t *lcpu; + x86_core_t *core; + x86_pkg_t *pkg; + boolean_t enabled; + + if (hpet_req == NULL) { + return -1; + } + + /* + * Deal with the case where the CPU # passed in is past the + * value specified in cpus=n in boot-args. + */ + if (cpu >= real_ncpus) { + enabled = ml_set_interrupts_enabled(FALSE); + lcpu = cpu_to_lcpu(cpu); + if (lcpu != NULL) { + core = lcpu->core; + pkg = core->package; + + if (lcpu->primary) { + pkg->flags |= X86PKG_FL_HAS_HPET; + } + } + + ml_set_interrupts_enabled(enabled); + return 0; + } + + rc = (*hpet_req)(ml_get_apicid(cpu), hpet_arg, &hpetReq); + if (rc != 0) { + return rc; + } + enabled = ml_set_interrupts_enabled(FALSE); lcpu = cpu_to_lcpu(cpu); - if (lcpu != NULL) { - core = lcpu->core; - pkg = core->package; + core = lcpu->core; + pkg = core->package; + + /* + * Compute the address of the HPET. + */ + core->Hpet = (hpetTimer_t *)((uint8_t *)hpetArea + hpetReq.hpetOffset); + core->HpetVec = hpetReq.hpetVector; + + /* + * Enable interrupts + */ + core->Hpet->Config |= Tn_INT_ENB_CNF; + + /* + * Save the configuration + */ + core->HpetCfg = core->Hpet->Config; + core->HpetCmp = 0; - if (lcpu->primary) { + /* + * If the CPU is the "primary" for the package, then + * add the HPET to the package too. + */ + if (lcpu->primary) { + pkg->Hpet = core->Hpet; + pkg->HpetCfg = core->HpetCfg; + pkg->HpetCmp = core->HpetCmp; pkg->flags |= X86PKG_FL_HAS_HPET; - } } ml_set_interrupts_enabled(enabled); - return(0); - } - - rc = (*hpet_req)(ml_get_apicid(cpu), hpet_arg, &hpetReq); - if (rc != 0) { - return(rc); - } - - enabled = ml_set_interrupts_enabled(FALSE); - lcpu = cpu_to_lcpu(cpu); - core = lcpu->core; - pkg = core->package; - - /* - * Compute the address of the HPET. 
- */ - core->Hpet = (hpetTimer_t *)((uint8_t *)hpetArea + hpetReq.hpetOffset); - core->HpetVec = hpetReq.hpetVector; - - /* - * Enable interrupts - */ - core->Hpet->Config |= Tn_INT_ENB_CNF; - - /* - * Save the configuration - */ - core->HpetCfg = core->Hpet->Config; - core->HpetCmp = 0; - - /* - * If the CPU is the "primary" for the package, then - * add the HPET to the package too. - */ - if (lcpu->primary) { - pkg->Hpet = core->Hpet; - pkg->HpetCfg = core->HpetCfg; - pkg->HpetCmp = core->HpetCmp; - pkg->flags |= X86PKG_FL_HAS_HPET; - } - - ml_set_interrupts_enabled(enabled); - return(0); + return 0; } /* @@ -204,7 +204,7 @@ map_rcbaArea(void) void hpet_init(void) { - unsigned int *xmod; + unsigned int *xmod; map_rcbaArea(); @@ -212,10 +212,10 @@ hpet_init(void) * Is the HPET memory already enabled? * If not, set address and enable. */ - xmod = (uint32_t *)(rcbaArea + 0x3404); /* Point to the HPTC */ - uint32_t hptc = *xmod; /* Get HPET config */ + xmod = (uint32_t *)(rcbaArea + 0x3404); /* Point to the HPTC */ + uint32_t hptc = *xmod; /* Get HPET config */ DBG(" current RCBA.HPTC: %08X\n", *xmod); - if(!(hptc & hptcAE)) { + if (!(hptc & hptcAE)) { DBG("HPET memory is not enabled, " "enabling and assigning to 0xFED00000 (hope that's ok)\n"); *xmod = (hptc & ~3) | hptcAE; @@ -254,10 +254,10 @@ hpet_init(void) hpetCvtt2n = hpetCvtt2n / 1000000ULL; hpetCvtn2t = 0xFFFFFFFFFFFFFFFFULL / hpetCvtt2n; kprintf("HPET: Frequency = %6d.%04dMHz, " - "cvtt2n = %08X.%08X, cvtn2t = %08X.%08X\n", - (uint32_t)(hpetFreq / Mega), (uint32_t)(hpetFreq % Mega), - (uint32_t)(hpetCvtt2n >> 32), (uint32_t)hpetCvtt2n, - (uint32_t)(hpetCvtn2t >> 32), (uint32_t)hpetCvtn2t); + "cvtt2n = %08X.%08X, cvtn2t = %08X.%08X\n", + (uint32_t)(hpetFreq / Mega), (uint32_t)(hpetFreq % Mega), + (uint32_t)(hpetCvtt2n >> 32), (uint32_t)hpetCvtt2n, + (uint32_t)(hpetCvtn2t >> 32), (uint32_t)hpetCvtn2t); /* (TAKE OUT LATER) @@ -296,19 +296,19 @@ hpet_init(void) void hpet_get_info(hpetInfo_t *info) { - info->hpetCvtt2n = hpetCvtt2n; - info->hpetCvtn2t = hpetCvtn2t; - info->tsc2hpet = tsc2hpet; - info->hpet2tsc = hpet2tsc; - info->bus2hpet = bus2hpet; - info->hpet2bus = hpet2bus; - /* - * XXX - * We're repurposing the rcbaArea so we can use the HPET. - * Eventually we'll rename this correctly. - */ - info->rcbaArea = hpetArea; - info->rcbaAreap = hpetAreap; + info->hpetCvtt2n = hpetCvtt2n; + info->hpetCvtn2t = hpetCvtn2t; + info->tsc2hpet = tsc2hpet; + info->hpet2tsc = hpet2tsc; + info->bus2hpet = bus2hpet; + info->hpet2bus = hpet2bus; + /* + * XXX + * We're repurposing the rcbaArea so we can use the HPET. + * Eventually we'll rename this correctly. + */ + info->rcbaArea = hpetArea; + info->rcbaAreap = hpetAreap; } @@ -322,14 +322,14 @@ hpet_get_info(hpetInfo_t *info) void ml_hpet_cfg(uint32_t cpu, uint32_t hpetVect) { - uint64_t *hpetVaddr; - hpetTimer_t *hpet; - x86_lcpu_t *lcpu; - x86_core_t *core; - x86_pkg_t *pkg; - boolean_t enabled; - - if(cpu > 1) { + uint64_t *hpetVaddr; + hpetTimer_t *hpet; + x86_lcpu_t *lcpu; + x86_core_t *core; + x86_pkg_t *pkg; + boolean_t enabled; + + if (cpu > 1) { panic("ml_hpet_cfg: invalid cpu = %d\n", cpu); } @@ -340,8 +340,9 @@ ml_hpet_cfg(uint32_t cpu, uint32_t hpetVect) /* * Only deal with the primary CPU for the package. 
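The factors computed in hpet_init above are 32.32 fixed-point: hpetCvtt2n is nanoseconds per HPET tick, built from the femtosecond tick period as (femto << 32) / 10^6, and hpetCvtn2t is its reciprocal for converting the other way. A standalone sketch of the math, assuming the common 14.31818 MHz HPET whose period is 69841279 fs (in the kernel the period is read from the top half of GCAP_ID):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t femto = 69841279ULL;    /* fs per tick (assumed part) */

        /* ticks -> ns, 32.32 fixed point: fs / 10^6 is ns, shifted up 32 */
        uint64_t cvtt2n = (femto << 32) / 1000000ULL;
        /* ns -> ticks: reciprocal in the same format */
        uint64_t cvtn2t = 0xFFFFFFFFFFFFFFFFULL / cvtt2n;

        uint64_t freq = 1000000000000000ULL / femto;  /* 1 Peta fs/s -> Hz */
        printf("HPET frequency %llu Hz\n", (unsigned long long)freq);

        uint64_t ticks = 14318180ULL;            /* about one second */
        uint64_t ns = (ticks * cvtt2n) >> 32;    /* multiply, drop fraction */
        printf("%llu ticks = %llu ns\n",
            (unsigned long long)ticks, (unsigned long long)ns);

        (void)cvtn2t;    /* the inverse factor turns ns deadlines into ticks */
        return 0;
    }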
*/ - if (!lcpu->primary) - return; + if (!lcpu->primary) { + return; + } enabled = ml_set_interrupts_enabled(FALSE); @@ -350,7 +351,7 @@ ml_hpet_cfg(uint32_t cpu, uint32_t hpetVect) hpet = (hpetTimer_t *)hpetVaddr; DBG("ml_hpet_cfg: HPET for cpu %d at %p, vector = %d\n", - cpu, hpetVaddr, hpetVect); + cpu, hpetVaddr, hpetVect); /* Save the address and vector of the HPET for this processor */ core->Hpet = hpet; @@ -387,7 +388,6 @@ ml_hpet_cfg(uint32_t cpu, uint32_t hpetVect) int HPETInterrupt(void) { - /* All we do here is to bump the count */ x86_package()->HpetInt++; @@ -406,8 +406,8 @@ static hpetReg_t saved_hpet; void hpet_save(void) { - hpetReg_t *from = (hpetReg_t *) hpetArea; - hpetReg_t *to = &saved_hpet; + hpetReg_t *from = (hpetReg_t *) hpetArea; + hpetReg_t *to = &saved_hpet; to->GEN_CONF = from->GEN_CONF; to->TIM0_CONF = from->TIM0_CONF; @@ -422,8 +422,8 @@ hpet_save(void) void hpet_restore(void) { - hpetReg_t *from = &saved_hpet; - hpetReg_t *to = (hpetReg_t *) hpetArea; + hpetReg_t *from = &saved_hpet; + hpetReg_t *to = (hpetReg_t *) hpetArea; /* * Is the HPET memory already enabled? @@ -431,7 +431,7 @@ hpet_restore(void) */ uint32_t *hptcp = (uint32_t *)(rcbaArea + 0x3404); uint32_t hptc = *hptcp; - if(!(hptc & hptcAE)) { + if (!(hptc & hptcAE)) { DBG("HPET memory is not enabled, " "enabling and assigning to 0xFED00000 (hope that's ok)\n"); *hptcp = (hptc & ~3) | hptcAE; @@ -458,10 +458,10 @@ hpet_restore(void) uint64_t rdHPET(void) { - hpetReg_t *hpetp = (hpetReg_t *) hpetArea; - volatile uint32_t *regp = (uint32_t *) &hpetp->MAIN_CNT; - uint32_t high; - uint32_t low; + hpetReg_t *hpetp = (hpetReg_t *) hpetArea; + volatile uint32_t *regp = (uint32_t *) &hpetp->MAIN_CNT; + uint32_t high; + uint32_t low; do { high = *(regp + 1); diff --git a/osfmk/i386/hpet.h b/osfmk/i386/hpet.h index 6bc829bb5..7bcbf77e2 100644 --- a/osfmk/i386/hpet.h +++ b/osfmk/i386/hpet.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
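rdHPET above reads the 64-bit main counter through two 32-bit volatile loads; its hunk is truncated after the first high-word read, but the shape of the loop is the classic torn-read guard: re-read the high word until it is stable across the low-word load, so a carry between the two accesses cannot produce a mixed value. A sketch of that pattern (the loop body here is an assumption about the elided lines, not a quotation):

    #include <stdint.h>

    /* Read a 64-bit MMIO counter through two 32-bit accesses without
     * tearing; regp points at the low word, the high word follows it. */
    static uint64_t
    read_counter64(volatile uint32_t *regp)
    {
        uint32_t high, low;

        do {
            high = *(regp + 1);    /* sample the high word */
            low  = *regp;          /* then the low word */
            /* if the high word moved, a carry crossed the boundary */
        } while (high != *(regp + 1));

        return ((uint64_t)high << 32) | low;
    }

    int
    main(void)
    {
        uint32_t fake[2] = { 0x89ABCDEF, 0x01234567 };    /* low, high */
        return read_counter64(fake) == 0x0123456789ABCDEFULL ? 0 : 1;
    }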
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL_PRIVATE @@ -39,56 +39,54 @@ * Memory mapped registers for the HPET */ typedef struct hpetReg { - uint64_t GCAP_ID; /* General capabilities */ - uint64_t rsv1; - uint64_t GEN_CONF; /* General configuration */ - uint64_t rsv2; - uint64_t GINTR_STA; /* General Interrupt status */ - uint64_t rsv3[25]; - uint64_t MAIN_CNT; /* Main counter */ - uint64_t rsv4; - uint64_t TIM0_CONF; /* Timer 0 config and cap */ -#define TIM_CONF 0 -#define Tn_INT_ENB_CNF 4 - uint64_t TIM0_COMP; /* Timer 0 comparator */ -#define TIM_COMP 8 - uint64_t rsv5[2]; - uint64_t TIM1_CONF; /* Timer 1 config and cap */ - uint64_t TIM1_COMP; /* Timer 1 comparator */ - uint64_t rsv6[2]; - uint64_t TIM2_CONF; /* Timer 2 config and cap */ - uint64_t TIM2_COMP; /* Timer 2 comparator */ - uint64_t rsv7[2]; + uint64_t GCAP_ID; /* General capabilities */ + uint64_t rsv1; + uint64_t GEN_CONF; /* General configuration */ + uint64_t rsv2; + uint64_t GINTR_STA; /* General Interrupt status */ + uint64_t rsv3[25]; + uint64_t MAIN_CNT; /* Main counter */ + uint64_t rsv4; + uint64_t TIM0_CONF; /* Timer 0 config and cap */ +#define TIM_CONF 0 +#define Tn_INT_ENB_CNF 4 + uint64_t TIM0_COMP; /* Timer 0 comparator */ +#define TIM_COMP 8 + uint64_t rsv5[2]; + uint64_t TIM1_CONF; /* Timer 1 config and cap */ + uint64_t TIM1_COMP; /* Timer 1 comparator */ + uint64_t rsv6[2]; + uint64_t TIM2_CONF; /* Timer 2 config and cap */ + uint64_t TIM2_COMP; /* Timer 2 comparator */ + uint64_t rsv7[2]; } hpetReg; -typedef struct hpetReg hpetReg_t; +typedef struct hpetReg hpetReg_t; typedef struct hpetTimer { - uint64_t Config; /* Timer config and capabilities */ - uint64_t Compare; /* Timer comparitor */ + uint64_t Config; /* Timer config and capabilities */ + uint64_t Compare; /* Timer comparitor */ } hpetTimer_t; -struct hpetInfo -{ - uint64_t hpetCvtt2n; - uint64_t hpetCvtn2t; - uint64_t tsc2hpet; - uint64_t hpet2tsc; - uint64_t bus2hpet; - uint64_t hpet2bus; - uint32_t rcbaArea; - uint32_t rcbaAreap; +struct hpetInfo { + uint64_t hpetCvtt2n; + uint64_t hpetCvtn2t; + uint64_t tsc2hpet; + uint64_t hpet2tsc; + uint64_t bus2hpet; + uint64_t hpet2bus; + uint32_t rcbaArea; + uint32_t rcbaAreap; }; typedef struct hpetInfo hpetInfo_t; -struct hpetRequest -{ - uint32_t flags; - uint32_t hpetOffset; - uint32_t hpetVector; +struct hpetRequest { + uint32_t flags; + uint32_t hpetOffset; + uint32_t hpetVector; }; typedef struct hpetRequest hpetRequest_t; -#define HPET_REQFL_64BIT 0x00000001 /* Timer is 64 bits */ +#define HPET_REQFL_64BIT 0x00000001 /* Timer is 64 bits */ extern uint64_t hpetFemto; extern uint64_t hpetFreq; @@ -99,7 +97,7 @@ extern uint64_t hpet2tsc; extern uint64_t bus2hpet; extern uint64_t hpet2bus; -extern vm_offset_t rcbaArea; +extern vm_offset_t rcbaArea; extern uint32_t rcbaAreap; extern void map_rcbaAread(void); @@ -118,9 +116,9 @@ extern int hpet_request(uint32_t cpu); extern uint64_t rdHPET(void); extern void hpet_get_info(hpetInfo_t *info); -#define hpetAddr 0xFED00000 -#define hptcAE 0x80 +#define hpetAddr 0xFED00000 +#define hptcAE 0x80 -#endif /* _I386_HPET_H_ */ +#endif /* _I386_HPET_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/i386/hw_defs.h b/osfmk/i386/hw_defs.h index 0fac10f3c..945123b9b 100644 --- a/osfmk/i386/hw_defs.h +++ b/osfmk/i386/hw_defs.h @@ -2,7 +2,7 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. 
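In the hpetReg layout above, the rsv* arrays are load-bearing: they pad each field to the offset the HPET specification assigns it (general configuration at 0x10, main counter at 0xF0, timer n registers at 0x100 + 0x20*n). Those invariants can be pinned down at compile time; a trimmed copy of the struct with C11 static assertions:

    #include <stdint.h>
    #include <stddef.h>

    typedef struct hpetReg {
        uint64_t GCAP_ID;      /* 0x000 general capabilities + period */
        uint64_t rsv1;
        uint64_t GEN_CONF;     /* 0x010 general configuration */
        uint64_t rsv2;
        uint64_t GINTR_STA;    /* 0x020 general interrupt status */
        uint64_t rsv3[25];
        uint64_t MAIN_CNT;     /* 0x0F0 main counter */
        uint64_t rsv4;
        uint64_t TIM0_CONF;    /* 0x100 timer 0 config and capabilities */
        uint64_t TIM0_COMP;    /* 0x108 timer 0 comparator */
        uint64_t rsv5[2];
        uint64_t TIM1_CONF;    /* 0x120 timer 1 config and capabilities */
        uint64_t TIM1_COMP;    /* 0x128 timer 1 comparator */
    } hpetReg_t;

    /* The reserved arrays do the work: 0x28 + 25*8 == 0xF0, and so on. */
    _Static_assert(offsetof(hpetReg_t, GEN_CONF)  == 0x010, "GEN_CONF");
    _Static_assert(offsetof(hpetReg_t, MAIN_CNT)  == 0x0F0, "MAIN_CNT");
    _Static_assert(offsetof(hpetReg_t, TIM0_CONF) == 0x100, "TIM0_CONF");
    _Static_assert(offsetof(hpetReg_t, TIM1_CONF) == 0x120, "TIM1_CONF");

    int
    main(void)
    {
        return 0;    /* successful compilation alone proves the layout */
    }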
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,24 +22,24 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _I386_HW_DEFS_H_ #define _I386_HW_DEFS_H_ -#define pmMwaitC1 0x00 -#define pmMwaitC2 0x10 -#define pmMwaitC3 0x20 -#define pmMwaitC4 0x30 +#define pmMwaitC1 0x00 +#define pmMwaitC2 0x10 +#define pmMwaitC3 0x20 +#define pmMwaitC4 0x30 #define pmMwaitBrInt 0x1 -#define pmBase 0x400 -#define pmCtl1 0x04 -#define pmCtl2 0x20 -#define pmC3Res 0x54 -#define pmStatus 0x00 -#define msrTSC 0x10 +#define pmBase 0x400 +#define pmCtl1 0x04 +#define pmCtl2 0x20 +#define pmC3Res 0x54 +#define pmStatus 0x00 +#define msrTSC 0x10 #endif /* _I386_HW_DEFS_H_ */ diff --git a/osfmk/i386/hw_lock_types.h b/osfmk/i386/hw_lock_types.h index 52f4355f6..6642fcf15 100644 --- a/osfmk/i386/hw_lock_types.h +++ b/osfmk/i386/hw_lock_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -59,8 +59,8 @@ * Machine-dependent simple locks for the i386. */ -#ifndef _I386_HW_LOCK_TYPES_H_ -#define _I386_HW_LOCK_TYPES_H_ +#ifndef _I386_HW_LOCK_TYPES_H_ +#define _I386_HW_LOCK_TYPES_H_ /* * The "hardware lock". Low-level locking primitives that @@ -90,10 +90,9 @@ * later in kern/lock.h.. */ struct hslock { - uintptr_t lock_data; + uintptr_t lock_data; }; typedef struct hslock hw_lock_data_t, *hw_lock_t; -#define hw_lock_addr(hwl) (&((hwl).lock_data)) - -#endif /* _I386_HW_LOCK_TYPES_H_ */ +#define hw_lock_addr(hwl) (&((hwl).lock_data)) +#endif /* _I386_HW_LOCK_TYPES_H_ */ diff --git a/osfmk/i386/i386_init.c b/osfmk/i386/i386_init.c index 8eb6b7edf..1e8810b17 100644 --- a/osfmk/i386/i386_init.c +++ b/osfmk/i386/i386_init.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2003-2016 Apple Inc. All rights reserved. + * Copyright (c) 2003-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -58,6 +58,7 @@ #include #include +#include #include #include #include @@ -110,31 +111,38 @@ #include #if DEBUG -#define DBG(x...) kprintf(x) +#define DBG(x ...) kprintf(x) #else -#define DBG(x...) +#define DBG(x ...) #endif -int debug_task; +int debug_task; -static boot_args *kernelBootArgs; +int early_boot = 1; -extern int disableConsoleOutput; -extern const char version[]; -extern const char version_variant[]; -extern int nx_enabled; +static boot_args *kernelBootArgs; + +extern int disableConsoleOutput; +extern const char version[]; +extern const char version_variant[]; +extern int nx_enabled; /* * Set initial values so that ml_phys_* routines can use the booter's ID mapping * to touch physical space before the kernel's physical aperture exists. */ -uint64_t physmap_base = 0; -uint64_t physmap_max = 4*GB; +uint64_t physmap_base = 0; +uint64_t physmap_max = 4 * GB; + +pd_entry_t *KPTphys; +pd_entry_t *IdlePTD; +pdpt_entry_t *IdlePDPT; +pml4_entry_t *IdlePML4; + +int kernPhysPML4Index; +int kernPhysPML4EntryCount; -pd_entry_t *KPTphys; -pd_entry_t *IdlePTD; -pdpt_entry_t *IdlePDPT; -pml4_entry_t *IdlePML4; +int allow_64bit_proc_LDT_ops; char *physfree; void idt64_remap(void); @@ -157,7 +165,7 @@ static void fillkpt(pt_entry_t *base, int prot, uintptr_t src, int index, int count) { int i; - for (i=0; iPhysicalMemorySize > K64_MAXMEM) { + panic("Installed physical memory exceeds configured maximum."); + } +#endif + + /* + * Add 4GB to the loader-provided physical memory size to account for MMIO space + * XXX in a perfect world, we'd scan PCI buses and count the max memory requested in BARs by + * XXX all enumerated device, then add more for hot-pluggable devices. + */ + highest_physaddr = kernelBootArgs->PhysicalMemorySize + 4 * GB; + + /* + * Calculate the number of PML4 entries we'll need. The total number of entries is + * pdpte_count = (((highest_physaddr) >> PDPT_SHIFT) + entropy_value + + * ((highest_physaddr & PDPT_MASK) == 0 ? 0 : 1)) + * pml4e_count = pdpte_count >> (PML4_SHIFT - PDPT_SHIFT) + */ + assert(highest_physaddr < (UINT64_MAX - PDPTMASK)); + pdpte_count = (unsigned) (((highest_physaddr + PDPTMASK) >> PDPTSHIFT) + phys_random_L3); + kernPhysPML4EntryCount = (pdpte_count + ((1U << (PML4SHIFT - PDPTSHIFT)) - 1)) >> (PML4SHIFT - PDPTSHIFT); + if (kernPhysPML4EntryCount == 0) { + kernPhysPML4EntryCount = 1; + } + if (kernPhysPML4EntryCount > KERNEL_PHYSMAP_PML4_COUNT_MAX) { +#if DEVELOPMENT || DEBUG + panic("physmap too large"); +#else + kprintf("[pmap] Limiting physmap to %d PML4s (was %d)\n", KERNEL_PHYSMAP_PML4_COUNT_MAX, + kernPhysPML4EntryCount); + kernPhysPML4EntryCount = KERNEL_PHYSMAP_PML4_COUNT_MAX; +#endif + } + + kernPhysPML4Index = KERNEL_KEXTS_INDEX - kernPhysPML4EntryCount; /* utb: KERNEL_PHYSMAP_PML4_INDEX */ + + /* + * XXX: Make sure that the addresses returned for physmapL3 and physmapL2 plus their extents + * are in the system-available memory range + */ - uint64_t i; - uint8_t phys_random_L3 = early_random() & 0xFF; /* We assume NX support. Mark all levels of the PHYSMAP NX * to avoid granting executability via a single bit flip. 
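The sizing logic above is plain arithmetic on paging granularities: take the booter-reported DRAM size, add 4 GB of headroom for device MMIO, convert to 1 GB PDPT entries rounding up, add up to 255 extra 1 GB slots of KASLR slide entropy (phys_random_L3), then round up to whole 512 GB PML4 entries. The same arithmetic as a standalone sketch; the constants are the standard x86-64 paging shifts:

    #include <stdint.h>
    #include <stdio.h>

    #define GB        (1ULL << 30)
    #define PDPTSHIFT 30    /* one PDPT entry maps 1 GB */
    #define PML4SHIFT 39    /* one PML4 entry maps 512 GB */
    #define PDPTMASK  ((1ULL << PDPTSHIFT) - 1)

    int
    main(void)
    {
        uint64_t physical_memory = 32 * GB;    /* hypothetical booter value */
        unsigned entropy = 0x7C;               /* low byte of early_random() */

        /* 4 GB of headroom for device MMIO above DRAM */
        uint64_t highest = physical_memory + 4 * GB;

        /* 1 GB PDPT entries, rounded up, plus up to 255 slide slots */
        unsigned pdptes = (unsigned)(((highest + PDPTMASK) >> PDPTSHIFT) + entropy);

        /* round up to whole 512 GB PML4 entries */
        unsigned pml4es = (pdptes + ((1U << (PML4SHIFT - PDPTSHIFT)) - 1))
            >> (PML4SHIFT - PDPTSHIFT);

        printf("%u PDPT entries -> %u PML4 slot(s)\n", pdptes, pml4es);
        return 0;
    }

For 32 GB of DRAM and entropy 0x7C this yields 160 PDPT entries, which still fits in a single PML4 slot.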
@@ -219,95 +319,102 @@ physmap_init(void) } #endif /* DEVELOPMENT || DEBUG */ - for(i = 0; i < NPHYSMAP; i++) { - physmapL3[i + phys_random_L3] = - ((uintptr_t)ID_MAP_VTOP(&physmapL2[i])) - | INTEL_PTE_VALID - | INTEL_PTE_NX - | INTEL_PTE_WRITE; - - uint64_t j; - for(j = 0; j < PTE_PER_PAGE; j++) { - physmapL2[i].entries[j] = - ((i * PTE_PER_PAGE + j) << PDSHIFT) - | INTEL_PTE_PS - | INTEL_PTE_VALID - | INTEL_PTE_NX - | INTEL_PTE_WRITE; + L3_start_index = phys_random_L3; + + for (pml4_index = kernPhysPML4Index; + pml4_index < (kernPhysPML4Index + kernPhysPML4EntryCount) && physAddr < highest_physaddr; + pml4_index++) { + if (physmap_init_L3(L3_start_index, highest_physaddr, &physAddr, &l3pte) < 0) { + panic("Physmap page table initialization failed"); + /* NOTREACHED */ } + + L3_start_index = 0; + + IdlePML4[pml4_index] = ((uintptr_t)ID_MAP_VTOP(l3pte)) + | INTEL_PTE_VALID + | INTEL_PTE_NX + | INTEL_PTE_WRITE; } - IdlePML4[KERNEL_PHYSMAP_PML4_INDEX] = - ((uintptr_t)ID_MAP_VTOP(physmapL3)) - | INTEL_PTE_VALID - | INTEL_PTE_NX - | INTEL_PTE_WRITE; + physmap_base = KVADDR(kernPhysPML4Index, phys_random_L3, 0, 0); + /* + * physAddr contains the last-mapped physical address, so that's what we + * add to physmap_base to derive the ending VA for the physmap. + */ + physmap_max = physmap_base + physAddr; - physmap_base = KVADDR(KERNEL_PHYSMAP_PML4_INDEX, phys_random_L3, 0, 0); - physmap_max = physmap_base + NPHYSMAP * GB; DBG("Physical address map base: 0x%qx\n", physmap_base); - DBG("Physical map idlepml4[%d]: 0x%llx\n", - KERNEL_PHYSMAP_PML4_INDEX, IdlePML4[KERNEL_PHYSMAP_PML4_INDEX]); + for (i = kernPhysPML4Index; i < (kernPhysPML4Index + kernPhysPML4EntryCount); i++) { + DBG("Physical map idlepml4[%d]: 0x%llx\n", i, IdlePML4[i]); + } } -void doublemap_init(void); +void doublemap_init(uint8_t); static void Idle_PTs_init(void) { + uint64_t rand64; + /* Allocate the "idle" kernel page tables: */ - KPTphys = ALLOCPAGES(NKPT); /* level 1 */ - IdlePTD = ALLOCPAGES(NPGPTD); /* level 2 */ - IdlePDPT = ALLOCPAGES(1); /* level 3 */ - IdlePML4 = ALLOCPAGES(1); /* level 4 */ + KPTphys = ALLOCPAGES(NKPT); /* level 1 */ + IdlePTD = ALLOCPAGES(NPGPTD); /* level 2 */ + IdlePDPT = ALLOCPAGES(1); /* level 3 */ + IdlePML4 = ALLOCPAGES(1); /* level 4 */ // Fill the lowest level with everything up to physfree fillkpt(KPTphys, - INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT)); + INTEL_PTE_WRITE, 0, 0, (int)(((uintptr_t)physfree) >> PAGE_SHIFT)); /* IdlePTD */ fillkpt(IdlePTD, - INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT); + INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(KPTphys), 0, NKPT); // IdlePDPT entries fillkpt(IdlePDPT, - INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD); + INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePTD), 0, NPGPTD); // IdlePML4 single entry for kernel space. fillkpt(IdlePML4 + KERNEL_PML4_INDEX, - INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1); - + INTEL_PTE_WRITE, (uintptr_t)ID_MAP_VTOP(IdlePDPT), 0, 1); + postcode(VSTART_PHYSMAP_INIT); - physmap_init(); - doublemap_init(); + /* + * early_random() cannot be called more than one time before the cpu's + * gsbase is initialized, so use the full 64-bit value to extract the + * two 8-bit entropy values needed for address randomization. + */ + rand64 = early_random(); + physmap_init(rand64 & 0xFF); + doublemap_init((rand64 >> 8) & 0xFF); idt64_remap(); postcode(VSTART_SET_CR3); // Switch to the page tables.. 
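Idle_PTs_init above works around the one-call limit on early_random() (it must not be called again before GSBASE is set up) by drawing a single 64-bit value and slicing two 8-bit entropy fields from it: bits 7:0 randomize the physmap's L3 start index and bits 15:8 the double map's L3 slot. A sketch of the split, and of how an L3 index becomes a base address, assuming KVADDR() simply packs the paging-level indices and canonicalizes the result:

    #include <stdint.h>
    #include <stdio.h>

    /* Build a canonical kernel VA from PML4/PDPT indices, as KVADDR()
     * is assumed to do: shift indices into place, sign-extend bit 47. */
    static uint64_t
    kvaddr(uint64_t pml4, uint64_t pdpt)
    {
        uint64_t va = (pml4 << 39) | (pdpt << 30);
        if (va & (1ULL << 47)) {
            va |= 0xFFFF000000000000ULL;    /* canonicalize bits 63:48 */
        }
        return va;
    }

    int
    main(void)
    {
        /* stand-in for the single permitted early_random() call */
        uint64_t rand64 = 0x9E3779B97F4A7C15ULL;

        uint8_t physmap_l3   = rand64 & 0xFF;         /* physmap slide */
        uint8_t doublemap_l3 = (rand64 >> 8) & 0xFF;  /* double-map slide */

        /* 490 is a hypothetical PML4 slot for the physmap */
        printf("physmap_base = 0x%llx (L3 slot %u), doublemap L3 slot %u\n",
            (unsigned long long)kvaddr(490, physmap_l3),
            physmap_l3, doublemap_l3);
        return 0;
    }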
set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4)); - } extern void vstart_trap_handler; -#define BOOT_TRAP_VECTOR(t) \ - [t] = { \ - (uintptr_t) &vstart_trap_handler, \ - KERNEL64_CS, \ - 0, \ - ACC_P|ACC_PL_K|ACC_INTR_GATE, \ - 0 \ +#define BOOT_TRAP_VECTOR(t) \ + [t] = { \ + (uintptr_t) &vstart_trap_handler, \ + KERNEL64_CS, \ + 0, \ + ACC_P|ACC_PL_K|ACC_INTR_GATE, \ + 0 \ }, /* Recursive macro to iterate 0..31 */ -#define L0(x,n) x(n) -#define L1(x,n) L0(x,n-1) L0(x,n) -#define L2(x,n) L1(x,n-2) L1(x,n) -#define L3(x,n) L2(x,n-4) L2(x,n) -#define L4(x,n) L3(x,n-8) L3(x,n) -#define L5(x,n) L4(x,n-16) L4(x,n) +#define L0(x, n) x(n) +#define L1(x, n) L0(x,n-1) L0(x,n) +#define L2(x, n) L1(x,n-2) L1(x,n) +#define L3(x, n) L2(x,n-4) L2(x,n) +#define L4(x, n) L3(x,n-8) L3(x,n) +#define L5(x, n) L4(x,n-16) L4(x,n) #define FOR_0_TO_31(x) L5(x,31) /* @@ -316,28 +423,29 @@ extern void vstart_trap_handler; * All traps point to a common handler. */ struct fake_descriptor64 master_boot_idt64[IDTSZ] - __attribute__((section("__HIB,__desc"))) - __attribute__((aligned(PAGE_SIZE))) = { +__attribute__((section("__HIB,__desc"))) +__attribute__((aligned(PAGE_SIZE))) = { FOR_0_TO_31(BOOT_TRAP_VECTOR) }; static void vstart_idt_init(void) { - x86_64_desc_register_t vstart_idt = { - sizeof(master_boot_idt64), - master_boot_idt64 }; - + x86_64_desc_register_t vstart_idt = { + sizeof(master_boot_idt64), + master_boot_idt64 + }; + fix_desc64(master_boot_idt64, 32); lidt((void *)&vstart_idt); } /* * vstart() is called in the natural mode (64bit for K64, 32 for K32) - * on a set of bootstrap pagetables which use large, 2MB pages to map + * on a set of bootstrap pagetables which use large, 2MB pages to map * all of physical memory in both. See idle_pt.c for details. * - * In K64 this identity mapping is mirrored the top and bottom 512GB + * In K64 this identity mapping is mirrored the top and bottom 512GB * slots of PML4. * * The bootstrap processor called with argument boot_args_start pointing to @@ -351,9 +459,9 @@ __attribute__((noreturn)) void vstart(vm_offset_t boot_args_start) { - boolean_t is_boot_cpu = !(boot_args_start == 0); - int cpu = 0; - uint32_t lphysfree; + boolean_t is_boot_cpu = !(boot_args_start == 0); + int cpu = 0; + uint32_t lphysfree; postcode(VSTART_ENTRY); @@ -364,12 +472,18 @@ vstart(vm_offset_t boot_args_start) vstart_idt_init(); postcode(VSTART_IDT_INIT); + /* + * Ensure that any %gs-relative access results in an immediate fault + * until gsbase is properly initialized below + */ + wrmsr64(MSR_IA32_GS_BASE, EARLY_GSBASE_MAGIC); + /* * Get startup parameters. 
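The L0..L5 ladder above is a doubling macro: each level expands its argument twice, once at n minus the level's stride and once at n, so FOR_0_TO_31(BOOT_TRAP_VECTOR) stamps out initializers for vectors 0 through 31 without 32 hand-written lines. The same trick in miniature, three levels deep to cover 0..7:

    #include <stdio.h>

    #define L0(x, n) x(n)
    #define L1(x, n) L0(x, n-1) L0(x, n)    /* 2 expansions */
    #define L2(x, n) L1(x, n-2) L1(x, n)    /* 4 expansions */
    #define L3(x, n) L2(x, n-4) L2(x, n)    /* 8 expansions */
    #define FOR_0_TO_7(x) L3(x, 7)

    #define PRINT_SLOT(n) printf("slot %d\n", (n));

    int
    main(void)
    {
        FOR_0_TO_7(PRINT_SLOT)    /* PRINT_SLOT(0) ... PRINT_SLOT(7) */
        return 0;
    }

Each argument arrives as an expression like 7-4-2-1, which evaluates to the slot number; the kernel version relies on the same property inside the designated initializer [t] = { ... }.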
*/ kernelBootArgs = (boot_args *)boot_args_start; lphysfree = kernelBootArgs->kaddr + kernelBootArgs->ksize; - physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) &~ (PAGE_SIZE - 1)); + physfree = (void *)(uintptr_t)((lphysfree + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)); #if DEVELOPMENT || DEBUG pal_serial_init(); @@ -383,9 +497,9 @@ vstart(vm_offset_t boot_args_start) DBG("ksize 0x%x\n", kernelBootArgs->ksize); DBG("physfree %p\n", physfree); DBG("bootargs: %p, &ksize: %p &kaddr: %p\n", - kernelBootArgs, - &kernelBootArgs->ksize, - &kernelBootArgs->kaddr); + kernelBootArgs, + &kernelBootArgs->ksize, + &kernelBootArgs->kaddr); DBG("SMBIOS mem sz 0x%llx\n", kernelBootArgs->PhysicalMemorySize); /* @@ -429,22 +543,23 @@ vstart(vm_offset_t boot_args_start) postcode(VSTART_CPU_MODE_INIT); cpu_syscall_init(cpu_datap(0)); /* cpu_syscall_init() will be - * invoked on the APs - * via i386_init_slave() - */ + * invoked on the APs + * via i386_init_slave() + */ } else { /* Switch to kernel's page tables (from the Boot PTs) */ set_cr3_raw((uintptr_t)ID_MAP_VTOP(IdlePML4)); /* Find our logical cpu number */ - cpu = lapic_to_cpu[(LAPIC_READ(ID)>>LAPIC_ID_SHIFT) & LAPIC_ID_MASK]; + cpu = lapic_to_cpu[(LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK]; DBG("CPU: %d, GSBASE initial value: 0x%llx\n", cpu, rdmsr64(MSR_IA32_GS_BASE)); cpu_desc_load(cpu_datap(cpu)); } + early_boot = 0; postcode(VSTART_EXIT); x86_init_wrapper(is_boot_cpu ? (uintptr_t) i386_init - : (uintptr_t) i386_init_slave, - cpu_datap(cpu)->cpu_int_stack_top); + : (uintptr_t) i386_init_slave, + cpu_datap(cpu)->cpu_int_stack_top); } void @@ -459,17 +574,17 @@ pstate_trace(void) void i386_init(void) { - unsigned int maxmem; - uint64_t maxmemtouse; - unsigned int cpus = 0; - boolean_t fidn; - boolean_t IA32e = TRUE; + unsigned int maxmem; + uint64_t maxmemtouse; + unsigned int cpus = 0; + boolean_t fidn; + boolean_t IA32e = TRUE; postcode(I386_INIT_ENTRY); pal_i386_init(); tsc_init(); - rtclock_early_init(); /* mach_absolute_time() now functionsl */ + rtclock_early_init(); /* mach_absolute_time() now functionsl */ kernel_debug_string_early("i386_init"); pstate_trace(); @@ -484,8 +599,8 @@ i386_init(void) postcode(CPU_INIT_D); - printf_init(); /* Init this in case we need debugger */ - panic_init(); /* Init this in case we need debugger */ + printf_init(); /* Init this in case we need debugger */ + panic_init(); /* Init this in case we need debugger */ /* setup debugging output if one has been chosen */ kernel_debug_string_early("PE_init_kprintf"); @@ -494,8 +609,14 @@ i386_init(void) kernel_debug_string_early("kernel_early_bootstrap"); kernel_early_bootstrap(); - if (!PE_parse_boot_argn("diag", &dgWork.dgFlags, sizeof (dgWork.dgFlags))) + if (!PE_parse_boot_argn("diag", &dgWork.dgFlags, sizeof(dgWork.dgFlags))) { dgWork.dgFlags = 0; + } + + if (!PE_parse_boot_argn("ldt64", &allow_64bit_proc_LDT_ops, + sizeof(allow_64bit_proc_LDT_ops))) { + allow_64bit_proc_LDT_ops = 0; + } serialmode = 0; if (PE_parse_boot_argn("serial", &serialmode, sizeof(serialmode))) { @@ -506,9 +627,9 @@ i386_init(void) if (force_sync) { serialmode |= SERIALMODE_SYNCDRAIN; kprintf( - "WARNING: Forcing uart driver to output synchronously." - "printf()s/IOLogs will impact kernel performance.\n" - "You are advised to avoid using 'drain_uart_sync' boot-arg.\n"); + "WARNING: Forcing uart driver to output synchronously." 
+ "printf()s/IOLogs will impact kernel performance.\n" + "You are advised to avoid using 'drain_uart_sync' boot-arg.\n"); } } } @@ -523,28 +644,32 @@ i386_init(void) kprintf("version_variant = %s\n", version_variant); kprintf("version = %s\n", version); - - if (!PE_parse_boot_argn("maxmem", &maxmem, sizeof (maxmem))) + + if (!PE_parse_boot_argn("maxmem", &maxmem, sizeof(maxmem))) { maxmemtouse = 0; - else - maxmemtouse = ((uint64_t)maxmem) * MB; + } else { + maxmemtouse = ((uint64_t)maxmem) * MB; + } - if (PE_parse_boot_argn("cpus", &cpus, sizeof (cpus))) { - if ((0 < cpus) && (cpus < max_ncpus)) - max_ncpus = cpus; + if (PE_parse_boot_argn("cpus", &cpus, sizeof(cpus))) { + if ((0 < cpus) && (cpus < max_ncpus)) { + max_ncpus = cpus; + } } /* * debug support for > 4G systems */ - PE_parse_boot_argn("himemory_mode", &vm_himemory_mode, sizeof (vm_himemory_mode)); - if (vm_himemory_mode != 0) - kprintf("himemory_mode: %d\n", vm_himemory_mode); + PE_parse_boot_argn("himemory_mode", &vm_himemory_mode, sizeof(vm_himemory_mode)); + if (!vm_himemory_mode) { + kprintf("himemory_mode disabled\n"); + } - if (!PE_parse_boot_argn("immediate_NMI", &fidn, sizeof (fidn))) + if (!PE_parse_boot_argn("immediate_NMI", &fidn, sizeof(fidn))) { force_immediate_debugger_NMI = FALSE; - else + } else { force_immediate_debugger_NMI = fidn; + } #if DEBUG nanoseconds_to_absolutetime(URGENCY_NOTIFICATION_ASSERT_NS, &urgency_notification_assert_abstime_threshold); @@ -553,10 +678,11 @@ i386_init(void) &urgency_notification_assert_abstime_threshold, sizeof(urgency_notification_assert_abstime_threshold)); - if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) + if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD)) { nx_enabled = 0; + } - /* + /* * VM initialization, after this we're using page tables... * Thn maximum number of cpus must be set beforehand. 
*/ @@ -582,40 +708,44 @@ i386_init(void) static void do_init_slave(boolean_t fast_restart) { - void *init_param = FULL_SLAVE_INIT; + void *init_param = FULL_SLAVE_INIT; postcode(I386_INIT_SLAVE); if (!fast_restart) { /* Ensure that caching and write-through are enabled */ - set_cr0(get_cr0() & ~(CR0_NW|CR0_CD)); - + set_cr0(get_cr0() & ~(CR0_NW | CR0_CD)); + DBG("i386_init_slave() CPU%d: phys (%d) active.\n", get_cpu_number(), get_cpu_phys_number()); - + assert(!ml_get_interrupts_enabled()); - + cpu_syscall_init(current_cpu_datap()); pmap_cpu_init(); - + #if CONFIG_MCA mca_cpu_init(); #endif - + LAPIC_INIT(); lapic_configure(); LAPIC_DUMP(); LAPIC_CPU_MAP_DUMP(); - + init_fpu(); - + #if CONFIG_MTRR mtrr_update_cpu(); #endif /* update CPU microcode */ ucode_update_wake(); - } else - init_param = FAST_SLAVE_INIT; + + /* Do CPU workarounds after the microcode update */ + cpuid_do_was(); + } else { + init_param = FAST_SLAVE_INIT; + } #if CONFIG_VMX /* resume VT operation */ @@ -623,16 +753,17 @@ do_init_slave(boolean_t fast_restart) #endif #if CONFIG_MTRR - if (!fast_restart) - pat_init(); + if (!fast_restart) { + pat_init(); + } #endif - cpu_thread_init(); /* not strictly necessary */ + cpu_thread_init(); /* not strictly necessary */ + + cpu_init(); /* Sets cpu_running which starter cpu waits for */ + slave_main(init_param); - cpu_init(); /* Sets cpu_running which starter cpu waits for */ - slave_main(init_param); - - panic("do_init_slave() returned from slave_main()"); + panic("do_init_slave() returned from slave_main()"); } /* @@ -644,7 +775,7 @@ do_init_slave(boolean_t fast_restart) void i386_init_slave(void) { - do_init_slave(FALSE); + do_init_slave(FALSE); } /* @@ -656,7 +787,7 @@ i386_init_slave(void) void i386_init_slave_fast(void) { - do_init_slave(TRUE); + do_init_slave(TRUE); } #include @@ -672,7 +803,9 @@ uint64_t dblmap_dist; extern uint64_t idt64_hndl_table0[]; -void doublemap_init(void) { +void +doublemap_init(uint8_t randL3) +{ dblmapL3 = ALLOCPAGES(1); // for 512 1GiB entries dblallocs++; @@ -681,7 +814,7 @@ void doublemap_init(void) { } * dblmapL2 = ALLOCPAGES(1); // for 512 2MiB entries dblallocs++; - dblmapL3[0] = ((uintptr_t)ID_MAP_VTOP(&dblmapL2[0])) + dblmapL3[randL3] = ((uintptr_t)ID_MAP_VTOP(&dblmapL2[0])) | INTEL_PTE_VALID | INTEL_PTE_WRITE; @@ -700,7 +833,7 @@ void doublemap_init(void) { assert((hdescb & 0xFFF) == 0); /* Mirror HIB translations into the double-mapped pagetable subtree*/ - for(int i = 0; hdescc < hdesce; i++) { + for (int i = 0; hdescc < hdesce; i++) { struct { pt_entry_t entries[PTE_PER_PAGE]; } * dblmapL1 = ALLOCPAGES(1); @@ -712,7 +845,7 @@ void doublemap_init(void) { if ((hdescc >= thdescb) && (hdescc < thdesce)) { /* executable */ } else { - template |= INTEL_PTE_WRITE | INTEL_PTE_NX ; /* Writeable, NX */ + template |= INTEL_PTE_WRITE | INTEL_PTE_NX; /* Writeable, NX */ } dblmapL1[0].entries[j] = ((uintptr_t)ID_MAP_VTOP(hdescc)) | template; hdescc += PAGE_SIZE; @@ -721,7 +854,7 @@ void doublemap_init(void) { IdlePML4[KERNEL_DBLMAP_PML4_INDEX] = ((uintptr_t)ID_MAP_VTOP(dblmapL3)) | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF; - dblmap_base = KVADDR(KERNEL_DBLMAP_PML4_INDEX, dblmapL3, 0, 0); + dblmap_base = KVADDR(KERNEL_DBLMAP_PML4_INDEX, randL3, 0, 0); dblmap_max = dblmap_base + hdescszr; /* Calculate the double-map distance, which accounts for the current * KASLR slide @@ -738,7 +871,7 @@ void doublemap_init(void) { * programmed into GSBASE, to the "shadows" in the doublemapped * region. 
These are not aliases, but separate physical allocations * containing data required in the doublemapped trampolines. -*/ + */ idt64_hndl_table0[2] = dblmap_dist + cd1 - cd2; DBG("Double map base: 0x%qx\n", dblmap_base); @@ -753,7 +886,9 @@ vm_offset_t dyn_dblmap(vm_offset_t, vm_offset_t); /* Use of this routine is expected to be synchronized by callers * Creates non-executable aliases. */ -vm_offset_t dyn_dblmap(vm_offset_t cva, vm_offset_t sz) { +vm_offset_t +dyn_dblmap(vm_offset_t cva, vm_offset_t sz) +{ vm_offset_t ava = dblmap_max; assert((sz & PAGE_MASK) == 0); @@ -761,13 +896,15 @@ vm_offset_t dyn_dblmap(vm_offset_t cva, vm_offset_t sz) { pmap_alias(ava, cva, cva + sz, VM_PROT_READ | VM_PROT_WRITE, PMAP_EXPAND_OPTIONS_ALIASMAP); dblmap_max += sz; - return (ava - cva); + return ava - cva; } /* Adjust offsets interior to the bootstrap interrupt descriptor table to redirect * control to the double-mapped interrupt vectors. The IDTR proper will be * programmed via cpu_desc_load() */ -void idt64_remap(void) { +void +idt64_remap(void) +{ for (int i = 0; i < IDTSZ; i++) { master_idt64[i].offset64 = DBLMAP(master_idt64[i].offset64); } diff --git a/osfmk/i386/i386_lowmem.h b/osfmk/i386/i386_lowmem.h index 810e1b533..eec9ff640 100644 --- a/osfmk/i386/i386_lowmem.h +++ b/osfmk/i386/i386_lowmem.h @@ -2,7 +2,7 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,14 +35,14 @@ /* * The kernel better be statically linked at VM_MIN_KERNEL_ADDRESS + 0x100000 */ -#define I386_KERNEL_IMAGE_BASE_PAGE 0x100 +#define I386_KERNEL_IMAGE_BASE_PAGE 0x100 /* For K64, only 3 pages are reserved * - physical page zero, a gap page, and then real-mode-bootstrap/lowGlo. - * Note that the kernel virtual address 0xffffff8000002000 is re-mapped + * Note that the kernel virtual address KERNEL_BASE+0x2000 is re-mapped * to the low globals and that physical page, 0x2000, is used by the bootstrap. */ -#define I386_LOWMEM_RESERVED 3 +#define I386_LOWMEM_RESERVED 3 #endif /* __APPLE_API_PRIVATE */ diff --git a/osfmk/i386/i386_timer.c b/osfmk/i386/i386_timer.c index f302314e9..1d56ea08e 100644 --- a/osfmk/i386/i386_timer.c +++ b/osfmk/i386/i386_timer.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. 
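dyn_dblmap above hands back a single offset (ava - cva) rather than the alias address itself, so a caller can reach the double-mapped, non-executable view of any byte in the run by adding that one distance, just as dblmap_dist relocates the trampoline data wholesale. A hedged sketch of the calling convention; the stub below only mimics the bookkeeping, where the real routine also calls pmap_alias() and assumes an LP64 address space:

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t vm_offset_t;

    /* Illustrative stub: pretend the alias window starts at a fixed VA
     * and grows upward, as dblmap_max does in the kernel. */
    static vm_offset_t dblmap_cursor = 0xFFFFFE0000000000ULL;

    static vm_offset_t
    dyn_dblmap_stub(vm_offset_t cva, vm_offset_t sz)
    {
        vm_offset_t ava = dblmap_cursor;

        dblmap_cursor += sz;    /* the kernel also pmap_alias()es the range */
        return ava - cva;       /* callers keep only this distance */
    }

    int
    main(void)
    {
        static uint64_t trampoline_data[512];    /* hypothetical shared state */
        vm_offset_t base = (vm_offset_t)trampoline_data;

        vm_offset_t dist = dyn_dblmap_stub(base, 4096);
        /* same bytes, doublemapped (RW/NX) address: */
        printf("alias of %p is 0x%lx\n",
            (void *)trampoline_data, (unsigned long)(base + dist));
        return 0;
    }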
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -59,7 +59,7 @@ uint32_t spurious_timers; /* - * Event timer interrupt. + * Event timer interrupt. * * XXX a drawback of this implementation is that events serviced earlier must not set deadlines * that occur before the entire chain completes. @@ -67,35 +67,35 @@ uint32_t spurious_timers; * XXX a better implementation would use a set of generic callouts and iterate over them */ void -timer_intr(int user_mode, - uint64_t rip) +timer_intr(int user_mode, + uint64_t rip) { - uint64_t abstime; - rtclock_timer_t *mytimer; - cpu_data_t *pp; - int64_t latency; - uint64_t pmdeadline; - boolean_t timer_processed = FALSE; + uint64_t abstime; + rtclock_timer_t *mytimer; + cpu_data_t *pp; + int64_t latency; + uint64_t pmdeadline; + boolean_t timer_processed = FALSE; pp = current_cpu_datap(); SCHED_STATS_TIMER_POP(current_processor()); - abstime = mach_absolute_time(); /* Get the time now */ + abstime = mach_absolute_time(); /* Get the time now */ /* has a pending clock timer expired? */ - mytimer = &pp->rtclock_timer; /* Point to the event timer */ + mytimer = &pp->rtclock_timer; /* Point to the event timer */ if ((timer_processed = ((mytimer->deadline <= abstime) || - (abstime >= (mytimer->queue.earliest_soft_deadline))))) { + (abstime >= (mytimer->queue.earliest_soft_deadline))))) { /* * Log interrupt service latency (-ve value expected by tool) * a non-PM event is expected next. - * The requested deadline may be earlier than when it was set + * The requested deadline may be earlier than when it was set * - use MAX to avoid reporting bogus latencies. */ latency = (int64_t) (abstime - MAX(mytimer->deadline, - mytimer->when_set)); + mytimer->when_set)); /* Log zero timer latencies when opportunistically processing * coalesced timers. */ @@ -105,12 +105,12 @@ timer_intr(int user_mode, } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - DECR_TRAP_LATENCY | DBG_FUNC_NONE, - -latency, - ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)), - user_mode, 0, 0); + DECR_TRAP_LATENCY | DBG_FUNC_NONE, + -latency, + ((user_mode != 0) ? rip : VM_KERNEL_UNSLIDE(rip)), + user_mode, 0, 0); - mytimer->has_expired = TRUE; /* Remember that we popped */ + mytimer->has_expired = TRUE; /* Remember that we popped */ mytimer->deadline = timer_queue_expire(&mytimer->queue, abstime); mytimer->has_expired = FALSE; @@ -122,12 +122,12 @@ timer_intr(int user_mode, /* is it time for power management state change? 
*/ if ((pmdeadline = pmCPUGetDeadline(pp)) && (pmdeadline <= abstime)) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - DECR_PM_DEADLINE | DBG_FUNC_START, - 0, 0, 0, 0, 0); + DECR_PM_DEADLINE | DBG_FUNC_START, + 0, 0, 0, 0, 0); pmCPUDeadline(pp); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - DECR_PM_DEADLINE | DBG_FUNC_END, - 0, 0, 0, 0, 0); + DECR_PM_DEADLINE | DBG_FUNC_END, + 0, 0, 0, 0, 0); timer_processed = TRUE; abstime = mach_absolute_time(); /* Get the time again since we ran a bit */ } @@ -138,29 +138,31 @@ timer_intr(int user_mode, pp->quantum_timer_deadline = 0; quantum_timer_expire(abstime); } - + /* schedule our next deadline */ x86_lcpu()->rtcDeadline = EndOfAllTime; timer_resync_deadlines(); - if (__improbable(timer_processed == FALSE)) + if (__improbable(timer_processed == FALSE)) { spurious_timers++; + } } /* * Set the clock deadline. */ -void timer_set_deadline(uint64_t deadline) +void +timer_set_deadline(uint64_t deadline) { - rtclock_timer_t *mytimer; - spl_t s; - cpu_data_t *pp; + rtclock_timer_t *mytimer; + spl_t s; + cpu_data_t *pp; - s = splclock(); /* no interruptions */ + s = splclock(); /* no interruptions */ pp = current_cpu_datap(); - mytimer = &pp->rtclock_timer; /* Point to the timer itself */ - mytimer->deadline = deadline; /* Set new expiration time */ + mytimer = &pp->rtclock_timer; /* Point to the timer itself */ + mytimer->deadline = deadline; /* Set new expiration time */ mytimer->when_set = mach_absolute_time(); timer_resync_deadlines(); @@ -171,13 +173,13 @@ void timer_set_deadline(uint64_t deadline) void quantum_timer_set_deadline(uint64_t deadline) { - cpu_data_t *pp; - /* We should've only come into this path with interrupts disabled */ - assert(ml_get_interrupts_enabled() == FALSE); + cpu_data_t *pp; + /* We should've only come into this path with interrupts disabled */ + assert(ml_get_interrupts_enabled() == FALSE); - pp = current_cpu_datap(); - pp->quantum_timer_deadline = deadline; - timer_resync_deadlines(); + pp = current_cpu_datap(); + pp->quantum_timer_deadline = deadline; + timer_resync_deadlines(); } /* @@ -188,39 +190,43 @@ quantum_timer_set_deadline(uint64_t deadline) void timer_resync_deadlines(void) { - uint64_t deadline = EndOfAllTime; - uint64_t pmdeadline; - uint64_t quantum_deadline; - rtclock_timer_t *mytimer; - spl_t s = splclock(); - cpu_data_t *pp; - uint32_t decr; + uint64_t deadline = EndOfAllTime; + uint64_t pmdeadline; + uint64_t quantum_deadline; + rtclock_timer_t *mytimer; + spl_t s = splclock(); + cpu_data_t *pp; + uint32_t decr; pp = current_cpu_datap(); - if (!pp->cpu_running) + if (!pp->cpu_running) { /* There's really nothing to do if this processor is down */ return; + } /* * If we have a clock timer set, pick that. */ mytimer = &pp->rtclock_timer; if (!mytimer->has_expired && - 0 < mytimer->deadline && mytimer->deadline < EndOfAllTime) + 0 < mytimer->deadline && mytimer->deadline < EndOfAllTime) { deadline = mytimer->deadline; + } /* * If we have a power management deadline, see if that's earlier. 
*/ pmdeadline = pmCPUGetDeadline(pp); - if (0 < pmdeadline && pmdeadline < deadline) + if (0 < pmdeadline && pmdeadline < deadline) { deadline = pmdeadline; + } /* If we have the quantum timer setup, check that */ quantum_deadline = pp->quantum_timer_deadline; - if ((quantum_deadline > 0) && - (quantum_deadline < deadline)) + if ((quantum_deadline > 0) && + (quantum_deadline < deadline)) { deadline = quantum_deadline; + } /* @@ -232,7 +238,7 @@ timer_resync_deadlines(void) if (decr != 0 && deadline != pmdeadline) { uint64_t queue_count = 0; if (deadline != quantum_deadline) { - /* + /* * For non-quantum timer put the queue count * in the tracepoint. */ @@ -249,11 +255,11 @@ void timer_queue_expire_local( -__unused void *arg) + __unused void *arg) { - rtclock_timer_t *mytimer; - uint64_t abstime; - cpu_data_t *pp; + rtclock_timer_t *mytimer; + uint64_t abstime; + cpu_data_t *pp; pp = current_cpu_datap(); @@ -270,11 +276,11 @@ __unused void *arg) void timer_queue_expire_rescan( -__unused void *arg) + __unused void *arg) { - rtclock_timer_t *mytimer; - uint64_t abstime; - cpu_data_t *pp; + rtclock_timer_t *mytimer; + uint64_t abstime; + cpu_data_t *pp; assert(ml_get_interrupts_enabled() == FALSE); pp = current_cpu_datap(); @@ -300,11 +306,13 @@ int32_t nc_tcl, rt_tcl, bg_tcl, kt_tcl, fp_tcl, ts_tcl, qos_tcl; #endif boolean_t -timer_resort_threshold(uint64_t skew) { - if (skew >= TIMER_RESORT_THRESHOLD_ABSTIME) +timer_resort_threshold(uint64_t skew) +{ + if (skew >= TIMER_RESORT_THRESHOLD_ABSTIME) { return TRUE; - else + } else { return FALSE; + } } /* @@ -313,33 +321,35 @@ */ mpqueue_head_t * timer_queue_assign( - uint64_t deadline) + uint64_t deadline) { - cpu_data_t *cdp = current_cpu_datap(); - mpqueue_head_t *queue; + cpu_data_t *cdp = current_cpu_datap(); + mpqueue_head_t *queue; if (cdp->cpu_running) { queue = &cdp->rtclock_timer.queue; - if (deadline < cdp->rtclock_timer.deadline) + if (deadline < cdp->rtclock_timer.deadline) { timer_set_deadline(deadline); - } - else + } + } else { queue = &cpu_datap(master_cpu)->rtclock_timer.queue; + } - return (queue); + return queue; } void timer_queue_cancel( - mpqueue_head_t *queue, - uint64_t deadline, - uint64_t new_deadline) + mpqueue_head_t *queue, + uint64_t deadline, + uint64_t new_deadline) { - if (queue == &current_cpu_datap()->rtclock_timer.queue) { - if (deadline < new_deadline) - timer_set_deadline(new_deadline); - } + if (queue == &current_cpu_datap()->rtclock_timer.queue) { + if (deadline < new_deadline) { + timer_set_deadline(new_deadline); + } + } } /* @@ -353,19 +363,19 @@ timer_queue_cancel( uint32_t timer_queue_migrate_cpu(int target_cpu) { - cpu_data_t *target_cdp = cpu_datap(target_cpu); - cpu_data_t *cdp = current_cpu_datap(); - int ntimers_moved; + cpu_data_t *target_cdp = cpu_datap(target_cpu); + cpu_data_t *cdp = current_cpu_datap(); + int ntimers_moved; assert(!ml_get_interrupts_enabled()); assert(target_cpu != cdp->cpu_number); assert(target_cpu == master_cpu); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - DECR_TIMER_MIGRATE | DBG_FUNC_START, - target_cpu, - cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >>32), - 0, 0); + DECR_TIMER_MIGRATE | DBG_FUNC_START, + target_cpu, + cdp->rtclock_timer.deadline, (cdp->rtclock_timer.deadline >> 32), + 0, 0); /* * Move timer requests from the local queue to the target processor's. @@ -375,7 +385,7 @@ * resync, the move of this and all later requests is aborted. 
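timer_resync_deadlines above arms the hardware decrementer with the earliest of three independently owned deadlines: the rtclock timer queue, the power-management deadline, and the quantum timer, where 0 (or EndOfAllTime) marks a source as idle. The selection logic distilled into a standalone helper:

    #include <stdint.h>
    #include <stdio.h>

    #define EndOfAllTime 0xFFFFFFFFFFFFFFFFULL

    /* Pick the soonest armed deadline; 0 means "not set". */
    static uint64_t
    earliest_deadline(uint64_t rtclock, uint64_t pm, uint64_t quantum)
    {
        uint64_t deadline = EndOfAllTime;

        if (0 < rtclock && rtclock < EndOfAllTime) {
            deadline = rtclock;
        }
        if (0 < pm && pm < deadline) {
            deadline = pm;
        }
        if (0 < quantum && quantum < deadline) {
            deadline = quantum;
        }
        return deadline;
    }

    int
    main(void)
    {
        /* hypothetical absolute-time values; expect the PM deadline to win */
        printf("arm at %llu\n",
            (unsigned long long)earliest_deadline(5000, 3000, 0));
        return 0;
    }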
*/ ntimers_moved = timer_queue_migrate(&cdp->rtclock_timer.queue, - &target_cdp->rtclock_timer.queue); + &target_cdp->rtclock_timer.queue); /* * Assuming we moved stuff, clear local deadline. @@ -384,10 +394,10 @@ timer_queue_migrate_cpu(int target_cpu) cdp->rtclock_timer.deadline = EndOfAllTime; setPop(EndOfAllTime); } - + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - DECR_TIMER_MIGRATE | DBG_FUNC_END, - target_cpu, ntimers_moved, 0, 0, 0); + DECR_TIMER_MIGRATE | DBG_FUNC_END, + target_cpu, ntimers_moved, 0, 0, 0); return ntimers_moved; } @@ -428,12 +438,13 @@ static timer_coalescing_priority_params_ns_t tcoal_prio_params_init = .timer_coalesce_fp_ns_max = 1 * NSEC_PER_MSEC, .timer_coalesce_ts_ns_max = 1 * NSEC_PER_MSEC, .latency_qos_scale = {3, 2, 1, -2, -15, -15}, - .latency_qos_ns_max ={1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC, - 75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC}, + .latency_qos_ns_max = {1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC, + 75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC}, .latency_tier_rate_limited = {FALSE, FALSE, FALSE, FALSE, TRUE, TRUE}, }; -timer_coalescing_priority_params_ns_t * timer_call_get_priority_params(void) +timer_coalescing_priority_params_ns_t * +timer_call_get_priority_params(void) { return &tcoal_prio_params_init; } diff --git a/osfmk/i386/i386_vm_init.c b/osfmk/i386/i386_vm_init.c index 9b74e8d44..63a1b46ef 100644 --- a/osfmk/i386/i386_vm_init.c +++ b/osfmk/i386/i386_vm_init.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989, 1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
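In tcoal_prio_params_init above, latency_qos_scale holds signed shift counts and latency_qos_ns_max per-tier caps: a non-negative scale coalesces a fraction of the time to deadline (delta >> scale), a negative one widens the window (delta << -scale), and the result is clamped to the matching ns_max entry. The consumer of the table is not in this hunk, so the combination below is a sketch of the usual xnu coalescing pattern (cf. timer_call_slop() in kern/timer_call.c), not a quotation:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_MSEC 1000000ULL

    /* From tcoal_prio_params_init above: signed shift plus per-tier cap. */
    static const int32_t  qos_scale[6]  = { 3, 2, 1, -2, -15, -15 };
    static const uint64_t qos_ns_max[6] = {
        1 * NSEC_PER_MSEC, 5 * NSEC_PER_MSEC, 20 * NSEC_PER_MSEC,
        75 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC, 10000 * NSEC_PER_MSEC
    };

    /* Coalescing window for a timer 'delta_ns' in the future (sketch). */
    static uint64_t
    coalesce_window_ns(unsigned tier, uint64_t delta_ns)
    {
        int32_t scale = qos_scale[tier];
        uint64_t adj = (scale >= 0) ? (delta_ns >> scale)
                                    : (delta_ns << -scale);
        return adj < qos_ns_max[tier] ? adj : qos_ns_max[tier];
    }

    int
    main(void)
    {
        /* tight tier 0: 1/8 of a 4 ms delta, under the 1 ms cap */
        printf("%llu ns\n", (unsigned long long)coalesce_window_ns(0, 4000000));
        /* loose tier 4: a 1 us delta widens 32768x, to ~33 ms */
        printf("%llu ns\n", (unsigned long long)coalesce_window_ns(4, 1000));
        return 0;
    }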
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -84,27 +84,27 @@ #include -vm_size_t mem_size = 0; -pmap_paddr_t first_avail = 0;/* first after page tables */ +vm_size_t mem_size = 0; +pmap_paddr_t first_avail = 0;/* first after page tables */ -uint64_t max_mem; /* Size of physical memory (bytes), adjusted by maxmem */ +uint64_t max_mem; /* Size of physical memory (bytes), adjusted by maxmem */ uint64_t mem_actual; -uint64_t sane_size = 0; /* Memory size for defaults calculations */ +uint64_t sane_size = 0; /* Memory size for defaults calculations */ /* * KASLR parameters */ -ppnum_t vm_kernel_base_page; -vm_offset_t vm_kernel_base; -vm_offset_t vm_kernel_top; -vm_offset_t vm_kernel_stext; -vm_offset_t vm_kernel_etext; -vm_offset_t vm_kernel_slide; -vm_offset_t vm_kernel_slid_base; -vm_offset_t vm_kernel_slid_top; +ppnum_t vm_kernel_base_page; +vm_offset_t vm_kernel_base; +vm_offset_t vm_kernel_top; +vm_offset_t vm_kernel_stext; +vm_offset_t vm_kernel_etext; +vm_offset_t vm_kernel_slide; +vm_offset_t vm_kernel_slid_base; +vm_offset_t vm_kernel_slid_top; vm_offset_t vm_hib_base; -vm_offset_t vm_kext_base = VM_MIN_KERNEL_AND_KEXT_ADDRESS; -vm_offset_t vm_kext_top = VM_MIN_KERNEL_ADDRESS; +vm_offset_t vm_kext_base = VM_MIN_KERNEL_AND_KEXT_ADDRESS; +vm_offset_t vm_kext_top = VM_MIN_KERNEL_ADDRESS; vm_offset_t vm_prelink_stext; vm_offset_t vm_prelink_etext; @@ -116,12 +116,12 @@ vm_offset_t vm_elinkedit; vm_offset_t vm_kernel_builtinkmod_text; vm_offset_t vm_kernel_builtinkmod_text_end; -#define MAXLORESERVE (32 * 1024 * 1024) +#define MAXLORESERVE (32 * 1024 * 1024) -ppnum_t max_ppnum = 0; -ppnum_t lowest_lo = 0; -ppnum_t lowest_hi = 0; -ppnum_t highest_hi = 0; +ppnum_t max_ppnum = 0; +ppnum_t lowest_lo = 0; +ppnum_t lowest_hi = 0; +ppnum_t highest_hi = 0; enum {PMAP_MAX_RESERVED_RANGES = 32}; uint32_t pmap_reserved_pages_allocated = 0; @@ -132,11 +132,11 @@ uint32_t pmap_reserved_ranges = 0; extern unsigned int bsd_mbuf_cluster_reserve(boolean_t *); pmap_paddr_t avail_start, avail_end; -vm_offset_t virtual_avail, virtual_end; -static pmap_paddr_t avail_remaining; +vm_offset_t virtual_avail, virtual_end; +static pmap_paddr_t avail_remaining; vm_offset_t static_memory_end = 0; -vm_offset_t sHIB, eHIB, stext, etext, sdata, edata, end, sconst, econst; +vm_offset_t sHIB, eHIB, stext, etext, sdata, edata, end, sconst, econst; /* * _mh_execute_header is the mach_header for the currently executing kernel @@ -166,53 +166,55 @@ uint64_t firmware_MMIO_bytes; /* * Linker magic to establish the highest address in the kernel. 
*/ -extern void *last_kernel_symbol; +extern void *last_kernel_symbol; -boolean_t memmap = FALSE; -#if DEBUG || DEVELOPMENT +boolean_t memmap = FALSE; +#if DEBUG || DEVELOPMENT static void -kprint_memmap(vm_offset_t maddr, unsigned int msize, unsigned int mcount) { - unsigned int i; - unsigned int j; - pmap_memory_region_t *p = pmap_memory_regions; - EfiMemoryRange *mptr; - addr64_t region_start, region_end; - addr64_t efi_start, efi_end; - - for (j = 0; j < pmap_memory_region_count; j++, p++) { - kprintf("pmap region %d type %d base 0x%llx alloc_up 0x%llx alloc_down 0x%llx top 0x%llx\n", - j, p->type, - (addr64_t) p->base << I386_PGSHIFT, - (addr64_t) p->alloc_up << I386_PGSHIFT, - (addr64_t) p->alloc_down << I386_PGSHIFT, - (addr64_t) p->end << I386_PGSHIFT); - region_start = (addr64_t) p->base << I386_PGSHIFT; - region_end = ((addr64_t) p->end << I386_PGSHIFT) - 1; - mptr = (EfiMemoryRange *) maddr; - for (i = 0; - i < mcount; - i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { - if (mptr->Type != kEfiLoaderCode && - mptr->Type != kEfiLoaderData && - mptr->Type != kEfiBootServicesCode && - mptr->Type != kEfiBootServicesData && - mptr->Type != kEfiConventionalMemory) { - efi_start = (addr64_t)mptr->PhysicalStart; - efi_end = efi_start + ((vm_offset_t)mptr->NumberOfPages << I386_PGSHIFT) - 1; - if ((efi_start >= region_start && efi_start <= region_end) || - (efi_end >= region_start && efi_end <= region_end)) { - kprintf(" *** Overlapping region with EFI runtime region %d\n", i); - } - } - } - } +kprint_memmap(vm_offset_t maddr, unsigned int msize, unsigned int mcount) +{ + unsigned int i; + unsigned int j; + pmap_memory_region_t *p = pmap_memory_regions; + EfiMemoryRange *mptr; + addr64_t region_start, region_end; + addr64_t efi_start, efi_end; + + for (j = 0; j < pmap_memory_region_count; j++, p++) { + kprintf("pmap region %d type %d base 0x%llx alloc_up 0x%llx alloc_down 0x%llx top 0x%llx\n", + j, p->type, + (addr64_t) p->base << I386_PGSHIFT, + (addr64_t) p->alloc_up << I386_PGSHIFT, + (addr64_t) p->alloc_down << I386_PGSHIFT, + (addr64_t) p->end << I386_PGSHIFT); + region_start = (addr64_t) p->base << I386_PGSHIFT; + region_end = ((addr64_t) p->end << I386_PGSHIFT) - 1; + mptr = (EfiMemoryRange *) maddr; + for (i = 0; + i < mcount; + i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { + if (mptr->Type != kEfiLoaderCode && + mptr->Type != kEfiLoaderData && + mptr->Type != kEfiBootServicesCode && + mptr->Type != kEfiBootServicesData && + mptr->Type != kEfiConventionalMemory) { + efi_start = (addr64_t)mptr->PhysicalStart; + efi_end = efi_start + ((vm_offset_t)mptr->NumberOfPages << I386_PGSHIFT) - 1; + if ((efi_start >= region_start && efi_start <= region_end) || + (efi_end >= region_start && efi_end <= region_end)) { + kprintf(" *** Overlapping region with EFI runtime region %d\n", i); + } + } + } + } } -#define DPRINTF(x...) do { if (memmap) kprintf(x); } while (0) +#define DPRINTF(x...) do { if (memmap) kprintf(x); } while (0) #else static void -kprint_memmap(vm_offset_t maddr, unsigned int msize, unsigned int mcount) { +kprint_memmap(vm_offset_t maddr, unsigned int msize, unsigned int mcount) +{ #pragma unused(maddr, msize, mcount) } @@ -223,20 +225,20 @@ kprint_memmap(vm_offset_t maddr, unsigned int msize, unsigned int mcount) { * Basic VM initialization. 
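A note on the KASLR slide check performed in the i386_vm_init() body below: the slide is computed as the loaded base minus the static (on-disk) base, then cross-checked against the slide the bootloader reported, panicking on mismatch; a zero boot slide means the kernel runs unslid. A hedged user-space sketch of that logic (compute_slide() and its error path are illustrative stand-ins, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t
compute_slide(uint64_t loaded_base, uint64_t static_base, uint64_t boot_kslide)
{
    uint64_t slide = loaded_base - static_base;

    if (boot_kslide != 0 && slide != boot_kslide) {
        /* i386_vm_init() panics here: base inconsistent with slide */
        fprintf(stderr, "inconsistent slide\n");
        exit(1);
    }
    /* no bootloader slide: the computed value is static and ignored */
    return boot_kslide != 0 ? slide : 0;
}

int
main(void)
{
    printf("slide 0x%llx\n", (unsigned long long)compute_slide(
        0xffffff8000300000ULL, 0xffffff8000100000ULL, 0x200000ULL));
    return 0;
}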
*/ void -i386_vm_init(uint64_t maxmem, - boolean_t IA32e, - boot_args *args) +i386_vm_init(uint64_t maxmem, + boolean_t IA32e, + boot_args *args) { pmap_memory_region_t *pmptr; - pmap_memory_region_t *prev_pmptr; + pmap_memory_region_t *prev_pmptr; EfiMemoryRange *mptr; - unsigned int mcount; - unsigned int msize; + unsigned int mcount; + unsigned int msize; vm_offset_t maddr; ppnum_t fap; unsigned int i; ppnum_t maxpg = 0; - uint32_t pmap_type; + uint32_t pmap_type; uint32_t maxloreserve; uint32_t maxdmaaddr; uint32_t mbuf_reserve = 0; @@ -245,7 +247,7 @@ i386_vm_init(uint64_t maxmem, vm_kernel_base_page = i386_btop(args->kaddr); vm_offset_t base_address; vm_offset_t static_base_address; - + PE_parse_boot_argn("memmap", &memmap, sizeof(memmap)); /* @@ -256,12 +258,13 @@ i386_vm_init(uint64_t maxmem, vm_kernel_slide = base_address - static_base_address; if (args->kslide) { kprintf("KASLR slide: 0x%016lx dynamic\n", vm_kernel_slide); - if (vm_kernel_slide != ((vm_offset_t)args->kslide)) + if (vm_kernel_slide != ((vm_offset_t)args->kslide)) { panic("Kernel base inconsistent with slide - rebased?"); + } } else { /* No slide relative to on-disk symbols */ kprintf("KASLR slide: 0x%016lx static and ignored\n", - vm_kernel_slide); + vm_kernel_slide); vm_kernel_slide = 0; } @@ -274,7 +277,7 @@ i386_vm_init(uint64_t maxmem, uint32_t cmd; loadcmd = (struct load_command *)((uintptr_t)&_mh_execute_header + - sizeof (_mh_execute_header)); + sizeof(_mh_execute_header)); for (cmd = 0; cmd < _mh_execute_header.ncmds; cmd++) { if (loadcmd->cmd == LC_DYSYMTAB) { @@ -291,27 +294,27 @@ i386_vm_init(uint64_t maxmem, } /* - * Now retrieve addresses for end, edata, and etext + * Now retrieve addresses for end, edata, and etext * from MACH-O headers. */ segTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, - "__TEXT", &segSizeTEXT); + "__TEXT", &segSizeTEXT); segDATAB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, - "__DATA", &segSizeDATA); + "__DATA", &segSizeDATA); segLINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, - "__LINKEDIT", &segSizeLINK); + "__LINKEDIT", &segSizeLINK); segHIBB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, - "__HIB", &segSizeHIB); + "__HIB", &segSizeHIB); segPRELINKTEXTB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, - "__PRELINK_TEXT", &segSizePRELINKTEXT); + "__PRELINK_TEXT", &segSizePRELINKTEXT); segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header, - "__PRELINK_INFO", &segSizePRELINKINFO); + "__PRELINK_INFO", &segSizePRELINKINFO); segTEXT = getsegbynamefromheader(&_mh_execute_header, - "__TEXT"); + "__TEXT"); segDATA = getsegbynamefromheader(&_mh_execute_header, - "__DATA"); + "__DATA"); segCONST = getsegbynamefromheader(&_mh_execute_header, - "__CONST"); + "__CONST"); cursectTEXT = lastsectTEXT = firstsect(segTEXT); /* Discover the last TEXT section within the TEXT segment */ while ((cursectTEXT = nextsect(segTEXT, cursectTEXT)) != NULL) { @@ -333,8 +336,8 @@ i386_vm_init(uint64_t maxmem, segSizeConst = segCONST->vmsize; econst = sconst + segSizeConst; - assert(((sconst|econst) & PAGE_MASK) == 0); - + assert(((sconst | econst) & PAGE_MASK) == 0); + DPRINTF("segTEXTB = %p\n", (void *) segTEXTB); DPRINTF("segDATAB = %p\n", (void *) segDATAB); DPRINTF("segLINKB = %p\n", (void *) segLINKB); @@ -373,26 +376,27 @@ i386_vm_init(uint64_t maxmem, avail_remaining = 0; avail_end = 0; pmptr = pmap_memory_regions; - prev_pmptr = 0; + prev_pmptr = 0; pmap_memory_region_count = pmap_memory_region_current = 0; fap = 
(ppnum_t) i386_btop(first_avail); maddr = ml_static_ptovirt((vm_offset_t)args->MemoryMap); mptr = (EfiMemoryRange *)maddr; - if (args->MemoryMapDescriptorSize == 0) - panic("Invalid memory map descriptor size"); - msize = args->MemoryMapDescriptorSize; - mcount = args->MemoryMapSize / msize; + if (args->MemoryMapDescriptorSize == 0) { + panic("Invalid memory map descriptor size"); + } + msize = args->MemoryMapDescriptorSize; + mcount = args->MemoryMapSize / msize; #define FOURGIG 0x0000000100000000ULL #define ONEGIG 0x0000000040000000ULL for (i = 0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) { - ppnum_t base, top; + ppnum_t base, top; uint64_t region_bytes = 0; if (pmap_memory_region_count >= PMAP_MEMORY_REGIONS_SIZE) { - kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count); + kprintf("WARNING: truncating memory region count at %d\n", pmap_memory_region_count); break; } base = (ppnum_t) (mptr->PhysicalStart >> I386_PGSHIFT); @@ -400,7 +404,7 @@ i386_vm_init(uint64_t maxmem, if (base == 0) { /* - * Avoid having to deal with the edge case of the + * Avoid having to deal with the edge case of the * very first possible physical page and the roll-over * to -1; just ignore that page. */ @@ -409,7 +413,7 @@ i386_vm_init(uint64_t maxmem, } if (top + 1 == 0) { /* - * Avoid having to deal with the edge case of the + * Avoid having to deal with the edge case of the * very last possible physical page and the roll-over * to 0; just ignore that page. */ @@ -424,10 +428,11 @@ i386_vm_init(uint64_t maxmem, continue; } -#if MR_RSV_TEST +#if MR_RSV_TEST static uint32_t nmr = 0; - if ((base > 0x20000) && (nmr++ < 4)) + if ((base > 0x20000) && (nmr++ < 4)) { mptr->Attribute |= EFI_MEMORY_KERN_RESERVED; + } #endif region_bytes = (uint64_t)(mptr->NumberOfPages << I386_PGSHIFT); pmap_type = mptr->Type; @@ -438,22 +443,22 @@ i386_vm_init(uint64_t maxmem, case kEfiBootServicesCode: case kEfiBootServicesData: case kEfiConventionalMemory: - /* + /* * Consolidate usable memory types into one. */ - pmap_type = kEfiConventionalMemory; - sane_size += region_bytes; + pmap_type = kEfiConventionalMemory; + sane_size += region_bytes; firmware_Conventional_bytes += region_bytes; break; - /* - * sane_size should reflect the total amount of physical - * RAM in the system, not just the amount that is - * available for the OS to use. - * We now get this value from SMBIOS tables - * rather than reverse engineering the memory map. - * But the legacy computation of "sane_size" is kept - * for diagnostic information. - */ + /* + * sane_size should reflect the total amount of physical + * RAM in the system, not just the amount that is + * available for the OS to use. + * We now get this value from SMBIOS tables + * rather than reverse engineering the memory map. + * But the legacy computation of "sane_size" is kept + * for diagnostic information. + */ case kEfiRuntimeServicesCode: case kEfiRuntimeServicesData: @@ -470,7 +475,7 @@ i386_vm_init(uint64_t maxmem, break; case kEfiPalCode: firmware_PalCode_bytes += region_bytes; - sane_size += region_bytes; + sane_size += region_bytes; break; case kEfiReservedMemoryType: @@ -490,13 +495,14 @@ i386_vm_init(uint64_t maxmem, DPRINTF("EFI region %d: type %u/%d, base 0x%x, top 0x%x %s\n", i, mptr->Type, pmap_type, base, top, - (mptr->Attribute&EFI_MEMORY_KERN_RESERVED)? "RESERVED" : - (mptr->Attribute&EFI_MEMORY_RUNTIME)? "RUNTIME" : ""); + (mptr->Attribute & EFI_MEMORY_KERN_RESERVED)? 
"RESERVED" : + (mptr->Attribute & EFI_MEMORY_RUNTIME)? "RUNTIME" : ""); if (maxpg) { - if (base >= maxpg) + if (base >= maxpg) { break; - top = (top > maxpg) ? maxpg : top; + } + top = (top > maxpg) ? maxpg : top; } /* @@ -504,15 +510,15 @@ i386_vm_init(uint64_t maxmem, */ if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME || pmap_type != kEfiConventionalMemory) { - prev_pmptr = 0; + prev_pmptr = 0; continue; } else { - /* + /* * Usable memory region */ - if (top < I386_LOWMEM_RESERVED || + if (top < I386_LOWMEM_RESERVED || !pal_is_usable_memory(base, top)) { - prev_pmptr = 0; + prev_pmptr = 0; continue; } /* @@ -529,16 +535,17 @@ i386_vm_init(uint64_t maxmem, } if (top < fap) { - /* + /* * entire range below first_avail - * salvage some low memory pages + * salvage some low memory pages * we use some very low memory at startup * mark as already allocated here */ - if (base >= I386_LOWMEM_RESERVED) - pmptr->base = base; - else - pmptr->base = I386_LOWMEM_RESERVED; + if (base >= I386_LOWMEM_RESERVED) { + pmptr->base = base; + } else { + pmptr->base = I386_LOWMEM_RESERVED; + } pmptr->end = top; @@ -548,8 +555,7 @@ i386_vm_init(uint64_t maxmem, pmptr->alloc_up = pmptr->base; pmptr->alloc_down = pmptr->end; pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count; - } - else { + } else { /* * mark as already mapped */ @@ -558,14 +564,13 @@ i386_vm_init(uint64_t maxmem, } pmptr->type = pmap_type; pmptr->attribute = mptr->Attribute; - } - else if ( (base < fap) && (top > fap) ) { - /* + } else if ((base < fap) && (top > fap)) { + /* * spans first_avail * put mem below first avail in table but * mark already allocated */ - pmptr->base = base; + pmptr->base = base; pmptr->end = (fap - 1); pmptr->alloc_up = pmptr->end + 1; pmptr->alloc_down = pmptr->end; @@ -583,22 +588,25 @@ i386_vm_init(uint64_t maxmem, pmptr->attribute = mptr->Attribute; pmptr->alloc_down = pmptr->end = top; - if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) + if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) { pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count; + } } else { - /* + /* * entire range useable */ - pmptr->alloc_up = pmptr->base = base; + pmptr->alloc_up = pmptr->base = base; pmptr->type = pmap_type; pmptr->attribute = mptr->Attribute; pmptr->alloc_down = pmptr->end = top; - if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) + if (mptr->Attribute & EFI_MEMORY_KERN_RESERVED) { pmap_reserved_range_indices[pmap_last_reserved_range_index++] = pmap_memory_region_count; + } } - if (i386_ptob(pmptr->end) > avail_end ) - avail_end = i386_ptob(pmptr->end); + if (i386_ptob(pmptr->end) > avail_end) { + avail_end = i386_ptob(pmptr->end); + } avail_remaining += (pmptr->end - pmptr->base); coalescing_permitted = (prev_pmptr && (pmptr->attribute == prev_pmptr->attribute) && ((pmptr->attribute & EFI_MEMORY_KERN_RESERVED) == 0)); @@ -610,12 +618,11 @@ i386_vm_init(uint64_t maxmem, (coalescing_permitted) && (pmptr->base == pmptr->alloc_up) && (prev_pmptr->end == prev_pmptr->alloc_down) && - (pmptr->base == (prev_pmptr->end + 1))) - { + (pmptr->base == (prev_pmptr->end + 1))) { prev_pmptr->end = pmptr->end; prev_pmptr->alloc_down = pmptr->alloc_down; } else { - pmap_memory_region_count++; + pmap_memory_region_count++; prev_pmptr = pmptr; pmptr++; } @@ -636,9 +643,10 @@ i386_vm_init(uint64_t maxmem, * we now use the memory size reported by EFI/Booter. 
*/ sane_size = (sane_size + 128 * MB - 1) & ~((uint64_t)(128 * MB - 1)); - if (sane_size != mem_actual) + if (sane_size != mem_actual) { printf("mem_actual: 0x%llx\n legacy sane_size: 0x%llx\n", - mem_actual, sane_size); + mem_actual, sane_size); + } sane_size = mem_actual; /* @@ -649,39 +657,41 @@ i386_vm_init(uint64_t maxmem, if (maxmem == 0 && sane_size > KERNEL_MAXMEM) { maxmem = KERNEL_MAXMEM; printf("Physical memory %lld bytes capped at %dGB\n", - sane_size, (uint32_t) (KERNEL_MAXMEM/GB)); + sane_size, (uint32_t) (KERNEL_MAXMEM / GB)); } /* * if user set maxmem, reduce memory sizes */ - if ( (maxmem > (uint64_t)first_avail) && (maxmem < sane_size)) { + if ((maxmem > (uint64_t)first_avail) && (maxmem < sane_size)) { ppnum_t discarded_pages = (ppnum_t)((sane_size - maxmem) >> I386_PGSHIFT); - ppnum_t highest_pn = 0; - ppnum_t cur_end = 0; - uint64_t pages_to_use; - unsigned cur_region = 0; + ppnum_t highest_pn = 0; + ppnum_t cur_end = 0; + uint64_t pages_to_use; + unsigned cur_region = 0; sane_size = maxmem; - if (avail_remaining > discarded_pages) + if (avail_remaining > discarded_pages) { avail_remaining -= discarded_pages; - else + } else { avail_remaining = 0; - + } + pages_to_use = avail_remaining; while (cur_region < pmap_memory_region_count && pages_to_use) { - for (cur_end = pmap_memory_regions[cur_region].base; - cur_end < pmap_memory_regions[cur_region].end && pages_to_use; - cur_end++) { - if (cur_end > highest_pn) - highest_pn = cur_end; + for (cur_end = pmap_memory_regions[cur_region].base; + cur_end < pmap_memory_regions[cur_region].end && pages_to_use; + cur_end++) { + if (cur_end > highest_pn) { + highest_pn = cur_end; + } pages_to_use--; } if (pages_to_use == 0) { - pmap_memory_regions[cur_region].end = cur_end; - pmap_memory_regions[cur_region].alloc_down = cur_end; + pmap_memory_regions[cur_region].end = cur_end; + pmap_memory_regions[cur_region].alloc_down = cur_end; } cur_region++; @@ -695,55 +705,58 @@ i386_vm_init(uint64_t maxmem, * mem_size is only a 32 bit container... 
follow the PPC route * and pin it to a 2 Gbyte maximum */ - if (sane_size > (FOURGIG >> 1)) - mem_size = (vm_size_t)(FOURGIG >> 1); - else - mem_size = (vm_size_t)sane_size; + if (sane_size > (FOURGIG >> 1)) { + mem_size = (vm_size_t)(FOURGIG >> 1); + } else { + mem_size = (vm_size_t)sane_size; + } max_mem = sane_size; - kprintf("Physical memory %llu MB\n", sane_size/MB); + kprintf("Physical memory %llu MB\n", sane_size / MB); max_valid_low_ppnum = (2 * GB) / PAGE_SIZE; - if (!PE_parse_boot_argn("max_valid_dma_addr", &maxdmaaddr, sizeof (maxdmaaddr))) { - max_valid_dma_address = (uint64_t)4 * (uint64_t)GB; + if (!PE_parse_boot_argn("max_valid_dma_addr", &maxdmaaddr, sizeof(maxdmaaddr))) { + max_valid_dma_address = (uint64_t)4 * (uint64_t)GB; } else { - max_valid_dma_address = ((uint64_t) maxdmaaddr) * MB; + max_valid_dma_address = ((uint64_t) maxdmaaddr) * MB; - if ((max_valid_dma_address / PAGE_SIZE) < max_valid_low_ppnum) + if ((max_valid_dma_address / PAGE_SIZE) < max_valid_low_ppnum) { max_valid_low_ppnum = (ppnum_t)(max_valid_dma_address / PAGE_SIZE); + } } if (avail_end >= max_valid_dma_address) { - - if (!PE_parse_boot_argn("maxloreserve", &maxloreserve, sizeof (maxloreserve))) { - - if (sane_size >= (ONEGIG * 15)) + if (!PE_parse_boot_argn("maxloreserve", &maxloreserve, sizeof(maxloreserve))) { + if (sane_size >= (ONEGIG * 15)) { maxloreserve = (MAXLORESERVE / PAGE_SIZE) * 4; - else if (sane_size >= (ONEGIG * 7)) + } else if (sane_size >= (ONEGIG * 7)) { maxloreserve = (MAXLORESERVE / PAGE_SIZE) * 2; - else + } else { maxloreserve = MAXLORESERVE / PAGE_SIZE; + } #if SOCKETS mbuf_reserve = bsd_mbuf_cluster_reserve(&mbuf_override) / PAGE_SIZE; #endif - } else + } else { maxloreserve = (maxloreserve * (1024 * 1024)) / PAGE_SIZE; + } if (maxloreserve) { - vm_lopage_free_limit = maxloreserve; - + vm_lopage_free_limit = maxloreserve; + if (mbuf_override == TRUE) { vm_lopage_free_limit += mbuf_reserve; vm_lopage_lowater = 0; - } else + } else { vm_lopage_lowater = vm_lopage_free_limit / 16; + } vm_lopage_refill = TRUE; vm_lopage_needed = TRUE; } } - + /* * Initialize kernel physical map. * Kernel virtual address starts at VM_KERNEL_MIN_ADDRESS. @@ -767,7 +780,8 @@ boolean_t pmap_next_page_reserved(ppnum_t *); * errata on some hardware. 
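On the maxmem trimming loop a little above: when the user caps memory with maxmem, the code spends a page budget across the region table in order and truncates the region in which the budget runs out. A reduced sketch of that idea; region_t and trim_regions() are illustrative names, and end is exclusive here as in the kernel's loop bound:

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t base;  /* first page number */
    uint64_t end;   /* first page past the region */
} region_t;

static void
trim_regions(region_t *regions, unsigned count, uint64_t pages_to_use)
{
    for (unsigned i = 0; i < count && pages_to_use; i++) {
        uint64_t cur;

        for (cur = regions[i].base;
            cur < regions[i].end && pages_to_use;
            cur++) {
            pages_to_use--;
        }
        if (pages_to_use == 0) {
            regions[i].end = cur;  /* budget exhausted: truncate here */
        }
    }
}

int
main(void)
{
    region_t r[2] = { { 10, 20 }, { 100, 200 } };

    trim_regions(r, 2, 15);  /* keep only the first 15 pages */
    printf("region1 end=%llu\n", (unsigned long long)r[1].end);  /* 105 */
    return 0;
}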
*/ boolean_t -pmap_next_page_reserved(ppnum_t *pn) { +pmap_next_page_reserved(ppnum_t *pn) +{ if (pmap_reserved_ranges) { uint32_t n; pmap_memory_region_t *region; @@ -778,11 +792,13 @@ pmap_next_page_reserved(ppnum_t *pn) { *pn = region->alloc_up++; avail_remaining--; - if (*pn > max_ppnum) + if (*pn > max_ppnum) { max_ppnum = *pn; + } - if (lowest_lo == 0 || *pn < lowest_lo) + if (lowest_lo == 0 || *pn < lowest_lo) { lowest_lo = *pn; + } pmap_reserved_pages_allocated++; #if DEBUG @@ -800,13 +816,14 @@ pmap_next_page_reserved(ppnum_t *pn) { boolean_t pmap_next_page_hi( - ppnum_t *pn) + ppnum_t *pn) { pmap_memory_region_t *region; - int n; + int n; - if (pmap_next_page_reserved(pn)) + if (pmap_next_page_reserved(pn)) { return TRUE; + } if (avail_remaining) { for (n = pmap_memory_region_count - 1; n >= 0; n--) { @@ -816,17 +833,21 @@ pmap_next_page_hi( *pn = region->alloc_down--; avail_remaining--; - if (*pn > max_ppnum) + if (*pn > max_ppnum) { max_ppnum = *pn; + } - if (lowest_lo == 0 || *pn < lowest_lo) - lowest_lo = *pn; + if (lowest_lo == 0 || *pn < lowest_lo) { + lowest_lo = *pn; + } - if (lowest_hi == 0 || *pn < lowest_hi) - lowest_hi = *pn; + if (lowest_hi == 0 || *pn < lowest_hi) { + lowest_hi = *pn; + } - if (*pn > highest_hi) - highest_hi = *pn; + if (*pn > highest_hi) { + highest_hi = *pn; + } return TRUE; } @@ -838,24 +859,28 @@ pmap_next_page_hi( boolean_t pmap_next_page( - ppnum_t *pn) + ppnum_t *pn) { - if (avail_remaining) while (pmap_memory_region_current < pmap_memory_region_count) { - if (pmap_memory_regions[pmap_memory_region_current].alloc_up > - pmap_memory_regions[pmap_memory_region_current].alloc_down) { - pmap_memory_region_current++; - continue; - } - *pn = pmap_memory_regions[pmap_memory_region_current].alloc_up++; - avail_remaining--; + if (avail_remaining) { + while (pmap_memory_region_current < pmap_memory_region_count) { + if (pmap_memory_regions[pmap_memory_region_current].alloc_up > + pmap_memory_regions[pmap_memory_region_current].alloc_down) { + pmap_memory_region_current++; + continue; + } + *pn = pmap_memory_regions[pmap_memory_region_current].alloc_up++; + avail_remaining--; - if (*pn > max_ppnum) - max_ppnum = *pn; + if (*pn > max_ppnum) { + max_ppnum = *pn; + } - if (lowest_lo == 0 || *pn < lowest_lo) - lowest_lo = *pn; + if (lowest_lo == 0 || *pn < lowest_lo) { + lowest_lo = *pn; + } - return TRUE; + return TRUE; + } } return FALSE; } @@ -865,13 +890,13 @@ boolean_t pmap_valid_page( ppnum_t pn) { - unsigned int i; + unsigned int i; pmap_memory_region_t *pmptr = pmap_memory_regions; for (i = 0; i < pmap_memory_region_count; i++, pmptr++) { - if ( (pn >= pmptr->base) && (pn <= pmptr->end) ) - return TRUE; + if ((pn >= pmptr->base) && (pn <= pmptr->end)) { + return TRUE; + } } return FALSE; } - diff --git a/osfmk/i386/io_map.c b/osfmk/i386/io_map.c index 012d4117f..2a0a4adf4 100644 --- a/osfmk/i386/io_map.c +++ b/osfmk/i386/io_map.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
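Returning to the pmap_next_page* hunks above: each region is consumed from both ends, with alloc_up handing out pages from the bottom and alloc_down from the top, and the region is exhausted once the cursors cross. A hedged standalone sketch of that two-ended allocation (all names here are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t alloc_up;    /* next page to hand out from the bottom */
    uint32_t alloc_down;  /* next page to hand out from the top */
} region_t;

static bool
next_page_hi(region_t *r, uint32_t *pn)
{
    if (r->alloc_down < r->alloc_up) {
        return false;  /* cursors crossed: region empty */
    }
    *pn = r->alloc_down--;
    return true;
}

static bool
next_page_lo(region_t *r, uint32_t *pn)
{
    if (r->alloc_up > r->alloc_down) {
        return false;
    }
    *pn = r->alloc_up++;
    return true;
}

int
main(void)
{
    region_t r = { 100, 102 };  /* pages 100..102 */
    uint32_t pn;

    while (next_page_hi(&r, &pn)) {
        printf("hi got %u\n", pn);  /* 102, 101, 100 */
    }
    printf("lo left: %d\n", next_page_lo(&r, &pn));  /* 0: exhausted */
    return 0;
}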
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ #include #include -extern vm_offset_t virtual_avail; +extern vm_offset_t virtual_avail; /* * Allocate and map memory for devices that may need to be mapped before @@ -72,34 +72,34 @@ extern vm_offset_t virtual_avail; vm_offset_t io_map(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags) { - vm_offset_t start; + vm_offset_t start; if (kernel_map == VM_MAP_NULL) { - /* - * VM is not initialized. Grab memory. - */ - start = virtual_avail; - virtual_avail += round_page(size); + /* + * VM is not initialized. Grab memory. 
+ */ + start = virtual_avail; + virtual_avail += round_page(size); #if KASAN - kasan_notify_address(start, size); + kasan_notify_address(start, size); #endif - (void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size), - VM_PROT_READ|VM_PROT_WRITE, - flags); - } - else { - (void) kmem_alloc_pageable(kernel_map, &start, round_page(size), VM_KERN_MEMORY_IOKIT); - (void) pmap_map(start, phys_addr, phys_addr + round_page(size), - VM_PROT_READ|VM_PROT_WRITE, - flags); + (void) pmap_map_bd(start, phys_addr, phys_addr + round_page(size), + VM_PROT_READ | VM_PROT_WRITE, + flags); + } else { + (void) kmem_alloc_pageable(kernel_map, &start, round_page(size), VM_KERN_MEMORY_IOKIT); + (void) pmap_map(start, phys_addr, phys_addr + round_page(size), + VM_PROT_READ | VM_PROT_WRITE, + flags); } - return (start); + return start; } /* just wrap this since io_map handles it */ -vm_offset_t io_map_spec(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags) +vm_offset_t +io_map_spec(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags) { - return (io_map(phys_addr, size, flags)); + return io_map(phys_addr, size, flags); } diff --git a/osfmk/i386/io_map_entries.h b/osfmk/i386/io_map_entries.h index 3dc373b47..f19f3373c 100644 --- a/osfmk/i386/io_map_entries.h +++ b/osfmk/i386/io_map_entries.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _I386_IO_MAP_ENTRIES #define _I386_IO_MAP_ENTRIES @@ -36,17 +36,16 @@ #include #include -#ifdef __APPLE_API_PRIVATE +#ifdef __APPLE_API_PRIVATE __BEGIN_DECLS -extern vm_offset_t io_map( - vm_map_offset_t phys_addr, - vm_size_t size, - unsigned int flags); +extern vm_offset_t io_map( + vm_map_offset_t phys_addr, + vm_size_t size, + unsigned int flags); extern vm_offset_t io_map_spec(vm_map_offset_t phys_addr, vm_size_t size, unsigned int flags); __END_DECLS -#endif /* __APPLE_API_PRIVATE */ +#endif /* __APPLE_API_PRIVATE */ #endif /* _I386_IO_MAP_ENTRIES */ -#endif /* KERNEL_PRIVATE */ - +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/i386/io_port.h b/osfmk/i386/io_port.h index 0dccd723f..6257df9d0 100644 --- a/osfmk/i386/io_port.h +++ b/osfmk/i386/io_port.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
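On io_map() above: before the kernel map exists, virtual space is carved by bumping virtual_avail directly; afterwards the request goes through the normal pageable allocator. A user-space sketch of the two paths, with the allocator and mapping calls stubbed out (every name and address below is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define round_page(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static uint64_t virtual_cursor = 0xffffff8000000000ULL;
static bool vm_initialized = false;

static uint64_t
alloc_pageable(uint64_t size)  /* stand-in for kmem_alloc_pageable() */
{
    static uint64_t next = 0xffffff9000000000ULL;
    uint64_t start = next;

    next += size;
    return start;
}

static uint64_t
io_map_sketch(uint64_t phys_addr, uint64_t size)
{
    uint64_t start;

    if (!vm_initialized) {
        /* VM not up yet: grab virtual space by bumping the cursor */
        start = virtual_cursor;
        virtual_cursor += round_page(size);
    } else {
        start = alloc_pageable(round_page(size));
    }
    /* ...would map [phys_addr, phys_addr + round_page(size)) here... */
    (void)phys_addr;
    return start;
}

int
main(void)
{
    printf("early: 0x%llx\n",
        (unsigned long long)io_map_sketch(0xfee00000, 100));
    vm_initialized = true;
    printf("late:  0x%llx\n",
        (unsigned long long)io_map_sketch(0xfee00000, 100));
    return 0;
}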
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,49 +22,49 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _I386_IO_PORT_H_ -#define _I386_IO_PORT_H_ +#ifndef _I386_IO_PORT_H_ +#define _I386_IO_PORT_H_ /* * IO register definitions. */ -typedef unsigned short io_reg_t; +typedef unsigned short io_reg_t; -#define IO_REG_NULL (0x00ff) /* reserved */ +#define IO_REG_NULL (0x00ff) /* reserved */ /* * Allocate and destroy io port sets for users to map into @@ -72,11 +72,11 @@ typedef unsigned short io_reg_t; */ #if 0 -extern void io_port_create( - device_t device, - io_reg_t * portlist); -extern void io_port_destroy( - device_t device); +extern void io_port_create( + device_t device, + io_reg_t * portlist); +extern void io_port_destroy( + device_t device); #endif -#endif /* _I386_IO_PORT_H_ */ +#endif /* _I386_IO_PORT_H_ */ diff --git a/osfmk/i386/iopb.h b/osfmk/i386/iopb.h index e73d13933..0aae7399b 100644 --- a/osfmk/i386/iopb.h +++ b/osfmk/i386/iopb.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -57,8 +57,8 @@ /* */ -#ifndef _I386_IOPB_H_ -#define _I386_IOPB_H_ +#ifndef _I386_IOPB_H_ +#define _I386_IOPB_H_ #include #include @@ -69,25 +69,25 @@ * Allows only IO ports 0 .. 0xffff: for ISA machines. */ -#define iopb_howmany(a,b) (((a)+(b)-1)/(b)) +#define iopb_howmany(a, b) (((a)+(b)-1)/(b)) -#define IOPB_MAX 0xffff /* x86 allows ports 0..ffff */ -#define IOPB_BYTES (iopb_howmany(IOPB_MAX+1,8)) +#define IOPB_MAX 0xffff /* x86 allows ports 0..ffff */ +#define IOPB_BYTES (iopb_howmany(IOPB_MAX+1,8)) -typedef unsigned char isa_iopb[IOPB_BYTES]; +typedef unsigned char isa_iopb[IOPB_BYTES]; /* * An IO permission map is a task segment with an IO permission bitmap. */ struct iopb_tss { - struct i386_tss tss; /* task state segment */ - isa_iopb bitmap; /* bitmap of mapped IO ports */ - unsigned int barrier; /* bitmap barrier for CPU slop */ - queue_head_t io_port_list; /* list of mapped IO ports */ - int iopb_desc[2]; /* descriptor for this TSS */ + struct i386_tss tss; /* task state segment */ + isa_iopb bitmap; /* bitmap of mapped IO ports */ + unsigned int barrier; /* bitmap barrier for CPU slop */ + queue_head_t io_port_list; /* list of mapped IO ports */ + int iopb_desc[2]; /* descriptor for this TSS */ }; -typedef struct iopb_tss *iopb_tss_t; +typedef struct iopb_tss *iopb_tss_t; -#endif /* _I386_IOPB_H_ */ +#endif /* _I386_IOPB_H_ */ diff --git a/osfmk/i386/ktss.c b/osfmk/i386/ktss.c index 9d49d2571..0507cb791 100644 --- a/osfmk/i386/ktss.c +++ b/osfmk/i386/ktss.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -73,10 +73,9 @@ * due to a single-step trace trap at system call entry. */ struct sysenter_stack master_sstk - __attribute__ ((section ("__HIB, __desc"))) - __attribute__ ((aligned (16))) = { {0}, 0 }; +__attribute__ ((section("__HIB, __desc"))) +__attribute__ ((aligned(16))) = { {0}, 0 }; -struct x86_64_tss master_ktss64 __attribute__ ((aligned (4096))) __attribute__ ((section ("__HIB, __desc"))) = { +struct x86_64_tss master_ktss64 __attribute__ ((aligned(4096))) __attribute__ ((section("__HIB, __desc"))) = { .io_bit_map_offset = 0x0FFF, }; - diff --git a/osfmk/i386/lapic.c b/osfmk/i386/lapic.c index 0e74dd06e..21db3a6ce 100644 --- a/osfmk/i386/lapic.c +++ b/osfmk/i386/lapic.c @@ -2,7 +2,7 @@ * Copyright (c) 2008-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -48,18 +48,20 @@ /* Base vector for local APIC interrupt sources */ int lapic_interrupt_base = LAPIC_DEFAULT_INTERRUPT_BASE; -int lapic_to_cpu[MAX_LAPICIDS]; -int cpu_to_lapic[MAX_CPUS]; +int lapic_to_cpu[MAX_LAPICIDS]; +int cpu_to_lapic[MAX_CPUS]; void lapic_cpu_map_init(void) { - int i; + int i; - for (i = 0; i < MAX_CPUS; i++) + for (i = 0; i < MAX_CPUS; i++) { cpu_to_lapic[i] = -1; - for (i = 0; i < MAX_LAPICIDS; i++) + } + for (i = 0; i < MAX_LAPICIDS; i++) { lapic_to_cpu[i] = -1; + } } void @@ -81,21 +83,19 @@ lapic_cpu_map(int apic_id, int cpu) uint32_t ml_get_apicid(uint32_t cpu) { - if(cpu >= (uint32_t)MAX_CPUS) - return 0xFFFFFFFF; /* Return -1 if cpu too big */ - + if (cpu >= (uint32_t)MAX_CPUS) { + return 0xFFFFFFFF; /* Return -1 if cpu too big */ + } /* Return the apic ID (or -1 if not configured) */ return (uint32_t)cpu_to_lapic[cpu]; - } uint32_t ml_get_cpuid(uint32_t lapic_index) { - if(lapic_index >= (uint32_t)MAX_LAPICIDS) - return 0xFFFFFFFF; /* Return -1 if cpu too big */ - + if (lapic_index >= (uint32_t)MAX_LAPICIDS) { + return 0xFFFFFFFF; /* Return -1 if cpu too big */ + } /* Return the cpu ID (or -1 if not configured) */ return (uint32_t)lapic_to_cpu[lapic_index]; - } diff --git a/osfmk/i386/lapic.h b/osfmk/i386/lapic.h index ca9db2dbf..9a046a2f5 100644 --- a/osfmk/i386/lapic.h +++ b/osfmk/i386/lapic.h @@ -2,7 +2,7 @@ * Copyright (c) 2008-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ #ifndef _I386_LAPIC_H_ #define _I386_LAPIC_H_ @@ -36,164 +36,164 @@ * Legacy mode definitions. * The register offsets are no longer used by XNU - see LAPIC_MMIO_OFFSET(). 
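On lapic_cpu_map_init() above: the cpu-to-LAPIC-ID mapping is kept in two arrays, one per direction, both initialized to -1 for "not configured", and lookups bounds-check before indexing, returning 0xFFFFFFFF on failure. A minimal analogue; the array sizes and helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define MAX_CPUS 8   /* illustrative sizes, not the kernel's */
#define MAX_IDS  16

static int cpu_to_id[MAX_CPUS];
static int id_to_cpu[MAX_IDS];

static void
map_init(void)
{
    for (int i = 0; i < MAX_CPUS; i++) {
        cpu_to_id[i] = -1;
    }
    for (int i = 0; i < MAX_IDS; i++) {
        id_to_cpu[i] = -1;
    }
}

static void
map_set(int id, int cpu)
{
    cpu_to_id[cpu] = id;
    id_to_cpu[id] = cpu;
}

static uint32_t
get_id(uint32_t cpu)
{
    if (cpu >= MAX_CPUS) {
        return 0xFFFFFFFF;  /* "-1": cpu out of range */
    }
    return (uint32_t)cpu_to_id[cpu];  /* -1 becomes 0xFFFFFFFF too */
}

int
main(void)
{
    map_init();
    map_set(5, 0);
    printf("cpu0 -> id %u, cpu7 -> 0x%X\n",
        (unsigned)get_id(0), (unsigned)get_id(7));
    return 0;
}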
*/ -#define LAPIC_START 0xFEE00000 -#define LAPIC_SIZE 0x00000400 - -#define LAPIC_ID 0x00000020 -#define LAPIC_ID_SHIFT 24 -#define LAPIC_ID_MASK 0xFF -#define LAPIC_VERSION 0x00000030 -#define LAPIC_VERSION_MASK 0xFF -#define LAPIC_TPR 0x00000080 -#define LAPIC_TPR_MASK 0xFF -#define LAPIC_APR 0x00000090 -#define LAPIC_APR_MASK 0xFF -#define LAPIC_PPR 0x000000A0 -#define LAPIC_PPR_MASK 0xFF -#define LAPIC_EOI 0x000000B0 -#define LAPIC_REMOTE_READ 0x000000C0 -#define LAPIC_LDR 0x000000D0 -#define LAPIC_LDR_SHIFT 24 -#define LAPIC_DFR 0x000000E0 -#define LAPIC_DFR_FLAT 0xFFFFFFFF -#define LAPIC_DFR_CLUSTER 0x0FFFFFFF -#define LAPIC_DFR_SHIFT 28 -#define LAPIC_SVR 0x000000F0 -#define LAPIC_SVR_MASK 0x0FF -#define LAPIC_SVR_ENABLE 0x100 -#define LAPIC_SVR_FOCUS_OFF 0x200 -#define LAPIC_ISR_BASE 0x00000100 -#define LAPIC_TMR_BASE 0x00000180 -#define LAPIC_IRR_BASE 0x00000200 -#define LAPIC_ERROR_STATUS 0x00000280 -#define LAPIC_LVT_CMCI 0x000002F0 -#define LAPIC_ICR 0x00000300 -#define LAPIC_ICR_VECTOR_MASK 0x000FF -#define LAPIC_ICR_DM_MASK 0x00700 -#define LAPIC_ICR_DM_FIXED 0x00000 -#define LAPIC_ICR_DM_LOWEST 0x00100 -#define LAPIC_ICR_DM_SMI 0x00200 -#define LAPIC_ICR_DM_REMOTE 0x00300 -#define LAPIC_ICR_DM_NMI 0x00400 -#define LAPIC_ICR_DM_INIT 0x00500 -#define LAPIC_ICR_DM_STARTUP 0x00600 -#define LAPIC_ICR_DM_LOGICAL 0x00800 -#define LAPIC_ICR_DS_PENDING 0x01000 -#define LAPIC_ICR_LEVEL_ASSERT 0x04000 -#define LAPIC_ICR_TRIGGER_LEVEL 0x08000 -#define LAPIC_ICR_RR_MASK 0x30000 -#define LAPIC_ICR_RR_INVALID 0x00000 -#define LAPIC_ICR_RR_INPROGRESS 0x10000 -#define LAPIC_ICR_RR_VALID 0x20000 -#define LAPIC_ICR_DSS_MASK 0xC0000 -#define LAPIC_ICR_DSS_DEST 0x00000 -#define LAPIC_ICR_DSS_SELF 0x40000 -#define LAPIC_ICR_DSS_ALL 0x80000 -#define LAPIC_ICR_DSS_OTHERS 0xC0000 -#define LAPIC_ICRD 0x00000310 -#define LAPIC_ICRD_DEST_SHIFT 24 -#define LAPIC_LVT_TIMER 0x00000320 -#define LAPIC_LVT_THERMAL 0x00000330 -#define LAPIC_LVT_PERFCNT 0x00000340 -#define LAPIC_LVT_LINT0 0x00000350 -#define LAPIC_LVT_LINT1 0x00000360 -#define LAPIC_LVT_ERROR 0x00000370 -#define LAPIC_LVT_VECTOR_MASK 0x000FF -#define LAPIC_LVT_DM_SHIFT 8 -#define LAPIC_LVT_DM_MASK 0x00007 -#define LAPIC_LVT_DM_FIXED 0x00000 -#define LAPIC_LVT_DM_NMI 0x00400 -#define LAPIC_LVT_DM_EXTINT 0x00700 -#define LAPIC_LVT_DS_PENDING 0x01000 -#define LAPIC_LVT_IP_PLRITY_LOW 0x02000 -#define LAPIC_LVT_REMOTE_IRR 0x04000 -#define LAPIC_LVT_TM_LEVEL 0x08000 -#define LAPIC_LVT_MASKED 0x10000 -#define LAPIC_LVT_PERIODIC 0x20000 -#define LAPIC_LVT_TSC_DEADLINE 0x40000 -#define LAPIC_LVT_TMR_SHIFT 17 -#define LAPIC_LVT_TMR_MASK 3 -#define LAPIC_TIMER_INITIAL_COUNT 0x00000380 -#define LAPIC_TIMER_CURRENT_COUNT 0x00000390 -#define LAPIC_TIMER_DIVIDE_CONFIG 0x000003E0 +#define LAPIC_START 0xFEE00000 +#define LAPIC_SIZE 0x00000400 + +#define LAPIC_ID 0x00000020 +#define LAPIC_ID_SHIFT 24 +#define LAPIC_ID_MASK 0xFF +#define LAPIC_VERSION 0x00000030 +#define LAPIC_VERSION_MASK 0xFF +#define LAPIC_TPR 0x00000080 +#define LAPIC_TPR_MASK 0xFF +#define LAPIC_APR 0x00000090 +#define LAPIC_APR_MASK 0xFF +#define LAPIC_PPR 0x000000A0 +#define LAPIC_PPR_MASK 0xFF +#define LAPIC_EOI 0x000000B0 +#define LAPIC_REMOTE_READ 0x000000C0 +#define LAPIC_LDR 0x000000D0 +#define LAPIC_LDR_SHIFT 24 +#define LAPIC_DFR 0x000000E0 +#define LAPIC_DFR_FLAT 0xFFFFFFFF +#define LAPIC_DFR_CLUSTER 0x0FFFFFFF +#define LAPIC_DFR_SHIFT 28 +#define LAPIC_SVR 0x000000F0 +#define LAPIC_SVR_MASK 0x0FF +#define LAPIC_SVR_ENABLE 0x100 +#define LAPIC_SVR_FOCUS_OFF 0x200 +#define 
LAPIC_ISR_BASE 0x00000100 +#define LAPIC_TMR_BASE 0x00000180 +#define LAPIC_IRR_BASE 0x00000200 +#define LAPIC_ERROR_STATUS 0x00000280 +#define LAPIC_LVT_CMCI 0x000002F0 +#define LAPIC_ICR 0x00000300 +#define LAPIC_ICR_VECTOR_MASK 0x000FF +#define LAPIC_ICR_DM_MASK 0x00700 +#define LAPIC_ICR_DM_FIXED 0x00000 +#define LAPIC_ICR_DM_LOWEST 0x00100 +#define LAPIC_ICR_DM_SMI 0x00200 +#define LAPIC_ICR_DM_REMOTE 0x00300 +#define LAPIC_ICR_DM_NMI 0x00400 +#define LAPIC_ICR_DM_INIT 0x00500 +#define LAPIC_ICR_DM_STARTUP 0x00600 +#define LAPIC_ICR_DM_LOGICAL 0x00800 +#define LAPIC_ICR_DS_PENDING 0x01000 +#define LAPIC_ICR_LEVEL_ASSERT 0x04000 +#define LAPIC_ICR_TRIGGER_LEVEL 0x08000 +#define LAPIC_ICR_RR_MASK 0x30000 +#define LAPIC_ICR_RR_INVALID 0x00000 +#define LAPIC_ICR_RR_INPROGRESS 0x10000 +#define LAPIC_ICR_RR_VALID 0x20000 +#define LAPIC_ICR_DSS_MASK 0xC0000 +#define LAPIC_ICR_DSS_DEST 0x00000 +#define LAPIC_ICR_DSS_SELF 0x40000 +#define LAPIC_ICR_DSS_ALL 0x80000 +#define LAPIC_ICR_DSS_OTHERS 0xC0000 +#define LAPIC_ICRD 0x00000310 +#define LAPIC_ICRD_DEST_SHIFT 24 +#define LAPIC_LVT_TIMER 0x00000320 +#define LAPIC_LVT_THERMAL 0x00000330 +#define LAPIC_LVT_PERFCNT 0x00000340 +#define LAPIC_LVT_LINT0 0x00000350 +#define LAPIC_LVT_LINT1 0x00000360 +#define LAPIC_LVT_ERROR 0x00000370 +#define LAPIC_LVT_VECTOR_MASK 0x000FF +#define LAPIC_LVT_DM_SHIFT 8 +#define LAPIC_LVT_DM_MASK 0x00007 +#define LAPIC_LVT_DM_FIXED 0x00000 +#define LAPIC_LVT_DM_NMI 0x00400 +#define LAPIC_LVT_DM_EXTINT 0x00700 +#define LAPIC_LVT_DS_PENDING 0x01000 +#define LAPIC_LVT_IP_PLRITY_LOW 0x02000 +#define LAPIC_LVT_REMOTE_IRR 0x04000 +#define LAPIC_LVT_TM_LEVEL 0x08000 +#define LAPIC_LVT_MASKED 0x10000 +#define LAPIC_LVT_PERIODIC 0x20000 +#define LAPIC_LVT_TSC_DEADLINE 0x40000 +#define LAPIC_LVT_TMR_SHIFT 17 +#define LAPIC_LVT_TMR_MASK 3 +#define LAPIC_TIMER_INITIAL_COUNT 0x00000380 +#define LAPIC_TIMER_CURRENT_COUNT 0x00000390 +#define LAPIC_TIMER_DIVIDE_CONFIG 0x000003E0 /* divisor encoded by bits 0,1,3 with bit 2 always 0: */ -#define LAPIC_TIMER_DIVIDE_MASK 0x0000000F -#define LAPIC_TIMER_DIVIDE_2 0x00000000 -#define LAPIC_TIMER_DIVIDE_4 0x00000001 -#define LAPIC_TIMER_DIVIDE_8 0x00000002 -#define LAPIC_TIMER_DIVIDE_16 0x00000003 -#define LAPIC_TIMER_DIVIDE_32 0x00000008 -#define LAPIC_TIMER_DIVIDE_64 0x00000009 -#define LAPIC_TIMER_DIVIDE_128 0x0000000A -#define LAPIC_TIMER_DIVIDE_1 0x0000000B - -#define LAPIC_ID_MAX (LAPIC_ID_MASK) - -#define CPU_NUMBER(r) \ +#define LAPIC_TIMER_DIVIDE_MASK 0x0000000F +#define LAPIC_TIMER_DIVIDE_2 0x00000000 +#define LAPIC_TIMER_DIVIDE_4 0x00000001 +#define LAPIC_TIMER_DIVIDE_8 0x00000002 +#define LAPIC_TIMER_DIVIDE_16 0x00000003 +#define LAPIC_TIMER_DIVIDE_32 0x00000008 +#define LAPIC_TIMER_DIVIDE_64 0x00000009 +#define LAPIC_TIMER_DIVIDE_128 0x0000000A +#define LAPIC_TIMER_DIVIDE_1 0x0000000B + +#define LAPIC_ID_MAX (LAPIC_ID_MASK) + +#define CPU_NUMBER(r) \ movl %gs:CPU_NUMBER_GS,r -#ifndef ASSEMBLER +#ifndef ASSEMBLER typedef enum { - ID = 0x02, - VERSION = 0x03, - TPR = 0x08, - APR = 0x09, - PPR = 0x0A, - EOI = 0x0B, - REMOTE_READ = 0x0C, - LDR = 0x0D, - DFR = 0x0E, - SVR = 0x0F, - ISR_BASE = 0x10, - TMR_BASE = 0x18, - IRR_BASE = 0x20, - ERROR_STATUS = 0x28, - LVT_CMCI = 0x2F, - ICR = 0x30, - ICRD = 0x31, - LVT_TIMER = 0x32, - LVT_THERMAL = 0x33, - LVT_PERFCNT = 0x34, - LVT_LINT0 = 0x35, - LVT_LINT1 = 0x36, - LVT_ERROR = 0x37, - TIMER_INITIAL_COUNT = 0x38, - TIMER_CURRENT_COUNT = 0x39, - TIMER_DIVIDE_CONFIG = 0x3E, + ID = 0x02, + VERSION = 0x03, + TPR = 0x08, + APR = 0x09, + PPR = 
0x0A, + EOI = 0x0B, + REMOTE_READ = 0x0C, + LDR = 0x0D, + DFR = 0x0E, + SVR = 0x0F, + ISR_BASE = 0x10, + TMR_BASE = 0x18, + IRR_BASE = 0x20, + ERROR_STATUS = 0x28, + LVT_CMCI = 0x2F, + ICR = 0x30, + ICRD = 0x31, + LVT_TIMER = 0x32, + LVT_THERMAL = 0x33, + LVT_PERFCNT = 0x34, + LVT_LINT0 = 0x35, + LVT_LINT1 = 0x36, + LVT_ERROR = 0x37, + TIMER_INITIAL_COUNT = 0x38, + TIMER_CURRENT_COUNT = 0x39, + TIMER_DIVIDE_CONFIG = 0x3E, } lapic_register_t; -#define LAPIC_MMIO_PBASE 0xFEE00000 /* Default physical MMIO addr */ -#define LAPIC_MMIO_VBASE lapic_vbase /* Actual virtual mapped addr */ -#define LAPIC_MSR_BASE 0x800 +#define LAPIC_MMIO_PBASE 0xFEE00000 /* Default physical MMIO addr */ +#define LAPIC_MMIO_VBASE lapic_vbase /* Actual virtual mapped addr */ +#define LAPIC_MSR_BASE 0x800 -#define LAPIC_MMIO_OFFSET(reg) (reg << 4) -#define LAPIC_MSR_OFFSET(reg) (reg) +#define LAPIC_MMIO_OFFSET(reg) (reg << 4) +#define LAPIC_MSR_OFFSET(reg) (reg) -#define LAPIC_MMIO(reg) ((volatile uint32_t *) \ - (LAPIC_MMIO_VBASE + LAPIC_MMIO_OFFSET(reg))) -#define LAPIC_MSR(reg) (LAPIC_MSR_BASE + LAPIC_MSR_OFFSET(reg)) +#define LAPIC_MMIO(reg) ((volatile uint32_t *) \ + (LAPIC_MMIO_VBASE + LAPIC_MMIO_OFFSET(reg))) +#define LAPIC_MSR(reg) (LAPIC_MSR_BASE + LAPIC_MSR_OFFSET(reg)) typedef struct { - void (*init) (void); - uint32_t (*read) (lapic_register_t); - void (*write) (lapic_register_t, uint32_t); - uint64_t (*read_icr) (void); - void (*write_icr) (uint32_t, uint32_t); + void (*init)(void); + uint32_t (*read)(lapic_register_t); + void (*write)(lapic_register_t, uint32_t); + uint64_t (*read_icr)(void); + void (*write_icr)(uint32_t, uint32_t); } lapic_ops_table_t; extern lapic_ops_table_t *lapic_ops; -#define LAPIC_INIT() lapic_ops->init(); -#define LAPIC_WRITE(reg,val) lapic_ops->write(reg, val) -#define LAPIC_READ(reg) lapic_ops->read(reg) -#define LAPIC_READ_OFFSET(reg,off) LAPIC_READ((reg)+(off)) -#define LAPIC_READ_ICR() lapic_ops->read_icr() -#define LAPIC_WRITE_ICR(dst,cmd) lapic_ops->write_icr(dst, cmd) +#define LAPIC_INIT() lapic_ops->init(); +#define LAPIC_WRITE(reg, val) lapic_ops->write(reg, val) +#define LAPIC_READ(reg) lapic_ops->read(reg) +#define LAPIC_READ_OFFSET(reg, off) LAPIC_READ((reg)+(off)) +#define LAPIC_READ_ICR() lapic_ops->read_icr() +#define LAPIC_WRITE_ICR(dst, cmd) lapic_ops->write_icr(dst, cmd) typedef enum { periodic, one_shot } lapic_timer_mode_t; -typedef enum { +typedef enum { divide_by_1 = LAPIC_TIMER_DIVIDE_1, divide_by_2 = LAPIC_TIMER_DIVIDE_2, divide_by_4 = LAPIC_TIMER_DIVIDE_4, @@ -212,127 +212,134 @@ typedef uint32_t lapic_timer_count_t; * 0x5F are used. Those systems are not expected to have I/O APIC * so 16 (0x50 - 0x40) vectors for legacy PIC support is perfect. 
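On the divide-configuration comment above ("divisor encoded by bits 0,1,3 with bit 2 always 0"): folding bit 3 down next to bits 1:0 yields a 3-bit index where 0 through 6 select divide-by-2 through divide-by-128 and 7 selects divide-by-1, which matches the LAPIC_TIMER_DIVIDE_* values. A small decoder sketch; lapic_divisor() is an illustrative helper, not a kernel function:

#include <stdio.h>

static unsigned
lapic_divisor(unsigned dcr)
{
    /* fold bit 3 down next to bits 1:0, giving a 3-bit index */
    unsigned idx = ((dcr >> 1) & 0x4) | (dcr & 0x3);

    /* index 0..6 -> divide by 2,4,...,128; index 7 -> divide by 1 */
    return (idx == 7) ? 1 : (2u << idx);
}

int
main(void)
{
    printf("0x0 -> /%u, 0x3 -> /%u, 0xB -> /%u\n",
        lapic_divisor(0x0), lapic_divisor(0x3), lapic_divisor(0xB));
    return 0;
}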
*/ -#define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0 -#define LAPIC_REDUCED_INTERRUPT_BASE 0x50 +#define LAPIC_DEFAULT_INTERRUPT_BASE 0xD0 +#define LAPIC_REDUCED_INTERRUPT_BASE 0x50 /* * Specific lapic interrupts are relative to this base * in priority order from high to low: */ -#define LAPIC_PERFCNT_INTERRUPT 0xF -#define LAPIC_INTERPROCESSOR_INTERRUPT 0xE -#define LAPIC_TIMER_INTERRUPT 0xD -#define LAPIC_THERMAL_INTERRUPT 0xC -#define LAPIC_ERROR_INTERRUPT 0xB -#define LAPIC_SPURIOUS_INTERRUPT 0xA -#define LAPIC_CMCI_INTERRUPT 0x9 -#define LAPIC_PMC_SW_INTERRUPT 0x8 -#define LAPIC_PM_INTERRUPT 0x7 -#define LAPIC_KICK_INTERRUPT 0x6 +#define LAPIC_PERFCNT_INTERRUPT 0xF +#define LAPIC_INTERPROCESSOR_INTERRUPT 0xE +#define LAPIC_TIMER_INTERRUPT 0xD +#define LAPIC_THERMAL_INTERRUPT 0xC +#define LAPIC_ERROR_INTERRUPT 0xB +#define LAPIC_SPURIOUS_INTERRUPT 0xA +#define LAPIC_CMCI_INTERRUPT 0x9 +#define LAPIC_PMC_SW_INTERRUPT 0x8 +#define LAPIC_PM_INTERRUPT 0x7 +#define LAPIC_KICK_INTERRUPT 0x6 -#define LAPIC_PMC_SWI_VECTOR (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_PMC_SW_INTERRUPT) -#define LAPIC_TIMER_VECTOR (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT) +#define LAPIC_PMC_SWI_VECTOR (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_PMC_SW_INTERRUPT) +#define LAPIC_TIMER_VECTOR (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT) /* The vector field is ignored for NMI interrupts via the LAPIC * or otherwise, so this is not an offset from the interrupt * base. */ -#define LAPIC_NMI_INTERRUPT 0x2 -#define LAPIC_FUNC_TABLE_SIZE (LAPIC_PERFCNT_INTERRUPT + 1) +#define LAPIC_NMI_INTERRUPT 0x2 +#define LAPIC_FUNC_TABLE_SIZE (LAPIC_PERFCNT_INTERRUPT + 1) #define LAPIC_VECTOR(src) \ (lapic_interrupt_base + LAPIC_##src##_INTERRUPT) -#define LAPIC_ISR_IS_SET(base,src) \ +#define LAPIC_ISR_IS_SET(base, src) \ (LAPIC_READ_OFFSET(ISR_BASE,(base+LAPIC_##src##_INTERRUPT)/32) \ - & (1 <<((base + LAPIC_##src##_INTERRUPT)%32))) - -extern void lapic_init(void); -extern void lapic_configure(void); -extern void lapic_shutdown(void); -extern void lapic_smm_restore(void); -extern boolean_t lapic_probe(void); -extern void lapic_dump(void); -extern void lapic_cpu_map_dump(void); -extern int lapic_interrupt( - int interrupt, x86_saved_state_t *state); -extern void lapic_end_of_interrupt(void); -extern void lapic_unmask_perfcnt_interrupt(void); -extern void lapic_set_perfcnt_interrupt_mask(boolean_t); -extern void lapic_send_ipi(int cpu, int interupt); - -extern int lapic_to_cpu[]; -extern int cpu_to_lapic[]; -extern int lapic_interrupt_base; -extern void lapic_cpu_map_init(void); -extern void lapic_cpu_map(int lapic, int cpu_num); -extern uint32_t ml_get_apicid(uint32_t cpu); -extern uint32_t ml_get_cpuid(uint32_t lapic_index); - -extern void lapic_config_timer( - boolean_t interrupt, - lapic_timer_mode_t mode, - lapic_timer_divide_t divisor); - -extern void lapic_set_timer_fast( - lapic_timer_count_t initial_count); - -extern void lapic_set_timer( - boolean_t interrupt, - lapic_timer_mode_t mode, - lapic_timer_divide_t divisor, - lapic_timer_count_t initial_count); - -extern void lapic_get_timer( - lapic_timer_mode_t *mode, - lapic_timer_divide_t *divisor, - lapic_timer_count_t *initial_count, - lapic_timer_count_t *current_count); - -extern void lapic_config_tsc_deadline_timer(void); -extern void lapic_set_tsc_deadline_timer(uint64_t deadline); -extern uint64_t lapic_get_tsc_deadline_timer(void); - -typedef int (*i386_intr_func_t)(x86_saved_state_t *state); -extern void lapic_set_intr_func(int intr, i386_intr_func_t func); - 
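On the vector macros above: a source's vector is its fixed offset added to the movable lapic_interrupt_base, and LAPIC_ISR_IS_SET() locates the in-service bit for vector v in ISR register v/32, bit v%32. Worked numerically in a sketch (vector_of() is an illustrative helper; the constants mirror the header):

#include <stdio.h>

#define INTERRUPT_BASE 0xD0  /* mirrors LAPIC_DEFAULT_INTERRUPT_BASE */
#define TIMER_OFFSET   0xD   /* mirrors LAPIC_TIMER_INTERRUPT */

static unsigned
vector_of(unsigned base, unsigned offset)
{
    return base + offset;
}

int
main(void)
{
    unsigned vec = vector_of(INTERRUPT_BASE, TIMER_OFFSET);

    /* 0xDD: the in-service bit lives in ISR register 6, bit 29 */
    printf("timer vector 0x%X -> ISR reg %u, bit %u\n",
        vec, vec / 32, vec % 32);
    return 0;
}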
-extern void lapic_set_pmi_func(i386_intr_func_t); - -static inline void lapic_set_timer_func(i386_intr_func_t func) + & (1 <<((base + LAPIC_##src##_INTERRUPT)%32))) + +extern void lapic_init(void); +extern void lapic_configure(void); +extern void lapic_shutdown(void); +extern void lapic_smm_restore(void); +extern boolean_t lapic_probe(void); +extern void lapic_dump(void); +extern void lapic_cpu_map_dump(void); +extern int lapic_interrupt( + int interrupt, x86_saved_state_t *state); +extern void lapic_end_of_interrupt(void); +extern void lapic_unmask_perfcnt_interrupt(void); +extern void lapic_set_perfcnt_interrupt_mask(boolean_t); +extern void lapic_send_ipi(int cpu, int interupt); + +extern int lapic_to_cpu[]; +extern int cpu_to_lapic[]; +extern int lapic_interrupt_base; +extern void lapic_cpu_map_init(void); +extern void lapic_cpu_map(int lapic, int cpu_num); +extern uint32_t ml_get_apicid(uint32_t cpu); +extern uint32_t ml_get_cpuid(uint32_t lapic_index); +extern int lapic_max_interrupt_cpunum; + +extern void lapic_config_timer( + boolean_t interrupt, + lapic_timer_mode_t mode, + lapic_timer_divide_t divisor); + +extern void lapic_set_timer_fast( + lapic_timer_count_t initial_count); + +extern void lapic_set_timer( + boolean_t interrupt, + lapic_timer_mode_t mode, + lapic_timer_divide_t divisor, + lapic_timer_count_t initial_count); + +extern void lapic_get_timer( + lapic_timer_mode_t *mode, + lapic_timer_divide_t *divisor, + lapic_timer_count_t *initial_count, + lapic_timer_count_t *current_count); + +extern void lapic_config_tsc_deadline_timer(void); +extern void lapic_set_tsc_deadline_timer(uint64_t deadline); +extern uint64_t lapic_get_tsc_deadline_timer(void); + +typedef int (*i386_intr_func_t)(x86_saved_state_t *state); +extern void lapic_set_intr_func(int intr, i386_intr_func_t func); + +extern void lapic_set_pmi_func(i386_intr_func_t); + +static inline void +lapic_set_timer_func(i386_intr_func_t func) { lapic_set_intr_func(LAPIC_VECTOR(TIMER), func); } /* We don't support dynamic adjustment of the LAPIC timer base vector here * it's effectively incompletely supported elsewhere as well. 
*/ -static inline void lapic_timer_swi(void) { - __asm__ __volatile__("int %0" :: "i"(LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT):"memory"); +static inline void +lapic_timer_swi(void) +{ + __asm__ __volatile__ ("int %0" :: "i"(LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT):"memory"); } -static inline void lapic_set_thermal_func(i386_intr_func_t func) +static inline void +lapic_set_thermal_func(i386_intr_func_t func) { lapic_set_intr_func(LAPIC_VECTOR(THERMAL), func); } -static inline void lapic_set_cmci_func(i386_intr_func_t func) +static inline void +lapic_set_cmci_func(i386_intr_func_t func) { lapic_set_intr_func(LAPIC_VECTOR(CMCI), func); } -static inline void lapic_set_pm_func(i386_intr_func_t func) +static inline void +lapic_set_pm_func(i386_intr_func_t func) { lapic_set_intr_func(LAPIC_VECTOR(PM), func); } -extern boolean_t lapic_is_interrupt_pending(void); -extern boolean_t lapic_is_interrupting(uint8_t vector); -extern void lapic_interrupt_counts(uint64_t intrs[256]); -extern void lapic_disable_timer(void); +extern boolean_t lapic_is_interrupt_pending(void); +extern boolean_t lapic_is_interrupting(uint8_t vector); +extern void lapic_interrupt_counts(uint64_t intrs[256]); +extern void lapic_disable_timer(void); -extern uint8_t lapic_get_cmci_vector(void); +extern uint8_t lapic_get_cmci_vector(void); -#define MAX_LAPICIDS (LAPIC_ID_MAX+1) +#define MAX_LAPICIDS (LAPIC_ID_MAX+1) #ifdef MP_DEBUG -#define LAPIC_CPU_MAP_DUMP() lapic_cpu_map_dump() -#define LAPIC_DUMP() lapic_dump() +#define LAPIC_CPU_MAP_DUMP() lapic_cpu_map_dump() +#define LAPIC_DUMP() lapic_dump() #else #define LAPIC_CPU_MAP_DUMP() #define LAPIC_DUMP() @@ -341,4 +348,3 @@ extern uint8_t lapic_get_cmci_vector(void); #endif /* ASSEMBLER */ #endif /* _I386_LAPIC_H_ */ - diff --git a/osfmk/i386/lapic_native.c b/osfmk/i386/lapic_native.c index 73e5e1c13..bf5e61a3c 100644 --- a/osfmk/i386/lapic_native.c +++ b/osfmk/i386/lapic_native.c @@ -2,7 +2,7 @@ * Copyright (c) 2008-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -58,20 +58,20 @@ #include -#if MP_DEBUG -#define PAUSE delay(1000000) -#define DBG(x...) kprintf(x) +#if MP_DEBUG +#define PAUSE delay(1000000) +#define DBG(x...) kprintf(x) #else #define DBG(x...) 
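/*
 * An illustrative, self-contained sketch (hypothetical MY_* names, not
 * part of this file) of the conditional-debug-macro pattern that the
 * DBG()/PAUSE definitions above follow: with the debug switch off, the
 * macro expands to nothing, so release builds carry no tracing overhead.
 */
#include <stdio.h>

#if MY_MP_DEBUG                          /* stand-in for MP_DEBUG */
#define MY_DBG(x...)    printf(x)
#else
#define MY_DBG(x...)                     /* expands to nothing */
#endif

/* MY_DBG("cpu %d online\n", 3) costs nothing unless MY_MP_DEBUG is set. */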
#define PAUSE -#endif /* MP_DEBUG */ +#endif /* MP_DEBUG */ -lapic_ops_table_t *lapic_ops; /* Lapic operations switch */ +lapic_ops_table_t *lapic_ops; /* Lapic operations switch */ -static vm_map_offset_t lapic_pbase; /* Physical base memory-mapped regs */ -static vm_offset_t lapic_vbase; /* Virtual base memory-mapped regs */ +static vm_map_offset_t lapic_pbase; /* Physical base memory-mapped regs */ +static vm_offset_t lapic_vbase; /* Virtual base memory-mapped regs */ -static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE]; +static i386_intr_func_t lapic_intr_func[LAPIC_FUNC_TABLE_SIZE]; /* TRUE if local APIC was enabled by the OS not by the BIOS */ static boolean_t lapic_os_enabled = FALSE; @@ -82,24 +82,27 @@ static uint64_t lapic_error_time_threshold = 0; static unsigned lapic_master_error_count = 0; static unsigned lapic_error_count_threshold = 5; static boolean_t lapic_dont_panic = FALSE; +int lapic_max_interrupt_cpunum = 0; #ifdef MP_DEBUG void lapic_cpu_map_dump(void) { - int i; + int i; for (i = 0; i < MAX_CPUS; i++) { - if (cpu_to_lapic[i] == -1) + if (cpu_to_lapic[i] == -1) { continue; + } kprintf("cpu_to_lapic[%d]: %d\n", - i, cpu_to_lapic[i]); + i, cpu_to_lapic[i]); } for (i = 0; i < MAX_LAPICIDS; i++) { - if (lapic_to_cpu[i] == -1) + if (lapic_to_cpu[i] == -1) { continue; + } kprintf("lapic_to_cpu[%d]: %d\n", - i, lapic_to_cpu[i]); + i, lapic_to_cpu[i]); } } #endif /* MP_DEBUG */ @@ -107,21 +110,21 @@ lapic_cpu_map_dump(void) static void legacy_init(void) { - int result; - kern_return_t kr; - vm_map_entry_t entry; + int result; + kern_return_t kr; + vm_map_entry_t entry; vm_map_offset_t lapic_vbase64; /* Establish a map to the local apic */ if (lapic_vbase == 0) { lapic_vbase64 = (vm_offset_t)vm_map_min(kernel_map); result = vm_map_find_space(kernel_map, - &lapic_vbase64, - round_page(LAPIC_SIZE), 0, - 0, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_IOKIT, - &entry); + &lapic_vbase64, + round_page(LAPIC_SIZE), 0, + 0, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_IOKIT, + &entry); /* Convert 64-bit vm_map_offset_t to "pointer sized" vm_offset_t */ lapic_vbase = (vm_offset_t) lapic_vbase64; @@ -138,12 +141,12 @@ legacy_init(void) * UC and this will override the default PAT setting. 
*/ kr = pmap_enter(pmap_kernel(), - lapic_vbase, - (ppnum_t) i386_btop(lapic_pbase), - VM_PROT_READ|VM_PROT_WRITE, - VM_PROT_NONE, - VM_WIMG_IO, - TRUE); + lapic_vbase, + (ppnum_t) i386_btop(lapic_pbase), + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_NONE, + VM_WIMG_IO, + TRUE); assert(kr == KERN_SUCCESS); } @@ -160,7 +163,7 @@ legacy_init(void) static uint32_t legacy_read(lapic_register_t reg) { - return *LAPIC_MMIO(reg); + return *LAPIC_MMIO(reg); } static void @@ -190,16 +193,16 @@ static lapic_ops_table_t legacy_ops = { legacy_write_icr }; -static boolean_t is_x2apic = FALSE; +static boolean_t is_x2apic = FALSE; static void x2apic_init(void) { - uint32_t lo; - uint32_t hi; + uint32_t lo; + uint32_t hi; rdmsr(MSR_IA32_APIC_BASE, lo, hi); - if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) { + if ((lo & MSR_IA32_APIC_BASE_EXTENDED) == 0) { lo |= MSR_IA32_APIC_BASE_EXTENDED; wrmsr(MSR_IA32_APIC_BASE, lo, hi); kprintf("x2APIC mode enabled\n"); @@ -209,8 +212,8 @@ x2apic_init(void) static uint32_t x2apic_read(lapic_register_t reg) { - uint32_t lo; - uint32_t hi; + uint32_t lo; + uint32_t hi; rdmsr(LAPIC_MSR(reg), lo, hi); return lo; @@ -231,7 +234,7 @@ x2apic_read_icr(void) static void x2apic_write_icr(uint32_t dst, uint32_t cmd) { - wrmsr(LAPIC_MSR(ICR), cmd, dst); + wrmsr(LAPIC_MSR(ICR), cmd, dst); } static lapic_ops_table_t x2apic_ops = { @@ -245,10 +248,10 @@ static lapic_ops_table_t x2apic_ops = { void lapic_init(void) { - uint32_t lo; - uint32_t hi; - boolean_t is_boot_processor; - boolean_t is_lapic_enabled; + uint32_t lo; + uint32_t hi; + boolean_t is_boot_processor; + boolean_t is_lapic_enabled; /* Examine the local APIC state */ rdmsr(MSR_IA32_APIC_BASE, lo, hi); @@ -257,11 +260,12 @@ lapic_init(void) is_x2apic = (lo & MSR_IA32_APIC_BASE_EXTENDED) != 0; lapic_pbase = (lo & MSR_IA32_APIC_BASE_BASE); kprintf("MSR_IA32_APIC_BASE 0x%llx %s %s mode %s\n", lapic_pbase, - is_lapic_enabled ? "enabled" : "disabled", - is_x2apic ? "extended" : "legacy", - is_boot_processor ? "BSP" : "AP"); - if (!is_boot_processor || !is_lapic_enabled) + is_lapic_enabled ? "enabled" : "disabled", + is_x2apic ? "extended" : "legacy", + is_boot_processor ? "BSP" : "AP"); + if (!is_boot_processor || !is_lapic_enabled) { panic("Unexpected local APIC state\n"); + } /* * If x2APIC is available and not already enabled, enable it. @@ -270,7 +274,7 @@ lapic_init(void) if (!is_x2apic && (cpuid_features() & CPUID_FEATURE_x2APIC)) { PE_parse_boot_argn("-x2apic", &is_x2apic, sizeof(is_x2apic)); kprintf("x2APIC supported %s be enabled\n", - is_x2apic ? "and will" : "but will not"); + is_x2apic ? "and will" : "but will not"); } lapic_ops = is_x2apic ? 
&x2apic_ops : &legacy_ops; @@ -278,14 +282,14 @@ lapic_init(void) LAPIC_INIT(); kprintf("ID: 0x%x LDR: 0x%x\n", LAPIC_READ(ID), LAPIC_READ(LDR)); - if ((LAPIC_READ(VERSION)&LAPIC_VERSION_MASK) < 0x14) { + if ((LAPIC_READ(VERSION) & LAPIC_VERSION_MASK) < 0x14) { panic("Local APIC version 0x%x, 0x14 or more expected\n", - (LAPIC_READ(VERSION)&LAPIC_VERSION_MASK)); + (LAPIC_READ(VERSION) & LAPIC_VERSION_MASK)); } /* Set up the lapic_id <-> cpu_number map and add this boot processor */ lapic_cpu_map_init(); - lapic_cpu_map((LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, 0); + lapic_cpu_map((LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK, 0); current_cpu_datap()->cpu_phys_number = cpu_to_lapic[0]; kprintf("Boot cpu local APIC id 0x%x\n", cpu_to_lapic[0]); } @@ -299,7 +303,7 @@ lapic_esr_read(void) return LAPIC_READ(ERROR_STATUS); } -static void +static void lapic_esr_clear(void) { LAPIC_WRITE(ERROR_STATUS, 0); @@ -314,7 +318,8 @@ static const char *DM_str[8] = { "NMI", "Reset", "Invalid", - "ExtINT"}; + "ExtINT" +}; static const char *TMR_str[] = { "OneShot", @@ -326,12 +331,12 @@ static const char *TMR_str[] = { void lapic_dump(void) { - int i; + int i; #define BOOL(a) ((a)?' ':'!') #define VEC(lvt) \ LAPIC_READ(lvt)&LAPIC_LVT_VECTOR_MASK -#define DS(lvt) \ +#define DS(lvt) \ (LAPIC_READ(lvt)&LAPIC_LVT_DS_PENDING)?" SendPending" : "Idle" #define DM(lvt) \ DM_str[(LAPIC_READ(lvt)>>LAPIC_LVT_DM_SHIFT)&LAPIC_LVT_DM_MASK] @@ -342,93 +347,99 @@ lapic_dump(void) #define IP(lvt) \ (LAPIC_READ(lvt)&LAPIC_LVT_IP_PLRITY_LOW)? "Low " : "High" - kprintf("LAPIC %d at %p version 0x%x\n", - (LAPIC_READ(ID)>>LAPIC_ID_SHIFT)&LAPIC_ID_MASK, - (void *) lapic_vbase, - LAPIC_READ(VERSION)&LAPIC_VERSION_MASK); + kprintf("LAPIC %d at %p version 0x%x\n", + (LAPIC_READ(ID) >> LAPIC_ID_SHIFT) & LAPIC_ID_MASK, + (void *) lapic_vbase, + LAPIC_READ(VERSION) & LAPIC_VERSION_MASK); kprintf("Priorities: Task 0x%x Arbitration 0x%x Processor 0x%x\n", - LAPIC_READ(TPR)&LAPIC_TPR_MASK, - LAPIC_READ(APR)&LAPIC_APR_MASK, - LAPIC_READ(PPR)&LAPIC_PPR_MASK); + LAPIC_READ(TPR) & LAPIC_TPR_MASK, + LAPIC_READ(APR) & LAPIC_APR_MASK, + LAPIC_READ(PPR) & LAPIC_PPR_MASK); kprintf("Destination Format 0x%x Logical Destination 0x%x\n", - is_x2apic ? 0 : LAPIC_READ(DFR)>>LAPIC_DFR_SHIFT, - LAPIC_READ(LDR)>>LAPIC_LDR_SHIFT); + is_x2apic ? 
0 : LAPIC_READ(DFR) >> LAPIC_DFR_SHIFT, + LAPIC_READ(LDR) >> LAPIC_LDR_SHIFT); kprintf("%cEnabled %cFocusChecking SV 0x%x\n", - BOOL(LAPIC_READ(SVR)&LAPIC_SVR_ENABLE), - BOOL(!(LAPIC_READ(SVR)&LAPIC_SVR_FOCUS_OFF)), - LAPIC_READ(SVR) & LAPIC_SVR_MASK); + BOOL(LAPIC_READ(SVR) & LAPIC_SVR_ENABLE), + BOOL(!(LAPIC_READ(SVR) & LAPIC_SVR_FOCUS_OFF)), + LAPIC_READ(SVR) & LAPIC_SVR_MASK); #if CONFIG_MCA - if (mca_is_cmci_present()) + if (mca_is_cmci_present()) { kprintf("LVT_CMCI: Vector 0x%02x [%s] %s %cmasked\n", - VEC(LVT_CMCI), - DM(LVT_CMCI), - DS(LVT_CMCI), - MASK(LVT_CMCI)); + VEC(LVT_CMCI), + DM(LVT_CMCI), + DS(LVT_CMCI), + MASK(LVT_CMCI)); + } #endif kprintf("LVT_TIMER: Vector 0x%02x %s %cmasked %s\n", - VEC(LVT_TIMER), - DS(LVT_TIMER), - MASK(LVT_TIMER), - TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT) - & LAPIC_LVT_TMR_MASK]); + VEC(LVT_TIMER), + DS(LVT_TIMER), + MASK(LVT_TIMER), + TMR_str[(LAPIC_READ(LVT_TIMER) >> LAPIC_LVT_TMR_SHIFT) + & LAPIC_LVT_TMR_MASK]); kprintf(" Initial Count: 0x%08x \n", LAPIC_READ(TIMER_INITIAL_COUNT)); kprintf(" Current Count: 0x%08x \n", LAPIC_READ(TIMER_CURRENT_COUNT)); kprintf(" Divide Config: 0x%08x \n", LAPIC_READ(TIMER_DIVIDE_CONFIG)); kprintf("LVT_PERFCNT: Vector 0x%02x [%s] %s %cmasked\n", - VEC(LVT_PERFCNT), - DM(LVT_PERFCNT), - DS(LVT_PERFCNT), - MASK(LVT_PERFCNT)); + VEC(LVT_PERFCNT), + DM(LVT_PERFCNT), + DS(LVT_PERFCNT), + MASK(LVT_PERFCNT)); kprintf("LVT_THERMAL: Vector 0x%02x [%s] %s %cmasked\n", - VEC(LVT_THERMAL), - DM(LVT_THERMAL), - DS(LVT_THERMAL), - MASK(LVT_THERMAL)); + VEC(LVT_THERMAL), + DM(LVT_THERMAL), + DS(LVT_THERMAL), + MASK(LVT_THERMAL)); kprintf("LVT_LINT0: Vector 0x%02x [%s][%s][%s] %s %cmasked\n", - VEC(LVT_LINT0), - DM(LVT_LINT0), - TM(LVT_LINT0), - IP(LVT_LINT0), - DS(LVT_LINT0), - MASK(LVT_LINT0)); + VEC(LVT_LINT0), + DM(LVT_LINT0), + TM(LVT_LINT0), + IP(LVT_LINT0), + DS(LVT_LINT0), + MASK(LVT_LINT0)); kprintf("LVT_LINT1: Vector 0x%02x [%s][%s][%s] %s %cmasked\n", - VEC(LVT_LINT1), - DM(LVT_LINT1), - TM(LVT_LINT1), - IP(LVT_LINT1), - DS(LVT_LINT1), - MASK(LVT_LINT1)); + VEC(LVT_LINT1), + DM(LVT_LINT1), + TM(LVT_LINT1), + IP(LVT_LINT1), + DS(LVT_LINT1), + MASK(LVT_LINT1)); kprintf("LVT_ERROR: Vector 0x%02x %s %cmasked\n", - VEC(LVT_ERROR), - DS(LVT_ERROR), - MASK(LVT_ERROR)); + VEC(LVT_ERROR), + DS(LVT_ERROR), + MASK(LVT_ERROR)); kprintf("ESR: %08x \n", lapic_esr_read()); kprintf(" "); - for(i=0xf; i>=0; i--) - kprintf("%x%x%x%x",i,i,i,i); + for (i = 0xf; i >= 0; i--) { + kprintf("%x%x%x%x", i, i, i, i); + } kprintf("\n"); kprintf("TMR: 0x"); - for(i=7; i>=0; i--) - kprintf("%08x",LAPIC_READ_OFFSET(TMR_BASE, i)); + for (i = 7; i >= 0; i--) { + kprintf("%08x", LAPIC_READ_OFFSET(TMR_BASE, i)); + } kprintf("\n"); kprintf("IRR: 0x"); - for(i=7; i>=0; i--) - kprintf("%08x",LAPIC_READ_OFFSET(IRR_BASE, i)); + for (i = 7; i >= 0; i--) { + kprintf("%08x", LAPIC_READ_OFFSET(IRR_BASE, i)); + } kprintf("\n"); kprintf("ISR: 0x"); - for(i=7; i >= 0; i--) - kprintf("%08x",LAPIC_READ_OFFSET(ISR_BASE, i)); + for (i = 7; i >= 0; i--) { + kprintf("%08x", LAPIC_READ_OFFSET(ISR_BASE, i)); + } kprintf("\n"); } boolean_t lapic_probe(void) { - uint32_t lo; - uint32_t hi; + uint32_t lo; + uint32_t hi; - if (cpuid_features() & CPUID_FEATURE_APIC) + if (cpuid_features() & CPUID_FEATURE_APIC) { return TRUE; + } if (cpuid_family() == 6 || cpuid_family() == 15) { /* @@ -469,13 +480,14 @@ lapic_shutdown(void) uint32_t value; /* Shutdown if local APIC was enabled by OS */ - if (lapic_os_enabled == FALSE) + if (lapic_os_enabled == FALSE) { return; + 
} mp_disable_preemption(); /* ExtINT: masked */ - if (get_cpu_number() == master_cpu) { + if (get_cpu_number() <= lapic_max_interrupt_cpunum) { value = LAPIC_READ(LVT_LINT0); value |= LAPIC_LVT_MASKED; LAPIC_WRITE(LVT_LINT0, value); @@ -502,10 +514,16 @@ lapic_shutdown(void) mp_enable_preemption(); } +boolean_t +cpu_can_exit(int cpu) +{ + return cpu > lapic_max_interrupt_cpunum; +} + void lapic_configure(void) { - int value; + int value; if (lapic_error_time_threshold == 0 && cpu_number() == 0) { nanoseconds_to_absolutetime(NSEC_PER_SEC >> 2, &lapic_error_time_threshold); @@ -514,13 +532,19 @@ lapic_configure(void) } } + if (cpu_number() == 0) { + if (!PE_parse_boot_argn("intcpumax", &lapic_max_interrupt_cpunum, sizeof(lapic_max_interrupt_cpunum))) { + lapic_max_interrupt_cpunum = ((cpuid_features() & CPUID_FEATURE_HTT) ? 1 : 0); + } + } + /* Accept all */ LAPIC_WRITE(TPR, 0); LAPIC_WRITE(SVR, LAPIC_VECTOR(SPURIOUS) | LAPIC_SVR_ENABLE); /* ExtINT */ - if (get_cpu_number() == master_cpu) { + if (get_cpu_number() <= lapic_max_interrupt_cpunum) { value = LAPIC_READ(LVT_LINT0); value &= ~LAPIC_LVT_MASKED; value |= LAPIC_LVT_DM_EXTINT; @@ -538,12 +562,13 @@ lapic_configure(void) #if CONFIG_MCA /* CMCI, if available */ - if (mca_is_cmci_present()) + if (mca_is_cmci_present()) { LAPIC_WRITE(LVT_CMCI, LAPIC_VECTOR(CMCI)); + } #endif if (((cpu_number() == master_cpu) && lapic_errors_masked == FALSE) || - (cpu_number() != master_cpu)) { + (cpu_number() != master_cpu)) { lapic_esr_clear(); LAPIC_WRITE(LVT_ERROR, LAPIC_VECTOR(ERROR)); } @@ -551,16 +576,16 @@ lapic_configure(void) void lapic_set_timer( - boolean_t interrupt_unmasked, - lapic_timer_mode_t mode, - lapic_timer_divide_t divisor, - lapic_timer_count_t initial_count) + boolean_t interrupt_unmasked, + lapic_timer_mode_t mode, + lapic_timer_divide_t divisor, + lapic_timer_count_t initial_count) { - uint32_t timer_vector; + uint32_t timer_vector; mp_disable_preemption(); timer_vector = LAPIC_READ(LVT_TIMER); - timer_vector &= ~(LAPIC_LVT_MASKED|LAPIC_LVT_PERIODIC);; + timer_vector &= ~(LAPIC_LVT_MASKED | LAPIC_LVT_PERIODIC);; timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED; timer_vector |= (mode == periodic) ? LAPIC_LVT_PERIODIC : 0; LAPIC_WRITE(LVT_TIMER, timer_vector); @@ -571,17 +596,17 @@ lapic_set_timer( void lapic_config_timer( - boolean_t interrupt_unmasked, - lapic_timer_mode_t mode, - lapic_timer_divide_t divisor) + boolean_t interrupt_unmasked, + lapic_timer_mode_t mode, + lapic_timer_divide_t divisor) { - uint32_t timer_vector; + uint32_t timer_vector; mp_disable_preemption(); timer_vector = LAPIC_READ(LVT_TIMER); timer_vector &= ~(LAPIC_LVT_MASKED | - LAPIC_LVT_PERIODIC | - LAPIC_LVT_TSC_DEADLINE); + LAPIC_LVT_PERIODIC | + LAPIC_LVT_TSC_DEADLINE); timer_vector |= interrupt_unmasked ? 0 : LAPIC_LVT_MASKED; timer_vector |= (mode == periodic) ? 
LAPIC_LVT_PERIODIC : 0; LAPIC_WRITE(LVT_TIMER, timer_vector); @@ -595,19 +620,19 @@ lapic_config_timer( void lapic_config_tsc_deadline_timer(void) { - uint32_t timer_vector; + uint32_t timer_vector; DBG("lapic_config_tsc_deadline_timer()\n"); mp_disable_preemption(); timer_vector = LAPIC_READ(LVT_TIMER); timer_vector &= ~(LAPIC_LVT_MASKED | - LAPIC_LVT_PERIODIC); + LAPIC_LVT_PERIODIC); timer_vector |= LAPIC_LVT_TSC_DEADLINE; LAPIC_WRITE(LVT_TIMER, timer_vector); /* Serialize writes per Intel OSWG */ do { - lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL<<32)); + lapic_set_tsc_deadline_timer(rdtsc64() + (1ULL << 32)); } while (lapic_get_tsc_deadline_timer() == 0); lapic_set_tsc_deadline_timer(0); @@ -617,7 +642,7 @@ lapic_config_tsc_deadline_timer(void) void lapic_set_timer_fast( - lapic_timer_count_t initial_count) + lapic_timer_count_t initial_count) { LAPIC_WRITE(LVT_TIMER, LAPIC_READ(LVT_TIMER) & ~LAPIC_LVT_MASKED); LAPIC_WRITE(TIMER_INITIAL_COUNT, initial_count); @@ -638,23 +663,27 @@ lapic_get_tsc_deadline_timer(void) void lapic_get_timer( - lapic_timer_mode_t *mode, - lapic_timer_divide_t *divisor, - lapic_timer_count_t *initial_count, - lapic_timer_count_t *current_count) + lapic_timer_mode_t *mode, + lapic_timer_divide_t *divisor, + lapic_timer_count_t *initial_count, + lapic_timer_count_t *current_count) { mp_disable_preemption(); - if (mode) + if (mode) { *mode = (LAPIC_READ(LVT_TIMER) & LAPIC_LVT_PERIODIC) ? - periodic : one_shot; - if (divisor) + periodic : one_shot; + } + if (divisor) { *divisor = LAPIC_READ(TIMER_DIVIDE_CONFIG) & LAPIC_TIMER_DIVIDE_MASK; - if (initial_count) + } + if (initial_count) { *initial_count = LAPIC_READ(TIMER_INITIAL_COUNT); - if (current_count) + } + if (current_count) { *current_count = LAPIC_READ(TIMER_CURRENT_COUNT); + } mp_enable_preemption(); -} +} static inline void _lapic_end_of_interrupt(void) @@ -668,11 +697,15 @@ lapic_end_of_interrupt(void) _lapic_end_of_interrupt(); } -void lapic_unmask_perfcnt_interrupt(void) { +void +lapic_unmask_perfcnt_interrupt(void) +{ LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT)); } -void lapic_set_perfcnt_interrupt_mask(boolean_t mask) { +void +lapic_set_perfcnt_interrupt_mask(boolean_t mask) +{ uint32_t m = (mask ? 
LAPIC_LVT_MASKED : 0); LAPIC_WRITE(LVT_PERFCNT, LAPIC_VECTOR(PERFCNT) | m); } @@ -680,8 +713,9 @@ void lapic_set_perfcnt_interrupt_mask(boolean_t mask) { void lapic_set_intr_func(int vector, i386_intr_func_t func) { - if (vector > lapic_interrupt_base) + if (vector > lapic_interrupt_base) { vector -= lapic_interrupt_base; + } switch (vector) { case LAPIC_NMI_INTERRUPT: @@ -695,19 +729,21 @@ lapic_set_intr_func(int vector, i386_intr_func_t func) break; default: panic("lapic_set_intr_func(%d,%p) invalid vector\n", - vector, func); + vector, func); } } -void lapic_set_pmi_func(i386_intr_func_t func) { +void +lapic_set_pmi_func(i386_intr_func_t func) +{ lapic_set_intr_func(LAPIC_VECTOR(PERFCNT), func); } int lapic_interrupt(int interrupt_num, x86_saved_state_t *state) { - int retval = 0; - int esr = -1; + int retval = 0; + int esr = -1; interrupt_num -= lapic_interrupt_base; if (interrupt_num < 0) { @@ -715,18 +751,19 @@ lapic_interrupt(int interrupt_num, x86_saved_state_t *state) lapic_intr_func[LAPIC_NMI_INTERRUPT] != NULL) { retval = (*lapic_intr_func[LAPIC_NMI_INTERRUPT])(state); return retval; - } - else + } else { return 0; + } } - switch(interrupt_num) { + switch (interrupt_num) { case LAPIC_TIMER_INTERRUPT: case LAPIC_THERMAL_INTERRUPT: case LAPIC_INTERPROCESSOR_INTERRUPT: case LAPIC_PM_INTERRUPT: - if (lapic_intr_func[interrupt_num] != NULL) + if (lapic_intr_func[interrupt_num] != NULL) { (void) (*lapic_intr_func[interrupt_num])(state); + } _lapic_end_of_interrupt(); retval = 1; break; @@ -745,8 +782,9 @@ lapic_interrupt(int interrupt_num, x86_saved_state_t *state) } break; case LAPIC_CMCI_INTERRUPT: - if (lapic_intr_func[interrupt_num] != NULL) + if (lapic_intr_func[interrupt_num] != NULL) { (void) (*lapic_intr_func[interrupt_num])(state); + } /* return 0 for plaform expert to handle */ break; case LAPIC_ERROR_INTERRUPT: @@ -772,7 +810,7 @@ lapic_interrupt(int interrupt_num, x86_saved_state_t *state) lapic_dump(); if ((debug_boot_arg && (lapic_dont_panic == FALSE)) || - cpu_number() != master_cpu) { + cpu_number() != master_cpu) { panic("Local APIC error, ESR: %d\n", esr); } @@ -784,8 +822,7 @@ lapic_interrupt(int interrupt_num, x86_saved_state_t *state) LAPIC_WRITE(LVT_ERROR, LAPIC_READ(LVT_ERROR) | LAPIC_LVT_MASKED); printf("Local APIC: errors masked\n"); } - } - else { + } else { lapic_last_master_error = abstime; lapic_master_error_count = 0; } @@ -800,10 +837,10 @@ lapic_interrupt(int interrupt_num, x86_saved_state_t *state) /* No EOI required here */ retval = 1; break; - case LAPIC_PMC_SW_INTERRUPT: - { - } - break; + case LAPIC_PMC_SW_INTERRUPT: + { + } + break; case LAPIC_KICK_INTERRUPT: _lapic_end_of_interrupt(); retval = 1; @@ -818,18 +855,19 @@ lapic_smm_restore(void) { boolean_t state; - if (lapic_os_enabled == FALSE) + if (lapic_os_enabled == FALSE) { return; + } state = ml_set_interrupts_enabled(FALSE); - if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) { + if (LAPIC_ISR_IS_SET(LAPIC_REDUCED_INTERRUPT_BASE, TIMER)) { /* * Bogus SMI handler enables interrupts but does not know about * local APIC interrupt sources. When APIC timer counts down to * zero while in SMM, local APIC will end up waiting for an EOI * but no interrupt was delivered to the OS. 
- */ + */ _lapic_end_of_interrupt(); /* @@ -849,10 +887,11 @@ lapic_smm_restore(void) void lapic_send_ipi(int cpu, int vector) { - boolean_t state; + boolean_t state; - if (vector < lapic_interrupt_base) + if (vector < lapic_interrupt_base) { vector += lapic_interrupt_base; + } state = ml_set_interrupts_enabled(FALSE); @@ -873,24 +912,25 @@ lapic_send_ipi(int cpu, int vector) boolean_t lapic_is_interrupt_pending(void) { - int i; + int i; for (i = 0; i < 8; i += 1) { if ((LAPIC_READ_OFFSET(IRR_BASE, i) != 0) || - (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) - return (TRUE); + (LAPIC_READ_OFFSET(ISR_BASE, i) != 0)) { + return TRUE; + } } - return (FALSE); + return FALSE; } boolean_t lapic_is_interrupting(uint8_t vector) { - int i; - int bit; - uint32_t irr; - uint32_t isr; + int i; + int bit; + uint32_t irr; + uint32_t isr; i = vector / 32; bit = 1 << (vector % 32); @@ -898,35 +938,39 @@ lapic_is_interrupting(uint8_t vector) irr = LAPIC_READ_OFFSET(IRR_BASE, i); isr = LAPIC_READ_OFFSET(ISR_BASE, i); - if ((irr | isr) & bit) - return (TRUE); + if ((irr | isr) & bit) { + return TRUE; + } - return (FALSE); + return FALSE; } void lapic_interrupt_counts(uint64_t intrs[256]) { - int i; - int j; - int bit; - uint32_t irr; - uint32_t isr; + int i; + int j; + int bit; + uint32_t irr; + uint32_t isr; - if (intrs == NULL) + if (intrs == NULL) { return; + } for (i = 0; i < 8; i += 1) { irr = LAPIC_READ_OFFSET(IRR_BASE, i); isr = LAPIC_READ_OFFSET(ISR_BASE, i); - if ((isr | irr) == 0) + if ((isr | irr) == 0) { continue; + } for (j = (i == 0) ? 16 : 0; j < 32; j += 1) { bit = (32 * i) + j; - if ((isr | irr) & (1 << j)) + if ((isr | irr) & (1 << j)) { intrs[bit] += 1; + } } } } @@ -934,13 +978,13 @@ lapic_interrupt_counts(uint64_t intrs[256]) void lapic_disable_timer(void) { - uint32_t lvt_timer; + uint32_t lvt_timer; /* - * If we're in deadline timer mode, + * If we're in deadline timer mode, * simply clear the deadline timer, otherwise * mask the timer interrupt and clear the countdown. - */ + */ lvt_timer = LAPIC_READ(LVT_TIMER); if (lvt_timer & LAPIC_LVT_TSC_DEADLINE) { wrmsr64(MSR_IA32_TSC_DEADLINE, 0); @@ -955,11 +999,12 @@ lapic_disable_timer(void) uint8_t lapic_get_cmci_vector(void) { - uint8_t cmci_vector = 0; + uint8_t cmci_vector = 0; #if CONFIG_MCA /* CMCI, if available */ - if (mca_is_cmci_present()) + if (mca_is_cmci_present()) { cmci_vector = LAPIC_VECTOR(CMCI); + } #endif return cmci_vector; } diff --git a/osfmk/i386/ldt.c b/osfmk/i386/ldt.c index 6e1a1e115..53bfe023e 100644 --- a/osfmk/i386/ldt.c +++ b/osfmk/i386/ldt.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -62,15 +62,15 @@ */ #include -struct real_descriptor master_ldt[LDTSZ] __attribute__ ((aligned (4096))) - __attribute__ ((section ("__HIB, __desc"))) - = { - [SEL_TO_INDEX(USER_CTHREAD)] = MAKE_REAL_DESCRIPTOR( /* user cthread segment */ +struct real_descriptor master_ldt[LDTSZ] __attribute__ ((aligned(4096))) +__attribute__ ((section("__HIB, __desc"))) + = { + [SEL_TO_INDEX(USER_CTHREAD)] = MAKE_REAL_DESCRIPTOR( /* user cthread segment */ 0, 0xfffff, - SZ_32|SZ_G, - ACC_P|ACC_PL_U|ACC_DATA_W - ), -}; + SZ_32 | SZ_G, + ACC_P | ACC_PL_U | ACC_DATA_W + ), + }; unsigned mldtsz = sizeof(master_ldt); diff --git a/osfmk/i386/lock.h b/osfmk/i386/lock.h index 935742195..bdd42a5af 100644 --- a/osfmk/i386/lock.h +++ b/osfmk/i386/lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -32,28 +32,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,13 +64,13 @@ /* * Machine-dependent simple locks for the i386. */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _I386_LOCK_H_ -#define _I386_LOCK_H_ +#ifndef _I386_LOCK_H_ +#define _I386_LOCK_H_ #warning This header is deprecated. Use instead. -#endif /* _I386_LOCK_H_ */ +#endif /* _I386_LOCK_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/i386/locks.h b/osfmk/i386/locks.h index 9bdd394cf..21e74d712 100644 --- a/osfmk/i386/locks.h +++ b/osfmk/i386/locks.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,99 +22,100 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _I386_LOCKS_H_ -#define _I386_LOCKS_H_ +#ifndef _I386_LOCKS_H_ +#define _I386_LOCKS_H_ #include #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include -extern unsigned int LcksOpts; +extern unsigned int LcksOpts; #if DEVELOPMENT || DEBUG -extern unsigned int LckDisablePreemptCheck; +extern unsigned int LckDisablePreemptCheck; #endif -#define enaLkDeb 0x00000001 /* Request debug in default attribute */ -#define enaLkStat 0x00000002 /* Request statistic in default attribute */ -#define disLkRWPrio 0x00000004 /* Disable RW lock priority promotion */ +#define enaLkDeb 0x00000001 /* Request debug in default attribute */ +#define enaLkStat 0x00000002 /* Request statistic in default attribute */ +#define disLkRWPrio 0x00000004 /* Disable RW lock priority promotion */ +#define enaLkTimeStat 0x00000008 /* Request time statistics in default attribute */ #endif /* MACH_KERNEL_PRIVATE */ -#if defined(MACH_KERNEL_PRIVATE) +#if defined(MACH_KERNEL_PRIVATE) typedef struct { - volatile uintptr_t interlock; -#if MACH_LDEBUG - unsigned long lck_spin_pad[9]; /* XXX - usimple_lock_data_t */ + volatile uintptr_t interlock; +#if MACH_LDEBUG + unsigned long lck_spin_pad[9]; /* XXX - usimple_lock_data_t */ #endif } lck_spin_t; -#define LCK_SPIN_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */ +#define LCK_SPIN_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */ #else /* MACH_KERNEL_PRIVATE */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE typedef struct { unsigned long opaque[10]; } lck_spin_t; #else /* KERNEL_PRIVATE */ -typedef struct __lck_spin_t__ lck_spin_t; +typedef struct __lck_spin_t__ lck_spin_t; #endif #endif -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE /* The definition of this structure, including the layout of the * state bitfield, is tailored to the asm implementation in i386_lock.s */ typedef struct _lck_mtx_ { union { struct { - volatile uintptr_t lck_mtx_owner; + volatile uintptr_t lck_mtx_owner; union { struct { volatile uint32_t - lck_mtx_waiters:16, - lck_mtx_pri:8, - lck_mtx_ilocked:1, - lck_mtx_mlocked:1, - lck_mtx_promoted:1, - lck_mtx_spin:1, - lck_mtx_is_ext:1, - lck_mtx_pad3:3; + lck_mtx_waiters:16, + lck_mtx_pri:8, + lck_mtx_ilocked:1, + lck_mtx_mlocked:1, + lck_mtx_promoted:1, + lck_mtx_spin:1, + lck_mtx_is_ext:1, + lck_mtx_pad3:3; }; - uint32_t lck_mtx_state; + uint32_t lck_mtx_state; }; /* Pad field used as a canary, initialized to ~0 */ - uint32_t lck_mtx_pad32; + uint32_t lck_mtx_pad32; }; struct { - struct _lck_mtx_ext_ *lck_mtx_ptr; - uint32_t lck_mtx_tag; - uint32_t lck_mtx_pad32_2; + struct _lck_mtx_ext_ *lck_mtx_ptr; + uint32_t lck_mtx_tag; + uint32_t lck_mtx_pad32_2; }; }; } lck_mtx_t; -#define LCK_MTX_WAITERS_MSK 0x0000ffff -#define LCK_MTX_WAITER 0x00000001 -#define LCK_MTX_PRIORITY_MSK 0x00ff0000 -#define LCK_MTX_ILOCKED_MSK 0x01000000 -#define LCK_MTX_MLOCKED_MSK 0x02000000 -#define LCK_MTX_PROMOTED_MSK 0x04000000 -#define LCK_MTX_SPIN_MSK 0x08000000 +#define LCK_MTX_WAITERS_MSK 0x0000ffff +#define LCK_MTX_WAITER 0x00000001 +#define LCK_MTX_PRIORITY_MSK 0x00ff0000 +#define LCK_MTX_ILOCKED_MSK 0x01000000 +#define LCK_MTX_MLOCKED_MSK 0x02000000 +#define LCK_MTX_PROMOTED_MSK 0x04000000 +#define LCK_MTX_SPIN_MSK 0x08000000 /* This pattern must subsume the interlocked, mlocked and spin bits */ -#define LCK_MTX_TAG_INDIRECT 0x07ff1007 /* lock marked as Indirect */ -#define LCK_MTX_TAG_DESTROYED 0x07fe2007 /* lock marked as Destroyed */ +#define LCK_MTX_TAG_INDIRECT 
0x07ff1007 /* lock marked as Indirect */ +#define LCK_MTX_TAG_DESTROYED 0x07fe2007 /* lock marked as Destroyed */ /* Adaptive spin before blocking */ -extern uint64_t MutexSpin; +extern uint64_t MutexSpin; typedef enum lck_mtx_spinwait_ret_type { LCK_MTX_SPINWAIT_ACQUIRED = 0, @@ -122,137 +123,137 @@ typedef enum lck_mtx_spinwait_ret_type { LCK_MTX_SPINWAIT_NO_SPIN = 2, } lck_mtx_spinwait_ret_type_t; -extern lck_mtx_spinwait_ret_type_t lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex); -extern void lck_mtx_lock_wait_x86(lck_mtx_t *mutex); -extern void lck_mtx_lock_acquire_x86(lck_mtx_t *mutex); +extern lck_mtx_spinwait_ret_type_t lck_mtx_lock_spinwait_x86(lck_mtx_t *mutex); +extern void lck_mtx_lock_wait_x86(lck_mtx_t *mutex); +extern void lck_mtx_lock_acquire_x86(lck_mtx_t *mutex); -extern void lck_mtx_lock_slow(lck_mtx_t *lock); -extern boolean_t lck_mtx_try_lock_slow(lck_mtx_t *lock); -extern void lck_mtx_unlock_slow(lck_mtx_t *lock); -extern void lck_mtx_lock_spin_slow(lck_mtx_t *lock); -extern boolean_t lck_mtx_try_lock_spin_slow(lck_mtx_t *lock); -extern void hw_lock_byte_init(volatile uint8_t *lock_byte); -extern void hw_lock_byte_lock(volatile uint8_t *lock_byte); -extern void hw_lock_byte_unlock(volatile uint8_t *lock_byte); +extern void lck_mtx_lock_slow(lck_mtx_t *lock); +extern boolean_t lck_mtx_try_lock_slow(lck_mtx_t *lock); +extern void lck_mtx_unlock_slow(lck_mtx_t *lock); +extern void lck_mtx_lock_spin_slow(lck_mtx_t *lock); +extern boolean_t lck_mtx_try_lock_spin_slow(lck_mtx_t *lock); +extern void hw_lock_byte_init(volatile uint8_t *lock_byte); +extern void hw_lock_byte_lock(volatile uint8_t *lock_byte); +extern void hw_lock_byte_unlock(volatile uint8_t *lock_byte); typedef struct { - unsigned int type; - unsigned int pad4; - vm_offset_t pc; - vm_offset_t thread; + unsigned int type; + unsigned int pad4; + vm_offset_t pc; + vm_offset_t thread; } lck_mtx_deb_t; #define MUTEX_TAG 0x4d4d typedef struct { - unsigned int lck_mtx_stat_data; + unsigned int lck_mtx_stat_data; } lck_mtx_stat_t; typedef struct _lck_mtx_ext_ { - lck_mtx_t lck_mtx; - struct _lck_grp_ *lck_mtx_grp; - unsigned int lck_mtx_attr; - unsigned int lck_mtx_pad1; - lck_mtx_deb_t lck_mtx_deb; - uint64_t lck_mtx_stat; - unsigned int lck_mtx_pad2[2]; + lck_mtx_t lck_mtx; + struct _lck_grp_ *lck_mtx_grp; + unsigned int lck_mtx_attr; + unsigned int lck_mtx_pad1; + lck_mtx_deb_t lck_mtx_deb; + uint64_t lck_mtx_stat; + unsigned int lck_mtx_pad2[2]; } lck_mtx_ext_t; -#define LCK_MTX_ATTR_DEBUG 0x1 -#define LCK_MTX_ATTR_DEBUGb 0 -#define LCK_MTX_ATTR_STAT 0x2 -#define LCK_MTX_ATTR_STATb 1 +#define LCK_MTX_ATTR_DEBUG 0x1 +#define LCK_MTX_ATTR_DEBUGb 0 +#define LCK_MTX_ATTR_STAT 0x2 +#define LCK_MTX_ATTR_STATb 1 #define LCK_MTX_EVENT(lck) ((event_t)(((unsigned int*)(lck))+(sizeof(lck_mtx_t)-1)/sizeof(unsigned int))) #define LCK_EVENT_TO_MUTEX(event) ((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t)-1)/sizeof(unsigned int)))) #else /* MACH_KERNEL_PRIVATE */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE typedef struct { - unsigned long opaque[2]; + unsigned long opaque[2]; } lck_mtx_t; typedef struct { - unsigned long opaque[10]; + unsigned long opaque[10]; } lck_mtx_ext_t; #else -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE typedef struct { - unsigned long opaque[2]; + unsigned long opaque[2]; } lck_mtx_t; typedef struct { - unsigned long opaque[10]; + unsigned long opaque[10]; } lck_mtx_ext_t; #else -typedef struct __lck_mtx_t__ lck_mtx_t; -typedef struct __lck_mtx_ext_t__ lck_mtx_ext_t; 
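/*
 * An illustrative, self-contained sketch (hypothetical names, not from
 * this header) of the opaque-type pattern above: clients outside
 * MACH_KERNEL_PRIVATE see only a padding array, and a compile-time size
 * check in the spirit of the lck_rw_t static_assert below keeps the
 * placeholder in sync with the private layout.
 */
#include <assert.h>

struct private_mtx {                     /* stand-in for the real layout */
	volatile unsigned long owner;
	unsigned long state;
};

typedef struct {
	unsigned long opaque[2];         /* what opaque clients are given */
} opaque_mtx_t;

static_assert(sizeof(opaque_mtx_t) == sizeof(struct private_mtx),
    "opaque placeholder must track the private lock layout");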
+typedef struct __lck_mtx_t__ lck_mtx_t; +typedef struct __lck_mtx_ext_t__ lck_mtx_ext_t; #endif #endif #endif -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE typedef union _lck_rw_t_internal_ { struct { - volatile uint16_t lck_rw_shared_count; /* No. of accepted readers */ - volatile uint8_t lck_rw_interlock; /* Interlock byte */ + volatile uint16_t lck_rw_shared_count; /* No. of accepted readers */ + volatile uint8_t lck_rw_interlock; /* Interlock byte */ volatile uint8_t - lck_rw_priv_excl:1, /* Writers prioritized if set */ - lck_rw_want_upgrade:1, /* Read-to-write upgrade waiting */ - lck_rw_want_write:1, /* Writer waiting or locked for write */ - lck_r_waiting:1, /* Reader is sleeping on lock */ - lck_w_waiting:1, /* Writer is sleeping on lock */ - lck_rw_can_sleep:1, /* Can attempts to lock go to sleep? */ - lck_rw_padb6:2; /* padding */ - uint32_t lck_rw_tag; /* This can be obsoleted when stats are in */ - thread_t lck_rw_owner; /* Unused */ + lck_rw_priv_excl:1, /* Writers prioritized if set */ + lck_rw_want_upgrade:1, /* Read-to-write upgrade waiting */ + lck_rw_want_write:1, /* Writer waiting or locked for write */ + lck_r_waiting:1, /* Reader is sleeping on lock */ + lck_w_waiting:1, /* Writer is sleeping on lock */ + lck_rw_can_sleep:1, /* Can attempts to lock go to sleep? */ + lck_rw_padb6:2; /* padding */ + uint32_t lck_rw_tag; /* This can be obsoleted when stats are in */ + thread_t lck_rw_owner; /* Unused */ }; struct { - uint32_t data; /* Single word for count, ilk, and bitfields */ - uint32_t lck_rw_pad4; - uint32_t lck_rw_pad8; - uint32_t lck_rw_pad12; + uint32_t data; /* Single word for count, ilk, and bitfields */ + uint32_t lck_rw_pad4; + uint32_t lck_rw_pad8; + uint32_t lck_rw_pad12; }; } lck_rw_t; -#define LCK_RW_T_SIZE 16 +#define LCK_RW_T_SIZE 16 static_assert(sizeof(lck_rw_t) == LCK_RW_T_SIZE); -#define LCK_RW_SHARED_SHIFT 0 -#define LCK_RW_INTERLOCK_BIT 16 -#define LCK_RW_PRIV_EXCL_BIT 24 -#define LCK_RW_WANT_UPGRADE_BIT 25 -#define LCK_RW_WANT_EXCL_BIT 26 -#define LCK_RW_R_WAITING_BIT 27 -#define LCK_RW_W_WAITING_BIT 28 -#define LCK_RW_CAN_SLEEP_BIT 29 - -#define LCK_RW_INTERLOCK (1 << LCK_RW_INTERLOCK_BIT) -#define LCK_RW_WANT_UPGRADE (1 << LCK_RW_WANT_UPGRADE_BIT) -#define LCK_RW_WANT_EXCL (1 << LCK_RW_WANT_EXCL_BIT) -#define LCK_RW_R_WAITING (1 << LCK_RW_R_WAITING_BIT) -#define LCK_RW_W_WAITING (1 << LCK_RW_W_WAITING_BIT) -#define LCK_RW_PRIV_EXCL (1 << LCK_RW_PRIV_EXCL_BIT) -#define LCK_RW_TAG_VALID (1 << LCK_RW_TAG_VALID_BIT) -#define LCK_RW_SHARED_MASK (0xffff << LCK_RW_SHARED_SHIFT) -#define LCK_RW_SHARED_READER (1 << LCK_RW_SHARED_SHIFT) - -#define LCK_RW_WANT_WRITE LCK_RW_WANT_EXCL - - -#define LCK_RW_ATTR_DEBUG 0x1 -#define LCK_RW_ATTR_DEBUGb 0 -#define LCK_RW_ATTR_STAT 0x2 -#define LCK_RW_ATTR_STATb 1 -#define LCK_RW_ATTR_READ_PRI 0x3 -#define LCK_RW_ATTR_READ_PRIb 2 -#define LCK_RW_ATTR_DIS_THREAD 0x40000000 -#define LCK_RW_ATTR_DIS_THREADb 30 -#define LCK_RW_ATTR_DIS_MYLOCK 0x10000000 -#define LCK_RW_ATTR_DIS_MYLOCKb 28 - -#define LCK_RW_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */ +#define LCK_RW_SHARED_SHIFT 0 +#define LCK_RW_INTERLOCK_BIT 16 +#define LCK_RW_PRIV_EXCL_BIT 24 +#define LCK_RW_WANT_UPGRADE_BIT 25 +#define LCK_RW_WANT_EXCL_BIT 26 +#define LCK_RW_R_WAITING_BIT 27 +#define LCK_RW_W_WAITING_BIT 28 +#define LCK_RW_CAN_SLEEP_BIT 29 + +#define LCK_RW_INTERLOCK (1 << LCK_RW_INTERLOCK_BIT) +#define LCK_RW_WANT_UPGRADE (1 << LCK_RW_WANT_UPGRADE_BIT) +#define LCK_RW_WANT_EXCL (1 << LCK_RW_WANT_EXCL_BIT) +#define 
LCK_RW_R_WAITING (1 << LCK_RW_R_WAITING_BIT) +#define LCK_RW_W_WAITING (1 << LCK_RW_W_WAITING_BIT) +#define LCK_RW_PRIV_EXCL (1 << LCK_RW_PRIV_EXCL_BIT) +#define LCK_RW_TAG_VALID (1 << LCK_RW_TAG_VALID_BIT) +#define LCK_RW_SHARED_MASK (0xffff << LCK_RW_SHARED_SHIFT) +#define LCK_RW_SHARED_READER (1 << LCK_RW_SHARED_SHIFT) + +#define LCK_RW_WANT_WRITE LCK_RW_WANT_EXCL + + +#define LCK_RW_ATTR_DEBUG 0x1 +#define LCK_RW_ATTR_DEBUGb 0 +#define LCK_RW_ATTR_STAT 0x2 +#define LCK_RW_ATTR_STATb 1 +#define LCK_RW_ATTR_READ_PRI 0x3 +#define LCK_RW_ATTR_READ_PRIb 2 +#define LCK_RW_ATTR_DIS_THREAD 0x40000000 +#define LCK_RW_ATTR_DIS_THREADb 30 +#define LCK_RW_ATTR_DIS_MYLOCK 0x10000000 +#define LCK_RW_ATTR_DIS_MYLOCKb 28 + +#define LCK_RW_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */ #define RW_LOCK_READER_EVENT(x) ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_tag)))) #define RW_LOCK_WRITER_EVENT(x) ((event_t) (((unsigned char*) (x)) + (offsetof(lck_rw_t, lck_rw_pad8)))) @@ -264,33 +265,35 @@ static_assert(sizeof(lck_rw_t) == LCK_RW_T_SIZE); #define disable_preemption_for_thread(t) ((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level++ #define preemption_disabled_for_thread(t) (((cpu_data_t GS_RELATIVE *)0UL)->cpu_preemption_level > 0) -#define LCK_MTX_THREAD_TO_STATE(t) ((uintptr_t)t) -#define PLATFORM_LCK_ILOCK 0 +#define LCK_MTX_THREAD_TO_STATE(t) ((uintptr_t)t) +#define PLATFORM_LCK_ILOCK 0 -#define LOCK_SNOOP_SPINS 1000 -#define LOCK_PRETEST 1 +#define LOCK_SNOOP_SPINS 1000 +#define LOCK_PRETEST 1 -/* Spinlock panic deadline, in mach_absolute_time units (ns on i386) */ -#define LOCK_PANIC_TIMEOUT 0xf00000 /* 250 ms (huge) */ +/* hw_lock_lock static panic deadline, in timebase units. hw_lock_to() uses + * LockTimeoutTSC computed at startup + */ +#define LOCK_PANIC_TIMEOUT 0xf000000 /* 251e6 TSC ticks */ -#endif // LOCK_PRIVATE +#endif // LOCK_PRIVATE #else -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #pragma pack(1) typedef struct { - uint32_t opaque[3]; - uint32_t opaque4; + uint32_t opaque[3]; + uint32_t opaque4; } lck_rw_t; #pragma pack() #else -typedef struct __lck_rw_t__ lck_rw_t; +typedef struct __lck_rw_t__ lck_rw_t; #endif #endif #ifdef MACH_KERNEL_PRIVATE -extern void kernel_preempt_check (void); +extern void kernel_preempt_check(void); #endif /* MACH_KERNEL_PRIVATE */ -#endif /* _I386_LOCKS_H_ */ +#endif /* _I386_LOCKS_H_ */ diff --git a/osfmk/i386/locks_i386.c b/osfmk/i386/locks_i386.c index bc1669f7f..5f693ff51 100644 --- a/osfmk/i386/locks_i386.c +++ b/osfmk/i386/locks_i386.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -66,6 +66,7 @@ #include +#include #include #include #include @@ -86,41 +87,32 @@ #include #include -/* - * We need only enough declarations from the BSD-side to be able to - * test if our probe is active, and to call __dtrace_probe(). Setting - * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in. 
- */ -#if CONFIG_DTRACE -#define NEED_DTRACE_DEFS -#include <../bsd/sys/lockstat.h> - -#define DTRACE_RW_SHARED 0x0 //reader -#define DTRACE_RW_EXCL 0x1 //writer -#define DTRACE_NO_FLAG 0x0 //not applicable - -#endif +#if CONFIG_DTRACE +#define DTRACE_RW_SHARED 0x0 //reader +#define DTRACE_RW_EXCL 0x1 //writer +#define DTRACE_NO_FLAG 0x0 //not applicable +#endif /* CONFIG_DTRACE */ -#define LCK_RW_LCK_EXCLUSIVE_CODE 0x100 -#define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101 -#define LCK_RW_LCK_SHARED_CODE 0x102 -#define LCK_RW_LCK_SH_TO_EX_CODE 0x103 -#define LCK_RW_LCK_SH_TO_EX1_CODE 0x104 -#define LCK_RW_LCK_EX_TO_SH_CODE 0x105 +#define LCK_RW_LCK_EXCLUSIVE_CODE 0x100 +#define LCK_RW_LCK_EXCLUSIVE1_CODE 0x101 +#define LCK_RW_LCK_SHARED_CODE 0x102 +#define LCK_RW_LCK_SH_TO_EX_CODE 0x103 +#define LCK_RW_LCK_SH_TO_EX1_CODE 0x104 +#define LCK_RW_LCK_EX_TO_SH_CODE 0x105 -#define LCK_RW_LCK_EX_WRITER_SPIN_CODE 0x106 -#define LCK_RW_LCK_EX_WRITER_WAIT_CODE 0x107 -#define LCK_RW_LCK_EX_READER_SPIN_CODE 0x108 -#define LCK_RW_LCK_EX_READER_WAIT_CODE 0x109 -#define LCK_RW_LCK_SHARED_SPIN_CODE 0x110 -#define LCK_RW_LCK_SHARED_WAIT_CODE 0x111 -#define LCK_RW_LCK_SH_TO_EX_SPIN_CODE 0x112 -#define LCK_RW_LCK_SH_TO_EX_WAIT_CODE 0x113 +#define LCK_RW_LCK_EX_WRITER_SPIN_CODE 0x106 +#define LCK_RW_LCK_EX_WRITER_WAIT_CODE 0x107 +#define LCK_RW_LCK_EX_READER_SPIN_CODE 0x108 +#define LCK_RW_LCK_EX_READER_WAIT_CODE 0x109 +#define LCK_RW_LCK_SHARED_SPIN_CODE 0x110 +#define LCK_RW_LCK_SHARED_WAIT_CODE 0x111 +#define LCK_RW_LCK_SH_TO_EX_SPIN_CODE 0x112 +#define LCK_RW_LCK_SH_TO_EX_WAIT_CODE 0x113 -#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG) +#define ANY_LOCK_DEBUG (USLOCK_DEBUG || LOCK_DEBUG || MUTEX_DEBUG) -unsigned int LcksOpts=0; +unsigned int LcksOpts = 0; #if DEVELOPMENT || DEBUG unsigned int LckDisablePreemptCheck = 0; @@ -128,15 +120,15 @@ unsigned int LckDisablePreemptCheck = 0; /* Forwards */ -#if USLOCK_DEBUG +#if USLOCK_DEBUG /* * Perform simple lock checks. */ -int uslock_check = 1; -int max_lock_loops = 100000000; -decl_simple_lock_data(extern , printf_lock) -decl_simple_lock_data(extern , panic_lock) -#endif /* USLOCK_DEBUG */ +int uslock_check = 1; +int max_lock_loops = 100000000; +decl_simple_lock_data(extern, printf_lock) +decl_simple_lock_data(extern, panic_lock) +#endif /* USLOCK_DEBUG */ extern unsigned int not_in_kdp; @@ -145,23 +137,23 @@ extern unsigned int not_in_kdp; * of the various lock routines. However, this information * is only used for debugging and statistics. */ -typedef void *pc_t; -#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS) -#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS) -#if ANY_LOCK_DEBUG -#define OBTAIN_PC(pc) ((pc) = GET_RETURN_PC()) -#define DECL_PC(pc) pc_t pc; -#else /* ANY_LOCK_DEBUG */ +typedef void *pc_t; +#define INVALID_PC ((void *) VM_MAX_KERNEL_ADDRESS) +#define INVALID_THREAD ((void *) VM_MAX_KERNEL_ADDRESS) +#if ANY_LOCK_DEBUG +#define OBTAIN_PC(pc) ((pc) = GET_RETURN_PC()) +#define DECL_PC(pc) pc_t pc; +#else /* ANY_LOCK_DEBUG */ #define DECL_PC(pc) -#ifdef lint +#ifdef lint /* * Eliminate lint complaints about unused local pc variables. 
*/ -#define OBTAIN_PC(pc) ++pc -#else /* lint */ -#define OBTAIN_PC(pc) -#endif /* lint */ -#endif /* USLOCK_DEBUG */ +#define OBTAIN_PC(pc) ++pc +#else /* lint */ +#define OBTAIN_PC(pc) +#endif /* lint */ +#endif /* USLOCK_DEBUG */ /* * atomic exchange API is a low level abstraction of the operations @@ -176,9 +168,9 @@ typedef void *pc_t; static uint32_t atomic_exchange_begin32(uint32_t *target, uint32_t *previous, enum memory_order ord) { - uint32_t val; + uint32_t val; - (void)ord; // Memory order not used + (void)ord; // Memory order not used val = __c11_atomic_load((_Atomic uint32_t *)target, memory_order_relaxed); *previous = val; return val; @@ -191,25 +183,29 @@ atomic_exchange_complete32(uint32_t *target, uint32_t previous, uint32_t newval, } static void -atomic_exchange_abort(void) { } +atomic_exchange_abort(void) +{ +} static boolean_t atomic_test_and_set32(uint32_t *target, uint32_t test_mask, uint32_t set_mask, enum memory_order ord, boolean_t wait) { - uint32_t value, prev; + uint32_t value, prev; - for ( ; ; ) { + for (;;) { value = atomic_exchange_begin32(target, &prev, ord); if (value & test_mask) { - if (wait) + if (wait) { cpu_pause(); - else + } else { atomic_exchange_abort(); + } return FALSE; } value |= set_mask; - if (atomic_exchange_complete32(target, prev, value, ord)) + if (atomic_exchange_complete32(target, prev, value, ord)) { return TRUE; + } } } @@ -217,18 +213,18 @@ atomic_test_and_set32(uint32_t *target, uint32_t test_mask, uint32_t set_mask, e * Portable lock package implementation of usimple_locks. */ -#if USLOCK_DEBUG -#define USLDBG(stmt) stmt -void usld_lock_init(usimple_lock_t, unsigned short); -void usld_lock_pre(usimple_lock_t, pc_t); -void usld_lock_post(usimple_lock_t, pc_t); -void usld_unlock(usimple_lock_t, pc_t); -void usld_lock_try_pre(usimple_lock_t, pc_t); -void usld_lock_try_post(usimple_lock_t, pc_t); -int usld_lock_common_checks(usimple_lock_t, char *); -#else /* USLOCK_DEBUG */ -#define USLDBG(stmt) -#endif /* USLOCK_DEBUG */ +#if USLOCK_DEBUG +#define USLDBG(stmt) stmt +void usld_lock_init(usimple_lock_t, unsigned short); +void usld_lock_pre(usimple_lock_t, pc_t); +void usld_lock_post(usimple_lock_t, pc_t); +void usld_unlock(usimple_lock_t, pc_t); +void usld_lock_try_pre(usimple_lock_t, pc_t); +void usld_lock_try_post(usimple_lock_t, pc_t); +int usld_lock_common_checks(usimple_lock_t, char *); +#else /* USLOCK_DEBUG */ +#define USLDBG(stmt) +#endif /* USLOCK_DEBUG */ /* * Forward definitions @@ -258,15 +254,16 @@ static boolean_t lck_mtx_try_lock_wait_interlock_to_clear(lck_mtx_t *lock, uint3 */ lck_spin_t * lck_spin_alloc_init( - lck_grp_t *grp, - lck_attr_t *attr) + lck_grp_t *grp, + lck_attr_t *attr) { - lck_spin_t *lck; + lck_spin_t *lck; - if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0) + if ((lck = (lck_spin_t *)kalloc(sizeof(lck_spin_t))) != 0) { lck_spin_init(lck, grp, attr); + } - return(lck); + return lck; } /* @@ -274,8 +271,8 @@ lck_spin_alloc_init( */ void lck_spin_free( - lck_spin_t *lck, - lck_grp_t *grp) + lck_spin_t *lck, + lck_grp_t *grp) { lck_spin_destroy(lck, grp); kfree(lck, sizeof(lck_spin_t)); @@ -286,9 +283,9 @@ lck_spin_free( */ void lck_spin_init( - lck_spin_t *lck, - lck_grp_t *grp, - __unused lck_attr_t *attr) + lck_spin_t *lck, + lck_grp_t *grp, + __unused lck_attr_t *attr) { usimple_lock_init((usimple_lock_t) lck, 0); lck_grp_reference(grp); @@ -300,11 +297,12 @@ lck_spin_init( */ void lck_spin_destroy( - lck_spin_t *lck, - lck_grp_t *grp) + lck_spin_t *lck, + lck_grp_t *grp) { - if 
(lck->interlock == LCK_SPIN_TAG_DESTROYED) + if (lck->interlock == LCK_SPIN_TAG_DESTROYED) { return; + } lck->interlock = LCK_SPIN_TAG_DESTROYED; lck_grp_lckcnt_decr(grp, LCK_TYPE_SPIN); lck_grp_deallocate(grp); @@ -314,11 +312,20 @@ lck_spin_destroy( /* * Routine: lck_spin_lock */ +void +lck_spin_lock_grp( + lck_spin_t *lck, + lck_grp_t *grp) +{ +#pragma unused(grp) + usimple_lock((usimple_lock_t) lck, grp); +} + void lck_spin_lock( - lck_spin_t *lck) + lck_spin_t *lck) { - usimple_lock((usimple_lock_t) lck); + usimple_lock((usimple_lock_t) lck, NULL); } /* @@ -326,26 +333,41 @@ lck_spin_lock( */ void lck_spin_unlock( - lck_spin_t *lck) + lck_spin_t *lck) { usimple_unlock((usimple_lock_t) lck); } +boolean_t +lck_spin_try_lock_grp( + lck_spin_t *lck, + lck_grp_t *grp) +{ +#pragma unused(grp) + boolean_t lrval = (boolean_t)usimple_lock_try((usimple_lock_t) lck, grp); +#if DEVELOPMENT || DEBUG + if (lrval) { + pltrace(FALSE); + } +#endif + return lrval; +} + /* * Routine: lck_spin_try_lock */ boolean_t lck_spin_try_lock( - lck_spin_t *lck) + lck_spin_t *lck) { - boolean_t lrval = (boolean_t)usimple_lock_try((usimple_lock_t) lck); -#if DEVELOPMENT || DEBUG + boolean_t lrval = (boolean_t)usimple_lock_try((usimple_lock_t) lck, LCK_GRP_NULL); +#if DEVELOPMENT || DEBUG if (lrval) { pltrace(FALSE); } #endif - return(lrval); + return lrval; } /* @@ -388,7 +410,8 @@ lck_spin_assert(lck_spin_t *lock, unsigned int type) * Returns: TRUE if lock is acquired. */ boolean_t -kdp_lck_spin_is_acquired(lck_spin_t *lck) { +kdp_lck_spin_is_acquired(lck_spin_t *lck) +{ if (not_in_kdp) { panic("panic: spinlock acquired check done outside of kernel debugger"); } @@ -402,21 +425,23 @@ kdp_lck_spin_is_acquired(lck_spin_t *lck) { */ void usimple_lock_init( - usimple_lock_t l, - __unused unsigned short tag) + usimple_lock_t l, + __unused unsigned short tag) { -#ifndef MACHINE_SIMPLE_LOCK +#ifndef MACHINE_SIMPLE_LOCK USLDBG(usld_lock_init(l, tag)); hw_lock_init(&l->interlock); #else - simple_lock_init((simple_lock_t)l,tag); + simple_lock_init((simple_lock_t)l, tag); #endif } volatile uint32_t spinlock_owner_cpu = ~0; volatile usimple_lock_t spinlock_timed_out; -uint32_t spinlock_timeout_NMI(uintptr_t thread_addr) { +uint32_t +spinlock_timeout_NMI(uintptr_t thread_addr) +{ uint32_t i; for (i = 0; i < real_ncpus; i++) { @@ -441,21 +466,23 @@ uint32_t spinlock_timeout_NMI(uintptr_t thread_addr) { * maintaining preemption state. 
*/ void -usimple_lock( - usimple_lock_t l) +(usimple_lock)( + usimple_lock_t l + LCK_GRP_ARG(lck_grp_t *grp)) { -#ifndef MACHINE_SIMPLE_LOCK +#ifndef MACHINE_SIMPLE_LOCK DECL_PC(pc); OBTAIN_PC(pc); USLDBG(usld_lock_pre(l, pc)); - if(__improbable(hw_lock_to(&l->interlock, LockTimeOutTSC) == 0)) { + if (__improbable(hw_lock_to(&l->interlock, LockTimeOutTSC, grp) == 0)) { boolean_t uslock_acquired = FALSE; while (machine_timeout_suspended()) { enable_preemption(); - if ((uslock_acquired = hw_lock_to(&l->interlock, LockTimeOutTSC))) + if ((uslock_acquired = hw_lock_to(&l->interlock, LockTimeOutTSC, grp))) { break; + } } if (uslock_acquired == FALSE) { @@ -464,19 +491,19 @@ usimple_lock( spinlock_timed_out = l; lock_cpu = spinlock_timeout_NMI(lowner); panic("Spinlock acquisition timed out: lock=%p, lock owner thread=0x%lx, current_thread: %p, lock owner active on CPU 0x%x, current owner: 0x%lx, time: %llu", - l, lowner, current_thread(), lock_cpu, (uintptr_t)l->interlock.lock_data, mach_absolute_time()); + l, lowner, current_thread(), lock_cpu, (uintptr_t)l->interlock.lock_data, mach_absolute_time()); } } #if DEVELOPMENT || DEBUG - pltrace(FALSE); + pltrace(FALSE); #endif USLDBG(usld_lock_post(l, pc)); #else - simple_lock((simple_lock_t)l); + simple_lock((simple_lock_t)l, grp); #endif #if CONFIG_DTRACE - LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, l, 0); + LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, l, 0, (uintptr_t)LCK_GRP_PROBEARG(grp)); #endif } @@ -490,15 +517,15 @@ usimple_lock( */ void usimple_unlock( - usimple_lock_t l) + usimple_lock_t l) { -#ifndef MACHINE_SIMPLE_LOCK +#ifndef MACHINE_SIMPLE_LOCK DECL_PC(pc); OBTAIN_PC(pc); USLDBG(usld_unlock(l, pc)); #if DEVELOPMENT || DEBUG - pltrace(TRUE); + pltrace(TRUE); #endif hw_lock_unlock(&l->interlock); #else @@ -521,23 +548,24 @@ usimple_unlock( */ unsigned int usimple_lock_try( - usimple_lock_t l) + usimple_lock_t l, + lck_grp_t *grp) { -#ifndef MACHINE_SIMPLE_LOCK - unsigned int success; +#ifndef MACHINE_SIMPLE_LOCK + unsigned int success; DECL_PC(pc); OBTAIN_PC(pc); USLDBG(usld_lock_try_pre(l, pc)); - if ((success = hw_lock_try(&l->interlock))) { + if ((success = hw_lock_try(&l->interlock, grp))) { #if DEVELOPMENT || DEBUG pltrace(FALSE); #endif - USLDBG(usld_lock_try_post(l, pc)); + USLDBG(usld_lock_try_post(l, pc)); } return success; #else - return(simple_lock_try((simple_lock_t)l)); + return simple_lock_try((simple_lock_t)l, grp); #endif } @@ -547,32 +575,33 @@ usimple_lock_try( * */ void -usimple_lock_try_lock_loop(usimple_lock_t l) +usimple_lock_try_lock_loop(usimple_lock_t l, lck_grp_t *grp) { boolean_t istate = ml_get_interrupts_enabled(); - while (!simple_lock_try((l))) { - if (!istate) + while (!simple_lock_try(l, grp)) { + if (!istate) { handle_pending_TLB_flushes(); + } cpu_pause(); } } -#if USLOCK_DEBUG +#if USLOCK_DEBUG /* * States of a usimple_lock. The default when initializing * a usimple_lock is setting it up for debug checking. 
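Two idioms in the hunk above deserve a note. The definition is spelled (usimple_lock)(...) — the parentheses are the usual way to keep a same-named function-like macro from expanding at the definition — and the group pointer arrives through LCK_GRP_ARG(), which makes the extra argument (comma included) vanish when group statistics are compiled out. A minimal sketch of that conditional-argument trick; the macro and type names below are invented for illustration:

#include <stdio.h>

/* When stats are enabled the macro emits ", expr"; when disabled it emits
 * nothing, so the group argument disappears from every prototype and call
 * site at once. */
#define MY_LOCK_STATS 1

#if MY_LOCK_STATS
#define GRP_ARG(expr) , expr
#else
#define GRP_ARG(expr)
#endif

struct grp { const char *name; };

static void
my_lock(int *word GRP_ARG(struct grp *g))
{
#if MY_LOCK_STATS
    printf("lock %p charged to group %s\n", (void *)word, g->name);
#endif
    *word = 1;
}

int
main(void)
{
    int word = 0;
    struct grp g = { "demo" };
    my_lock(&word GRP_ARG(&g));
    return 0;
}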
*/ -#define USLOCK_CHECKED 0x0001 /* lock is being checked */ -#define USLOCK_TAKEN 0x0002 /* lock has been taken */ -#define USLOCK_INIT 0xBAA0 /* lock has been initialized */ -#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED) -#define USLOCK_CHECKING(l) (uslock_check && \ - ((l)->debug.state & USLOCK_CHECKED)) +#define USLOCK_CHECKED 0x0001 /* lock is being checked */ +#define USLOCK_TAKEN 0x0002 /* lock has been taken */ +#define USLOCK_INIT 0xBAA0 /* lock has been initialized */ +#define USLOCK_INITIALIZED (USLOCK_INIT|USLOCK_CHECKED) +#define USLOCK_CHECKING(l) (uslock_check && \ + ((l)->debug.state & USLOCK_CHECKED)) /* * Trace activities of a particularly interesting lock. */ -void usl_trace(usimple_lock_t, int, pc_t, const char *); +void usl_trace(usimple_lock_t, int, pc_t, const char *); /* @@ -581,11 +610,12 @@ void usl_trace(usimple_lock_t, int, pc_t, const char *); */ void usld_lock_init( - usimple_lock_t l, - __unused unsigned short tag) + usimple_lock_t l, + __unused unsigned short tag) { - if (l == USIMPLE_LOCK_NULL) + if (l == USIMPLE_LOCK_NULL) { panic("lock initialization: null lock pointer"); + } l->lock_type = USLOCK_TAG; l->debug.state = uslock_check ? USLOCK_INITIALIZED : 0; l->debug.lock_cpu = l->debug.unlock_cpu = 0; @@ -604,15 +634,18 @@ usld_lock_init( */ int usld_lock_common_checks( - usimple_lock_t l, - char *caller) + usimple_lock_t l, + char *caller) { - if (l == USIMPLE_LOCK_NULL) + if (l == USIMPLE_LOCK_NULL) { panic("%s: null lock pointer", caller); - if (l->lock_type != USLOCK_TAG) + } + if (l->lock_type != USLOCK_TAG) { panic("%s: %p is not a usimple lock, 0x%x", caller, l, l->lock_type); - if (!(l->debug.state & USLOCK_INIT)) + } + if (!(l->debug.state & USLOCK_INIT)) { panic("%s: %p is not an initialized lock, 0x%x", caller, l, l->debug.state); + } return USLOCK_CHECKING(l); } @@ -624,14 +657,15 @@ usld_lock_common_checks( /* ARGSUSED */ void usld_lock_pre( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { - char caller[] = "usimple_lock"; + char caller[] = "usimple_lock"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } /* * Note that we have a weird case where we are getting a lock when we are] @@ -644,9 +678,9 @@ usld_lock_pre( if ((l->debug.state & USLOCK_TAKEN) && l->debug.lock_thread && l->debug.lock_thread == (void *) current_thread()) { printf("%s: lock %p already locked (at %p) by", - caller, l, l->debug.lock_pc); + caller, l, l->debug.lock_pc); printf(" current thread %p (new attempt at pc %p)\n", - l->debug.lock_thread, pc); + l->debug.lock_thread, pc); panic("%s", caller); } mp_disable_preemption(); @@ -663,22 +697,25 @@ usld_lock_pre( */ void usld_lock_post( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { - int mycpu; - char caller[] = "successful usimple_lock"; + int mycpu; + char caller[] = "successful usimple_lock"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } - if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) + if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) { panic("%s: lock %p became uninitialized", - caller, l); - if ((l->debug.state & USLOCK_TAKEN)) + caller, l); + } + if ((l->debug.state & USLOCK_TAKEN)) { panic("%s: lock 0x%p became TAKEN by someone else", - caller, l); + caller, l); + } mycpu = cpu_number(); l->debug.lock_thread = (void *)current_thread(); @@ -700,27 +737,30 @@ usld_lock_post( */ void usld_unlock( - usimple_lock_t l, - pc_t pc) + usimple_lock_t 
l, + pc_t pc) { - int mycpu; - char caller[] = "usimple_unlock"; + int mycpu; + char caller[] = "usimple_unlock"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } mycpu = cpu_number(); - if (!(l->debug.state & USLOCK_TAKEN)) + if (!(l->debug.state & USLOCK_TAKEN)) { panic("%s: lock 0x%p hasn't been taken", - caller, l); - if (l->debug.lock_thread != (void *) current_thread()) + caller, l); + } + if (l->debug.lock_thread != (void *) current_thread()) { panic("%s: unlocking lock 0x%p, owned by thread %p", - caller, l, l->debug.lock_thread); + caller, l, l->debug.lock_thread); + } if (l->debug.lock_cpu != mycpu) { printf("%s: unlocking lock 0x%p on cpu 0x%x", - caller, l, mycpu); + caller, l, mycpu); printf(" (acquired on cpu 0x%x)\n", l->debug.lock_cpu); panic("%s", caller); } @@ -742,13 +782,14 @@ usld_unlock( */ void usld_lock_try_pre( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { - char caller[] = "usimple_lock_try"; + char caller[] = "usimple_lock_try"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } mp_disable_preemption(); usl_trace(l, cpu_number(), pc, caller); mp_enable_preemption(); @@ -765,21 +806,24 @@ usld_lock_try_pre( */ void usld_lock_try_post( - usimple_lock_t l, - pc_t pc) + usimple_lock_t l, + pc_t pc) { - int mycpu; - char caller[] = "successful usimple_lock_try"; + int mycpu; + char caller[] = "successful usimple_lock_try"; - if (!usld_lock_common_checks(l, caller)) + if (!usld_lock_common_checks(l, caller)) { return; + } - if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) + if (!((l->debug.state & ~USLOCK_TAKEN) == USLOCK_INITIALIZED)) { panic("%s: lock 0x%p became uninitialized", - caller, l); - if ((l->debug.state & USLOCK_TAKEN)) + caller, l); + } + if ((l->debug.state & USLOCK_TAKEN)) { panic("%s: lock 0x%p became TAKEN by someone else", - caller, l); + caller, l); + } mycpu = cpu_number(); l->debug.lock_thread = (void *) current_thread(); @@ -797,15 +841,15 @@ usld_lock_try_post( * XPRs showing lock operations on that lock. The lock_seq * value is used to show the order of those operations. */ -usimple_lock_t traced_lock; -unsigned int lock_seq; +usimple_lock_t traced_lock; +unsigned int lock_seq; void usl_trace( - usimple_lock_t l, - int mycpu, - pc_t pc, - const char * op_name) + usimple_lock_t l, + int mycpu, + pc_t pc, + const char * op_name) { if (traced_lock == l) { XPR(XPR_SLOCK, @@ -817,23 +861,24 @@ usl_trace( } -#endif /* USLOCK_DEBUG */ +#endif /* USLOCK_DEBUG */ /* * Routine: lck_rw_alloc_init */ lck_rw_t * lck_rw_alloc_init( - lck_grp_t *grp, - lck_attr_t *attr) { - lck_rw_t *lck; + lck_grp_t *grp, + lck_attr_t *attr) +{ + lck_rw_t *lck; if ((lck = (lck_rw_t *)kalloc(sizeof(lck_rw_t))) != 0) { bzero(lck, sizeof(lck_rw_t)); lck_rw_init(lck, grp, attr); } - return(lck); + return lck; } /* @@ -841,8 +886,9 @@ lck_rw_alloc_init( */ void lck_rw_free( - lck_rw_t *lck, - lck_grp_t *grp) { + lck_rw_t *lck, + lck_grp_t *grp) +{ lck_rw_destroy(lck, grp); kfree(lck, sizeof(lck_rw_t)); } @@ -852,12 +898,12 @@ lck_rw_free( */ void lck_rw_init( - lck_rw_t *lck, - lck_grp_t *grp, - lck_attr_t *attr) + lck_rw_t *lck, + lck_grp_t *grp, + lck_attr_t *attr) { - lck_attr_t *lck_attr = (attr != LCK_ATTR_NULL) ? - attr : &LockDefaultLckAttr; + lck_attr_t *lck_attr = (attr != LCK_ATTR_NULL) ? 
+ attr : &LockDefaultLckAttr; hw_lock_byte_init(&lck->lck_rw_interlock); lck->lck_rw_want_write = FALSE; @@ -867,7 +913,7 @@ lck_rw_init( lck->lck_r_waiting = lck->lck_w_waiting = 0; lck->lck_rw_tag = 0; lck->lck_rw_priv_excl = ((lck_attr->lck_attr_val & - LCK_ATTR_RW_SHARED_PRIORITY) == 0); + LCK_ATTR_RW_SHARED_PRIORITY) == 0); lck_grp_reference(grp); lck_grp_lckcnt_incr(grp, LCK_TYPE_RW); @@ -878,11 +924,12 @@ lck_rw_init( */ void lck_rw_destroy( - lck_rw_t *lck, - lck_grp_t *grp) + lck_rw_t *lck, + lck_grp_t *grp) { - if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED) + if (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED) { return; + } #if MACH_LDEBUG lck_rw_assert(lck, LCK_RW_ASSERT_NOTHELD); #endif @@ -908,16 +955,16 @@ lck_rw_destroy( static inline boolean_t lck_interlock_lock(lck_rw_t *lck) { - boolean_t istate; + boolean_t istate; - istate = ml_set_interrupts_enabled(FALSE); + istate = ml_set_interrupts_enabled(FALSE); hw_lock_byte_lock(&lck->lck_rw_interlock); return istate; } static inline void lck_interlock_unlock(lck_rw_t *lck, boolean_t istate) -{ +{ hw_lock_byte_unlock(&lck->lck_rw_interlock); ml_set_interrupts_enabled(istate); } @@ -931,16 +978,18 @@ lck_interlock_unlock(lck_rw_t *lck, boolean_t istate) static inline void lck_rw_lock_pause(boolean_t interrupts_enabled) { - if (!interrupts_enabled) + if (!interrupts_enabled) { handle_pending_TLB_flushes(); + } cpu_pause(); } static inline boolean_t lck_rw_held_read_or_upgrade(lck_rw_t *lock) { - if (ordered_load(&lock->data) & (LCK_RW_SHARED_MASK | LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE)) + if (ordered_load(&lock->data) & (LCK_RW_SHARED_MASK | LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE)) { return TRUE; + } return FALSE; } @@ -955,7 +1004,7 @@ lck_rw_deadline_for_spin(lck_rw_t *lck) if (lck->lck_r_waiting || lck->lck_w_waiting || lck->lck_rw_shared_count > machine_info.max_cpus) { /* * there are already threads waiting on this lock... this - * implies that they have spun beyond their deadlines waiting for + * implies that they have spun beyond their deadlines waiting for * the desired state to show up so we will not bother spinning at this time... * or * the current number of threads sharing this lock exceeds our capacity to run them @@ -963,11 +1012,12 @@ lck_rw_deadline_for_spin(lck_rw_t *lck) * to be at 0, we'll not bother spinning since the latency for this to happen is * unpredictable... 
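A note on lck_interlock_lock() above: the interlock is a byte lock taken with interrupts masked, and the prior interrupt state is handed back to the caller so lck_interlock_unlock() can restore exactly what was there. The same save/take/restore shape in a self-contained C11 sketch — the interrupt flag is simulated, and set_interrupts_enabled() is a stand-in for ml_set_interrupts_enabled():

#include <stdatomic.h>
#include <stdbool.h>

static bool cpu_ints_enabled = true;       /* stand-in for the CPU IF flag */

static bool
set_interrupts_enabled(bool enable)
{
    bool old = cpu_ints_enabled;
    cpu_ints_enabled = enable;
    return old;                            /* caller keeps this istate */
}

static atomic_flag interlock = ATOMIC_FLAG_INIT;

/* Take the interlock with interrupts masked; return the previous state. */
static bool
interlock_lock(void)
{
    bool istate = set_interrupts_enabled(false);
    while (atomic_flag_test_and_set_explicit(&interlock, memory_order_acquire)) {
        /* spin until the byte lock clears */
    }
    return istate;
}

static void
interlock_unlock(bool istate)
{
    atomic_flag_clear_explicit(&interlock, memory_order_release);
    set_interrupts_enabled(istate);        /* restore the caller's state */
}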
*/ - return (mach_absolute_time()); + return mach_absolute_time(); } - return (mach_absolute_time() + MutexSpin); - } else - return (mach_absolute_time() + (100000LL * 1000000000LL)); + return mach_absolute_time() + MutexSpin; + } else { + return mach_absolute_time() + (100000LL * 1000000000LL); + } } @@ -986,12 +1036,13 @@ lck_rw_interlock_spin(lck_rw_t *lock) static boolean_t lck_rw_grab_want(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->data, &prev, memory_order_relaxed); - if ((data & LCK_RW_INTERLOCK) == 0) + if ((data & LCK_RW_INTERLOCK) == 0) { break; + } atomic_exchange_abort(); lck_rw_interlock_spin(lock); } @@ -1006,12 +1057,13 @@ lck_rw_grab_want(lck_rw_t *lock) static boolean_t lck_rw_grab_shared(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->data, &prev, memory_order_acquire_smp); - if ((data & LCK_RW_INTERLOCK) == 0) + if ((data & LCK_RW_INTERLOCK) == 0) { break; + } atomic_exchange_abort(); lck_rw_interlock_spin(lock); } @@ -1030,19 +1082,19 @@ lck_rw_grab_shared(lck_rw_t *lock) */ static void lck_rw_lock_exclusive_gen( - lck_rw_t *lck) + lck_rw_t *lck) { - __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); - uint64_t deadline = 0; - int slept = 0; - int gotlock = 0; - int lockheld = 0; - wait_result_t res = 0; - boolean_t istate = -1; + __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); + uint64_t deadline = 0; + int slept = 0; + int gotlock = 0; + int lockheld = 0; + wait_result_t res = 0; + boolean_t istate = -1; -#if CONFIG_DTRACE +#if CONFIG_DTRACE boolean_t dtrace_ls_initialized = FALSE; - boolean_t dtrace_rwl_excl_spin, dtrace_rwl_excl_block, dtrace_ls_enabled= FALSE; + boolean_t dtrace_rwl_excl_spin, dtrace_rwl_excl_block, dtrace_ls_enabled = FALSE; uint64_t wait_interval = 0; int readers_at_sleep = 0; #endif @@ -1050,9 +1102,8 @@ lck_rw_lock_exclusive_gen( /* * Try to acquire the lck_rw_want_write bit.
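Restating the spin policy from lck_rw_deadline_for_spin() above: existing waiters or a reader count beyond the machine's CPU capacity mean spinning is pointless, sleepable locks get a bounded budget, and spin-only locks are handed an effectively infinite deadline. A user-space restatement under stated assumptions — now_ns() and SPIN_BUDGET_NS stand in for mach_absolute_time() and MutexSpin:

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define SPIN_BUDGET_NS (50 * 1000)   /* assumed MutexSpin equivalent */

static uint64_t
now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static uint64_t
deadline_for_spin(bool can_sleep, bool have_waiters,
    uint32_t shared_count, uint32_t max_cpus)
{
    if (can_sleep) {
        if (have_waiters || shared_count > max_cpus) {
            return now_ns();                  /* already expired: don't spin */
        }
        return now_ns() + SPIN_BUDGET_NS;     /* bounded spin, then block */
    }
    /* spin-only lock: ~100000 seconds, i.e. effectively forever */
    return now_ns() + 100000ull * 1000000000ull;
}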
*/ - while ( !lck_rw_grab_want(lck)) { - -#if CONFIG_DTRACE + while (!lck_rw_grab_want(lck)) { +#if CONFIG_DTRACE if (dtrace_ls_initialized == FALSE) { dtrace_ls_initialized = TRUE; dtrace_rwl_excl_spin = (lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] != 0); @@ -1068,38 +1119,39 @@ lck_rw_lock_exclusive_gen( } } #endif - if (istate == -1) + if (istate == -1) { istate = ml_get_interrupts_enabled(); + } deadline = lck_rw_deadline_for_spin(lck); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0); - - while (((gotlock = lck_rw_grab_want(lck)) == 0) && mach_absolute_time() < deadline) + + while (((gotlock = lck_rw_grab_want(lck)) == 0) && mach_absolute_time() < deadline) { lck_rw_lock_pause(istate); + } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_SPIN_CODE) | DBG_FUNC_END, trace_lck, 0, 0, gotlock, 0); - if (gotlock) + if (gotlock) { break; + } /* * if we get here, the deadline has expired w/o us * being able to grab the lock exclusively * check to see if we're allowed to do a thread_block */ if (lck->lck_rw_can_sleep) { - istate = lck_interlock_lock(lck); if (lck->lck_rw_want_write) { - KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_WRITER_WAIT_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0); lck->lck_w_waiting = TRUE; thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockWrite); res = assert_wait(RW_LOCK_WRITER_EVENT(lck), - THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); + THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); lck_interlock_unlock(lck, istate); if (res == THREAD_WAITING) { @@ -1126,8 +1178,7 @@ lck_rw_lock_exclusive_gen( * and the interlock not held, we are safe to proceed */ while (lck_rw_held_read_or_upgrade(lck)) { - -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * Either sleeping or spinning is happening, start * a timing of our delay interval now. If we set it @@ -1149,27 +1200,29 @@ lck_rw_lock_exclusive_gen( } } #endif - if (istate == -1) + if (istate == -1) { istate = ml_get_interrupts_enabled(); + } deadline = lck_rw_deadline_for_spin(lck); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_START, trace_lck, 0, 0, 0, 0); - while ((lockheld = lck_rw_held_read_or_upgrade(lck)) && mach_absolute_time() < deadline) + while ((lockheld = lck_rw_held_read_or_upgrade(lck)) && mach_absolute_time() < deadline) { lck_rw_lock_pause(istate); + } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_READER_SPIN_CODE) | DBG_FUNC_END, trace_lck, 0, 0, lockheld, 0); - if ( !lockheld) + if (!lockheld) { break; + } /* * if we get here, the deadline has expired w/o us * being able to grab the lock exclusively * check to see if we're allowed to do a thread_block */ if (lck->lck_rw_can_sleep) { - istate = lck_interlock_lock(lck); if (lck->lck_rw_shared_count != 0 || lck->lck_rw_want_upgrade) { @@ -1179,7 +1232,7 @@ lck_rw_lock_exclusive_gen( thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockWrite); res = assert_wait(RW_LOCK_WRITER_EVENT(lck), - THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); + THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); lck_interlock_unlock(lck, istate); if (res == THREAD_WAITING) { @@ -1199,7 +1252,7 @@ lck_rw_lock_exclusive_gen( } } -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * Decide what latencies we suffered that are Dtrace events. * If we have set wait_interval, then we either spun or slept. 
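The exclusive-acquire path above repeats one pattern for each bit it needs: spin on the desired state until the deadline from lck_rw_deadline_for_spin() passes, then take the interlock, re-check, and sleep via assert_wait()/thread_block(). A condensed pthread-based analog of that spin-then-block shape — a sketch only, not the kernel's wait machinery:

#include <pthread.h>
#include <stdbool.h>

struct waitable_flag {
    pthread_mutex_t mtx;     /* plays the part of the interlock */
    pthread_cond_t  cv;      /* sleep/wakeup channel */
    bool            busy;
};

/* One round of the pattern: spin a bounded number of times, then take the
 * interlock, re-check, and sleep if the state still isn't available. */
static void
acquire(struct waitable_flag *f, unsigned spin_tries)
{
    for (;;) {
        for (unsigned i = 0; i < spin_tries; i++) {
            if (!__atomic_load_n(&f->busy, __ATOMIC_RELAXED)) {
                break;       /* looks free: go try for real */
            }
            /* a cpu_pause() equivalent belongs here */
        }
        pthread_mutex_lock(&f->mtx);
        if (!f->busy) {
            f->busy = true;                     /* acquired */
            pthread_mutex_unlock(&f->mtx);
            return;
        }
        /* equivalent of assert_wait() + thread_block() */
        pthread_cond_wait(&f->cv, &f->mtx);
        pthread_mutex_unlock(&f->mtx);
    }
}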
@@ -1211,7 +1264,7 @@ lck_rw_lock_exclusive_gen( */ if (dtrace_ls_enabled == TRUE) { if (slept == 0) { - LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_EXCL_SPIN, lck, + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_SPIN, lck, mach_absolute_time() - wait_interval, 1); } else { /* @@ -1220,7 +1273,7 @@ lck_rw_lock_exclusive_gen( * Notice that above we recorded this before we dropped * the interlock so the count is accurate. */ - LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_EXCL_BLOCK, lck, + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_BLOCK, lck, mach_absolute_time() - wait_interval, 1, (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep); } @@ -1233,40 +1286,46 @@ lck_rw_lock_exclusive_gen( * Routine: lck_rw_done */ -lck_rw_type_t lck_rw_done(lck_rw_t *lock) +lck_rw_type_t +lck_rw_done(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->data, &prev, memory_order_release_smp); - if (data & LCK_RW_INTERLOCK) { /* wait for interlock to clear */ + if (data & LCK_RW_INTERLOCK) { /* wait for interlock to clear */ atomic_exchange_abort(); lck_rw_interlock_spin(lock); continue; } if (data & LCK_RW_SHARED_MASK) { data -= LCK_RW_SHARED_READER; - if ((data & LCK_RW_SHARED_MASK) == 0) /* if reader count has now gone to 0, check for waiters */ + if ((data & LCK_RW_SHARED_MASK) == 0) { /* if reader count has now gone to 0, check for waiters */ goto check_waiters; - } else { /* if reader count == 0, must be exclusive lock */ + } + } else { /* if reader count == 0, must be exclusive lock */ if (data & LCK_RW_WANT_UPGRADE) { data &= ~(LCK_RW_WANT_UPGRADE); } else { - if (data & LCK_RW_WANT_WRITE) + if (data & LCK_RW_WANT_WRITE) { data &= ~(LCK_RW_WANT_EXCL); - else /* lock is not 'owned', panic */ + } else { /* lock is not 'owned', panic */ panic("Releasing non-exclusive RW lock without a reader refcount!"); + } } check_waiters: if (prev & LCK_RW_W_WAITING) { data &= ~(LCK_RW_W_WAITING); - if ((prev & LCK_RW_PRIV_EXCL) == 0) + if ((prev & LCK_RW_PRIV_EXCL) == 0) { data &= ~(LCK_RW_R_WAITING); - } else + } + } else { data &= ~(LCK_RW_R_WAITING); + } } - if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_release_smp)) + if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_release_smp)) { break; + } cpu_pause(); } return lck_rw_done_gen(lock, prev); @@ -1277,13 +1336,13 @@ check_waiters: * * called from lck_rw_done() * prior_lock_state is the value in the 1st - * word of the lock at the time of a successful + * word of the lock at the time of a successful * atomic compare and exchange with the new value... - * it represents the state of the lock before we + * it represents the state of the lock before we * decremented the rw_shared_count or cleared either - * rw_want_upgrade or rw_want_write and + * rw_want_upgrade or rw_want_write and * the lck_x_waiting bits... since the wrapper - * routine has already changed the state atomically, + * routine has already changed the state atomically, * we just need to decide if we should * wake up anyone and what value to return... 
we do * this by examining the state of the lock before @@ -1291,52 +1350,58 @@ check_waiters: */ static lck_rw_type_t lck_rw_done_gen( - lck_rw_t *lck, - uint32_t prior_lock_state) + lck_rw_t *lck, + uint32_t prior_lock_state) { - lck_rw_t *fake_lck; - lck_rw_type_t lock_type; - thread_t thread; - uint32_t rwlock_count; + lck_rw_t *fake_lck; + lck_rw_type_t lock_type; + thread_t thread; + uint32_t rwlock_count; - /* - * prior_lock state is a snapshot of the 1st word of the - * lock in question... we'll fake up a pointer to it - * and carefully not access anything beyond whats defined - * in the first word of a lck_rw_t - */ + thread = current_thread(); + rwlock_count = thread->rwlock_count--; fake_lck = (lck_rw_t *)&prior_lock_state; - if (fake_lck->lck_rw_shared_count <= 1) { - if (fake_lck->lck_w_waiting) - thread_wakeup(RW_LOCK_WRITER_EVENT(lck)); + if (lck->lck_rw_can_sleep) { + /* + * prior_lock state is a snapshot of the 1st word of the + * lock in question... we'll fake up a pointer to it + * and carefully not access anything beyond whats defined + * in the first word of a lck_rw_t + */ - if (!(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting) && fake_lck->lck_r_waiting) - thread_wakeup(RW_LOCK_READER_EVENT(lck)); - } - if (fake_lck->lck_rw_shared_count) - lock_type = LCK_RW_TYPE_SHARED; - else - lock_type = LCK_RW_TYPE_EXCLUSIVE; + if (fake_lck->lck_rw_shared_count <= 1) { + if (fake_lck->lck_w_waiting) { + thread_wakeup(RW_LOCK_WRITER_EVENT(lck)); + } - /* Check if dropping the lock means that we need to unpromote */ - thread = current_thread(); - rwlock_count = thread->rwlock_count--; + if (!(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting) && fake_lck->lck_r_waiting) { + thread_wakeup(RW_LOCK_READER_EVENT(lck)); + } + } #if MACH_LDEBUG - if (rwlock_count == 0) { - panic("rw lock count underflow for thread %p", thread); - } + if (rwlock_count == 0) { + panic("rw lock count underflow for thread %p", thread); + } #endif - if ((rwlock_count == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { - /* sched_flags checked without lock, but will be rechecked while clearing */ - lck_rw_clear_promotion(thread, unslide_for_kdebug(lck)); + /* Check if dropping the lock means that we need to unpromote */ + + if ((rwlock_count == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { + /* sched_flags checked without lock, but will be rechecked while clearing */ + lck_rw_clear_promotion(thread, unslide_for_kdebug(lck)); + } + } + if (fake_lck->lck_rw_shared_count) { + lock_type = LCK_RW_TYPE_SHARED; + } else { + lock_type = LCK_RW_TYPE_EXCLUSIVE; } #if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_DONE_RELEASE, lck, lock_type == LCK_RW_TYPE_SHARED ? 
0 : 1); #endif - return(lock_type); + return lock_type; } @@ -1345,15 +1410,16 @@ lck_rw_done_gen( */ void lck_rw_unlock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type) + lck_rw_t *lck, + lck_rw_type_t lck_rw_type) { - if (lck_rw_type == LCK_RW_TYPE_SHARED) + if (lck_rw_type == LCK_RW_TYPE_SHARED) { lck_rw_unlock_shared(lck); - else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) + } else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) { lck_rw_unlock_exclusive(lck); - else + } else { panic("lck_rw_unlock(): Invalid RW lock type: %d\n", lck_rw_type); + } } @@ -1362,15 +1428,16 @@ lck_rw_unlock( */ void lck_rw_unlock_shared( - lck_rw_t *lck) + lck_rw_t *lck) { - lck_rw_type_t ret; + lck_rw_type_t ret; assertf(lck->lck_rw_shared_count > 0, "lck %p has shared_count=0x%x", lck, lck->lck_rw_shared_count); ret = lck_rw_done(lck); - if (ret != LCK_RW_TYPE_SHARED) + if (ret != LCK_RW_TYPE_SHARED) { panic("lck_rw_unlock_shared(): lock %p held in mode: %d\n", lck, ret); + } } @@ -1379,14 +1446,15 @@ lck_rw_unlock_shared( */ void lck_rw_unlock_exclusive( - lck_rw_t *lck) + lck_rw_t *lck) { - lck_rw_type_t ret; + lck_rw_type_t ret; ret = lck_rw_done(lck); - if (ret != LCK_RW_TYPE_EXCLUSIVE) + if (ret != LCK_RW_TYPE_EXCLUSIVE) { panic("lck_rw_unlock_exclusive(): lock held in mode: %d\n", ret); + } } @@ -1395,15 +1463,16 @@ lck_rw_unlock_exclusive( */ void lck_rw_lock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type) + lck_rw_t *lck, + lck_rw_type_t lck_rw_type) { - if (lck_rw_type == LCK_RW_TYPE_SHARED) + if (lck_rw_type == LCK_RW_TYPE_SHARED) { lck_rw_lock_shared(lck); - else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) + } else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) { lck_rw_lock_exclusive(lck); - else + } else { panic("lck_rw_lock(): Invalid RW lock type: %x\n", lck_rw_type); + } } /* @@ -1412,24 +1481,30 @@ lck_rw_lock( void lck_rw_lock_shared(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; current_thread()->rwlock_count++; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->data, &prev, memory_order_acquire_smp); if (data & (LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK)) { atomic_exchange_abort(); - lck_rw_lock_shared_gen(lock); + if (lock->lck_rw_can_sleep) { + lck_rw_lock_shared_gen(lock); + } else { + cpu_pause(); + continue; + } break; } data += LCK_RW_SHARED_READER; - if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) + if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) { break; + } cpu_pause(); } -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, lock, DTRACE_RW_SHARED); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ return; } @@ -1442,25 +1517,24 @@ lck_rw_lock_shared(lck_rw_t *lock) */ static void lck_rw_lock_shared_gen( - lck_rw_t *lck) + lck_rw_t *lck) { - __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); - uint64_t deadline = 0; - int gotlock = 0; - int slept = 0; - wait_result_t res = 0; - boolean_t istate = -1; + __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); + uint64_t deadline = 0; + int gotlock = 0; + int slept = 0; + wait_result_t res = 0; + boolean_t istate = -1; -#if CONFIG_DTRACE +#if CONFIG_DTRACE uint64_t wait_interval = 0; int readers_at_sleep = 0; boolean_t dtrace_ls_initialized = FALSE; boolean_t dtrace_rwl_shared_spin, dtrace_rwl_shared_block, dtrace_ls_enabled = FALSE; #endif - while ( !lck_rw_grab_shared(lck)) { - -#if CONFIG_DTRACE + while (!lck_rw_grab_shared(lck)) { +#if CONFIG_DTRACE if (dtrace_ls_initialized 
== FALSE) { dtrace_ls_initialized = TRUE; dtrace_rwl_shared_spin = (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_SPIN] != 0); @@ -1476,42 +1550,43 @@ lck_rw_lock_shared_gen( } } #endif - if (istate == -1) + if (istate == -1) { istate = ml_get_interrupts_enabled(); + } deadline = lck_rw_deadline_for_spin(lck); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_SPIN_CODE) | DBG_FUNC_START, - trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0); + trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0); - while (((gotlock = lck_rw_grab_shared(lck)) == 0) && mach_absolute_time() < deadline) + while (((gotlock = lck_rw_grab_shared(lck)) == 0) && mach_absolute_time() < deadline) { lck_rw_lock_pause(istate); + } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_SPIN_CODE) | DBG_FUNC_END, - trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, gotlock, 0); + trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, gotlock, 0); - if (gotlock) + if (gotlock) { break; + } /* * if we get here, the deadline has expired w/o us * being able to grab the lock for read * check to see if we're allowed to do a thread_block */ if (lck->lck_rw_can_sleep) { - istate = lck_interlock_lock(lck); if ((lck->lck_rw_want_write || lck->lck_rw_want_upgrade) && ((lck->lck_rw_shared_count == 0) || lck->lck_rw_priv_excl)) { - KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_WAIT_CODE) | DBG_FUNC_START, - trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0); + trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, 0, 0); lck->lck_r_waiting = TRUE; thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockRead); res = assert_wait(RW_LOCK_READER_EVENT(lck), - THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); + THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); lck_interlock_unlock(lck, istate); if (res == THREAD_WAITING) { @@ -1519,7 +1594,7 @@ lck_rw_lock_shared_gen( slept++; } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_WAIT_CODE) | DBG_FUNC_END, - trace_lck, res, slept, 0, 0); + trace_lck, res, slept, 0, 0); } else { lck->lck_rw_shared_count++; lck_interlock_unlock(lck, istate); @@ -1528,12 +1603,12 @@ lck_rw_lock_shared_gen( } } -#if CONFIG_DTRACE +#if CONFIG_DTRACE if (dtrace_ls_enabled == TRUE) { if (slept == 0) { - LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0); + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0); } else { - LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_BLOCK, lck, + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_BLOCK, lck, mach_absolute_time() - wait_interval, 0, (readers_at_sleep == 0 ? 
1 : 0), readers_at_sleep); } @@ -1552,13 +1627,14 @@ lck_rw_lock_exclusive(lck_rw_t *lock) { current_thread()->rwlock_count++; if (atomic_test_and_set32(&lock->data, - (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK), - LCK_RW_WANT_EXCL, memory_order_acquire_smp, FALSE)) { -#if CONFIG_DTRACE + (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE | LCK_RW_INTERLOCK), + LCK_RW_WANT_EXCL, memory_order_acquire_smp, FALSE)) { +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lock, DTRACE_RW_EXCL); -#endif /* CONFIG_DTRACE */ - } else +#endif /* CONFIG_DTRACE */ + } else { lck_rw_lock_exclusive_gen(lock); + } } @@ -1569,9 +1645,9 @@ lck_rw_lock_exclusive(lck_rw_t *lock) boolean_t lck_rw_lock_shared_to_exclusive(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { atomic_exchange_abort(); @@ -1580,22 +1656,26 @@ lck_rw_lock_shared_to_exclusive(lck_rw_t *lock) } if (data & LCK_RW_WANT_UPGRADE) { data -= LCK_RW_SHARED_READER; - if ((data & LCK_RW_SHARED_MASK) == 0) /* we were the last reader */ - data &= ~(LCK_RW_W_WAITING); /* so clear the wait indicator */ - if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) + if ((data & LCK_RW_SHARED_MASK) == 0) { /* we were the last reader */ + data &= ~(LCK_RW_W_WAITING); /* so clear the wait indicator */ + } + if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) { return lck_rw_lock_shared_to_exclusive_failure(lock, prev); + } } else { - data |= LCK_RW_WANT_UPGRADE; /* ask for WANT_UPGRADE */ - data -= LCK_RW_SHARED_READER; /* and shed our read count */ - if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) + data |= LCK_RW_WANT_UPGRADE; /* ask for WANT_UPGRADE */ + data -= LCK_RW_SHARED_READER; /* and shed our read count */ + if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) { break; + } } cpu_pause(); } - /* we now own the WANT_UPGRADE */ - if (data & LCK_RW_SHARED_MASK) /* check to see if all of the readers are drained */ - lck_rw_lock_shared_to_exclusive_success(lock); /* if not, we need to go wait */ -#if CONFIG_DTRACE + /* we now own the WANT_UPGRADE */ + if (data & LCK_RW_SHARED_MASK) { /* check to see if all of the readers are drained */ + lck_rw_lock_shared_to_exclusive_success(lock); /* if not, we need to go wait */ + } +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lock, 0); #endif return TRUE; @@ -1612,12 +1692,12 @@ lck_rw_lock_shared_to_exclusive(lck_rw_t *lock) */ static boolean_t lck_rw_lock_shared_to_exclusive_failure( - lck_rw_t *lck, - uint32_t prior_lock_state) + lck_rw_t *lck, + uint32_t prior_lock_state) { - lck_rw_t *fake_lck; - thread_t thread = current_thread(); - uint32_t rwlock_count; + lck_rw_t *fake_lck; + thread_t thread = current_thread(); + uint32_t rwlock_count; /* Check if dropping the lock means that we need to unpromote */ rwlock_count = thread->rwlock_count--; @@ -1643,9 +1723,9 @@ lck_rw_lock_shared_to_exclusive_failure( } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(lck), lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0); + VM_KERNEL_UNSLIDE_OR_PERM(lck), lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0); - return (FALSE); + return FALSE; } @@ -1659,16 +1739,16 @@ 
lck_rw_lock_shared_to_exclusive_failure( */ static boolean_t lck_rw_lock_shared_to_exclusive_success( - lck_rw_t *lck) + lck_rw_t *lck) { - __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); - uint64_t deadline = 0; - int slept = 0; - int still_shared = 0; - wait_result_t res; - boolean_t istate = -1; + __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); + uint64_t deadline = 0; + int slept = 0; + int still_shared = 0; + wait_result_t res; + boolean_t istate = -1; -#if CONFIG_DTRACE +#if CONFIG_DTRACE uint64_t wait_interval = 0; int readers_at_sleep = 0; boolean_t dtrace_ls_initialized = FALSE; @@ -1676,8 +1756,7 @@ lck_rw_lock_shared_to_exclusive_success( #endif while (lck->lck_rw_shared_count != 0) { - -#if CONFIG_DTRACE +#if CONFIG_DTRACE if (dtrace_ls_initialized == FALSE) { dtrace_ls_initialized = TRUE; dtrace_rwl_shared_to_excl_spin = (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN] != 0); @@ -1693,40 +1772,42 @@ lck_rw_lock_shared_to_exclusive_success( } } #endif - if (istate == -1) + if (istate == -1) { istate = ml_get_interrupts_enabled(); + } deadline = lck_rw_deadline_for_spin(lck); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_SPIN_CODE) | DBG_FUNC_START, - trace_lck, lck->lck_rw_shared_count, 0, 0, 0); + trace_lck, lck->lck_rw_shared_count, 0, 0, 0); - while ((still_shared = lck->lck_rw_shared_count) && mach_absolute_time() < deadline) + while ((still_shared = lck->lck_rw_shared_count) && mach_absolute_time() < deadline) { lck_rw_lock_pause(istate); + } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_SPIN_CODE) | DBG_FUNC_END, - trace_lck, lck->lck_rw_shared_count, 0, 0, 0); + trace_lck, lck->lck_rw_shared_count, 0, 0, 0); - if ( !still_shared) + if (!still_shared) { break; + } /* * if we get here, the deadline has expired w/o * the rw_shared_count having drained to 0 * check to see if we're allowed to do a thread_block */ if (lck->lck_rw_can_sleep) { - istate = lck_interlock_lock(lck); - + if (lck->lck_rw_shared_count != 0) { KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_WAIT_CODE) | DBG_FUNC_START, - trace_lck, lck->lck_rw_shared_count, 0, 0, 0); + trace_lck, lck->lck_rw_shared_count, 0, 0, 0); lck->lck_w_waiting = TRUE; thread_set_pending_block_hint(current_thread(), kThreadWaitKernelRWLockUpgrade); res = assert_wait(RW_LOCK_WRITER_EVENT(lck), - THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); + THREAD_UNINT | THREAD_WAIT_NOREPORT_USER); lck_interlock_unlock(lck, istate); if (res == THREAD_WAITING) { @@ -1734,55 +1815,59 @@ lck_rw_lock_shared_to_exclusive_success( slept++; } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_WAIT_CODE) | DBG_FUNC_END, - trace_lck, res, slept, 0, 0); + trace_lck, res, slept, 0, 0); } else { lck_interlock_unlock(lck, istate); break; } } } -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * We infer whether we took the sleep/spin path above by checking readers_at_sleep. */ if (dtrace_ls_enabled == TRUE) { if (slept == 0) { - LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lck, mach_absolute_time() - wait_interval, 0); + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lck, mach_absolute_time() - wait_interval, 0); } else { - LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lck, + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lck, mach_absolute_time() - wait_interval, 1, (readers_at_sleep == 0 ? 
1 : 0), readers_at_sleep); } } LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lck, 1); #endif - return (TRUE); + return TRUE; } /* * Routine: lck_rw_lock_exclusive_to_shared */ -void lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) +void +lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->data, &prev, memory_order_release_smp); if (data & LCK_RW_INTERLOCK) { atomic_exchange_abort(); - lck_rw_interlock_spin(lock); /* wait for interlock to clear */ + lck_rw_interlock_spin(lock); /* wait for interlock to clear */ continue; } data += LCK_RW_SHARED_READER; - if (data & LCK_RW_WANT_UPGRADE) + if (data & LCK_RW_WANT_UPGRADE) { data &= ~(LCK_RW_WANT_UPGRADE); - else + } else { data &= ~(LCK_RW_WANT_EXCL); - if (!((prev & LCK_RW_W_WAITING) && (prev & LCK_RW_PRIV_EXCL))) + } + if (!((prev & LCK_RW_W_WAITING) && (prev & LCK_RW_PRIV_EXCL))) { data &= ~(LCK_RW_W_WAITING); - if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_release_smp)) + } + if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_release_smp)) { break; + } cpu_pause(); } return lck_rw_lock_exclusive_to_shared_gen(lock, prev); @@ -1791,7 +1876,7 @@ void lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) /* * Routine: lck_rw_lock_exclusive_to_shared_gen - * Function: + * Function: * assembly fast path has already dropped * our exclusive state and bumped lck_rw_shared_count * all we need to do here is determine if anyone @@ -1799,16 +1884,16 @@ void lck_rw_lock_exclusive_to_shared(lck_rw_t *lock) */ static void lck_rw_lock_exclusive_to_shared_gen( - lck_rw_t *lck, - uint32_t prior_lock_state) + lck_rw_t *lck, + uint32_t prior_lock_state) { - __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); - lck_rw_t *fake_lck; + __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); + lck_rw_t *fake_lck; fake_lck = (lck_rw_t *)&prior_lock_state; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_START, - trace_lck, fake_lck->lck_rw_want_write, fake_lck->lck_rw_want_upgrade, 0, 0); + trace_lck, fake_lck->lck_rw_want_write, fake_lck->lck_rw_want_upgrade, 0, 0); /* * don't wake up anyone waiting to take the lock exclusively @@ -1818,11 +1903,12 @@ lck_rw_lock_exclusive_to_shared_gen( * wake up any waiting readers if we don't have any writers waiting, * or the lock is NOT marked as rw_priv_excl (writers have privilege) */ - if (!(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting) && fake_lck->lck_r_waiting) + if (!(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting) && fake_lck->lck_r_waiting) { thread_wakeup(RW_LOCK_READER_EVENT(lck)); + } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EX_TO_SH_CODE) | DBG_FUNC_END, - trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0); + trace_lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, lck->lck_rw_shared_count, 0); #if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE, lck, 0); @@ -1835,27 +1921,29 @@ lck_rw_lock_exclusive_to_shared_gen( */ boolean_t lck_rw_try_lock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type) -{ - if (lck_rw_type == LCK_RW_TYPE_SHARED) - return(lck_rw_try_lock_shared(lck)); - else if (lck_rw_type == LCK_RW_TYPE_EXCLUSIVE) - return(lck_rw_try_lock_exclusive(lck)); - else + lck_rw_t *lck, + lck_rw_type_t lck_rw_type) +{ + if (lck_rw_type == LCK_RW_TYPE_SHARED) { + return lck_rw_try_lock_shared(lck); + } else if (lck_rw_type == 
LCK_RW_TYPE_EXCLUSIVE) { + return lck_rw_try_lock_exclusive(lck); + } else { panic("lck_rw_try_lock(): Invalid rw lock type: %x\n", lck_rw_type); - return(FALSE); + } + return FALSE; } /* * Routine: lck_rw_try_lock_shared */ -boolean_t lck_rw_try_lock_shared(lck_rw_t *lock) +boolean_t +lck_rw_try_lock_shared(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { atomic_exchange_abort(); @@ -1864,18 +1952,19 @@ boolean_t lck_rw_try_lock_shared(lck_rw_t *lock) } if (data & (LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE)) { atomic_exchange_abort(); - return FALSE; /* lock is busy */ + return FALSE; /* lock is busy */ } - data += LCK_RW_SHARED_READER; /* Increment reader refcount */ - if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) + data += LCK_RW_SHARED_READER; /* Increment reader refcount */ + if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) { break; + } cpu_pause(); } current_thread()->rwlock_count++; /* There is a 3 instr window where preemption may not notice rwlock_count after cmpxchg */ -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, lock, DTRACE_RW_SHARED); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ return TRUE; } @@ -1884,11 +1973,12 @@ boolean_t lck_rw_try_lock_shared(lck_rw_t *lock) * Routine: lck_rw_try_lock_exclusive */ -boolean_t lck_rw_try_lock_exclusive(lck_rw_t *lock) +boolean_t +lck_rw_try_lock_exclusive(lck_rw_t *lock) { - uint32_t data, prev; + uint32_t data, prev; - for ( ; ; ) { + for (;;) { data = atomic_exchange_begin32(&lock->data, &prev, memory_order_acquire_smp); if (data & LCK_RW_INTERLOCK) { atomic_exchange_abort(); @@ -1897,26 +1987,27 @@ boolean_t lck_rw_try_lock_exclusive(lck_rw_t *lock) } if (data & (LCK_RW_SHARED_MASK | LCK_RW_WANT_EXCL | LCK_RW_WANT_UPGRADE)) { atomic_exchange_abort(); - return FALSE; /* can't get it */ + return FALSE; /* can't get it */ } data |= LCK_RW_WANT_EXCL; - if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) + if (atomic_exchange_complete32(&lock->data, prev, data, memory_order_acquire_smp)) { break; + } cpu_pause(); } current_thread()->rwlock_count++; -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, lock, DTRACE_RW_EXCL); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ return TRUE; } void lck_rw_assert( - lck_rw_t *lck, - unsigned int type) + lck_rw_t *lck, + unsigned int type) { switch (type) { case LCK_RW_ASSERT_SHARED: @@ -1926,7 +2017,7 @@ lck_rw_assert( break; case LCK_RW_ASSERT_EXCLUSIVE: if ((lck->lck_rw_want_write || - lck->lck_rw_want_upgrade) && + lck->lck_rw_want_upgrade) && lck->lck_rw_shared_count == 0) { return; } @@ -1940,8 +2031,8 @@ lck_rw_assert( break; case LCK_RW_ASSERT_NOTHELD: if (!(lck->lck_rw_want_write || - lck->lck_rw_want_upgrade || - lck->lck_rw_shared_count != 0)) { + lck->lck_rw_want_upgrade || + lck->lck_rw_shared_count != 0)) { return; } break; @@ -1986,7 +2077,8 @@ lck_rw_lock_yield_shared(lck_rw_t *lck, boolean_t force_yield) * NOT SAFE: To be used only by kernel debugger to avoid deadlock. 
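Both try paths above are a single begin/complete round against the lock word: bail out as busy if a conflicting bit is up, otherwise bump the reader refcount or claim the want-exclusive bit. The same logic in portable C11; the bit layout below is assumed for illustration and is not lck_rw_t's real layout:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RW_READER       0x00000001u   /* one reader reference */
#define RW_SHARED_MASK  0x0000ffffu   /* low bits count readers */
#define RW_WANT_EXCL    0x00010000u
#define RW_WANT_UPGRADE 0x00020000u

static bool
rw_try_lock_shared(_Atomic uint32_t *rw)
{
    uint32_t data = atomic_load_explicit(rw, memory_order_relaxed);

    for (;;) {
        if (data & (RW_WANT_EXCL | RW_WANT_UPGRADE)) {
            return false;             /* writer pending: lock is busy */
        }
        if (atomic_compare_exchange_weak_explicit(rw, &data,
            data + RW_READER, memory_order_acquire, memory_order_relaxed)) {
            return true;              /* reader refcount bumped */
        }
        /* CAS failure reloads 'data'; re-check the bits and retry */
    }
}

static bool
rw_try_lock_exclusive(_Atomic uint32_t *rw)
{
    uint32_t data = atomic_load_explicit(rw, memory_order_relaxed);

    for (;;) {
        if (data & (RW_SHARED_MASK | RW_WANT_EXCL | RW_WANT_UPGRADE)) {
            return false;             /* readers or a writer: can't get it */
        }
        if (atomic_compare_exchange_weak_explicit(rw, &data,
            data | RW_WANT_EXCL, memory_order_acquire, memory_order_relaxed)) {
            return true;
        }
    }
}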
*/ boolean_t -kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) { +kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) +{ if (not_in_kdp) { panic("panic: rw lock exclusive check done outside of kernel debugger"); } @@ -2039,48 +2131,29 @@ kdp_lck_rw_lock_is_acquired_exclusive(lck_rw_t *lck) { * on acquire. */ -#ifdef MUTEX_ZONE +#ifdef MUTEX_ZONE extern zone_t lck_mtx_zone; #endif -/* - * N.B.: On x86, statistics are currently recorded for all indirect mutexes. - * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained - * as a 64-bit quantity (the new x86 specific statistics are also maintained - * as 32-bit quantities). - * - * - * Enable this preprocessor define to record the first miss alone - * By default, we count every miss, hence multiple misses may be - * recorded for a single lock acquire attempt via lck_mtx_lock - */ -#undef LOG_FIRST_MISS_ALONE - -/* - * This preprocessor define controls whether the R-M-W update of the - * per-group statistics elements are atomic (LOCK-prefixed) - * Enabled by default. - */ -#define ATOMIC_STAT_UPDATES 1 - - /* * Routine: lck_mtx_alloc_init */ lck_mtx_t * lck_mtx_alloc_init( - lck_grp_t *grp, - lck_attr_t *attr) + lck_grp_t *grp, + lck_attr_t *attr) { - lck_mtx_t *lck; -#ifdef MUTEX_ZONE - if ((lck = (lck_mtx_t *)zalloc(lck_mtx_zone)) != 0) + lck_mtx_t *lck; +#ifdef MUTEX_ZONE + if ((lck = (lck_mtx_t *)zalloc(lck_mtx_zone)) != 0) { lck_mtx_init(lck, grp, attr); + } #else - if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0) + if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0) { lck_mtx_init(lck, grp, attr); -#endif - return(lck); + } +#endif + return lck; } /* @@ -2088,11 +2161,11 @@ lck_mtx_alloc_init( */ void lck_mtx_free( - lck_mtx_t *lck, - lck_grp_t *grp) + lck_mtx_t *lck, + lck_grp_t *grp) { lck_mtx_destroy(lck, grp); -#ifdef MUTEX_ZONE +#ifdef MUTEX_ZONE zfree(lck_mtx_zone, lck); #else kfree(lck, sizeof(lck_mtx_t)); @@ -2104,9 +2177,9 @@ lck_mtx_free( */ static void lck_mtx_ext_init( - lck_mtx_ext_t *lck, - lck_grp_t *grp, - lck_attr_t *attr) + lck_mtx_ext_t *lck, + lck_grp_t *grp, + lck_attr_t *attr) { bzero((void *)lck, sizeof(lck_mtx_ext_t)); @@ -2117,8 +2190,9 @@ lck_mtx_ext_init( lck->lck_mtx_grp = grp; - if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT) + if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT) { lck->lck_mtx_attr |= LCK_MTX_ATTR_STAT; + } lck->lck_mtx.lck_mtx_is_ext = 1; lck->lck_mtx.lck_mtx_pad32 = 0xFFFFFFFF; @@ -2129,21 +2203,22 @@ lck_mtx_ext_init( */ void lck_mtx_init( - lck_mtx_t *lck, - lck_grp_t *grp, - lck_attr_t *attr) + lck_mtx_t *lck, + lck_grp_t *grp, + lck_attr_t *attr) { - lck_mtx_ext_t *lck_ext; - lck_attr_t *lck_attr; + lck_mtx_ext_t *lck_ext; + lck_attr_t *lck_attr; - if (attr != LCK_ATTR_NULL) + if (attr != LCK_ATTR_NULL) { lck_attr = attr; - else + } else { lck_attr = &LockDefaultLckAttr; + } if ((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) { if ((lck_ext = (lck_mtx_ext_t *)kalloc(sizeof(lck_mtx_ext_t))) != 0) { - lck_mtx_ext_init(lck_ext, grp, lck_attr); + lck_mtx_ext_init(lck_ext, grp, lck_attr); lck->lck_mtx_tag = LCK_MTX_TAG_INDIRECT; lck->lck_mtx_ptr = lck_ext; } @@ -2161,17 +2236,18 @@ lck_mtx_init( */ void lck_mtx_init_ext( - lck_mtx_t *lck, - lck_mtx_ext_t *lck_ext, - lck_grp_t *grp, - lck_attr_t *attr) + lck_mtx_t *lck, + lck_mtx_ext_t *lck_ext, + lck_grp_t *grp, + lck_attr_t *attr) { - lck_attr_t *lck_attr; + lck_attr_t *lck_attr; - if (attr != LCK_ATTR_NULL) + if (attr != LCK_ATTR_NULL) { lck_attr = attr; - else + } else { lck_attr = &LockDefaultLckAttr; + } if 
((lck_attr->lck_attr_val) & LCK_ATTR_DEBUG) { lck_mtx_ext_init(lck_ext, grp, lck_attr); @@ -2213,13 +2289,14 @@ lck_mtx_lock_mark_destroyed( */ void lck_mtx_destroy( - lck_mtx_t *lck, - lck_grp_t *grp) + lck_mtx_t *lck, + lck_grp_t *grp) { boolean_t indirect; - - if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED) + + if (lck->lck_mtx_tag == LCK_MTX_TAG_DESTROYED) { return; + } #if MACH_LDEBUG lck_mtx_assert(lck, LCK_MTX_ASSERT_NOTOWNED); #endif @@ -2227,8 +2304,9 @@ lck_mtx_destroy( lck_mtx_lock_mark_destroyed(lck, indirect); - if (indirect) + if (indirect) { kfree(lck->lck_mtx_ptr, sizeof(lck_mtx_ext_t)); + } lck_grp_lckcnt_decr(grp, LCK_TYPE_MTX); lck_grp_deallocate(grp); return; @@ -2250,7 +2328,7 @@ __attribute__((always_inline)) static boolean_t get_indirect_mutex( lck_mtx_t **lock, - uint32_t *state) + uint32_t *state) { *lock = &((*lock)->lck_mtx_ptr->lck_mtx); *state = ordered_load_mtx_state(*lock); @@ -2258,7 +2336,7 @@ get_indirect_mutex( } /* - * Routine: lck_mtx_unlock_slow + * Routine: lck_mtx_unlock_slow * * Unlocks a mutex held by current thread. * @@ -2270,11 +2348,11 @@ get_indirect_mutex( __attribute__((noinline)) void lck_mtx_unlock_slow( - lck_mtx_t *lock) + lck_mtx_t *lock) { - thread_t thread; - uint32_t state, prev; - boolean_t indirect = FALSE; + thread_t thread; + uint32_t state, prev; + boolean_t indirect = FALSE; state = ordered_load_mtx_state(lock); @@ -2287,13 +2365,15 @@ lck_mtx_unlock_slow( #if DEVELOPMENT | DEBUG thread_t owner = (thread_t)lock->lck_mtx_owner; - if(__improbable(owner != thread)) + if (__improbable(owner != thread)) { return lck_mtx_owner_check_panic(lock); + } #endif /* check if it is held as a spinlock */ - if (__improbable((state & LCK_MTX_MLOCKED_MSK) == 0)) + if (__improbable((state & LCK_MTX_MLOCKED_MSK) == 0)) { goto unlock; + } lck_mtx_interlock_lock_clear_flags(lock, LCK_MTX_MLOCKED_MSK, &state); @@ -2306,19 +2386,22 @@ unlock: prev = state; /* release interlock, promotion and clear spin flag */ state &= (~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK | LCK_MTX_PROMOTED_MSK)); - if ((state & LCK_MTX_WAITERS_MSK)) - state -= LCK_MTX_WAITER; /* decrement waiter count */ - ordered_store_mtx_state_release(lock, state); /* since I own the interlock, I don't need an atomic update */ + if ((state & LCK_MTX_WAITERS_MSK)) { + state -= LCK_MTX_WAITER; /* decrement waiter count */ + } + ordered_store_mtx_state_release(lock, state); /* since I own the interlock, I don't need an atomic update */ -#if MACH_LDEBUG +#if MACH_LDEBUG /* perform lock statistics after drop to prevent delay */ - if (thread) - thread->mutex_count--; /* lock statistic */ -#endif /* MACH_LDEBUG */ + if (thread) { + thread->mutex_count--; /* lock statistic */ + } +#endif /* MACH_LDEBUG */ /* check if there are waiters to wake up or priority to drop */ - if ((prev & (LCK_MTX_PROMOTED_MSK | LCK_MTX_WAITERS_MSK))) + if ((prev & (LCK_MTX_PROMOTED_MSK | LCK_MTX_WAITERS_MSK))) { return lck_mtx_unlock_wakeup_tail(lock, prev, indirect); + } /* re-enable preemption */ lck_mtx_unlock_finish_inline(lock, FALSE); @@ -2326,11 +2409,11 @@ unlock: return; } -#define LCK_MTX_LCK_WAIT_CODE 0x20 -#define LCK_MTX_LCK_WAKEUP_CODE 0x21 -#define LCK_MTX_LCK_SPIN_CODE 0x22 -#define LCK_MTX_LCK_ACQUIRE_CODE 0x23 -#define LCK_MTX_LCK_DEMOTE_CODE 0x24 +#define LCK_MTX_LCK_WAIT_CODE 0x20 +#define LCK_MTX_LCK_WAKEUP_CODE 0x21 +#define LCK_MTX_LCK_SPIN_CODE 0x22 +#define LCK_MTX_LCK_ACQUIRE_CODE 0x23 +#define LCK_MTX_LCK_DEMOTE_CODE 0x24 /* * Routine: lck_mtx_unlock_wakeup_tail @@ -2356,13 +2439,13 @@ unlock: */ 
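Everything lck_mtx_unlock_slow() does to the lock is single-word bit surgery: decide how the mutex is held, then in one release store drop the interlock, spin, and promotion bits and debit a waiter if any are queued. A tiny sketch of that state computation, with assumed mask values standing in for the LCK_MTX_*_MSK constants:

#include <stdint.h>

#define MTX_ILOCKED  0x00010000u   /* interlock held */
#define MTX_MLOCKED  0x00020000u   /* held as a full mutex */
#define MTX_SPIN     0x00040000u   /* held as a spin lock */
#define MTX_PROMOTED 0x00080000u   /* owner was priority-promoted */
#define MTX_WAITERS  0x0000ffffu   /* low bits: waiter count */
#define MTX_WAITER   0x00000001u

/* Compute the released state word the way the slow unlock path does:
 * drop interlock/spin/promotion, and debit one waiter if any exist. */
static uint32_t
mutex_release_state(uint32_t state)
{
    state &= ~(MTX_ILOCKED | MTX_SPIN | MTX_PROMOTED);
    if (state & MTX_WAITERS) {
        state -= MTX_WAITER;       /* one waiter is about to be woken */
    }
    return state;
}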
__attribute__((noinline)) static void -lck_mtx_unlock_wakeup_tail ( - lck_mtx_t *mutex, - int prior_lock_state, - boolean_t indirect) +lck_mtx_unlock_wakeup_tail( + lck_mtx_t *mutex, + int prior_lock_state, + boolean_t indirect) { - __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex); - lck_mtx_t fake_lck; + __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex); + lck_mtx_t fake_lck; /* * prior_lock state is a snapshot of the 2nd word of the @@ -2373,15 +2456,16 @@ lck_mtx_unlock_wakeup_tail ( fake_lck.lck_mtx_state = prior_lock_state; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAKEUP_CODE) | DBG_FUNC_START, - trace_lck, fake_lck.lck_mtx_promoted, fake_lck.lck_mtx_waiters, fake_lck.lck_mtx_pri, 0); + trace_lck, fake_lck.lck_mtx_promoted, fake_lck.lck_mtx_waiters, fake_lck.lck_mtx_pri, 0); if (__probable(fake_lck.lck_mtx_waiters)) { kern_return_t did_wake; - if (fake_lck.lck_mtx_waiters > 1) + if (fake_lck.lck_mtx_waiters > 1) { did_wake = thread_wakeup_one_with_pri(LCK_MTX_EVENT(mutex), fake_lck.lck_mtx_pri); - else + } else { did_wake = thread_wakeup_one(LCK_MTX_EVENT(mutex)); + } /* * The waiters count always precisely matches the number of threads on the waitqueue. * i.e. we should never see ret == KERN_NOT_WAITING. @@ -2397,14 +2481,15 @@ lck_mtx_unlock_wakeup_tail ( thread_lock(thread); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_DEMOTE_CODE) | DBG_FUNC_NONE, - thread_tid(thread), thread->promotions, thread->sched_flags & TH_SFLAG_PROMOTED, 0, 0); + thread_tid(thread), thread->promotions, thread->sched_flags & TH_SFLAG_PROMOTED, 0, 0); assert(thread->was_promoted_on_wakeup == 0); assert(thread->promotions > 0); assert_promotions_invariant(thread); - if (--thread->promotions == 0) + if (--thread->promotions == 0) { sched_thread_unpromote(thread, trace_lck); + } assert_promotions_invariant(thread); @@ -2413,17 +2498,17 @@ lck_mtx_unlock_wakeup_tail ( } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAKEUP_CODE) | DBG_FUNC_END, - trace_lck, 0, mutex->lck_mtx_waiters, 0, 0); + trace_lck, 0, mutex->lck_mtx_waiters, 0, 0); lck_mtx_unlock_finish_inline(mutex, indirect); } /* - * Routine: lck_mtx_lock_acquire_x86 + * Routine: lck_mtx_lock_acquire_x86 * * Invoked on acquiring the mutex when there is * contention (i.e. the assembly routine sees that - * that mutex->lck_mtx_waiters != 0 or + * that mutex->lck_mtx_waiters != 0 or * thread->was_promoted_on_wakeup != 0)... * * mutex is owned... interlock is held... 
preemption is disabled @@ -2431,19 +2516,19 @@ lck_mtx_unlock_wakeup_tail ( __attribute__((always_inline)) static void lck_mtx_lock_acquire_inline( - lck_mtx_t *mutex) + lck_mtx_t *mutex) { - __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex); - integer_t priority; + __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex); + integer_t priority; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_ACQUIRE_CODE) | DBG_FUNC_START, - trace_lck, thread->was_promoted_on_wakeup, mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0); + trace_lck, thread->was_promoted_on_wakeup, mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0); - if (mutex->lck_mtx_waiters) + if (mutex->lck_mtx_waiters) { priority = mutex->lck_mtx_pri; - else + } else { priority = 0; /* not worth resetting lck_mtx_pri here, it will be reset by next waiter */ - + } /* the priority must have been set correctly by wait */ assert(priority <= MAXPRI_PROMOTE); assert(priority == 0 || priority >= BASEPRI_DEFAULT); @@ -2457,8 +2542,9 @@ lck_mtx_lock_acquire_inline( spl_t s = splsched(); thread_lock(thread); - if (thread->was_promoted_on_wakeup) + if (thread->was_promoted_on_wakeup) { assert(thread->promotions > 0); + } /* Intel only promotes if priority goes up */ if (thread->sched_pri < priority && thread->promotion_priority < priority) { @@ -2481,20 +2567,21 @@ lck_mtx_lock_acquire_inline( if (thread->was_promoted_on_wakeup) { thread->was_promoted_on_wakeup = 0; - if (--thread->promotions == 0) + if (--thread->promotions == 0) { sched_thread_unpromote(thread, trace_lck); + } } thread_unlock(thread); splx(s); } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_ACQUIRE_CODE) | DBG_FUNC_END, - trace_lck, 0, mutex->lck_mtx_waiters, 0, 0); + trace_lck, 0, mutex->lck_mtx_waiters, 0, 0); } void lck_mtx_lock_acquire_x86( - lck_mtx_t *mutex) + lck_mtx_t *mutex) { return lck_mtx_lock_acquire_inline(mutex); } @@ -2508,8 +2595,8 @@ lck_mtx_lock_acquire_x86( __attribute__((noinline)) static void lck_mtx_lock_acquire_tail( - lck_mtx_t *mutex, - boolean_t indirect) + lck_mtx_t *mutex, + boolean_t indirect) { lck_mtx_lock_acquire_inline(mutex); lck_mtx_lock_finish_inline(mutex, ordered_load_mtx_state(mutex), indirect); @@ -2518,7 +2605,7 @@ lck_mtx_lock_acquire_tail( __attribute__((noinline)) static boolean_t lck_mtx_try_lock_acquire_tail( - lck_mtx_t *mutex) + lck_mtx_t *mutex) { lck_mtx_lock_acquire_inline(mutex); lck_mtx_try_lock_finish_inline(mutex, ordered_load_mtx_state(mutex)); @@ -2529,7 +2616,7 @@ lck_mtx_try_lock_acquire_tail( __attribute__((noinline)) static void lck_mtx_convert_spin_acquire_tail( - lck_mtx_t *mutex) + lck_mtx_t *mutex) { lck_mtx_lock_acquire_inline(mutex); lck_mtx_convert_spin_finish_inline(mutex, ordered_load_mtx_state(mutex)); @@ -2553,7 +2640,7 @@ lck_mtx_interlock_lock_set_and_clear_flags( uint32_t state, prev; state = *new_state; - for ( ; ; ) { + for (;;) { /* have to wait for interlock to clear */ while (__improbable(state & (LCK_MTX_ILOCKED_MSK | xor_flags))) { cpu_pause(); @@ -2561,11 +2648,12 @@ lck_mtx_interlock_lock_set_and_clear_flags( } prev = state; /* prev contains snapshot for exchange */ state |= LCK_MTX_ILOCKED_MSK | xor_flags; /* pick up interlock */ - state &= ~and_flags; /* clear flags */ + state &= ~and_flags; /* clear flags */ disable_preemption(); - if (atomic_compare_exchange32(&mutex->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) + if (atomic_compare_exchange32(&mutex->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) { break; + } enable_preemption(); 
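The promotion logic in lck_mtx_lock_acquire_inline() above is a one-way ratchet — per the comment, Intel only promotes when priority goes up. A hedged sketch of that check with hypothetical thread fields; the real bookkeeping lives in the scheduler:

#include <stdint.h>

struct kthread {
    int sched_pri;             /* current scheduling priority */
    int promotion_priority;    /* floor granted by outstanding promotions */
    int promotions;            /* outstanding promotion count */
};

/* Mirror the ratchet: promote only when the mutex's recorded priority
 * exceeds both the thread's current and promoted priorities. */
static void
maybe_promote(struct kthread *th, int mutex_pri)
{
    if (th->sched_pri < mutex_pri && th->promotion_priority < mutex_pri) {
        th->promotions++;                  /* take a promotion reference */
        th->promotion_priority = mutex_pri;
        th->sched_pri = mutex_pri;         /* raise, never lower */
    }
}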
cpu_pause(); state = ordered_load_mtx_state(mutex); @@ -2604,12 +2692,12 @@ lck_mtx_interlock_try_lock_set_flags( if (state & (LCK_MTX_ILOCKED_MSK | or_flags)) { return 0; } - prev = state; /* prev contains snapshot for exchange */ - state |= LCK_MTX_ILOCKED_MSK | or_flags; /* pick up interlock */ + prev = state; /* prev contains snapshot for exchange */ + state |= LCK_MTX_ILOCKED_MSK | or_flags; /* pick up interlock */ disable_preemption(); if (atomic_compare_exchange32(&mutex->lck_mtx_state, prev, state, memory_order_acquire_smp, FALSE)) { - *new_state = state; - return 1; + *new_state = state; + return 1; } enable_preemption(); @@ -2629,7 +2717,7 @@ lck_mtx_interlock_try_lock_disable_interrupts( lck_mtx_t *mutex, boolean_t *istate) { - uint32_t state; + uint32_t state; *istate = ml_set_interrupts_enabled(FALSE); state = ordered_load_mtx_state(mutex); @@ -2651,71 +2739,6 @@ lck_mtx_interlock_unlock_enable_interrupts( ml_set_interrupts_enabled(istate); } -static void __inline__ -lck_mtx_inc_stats( - uint64_t* stat) -{ -#if ATOMIC_STAT_UPDATES - os_atomic_inc(stat, relaxed); -#else - *stat = (*stat)++; -#endif -} - -static void __inline__ -lck_mtx_update_miss( - struct _lck_mtx_ext_ *lock, - int *first_miss) -{ -#if LOG_FIRST_MISS_ALONE - if ((*first_miss & 1) == 0) { -#else -#pragma unused(first_miss) -#endif - uint64_t* stat = &lock->lck_mtx_grp->lck_grp_miss; - lck_mtx_inc_stats(stat); - -#if LOG_FIRST_MISS_ALONE - *first_miss |= 1; - } -#endif -} - -static void __inline__ -lck_mtx_update_direct_wait( - struct _lck_mtx_ext_ *lock) -{ - uint64_t* stat = &lock->lck_mtx_grp->lck_grp_direct_wait; - lck_mtx_inc_stats(stat); -} - -static void __inline__ -lck_mtx_update_wait( - struct _lck_mtx_ext_ *lock, - int *first_miss) -{ -#if LOG_FIRST_MISS_ALONE - if ((*first_miss & 2) == 0) { -#else -#pragma unused(first_miss) -#endif - uint64_t* stat = &lock->lck_mtx_grp->lck_grp_wait; - lck_mtx_inc_stats(stat); - -#if LOG_FIRST_MISS_ALONE - *first_miss |= 2; - } -#endif -} - -static void __inline__ -lck_mtx_update_util( - struct _lck_mtx_ext_ *lock) -{ - uint64_t* stat = &lock->lck_mtx_grp->lck_grp_util; - lck_mtx_inc_stats(stat); -} - __attribute__((noinline)) static void lck_mtx_lock_contended( @@ -2730,7 +2753,7 @@ lck_mtx_lock_contended( try_again: if (indirect) { - lck_mtx_update_miss((struct _lck_mtx_ext_*)lock, first_miss); + lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, first_miss); } ret = lck_mtx_lock_spinwait_x86(lock); @@ -2742,10 +2765,10 @@ try_again: * try to spin. 
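The hunk above also retires the file-local statistics helpers (lck_mtx_inc_stats() and the lck_mtx_update_*() wrappers) in favor of shared lck_grp_mtx_update_*() routines; note that the deleted non-atomic fallback, *stat = (*stat)++;, was an unsequenced modification and therefore undefined behavior. A minimal C11 version of the surviving idea — a relaxed atomic bump of a per-group counter:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical slice of a lock group's statistics block. */
struct lock_grp_stats {
    _Atomic uint64_t miss;
    _Atomic uint64_t wait;
    _Atomic uint64_t direct_wait;
    _Atomic uint64_t held;
};

/* Relaxed increment, the portable spelling of os_atomic_inc(stat, relaxed):
 * counters need atomicity, not ordering. */
static inline void
grp_stat_inc(_Atomic uint64_t *stat)
{
    atomic_fetch_add_explicit(stat, 1, memory_order_relaxed);
}

static inline void
grp_update_miss(struct lock_grp_stats *grp)
{
    grp_stat_inc(&grp->miss);
}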
*/ if (indirect) { - lck_mtx_update_direct_wait((struct _lck_mtx_ext_*)lock); + lck_grp_mtx_update_direct_wait((struct _lck_mtx_ext_*)lock); } - /* just fall through case LCK_MTX_SPINWAIT_SPUN */ + /* just fall through case LCK_MTX_SPINWAIT_SPUN */ case LCK_MTX_SPINWAIT_SPUN: /* * mutex not acquired but lck_mtx_lock_spinwait_x86 tried to spin @@ -2756,7 +2779,7 @@ try_again: if (state & LCK_MTX_MLOCKED_MSK) { if (indirect) { - lck_mtx_update_wait((struct _lck_mtx_ext_*)lock, first_miss); + lck_grp_mtx_update_wait((struct _lck_mtx_ext_*)lock, first_miss); } lck_mtx_lock_wait_x86(lock); /* @@ -2764,7 +2787,6 @@ try_again: */ goto try_again; } else { - /* grab the mutex */ state |= LCK_MTX_MLOCKED_MSK; ordered_store_mtx_state_release(lock, state); @@ -2834,7 +2856,7 @@ lck_mtx_lock_wait_interlock_to_clear( { uint32_t state; - for ( ; ; ) { + for (;;) { cpu_pause(); state = ordered_load_mtx_state(lock); if (!(state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK))) { @@ -2856,7 +2878,7 @@ lck_mtx_try_lock_wait_interlock_to_clear( { uint32_t state; - for ( ; ; ) { + for (;;) { cpu_pause(); state = ordered_load_mtx_state(lock); if (state & (LCK_MTX_MLOCKED_MSK | LCK_MTX_SPIN_MSK)) { @@ -2884,9 +2906,9 @@ void lck_mtx_lock_slow( lck_mtx_t *lock) { - boolean_t indirect = FALSE; - uint32_t state; - int first_miss = 0; + boolean_t indirect = FALSE; + uint32_t state; + int first_miss = 0; state = ordered_load_mtx_state(lock); @@ -2900,7 +2922,7 @@ lck_mtx_lock_slow( /* is the mutex already held and not indirect */ - if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))){ + if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))) { /* no, must have been the mutex */ return lck_mtx_lock_contended(lock, indirect, &first_miss); } @@ -2915,12 +2937,12 @@ lck_mtx_lock_slow( indirect = get_indirect_mutex(&lock, &state); first_miss = 0; - lck_mtx_update_util((struct _lck_mtx_ext_*)lock); + lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock); if (state & LCK_MTX_SPIN_MSK) { - /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */ + /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */ assert(state & LCK_MTX_ILOCKED_MSK); - lck_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); } } @@ -2944,7 +2966,7 @@ lck_mtx_lock_slow( #if MACH_LDEBUG if (thread) { - thread->mutex_count++; /* lock statistic */ + thread->mutex_count++; /* lock statistic */ } #endif /* @@ -2981,7 +3003,7 @@ lck_mtx_try_lock_slow( */ /* is the mutex already held and not indirect */ - if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))){ + if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))) { return FALSE; } @@ -2995,12 +3017,13 @@ lck_mtx_try_lock_slow( indirect = get_indirect_mutex(&lock, &state); first_miss = 0; - lck_mtx_update_util((struct _lck_mtx_ext_*)lock); + lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock); } if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) { - if (indirect) - lck_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + if (indirect) { + lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + } return FALSE; } } @@ -3008,8 +3031,9 @@ lck_mtx_try_lock_slow( /* no - can't be INDIRECT, DESTROYED or locked */ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_MLOCKED_MSK, &state))) { if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) { - if (indirect) - lck_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + if (indirect) { + lck_grp_mtx_update_miss((struct 
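/*
 * Control-flow sketch of the contended path above: spin first; if the
 * holder keeps the mutex past the spin budget, sleep and retry from the
 * top, otherwise grab it directly.  spinwait(), holder_still_owns(),
 * block_until_woken() and try_grab() are hypothetical stand-ins for
 * lck_mtx_lock_spinwait_x86, the MLOCKED state test, lck_mtx_lock_wait_x86
 * and the interlocked grab; they are not real kernel APIs.
 */
struct mtx;
enum spin_ret { SPIN_ACQUIRED, SPIN_SPUN, SPIN_NO_SPIN };

extern enum spin_ret spinwait(struct mtx *);
extern int  holder_still_owns(struct mtx *);
extern void block_until_woken(struct mtx *);
extern int  try_grab(struct mtx *);

static void
lock_contended_sketch(struct mtx *lock)
{
	for (;;) {
		if (spinwait(lock) == SPIN_ACQUIRED) {
			return;                     /* spinning won the lock */
		}
		/* SPUN or NO_SPIN: the owner may still hold the mutex */
		if (holder_still_owns(lock)) {
			block_until_woken(lock);    /* sleep, then retry */
			continue;
		}
		if (try_grab(lock)) {
			return;                     /* released while we spun */
		}
	}
}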
_lck_mtx_ext_*)lock, &first_miss); + } return FALSE; } } @@ -3022,7 +3046,7 @@ lck_mtx_try_lock_slow( #if MACH_LDEBUG if (thread) { - thread->mutex_count++; /* lock statistic */ + thread->mutex_count++; /* lock statistic */ } #endif /* @@ -3037,13 +3061,12 @@ lck_mtx_try_lock_slow( lck_mtx_try_lock_finish_inline(lock, ordered_load_mtx_state(lock)); return TRUE; - } __attribute__((noinline)) void lck_mtx_lock_spin_slow( - lck_mtx_t *lock) + lck_mtx_t *lock) { boolean_t indirect = FALSE; uint32_t state; @@ -3061,7 +3084,7 @@ lck_mtx_lock_spin_slow( /* is the mutex already held and not indirect */ - if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))){ + if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))) { /* no, must have been the mutex */ return lck_mtx_lock_contended(lock, indirect, &first_miss); } @@ -3076,12 +3099,12 @@ lck_mtx_lock_spin_slow( indirect = get_indirect_mutex(&lock, &state); first_miss = 0; - lck_mtx_update_util((struct _lck_mtx_ext_*)lock); + lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock); if (state & LCK_MTX_SPIN_MSK) { - /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */ + /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */ assert(state & LCK_MTX_ILOCKED_MSK); - lck_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); } } @@ -3091,7 +3114,7 @@ lck_mtx_lock_spin_slow( } /* no - can't be INDIRECT, DESTROYED or locked */ - while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_SPIN_MSK, &state) )) { + while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_SPIN_MSK, &state))) { if (!lck_mtx_lock_wait_interlock_to_clear(lock, &state)) { return lck_mtx_lock_contended(lock, indirect, &first_miss); } @@ -3109,7 +3132,7 @@ lck_mtx_lock_spin_slow( } #endif -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0); #endif /* return with the interlock held and preemption disabled */ @@ -3136,7 +3159,7 @@ lck_mtx_try_lock_spin_slow( */ /* is the mutex already held and not indirect */ - if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))){ + if (__improbable(!(state & LCK_MTX_ILOCKED_MSK))) { return FALSE; } @@ -3150,12 +3173,13 @@ lck_mtx_try_lock_spin_slow( indirect = get_indirect_mutex(&lock, &state); first_miss = 0; - lck_mtx_update_util((struct _lck_mtx_ext_*)lock); + lck_grp_mtx_update_held((struct _lck_mtx_ext_*)lock); } if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) { - if (indirect) - lck_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + if (indirect) { + lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + } return FALSE; } } @@ -3163,8 +3187,9 @@ lck_mtx_try_lock_spin_slow( /* no - can't be INDIRECT, DESTROYED or locked */ while (__improbable(!lck_mtx_interlock_try_lock_set_flags(lock, LCK_MTX_SPIN_MSK, &state))) { if (!lck_mtx_try_lock_wait_interlock_to_clear(lock, &state)) { - if (indirect) - lck_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + if (indirect) { + lck_grp_mtx_update_miss((struct _lck_mtx_ext_*)lock, &first_miss); + } return FALSE; } } @@ -3177,7 +3202,7 @@ lck_mtx_try_lock_spin_slow( #if MACH_LDEBUG if (thread) { - thread->mutex_count++; /* lock statistic */ + thread->mutex_count++; /* lock statistic */ } #endif @@ -3185,13 +3210,12 @@ lck_mtx_try_lock_spin_slow( LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, lock, 0); #endif return TRUE; - } __attribute__((noinline)) void lck_mtx_convert_spin( - lck_mtx_t *lock) + lck_mtx_t *lock) { 
uint32_t state; @@ -3229,7 +3253,7 @@ lck_mtx_convert_spin( static inline boolean_t lck_mtx_lock_grab_mutex( - lck_mtx_t *lock) + lck_mtx_t *lock) { uint32_t state; @@ -3247,7 +3271,7 @@ lck_mtx_lock_grab_mutex( #if MACH_LDEBUG if (thread) { - thread->mutex_count++; /* lock statistic */ + thread->mutex_count++; /* lock statistic */ } #endif return TRUE; @@ -3256,8 +3280,8 @@ lck_mtx_lock_grab_mutex( __attribute__((noinline)) void lck_mtx_assert( - lck_mtx_t *lock, - unsigned int type) + lck_mtx_t *lock, + unsigned int type) { thread_t thread, owner; uint32_t state; @@ -3272,17 +3296,19 @@ lck_mtx_assert( owner = (thread_t)lock->lck_mtx_owner; if (type == LCK_MTX_ASSERT_OWNED) { - if (owner != thread || !(state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK))) + if (owner != thread || !(state & (LCK_MTX_ILOCKED_MSK | LCK_MTX_MLOCKED_MSK))) { panic("mutex (%p) not owned\n", lock); + } } else { - assert (type == LCK_MTX_ASSERT_NOTOWNED); - if (owner == thread) + assert(type == LCK_MTX_ASSERT_NOTOWNED); + if (owner == thread) { panic("mutex (%p) owned\n", lock); + } } } /* - * Routine: lck_mtx_lock_spinwait_x86 + * Routine: lck_mtx_lock_spinwait_x86 * * Invoked trying to acquire a mutex when there is contention but * the holder is running on another processor. We spin for up to a maximum @@ -3296,18 +3322,18 @@ lck_mtx_assert( __attribute__((noinline)) lck_mtx_spinwait_ret_type_t lck_mtx_lock_spinwait_x86( - lck_mtx_t *mutex) + lck_mtx_t *mutex) { - __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex); - thread_t holder; - uint64_t overall_deadline; - uint64_t check_owner_deadline; - uint64_t cur_time; - lck_mtx_spinwait_ret_type_t retval = LCK_MTX_SPINWAIT_SPUN; - int loopcount = 0; + __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex); + thread_t holder; + uint64_t overall_deadline; + uint64_t check_owner_deadline; + uint64_t cur_time; + lck_mtx_spinwait_ret_type_t retval = LCK_MTX_SPINWAIT_SPUN; + int loopcount = 0; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_START, - trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, 0, 0); + trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, 0, 0); cur_time = mach_absolute_time(); overall_deadline = cur_time + MutexSpin; @@ -3328,11 +3354,12 @@ lck_mtx_lock_spinwait_x86( } cur_time = mach_absolute_time(); - if (cur_time >= overall_deadline) + if (cur_time >= overall_deadline) { break; + } if (cur_time >= check_owner_deadline && mutex->lck_mtx_owner) { - boolean_t istate; + boolean_t istate; /* * We will repeatedly peek at the state of the lock while spinning, @@ -3345,18 +3372,16 @@ lck_mtx_lock_spinwait_x86( * This is safe because it is a "try_lock", if we can't acquire * the interlock we re-enable the interrupts and fail, so it is * ok to call it even if the interlock was already held. - */ + */ if (lck_mtx_interlock_try_lock_disable_interrupts(mutex, &istate)) { - if ((holder = (thread_t) mutex->lck_mtx_owner) != NULL) { - - if ( !(holder->machine.specFlags & OnProc) || - (holder->state & TH_IDLE)) { - + if (!(holder->machine.specFlags & OnProc) || + (holder->state & TH_IDLE)) { lck_mtx_interlock_unlock_enable_interrupts(mutex, istate); - if (loopcount == 0) + if (loopcount == 0) { retval = LCK_MTX_SPINWAIT_NO_SPIN; + } break; } } @@ -3368,32 +3393,31 @@ lck_mtx_lock_spinwait_x86( cpu_pause(); loopcount++; - } while (TRUE); -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * We've already kept a count via overall_deadline of how long we spun. 
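/*
 * Sketch of the check in lck_mtx_assert above: OWNED demands the caller is
 * the recorded owner with a held bit set; NOTOWNED demands the opposite.
 * owner_of(), self(), held_bits_set() and fatal() are illustrative
 * placeholders for the owner field, current_thread(), the state test and
 * panic().
 */
extern void *owner_of(void *lock);
extern void *self(void);
extern int   held_bits_set(void *lock);
extern void  fatal(const char *msg);

#define ASSERT_OWNED    1
#define ASSERT_NOTOWNED 2

static void
mtx_assert_sketch(void *lock, unsigned type)
{
	void *owner = owner_of(lock);

	if (type == ASSERT_OWNED) {
		if (owner != self() || !held_bits_set(lock)) {
			fatal("mutex not owned");
		}
	} else {    /* ASSERT_NOTOWNED */
		if (owner == self()) {
			fatal("mutex owned");
		}
	}
}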
* If dtrace is active, then we compute backwards to decide how * long we spun. * * Note that we record a different probe id depending on whether - * this is a direct or indirect mutex. This allows us to + * this is a direct or indirect mutex. This allows us to * penalize only lock groups that have debug/stats enabled * with dtrace processing if desired. */ if (__probable(mutex->lck_mtx_is_ext == 0)) { LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN, mutex, - mach_absolute_time() - (overall_deadline - MutexSpin)); + mach_absolute_time() - (overall_deadline - MutexSpin)); } else { LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_SPIN, mutex, - mach_absolute_time() - (overall_deadline - MutexSpin)); + mach_absolute_time() - (overall_deadline - MutexSpin)); } /* The lockstat acquire event is recorded by the assembly code beneath us. */ #endif KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_SPIN_CODE) | DBG_FUNC_END, - trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, retval, 0); + trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), mutex->lck_mtx_waiters, retval, 0); return retval; } @@ -3401,7 +3425,7 @@ lck_mtx_lock_spinwait_x86( /* - * Routine: lck_mtx_lock_wait_x86 + * Routine: lck_mtx_lock_wait_x86 * * Invoked in order to wait on contention. * @@ -3428,10 +3452,10 @@ lck_mtx_lock_spinwait_x86( */ __attribute__((noinline)) void -lck_mtx_lock_wait_x86 ( - lck_mtx_t *mutex) +lck_mtx_lock_wait_x86( + lck_mtx_t *mutex) { -#if CONFIG_DTRACE +#if CONFIG_DTRACE uint64_t sleep_start = 0; if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) { @@ -3446,8 +3470,8 @@ lck_mtx_lock_wait_x86 ( __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(mutex); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, - trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), - mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0); + trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), + mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0); integer_t waiter_pri = self->sched_pri; waiter_pri = MAX(waiter_pri, self->base_pri); @@ -3457,8 +3481,9 @@ lck_mtx_lock_wait_x86 ( assert(mutex->lck_mtx_pri <= MAXPRI_PROMOTE); /* Re-initialize lck_mtx_pri if this is the first contention */ - if (mutex->lck_mtx_waiters == 0 || mutex->lck_mtx_pri <= waiter_pri) + if (mutex->lck_mtx_waiters == 0 || mutex->lck_mtx_pri <= waiter_pri) { mutex->lck_mtx_pri = waiter_pri; + } thread_t holder = (thread_t)mutex->lck_mtx_owner; @@ -3490,7 +3515,7 @@ lck_mtx_lock_wait_x86 ( * check if it needs to raise to match this one */ sched_thread_update_promotion_to_pri(holder, promote_pri, - trace_lck); + trace_lck); } } else { /* @@ -3517,10 +3542,10 @@ lck_mtx_lock_wait_x86 ( self->waiting_for_mutex = NULL; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, - trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), - mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0); + trace_lck, VM_KERNEL_UNSLIDE_OR_PERM(mutex->lck_mtx_owner), + mutex->lck_mtx_waiters, mutex->lck_mtx_pri, 0); -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * Record the Dtrace lockstat probe for blocking, block time * measured from when we were entered. @@ -3543,7 +3568,7 @@ lck_mtx_lock_wait_x86 ( * Returns: TRUE if lock is acquired. 
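/*
 * Minimal sketch of the adaptive spin in lck_mtx_lock_spinwait_x86 above:
 * spin against an overall time budget, but periodically check whether the
 * owner is actually on a CPU; if it is off-core or idle, stop spinning and
 * let the caller block.  now_ns(), try_grab() and owner_is_running() are
 * assumed helpers, and the budgets are arbitrary examples rather than the
 * kernel's tuned MutexSpin value.
 */
#include <stdint.h>

struct mtx;
extern uint64_t now_ns(void);
extern int try_grab(struct mtx *);
extern int owner_is_running(struct mtx *);

#define SPIN_BUDGET_NS  50000ull    /* example overall budget */
#define OWNER_CHECK_NS   5000ull    /* example owner-recheck period */

static int  /* 1 = acquired by spinning, 0 = caller should block */
adaptive_spin_sketch(struct mtx *lock)
{
	uint64_t deadline = now_ns() + SPIN_BUDGET_NS;
	uint64_t next_check = 0;

	for (;;) {
		if (try_grab(lock)) {
			return 1;
		}
		uint64_t t = now_ns();
		if (t >= deadline) {
			return 0;               /* budget exhausted */
		}
		if (t >= next_check) {
			if (!owner_is_running(lock)) {
				return 0;       /* owner off-core: no point spinning */
			}
			next_check = t + OWNER_CHECK_NS;
		}
	}
}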
*/ boolean_t -kdp_lck_mtx_lock_spin_is_acquired(lck_mtx_t *lck) +kdp_lck_mtx_lock_spin_is_acquired(lck_mtx_t *lck) { if (not_in_kdp) { panic("panic: kdp_lck_mtx_lock_spin_is_acquired called outside of kernel debugger"); @@ -3569,17 +3594,17 @@ void kdp_rwlck_find_owner(__unused struct waitq * waitq, event64_t event, thread_waitinfo_t * waitinfo) { lck_rw_t *rwlck = NULL; - switch(waitinfo->wait_type) { - case kThreadWaitKernelRWLockRead: - rwlck = READ_EVENT_TO_RWLOCK(event); - break; - case kThreadWaitKernelRWLockWrite: - case kThreadWaitKernelRWLockUpgrade: - rwlck = WRITE_EVENT_TO_RWLOCK(event); - break; - default: - panic("%s was called with an invalid blocking type", __FUNCTION__); - break; + switch (waitinfo->wait_type) { + case kThreadWaitKernelRWLockRead: + rwlck = READ_EVENT_TO_RWLOCK(event); + break; + case kThreadWaitKernelRWLockWrite: + case kThreadWaitKernelRWLockUpgrade: + rwlck = WRITE_EVENT_TO_RWLOCK(event); + break; + default: + panic("%s was called with an invalid blocking type", __FUNCTION__); + break; } waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(rwlck); waitinfo->owner = 0; diff --git a/osfmk/i386/locks_i386_inlines.h b/osfmk/i386/locks_i386_inlines.h index 7e4aa5995..a7e188072 100644 --- a/osfmk/i386/locks_i386_inlines.h +++ b/osfmk/i386/locks_i386_inlines.h @@ -2,7 +2,7 @@ * Copyright (c) 201 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,31 +30,23 @@ #define _I386_LOCKS_I386_INLINES_H_ #include -/* - * We need only enough declarations from the BSD-side to be able to - * test if our probe is active, and to call __dtrace_probe(). Setting - * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in. - */ -#if CONFIG_DTRACE -#define NEED_DTRACE_DEFS -#include <../bsd/sys/lockstat.h> -#endif +#include // Enforce program order of loads and stores. 
#define ordered_load(target) _Generic( (target),\ - uint32_t* : __c11_atomic_load((_Atomic uint32_t* )(target), memory_order_relaxed), \ - uintptr_t*: __c11_atomic_load((_Atomic uintptr_t*)(target), memory_order_relaxed) ) + uint32_t* : __c11_atomic_load((_Atomic uint32_t* )(target), memory_order_relaxed), \ + uintptr_t*: __c11_atomic_load((_Atomic uintptr_t*)(target), memory_order_relaxed) ) #define ordered_store_release(target, value) _Generic( (target),\ - uint32_t* : __c11_atomic_store((_Atomic uint32_t* )(target), (value), memory_order_release_smp), \ - uintptr_t*: __c11_atomic_store((_Atomic uintptr_t*)(target), (value), memory_order_release_smp) ) + uint32_t* : __c11_atomic_store((_Atomic uint32_t* )(target), (value), memory_order_release_smp), \ + uintptr_t*: __c11_atomic_store((_Atomic uintptr_t*)(target), (value), memory_order_release_smp) ) #define ordered_store_volatile(target, value) _Generic( (target),\ - volatile uint32_t* : __c11_atomic_store((_Atomic volatile uint32_t* )(target), (value), memory_order_relaxed), \ - volatile uintptr_t*: __c11_atomic_store((_Atomic volatile uintptr_t*)(target), (value), memory_order_relaxed) ) + volatile uint32_t* : __c11_atomic_store((_Atomic volatile uint32_t* )(target), (value), memory_order_relaxed), \ + volatile uintptr_t*: __c11_atomic_store((_Atomic volatile uintptr_t*)(target), (value), memory_order_relaxed) ) /* Enforce program order of loads and stores. */ -#define ordered_load_mtx_state(lock) ordered_load(&(lock)->lck_mtx_state) -#define ordered_store_mtx_state_release(lock, value) ordered_store_release(&(lock)->lck_mtx_state, (value)) -#define ordered_store_mtx_owner(lock, value) ordered_store_volatile(&(lock)->lck_mtx_owner, (value)) +#define ordered_load_mtx_state(lock) ordered_load(&(lock)->lck_mtx_state) +#define ordered_store_mtx_state_release(lock, value) ordered_store_release(&(lock)->lck_mtx_state, (value)) +#define ordered_store_mtx_owner(lock, value) ordered_store_volatile(&(lock)->lck_mtx_owner, (value)) #if DEVELOPMENT | DEBUG void lck_mtx_owner_check_panic(lck_mtx_t *mutex); @@ -64,7 +56,7 @@ __attribute__((always_inline)) static inline void lck_mtx_ilk_unlock_inline( lck_mtx_t *mutex, - uint32_t state) + uint32_t state) { state &= ~LCK_MTX_ILOCKED_MSK; ordered_store_mtx_state_release(mutex, state); @@ -76,15 +68,15 @@ __attribute__((always_inline)) static inline void lck_mtx_lock_finish_inline( lck_mtx_t *mutex, - uint32_t state, - boolean_t indirect) + uint32_t state, + boolean_t indirect) { assert(state & LCK_MTX_ILOCKED_MSK); /* release the interlock and re-enable preemption */ lck_mtx_ilk_unlock_inline(mutex, state); -#if CONFIG_DTRACE +#if CONFIG_DTRACE if (indirect) { LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, mutex, 0); } else { @@ -97,12 +89,12 @@ __attribute__((always_inline)) static inline void lck_mtx_try_lock_finish_inline( lck_mtx_t *mutex, - uint32_t state) + uint32_t state) { /* release the interlock and re-enable preemption */ lck_mtx_ilk_unlock_inline(mutex, state); -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, mutex, 0); #endif } @@ -111,7 +103,7 @@ __attribute__((always_inline)) static inline void lck_mtx_convert_spin_finish_inline( lck_mtx_t *mutex, - uint32_t state) + uint32_t state) { /* release the interlock and acquire it as mutex */ state &= ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_SPIN_MSK); @@ -129,14 +121,13 @@ lck_mtx_unlock_finish_inline( { enable_preemption(); -#if CONFIG_DTRACE +#if CONFIG_DTRACE if (indirect) { 
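/*
 * The ordered_load/ordered_store macros above use C11 _Generic to select
 * an atomic operation from the pointer's type.  A self-contained
 * illustration of the same trick with standard memory orders; note the
 * two association types must be distinct (true on LP64, where uintptr_t
 * is not uint32_t), and the cast to an _Atomic pointer mirrors what the
 * kernel macro does.
 */
#include <stdatomic.h>
#include <stdint.h>

#define relaxed_load(p) _Generic((p), \
	uint32_t *:  atomic_load_explicit((_Atomic uint32_t *)(p),  memory_order_relaxed), \
	uintptr_t *: atomic_load_explicit((_Atomic uintptr_t *)(p), memory_order_relaxed))

static uint32_t  state;
static uintptr_t owner;

static void
demo(void)
{
	uint32_t  s = relaxed_load(&state);    /* uint32_t branch selected */
	uintptr_t o = relaxed_load(&owner);    /* uintptr_t branch selected */
	(void)s; (void)o;
}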
LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, mutex, 0); } else { LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, mutex, 0); } -#endif // CONFIG_DTRACE +#endif // CONFIG_DTRACE } #endif /* _I386_LOCKS_I386_INLINES_H_ */ - diff --git a/osfmk/i386/locks_i386_opt.c b/osfmk/i386/locks_i386_opt.c index 90dcf06a1..fb0562fe8 100644 --- a/osfmk/i386/locks_i386_opt.c +++ b/osfmk/i386/locks_i386_opt.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -83,12 +83,15 @@ void __inline__ lck_mtx_check_preemption(void) { - if (get_preemption_level() == 0) + if (get_preemption_level() == 0) { return; - if (LckDisablePreemptCheck) + } + if (LckDisablePreemptCheck) { return; - if (current_cpu_datap()->cpu_hibernate) + } + if (current_cpu_datap()->cpu_hibernate) { return; + } panic("preemption_level(%d) != 0\n", get_preemption_level()); } @@ -172,7 +175,7 @@ lck_mtx_lock( __attribute__((noinline)) boolean_t lck_mtx_try_lock( - lck_mtx_t *lock) + lck_mtx_t *lock) { uint32_t prev, state; @@ -233,7 +236,7 @@ lck_mtx_try_lock( __attribute__((noinline)) void lck_mtx_lock_spin_always( - lck_mtx_t *lock) + lck_mtx_t *lock) { uint32_t prev, state; @@ -269,7 +272,7 @@ lck_mtx_lock_spin_always( } #endif -#if CONFIG_DTRACE +#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, lock, 0); #endif /* return with the interlock held and preemption disabled */ @@ -296,7 +299,7 @@ lck_mtx_lock_spin_always( */ void lck_mtx_lock_spin( - lck_mtx_t *lock) + lck_mtx_t *lock) { lck_mtx_check_preemption(); lck_mtx_lock_spin_always(lock); @@ -387,7 +390,7 @@ lck_mtx_try_lock_spin( } /* - * Routine: lck_mtx_unlock + * Routine: lck_mtx_unlock * * Unlocks a mutex held by current thread. * It tries the fast path first, and falls @@ -400,14 +403,15 @@ lck_mtx_try_lock_spin( __attribute__((noinline)) void lck_mtx_unlock( - lck_mtx_t *lock) + lck_mtx_t *lock) { uint32_t prev, state; state = ordered_load_mtx_state(lock); - if (state & LCK_MTX_SPIN_MSK) + if (state & LCK_MTX_SPIN_MSK) { return lck_mtx_unlock_slow(lock); + } /* * Only full mutex will go through the fast path @@ -418,10 +422,10 @@ lck_mtx_unlock( * If it is indirect it will fall through the slow path. */ - /* - * Fast path state: - * interlock not held, no waiters, no promotion and mutex held. - */ + /* + * Fast path state: + * interlock not held, no waiters, no promotion and mutex held. 
+ */ prev = state & ~(LCK_MTX_ILOCKED_MSK | LCK_MTX_WAITERS_MSK | LCK_MTX_PROMOTED_MSK); prev |= LCK_MTX_MLOCKED_MSK; @@ -440,8 +444,9 @@ lck_mtx_unlock( #if DEVELOPMENT | DEBUG thread_t owner = (thread_t)lock->lck_mtx_owner; - if(__improbable(owner != current_thread())) + if (__improbable(owner != current_thread())) { return lck_mtx_owner_check_panic(lock); + } #endif /* clear owner */ @@ -452,11 +457,11 @@ lck_mtx_unlock( #if MACH_LDEBUG thread_t thread = current_thread(); - if (thread) + if (thread) { thread->mutex_count--; + } #endif /* MACH_LDEBUG */ /* re-enable preemption */ lck_mtx_unlock_finish_inline(lock, FALSE); } - diff --git a/osfmk/i386/machdep_call.c b/osfmk/i386/machdep_call.c index 04bd5fb0d..cde942292 100644 --- a/osfmk/i386/machdep_call.c +++ b/osfmk/i386/machdep_call.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -35,37 +35,36 @@ * 17 June 1992 ? at NeXT * Created. 
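/*
 * Sketch of the unlock fast path above: the only state eligible for the
 * fast path has the mutex bit set and the interlock, waiter and promotion
 * bits clear, so build exactly that value and compare-exchange it for the
 * released state.  Any mismatch falls to the slow path.  This compresses
 * the kernel's sequence (which takes the interlock during the transition
 * and clears the owner field) into a single release CAS; flag values are
 * illustrative.
 */
#include <stdatomic.h>
#include <stdint.h>

#define M_ILOCKED  0x01u
#define M_LOCKED   0x02u
#define M_WAITERS  0x04u
#define M_PROMOTED 0x08u

static int  /* 1 = fast-path release done, 0 = take the slow path */
unlock_fast_sketch(_Atomic uint32_t *mtx_state)
{
	uint32_t state = atomic_load_explicit(mtx_state, memory_order_relaxed);
	uint32_t prev  = (state & ~(M_ILOCKED | M_WAITERS | M_PROMOTED)) | M_LOCKED;

	return atomic_compare_exchange_strong_explicit(mtx_state, &prev,
	    prev & ~M_LOCKED,       /* same state with the lock bit cleared */
	    memory_order_release, memory_order_relaxed);
}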
*/ - + #include #include -extern kern_return_t kern_invalid(void); +extern kern_return_t kern_invalid(void); -const machdep_call_t machdep_call_table[] = { - MACHDEP_CALL_ROUTINE(kern_invalid,0), - MACHDEP_CALL_ROUTINE(kern_invalid,0), - MACHDEP_CALL_ROUTINE(kern_invalid,0), - MACHDEP_CALL_ROUTINE(thread_fast_set_cthread_self,1), - MACHDEP_CALL_ROUTINE(thread_set_user_ldt,3), - MACHDEP_BSD_CALL_ROUTINE(i386_set_ldt,3), - MACHDEP_BSD_CALL_ROUTINE(i386_get_ldt,3), +const machdep_call_t machdep_call_table[] = { + MACHDEP_CALL_ROUTINE(kern_invalid, 0), + MACHDEP_CALL_ROUTINE(kern_invalid, 0), + MACHDEP_CALL_ROUTINE(kern_invalid, 0), + MACHDEP_CALL_ROUTINE(thread_fast_set_cthread_self, 1), + MACHDEP_CALL_ROUTINE(thread_set_user_ldt, 3), + MACHDEP_BSD_CALL_ROUTINE(i386_set_ldt, 3), + MACHDEP_BSD_CALL_ROUTINE(i386_get_ldt, 3), }; -const machdep_call_t machdep_call_table64[] = { +const machdep_call_t machdep_call_table64[] = { #if HYPERVISOR - MACHDEP_CALL_ROUTINE64(hv_task_trap,2), - MACHDEP_CALL_ROUTINE64(hv_thread_trap,2), + MACHDEP_CALL_ROUTINE64(hv_task_trap, 2), + MACHDEP_CALL_ROUTINE64(hv_thread_trap, 2), #else - MACHDEP_CALL_ROUTINE(kern_invalid,0), - MACHDEP_CALL_ROUTINE(kern_invalid,0), + MACHDEP_CALL_ROUTINE(kern_invalid, 0), + MACHDEP_CALL_ROUTINE(kern_invalid, 0), #endif - MACHDEP_CALL_ROUTINE(kern_invalid,0), - MACHDEP_CALL_ROUTINE64(thread_fast_set_cthread_self64,1), - MACHDEP_CALL_ROUTINE(kern_invalid,0), - MACHDEP_CALL_ROUTINE(kern_invalid,0), - MACHDEP_CALL_ROUTINE(kern_invalid,0), + MACHDEP_CALL_ROUTINE(kern_invalid, 0), + MACHDEP_CALL_ROUTINE64(thread_fast_set_cthread_self64, 1), + MACHDEP_CALL_ROUTINE(kern_invalid, 0), + MACHDEP_BSD_CALL_ROUTINE64(i386_set_ldt64, 3), + MACHDEP_BSD_CALL_ROUTINE64(i386_get_ldt64, 3) }; -int machdep_call_count = - (sizeof (machdep_call_table) / sizeof (machdep_call_t)); - +int machdep_call_count = + (sizeof(machdep_call_table) / sizeof(machdep_call_t)); diff --git a/osfmk/i386/machdep_call.h b/osfmk/i386/machdep_call.h index 29a25f4d7..514f4a301 100644 --- a/osfmk/i386/machdep_call.h +++ b/osfmk/i386/machdep_call.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,49 +37,57 @@ */ typedef union { - kern_return_t (*args_0)(void); - kern_return_t (*args_1)(uint32_t); - kern_return_t (*args64_1)(uint64_t); - kern_return_t (*args_2)(uint32_t,uint32_t); - kern_return_t (*args64_2)(uint64_t,uint64_t); - kern_return_t (*args_3)(uint32_t,uint32_t,uint32_t); - kern_return_t (*args_4)(uint32_t,uint32_t,uint32_t,uint32_t); - kern_return_t (*args_var)(uint32_t,...); - int (*args_bsd_3)(uint32_t *, uint32_t, - uint32_t, uint32_t); + kern_return_t (*args_0)(void); + kern_return_t (*args_1)(uint32_t); + kern_return_t (*args64_1)(uint64_t); + kern_return_t (*args_2)(uint32_t, uint32_t); + kern_return_t (*args64_2)(uint64_t, uint64_t); + kern_return_t (*args_3)(uint32_t, uint32_t, uint32_t); + kern_return_t (*args64_3)(uint64_t, uint64_t, uint64_t); + kern_return_t (*args_4)(uint32_t, uint32_t, uint32_t, uint32_t); + kern_return_t (*args_var)(uint32_t, ...); + int (*args_bsd_3)(uint32_t *, uint32_t, + uint32_t, uint32_t); + int (*args64_bsd_3)(uint32_t *, uint64_t, + uint64_t, uint64_t); } machdep_call_routine_t; -#define MACHDEP_CALL_ROUTINE(func, args) \ +#define MACHDEP_CALL_ROUTINE(func, args) \ { { .args_ ## args = func }, args, 0 } - -#define MACHDEP_CALL_ROUTINE64(func, args) \ + +#define MACHDEP_CALL_ROUTINE64(func, args) \ { { .args64_ ## args = func }, args, 0 } -#define MACHDEP_BSD_CALL_ROUTINE(func, args) \ +#define MACHDEP_BSD_CALL_ROUTINE(func, args) \ { { .args_bsd_ ## args = func }, args, 1 } +#define MACHDEP_BSD_CALL_ROUTINE64(func, args) \ + { { .args64_bsd_ ## args = func }, args, 1 } + typedef struct { - machdep_call_routine_t routine; - int nargs; - int bsd_style; + machdep_call_routine_t routine; + int nargs; + int bsd_style; } machdep_call_t; -extern const machdep_call_t machdep_call_table[]; -extern const machdep_call_t machdep_call_table64[]; +extern const machdep_call_t machdep_call_table[]; +extern const machdep_call_t machdep_call_table64[]; -extern int machdep_call_count; +extern int machdep_call_count; #if HYPERVISOR -extern kern_return_t hv_task_trap(uint64_t,uint64_t); -extern kern_return_t hv_thread_trap(uint64_t,uint64_t); +extern kern_return_t hv_task_trap(uint64_t, uint64_t); +extern kern_return_t hv_thread_trap(uint64_t, uint64_t); #endif -extern kern_return_t thread_fast_set_cthread_self(uint32_t); -extern kern_return_t thread_fast_set_cthread_self64(uint64_t); -extern kern_return_t thread_set_user_ldt(uint32_t,uint32_t,uint32_t); +extern kern_return_t thread_fast_set_cthread_self(uint32_t); +extern kern_return_t thread_fast_set_cthread_self64(uint64_t); +extern kern_return_t thread_set_user_ldt(uint32_t, uint32_t, uint32_t); -extern int i386_set_ldt(uint32_t *,uint32_t,uint32_t,uint32_t); -extern int i386_get_ldt(uint32_t *,uint32_t,uint32_t,uint32_t); +extern int i386_set_ldt(uint32_t *, uint32_t, uint32_t, uint32_t); +extern int i386_get_ldt(uint32_t *, uint32_t, uint32_t, uint32_t); +extern int i386_set_ldt64(uint32_t *, uint64_t, uint64_t, uint64_t); +extern int i386_get_ldt64(uint32_t *, uint64_t, uint64_t, uint64_t); -extern void machdep_syscall(x86_saved_state_t *); -extern void machdep_syscall64(x86_saved_state_t *); +extern void machdep_syscall(x86_saved_state_t *); +extern void machdep_syscall64(x86_saved_state_t *); diff --git a/osfmk/i386/machine_check.c b/osfmk/i386/machine_check.c index ce8344659..fd565d785 100644 --- a/osfmk/i386/machine_check.c +++ b/osfmk/i386/machine_check.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2011 Apple Inc. All rights reserved. 
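/*
 * The table/union layout above admits a very small dispatcher: bounds-check
 * the call number, then invoke the union member matching the entry's
 * argument count.  A condensed sketch with two arities; the real
 * machdep_syscall additionally handles the BSD-style entries and fetches
 * arguments from the saved thread state.
 */
#include <stdint.h>

typedef int kern_ret_t;                 /* stand-in for kern_return_t */

typedef union {
	kern_ret_t (*args_0)(void);
	kern_ret_t (*args_1)(uint32_t);
} mdep_routine_t;

typedef struct {
	mdep_routine_t routine;
	int            nargs;
} mdep_call_t;

static kern_ret_t
mdep_dispatch(const mdep_call_t *table, int count, int num, uint32_t arg0)
{
	if (num < 0 || num >= count) {
		return -1;                  /* invalid call number */
	}
	const mdep_call_t *e = &table[num];

	switch (e->nargs) {
	case 0:  return e->routine.args_0();
	case 1:  return e->routine.args_1(arg0);
	default: return -1;
	}
}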
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -48,31 +48,31 @@ * part is the affair. */ -#define IF(bool,str) ((bool) ? (str) : "") +#define IF(bool, str) ((bool) ? (str) : "") -static boolean_t mca_initialized = FALSE; -static boolean_t mca_MCE_present = FALSE; -static boolean_t mca_MCA_present = FALSE; -static uint32_t mca_family = 0; -static unsigned int mca_error_bank_count = 0; -static boolean_t mca_control_MSR_present = FALSE; -static boolean_t mca_cmci_present = FALSE; -static ia32_mcg_cap_t ia32_mcg_cap; +static boolean_t mca_initialized = FALSE; +static boolean_t mca_MCE_present = FALSE; +static boolean_t mca_MCA_present = FALSE; +static uint32_t mca_family = 0; +static unsigned int mca_error_bank_count = 0; +static boolean_t mca_control_MSR_present = FALSE; +static boolean_t mca_cmci_present = FALSE; +static ia32_mcg_cap_t ia32_mcg_cap; decl_simple_lock_data(static, mca_lock); typedef struct { - ia32_mci_ctl_t mca_mci_ctl; - ia32_mci_status_t mca_mci_status; - ia32_mci_misc_t mca_mci_misc; - ia32_mci_addr_t mca_mci_addr; + ia32_mci_ctl_t mca_mci_ctl; + ia32_mci_status_t mca_mci_status; + ia32_mci_misc_t mca_mci_misc; + ia32_mci_addr_t mca_mci_addr; } mca_mci_bank_t; typedef struct mca_state { - boolean_t mca_is_saved; - boolean_t mca_is_valid; /* some state is valid */ - ia32_mcg_ctl_t mca_mcg_ctl; - ia32_mcg_status_t mca_mcg_status; - mca_mci_bank_t mca_error_bank[0]; + boolean_t mca_is_saved; + boolean_t mca_is_valid; /* some state is valid */ + ia32_mcg_ctl_t mca_mcg_ctl; + ia32_mcg_status_t mca_mcg_status; + mca_mci_bank_t mca_error_bank[0]; } mca_state_t; typedef enum { @@ -85,15 +85,16 @@ static volatile mca_dump_state_t mca_dump_state = CLEAR; static void mca_get_availability(void) { - uint64_t features = cpuid_info()->cpuid_features; - uint32_t family = cpuid_info()->cpuid_family; - uint32_t model = cpuid_info()->cpuid_model; - uint32_t stepping = cpuid_info()->cpuid_stepping; + uint64_t features = cpuid_info()->cpuid_features; + uint32_t family = cpuid_info()->cpuid_family; + uint32_t model = cpuid_info()->cpuid_model; + uint32_t stepping = cpuid_info()->cpuid_stepping; - if ((model == CPUID_MODEL_HASWELL && stepping < 3) || + if ((model == CPUID_MODEL_HASWELL && stepping < 3) || (model == CPUID_MODEL_HASWELL_ULT && stepping < 1) || - (model == CPUID_MODEL_CRYSTALWELL && stepping < 1)) + (model == CPUID_MODEL_CRYSTALWELL && stepping < 1)) { panic("Haswell pre-C0 steppings are not supported"); + } mca_MCE_present = (features & CPUID_FEATURE_MCE) != 0; 
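/*
 * Stripped-down sketch of the availability gate in mca_get_availability
 * above: refuse known-bad steppings, then derive support from the CPUID
 * feature bits.  The model/stepping pair and the exact feature-bit
 * positions are treated as assumptions here; consult the Intel SDM for
 * the authoritative leaf-1 EDX layout.
 */
#include <stdint.h>

#define FEAT_MCE (1u << 7)      /* assumed CPUID leaf-1 EDX positions */
#define FEAT_MCA (1u << 14)

struct cpu_id { uint32_t features, model, stepping; };

static int  /* <0 = unsupported stepping, 0 = absent, 1 = MCE+MCA present */
mca_available_sketch(const struct cpu_id *id)
{
	if (id->model == 0x3C && id->stepping < 3) {  /* example bad stepping */
		return -1;          /* the kernel panics in this case */
	}
	return ((id->features & FEAT_MCE) && (id->features & FEAT_MCA)) ? 1 : 0;
}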
mca_MCA_present = (features & CPUID_FEATURE_MCA) != 0; @@ -113,7 +114,7 @@ mca_get_availability(void) void mca_cpu_init(void) { - unsigned int i; + unsigned int i; /* * The first (boot) processor is responsible for discovering the @@ -126,57 +127,62 @@ mca_cpu_init(void) } if (mca_MCA_present) { - /* Enable all MCA features */ - if (mca_control_MSR_present) + if (mca_control_MSR_present) { wrmsr64(IA32_MCG_CTL, IA32_MCG_CTL_ENABLE); - + } + switch (mca_family) { case 0x06: /* Enable all but mc0 */ - for (i = 1; i < mca_error_bank_count; i++) - wrmsr64(IA32_MCi_CTL(i),0xFFFFFFFFFFFFFFFFULL); - + for (i = 1; i < mca_error_bank_count; i++) { + wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL); + } + /* Clear all errors */ - for (i = 0; i < mca_error_bank_count; i++) + for (i = 0; i < mca_error_bank_count; i++) { wrmsr64(IA32_MCi_STATUS(i), 0ULL); + } break; case 0x0F: /* Enable all banks */ - for (i = 0; i < mca_error_bank_count; i++) - wrmsr64(IA32_MCi_CTL(i),0xFFFFFFFFFFFFFFFFULL); - + for (i = 0; i < mca_error_bank_count; i++) { + wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL); + } + /* Clear all errors */ - for (i = 0; i < mca_error_bank_count; i++) + for (i = 0; i < mca_error_bank_count; i++) { wrmsr64(IA32_MCi_STATUS(i), 0ULL); + } break; } } /* Enable machine check exception handling if available */ if (mca_MCE_present) { - set_cr4(get_cr4()|CR4_MCE); + set_cr4(get_cr4() | CR4_MCE); } } boolean_t mca_is_cmci_present(void) { - if (!mca_initialized) + if (!mca_initialized) { mca_cpu_init(); + } return mca_cmci_present; } void -mca_cpu_alloc(cpu_data_t *cdp) +mca_cpu_alloc(cpu_data_t *cdp) { - vm_size_t mca_state_size; + vm_size_t mca_state_size; /* * Allocate space for an array of error banks. */ mca_state_size = sizeof(mca_state_t) + - sizeof(mca_mci_bank_t) * mca_error_bank_count; + sizeof(mca_mci_bank_t) * mca_error_bank_count; cdp->cpu_mca_state = kalloc(mca_state_size); if (cdp->cpu_mca_state == NULL) { printf("mca_cpu_alloc() failed for cpu %d\n", cdp->cpu_number); @@ -188,44 +194,48 @@ mca_cpu_alloc(cpu_data_t *cdp) * If the boot processor is yet have its allocation made, * do this now. */ - if (cpu_datap(master_cpu)->cpu_mca_state == NULL) + if (cpu_datap(master_cpu)->cpu_mca_state == NULL) { mca_cpu_alloc(cpu_datap(master_cpu)); + } } static void mca_save_state(mca_state_t *mca_state) { mca_mci_bank_t *bank; - unsigned int i; + unsigned int i; assert(!ml_get_interrupts_enabled() || get_preemption_level() > 0); - if (mca_state == NULL) + if (mca_state == NULL) { return; + } mca_state->mca_mcg_ctl = mca_control_MSR_present ? - rdmsr64(IA32_MCG_CTL) : 0ULL; + rdmsr64(IA32_MCG_CTL) : 0ULL; mca_state->mca_mcg_status.u64 = rdmsr64(IA32_MCG_STATUS); - bank = (mca_mci_bank_t *) &mca_state->mca_error_bank[0]; + bank = (mca_mci_bank_t *) &mca_state->mca_error_bank[0]; for (i = 0; i < mca_error_bank_count; i++, bank++) { - bank->mca_mci_ctl = rdmsr64(IA32_MCi_CTL(i)); - bank->mca_mci_status.u64 = rdmsr64(IA32_MCi_STATUS(i)); - if (!bank->mca_mci_status.bits.val) + bank->mca_mci_ctl = rdmsr64(IA32_MCi_CTL(i)); + bank->mca_mci_status.u64 = rdmsr64(IA32_MCi_STATUS(i)); + if (!bank->mca_mci_status.bits.val) { continue; + } bank->mca_mci_misc = (bank->mca_mci_status.bits.miscv)? - rdmsr64(IA32_MCi_MISC(i)) : 0ULL; + rdmsr64(IA32_MCi_MISC(i)) : 0ULL; bank->mca_mci_addr = (bank->mca_mci_status.bits.addrv)? 
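/*
 * mca_state_t above ends in a zero-length bank array, so mca_cpu_alloc
 * sizes each per-CPU allocation as header plus bank_count banks.  The
 * same pattern in standard C uses a flexible array member:
 */
#include <stdint.h>
#include <stdlib.h>

typedef struct {
	uint64_t ctl, status, misc, addr;   /* mirrors mca_mci_bank_t */
} bank_t;

typedef struct {
	int    is_valid;
	bank_t banks[];                     /* C99 flexible array member */
} mca_state_sketch_t;

static mca_state_sketch_t *
alloc_mca_state(unsigned bank_count)
{
	/* caller checks for NULL, exactly as mca_cpu_alloc does */
	return calloc(1, sizeof(mca_state_sketch_t) + bank_count * sizeof(bank_t));
}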
- rdmsr64(IA32_MCi_ADDR(i)) : 0ULL; + rdmsr64(IA32_MCi_ADDR(i)) : 0ULL; mca_state->mca_is_valid = TRUE; - } + } /* * If we're the first thread with MCA state, point our package to it * and don't care about races */ - if (x86_package()->mca_state == NULL) + if (x86_package()->mca_state == NULL) { x86_package()->mca_state = mca_state; + } mca_state->mca_is_saved = TRUE; } @@ -233,8 +243,9 @@ mca_save_state(mca_state_t *mca_state) void mca_check_save(void) { - if (mca_dump_state > CLEAR) + if (mca_dump_state > CLEAR) { mca_save_state(current_cpu_datap()->cpu_mca_state); + } } static void @@ -243,49 +254,52 @@ mca_report_cpu_info(void) i386_cpu_info_t *infop = cpuid_info(); paniclog_append_noflush(" family: %d model: %d stepping: %d microcode: %d\n", - infop->cpuid_family, - infop->cpuid_model, - infop->cpuid_stepping, - infop->cpuid_microcode_version); + infop->cpuid_family, + infop->cpuid_model, + infop->cpuid_stepping, + infop->cpuid_microcode_version); paniclog_append_noflush(" signature: 0x%x\n", - infop->cpuid_signature); + infop->cpuid_signature); paniclog_append_noflush(" %s\n", - infop->cpuid_brand_string); - + infop->cpuid_brand_string); } static void mca_dump_bank(mca_state_t *state, int i) { - mca_mci_bank_t *bank; - ia32_mci_status_t status; + mca_mci_bank_t *bank; + ia32_mci_status_t status; bank = &state->mca_error_bank[i]; status = bank->mca_mci_status; - if (!status.bits.val) + if (!status.bits.val) { return; + } paniclog_append_noflush(" IA32_MC%d_STATUS(0x%x): 0x%016qx\n", - i, IA32_MCi_STATUS(i), status.u64); + i, IA32_MCi_STATUS(i), status.u64); - if (status.bits.addrv) + if (status.bits.addrv) { paniclog_append_noflush(" IA32_MC%d_ADDR(0x%x): 0x%016qx\n", - i, IA32_MCi_ADDR(i), bank->mca_mci_addr); + i, IA32_MCi_ADDR(i), bank->mca_mci_addr); + } - if (status.bits.miscv) + if (status.bits.miscv) { paniclog_append_noflush(" IA32_MC%d_MISC(0x%x): 0x%016qx\n", - i, IA32_MCi_MISC(i), bank->mca_mci_misc); + i, IA32_MCi_MISC(i), bank->mca_mci_misc); + } } static void mca_cpu_dump_error_banks(mca_state_t *state) { - unsigned int i; + unsigned int i; - if (!state->mca_is_valid) + if (!state->mca_is_valid) { return; + } - for (i = 0; i < mca_error_bank_count; i++ ) { + for (i = 0; i < mca_error_bank_count; i++) { mca_dump_bank(state, i); } } @@ -293,9 +307,9 @@ mca_cpu_dump_error_banks(mca_state_t *state) void mca_dump(void) { - mca_state_t *mca_state = current_cpu_datap()->cpu_mca_state; - uint64_t deadline; - unsigned int i = 0; + mca_state_t *mca_state = current_cpu_datap()->cpu_mca_state; + uint64_t deadline; + unsigned int i = 0; /* * Capture local MCA registers to per-cpu data. @@ -306,11 +320,12 @@ mca_dump(void) * Serialize: the first caller controls dumping MCA registers, * other threads spin meantime. 
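/*
 * mca_dump above serializes panic-time dumping through a tri-state flag:
 * the first caller moves CLEAR -> DUMPING and does the work while
 * latecomers spin until the state leaves DUMPING.  A sketch with a single
 * C11 atomic standing in for the simple_lock-protected variable:
 */
#include <stdatomic.h>

enum dump_state { DS_CLEAR, DS_DUMPING, DS_DUMPED };
static _Atomic int dump_state = DS_CLEAR;

static void
dump_once_sketch(void (*do_dump)(void))
{
	int expected = DS_CLEAR;

	if (!atomic_compare_exchange_strong(&dump_state, &expected, DS_DUMPING)) {
		/* another CPU is dumping: spin until it finishes */
		while (atomic_load(&dump_state) == DS_DUMPING) {
			/* cpu_pause() in the kernel */
		}
		return;
	}
	do_dump();
	atomic_store(&dump_state, DS_DUMPED);
}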
*/ - simple_lock(&mca_lock); + simple_lock(&mca_lock, LCK_GRP_NULL); if (mca_dump_state > CLEAR) { simple_unlock(&mca_lock); - while (mca_dump_state == DUMPING) + while (mca_dump_state == DUMPING) { cpu_pause(); + } return; } mca_dump_state = DUMPING; @@ -337,13 +352,13 @@ mca_dump(void) mca_report_cpu_info(); paniclog_append_noflush(" %d error-reporting banks\n", mca_error_bank_count); - + /* * Dump all processor state: */ for (i = 0; i < real_ncpus; i++) { - mca_state_t *mcsp = cpu_datap(i)->cpu_mca_state; - ia32_mcg_status_t status; + mca_state_t *mcsp = cpu_datap(i)->cpu_mca_state; + ia32_mcg_status_t status; if (mcsp == NULL || mcsp->mca_is_saved == FALSE || @@ -353,7 +368,7 @@ mca_dump(void) } status = mcsp->mca_mcg_status; paniclog_append_noflush("Processor %d: IA32_MCG_STATUS: 0x%016qx\n", - i, status.u64); + i, status.u64); mca_cpu_dump_error_banks(mcsp); } @@ -365,7 +380,8 @@ mca_dump(void) #if DEVELOPMENT || DEBUG extern void mca_exception_panic(void); extern void lapic_trigger_MC(void); -void mca_exception_panic(void) +void +mca_exception_panic(void) { lapic_trigger_MC(); } diff --git a/osfmk/i386/machine_check.h b/osfmk/i386/machine_check.h index 138122596..6a1a5af9f 100644 --- a/osfmk/i386/machine_check.h +++ b/osfmk/i386/machine_check.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL_PRIVATE @@ -41,126 +41,126 @@ * Macro BITS(n,m) returns the number of bits between bit(n) and bit(m), * where (n>m). Macro BIT1(n) is cosmetic and returns 1. 
*/ -#define BITS(n,m) ((n)-(m)+1) -#define BIT1(n) (1) +#define BITS(n, m) ((n)-(m)+1) +#define BIT1(n) (1) /* * IA32 SDM 14.3.1 Machine-Check Global Control MSRs: */ -#define IA32_MCG_CAP (0x179) +#define IA32_MCG_CAP (0x179) typedef union { - struct { - uint64_t count :BITS(7,0); - uint64_t mcg_ctl_p :BIT1(8); - uint64_t mcg_ext_p :BIT1(9); - uint64_t mcg_ext_corr_err_p :BIT1(10); - uint64_t mcg_tes_p :BIT1(11); - uint64_t mcg_ecms :BIT1(12); - uint64_t mcg_reserved2 :BITS(15,13); - uint64_t mcg_ext_cnt :BITS(23,16); - uint64_t mcg_ser_p :BIT1(24); - } bits; - uint64_t u64; + struct { + uint64_t count :BITS(7, 0); + uint64_t mcg_ctl_p :BIT1(8); + uint64_t mcg_ext_p :BIT1(9); + uint64_t mcg_ext_corr_err_p :BIT1(10); + uint64_t mcg_tes_p :BIT1(11); + uint64_t mcg_ecms :BIT1(12); + uint64_t mcg_reserved2 :BITS(15, 13); + uint64_t mcg_ext_cnt :BITS(23, 16); + uint64_t mcg_ser_p :BIT1(24); + } bits; + uint64_t u64; } ia32_mcg_cap_t; -#define IA32_MCG_STATUS (0x17A) +#define IA32_MCG_STATUS (0x17A) typedef union { - struct { - uint64_t ripv :BIT1(0); - uint64_t eipv :BIT1(1); - uint64_t mcip :BIT1(2); - } bits; - uint64_t u64; + struct { + uint64_t ripv :BIT1(0); + uint64_t eipv :BIT1(1); + uint64_t mcip :BIT1(2); + } bits; + uint64_t u64; } ia32_mcg_status_t; -#define IA32_MCG_CTL (0x17B) -typedef uint64_t ia32_mcg_ctl_t; -#define IA32_MCG_CTL_ENABLE (0xFFFFFFFFFFFFFFFFULL) -#define IA32_MCG_CTL_DISABLE (0x0ULL) +#define IA32_MCG_CTL (0x17B) +typedef uint64_t ia32_mcg_ctl_t; +#define IA32_MCG_CTL_ENABLE (0xFFFFFFFFFFFFFFFFULL) +#define IA32_MCG_CTL_DISABLE (0x0ULL) /* * IA32 SDM 14.3.2 Error-Reporting Register Banks: */ -#define IA32_MCi_CTL(i) (0x400 + 4*(i)) -#define IA32_MCi_STATUS(i) (0x401 + 4*(i)) -#define IA32_MCi_ADDR(i) (0x402 + 4*(i)) -#define IA32_MCi_MISC(i) (0x403 + 4*(i)) - -#define IA32_MC0_CTL IA32_MCi_CTL(0) -#define IA32_MC0_STATUS IA32_MCi_STATUS(0) -#define IA32_MC0_ADDR IA32_MCi_ADDR(0) -#define IA32_MC0_MISC IA32_MCi_MISC(0) - -#define IA32_MC1_CTL IA32_MCi_CTL(1) -#define IA32_MC1_STATUS IA32_MCi_STATUS(1) -#define IA32_MC1_ADDR IA32_MCi_ADDR(1) -#define IA32_MC1_MISC IA32_MCi_MISC(1) - -#define IA32_MC2_CTL IA32_MCi_CTL(2) -#define IA32_MC2_STATUS IA32_MCi_STATUS(2) -#define IA32_MC2_ADDR IA32_MCi_ADDR(2) -#define IA32_MC2_MISC IA32_MCi_MISC(2) - -#define IA32_MC3_CTL IA32_MCi_CTL(3) -#define IA32_MC3_STATUS IA32_MCi_STATUS(3) -#define IA32_MC3_ADDR IA32_MCi_ADDR(3) -#define IA32_MC3_MISC IA32_MCi_MISC(3) - -#define IA32_MC4_CTL IA32_MCi_CTL(4) -#define IA32_MC4_STATUS IA32_MCi_STATUS(4) -#define IA32_MC4_ADDR IA32_MCi_ADDR(4) -#define IA32_MC4_MISC IA32_MCi_MISC(4) - -typedef uint64_t ia32_mci_ctl_t; -#define IA32_MCi_CTL_EE(j) (0x1ULL << (j)) -#define IA32_MCi_CTL_ENABLE_ALL (0xFFFFFFFFFFFFFFFFULL) +#define IA32_MCi_CTL(i) (0x400 + 4*(i)) +#define IA32_MCi_STATUS(i) (0x401 + 4*(i)) +#define IA32_MCi_ADDR(i) (0x402 + 4*(i)) +#define IA32_MCi_MISC(i) (0x403 + 4*(i)) + +#define IA32_MC0_CTL IA32_MCi_CTL(0) +#define IA32_MC0_STATUS IA32_MCi_STATUS(0) +#define IA32_MC0_ADDR IA32_MCi_ADDR(0) +#define IA32_MC0_MISC IA32_MCi_MISC(0) + +#define IA32_MC1_CTL IA32_MCi_CTL(1) +#define IA32_MC1_STATUS IA32_MCi_STATUS(1) +#define IA32_MC1_ADDR IA32_MCi_ADDR(1) +#define IA32_MC1_MISC IA32_MCi_MISC(1) + +#define IA32_MC2_CTL IA32_MCi_CTL(2) +#define IA32_MC2_STATUS IA32_MCi_STATUS(2) +#define IA32_MC2_ADDR IA32_MCi_ADDR(2) +#define IA32_MC2_MISC IA32_MCi_MISC(2) + +#define IA32_MC3_CTL IA32_MCi_CTL(3) +#define IA32_MC3_STATUS IA32_MCi_STATUS(3) +#define IA32_MC3_ADDR 
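/*
 * The register unions above overlay named bitfields on the raw 64-bit MSR
 * value so one rdmsr64() result can be decoded by field name.  A host-side
 * sketch decoding a fabricated IA32_MCG_CAP value; the layout condenses
 * the middle bits, and bitfield order is implementation-defined (this
 * matches the little-endian layout the kernel relies on).
 */
#include <stdint.h>
#include <stdio.h>

typedef union {
	struct {
		uint64_t count       : 8;   /* bits 7:0  - error bank count */
		uint64_t mcg_ctl_p   : 1;   /* bit 8     - IA32_MCG_CTL present */
		uint64_t mcg_ext_p   : 1;   /* bit 9 */
		uint64_t             : 6;   /* bits 15:10 condensed */
		uint64_t mcg_ext_cnt : 8;   /* bits 23:16 */
		uint64_t mcg_ser_p   : 1;   /* bit 24 */
	} bits;
	uint64_t u64;
} mcg_cap_sketch_t;

int
main(void)
{
	mcg_cap_sketch_t cap = { .u64 = 0x109ULL };   /* fabricated value */

	printf("banks=%llu ctl_p=%llu\n",
	    (unsigned long long)cap.bits.count,
	    (unsigned long long)cap.bits.mcg_ctl_p);  /* banks=9 ctl_p=1 */
	return 0;
}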
IA32_MCi_ADDR(3) +#define IA32_MC3_MISC IA32_MCi_MISC(3) + +#define IA32_MC4_CTL IA32_MCi_CTL(4) +#define IA32_MC4_STATUS IA32_MCi_STATUS(4) +#define IA32_MC4_ADDR IA32_MCi_ADDR(4) +#define IA32_MC4_MISC IA32_MCi_MISC(4) + +typedef uint64_t ia32_mci_ctl_t; +#define IA32_MCi_CTL_EE(j) (0x1ULL << (j)) +#define IA32_MCi_CTL_ENABLE_ALL (0xFFFFFFFFFFFFFFFFULL) typedef union { - struct { - uint64_t mca_error :BITS(15,0); - uint64_t model_specific_error :BITS(31,16); - uint64_t other_information :BITS(56,32); - uint64_t pcc :BIT1(57); - uint64_t addrv :BIT1(58); - uint64_t miscv :BIT1(59); - uint64_t en :BIT1(60); - uint64_t uc :BIT1(61); - uint64_t over :BIT1(62); - uint64_t val :BIT1(63); - } bits; - struct { /* Variant if threshold-based error status present: */ - uint64_t mca_error :BITS(15,0); - uint64_t model_specific_error :BITS(31,16); - uint64_t other_information :BITS(52,32); - uint64_t threshold :BITS(54,53); - uint64_t ar :BIT1(55); - uint64_t s :BIT1(56); - uint64_t pcc :BIT1(57); - uint64_t addrv :BIT1(58); - uint64_t miscv :BIT1(59); - uint64_t en :BIT1(60); - uint64_t uc :BIT1(61); - uint64_t over :BIT1(62); - uint64_t val :BIT1(63); - } bits_tes_p; - uint64_t u64; + struct { + uint64_t mca_error :BITS(15, 0); + uint64_t model_specific_error :BITS(31, 16); + uint64_t other_information :BITS(56, 32); + uint64_t pcc :BIT1(57); + uint64_t addrv :BIT1(58); + uint64_t miscv :BIT1(59); + uint64_t en :BIT1(60); + uint64_t uc :BIT1(61); + uint64_t over :BIT1(62); + uint64_t val :BIT1(63); + } bits; + struct { /* Variant if threshold-based error status present: */ + uint64_t mca_error :BITS(15, 0); + uint64_t model_specific_error :BITS(31, 16); + uint64_t other_information :BITS(52, 32); + uint64_t threshold :BITS(54, 53); + uint64_t ar :BIT1(55); + uint64_t s :BIT1(56); + uint64_t pcc :BIT1(57); + uint64_t addrv :BIT1(58); + uint64_t miscv :BIT1(59); + uint64_t en :BIT1(60); + uint64_t uc :BIT1(61); + uint64_t over :BIT1(62); + uint64_t val :BIT1(63); + } bits_tes_p; + uint64_t u64; } ia32_mci_status_t; /* Values for threshold_status if mcg_tes_p == 1 and uc == 0 */ -#define THRESHOLD_STATUS_NO_TRACKING 0 -#define THRESHOLD_STATUS_GREEN 1 -#define THRESHOLD_STATUS_YELLOW 2 -#define THRESHOLD_STATUS_RESERVED 3 - -typedef uint64_t ia32_mci_addr_t; -typedef uint64_t ia32_mci_misc_t; - -extern void mca_cpu_alloc(cpu_data_t *cdp); -extern void mca_cpu_init(void); -extern void mca_dump(void); -extern void mca_check_save(void); -extern boolean_t mca_is_cmci_present(void); - -#endif /* _I386_MACHINE_CHECK_H_ */ -#endif /* KERNEL_PRIVATE */ +#define THRESHOLD_STATUS_NO_TRACKING 0 +#define THRESHOLD_STATUS_GREEN 1 +#define THRESHOLD_STATUS_YELLOW 2 +#define THRESHOLD_STATUS_RESERVED 3 + +typedef uint64_t ia32_mci_addr_t; +typedef uint64_t ia32_mci_misc_t; + +extern void mca_cpu_alloc(cpu_data_t *cdp); +extern void mca_cpu_init(void); +extern void mca_dump(void); +extern void mca_check_save(void); +extern boolean_t mca_is_cmci_present(void); + +#endif /* _I386_MACHINE_CHECK_H_ */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/i386/machine_cpu.h b/osfmk/i386/machine_cpu.h index c193948e1..e8911fdb7 100644 --- a/osfmk/i386/machine_cpu.h +++ b/osfmk/i386/machine_cpu.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _I386_MACHINE_CPU_H_ @@ -35,24 +35,26 @@ #include __BEGIN_DECLS -void cpu_machine_init( +void cpu_machine_init( void); -void handle_pending_TLB_flushes( +void handle_pending_TLB_flushes( void); int cpu_signal_handler(x86_saved_state_t *regs); kern_return_t cpu_register( - int *slot_nump); + int *slot_nump); __END_DECLS -static inline void cpu_halt(void) +static inline void +cpu_halt(void) { - asm volatile( "wbinvd; cli; hlt" ); + asm volatile ( "wbinvd; cli; hlt"); } -static inline void cpu_pause(void) +static inline void +cpu_pause(void) { __builtin_ia32_pause(); } diff --git a/osfmk/i386/machine_routines.c b/osfmk/i386/machine_routines.c index 611470eaa..7d4568ed9 100644 --- a/osfmk/i386/machine_routines.c +++ b/osfmk/i386/machine_routines.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -60,7 +60,7 @@ #include #include #if DEBUG -#define DBG(x...) kprintf("DBG: " x) +#define DBG(x...) kprintf("DBG: " x) #else #define DBG(x...) 
#endif @@ -69,23 +69,23 @@ #include #endif /* MONOTONIC */ -extern void wakeup(void *); +extern void wakeup(void *); static int max_cpus_initialized = 0; -uint64_t LockTimeOut; -uint64_t TLBTimeOut; -uint64_t LockTimeOutTSC; -uint32_t LockTimeOutUsec; -uint64_t MutexSpin; -uint64_t LastDebuggerEntryAllowance; -uint64_t delay_spin_threshold; +uint64_t LockTimeOut; +uint64_t TLBTimeOut; +uint64_t LockTimeOutTSC; +uint32_t LockTimeOutUsec; +uint64_t MutexSpin; +uint64_t LastDebuggerEntryAllowance; +uint64_t delay_spin_threshold; extern uint64_t panic_restart_timeout; boolean_t virtualized = FALSE; -decl_simple_lock_data(static, ml_timer_evaluation_slock); +decl_simple_lock_data(static, ml_timer_evaluation_slock); uint32_t ml_timer_eager_evaluations; uint64_t ml_timer_eager_evaluation_max; static boolean_t ml_timer_evaluation_in_progress = FALSE; @@ -97,24 +97,27 @@ static boolean_t ml_timer_evaluation_in_progress = FALSE; /* IO memory map services */ /* Map memory map IO space */ -vm_offset_t ml_io_map( - vm_offset_t phys_addr, +vm_offset_t +ml_io_map( + vm_offset_t phys_addr, vm_size_t size) { - return(io_map(phys_addr,size,VM_WIMG_IO)); + return io_map(phys_addr, size, VM_WIMG_IO); } /* boot memory allocation */ -vm_offset_t ml_static_malloc( - __unused vm_size_t size) +vm_offset_t +ml_static_malloc( + __unused vm_size_t size) { - return((vm_offset_t)NULL); + return (vm_offset_t)NULL; } -void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size) +void +ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size) { - *phys_addr = 0; + *phys_addr = 0; *size = 0; } @@ -128,7 +131,7 @@ ml_static_ptovirt( #else return (vm_offset_t)((paddr) | LINEAR_KERNEL_ADDRESS); #endif -} +} vm_offset_t ml_static_slide( @@ -157,25 +160,26 @@ ml_static_mfree( addr64_t vaddr_cur; ppnum_t ppn; uint32_t freed_pages = 0; + assert(vaddr >= VM_MIN_KERNEL_ADDRESS); - assert((vaddr & (PAGE_SIZE-1)) == 0); /* must be page aligned */ + assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */ for (vaddr_cur = vaddr; - vaddr_cur < round_page_64(vaddr+size); - vaddr_cur += PAGE_SIZE) { + vaddr_cur < round_page_64(vaddr + size); + vaddr_cur += PAGE_SIZE) { ppn = pmap_find_phys(kernel_pmap, vaddr_cur); if (ppn != (vm_offset_t)NULL) { - kernel_pmap->stats.resident_count++; + kernel_pmap->stats.resident_count++; if (kernel_pmap->stats.resident_count > kernel_pmap->stats.resident_max) { kernel_pmap->stats.resident_max = - kernel_pmap->stats.resident_count; + kernel_pmap->stats.resident_count; } - pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE); + pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur + PAGE_SIZE); assert(pmap_valid_page(ppn)); if (IS_MANAGED_PAGE(ppn)) { - vm_page_create(ppn,(ppn+1)); + vm_page_create(ppn, (ppn + 1)); freed_pages++; } } @@ -183,19 +187,24 @@ ml_static_mfree( vm_page_lockspin_queues(); vm_page_wire_count -= freed_pages; vm_page_wire_count_initial -= freed_pages; + if (vm_page_wire_count_on_boot != 0) { + assert(vm_page_wire_count_on_boot >= freed_pages); + vm_page_wire_count_on_boot -= freed_pages; + } vm_page_unlock_queues(); -#if DEBUG +#if DEBUG kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n", freed_pages, (void *)vaddr, (uint64_t)size, ppn); #endif } /* virtual to physical on wired pages */ -vm_offset_t ml_vtophys( +vm_offset_t +ml_vtophys( vm_offset_t vaddr) { - return (vm_offset_t)kvtophys(vaddr); + return (vm_offset_t)kvtophys(vaddr); } /* @@ -208,24 +217,30 @@ vm_offset_t ml_vtophys( * the duration of the copy process. 
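/*
 * ml_static_mfree above walks the region page by page, unmapping each
 * resident page and handing managed pages back to the VM.  A sketch of
 * just the page-walk arithmetic (round the end up, step by page size);
 * free_one_page() is a hypothetical stand-in for the pmap_find_phys/
 * pmap_remove/vm_page_create sequence.
 */
#include <stdint.h>

#define PG_SIZE 4096ull
#define ROUND_PG(x) (((x) + PG_SIZE - 1) & ~(PG_SIZE - 1))

extern int free_one_page(uint64_t va);

static unsigned
static_mfree_sketch(uint64_t vaddr, uint64_t size)
{
	unsigned freed = 0;

	if (vaddr & (PG_SIZE - 1)) {    /* mirrored precondition: page aligned */
		return 0;
	}
	for (uint64_t va = vaddr; va < ROUND_PG(vaddr + size); va += PG_SIZE) {
		if (free_one_page(va)) {
			freed++;
		}
	}
	return freed;
}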
*/ -vm_size_t ml_nofault_copy( +vm_size_t +ml_nofault_copy( vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size) { addr64_t cur_phys_dst, cur_phys_src; uint32_t count, nbytes = 0; while (size > 0) { - if (!(cur_phys_src = kvtophys(virtsrc))) + if (!(cur_phys_src = kvtophys(virtsrc))) { break; - if (!(cur_phys_dst = kvtophys(virtdst))) + } + if (!(cur_phys_dst = kvtophys(virtdst))) { break; - if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) + } + if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) { break; + } count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); - if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) + if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) { count = (uint32_t)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK)); - if (count > size) + } + if (count > size) { count = (uint32_t)size; + } bcopy_phys(cur_phys_src, cur_phys_dst, count); @@ -248,20 +263,24 @@ vm_size_t ml_nofault_copy( * FALSE otherwise. */ -boolean_t ml_validate_nofault( +boolean_t +ml_validate_nofault( vm_offset_t virtsrc, vm_size_t size) { addr64_t cur_phys_src; uint32_t count; while (size > 0) { - if (!(cur_phys_src = kvtophys(virtsrc))) + if (!(cur_phys_src = kvtophys(virtsrc))) { return FALSE; - if (!pmap_valid_page(i386_btop(cur_phys_src))) + } + if (!pmap_valid_page(i386_btop(cur_phys_src))) { return FALSE; + } count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); - if (count > size) + if (count > size) { count = (uint32_t)size; + } virtsrc += count; size -= count; @@ -273,54 +292,76 @@ boolean_t ml_validate_nofault( /* Interrupt handling */ /* Initialize Interrupts */ -void ml_init_interrupt(void) +void +ml_init_interrupt(void) { (void) ml_set_interrupts_enabled(TRUE); } /* Get Interrupts Enabled */ -boolean_t ml_get_interrupts_enabled(void) +boolean_t +ml_get_interrupts_enabled(void) { - unsigned long flags; + unsigned long flags; - __asm__ volatile("pushf; pop %0" : "=r" (flags)); - return (flags & EFL_IF) != 0; + __asm__ volatile ("pushf; pop %0": "=r" (flags)); + return (flags & EFL_IF) != 0; } /* Set Interrupts Enabled */ -boolean_t ml_set_interrupts_enabled(boolean_t enable) +boolean_t +ml_set_interrupts_enabled(boolean_t enable) { unsigned long flags; boolean_t istate; - - __asm__ volatile("pushf; pop %0" : "=r" (flags)); + + __asm__ volatile ("pushf; pop %0" : "=r" (flags)); assert(get_interrupt_level() ? (enable == FALSE) : TRUE); istate = ((flags & EFL_IF) != 0); if (enable) { - __asm__ volatile("sti;nop"); + __asm__ volatile ("sti;nop"); - if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT)) + if ((get_preemption_level() == 0) && (*ast_pending() & AST_URGENT)) { __asm__ volatile ("int %0" :: "N" (T_PREEMPT)); - } - else { - if (istate) - __asm__ volatile("cli"); + } + } else { + if (istate) { + __asm__ volatile ("cli"); + } } return istate; } +/* Early Set Interrupts Enabled */ +boolean_t +ml_early_set_interrupts_enabled(boolean_t enable) +{ + if (enable == TRUE) { + kprintf("Caller attempted to enable interrupts too early in " + "kernel startup. 
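ml_set_interrupts_enabled() returns the previous EFLAGS.IF state, which is what makes the bracketing idiom used throughout this file work: disable, do the critical work, then restore whatever state the caller had rather than unconditionally re-enabling. A kernel-context sketch of the shape (not standalone; do_work is a placeholder):

static void
with_interrupts_disabled(void (*do_work)(void *), void *arg)
{
	boolean_t istate;

	istate = ml_set_interrupts_enabled(FALSE);  /* returns the prior state */
	do_work(arg);
	(void) ml_set_interrupts_enabled(istate);   /* restore; never assume TRUE */
}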
Halting.\n"); + hlt(); + /*NOTREACHED*/ + } + + /* On x86, do not allow interrupts to be enabled very early */ + return FALSE; +} + /* Check if running at interrupt context */ -boolean_t ml_at_interrupt_context(void) +boolean_t +ml_at_interrupt_context(void) { return get_interrupt_level() != 0; } -void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) { +void +ml_get_power_state(boolean_t *icp, boolean_t *pidlep) +{ *icp = (get_interrupt_level() != 0); /* These will be technically inaccurate for interrupts that occur * successively within a single "idle exit" event, but shouldn't @@ -330,7 +371,8 @@ void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) { } /* Generate a fake interrupt */ -void ml_cause_interrupt(void) +void +ml_cause_interrupt(void) { panic("ml_cause_interrupt not defined yet on Intel"); } @@ -339,9 +381,10 @@ void ml_cause_interrupt(void) * TODO: transition users of this to kernel_thread_start_priority * ml_thread_policy is an unsupported KPI */ -void ml_thread_policy( +void +ml_thread_policy( thread_t thread, -__unused unsigned policy_id, + __unused unsigned policy_id, unsigned policy_info) { if (policy_info & MACHINE_NETWORK_WORKLOOP) { @@ -351,26 +394,27 @@ __unused unsigned policy_id, info.importance = 1; kret = thread_policy_set_internal(thread, THREAD_PRECEDENCE_POLICY, - (thread_policy_t)&info, - THREAD_PRECEDENCE_POLICY_COUNT); + (thread_policy_t)&info, + THREAD_PRECEDENCE_POLICY_COUNT); assert(kret == KERN_SUCCESS); } } /* Initialize Interrupts */ -void ml_install_interrupt_handler( +void +ml_install_interrupt_handler( void *nub, int source, void *target, IOInterruptHandler handler, - void *refCon) + void *refCon) { boolean_t current_state; current_state = ml_set_interrupts_enabled(FALSE); PE_install_interrupt_handler(nub, source, target, - (IOInterruptHandler) handler, refCon); + (IOInterruptHandler) handler, refCon); (void) ml_set_interrupts_enabled(current_state); @@ -380,7 +424,7 @@ void ml_install_interrupt_handler( void machine_signal_idle( - processor_t processor) + processor_t processor) { cpu_interrupt(processor->cpu_id); } @@ -401,12 +445,12 @@ machine_signal_idle_cancel( static kern_return_t register_cpu( - uint32_t lapic_id, + uint32_t lapic_id, processor_t *processor_out, boolean_t boot_cpu ) { - int target_cpu; - cpu_data_t *this_cpu_datap; + int target_cpu; + cpu_data_t *this_cpu_datap; this_cpu_datap = cpu_data_alloc(boot_cpu); if (this_cpu_datap == NULL) { @@ -414,42 +458,47 @@ register_cpu( } target_cpu = this_cpu_datap->cpu_number; assert((boot_cpu && (target_cpu == 0)) || - (!boot_cpu && (target_cpu != 0))); + (!boot_cpu && (target_cpu != 0))); lapic_cpu_map(lapic_id, target_cpu); /* The cpu_id is not known at registration phase. 
Just do - * lapic_id for now + * lapic_id for now */ this_cpu_datap->cpu_phys_number = lapic_id; this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu); - if (this_cpu_datap->cpu_console_buf == NULL) + if (this_cpu_datap->cpu_console_buf == NULL) { goto failed; + } #if KPC - if (kpc_register_cpu(this_cpu_datap) != TRUE) + if (kpc_register_cpu(this_cpu_datap) != TRUE) { goto failed; + } #endif if (!boot_cpu) { cpu_thread_alloc(this_cpu_datap->cpu_number); - if (this_cpu_datap->lcpu.core == NULL) + if (this_cpu_datap->lcpu.core == NULL) { goto failed; + } #if NCOPY_WINDOWS > 0 this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu); - if (this_cpu_datap->cpu_pmap == NULL) + if (this_cpu_datap->cpu_pmap == NULL) { goto failed; + } #endif this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu); - if (this_cpu_datap->cpu_processor == NULL) + if (this_cpu_datap->cpu_processor == NULL) { goto failed; + } /* * processor_init() deferred to topology start * because "slot numbers" a.k.a. logical processor numbers - * are not yet finalized. + * are not yet finalized. */ } @@ -473,157 +522,159 @@ failed: kern_return_t ml_processor_register( - cpu_id_t cpu_id, - uint32_t lapic_id, - processor_t *processor_out, - boolean_t boot_cpu, + cpu_id_t cpu_id, + uint32_t lapic_id, + processor_t *processor_out, + boolean_t boot_cpu, boolean_t start ) { - static boolean_t done_topo_sort = FALSE; - static uint32_t num_registered = 0; + static boolean_t done_topo_sort = FALSE; + static uint32_t num_registered = 0; - /* Register all CPUs first, and track max */ - if( start == FALSE ) - { - num_registered++; + /* Register all CPUs first, and track max */ + if (start == FALSE) { + num_registered++; - DBG( "registering CPU lapic id %d\n", lapic_id ); + DBG( "registering CPU lapic id %d\n", lapic_id ); - return register_cpu( lapic_id, processor_out, boot_cpu ); - } + return register_cpu( lapic_id, processor_out, boot_cpu ); + } - /* Sort by topology before we start anything */ - if( !done_topo_sort ) - { - DBG( "about to start CPUs. %d registered\n", num_registered ); + /* Sort by topology before we start anything */ + if (!done_topo_sort) { + DBG( "about to start CPUs. %d registered\n", num_registered ); - cpu_topology_sort( num_registered ); - done_topo_sort = TRUE; - } + cpu_topology_sort( num_registered ); + done_topo_sort = TRUE; + } - /* Assign the cpu ID */ - uint32_t cpunum = -1; - cpu_data_t *this_cpu_datap = NULL; + /* Assign the cpu ID */ + uint32_t cpunum = -1; + cpu_data_t *this_cpu_datap = NULL; - /* find cpu num and pointer */ - cpunum = ml_get_cpuid( lapic_id ); + /* find cpu num and pointer */ + cpunum = ml_get_cpuid( lapic_id ); - if( cpunum == 0xFFFFFFFF ) /* never heard of it? */ - panic( "trying to start invalid/unregistered CPU %d\n", lapic_id ); + if (cpunum == 0xFFFFFFFF) { /* never heard of it? 
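ml_processor_register() above is a two-phase protocol: every CPU is first registered (start == FALSE) so the full set is known, the topology is sorted exactly once on the first start call, and only then are CPUs started individually, panicking on a lapic_id that was never registered. A compressed restatement of that state machine, with stub names standing in for the kernel calls:

#include <stdbool.h>
#include <stdint.h>

typedef int kern_return_t;                 /* stand-in for the kernel type */
extern kern_return_t register_cpu_stub(uint32_t lapic_id);
extern void          sort_topology(uint32_t count);
extern kern_return_t start_cpu(uint32_t lapic_id);

enum { PHASE_REGISTER, PHASE_START };

static uint32_t registered;
static bool     topo_sorted;

static kern_return_t
bring_up(int phase, uint32_t lapic_id)
{
	if (phase == PHASE_REGISTER) {
		registered++;
		return register_cpu_stub(lapic_id);
	}
	if (!topo_sorted) {                /* first start call sorts the set */
		sort_topology(registered);
		topo_sorted = true;
	}
	return start_cpu(lapic_id);
}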
*/ + panic( "trying to start invalid/unregistered CPU %d\n", lapic_id ); + } - this_cpu_datap = cpu_datap(cpunum); + this_cpu_datap = cpu_datap(cpunum); - /* fix the CPU id */ - this_cpu_datap->cpu_id = cpu_id; + /* fix the CPU id */ + this_cpu_datap->cpu_id = cpu_id; - /* allocate and initialize other per-cpu structures */ - if (!boot_cpu) { - mp_cpus_call_cpu_init(cpunum); - early_random_cpu_init(cpunum); - } + /* allocate and initialize other per-cpu structures */ + if (!boot_cpu) { + mp_cpus_call_cpu_init(cpunum); + early_random_cpu_init(cpunum); + } - /* output arg */ - *processor_out = this_cpu_datap->cpu_processor; + /* output arg */ + *processor_out = this_cpu_datap->cpu_processor; - /* OK, try and start this CPU */ - return cpu_topology_start_cpu( cpunum ); + /* OK, try and start this CPU */ + return cpu_topology_start_cpu( cpunum ); } void ml_cpu_get_info(ml_cpu_info_t *cpu_infop) { - boolean_t os_supports_sse; + boolean_t os_supports_sse; i386_cpu_info_t *cpuid_infop; - if (cpu_infop == NULL) + if (cpu_infop == NULL) { return; - + } + /* * Are we supporting MMX/SSE/SSE2/SSE3? * As distinct from whether the cpu has these capabilities. */ os_supports_sse = !!(get_cr4() & CR4_OSXMM); - if (ml_fpu_avx_enabled()) + if (ml_fpu_avx_enabled()) { cpu_infop->vector_unit = 9; - else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse) + } else if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse) { cpu_infop->vector_unit = 8; - else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse) + } else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse) { cpu_infop->vector_unit = 7; - else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse) + } else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse) { cpu_infop->vector_unit = 6; - else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse) + } else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse) { cpu_infop->vector_unit = 5; - else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse) + } else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse) { cpu_infop->vector_unit = 4; - else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse) + } else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse) { cpu_infop->vector_unit = 3; - else if (cpuid_features() & CPUID_FEATURE_MMX) + } else if (cpuid_features() & CPUID_FEATURE_MMX) { cpu_infop->vector_unit = 2; - else + } else { cpu_infop->vector_unit = 0; + } cpuid_infop = cpuid_info(); - cpu_infop->cache_line_size = cpuid_infop->cache_linesize; + cpu_infop->cache_line_size = cpuid_infop->cache_linesize; cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I]; cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D]; - - if (cpuid_infop->cache_size[L2U] > 0) { - cpu_infop->l2_settings = 1; - cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U]; - } else { - cpu_infop->l2_settings = 0; - cpu_infop->l2_cache_size = 0xFFFFFFFF; - } - - if (cpuid_infop->cache_size[L3U] > 0) { - cpu_infop->l3_settings = 1; - cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U]; - } else { - cpu_infop->l3_settings = 0; - cpu_infop->l3_cache_size = 0xFFFFFFFF; - } + + if (cpuid_infop->cache_size[L2U] > 0) { + cpu_infop->l2_settings = 1; + cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U]; + } else { + cpu_infop->l2_settings = 0; + cpu_infop->l2_cache_size = 0xFFFFFFFF; + } + + if (cpuid_infop->cache_size[L3U] > 0) { + cpu_infop->l3_settings = 1; + cpu_infop->l3_cache_size = 
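The feature ladder in ml_cpu_get_info() tests CPUID bits from newest to oldest so that vector_unit always reports the most capable unit present; AVX additionally requires the kernel to have enabled it (ml_fpu_avx_enabled), and the SSE tiers require OS support, checked via os_supports_sse. A readable decoder for the resulting values, restating the ladder rather than adding any new API:

static const char *
vector_unit_name(uint32_t level)
{
	switch (level) {
	case 9:  return "AVX";
	case 8:  return "SSE4.2";
	case 7:  return "SSE4.1";
	case 6:  return "SSSE3";
	case 5:  return "SSE3";
	case 4:  return "SSE2";
	case 3:  return "SSE";
	case 2:  return "MMX";
	default: return "none";    /* 0: no vector unit reported */
	}
}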
cpuid_infop->cache_size[L3U]; + } else { + cpu_infop->l3_settings = 0; + cpu_infop->l3_cache_size = 0xFFFFFFFF; + } } void ml_init_max_cpus(unsigned long max_cpus) { - boolean_t current_state; + boolean_t current_state; - current_state = ml_set_interrupts_enabled(FALSE); - if (max_cpus_initialized != MAX_CPUS_SET) { - if (max_cpus > 0 && max_cpus <= MAX_CPUS) { + current_state = ml_set_interrupts_enabled(FALSE); + if (max_cpus_initialized != MAX_CPUS_SET) { + if (max_cpus > 0 && max_cpus <= MAX_CPUS) { /* * Note: max_cpus is the number of enabled processors * that ACPI found; max_ncpus is the maximum number * that the kernel supports or that the "cpus=" * boot-arg has set. Here we take int minimum. */ - machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus); + machine_info.max_cpus = (integer_t)MIN(max_cpus, max_ncpus); } - if (max_cpus_initialized == MAX_CPUS_WAIT) - wakeup((event_t)&max_cpus_initialized); - max_cpus_initialized = MAX_CPUS_SET; - } - (void) ml_set_interrupts_enabled(current_state); + if (max_cpus_initialized == MAX_CPUS_WAIT) { + wakeup((event_t)&max_cpus_initialized); + } + max_cpus_initialized = MAX_CPUS_SET; + } + (void) ml_set_interrupts_enabled(current_state); } int ml_get_max_cpus(void) { - boolean_t current_state; + boolean_t current_state; - current_state = ml_set_interrupts_enabled(FALSE); - if (max_cpus_initialized != MAX_CPUS_SET) { - max_cpus_initialized = MAX_CPUS_WAIT; - assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT); - (void)thread_block(THREAD_CONTINUE_NULL); - } - (void) ml_set_interrupts_enabled(current_state); - return(machine_info.max_cpus); + current_state = ml_set_interrupts_enabled(FALSE); + if (max_cpus_initialized != MAX_CPUS_SET) { + max_cpus_initialized = MAX_CPUS_WAIT; + assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT); + (void)thread_block(THREAD_CONTINUE_NULL); + } + (void) ml_set_interrupts_enabled(current_state); + return machine_info.max_cpus; } boolean_t @@ -634,11 +685,11 @@ ml_wants_panic_trap_to_debugger(void) void ml_panic_trap_to_debugger(__unused const char *panic_format_str, - __unused va_list *panic_args, - __unused unsigned int reason, - __unused void *ctx, - __unused uint64_t panic_options_mask, - __unused unsigned long panic_caller) + __unused va_list *panic_args, + __unused unsigned int reason, + __unused void *ctx, + __unused uint64_t panic_options_mask, + __unused unsigned long panic_caller) { return; } @@ -650,18 +701,19 @@ ml_panic_trap_to_debugger(__unused const char *panic_format_str, void ml_init_lock_timeout(void) { - uint64_t abstime; - uint32_t mtxspin; + uint64_t abstime; + uint32_t mtxspin; #if DEVELOPMENT || DEBUG - uint64_t default_timeout_ns = NSEC_PER_SEC>>2; + uint64_t default_timeout_ns = NSEC_PER_SEC >> 2; #else - uint64_t default_timeout_ns = NSEC_PER_SEC>>1; + uint64_t default_timeout_ns = NSEC_PER_SEC >> 1; #endif - uint32_t slto; - uint32_t prt; + uint32_t slto; + uint32_t prt; - if (PE_parse_boot_argn("slto_us", &slto, sizeof (slto))) + if (PE_parse_boot_argn("slto_us", &slto, sizeof(slto))) { default_timeout_ns = slto * NSEC_PER_USEC; + } /* * LockTimeOut is absolutetime, LockTimeOutTSC is in TSC ticks, @@ -678,7 +730,7 @@ ml_init_lock_timeout(void) * zero value inhibits the timeout-panic and cuts a trace evnt instead * - see pmap_flush_tlbs(). 
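ml_get_max_cpus() and ml_init_max_cpus() above form a rendezvous: a caller that arrives before ACPI has reported the CPU count parks on the address of max_cpus_initialized with assert_wait()/thread_block(), and the initializer publishes the value and calls wakeup() on the same address. The same shape in standalone pthread form, as a sketch:

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;
static int max_cpus_set;        /* plays the role of MAX_CPUS_SET */
static int max_cpus_value;

int
get_max_cpus(void)
{
	pthread_mutex_lock(&m);
	while (!max_cpus_set) {
		pthread_cond_wait(&c, &m);   /* assert_wait + thread_block */
	}
	pthread_mutex_unlock(&m);
	return max_cpus_value;
}

void
init_max_cpus(int n)
{
	pthread_mutex_lock(&m);
	max_cpus_value = n;
	max_cpus_set = 1;
	pthread_cond_broadcast(&c);          /* wakeup() */
	pthread_mutex_unlock(&m);
}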
*/ - if (PE_parse_boot_argn("tlbto_us", &slto, sizeof (slto))) { + if (PE_parse_boot_argn("tlbto_us", &slto, sizeof(slto))) { default_timeout_ns = slto * NSEC_PER_USEC; nanoseconds_to_absolutetime(default_timeout_ns, &abstime); TLBTimeOut = (uint32_t) abstime; @@ -689,47 +741,65 @@ ml_init_lock_timeout(void) #if DEVELOPMENT || DEBUG reportphyreaddelayabs = LockTimeOut >> 1; #endif - if (PE_parse_boot_argn("phyreadmaxus", &slto, sizeof (slto))) { + if (PE_parse_boot_argn("phyreadmaxus", &slto, sizeof(slto))) { default_timeout_ns = slto * NSEC_PER_USEC; nanoseconds_to_absolutetime(default_timeout_ns, &abstime); reportphyreaddelayabs = abstime; } - if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof (mtxspin))) { - if (mtxspin > USEC_PER_SEC>>4) - mtxspin = USEC_PER_SEC>>4; - nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime); + if (PE_parse_boot_argn("phywritemaxus", &slto, sizeof(slto))) { + nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime); + reportphywritedelayabs = abstime; + } + + if (PE_parse_boot_argn("tracephyreadus", &slto, sizeof(slto))) { + nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime); + tracephyreaddelayabs = abstime; + } + + if (PE_parse_boot_argn("tracephywriteus", &slto, sizeof(slto))) { + nanoseconds_to_absolutetime((uint64_t)slto * NSEC_PER_USEC, &abstime); + tracephywritedelayabs = abstime; + } + + if (PE_parse_boot_argn("mtxspin", &mtxspin, sizeof(mtxspin))) { + if (mtxspin > USEC_PER_SEC >> 4) { + mtxspin = USEC_PER_SEC >> 4; + } + nanoseconds_to_absolutetime(mtxspin * NSEC_PER_USEC, &abstime); } else { - nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime); + nanoseconds_to_absolutetime(10 * NSEC_PER_USEC, &abstime); } MutexSpin = (unsigned int)abstime; nanoseconds_to_absolutetime(4ULL * NSEC_PER_SEC, &LastDebuggerEntryAllowance); - if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof (prt))) + if (PE_parse_boot_argn("panic_restart_timeout", &prt, sizeof(prt))) { nanoseconds_to_absolutetime(prt * NSEC_PER_SEC, &panic_restart_timeout); + } virtualized = ((cpuid_features() & CPUID_FEATURE_VMM) != 0); if (virtualized) { - int vti; - - if (!PE_parse_boot_argn("vti", &vti, sizeof (vti))) + int vti; + + if (!PE_parse_boot_argn("vti", &vti, sizeof(vti))) { vti = 6; + } printf("Timeouts adjusted for virtualization (<<%d)\n", vti); kprintf("Timeouts adjusted for virtualization (<<%d):\n", vti); -#define VIRTUAL_TIMEOUT_INFLATE64(_timeout) \ -MACRO_BEGIN \ - kprintf("%24s: 0x%016llx ", #_timeout, _timeout); \ - _timeout <<= vti; \ - kprintf("-> 0x%016llx\n", _timeout); \ +#define VIRTUAL_TIMEOUT_INFLATE64(_timeout) \ +MACRO_BEGIN \ + kprintf("%24s: 0x%016llx ", #_timeout, _timeout); \ + _timeout <<= vti; \ + kprintf("-> 0x%016llx\n", _timeout); \ MACRO_END -#define VIRTUAL_TIMEOUT_INFLATE32(_timeout) \ -MACRO_BEGIN \ - kprintf("%24s: 0x%08x ", #_timeout, _timeout); \ - if ((_timeout <> vti == _timeout) \ - _timeout <<= vti; \ - else \ - _timeout = ~0; /* cap rather than overflow */ \ - kprintf("-> 0x%08x\n", _timeout); \ +#define VIRTUAL_TIMEOUT_INFLATE32(_timeout) \ +MACRO_BEGIN \ + kprintf("%24s: 0x%08x ", #_timeout, _timeout); \ + if ((_timeout <> vti == _timeout) \ + _timeout <<= vti; \ + else \ + _timeout = ~0; /* cap rather than overflow */ \ + kprintf("-> 0x%08x\n", _timeout); \ MACRO_END VIRTUAL_TIMEOUT_INFLATE32(LockTimeOutUsec); VIRTUAL_TIMEOUT_INFLATE64(LockTimeOut); @@ -760,7 +830,17 @@ ml_delay_should_spin(uint64_t interval) return (interval < delay_spin_threshold) ? 
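The VIRTUAL_TIMEOUT_INFLATE32 macro caps instead of overflowing: it shifts the timeout left by vti, shifts back, and compares with the original; a mismatch means high bits were lost, so the value saturates to ~0. A standalone restatement of that check (the shift-back comparison is reconstructed from the macro's intent):

#include <stdint.h>
#include <stdio.h>

static uint32_t
inflate_or_cap(uint32_t timeout, unsigned vti)   /* assumes 0 < vti < 32 */
{
	if (((timeout << vti) >> vti) == timeout) {
		return timeout << vti;        /* no bits lost: inflate */
	}
	return ~0U;                            /* cap rather than overflow */
}

int
main(void)
{
	printf("0x%08x\n", inflate_or_cap(0x00010000u, 6));  /* 0x00400000 */
	printf("0x%08x\n", inflate_or_cap(0x10000000u, 6));  /* 0xffffffff */
	return 0;
}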
TRUE : FALSE; } -void ml_delay_on_yield(void) {} +uint32_t yield_delay_us = 0; + +void +ml_delay_on_yield(void) +{ +#if DEVELOPMENT || DEBUG + if (yield_delay_us) { + delay(yield_delay_us); + } +#endif +} /* * This is called from the machine-independent layer @@ -792,7 +872,7 @@ extern thread_t current_act(void); thread_t current_act(void) { - return(current_thread_fast()); + return current_thread_fast(); } #undef current_thread @@ -800,59 +880,67 @@ extern thread_t current_thread(void); thread_t current_thread(void) { - return(current_thread_fast()); + return current_thread_fast(); } -boolean_t ml_is64bit(void) { - - return (cpu_mode_is64bit()); +boolean_t +ml_is64bit(void) +{ + return cpu_mode_is64bit(); } -boolean_t ml_thread_is64bit(thread_t thread) { - - return (thread_is_64bit_addr(thread)); +boolean_t +ml_thread_is64bit(thread_t thread) +{ + return thread_is_64bit_addr(thread); } -boolean_t ml_state_is64bit(void *saved_state) { - +boolean_t +ml_state_is64bit(void *saved_state) +{ return is_saved_state64(saved_state); } -void ml_cpu_set_ldt(int selector) +void +ml_cpu_set_ldt(int selector) { /* * Avoid loading the LDT * if we're setting the KERNEL LDT and it's already set. */ if (selector == KERNEL_LDT && - current_cpu_datap()->cpu_ldt == KERNEL_LDT) + current_cpu_datap()->cpu_ldt == KERNEL_LDT) { return; + } lldt(selector); current_cpu_datap()->cpu_ldt = selector; } -void ml_fp_setvalid(boolean_t value) +void +ml_fp_setvalid(boolean_t value) { - fp_setvalid(value); + fp_setvalid(value); } -uint64_t ml_cpu_int_event_time(void) +uint64_t +ml_cpu_int_event_time(void) { return current_cpu_datap()->cpu_int_event_time; } -vm_offset_t ml_stack_remaining(void) +vm_offset_t +ml_stack_remaining(void) { uintptr_t local = (uintptr_t) &local; if (ml_at_interrupt_context() != 0) { - return (local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE)); + return local - (current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE); } else { - return (local - current_thread()->kernel_stack); + return local - current_thread()->kernel_stack; } } @@ -866,7 +954,7 @@ ml_stack_base(void) if (ml_at_interrupt_context()) { return current_cpu_datap()->cpu_int_stack_top - INTSTACK_SIZE; } else { - return current_thread()->kernel_stack; + return current_thread()->kernel_stack; } } @@ -874,9 +962,9 @@ vm_size_t ml_stack_size(void) { if (ml_at_interrupt_context()) { - return INTSTACK_SIZE; + return INTSTACK_SIZE; } else { - return kernel_stack_size; + return kernel_stack_size; } } #endif @@ -884,40 +972,44 @@ ml_stack_size(void) void kernel_preempt_check(void) { - boolean_t intr; + boolean_t intr; unsigned long flags; assert(get_preemption_level() == 0); if (__improbable(*ast_pending() & AST_URGENT)) { /* - * can handle interrupts and preemptions + * can handle interrupts and preemptions * at this point */ - __asm__ volatile("pushf; pop %0" : "=r" (flags)); + __asm__ volatile ("pushf; pop %0" : "=r" (flags)); intr = ((flags & EFL_IF) != 0); /* * now cause the PRE-EMPTION trap */ - if (intr == TRUE){ + if (intr == TRUE) { __asm__ volatile ("int %0" :: "N" (T_PREEMPT)); } } } -boolean_t machine_timeout_suspended(void) { - return (pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake()); +boolean_t +machine_timeout_suspended(void) +{ + return pmap_tlb_flush_timeout || spinlock_timed_out || panic_active() || mp_recent_debugger_activity() || ml_recent_wake(); } /* Eagerly evaluate all pending timer and thread callouts */ -void ml_timer_evaluate(void) { - 
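ml_stack_remaining() above uses a cheap trick: the address of a local variable is, to within a few words, the current stack pointer, so the space left on a downward-growing x86 stack is that address minus the stack base. Isolated as a sketch; in the real function the base comes from cpu_int_stack_top or the thread's kernel_stack:

#include <stdint.h>

static uintptr_t
stack_remaining(uintptr_t stack_base)
{
	uintptr_t local = (uintptr_t)&local;   /* ~ current stack pointer */
	return local - stack_base;             /* stacks grow down on x86 */
}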
KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_START, 0, 0, 0, 0, 0); +void +ml_timer_evaluate(void) +{ + KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_START, 0, 0, 0, 0, 0); uint64_t te_end, te_start = mach_absolute_time(); - simple_lock(&ml_timer_evaluation_slock); + simple_lock(&ml_timer_evaluation_slock, LCK_GRP_NULL); ml_timer_evaluation_in_progress = TRUE; thread_call_delayed_timer_rescan_all(); mp_cpus_call(CPUMASK_ALL, ASYNC, timer_queue_expire_rescan, NULL); @@ -927,83 +1019,98 @@ void ml_timer_evaluate(void) { ml_timer_eager_evaluation_max = MAX(ml_timer_eager_evaluation_max, (te_end - te_start)); simple_unlock(&ml_timer_evaluation_slock); - KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN|DBG_FUNC_END, 0, 0, 0, 0, 0); + KERNEL_DEBUG_CONSTANT(DECR_TIMER_RESCAN | DBG_FUNC_END, 0, 0, 0, 0, 0); } boolean_t -ml_timer_forced_evaluation(void) { +ml_timer_forced_evaluation(void) +{ return ml_timer_evaluation_in_progress; } /* 32-bit right-rotate n bits */ -static inline uint32_t ror32(uint32_t val, const unsigned int n) -{ - __asm__ volatile("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n)); +static inline uint32_t +ror32(uint32_t val, const unsigned int n) +{ + __asm__ volatile ("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n)); return val; } void ml_entropy_collect(void) { - uint32_t tsc_lo, tsc_hi; - uint32_t *ep; + uint32_t tsc_lo, tsc_hi; + uint32_t *ep; assert(cpu_number() == master_cpu); /* update buffer pointer cyclically */ - if (EntropyData.index_ptr - EntropyData.buffer == ENTROPY_BUFFER_SIZE) + if (EntropyData.index_ptr - EntropyData.buffer == ENTROPY_BUFFER_SIZE) { ep = EntropyData.index_ptr = EntropyData.buffer; - else + } else { ep = EntropyData.index_ptr++; + } rdtsc_nofence(tsc_lo, tsc_hi); *ep = ror32(*ep, 9) ^ tsc_lo; } uint64_t -ml_energy_stat(__unused thread_t t) { +ml_energy_stat(__unused thread_t t) +{ return 0; } void -ml_gpu_stat_update(uint64_t gpu_ns_delta) { +ml_gpu_stat_update(uint64_t gpu_ns_delta) +{ current_thread()->machine.thread_gpu_ns += gpu_ns_delta; } uint64_t -ml_gpu_stat(thread_t t) { +ml_gpu_stat(thread_t t) +{ return t->machine.thread_gpu_ns; } int plctrace_enabled = 0; -void _disable_preemption(void) { +void +_disable_preemption(void) +{ disable_preemption_internal(); } -void _enable_preemption(void) { +void +_enable_preemption(void) +{ enable_preemption_internal(); } -void plctrace_disable(void) { +void +plctrace_disable(void) +{ plctrace_enabled = 0; } static boolean_t ml_quiescing; -void ml_set_is_quiescing(boolean_t quiescing) +void +ml_set_is_quiescing(boolean_t quiescing) { - assert(FALSE == ml_get_interrupts_enabled()); - ml_quiescing = quiescing; + assert(FALSE == ml_get_interrupts_enabled()); + ml_quiescing = quiescing; } -boolean_t ml_is_quiescing(void) +boolean_t +ml_is_quiescing(void) { - assert(FALSE == ml_get_interrupts_enabled()); - return (ml_quiescing); + assert(FALSE == ml_get_interrupts_enabled()); + return ml_quiescing; } -uint64_t ml_get_booter_memory_size(void) +uint64_t +ml_get_booter_memory_size(void) { - return (0); + return 0; } diff --git a/osfmk/i386/machine_routines.h b/osfmk/i386/machine_routines.h index 487cc6b61..28018871b 100644 --- a/osfmk/i386/machine_routines.h +++ b/osfmk/i386/machine_routines.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2009 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. 
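ml_entropy_collect() above mixes timing jitter into a cyclic buffer: each call rotates the current slot by 9 bits and XORs in the low word of the TSC, so successive interrupts keep folding fresh bits over the old contents. A portable restatement with the inline-asm rotate replaced by shifts; the buffer size here is illustrative:

#include <stdint.h>

#define ENTROPY_BUFFER_SIZE 64           /* illustrative, not the kernel's */

static uint32_t buffer[ENTROPY_BUFFER_SIZE];
static unsigned index_ptr;

static inline uint32_t
ror32(uint32_t val, unsigned n)          /* valid for n in 1..31 */
{
	return (val >> n) | (val << (32 - n));
}

void
entropy_collect(uint32_t tsc_lo)
{
	uint32_t *ep = &buffer[index_ptr];
	index_ptr = (index_ptr + 1) % ENTROPY_BUFFER_SIZE;  /* cyclic advance */
	*ep = ror32(*ep, 9) ^ tsc_lo;        /* fold timing jitter into the slot */
}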
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifndef _I386_MACHINE_ROUTINES_H_ -#define _I386_MACHINE_ROUTINES_H_ +#ifndef _I386_MACHINE_ROUTINES_H_ +#define _I386_MACHINE_ROUTINES_H_ #include #include @@ -60,9 +60,9 @@ boolean_t ml_state_is64bit(void *); /* set state of fpu save area for signal handling */ -void ml_fp_setvalid(boolean_t); +void ml_fp_setvalid(boolean_t); -void ml_cpu_set_ldt(int); +void ml_cpu_set_ldt(int); /* Interrupt handling */ @@ -74,16 +74,16 @@ void ml_cause_interrupt(void); /* Initialize Interrupts */ void ml_install_interrupt_handler( - void *nub, - int source, - void *target, - IOInterruptHandler handler, - void *refCon); + void *nub, + int source, + void *target, + IOInterruptHandler handler, + void *refCon); void ml_entropy_collect(void); uint64_t ml_get_timebase(void); -void ml_init_lock_timeout(void); +void ml_init_lock_timeout(void); void ml_init_delay_spin_threshold(int); boolean_t ml_delay_should_spin(uint64_t interval); @@ -91,7 +91,7 @@ boolean_t ml_delay_should_spin(uint64_t interval); extern void ml_delay_on_yield(void); vm_offset_t -ml_static_ptovirt( + ml_static_ptovirt( vm_offset_t); void ml_static_mfree( @@ -119,19 +119,19 @@ boolean_t ml_validate_nofault( vm_offset_t virtsrc, vm_size_t size); /* Machine topology info */ -uint64_t ml_cpu_cache_size(unsigned int level); -uint64_t ml_cpu_cache_sharing(unsigned int level); +uint64_t ml_cpu_cache_size(unsigned int level); +uint64_t ml_cpu_cache_sharing(unsigned int level); /* Initialize the maximum number of CPUs */ void ml_init_max_cpus( unsigned long max_cpus); -extern void ml_cpu_up(void); -extern void ml_cpu_down(void); +extern void ml_cpu_up(void); +extern void ml_cpu_down(void); void bzero_phys_nc( - addr64_t phys_address, - uint32_t length); + addr64_t phys_address, + uint32_t length); extern uint32_t interrupt_timer_coalescing_enabled; extern uint32_t idle_entry_timer_processing_hdeadline_threshold; @@ -141,18 +141,18 @@ extern uint32_t idle_entry_timer_processing_hdeadline_threshold; #define TCOAL_DEBUG(x, a, b, c, d, e) do { } while(0) #endif /* TCOAL_INSTRUMENT */ -#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(MACH_KERNEL_PRIVATE) /* IO memory map services */ /* Map memory map IO space */ vm_offset_t ml_io_map( - vm_offset_t phys_addr, + vm_offset_t phys_addr, vm_size_t size); -void ml_get_bouncepool_info( - vm_offset_t *phys_addr, - vm_size_t *size); +void 
ml_get_bouncepool_info( + vm_offset_t *phys_addr, + vm_size_t *size); /* Indicates if spinlock, IPI and other timeouts should be suspended */ boolean_t machine_timeout_suspended(void); void plctrace_disable(void); @@ -166,11 +166,11 @@ boolean_t ml_wants_panic_trap_to_debugger(void); /* Machine layer routine for intercepting panics */ void ml_panic_trap_to_debugger(const char *panic_format_str, - va_list *panic_args, - unsigned int reason, - void *ctx, - uint64_t panic_options_mask, - unsigned long panic_caller); + va_list *panic_args, + unsigned int reason, + void *ctx, + uint64_t panic_options_mask, + unsigned long panic_caller); #endif /* XNU_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE @@ -183,12 +183,12 @@ typedef void (*ipi_handler_t)(void); /* Struct for ml_processor_register */ struct ml_processor_info { - cpu_id_t cpu_id; - boolean_t boot_cpu; - vm_offset_t start_paddr; - boolean_t supports_nap; - unsigned long l2cr_value; - time_base_enable_t time_base_enable; + cpu_id_t cpu_id; + boolean_t boot_cpu; + vm_offset_t start_paddr; + boolean_t supports_nap; + unsigned long l2cr_value; + time_base_enable_t time_base_enable; }; typedef struct ml_processor_info ml_processor_info_t; @@ -197,10 +197,10 @@ typedef struct ml_processor_info ml_processor_info_t; /* Register a processor */ kern_return_t ml_processor_register( - cpu_id_t cpu_id, - uint32_t lapic_id, - processor_t *processor_out, - boolean_t boot_cpu, + cpu_id_t cpu_id, + uint32_t lapic_id, + processor_t *processor_out, + boolean_t boot_cpu, boolean_t start ); /* PCI config cycle probing */ @@ -245,6 +245,21 @@ unsigned int ml_io_read16(uintptr_t iovaddr); unsigned int ml_io_read32(uintptr_t iovaddr); unsigned long long ml_io_read64(uintptr_t iovaddr); +extern void ml_io_write(uintptr_t vaddr, uint64_t val, int size); +extern void ml_io_write8(uintptr_t vaddr, uint8_t val); +extern void ml_io_write16(uintptr_t vaddr, uint16_t val); +extern void ml_io_write32(uintptr_t vaddr, uint32_t val); +extern void ml_io_write64(uintptr_t vaddr, uint64_t val); + +extern uint32_t ml_port_io_read(uint16_t ioport, int size); +extern uint8_t ml_port_io_read8(uint16_t ioport); +extern uint16_t ml_port_io_read16(uint16_t ioport); +extern uint32_t ml_port_io_read32(uint16_t ioport); +extern void ml_port_io_write(uint16_t ioport, uint32_t val, int size); +extern void ml_port_io_write8(uint16_t ioport, uint8_t val); +extern void ml_port_io_write16(uint16_t ioport, uint16_t val); +extern void ml_port_io_write32(uint16_t ioport, uint32_t val); + /* Write physical address byte */ void ml_phys_write_byte( vm_offset_t paddr, unsigned int data); @@ -275,14 +290,14 @@ void ml_phys_write_double_64( /* Struct for ml_cpu_get_info */ struct ml_cpu_info { - uint32_t vector_unit; - uint32_t cache_line_size; - uint32_t l1_icache_size; - uint32_t l1_dcache_size; - uint32_t l2_settings; - uint32_t l2_cache_size; - uint32_t l3_settings; - uint32_t l3_cache_size; + uint32_t vector_unit; + uint32_t cache_line_size; + uint32_t l1_icache_size; + uint32_t l1_dcache_size; + uint32_t l2_settings; + uint32_t l2_cache_size; + uint32_t l3_settings; + uint32_t l3_cache_size; }; typedef struct ml_cpu_info ml_cpu_info_t; @@ -295,10 +310,10 @@ void ml_thread_policy( unsigned policy_id, unsigned policy_info); -#define MACHINE_GROUP 0x00000001 -#define MACHINE_NETWORK_GROUP 0x10000000 -#define MACHINE_NETWORK_WORKLOOP 0x00000001 -#define MACHINE_NETWORK_NETISR 0x00000002 +#define MACHINE_GROUP 0x00000001 +#define MACHINE_NETWORK_GROUP 0x10000000 +#define MACHINE_NETWORK_WORKLOOP 
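The block of ml_io_write*/ml_port_io_* declarations added above completes the read-side helpers that already existed in this header. A typical pairing with ml_io_map() would look like the following kernel-context sketch; the physical address and register offset are made up for illustration:

static void
enable_hypothetical_device(void)
{
	/* Map one page of device registers; 0xfed00000 is a made-up address. */
	vm_offset_t regs = ml_io_map(0xfed00000, PAGE_SIZE);

	uint32_t status = ml_io_read32(regs + 0x10);   /* hypothetical offset */
	ml_io_write32(regs + 0x10, status | 0x1);      /* set an enable bit   */
}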
0x00000001 +#define MACHINE_NETWORK_NETISR 0x00000002 /* Return the maximum number of CPUs set by ml_init_max_cpus() */ int ml_get_max_cpus( @@ -327,6 +342,7 @@ boolean_t ml_get_interrupts_enabled(void); /* Set Interrupts Enabled */ boolean_t ml_set_interrupts_enabled(boolean_t enable); +boolean_t ml_early_set_interrupts_enabled(boolean_t enable); /* Check if running at interrupt context */ boolean_t ml_at_interrupt_context(void); @@ -347,12 +363,13 @@ vm_offset_t ml_stack_remaining(void); __END_DECLS #if defined(MACH_KERNEL_PRIVATE) -__private_extern__ uint64_t -ml_phys_read_data(uint64_t paddr, int psz); -__private_extern__ void +__private_extern__ uint64_t ml_phys_read_data(uint64_t paddr, int psz); +__private_extern__ void ml_phys_write_data(uint64_t paddr, + unsigned long long data, int size); +__private_extern__ uintptr_t pmap_verify_noncacheable(uintptr_t vaddr); #endif /* MACH_KERNEL_PRIVATE */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE boolean_t ml_fpu_avx_enabled(void); #if !defined(RC_HIDE_XNU_J137) @@ -374,9 +391,19 @@ void ml_gpu_stat_update(uint64_t); uint64_t ml_gpu_stat(thread_t); boolean_t ml_recent_wake(void); +#define ALL_CORES_RECOMMENDED (~(uint64_t)0) + +extern void sched_usercontrol_update_recommended_cores(uint64_t recommended_cores); + + extern uint64_t reportphyreaddelayabs; +extern uint64_t reportphywritedelayabs; extern uint32_t reportphyreadosbt; +extern uint32_t reportphywriteosbt; extern uint32_t phyreadpanic; +extern uint32_t phywritepanic; +extern uint64_t tracephyreaddelayabs; +extern uint64_t tracephywritedelayabs; #endif /* XNU_KERNEL_PRIVATE */ #endif /* _I386_MACHINE_ROUTINES_H_ */ diff --git a/osfmk/i386/machine_rpc.h b/osfmk/i386/machine_rpc.h index 5e29e9504..a891db145 100644 --- a/osfmk/i386/machine_rpc.h +++ b/osfmk/i386/machine_rpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/osfmk/i386/machine_task.c b/osfmk/i386/machine_task.c index 3edd363c7..bdfff77ac 100644 --- a/osfmk/i386/machine_task.c +++ b/osfmk/i386/machine_task.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -67,161 +67,159 @@ extern zone_t ids_zone; kern_return_t machine_task_set_state( - task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count) + task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count) { switch (flavor) { - case x86_DEBUG_STATE32: - { - x86_debug_state32_t *tstate = (x86_debug_state32_t*) state; - if ((task_has_64Bit_addr(task)) || - (state_count != x86_DEBUG_STATE32_COUNT) || - (!debug_state_is_valid32(tstate))) { - return KERN_INVALID_ARGUMENT; - } + case x86_DEBUG_STATE32: + { + x86_debug_state32_t *tstate = (x86_debug_state32_t*) state; + if ((task_has_64Bit_addr(task)) || + (state_count != x86_DEBUG_STATE32_COUNT) || + (!debug_state_is_valid32(tstate))) { + return KERN_INVALID_ARGUMENT; + } - if (task->task_debug == NULL) { - task->task_debug = zalloc(ids_zone); - } + if (task->task_debug == NULL) { + task->task_debug = zalloc(ids_zone); + } - copy_debug_state32(tstate, (x86_debug_state32_t*) task->task_debug, FALSE); - - return KERN_SUCCESS; + copy_debug_state32(tstate, (x86_debug_state32_t*) task->task_debug, FALSE); + + return KERN_SUCCESS; + } + case x86_DEBUG_STATE64: + { + x86_debug_state64_t *tstate = (x86_debug_state64_t*) state; + + if ((!task_has_64Bit_addr(task)) || + (state_count != x86_DEBUG_STATE64_COUNT) || + (!debug_state_is_valid64(tstate))) { + return KERN_INVALID_ARGUMENT; } - case x86_DEBUG_STATE64: - { - x86_debug_state64_t *tstate = (x86_debug_state64_t*) state; - - if ((!task_has_64Bit_addr(task)) || - (state_count != x86_DEBUG_STATE64_COUNT) || - (!debug_state_is_valid64(tstate))) { - return KERN_INVALID_ARGUMENT; - } + if (task->task_debug == NULL) { + task->task_debug = zalloc(ids_zone); + } + + copy_debug_state64(tstate, (x86_debug_state64_t*) task->task_debug, FALSE); + + return KERN_SUCCESS; + } + case x86_DEBUG_STATE: + { + x86_debug_state_t *tstate = (x86_debug_state_t*) state; + + if (state_count != x86_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + if ((tstate->dsh.flavor == x86_DEBUG_STATE32) && + (tstate->dsh.count == x86_DEBUG_STATE32_COUNT) && + (!task_has_64Bit_addr(task)) && + debug_state_is_valid32(&tstate->uds.ds32)) { if (task->task_debug == NULL) { task->task_debug = zalloc(ids_zone); } - - copy_debug_state64(tstate, (x86_debug_state64_t*) task->task_debug, FALSE); - - return KERN_SUCCESS; - } - case x86_DEBUG_STATE: - { - x86_debug_state_t *tstate = (x86_debug_state_t*) state; - if (state_count != x86_DEBUG_STATE_COUNT) { - return KERN_INVALID_ARGUMENT; + copy_debug_state32(&tstate->uds.ds32, (x86_debug_state32_t*) task->task_debug, FALSE); + return KERN_SUCCESS; + } else if ((tstate->dsh.flavor == x86_DEBUG_STATE64) && + (tstate->dsh.count == x86_DEBUG_STATE64_COUNT) && + task_has_64Bit_addr(task) && + debug_state_is_valid64(&tstate->uds.ds64)) { + if (task->task_debug == NULL) { + task->task_debug = zalloc(ids_zone); } - if ((tstate->dsh.flavor == x86_DEBUG_STATE32) && - (tstate->dsh.count == x86_DEBUG_STATE32_COUNT) && - (!task_has_64Bit_addr(task)) && - debug_state_is_valid32(&tstate->uds.ds32)) { - - if (task->task_debug == NULL) { - task->task_debug = zalloc(ids_zone); - } - - copy_debug_state32(&tstate->uds.ds32, (x86_debug_state32_t*) task->task_debug, FALSE); - return KERN_SUCCESS; - - } else if ((tstate->dsh.flavor == x86_DEBUG_STATE64) && - (tstate->dsh.count == x86_DEBUG_STATE64_COUNT) && - task_has_64Bit_addr(task) && - debug_state_is_valid64(&tstate->uds.ds64)) { - - if (task->task_debug == NULL) { - task->task_debug = 
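Each flavor case in machine_task_set_state() above follows the same shape: validate bitness, count, and the register contents before touching the task, then lazily allocate the per-task debug slot from ids_zone, and only then copy. That ordering means an invalid request can never leave a half-initialized slot behind. The skeleton, with hypothetical stand-ins for the kernel types:

#include <errno.h>
#include <stddef.h>

typedef struct { int dr[8]; } ds32_t;          /* toy debug-state type */
#define DS32_COUNT (sizeof(ds32_t) / sizeof(int))
struct task_stub { ds32_t *debug; int is64; };

static int     task_is_64bit(struct task_stub *t) { return t->is64; }
static int     ds32_valid(const ds32_t *s) { (void)s; return 1; }
static ds32_t *alloc_debug_slot(void) { static ds32_t slot; return &slot; }

static int
set_debug_state(struct task_stub *task, const ds32_t *state, unsigned count)
{
	if (task_is_64bit(task) || count != DS32_COUNT || !ds32_valid(state)) {
		return EINVAL;                    /* validate before touching the task */
	}
	if (task->debug == NULL) {
		task->debug = alloc_debug_slot(); /* lazy allocation */
	}
	*task->debug = *state;
	return 0;
}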
zalloc(ids_zone); - } - - copy_debug_state64(&tstate->uds.ds64, (x86_debug_state64_t*) task->task_debug, FALSE); - return KERN_SUCCESS; - } else { - return KERN_INVALID_ARGUMENT; - } - } - default: - { + copy_debug_state64(&tstate->uds.ds64, (x86_debug_state64_t*) task->task_debug, FALSE); + return KERN_SUCCESS; + } else { return KERN_INVALID_ARGUMENT; } } + default: + { + return KERN_INVALID_ARGUMENT; + } + } } -kern_return_t -machine_task_get_state(task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t *state_count) +kern_return_t +machine_task_get_state(task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t *state_count) { switch (flavor) { - case x86_DEBUG_STATE32: - { - x86_debug_state32_t *tstate = (x86_debug_state32_t*) state; + case x86_DEBUG_STATE32: + { + x86_debug_state32_t *tstate = (x86_debug_state32_t*) state; - if ((task_has_64Bit_addr(task)) || (*state_count != x86_DEBUG_STATE32_COUNT)) { - return KERN_INVALID_ARGUMENT; - } + if ((task_has_64Bit_addr(task)) || (*state_count != x86_DEBUG_STATE32_COUNT)) { + return KERN_INVALID_ARGUMENT; + } - if (task->task_debug == NULL) { - bzero(state, sizeof(*tstate)); - } else { - copy_debug_state32((x86_debug_state32_t*) task->task_debug, tstate, TRUE); - } + if (task->task_debug == NULL) { + bzero(state, sizeof(*tstate)); + } else { + copy_debug_state32((x86_debug_state32_t*) task->task_debug, tstate, TRUE); + } - return KERN_SUCCESS; + return KERN_SUCCESS; + } + case x86_DEBUG_STATE64: + { + x86_debug_state64_t *tstate = (x86_debug_state64_t*) state; + + if ((!task_has_64Bit_addr(task)) || (*state_count != x86_DEBUG_STATE64_COUNT)) { + return KERN_INVALID_ARGUMENT; } - case x86_DEBUG_STATE64: - { - x86_debug_state64_t *tstate = (x86_debug_state64_t*) state; - if ((!task_has_64Bit_addr(task)) || (*state_count != x86_DEBUG_STATE64_COUNT)) { - return KERN_INVALID_ARGUMENT; - } + if (task->task_debug == NULL) { + bzero(state, sizeof(*tstate)); + } else { + copy_debug_state64((x86_debug_state64_t*) task->task_debug, tstate, TRUE); + } + + return KERN_SUCCESS; + } + case x86_DEBUG_STATE: + { + x86_debug_state_t *tstate = (x86_debug_state_t*)state; + + if (*state_count != x86_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + if (task_has_64Bit_addr(task)) { + tstate->dsh.flavor = x86_DEBUG_STATE64; + tstate->dsh.count = x86_DEBUG_STATE64_COUNT; if (task->task_debug == NULL) { - bzero(state, sizeof(*tstate)); + bzero(&tstate->uds.ds64, sizeof(tstate->uds.ds64)); } else { - copy_debug_state64((x86_debug_state64_t*) task->task_debug, tstate, TRUE); - } + copy_debug_state64((x86_debug_state64_t*)task->task_debug, &tstate->uds.ds64, TRUE); + } + } else { + tstate->dsh.flavor = x86_DEBUG_STATE32; + tstate->dsh.count = x86_DEBUG_STATE32_COUNT; - return KERN_SUCCESS; - } - case x86_DEBUG_STATE: - { - x86_debug_state_t *tstate = (x86_debug_state_t*)state; - - if (*state_count != x86_DEBUG_STATE_COUNT) - return(KERN_INVALID_ARGUMENT); - - if (task_has_64Bit_addr(task)) { - tstate->dsh.flavor = x86_DEBUG_STATE64; - tstate->dsh.count = x86_DEBUG_STATE64_COUNT; - - if (task->task_debug == NULL) { - bzero(&tstate->uds.ds64, sizeof(tstate->uds.ds64)); - } else { - copy_debug_state64((x86_debug_state64_t*)task->task_debug, &tstate->uds.ds64, TRUE); - } + if (task->task_debug == NULL) { + bzero(&tstate->uds.ds32, sizeof(tstate->uds.ds32)); } else { - tstate->dsh.flavor = x86_DEBUG_STATE32; - tstate->dsh.count = x86_DEBUG_STATE32_COUNT; - - if (task->task_debug == NULL) { - bzero(&tstate->uds.ds32, 
sizeof(tstate->uds.ds32)); - } else { - copy_debug_state32((x86_debug_state32_t*)task->task_debug, &tstate->uds.ds32, TRUE); - } + copy_debug_state32((x86_debug_state32_t*)task->task_debug, &tstate->uds.ds32, TRUE); } - - return KERN_SUCCESS; - } - default: - { - return KERN_INVALID_ARGUMENT; } + + return KERN_SUCCESS; + } + default: + { + return KERN_INVALID_ARGUMENT; + } } } @@ -253,7 +251,7 @@ machine_task_terminate(task_t task) if (task_debug != NULL) { task->task_debug = NULL; zfree(ids_zone, task_debug); - } + } } } @@ -263,8 +261,8 @@ machine_task_terminate(task_t task) */ kern_return_t machine_thread_inherit_taskwide( - thread_t thread, - task_t parent_task) + thread_t thread, + task_t parent_task) { if (parent_task->task_debug) { int flavor; @@ -286,8 +284,8 @@ machine_thread_inherit_taskwide( void machine_task_init(task_t new_task, - task_t parent_task, - boolean_t inherit_memory) + task_t parent_task, + boolean_t inherit_memory) { new_task->uexc_range_start = 0; new_task->uexc_range_size = 0; @@ -296,8 +294,9 @@ machine_task_init(task_t new_task, new_task->i386_ldt = 0; if (parent_task != TASK_NULL) { - if (inherit_memory && parent_task->i386_ldt) + if (inherit_memory && parent_task->i386_ldt) { new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt); + } new_task->xstate = parent_task->xstate; } else { assert(fpu_default != UNDEFINED); diff --git a/osfmk/i386/machlimits.h b/osfmk/i386/machlimits.h index b3e775f56..4b55675b1 100644 --- a/osfmk/i386/machlimits.h +++ b/osfmk/i386/machlimits.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
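machine_task_terminate() above tears the debug slot down in the safe order, restated here as the bare idiom: snapshot the pointer, clear the field, then free, so no other path can observe a dangling task->task_debug.

void *task_debug = task->task_debug;

if (task_debug != NULL) {
	task->task_debug = NULL;           /* unpublish before freeing */
	zfree(ids_zone, task_debug);       /* now nothing can reach it */
}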
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -49,29 +49,28 @@ #ifndef _MACH_MACHLIMITS_H_ #define _MACH_MACHLIMITS_H_ -#define CHAR_BIT 8 /* number of bits in a char */ +#define CHAR_BIT 8 /* number of bits in a char */ -#define SCHAR_MAX 127 /* max value for a signed char */ -#define SCHAR_MIN (-128) /* min value for a signed char */ +#define SCHAR_MAX 127 /* max value for a signed char */ +#define SCHAR_MIN (-128) /* min value for a signed char */ -#define UCHAR_MAX 255U /* max value for an unsigned char */ -#define CHAR_MAX 127 /* max value for a char */ -#define CHAR_MIN (-128) /* min value for a char */ +#define UCHAR_MAX 255U /* max value for an unsigned char */ +#define CHAR_MAX 127 /* max value for a char */ +#define CHAR_MIN (-128) /* min value for a char */ -#define USHRT_MAX 65535U /* max value for an unsigned short */ -#define SHRT_MAX 32767 /* max value for a short */ -#define SHRT_MIN (-32768) /* min value for a short */ +#define USHRT_MAX 65535U /* max value for an unsigned short */ +#define SHRT_MAX 32767 /* max value for a short */ +#define SHRT_MIN (-32768) /* min value for a short */ -#define UINT_MAX 0xFFFFFFFFU /* max value for an unsigned int */ -#define INT_MAX 2147483647 /* max value for an int */ -#define INT_MIN (-2147483647-1) /* min value for an int */ +#define UINT_MAX 0xFFFFFFFFU /* max value for an unsigned int */ +#define INT_MAX 2147483647 /* max value for an int */ +#define INT_MIN (-2147483647-1) /* min value for an int */ -#define ULONG_MAX UINT_MAX /* max value for an unsigned long */ -#define LONG_MAX INT_MAX /* max value for a long */ -#define LONG_MIN INT_MIN /* min value for a long */ +#define ULONG_MAX UINT_MAX /* max value for an unsigned long */ +#define LONG_MAX INT_MAX /* max value for a long */ +#define LONG_MIN INT_MIN /* min value for a long */ /* Must be at least two, for internationalization (NLS/KJI) */ -#define MB_LEN_MAX 4 /* multibyte characters */ +#define MB_LEN_MAX 4 /* multibyte characters */ #endif /* _MACH_MACHLIMITS_H_ */ - diff --git a/osfmk/i386/machparam.h b/osfmk/i386/machparam.h index 1096d6e5a..297ae0a98 100644 --- a/osfmk/i386/machparam.h +++ b/osfmk/i386/machparam.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. 
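A detail in the machlimits.h table worth a second look: INT_MIN is spelled (-2147483647-1) because the literal 2147483648 does not fit in an int, so writing -2147483648 would apply unary minus to a constant of a wider type and change the expression's type. Subtracting 1 from -2147483647 keeps the whole expression an int. A two-line demonstration:

#include <stdio.h>

#define MY_INT_MIN (-2147483647 - 1)

int
main(void)
{
	printf("%d\n", MY_INT_MIN);            /* -2147483648 */
	printf("%zu\n", sizeof(MY_INT_MIN));   /* sizeof(int), i.e. 4 */
	return 0;
}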
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,4 +61,3 @@ * * SPLs are true functions on i386, defined elsewhere. */ - diff --git a/osfmk/i386/misc_protos.h b/osfmk/i386/misc_protos.h index 178af299d..8a0165905 100644 --- a/osfmk/i386/misc_protos.h +++ b/osfmk/i386/misc_protos.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,72 +22,72 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifndef _I386_MISC_PROTOS_H_ -#define _I386_MISC_PROTOS_H_ +#ifndef _I386_MISC_PROTOS_H_ +#define _I386_MISC_PROTOS_H_ #include struct boot_args; struct cpu_data; -extern boolean_t virtualized; +extern boolean_t virtualized; -extern void vstart(vm_offset_t); -extern void i386_init(void); -extern void x86_init_wrapper(uintptr_t, uintptr_t) __attribute__((noreturn)); -extern void i386_vm_init( - uint64_t, - boolean_t, - struct boot_args *); +extern void vstart(vm_offset_t); +extern void i386_init(void); +extern void x86_init_wrapper(uintptr_t, uintptr_t) __attribute__((noreturn)); +extern void i386_vm_init( + uint64_t, + boolean_t, + struct boot_args *); #if NCOPY_WINDOWS > 0 extern void cpu_userwindow_init(int); extern void cpu_physwindow_init(int); #endif -extern void machine_startup(void); - -extern void get_root_device(void); -extern void picinit(void); -extern void interrupt_processor( - int cpu); -extern void mp_probe_cpus(void); -extern void panic_io_port_read(void); - -extern void remote_kdb(void); -extern void clear_kdb_intr(void); -extern void cpu_init(void); -extern void fix_desc( - void * desc, - int num_desc); -extern void fix_desc64( - void * desc, - int num_desc); -extern void cnpollc( - boolean_t on); -extern void form_pic_mask(void); -extern void intnull( - int unit); -extern char * i386_boot_info( - char *buf, - vm_size_t buf_len); - -extern void blkclr( - const char *from, - int nbytes); - -extern void memset_word( - int *dst, - int pattern, - int nwords); - +extern void machine_startup(void); + +extern void get_root_device(void); +extern void picinit(void); +extern void interrupt_processor( + int cpu); +extern void mp_probe_cpus(void); +extern void panic_io_port_read(void); + +extern void remote_kdb(void); +extern void clear_kdb_intr(void); +extern void cpu_init(void); +extern void fix_desc( + void * desc, + int num_desc); +extern void fix_desc64( + void * desc, + int num_desc); +extern void cnpollc( + boolean_t on); +extern void form_pic_mask(void); +extern void intnull( + int unit); +extern char * i386_boot_info( + char *buf, + vm_size_t buf_len); + +extern void blkclr( + const char *from, + int nbytes); + +extern void memset_word( + int *dst, + int pattern, + int nwords); + /* Move arbitrarily-aligned data from one physical address to another */ extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t nbytes); @@ -105,54 +105,54 @@ extern void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count); extern void dcache_incoherent_io_store64(addr64_t pa, unsigned int count); -extern processor_t cpu_processor_alloc(boolean_t is_boot_cpu); -extern void cpu_processor_free(processor_t proc); +extern processor_t cpu_processor_alloc(boolean_t is_boot_cpu); +extern void cpu_processor_free(processor_t proc); -extern void sysclk_gettime_interrupts_disabled( - mach_timespec_t *cur_time); +extern void sysclk_gettime_interrupts_disabled( + mach_timespec_t *cur_time); extern void rtc_nanotime_init_commpage(void); -extern void rtc_sleep_wakeup(uint64_t base); +extern void rtc_sleep_wakeup(uint64_t base); -extern void rtc_timer_start(void); +extern void rtc_timer_start(void); -extern void rtc_clock_stepping( - uint32_t new_frequency, - uint32_t old_frequency); -extern void rtc_clock_stepped( - uint32_t new_frequency, - uint32_t old_frequency); -extern void rtc_clock_napped(uint64_t, uint64_t); -extern void rtc_clock_adjust(uint64_t); +extern void rtc_clock_stepping( + uint32_t new_frequency, + 
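memset_word(), declared above, exists because memset() is byte-wise: filling a buffer with a repeating 32-bit pattern needs word-sized stores. A plausible portable restatement of what the prototype implies (the kernel's version may be hand-tuned assembly):

void
memset_word(int *dst, int pattern, int nwords)
{
	for (int i = 0; i < nwords; i++) {
		dst[i] = pattern;          /* word-sized stores, not bytes */
	}
}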
uint32_t old_frequency); +extern void rtc_clock_stepped( + uint32_t new_frequency, + uint32_t old_frequency); +extern void rtc_clock_napped(uint64_t, uint64_t); +extern void rtc_clock_adjust(uint64_t); extern void pmap_lowmem_finalize(void); thread_t Switch_context(thread_t, thread_continue_t, thread_t); -thread_t Shutdown_context(thread_t thread, void (*doshutdown)(processor_t),processor_t processor); +thread_t Shutdown_context(thread_t thread, void (*doshutdown)(processor_t), processor_t processor); #ifdef __x86_64__ uint64_t x86_64_pre_sleep(void); void x86_64_post_sleep(uint64_t new_cr3); #endif -boolean_t +boolean_t debug_state_is_valid32(x86_debug_state32_t *ds); -boolean_t +boolean_t debug_state_is_valid64(x86_debug_state64_t *ds); -void +void copy_debug_state32(x86_debug_state32_t *src, x86_debug_state32_t *target, boolean_t all); -void +void copy_debug_state64(x86_debug_state64_t *src, x86_debug_state64_t *target, boolean_t all); extern void act_machine_switch_pcb(thread_t old, thread_t new); /* Fast-restart parameters */ -#define FULL_SLAVE_INIT (NULL) -#define FAST_SLAVE_INIT ((void *)(uintptr_t)1) +#define FULL_SLAVE_INIT (NULL) +#define FAST_SLAVE_INIT ((void *)(uintptr_t)1) void cpu_pmc_control(void *); diff --git a/osfmk/i386/mp.c b/osfmk/i386/mp.c index e5c83e895..428f6151c 100644 --- a/osfmk/i386/mp.c +++ b/osfmk/i386/mp.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -85,67 +86,67 @@ #include #endif /* MONOTONIC */ -#if MP_DEBUG -#define PAUSE delay(1000000) -#define DBG(x...) kprintf(x) +#if MP_DEBUG +#define PAUSE delay(1000000) +#define DBG(x...) kprintf(x) #else #define DBG(x...) 
#define PAUSE -#endif /* MP_DEBUG */ +#endif /* MP_DEBUG */ /* Debugging/test trace events: */ -#define TRACE_MP_TLB_FLUSH MACHDBG_CODE(DBG_MACH_MP, 0) -#define TRACE_MP_CPUS_CALL MACHDBG_CODE(DBG_MACH_MP, 1) -#define TRACE_MP_CPUS_CALL_LOCAL MACHDBG_CODE(DBG_MACH_MP, 2) -#define TRACE_MP_CPUS_CALL_ACTION MACHDBG_CODE(DBG_MACH_MP, 3) -#define TRACE_MP_CPUS_CALL_NOBUF MACHDBG_CODE(DBG_MACH_MP, 4) -#define TRACE_MP_CPU_FAST_START MACHDBG_CODE(DBG_MACH_MP, 5) -#define TRACE_MP_CPU_START MACHDBG_CODE(DBG_MACH_MP, 6) -#define TRACE_MP_CPU_DEACTIVATE MACHDBG_CODE(DBG_MACH_MP, 7) +#define TRACE_MP_TLB_FLUSH MACHDBG_CODE(DBG_MACH_MP, 0) +#define TRACE_MP_CPUS_CALL MACHDBG_CODE(DBG_MACH_MP, 1) +#define TRACE_MP_CPUS_CALL_LOCAL MACHDBG_CODE(DBG_MACH_MP, 2) +#define TRACE_MP_CPUS_CALL_ACTION MACHDBG_CODE(DBG_MACH_MP, 3) +#define TRACE_MP_CPUS_CALL_NOBUF MACHDBG_CODE(DBG_MACH_MP, 4) +#define TRACE_MP_CPU_FAST_START MACHDBG_CODE(DBG_MACH_MP, 5) +#define TRACE_MP_CPU_START MACHDBG_CODE(DBG_MACH_MP, 6) +#define TRACE_MP_CPU_DEACTIVATE MACHDBG_CODE(DBG_MACH_MP, 7) -#define ABS(v) (((v) > 0)?(v):-(v)) +#define ABS(v) (((v) > 0)?(v):-(v)) -void slave_boot_init(void); -void i386_cpu_IPI(int cpu); +void slave_boot_init(void); +void i386_cpu_IPI(int cpu); #if MACH_KDP -static void mp_kdp_wait(boolean_t flush, boolean_t isNMI); +static void mp_kdp_wait(boolean_t flush, boolean_t isNMI); #endif /* MACH_KDP */ #if MACH_KDP -static boolean_t cpu_signal_pending(int cpu, mp_event_t event); +static boolean_t cpu_signal_pending(int cpu, mp_event_t event); #endif /* MACH_KDP */ -static int NMIInterruptHandler(x86_saved_state_t *regs); +static int NMIInterruptHandler(x86_saved_state_t *regs); -boolean_t smp_initialized = FALSE; -uint32_t TSC_sync_margin = 0xFFF; -volatile boolean_t force_immediate_debugger_NMI = FALSE; -volatile boolean_t pmap_tlb_flush_timeout = FALSE; +boolean_t smp_initialized = FALSE; +uint32_t TSC_sync_margin = 0xFFF; +volatile boolean_t force_immediate_debugger_NMI = FALSE; +volatile boolean_t pmap_tlb_flush_timeout = FALSE; #if DEBUG || DEVELOPMENT -boolean_t mp_interrupt_watchdog_enabled = TRUE; -uint32_t mp_interrupt_watchdog_events = 0; +boolean_t mp_interrupt_watchdog_enabled = TRUE; +uint32_t mp_interrupt_watchdog_events = 0; #endif -decl_simple_lock_data(,debugger_callback_lock); +decl_simple_lock_data(, debugger_callback_lock); struct debugger_callback *debugger_callback = NULL; decl_lck_mtx_data(static, mp_cpu_boot_lock); -lck_mtx_ext_t mp_cpu_boot_lock_ext; +lck_mtx_ext_t mp_cpu_boot_lock_ext; /* Variables needed for MP rendezvous. 
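 * (The protocol, per mp_rendezvous_action() below: each cpu atomically
 * increments mp_rv_entry and spins until it reaches mp_rv_ncpus, runs the
 * action function, repeats the same count-and-spin on mp_rv_exit, and
 * finally signals mp_rv_complete so the initiating cpu knows all
 * participants are done.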
*/ -decl_simple_lock_data(,mp_rv_lock); -static void (*mp_rv_setup_func)(void *arg); -static void (*mp_rv_action_func)(void *arg); -static void (*mp_rv_teardown_func)(void *arg); -static void *mp_rv_func_arg; -static volatile int mp_rv_ncpus; - /* Cache-aligned barriers: */ -static volatile long mp_rv_entry __attribute__((aligned(64))); -static volatile long mp_rv_exit __attribute__((aligned(64))); -static volatile long mp_rv_complete __attribute__((aligned(64))); - -volatile uint64_t debugger_entry_time; -volatile uint64_t debugger_exit_time; +decl_simple_lock_data(, mp_rv_lock); +static void (*mp_rv_setup_func)(void *arg); +static void (*mp_rv_action_func)(void *arg); +static void (*mp_rv_teardown_func)(void *arg); +static void *mp_rv_func_arg; +static volatile int mp_rv_ncpus; +/* Cache-aligned barriers: */ +static volatile long mp_rv_entry __attribute__((aligned(64))); +static volatile long mp_rv_exit __attribute__((aligned(64))); +static volatile long mp_rv_complete __attribute__((aligned(64))); + +volatile uint64_t debugger_entry_time; +volatile uint64_t debugger_exit_time; #if MACH_KDP #include extern int kdp_snapshot; @@ -163,27 +164,27 @@ static struct _kdp_xcpu_call_func { /* Variables needed for MP broadcast. */ static void (*mp_bc_action_func)(void *arg); static void *mp_bc_func_arg; -static int mp_bc_ncpus; +static int mp_bc_ncpus; static volatile long mp_bc_count; decl_lck_mtx_data(static, mp_bc_lock); -lck_mtx_ext_t mp_bc_lock_ext; -static volatile int debugger_cpu = -1; -volatile long NMIPI_acks = 0; -volatile long NMI_count = 0; -static NMI_reason_t NMI_panic_reason = NONE; -static int vector_timed_out; +lck_mtx_ext_t mp_bc_lock_ext; +static volatile int debugger_cpu = -1; +volatile long NMIPI_acks = 0; +volatile long NMI_count = 0; +static NMI_reason_t NMI_panic_reason = NONE; +static int vector_timed_out; -extern void NMI_cpus(void); +extern void NMI_cpus(void); -static void mp_cpus_call_init(void); -static void mp_cpus_call_action(void); -static void mp_call_PM(void); +static void mp_cpus_call_init(void); +static void mp_cpus_call_action(void); +static void mp_call_PM(void); -char mp_slave_stack[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); // Temp stack for slave init +char mp_slave_stack[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE))); // Temp stack for slave init /* PAL-related routines */ -boolean_t i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler, - int ipi_vector, i386_intr_func_t ipi_handler); +boolean_t i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler, + int ipi_vector, i386_intr_func_t ipi_handler); void i386_start_cpu(int lapic_id, int cpu_num); void i386_send_NMI(int cpu); void NMIPI_enable(boolean_t); @@ -194,37 +195,37 @@ void NMIPI_enable(boolean_t); */ struct profile_vars _profile_vars; struct profile_vars *_profile_vars_cpus[MAX_CPUS] = { &_profile_vars }; -#define GPROF_INIT() \ -{ \ - int i; \ - \ - /* Hack to initialize pointers to unused profiling structs */ \ - for (i = 1; i < MAX_CPUS; i++) \ - _profile_vars_cpus[i] = &_profile_vars; \ +#define GPROF_INIT() \ +{ \ + int i; \ + \ + /* Hack to initialize pointers to unused profiling structs */ \ + for (i = 1; i < MAX_CPUS; i++) \ + _profile_vars_cpus[i] = &_profile_vars; \ } #else #define GPROF_INIT() #endif /* GPROF */ -static lck_grp_t smp_lck_grp; -static lck_grp_attr_t smp_lck_grp_attr; +static lck_grp_t smp_lck_grp; +static lck_grp_attr_t smp_lck_grp_attr; -#define NUM_CPU_WARM_CALLS 20 -struct timer_call cpu_warm_call_arr[NUM_CPU_WARM_CALLS]; -queue_head_t 
cpu_warm_call_list; +#define NUM_CPU_WARM_CALLS 20 +struct timer_call cpu_warm_call_arr[NUM_CPU_WARM_CALLS]; +queue_head_t cpu_warm_call_list; decl_simple_lock_data(static, cpu_warm_lock); typedef struct cpu_warm_data { - timer_call_t cwd_call; - uint64_t cwd_deadline; - int cwd_result; + timer_call_t cwd_call; + uint64_t cwd_deadline; + int cwd_result; } *cpu_warm_data_t; -static void cpu_prewarm_init(void); -static void cpu_warm_timer_call_func(call_entry_param_t p0, call_entry_param_t p1); -static void _cpu_warm_setup(void *arg); -static timer_call_t grab_warm_timer_call(void); -static void free_warm_timer_call(timer_call_t call); +static void cpu_prewarm_init(void); +static void cpu_warm_timer_call_func(call_entry_param_t p0, call_entry_param_t p1); +static void _cpu_warm_setup(void *arg); +static timer_call_t grab_warm_timer_call(void); +static void free_warm_timer_call(timer_call_t call); void smp_init(void) @@ -237,9 +238,10 @@ smp_init(void) lck_mtx_init_ext(&mp_bc_lock, &mp_bc_lock_ext, &smp_lck_grp, LCK_ATTR_NULL); console_init(); - if(!i386_smp_init(LAPIC_NMI_INTERRUPT, NMIInterruptHandler, - LAPIC_VECTOR(INTERPROCESSOR), cpu_signal_handler)) + if (!i386_smp_init(LAPIC_NMI_INTERRUPT, NMIInterruptHandler, + LAPIC_VECTOR(INTERPROCESSOR), cpu_signal_handler)) { return; + } cpu_thread_init(); @@ -251,15 +253,15 @@ smp_init(void) #if DEBUG || DEVELOPMENT if (PE_parse_boot_argn("interrupt_watchdog", - &mp_interrupt_watchdog_enabled, - sizeof(mp_interrupt_watchdog_enabled))) { + &mp_interrupt_watchdog_enabled, + sizeof(mp_interrupt_watchdog_enabled))) { kprintf("Interrupt watchdog %sabled\n", - mp_interrupt_watchdog_enabled ? "en" : "dis"); + mp_interrupt_watchdog_enabled ? "en" : "dis"); } #endif if (PE_parse_boot_argn("TSC_sync_margin", - &TSC_sync_margin, sizeof(TSC_sync_margin))) { + &TSC_sync_margin, sizeof(TSC_sync_margin))) { kprintf("TSC sync Margin 0x%x\n", TSC_sync_margin); } else if (cpuid_vmm_present()) { kprintf("TSC sync margin disabled\n"); @@ -273,18 +275,18 @@ smp_init(void) } typedef struct { - int target_cpu; - int target_lapic; - int starter_cpu; + int target_cpu; + int target_lapic; + int starter_cpu; } processor_start_info_t; -static processor_start_info_t start_info __attribute__((aligned(64))); +static processor_start_info_t start_info __attribute__((aligned(64))); -/* +/* * Cache-alignment is to avoid cross-cpu false-sharing interference. */ -static volatile long tsc_entry_barrier __attribute__((aligned(64))); -static volatile long tsc_exit_barrier __attribute__((aligned(64))); -static volatile uint64_t tsc_target __attribute__((aligned(64))); +static volatile long tsc_entry_barrier __attribute__((aligned(64))); +static volatile long tsc_exit_barrier __attribute__((aligned(64))); +static volatile uint64_t tsc_target __attribute__((aligned(64))); /* * Poll a CPU to see when it has marked itself as running. @@ -293,8 +295,9 @@ static void mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay) { while (iters-- > 0) { - if (cpu_datap(slot_num)->cpu_running) + if (cpu_datap(slot_num)->cpu_running) { break; + } delay(usecdelay); } } @@ -305,17 +308,18 @@ mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay) kern_return_t intel_startCPU_fast(int slot_num) { - kern_return_t rc; + kern_return_t rc; /* * Try to perform a fast restart */ rc = pmCPUExitHalt(slot_num); - if (rc != KERN_SUCCESS) + if (rc != KERN_SUCCESS) { /* * The CPU was not eligible for a fast restart. 
*/ - return(rc); + return rc; + } KERNEL_DEBUG_CONSTANT( TRACE_MP_CPU_FAST_START | DBG_FUNC_START, @@ -325,7 +329,7 @@ intel_startCPU_fast(int slot_num) * Wait until the CPU is back online. */ mp_disable_preemption(); - + /* * We use short pauses (1us) for low latency. 30,000 iterations is * longer than a full restart would require so it should be more @@ -343,10 +347,11 @@ intel_startCPU_fast(int slot_num) * Check to make sure that the CPU is really running. If not, * go through the slow path. */ - if (cpu_datap(slot_num)->cpu_running) - return(KERN_SUCCESS); - else - return(KERN_FAILURE); + if (cpu_datap(slot_num)->cpu_running) { + return KERN_SUCCESS; + } else { + return KERN_FAILURE; + } } static void @@ -362,8 +367,9 @@ started_cpu(void) */ tsc_target = 0; atomic_decl(&tsc_entry_barrier, 1); - while (tsc_entry_barrier != 0) - ; /* spin for starter and target at barrier */ + while (tsc_entry_barrier != 0) { + ; /* spin for starter and target at barrier */ + } tsc_target = rdtsc64(); atomic_decl(&tsc_exit_barrier, 1); } @@ -372,15 +378,16 @@ started_cpu(void) static void start_cpu(void *arg) { - int i = 1000; - processor_start_info_t *psip = (processor_start_info_t *) arg; + int i = 1000; + processor_start_info_t *psip = (processor_start_info_t *) arg; /* Ignore this if the current processor is not the starter */ - if (cpu_number() != psip->starter_cpu) + if (cpu_number() != psip->starter_cpu) { return; + } DBG("start_cpu(%p) about to start cpu %d, lapic %d\n", - arg, psip->target_cpu, psip->target_lapic); + arg, psip->target_cpu, psip->target_lapic); KERNEL_DEBUG_CONSTANT( TRACE_MP_CPU_START | DBG_FUNC_START, @@ -389,14 +396,14 @@ start_cpu(void *arg) i386_start_cpu(psip->target_lapic, psip->target_cpu); -#ifdef POSTCODE_DELAY +#ifdef POSTCODE_DELAY /* Wait much longer if postcodes are displayed for a delay period. */ i *= 10000; #endif DBG("start_cpu(%p) about to wait for cpu %d\n", - arg, psip->target_cpu); + arg, psip->target_cpu); - mp_wait_for_cpu_up(psip->target_cpu, i*100, 100); + mp_wait_for_cpu_up(psip->target_cpu, i * 100, 100); KERNEL_DEBUG_CONSTANT( TRACE_MP_CPU_START | DBG_FUNC_END, @@ -411,26 +418,28 @@ start_cpu(void *arg) * TSC_sync_margin (TSC_SYNC_MARGIN) ticks. This margin * can be overriden by boot-arg (with 0 meaning no checking). 
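 * The comparison is driven by a two-party barrier handshake: starter and
 * target each atomic_decl() tsc_entry_barrier and spin until it drains,
 * the target samples rdtsc64() into tsc_target, and both then drain
 * tsc_exit_barrier before the starter computes the delta.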
*/ - uint64_t tsc_starter; - int64_t tsc_delta; + uint64_t tsc_starter; + int64_t tsc_delta; atomic_decl(&tsc_entry_barrier, 1); - while (tsc_entry_barrier != 0) - ; /* spin for both processors at barrier */ + while (tsc_entry_barrier != 0) { + ; /* spin for both processors at barrier */ + } tsc_starter = rdtsc64(); atomic_decl(&tsc_exit_barrier, 1); - while (tsc_exit_barrier != 0) - ; /* spin for target to store its TSC */ + while (tsc_exit_barrier != 0) { + ; /* spin for target to store its TSC */ + } tsc_delta = tsc_target - tsc_starter; kprintf("TSC sync for cpu %d: 0x%016llx delta 0x%llx (%lld)\n", - psip->target_cpu, tsc_target, tsc_delta, tsc_delta); - if (ABS(tsc_delta) > (int64_t) TSC_sync_margin) { + psip->target_cpu, tsc_target, tsc_delta, tsc_delta); + if (ABS(tsc_delta) > (int64_t) TSC_sync_margin) { #if DEBUG panic( #else printf( #endif "Unsynchronized TSC for cpu %d: " - "0x%016llx, delta 0x%llx\n", + "0x%016llx, delta 0x%llx\n", psip->target_cpu, tsc_target, tsc_delta); } } @@ -438,10 +447,10 @@ start_cpu(void *arg) kern_return_t intel_startCPU( - int slot_num) + int slot_num) { - int lapic = cpu_to_lapic[slot_num]; - boolean_t istate; + int lapic = cpu_to_lapic[slot_num]; + boolean_t istate; assert(lapic != -1); @@ -496,13 +505,13 @@ intel_startCPU( } } -#if MP_DEBUG -cpu_signal_event_log_t *cpu_signal[MAX_CPUS]; -cpu_signal_event_log_t *cpu_handle[MAX_CPUS]; +#if MP_DEBUG +cpu_signal_event_log_t *cpu_signal[MAX_CPUS]; +cpu_signal_event_log_t *cpu_handle[MAX_CPUS]; MP_EVENT_NAME_DECL(); -#endif /* MP_DEBUG */ +#endif /* MP_DEBUG */ /* * Note: called with NULL state when polling for TLB flush and cross-calls. @@ -510,11 +519,11 @@ MP_EVENT_NAME_DECL(); int cpu_signal_handler(x86_saved_state_t *regs) { -#if !MACH_KDP +#if !MACH_KDP #pragma unused (regs) #endif /* !MACH_KDP */ - int my_cpu; - volatile int *my_word; + int my_cpu; + volatile int *my_word; SCHED_STATS_IPI(current_processor()); @@ -528,9 +537,9 @@ cpu_signal_handler(x86_saved_state_t *regs) cpu_data_ptr[my_cpu]->cpu_prior_signals = *my_word; do { -#if MACH_KDP +#if MACH_KDP if (i_bit(MP_KDP, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_KDP); + DBGLOG(cpu_handle, my_cpu, MP_KDP); i_bit_clear(MP_KDP, my_word); /* Ensure that the i386_kernel_state at the base of the * current thread's stack (if any) is synchronized with the @@ -538,23 +547,25 @@ cpu_signal_handler(x86_saved_state_t *regs) * access through the debugger. 
 */ sync_iss_to_iks(regs); - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE); + } mp_kdp_wait(TRUE, FALSE); - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL); + } } else -#endif /* MACH_KDP */ +#endif /* MACH_KDP */ if (i_bit(MP_TLB_FLUSH, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_TLB_FLUSH); + DBGLOG(cpu_handle, my_cpu, MP_TLB_FLUSH); i_bit_clear(MP_TLB_FLUSH, my_word); pmap_update_interrupt(); } else if (i_bit(MP_CALL, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_CALL); + DBGLOG(cpu_handle, my_cpu, MP_CALL); i_bit_clear(MP_CALL, my_word); mp_cpus_call_action(); } else if (i_bit(MP_CALL_PM, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_CALL_PM); + DBGLOG(cpu_handle, my_cpu, MP_CALL_PM); i_bit_clear(MP_CALL_PM, my_word); mp_call_PM(); } @@ -562,7 +573,7 @@ cpu_signal_handler(x86_saved_state_t *regs) /* Called to poll only for cross-calls and TLB flush */ break; } else if (i_bit(MP_AST, my_word)) { - DBGLOG(cpu_handle,my_cpu,MP_AST); + DBGLOG(cpu_handle, my_cpu, MP_AST); i_bit_clear(MP_AST, my_word); ast_check(cpu_to_processor(my_cpu)); } @@ -575,61 +586,65 @@ extern void kprintf_break_lock(void); int NMIInterruptHandler(x86_saved_state_t *regs) { - void *stackptr; - char pstr[192]; - uint64_t now = mach_absolute_time(); + void *stackptr; + char pstr[256]; + uint64_t now = mach_absolute_time(); if (panic_active() && !panicDebugging) { - if (pmsafe_debug) + if (pmsafe_debug) { pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE); - for(;;) + } + for (;;) { cpu_pause(); + } } atomic_incl(&NMIPI_acks, 1); atomic_incl(&NMI_count, 1); sync_iss_to_iks_unconditionally(regs); - __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr)); + __asm__ volatile ("movq %%rbp, %0" : "=m" (stackptr)); - if (cpu_number() == debugger_cpu) + if (cpu_number() == debugger_cpu) { goto NMExit; + } if (NMI_panic_reason == SPINLOCK_TIMEOUT) { snprintf(&pstr[0], sizeof(pstr), - "Panic(CPU %d, time %llu): NMIPI for spinlock acquisition timeout, spinlock: %p, spinlock owner: %p, current_thread: %p, spinlock_owner_cpu: 0x%x\n", - cpu_number(), now, spinlock_timed_out, (void *) spinlock_timed_out->interlock.lock_data, current_thread(), spinlock_owner_cpu); + "Panic(CPU %d, time %llu): NMIPI for spinlock acquisition timeout, spinlock: %p, spinlock owner: %p, current_thread: %p, spinlock_owner_cpu: 0x%x\n", + cpu_number(), now, spinlock_timed_out, (void *) spinlock_timed_out->interlock.lock_data, current_thread(), spinlock_owner_cpu); panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs); } else if (NMI_panic_reason == TLB_FLUSH_TIMEOUT) { snprintf(&pstr[0], sizeof(pstr), - "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: TLB flush timeout, TLB state:0x%x\n", - cpu_number(), now, current_cpu_datap()->cpu_tlb_invalid); + "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: TLB flush timeout, TLB state:0x%x\n", + cpu_number(), now, current_cpu_datap()->cpu_tlb_invalid); panic_i386_backtrace(stackptr, 48, &pstr[0], TRUE, regs); } else if (NMI_panic_reason == CROSSCALL_TIMEOUT) { snprintf(&pstr[0], sizeof(pstr), - "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: cross-call timeout\n", - cpu_number(), now); + "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: cross-call timeout\n", + cpu_number(), now); panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs); } else if (NMI_panic_reason == INTERRUPT_WATCHDOG) { snprintf(&pstr[0], sizeof(pstr), -
"Panic(CPU %d, time %llu): NMIPI for unresponsive processor: interrupt watchdog for vector 0x%x\n", - cpu_number(), now, vector_timed_out); + "Panic(CPU %d, time %llu): NMIPI for unresponsive processor: interrupt watchdog for vector 0x%x\n", + cpu_number(), now, vector_timed_out); panic_i386_backtrace(stackptr, 64, &pstr[0], TRUE, regs); } - + #if MACH_KDP - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_SAFE); + } current_cpu_datap()->cpu_NMI_acknowledged = TRUE; i_bit_clear(MP_KDP, ¤t_cpu_datap()->cpu_signals); if (panic_active() || NMI_panic_reason != NONE) { mp_kdp_wait(FALSE, TRUE); } else if (!mp_kdp_trap && - !mp_kdp_is_NMI && - virtualized && (debug_boot_arg & DB_NMI)) { + !mp_kdp_is_NMI && + virtualized && (debug_boot_arg & DB_NMI)) { /* * Under a VMM with the debug boot-arg set, drop into kdp. * Since an NMI is involved, there's a risk of contending with - * a panic. And side-effects of NMIs may result in entry into, + * a panic. And side-effects of NMIs may result in entry into, * and continuing from, the debugger being unreliable. */ if (__sync_bool_compare_and_swap(&mp_kdp_is_NMI, FALSE, TRUE)) { @@ -644,10 +659,11 @@ NMIInterruptHandler(x86_saved_state_t *regs) } else { mp_kdp_wait(FALSE, FALSE); } - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(¤t_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL); + } #endif -NMExit: +NMExit: return 1; } @@ -685,25 +701,27 @@ cpu_NMI_interrupt(int cpu) void NMI_cpus(void) { - unsigned int cpu; - boolean_t intrs_enabled; - uint64_t tsc_timeout; + unsigned int cpu; + boolean_t intrs_enabled; + uint64_t tsc_timeout; intrs_enabled = ml_set_interrupts_enabled(FALSE); NMIPI_enable(TRUE); for (cpu = 0; cpu < real_ncpus; cpu++) { - if (!cpu_is_running(cpu)) + if (!cpu_is_running(cpu)) { continue; + } cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE; cpu_NMI_interrupt(cpu); tsc_timeout = !machine_timeout_suspended() ? 
- rdtsc64() + (1000 * 1000 * 1000 * 10ULL) : - ~0ULL; + rdtsc64() + (1000 * 1000 * 1000 * 10ULL) : + ~0ULL; while (!cpu_datap(cpu)->cpu_NMI_acknowledged) { handle_pending_TLB_flushes(); cpu_pause(); - if (rdtsc64() > tsc_timeout) + if (rdtsc64() > tsc_timeout) { panic("NMI_cpus() timeout cpu %d", cpu); + } } cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE; } @@ -712,15 +730,16 @@ NMI_cpus(void) ml_set_interrupts_enabled(intrs_enabled); } -static void (* volatile mp_PM_func)(void) = NULL; +static void(*volatile mp_PM_func)(void) = NULL; static void mp_call_PM(void) { assert(!ml_get_interrupts_enabled()); - if (mp_PM_func != NULL) + if (mp_PM_func != NULL) { mp_PM_func(); + } } void @@ -729,10 +748,11 @@ cpu_PM_interrupt(int cpu) assert(!ml_get_interrupts_enabled()); if (mp_PM_func != NULL) { - if (cpu == cpu_number()) + if (cpu == cpu_number()) { mp_PM_func(); - else + } else { i386_signal_cpu(cpu, MP_CALL_PM, ASYNC); + } } } @@ -745,36 +765,39 @@ PM_interrupt_register(void (*fn)(void)) void i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode) { - volatile int *signals = &cpu_datap(cpu)->cpu_signals; - uint64_t tsc_timeout; + volatile int *signals = &cpu_datap(cpu)->cpu_signals; + uint64_t tsc_timeout; + - - if (!cpu_datap(cpu)->cpu_running) + if (!cpu_datap(cpu)->cpu_running) { return; + } - if (event == MP_TLB_FLUSH) - KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_START, cpu, 0, 0, 0, 0); + if (event == MP_TLB_FLUSH) { + KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_START, cpu, 0, 0, 0, 0); + } DBGLOG(cpu_signal, cpu, event); - + i_bit_set(event, signals); i386_cpu_IPI(cpu); if (mode == SYNC) { - again: +again: tsc_timeout = !machine_timeout_suspended() ? - rdtsc64() + (1000*1000*1000) : - ~0ULL; + rdtsc64() + (1000 * 1000 * 1000) : + ~0ULL; while (i_bit(event, signals) && rdtsc64() < tsc_timeout) { cpu_pause(); } if (i_bit(event, signals)) { DBG("i386_signal_cpu(%d, 0x%x, SYNC) timed out\n", - cpu, event); + cpu, event); goto again; } } - if (event == MP_TLB_FLUSH) - KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_END, cpu, 0, 0, 0, 0); + if (event == MP_TLB_FLUSH) { + KERNEL_DEBUG(TRACE_MP_TLB_FLUSH | DBG_FUNC_END, cpu, 0, 0, 0, 0); + } } /* @@ -784,11 +807,12 @@ i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode) static boolean_t mp_spin_timeout(uint64_t tsc_start) { - uint64_t tsc_timeout; + uint64_t tsc_timeout; cpu_pause(); - if (machine_timeout_suspended()) + if (machine_timeout_suspended()) { return FALSE; + } /* * The timeout is 4 * the spinlock timeout period @@ -796,8 +820,8 @@ mp_spin_timeout(uint64_t tsc_start) * in which case we allow an even greater margin. */ tsc_timeout = disable_serial_output ? 
LockTimeOutTSC << 2 - : LockTimeOutTSC << 4; - return (rdtsc64() > tsc_start + tsc_timeout); + : LockTimeOutTSC << 4; + return rdtsc64() > tsc_start + tsc_timeout; } /* @@ -809,30 +833,30 @@ boolean_t mp_safe_spin_lock(usimple_lock_t lock) { if (ml_get_interrupts_enabled()) { - simple_lock(lock); + simple_lock(lock, LCK_GRP_NULL); return TRUE; } else { uint64_t tsc_spin_start = rdtsc64(); - while (!simple_lock_try(lock)) { + while (!simple_lock_try(lock, LCK_GRP_NULL)) { cpu_signal_handler(NULL); if (mp_spin_timeout(tsc_spin_start)) { uint32_t lock_cpu; uintptr_t lowner = (uintptr_t) - lock->interlock.lock_data; + lock->interlock.lock_data; spinlock_timed_out = lock; lock_cpu = spinlock_timeout_NMI(lowner); NMIPI_panic(cpu_to_cpumask(lock_cpu), SPINLOCK_TIMEOUT); panic("mp_safe_spin_lock() timed out, lock: %p, owner thread: 0x%lx, current_thread: %p, owner on CPU 0x%x, time: %llu", - lock, lowner, current_thread(), lock_cpu, mach_absolute_time()); + lock, lowner, current_thread(), lock_cpu, mach_absolute_time()); } } return FALSE; - } + } } /* * All-CPU rendezvous: - * - CPUs are signalled, + * - CPUs are signalled, * - all execute the setup function (if specified), * - rendezvous (i.e. all cpus reach a barrier), * - all execute the action function (if specified), @@ -847,8 +871,8 @@ mp_safe_spin_lock(usimple_lock_t lock) static void mp_rendezvous_action(__unused void *null) { - boolean_t intrs_enabled; - uint64_t tsc_spin_start; + boolean_t intrs_enabled; + uint64_t tsc_spin_start; /* * Note that mp_rv_lock was acquired by the thread that initiated the @@ -858,8 +882,9 @@ mp_rendezvous_action(__unused void *null) current_cpu_datap()->cpu_rendezvous_in_progress = TRUE; /* setup function */ - if (mp_rv_setup_func != NULL) + if (mp_rv_setup_func != NULL) { mp_rv_setup_func(mp_rv_func_arg); + } intrs_enabled = ml_get_interrupts_enabled(); @@ -869,30 +894,35 @@ mp_rendezvous_action(__unused void *null) while (mp_rv_entry < mp_rv_ncpus) { /* poll for pesky tlb flushes if interrupts disabled */ - if (!intrs_enabled) + if (!intrs_enabled) { handle_pending_TLB_flushes(); + } if (mp_spin_timeout(tsc_spin_start)) { panic("mp_rv_action() entry: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_entry, mp_rv_ncpus, tsc_spin_start, rdtsc64()); } } /* action function */ - if (mp_rv_action_func != NULL) + if (mp_rv_action_func != NULL) { mp_rv_action_func(mp_rv_func_arg); + } /* spin on exit rendezvous */ atomic_incl(&mp_rv_exit, 1); tsc_spin_start = rdtsc64(); while (mp_rv_exit < mp_rv_ncpus) { - if (!intrs_enabled) + if (!intrs_enabled) { handle_pending_TLB_flushes(); - if (mp_spin_timeout(tsc_spin_start)) + } + if (mp_spin_timeout(tsc_spin_start)) { panic("mp_rv_action() exit: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_exit, mp_rv_ncpus, tsc_spin_start, rdtsc64()); + } } /* teardown function */ - if (mp_rv_teardown_func != NULL) + if (mp_rv_teardown_func != NULL) { mp_rv_teardown_func(mp_rv_func_arg); + } current_cpu_datap()->cpu_rendezvous_in_progress = FALSE; @@ -901,23 +931,26 @@ mp_rendezvous_action(__unused void *null) } void -mp_rendezvous(void (*setup_func)(void *), - void (*action_func)(void *), - void (*teardown_func)(void *), - void *arg) +mp_rendezvous(void (*setup_func)(void *), + void (*action_func)(void *), + void (*teardown_func)(void *), + void *arg) { - uint64_t tsc_spin_start; + uint64_t tsc_spin_start; if (!smp_initialized) { - if (setup_func != NULL) + if (setup_func != NULL) { setup_func(arg); - if (action_func != NULL) + } + if (action_func != NULL) { 
action_func(arg); - if (teardown_func != NULL) + } + if (teardown_func != NULL) { teardown_func(arg); + } return; } - + /* obtain rendezvous lock */ mp_rendezvous_lock(); @@ -947,10 +980,11 @@ mp_rendezvous(void (*setup_func)(void *), */ tsc_spin_start = rdtsc64(); while (mp_rv_complete < mp_rv_ncpus) { - if (mp_spin_timeout(tsc_spin_start)) + if (mp_spin_timeout(tsc_spin_start)) { panic("mp_rendezvous() timeout: %ld of %d responses, start: 0x%llx, cur: 0x%llx", mp_rv_complete, mp_rv_ncpus, tsc_spin_start, rdtsc64()); + } } - + /* Tidy up */ mp_rv_setup_func = NULL; mp_rv_action_func = NULL; @@ -1003,40 +1037,40 @@ teardown_restore_intrs(__unused void * param_not_used) */ void mp_rendezvous_no_intrs( - void (*action_func)(void *), - void *arg) + void (*action_func)(void *), + void *arg) { mp_rendezvous(setup_disable_intrs, - action_func, - teardown_restore_intrs, - arg); + action_func, + teardown_restore_intrs, + arg); } typedef struct { - queue_chain_t link; /* queue linkage */ - void (*func)(void *,void *); /* routine to call */ - void *arg0; /* routine's 1st arg */ - void *arg1; /* routine's 2nd arg */ - cpumask_t *maskp; /* completion response mask */ + queue_chain_t link; /* queue linkage */ + void (*func)(void *, void *); /* routine to call */ + void *arg0; /* routine's 1st arg */ + void *arg1; /* routine's 2nd arg */ + cpumask_t *maskp; /* completion response mask */ } mp_call_t; typedef struct { - queue_head_t queue; - decl_simple_lock_data(, lock); + queue_head_t queue; + decl_simple_lock_data(, lock); } mp_call_queue_t; -#define MP_CPUS_CALL_BUFS_PER_CPU MAX_CPUS -static mp_call_queue_t mp_cpus_call_freelist; -static mp_call_queue_t mp_cpus_call_head[MAX_CPUS]; +#define MP_CPUS_CALL_BUFS_PER_CPU MAX_CPUS +static mp_call_queue_t mp_cpus_call_freelist; +static mp_call_queue_t mp_cpus_call_head[MAX_CPUS]; static inline boolean_t mp_call_head_lock(mp_call_queue_t *cqp) { - boolean_t intrs_enabled; + boolean_t intrs_enabled; intrs_enabled = ml_set_interrupts_enabled(FALSE); - simple_lock(&cqp->lock); + simple_lock(&cqp->lock, LCK_GRP_NULL); return intrs_enabled; } @@ -1045,7 +1079,8 @@ mp_call_head_lock(mp_call_queue_t *cqp) * Deliver an NMIPI to a set of processors to cause them to panic . 
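 * The reason is latched in NMI_panic_reason so that NMIInterruptHandler()
 * above can format the matching panic string on each target; the sender
 * then waits for acknowledgements, but only up to LockTimeOut, since an
 * unresponsive cpu is precisely what is being reported.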
*/ void -NMIPI_panic(cpumask_t cpu_mask, NMI_reason_t why) { +NMIPI_panic(cpumask_t cpu_mask, NMI_reason_t why) +{ unsigned int cpu; cpumask_t cpu_bit; uint64_t deadline; @@ -1054,8 +1089,9 @@ NMIPI_panic(cpumask_t cpu_mask, NMI_reason_t why) { NMI_panic_reason = why; for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { - if ((cpu_mask & cpu_bit) == 0) + if ((cpu_mask & cpu_bit) == 0) { continue; + } cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE; cpu_NMI_interrupt(cpu); } @@ -1063,10 +1099,11 @@ NMIPI_panic(cpumask_t cpu_mask, NMI_reason_t why) { /* Wait (only so long) for NMi'ed cpus to respond */ deadline = mach_absolute_time() + LockTimeOut; for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { - if ((cpu_mask & cpu_bit) == 0) + if ((cpu_mask & cpu_bit) == 0) { continue; + } while (!cpu_datap(cpu)->cpu_NMI_acknowledged && - mach_absolute_time() < deadline) { + mach_absolute_time() < deadline) { cpu_pause(); } } @@ -1077,7 +1114,7 @@ static inline boolean_t mp_call_head_is_locked(mp_call_queue_t *cqp) { return !ml_get_interrupts_enabled() && - hw_lock_held((hw_lock_t)&cqp->lock); + hw_lock_held((hw_lock_t)&cqp->lock); } #endif @@ -1091,13 +1128,14 @@ mp_call_head_unlock(mp_call_queue_t *cqp, boolean_t intrs_enabled) static inline mp_call_t * mp_call_alloc(void) { - mp_call_t *callp = NULL; - boolean_t intrs_enabled; - mp_call_queue_t *cqp = &mp_cpus_call_freelist; + mp_call_t *callp = NULL; + boolean_t intrs_enabled; + mp_call_queue_t *cqp = &mp_cpus_call_freelist; intrs_enabled = mp_call_head_lock(cqp); - if (!queue_empty(&cqp->queue)) + if (!queue_empty(&cqp->queue)) { queue_remove_first(&cqp->queue, callp, typeof(callp), link); + } mp_call_head_unlock(cqp, intrs_enabled); return callp; @@ -1106,8 +1144,8 @@ mp_call_alloc(void) static inline void mp_call_free(mp_call_t *callp) { - boolean_t intrs_enabled; - mp_call_queue_t *cqp = &mp_cpus_call_freelist; + boolean_t intrs_enabled; + mp_call_queue_t *cqp = &mp_cpus_call_freelist; intrs_enabled = mp_call_head_lock(cqp); queue_enter_first(&cqp->queue, callp, typeof(callp), link); @@ -1117,18 +1155,19 @@ mp_call_free(mp_call_t *callp) static inline mp_call_t * mp_call_dequeue_locked(mp_call_queue_t *cqp) { - mp_call_t *callp = NULL; + mp_call_t *callp = NULL; assert(mp_call_head_is_locked(cqp)); - if (!queue_empty(&cqp->queue)) + if (!queue_empty(&cqp->queue)) { queue_remove_first(&cqp->queue, callp, typeof(callp), link); + } return callp; } static inline void mp_call_enqueue_locked( - mp_call_queue_t *cqp, - mp_call_t *callp) + mp_call_queue_t *cqp, + mp_call_t *callp) { queue_enter(&cqp->queue, callp, typeof(callp), link); } @@ -1137,7 +1176,7 @@ mp_call_enqueue_locked( static void mp_cpus_call_init(void) { - mp_call_queue_t *cqp = &mp_cpus_call_freelist; + mp_call_queue_t *cqp = &mp_cpus_call_freelist; DBG("mp_cpus_call_init()\n"); simple_lock_init(&cqp->lock, 0); @@ -1151,9 +1190,9 @@ mp_cpus_call_init(void) void mp_cpus_call_cpu_init(int cpu) { - int i; - mp_call_queue_t *cqp = &mp_cpus_call_head[cpu]; - mp_call_t *callp; + int i; + mp_call_queue_t *cqp = &mp_cpus_call_head[cpu]; + mp_call_t *callp; simple_lock_init(&cqp->lock, 0); queue_init(&cqp->queue); @@ -1172,10 +1211,10 @@ mp_cpus_call_cpu_init(int cpu) static void mp_cpus_call_action(void) { - mp_call_queue_t *cqp; - boolean_t intrs_enabled; - mp_call_t *callp; - mp_call_t call; + mp_call_queue_t *cqp; + boolean_t intrs_enabled; + mp_call_t *callp; + mp_call_t call; assert(!ml_get_interrupts_enabled()); cqp = &mp_cpus_call_head[cpu_number()]; @@ 
-1193,8 +1232,9 @@ mp_cpus_call_action(void) call.func(call.arg0, call.arg1); (void) mp_call_head_lock(cqp); } - if (call.maskp != NULL) + if (call.maskp != NULL) { i_bit_set(cpu_number(), call.maskp); + } } mp_call_head_unlock(cqp, intrs_enabled); } @@ -1207,34 +1247,34 @@ mp_cpus_call_action(void) * ASYNC: function call is queued to the specified cpus * waiting for all calls to complete in parallel before returning * NOSYNC: function calls are queued - * but we return before confirmation of calls completing. + * but we return before confirmation of calls completing. * The action function may be NULL. * The cpu mask may include the local cpu. Offline cpus are ignored. * The return value is the number of cpus on which the call was made or queued. */ cpu_t mp_cpus_call( - cpumask_t cpus, - mp_sync_t mode, - void (*action_func)(void *), - void *arg) + cpumask_t cpus, + mp_sync_t mode, + void (*action_func)(void *), + void *arg) { return mp_cpus_call1( - cpus, - mode, - (void (*)(void *,void *))action_func, - arg, - NULL, - NULL); + cpus, + mode, + (void (*)(void *, void *))action_func, + arg, + NULL, + NULL); } static void -mp_cpus_call_wait(boolean_t intrs_enabled, - cpumask_t cpus_called, - cpumask_t *cpus_responded) +mp_cpus_call_wait(boolean_t intrs_enabled, + cpumask_t cpus_called, + cpumask_t *cpus_responded) { - mp_call_queue_t *cqp; - uint64_t tsc_spin_start; + mp_call_queue_t *cqp; + uint64_t tsc_spin_start; assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); cqp = &mp_cpus_call_head[cpu_number()]; @@ -1243,46 +1283,48 @@ mp_cpus_call_wait(boolean_t intrs_enabled, while (*cpus_responded != cpus_called) { if (!intrs_enabled) { /* Sniffing w/o locking */ - if (!queue_empty(&cqp->queue)) + if (!queue_empty(&cqp->queue)) { mp_cpus_call_action(); + } cpu_signal_handler(NULL); } if (mp_spin_timeout(tsc_spin_start)) { - cpumask_t cpus_unresponsive; + cpumask_t cpus_unresponsive; cpus_unresponsive = cpus_called & ~(*cpus_responded); NMIPI_panic(cpus_unresponsive, CROSSCALL_TIMEOUT); panic("mp_cpus_call_wait() timeout, cpus: 0x%llx", - cpus_unresponsive); + cpus_unresponsive); } } } cpu_t mp_cpus_call1( - cpumask_t cpus, - mp_sync_t mode, - void (*action_func)(void *, void *), - void *arg0, - void *arg1, - cpumask_t *cpus_calledp) -{ - cpu_t cpu = 0; - boolean_t intrs_enabled = FALSE; - boolean_t call_self = FALSE; - cpumask_t cpus_called = 0; - cpumask_t cpus_responded = 0; - long cpus_call_count = 0; - uint64_t tsc_spin_start; - boolean_t topo_lock; + cpumask_t cpus, + mp_sync_t mode, + void (*action_func)(void *, void *), + void *arg0, + void *arg1, + cpumask_t *cpus_calledp) +{ + cpu_t cpu = 0; + boolean_t intrs_enabled = FALSE; + boolean_t call_self = FALSE; + cpumask_t cpus_called = 0; + cpumask_t cpus_responded = 0; + long cpus_call_count = 0; + uint64_t tsc_spin_start; + boolean_t topo_lock; KERNEL_DEBUG_CONSTANT( TRACE_MP_CPUS_CALL | DBG_FUNC_START, cpus, mode, VM_KERNEL_UNSLIDE(action_func), VM_KERNEL_UNSLIDE_OR_PERM(arg0), VM_KERNEL_UNSLIDE_OR_PERM(arg1)); if (!smp_initialized) { - if ((cpus & CPUMASK_SELF) == 0) + if ((cpus & CPUMASK_SELF) == 0) { goto out; + } if (action_func != NULL) { intrs_enabled = ml_set_interrupts_enabled(FALSE); action_func(arg0, arg1); @@ -1299,7 +1341,7 @@ mp_cpus_call1( * although an exception is made if we're calling only the master * processor since that always remains active. Note: this exception * is expected for longterm timer nosync cross-calls to the master cpu. 
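 * Concretely, topo_lock is taken only when cpus differs from
 * cpu_to_cpumask(master_cpu), i.e. whenever the target set could change
 * underneath the walk.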
- */ + */ mp_disable_preemption(); intrs_enabled = ml_get_interrupts_enabled(); topo_lock = (cpus != cpu_to_cpumask(master_cpu)); @@ -1309,8 +1351,9 @@ mp_cpus_call1( } for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) { if (((cpu_to_cpumask(cpu) & cpus) == 0) || - !cpu_is_running(cpu)) + !cpu_is_running(cpu)) { continue; + } tsc_spin_start = rdtsc64(); if (cpu == (cpu_t) cpu_number()) { /* @@ -1329,13 +1372,14 @@ mp_cpus_call1( /* * Here to queue a call to cpu and IPI. */ - mp_call_t *callp = NULL; - mp_call_queue_t *cqp = &mp_cpus_call_head[cpu]; - boolean_t intrs_inner; + mp_call_t *callp = NULL; + mp_call_queue_t *cqp = &mp_cpus_call_head[cpu]; + boolean_t intrs_inner; - queue_call: - if (callp == NULL) +queue_call: + if (callp == NULL) { callp = mp_call_alloc(); + } intrs_inner = mp_call_head_lock(cqp); if (callp == NULL) { mp_call_head_unlock(cqp, intrs_inner); @@ -1344,13 +1388,15 @@ mp_cpus_call1( cpu, 0, 0, 0, 0); if (!intrs_inner) { /* Sniffing w/o locking */ - if (!queue_empty(&cqp->queue)) + if (!queue_empty(&cqp->queue)) { mp_cpus_call_action(); + } handle_pending_TLB_flushes(); } - if (mp_spin_timeout(tsc_spin_start)) + if (mp_spin_timeout(tsc_spin_start)) { panic("mp_cpus_call1() timeout start: 0x%llx, cur: 0x%llx", - tsc_spin_start, rdtsc64()); + tsc_spin_start, rdtsc64()); + } goto queue_call; } callp->maskp = (mode == NOSYNC) ? NULL : &cpus_responded; @@ -1373,7 +1419,7 @@ mp_cpus_call1( } /* Call locally if mode not SYNC */ - if (mode != SYNC && call_self ) { + if (mode != SYNC && call_self) { KERNEL_DEBUG_CONSTANT( TRACE_MP_CPUS_CALL_LOCAL, VM_KERNEL_UNSLIDE(action_func), VM_KERNEL_UNSLIDE_OR_PERM(arg0), VM_KERNEL_UNSLIDE_OR_PERM(arg1), 0, 0); @@ -1385,20 +1431,22 @@ mp_cpus_call1( } /* For ASYNC, now wait for all signaled cpus to complete their calls */ - if (mode == ASYNC) + if (mode == ASYNC) { mp_cpus_call_wait(intrs_enabled, cpus_called, &cpus_responded); + } /* Safe to allow pre-emption now */ mp_enable_preemption(); out: - if (call_self){ + if (call_self) { cpus_called |= cpu_to_cpumask(cpu); cpus_call_count++; } - if (cpus_calledp) + if (cpus_calledp) { *cpus_calledp = cpus_called; + } KERNEL_DEBUG_CONSTANT( TRACE_MP_CPUS_CALL | DBG_FUNC_END, @@ -1411,75 +1459,78 @@ out: static void mp_broadcast_action(__unused void *null) { - /* call action function */ - if (mp_bc_action_func != NULL) - mp_bc_action_func(mp_bc_func_arg); + /* call action function */ + if (mp_bc_action_func != NULL) { + mp_bc_action_func(mp_bc_func_arg); + } - /* if we're the last one through, wake up the instigator */ - if (atomic_decl_and_test(&mp_bc_count, 1)) - thread_wakeup(((event_t)(uintptr_t) &mp_bc_count)); + /* if we're the last one through, wake up the instigator */ + if (atomic_decl_and_test(&mp_bc_count, 1)) { + thread_wakeup(((event_t)(uintptr_t) &mp_bc_count)); + } } /* * mp_broadcast() runs a given function on all active cpus. * The caller blocks until the functions has run on all cpus. - * The caller will also block if there is another pending braodcast. + * The caller will also block if there is another pending broadcast. 
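 * Completion is a simple countdown: mp_bc_count starts at the number of
 * participating cpus, each cpu decrements it in mp_broadcast_action(),
 * and the final decrementer wakes the instigator blocked in
 * assert_wait()/thread_block().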
*/ void mp_broadcast( - void (*action_func)(void *), - void *arg) -{ - if (!smp_initialized) { - if (action_func != NULL) - action_func(arg); - return; - } - - /* obtain broadcast lock */ - lck_mtx_lock(&mp_bc_lock); - - /* set static function pointers */ - mp_bc_action_func = action_func; - mp_bc_func_arg = arg; - - assert_wait((event_t)(uintptr_t)&mp_bc_count, THREAD_UNINT); - - /* - * signal other processors, which will call mp_broadcast_action() - */ - mp_bc_count = real_ncpus; /* assume max possible active */ - mp_bc_ncpus = mp_cpus_call(CPUMASK_OTHERS, NOSYNC, *mp_broadcast_action, NULL) + 1; - atomic_decl(&mp_bc_count, real_ncpus - mp_bc_ncpus); /* subtract inactive */ - - /* call executor function on this cpu */ - mp_broadcast_action(NULL); - - /* block for other cpus to have run action_func */ - if (mp_bc_ncpus > 1) - thread_block(THREAD_CONTINUE_NULL); - else - clear_wait(current_thread(), THREAD_AWAKENED); - - /* release lock */ - lck_mtx_unlock(&mp_bc_lock); + void (*action_func)(void *), + void *arg) +{ + if (!smp_initialized) { + if (action_func != NULL) { + action_func(arg); + } + return; + } + + /* obtain broadcast lock */ + lck_mtx_lock(&mp_bc_lock); + + /* set static function pointers */ + mp_bc_action_func = action_func; + mp_bc_func_arg = arg; + + assert_wait((event_t)(uintptr_t)&mp_bc_count, THREAD_UNINT); + + /* + * signal other processors, which will call mp_broadcast_action() + */ + mp_bc_count = real_ncpus; /* assume max possible active */ + mp_bc_ncpus = mp_cpus_call(CPUMASK_OTHERS, NOSYNC, *mp_broadcast_action, NULL) + 1; + atomic_decl(&mp_bc_count, real_ncpus - mp_bc_ncpus); /* subtract inactive */ + + /* call executor function on this cpu */ + mp_broadcast_action(NULL); + + /* block for other cpus to have run action_func */ + if (mp_bc_ncpus > 1) { + thread_block(THREAD_CONTINUE_NULL); + } else { + clear_wait(current_thread(), THREAD_AWAKENED); + } + + /* release lock */ + lck_mtx_unlock(&mp_bc_lock); } void mp_cpus_kick(cpumask_t cpus) { - cpu_t cpu; - boolean_t intrs_enabled = FALSE; + cpu_t cpu; + boolean_t intrs_enabled = FALSE; intrs_enabled = ml_set_interrupts_enabled(FALSE); mp_safe_spin_lock(&x86_topo_lock); for (cpu = 0; cpu < (cpu_t) real_ncpus; cpu++) { if ((cpu == (cpu_t) cpu_number()) - || ((cpu_to_cpumask(cpu) & cpus) == 0) - || !cpu_is_running(cpu)) - { - continue; + || ((cpu_to_cpumask(cpu) & cpus) == 0) + || !cpu_is_running(cpu)) { + continue; } lapic_send_ipi(cpu, LAPIC_VECTOR(KICK)); @@ -1492,7 +1543,7 @@ mp_cpus_kick(cpumask_t cpus) void i386_activate_cpu(void) { - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); assert(!ml_get_interrupts_enabled()); @@ -1504,17 +1555,17 @@ i386_activate_cpu(void) mp_safe_spin_lock(&x86_topo_lock); cdp->cpu_running = TRUE; started_cpu(); + pmap_tlbi_range(0, ~0ULL, true, 0); simple_unlock(&x86_topo_lock); - flush_tlb_raw(); } void i386_deactivate_cpu(void) { - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); assert(!ml_get_interrupts_enabled()); - + KERNEL_DEBUG_CONSTANT( TRACE_MP_CPU_DEACTIVATE | DBG_FUNC_START, 0, 0, 0, 0, 0); @@ -1541,8 +1592,9 @@ i386_deactivate_cpu(void) mp_disable_preemption(); ml_set_interrupts_enabled(TRUE); - while (cdp->cpu_signals && x86_lcpu()->rtcDeadline != EndOfAllTime) + while (cdp->cpu_signals && x86_lcpu()->rtcDeadline != EndOfAllTime) { cpu_pause(); + } /* * Ensure there's no remaining timer deadline set * - AICPM may have left one active. 
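For reference, NMIPI_panic() and mp_cpus_kick() above both walk a cpu set with the same paired-index idiom: cpu counts up while cpu_bit shifts left, one bit per processor. A minimal userland sketch of that idiom follows; cpumask_t here, MAX_CPUS_DEMO, and the printf() call are stand-ins for the kernel's types and the actual IPI send, so this is an illustration rather than kernel code:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cpumask_t;		/* mirrors the kernel's 64-bit mask */
#define MAX_CPUS_DEMO	8		/* stand-in for MAX_CPUS */

static void
visit_cpus(cpumask_t cpus)
{
	unsigned int cpu;
	cpumask_t cpu_bit;

	/* cpu indexes the processor while cpu_bit tracks its mask bit */
	for (cpu = 0, cpu_bit = 1; cpu < MAX_CPUS_DEMO; cpu++, cpu_bit <<= 1) {
		if ((cpus & cpu_bit) == 0) {
			continue;	/* cpu not in the target set */
		}
		printf("would signal cpu %u\n", cpu);	/* kernel: send the IPI */
	}
}

int
main(void)
{
	visit_cpus(0x5);	/* bits 0 and 2 -> cpus 0 and 2 */
	return 0;
}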
@@ -1557,22 +1609,22 @@ i386_deactivate_cpu(void) 0, 0, 0, 0, 0); } -int pmsafe_debug = 1; +int pmsafe_debug = 1; -#if MACH_KDP -volatile boolean_t mp_kdp_trap = FALSE; -volatile boolean_t mp_kdp_is_NMI = FALSE; -volatile unsigned long mp_kdp_ncpus; -boolean_t mp_kdp_state; +#if MACH_KDP +volatile boolean_t mp_kdp_trap = FALSE; +volatile boolean_t mp_kdp_is_NMI = FALSE; +volatile unsigned long mp_kdp_ncpus; +boolean_t mp_kdp_state; void mp_kdp_enter(boolean_t proceed_on_failure) { - unsigned int cpu; - unsigned int ncpus = 0; - unsigned int my_cpu; - uint64_t tsc_timeout; + unsigned int cpu; + unsigned int ncpus = 0; + unsigned int my_cpu; + uint64_t tsc_timeout; DBG("mp_kdp_enter()\n"); @@ -1601,7 +1653,7 @@ mp_kdp_enter(boolean_t proceed_on_failure) paniclog_append_noflush("mp_kdp_enter() can't get x86_topo_lock! Debugging anyway! #YOLO\n"); break; } - locked = simple_lock_try(&x86_topo_lock); + locked = simple_lock_try(&x86_topo_lock, LCK_GRP_NULL); if (!locked) { cpu_pause(); } @@ -1620,8 +1672,9 @@ mp_kdp_enter(boolean_t proceed_on_failure) } } - if (pmsafe_debug && !kdp_snapshot) + if (pmsafe_debug && !kdp_snapshot) { pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_SAFE); + } debugger_cpu = my_cpu; ncpus = 1; @@ -1635,8 +1688,9 @@ mp_kdp_enter(boolean_t proceed_on_failure) DBG("mp_kdp_enter() signaling other processors\n"); if (force_immediate_debugger_NMI == FALSE) { for (cpu = 0; cpu < real_ncpus; cpu++) { - if (cpu == my_cpu || !cpu_is_running(cpu)) + if (cpu == my_cpu || !cpu_is_running(cpu)) { continue; + } ncpus++; i386_signal_cpu(cpu, MP_KDP, ASYNC); } @@ -1672,13 +1726,15 @@ mp_kdp_enter(boolean_t proceed_on_failure) } if (mp_kdp_ncpus != ncpus) { unsigned int wait_cycles = 0; - if (proceed_on_failure) + if (proceed_on_failure) { paniclog_append_noflush("mp_kdp_enter() timed-out on cpu %d, NMI-ing\n", my_cpu); - else + } else { DBG("mp_kdp_enter() timed-out on cpu %d, NMI-ing\n", my_cpu); + } for (cpu = 0; cpu < real_ncpus; cpu++) { - if (cpu == my_cpu || !cpu_is_running(cpu)) + if (cpu == my_cpu || !cpu_is_running(cpu)) { continue; + } if (cpu_signal_pending(cpu, MP_KDP)) { cpu_datap(cpu)->cpu_NMI_acknowledged = FALSE; cpu_NMI_interrupt(cpu); @@ -1694,28 +1750,30 @@ mp_kdp_enter(boolean_t proceed_on_failure) if (mp_kdp_ncpus != ncpus) { paniclog_append_noflush("mp_kdp_enter() NMI pending on cpus:"); for (cpu = 0; cpu < real_ncpus; cpu++) { - if (cpu_is_running(cpu) && !cpu_datap(cpu)->cpu_NMI_acknowledged) + if (cpu_is_running(cpu) && !cpu_datap(cpu)->cpu_NMI_acknowledged) { paniclog_append_noflush(" %d", cpu); + } } paniclog_append_noflush("\n"); if (proceed_on_failure) { paniclog_append_noflush("mp_kdp_enter() timed-out during %s wait after NMI;" "expected %u acks but received %lu after %u loops in %llu ticks\n", - (locked ? "locked" : "unlocked"), ncpus, mp_kdp_ncpus, wait_cycles, LockTimeOutTSC); + (locked ? "locked" : "unlocked"), ncpus, mp_kdp_ncpus, wait_cycles, LockTimeOutTSC); } else { panic("mp_kdp_enter() timed-out during %s wait after NMI;" "expected %u acks but received %lu after %u loops in %llu ticks", - (locked ?
"locked" : "unlocked"), ncpus, mp_kdp_ncpus, wait_cycles, LockTimeOutTSC); } } } - } - else + } else { for (cpu = 0; cpu < real_ncpus; cpu++) { - if (cpu == my_cpu || !cpu_is_running(cpu)) + if (cpu == my_cpu || !cpu_is_running(cpu)) { continue; + } cpu_NMI_interrupt(cpu); } + } if (locked) { simple_unlock(&x86_topo_lock); @@ -1723,7 +1781,7 @@ mp_kdp_enter(boolean_t proceed_on_failure) DBG("mp_kdp_enter() %d processors done %s\n", (int)mp_kdp_ncpus, (mp_kdp_ncpus == ncpus) ? "OK" : "timed out"); - + postcode(MP_KDP_ENTER); } @@ -1735,53 +1793,59 @@ mp_kdp_all_cpus_halted() my_cpu = cpu_number(); ncpus = 1; /* current CPU */ for (cpu = 0; cpu < real_ncpus; cpu++) { - if (cpu == my_cpu || !cpu_is_running(cpu)) + if (cpu == my_cpu || !cpu_is_running(cpu)) { continue; + } ncpus++; } - return (mp_kdp_ncpus == ncpus); + return mp_kdp_ncpus == ncpus; } static boolean_t cpu_signal_pending(int cpu, mp_event_t event) { - volatile int *signals = &cpu_datap(cpu)->cpu_signals; + volatile int *signals = &cpu_datap(cpu)->cpu_signals; boolean_t retval = FALSE; - if (i_bit(event, signals)) + if (i_bit(event, signals)) { retval = TRUE; + } return retval; } -long kdp_x86_xcpu_invoke(const uint16_t lcpu, kdp_x86_xcpu_func_t func, - void *arg0, void *arg1) +long +kdp_x86_xcpu_invoke(const uint16_t lcpu, kdp_x86_xcpu_func_t func, + void *arg0, void *arg1) { - if (lcpu > (real_ncpus - 1)) + if (lcpu > (real_ncpus - 1)) { return -1; + } - if (func == NULL) + if (func == NULL) { return -1; + } kdp_xcpu_call_func.func = func; - kdp_xcpu_call_func.ret = -1; + kdp_xcpu_call_func.ret = -1; kdp_xcpu_call_func.arg0 = arg0; kdp_xcpu_call_func.arg1 = arg1; kdp_xcpu_call_func.cpu = lcpu; DBG("Invoking function %p on CPU %d\n", func, (int32_t)lcpu); - while (kdp_xcpu_call_func.cpu != KDP_XCPU_NONE) + while (kdp_xcpu_call_func.cpu != KDP_XCPU_NONE) { cpu_pause(); - return kdp_xcpu_call_func.ret; + } + return kdp_xcpu_call_func.ret; } static void kdp_x86_xcpu_poll(void) { if ((uint16_t)cpu_number() == kdp_xcpu_call_func.cpu) { - kdp_xcpu_call_func.ret = + kdp_xcpu_call_func.ret = kdp_xcpu_call_func.func(kdp_xcpu_call_func.arg0, - kdp_xcpu_call_func.arg1, - cpu_number()); + kdp_xcpu_call_func.arg1, + cpu_number()); kdp_xcpu_call_func.cpu = KDP_XCPU_NONE; } } @@ -1799,14 +1863,15 @@ mp_kdp_wait(boolean_t flush, boolean_t isNMI) atomic_incl((volatile long *)&mp_kdp_ncpus, 1); while (mp_kdp_trap || (isNMI == TRUE)) { - /* + /* * A TLB shootdown request may be pending--this would result * in the requesting processor waiting in PMAP_UPDATE_TLBS() * until this processor handles it. * Process it, so it can now enter mp_kdp_wait() */ - if (flush) + if (flush) { handle_pending_TLB_flushes(); + } kdp_x86_xcpu_poll(); cpu_pause(); @@ -1831,18 +1896,19 @@ mp_kdp_exit(void) /* Wait other processors to stop spinning. XXX needs timeout */ DBG("mp_kdp_exit() waiting for processors to resume\n"); while (mp_kdp_ncpus > 0) { - /* + /* * a TLB shootdown request may be pending... this would result in the requesting * processor waiting in PMAP_UPDATE_TLBS() until this processor deals with it. 
 * Process it, so it can now enter mp_kdp_wait() */ - handle_pending_TLB_flushes(); + handle_pending_TLB_flushes(); cpu_pause(); } - if (pmsafe_debug && !kdp_snapshot) - pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL); + if (pmsafe_debug && !kdp_snapshot) { + pmSafeMode(&current_cpu_datap()->lcpu, PM_SAFE_FL_NORMAL); + } debugger_exit_time = mach_absolute_time(); @@ -1851,27 +1917,28 @@ mp_kdp_exit(void) postcode(MP_KDP_EXIT); } -#endif /* MACH_KDP */ +#endif /* MACH_KDP */ boolean_t -mp_recent_debugger_activity(void) { +mp_recent_debugger_activity(void) +{ uint64_t abstime = mach_absolute_time(); - return (((abstime - debugger_entry_time) < LastDebuggerEntryAllowance) || - ((abstime - debugger_exit_time) < LastDebuggerEntryAllowance)); + return ((abstime - debugger_entry_time) < LastDebuggerEntryAllowance) || + ((abstime - debugger_exit_time) < LastDebuggerEntryAllowance); } /*ARGSUSED*/ void init_ast_check( - __unused processor_t processor) + __unused processor_t processor) { } void cause_ast_check( - processor_t processor) + processor_t processor) { - int cpu = processor->cpu_id; + int cpu = processor->cpu_id; if (cpu != cpu_number()) { i386_signal_cpu(cpu, MP_AST, ASYNC); @@ -1883,7 +1950,7 @@ void slave_machine_init(void *param) { /* - * Here in process context, but with interrupts disabled. + * Here in process context, but with interrupts disabled. */ DBG("slave_machine_init() CPU%d\n", get_cpu_number()); @@ -1893,11 +1960,12 @@ slave_machine_init(void *param) */ clock_init(); } - cpu_machine_init(); /* Interrupts enabled hereafter */ + cpu_machine_init(); /* Interrupts enabled hereafter */ } #undef cpu_number -int cpu_number(void) +int +cpu_number(void) { return get_cpu_number(); } @@ -1921,7 +1989,7 @@ grab_warm_timer_call() timer_call_t call = NULL; x = splsched(); - simple_lock(&cpu_warm_lock); + simple_lock(&cpu_warm_lock, LCK_GRP_NULL); if (!queue_empty(&cpu_warm_call_list)) { call = (timer_call_t) dequeue_head(&cpu_warm_call_list); } @@ -1937,7 +2005,7 @@ free_warm_timer_call(timer_call_t call) spl_t x; x = splsched(); - simple_lock(&cpu_warm_lock); + simple_lock(&cpu_warm_lock, LCK_GRP_NULL); enqueue_head(&cpu_warm_call_list, (queue_entry_t)call); simple_unlock(&cpu_warm_lock); splx(x); @@ -1948,8 +2016,8 @@ free_warm_timer_call(timer_call_t call) */ static void cpu_warm_timer_call_func( - call_entry_param_t p0, - __unused call_entry_param_t p1) + call_entry_param_t p0, + __unused call_entry_param_t p1) { free_warm_timer_call((timer_call_t)p0); return; @@ -1960,7 +2028,7 @@ cpu_warm_timer_call_func( */ static void _cpu_warm_setup( - void *arg) + void *arg) { cpu_warm_data_t cwdp = (cpu_warm_data_t)arg; @@ -1975,7 +2043,7 @@ _cpu_warm_setup( */ kern_return_t ml_interrupt_prewarm( - uint64_t deadline) + uint64_t deadline) { struct cpu_warm_data cwd; timer_call_t call; @@ -1985,8 +2053,8 @@ ml_interrupt_prewarm( panic("%s: Interrupts disabled?\n", __FUNCTION__); } - /* - * If the platform doesn't need our help, say that we succeeded. + /* + * If the platform doesn't need our help, say that we succeeded.
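 * Otherwise a timer_call is taken from the pre-allocated warm pool via
 * grab_warm_timer_call() above, armed for the requested deadline through
 * _cpu_warm_setup(), and handed back to the pool by
 * cpu_warm_timer_call_func() when it fires.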
*/ if (!ml_get_interrupt_prewake_applicable()) { return KERN_SUCCESS; @@ -2021,10 +2089,10 @@ ml_interrupt_prewarm( void kernel_spin(uint64_t spin_ns) { - boolean_t istate; - uint64_t spin_abs; - uint64_t deadline; - cpu_data_t *cdp; + boolean_t istate; + uint64_t spin_abs; + uint64_t deadline; + cpu_data_t *cdp; kprintf("kernel_spin(%llu) spinning uninterruptibly\n", spin_ns); istate = ml_set_interrupts_enabled(FALSE); @@ -2036,8 +2104,9 @@ kernel_spin(uint64_t spin_ns) cdp->cpu_int_state = (void *) USER_STATE(current_thread()); deadline = mach_absolute_time() + spin_ns; - while (mach_absolute_time() < deadline) + while (mach_absolute_time() < deadline) { cpu_pause(); + } cdp->cpu_int_event_time = 0; cdp->cpu_int_state = NULL; @@ -2055,17 +2124,18 @@ kernel_spin(uint64_t spin_ns) void mp_interrupt_watchdog(void) { - cpu_t cpu; - boolean_t intrs_enabled = FALSE; - uint16_t cpu_int_num; - uint64_t cpu_int_event_time; - uint64_t cpu_rip; - uint64_t cpu_int_duration; - uint64_t now; - x86_saved_state_t *cpu_int_state; - - if (__improbable(!mp_interrupt_watchdog_enabled)) + cpu_t cpu; + boolean_t intrs_enabled = FALSE; + uint16_t cpu_int_num; + uint64_t cpu_int_event_time; + uint64_t cpu_rip; + uint64_t cpu_int_duration; + uint64_t now; + x86_saved_state_t *cpu_int_state; + + if (__improbable(!mp_interrupt_watchdog_enabled)) { return; + } intrs_enabled = ml_set_interrupts_enabled(FALSE); now = mach_absolute_time(); @@ -2074,20 +2144,24 @@ mp_interrupt_watchdog(void) * check all other processors for long outstanding interrupt handling. */ for (cpu = 0; - cpu < (cpu_t) real_ncpus && !machine_timeout_suspended(); - cpu++) { + cpu < (cpu_t) real_ncpus && !machine_timeout_suspended(); + cpu++) { if ((cpu == (cpu_t) cpu_number()) || - (!cpu_is_running(cpu))) + (!cpu_is_running(cpu))) { continue; + } cpu_int_event_time = cpu_datap(cpu)->cpu_int_event_time; - if (cpu_int_event_time == 0) + if (cpu_int_event_time == 0) { continue; - if (__improbable(now < cpu_int_event_time)) - continue; /* skip due to inter-processor skew */ + } + if (__improbable(now < cpu_int_event_time)) { + continue; /* skip due to inter-processor skew */ + } cpu_int_state = cpu_datap(cpu)->cpu_int_state; - if (__improbable(cpu_int_state == NULL)) + if (__improbable(cpu_int_state == NULL)) { /* The interrupt may have been dismissed */ continue; + } /* Here with a cpu handling an interrupt */ @@ -2098,8 +2172,8 @@ mp_interrupt_watchdog(void) vector_timed_out = cpu_int_num; NMIPI_panic(cpu_to_cpumask(cpu), INTERRUPT_WATCHDOG); panic("Interrupt watchdog, " - "cpu: %d interrupt: 0x%x time: %llu..%llu state: %p RIP: 0x%llx", - cpu, cpu_int_num, cpu_int_event_time, now, cpu_int_state, cpu_rip); + "cpu: %d interrupt: 0x%x time: %llu..%llu state: %p RIP: 0x%llx", + cpu, cpu_int_num, cpu_int_event_time, now, cpu_int_state, cpu_rip); /* NOT REACHED */ } else if (__improbable(cpu_int_duration > (uint64_t) std_quantum)) { mp_interrupt_watchdog_events++; @@ -2107,8 +2181,8 @@ mp_interrupt_watchdog(void) cpu_rip = saved_state64(cpu_int_state)->isf.rip; ml_set_interrupts_enabled(intrs_enabled); printf("Interrupt watchdog, " - "cpu: %d interrupt: 0x%x time: %llu..%llu RIP: 0x%llx\n", - cpu, cpu_int_num, cpu_int_event_time, now, cpu_rip); + "cpu: %d interrupt: 0x%x time: %llu..%llu RIP: 0x%llx\n", + cpu, cpu_int_num, cpu_int_event_time, now, cpu_rip); return; } } diff --git a/osfmk/i386/mp.h b/osfmk/i386/mp.h index 705f41c18..e63c9f4c4 100644 --- a/osfmk/i386/mp.h +++ b/osfmk/i386/mp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ /* */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _I386_MP_H_ #define _I386_MP_H_ @@ -66,9 +66,9 @@ #include #include -#define MAX_CPUS 64 /* 8 * sizeof(cpumask_t) */ +#define MAX_CPUS 64 /* 8 * sizeof(cpumask_t) */ -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include #include #include @@ -88,48 +88,48 @@ extern void smp_init(void); extern void cpu_interrupt(int cpu); __END_DECLS -extern unsigned int real_ncpus; /* real number of cpus */ -extern unsigned int max_ncpus; /* max number of cpus */ -decl_simple_lock_data(extern,kdb_lock) /* kdb lock */ +extern unsigned int real_ncpus; /* real number of cpus */ +extern unsigned int max_ncpus; /* max number of cpus */ +decl_simple_lock_data(extern, kdb_lock) /* kdb lock */ __BEGIN_DECLS -extern void console_init(void); -extern void *console_cpu_alloc(boolean_t boot_cpu); -extern void console_cpu_free(void *console_buf); +extern void console_init(void); +extern void *console_cpu_alloc(boolean_t boot_cpu); +extern void console_cpu_free(void *console_buf); -extern int kdb_cpu; /* current cpu running kdb */ -extern int kdb_debug; -extern int kdb_active[]; +extern int kdb_cpu; /* current cpu running kdb */ +extern int kdb_debug; +extern int kdb_active[]; -extern volatile boolean_t mp_kdp_trap; -extern volatile boolean_t mp_kdp_is_NMI; -extern volatile boolean_t force_immediate_debugger_NMI; +extern volatile boolean_t mp_kdp_trap; +extern volatile boolean_t mp_kdp_is_NMI; +extern volatile boolean_t force_immediate_debugger_NMI; extern volatile boolean_t pmap_tlb_flush_timeout; extern volatile usimple_lock_t spinlock_timed_out; extern volatile uint32_t spinlock_owner_cpu; extern uint32_t spinlock_timeout_NMI(uintptr_t thread_addr); -extern uint64_t LastDebuggerEntryAllowance; +extern uint64_t LastDebuggerEntryAllowance; -extern void mp_kdp_enter(boolean_t proceed_on_failure); -extern void mp_kdp_exit(void); -extern boolean_t mp_kdp_all_cpus_halted(void); +extern void mp_kdp_enter(boolean_t proceed_on_failure); +extern void mp_kdp_exit(void); +extern boolean_t mp_kdp_all_cpus_halted(void); -extern boolean_t mp_recent_debugger_activity(void); -extern void kernel_spin(uint64_t spin_ns); +extern boolean_t mp_recent_debugger_activity(void); +extern void kernel_spin(uint64_t spin_ns); /* * All cpu rendezvous: */ extern void mp_rendezvous( - void (*setup_func)(void *), - void (*action_func)(void *), - void (*teardown_func)(void *), - void *arg); + void (*setup_func)(void *), + void (*action_func)(void *), + void (*teardown_func)(void *), + void *arg); extern void mp_rendezvous_no_intrs( - void (*action_func)(void *), - void *arg); + void (*action_func)(void *), + void *arg); extern void mp_rendezvous_break_lock(void); extern void mp_rendezvous_lock(void); extern void mp_rendezvous_unlock(void); @@ -140,15 +140,15 @@ extern void mp_rendezvous_unlock(void); * run action_func: */ extern void mp_broadcast( - void (*action_func)(void *), - void *arg); + void (*action_func)(void *), + void *arg); #if MACH_KDP typedef long (*kdp_x86_xcpu_func_t) (void *arg0, void *arg1, uint16_t lcpu); -extern long kdp_x86_xcpu_invoke(const uint16_t lcpu, - kdp_x86_xcpu_func_t func, - void *arg0, void *arg1); -typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t; +extern long kdp_x86_xcpu_invoke(const uint16_t lcpu, + kdp_x86_xcpu_func_t func, + void *arg0, void *arg1); +typedef enum {KDP_XCPU_NONE = 0xffff, KDP_CURRENT_LCPU = 0xfffe} kdp_cpu_t; #endif typedef uint32_t cpu_t; @@ -158,9 +158,9 @@ cpu_to_cpumask(cpu_t cpu) { return (cpu < MAX_CPUS) ? 
(1ULL << cpu) : 0; } -#define CPUMASK_ALL 0xffffffffffffffffULL -#define CPUMASK_SELF cpu_to_cpumask(cpu_number()) -#define CPUMASK_OTHERS (CPUMASK_ALL & ~CPUMASK_SELF) +#define CPUMASK_ALL 0xffffffffffffffffULL +#define CPUMASK_SELF cpu_to_cpumask(cpu_number()) +#define CPUMASK_OTHERS (CPUMASK_ALL & ~CPUMASK_SELF) /* Initialation routing called at processor registration */ extern void mp_cpus_call_cpu_init(int cpu); @@ -170,25 +170,25 @@ extern void mp_cpus_call_cpu_init(int cpu); * The mask may include the local cpu. * If the mode is: * - ASYNC: other cpus make their calls in parallel - * - SYNC: the calls are performed serially in logical cpu order - * - NOSYNC: the calls are queued + * - SYNC: the calls are performed serially in logical cpu order + * - NOSYNC: the calls are queued * Unless the mode is NOSYNC, mp_cpus_call() returns when the function has been * called on all specified cpus. * The return value is the number of cpus where the call was made or queued. * The action function is called with interrupts disabled. */ extern cpu_t mp_cpus_call( - cpumask_t cpus, - mp_sync_t mode, - void (*action_func)(void *), - void *arg); + cpumask_t cpus, + mp_sync_t mode, + void (*action_func)(void *), + void *arg); extern cpu_t mp_cpus_call1( - cpumask_t cpus, - mp_sync_t mode, - void (*action_func)(void *, void*), - void *arg0, - void *arg1, - cpumask_t *cpus_calledp); + cpumask_t cpus, + mp_sync_t mode, + void (*action_func)(void *, void*), + void *arg0, + void *arg1, + cpumask_t *cpus_calledp); typedef enum { NONE = 0, @@ -213,73 +213,74 @@ __END_DECLS #if MP_DEBUG typedef struct { - uint64_t time; - int cpu; - mp_event_t event; + uint64_t time; + int cpu; + mp_event_t event; } cpu_signal_event_t; -#define LOG_NENTRIES 100 +#define LOG_NENTRIES 100 typedef struct { - uint64_t count[MP_LAST]; - int next_entry; - cpu_signal_event_t entry[LOG_NENTRIES]; + uint64_t count[MP_LAST]; + int next_entry; + cpu_signal_event_t entry[LOG_NENTRIES]; } cpu_signal_event_log_t; -extern cpu_signal_event_log_t *cpu_signal[]; -extern cpu_signal_event_log_t *cpu_handle[]; - -#define DBGLOG(log,_cpu,_event) { \ - boolean_t spl = ml_set_interrupts_enabled(FALSE); \ - cpu_signal_event_log_t *logp = log[cpu_number()]; \ - int next = logp->next_entry; \ - cpu_signal_event_t *eventp = &logp->entry[next]; \ - \ - logp->count[_event]++; \ - \ - eventp->time = rdtsc64(); \ - eventp->cpu = _cpu; \ - eventp->event = _event; \ - if (next == (LOG_NENTRIES - 1)) \ - logp->next_entry = 0; \ - else \ - logp->next_entry++; \ - \ - (void) ml_set_interrupts_enabled(spl); \ +extern cpu_signal_event_log_t *cpu_signal[]; +extern cpu_signal_event_log_t *cpu_handle[]; + +#define DBGLOG(log, _cpu, _event) { \ + boolean_t spl = ml_set_interrupts_enabled(FALSE); \ + cpu_signal_event_log_t *logp = log[cpu_number()]; \ + int next = logp->next_entry; \ + cpu_signal_event_t *eventp = &logp->entry[next]; \ + \ + logp->count[_event]++; \ + \ + eventp->time = rdtsc64(); \ + eventp->cpu = _cpu; \ + eventp->event = _event; \ + if (next == (LOG_NENTRIES - 1)) \ + logp->next_entry = 0; \ + else \ + logp->next_entry++; \ + \ + (void) ml_set_interrupts_enabled(spl); \ } -#define DBGLOG_CPU_INIT(cpu) { \ - cpu_signal_event_log_t **sig_logpp = &cpu_signal[cpu]; \ - cpu_signal_event_log_t **hdl_logpp = &cpu_handle[cpu]; \ - \ - if (*sig_logpp == NULL && \ - kmem_alloc(kernel_map, \ - (vm_offset_t *) sig_logpp, \ - sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\ - panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\ - bzero(*sig_logpp, 
sizeof(cpu_signal_event_log_t)); \ - if (*hdl_logpp == NULL && \ - kmem_alloc(kernel_map, \ - (vm_offset_t *) hdl_logpp, \ - sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\ - panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\ - bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t)); \ +#define DBGLOG_CPU_INIT(cpu) { \ + cpu_signal_event_log_t **sig_logpp = &cpu_signal[cpu]; \ + cpu_signal_event_log_t **hdl_logpp = &cpu_handle[cpu]; \ + \ + if (*sig_logpp == NULL && \ + kmem_alloc(kernel_map, \ + (vm_offset_t *) sig_logpp, \ + sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\ + panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\ + bzero(*sig_logpp, sizeof(cpu_signal_event_log_t)); \ + if (*hdl_logpp == NULL && \ + kmem_alloc(kernel_map, \ + (vm_offset_t *) hdl_logpp, \ + sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS)\ + panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\ + bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t)); \ } -#else /* MP_DEBUG */ -#define DBGLOG(log,_cpu,_event) +#else /* MP_DEBUG */ +#define DBGLOG(log, _cpu, _event) #define DBGLOG_CPU_INIT(cpu) -#endif /* MP_DEBUG */ +#endif /* MP_DEBUG */ -#endif /* ASSEMBLER */ +#endif /* ASSEMBLER */ #ifdef ASSEMBLER -#define i_bit(bit, word) ((long)(*(word)) & (1L << (bit))) +#define i_bit(bit, word) ((long)(*(word)) & (1L << (bit))) #else -__attribute__((always_inline)) static inline long -i_bit_impl(long word, long bit) { +__attribute__((always_inline)) static inline long +i_bit_impl(long word, long bit) +{ long bitmask = 1L << bit; return word & bitmask; } -#define i_bit(bit, word) i_bit_impl((long)(*(word)), bit) +#define i_bit(bit, word) i_bit_impl((long)(*(word)), bit) #endif diff --git a/osfmk/i386/mp_desc.c b/osfmk/i386/mp_desc.c index 78c9e11d0..ad97efdaa 100644 --- a/osfmk/i386/mp_desc.c +++ b/osfmk/i386/mp_desc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. 
- * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -89,14 +89,14 @@ #define U_INTR_GATE (ACC_P|ACC_PL_U|ACC_INTR_GATE) // Declare macros that will declare the externs -#define TRAP(n, name) extern void *name ; -#define TRAP_ERR(n, name) extern void *name ; -#define TRAP_SPC(n, name) extern void *name ; -#define TRAP_IST1(n, name) extern void *name ; -#define TRAP_IST2(n, name) extern void *name ; -#define INTERRUPT(n) extern void *_intr_ ## n ; -#define USER_TRAP(n, name) extern void *name ; -#define USER_TRAP_SPC(n, name) extern void *name ; +#define TRAP(n, name) extern void *name ; +#define TRAP_ERR(n, name) extern void *name ; +#define TRAP_SPC(n, name) extern void *name ; +#define TRAP_IST1(n, name) extern void *name ; +#define TRAP_IST2(n, name) extern void *name ; +#define INTERRUPT(n) extern void *_intr_ ## n ; +#define USER_TRAP(n, name) extern void *name ; +#define USER_TRAP_SPC(n, name) extern void *name ; // Include the table to declare the externs #include "../x86_64/idt_table.h" @@ -111,68 +111,68 @@ #undef USER_TRAP #undef USER_TRAP_SPC -#define TRAP(n, name) \ - [n] = { \ - (uintptr_t)&name, \ - KERNEL64_CS, \ - 0, \ - K_INTR_GATE, \ - 0 \ +#define TRAP(n, name) \ + [n] = { \ + (uintptr_t)&name, \ + KERNEL64_CS, \ + 0, \ + K_INTR_GATE, \ + 0 \ }, #define TRAP_ERR TRAP #define TRAP_SPC TRAP #define TRAP_IST1(n, name) \ - [n] = { \ - (uintptr_t)&name, \ - KERNEL64_CS, \ - 1, \ - K_INTR_GATE, \ - 0 \ + [n] = { \ + (uintptr_t)&name, \ + KERNEL64_CS, \ + 1, \ + K_INTR_GATE, \ + 0 \ }, #define TRAP_IST2(n, name) \ - [n] = { \ - (uintptr_t)&name, \ - KERNEL64_CS, \ - 2, \ - K_INTR_GATE, \ - 0 \ + [n] = { \ + (uintptr_t)&name, \ + KERNEL64_CS, \ + 2, \ + K_INTR_GATE, \ + 0 \ }, #define INTERRUPT(n) \ - [n] = { \ - (uintptr_t)&_intr_ ## n,\ - KERNEL64_CS, \ - 0, \ - K_INTR_GATE, \ - 0 \ + [n] = { \ + (uintptr_t)&_intr_ ## n,\ + KERNEL64_CS, \ + 0, \ + K_INTR_GATE, \ + 0 \ }, #define USER_TRAP(n, name) \ - [n] = { \ - (uintptr_t)&name, \ - KERNEL64_CS, \ - 0, \ - U_INTR_GATE, \ - 0 \ + [n] = { \ + (uintptr_t)&name, \ + KERNEL64_CS, \ + 0, \ + U_INTR_GATE, \ + 0 \ }, #define USER_TRAP_SPC USER_TRAP // Declare the table using the macros we just set up struct fake_descriptor64 master_idt64[IDTSZ] - __attribute__ ((section("__HIB,__desc"))) - __attribute__ ((aligned(PAGE_SIZE))) = { +__attribute__ ((section("__HIB,__desc"))) +__attribute__ ((aligned(PAGE_SIZE))) = { #include "../x86_64/idt_table.h" }; /* * First cpu`s interrupt stack. */ -extern uint32_t low_intstack[]; /* bottom */ -extern uint32_t low_eintstack[]; /* top */ +extern uint32_t low_intstack[]; /* bottom */ +extern uint32_t low_eintstack[]; /* top */ /* * Per-cpu data area pointers. 
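 * Indexed by cpu number, with slot 0 statically reserved for the boot cpu.
 * A lookup is a plain array reference (sketch; the names are from this file
 * and the printed fields mirror the kprintf() in cpu_data_alloc() below):
 *
 *	cpu_data_t *cdp = cpu_data_ptr[cpu];	// valid for cpu < real_ncpus
 *	kprintf("cpu %d int_stack: 0x%lx-0x%lx\n", cdp->cpu_number,
 *	    (long)(cdp->cpu_int_stack_top - INTSTACK_SIZE),
 *	    (long)cdp->cpu_int_stack_top);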
@@ -186,11 +186,11 @@ cpu_data_t scdatas[MAX_CPUS] __attribute__((aligned(64))) = { }; cpu_data_t *cpu_data_master = &scdatas[0]; -cpu_data_t *cpu_data_ptr[MAX_CPUS] = { [0] = &scdatas[0] }; +cpu_data_t *cpu_data_ptr[MAX_CPUS] = {[0] = &scdatas[0] }; -decl_simple_lock_data(,ncpus_lock); /* protects real_ncpus */ -unsigned int real_ncpus = 1; -unsigned int max_ncpus = MAX_CPUS; +decl_simple_lock_data(, ncpus_lock); /* protects real_ncpus */ +unsigned int real_ncpus = 1; +unsigned int max_ncpus = MAX_CPUS; extern void hi64_sysenter(void); extern void hi64_syscall(void); @@ -225,9 +225,9 @@ cldt_t *dyn_ldts; */ struct fake_descriptor64 kernel_ldt_desc64 = { 0, - LDTSZ_MIN*sizeof(struct fake_descriptor)-1, + LDTSZ_MIN*sizeof(struct fake_descriptor) - 1, 0, - ACC_P|ACC_PL_K|ACC_LDT, + ACC_P | ACC_PL_K | ACC_LDT, 0 }; @@ -237,9 +237,9 @@ struct fake_descriptor64 kernel_ldt_desc64 = { */ struct fake_descriptor64 kernel_tss_desc64 = { 0, - sizeof(struct x86_64_tss)-1, + sizeof(struct x86_64_tss) - 1, 0, - ACC_P|ACC_PL_K|ACC_TSS, + ACC_P | ACC_PL_K | ACC_TSS, 0 }; @@ -274,8 +274,8 @@ struct fake_descriptor64 kernel_tss_desc64 = { * bytes 6..7 offset 31..16 */ void -fix_desc(void *d, int num_desc) { - //early_kprintf("fix_desc(%x, %x)\n", d, num_desc); +fix_desc(void *d, int num_desc) +{ uint8_t *desc = (uint8_t*) d; do { @@ -284,30 +284,29 @@ fix_desc(void *d, int num_desc) { uint16_t selector; uint8_t wordcount; uint8_t acc; - + offset = *((uint32_t*)(desc)); - selector = *((uint32_t*)(desc+4)); + selector = *((uint32_t*)(desc + 4)); wordcount = desc[6] >> 4; acc = desc[7]; *((uint16_t*)desc) = offset & 0xFFFF; - *((uint16_t*)(desc+2)) = selector; + *((uint16_t*)(desc + 2)) = selector; desc[4] = wordcount; desc[5] = acc; - *((uint16_t*)(desc+6)) = offset >> 16; - + *((uint16_t*)(desc + 6)) = offset >> 16; } else { /* descriptor */ uint32_t base; uint16_t limit; uint8_t acc1, acc2; base = *((uint32_t*)(desc)); - limit = *((uint16_t*)(desc+4)); + limit = *((uint16_t*)(desc + 4)); acc2 = desc[6]; acc1 = desc[7]; *((uint16_t*)(desc)) = limit; - *((uint16_t*)(desc+2)) = base & 0xFFFF; + *((uint16_t*)(desc + 2)) = base & 0xFFFF; desc[4] = (base >> 16) & 0xFF; desc[5] = acc1; desc[6] = acc2; @@ -320,15 +319,15 @@ fix_desc(void *d, int num_desc) { void fix_desc64(void *descp, int count) { - struct fake_descriptor64 *fakep; + struct fake_descriptor64 *fakep; union { - struct real_gate64 gate; - struct real_descriptor64 desc; - } real; - int i; + struct real_gate64 gate; + struct real_descriptor64 desc; + } real; + int i; fakep = (struct fake_descriptor64 *) descp; - + for (i = 0; i < count; i++, fakep++) { /* * Construct the real decriptor locally. 
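 *
 * For the gate cases below, the packed 64-bit offset of the fake
 * descriptor is sliced into three real-gate fields (offset_low16 is the
 * assumed name of the low half, set in the elided lines of this switch):
 *
 *	offset_low16  = fakep->offset64 & 0xFFFF;          // bits  0..15
 *	offset_high16 = (fakep->offset64 >> 16) & 0xFFFF;  // bits 16..31
 *	offset_top32  = (uint32_t)(fakep->offset64 >> 32); // bits 32..63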
@@ -346,10 +345,10 @@ fix_desc64(void *descp, int count) real.gate.selector16 = fakep->lim_or_seg & 0xFFFF; real.gate.IST = fakep->size_or_IST & 0x7; real.gate.access8 = fakep->access; - real.gate.offset_high16 = (uint16_t)((fakep->offset64>>16) & 0xFFFF); - real.gate.offset_top32 = (uint32_t)(fakep->offset64>>32); + real.gate.offset_high16 = (uint16_t)((fakep->offset64 >> 16) & 0xFFFF); + real.gate.offset_top32 = (uint32_t)(fakep->offset64 >> 32); break; - default: /* Otherwise */ + default: /* Otherwise */ real.desc.limit_low16 = fakep->lim_or_seg & 0xFFFF; real.desc.base_low16 = (uint16_t)(fakep->offset64 & 0xFFFF); real.desc.base_med8 = (uint8_t)((fakep->offset64 >> 16) & 0xFF); @@ -357,7 +356,7 @@ fix_desc64(void *descp, int count) real.desc.limit_high4 = (fakep->lim_or_seg >> 16) & 0xFF; real.desc.granularity4 = fakep->size_or_IST; real.desc.base_high8 = (uint8_t)((fakep->offset64 >> 24) & 0xFF); - real.desc.base_top32 = (uint32_t)(fakep->offset64>>32); + real.desc.base_top32 = (uint32_t)(fakep->offset64 >> 32); } /* @@ -371,7 +370,7 @@ extern unsigned mldtsz; void cpu_desc_init(cpu_data_t *cdp) { - cpu_desc_index_t *cdi = &cdp->cpu_desc_index; + cpu_desc_index_t *cdi = &cdp->cpu_desc_index; if (cdp == cpu_data_master) { /* @@ -387,18 +386,18 @@ cpu_desc_init(cpu_data_t *cdp) cdi->cdi_gdtb.ptr = (void *)&master_gdt; cdi->cdi_idtu.ptr = (void *)DBLMAP((uintptr_t) &master_idt64); cdi->cdi_idtb.ptr = (void *)((uintptr_t) &master_idt64); - cdi->cdi_ldtu = (struct fake_descriptor *) (void *) DBLMAP((uintptr_t)&master_ldt[0]); - cdi->cdi_ldtb = (struct fake_descriptor *) (void *) &master_ldt[0]; + cdi->cdi_ldtu = (struct real_descriptor *)DBLMAP((uintptr_t)&master_ldt[0]); + cdi->cdi_ldtb = &master_ldt[0]; /* Replace the expanded LDTs and TSS slots in the GDT */ kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu; *(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] = - kernel_ldt_desc64; + kernel_ldt_desc64; *(struct fake_descriptor64 *) &master_gdt[sel_idx(USER_LDT)] = - kernel_ldt_desc64; + kernel_ldt_desc64; kernel_tss_desc64.offset64 = (uintptr_t) DBLMAP(&master_ktss64); *(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] = - kernel_tss_desc64; + kernel_tss_desc64; /* Fix up the expanded descriptors for 64-bit. 
*/ fix_desc64((void *) &master_idt64, IDTSZ); @@ -411,8 +410,8 @@ cpu_desc_init(cpu_data_t *cdp) */ master_ktss64.ist2 = (uintptr_t) low_eintstack; master_ktss64.ist1 = (uintptr_t) low_eintstack - sizeof(x86_64_intr_stack_frame_t); - } else if (cdi->cdi_ktssu == NULL) { /* Skipping re-init on wake */ - cpu_desc_table64_t *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep; + } else if (cdi->cdi_ktssu == NULL) { /* Skipping re-init on wake */ + cpu_desc_table64_t *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep; cdi->cdi_idtu.ptr = (void *)DBLMAP((uintptr_t) &master_idt64); @@ -439,17 +438,17 @@ cpu_desc_init(cpu_data_t *cdp) */ kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu; *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] = - kernel_ldt_desc64; + kernel_ldt_desc64; fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1); kernel_ldt_desc64.offset64 = (uintptr_t) cdi->cdi_ldtu; *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] = - kernel_ldt_desc64; + kernel_ldt_desc64; fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1); kernel_tss_desc64.offset64 = (uintptr_t) cdi->cdi_ktssu; *(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] = - kernel_tss_desc64; + kernel_tss_desc64; fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1); /* Set (zeroed) fault stack as IST1, NMI intr stack IST2 */ @@ -461,13 +460,14 @@ cpu_desc_init(cpu_data_t *cdp) } /* Require that the top of the sysenter stack is 16-byte aligned */ - if ((cdi->cdi_sstku % 16) != 0) + if ((cdi->cdi_sstku % 16) != 0) { panic("cpu_desc_init() sysenter stack not 16-byte aligned"); + } } void cpu_desc_load(cpu_data_t *cdp) { - cpu_desc_index_t *cdi = &cdp->cpu_desc_index; + cpu_desc_index_t *cdi = &cdp->cpu_desc_index; postcode(CPU_DESC_LOAD_ENTRY); @@ -485,7 +485,7 @@ cpu_desc_load(cpu_data_t *cdp) gdt_desc_p(KERNEL_TSS)->access &= ~ACC_TSS_BUSY; /* Load the GDT, LDT, IDT and TSS */ - cdi->cdi_gdtb.size = sizeof(struct real_descriptor)*GDTSZ - 1; + cdi->cdi_gdtb.size = sizeof(struct real_descriptor) * GDTSZ - 1; cdi->cdi_gdtu.size = cdi->cdi_gdtb.size; cdi->cdi_idtb.size = 0x1000 + cdp->cpu_number; cdi->cdi_idtu.size = cdi->cdi_idtb.size; @@ -500,7 +500,7 @@ cpu_desc_load(cpu_data_t *cdp) set_tr(KERNEL_TSS); #if GPROF // Hack to enable mcount to work on K64 - __asm__ volatile("mov %0, %%gs" : : "rm" ((unsigned short)(KERNEL_DS))); + __asm__ volatile ("mov %0, %%gs" : : "rm" ((unsigned short)(KERNEL_DS))); #endif postcode(CPU_DESC_LOAD_EXIT); } @@ -516,7 +516,7 @@ cpu_syscall_init(cpu_data_t *cdp) #else /* MONOTONIC */ #pragma unused(cdp) #endif /* !MONOTONIC */ - wrmsr64(MSR_IA32_SYSENTER_CS, SYSENTER_CS); + wrmsr64(MSR_IA32_SYSENTER_CS, SYSENTER_CS); wrmsr64(MSR_IA32_SYSENTER_EIP, DBLMAP((uintptr_t) hi64_sysenter)); wrmsr64(MSR_IA32_SYSENTER_ESP, current_cpu_datap()->cpu_desc_index.cdi_sstku); /* Enable syscall/sysret */ @@ -536,8 +536,7 @@ cpu_syscall_init(cpu_data_t *cdp) * is also cleared to avoid a spurious "task switch" * should we choose to return via an IRET. 
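 *
 * On SYSCALL the processor effectively executes (sketch of the
 * architected behavior, not kernel code):
 *
 *	R11     = RFLAGS;           // saved for the later SYSRET
 *	RFLAGS &= ~IA32_FMASK;      // DF, IF, TF and NT all clear
 *
 * so the 64-bit syscall handler starts with interrupts off and a
 * known-clear direction flag.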
*/ - wrmsr64(MSR_IA32_FMASK, EFL_DF|EFL_IF|EFL_TF|EFL_NT); - + wrmsr64(MSR_IA32_FMASK, EFL_DF | EFL_IF | EFL_TF | EFL_NT); } extern vm_offset_t dyn_dblmap(vm_offset_t, vm_offset_t); uint64_t ldt_alias_offset; @@ -545,8 +544,8 @@ uint64_t ldt_alias_offset; cpu_data_t * cpu_data_alloc(boolean_t is_boot_cpu) { - int ret; - cpu_data_t *cdp; + int ret; + cpu_data_t *cdp; if (is_boot_cpu) { assert(real_ncpus == 1); @@ -562,7 +561,7 @@ cpu_data_alloc(boolean_t is_boot_cpu) } boolean_t do_ldt_alloc = FALSE; - simple_lock(&ncpus_lock); + simple_lock(&ncpus_lock, LCK_GRP_NULL); int cnum = real_ncpus; real_ncpus++; if (dyn_ldts == NULL) { @@ -582,9 +581,9 @@ cpu_data_alloc(boolean_t is_boot_cpu) /* * Allocate interrupt stack: */ - ret = kmem_alloc(kernel_map, - (vm_offset_t *) &cdp->cpu_int_stack_top, - INTSTACK_SIZE, VM_KERN_MEMORY_CPU); + ret = kmem_alloc(kernel_map, + (vm_offset_t *) &cdp->cpu_int_stack_top, + INTSTACK_SIZE, VM_KERN_MEMORY_CPU); if (ret != KERN_SUCCESS) { panic("cpu_data_alloc() int stack failed, ret=%d\n", ret); } @@ -611,7 +610,7 @@ cpu_data_alloc(boolean_t is_boot_cpu) panic("cpu_data_alloc() ldt failed, kmem_alloc=%d\n", ret); } - simple_lock(&ncpus_lock); + simple_lock(&ncpus_lock, LCK_GRP_NULL); if (dyn_ldts == NULL) { dyn_ldts = (cldt_t *)ldtalloc; } else { @@ -650,94 +649,100 @@ cpu_data_alloc(boolean_t is_boot_cpu) cdp->cpu_nanotime = &pal_rtc_nanotime_info; kprintf("cpu_data_alloc(%d) %p desc_table: %p " - "ldt: %p " - "int_stack: 0x%lx-0x%lx\n", - cdp->cpu_number, cdp, cdp->cpu_desc_tablep, cdp->cpu_ldtp, - (long)(cdp->cpu_int_stack_top - INTSTACK_SIZE), (long)(cdp->cpu_int_stack_top)); + "ldt: %p " + "int_stack: 0x%lx-0x%lx\n", + cdp->cpu_number, cdp, cdp->cpu_desc_tablep, cdp->cpu_ldtp, + (long)(cdp->cpu_int_stack_top - INTSTACK_SIZE), (long)(cdp->cpu_int_stack_top)); cpu_data_ptr[cnum] = cdp; return cdp; - } boolean_t valid_user_data_selector(uint16_t selector) { - sel_t sel = selector_to_sel(selector); - - if (selector == 0) - return (TRUE); - - if (sel.ti == SEL_LDT) - return (TRUE); - else if (sel.index < GDTSZ) { - if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) - return (TRUE); - } - return (FALSE); + sel_t sel = selector_to_sel(selector); + + if (selector == 0) { + return TRUE; + } + + if (sel.ti == SEL_LDT) { + return TRUE; + } else if (sel.index < GDTSZ) { + if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) { + return TRUE; + } + } + return FALSE; } boolean_t valid_user_code_selector(uint16_t selector) { - sel_t sel = selector_to_sel(selector); - - if (selector == 0) - return (FALSE); - - if (sel.ti == SEL_LDT) { - if (sel.rpl == USER_PRIV) - return (TRUE); - } - else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) { - if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) - return (TRUE); - /* Explicitly validate the system code selectors - * even if not instantaneously privileged, - * since they are dynamically re-privileged - * at context switch - */ - if ((selector == USER_CS) || (selector == USER64_CS)) - return (TRUE); - } + sel_t sel = selector_to_sel(selector); + + if (selector == 0) { + return FALSE; + } - return (FALSE); + if (sel.ti == SEL_LDT) { + if (sel.rpl == USER_PRIV) { + return TRUE; + } + } else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) { + if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) { + return TRUE; + } + /* Explicitly validate the system code selectors + * even if not instantaneously privileged, + * since they are dynamically re-privileged + * at context switch + */ + if ((selector == USER_CS) || 
(selector == USER64_CS)) { + return TRUE; + } + } + + return FALSE; } boolean_t valid_user_stack_selector(uint16_t selector) { - sel_t sel = selector_to_sel(selector); - - if (selector == 0) - return (FALSE); - - if (sel.ti == SEL_LDT) { - if (sel.rpl == USER_PRIV) - return (TRUE); - } - else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) { - if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) - return (TRUE); - } - - return (FALSE); + sel_t sel = selector_to_sel(selector); + + if (selector == 0) { + return FALSE; + } + + if (sel.ti == SEL_LDT) { + if (sel.rpl == USER_PRIV) { + return TRUE; + } + } else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) { + if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U) { + return TRUE; + } + } + + return FALSE; } boolean_t valid_user_segment_selectors(uint16_t cs, - uint16_t ss, - uint16_t ds, - uint16_t es, - uint16_t fs, - uint16_t gs) -{ - return valid_user_code_selector(cs) && - valid_user_stack_selector(ss) && - valid_user_data_selector(ds) && - valid_user_data_selector(es) && - valid_user_data_selector(fs) && - valid_user_data_selector(gs); + uint16_t ss, + uint16_t ds, + uint16_t es, + uint16_t fs, + uint16_t gs) +{ + return valid_user_code_selector(cs) && + valid_user_stack_selector(ss) && + valid_user_data_selector(ds) && + valid_user_data_selector(es) && + valid_user_data_selector(fs) && + valid_user_data_selector(gs); } #if NCOPY_WINDOWS > 0 @@ -747,23 +752,24 @@ static vm_offset_t user_window_base = 0; void cpu_userwindow_init(int cpu) { - cpu_data_t *cdp = cpu_data_ptr[cpu]; - vm_offset_t user_window; - vm_offset_t vaddr; - int num_cpus; + cpu_data_t *cdp = cpu_data_ptr[cpu]; + vm_offset_t user_window; + vm_offset_t vaddr; + int num_cpus; num_cpus = ml_get_max_cpus(); - if (cpu >= num_cpus) + if (cpu >= num_cpus) { panic("cpu_userwindow_init: cpu > num_cpus"); + } if (user_window_base == 0) { - if (vm_allocate(kernel_map, &vaddr, - (NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE, - VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) + (NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE, + VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) != KERN_SUCCESS) { panic("cpu_userwindow_init: " - "couldn't allocate user map window"); + "couldn't allocate user map window"); + } /* * window must start on a page table boundary @@ -781,37 +787,37 @@ cpu_userwindow_init(int cpu) * get rid of tail that we don't need */ user_window = user_window_base + - (NBPDE * NCOPY_WINDOWS * num_cpus); + (NBPDE * NCOPY_WINDOWS * num_cpus); vm_deallocate(kernel_map, user_window, - (vaddr + - ((NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE)) - - user_window); + (vaddr + + ((NBPDE * NCOPY_WINDOWS * num_cpus) + NBPDE)) - + user_window); } - user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE); + user_window = user_window_base + (cpu * NCOPY_WINDOWS * NBPDE); cdp->cpu_copywindow_base = user_window; /* - * Abuse this pdp entry, the pdp now actually points to + * Abuse this pdp entry, the pdp now actually points to * an array of copy windows addresses. 
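 *
 * Each cpu owns NCOPY_WINDOWS consecutive pde-sized slots starting at
 * cpu_copywindow_base, so window w of a cpu sits at (sketch; one window
 * per pde is an assumption read off the sizing arithmetic above):
 *
 *	vm_offset_t win = cdp->cpu_copywindow_base + w * NBPDE;  // w < NCOPY_WINDOWS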
*/ cdp->cpu_copywindow_pdp = pmap_pde(kernel_pmap, user_window); - } void cpu_physwindow_init(int cpu) { - cpu_data_t *cdp = cpu_data_ptr[cpu]; - vm_offset_t phys_window = cdp->cpu_physwindow_base; + cpu_data_t *cdp = cpu_data_ptr[cpu]; + vm_offset_t phys_window = cdp->cpu_physwindow_base; if (phys_window == 0) { if (vm_allocate(kernel_map, &phys_window, - PAGE_SIZE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) - != KERN_SUCCESS) - panic("cpu_physwindow_init: " - "couldn't allocate phys map window"); + PAGE_SIZE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_CPU)) + != KERN_SUCCESS) { + panic("cpu_physwindow_init: " + "couldn't allocate phys map window"); + } /* * make sure the page that encompasses the @@ -834,10 +840,10 @@ cpu_physwindow_init(int cpu) void cpu_data_realloc(void) { - int ret; - vm_offset_t istk; - cpu_data_t *cdp; - boolean_t istate; + int ret; + vm_offset_t istk; + cpu_data_t *cdp; + boolean_t istate; ret = kmem_alloc(kernel_map, &istk, INTSTACK_SIZE, VM_KERN_MEMORY_CPU); if (ret != KERN_SUCCESS) { @@ -855,7 +861,7 @@ cpu_data_realloc(void) cdp->cpu_int_stack_top = istk; timer_call_queue_init(&cdp->rtclock_timer.queue); cdp->cpu_desc_tablep = (struct cpu_desc_table *) &scdtables[0]; - cpu_desc_table64_t *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep; + cpu_desc_table64_t *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep; uint8_t *cfstk = &scfstks[cdp->cpu_number].fstk[0]; cdt->fstkp = cfstk; @@ -873,6 +879,6 @@ cpu_data_realloc(void) (void) ml_set_interrupts_enabled(istate); kprintf("Reallocated master cpu data: %p," - " interrupt stack: %p, fault stack: %p\n", - (void *) cdp, (void *) istk, (void *) cfstk); + " interrupt stack: %p, fault stack: %p\n", + (void *) cdp, (void *) istk, (void *) cfstk); } diff --git a/osfmk/i386/mp_desc.h b/osfmk/i386/mp_desc.h index 50f6c62a6..aeca023a5 100644 --- a/osfmk/i386/mp_desc.h +++ b/osfmk/i386/mp_desc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. 
- *
+ *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
 * Carnegie Mellon requests users of this software to return to
- *
+ *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
- *
+ *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
@@ -57,8 +57,8 @@
/* */
-#ifndef _I386_MP_DESC_H_
-#define _I386_MP_DESC_H_
+#ifndef _I386_MP_DESC_H_
+#define _I386_MP_DESC_H_
#include
@@ -82,34 +82,34 @@ __BEGIN_DECLS
 */
#define FSTK_SZ (PAGE_SIZE/2)
typedef struct cpu_desc_table64 {
- struct fake_descriptor gdt[GDTSZ] __attribute__ ((aligned (16)));
- struct x86_64_tss ktss __attribute__ ((aligned (16)));
- struct sysenter_stack sstk __attribute__ ((aligned (16)));
- uint8_t *fstkp;
+ struct fake_descriptor gdt[GDTSZ] __attribute__ ((aligned(16)));
+ struct x86_64_tss ktss __attribute__ ((aligned(16)));
+ struct sysenter_stack sstk __attribute__ ((aligned(16)));
+ uint8_t *fstkp;
} cpu_desc_table64_t;
typedef struct {
- uint8_t fstk[FSTK_SZ] __attribute__ ((aligned (16)));
+ uint8_t fstk[FSTK_SZ] __attribute__ ((aligned(16)));
} cpu_fault_stack_t;
-#define current_gdt() (current_cpu_datap()->cpu_desc_index.cdi_gdtb.ptr)
-#define current_idt() (current_cpu_datap()->cpu_desc_index.cdi_idtb.ptr)
-#define current_ldt() (current_cpu_datap()->cpu_desc_index.cdi_ldtb)
-#define current_ktss() (current_cpu_datap()->cpu_desc_index.cdi_ktssb)
-#define current_sstk() (current_cpu_datap()->cpu_desc_index.cdi_sstkb)
+#define current_gdt() (current_cpu_datap()->cpu_desc_index.cdi_gdtb.ptr)
+#define current_idt() (current_cpu_datap()->cpu_desc_index.cdi_idtb.ptr)
+#define current_ldt() (current_cpu_datap()->cpu_desc_index.cdi_ldtb)
+#define current_ktss() (current_cpu_datap()->cpu_desc_index.cdi_ktssb)
+#define current_sstk() (current_cpu_datap()->cpu_desc_index.cdi_sstkb)
-#define current_ktss64() ((struct x86_64_tss *) current_ktss())
-#define current_sstk64() ((addr64_t *) current_sstk())
+#define current_ktss64() ((struct x86_64_tss *) current_ktss())
+#define current_sstk64() ((addr64_t *) current_sstk())
-#define gdt_desc_p(sel) \
+#define gdt_desc_p(sel) \
 (&((struct real_descriptor *)current_gdt())[sel_idx(sel)])
-#define ldt_desc_p(sel) \
- (&((struct real_descriptor *)current_ldt())[sel_idx(sel)])
+#define ldt_desc_p(sel) \
+ (&current_ldt()[sel_idx(sel)])
-extern void cpu_syscall_init(cpu_data_t *cdp);
+extern void cpu_syscall_init(cpu_data_t *cdp);
-extern void cpu_desc_init(cpu_data_t *cdp);
-extern void cpu_desc_load(cpu_data_t *cdp);
+extern void cpu_desc_init(cpu_data_t *cdp);
+extern void cpu_desc_load(cpu_data_t *cdp);
extern boolean_t
valid_user_data_selector(uint16_t selector);
@@ -122,12 +122,12 @@ valid_user_stack_selector(uint16_t selector);
extern boolean_t
valid_user_segment_selectors(uint16_t cs,
- uint16_t ss,
- uint16_t ds,
- uint16_t es,
- uint16_t fs,
- uint16_t gs);
+ uint16_t ss,
+ uint16_t ds,
+ uint16_t es,
+ uint16_t fs,
+ uint16_t gs);
__END_DECLS
-#endif /* _X86_64_MP_DESC_H_ */
+#endif /* _X86_64_MP_DESC_H_ */
diff --git a/osfmk/i386/mp_events.h b/osfmk/i386/mp_events.h
index 6fd14ed44..2e8b45598 100644
--- a/osfmk/i386/mp_events.h
+++ b/osfmk/i386/mp_events.h
@@ -2,7 +2,7 @@
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef __AT386_MP_EVENTS__ -#define __AT386_MP_EVENTS__ +#define __AT386_MP_EVENTS__ /* Interrupt types */ @@ -46,27 +46,27 @@ typedef enum { MP_LAST } mp_event_t; -#define MP_EVENT_NAME_DECL() \ -const char *mp_event_name[] = { \ - "MP_TLB_FLUSH", \ - "MP_KDP", \ - "MP_KDB", \ - "MP_AST", \ - "MP_IDLE", \ - "MP_UNIDLE", \ - "MP_CALL", \ - "MP_CALL_PM", \ - "MP_LAST" \ +#define MP_EVENT_NAME_DECL() \ +const char *mp_event_name[] = { \ + "MP_TLB_FLUSH", \ + "MP_KDP", \ + "MP_KDB", \ + "MP_AST", \ + "MP_IDLE", \ + "MP_UNIDLE", \ + "MP_CALL", \ + "MP_CALL_PM", \ + "MP_LAST" \ } typedef enum { SYNC, ASYNC, NOSYNC } mp_sync_t; __BEGIN_DECLS -extern void i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode); -extern void i386_activate_cpu(void); -extern void i386_deactivate_cpu(void); -extern void cpu_NMI_interrupt(int /* cpu */); +extern void i386_signal_cpu(int cpu, mp_event_t event, mp_sync_t mode); +extern void i386_activate_cpu(void); +extern void i386_deactivate_cpu(void); +extern void cpu_NMI_interrupt(int /* cpu */); __END_DECLS diff --git a/osfmk/i386/mp_native.c b/osfmk/i386/mp_native.c index e64344db7..1ba36090a 100644 --- a/osfmk/i386/mp_native.c +++ b/osfmk/i386/mp_native.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- *
+ *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
@@ -37,29 +37,30 @@
/* PAL-related routines */
void i386_cpu_IPI(int cpu);
-boolean_t i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler,
- int ipi_vector, i386_intr_func_t ipi_handler);
+boolean_t i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler,
+ int ipi_vector, i386_intr_func_t ipi_handler);
void i386_start_cpu(int lapic_id, int cpu_num);
void i386_send_NMI(int cpu);
void handle_pending_TLB_flushes(void);
void NMIPI_enable(boolean_t);
-extern void slave_pstart(void);
+extern void slave_pstart(void);
-#ifdef MP_DEBUG
-int trappedalready = 0; /* (BRINGUP) */
-#endif /* MP_DEBUG */
+#ifdef MP_DEBUG
+int trappedalready = 0; /* (BRINGUP) */
+#endif /* MP_DEBUG */
boolean_t
i386_smp_init(int nmi_vector, i386_intr_func_t nmi_handler, int ipi_vector, i386_intr_func_t ipi_handler)
{
 /* Local APIC? */
- if (!lapic_probe())
+ if (!lapic_probe()) {
 return FALSE;
+ }
 lapic_init();
 lapic_configure();
- lapic_set_intr_func(nmi_vector, nmi_handler);
+ lapic_set_intr_func(nmi_vector, nmi_handler);
 lapic_set_intr_func(ipi_vector, ipi_handler);
 install_real_mode_bootstrap(slave_pstart);
@@ -73,12 +74,14 @@ i386_start_cpu(int lapic_id, __unused int cpu_num )
 LAPIC_WRITE_ICR(lapic_id, LAPIC_ICR_DM_INIT);
 delay(100);
 LAPIC_WRITE_ICR(lapic_id,
- LAPIC_ICR_DM_STARTUP|(REAL_MODE_BOOTSTRAP_OFFSET>>12));
+ LAPIC_ICR_DM_STARTUP | (REAL_MODE_BOOTSTRAP_OFFSET >> 12));
}
static boolean_t NMIPIs_enabled = FALSE;
-void NMIPI_enable(boolean_t enable) {
+void
+NMIPI_enable(boolean_t enable)
+{
 NMIPIs_enabled = enable;
}
@@ -90,12 +93,12 @@ i386_send_NMI(int cpu)
 if (NMIPIs_enabled == FALSE) {
 i386_cpu_IPI(cpu);
 } else {
- /* Program the interrupt command register */
- /* The vector is ignored in this case--the target CPU will enter on the
- * NMI vector.
- */
- LAPIC_WRITE_ICR(cpu_to_lapic[cpu],
- LAPIC_VECTOR(INTERPROCESSOR)|LAPIC_ICR_DM_NMI);
+ /* Program the interrupt command register */
+ /* The vector is ignored in this case--the target CPU will enter on the
+ * NMI vector.
+ */
+ LAPIC_WRITE_ICR(cpu_to_lapic[cpu],
+ LAPIC_VECTOR(INTERPROCESSOR) | LAPIC_ICR_DM_NMI);
 }
 (void) ml_set_interrupts_enabled(state);
}
@@ -103,9 +106,9 @@ i386_send_NMI(int cpu)
void
handle_pending_TLB_flushes(void)
{
- volatile int *my_word = &current_cpu_datap()->cpu_signals;
+ volatile int *my_word = &current_cpu_datap()->cpu_signals;
- if (i_bit(MP_TLB_FLUSH, my_word) && (pmap_tlb_flush_timeout == FALSE)) {
+ if (i_bit(MP_TLB_FLUSH, my_word) && (pmap_tlb_flush_timeout == FALSE)) {
 DBGLOG(cpu_handle, cpu_number(), MP_TLB_FLUSH);
 i_bit_clear(MP_TLB_FLUSH, my_word);
 pmap_update_interrupt();
@@ -115,11 +118,11 @@ handle_pending_TLB_flushes(void)
void
i386_cpu_IPI(int cpu)
{
-#ifdef MP_DEBUG
- if(cpu_datap(cpu)->cpu_signals & 6) { /* (BRINGUP) */
+#ifdef MP_DEBUG
+ if (cpu_datap(cpu)->cpu_signals & 6) { /* (BRINGUP) */
 kprintf("i386_cpu_IPI: sending enter debugger signal (%08X) to cpu %d\n", cpu_datap(cpu)->cpu_signals, cpu);
 }
-#endif /* MP_DEBUG */
+#endif /* MP_DEBUG */
 lapic_send_ipi(cpu, LAPIC_VECTOR(INTERPROCESSOR));
}
diff --git a/osfmk/i386/mtrr.c b/osfmk/i386/mtrr.c
index 30df3db47..a72c09700 100644
--- a/osfmk/i386/mtrr.c
+++ b/osfmk/i386/mtrr.c
@@ -2,7 +2,7 @@
 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License').
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,13 +37,13 @@ #include struct mtrr_var_range { - uint64_t base; /* in IA32_MTRR_PHYSBASE format */ - uint64_t mask; /* in IA32_MTRR_PHYSMASK format */ - uint32_t refcnt; /* var ranges reference count */ + uint64_t base; /* in IA32_MTRR_PHYSBASE format */ + uint64_t mask; /* in IA32_MTRR_PHYSMASK format */ + uint32_t refcnt; /* var ranges reference count */ }; struct mtrr_fix_range { - uint64_t types; /* fixed-range type octet */ + uint64_t types; /* fixed-range type octet */ }; typedef struct mtrr_var_range mtrr_var_range_t; @@ -60,12 +60,12 @@ static struct { static boolean_t mtrr_initialized = FALSE; decl_simple_lock_data(static, mtrr_lock); -#define MTRR_LOCK() simple_lock(&mtrr_lock); -#define MTRR_UNLOCK() simple_unlock(&mtrr_lock); +#define MTRR_LOCK() simple_lock(&mtrr_lock, LCK_GRP_NULL); +#define MTRR_UNLOCK() simple_unlock(&mtrr_lock); //#define MTRR_DEBUG 1 -#if MTRR_DEBUG -#define DBG(x...) kprintf(x) +#if MTRR_DEBUG +#define DBG(x...) kprintf(x) #else #define DBG(x...) #endif @@ -79,23 +79,23 @@ static void mtrr_update_setup(void * param); static void mtrr_update_teardown(void * param); static void mtrr_update_action(void * param); static void var_range_encode(mtrr_var_range_t * range, addr64_t address, - uint64_t length, uint32_t type, int valid); + uint64_t length, uint32_t type, int valid); static int var_range_overlap(mtrr_var_range_t * range, addr64_t address, - uint64_t length, uint32_t type); + uint64_t length, uint32_t type); -#define CACHE_CONTROL_MTRR (NULL) -#define CACHE_CONTROL_PAT ((void *)1) +#define CACHE_CONTROL_MTRR (NULL) +#define CACHE_CONTROL_PAT ((void *)1) /* * MTRR MSR bit fields. 
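 *
 * Worked example for a machine with 36 physical address bits (assuming
 * the usual ((1ULL << bits) - 1) body for PHYS_BITS_TO_MASK below):
 *
 *	PHYS_BITS_TO_MASK(36)   == 0x0000000FFFFFFFFFULL
 *	IA32_MTRR_PHYSBASE_MASK == 0x0000000FFFFFF000ULL   // 4K granularity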
*/ -#define IA32_MTRR_DEF_TYPE_MT 0x000000ff -#define IA32_MTRR_DEF_TYPE_FE 0x00000400 -#define IA32_MTRR_DEF_TYPE_E 0x00000800 +#define IA32_MTRR_DEF_TYPE_MT 0x000000ff +#define IA32_MTRR_DEF_TYPE_FE 0x00000400 +#define IA32_MTRR_DEF_TYPE_E 0x00000800 -#define IA32_MTRRCAP_VCNT 0x000000ff -#define IA32_MTRRCAP_FIX 0x00000100 -#define IA32_MTRRCAP_WC 0x00000400 +#define IA32_MTRRCAP_VCNT 0x000000ff +#define IA32_MTRRCAP_FIX 0x00000100 +#define IA32_MTRRCAP_WC 0x00000400 /* 0 < bits <= 64 */ #define PHYS_BITS_TO_MASK(bits) \ @@ -107,9 +107,9 @@ static int var_range_overlap(mtrr_var_range_t * range, addr64_t address, */ static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36); -#define IA32_MTRR_PHYMASK_VALID 0x0000000000000800ULL -#define IA32_MTRR_PHYSBASE_MASK (mtrr_phys_mask & ~0x0000000000000FFFULL) -#define IA32_MTRR_PHYSBASE_TYPE 0x00000000000000FFULL +#define IA32_MTRR_PHYMASK_VALID 0x0000000000000800ULL +#define IA32_MTRR_PHYSBASE_MASK (mtrr_phys_mask & ~0x0000000000000FFFULL) +#define IA32_MTRR_PHYSBASE_TYPE 0x00000000000000FFULL /* * Variable-range mask to/from length conversions. @@ -120,7 +120,7 @@ static uint64_t mtrr_phys_mask = PHYS_BITS_TO_MASK(36); #define LEN_TO_MASK(len) \ (~((len) - 1) & IA32_MTRR_PHYSBASE_MASK) -#define LSB(x) ((x) & (~((x) - 1))) +#define LSB(x) ((x) & (~((x) - 1))) /* * Fetch variable-range MTRR register pairs. @@ -135,10 +135,11 @@ mtrr_get_var_ranges(mtrr_var_range_t * range, int count) range[i].mask = rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)); /* bump ref count for firmware configured ranges */ - if (range[i].mask & IA32_MTRR_PHYMASK_VALID) + if (range[i].mask & IA32_MTRR_PHYMASK_VALID) { range[i].refcnt = 1; - else + } else { range[i].refcnt = 0; + } } } @@ -168,8 +169,9 @@ mtrr_get_fix_ranges(mtrr_fix_range_t * range) range[0].types = rdmsr64(MSR_IA32_MTRR_FIX64K_00000); range[1].types = rdmsr64(MSR_IA32_MTRR_FIX16K_80000); range[2].types = rdmsr64(MSR_IA32_MTRR_FIX16K_A0000); - for (i = 0; i < 8; i++) + for (i = 0; i < 8; i++) { range[3 + i].types = rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i); + } } /* @@ -184,25 +186,26 @@ mtrr_set_fix_ranges(const struct mtrr_fix_range * range) wrmsr64(MSR_IA32_MTRR_FIX64K_00000, range[0].types); wrmsr64(MSR_IA32_MTRR_FIX16K_80000, range[1].types); wrmsr64(MSR_IA32_MTRR_FIX16K_A0000, range[2].types); - for (i = 0; i < 8; i++) + for (i = 0; i < 8; i++) { wrmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i, range[3 + i].types); + } } static boolean_t mtrr_check_fix_ranges(const struct mtrr_fix_range * range) { - int i; - boolean_t match = TRUE; + int i; + boolean_t match = TRUE; DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__); /* assume 11 fix range registers */ match = range[0].types == rdmsr64(MSR_IA32_MTRR_FIX64K_00000) && - range[1].types == rdmsr64(MSR_IA32_MTRR_FIX16K_80000) && - range[2].types == rdmsr64(MSR_IA32_MTRR_FIX16K_A0000); + range[1].types == rdmsr64(MSR_IA32_MTRR_FIX16K_80000) && + range[2].types == rdmsr64(MSR_IA32_MTRR_FIX16K_A0000); for (i = 0; match && i < 8; i++) { match = range[3 + i].types == - rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i); + rdmsr64(MSR_IA32_MTRR_FIX4K_C0000 + i); } return match; @@ -211,14 +214,14 @@ mtrr_check_fix_ranges(const struct mtrr_fix_range * range) static boolean_t mtrr_check_var_ranges(mtrr_var_range_t * range, int count) { - int i; - boolean_t match = TRUE; - + int i; + boolean_t match = TRUE; + DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__); for (i = 0; match && i < count; i++) { match = range[i].base == rdmsr64(MSR_IA32_MTRR_PHYSBASE(i)) && - range[i].mask == 
rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)); + range[i].mask == rdmsr64(MSR_IA32_MTRR_PHYSMASK(i)); } return match; @@ -253,7 +256,7 @@ mtrr_msr_dump(void) DBG(" FIX4K_F8000: 0x%016llx\n", rdmsr64(MSR_IA32_MTRR_FIX4K_F8000)); DBG("\nMTRRcap = 0x%llx MTRRdefType = 0x%llx\n", - rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE)); + rdmsr64(MSR_IA32_MTRRCAP), rdmsr64(MSR_IA32_MTRR_DEF_TYPE)); } #endif /* MTRR_DEBUG */ @@ -266,13 +269,14 @@ void mtrr_init(void) { /* no reason to init more than once */ - if (mtrr_initialized == TRUE) + if (mtrr_initialized == TRUE) { return; + } /* check for presence of MTRR feature on the processor */ - if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0) - return; /* no MTRR feature */ - + if ((cpuid_features() & CPUID_FEATURE_MTRR) == 0) { + return; /* no MTRR feature */ + } /* use a lock to serialize MTRR changes */ bzero((void *)&mtrr_state, sizeof(mtrr_state)); simple_lock_init(&mtrr_lock, 0); @@ -284,27 +288,29 @@ mtrr_init(void) /* allocate storage for variable ranges (can block?) */ if (mtrr_state.var_count) { mtrr_state.var_range = (mtrr_var_range_t *) - kalloc(sizeof(mtrr_var_range_t) * - mtrr_state.var_count); - if (mtrr_state.var_range == NULL) + kalloc(sizeof(mtrr_var_range_t) * + mtrr_state.var_count); + if (mtrr_state.var_range == NULL) { mtrr_state.var_count = 0; + } } /* fetch the initial firmware configured variable ranges */ - if (mtrr_state.var_count) + if (mtrr_state.var_count) { mtrr_get_var_ranges(mtrr_state.var_range, - mtrr_state.var_count); + mtrr_state.var_count); + } /* fetch the initial firmware configured fixed ranges */ - if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX) + if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX) { mtrr_get_fix_ranges(mtrr_state.fix_range); + } mtrr_initialized = TRUE; #if MTRR_DEBUG - mtrr_msr_dump(); /* dump firmware settings */ + mtrr_msr_dump(); /* dump firmware settings */ #endif - } /* @@ -331,50 +337,58 @@ mtrr_update_action(void * cache_control_type) wbinvd(); /* clear the PGE flag in CR4 */ - if (cr4 & CR4_PGE) + if (cr4 & CR4_PGE) { set_cr4(cr4 & ~CR4_PGE); - - /* flush TLBs */ - flush_tlb_raw(); + } else { + set_cr3_raw(get_cr3_raw()); + } if (CACHE_CONTROL_PAT == cache_control_type) { /* Change PA6 attribute field to WC */ uint64_t pat = rdmsr64(MSR_IA32_CR_PAT); DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat); - pat &= ~(0x0FULL << 48); + /* + * Intel doc states: + * "The IA32_PAT MSR contains eight page attribute fields: PA0 through PA7. + * The three low-order bits of each field are used to specify a memory type. + * The five high-order bits of each field are reserved, and must be set to all 0s." + * So, we zero-out the high 5 bits of the PA6 entry here: + */ + pat &= ~(0xFFULL << 48); pat |= (0x01ULL << 48); wrmsr64(MSR_IA32_CR_PAT, pat); DBG("CPU%d PAT: is 0x%016llx\n", get_cpu_number(), rdmsr64(MSR_IA32_CR_PAT)); - } - else { + } else { /* disable all MTRR ranges */ wrmsr64(MSR_IA32_MTRR_DEF_TYPE, - mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E); + mtrr_state.MTRRdefType & ~IA32_MTRR_DEF_TYPE_E); /* apply MTRR settings */ - if (mtrr_state.var_count) + if (mtrr_state.var_count) { mtrr_set_var_ranges(mtrr_state.var_range, - mtrr_state.var_count); + mtrr_state.var_count); + } - if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX) + if (mtrr_state.MTRRcap & IA32_MTRRCAP_FIX) { mtrr_set_fix_ranges(mtrr_state.fix_range); + } /* enable all MTRR range registers (what if E was not set?) 
*/ wrmsr64(MSR_IA32_MTRR_DEF_TYPE, - mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E); + mtrr_state.MTRRdefType | IA32_MTRR_DEF_TYPE_E); } /* flush all caches and TLBs a second time */ wbinvd(); - flush_tlb_raw(); - + set_cr3_raw(get_cr3_raw()); /* restore normal cache mode */ set_cr0(cr0); /* restore PGE flag */ - if (cr4 & CR4_PGE) + if (cr4 & CR4_PGE) { set_cr4(cr4); + } DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__); } @@ -401,13 +415,14 @@ mtrr_update_teardown(__unused void * param_not_used) kern_return_t mtrr_update_all_cpus(void) { - if (mtrr_initialized == FALSE) + if (mtrr_initialized == FALSE) { return KERN_NOT_SUPPORTED; + } MTRR_LOCK(); mp_rendezvous(mtrr_update_setup, - mtrr_update_action, - mtrr_update_teardown, NULL); + mtrr_update_action, + mtrr_update_teardown, NULL); MTRR_UNLOCK(); return KERN_SUCCESS; @@ -421,10 +436,11 @@ mtrr_update_all_cpus(void) kern_return_t mtrr_update_cpu(void) { - boolean_t match = TRUE; + boolean_t match = TRUE; - if (mtrr_initialized == FALSE) + if (mtrr_initialized == FALSE) { return KERN_NOT_SUPPORTED; + } DBG("CPU%d: %s\n", get_cpu_number(), __FUNCTION__); @@ -441,7 +457,7 @@ mtrr_update_cpu(void) /* Check variable ranges */ if (match && mtrr_state.var_count) { match = mtrr_check_var_ranges(mtrr_state.var_range, - mtrr_state.var_count); + mtrr_state.var_count); } /* Check fixed ranges */ @@ -450,17 +466,19 @@ mtrr_update_cpu(void) } #if MTRR_DEBUG - if (!match) + if (!match) { mtrr_msr_dump(); + } #endif if (!match) { DBG("mtrr_update_cpu() setting MTRR for cpu %d\n", - get_cpu_number()); + get_cpu_number()); mtrr_update_action(NULL); } #if MTRR_DEBUG - if (!match) + if (!match) { mtrr_msr_dump(); + } #endif MTRR_UNLOCK(); @@ -482,14 +500,14 @@ mtrr_range_add(addr64_t address, uint64_t length, uint32_t type) unsigned int i; DBG("mtrr_range_add base = 0x%llx, size = 0x%llx, type = %d\n", - address, length, type); + address, length, type); if (mtrr_initialized == FALSE) { return KERN_NOT_SUPPORTED; } /* check memory type (GPF exception for undefined types) */ - if ((type != MTRR_TYPE_UNCACHEABLE) && + if ((type != MTRR_TYPE_UNCACHEABLE) && (type != MTRR_TYPE_WRITECOMBINE) && (type != MTRR_TYPE_WRITETHROUGH) && (type != MTRR_TYPE_WRITEPROTECT) && @@ -512,9 +530,9 @@ mtrr_range_add(addr64_t address, uint64_t length, uint32_t type) * Length must be a power of 2 given by 2^n, where n >= 12. * Base address alignment must be larger than or equal to length. */ - if ((length < 0x1000) || + if ((length < 0x1000) || (LSB(length) != length) || - (address && (length > LSB(address)))) { + (address && (length > LSB(address)))) { return KERN_INVALID_ARGUMENT; } @@ -523,8 +541,7 @@ mtrr_range_add(addr64_t address, uint64_t length, uint32_t type) /* * Check for overlap and locate a free range. 
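 *
 * Worked example of the length checks and mask encoding enforced above,
 * for a 64KB range with the default 36-bit mtrr_phys_mask:
 *
 *	LSB(0x10000)         == 0x10000               // power of two: accepted
 *	LEN_TO_MASK(0x10000) == 0x0000000FFFFF0000ULL
 *	// var_range_encode() then ORs in IA32_MTRR_PHYMASK_VALID (bit 11),
 *	// giving a PHYSMASK of 0x0000000FFFFF0800ULL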
*/ - for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++) - { + for (i = 0, free_range = NULL; i < mtrr_state.var_count; i++) { vr = &mtrr_state.var_range[i]; if (vr->refcnt == 0) { @@ -553,8 +570,8 @@ mtrr_range_add(addr64_t address, uint64_t length, uint32_t type) if (free_range->refcnt++ == 0) { var_range_encode(free_range, address, length, type, 1); mp_rendezvous(mtrr_update_setup, - mtrr_update_action, - mtrr_update_teardown, NULL); + mtrr_update_action, + mtrr_update_teardown, NULL); } ret = KERN_SUCCESS; } @@ -581,7 +598,7 @@ mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type) unsigned int i; DBG("mtrr_range_remove base = 0x%llx, size = 0x%llx, type = %d\n", - address, length, type); + address, length, type); if (mtrr_initialized == FALSE) { return KERN_NOT_SUPPORTED; @@ -606,8 +623,8 @@ mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type) if (cpu_update) { mp_rendezvous(mtrr_update_setup, - mtrr_update_action, - mtrr_update_teardown, NULL); + mtrr_update_action, + mtrr_update_teardown, NULL); result = KERN_SUCCESS; } @@ -625,18 +642,18 @@ mtrr_range_remove(addr64_t address, uint64_t length, uint32_t type) */ static void var_range_encode(mtrr_var_range_t * range, addr64_t address, - uint64_t length, uint32_t type, int valid) + uint64_t length, uint32_t type, int valid) { range->base = (address & IA32_MTRR_PHYSBASE_MASK) | - (type & (uint32_t)IA32_MTRR_PHYSBASE_TYPE); + (type & (uint32_t)IA32_MTRR_PHYSBASE_TYPE); range->mask = LEN_TO_MASK(length) | - (valid ? IA32_MTRR_PHYMASK_VALID : 0); + (valid ? IA32_MTRR_PHYMASK_VALID : 0); } static int var_range_overlap(mtrr_var_range_t * range, addr64_t address, - uint64_t length, uint32_t type) + uint64_t length, uint32_t type) { uint64_t v_address, v_length; uint32_t v_type; @@ -649,20 +666,17 @@ var_range_overlap(mtrr_var_range_t * range, addr64_t address, /* detect range overlap */ if ((v_address >= address && v_address < (address + length)) || (address >= v_address && address < (v_address + v_length))) { - - if (v_address == address && v_length == length && v_type == type) + if (v_address == address && v_length == length && v_type == type) { result = 1; /* identical overlap ok */ - else if ( v_type == MTRR_TYPE_UNCACHEABLE && - type == MTRR_TYPE_UNCACHEABLE ) { + } else if (v_type == MTRR_TYPE_UNCACHEABLE && + type == MTRR_TYPE_UNCACHEABLE) { /* UC ranges can overlap */ - } - else if ((v_type == MTRR_TYPE_UNCACHEABLE && - type == MTRR_TYPE_WRITEBACK) || - (v_type == MTRR_TYPE_WRITEBACK && - type == MTRR_TYPE_UNCACHEABLE)) { + } else if ((v_type == MTRR_TYPE_UNCACHEABLE && + type == MTRR_TYPE_WRITEBACK) || + (v_type == MTRR_TYPE_WRITEBACK && + type == MTRR_TYPE_UNCACHEABLE)) { /* UC/WB can overlap - effective type becomes UC */ - } - else { + } else { /* anything else may cause undefined behavior */ result = -1; } @@ -677,11 +691,12 @@ var_range_overlap(mtrr_var_range_t * range, addr64_t address, void pat_init(void) { - boolean_t istate; - uint64_t pat; + boolean_t istate; + uint64_t pat; - if (!(cpuid_features() & CPUID_FEATURE_PAT)) + if (!(cpuid_features() & CPUID_FEATURE_PAT)) { return; + } istate = ml_set_interrupts_enabled(FALSE); @@ -689,7 +704,7 @@ pat_init(void) DBG("CPU%d PAT: was 0x%016llx\n", get_cpu_number(), pat); /* Change PA6 attribute field to WC if required */ - if ((pat & ~(0x0FULL << 48)) != (0x01ULL << 48)) { + if ((pat & (0x07ULL << 48)) != (0x01ULL << 48)) { mtrr_update_action(CACHE_CONTROL_PAT); } ml_set_interrupts_enabled(istate); diff --git a/osfmk/i386/mtrr.h 
b/osfmk/i386/mtrr.h index a749ad4ea..9ec426d43 100644 --- a/osfmk/i386/mtrr.h +++ b/osfmk/i386/mtrr.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -53,13 +53,13 @@ extern void mtrr_init(void); extern kern_return_t mtrr_update_cpu(void); extern kern_return_t mtrr_update_all_cpus(void); -extern kern_return_t mtrr_range_add( addr64_t phys_addr, - uint64_t length, - uint32_t mem_type); +extern kern_return_t mtrr_range_add( addr64_t phys_addr, + uint64_t length, + uint32_t mem_type); -extern kern_return_t mtrr_range_remove( addr64_t phys_addr, - uint64_t length, - uint32_t mem_type); +extern kern_return_t mtrr_range_remove( addr64_t phys_addr, + uint64_t length, + uint32_t mem_type); extern void pat_init(void); diff --git a/osfmk/i386/pal_hibernate.h b/osfmk/i386/pal_hibernate.h index 13d48fbe4..32697bdf2 100644 --- a/osfmk/i386/pal_hibernate.h +++ b/osfmk/i386/pal_hibernate.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,19 +22,19 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _I386_PAL_HIBERNATE_H #define _I386_PAL_HIBERNATE_H #define HIB_MAP_SIZE (2*I386_LPGBYTES) -#define DEST_COPY_AREA (4*GB - HIB_MAP_SIZE) /*4GB - 2*2m */ -#define SRC_COPY_AREA (DEST_COPY_AREA - HIB_MAP_SIZE) -#define COPY_PAGE_AREA (SRC_COPY_AREA - HIB_MAP_SIZE) -#define BITMAP_AREA (COPY_PAGE_AREA - HIB_MAP_SIZE) -#define IMAGE_AREA (BITMAP_AREA - HIB_MAP_SIZE) -#define IMAGE2_AREA (IMAGE_AREA - HIB_MAP_SIZE) +#define DEST_COPY_AREA (4*GB - HIB_MAP_SIZE) /*4GB - 2*2m */ +#define SRC_COPY_AREA (DEST_COPY_AREA - HIB_MAP_SIZE) +#define COPY_PAGE_AREA (SRC_COPY_AREA - HIB_MAP_SIZE) +#define BITMAP_AREA (COPY_PAGE_AREA - HIB_MAP_SIZE) +#define IMAGE_AREA (BITMAP_AREA - HIB_MAP_SIZE) +#define IMAGE2_AREA (IMAGE_AREA - HIB_MAP_SIZE) #define HIB_BASE segHIBB #define HIB_ENTRYPOINT acpi_wake_prot_entry @@ -42,6 +42,6 @@ uintptr_t pal_hib_map(uintptr_t v, uint64_t p); void hibernateRestorePALState(uint32_t *src); void pal_hib_patchup(void); -#define PAL_HIBERNATE_MAGIC_1 0xfeedfacedeadbeef -#define PAL_HIBERNATE_MAGIC_2 0x41b312133714 +#define PAL_HIBERNATE_MAGIC_1 0xfeedfacedeadbeef +#define PAL_HIBERNATE_MAGIC_2 0x41b312133714 #endif /* _I386_PAL_HIBERNATE_H */ diff --git a/osfmk/i386/pal_native.h b/osfmk/i386/pal_native.h index 4a0225a05..bb9f29b1f 100644 --- a/osfmk/i386/pal_native.h +++ b/osfmk/i386/pal_native.h @@ -2,7 +2,7 @@ * Copyright (c) 2009-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
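Evaluated with I386_LPGBYTES equal to the 2 MB large-page size, the pal_hibernate.h constants above carve six fixed 4 MB windows (two large pages each) downward from the 4 GB boundary:

	DEST_COPY_AREA = 4 GB -  4 MB = 0xFFC00000
	SRC_COPY_AREA  = 4 GB -  8 MB = 0xFF800000
	COPY_PAGE_AREA = 4 GB - 12 MB = 0xFF400000
	BITMAP_AREA    = 4 GB - 16 MB = 0xFF000000
	IMAGE_AREA     = 4 GB - 20 MB = 0xFEC00000
	IMAGE2_AREA    = 4 GB - 24 MB = 0xFE800000

Keeping every window below 4 GB presumably keeps them reachable through 32-bit addressing during the early wake/restore path.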
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _I386_PAL_I386_H @@ -35,18 +35,19 @@ #define pal_dbg_set_task_name( x ) #define pal_set_signal_delivery( x ) -#define pal_is_usable_memory(b, t) (TRUE) +#define pal_is_usable_memory(b, t) (TRUE) -#define pal_hlt() __asm__ volatile ("sti; hlt") -#define pal_sti() __asm__ volatile ("sti") -#define pal_cli() __asm__ volatile ("cli") +#define pal_hlt() __asm__ volatile ("sti; hlt") +#define pal_sti() __asm__ volatile ("sti") +#define pal_cli() __asm__ volatile ("cli") static inline void pal_stop_cpu(boolean_t cli) { - if (cli) - __asm__ volatile ( "cli" ); - __asm__ volatile ( "wbinvd; hlt" ); + if (cli) { + __asm__ volatile ( "cli"); + } + __asm__ volatile ( "wbinvd; hlt"); } #define pal_register_cache_state(t, v) @@ -55,8 +56,8 @@ pal_stop_cpu(boolean_t cli) #define pal_thread_terminate_self(t) #define pal_ast_check(t) -#define panic_display_pal_info() do { } while(0) -#define pal_kernel_announce() do { } while(0) +#define panic_display_pal_info() do { } while(0) +#define pal_kernel_announce() do { } while(0) #define PAL_AICPM_PROPERTY_VALUE 0 @@ -68,28 +69,25 @@ pal_stop_cpu(boolean_t cli) #define PAL_KDP_ADDR(x) (x) struct pal_rtc_nanotime { - volatile uint64_t tsc_base; /* timestamp */ - volatile uint64_t ns_base; /* nanoseconds */ - uint32_t scale; /* tsc -> nanosec multiplier */ - uint32_t shift; /* shift is nonzero only on "slow" machines, */ - /* ie where tscFreq <= SLOW_TSC_THRESHOLD */ - volatile uint32_t generation; /* 0 == being updated */ - uint32_t spare1; + volatile uint64_t tsc_base; /* timestamp */ + volatile uint64_t ns_base; /* nanoseconds */ + uint32_t scale; /* tsc -> nanosec multiplier */ + uint32_t shift; /* shift is nonzero only on "slow" machines, */ + /* ie where tscFreq <= SLOW_TSC_THRESHOLD */ + volatile uint32_t generation; /* 0 == being updated */ + uint32_t spare1; }; #ifdef MACH_KERNEL_PRIVATE struct pal_cpu_data { - }; struct pal_pcb { - }; struct pal_apic_table { - }; #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/i386/pal_routines.c b/osfmk/i386/pal_routines.c index b11aaf462..1455ab223 100644 --- a/osfmk/i386/pal_routines.c +++ b/osfmk/i386/pal_routines.c @@ -2,7 +2,7 @@ * Copyright (c) 2009-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
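The pal_rtc_nanotime layout above implies a lock-free read protocol: generation doubles as a sequence counter, with 0 reserved to mean "update in progress". A C rendering of the reader side — the kernel implements this in assembly against _pal_rtc_nanotime_store, and the placement of the shift (pre-scaling the TSC delta on "slow" machines) is an assumption based on the structure comments:

#include <stdint.h>

extern uint64_t rdtsc64(void);          /* illustrative stand-in */

static uint64_t
nanotime_read(volatile struct pal_rtc_nanotime *nt)
{
	uint32_t gen;
	uint64_t delta, ns;

	do {
		/* generation == 0: an update is in flight, spin */
		while ((gen = nt->generation) == 0) {
			;
		}
		delta = (rdtsc64() - nt->tsc_base) << nt->shift;
		/* periodic rebasing of tsc_base is assumed to keep
		 * delta * scale within 64 bits */
		ns = nt->ns_base + ((delta * nt->scale) >> 32);
	} while (gen != nt->generation);    /* torn read: retry */

	return ns;
}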
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -57,16 +57,16 @@ extern void *gPEEFIRuntimeServices; extern void *gPEEFISystemTable; /* nanotime conversion information */ -pal_rtc_nanotime_t pal_rtc_nanotime_info = {0,0,0,0,1,0}; +pal_rtc_nanotime_t pal_rtc_nanotime_info = {0, 0, 0, 0, 1, 0}; /* APIC kext may use this to access xnu internal state */ struct pal_apic_table *apic_table = NULL; -decl_simple_lock_data(static , pal_efi_lock); +decl_simple_lock_data(static, pal_efi_lock); #ifdef __x86_64__ -static pml4_entry_t IDPML4[PTE_PER_PAGE] __attribute__ ((aligned (4096))); -uint64_t pal_efi_saved_cr0; -uint64_t pal_efi_saved_cr3; +static pml4_entry_t IDPML4[PTE_PER_PAGE] __attribute__ ((aligned(4096))); +uint64_t pal_efi_saved_cr0; +uint64_t pal_efi_saved_cr3; #endif @@ -87,8 +87,9 @@ void pal_serial_putc(char c) { serial_putc(c); - if (c == '\n') + if (c == '\n') { serial_putc('\r'); + } } int @@ -102,12 +103,12 @@ pal_serial_getc(void) void pal_i386_init(void) { - simple_lock_init(&pal_efi_lock, 0); + simple_lock_init(&pal_efi_lock, 0); } void -pal_get_control_registers( pal_cr_t *cr0, pal_cr_t *cr2, - pal_cr_t *cr3, pal_cr_t *cr4 ) +pal_get_control_registers( pal_cr_t *cr0, pal_cr_t *cr2, + pal_cr_t *cr3, pal_cr_t *cr4 ) { *cr0 = get_cr0(); *cr2 = get_cr2(); @@ -123,8 +124,8 @@ pal_get_control_registers( pal_cr_t *cr0, pal_cr_t *cr2, #undef pal_dbg_page_fault void pal_dbg_page_fault( thread_t thread __unused, - user_addr_t vaddr __unused, - kern_return_t kr __unused ) + user_addr_t vaddr __unused, + kern_return_t kr __unused ) { } @@ -143,167 +144,167 @@ pal_set_signal_delivery(thread_t thread __unused) /* EFI thunks */ extern void _pal_efi_call_in_64bit_mode_asm(uint64_t func, - struct pal_efi_registers *efi_reg, - void *stack_contents, - size_t stack_contents_size); + struct pal_efi_registers *efi_reg, + void *stack_contents, + size_t stack_contents_size); kern_return_t pal_efi_call_in_64bit_mode(uint64_t func, - struct pal_efi_registers *efi_reg, - void *stack_contents, - size_t stack_contents_size, /* 16-byte multiple */ - uint64_t *efi_status) + struct pal_efi_registers *efi_reg, + void *stack_contents, + size_t stack_contents_size, /* 16-byte multiple */ + uint64_t *efi_status) { - DBG("pal_efi_call_in_64bit_mode(0x%016llx, %p, %p, %lu, %p)\n", - func, efi_reg, stack_contents, stack_contents_size, efi_status); - - if (func == 0) { - return KERN_INVALID_ADDRESS; - } - - if ((efi_reg == NULL) - || (stack_contents == NULL) - || (stack_contents_size % 16 != 0)) { - return KERN_INVALID_ARGUMENT; - } - - if (!gPEEFISystemTable || !gPEEFIRuntimeServices) { - return KERN_NOT_SUPPORTED; - } - - if (func < VM_MIN_KERNEL_ADDRESS) { - /* - * EFI Runtime Services must be mapped in our address - * space at an appropriate location. - */ - return KERN_INVALID_ADDRESS; - } - - _pal_efi_call_in_64bit_mode_asm(func, - efi_reg, - stack_contents, - stack_contents_size); - - *efi_status = efi_reg->rax; - - return KERN_SUCCESS; + DBG("pal_efi_call_in_64bit_mode(0x%016llx, %p, %p, %lu, %p)\n", + func, efi_reg, stack_contents, stack_contents_size, efi_status); + + if (func == 0) { + return KERN_INVALID_ADDRESS; + } + + if ((efi_reg == NULL) + || (stack_contents == NULL) + || (stack_contents_size % 16 != 0)) { + return KERN_INVALID_ARGUMENT; + } + + if (!gPEEFISystemTable || !gPEEFIRuntimeServices) { + return KERN_NOT_SUPPORTED; + } + + if (func < VM_MIN_KERNEL_ADDRESS) { + /* + * EFI Runtime Services must be mapped in our address + * space at an appropriate location. 
+ */ + return KERN_INVALID_ADDRESS; + } + + _pal_efi_call_in_64bit_mode_asm(func, + efi_reg, + stack_contents, + stack_contents_size); + + *efi_status = efi_reg->rax; + + return KERN_SUCCESS; } extern void _pal_efi_call_in_32bit_mode_asm(uint32_t func, - struct pal_efi_registers *efi_reg, - void *stack_contents, - size_t stack_contents_size); + struct pal_efi_registers *efi_reg, + void *stack_contents, + size_t stack_contents_size); kern_return_t pal_efi_call_in_32bit_mode(uint32_t func, - struct pal_efi_registers *efi_reg, - void *stack_contents, - size_t stack_contents_size, /* 16-byte multiple */ - uint32_t *efi_status) + struct pal_efi_registers *efi_reg, + void *stack_contents, + size_t stack_contents_size, /* 16-byte multiple */ + uint32_t *efi_status) { - DBG("pal_efi_call_in_32bit_mode(0x%08x, %p, %p, %lu, %p)\n", - func, efi_reg, stack_contents, stack_contents_size, efi_status); - - if (func == 0) { - return KERN_INVALID_ADDRESS; - } - - if ((efi_reg == NULL) - || (stack_contents == NULL) - || (stack_contents_size % 16 != 0)) { - return KERN_INVALID_ARGUMENT; - } - - if (!gPEEFISystemTable || !gPEEFIRuntimeServices) { - return KERN_NOT_SUPPORTED; - } - - DBG("pal_efi_call_in_32bit_mode() efi_reg:\n"); - DBG(" rcx: 0x%016llx\n", efi_reg->rcx); - DBG(" rdx: 0x%016llx\n", efi_reg->rdx); - DBG(" r8: 0x%016llx\n", efi_reg->r8); - DBG(" r9: 0x%016llx\n", efi_reg->r9); - DBG(" rax: 0x%016llx\n", efi_reg->rax); - - DBG("pal_efi_call_in_32bit_mode() stack:\n"); + DBG("pal_efi_call_in_32bit_mode(0x%08x, %p, %p, %lu, %p)\n", + func, efi_reg, stack_contents, stack_contents_size, efi_status); + + if (func == 0) { + return KERN_INVALID_ADDRESS; + } + + if ((efi_reg == NULL) + || (stack_contents == NULL) + || (stack_contents_size % 16 != 0)) { + return KERN_INVALID_ARGUMENT; + } + + if (!gPEEFISystemTable || !gPEEFIRuntimeServices) { + return KERN_NOT_SUPPORTED; + } + + DBG("pal_efi_call_in_32bit_mode() efi_reg:\n"); + DBG(" rcx: 0x%016llx\n", efi_reg->rcx); + DBG(" rdx: 0x%016llx\n", efi_reg->rdx); + DBG(" r8: 0x%016llx\n", efi_reg->r8); + DBG(" r9: 0x%016llx\n", efi_reg->r9); + DBG(" rax: 0x%016llx\n", efi_reg->rax); + + DBG("pal_efi_call_in_32bit_mode() stack:\n"); #if PAL_DEBUG - size_t i; - for (i = 0; i < stack_contents_size; i += sizeof(uint32_t)) { - uint32_t *p = (uint32_t *) ((uintptr_t)stack_contents + i); - DBG(" %p: 0x%08x\n", p, *p); - } + size_t i; + for (i = 0; i < stack_contents_size; i += sizeof(uint32_t)) { + uint32_t *p = (uint32_t *) ((uintptr_t)stack_contents + i); + DBG(" %p: 0x%08x\n", p, *p); + } #endif #ifdef __x86_64__ - /* - * Ensure no interruptions. - * Taking a spinlock for serialization is technically unnecessary - * because the EFIRuntime kext should serialize. - */ - boolean_t istate = ml_set_interrupts_enabled(FALSE); - simple_lock(&pal_efi_lock); - - /* - * Switch to special page tables with the entire high kernel space - * double-mapped into the bottom 4GB. - * - * NB: We assume that all data passed exchanged with RuntimeServices is - * located in the 4GB of KVA based at VM_MIN_ADDRESS. In particular, kexts - * loaded the basement (below VM_MIN_ADDRESS) cannot pass static data. - * Kernel stack and heap space is OK. 
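The double mapping described in the comment above works because each PML4 entry covers a 512 GB (2^39-byte) slice of the virtual address space: copying the kernel's PML4 entry into slot 0 makes the same physical pages visible both at their normal high kernel addresses and in the bottom 512 GB, of which compatibility-mode firmware code can address only the first 4 GB. A schematic sketch of the aliasing arithmetic (illustrative, matching the IDPML4 assignments in the code below):

/* Each PML4 slot maps 2^39 bytes of VA. */
#define PML4_SPAN_SHIFT 39

/* The low alias of a high kernel VA under IDPML4: keep only the
 * offset within the PML4 slot. */
static inline uint64_t
low_alias_of(uint64_t kva)
{
	return kva & ((1ULL << PML4_SPAN_SHIFT) - 1);
}

This is also why the NB above restricts RuntimeServices buffers to the 4 GB of KVA based at VM_MIN_ADDRESS: only that much of the alias is reachable from 32-bit code.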
- */ - MARK_CPU_IDLE(cpu_number()); - pal_efi_saved_cr3 = get_cr3_raw(); - pal_efi_saved_cr0 = get_cr0(); - IDPML4[KERNEL_PML4_INDEX] = IdlePML4[KERNEL_PML4_INDEX]; - IDPML4[0] = IdlePML4[KERNEL_PML4_INDEX]; - clear_ts(); - set_cr3_raw((uint64_t) ID_MAP_VTOP(IDPML4)); - - swapgs(); /* Save kernel's GS base */ - - /* Set segment state ready for compatibility mode */ - set_gs(NULL_SEG); - set_fs(NULL_SEG); - set_es(KERNEL_DS); - set_ds(KERNEL_DS); - set_ss(KERNEL_DS); - - _pal_efi_call_in_32bit_mode_asm(func, - efi_reg, - stack_contents, - stack_contents_size); - - /* Restore NULL segment state */ - set_ss(NULL_SEG); - set_es(NULL_SEG); - set_ds(NULL_SEG); - - swapgs(); /* Restore kernel's GS base */ - - /* Restore the 64-bit user GS base we just destroyed */ - wrmsr64(MSR_IA32_KERNEL_GS_BASE, + /* + * Ensure no interruptions. + * Taking a spinlock for serialization is technically unnecessary + * because the EFIRuntime kext should serialize. + */ + boolean_t istate = ml_set_interrupts_enabled(FALSE); + simple_lock(&pal_efi_lock, LCK_GRP_NULL); + + /* + * Switch to special page tables with the entire high kernel space + * double-mapped into the bottom 4GB. + * + * NB: We assume that all data passed exchanged with RuntimeServices is + * located in the 4GB of KVA based at VM_MIN_ADDRESS. In particular, kexts + * loaded the basement (below VM_MIN_ADDRESS) cannot pass static data. + * Kernel stack and heap space is OK. + */ + MARK_CPU_IDLE(cpu_number()); + pal_efi_saved_cr3 = get_cr3_raw(); + pal_efi_saved_cr0 = get_cr0(); + IDPML4[KERNEL_PML4_INDEX] = IdlePML4[KERNEL_PML4_INDEX]; + IDPML4[0] = IdlePML4[KERNEL_PML4_INDEX]; + clear_ts(); + set_cr3_raw((uint64_t) ID_MAP_VTOP(IDPML4)); + + swapgs(); /* Save kernel's GS base */ + + /* Set segment state ready for compatibility mode */ + set_gs(NULL_SEG); + set_fs(NULL_SEG); + set_es(KERNEL_DS); + set_ds(KERNEL_DS); + set_ss(KERNEL_DS); + + _pal_efi_call_in_32bit_mode_asm(func, + efi_reg, + stack_contents, + stack_contents_size); + + /* Restore NULL segment state */ + set_ss(NULL_SEG); + set_es(NULL_SEG); + set_ds(NULL_SEG); + + swapgs(); /* Restore kernel's GS base */ + + /* Restore the 64-bit user GS base we just destroyed */ + wrmsr64(MSR_IA32_KERNEL_GS_BASE, current_cpu_datap()->cpu_uber.cu_user_gs_base); - /* End of mapping games */ - set_cr3_raw(pal_efi_saved_cr3); - set_cr0(pal_efi_saved_cr0); - MARK_CPU_ACTIVE(cpu_number()); - - simple_unlock(&pal_efi_lock); - ml_set_interrupts_enabled(istate); + /* End of mapping games */ + set_cr3_raw(pal_efi_saved_cr3); + set_cr0(pal_efi_saved_cr0); + MARK_CPU_ACTIVE(cpu_number()); + + simple_unlock(&pal_efi_lock); + ml_set_interrupts_enabled(istate); #else - _pal_efi_call_in_32bit_mode_asm(func, - efi_reg, - stack_contents, - stack_contents_size); + _pal_efi_call_in_32bit_mode_asm(func, + efi_reg, + stack_contents, + stack_contents_size); #endif - *efi_status = (uint32_t)efi_reg->rax; - DBG("pal_efi_call_in_32bit_mode() efi_status: 0x%x\n", *efi_status); + *efi_status = (uint32_t)efi_reg->rax; + DBG("pal_efi_call_in_32bit_mode() efi_status: 0x%x\n", *efi_status); - return KERN_SUCCESS; + return KERN_SUCCESS; } /* wind-back a syscall instruction */ @@ -311,34 +312,31 @@ void pal_syscall_restart(thread_t thread __unused, x86_saved_state_t *state) { /* work out which flavour thread it is */ - if( is_saved_state32(state) ) - { - x86_saved_state32_t *regs32; + if (is_saved_state32(state)) { + x86_saved_state32_t *regs32; regs32 = saved_state32(state); - if (regs32->cs == SYSENTER_CS || regs32->cs == 
SYSENTER_TF_CS) + if (regs32->cs == SYSENTER_CS || regs32->cs == SYSENTER_TF_CS) { regs32->eip -= 5; - else + } else { regs32->eip -= 2; - } - else - { - x86_saved_state64_t *regs64; + } + } else { + x86_saved_state64_t *regs64; - assert( is_saved_state64(state) ); + assert( is_saved_state64(state)); regs64 = saved_state64(state); /* Only one instruction for 64-bit threads */ regs64->isf.rip -= 2; } - } /* Helper function to put the machine to sleep (or shutdown) */ boolean_t -pal_machine_sleep(uint8_t type_a __unused, uint8_t type_b __unused, uint32_t bit_position __unused, - uint32_t disable_mask __unused, uint32_t enable_mask __unused) +pal_machine_sleep(uint8_t type_a __unused, uint8_t type_b __unused, uint32_t bit_position __unused, + uint32_t disable_mask __unused, uint32_t enable_mask __unused) { return 0; } diff --git a/osfmk/i386/pal_routines.h b/osfmk/i386/pal_routines.h index 4336d45dd..03972d40b 100644 --- a/osfmk/i386/pal_routines.h +++ b/osfmk/i386/pal_routines.h @@ -2,7 +2,7 @@ * Copyright (c) 2009-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _I386_PAL_ROUTINES_H @@ -43,11 +43,11 @@ extern "C" { * only the low-order half is loaded (if applicable) */ struct pal_efi_registers { - uint64_t rcx; - uint64_t rdx; - uint64_t r8; - uint64_t r9; - uint64_t rax; + uint64_t rcx; + uint64_t rdx; + uint64_t r8; + uint64_t r9; + uint64_t rax; }; /* @@ -56,25 +56,25 @@ struct pal_efi_registers { */ kern_return_t pal_efi_call_in_64bit_mode(uint64_t func, - struct pal_efi_registers *efi_reg, - void *stack_contents, - size_t stack_contents_size, /* 16-byte multiple */ - uint64_t *efi_status); + struct pal_efi_registers *efi_reg, + void *stack_contents, + size_t stack_contents_size, /* 16-byte multiple */ + uint64_t *efi_status); kern_return_t pal_efi_call_in_32bit_mode(uint32_t func, - struct pal_efi_registers *efi_reg, - void *stack_contents, - size_t stack_contents_size, /* 16-byte multiple */ - uint32_t *efi_status); + struct pal_efi_registers *efi_reg, + void *stack_contents, + size_t stack_contents_size, /* 16-byte multiple */ + uint32_t *efi_status); /* Go into ACPI sleep */ -boolean_t pal_machine_sleep(uint8_t type_a, - uint8_t type_b, - uint32_t bit_position, - uint32_t disable_mask, - uint32_t enable_mask); +boolean_t pal_machine_sleep(uint8_t type_a, + uint8_t type_b, + uint32_t bit_position, + uint32_t disable_mask, + uint32_t enable_mask); /* xnu internal PAL routines */ #ifdef XNU_KERNEL_PRIVATE @@ -88,7 +88,7 @@ struct pal_apic_table; /* Defined per-platform */ /* For use by APIC kext */ extern struct pal_apic_table *apic_table; - + /* serial / debug output routines */ extern int pal_serial_init(void); extern void pal_serial_putc(char); @@ -100,12 +100,12 @@ extern void pal_i386_init(void); extern void pal_set_signal_delivery(thread_t); /* Get values for cr0..4 */ -extern void pal_get_control_registers( pal_cr_t *cr0, pal_cr_t *cr2, - pal_cr_t *cr3, pal_cr_t *cr4 ); +extern void pal_get_control_registers( pal_cr_t *cr0, pal_cr_t *cr2, + pal_cr_t *cr3, pal_cr_t *cr4 ); /* Debug hook invoked in the page-fault path */ -extern void pal_dbg_page_fault( thread_t thread, user_addr_t vadddr, - kern_return_t kr ); +extern void pal_dbg_page_fault( thread_t thread, user_addr_t vadddr, + kern_return_t kr ); /* Set a task's name in the platform kernel debugger */ extern void pal_dbg_set_task_name( task_t task ); @@ -127,7 +127,7 @@ extern void pal_get_kern_regs( x86_saved_state_t *state ); /* * Platform-specific hlt/sti. 
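The field order of struct pal_efi_registers above mirrors the Microsoft x64 calling convention that UEFI mandates: the first four integer arguments travel in RCX, RDX, R8 and R9, further arguments are spilled to a 16-byte-aligned stack (hence the 16-byte-multiple stack_contents buffer that pal_efi_call_in_64bit_mode() insists on), and the EFI_STATUS result comes back in RAX. A hedged usage sketch — the service address and argument values are placeholders, not real firmware entry points:

#include <string.h>

static kern_return_t
call_two_arg_service(uint64_t service_va)
{
	struct pal_efi_registers regs;
	uint64_t status;
	kern_return_t kr;
	/* Must be non-NULL and a multiple of 16 bytes, even when all
	 * arguments fit in registers. */
	static uint64_t stack[2] __attribute__((aligned(16)));

	memset(&regs, 0, sizeof(regs));
	regs.rcx = 0x1111;      /* first argument  -> RCX */
	regs.rdx = 0x2222;      /* second argument -> RDX */

	kr = pal_efi_call_in_64bit_mode(service_va, &regs,
	    stack, sizeof(stack), &status);
	/* on KERN_SUCCESS, status holds the EFI_STATUS from RAX */
	return kr;
}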
- */ + */ extern void pal_hlt(void); extern void pal_sti(void); extern void pal_cli(void); @@ -148,25 +148,26 @@ void pal_preemption_assert(void); extern boolean_t virtualized; #define PAL_VIRTUALIZED_PROPERTY_VALUE 4 - + /* Allow for tricky IOKit property matching */ #define PAL_AICPM_PROPERTY_NAME "intel_cpupm_matching" -static inline void +static inline void pal_get_resource_property(const char **property_name, int *property_value) { *property_name = PAL_AICPM_PROPERTY_NAME; *property_value = PAL_AICPM_PROPERTY_VALUE; - if (virtualized) + if (virtualized) { *property_value = PAL_VIRTUALIZED_PROPERTY_VALUE; + } } /* assembly function to update TSC / timebase info */ extern void _pal_rtc_nanotime_store( - uint64_t tsc, - uint64_t nsec, - uint32_t scale, - uint32_t shift, - struct pal_rtc_nanotime *dst); + uint64_t tsc, + uint64_t nsec, + uint32_t scale, + uint32_t shift, + struct pal_rtc_nanotime *dst); /* global nanotime info */ extern struct pal_rtc_nanotime pal_rtc_nanotime_info; diff --git a/osfmk/i386/panic_hooks.c b/osfmk/i386/panic_hooks.c index 298994081..af8fa615e 100644 --- a/osfmk/i386/panic_hooks.c +++ b/osfmk/i386/panic_hooks.c @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,54 +37,58 @@ #include "pmap.h" struct panic_hook { - uint32_t magic1; - queue_chain_t chain; - thread_t thread; - panic_hook_fn_t hook_fn; - uint32_t magic2; + uint32_t magic1; + queue_chain_t chain; + thread_t thread; + panic_hook_fn_t hook_fn; + uint32_t magic2; }; typedef char check1_[sizeof(struct panic_hook) - <= sizeof(panic_hook_t) ? 1 : -1]; + <= sizeof(panic_hook_t) ? 1 : -1]; typedef char check2_[PAGE_SIZE == 4096 ? 
1 : -1]; -static hw_lock_data_t panic_hooks_lock; -static queue_head_t panic_hooks; -static uint8_t panic_dump_buf[8192]; +static hw_lock_data_t panic_hooks_lock; +static queue_head_t panic_hooks; +static uint8_t panic_dump_buf[8192]; -#define PANIC_HOOK_MAGIC1 0x4A1C400C -#define PANIC_HOOK_MAGIC2 0xC004C1A4 +#define PANIC_HOOK_MAGIC1 0x4A1C400C +#define PANIC_HOOK_MAGIC2 0xC004C1A4 -void panic_hooks_init(void) +void +panic_hooks_init(void) { hw_lock_init(&panic_hooks_lock); queue_init(&panic_hooks); } -void panic_hook(panic_hook_t *hook_, panic_hook_fn_t hook_fn) +void +panic_hook(panic_hook_t *hook_, panic_hook_fn_t hook_fn) { struct panic_hook *hook = (struct panic_hook *)hook_; - hook->magic1 = PANIC_HOOK_MAGIC1; - hook->magic2 = PANIC_HOOK_MAGIC2; - hook->hook_fn = hook_fn; - hook->thread = current_thread(); + hook->magic1 = PANIC_HOOK_MAGIC1; + hook->magic2 = PANIC_HOOK_MAGIC2; + hook->hook_fn = hook_fn; + hook->thread = current_thread(); - hw_lock_lock(&panic_hooks_lock); + hw_lock_lock(&panic_hooks_lock, LCK_GRP_NULL); queue_enter(&panic_hooks, hook, struct panic_hook *, chain); hw_lock_unlock(&panic_hooks_lock); } -void panic_unhook(panic_hook_t *hook_) +void +panic_unhook(panic_hook_t *hook_) { struct panic_hook *hook = (struct panic_hook *)hook_; - hw_lock_lock(&panic_hooks_lock); + hw_lock_lock(&panic_hooks_lock, LCK_GRP_NULL); queue_remove(&panic_hooks, hook, struct panic_hook *, chain); hw_lock_unlock(&panic_hooks_lock); } -void panic_check_hook(void) +void +panic_check_hook(void) { struct panic_hook *hook; thread_t thread = current_thread(); @@ -92,12 +96,13 @@ void panic_check_hook(void) queue_iterate(&panic_hooks, hook, struct panic_hook *, chain) { if (++count > 1024 - || !kvtophys((vm_offset_t)hook) - || !kvtophys((vm_offset_t)hook + sizeof (*hook) - 1) - || hook->magic1 != PANIC_HOOK_MAGIC1 - || hook->magic2 != PANIC_HOOK_MAGIC2 - || !kvtophys((vm_offset_t)hook->hook_fn)) + || !kvtophys((vm_offset_t)hook) + || !kvtophys((vm_offset_t)hook + sizeof(*hook) - 1) + || hook->magic1 != PANIC_HOOK_MAGIC1 + || hook->magic2 != PANIC_HOOK_MAGIC2 + || !kvtophys((vm_offset_t)hook->hook_fn)) { return; + } if (hook->thread == thread) { hook->hook_fn((panic_hook_t *)hook); @@ -114,26 +119,28 @@ void panic_check_hook(void) * Remember the debug buffer isn't very big so don't try and dump too * much. 
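check1_ and check2_ above are the classic pre-C11 compile-time assertion: if the condition is false, the typedef'd array gets size -1 and compilation fails. check1_ guards the cast from the caller-supplied opaque panic_hook_t (six uint64_t, i.e. 48 bytes) to struct panic_hook, which on LP64 works out to the same size. A sketch of the idiom and the (assumed) layout arithmetic:

#include <stdint.h>

/* Pre-C11 static assertion: a negative array size cannot compile. */
#define OLD_STYLE_STATIC_ASSERT(cond, name) \
	typedef char name[(cond) ? 1 : -1]

/*
 * Assumed LP64 layout of struct panic_hook:
 *   magic1    4 bytes  (+4 padding before the pointer pair)
 *   chain    16 bytes  (queue_chain_t: two pointers)
 *   thread    8 bytes
 *   hook_fn   8 bytes
 *   magic2    4 bytes  (+4 tail padding)
 *   total    48 bytes == sizeof(uint64_t[6]) == sizeof(panic_hook_t)
 */
OLD_STYLE_STATIC_ASSERT(sizeof(uint64_t[6]) == 48, check_opaque_size_);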
*/ -void panic_dump_mem(const void *addr, int len) +void +panic_dump_mem(const void *addr, int len) { void *scratch = panic_dump_buf + 4096; for (; len > 0; addr = (const uint8_t *)addr + PAGE_SIZE, len -= PAGE_SIZE) { - if (!kvtophys((vm_offset_t)addr)) + if (!kvtophys((vm_offset_t)addr)) { continue; + } // 4095 is multiple of 3 -- see below int n = WKdm_compress_new((const WK_word *)addr, (WK_word *)(void *)panic_dump_buf, - scratch, 4095); + scratch, 4095); - if (n == -1) + if (n == -1) { return; // Give up - + } kdb_log("%p: ", addr); // Dump out base64 static char base64_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz0123456789+/"; + "abcdefghijklmnopqrstuvwxyz0123456789+/"; // Pad to multiple of 3 switch (n % 3) { @@ -167,37 +174,43 @@ void panic_dump_mem(const void *addr, int len) } } -boolean_t panic_phys_range_before(const void *addr, uint64_t *pphys, - panic_phys_range_t *range) +boolean_t +panic_phys_range_before(const void *addr, uint64_t *pphys, + panic_phys_range_t *range) { *pphys = kvtophys((vm_offset_t)addr); const boot_args *args = PE_state.bootArgs; - if (!kvtophys((vm_offset_t)args)) + if (!kvtophys((vm_offset_t)args)) { return FALSE; + } const EfiMemoryRange *r = PHYSMAP_PTOV((uintptr_t)args->MemoryMap), *closest = NULL; const uint32_t size = args->MemoryMapDescriptorSize; const uint32_t count = args->MemoryMapSize / size; - if (count > 1024) // Sanity check + if (count > 1024) { // Sanity check return FALSE; + } for (uint32_t i = 0; i < count; ++i, r = (const EfiMemoryRange *)(const void *)((const uint8_t *)r + size)) { - if (r->PhysicalStart + r->NumberOfPages * PAGE_SIZE > *pphys) + if (r->PhysicalStart + r->NumberOfPages * PAGE_SIZE > *pphys) { continue; + } - if (!closest || r->PhysicalStart > closest->PhysicalStart) + if (!closest || r->PhysicalStart > closest->PhysicalStart) { closest = r; + } } - if (!closest) + if (!closest) { return FALSE; + } - range->type = closest->Type; - range->phys_start = closest->PhysicalStart; - range->len = closest->NumberOfPages * PAGE_SIZE; + range->type = closest->Type; + range->phys_start = closest->PhysicalStart; + range->len = closest->NumberOfPages * PAGE_SIZE; return TRUE; } diff --git a/osfmk/i386/panic_hooks.h b/osfmk/i386/panic_hooks.h index 10b38e575..6a6cd5c6e 100644 --- a/osfmk/i386/panic_hooks.h +++ b/osfmk/i386/panic_hooks.h @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
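The "4095 is multiple of 3" comment above is what lets the dump loop skip '=' padding entirely: base64 turns each 3-byte group into exactly 4 output characters, so capping the compressed size at a multiple of 3 and zero-padding any short tail keeps every group whole. The core transform, as a generic sketch rather than the kernel's exact loop:

#include <stdint.h>

static const char b64[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz0123456789+/";

/* Encode exactly n bytes, where n % 3 == 0, producing 4*(n/3) chars. */
static void
base64_groups(const uint8_t *in, int n, char *out)
{
	for (int i = 0; i < n; i += 3) {
		uint32_t v = (uint32_t)in[i] << 16 |
		    (uint32_t)in[i + 1] << 8 |
		    (uint32_t)in[i + 2];
		*out++ = b64[(v >> 18) & 0x3F];
		*out++ = b64[(v >> 12) & 0x3F];
		*out++ = b64[(v >> 6) & 0x3F];
		*out++ = b64[v & 0x3F];
	}
	*out = '\0';
}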
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,7 +35,7 @@ #include typedef struct { - uint64_t opaque[6]; + uint64_t opaque[6]; } panic_hook_t; typedef void (*panic_hook_fn_t)(panic_hook_t *); @@ -53,8 +53,8 @@ typedef struct panic_phys_range { uint64_t len; } panic_phys_range_t; -boolean_t panic_phys_range_before(const void *addr, uint64_t *pphys, - panic_phys_range_t *range); +boolean_t panic_phys_range_before(const void *addr, uint64_t *pphys, + panic_phys_range_t *range); #endif // XNU_KERNEL_PRIVATE diff --git a/osfmk/i386/pcb.c b/osfmk/i386/pcb.c index 9f1471f36..fe5d56b8e 100644 --- a/osfmk/i386/pcb.c +++ b/osfmk/i386/pcb.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -93,6 +93,7 @@ #include #include #include /* LAPIC_PMC_SWI_VECTOR */ +#include #if HYPERVISOR #include @@ -102,36 +103,39 @@ * Maps state flavor to number of words in the state: */ unsigned int _MachineStateCount[] = { - [x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT, - [x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT, - [x86_THREAD_STATE] = x86_THREAD_STATE_COUNT, - [x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT, - [x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT, - [x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT, - [x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT, - [x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT, - [x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT, - [x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT, - [x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT, - [x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT, - [x86_AVX_STATE32] = x86_AVX_STATE32_COUNT, - [x86_AVX_STATE64] = x86_AVX_STATE64_COUNT, - [x86_AVX_STATE] = x86_AVX_STATE_COUNT, + [x86_THREAD_STATE32] = x86_THREAD_STATE32_COUNT, + [x86_THREAD_STATE64] = x86_THREAD_STATE64_COUNT, + [x86_THREAD_FULL_STATE64] = x86_THREAD_FULL_STATE64_COUNT, + [x86_THREAD_STATE] = x86_THREAD_STATE_COUNT, + [x86_FLOAT_STATE32] = x86_FLOAT_STATE32_COUNT, + [x86_FLOAT_STATE64] = x86_FLOAT_STATE64_COUNT, + [x86_FLOAT_STATE] = x86_FLOAT_STATE_COUNT, + [x86_EXCEPTION_STATE32] = x86_EXCEPTION_STATE32_COUNT, + [x86_EXCEPTION_STATE64] = x86_EXCEPTION_STATE64_COUNT, + [x86_EXCEPTION_STATE] = x86_EXCEPTION_STATE_COUNT, + [x86_DEBUG_STATE32] = x86_DEBUG_STATE32_COUNT, + [x86_DEBUG_STATE64] = x86_DEBUG_STATE64_COUNT, + [x86_DEBUG_STATE] = x86_DEBUG_STATE_COUNT, + [x86_AVX_STATE32] = x86_AVX_STATE32_COUNT, + [x86_AVX_STATE64] = x86_AVX_STATE64_COUNT, + [x86_AVX_STATE] = x86_AVX_STATE_COUNT, #if !defined(RC_HIDE_XNU_J137) - [x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT, - [x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT, - [x86_AVX512_STATE] = x86_AVX512_STATE_COUNT, + [x86_AVX512_STATE32] = x86_AVX512_STATE32_COUNT, + [x86_AVX512_STATE64] = x86_AVX512_STATE64_COUNT, + [x86_AVX512_STATE] = x86_AVX512_STATE_COUNT, #endif /* not RC_HIDE_XNU_J137 */ }; -zone_t iss_zone; /* zone for saved_state area */ -zone_t ids_zone; /* zone for debug_state area */ +zone_t iss_zone; /* zone for saved_state area */ +zone_t ids_zone; /* zone for debug_state area */ + +extern int allow_64bit_proc_LDT_ops; /* Forward */ -extern void Thread_continue(void); -extern void Load_context( - thread_t thread) __attribute__((noreturn)); +extern void Thread_continue(void); +extern void Load_context( + thread_t thread) __attribute__((noreturn)); static void get_exception_state32(thread_t thread, x86_exception_state32_t *es); @@ -143,23 +147,25 @@ static void get_thread_state32(thread_t thread, x86_thread_state32_t *ts); static void -get_thread_state64(thread_t thread, x86_thread_state64_t *ts); +get_thread_state64(thread_t thread, void *ts, boolean_t full); static int set_thread_state32(thread_t thread, x86_thread_state32_t *ts); static int -set_thread_state64(thread_t thread, x86_thread_state64_t *ts); +set_thread_state64(thread_t thread, void *ts, boolean_t full); #if HYPERVISOR static inline void ml_hv_cswitch(thread_t old, thread_t new) { - if (old->hv_thread_target) + if (old->hv_thread_target) { hv_callbacks.preempt(old->hv_thread_target); + } - if (new->hv_thread_target) - hv_callbacks.dispatch(new->hv_thread_target); + if (new->hv_thread_target) { + hv_callbacks.dispatch(new->hv_thread_target); + } } #endif @@ -179,21 +185,26 @@ dr7d_is_valid(uint32_t *dr7d) * If the DE bit is set in 
CR4, R/W0-3 can be pattern * "10B" to indicate i/o reads and write */ - if (!(get_cr4() & CR4_DE)) - for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 4; - i++, mask1 <<= 4, mask2 <<= 4) - if ((*dr7d & mask1) == mask2) - return (FALSE); + if (!(get_cr4() & CR4_DE)) { + for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 4; + i++, mask1 <<= 4, mask2 <<= 4) { + if ((*dr7d & mask1) == mask2) { + return FALSE; + } + } + } /* * if we are doing an instruction execution break (indicated * by r/w[x] being "00B"), then the len[x] must also be set * to "00B" */ - for (i = 0; i < 4; i++) - if (((((*dr7d >> (16 + i*4))) & 0x3) == 0) && - ((((*dr7d >> (18 + i*4))) & 0x3) != 0)) - return (FALSE); + for (i = 0; i < 4; i++) { + if (((((*dr7d >> (16 + i * 4))) & 0x3) == 0) && + ((((*dr7d >> (18 + i * 4))) & 0x3) != 0)) { + return FALSE; + } + } /* * Intel docs have these bits fixed. @@ -208,28 +219,33 @@ dr7d_is_valid(uint32_t *dr7d) * We don't allow anything to set the global breakpoints. */ - if (*dr7d & 0x2) - return (FALSE); + if (*dr7d & 0x2) { + return FALSE; + } - if (*dr7d & (0x2<<2)) - return (FALSE); + if (*dr7d & (0x2 << 2)) { + return FALSE; + } - if (*dr7d & (0x2<<4)) - return (FALSE); + if (*dr7d & (0x2 << 4)) { + return FALSE; + } - if (*dr7d & (0x2<<6)) - return (FALSE); + if (*dr7d & (0x2 << 6)) { + return FALSE; + } - return (TRUE); + return TRUE; } extern void set_64bit_debug_regs(x86_debug_state64_t *ds); boolean_t -debug_state_is_valid32(x86_debug_state32_t *ds) +debug_state_is_valid32(x86_debug_state32_t *ds) { - if (!dr7d_is_valid(&ds->dr7)) + if (!dr7d_is_valid(&ds->dr7)) { return FALSE; + } return TRUE; } @@ -237,28 +253,37 @@ debug_state_is_valid32(x86_debug_state32_t *ds) boolean_t debug_state_is_valid64(x86_debug_state64_t *ds) { - if (!dr7d_is_valid((uint32_t *)&ds->dr7)) + if (!dr7d_is_valid((uint32_t *)&ds->dr7)) { return FALSE; + } /* * Don't allow the user to set debug addresses above their max * value */ - if (ds->dr7 & 0x1) - if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) + if (ds->dr7 & 0x1) { + if (ds->dr0 >= VM_MAX_PAGE_ADDRESS) { return FALSE; + } + } - if (ds->dr7 & (0x1<<2)) - if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) + if (ds->dr7 & (0x1 << 2)) { + if (ds->dr1 >= VM_MAX_PAGE_ADDRESS) { return FALSE; + } + } - if (ds->dr7 & (0x1<<4)) - if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) + if (ds->dr7 & (0x1 << 4)) { + if (ds->dr2 >= VM_MAX_PAGE_ADDRESS) { return FALSE; + } + } - if (ds->dr7 & (0x1<<6)) - if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) + if (ds->dr7 & (0x1 << 6)) { + if (ds->dr3 >= VM_MAX_PAGE_ADDRESS) { return FALSE; + } + } /* For x86-64, we must ensure the upper 32-bits of DR7 are clear */ ds->dr7 &= 0xffffffffULL; @@ -283,7 +308,7 @@ set_debug_state32(thread_t thread, x86_debug_state32_t *ds) new_ids = zalloc(ids_zone); bzero(new_ids, sizeof *new_ids); - simple_lock(&pcb->lock); + simple_lock(&pcb->lock, LCK_GRP_NULL); /* make sure it wasn't already alloc()'d elsewhere */ if (pcb->ids == NULL) { pcb->ids = new_ids; @@ -297,7 +322,7 @@ set_debug_state32(thread_t thread, x86_debug_state32_t *ds) copy_debug_state32(ds, pcb->ids, FALSE); - return (KERN_SUCCESS); + return KERN_SUCCESS; } static kern_return_t @@ -319,11 +344,11 @@ set_debug_state64(thread_t thread, x86_debug_state64_t *ds) #if HYPERVISOR if (thread->hv_thread_target) { hv_callbacks.volatile_state(thread->hv_thread_target, - HV_DEBUG_STATE); + HV_DEBUG_STATE); } #endif - simple_lock(&pcb->lock); + simple_lock(&pcb->lock, LCK_GRP_NULL); /* make sure it wasn't already alloc()'d elsewhere */ if (pcb->ids == NULL) { pcb->ids = 
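The masks and shifts in dr7d_is_valid() above follow the DR7 register layout: for breakpoint i, the local/global enable bits sit at bits 2i and 2i+1, the 2-bit R/W condition field at bit 16 + 4i, and the 2-bit LEN field at bit 18 + 4i. The three rules it enforces, restated with named helpers for illustration:

#include <stdbool.h>
#include <stdint.h>

#define DR7_RW(d, i)     (((d) >> (16 + (i) * 4)) & 0x3) /* 00 exec, 01 write, 10 i/o, 11 r/w */
#define DR7_LEN(d, i)    (((d) >> (18 + (i) * 4)) & 0x3)
#define DR7_GLOBAL(d, i) (((d) >> ((i) * 2 + 1)) & 0x1)

static bool
dr7_ok(uint32_t d, bool cr4_de)
{
	for (int i = 0; i < 4; i++) {
		if (!cr4_de && DR7_RW(d, i) == 0x2) {
			return false;   /* i/o breakpoints need CR4.DE */
		}
		if (DR7_RW(d, i) == 0 && DR7_LEN(d, i) != 0) {
			return false;   /* exec breaks must use LEN 00 */
		}
		if (DR7_GLOBAL(d, i)) {
			return false;   /* global enables are refused */
		}
	}
	return true;
}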
new_ids; @@ -336,7 +361,7 @@ set_debug_state64(thread_t thread, x86_debug_state64_t *ds) copy_debug_state64(ds, pcb->ids, FALSE); - return (KERN_SUCCESS); + return KERN_SUCCESS; } static void @@ -348,8 +373,9 @@ get_debug_state32(thread_t thread, x86_debug_state32_t *ds) if (saved_state) { copy_debug_state32(saved_state, ds, TRUE); - } else + } else { bzero(ds, sizeof *ds); + } } static void @@ -361,8 +387,9 @@ get_debug_state64(thread_t thread, x86_debug_state64_t *ds) if (saved_state) { copy_debug_state64(saved_state, ds, TRUE); - } else + } else { bzero(ds, sizeof *ds); + } } /* @@ -385,14 +412,16 @@ consider_machine_adjust(void) */ void machine_load_context( - thread_t new) + thread_t new) { new->machine.specFlags |= OnProc; act_machine_switch_pcb(NULL, new); Load_context(new); } -static inline void pmap_switch_context(thread_t ot, thread_t nt, int cnum) { +static inline void +pmap_switch_context(thread_t ot, thread_t nt, int cnum) +{ pmap_assert(ml_get_interrupts_enabled() == FALSE); vm_map_t nmap = nt->map, omap = ot->map; if ((omap != nmap) || (nmap->pmap->pagezero_accessible)) { @@ -408,9 +437,9 @@ static inline void pmap_switch_context(thread_t ot, thread_t nt, int cnum) { */ thread_t machine_switch_context( - thread_t old, - thread_continue_t continuation, - thread_t new) + thread_t old, + thread_continue_t continuation, + thread_t new) { assert(current_cpu_datap()->cpu_active_stack == old->kernel_stack); @@ -430,7 +459,7 @@ machine_switch_context( * Monitor the stack depth and report new max, * not worrying about races. */ - vm_offset_t depth = current_stack_depth(); + vm_offset_t depth = current_stack_depth(); if (depth > kernel_stack_depth_max) { kernel_stack_depth_max = depth; KERNEL_DEBUG_CONSTANT( @@ -453,21 +482,21 @@ machine_switch_context( ml_hv_cswitch(old, new); #endif - return(Switch_context(old, continuation, new)); + return Switch_context(old, continuation, new); } -thread_t +thread_t machine_processor_shutdown( - thread_t thread, - void (*doshutdown)(processor_t), - processor_t processor) + thread_t thread, + void (*doshutdown)(processor_t), + processor_t processor) { #if CONFIG_VMX vmx_suspend(); #endif fpu_switch_context(thread, NULL); pmap_switch_context(thread, processor->idle_thread, cpu_number()); - return(Shutdown_context(thread, doshutdown, processor)); + return Shutdown_context(thread, doshutdown, processor); } @@ -479,16 +508,17 @@ kern_return_t machine_thread_state_initialize( thread_t thread) { - /* - * If there's an fpu save area, free it. - * The initialized state will then be lazily faulted-in, if required. - * And if we're target, re-arm the no-fpu trap. - */ + /* + * If there's an fpu save area, free it. + * The initialized state will then be lazily faulted-in, if required. + * And if we're target, re-arm the no-fpu trap. 
+ */ if (thread->machine.ifps) { (void) fpu_set_fxstate(thread, NULL, x86_FLOAT_STATE64); - if (thread == current_thread()) + if (thread == current_thread()) { clear_fpu(); + } } if (thread->machine.ids) { @@ -496,7 +526,7 @@ machine_thread_state_initialize( thread->machine.ids = NULL; } - return KERN_SUCCESS; + return KERN_SUCCESS; } uint32_t @@ -530,38 +560,38 @@ get_eflags_exportmask(void) * for either 32bit or 64bit tasks */ - + static void get_exception_state64(thread_t thread, x86_exception_state64_t *es) { - x86_saved_state64_t *saved_state; + x86_saved_state64_t *saved_state; - saved_state = USER_REGS64(thread); + saved_state = USER_REGS64(thread); es->trapno = saved_state->isf.trapno; es->cpu = saved_state->isf.cpu; es->err = (typeof(es->err))saved_state->isf.err; es->faultvaddr = saved_state->cr2; -} +} static void get_exception_state32(thread_t thread, x86_exception_state32_t *es) { - x86_saved_state32_t *saved_state; + x86_saved_state32_t *saved_state; - saved_state = USER_REGS32(thread); + saved_state = USER_REGS32(thread); es->trapno = saved_state->trapno; es->cpu = saved_state->cpu; es->err = saved_state->err; es->faultvaddr = saved_state->cr2; -} +} static int set_thread_state32(thread_t thread, x86_thread_state32_t *ts) { - x86_saved_state32_t *saved_state; + x86_saved_state32_t *saved_state; pal_register_cache_state(thread, DIRTY); @@ -583,15 +613,16 @@ set_thread_state32(thread_t thread, x86_thread_state32_t *ts) /* Set GS to CTHREAD only if's been established */ ts->gs = thread->machine.cthread_self ? USER_CTHREAD : NULL_SEG; - + /* Check segment selectors are safe */ if (!valid_user_segment_selectors(ts->cs, - ts->ss, - ts->ds, - ts->es, - ts->fs, - ts->gs)) - return(KERN_INVALID_ARGUMENT); + ts->ss, + ts->ds, + ts->es, + ts->fs, + ts->gs)) { + return KERN_INVALID_ARGUMENT; + } saved_state->eax = ts->eax; saved_state->ebx = ts->ebx; @@ -615,24 +646,33 @@ set_thread_state32(thread_t thread, x86_thread_state32_t *ts) * ensure that the user returns via iret * - which is signaled thusly: */ - if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) + if ((saved_state->efl & EFL_TF) && saved_state->cs == SYSENTER_CS) { saved_state->cs = SYSENTER_TF_CS; + } - return(KERN_SUCCESS); + return KERN_SUCCESS; } static int -set_thread_state64(thread_t thread, x86_thread_state64_t *ts) +set_thread_state64(thread_t thread, void *state, int full) { - x86_saved_state64_t *saved_state; + x86_thread_state64_t *ts; + x86_saved_state64_t *saved_state; + + if (full == TRUE) { + ts = &((x86_thread_full_state64_t *)state)->ss64; + } else { + ts = (x86_thread_state64_t *)state; + } pal_register_cache_state(thread, DIRTY); saved_state = USER_REGS64(thread); if (!IS_USERADDR64_CANONICAL(ts->rsp) || - !IS_USERADDR64_CANONICAL(ts->rip)) - return(KERN_INVALID_ARGUMENT); + !IS_USERADDR64_CANONICAL(ts->rip)) { + return KERN_INVALID_ARGUMENT; + } saved_state->r8 = ts->r8; saved_state->r9 = ts->r9; @@ -652,11 +692,22 @@ set_thread_state64(thread_t thread, x86_thread_state64_t *ts) saved_state->isf.rsp = ts->rsp; saved_state->isf.rflags = (ts->rflags & ~EFL_USER_CLEAR) | EFL_USER_SET; saved_state->isf.rip = ts->rip; - saved_state->isf.cs = USER64_CS; + + if (full == FALSE) { + saved_state->isf.cs = USER64_CS; + } else { + saved_state->isf.cs = ((x86_thread_full_state64_t *)ts)->ss64.cs; + saved_state->isf.ss = ((x86_thread_full_state64_t *)ts)->ss; + saved_state->ds = (uint32_t)((x86_thread_full_state64_t *)ts)->ds; + saved_state->es = (uint32_t)((x86_thread_full_state64_t *)ts)->es; + 
machine_thread_set_tsd_base(thread, + ((x86_thread_full_state64_t *)ts)->gsbase); + } + saved_state->fs = (uint32_t)ts->fs; saved_state->gs = (uint32_t)ts->gs; - return(KERN_SUCCESS); + return KERN_SUCCESS; } @@ -664,7 +715,7 @@ set_thread_state64(thread_t thread, x86_thread_state64_t *ts) static void get_thread_state32(thread_t thread, x86_thread_state32_t *ts) { - x86_saved_state32_t *saved_state; + x86_saved_state32_t *saved_state; pal_register_cache_state(thread, VALID); @@ -690,9 +741,16 @@ get_thread_state32(thread_t thread, x86_thread_state32_t *ts) static void -get_thread_state64(thread_t thread, x86_thread_state64_t *ts) +get_thread_state64(thread_t thread, void *state, boolean_t full) { - x86_saved_state64_t *saved_state; + x86_thread_state64_t *ts; + x86_saved_state64_t *saved_state; + + if (full == TRUE) { + ts = &((x86_thread_full_state64_t *)state)->ss64; + } else { + ts = (x86_thread_state64_t *)state; + } pal_register_cache_state(thread, VALID); @@ -717,16 +775,25 @@ get_thread_state64(thread_t thread, x86_thread_state64_t *ts) ts->rflags = saved_state->isf.rflags; ts->rip = saved_state->isf.rip; ts->cs = saved_state->isf.cs; + + if (full == TRUE) { + ((x86_thread_full_state64_t *)state)->ds = saved_state->ds; + ((x86_thread_full_state64_t *)state)->es = saved_state->es; + ((x86_thread_full_state64_t *)state)->ss = saved_state->isf.ss; + ((x86_thread_full_state64_t *)state)->gsbase = + thread->machine.cthread_self; + } + ts->fs = saved_state->fs; ts->gs = saved_state->gs; } kern_return_t machine_thread_state_convert_to_user( - __unused thread_t thread, - __unused thread_flavor_t flavor, - __unused thread_state_t tstate, - __unused mach_msg_type_number_t *count) + __unused thread_t thread, + __unused thread_flavor_t flavor, + __unused thread_state_t tstate, + __unused mach_msg_type_number_t *count) { // No conversion to userspace representation on this platform return KERN_SUCCESS; @@ -734,10 +801,10 @@ machine_thread_state_convert_to_user( kern_return_t machine_thread_state_convert_from_user( - __unused thread_t thread, - __unused thread_flavor_t flavor, - __unused thread_state_t tstate, - __unused mach_msg_type_number_t count) + __unused thread_t thread, + __unused thread_flavor_t flavor, + __unused thread_state_t tstate, + __unused mach_msg_type_number_t count) { // No conversion from userspace representation on this platform return KERN_SUCCESS; @@ -745,8 +812,8 @@ machine_thread_state_convert_from_user( kern_return_t machine_thread_siguctx_pointer_convert_to_user( - __unused thread_t thread, - __unused user_addr_t *uctxp) + __unused thread_t thread, + __unused user_addr_t *uctxp) { // No conversion to userspace representation on this platform return KERN_SUCCESS; @@ -754,9 +821,9 @@ machine_thread_siguctx_pointer_convert_to_user( kern_return_t machine_thread_function_pointers_convert_from_user( - __unused thread_t thread, - __unused user_addr_t *fptrs, - __unused uint32_t count) + __unused thread_t thread, + __unused user_addr_t *fptrs, + __unused uint32_t count) { // No conversion from userspace representation on this platform return KERN_SUCCESS; @@ -778,25 +845,37 @@ machine_thread_set_state( switch (flavor) { case x86_SAVED_STATE32: { - x86_saved_state32_t *state; - x86_saved_state32_t *saved_state; + x86_saved_state32_t *state; + x86_saved_state32_t *saved_state; - if (count < x86_SAVED_STATE32_COUNT) - return(KERN_INVALID_ARGUMENT); - - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (count < x86_SAVED_STATE32_COUNT) { + return 
KERN_INVALID_ARGUMENT; + } state = (x86_saved_state32_t *) tstate; + /* + * Allow a thread in a 64-bit process to set + * 32-bit state iff the code segment originates + * in the LDT (the implication is that only + * 32-bit code segments are allowed there, so + * setting 32-bit state implies a switch to + * compatibility mode on resume-to-user). + */ + if (thread_is_64bit_addr(thr_act) && + thr_act->task->i386_ldt == 0) { + return KERN_INVALID_ARGUMENT; + } + /* Check segment selectors are safe */ if (!valid_user_segment_selectors(state->cs, - state->ss, - state->ds, - state->es, - state->fs, - state->gs)) + state->ss, + state->ds, + state->es, + state->fs, + state->gs)) { return KERN_INVALID_ARGUMENT; + } pal_register_cache_state(thr_act, DIRTY); @@ -822,8 +901,9 @@ machine_thread_set_state( * ensure that the user returns via iret * - which is signaled thusly: */ - if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) + if ((saved_state->efl & EFL_TF) && state->cs == SYSENTER_CS) { state->cs = SYSENTER_TF_CS; + } /* * User setting segment registers. @@ -843,14 +923,16 @@ machine_thread_set_state( case x86_SAVED_STATE64: { - x86_saved_state64_t *state; - x86_saved_state64_t *saved_state; + x86_saved_state64_t *state; + x86_saved_state64_t *saved_state; - if (count < x86_SAVED_STATE64_COUNT) - return(KERN_INVALID_ARGUMENT); + if (count < x86_SAVED_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (!thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } state = (x86_saved_state64_t *) tstate; @@ -861,13 +943,15 @@ machine_thread_set_state( * restore the segment registers--hence they are no * longer relevant for validation. */ - if (!valid_user_code_selector(state->isf.cs)) - return KERN_INVALID_ARGUMENT; - + if (!valid_user_code_selector(state->isf.cs)) { + return KERN_INVALID_ARGUMENT; + } + /* Check pc and stack are canonical addresses */ if (!IS_USERADDR64_CANONICAL(state->isf.rsp) || - !IS_USERADDR64_CANONICAL(state->isf.rip)) + !IS_USERADDR64_CANONICAL(state->isf.rip)) { return KERN_INVALID_ARGUMENT; + } pal_register_cache_state(thr_act, DIRTY); @@ -915,37 +999,42 @@ machine_thread_set_state( #if !defined(RC_HIDE_XNU_J137) case x86_AVX512_STATE32: #endif /* not RC_HIDE_XNU_J137 */ - { - if (count != _MachineStateCount[flavor]) - return(KERN_INVALID_ARGUMENT); + { + if (count != _MachineStateCount[flavor]) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } - return fpu_set_fxstate(thr_act, tstate, flavor); - } + return fpu_set_fxstate(thr_act, tstate, flavor); + } case x86_FLOAT_STATE64: case x86_AVX_STATE64: #if !defined(RC_HIDE_XNU_J137) case x86_AVX512_STATE64: #endif /* not RC_HIDE_XNU_J137 */ - { - if (count != _MachineStateCount[flavor]) - return(KERN_INVALID_ARGUMENT); + { + if (count != _MachineStateCount[flavor]) { + return KERN_INVALID_ARGUMENT; + } - if (!thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } - return fpu_set_fxstate(thr_act, tstate, flavor); - } + return fpu_set_fxstate(thr_act, tstate, flavor); + } case x86_FLOAT_STATE: - { + { x86_float_state_t *state; - if (count != x86_FLOAT_STATE_COUNT) - return(KERN_INVALID_ARGUMENT); + if (count != x86_FLOAT_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_float_state_t *)tstate; if 
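IS_USERADDR64_CANONICAL(), applied above to both rip and rsp in the thread-state and saved-state setters, enforces x86-64 canonical form: bits 63:47 must all equal bit 47, and user space occupies the lower canonical half, so the test collapses to an upper bound. A sketch for a 48-bit virtual-address implementation (the exact bound in xnu's macro may differ):

#include <stdbool.h>
#include <stdint.h>

/* Lower canonical half on a 48-bit-VA part: [0, 2^47). */
static inline bool
is_useraddr64_canonical(uint64_t addr)
{
	return addr < (1ULL << 47);
}

Rejecting a non-canonical rip/rsp at set-state time matters because loading one later via iretq or sysret can fault with kernel state only partially restored, a classically delicate spot on x86-64.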
(state->fsh.flavor == x86_FLOAT_STATE64 && state->fsh.count == x86_FLOAT_STATE64_COUNT && @@ -954,91 +1043,120 @@ machine_thread_set_state( } if (state->fsh.flavor == x86_FLOAT_STATE32 && state->fsh.count == x86_FLOAT_STATE32_COUNT && !thread_is_64bit_addr(thr_act)) { - return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32); + return fpu_set_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32); } - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } case x86_AVX_STATE: #if !defined(RC_HIDE_XNU_J137) case x86_AVX512_STATE: #endif - { - x86_avx_state_t *state; - - if (count != _MachineStateCount[flavor]) - return(KERN_INVALID_ARGUMENT); + { + x86_avx_state_t *state; + + if (count != _MachineStateCount[flavor]) { + return KERN_INVALID_ARGUMENT; + } + + state = (x86_avx_state_t *)tstate; + /* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */ + /* 64-bit flavor? */ + if (state->ash.flavor == (flavor - 1) && + state->ash.count == _MachineStateCount[flavor - 1] && + thread_is_64bit_addr(thr_act)) { + return fpu_set_fxstate(thr_act, + (thread_state_t)&state->ufs.as64, + flavor - 1); + } + /* 32-bit flavor? */ + if (state->ash.flavor == (flavor - 2) && + state->ash.count == _MachineStateCount[flavor - 2] && + !thread_is_64bit_addr(thr_act)) { + return fpu_set_fxstate(thr_act, + (thread_state_t)&state->ufs.as32, + flavor - 2); + } + return KERN_INVALID_ARGUMENT; + } - state = (x86_avx_state_t *)tstate; - /* Flavors are defined to have sequential values: 32-bit, 64-bit, non-specific */ - /* 64-bit flavor? */ - if (state->ash.flavor == (flavor - 1) && - state->ash.count == _MachineStateCount[flavor - 1] && - thread_is_64bit_addr(thr_act)) { - return fpu_set_fxstate(thr_act, - (thread_state_t)&state->ufs.as64, - flavor - 1); + case x86_THREAD_STATE32: + { + if (count != x86_THREAD_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; } - /* 32-bit flavor? 
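The flavor - 1 / flavor - 2 arithmetic above relies on the flavor constants being allocated as consecutive triples (32-bit, 64-bit, then the width-generic value), so one case body serves both x86_AVX_STATE and x86_AVX512_STATE. Schematically:

/*
 * With a triple laid out as F32, F64, F (consecutive values):
 *   F - 1 == F64    and    F - 2 == F32
 */
static int
specific_flavor(int generic, int is_64bit)
{
	return is_64bit ? generic - 1 : generic - 2;
}

The matching _MachineStateCount[flavor - n] lookup then validates the caller's count against the width-specific flavor it resolved to.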
*/ - if (state->ash.flavor == (flavor - 2) && - state->ash.count == _MachineStateCount[flavor - 2] && - !thread_is_64bit_addr(thr_act)) { - return fpu_set_fxstate(thr_act, - (thread_state_t)&state->ufs.as32, - flavor - 2); + + if (thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; } - return(KERN_INVALID_ARGUMENT); + + return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate); } - case x86_THREAD_STATE32: + case x86_THREAD_STATE64: { - if (count != x86_THREAD_STATE32_COUNT) - return(KERN_INVALID_ARGUMENT); + if (count != x86_THREAD_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } - return set_thread_state32(thr_act, (x86_thread_state32_t *)tstate); + return set_thread_state64(thr_act, tstate, FALSE); } - case x86_THREAD_STATE64: + case x86_THREAD_FULL_STATE64: { - if (count != x86_THREAD_STATE64_COUNT) - return(KERN_INVALID_ARGUMENT); + if (!allow_64bit_proc_LDT_ops) { + return KERN_INVALID_ARGUMENT; + } - if (!thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (count != x86_THREAD_FULL_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - return set_thread_state64(thr_act, (x86_thread_state64_t *)tstate); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } + return set_thread_state64(thr_act, tstate, TRUE); } + case x86_THREAD_STATE: { x86_thread_state_t *state; - if (count != x86_THREAD_STATE_COUNT) - return(KERN_INVALID_ARGUMENT); + if (count != x86_THREAD_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_thread_state_t *)tstate; if (state->tsh.flavor == x86_THREAD_STATE64 && state->tsh.count == x86_THREAD_STATE64_COUNT && thread_is_64bit_addr(thr_act)) { - return set_thread_state64(thr_act, &state->uts.ts64); + return set_thread_state64(thr_act, &state->uts.ts64, FALSE); + } else if (state->tsh.flavor == x86_THREAD_FULL_STATE64 && + state->tsh.count == x86_THREAD_FULL_STATE64_COUNT && + thread_is_64bit_addr(thr_act)) { + return set_thread_state64(thr_act, &state->uts.ts64, TRUE); } else if (state->tsh.flavor == x86_THREAD_STATE32 && - state->tsh.count == x86_THREAD_STATE32_COUNT && - !thread_is_64bit_addr(thr_act)) { + state->tsh.count == x86_THREAD_STATE32_COUNT && + !thread_is_64bit_addr(thr_act)) { return set_thread_state32(thr_act, &state->uts.ts32); - } else - return(KERN_INVALID_ARGUMENT); + } else { + return KERN_INVALID_ARGUMENT; + } } case x86_DEBUG_STATE32: { x86_debug_state32_t *state; kern_return_t ret; - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } state = (x86_debug_state32_t *)tstate; @@ -1051,8 +1169,9 @@ machine_thread_set_state( x86_debug_state64_t *state; kern_return_t ret; - if (!thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } state = (x86_debug_state64_t *)tstate; @@ -1065,28 +1184,27 @@ machine_thread_set_state( x86_debug_state_t *state; kern_return_t ret = KERN_INVALID_ARGUMENT; - if (count != x86_DEBUG_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count != x86_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_debug_state_t *)tstate; if (state->dsh.flavor == x86_DEBUG_STATE64 && - state->dsh.count == x86_DEBUG_STATE64_COUNT && - thread_is_64bit_addr(thr_act)) { + state->dsh.count == x86_DEBUG_STATE64_COUNT && + 
thread_is_64bit_addr(thr_act)) { ret = set_debug_state64(thr_act, &state->uds.ds64); - } - else - if (state->dsh.flavor == x86_DEBUG_STATE32 && - state->dsh.count == x86_DEBUG_STATE32_COUNT && - !thread_is_64bit_addr(thr_act)) { - ret = set_debug_state32(thr_act, &state->uds.ds32); + } else if (state->dsh.flavor == x86_DEBUG_STATE32 && + state->dsh.count == x86_DEBUG_STATE32_COUNT && + !thread_is_64bit_addr(thr_act)) { + ret = set_debug_state32(thr_act, &state->uds.ds32); } return ret; } default: - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return(KERN_SUCCESS); + return KERN_SUCCESS; } @@ -1104,42 +1222,43 @@ machine_thread_get_state( thread_state_t tstate, mach_msg_type_number_t *count) { + switch (flavor) { + case THREAD_STATE_FLAVOR_LIST: + { + if (*count < 3) { + return KERN_INVALID_ARGUMENT; + } - switch (flavor) { - - case THREAD_STATE_FLAVOR_LIST: - { - if (*count < 3) - return (KERN_INVALID_ARGUMENT); - - tstate[0] = i386_THREAD_STATE; + tstate[0] = i386_THREAD_STATE; tstate[1] = i386_FLOAT_STATE; tstate[2] = i386_EXCEPTION_STATE; *count = 3; break; - } + } - case THREAD_STATE_FLAVOR_LIST_NEW: - { - if (*count < 4) - return (KERN_INVALID_ARGUMENT); + case THREAD_STATE_FLAVOR_LIST_NEW: + { + if (*count < 4) { + return KERN_INVALID_ARGUMENT; + } - tstate[0] = x86_THREAD_STATE; + tstate[0] = x86_THREAD_STATE; tstate[1] = x86_FLOAT_STATE; tstate[2] = x86_EXCEPTION_STATE; tstate[3] = x86_DEBUG_STATE; *count = 4; break; - } + } - case THREAD_STATE_FLAVOR_LIST_10_9: - { - if (*count < 5) - return (KERN_INVALID_ARGUMENT); + case THREAD_STATE_FLAVOR_LIST_10_9: + { + if (*count < 5) { + return KERN_INVALID_ARGUMENT; + } - tstate[0] = x86_THREAD_STATE; + tstate[0] = x86_THREAD_STATE; tstate[1] = x86_FLOAT_STATE; tstate[2] = x86_EXCEPTION_STATE; tstate[3] = x86_DEBUG_STATE; @@ -1147,15 +1266,16 @@ machine_thread_get_state( *count = 5; break; - } + } #if !defined(RC_HIDE_XNU_J137) - case THREAD_STATE_FLAVOR_LIST_10_13: - { - if (*count < 6) - return (KERN_INVALID_ARGUMENT); + case THREAD_STATE_FLAVOR_LIST_10_13: + { + if (*count < 6) { + return KERN_INVALID_ARGUMENT; + } - tstate[0] = x86_THREAD_STATE; + tstate[0] = x86_THREAD_STATE; tstate[1] = x86_FLOAT_STATE; tstate[2] = x86_EXCEPTION_STATE; tstate[3] = x86_DEBUG_STATE; @@ -1164,19 +1284,21 @@ machine_thread_get_state( *count = 6; break; - } + } #endif - case x86_SAVED_STATE32: - { - x86_saved_state32_t *state; - x86_saved_state32_t *saved_state; + case x86_SAVED_STATE32: + { + x86_saved_state32_t *state; + x86_saved_state32_t *saved_state; - if (*count < x86_SAVED_STATE32_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < x86_SAVED_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } state = (x86_saved_state32_t *) tstate; saved_state = USER_REGS32(thr_act); @@ -1192,18 +1314,20 @@ machine_thread_get_state( *count = x86_SAVED_STATE32_COUNT; break; - } + } - case x86_SAVED_STATE64: - { - x86_saved_state64_t *state; - x86_saved_state64_t *saved_state; + case x86_SAVED_STATE64: + { + x86_saved_state64_t *state; + x86_saved_state64_t *saved_state; - if (*count < x86_SAVED_STATE64_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < x86_SAVED_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (!thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } state = (x86_saved_state64_t 
*)tstate; saved_state = USER_REGS64(thr_act); @@ -1212,164 +1336,201 @@ machine_thread_get_state( * First, copy everything: */ *state = *saved_state; + state->ds = saved_state->ds & 0xffff; + state->es = saved_state->es & 0xffff; state->fs = saved_state->fs & 0xffff; state->gs = saved_state->gs & 0xffff; *count = x86_SAVED_STATE64_COUNT; break; - } + } - case x86_FLOAT_STATE32: - { - if (*count < x86_FLOAT_STATE32_COUNT) - return(KERN_INVALID_ARGUMENT); + case x86_FLOAT_STATE32: + { + if (*count < x86_FLOAT_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } *count = x86_FLOAT_STATE32_COUNT; return fpu_get_fxstate(thr_act, tstate, flavor); - } + } - case x86_FLOAT_STATE64: - { - if (*count < x86_FLOAT_STATE64_COUNT) - return(KERN_INVALID_ARGUMENT); + case x86_FLOAT_STATE64: + { + if (*count < x86_FLOAT_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if ( !thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } *count = x86_FLOAT_STATE64_COUNT; return fpu_get_fxstate(thr_act, tstate, flavor); - } + } - case x86_FLOAT_STATE: - { - x86_float_state_t *state; - kern_return_t kret; + case x86_FLOAT_STATE: + { + x86_float_state_t *state; + kern_return_t kret; - if (*count < x86_FLOAT_STATE_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < x86_FLOAT_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_float_state_t *)tstate; /* - * no need to bzero... currently + * no need to bzero... currently * x86_FLOAT_STATE64_COUNT == x86_FLOAT_STATE32_COUNT */ if (thread_is_64bit_addr(thr_act)) { - state->fsh.flavor = x86_FLOAT_STATE64; - state->fsh.count = x86_FLOAT_STATE64_COUNT; + state->fsh.flavor = x86_FLOAT_STATE64; + state->fsh.count = x86_FLOAT_STATE64_COUNT; kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs64, x86_FLOAT_STATE64); } else { - state->fsh.flavor = x86_FLOAT_STATE32; + state->fsh.flavor = x86_FLOAT_STATE32; state->fsh.count = x86_FLOAT_STATE32_COUNT; kret = fpu_get_fxstate(thr_act, (thread_state_t)&state->ufs.fs32, x86_FLOAT_STATE32); } *count = x86_FLOAT_STATE_COUNT; - return(kret); - } + return kret; + } - case x86_AVX_STATE32: + case x86_AVX_STATE32: #if !defined(RC_HIDE_XNU_J137) - case x86_AVX512_STATE32: + case x86_AVX512_STATE32: #endif - { - if (*count != _MachineStateCount[flavor]) - return(KERN_INVALID_ARGUMENT); + { + if (*count != _MachineStateCount[flavor]) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } - *count = _MachineStateCount[flavor]; + *count = _MachineStateCount[flavor]; - return fpu_get_fxstate(thr_act, tstate, flavor); - } + return fpu_get_fxstate(thr_act, tstate, flavor); + } - case x86_AVX_STATE64: + case x86_AVX_STATE64: #if !defined(RC_HIDE_XNU_J137) - case x86_AVX512_STATE64: + case x86_AVX512_STATE64: #endif - { - if (*count != _MachineStateCount[flavor]) - return(KERN_INVALID_ARGUMENT); + { + if (*count != _MachineStateCount[flavor]) { + return KERN_INVALID_ARGUMENT; + } - if ( !thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } - *count = _MachineStateCount[flavor]; + *count = _MachineStateCount[flavor]; - return fpu_get_fxstate(thr_act, tstate, flavor); - } + return 
fpu_get_fxstate(thr_act, tstate, flavor); + } - case x86_AVX_STATE: + case x86_AVX_STATE: #if !defined(RC_HIDE_XNU_J137) - case x86_AVX512_STATE: + case x86_AVX512_STATE: #endif - { - x86_avx_state_t *state; - thread_state_t fstate; - - if (*count < _MachineStateCount[flavor]) - return(KERN_INVALID_ARGUMENT); - - *count = _MachineStateCount[flavor]; - state = (x86_avx_state_t *)tstate; + { + x86_avx_state_t *state; + thread_state_t fstate; + + if (*count < _MachineStateCount[flavor]) { + return KERN_INVALID_ARGUMENT; + } + + *count = _MachineStateCount[flavor]; + state = (x86_avx_state_t *)tstate; + + bzero((char *)state, *count * sizeof(int)); + + if (thread_is_64bit_addr(thr_act)) { + flavor -= 1; /* 64-bit flavor */ + fstate = (thread_state_t) &state->ufs.as64; + } else { + flavor -= 2; /* 32-bit flavor */ + fstate = (thread_state_t) &state->ufs.as32; + } + state->ash.flavor = flavor; + state->ash.count = _MachineStateCount[flavor]; + + return fpu_get_fxstate(thr_act, fstate, flavor); + } - bzero((char *)state, *count * sizeof(int)); + case x86_THREAD_STATE32: + { + if (*count < x86_THREAD_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } if (thread_is_64bit_addr(thr_act)) { - flavor -= 1; /* 64-bit flavor */ - fstate = (thread_state_t) &state->ufs.as64; - } else { - flavor -= 2; /* 32-bit flavor */ - fstate = (thread_state_t) &state->ufs.as32; + return KERN_INVALID_ARGUMENT; } - state->ash.flavor = flavor; - state->ash.count = _MachineStateCount[flavor]; - - return fpu_get_fxstate(thr_act, fstate, flavor); - } - - case x86_THREAD_STATE32: - { - if (*count < x86_THREAD_STATE32_COUNT) - return(KERN_INVALID_ARGUMENT); - - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); *count = x86_THREAD_STATE32_COUNT; get_thread_state32(thr_act, (x86_thread_state32_t *)tstate); break; - } + } - case x86_THREAD_STATE64: - { - if (*count < x86_THREAD_STATE64_COUNT) - return(KERN_INVALID_ARGUMENT); + case x86_THREAD_STATE64: + { + if (*count < x86_THREAD_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if ( !thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } *count = x86_THREAD_STATE64_COUNT; - get_thread_state64(thr_act, (x86_thread_state64_t *)tstate); + get_thread_state64(thr_act, tstate, FALSE); + break; + } + + case x86_THREAD_FULL_STATE64: + { + if (!allow_64bit_proc_LDT_ops) { + return KERN_INVALID_ARGUMENT; + } + + if (*count < x86_THREAD_FULL_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } + + *count = x86_THREAD_FULL_STATE64_COUNT; + + get_thread_state64(thr_act, tstate, TRUE); break; - } + } - case x86_THREAD_STATE: - { - x86_thread_state_t *state; + case x86_THREAD_STATE: + { + x86_thread_state_t *state; - if (*count < x86_THREAD_STATE_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < x86_THREAD_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_thread_state_t *)tstate; @@ -1379,26 +1540,28 @@ machine_thread_get_state( state->tsh.flavor = x86_THREAD_STATE64; state->tsh.count = x86_THREAD_STATE64_COUNT; - get_thread_state64(thr_act, &state->uts.ts64); + get_thread_state64(thr_act, &state->uts.ts64, FALSE); } else { state->tsh.flavor = x86_THREAD_STATE32; state->tsh.count = x86_THREAD_STATE32_COUNT; - get_thread_state32(thr_act, &state->uts.ts32); + get_thread_state32(thr_act, &state->uts.ts32); } *count = x86_THREAD_STATE_COUNT; break; - } + } - case x86_EXCEPTION_STATE32: - { - 
if (*count < x86_EXCEPTION_STATE32_COUNT) - return(KERN_INVALID_ARGUMENT); + case x86_EXCEPTION_STATE32: + { + if (*count < x86_EXCEPTION_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } *count = x86_EXCEPTION_STATE32_COUNT; @@ -1409,15 +1572,17 @@ machine_thread_get_state( */ ((x86_exception_state32_t *)tstate)->cpu = 0; break; - } + } - case x86_EXCEPTION_STATE64: - { - if (*count < x86_EXCEPTION_STATE64_COUNT) - return(KERN_INVALID_ARGUMENT); + case x86_EXCEPTION_STATE64: + { + if (*count < x86_EXCEPTION_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if ( !thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } *count = x86_EXCEPTION_STATE64_COUNT; @@ -1428,14 +1593,15 @@ machine_thread_get_state( */ ((x86_exception_state64_t *)tstate)->cpu = 0; break; - } + } - case x86_EXCEPTION_STATE: - { - x86_exception_state_t *state; + case x86_EXCEPTION_STATE: + { + x86_exception_state_t *state; - if (*count < x86_EXCEPTION_STATE_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < x86_EXCEPTION_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_exception_state_t *)tstate; @@ -1445,12 +1611,12 @@ machine_thread_get_state( state->esh.flavor = x86_EXCEPTION_STATE64; state->esh.count = x86_EXCEPTION_STATE64_COUNT; - get_exception_state64(thr_act, &state->ues.es64); + get_exception_state64(thr_act, &state->ues.es64); } else { state->esh.flavor = x86_EXCEPTION_STATE32; state->esh.count = x86_EXCEPTION_STATE32_COUNT; - get_exception_state32(thr_act, &state->ues.es32); + get_exception_state32(thr_act, &state->ues.es32); } *count = x86_EXCEPTION_STATE_COUNT; @@ -1458,11 +1624,13 @@ machine_thread_get_state( } case x86_DEBUG_STATE32: { - if (*count < x86_DEBUG_STATE32_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < x86_DEBUG_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } get_debug_state32(thr_act, (x86_debug_state32_t *)tstate); @@ -1472,11 +1640,13 @@ machine_thread_get_state( } case x86_DEBUG_STATE64: { - if (*count < x86_DEBUG_STATE64_COUNT) - return(KERN_INVALID_ARGUMENT); - - if (!thread_is_64bit_addr(thr_act)) - return(KERN_INVALID_ARGUMENT); + if (*count < x86_DEBUG_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } + + if (!thread_is_64bit_addr(thr_act)) { + return KERN_INVALID_ARGUMENT; + } get_debug_state64(thr_act, (x86_debug_state64_t *)tstate); @@ -1488,8 +1658,9 @@ machine_thread_get_state( { x86_debug_state_t *state; - if (*count < x86_DEBUG_STATE_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < x86_DEBUG_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_debug_state_t *)tstate; @@ -1510,35 +1681,37 @@ machine_thread_get_state( break; } default: - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return(KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t machine_thread_get_kern_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t *count) + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) { - x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state; + x86_saved_state_t *int_state = current_cpu_datap()->cpu_int_state; /* * This works only 
for an interrupted kernel thread */ - if (thread != current_thread() || int_state == NULL) + if (thread != current_thread() || int_state == NULL) { return KERN_FAILURE; + } switch (flavor) { - case x86_THREAD_STATE32: { + case x86_THREAD_STATE32: { x86_thread_state32_t *state; x86_saved_state32_t *saved_state; if (!is_saved_state32(int_state) || - *count < x86_THREAD_STATE32_COUNT) - return (KERN_INVALID_ARGUMENT); + *count < x86_THREAD_STATE32_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_thread_state32_t *) tstate; @@ -1566,15 +1739,16 @@ machine_thread_get_kern_state( *count = x86_THREAD_STATE32_COUNT; return KERN_SUCCESS; - } - - case x86_THREAD_STATE64: { - x86_thread_state64_t *state; - x86_saved_state64_t *saved_state; + } + + case x86_THREAD_STATE64: { + x86_thread_state64_t *state; + x86_saved_state64_t *saved_state; if (!is_saved_state64(int_state) || - *count < x86_THREAD_STATE64_COUNT) - return (KERN_INVALID_ARGUMENT); + *count < x86_THREAD_STATE64_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_thread_state64_t *) tstate; @@ -1607,13 +1781,14 @@ machine_thread_get_kern_state( *count = x86_THREAD_STATE64_COUNT; return KERN_SUCCESS; - } - - case x86_THREAD_STATE: { + } + + case x86_THREAD_STATE: { x86_thread_state_t *state = NULL; - if (*count < x86_THREAD_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*count < x86_THREAD_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } state = (x86_thread_state_t *) tstate; @@ -1679,7 +1854,7 @@ machine_thread_get_kern_state( *count = x86_THREAD_STATE_COUNT; return KERN_SUCCESS; - } + } } return KERN_FAILURE; } @@ -1732,14 +1907,14 @@ void machine_thread_init(void) { iss_zone = zinit(sizeof(x86_saved_state_t), - thread_max * sizeof(x86_saved_state_t), - THREAD_CHUNK * sizeof(x86_saved_state_t), - "x86_64 saved state"); + thread_max * sizeof(x86_saved_state_t), + THREAD_CHUNK * sizeof(x86_saved_state_t), + "x86_64 saved state"); - ids_zone = zinit(sizeof(x86_debug_state64_t), - thread_max * sizeof(x86_debug_state64_t), - THREAD_CHUNK * sizeof(x86_debug_state64_t), - "x86_64 debug state"); + ids_zone = zinit(sizeof(x86_debug_state64_t), + thread_max * sizeof(x86_debug_state64_t), + THREAD_CHUNK * sizeof(x86_debug_state64_t), + "x86_64 debug state"); fpu_module_init(); } @@ -1749,20 +1924,20 @@ machine_thread_init(void) user_addr_t get_useraddr(void) { - thread_t thr_act = current_thread(); - - if (thread_is_64bit_addr(thr_act)) { - x86_saved_state64_t *iss64; - + thread_t thr_act = current_thread(); + + if (thread_is_64bit_addr(thr_act)) { + x86_saved_state64_t *iss64; + iss64 = USER_REGS64(thr_act); - return(iss64->isf.rip); + return iss64->isf.rip; } else { - x86_saved_state32_t *iss32; + x86_saved_state32_t *iss32; iss32 = USER_REGS32(thr_act); - return(iss32->eip); + return iss32->eip; } } @@ -1776,14 +1951,14 @@ machine_stack_detach(thread_t thread) vm_offset_t stack; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH), - (uintptr_t)thread_tid(thread), thread->priority, - thread->sched_pri, 0, - 0); + (uintptr_t)thread_tid(thread), thread->priority, + thread->sched_pri, 0, + 0); stack = thread->kernel_stack; thread->kernel_stack = 0; - return (stack); + return stack; } /* @@ -1792,14 +1967,14 @@ machine_stack_detach(thread_t thread) void machine_stack_attach( - thread_t thread, - vm_offset_t stack) + thread_t thread, + vm_offset_t stack) { struct x86_kernel_state *statep; KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH), - (uintptr_t)thread_tid(thread), thread->priority, - thread->sched_pri, 
0, 0); + (uintptr_t)thread_tid(thread), thread->priority, + thread->sched_pri, 0, 0); assert(stack); thread->kernel_stack = stack; @@ -1825,7 +2000,7 @@ machine_stack_attach( void machine_stack_handoff(thread_t old, - thread_t new) + thread_t new) { vm_offset_t stack; @@ -1848,7 +2023,7 @@ machine_stack_handoff(thread_t old, new->kernel_stack = stack; fpu_switch_context(old, new); - + old->machine.specFlags &= ~OnProc; new->machine.specFlags |= OnProc; @@ -1894,110 +2069,113 @@ act_thread_csave(void) ic64 = (struct x86_act_context64 *)kalloc(sizeof(struct x86_act_context64)); - if (ic64 == (struct x86_act_context64 *)NULL) - return((void *)0); + if (ic64 == (struct x86_act_context64 *)NULL) { + return (void *)0; + } - val = x86_SAVED_STATE64_COUNT; + val = x86_SAVED_STATE64_COUNT; kret = machine_thread_get_state(thr_act, x86_SAVED_STATE64, - (thread_state_t) &ic64->ss, &val); + (thread_state_t) &ic64->ss, &val); if (kret != KERN_SUCCESS) { kfree(ic64, sizeof(struct x86_act_context64)); - return((void *)0); + return (void *)0; } - val = x86_FLOAT_STATE64_COUNT; + val = x86_FLOAT_STATE64_COUNT; kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE64, - (thread_state_t) &ic64->fs, &val); + (thread_state_t) &ic64->fs, &val); if (kret != KERN_SUCCESS) { kfree(ic64, sizeof(struct x86_act_context64)); - return((void *)0); + return (void *)0; } val = x86_DEBUG_STATE64_COUNT; kret = machine_thread_get_state(thr_act, - x86_DEBUG_STATE64, - (thread_state_t)&ic64->ds, - &val); + x86_DEBUG_STATE64, + (thread_state_t)&ic64->ds, + &val); if (kret != KERN_SUCCESS) { - kfree(ic64, sizeof(struct x86_act_context64)); - return((void *)0); + kfree(ic64, sizeof(struct x86_act_context64)); + return (void *)0; } - return(ic64); - + return ic64; } else { struct x86_act_context32 *ic32; ic32 = (struct x86_act_context32 *)kalloc(sizeof(struct x86_act_context32)); - if (ic32 == (struct x86_act_context32 *)NULL) - return((void *)0); + if (ic32 == (struct x86_act_context32 *)NULL) { + return (void *)0; + } - val = x86_SAVED_STATE32_COUNT; + val = x86_SAVED_STATE32_COUNT; kret = machine_thread_get_state(thr_act, x86_SAVED_STATE32, - (thread_state_t) &ic32->ss, &val); + (thread_state_t) &ic32->ss, &val); if (kret != KERN_SUCCESS) { kfree(ic32, sizeof(struct x86_act_context32)); - return((void *)0); + return (void *)0; } - val = x86_FLOAT_STATE32_COUNT; + val = x86_FLOAT_STATE32_COUNT; kret = machine_thread_get_state(thr_act, x86_FLOAT_STATE32, - (thread_state_t) &ic32->fs, &val); + (thread_state_t) &ic32->fs, &val); if (kret != KERN_SUCCESS) { kfree(ic32, sizeof(struct x86_act_context32)); - return((void *)0); + return (void *)0; } val = x86_DEBUG_STATE32_COUNT; kret = machine_thread_get_state(thr_act, - x86_DEBUG_STATE32, - (thread_state_t)&ic32->ds, - &val); + x86_DEBUG_STATE32, + (thread_state_t)&ic32->ds, + &val); if (kret != KERN_SUCCESS) { - kfree(ic32, sizeof(struct x86_act_context32)); - return((void *)0); + kfree(ic32, sizeof(struct x86_act_context32)); + return (void *)0; } - return(ic32); + return ic32; } } -void +void act_thread_catt(void *ctx) { - thread_t thr_act = current_thread(); + thread_t thr_act = current_thread(); kern_return_t kret; - if (ctx == (void *)NULL) - return; + if (ctx == (void *)NULL) { + return; + } - if (thread_is_64bit_addr(thr_act)) { - struct x86_act_context64 *ic64; + if (thread_is_64bit_addr(thr_act)) { + struct x86_act_context64 *ic64; - ic64 = (struct x86_act_context64 *)ctx; + ic64 = (struct x86_act_context64 *)ctx; kret = machine_thread_set_state(thr_act, x86_SAVED_STATE64, 
- (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT); + (thread_state_t) &ic64->ss, x86_SAVED_STATE64_COUNT); if (kret == KERN_SUCCESS) { - machine_thread_set_state(thr_act, x86_FLOAT_STATE64, - (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT); + machine_thread_set_state(thr_act, x86_FLOAT_STATE64, + (thread_state_t) &ic64->fs, x86_FLOAT_STATE64_COUNT); } kfree(ic64, sizeof(struct x86_act_context64)); } else { - struct x86_act_context32 *ic32; + struct x86_act_context32 *ic32; - ic32 = (struct x86_act_context32 *)ctx; + ic32 = (struct x86_act_context32 *)ctx; kret = machine_thread_set_state(thr_act, x86_SAVED_STATE32, - (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT); + (thread_state_t) &ic32->ss, x86_SAVED_STATE32_COUNT); if (kret == KERN_SUCCESS) { (void) machine_thread_set_state(thr_act, x86_FLOAT_STATE32, - (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT); + (thread_state_t) &ic32->fs, x86_FLOAT_STATE32_COUNT); } kfree(ic32, sizeof(struct x86_act_context32)); } } -void act_thread_cfree(__unused void *ctx) +void +act_thread_cfree(__unused void *ctx) { /* XXX - Unused */ } @@ -2005,14 +2183,14 @@ void act_thread_cfree(__unused void *ctx) /* * Duplicate one x86_debug_state32_t to another. "all" parameter * chooses whether dr4 and dr5 are copied (they are never meant - * to be installed when we do machine_task_set_state() or + * to be installed when we do machine_task_set_state() or * machine_thread_set_state()). */ void copy_debug_state32( - x86_debug_state32_t *src, - x86_debug_state32_t *target, - boolean_t all) + x86_debug_state32_t *src, + x86_debug_state32_t *target, + boolean_t all) { if (all) { target->dr4 = src->dr4; @@ -2030,14 +2208,14 @@ copy_debug_state32( /* * Duplicate one x86_debug_state64_t to another. "all" parameter * chooses whether dr4 and dr5 are copied (they are never meant - * to be installed when we do machine_task_set_state() or + * to be installed when we do machine_task_set_state() or * machine_thread_set_state()). */ void copy_debug_state64( - x86_debug_state64_t *src, - x86_debug_state64_t *target, - boolean_t all) + x86_debug_state64_t *src, + x86_debug_state64_t *target, + boolean_t all) { if (all) { target->dr4 = src->dr4; diff --git a/osfmk/i386/pcb_native.c b/osfmk/i386/pcb_native.c index 748bde049..81b4dfcd2 100644 --- a/osfmk/i386/pcb_native.c +++ b/osfmk/i386/pcb_native.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
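The act_thread_csave()/act_thread_catt() pair reworked above snapshots a thread's saved, floating-point, and debug state and later re-applies it through machine_thread_get_state()/machine_thread_set_state(). A minimal user-space analogue of that round trip, using the public Mach thread_get_state()/thread_set_state() calls on a suspended target thread; the function name and elided error handling are illustrative only, not part of the patch:

    #include <mach/mach.h>

    /* Sketch: snapshot and restore a suspended thread's general-purpose
     * register state, mirroring the kernel's csave/catt sequence for
     * one state flavor. */
    static kern_return_t
    roundtrip_thread_state(thread_act_t target)
    {
        x86_thread_state64_t ss;
        mach_msg_type_number_t cnt = x86_THREAD_STATE64_COUNT;
        kern_return_t kr;

        kr = thread_get_state(target, x86_THREAD_STATE64,
            (thread_state_t)&ss, &cnt);            /* the csave step */
        if (kr != KERN_SUCCESS) {
            return kr;
        }
        /* ... the thread may be manipulated here ... */
        return thread_set_state(target, x86_THREAD_STATE64,
            (thread_state_t)&ss, cnt);             /* the catt step */
    }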
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -85,6 +85,7 @@ #include #include #include +#include #include #include #include @@ -100,9 +101,9 @@ #include #endif -#define ASSERT_IS_16BYTE_MULTIPLE_SIZEOF(_type_) \ -extern char assert_is_16byte_multiple_sizeof_ ## _type_ \ - [(sizeof(_type_) % 16) == 0 ? 1 : -1] +#define ASSERT_IS_16BYTE_MULTIPLE_SIZEOF(_type_) \ +extern char assert_is_16byte_multiple_sizeof_ ## _type_ \ + [(sizeof(_type_) % 16) == 0 ? 1 : -1] /* Compile-time checks for vital save area sizing: */ ASSERT_IS_16BYTE_MULTIPLE_SIZEOF(x86_64_intr_stack_frame_t); @@ -110,20 +111,23 @@ ASSERT_IS_16BYTE_MULTIPLE_SIZEOF(x86_saved_state_t); #define DIRECTION_FLAG_DEBUG (DEBUG | DEVELOPMENT) -extern zone_t iss_zone; /* zone for saved_state area */ -extern zone_t ids_zone; /* zone for debug_state area */ +extern zone_t iss_zone; /* zone for saved_state area */ +extern zone_t ids_zone; /* zone for debug_state area */ +extern int tecs_mode_supported; + +int force_thread_policy_tecs; void act_machine_switch_pcb(__unused thread_t old, thread_t new) { - pcb_t pcb = THREAD_TO_PCB(new); - cpu_data_t *cdp = current_cpu_datap(); - struct real_descriptor *ldtp; - mach_vm_offset_t pcb_stack_top; + pcb_t pcb = THREAD_TO_PCB(new); + cpu_data_t *cdp = current_cpu_datap(); + struct real_descriptor *ldtp; + mach_vm_offset_t pcb_stack_top; assert(new->kernel_stack != 0); assert(ml_get_interrupts_enabled() == FALSE); -#ifdef DIRECTION_FLAG_DEBUG +#ifdef DIRECTION_FLAG_DEBUG if (x86_get_flags() & EFL_DF) { panic("Direction flag detected: 0x%lx", x86_get_flags()); } @@ -139,15 +143,15 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new) set_fs(NULL_SEG); if (get_gs() != NULL_SEG) { - swapgs(); /* switch to user's GS context */ + swapgs(); /* switch to user's GS context */ set_gs(NULL_SEG); - swapgs(); /* and back to kernel */ + swapgs(); /* and back to kernel */ /* record the active machine state lost */ cdp->cpu_uber.cu_user_gs_base = 0; - } + } - vm_offset_t isf; + vm_offset_t isf; /* * Set pointer to PCB's interrupt stack frame in cpu data. @@ -170,8 +174,7 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new) cdp->cd_estack = cpu_shadowp(cdp->cpu_number)->cd_estack = cdp->cpu_desc_index.cdi_sstku; if (is_saved_state64(pcb->iss)) { - - cdp->cpu_task_map = new->map->pmap->pm_task_map; + cdp->cpu_task_map = new->map->pmap->pm_task_map; /* * Enable the 64-bit user code segment, USER64_CS. 
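The ASSERT_IS_16BYTE_MULTIPLE_SIZEOF macro reformatted above is a pre-C11 compile-time assertion: when the size is not a 16-byte multiple, the extern array is declared with length -1 and compilation fails. A sketch of the same check written with C11 _Static_assert; the struct here is a stand-in, not a kernel type:

    #include <stdint.h>

    /* Stand-in for a save-area type; 32 bytes, so the check passes. */
    typedef struct {
        uint64_t regs[4];
    } example_save_area_t;

    /* Fails at compile time, not at runtime, if the size ever drifts. */
    _Static_assert((sizeof(example_save_area_t) % 16) == 0,
        "save area must be a 16-byte multiple");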
@@ -197,7 +200,6 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new) } } } else { - cdp->cpu_task_map = TASK_MAP_32BIT; /* @@ -218,27 +220,32 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new) * For 32-bit user this involves setting the USER_CTHREAD * descriptor in the LDT to point to the cthread data. * The involves copying in the pre-initialized descriptor. - */ - ldtp = (struct real_descriptor *)current_ldt(); + */ + ldtp = current_ldt(); ldtp[sel_idx(USER_CTHREAD)] = pcb->cthread_desc; - if (pcb->uldt_selector != 0) + if (pcb->uldt_selector != 0) { ldtp[sel_idx(pcb->uldt_selector)] = pcb->uldt_desc; + } cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self; + } + cdp->cpu_curthread_do_segchk = new->machine.mthr_do_segchk; + + /* + * Set the thread`s LDT or LDT entry. + */ + if (__probable(new->task == TASK_NULL || new->task->i386_ldt == 0)) { /* - * Set the thread`s LDT or LDT entry. + * Use system LDT. */ - if (new->task == TASK_NULL || new->task->i386_ldt == 0) { - /* - * Use system LDT. - */ - ml_cpu_set_ldt(KERNEL_LDT); - } else { - /* - * Task has its own LDT. - */ - user_ldt_set(new); - } + ml_cpu_set_ldt(KERNEL_LDT); + cdp->cpu_curtask_has_ldt = 0; + } else { + /* + * Task has its own LDT. + */ + user_ldt_set(new); + cdp->cpu_curtask_has_ldt = 1; } /* @@ -251,20 +258,20 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new) kern_return_t thread_set_wq_state32(thread_t thread, thread_state_t tstate) { - x86_thread_state32_t *state; - x86_saved_state32_t *saved_state; + x86_thread_state32_t *state; + x86_saved_state32_t *saved_state; thread_t curth = current_thread(); - spl_t s=0; + spl_t s = 0; pal_register_cache_state(thread, DIRTY); saved_state = USER_REGS32(thread); state = (x86_thread_state32_t *)tstate; - + if (curth != thread) { s = splsched(); - thread_lock(thread); + thread_lock(thread); } saved_state->ebp = 0; @@ -284,7 +291,7 @@ thread_set_wq_state32(thread_t thread, thread_state_t tstate) saved_state->es = USER_DS; if (curth != thread) { - thread_unlock(thread); + thread_unlock(thread); splx(s); } @@ -295,14 +302,14 @@ thread_set_wq_state32(thread_t thread, thread_state_t tstate) kern_return_t thread_set_wq_state64(thread_t thread, thread_state_t tstate) { - x86_thread_state64_t *state; - x86_saved_state64_t *saved_state; + x86_thread_state64_t *state; + x86_saved_state64_t *saved_state; thread_t curth = current_thread(); - spl_t s=0; + spl_t s = 0; saved_state = USER_REGS64(thread); state = (x86_thread_state64_t *)tstate; - + /* Disallow setting non-canonical PC or stack */ if (!IS_USERADDR64_CANONICAL(state->rsp) || !IS_USERADDR64_CANONICAL(state->rip)) { @@ -313,7 +320,7 @@ thread_set_wq_state64(thread_t thread, thread_state_t tstate) if (curth != thread) { s = splsched(); - thread_lock(thread); + thread_lock(thread); } saved_state->rbp = 0; @@ -330,7 +337,7 @@ thread_set_wq_state64(thread_t thread, thread_state_t tstate) saved_state->isf.rflags = EFL_USER_SET; if (curth != thread) { - thread_unlock(thread); + thread_unlock(thread); splx(s); } @@ -342,10 +349,10 @@ thread_set_wq_state64(thread_t thread, thread_state_t tstate) */ kern_return_t machine_thread_create( - thread_t thread, - task_t task) + thread_t thread, + task_t task) { - pcb_t pcb = THREAD_TO_PCB(thread); + pcb_t pcb = THREAD_TO_PCB(thread); #if NCOPY_WINDOWS > 0 inval_copy_windows(thread); @@ -354,27 +361,34 @@ machine_thread_create( thread->machine.physwindow_busy = 0; #endif + if (__improbable(force_thread_policy_tecs)) { + thread->machine.mthr_do_segchk = 1; + } else { + 
thread->machine.mthr_do_segchk = 0; + } + /* * Allocate save frame only if required. */ if (pcb->iss == NULL) { assert((get_preemption_level() == 0)); pcb->iss = (x86_saved_state_t *) zalloc(iss_zone); - if (pcb->iss == NULL) + if (pcb->iss == NULL) { panic("iss_zone"); + } } /* * Ensure that the synthesized 32-bit state including - * the 64-bit interrupt state can be acommodated in the + * the 64-bit interrupt state can be accommodated in the * 64-bit state we allocate for both 32-bit and 64-bit threads. */ assert(sizeof(pcb->iss->ss_32) + sizeof(pcb->iss->ss_64.isf) <= - sizeof(pcb->iss->ss_64)); + sizeof(pcb->iss->ss_64)); bzero((char *)pcb->iss, sizeof(x86_saved_state_t)); - if (task_has_64Bit_addr(task)) { + if (task_has_64Bit_addr(task)) { pcb->iss->flavor = x86_SAVED_STATE64; pcb->iss->ss_64.isf.cs = USER64_CS; @@ -406,7 +420,8 @@ machine_thread_create( pcb->cthread_desc = *gdt_desc_p(USER_DS); } - return(KERN_SUCCESS); + + return KERN_SUCCESS; } /* @@ -414,9 +429,9 @@ machine_thread_create( */ void machine_thread_destroy( - thread_t thread) + thread_t thread) { - pcb_t pcb = THREAD_TO_PCB(thread); + pcb_t pcb = THREAD_TO_PCB(thread); #if HYPERVISOR if (thread->hv_thread_target) { @@ -425,8 +440,9 @@ machine_thread_destroy( } #endif - if (pcb->ifps != 0) + if (pcb->ifps != 0) { fpu_free(thread, pcb->ifps); + } if (pcb->iss != 0) { zfree(iss_zone, pcb->iss); pcb->iss = 0; @@ -439,21 +455,22 @@ machine_thread_destroy( kern_return_t machine_thread_set_tsd_base( - thread_t thread, - mach_vm_offset_t tsd_base) + thread_t thread, + mach_vm_offset_t tsd_base) { - if (thread->task == kernel_task) { return KERN_INVALID_ARGUMENT; } if (thread_is_64bit_addr(thread)) { /* check for canonical address, set 0 otherwise */ - if (!IS_USERADDR64_CANONICAL(tsd_base)) + if (!IS_USERADDR64_CANONICAL(tsd_base)) { tsd_base = 0ULL; + } } else { - if (tsd_base > UINT32_MAX) + if (tsd_base > UINT32_MAX) { tsd_base = 0ULL; + } } pcb_t pcb = THREAD_TO_PCB(thread); @@ -467,8 +484,8 @@ machine_thread_set_tsd_base( .base_low = tsd_base & 0xffff, .base_med = (tsd_base >> 16) & 0xff, .base_high = (tsd_base >> 24) & 0xff, - .access = ACC_P|ACC_PL_U|ACC_DATA_W, - .granularity = SZ_32|SZ_G, + .access = ACC_P | ACC_PL_U | ACC_DATA_W, + .granularity = SZ_32 | SZ_G, }; pcb->cthread_desc = desc; @@ -477,19 +494,18 @@ machine_thread_set_tsd_base( /* For current thread, make the TSD base active immediately */ if (thread == current_thread()) { - if (thread_is_64bit_addr(thread)) { cpu_data_t *cdp; mp_disable_preemption(); cdp = current_cpu_datap(); if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) || - (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) + (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) { wrmsr64(MSR_IA32_KERNEL_GS_BASE, tsd_base); + } cdp->cpu_uber.cu_user_gs_base = tsd_base; mp_enable_preemption(); } else { - /* assign descriptor */ mp_disable_preemption(); *ldt_desc_p(USER_CTHREAD) = pcb->cthread_desc; @@ -499,3 +515,25 @@ machine_thread_set_tsd_base( return KERN_SUCCESS; } + +void +machine_tecs(thread_t thr) +{ + if (tecs_mode_supported) { + thr->machine.mthr_do_segchk = 1; + } +} + +int +machine_csv(cpuvn_e cve) +{ + switch (cve) { + case CPUVN_CI: + return (cpuid_wa_required(CPU_INTEL_SEGCHK) & CWA_ON) != 0; + + default: + break; + } + + return 0; +} diff --git a/osfmk/i386/phys.c b/osfmk/i386/phys.c index 49147fc2a..3cdae0971 100644 --- a/osfmk/i386/phys.c +++ b/osfmk/i386/phys.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
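machine_thread_set_tsd_base() above builds the USER_CTHREAD descriptor by splitting a 32-bit TSD base across the 16/8/8-bit base fields of an i386 segment descriptor. A self-contained sketch of that packing and its inverse; the struct and function names are illustrative, not kernel symbols:

    #include <stdint.h>

    struct seg_base_example {
        uint16_t base_low;   /* base bits  0..15 */
        uint8_t  base_med;   /* base bits 16..23 */
        uint8_t  base_high;  /* base bits 24..31 */
    };

    static struct seg_base_example
    pack_base(uint32_t base)
    {
        struct seg_base_example d = {
            .base_low  = base & 0xffff,
            .base_med  = (base >> 16) & 0xff,
            .base_high = (base >> 24) & 0xff,
        };
        return d;
    }

    /* Reassembles the base; pack_base/unpack_base round-trip exactly. */
    static uint32_t
    unpack_base(struct seg_base_example d)
    {
        return (uint32_t)d.base_low
            | ((uint32_t)d.base_med << 16)
            | ((uint32_t)d.base_high << 24);
    }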
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -92,7 +92,7 @@ */ void pmap_zero_page( - ppnum_t pn) + ppnum_t pn) { assert(pn != vm_page_fictitious_addr); assert(pn != vm_page_guard_addr); @@ -120,13 +120,13 @@ pmap_zero_part_page( */ void pmap_copy_part_page( - ppnum_t psrc, - vm_offset_t src_offset, - ppnum_t pdst, - vm_offset_t dst_offset, - vm_size_t len) + ppnum_t psrc, + vm_offset_t src_offset, + ppnum_t pdst, + vm_offset_t dst_offset, + vm_size_t len) { - pmap_paddr_t src, dst; + pmap_paddr_t src, dst; assert(psrc != vm_page_fictitious_addr); assert(pdst != vm_page_fictitious_addr); @@ -140,44 +140,40 @@ pmap_copy_part_page( assert((((uintptr_t)src & PAGE_MASK) + src_offset + len) <= PAGE_SIZE); bcopy_phys((addr64_t)src + (src_offset & INTEL_OFFMASK), - (addr64_t)dst + (dst_offset & INTEL_OFFMASK), - len); + (addr64_t)dst + (dst_offset & INTEL_OFFMASK), + len); } /* - * pmap_copy_part_lpage copies part of a virtually addressed page + * pmap_copy_part_lpage copies part of a virtually addressed page * to a physically addressed page. 
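The pmap_copy_part_page() assertions above enforce that each partial copy stays within a single page: the in-page offset of the physical address plus the copy offset and length may not exceed the page size. The same containment test in isolation; the 4 KiB page size is an assumption of this sketch:

    #include <stdint.h>

    #define EXAMPLE_PAGE_SIZE 4096u
    #define EXAMPLE_PAGE_MASK (EXAMPLE_PAGE_SIZE - 1)

    /* Nonzero if [pa+offset, pa+offset+len) stays inside pa's page. */
    static int
    copy_fits_in_page(uint64_t pa, uint32_t offset, uint32_t len)
    {
        return ((pa & EXAMPLE_PAGE_MASK) + offset + len) <= EXAMPLE_PAGE_SIZE;
    }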
*/ void pmap_copy_part_lpage( - __unused vm_offset_t src, - __unused ppnum_t pdst, - __unused vm_offset_t dst_offset, - __unused vm_size_t len) + __unused vm_offset_t src, + __unused ppnum_t pdst, + __unused vm_offset_t dst_offset, + __unused vm_size_t len) { - assert(pdst != vm_page_fictitious_addr); assert(pdst != vm_page_guard_addr); assert((dst_offset + len) <= PAGE_SIZE); - } /* - * pmap_copy_part_rpage copies part of a physically addressed page + * pmap_copy_part_rpage copies part of a physically addressed page * to a virtually addressed page. */ void pmap_copy_part_rpage( - __unused ppnum_t psrc, - __unused vm_offset_t src_offset, - __unused vm_offset_t dst, - __unused vm_size_t len) + __unused ppnum_t psrc, + __unused vm_offset_t src_offset, + __unused vm_offset_t dst, + __unused vm_size_t len) { - assert(psrc != vm_page_fictitious_addr); assert(psrc != vm_page_guard_addr); assert((src_offset + len) <= PAGE_SIZE); - } /* @@ -192,10 +188,11 @@ kvtophys( pmap_paddr_t pa; pa = ((pmap_paddr_t)pmap_find_phys(kernel_pmap, addr)) << INTEL_PGSHIFT; - if (pa) + if (pa) { pa |= (addr & INTEL_OFFMASK); + } - return ((addr64_t)pa); + return (addr64_t)pa; } extern pt_entry_t *debugger_ptep; @@ -205,7 +202,9 @@ extern int _bcopy2(const void *, void *); extern int _bcopy4(const void *, void *); extern int _bcopy8(const void *, void *); -__private_extern__ int ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) { +__private_extern__ int +ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t bytes) +{ void *src, *dst; int err = 0; @@ -241,21 +240,22 @@ __private_extern__ int ml_copy_phys(addr64_t src64, addr64_t dst64, vm_size_t by * identical mapping. */ if (debug_pa) { - if (debugger_window_kva == 0) + if (debugger_window_kva == 0) { panic("%s: invoked in non-debug mode", __FUNCTION__); + } /* Establish a cache-inhibited physical window; some platforms * may not cover arbitrary ranges with MTRRs */ - pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF| INTEL_PTE_MOD | INTEL_PTE_VALID); - flush_tlb_raw(); -#if DEBUG + pmap_store_pte(debugger_ptep, debug_pa | INTEL_PTE_NCACHE | INTEL_PTE_RW | INTEL_PTE_REF | INTEL_PTE_MOD | INTEL_PTE_VALID); + pmap_tlbi_range(0, ~0ULL, true, 0); +#if DEBUG kprintf("Remapping debugger physical window at %p to 0x%llx\n", (void *)debugger_window_kva, debug_pa); #endif } #endif /* ensure we stay within a page */ - if (((((uint32_t)src64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES-1)) + bytes) > I386_PGBYTES) ) { - panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64); + if (((((uint32_t)src64 & (I386_PGBYTES - 1)) + bytes) > I386_PGBYTES) || ((((uint32_t)dst64 & (I386_PGBYTES - 1)) + bytes) > I386_PGBYTES)) { + panic("ml_copy_phys spans pages, src: 0x%llx, dst: 0x%llx", src64, dst64); } /* diff --git a/osfmk/i386/pio.h b/osfmk/i386/pio.h index b90616545..dfcc678bd 100644 --- a/osfmk/i386/pio.h +++ b/osfmk/i386/pio.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,38 +22,38 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -/* +/* */ #ifndef I386_PIO_H #define I386_PIO_H diff --git a/osfmk/i386/pmCPU.c b/osfmk/i386/pmCPU.c index de55ad8dd..ff49e3fe2 100644 --- a/osfmk/i386/pmCPU.c +++ b/osfmk/i386/pmCPU.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
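Back in phys.c, kvtophys() assembles a physical address from the frame number returned by pmap_find_phys(): the frame is shifted left by the page shift and, only when a mapping exists, OR'd with the in-page offset of the virtual address. A reduced sketch; the shift and mask values are assumptions for 4 KiB pages:

    #include <stdint.h>

    #define EXAMPLE_PGSHIFT 12
    #define EXAMPLE_OFFMASK ((1u << EXAMPLE_PGSHIFT) - 1)

    /* Returns 0 when pfn is 0 (no mapping), as kvtophys() does. */
    static uint64_t
    pfn_to_phys(uint64_t pfn, uint64_t vaddr)
    {
        uint64_t pa = pfn << EXAMPLE_PGSHIFT;
        if (pa) {
            pa |= (vaddr & EXAMPLE_OFFMASK);
        }
        return pa;
    }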
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -52,22 +52,24 @@ #include #include +#include + extern int disableConsoleOutput; -#define DELAY_UNSET 0xFFFFFFFFFFFFFFFFULL +#define DELAY_UNSET 0xFFFFFFFFFFFFFFFFULL -uint64_t cpu_itime_bins[CPU_ITIME_BINS] = {16* NSEC_PER_USEC, 32* NSEC_PER_USEC, 64* NSEC_PER_USEC, 128* NSEC_PER_USEC, 256* NSEC_PER_USEC, 512* NSEC_PER_USEC, 1024* NSEC_PER_USEC, 2048* NSEC_PER_USEC, 4096* NSEC_PER_USEC, 8192* NSEC_PER_USEC, 16384* NSEC_PER_USEC, 32768* NSEC_PER_USEC}; +uint64_t cpu_itime_bins[CPU_ITIME_BINS] = {16 * NSEC_PER_USEC, 32 * NSEC_PER_USEC, 64 * NSEC_PER_USEC, 128 * NSEC_PER_USEC, 256 * NSEC_PER_USEC, 512 * NSEC_PER_USEC, 1024 * NSEC_PER_USEC, 2048 * NSEC_PER_USEC, 4096 * NSEC_PER_USEC, 8192 * NSEC_PER_USEC, 16384 * NSEC_PER_USEC, 32768 * NSEC_PER_USEC}; uint64_t *cpu_rtime_bins = &cpu_itime_bins[0]; /* * The following is set when the KEXT loads and initializes. */ -pmDispatch_t *pmDispatch = NULL; +pmDispatch_t *pmDispatch = NULL; -uint32_t pmInitDone = 0; -static boolean_t earlyTopology = FALSE; -static uint64_t earlyMaxBusDelay = DELAY_UNSET; -static uint64_t earlyMaxIntDelay = DELAY_UNSET; +uint32_t pmInitDone = 0; +static boolean_t earlyTopology = FALSE; +static uint64_t earlyMaxBusDelay = DELAY_UNSET; +static uint64_t earlyMaxIntDelay = DELAY_UNSET; /* * Initialize the Cstate change code. @@ -75,22 +77,25 @@ static uint64_t earlyMaxIntDelay = DELAY_UNSET; void power_management_init(void) { - if (pmDispatch != NULL && pmDispatch->cstateInit != NULL) - (*pmDispatch->cstateInit)(); + if (pmDispatch != NULL && pmDispatch->cstateInit != NULL) { + (*pmDispatch->cstateInit)(); + } } -static inline void machine_classify_interval(uint64_t interval, uint64_t *bins, uint64_t *binvals, uint32_t nbins) { +static inline void +machine_classify_interval(uint64_t interval, uint64_t *bins, uint64_t *binvals, uint32_t nbins) +{ uint32_t i; - for (i = 0; i < nbins; i++) { - if (interval < binvals[i]) { - bins[i]++; - break; - } - } + for (i = 0; i < nbins; i++) { + if (interval < binvals[i]) { + bins[i]++; + break; + } + } } -uint64_t idle_pending_timers_processed; -uint32_t idle_entry_timer_processing_hdeadline_threshold = 5000000; +uint64_t idle_pending_timers_processed; +uint32_t idle_entry_timer_processing_hdeadline_threshold = 5000000; /* * Called when the CPU is idle. 
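machine_classify_interval() above buckets an idle interval into the first histogram bin whose bound exceeds it; the cpu_itime_bins thresholds double from 16 us up to 32768 us, expressed in nanoseconds. A usage sketch with illustrative names, not kernel symbols:

    #include <stdint.h>

    static void
    classify_interval_example(uint64_t interval, uint64_t *bins,
        const uint64_t *binvals, uint32_t nbins)
    {
        for (uint32_t i = 0; i < nbins; i++) {
            if (interval < binvals[i]) {
                bins[i]++;   /* first bin whose bound exceeds the interval */
                break;
            }
        }
        /* Intervals >= the last bound are not counted, as in the kernel. */
    }

    static void
    demo(void)
    {
        uint64_t bins[12] = {0};
        uint64_t bounds[12];

        for (int i = 0; i < 12; i++) {
            bounds[i] = (16ULL << i) * 1000;   /* 16 us << i, in ns */
        }
        /* A 50 us interval increments bins[2], the 64 us bucket. */
        classify_interval_example(50 * 1000, bins, bounds, 12);
    }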
It calls into the power management kext @@ -99,12 +104,12 @@ uint32_t idle_entry_timer_processing_hdeadline_threshold = 5000000; void machine_idle(void) { - cpu_data_t *my_cpu = current_cpu_datap(); - __unused uint32_t cnum = my_cpu->cpu_number; - uint64_t ctime, rtime, itime; + cpu_data_t *my_cpu = current_cpu_datap(); + __unused uint32_t cnum = my_cpu->cpu_number; + uint64_t ctime, rtime, itime; #if CST_DEMOTION_DEBUG - processor_t cproc = my_cpu->cpu_processor; - uint64_t cwakeups = PROCESSOR_DATA(cproc, wakeups_issued_total); + processor_t cproc = my_cpu->cpu_processor; + uint64_t cwakeups = PROCESSOR_DATA(cproc, wakeups_issued_total); #endif /* CST_DEMOTION_DEBUG */ uint64_t esdeadline, ehdeadline; boolean_t do_process_pending_timers = FALSE; @@ -112,7 +117,7 @@ machine_idle(void) ctime = mach_absolute_time(); esdeadline = my_cpu->rtclock_timer.queue.earliest_soft_deadline; ehdeadline = my_cpu->rtclock_timer.deadline; -/* Determine if pending timers exist */ +/* Determine if pending timers exist */ if ((ctime >= esdeadline) && (ctime < ehdeadline) && ((ehdeadline - ctime) < idle_entry_timer_processing_hdeadline_threshold)) { idle_pending_timers_processed++; @@ -121,7 +126,7 @@ machine_idle(void) } else { TCOAL_DEBUG(0xCCCC0000, ctime, my_cpu->rtclock_timer.queue.earliest_soft_deadline, my_cpu->rtclock_timer.deadline, idle_pending_timers_processed, 0); } - + my_cpu->lcpu.state = LCPU_IDLE; DBGLOG(cpu_handle, cpu_number(), MP_IDLE); MARK_CPU_IDLE(cnum); @@ -148,17 +153,19 @@ machine_idle(void) * this here since we know at this point the values will be first * used since idle is where the decisions using these values is made. */ - if (earlyMaxBusDelay != DELAY_UNSET) + if (earlyMaxBusDelay != DELAY_UNSET) { ml_set_maxbusdelay((uint32_t)(earlyMaxBusDelay & 0xFFFFFFFF)); - if (earlyMaxIntDelay != DELAY_UNSET) + } + if (earlyMaxIntDelay != DELAY_UNSET) { ml_set_maxintdelay(earlyMaxIntDelay); + } } if (pmInitDone && pmDispatch != NULL - && pmDispatch->MachineIdle != NULL) + && pmDispatch->MachineIdle != NULL) { (*pmDispatch->MachineIdle)(0x7FFFFFFFFFFFFFFFULL); - else { + } else { /* * If no power management, re-enable interrupts and halt. * This will keep the CPU from spinning through the scheduler @@ -180,8 +187,8 @@ machine_idle(void) uint64_t ixtime = my_cpu->cpu_ixtime = mach_absolute_time(); itime = ixtime - ctime; my_cpu->cpu_idle_exits++; - my_cpu->cpu_itime_total += itime; - machine_classify_interval(itime, &my_cpu->cpu_itimes[0], &cpu_itime_bins[0], CPU_ITIME_BINS); + my_cpu->cpu_itime_total += itime; + machine_classify_interval(itime, &my_cpu->cpu_itimes[0], &cpu_itime_bins[0], CPU_ITIME_BINS); #if CST_DEMOTION_DEBUG cl = ch = 0; rdmsr_carefully(MSR_IA32_CORE_C3_RESIDENCY, &cl, &ch); @@ -193,11 +200,12 @@ machine_idle(void) uint64_t ndelta = itime - tmrCvt(c3res + c6res + c7res, tscFCvtt2n); KERNEL_DEBUG_CONSTANT(0xcead0000, ndelta, itime, c7res, c6res, c3res); - if ((itime > 1000000) && (ndelta > 250000)) + if ((itime > 1000000) && (ndelta > 250000)) { KERNEL_DEBUG_CONSTANT(0xceae0000, ndelta, itime, c7res, c6res, c3res); + } #endif - machine_idle_exit: +machine_idle_exit: /* * Re-enable interrupts. 
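The entry test in machine_idle() above chooses timer processing over idling when the soft deadline has already passed but the hard deadline is still pending and nearer than idle_entry_timer_processing_hdeadline_threshold (5 ms in the diff). The predicate in isolation, as a sketch with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* True when draining pending timers beats entering idle. */
    static bool
    should_process_timers(uint64_t now, uint64_t soft_deadline,
        uint64_t hard_deadline, uint64_t threshold)
    {
        return now >= soft_deadline &&
               now < hard_deadline &&
               (hard_deadline - now) < threshold;
    }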
*/ @@ -220,7 +228,7 @@ machine_idle(void) if ((nwakeups == cwakeups) && (topoParms.nLThreadsPerPackage == my_cpu->lcpu.package->num_idle)) { KERNEL_DEBUG_CONSTANT(0xceaa0000, cwakeups, 0, 0, 0, 0); } -#endif +#endif } /* @@ -230,144 +238,144 @@ machine_idle(void) void pmCPUHalt(uint32_t reason) { - cpu_data_t *cpup = current_cpu_datap(); + cpu_data_t *cpup = current_cpu_datap(); + + switch (reason) { + case PM_HALT_DEBUG: + cpup->lcpu.state = LCPU_PAUSE; + pal_stop_cpu(FALSE); + break; + + case PM_HALT_PANIC: + cpup->lcpu.state = LCPU_PAUSE; + pal_stop_cpu(TRUE); + break; + + case PM_HALT_NORMAL: + case PM_HALT_SLEEP: + default: + pal_cli(); - switch (reason) { - case PM_HALT_DEBUG: - cpup->lcpu.state = LCPU_PAUSE; - pal_stop_cpu(FALSE); - break; + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->pmCPUHalt != NULL) { + /* + * Halt the CPU (and put it in a low power state). + */ + (*pmDispatch->pmCPUHalt)(); - case PM_HALT_PANIC: - cpup->lcpu.state = LCPU_PAUSE; - pal_stop_cpu(TRUE); - break; + /* + * We've exited halt, so get the CPU schedulable again. + * - by calling the fast init routine for a slave, or + * - by returning if we're the master processor. + */ + if (cpup->cpu_number != master_cpu) { + i386_init_slave_fast(); + panic("init_slave_fast returned"); + } + } else { + /* + * If no power management and a processor is taken off-line, + * then invalidate the cache and halt it (it will not be able + * to be brought back on-line without resetting the CPU). + */ + __asm__ volatile ("wbinvd"); + cpup->lcpu.state = LCPU_HALT; + pal_stop_cpu(FALSE); - case PM_HALT_NORMAL: - case PM_HALT_SLEEP: - default: - pal_cli(); + panic("back from Halt"); + } - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->pmCPUHalt != NULL) { - /* - * Halt the CPU (and put it in a low power state. - */ - (*pmDispatch->pmCPUHalt)(); - - /* - * We've exited halt, so get the CPU schedulable again. - * - by calling the fast init routine for a slave, or - * - by returning if we're the master processor. - */ - if (cpup->cpu_number != master_cpu) { - i386_init_slave_fast(); - panic("init_slave_fast returned"); - } - } else - { - /* - * If no power managment and a processor is taken off-line, - * then invalidate the cache and halt it (it will not be able - * to be brought back on-line without resetting the CPU). 
- */ - __asm__ volatile ("wbinvd"); - cpup->lcpu.state = LCPU_HALT; - pal_stop_cpu(FALSE); - - panic("back from Halt"); - } - - break; - } + break; + } } void pmMarkAllCPUsOff(void) { - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->markAllCPUsOff != NULL) - (*pmDispatch->markAllCPUsOff)(); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->markAllCPUsOff != NULL) { + (*pmDispatch->markAllCPUsOff)(); + } } static void pmInitComplete(void) { - if (earlyTopology - && pmDispatch != NULL - && pmDispatch->pmCPUStateInit != NULL) { - (*pmDispatch->pmCPUStateInit)(); - earlyTopology = FALSE; - } - pmInitDone = 1; + if (earlyTopology + && pmDispatch != NULL + && pmDispatch->pmCPUStateInit != NULL) { + (*pmDispatch->pmCPUStateInit)(); + earlyTopology = FALSE; + } + pmInitDone = 1; } x86_lcpu_t * pmGetLogicalCPU(int cpu) { - return(cpu_to_lcpu(cpu)); + return cpu_to_lcpu(cpu); } x86_lcpu_t * pmGetMyLogicalCPU(void) { - cpu_data_t *cpup = current_cpu_datap(); + cpu_data_t *cpup = current_cpu_datap(); - return(&cpup->lcpu); + return &cpup->lcpu; } static x86_core_t * pmGetCore(int cpu) { - return(cpu_to_core(cpu)); + return cpu_to_core(cpu); } static x86_core_t * pmGetMyCore(void) { - cpu_data_t *cpup = current_cpu_datap(); + cpu_data_t *cpup = current_cpu_datap(); - return(cpup->lcpu.core); + return cpup->lcpu.core; } static x86_die_t * pmGetDie(int cpu) { - return(cpu_to_die(cpu)); + return cpu_to_die(cpu); } static x86_die_t * pmGetMyDie(void) { - cpu_data_t *cpup = current_cpu_datap(); + cpu_data_t *cpup = current_cpu_datap(); - return(cpup->lcpu.die); + return cpup->lcpu.die; } static x86_pkg_t * pmGetPackage(int cpu) { - return(cpu_to_package(cpu)); + return cpu_to_package(cpu); } static x86_pkg_t * pmGetMyPackage(void) { - cpu_data_t *cpup = current_cpu_datap(); + cpu_data_t *cpup = current_cpu_datap(); - return(cpup->lcpu.package); + return cpup->lcpu.package; } static void pmLockCPUTopology(int lock) { - if (lock) { - mp_safe_spin_lock(&x86_topo_lock); - } else { - simple_unlock(&x86_topo_lock); - } + if (lock) { + mp_safe_spin_lock(&x86_topo_lock); + } else { + simple_unlock(&x86_topo_lock); + } } /* @@ -379,14 +387,15 @@ pmLockCPUTopology(int lock) uint64_t pmCPUGetDeadline(cpu_data_t *cpu) { - uint64_t deadline = 0; + uint64_t deadline = 0; - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->GetDeadline != NULL) - deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->GetDeadline != NULL) { + deadline = (*pmDispatch->GetDeadline)(&cpu->lcpu); + } - return(deadline); + return deadline; } /* @@ -397,12 +406,13 @@ pmCPUGetDeadline(cpu_data_t *cpu) uint64_t pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline) { - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->SetDeadline != NULL) - deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->SetDeadline != NULL) { + deadline = (*pmDispatch->SetDeadline)(&cpu->lcpu, deadline); + } - return(deadline); + return deadline; } /* @@ -411,10 +421,11 @@ pmCPUSetDeadline(cpu_data_t *cpu, uint64_t deadline) void pmCPUDeadline(cpu_data_t *cpu) { - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->Deadline != NULL) - (*pmDispatch->Deadline)(&cpu->lcpu); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->Deadline != NULL) { + (*pmDispatch->Deadline)(&cpu->lcpu); + } } /* @@ -423,42 +434,45 @@ pmCPUDeadline(cpu_data_t *cpu) boolean_t pmCPUExitIdle(cpu_data_t *cpu) { - boolean_t do_ipi; + boolean_t do_ipi; 
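The pm* wrappers reworked above all follow one guarded-dispatch pattern: call through the power-management kext's table only when pmInitDone is set and the hook is implemented, otherwise fall back to a conservative default. A reduced sketch of the pattern; the types and names are illustrative:

    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint64_t (*GetDeadline)(void *lcpu);
    } pm_dispatch_example_t;

    static pm_dispatch_example_t *pm_dispatch_ex;  /* set when the kext loads */
    static int pm_init_done_ex;

    static uint64_t
    pm_get_deadline_ex(void *lcpu)
    {
        uint64_t deadline = 0;   /* default when no hook is registered */

        if (pm_init_done_ex
            && pm_dispatch_ex != NULL
            && pm_dispatch_ex->GetDeadline != NULL) {
            deadline = (*pm_dispatch_ex->GetDeadline)(lcpu);
        }
        return deadline;
    }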
- if (pmInitDone - && pmDispatch != NULL - && pmDispatch->exitIdle != NULL) - do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu); - else - do_ipi = TRUE; + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->exitIdle != NULL) { + do_ipi = (*pmDispatch->exitIdle)(&cpu->lcpu); + } else { + do_ipi = TRUE; + } - return(do_ipi); + return do_ipi; } kern_return_t pmCPUExitHalt(int cpu) { - kern_return_t rc = KERN_INVALID_ARGUMENT; + kern_return_t rc = KERN_INVALID_ARGUMENT; - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->exitHalt != NULL) - rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu)); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->exitHalt != NULL) { + rc = pmDispatch->exitHalt(cpu_to_lcpu(cpu)); + } - return(rc); + return rc; } kern_return_t pmCPUExitHaltToOff(int cpu) { - kern_return_t rc = KERN_SUCCESS; + kern_return_t rc = KERN_SUCCESS; - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->exitHaltToOff != NULL) - rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu)); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->exitHaltToOff != NULL) { + rc = pmDispatch->exitHaltToOff(cpu_to_lcpu(cpu)); + } - return(rc); + return rc; } /* @@ -467,10 +481,11 @@ pmCPUExitHaltToOff(int cpu) void pmCPUStateInit(void) { - if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL) - (*pmDispatch->pmCPUStateInit)(); - else - earlyTopology = TRUE; + if (pmDispatch != NULL && pmDispatch->pmCPUStateInit != NULL) { + (*pmDispatch->pmCPUStateInit)(); + } else { + earlyTopology = TRUE; + } } /* @@ -479,14 +494,15 @@ pmCPUStateInit(void) void pmCPUMarkRunning(cpu_data_t *cpu) { - cpu_data_t *cpup = current_cpu_datap(); + cpu_data_t *cpup = current_cpu_datap(); - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->markCPURunning != NULL) - (*pmDispatch->markCPURunning)(&cpu->lcpu); - else - cpup->lcpu.state = LCPU_RUN; + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->markCPURunning != NULL) { + (*pmDispatch->markCPURunning)(&cpu->lcpu); + } else { + cpup->lcpu.state = LCPU_RUN; + } } /* @@ -495,13 +511,14 @@ pmCPUMarkRunning(cpu_data_t *cpu) int pmCPUControl(uint32_t cmd, void *datap) { - int rc = -1; + int rc = -1; - if (pmDispatch != NULL - && pmDispatch->pmCPUControl != NULL) - rc = (*pmDispatch->pmCPUControl)(cmd, datap); + if (pmDispatch != NULL + && pmDispatch->pmCPUControl != NULL) { + rc = (*pmDispatch->pmCPUControl)(cmd, datap); + } - return(rc); + return rc; } /* @@ -511,9 +528,10 @@ pmCPUControl(uint32_t cmd, void *datap) void pmTimerSave(void) { - if (pmDispatch != NULL - && pmDispatch->pmTimerStateSave != NULL) - (*pmDispatch->pmTimerStateSave)(); + if (pmDispatch != NULL + && pmDispatch->pmTimerStateSave != NULL) { + (*pmDispatch->pmTimerStateSave)(); + } } /* @@ -523,9 +541,10 @@ pmTimerSave(void) void pmTimerRestore(void) { - if (pmDispatch != NULL - && pmDispatch->pmTimerStateRestore != NULL) - (*pmDispatch->pmTimerStateRestore)(); + if (pmDispatch != NULL + && pmDispatch->pmTimerStateRestore != NULL) { + (*pmDispatch->pmTimerStateRestore)(); + } } /* @@ -544,28 +563,30 @@ ml_set_maxsnoop(__unused uint32_t maxdelay) unsigned ml_get_maxsnoop(void) { - uint64_t max_snoop = 0; + uint64_t max_snoop = 0; - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->getMaxSnoop != NULL) - max_snoop = pmDispatch->getMaxSnoop(); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->getMaxSnoop != NULL) { + max_snoop = pmDispatch->getMaxSnoop(); + } - return((unsigned)(max_snoop & 0xffffffff)); + return (unsigned)(max_snoop & 0xffffffff); } uint32_t 
ml_get_maxbusdelay(void) { - uint64_t max_delay = 0; + uint64_t max_delay = 0; - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->getMaxBusDelay != NULL) - max_delay = pmDispatch->getMaxBusDelay(); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->getMaxBusDelay != NULL) { + max_delay = pmDispatch->getMaxBusDelay(); + } - return((uint32_t)(max_delay & 0xffffffff)); + return (uint32_t)(max_delay & 0xffffffff); } /* @@ -574,26 +595,28 @@ ml_get_maxbusdelay(void) void ml_set_maxbusdelay(uint32_t mdelay) { - uint64_t maxdelay = mdelay; + uint64_t maxdelay = mdelay; - if (pmDispatch != NULL - && pmDispatch->setMaxBusDelay != NULL) { - earlyMaxBusDelay = DELAY_UNSET; - pmDispatch->setMaxBusDelay(maxdelay); - } else - earlyMaxBusDelay = maxdelay; + if (pmDispatch != NULL + && pmDispatch->setMaxBusDelay != NULL) { + earlyMaxBusDelay = DELAY_UNSET; + pmDispatch->setMaxBusDelay(maxdelay); + } else { + earlyMaxBusDelay = maxdelay; + } } uint64_t ml_get_maxintdelay(void) { - uint64_t max_delay = 0; + uint64_t max_delay = 0; - if (pmDispatch != NULL - && pmDispatch->getMaxIntDelay != NULL) - max_delay = pmDispatch->getMaxIntDelay(); + if (pmDispatch != NULL + && pmDispatch->getMaxIntDelay != NULL) { + max_delay = pmDispatch->getMaxIntDelay(); + } - return(max_delay); + return max_delay; } /* @@ -602,25 +625,27 @@ ml_get_maxintdelay(void) void ml_set_maxintdelay(uint64_t mdelay) { - if (pmDispatch != NULL - && pmDispatch->setMaxIntDelay != NULL) { - earlyMaxIntDelay = DELAY_UNSET; - pmDispatch->setMaxIntDelay(mdelay); - } else - earlyMaxIntDelay = mdelay; + if (pmDispatch != NULL + && pmDispatch->setMaxIntDelay != NULL) { + earlyMaxIntDelay = DELAY_UNSET; + pmDispatch->setMaxIntDelay(mdelay); + } else { + earlyMaxIntDelay = mdelay; + } } boolean_t ml_get_interrupt_prewake_applicable() { - boolean_t applicable = FALSE; + boolean_t applicable = FALSE; - if (pmInitDone - && pmDispatch != NULL - && pmDispatch->pmInterruptPrewakeApplicable != NULL) - applicable = pmDispatch->pmInterruptPrewakeApplicable(); + if (pmInitDone + && pmDispatch != NULL + && pmDispatch->pmInterruptPrewakeApplicable != NULL) { + applicable = pmDispatch->pmInterruptPrewakeApplicable(); + } - return applicable; + return applicable; } /* @@ -633,135 +658,144 @@ ml_get_interrupt_prewake_applicable() void pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags) { - if (pmDispatch != NULL - && pmDispatch->pmCPUSafeMode != NULL) - pmDispatch->pmCPUSafeMode(lcpu, flags); - else { - /* - * Do something reasonable if the KEXT isn't present. - * - * We only look at the PAUSE and RESUME flags. The other flag(s) - * will not make any sense without the KEXT, so just ignore them. - * - * We set the CPU's state to indicate that it's halted. If this - * is the CPU we're currently running on, then spin until the - * state becomes non-halted. - */ - if (flags & PM_SAFE_FL_PAUSE) { - lcpu->state = LCPU_PAUSE; - if (lcpu == x86_lcpu()) { - while (lcpu->state == LCPU_PAUSE) - cpu_pause(); - } - } - - /* - * Clear the halted flag for the specified CPU, that will - * get it out of it's spin loop. - */ - if (flags & PM_SAFE_FL_RESUME) { - lcpu->state = LCPU_RUN; + if (pmDispatch != NULL + && pmDispatch->pmCPUSafeMode != NULL) { + pmDispatch->pmCPUSafeMode(lcpu, flags); + } else { + /* + * Do something reasonable if the KEXT isn't present. + * + * We only look at the PAUSE and RESUME flags. The other flag(s) + * will not make any sense without the KEXT, so just ignore them. + * + * We set the CPU's state to indicate that it's halted. 
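Reviewer note on the fallback this comment describes: with no PM kext loaded, PM_SAFE_FL_PAUSE parks the current CPU by publishing LCPU_PAUSE and spinning on cpu_pause() until another CPU stores LCPU_RUN (the PM_SAFE_FL_RESUME path). A hedged sketch of that park/resume handshake using portable C11 atomics — names are hypothetical; the kernel relies on its own lcpu state field rather than these atomics:

#include <stdatomic.h>

enum { STATE_RUN = 0, STATE_PAUSE = 1 };        /* stand-ins for LCPU_RUN/LCPU_PAUSE */
static _Atomic int lcpu_state = STATE_RUN;

/* Park the calling CPU: publish PAUSE, then spin (with a pause hint)
 * until some other CPU stores RUN on our behalf. */
static void
park_self(void)
{
    atomic_store(&lcpu_state, STATE_PAUSE);
    while (atomic_load(&lcpu_state) == STATE_PAUSE) {
        __builtin_ia32_pause();                 /* x86 cpu_pause() equivalent */
    }
}

/* PM_SAFE_FL_RESUME path, run from another CPU. */
static void
resume_parked(void)
{
    atomic_store(&lcpu_state, STATE_RUN);
}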
If this + * is the CPU we're currently running on, then spin until the + * state becomes non-halted. + */ + if (flags & PM_SAFE_FL_PAUSE) { + lcpu->state = LCPU_PAUSE; + if (lcpu == x86_lcpu()) { + while (lcpu->state == LCPU_PAUSE) { + cpu_pause(); + } + } + } + + /* + * Clear the halted flag for the specified CPU, that will + * get it out of it's spin loop. + */ + if (flags & PM_SAFE_FL_RESUME) { + lcpu->state = LCPU_RUN; + } } - } } -static uint32_t saved_run_count = 0; +static uint32_t saved_run_count = 0; void machine_run_count(uint32_t count) { - if (pmDispatch != NULL - && pmDispatch->pmSetRunCount != NULL) - pmDispatch->pmSetRunCount(count); - else - saved_run_count = count; + if (pmDispatch != NULL + && pmDispatch->pmSetRunCount != NULL) { + pmDispatch->pmSetRunCount(count); + } else { + saved_run_count = count; + } } processor_t machine_choose_processor(processor_set_t pset, - processor_t preferred) + processor_t preferred) { - int startCPU; - int endCPU; - int preferredCPU; - int chosenCPU; + int startCPU; + int endCPU; + int preferredCPU; + int chosenCPU; - if (!pmInitDone) - return(preferred); + if (!pmInitDone) { + return preferred; + } - if (pset == NULL) { - startCPU = -1; - endCPU = -1; - } else { - startCPU = pset->cpu_set_low; - endCPU = pset->cpu_set_hi; - } + if (pset == NULL) { + startCPU = -1; + endCPU = -1; + } else { + startCPU = pset->cpu_set_low; + endCPU = pset->cpu_set_hi; + } - if (preferred == NULL) - preferredCPU = -1; - else - preferredCPU = preferred->cpu_id; + if (preferred == NULL) { + preferredCPU = -1; + } else { + preferredCPU = preferred->cpu_id; + } - if (pmDispatch != NULL - && pmDispatch->pmChooseCPU != NULL) { - chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU); + if (pmDispatch != NULL + && pmDispatch->pmChooseCPU != NULL) { + chosenCPU = pmDispatch->pmChooseCPU(startCPU, endCPU, preferredCPU); - if (chosenCPU == -1) - return(NULL); - return(cpu_datap(chosenCPU)->cpu_processor); - } + if (chosenCPU == -1) { + return NULL; + } + return cpu_datap(chosenCPU)->cpu_processor; + } - return(preferred); + return preferred; } static int pmThreadGetUrgency(uint64_t *rt_period, uint64_t *rt_deadline) { - int urgency; + thread_urgency_t urgency; uint64_t arg1, arg2; urgency = thread_get_urgency(current_processor()->next_thread, &arg1, &arg2); if (urgency == THREAD_URGENCY_REAL_TIME) { - if (rt_period != NULL) + if (rt_period != NULL) { *rt_period = arg1; - - if (rt_deadline != NULL) + } + + if (rt_deadline != NULL) { *rt_deadline = arg2; + } } - return(urgency); + return (int)urgency; } -#if DEBUG -uint32_t urgency_stats[64][THREAD_URGENCY_MAX]; +#if DEBUG +uint32_t urgency_stats[64][THREAD_URGENCY_MAX]; #endif -#define URGENCY_NOTIFICATION_ASSERT_NS (5 * 1000 * 1000) -uint64_t urgency_notification_assert_abstime_threshold, urgency_notification_max_recorded; +#define URGENCY_NOTIFICATION_ASSERT_NS (5 * 1000 * 1000) +uint64_t urgency_notification_assert_abstime_threshold, urgency_notification_max_recorded; void -thread_tell_urgency(int urgency, +thread_tell_urgency(thread_urgency_t urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency, thread_t nthread) { - uint64_t urgency_notification_time_start = 0, delta; - boolean_t urgency_assert = (urgency_notification_assert_abstime_threshold != 0); + uint64_t urgency_notification_time_start = 0, delta; + boolean_t urgency_assert = (urgency_notification_assert_abstime_threshold != 0); assert(get_preemption_level() > 0 || ml_get_interrupts_enabled() == FALSE); -#if DEBUG +#if DEBUG 
urgency_stats[cpu_number() % 64][urgency]++; #endif if (!pmInitDone || pmDispatch == NULL - || pmDispatch->pmThreadTellUrgency == NULL) + || pmDispatch->pmThreadTellUrgency == NULL) { return; + } - SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0); + SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0); - if (__improbable((urgency_assert == TRUE))) + if (__improbable((urgency_assert == TRUE))) { urgency_notification_time_start = mach_absolute_time(); + } current_cpu_datap()->cpu_nthread = nthread; pmDispatch->pmThreadTellUrgency(urgency, rt_period, rt_deadline); @@ -776,73 +810,86 @@ thread_tell_urgency(int urgency, */ urgency_notification_max_recorded = delta; - if (__improbable((delta > urgency_notification_assert_abstime_threshold) && !machine_timeout_suspended())) + if (__improbable((delta > urgency_notification_assert_abstime_threshold) && !machine_timeout_suspended())) { panic("Urgency notification callout %p exceeded threshold, 0x%llx abstime units", pmDispatch->pmThreadTellUrgency, delta); + } } } - SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0); + SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0); } void machine_thread_going_on_core(__unused thread_t new_thread, - __unused int urgency, - __unused uint64_t sched_latency, - __unused uint64_t same_pri_latency, - __unused uint64_t dispatch_time) + __unused thread_urgency_t urgency, + __unused uint64_t sched_latency, + __unused uint64_t same_pri_latency, + __unused uint64_t dispatch_time) { } void -machine_thread_going_off_core(__unused thread_t old_thread, __unused boolean_t thread_terminating, __unused uint64_t last_dispatch) +machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, + uint64_t last_dispatch, boolean_t thread_runnable) { + if (!pmInitDone + || pmDispatch == NULL + || pmDispatch->pmThreadGoingOffCore == NULL) { + return; + } + + pmDispatch->pmThreadGoingOffCore(old_thread, thread_terminating, + last_dispatch, thread_runnable); } void machine_max_runnable_latency(__unused uint64_t bg_max_latency, - __unused uint64_t default_max_latency, - __unused uint64_t realtime_max_latency) + __unused uint64_t default_max_latency, + __unused uint64_t realtime_max_latency) { } void machine_work_interval_notify(__unused thread_t thread, - __unused struct kern_work_interval_args* kwi_args) + __unused struct kern_work_interval_args* kwi_args) { } -void machine_switch_perfcontrol_context(__unused perfcontrol_event event, - __unused uint64_t timestamp, - __unused uint32_t flags, - __unused uint64_t new_thread_same_pri_latency, - __unused thread_t old, - __unused thread_t new) +void +machine_switch_perfcontrol_context(__unused perfcontrol_event event, + __unused uint64_t timestamp, + __unused uint32_t flags, + __unused uint64_t new_thread_same_pri_latency, + __unused thread_t old, + __unused thread_t new) { } -void machine_switch_perfcontrol_state_update(__unused perfcontrol_event event, - __unused uint64_t timestamp, - __unused uint32_t flags, - __unused thread_t thread) +void +machine_switch_perfcontrol_state_update(__unused perfcontrol_event event, + __unused uint64_t timestamp, + __unused uint32_t flags, + __unused thread_t 
thread) { } void active_rt_threads(boolean_t active) { - if (!pmInitDone - || pmDispatch == NULL - || pmDispatch->pmActiveRTThreads == NULL) - return; + if (!pmInitDone + || pmDispatch == NULL + || pmDispatch->pmActiveRTThreads == NULL) { + return; + } - pmDispatch->pmActiveRTThreads(active); + pmDispatch->pmActiveRTThreads(active); } static uint32_t pmGetSavedRunCount(void) { - return(saved_run_count); + return saved_run_count; } /* @@ -851,41 +898,42 @@ pmGetSavedRunCount(void) x86_pkg_t * pmGetPkgRoot(void) { - return(x86_pkgs); + return x86_pkgs; } static boolean_t pmCPUGetHibernate(int cpu) { - return(cpu_datap(cpu)->cpu_hibernate); + return cpu_datap(cpu)->cpu_hibernate; } processor_t pmLCPUtoProcessor(int lcpu) { - return(cpu_datap(lcpu)->cpu_processor); + return cpu_datap(lcpu)->cpu_processor; } static void pmReSyncDeadlines(int cpu) { - static boolean_t registered = FALSE; + static boolean_t registered = FALSE; - if (!registered) { - PM_interrupt_register(&timer_resync_deadlines); - registered = TRUE; - } + if (!registered) { + PM_interrupt_register(&timer_resync_deadlines); + registered = TRUE; + } - if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num) - timer_resync_deadlines(); - else - cpu_PM_interrupt(cpu); + if ((uint32_t)cpu == current_cpu_datap()->lcpu.cpu_num) { + timer_resync_deadlines(); + } else { + cpu_PM_interrupt(cpu); + } } static void pmSendIPI(int cpu) { - lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT); + lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT); } static void @@ -900,17 +948,17 @@ pmGetNanotimeInfo(pm_rtc_nanotime_t *rtc_nanotime) rtc_nanotime->ns_base = pal_rtc_nanotime_info.ns_base; rtc_nanotime->scale = pal_rtc_nanotime_info.scale; rtc_nanotime->shift = pal_rtc_nanotime_info.shift; - } while(pal_rtc_nanotime_info.generation != 0 - && rtc_nanotime->generation != pal_rtc_nanotime_info.generation); + } while (pal_rtc_nanotime_info.generation != 0 + && rtc_nanotime->generation != pal_rtc_nanotime_info.generation); } uint32_t pmTimerQueueMigrate(int target_cpu) { - /* Call the etimer code to do this. */ - return (target_cpu != cpu_number()) - ? timer_queue_migrate_cpu(target_cpu) - : 0; + /* Call the etimer code to do this. */ + return (target_cpu != cpu_number()) + ? 
timer_queue_migrate_cpu(target_cpu) + : 0; } @@ -942,16 +990,16 @@ pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs, callbacks->LCPUtoProcessor = pmLCPUtoProcessor; callbacks->ThreadBind = thread_bind; callbacks->GetSavedRunCount = pmGetSavedRunCount; - callbacks->GetNanotimeInfo = pmGetNanotimeInfo; - callbacks->ThreadGetUrgency = pmThreadGetUrgency; - callbacks->RTCClockAdjust = rtc_clock_adjust; + callbacks->GetNanotimeInfo = pmGetNanotimeInfo; + callbacks->ThreadGetUrgency = pmThreadGetUrgency; + callbacks->RTCClockAdjust = rtc_clock_adjust; callbacks->timerQueueMigrate = pmTimerQueueMigrate; callbacks->topoParms = &topoParms; - callbacks->pmSendIPI = pmSendIPI; - callbacks->InterruptPending = lapic_is_interrupt_pending; - callbacks->IsInterrupting = lapic_is_interrupting; - callbacks->InterruptStats = lapic_interrupt_counts; - callbacks->DisableApicTimer = lapic_disable_timer; + callbacks->pmSendIPI = pmSendIPI; + callbacks->InterruptPending = lapic_is_interrupt_pending; + callbacks->IsInterrupting = lapic_is_interrupting; + callbacks->InterruptStats = lapic_interrupt_counts; + callbacks->DisableApicTimer = lapic_disable_timer; } else { panic("Version mis-match between Kernel and CPU PM"); } @@ -981,21 +1029,22 @@ pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs, void pmUnRegister(pmDispatch_t *cpuFuncs) { - if (cpuFuncs != NULL && pmDispatch == cpuFuncs) { - pmDispatch = NULL; - } + if (cpuFuncs != NULL && pmDispatch == cpuFuncs) { + pmDispatch = NULL; + } } -void machine_track_platform_idle(boolean_t entry) { - cpu_data_t *my_cpu = current_cpu_datap(); +void +machine_track_platform_idle(boolean_t entry) +{ + cpu_data_t *my_cpu = current_cpu_datap(); if (entry) { (void)__sync_fetch_and_add(&my_cpu->lcpu.package->num_idle, 1); + } else { + uint32_t nidle = __sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1); + if (nidle == topoParms.nLThreadsPerPackage) { + my_cpu->lcpu.package->package_idle_exits++; + } } - else { - uint32_t nidle = __sync_fetch_and_sub(&my_cpu->lcpu.package->num_idle, 1); - if (nidle == topoParms.nLThreadsPerPackage) { - my_cpu->lcpu.package->package_idle_exits++; - } - } } diff --git a/osfmk/i386/pmCPU.h b/osfmk/i386/pmCPU.h index 1ed973e4c..20d0003c5 100644 --- a/osfmk/i386/pmCPU.h +++ b/osfmk/i386/pmCPU.h @@ -2,7 +2,7 @@ * Copyright (c) 2006-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifdef KERNEL_PRIVATE @@ -37,7 +37,7 @@ * This value should be changed each time that pmDispatch_t or pmCallBacks_t * changes. */ -#define PM_DISPATCH_VERSION 102 +#define PM_DISPATCH_VERSION 102 /* * Dispatch table for functions that get installed when the power @@ -49,84 +49,85 @@ * pmCallBacks_t is the set of functions that the power management kext * can call to get at specific kernel functions. */ -typedef struct -{ - kern_return_t (*pmCPUStateInit)(void); - void (*cstateInit)(void); - uint64_t (*MachineIdle)(uint64_t maxIdleDuration); - uint64_t (*GetDeadline)(x86_lcpu_t *lcpu); - uint64_t (*SetDeadline)(x86_lcpu_t *lcpu, uint64_t); - void (*Deadline)(x86_lcpu_t *lcpu); - boolean_t (*exitIdle)(x86_lcpu_t *lcpu); - void (*markCPURunning)(x86_lcpu_t *lcpu); - int (*pmCPUControl)(uint32_t cmd, void *datap); - void (*pmCPUHalt)(void); - uint64_t (*getMaxSnoop)(void); - void (*setMaxBusDelay)(uint64_t time); - uint64_t (*getMaxBusDelay)(void); - void (*setMaxIntDelay)(uint64_t time); - uint64_t (*getMaxIntDelay)(void); - void (*pmCPUSafeMode)(x86_lcpu_t *lcpu, uint32_t flags); - void (*pmTimerStateSave)(void); - void (*pmTimerStateRestore)(void); - kern_return_t (*exitHalt)(x86_lcpu_t *lcpu); - kern_return_t (*exitHaltToOff)(x86_lcpu_t *lcpu); - void (*markAllCPUsOff)(void); - void (*pmSetRunCount)(uint32_t count); - boolean_t (*pmIsCPUUnAvailable)(x86_lcpu_t *lcpu); - int (*pmChooseCPU)(int startCPU, int endCPU, int preferredCPU); - int (*pmIPIHandler)(void *state); - void (*pmThreadTellUrgency)(int urgency, uint64_t rt_period, uint64_t rt_deadline); - void (*pmActiveRTThreads)(boolean_t active); - boolean_t (*pmInterruptPrewakeApplicable)(void); +typedef struct{ + kern_return_t (*pmCPUStateInit)(void); + void (*cstateInit)(void); + uint64_t (*MachineIdle)(uint64_t maxIdleDuration); + uint64_t (*GetDeadline)(x86_lcpu_t *lcpu); + uint64_t (*SetDeadline)(x86_lcpu_t *lcpu, uint64_t); + void (*Deadline)(x86_lcpu_t *lcpu); + boolean_t (*exitIdle)(x86_lcpu_t *lcpu); + void (*markCPURunning)(x86_lcpu_t *lcpu); + int (*pmCPUControl)(uint32_t cmd, void *datap); + void (*pmCPUHalt)(void); + uint64_t (*getMaxSnoop)(void); + void (*setMaxBusDelay)(uint64_t time); + uint64_t (*getMaxBusDelay)(void); + void (*setMaxIntDelay)(uint64_t time); + uint64_t (*getMaxIntDelay)(void); + void (*pmCPUSafeMode)(x86_lcpu_t *lcpu, uint32_t flags); + void (*pmTimerStateSave)(void); + void (*pmTimerStateRestore)(void); + kern_return_t (*exitHalt)(x86_lcpu_t *lcpu); + kern_return_t (*exitHaltToOff)(x86_lcpu_t *lcpu); + void (*markAllCPUsOff)(void); + void (*pmSetRunCount)(uint32_t count); + boolean_t (*pmIsCPUUnAvailable)(x86_lcpu_t *lcpu); + int (*pmChooseCPU)(int startCPU, int endCPU, int preferredCPU); + int (*pmIPIHandler)(void *state); + void (*pmThreadTellUrgency)(int urgency, uint64_t rt_period, uint64_t rt_deadline); + void (*pmActiveRTThreads)(boolean_t active); + boolean_t (*pmInterruptPrewakeApplicable)(void); + void (*pmThreadGoingOffCore)(thread_t old_thread, boolean_t transfer_load, + uint64_t last_dispatch, boolean_t thread_runnable); } pmDispatch_t; /* common time fields exported to PM code. This structure may be * allocated on the stack, so avoid making it unnecessarily large. 
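Reviewer note: pmGetNanotimeInfo (reflowed in an earlier hunk) reads the pm_rtc_nanotime structure defined just below locklessly — generation == 0 marks an update in progress, and the reader copies the fields, then retries while the live generation is valid but differs from its snapshot. A self-contained sketch of that seqlock-style read, with a hypothetical source variable standing in for pal_rtc_nanotime_info:

#include <stdint.h>

typedef struct {
    uint64_t tsc_base;
    uint64_t ns_base;
    uint32_t scale;
    uint32_t shift;
    volatile uint32_t generation;   /* 0 == being updated */
} rtc_nano_t;

static rtc_nano_t rtc_src;          /* written elsewhere by the clock code */

static void
rtc_read_consistent(rtc_nano_t *out)
{
    do {
        out->generation = rtc_src.generation;   /* snapshot first */
        out->tsc_base   = rtc_src.tsc_base;
        out->ns_base    = rtc_src.ns_base;
        out->scale      = rtc_src.scale;
        out->shift      = rtc_src.shift;
        /* Retry while the live generation is valid (nonzero) yet differs
         * from the snapshot, i.e. a writer completed mid-copy. */
    } while (rtc_src.generation != 0
        && out->generation != rtc_src.generation);
}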
*/ typedef struct pm_rtc_nanotime { - uint64_t tsc_base; /* timestamp */ - uint64_t ns_base; /* nanoseconds */ - uint32_t scale; /* tsc -> nanosec multiplier */ - uint32_t shift; /* tsc -> nanosec shift/div */ - uint32_t generation; /* 0 == being updated */ + uint64_t tsc_base; /* timestamp */ + uint64_t ns_base; /* nanoseconds */ + uint32_t scale; /* tsc -> nanosec multiplier */ + uint32_t shift; /* tsc -> nanosec shift/div */ + uint32_t generation; /* 0 == being updated */ } pm_rtc_nanotime_t; typedef struct { - uint64_t (*setRTCPop)(uint64_t time); - void (*resyncDeadlines)(int cpu); - void (*initComplete)(void); - x86_lcpu_t *(*GetLCPU)(int cpu); - x86_core_t *(*GetCore)(int cpu); - x86_die_t *(*GetDie)(int cpu); - x86_pkg_t *(*GetPackage)(int cpu); - x86_lcpu_t *(*GetMyLCPU)(void); - x86_core_t *(*GetMyCore)(void); - x86_die_t *(*GetMyDie)(void); - x86_pkg_t *(*GetMyPackage)(void); - x86_pkg_t *(*GetPkgRoot)(void); - void (*LockCPUTopology)(int lock); - boolean_t (*GetHibernate)(int cpu); - processor_t (*LCPUtoProcessor)(int lcpu); - processor_t (*ThreadBind)(processor_t proc); - uint32_t (*GetSavedRunCount)(void); - void (*pmSendIPI)(int cpu); - void (*GetNanotimeInfo)(pm_rtc_nanotime_t *); - int (*ThreadGetUrgency)(uint64_t *rt_period, uint64_t *rt_deadline); - uint32_t (*timerQueueMigrate)(int cpu); - void (*RTCClockAdjust)(uint64_t adjustment); - x86_topology_parameters_t *topoParms; - boolean_t (*InterruptPending)(void); - boolean_t (*IsInterrupting)(uint8_t vector); - void (*InterruptStats)(uint64_t intrs[256]); - void (*DisableApicTimer)(void); + uint64_t (*setRTCPop)(uint64_t time); + void (*resyncDeadlines)(int cpu); + void (*initComplete)(void); + x86_lcpu_t *(*GetLCPU)(int cpu); + x86_core_t *(*GetCore)(int cpu); + x86_die_t *(*GetDie)(int cpu); + x86_pkg_t *(*GetPackage)(int cpu); + x86_lcpu_t *(*GetMyLCPU)(void); + x86_core_t *(*GetMyCore)(void); + x86_die_t *(*GetMyDie)(void); + x86_pkg_t *(*GetMyPackage)(void); + x86_pkg_t *(*GetPkgRoot)(void); + void (*LockCPUTopology)(int lock); + boolean_t (*GetHibernate)(int cpu); + processor_t (*LCPUtoProcessor)(int lcpu); + processor_t (*ThreadBind)(processor_t proc); + uint32_t (*GetSavedRunCount)(void); + void (*pmSendIPI)(int cpu); + void (*GetNanotimeInfo)(pm_rtc_nanotime_t *); + int (*ThreadGetUrgency)(uint64_t *rt_period, uint64_t *rt_deadline); + uint32_t (*timerQueueMigrate)(int cpu); + void (*RTCClockAdjust)(uint64_t adjustment); + x86_topology_parameters_t *topoParms; + boolean_t (*InterruptPending)(void); + boolean_t (*IsInterrupting)(uint8_t vector); + void (*InterruptStats)(uint64_t intrs[256]); + void (*DisableApicTimer)(void); } pmCallBacks_t; -extern pmDispatch_t *pmDispatch; +extern pmDispatch_t *pmDispatch; void power_management_init(void); void pmKextRegister(uint32_t version, pmDispatch_t *cpuFuncs, - pmCallBacks_t *callbacks); + pmCallBacks_t *callbacks); void pmUnRegister(pmDispatch_t *cpuFuncs); void pmCPUStateInit(void); uint64_t pmCPUGetDeadline(struct cpu_data *cpu); @@ -143,22 +144,22 @@ kern_return_t pmCPUExitHalt(int cpu); kern_return_t pmCPUExitHaltToOff(int cpu); uint32_t pmTimerQueueMigrate(int); -#define PM_HALT_NORMAL 0 /* normal halt path */ -#define PM_HALT_DEBUG 1 /* debug code wants to halt */ -#define PM_HALT_PANIC 2 /* panic code wants to halt */ -#define PM_HALT_SLEEP 3 /* sleep code wants to halt */ +#define PM_HALT_NORMAL 0 /* normal halt path */ +#define PM_HALT_DEBUG 1 /* debug code wants to halt */ +#define PM_HALT_PANIC 2 /* panic code wants to halt */ +#define PM_HALT_SLEEP 3 /* 
sleep code wants to halt */ void pmSafeMode(x86_lcpu_t *lcpu, uint32_t flags); -#define PM_SAFE_FL_NORMAL 0x00000001 /* put CPU into "normal" power mode */ -#define PM_SAFE_FL_SAFE 0x00000002 /* put CPU into a "safe" power mode */ -#define PM_SAFE_FL_PAUSE 0x00000010 /* pause execution on the CPU */ -#define PM_SAFE_FL_RESUME 0x00000020 /* resume execution on the CPU */ +#define PM_SAFE_FL_NORMAL 0x00000001 /* put CPU into "normal" power mode */ +#define PM_SAFE_FL_SAFE 0x00000002 /* put CPU into a "safe" power mode */ +#define PM_SAFE_FL_PAUSE 0x00000010 /* pause execution on the CPU */ +#define PM_SAFE_FL_RESUME 0x00000020 /* resume execution on the CPU */ extern int pmsafe_debug; /* Default urgency timing threshold for the DEBUG build */ -#define URGENCY_NOTIFICATION_ASSERT_NS (5 * 1000 * 1000) -extern uint64_t urgency_notification_assert_abstime_threshold; +#define URGENCY_NOTIFICATION_ASSERT_NS (5 * 1000 * 1000) +extern uint64_t urgency_notification_assert_abstime_threshold; x86_lcpu_t * pmGetLogicalCPU(int cpu); @@ -171,10 +172,10 @@ pmGetPkgRoot(void); /****************************************************************************** - * - * All of the following are deprecated interfaces and no longer used. - * - ******************************************************************************/ +* +* All of the following are deprecated interfaces and no longer used. +* +******************************************************************************/ #endif /* ASSEMBLER */ diff --git a/osfmk/i386/pmap.h b/osfmk/i386/pmap.h index 076b69aa3..bd932f8e2 100644 --- a/osfmk/i386/pmap.h +++ b/osfmk/i386/pmap.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -32,24 +32,24 @@ * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -65,10 +65,10 @@ * Machine-dependent structures for the physical map module. */ #ifdef KERNEL_PRIVATE -#ifndef _PMAP_MACHINE_ -#define _PMAP_MACHINE_ 1 +#ifndef _PMAP_MACHINE_ +#define _PMAP_MACHINE_ 1 -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include #include @@ -88,18 +88,18 @@ * Define the generic in terms of the specific */ -#define INTEL_PGBYTES I386_PGBYTES -#define INTEL_PGSHIFT I386_PGSHIFT -#define intel_btop(x) i386_btop(x) -#define intel_ptob(x) i386_ptob(x) -#define intel_round_page(x) i386_round_page(x) -#define intel_trunc_page(x) i386_trunc_page(x) +#define INTEL_PGBYTES I386_PGBYTES +#define INTEL_PGSHIFT I386_PGSHIFT +#define intel_btop(x) i386_btop(x) +#define intel_ptob(x) i386_ptob(x) +#define intel_round_page(x) i386_round_page(x) +#define intel_trunc_page(x) i386_trunc_page(x) /* * i386/i486/i860 Page Table Entry */ -#endif /* ASSEMBLER */ +#endif /* ASSEMBLER */ #define NPGPTD 4ULL #define PDESHIFT 21ULL @@ -108,13 +108,13 @@ #define PTESHIFT 12ULL -#define LOW_4GB_MASK ((vm_offset_t)0x00000000FFFFFFFFUL) +#define LOW_4GB_MASK ((vm_offset_t)0x00000000FFFFFFFFUL) -#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ -#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ +#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ +#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ -#define INTEL_OFFMASK (I386_PGBYTES - 1) -#define INTEL_LOFFMASK (I386_LPGBYTES - 1) +#define INTEL_OFFMASK (I386_PGBYTES - 1) +#define INTEL_LOFFMASK (I386_LPGBYTES - 1) #define PG_FRAME 0x000FFFFFFFFFF000ULL #define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t))) #define NPTDPG (PAGE_SIZE/(sizeof (pd_entry_t))) @@ -125,9 +125,9 @@ #define NBPDE (1ULL << PDESHIFT) #define PDEMASK (NBPDE - 1) -#define PTE_PER_PAGE 512 /* number of PTE's per page on any level */ +#define PTE_PER_PAGE 512 /* number of PTE's per page on any level */ - /* cleanly define parameters for all the page table levels */ +/* cleanly define parameters for all the page table levels */ typedef uint64_t pml4_entry_t; #define NPML4PG (PAGE_SIZE/(sizeof (pml4_entry_t))) #define PML4SHIFT 39 @@ -158,30 +158,30 @@ typedef uint64_t pt_entry_t; #define PTPGSHIFT 9 #define NBPT (1ULL << PTSHIFT) #define PTMASK (NBPT-1) -#define PT_ENTRY_NULL ((pt_entry_t *) 0) +#define PT_ENTRY_NULL ((pt_entry_t *) 0) typedef uint64_t pmap_paddr_t; -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG #define PMAP_ASSERT 1 extern int pmap_asserts_enabled; extern int pmap_asserts_traced; #endif #if PMAP_ASSERT -#define pmap_assert(ex) (pmap_asserts_enabled ? ((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex)) : (void)0) - -#define pmap_assert2(ex, fmt, args...) \ - do { \ - if (__improbable(pmap_asserts_enabled && !(ex))) { \ - if (pmap_asserts_traced) { \ - KERNEL_DEBUG_CONSTANT(0xDEAD1000, __builtin_return_address(0), __LINE__, 0, 0, 0); \ - kdebug_enable = 0; \ - } else { \ - kprintf("Assertion %s failed (%s:%d, caller %p) " fmt , #ex, __FILE__, __LINE__, __builtin_return_address(0), ##args); \ - panic("Assertion %s failed (%s:%d, caller %p) " fmt , #ex, __FILE__, __LINE__, __builtin_return_address(0), ##args); \ - } \ - } \ +#define pmap_assert(ex) (pmap_asserts_enabled ? 
((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex)) : (void)0) + +#define pmap_assert2(ex, fmt, args...) \ + do { \ + if (__improbable(pmap_asserts_enabled && !(ex))) { \ + if (pmap_asserts_traced) { \ + KERNEL_DEBUG_CONSTANT(0xDEAD1000, __builtin_return_address(0), __LINE__, 0, 0, 0); \ + kdebug_enable = 0; \ + } else { \ + kprintf("Assertion %s failed (%s:%d, caller %p) " fmt , #ex, __FILE__, __LINE__, __builtin_return_address(0), ##args); \ + panic("Assertion %s failed (%s:%d, caller %p) " fmt , #ex, __FILE__, __LINE__, __builtin_return_address(0), ##args); \ + } \ + } \ } while(0) #else #define pmap_assert(ex) @@ -210,51 +210,47 @@ pmap_store_pte(pt_entry_t *entryp, pt_entry_t value) #define NPDEPGS (NPDPTPGS * (PAGE_SIZE/(sizeof (pd_entry_t)))) #define NPTEPGS (NPDEPGS * (PAGE_SIZE/(sizeof (pt_entry_t)))) -#define KERNEL_PML4_INDEX 511 -#define KERNEL_KEXTS_INDEX 510 /* Home of KEXTs - the basement */ -#define KERNEL_PHYSMAP_PML4_INDEX 509 /* virtual to physical map */ -#define KERNEL_KASAN_PML4_INDEX0 508 -#define KERNEL_KASAN_PML4_INDEX1 507 -#define KERNEL_DBLMAP_PML4_INDEX (506) -#define KERNEL_BASE (0ULL - NBPML4) -#define KERNEL_BASEMENT (KERNEL_BASE - NBPML4) - -#define VM_WIMG_COPYBACK VM_MEM_COHERENT -#define VM_WIMG_COPYBACKLW VM_WIMG_COPYBACK -#define VM_WIMG_DEFAULT VM_MEM_COHERENT +extern int kernPhysPML4Index; +extern int kernPhysPML4EntryCount; + +#define KERNEL_PML4_INDEX 511 +#define KERNEL_KEXTS_INDEX (KERNEL_PML4_INDEX - 1) /* 510: Home of KEXTs - the basement */ +#define KERNEL_PHYSMAP_PML4_INDEX (kernPhysPML4Index) /* 50X: virtual to physical map */ +#define KERNEL_PHYSMAP_PML4_COUNT (kernPhysPML4EntryCount) +#define KERNEL_PHYSMAP_PML4_COUNT_MAX (16 - 2) /* 1 for KERNEL, 1 for BASEMENT */ +/* 2 PML4s for KASAN to cover a maximum of 16 PML4s {PHYSMAP + BASEMENT + KVA} */ +#define KERNEL_KASAN_PML4_LAST (495) /* 511 - 16 */ +#define KERNEL_KASAN_PML4_FIRST (494) /* 511 - 17 */ +#define KERNEL_DBLMAP_PML4_INDEX (KERNEL_KASAN_PML4_FIRST - 1) +#define KERNEL_PML4_COUNT 1 +#define KERNEL_BASE (0ULL - (NBPML4 * KERNEL_PML4_COUNT)) +#define KERNEL_BASEMENT (KERNEL_BASE - NBPML4) /* Basement uses one PML4 entry */ + +#define VM_WIMG_COPYBACK VM_MEM_COHERENT +#define VM_WIMG_COPYBACKLW VM_WIMG_COPYBACK +#define VM_WIMG_DEFAULT VM_MEM_COHERENT /* ?? intel ?? */ -#define VM_WIMG_IO (VM_MEM_COHERENT | \ - VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED) -#define VM_WIMG_POSTED VM_WIMG_IO -#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED) +#define VM_WIMG_IO (VM_MEM_COHERENT | \ + VM_MEM_NOT_CACHEABLE | VM_MEM_GUARDED) +#define VM_WIMG_POSTED VM_WIMG_IO +#define VM_WIMG_WTHRU (VM_MEM_WRITE_THROUGH | VM_MEM_COHERENT | VM_MEM_GUARDED) /* write combining mode, aka store gather */ -#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT) -#define VM_WIMG_INNERWBACK VM_MEM_COHERENT +#define VM_WIMG_WCOMB (VM_MEM_NOT_CACHEABLE | VM_MEM_COHERENT) +#define VM_WIMG_INNERWBACK VM_MEM_COHERENT /* * Pte related macros */ -#define KVADDR(pmi, pdpi, pdi, pti) \ - ((vm_offset_t) \ - ((uint64_t) -1 << 47) | \ - ((uint64_t)(pmi) << PML4SHIFT) | \ - ((uint64_t)(pdpi) << PDPTSHIFT) | \ - ((uint64_t)(pdi) << PDESHIFT) | \ - ((uint64_t)(pti) << PTESHIFT)) +#define KVADDR(pmi, pdpi, pdi, pti) \ + ((vm_offset_t) \ + ((uint64_t) -1 << 47) | \ + ((uint64_t)(pmi) << PML4SHIFT) | \ + ((uint64_t)(pdpi) << PDPTSHIFT) | \ + ((uint64_t)(pdi) << PDESHIFT) | \ + ((uint64_t)(pti) << PTESHIFT)) -/* - * Size of Kernel address space. 
This is the number of page table pages - * (4MB each) to use for the kernel. 256 pages == 1 Gigabyte. - * This **MUST** be a multiple of 4 (eg: 252, 256, 260, etc). - */ -#ifndef KVA_PAGES -#define KVA_PAGES 1024 -#endif #ifndef NKPT -#define NKPT 500 /* actual number of kernel page tables */ -#endif -#ifndef NKPDE -#define NKPDE (KVA_PAGES - 1) /* addressable number of page tables/pde's */ +#define NKPT 500 /* actual number of bootstrap kernel page tables */ #endif @@ -263,7 +259,7 @@ pmap_store_pte(pt_entry_t *entryp, pt_entry_t value) * Convert address offset to page descriptor index */ #define pdptnum(pmap, a) (((vm_offset_t)(a) >> PDPTSHIFT) & PDPTMASK) -#define pdenum(pmap, a) (((vm_offset_t)(a) >> PDESHIFT) & PDEMASK) +#define pdenum(pmap, a) (((vm_offset_t)(a) >> PDESHIFT) & PDEMASK) #define PMAP_INVALID_PDPTNUM (~0ULL) #define pdeidx(pmap, a) (((a) >> PDSHIFT) & ((1ULL<<(48 - PDSHIFT)) -1)) @@ -274,79 +270,120 @@ pmap_store_pte(pt_entry_t *entryp, pt_entry_t value) /* * Convert page descriptor index to user virtual address */ -#define pdetova(a) ((vm_offset_t)(a) << PDESHIFT) +#define pdetova(a) ((vm_offset_t)(a) << PDESHIFT) /* * Convert address offset to page table index */ -#define ptenum(a) (((vm_offset_t)(a) >> PTESHIFT) & PTEMASK) +#define ptenum(a) (((vm_offset_t)(a) >> PTESHIFT) & PTEMASK) /* * Hardware pte bit definitions (to be used directly on the ptes * without using the bit fields). */ -#define INTEL_PTE_VALID 0x00000001ULL -#define INTEL_PTE_WRITE 0x00000002ULL -#define INTEL_PTE_RW 0x00000002ULL -#define INTEL_PTE_USER 0x00000004ULL -#define INTEL_PTE_WTHRU 0x00000008ULL -#define INTEL_PTE_NCACHE 0x00000010ULL -#define INTEL_PTE_REF 0x00000020ULL -#define INTEL_PTE_MOD 0x00000040ULL -#define INTEL_PTE_PS 0x00000080ULL -#define INTEL_PTE_PTA 0x00000080ULL -#define INTEL_PTE_GLOBAL 0x00000100ULL -#define INTEL_PTE_WIRED 0x00000400ULL -#define INTEL_PDPTE_NESTED 0x00000800ULL -#define INTEL_PTE_PFN PG_FRAME - -#define INTEL_PTE_NX (1ULL << 63) +#define INTEL_PTE_VALID 0x00000001ULL + +#define INTEL_PTE_WRITE 0x00000002ULL +#define INTEL_PTE_RW 0x00000002ULL + +#define INTEL_PTE_USER 0x00000004ULL + +#define INTEL_PTE_WTHRU 0x00000008ULL +#define INTEL_PTE_NCACHE 0x00000010ULL + +#define INTEL_PTE_REF 0x00000020ULL +#define INTEL_PTE_MOD 0x00000040ULL + +#define INTEL_PTE_PS 0x00000080ULL +#define INTEL_PTE_PAT 0x00000080ULL + +#define INTEL_PTE_GLOBAL 0x00000100ULL + +/* These markers use software available bits ignored by the + * processor's 4-level and EPT pagetable walkers. + * N.B.: WIRED was originally bit 10, but that conflicts with + * execute permissions for EPT entries iff mode-based execute controls + * are enabled. + */ +#define INTEL_PTE_SWLOCK (0x1ULL << 52) +#define INTEL_PDPTE_NESTED (0x1ULL << 53) +#define INTEL_PTE_WIRED (0x1ULL << 54) +/* TODO: Compressed markers, potential conflict with protection keys? 
*/ +#define INTEL_PTE_COMPRESSED_ALT (1ULL << 61) /* compressed but with "alternate accounting" */ +#define INTEL_PTE_COMPRESSED (1ULL << 62) /* marker, for invalid PTE only -- ignored by hardware for both regular/EPT entries*/ + +#define INTEL_PTE_PFN PG_FRAME +/* TODO: these should be internal definitions */ +#define INTEL_PTE_NX (1ULL << 63) #define INTEL_PTE_INVALID 0 /* This is conservative, but suffices */ -#define INTEL_PTE_RSVD ((1ULL << 10) | (1ULL << 11) | (0x1FFULL << 54)) +#define INTEL_PTE_RSVD ((1ULL << 10) | (1ULL << 11)) -#define INTEL_PTE_COMPRESSED (1ULL << 62) /* marker, for invalid PTE only -- ignored by hardware for both regular/EPT entries*/ -#define INTEL_PTE_COMPRESSED_ALT (1ULL << 61) /* compressed but with "alternate accounting" */ #define INTEL_PTE_COMPRESSED_MASK (INTEL_PTE_COMPRESSED | \ - INTEL_PTE_COMPRESSED_ALT) -#define PTE_IS_COMPRESSED(x) \ - ((((x) & INTEL_PTE_VALID) == 0) && /* PTE is not valid... */ \ + INTEL_PTE_COMPRESSED_ALT | INTEL_PTE_SWLOCK) +#define PTE_IS_COMPRESSED(x, ptep) \ + ((((x) & INTEL_PTE_VALID) == 0) && /* PTE is not valid... */ \ ((x) & INTEL_PTE_COMPRESSED) && /* ...has "compressed" marker" */ \ ((!((x) & ~INTEL_PTE_COMPRESSED_MASK)) || /* ...no other bits */ \ - (panic("compressed PTE %p 0x%llx has extra bits 0x%llx: corrupted?", \ - &(x), (x), (x) & ~INTEL_PTE_COMPRESSED_MASK), FALSE))) + (panic_compressed_pte_corrupt((x), &(x), (ptep)), FALSE))) -#define pa_to_pte(a) ((a) & INTEL_PTE_PFN) /* XXX */ -#define pte_to_pa(p) ((p) & INTEL_PTE_PFN) /* XXX */ -#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1) +static inline void +panic_compressed_pte_corrupt(uint64_t pte, uint64_t *pte_addr, uint64_t *ptep) +{ + uint64_t *adj_pteps[2]; + int pteidx = ((uintptr_t)ptep & INTEL_OFFMASK) / sizeof(pt_entry_t); + /* + * Grab pointers to PTEs on either side of the PTE in question, unless we're at the start of + * a PT (grab pointers to the next and next-next PTEs) or the end of a PT (grab the previous + * 2 PTEs). + */ + if (pteidx == 0) { + adj_pteps[0] = ptep + 1; + adj_pteps[1] = ptep + 2; + } else if (pteidx == (NPTPG - 1)) { + adj_pteps[0] = ptep - 2; + adj_pteps[1] = ptep - 1; + } else { + adj_pteps[0] = ptep - 1; + adj_pteps[1] = ptep + 1; + } + + panic("compressed PTE %p 0x%llx has extra bits 0x%llx: corrupted? 
Adjacent PTEs: 0x%llx@%p, 0x%llx@%p", + pte_addr, pte, pte & ~INTEL_PTE_COMPRESSED_MASK, *adj_pteps[0], adj_pteps[0], *adj_pteps[1], adj_pteps[1]); + /*NOTREACHED*/ +} + +#define pa_to_pte(a) ((a) & INTEL_PTE_PFN) /* XXX */ +#define pte_to_pa(p) ((p) & INTEL_PTE_PFN) /* XXX */ +#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1) #define pte_kernel_rw(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_RW)) #define pte_kernel_ro(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID)) #define pte_user_rw(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER|INTEL_PTE_RW)) #define pte_user_ro(p) ((pt_entry_t)(pa_to_pte(p) | INTEL_PTE_VALID|INTEL_PTE_USER)) -#define PMAP_INVEPT_SINGLE_CONTEXT 1 +#define PMAP_INVEPT_SINGLE_CONTEXT 1 -#define INTEL_EPTP_AD 0x00000040ULL +#define INTEL_EPTP_AD 0x00000040ULL -#define INTEL_EPT_READ 0x00000001ULL -#define INTEL_EPT_WRITE 0x00000002ULL -#define INTEL_EPT_EX 0x00000004ULL -#define INTEL_EPT_IPTA 0x00000040ULL -#define INTEL_EPT_PS 0x00000080ULL -#define INTEL_EPT_REF 0x00000100ULL -#define INTEL_EPT_MOD 0x00000200ULL +#define INTEL_EPT_READ 0x00000001ULL +#define INTEL_EPT_WRITE 0x00000002ULL +#define INTEL_EPT_EX 0x00000004ULL +#define INTEL_EPT_IPAT 0x00000040ULL +#define INTEL_EPT_PS 0x00000080ULL +#define INTEL_EPT_REF 0x00000100ULL +#define INTEL_EPT_MOD 0x00000200ULL -#define INTEL_EPT_CACHE_MASK 0x00000038ULL -#define INTEL_EPT_NCACHE 0x00000000ULL -#define INTEL_EPT_WC 0x00000008ULL -#define INTEL_EPT_WTHRU 0x00000020ULL -#define INTEL_EPT_WP 0x00000028ULL -#define INTEL_EPT_WB 0x00000030ULL +#define INTEL_EPT_CACHE_MASK 0x00000038ULL +#define INTEL_EPT_NCACHE 0x00000000ULL +#define INTEL_EPT_WC 0x00000008ULL +#define INTEL_EPT_WTHRU 0x00000020ULL +#define INTEL_EPT_WP 0x00000028ULL +#define INTEL_EPT_WB 0x00000030ULL /* * Routines to filter correct bits depending on the pmap type @@ -356,20 +393,20 @@ static inline pt_entry_t pte_remove_ex(pt_entry_t pte, boolean_t is_ept) { if (__probable(!is_ept)) { - return (pte | INTEL_PTE_NX); + return pte | INTEL_PTE_NX; } - return (pte & (~INTEL_EPT_EX)); + return pte & (~INTEL_EPT_EX); } static inline pt_entry_t pte_set_ex(pt_entry_t pte, boolean_t is_ept) { if (__probable(!is_ept)) { - return (pte & (~INTEL_PTE_NX)); + return pte & (~INTEL_PTE_NX); } - return (pte | INTEL_EPT_EX); + return pte | INTEL_EPT_EX; } static inline pt_entry_t @@ -413,30 +450,30 @@ ept_refmod_to_physmap(pt_entry_t ept_pte) */ extern boolean_t pmap_ept_support_ad; -#define PTE_VALID_MASK(is_ept) ((is_ept) ? (INTEL_EPT_READ | INTEL_EPT_WRITE | INTEL_EPT_EX) : INTEL_PTE_VALID) -#define PTE_READ(is_ept) ((is_ept) ? INTEL_EPT_READ : INTEL_PTE_VALID) -#define PTE_WRITE(is_ept) ((is_ept) ? INTEL_EPT_WRITE : INTEL_PTE_WRITE) -#define PTE_PS INTEL_PTE_PS -#define PTE_COMPRESSED INTEL_PTE_COMPRESSED -#define PTE_COMPRESSED_ALT INTEL_PTE_COMPRESSED_ALT -#define PTE_NCACHE(is_ept) ((is_ept) ? INTEL_EPT_NCACHE : INTEL_PTE_NCACHE) -#define PTE_WTHRU(is_ept) ((is_ept) ? INTEL_EPT_WTHRU : INTEL_PTE_WTHRU) -#define PTE_REF(is_ept) ((is_ept) ? INTEL_EPT_REF : INTEL_PTE_REF) -#define PTE_MOD(is_ept) ((is_ept) ? INTEL_EPT_MOD : INTEL_PTE_MOD) -#define PTE_WIRED INTEL_PTE_WIRED - - -#define PMAP_DEFAULT_CACHE 0 -#define PMAP_INHIBIT_CACHE 1 -#define PMAP_GUARDED_CACHE 2 -#define PMAP_ACTIVATE_CACHE 4 -#define PMAP_NO_GUARD_CACHE 8 +#define PTE_VALID_MASK(is_ept) ((is_ept) ? (INTEL_EPT_READ | INTEL_EPT_WRITE | INTEL_EPT_EX) : INTEL_PTE_VALID) +#define PTE_READ(is_ept) ((is_ept) ? 
INTEL_EPT_READ : INTEL_PTE_VALID) +#define PTE_WRITE(is_ept) ((is_ept) ? INTEL_EPT_WRITE : INTEL_PTE_WRITE) +#define PTE_PS INTEL_PTE_PS +#define PTE_COMPRESSED INTEL_PTE_COMPRESSED +#define PTE_COMPRESSED_ALT INTEL_PTE_COMPRESSED_ALT +#define PTE_NCACHE(is_ept) ((is_ept) ? INTEL_EPT_NCACHE : INTEL_PTE_NCACHE) +#define PTE_WTHRU(is_ept) ((is_ept) ? INTEL_EPT_WTHRU : INTEL_PTE_WTHRU) +#define PTE_REF(is_ept) ((is_ept) ? INTEL_EPT_REF : INTEL_PTE_REF) +#define PTE_MOD(is_ept) ((is_ept) ? INTEL_EPT_MOD : INTEL_PTE_MOD) +#define PTE_WIRED INTEL_PTE_WIRED + + +#define PMAP_DEFAULT_CACHE 0 +#define PMAP_INHIBIT_CACHE 1 +#define PMAP_GUARDED_CACHE 2 +#define PMAP_ACTIVATE_CACHE 4 +#define PMAP_NO_GUARD_CACHE 8 /* Per-pmap ledger operations */ -#define pmap_ledger_debit(p, e, a) ledger_debit((p)->ledger, e, a) -#define pmap_ledger_credit(p, e, a) ledger_credit((p)->ledger, e, a) +#define pmap_ledger_debit(p, e, a) ledger_debit((p)->ledger, e, a) +#define pmap_ledger_credit(p, e, a) ledger_credit((p)->ledger, e, a) -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include @@ -445,53 +482,61 @@ extern boolean_t pmap_ept_support_ad; * and directories. */ -extern pt_entry_t *PTmap; -extern pdpt_entry_t *IdlePDPT; -extern pml4_entry_t *IdlePML4; -extern boolean_t no_shared_cr3; -extern pd_entry_t *IdlePTD; /* physical addr of "Idle" state PTD */ +extern pt_entry_t *PTmap; +extern pdpt_entry_t *IdlePDPT; +extern pml4_entry_t *IdlePML4; +extern boolean_t no_shared_cr3; +extern pd_entry_t *IdlePTD; /* physical addr of "Idle" state PTD */ -extern uint64_t pmap_pv_hashlist_walks; -extern uint64_t pmap_pv_hashlist_cnts; -extern uint32_t pmap_pv_hashlist_max; -extern uint32_t pmap_kernel_text_ps; +extern uint64_t pmap_pv_hashlist_walks; +extern uint64_t pmap_pv_hashlist_cnts; +extern uint32_t pmap_pv_hashlist_max; +extern uint32_t pmap_kernel_text_ps; -#define ID_MAP_VTOP(x) ((void *)(((uint64_t)(x)) & LOW_4GB_MASK)) +#define ID_MAP_VTOP(x) ((void *)(((uint64_t)(x)) & LOW_4GB_MASK)) -extern uint64_t physmap_base, physmap_max; +extern uint64_t physmap_base, physmap_max; -#define NPHYSMAP (MAX(K64_MAXMEM/GB + 4, 4)) +#define NPHYSMAP (MAX(((physmap_max - physmap_base) / GB), 4)) -static inline boolean_t physmap_enclosed(addr64_t a) { - return (a < (NPHYSMAP * GB)); +static inline boolean_t +physmap_enclosed(addr64_t a) +{ + return a < (NPHYSMAP * GB); } -static inline void * PHYSMAP_PTOV_check(void *paddr) { +static inline void * +PHYSMAP_PTOV_check(void *paddr) +{ uint64_t pvaddr = (uint64_t)paddr + physmap_base; - if (__improbable(pvaddr >= physmap_max)) + if (__improbable(pvaddr >= physmap_max)) { panic("PHYSMAP_PTOV bounds exceeded, 0x%qx, 0x%qx, 0x%qx", - pvaddr, physmap_base, physmap_max); + pvaddr, physmap_base, physmap_max); + } return (void *)pvaddr; } -#define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x))) +#define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x))) #if MACH_KERNEL_PRIVATE extern uint64_t dblmap_base, dblmap_max, dblmap_dist; -static inline uint64_t DBLMAP_CHECK(uintptr_t x) { +static inline uint64_t +DBLMAP_CHECK(uintptr_t x) +{ uint64_t dbladdr = (uint64_t)x + dblmap_dist; if (__improbable((dbladdr >= dblmap_max) || (dbladdr < dblmap_base))) { panic("DBLMAP bounds exceeded, 0x%qx, 0x%qx 0x%qx, 0x%qx", (uint64_t)x, dbladdr, dblmap_base, dblmap_max); } return dbladdr; - } #define DBLMAP(x) (DBLMAP_CHECK((uint64_t) x)) extern uint64_t ldt_alias_offset; -static inline uint64_t LDTALIAS_CHECK(uintptr_t x) { +static inline uint64_t +LDTALIAS_CHECK(uintptr_t x) +{ uint64_t dbladdr = (uint64_t)x + 
ldt_alias_offset; if (__improbable((dbladdr >= dblmap_max) || (dbladdr < dblmap_base))) { panic("LDTALIAS: bounds exceeded, 0x%qx, 0x%qx 0x%qx, 0x%qx", @@ -508,7 +553,7 @@ static inline uint64_t LDTALIAS_CHECK(uintptr_t x) { * And non-boot processor's GDT aliases likewise (skipping LOWGLOBAL_ALIAS) * The low global vector page is mapped at a fixed alias also. */ -#define LOWGLOBAL_ALIAS (VM_MIN_KERNEL_ADDRESS + 0x2000) +#define LOWGLOBAL_ALIAS (VM_MIN_KERNEL_ADDRESS + 0x2000) /* * This indicates (roughly) where there is free space for the VM @@ -516,6 +561,10 @@ static inline uint64_t LDTALIAS_CHECK(uintptr_t x) { */ #define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS +#if MACH_KERNEL_PRIVATE +extern void +pmap_tlbi_range(uint64_t startv, uint64_t endv, bool global, uint16_t pcid); + #include /* @@ -525,29 +574,31 @@ static inline uint64_t LDTALIAS_CHECK(uintptr_t x) { */ struct pmap { - decl_simple_lock_data(,lock) /* lock on map */ - pmap_paddr_t pm_cr3; /* Kernel+user shared PML4 physical*/ - pmap_paddr_t pm_ucr3; /* Mirrored user PML4 physical */ - task_map_t pm_task_map; - boolean_t pm_shared; - boolean_t pagezero_accessible; -#define PMAP_PCID_MAX_CPUS MAX_CPUS /* Must be a multiple of 8 */ - pcid_t pmap_pcid_cpus[PMAP_PCID_MAX_CPUS]; - volatile uint8_t pmap_pcid_coherency_vector[PMAP_PCID_MAX_CPUS]; - struct pmap_statistics stats; /* map statistics */ - int ref_count; /* reference count */ - int nx_enabled; + lck_rw_t pmap_rwl __attribute((aligned(64))); + pmap_paddr_t pm_cr3 __attribute((aligned(64))); /* Kernel+user shared PML4 physical*/ + pmap_paddr_t pm_ucr3; /* Mirrored user PML4 physical */ pml4_entry_t *pm_pml4; /* VKA of top level */ pml4_entry_t *pm_upml4; /* Shadow VKA of top level */ + pmap_paddr_t pm_eptp; /* EPTP */ + task_map_t pm_task_map; + boolean_t pagezero_accessible; +#define PMAP_PCID_MAX_CPUS MAX_CPUS /* Must be a multiple of 8 */ + pcid_t pmap_pcid_cpus[PMAP_PCID_MAX_CPUS]; + volatile uint8_t pmap_pcid_coherency_vector[PMAP_PCID_MAX_CPUS]; + boolean_t pm_shared; vm_object_t pm_obj; /* object to hold pde's */ vm_object_t pm_obj_pdpt; /* holds pdpt pages */ vm_object_t pm_obj_pml4; /* holds pml4 pages */ - pmap_paddr_t pm_eptp; /* EPTP */ - ledger_t ledger; /* ledger tracking phys mappings */ +#if DEVELOPMENT || DEBUG + int nx_enabled; +#endif + int ref_count; + ledger_t ledger; /* ledger tracking phys mappings */ + struct pmap_statistics stats; /* map statistics */ #if MACH_ASSERT - boolean_t pmap_stats_assert; - int pmap_pid; - char pmap_procname[17]; + boolean_t pmap_stats_assert; + int pmap_pid; + char pmap_procname[17]; #endif /* MACH_ASSERT */ }; @@ -579,15 +630,15 @@ void hv_ept_pmap_create(void **ept_pmap, void **eptp); #define PMAP_NWINDOWS (PMAP_NWINDOWS_FIRSTFREE + PMAP_WINDOW_SIZE) typedef struct { - pt_entry_t *prv_CMAP; - caddr_t prv_CADDR; + pt_entry_t *prv_CMAP; + caddr_t prv_CADDR; } mapwindow_t; typedef struct cpu_pmap { - int pdpt_window_index; - int pde_window_index; - int pte_window_index; - mapwindow_t mapwindow[PMAP_NWINDOWS]; + int pdpt_window_index; + int pde_window_index; + int pte_window_index; + mapwindow_t mapwindow[PMAP_NWINDOWS]; } cpu_pmap_t; @@ -596,10 +647,10 @@ extern void pmap_put_mapwindow(mapwindow_t *map); #endif typedef struct pmap_memory_regions { - ppnum_t base; /* first page of this region */ - ppnum_t alloc_up; /* pages below this one have been "stolen" */ - ppnum_t alloc_down; /* pages above this one have been "stolen" */ - ppnum_t end; /* last page of this region */ + ppnum_t base; /* first page of 
this region */ + ppnum_t alloc_up; /* pages below this one have been "stolen" */ + ppnum_t alloc_down; /* pages above this one have been "stolen" */ + ppnum_t end; /* last page of this region */ uint32_t type; uint64_t attribute; } pmap_memory_region_t; @@ -613,11 +664,12 @@ extern pmap_memory_region_t pmap_memory_regions[]; #include static inline void -set_dirbase(pmap_t tpmap, thread_t thread, int my_cpu) { +set_dirbase(pmap_t tpmap, thread_t thread, int my_cpu) +{ int ccpu = my_cpu; uint64_t pcr3 = tpmap->pm_cr3, ucr3 = tpmap->pm_ucr3; cpu_datap(ccpu)->cpu_task_cr3 = pcr3; - cpu_shadowp(ccpu)->cpu_task_cr3 = pcr3; + cpu_shadowp(ccpu)->cpu_shadowtask_cr3 = pcr3; cpu_datap(ccpu)->cpu_ucr3 = ucr3; cpu_shadowp(ccpu)->cpu_ucr3 = ucr3; @@ -658,8 +710,9 @@ set_dirbase(pmap_t tpmap, thread_t thread, int my_cpu) { } } } else { - if (get_cr3_base() != cpu_datap(ccpu)->cpu_kernel_cr3) + if (get_cr3_base() != cpu_datap(ccpu)->cpu_kernel_cr3) { set_cr3_raw(cpu_datap(ccpu)->cpu_kernel_cr3); + } } } @@ -667,60 +720,59 @@ set_dirbase(pmap_t tpmap, thread_t thread, int my_cpu) { * External declarations for PMAP_ACTIVATE. */ -extern void process_pmap_updates(void); -extern void pmap_update_interrupt(void); - -extern addr64_t (kvtophys)( - vm_offset_t addr); - -extern kern_return_t pmap_expand( - pmap_t pmap, - vm_map_offset_t addr, - unsigned int options); -extern vm_offset_t pmap_map( - vm_offset_t virt, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t prot, - unsigned int flags); - -extern vm_offset_t pmap_map_bd( - vm_offset_t virt, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t prot, - unsigned int flags); -extern void pmap_bootstrap( - vm_offset_t load_start, - boolean_t IA32e); - -extern boolean_t pmap_valid_page( - ppnum_t pn); - -extern int pmap_list_resident_pages( - struct pmap *pmap, - vm_offset_t *listp, - int space); -extern void x86_filter_TLB_coherency_interrupts(boolean_t); +extern void pmap_update_interrupt(void); + +extern addr64_t(kvtophys)( + vm_offset_t addr); + +extern kern_return_t pmap_expand( + pmap_t pmap, + vm_map_offset_t addr, + unsigned int options); +extern vm_offset_t pmap_map( + vm_offset_t virt, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t prot, + unsigned int flags); + +extern vm_offset_t pmap_map_bd( + vm_offset_t virt, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t prot, + unsigned int flags); +extern void pmap_bootstrap( + vm_offset_t load_start, + boolean_t IA32e); + +extern boolean_t pmap_valid_page( + ppnum_t pn); + +extern int pmap_list_resident_pages( + struct pmap *pmap, + vm_offset_t *listp, + int space); +extern void x86_filter_TLB_coherency_interrupts(boolean_t); /* * Get cache attributes (as pagetable bits) for the specified phys page */ -extern unsigned pmap_get_cache_attributes(ppnum_t, boolean_t is_ept); +extern unsigned pmap_get_cache_attributes(ppnum_t, boolean_t is_ept); #if NCOPY_WINDOWS > 0 -extern struct cpu_pmap *pmap_cpu_alloc( - boolean_t is_boot_cpu); -extern void pmap_cpu_free( - struct cpu_pmap *cp); +extern struct cpu_pmap *pmap_cpu_alloc( + boolean_t is_boot_cpu); +extern void pmap_cpu_free( + struct cpu_pmap *cp); #endif -extern kern_return_t pmap_map_block( - pmap_t pmap, - addr64_t va, - ppnum_t pa, - uint32_t size, - vm_prot_t prot, - int attr, - unsigned int flags); +extern kern_return_t pmap_map_block( + pmap_t pmap, + addr64_t va, + ppnum_t pa, + uint32_t size, + vm_prot_t prot, + int attr, + unsigned int flags); extern void invalidate_icache(vm_offset_t addr, unsigned cnt, int 
phys); extern void flush_dcache(vm_offset_t addr, unsigned count, int phys); @@ -731,8 +783,8 @@ extern void pmap_disable_NX(pmap_t pmap); extern void pt_fake_zone_init(int); extern void pt_fake_zone_info(int *, vm_size_t *, vm_size_t *, vm_size_t *, vm_size_t *, - uint64_t *, int *, int *, int *); -extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__printflike(1,2)); + uint64_t *, int *, int *, int *); +extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__printflike(1, 2)); /* * Macros for speed. @@ -742,40 +794,40 @@ extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__pr #include -#define PMAP_ACTIVATE_MAP(map, thread, my_cpu) { \ - pmap_t tpmap; \ +#define PMAP_ACTIVATE_MAP(map, thread, my_cpu) { \ + pmap_t tpmap; \ \ - tpmap = vm_map_pmap(map); \ - set_dirbase(tpmap, thread, my_cpu); \ + tpmap = vm_map_pmap(map); \ + set_dirbase(tpmap, thread, my_cpu); \ } #if defined(__x86_64__) -#define PMAP_DEACTIVATE_MAP(map, thread, ccpu) \ +#define PMAP_DEACTIVATE_MAP(map, thread, ccpu) \ pmap_assert2((pmap_pcid_ncpus ? (pcid_for_pmap_cpu_tuple(map->pmap, thread, ccpu) == (get_cr3_raw() & 0xFFF)) : TRUE),"PCIDs: 0x%x, active PCID: 0x%x, CR3: 0x%lx, pmap_cr3: 0x%llx, kernel_cr3: 0x%llx, kernel pmap cr3: 0x%llx, CPU active PCID: 0x%x, CPU kernel PCID: 0x%x, specflags: 0x%x, pagezero: 0x%x", pmap_pcid_ncpus, pcid_for_pmap_cpu_tuple(map->pmap, thread, ccpu), get_cr3_raw(), map->pmap->pm_cr3, cpu_datap(ccpu)->cpu_kernel_cr3, kernel_pmap->pm_cr3, cpu_datap(ccpu)->cpu_active_pcid, cpu_datap(ccpu)->cpu_kernel_pcid, thread->machine.specFlags, map->pmap->pagezero_accessible); #else #define PMAP_DEACTIVATE_MAP(map, thread) #endif #if NCOPY_WINDOWS > 0 -#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ - spl_t spl; \ - \ - spl = splhigh(); \ - PMAP_DEACTIVATE_MAP(th->map, th); \ - th->map = new_map; \ - PMAP_ACTIVATE_MAP(th->map, th); \ - splx(spl); \ - inval_copy_windows(th); \ +#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ + spl_t spl; \ + \ + spl = splhigh(); \ + PMAP_DEACTIVATE_MAP(th->map, th); \ + th->map = new_map; \ + PMAP_ACTIVATE_MAP(th->map, th); \ + splx(spl); \ + inval_copy_windows(th); \ } #else -#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ - spl_t spl; \ - \ - spl = splhigh(); \ - PMAP_DEACTIVATE_MAP(th->map, th, my_cpu); \ - th->map = new_map; \ - PMAP_ACTIVATE_MAP(th->map, th, my_cpu); \ - splx(spl); \ +#define PMAP_SWITCH_USER(th, new_map, my_cpu) { \ + spl_t spl; \ + \ + spl = splhigh(); \ + PMAP_DEACTIVATE_MAP(th->map, th, my_cpu); \ + th->map = new_map; \ + PMAP_ACTIVATE_MAP(th->map, th, my_cpu); \ + splx(spl); \ } #endif @@ -786,19 +838,19 @@ extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__pr * are normally zero, modulo PCID. * We can only mark the current cpu active/inactive but we can test any cpu. 
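Reviewer note: because CR3 values are page-aligned, bit 0 of the cached cpu_active_cr3 is free to act as the idle flag this comment describes — marking a CPU inactive is a single OR, and testing any CPU is a masked load. A condensed sketch of the macros that follow, with one global standing in for the per-CPU slot:

#include <stdbool.h>
#include <stdint.h>

static uint64_t cpu_active_cr3_shadow;  /* hypothetical per-CPU slot */

static void     cr3_mark_inactive(void) { cpu_active_cr3_shadow |= 1ULL; }
static void     cr3_mark_active(void)   { cpu_active_cr3_shadow &= ~1ULL; }
static bool     cr3_is_active(void)     { return (cpu_active_cr3_shadow & 1ULL) == 0; }
static uint64_t cr3_get_active(void)    { return cpu_active_cr3_shadow & ~1ULL; }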
*/ -#define CPU_CR3_MARK_INACTIVE() \ +#define CPU_CR3_MARK_INACTIVE() \ current_cpu_datap()->cpu_active_cr3 |= 1 -#define CPU_CR3_MARK_ACTIVE() \ +#define CPU_CR3_MARK_ACTIVE() \ current_cpu_datap()->cpu_active_cr3 &= ~1 -#define CPU_CR3_IS_ACTIVE(cpu) \ +#define CPU_CR3_IS_ACTIVE(cpu) \ ((cpu_datap(cpu)->cpu_active_cr3 & 1) == 0) -#define CPU_GET_ACTIVE_CR3(cpu) \ +#define CPU_GET_ACTIVE_CR3(cpu) \ (cpu_datap(cpu)->cpu_active_cr3 & ~1) -#define CPU_GET_TASK_CR3(cpu) \ +#define CPU_GET_TASK_CR3(cpu) \ (cpu_datap(cpu)->cpu_task_cr3) /* @@ -808,66 +860,65 @@ extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__pr * but will queue the update request for when the cpu * becomes active. */ -#define MARK_CPU_IDLE(my_cpu) { \ - assert(ml_get_interrupts_enabled() == FALSE); \ - CPU_CR3_MARK_INACTIVE(); \ - mfence(); \ +#define MARK_CPU_IDLE(my_cpu) { \ + assert(ml_get_interrupts_enabled() == FALSE); \ + CPU_CR3_MARK_INACTIVE(); \ + mfence(); \ } -#define MARK_CPU_ACTIVE(my_cpu) { \ - assert(ml_get_interrupts_enabled() == FALSE); \ - /* \ - * If a kernel_pmap update was requested while this cpu \ - * was idle, process it as if we got the interrupt. \ - * Before doing so, remove this cpu from the idle set. \ - * Since we do not grab any pmap locks while we flush \ - * our TLB, another cpu may start an update operation \ - * before we finish. Removing this cpu from the idle \ - * set assures that we will receive another update \ - * interrupt if this happens. \ - */ \ - CPU_CR3_MARK_ACTIVE(); \ - mfence(); \ - \ - if (current_cpu_datap()->cpu_tlb_invalid) \ - process_pmap_updates(); \ +#define MARK_CPU_ACTIVE(my_cpu) { \ + assert(ml_get_interrupts_enabled() == FALSE); \ + /* \ + * If a kernel_pmap update was requested while this cpu \ + * was idle, process it as if we got the interrupt. \ + * Before doing so, remove this cpu from the idle set. \ + * Since we do not grab any pmap locks while we flush \ + * our TLB, another cpu may start an update operation \ + * before we finish. Removing this cpu from the idle \ + * set assures that we will receive another update \ + * interrupt if this happens. 
\ + */ \ + CPU_CR3_MARK_ACTIVE(); \ + mfence(); \ + pmap_update_interrupt(); \ } #define PMAP_CONTEXT(pmap, thread) -#define pmap_kernel_va(VA) \ - ((((vm_offset_t) (VA)) >= vm_min_kernel_address) && \ +#define pmap_kernel_va(VA) \ + ((((vm_offset_t) (VA)) >= vm_min_kernel_address) && \ (((vm_offset_t) (VA)) <= vm_max_kernel_address)) -#define pmap_compressed(pmap) ((pmap)->stats.compressed) -#define pmap_resident_count(pmap) ((pmap)->stats.resident_count) -#define pmap_resident_max(pmap) ((pmap)->stats.resident_max) -#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr) -#define pmap_attribute(pmap,addr,size,attr,value) \ - (KERN_INVALID_ADDRESS) -#define pmap_attribute_cache_sync(addr,size,attr,value) \ - (KERN_INVALID_ADDRESS) +#define pmap_compressed(pmap) ((pmap)->stats.compressed) +#define pmap_resident_count(pmap) ((pmap)->stats.resident_count) +#define pmap_resident_max(pmap) ((pmap)->stats.resident_max) +#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) +#define pmap_attribute(pmap, addr, size, attr, value) \ + (KERN_INVALID_ADDRESS) +#define pmap_attribute_cache_sync(addr, size, attr, value) \ + (KERN_INVALID_ADDRESS) -#define MACHINE_PMAP_IS_EMPTY 1 -extern boolean_t pmap_is_empty(pmap_t pmap, - vm_map_offset_t start, - vm_map_offset_t end); +#define MACHINE_PMAP_IS_EMPTY 1 +extern boolean_t pmap_is_empty(pmap_t pmap, + vm_map_offset_t start, + vm_map_offset_t end); -#define MACHINE_BOOTSTRAPPTD 1 /* Static bootstrap page-tables */ +#define MACHINE_BOOTSTRAPPTD 1 /* Static bootstrap page-tables */ kern_return_t -pmap_permissions_verify(pmap_t, vm_map_t, vm_offset_t, vm_offset_t); + pmap_permissions_verify(pmap_t, vm_map_t, vm_offset_t, vm_offset_t); #if MACH_ASSERT extern int pmap_stats_assert; -#define PMAP_STATS_ASSERTF(args) \ - MACRO_BEGIN \ - if (pmap_stats_assert) assertf args; \ +#define PMAP_STATS_ASSERTF(args) \ + MACRO_BEGIN \ + if (pmap_stats_assert) assertf args; \ MACRO_END #else /* MACH_ASSERT */ #define PMAP_STATS_ASSERTF(args) #endif /* MACH_ASSERT */ -#endif /* ASSEMBLER */ -#endif /* _PMAP_MACHINE_ */ +#endif /* MACH_KERNEL_PRIVATE */ +#endif /* ASSEMBLER */ +#endif /* _PMAP_MACHINE_ */ #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/i386/pmap_common.c b/osfmk/i386/pmap_common.c index 9436719d4..17c6e2947 100644 --- a/osfmk/i386/pmap_common.c +++ b/osfmk/i386/pmap_common.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -36,48 +36,60 @@ * address of the page they lock. */ -char *pv_lock_table; /* pointer to array of bits */ +char *pv_lock_table; /* pointer to array of bits */ char *pv_hash_lock_table; -pv_rooted_entry_t pv_head_table; /* array of entries, one per - * page */ -uint32_t pv_hashed_free_count = 0; -uint32_t pv_hashed_kern_free_count = 0; +pv_rooted_entry_t pv_head_table; /* array of entries, one per + * page */ +uint32_t pv_hashed_free_count = 0; +uint32_t pv_hashed_kern_free_count = 0; pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[PMAP_PAGETABLE_CORRUPTION_MAX_LOG]; uint32_t pmap_pagetable_corruption_incidents; uint64_t pmap_pagetable_corruption_last_abstime = (~(0ULL) >> 1); uint64_t pmap_pagetable_corruption_interval_abstime; -thread_call_t pmap_pagetable_corruption_log_call; -static thread_call_data_t pmap_pagetable_corruption_log_call_data; +thread_call_t pmap_pagetable_corruption_log_call; +static thread_call_data_t pmap_pagetable_corruption_log_call_data; boolean_t pmap_pagetable_corruption_timeout = FALSE; -volatile uint32_t mappingrecurse = 0; +volatile uint32_t mappingrecurse = 0; uint32_t pv_hashed_low_water_mark, pv_hashed_kern_low_water_mark, pv_hashed_alloc_chunk, pv_hashed_kern_alloc_chunk; thread_t mapping_replenish_thread; -event_t mapping_replenish_event, pmap_user_pv_throttle_event; +event_t mapping_replenish_event, pmap_user_pv_throttle_event; uint64_t pmap_pv_throttle_stat, pmap_pv_throttled_waiters; int pmap_asserts_enabled = (DEBUG); int pmap_asserts_traced = 0; -unsigned int pmap_cache_attributes(ppnum_t pn) { - if (pmap_get_cache_attributes(pn, FALSE) & INTEL_PTE_NCACHE) - return (VM_WIMG_IO); - else - return (VM_WIMG_COPYBACK); +unsigned int +pmap_cache_attributes(ppnum_t pn) +{ + int cacheattr = pmap_get_cache_attributes(pn, FALSE); + + if (cacheattr & INTEL_PTE_NCACHE) { + if (cacheattr & INTEL_PTE_PAT) { + /* WC */ + return VM_WIMG_WCOMB; + } + return VM_WIMG_IO; + } else { + return VM_WIMG_COPYBACK; + } } -void pmap_set_cache_attributes(ppnum_t pn, unsigned int cacheattr) { +void +pmap_set_cache_attributes(ppnum_t pn, unsigned int cacheattr) +{ unsigned int current, template = 0; int pai; if (cacheattr & VM_MEM_NOT_CACHEABLE) { - if(!(cacheattr & VM_MEM_GUARDED)) - template |= PHYS_PTA; + if (!(cacheattr & VM_MEM_GUARDED)) { + template |= PHYS_PAT; + } template |= PHYS_NCACHE; } @@ -111,12 +123,16 @@ void pmap_set_cache_attributes(ppnum_t pn, unsigned int cacheattr) { } } -unsigned pmap_get_cache_attributes(ppnum_t pn, boolean_t is_ept) { - if (last_managed_page == 0) +unsigned +pmap_get_cache_attributes(ppnum_t pn, boolean_t is_ept) +{ + if (last_managed_page == 0) { return 0; + } - if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) - return PTE_NCACHE(is_ept); + if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) { + return PTE_NCACHE(is_ept); + } /* * The cache attributes are read locklessly for efficiency. @@ -127,22 +143,24 @@ unsigned pmap_get_cache_attributes(ppnum_t pn, boolean_t is_ept) { /* * The PTA bit is currently unsupported for EPT PTEs. */ - if ((attr & PHYS_PTA) && !is_ept) - template |= INTEL_PTE_PTA; + if ((attr & PHYS_PAT) && !is_ept) { + template |= INTEL_PTE_PAT; + } /* * If the page isn't marked as NCACHE, the default for EPT entries * is WB. 
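 *
 * Combined with pmap_cache_attributes() above, the non-EPT attribute
 * bits decode as follows (assuming the PAT MSR programming that xnu
 * establishes at boot, where the PAT bit selects a write-combining
 * entry):
 *
 *	PHYS_NCACHE | PHYS_PAT  ->  write-combining  (VM_WIMG_WCOMB)
 *	PHYS_NCACHE             ->  uncached         (VM_WIMG_IO)
 *	neither                 ->  write-back       (VM_WIMG_COPYBACK)
 *
 * EPT entries encode their memory type directly in the PTE instead of
 * indirecting through the PAT, which is why the write-back case must be
 * spelled out with INTEL_EPT_WB here.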
*/ - if (attr & PHYS_NCACHE) + if (attr & PHYS_NCACHE) { template |= PTE_NCACHE(is_ept); - else if (is_ept) + } else if (is_ept) { template |= INTEL_EPT_WB; + } return template; } -boolean_t +boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last) { ppnum_t pn, kdata_start, kdata_end; @@ -157,43 +175,46 @@ pmap_has_managed_page(ppnum_t first, ppnum_t last) kdata_start = atop_32(args->kaddr); kdata_end = atop_32(args->kaddr + args->ksize); - assert(last_managed_page); - assert(first <= last); + assert(last_managed_page); + assert(first <= last); - for (result = FALSE, pn = first; - !result - && (pn <= last) - && (pn <= last_managed_page); - pn++) - { - if ((pn >= kdata_start) && (pn < kdata_end)) continue; - result = (0 != (pmap_phys_attributes[pn] & PHYS_MANAGED)); - } + for (result = FALSE, pn = first; + !result + && (pn <= last) + && (pn <= last_managed_page); + pn++) { + if ((pn >= kdata_start) && (pn < kdata_end)) { + continue; + } + result = (0 != (pmap_phys_attributes[pn] & PHYS_MANAGED)); + } - return (result); + return result; } boolean_t pmap_is_noencrypt(ppnum_t pn) { - int pai; + int pai; pai = ppn_to_pai(pn); - if (!IS_MANAGED_PAGE(pai)) - return (FALSE); + if (!IS_MANAGED_PAGE(pai)) { + return FALSE; + } - if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) - return (TRUE); + if (pmap_phys_attributes[pai] & PHYS_NOENCRYPT) { + return TRUE; + } - return (FALSE); + return FALSE; } void pmap_set_noencrypt(ppnum_t pn) { - int pai; + int pai; pai = ppn_to_pai(pn); @@ -210,7 +231,7 @@ pmap_set_noencrypt(ppnum_t pn) void pmap_clear_noencrypt(ppnum_t pn) { - int pai; + int pai; pai = ppn_to_pai(pn); @@ -232,52 +253,55 @@ pmap_clear_noencrypt(ppnum_t pn) void compute_pmap_gc_throttle(void *arg __unused) { - } void pmap_lock_phys_page(ppnum_t pn) { - int pai; + int pai; pai = ppn_to_pai(pn); if (IS_MANAGED_PAGE(pai)) { LOCK_PVH(pai); - } else - simple_lock(&phys_backup_lock); + } else { + simple_lock(&phys_backup_lock, LCK_GRP_NULL); + } } void pmap_unlock_phys_page(ppnum_t pn) { - int pai; + int pai; pai = ppn_to_pai(pn); if (IS_MANAGED_PAGE(pai)) { UNLOCK_PVH(pai); - } else + } else { simple_unlock(&phys_backup_lock); + } } __private_extern__ void -pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1,2)) { +pmap_pagetable_corruption_msg_log(int (*log_func)(const char * fmt, ...)__printflike(1, 2)) +{ if (pmap_pagetable_corruption_incidents > 0) { int i, e = MIN(pmap_pagetable_corruption_incidents, PMAP_PAGETABLE_CORRUPTION_MAX_LOG); (*log_func)("%u pagetable corruption incident(s) detected, timeout: %u\n", pmap_pagetable_corruption_incidents, pmap_pagetable_corruption_timeout); for (i = 0; i < e; i++) { - (*log_func)("Incident 0x%x, reason: 0x%x, action: 0x%x, time: 0x%llx\n", pmap_pagetable_corruption_records[i].incident, pmap_pagetable_corruption_records[i].reason, pmap_pagetable_corruption_records[i].action, pmap_pagetable_corruption_records[i].abstime); + (*log_func)("Incident 0x%x, reason: 0x%x, action: 0x%x, time: 0x%llx\n", pmap_pagetable_corruption_records[i].incident, pmap_pagetable_corruption_records[i].reason, pmap_pagetable_corruption_records[i].action, pmap_pagetable_corruption_records[i].abstime); } } } static inline void -pmap_pagetable_corruption_log_setup(void) { +pmap_pagetable_corruption_log_setup(void) +{ if (pmap_pagetable_corruption_log_call == NULL) { nanotime_to_absolutetime(PMAP_PAGETABLE_CORRUPTION_INTERVAL, 0, &pmap_pagetable_corruption_interval_abstime); thread_call_setup(&pmap_pagetable_corruption_log_call_data, @@ 
-290,11 +314,11 @@ pmap_pagetable_corruption_log_setup(void) { void mapping_free_prime(void) { - unsigned i; - pv_hashed_entry_t pvh_e; - pv_hashed_entry_t pvh_eh; - pv_hashed_entry_t pvh_et; - int pv_cnt; + unsigned i; + pv_hashed_entry_t pvh_e; + pv_hashed_entry_t pvh_eh; + pv_hashed_entry_t pvh_et; + int pv_cnt; /* Scale based on DRAM size */ pv_hashed_low_water_mark = MAX(PV_HASHED_LOW_WATER_MARK_DEFAULT, ((uint32_t)(sane_size >> 30)) * 2000); @@ -314,8 +338,9 @@ mapping_free_prime(void) pvh_e->qlink.next = (queue_entry_t)pvh_eh; pvh_eh = pvh_e; - if (pvh_et == PV_HASHED_ENTRY_NULL) - pvh_et = pvh_e; + if (pvh_et == PV_HASHED_ENTRY_NULL) { + pvh_et = pvh_e; + } pv_cnt++; } PV_HASHED_FREE_LIST(pvh_eh, pvh_et, pv_cnt); @@ -328,8 +353,9 @@ mapping_free_prime(void) pvh_e->qlink.next = (queue_entry_t)pvh_eh; pvh_eh = pvh_e; - if (pvh_et == PV_HASHED_ENTRY_NULL) - pvh_et = pvh_e; + if (pvh_et == PV_HASHED_ENTRY_NULL) { + pvh_et = pvh_e; + } pv_cnt++; } PV_HASHED_KERN_FREE_LIST(pvh_eh, pvh_et, pv_cnt); @@ -337,7 +363,9 @@ mapping_free_prime(void) void mapping_replenish(void); -void mapping_adjust(void) { +void +mapping_adjust(void) +{ kern_return_t mres; pmap_pagetable_corruption_log_setup(); @@ -349,7 +377,7 @@ void mapping_adjust(void) { thread_deallocate(mapping_replenish_thread); } -unsigned pmap_mapping_thread_wakeups; +unsigned pmap_mapping_thread_wakeups; unsigned pmap_kernel_reserve_replenish_stat; unsigned pmap_user_reserve_replenish_stat; unsigned pmap_kern_reserve_alloc_stat; @@ -358,17 +386,16 @@ __attribute__((noreturn)) void mapping_replenish(void) { - pv_hashed_entry_t pvh_e; - pv_hashed_entry_t pvh_eh; - pv_hashed_entry_t pvh_et; - int pv_cnt; - unsigned i; + pv_hashed_entry_t pvh_e; + pv_hashed_entry_t pvh_eh; + pv_hashed_entry_t pvh_et; + int pv_cnt; + unsigned i; /* We qualify for VM privileges...*/ current_thread()->options |= TH_OPT_VMPRIV; for (;;) { - while (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) { pv_cnt = 0; pvh_eh = pvh_et = PV_HASHED_ENTRY_NULL; @@ -378,8 +405,9 @@ mapping_replenish(void) pvh_e->qlink.next = (queue_entry_t)pvh_eh; pvh_eh = pvh_e; - if (pvh_et == PV_HASHED_ENTRY_NULL) + if (pvh_et == PV_HASHED_ENTRY_NULL) { pvh_et = pvh_e; + } pv_cnt++; } pmap_kernel_reserve_replenish_stat += pv_cnt; @@ -396,8 +424,9 @@ mapping_replenish(void) pvh_e->qlink.next = (queue_entry_t)pvh_eh; pvh_eh = pvh_e; - if (pvh_et == PV_HASHED_ENTRY_NULL) + if (pvh_et == PV_HASHED_ENTRY_NULL) { pvh_et = pvh_e; + } pv_cnt++; } pmap_user_reserve_replenish_stat += pv_cnt; @@ -412,8 +441,9 @@ mapping_replenish(void) /* Check if the kernel pool has been depleted since the * first pass, to reduce refill latency. 
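 *
 * The kernel reserve gets this second look because PV_HASHED_KERN_ALLOC()
 * must never block: kernel-pmap mappings can be created from contexts
 * where sleeping in zalloc() is not an option. Looping back instead of
 * sleeping keeps that reserve primed even when the user-pool refill above
 * consumed the entire pass.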
*/ - if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) + if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) { continue; + } /* Block sans continuation to avoid yielding kernel stack */ assert_wait(&mapping_replenish_event, THREAD_UNINT); mappingrecurse = 0; @@ -428,15 +458,16 @@ mapping_replenish(void) void phys_attribute_set( - ppnum_t pn, - int bits) + ppnum_t pn, + int bits) { - int pai; + int pai; pmap_intr_assert(); assert(pn != vm_page_fictitious_addr); - if (pn == vm_page_guard_addr) + if (pn == vm_page_guard_addr) { return; + } pai = ppn_to_pai(pn); @@ -480,8 +511,9 @@ pmap_clear_modify(ppnum_t pn) boolean_t pmap_is_modified(ppnum_t pn) { - if (phys_attribute_test(pn, PHYS_MODIFIED)) + if (phys_attribute_test(pn, PHYS_MODIFIED)) { return TRUE; + } return FALSE; } @@ -514,8 +546,9 @@ pmap_set_reference(ppnum_t pn) boolean_t pmap_is_referenced(ppnum_t pn) { - if (phys_attribute_test(pn, PHYS_REFERENCED)) + if (phys_attribute_test(pn, PHYS_REFERENCED)) { return TRUE; + } return FALSE; } @@ -528,29 +561,31 @@ pmap_is_referenced(ppnum_t pn) unsigned int pmap_get_refmod(ppnum_t pn) { - int refmod; - unsigned int retval = 0; + int refmod; + unsigned int retval = 0; refmod = phys_attribute_test(pn, PHYS_MODIFIED | PHYS_REFERENCED); - if (refmod & PHYS_MODIFIED) - retval |= VM_MEM_MODIFIED; - if (refmod & PHYS_REFERENCED) - retval |= VM_MEM_REFERENCED; + if (refmod & PHYS_MODIFIED) { + retval |= VM_MEM_MODIFIED; + } + if (refmod & PHYS_REFERENCED) { + retval |= VM_MEM_REFERENCED; + } - return (retval); + return retval; } void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *arg) { - unsigned int x86Mask; + unsigned int x86Mask; - x86Mask = ( ((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0) - | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0)); + x86Mask = (((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0) + | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0)); - phys_attribute_clear(pn, x86Mask, options, arg); + phys_attribute_clear(pn, x86Mask, options, arg); } /* @@ -563,8 +598,8 @@ pmap_clear_refmod(ppnum_t pn, unsigned int mask) { unsigned int x86Mask; - x86Mask = ( ((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0) - | ((mask & VM_MEM_REFERENCED)? PHYS_REFERENCED : 0)); + x86Mask = (((mask & VM_MEM_MODIFIED)? PHYS_MODIFIED : 0) + | ((mask & VM_MEM_REFERENCED)? 
PHYS_REFERENCED : 0)); phys_attribute_clear(pn, x86Mask, 0, NULL); } @@ -572,7 +607,7 @@ pmap_clear_refmod(ppnum_t pn, unsigned int mask) unsigned int pmap_disconnect(ppnum_t pa) { - return (pmap_disconnect_options(pa, 0, NULL)); + return pmap_disconnect_options(pa, 0, NULL); } /* @@ -589,17 +624,20 @@ pmap_disconnect_options(ppnum_t pa, unsigned int options, void *arg) { unsigned refmod, vmrefmod = 0; - pmap_page_protect_options(pa, 0, options, arg); /* disconnect the page */ + pmap_page_protect_options(pa, 0, options, arg); /* disconnect the page */ pmap_assert(pa != vm_page_fictitious_addr); - if ((pa == vm_page_guard_addr) || !IS_MANAGED_PAGE(pa) || (options & PMAP_OPTIONS_NOREFMOD)) + if ((pa == vm_page_guard_addr) || !IS_MANAGED_PAGE(pa) || (options & PMAP_OPTIONS_NOREFMOD)) { return 0; + } refmod = pmap_phys_attributes[pa] & (PHYS_MODIFIED | PHYS_REFERENCED); - - if (refmod & PHYS_MODIFIED) - vmrefmod |= VM_MEM_MODIFIED; - if (refmod & PHYS_REFERENCED) - vmrefmod |= VM_MEM_REFERENCED; + + if (refmod & PHYS_MODIFIED) { + vmrefmod |= VM_MEM_MODIFIED; + } + if (refmod & PHYS_REFERENCED) { + vmrefmod |= VM_MEM_REFERENCED; + } return vmrefmod; } diff --git a/osfmk/i386/pmap_internal.h b/osfmk/i386/pmap_internal.h index 1a7c75e32..abf263a1f 100644 --- a/osfmk/i386/pmap_internal.h +++ b/osfmk/i386/pmap_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _I386_PMAP_INTERNAL_ +#ifndef _I386_PMAP_INTERNAL_ #define _I386_PMAP_INTERNAL_ #ifdef MACH_KERNEL_PRIVATE @@ -41,65 +41,100 @@ * pmap locking */ -#define PMAP_LOCK(pmap) { \ - simple_lock(&(pmap)->lock); \ +static inline void +PMAP_LOCK_EXCLUSIVE(pmap_t p) +{ + mp_disable_preemption(); + lck_rw_lock_exclusive(&p->pmap_rwl); } -#define PMAP_UNLOCK(pmap) { \ - simple_unlock(&(pmap)->lock); \ +static inline void +PMAP_LOCK_SHARED(pmap_t p) +{ + mp_disable_preemption(); + lck_rw_lock_shared(&p->pmap_rwl); } -#define PMAP_UPDATE_TLBS(pmap, s, e) \ - pmap_flush_tlbs(pmap, s, e, 0, NULL) - +static inline void +PMAP_LOCK_SHARED_TO_EXCLUSIVE(pmap_t p) +{ + lck_rw_lock_shared_to_exclusive(&p->pmap_rwl); +} -#define PMAP_DELAY_TLB_FLUSH 0x01 +static inline void +PMAP_LOCK_EXCLUSIVE_TO_SHARED(pmap_t p) +{ + lck_rw_lock_exclusive_to_shared(&p->pmap_rwl); +} -#define PMAP_UPDATE_TLBS_DELAYED(pmap, s, e, c) \ - pmap_flush_tlbs(pmap, s, e, PMAP_DELAY_TLB_FLUSH, c) +static inline void +PMAP_UNLOCK_EXCLUSIVE(pmap_t p) +{ + lck_rw_unlock_exclusive(&p->pmap_rwl); + mp_enable_preemption(); +} +static inline void +PMAP_UNLOCK_SHARED(pmap_t p) +{ + lck_rw_unlock_shared(&p->pmap_rwl); + mp_enable_preemption(); +} -#define iswired(pte) ((pte) & INTEL_PTE_WIRED) +#define iswired(pte) ((pte) & INTEL_PTE_WIRED) -#ifdef PMAP_TRACES -extern boolean_t pmap_trace; +#ifdef PMAP_TRACES +extern boolean_t pmap_trace; #define PMAP_TRACE(...) \ if (pmap_trace) { \ - KDBG_RELEASE(__VA_ARGS__); \ + KDBG_RELEASE(__VA_ARGS__); \ } #else -#define PMAP_TRACE(...) KDBG_DEBUG(__VA_ARGS__) +#define PMAP_TRACE(...) KDBG_DEBUG(__VA_ARGS__) #endif /* PMAP_TRACES */ #define PMAP_TRACE_CONSTANT(...) KDBG_RELEASE(__VA_ARGS__) -kern_return_t pmap_expand_pml4( - pmap_t map, - vm_map_offset_t v, - unsigned int options); +kern_return_t pmap_expand_pml4( + pmap_t map, + vm_map_offset_t v, + unsigned int options); -kern_return_t pmap_expand_pdpt( - pmap_t map, - vm_map_offset_t v, - unsigned int options); +kern_return_t pmap_expand_pdpt( + pmap_t map, + vm_map_offset_t v, + unsigned int options); -void phys_attribute_set( - ppnum_t phys, - int bits); +void phys_attribute_set( + ppnum_t phys, + int bits); -void pmap_set_reference( - ppnum_t pn); +void pmap_set_reference( + ppnum_t pn); -boolean_t phys_page_exists( - ppnum_t pn); +boolean_t phys_page_exists( + ppnum_t pn); void -pmap_flush_tlbs(pmap_t, vm_map_offset_t, vm_map_offset_t, int, pmap_flush_context *); + pmap_flush_tlbs(pmap_t, vm_map_offset_t, vm_map_offset_t, int, pmap_flush_context *); void -pmap_update_cache_attributes_locked(ppnum_t, unsigned); + pmap_update_cache_attributes_locked(ppnum_t, unsigned); + + +static inline void +PMAP_UPDATE_TLBS(pmap_t fp, addr64_t s, addr64_t e) +{ + pmap_flush_tlbs(fp, s, e, 0, NULL); +} + +#define PMAP_DELAY_TLB_FLUSH 0x01 -extern const boolean_t cpu_64bit; +static inline void +PMAP_UPDATE_TLBS_DELAYED(pmap_t fp, addr64_t s, addr64_t e, pmap_flush_context *pfc) +{ + pmap_flush_tlbs(fp, s, e, PMAP_DELAY_TLB_FLUSH, pfc); +} /* * Private data structures. @@ -121,116 +156,116 @@ extern const boolean_t cpu_64bit; */ /* - -PV HASHING Changes - JK 1/2007 - -Pve's establish physical to virtual mappings. These are used for aliasing of a -physical page to (potentially many) virtual addresses within pmaps. 
In the -previous implementation the structure of the pv_entries (each 16 bytes in size) was - -typedef struct pv_entry { - struct pv_entry_t next; - pmap_t pmap; - vm_map_offset_t va; -} *pv_entry_t; - -An initial array of these is created at boot time, one per physical page of -memory, indexed by the physical page number. Additionally, a pool of entries -is created from a pv_zone to be used as needed by pmap_enter() when it is -creating new mappings. Originally, we kept this pool around because the code -in pmap_enter() was unable to block if it needed an entry and none were -available - we'd panic. Some time ago I restructured the pmap_enter() code -so that for user pmaps it can block while zalloc'ing a pv structure and restart, -removing a panic from the code (in the case of the kernel pmap we cannot block -and still panic, so, we keep a separate hot pool for use only on kernel pmaps). -The pool has not been removed since there is a large performance gain keeping -freed pv's around for reuse and not suffering the overhead of zalloc for every -new pv we need. - -As pmap_enter() created new mappings it linked the new pve's for them off the -fixed pv array for that ppn (off the next pointer). These pve's are accessed -for several operations, one of them being address space teardown. In that case, -we basically do this - - for (every page/pte in the space) { - calc pve_ptr from the ppn in the pte - for (every pv in the list for the ppn) { - if (this pv is for this pmap/vaddr) { - do housekeeping - unlink/free the pv - } - } - } - -The problem arose when we were running, say 8000 (or even 2000) apache or -other processes and one or all terminate. The list hanging off each pv array -entry could have thousands of entries. We were continuously linearly searching -each of these lists as we stepped through the address space we were tearing -down. Because of the locks we hold, likely taking a cache miss for each node, -and interrupt disabling for MP issues the system became completely unresponsive -for many seconds while we did this. - -Realizing that pve's are accessed in two distinct ways (linearly running the -list by ppn for operations like pmap_page_protect and finding and -modifying/removing a single pve as part of pmap_enter processing) has led to -modifying the pve structures and databases. - -There are now two types of pve structures. A "rooted" structure which is -basically the original structure accessed in an array by ppn, and a ''hashed'' -structure accessed on a hash list via a hash of [pmap, vaddr]. These have been -designed with the two goals of minimizing wired memory and making the lookup of -a ppn faster. Since a vast majority of pages in the system are not aliased -and hence represented by a single pv entry I've kept the rooted entry size as -small as possible because there is one of these dedicated for every physical -page of memory. The hashed pve's are larger due to the addition of the hash -link and the ppn entry needed for matching while running the hash list to find -the entry we are looking for. This way, only systems that have lots of -aliasing (like 2000+ httpd procs) will pay the extra memory price. Both -structures have the same first three fields allowing some simplification in -the code. 
- -They have these shapes - -typedef struct pv_rooted_entry { - queue_head_t qlink; - vm_map_offset_t va; - pmap_t pmap; -} *pv_rooted_entry_t; - - -typedef struct pv_hashed_entry { - queue_head_t qlink; - vm_map_offset_t va; - pmap_t pmap; - ppnum_t ppn; - struct pv_hashed_entry *nexth; -} *pv_hashed_entry_t; - -The main flow difference is that the code is now aware of the rooted entry and -the hashed entries. Code that runs the pv list still starts with the rooted -entry and then continues down the qlink onto the hashed entries. Code that is -looking up a specific pv entry first checks the rooted entry and then hashes -and runs the hash list for the match. The hash list lengths are much smaller -than the original pv lists that contained all aliases for the specific ppn. - -*/ + * + * PV HASHING Changes - JK 1/2007 + * + * Pve's establish physical to virtual mappings. These are used for aliasing of a + * physical page to (potentially many) virtual addresses within pmaps. In the + * previous implementation the structure of the pv_entries (each 16 bytes in size) was + * + * typedef struct pv_entry { + * struct pv_entry_t next; + * pmap_t pmap; + * vm_map_offset_t va; + * } *pv_entry_t; + * + * An initial array of these is created at boot time, one per physical page of + * memory, indexed by the physical page number. Additionally, a pool of entries + * is created from a pv_zone to be used as needed by pmap_enter() when it is + * creating new mappings. Originally, we kept this pool around because the code + * in pmap_enter() was unable to block if it needed an entry and none were + * available - we'd panic. Some time ago I restructured the pmap_enter() code + * so that for user pmaps it can block while zalloc'ing a pv structure and restart, + * removing a panic from the code (in the case of the kernel pmap we cannot block + * and still panic, so, we keep a separate hot pool for use only on kernel pmaps). + * The pool has not been removed since there is a large performance gain keeping + * freed pv's around for reuse and not suffering the overhead of zalloc for every + * new pv we need. + * + * As pmap_enter() created new mappings it linked the new pve's for them off the + * fixed pv array for that ppn (off the next pointer). These pve's are accessed + * for several operations, one of them being address space teardown. In that case, + * we basically do this + * + * for (every page/pte in the space) { + * calc pve_ptr from the ppn in the pte + * for (every pv in the list for the ppn) { + * if (this pv is for this pmap/vaddr) { + * do housekeeping + * unlink/free the pv + * } + * } + * } + * + * The problem arose when we were running, say 8000 (or even 2000) apache or + * other processes and one or all terminate. The list hanging off each pv array + * entry could have thousands of entries. We were continuously linearly searching + * each of these lists as we stepped through the address space we were tearing + * down. Because of the locks we hold, likely taking a cache miss for each node, + * and interrupt disabling for MP issues the system became completely unresponsive + * for many seconds while we did this. + * + * Realizing that pve's are accessed in two distinct ways (linearly running the + * list by ppn for operations like pmap_page_protect and finding and + * modifying/removing a single pve as part of pmap_enter processing) has led to + * modifying the pve structures and databases. + * + * There are now two types of pve structures. 
A "rooted" structure which is + * basically the original structure accessed in an array by ppn, and a ''hashed'' + * structure accessed on a hash list via a hash of [pmap, vaddr]. These have been + * designed with the two goals of minimizing wired memory and making the lookup of + * a ppn faster. Since a vast majority of pages in the system are not aliased + * and hence represented by a single pv entry I've kept the rooted entry size as + * small as possible because there is one of these dedicated for every physical + * page of memory. The hashed pve's are larger due to the addition of the hash + * link and the ppn entry needed for matching while running the hash list to find + * the entry we are looking for. This way, only systems that have lots of + * aliasing (like 2000+ httpd procs) will pay the extra memory price. Both + * structures have the same first three fields allowing some simplification in + * the code. + * + * They have these shapes + * + * typedef struct pv_rooted_entry { + * queue_head_t qlink; + * vm_map_offset_t va; + * pmap_t pmap; + * } *pv_rooted_entry_t; + * + * + * typedef struct pv_hashed_entry { + * queue_head_t qlink; + * vm_map_offset_t va; + * pmap_t pmap; + * ppnum_t ppn; + * struct pv_hashed_entry *nexth; + * } *pv_hashed_entry_t; + * + * The main flow difference is that the code is now aware of the rooted entry and + * the hashed entries. Code that runs the pv list still starts with the rooted + * entry and then continues down the qlink onto the hashed entries. Code that is + * looking up a specific pv entry first checks the rooted entry and then hashes + * and runs the hash list for the match. The hash list lengths are much smaller + * than the original pv lists that contained all aliases for the specific ppn. + * + */ typedef struct pv_rooted_entry { /* first three entries must match pv_hashed_entry_t */ - queue_head_t qlink; - vm_map_offset_t va_and_flags; /* virtual address for mapping */ - pmap_t pmap; /* pmap where mapping lies */ + queue_head_t qlink; + vm_map_offset_t va_and_flags; /* virtual address for mapping */ + pmap_t pmap; /* pmap where mapping lies */ } *pv_rooted_entry_t; -#define PV_ROOTED_ENTRY_NULL ((pv_rooted_entry_t) 0) +#define PV_ROOTED_ENTRY_NULL ((pv_rooted_entry_t) 0) typedef struct pv_hashed_entry { /* first three entries must match pv_rooted_entry_t */ - queue_head_t qlink; - vm_map_offset_t va_and_flags; - pmap_t pmap; - ppnum_t ppn; - struct pv_hashed_entry *nexth; + queue_head_t qlink; + vm_map_offset_t va_and_flags; + pmap_t pmap; + ppnum_t ppn; + struct pv_hashed_entry *nexth; } *pv_hashed_entry_t; #define PV_HASHED_ENTRY_NULL ((pv_hashed_entry_t)0) @@ -255,45 +290,47 @@ typedef struct pv_hashed_entry { #define PV_HASHED_ALLOC_CHUNK_INITIAL 2000 #define PV_HASHED_KERN_ALLOC_CHUNK_INITIAL 200 -extern volatile uint32_t mappingrecurse; +extern volatile uint32_t mappingrecurse; extern uint32_t pv_hashed_low_water_mark, pv_hashed_kern_low_water_mark; /* * PV hash locking */ -#define LOCK_PV_HASH(hash) lock_hash_hash(hash) -#define UNLOCK_PV_HASH(hash) unlock_hash_hash(hash) +#define LOCK_PV_HASH(hash) lock_hash_hash(hash) +#define UNLOCK_PV_HASH(hash) unlock_hash_hash(hash) extern uint32_t npvhashmask; -extern pv_hashed_entry_t *pv_hash_table; /* hash lists */ -extern pv_hashed_entry_t pv_hashed_free_list; -extern pv_hashed_entry_t pv_hashed_kern_free_list; +extern pv_hashed_entry_t *pv_hash_table; /* hash lists */ +extern pv_hashed_entry_t pv_hashed_free_list; +extern pv_hashed_entry_t pv_hashed_kern_free_list; 
decl_simple_lock_data(extern, pv_hashed_free_list_lock) decl_simple_lock_data(extern, pv_hashed_kern_free_list_lock) decl_simple_lock_data(extern, pv_hash_table_lock) decl_simple_lock_data(extern, phys_backup_lock) -extern zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry - * structures */ +extern zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry + * structures */ -extern uint32_t pv_hashed_free_count; -extern uint32_t pv_hashed_kern_free_count; +extern uint32_t pv_hashed_free_count; +extern uint32_t pv_hashed_kern_free_count; /* * Each entry in the pv_head_table is locked by a bit in the * pv_lock_table. The lock bits are accessed by the address of * the frame they lock. */ -#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE) +#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE) #define pv_hash_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE) -extern char *pv_lock_table; /* pointer to array of bits */ -extern char *pv_hash_lock_table; -extern pv_rooted_entry_t pv_head_table; /* array of entries, one per page */ +extern char *pv_lock_table; /* pointer to array of bits */ +extern char *pv_hash_lock_table; +extern pv_rooted_entry_t pv_head_table; /* array of entries, one per page */ extern event_t mapping_replenish_event; -static inline void PV_HASHED_ALLOC(pv_hashed_entry_t *pvh_ep) { +static inline void +PV_HASHED_ALLOC(pv_hashed_entry_t *pvh_ep) +{ pmap_assert(*pvh_ep == PV_HASHED_ENTRY_NULL); - simple_lock(&pv_hashed_free_list_lock); + simple_lock(&pv_hashed_free_list_lock, LCK_GRP_NULL); /* If the kernel reserved pool is low, let non-kernel mappings allocate * synchronously, possibly subject to a throttle. */ @@ -305,13 +342,16 @@ static inline void PV_HASHED_ALLOC(pv_hashed_entry_t *pvh_ep) { simple_unlock(&pv_hashed_free_list_lock); if (pv_hashed_free_count <= pv_hashed_low_water_mark) { - if (!mappingrecurse && hw_compare_and_store(0,1, &mappingrecurse)) + if (!mappingrecurse && hw_compare_and_store(0, 1, &mappingrecurse)) { thread_wakeup(&mapping_replenish_event); + } } } -static inline void PV_HASHED_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry_t pvh_et, int pv_cnt) { - simple_lock(&pv_hashed_free_list_lock); +static inline void +PV_HASHED_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry_t pvh_et, int pv_cnt) +{ + simple_lock(&pv_hashed_free_list_lock, LCK_GRP_NULL); pvh_et->qlink.next = (queue_entry_t)pv_hashed_free_list; pv_hashed_free_list = pvh_eh; pv_hashed_free_count += pv_cnt; @@ -320,9 +360,11 @@ static inline void PV_HASHED_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry extern unsigned pmap_kern_reserve_alloc_stat; -static inline void PV_HASHED_KERN_ALLOC(pv_hashed_entry_t *pvh_e) { +static inline void +PV_HASHED_KERN_ALLOC(pv_hashed_entry_t *pvh_e) +{ pmap_assert(*pvh_e == PV_HASHED_ENTRY_NULL); - simple_lock(&pv_hashed_kern_free_list_lock); + simple_lock(&pv_hashed_kern_free_list_lock, LCK_GRP_NULL); if ((*pvh_e = pv_hashed_kern_free_list) != 0) { pv_hashed_kern_free_list = (pv_hashed_entry_t)(*pvh_e)->qlink.next; @@ -333,13 +375,16 @@ static inline void PV_HASHED_KERN_ALLOC(pv_hashed_entry_t *pvh_e) { simple_unlock(&pv_hashed_kern_free_list_lock); if (pv_hashed_kern_free_count < pv_hashed_kern_low_water_mark) { - if (!mappingrecurse && hw_compare_and_store(0,1, &mappingrecurse)) + if (!mappingrecurse && hw_compare_and_store(0, 1, &mappingrecurse)) { thread_wakeup(&mapping_replenish_event); + } } } -static inline void PV_HASHED_KERN_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry_t pvh_et, int pv_cnt) { - 
simple_lock(&pv_hashed_kern_free_list_lock); +static inline void +PV_HASHED_KERN_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_entry_t pvh_et, int pv_cnt) +{ + simple_lock(&pv_hashed_kern_free_list_lock, LCK_GRP_NULL); pvh_et->qlink.next = (queue_entry_t)pv_hashed_kern_free_list; pv_hashed_kern_free_list = pvh_eh; pv_hashed_kern_free_count += pv_cnt; @@ -349,7 +394,9 @@ static inline void PV_HASHED_KERN_FREE_LIST(pv_hashed_entry_t pvh_eh, pv_hashed_ extern uint64_t pmap_pv_throttle_stat, pmap_pv_throttled_waiters; extern event_t pmap_user_pv_throttle_event; -static inline void pmap_pv_throttle(__unused pmap_t p) { +static inline void +pmap_pv_throttle(__unused pmap_t p) +{ pmap_assert(p != kernel_pmap); /* Apply throttle on non-kernel mappings */ if (pv_hashed_kern_free_count < (pv_hashed_kern_low_water_mark / 2)) { @@ -367,42 +414,44 @@ static inline void pmap_pv_throttle(__unused pmap_t p) { * Index into pv_head table, its lock bits, and the modify/reference and managed bits */ -#define pa_index(pa) (i386_btop(pa)) -#define ppn_to_pai(ppn) ((int)ppn) +#define pa_index(pa) (i386_btop(pa)) +#define ppn_to_pai(ppn) ((int)ppn) -#define pai_to_pvh(pai) (&pv_head_table[pai]) -#define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table) -#define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table) -#define pvhash(idx) (&pv_hash_table[idx]) -#define lock_hash_hash(hash) bit_lock(hash, (void *)pv_hash_lock_table) -#define unlock_hash_hash(hash) bit_unlock(hash, (void *)pv_hash_lock_table) +#define pai_to_pvh(pai) (&pv_head_table[pai]) +#define lock_pvh_pai(pai) bit_lock(pai, (void *)pv_lock_table) +#define unlock_pvh_pai(pai) bit_unlock(pai, (void *)pv_lock_table) +#define pvhash(idx) (&pv_hash_table[idx]) +#define lock_hash_hash(hash) bit_lock(hash, (void *)pv_hash_lock_table) +#define unlock_hash_hash(hash) bit_unlock(hash, (void *)pv_hash_lock_table) -#define IS_MANAGED_PAGE(x) \ - ((unsigned int)(x) <= last_managed_page && \ +#define IS_MANAGED_PAGE(x) \ + ((unsigned int)(x) <= last_managed_page && \ (pmap_phys_attributes[x] & PHYS_MANAGED)) -#define IS_INTERNAL_PAGE(x) \ +#define IS_INTERNAL_PAGE(x) \ (IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_INTERNAL)) -#define IS_REUSABLE_PAGE(x) \ +#define IS_REUSABLE_PAGE(x) \ (IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_REUSABLE)) -#define IS_ALTACCT_PAGE(x,pve) \ - (IS_MANAGED_PAGE((x)) && \ +#define IS_ALTACCT_PAGE(x, pve) \ + (IS_MANAGED_PAGE((x)) && \ (PVE_IS_ALTACCT_PAGE((pve)))) /* * Physical page attributes. Copy bits from PTE definition. 
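 *
 * Because these attribute bits occupy the same positions as their PTE
 * counterparts, phys_attribute_set()/phys_attribute_test() can OR and
 * mask dirty/reference bits harvested from PTEs straight into
 * pmap_phys_attributes[] with no translation step. Bits that carry no
 * meaning in a per-physical-page array (INTEL_PTE_USER, INTEL_PTE_WRITE,
 * INTEL_PTE_WTHRU) are repurposed below as the software-only flags
 * PHYS_NOENCRYPT, PHYS_REUSABLE and PHYS_INTERNAL.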
*/ -#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */ -#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */ -#define PHYS_MANAGED INTEL_PTE_VALID /* page is managed */ -#define PHYS_NOENCRYPT INTEL_PTE_USER /* no need to encrypt this page in the hibernation image */ -#define PHYS_NCACHE INTEL_PTE_NCACHE -#define PHYS_PTA INTEL_PTE_PTA -#define PHYS_CACHEABILITY_MASK (INTEL_PTE_PTA | INTEL_PTE_NCACHE) -#define PHYS_INTERNAL INTEL_PTE_WTHRU /* page from internal object */ -#define PHYS_REUSABLE INTEL_PTE_WRITE /* page is "reusable" */ - -extern boolean_t pmap_disable_kheap_nx; -extern boolean_t pmap_disable_kstack_nx; +#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */ +#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */ +#define PHYS_MANAGED INTEL_PTE_VALID /* page is managed */ +#define PHYS_NOENCRYPT INTEL_PTE_USER /* no need to encrypt this page in the hibernation image */ +#define PHYS_NCACHE INTEL_PTE_NCACHE +#define PHYS_PAT INTEL_PTE_PAT +#define PHYS_CACHEABILITY_MASK (INTEL_PTE_PAT | INTEL_PTE_NCACHE) +#define PHYS_INTERNAL INTEL_PTE_WTHRU /* page from internal object */ +#define PHYS_REUSABLE INTEL_PTE_WRITE /* page is "reusable" */ + +#if DEVELOPMENT || DEBUG +extern boolean_t pmap_disable_kheap_nx; +extern boolean_t pmap_disable_kstack_nx; +#endif #define PMAP_EXPAND_OPTIONS_NONE (0x0) #define PMAP_EXPAND_OPTIONS_NOWAIT (PMAP_OPTIONS_NOWAIT) @@ -412,8 +461,7 @@ extern boolean_t pmap_disable_kstack_nx; * Amount of virtual memory mapped by one * page-directory entry. */ -#define PDE_MAPPED_SIZE (pdetova(1)) - +#define PDE_MAPPED_SIZE (pdetova(1)) /* * Locking and TLB invalidation @@ -444,24 +492,24 @@ extern boolean_t pmap_disable_kstack_nx; * PV locking */ -#define LOCK_PVH(index) { \ - mp_disable_preemption(); \ - lock_pvh_pai(index); \ +#define LOCK_PVH(index) { \ + mp_disable_preemption(); \ + lock_pvh_pai(index); \ } -#define UNLOCK_PVH(index) { \ - unlock_pvh_pai(index); \ - mp_enable_preemption(); \ +#define UNLOCK_PVH(index) { \ + unlock_pvh_pai(index); \ + mp_enable_preemption(); \ } extern uint64_t pde_mapped_size; -extern char *pmap_phys_attributes; -extern ppnum_t last_managed_page; +extern char *pmap_phys_attributes; +extern ppnum_t last_managed_page; -extern ppnum_t lowest_lo; -extern ppnum_t lowest_hi; -extern ppnum_t highest_hi; +extern ppnum_t lowest_lo; +extern ppnum_t lowest_hi; +extern ppnum_t highest_hi; /* * when spinning through pmap_remove @@ -478,24 +526,25 @@ extern uint64_t max_preemption_latency_tsc; #endif #if PMAP_INTR_DEBUG -#define pmap_intr_assert() { \ - if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) \ - panic("pmap interrupt assert %d %s, %d", processor_avail_count, __FILE__, __LINE__); \ +#define pmap_intr_assert() { \ + if (processor_avail_count > 1 && !ml_get_interrupts_enabled()) \ + panic("pmap interrupt assert %d %s, %d", processor_avail_count, __FILE__, __LINE__); \ } #else #define pmap_intr_assert() #endif - -extern int nx_enabled; +#if DEVELOPMENT || DEBUG +extern int nx_enabled; +#endif extern unsigned int inuse_ptepages_count; static inline uint32_t pvhashidx(pmap_t pmap, vm_map_offset_t va) { uint32_t hashidx = ((uint32_t)(uintptr_t)pmap ^ - ((uint32_t)(va >> PAGE_SHIFT) & 0xFFFFFFFF)) & - npvhashmask; - return hashidx; + ((uint32_t)(va >> PAGE_SHIFT) & 0xFFFFFFFF)) & + npvhashmask; + return hashidx; } /* @@ -503,12 +552,12 @@ pvhashidx(pmap_t pmap, vm_map_offset_t va) * properly deals with the anchor. 
* must be called with the hash locked, does not unlock it */ -static inline void +static inline void pmap_pvh_unlink(pv_hashed_entry_t pvh) { - pv_hashed_entry_t curh; - pv_hashed_entry_t *pprevh; - int pvhash_idx; + pv_hashed_entry_t curh; + pv_hashed_entry_t *pprevh; + int pvhash_idx; CHK_NPVHASH(); pvhash_idx = pvhashidx(pvh->pmap, PVE_VA(pvh)); @@ -516,25 +565,29 @@ pmap_pvh_unlink(pv_hashed_entry_t pvh) pprevh = pvhash(pvhash_idx); #if PV_DEBUG - if (NULL == *pprevh) + if (NULL == *pprevh) { panic("pvh_unlink null anchor"); /* JK DEBUG */ + } #endif curh = *pprevh; while (PV_HASHED_ENTRY_NULL != curh) { - if (pvh == curh) + if (pvh == curh) { break; + } pprevh = &curh->nexth; curh = curh->nexth; } - if (PV_HASHED_ENTRY_NULL == curh) panic("pmap_pvh_unlink no pvh"); + if (PV_HASHED_ENTRY_NULL == curh) { + panic("pmap_pvh_unlink no pvh"); + } *pprevh = pvh->nexth; return; } static inline void -pv_hash_add(pv_hashed_entry_t pvh_e, - pv_rooted_entry_t pv_h) +pv_hash_add(pv_hashed_entry_t pvh_e, + pv_rooted_entry_t pv_h) { pv_hashed_entry_t *hashp; int pvhash_idx; @@ -545,8 +598,9 @@ pv_hash_add(pv_hashed_entry_t pvh_e, insque(&pvh_e->qlink, &pv_h->qlink); hashp = pvhash(pvhash_idx); #if PV_DEBUG - if (NULL==hashp) + if (NULL == hashp) { panic("pv_hash_add(%p) null hash bucket", pvh_e); + } #endif pvh_e->nexth = *hashp; *hashp = pvh_e; @@ -559,15 +613,17 @@ pv_hash_remove(pv_hashed_entry_t pvh_e) int pvhash_idx; CHK_NPVHASH(); - pvhash_idx = pvhashidx(pvh_e->pmap,PVE_VA(pvh_e)); + pvhash_idx = pvhashidx(pvh_e->pmap, PVE_VA(pvh_e)); LOCK_PV_HASH(pvhash_idx); remque(&pvh_e->qlink); pmap_pvh_unlink(pvh_e); UNLOCK_PV_HASH(pvhash_idx); -} +} -static inline boolean_t popcnt1(uint64_t distance) { - return ((distance & (distance - 1)) == 0); +static inline boolean_t +popcnt1(uint64_t distance) +{ + return (distance & (distance - 1)) == 0; } /* @@ -587,12 +643,12 @@ static inline boolean_t popcnt1(uint64_t distance) { */ typedef enum { - PTE_VALID = 0x0, - PTE_INVALID = 0x1, - PTE_RSVD = 0x2, - PTE_SUPERVISOR = 0x4, - PTE_BITFLIP = 0x8, - PV_BITFLIP = 0x10, + PTE_VALID = 0x0, + PTE_INVALID = 0x1, + PTE_RSVD = 0x2, + PTE_SUPERVISOR = 0x4, + PTE_BITFLIP = 0x8, + PV_BITFLIP = 0x10, PTE_INVALID_CACHEABILITY = 0x20 } pmap_pagetable_corruption_t; @@ -602,13 +658,13 @@ typedef enum { } pmap_pv_assertion_t; typedef enum { - PMAP_ACTION_IGNORE = 0x0, - PMAP_ACTION_ASSERT = 0x1, - PMAP_ACTION_RETRY = 0x2, + PMAP_ACTION_IGNORE = 0x0, + PMAP_ACTION_ASSERT = 0x1, + PMAP_ACTION_RETRY = 0x2, PMAP_ACTION_RETRY_RELOCK = 0x4 } pmap_pagetable_corruption_action_t; -#define PMAP_PAGETABLE_CORRUPTION_INTERVAL (6ULL * 3600ULL) +#define PMAP_PAGETABLE_CORRUPTION_INTERVAL (6ULL * 3600ULL) extern uint64_t pmap_pagetable_corruption_interval_abstime; extern uint32_t pmap_pagetable_corruption_incidents; @@ -617,7 +673,7 @@ typedef struct { pmap_pv_assertion_t incident; pmap_pagetable_corruption_t reason; pmap_pagetable_corruption_action_t action; - pmap_t pmap; + pmap_t pmap; vm_map_offset_t vaddr; pt_entry_t pte; ppnum_t ppn; @@ -628,11 +684,12 @@ typedef struct { extern pmap_pagetable_corruption_record_t pmap_pagetable_corruption_records[]; extern uint64_t pmap_pagetable_corruption_last_abstime; -extern thread_call_t pmap_pagetable_corruption_log_call; +extern thread_call_t pmap_pagetable_corruption_log_call; extern boolean_t pmap_pagetable_corruption_timeout; static inline void -pmap_pagetable_corruption_log(pmap_pv_assertion_t incident, pmap_pagetable_corruption_t suppress_reason, pmap_pagetable_corruption_action_t action, 
pmap_t pmap, vm_map_offset_t vaddr, pt_entry_t *ptep, ppnum_t ppn, pmap_t pvpmap, vm_map_offset_t pvva) { +pmap_pagetable_corruption_log(pmap_pv_assertion_t incident, pmap_pagetable_corruption_t suppress_reason, pmap_pagetable_corruption_action_t action, pmap_t pmap, vm_map_offset_t vaddr, pt_entry_t *ptep, ppnum_t ppn, pmap_t pvpmap, vm_map_offset_t pvva) +{ uint32_t pmap_pagetable_corruption_log_index; pmap_pagetable_corruption_log_index = pmap_pagetable_corruption_incidents++ % PMAP_PAGETABLE_CORRUPTION_MAX_LOG; pmap_pagetable_corruption_records[pmap_pagetable_corruption_log_index].incident = incident; @@ -650,16 +707,17 @@ pmap_pagetable_corruption_log(pmap_pv_assertion_t incident, pmap_pagetable_corru } static inline pmap_pagetable_corruption_action_t -pmap_classify_pagetable_corruption(pmap_t pmap, vm_map_offset_t vaddr, ppnum_t *ppnp, pt_entry_t *ptep, pmap_pv_assertion_t incident) { - pmap_pagetable_corruption_action_t action = PMAP_ACTION_ASSERT; - pmap_pagetable_corruption_t suppress_reason = PTE_VALID; - ppnum_t suppress_ppn = 0; +pmap_classify_pagetable_corruption(pmap_t pmap, vm_map_offset_t vaddr, ppnum_t *ppnp, pt_entry_t *ptep, pmap_pv_assertion_t incident) +{ + pmap_pagetable_corruption_action_t action = PMAP_ACTION_ASSERT; + pmap_pagetable_corruption_t suppress_reason = PTE_VALID; + ppnum_t suppress_ppn = 0; pt_entry_t cpte = *ptep; - ppnum_t cpn = pa_index(pte_to_pa(cpte)); - ppnum_t ppn = *ppnp; - pv_rooted_entry_t pv_h = pai_to_pvh(ppn_to_pai(ppn)); - pv_rooted_entry_t pv_e = pv_h; - uint32_t bitdex; + ppnum_t cpn = pa_index(pte_to_pa(cpte)); + ppnum_t ppn = *ppnp; + pv_rooted_entry_t pv_h = pai_to_pvh(ppn_to_pai(ppn)); + pv_rooted_entry_t pv_e = pv_h; + uint32_t bitdex; pmap_t pvpmap = pv_h->pmap; vm_map_offset_t pvva = PVE_VA(pv_h); vm_map_offset_t pve_flags; @@ -722,15 +780,13 @@ pmap_classify_pagetable_corruption(pmap_t pmap, vm_map_offset_t vaddr, ppnum_t * * Check for malformed/inconsistent entries. * The first check here isn't useful for EPT PTEs because INTEL_EPT_NCACHE == 0 */ - if (!is_ept && ((cpte & (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU | INTEL_PTE_PTA)) == (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU))) { + if (!is_ept && ((cpte & (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU | INTEL_PTE_PAT)) == (INTEL_PTE_NCACHE | INTEL_PTE_WTHRU))) { action = PMAP_ACTION_IGNORE; suppress_reason = PTE_INVALID_CACHEABILITY; - } - else if (cpte & INTEL_PTE_RSVD) { + } else if (cpte & INTEL_PTE_RSVD) { action = PMAP_ACTION_IGNORE; suppress_reason = PTE_RSVD; - } - else if ((pmap != kernel_pmap) && (!is_ept) && ((cpte & INTEL_PTE_USER) == 0)) { + } else if ((pmap != kernel_pmap) && (!is_ept) && ((cpte & INTEL_PTE_USER) == 0)) { action = PMAP_ACTION_IGNORE; suppress_reason = PTE_SUPERVISOR; } @@ -744,9 +800,7 @@ pmap_cpc_exit: if ((mach_absolute_time() - pmap_pagetable_corruption_last_abstime) < pmap_pagetable_corruption_interval_abstime) { action = PMAP_ACTION_ASSERT; pmap_pagetable_corruption_timeout = TRUE; - } - else - { + } else { pmap_pagetable_corruption_last_abstime = mach_absolute_time(); } pmap_pagetable_corruption_log(incident, suppress_reason, action, pmap, vaddr, &cpte, *ppnp, pvpmap, pvva); @@ -759,18 +813,18 @@ pmap_cpc_exit: * Returns pv entry to be freed (or NULL). 
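 *
 * Locking contract, sketched from the code below: the caller holds the
 * PV list lock for the page (LOCK_PVH) and is tearing down the mapping
 * whose PTE is at *pte. If the rooted entry is the one being removed and
 * hashed entries remain, the next hashed entry is unhashed and copied
 * into the root, so the root always describes a live mapping, and the
 * displaced pv_hashed_entry_t is returned for the caller to recycle via
 * PV_HASHED_FREE_LIST(). A NULL return means the root was the only
 * mapping and nothing needs to be freed.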
*/ static inline __attribute__((always_inline)) pv_hashed_entry_t -pmap_pv_remove(pmap_t pmap, - vm_map_offset_t vaddr, - ppnum_t *ppnp, - pt_entry_t *pte, - boolean_t *was_altacct) +pmap_pv_remove(pmap_t pmap, + vm_map_offset_t vaddr, + ppnum_t *ppnp, + pt_entry_t *pte, + boolean_t *was_altacct) { pv_hashed_entry_t pvh_e; - pv_rooted_entry_t pv_h; - pv_hashed_entry_t *pprevh; + pv_rooted_entry_t pv_h; + pv_hashed_entry_t *pprevh; int pvhash_idx; uint32_t pv_cnt; - ppnum_t ppn; + ppnum_t ppn; *was_altacct = FALSE; pmap_pv_remove_retry: @@ -780,28 +834,28 @@ pmap_pv_remove_retry: if (__improbable(pv_h->pmap == PMAP_NULL)) { pmap_pagetable_corruption_action_t pac = pmap_classify_pagetable_corruption(pmap, vaddr, ppnp, pte, ROOT_ABSENT); - if (pac == PMAP_ACTION_IGNORE) + if (pac == PMAP_ACTION_IGNORE) { goto pmap_pv_remove_exit; - else if (pac == PMAP_ACTION_ASSERT) + } else if (pac == PMAP_ACTION_ASSERT) { panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x, 0x%llx, %p, %p): null pv_list, priors: %d", pmap, vaddr, ppn, *pte, ppnp, pte, pmap_pagetable_corruption_incidents); - else if (pac == PMAP_ACTION_RETRY_RELOCK) { + } else if (pac == PMAP_ACTION_RETRY_RELOCK) { LOCK_PVH(ppn_to_pai(*ppnp)); pmap_phys_attributes[ppn_to_pai(*ppnp)] |= (PHYS_MODIFIED | PHYS_REFERENCED); goto pmap_pv_remove_retry; - } - else if (pac == PMAP_ACTION_RETRY) + } else if (pac == PMAP_ACTION_RETRY) { goto pmap_pv_remove_retry; + } } if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) { *was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pv_h); /* - * Header is the pv_rooted_entry. + * Header is the pv_rooted_entry. * We can't free that. If there is a queued - * entry after this one we remove that - * from the ppn queue, we remove it from the hash chain - * and copy it to the rooted entry. Then free it instead. - */ + * entry after this one we remove that + * from the ppn queue, we remove it from the hash chain + * and copy it to the rooted entry. Then free it instead. 
+ */ pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink); if (pv_h != (pv_rooted_entry_t) pvh_e) { /* @@ -815,7 +869,7 @@ pmap_pv_remove_retry: pprevh = pvhash(pvhash_idx); if (PV_HASHED_ENTRY_NULL == *pprevh) { panic("Possible memory corruption: pmap_pv_remove(%p,0x%llx,0x%x): " - "empty hash, removing rooted, priors: %d", + "empty hash, removing rooted, priors: %d", pmap, vaddr, ppn, pmap_pagetable_corruption_incidents); } pmap_pvh_unlink(pvh_e); @@ -848,8 +902,9 @@ pmap_pv_remove_retry: pv_cnt++; if (pvh_e->pmap == pmap && PVE_VA(pvh_e) == vaddr && - pvh_e->ppn == ppn) + pvh_e->ppn == ppn) { break; + } pprevh = &pvh_e->nexth; pvh_e = pvh_e->nexth; } @@ -857,19 +912,17 @@ pmap_pv_remove_retry: if (PV_HASHED_ENTRY_NULL == pvh_e) { pmap_pagetable_corruption_action_t pac = pmap_classify_pagetable_corruption(pmap, vaddr, ppnp, pte, ROOT_PRESENT); - if (pac == PMAP_ACTION_ASSERT) + if (pac == PMAP_ACTION_ASSERT) { panic("Possible memory corruption: pmap_pv_remove(%p, 0x%llx, 0x%x, 0x%llx, %p, %p): pv not on hash, head: %p, 0x%llx, priors: %d", pmap, vaddr, ppn, *pte, ppnp, pte, pv_h->pmap, PVE_VA(pv_h), pmap_pagetable_corruption_incidents); - else { + } else { UNLOCK_PV_HASH(pvhash_idx); if (pac == PMAP_ACTION_RETRY_RELOCK) { LOCK_PVH(ppn_to_pai(*ppnp)); pmap_phys_attributes[ppn_to_pai(*ppnp)] |= (PHYS_MODIFIED | PHYS_REFERENCED); goto pmap_pv_remove_retry; - } - else if (pac == PMAP_ACTION_RETRY) { + } else if (pac == PMAP_ACTION_RETRY) { goto pmap_pv_remove_retry; - } - else if (pac == PMAP_ACTION_IGNORE) { + } else if (pac == PMAP_ACTION_IGNORE) { goto pmap_pv_remove_exit; } } @@ -878,8 +931,9 @@ pmap_pv_remove_retry: *was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pvh_e); pmap_pv_hashlist_cnts += pv_cnt; - if (pmap_pv_hashlist_max < pv_cnt) + if (pmap_pv_hashlist_max < pv_cnt) { pmap_pv_hashlist_max = pv_cnt; + } *pprevh = pvh_e->nexth; remque(&pvh_e->qlink); UNLOCK_PV_HASH(pvhash_idx); @@ -890,14 +944,14 @@ pmap_pv_remove_exit: static inline __attribute__((always_inline)) boolean_t pmap_pv_is_altacct( - pmap_t pmap, - vm_map_offset_t vaddr, - ppnum_t ppn) + pmap_t pmap, + vm_map_offset_t vaddr, + ppnum_t ppn) { pv_hashed_entry_t pvh_e; - pv_rooted_entry_t pv_h; + pv_rooted_entry_t pv_h; int pvhash_idx; - boolean_t is_altacct; + boolean_t is_altacct; pvh_e = PV_HASHED_ENTRY_NULL; pv_h = pai_to_pvh(ppn_to_pai(ppn)); @@ -908,8 +962,8 @@ pmap_pv_is_altacct( if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) { /* - * Header is the pv_rooted_entry. - */ + * Header is the pv_rooted_entry. + */ return IS_ALTACCT_PAGE(ppn, pv_h); } @@ -920,8 +974,9 @@ pmap_pv_is_altacct( while (PV_HASHED_ENTRY_NULL != pvh_e) { if (pvh_e->pmap == pmap && PVE_VA(pvh_e) == vaddr && - pvh_e->ppn == ppn) + pvh_e->ppn == ppn) { break; + } pvh_e = pvh_e->nexth; } if (PV_HASHED_ENTRY_NULL == pvh_e) { @@ -934,7 +989,7 @@ pmap_pv_is_altacct( return is_altacct; } -extern int pt_fake_zone_index; +extern int pt_fake_zone_index; static inline void PMAP_ZINFO_PALLOC(pmap_t pmap, vm_size_t bytes) { @@ -959,71 +1014,60 @@ PMAP_ZINFO_SFREE(pmap_t pmap, vm_size_t bytes) pmap_ledger_debit(pmap, task_ledgers.tkm_shared, bytes); } -extern boolean_t pmap_initialized;/* Has pmap_init completed? */ +extern boolean_t pmap_initialized;/* Has pmap_init completed? 
*/ #define valid_page(x) (pmap_initialized && pmap_valid_page(x)) -int phys_attribute_test( - ppnum_t phys, - int bits); -void phys_attribute_clear( - ppnum_t phys, - int bits, - unsigned int options, - void *arg); +int phys_attribute_test( + ppnum_t phys, + int bits); +void phys_attribute_clear( + ppnum_t phys, + int bits, + unsigned int options, + void *arg); //#define PCID_DEBUG 1 -#if PCID_DEBUG -#define pmap_pcid_log(fmt, args...) \ - do { \ - kprintf(fmt, ##args); \ - printf(fmt, ##args); \ +#if PCID_DEBUG +#define pmap_pcid_log(fmt, args...) \ + do { \ + kprintf(fmt, ##args); \ + printf(fmt, ##args); \ } while(0) #else #define pmap_pcid_log(fmt, args...) #endif -void pmap_pcid_configure(void); +void pmap_pcid_configure(void); /* * Atomic 64-bit compare and exchange of a page table entry. */ + +#include static inline boolean_t pmap_cmpx_pte(pt_entry_t *entryp, pt_entry_t old, pt_entry_t new) { - boolean_t ret; - - /* - * Load the old value into %rax - * Load the new value into another register - * Compare-exchange-quad at address entryp - * If the compare succeeds, the new value is stored, return TRUE. - * Otherwise, no swap is made, return FALSE. - */ - asm volatile( - " lock; cmpxchgq %2,(%3) \n\t" - " setz %%al \n\t" - " movzbl %%al,%0" - : "=a" (ret) - : "a" (old), - "r" (new), - "r" (entryp) - : "memory"); - return ret; + return __c11_atomic_compare_exchange_strong((_Atomic pt_entry_t *)entryp, &old, new, + memory_order_acq_rel_smp, memory_order_relaxed); } extern uint32_t pmap_update_clear_pte_count; -static inline void pmap_update_pte(pt_entry_t *mptep, uint64_t pclear_bits, uint64_t pset_bits) { +static inline void +pmap_update_pte(pt_entry_t *mptep, uint64_t pclear_bits, uint64_t pset_bits) +{ pt_entry_t npte, opte; do { opte = *mptep; if (__improbable(opte == 0)) { +#if DEVELOPMENT || DEBUG pmap_update_clear_pte_count++; +#endif break; } npte = opte & ~(pclear_bits); npte |= pset_bits; - } while (!pmap_cmpx_pte(mptep, opte, npte)); + } while (!pmap_cmpx_pte(mptep, opte, npte)); } /* @@ -1036,14 +1080,14 @@ pml4_entry_t * pmap64_pml4(pmap_t pmap, vm_map_offset_t vaddr) { if (__improbable((vaddr > 0x00007FFFFFFFFFFFULL) && - (vaddr < 0xFFFF800000000000ULL))) { - return (NULL); + (vaddr < 0xFFFF800000000000ULL))) { + return NULL; } -#if DEBUG - return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_cr3)[(vaddr >> PML4SHIFT) & (NPML4PG-1)]); +#if DEBUG + return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_cr3)[(vaddr >> PML4SHIFT) & (NPML4PG - 1)]); #else - return &pmap->pm_pml4[(vaddr >> PML4SHIFT) & (NPML4PG-1)]; + return &pmap->pm_pml4[(vaddr >> PML4SHIFT) & (NPML4PG - 1)]; #endif } @@ -1051,14 +1095,14 @@ static inline pml4_entry_t * pmap64_user_pml4(pmap_t pmap, vm_map_offset_t vaddr) { if (__improbable((vaddr > 0x00007FFFFFFFFFFFULL) && - (vaddr < 0xFFFF800000000000ULL))) { - return (NULL); + (vaddr < 0xFFFF800000000000ULL))) { + return NULL; } -#if DEBUG - return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_ucr3)[(vaddr >> PML4SHIFT) & (NPML4PG-1)]); +#if DEBUG + return PHYSMAP_PTOV(&((pml4_entry_t *)pmap->pm_ucr3)[(vaddr >> PML4SHIFT) & (NPML4PG - 1)]); #else - return &pmap->pm_upml4[(vaddr >> PML4SHIFT) & (NPML4PG-1)]; + return &pmap->pm_upml4[(vaddr >> PML4SHIFT) & (NPML4PG - 1)]; #endif } @@ -1068,9 +1112,9 @@ pmap64_user_pml4(pmap_t pmap, vm_map_offset_t vaddr) static inline pdpt_entry_t * pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr) { - pml4_entry_t newpf; - pml4_entry_t *pml4; - boolean_t is_ept; + pml4_entry_t newpf; + pml4_entry_t *pml4; + boolean_t is_ept; pml4 = 
pmap64_pml4(pmap, vaddr); is_ept = is_ept_pmap(pmap); @@ -1078,39 +1122,53 @@ pmap64_pdpt(pmap_t pmap, vm_map_offset_t vaddr) if (pml4 && (*pml4 & PTE_VALID_MASK(is_ept))) { newpf = *pml4 & PG_FRAME; return &((pdpt_entry_t *) PHYSMAP_PTOV(newpf)) - [(vaddr >> PDPTSHIFT) & (NPDPTPG-1)]; + [(vaddr >> PDPTSHIFT) & (NPDPTPG - 1)]; } - return (NULL); + return NULL; } /* * Returns the address of the requested PDE entry in the physmap. */ static inline pd_entry_t * -pmap64_pde(pmap_t pmap, vm_map_offset_t vaddr) +pmap_pde_internal1(vm_map_offset_t vaddr, boolean_t is_ept, pdpt_entry_t *pdpte) { - pdpt_entry_t newpf; - pdpt_entry_t *pdpt; - boolean_t is_ept; + if (*pdpte & PTE_VALID_MASK(is_ept)) { + pdpt_entry_t newpf = *pdpte & PG_FRAME; + return &((pd_entry_t *) PHYSMAP_PTOV(newpf)) + [(vaddr >> PDSHIFT) & (NPDPG - 1)]; + } else { + return NULL; + } +} - pdpt = pmap64_pdpt(pmap, vaddr); - is_ept = is_ept_pmap(pmap); +static inline pd_entry_t * +pmap_pde_internal0(pmap_t pmap, vm_map_offset_t vaddr, boolean_t is_ept) +{ + pdpt_entry_t *pdpt; - if (pdpt && (*pdpt & PTE_VALID_MASK(is_ept))) { - newpf = *pdpt & PG_FRAME; - return &((pd_entry_t *) PHYSMAP_PTOV(newpf)) - [(vaddr >> PDSHIFT) & (NPDPG-1)]; + pdpt = pmap64_pdpt(pmap, vaddr); + if (pdpt) { + return pmap_pde_internal1(vaddr, is_ept, pdpt); + } else { + return NULL; } - return (NULL); } -static inline pd_entry_t * -pmap_pde(pmap_t m, vm_map_offset_t v) + +static inline pd_entry_t * +pmap_pde(pmap_t pmap, vm_map_offset_t vaddr) { - pd_entry_t *pde; + pdpt_entry_t *pdpt; + boolean_t is_ept; - pde = pmap64_pde(m, v); + pdpt = pmap64_pdpt(pmap, vaddr); + is_ept = is_ept_pmap(pmap); - return pde; + if (pdpt) { + return pmap_pde_internal1(vaddr, is_ept, pdpt); + } else { + return NULL; + } } @@ -1120,36 +1178,51 @@ pmap_pde(pmap_t m, vm_map_offset_t v) * In case the pde maps a superpage, return the pde, which, in this case * is the actual page table entry. */ + + +static inline pt_entry_t * +pmap_pte_internal(vm_map_offset_t vaddr, boolean_t is_ept, pd_entry_t *pde) +{ + if (*pde & PTE_VALID_MASK(is_ept)) { + if (__improbable(*pde & PTE_PS)) { + return pde; + } + pd_entry_t newpf = *pde & PG_FRAME; + + return &((pt_entry_t *)PHYSMAP_PTOV(newpf)) + [i386_btop(vaddr) & (ppnum_t)(NPTEPG - 1)]; + } else { + return NULL; + } +} + static inline pt_entry_t * pmap_pte(pmap_t pmap, vm_map_offset_t vaddr) { - pd_entry_t *pde; - pd_entry_t newpf; - boolean_t is_ept; + pd_entry_t *pde; - assert(pmap); - pde = pmap64_pde(pmap, vaddr); + boolean_t is_ept; is_ept = is_ept_pmap(pmap); - if (pde && (*pde & PTE_VALID_MASK(is_ept))) { - if (*pde & PTE_PS) - return pde; - newpf = *pde & PG_FRAME; - return &((pt_entry_t *)PHYSMAP_PTOV(newpf)) - [i386_btop(vaddr) & (ppnum_t)(NPTEPG-1)]; + pde = pmap_pde_internal0(pmap, vaddr, is_ept); + + if (pde) { + return pmap_pte_internal(vaddr, is_ept, pde); + } else { + return NULL; } - return (NULL); } -extern void pmap_alias( - vm_offset_t ava, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t prot, - unsigned int options); - -#if DEBUG -#define DPRINTF(x...) kprintf(x) + +extern void pmap_alias( + vm_offset_t ava, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t prot, + unsigned int options); + +#if DEBUG +#define DPRINTF(x...) kprintf(x) #else #define DPRINTF(x...) #endif diff --git a/osfmk/i386/pmap_pcid.h b/osfmk/i386/pmap_pcid.h index 372f0a4ed..87da57912 100644 --- a/osfmk/i386/pmap_pcid.h +++ b/osfmk/i386/pmap_pcid.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. 
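
The pmap64_pml4/pmap64_pdpt/pmap_pde/pmap_pte chain above (now split into *_internal0/*_internal1 helpers so is_ept_pmap() is evaluated once per walk) consumes nine bits of the virtual address per level, after rejecting non-canonical addresses. A standalone sketch of the index math, with the standard x86-64 shifts assumed (they match PML4SHIFT/PDPTSHIFT/PDSHIFT):

#include <stdint.h>
#include <stdio.h>

#define NENTRIES 512ULL	/* 512 eight-byte entries per 4 KiB table */

int
main(void)
{
	uint64_t va = 0x00007F8012345678ULL;	/* example user address */

	/* Canonical check, as in pmap64_pml4(): the VA must not fall in
	 * the hole between the user and kernel halves. */
	int canonical = (va <= 0x00007FFFFFFFFFFFULL) ||
	    (va >= 0xFFFF800000000000ULL);

	unsigned pml4_idx = (va >> 39) & (NENTRIES - 1);	/* 512 GiB slots */
	unsigned pdpt_idx = (va >> 30) & (NENTRIES - 1);	/* 1 GiB slots */
	unsigned pde_idx  = (va >> 21) & (NENTRIES - 1);	/* 2 MiB slots */
	unsigned pte_idx  = (va >> 12) & (NENTRIES - 1);	/* 4 KiB pages */

	printf("canonical=%d pml4=%u pdpt=%u pde=%u pte=%u\n",
	    canonical, pml4_idx, pdpt_idx, pde_idx, pte_idx);
	return 0;
}
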
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,68 +22,48 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _PMAP_PCID_ -#define _PMAP_PCID_ 1 +#ifndef _PMAP_PCID_ +#define _PMAP_PCID_ 1 #if defined(__x86_64__) void pmap_pcid_initialize(pmap_t); void pmap_pcid_initialize_kernel(pmap_t); -pcid_t pmap_pcid_allocate_pcid(int); -void pmap_pcid_deallocate_pcid(int, pmap_t); -void pmap_destroy_pcid_sync_action(void *); -void pmap_destroy_pcid_sync(pmap_t); -void pmap_pcid_lazy_flush(pmap_t); -void pmap_pcid_activate(pmap_t, int, boolean_t, boolean_t); -pcid_t pcid_for_pmap_cpu_tuple(pmap_t, thread_t, int); +pcid_t pmap_pcid_allocate_pcid(int); +void pmap_pcid_deallocate_pcid(int, pmap_t); +void pmap_destroy_pcid_sync_action(void *); +void pmap_destroy_pcid_sync(pmap_t); +void pmap_pcid_lazy_flush(pmap_t); +void pmap_pcid_activate(pmap_t, int, boolean_t, boolean_t); +pcid_t pcid_for_pmap_cpu_tuple(pmap_t, thread_t, int); #define PMAP_INVALID ((pmap_t)0xDEAD7347) -#define PMAP_PCID_INVALID_PCID (0xDEAD) -#define PMAP_PCID_MAX_REFCOUNT (0xF0) -#define PMAP_PCID_MIN_PCID (1) +#define PMAP_PCID_INVALID_PCID (0xDEAD) +#define PMAP_PCID_MAX_REFCOUNT (0xF0) +#define PMAP_PCID_MIN_PCID (1) extern uint32_t pmap_pcid_ncpus; static inline void -tlb_flush_global(void) { - uintptr_t cr4 = get_cr4(); - pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() !=0); - pmap_assert2(((cr4 & CR4_PGE) || ml_at_interrupt_context()), "CR4: 0x%lx", cr4); - /* - * We are, unfortunately, forced to rely on this expensive - * read-modify-write-write scheme due to the inadequate - * TLB invalidation ISA. The read is necessary as - * the kernel does not "own" the contents of CR4, the VMX - * feature in particular. It may be possible to - * avoid a global flush and instead track a generation - * count of kernel invalidations, but that scheme - * has its disadvantages as well. 
- */ - if (cr4 & CR4_PGE) { - set_cr4(cr4 & ~CR4_PGE); - set_cr4(cr4 | CR4_PGE); - } else { - set_cr3_raw(get_cr3_raw()); - } - return; -} - -static inline void pmap_pcid_invalidate_all_cpus(pmap_t tpmap) { +pmap_pcid_invalidate_all_cpus(pmap_t tpmap) +{ unsigned i; pmap_assert((sizeof(tpmap->pmap_pcid_coherency_vector) >= real_ncpus) && (!(sizeof(tpmap->pmap_pcid_coherency_vector) & 7))); - for (i = 0; i < real_ncpus; i+=8) { - *(uint64_t *)(uintptr_t)&tpmap->pmap_pcid_coherency_vector[i] = (~0ULL); - } + for (i = 0; i < real_ncpus; i += 8) { + *(uint64_t *)(uintptr_t)&tpmap->pmap_pcid_coherency_vector[i] = (~0ULL); + } } -static inline void pmap_pcid_validate_current(void) { - int ccpu = cpu_number(); +static inline void +pmap_pcid_validate_current(void) +{ + int ccpu = cpu_number(); volatile uint8_t *cptr = cpu_datap(ccpu)->cpu_pmap_pcid_coherentp; -#ifdef PMAP_MODULE +#ifdef PMAP_MODULE pmap_assert(cptr == &(current_thread()->map->pmap->pmap_pcid_coherency_vector[ccpu])); #endif if (cptr) { @@ -91,11 +71,15 @@ static inline void pmap_pcid_validate_current(void) { } } -static inline void pmap_pcid_invalidate_cpu(pmap_t tpmap, int ccpu) { +static inline void +pmap_pcid_invalidate_cpu(pmap_t tpmap, int ccpu) +{ tpmap->pmap_pcid_coherency_vector[ccpu] = 0xFF; } -static inline void pmap_pcid_validate_cpu(pmap_t tpmap, int ccpu) { +static inline void +pmap_pcid_validate_cpu(pmap_t tpmap, int ccpu) +{ tpmap->pmap_pcid_coherency_vector[ccpu] = 0; } #endif /* x86_64 */ diff --git a/osfmk/i386/pmap_x86_common.c b/osfmk/i386/pmap_x86_common.c index 443b97217..93169df60 100644 --- a/osfmk/i386/pmap_x86_common.c +++ b/osfmk/i386/pmap_x86_common.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
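
pmap_pcid_invalidate_all_cpus() above marks every CPU's coherency byte stale using 64-bit stores, eight bytes per iteration; the pmap_assert guarantees the vector is a multiple of eight bytes, which the trick requires. A standalone sketch of the same idea (sizes and names assumed):

#include <stdint.h>

#define MAX_CPUS 64

static volatile uint8_t coherency_vector[MAX_CPUS]
    __attribute__((aligned(8)));

/* Mark all CPUs stale in MAX_CPUS/8 stores rather than MAX_CPUS. */
static void
invalidate_all_cpus(unsigned ncpus)
{
	for (unsigned i = 0; i < ncpus; i += 8) {
		*(volatile uint64_t *)(uintptr_t)&coherency_vector[i] = ~0ULL;
	}
}
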
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,25 +34,25 @@ #include #include -void pmap_remove_range( - pmap_t pmap, - vm_map_offset_t va, - pt_entry_t *spte, - pt_entry_t *epte); - -void pmap_remove_range_options( - pmap_t pmap, - vm_map_offset_t va, - pt_entry_t *spte, - pt_entry_t *epte, - int options); - -void pmap_reusable_range( - pmap_t pmap, - vm_map_offset_t va, - pt_entry_t *spte, - pt_entry_t *epte, - boolean_t reusable); +void pmap_remove_range( + pmap_t pmap, + vm_map_offset_t va, + pt_entry_t *spte, + pt_entry_t *epte); + +void pmap_remove_range_options( + pmap_t pmap, + vm_map_offset_t va, + pt_entry_t *spte, + pt_entry_t *epte, + int options); + +void pmap_reusable_range( + pmap_t pmap, + vm_map_offset_t va, + pt_entry_t *spte, + pt_entry_t *epte, + boolean_t reusable); uint32_t pmap_update_clear_pte_count; @@ -89,61 +89,63 @@ uint64_t pmap_nesting_size_max = 0 - (uint64_t)NBPDE; * pagetable hierarchy which can be unnecessarily sparse (DRK). */ -kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint64_t size) { - vm_map_offset_t vaddr, nvaddr; - pd_entry_t *pde,*npde; - unsigned int i; - uint64_t num_pde; +kern_return_t +pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t nstart, uint64_t size) +{ + vm_map_offset_t vaddr, nvaddr; + pd_entry_t *pde, *npde; + unsigned int i; + uint64_t num_pde; assert(!is_ept_pmap(grand)); assert(!is_ept_pmap(subord)); - if ((size & (pmap_nesting_size_min-1)) || - (va_start & (pmap_nesting_size_min-1)) || - (nstart & (pmap_nesting_size_min-1)) || - ((size >> 28) > 65536)) /* Max size we can nest is 16TB */ + if ((size & (pmap_nesting_size_min - 1)) || + (va_start & (pmap_nesting_size_min - 1)) || + (nstart & (pmap_nesting_size_min - 1)) || + ((size >> 28) > 65536)) { /* Max size we can nest is 16TB */ return KERN_INVALID_VALUE; + } - if(size == 0) { + if (size == 0) { panic("pmap_nest: size is invalid - %016llX\n", size); } - if (va_start != nstart) + if (va_start != nstart) { panic("pmap_nest: va_start(0x%llx) != nstart(0x%llx)\n", va_start, nstart); + } PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(subord), - VM_KERNEL_ADDRHIDE(va_start)); + VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(subord), + VM_KERNEL_ADDRHIDE(va_start)); nvaddr = (vm_map_offset_t)nstart; num_pde = size >> PDESHIFT; - PMAP_LOCK(subord); + PMAP_LOCK_EXCLUSIVE(subord); subord->pm_shared = TRUE; for (i = 0; i < num_pde;) { - if (((nvaddr & PDPTMASK) == 0) && (num_pde - i) >= NPDEPG && cpu_64bit) { - + if (((nvaddr & PDPTMASK) == 0) && (num_pde - i) >= NPDEPG) { npde = pmap64_pdpt(subord, nvaddr); while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) { - PMAP_UNLOCK(subord); + PMAP_UNLOCK_EXCLUSIVE(subord); pmap_expand_pdpt(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE); - PMAP_LOCK(subord); + PMAP_LOCK_EXCLUSIVE(subord); npde = pmap64_pdpt(subord, nvaddr); } *npde |= INTEL_PDPTE_NESTED; nvaddr += NBPDPT; i += (uint32_t)NPDEPG; - } - else { + } else { npde = pmap_pde(subord, nvaddr); while (0 == npde || ((*npde & INTEL_PTE_VALID) == 0)) { - PMAP_UNLOCK(subord); + PMAP_UNLOCK_EXCLUSIVE(subord); pmap_expand(subord, nvaddr, PMAP_EXPAND_OPTIONS_NONE); - PMAP_LOCK(subord); + PMAP_LOCK_EXCLUSIVE(subord); npde = pmap_pde(subord, nvaddr); } nvaddr += NBPDE; @@ -151,55 +153,58 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t } } - PMAP_UNLOCK(subord); + PMAP_UNLOCK_EXCLUSIVE(subord); vaddr = (vm_map_offset_t)va_start; - 
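
The expansion loops in pmap_nest() above drop the exclusive pmap lock around pmap_expand_pdpt()/pmap_expand() (which may allocate and block), retake it, and re-test the entry, because another thread may have populated the same slot in the window. A toy user-space rendering of that unlock/allocate/relock/re-check shape (pthread-based; the kernel re-looks the entry up rather than freeing a loser's allocation):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static void *slot;	/* stands in for the missing page-table page */

static void *
ensure_slot(void)
{
	pthread_mutex_lock(&map_lock);
	while (slot == NULL) {
		pthread_mutex_unlock(&map_lock);
		void *fresh = malloc(4096);	/* may block; lock not held */
		pthread_mutex_lock(&map_lock);
		if (slot == NULL) {
			slot = fresh;		/* we won the race */
		} else {
			free(fresh);		/* lost the race; discard */
		}
	}
	void *ret = slot;
	pthread_mutex_unlock(&map_lock);
	return ret;
}
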
PMAP_LOCK(grand); + PMAP_LOCK_EXCLUSIVE(grand); - for (i = 0;i < num_pde;) { + for (i = 0; i < num_pde;) { pd_entry_t tpde; - if (((vaddr & PDPTMASK) == 0) && ((num_pde - i) >= NPDEPG) && cpu_64bit) { + if (((vaddr & PDPTMASK) == 0) && ((num_pde - i) >= NPDEPG)) { npde = pmap64_pdpt(subord, vaddr); - if (npde == 0) + if (npde == 0) { panic("pmap_nest: no PDPT, subord %p nstart 0x%llx", subord, vaddr); + } tpde = *npde; pde = pmap64_pdpt(grand, vaddr); if (0 == pde) { - PMAP_UNLOCK(grand); + PMAP_UNLOCK_EXCLUSIVE(grand); pmap_expand_pml4(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE); - PMAP_LOCK(grand); + PMAP_LOCK_EXCLUSIVE(grand); pde = pmap64_pdpt(grand, vaddr); } - if (pde == 0) + if (pde == 0) { panic("pmap_nest: no PDPT, grand %p vaddr 0x%llx", grand, vaddr); + } pmap_store_pte(pde, tpde); vaddr += NBPDPT; i += (uint32_t) NPDEPG; - } - else { + } else { npde = pmap_pde(subord, vaddr); - if (npde == 0) + if (npde == 0) { panic("pmap_nest: no npde, subord %p vaddr 0x%llx", subord, vaddr); + } tpde = *npde; pde = pmap_pde(grand, vaddr); - if ((0 == pde) && cpu_64bit) { - PMAP_UNLOCK(grand); + if (0 == pde) { + PMAP_UNLOCK_EXCLUSIVE(grand); pmap_expand_pdpt(grand, vaddr, PMAP_EXPAND_OPTIONS_NONE); - PMAP_LOCK(grand); + PMAP_LOCK_EXCLUSIVE(grand); pde = pmap_pde(grand, vaddr); } - if (pde == 0) + if (pde == 0) { panic("pmap_nest: no pde, grand %p vaddr 0x%llx", grand, vaddr); + } vaddr += NBPDE; pmap_store_pte(pde, tpde); i++; } } - PMAP_UNLOCK(grand); + PMAP_UNLOCK_EXCLUSIVE(grand); PMAP_TRACE(PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, KERN_SUCCESS); @@ -215,7 +220,9 @@ kern_return_t pmap_nest(pmap_t grand, pmap_t subord, addr64_t va_start, addr64_t * Removes a pmap from another. This is used to implement shared segments. */ -kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) { +kern_return_t +pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) +{ pd_entry_t *pde; unsigned int i; uint64_t num_pde; @@ -223,10 +230,10 @@ kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) { uint64_t npdpt = PMAP_INVALID_PDPTNUM; PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(vaddr)); + VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(vaddr)); - if ((size & (pmap_nesting_size_min-1)) || - (vaddr & (pmap_nesting_size_min-1))) { + if ((size & (pmap_nesting_size_min - 1)) || + (vaddr & (pmap_nesting_size_min - 1))) { panic("pmap_unnest(%p,0x%llx,0x%llx): unaligned...\n", grand, vaddr, size); } @@ -234,17 +241,17 @@ kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) { assert(!is_ept_pmap(grand)); /* align everything to PDE boundaries */ - va_start = vaddr & ~(NBPDE-1); - va_end = (vaddr + size + NBPDE - 1) & ~(NBPDE-1); + va_start = vaddr & ~(NBPDE - 1); + va_end = (vaddr + size + NBPDE - 1) & ~(NBPDE - 1); size = va_end - va_start; - PMAP_LOCK(grand); + PMAP_LOCK_EXCLUSIVE(grand); num_pde = size >> PDESHIFT; vaddr = va_start; - for (i = 0; i < num_pde; ) { - if ((pdptnum(grand, vaddr) != npdpt) && cpu_64bit) { + for (i = 0; i < num_pde;) { + if (pdptnum(grand, vaddr) != npdpt) { npdpt = pdptnum(grand, vaddr); pde = pmap64_pdpt(grand, vaddr); if (pde && (*pde & INTEL_PDPTE_NESTED)) { @@ -255,8 +262,9 @@ kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) { } } pde = pmap_pde(grand, (vm_map_offset_t)vaddr); - if (pde == 0) + if (pde == 0) { panic("pmap_unnest: no pde, grand %p vaddr 0x%llx\n", grand, vaddr); + } pmap_store_pte(pde, (pd_entry_t)0); i++; vaddr += NBPDE; @@ -264,7 +272,7 @@ 
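
pmap_unnest() below widens the request outward to whole PDEs ("align everything to PDE boundaries"), since nesting is tracked at 2 MiB granularity: the start rounds down, the end rounds up. The two roundings in isolation:

#include <stdint.h>

#define NBPDE (1ULL << 21)	/* bytes mapped by one PDE: 2 MiB */

static uint64_t
pde_align_down(uint64_t va)	/* va_start = vaddr & ~(NBPDE - 1) */
{
	return va & ~(NBPDE - 1);
}

static uint64_t
pde_align_up(uint64_t va)	/* va_end, rounded to the next boundary */
{
	return (va + NBPDE - 1) & ~(NBPDE - 1);
}
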
kern_return_t pmap_unnest(pmap_t grand, addr64_t vaddr, uint64_t size) { PMAP_UPDATE_TLBS(grand, va_start, va_end); - PMAP_UNLOCK(grand); + PMAP_UNLOCK_EXCLUSIVE(grand); PMAP_TRACE(PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_END, KERN_SUCCESS); @@ -276,34 +284,34 @@ pmap_unnest_options( pmap_t grand, addr64_t vaddr, __unused uint64_t size, - __unused unsigned int options) { + __unused unsigned int options) +{ return pmap_unnest(grand, vaddr, size); } /* Invoked by the Mach VM to determine the platform specific unnest region */ -boolean_t pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e) { +boolean_t +pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_offset_t *e) +{ pd_entry_t *pdpte; boolean_t rval = FALSE; - if (!cpu_64bit) - return rval; - - PMAP_LOCK(p); + PMAP_LOCK_EXCLUSIVE(p); pdpte = pmap64_pdpt(p, *s); if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) { - *s &= ~(NBPDPT -1); + *s &= ~(NBPDPT - 1); rval = TRUE; } pdpte = pmap64_pdpt(p, *e); if (pdpte && (*pdpte & INTEL_PDPTE_NESTED)) { - *e = ((*e + NBPDPT) & ~(NBPDPT -1)); + *e = ((*e + NBPDPT) & ~(NBPDPT - 1)); rval = TRUE; } - PMAP_UNLOCK(p); + PMAP_UNLOCK_EXCLUSIVE(p); return rval; } @@ -318,24 +326,25 @@ boolean_t pmap_adjust_unnest_parameters(pmap_t p, vm_map_offset_t *s, vm_map_off ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va) { - pt_entry_t *ptp; - pd_entry_t *pdep; - ppnum_t ppn = 0; - pd_entry_t pde; - pt_entry_t pte; - boolean_t is_ept; + pt_entry_t *ptp; + pd_entry_t *pdep; + ppnum_t ppn = 0; + pd_entry_t pde; + pt_entry_t pte; + boolean_t is_ept, locked = FALSE; is_ept = is_ept_pmap(pmap); - mp_disable_preemption(); + if ((pmap != kernel_pmap) && not_in_kdp) { + PMAP_LOCK_EXCLUSIVE(pmap); + locked = TRUE; + } else { + mp_disable_preemption(); + } - /* This refcount test is a band-aid--several infrastructural changes - * are necessary to eliminate invocation of this routine from arbitrary - * contexts. - */ - - if (!pmap->ref_count) + if (!pmap->ref_count) { goto pfp_exit; + } pdep = pmap_pde(pmap, va); @@ -343,18 +352,21 @@ pmap_find_phys(pmap_t pmap, addr64_t va) if (pde & PTE_PS) { ppn = (ppnum_t) i386_btop(pte_to_pa(pde)); ppn += (ppnum_t) ptenum(va); - } - else { + } else { ptp = pmap_pte(pmap, va); if ((PT_ENTRY_NULL != ptp) && (((pte = *ptp) & PTE_VALID_MASK(is_ept)) != 0)) { ppn = (ppnum_t) i386_btop(pte_to_pa(pte)); } } } -pfp_exit: - mp_enable_preemption(); +pfp_exit: + if (locked) { + PMAP_UNLOCK_EXCLUSIVE(pmap); + } else { + mp_enable_preemption(); + } - return ppn; + return ppn; } /* @@ -368,23 +380,25 @@ pfp_exit: * PHYS_CACHEABILITY_MASK. 
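
In pmap_find_phys() above, the PTE_PS case computes the physical page number directly from the 2 MiB PDE: the PDE frame gives the first 4 KiB page of the superpage, and ptenum(va), the low nine bits of the page number, selects the page within it. A sketch with the usual x86-64 constants assumed:

#include <stdint.h>

#define PAGE_SHIFT 12
#define PG_FRAME   0x000FFFFFFFFFF000ULL

/* ppn for a VA mapped by a 2 MiB (PTE_PS) page-directory entry. */
static uint64_t
ppn_for_superpage(uint64_t pde, uint64_t va)
{
	uint64_t base_ppn = (pde & PG_FRAME) >> PAGE_SHIFT;
	uint64_t ptenum   = (va >> PAGE_SHIFT) & 0x1FF;	/* low 9 bits */
	return base_ppn + ptenum;
}
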
*/ void -pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) { - pv_rooted_entry_t pv_h, pv_e; +pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) +{ + pv_rooted_entry_t pv_h, pv_e; pv_hashed_entry_t pvh_e, nexth; vm_map_offset_t vaddr; - pmap_t pmap; - pt_entry_t *ptep; - boolean_t is_ept; - unsigned ept_attributes; - + pmap_t pmap; + pt_entry_t *ptep; + boolean_t is_ept; + unsigned ept_attributes; + assert(IS_MANAGED_PAGE(pn)); assert(((~PHYS_CACHEABILITY_MASK) & attributes) == 0); - /* We don't support the PTA bit for EPT PTEs */ - if (attributes & INTEL_PTE_NCACHE) + /* We don't support the PAT bit for EPT PTEs */ + if (attributes & INTEL_PTE_NCACHE) { ept_attributes = INTEL_EPT_NCACHE; - else + } else { ept_attributes = INTEL_EPT_WB; + } pv_h = pai_to_pvh(pn); /* TODO: translate the PHYS_* bits to PTE bits, while they're @@ -393,7 +407,7 @@ pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) { * parallel shootdowns, check for redundant * attribute modifications. */ - + /* * Alter attributes on all mappings */ @@ -405,9 +419,10 @@ pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) { pmap = pv_e->pmap; vaddr = PVE_VA(pv_e); ptep = pmap_pte(pmap, vaddr); - - if (0 == ptep) + + if (0 == ptep) { panic("pmap_update_cache_attributes_locked: Missing PTE, pmap: %p, pn: 0x%x vaddr: 0x%llx kernel_pmap: %p", pmap, pn, vaddr, kernel_pmap); + } is_ept = is_ept_pmap(pmap); @@ -423,7 +438,9 @@ pmap_update_cache_attributes_locked(ppnum_t pn, unsigned attributes) { } } -void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) { +void +x86_filter_TLB_coherency_interrupts(boolean_t dofilter) +{ assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); if (dofilter) { @@ -431,8 +448,7 @@ void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) { } else { CPU_CR3_MARK_ACTIVE(); mfence(); - if (current_cpu_datap()->cpu_tlb_invalid) - process_pmap_updates(); + pmap_update_interrupt(); } } @@ -452,57 +468,85 @@ void x86_filter_TLB_coherency_interrupts(boolean_t dofilter) { kern_return_t pmap_enter( - pmap_t pmap, - vm_map_offset_t vaddr, + pmap_t pmap, + vm_map_offset_t vaddr, ppnum_t pn, - vm_prot_t prot, - vm_prot_t fault_type, - unsigned int flags, - boolean_t wired) + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired) { return pmap_enter_options(pmap, vaddr, pn, prot, fault_type, flags, wired, PMAP_EXPAND_OPTIONS_NONE, NULL); } +#define PTE_LOCK(EPT) INTEL_PTE_SWLOCK + +static inline void PTE_LOCK_LOCK(pt_entry_t *); +static inline void PTE_LOCK_UNLOCK(pt_entry_t *); + +void +PTE_LOCK_LOCK(pt_entry_t *lpte) +{ + pt_entry_t pte; +plretry: + while ((pte = __c11_atomic_load((_Atomic pt_entry_t *)lpte, memory_order_relaxed)) & PTE_LOCK(0)) { + __builtin_ia32_pause(); + } + if (__c11_atomic_compare_exchange_strong((_Atomic pt_entry_t *)lpte, &pte, pte | PTE_LOCK(0), memory_order_acquire_smp, TRUE)) { + return; + } + + goto plretry; +} + +void +PTE_LOCK_UNLOCK(pt_entry_t *lpte) +{ + __c11_atomic_fetch_and((_Atomic pt_entry_t *)lpte, ~PTE_LOCK(0), memory_order_release_smp); +} kern_return_t pmap_enter_options( - pmap_t pmap, - vm_map_offset_t vaddr, + pmap_t pmap, + vm_map_offset_t vaddr, ppnum_t pn, - vm_prot_t prot, - __unused vm_prot_t fault_type, - unsigned int flags, - boolean_t wired, - unsigned int options, - void *arg) + vm_prot_t prot, + __unused vm_prot_t fault_type, + unsigned int flags, + boolean_t wired, + unsigned int options, + void *arg) { - pt_entry_t *pte; - 
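
PTE_LOCK_LOCK()/PTE_LOCK_UNLOCK() below implement a bit-spinlock inside the PTE itself (the INTEL_PTE_SWLOCK software bit): spin while the bit is set, attempt an acquire compare-exchange, and release by atomically clearing the bit. A user-space analog with <stdatomic.h> (the bit position here is illustrative, not xnu's):

#include <stdatomic.h>
#include <stdint.h>

#define PTE_SWLOCK (1ULL << 52)	/* assumed software-available bit */

/* Analog of PTE_LOCK_LOCK(): acquire the per-PTE lock bit. */
static void
pte_lock(_Atomic uint64_t *lpte)
{
	for (;;) {
		uint64_t pte = atomic_load_explicit(lpte, memory_order_relaxed);
		if (pte & PTE_SWLOCK) {
			continue;	/* kernel issues __builtin_ia32_pause() here */
		}
		if (atomic_compare_exchange_weak_explicit(lpte, &pte,
		    pte | PTE_SWLOCK,
		    memory_order_acquire, memory_order_relaxed)) {
			return;
		}
	}
}

/* Analog of PTE_LOCK_UNLOCK(): clear the bit with release ordering. */
static void
pte_unlock(_Atomic uint64_t *lpte)
{
	atomic_fetch_and_explicit(lpte, ~PTE_SWLOCK, memory_order_release);
}
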
pv_rooted_entry_t pv_h; - ppnum_t pai; - pv_hashed_entry_t pvh_e; - pv_hashed_entry_t pvh_new; - pt_entry_t template; - pmap_paddr_t old_pa; - pmap_paddr_t pa = (pmap_paddr_t) i386_ptob(pn); - boolean_t need_tlbflush = FALSE; - boolean_t set_NX; - char oattr; - boolean_t old_pa_locked; + pt_entry_t *pte = NULL; + pv_rooted_entry_t pv_h; + ppnum_t pai; + pv_hashed_entry_t pvh_e; + pv_hashed_entry_t pvh_new; + pt_entry_t template; + pmap_paddr_t old_pa; + pmap_paddr_t pa = (pmap_paddr_t) i386_ptob(pn); + boolean_t need_tlbflush = FALSE; + boolean_t set_NX; + char oattr; + boolean_t old_pa_locked; /* 2MiB mappings are confined to x86_64 by VM */ - boolean_t superpage = flags & VM_MEM_SUPERPAGE; - vm_object_t delpage_pm_obj = NULL; - uint64_t delpage_pde_index = 0; - pt_entry_t old_pte; - kern_return_t kr; - boolean_t is_ept; - boolean_t is_altacct; - - kr = KERN_FAILURE; + boolean_t superpage = flags & VM_MEM_SUPERPAGE; + vm_object_t delpage_pm_obj = NULL; + uint64_t delpage_pde_index = 0; + pt_entry_t old_pte; + kern_return_t kr = KERN_FAILURE; + boolean_t is_ept; + boolean_t is_altacct; + boolean_t ptelocked = FALSE; pmap_intr_assert(); - if (pmap == PMAP_NULL) + if (__improbable(pmap == PMAP_NULL)) { + return KERN_INVALID_ARGUMENT; + } + if (__improbable(pn == vm_page_guard_addr)) { return KERN_INVALID_ARGUMENT; + } is_ept = is_ept_pmap(pmap); @@ -511,49 +555,49 @@ pmap_enter_options( */ assert(pn != vm_page_fictitious_addr); - if (pn == vm_page_guard_addr) - return KERN_INVALID_ARGUMENT; PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(vaddr), pn, - prot); + VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(vaddr), pn, + prot); - if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled) + if ((prot & VM_PROT_EXECUTE)) { set_NX = FALSE; - else + } else { set_NX = TRUE; + } - if (__improbable(set_NX && (pmap == kernel_pmap) && ((pmap_disable_kstack_nx && (flags & VM_MEM_STACK)) || (pmap_disable_kheap_nx && !(flags & VM_MEM_STACK))))) { +#if DEVELOPMENT || DEBUG + if (__improbable(set_NX && (!nx_enabled || !pmap->nx_enabled))) { set_NX = FALSE; } - /* - * Must allocate a new pvlist entry while we're unlocked; - * zalloc may cause pageout (which will lock the pmap system). - * If we determine we need a pvlist entry, we will unlock - * and allocate one. Then we will retry, throughing away - * the allocated entry later (if we no longer need it). - */ + if (__improbable(set_NX && (pmap == kernel_pmap) && + ((pmap_disable_kstack_nx && (flags & VM_MEM_STACK)) || + (pmap_disable_kheap_nx && !(flags & VM_MEM_STACK))))) { + set_NX = FALSE; + } +#endif pvh_new = PV_HASHED_ENTRY_NULL; Retry: pvh_e = PV_HASHED_ENTRY_NULL; - PMAP_LOCK(pmap); + PMAP_LOCK_SHARED(pmap); /* * Expand pmap to include this pte. Assume that * pmap is always expanded to include enough hardware * pages to map one VM page. 
*/ - if (superpage) { - while ((pte = pmap64_pde(pmap, vaddr)) == PD_ENTRY_NULL) { + if (__improbable(superpage)) { + while ((pte = pmap_pde(pmap, vaddr)) == PD_ENTRY_NULL) { /* need room for another pde entry */ - PMAP_UNLOCK(pmap); + PMAP_UNLOCK_SHARED(pmap); kr = pmap_expand_pdpt(pmap, vaddr, options); - if (kr != KERN_SUCCESS) - goto done; - PMAP_LOCK(pmap); + if (kr != KERN_SUCCESS) { + goto done1; + } + PMAP_LOCK_SHARED(pmap); } } else { while ((pte = pmap_pte(pmap, vaddr)) == PT_ENTRY_NULL) { @@ -561,20 +605,22 @@ Retry: * Must unlock to expand the pmap * going to grow pde level page(s) */ - PMAP_UNLOCK(pmap); + PMAP_UNLOCK_SHARED(pmap); kr = pmap_expand(pmap, vaddr, options); - if (kr != KERN_SUCCESS) - goto done; - PMAP_LOCK(pmap); + if (kr != KERN_SUCCESS) { + goto done1; + } + PMAP_LOCK_SHARED(pmap); } } - if (options & PMAP_EXPAND_OPTIONS_NOENTER) { - PMAP_UNLOCK(pmap); + + if (__improbable(options & PMAP_EXPAND_OPTIONS_NOENTER)) { + PMAP_UNLOCK_SHARED(pmap); kr = KERN_SUCCESS; - goto done; + goto done1; } - if (superpage && *pte && !(*pte & PTE_PS)) { + if (__improbable(superpage && *pte && !(*pte & PTE_PS))) { /* * There is still an empty page table mapped that * was used for a previous base page mapping. @@ -583,15 +629,18 @@ Retry: */ delpage_pde_index = pdeidx(pmap, vaddr); delpage_pm_obj = pmap->pm_obj; - *pte = 0; + pmap_store_pte(pte, 0); } + PTE_LOCK_LOCK(pte); + ptelocked = TRUE; + old_pa = pte_to_pa(*pte); pai = pa_index(old_pa); old_pa_locked = FALSE; if (old_pa == 0 && - PTE_IS_COMPRESSED(*pte)) { + PTE_IS_COMPRESSED(*pte, pte)) { /* * "pmap" should be locked at this point, so this should * not race with another pmap_enter() or pmap_remove_range(). @@ -601,7 +650,7 @@ Retry: /* one less "compressed" */ OSAddAtomic64(-1, &pmap->stats.compressed); pmap_ledger_debit(pmap, task_ledgers.internal_compressed, - PAGE_SIZE); + PAGE_SIZE); if (*pte & PTE_COMPRESSED_ALT) { pmap_ledger_debit( pmap, @@ -610,7 +659,7 @@ Retry: } else { /* was part of the footprint */ pmap_ledger_debit(pmap, task_ledgers.phys_footprint, - PAGE_SIZE); + PAGE_SIZE); } /* marker will be cleared below */ } @@ -625,7 +674,7 @@ Retry: old_pa_locked = TRUE; old_pa = pte_to_pa(*pte); if (0 == old_pa) { - UNLOCK_PVH(pai); /* another path beat us to it */ + UNLOCK_PVH(pai); /* another path beat us to it */ old_pa_locked = FALSE; } } @@ -636,19 +685,18 @@ Retry: */ if (old_pa == pa) { pt_entry_t old_attributes = - *pte & ~(PTE_REF(is_ept) | PTE_MOD(is_ept)); + *pte & ~(PTE_REF(is_ept) | PTE_MOD(is_ept) | PTE_LOCK(is_ept)); /* - * May be changing its wired attribute or protection - */ + * May be changing its wired attribute or protection + */ template = pa_to_pte(pa); - /* ?: WORTH ASSERTING THAT AT LEAST ONE RWX (implicit valid) PASSED FOR EPT? 
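
The template construction above (note the hunk also corrects the INTEL_PTE_PTA/INTEL_EPT_IPTA spellings to INTEL_PTE_PAT/INTEL_EPT_IPAT) starts from the page frame and ORs in attribute bits derived from prot and flags. A simplified non-EPT rendering, with illustrative bit positions:

#include <stdbool.h>
#include <stdint.h>

#define PTE_VALID   (1ULL << 0)
#define PTE_WRITE   (1ULL << 1)
#define PTE_USER    (1ULL << 2)
#define PTE_NCACHE  (1ULL << 4)
#define PTE_PAT     (1ULL << 7)
#define PTE_NX      (1ULL << 63)
#define PG_FRAME    0x000FFFFFFFFFF000ULL

/* Toy analog of the pmap_enter_options() template for a non-EPT pmap. */
static uint64_t
make_template(uint64_t pa, bool user, bool writable, bool executable,
    bool uncached)
{
	uint64_t t = (pa & PG_FRAME) | PTE_VALID;

	if (user) {
		t |= PTE_USER;		/* kernel_pmap mappings omit this */
	}
	if (writable) {
		t |= PTE_WRITE;
	}
	if (!executable) {
		t |= PTE_NX;		/* the set_NX path */
	}
	if (uncached) {
		t |= PTE_NCACHE | PTE_PAT;	/* VM_MEM_NOT_CACHEABLE, not guarded */
	}
	return t;
}
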
*/ - if (!is_ept) { + if (__probable(!is_ept)) { template |= INTEL_PTE_VALID; } else { - template |= INTEL_EPT_IPTA; + template |= INTEL_EPT_IPAT; } template |= pmap_get_cache_attributes(pa_index(pa), is_ept); @@ -658,15 +706,18 @@ Retry: */ if (!is_ept && (VM_MEM_NOT_CACHEABLE == (flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)))) { - if (!(flags & VM_MEM_GUARDED)) - template |= INTEL_PTE_PTA; + if (!(flags & VM_MEM_GUARDED)) { + template |= INTEL_PTE_PAT; + } template |= INTEL_PTE_NCACHE; } - if (pmap != kernel_pmap && !is_ept) + if (pmap != kernel_pmap && !is_ept) { template |= INTEL_PTE_USER; + } - if (prot & VM_PROT_READ) + if (prot & VM_PROT_READ) { template |= PTE_READ(is_ept); + } if (prot & VM_PROT_WRITE) { template |= PTE_WRITE(is_ept); @@ -683,12 +734,13 @@ Retry: template = pte_set_ex(template, is_ept); } - if (set_NX) + if (set_NX) { template = pte_remove_ex(template, is_ept); + } if (wired) { template |= PTE_WIRED; - if (!iswired(old_attributes)) { + if (!iswired(old_attributes)) { OSAddAtomic(+1, &pmap->stats.wired_count); pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE); } @@ -700,23 +752,26 @@ Retry: } } - if (superpage) /* this path can not be used */ - template |= PTE_PS; /* to change the page size! */ - - if (old_attributes == template) + if (superpage) { /* this path can not be used */ + template |= PTE_PS; /* to change the page size! */ + } + if (old_attributes == template) { goto dont_update_pte; + } /* Determine delta, PV locked */ need_tlbflush = ((old_attributes ^ template) != PTE_WIRED); + /* Optimisation: avoid TLB flush when adding writability */ if (need_tlbflush == TRUE && !(old_attributes & PTE_WRITE(is_ept))) { - if ((old_attributes ^ template) == PTE_WRITE(is_ept)) + if ((old_attributes ^ template) == PTE_WRITE(is_ept)) { need_tlbflush = FALSE; + } } /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */ - if (is_ept && !pmap_ept_support_ad) { + if (__improbable(is_ept && !pmap_ept_support_ad)) { template |= PTE_REF(is_ept); if (old_pa_locked) { assert(IS_MANAGED_PAGE(pai)); @@ -725,17 +780,22 @@ Retry: } /* store modified PTE and preserve RC bits */ - pt_entry_t npte, opte;; + pt_entry_t npte, opte; + + assert((*pte & PTE_LOCK(is_ept)) != 0); + do { opte = *pte; - npte = template | (opte & (PTE_REF(is_ept) | PTE_MOD(is_ept))); + npte = template | (opte & (PTE_REF(is_ept) | + PTE_MOD(is_ept))) | PTE_LOCK(is_ept); } while (!pmap_cmpx_pte(pte, opte, npte)); + dont_update_pte: if (old_pa_locked) { UNLOCK_PVH(pai); old_pa_locked = FALSE; } - goto Done; + goto done2; } /* @@ -750,25 +810,25 @@ dont_update_pte: * overwritten at step 3). If the new physical page is not * managed, step 2) is skipped. */ - + /* TODO: add opportunistic refmod collect */ if (old_pa != (pmap_paddr_t) 0) { - boolean_t was_altacct = FALSE; + boolean_t was_altacct = FALSE; /* - * Don't do anything to pages outside valid memory here. - * Instead convince the code that enters a new mapping - * to overwrite the old one. - */ + * Don't do anything to pages outside valid memory here. + * Instead convince the code that enters a new mapping + * to overwrite the old one. 
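
The need_tlbflush logic above encodes two cases where the shootdown is safely skipped: a change confined to the software-only PTE_WIRED bit is invisible to the TLB, and a change that merely adds write permission can be deferred because a stale read-only entry just takes a fault and refills. In isolation:

#include <stdbool.h>
#include <stdint.h>

#define PTE_WIRED (1ULL << 10)	/* software bit; position illustrative */
#define PTE_WRITE (1ULL << 1)

/* Mirrors the flush decision in pmap_enter_options()'s same-frame path. */
static bool
needs_tlb_flush(uint64_t old_attr, uint64_t new_attr)
{
	uint64_t delta = old_attr ^ new_attr;

	if (delta == PTE_WIRED) {
		return false;	/* hardware never sees the wired bit */
	}
	if (delta == PTE_WRITE && !(old_attr & PTE_WRITE)) {
		return false;	/* only granting write: fault-and-refill */
	}
	return true;
}
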
+ */ /* invalidate the PTE */ pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0); /* propagate invalidate everywhere */ PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE); /* remember reference and change */ - old_pte = *pte; + old_pte = *pte; oattr = (char) (old_pte & (PTE_MOD(is_ept) | PTE_REF(is_ept))); /* completely invalidate the PTE */ - pmap_store_pte(pte, 0); + pmap_store_pte(pte, PTE_LOCK(is_ept)); if (IS_MANAGED_PAGE(pai)) { /* @@ -790,20 +850,20 @@ dont_update_pte: if (IS_REUSABLE_PAGE(pai)) { PMAP_STATS_ASSERTF( (pmap->stats.reusable > 0, - "reusable %d", - pmap->stats.reusable)); + "reusable %d", + pmap->stats.reusable)); OSAddAtomic(-1, &pmap->stats.reusable); } else if (IS_INTERNAL_PAGE(pai)) { PMAP_STATS_ASSERTF( (pmap->stats.internal > 0, - "internal %d", - pmap->stats.internal)); + "internal %d", + pmap->stats.internal)); OSAddAtomic(-1, &pmap->stats.internal); } else { PMAP_STATS_ASSERTF( (pmap->stats.external > 0, - "external %d", - pmap->stats.external)); + "external %d", + pmap->stats.external)); OSAddAtomic(-1, &pmap->stats.external); } @@ -837,9 +897,7 @@ dont_update_pte: } else { pmap_phys_attributes[pai] |= ept_refmod_to_physmap(oattr); } - } else { - /* * old_pa is not managed. * Do removal part of accounting. @@ -867,13 +925,12 @@ dont_update_pte: old_pa_locked = FALSE; } - pai = pa_index(pa); /* now working with new incoming phys page */ + pai = pa_index(pa); /* now working with new incoming phys page */ if (IS_MANAGED_PAGE(pai)) { - /* - * Step 2) Enter the mapping in the PV list for this - * physical page. - */ + * Step 2) Enter the mapping in the PV list for this + * physical page. + */ pv_h = pai_to_pvh(pai); LOCK_PVH(pai); @@ -927,7 +984,8 @@ dont_update_pte: PV_HASHED_KERN_ALLOC(&pvh_e); } else { UNLOCK_PVH(pai); - PMAP_UNLOCK(pmap); + PTE_LOCK_UNLOCK(pte); + PMAP_UNLOCK_SHARED(pmap); pmap_pv_throttle(pmap); pvh_new = (pv_hashed_entry_t) zalloc(pv_hashed_list_zone); goto Retry; @@ -935,8 +993,9 @@ dont_update_pte: } } - if (PV_HASHED_ENTRY_NULL == pvh_e) + if (PV_HASHED_ENTRY_NULL == pvh_e) { panic("Mapping alias chain exhaustion, possibly induced by numerous kernel virtual double mappings"); + } pvh_e->va_and_flags = vaddr; pvh_e->pmap = pmap; @@ -958,11 +1017,11 @@ dont_update_pte: } /* - * only count the mapping - * for 'managed memory' - */ + * only count the mapping + * for 'managed memory' + */ pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE); - OSAddAtomic(+1, &pmap->stats.resident_count); + OSAddAtomic(+1, &pmap->stats.resident_count); if (pmap->stats.resident_count > pmap->stats.resident_max) { pmap->stats.resident_max = pmap->stats.resident_count; } @@ -1005,7 +1064,7 @@ dont_update_pte: * are determined. Consider consulting the available DRAM map. 
*/ pmap_ledger_credit(pmap, task_ledgers.phys_mem, PAGE_SIZE); - OSAddAtomic(+1, &pmap->stats.resident_count); + OSAddAtomic(+1, &pmap->stats.resident_count); if (pmap != kernel_pmap) { #if 00 OSAddAtomic(+1, &pmap->stats.device); @@ -1024,7 +1083,7 @@ dont_update_pte: if (!is_ept) { template |= INTEL_PTE_VALID; } else { - template |= INTEL_EPT_IPTA; + template |= INTEL_EPT_IPAT; } @@ -1039,20 +1098,24 @@ dont_update_pte: * We don't support passing VM_MEM_NOT_CACHEABLE flags for EPT PTEs */ if (!is_ept && (flags & VM_MEM_NOT_CACHEABLE)) { - if (!(flags & VM_MEM_GUARDED)) - template |= INTEL_PTE_PTA; + if (!(flags & VM_MEM_GUARDED)) { + template |= INTEL_PTE_PAT; + } template |= INTEL_PTE_NCACHE; } - if (pmap != kernel_pmap && !is_ept) + if (pmap != kernel_pmap && !is_ept) { template |= INTEL_PTE_USER; - if (prot & VM_PROT_READ) + } + if (prot & VM_PROT_READ) { template |= PTE_READ(is_ept); + } if (prot & VM_PROT_WRITE) { template |= PTE_WRITE(is_ept); if (is_ept && !pmap_ept_support_ad) { template |= PTE_MOD(is_ept); - if (IS_MANAGED_PAGE(pai)) + if (IS_MANAGED_PAGE(pai)) { pmap_phys_attributes[pai] |= PHYS_MODIFIED; + } } } if (prot & VM_PROT_EXECUTE) { @@ -1060,23 +1123,26 @@ dont_update_pte: template = pte_set_ex(template, is_ept); } - if (set_NX) + if (set_NX) { template = pte_remove_ex(template, is_ept); + } if (wired) { template |= INTEL_PTE_WIRED; - OSAddAtomic(+1, & pmap->stats.wired_count); + OSAddAtomic(+1, &pmap->stats.wired_count); pmap_ledger_credit(pmap, task_ledgers.wired_mem, PAGE_SIZE); } - if (superpage) + if (__improbable(superpage)) { template |= INTEL_PTE_PS; + } /* For hardware that doesn't have EPT AD support, we always set REFMOD for EPT PTEs */ - if (is_ept && !pmap_ept_support_ad) { + if (__improbable(is_ept && !pmap_ept_support_ad)) { template |= PTE_REF(is_ept); - if (IS_MANAGED_PAGE(pai)) + if (IS_MANAGED_PAGE(pai)) { pmap_phys_attributes[pai] |= PHYS_REFERENCED; + } } - + template |= PTE_LOCK(is_ept); pmap_store_pte(pte, template); /* @@ -1087,36 +1153,42 @@ dont_update_pte: if (IS_MANAGED_PAGE(pai)) { UNLOCK_PVH(pai); } -Done: +done2: if (need_tlbflush == TRUE) { - if (options & PMAP_OPTIONS_NOFLUSH) + if (options & PMAP_OPTIONS_NOFLUSH) { PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg); - else + } else { PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE); + } + } + if (ptelocked) { + PTE_LOCK_UNLOCK(pte); } + PMAP_UNLOCK_SHARED(pmap); + if (pvh_e != PV_HASHED_ENTRY_NULL) { PV_HASHED_FREE_LIST(pvh_e, pvh_e, 1); } if (pvh_new != PV_HASHED_ENTRY_NULL) { PV_HASHED_KERN_FREE_LIST(pvh_new, pvh_new, 1); } - PMAP_UNLOCK(pmap); if (delpage_pm_obj) { vm_page_t m; vm_object_lock(delpage_pm_obj); m = vm_page_lookup(delpage_pm_obj, (delpage_pde_index * PAGE_SIZE)); - if (m == VM_PAGE_NULL) - panic("pmap_enter: pte page not in object"); + if (m == VM_PAGE_NULL) { + panic("pmap_enter: pte page not in object"); + } VM_PAGE_FREE(m); vm_object_unlock(delpage_pm_obj); - OSAddAtomic(-1, &inuse_ptepages_count); + OSAddAtomic(-1, &inuse_ptepages_count); PMAP_ZINFO_PFREE(pmap, PAGE_SIZE); } kr = KERN_SUCCESS; -done: +done1: PMAP_TRACE(PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, kr); return kr; } @@ -1135,38 +1207,38 @@ done: void pmap_remove_range( - pmap_t pmap, - vm_map_offset_t start_vaddr, - pt_entry_t *spte, - pt_entry_t *epte) + pmap_t pmap, + vm_map_offset_t start_vaddr, + pt_entry_t *spte, + pt_entry_t *epte) { pmap_remove_range_options(pmap, start_vaddr, spte, epte, - PMAP_OPTIONS_REMOVE); + PMAP_OPTIONS_REMOVE); } void 
pmap_remove_range_options( - pmap_t pmap, - vm_map_offset_t start_vaddr, - pt_entry_t *spte, - pt_entry_t *epte, - int options) + pmap_t pmap, + vm_map_offset_t start_vaddr, + pt_entry_t *spte, + pt_entry_t *epte, + int options) { - pt_entry_t *cpte; + pt_entry_t *cpte; pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL; pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL; pv_hashed_entry_t pvh_e; - int pvh_cnt = 0; - int num_removed, num_unwired, num_found, num_invalid; - int stats_external, stats_internal, stats_reusable; - uint64_t stats_compressed; - int ledgers_internal, ledgers_alt_internal; - uint64_t ledgers_compressed, ledgers_alt_compressed; - ppnum_t pai; - pmap_paddr_t pa; - vm_map_offset_t vaddr; - boolean_t is_ept = is_ept_pmap(pmap); - boolean_t was_altacct; + int pvh_cnt = 0; + int num_removed, num_unwired, num_found, num_invalid; + int stats_external, stats_internal, stats_reusable; + uint64_t stats_compressed; + int ledgers_internal, ledgers_alt_internal; + uint64_t ledgers_compressed, ledgers_alt_compressed; + ppnum_t pai; + pmap_paddr_t pa; + vm_map_offset_t vaddr; + boolean_t is_ept = is_ept_pmap(pmap); + boolean_t was_altacct; num_removed = 0; num_unwired = 0; @@ -1182,14 +1254,14 @@ pmap_remove_range_options( ledgers_alt_compressed = 0; /* invalidate the PTEs first to "freeze" them */ for (cpte = spte, vaddr = start_vaddr; - cpte < epte; - cpte++, vaddr += PAGE_SIZE_64) { + cpte < epte; + cpte++, vaddr += PAGE_SIZE_64) { pt_entry_t p = *cpte; pa = pte_to_pa(p); if (pa == 0) { if ((options & PMAP_OPTIONS_REMOVE) && - (PTE_IS_COMPRESSED(p))) { + (PTE_IS_COMPRESSED(p, cpte))) { assert(pmap != kernel_pmap); /* one less "compressed"... */ stats_compressed++; @@ -1206,9 +1278,10 @@ pmap_remove_range_options( } num_found++; - if (iswired(p)) + if (iswired(p)) { num_unwired++; - + } + pai = pa_index(pa); if (!IS_MANAGED_PAGE(pai)) { @@ -1220,8 +1293,9 @@ pmap_remove_range_options( continue; } - if ((p & PTE_VALID_MASK(is_ept)) == 0) + if ((p & PTE_VALID_MASK(is_ept)) == 0) { num_invalid++; + } /* invalidate the PTE */ pmap_update_pte(cpte, PTE_VALID_MASK(is_ept), 0); @@ -1229,7 +1303,7 @@ pmap_remove_range_options( if (num_found == 0) { /* nothing was changed: we're done */ - goto update_counts; + goto update_counts; } /* propagate the invalidates to other CPUs */ @@ -1237,19 +1311,18 @@ pmap_remove_range_options( PMAP_UPDATE_TLBS(pmap, start_vaddr, vaddr); for (cpte = spte, vaddr = start_vaddr; - cpte < epte; - cpte++, vaddr += PAGE_SIZE_64) { - + cpte < epte; + cpte++, vaddr += PAGE_SIZE_64) { pa = pte_to_pa(*cpte); if (pa == 0) { - check_pte_for_compressed_marker: +check_pte_for_compressed_marker: /* * This PTE could have been replaced with a * "compressed" marker after our first "freeze" * loop above, so check again. */ if ((options & PMAP_OPTIONS_REMOVE) && - (PTE_IS_COMPRESSED(*cpte))) { + (PTE_IS_COMPRESSED(*cpte, cpte))) { assert(pmap != kernel_pmap); /* one less "compressed"... 
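
pmap_remove_range_options() above is deliberately two-pass: the first loop only clears valid bits ("freezes" the PTEs), a single PMAP_UPDATE_TLBS then covers the whole span, and the second loop does the PV-list and accounting work on entries that can no longer be refilled into any TLB. The shape reduced to essentials (tlb_shootdown() is an assumed stand-in for the shootdown primitive):

#include <stdint.h>

#define PTE_VALID  (1ULL << 0)
#define PAGE_SIZE_ 4096ULL

extern void tlb_shootdown(uint64_t sva, uint64_t eva);	/* assumed */

static void
remove_range(uint64_t *spte, uint64_t *epte, uint64_t start_va)
{
	uint64_t va = start_va;
	uint64_t *cpte;

	for (cpte = spte; cpte < epte; cpte++, va += PAGE_SIZE_) {
		*cpte &= ~PTE_VALID;	/* pass 1: freeze, no new TLB fills */
	}
	tlb_shootdown(start_va, va);	/* one flush for the whole batch */

	for (cpte = spte, va = start_va; cpte < epte; cpte++, va += PAGE_SIZE_) {
		/* pass 2: collect ref/mod bits, unlink PV entries, stats */
	}
}
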
*/ stats_compressed++; @@ -1307,16 +1380,16 @@ pmap_remove_range_options( } /* - * Get the modify and reference bits, then - * nuke the entry in the page table - */ + * Get the modify and reference bits, then + * nuke the entry in the page table + */ /* remember reference and change */ if (!is_ept) { pmap_phys_attributes[pai] |= - *cpte & (PHYS_MODIFIED | PHYS_REFERENCED); + *cpte & (PHYS_MODIFIED | PHYS_REFERENCED); } else { pmap_phys_attributes[pai] |= - ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED); + ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED); } /* completely invalidate the PTE */ @@ -1343,28 +1416,29 @@ update_counts: * Update the counts */ #if TESTING - if (pmap->stats.resident_count < num_removed) - panic("pmap_remove_range: resident_count"); + if (pmap->stats.resident_count < num_removed) { + panic("pmap_remove_range: resident_count"); + } #endif pmap_ledger_debit(pmap, task_ledgers.phys_mem, machine_ptob(num_removed)); PMAP_STATS_ASSERTF((pmap->stats.resident_count >= num_removed, - "pmap=%p num_removed=%d stats.resident_count=%d", - pmap, num_removed, pmap->stats.resident_count)); - OSAddAtomic(-num_removed, &pmap->stats.resident_count); + "pmap=%p num_removed=%d stats.resident_count=%d", + pmap, num_removed, pmap->stats.resident_count)); + OSAddAtomic(-num_removed, &pmap->stats.resident_count); if (pmap != kernel_pmap) { PMAP_STATS_ASSERTF((pmap->stats.external >= stats_external, - "pmap=%p stats_external=%d stats.external=%d", - pmap, stats_external, pmap->stats.external)); + "pmap=%p stats_external=%d stats.external=%d", + pmap, stats_external, pmap->stats.external)); PMAP_STATS_ASSERTF((pmap->stats.internal >= stats_internal, - "pmap=%p stats_internal=%d stats.internal=%d", - pmap, stats_internal, pmap->stats.internal)); + "pmap=%p stats_internal=%d stats.internal=%d", + pmap, stats_internal, pmap->stats.internal)); PMAP_STATS_ASSERTF((pmap->stats.reusable >= stats_reusable, - "pmap=%p stats_reusable=%d stats.reusable=%d", - pmap, stats_reusable, pmap->stats.reusable)); + "pmap=%p stats_reusable=%d stats.reusable=%d", + pmap, stats_reusable, pmap->stats.reusable)); PMAP_STATS_ASSERTF((pmap->stats.compressed >= stats_compressed, - "pmap=%p stats_compressed=%lld, stats.compressed=%lld", - pmap, stats_compressed, pmap->stats.compressed)); + "pmap=%p stats_compressed=%lld, stats.compressed=%lld", + pmap, stats_compressed, pmap->stats.compressed)); /* update pmap stats */ if (stats_external) { @@ -1373,47 +1447,50 @@ update_counts: if (stats_internal) { OSAddAtomic(-stats_internal, &pmap->stats.internal); } - if (stats_reusable) + if (stats_reusable) { OSAddAtomic(-stats_reusable, &pmap->stats.reusable); - if (stats_compressed) + } + if (stats_compressed) { OSAddAtomic64(-stats_compressed, &pmap->stats.compressed); + } /* update ledgers */ if (ledgers_internal) { pmap_ledger_debit(pmap, - task_ledgers.internal, - machine_ptob(ledgers_internal)); + task_ledgers.internal, + machine_ptob(ledgers_internal)); } if (ledgers_compressed) { pmap_ledger_debit(pmap, - task_ledgers.internal_compressed, - machine_ptob(ledgers_compressed)); + task_ledgers.internal_compressed, + machine_ptob(ledgers_compressed)); } if (ledgers_alt_internal) { pmap_ledger_debit(pmap, - task_ledgers.alternate_accounting, - machine_ptob(ledgers_alt_internal)); + task_ledgers.alternate_accounting, + machine_ptob(ledgers_alt_internal)); } if (ledgers_alt_compressed) { pmap_ledger_debit(pmap, - 
task_ledgers.alternate_accounting_compressed, - machine_ptob(ledgers_alt_compressed)); + task_ledgers.alternate_accounting_compressed, + machine_ptob(ledgers_alt_compressed)); } pmap_ledger_debit(pmap, - task_ledgers.phys_footprint, - machine_ptob((ledgers_internal - - ledgers_alt_internal) + - (ledgers_compressed - - ledgers_alt_compressed))); + task_ledgers.phys_footprint, + machine_ptob((ledgers_internal - + ledgers_alt_internal) + + (ledgers_compressed - + ledgers_alt_compressed))); } #if TESTING - if (pmap->stats.wired_count < num_unwired) - panic("pmap_remove_range: wired_count"); + if (pmap->stats.wired_count < num_unwired) { + panic("pmap_remove_range: wired_count"); + } #endif PMAP_STATS_ASSERTF((pmap->stats.wired_count >= num_unwired, - "pmap=%p num_unwired=%d stats.wired_count=%d", - pmap, num_unwired, pmap->stats.wired_count)); - OSAddAtomic(-num_unwired, &pmap->stats.wired_count); + "pmap=%p num_unwired=%d stats.wired_count=%d", + pmap, num_unwired, pmap->stats.wired_count)); + OSAddAtomic(-num_unwired, &pmap->stats.wired_count); pmap_ledger_debit(pmap, task_ledgers.wired_mem, machine_ptob(num_unwired)); return; @@ -1429,77 +1506,62 @@ update_counts: */ void pmap_remove( - pmap_t map, - addr64_t s64, - addr64_t e64) + pmap_t map, + addr64_t s64, + addr64_t e64) { pmap_remove_options(map, s64, e64, PMAP_OPTIONS_REMOVE); } +#define PLCHECK_THRESHOLD (8) void pmap_remove_options( - pmap_t map, - addr64_t s64, - addr64_t e64, - int options) + pmap_t map, + addr64_t s64, + addr64_t e64, + int options) { pt_entry_t *pde; pt_entry_t *spte, *epte; addr64_t l64; - uint64_t deadline; - boolean_t is_ept; + uint64_t deadline = 0; + boolean_t is_ept; pmap_intr_assert(); - if (map == PMAP_NULL || s64 == e64) + if (map == PMAP_NULL || s64 == e64) { return; + } is_ept = is_ept_pmap(map); PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(map), VM_KERNEL_ADDRHIDE(s64), - VM_KERNEL_ADDRHIDE(e64)); - - PMAP_LOCK(map); + VM_KERNEL_ADDRHIDE(map), VM_KERNEL_ADDRHIDE(s64), + VM_KERNEL_ADDRHIDE(e64)); -#if 0 - /* - * Check that address range in the kernel does not overlap the stacks. - * We initialize local static min/max variables once to avoid making - * 2 function calls for every remove. Note also that these functions - * both return 0 before kernel stacks have been initialized, and hence - * the panic is not triggered in this case. - */ - if (map == kernel_pmap) { - static vm_offset_t kernel_stack_min = 0; - static vm_offset_t kernel_stack_max = 0; + PMAP_LOCK_EXCLUSIVE(map); + uint32_t traverse_count = 0; - if (kernel_stack_min == 0) { - kernel_stack_min = min_valid_stack_address(); - kernel_stack_max = max_valid_stack_address(); + while (s64 < e64) { + pml4_entry_t *pml4e = pmap64_pml4(map, s64); + if ((pml4e == NULL) || + ((*pml4e & PTE_VALID_MASK(is_ept)) == 0)) { + s64 = (s64 + NBPML4) & ~(PML4MASK); + continue; + } + pdpt_entry_t *pdpte = pmap64_pdpt(map, s64); + if ((pdpte == NULL) || + ((*pdpte & PTE_VALID_MASK(is_ept)) == 0)) { + s64 = (s64 + NBPDPT) & ~(PDPTMASK); + continue; } - if ((kernel_stack_min <= s64 && s64 < kernel_stack_max) || - (kernel_stack_min < e64 && e64 <= kernel_stack_max)) - panic("pmap_remove() attempted in kernel stack"); - } -#else - - /* - * The values of kernel_stack_min and kernel_stack_max are no longer - * relevant now that we allocate kernel stacks in the kernel map, - * so the old code above no longer applies. 
If we wanted to check that - * we weren't removing a mapping of a page in a kernel stack we'd - * mark the PTE with an unused bit and check that here. - */ - -#endif - deadline = rdtsc64() + max_preemption_latency_tsc; + l64 = (s64 + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE - 1); - while (s64 < e64) { - l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1); - if (l64 > e64) + if (l64 > e64) { l64 = e64; + } + pde = pmap_pde(map, s64); if (pde && (*pde & PTE_VALID_MASK(is_ept))) { @@ -1511,42 +1573,40 @@ pmap_remove_options( * level 1 range. */ spte = pde; - epte = spte+1; /* excluded */ + epte = spte + 1; /* excluded */ } else { - spte = pmap_pte(map, (s64 & ~(pde_mapped_size - 1))); + spte = pmap_pte(map, (s64 & ~(PDE_MAPPED_SIZE - 1))); spte = &spte[ptenum(s64)]; epte = &spte[intel_btop(l64 - s64)]; } pmap_remove_range_options(map, s64, spte, epte, - options); + options); } s64 = l64; - if (s64 < e64 && rdtsc64() >= deadline) { - PMAP_UNLOCK(map) - /* TODO: Rapid release/reacquisition can defeat - * the "backoff" intent here; either consider a - * fair spinlock, or a scheme whereby each lock - * attempt marks the processor as within a spinlock - * acquisition, and scan CPUs here to determine - * if a backoff is necessary, to avoid sacrificing - * performance in the common case. - */ - PMAP_LOCK(map) - deadline = rdtsc64() + max_preemption_latency_tsc; + if ((s64 < e64) && (traverse_count++ > PLCHECK_THRESHOLD)) { + if (deadline == 0) { + deadline = rdtsc64() + max_preemption_latency_tsc; + } else { + if (rdtsc64() > deadline) { + PMAP_UNLOCK_EXCLUSIVE(map); + __builtin_ia32_pause(); + PMAP_LOCK_EXCLUSIVE(map); + deadline = rdtsc64() + max_preemption_latency_tsc; + } + } } } - PMAP_UNLOCK(map); + PMAP_UNLOCK_EXCLUSIVE(map); PMAP_TRACE(PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END); - } void pmap_page_protect( - ppnum_t pn, - vm_prot_t prot) + ppnum_t pn, + vm_prot_t prot) { pmap_page_protect_options(pn, prot, 0, NULL); } @@ -1560,36 +1620,37 @@ pmap_page_protect( */ void pmap_page_protect_options( - ppnum_t pn, - vm_prot_t prot, - unsigned int options, - void *arg) + ppnum_t pn, + vm_prot_t prot, + unsigned int options, + void *arg) { - pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL; - pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL; - pv_hashed_entry_t nexth; - int pvh_cnt = 0; - pv_rooted_entry_t pv_h; - pv_rooted_entry_t pv_e; - pv_hashed_entry_t pvh_e; - pt_entry_t *pte; - int pai; - pmap_t pmap; - boolean_t remove; - pt_entry_t new_pte_value; - boolean_t is_ept; + pv_hashed_entry_t pvh_eh = PV_HASHED_ENTRY_NULL; + pv_hashed_entry_t pvh_et = PV_HASHED_ENTRY_NULL; + pv_hashed_entry_t nexth; + int pvh_cnt = 0; + pv_rooted_entry_t pv_h; + pv_rooted_entry_t pv_e; + pv_hashed_entry_t pvh_e; + pt_entry_t *pte; + int pai; + pmap_t pmap; + boolean_t remove; + pt_entry_t new_pte_value; + boolean_t is_ept; pmap_intr_assert(); assert(pn != vm_page_fictitious_addr); - if (pn == vm_page_guard_addr) + if (pn == vm_page_guard_addr) { return; + } pai = ppn_to_pai(pn); if (!IS_MANAGED_PAGE(pai)) { /* - * Not a managed page. - */ + * Not a managed page. + */ return; } @@ -1604,7 +1665,7 @@ pmap_page_protect_options( remove = FALSE; break; case VM_PROT_ALL: - return; /* nothing to do */ + return; /* nothing to do */ default: remove = TRUE; break; @@ -1618,11 +1679,12 @@ pmap_page_protect_options( /* * Walk down PV list, if any, changing or removing all mappings. 
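
The rewritten walk above replaces the old flat PDE scan: when the PML4 or PDPT entry covering s64 is absent, the loop jumps straight to the next 512 GiB or 1 GiB boundary instead of visiting every 2 MiB PDE beneath it, and the rdtsc64() preemption deadline is now consulted only after PLCHECK_THRESHOLD iterations. The skip arithmetic on its own:

#include <stdint.h>

#define NBPML4   (1ULL << 39)	/* span of one PML4 entry: 512 GiB */
#define PML4MASK (NBPML4 - 1)
#define NBPDPT   (1ULL << 30)	/* span of one PDPT entry: 1 GiB */
#define PDPTMASK (NBPDPT - 1)

/* s64 = (s64 + NBPML4) & ~PML4MASK: next PML4-aligned address. */
static uint64_t
skip_pml4(uint64_t s)
{
	return (s + NBPML4) & ~PML4MASK;
}

/* Same at PDPT granularity, for a missing 1 GiB slot. */
static uint64_t
skip_pdpt(uint64_t s)
{
	return (s + NBPDPT) & ~PDPTMASK;
}
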
*/ - if (pv_h->pmap == PMAP_NULL) + if (pv_h->pmap == PMAP_NULL) { goto done; + } pv_e = pv_h; - pvh_e = (pv_hashed_entry_t) pv_e; /* cheat */ + pvh_e = (pv_hashed_entry_t) pv_e; /* cheat */ do { vm_map_offset_t vaddr; @@ -1644,8 +1706,8 @@ pmap_page_protect_options( if (0 == pte) { panic("pmap_page_protect() " - "pmap=%p pn=0x%x vaddr=0x%llx\n", - pmap, pn, vaddr); + "pmap=%p pn=0x%x vaddr=0x%llx\n", + pmap, pn, vaddr); } nexth = (pv_hashed_entry_t) queue_next(&pvh_e->qlink); @@ -1653,7 +1715,6 @@ pmap_page_protect_options( * Remove the mapping if new protection is NONE */ if (remove) { - /* Remove per-pmap wired count */ if (iswired(*pte)) { OSAddAtomic(-1, &pmap->stats.wired_count); @@ -1663,7 +1724,7 @@ pmap_page_protect_options( if (pmap != kernel_pmap && (options & PMAP_OPTIONS_COMPRESSOR) && IS_INTERNAL_PAGE(pai)) { - assert(!PTE_IS_COMPRESSED(*pte)); + assert(!PTE_IS_COMPRESSED(*pte, pte)); /* mark this PTE as having been "compressed" */ new_pte_value = PTE_COMPRESSED; if (IS_ALTACCT_PAGE(pai, pv_e)) { @@ -1676,29 +1737,30 @@ pmap_page_protect_options( if (options & PMAP_OPTIONS_NOREFMOD) { pmap_store_pte(pte, new_pte_value); - if (options & PMAP_OPTIONS_NOFLUSH) + if (options & PMAP_OPTIONS_NOFLUSH) { PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg); - else + } else { PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE); + } } else { /* * Remove the mapping, collecting dirty bits. */ pmap_update_pte(pte, PTE_VALID_MASK(is_ept), 0); - PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE); + PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE); if (!is_ept) { pmap_phys_attributes[pai] |= - *pte & (PHYS_MODIFIED|PHYS_REFERENCED); + *pte & (PHYS_MODIFIED | PHYS_REFERENCED); } else { pmap_phys_attributes[pai] |= - ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED); + ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED); } if ((options & - PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) && + PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED) && IS_INTERNAL_PAGE(pai) && (pmap_phys_attributes[pai] & - PHYS_MODIFIED)) { + PHYS_MODIFIED)) { /* * Page is actually "modified" and * will be compressed. Start @@ -1719,12 +1781,13 @@ pmap_page_protect_options( } #if TESTING - if (pmap->stats.resident_count < 1) + if (pmap->stats.resident_count < 1) { panic("pmap_page_protect: resident_count"); + } #endif pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE); assert(pmap->stats.resident_count >= 1); - OSAddAtomic(-1, &pmap->stats.resident_count); + OSAddAtomic(-1, &pmap->stats.resident_count); /* * We only ever compress internal pages. @@ -1791,7 +1854,7 @@ pmap_page_protect_options( /* * This internal page isn't * going to the compressor, - * so adjust stats to keep + * so adjust stats to keep * phys_footprint up to date. */ pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); @@ -1800,8 +1863,8 @@ pmap_page_protect_options( } /* - * Deal with the pv_rooted_entry. - */ + * Deal with the pv_rooted_entry. 
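
The remove/write-protect split driving this PV walk comes from the prot switch at the top of pmap_page_protect_options(): full access is a no-op, read or read/execute downgrades every mapping to read-only, and anything else (notably VM_PROT_NONE) removes the mappings outright. As a decision table (VM_PROT_* values sketched, not xnu's headers):

#define VM_PROT_READ_    0x1
#define VM_PROT_WRITE_   0x2
#define VM_PROT_EXECUTE_ 0x4
#define VM_PROT_ALL_     (VM_PROT_READ_ | VM_PROT_WRITE_ | VM_PROT_EXECUTE_)

enum pp_action { PP_NOTHING, PP_WRITE_PROTECT, PP_REMOVE };

/* Sketch of the prot dispatch in pmap_page_protect_options(). */
static enum pp_action
pp_action(int prot)
{
	switch (prot) {
	case VM_PROT_READ_:
	case VM_PROT_READ_ | VM_PROT_EXECUTE_:
		return PP_WRITE_PROTECT;
	case VM_PROT_ALL_:
		return PP_NOTHING;	/* nothing to do */
	default:
		return PP_REMOVE;	/* e.g. VM_PROT_NONE */
	}
}
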
+ */ if (pv_e == pv_h) { /* @@ -1816,27 +1879,29 @@ pmap_page_protect_options( pvh_e->qlink.next = (queue_entry_t) pvh_eh; pvh_eh = pvh_e; - if (pvh_et == PV_HASHED_ENTRY_NULL) + if (pvh_et == PV_HASHED_ENTRY_NULL) { pvh_et = pvh_e; + } pvh_cnt++; } } else { /* - * Write-protect, after opportunistic refmod collect - */ + * Write-protect, after opportunistic refmod collect + */ if (!is_ept) { pmap_phys_attributes[pai] |= - *pte & (PHYS_MODIFIED|PHYS_REFERENCED); + *pte & (PHYS_MODIFIED | PHYS_REFERENCED); } else { pmap_phys_attributes[pai] |= - ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED); + ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED); } pmap_update_pte(pte, PTE_WRITE(is_ept), 0); - if (options & PMAP_OPTIONS_NOFLUSH) + if (options & PMAP_OPTIONS_NOFLUSH) { PMAP_UPDATE_TLBS_DELAYED(pmap, vaddr, vaddr + PAGE_SIZE, (pmap_flush_context *)arg); - else - PMAP_UPDATE_TLBS(pmap, vaddr, vaddr+PAGE_SIZE); + } else { + PMAP_UPDATE_TLBS(pmap, vaddr, vaddr + PAGE_SIZE); + } } pvh_e = nexth; } while ((pv_e = (pv_rooted_entry_t) nexth) != pv_h); @@ -1855,8 +1920,9 @@ pmap_page_protect_options( pvh_e->qlink.next = (queue_entry_t) pvh_eh; pvh_eh = pvh_e; - if (pvh_et == PV_HASHED_ENTRY_NULL) + if (pvh_et == PV_HASHED_ENTRY_NULL) { pvh_et = pvh_e; + } pvh_cnt++; } } @@ -1875,27 +1941,27 @@ done: */ void phys_attribute_clear( - ppnum_t pn, - int bits, - unsigned int options, - void *arg) + ppnum_t pn, + int bits, + unsigned int options, + void *arg) { - pv_rooted_entry_t pv_h; - pv_hashed_entry_t pv_e; - pt_entry_t *pte = NULL; - int pai; - pmap_t pmap; - char attributes = 0; - boolean_t is_internal, is_reusable, is_altacct, is_ept; - int ept_bits_to_clear; - boolean_t ept_keep_global_mod = FALSE; + pv_rooted_entry_t pv_h; + pv_hashed_entry_t pv_e; + pt_entry_t *pte = NULL; + int pai; + pmap_t pmap; + char attributes = 0; + boolean_t is_internal, is_reusable, is_altacct, is_ept; + int ept_bits_to_clear; + boolean_t ept_keep_global_mod = FALSE; if ((bits & PHYS_MODIFIED) && (options & PMAP_OPTIONS_NOFLUSH) && arg == NULL) { panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p): " - "should not clear 'modified' without flushing TLBs\n", - pn, bits, options, arg); + "should not clear 'modified' without flushing TLBs\n", + pn, bits, options, arg); } /* We only support converting MOD and REF bits for EPT PTEs in this function */ @@ -1905,8 +1971,9 @@ phys_attribute_clear( pmap_intr_assert(); assert(pn != vm_page_fictitious_addr); - if (pn == vm_page_guard_addr) + if (pn == vm_page_guard_addr) { return; + } pai = ppn_to_pai(pn); @@ -1940,7 +2007,7 @@ phys_attribute_clear( pv_e = (pv_hashed_entry_t)pv_h; do { - vm_map_offset_t va; + vm_map_offset_t va; char pte_bits; pmap = pv_e->pmap; @@ -1969,12 +2036,13 @@ phys_attribute_clear( pte_bits &= ept_bits_to_clear; } } - if (options & PMAP_OPTIONS_CLEAR_WRITE) - pte_bits |= PTE_WRITE(is_ept); + if (options & PMAP_OPTIONS_CLEAR_WRITE) { + pte_bits |= PTE_WRITE(is_ept); + } - /* - * Clear modify and/or reference bits. - */ + /* + * Clear modify and/or reference bits. + */ if (pte_bits) { pmap_update_pte(pte, pte_bits, 0); @@ -1984,11 +2052,11 @@ phys_attribute_clear( * the TLB shadow of the 'D' bit (in particular) * is synchronized with the updated PTE. */ - if (! 
(options & PMAP_OPTIONS_NOFLUSH)) { + if (!(options & PMAP_OPTIONS_NOFLUSH)) { /* flush TLBS now */ PMAP_UPDATE_TLBS(pmap, - va, - va + PAGE_SIZE); + va, + va + PAGE_SIZE); } else if (arg) { /* delayed TLB flush: add "pmap" info */ PMAP_UPDATE_TLBS_DELAYED( @@ -2017,8 +2085,8 @@ phys_attribute_clear( /* no impact on ledgers */ } else { pmap_ledger_credit(pmap, - task_ledgers.internal, - PAGE_SIZE); + task_ledgers.internal, + PAGE_SIZE); pmap_ledger_credit( pmap, task_ledgers.phys_footprint, @@ -2031,8 +2099,8 @@ phys_attribute_clear( assert(pmap->stats.external > 0); } } else if ((options & PMAP_OPTIONS_SET_REUSABLE) && - !is_reusable && - pmap != kernel_pmap) { + !is_reusable && + pmap != kernel_pmap) { /* one more "reusable" */ OSAddAtomic(+1, &pmap->stats.reusable); PMAP_STATS_PEAK(pmap->stats.reusable); @@ -2045,8 +2113,8 @@ phys_attribute_clear( /* no impact on footprint */ } else { pmap_ledger_debit(pmap, - task_ledgers.internal, - PAGE_SIZE); + task_ledgers.internal, + PAGE_SIZE); pmap_ledger_debit( pmap, task_ledgers.phys_footprint, @@ -2060,7 +2128,6 @@ phys_attribute_clear( } pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink); - } while (pv_e != (pv_hashed_entry_t)pv_h); } /* Opportunistic refmod collection, annulled @@ -2097,22 +2164,23 @@ phys_attribute_clear( */ int phys_attribute_test( - ppnum_t pn, - int bits) + ppnum_t pn, + int bits) { - pv_rooted_entry_t pv_h; - pv_hashed_entry_t pv_e; - pt_entry_t *pte; - int pai; - pmap_t pmap; - int attributes = 0; - boolean_t is_ept; + pv_rooted_entry_t pv_h; + pv_hashed_entry_t pv_e; + pt_entry_t *pte; + int pai; + pmap_t pmap; + int attributes = 0; + boolean_t is_ept; pmap_intr_assert(); assert(pn != vm_page_fictitious_addr); assert((bits & ~(PHYS_MODIFIED | PHYS_REFERENCED)) == 0); - if (pn == vm_page_guard_addr) + if (pn == vm_page_guard_addr) { return 0; + } pai = ppn_to_pai(pn); @@ -2130,8 +2198,9 @@ phys_attribute_test( * the lock in case they got pulled in while * we were waiting for the lock */ - if ((pmap_phys_attributes[pai] & bits) == bits) + if ((pmap_phys_attributes[pai] & bits) == bits) { return bits; + } pv_h = pai_to_pvh(pai); @@ -2157,7 +2226,7 @@ phys_attribute_test( is_ept = is_ept_pmap(pmap); va = PVE_VA(pv_e); /* - * pick up modify and/or reference bits from mapping + * pick up modify and/or reference bits from mapping */ pte = pmap_pte(pmap, va); @@ -2165,18 +2234,16 @@ phys_attribute_test( attributes |= (int)(*pte & bits); } else { attributes |= (int)(ept_refmod_to_physmap((*pte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED)); - } pv_e = (pv_hashed_entry_t)queue_next(&pv_e->qlink); - } while ((attributes != bits) && - (pv_e != (pv_hashed_entry_t)pv_h)); + (pv_e != (pv_hashed_entry_t)pv_h)); } pmap_phys_attributes[pai] |= attributes; UNLOCK_PVH(pai); - return (attributes); + return attributes; } /* @@ -2188,78 +2255,81 @@ phys_attribute_test( */ void pmap_change_wiring( - pmap_t map, - vm_map_offset_t vaddr, - boolean_t wired) + pmap_t map, + vm_map_offset_t vaddr, + boolean_t wired) { - pt_entry_t *pte; + pt_entry_t *pte; - PMAP_LOCK(map); + PMAP_LOCK_SHARED(map); - if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL) + if ((pte = pmap_pte(map, vaddr)) == PT_ENTRY_NULL) { panic("pmap_change_wiring(%p,0x%llx,%d): pte missing", - map, vaddr, wired); + map, vaddr, wired); + } if (wired && !iswired(*pte)) { /* * wiring down mapping */ pmap_ledger_credit(map, task_ledgers.wired_mem, PAGE_SIZE); - OSAddAtomic(+1, &map->stats.wired_count); + OSAddAtomic(+1, &map->stats.wired_count); 
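	/*
	 * (Not from the patch: a minimal sketch of the invariant both branches
	 * of pmap_change_wiring maintain.  Three pieces of state move together
	 * -- the task_ledgers.wired_mem ledger, the per-pmap stats.wired_count,
	 * and the PTE_WIRED bit -- with the hypothetical variables below
	 * standing in for them:
	 *
	 *	if (wire && !(*pte & PTE_WIRED)) {
	 *		wired_bytes += PAGE_SIZE;    // credit the ledger first
	 *		wired_count += 1;            // then the pmap statistic
	 *		*pte |= PTE_WIRED;           // then mark the PTE
	 *	} else if (!wire && (*pte & PTE_WIRED)) {
	 *		assert(wired_count >= 1);    // unwire: counter first,
	 *		wired_count -= 1;            // then debit the ledger,
	 *		wired_bytes -= PAGE_SIZE;    // then clear the bit
	 *		*pte &= ~PTE_WIRED;
	 *	}
	 *
	 * The counter updates stay atomic (OSAddAtomic) because the pmap lock
	 * is only taken shared here.)
	 */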
pmap_update_pte(pte, 0, PTE_WIRED); - } - else if (!wired && iswired(*pte)) { + } else if (!wired && iswired(*pte)) { /* * unwiring mapping */ assert(map->stats.wired_count >= 1); - OSAddAtomic(-1, &map->stats.wired_count); + OSAddAtomic(-1, &map->stats.wired_count); pmap_ledger_debit(map, task_ledgers.wired_mem, PAGE_SIZE); pmap_update_pte(pte, PTE_WIRED, 0); } - PMAP_UNLOCK(map); + PMAP_UNLOCK_SHARED(map); } /* * "Backdoor" direct map routine for early mappings. - * Useful for mapping memory outside the range + * Useful for mapping memory outside the range * Sets A, D and NC if requested */ vm_offset_t pmap_map_bd( - vm_offset_t virt, - vm_map_offset_t start_addr, - vm_map_offset_t end_addr, - vm_prot_t prot, - unsigned int flags) + vm_offset_t virt, + vm_map_offset_t start_addr, + vm_map_offset_t end_addr, + vm_prot_t prot, + unsigned int flags) { - pt_entry_t template; - pt_entry_t *ptep; + pt_entry_t template; + pt_entry_t *ptep; - vm_offset_t base = virt; - boolean_t doflush = FALSE; + vm_offset_t base = virt; + boolean_t doflush = FALSE; template = pa_to_pte(start_addr) - | INTEL_PTE_REF - | INTEL_PTE_MOD - | INTEL_PTE_WIRED - | INTEL_PTE_VALID; + | INTEL_PTE_REF + | INTEL_PTE_MOD + | INTEL_PTE_WIRED + | INTEL_PTE_VALID; if ((flags & (VM_MEM_NOT_CACHEABLE | VM_WIMG_USE_DEFAULT)) == VM_MEM_NOT_CACHEABLE) { template |= INTEL_PTE_NCACHE; - if (!(flags & (VM_MEM_GUARDED))) - template |= INTEL_PTE_PTA; + if (!(flags & (VM_MEM_GUARDED))) { + template |= INTEL_PTE_PAT; + } } - if ((prot & VM_PROT_EXECUTE) == 0) + if ((prot & VM_PROT_EXECUTE) == 0) { template |= INTEL_PTE_NX; + } - if (prot & VM_PROT_WRITE) + if (prot & VM_PROT_WRITE) { template |= INTEL_PTE_WRITE; - - while (start_addr < end_addr) { + } + vm_map_offset_t caddr = start_addr; + while (caddr < end_addr) { ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)virt); if (ptep == PT_ENTRY_NULL) { panic("pmap_map_bd: Invalid kernel address"); @@ -2270,13 +2340,13 @@ pmap_map_bd( pmap_store_pte(ptep, template); pte_increment_pa(template); virt += PAGE_SIZE; - start_addr += PAGE_SIZE; + caddr += PAGE_SIZE; } if (doflush) { - flush_tlb_raw(); + pmap_tlbi_range(0, ~0ULL, true, 0); PMAP_UPDATE_TLBS(kernel_pmap, base, base + end_addr - start_addr); } - return(virt); + return virt; } /* Create a virtual alias beginning at 'ava' of the specified kernel virtual @@ -2288,21 +2358,23 @@ pmap_map_bd( void pmap_alias( - vm_offset_t ava, - vm_map_offset_t start_addr, - vm_map_offset_t end_addr, - vm_prot_t prot, - unsigned int eoptions) + vm_offset_t ava, + vm_map_offset_t start_addr, + vm_map_offset_t end_addr, + vm_prot_t prot, + unsigned int eoptions) { - pt_entry_t prot_template, template; - pt_entry_t *aptep, *sptep; + pt_entry_t prot_template, template; + pt_entry_t *aptep, *sptep; prot_template = INTEL_PTE_REF | INTEL_PTE_MOD | INTEL_PTE_WIRED | INTEL_PTE_VALID; - if ((prot & VM_PROT_EXECUTE) == 0) + if ((prot & VM_PROT_EXECUTE) == 0) { prot_template |= INTEL_PTE_NX; + } - if (prot & VM_PROT_WRITE) + if (prot & VM_PROT_WRITE) { prot_template |= INTEL_PTE_WRITE; + } assert(((start_addr | end_addr) & PAGE_MASK) == 0); while (start_addr < end_addr) { aptep = pmap_pte(kernel_pmap, (vm_map_offset_t)ava); @@ -2329,18 +2401,18 @@ pmap_alias( mach_vm_size_t pmap_query_resident( - pmap_t pmap, - addr64_t s64, - addr64_t e64, - mach_vm_size_t *compressed_bytes_p) + pmap_t pmap, + addr64_t s64, + addr64_t e64, + mach_vm_size_t *compressed_bytes_p) { pt_entry_t *pde; pt_entry_t *spte, *epte; addr64_t l64; - uint64_t deadline; - mach_vm_size_t 
resident_bytes; - mach_vm_size_t compressed_bytes; - boolean_t is_ept; + uint64_t deadline = 0; + mach_vm_size_t resident_bytes; + mach_vm_size_t compressed_bytes; + boolean_t is_ept; pmap_intr_assert(); @@ -2354,20 +2426,20 @@ pmap_query_resident( is_ept = is_ept_pmap(pmap); PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(s64), - VM_KERNEL_ADDRHIDE(e64)); + VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(s64), + VM_KERNEL_ADDRHIDE(e64)); resident_bytes = 0; compressed_bytes = 0; - PMAP_LOCK(pmap); - - deadline = rdtsc64() + max_preemption_latency_tsc; + PMAP_LOCK_EXCLUSIVE(pmap); + uint32_t traverse_count = 0; while (s64 < e64) { - l64 = (s64 + pde_mapped_size) & ~(pde_mapped_size - 1); - if (l64 > e64) + l64 = (s64 + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE - 1); + if (l64 > e64) { l64 = e64; + } pde = pmap_pde(pmap, s64); if (pde && (*pde & PTE_VALID_MASK(is_ept))) { @@ -2375,7 +2447,7 @@ pmap_query_resident( /* superpage: not supported */ } else { spte = pmap_pte(pmap, - (s64 & ~(pde_mapped_size - 1))); + (s64 & ~(PDE_MAPPED_SIZE - 1))); spte = &spte[ptenum(s64)]; epte = &spte[intel_btop(l64 - s64)]; @@ -2386,22 +2458,28 @@ pmap_query_resident( compressed_bytes += PAGE_SIZE; } } - } } s64 = l64; - if (s64 < e64 && rdtsc64() >= deadline) { - PMAP_UNLOCK(pmap); - PMAP_LOCK(pmap); - deadline = rdtsc64() + max_preemption_latency_tsc; + if ((s64 < e64) && (traverse_count++ > PLCHECK_THRESHOLD)) { + if (deadline == 0) { + deadline = rdtsc64() + max_preemption_latency_tsc; + } else { + if (rdtsc64() > deadline) { + PMAP_UNLOCK_EXCLUSIVE(pmap); + __builtin_ia32_pause(); + PMAP_LOCK_EXCLUSIVE(pmap); + deadline = rdtsc64() + max_preemption_latency_tsc; + } + } } } - PMAP_UNLOCK(pmap); + PMAP_UNLOCK_EXCLUSIVE(pmap); PMAP_TRACE(PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_END, - resident_bytes); + resident_bytes); if (compressed_bytes_p) { *compressed_bytes_p = compressed_bytes; @@ -2411,16 +2489,16 @@ pmap_query_resident( kern_return_t pmap_query_page_info( - pmap_t pmap, - vm_map_offset_t va, - int *disp_p) + pmap_t pmap, + vm_map_offset_t va, + int *disp_p) { - int disp; - boolean_t is_ept; - pmap_paddr_t pa; - ppnum_t pai; - pd_entry_t *pde; - pt_entry_t *pte; + int disp; + boolean_t is_ept; + pmap_paddr_t pa; + ppnum_t pai; + pd_entry_t *pde; + pt_entry_t *pte; pmap_intr_assert(); if (pmap == PMAP_NULL || pmap == kernel_pmap) { @@ -2431,7 +2509,7 @@ pmap_query_page_info( disp = 0; is_ept = is_ept_pmap(pmap); - PMAP_LOCK(pmap); + PMAP_LOCK_EXCLUSIVE(pmap); pde = pmap_pde(pmap, va); if (!pde || @@ -2447,7 +2525,7 @@ pmap_query_page_info( pa = pte_to_pa(*pte); if (pa == 0) { - if (PTE_IS_COMPRESSED(*pte)) { + if (PTE_IS_COMPRESSED(*pte, pte)) { disp |= PMAP_QUERY_PAGE_COMPRESSED; if (*pte & PTE_COMPRESSED_ALT) { disp |= PMAP_QUERY_PAGE_COMPRESSED_ALTACCT; @@ -2469,7 +2547,7 @@ pmap_query_page_info( } done: - PMAP_UNLOCK(pmap); + PMAP_UNLOCK_EXCLUSIVE(pmap); *disp_p = disp; return KERN_SUCCESS; } @@ -2503,26 +2581,29 @@ pmap_trim(__unused pmap_t grand, __unused pmap_t subord, __unused addr64_t vstar return; } -void pmap_ledger_alloc_init(size_t size) +void +pmap_ledger_alloc_init(size_t size) { panic("%s: unsupported, " - "size=%lu", - __func__, size); + "size=%lu", + __func__, size); } -ledger_t pmap_ledger_alloc(void) +ledger_t +pmap_ledger_alloc(void) { panic("%s: unsupported", - __func__); + __func__); return NULL; } -void pmap_ledger_free(ledger_t ledger) +void +pmap_ledger_free(ledger_t ledger) { panic("%s: unsupported, " - "ledger=%p", - 
__func__, ledger); + "ledger=%p", + __func__, ledger); } size_t @@ -2531,3 +2612,14 @@ pmap_dump_page_tables(pmap_t pmap __unused, void *bufp __unused, void *buf_end _ return (size_t)-1; } +void * +pmap_map_compressor_page(ppnum_t pn) +{ + assertf(IS_MANAGED_PAGE(ppn_to_pai(pn)), "%s called on non-managed page 0x%08x", __func__, pn); + return PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT); +} + +void +pmap_unmap_compressor_page(ppnum_t pn __unused, void *kva __unused) +{ +} diff --git a/osfmk/i386/postcode.h b/osfmk/i386/postcode.h index 82a4c0849..960b7d32c 100644 --- a/osfmk/i386/postcode.h +++ b/osfmk/i386/postcode.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _I386_POSTCODE_H_ -#define _I386_POSTCODE_H_ +#ifndef _I386_POSTCODE_H_ +#define _I386_POSTCODE_H_ /* * Postcodes are no longer enabled by default in the DEBUG kernel @@ -40,10 +40,10 @@ //#define POSTCODE_DELAY 1 /* The POSTCODE is port 0x80 */ -#define POSTPORT 0x80 +#define POSTPORT 0x80 -#define SPINCOUNT 300000000 -#define CPU_PAUSE() rep; nop +#define SPINCOUNT 300000000 +#define CPU_PAUSE() rep; nop #if DEBUG_POSTCODE /* @@ -51,151 +51,151 @@ * Additionally, if POSTCODE_DELAY, spin for about a second. 
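 *
 * (Not from the patch -- a rough estimate of that delay, assuming one
 * pause/decrement/branch iteration retires in about 1-3 cycles: a
 * SPINCOUNT of 300000000 iterations on a CPU in the low-GHz range works
 * out to a few tenths of a second up to roughly a second, which is all
 * the "about a second" above promises.)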
*/ #if POSTCODE_DELAY -#define POSTCODE_AL \ - outb %al,$(POSTPORT); \ - movl $(SPINCOUNT), %eax; \ -1: \ - CPU_PAUSE(); \ - decl %eax; \ +#define POSTCODE_AL \ + outb %al,$(POSTPORT); \ + movl $(SPINCOUNT), %eax; \ +1: \ + CPU_PAUSE(); \ + decl %eax; \ jne 1b -#define POSTCODE_AX \ - outw %ax,$(POSTPORT); \ - movl $(SPINCOUNT), %eax; \ -1: \ - CPU_PAUSE(); \ - decl %eax; \ +#define POSTCODE_AX \ + outw %ax,$(POSTPORT); \ + movl $(SPINCOUNT), %eax; \ +1: \ + CPU_PAUSE(); \ + decl %eax; \ jne 1b #else -#define POSTCODE_AL \ - outb %al,$(POSTPORT) -#define POSTCODE_AX \ - outw %ax,$(POSTPORT) +#define POSTCODE_AL \ + outb %al,$(POSTPORT) +#define POSTCODE_AX \ + outw %ax,$(POSTPORT) #endif /* POSTCODE_DELAY */ -#define POSTCODE(XX) \ - mov $(XX), %al; \ +#define POSTCODE(XX) \ + mov $(XX), %al; \ POSTCODE_AL -#define POSTCODE2(XXXX) \ - mov $(XXXX), %ax; \ +#define POSTCODE2(XXXX) \ + mov $(XXXX), %ax; \ POSTCODE_AX -/* Output byte value to postcode, without destoying register eax */ -#define POSTCODE_SAVE_EAX(XX) \ - push %eax; \ - POSTCODE(XX); \ +/* Output byte value to postcode, without destoying register eax */ +#define POSTCODE_SAVE_EAX(XX) \ + push %eax; \ + POSTCODE(XX); \ pop %eax /* * Display a 32-bit value to the post card - low byte to high byte * Entry: value in %ebx * Exit: %ebx preserved; %eax destroyed - */ -#define POSTCODE32_EBX \ - roll $8, %ebx; \ - movl %ebx, %eax; \ - POSTCODE_AL; \ - \ - roll $8, %ebx; \ - movl %ebx, %eax; \ - POSTCODE_AL; \ - \ - roll $8, %ebx; \ - movl %ebx, %eax; \ - POSTCODE_AL; \ - \ - roll $8, %ebx; \ - movl %ebx, %eax; \ + */ +#define POSTCODE32_EBX \ + roll $8, %ebx; \ + movl %ebx, %eax; \ + POSTCODE_AL; \ + \ + roll $8, %ebx; \ + movl %ebx, %eax; \ + POSTCODE_AL; \ + \ + roll $8, %ebx; \ + movl %ebx, %eax; \ + POSTCODE_AL; \ + \ + roll $8, %ebx; \ + movl %ebx, %eax; \ POSTCODE_AL -#else /* DEBUG_POSTCODE */ +#else /* DEBUG_POSTCODE */ #define POSTCODE_AL #define POSTCODE_AX #define POSTCODE(X) #define POSTCODE2(X) #define POSTCODE_SAVE_EAX(X) #define POSTCODE32_EBX -#endif /* DEBUG_POSTCODE */ +#endif /* DEBUG_POSTCODE */ /* * The following postcodes are defined for stages of early startup: */ -#define PSTART_ENTRY 0xFF -#define PSTART_REBASE 0xFE -#define PSTART_BEFORE_PAGING 0xFE -#define PSTART_VSTART 0xFD -#define VSTART_ENTRY 0xFC -#define VSTART_IDT_INIT 0xFB -#define VSTART_IDLE_PTS_INIT 0xFA -#define VSTART_PHYSMAP_INIT 0xF9 -#define VSTART_DESC_ALIAS_INIT 0xF8 -#define VSTART_SET_CR3 0xF7 -#define VSTART_CPU_DESC_INIT 0xF6 -#define VSTART_CPU_MODE_INIT 0xF5 -#define VSTART_EXIT 0xF4 -#define I386_INIT_ENTRY 0xF3 -#define CPU_INIT_D 0xF2 -#define PE_INIT_PLATFORM_D 0xF1 - -#define SLAVE_STARTPROG_ENTRY 0xEF -#define SLAVE_PSTART 0xEE -#define I386_INIT_SLAVE 0xED - -#define PANIC_DOUBLE_FAULT 0xDF /* Double Fault exception */ -#define PANIC_MACHINE_CHECK 0xDC /* Machine-Check */ -#define MP_KDP_ENTER 0xDB /* Debugger Begin */ -#define MP_KDP_EXIT 0xDE /* Debugger End */ -#define PANIC_HLT 0xD1 /* Die an early death */ -#define BOOT_TRAP_HLT 0xD0 /* D'oh! 
even earlier */ - -#define ACPI_WAKE_START_ENTRY 0xCF -#define ACPI_WAKE_PROT_ENTRY 0xCE -#define ACPI_WAKE_PAGED_ENTRY 0xCD - -#define CPU_DESC_LOAD_ENTRY 0xBF -#define CPU_DESC_LOAD_GS_BASE 0xBE -#define CPU_DESC_LOAD_KERNEL_GS_BASE 0xBD -#define CPU_DESC_LOAD_GDT 0xBC -#define CPU_DESC_LOAD_IDT 0xBB -#define CPU_DESC_LOAD_LDT 0xBA -#define CPU_DESC_LOAD_TSS 0xB9 -#define CPU_DESC_LOAD_EXIT 0xB7 +#define PSTART_ENTRY 0xFF +#define PSTART_REBASE 0xFE +#define PSTART_BEFORE_PAGING 0xFE +#define PSTART_VSTART 0xFD +#define VSTART_ENTRY 0xFC +#define VSTART_IDT_INIT 0xFB +#define VSTART_IDLE_PTS_INIT 0xFA +#define VSTART_PHYSMAP_INIT 0xF9 +#define VSTART_DESC_ALIAS_INIT 0xF8 +#define VSTART_SET_CR3 0xF7 +#define VSTART_CPU_DESC_INIT 0xF6 +#define VSTART_CPU_MODE_INIT 0xF5 +#define VSTART_EXIT 0xF4 +#define I386_INIT_ENTRY 0xF3 +#define CPU_INIT_D 0xF2 +#define PE_INIT_PLATFORM_D 0xF1 + +#define SLAVE_STARTPROG_ENTRY 0xEF +#define SLAVE_PSTART 0xEE +#define I386_INIT_SLAVE 0xED + +#define PANIC_DOUBLE_FAULT 0xDF /* Double Fault exception */ +#define PANIC_MACHINE_CHECK 0xDC /* Machine-Check */ +#define MP_KDP_ENTER 0xDB /* Debugger Begin */ +#define MP_KDP_EXIT 0xDE /* Debugger End */ +#define PANIC_HLT 0xD1 /* Die an early death */ +#define BOOT_TRAP_HLT 0xD0 /* D'oh! even earlier */ + +#define ACPI_WAKE_START_ENTRY 0xCF +#define ACPI_WAKE_PROT_ENTRY 0xCE +#define ACPI_WAKE_PAGED_ENTRY 0xCD + +#define CPU_DESC_LOAD_ENTRY 0xBF +#define CPU_DESC_LOAD_GS_BASE 0xBE +#define CPU_DESC_LOAD_KERNEL_GS_BASE 0xBD +#define CPU_DESC_LOAD_GDT 0xBC +#define CPU_DESC_LOAD_IDT 0xBB +#define CPU_DESC_LOAD_LDT 0xBA +#define CPU_DESC_LOAD_TSS 0xB9 +#define CPU_DESC_LOAD_EXIT 0xB7 #ifndef ASSEMBLER inline static void -_postcode_delay(uint32_t spincount) +_postcode_delay(uint32_t spincount) { - asm volatile("1: \n\t" - " rep; nop; \n\t" - " decl %%eax; \n\t" - " jne 1b" - : : "a" (spincount)); + asm volatile ("1: \n\t" + " rep; nop; \n\t" + " decl %%eax; \n\t" + " jne 1b" + : : "a" (spincount)); } inline static void -_postcode(uint8_t xx) +_postcode(uint8_t xx) { - asm volatile("outb %0, %1" : : "a" (xx), "N" (POSTPORT)); + asm volatile ("outb %0, %1" : : "a" (xx), "N" (POSTPORT)); } inline static void -_postcode2(uint16_t xxxx) +_postcode2(uint16_t xxxx) { - asm volatile("outw %0, %1" : : "a" (xxxx), "N" (POSTPORT)); + asm volatile ("outw %0, %1" : : "a" (xxxx), "N" (POSTPORT)); } -#if DEBUG_POSTCODE +#if DEBUG_POSTCODE inline static void -postcode(uint8_t xx) +postcode(uint8_t xx) { _postcode(xx); -#if POSTCODE_DELAY +#if POSTCODE_DELAY _postcode_delay(SPINCOUNT); #endif } inline static void -postcode2(uint8_t xxxx) +postcode2(uint8_t xxxx) { _postcode2(xxxx); -#if POSTCODE_DELAY +#if POSTCODE_DELAY _postcode_delay(SPINCOUNT); #endif } diff --git a/osfmk/i386/proc_reg.h b/osfmk/i386/proc_reg.h index 530f9e61c..cf5c384e4 100644 --- a/osfmk/i386/proc_reg.h +++ b/osfmk/i386/proc_reg.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ /* CMU_ENDHIST */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,358 +61,383 @@ /* * Processor registers for i386 and i486. */ -#ifndef _I386_PROC_REG_H_ -#define _I386_PROC_REG_H_ +#ifndef _I386_PROC_REG_H_ +#define _I386_PROC_REG_H_ /* * Model Specific Registers */ -#define MSR_P5_TSC 0x10 /* Time Stamp Register */ -#define MSR_P5_CESR 0x11 /* Control and Event Select Register */ -#define MSR_P5_CTR0 0x12 /* Counter #0 */ -#define MSR_P5_CTR1 0x13 /* Counter #1 */ - -#define MSR_P5_CESR_PC 0x0200 /* Pin Control */ -#define MSR_P5_CESR_CC 0x01C0 /* Counter Control mask */ -#define MSR_P5_CESR_ES 0x003F /* Event Control mask */ - -#define MSR_P5_CESR_SHIFT 16 /* Shift to get Counter 1 */ -#define MSR_P5_CESR_MASK (MSR_P5_CESR_PC|\ - MSR_P5_CESR_CC|\ - MSR_P5_CESR_ES) /* Mask Counter */ - -#define MSR_P5_CESR_CC_CLOCK 0x0100 /* Clock Counting (otherwise Event) */ -#define MSR_P5_CESR_CC_DISABLE 0x0000 /* Disable counter */ -#define MSR_P5_CESR_CC_CPL012 0x0040 /* Count if the CPL == 0, 1, 2 */ -#define MSR_P5_CESR_CC_CPL3 0x0080 /* Count if the CPL == 3 */ -#define MSR_P5_CESR_CC_CPL 0x00C0 /* Count regardless of the CPL */ - -#define MSR_P5_CESR_ES_DATA_READ 0x000000 /* Data Read */ -#define MSR_P5_CESR_ES_DATA_WRITE 0x000001 /* Data Write */ -#define MSR_P5_CESR_ES_DATA_RW 0x101000 /* Data Read or Write */ -#define MSR_P5_CESR_ES_DATA_TLB_MISS 0x000010 /* Data TLB Miss */ -#define MSR_P5_CESR_ES_DATA_READ_MISS 0x000011 /* Data Read Miss */ -#define MSR_P5_CESR_ES_DATA_WRITE_MISS 0x000100 /* Data Write Miss */ -#define MSR_P5_CESR_ES_DATA_RW_MISS 0x101001 /* Data Read or Write Miss */ -#define MSR_P5_CESR_ES_HIT_EM 0x000101 /* Write (hit) to M|E state */ -#define MSR_P5_CESR_ES_DATA_CACHE_WB 0x000110 /* Cache lines written back */ -#define MSR_P5_CESR_ES_EXTERNAL_SNOOP 0x000111 /* External Snoop */ -#define MSR_P5_CESR_ES_CACHE_SNOOP_HIT 0x001000 /* Data cache snoop hits */ -#define 
MSR_P5_CESR_ES_MEM_ACCESS_PIPE 0x001001 /* Mem. access in both pipes */ -#define MSR_P5_CESR_ES_BANK_CONFLICTS 0x001010 /* Bank conflicts */ -#define MSR_P5_CESR_ES_MISALIGNED 0x001011 /* Misaligned Memory or I/O */ -#define MSR_P5_CESR_ES_CODE_READ 0x001100 /* Code Read */ -#define MSR_P5_CESR_ES_CODE_TLB_MISS 0x001101 /* Code TLB miss */ -#define MSR_P5_CESR_ES_CODE_CACHE_MISS 0x001110 /* Code Cache miss */ -#define MSR_P5_CESR_ES_SEGMENT_LOADED 0x001111 /* Any segment reg. loaded */ -#define MSR_P5_CESR_ES_BRANCHE 0x010010 /* Branches */ -#define MSR_P5_CESR_ES_BTB_HIT 0x010011 /* BTB Hits */ -#define MSR_P5_CESR_ES_BRANCHE_BTB 0x010100 /* Taken branch or BTB Hit */ -#define MSR_P5_CESR_ES_PIPELINE_FLUSH 0x010101 /* Pipeline Flushes */ -#define MSR_P5_CESR_ES_INSTRUCTION 0x010110 /* Instruction executed */ -#define MSR_P5_CESR_ES_INSTRUCTION_V 0x010111 /* Inst. executed (v-pipe) */ -#define MSR_P5_CESR_ES_BUS_CYCLE 0x011000 /* Clocks while bus cycle */ -#define MSR_P5_CESR_ES_FULL_WRITE_BUF 0x011001 /* Clocks while full wrt buf. */ -#define MSR_P5_CESR_ES_DATA_MEM_READ 0x011010 /* Pipeline waiting for read */ -#define MSR_P5_CESR_ES_WRITE_EM 0x011011 /* Stall on write E|M state */ -#define MSR_P5_CESR_ES_LOCKED_CYCLE 0x011100 /* Locked bus cycles */ -#define MSR_P5_CESR_ES_IO_CYCLE 0x011101 /* I/O Read or Write cycles */ -#define MSR_P5_CESR_ES_NON_CACHEABLE 0x011110 /* Non-cacheable Mem. read */ -#define MSR_P5_CESR_ES_AGI 0x011111 /* Stall because of AGI */ -#define MSR_P5_CESR_ES_FLOP 0x100010 /* Floating Point operations */ -#define MSR_P5_CESR_ES_BREAK_DR0 0x100011 /* Breakpoint matches on DR0 */ -#define MSR_P5_CESR_ES_BREAK_DR1 0x100100 /* Breakpoint matches on DR1 */ -#define MSR_P5_CESR_ES_BREAK_DR2 0x100101 /* Breakpoint matches on DR2 */ -#define MSR_P5_CESR_ES_BREAK_DR3 0x100110 /* Breakpoint matches on DR3 */ -#define MSR_P5_CESR_ES_HARDWARE_IT 0x100111 /* Hardware interrupts */ +#define MSR_P5_TSC 0x10 /* Time Stamp Register */ +#define MSR_P5_CESR 0x11 /* Control and Event Select Register */ +#define MSR_P5_CTR0 0x12 /* Counter #0 */ +#define MSR_P5_CTR1 0x13 /* Counter #1 */ + +#define MSR_P5_CESR_PC 0x0200 /* Pin Control */ +#define MSR_P5_CESR_CC 0x01C0 /* Counter Control mask */ +#define MSR_P5_CESR_ES 0x003F /* Event Control mask */ + +#define MSR_P5_CESR_SHIFT 16 /* Shift to get Counter 1 */ +#define MSR_P5_CESR_MASK (MSR_P5_CESR_PC|\ + MSR_P5_CESR_CC|\ + MSR_P5_CESR_ES) /* Mask Counter */ + +#define MSR_P5_CESR_CC_CLOCK 0x0100 /* Clock Counting (otherwise Event) */ +#define MSR_P5_CESR_CC_DISABLE 0x0000 /* Disable counter */ +#define MSR_P5_CESR_CC_CPL012 0x0040 /* Count if the CPL == 0, 1, 2 */ +#define MSR_P5_CESR_CC_CPL3 0x0080 /* Count if the CPL == 3 */ +#define MSR_P5_CESR_CC_CPL 0x00C0 /* Count regardless of the CPL */ + +#define MSR_P5_CESR_ES_DATA_READ 0x000000 /* Data Read */ +#define MSR_P5_CESR_ES_DATA_WRITE 0x000001 /* Data Write */ +#define MSR_P5_CESR_ES_DATA_RW 0x101000 /* Data Read or Write */ +#define MSR_P5_CESR_ES_DATA_TLB_MISS 0x000010 /* Data TLB Miss */ +#define MSR_P5_CESR_ES_DATA_READ_MISS 0x000011 /* Data Read Miss */ +#define MSR_P5_CESR_ES_DATA_WRITE_MISS 0x000100 /* Data Write Miss */ +#define MSR_P5_CESR_ES_DATA_RW_MISS 0x101001 /* Data Read or Write Miss */ +#define MSR_P5_CESR_ES_HIT_EM 0x000101 /* Write (hit) to M|E state */ +#define MSR_P5_CESR_ES_DATA_CACHE_WB 0x000110 /* Cache lines written back */ +#define MSR_P5_CESR_ES_EXTERNAL_SNOOP 0x000111 /* External Snoop */ +#define MSR_P5_CESR_ES_CACHE_SNOOP_HIT 0x001000 /* Data cache 
snoop hits */ +#define MSR_P5_CESR_ES_MEM_ACCESS_PIPE 0x001001 /* Mem. access in both pipes */ +#define MSR_P5_CESR_ES_BANK_CONFLICTS 0x001010 /* Bank conflicts */ +#define MSR_P5_CESR_ES_MISALIGNED 0x001011 /* Misaligned Memory or I/O */ +#define MSR_P5_CESR_ES_CODE_READ 0x001100 /* Code Read */ +#define MSR_P5_CESR_ES_CODE_TLB_MISS 0x001101 /* Code TLB miss */ +#define MSR_P5_CESR_ES_CODE_CACHE_MISS 0x001110 /* Code Cache miss */ +#define MSR_P5_CESR_ES_SEGMENT_LOADED 0x001111 /* Any segment reg. loaded */ +#define MSR_P5_CESR_ES_BRANCHE 0x010010 /* Branches */ +#define MSR_P5_CESR_ES_BTB_HIT 0x010011 /* BTB Hits */ +#define MSR_P5_CESR_ES_BRANCHE_BTB 0x010100 /* Taken branch or BTB Hit */ +#define MSR_P5_CESR_ES_PIPELINE_FLUSH 0x010101 /* Pipeline Flushes */ +#define MSR_P5_CESR_ES_INSTRUCTION 0x010110 /* Instruction executed */ +#define MSR_P5_CESR_ES_INSTRUCTION_V 0x010111 /* Inst. executed (v-pipe) */ +#define MSR_P5_CESR_ES_BUS_CYCLE 0x011000 /* Clocks while bus cycle */ +#define MSR_P5_CESR_ES_FULL_WRITE_BUF 0x011001 /* Clocks while full wrt buf. */ +#define MSR_P5_CESR_ES_DATA_MEM_READ 0x011010 /* Pipeline waiting for read */ +#define MSR_P5_CESR_ES_WRITE_EM 0x011011 /* Stall on write E|M state */ +#define MSR_P5_CESR_ES_LOCKED_CYCLE 0x011100 /* Locked bus cycles */ +#define MSR_P5_CESR_ES_IO_CYCLE 0x011101 /* I/O Read or Write cycles */ +#define MSR_P5_CESR_ES_NON_CACHEABLE 0x011110 /* Non-cacheable Mem. read */ +#define MSR_P5_CESR_ES_AGI 0x011111 /* Stall because of AGI */ +#define MSR_P5_CESR_ES_FLOP 0x100010 /* Floating Point operations */ +#define MSR_P5_CESR_ES_BREAK_DR0 0x100011 /* Breakpoint matches on DR0 */ +#define MSR_P5_CESR_ES_BREAK_DR1 0x100100 /* Breakpoint matches on DR1 */ +#define MSR_P5_CESR_ES_BREAK_DR2 0x100101 /* Breakpoint matches on DR2 */ +#define MSR_P5_CESR_ES_BREAK_DR3 0x100110 /* Breakpoint matches on DR3 */ +#define MSR_P5_CESR_ES_HARDWARE_IT 0x100111 /* Hardware interrupts */ /* * CR0 */ -#define CR0_PG 0x80000000 /* Enable paging */ -#define CR0_CD 0x40000000 /* i486: Cache disable */ -#define CR0_NW 0x20000000 /* i486: No write-through */ -#define CR0_AM 0x00040000 /* i486: Alignment check mask */ -#define CR0_WP 0x00010000 /* i486: Write-protect kernel access */ -#define CR0_NE 0x00000020 /* i486: Handle numeric exceptions */ -#define CR0_ET 0x00000010 /* Extension type is 80387 */ - /* (not official) */ -#define CR0_TS 0x00000008 /* Task switch */ -#define CR0_EM 0x00000004 /* Emulate coprocessor */ -#define CR0_MP 0x00000002 /* Monitor coprocessor */ -#define CR0_PE 0x00000001 /* Enable protected mode */ +#define CR0_PG 0x80000000 /* Enable paging */ +#define CR0_CD 0x40000000 /* i486: Cache disable */ +#define CR0_NW 0x20000000 /* i486: No write-through */ +#define CR0_AM 0x00040000 /* i486: Alignment check mask */ +#define CR0_WP 0x00010000 /* i486: Write-protect kernel access */ +#define CR0_NE 0x00000020 /* i486: Handle numeric exceptions */ +#define CR0_ET 0x00000010 /* Extension type is 80387 */ + /* (not official) */ +#define CR0_TS 0x00000008 /* Task switch */ +#define CR0_EM 0x00000004 /* Emulate coprocessor */ +#define CR0_MP 0x00000002 /* Monitor coprocessor */ +#define CR0_PE 0x00000001 /* Enable protected mode */ /* * CR4 */ -#define CR4_SEE 0x00008000 /* Secure Enclave Enable XXX */ -#define CR4_SMAP 0x00200000 /* Supervisor-Mode Access Protect */ -#define CR4_SMEP 0x00100000 /* Supervisor-Mode Execute Protect */ -#define CR4_OSXSAVE 0x00040000 /* OS supports XSAVE */ -#define CR4_PCIDE 0x00020000 /* PCID Enable */ -#define 
CR4_RDWRFSGS 0x00010000 /* RDWRFSGS Enable */ -#define CR4_SMXE 0x00004000 /* Enable SMX operation */ -#define CR4_VMXE 0x00002000 /* Enable VMX operation */ -#define CR4_OSXMM 0x00000400 /* SSE/SSE2 exception support in OS */ -#define CR4_OSFXS 0x00000200 /* SSE/SSE2 OS supports FXSave */ -#define CR4_PCE 0x00000100 /* Performance-Monitor Count Enable */ -#define CR4_PGE 0x00000080 /* Page Global Enable */ -#define CR4_MCE 0x00000040 /* Machine Check Exceptions */ -#define CR4_PAE 0x00000020 /* Physical Address Extensions */ -#define CR4_PSE 0x00000010 /* Page Size Extensions */ -#define CR4_DE 0x00000008 /* Debugging Extensions */ -#define CR4_TSD 0x00000004 /* Time Stamp Disable */ -#define CR4_PVI 0x00000002 /* Protected-mode Virtual Interrupts */ -#define CR4_VME 0x00000001 /* Virtual-8086 Mode Extensions */ +#define CR4_SEE 0x00008000 /* Secure Enclave Enable XXX */ +#define CR4_SMAP 0x00200000 /* Supervisor-Mode Access Protect */ +#define CR4_SMEP 0x00100000 /* Supervisor-Mode Execute Protect */ +#define CR4_OSXSAVE 0x00040000 /* OS supports XSAVE */ +#define CR4_PCIDE 0x00020000 /* PCID Enable */ +#define CR4_RDWRFSGS 0x00010000 /* RDWRFSGS Enable */ +#define CR4_SMXE 0x00004000 /* Enable SMX operation */ +#define CR4_VMXE 0x00002000 /* Enable VMX operation */ +#define CR4_OSXMM 0x00000400 /* SSE/SSE2 exception support in OS */ +#define CR4_OSFXS 0x00000200 /* SSE/SSE2 OS supports FXSave */ +#define CR4_PCE 0x00000100 /* Performance-Monitor Count Enable */ +#define CR4_PGE 0x00000080 /* Page Global Enable */ +#define CR4_MCE 0x00000040 /* Machine Check Exceptions */ +#define CR4_PAE 0x00000020 /* Physical Address Extensions */ +#define CR4_PSE 0x00000010 /* Page Size Extensions */ +#define CR4_DE 0x00000008 /* Debugging Extensions */ +#define CR4_TSD 0x00000004 /* Time Stamp Disable */ +#define CR4_PVI 0x00000002 /* Protected-mode Virtual Interrupts */ +#define CR4_VME 0x00000001 /* Virtual-8086 Mode Extensions */ /* * XCR0 - XFEATURE_ENABLED_MASK (a.k.a. 
XFEM) register */ -#define XCR0_X87 (1ULL << 0) /* x87, FPU/MMX (always set) */ -#define XCR0_SSE (1ULL << 1) /* SSE supported by XSAVE/XRESTORE */ -#define XCR0_YMM (1ULL << 2) /* YMM state available */ -#define XCR0_BNDREGS (1ULL << 3) /* MPX Bounds register state */ -#define XCR0_BNDCSR (1ULL << 4) /* MPX Bounds configuration/state */ +#define XCR0_X87 (1ULL << 0) /* x87, FPU/MMX (always set) */ +#define XCR0_SSE (1ULL << 1) /* SSE supported by XSAVE/XRESTORE */ +#define XCR0_YMM (1ULL << 2) /* YMM state available */ +#define XCR0_BNDREGS (1ULL << 3) /* MPX Bounds register state */ +#define XCR0_BNDCSR (1ULL << 4) /* MPX Bounds configuration/state */ #if !defined(RC_HIDE_XNU_J137) -#define XCR0_OPMASK (1ULL << 5) /* Opmask register state */ -#define XCR0_ZMM_HI256 (1ULL << 6) /* ZMM upper 256-bit state */ -#define XCR0_HI16_ZMM (1ULL << 7) /* ZMM16..ZMM31 512-bit state */ +#define XCR0_OPMASK (1ULL << 5) /* Opmask register state */ +#define XCR0_ZMM_HI256 (1ULL << 6) /* ZMM upper 256-bit state */ +#define XCR0_HI16_ZMM (1ULL << 7) /* ZMM16..ZMM31 512-bit state */ #endif /* not RC_HIDE_XNU_J137 */ -#define XFEM_X87 XCR0_X87 -#define XFEM_SSE XCR0_SSE -#define XFEM_YMM XCR0_YMM -#define XFEM_BNDREGS XCR0_BNDREGS -#define XFEM_BNDCSR XCR0_BNDCSR +#define XFEM_X87 XCR0_X87 +#define XFEM_SSE XCR0_SSE +#define XFEM_YMM XCR0_YMM +#define XFEM_BNDREGS XCR0_BNDREGS +#define XFEM_BNDCSR XCR0_BNDCSR #if !defined(XNU_HODE_J137) -#define XFEM_OPMASK XCR0_OPMASK -#define XFEM_ZMM_HI256 XCR0_ZMM_HI256 -#define XFEM_HI16_ZMM XCR0_HI16_ZMM -#define XFEM_ZMM (XFEM_ZMM_HI256 | XFEM_HI16_ZMM | XFEM_OPMASK) +#define XFEM_OPMASK XCR0_OPMASK +#define XFEM_ZMM_HI256 XCR0_ZMM_HI256 +#define XFEM_HI16_ZMM XCR0_HI16_ZMM +#define XFEM_ZMM (XFEM_ZMM_HI256 | XFEM_HI16_ZMM | XFEM_OPMASK) #endif /* not XNU_HODE_J137 */ #define XCR0 (0) -#define PMAP_PCID_PRESERVE (1ULL << 63) -#define PMAP_PCID_MASK (0xFFF) +#define PMAP_PCID_PRESERVE (1ULL << 63) +#define PMAP_PCID_MASK (0xFFF) + +#define EARLY_GSBASE_MAGIC 0xffffdeadbeefee00 /* * If thread groups are needed for x86, set this to 1 */ #define CONFIG_THREAD_GROUPS 0 -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include #include __BEGIN_DECLS -#define set_ts() set_cr0(get_cr0() | CR0_TS) +#define set_ts() set_cr0(get_cr0() | CR0_TS) -static inline uint16_t get_es(void) +static inline uint16_t +get_es(void) { uint16_t es; - __asm__ volatile("mov %%es, %0" : "=r" (es)); + __asm__ volatile ("mov %%es, %0" : "=r" (es)); return es; } -static inline void set_es(uint16_t es) +static inline void +set_es(uint16_t es) { - __asm__ volatile("mov %0, %%es" : : "r" (es)); + __asm__ volatile ("mov %0, %%es" : : "r" (es)); } -static inline uint16_t get_ds(void) +static inline uint16_t +get_ds(void) { uint16_t ds; - __asm__ volatile("mov %%ds, %0" : "=r" (ds)); + __asm__ volatile ("mov %%ds, %0" : "=r" (ds)); return ds; } -static inline void set_ds(uint16_t ds) +static inline void +set_ds(uint16_t ds) { - __asm__ volatile("mov %0, %%ds" : : "r" (ds)); + __asm__ volatile ("mov %0, %%ds" : : "r" (ds)); } -static inline uint16_t get_fs(void) +static inline uint16_t +get_fs(void) { uint16_t fs; - __asm__ volatile("mov %%fs, %0" : "=r" (fs)); + __asm__ volatile ("mov %%fs, %0" : "=r" (fs)); return fs; } -static inline void set_fs(uint16_t fs) +static inline void +set_fs(uint16_t fs) { - __asm__ volatile("mov %0, %%fs" : : "r" (fs)); + __asm__ volatile ("mov %0, %%fs" : : "r" (fs)); } -static inline uint16_t get_gs(void) +static inline uint16_t +get_gs(void) { uint16_t gs; - __asm__ volatile("mov 
%%gs, %0" : "=r" (gs)); + __asm__ volatile ("mov %%gs, %0" : "=r" (gs)); return gs; } -static inline void set_gs(uint16_t gs) +static inline void +set_gs(uint16_t gs) { - __asm__ volatile("mov %0, %%gs" : : "r" (gs)); + __asm__ volatile ("mov %0, %%gs" : : "r" (gs)); } -static inline uint16_t get_ss(void) +static inline uint16_t +get_ss(void) { uint16_t ss; - __asm__ volatile("mov %%ss, %0" : "=r" (ss)); + __asm__ volatile ("mov %%ss, %0" : "=r" (ss)); return ss; } -static inline void set_ss(uint16_t ss) +static inline void +set_ss(uint16_t ss) { - __asm__ volatile("mov %0, %%ss" : : "r" (ss)); + __asm__ volatile ("mov %0, %%ss" : : "r" (ss)); } -static inline uintptr_t get_cr0(void) +static inline uintptr_t +get_cr0(void) { - uintptr_t cr0; - __asm__ volatile("mov %%cr0, %0" : "=r" (cr0)); - return(cr0); + uintptr_t cr0; + __asm__ volatile ("mov %%cr0, %0" : "=r" (cr0)); + return cr0; } -static inline void set_cr0(uintptr_t value) +static inline void +set_cr0(uintptr_t value) { - __asm__ volatile("mov %0, %%cr0" : : "r" (value)); + __asm__ volatile ("mov %0, %%cr0" : : "r" (value)); } -static inline uintptr_t get_cr2(void) +static inline uintptr_t +get_cr2(void) { uintptr_t cr2; - __asm__ volatile("mov %%cr2, %0" : "=r" (cr2)); - return(cr2); + __asm__ volatile ("mov %%cr2, %0" : "=r" (cr2)); + return cr2; } -static inline uintptr_t get_cr3_raw(void) +static inline uintptr_t +get_cr3_raw(void) { uintptr_t cr3; - __asm__ volatile("mov %%cr3, %0" : "=r" (cr3)); - return(cr3); + __asm__ volatile ("mov %%cr3, %0" : "=r" (cr3)); + return cr3; } -static inline void set_cr3_raw(uintptr_t value) +static inline void +set_cr3_raw(uintptr_t value) { - __asm__ volatile("mov %0, %%cr3" : : "r" (value)); + __asm__ volatile ("mov %0, %%cr3" : : "r" (value)); } -static inline uintptr_t get_cr3_base(void) +static inline uintptr_t +get_cr3_base(void) { uintptr_t cr3; - __asm__ volatile("mov %%cr3, %0" : "=r" (cr3)); - return(cr3 & ~(0xFFFULL)); + __asm__ volatile ("mov %%cr3, %0" : "=r" (cr3)); + return cr3 & ~(0xFFFULL); } -static inline void set_cr3_composed(uintptr_t base, uint16_t pcid, uint64_t preserve) +static inline void +set_cr3_composed(uintptr_t base, uint16_t pcid, uint64_t preserve) { - __asm__ volatile("mov %0, %%cr3" : : "r" (base | pcid | ( (preserve) << 63) ) ); + __asm__ volatile ("mov %0, %%cr3" : : "r" (base | pcid | ( (preserve) << 63) )); } -static inline uintptr_t get_cr4(void) +static inline uintptr_t +get_cr4(void) { uintptr_t cr4; - __asm__ volatile("mov %%cr4, %0" : "=r" (cr4)); - return(cr4); + __asm__ volatile ("mov %%cr4, %0" : "=r" (cr4)); + return cr4; } -static inline void set_cr4(uintptr_t value) +static inline void +set_cr4(uintptr_t value) { - __asm__ volatile("mov %0, %%cr4" : : "r" (value)); + __asm__ volatile ("mov %0, %%cr4" : : "r" (value)); } -static inline uintptr_t x86_get_flags(void) +static inline uintptr_t +x86_get_flags(void) { uintptr_t erflags; - __asm__ volatile("pushf; pop %0" : "=r" (erflags)); + __asm__ volatile ("pushf; pop %0" : "=r" (erflags)); return erflags; } -static inline void clear_ts(void) +static inline void +clear_ts(void) { - __asm__ volatile("clts"); + __asm__ volatile ("clts"); } -static inline unsigned short get_tr(void) +static inline unsigned short +get_tr(void) { - unsigned short seg; - __asm__ volatile("str %0" : "=rm" (seg)); - return(seg); + unsigned short seg; + __asm__ volatile ("str %0" : "=rm" (seg)); + return seg; } -static inline void set_tr(unsigned int seg) +static inline void +set_tr(unsigned int seg) { - __asm__ 
volatile("ltr %0" : : "rm" ((unsigned short)(seg))); + __asm__ volatile ("ltr %0" : : "rm" ((unsigned short)(seg))); } -static inline unsigned short sldt(void) +static inline unsigned short +sldt(void) { unsigned short seg; - __asm__ volatile("sldt %0" : "=rm" (seg)); - return(seg); + __asm__ volatile ("sldt %0" : "=rm" (seg)); + return seg; } -static inline void lldt(unsigned int seg) +static inline void +lldt(unsigned int seg) { - __asm__ volatile("lldt %0" : : "rm" ((unsigned short)(seg))); + __asm__ volatile ("lldt %0" : : "rm" ((unsigned short)(seg))); } -static inline void lgdt(uintptr_t *desc) +static inline void +lgdt(uintptr_t *desc) { - __asm__ volatile("lgdt %0" : : "m" (*desc)); + __asm__ volatile ("lgdt %0" : : "m" (*desc)); } -static inline void lidt(uintptr_t *desc) +static inline void +lidt(uintptr_t *desc) { - __asm__ volatile("lidt %0" : : "m" (*desc)); + __asm__ volatile ("lidt %0" : : "m" (*desc)); } -static inline void swapgs(void) +static inline void +swapgs(void) { - __asm__ volatile("swapgs"); + __asm__ volatile ("swapgs"); } -static inline void hlt(void) +static inline void +hlt(void) { - __asm__ volatile("hlt"); + __asm__ volatile ("hlt"); } #ifdef MACH_KERNEL_PRIVATE -static inline void flush_tlb_raw(void) -{ - uintptr_t cr4 = get_cr4(); - if (cr4 & CR4_PGE) { - set_cr4(cr4 & ~CR4_PGE); - set_cr4(cr4 | CR4_PGE); - } else { - set_cr3_raw(get_cr3_raw()); - } -} extern int rdmsr64_carefully(uint32_t msr, uint64_t *val); extern int wrmsr64_carefully(uint32_t msr, uint64_t val); -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -static inline void wbinvd(void) +static inline void +wbinvd(void) { - __asm__ volatile("wbinvd"); + __asm__ volatile ("wbinvd"); } -static inline void invlpg(uintptr_t addr) +static inline void +invlpg(uintptr_t addr) { - __asm__ volatile("invlpg (%0)" :: "r" (addr) : "memory"); + __asm__ volatile ("invlpg (%0)" :: "r" (addr) : "memory"); } -static inline void clac(void) +static inline void +clac(void) { - __asm__ volatile("clac"); + __asm__ volatile ("clac"); } -static inline void stac(void) +static inline void +stac(void) { - __asm__ volatile("stac"); + __asm__ volatile ("stac"); } /* @@ -421,21 +446,21 @@ static inline void stac(void) * pointer indirection), this allows gcc to optimize better */ -#define rdmsr(msr,lo,hi) \ +#define rdmsr(msr, lo, hi) \ __asm__ volatile("rdmsr" : "=a" (lo), "=d" (hi) : "c" (msr)) -#define wrmsr(msr,lo,hi) \ +#define wrmsr(msr, lo, hi) \ __asm__ volatile("wrmsr" : : "c" (msr), "a" (lo), "d" (hi)) -#define rdtsc(lo,hi) \ - __asm__ volatile("lfence; rdtsc; lfence" : "=a" (lo), "=d" (hi)) +#define rdtsc(lo, hi) \ + __asm__ volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi)) -#define rdtsc_nofence(lo,hi) \ +#define rdtsc_nofence(lo, hi) \ __asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi)) -#define write_tsc(lo,hi) wrmsr(0x10, lo, hi) +#define write_tsc(lo, hi) wrmsr(0x10, lo, hi) -#define rdpmc(counter,lo,hi) \ +#define rdpmc(counter, lo, hi) \ __asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter)) #ifdef XNU_KERNEL_PRIVATE @@ -444,39 +469,44 @@ extern void do_mfence(void); #endif #ifdef __LP64__ -static inline uint64_t rdpmc64(uint32_t pmc) +static inline uint64_t +rdpmc64(uint32_t pmc) { - uint32_t lo=0, hi=0; + uint32_t lo = 0, hi = 0; rdpmc(pmc, lo, hi); return (((uint64_t)hi) << 32) | ((uint64_t)lo); } -static inline uint64_t rdmsr64(uint32_t msr) +static inline uint64_t +rdmsr64(uint32_t msr) { - uint32_t lo=0, hi=0; + uint32_t lo = 0, hi = 0; rdmsr(msr, lo, hi); return 
(((uint64_t)hi) << 32) | ((uint64_t)lo); } -static inline void wrmsr64(uint32_t msr, uint64_t val) +static inline void +wrmsr64(uint32_t msr, uint64_t val) { wrmsr(msr, (val & 0xFFFFFFFFUL), ((val >> 32) & 0xFFFFFFFFUL)); } -static inline uint64_t rdtsc64(void) +static inline uint64_t +rdtsc64(void) { uint64_t lo, hi; rdtsc(lo, hi); return ((hi) << 32) | (lo); } -static inline uint64_t rdtscp64(uint32_t *aux) +static inline uint64_t +rdtscp64(uint32_t *aux) { uint64_t lo, hi; - __asm__ volatile("rdtscp; mov %%ecx, %1" - : "=a" (lo), "=d" (hi), "=m" (*aux) - : - : "ecx"); + __asm__ volatile ("rdtscp; mov %%ecx, %1" + : "=a" (lo), "=d" (hi), "=m" (*aux) + : + : "ecx"); return ((hi) << 32) | (lo); } #endif /* __LP64__ */ @@ -489,174 +519,185 @@ static inline uint64_t rdtscp64(uint32_t *aux) extern int rdmsr_carefully(uint32_t msr, uint32_t *lo, uint32_t *hi); __END_DECLS -#endif /* ASSEMBLER */ - -#define MSR_IA32_P5_MC_ADDR 0 -#define MSR_IA32_P5_MC_TYPE 1 -#define MSR_IA32_PLATFORM_ID 0x17 -#define MSR_IA32_EBL_CR_POWERON 0x2a - -#define MSR_IA32_APIC_BASE 0x1b -#define MSR_IA32_APIC_BASE_BSP (1<<8) -#define MSR_IA32_APIC_BASE_EXTENDED (1<<10) -#define MSR_IA32_APIC_BASE_ENABLE (1<<11) -#define MSR_IA32_APIC_BASE_BASE (0xfffff<<12) - -#define MSR_CORE_THREAD_COUNT 0x35 - -#define MSR_IA32_FEATURE_CONTROL 0x3a -#define MSR_IA32_FEATCTL_LOCK (1<<0) -#define MSR_IA32_FEATCTL_VMXON_SMX (1<<1) -#define MSR_IA32_FEATCTL_VMXON (1<<2) -#define MSR_IA32_FEATCTL_CSTATE_SMI (1<<16) - -#define MSR_IA32_UPDT_TRIG 0x79 -#define MSR_IA32_BIOS_SIGN_ID 0x8b -#define MSR_IA32_UCODE_WRITE MSR_IA32_UPDT_TRIG -#define MSR_IA32_UCODE_REV MSR_IA32_BIOS_SIGN_ID - -#define MSR_IA32_PERFCTR0 0xc1 -#define MSR_IA32_PERFCTR1 0xc2 -#define MSR_IA32_PERFCTR3 0xc3 -#define MSR_IA32_PERFCTR4 0xc4 - -#define MSR_PLATFORM_INFO 0xce - -#define MSR_IA32_MPERF 0xE7 -#define MSR_IA32_APERF 0xE8 - -#define MSR_IA32_BBL_CR_CTL 0x119 - -#define MSR_IA32_SYSENTER_CS 0x174 -#define MSR_IA32_SYSENTER_ESP 0x175 -#define MSR_IA32_SYSENTER_EIP 0x176 - -#define MSR_IA32_MCG_CAP 0x179 -#define MSR_IA32_MCG_STATUS 0x17a -#define MSR_IA32_MCG_CTL 0x17b - -#define MSR_IA32_EVNTSEL0 0x186 -#define MSR_IA32_EVNTSEL1 0x187 -#define MSR_IA32_EVNTSEL2 0x188 -#define MSR_IA32_EVNTSEL3 0x189 - -#define MSR_FLEX_RATIO 0x194 -#define MSR_IA32_PERF_STS 0x198 -#define MSR_IA32_PERF_CTL 0x199 -#define MSR_IA32_CLOCK_MODULATION 0x19a - -#define MSR_IA32_MISC_ENABLE 0x1a0 - - -#define MSR_IA32_PACKAGE_THERM_STATUS 0x1b1 -#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x1b2 - -#define MSR_IA32_DEBUGCTLMSR 0x1d9 -#define MSR_IA32_LASTBRANCHFROMIP 0x1db -#define MSR_IA32_LASTBRANCHTOIP 0x1dc -#define MSR_IA32_LASTINTFROMIP 0x1dd -#define MSR_IA32_LASTINTTOIP 0x1de - -#define MSR_IA32_CR_PAT 0x277 - -#define MSR_IA32_MTRRCAP 0xfe -#define MSR_IA32_MTRR_DEF_TYPE 0x2ff -#define MSR_IA32_MTRR_PHYSBASE(n) (0x200 + 2*(n)) -#define MSR_IA32_MTRR_PHYSMASK(n) (0x200 + 2*(n) + 1) -#define MSR_IA32_MTRR_FIX64K_00000 0x250 -#define MSR_IA32_MTRR_FIX16K_80000 0x258 -#define MSR_IA32_MTRR_FIX16K_A0000 0x259 -#define MSR_IA32_MTRR_FIX4K_C0000 0x268 -#define MSR_IA32_MTRR_FIX4K_C8000 0x269 -#define MSR_IA32_MTRR_FIX4K_D0000 0x26a -#define MSR_IA32_MTRR_FIX4K_D8000 0x26b -#define MSR_IA32_MTRR_FIX4K_E0000 0x26c -#define MSR_IA32_MTRR_FIX4K_E8000 0x26d -#define MSR_IA32_MTRR_FIX4K_F0000 0x26e -#define MSR_IA32_MTRR_FIX4K_F8000 0x26f - -#define MSR_IA32_PERF_FIXED_CTR0 0x309 - -#define MSR_IA32_PERF_FIXED_CTR_CTRL 0x38D -#define MSR_IA32_PERF_GLOBAL_STATUS 0x38E -#define 
MSR_IA32_PERF_GLOBAL_CTRL 0x38F -#define MSR_IA32_PERF_GLOBAL_OVF_CTRL 0x390 - -#define MSR_IA32_PKG_C3_RESIDENCY 0x3F8 -#define MSR_IA32_PKG_C6_RESIDENCY 0x3F9 -#define MSR_IA32_PKG_C7_RESIDENCY 0x3FA - -#define MSR_IA32_CORE_C3_RESIDENCY 0x3FC -#define MSR_IA32_CORE_C6_RESIDENCY 0x3FD -#define MSR_IA32_CORE_C7_RESIDENCY 0x3FE - -#define MSR_IA32_MC0_CTL 0x400 -#define MSR_IA32_MC0_STATUS 0x401 -#define MSR_IA32_MC0_ADDR 0x402 -#define MSR_IA32_MC0_MISC 0x403 - -#define MSR_IA32_VMX_BASE 0x480 -#define MSR_IA32_VMX_BASIC MSR_IA32_VMX_BASE -#define MSR_IA32_VMX_PINBASED_CTLS MSR_IA32_VMX_BASE+1 -#define MSR_IA32_VMX_PROCBASED_CTLS MSR_IA32_VMX_BASE+2 -#define MSR_IA32_VMX_EXIT_CTLS MSR_IA32_VMX_BASE+3 -#define MSR_IA32_VMX_ENTRY_CTLS MSR_IA32_VMX_BASE+4 -#define MSR_IA32_VMX_MISC MSR_IA32_VMX_BASE+5 -#define MSR_IA32_VMX_CR0_FIXED0 MSR_IA32_VMX_BASE+6 -#define MSR_IA32_VMX_CR0_FIXED1 MSR_IA32_VMX_BASE+7 -#define MSR_IA32_VMX_CR4_FIXED0 MSR_IA32_VMX_BASE+8 -#define MSR_IA32_VMX_CR4_FIXED1 MSR_IA32_VMX_BASE+9 -#define MSR_IA32_VMX_VMCS_ENUM MSR_IA32_VMX_BASE+10 -#define MSR_IA32_VMX_PROCBASED_CTLS2 MSR_IA32_VMX_BASE+11 -#define MSR_IA32_VMX_EPT_VPID_CAP MSR_IA32_VMX_BASE+12 -#define MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT 21 -#define MSR_IA32_VMX_TRUE_PINBASED_CTLS MSR_IA32_VMX_BASE+13 -#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS MSR_IA32_VMX_BASE+14 -#define MSR_IA32_VMX_TRUE_VMEXIT_CTLS MSR_IA32_VMX_BASE+15 -#define MSR_IA32_VMX_TRUE_VMENTRY_CTLS MSR_IA32_VMX_BASE+16 -#define MSR_IA32_VMX_VMFUNC MSR_IA32_VMX_BASE+17 - -#define MSR_IA32_DS_AREA 0x600 - -#define MSR_IA32_PKG_POWER_SKU_UNIT 0x606 -#define MSR_IA32_PKG_C2_RESIDENCY 0x60D -#define MSR_IA32_PKG_ENERGY_STATUS 0x611 -#define MSR_IA32_DDR_ENERGY_STATUS 0x619 -#define MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER 0x61D -#define MSR_IA32_RING_PERF_STATUS 0x621 - -#define MSR_IA32_PKG_C8_RESIDENCY 0x630 -#define MSR_IA32_PKG_C9_RESIDENCY 0x631 -#define MSR_IA32_PKG_C10_RESIDENCY 0x632 - -#define MSR_IA32_PP0_ENERGY_STATUS 0x639 -#define MSR_IA32_PP1_ENERGY_STATUS 0x641 -#define MSR_IA32_IA_PERF_LIMIT_REASONS_SKL 0x64F - -#define MSR_IA32_IA_PERF_LIMIT_REASONS 0x690 -#define MSR_IA32_GT_PERF_LIMIT_REASONS 0x6B0 - -#define MSR_IA32_TSC_DEADLINE 0x6e0 - -#define MSR_IA32_EFER 0xC0000080 -#define MSR_IA32_EFER_SCE 0x00000001 -#define MSR_IA32_EFER_LME 0x00000100 -#define MSR_IA32_EFER_LMA 0x00000400 -#define MSR_IA32_EFER_NXE 0x00000800 - -#define MSR_IA32_STAR 0xC0000081 -#define MSR_IA32_LSTAR 0xC0000082 -#define MSR_IA32_CSTAR 0xC0000083 -#define MSR_IA32_FMASK 0xC0000084 - -#define MSR_IA32_FS_BASE 0xC0000100 -#define MSR_IA32_GS_BASE 0xC0000101 -#define MSR_IA32_KERNEL_GS_BASE 0xC0000102 -#define MSR_IA32_TSC_AUX 0xC0000103 - -#define HV_VMX_EPTP_MEMORY_TYPE_UC 0x0 -#define HV_VMX_EPTP_MEMORY_TYPE_WB 0x6 -#define HV_VMX_EPTP_WALK_LENGTH(wl) (0ULL | ((((wl) - 1) & 0x7) << 3)) -#define HV_VMX_EPTP_ENABLE_AD_FLAGS (1ULL << 6) - -#endif /* _I386_PROC_REG_H_ */ +#endif /* ASSEMBLER */ + +#define MSR_IA32_P5_MC_ADDR 0 +#define MSR_IA32_P5_MC_TYPE 1 +#define MSR_IA32_PLATFORM_ID 0x17 +#define MSR_IA32_EBL_CR_POWERON 0x2a + +#define MSR_IA32_APIC_BASE 0x1b +#define MSR_IA32_APIC_BASE_BSP (1<<8) +#define MSR_IA32_APIC_BASE_EXTENDED (1<<10) +#define MSR_IA32_APIC_BASE_ENABLE (1<<11) +#define MSR_IA32_APIC_BASE_BASE (0xfffff<<12) + +#define MSR_CORE_THREAD_COUNT 0x35 + +#define MSR_IA32_FEATURE_CONTROL 0x3a +#define MSR_IA32_FEATCTL_LOCK (1<<0) +#define MSR_IA32_FEATCTL_VMXON_SMX (1<<1) +#define MSR_IA32_FEATCTL_VMXON (1<<2) +#define MSR_IA32_FEATCTL_CSTATE_SMI 
(1<<16) + +#define MSR_IA32_UPDT_TRIG 0x79 +#define MSR_IA32_BIOS_SIGN_ID 0x8b +#define MSR_IA32_UCODE_WRITE MSR_IA32_UPDT_TRIG +#define MSR_IA32_UCODE_REV MSR_IA32_BIOS_SIGN_ID + +#define MSR_IA32_PERFCTR0 0xc1 +#define MSR_IA32_PERFCTR1 0xc2 +#define MSR_IA32_PERFCTR3 0xc3 +#define MSR_IA32_PERFCTR4 0xc4 + +#define MSR_PLATFORM_INFO 0xce + +#define MSR_IA32_MPERF 0xE7 +#define MSR_IA32_APERF 0xE8 + +#define MSR_IA32_ARCH_CAPABILITIES 0x10a +#define MSR_IA32_ARCH_CAPABILITIES_RDCL_NO (1ULL << 0) +#define MSR_IA32_ARCH_CAPABILITIES_IBRS_ALL (1ULL << 1) +#define MSR_IA32_ARCH_CAPABILITIES_RSBA (1ULL << 2) +#define MSR_IA32_ARCH_CAPABILITIES_L1DF_NO (1ULL << 3) +#define MSR_IA32_ARCH_CAPABILITIES_SSB_NO (1ULL << 4) +#define MSR_IA32_ARCH_CAPABILITIES_MDS_NO (1ULL << 5) + +#define MSR_IA32_TSX_FORCE_ABORT 0x10f +#define MSR_IA32_TSXFA_RTM_FORCE_ABORT (1ULL << 0) /* Bit 0 */ + +#define MSR_IA32_BBL_CR_CTL 0x119 + +#define MSR_IA32_SYSENTER_CS 0x174 +#define MSR_IA32_SYSENTER_ESP 0x175 +#define MSR_IA32_SYSENTER_EIP 0x176 + +#define MSR_IA32_MCG_CAP 0x179 +#define MSR_IA32_MCG_STATUS 0x17a +#define MSR_IA32_MCG_CTL 0x17b + +#define MSR_IA32_EVNTSEL0 0x186 +#define MSR_IA32_EVNTSEL1 0x187 +#define MSR_IA32_EVNTSEL2 0x188 +#define MSR_IA32_EVNTSEL3 0x189 + +#define MSR_FLEX_RATIO 0x194 +#define MSR_IA32_PERF_STS 0x198 +#define MSR_IA32_PERF_CTL 0x199 +#define MSR_IA32_CLOCK_MODULATION 0x19a + +#define MSR_IA32_MISC_ENABLE 0x1a0 + + +#define MSR_IA32_PACKAGE_THERM_STATUS 0x1b1 +#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x1b2 + +#define MSR_IA32_DEBUGCTLMSR 0x1d9 +#define MSR_IA32_LASTBRANCHFROMIP 0x1db +#define MSR_IA32_LASTBRANCHTOIP 0x1dc +#define MSR_IA32_LASTINTFROMIP 0x1dd +#define MSR_IA32_LASTINTTOIP 0x1de + +#define MSR_IA32_CR_PAT 0x277 + +#define MSR_IA32_MTRRCAP 0xfe +#define MSR_IA32_MTRR_DEF_TYPE 0x2ff +#define MSR_IA32_MTRR_PHYSBASE(n) (0x200 + 2*(n)) +#define MSR_IA32_MTRR_PHYSMASK(n) (0x200 + 2*(n) + 1) +#define MSR_IA32_MTRR_FIX64K_00000 0x250 +#define MSR_IA32_MTRR_FIX16K_80000 0x258 +#define MSR_IA32_MTRR_FIX16K_A0000 0x259 +#define MSR_IA32_MTRR_FIX4K_C0000 0x268 +#define MSR_IA32_MTRR_FIX4K_C8000 0x269 +#define MSR_IA32_MTRR_FIX4K_D0000 0x26a +#define MSR_IA32_MTRR_FIX4K_D8000 0x26b +#define MSR_IA32_MTRR_FIX4K_E0000 0x26c +#define MSR_IA32_MTRR_FIX4K_E8000 0x26d +#define MSR_IA32_MTRR_FIX4K_F0000 0x26e +#define MSR_IA32_MTRR_FIX4K_F8000 0x26f + +#define MSR_IA32_PERF_FIXED_CTR0 0x309 + +#define MSR_IA32_PERF_FIXED_CTR_CTRL 0x38D +#define MSR_IA32_PERF_GLOBAL_STATUS 0x38E +#define MSR_IA32_PERF_GLOBAL_CTRL 0x38F +#define MSR_IA32_PERF_GLOBAL_OVF_CTRL 0x390 + +#define MSR_IA32_PKG_C3_RESIDENCY 0x3F8 +#define MSR_IA32_PKG_C6_RESIDENCY 0x3F9 +#define MSR_IA32_PKG_C7_RESIDENCY 0x3FA + +#define MSR_IA32_CORE_C3_RESIDENCY 0x3FC +#define MSR_IA32_CORE_C6_RESIDENCY 0x3FD +#define MSR_IA32_CORE_C7_RESIDENCY 0x3FE + +#define MSR_IA32_MC0_CTL 0x400 +#define MSR_IA32_MC0_STATUS 0x401 +#define MSR_IA32_MC0_ADDR 0x402 +#define MSR_IA32_MC0_MISC 0x403 + +#define MSR_IA32_VMX_BASE 0x480 +#define MSR_IA32_VMX_BASIC MSR_IA32_VMX_BASE +#define MSR_IA32_VMX_PINBASED_CTLS MSR_IA32_VMX_BASE+1 +#define MSR_IA32_VMX_PROCBASED_CTLS MSR_IA32_VMX_BASE+2 +#define MSR_IA32_VMX_EXIT_CTLS MSR_IA32_VMX_BASE+3 +#define MSR_IA32_VMX_ENTRY_CTLS MSR_IA32_VMX_BASE+4 +#define MSR_IA32_VMX_MISC MSR_IA32_VMX_BASE+5 +#define MSR_IA32_VMX_CR0_FIXED0 MSR_IA32_VMX_BASE+6 +#define MSR_IA32_VMX_CR0_FIXED1 MSR_IA32_VMX_BASE+7 +#define MSR_IA32_VMX_CR4_FIXED0 MSR_IA32_VMX_BASE+8 +#define MSR_IA32_VMX_CR4_FIXED1 
MSR_IA32_VMX_BASE+9 +#define MSR_IA32_VMX_VMCS_ENUM MSR_IA32_VMX_BASE+10 +#define MSR_IA32_VMX_PROCBASED_CTLS2 MSR_IA32_VMX_BASE+11 +#define MSR_IA32_VMX_EPT_VPID_CAP MSR_IA32_VMX_BASE+12 +#define MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT 21 +#define MSR_IA32_VMX_TRUE_PINBASED_CTLS MSR_IA32_VMX_BASE+13 +#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS MSR_IA32_VMX_BASE+14 +#define MSR_IA32_VMX_TRUE_VMEXIT_CTLS MSR_IA32_VMX_BASE+15 +#define MSR_IA32_VMX_TRUE_VMENTRY_CTLS MSR_IA32_VMX_BASE+16 +#define MSR_IA32_VMX_VMFUNC MSR_IA32_VMX_BASE+17 + +#define MSR_IA32_DS_AREA 0x600 + +#define MSR_IA32_PKG_POWER_SKU_UNIT 0x606 +#define MSR_IA32_PKG_C2_RESIDENCY 0x60D +#define MSR_IA32_PKG_ENERGY_STATUS 0x611 +#define MSR_IA32_DDR_ENERGY_STATUS 0x619 +#define MSR_IA32_LLC_FLUSHED_RESIDENCY_TIMER 0x61D +#define MSR_IA32_RING_PERF_STATUS 0x621 + +#define MSR_IA32_PKG_C8_RESIDENCY 0x630 +#define MSR_IA32_PKG_C9_RESIDENCY 0x631 +#define MSR_IA32_PKG_C10_RESIDENCY 0x632 + +#define MSR_IA32_PP0_ENERGY_STATUS 0x639 +#define MSR_IA32_PP1_ENERGY_STATUS 0x641 +#define MSR_IA32_IA_PERF_LIMIT_REASONS_SKL 0x64F + +#define MSR_IA32_IA_PERF_LIMIT_REASONS 0x690 +#define MSR_IA32_GT_PERF_LIMIT_REASONS 0x6B0 + +#define MSR_IA32_TSC_DEADLINE 0x6e0 + +#define MSR_IA32_EFER 0xC0000080 +#define MSR_IA32_EFER_SCE 0x00000001 +#define MSR_IA32_EFER_LME 0x00000100 +#define MSR_IA32_EFER_LMA 0x00000400 +#define MSR_IA32_EFER_NXE 0x00000800 + +#define MSR_IA32_STAR 0xC0000081 +#define MSR_IA32_LSTAR 0xC0000082 +#define MSR_IA32_CSTAR 0xC0000083 +#define MSR_IA32_FMASK 0xC0000084 + +#define MSR_IA32_FS_BASE 0xC0000100 +#define MSR_IA32_GS_BASE 0xC0000101 +#define MSR_IA32_KERNEL_GS_BASE 0xC0000102 +#define MSR_IA32_TSC_AUX 0xC0000103 + +#define HV_VMX_EPTP_MEMORY_TYPE_UC 0x0 +#define HV_VMX_EPTP_MEMORY_TYPE_WB 0x6 +#define HV_VMX_EPTP_WALK_LENGTH(wl) (0ULL | ((((wl) - 1) & 0x7) << 3)) +#define HV_VMX_EPTP_ENABLE_AD_FLAGS (1ULL << 6) + +#endif /* _I386_PROC_REG_H_ */ diff --git a/osfmk/i386/rtclock.c b/osfmk/i386/rtclock.c index c8abc4b1e..fa269748c 100644 --- a/osfmk/i386/rtclock.c +++ b/osfmk/i386/rtclock.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -53,7 +53,7 @@ #include #include #include -#include /* for kernel_map */ +#include /* for kernel_map */ #include #include #include @@ -69,14 +69,14 @@ #include #include #include -#define UI_CPUFREQ_ROUNDING_FACTOR 10000000 +#define UI_CPUFREQ_ROUNDING_FACTOR 10000000 -int rtclock_init(void); +int rtclock_init(void); -uint64_t tsc_rebase_abs_time = 0; +uint64_t tsc_rebase_abs_time = 0; -static void rtc_set_timescale(uint64_t cycles); -static uint64_t rtc_export_speed(uint64_t cycles); +static void rtc_set_timescale(uint64_t cycles); +static uint64_t rtc_export_speed(uint64_t cycles); void rtc_timer_start(void) @@ -115,12 +115,12 @@ _absolutetime_to_nanotime(uint64_t abstime, clock_sec_t *secs, clock_usec_t *nan * used to maintain a monotonic clock, adjusted from an outside reference as needed. * * The kernel maintains nanotime information recording: - * - the ratio of tsc to nanoseconds + * - the ratio of tsc to nanoseconds * with this ratio expressed as a 32-bit scale and shift * (power of 2 divider); * - { tsc_base, ns_base } pair of corresponding timestamps. * - * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage + * The tuple {tsc_base, ns_base, scale, shift} is exported in the commpage * for the userspace nanotime routine to read. * * All of the routines which update the nanotime data are non-reentrant. This must @@ -140,7 +140,7 @@ rtc_nanotime_set_commpage(pal_rtc_nanotime_t *rntp) static inline void _rtc_nanotime_init(pal_rtc_nanotime_t *rntp, uint64_t base) { - uint64_t tsc = rdtsc64(); + uint64_t tsc = rdtsc64(); _pal_rtc_nanotime_store(tsc, base, rntp->scale, rntp->shift, rntp); } @@ -162,7 +162,7 @@ rtc_nanotime_init(uint64_t base) void rtc_nanotime_init_commpage(void) { - spl_t s = splclock(); + spl_t s = splclock(); rtc_nanotime_set_commpage(&pal_rtc_nanotime_info); splx(s); @@ -177,7 +177,7 @@ rtc_nanotime_init_commpage(void) static inline uint64_t rtc_nanotime_read(void) { - return _rtc_nanotime_read(&pal_rtc_nanotime_info); + return _rtc_nanotime_read(&pal_rtc_nanotime_info); } /* @@ -190,23 +190,23 @@ rtc_nanotime_read(void) void rtc_clock_napped(uint64_t base, uint64_t tsc_base) { - pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info; - uint64_t oldnsecs; - uint64_t newnsecs; - uint64_t tsc; + pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info; + uint64_t oldnsecs; + uint64_t newnsecs; + uint64_t tsc; assert(!ml_get_interrupts_enabled()); tsc = rdtsc64(); oldnsecs = rntp->ns_base + _rtc_tsc_to_nanoseconds(tsc - rntp->tsc_base, rntp); newnsecs = base + _rtc_tsc_to_nanoseconds(tsc - tsc_base, rntp); - + /* * Only update the base values if time using the new base values * is later than the time using the old base values. */ if (oldnsecs < newnsecs) { - _pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp); - rtc_nanotime_set_commpage(rntp); + _pal_rtc_nanotime_store(tsc_base, base, rntp->scale, rntp->shift, rntp); + rtc_nanotime_set_commpage(rntp); } } @@ -219,24 +219,24 @@ rtc_clock_napped(uint64_t base, uint64_t tsc_base) void rtc_clock_adjust(uint64_t tsc_base_delta) { - pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info; + pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info; - assert(!ml_get_interrupts_enabled()); - assert(tsc_base_delta < 100ULL); /* i.e. it's small */ - _rtc_nanotime_adjust(tsc_base_delta, rntp); - rtc_nanotime_set_commpage(rntp); + assert(!ml_get_interrupts_enabled()); + assert(tsc_base_delta < 100ULL); /* i.e. 
it's small */ + _rtc_nanotime_adjust(tsc_base_delta, rntp); + rtc_nanotime_set_commpage(rntp); } void rtc_clock_stepping(__unused uint32_t new_frequency, - __unused uint32_t old_frequency) + __unused uint32_t old_frequency) { panic("rtc_clock_stepping unsupported"); } void rtc_clock_stepped(__unused uint32_t new_frequency, - __unused uint32_t old_frequency) + __unused uint32_t old_frequency) { panic("rtc_clock_stepped unsupported"); } @@ -252,9 +252,9 @@ rtc_clock_stepped(__unused uint32_t new_frequency, */ void rtc_sleep_wakeup( - uint64_t base) + uint64_t base) { - /* Set fixed configuration for lapic timers */ + /* Set fixed configuration for lapic timers */ rtc_timer->rtc_config(); /* @@ -266,7 +266,8 @@ rtc_sleep_wakeup( } void -rtc_decrementer_configure(void) { +rtc_decrementer_configure(void) +{ rtc_timer->rtc_config(); } /* @@ -287,12 +288,11 @@ rtclock_early_init(void) int rtclock_init(void) { - uint64_t cycles; + uint64_t cycles; assert(!ml_get_interrupts_enabled()); if (cpu_number() == master_cpu) { - assert(tscFreq); /* @@ -313,29 +313,29 @@ rtclock_init(void) ml_init_delay_spin_threshold(10); } - /* Set fixed configuration for lapic timers */ + /* Set fixed configuration for lapic timers */ rtc_timer->rtc_config(); rtc_timer_start(); - return (1); + return 1; } -// utility routine +// utility routine // Code to calculate how many processor cycles are in a second... static void rtc_set_timescale(uint64_t cycles) { - pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info; + pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info; uint32_t shift = 0; - + /* the "scale" factor will overflow unless cycles>SLOW_TSC_THRESHOLD */ - - while ( cycles <= SLOW_TSC_THRESHOLD) { + + while (cycles <= SLOW_TSC_THRESHOLD) { shift++; cycles <<= 1; } - + rntp->scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles); rntp->shift = shift; @@ -346,9 +346,10 @@ rtc_set_timescale(uint64_t cycles) * mach_absolute_time(). Instead, we convert the TSC delta since boot * to nanoseconds. */ - if (tsc_rebase_abs_time == 0) + if (tsc_rebase_abs_time == 0) { tsc_rebase_abs_time = _rtc_tsc_to_nanoseconds( - rdtsc64() - tsc_at_boot, rntp); + rdtsc64() - tsc_at_boot, rntp); + } rtc_nanotime_init(0); } @@ -356,47 +357,48 @@ rtc_set_timescale(uint64_t cycles) static uint64_t rtc_export_speed(uint64_t cyc_per_sec) { - pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info; - uint64_t cycles; + pal_rtc_nanotime_t *rntp = &pal_rtc_nanotime_info; + uint64_t cycles; - if (rntp->shift != 0 ) + if (rntp->shift != 0) { printf("Slow TSC, rtc_nanotime.shift == %d\n", rntp->shift); - + } + /* Round: */ - cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR/2)) - / UI_CPUFREQ_ROUNDING_FACTOR) - * UI_CPUFREQ_ROUNDING_FACTOR; + cycles = ((cyc_per_sec + (UI_CPUFREQ_ROUNDING_FACTOR / 2)) + / UI_CPUFREQ_ROUNDING_FACTOR) + * UI_CPUFREQ_ROUNDING_FACTOR; /* * Set current measured speed. 
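rtc_set_timescale() above turns a TSC frequency into 32-bit fixed point: cycles are doubled (counting shift) until they exceed SLOW_TSC_THRESHOLD, then scale = (NSEC_PER_SEC << 32) / cycles. Converting a TSC delta back is then ns = ((delta << shift) * scale) >> 32. A minimal sketch of that math; the threshold value here is illustrative, and the kernel performs the wide multiply in assembly rather than via __uint128_t:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC        1000000000ULL
#define SLOW_TSC_THRESHOLD  1000000000ULL   /* illustrative ~1 GHz cutoff */

/* Mirror of rtc_set_timescale(): derive a 32-bit scale and a shift. */
static void
set_timescale(uint64_t cycles_per_sec, uint32_t *scale, uint32_t *shift)
{
	*shift = 0;
	while (cycles_per_sec <= SLOW_TSC_THRESHOLD) {
		(*shift)++;
		cycles_per_sec <<= 1;
	}
	*scale = (uint32_t)(((uint64_t)NSEC_PER_SEC << 32) / cycles_per_sec);
}

/* ns = delta * NSEC_PER_SEC / freq, done as ((delta << shift) * scale) >> 32. */
static uint64_t
tsc_to_ns(uint64_t delta, uint32_t scale, uint32_t shift)
{
	__uint128_t wide = ((__uint128_t)delta << shift) * scale;
	return (uint64_t)(wide >> 32);
}

int main(void)
{
	uint32_t scale, shift;

	set_timescale(2400000000ULL, &scale, &shift);  /* 2.4 GHz: shift stays 0 */
	printf("scale=%u shift=%u\n", scale, shift);
	/* One second's worth of TSC ticks comes back as ~1e9 ns (rounds down). */
	printf("1s of TSC -> %llu ns\n",
	    (unsigned long long)tsc_to_ns(2400000000ULL, scale, shift));
	return 0;
}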
*/ - if (cycles >= 0x100000000ULL) { - gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL; - } else { - gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles; - } - gPEClockFrequencyInfo.cpu_frequency_hz = cycles; + if (cycles >= 0x100000000ULL) { + gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFFUL; + } else { + gPEClockFrequencyInfo.cpu_clock_rate_hz = (unsigned long)cycles; + } + gPEClockFrequencyInfo.cpu_frequency_hz = cycles; kprintf("[RTCLOCK] frequency %llu (%llu)\n", cycles, cyc_per_sec); - return(cycles); + return cycles; } void clock_get_system_microtime( - clock_sec_t *secs, - clock_usec_t *microsecs) + clock_sec_t *secs, + clock_usec_t *microsecs) { - uint64_t now = rtc_nanotime_read(); + uint64_t now = rtc_nanotime_read(); _absolutetime_to_microtime(now, secs, microsecs); } void clock_get_system_nanotime( - clock_sec_t *secs, - clock_nsec_t *nanosecs) + clock_sec_t *secs, + clock_nsec_t *nanosecs) { - uint64_t now = rtc_nanotime_read(); + uint64_t now = rtc_nanotime_read(); _absolutetime_to_nanotime(now, secs, nanosecs); } @@ -409,39 +411,41 @@ clock_gettimeofday_set_commpage(uint64_t abstime, uint64_t sec, uint64_t frac, u void clock_timebase_info( - mach_timebase_info_t info) + mach_timebase_info_t info) { info->numer = info->denom = 1; -} +} /* * Real-time clock device interrupt. */ void rtclock_intr( - x86_saved_state_t *tregs) + x86_saved_state_t *tregs) { - uint64_t rip; - boolean_t user_mode = FALSE; + uint64_t rip; + boolean_t user_mode = FALSE; assert(get_preemption_level() > 0); assert(!ml_get_interrupts_enabled()); if (is_saved_state64(tregs) == TRUE) { - x86_saved_state64_t *regs; - + x86_saved_state64_t *regs; + regs = saved_state64(tregs); - if (regs->isf.cs & 0x03) + if (regs->isf.cs & 0x03) { user_mode = TRUE; + } rip = regs->isf.rip; } else { - x86_saved_state32_t *regs; + x86_saved_state32_t *regs; regs = saved_state32(tregs); - if (regs->cs & 0x03) - user_mode = TRUE; + if (regs->cs & 0x03) { + user_mode = TRUE; + } rip = regs->eip; } @@ -451,28 +455,28 @@ rtclock_intr( /* - * Request timer pop from the hardware + * Request timer pop from the hardware */ uint64_t setPop(uint64_t time) { - uint64_t now; - uint64_t pop; + uint64_t now; + uint64_t pop; /* 0 and EndOfAllTime are special-cases for "clear the timer" */ - if (time == 0 || time == EndOfAllTime ) { + if (time == 0 || time == EndOfAllTime) { time = EndOfAllTime; now = 0; pop = rtc_timer->rtc_set(0, 0); } else { - now = rtc_nanotime_read(); /* The time in nanoseconds */ + now = rtc_nanotime_read(); /* The time in nanoseconds */ pop = rtc_timer->rtc_set(time, now); } /* Record requested and actual deadlines set */ x86_lcpu()->rtcDeadline = time; - x86_lcpu()->rtcPop = pop; + x86_lcpu()->rtcPop = pop; return pop - now; } @@ -491,43 +495,43 @@ mach_approximate_time(void) void clock_interval_to_absolutetime_interval( - uint32_t interval, - uint32_t scale_factor, - uint64_t *result) + uint32_t interval, + uint32_t scale_factor, + uint64_t *result) { *result = (uint64_t)interval * scale_factor; } void absolutetime_to_microtime( - uint64_t abstime, - clock_sec_t *secs, - clock_usec_t *microsecs) + uint64_t abstime, + clock_sec_t *secs, + clock_usec_t *microsecs) { _absolutetime_to_microtime(abstime, secs, microsecs); } void nanotime_to_absolutetime( - clock_sec_t secs, - clock_nsec_t nanosecs, - uint64_t *result) + clock_sec_t secs, + clock_nsec_t nanosecs, + uint64_t *result) { *result = ((uint64_t)secs * NSEC_PER_SEC) + nanosecs; } void absolutetime_to_nanoseconds( - uint64_t abstime, - 
uint64_t *result) + uint64_t abstime, + uint64_t *result) { *result = abstime; } void nanoseconds_to_absolutetime( - uint64_t nanoseconds, - uint64_t *result) + uint64_t nanoseconds, + uint64_t *result) { *result = nanoseconds; } @@ -535,10 +539,10 @@ nanoseconds_to_absolutetime( void machine_delay_until( uint64_t interval, - uint64_t deadline) + uint64_t deadline) { (void)interval; while (mach_absolute_time() < deadline) { cpu_pause(); - } + } } diff --git a/osfmk/i386/rtclock_native.c b/osfmk/i386/rtclock_native.c index 13cde8e79..720b743e8 100644 --- a/osfmk/i386/rtclock_native.c +++ b/osfmk/i386/rtclock_native.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -50,21 +50,21 @@ #include #include -static uint64_t rtc_decrementer_min; -static uint64_t rtc_decrementer_max; +static uint64_t rtc_decrementer_min; +static uint64_t rtc_decrementer_max; static uint64_t deadline_to_decrementer( - uint64_t deadline, - uint64_t now) + uint64_t deadline, + uint64_t now) { - uint64_t delta; + uint64_t delta; - if (deadline <= now) + if (deadline <= now) { return rtc_decrementer_min; - else { + } else { delta = deadline - now; - return MIN(MAX(rtc_decrementer_min,delta),rtc_decrementer_max); + return MIN(MAX(rtc_decrementer_min, delta), rtc_decrementer_max); } } @@ -131,7 +131,7 @@ rtc_lapic_set_tsc_deadline_timer(uint64_t deadline, uint64_t now) } else { lapic_set_tsc_deadline_timer(0); } - + KERNEL_DEBUG_CONSTANT( DECR_SET_TSC_DEADLINE | DBG_FUNC_NONE, now, deadline, @@ -139,23 +139,23 @@ rtc_lapic_set_tsc_deadline_timer(uint64_t deadline, uint64_t now) 0); return set; -} +} /* * Definitions for timer operations table */ -rtc_timer_t rtc_timer_lapic = { +rtc_timer_t rtc_timer_lapic = { rtc_lapic_config_timer, rtc_lapic_set_timer }; -rtc_timer_t rtc_timer_tsc_deadline = { +rtc_timer_t rtc_timer_tsc_deadline = { rtc_lapic_config_tsc_deadline_timer, rtc_lapic_set_tsc_deadline_timer }; -rtc_timer_t *rtc_timer = &rtc_timer_lapic; /* defaults to LAPIC timer */ +rtc_timer_t *rtc_timer = &rtc_timer_lapic; /* defaults to LAPIC timer */ /* * rtc_timer_init() is called at startup on the boot processor only. 
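deadline_to_decrementer() above clamps the requested delta into [rtc_decrementer_min, rtc_decrementer_max], and a deadline already in the past degenerates to the minimum so the timer still fires promptly. A standalone sketch with illustrative bounds:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Illustrative bounds: a 1 usec floor and a LAPIC-limited ceiling. */
static const uint64_t decrementer_min = 1000ULL;        /* 1 usec in ns */
static const uint64_t decrementer_max = 0x7fffffffULL;  /* hypothetical cap */

static uint64_t
deadline_to_decrementer(uint64_t deadline, uint64_t now)
{
	if (deadline <= now) {
		return decrementer_min;  /* already due: fire as soon as allowed */
	}
	return MIN(MAX(decrementer_min, deadline - now), decrementer_max);
}

int main(void)
{
	/* Past deadline clamps to the floor; a far deadline clamps to the cap. */
	printf("%llu\n", (unsigned long long)deadline_to_decrementer(50, 100));
	printf("%llu\n", (unsigned long long)
	    deadline_to_decrementer(100 + (1ULL << 40), 100));
	return 0;
}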
@@ -163,33 +163,33 @@ rtc_timer_t *rtc_timer = &rtc_timer_lapic; /* defaults to LAPIC timer */ void rtc_timer_init(void) { - int TSC_deadline_timer = 0; + int TSC_deadline_timer = 0; /* See whether we can use the local apic in TSC-deadline mode */ if ((cpuid_features() & CPUID_FEATURE_TSCTMR)) { TSC_deadline_timer = 1; PE_parse_boot_argn("TSC_deadline_timer", &TSC_deadline_timer, - sizeof(TSC_deadline_timer)); + sizeof(TSC_deadline_timer)); printf("TSC Deadline Timer supported %s enabled\n", - TSC_deadline_timer ? "and" : "but not"); + TSC_deadline_timer ? "and" : "but not"); } if (TSC_deadline_timer) { rtc_timer = &rtc_timer_tsc_deadline; - rtc_decrementer_max = UINT64_MAX; /* effectively none */ + rtc_decrementer_max = UINT64_MAX; /* effectively none */ /* * The min could be as low as 1nsec, * but we're being conservative for now and making it the same * as for the local apic timer. */ - rtc_decrementer_min = 1*NSEC_PER_USEC; /* 1 usec */ + rtc_decrementer_min = 1 * NSEC_PER_USEC; /* 1 usec */ } else { /* * Compute the longest interval using LAPIC timer. */ rtc_decrementer_max = tmrCvt(0x7fffffffULL, busFCvtt2n); kprintf("maxDec: %lld\n", rtc_decrementer_max); - rtc_decrementer_min = 1*NSEC_PER_USEC; /* 1 usec */ + rtc_decrementer_min = 1 * NSEC_PER_USEC; /* 1 usec */ } /* Point LAPIC interrupts to hardclock() */ diff --git a/osfmk/i386/rtclock_protos.h b/osfmk/i386/rtclock_protos.h index b2c1bd529..019b51ec1 100644 --- a/osfmk/i386/rtclock_protos.h +++ b/osfmk/i386/rtclock_protos.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -43,32 +43,32 @@ typedef struct pal_rtc_nanotime pal_rtc_nanotime_t; extern uint64_t tsc_rebase_abs_time; -extern void _rtc_nanotime_adjust( - uint64_t tsc_base_delta, - pal_rtc_nanotime_t *dst); +extern void _rtc_nanotime_adjust( + uint64_t tsc_base_delta, + pal_rtc_nanotime_t *dst); -extern uint64_t _rtc_nanotime_read( - pal_rtc_nanotime_t *rntp); +extern uint64_t _rtc_nanotime_read( + pal_rtc_nanotime_t *rntp); extern uint64_t _rtc_tsc_to_nanoseconds( - uint64_t value, - pal_rtc_nanotime_t *rntp); + uint64_t value, + pal_rtc_nanotime_t *rntp); -extern void rtclock_intr(x86_saved_state_t *regs); +extern void rtclock_intr(x86_saved_state_t *regs); /* * Timer control. 
*/ typedef struct { - void (*rtc_config)(void); - uint64_t (*rtc_set) (uint64_t, uint64_t); + void (*rtc_config)(void); + uint64_t (*rtc_set)(uint64_t, uint64_t); } rtc_timer_t; -extern rtc_timer_t *rtc_timer; +extern rtc_timer_t *rtc_timer; -extern void rtc_timer_init(void); +extern void rtc_timer_init(void); -extern void rtclock_early_init(void); -extern void rtc_nanotime_init(uint64_t); -extern void rtc_decrementer_configure(void); +extern void rtclock_early_init(void); +extern void rtc_nanotime_init(uint64_t); +extern void rtc_decrementer_configure(void); #endif /* _I386_RTCLOCK_PROTOS_H_ */ diff --git a/osfmk/i386/sched_param.h b/osfmk/i386/sched_param.h index 113049fdd..52bf64d6f 100644 --- a/osfmk/i386/sched_param.h +++ b/osfmk/i386/sched_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,7 +61,7 @@ * Scheduler parameters. */ -#ifndef _I386_SCHED_PARAM_H_ -#define _I386_SCHED_PARAM_H_ +#ifndef _I386_SCHED_PARAM_H_ +#define _I386_SCHED_PARAM_H_ #endif /* _I386_SCHED_PARAM_H_ */ diff --git a/osfmk/i386/seg.h b/osfmk/i386/seg.h index d5760f7aa..c456b17ef 100644 --- a/osfmk/i386/seg.h +++ b/osfmk/i386/seg.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. 
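The rtc_timer_t table above is a small ops-vtable: rtclock code always calls through the rtc_timer pointer, and rtc_timer_init() retargets it once at boot if the CPU supports TSC-deadline mode. A compilable sketch of the pattern with hypothetical backends standing in for the LAPIC routines:

#include <stdint.h>
#include <stdio.h>

typedef struct {
	void     (*rtc_config)(void);
	uint64_t (*rtc_set)(uint64_t deadline, uint64_t now);
} rtc_timer_t;

/* Hypothetical stand-ins for the LAPIC one-shot backend. */
static void lapic_config(void) { puts("configure LAPIC one-shot"); }
static uint64_t
lapic_set(uint64_t deadline, uint64_t now)
{
	printf("arm LAPIC for %llu ticks\n", (unsigned long long)(deadline - now));
	return deadline;
}

/* Hypothetical stand-ins for the TSC-deadline backend. */
static void tscdl_config(void) { puts("configure TSC-deadline mode"); }
static uint64_t
tscdl_set(uint64_t deadline, uint64_t now)
{
	(void)now;
	printf("write IA32_TSC_DEADLINE = %llu\n", (unsigned long long)deadline);
	return deadline;
}

static rtc_timer_t timer_lapic = { lapic_config, lapic_set };
static rtc_timer_t timer_tscdl = { tscdl_config, tscdl_set };
static rtc_timer_t *rtc_timer  = &timer_lapic;  /* default, as in the hunk */

int main(void)
{
	int has_tsc_deadline = 1;      /* stands in for the CPUID feature check */

	if (has_tsc_deadline) {
		rtc_timer = &timer_tscdl;  /* one-time switch at boot */
	}
	rtc_timer->rtc_config();
	rtc_timer->rtc_set(1000, 100);
	return 0;
}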
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,39 +22,39 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifndef _I386_SEG_H_ -#define _I386_SEG_H_ +#ifndef _I386_SEG_H_ +#define _I386_SEG_H_ #ifndef __ASSEMBLER__ #include #include @@ -65,43 +65,43 @@ */ static inline uint16_t -sel_to_selector(sel_t sel) +sel_to_selector(sel_t sel) { - union { - sel_t sel; - uint16_t selector; - } tconv; - - tconv.sel = sel; - - return (tconv.selector); + union { + sel_t sel; + uint16_t selector; + } tconv; + + tconv.sel = sel; + + return tconv.selector; } static inline sel_t selector_to_sel(uint16_t selector) { - union { - uint16_t selector; - sel_t sel; - } tconv; - - tconv.selector = selector; - - return (tconv.sel); + union { + uint16_t selector; + sel_t sel; + } tconv; + + tconv.selector = selector; + + return tconv.sel; } -#define LDTSZ_MAX 8192 /* maximal size of the kernel ldt in entries */ -#define LDTSZ_DFL (128) -#define LDTSZ (LDTSZ_MAX) -#define LDTSZ_MIN SEL_TO_INDEX(USER_SETTABLE) - /* kernel ldt entries */ +#define LDTSZ_MAX 8192 /* maximal size of the kernel ldt in entries */ +#define LDTSZ_DFL (128) +#define LDTSZ (LDTSZ_MAX) +#define LDTSZ_MIN SEL_TO_INDEX(USER_SETTABLE) +/* kernel ldt entries */ -#define GDTSZ 19 +#define GDTSZ 19 /* * Interrupt table is always 256 entries long. */ -#define IDTSZ 256 +#define IDTSZ 256 #include @@ -109,44 +109,44 @@ selector_to_sel(uint16_t selector) * Real segment descriptor. 
*/ struct real_descriptor { - uint32_t limit_low:16, /* limit 0..15 */ - base_low:16, /* base 0..15 */ - base_med:8, /* base 16..23 */ - access:8, /* access byte */ - limit_high:4, /* limit 16..19 */ - granularity:4, /* granularity */ - base_high:8; /* base 24..31 */ + uint32_t limit_low:16, /* limit 0..15 */ + base_low:16, /* base 0..15 */ + base_med:8, /* base 16..23 */ + access:8, /* access byte */ + limit_high:4, /* limit 16..19 */ + granularity:4, /* granularity */ + base_high:8; /* base 24..31 */ }; struct real_descriptor64 { - uint32_t limit_low16:16, /* limit 0..15 */ - base_low16:16, /* base 0..15 */ - base_med8:8, /* base 16..23 */ - access8:8, /* access byte */ - limit_high4:4, /* limit 16..19 */ - granularity4:4, /* granularity */ - base_high8:8, /* base 24..31 */ - base_top32:32, /* base 32..63 */ - reserved32:32; /* reserved/zero */ + uint32_t limit_low16:16, /* limit 0..15 */ + base_low16:16, /* base 0..15 */ + base_med8:8, /* base 16..23 */ + access8:8, /* access byte */ + limit_high4:4, /* limit 16..19 */ + granularity4:4, /* granularity */ + base_high8:8, /* base 24..31 */ + base_top32:32, /* base 32..63 */ + reserved32:32; /* reserved/zero */ }; struct real_gate { - uint32_t offset_low:16, /* offset 0..15 */ - selector:16, - word_count:8, - access:8, - offset_high:16; /* offset 16..31 */ + uint32_t offset_low:16, /* offset 0..15 */ + selector:16, + word_count:8, + access:8, + offset_high:16; /* offset 16..31 */ }; struct real_gate64 { - uint32_t offset_low16:16, /* offset 0..15 */ - selector16:16, - IST:3, - zeroes5:5, - access8:8, - offset_high16:16, /* offset 16..31 */ - offset_top32:32, /* offset 32..63 */ - reserved32:32; /* reserved/zero */ + uint32_t offset_low16:16, /* offset 0..15 */ + selector16:16, + IST:3, + zeroes5:5, + access8:8, + offset_high16:16, /* offset 16..31 */ + offset_top32:32, /* offset 32..63 */ + reserved32:32; /* reserved/zero */ }; -#define MAKE_REAL_DESCRIPTOR(base,lim,gran,acc) { \ +#define MAKE_REAL_DESCRIPTOR(base, lim, gran, acc) {\ .limit_low = lim & 0xffff, \ .limit_high = (lim >> 16) & 0xf, \ .base_low = base & 0xffff, \ @@ -162,26 +162,26 @@ struct real_gate64 { * at runtime. 
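MAKE_REAL_DESCRIPTOR above uses designated initializers to scatter a 32-bit base and 20-bit limit across the bitfields of struct real_descriptor. A self-contained sketch of the same idea; the macro body below is a reconstruction of the pattern (the hunk shows only its first lines), and the access/granularity values are the usual flat 32-bit code-segment encoding:

#include <stdint.h>
#include <stdio.h>

struct real_descriptor {
	uint32_t limit_low:16,     /* limit 0..15 */
	         base_low:16,      /* base 0..15 */
	         base_med:8,       /* base 16..23 */
	         access:8,         /* access byte */
	         limit_high:4,     /* limit 16..19 */
	         granularity:4,    /* granularity */
	         base_high:8;      /* base 24..31 */
};

/* Same shape as MAKE_REAL_DESCRIPTOR: split base/limit across the fields. */
#define MAKE_DESC(base, lim, gran, acc) { \
	.limit_low   = (lim) & 0xffff,        \
	.limit_high  = ((lim) >> 16) & 0xf,   \
	.base_low    = (base) & 0xffff,       \
	.base_med    = ((base) >> 16) & 0xff, \
	.base_high   = ((base) >> 24) & 0xff, \
	.granularity = (gran),                \
	.access      = (acc),                 \
}

int main(void)
{
	/* Flat 4 GiB code segment: base 0, limit 0xfffff pages, 4K granularity
	 * (SZ_32|SZ_G = 0xc), present kernel readable code (0x9a). */
	struct real_descriptor d = MAKE_DESC(0u, 0xfffffu, 0xc, 0x9a);

	printf("access=0x%x limit_high=0x%x\n", d.access, d.limit_high);
	return 0;
}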
*/ struct fake_descriptor { - uint32_t offset:32; /* offset */ - uint32_t lim_or_seg:20; /* limit */ - /* or segment, for gate */ - uint32_t size_or_wdct:4; /* size/granularity */ - /* word count, for gate */ - uint32_t access:8; /* access */ + uint32_t offset:32; /* offset */ + uint32_t lim_or_seg:20; /* limit */ + /* or segment, for gate */ + uint32_t size_or_wdct:4; /* size/granularity */ + /* word count, for gate */ + uint32_t access:8; /* access */ }; struct fake_descriptor64 { - uint64_t offset64; /* offset [0..31,32..63] */ - uint32_t lim_or_seg:20; /* limit */ - /* or segment, for gate */ - uint32_t size_or_IST:4; /* size/granularity */ - /* IST for gates */ - uint32_t access:8; /* access */ - uint32_t reserved:32; /* reserved/zero */ + uint64_t offset64; /* offset [0..31,32..63] */ + uint32_t lim_or_seg:20; /* limit */ + /* or segment, for gate */ + uint32_t size_or_IST:4; /* size/granularity */ + /* IST for gates */ + uint32_t access:8; /* access */ + uint32_t reserved:32; /* reserved/zero */ }; typedef struct __attribute__((packed)) { - uint16_t size; - void *ptr; + uint16_t size; + void *ptr; } x86_64_desc_register_t; @@ -189,112 +189,112 @@ typedef struct __attribute__((packed)) { /* * Boot-time data for master (or only) CPU */ -extern struct real_descriptor master_gdt[GDTSZ]; -extern struct real_descriptor master_ldt[]; -extern struct i386_tss master_ktss; -extern struct sysenter_stack master_sstk; +extern struct real_descriptor master_gdt[GDTSZ]; +extern struct real_descriptor master_ldt[]; +extern struct i386_tss master_ktss; +extern struct sysenter_stack master_sstk; -extern struct fake_descriptor64 master_idt64[IDTSZ]; -extern struct x86_64_tss master_ktss64; +extern struct fake_descriptor64 master_idt64[IDTSZ]; +extern struct x86_64_tss master_ktss64; __BEGIN_DECLS -extern char df_task_stack[]; -extern char df_task_stack_end[]; -extern struct i386_tss master_dftss; -extern void df_task_start(void); +extern char df_task_stack[]; +extern char df_task_stack_end[]; +extern struct i386_tss master_dftss; +extern void df_task_start(void); -extern char mc_task_stack[]; -extern char mc_task_stack_end[]; -extern struct i386_tss master_mctss; -extern void mc_task_start(void); +extern char mc_task_stack[]; +extern char mc_task_stack_end[]; +extern struct i386_tss master_mctss; +extern void mc_task_start(void); __END_DECLS -#endif /*__ASSEMBLER__*/ +#endif /*__ASSEMBLER__*/ -#define SZ_64 0x2 /* 64-bit segment */ -#define SZ_32 0x4 /* 32-bit segment */ -#define SZ_G 0x8 /* 4K limit field */ +#define SZ_64 0x2 /* 64-bit segment */ +#define SZ_32 0x4 /* 32-bit segment */ +#define SZ_G 0x8 /* 4K limit field */ -#define ACC_A 0x01 /* accessed */ -#define ACC_TYPE 0x1e /* type field: */ +#define ACC_A 0x01 /* accessed */ +#define ACC_TYPE 0x1e /* type field: */ -#define ACC_TYPE_SYSTEM 0x00 /* system descriptors: */ +#define ACC_TYPE_SYSTEM 0x00 /* system descriptors: */ -#define ACC_LDT 0x02 /* LDT */ -#define ACC_CALL_GATE_16 0x04 /* 16-bit call gate */ -#define ACC_TASK_GATE 0x05 /* task gate */ -#define ACC_TSS 0x09 /* task segment */ -#define ACC_CALL_GATE 0x0c /* call gate */ -#define ACC_INTR_GATE 0x0e /* interrupt gate */ -#define ACC_TRAP_GATE 0x0f /* trap gate */ +#define ACC_LDT 0x02 /* LDT */ +#define ACC_CALL_GATE_16 0x04 /* 16-bit call gate */ +#define ACC_TASK_GATE 0x05 /* task gate */ +#define ACC_TSS 0x09 /* task segment */ +#define ACC_CALL_GATE 0x0c /* call gate */ +#define ACC_INTR_GATE 0x0e /* interrupt gate */ +#define ACC_TRAP_GATE 0x0f /* trap gate */ -#define 
ACC_TSS_BUSY 0x02 /* task busy */ +#define ACC_TSS_BUSY 0x02 /* task busy */ -#define ACC_TYPE_USER 0x10 /* user descriptors */ +#define ACC_TYPE_USER 0x10 /* user descriptors */ -#define ACC_DATA 0x10 /* data */ -#define ACC_DATA_W 0x12 /* data, writable */ -#define ACC_DATA_E 0x14 /* data, expand-down */ -#define ACC_DATA_EW 0x16 /* data, expand-down, - writable */ -#define ACC_CODE 0x18 /* code */ -#define ACC_CODE_R 0x1a /* code, readable */ -#define ACC_CODE_C 0x1c /* code, conforming */ -#define ACC_CODE_CR 0x1e /* code, conforming, - readable */ -#define ACC_PL 0x60 /* access rights: */ -#define ACC_PL_K 0x00 /* kernel access only */ -#define ACC_PL_U 0x60 /* user access */ -#define ACC_P 0x80 /* segment present */ +#define ACC_DATA 0x10 /* data */ +#define ACC_DATA_W 0x12 /* data, writable */ +#define ACC_DATA_E 0x14 /* data, expand-down */ +#define ACC_DATA_EW 0x16 /* data, expand-down, + * writable */ +#define ACC_CODE 0x18 /* code */ +#define ACC_CODE_R 0x1a /* code, readable */ +#define ACC_CODE_C 0x1c /* code, conforming */ +#define ACC_CODE_CR 0x1e /* code, conforming, + * readable */ +#define ACC_PL 0x60 /* access rights: */ +#define ACC_PL_K 0x00 /* kernel access only */ +#define ACC_PL_U 0x60 /* user access */ +#define ACC_P 0x80 /* segment present */ /* * Components of a selector */ -#define SEL_LDTS 0x04 /* local selector */ -#define SEL_PL 0x03 /* privilege level: */ -#define SEL_PL_K 0x00 /* kernel selector */ -#define SEL_PL_U 0x03 /* user selector */ +#define SEL_LDTS 0x04 /* local selector */ +#define SEL_PL 0x03 /* privilege level: */ +#define SEL_PL_K 0x00 /* kernel selector */ +#define SEL_PL_U 0x03 /* user selector */ /* * Convert selector to descriptor table index. */ -#define sel_idx(sel) (selector_to_sel(sel).index) -#define SEL_TO_INDEX(s) ((s)>>3) +#define sel_idx(sel) (selector_to_sel(sel).index) +#define SEL_TO_INDEX(s) ((s)>>3) -#define NULL_SEG 0 +#define NULL_SEG 0 /* * Kernel descriptors for MACH - 64-bit flat address space. 
*/ -#define KERNEL64_CS 0x08 /* 1: K64 code */ -#define SYSENTER_CS 0x0b /* U32 sysenter pseudo-segment */ -#define KERNEL64_SS 0x10 /* 2: KERNEL64_CS+8 for syscall */ -#define USER_CS 0x1b /* 3: U32 code */ -#define USER_DS 0x23 /* 4: USER_CS+8 for sysret */ -#define USER64_CS 0x2b /* 5: USER_CS+16 for sysret */ -#define USER64_DS USER_DS /* U64 data pseudo-segment */ -#define KERNEL_LDT 0x30 /* 6: */ - /* 7: other 8 bytes of KERNEL_LDT */ -#define KERNEL_TSS 0x40 /* 8: */ - /* 9: other 8 bytes of KERNEL_TSS */ -#define KERNEL32_CS 0x50 /* 10: */ -#define USER_LDT 0x58 /* 11: */ - /* 12: other 8 bytes of USER_LDT */ -#define KERNEL_DS 0x68 /* 13: 32-bit kernel data */ - - -#define SYSENTER_TF_CS (USER_CS|0x10000) -#define SYSENTER_DS KERNEL64_SS /* sysenter kernel data segment */ - -#endif /* _I386_SEG_H_ */ +#define KERNEL64_CS 0x08 /* 1: K64 code */ +#define SYSENTER_CS 0x0b /* U32 sysenter pseudo-segment */ +#define KERNEL64_SS 0x10 /* 2: KERNEL64_CS+8 for syscall */ +#define USER_CS 0x1b /* 3: U32 code */ +#define USER_DS 0x23 /* 4: USER_CS+8 for sysret */ +#define USER64_CS 0x2b /* 5: USER_CS+16 for sysret */ +#define USER64_DS USER_DS /* U64 data pseudo-segment */ +#define KERNEL_LDT 0x30 /* 6: */ + /* 7: other 8 bytes of KERNEL_LDT */ +#define KERNEL_TSS 0x40 /* 8: */ + /* 9: other 8 bytes of KERNEL_TSS */ +#define KERNEL32_CS 0x50 /* 10: */ +#define USER_LDT 0x58 /* 11: */ + /* 12: other 8 bytes of USER_LDT */ +#define KERNEL_DS 0x68 /* 13: 32-bit kernel data */ + + +#define SYSENTER_TF_CS (USER_CS|0x10000) +#define SYSENTER_DS KERNEL64_SS /* sysenter kernel data segment */ + +#endif /* _I386_SEG_H_ */ #ifdef __x86_64__ /* * 64-bit kernel LDT descriptors */ -#define SYSCALL_CS 0x07 /* syscall pseudo-segment */ -#define USER_CTHREAD 0x0f /* user cthread area */ -#define USER_SETTABLE 0x1f /* start of user settable ldt entries */ +#define SYSCALL_CS 0x07 /* syscall pseudo-segment */ +#define USER_CTHREAD 0x0f /* user cthread area */ +#define USER_SETTABLE 0x1f /* start of user settable ldt entries */ #endif diff --git a/osfmk/i386/serial_io.h b/osfmk/i386/serial_io.h index 1640256a2..0cf73b3c2 100644 --- a/osfmk/i386/serial_io.h +++ b/osfmk/i386/serial_io.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
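A selector, as the SEL_* masks and the *_CS/*_DS definitions above encode it, is a descriptor-table index shifted left by 3, a table-indicator bit (SEL_LDTS), and a two-bit requested privilege level (SEL_PL). A small decoder applied to three of the selectors defined above:

#include <stdint.h>
#include <stdio.h>

#define SEL_TO_INDEX(s) ((s) >> 3)
#define SEL_PL          0x03
#define SEL_LDTS        0x04

static void
explain(uint16_t sel)
{
	printf("selector 0x%02x: index %u, %s, RPL %u\n",
	    sel,
	    (unsigned)SEL_TO_INDEX(sel),
	    (sel & SEL_LDTS) ? "LDT" : "GDT",
	    (unsigned)(sel & SEL_PL));
}

int main(void)
{
	explain(0x1b);  /* USER_CS:      index 3, GDT, RPL 3 */
	explain(0x08);  /* KERNEL64_CS:  index 1, GDT, RPL 0 */
	explain(0x0f);  /* USER_CTHREAD: index 1, LDT, RPL 3 */
	return 0;
}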
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/osfmk/i386/setjmp.h b/osfmk/i386/setjmp.h index 57787d549..dcef7f53e 100644 --- a/osfmk/i386/setjmp.h +++ b/osfmk/i386/setjmp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -59,11 +59,11 @@ /* * Setjmp/longjmp buffer for i386. */ -#ifndef _I386_SETJMP_H_ -#define _I386_SETJMP_H_ +#ifndef _I386_SETJMP_H_ +#define _I386_SETJMP_H_ -typedef struct jmp_buf { - int jmp_buf[6]; /* ebx, esi, edi, ebp, esp, eip */ +typedef struct jmp_buf { + int jmp_buf[6]; /* ebx, esi, edi, ebp, esp, eip */ } jmp_buf_t; -#endif /* _I386_SETJMP_H_ */ +#endif /* _I386_SETJMP_H_ */ diff --git a/osfmk/i386/simple_lock.h b/osfmk/i386/simple_lock.h index 31032681d..b9298397d 100644 --- a/osfmk/i386/simple_lock.h +++ b/osfmk/i386/simple_lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
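The jmp_buf_t in setjmp.h above holds exactly the i386 callee-saved registers plus the stack and return address (ebx, esi, edi, ebp, esp, eip), which is all a non-local control transfer needs to restore. The user-space analogue of that save/restore contract, using the standard <setjmp.h>:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;  /* on i386 this holds ebx, esi, edi, ebp, esp, eip */

static void
fail(void)
{
	longjmp(env, 42);        /* restore the saved registers, resume below */
}

int main(void)
{
	int code = setjmp(env);  /* returns 0 on the initial call */

	if (code == 0) {
		puts("saved context, calling fail()");
		fail();
	}
	printf("resumed via longjmp, code=%d\n", code);
	return 0;
}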
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,10 +60,10 @@ * * Simple lock data type definitions */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _I386_SIMPLE_LOCK_TYPES_H_ -#define _I386_SIMPLE_LOCK_TYPES_H_ +#ifndef _I386_SIMPLE_LOCK_TYPES_H_ +#define _I386_SIMPLE_LOCK_TYPES_H_ #include #include @@ -73,57 +73,57 @@ #include #include -extern uint64_t LockTimeOutTSC; /* Lock timeout in TSC ticks */ -extern uint32_t LockTimeOutUsec;/* Lock timeout in microseconds */ -extern uint64_t LockTimeOut; /* Lock timeout in absolute time */ +extern uint64_t LockTimeOutTSC; /* Lock timeout in TSC ticks */ +extern uint32_t LockTimeOutUsec;/* Lock timeout in microseconds */ +extern uint64_t LockTimeOut; /* Lock timeout in absolute time */ -#if MACH_LDEBUG -#define USLOCK_DEBUG 1 +#if MACH_LDEBUG +#define USLOCK_DEBUG 1 #else -#define USLOCK_DEBUG 0 -#endif /* USLOCK_DEBUG */ +#define USLOCK_DEBUG 0 +#endif /* USLOCK_DEBUG */ typedef struct uslock_debug { - void *lock_pc; /* pc where lock operation began */ - void *lock_thread; /* thread that acquired lock */ - void *unlock_thread; /* last thread to release lock */ - void *unlock_pc; /* pc where lock operation ended */ - unsigned long duration[2]; - unsigned short state; - unsigned char lock_cpu; - unsigned char unlock_cpu; + void *lock_pc; /* pc where lock operation began */ + void *lock_thread; /* thread that acquired lock */ + void *unlock_thread; /* last thread to release lock */ + void *unlock_pc; /* pc where lock operation ended */ + unsigned long duration[2]; + unsigned short state; + unsigned char lock_cpu; + unsigned char unlock_cpu; } uslock_debug; typedef struct slock { - hw_lock_data_t interlock; /* must be first... see lock.c */ -#if USLOCK_DEBUG - unsigned short lock_type; /* must be second... see lock.c */ -#define USLOCK_TAG 0x5353 - uslock_debug debug; + hw_lock_data_t interlock; /* must be first... 
see lock.c */ +#if USLOCK_DEBUG + unsigned short lock_type; /* must be second... see lock.c */ +#define USLOCK_TAG 0x5353 + uslock_debug debug; #endif } usimple_lock_data_t, *usimple_lock_t; -extern void i386_lock_unlock_with_flush( - hw_lock_t); +extern void i386_lock_unlock_with_flush( + hw_lock_t); #else -typedef struct slock { - unsigned long lock_data[10]; +typedef struct slock { + unsigned long lock_data[10]; } usimple_lock_data_t, *usimple_lock_t; -#endif /* defined(MACH_KERNEL_PRIVATE) && defined(__APPLE_API_PRIVATE) */ +#endif /* defined(MACH_KERNEL_PRIVATE) && defined(__APPLE_API_PRIVATE) */ -#define USIMPLE_LOCK_NULL ((usimple_lock_t) 0) +#define USIMPLE_LOCK_NULL ((usimple_lock_t) 0) #if !defined(decl_simple_lock_data) -typedef usimple_lock_data_t *simple_lock_t; -typedef usimple_lock_data_t simple_lock_data_t; +typedef usimple_lock_data_t *simple_lock_t; +typedef usimple_lock_data_t simple_lock_data_t; -#define decl_simple_lock_data(class,name) \ +#define decl_simple_lock_data(class, name) \ class simple_lock_data_t name; -#endif /* !defined(decl_simple_lock_data) */ +#endif /* !defined(decl_simple_lock_data) */ #endif /* !_I386_SIMPLE_LOCK_TYPES_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/i386/smp.h b/osfmk/i386/smp.h index e98af290e..7a99b1793 100644 --- a/osfmk/i386/smp.h +++ b/osfmk/i386/smp.h @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _I386_SMP_H_ +#ifndef _I386_SMP_H_ #define _I386_SMP_H_ /* x86_64 kernels are always built SMP, even if only 1 CPU is active */ #define __SMP__ 1 -#endif /* _I386_SMP_H_ */ +#endif /* _I386_SMP_H_ */ diff --git a/osfmk/i386/stab.h b/osfmk/i386/stab.h index c0f3f2013..a4d9cdec6 100644 --- a/osfmk/i386/stab.h +++ b/osfmk/i386/stab.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
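decl_simple_lock_data(class, name) above exists so a caller can attach an optional storage class or qualifier to the lock declaration without spelling out the type; the thread.h hunk later in this patch uses it with an empty first argument inside struct machine_thread. A sketch with a stand-in lock type:

#include <stdio.h>

/* Stand-in for usimple_lock_data_t; the real one wraps hw_lock_data_t. */
typedef struct { unsigned long lock_data[10]; } simple_lock_data_t;

#define decl_simple_lock_data(class, name) \
	class simple_lock_data_t name;

/* Expands to: static simple_lock_data_t my_lock; */
decl_simple_lock_data(static, my_lock)

int main(void)
{
	printf("lock storage: %zu bytes\n", sizeof(my_lock));
	return 0;
}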
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -72,29 +72,29 @@ * the N_STAB mask set. */ -#define N_GSYM 0x20 /* global symbol */ -#define N_FNAME 0x22 /* F77 function name */ -#define N_FUN 0x24 /* procedure name */ -#define N_STSYM 0x26 /* data segment variable */ -#define N_LCSYM 0x28 /* bss segment variable */ -#define N_MAIN 0x2a /* main function name */ -#define N_PC 0x30 /* global Pascal symbol */ -#define N_RSYM 0x40 /* register variable */ -#define N_SLINE 0x44 /* text segment line number */ -#define N_DSLINE 0x46 /* data segment line number */ -#define N_BSLINE 0x48 /* bss segment line number */ -#define N_SSYM 0x60 /* structure/union element */ -#define N_SO 0x64 /* main source file name */ -#define N_LSYM 0x80 /* stack variable */ -#define N_BINCL 0x82 /* include file beginning */ -#define N_SOL 0x84 /* included source file name */ -#define N_PSYM 0xa0 /* parameter variable */ -#define N_EINCL 0xa2 /* include file end */ -#define N_ENTRY 0xa4 /* alternate entry point */ -#define N_LBRAC 0xc0 /* left bracket */ -#define N_EXCL 0xc2 /* deleted include file */ -#define N_RBRAC 0xe0 /* right bracket */ -#define N_BCOMM 0xe2 /* begin common */ -#define N_ECOMM 0xe4 /* end common */ -#define N_ECOML 0xe8 /* end common (local name) */ -#define N_LENG 0xfe /* length of preceding entry */ +#define N_GSYM 0x20 /* global symbol */ +#define N_FNAME 0x22 /* F77 function name */ +#define N_FUN 0x24 /* procedure name */ +#define N_STSYM 0x26 /* data segment variable */ +#define N_LCSYM 0x28 /* bss segment variable */ +#define N_MAIN 0x2a /* main function name */ +#define N_PC 0x30 /* global Pascal symbol */ +#define N_RSYM 0x40 /* register variable */ +#define N_SLINE 0x44 /* text segment line number */ +#define N_DSLINE 0x46 /* data segment line number */ +#define N_BSLINE 0x48 /* bss segment line number */ +#define N_SSYM 0x60 /* structure/union element */ +#define N_SO 0x64 /* main source file name */ +#define N_LSYM 0x80 /* stack variable */ +#define N_BINCL 0x82 /* include file beginning */ +#define N_SOL 0x84 /* included source file name */ +#define N_PSYM 0xa0 /* parameter variable */ +#define N_EINCL 0xa2 /* include file end */ +#define N_ENTRY 0xa4 /* alternate entry point */ +#define N_LBRAC 0xc0 /* left bracket */ +#define N_EXCL 0xc2 /* deleted include file */ +#define N_RBRAC 0xe0 /* right bracket */ +#define N_BCOMM 0xe2 /* begin common */ +#define N_ECOMM 0xe4 /* end common */ +#define N_ECOML 0xe8 /* end common (local name) */ +#define N_LENG 0xfe /* length of preceding entry */ diff --git a/osfmk/i386/startup64.c b/osfmk/i386/startup64.c index ddf892451..2a363f7bc 100644 --- a/osfmk/i386/startup64.c +++ b/osfmk/i386/startup64.c @@ -2,7 +2,7 @@ * Copyright (c) 2006-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
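The N_* values in stab.h above are the classic stab debug-symbol type codes carried in a symbol-table entry's n_type byte. A trivial decoder over a few of them:

#include <stdio.h>

#define N_FUN   0x24  /* procedure name */
#define N_SLINE 0x44  /* text segment line number */
#define N_SO    0x64  /* main source file name */

static const char *
stab_name(unsigned char n_type)
{
	switch (n_type) {
	case N_FUN:   return "N_FUN (procedure)";
	case N_SLINE: return "N_SLINE (line number)";
	case N_SO:    return "N_SO (source file)";
	default:      return "other";
	}
}

int main(void)
{
	unsigned char sample[] = { 0x64, 0x24, 0x44, 0x20 };

	for (unsigned i = 0; i < sizeof(sample); i++) {
		printf("0x%02x -> %s\n", sample[i], stab_name(sample[i]));
	}
	return 0;
}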
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -48,7 +48,7 @@ #include #include -#include /* prototyping */ +#include /* prototyping */ #include #include @@ -77,26 +77,28 @@ extern void dump_frame(x86_saved_state_t *sp); void dump_frame(x86_saved_state_t *sp) { - if (is_saved_state32(sp)) + if (is_saved_state32(sp)) { dump_frame32(&sp->ss_32); - else if (is_saved_state64(sp)) + } else if (is_saved_state64(sp)) { dump_frame64(&sp->ss_64); - else + } else { kprintf("dump_frame(%p) unknown type %d\n", sp, sp->flavor); + } } void dump_frame32(x86_saved_state32_t *sp) { - unsigned int i; - uint32_t *ip = (uint32_t *) sp; + unsigned int i; + uint32_t *ip = (uint32_t *) sp; kprintf("dump_frame32(%p):\n", sp); - + for (i = 0; - i < sizeof(x86_saved_state32_t)/sizeof(uint32_t); - i++, ip++) + i < sizeof(x86_saved_state32_t) / sizeof(uint32_t); + i++, ip++) { kprintf("%p: 0x%08x\n", ip, *ip); + } kprintf("sp->gs: 0x%08x\n", sp->gs); kprintf("sp->fs: 0x%08x\n", sp->fs); @@ -123,15 +125,16 @@ dump_frame32(x86_saved_state32_t *sp) void dump_frame64(x86_saved_state64_t *sp) { - unsigned int i; - uint64_t *ip = (uint64_t *) sp; + unsigned int i; + uint64_t *ip = (uint64_t *) sp; kprintf("dump_frame64(%p):\n", sp); - + for (i = 0; - i < sizeof(x86_saved_state64_t)/sizeof(uint64_t); - i++, ip++) + i < sizeof(x86_saved_state64_t) / sizeof(uint64_t); + i++, ip++) { kprintf("%p: 0x%016llx\n", ip, *ip); + } kprintf("sp->isf.trapno: 0x%08x\n", sp->isf.trapno); kprintf("sp->isf.trapfn: 0x%016llx\n", sp->isf.trapfn); @@ -167,64 +170,64 @@ dump_frame64(x86_saved_state64_t *sp) void dump_gdt(void *gdtp) { - unsigned int i; - uint32_t *ip = (uint32_t *) gdtp; + unsigned int i; + uint32_t *ip = (uint32_t *) gdtp; kprintf("GDT:\n"); for (i = 0; i < GDTSZ; i++, ip += 2) { - kprintf("%p: 0x%08x\n", ip+0, *(ip+0)); - kprintf("%p: 0x%08x\n", ip+1, *(ip+1)); + kprintf("%p: 0x%08x\n", ip + 0, *(ip + 0)); + kprintf("%p: 0x%08x\n", ip + 1, *(ip + 1)); } } void dump_ldt(void *ldtp) { - unsigned int i; - uint32_t *ip = (uint32_t *) ldtp; + unsigned int i; + uint32_t *ip = (uint32_t *) ldtp; kprintf("LDT:\n"); for (i = 0; i < LDTSZ_MIN; i++, ip += 2) { - kprintf("%p: 0x%08x\n", ip+0, *(ip+0)); - kprintf("%p: 0x%08x\n", ip+1, *(ip+1)); + kprintf("%p: 0x%08x\n", ip + 0, *(ip + 0)); + kprintf("%p: 0x%08x\n", ip + 1, *(ip + 1)); } } void dump_idt(void *idtp) { - unsigned int i; - uint32_t *ip = (uint32_t *) idtp; + unsigned int i; + uint32_t *ip = (uint32_t *) idtp; kprintf("IDT64:\n"); for (i = 0; i < 16; i++, ip += 4) { - kprintf("%p: 0x%08x\n", ip+0, *(ip+0)); - kprintf("%p: 0x%08x\n", ip+1, *(ip+1)); - kprintf("%p: 0x%08x\n", ip+2, *(ip+2)); - kprintf("%p: 0x%08x\n", ip+3, *(ip+3)); + kprintf("%p: 0x%08x\n", ip + 0, *(ip + 0)); + 
kprintf("%p: 0x%08x\n", ip + 1, *(ip + 1)); + kprintf("%p: 0x%08x\n", ip + 2, *(ip + 2)); + kprintf("%p: 0x%08x\n", ip + 3, *(ip + 3)); } } void dump_tss(void *tssp) { - unsigned int i; - uint32_t *ip = (uint32_t *) tssp; + unsigned int i; + uint32_t *ip = (uint32_t *) tssp; kprintf("TSS64:\n"); - for (i = 0; i < sizeof(master_ktss64)/sizeof(uint32_t); i++, ip++) { - kprintf("%p: 0x%08x\n", ip+0, *(ip+0)); + for (i = 0; i < sizeof(master_ktss64) / sizeof(uint32_t); i++, ip++) { + kprintf("%p: 0x%08x\n", ip + 0, *(ip + 0)); } } -void dump_regs64(void) +void +dump_regs64(void) { - -#define SNAP_REG(reg) \ - uint64_t reg; \ +#define SNAP_REG(reg) \ + uint64_t reg; \ __asm__ volatile("mov %%" #reg ", %0" : "=m" (reg)) -#define KPRINT_REG(reg) \ +#define KPRINT_REG(reg) \ kprintf("%3s: %p\n", #reg, (void *) reg) SNAP_REG(rsp); diff --git a/osfmk/i386/task.h b/osfmk/i386/task.h index 0ca7d549e..53728d413 100644 --- a/osfmk/i386/task.h +++ b/osfmk/i386/task.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -61,12 +61,10 @@ #include #include -#define MACHINE_TASK \ - struct user_ldt * i386_ldt; \ - void* task_debug; \ - uint64_t uexc_range_start; \ - uint64_t uexc_range_size; \ - uint64_t uexc_handler; \ +#define MACHINE_TASK \ + struct user_ldt * i386_ldt; \ + void* task_debug; \ + uint64_t uexc_range_start; \ + uint64_t uexc_range_size; \ + uint64_t uexc_handler; \ xstate_t xstate; - - diff --git a/osfmk/i386/thread.h b/osfmk/i386/thread.h index d9ec2568e..74da242cf 100644 --- a/osfmk/i386/thread.h +++ b/osfmk/i386/thread.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * state as applied to I386 processors. */ -#ifndef _I386_THREAD_H_ +#ifndef _I386_THREAD_H_ #define _I386_THREAD_H_ #include @@ -92,17 +92,17 @@ */ struct x86_kernel_state { - uint64_t k_rbx; /* kernel context */ - uint64_t k_rsp; - uint64_t k_rbp; - uint64_t k_r12; - uint64_t k_r13; - uint64_t k_r14; - uint64_t k_r15; - uint64_t k_rip; + uint64_t k_rbx; /* kernel context */ + uint64_t k_rsp; + uint64_t k_rbp; + uint64_t k_r12; + uint64_t k_r13; + uint64_t k_r14; + uint64_t k_r15; + uint64_t k_rip; }; -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE typedef struct x86_kernel_state machine_thread_kernel_state; #include #endif @@ -119,55 +119,57 @@ extern unsigned int _MachineStateCount[]; * as the PCB. 
*/ struct machine_thread { - x86_saved_state_t *iss; - void *ifps; - void *ids; - decl_simple_lock_data(,lock); /* protects ifps and ids */ - xstate_t xstate; - -#ifdef MACH_BSD - uint64_t cthread_self; /* for use of cthread package */ - struct real_descriptor cthread_desc; - unsigned long uldt_selector; /* user ldt selector to set */ - struct real_descriptor uldt_desc; /* actual user setable ldt */ + x86_saved_state_t *iss; + void *ifps; + void *ids; + decl_simple_lock_data(, lock); /* protects ifps and ids */ + xstate_t xstate; + +#ifdef MACH_BSD + uint64_t cthread_self; /* for use of cthread package */ + struct real_descriptor cthread_desc; + unsigned long uldt_selector; /* user ldt selector to set */ + struct real_descriptor uldt_desc; /* actual user setable ldt */ #endif - struct pal_pcb pal_pcb; - uint32_t specFlags; + struct pal_pcb pal_pcb; + uint32_t specFlags; /* N.B.: These "specFlags" are read-modify-written non-atomically within * the copyio routine. So conceivably any exception that modifies the * flags in a persistent manner could be clobbered if it occurs within * a copyio context. For now, the only other flag here is OnProc which * is not modified except at context switch. */ -#define OnProc 0x1 -#define CopyIOActive 0x2 /* Checked to ensure DTrace actions do not re-enter copyio(). */ - uint64_t thread_gpu_ns; +#define OnProc 0x1 +#define CopyIOActive 0x2 /* Checked to ensure DTrace actions do not re-enter copyio(). */ + uint64_t thread_gpu_ns; #if NCOPY_WINDOWS > 0 - struct { - user_addr_t user_base; + struct { + user_addr_t user_base; } copy_window[NCOPY_WINDOWS]; - int nxt_window; - int copyio_state; -#define WINDOWS_DIRTY 0 -#define WINDOWS_CLEAN 1 -#define WINDOWS_CLOSED 2 -#define WINDOWS_OPENED 3 - uint64_t physwindow_pte; - int physwindow_busy; + int nxt_window; + int copyio_state; +#define WINDOWS_DIRTY 0 +#define WINDOWS_CLEAN 1 +#define WINDOWS_CLOSED 2 +#define WINDOWS_OPENED 3 + uint64_t physwindow_pte; + int physwindow_busy; #endif + + int mthr_do_segchk; }; typedef struct machine_thread *pcb_t; -#define THREAD_TO_PCB(Thr) (&(Thr)->machine) +#define THREAD_TO_PCB(Thr) (&(Thr)->machine) -#define USER_STATE(Thr) ((Thr)->machine.iss) -#define USER_REGS32(Thr) (saved_state32(USER_STATE(Thr))) -#define USER_REGS64(Thr) (saved_state64(USER_STATE(Thr))) +#define USER_STATE(Thr) ((Thr)->machine.iss) +#define USER_REGS32(Thr) (saved_state32(USER_STATE(Thr))) +#define USER_REGS64(Thr) (saved_state64(USER_STATE(Thr))) -#define user_pc(Thr) (is_saved_state32(USER_STATE(Thr)) ? \ - USER_REGS32(Thr)->eip : \ - USER_REGS64(Thr)->isf.rip ) +#define user_pc(Thr) (is_saved_state32(USER_STATE(Thr)) ? 
\ + USER_REGS32(Thr)->eip : \ + USER_REGS64(Thr)->isf.rip ) extern void *get_user_regs(thread_t); @@ -175,7 +177,7 @@ extern void *act_thread_csave(void); extern void act_thread_catt(void *ctx); extern void act_thread_cfree(void *ctx); -#define FIND_PERFCONTROL_STATE(th) (PERFCONTROL_STATE_NULL) +#define FIND_PERFCONTROL_STATE(th) (PERFCONTROL_STATE_NULL) /* * On the kernel stack is: @@ -185,7 +187,7 @@ extern void act_thread_cfree(void *ctx); */ -#define STACK_IKS(stack) \ +#define STACK_IKS(stack) \ (&(((struct thread_kernel_state *)((stack) + kernel_stack_size)) - 1)->machine) /* @@ -194,24 +196,24 @@ extern void act_thread_cfree(void *ctx); static inline vm_offset_t current_stack_depth(void) { - vm_offset_t stack_ptr; + vm_offset_t stack_ptr; assert(get_preemption_level() > 0 || !ml_get_interrupts_enabled()); #if defined(__x86_64__) - __asm__ volatile("mov %%rsp, %0" : "=m" (stack_ptr)); + __asm__ volatile ("mov %%rsp, %0" : "=m" (stack_ptr)); #else - __asm__ volatile("mov %%esp, %0" : "=m" (stack_ptr)); + __asm__ volatile ("mov %%esp, %0" : "=m" (stack_ptr)); #endif - return (current_cpu_datap()->cpu_kernel_stack - + sizeof(struct thread_kernel_state) - - stack_ptr); + return current_cpu_datap()->cpu_kernel_stack + + sizeof(struct thread_kernel_state) + - stack_ptr; } /* * Return address of the function that called current function, given * address of the first parameter of current function. */ -#define GET_RETURN_PC(addr) (__builtin_return_address(0)) +#define GET_RETURN_PC(addr) (__builtin_return_address(0)) -#endif /* _I386_THREAD_H_ */ +#endif /* _I386_THREAD_H_ */ diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c index ef03460db..e9ef4dea7 100644 --- a/osfmk/i386/trap.c +++ b/osfmk/i386/trap.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* -* @OSF_COPYRIGHT@ -*/ -/* -* Mach Operating System -* Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University -* All Rights Reserved. -* -* Permission to use, copy, modify and distribute this software and its -* documentation is hereby granted, provided that both the copyright -* notice and this permission notice appear in all copies of the -* software, derivative works or modified versions, and any portions -* thereof, and that both notices appear in supporting documentation. -* -* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" -* CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR -* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. -* -* Carnegie Mellon requests users of this software to return to -* -* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU -* School of Computer Science -* Carnegie Mellon University -* Pittsburgh PA 15213-3890 -* -* any improvements or extensions that they make and grant Carnegie Mellon -* the rights to redistribute these changes. -*/ + * @OSF_COPYRIGHT@ + */ /* -*/ + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ /* -* Hardware trap/fault handler. + * Hardware trap/fault handler. */ #include @@ -133,62 +133,64 @@ extern boolean_t pmap_smap_enabled; __attribute__((noreturn)) void thread_syscall_return( - kern_return_t ret) + kern_return_t ret) { - thread_t thr_act = current_thread(); - boolean_t is_mach; - int code; + thread_t thr_act = current_thread(); + boolean_t is_mach; + int code; pal_register_cache_state(thr_act, DIRTY); - if (thread_is_64bit_addr(thr_act)) { - x86_saved_state64_t *regs; - + if (thread_is_64bit_addr(thr_act)) { + x86_saved_state64_t *regs; + regs = USER_REGS64(thr_act); code = (int) (regs->rax & SYSCALL_NUMBER_MASK); is_mach = (regs->rax & SYSCALL_CLASS_MASK) - == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT); + == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT); if (kdebug_enable && is_mach) { - /* Mach trap */ - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC,code)|DBG_FUNC_END, - ret, 0, 0, 0, 0); + /* Mach trap */ + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SC, code) | DBG_FUNC_END, + ret, 0, 0, 0, 0); } regs->rax = ret; #if DEBUG - if (is_mach) + if (is_mach) { DEBUG_KPRINT_SYSCALL_MACH( "thread_syscall_return: 64-bit mach ret=%u\n", ret); - else + } else { DEBUG_KPRINT_SYSCALL_UNIX( "thread_syscall_return: 64-bit unix ret=%u\n", ret); + } #endif } else { - x86_saved_state32_t *regs; - + x86_saved_state32_t *regs; + regs = USER_REGS32(thr_act); code = ((int) regs->eax); is_mach = (code < 0); if (kdebug_enable && is_mach) { - /* Mach trap */ - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_SC,-code)|DBG_FUNC_END, - ret, 0, 0, 0, 0); + /* Mach trap */ + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_SC, -code) | DBG_FUNC_END, + ret, 0, 0, 0, 0); } regs->eax = ret; #if DEBUG - if (is_mach) + if (is_mach) { DEBUG_KPRINT_SYSCALL_MACH( "thread_syscall_return: 32-bit mach ret=%u\n", ret); - else + } else { DEBUG_KPRINT_SYSCALL_UNIX( 
"thread_syscall_return: 32-bit unix ret=%u\n", ret); + } #endif } @@ -201,25 +203,25 @@ thread_syscall_return( throttle_lowpri_io(1); thread_exception_return(); - /*NOTREACHED*/ + /*NOTREACHED*/ } static inline void user_page_fault_continue( - kern_return_t kr) + kern_return_t kr) { - thread_t thread = current_thread(); - user_addr_t vaddr; + thread_t thread = current_thread(); + user_addr_t vaddr; if (thread_is_64bit_addr(thread)) { - x86_saved_state64_t *uregs; + x86_saved_state64_t *uregs; uregs = USER_REGS64(thread); vaddr = (user_addr_t)uregs->cr2; } else { - x86_saved_state32_t *uregs; + x86_saved_state32_t *uregs; uregs = USER_REGS32(thread); @@ -238,33 +240,33 @@ user_page_fault_continue( * Fault recovery in copyin/copyout routines. */ struct recovery { - uintptr_t fault_addr; - uintptr_t recover_addr; + uintptr_t fault_addr; + uintptr_t recover_addr; }; -extern struct recovery recover_table[]; -extern struct recovery recover_table_end[]; +extern struct recovery recover_table[]; +extern struct recovery recover_table_end[]; -const char * trap_type[] = {TRAP_NAMES}; -unsigned TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]); +const char * trap_type[] = {TRAP_NAMES}; +unsigned TRAP_TYPES = sizeof(trap_type) / sizeof(trap_type[0]); -extern void PE_incoming_interrupt(int interrupt); +extern void PE_incoming_interrupt(int interrupt); #if defined(__x86_64__) && DEBUG void -kprint_state(x86_saved_state64_t *saved_state) +kprint_state(x86_saved_state64_t *saved_state) { kprintf("current_cpu_datap() 0x%lx\n", (uintptr_t)current_cpu_datap()); kprintf("Current GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_GS_BASE)); kprintf("Kernel GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_KERNEL_GS_BASE)); kprintf("state at 0x%lx:\n", (uintptr_t) saved_state); - kprintf(" rdi 0x%llx\n", saved_state->rdi); - kprintf(" rsi 0x%llx\n", saved_state->rsi); + kprintf(" rdi 0x%llx\n", saved_state->rdi); + kprintf(" rsi 0x%llx\n", saved_state->rsi); kprintf(" rdx 0x%llx\n", saved_state->rdx); kprintf(" r10 0x%llx\n", saved_state->r10); kprintf(" r8 0x%llx\n", saved_state->r8); - kprintf(" r9 0x%llx\n", saved_state->r9); + kprintf(" r9 0x%llx\n", saved_state->r9); kprintf(" cr2 0x%llx\n", saved_state->cr2); kprintf("real cr2 0x%lx\n", get_cr2()); @@ -298,12 +300,13 @@ kprint_state(x86_saved_state64_t *saved_state) * Non-zero indicates latency assert is enabled and capped at valued * absolute time units. 
*/ - + uint64_t interrupt_latency_cap = 0; boolean_t ilat_assert = FALSE; void -interrupt_latency_tracker_setup(void) { +interrupt_latency_tracker_setup(void) +{ uint32_t ilat_cap_us; if (PE_parse_boot_argn("interrupt_latency_cap_us", &ilat_cap_us, sizeof(ilat_cap_us))) { interrupt_latency_cap = ilat_cap_us * NSEC_PER_USEC; @@ -314,7 +317,9 @@ interrupt_latency_tracker_setup(void) { PE_parse_boot_argn("-interrupt_latency_assert_enable", &ilat_assert, sizeof(ilat_assert)); } -void interrupt_reset_latency_stats(void) { +void +interrupt_reset_latency_stats(void) +{ uint32_t i; for (i = 0; i < real_ncpus; i++) { cpu_data_ptr[i]->cpu_max_observed_int_latency = @@ -322,7 +327,9 @@ void interrupt_reset_latency_stats(void) { } } -void interrupt_populate_latency_stats(char *buf, unsigned bufsize) { +void +interrupt_populate_latency_stats(char *buf, unsigned bufsize) +{ uint32_t i, tcpu = ~0; uint64_t cur_max = 0; @@ -333,8 +340,9 @@ void interrupt_populate_latency_stats(char *buf, unsigned bufsize) { } } - if (tcpu < real_ncpus) + if (tcpu < real_ncpus) { snprintf(buf, bufsize, "0x%x 0x%x 0x%llx", tcpu, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency_vector, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency); + } } uint32_t interrupt_timer_coalescing_enabled = 1; @@ -348,37 +356,41 @@ uint64_t interrupt_coalesced_timers; void interrupt(x86_saved_state_t *state) { - uint64_t rip; - uint64_t rsp; - int interrupt_num; - boolean_t user_mode = FALSE; - int ipl; - int cnum = cpu_number(); - cpu_data_t *cdp = cpu_data_ptr[cnum]; - int itype = DBG_INTR_TYPE_UNKNOWN; - - x86_saved_state64_t *state64 = saved_state64(state); + uint64_t rip; + uint64_t rsp; + int interrupt_num; + boolean_t user_mode = FALSE; + int ipl; + int cnum = cpu_number(); + cpu_data_t *cdp = cpu_data_ptr[cnum]; + int itype = DBG_INTR_TYPE_UNKNOWN; + int handled; + + x86_saved_state64_t *state64 = saved_state64(state); rip = state64->isf.rip; rsp = state64->isf.rsp; interrupt_num = state64->isf.trapno; - if(state64->isf.cs & 0x03) + if (state64->isf.cs & 0x03) { user_mode = TRUE; + } - if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) + if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) { cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++; + } - if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) + if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) { itype = DBG_INTR_TYPE_IPI; - else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) + } else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) { itype = DBG_INTR_TYPE_TIMER; - else + } else { itype = DBG_INTR_TYPE_OTHER; + } - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, - interrupt_num, - (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)), - user_mode, itype, 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START, + interrupt_num, + (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)), + user_mode, itype, 0); SCHED_STATS_INTERRUPT(current_processor()); @@ -389,13 +401,26 @@ interrupt(x86_saved_state_t *state) #endif ipl = get_preemption_level(); - + /* * Handle local APIC interrupts * else call platform expert for devices. 
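/*
 * [Editor's sketch, not part of the patch] The interrupt() hunk that
 * follows starts encoding the handling CPU in bits 8:31 of the vector
 * passed to PE_incoming_interrupt(), except for CMCI, which can fire
 * on any logical CPU. A minimal model of that encoding; the CPU and
 * vector values are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static inline uint32_t
encode_intr(uint32_t cnum, uint32_t vector)
{
	return (cnum << 8) | vector;        /* vector fits in bits 0:7 */
}

static inline uint32_t
decode_cpu(uint32_t encoded)
{
	return encoded >> 8;
}

static inline uint32_t
decode_vector(uint32_t encoded)
{
	return encoded & 0xff;
}

int
main(void)
{
	uint32_t e = encode_intr(3, 0xd0);  /* CPU 3, hypothetical vector 0xd0 */
	assert(decode_cpu(e) == 3 && decode_vector(e) == 0xd0);
	return 0;
}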
*/ - if (!lapic_interrupt(interrupt_num, state)) { - PE_incoming_interrupt(interrupt_num); + handled = lapic_interrupt(interrupt_num, state); + + if (!handled) { + if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_CMCI_INTERRUPT)) { + /* + * CMCI can be signalled on any logical processor, and the kexts + * that implement handling CMCI use IOKit to register handlers for + * the CMCI vector, so if we see a CMCI, do not encode a CPU + * number in bits 8:31 (since the vector is the same regardless of + * the handling CPU). + */ + PE_incoming_interrupt(interrupt_num); + } else if (cnum <= lapic_max_interrupt_cpunum) { + PE_incoming_interrupt((cnum << 8) | interrupt_num); + } } if (__improbable(get_preemption_level() != ipl)) { @@ -403,10 +428,9 @@ interrupt(x86_saved_state_t *state) } - if (__improbable(cdp->cpu_nested_istack)) { - cdp->cpu_nested_istack_events++; - } - else { + if (__improbable(cdp->cpu_nested_istack)) { + cdp->cpu_nested_istack_events++; + } else { uint64_t ctime = mach_absolute_time(); uint64_t int_latency = ctime - cdp->cpu_int_event_time; uint64_t esdeadline, ehdeadline; @@ -445,9 +469,9 @@ interrupt(x86_saved_state_t *state) */ if (!user_mode) { uint64_t depth = cdp->cpu_kernel_stack - + sizeof(struct thread_kernel_state) - + sizeof(struct i386_exception_link *) - - rsp; + + sizeof(struct thread_kernel_state) + + sizeof(struct i386_exception_link *) + - rsp; if (__improbable(depth > kernel_stack_depth_max)) { kernel_stack_depth_max = (vm_offset_t)depth; KERNEL_DEBUG_CONSTANT( @@ -456,15 +480,16 @@ interrupt(x86_saved_state_t *state) } } - if (cnum == master_cpu) + if (cnum == master_cpu) { ml_entropy_collect(); + } #if KPERF kperf_interrupt(); #endif /* KPERF */ KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END, - interrupt_num); + interrupt_num); assert(ml_get_interrupts_enabled() == FALSE); } @@ -473,7 +498,7 @@ static inline void reset_dr7(void) { long dr7 = 0x400; /* magic dr7 reset value; 32 bit on i386, 64 bit on x86_64 */ - __asm__ volatile("mov %0,%%dr7" : : "r" (dr7)); + __asm__ volatile ("mov %0,%%dr7" : : "r" (dr7)); } #if MACH_KDP unsigned kdp_has_active_watchpoints = 0; @@ -489,31 +514,32 @@ unsigned kdp_has_active_watchpoints = 0; void kernel_trap( - x86_saved_state_t *state, + x86_saved_state_t *state, uintptr_t *lo_spp) { - x86_saved_state64_t *saved_state; - int code; - user_addr_t vaddr; - int type; - vm_map_t map = 0; /* protected by T_PAGE_FAULT */ - kern_return_t result = KERN_FAILURE; - kern_return_t fault_result = KERN_SUCCESS; - thread_t thread; + x86_saved_state64_t *saved_state; + int code; + user_addr_t vaddr; + int type; + vm_map_t map = 0; /* protected by T_PAGE_FAULT */ + kern_return_t result = KERN_FAILURE; + kern_return_t fault_result = KERN_SUCCESS; + thread_t thread; boolean_t intr; - vm_prot_t prot; - struct recovery *rp; - vm_offset_t kern_ip; + vm_prot_t prot; + struct recovery *rp; + vm_offset_t kern_ip; #if NCOPY_WINDOWS > 0 - int fault_in_copy_window = -1; + int fault_in_copy_window = -1; #endif - int is_user; - int trap_pl = get_preemption_level(); + int is_user; + int trap_pl = get_preemption_level(); thread = current_thread(); - if (__improbable(is_saved_state32(state))) + if (__improbable(is_saved_state32(state))) { panic("kernel_trap(%p) with 32-bit state", state); + } saved_state = saved_state64(state); /* Record cpu where state was captured */ @@ -522,7 +548,7 @@ kernel_trap( vaddr = (user_addr_t)saved_state->cr2; type = saved_state->isf.trapno; code = (int)(saved_state->isf.err & 0xffff); - intr = 
(saved_state->isf.rflags & EFL_IF) != 0; /* state of ints at trap */ + intr = (saved_state->isf.rflags & EFL_IF) != 0; /* state of ints at trap */ kern_ip = (vm_offset_t)saved_state->isf.rip; is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS); @@ -530,7 +556,7 @@ kernel_trap( #if CONFIG_DTRACE /* * Is there a DTrace hook? - */ + */ if (__improbable(tempDTraceTrapHook != NULL)) { if (tempDTraceTrapHook(type, state, lo_spp, 0) == KERN_SUCCESS) { /* @@ -549,17 +575,17 @@ kernel_trap( if (__improbable(T_PREEMPT == type)) { ast_taken_kernel(); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE, - 0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE, + 0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0); return; } - user_addr_t kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr); + user_addr_t kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE, - (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user, - VM_KERNEL_UNSLIDE(kern_ip), 0); + (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE, + (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user, + VM_KERNEL_UNSLIDE(kern_ip), 0); if (T_PAGE_FAULT == type) { @@ -570,9 +596,9 @@ kernel_trap( if (__probable(thread != THREAD_NULL && thread->map != kernel_map)) { #if NCOPY_WINDOWS > 0 - vm_offset_t copy_window_base; - vm_offset_t kvaddr; - int window_index; + vm_offset_t copy_window_base; + vm_offset_t kvaddr; + int window_index; kvaddr = (vm_offset_t)vaddr; /* @@ -584,14 +610,12 @@ kernel_trap( */ copy_window_base = current_cpu_datap()->cpu_copywindow_base; - if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS)) ) { - + if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS))) { window_index = (int)((kvaddr - copy_window_base) / NBPDE); if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) { - - kvaddr -= (copy_window_base + (NBPDE * window_index)); - vaddr = thread->machine.copy_window[window_index].user_base + kvaddr; + kvaddr -= (copy_window_base + (NBPDE * window_index)); + vaddr = thread->machine.copy_window[window_index].user_base + kvaddr; map = thread->map; fault_in_copy_window = window_index; @@ -610,7 +634,7 @@ kernel_trap( * the intercept). */ if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) && - (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) { + (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) { goto debugger_entry; } @@ -620,8 +644,8 @@ kernel_trap( * the AC bit unset (i.e. not from copyin/out path). */ if (__improbable(code & T_PF_PROT && - pmap_smap_enabled && - (saved_state->isf.rflags & EFL_AC) == 0)) { + pmap_smap_enabled && + (saved_state->isf.rflags & EFL_AC) == 0)) { goto debugger_entry; } @@ -631,7 +655,7 @@ kernel_trap( * then switch cr3 here and dismiss the fault. 
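/*
 * [Editor's sketch, not part of the patch] The SMEP/SMAP checks just
 * above reduce to bit tests on the page-fault error code (see the
 * T_PF_* definitions later in this patch, in osfmk/i386/trap.h).
 * Restated standalone; EFL_AC as bit 18 is an assumption taken from
 * the Intel SDM, not quoted from the patch, and the real checks also
 * require pmap_smep_enabled / pmap_smap_enabled.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define T_PF_PROT    0x1        /* protection violation */
#define T_PF_EXECUTE 0x10       /* instruction fetch when NX */
#define EFL_AC       (1u << 18) /* alignment check / SMAP override */

/* Kernel fetched an instruction from the faulting address itself. */
static bool
looks_like_smep(int code, uint64_t rip, uint64_t cr2)
{
	return code == (T_PF_PROT | T_PF_EXECUTE) && rip == cr2;
}

/* Kernel touched user data without copyio having set AC. */
static bool
looks_like_smap(int code, uint64_t rflags)
{
	return (code & T_PF_PROT) && (rflags & EFL_AC) == 0;
}

int
main(void)
{
	assert(looks_like_smep(T_PF_PROT | T_PF_EXECUTE, 0x7fff5000, 0x7fff5000));
	assert(!looks_like_smap(T_PF_PROT, EFL_AC)); /* copyio path: AC set */
	assert(looks_like_smap(T_PF_PROT, 0));
	return 0;
}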
*/ if (no_shared_cr3 && - (thread->machine.specFlags&CopyIOActive) && + (thread->machine.specFlags & CopyIOActive) && map->pmap->pm_cr3 != get_cr3_base()) { pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE); set_cr3_raw(map->pmap->pm_cr3); @@ -649,75 +673,75 @@ kernel_trap( (void) ml_set_interrupts_enabled(intr); switch (type) { - - case T_NO_FPU: + case T_NO_FPU: fpnoextflt(); return; - case T_FPU_FAULT: + case T_FPU_FAULT: fpextovrflt(); return; - case T_FLOATING_POINT_ERROR: + case T_FLOATING_POINT_ERROR: fpexterrflt(); return; - case T_SSE_FLOAT_ERROR: + case T_SSE_FLOAT_ERROR: fpSSEexterrflt(); return; - case T_INVALID_OPCODE: + case T_INVALID_OPCODE: fpUDflt(kern_ip); goto debugger_entry; - case T_DEBUG: - if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS) - { - /* We've somehow encountered a debug - * register match that does not belong - * to the kernel debugger. - * This isn't supposed to happen. - */ - reset_dr7(); - return; - } - goto debugger_entry; - case T_INT3: - goto debugger_entry; - case T_PAGE_FAULT: + case T_DEBUG: + if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS) { + /* We've somehow encountered a debug + * register match that does not belong + * to the kernel debugger. + * This isn't supposed to happen. + */ + reset_dr7(); + return; + } + goto debugger_entry; + case T_INT3: + goto debugger_entry; + case T_PAGE_FAULT: #if CONFIG_DTRACE - if (thread != THREAD_NULL && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ + if (thread != THREAD_NULL && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */ if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */ /* * DTrace has "anticipated" the possibility of this fault, and has * established the suitable recovery state. Drop down now into the - * recovery handling code in "case T_GENERAL_PROTECTION:". + * recovery handling code in "case T_GENERAL_PROTECTION:". */ goto FALL_THROUGH; } } #endif /* CONFIG_DTRACE */ - + prot = VM_PROT_READ; - if (code & T_PF_WRITE) - prot |= VM_PROT_WRITE; - if (code & T_PF_EXECUTE) - prot |= VM_PROT_EXECUTE; + if (code & T_PF_WRITE) { + prot |= VM_PROT_WRITE; + } + if (code & T_PF_EXECUTE) { + prot |= VM_PROT_EXECUTE; + } fault_result = result = vm_fault(map, - vaddr, - prot, - FALSE, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); + vaddr, + prot, + FALSE, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); if (result == KERN_SUCCESS) { #if NCOPY_WINDOWS > 0 if (fault_in_copy_window != -1) { ml_set_interrupts_enabled(FALSE); copy_window_fault(thread, map, - fault_in_copy_window); + fault_in_copy_window); (void) ml_set_interrupts_enabled(intr); } #endif /* NCOPY_WINDOWS > 0 */ @@ -730,14 +754,14 @@ kernel_trap( FALL_THROUGH: #endif /* CONFIG_DTRACE */ - case T_GENERAL_PROTECTION: + case T_GENERAL_PROTECTION: /* * If there is a failure recovery address * for this fault, go there. */ - for (rp = recover_table; rp < recover_table_end; rp++) { - if (kern_ip == rp->fault_addr) { - set_recovery_ip(saved_state, rp->recover_addr); + for (rp = recover_table; rp < recover_table_end; rp++) { + if (kern_ip == rp->fault_addr) { + set_recovery_ip(saved_state, rp->recover_addr); return; } } @@ -750,19 +774,19 @@ FALL_THROUGH: thread->recover = 0; return; } - /* - * Unanticipated page-fault errors in kernel - * should not happen. - * - * fall through... - */ - default: + /* + * Unanticipated page-fault errors in kernel + * should not happen. + * + * fall through... 
+ */ + default: /* * Exception 15 is reserved but some chips may generate it * spuriously. Seen at startup on AMD Athlon-64. */ - if (type == 15) { - kprintf("kernel_trap() ignoring spurious trap 15\n"); + if (type == 15) { + kprintf("kernel_trap() ignoring spurious trap 15\n"); return; } debugger_entry: @@ -773,8 +797,9 @@ debugger_entry: */ sync_iss_to_iks(state); #if MACH_KDP - if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr)) + if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr)) { return; + } #endif } pal_cli(); @@ -787,16 +812,16 @@ debugger_entry: static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip) { - saved_state->isf.rip = ip; + saved_state->isf.rip = ip; } static void panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result) { - const char *trapname = "Unknown"; - pal_cr_t cr0, cr2, cr3, cr4; - boolean_t potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE; - boolean_t potential_smap_fault = FALSE; + const char *trapname = "Unknown"; + pal_cr_t cr0, cr2, cr3, cr4; + boolean_t potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE; + boolean_t potential_smap_fault = FALSE; pal_get_control_registers( &cr0, &cr2, &cr3, &cr4 ); assert(ml_get_interrupts_enabled() == FALSE); @@ -810,10 +835,11 @@ panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result) kprintf("CPU %d panic trap number 0x%x, rip 0x%016llx\n", cpu_number(), regs->isf.trapno, regs->isf.rip); kprintf("cr0 0x%016llx cr2 0x%016llx cr3 0x%016llx cr4 0x%016llx\n", - cr0, cr2, cr3, cr4); + cr0, cr2, cr3, cr4); - if (regs->isf.trapno < TRAP_TYPES) - trapname = trap_type[regs->isf.trapno]; + if (regs->isf.trapno < TRAP_TYPES) { + trapname = trap_type[regs->isf.trapno]; + } if ((regs->isf.trapno == T_PAGE_FAULT) && (regs->isf.err == (T_PF_PROT | T_PF_EXECUTE)) && (regs->isf.rip == regs->cr2)) { if (pmap_smep_enabled && (regs->isf.rip < VM_MAX_USER_PAGE_ADDRESS)) { @@ -822,36 +848,36 @@ panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result) potential_kernel_NX_fault = TRUE; } } else if (pmap_smap_enabled && - regs->isf.trapno == T_PAGE_FAULT && - regs->isf.err & T_PF_PROT && - regs->cr2 < VM_MAX_USER_PAGE_ADDRESS && - regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) { + regs->isf.trapno == T_PAGE_FAULT && + regs->isf.err & T_PF_PROT && + regs->cr2 < VM_MAX_USER_PAGE_ADDRESS && + regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) { potential_smap_fault = TRUE; } #undef panic panic("Kernel trap at 0x%016llx, type %d=%s, registers:\n" - "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n" - "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n" - "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n" - "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" - "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" - "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n" - "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n", - regs->isf.rip, regs->isf.trapno, trapname, - cr0, cr2, cr3, cr4, - regs->rax, regs->rbx, regs->rcx, regs->rdx, - regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi, - regs->r8, regs->r9, regs->r10, regs->r11, - regs->r12, regs->r13, regs->r14, regs->r15, - regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF, - regs->isf.ss & 0xFFFF,regs->cr2, regs->isf.err, regs->isf.cpu, - virtualized ? " VMM" : "", - potential_kernel_NX_fault ? 
" Kernel NX fault" : "", - potential_smep_fault ? " SMEP/User NX fault" : "", - potential_smap_fault ? " SMAP fault" : "", - pl, - fault_result); + "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n" + "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n" + "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n" + "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" + "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" + "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n" + "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n", + regs->isf.rip, regs->isf.trapno, trapname, + cr0, cr2, cr3, cr4, + regs->rax, regs->rbx, regs->rcx, regs->rdx, + regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi, + regs->r8, regs->r9, regs->r10, regs->r11, + regs->r12, regs->r13, regs->r14, regs->r15, + regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF, + regs->isf.ss & 0xFFFF, regs->cr2, regs->isf.err, regs->isf.cpu, + virtualized ? " VMM" : "", + potential_kernel_NX_fault ? " Kernel NX fault" : "", + potential_smep_fault ? " SMEP/User NX fault" : "", + potential_smap_fault ? " SMAP fault" : "", + pl, + fault_result); /* * This next statement is not executed, * but it's needed to stop the compiler using tail call optimization @@ -876,23 +902,23 @@ void user_trap( x86_saved_state_t *saved_state) { - int exc; - int err; - mach_exception_code_t code; + int exc; + int err; + mach_exception_code_t code; mach_exception_subcode_t subcode; - int type; - user_addr_t vaddr; - vm_prot_t prot; - thread_t thread = current_thread(); - kern_return_t kret; - user_addr_t rip; - unsigned long dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */ + int type; + user_addr_t vaddr; + vm_prot_t prot; + thread_t thread = current_thread(); + kern_return_t kret; + user_addr_t rip; + unsigned long dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */ assert((is_saved_state32(saved_state) && !thread_is_64bit_addr(thread)) || - (is_saved_state64(saved_state) && thread_is_64bit_addr(thread))); + (is_saved_state64(saved_state) && thread_is_64bit_addr(thread))); if (is_saved_state64(saved_state)) { - x86_saved_state64_t *regs; + x86_saved_state64_t *regs; regs = saved_state64(saved_state); @@ -904,7 +930,7 @@ user_trap( vaddr = (user_addr_t)regs->cr2; rip = (user_addr_t)regs->isf.rip; } else { - x86_saved_state32_t *regs; + x86_saved_state32_t *regs; regs = saved_state32(saved_state); @@ -922,16 +948,16 @@ user_trap( /* Stash and clear this processor's DR6 value, in the event * this was a debug register match */ - __asm__ volatile ("mov %%db6, %0" : "=r" (dr6)); + __asm__ volatile ("mov %%db6, %0" : "=r" (dr6)); __asm__ volatile ("mov %0, %%db6" : : "r" (clear)); } pal_sti(); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE, - (unsigned)(vaddr>>32), (unsigned)vaddr, - (unsigned)(rip>>32), (unsigned)rip, 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE, + (unsigned)(vaddr >> 32), (unsigned)vaddr, + (unsigned)(rip >> 32), (unsigned)rip, 0); code = 0; subcode = 0; @@ -944,99 +970,99 @@ user_trap( * INT_3 case handle them. 
*/ #endif - + DEBUG_KPRINT_SYSCALL_MASK(1, - "user_trap: type=0x%x(%s) err=0x%x cr2=%p rip=%p\n", - type, trap_type[type], err, (void *)(long) vaddr, (void *)(long) rip); - - switch (type) { + "user_trap: type=0x%x(%s) err=0x%x cr2=%p rip=%p\n", + type, trap_type[type], err, (void *)(long) vaddr, (void *)(long) rip); - case T_DIVIDE_ERROR: + switch (type) { + case T_DIVIDE_ERROR: exc = EXC_ARITHMETIC; code = EXC_I386_DIV; break; - case T_DEBUG: - { - pcb_t pcb; + case T_DEBUG: + { + pcb_t pcb; + /* + * Update the PCB with this processor's DR6 value + * in the event this was a debug register match. + */ + pcb = THREAD_TO_PCB(thread); + if (pcb->ids) { /* - * Update the PCB with this processor's DR6 value - * in the event this was a debug register match. + * We can get and set the status register + * in 32-bit mode even on a 64-bit thread + * because the high order bits are not + * used on x86_64 */ - pcb = THREAD_TO_PCB(thread); - if (pcb->ids) { - /* - * We can get and set the status register - * in 32-bit mode even on a 64-bit thread - * because the high order bits are not - * used on x86_64 - */ - if (thread_is_64bit_addr(thread)) { - x86_debug_state64_t *ids = pcb->ids; - ids->dr6 = dr6; - } else { /* 32 bit thread */ - x86_debug_state32_t *ids = pcb->ids; - ids->dr6 = (uint32_t) dr6; - } + if (thread_is_64bit_addr(thread)) { + x86_debug_state64_t *ids = pcb->ids; + ids->dr6 = dr6; + } else { /* 32 bit thread */ + x86_debug_state32_t *ids = pcb->ids; + ids->dr6 = (uint32_t) dr6; } - exc = EXC_BREAKPOINT; - code = EXC_I386_SGL; - break; } - case T_INT3: + exc = EXC_BREAKPOINT; + code = EXC_I386_SGL; + break; + } + case T_INT3: #if CONFIG_DTRACE - if (dtrace_user_probe(saved_state) == KERN_SUCCESS) + if (dtrace_user_probe(saved_state) == KERN_SUCCESS) { return; /* If it succeeds, we are done... */ + } #endif exc = EXC_BREAKPOINT; code = EXC_I386_BPT; break; - case T_OVERFLOW: + case T_OVERFLOW: exc = EXC_ARITHMETIC; code = EXC_I386_INTO; break; - case T_OUT_OF_BOUNDS: + case T_OUT_OF_BOUNDS: exc = EXC_SOFTWARE; code = EXC_I386_BOUND; break; - case T_INVALID_OPCODE: + case T_INVALID_OPCODE: #if !defined(RC_HIDE_XNU_J137) - fpUDflt(rip); /* May return from exception directly */ + fpUDflt(rip); /* May return from exception directly */ #endif exc = EXC_BAD_INSTRUCTION; code = EXC_I386_INVOP; break; - case T_NO_FPU: + case T_NO_FPU: fpnoextflt(); return; - case T_FPU_FAULT: + case T_FPU_FAULT: fpextovrflt(); /* Propagates exception directly, doesn't return */ return; - case T_INVALID_TSS: /* invalid TSS == iret with NT flag set */ + case T_INVALID_TSS: /* invalid TSS == iret with NT flag set */ exc = EXC_BAD_INSTRUCTION; code = EXC_I386_INVTSSFLT; subcode = err; break; - case T_SEGMENT_NOT_PRESENT: + case T_SEGMENT_NOT_PRESENT: exc = EXC_BAD_INSTRUCTION; code = EXC_I386_SEGNPFLT; subcode = err; break; - case T_STACK_FAULT: + case T_STACK_FAULT: exc = EXC_BAD_INSTRUCTION; code = EXC_I386_STKFLT; subcode = err; break; - case T_GENERAL_PROTECTION: + case T_GENERAL_PROTECTION: /* * There's a wide range of circumstances which generate this * class of exception. From user-space, many involve bad @@ -1050,20 +1076,22 @@ user_trap( * to EXC_BAD_ACCESS (and thence SIGSEGV) also - rather than * EXC_BAD_INSTRUCTION which is more accurate. We just can't * win! 
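/*
 * [Editor's sketch, not part of the patch] Both T_PAGE_FAULT paths,
 * the kernel_trap() one earlier in this file and the user_trap() one
 * just below, build the vm_fault() protection mask from the hardware
 * error code the same way. Restated with the T_PF_* bits from
 * osfmk/i386/trap.h; the VM_PROT_* values mirror mach/vm_prot.h.
 */
#include <assert.h>

#define T_PF_WRITE      0x2
#define T_PF_EXECUTE    0x10

#define VM_PROT_READ    0x1
#define VM_PROT_WRITE   0x2
#define VM_PROT_EXECUTE 0x4

static int
fault_prot(int err)
{
	int prot = VM_PROT_READ;        /* every fault implies a read */

	if (err & T_PF_WRITE) {
		prot |= VM_PROT_WRITE;
	}
	if (err & T_PF_EXECUTE) {
		prot |= VM_PROT_EXECUTE;
	}
	return prot;
}

int
main(void)
{
	assert(fault_prot(T_PF_WRITE) == (VM_PROT_READ | VM_PROT_WRITE));
	assert(fault_prot(T_PF_EXECUTE) == (VM_PROT_READ | VM_PROT_EXECUTE));
	return 0;
}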
- */ + */ exc = EXC_BAD_ACCESS; code = EXC_I386_GPFLT; subcode = err; break; - case T_PAGE_FAULT: - { - prot = VM_PROT_READ; + case T_PAGE_FAULT: + { + prot = VM_PROT_READ; - if (err & T_PF_WRITE) - prot |= VM_PROT_WRITE; - if (__improbable(err & T_PF_EXECUTE)) - prot |= VM_PROT_EXECUTE; + if (err & T_PF_WRITE) { + prot |= VM_PROT_WRITE; + } + if (__improbable(err & T_PF_EXECUTE)) { + prot |= VM_PROT_EXECUTE; + } #if DEVELOPMENT || DEBUG uint32_t fsig = 0; fsig = thread_fpsimd_hash(thread); @@ -1072,9 +1100,9 @@ user_trap( #endif #endif kret = vm_fault(thread->map, - vaddr, - prot, FALSE, VM_KERN_MEMORY_NONE, - THREAD_ABORTSAFE, NULL, 0); + vaddr, + prot, FALSE, VM_KERN_MEMORY_NONE, + THREAD_ABORTSAFE, NULL, 0); #if DEVELOPMENT || DEBUG if (fsig) { uint32_t fsig2 = thread_fpsimd_hash(thread); @@ -1096,23 +1124,24 @@ user_trap( /*NOTREACHED*/ } - user_page_fault_continue(kret); - } /* NOTREACHED */ - break; + user_page_fault_continue(kret); + } /* NOTREACHED */ + break; - case T_SSE_FLOAT_ERROR: + case T_SSE_FLOAT_ERROR: fpSSEexterrflt(); /* Propagates exception directly, doesn't return */ return; - case T_FLOATING_POINT_ERROR: + case T_FLOATING_POINT_ERROR: fpexterrflt(); /* Propagates exception directly, doesn't return */ return; - case T_DTRACE_RET: + case T_DTRACE_RET: #if CONFIG_DTRACE - if (dtrace_user_probe(saved_state) == KERN_SUCCESS) + if (dtrace_user_probe(saved_state) == KERN_SUCCESS) { return; /* If it succeeds, we are done... */ + } #endif /* * If we get an INT 0x7f when we do not expect to, @@ -1122,7 +1151,7 @@ user_trap( code = EXC_I386_INVOP; break; - default: + default: panic("Unexpected user trap, type %d", type); return; } @@ -1145,15 +1174,15 @@ user_trap( */ void i386_exception( - int exc, + int exc, mach_exception_code_t code, mach_exception_subcode_t subcode) { mach_exception_data_type_t codes[EXCEPTION_CODE_MAX]; DEBUG_KPRINT_SYSCALL_MACH("i386_exception: exc=%d code=0x%llx subcode=0x%llx\n", - exc, code, subcode); - codes[0] = code; /* new exception interface */ + exc, code, subcode); + codes[0] = code; /* new exception interface */ codes[1] = subcode; exception_triage(exc, codes, 2); /*NOTREACHED*/ @@ -1166,7 +1195,7 @@ i386_exception( * an "MP_KDP" IPI. Called with null saved_state if an incoming IPI * was detected from the kernel while spinning with interrupts masked. */ - + void sync_iss_to_iks(x86_saved_state_t *saved_state) { @@ -1175,20 +1204,21 @@ sync_iss_to_iks(x86_saved_state_t *saved_state) boolean_t record_active_regs = FALSE; /* The PAL may have a special way to sync registers */ - if (saved_state && saved_state->flavor == THREAD_STATE_NONE) + if (saved_state && saved_state->flavor == THREAD_STATE_NONE) { pal_get_kern_regs( saved_state ); + } - if (current_thread() != NULL && + if (current_thread() != NULL && (kstack = current_thread()->kernel_stack) != 0) { - x86_saved_state64_t *regs = saved_state64(saved_state); + x86_saved_state64_t *regs = saved_state64(saved_state); iks = STACK_IKS(kstack); /* Did we take the trap/interrupt in kernel mode? 
*/ if (saved_state == NULL || /* NULL => polling in kernel */ - regs == USER_REGS64(current_thread())) - record_active_regs = TRUE; - else { + regs == USER_REGS64(current_thread())) { + record_active_regs = TRUE; + } else { iks->k_rbx = regs->rbx; iks->k_rsp = regs->isf.rsp; iks->k_rbp = regs->rbp; @@ -1202,18 +1232,18 @@ sync_iss_to_iks(x86_saved_state_t *saved_state) if (record_active_regs == TRUE) { /* Show the trap handler path */ - __asm__ volatile("movq %%rbx, %0" : "=m" (iks->k_rbx)); - __asm__ volatile("movq %%rsp, %0" : "=m" (iks->k_rsp)); - __asm__ volatile("movq %%rbp, %0" : "=m" (iks->k_rbp)); - __asm__ volatile("movq %%r12, %0" : "=m" (iks->k_r12)); - __asm__ volatile("movq %%r13, %0" : "=m" (iks->k_r13)); - __asm__ volatile("movq %%r14, %0" : "=m" (iks->k_r14)); - __asm__ volatile("movq %%r15, %0" : "=m" (iks->k_r15)); + __asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx)); + __asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp)); + __asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp)); + __asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12)); + __asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13)); + __asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14)); + __asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15)); /* "Current" instruction pointer */ - __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" - : "=m" (iks->k_rip) - : - : "rax"); + __asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" + : "=m" (iks->k_rip) + : + : "rax"); } } @@ -1224,22 +1254,23 @@ sync_iss_to_iks(x86_saved_state_t *saved_state) * or user space. */ void -sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) { +sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) +{ struct x86_kernel_state *iks; vm_offset_t kstack; if ((kstack = current_thread()->kernel_stack) != 0) { iks = STACK_IKS(kstack); /* Display the trap handler path */ - __asm__ volatile("movq %%rbx, %0" : "=m" (iks->k_rbx)); - __asm__ volatile("movq %%rsp, %0" : "=m" (iks->k_rsp)); - __asm__ volatile("movq %%rbp, %0" : "=m" (iks->k_rbp)); - __asm__ volatile("movq %%r12, %0" : "=m" (iks->k_r12)); - __asm__ volatile("movq %%r13, %0" : "=m" (iks->k_r13)); - __asm__ volatile("movq %%r14, %0" : "=m" (iks->k_r14)); - __asm__ volatile("movq %%r15, %0" : "=m" (iks->k_r15)); + __asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx)); + __asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp)); + __asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp)); + __asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12)); + __asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13)); + __asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14)); + __asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15)); /* "Current" instruction pointer */ - __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax"); + __asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax"); } } @@ -1248,13 +1279,15 @@ sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) { #endif #if TERI -extern void thread_exception_return_internal(void) __dead2; +extern void thread_exception_return_internal(void) __dead2; -void thread_exception_return(void) { +void +thread_exception_return(void) +{ thread_t thread = current_thread(); ml_set_interrupts_enabled(FALSE); if (thread_is_64bit_addr(thread) != task_has_64Bit_addr(thread->task)) { - panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit_addr(thread), 
task_has_64Bit_addr(thread->task)); + panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit_addr(thread), task_has_64Bit_addr(thread->task)); } if (thread_is_64bit_addr(thread)) { @@ -1262,11 +1295,11 @@ void thread_exception_return(void) { panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS)); } } else { - if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) { - panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS)); - + if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) { + panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS)); } } + assert(get_preemption_level() == 0); thread_exception_return_internal(); } #endif diff --git a/osfmk/i386/trap.h b/osfmk/i386/trap.h index 6966aeca2..fc7df3dfb 100644 --- a/osfmk/i386/trap.h +++ b/osfmk/i386/trap.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,65 +22,65 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _I386_TRAP_H_ -#define _I386_TRAP_H_ +#ifndef _I386_TRAP_H_ +#define _I386_TRAP_H_ /* * Hardware trap vectors for i386. 
*/ -#define T_DIVIDE_ERROR 0 -#define T_DEBUG 1 -#define T_NMI 2 /* non-maskable interrupt */ -#define T_INT3 3 /* int 3 instruction */ -#define T_OVERFLOW 4 /* overflow test */ -#define T_OUT_OF_BOUNDS 5 /* bounds check */ -#define T_INVALID_OPCODE 6 /* invalid op code */ -#define T_NO_FPU 7 /* no floating point */ -#define T_DOUBLE_FAULT 8 /* double fault */ -#define T_FPU_FAULT 9 -#define T_INVALID_TSS 10 -#define T_SEGMENT_NOT_PRESENT 11 -#define T_STACK_FAULT 12 -#define T_GENERAL_PROTECTION 13 -#define T_PAGE_FAULT 14 +#define T_DIVIDE_ERROR 0 +#define T_DEBUG 1 +#define T_NMI 2 /* non-maskable interrupt */ +#define T_INT3 3 /* int 3 instruction */ +#define T_OVERFLOW 4 /* overflow test */ +#define T_OUT_OF_BOUNDS 5 /* bounds check */ +#define T_INVALID_OPCODE 6 /* invalid op code */ +#define T_NO_FPU 7 /* no floating point */ +#define T_DOUBLE_FAULT 8 /* double fault */ +#define T_FPU_FAULT 9 +#define T_INVALID_TSS 10 +#define T_SEGMENT_NOT_PRESENT 11 +#define T_STACK_FAULT 12 +#define T_GENERAL_PROTECTION 13 +#define T_PAGE_FAULT 14 /* 15 */ -#define T_FLOATING_POINT_ERROR 16 -#define T_WATCHPOINT 17 -#define T_MACHINE_CHECK 18 +#define T_FLOATING_POINT_ERROR 16 +#define T_WATCHPOINT 17 +#define T_MACHINE_CHECK 18 #define T_SSE_FLOAT_ERROR 19 /* 20-126 */ #define T_DTRACE_RET 127 @@ -90,72 +90,72 @@ * See also the "software interrupt codes" section of * osfmk/mach/i386/syscall_sw.h */ -#define T_SYSENTER 0x84 -#define T_SYSCALL 0x85 +#define T_SYSENTER 0x84 +#define T_SYSCALL 0x85 -#define T_PREEMPT 255 +#define T_PREEMPT 255 #define TRAP_NAMES "divide error", "debug trap", "NMI", "breakpoint", \ - "overflow", "bounds check", "invalid opcode", \ - "no coprocessor", "double fault", "coprocessor overrun", \ - "invalid TSS", "segment not present", "stack bounds", \ - "general protection", "page fault", "(reserved)", \ - "coprocessor error", "watchpoint", "machine check", "SSE floating point" + "overflow", "bounds check", "invalid opcode", \ + "no coprocessor", "double fault", "coprocessor overrun", \ + "invalid TSS", "segment not present", "stack bounds", \ + "general protection", "page fault", "(reserved)", \ + "coprocessor error", "watchpoint", "machine check", "SSE floating point" /* * Page-fault trap codes. 
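/*
 * [Editor's sketch, not part of the patch] The TRAP_NAMES table just
 * above is how panic_trap() earlier in this patch maps a trap number
 * to a printable name, guarded by TRAP_TYPES so reserved vectors fall
 * back to "Unknown". A standalone restatement:
 */
#include <stdio.h>

static const char *trap_type[] = {
	"divide error", "debug trap", "NMI", "breakpoint",
	"overflow", "bounds check", "invalid opcode",
	"no coprocessor", "double fault", "coprocessor overrun",
	"invalid TSS", "segment not present", "stack bounds",
	"general protection", "page fault", "(reserved)",
	"coprocessor error", "watchpoint", "machine check",
	"SSE floating point"
};
static const unsigned TRAP_TYPES = sizeof(trap_type) / sizeof(trap_type[0]);

int
main(void)
{
	unsigned trapno = 13;
	const char *trapname = "Unknown";

	if (trapno < TRAP_TYPES) {
		trapname = trap_type[trapno];
	}
	printf("trap %u = %s\n", trapno, trapname); /* trap 13 = general protection */
	return 0;
}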
*/ -#define T_PF_PROT 0x1 /* protection violation */ -#define T_PF_WRITE 0x2 /* write access */ -#define T_PF_USER 0x4 /* from user state */ +#define T_PF_PROT 0x1 /* protection violation */ +#define T_PF_WRITE 0x2 /* write access */ +#define T_PF_USER 0x4 /* from user state */ -#define T_PF_RSVD 0x8 /* reserved bit set to 1 */ -#define T_PF_EXECUTE 0x10 /* instruction fetch when NX */ +#define T_PF_RSVD 0x8 /* reserved bit set to 1 */ +#define T_PF_EXECUTE 0x10 /* instruction fetch when NX */ #if !defined(ASSEMBLER) && defined(MACH_KERNEL) #include -extern void i386_exception( - int exc, - mach_exception_code_t code, - mach_exception_subcode_t subcode); +extern void i386_exception( + int exc, + mach_exception_code_t code, + mach_exception_subcode_t subcode); -extern void sync_iss_to_iks(x86_saved_state_t *regs); +extern void sync_iss_to_iks(x86_saved_state_t *regs); -extern void sync_iss_to_iks_unconditionally( - x86_saved_state_t *regs); +extern void sync_iss_to_iks_unconditionally( + x86_saved_state_t *regs); -extern void kernel_trap(x86_saved_state_t *regs, uintptr_t *lo_spp); +extern void kernel_trap(x86_saved_state_t *regs, uintptr_t *lo_spp); -extern void user_trap(x86_saved_state_t *regs); +extern void user_trap(x86_saved_state_t *regs); -extern void interrupt(x86_saved_state_t *regs); +extern void interrupt(x86_saved_state_t *regs); -extern void panic_double_fault64(x86_saved_state_t *regs); -extern void panic_machine_check64(x86_saved_state_t *regs); +extern void panic_double_fault64(x86_saved_state_t *regs); +extern void panic_machine_check64(x86_saved_state_t *regs); typedef kern_return_t (*perfCallback)( - int trapno, - void *regs, - uintptr_t *lo_spp, - int); - -extern void panic_i386_backtrace(void *, int, const char *, boolean_t, x86_saved_state_t *); -extern void print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, boolean_t is_64_bit); -extern void print_thread_num_that_crashed(task_t task); -extern void print_tasks_user_threads(task_t task); -extern void print_threads_registers(thread_t thread); -extern void print_uuid_info(task_t task); -extern void print_launchd_info(void); + int trapno, + void *regs, + uintptr_t *lo_spp, + int); + +extern void panic_i386_backtrace(void *, int, const char *, boolean_t, x86_saved_state_t *); +extern void print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker, boolean_t is_64_bit); +extern void print_thread_num_that_crashed(task_t task); +extern void print_tasks_user_threads(task_t task); +extern void print_threads_registers(thread_t thread); +extern void print_uuid_info(task_t task); +extern void print_launchd_info(void); #if MACH_KDP -extern boolean_t kdp_i386_trap( - unsigned int, - x86_saved_state64_t *, - kern_return_t, - vm_offset_t); +extern boolean_t kdp_i386_trap( + unsigned int, + x86_saved_state64_t *, + kern_return_t, + vm_offset_t); #endif /* MACH_KDP */ -#endif /* !ASSEMBLER && MACH_KERNEL */ +#endif /* !ASSEMBLER && MACH_KERNEL */ -#endif /* _I386_TRAP_H_ */ +#endif /* _I386_TRAP_H_ */ diff --git a/osfmk/i386/trap_native.c b/osfmk/i386/trap_native.c index 23bda004e..82f5c5168 100644 --- a/osfmk/i386/trap_native.c +++ b/osfmk/i386/trap_native.c @@ -2,7 +2,7 @@ * Copyright (c) 2009-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* -* @OSF_COPYRIGHT@ -*/ -/* -* Mach Operating System -* Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University -* All Rights Reserved. -* -* Permission to use, copy, modify and distribute this software and its -* documentation is hereby granted, provided that both the copyright -* notice and this permission notice appear in all copies of the -* software, derivative works or modified versions, and any portions -* thereof, and that both notices appear in supporting documentation. -* -* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" -* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR -* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. -* -* Carnegie Mellon requests users of this software to return to -* -* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU -* School of Computer Science -* Carnegie Mellon University -* Pittsburgh PA 15213-3890 -* -* any improvements or extensions that they make and grant Carnegie Mellon -* the rights to redistribute these changes. -*/ + * @OSF_COPYRIGHT@ + */ /* -*/ + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + */ /* -* Hardware trap/fault handler. + * Hardware trap/fault handler. */ #include @@ -132,7 +132,7 @@ panic_64(x86_saved_state_t *sp, __unused int pc, __unused const char *msg, boole */ panic_io_port_read(); - + /* * Break kprintf lock in case of recursion, * and record originally faulted instruction address. 
@@ -150,35 +150,32 @@ panic_64(x86_saved_state_t *sp, __unused int pc, __unused const char *msg, boole x86_saved_state64_t *regs = saved_state64(sp); panic("%s at 0x%016llx, registers:\n" - "CR0: 0x%016lx, CR2: 0x%016lx, CR3: 0x%016lx, CR4: 0x%016lx\n" - "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n" - "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n" - "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" - "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" - "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n" - "Error code: 0x%016llx%s\n", - msg, - regs->isf.rip, - get_cr0(), get_cr2(), get_cr3_raw(), get_cr4(), - regs->rax, regs->rbx, regs->rcx, regs->rdx, - regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi, - regs->r8, regs->r9, regs->r10, regs->r11, - regs->r12, regs->r13, regs->r14, regs->r15, - regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF, regs->isf.ss & 0xFFFF, - regs->isf.err, virtualized ? " VMM" : ""); + "CR0: 0x%016lx, CR2: 0x%016lx, CR3: 0x%016lx, CR4: 0x%016lx\n" + "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n" + "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n" + "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" + "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" + "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n" + "Error code: 0x%016llx%s\n", + msg, + regs->isf.rip, + get_cr0(), get_cr2(), get_cr3_raw(), get_cr4(), + regs->rax, regs->rbx, regs->rcx, regs->rdx, + regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi, + regs->r8, regs->r9, regs->r10, regs->r11, + regs->r12, regs->r13, regs->r14, regs->r15, + regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF, regs->isf.ss & 0xFFFF, + regs->isf.err, virtualized ? " VMM" : ""); } void panic_double_fault64(x86_saved_state_t *sp) { - (void)OSCompareAndSwap((UInt32) -1, (UInt32) cpu_number(), (volatile UInt32 *)&panic_double_fault_cpu); + (void)OSCompareAndSwap((UInt32) - 1, (UInt32) cpu_number(), (volatile UInt32 *)&panic_double_fault_cpu); panic_64(sp, PANIC_DOUBLE_FAULT, "Double fault", FALSE); - } void - panic_machine_check64(x86_saved_state_t *sp) { panic_64(sp, PANIC_MACHINE_CHECK, "Machine Check", TRUE); - } diff --git a/osfmk/i386/tsc.c b/osfmk/i386/tsc.c index c776541db..cd0aeb554 100644 --- a/osfmk/i386/tsc.c +++ b/osfmk/i386/tsc.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -48,7 +48,7 @@ #include #include #include -#include /* for kernel_map */ +#include /* for kernel_map */ #include #include #include @@ -63,23 +63,23 @@ #include #include -uint64_t busFCvtt2n = 0; -uint64_t busFCvtn2t = 0; -uint64_t tscFreq = 0; -uint64_t tscFCvtt2n = 0; -uint64_t tscFCvtn2t = 0; -uint64_t tscGranularity = 0; -uint64_t bus2tsc = 0; -uint64_t busFreq = 0; -uint32_t flex_ratio = 0; -uint32_t flex_ratio_min = 0; -uint32_t flex_ratio_max = 0; +uint64_t busFCvtt2n = 0; +uint64_t busFCvtn2t = 0; +uint64_t tscFreq = 0; +uint64_t tscFCvtt2n = 0; +uint64_t tscFCvtn2t = 0; +uint64_t tscGranularity = 0; +uint64_t bus2tsc = 0; +uint64_t busFreq = 0; +uint32_t flex_ratio = 0; +uint32_t flex_ratio_min = 0; +uint32_t flex_ratio_max = 0; -uint64_t tsc_at_boot = 0; +uint64_t tsc_at_boot = 0; -#define bit(n) (1ULL << (n)) -#define bitmask(h,l) ((bit(h)|(bit(h)-1)) & ~(bit(l)-1)) -#define bitfield(x,h,l) (((x) & bitmask(h,l)) >> l) +#define bit(n) (1ULL << (n)) +#define bitmask(h, l) ((bit(h)|(bit(h)-1)) & ~(bit(l)-1)) +#define bitfield(x, h, l) (((x) & bitmask(h,l)) >> l) /* Decimal powers: */ #define kilo (1000ULL) @@ -88,7 +88,7 @@ uint64_t tsc_at_boot = 0; #define Tera (kilo * Giga) #define Peta (kilo * Tera) -#define CPU_FAMILY_PENTIUM_M (0x6) +#define CPU_FAMILY_PENTIUM_M (0x6) /* * This routine extracts a frequency property in Hz from the device tree. @@ -97,36 +97,37 @@ uint64_t tsc_at_boot = 0; static uint64_t EFI_get_frequency(const char *prop) { - uint64_t frequency = 0; - DTEntry entry; - void *value; - unsigned int size; + uint64_t frequency = 0; + DTEntry entry; + void *value; + unsigned int size; if (DTLookupEntry(0, "/efi/platform", &entry) != kSuccess) { kprintf("EFI_get_frequency: didn't find /efi/platform\n"); return 0; } - if (DTGetProperty(entry,prop,&value,&size) != kSuccess) { - kprintf("EFI_get_frequency: property %s not found\n", prop); - return 0; - } - if (size == sizeof(uint64_t)) { - frequency = *(uint64_t *) value; - kprintf("EFI_get_frequency: read %s value: %llu\n", - prop, frequency); - } /* * While we're here, see if EFI published an initial TSC value. 
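/*
 * [Editor's sketch, not part of the patch] The EFI_get_frequency()
 * hunk below reorders the lookups so "InitialTSC" is read before the
 * function can bail out on a missing frequency property. The lookup
 * pattern itself, with a hypothetical stand-in for the DeviceTree
 * DTGetProperty() API (0 return means found):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t fake_initial_tsc = 123456789ULL;

static int
dt_get_property(const char *name, void **value, unsigned int *size)
{
	if (strcmp(name, "InitialTSC") == 0) {
		*value = &fake_initial_tsc;
		*size = sizeof(fake_initial_tsc);
		return 0;
	}
	return -1;                      /* not found */
}

static uint64_t
get_u64_property(const char *name, uint64_t fallback)
{
	void *value;
	unsigned int size;
	uint64_t v;

	if (dt_get_property(name, &value, &size) != 0 ||
	    size != sizeof(uint64_t)) {
		return fallback;        /* absent or wrong shape */
	}
	memcpy(&v, value, sizeof(v));   /* copy: value may be unaligned */
	return v;
}

int
main(void)
{
	printf("InitialTSC=%llu FSBFrequency=%llu\n",
	    (unsigned long long)get_u64_property("InitialTSC", 0),
	    (unsigned long long)get_u64_property("FSBFrequency", 0));
	return 0;
}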
*/ - if (DTGetProperty(entry,"InitialTSC",&value,&size) == kSuccess) { + if (DTGetProperty(entry, "InitialTSC", &value, &size) == kSuccess) { if (size == sizeof(uint64_t)) { tsc_at_boot = *(uint64_t *) value; kprintf("EFI_get_frequency: read InitialTSC: %llu\n", - tsc_at_boot); + tsc_at_boot); } } + if (DTGetProperty(entry, prop, &value, &size) != kSuccess) { + kprintf("EFI_get_frequency: property %s not found\n", prop); + return 0; + } + if (size == sizeof(uint64_t)) { + frequency = *(uint64_t *) value; + kprintf("EFI_get_frequency: read %s value: %llu\n", + prop, frequency); + } + return frequency; } @@ -137,27 +138,26 @@ EFI_get_frequency(const char *prop) void tsc_init(void) { - boolean_t N_by_2_bus_ratio = FALSE; + boolean_t N_by_2_bus_ratio = FALSE; if (cpuid_vmm_present()) { kprintf("VMM vendor %u TSC frequency %u KHz bus frequency %u KHz\n", - cpuid_vmm_info()->cpuid_vmm_family, - cpuid_vmm_info()->cpuid_vmm_tsc_frequency, - cpuid_vmm_info()->cpuid_vmm_bus_frequency); + cpuid_vmm_info()->cpuid_vmm_family, + cpuid_vmm_info()->cpuid_vmm_tsc_frequency, + cpuid_vmm_info()->cpuid_vmm_bus_frequency); if (cpuid_vmm_info()->cpuid_vmm_tsc_frequency && - cpuid_vmm_info()->cpuid_vmm_bus_frequency) { - + cpuid_vmm_info()->cpuid_vmm_bus_frequency) { busFreq = (uint64_t)cpuid_vmm_info()->cpuid_vmm_bus_frequency * kilo; busFCvtt2n = ((1 * Giga) << 32) / busFreq; busFCvtn2t = 0xFFFFFFFFFFFFFFFFULL / busFCvtt2n; - + tscFreq = (uint64_t)cpuid_vmm_info()->cpuid_vmm_tsc_frequency * kilo; tscFCvtt2n = ((1 * Giga) << 32) / tscFreq; tscFCvtn2t = 0xFFFFFFFFFFFFFFFFULL / tscFCvtt2n; - + tscGranularity = tscFreq / busFreq; - + bus2tsc = tmrCvt(busFCvtt2n, tscFCvtn2t); return; @@ -168,34 +168,50 @@ tsc_init(void) case CPUFAMILY_INTEL_KABYLAKE: case CPUFAMILY_INTEL_SKYLAKE: { /* - * SkyLake and later has an Always Running Timer (ART) providing + * SkyLake and later has an Always Running Timer (ART) providing * the reference frequency. CPUID leaf 0x15 determines the * rationship between this and the TSC frequency expressed as - * - multiplier (numerator, N), and + * - multiplier (numerator, N), and * - divisor (denominator, M). * So that TSC = ART * N / M. */ - cpuid_tsc_leaf_t *tsc_leafp = &cpuid_info()->cpuid_tsc_leaf; - uint64_t N = (uint64_t) tsc_leafp->numerator; - uint64_t M = (uint64_t) tsc_leafp->denominator; - uint64_t refFreq; + i386_cpu_info_t *infop = cpuid_info(); + cpuid_tsc_leaf_t *tsc_leafp = &infop->cpuid_tsc_leaf; + uint64_t N = (uint64_t) tsc_leafp->numerator; + uint64_t M = (uint64_t) tsc_leafp->denominator; + uint64_t refFreq; refFreq = EFI_get_frequency("ARTFrequency"); - if (refFreq == 0) - refFreq = BASE_ART_CLOCK_SOURCE; + if (refFreq == 0) { + /* + * Intel Scalable Processor (Xeon-SP) CPUs use a different + * ART frequency. Use that default here if EFI didn't + * specify the frequency. Since Xeon-SP uses the same + * DisplayModel / DisplayFamily as Xeon-W, we need to + * use the platform ID (or, as XNU calls it, the "processor + * flag") to differentiate the two. 
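To make the ART relationship above concrete: CPUID leaf 0x15 supplies a numerator N and denominator M, and the TSC runs at refFreq * N / M, where refFreq is 24 MHz (BASE_ART_CLOCK_SOURCE) or, on Xeon-SP parts, 25 MHz. With, say, N = 192 and M = 2 (illustrative leaf values, not from the source), the derived TSC frequency is 2304 MHz:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t refFreq = 24000000ULL;       /* BASE_ART_CLOCK_SOURCE, 24 MHz */
	uint64_t N = 192, M = 2;              /* hypothetical CPUID 0x15 values */
	uint64_t tscFreq = refFreq * N / M;   /* TSC = ART * N / M */

	printf("TSC = %llu.%06llu MHz\n",
	    (unsigned long long)(tscFreq / 1000000),
	    (unsigned long long)(tscFreq % 1000000));   /* 2304.000000 */
	return 0;
}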
+ */ + if (cpuid_family() == 0x06 && + infop->cpuid_model == CPUID_MODEL_SKYLAKE_W && + is_xeon_sp(infop->cpuid_processor_flag)) { + refFreq = BASE_ART_CLOCK_SOURCE_SP; + } else { + refFreq = BASE_ART_CLOCK_SOURCE; + } + } assert(N != 0); assert(M != 1); tscFreq = refFreq * N / M; - busFreq = tscFreq; /* bus is APIC frequency */ + busFreq = tscFreq; /* bus is APIC frequency */ kprintf(" ART: Frequency = %6d.%06dMHz, N/M = %lld/%llu\n", - (uint32_t)(refFreq / Mega), - (uint32_t)(refFreq % Mega), - N, M); + (uint32_t)(refFreq / Mega), + (uint32_t)(refFreq % Mega), + N, M); break; - } + } default: { uint64_t msr_flex_ratio; uint64_t msr_platform_info; @@ -208,30 +224,32 @@ tsc_init(void) /* No BIOS-programed flex ratio. Use hardware max as default */ tscGranularity = flex_ratio_max; if (msr_flex_ratio & bit(16)) { - /* Flex Enabled: Use this MSR if less than max */ + /* Flex Enabled: Use this MSR if less than max */ flex_ratio = (uint32_t)bitfield(msr_flex_ratio, 15, 8); - if (flex_ratio < flex_ratio_max) + if (flex_ratio < flex_ratio_max) { tscGranularity = flex_ratio; + } } busFreq = EFI_get_frequency("FSBFrequency"); - /* If EFI isn't configured correctly, use a constant + /* If EFI isn't configured correctly, use a constant * value. See 6036811. */ - if (busFreq == 0) - busFreq = BASE_NHM_CLOCK_SOURCE; + if (busFreq == 0) { + busFreq = BASE_NHM_CLOCK_SOURCE; + } break; - } + } case CPUFAMILY_INTEL_PENRYN: { - uint64_t prfsts; + uint64_t prfsts; prfsts = rdmsr64(IA32_PERF_STS); tscGranularity = (uint32_t)bitfield(prfsts, 44, 40); N_by_2_bus_ratio = (prfsts & bit(46)) != 0; busFreq = EFI_get_frequency("FSBFrequency"); - } + } } if (busFreq != 0) { @@ -242,11 +260,11 @@ tsc_init(void) } kprintf(" BUS: Frequency = %6d.%06dMHz, " - "cvtt2n = %08X.%08X, cvtn2t = %08X.%08X\n", - (uint32_t)(busFreq / Mega), - (uint32_t)(busFreq % Mega), - (uint32_t)(busFCvtt2n >> 32), (uint32_t)busFCvtt2n, - (uint32_t)(busFCvtn2t >> 32), (uint32_t)busFCvtn2t); + "cvtt2n = %08X.%08X, cvtn2t = %08X.%08X\n", + (uint32_t)(busFreq / Mega), + (uint32_t)(busFreq % Mega), + (uint32_t)(busFCvtt2n >> 32), (uint32_t)busFCvtt2n, + (uint32_t)(busFCvtn2t >> 32), (uint32_t)busFCvtn2t); if (tscFreq == busFreq) { bus2tsc = 1; @@ -263,12 +281,13 @@ tsc_init(void) * 0.5 more than this - i.e. that the true bus ratio * is (2*tscGranularity + 1)/2. */ - if (N_by_2_bus_ratio) - tscFCvtt2n = busFCvtt2n * 2 / (1 + 2*tscGranularity); - else + if (N_by_2_bus_ratio) { + tscFCvtt2n = busFCvtt2n * 2 / (1 + 2 * tscGranularity); + } else { tscFCvtt2n = busFCvtt2n / tscGranularity; + } - tscFreq = ((1 * Giga) << 32) / tscFCvtt2n; + tscFreq = ((1 * Giga) << 32) / tscFCvtt2n; tscFCvtn2t = 0xFFFFFFFFFFFFFFFFULL / tscFCvtt2n; /* @@ -278,12 +297,12 @@ tsc_init(void) } kprintf(" TSC: Frequency = %6d.%06dMHz, " - "cvtt2n = %08X.%08X, cvtn2t = %08X.%08X, gran = %lld%s\n", - (uint32_t)(tscFreq / Mega), - (uint32_t)(tscFreq % Mega), - (uint32_t)(tscFCvtt2n >> 32), (uint32_t)tscFCvtt2n, - (uint32_t)(tscFCvtn2t >> 32), (uint32_t)tscFCvtn2t, - tscGranularity, N_by_2_bus_ratio ? " (N/2)" : ""); + "cvtt2n = %08X.%08X, cvtn2t = %08X.%08X, gran = %lld%s\n", + (uint32_t)(tscFreq / Mega), + (uint32_t)(tscFreq % Mega), + (uint32_t)(tscFCvtt2n >> 32), (uint32_t)tscFCvtt2n, + (uint32_t)(tscFCvtn2t >> 32), (uint32_t)tscFCvtn2t, + tscGranularity, N_by_2_bus_ratio ? " (N/2)" : ""); } void diff --git a/osfmk/i386/tsc.h b/osfmk/i386/tsc.h index 1d084859b..cd8429b85 100644 --- a/osfmk/i386/tsc.h +++ b/osfmk/i386/tsc.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2007 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -40,38 +40,38 @@ #ifndef _I386_TSC_H_ #define _I386_TSC_H_ -#define BASE_NHM_CLOCK_SOURCE 133333333ULL -#define BASE_ART_CLOCK_SOURCE 24000000ULL /* 24Mhz */ -#define IA32_PERF_STS 0x198 -#define SLOW_TSC_THRESHOLD 1000067800 /* if slower, nonzero shift required in nanotime() algorithm */ +#define BASE_NHM_CLOCK_SOURCE 133333333ULL +#define BASE_ART_CLOCK_SOURCE 24000000ULL /* 24MHz */ +#define BASE_ART_CLOCK_SOURCE_SP 25000000ULL /* 25MHz */ +#define IA32_PERF_STS 0x198 +#define SLOW_TSC_THRESHOLD 1000067800 /* if slower, nonzero shift required in nanotime() algorithm */ #ifndef ASSEMBLER -extern uint64_t busFCvtt2n; -extern uint64_t busFCvtn2t; +extern uint64_t busFCvtt2n; +extern uint64_t busFCvtn2t; extern uint64_t tscFreq; extern uint64_t tscFCvtt2n; extern uint64_t tscFCvtn2t; extern uint64_t tscGranularity; extern uint64_t bus2tsc; extern uint64_t busFreq; -extern uint32_t flex_ratio; -extern uint32_t flex_ratio_min; -extern uint32_t flex_ratio_max; -extern uint64_t tsc_at_boot; +extern uint32_t flex_ratio; +extern uint32_t flex_ratio_min; +extern uint32_t flex_ratio_max; +extern uint64_t tsc_at_boot; -struct tscInfo -{ - uint64_t busFCvtt2n; - uint64_t busFCvtn2t; - uint64_t tscFreq; - uint64_t tscFCvtt2n; - uint64_t tscFCvtn2t; - uint64_t tscGranularity; - uint64_t bus2tsc; - uint64_t busFreq; - uint32_t flex_ratio; - uint32_t flex_ratio_min; - uint32_t flex_ratio_max; +struct tscInfo { + uint64_t busFCvtt2n; + uint64_t busFCvtn2t; + uint64_t tscFreq; + uint64_t tscFCvtt2n; + uint64_t tscFCvtn2t; + uint64_t tscGranularity; + uint64_t bus2tsc; + uint64_t busFreq; + uint32_t flex_ratio; + uint32_t flex_ratio_min; + uint32_t flex_ratio_max; }; typedef struct tscInfo tscInfo_t; diff --git a/osfmk/i386/tss.h b/osfmk/i386/tss.h index eab037c24..84869c004 100644 --- a/osfmk/i386/tss.h +++ b/osfmk/i386/tss.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _I386_TSS_H_ -#define _I386_TSS_H_ +#ifndef _I386_TSS_H_ +#define _I386_TSS_H_ #include @@ -65,37 +65,37 @@ * i386 Task State Segment */ struct i386_tss { - uint32_t back_link; /* segment number of previous task, - if nested */ - uint32_t esp0; /* initial stack pointer ... */ - uint32_t ss0; /* and segment for ring 0 */ - uint32_t esp1; /* initial stack pointer ... */ - uint32_t ss1; /* and segment for ring 1 */ - uint32_t esp2; /* initial stack pointer ... */ - uint32_t ss2; /* and segment for ring 2 */ - uint32_t cr3; /* CR3 - page table directory - physical address */ - uint32_t eip; - uint32_t eflags; - uint32_t eax; - uint32_t ecx; - uint32_t edx; - uint32_t ebx; - uint32_t esp; /* current stack pointer */ - uint32_t ebp; - uint32_t esi; - uint32_t edi; - uint32_t es; - uint32_t cs; - uint32_t ss; /* current stack segment */ - uint32_t ds; - uint32_t fs; - uint32_t gs; - uint32_t ldt; /* local descriptor table segment */ - uint16_t trace_trap; /* trap on switch to this task */ - uint16_t io_bit_map_offset; - /* offset to start of IO permission - bit map */ + uint32_t back_link; /* segment number of previous task, + * if nested */ + uint32_t esp0; /* initial stack pointer ... */ + uint32_t ss0; /* and segment for ring 0 */ + uint32_t esp1; /* initial stack pointer ... */ + uint32_t ss1; /* and segment for ring 1 */ + uint32_t esp2; /* initial stack pointer ... 
*/ + uint32_t ss2; /* and segment for ring 2 */ + uint32_t cr3; /* CR3 - page table directory + * physical address */ + uint32_t eip; + uint32_t eflags; + uint32_t eax; + uint32_t ecx; + uint32_t edx; + uint32_t ebx; + uint32_t esp; /* current stack pointer */ + uint32_t ebp; + uint32_t esi; + uint32_t edi; + uint32_t es; + uint32_t cs; + uint32_t ss; /* current stack segment */ + uint32_t ds; + uint32_t fs; + uint32_t gs; + uint32_t ldt; /* local descriptor table segment */ + uint16_t trace_trap; /* trap on switch to this task */ + uint16_t io_bit_map_offset; + /* offset to start of IO permission + * bit map */ }; /* @@ -107,30 +107,30 @@ struct i386_tss { */ struct sysenter_stack { - uint64_t sysestack[64]; /* Space for a 64-bit frame and some */ - uint64_t top; /* Top and pointer to ISS in PCS */ + uint64_t sysestack[64]; /* Space for a 64-bit frame and some */ + uint64_t top; /* Top and pointer to ISS in PCS */ }; #pragma pack(4) struct x86_64_tss { - uint32_t reserved1; - uint64_t rsp0; /* stack pointer for CPL0 */ - uint64_t rsp1; /* stack pointer for CPL1 */ - uint64_t rsp2; /* stack pointer for CPL2 */ - uint32_t reserved2; - uint32_t reserved3; - uint64_t ist1; /* interrupt stack table 1 */ - uint64_t ist2; /* interrupt stack table 2 */ - uint64_t ist3; /* interrupt stack table 3 */ - uint64_t ist4; /* interrupt stack table 4 */ - uint64_t ist5; /* interrupt stack table 5 */ - uint64_t ist6; /* interrupt stack table 6 */ - uint64_t ist7; /* interrupt stack table 7 */ - uint32_t reserved4; - uint32_t reserved5; - uint16_t reserved6; - uint16_t io_bit_map_offset; - /* offset to IO permission bit map */ + uint32_t reserved1; + uint64_t rsp0; /* stack pointer for CPL0 */ + uint64_t rsp1; /* stack pointer for CPL1 */ + uint64_t rsp2; /* stack pointer for CPL2 */ + uint32_t reserved2; + uint32_t reserved3; + uint64_t ist1; /* interrupt stack table 1 */ + uint64_t ist2; /* interrupt stack table 2 */ + uint64_t ist3; /* interrupt stack table 3 */ + uint64_t ist4; /* interrupt stack table 4 */ + uint64_t ist5; /* interrupt stack table 5 */ + uint64_t ist6; /* interrupt stack table 6 */ + uint64_t ist7; /* interrupt stack table 7 */ + uint32_t reserved4; + uint32_t reserved5; + uint16_t reserved6; + uint16_t io_bit_map_offset; + /* offset to IO permission bit map */ }; #pragma pack() -#endif /* _I386_TSS_H_ */ +#endif /* _I386_TSS_H_ */ diff --git a/osfmk/i386/ucode.c b/osfmk/i386/ucode.c index 15e0e5103..a9e9a12f5 100644 --- a/osfmk/i386/ucode.c +++ b/osfmk/i386/ucode.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Apple Inc. All rights reserved. + * Copyright (c) 2017-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -37,7 +37,8 @@ #include #include #include -#include <i386/mp.h> // mp_broadcast +#include <i386/mp.h> // mp_cpus_call +#include <i386/commpage/commpage.h> #include #include // cpu_number #include // boot-args @@ -67,41 +68,48 @@ static kern_return_t register_locks(void) { /* already allocated? 
*/ - if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock) + if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock) { return KERN_SUCCESS; + } /* allocate lock group attribute and group */ - if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init())) + if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init())) { goto nomem_out; + } - lck_grp_attr_setstat(ucode_slock_grp_attr); - - if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr))) + if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr))) { goto nomem_out; + } /* Allocate lock attribute */ - if (!(ucode_slock_attr = lck_attr_alloc_init())) + if (!(ucode_slock_attr = lck_attr_alloc_init())) { goto nomem_out; + } /* Allocate the spin lock */ /* We keep one global spin-lock. We could have one per update * request... but srsly, why would you update microcode like that? */ - if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr))) + if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr))) { goto nomem_out; + } return KERN_SUCCESS; nomem_out: /* clean up */ - if (ucode_slock) + if (ucode_slock) { lck_spin_free(ucode_slock, ucode_slock_grp); - if (ucode_slock_attr) + } + if (ucode_slock_attr) { lck_attr_free(ucode_slock_attr); - if (ucode_slock_grp) + } + if (ucode_slock_grp) { lck_grp_free(ucode_slock_grp); - if (ucode_slock_grp_attr) + } + if (ucode_slock_grp_attr) { lck_grp_attr_free(ucode_slock_grp_attr); + } return KERN_NO_SPACE; } @@ -118,28 +126,31 @@ copyin_update(uint64_t inaddr) /* Copy in enough header to peek at the size */ error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header)); - if (error) + if (error) { return error; + } /* Get the actual, alleged size */ size = update_header.total_size; /* huge bogus piece of data that somehow made it through? */ - if (size >= 1024 * 1024) + if (size >= 1024 * 1024) { return ENOMEM; + } /* Old microcodes? */ - if (size == 0) + if (size == 0) { size = 2048; /* default update size; see SDM */ - + } /* * create the buffer for the update * It need only be aligned to 16-bytes, according to the SDM. 
* This also wires it down */ ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&update, size, VM_KERN_MEMORY_OSFMK); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return ENOMEM; + } /* Copy it in */ error = copyin((user_addr_t)inaddr, (void*)update, size); @@ -152,6 +163,27 @@ copyin_update(uint64_t inaddr) return 0; } +static void +cpu_apply_microcode(void) +{ + /* grab the lock */ + lck_spin_lock(ucode_slock); + + /* execute the update */ + update_microcode(); + + /* release the lock */ + lck_spin_unlock(ucode_slock); +} + +static void +cpu_update(__unused void *arg) +{ + cpu_apply_microcode(); + + cpuid_do_was(); +} + /* * This is called once by every CPU on a wake from sleep/hibernate * and is meant to re-apply a microcode update that got lost @@ -162,7 +194,7 @@ ucode_update_wake() { if (global_update) { kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number()); - update_microcode(); + cpu_update(NULL); #if DEBUG } else { kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number()); @@ -170,19 +202,6 @@ ucode_update_wake() } } -static void -cpu_update(__unused void *arg) -{ - /* grab the lock */ - lck_spin_lock(ucode_slock); - - /* execute the update */ - update_microcode(); - - /* release the lock */ - lck_spin_unlock(ucode_slock); -} - static void ucode_cpuid_set_info(void) { @@ -222,14 +241,32 @@ ucode_cpuid_set_info(void) static void xcpu_update(void) { - if (register_locks() != KERN_SUCCESS) - return; + cpumask_t dest_cpumask; - /* Get all CPUs to perform the update */ - mp_broadcast(cpu_update, NULL); + if (register_locks() != KERN_SUCCESS) { + return; + } + mp_disable_preemption(); + dest_cpumask = CPUMASK_OTHERS; + cpu_apply_microcode(); /* Update the cpuid info */ ucode_cpuid_set_info(); + /* Now apply workarounds */ + cpuid_do_was(); + mp_enable_preemption(); + + /* Get all other CPUs to perform the update */ + /* + * Calling mp_cpus_call with the ASYNC flag ensures that the + * IPI dispatch occurs in parallel, but that we will not + * proceed until all targeted CPUs complete the microcode + * update. + */ + mp_cpus_call(dest_cpumask, ASYNC, cpu_update, NULL); + + /* Update the commpage only after we update all CPUs' microcode */ + commpage_post_ucode_update(); } /* @@ -240,9 +277,9 @@ int ucode_interface(uint64_t addr) { int error; - char arg[16]; + char arg[16]; - if (PE_parse_boot_argn("-x", arg, sizeof (arg))) { + if (PE_parse_boot_argn("-x", arg, sizeof(arg))) { printf("ucode: no updates in safe mode\n"); return EPERM; } @@ -253,15 +290,17 @@ ucode_interface(uint64_t addr) * would not make sense (all updates are cumulative), and also * leak memory, because we don't free previous updates. */ - if (global_update) + if (global_update) { return EPERM; + } #endif /* Get the whole microcode */ error = copyin_update(addr); - if (error) + if (error) { return error; + } /* Farm out the updates */ xcpu_update(); diff --git a/osfmk/i386/user_ldt.c b/osfmk/i386/user_ldt.c index 35dd2cef7..29334339a 100644 --- a/osfmk/i386/user_ldt.c +++ b/osfmk/i386/user_ldt.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
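The reworked xcpu_update() above encodes a deliberate ordering: the calling CPU applies the update and the CPUID workarounds first with preemption disabled, mp_cpus_call() then fans the update out to every other CPU, and the commpage is refreshed only after all CPUs are current, so user space never observes new feature bits on a core that has not yet been updated. A stub-based restatement of that contract (the stubs only log; in the kernel the real primitives shown above take their place):

#include <stdio.h>

/* Stand-ins for the kernel calls made by xcpu_update(). */
static void cpu_apply_microcode(void)        { puts("1. boot CPU: apply microcode"); }
static void ucode_cpuid_set_info(void)       { puts("2. boot CPU: re-read CPUID"); }
static void cpuid_do_was(void)               { puts("3. boot CPU: apply workarounds"); }
static void update_other_cpus(void)          { puts("4. IPI: update remaining CPUs"); }
static void commpage_post_ucode_update(void) { puts("5. publish to commpage last"); }

int
main(void)
{
	cpu_apply_microcode();
	ucode_cpuid_set_info();
	cpuid_do_was();
	update_other_cpus();
	commpage_post_ucode_update();   /* strictly after every CPU is updated */
	return 0;
}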
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -80,185 +80,198 @@ #include static void user_ldt_set_action(void *); +static int i386_set_ldt_impl(uint32_t *retval, uint64_t start_sel, uint64_t descs, + uint64_t num_sels); +static int i386_get_ldt_impl(uint32_t *retval, uint64_t start_sel, uint64_t descs, + uint64_t num_sels); + +extern int allow_64bit_proc_LDT_ops; /* * Add the descriptors to the LDT, starting with * the descriptor for 'first_selector'. 
*/ -int -i386_set_ldt( - uint32_t *retval, - uint32_t start_sel, - uint32_t descs, /* out */ - uint32_t num_sels) +static int +i386_set_ldt_impl( + uint32_t *retval, + uint64_t start_sel, + uint64_t descs, /* out */ + uint64_t num_sels) { - user_ldt_t new_ldt, old_ldt; + user_ldt_t new_ldt, old_ldt; struct real_descriptor *dp; - unsigned int i; - unsigned int min_selector = LDTSZ_MIN; /* do not allow the system selectors to be changed */ - task_t task = current_task(); - unsigned int ldt_count; + unsigned int i; + unsigned int min_selector = LDTSZ_MIN; /* do not allow the system selectors to be changed */ + task_t task = current_task(); + unsigned int ldt_count; kern_return_t err; if (start_sel != LDT_AUTO_ALLOC && (start_sel != 0 || num_sels != 0) - && (start_sel < min_selector || start_sel >= LDTSZ)) - return EINVAL; - if (start_sel != LDT_AUTO_ALLOC - && (uint64_t)start_sel + (uint64_t)num_sels > LDTSZ) /* cast to uint64_t to detect wrap-around */ - return EINVAL; + && (start_sel < min_selector || start_sel >= LDTSZ || num_sels > LDTSZ)) { + return EINVAL; + } + if (start_sel != LDT_AUTO_ALLOC && start_sel + num_sels > LDTSZ) { + return EINVAL; + } task_lock(task); - + old_ldt = task->i386_ldt; if (start_sel == LDT_AUTO_ALLOC) { - if (old_ldt) { - unsigned int null_count; - struct real_descriptor null_ldt; - - bzero(&null_ldt, sizeof(null_ldt)); + if (old_ldt) { + unsigned int null_count; + struct real_descriptor null_ldt; - /* - * Look for null selectors among the already-allocated - * entries. - */ - null_count = 0; - i = 0; - while (i < old_ldt->count) - { - if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) { - null_count++; - if (null_count == num_sels) - break; /* break out of while loop */ - } else { + bzero(&null_ldt, sizeof(null_ldt)); + + /* + * Look for null selectors among the already-allocated + * entries. + */ null_count = 0; - } + i = 0; + while (i < old_ldt->count) { + if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) { + null_count++; + if (null_count == num_sels) { + break; /* break out of while loop */ + } + } else { + null_count = 0; + } + } + + /* + * If we broke out of the while loop, i points to the selector + * after num_sels null selectors. Otherwise it points to the end + * of the old LDTs, and null_count is the number of null selectors + * at the end. + * + * Either way, there are null_count null selectors just prior to + * the i-indexed selector, and either null_count >= num_sels, + * or we're at the end, so we can extend. + */ + start_sel = old_ldt->start + i - null_count; + } else { + start_sel = LDTSZ_MIN; } - /* - * If we broke out of the while loop, i points to the selector - * after num_sels null selectors. Otherwise it points to the end - * of the old LDTs, and null_count is the number of null selectors - * at the end. - * - * Either way, there are null_count null selectors just prior to - * the i-indexed selector, and either null_count >= num_sels, - * or we're at the end, so we can extend. 
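The LDT_AUTO_ALLOC scan above is a run-finding loop, as the block comment explains: it counts consecutive all-zero descriptors, stops once the run reaches num_sels, and places the new base i - null_count entries before the cursor; if the loop runs off the end, the trailing zeros are reused and the table is extended. The same logic in isolation, with descriptors reduced to plain ints purely for illustration:

#include <stdio.h>

/* Where can a run of `need` zero entries start in tbl[0..count)?
 * Mirrors the i - null_count arithmetic in i386_set_ldt_impl(). */
static unsigned
find_null_run(const int *tbl, unsigned count, unsigned need)
{
	unsigned null_count = 0, i = 0;

	while (i < count) {
		if (tbl[i++] == 0) {
			if (++null_count == need) {
				break;    /* found a full run */
			}
		} else {
			null_count = 0;
		}
	}
	/* Run start; when no full run exists, this points at the trailing
	 * zeros (or at count itself), so the caller extends the table. */
	return i - null_count;
}

int
main(void)
{
	int ldt[] = { 1, 0, 0, 1, 0, 0, 0 };
	printf("%u\n", find_null_run(ldt, 7, 2)); /* 1: run at slots 1..2 */
	printf("%u\n", find_null_run(ldt, 7, 4)); /* 4: reuse 3 trailing zeros, extend by one */
	return 0;
}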
- */ - start_sel = old_ldt->start + i - null_count; - } else { - start_sel = LDTSZ_MIN; - } - - if ((uint64_t)start_sel + (uint64_t)num_sels > LDTSZ) { - task_unlock(task); - return ENOMEM; - } + if (start_sel + num_sels > LDTSZ) { + task_unlock(task); + return ENOMEM; + } } if (start_sel == 0 && num_sels == 0) { - new_ldt = NULL; + new_ldt = NULL; } else { - /* - * Allocate new LDT - */ - - unsigned int begin_sel = start_sel; - unsigned int end_sel = begin_sel + num_sels; - - if (old_ldt != NULL) { - if (old_ldt->start < begin_sel) - begin_sel = old_ldt->start; - if (old_ldt->start + old_ldt->count > end_sel) - end_sel = old_ldt->start + old_ldt->count; - } - - ldt_count = end_sel - begin_sel; - /* XXX allocation under task lock */ - new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt) + (ldt_count * sizeof(struct real_descriptor))); - if (new_ldt == NULL) { - task_unlock(task); - return ENOMEM; - } - - new_ldt->start = begin_sel; - new_ldt->count = ldt_count; - - /* - * Have new LDT. If there was a an old ldt, copy descriptors - * from old to new. - */ - if (old_ldt) { - bcopy(&old_ldt->ldt[0], - &new_ldt->ldt[old_ldt->start - begin_sel], - old_ldt->count * sizeof(struct real_descriptor)); - /* - * If the old and new LDTs are non-overlapping, fill the - * center in with null selectors. + * Allocate new LDT */ - - if (old_ldt->start + old_ldt->count < start_sel) - bzero(&new_ldt->ldt[old_ldt->count], - (start_sel - (old_ldt->start + old_ldt->count)) * sizeof(struct real_descriptor)); - else if (old_ldt->start > start_sel + num_sels) - bzero(&new_ldt->ldt[num_sels], - (old_ldt->start - (start_sel + num_sels)) * sizeof(struct real_descriptor)); - } - - /* - * Install new descriptors. - */ - if (descs != 0) { - /* XXX copyin under task lock */ - err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel], - num_sels * sizeof(struct real_descriptor)); - if (err != 0) - { - task_unlock(task); - user_ldt_free(new_ldt); - return err; + + unsigned int begin_sel = (unsigned int)start_sel; + unsigned int end_sel = (unsigned int)begin_sel + + (unsigned int)num_sels; + + if (old_ldt != NULL) { + if (old_ldt->start < begin_sel) { + begin_sel = old_ldt->start; + } + if (old_ldt->start + old_ldt->count > end_sel) { + end_sel = old_ldt->start + old_ldt->count; + } } - } else { - bzero(&new_ldt->ldt[start_sel - begin_sel], num_sels * sizeof(struct real_descriptor)); - } - /* - * Validate descriptors. - * Only allow descriptors with user privileges. 
- */ - for (i = 0, dp = (struct real_descriptor *) &new_ldt->ldt[start_sel - begin_sel]; - i < num_sels; - i++, dp++) - { - switch (dp->access & ~ACC_A) { - case 0: - case ACC_P: - /* valid empty descriptor, clear Present preemptively */ - dp->access &= (~ACC_P & 0xff); - break; - case ACC_P | ACC_PL_U | ACC_DATA: - case ACC_P | ACC_PL_U | ACC_DATA_W: - case ACC_P | ACC_PL_U | ACC_DATA_E: - case ACC_P | ACC_PL_U | ACC_DATA_EW: - case ACC_P | ACC_PL_U | ACC_CODE: - case ACC_P | ACC_PL_U | ACC_CODE_R: - case ACC_P | ACC_PL_U | ACC_CODE_C: - case ACC_P | ACC_PL_U | ACC_CODE_CR: - break; - default: + + ldt_count = end_sel - begin_sel; + /* XXX allocation under task lock */ + new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt) + (ldt_count * sizeof(struct real_descriptor))); + if (new_ldt == NULL) { task_unlock(task); - user_ldt_free(new_ldt); - return EACCES; + return ENOMEM; } - /* Reject attempts to create segments with 64-bit granules */ - if (dp->granularity & SZ_64) { - task_unlock(task); - user_ldt_free(new_ldt); - return EACCES; + + new_ldt->start = begin_sel; + new_ldt->count = ldt_count; + + /* + * Have new LDT. If there was a an old ldt, copy descriptors + * from old to new. + */ + if (old_ldt) { + bcopy(&old_ldt->ldt[0], + &new_ldt->ldt[old_ldt->start - begin_sel], + old_ldt->count * sizeof(struct real_descriptor)); + + /* + * If the old and new LDTs are non-overlapping, fill the + * center in with null selectors. + */ + + if (old_ldt->start + old_ldt->count < start_sel) { + bzero(&new_ldt->ldt[old_ldt->count], + (start_sel - (old_ldt->start + old_ldt->count)) * sizeof(struct real_descriptor)); + } else if (old_ldt->start > start_sel + num_sels) { + bzero(&new_ldt->ldt[num_sels], + (old_ldt->start - (start_sel + num_sels)) * sizeof(struct real_descriptor)); + } + } + + /* + * Install new descriptors. + */ + if (descs != 0) { + /* XXX copyin under task lock */ + err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel], + num_sels * sizeof(struct real_descriptor)); + if (err != 0) { + task_unlock(task); + user_ldt_free(new_ldt); + return err; + } + } else { + bzero(&new_ldt->ldt[start_sel - begin_sel], num_sels * sizeof(struct real_descriptor)); + } + /* + * Validate descriptors. + * Only allow descriptors with user privileges. + */ + for (i = 0, dp = (struct real_descriptor *) &new_ldt->ldt[start_sel - begin_sel]; + i < num_sels; + i++, dp++) { + switch (dp->access & ~ACC_A) { + case 0: + case ACC_P: + /* valid empty descriptor, clear Present preemptively */ + dp->access &= (~ACC_P & 0xff); + break; + case ACC_P | ACC_PL_U | ACC_DATA: + case ACC_P | ACC_PL_U | ACC_DATA_W: + case ACC_P | ACC_PL_U | ACC_DATA_E: + case ACC_P | ACC_PL_U | ACC_DATA_EW: + case ACC_P | ACC_PL_U | ACC_CODE: + case ACC_P | ACC_PL_U | ACC_CODE_R: + case ACC_P | ACC_PL_U | ACC_CODE_C: + case ACC_P | ACC_PL_U | ACC_CODE_CR: + break; + default: + task_unlock(task); + user_ldt_free(new_ldt); + return EACCES; + } + /* Reject attempts to create segments with 64-bit granules */ + /* Note this restriction is still correct, even when + * executing as a 64-bit process (we want to maintain a single + * 64-bit selector (located in the GDT)). + */ + if (dp->granularity & SZ_64) { + task_unlock(task); + user_ldt_free(new_ldt); + return EACCES; + } } - } } task->i386_ldt = new_ldt; /* new LDT for task */ @@ -277,32 +290,36 @@ i386_set_ldt( * rendezvoused with all CPUs, in case another thread * in this task was in the process of context switching. 
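The access-byte switch above is a strict whitelist: besides empty slots (whose Present bit is preemptively cleared), only present, DPL-3 code and data descriptor types survive, and the 64-bit granule bit is refused even for 64-bit processes, as the new comment notes. A hedged sketch of the check, with the bit encodings defined locally (they follow the published i386 access-byte layout rather than quoting the kernel's ACC_* headers, and the code-segment cases are elided):

#include <stdint.h>
#include <stdio.h>

#define EX_ACC_P      0x80   /* present */
#define EX_ACC_PL_U   0x60   /* DPL = 3, user */
#define EX_ACC_DATA_W 0x12   /* writable data segment */
#define EX_ACC_A      0x01   /* accessed bit, ignored by the check */

static int
passes_whitelist(uint8_t access)
{
	switch (access & ~EX_ACC_A) {
	case 0:
	case EX_ACC_P:                                /* empty descriptor */
	case EX_ACC_P | EX_ACC_PL_U | EX_ACC_DATA_W:  /* user-writable data */
		return 1;
	default:
		return 0;  /* e.g. any DPL-0 descriptor => EACCES */
	}
}

int
main(void)
{
	printf("%d\n", passes_whitelist(EX_ACC_P | EX_ACC_PL_U | EX_ACC_DATA_W)); /* 1 */
	printf("%d\n", passes_whitelist(EX_ACC_P | EX_ACC_DATA_W));               /* 0 */
	return 0;
}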
*/ - if (old_ldt) - user_ldt_free(old_ldt); + if (old_ldt) { + user_ldt_free(old_ldt); + } - *retval = start_sel; + *retval = (uint32_t)start_sel; return 0; } -int -i386_get_ldt( - uint32_t *retval, - uint32_t start_sel, - uint32_t descs, /* out */ - uint32_t num_sels) +static int +i386_get_ldt_impl( + uint32_t *retval, + uint64_t start_sel, + uint64_t descs, /* out */ + uint64_t num_sels) { - user_ldt_t user_ldt; - task_t task = current_task(); - unsigned int ldt_count; - kern_return_t err; - - if (start_sel >= LDTSZ) - return EINVAL; - if ((uint64_t)start_sel + (uint64_t)num_sels > LDTSZ) - return EINVAL; - if (descs == 0) - return EINVAL; + user_ldt_t user_ldt; + task_t task = current_task(); + unsigned int ldt_count; + kern_return_t err; + + if (start_sel >= LDTSZ || num_sels > LDTSZ) { + return EINVAL; + } + if (start_sel + num_sels > LDTSZ) { + return EINVAL; + } + if (descs == 0) { + return EINVAL; + } task_lock(task); @@ -313,21 +330,22 @@ i386_get_ldt( * copy out the descriptors */ - if (user_ldt != 0) - ldt_count = user_ldt->start + user_ldt->count; - else - ldt_count = LDTSZ_MIN; + if (user_ldt != 0) { + ldt_count = user_ldt->start + user_ldt->count; + } else { + ldt_count = LDTSZ_MIN; + } - if (start_sel < ldt_count) - { - unsigned int copy_sels = num_sels; + if (start_sel < ldt_count) { + unsigned int copy_sels = (unsigned int)num_sels; - if (start_sel + num_sels > ldt_count) - copy_sels = ldt_count - start_sel; + if (start_sel + num_sels > ldt_count) { + copy_sels = ldt_count - (unsigned int)start_sel; + } - err = copyout((char *)(current_ldt() + start_sel), - descs, copy_sels * sizeof(struct real_descriptor)); + err = copyout((char *)(current_ldt() + start_sel), + descs, copy_sels * sizeof(struct real_descriptor)); } task_unlock(task); @@ -339,23 +357,24 @@ i386_get_ldt( void user_ldt_free( - user_ldt_t user_ldt) + user_ldt_t user_ldt) { kfree(user_ldt, sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor))); } user_ldt_t user_ldt_copy( - user_ldt_t user_ldt) + user_ldt_t user_ldt) { if (user_ldt != NULL) { - size_t size = sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor)); - user_ldt_t new_ldt = (user_ldt_t)kalloc(size); - if (new_ldt != NULL) - bcopy(user_ldt, new_ldt, size); - return new_ldt; + size_t size = sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor)); + user_ldt_t new_ldt = (user_ldt_t)kalloc(size); + if (new_ldt != NULL) { + bcopy(user_ldt, new_ldt, size); + } + return new_ldt; } - + return 0; } @@ -363,10 +382,10 @@ void user_ldt_set_action( void *arg) { - task_t arg_task = (task_t)arg; + task_t arg_task = (task_t)arg; - if (arg_task == current_task()) { - user_ldt_set(current_thread()); + if (arg_task == current_task()) { + user_ldt_set(current_thread()); } } @@ -378,26 +397,80 @@ void user_ldt_set( thread_t thread) { - task_t task = thread->task; - user_ldt_t user_ldt; + task_t task = thread->task; + user_ldt_t user_ldt; user_ldt = task->i386_ldt; if (user_ldt != 0) { - struct real_descriptor *ldtp = (struct real_descriptor *)current_ldt(); + struct real_descriptor *ldtp = current_ldt(); - if (user_ldt->start > LDTSZ_MIN) { - bzero(&ldtp[LDTSZ_MIN], - sizeof(struct real_descriptor) * (user_ldt->start - LDTSZ_MIN)); - } - - bcopy(user_ldt->ldt, &ldtp[user_ldt->start], - sizeof(struct real_descriptor) * (user_ldt->count)); + if (user_ldt->start > LDTSZ_MIN) { + bzero(&ldtp[LDTSZ_MIN], + sizeof(struct real_descriptor) * (user_ldt->start - LDTSZ_MIN)); + } - gdt_desc_p(USER_LDT)->limit_low = 
(uint16_t)((sizeof(struct real_descriptor) * (user_ldt->start + user_ldt->count)) - 1); + bcopy(user_ldt->ldt, &ldtp[user_ldt->start], + sizeof(struct real_descriptor) * (user_ldt->count)); - ml_cpu_set_ldt(USER_LDT); + gdt_desc_p(USER_LDT)->limit_low = (uint16_t)((sizeof(struct real_descriptor) * (user_ldt->start + user_ldt->count)) - 1); + + ml_cpu_set_ldt(USER_LDT); } else { - ml_cpu_set_ldt(KERNEL_LDT); + ml_cpu_set_ldt(KERNEL_LDT); + } +} + +/* For 32-bit processes, called via machdep_syscall() */ +int +i386_set_ldt( + uint32_t *retval, + uint32_t start_sel, + uint32_t descs, /* out */ + uint32_t num_sels) +{ + return i386_set_ldt_impl(retval, (uint64_t)start_sel, (uint64_t)descs, + (uint64_t)num_sels); +} + +/* For 64-bit processes, called via machdep_syscall64() */ +int +i386_set_ldt64( + uint32_t *retval, + uint64_t start_sel, + uint64_t descs, /* out */ + uint64_t num_sels) +{ + if (!allow_64bit_proc_LDT_ops) { + return EINVAL; } + + return i386_set_ldt_impl(retval, start_sel, descs, num_sels); +} + +/* For 32-bit processes, called via machdep_syscall() */ +int +i386_get_ldt( + uint32_t *retval, + uint32_t start_sel, + uint32_t descs, /* out */ + uint32_t num_sels) +{ + return i386_get_ldt_impl(retval, (uint64_t)start_sel, (uint64_t)descs, + (uint64_t)num_sels); +} + +/* For 64-bit processes, called via machdep_syscall64() */ +int +i386_get_ldt64( + uint32_t *retval, + uint64_t start_sel, + uint64_t descs, /* out */ + uint64_t num_sels) +{ + if (!allow_64bit_proc_LDT_ops) { + return EINVAL; + } + + return i386_get_ldt_impl(retval, start_sel, descs, num_sels); } diff --git a/osfmk/i386/user_ldt.h b/osfmk/i386/user_ldt.h index 8285cb4df..83939f514 100644 --- a/osfmk/i386/user_ldt.h +++ b/osfmk/i386/user_ldt.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,43 +22,43 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
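From user space, these entry points are still reached through the three-argument prototype kept at the bottom of user_ldt.h, with the 64-bit variants above additionally gated on allow_64bit_proc_LDT_ops. A hedged sketch of a caller asking the kernel to auto-allocate one slot (the header paths follow the historical macOS SDK layout and may differ; error handling is minimal):

/* Illustrative user-space caller of the machdep LDT syscalls. */
#include <architecture/i386/table.h>   /* union ldt_entry (assumed path) */
#include <i386/user_ldt.h>             /* i386_set_ldt(), LDT_AUTO_ALLOC */
#include <string.h>
#include <stdio.h>

int
main(void)
{
	union ldt_entry desc;
	memset(&desc, 0, sizeof(desc));     /* an empty, not-present descriptor */

	/* LDT_AUTO_ALLOC asks the kernel to pick a free selector slot. */
	int sel = i386_set_ldt((int)LDT_AUTO_ALLOC, &desc, 1);
	if (sel < 0) {
		perror("i386_set_ldt");
		return 1;
	}
	printf("allocated LDT slot %d\n", sel);
	return 0;
}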
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * - * any improvements or extensions that they make and grant Carnegie Mellon + * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _I386_USER_LDT_H_ -#define _I386_USER_LDT_H_ +#ifndef _I386_USER_LDT_H_ +#define _I386_USER_LDT_H_ /* * User LDT management. @@ -66,24 +66,24 @@ * Each task may have its own LDT. */ -#define LDT_AUTO_ALLOC 0xffffffff +#define LDT_AUTO_ALLOC 0xffffffff #ifdef KERNEL #include struct user_ldt { - unsigned int start; /* first descriptor in table */ - unsigned int count; /* how many descriptors in table */ - struct real_descriptor ldt[0]; /* descriptor table (variable) */ + unsigned int start; /* first descriptor in table */ + unsigned int count; /* how many descriptors in table */ + struct real_descriptor ldt[0]; /* descriptor table (variable) */ }; -typedef struct user_ldt * user_ldt_t; +typedef struct user_ldt * user_ldt_t; -extern user_ldt_t user_ldt_copy( - user_ldt_t uldt); -extern void user_ldt_free( - user_ldt_t uldt); -extern void user_ldt_set( - thread_t thread); +extern user_ldt_t user_ldt_copy( + user_ldt_t uldt); +extern void user_ldt_free( + user_ldt_t uldt); +extern void user_ldt_set( + thread_t thread); #else /* !KERNEL */ #include @@ -95,4 +95,4 @@ int i386_set_ldt(int, const union ldt_entry *, int); __END_DECLS #endif /* KERNEL */ -#endif /* _I386_USER_LDT_H_ */ +#endif /* _I386_USER_LDT_H_ */ diff --git a/osfmk/i386/vm_tuning.h b/osfmk/i386/vm_tuning.h index 5465f801e..cb13cc635 100644 --- a/osfmk/i386/vm_tuning.h +++ b/osfmk/i386/vm_tuning.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. 
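struct user_ldt above ends in a zero-length array, so each task's table lives in one contiguous allocation: user_ldt_copy() and i386_set_ldt_impl() both size it as sizeof(struct user_ldt) + count * sizeof(struct real_descriptor). A minimal sketch of the same sizing with a stand-in descriptor type (modern C would spell the member ldt[], as shown here):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ex_descriptor { uint8_t bytes[8]; };  /* stand-in for real_descriptor */

struct ex_user_ldt {
	unsigned int start;            /* first descriptor in table */
	unsigned int count;            /* how many descriptors */
	struct ex_descriptor ldt[];    /* flexible array member */
};

static struct ex_user_ldt *
ex_ldt_alloc(unsigned int start, unsigned int count)
{
	struct ex_user_ldt *l =
	    malloc(sizeof(*l) + count * sizeof(struct ex_descriptor));
	if (l != NULL) {
		l->start = start;
		l->count = count;
		memset(l->ldt, 0, count * sizeof(struct ex_descriptor));
	}
	return l;
}

int
main(void)
{
	struct ex_user_ldt *l = ex_ldt_alloc(32, 4);
	if (l != NULL) {
		printf("%zu-byte header + %zu bytes of descriptors\n",
		    sizeof(*l), 4 * sizeof(struct ex_descriptor));
		free(l);
	}
	return 0;
}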
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,7 +61,7 @@ * VM tuning parameters for the i386 (without reference bits). */ -#ifndef _I386_VM_TUNING_H_ -#define _I386_VM_TUNING_H_ +#ifndef _I386_VM_TUNING_H_ +#define _I386_VM_TUNING_H_ -#endif /* _I386_VM_TUNING_H_ */ +#endif /* _I386_VM_TUNING_H_ */ diff --git a/osfmk/i386/vmx.h b/osfmk/i386/vmx.h index e1776c521..894590d2d 100644 --- a/osfmk/i386/vmx.h +++ b/osfmk/i386/vmx.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #ifndef _I386_VMX_H_ #define _I386_VMX_H_ @@ -38,9 +38,9 @@ extern "C" { /* * Error codes */ -#define VMX_OK 0 /* all ok */ -#define VMX_UNSUPPORTED 1 /* VT unsupported or disabled on 1+ cores */ -#define VMX_INUSE 2 /* VT is being exclusively used already */ +#define VMX_OK 0 /* all ok */ +#define VMX_UNSUPPORTED 1 /* VT unsupported or disabled on 1+ cores */ +#define VMX_INUSE 2 /* VT is being exclusively used already */ /* SPI */ int host_vmxon(boolean_t exclusive); diff --git a/osfmk/i386/vmx/vmx_cpu.c b/osfmk/i386/vmx/vmx_cpu.c index 49b36f846..f2beaaab3 100644 --- a/osfmk/i386/vmx/vmx_cpu.c +++ b/osfmk/i386/vmx/vmx_cpu.c @@ -2,7 +2,7 @@ * Copyright (c) 2006-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,47 +46,47 @@ lck_grp_t *vmx_lck_grp = NULL; lck_mtx_t *vmx_lck_mtx = NULL; /* ----------------------------------------------------------------------------- - vmx_is_available() - Is the VMX facility available on this CPU? - -------------------------------------------------------------------------- */ +* vmx_is_available() +* Is the VMX facility available on this CPU? +* -------------------------------------------------------------------------- */ static inline boolean_t vmx_is_available(void) { - return (0 != (cpuid_features() & CPUID_FEATURE_VMX)); + return 0 != (cpuid_features() & CPUID_FEATURE_VMX); } /* ----------------------------------------------------------------------------- - vmxon_is_enabled() - Is the VMXON instruction enabled on this CPU? - -------------------------------------------------------------------------- */ +* vmxon_is_enabled() +* Is the VMXON instruction enabled on this CPU? +* -------------------------------------------------------------------------- */ static inline boolean_t vmxon_is_enabled(void) { - return (vmx_is_available() && - (rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON)); + return vmx_is_available() && + (rdmsr64(MSR_IA32_FEATURE_CONTROL) & MSR_IA32_FEATCTL_VMXON); } #if MACH_ASSERT /* ----------------------------------------------------------------------------- - vmx_is_cr0_valid() - Is CR0 valid for executing VMXON on this CPU? - -------------------------------------------------------------------------- */ +* vmx_is_cr0_valid() +* Is CR0 valid for executing VMXON on this CPU? +* -------------------------------------------------------------------------- */ static inline boolean_t vmx_is_cr0_valid(vmx_specs_t *specs) { uintptr_t cr0 = get_cr0(); - return (0 == ((~cr0 & specs->cr0_fixed_0)|(cr0 & ~specs->cr0_fixed_1))); + return 0 == ((~cr0 & specs->cr0_fixed_0) | (cr0 & ~specs->cr0_fixed_1)); } /* ----------------------------------------------------------------------------- - vmx_is_cr4_valid() - Is CR4 valid for executing VMXON on this CPU? - -------------------------------------------------------------------------- */ +* vmx_is_cr4_valid() +* Is CR4 valid for executing VMXON on this CPU? 
+* -------------------------------------------------------------------------- */ static inline boolean_t vmx_is_cr4_valid(vmx_specs_t *specs) { uintptr_t cr4 = get_cr4(); - return (0 == ((~cr4 & specs->cr4_fixed_0)|(cr4 & ~specs->cr4_fixed_1))); + return 0 == ((~cr4 & specs->cr4_fixed_0) | (cr4 & ~specs->cr4_fixed_1)); } #endif @@ -96,19 +96,21 @@ vmx_enable(void) { uint64_t msr_image; - if (!vmx_is_available()) + if (!vmx_is_available()) { return; + } /* * We don't count on EFI initializing MSR_IA32_FEATURE_CONTROL * and turning VMXON on and locking the bit, so we do that now. */ msr_image = rdmsr64(MSR_IA32_FEATURE_CONTROL); - if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK))) + if (0 == ((msr_image & MSR_IA32_FEATCTL_LOCK))) { wrmsr64(MSR_IA32_FEATURE_CONTROL, - (msr_image | - MSR_IA32_FEATCTL_VMXON | - MSR_IA32_FEATCTL_LOCK)); + (msr_image | + MSR_IA32_FEATCTL_VMXON | + MSR_IA32_FEATCTL_LOCK)); + } set_cr4(get_cr4() | CR4_VMXE); } @@ -124,12 +126,12 @@ vmx_init() } /* ----------------------------------------------------------------------------- - vmx_get_specs() - Obtain VMX facility specifications for this CPU and - enter them into the vmx_specs_t structure. If VMX is not available or - disabled on this CPU, set vmx_present to false and return leaving - the remainder of the vmx_specs_t uninitialized. - -------------------------------------------------------------------------- */ +* vmx_get_specs() +* Obtain VMX facility specifications for this CPU and +* enter them into the vmx_specs_t structure. If VMX is not available or +* disabled on this CPU, set vmx_present to false and return leaving +* the remainder of the vmx_specs_t uninitialized. +* -------------------------------------------------------------------------- */ void vmx_cpu_init() { @@ -138,20 +140,22 @@ vmx_cpu_init() vmx_enable(); VMX_KPRINTF("[%d]vmx_cpu_init() initialized: %d\n", - cpu_number(), specs->initialized); + cpu_number(), specs->initialized); /* if we have read the data on boot, we won't read it again on wakeup */ - if (specs->initialized) + if (specs->initialized) { return; - else + } else { specs->initialized = TRUE; + } /* See if VMX is present, return if it is not */ specs->vmx_present = vmx_is_available() && vmxon_is_enabled(); VMX_KPRINTF("[%d]vmx_cpu_init() vmx_present: %d\n", - cpu_number(), specs->vmx_present); - if (!specs->vmx_present) + cpu_number(), specs->vmx_present); + if (!specs->vmx_present) { return; + } #define rdmsr_mask(msr, mask) (uint32_t)(rdmsr64(msr) & (mask)) specs->vmcs_id = rdmsr_mask(MSR_IA32_VMX_BASIC, VMX_VCR_VMCS_REV_ID); @@ -159,16 +163,16 @@ vmx_cpu_init() /* Obtain VMX-fixed bits in CR0 */ specs->cr0_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED0, 0xFFFFFFFF); specs->cr0_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR0_FIXED1, 0xFFFFFFFF); - + /* Obtain VMX-fixed bits in CR4 */ specs->cr4_fixed_0 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED0, 0xFFFFFFFF); specs->cr4_fixed_1 = rdmsr_mask(MSR_IA32_VMX_CR4_FIXED1, 0xFFFFFFFF); } /* ----------------------------------------------------------------------------- - vmx_on() - Enter VMX root operation on this CPU. - -------------------------------------------------------------------------- */ +* vmx_on() +* Enter VMX root operation on this CPU. 
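The vmx_is_cr0_valid()/vmx_is_cr4_valid() predicates above implement Intel's fixed-bit rule from the FIXED0/FIXED1 MSRs just captured in vmx_cpu_init(): every bit set in fixed0 must be 1 in the register, and every bit clear in fixed1 must be 0, which collapses into the single expression 0 == ((~cr & fixed0) | (cr & ~fixed1)). A standalone check with invented constraint values:

#include <stdint.h>
#include <stdio.h>

static int
cr_valid(uint64_t cr, uint64_t fixed0, uint64_t fixed1)
{
	/* bits in fixed0 must be set; bits absent from fixed1 must be clear */
	return 0 == ((~cr & fixed0) | (cr & ~fixed1));
}

int
main(void)
{
	uint64_t fixed0 = 0x1;            /* hypothetical: bit 0 must be 1 */
	uint64_t fixed1 = ~(1ULL << 63);  /* hypothetical: bit 63 must be 0 */

	printf("%d\n", cr_valid(0x1, fixed0, fixed1));                /* 1: ok */
	printf("%d\n", cr_valid(0x0, fixed0, fixed1));                /* 0: bit 0 unset */
	printf("%d\n", cr_valid(0x1 | (1ULL << 63), fixed0, fixed1)); /* 0: bit 63 set */
	return 0;
}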
+* -------------------------------------------------------------------------- */ static void vmx_on(void *arg __unused) { @@ -177,12 +181,13 @@ vmx_on(void *arg __unused) int result; VMX_KPRINTF("[%d]vmx_on() entry state: %d\n", - cpu_number(), cpu->specs.vmx_on); + cpu_number(), cpu->specs.vmx_on); assert(cpu->specs.vmx_present); - if (NULL == cpu->vmxon_region) + if (NULL == cpu->vmxon_region) { panic("vmx_on: VMXON region not allocated"); + } vmxon_region_paddr = vmx_paddr(cpu->vmxon_region); /* @@ -191,7 +196,7 @@ if (FALSE == cpu->specs.vmx_on) { assert(vmx_is_cr0_valid(&cpu->specs)); assert(vmx_is_cr4_valid(&cpu->specs)); - + result = __vmxon(vmxon_region_paddr); if (result != VMX_SUCCEED) { @@ -201,21 +206,21 @@ cpu->specs.vmx_on = TRUE; } VMX_KPRINTF("[%d]vmx_on() return state: %d\n", - cpu_number(), cpu->specs.vmx_on); + cpu_number(), cpu->specs.vmx_on); } /* ----------------------------------------------------------------------------- - vmx_off() - Leave VMX root operation on this CPU. - -------------------------------------------------------------------------- */ +* vmx_off() +* Leave VMX root operation on this CPU. +* -------------------------------------------------------------------------- */ static void vmx_off(void *arg __unused) { vmx_cpu_t *cpu = &current_cpu_datap()->cpu_vmx; int result; - + VMX_KPRINTF("[%d]vmx_off() entry state: %d\n", - cpu_number(), cpu->specs.vmx_on); + cpu_number(), cpu->specs.vmx_on); if (TRUE == cpu->specs.vmx_on) { /* Tell the CPU to release the VMXON region */ @@ -224,44 +229,45 @@ if (result != VMX_SUCCEED) { panic("vmx_off: unexpected return %d from __vmxoff()", result); } - + cpu->specs.vmx_on = FALSE; } VMX_KPRINTF("[%d]vmx_off() return state: %d\n", - cpu_number(), cpu->specs.vmx_on); + cpu_number(), cpu->specs.vmx_on); } /* ----------------------------------------------------------------------------- - vmx_allocate_vmxon_regions() - Allocate, clear and init VMXON regions for all CPUs. - -------------------------------------------------------------------------- */ +* vmx_allocate_vmxon_regions() +* Allocate, clear and init VMXON regions for all CPUs. +* -------------------------------------------------------------------------- */ static void vmx_allocate_vmxon_regions(void) { unsigned int i; - - for (i=0; i<real_ncpus; i++) { + + for (i = 0; i < real_ncpus; i++) { vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx; /* The size is defined to be always <= 4K, so we just allocate a page */ cpu->vmxon_region = vmx_pcalloc(); - if (NULL == cpu->vmxon_region) + if (NULL == cpu->vmxon_region) { panic("vmx_allocate_vmxon_regions: unable to allocate VMXON region"); + } *(uint32_t*)(cpu->vmxon_region) = cpu->specs.vmcs_id; } } /* ----------------------------------------------------------------------------- - vmx_free_vmxon_regions() - Free VMXON regions for all CPUs. - -------------------------------------------------------------------------- */ +* vmx_free_vmxon_regions() +* Free VMXON regions for all CPUs. +* -------------------------------------------------------------------------- */ static void vmx_free_vmxon_regions(void) { unsigned int i; - for (i=0; i<real_ncpus; i++) { + for (i = 0; i < real_ncpus; i++) { vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx; vmx_pfree(cpu->vmxon_region); @@ -270,9 +276,9 @@ }
+* -------------------------------------------------------------------------- */ static boolean_t vmx_globally_available(void) { @@ -280,11 +286,12 @@ vmx_globally_available(void) unsigned int ncpus = ml_get_max_cpus(); boolean_t available = TRUE; - for (i=0; i<ncpus; i++) { - vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx; - if (!cpu->specs.vmx_present) + for (i = 0; i < ncpus; i++) { + vmx_cpu_t *cpu = &cpu_datap(i)->cpu_vmx; + if (!cpu->specs.vmx_present) { available = FALSE; + } } VMX_KPRINTF("VMX available: %d\n", available); return available; @@ -292,9 +299,9 @@ /* ----------------------------------------------------------------------------- - vmx_turn_on() - Turn on VT operation on all CPUs. - -------------------------------------------------------------------------- */ +* vmx_turn_on() +* Turn on VT operation on all CPUs. +* -------------------------------------------------------------------------- */ int host_vmxon(boolean_t exclusive) { @@ -302,8 +309,9 @@ assert(0 == get_preemption_level()); - if (!vmx_globally_available()) + if (!vmx_globally_available()) { return VMX_UNSUPPORTED; + } lck_mtx_lock(vmx_lck_mtx); @@ -315,7 +323,6 @@ vmx_exclusive = exclusive; vmx_use_count = 1; mp_cpus_call(CPUMASK_ALL, ASYNC, vmx_on, NULL); - } else { vmx_use_count++; } @@ -330,9 +337,9 @@ } /* ----------------------------------------------------------------------------- - vmx_turn_off() - Turn off VT operation on all CPUs. - -------------------------------------------------------------------------- */ +* vmx_turn_off() +* Turn off VT operation on all CPUs. +* -------------------------------------------------------------------------- */ void host_vmxoff() { @@ -355,23 +362,24 @@ } /* ----------------------------------------------------------------------------- - vmx_suspend() - Turn off VT operation on this CPU if it was on. - Called when a CPU goes offline. - -------------------------------------------------------------------------- */ +* vmx_suspend() +* Turn off VT operation on this CPU if it was on. +* Called when a CPU goes offline. +* -------------------------------------------------------------------------- */ void vmx_suspend() { VMX_KPRINTF("vmx_suspend\n"); - if (vmx_use_count) + if (vmx_use_count) { vmx_off(NULL); + } } /* ----------------------------------------------------------------------------- - vmx_suspend() - Restore the previous VT state. Called when CPU comes back online. - -------------------------------------------------------------------------- */ +* vmx_resume() +* Restore the previous VT state. Called when CPU comes back online. +* -------------------------------------------------------------------------- */ void vmx_resume(boolean_t is_wake_from_hibernate) { @@ -379,8 +387,9 @@ vmx_enable(); - if (vmx_use_count == 0) + if (vmx_use_count == 0) { return; + } /* * When resuming from hiberate on the boot cpu, @@ -397,14 +406,15 @@ } /* ----------------------------------------------------------------------------- - vmx_hv_support() - Determine if the VMX feature set is sufficent for kernel HV support. - -------------------------------------------------------------------------- */ +* vmx_hv_support() +* Determine if the VMX feature set is sufficient for kernel HV support. 
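host_vmxon()/host_vmxoff() above form a counted SPI: the first successful caller IPIs every CPU into VMX root operation, later callers only bump vmx_use_count under vmx_lck_mtx, and an exclusive holder makes further calls fail with VMX_INUSE. A sketch of the intended call pattern from a hypothetical in-kernel client (not a real xnu consumer; error handling abbreviated):

#include <i386/vmx.h>

static int
example_start_vmm(void)
{
	/* Non-exclusive: share VMX with other kernel users. */
	int err = host_vmxon(FALSE);
	if (err != VMX_OK) {
		return err;   /* VMX_UNSUPPORTED or VMX_INUSE */
	}

	/* ... enter/exit guests via VMCS operations (out of scope) ... */

	host_vmxoff();        /* drop our reference; the last one leaves VMX */
	return VMX_OK;
}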
+* -------------------------------------------------------------------------- */ boolean_t vmx_hv_support() { - if (!vmx_is_available()) + if (!vmx_is_available()) { return FALSE; + } #define CHK(msr, shift, mask) if (!VMX_CAP(msr, shift, mask)) return FALSE; diff --git a/osfmk/i386/vmx/vmx_cpu.h b/osfmk/i386/vmx/vmx_cpu.h index 9ee53a530..eb9390861 100644 --- a/osfmk/i386/vmx/vmx_cpu.h +++ b/osfmk/i386/vmx/vmx_cpu.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #ifndef _I386_VMX_CPU_H_ #define _I386_VMX_CPU_H_ @@ -38,26 +38,26 @@ * */ typedef struct vmx_specs { - boolean_t initialized; /* the specs have already been read */ - boolean_t vmx_present; /* VMX feature available and enabled */ - boolean_t vmx_on; /* VMX is active */ - uint32_t vmcs_id; /* VMCS revision identifier */ + boolean_t initialized; /* the specs have already been read */ + boolean_t vmx_present; /* VMX feature available and enabled */ + boolean_t vmx_on; /* VMX is active */ + uint32_t vmcs_id; /* VMCS revision identifier */ /* * Fixed control register bits are specified by a pair of * bitfields: 0-settings contain 0 bits corresponding to * CR bits that may be 0; 1-settings contain 1 bits * corresponding to CR bits that may be 1. 
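An aside on how the fixed-bit pairs described above are consumed: vmx_on() earlier in this file asserts vmx_is_cr0_valid() and vmx_is_cr4_valid() before executing VMXON, and each of those checks reduces to two mask tests against this pair. A hedged sketch of that test; the helper name and exact expression here are illustrative, not quoted from the patch:

static inline boolean_t
cr_value_is_valid(uintptr_t cr, uint32_t fixed_0, uint32_t fixed_1)
{
	/*
	 * fixed_0 ("allowed 0-settings"): a 0 bit marks a CR bit that may
	 * be 0, so every 1 bit here must also be 1 in the register.
	 * fixed_1 ("allowed 1-settings"): a 1 bit marks a CR bit that may
	 * be 1, so every 0 bit here must also be 0 in the register.
	 */
	return 0 == ((~cr & fixed_0) | (cr & ~fixed_1));
}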
*/ - uint32_t cr0_fixed_0; /* allowed 0-settings for CR0 */ - uint32_t cr0_fixed_1; /* allowed 1-settings for CR0 */ - - uint32_t cr4_fixed_0; /* allowed 0-settings for CR4 */ - uint32_t cr4_fixed_1; /* allowed 1-settings for CR4 */ + uint32_t cr0_fixed_0; /* allowed 0-settings for CR0 */ + uint32_t cr0_fixed_1; /* allowed 1-settings for CR0 */ + + uint32_t cr4_fixed_0; /* allowed 0-settings for CR4 */ + uint32_t cr4_fixed_1; /* allowed 1-settings for CR4 */ } vmx_specs_t; typedef struct vmx_cpu { - vmx_specs_t specs; /* this phys CPU's VMX specifications */ - void *vmxon_region; /* the logical address of the VMXON region page */ + vmx_specs_t specs; /* this phys CPU's VMX specifications */ + void *vmxon_region; /* the logical address of the VMXON region page */ } vmx_cpu_t; void vmx_init(void); @@ -65,10 +65,10 @@ void vmx_cpu_init(void); void vmx_resume(boolean_t is_wake_from_hibernate); void vmx_suspend(void); -#define VMX_BASIC_TRUE_CTLS (1ull << 55) -#define VMX_TRUE_PROCBASED_SECONDARY_CTLS (1ull << 31) -#define VMX_PROCBASED_CTLS2_EPT (1ull << 1) -#define VMX_PROCBASED_CTLS2_UNRESTRICTED (1ull << 7) +#define VMX_BASIC_TRUE_CTLS (1ull << 55) +#define VMX_TRUE_PROCBASED_SECONDARY_CTLS (1ull << 31) +#define VMX_PROCBASED_CTLS2_EPT (1ull << 1) +#define VMX_PROCBASED_CTLS2_UNRESTRICTED (1ull << 7) #define VMX_CAP(msr, shift, mask) (rdmsr64(msr) & ((mask) << (shift))) @@ -86,4 +86,4 @@ extern int __vmxoff(void); */ extern int __vmxon(addr64_t v); -#endif /* _I386_VMX_CPU_H_ */ +#endif /* _I386_VMX_CPU_H_ */ diff --git a/osfmk/i386/vmx/vmx_shims.c b/osfmk/i386/vmx/vmx_shims.c index 806ed701b..48c7f4a76 100644 --- a/osfmk/i386/vmx/vmx_shims.c +++ b/osfmk/i386/vmx/vmx_shims.c @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include -#include +#include #include #include #include @@ -37,18 +37,20 @@ void * vmx_pcalloc(void) { - char *pptr; - kern_return_t ret; + char *pptr; + kern_return_t ret; ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&pptr, PAGE_SIZE, VM_KERN_MEMORY_OSFMK); - if (ret != KERN_SUCCESS) return (NULL); + if (ret != KERN_SUCCESS) { + return NULL; + } bzero(pptr, PAGE_SIZE); - return (pptr); + return pptr; } addr64_t vmx_paddr(void *va) { - return (ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t)va))); + return ptoa_64(pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t)va)); } void diff --git a/osfmk/i386/vmx/vmx_shims.h b/osfmk/i386/vmx/vmx_shims.h index 979396699..8990c7eec 100644 --- a/osfmk/i386/vmx/vmx_shims.h +++ b/osfmk/i386/vmx/vmx_shims.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #ifndef _I386_VMX_SHIMS_H_ #define _I386_VMX_SHIMS_H_ @@ -37,4 +37,4 @@ void *vmx_pcalloc(void); addr64_t vmx_paddr(void *); void vmx_pfree(void *); -#endif /* _I386_VMX_SHIMS_H_ */ +#endif /* _I386_VMX_SHIMS_H_ */ diff --git a/osfmk/i386/xpr.h b/osfmk/i386/xpr.h index 9ffc12c8f..3c7449a28 100644 --- a/osfmk/i386/xpr.h +++ b/osfmk/i386/xpr.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,45 +22,44 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -/* +/* */ - + /* * File: xpr.h * * Machine dependent module for the XPR tracing facility. */ -#define XPR_TIMESTAMP (0) - +#define XPR_TIMESTAMP (0) diff --git a/osfmk/ipc/flipc.c b/osfmk/ipc/flipc.c index 055a33550..a8b265d23 100644 --- a/osfmk/ipc/flipc.c +++ b/osfmk/ipc/flipc.c @@ -65,32 +65,36 @@ zone_t flipc_port_zone; /* Get the mnl_name associated with local ipc_port . * Returns MNL_NAME_NULL if is invalid or not a flipc port. */ -static inline mnl_name_t mnl_name_from_port(ipc_port_t lport) +static inline mnl_name_t +mnl_name_from_port(ipc_port_t lport) { - mnl_name_t name = MNL_NAME_NULL; - - if (IP_VALID(lport)) { - flipc_port_t fport = lport->ip_messages.data.port.fport; - if (FPORT_VALID(fport)) - name = fport->obj.name; - } - return name; + mnl_name_t name = MNL_NAME_NULL; + + if (IP_VALID(lport)) { + flipc_port_t fport = lport->ip_messages.data.port.fport; + if (FPORT_VALID(fport)) { + name = fport->obj.name; + } + } + return name; } /* Lookup the ipc_port associated with mnl_name . * Returns IP_NULL if is invalid or not a known mnl object. 
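These two inline translators are the boundary between node-local state and the wire: raw ipc_port_t pointers never cross the link, only stable mnl_name_t values do. A hedged round-trip sketch of how they pair up, assuming flipc_port_create() has already linked the port to a valid fport:

	/* Outbound: replace the local port pointer with its wire name. */
	mnl_name_t wire_name = mnl_name_from_port(lport);
	assert(MNL_NAME_VALID(wire_name));

	/* Inbound: resolve the wire name back to the local port. */
	ipc_port_t resolved = mnl_name_to_port(wire_name);
	assert(resolved == lport);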
*/ -static inline ipc_port_t mnl_name_to_port(mnl_name_t name) +static inline ipc_port_t +mnl_name_to_port(mnl_name_t name) { - ipc_port_t lport = IP_NULL; - - if (MNL_NAME_VALID(name)) { - flipc_port_t fport = (flipc_port_t)mnl_obj_lookup(name); - if (FPORT_VALID(fport)) - lport = fport->lport; - } - return lport; + ipc_port_t lport = IP_NULL; + + if (MNL_NAME_VALID(name)) { + flipc_port_t fport = (flipc_port_t)mnl_obj_lookup(name); + if (FPORT_VALID(fport)) { + lport = fport->lport; + } + } + return lport; } @@ -101,37 +105,39 @@ static inline ipc_port_t mnl_name_to_port(mnl_name_t name) static kern_return_t flipc_port_create(ipc_port_t lport, mach_node_t node, mnl_name_t name) { - /* Ensure parameters are valid and not already linked */ - assert(IP_VALID(lport)); - assert(MACH_NODE_VALID(node)); - assert(MNL_NAME_VALID(name)); - assert(!FPORT_VALID(lport->ip_messages.imq_fport)); - - /* Allocate and initialize a flipc port */ - flipc_port_t fport = (flipc_port_t) zalloc(flipc_port_zone); - if (!FPORT_VALID(fport)) - return KERN_RESOURCE_SHORTAGE; - bzero(fport, sizeof(struct flipc_port)); - fport->obj.name = name; - fport->hostnode = node; - if (node == localnode) - fport->state = FPORT_STATE_PRINCIPAL; - else - fport->state = FPORT_STATE_PROXY; - - /* Link co-structures (lport is locked) */ - fport->lport = lport; - lport->ip_messages.imq_fport = fport; - - /* Add fport to the name hash table; revert link if insert fails */ - kern_return_t kr = mnl_obj_insert((mnl_obj_t)fport); - if (kr != KERN_SUCCESS) { - lport->ip_messages.imq_fport = FPORT_NULL; - fport->lport = IP_NULL; - zfree(flipc_port_zone, fport); - } - - return kr; + /* Ensure parameters are valid and not already linked */ + assert(IP_VALID(lport)); + assert(MACH_NODE_VALID(node)); + assert(MNL_NAME_VALID(name)); + assert(!FPORT_VALID(lport->ip_messages.imq_fport)); + + /* Allocate and initialize a flipc port */ + flipc_port_t fport = (flipc_port_t) zalloc(flipc_port_zone); + if (!FPORT_VALID(fport)) { + return KERN_RESOURCE_SHORTAGE; + } + bzero(fport, sizeof(struct flipc_port)); + fport->obj.name = name; + fport->hostnode = node; + if (node == localnode) { + fport->state = FPORT_STATE_PRINCIPAL; + } else { + fport->state = FPORT_STATE_PROXY; + } + + /* Link co-structures (lport is locked) */ + fport->lport = lport; + lport->ip_messages.imq_fport = fport; + + /* Add fport to the name hash table; revert link if insert fails */ + kern_return_t kr = mnl_obj_insert((mnl_obj_t)fport); + if (kr != KERN_SUCCESS) { + lport->ip_messages.imq_fport = FPORT_NULL; + fport->lport = IP_NULL; + zfree(flipc_port_zone, fport); + } + + return kr; } @@ -143,38 +149,39 @@ flipc_port_create(ipc_port_t lport, mach_node_t node, mnl_name_t name) static void flipc_port_destroy(ipc_port_t lport) { - /* Ensure parameter is valid, and linked to an fport with a valid name */ - assert(IP_VALID(lport)); - ipc_mqueue_t port_mq = &lport->ip_messages; - flipc_port_t fport = port_mq->data.port.fport; - assert(FPORT_VALID(fport)); - assert(MNL_NAME_VALID(fport->obj.name)); - - /* Dispose of any undelivered messages */ - int m = port_mq->data.port.msgcount; - if (m > 0) { - ipc_kmsg_t kmsg; + /* Ensure parameter is valid, and linked to an fport with a valid name */ + assert(IP_VALID(lport)); + ipc_mqueue_t port_mq = &lport->ip_messages; + flipc_port_t fport = port_mq->data.port.fport; + assert(FPORT_VALID(fport)); + assert(MNL_NAME_VALID(fport->obj.name)); + + /* Dispose of any undelivered messages */ + int m = port_mq->data.port.msgcount; + if (m > 0) { + 
ipc_kmsg_t kmsg; #if DEBUG - printf("flipc: destroying %p with %d undelivered msgs\n", lport, m); + printf("flipc: destroying %p with %d undelivered msgs\n", lport, m); #endif - /* Logic was lifted from ipc_mqueue_select_on_thread() */ - while (m--) { - kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages); - assert(kmsg != IKM_NULL); - ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg); - if (fport->state == FPORT_STATE_PRINCIPAL) - flipc_msg_ack(kmsg->ikm_node, port_mq, FALSE); - ipc_mqueue_release_msgcount(port_mq, NULL); - port_mq->imq_seqno++; - } - } - - /* Remove from name hash table, unlink co-structures, and free fport */ - mnl_obj_remove(fport->obj.name); - lport->ip_messages.data.port.fport = FPORT_NULL; - fport->lport = IP_NULL; - zfree(flipc_port_zone, fport); + /* Logic was lifted from ipc_mqueue_select_on_thread() */ + while (m--) { + kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages); + assert(kmsg != IKM_NULL); + ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg); + if (fport->state == FPORT_STATE_PRINCIPAL) { + flipc_msg_ack(kmsg->ikm_node, port_mq, FALSE); + } + ipc_mqueue_release_msgcount(port_mq, NULL); + port_mq->imq_seqno++; + } + } + + /* Remove from name hash table, unlink co-structures, and free fport */ + mnl_obj_remove(fport->obj.name); + lport->ip_messages.data.port.fport = FPORT_NULL; + fport->lport = IP_NULL; + zfree(flipc_port_zone, fport); } @@ -190,14 +197,16 @@ flipc_port_destroy(ipc_port_t lport) * Returns: * size of the message as it would be sent over the flipc link. */ -static mach_msg_size_t flipc_msg_size_from_kmsg(ipc_kmsg_t kmsg) +static mach_msg_size_t +flipc_msg_size_from_kmsg(ipc_kmsg_t kmsg) { - mach_msg_size_t fsize = kmsg->ikm_header->msgh_size; + mach_msg_size_t fsize = kmsg->ikm_header->msgh_size; - if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) - PE_enter_debugger("flipc_msg_size_from_kmsg(): Complex messages not supported."); + if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) { + PE_enter_debugger("flipc_msg_size_from_kmsg(): Complex messages not supported."); + } - return fsize; + return fsize; } @@ -206,72 +215,75 @@ static mach_msg_size_t flipc_msg_size_from_kmsg(ipc_kmsg_t kmsg) * moves a receive right, then queued messages may need to be moved as a * result, causing this function to ultimately be recursive. 
*/ -static kern_return_t mnl_msg_from_kmsg(ipc_kmsg_t kmsg, mnl_msg_t *fmsgp) +static kern_return_t +mnl_msg_from_kmsg(ipc_kmsg_t kmsg, mnl_msg_t *fmsgp) { - if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) { - printf("mnl_msg_from_kmsg(): Complex messages not supported."); - return KERN_FAILURE; - } + if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) { + printf("mnl_msg_from_kmsg(): Complex messages not supported."); + return KERN_FAILURE; + } - mach_msg_size_t fsize = flipc_msg_size_from_kmsg(kmsg); + mach_msg_size_t fsize = flipc_msg_size_from_kmsg(kmsg); - mnl_msg_t fmsg = mnl_msg_alloc(fsize, 0); + mnl_msg_t fmsg = mnl_msg_alloc(fsize, 0); - if (fmsg == MNL_MSG_NULL) - return KERN_RESOURCE_SHORTAGE; + if (fmsg == MNL_MSG_NULL) { + return KERN_RESOURCE_SHORTAGE; + } - /* Setup flipc message header */ - fmsg->sub = MACH_NODE_SUB_FLIPC; - fmsg->cmd = FLIPC_CMD_IPCMESSAGE; - fmsg->node_id = localnode_id; // Message is from us - fmsg->qos = 0; // not used - fmsg->size = fsize; // Payload size (does NOT include mnl_msg header) - fmsg->object = kmsg->ikm_header->msgh_remote_port->ip_messages.data.port.fport->obj.name; + /* Setup flipc message header */ + fmsg->sub = MACH_NODE_SUB_FLIPC; + fmsg->cmd = FLIPC_CMD_IPCMESSAGE; + fmsg->node_id = localnode_id; // Message is from us + fmsg->qos = 0; // not used + fmsg->size = fsize; // Payload size (does NOT include mnl_msg header) + fmsg->object = kmsg->ikm_header->msgh_remote_port->ip_messages.data.port.fport->obj.name; - /* Copy body of message */ - bcopy((const void*)kmsg->ikm_header, (void*)MNL_MSG_PAYLOAD(fmsg), fsize); + /* Copy body of message */ + bcopy((const void*)kmsg->ikm_header, (void*)MNL_MSG_PAYLOAD(fmsg), fsize); - // Convert port fields - mach_msg_header_t *mmsg = (mach_msg_header_t*)MNL_MSG_PAYLOAD(fmsg); - mmsg->msgh_remote_port = (mach_port_t)fmsg->object; - mmsg->msgh_local_port = (mach_port_t) - mnl_name_from_port(mmsg->msgh_local_port); - mmsg->msgh_voucher_port = (mach_port_name_t)MNL_NAME_NULL; + // Convert port fields + mach_msg_header_t *mmsg = (mach_msg_header_t*)MNL_MSG_PAYLOAD(fmsg); + mmsg->msgh_remote_port = (mach_port_t)fmsg->object; + mmsg->msgh_local_port = (mach_port_t) + mnl_name_from_port(mmsg->msgh_local_port); + mmsg->msgh_voucher_port = (mach_port_name_t)MNL_NAME_NULL; - *fmsgp = (mnl_msg_t)fmsg; + *fmsgp = (mnl_msg_t)fmsg; - return KERN_SUCCESS; + return KERN_SUCCESS; } /* lifted from ipc_mig.c:mach_msg_send_from_kernel_proper() */ static mach_msg_return_t -mach_msg_send_from_remote_kernel(mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_node_t node) +mach_msg_send_from_remote_kernel(mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_node_t node) { - ipc_kmsg_t kmsg; - mach_msg_return_t mr; - - mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg); - if (mr != MACH_MSG_SUCCESS) - return mr; - - mr = ipc_kmsg_copyin_from_kernel(kmsg); - if (mr != MACH_MSG_SUCCESS) { - ipc_kmsg_free(kmsg); - return mr; - } - - kmsg->ikm_node = node; // node that needs to receive message ack - mr = ipc_kmsg_send(kmsg, - MACH_SEND_KERNEL_DEFAULT, - MACH_MSG_TIMEOUT_NONE); - if (mr != MACH_MSG_SUCCESS) { - ipc_kmsg_destroy(kmsg); - } - - return mr; + ipc_kmsg_t kmsg; + mach_msg_return_t mr; + + mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg); + if (mr != MACH_MSG_SUCCESS) { + return mr; + } + + mr = ipc_kmsg_copyin_from_kernel(kmsg); + if (mr != MACH_MSG_SUCCESS) { + ipc_kmsg_free(kmsg); + return mr; + } + + kmsg->ikm_node = node; // node that needs to receive message ack + mr = 
ipc_kmsg_send(kmsg, + MACH_SEND_KERNEL_DEFAULT, + MACH_MSG_TIMEOUT_NONE); + if (mr != MACH_MSG_SUCCESS) { + ipc_kmsg_destroy(kmsg); + } + + return mr; } @@ -281,21 +293,21 @@ mach_msg_send_from_remote_kernel(mach_msg_header_t *msg, */ static mach_msg_return_t flipc_cmd_ipc(mnl_msg_t fmsg, - mach_node_t node, - uint32_t flags __unused) + mach_node_t node, + uint32_t flags __unused) { - mach_msg_header_t *mmsg; - - // Convert flipc message into mach message in place to avoid alloc/copy - mmsg = (mach_msg_header_t*)MNL_MSG_PAYLOAD(fmsg); - mmsg->msgh_size = fmsg->size; - mmsg->msgh_remote_port = mnl_name_to_port(fmsg->object); - mmsg->msgh_local_port = mnl_name_to_port((mnl_name_t)mmsg->msgh_local_port); - mmsg->msgh_voucher_port = (mach_port_name_t)MACH_PORT_NULL; - mmsg->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0); - // unchanged: msgh_id - - return mach_msg_send_from_remote_kernel(mmsg, fmsg->size, node); + mach_msg_header_t *mmsg; + + // Convert flipc message into mach message in place to avoid alloc/copy + mmsg = (mach_msg_header_t*)MNL_MSG_PAYLOAD(fmsg); + mmsg->msgh_size = fmsg->size; + mmsg->msgh_remote_port = mnl_name_to_port(fmsg->object); + mmsg->msgh_local_port = mnl_name_to_port((mnl_name_t)mmsg->msgh_local_port); + mmsg->msgh_voucher_port = (mach_port_name_t)MACH_PORT_NULL; + mmsg->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0); + // unchanged: msgh_id + + return mach_msg_send_from_remote_kernel(mmsg, fmsg->size, node); } @@ -305,34 +317,35 @@ flipc_cmd_ipc(mnl_msg_t fmsg, */ static void flipc_cmd_ack(flipc_ack_msg_t fmsg, - mach_node_t node __unused, - uint32_t flags __unused) + mach_node_t node __unused, + uint32_t flags __unused) { - unsigned int msg_count = fmsg->msg_count; - thread_t thread = current_thread(); - boolean_t kick = FALSE; + unsigned int msg_count = fmsg->msg_count; + thread_t thread = current_thread(); + boolean_t kick = FALSE; - flipc_port_t fport = (flipc_port_t)mnl_obj_lookup(fmsg->mnl.object); + flipc_port_t fport = (flipc_port_t)mnl_obj_lookup(fmsg->mnl.object); - ipc_port_t lport = fport->lport; - ip_lock(lport); + ipc_port_t lport = fport->lport; + ip_lock(lport); - ipc_mqueue_t lport_mq = &lport->ip_messages; - imq_lock(lport_mq); + ipc_mqueue_t lport_mq = &lport->ip_messages; + imq_lock(lport_mq); - assert(fport->peek_count >= msg_count); // Can't ack what we haven't peeked! + assert(fport->peek_count >= msg_count); // Can't ack what we haven't peeked! - while (msg_count--) { - ipc_mqueue_select_on_thread(lport_mq, IMQ_NULL, 0, 0, thread); - fport->peek_count--; - kick |= ipc_kmsg_delayed_destroy(thread->ith_kmsg); - } + while (msg_count--) { + ipc_mqueue_select_on_thread(lport_mq, IMQ_NULL, 0, 0, thread); + fport->peek_count--; + kick |= ipc_kmsg_delayed_destroy(thread->ith_kmsg); + } - imq_unlock(lport_mq); - ip_unlock(lport); + imq_unlock(lport_mq); + ip_unlock(lport); - if (kick) - ipc_kmsg_reap_delayed(); + if (kick) { + ipc_kmsg_reap_delayed(); + } } @@ -347,17 +360,17 @@ flipc_cmd_ack(flipc_ack_msg_t fmsg, kern_return_t flipc_init(void) { - /* Create zone for flipc ports. - * TODO: Pick a better max value than ipc_port_max>>4 - */ - flipc_port_zone = zinit(sizeof(struct flipc_port), - (ipc_port_max>>4) * sizeof(struct flipc_port), - sizeof(struct flipc_port), - "flipc ports"); - - zone_change(flipc_port_zone, Z_CALLERACCT, FALSE); - zone_change(flipc_port_zone, Z_NOENCRYPT, TRUE); - return KERN_SUCCESS; + /* Create zone for flipc ports. 
+ * TODO: Pick a better max value than ipc_port_max>>4 + */ + flipc_port_zone = zinit(sizeof(struct flipc_port), + (ipc_port_max >> 4) * sizeof(struct flipc_port), + sizeof(struct flipc_port), + "flipc ports"); + + zone_change(flipc_port_zone, Z_CALLERACCT, FALSE); + zone_change(flipc_port_zone, Z_NOENCRYPT, TRUE); + return KERN_SUCCESS; } @@ -371,20 +384,20 @@ flipc_init(void) kern_return_t flipc_node_prepare(mach_node_t node) { - kern_return_t kr; + kern_return_t kr; - assert(MACH_NODE_VALID(node)); - ipc_port_t bs_port = node->bootstrap_port; - assert(IP_VALID(bs_port)); + assert(MACH_NODE_VALID(node)); + ipc_port_t bs_port = node->bootstrap_port; + assert(IP_VALID(bs_port)); - ip_lock(bs_port); + ip_lock(bs_port); - kr = flipc_port_create(bs_port, - node, - MNL_NAME_BOOTSTRAP(node->info.node_id)); - ip_unlock(bs_port); + kr = flipc_port_create(bs_port, + node, + MNL_NAME_BOOTSTRAP(node->info.node_id)); + ip_unlock(bs_port); - return kr; + return kr; } @@ -398,17 +411,18 @@ flipc_node_prepare(mach_node_t node) kern_return_t flipc_node_retire(mach_node_t node) { - if (!MACH_NODE_VALID(node)) - return KERN_NODE_DOWN; - - ipc_port_t bs_port = node->bootstrap_port; - if (IP_VALID(bs_port)) { - ip_lock(bs_port); - flipc_port_destroy(bs_port); - ip_unlock(bs_port); - } - - return KERN_SUCCESS; + if (!MACH_NODE_VALID(node)) { + return KERN_NODE_DOWN; + } + + ipc_port_t bs_port = node->bootstrap_port; + if (IP_VALID(bs_port)) { + ip_lock(bs_port); + flipc_port_destroy(bs_port); + ip_unlock(bs_port); + } + + return KERN_SUCCESS; } @@ -421,83 +435,86 @@ flipc_node_retire(mach_node_t node) */ mnl_msg_t flipc_msg_to_remote_node(mach_node_t to_node, - uint32_t flags __unused) + uint32_t flags __unused) { - mach_port_seqno_t msgoff; - ipc_kmsg_t kmsg = IKM_NULL; - mnl_msg_t fmsg = MNL_MSG_NULL; - - assert(to_node != localnode); - assert(get_preemption_level()==0); - - ipc_mqueue_t portset_mq = &to_node->proxy_port_set->ips_messages; - ipc_mqueue_t port_mq = IMQ_NULL; - - while (!to_node->dead) { - /* Fetch next message from proxy port */ - ipc_mqueue_receive(portset_mq, MACH_PEEK_MSG, 0, 0, THREAD_ABORTSAFE); - - thread_t thread = current_thread(); - if (thread->ith_state == MACH_PEEK_READY) { - port_mq = thread->ith_peekq; - thread->ith_peekq = IMQ_NULL; - } else { - panic("Unexpected thread state %d after ipc_mqueue_receive()", - thread->ith_state); - } - - assert(get_preemption_level()==0); - imq_lock(port_mq); - - flipc_port_t fport = port_mq->data.port.fport; - - if (FPORT_VALID(fport)) { - msgoff = port_mq->data.port.fport->peek_count; - - ipc_mqueue_peek_locked(port_mq, &msgoff, NULL, NULL, NULL, &kmsg); - if (kmsg != IKM_NULL) - port_mq->data.port.fport->peek_count++; - - /* Clean up outstanding prepost on port_mq. - * This also unlocks port_mq. - */ - ipc_mqueue_release_peek_ref(port_mq); - assert(get_preemption_level()==0); - - /* DANGER: The code below must be allowed to allocate so it can't - * run under the protection of the imq_lock, but that leaves mqueue - * open for business for a small window before we examine kmsg. - * This SHOULD be OK, since we are the only thread looking. 
- */ - if (kmsg != IKM_NULL) - mnl_msg_from_kmsg(kmsg, (mnl_msg_t*)&fmsg); - } else { - /* Must be from the control_port, which is not a flipc port */ - assert(!FPORT_VALID(port_mq->data.port.fport)); - - /* This is a simplified copy of ipc_mqueue_select_on_thread() */ - kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages); - assert(kmsg != IKM_NULL); - ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg); - ipc_mqueue_release_msgcount(port_mq, portset_mq); - imq_unlock(port_mq); - current_task()->messages_received++; - ip_release(to_node->control_port); // Should derive ref from port_mq - - /* We just pass the kmsg payload as the fmsg. - * flipc_msg_free() will notice and free the kmsg properly. - */ - mach_msg_header_t *hdr = kmsg->ikm_header; - fmsg = (mnl_msg_t)(&hdr[1]); - /* Stash kmsg pointer just before fmsg */ - *(ipc_kmsg_t*)((vm_offset_t)fmsg-sizeof(vm_offset_t)) = kmsg; - } - - if (MNL_MSG_VALID(fmsg)) - break; - } - assert(MNL_MSG_VALID(fmsg)); - return fmsg; + mach_port_seqno_t msgoff; + ipc_kmsg_t kmsg = IKM_NULL; + mnl_msg_t fmsg = MNL_MSG_NULL; + + assert(to_node != localnode); + assert(get_preemption_level() == 0); + + ipc_mqueue_t portset_mq = &to_node->proxy_port_set->ips_messages; + ipc_mqueue_t port_mq = IMQ_NULL; + + while (!to_node->dead) { + /* Fetch next message from proxy port */ + ipc_mqueue_receive(portset_mq, MACH_PEEK_MSG, 0, 0, THREAD_ABORTSAFE); + + thread_t thread = current_thread(); + if (thread->ith_state == MACH_PEEK_READY) { + port_mq = thread->ith_peekq; + thread->ith_peekq = IMQ_NULL; + } else { + panic("Unexpected thread state %d after ipc_mqueue_receive()", + thread->ith_state); + } + + assert(get_preemption_level() == 0); + imq_lock(port_mq); + + flipc_port_t fport = port_mq->data.port.fport; + + if (FPORT_VALID(fport)) { + msgoff = port_mq->data.port.fport->peek_count; + + ipc_mqueue_peek_locked(port_mq, &msgoff, NULL, NULL, NULL, &kmsg); + if (kmsg != IKM_NULL) { + port_mq->data.port.fport->peek_count++; + } + + /* Clean up outstanding prepost on port_mq. + * This also unlocks port_mq. + */ + ipc_mqueue_release_peek_ref(port_mq); + assert(get_preemption_level() == 0); + + /* DANGER: The code below must be allowed to allocate so it can't + * run under the protection of the imq_lock, but that leaves mqueue + * open for business for a small window before we examine kmsg. + * This SHOULD be OK, since we are the only thread looking. + */ + if (kmsg != IKM_NULL) { + mnl_msg_from_kmsg(kmsg, (mnl_msg_t*)&fmsg); + } + } else { + /* Must be from the control_port, which is not a flipc port */ + assert(!FPORT_VALID(port_mq->data.port.fport)); + + /* This is a simplified copy of ipc_mqueue_select_on_thread() */ + kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages); + assert(kmsg != IKM_NULL); + ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg); + ipc_mqueue_release_msgcount(port_mq, portset_mq); + imq_unlock(port_mq); + current_task()->messages_received++; + ip_release(to_node->control_port); // Should derive ref from port_mq + + /* We just pass the kmsg payload as the fmsg. + * flipc_msg_free() will notice and free the kmsg properly. 
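The hand-off this comment describes is worth a concrete picture: the control-port path exposes the kmsg's payload directly as the outgoing mnl_msg and hides the owning ipc_kmsg_t pointer in the word immediately before that payload, so flipc_msg_free() can later undo the disguise. A condensed sketch of the two halves (variable names illustrative; both expressions appear verbatim in the surrounding code):

	/* Producer (flipc_msg_to_remote_node): alias the payload, stash the owner. */
	mnl_msg_t m = (mnl_msg_t)(&kmsg->ikm_header[1]);
	*(ipc_kmsg_t *)((vm_offset_t)m - sizeof(vm_offset_t)) = kmsg;

	/* Consumer (flipc_msg_free): recover the owner and free it as a kmsg. */
	ipc_kmsg_t owner = *(ipc_kmsg_t *)((vm_offset_t)m - sizeof(vm_offset_t));
	ipc_kmsg_free(owner);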
+ */ + mach_msg_header_t *hdr = kmsg->ikm_header; + fmsg = (mnl_msg_t)(&hdr[1]); + /* Stash kmsg pointer just before fmsg */ + *(ipc_kmsg_t*)((vm_offset_t)fmsg - sizeof(vm_offset_t)) = kmsg; + } + + if (MNL_MSG_VALID(fmsg)) { + break; + } + } + assert(MNL_MSG_VALID(fmsg)); + return fmsg; } @@ -507,34 +524,34 @@ flipc_msg_to_remote_node(mach_node_t to_node, */ void flipc_msg_from_node(mach_node_t from_node __unused, - mnl_msg_t msg, - uint32_t flags) + mnl_msg_t msg, + uint32_t flags) { - /* Note that if flipc message forwarding is supported, the from_node arg - * may not match fmsg->node_id. The former is the node from which we - * received the message; the latter is the node that originated the - * message. We use the originating node, which is where the ack goes. - */ - assert(msg->sub == MACH_NODE_SUB_FLIPC); - mach_node_t node = mach_node_for_id_locked(msg->node_id, FALSE, FALSE); - MACH_NODE_UNLOCK(node); - - switch (msg->cmd) { - case FLIPC_CMD_IPCMESSAGE: - flipc_cmd_ipc(msg, node, flags); - break; - - case FLIPC_CMD_ACKMESSAGE: - case FLIPC_CMD_NAKMESSAGE: - flipc_cmd_ack((flipc_ack_msg_t)msg, node, flags); - break; - - default: + /* Note that if flipc message forwarding is supported, the from_node arg + * may not match fmsg->node_id. The former is the node from which we + * received the message; the latter is the node that originated the + * message. We use the originating node, which is where the ack goes. + */ + assert(msg->sub == MACH_NODE_SUB_FLIPC); + mach_node_t node = mach_node_for_id_locked(msg->node_id, FALSE, FALSE); + MACH_NODE_UNLOCK(node); + + switch (msg->cmd) { + case FLIPC_CMD_IPCMESSAGE: + flipc_cmd_ipc(msg, node, flags); + break; + + case FLIPC_CMD_ACKMESSAGE: + case FLIPC_CMD_NAKMESSAGE: + flipc_cmd_ack((flipc_ack_msg_t)msg, node, flags); + break; + + default: #if DEBUG - PE_enter_debugger("flipc_incoming(): Invalid command"); + PE_enter_debugger("flipc_incoming(): Invalid command"); #endif - break; - } + break; + } } @@ -545,18 +562,18 @@ flipc_msg_from_node(mach_node_t from_node __unused, */ void flipc_msg_free(mnl_msg_t msg, - uint32_t flags) + uint32_t flags) { - switch (msg->cmd) { - case FLIPC_CMD_ACKMESSAGE: // Flipc msg is a kmsg in disguise... - case FLIPC_CMD_NAKMESSAGE: // Convert back to kmsg for disposal - ipc_kmsg_free(*(ipc_kmsg_t*)((vm_offset_t)msg-sizeof(vm_offset_t))); - break; - - default: // Flipc msg is not a kmsg in disguise; dispose of normally - mnl_msg_free(msg, flags); - break; - } + switch (msg->cmd) { + case FLIPC_CMD_ACKMESSAGE: // Flipc msg is a kmsg in disguise... 
+ case FLIPC_CMD_NAKMESSAGE: // Convert back to kmsg for disposal + ipc_kmsg_free(*(ipc_kmsg_t*)((vm_offset_t)msg - sizeof(vm_offset_t))); + break; + + default: // Flipc msg is not a kmsg in disguise; dispose of normally + mnl_msg_free(msg, flags); + break; + } } @@ -574,80 +591,80 @@ flipc_msg_free(mnl_msg_t msg, */ void flipc_msg_ack(mach_node_t node, - ipc_mqueue_t mqueue, - boolean_t delivered) + ipc_mqueue_t mqueue, + boolean_t delivered) { - flipc_port_t fport = mqueue->imq_fport; - - assert(FPORT_VALID(fport)); - assert(MACH_NODE_VALID(node)); - - mnl_name_t name = MNL_NAME_NULL; - mach_node_id_t nid = HOST_LOCAL_NODE; - ipc_port_t ack_port = IP_NULL; - - ip_lock(fport->lport); - name = fport->obj.name; - ip_unlock(fport->lport); - - if (!MNL_NAME_VALID(name)) - return; - - MACH_NODE_LOCK(node); - if (node->active) { - nid = node->info.node_id; - ack_port = node->control_port; - } - MACH_NODE_UNLOCK(node); - - if ( !IP_VALID(ack_port) || !MACH_NODE_ID_VALID(nid) ) - return; - - /* We have a valid node id & obj name, and a port to send the ack to. */ - ipc_kmsg_t kmsg = ipc_kmsg_alloc(sizeof(struct flipc_ack_msg) + MAX_TRAILER_SIZE); - assert((unsigned long long)kmsg >= 4ULL);//!= IKM_NULL); - mach_msg_header_t *msg = kmsg->ikm_header; - - /* Fill in the mach_msg_header struct */ - msg->msgh_bits = MACH_MSGH_BITS_SET(0, 0, 0, 0); - msg->msgh_size = sizeof(msg); - msg->msgh_remote_port = ack_port; - msg->msgh_local_port = MACH_PORT_NULL; - msg->msgh_voucher_port = MACH_PORT_NULL; - msg->msgh_id = FLIPC_CMD_ID; - - /* Fill in the flipc_ack_msg struct */ - flipc_ack_msg_t fmsg = (flipc_ack_msg_t)(&msg[1]); - fmsg->resend_to = HOST_LOCAL_NODE; - fmsg->msg_count = 1; // Might want to coalesce acks to a node/name pair - - /* Fill in the mnl_msg struct */ - fmsg->mnl.sub = MACH_NODE_SUB_FLIPC; - fmsg->mnl.cmd = delivered ? FLIPC_CMD_ACKMESSAGE : FLIPC_CMD_NAKMESSAGE; - fmsg->mnl.qos = 0; // Doesn't do anything yet - fmsg->mnl.flags = 0; - fmsg->mnl.node_id = nid; - fmsg->mnl.object = name; - fmsg->mnl.options = 0; - fmsg->mnl.size = sizeof(struct flipc_ack_msg) - sizeof(struct mnl_msg); + flipc_port_t fport = mqueue->imq_fport; + + assert(FPORT_VALID(fport)); + assert(MACH_NODE_VALID(node)); + + mnl_name_t name = MNL_NAME_NULL; + mach_node_id_t nid = HOST_LOCAL_NODE; + ipc_port_t ack_port = IP_NULL; + + ip_lock(fport->lport); + name = fport->obj.name; + ip_unlock(fport->lport); + + if (!MNL_NAME_VALID(name)) { + return; + } + + MACH_NODE_LOCK(node); + if (node->active) { + nid = node->info.node_id; + ack_port = node->control_port; + } + MACH_NODE_UNLOCK(node); + + if (!IP_VALID(ack_port) || !MACH_NODE_ID_VALID(nid)) { + return; + } + + /* We have a valid node id & obj name, and a port to send the ack to. */ + ipc_kmsg_t kmsg = ipc_kmsg_alloc(sizeof(struct flipc_ack_msg) + MAX_TRAILER_SIZE); + assert((unsigned long long)kmsg >= 4ULL);//!= IKM_NULL); + mach_msg_header_t *msg = kmsg->ikm_header; + + /* Fill in the mach_msg_header struct */ + msg->msgh_bits = MACH_MSGH_BITS_SET(0, 0, 0, 0); + msg->msgh_size = sizeof(msg); + msg->msgh_remote_port = ack_port; + msg->msgh_local_port = MACH_PORT_NULL; + msg->msgh_voucher_port = MACH_PORT_NULL; + msg->msgh_id = FLIPC_CMD_ID; + + /* Fill in the flipc_ack_msg struct */ + flipc_ack_msg_t fmsg = (flipc_ack_msg_t)(&msg[1]); + fmsg->resend_to = HOST_LOCAL_NODE; + fmsg->msg_count = 1; // Might want to coalesce acks to a node/name pair + + /* Fill in the mnl_msg struct */ + fmsg->mnl.sub = MACH_NODE_SUB_FLIPC; + fmsg->mnl.cmd = delivered ? 
FLIPC_CMD_ACKMESSAGE : FLIPC_CMD_NAKMESSAGE; + fmsg->mnl.qos = 0; // Doesn't do anything yet + fmsg->mnl.flags = 0; + fmsg->mnl.node_id = nid; + fmsg->mnl.object = name; + fmsg->mnl.options = 0; + fmsg->mnl.size = sizeof(struct flipc_ack_msg) - sizeof(struct mnl_msg); #if (0) - mach_msg_return_t mmr; - ipc_mqueue_t ack_mqueue; + mach_msg_return_t mmr; + ipc_mqueue_t ack_mqueue; - ip_lock(ack_port); - ack_mqueue = &ack_port->ip_messages; - imq_lock(ack_mqueue); - ip_unlock(ack_port); + ip_lock(ack_port); + ack_mqueue = &ack_port->ip_messages; + imq_lock(ack_mqueue); + ip_unlock(ack_port); - /* ipc_mqueue_send() unlocks ack_mqueue */ - mmr = ipc_mqueue_send(ack_mqueue, kmsg, 0, 0); + /* ipc_mqueue_send() unlocks ack_mqueue */ + mmr = ipc_mqueue_send(ack_mqueue, kmsg, 0, 0); #else - kern_return_t kr; - kr = ipc_kmsg_send(kmsg, - MACH_SEND_KERNEL_DEFAULT, - MACH_MSG_TIMEOUT_NONE); + kern_return_t kr; + kr = ipc_kmsg_send(kmsg, + MACH_SEND_KERNEL_DEFAULT, + MACH_MSG_TIMEOUT_NONE); #endif } - - diff --git a/osfmk/ipc/flipc.h b/osfmk/ipc/flipc.h index fb89b3ff0..a5049dac4 100644 --- a/osfmk/ipc/flipc.h +++ b/osfmk/ipc/flipc.h @@ -33,7 +33,7 @@ * Definitions for fast local ipc (flipc). */ -#ifndef _IPC_FLIPC_H_ +#ifndef _IPC_FLIPC_H_ #define _IPC_FLIPC_H_ #if MACH_KERNEL_PRIVATE && MACH_FLIPC @@ -55,15 +55,15 @@ __BEGIN_DECLS */ typedef struct flipc_port { - struct mnl_obj obj; // Necessary to be in mnl_name_table[] - ipc_port_t lport; // The associated local ipc_port - mach_node_t hostnode; // Node holding the recieve right + struct mnl_obj obj; // Necessary to be in mnl_name_table[] + ipc_port_t lport; // The associated local ipc_port + mach_node_t hostnode; // Node holding the recieve right uint32_t peek_count; // How many kmsgs in mq have been peeked - uint32_t state:3; // See FPORT_STATE_* defines below + uint32_t state:3;// See FPORT_STATE_* defines below } *flipc_port_t; -#define FPORT_NULL ((flipc_port_t) 0UL) -#define FPORT_VALID(fport) ((fport) != FPORT_NULL) +#define FPORT_NULL ((flipc_port_t) 0UL) +#define FPORT_VALID(fport) ((fport) != FPORT_NULL) #define FPORT_STATE_INIT (0) // Port is being initialized #define FPORT_STATE_PROXY (1) // Principal is on another node @@ -110,10 +110,10 @@ kern_return_t flipc_node_retire(mach_node_t node); */ typedef struct flipc_ack_msg { - struct mnl_msg mnl; // Flipc message starts with mnl message - mach_node_id_t resend_to; // Node ID for resends (if NAK) - uint8_t msg_count; // Number of msgs being ackd/nakd -} __attribute__((__packed__)) *flipc_ack_msg_t; + struct mnl_msg mnl; // Flipc message starts with mnl message + mach_node_id_t resend_to; // Node ID for resends (if NAK) + uint8_t msg_count; // Number of msgs being ackd/nakd +} __attribute__((__packed__)) * flipc_ack_msg_t; #define FLIPC_CMD_ID (0x43504952UL) // msgh_id "RIPC" for FLIPC msgs #define FLIPC_CMD_IPCMESSAGE (1) // IPC Msg: is sender; is dest port @@ -126,21 +126,21 @@ typedef struct flipc_ack_msg { * node is terminated, in which case it returns MNL_MSG_NULL. */ mnl_msg_t flipc_msg_to_remote_node(mach_node_t to_node, - uint32_t flags); + uint32_t flags); /* The node layer calls flipc_msg_to_remote_node() to post the next message * from . This function will block until a message is available * or the node is terminated, in which case it returns MNL_MSG_NULL. 
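Read together, these declarations imply a small contract for a node-layer transmit thread: pull the next buffer with flipc_msg_to_remote_node(), hand it to the link, then return it through flipc_msg_free() (declared just below). A hypothetical driver loop under that contract; link_transmit() is an assumed link-layer primitive, not part of this patch:

extern void link_transmit(mach_node_t node, mnl_msg_t msg);  /* hypothetical */

static void
node_tx_loop(mach_node_t node)
{
	mnl_msg_t msg;

	for (;;) {
		/* Blocks until a message is ready; NULL means the node terminated. */
		msg = flipc_msg_to_remote_node(node, 0);
		if (msg == MNL_MSG_NULL) {
			break;
		}
		link_transmit(node, msg);
		flipc_msg_free(msg, 0);   /* dispose per the FLIPC layer's rules */
	}
}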
*/ void flipc_msg_from_node(mach_node_t from_node, - mnl_msg_t msg_arg, - uint32_t flags); + mnl_msg_t msg_arg, + uint32_t flags); /* The node layer calls flipc_msg_free() to dispose of sent messages that * originated in the FLIPC layer. */ void flipc_msg_free(mnl_msg_t msg, - uint32_t flags); + uint32_t flags); /*** FLIPC Message Declarations (used by mach ipc subsystem) ***/ @@ -156,12 +156,11 @@ void flipc_msg_free(mnl_msg_t msg, * Called from mach ipc_mqueue.c when a flipc-originated message is consumed. */ void flipc_msg_ack(mach_node_t node, - ipc_mqueue_t mqueue, - boolean_t delivered); + ipc_mqueue_t mqueue, + boolean_t delivered); __END_DECLS -#endif // MACH_KERNEL_PRIVATE -#endif // _IPC_FLIPC_H_ - +#endif // MACH_KERNEL_PRIVATE +#endif // _IPC_FLIPC_H_ diff --git a/osfmk/ipc/ipc_entry.c b/osfmk/ipc/ipc_entry.c index facaf2af0..e05803973 100644 --- a/osfmk/ipc/ipc_entry.c +++ b/osfmk/ipc/ipc_entry.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
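The first substantive hunk below is ipc_entry_lookup(), which splits a port name into a table index plus a generation and refuses to match when the stored generation differs; that is how stale names are caught after a table slot is recycled. A schematic restatement with a toy entry type (bit layout illustrative; the real MACH_PORT_INDEX()/MACH_PORT_GEN() macros live in the Mach port headers):

/* Illustrative name layout: high bits index the table, low byte is a generation. */
#define EX_INDEX(name)  ((name) >> 8)
#define EX_GEN(name)    ((name) & 0xff)

struct ex_entry {
	void    *object;
	uint8_t  gen;   /* bumped each time the slot is reused */
};

static struct ex_entry *
ex_lookup(struct ex_entry *table, uint32_t size, uint32_t name)
{
	uint32_t index = EX_INDEX(name);

	if (index >= size || table[index].object == NULL) {
		return NULL;    /* never allocated, or currently free */
	}
	if (table[index].gen != EX_GEN(name)) {
		return NULL;    /* stale name: the slot was freed and reused */
	}
	return &table[index];
}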
*/ @@ -92,8 +92,8 @@ ipc_entry_t ipc_entry_lookup( - ipc_space_t space, - mach_port_name_t name) + ipc_space_t space, + mach_port_name_t name) { mach_port_index_t index; ipc_entry_t entry; @@ -101,14 +101,13 @@ ipc_entry_lookup( assert(is_active(space)); index = MACH_PORT_INDEX(name); - if (index < space->is_table_size) { - entry = &space->is_table[index]; + if (index < space->is_table_size) { + entry = &space->is_table[index]; if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name) || IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) { - entry = IE_NULL; + entry = IE_NULL; } - } - else { + } else { entry = IE_NULL; } @@ -132,14 +131,22 @@ ipc_entry_lookup( kern_return_t ipc_entries_hold( - ipc_space_t space, - uint32_t entries_needed) + ipc_space_t space, + uint32_t entries_needed) { - ipc_entry_t table; mach_port_index_t next_free = 0; uint32_t i; + /* + * Assume that all new entries will need hashing. + * If the table is more than 87.5% full pretend we didn't have space. + */ + if (space->is_table_hashed + entries_needed > + space->is_table_size * 7 / 8) { + return KERN_NO_SPACE; + } + assert(is_active(space)); table = &space->is_table[0]; @@ -163,15 +170,15 @@ ipc_entries_hold( * The space is write-locked and active throughout. * An object may be locked. Will not allocate memory. * - * Note: The returned entry must be marked as modified before - * releasing the space lock + * Note: The returned entry must be marked as modified before + * releasing the space lock */ kern_return_t ipc_entry_claim( - ipc_space_t space, - mach_port_name_t *namep, - ipc_entry_t *entryp) + ipc_space_t space, + mach_port_name_t *namep, + ipc_entry_t *entryp) { ipc_entry_t entry; ipc_entry_t table; @@ -230,15 +237,16 @@ ipc_entry_claim( kern_return_t ipc_entry_get( - ipc_space_t space, - mach_port_name_t *namep, - ipc_entry_t *entryp) + ipc_space_t space, + mach_port_name_t *namep, + ipc_entry_t *entryp) { kern_return_t kr; kr = ipc_entries_hold(space, 1); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { return kr; + } return ipc_entry_claim(space, namep, entryp); } @@ -259,9 +267,9 @@ ipc_entry_get( kern_return_t ipc_entry_alloc( - ipc_space_t space, - mach_port_name_t *namep, - ipc_entry_t *entryp) + ipc_space_t space, + mach_port_name_t *namep, + ipc_entry_t *entryp) { kern_return_t kr; @@ -274,12 +282,14 @@ ipc_entry_alloc( } kr = ipc_entry_get(space, namep, entryp); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { return kr; + } kr = ipc_entry_grow_table(space, ITS_SIZE_NONE); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; /* space is unlocked */ + } } } @@ -301,15 +311,16 @@ ipc_entry_alloc( kern_return_t ipc_entry_alloc_name( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t *entryp) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t *entryp) { mach_port_index_t index = MACH_PORT_INDEX(name); mach_port_gen_t gen = MACH_PORT_GEN(name); - if (index > ipc_table_max_entries()) + if (index > ipc_table_max_entries()) { return KERN_NO_SPACE; + } assert(MACH_PORT_VALID(name)); @@ -343,7 +354,7 @@ ipc_entry_alloc_name( /* case #1 - the entry is reserved */ assert(!IE_BITS_TYPE(entry->ie_bits)); assert(!IE_BITS_GEN(entry->ie_bits)); - is_write_unlock(space); + is_write_unlock(space); return KERN_FAILURE; } else if (IE_BITS_TYPE(entry->ie_bits)) { if (IE_BITS_GEN(entry->ie_bits) == gen) { @@ -353,7 +364,7 @@ ipc_entry_alloc_name( } else { /* case #3 -- the entry is inuse, for a different name. 
*/ /* Collisions are not allowed */ - is_write_unlock(space); + is_write_unlock(space); return KERN_FAILURE; } } else { @@ -365,20 +376,21 @@ ipc_entry_alloc_name( */ for (free_index = 0; - (next_index = table[free_index].ie_next) - != index; - free_index = next_index) + (next_index = table[free_index].ie_next) + != index; + free_index = next_index) { continue; + } table[free_index].ie_next = - table[next_index].ie_next; + table[next_index].ie_next; space->is_table_free--; /* mark the previous entry modified - reconstructing the name */ - ipc_entry_modified(space, - MACH_PORT_MAKE(free_index, - IE_BITS_GEN(table[free_index].ie_bits)), - &table[free_index]); + ipc_entry_modified(space, + MACH_PORT_MAKE(free_index, + IE_BITS_GEN(table[free_index].ie_bits)), + &table[free_index]); entry->ie_bits = gen; entry->ie_request = IE_REQ_NONE; @@ -395,7 +407,7 @@ ipc_entry_alloc_name( * Because the space will be unlocked, * we must restart. */ - kern_return_t kr; + kern_return_t kr; kr = ipc_entry_grow_table(space, index + 1); assert(kr != KERN_NO_SPACE); if (kr != KERN_SUCCESS) { @@ -417,9 +429,9 @@ ipc_entry_alloc_name( void ipc_entry_dealloc( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry) { ipc_entry_t table; ipc_entry_num_t size; @@ -430,8 +442,9 @@ ipc_entry_dealloc( assert(entry->ie_request == IE_REQ_NONE); #if 1 - if (entry->ie_request != IE_REQ_NONE) + if (entry->ie_request != IE_REQ_NONE) { panic("ipc_entry_dealloc()\n"); + } #endif index = MACH_PORT_INDEX(name); @@ -449,7 +462,7 @@ ipc_entry_dealloc( * Nothing to do. The entry does not match * so there is nothing to deallocate. */ - assert(index < size); + assert(index < size); assert(entry == &table[index]); assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name)); } @@ -468,8 +481,8 @@ ipc_entry_dealloc( void ipc_entry_modified( - ipc_space_t space, - mach_port_name_t name, + ipc_space_t space, + mach_port_name_t name, __assert_only ipc_entry_t entry) { ipc_entry_t table; @@ -486,13 +499,15 @@ ipc_entry_modified( assert(space->is_low_mod <= size); assert(space->is_high_mod < size); - if (index < space->is_low_mod) + if (index < space->is_low_mod) { space->is_low_mod = index; - if (index > space->is_high_mod) + } + if (index > space->is_high_mod) { space->is_high_mod = index; + } KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_PORT_ENTRY_MODIFY) | DBG_FUNC_NONE, + MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_PORT_ENTRY_MODIFY) | DBG_FUNC_NONE, space->is_task ? task_pid(space->is_task) : 0, name, entry->ie_bits, @@ -507,8 +522,8 @@ static uint64_t ipc_entry_grow_rescan = 0; static uint64_t ipc_entry_grow_rescan_max = 0; static uint64_t ipc_entry_grow_rescan_entries = 0; static uint64_t ipc_entry_grow_rescan_entries_max = 0; -static uint64_t ipc_entry_grow_freelist_entries = 0; -static uint64_t ipc_entry_grow_freelist_entries_max = 0; +static uint64_t ipc_entry_grow_freelist_entries = 0; +static uint64_t ipc_entry_grow_freelist_entries_max = 0; #endif /* @@ -530,8 +545,8 @@ static uint64_t ipc_entry_grow_freelist_entries_max = 0; kern_return_t ipc_entry_grow_table( - ipc_space_t space, - ipc_table_elems_t target_size) + ipc_space_t space, + ipc_table_elems_t target_size) { ipc_entry_num_t osize, size, nsize, psize; @@ -556,17 +571,17 @@ ipc_entry_grow_table( } otable = space->is_table; - + its = space->is_table_next; size = its->its_size; - + /* * Since is_table_next points to the next natural size * we can identify the current size entry. 
*/ oits = its - 1; osize = oits->its_size; - + /* * If there is no target size, then the new size is simply * specified by is_table_next. If there is a target @@ -574,7 +589,7 @@ ipc_entry_grow_table( */ if (target_size != ITS_SIZE_NONE) { if (target_size <= osize) { - /* the space is locked */ + /* the space is locked */ return KERN_SUCCESS; } @@ -594,7 +609,7 @@ ipc_entry_grow_table( is_write_unlock(space); return KERN_NO_SPACE; } - + nits = its + 1; nsize = nits->its_size; assert((osize < size) && (size <= nsize)); @@ -632,7 +647,7 @@ ipc_entry_grow_table( low_mod = 0; hi_mod = osize - 1; - rescan: +rescan: /* * Within the range of the table that changed, determine what we * have to take action on. For each entry, take a snapshot of the @@ -643,28 +658,28 @@ ipc_entry_grow_table( */ for (i = low_mod; i <= hi_mod; i++) { ipc_entry_t entry = &table[i]; - struct ipc_entry osnap = otable[i]; + struct ipc_entry osnap = otable[i]; if (entry->ie_object != osnap.ie_object || IE_BITS_TYPE(entry->ie_bits) != IE_BITS_TYPE(osnap.ie_bits)) { - if (entry->ie_object != IO_NULL && - IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) + IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) { ipc_hash_table_delete(table, size, entry->ie_object, i, entry); + } entry->ie_object = osnap.ie_object; entry->ie_bits = osnap.ie_bits; entry->ie_request = osnap.ie_request; /* or ie_next */ if (entry->ie_object != IO_NULL && - IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) + IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) { ipc_hash_table_insert(table, size, entry->ie_object, i, entry); + } } else { assert(entry->ie_object == osnap.ie_object); entry->ie_bits = osnap.ie_bits; entry->ie_request = osnap.ie_request; /* or ie_next */ } - } table[0].ie_next = otable[0].ie_next; /* always rebase the freelist */ @@ -675,19 +690,22 @@ ipc_entry_grow_table( */ free_index = 0; for (sanity = 0; sanity < osize; sanity++) { - if (table[free_index].ie_object != IPC_OBJECT_NULL) + if (table[free_index].ie_object != IPC_OBJECT_NULL) { break; + } i = table[free_index].ie_next; - if (i == 0 || i >= osize) + if (i == 0 || i >= osize) { break; + } free_index = i; } #if IPC_ENTRY_GROW_STATS ipc_entry_grow_freelist_entries += sanity; - if (sanity > ipc_entry_grow_freelist_entries_max) + if (sanity > ipc_entry_grow_freelist_entries_max) { ipc_entry_grow_freelist_entries_max = sanity; + } #endif - + is_write_lock(space); /* @@ -720,20 +738,22 @@ ipc_entry_grow_table( is_write_unlock(space); #if IPC_ENTRY_GROW_STATS rescan_count++; - if (rescan_count > ipc_entry_grow_rescan_max) + if (rescan_count > ipc_entry_grow_rescan_max) { ipc_entry_grow_rescan_max = rescan_count; + } ipc_entry_grow_rescan++; ipc_entry_grow_rescan_entries += hi_mod - low_mod + 1; - if (hi_mod - low_mod + 1 > ipc_entry_grow_rescan_entries_max) + if (hi_mod - low_mod + 1 > ipc_entry_grow_rescan_entries_max) { ipc_entry_grow_rescan_entries_max = hi_mod - low_mod + 1; + } #endif goto rescan; } /* link new free entries onto the rest of the freelist */ assert(table[free_index].ie_next == 0 && - table[free_index].ie_object == IO_NULL); + table[free_index].ie_object == IO_NULL); table[free_index].ie_next = osize; assert(space->is_table == otable); diff --git a/osfmk/ipc/ipc_entry.h b/osfmk/ipc/ipc_entry.h index 285a6adf2..f63a3f546 100644 --- a/osfmk/ipc/ipc_entry.h +++ b/osfmk/ipc/ipc_entry.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,7 +64,7 @@ * tasks' capabilities for ports and port sets. */ -#ifndef _IPC_IPC_ENTRY_H_ +#ifndef _IPC_IPC_ENTRY_H_ #define _IPC_IPC_ENTRY_H_ #include @@ -83,6 +83,10 @@ * * The ie_index field of entries in the table implements * a ordered hash table with open addressing and linear probing. + * + * The ie_dist field holds the distance to the desired spot, + * which is used to implement robin-hood hashing. + * * This hash table converts (space, object) -> name. * It is used independently of the other fields. * @@ -94,35 +98,41 @@ * It is used as the head of the free list. */ +#define IPC_ENTRY_DIST_BITS 12 +#define IPC_ENTRY_DIST_MAX ((1 << IPC_ENTRY_DIST_BITS) - 1) +#define IPC_ENTRY_INDEX_BITS 20 +#define IPC_ENTRY_INDEX_MAX ((1 << IPC_ENTRY_INDEX_BITS) - 1) + struct ipc_entry { - struct ipc_object *ie_object; - ipc_entry_bits_t ie_bits; - mach_port_index_t ie_index; + struct ipc_object *ie_object; + ipc_entry_bits_t ie_bits; + uint32_t ie_dist : IPC_ENTRY_DIST_BITS; + mach_port_index_t ie_index : IPC_ENTRY_INDEX_BITS; union { - mach_port_index_t next; /* next in freelist, or... */ - ipc_table_index_t request; /* dead name request notify */ + mach_port_index_t next; /* next in freelist, or... 
*/ + ipc_table_index_t request; /* dead name request notify */ } index; }; -#define ie_request index.request -#define ie_next index.next +#define ie_request index.request +#define ie_next index.next -#define IE_REQ_NONE 0 /* no request */ +#define IE_REQ_NONE 0 /* no request */ -#define IE_BITS_UREFS_MASK 0x0000ffff /* 16 bits of user-reference */ -#define IE_BITS_UREFS(bits) ((bits) & IE_BITS_UREFS_MASK) +#define IE_BITS_UREFS_MASK 0x0000ffff /* 16 bits of user-reference */ +#define IE_BITS_UREFS(bits) ((bits) & IE_BITS_UREFS_MASK) -#define IE_BITS_TYPE_MASK 0x001f0000 /* 5 bits of capability type */ -#define IE_BITS_TYPE(bits) ((bits) & IE_BITS_TYPE_MASK) +#define IE_BITS_TYPE_MASK 0x001f0000 /* 5 bits of capability type */ +#define IE_BITS_TYPE(bits) ((bits) & IE_BITS_TYPE_MASK) #ifndef NO_PORT_GEN -#define IE_BITS_GEN_MASK 0xff000000 /* 8 bits for generation */ -#define IE_BITS_GEN(bits) ((bits) & IE_BITS_GEN_MASK) -#define IE_BITS_GEN_ONE 0x04000000 /* low bit of generation */ -#define IE_BITS_ROLL_POS 22 /* LSB pos of generation rollover */ -#define IE_BITS_ROLL_BITS 2 /* number of generation rollover bits */ -#define IE_BITS_ROLL_MASK (((1 << IE_BITS_ROLL_BITS) - 1) << IE_BITS_ROLL_POS) -#define IE_BITS_ROLL(bits) ((((bits) & IE_BITS_ROLL_MASK) << 8) ^ IE_BITS_GEN_MASK) +#define IE_BITS_GEN_MASK 0xff000000 /* 8 bits for generation */ +#define IE_BITS_GEN(bits) ((bits) & IE_BITS_GEN_MASK) +#define IE_BITS_GEN_ONE 0x04000000 /* low bit of generation */ +#define IE_BITS_ROLL_POS 22 /* LSB pos of generation rollover */ +#define IE_BITS_ROLL_BITS 2 /* number of generation rollover bits */ +#define IE_BITS_ROLL_MASK (((1 << IE_BITS_ROLL_BITS) - 1) << IE_BITS_ROLL_POS) +#define IE_BITS_ROLL(bits) ((((bits) & IE_BITS_ROLL_MASK) << 8) ^ IE_BITS_GEN_MASK) /* * Restart a generation counter with the specified bits for the rollover point. @@ -133,19 +143,21 @@ struct ipc_entry { * 1 0 32 * 1 1 16 */ -static inline ipc_entry_bits_t ipc_entry_new_rollpoint( +static inline ipc_entry_bits_t +ipc_entry_new_rollpoint( ipc_entry_bits_t rollbits) { rollbits = (rollbits << IE_BITS_ROLL_POS) & IE_BITS_ROLL_MASK; ipc_entry_bits_t newgen = IE_BITS_GEN_MASK + IE_BITS_GEN_ONE; - return (newgen | rollbits); + return newgen | rollbits; } /* * Get the next gencount, modulo the entry's rollover point. If the sum rolls over, * the caller should re-start the generation counter with a different rollpoint. */ -static inline ipc_entry_bits_t ipc_entry_new_gen( +static inline ipc_entry_bits_t +ipc_entry_new_gen( ipc_entry_bits_t oldgen) { ipc_entry_bits_t sum = (oldgen + IE_BITS_GEN_ONE) & IE_BITS_GEN_MASK; @@ -155,7 +167,8 @@ static inline ipc_entry_bits_t ipc_entry_new_gen( } /* Determine if a gencount has rolled over or not. 
*/ -static inline boolean_t ipc_entry_gen_rolled( +static inline boolean_t +ipc_entry_gen_rolled( ipc_entry_bits_t oldgen, ipc_entry_bits_t newgen) { @@ -163,91 +176,94 @@ static inline boolean_t ipc_entry_gen_rolled( } #else -#define IE_BITS_GEN_MASK 0 -#define IE_BITS_GEN(bits) 0 -#define IE_BITS_GEN_ONE 0 -#define IE_BITS_ROLL_POS 0 -#define IE_BITS_ROLL_MASK 0 -#define IE_BITS_ROLL(bits) (bits) - -static inline ipc_entry_bits_t ipc_entry_new_rollpoint( +#define IE_BITS_GEN_MASK 0 +#define IE_BITS_GEN(bits) 0 +#define IE_BITS_GEN_ONE 0 +#define IE_BITS_ROLL_POS 0 +#define IE_BITS_ROLL_MASK 0 +#define IE_BITS_ROLL(bits) (bits) + +static inline ipc_entry_bits_t +ipc_entry_new_rollpoint( ipc_entry_bits_t rollbits) { return 0; } -static inline ipc_entry_bits_t ipc_entry_new_gen( +static inline ipc_entry_bits_t +ipc_entry_new_gen( ipc_entry_bits_t oldgen) { return 0; } -static inline boolean_t ipc_entry_gen_rolled( +static inline boolean_t +ipc_entry_gen_rolled( ipc_entry_bits_t oldgen, ipc_entry_bits_t newgen) { return FALSE; } -#endif /* !USE_PORT_GEN */ +#endif /* !NO_PORT_GEN */ -#define IE_BITS_RIGHT_MASK 0x007fffff /* relevant to the right */ +#define IE_BITS_RIGHT_MASK 0x007fffff /* relevant to the right */ /* * Exported interfaces */ /* Search for entry in a space by name */ extern ipc_entry_t ipc_entry_lookup( - ipc_space_t space, - mach_port_name_t name); + ipc_space_t space, + mach_port_name_t name); /* Hold a number of entries in a locked space */ extern kern_return_t ipc_entries_hold( - ipc_space_t space, - natural_t count); + ipc_space_t space, + natural_t count); /* claim and initialize a held entry in a locked space */ extern kern_return_t ipc_entry_claim( - ipc_space_t space, - mach_port_name_t *namep, - ipc_entry_t *entryp); + ipc_space_t space, + mach_port_name_t *namep, + ipc_entry_t *entryp); /* Allocate an entry in a space */ extern kern_return_t ipc_entry_get( - ipc_space_t space, - mach_port_name_t *namep, - ipc_entry_t *entryp); + ipc_space_t space, + mach_port_name_t *namep, + ipc_entry_t *entryp); /* Allocate an entry in a space, growing the space if necessary */ extern kern_return_t ipc_entry_alloc( - ipc_space_t space, - mach_port_name_t *namep, - ipc_entry_t *entryp); + ipc_space_t space, + mach_port_name_t *namep, + ipc_entry_t *entryp); /* Allocate/find an entry in a space with a specific name */ extern kern_return_t ipc_entry_alloc_name( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t *entryp); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t *entryp); /* Deallocate an entry from a space */ extern void ipc_entry_dealloc( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); /* Mark an entry modified in a space */ extern void ipc_entry_modified( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); /* Grow the table in a space */ extern kern_return_t ipc_entry_grow_table( - ipc_space_t space, - ipc_table_elems_t target_size); + ipc_space_t space, + ipc_table_elems_t target_size); /* mask on/off default entry generation bits */ extern mach_port_name_t ipc_entry_name_mask( mach_port_name_t name); -#endif /* _IPC_IPC_ENTRY_H_ */ +#endif /* _IPC_IPC_ENTRY_H_ */ diff --git a/osfmk/ipc/ipc_hash.c b/osfmk/ipc/ipc_hash.c index a8f79a647..0721448e1 100644 --- a/osfmk/ipc/ipc_hash.c +++ b/osfmk/ipc/ipc_hash.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -72,26 +72,27 @@ #include #include #include +#include #include -#if MACH_IPC_DEBUG +#if MACH_IPC_DEBUG #include #include #include #include -#endif /* MACH_IPC_DEBUG */ +#endif /* MACH_IPC_DEBUG */ /* - * Forward declarations + * Forward declarations */ /* Delete an entry from the local reverse hash table */ void ipc_hash_local_delete( - ipc_space_t space, - ipc_object_t obj, - mach_port_index_t index, - ipc_entry_t entry); + ipc_space_t space, + ipc_object_t obj, + mach_port_index_t index, + ipc_entry_t entry); /* * Routine: ipc_hash_lookup @@ -104,10 +105,10 @@ void ipc_hash_local_delete( boolean_t ipc_hash_lookup( - ipc_space_t space, - ipc_object_t obj, - mach_port_name_t *namep, - ipc_entry_t *entryp) + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_entry_t *entryp) { return ipc_hash_table_lookup(space->is_table, space->is_table_size, obj, namep, entryp); } @@ -123,14 +124,15 @@ ipc_hash_lookup( void ipc_hash_insert( - ipc_space_t space, - ipc_object_t obj, - mach_port_name_t name, - ipc_entry_t entry) + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry) { mach_port_index_t index; index = MACH_PORT_INDEX(name); + space->is_table_hashed++; ipc_hash_table_insert(space->is_table, space->is_table_size, obj, index, entry); } @@ -144,14 +146,15 @@ ipc_hash_insert( void ipc_hash_delete( - ipc_space_t space, - ipc_object_t obj, - mach_port_name_t name, - ipc_entry_t entry) + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry) { mach_port_index_t index; index = MACH_PORT_INDEX(name); + space->is_table_hashed--; ipc_hash_table_delete(space->is_table, space->is_table_size, obj, index, entry); } @@ -185,8 +188,8 @@ ipc_hash_delete( * So possibly a small win; probably nothing significant. */ -#define IH_TABLE_HASH(obj, size) \ - ((mach_port_index_t)((((uintptr_t) (obj)) >> 6) % (size))) +#define IH_TABLE_HASH(obj, size) \ + ((mach_port_index_t)(os_hash_kernel_pointer(obj) % (size))) /* * Routine: ipc_hash_table_lookup @@ -198,19 +201,20 @@ ipc_hash_delete( boolean_t ipc_hash_table_lookup( - ipc_entry_t table, - ipc_entry_num_t size, - ipc_object_t obj, - mach_port_name_t *namep, - ipc_entry_t *entryp) + ipc_entry_t table, + ipc_entry_num_t size, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_entry_t *entryp) { - mach_port_index_t hindex, index; + mach_port_index_t hindex, index, hdist; if (obj == IO_NULL) { return FALSE; } hindex = IH_TABLE_HASH(obj, size); + hdist = 0; /* * Ideally, table[hindex].ie_index is the name we want. @@ -220,19 +224,40 @@ ipc_hash_table_lookup( */ while ((index = table[hindex].ie_index) != 0) { - ipc_entry_t entry; - - assert(index < size); - entry = &table[index]; - if (entry->ie_object == obj) { - *entryp = entry; - *namep = MACH_PORT_MAKE(index, - IE_BITS_GEN(entry->ie_bits)); - return TRUE; + ipc_entry_t entry = &table[index]; + + /* + * if our current displacement is strictly larger + * than the current slot one, then insertion would + * have stolen his place so we can't possibly exist. + */ + if (hdist > table[hindex].ie_dist) { + return FALSE; } - if (++hindex == size) + /* + * If our current displacement is exactly the current + * slot displacement, then it can be a match, let's check. 
+ */ + if (hdist == table[hindex].ie_dist) { + assert(index < size); + if (entry->ie_object == obj) { + *entryp = entry; + *namep = MACH_PORT_MAKE(index, + IE_BITS_GEN(entry->ie_bits)); + return TRUE; + } + } else { + assert(entry->ie_object != obj); + } + + if (hdist < IPC_ENTRY_DIST_MAX) { + /* peg the displacement distance at IPC_ENTRY_DIST_MAX */ + ++hdist; + } + if (++hindex == size) { hindex = 0; + } } return FALSE; @@ -248,18 +273,19 @@ ipc_hash_table_lookup( void ipc_hash_table_insert( - ipc_entry_t table, - ipc_entry_num_t size, - ipc_object_t obj, - mach_port_index_t index, - __assert_only ipc_entry_t entry) + ipc_entry_t table, + ipc_entry_num_t size, + ipc_object_t obj, + mach_port_index_t index, + __assert_only ipc_entry_t entry) { - mach_port_index_t hindex; + mach_port_index_t hindex, hdist; assert(index != 0); assert(obj != IO_NULL); hindex = IH_TABLE_HASH(obj, size); + hdist = 0; assert(entry == &table[index]); assert(entry->ie_object == obj); @@ -268,14 +294,30 @@ ipc_hash_table_insert( * We want to insert at hindex, but there may be collisions. * If a collision occurs, search for the end of the clump * and insert there. + * + * However, Robin Hood steals from the rich, and as we go + * through the clump, if we go over an item that is less + * displaced than we'd be, we steal his slot and + * keep inserting him in our stead. */ - while (table[hindex].ie_index != 0) { - if (++hindex == size) + if (table[hindex].ie_dist < hdist) { +#define swap(a, b) ({ typeof(a) _tmp = (b); (b) = (a); (a) = _tmp; }) + swap(hdist, table[hindex].ie_dist); + swap(index, table[hindex].ie_index); +#undef swap + } + if (hdist < IPC_ENTRY_DIST_MAX) { + /* peg the displacement distance at IPC_ENTRY_DIST_MAX */ + ++hdist; + } + if (++hindex == size) { hindex = 0; + } } table[hindex].ie_index = index; + table[hindex].ie_dist = hdist; } /* @@ -288,13 +330,13 @@ ipc_hash_table_insert( void ipc_hash_table_delete( - ipc_entry_t table, - ipc_entry_num_t size, - ipc_object_t obj, - mach_port_index_t index, - __assert_only ipc_entry_t entry) + ipc_entry_t table, + ipc_entry_num_t size, + ipc_object_t obj, + mach_port_index_t index, + __assert_only ipc_entry_t entry) { - mach_port_index_t hindex, dindex; + mach_port_index_t hindex, dindex, dist; assert(index != MACH_PORT_NULL); assert(obj != IO_NULL); @@ -311,8 +353,9 @@ ipc_hash_table_delete( */ while (table[hindex].ie_index != index) { - if (++hindex == size) + if (++hindex == size) { hindex = 0; + } } /* @@ -334,34 +377,47 @@ ipc_hash_table_delete( * until we get to the end of the clump. */ - for (dindex = hindex; index != 0; hindex = dindex) { - for (;;) { - mach_port_index_t tindex; - ipc_object_t tobj; - - if (++dindex == size) - dindex = 0; - assert(dindex != hindex); - - /* are we at the end of the clump? */ - - index = table[dindex].ie_index; - if (index == 0) - break; - - /* is this a displaced object? */ + for (;;) { + dindex = hindex + 1; + if (dindex == size) { + dindex = 0; + } - tobj = table[index].ie_object; - assert(tobj != IO_NULL); - tindex = IH_TABLE_HASH(tobj, size); + /* + * If the next element is empty or isn't displaced, + * then lookup will end on the next element anyway, + * so we can leave the hole right here, we're done + */ + index = table[dindex].ie_index; + dist = table[dindex].ie_dist; + if (index == 0 || dist == 0) { + table[hindex].ie_index = 0; + table[hindex].ie_dist = 0; + return; + } - if ((dindex < hindex) ? 
- ((dindex < tindex) && (tindex <= hindex)) : - ((dindex < tindex) || (tindex <= hindex))) - break; + /* + * Move this object closer to its own slot by occupying the hole. + * If its displacement was pegged, recompute it. + */ + if (dist-- == IPC_ENTRY_DIST_MAX) { + uint32_t desired = IH_TABLE_HASH(table[index].ie_object, size); + if (hindex >= desired) { + dist = hindex - desired; + } else { + dist = hindex + size - desired; + } + if (dist > IPC_ENTRY_DIST_MAX) { + dist = IPC_ENTRY_DIST_MAX; + } } + /* + * Move the displaced element closer to its ideal bucket, + * and keep shifting elements back. + */ table[hindex].ie_index = index; + table[hindex].ie_dist = dist; + hindex = dindex; } } - diff --git a/osfmk/ipc/ipc_hash.h b/osfmk/ipc/ipc_hash.h index b0249f0fb..06ee65840 100644 --- a/osfmk/ipc/ipc_hash.h +++ b/osfmk/ipc/ipc_hash.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * Declarations of entry hash table operations. 
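Taken together, the new lookup, insert, and delete above implement textbook robin-hood hashing with backward-shift deletion. A self-contained user-space sketch of the same scheme may be easier to study than the kernel code; plain integer keys stand in for (space, object) pairs, hash_slot() stands in for IH_TABLE_HASH()/os_hash_kernel_pointer(), DIST_MAX stands in for IPC_ENTRY_DIST_MAX and is deliberately tiny, and all names are mine:

#include <stdint.h>
#include <stdio.h>

#define SIZE     16
#define DIST_MAX 7

struct slot { uint32_t key; uint32_t dist; };  /* key 0 means "empty" */
static struct slot table[SIZE];

static uint32_t hash_slot(uint32_t key) { return (key * 2654435761u) % SIZE; }

static void rh_insert(uint32_t key)
{
	uint32_t i = hash_slot(key), dist = 0;

	while (table[i].key != 0) {
		/* Robin Hood: steal the slot of any less displaced entry */
		if (table[i].dist < dist) {
			struct slot tmp = table[i];
			table[i].key = key; table[i].dist = dist;
			key = tmp.key; dist = tmp.dist;
		}
		if (dist < DIST_MAX) dist++;  /* peg the displacement */
		if (++i == SIZE) i = 0;
	}
	table[i].key = key;
	table[i].dist = dist;
}

static int rh_lookup(uint32_t key)
{
	uint32_t i = hash_slot(key), dist = 0;

	while (table[i].key != 0) {
		if (dist > table[i].dist) return 0; /* insert would have stolen this slot */
		if (dist == table[i].dist && table[i].key == key) return 1;
		if (dist < DIST_MAX) dist++;
		if (++i == SIZE) i = 0;
	}
	return 0;
}

static void rh_delete(uint32_t key) /* assumes key is present */
{
	uint32_t i = hash_slot(key);

	while (table[i].key != key) {
		if (++i == SIZE) i = 0;
	}
	for (;;) {
		uint32_t n = (i + 1) % SIZE;
		uint32_t dist = table[n].dist;

		/* next slot empty or already home: leave the hole here */
		if (table[n].key == 0 || dist == 0) {
			table[i].key = 0;
			table[i].dist = 0;
			return;
		}
		/* shift the entry back one slot; a pegged distance is
		 * stale and must be recomputed, as in the delete above */
		if (dist-- == DIST_MAX) {
			dist = (i + SIZE - hash_slot(table[n].key)) % SIZE;
			if (dist > DIST_MAX) dist = DIST_MAX;
		}
		table[i].key = table[n].key;
		table[i].dist = dist;
		i = n;
	}
}

int main(void)
{
	rh_insert(7); rh_insert(23); rh_insert(39); /* all collide mod 16 */
	printf("23 -> %d\n", rh_lookup(23));        /* 1 */
	rh_delete(7);                               /* backshifts 23 and 39 */
	printf("23 -> %d\n", rh_lookup(23));        /* still 1 */
	return 0;
}

The pegging is what makes the 12-bit ie_dist safe: entries displaced by IPC_ENTRY_DIST_MAX or more all record the same distance, so the lookup loses its early exit among them and simply keeps comparing objects, and the delete has to recompute the true distance from the hash, which is exactly the recompute branch in ipc_hash_table_delete above.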
*/ -#ifndef _IPC_IPC_HASH_H_ +#ifndef _IPC_IPC_HASH_H_ #define _IPC_IPC_HASH_H_ #include @@ -77,24 +77,24 @@ /* Lookup (space, obj) in the appropriate reverse hash table */ extern boolean_t ipc_hash_lookup( - ipc_space_t space, - ipc_object_t obj, - mach_port_name_t *namep, - ipc_entry_t *entryp); + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_entry_t *entryp); /* Insert an entry into the appropriate reverse hash table */ extern void ipc_hash_insert( - ipc_space_t space, - ipc_object_t obj, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry); /* Delete an entry from the appropriate reverse hash table */ extern void ipc_hash_delete( - ipc_space_t space, - ipc_object_t obj, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry); /* * For use by functions that know what they're doing: @@ -103,38 +103,38 @@ extern void ipc_hash_delete( /* Lookup (space, obj) in local hash table */ extern boolean_t ipc_hash_table_lookup( - ipc_entry_t table, - ipc_entry_num_t size, - ipc_object_t obj, - mach_port_name_t *namep, - ipc_entry_t *entryp); + ipc_entry_t table, + ipc_entry_num_t size, + ipc_object_t obj, + mach_port_name_t *namep, + ipc_entry_t *entryp); /* Inserts an entry into the local reverse hash table */ extern void ipc_hash_table_insert( - ipc_entry_t table, - ipc_entry_num_t size, - ipc_object_t obj, - mach_port_index_t index, - ipc_entry_t entry); + ipc_entry_t table, + ipc_entry_num_t size, + ipc_object_t obj, + mach_port_index_t index, + ipc_entry_t entry); /* Delete an entry from the appropriate reverse hash table */ extern void ipc_hash_table_delete( - ipc_entry_t table, - ipc_entry_num_t size, - ipc_object_t obj, - mach_port_name_t name, - ipc_entry_t entry); + ipc_entry_t table, + ipc_entry_num_t size, + ipc_object_t obj, + mach_port_name_t name, + ipc_entry_t entry); #include -#if MACH_IPC_DEBUG +#if MACH_IPC_DEBUG #include extern natural_t ipc_hash_info( - hash_info_bucket_t *info, + hash_info_bucket_t *info, natural_t count); -#endif /* MACH_IPC_DEBUG */ +#endif /* MACH_IPC_DEBUG */ -#endif /* _IPC_IPC_HASH_H_ */ +#endif /* _IPC_IPC_HASH_H_ */ diff --git a/osfmk/ipc/ipc_importance.c b/osfmk/ipc/ipc_importance.c index 7ab4eb355..86d03a586 100644 --- a/osfmk/ipc/ipc_importance.c +++ b/osfmk/ipc/ipc_importance.c @@ -47,10 +47,10 @@ #include #include -extern int proc_pid(void *); -extern int proc_selfpid(void); +extern int proc_pid(void *); +extern int proc_selfpid(void); extern uint64_t proc_uniqueid(void *p); -extern char *proc_name_address(void *p); +extern char *proc_name_address(void *p); /* * Globals for delayed boost drop processing. 
@@ -71,18 +71,18 @@ static boolean_t ipc_importance_delayed_drop_call_requested = FALSE; * Importance Voucher Attribute Manager */ -static lck_spin_t ipc_importance_lock_data; /* single lock for now */ +static lck_spin_t ipc_importance_lock_data; /* single lock for now */ #define ipc_importance_lock_init() \ lck_spin_init(&ipc_importance_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define ipc_importance_lock_destroy() \ lck_spin_destroy(&ipc_importance_lock_data, &ipc_lck_grp) -#define ipc_importance_lock() \ - lck_spin_lock(&ipc_importance_lock_data) -#define ipc_importance_lock_try() \ - lck_spin_try_lock(&ipc_importance_lock_data) -#define ipc_importance_unlock() \ +#define ipc_importance_lock() \ + lck_spin_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp) +#define ipc_importance_lock_try() \ + lck_spin_try_lock_grp(&ipc_importance_lock_data, &ipc_lck_grp) +#define ipc_importance_unlock() \ lck_spin_unlock(&ipc_importance_lock_data) #define ipc_importance_assert_held() \ lck_spin_assert(&ipc_importance_lock_data, LCK_ASSERT_OWNED) @@ -91,43 +91,47 @@ static lck_spin_t ipc_importance_lock_data; /* single lock for now */ #define incr_ref_counter(x) (hw_atomic_add(&(x), 1)) static inline -uint32_t ipc_importance_reference_internal(ipc_importance_elem_t elem) +uint32_t +ipc_importance_reference_internal(ipc_importance_elem_t elem) { incr_ref_counter(elem->iie_refs_added); - return (hw_atomic_add(&elem->iie_bits, 1) & IIE_REFS_MASK); + return hw_atomic_add(&elem->iie_bits, 1) & IIE_REFS_MASK; } static inline -uint32_t ipc_importance_release_internal(ipc_importance_elem_t elem) +uint32_t +ipc_importance_release_internal(ipc_importance_elem_t elem) { incr_ref_counter(elem->iie_refs_dropped); - return (hw_atomic_sub(&elem->iie_bits, 1) & IIE_REFS_MASK); + return hw_atomic_sub(&elem->iie_bits, 1) & IIE_REFS_MASK; } -static inline -uint32_t ipc_importance_task_reference_internal(ipc_importance_task_t task_imp) +static inline +uint32_t +ipc_importance_task_reference_internal(ipc_importance_task_t task_imp) { - uint32_t out; - out = ipc_importance_reference_internal(&task_imp->iit_elem); - incr_ref_counter(task_imp->iit_elem.iie_task_refs_added); - return out; + uint32_t out; + out = ipc_importance_reference_internal(&task_imp->iit_elem); + incr_ref_counter(task_imp->iit_elem.iie_task_refs_added); + return out; } static inline -uint32_t ipc_importance_task_release_internal(ipc_importance_task_t task_imp) +uint32_t +ipc_importance_task_release_internal(ipc_importance_task_t task_imp) { - uint32_t out; + uint32_t out; - assert(1 < IIT_REFS(task_imp)); - incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped); - out = ipc_importance_release_internal(&task_imp->iit_elem); - return out; + assert(1 < IIT_REFS(task_imp)); + incr_ref_counter(task_imp->iit_elem.iie_task_refs_dropped); + out = ipc_importance_release_internal(&task_imp->iit_elem); + return out; } static inline -void ipc_importance_counter_init(ipc_importance_elem_t elem) +void +ipc_importance_counter_init(ipc_importance_elem_t elem) { - elem->iie_refs_added = 0; elem->iie_refs_dropped = 0; elem->iie_kmsg_refs_added = 0; @@ -159,10 +163,10 @@ static zone_t ipc_importance_inherit_zone; static ipc_voucher_attr_control_t ipc_importance_control; static boolean_t ipc_importance_task_check_transition(ipc_importance_task_t task_imp, - iit_update_type_t type, uint32_t delta); + iit_update_type_t type, uint32_t delta); static void ipc_importance_task_propagate_assertion_locked(ipc_importance_task_t task_imp, - iit_update_type_t type, boolean_t 
update_task_imp); + iit_update_type_t type, boolean_t update_task_imp); static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_task, task_t to_task); @@ -179,16 +183,16 @@ static ipc_importance_inherit_t ipc_importance_inherit_from_task(task_t from_tas */ static void ipc_importance_kmsg_link( - ipc_kmsg_t kmsg, - ipc_importance_elem_t elem) + ipc_kmsg_t kmsg, + ipc_importance_elem_t elem) { ipc_importance_elem_t link_elem; assert(IIE_NULL == kmsg->ikm_importance); link_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ? - (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task : - elem; + (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task : + elem; queue_enter(&link_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance); kmsg->ikm_importance = elem; @@ -209,7 +213,7 @@ ipc_importance_kmsg_link( */ static ipc_importance_elem_t ipc_importance_kmsg_unlink( - ipc_kmsg_t kmsg) + ipc_kmsg_t kmsg) { ipc_importance_elem_t elem = kmsg->ikm_importance; @@ -217,8 +221,8 @@ ipc_importance_kmsg_unlink( ipc_importance_elem_t unlink_elem; unlink_elem = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ? - (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task : - elem; + (ipc_importance_elem_t)((ipc_importance_inherit_t)elem)->iii_to_task : + elem; queue_remove(&unlink_elem->iie_kmsgs, kmsg, ipc_kmsg_t, ikm_inheritance); kmsg->ikm_importance = IIE_NULL; @@ -246,11 +250,11 @@ ipc_importance_inherit_link( assert(IIE_NULL == inherit->iii_from_elem); link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ? - ((ipc_importance_inherit_t)elem)->iii_to_task : - (ipc_importance_task_t)elem; + ((ipc_importance_inherit_t)elem)->iii_to_task : + (ipc_importance_task_t)elem; - queue_enter(&link_task->iit_inherits, inherit, - ipc_importance_inherit_t, iii_inheritance); + queue_enter(&link_task->iit_inherits, inherit, + ipc_importance_inherit_t, iii_inheritance); inherit->iii_from_elem = elem; } @@ -277,11 +281,11 @@ ipc_importance_inherit_find( ipc_importance_inherit_t inherit; link_task = (IIE_TYPE_INHERIT == IIE_TYPE(from)) ? - ((ipc_importance_inherit_t)from)->iii_to_task : - (ipc_importance_task_t)from; + ((ipc_importance_inherit_t)from)->iii_to_task : + (ipc_importance_task_t)from; queue_iterate(&link_task->iit_inherits, inherit, - ipc_importance_inherit_t, iii_inheritance) { + ipc_importance_inherit_t, iii_inheritance) { if (inherit->iii_to_task == to_task && inherit->iii_depth == depth) { return inherit; } @@ -312,11 +316,11 @@ ipc_importance_inherit_unlink( ipc_importance_task_t unlink_task; unlink_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ? - ((ipc_importance_inherit_t)elem)->iii_to_task : - (ipc_importance_task_t)elem; + ((ipc_importance_inherit_t)elem)->iii_to_task : + (ipc_importance_task_t)elem; - queue_remove(&unlink_task->iit_inherits, inherit, - ipc_importance_inherit_t, iii_inheritance); + queue_remove(&unlink_task->iit_inherits, inherit, + ipc_importance_inherit_t, iii_inheritance); inherit->iii_from_elem = IIE_NULL; } return elem; @@ -355,22 +359,26 @@ ipc_importance_release_locked(ipc_importance_elem_t elem) ipc_kmsg_t temp_kmsg; uint32_t expected = 0; - if (0 < elem->iie_made) + if (0 < elem->iie_made) { expected++; + } link_task = (IIE_TYPE_INHERIT == IIE_TYPE(elem)) ? 
- ((ipc_importance_inherit_t)elem)->iii_to_task : - (ipc_importance_task_t)elem; + ((ipc_importance_inherit_t)elem)->iii_to_task : + (ipc_importance_task_t)elem; queue_iterate(&link_task->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) - if (temp_kmsg->ikm_importance == elem) - expected++; + if (temp_kmsg->ikm_importance == elem) { + expected++; + } queue_iterate(&link_task->iit_inherits, temp_inherit, - ipc_importance_inherit_t, iii_inheritance) - if (temp_inherit->iii_from_elem == elem) - expected++; - if (IIE_REFS(elem) < expected + 1) + ipc_importance_inherit_t, iii_inheritance) + if (temp_inherit->iii_from_elem == elem) { + expected++; + } + if (IIE_REFS(elem) < expected + 1) { panic("ipc_importance_release_locked (%p)", elem); + } #endif /* IMPORTANCE_DEBUG */ if (0 < ipc_importance_release_internal(elem)) { @@ -381,7 +389,6 @@ ipc_importance_release_locked(ipc_importance_elem_t elem) /* last ref */ switch (IIE_TYPE(elem)) { - /* just a "from" task reference to drop */ case IIE_TYPE_TASK: { @@ -480,8 +487,9 @@ ipc_importance_release_locked(ipc_importance_elem_t elem) void ipc_importance_release(ipc_importance_elem_t elem) { - if (IIE_NULL == elem) + if (IIE_NULL == elem) { return; + } ipc_importance_lock(); ipc_importance_release_locked(elem); @@ -490,8 +498,8 @@ ipc_importance_release(ipc_importance_elem_t elem) /* * Routine: ipc_importance_task_reference - - + * + * * Purpose: * Retain a reference on a task importance attribute value. * Conditions: @@ -501,8 +509,9 @@ ipc_importance_release(ipc_importance_elem_t elem) void ipc_importance_task_reference(ipc_importance_task_t task_elem) { - if (IIT_NULL == task_elem) + if (IIT_NULL == task_elem) { return; + } #if IIE_REF_DEBUG incr_ref_counter(task_elem->iit_elem.iie_task_refs_added); #endif @@ -521,8 +530,9 @@ ipc_importance_task_reference(ipc_importance_task_t task_elem) void ipc_importance_task_release(ipc_importance_task_t task_elem) { - if (IIT_NULL == task_elem) + if (IIT_NULL == task_elem) { return; + } ipc_importance_lock(); #if IIE_REF_DEBUG @@ -595,14 +605,15 @@ ipc_importance_task_check_transition( ipc_importance_assert_held(); - if (!ipc_importance_task_is_any_receiver_type(task_imp)) + if (!ipc_importance_task_is_any_receiver_type(task_imp)) { return FALSE; + } #if IMPORTANCE_TRACE int target_pid = task_pid(target_task); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? 
IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_START, - proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0); + proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0); #endif /* snapshot the effective boosting status before making any changes */ @@ -612,11 +623,11 @@ ipc_importance_task_check_transition( if (boost) { task_imp->iit_assertcnt += delta; #if IMPORTANCE_TRACE - DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid, - task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt); + DTRACE_BOOST6(send_boost, task_t, target_task, int, target_pid, + task_t, current_task(), int, proc_selfpid(), int, delta, int, task_imp->iit_assertcnt); #endif } else { - // assert(delta <= task_imp->iit_assertcnt); + // assert(delta <= task_imp->iit_assertcnt); if (task_imp->iit_assertcnt < delta + IIT_EXTERN(task_imp)) { /* TODO: Turn this back into a panic */ task_imp->iit_assertcnt = IIT_EXTERN(task_imp); @@ -631,17 +642,16 @@ ipc_importance_task_check_transition( #if IMPORTANCE_TRACE KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (((boost) ? IMP_HOLD : IMP_DROP) | TASK_POLICY_INTERNAL))) | DBG_FUNC_END, - proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0); + proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0); #endif /* did the change result in an effective donor status change? */ after_boosted = (task_imp->iit_assertcnt > 0); if (after_boosted != before_boosted) { - - /* + /* * If the task importance is already on an update queue, we just reversed the need for a - * pending policy update. If the queue is any other than the delayed-drop-queue, pull it + * pending policy update. If the queue is anything other than the delayed-drop-queue, pull it * off that queue and release the reference it got going onto the update queue. If it is * the delayed-drop-queue we leave it in place in case it comes back into the drop state * before its time delay is up. 
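The shape of this routine is easy to restate in isolation: the count changes on every hold or drop, but only a 0 <-> nonzero edge requires a policy update and a walk of the downstream graph. A toy version of the edge detection (names are mine; the kernel clamps an over-release against its external assertion counts rather than against zero):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct task_assertions { uint32_t assertcnt; };

typedef enum { UPDATE_HOLD, UPDATE_DROP } update_type_t;

static bool
assertion_check_transition(struct task_assertions *t,
    update_type_t type, uint32_t delta)
{
	bool before_boosted = (t->assertcnt > 0);

	if (type == UPDATE_HOLD) {
		t->assertcnt += delta;
	} else if (delta > t->assertcnt) {
		t->assertcnt = 0; /* over-release: clamp instead of panicking */
	} else {
		t->assertcnt -= delta;
	}
	/* only a 0 <-> nonzero edge changes effective donor status */
	return (t->assertcnt > 0) != before_boosted;
}

int main(void)
{
	struct task_assertions t = { 0 };
	printf("%d\n", assertion_check_transition(&t, UPDATE_HOLD, 2)); /* 1 */
	printf("%d\n", assertion_check_transition(&t, UPDATE_HOLD, 1)); /* 0 */
	printf("%d\n", assertion_check_transition(&t, UPDATE_DROP, 3)); /* 1 */
	return 0;
}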
@@ -688,8 +698,8 @@ ipc_importance_task_propagate_helper( { ipc_importance_task_t temp_task_imp; - /* - * iterate the downstream kmsgs, adjust their boosts, + /* + * iterate the downstream kmsgs, adjust their boosts, * and capture the next task to adjust for each message */ @@ -752,7 +762,7 @@ ipc_importance_task_propagate_helper( ipc_importance_inherit_t temp_inherit; queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) { - uint32_t assertcnt = III_EXTERN(temp_inherit); + uint32_t assertcnt = III_EXTERN(temp_inherit); temp_task_imp = temp_inherit->iii_to_task; assert(IIT_NULL != temp_task_imp); @@ -773,7 +783,6 @@ ipc_importance_task_propagate_helper( temp_inherit->iii_donating = TRUE; temp_task_imp->iit_externcnt += temp_inherit->iii_externcnt; temp_task_imp->iit_externdrop += temp_inherit->iii_externdrop; - } else { /* if no contributing assertions, move on */ if (0 == assertcnt) { @@ -782,7 +791,7 @@ ipc_importance_task_propagate_helper( } /* nothing to do if the inherit is not donating */ - if (!temp_inherit->iii_donating) { + if (!temp_inherit->iii_donating) { continue; } @@ -795,7 +804,6 @@ ipc_importance_task_propagate_helper( assert(temp_task_imp->iit_externdrop >= temp_inherit->iii_externdrop); temp_task_imp->iit_externcnt -= temp_inherit->iii_externcnt; temp_task_imp->iit_externdrop -= temp_inherit->iii_externdrop; - } /* Adjust the task assertions and determine if an edge was crossed */ @@ -804,7 +812,7 @@ ipc_importance_task_propagate_helper( ipc_importance_task_reference(temp_task_imp); incr_ref_counter(temp_task_imp->iit_elem.iie_task_refs_added_transition); queue_enter(propagation, temp_task_imp, ipc_importance_task_t, iit_props); - } + } } } @@ -836,8 +844,8 @@ ipc_importance_task_process_updates( queue_init(&second_chance); /* process any resulting policy updates */ - retry: - while(!queue_empty(queue)) { +retry: + while (!queue_empty(queue)) { task_t target_task; struct task_pend_token pend_token = {}; @@ -875,14 +883,14 @@ ipc_importance_task_process_updates( continue; } - /* - * Can we get the task lock out-of-order? + /* + * Can we get the task lock out-of-order? * If not, stick this back on the second-chance queue. */ if (!task_lock_try(target_task)) { boolean_t should_wait_lock = (queue == &second_chance); task_imp->iit_updateq = &second_chance; - + /* * If we're already processing second-chances on * tasks, keep this task on the front of the queue. @@ -891,16 +899,16 @@ ipc_importance_task_process_updates( * chance of re-acquiring the lock if we come back * to it right away. 
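The try-lock dance here generalizes: while one lock is held, a second lock acquired out of order may only be tried, and anything that fails the try is parked for a pass on which blocking is affordable. A compact pthreads sketch of the same second-chance pattern, simplified in that the update happens directly under the blocking lock and the queue-ordering subtleties are dropped; all names are mine:

#include <pthread.h>

struct item {
	pthread_mutex_t lock;  /* plays the role of the task lock */
	struct item *next;
};

/* plays the role of the importance lock */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_one(struct item *it) { (void)it; /* policy update */ }

static void
process_all(struct item *head)
{
	struct item *second_chance = NULL;
	struct item *it, *next;

	pthread_mutex_lock(&list_lock);
	for (it = head; it != NULL; it = next) {
		next = it->next;
		/* out-of-order acquisition: only a try is deadlock-safe */
		if (pthread_mutex_trylock(&it->lock) != 0) {
			it->next = second_chance; /* park for a second chance */
			second_chance = it;
			continue;
		}
		process_one(it);
		pthread_mutex_unlock(&it->lock);
	}

	while (second_chance != NULL) {
		it = second_chance;
		second_chance = it->next;

		/* drop the outer lock so that blocking is affordable */
		pthread_mutex_unlock(&list_lock);
		pthread_mutex_lock(&it->lock); /* wait out the holder */
		process_one(it);
		pthread_mutex_unlock(&it->lock);
		pthread_mutex_lock(&list_lock);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct item a, b;
	pthread_mutex_init(&a.lock, NULL);
	pthread_mutex_init(&b.lock, NULL);
	a.next = &b; b.next = NULL;
	process_all(&a); /* single-threaded: every trylock succeeds */
	return 0;
}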
*/ - if (should_wait_lock){ + if (should_wait_lock) { task_reference(target_task); queue_enter_first(&second_chance, task_imp, - ipc_importance_task_t, iit_updates); + ipc_importance_task_t, iit_updates); } else { - queue_enter(&second_chance, task_imp, - ipc_importance_task_t, iit_updates); + queue_enter(&second_chance, task_imp, + ipc_importance_task_t, iit_updates); } ipc_importance_unlock(); - + if (should_wait_lock) { task_lock(target_task); task_unlock(target_task); @@ -924,8 +932,9 @@ ipc_importance_task_process_updates( task_reference(target_task); /* count the transition */ - if (boost) + if (boost) { task_imp->iit_transitions++; + } ipc_importance_unlock(); @@ -963,14 +972,14 @@ ipc_importance_task_process_updates( static void ipc_importance_task_delayed_drop_scan( __unused void *arg1, - __unused void *arg2) + __unused void *arg2) { ipc_importance_lock(); /* process all queued task drops with timestamps up to TARGET(first)+SKEW */ - ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue, - FALSE, - ipc_importance_delayed_drop_timestamp); + ipc_importance_task_process_updates(&ipc_importance_delayed_drop_queue, + FALSE, + ipc_importance_delayed_drop_timestamp); /* importance lock may have been temporarily dropped */ @@ -989,11 +998,11 @@ ipc_importance_task_delayed_drop_scan( nanoseconds_to_absolutetime(DENAP_DROP_LEEWAY, &leeway); thread_call_enter_delayed_with_leeway( - ipc_importance_delayed_drop_call, - NULL, - deadline, - leeway, - DENAP_DROP_FLAGS); + ipc_importance_delayed_drop_call, + NULL, + deadline, + leeway, + DENAP_DROP_FLAGS); } else { ipc_importance_delayed_drop_call_requested = FALSE; } @@ -1021,8 +1030,8 @@ ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp) * a new reference for the delay drop update queue. */ if (NULL != task_imp->iit_updateq) { - queue_remove(task_imp->iit_updateq, task_imp, - ipc_importance_task_t, iit_updates); + queue_remove(task_imp->iit_updateq, task_imp, + ipc_importance_task_t, iit_updates); } else { ipc_importance_task_reference_internal(task_imp); } @@ -1030,8 +1039,8 @@ ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp) task_imp->iit_updateq = &ipc_importance_delayed_drop_queue; task_imp->iit_updatetime = timestamp; - queue_enter(&ipc_importance_delayed_drop_queue, task_imp, - ipc_importance_task_t, iit_updates); + queue_enter(&ipc_importance_delayed_drop_queue, task_imp, + ipc_importance_task_t, iit_updates); /* request the delayed thread-call if not already requested */ if (!ipc_importance_delayed_drop_call_requested) { @@ -1046,11 +1055,11 @@ ipc_importance_task_delayed_drop(ipc_importance_task_t task_imp) ipc_importance_delayed_drop_call_requested = TRUE; thread_call_enter_delayed_with_leeway( - ipc_importance_delayed_drop_call, - NULL, - deadline, - leeway, - DENAP_DROP_FLAGS); + ipc_importance_delayed_drop_call, + NULL, + deadline, + leeway, + DENAP_DROP_FLAGS); } } @@ -1116,7 +1125,6 @@ ipc_importance_task_propagate_assertion_locked( temp_task_imp->iit_updatepolicy = 0; if (need_update && TASK_NULL != temp_task_imp->iit_task) { if (NULL == temp_task_imp->iit_updateq) { - /* * If a downstream task that needs an update is subject to AppNap, * drop boosts according to the delay hysteresis. 
Otherwise, @@ -1132,17 +1140,17 @@ ipc_importance_task_propagate_assertion_locked( ipc_importance_task_reference_internal(temp_task_imp); if (boost) { queue_enter(&updates, temp_task_imp, - ipc_importance_task_t, iit_updates); + ipc_importance_task_t, iit_updates); } else { queue_enter_first(&updates, temp_task_imp, - ipc_importance_task_t, iit_updates); + ipc_importance_task_t, iit_updates); } } } else { /* Must already be on the AppNap hysteresis queue */ assert(ipc_importance_delayed_drop_call != NULL); assert(ipc_importance_task_is_marked_denap_receiver(temp_task_imp)); - } + } } ipc_importance_task_release_internal(temp_task_imp); @@ -1336,7 +1344,7 @@ ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_im int target_pid = task_pid(target_task); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START, - proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); + proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); #endif if (IIT_LEGACY_EXTERN(task_imp) == 0) { @@ -1360,20 +1368,20 @@ ipc_importance_task_hold_legacy_external_assertion(ipc_importance_task_t task_im #if IMPORTANCE_TRACE KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_HOLD | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END, - proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); - // This covers the legacy case where a task takes an extra boost. + proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); + // This covers the legacy case where a task takes an extra boost. DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, proc_selfpid(), int, count, int, task_imp->iit_assertcnt); #endif if (KERN_FAILURE == ret && target_task != TASK_NULL) { printf("BUG in process %s[%d]: " - "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. " - "(%d total, %d external, %d legacy-external)\n", - proc_name_address(target_task->bsd_info), task_pid(target_task), - target_assertcnt, target_externcnt, target_legacycnt); + "attempt to acquire an additional legacy external boost assertion without holding an existing legacy external assertion. 
" + "(%d total, %d external, %d legacy-external)\n", + proc_name_address(target_task->bsd_info), task_pid(target_task), + target_assertcnt, target_externcnt, target_legacycnt); } - return(ret); + return ret; } /* @@ -1402,7 +1410,7 @@ ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_im if (count > 1) { return KERN_INVALID_ARGUMENT; } - + ipc_importance_lock(); target_task = task_imp->iit_task; @@ -1410,7 +1418,7 @@ ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_im int target_pid = task_pid(target_task); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_START, - proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); + proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); #endif if (count > IIT_LEGACY_EXTERN(task_imp)) { @@ -1421,7 +1429,7 @@ ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_im target_legacycnt = IIT_LEGACY_EXTERN(task_imp); ret = KERN_FAILURE; } else { - /* + /* * decrement legacy external count from the top level and reflect * into internal for this and all subsequent updates. */ @@ -1443,7 +1451,7 @@ ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_im task_imp->iit_legacy_externcnt = 0; task_imp->iit_legacy_externdrop = 0; } - + /* reflect the drop to the internal assertion count (and effect any importance change) */ if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_DROP, count)) { ipc_importance_task_propagate_assertion_locked(task_imp, IIT_UPDATE_DROP, TRUE); @@ -1452,8 +1460,8 @@ ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_im } #if IMPORTANCE_TRACE - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END, - proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, (IMP_DROP | TASK_POLICY_EXTERNAL))) | DBG_FUNC_END, + proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); #endif ipc_importance_unlock(); @@ -1461,11 +1469,11 @@ ipc_importance_task_drop_legacy_external_assertion(ipc_importance_task_t task_im /* delayed printf for user-supplied data failures */ if (KERN_FAILURE == ret && TASK_NULL != target_task) { printf("BUG in process %s[%d]: over-released legacy external boost assertions (%d total, %d external, %d legacy-external)\n", - proc_name_address(target_task->bsd_info), task_pid(target_task), - target_assertcnt, target_externcnt, target_legacycnt); + proc_name_address(target_task->bsd_info), task_pid(target_task), + target_assertcnt, target_externcnt, target_legacycnt); } - return(ret); + return ret; } @@ -1488,7 +1496,7 @@ ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, int target_pid = task_pid(target_task); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_START, - proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0); + proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_EXTERN(task_imp), 0); #endif ipc_importance_lock(); @@ -1500,12 +1508,12 @@ ipc_importance_task_externalize_legacy_assertion(ipc_importance_task_t task_imp, #if IMPORTANCE_TRACE KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN)) | DBG_FUNC_END, - proc_selfpid(), 
target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); - // This is the legacy boosting path + proc_selfpid(), target_pid, task_imp->iit_assertcnt, IIT_LEGACY_EXTERN(task_imp), 0); + // This is the legacy boosting path DTRACE_BOOST5(receive_boost, task_t, target_task, int, target_pid, int, sender_pid, int, count, int, IIT_LEGACY_EXTERN(task_imp)); #endif /* IMPORTANCE_TRACE */ - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1524,7 +1532,7 @@ ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp) boolean_t before_donor; boolean_t after_donor; task_t target_task; - + assert(task_imp != NULL); /* @@ -1552,9 +1560,9 @@ ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp) int target_pid = task_pid(target_task); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START, - target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0); -#endif + (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_START, + target_pid, task_imp->iit_donor, task_live_donor, before_donor, 0); +#endif /* update the task importance live donor status based on the task's value */ task_imp->iit_donor = task_live_donor; @@ -1577,8 +1585,8 @@ ipc_importance_task_update_live_donor(ipc_importance_task_t task_imp) #if IMPORTANCE_TRACE KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END, - target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0); + (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_UPDATE_LIVE_DONOR_STATE)) | DBG_FUNC_END, + target_pid, task_imp->iit_donor, task_live_donor, after_donor, 0); #endif ipc_importance_unlock(); @@ -1606,14 +1614,15 @@ ipc_importance_task_mark_donor(ipc_importance_task_t task_imp, boolean_t donatin task_imp->iit_donor = (donating ? 1 : 0); - if (task_imp->iit_donor > 0 && old_donor == 0) + if (task_imp->iit_donor > 0 && old_donor == 0) { task_imp->iit_transitions++; + } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE, - task_pid(task_imp->iit_task), donating, - old_donor, task_imp->iit_donor, 0); - + (IMPORTANCE_CODE(IMP_DONOR_CHANGE, IMP_DONOR_INIT_DONOR_STATE)) | DBG_FUNC_NONE, + task_pid(task_imp->iit_task), donating, + old_donor, task_imp->iit_donor, 0); + ipc_importance_unlock(); } @@ -1630,9 +1639,9 @@ boolean_t ipc_importance_task_is_marked_donor(ipc_importance_task_t task_imp) { if (IIT_NULL == task_imp) { - return FALSE; + return FALSE; } - return (0 != task_imp->iit_donor); + return 0 != task_imp->iit_donor; } /* @@ -1667,9 +1676,9 @@ boolean_t ipc_importance_task_is_marked_live_donor(ipc_importance_task_t task_imp) { if (IIT_NULL == task_imp) { - return FALSE; + return FALSE; } - return (0 != task_imp->iit_live_donor); + return 0 != task_imp->iit_live_donor; } /* @@ -1685,11 +1694,11 @@ boolean_t ipc_importance_task_is_donor(ipc_importance_task_t task_imp) { if (IIT_NULL == task_imp) { - return FALSE; + return FALSE; } - return (ipc_importance_task_is_marked_donor(task_imp) || - (ipc_importance_task_is_marked_receiver(task_imp) && - task_imp->iit_assertcnt > 0)); + return ipc_importance_task_is_marked_donor(task_imp) || + (ipc_importance_task_is_marked_receiver(task_imp) && + task_imp->iit_assertcnt > 0); } /* @@ -1698,17 +1707,17 @@ ipc_importance_task_is_donor(ipc_importance_task_t task_imp) * Query if a given task can ever donate importance. 
* Conditions: * May be called without taking the importance lock. - * Condition is permanent for a give task. + * Condition is permanent for a given task. */ boolean_t ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp) { if (IIT_NULL == task_imp) { - return FALSE; + return FALSE; } - return (!ipc_importance_task_is_marked_donor(task_imp) && - !ipc_importance_task_is_marked_live_donor(task_imp) && - !ipc_importance_task_is_marked_receiver(task_imp)); + return !ipc_importance_task_is_marked_donor(task_imp) && + !ipc_importance_task_is_marked_live_donor(task_imp) && + !ipc_importance_task_is_marked_receiver(task_imp); } /* @@ -1723,7 +1732,7 @@ ipc_importance_task_is_never_donor(ipc_importance_task_t task_imp) void ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t receiving) { - assert(task_imp != NULL); + assert(task_imp != NULL); ipc_importance_lock(); if (receiving) { @@ -1754,7 +1763,7 @@ ipc_importance_task_mark_receiver(ipc_importance_task_t task_imp, boolean_t rece boolean_t ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp) { - return (IIT_NULL != task_imp && 0 != task_imp->iit_receiver); + return IIT_NULL != task_imp && 0 != task_imp->iit_receiver; } @@ -1770,7 +1779,7 @@ ipc_importance_task_is_marked_receiver(ipc_importance_task_t task_imp) void ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_t denap) { - assert(task_imp != NULL); + assert(task_imp != NULL); ipc_importance_lock(); if (denap) { @@ -1800,7 +1809,7 @@ ipc_importance_task_mark_denap_receiver(ipc_importance_task_t task_imp, boolean_ boolean_t ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp) { - return (IIT_NULL != task_imp && 0 != task_imp->iit_denap); + return IIT_NULL != task_imp && 0 != task_imp->iit_denap; } /* @@ -1815,7 +1824,7 @@ ipc_importance_task_is_marked_denap_receiver(ipc_importance_task_t task_imp) boolean_t ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp) { - return (ipc_importance_task_is_marked_denap_receiver(task_imp)); + return ipc_importance_task_is_marked_denap_receiver(task_imp); } /* @@ -1831,8 +1840,8 @@ ipc_importance_task_is_denap_receiver(ipc_importance_task_t task_imp) boolean_t ipc_importance_task_is_any_receiver_type(ipc_importance_task_t task_imp) { - return (ipc_importance_task_is_marked_receiver(task_imp) || - ipc_importance_task_is_marked_denap_receiver(task_imp)); + return ipc_importance_task_is_marked_receiver(task_imp) || + ipc_importance_task_is_marked_denap_receiver(task_imp); } #if 0 /* currently unused */ @@ -1878,11 +1887,12 @@ ipc_importance_inherit_release_locked(ipc_importance_inherit_t inherit) void ipc_importance_inherit_release(ipc_importance_inherit_t inherit) { - if (III_NULL != inherit) + if (III_NULL != inherit) { ipc_importance_release(&inherit->iii_elem); + } } #endif /* 0 currently unused */ - + /* * Routine: ipc_importance_for_task * Purpose: @@ -1904,10 +1914,11 @@ ipc_importance_for_task(task_t task, boolean_t made) assert(TASK_NULL != task); - retry: +retry: /* No use returning anything for inactive task */ - if (!task->active) + if (!task->active) { return IIT_NULL; + } ipc_importance_lock(); task_elem = task->task_imp_base; @@ -1927,14 +1938,16 @@ ipc_importance_for_task(task_t task, boolean_t made) } ipc_importance_unlock(); - if (!first_pass) + if (!first_pass) { return IIT_NULL; + } first_pass = FALSE; /* Need to make one - may race with others (be prepared to drop) */ task_elem = 
(ipc_importance_task_t)zalloc(ipc_importance_task_zone); - if (IIT_NULL == task_elem) + if (IIT_NULL == task_elem) { goto retry; + } task_elem->iit_bits = IIE_TYPE_TASK | 2; /* one for task, one for return/made */ task_elem->iit_made = (made) ? 1 : 0; @@ -1987,8 +2000,9 @@ ipc_importance_for_task(task_t task, boolean_t made) } #if DEVELOPMENT || DEBUG -void task_importance_update_owner_info(task_t task) { - +void +task_importance_update_owner_info(task_t task) +{ if (task != TASK_NULL && task->task_imp_base != IIT_NULL) { ipc_importance_task_t task_elem = task->task_imp_base; @@ -2042,8 +2056,8 @@ ipc_importance_reset_locked(ipc_importance_task_t task_imp, boolean_t donor) #if DEVELOPMENT || DEBUG if (task_imp->iit_assertcnt > 0 && task_imp->iit_live_donor) { - printf("Live donor task %s[%d] still has %d importance assertions after reset\n", - task_imp->iit_procname, task_imp->iit_bsd_pid, task_imp->iit_assertcnt); + printf("Live donor task %s[%d] still has %d importance assertions after reset\n", + task_imp->iit_procname, task_imp->iit_bsd_pid, task_imp->iit_assertcnt); } #endif @@ -2112,7 +2126,7 @@ ipc_importance_disconnect_task(task_t task) task_imp->iit_task = TASK_NULL; task->task_imp_base = IIT_NULL; task_unlock(task); - + /* reset the effects the current task hold on the importance */ ipc_importance_reset_locked(task_imp, TRUE); @@ -2198,8 +2212,8 @@ ipc_importance_exec_switch_task( boolean_t ipc_importance_check_circularity( - ipc_port_t port, - ipc_port_t dest) + ipc_port_t port, + ipc_port_t dest) { ipc_importance_task_t imp_task = IIT_NULL; ipc_importance_task_t release_imp_task = IIT_NULL; @@ -2211,8 +2225,9 @@ ipc_importance_check_circularity( assert(port != IP_NULL); assert(dest != IP_NULL); - if (port == dest) + if (port == dest) { return TRUE; + } base = dest; /* Check if destination needs a turnstile */ @@ -2230,7 +2245,7 @@ ipc_importance_check_circularity( */ ip_lock(port); - /* + /* * Even if port is just carrying assertions for others, * we need the importance lock. */ @@ -2246,8 +2261,9 @@ ipc_importance_check_circularity( if (ip_lock_try(dest)) { if (!ip_active(dest) || (dest->ip_receiver_name != MACH_PORT_NULL) || - (dest->ip_destination == IP_NULL)) + (dest->ip_destination == IP_NULL)) { goto not_circular; + } /* dest is in transit; further checking necessary */ @@ -2255,7 +2271,7 @@ ipc_importance_check_circularity( } ip_unlock(port); - /* + /* * We're about to pay the cost to serialize, * just go ahead and grab importance lock. 
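Underneath the locking, importance accounting, and turnstile updates that this routine performs over the next several hunks, the circularity test reduces to a pointer walk: a port in transit records the port it is travelling inside of, and making port point at dest creates a cycle exactly when dest's chain already leads back to port. A toy version with an illustrative struct in place of ipc_port:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_port {
	struct toy_port *destination; /* non-NULL while "in transit" */
};

static bool
would_be_circular(struct toy_port *port, struct toy_port *dest)
{
	struct toy_port *base = dest;

	if (port == dest) {
		return true;
	}
	/* crawl to the bottom of dest's in-transit chain */
	while (base->destination != NULL) {
		if (base == port) {
			return true;
		}
		base = base->destination;
	}
	return base == port;
}

int main(void)
{
	struct toy_port a = { NULL }, b = { &a }, c = { &b };

	printf("%d\n", would_be_circular(&a, &c)); /* 1: c -> b -> a */
	printf("%d\n", would_be_circular(&c, &a)); /* 0: a ends its own chain */
	return 0;
}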
*/ @@ -2276,8 +2292,9 @@ ipc_importance_check_circularity( if (!ip_active(base) || (base->ip_receiver_name != MACH_PORT_NULL) || - (base->ip_destination == IP_NULL)) + (base->ip_destination == IP_NULL)) { break; + } base = base->ip_destination; } @@ -2309,8 +2326,9 @@ ipc_importance_check_circularity( base = next; } - if (imp_lock_held) + if (imp_lock_held) { ipc_importance_unlock(); + } ipc_port_send_turnstile_complete(dest); return TRUE; @@ -2360,11 +2378,11 @@ not_circular: */ if (port_send_turnstile(port)) { send_turnstile = turnstile_prepare((uintptr_t)port, - port_send_turnstile_address(port), - TURNSTILE_NULL, TURNSTILE_SYNC_IPC); + port_send_turnstile_address(port), + TURNSTILE_NULL, TURNSTILE_SYNC_IPC); turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest), - (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); + (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); /* update complete and turnstile complete called after dropping all locks */ } @@ -2375,13 +2393,13 @@ not_circular: ip_unlock(port); for (;;) { - ipc_port_t next; /* every port along chain track assertions behind it */ ipc_port_impcount_delta(dest, assertcnt, base); - if (dest == base) + if (dest == base) { break; + } /* port is in transit */ @@ -2397,8 +2415,8 @@ not_circular: /* base is not in transit */ assert(!ip_active(base) || - (base->ip_receiver_name != MACH_PORT_NULL) || - (base->ip_destination == IP_NULL)); + (base->ip_receiver_name != MACH_PORT_NULL) || + (base->ip_destination == IP_NULL)); /* * Find the task to boost (if any). @@ -2415,14 +2433,14 @@ not_circular: assert(ipc_importance_task_is_any_receiver_type(imp_task)); } /* otherwise don't boost current task */ - } else if (base->ip_receiver_name != MACH_PORT_NULL) { ipc_space_t space = base->ip_receiver; /* only spaces with boost-accepting tasks */ if (space->is_task != TASK_NULL && - ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) + ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) { imp_task = space->is_task->task_imp_base; + } } /* take reference before unlocking base */ @@ -2446,18 +2464,21 @@ not_circular: if (imp_task != IIT_NULL) { assert(imp_lock_held); - if (transfer_assertions) + if (transfer_assertions) { ipc_importance_task_hold_internal_assertion_locked(imp_task, assertcnt); + } } if (release_imp_task != IIT_NULL) { assert(imp_lock_held); - if (transfer_assertions) + if (transfer_assertions) { ipc_importance_task_drop_internal_assertion_locked(release_imp_task, assertcnt); + } } - if (imp_lock_held) + if (imp_lock_held) { ipc_importance_unlock(); + } /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */ if (send_turnstile) { @@ -2471,11 +2492,13 @@ not_circular: turnstile_cleanup(); } - if (imp_task != IIT_NULL) + if (imp_task != IIT_NULL) { ipc_importance_task_release(imp_task); + } - if (release_imp_task != IIT_NULL) + if (release_imp_task != IIT_NULL) { ipc_importance_task_release(release_imp_task); + } return FALSE; } @@ -2492,8 +2515,8 @@ not_circular: */ boolean_t ipc_importance_send( - ipc_kmsg_t kmsg, - mach_msg_option_t option) + ipc_kmsg_t kmsg, + mach_msg_option_t option) { ipc_port_t port = (ipc_port_t) kmsg->ikm_header->msgh_remote_port; boolean_t port_lock_dropped = FALSE; @@ -2539,14 +2562,14 @@ ipc_importance_send( mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED]; mach_voucher_attr_value_handle_array_size_t val_count; ipc_voucher_t voucher; - + 
assert(ip_kotype(kmsg->ikm_voucher) == IKOT_VOUCHER); voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject; /* check to see if the voucher has an importance attribute */ val_count = MACH_VOUCHER_ATTR_VALUE_MAX_NESTED; kr = mach_voucher_attr_control_get_values(ipc_importance_control, voucher, - vals, &val_count); + vals, &val_count); assert(KERN_SUCCESS == kr); /* @@ -2596,8 +2619,9 @@ ipc_importance_send( ipc_importance_unlock(); /* re-acquire port lock, if needed */ - if (TRUE == port_lock_dropped) + if (TRUE == port_lock_dropped) { ip_lock(port); + } return port_lock_dropped; } @@ -2621,11 +2645,11 @@ portupdate: #if IMPORTANCE_TRACE if (kdebug_enable) { mach_msg_max_trailer_t *dbgtrailer = (mach_msg_max_trailer_t *) - ((vm_offset_t)kmsg->ikm_header + round_msg(kmsg->ikm_header->msgh_size)); + ((vm_offset_t)kmsg->ikm_header + round_msg(kmsg->ikm_header->msgh_size)); unsigned int sender_pid = dbgtrailer->msgh_audit.val[5]; mach_msg_id_t imp_msgh_id = kmsg->ikm_header->msgh_id; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_START, - task_pid(task), sender_pid, imp_msgh_id, 0, 0); + task_pid(task), sender_pid, imp_msgh_id, 0, 0); } #endif /* IMPORTANCE_TRACE */ @@ -2643,7 +2667,6 @@ portupdate: /* if this results in a change of state, propagate the transition */ if (ipc_importance_task_check_transition(task_imp, IIT_UPDATE_HOLD, delta)) { - /* can't hold the port lock during task transition(s) */ if (!need_port_lock) { need_port_lock = TRUE; @@ -2667,7 +2690,7 @@ portupdate: return port_lock_dropped; } - + /* * Routine: ipc_importance_inherit_from_kmsg * Purpose: @@ -2685,10 +2708,10 @@ portupdate: static ipc_importance_inherit_t ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) { - ipc_importance_task_t task_imp = IIT_NULL; - ipc_importance_elem_t from_elem = kmsg->ikm_importance; - ipc_importance_elem_t elem; - task_t task_self = current_task(); + ipc_importance_task_t task_imp = IIT_NULL; + ipc_importance_elem_t from_elem = kmsg->ikm_importance; + ipc_importance_elem_t elem; + task_t task_self = current_task(); ipc_port_t port = kmsg->ikm_header->msgh_remote_port; ipc_importance_inherit_t inherit = III_NULL; @@ -2703,7 +2726,7 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) return III_NULL; } - /* + /* * No need to set up an inherit linkage if the dest isn't a receiver * of one type or the other. */ @@ -2736,7 +2759,6 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) cleared_self_donation = TRUE; } inherit = from_inherit; - } else if (III_DEPTH_MAX == III_DEPTH(from_inherit)) { ipc_importance_task_t to_task; ipc_importance_elem_t unlinked_from; @@ -2744,7 +2766,7 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) /* * Chain too long. Switch to looking * directly at the from_inherit's to-task - * as our source of importance. + * as our source of importance. */ to_task = from_inherit->iii_to_task; ipc_importance_task_reference(to_task); @@ -2758,14 +2780,13 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) ipc_importance_inherit_release_locked(from_inherit); /* importance unlocked */ ipc_importance_lock(); - } else { /* inheriting from an inherit */ depth = from_inherit->iii_depth + 1; } - } + } - /* + /* * Don't allow a task to inherit from itself (would keep it permanently * boosted even if all other donors to the task went away). 
*/ @@ -2774,7 +2795,7 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) goto out_locked; } - /* + /* * But if the message isn't associated with any linked source, it is * intended to be permanently boosting (static boost from kernel). * In that case DO let the process permanently boost itself. @@ -2784,8 +2805,8 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) ipc_importance_task_reference_internal(task_imp); from_elem = (ipc_importance_elem_t)task_imp; } - - /* + + /* * Now that we have the from_elem figured out, * check to see if we already have an inherit for this pairing */ @@ -2801,7 +2822,7 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) /* allocate space */ ipc_importance_unlock(); alloc = (ipc_importance_inherit_t) - zalloc(ipc_importance_inherit_zone); + zalloc(ipc_importance_inherit_zone); ipc_importance_lock(); } } @@ -2866,8 +2887,8 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) #endif } - out_locked: - /* +out_locked: + /* * for those paths that came straight here: snapshot the donating status * (this should match previous snapshot for other paths). */ @@ -2906,8 +2927,9 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) ipc_importance_task_release(task_imp); } - if (III_NULL != alloc) + if (III_NULL != alloc) { zfree(ipc_importance_inherit_zone, alloc); + } } else { /* from_elem and task_imp references transferred to new inherit */ ipc_importance_unlock(); @@ -2918,7 +2940,7 @@ ipc_importance_inherit_from_kmsg(ipc_kmsg_t kmsg) * This is OK to do without the importance lock as we atomically * unlinked the kmsg and snapshot the donating state while holding * the importance lock - */ + */ if (donating || cleared_self_donation) { ip_lock(port); /* drop importance from port and destination task */ @@ -2957,9 +2979,9 @@ ipc_importance_inherit_from_task( task_t from_task, task_t to_task) { - ipc_importance_task_t to_task_imp = IIT_NULL; - ipc_importance_task_t from_task_imp = IIT_NULL; - ipc_importance_elem_t from_elem = IIE_NULL; + ipc_importance_task_t to_task_imp = IIT_NULL; + ipc_importance_task_t from_task_imp = IIT_NULL; + ipc_importance_elem_t from_elem = IIE_NULL; ipc_importance_inherit_t inherit = III_NULL; ipc_importance_inherit_t alloc = III_NULL; @@ -3009,7 +3031,7 @@ ipc_importance_inherit_from_task( /* allocate space */ ipc_importance_unlock(); alloc = (ipc_importance_inherit_t) - zalloc(ipc_importance_inherit_zone); + zalloc(ipc_importance_inherit_zone); ipc_importance_lock(); } } @@ -3129,22 +3151,22 @@ out_locked: */ void ipc_importance_receive( - ipc_kmsg_t kmsg, - mach_msg_option_t option) + ipc_kmsg_t kmsg, + mach_msg_option_t option) { unsigned int sender_pid = ((mach_msg_max_trailer_t *) - ((vm_offset_t)kmsg->ikm_header + - round_msg(kmsg->ikm_header->msgh_size)))->msgh_audit.val[5]; + ((vm_offset_t)kmsg->ikm_header + + round_msg(kmsg->ikm_header->msgh_size)))->msgh_audit.val[5]; task_t task_self = current_task(); int impresult = -1; - + /* convert to a voucher with an inherit importance attribute? 
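/*
 * Sketch of the unlock/allocate/relock pattern that appears twice above:
 * the inherit zone cannot be refilled while the importance lock is held,
 * so the code drops the lock, allocates, retakes the lock and repeats the
 * lookup, discarding the fresh element if another thread raced in (the
 * "if (III_NULL != alloc) zfree(...)" path). pthread mutex and malloc are
 * assumed userspace stand-ins for the kernel lock and zalloc()/zfree().
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
static void *toy_existing;               /* the "already have one?" slot */

static void *
toy_lookup_or_alloc(void)
{
	void *alloc = NULL;
	void *found;

	pthread_mutex_lock(&toy_lock);
	for (;;) {
		found = toy_existing;
		if (found != NULL || alloc != NULL) {
			break;           /* reuse, or install our fresh one */
		}
		/* model: no blocking allocation with the lock held */
		pthread_mutex_unlock(&toy_lock);
		alloc = malloc(64);
		pthread_mutex_lock(&toy_lock);
	}
	if (found == NULL) {
		toy_existing = found = alloc;
		alloc = NULL;
	}
	pthread_mutex_unlock(&toy_lock);
	free(alloc);                     /* lost the race: discard ours */
	return found;
}

int
main(void)
{
	return toy_lookup_or_alloc() == toy_lookup_or_alloc() ? 0 : 1;
}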
*/ if ((option & MACH_RCV_VOUCHER) != 0) { - uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) + - sizeof(mach_voucher_attr_value_handle_t)]; + uint8_t recipes[2 * sizeof(ipc_voucher_attr_recipe_data_t) + + sizeof(mach_voucher_attr_value_handle_t)]; ipc_voucher_attr_raw_recipe_array_size_t recipe_size = 0; ipc_voucher_attr_recipe_t recipe = (ipc_voucher_attr_recipe_t)recipes; - ipc_voucher_t recv_voucher; + ipc_voucher_t recv_voucher; mach_voucher_attr_value_handle_t handle; ipc_importance_inherit_t inherit; kern_return_t kr; @@ -3188,20 +3210,20 @@ ipc_importance_receive( recipe_size += sizeof(*recipe) + sizeof(mach_voucher_attr_value_handle_t); kr = ipc_voucher_attr_control_create_mach_voucher(ipc_importance_control, - recipes, - recipe_size, - &recv_voucher); + recipes, + recipe_size, + &recv_voucher); assert(KERN_SUCCESS == kr); /* swap the voucher port (and set voucher bits in case it didn't already exist) */ kmsg->ikm_header->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16); ipc_port_release_send(kmsg->ikm_voucher); kmsg->ikm_voucher = convert_voucher_to_port(recv_voucher); - if (III_NULL != inherit) + if (III_NULL != inherit) { impresult = 2; + } } } else { /* Don't want a voucher */ - /* got linked importance? have to drop */ if (IIE_NULL != kmsg->ikm_importance) { ipc_importance_elem_t elem; @@ -3239,18 +3261,19 @@ ipc_importance_receive( } #if IMPORTANCE_TRACE - if (-1 < impresult) + if (-1 < impresult) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV)) | DBG_FUNC_NONE, - sender_pid, task_pid(task_self), - kmsg->ikm_header->msgh_id, impresult, 0); - if (impresult == 2){ + sender_pid, task_pid(task_self), + kmsg->ikm_header->msgh_id, impresult, 0); + } + if (impresult == 2) { /* * This probe only covers new voucher-based path. Legacy importance - * will trigger the probe in ipc_importance_task_externalize_assertion() + * will trigger the probe in ipc_importance_task_externalize_assertion() * above and have impresult==1 here. */ DTRACE_BOOST5(receive_boost, task_t, task_self, int, task_pid(task_self), int, sender_pid, int, 1, int, task_self->task_imp_base->iit_assertcnt); - } + } #endif /* IMPORTANCE_TRACE */ } @@ -3264,8 +3287,8 @@ ipc_importance_receive( */ void ipc_importance_unreceive( - ipc_kmsg_t kmsg, - mach_msg_option_t __unused option) + ipc_kmsg_t kmsg, + mach_msg_option_t __unused option) { /* importance should already be in the voucher and out of the kmsg */ assert(IIE_NULL == kmsg->ikm_importance); @@ -3299,13 +3322,13 @@ ipc_importance_unreceive( */ void ipc_importance_clean( - ipc_kmsg_t kmsg) + ipc_kmsg_t kmsg) { - ipc_port_t port; + ipc_port_t port; /* Is the kmsg still linked? 
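/*
 * Sketch of the on-stack recipe packing used for MACH_RCV_VOUCHER above:
 * the buffer is sized for two fixed recipe headers plus one value handle,
 * and recipe_size tracks the used prefix as variable-length records
 * (header + content) are appended. toy_recipe is an assumed stand-in for
 * ipc_voucher_attr_recipe_data_t, not its real layout.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

struct toy_recipe {
	uint32_t key;
	uint32_t command;
	uint32_t content_size;           /* bytes that follow the header */
	uint8_t  content[];
};

static size_t
toy_append(uint8_t *buf, size_t used, size_t cap,
    uint32_t key, uint32_t command, const void *content, uint32_t len)
{
	assert(used + sizeof(struct toy_recipe) + len <= cap);
	struct toy_recipe *r = (struct toy_recipe *)(buf + used);
	r->key = key;
	r->command = command;
	r->content_size = len;
	if (len != 0) {
		memcpy(r->content, content, len);
	}
	return used + sizeof(*r) + len;
}

int
main(void)
{
	/* two records, the second carrying a 64-bit handle as content */
	_Alignas(8) uint8_t buf[2 * sizeof(struct toy_recipe) + sizeof(uint64_t)];
	uint64_t handle = 0x1234;
	size_t used = 0;

	used = toy_append(buf, used, sizeof(buf), 1, 0, NULL, 0);
	used = toy_append(buf, used, sizeof(buf), 1, 2, &handle, sizeof(handle));
	assert(used == sizeof(buf));     /* exactly fills the sized buffer */
	return 0;
}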
If so, remove that first */ if (IIE_NULL != kmsg->ikm_importance) { - ipc_importance_elem_t elem; + ipc_importance_elem_t elem; ipc_importance_lock(); elem = ipc_importance_kmsg_unlink(kmsg); @@ -3321,7 +3344,7 @@ ipc_importance_clean( if (IP_VALID(port)) { ip_lock(port); /* inactive ports already had their importance boosts dropped */ - if (!ip_active(port) || + if (!ip_active(port) || ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == FALSE) { ip_unlock(port); } @@ -3332,8 +3355,8 @@ ipc_importance_clean( void ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg) { - assert(IIE_NULL == kmsg->ikm_importance); - assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)); + assert(IIE_NULL == kmsg->ikm_importance); + assert(!MACH_MSGH_BITS_RAISED_IMPORTANCE(kmsg->ikm_header->msgh_bits)); } /* @@ -3342,57 +3365,57 @@ ipc_importance_assert_clean(__assert_only ipc_kmsg_t kmsg) static kern_return_t ipc_importance_release_value( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_key_t key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_key_t key, + mach_voucher_attr_value_handle_t value, + mach_voucher_attr_value_reference_t sync); static kern_return_t ipc_importance_get_value( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_key_t key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t prev_values, - mach_voucher_attr_value_handle_array_size_t prev_value_count, - mach_voucher_attr_content_t content, - mach_voucher_attr_content_size_t content_size, - mach_voucher_attr_value_handle_t *out_value, - mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_key_t key, + mach_voucher_attr_recipe_command_t command, + mach_voucher_attr_value_handle_array_t prev_values, + mach_voucher_attr_value_handle_array_size_t prev_value_count, + mach_voucher_attr_content_t content, + mach_voucher_attr_content_size_t content_size, + mach_voucher_attr_value_handle_t *out_value, + mach_voucher_attr_value_flags_t *out_flags, + ipc_voucher_t *out_value_voucher); static kern_return_t ipc_importance_extract_content( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_key_t key, - mach_voucher_attr_value_handle_array_t values, - mach_voucher_attr_value_handle_array_size_t value_count, - mach_voucher_attr_recipe_command_t *out_command, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *in_out_content_size); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_key_t key, + mach_voucher_attr_value_handle_array_t values, + mach_voucher_attr_value_handle_array_size_t value_count, + mach_voucher_attr_recipe_command_t *out_command, + mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_size_t *in_out_content_size); static kern_return_t ipc_importance_command( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_key_t key, - mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, - mach_voucher_attr_command_t command, - mach_voucher_attr_content_t in_content, - mach_voucher_attr_content_size_t in_content_size, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *out_content_size); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_key_t key, + mach_voucher_attr_value_handle_array_t values, + mach_msg_type_number_t value_count, + mach_voucher_attr_command_t 
command, + mach_voucher_attr_content_t in_content, + mach_voucher_attr_content_size_t in_content_size, + mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_size_t *out_content_size); static void ipc_importance_manager_release( - ipc_voucher_attr_manager_t manager); + ipc_voucher_attr_manager_t manager); struct ipc_voucher_attr_manager ipc_importance_manager = { - .ivam_release_value = ipc_importance_release_value, - .ivam_get_value = ipc_importance_get_value, - .ivam_extract_content = ipc_importance_extract_content, - .ivam_command = ipc_importance_command, - .ivam_release = ipc_importance_manager_release, - .ivam_flags = IVAM_FLAGS_NONE, + .ivam_release_value = ipc_importance_release_value, + .ivam_get_value = ipc_importance_get_value, + .ivam_extract_content = ipc_importance_extract_content, + .ivam_command = ipc_importance_command, + .ivam_release = ipc_importance_manager_release, + .ivam_flags = IVAM_FLAGS_NONE, }; #define IMPORTANCE_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_IMPORTANCE == (key)) @@ -3410,10 +3433,10 @@ struct ipc_voucher_attr_manager ipc_importance_manager = { */ static kern_return_t ipc_importance_release_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_t value, + mach_voucher_attr_value_reference_t sync) { ipc_importance_elem_t elem; @@ -3432,7 +3455,7 @@ ipc_importance_release_value( return KERN_FAILURE; } - /* clear made */ + /* clear made */ elem->iie_made = 0; /* @@ -3487,16 +3510,16 @@ ipc_importance_release_value( */ static kern_return_t ipc_importance_get_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t prev_values, - mach_voucher_attr_value_handle_array_size_t prev_value_count, - mach_voucher_attr_content_t __unused content, - mach_voucher_attr_content_size_t content_size, - mach_voucher_attr_value_handle_t *out_value, - mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_recipe_command_t command, + mach_voucher_attr_value_handle_array_t prev_values, + mach_voucher_attr_value_handle_array_size_t prev_value_count, + mach_voucher_attr_content_t __unused content, + mach_voucher_attr_content_size_t content_size, + mach_voucher_attr_value_handle_t *out_value, + mach_voucher_attr_value_flags_t *out_flags, + ipc_voucher_t *out_value_voucher) { ipc_importance_elem_t elem; task_t self; @@ -3504,14 +3527,14 @@ ipc_importance_get_value( IMPORTANCE_ASSERT_MANAGER(manager); IMPORTANCE_ASSERT_KEY(key); - if (0 != content_size) + if (0 != content_size) { return KERN_INVALID_ARGUMENT; + } *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE; /* never an out voucher */ switch (command) { - case MACH_VOUCHER_ATTR_REDEEM: /* redeem of previous values is the value */ @@ -3542,9 +3565,9 @@ ipc_importance_get_value( *out_value = (mach_voucher_attr_value_handle_t)elem; *out_value_voucher = IPC_VOUCHER_NULL; return KERN_SUCCESS; - + default: - /* + /* * every other command is unknown * * Specifically, there is no mechanism provided to construct an @@ -3568,13 +3591,13 @@ ipc_importance_get_value( */ static kern_return_t 
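/*
 * Sketch of the attribute-manager callout table registered above: a
 * struct of function pointers filled with designated initializers and
 * invoked through the table, so the voucher layer never calls the
 * importance code by name. The toy signatures below are assumptions,
 * much simpler than the mach_voucher_attr_* ones.
 */
#include <assert.h>

struct toy_manager {
	int  (*get_value)(int key, long *out_value);
	int  (*release_value)(int key, long value);
	void (*release)(void);
};

static int
toy_get_value(int key, long *out_value)
{
	(void)key;
	*out_value = 42;
	return 0;
}

static int
toy_release_value(int key, long value)
{
	(void)key; (void)value;
	return 0;
}

static void
toy_release(void)
{
	/* a well-known manager is never released; the kernel panics here */
}

static const struct toy_manager toy_importance_manager = {
	.get_value     = toy_get_value,
	.release_value = toy_release_value,
	.release       = toy_release,
};

int
main(void)
{
	long v;
	assert(toy_importance_manager.get_value(0, &v) == 0 && v == 42);
	return 0;
}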
ipc_importance_extract_content( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t values, - mach_voucher_attr_value_handle_array_size_t value_count, - mach_voucher_attr_recipe_command_t *out_command, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *in_out_content_size) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_array_t values, + mach_voucher_attr_value_handle_array_size_t value_count, + mach_voucher_attr_recipe_command_t *out_command, + mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_size_t *in_out_content_size) { mach_voucher_attr_content_size_t size = 0; ipc_importance_elem_t elem; @@ -3584,15 +3607,16 @@ ipc_importance_extract_content( IMPORTANCE_ASSERT_KEY(key); /* the first non-default value provides the data */ - for (i = 0; i < value_count ; i++) { + for (i = 0; i < value_count && *in_out_content_size > 0; i++) { elem = (ipc_importance_elem_t)values[i]; - if (IIE_NULL == elem) + if (IIE_NULL == elem) { continue; + } snprintf((char *)out_content, *in_out_content_size, "Importance for pid "); size = (mach_voucher_attr_content_size_t)strlen((char *)out_content); - for(;;) { + for (;;) { ipc_importance_inherit_t inherit = III_NULL; ipc_importance_task_t task_imp; task_t task; @@ -3602,28 +3626,29 @@ ipc_importance_extract_content( task_imp = (ipc_importance_task_t)elem; task = task_imp->iit_task; t_pid = (TASK_NULL != task) ? - task_pid(task) : -1; + task_pid(task) : -1; snprintf((char *)out_content + size, *in_out_content_size - size, "%d", t_pid); } else { inherit = (ipc_importance_inherit_t)elem; task_imp = inherit->iii_to_task; task = task_imp->iit_task; t_pid = (TASK_NULL != task) ? - task_pid(task) : -1; - snprintf((char *)out_content + size, *in_out_content_size - size, - "%d (%d of %d boosts) %s from pid ", t_pid, - III_EXTERN(inherit), inherit->iii_externcnt, - (inherit->iii_donating) ? "donated" : "linked"); + task_pid(task) : -1; + snprintf((char *)out_content + size, *in_out_content_size - size, + "%d (%d of %d boosts) %s from pid ", t_pid, + III_EXTERN(inherit), inherit->iii_externcnt, + (inherit->iii_donating) ? 
"donated" : "linked"); } - + size = (mach_voucher_attr_content_size_t)strlen((char *)out_content); - if (III_NULL == inherit) + if (III_NULL == inherit) { break; + } elem = inherit->iii_from_elem; - } - size++; /* account for NULL */ + } + size++; /* account for NULL */ } *out_command = MACH_VOUCHER_ATTR_NOOP; /* cannot be used to regenerate value */ *in_out_content_size = size; @@ -3640,15 +3665,15 @@ ipc_importance_extract_content( */ static kern_return_t ipc_importance_command( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, - mach_voucher_attr_command_t command, - mach_voucher_attr_content_t in_content, - mach_voucher_attr_content_size_t in_content_size, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *out_content_size) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_array_t values, + mach_msg_type_number_t value_count, + mach_voucher_attr_command_t command, + mach_voucher_attr_content_t in_content, + mach_voucher_attr_content_size_t in_content_size, + mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_size_t *out_content_size) { ipc_importance_inherit_t inherit; ipc_importance_task_t to_task; @@ -3683,7 +3708,7 @@ ipc_importance_command( if (III_NULL == inherit) { return KERN_INVALID_ARGUMENT; } - + ipc_importance_lock(); if (0 == refs) { @@ -3727,7 +3752,7 @@ ipc_importance_command( if (ipc_importance_delayed_drop_call != NULL && ipc_importance_task_is_marked_denap_receiver(to_task)) { ipc_importance_task_delayed_drop(to_task); - } + } /* drop task assertions associated with the dropped boosts */ if (ipc_importance_task_check_transition(to_task, IIT_UPDATE_DROP, refs)) { @@ -3769,7 +3794,7 @@ ipc_importance_command( */ static void ipc_importance_manager_release( - ipc_voucher_attr_manager_t __assert_only manager) + ipc_voucher_attr_manager_t __assert_only manager) { IMPORTANCE_ASSERT_MANAGER(manager); panic("Voucher importance manager released"); @@ -3794,19 +3819,19 @@ ipc_importance_init(void) } ipc_importance_task_zone = zinit(sizeof(struct ipc_importance_task), - ipc_importance_max * sizeof(struct ipc_importance_task), - sizeof(struct ipc_importance_task), - "ipc task importance"); + ipc_importance_max * sizeof(struct ipc_importance_task), + sizeof(struct ipc_importance_task), + "ipc task importance"); zone_change(ipc_importance_task_zone, Z_NOENCRYPT, TRUE); ipc_importance_inherit_zone = zinit(sizeof(struct ipc_importance_inherit), - ipc_importance_max * sizeof(struct ipc_importance_inherit), - sizeof(struct ipc_importance_inherit), - "ipc importance inherit"); + ipc_importance_max * sizeof(struct ipc_importance_inherit), + sizeof(struct ipc_importance_inherit), + "ipc importance inherit"); zone_change(ipc_importance_inherit_zone, Z_NOENCRYPT, TRUE); -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG queue_init(&global_iit_alloc_queue); #endif @@ -3814,11 +3839,12 @@ ipc_importance_init(void) ipc_importance_lock_init(); kr = ipc_register_well_known_mach_voucher_attr_manager(&ipc_importance_manager, - (mach_voucher_attr_value_handle_t)0, - MACH_VOUCHER_ATTR_KEY_IMPORTANCE, - &ipc_importance_control); - if (KERN_SUCCESS != kr) + (mach_voucher_attr_value_handle_t)0, + MACH_VOUCHER_ATTR_KEY_IMPORTANCE, + &ipc_importance_control); + if (KERN_SUCCESS != kr) { printf("Voucher importance manager register returned %d", kr); + 
} } /* @@ -3834,8 +3860,8 @@ ipc_importance_thread_call_init(void) { /* initialize delayed drop queue and thread-call */ queue_init(&ipc_importance_delayed_drop_queue); - ipc_importance_delayed_drop_call = - thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL); + ipc_importance_delayed_drop_call = + thread_call_allocate(ipc_importance_task_delayed_drop_scan, NULL); if (NULL == ipc_importance_delayed_drop_call) { panic("ipc_importance_init"); } @@ -3851,10 +3877,10 @@ extern int task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count) { if (kdp_lck_spin_is_acquired(&ipc_importance_lock_data) || - max_count < 1 || - task->task_imp_base == IIT_NULL || - pid_list == NULL || - flags != TASK_IMP_LIST_DONATING_PIDS) { + max_count < 1 || + task->task_imp_base == IIT_NULL || + pid_list == NULL || + flags != TASK_IMP_LIST_DONATING_PIDS) { return 0; } unsigned int pidcount = 0; @@ -3867,13 +3893,13 @@ task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int m queue_iterate(&task_imp->iit_inherits, temp_inherit, ipc_importance_inherit_t, iii_inheritance) { /* check space in buffer */ - if (pidcount >= max_count) + if (pidcount >= max_count) { break; + } previous_pid = target_pid; target_pid = -1; if (temp_inherit->iii_donating) { - #if DEVELOPMENT || DEBUG target_pid = temp_inherit->iii_to_task->iit_bsd_pid; #else @@ -3889,13 +3915,13 @@ task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int m pid_list += sizeof(target_pid); pidcount++; } - } target_pid = 0; queue_iterate(&task_imp->iit_kmsgs, temp_kmsg, ipc_kmsg_t, ikm_inheritance) { - if (pidcount >= max_count) + if (pidcount >= max_count) { break; + } previous_pid = target_pid; target_pid = -1; elem = temp_kmsg->ikm_importance; @@ -3909,8 +3935,8 @@ task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int m continue; } - if (IIE_TYPE_TASK == IIE_TYPE(elem) && - (((ipc_importance_task_t)elem)->iit_task != TASK_NULL)) { + if (IIE_TYPE_TASK == IIE_TYPE(elem) && + (((ipc_importance_task_t)elem)->iit_task != TASK_NULL)) { target_pid = task_pid(((ipc_importance_task_t)elem)->iit_task); } else { temp_inherit = (ipc_importance_inherit_t)elem; @@ -3933,4 +3959,3 @@ task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int m return pidcount; } - diff --git a/osfmk/ipc/ipc_importance.h b/osfmk/ipc/ipc_importance.h index 6f3bc5744..9f69a6af1 100644 --- a/osfmk/ipc/ipc_importance.h +++ b/osfmk/ipc/ipc_importance.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _IPC_IPC_IMPORTANCE_H_ -#define _IPC_IPC_IMPORTANCE_H_ +#ifndef _IPC_IPC_IMPORTANCE_H_ +#define _IPC_IPC_IMPORTANCE_H_ #include #include @@ -59,120 +59,119 @@ */ struct ipc_importance_elem { - uint32_t iie_bits; /* type and refs */ - mach_voucher_attr_value_reference_t iie_made; /* references given to vouchers */ - queue_head_t iie_kmsgs; /* list of kmsgs inheriting from this */ - uint32_t iie_externcnt; /* number of externalized boosts */ - uint32_t iie_externdrop; /* number of those dropped already */ + uint32_t iie_bits; /* type and refs */ + mach_voucher_attr_value_reference_t iie_made; /* references given to vouchers */ + queue_head_t iie_kmsgs; /* list of kmsgs inheriting from this */ + uint32_t iie_externcnt; /* number of externalized boosts */ + uint32_t iie_externdrop; /* number of those dropped already */ #define IIE_REF_DEBUG 0 #if IIE_REF_DEBUG - uint32_t iie_refs_added; /* all refs added via all means */ - uint32_t iie_refs_dropped; /* all refs dropped via all means */ - uint32_t iie_kmsg_refs_added; /* all refs added by kmsgs taking a ref */ - uint32_t iie_kmsg_refs_inherited; /* kmsg refs consumed by a new inherit */ - uint32_t iie_kmsg_refs_coalesced; /* kmsg refs coalesced into an existing inherit */ - uint32_t iie_kmsg_refs_dropped; /* kmsg refs dropped by not accepting msg importance */ - uint32_t iie_task_refs_added; /* refs added by a task reference call */ - uint32_t iie_task_refs_added_inherit_from; /* task references added by inherit from */ - uint32_t iie_task_refs_added_transition; /* task references added by imp transition code */ - uint32_t iie_task_refs_self_added; /* task refs added by self-boost */ - uint32_t iie_task_refs_inherited; /* task refs consumed by a new inherit */ - uint32_t iie_task_refs_coalesced; /* task refs coalesced into an existing inherit */ - uint32_t iie_task_refs_dropped; /* all refs dropped via all task means */ + uint32_t iie_refs_added; /* all refs added via all means */ + uint32_t iie_refs_dropped; /* all refs dropped via all means */ + uint32_t iie_kmsg_refs_added; /* all refs added by kmsgs taking a ref */ + uint32_t iie_kmsg_refs_inherited; /* kmsg refs consumed by a new inherit */ + uint32_t iie_kmsg_refs_coalesced; /* kmsg refs coalesced into an existing inherit */ + uint32_t iie_kmsg_refs_dropped; /* kmsg refs dropped by not accepting msg importance */ + uint32_t iie_task_refs_added; /* refs added by a task reference call */ + uint32_t iie_task_refs_added_inherit_from; /* task references added by inherit from */ + uint32_t iie_task_refs_added_transition; /* task references added by imp transition code */ + uint32_t iie_task_refs_self_added; /* task refs added by self-boost */ + uint32_t iie_task_refs_inherited; /* task refs consumed by a new inherit */ + uint32_t iie_task_refs_coalesced; /* task refs coalesced into an existing inherit */ + uint32_t iie_task_refs_dropped; /* all refs dropped via all task means */ #endif }; -#define IIE_TYPE_MASK 0x80000000 /* Just the high bit for now */ -#define IIE_TYPE_TASK 0x00000000 /* Element is a task element */ -#define IIE_TYPE_INHERIT 0x80000000 /* Element inherits from a previous element */ -#define IIE_TYPE(e) ((e)->iie_bits & IIE_TYPE_MASK) +#define IIE_TYPE_MASK 0x80000000 /* Just the high bit for now */ +#define IIE_TYPE_TASK 0x00000000 /* Element is a task element */ +#define IIE_TYPE_INHERIT 0x80000000 /* Element 
inherits from a previous element */ +#define IIE_TYPE(e) ((e)->iie_bits & IIE_TYPE_MASK) -#define IIE_REFS_MASK 0x7FFFFFFF /* Mask to extract references */ -#define IIE_REFS_MAX 0x7FFFFFFF -#define IIE_REFS(e) ((e)->iie_bits & IIE_REFS_MASK) +#define IIE_REFS_MASK 0x7FFFFFFF /* Mask to extract references */ +#define IIE_REFS_MAX 0x7FFFFFFF +#define IIE_REFS(e) ((e)->iie_bits & IIE_REFS_MASK) -#define IIE_EXTERN(e) ((e)->iie_externcnt - (e)->iie_externdrop) +#define IIE_EXTERN(e) ((e)->iie_externcnt - (e)->iie_externdrop) #if !IIE_REF_DEBUG -#define ipc_importance_reference_internal(elem) \ +#define ipc_importance_reference_internal(elem) \ (hw_atomic_add(&(elem)->iie_bits, 1) & IIE_REFS_MASK) -#define ipc_importance_release_internal(elem) \ +#define ipc_importance_release_internal(elem) \ (hw_atomic_sub(&(elem)->iie_bits, 1) & IIE_REFS_MASK) #endif struct ipc_importance_task { - struct ipc_importance_elem iit_elem; /* common element parts */ - task_t iit_task; /* task associated with */ - queue_head_t iit_inherits; /* list of inherit elems hung off this */ - queue_t iit_updateq; /* queue chained on for task policy updates */ - queue_chain_t iit_updates; /* link on update chain */ - queue_chain_t iit_props; /* link on propagation chain */ - uint64_t iit_updatetime; /* timestamp of our last policy update request */ - uint64_t iit_transitions;/* total number of boost transitions (lifetime) */ - uint32_t iit_assertcnt; /* net number of boost assertions (internal, external and legacy) */ - uint32_t iit_legacy_externcnt; /* Legacy external boost count */ - uint32_t iit_legacy_externdrop; /* Legacy external boost drop count */ - uint32_t iit_receiver:1, /* the task can receive importance boost */ - iit_denap:1, /* the task can be awaked from App Nap */ - iit_donor:1, /* the task always sends boosts regardless of boost status */ - iit_live_donor:1, /* the task temporarily sends boosts regardless of boost status */ - iit_updatepolicy:1, /* enqueue for policy update at the end of propagation */ - iit_reserved:3, /* reserved for future use */ - iit_filelocks:24; /* number of file lock boosts */ + struct ipc_importance_elem iit_elem; /* common element parts */ + task_t iit_task; /* task associated with */ + queue_head_t iit_inherits; /* list of inherit elems hung off this */ + queue_t iit_updateq; /* queue chained on for task policy updates */ + queue_chain_t iit_updates; /* link on update chain */ + queue_chain_t iit_props; /* link on propagation chain */ + uint64_t iit_updatetime; /* timestamp of our last policy update request */ + uint64_t iit_transitions;/* total number of boost transitions (lifetime) */ + uint32_t iit_assertcnt; /* net number of boost assertions (internal, external and legacy) */ + uint32_t iit_legacy_externcnt; /* Legacy external boost count */ + uint32_t iit_legacy_externdrop; /* Legacy external boost drop count */ + uint32_t iit_receiver:1, /* the task can receive importance boost */ + iit_denap:1, /* the task can be awaked from App Nap */ + iit_donor:1, /* the task always sends boosts regardless of boost status */ + iit_live_donor:1, /* the task temporarily sends boosts regardless of boost status */ + iit_updatepolicy:1, /* enqueue for policy update at the end of propagation */ + iit_reserved:3, /* reserved for future use */ + iit_filelocks:24; /* number of file lock boosts */ #if DEVELOPMENT || DEBUG - char iit_procname[20]; /* name of proc */ - uint32_t iit_bsd_pid; /* pid of proc creating this iit */ - queue_chain_t iit_allocation; /* link on global iit allocation chain */ 
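/*
 * Sketch of the iie_bits layout declared above: one 32-bit word holding
 * the element type in the high bit and a 31-bit reference count in the
 * rest, adjusted exactly as ipc_importance_reference_internal() and
 * ipc_importance_release_internal() do. C11 stdatomic is an assumed
 * userspace stand-in for the kernel's hw_atomic_add()/hw_atomic_sub().
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define TOY_TYPE_MASK    0x80000000u     /* IIE_TYPE_MASK analog */
#define TOY_TYPE_INHERIT 0x80000000u     /* IIE_TYPE_INHERIT analog */
#define TOY_REFS_MASK    0x7FFFFFFFu     /* IIE_REFS_MASK analog */

static uint32_t
toy_reference(_Atomic uint32_t *bits)
{
	/* return the new reference count with the type bit masked off */
	return (atomic_fetch_add(bits, 1u) + 1u) & TOY_REFS_MASK;
}

static uint32_t
toy_release(_Atomic uint32_t *bits)
{
	return (atomic_fetch_sub(bits, 1u) - 1u) & TOY_REFS_MASK;
}

int
main(void)
{
	_Atomic uint32_t bits = TOY_TYPE_INHERIT | 1u;  /* one ref, inherit */

	assert(toy_reference(&bits) == 2u);
	assert(toy_release(&bits) == 1u);
	assert((bits & TOY_TYPE_MASK) == TOY_TYPE_INHERIT); /* type survives */
	return 0;
}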
+ char iit_procname[20]; /* name of proc */ + uint32_t iit_bsd_pid; /* pid of proc creating this iit */ + queue_chain_t iit_allocation; /* link on global iit allocation chain */ #endif - }; -#define iit_bits iit_elem.iie_bits -#define iit_made iit_elem.iie_made -#define iit_kmsgs iit_elem.iie_kmsgs -#define iit_externcnt iit_elem.iie_externcnt -#define iit_externdrop iit_elem.iie_externdrop +#define iit_bits iit_elem.iie_bits +#define iit_made iit_elem.iie_made +#define iit_kmsgs iit_elem.iie_kmsgs +#define iit_externcnt iit_elem.iie_externcnt +#define iit_externdrop iit_elem.iie_externdrop -#define IIT_REFS_MAX IIE_REFS_MAX -#define IIT_REFS(t) IIE_REFS(&(t)->iit_elem) -#define IIT_EXTERN(t) IIE_EXTERN(&(t)->iit_elem) -#define IIT_LEGACY_EXTERN(t) ((t)->iit_legacy_externcnt - (t)->iit_legacy_externdrop) +#define IIT_REFS_MAX IIE_REFS_MAX +#define IIT_REFS(t) IIE_REFS(&(t)->iit_elem) +#define IIT_EXTERN(t) IIE_EXTERN(&(t)->iit_elem) +#define IIT_LEGACY_EXTERN(t) ((t)->iit_legacy_externcnt - (t)->iit_legacy_externdrop) #if !IIE_REF_DEBUG -#define ipc_importance_task_reference_internal(task_imp) \ +#define ipc_importance_task_reference_internal(task_imp) \ (ipc_importance_reference_internal(&(task_imp)->iit_elem)) -#define ipc_importance_task_release_internal(task_imp) \ +#define ipc_importance_task_release_internal(task_imp) \ (assert(1 < IIT_REFS(task_imp)), ipc_importance_release_internal(&(task_imp)->iit_elem)) #endif typedef int iit_update_type_t; -#define IIT_UPDATE_HOLD ((iit_update_type_t)1) -#define IIT_UPDATE_DROP ((iit_update_type_t)2) +#define IIT_UPDATE_HOLD ((iit_update_type_t)1) +#define IIT_UPDATE_DROP ((iit_update_type_t)2) struct ipc_importance_inherit { - struct ipc_importance_elem iii_elem; /* common element parts */ - boolean_t iii_donating; /* is this donating importance */ - uint32_t iii_depth; /* nesting depth */ - ipc_importance_task_t iii_to_task; /* donating to */ - ipc_importance_elem_t iii_from_elem; /* other elem contributing */ - queue_chain_t iii_inheritance; /* inherited from link */ + struct ipc_importance_elem iii_elem; /* common element parts */ + boolean_t iii_donating; /* is this donating importance */ + uint32_t iii_depth; /* nesting depth */ + ipc_importance_task_t iii_to_task; /* donating to */ + ipc_importance_elem_t iii_from_elem; /* other elem contributing */ + queue_chain_t iii_inheritance; /* inherited from link */ }; -#define iii_bits iii_elem.iie_bits -#define iii_made iii_elem.iie_made -#define iii_kmsgs iii_elem.iie_kmsgs -#define iii_externcnt iii_elem.iie_externcnt -#define iii_externdrop iii_elem.iie_externdrop -#define III_REFS_MAX IIE_REFS_MAX -#define III_REFS(i) IIE_REFS(&(i)->iii_elem) -#define III_EXTERN(i) IIE_EXTERN(&(i)->iii_elem) - -#define III_DEPTH_RESET 0x80000000 -#define III_DEPTH_MASK 0x000000FF -#define III_DEPTH(i) ((i)->iii_depth & III_DEPTH_MASK) -#define III_DEPTH_MAX 32 /* maximum inherit->inherit chain depth */ - -#define ipc_importance_inherit_reference_internal(inherit) \ +#define iii_bits iii_elem.iie_bits +#define iii_made iii_elem.iie_made +#define iii_kmsgs iii_elem.iie_kmsgs +#define iii_externcnt iii_elem.iie_externcnt +#define iii_externdrop iii_elem.iie_externdrop +#define III_REFS_MAX IIE_REFS_MAX +#define III_REFS(i) IIE_REFS(&(i)->iii_elem) +#define III_EXTERN(i) IIE_EXTERN(&(i)->iii_elem) + +#define III_DEPTH_RESET 0x80000000 +#define III_DEPTH_MASK 0x000000FF +#define III_DEPTH(i) ((i)->iii_depth & III_DEPTH_MASK) +#define III_DEPTH_MAX 32 /* maximum inherit->inherit chain depth */ + +#define
ipc_importance_inherit_reference_internal(inherit) \ (ipc_importance_reference_internal(&(inherit)->iii_elem)) __BEGIN_DECLS @@ -227,18 +226,18 @@ extern boolean_t ipc_importance_check_circularity(ipc_port_t port, ipc_port_t de /* prepare importance attributes for sending */ extern boolean_t ipc_importance_send( - ipc_kmsg_t kmsg, - mach_msg_option_t option); + ipc_kmsg_t kmsg, + mach_msg_option_t option); /* receive importance attributes from message */ extern void ipc_importance_receive( - ipc_kmsg_t kmsg, - mach_msg_option_t option); + ipc_kmsg_t kmsg, + mach_msg_option_t option); /* undo receive of importance attributes from message */ extern void ipc_importance_unreceive( - ipc_kmsg_t kmsg, - mach_msg_option_t option); + ipc_kmsg_t kmsg, + mach_msg_option_t option); /* clean importance attributes out of destroyed message */ extern void ipc_importance_clean(ipc_kmsg_t kmsg); @@ -256,7 +255,7 @@ extern void ipc_importance_thread_call_init(void); extern void task_importance_update_owner_info(task_t task); #endif -#if XNU_KERNEL_PRIVATE +#if XNU_KERNEL_PRIVATE #define TASK_IMP_LIST_DONATING_PIDS 0x1 extern int task_importance_list_pids(task_t task, int flags, char *pid_list, unsigned int max_count); #endif @@ -264,5 +263,5 @@ extern int task_importance_list_pids(task_t task, int flags, char *pid_list, uns __END_DECLS #endif /* MACH_KERNEL_PRIVATE */ - -#endif /* _IPC_IPC_IMPORTANCE_H_ */ + +#endif /* _IPC_IPC_IMPORTANCE_H_ */ diff --git a/osfmk/ipc/ipc_init.c b/osfmk/ipc/ipc_init.c index d8e0917e7..4e45ca60e 100644 --- a/osfmk/ipc/ipc_init.c +++ b/osfmk/ipc/ipc_init.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -115,22 +115,22 @@ vm_map_t ipc_kernel_copy_map; vm_size_t ipc_kernel_copy_map_size = IPC_KERNEL_COPY_MAP_SIZE; vm_size_t ipc_kmsg_max_vm_space = ((IPC_KERNEL_COPY_MAP_SIZE * 7) / 8); -/* +/* * values to limit inline message body handling * avoid copyin/out limits - even after accounting for maximum descriptor expansion. */ #define IPC_KMSG_MAX_SPACE (64 * 1024 * 1024) /* keep in sync with COPYSIZELIMIT_PANIC */ -vm_size_t ipc_kmsg_max_body_space = ((IPC_KMSG_MAX_SPACE * 3)/4 - MAX_TRAILER_SIZE); +vm_size_t ipc_kmsg_max_body_space = ((IPC_KMSG_MAX_SPACE * 3) / 4 - MAX_TRAILER_SIZE); int ipc_space_max; int ipc_port_max; int ipc_pset_max; -lck_grp_t ipc_lck_grp; -lck_attr_t ipc_lck_attr; +lck_grp_t ipc_lck_grp; +lck_attr_t ipc_lck_attr; -static lck_grp_attr_t ipc_lck_grp_attr; +static lck_grp_attr_t ipc_lck_grp_attr; /* * Routine: ipc_bootstrap @@ -143,11 +143,11 @@ void ipc_bootstrap(void) { kern_return_t kr; - + lck_grp_attr_setdefault(&ipc_lck_grp_attr); lck_grp_init(&ipc_lck_grp, "ipc", &ipc_lck_grp_attr); lck_attr_setdefault(&ipc_lck_attr); - + ipc_port_multiple_lock_init(); ipc_port_timestamp_data = 0; @@ -155,28 +155,28 @@ ipc_bootstrap(void) /* all IPC zones should be exhaustible */ ipc_space_zone = zinit(sizeof(struct ipc_space), - ipc_space_max * sizeof(struct ipc_space), - sizeof(struct ipc_space), - "ipc spaces"); + ipc_space_max * sizeof(struct ipc_space), + sizeof(struct ipc_space), + "ipc spaces"); zone_change(ipc_space_zone, Z_NOENCRYPT, TRUE); /* * populate all port(set) zones */ ipc_object_zones[IOT_PORT] = - zinit(sizeof(struct ipc_port), - ipc_port_max * sizeof(struct ipc_port), - sizeof(struct ipc_port), - "ipc ports"); + zinit(sizeof(struct ipc_port), + ipc_port_max * sizeof(struct ipc_port), + sizeof(struct ipc_port), + "ipc ports"); /* cant charge callers for port allocations (references passed) */ zone_change(ipc_object_zones[IOT_PORT], Z_CALLERACCT, FALSE); zone_change(ipc_object_zones[IOT_PORT], Z_NOENCRYPT, TRUE); ipc_object_zones[IOT_PORT_SET] = - zinit(sizeof(struct ipc_pset), - ipc_pset_max * sizeof(struct ipc_pset), - sizeof(struct ipc_pset), - "ipc port sets"); + zinit(sizeof(struct ipc_pset), + ipc_pset_max * sizeof(struct ipc_pset), + sizeof(struct ipc_pset), + "ipc port sets"); zone_change(ipc_object_zones[IOT_PORT_SET], Z_NOENCRYPT, TRUE); /* @@ -184,10 +184,10 @@ ipc_bootstrap(void) * elements at the processor-level to avoid the locking. 
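/*
 * Userspace sketch of the zinit() pattern used above: a zone is created
 * from an element size, a ceiling on total memory, an allocation granule
 * and a name. The toy pool below keeps the first two ideas, fixed-size
 * elements under a hard cap with a free list for reuse; it is an assumed
 * analog, not the kernel zone allocator.
 */
#include <assert.h>
#include <stdlib.h>

struct toy_zone {
	size_t elem_size;
	size_t max_elems;    /* ceiling, like max_bytes / elem_size */
	size_t used;
	void  *free_list;    /* singly linked through the first word */
};

static struct toy_zone
toy_zinit(size_t elem_size, size_t max_bytes)
{
	assert(elem_size >= sizeof(void *));
	struct toy_zone z = { elem_size, max_bytes / elem_size, 0, NULL };
	return z;
}

static void *
toy_zalloc(struct toy_zone *z)
{
	if (z->free_list != NULL) {
		void *e = z->free_list;
		z->free_list = *(void **)e;
		return e;
	}
	if (z->used == z->max_elems) {
		return NULL;             /* zone exhausted */
	}
	z->used++;
	return calloc(1, z->elem_size);
}

static void
toy_zfree(struct toy_zone *z, void *e)
{
	*(void **)e = z->free_list;
	z->free_list = e;
}

int
main(void)
{
	struct toy_zone z = toy_zinit(64, 64 * 2);  /* room for two */
	void *a = toy_zalloc(&z), *b = toy_zalloc(&z);

	assert(a && b && toy_zalloc(&z) == NULL);   /* cap enforced */
	toy_zfree(&z, a);
	assert(toy_zalloc(&z) == a);                /* reused from free list */
	free(a); free(b);
	return 0;
}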
*/ ipc_kmsg_zone = zinit(IKM_SAVED_KMSG_SIZE, - ipc_port_max * MACH_PORT_QLIMIT_DEFAULT * - IKM_SAVED_KMSG_SIZE, - IKM_SAVED_KMSG_SIZE, - "ipc kmsgs"); + ipc_port_max * MACH_PORT_QLIMIT_DEFAULT * + IKM_SAVED_KMSG_SIZE, + IKM_SAVED_KMSG_SIZE, + "ipc kmsgs"); zone_change(ipc_kmsg_zone, Z_CALLERACCT, FALSE); zone_change(ipc_kmsg_zone, Z_CACHING_ENABLED, TRUE); @@ -202,7 +202,7 @@ ipc_bootstrap(void) /* initialize modules with hidden data structures */ -#if MACH_ASSERT +#if MACH_ASSERT ipc_port_debug_init(); #endif mig_init(); @@ -218,8 +218,8 @@ ipc_bootstrap(void) host_notify_init(); } -/* - * XXX tunable, belongs in mach.message.h +/* + * XXX tunable, belongs in mach.message.h */ #define MSG_OOL_SIZE_SMALL_MAX (2*PAGE_SIZE) vm_size_t msg_ool_size_small; @@ -237,38 +237,39 @@ ipc_init(void) vm_offset_t min; retval = kmem_suballoc(kernel_map, &min, ipc_kernel_map_size, - TRUE, - (VM_FLAGS_ANYWHERE), - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_IPC, - &ipc_kernel_map); + TRUE, + (VM_FLAGS_ANYWHERE), + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_IPC, + &ipc_kernel_map); - if (retval != KERN_SUCCESS) + if (retval != KERN_SUCCESS) { panic("ipc_init: kmem_suballoc of ipc_kernel_map failed"); + } retval = kmem_suballoc(kernel_map, &min, ipc_kernel_copy_map_size, - TRUE, - (VM_FLAGS_ANYWHERE), - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_IPC, - &ipc_kernel_copy_map); + TRUE, + (VM_FLAGS_ANYWHERE), + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_IPC, + &ipc_kernel_copy_map); - if (retval != KERN_SUCCESS) + if (retval != KERN_SUCCESS) { panic("ipc_init: kmem_suballoc of ipc_kernel_copy_map failed"); + } ipc_kernel_copy_map->no_zero_fill = TRUE; ipc_kernel_copy_map->wait_for_space = TRUE; /* - * As an optimization, 'small' out of line data regions using a + * As an optimization, 'small' out of line data regions using a * physical copy strategy are copied into kalloc'ed buffers. * The value of 'small' is determined here. Requests kalloc() * with sizes greater or equal to kalloc_max_prerounded may fail. */ - if (kalloc_max_prerounded <= MSG_OOL_SIZE_SMALL_MAX) { + if (kalloc_max_prerounded <= MSG_OOL_SIZE_SMALL_MAX) { msg_ool_size_small = kalloc_max_prerounded; - } - else { + } else { msg_ool_size_small = MSG_OOL_SIZE_SMALL_MAX; } /* account for overhead to avoid spilling over a page */ @@ -276,7 +277,6 @@ ipc_init(void) ipc_host_init(); ux_handler_init(); - } @@ -293,4 +293,3 @@ ipc_thread_call_init(void) ipc_importance_thread_call_init(); #endif } - diff --git a/osfmk/ipc/ipc_init.h b/osfmk/ipc/ipc_init.h index 777c7e482..d41f4f03e 100644 --- a/osfmk/ipc/ipc_init.h +++ b/osfmk/ipc/ipc_init.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
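/*
 * Sketch of the msg_ool_size_small selection above: small out-of-line
 * regions are physically copied into kalloc'ed buffers, so the threshold
 * is the smaller of the kalloc limit and MSG_OOL_SIZE_SMALL_MAX (two
 * pages), less a header allowance so the copy does not spill into an
 * extra page. cpy_hdr_size is an assumed stand-in for the kernel's copy
 * header overhead; the page size is fixed at 4K for the demo.
 */
#include <assert.h>
#include <stddef.h>

#define TOY_PAGE_SIZE     4096u
#define TOY_OOL_SMALL_MAX (2u * TOY_PAGE_SIZE)

static size_t
toy_ool_size_small(size_t kalloc_max_prerounded, size_t cpy_hdr_size)
{
	size_t small = (kalloc_max_prerounded <= TOY_OOL_SMALL_MAX)
	    ? kalloc_max_prerounded : TOY_OOL_SMALL_MAX;
	return small - cpy_hdr_size;     /* leave room for the copy header */
}

int
main(void)
{
	assert(toy_ool_size_small(16384, 64) == TOY_OOL_SMALL_MAX - 64);
	assert(toy_ool_size_small(4096, 64) == 4096 - 64);
	return 0;
}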
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,64 +38,64 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.6.1 1994/09/23 02:07:56 ezf - * change marker to not FREE - * [1994/09/22 21:29:04 ezf] + * change marker to not FREE + * [1994/09/22 21:29:04 ezf] * * Revision 1.1.2.4 1993/07/22 16:16:03 rod - * Add ANSI prototypes. CR #9523. - * [1993/07/22 13:29:57 rod] - * + * Add ANSI prototypes. CR #9523. + * [1993/07/22 13:29:57 rod] + * * Revision 1.1.2.3 1993/06/07 22:10:25 jeffc - * CR9176 - ANSI C violations: trailing tokens on CPP - * directives, extra semicolons after decl_ ..., asm keywords - * [1993/06/07 19:01:24 jeffc] - * + * CR9176 - ANSI C violations: trailing tokens on CPP + * directives, extra semicolons after decl_ ..., asm keywords + * [1993/06/07 19:01:24 jeffc] + * * Revision 1.1.2.2 1993/06/02 23:31:04 jeffc - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 21:09:31 jeffc] - * + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:09:31 jeffc] + * * Revision 1.1 1992/09/30 02:28:50 robert - * Initial revision - * + * Initial revision + * * $EndLog$ */ /* CMU_HIST */ /* * Revision 2.4 91/05/14 16:32:45 mrt - * Correcting copyright - * + * Correcting copyright + * * Revision 2.3 91/02/05 17:21:42 mrt - * Changed to new Mach copyright - * [91/02/01 15:45:16 mrt] - * + * Changed to new Mach copyright + * [91/02/01 15:45:16 mrt] + * * Revision 2.2 90/06/02 14:49:59 rpd - * Created for new IPC. - * [90/03/26 20:55:26 rpd] - * + * Created for new IPC. + * [90/03/26 20:55:26 rpd] + * */ /* CMU_ENDHIST */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -109,7 +109,7 @@ * Declarations of functions to initialize the IPC system. 
*/ -#ifndef _IPC_IPC_INIT_H_ +#ifndef _IPC_IPC_INIT_H_ #define _IPC_IPC_INIT_H_ extern int ipc_space_max; @@ -129,4 +129,4 @@ extern void ipc_init(void); /* IPC initialization dependent on thread call support */ extern void ipc_thread_call_init(void); -#endif /* _IPC_IPC_INIT_H_ */ +#endif /* _IPC_IPC_INIT_H_ */ diff --git a/osfmk/ipc/ipc_kmsg.c b/osfmk/ipc/ipc_kmsg.c index b28449560..803b25bc2 100644 --- a/osfmk/ipc/ipc_kmsg.c +++ b/osfmk/ipc/ipc_kmsg.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -141,38 +141,34 @@ #pragma pack(4) -typedef struct -{ - mach_msg_bits_t msgh_bits; - mach_msg_size_t msgh_size; - mach_port_name_t msgh_remote_port; - mach_port_name_t msgh_local_port; - mach_port_name_t msgh_voucher_port; - mach_msg_id_t msgh_id; +typedef struct{ + mach_msg_bits_t msgh_bits; + mach_msg_size_t msgh_size; + mach_port_name_t msgh_remote_port; + mach_port_name_t msgh_local_port; + mach_port_name_t msgh_voucher_port; + mach_msg_id_t msgh_id; } mach_msg_legacy_header_t; -typedef struct -{ - mach_msg_legacy_header_t header; - mach_msg_body_t body; +typedef struct{ + mach_msg_legacy_header_t header; + mach_msg_body_t body; } mach_msg_legacy_base_t; -typedef struct -{ - mach_port_name_t name; - mach_msg_size_t pad1; - uint32_t pad2 : 16; - mach_msg_type_name_t disposition : 8; - mach_msg_descriptor_type_t type : 8; +typedef struct{ + mach_port_name_t name; + mach_msg_size_t pad1; + uint32_t pad2 : 16; + mach_msg_type_name_t disposition : 8; + mach_msg_descriptor_type_t type : 8; } mach_msg_legacy_port_descriptor_t; -typedef union -{ - mach_msg_legacy_port_descriptor_t port; - mach_msg_ool_descriptor32_t out_of_line32; - mach_msg_ool_ports_descriptor32_t ool_ports32; - mach_msg_type_descriptor_t type; +typedef union{ + mach_msg_legacy_port_descriptor_t port; + mach_msg_ool_descriptor32_t out_of_line32; + mach_msg_ool_ports_descriptor32_t ool_ports32; + mach_msg_type_descriptor_t type; } mach_msg_legacy_descriptor_t; #pragma pack() @@ -184,49 +180,49 @@ typedef union #if DEBUG_MSGS_K64 extern void ipc_pset_print64( - ipc_pset_t pset); + ipc_pset_t pset); -extern void ipc_kmsg_print64( - ipc_kmsg_t kmsg, - const char *str); +extern void ipc_kmsg_print64( + ipc_kmsg_t kmsg, + const char *str); -extern void ipc_msg_print64( - mach_msg_header_t *msgh); +extern void ipc_msg_print64( + mach_msg_header_t *msgh); extern ipc_port_t ipc_name_to_data64( - task_t task, - mach_port_name_t name); + task_t task, + mach_port_name_t name); /* * Forward declarations */ void ipc_msg_print_untyped64( - mach_msg_body_t *body); + mach_msg_body_t *body); const char * ipc_type_name64( - int type_name, - boolean_t received); + int type_name, + boolean_t received); void ipc_print_type_name64( - int type_name); + int type_name); const char * msgh_bit_decode64( - mach_msg_bits_t bit); + mach_msg_bits_t bit); const char * mm_copy_options_string64( - mach_msg_copy_options_t option); + mach_msg_copy_options_t option); void db_print_msg_uid64(mach_msg_header_t *); static void ipc_msg_body_print64(void *body, int size) { - uint32_t *word = (uint32_t *) body; - uint32_t *end = (uint32_t *)(((uintptr_t) body) + size - - sizeof(mach_msg_header_t)); - int i; + uint32_t *word = (uint32_t *) body; + uint32_t *end = (uint32_t *)(((uintptr_t) body) + size + - sizeof(mach_msg_header_t)); + int i; kprintf(" body(%p-%p):\n %p: ", body, end, word); for (;;) { @@ -235,7 +231,7 @@ ipc_msg_body_print64(void *body, int size) kprintf("\n"); return; } - kprintf("%08x ", *word); + kprintf("%08x ", *word); } kprintf("\n %p: ", word); } @@ -244,51 +240,51 @@ ipc_msg_body_print64(void *body, int size) const char * ipc_type_name64( - int type_name, - boolean_t received) + int type_name, + boolean_t received) { switch (type_name) { - case MACH_MSG_TYPE_PORT_NAME: + case MACH_MSG_TYPE_PORT_NAME: return "port_name"; - - case MACH_MSG_TYPE_MOVE_RECEIVE: + + case MACH_MSG_TYPE_MOVE_RECEIVE: if (received) { return "port_receive"; } else { return "move_receive"; } - - case MACH_MSG_TYPE_MOVE_SEND: + + case MACH_MSG_TYPE_MOVE_SEND: if 
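/*
 * Sketch of the legacy 32-bit message layouts restated above: under
 * #pragma pack(4) the legacy header packs to 24 bytes, and a legacy port
 * descriptor packs its disposition and type into the last 32-bit word as
 * 8-bit bitfields. Field names follow the patch; the size asserts state
 * the intent and hold on common ABIs, though bitfield layout is
 * formally implementation-defined.
 */
#include <assert.h>
#include <stdint.h>

#pragma pack(4)
typedef struct {
	uint32_t msgh_bits;
	uint32_t msgh_size;
	uint32_t msgh_remote_port;
	uint32_t msgh_local_port;
	uint32_t msgh_voucher_port;
	int32_t  msgh_id;
} toy_legacy_header_t;

typedef struct {
	uint32_t name;
	uint32_t pad1;
	uint32_t pad2        : 16;
	uint32_t disposition : 8;
	uint32_t type        : 8;
} toy_legacy_port_descriptor_t;
#pragma pack()

int
main(void)
{
	assert(sizeof(toy_legacy_header_t) == 24);
	assert(sizeof(toy_legacy_port_descriptor_t) == 12);
	return 0;
}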
(received) { return "port_send"; } else { return "move_send"; } - - case MACH_MSG_TYPE_MOVE_SEND_ONCE: + + case MACH_MSG_TYPE_MOVE_SEND_ONCE: if (received) { return "port_send_once"; } else { return "move_send_once"; } - - case MACH_MSG_TYPE_COPY_SEND: + + case MACH_MSG_TYPE_COPY_SEND: return "copy_send"; - - case MACH_MSG_TYPE_MAKE_SEND: + + case MACH_MSG_TYPE_MAKE_SEND: return "make_send"; - - case MACH_MSG_TYPE_MAKE_SEND_ONCE: + + case MACH_MSG_TYPE_MAKE_SEND_ONCE: return "make_send_once"; - - default: + + default: return (char *) 0; } } - + void ipc_print_type_name64( - int type_name) + int type_name) { const char *name = ipc_type_name64(type_name, TRUE); if (name) { @@ -303,26 +299,26 @@ ipc_print_type_name64( */ void ipc_kmsg_print64( - ipc_kmsg_t kmsg, - const char *str) + ipc_kmsg_t kmsg, + const char *str) { kprintf("%s kmsg=%p:\n", str, kmsg); kprintf(" next=%p, prev=%p, size=%d", - kmsg->ikm_next, - kmsg->ikm_prev, - kmsg->ikm_size); + kmsg->ikm_next, + kmsg->ikm_prev, + kmsg->ikm_size); kprintf("\n"); ipc_msg_print64(kmsg->ikm_header); } const char * msgh_bit_decode64( - mach_msg_bits_t bit) + mach_msg_bits_t bit) { switch (bit) { - case MACH_MSGH_BITS_COMPLEX: return "complex"; - case MACH_MSGH_BITS_CIRCULAR: return "circular"; - default: return (char *) 0; + case MACH_MSGH_BITS_COMPLEX: return "complex"; + case MACH_MSGH_BITS_CIRCULAR: return "circular"; + default: return (char *) 0; } } @@ -331,35 +327,37 @@ msgh_bit_decode64( */ void ipc_msg_print64( - mach_msg_header_t *msgh) + mach_msg_header_t *msgh) { - mach_msg_bits_t mbits; - unsigned int bit, i; - const char *bit_name; - int needs_comma; + mach_msg_bits_t mbits; + unsigned int bit, i; + const char *bit_name; + int needs_comma; mbits = msgh->msgh_bits; kprintf(" msgh_bits=0x%x: l=0x%x,r=0x%x\n", - mbits, - MACH_MSGH_BITS_LOCAL(msgh->msgh_bits), - MACH_MSGH_BITS_REMOTE(msgh->msgh_bits)); + mbits, + MACH_MSGH_BITS_LOCAL(msgh->msgh_bits), + MACH_MSGH_BITS_REMOTE(msgh->msgh_bits)); mbits = MACH_MSGH_BITS_OTHER(mbits) & MACH_MSGH_BITS_USED; kprintf(" decoded bits: "); needs_comma = 0; for (i = 0, bit = 1; i < sizeof(mbits) * 8; ++i, bit <<= 1) { - if ((mbits & bit) == 0) + if ((mbits & bit) == 0) { continue; + } bit_name = msgh_bit_decode64((mach_msg_bits_t)bit); - if (bit_name) + if (bit_name) { kprintf("%s%s", needs_comma ? "," : "", bit_name); - else + } else { kprintf("%sunknown(0x%x),", needs_comma ? "," : "", bit); + } ++needs_comma; } if (msgh->msgh_bits & ~MACH_MSGH_BITS_USED) { kprintf("%sunused=0x%x,", needs_comma ? "," : "", - msgh->msgh_bits & ~MACH_MSGH_BITS_USED); + msgh->msgh_bits & ~MACH_MSGH_BITS_USED); } kprintf("\n"); @@ -374,7 +372,7 @@ ipc_msg_print64( if (msgh->msgh_local_port) { kprintf("%slocal=%p(", needs_comma ? 
"," : "", - msgh->msgh_local_port); + msgh->msgh_local_port); ipc_print_type_name64(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits)); kprintf(")\n"); } else { @@ -382,10 +380,10 @@ ipc_msg_print64( } kprintf(" msgh_id=%d, size=%d\n", - msgh->msgh_id, - msgh->msgh_size); + msgh->msgh_id, + msgh->msgh_size); - if (mbits & MACH_MSGH_BITS_COMPLEX) { + if (mbits & MACH_MSGH_BITS_COMPLEX) { ipc_msg_print_untyped64((mach_msg_body_t *) (msgh + 1)); } @@ -395,27 +393,27 @@ ipc_msg_print64( const char * mm_copy_options_string64( - mach_msg_copy_options_t option) + mach_msg_copy_options_t option) { - const char *name; + const char *name; switch (option) { - case MACH_MSG_PHYSICAL_COPY: + case MACH_MSG_PHYSICAL_COPY: name = "PHYSICAL"; break; - case MACH_MSG_VIRTUAL_COPY: + case MACH_MSG_VIRTUAL_COPY: name = "VIRTUAL"; break; - case MACH_MSG_OVERWRITE: + case MACH_MSG_OVERWRITE: name = "OVERWRITE(DEPRECATED)"; break; - case MACH_MSG_ALLOCATE: + case MACH_MSG_ALLOCATE: name = "ALLOCATE"; break; - case MACH_MSG_KALLOC_COPY_T: + case MACH_MSG_KALLOC_COPY_T: name = "KALLOC_COPY_T"; break; - default: + default: name = "unknown"; break; } @@ -424,128 +422,126 @@ mm_copy_options_string64( void ipc_msg_print_untyped64( - mach_msg_body_t *body) + mach_msg_body_t *body) { - mach_msg_descriptor_t *saddr, *send; - mach_msg_descriptor_type_t type; + mach_msg_descriptor_t *saddr, *send; + mach_msg_descriptor_type_t type; - kprintf(" %d descriptors: \n", body->msgh_descriptor_count); + kprintf(" %d descriptors: \n", body->msgh_descriptor_count); - saddr = (mach_msg_descriptor_t *) (body + 1); - send = saddr + body->msgh_descriptor_count; + saddr = (mach_msg_descriptor_t *) (body + 1); + send = saddr + body->msgh_descriptor_count; - for ( ; saddr < send; saddr++ ) { - - type = saddr->type.type; + for (; saddr < send; saddr++) { + type = saddr->type.type; - switch (type) { - - case MACH_MSG_PORT_DESCRIPTOR: { - mach_msg_port_descriptor_t *dsc; + switch (type) { + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_port_descriptor_t *dsc; - dsc = &saddr->port; - kprintf(" PORT name = %p disp = ", dsc->name); - ipc_print_type_name64(dsc->disposition); - kprintf("\n"); - break; - } - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_DESCRIPTOR: { - mach_msg_ool_descriptor_t *dsc; - - dsc = (mach_msg_ool_descriptor_t *) &saddr->out_of_line; - kprintf(" OOL%s addr = %p size = 0x%x copy = %s %s\n", - type == MACH_MSG_OOL_DESCRIPTOR ? "" : " VOLATILE", - dsc->address, dsc->size, - mm_copy_options_string64(dsc->copy), - dsc->deallocate ? "DEALLOC" : ""); - break; - } - case MACH_MSG_OOL_PORTS_DESCRIPTOR : { - mach_msg_ool_ports_descriptor_t *dsc; - - dsc = (mach_msg_ool_ports_descriptor_t *) &saddr->ool_ports; - - kprintf(" OOL_PORTS addr = %p count = 0x%x ", - dsc->address, dsc->count); - kprintf("disp = "); - ipc_print_type_name64(dsc->disposition); - kprintf(" copy = %s %s\n", - mm_copy_options_string64(dsc->copy), - dsc->deallocate ? "DEALLOC" : ""); - break; - } + dsc = &saddr->port; + kprintf(" PORT name = %p disp = ", dsc->name); + ipc_print_type_name64(dsc->disposition); + kprintf("\n"); + break; + } + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: { + mach_msg_ool_descriptor_t *dsc; + + dsc = (mach_msg_ool_descriptor_t *) &saddr->out_of_line; + kprintf(" OOL%s addr = %p size = 0x%x copy = %s %s\n", + type == MACH_MSG_OOL_DESCRIPTOR ? "" : " VOLATILE", + dsc->address, dsc->size, + mm_copy_options_string64(dsc->copy), + dsc->deallocate ? 
"DEALLOC" : ""); + break; + } + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + mach_msg_ool_ports_descriptor_t *dsc; + + dsc = (mach_msg_ool_ports_descriptor_t *) &saddr->ool_ports; + + kprintf(" OOL_PORTS addr = %p count = 0x%x ", + dsc->address, dsc->count); + kprintf("disp = "); + ipc_print_type_name64(dsc->disposition); + kprintf(" copy = %s %s\n", + mm_copy_options_string64(dsc->copy), + dsc->deallocate ? "DEALLOC" : ""); + break; + } - default: { - kprintf(" UNKNOWN DESCRIPTOR 0x%x\n", type); - break; - } + default: { + kprintf(" UNKNOWN DESCRIPTOR 0x%x\n", type); + break; + } + } } - } } -#define DEBUG_IPC_KMSG_PRINT(kmsg,string) \ - __unreachable_ok_push \ - if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { \ - ipc_kmsg_print64(kmsg, string); \ - } \ +#define DEBUG_IPC_KMSG_PRINT(kmsg, string) \ + __unreachable_ok_push \ + if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { \ + ipc_kmsg_print64(kmsg, string); \ + } \ __unreachable_ok_pop -#define DEBUG_IPC_MSG_BODY_PRINT(body,size) \ - __unreachable_ok_push \ - if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { \ - ipc_msg_body_print64(body,size);\ - } \ +#define DEBUG_IPC_MSG_BODY_PRINT(body, size) \ + __unreachable_ok_push \ + if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { \ + ipc_msg_body_print64(body,size);\ + } \ __unreachable_ok_pop #else /* !DEBUG_MSGS_K64 */ -#define DEBUG_IPC_KMSG_PRINT(kmsg,string) -#define DEBUG_IPC_MSG_BODY_PRINT(body,size) +#define DEBUG_IPC_KMSG_PRINT(kmsg, string) +#define DEBUG_IPC_MSG_BODY_PRINT(body, size) #endif /* !DEBUG_MSGS_K64 */ -extern vm_map_t ipc_kernel_copy_map; -extern vm_size_t ipc_kmsg_max_space; -extern vm_size_t ipc_kmsg_max_vm_space; -extern vm_size_t ipc_kmsg_max_body_space; -extern vm_size_t msg_ool_size_small; +extern vm_map_t ipc_kernel_copy_map; +extern vm_size_t ipc_kmsg_max_space; +extern vm_size_t ipc_kmsg_max_vm_space; +extern vm_size_t ipc_kmsg_max_body_space; +extern vm_size_t msg_ool_size_small; -#define MSG_OOL_SIZE_SMALL msg_ool_size_small +#define MSG_OOL_SIZE_SMALL msg_ool_size_small #if defined(__LP64__) -#define MAP_SIZE_DIFFERS(map) (map->max_offset < MACH_VM_MAX_ADDRESS) -#define OTHER_OOL_DESCRIPTOR mach_msg_ool_descriptor32_t -#define OTHER_OOL_PORTS_DESCRIPTOR mach_msg_ool_ports_descriptor32_t +#define MAP_SIZE_DIFFERS(map) (map->max_offset < MACH_VM_MAX_ADDRESS) +#define OTHER_OOL_DESCRIPTOR mach_msg_ool_descriptor32_t +#define OTHER_OOL_PORTS_DESCRIPTOR mach_msg_ool_ports_descriptor32_t #else -#define MAP_SIZE_DIFFERS(map) (map->max_offset > VM_MAX_ADDRESS) -#define OTHER_OOL_DESCRIPTOR mach_msg_ool_descriptor64_t -#define OTHER_OOL_PORTS_DESCRIPTOR mach_msg_ool_ports_descriptor64_t +#define MAP_SIZE_DIFFERS(map) (map->max_offset > VM_MAX_ADDRESS) +#define OTHER_OOL_DESCRIPTOR mach_msg_ool_descriptor64_t +#define OTHER_OOL_PORTS_DESCRIPTOR mach_msg_ool_ports_descriptor64_t #endif -#define DESC_SIZE_ADJUSTMENT ((mach_msg_size_t)(sizeof(mach_msg_ool_descriptor64_t) - \ - sizeof(mach_msg_ool_descriptor32_t))) +#define DESC_SIZE_ADJUSTMENT ((mach_msg_size_t)(sizeof(mach_msg_ool_descriptor64_t) - \ + sizeof(mach_msg_ool_descriptor32_t))) /* scatter list macros */ -#define SKIP_PORT_DESCRIPTORS(s, c) \ -MACRO_BEGIN \ - if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \ - while ((c) > 0) { \ - if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \ - break; \ - (s)++; (c)--; \ - } \ - if (c == 0) \ - (s) = MACH_MSG_DESCRIPTOR_NULL; \ - } \ +#define SKIP_PORT_DESCRIPTORS(s, c) \ +MACRO_BEGIN \ + if ((s) != 
MACH_MSG_DESCRIPTOR_NULL) { \ + while ((c) > 0) { \ + if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \ + break; \ + (s)++; (c)--; \ + } \ + if (c == 0) \ + (s) = MACH_MSG_DESCRIPTOR_NULL; \ + } \ MACRO_END -#define INCREMENT_SCATTER(s, c, d) \ -MACRO_BEGIN \ - if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \ - s = (d) ? (mach_msg_descriptor_t *) \ - ((OTHER_OOL_DESCRIPTOR *)(s) + 1) : \ - (s + 1); \ - (c)--; \ - } \ +#define INCREMENT_SCATTER(s, c, d) \ +MACRO_BEGIN \ + if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \ + s = (d) ? (mach_msg_descriptor_t *) \ + ((OTHER_OOL_DESCRIPTOR *)(s) + 1) : \ + (s + 1); \ + (c)--; \ + } \ MACRO_END #define KMSG_TRACE_FLAG_TRACED 0x000001 @@ -583,8 +579,9 @@ MACRO_END #include extern boolean_t kdebug_debugid_enabled(uint32_t debugid); -void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, - mach_msg_option_t option) +void +ipc_kmsg_trace_send(ipc_kmsg_t kmsg, + mach_msg_option_t option) { task_t send_task = TASK_NULL; ipc_port_t dst_port, src_port; @@ -604,35 +601,43 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, * significant amount of processing (and a port lock hold) in * the non-tracing case. */ - if (__probable((kdebug_enable & KDEBUG_TRACE) == 0)) + if (__probable((kdebug_enable & KDEBUG_TRACE) == 0)) { return; - if (!kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO))) + } + if (!kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO))) { return; + } msg = kmsg->ikm_header; dst_port = (ipc_port_t)(msg->msgh_remote_port); - if (!IPC_PORT_VALID(dst_port)) + if (!IPC_PORT_VALID(dst_port)) { return; + } /* * Message properties / options */ - if ((option & (MACH_SEND_MSG|MACH_RCV_MSG)) == (MACH_SEND_MSG|MACH_RCV_MSG)) + if ((option & (MACH_SEND_MSG | MACH_RCV_MSG)) == (MACH_SEND_MSG | MACH_RCV_MSG)) { msg_flags |= KMSG_TRACE_FLAG_SNDRCV; + } if (msg->msgh_id >= is_iokit_subsystem.start && - msg->msgh_id < is_iokit_subsystem.end + 100) + msg->msgh_id < is_iokit_subsystem.end + 100) { msg_flags |= KMSG_TRACE_FLAG_IOKIT; + } /* magic XPC checkin message id (XPC_MESSAGE_ID_CHECKIN) from libxpc */ - else if (msg->msgh_id == 0x77303074u /* w00t */) + else if (msg->msgh_id == 0x77303074u /* w00t */) { msg_flags |= KMSG_TRACE_FLAG_CHECKIN; + } - if (msg->msgh_bits & MACH_MSGH_BITS_RAISEIMP) + if (msg->msgh_bits & MACH_MSGH_BITS_RAISEIMP) { msg_flags |= KMSG_TRACE_FLAG_RAISEIMP; + } - if (unsafe_convert_port_to_voucher(kmsg->ikm_voucher)) + if (unsafe_convert_port_to_voucher(kmsg->ikm_voucher)) { msg_flags |= KMSG_TRACE_FLAG_VOUCHER; + } /* * Sending task / port @@ -641,20 +646,23 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, send_pid = task_pid(send_task); if (send_pid != 0) { - if (task_is_daemon(send_task)) + if (task_is_daemon(send_task)) { msg_flags |= KMSG_TRACE_FLAG_DAEMON_SRC; - else if (task_is_app(send_task)) + } else if (task_is_app(send_task)) { msg_flags |= KMSG_TRACE_FLAG_APP_SRC; + } } is_task_64bit = (send_task->map->max_offset > VM_MAX_ADDRESS); - if (is_task_64bit) + if (is_task_64bit) { msg_flags |= KMSG_TRACE_FLAG_SND64; + } src_port = (ipc_port_t)(msg->msgh_local_port); if (src_port) { - if (src_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) + if (src_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) { msg_flags |= KMSG_TRACE_FLAG_SRC_NDFLTQ; + } switch (MACH_MSGH_BITS_LOCAL(msg->msgh_bits)) { case MACH_MSG_TYPE_MOVE_SEND_ONCE: msg_flags |= KMSG_TRACE_FLAG_SRC_SONCE; @@ -676,10 +684,11 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, dst_pid = (uint32_t)0xfffffff0; } else if (dst_port->ip_tempowner) { msg_flags |= 
KMSG_TRACE_FLAG_DTMPOWNER; - if (IIT_NULL != dst_port->ip_imp_task) + if (IIT_NULL != dst_port->ip_imp_task) { dst_pid = task_pid(dst_port->ip_imp_task->iit_task); - else + } else { dst_pid = (uint32_t)0xfffffff1; + } } else if (dst_port->ip_receiver_name == MACH_PORT_NULL) { /* dst_port is otherwise in-transit */ dst_pid = (uint32_t)0xfffffff2; @@ -691,10 +700,11 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, dst_space = dst_port->ip_receiver; if (dst_space && is_active(dst_space)) { dst_pid = task_pid(dst_space->is_task); - if (task_is_daemon(dst_space->is_task)) + if (task_is_daemon(dst_space->is_task)) { msg_flags |= KMSG_TRACE_FLAG_DAEMON_DST; - else if (task_is_app(dst_space->is_task)) + } else if (task_is_app(dst_space->is_task)) { msg_flags |= KMSG_TRACE_FLAG_APP_DST; + } } else { /* receiving task is being torn down */ dst_pid = (uint32_t)0xfffffff3; @@ -702,10 +712,12 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, } } - if (dst_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) + if (dst_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) { msg_flags |= KMSG_TRACE_FLAG_DST_NDFLTQ; - if (imq_full(&dst_port->ip_messages)) + } + if (imq_full(&dst_port->ip_messages)) { msg_flags |= KMSG_TRACE_FLAG_DSTQFULL; + } kotype = ip_kotype(dst_port); @@ -729,7 +741,7 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, break; } - switch(MACH_MSGH_BITS_REMOTE(msg->msgh_bits)) { + switch (MACH_MSGH_BITS_REMOTE(msg->msgh_bits)) { case MACH_MSG_TYPE_PORT_SEND_ONCE: msg_flags |= KMSG_TRACE_FLAG_DST_SONCE; break; @@ -755,15 +767,17 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, kern_dsc = (mach_msg_descriptor_t *)(msg_body + 1); /* this is gross: see ipc_kmsg_copyin_body()... */ - if (!is_task_64bit) + if (!is_task_64bit) { msg_size -= (dsc_count * 12); + } for (int i = 0; i < dsc_count; i++) { switch (kern_dsc[i].type.type) { case MACH_MSG_PORT_DESCRIPTOR: num_ports++; - if (is_task_64bit) + if (is_task_64bit) { msg_size -= 12; + } break; case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: case MACH_MSG_OOL_DESCRIPTOR: { @@ -773,22 +787,25 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, msg_size += dsc->size; if ((dsc->size >= MSG_OOL_SIZE_SMALL) && (dsc->copy == MACH_MSG_PHYSICAL_COPY) && - !dsc->deallocate) + !dsc->deallocate) { msg_flags |= KMSG_TRACE_FLAG_PCPY; - else if (dsc->size <= MSG_OOL_SIZE_SMALL) + } else if (dsc->size <= MSG_OOL_SIZE_SMALL) { msg_flags |= KMSG_TRACE_FLAG_PCPY; - else + } else { msg_flags |= KMSG_TRACE_FLAG_VCPY; - if (is_task_64bit) + } + if (is_task_64bit) { msg_size -= 16; - } break; + } + } break; case MACH_MSG_OOL_PORTS_DESCRIPTOR: { - mach_msg_ool_ports_descriptor_t *dsc; + mach_msg_ool_ports_descriptor_t *dsc; dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i]; num_ports += dsc->count; - if (is_task_64bit) + if (is_task_64bit) { msg_size -= 16; - } break; + } + } break; default: break; } @@ -799,7 +816,7 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, * Trailer contents */ trailer = (mach_msg_trailer_t *)((vm_offset_t)msg + - round_msg((vm_offset_t)msg->msgh_size)); + round_msg((vm_offset_t)msg->msgh_size)); if (trailer->msgh_trailer_size <= sizeof(mach_msg_security_trailer_t)) { extern security_token_t KERNEL_SECURITY_TOKEN; mach_msg_security_trailer_t *strailer; @@ -809,46 +826,46 @@ void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, * like self-talk because the sending port is not reset. 
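
The comment above sets up the kernel-sender check that follows: traced replies from kernel services would otherwise be attributed to whatever task happens to run the send path. As a standalone sketch of that idea, assuming a two-word token layout and an illustrative token value (the real KERNEL_SECURITY_TOKEN is defined elsewhere in the kernel):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	/* Assumed two-word token, shaped like the security tokens carried
	 * in Mach message trailers. */
	typedef struct { uint32_t val[2]; } sec_token_t;

	/* Illustrative stand-in; the kernel's actual token value may differ. */
	static const sec_token_t kernel_token = { { 0, 1 } };

	/* Attribute a message to the kernel (pid 0) when its sender token
	 * matches, so kernel-generated replies are not mistaken for self-talk. */
	static bool
	sent_by_kernel(const sec_token_t *sender)
	{
		return memcmp(sender, &kernel_token, sizeof(kernel_token)) == 0;
	}
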
*/ if (memcmp(&strailer->msgh_sender, - &KERNEL_SECURITY_TOKEN, - sizeof(KERNEL_SECURITY_TOKEN)) == 0) { + &KERNEL_SECURITY_TOKEN, + sizeof(KERNEL_SECURITY_TOKEN)) == 0) { send_pid = 0; msg_flags &= ~(KMSG_TRACE_FLAG_APP_SRC | KMSG_TRACE_FLAG_DAEMON_SRC); } } - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, - (uintptr_t)send_pid, - (uintptr_t)dst_pid, - (uintptr_t)msg_size, - (uintptr_t)( - ((msg_flags & KMSG_TRACE_FLAGS_MASK) << KMSG_TRACE_FLAGS_SHIFT) | - ((num_ports & KMSG_TRACE_PORTS_MASK) << KMSG_TRACE_PORTS_SHIFT) - ) - ); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, + (uintptr_t)send_pid, + (uintptr_t)dst_pid, + (uintptr_t)msg_size, + (uintptr_t)( + ((msg_flags & KMSG_TRACE_FLAGS_MASK) << KMSG_TRACE_FLAGS_SHIFT) | + ((num_ports & KMSG_TRACE_PORTS_MASK) << KMSG_TRACE_PORTS_SHIFT) + ) + ); } #endif /* zone for cached ipc_kmsg_t structures */ -zone_t ipc_kmsg_zone; +zone_t ipc_kmsg_zone; /* * Forward declarations */ void ipc_kmsg_clean( - ipc_kmsg_t kmsg); + ipc_kmsg_t kmsg); void ipc_kmsg_clean_body( - ipc_kmsg_t kmsg, - mach_msg_type_number_t number, - mach_msg_descriptor_t *desc); + ipc_kmsg_t kmsg, + mach_msg_type_number_t number, + mach_msg_descriptor_t *desc); void ipc_kmsg_clean_partial( - ipc_kmsg_t kmsg, - mach_msg_type_number_t number, - mach_msg_descriptor_t *desc, - vm_offset_t paddr, - vm_size_t length); + ipc_kmsg_t kmsg, + mach_msg_type_number_t number, + mach_msg_descriptor_t *desc, + vm_offset_t paddr, + vm_size_t length); mach_msg_return_t ipc_kmsg_copyin_body( ipc_kmsg_t kmsg, @@ -895,25 +912,28 @@ ipc_kmsg_alloc( mach_msg_size_t size = msg_and_trailer_size - MAX_TRAILER_SIZE; /* compare against implementation upper limit for the body */ - if (size > ipc_kmsg_max_body_space) + if (size > ipc_kmsg_max_body_space) { return IKM_NULL; + } if (size > sizeof(mach_msg_base_t)) { mach_msg_size_t max_desc = (mach_msg_size_t)(((size - sizeof(mach_msg_base_t)) / - sizeof(mach_msg_ool_descriptor32_t)) * - DESC_SIZE_ADJUSTMENT); + sizeof(mach_msg_ool_descriptor32_t)) * + DESC_SIZE_ADJUSTMENT); /* make sure expansion won't cause wrap */ - if (msg_and_trailer_size > MACH_MSG_SIZE_MAX - max_desc) + if (msg_and_trailer_size > MACH_MSG_SIZE_MAX - max_desc) { return IKM_NULL; + } max_expanded_size = msg_and_trailer_size + max_desc; - } else - max_expanded_size = msg_and_trailer_size; - - if (max_expanded_size < IKM_SAVED_MSG_SIZE) - max_expanded_size = IKM_SAVED_MSG_SIZE; /* round up for ikm_cache */ + } else { + max_expanded_size = msg_and_trailer_size; + } + if (max_expanded_size < IKM_SAVED_MSG_SIZE) { + max_expanded_size = IKM_SAVED_MSG_SIZE; /* round up for ikm_cache */ + } if (max_expanded_size == IKM_SAVED_MSG_SIZE) { kmsg = (ipc_kmsg_t)zalloc(ipc_kmsg_zone); } else { @@ -925,7 +945,7 @@ ipc_kmsg_alloc( ikm_set_header(kmsg, msg_and_trailer_size); } - return(kmsg); + return kmsg; } /* @@ -942,16 +962,16 @@ ipc_kmsg_alloc( void ipc_kmsg_free( - ipc_kmsg_t kmsg) + ipc_kmsg_t kmsg) { mach_msg_size_t size = kmsg->ikm_size; ipc_port_t port; assert(!IP_VALID(kmsg->ikm_voucher)); - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_FREE) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM((uintptr_t)kmsg), - 0, 0, 0, 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_FREE) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM((uintptr_t)kmsg), + 0, 0, 0, 0); /* * Check to see if the message is bound to the port. 
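
The ipc_kmsg_alloc() hunk above sizes the buffer for the worst case in which every possible 32-bit user descriptor expands to its larger 64-bit kernel form during copyin. A self-contained sketch of that computation, with assumed sizes standing in for sizeof(mach_msg_base_t), the smallest user descriptor, and DESC_SIZE_ADJUSTMENT:

	#include <stdint.h>

	#define BASE_SIZE      24u         /* assumed header + descriptor count */
	#define MIN_USER_DESC  12u         /* assumed smallest 32-bit descriptor */
	#define DESC_GROWTH    4u          /* assumed per-descriptor expansion */
	#define MSG_SIZE_LIMIT 0xffffffffu

	/* Returns 0 if the padded size would wrap; otherwise a size large
	 * enough that in-place descriptor expansion can never overrun the
	 * buffer. */
	static uint32_t
	max_expanded_size(uint32_t msg_and_trailer_size, uint32_t body_size)
	{
		if (body_size <= BASE_SIZE) {
			return msg_and_trailer_size;
		}
		/* Worst case: the whole body past the base is packed with the
		 * smallest descriptors, and each one grows by DESC_GROWTH. */
		uint32_t max_desc = ((body_size - BASE_SIZE) / MIN_USER_DESC) * DESC_GROWTH;
		if (msg_and_trailer_size > MSG_SIZE_LIMIT - max_desc) {
			return 0;
		}
		return msg_and_trailer_size + max_desc;
	}
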
If so, @@ -968,7 +988,7 @@ ipc_kmsg_free( ip_release(port); return; } - ip_unlock(port); + ip_unlock(port); ip_release(port); /* May be last reference */ } @@ -988,8 +1008,8 @@ ipc_kmsg_free( void ipc_kmsg_enqueue( - ipc_kmsg_queue_t queue, - ipc_kmsg_t kmsg) + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg) { ipc_kmsg_t first = queue->ikmq_base; ipc_kmsg_t last; @@ -1021,8 +1041,8 @@ ipc_kmsg_enqueue( boolean_t ipc_kmsg_enqueue_qos( - ipc_kmsg_queue_t queue, - ipc_kmsg_t kmsg) + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg) { ipc_kmsg_t first = queue->ikmq_base; ipc_kmsg_t prev; @@ -1046,13 +1066,13 @@ ipc_kmsg_enqueue_qos( /* apply QoS overrides towards the head */ override = kmsg->ikm_qos_override; while (prev != kmsg && - override > prev->ikm_qos_override) { + override > prev->ikm_qos_override) { prev->ikm_qos_override = override; prev = prev->ikm_prev; } /* did we adjust everything? */ - return (prev == kmsg); + return prev == kmsg; } /* @@ -1070,7 +1090,7 @@ ipc_kmsg_enqueue_qos( boolean_t ipc_kmsg_override_qos( - ipc_kmsg_queue_t queue, + ipc_kmsg_queue_t queue, ipc_kmsg_t kmsg, mach_msg_priority_t override) { @@ -1080,9 +1100,10 @@ ipc_kmsg_override_qos( /* apply QoS overrides towards the head */ while (override > cur->ikm_qos_override) { cur->ikm_qos_override = override; - if (cur == first) + if (cur == first) { return TRUE; - cur = cur->ikm_prev; + } + cur = cur->ikm_prev; } return FALSE; } @@ -1095,14 +1116,15 @@ ipc_kmsg_override_qos( ipc_kmsg_t ipc_kmsg_dequeue( - ipc_kmsg_queue_t queue) + ipc_kmsg_queue_t queue) { ipc_kmsg_t first; first = ipc_kmsg_queue_first(queue); - if (first != IKM_NULL) + if (first != IKM_NULL) { ipc_kmsg_rmqueue(queue, first); + } return first; } @@ -1115,8 +1137,8 @@ ipc_kmsg_dequeue( void ipc_kmsg_rmqueue( - ipc_kmsg_queue_t queue, - ipc_kmsg_t kmsg) + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg) { ipc_kmsg_t next, prev; @@ -1133,12 +1155,13 @@ ipc_kmsg_rmqueue( } else { if (__improbable(next->ikm_prev != kmsg || prev->ikm_next != kmsg)) { panic("ipc_kmsg_rmqueue: inconsistent prev/next pointers. " - "(prev->next: %p, next->prev: %p, kmsg: %p)", - prev->ikm_next, next->ikm_prev, kmsg); + "(prev->next: %p, next->prev: %p, kmsg: %p)", + prev->ikm_next, next->ikm_prev, kmsg); } - if (queue->ikmq_base == kmsg) + if (queue->ikmq_base == kmsg) { queue->ikmq_base = next; + } next->ikm_prev = prev; prev->ikm_next = next; @@ -1157,16 +1180,17 @@ ipc_kmsg_rmqueue( ipc_kmsg_t ipc_kmsg_queue_next( - ipc_kmsg_queue_t queue, - ipc_kmsg_t kmsg) + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg) { ipc_kmsg_t next; assert(queue->ikmq_base != IKM_NULL); next = kmsg->ikm_next; - if (queue->ikmq_base == next) + if (queue->ikmq_base == next) { next = IKM_NULL; + } return next; } @@ -1183,7 +1207,7 @@ ipc_kmsg_queue_next( void ipc_kmsg_destroy( - ipc_kmsg_t kmsg) + ipc_kmsg_t kmsg) { /* * Destroying a message can cause more messages to be destroyed. @@ -1191,8 +1215,9 @@ ipc_kmsg_destroy( * destruction queue. If this was the first message on the * queue, this instance must process the full queue. */ - if (ipc_kmsg_delayed_destroy(kmsg)) + if (ipc_kmsg_delayed_destroy(kmsg)) { ipc_kmsg_reap_delayed(); + } } /* @@ -1204,7 +1229,8 @@ ipc_kmsg_destroy( * deferred messages. 
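
The ipc_kmsg_enqueue_qos() and ipc_kmsg_override_qos() hunks above maintain one invariant: QoS override values never decrease from the tail toward the head of a port's message queue. A toy version of that walk over a doubly linked list (type and field names are illustrative):

	#include <stdbool.h>

	struct toy_kmsg {
		struct toy_kmsg *prev;
		struct toy_kmsg *next;
		int qos_override;
	};

	/* Walk from 'cur' toward the head, raising any override lower than
	 * 'override'. Returns true when the head itself was reached, meaning
	 * every queued message ahead of 'cur' now reflects the boost. */
	static bool
	propagate_override(struct toy_kmsg *head, struct toy_kmsg *cur, int override)
	{
		while (override > cur->qos_override) {
			cur->qos_override = override;
			if (cur == head) {
				return true;
			}
			cur = cur->prev;
		}
		return false;
	}
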
*/ -boolean_t ipc_kmsg_delayed_destroy( +boolean_t +ipc_kmsg_delayed_destroy( ipc_kmsg_t kmsg) { ipc_kmsg_queue_t queue = &(current_thread()->ith_messages); @@ -1252,86 +1278,87 @@ ipc_kmsg_reap_delayed(void) static unsigned int _ipc_kmsg_clean_invalid_desc = 0; void ipc_kmsg_clean_body( - __unused ipc_kmsg_t kmsg, - mach_msg_type_number_t number, - mach_msg_descriptor_t *saddr) + __unused ipc_kmsg_t kmsg, + mach_msg_type_number_t number, + mach_msg_descriptor_t *saddr) { - mach_msg_type_number_t i; + mach_msg_type_number_t i; - if ( number == 0 ) - return; + if (number == 0) { + return; + } - for (i = 0 ; i < number; i++, saddr++ ) { - - switch (saddr->type.type) { - - case MACH_MSG_PORT_DESCRIPTOR: { - mach_msg_port_descriptor_t *dsc; + for (i = 0; i < number; i++, saddr++) { + switch (saddr->type.type) { + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_port_descriptor_t *dsc; - dsc = &saddr->port; + dsc = &saddr->port; - /* - * Destroy port rights carried in the message - */ - if (!IO_VALID((ipc_object_t) dsc->name)) - continue; - ipc_object_destroy((ipc_object_t) dsc->name, dsc->disposition); - break; - } - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_DESCRIPTOR : { - mach_msg_ool_descriptor_t *dsc; - - dsc = (mach_msg_ool_descriptor_t *)&saddr->out_of_line; - - /* - * Destroy memory carried in the message - */ - if (dsc->size == 0) { - assert(dsc->address == (void *) 0); - } else { - vm_map_copy_discard((vm_map_copy_t) dsc->address); + /* + * Destroy port rights carried in the message + */ + if (!IO_VALID((ipc_object_t) dsc->name)) { + continue; + } + ipc_object_destroy((ipc_object_t) dsc->name, dsc->disposition); + break; } - break; - } - case MACH_MSG_OOL_PORTS_DESCRIPTOR : { - ipc_object_t *objects; - mach_msg_type_number_t j; - mach_msg_ool_ports_descriptor_t *dsc; + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: { + mach_msg_ool_descriptor_t *dsc; - dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports; - objects = (ipc_object_t *) dsc->address; + dsc = (mach_msg_ool_descriptor_t *)&saddr->out_of_line; - if (dsc->count == 0) { + /* + * Destroy memory carried in the message + */ + if (dsc->size == 0) { + assert(dsc->address == (void *) 0); + } else { + vm_map_copy_discard((vm_map_copy_t) dsc->address); + } break; } + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + ipc_object_t *objects; + mach_msg_type_number_t j; + mach_msg_ool_ports_descriptor_t *dsc; - assert(objects != (ipc_object_t *) 0); - - /* destroy port rights carried in the message */ - - for (j = 0; j < dsc->count; j++) { - ipc_object_t object = objects[j]; - - if (!IO_VALID(object)) - continue; - - ipc_object_destroy(object, dsc->disposition); - } + dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports; + objects = (ipc_object_t *) dsc->address; - /* destroy memory carried in the message */ + if (dsc->count == 0) { + break; + } - assert(dsc->count != 0); + assert(objects != (ipc_object_t *) 0); - kfree(dsc->address, - (vm_size_t) dsc->count * sizeof(mach_port_t)); - break; - } - default : { - _ipc_kmsg_clean_invalid_desc++; /* don't understand this type of descriptor */ - } + /* destroy port rights carried in the message */ + + for (j = 0; j < dsc->count; j++) { + ipc_object_t object = objects[j]; + + if (!IO_VALID(object)) { + continue; + } + + ipc_object_destroy(object, dsc->disposition); + } + + /* destroy memory carried in the message */ + + assert(dsc->count != 0); + + kfree(dsc->address, + (vm_size_t) dsc->count * sizeof(mach_port_t)); + break; + } + default: { + 
_ipc_kmsg_clean_invalid_desc++; /* don't understand this type of descriptor */ + } + } } - } } /* @@ -1349,11 +1376,11 @@ ipc_kmsg_clean_body( void ipc_kmsg_clean_partial( - ipc_kmsg_t kmsg, - mach_msg_type_number_t number, - mach_msg_descriptor_t *desc, - vm_offset_t paddr, - vm_size_t length) + ipc_kmsg_t kmsg, + mach_msg_type_number_t number, + mach_msg_descriptor_t *desc, + vm_offset_t paddr, + vm_size_t length) { ipc_object_t object; mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits; @@ -1366,8 +1393,9 @@ ipc_kmsg_clean_partial( ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits)); object = (ipc_object_t) kmsg->ikm_header->msgh_local_port; - if (IO_VALID(object)) + if (IO_VALID(object)) { ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits)); + } object = (ipc_object_t) kmsg->ikm_voucher; if (IO_VALID(object)) { @@ -1394,7 +1422,7 @@ ipc_kmsg_clean_partial( void ipc_kmsg_clean( - ipc_kmsg_t kmsg) + ipc_kmsg_t kmsg) { ipc_object_t object; mach_msg_bits_t mbits; @@ -1404,12 +1432,14 @@ ipc_kmsg_clean( mbits = kmsg->ikm_header->msgh_bits; object = (ipc_object_t) kmsg->ikm_header->msgh_remote_port; - if (IO_VALID(object)) + if (IO_VALID(object)) { ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits)); + } object = (ipc_object_t) kmsg->ikm_header->msgh_local_port; - if (IO_VALID(object)) + if (IO_VALID(object)) { ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits)); + } object = (ipc_object_t) kmsg->ikm_voucher; if (IO_VALID(object)) { @@ -1423,7 +1453,7 @@ ipc_kmsg_clean( body = (mach_msg_body_t *) (kmsg->ikm_header + 1); ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count, - (mach_msg_descriptor_t *)(body + 1)); + (mach_msg_descriptor_t *)(body + 1)); } } @@ -1437,11 +1467,11 @@ ipc_kmsg_clean( void ipc_kmsg_set_prealloc( - ipc_kmsg_t kmsg, - ipc_port_t port) + ipc_kmsg_t kmsg, + ipc_port_t port) { assert(kmsg->ikm_prealloc == IP_NULL); - + kmsg->ikm_prealloc = IP_NULL; assert(port_send_turnstile(port) == TURNSTILE_NULL); @@ -1458,8 +1488,8 @@ ipc_kmsg_set_prealloc( */ void ipc_kmsg_clear_prealloc( - ipc_kmsg_t kmsg, - ipc_port_t port) + ipc_kmsg_t kmsg, + ipc_port_t port) { /* take the mqueue lock since the turnstile is protected under it */ imq_lock(&port->ip_messages); @@ -1479,8 +1509,9 @@ ipc_kmsg_t ipc_kmsg_prealloc(mach_msg_size_t size) { #if defined(__LP64__) - if (size > MACH_MSG_SIZE_MAX - LEGACY_HEADER_SIZE_DELTA) + if (size > MACH_MSG_SIZE_MAX - LEGACY_HEADER_SIZE_DELTA) { return IKM_NULL; + } size += LEGACY_HEADER_SIZE_DELTA; #endif @@ -1506,38 +1537,43 @@ ipc_kmsg_prealloc(mach_msg_size_t size) mach_msg_return_t ipc_kmsg_get( - mach_vm_address_t msg_addr, - mach_msg_size_t size, - ipc_kmsg_t *kmsgp) + mach_vm_address_t msg_addr, + mach_msg_size_t size, + ipc_kmsg_t *kmsgp) { - mach_msg_size_t msg_and_trailer_size; - ipc_kmsg_t kmsg; - mach_msg_max_trailer_t *trailer; - mach_msg_legacy_base_t legacy_base; + mach_msg_size_t msg_and_trailer_size; + ipc_kmsg_t kmsg; + mach_msg_max_trailer_t *trailer; + mach_msg_legacy_base_t legacy_base; mach_msg_size_t len_copied; legacy_base.body.msgh_descriptor_count = 0; - if ((size < sizeof(mach_msg_legacy_header_t)) || (size & 3)) + if ((size < sizeof(mach_msg_legacy_header_t)) || (size & 3)) { return MACH_SEND_MSG_TOO_SMALL; + } - if (size > ipc_kmsg_max_body_space) + if (size > ipc_kmsg_max_body_space) { return MACH_SEND_TOO_LARGE; + } - if(size == sizeof(mach_msg_legacy_header_t)) + if (size == sizeof(mach_msg_legacy_header_t)) { len_copied = sizeof(mach_msg_legacy_header_t); - else + } else { len_copied = 
sizeof(mach_msg_legacy_base_t); + } - if (copyinmsg(msg_addr, (char *)&legacy_base, len_copied)) + if (copyinmsg(msg_addr, (char *)&legacy_base, len_copied)) { return MACH_SEND_INVALID_DATA; + } /* * If the message claims to be complex, it must at least * have the length of a "base" message (header + dsc_count). */ if (len_copied < sizeof(mach_msg_legacy_base_t) && - (legacy_base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX)) + (legacy_base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX)) { return MACH_SEND_MSG_TOO_SMALL; + } msg_addr += sizeof(legacy_base.header); #if defined(__LP64__) @@ -1547,37 +1583,38 @@ ipc_kmsg_get( __unreachable_ok_push if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { unsigned int j; - for (j=0; jikm_header->msgh_size = size; - kmsg->ikm_header->msgh_bits = legacy_base.header.msgh_bits; - kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_remote_port); - kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_local_port); - kmsg->ikm_header->msgh_voucher_port = legacy_base.header.msgh_voucher_port; - kmsg->ikm_header->msgh_id = legacy_base.header.msgh_id; + kmsg->ikm_header->msgh_size = size; + kmsg->ikm_header->msgh_bits = legacy_base.header.msgh_bits; + kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_remote_port); + kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_local_port); + kmsg->ikm_header->msgh_voucher_port = legacy_base.header.msgh_voucher_port; + kmsg->ikm_header->msgh_id = legacy_base.header.msgh_id; DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_get header:\n" - " size: 0x%.8x\n" - " bits: 0x%.8x\n" - " remote_port: %p\n" - " local_port: %p\n" - " voucher_port: 0x%.8x\n" - " id: %.8d\n", - kmsg->ikm_header->msgh_size, - kmsg->ikm_header->msgh_bits, - kmsg->ikm_header->msgh_remote_port, - kmsg->ikm_header->msgh_local_port, - kmsg->ikm_header->msgh_voucher_port, - kmsg->ikm_header->msgh_id); + " size: 0x%.8x\n" + " bits: 0x%.8x\n" + " remote_port: %p\n" + " local_port: %p\n" + " voucher_port: 0x%.8x\n" + " id: %.8d\n", + kmsg->ikm_header->msgh_size, + kmsg->ikm_header->msgh_bits, + kmsg->ikm_header->msgh_remote_port, + kmsg->ikm_header->msgh_local_port, + kmsg->ikm_header->msgh_voucher_port, + kmsg->ikm_header->msgh_id); if (copyinmsg(msg_addr, (char *)(kmsg->ikm_header + 1), size - (mach_msg_size_t)sizeof(mach_msg_header_t))) { ipc_kmsg_free(kmsg); @@ -1586,19 +1623,17 @@ ipc_kmsg_get( /* unreachable if !DEBUG */ __unreachable_ok_push - if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) - { + if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { kprintf("body: size: %lu\n", (size - sizeof(mach_msg_header_t))); uint32_t i; - for(i=0;i*4 < (size - sizeof(mach_msg_header_t));i++) - { - kprintf("%.4x\n",((uint32_t *)(kmsg->ikm_header + 1))[i]); + for (i = 0; i * 4 < (size - sizeof(mach_msg_header_t)); i++) { + kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]); } } __unreachable_ok_pop DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_get()"); - /* + /* * I reserve for the trailer the largest space (MAX_TRAILER_SIZE) * However, the internal size field of the trailer (msgh_trailer_size) * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize @@ -1611,9 +1646,11 @@ ipc_kmsg_get( trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; #ifdef ppc - if(trcWork.traceMask) dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id, - (unsigned int)kmsg->ikm_header->msgh_remote_port, - 
(unsigned int)kmsg->ikm_header->msgh_local_port, 0); + if (trcWork.traceMask) { + dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id, + (unsigned int)kmsg->ikm_header->msgh_remote_port, + (unsigned int)kmsg->ikm_header->msgh_local_port, 0); + } #endif trailer->msgh_labels.sender = 0; @@ -1639,14 +1676,14 @@ ipc_kmsg_get( mach_msg_return_t ipc_kmsg_get_from_kernel( - mach_msg_header_t *msg, - mach_msg_size_t size, - ipc_kmsg_t *kmsgp) + mach_msg_header_t *msg, + mach_msg_size_t size, + ipc_kmsg_t *kmsgp) { - ipc_kmsg_t kmsg; - mach_msg_size_t msg_and_trailer_size; + ipc_kmsg_t kmsg; + mach_msg_size_t msg_and_trailer_size; mach_msg_max_trailer_t *trailer; - ipc_port_t dest_port; + ipc_port_t dest_port; assert(size >= sizeof(mach_msg_header_t)); assert((size & 3) == 0); @@ -1678,7 +1715,7 @@ ipc_kmsg_get_from_kernel( if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) { assert(size > sizeof(mach_msg_base_t)); max_desc = ((mach_msg_base_t *)msg)->body.msgh_descriptor_count * - DESC_SIZE_ADJUSTMENT; + DESC_SIZE_ADJUSTMENT; } #endif if (msg_and_trailer_size > kmsg->ikm_size - max_desc) { @@ -1688,12 +1725,11 @@ ipc_kmsg_get_from_kernel( ikm_prealloc_set_inuse(kmsg, dest_port); ikm_set_header(kmsg, msg_and_trailer_size); ip_unlock(dest_port); - } - else - { + } else { kmsg = ipc_kmsg_alloc(msg_and_trailer_size); - if (kmsg == IKM_NULL) + if (kmsg == IKM_NULL) { return MACH_SEND_NO_BUFFER; + } } (void) memcpy((void *) kmsg->ikm_header, (const void *) msg, size); @@ -1702,14 +1738,14 @@ ipc_kmsg_get_from_kernel( kmsg->ikm_header->msgh_size = size; - /* + /* * I reserve for the trailer the largest space (MAX_TRAILER_SIZE) * However, the internal size field of the trailer (msgh_trailer_size) * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to * optimize the cases where no implicit data is requested. 
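
As this comment describes, the kernel reserves room for the largest trailer but initially advertises only the minimal one; receivers requesting richer trailer elements grow msgh_trailer_size later without moving any data. A compact model of that layout, with assumed byte counts in place of MAX_TRAILER_SIZE and sizeof(mach_msg_trailer_t):

	#include <stdint.h>
	#include <string.h>

	#define MAX_TRAILER_BYTES 68u /* assumed maximum trailer footprint */
	#define MIN_TRAILER_BYTES 8u  /* assumed minimal trailer (type + size) */

	struct toy_trailer {
		uint32_t trailer_type;
		uint32_t trailer_size; /* what the receiver is told is present */
	};

	/* Place a trailer right after 'msg_size' bytes of message. The caller
	 * is assumed to have reserved MAX_TRAILER_BYTES there; only the
	 * minimum is advertised so the common receive path copies as little
	 * as possible. */
	static struct toy_trailer *
	init_trailer(void *msg, uint32_t msg_size)
	{
		struct toy_trailer *t = (struct toy_trailer *)((char *)msg + msg_size);

		memset(t, 0, MAX_TRAILER_BYTES);
		t->trailer_type = 0;
		t->trailer_size = MIN_TRAILER_BYTES;
		return t;
	}
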
*/ - trailer = (mach_msg_max_trailer_t *) - ((vm_offset_t)kmsg->ikm_header + size); + trailer = (mach_msg_max_trailer_t *) + ((vm_offset_t)kmsg->ikm_header + size); trailer->msgh_sender = KERNEL_SECURITY_TOKEN; trailer->msgh_audit = KERNEL_AUDIT_TOKEN; trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; @@ -1741,9 +1777,9 @@ ipc_kmsg_get_from_kernel( */ mach_msg_return_t ipc_kmsg_send( - ipc_kmsg_t kmsg, - mach_msg_option_t option, - mach_msg_timeout_t send_timeout) + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_msg_timeout_t send_timeout) { ipc_port_t port; thread_t th = current_thread(); @@ -1770,7 +1806,7 @@ ipc_kmsg_send( /* don't allow the creation of a circular loop */ if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_CIRCULAR) { ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_MSGH_BITS_CIRCULAR); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_MSGH_BITS_CIRCULAR); return MACH_MSG_SUCCESS; } @@ -1791,8 +1827,9 @@ retry: if (!ip_active(port)) { ip_unlock(port); #if MACH_FLIPC - if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) - flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE); + if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) { + flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE); + } #endif if (did_importance) { /* @@ -1808,12 +1845,11 @@ retry: ip_release(port); /* JMM - Future: release right, not just ref */ kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL; ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST); return MACH_MSG_SUCCESS; } if (port->ip_receiver == ipc_space_kernel) { - /* * We can check ip_receiver == ipc_space_kernel * before checking that the port is active because @@ -1830,18 +1866,20 @@ retry: * Call the server routine, and get the reply message to send. */ kmsg = ipc_kobject_server(kmsg, option); - if (kmsg == IKM_NULL) + if (kmsg == IKM_NULL) { return MACH_MSG_SUCCESS; + } /* restart the KMSG_INFO tracing for the reply message */ - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); port = (ipc_port_t) kmsg->ikm_header->msgh_remote_port; assert(IP_VALID(port)); ip_lock(port); /* fall thru with reply - same options */ kernel_reply = TRUE; - if (!ip_active(port)) + if (!ip_active(port)) { error = MACH_SEND_INVALID_DEST; + } } #if IMPORTANCE_INHERITANCE @@ -1852,8 +1890,9 @@ retry: */ if (!did_importance) { did_importance = true; - if (ipc_importance_send(kmsg, option)) - goto retry; + if (ipc_importance_send(kmsg, option)) { + goto retry; + } } #endif /* IMPORTANCE_INHERITANCE */ @@ -1872,43 +1911,43 @@ retry: ip_unlock(port); error = ipc_mqueue_send(&port->ip_messages, kmsg, option, - send_timeout); + send_timeout); } #if IMPORTANCE_INHERITANCE if (did_importance) { __unused int importance_cleared = 0; switch (error) { - case MACH_SEND_TIMED_OUT: - case MACH_SEND_NO_BUFFER: - case MACH_SEND_INTERRUPTED: - case MACH_SEND_INVALID_DEST: - /* - * We still have the kmsg and its - * reference on the port. But we - * have to back out the importance - * boost. - * - * The port could have changed hands, - * be inflight to another destination, - * etc... 
But in those cases our - * back-out will find the new owner - * (and all the operations that - * transferred the right should have - * applied their own boost adjustments - * to the old owner(s)). - */ - importance_cleared = 1; - ipc_importance_clean(kmsg); - break; + case MACH_SEND_TIMED_OUT: + case MACH_SEND_NO_BUFFER: + case MACH_SEND_INTERRUPTED: + case MACH_SEND_INVALID_DEST: + /* + * We still have the kmsg and its + * reference on the port. But we + * have to back out the importance + * boost. + * + * The port could have changed hands, + * be inflight to another destination, + * etc... But in those cases our + * back-out will find the new owner + * (and all the operations that + * transferred the right should have + * applied their own boost adjustments + * to the old owner(s)). + */ + importance_cleared = 1; + ipc_importance_clean(kmsg); + break; - case MACH_MSG_SUCCESS: - default: - break; + case MACH_MSG_SUCCESS: + default: + break; } #if IMPORTANCE_TRACE KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_END, - task_pid(current_task()), sender_pid, imp_msgh_id, importance_cleared, 0); + task_pid(current_task()), sender_pid, imp_msgh_id, importance_cleared, 0); #endif /* IMPORTANCE_TRACE */ } #endif /* IMPORTANCE_INHERITANCE */ @@ -1919,13 +1958,14 @@ retry: */ if (error == MACH_SEND_INVALID_DEST) { #if MACH_FLIPC - if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) - flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE); + if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) { + flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE); + } #endif ip_release(port); /* JMM - Future: release right, not just ref */ kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL; ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST); return MACH_MSG_SUCCESS; } @@ -1936,13 +1976,14 @@ retry: * the message as a successful delivery. 
*/ #if MACH_FLIPC - if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) - flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE); + if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) { + flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE); + } #endif ip_release(port); /* JMM - Future: release right, not just ref */ kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL; ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, error); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, error); return MACH_MSG_SUCCESS; } return error; @@ -1964,12 +2005,12 @@ retry: mach_msg_return_t ipc_kmsg_put( - ipc_kmsg_t kmsg, - mach_msg_option_t option, - mach_vm_address_t rcv_addr, - mach_msg_size_t rcv_size, - mach_msg_size_t trailer_size, - mach_msg_size_t *sizep) + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_vm_address_t rcv_addr, + mach_msg_size_t rcv_size, + mach_msg_size_t trailer_size, + mach_msg_size_t *sizep) { mach_msg_size_t size = kmsg->ikm_header->msgh_size + trailer_size; mach_msg_return_t mr; @@ -1978,37 +2019,37 @@ ipc_kmsg_put( DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_put header:\n" - " size: 0x%.8x\n" - " bits: 0x%.8x\n" - " remote_port: %p\n" - " local_port: %p\n" - " voucher_port: 0x%.8x\n" - " id: %.8d\n", - kmsg->ikm_header->msgh_size, - kmsg->ikm_header->msgh_bits, - kmsg->ikm_header->msgh_remote_port, - kmsg->ikm_header->msgh_local_port, - kmsg->ikm_header->msgh_voucher_port, - kmsg->ikm_header->msgh_id); + " size: 0x%.8x\n" + " bits: 0x%.8x\n" + " remote_port: %p\n" + " local_port: %p\n" + " voucher_port: 0x%.8x\n" + " id: %.8d\n", + kmsg->ikm_header->msgh_size, + kmsg->ikm_header->msgh_bits, + kmsg->ikm_header->msgh_remote_port, + kmsg->ikm_header->msgh_local_port, + kmsg->ikm_header->msgh_voucher_port, + kmsg->ikm_header->msgh_id); #if defined(__LP64__) if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg; */ - mach_msg_legacy_header_t *legacy_header = - (mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA); + mach_msg_legacy_header_t *legacy_header = + (mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA); - mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits; - mach_msg_size_t msg_size = kmsg->ikm_header->msgh_size; - mach_port_name_t remote_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port); - mach_port_name_t local_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_local_port); - mach_port_name_t voucher_port = kmsg->ikm_header->msgh_voucher_port; - mach_msg_id_t id = kmsg->ikm_header->msgh_id; + mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits; + mach_msg_size_t msg_size = kmsg->ikm_header->msgh_size; + mach_port_name_t remote_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port); + mach_port_name_t local_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_local_port); + mach_port_name_t voucher_port = kmsg->ikm_header->msgh_voucher_port; + mach_msg_id_t id = kmsg->ikm_header->msgh_id; - legacy_header->msgh_id = id; + legacy_header->msgh_id = id; legacy_header->msgh_local_port = local_port; legacy_header->msgh_remote_port = remote_port; legacy_header->msgh_voucher_port = voucher_port; - legacy_header->msgh_size = msg_size - LEGACY_HEADER_SIZE_DELTA; - legacy_header->msgh_bits = bits; + legacy_header->msgh_size = msg_size - LEGACY_HEADER_SIZE_DELTA; + legacy_header->msgh_bits = bits; size -= 
LEGACY_HEADER_SIZE_DELTA; kmsg->ikm_header = (mach_msg_header_t *)legacy_header; @@ -2020,15 +2061,14 @@ ipc_kmsg_put( if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { kprintf("ipc_kmsg_put header+body: %d\n", (size)); uint32_t i; - for(i=0;i*4 < size;i++) - { - kprintf("%.4x\n",((uint32_t *)kmsg->ikm_header)[i]); + for (i = 0; i * 4 < size; i++) { + kprintf("%.4x\n", ((uint32_t *)kmsg->ikm_header)[i]); } - kprintf("type: %d\n", ((mach_msg_type_descriptor_t *)(((mach_msg_base_t *)kmsg->ikm_header)+1))->type); + kprintf("type: %d\n", ((mach_msg_type_descriptor_t *)(((mach_msg_base_t *)kmsg->ikm_header) + 1))->type); } __unreachable_ok_pop - /* Re-Compute target address if using stack-style delivery */ + /* Re-Compute target address if using stack-style delivery */ if (option & MACH_RCV_STACK) { rcv_addr += rcv_size - size; } @@ -2036,20 +2076,22 @@ ipc_kmsg_put( if (copyoutmsg((const char *) kmsg->ikm_header, rcv_addr, size)) { mr = MACH_RCV_INVALID_DATA; size = 0; - } else + } else { mr = MACH_MSG_SUCCESS; + } - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE, - (rcv_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS || - rcv_addr + size >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) ? (uintptr_t)0 : (uintptr_t)rcv_addr, - VM_KERNEL_ADDRPERM((uintptr_t)kmsg), - 1 /* this is on the receive/copyout path */, - 0, - 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE, + (rcv_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS || + rcv_addr + size >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) ? (uintptr_t)0 : (uintptr_t)rcv_addr, + VM_KERNEL_ADDRPERM((uintptr_t)kmsg), + 1 /* this is on the receive/copyout path */, + 0, + 0); ipc_kmsg_free(kmsg); - if (sizep) + if (sizep) { *sizep = size; + } return mr; } @@ -2065,9 +2107,9 @@ ipc_kmsg_put( void ipc_kmsg_put_to_kernel( - mach_msg_header_t *msg, - ipc_kmsg_t kmsg, - mach_msg_size_t size) + mach_msg_header_t *msg, + ipc_kmsg_t kmsg, + mach_msg_size_t size) { (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, size); @@ -2111,8 +2153,9 @@ ipc_kmsg_set_qos( if (options & MACH_SEND_OVERRIDE) { pthread_priority_t pp = _pthread_priority_normalize_for_ipc(override); - if (pp > kmsg->ikm_qos) + if (pp > kmsg->ikm_qos) { kmsg->ikm_qos_override = (mach_msg_priority_t)pp; + } } kr = KERN_SUCCESS; @@ -2156,9 +2199,9 @@ ipc_kmsg_set_qos( mach_msg_return_t ipc_kmsg_copyin_header( ipc_kmsg_t kmsg, - ipc_space_t space, + ipc_space_t space, mach_msg_priority_t override, - mach_msg_option_t *optionp) + mach_msg_option_t *optionp) { mach_msg_header_t *msg = kmsg->ikm_header; mach_msg_bits_t mbits = msg->msgh_bits & MACH_MSGH_BITS_USER; @@ -2190,12 +2233,14 @@ ipc_kmsg_copyin_header( if ((mbits != msg->msgh_bits) || (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type)) || ((reply_type == 0) ? - (reply_name != MACH_PORT_NULL) : - !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))) + (reply_name != MACH_PORT_NULL) : + !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))) { return MACH_SEND_INVALID_HEADER; + } - if (!MACH_PORT_VALID(dest_name)) + if (!MACH_PORT_VALID(dest_name)) { return MACH_SEND_INVALID_DEST; + } is_write_lock(space); if (!is_active(space)) { @@ -2210,12 +2255,11 @@ ipc_kmsg_copyin_header( * actually copy in until we validate destination and reply. 
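
A step worth unpacking from the ipc_kmsg_put() hunk above: on LP64, the kernel-form header is rewritten in place into the smaller user-form header before copyout, sliding forward by LEGACY_HEADER_SIZE_DELTA so the body that follows never moves. A schematic with simplified layouts (the 8-byte delta and the cast-based port-pointer-to-name translation are assumptions of the sketch):

	#include <stdint.h>

	struct khdr {                  /* simplified kernel-form header */
		uint32_t bits, size;
		uint64_t remote, local;    /* pointer-width port references */
		uint32_t voucher;
		int32_t  id;
	};

	struct uhdr {                  /* simplified user-form header */
		uint32_t bits, size;
		uint32_t remote, local;    /* 32-bit port names */
		uint32_t voucher;
		int32_t  id;
	};

	#define HDR_DELTA ((uint32_t)(sizeof(struct khdr) - sizeof(struct uhdr)))

	/* The user header begins HDR_DELTA bytes into the same buffer, so the
	 * two layouts overlap: read every field out first, then write. */
	static struct uhdr *
	shrink_header(struct khdr *k)
	{
		struct khdr tmp = *k;
		struct uhdr *u = (struct uhdr *)((char *)k + HDR_DELTA);

		u->id      = tmp.id;
		u->local   = (uint32_t)tmp.local;  /* illustrative name translation */
		u->remote  = (uint32_t)tmp.remote;
		u->voucher = tmp.voucher;
		u->size    = tmp.size - HDR_DELTA;
		u->bits    = tmp.bits;
		return u;
	}
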
*/ if (voucher_type != MACH_MSGH_BITS_ZERO) { - voucher_name = msg->msgh_voucher_port; if (voucher_name == MACH_PORT_DEAD || (voucher_type != MACH_MSG_TYPE_MOVE_SEND && - voucher_type != MACH_MSG_TYPE_COPY_SEND)) { + voucher_type != MACH_MSG_TYPE_COPY_SEND)) { is_write_unlock(space); if ((*optionp & MACH_SEND_KERNEL) == 0) { mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER); @@ -2225,7 +2269,7 @@ ipc_kmsg_copyin_header( if (voucher_name != MACH_PORT_NULL) { voucher_entry = ipc_entry_lookup(space, voucher_name); - if (voucher_entry == IE_NULL || + if (voucher_entry == IE_NULL || (voucher_entry->ie_bits & MACH_PORT_TYPE_SEND) == 0 || io_kotype(voucher_entry->ie_object) != IKOT_VOUCHER) { is_write_unlock(space); @@ -2245,7 +2289,6 @@ ipc_kmsg_copyin_header( */ if (dest_name == voucher_name) { - /* * If the destination name is the same as the voucher name, * the voucher_entry must already be known. Either that or @@ -2278,44 +2321,43 @@ ipc_kmsg_copyin_header( } } - /* - * Do the joint copyin of the dest disposition and + /* + * Do the joint copyin of the dest disposition and * voucher disposition from the one entry/port. We * already validated that the voucher copyin would * succeed (above). So, any failure in combining * the copyins can be blamed on the destination. */ kr = ipc_right_copyin_two(space, dest_name, dest_entry, - dest_type, voucher_type, - &dest_port, &dest_soright, - &release_port); + dest_type, voucher_type, + &dest_port, &dest_soright, + &release_port); if (kr != KERN_SUCCESS) { assert(kr != KERN_INVALID_CAPABILITY); goto invalid_dest; } voucher_port = (ipc_port_t)dest_port; - /* - * could not have been one of these dispositions, + /* + * could not have been one of these dispositions, * validated the port was a true kernel voucher port above, * AND was successfully able to copyin both dest and voucher. */ assert(dest_type != MACH_MSG_TYPE_MAKE_SEND); assert(dest_type != MACH_MSG_TYPE_MAKE_SEND_ONCE); assert(dest_type != MACH_MSG_TYPE_MOVE_SEND_ONCE); - + /* * Perform the delayed reply right copyin (guaranteed success). */ if (reply_entry != IE_NULL) { kr = ipc_right_copyin(space, reply_name, reply_entry, - reply_type, TRUE, - &reply_port, &reply_soright, - &release_port, &assertcnt); + reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK, + &reply_port, &reply_soright, + &release_port, &assertcnt); assert(assertcnt == 0); assert(kr == KERN_SUCCESS); } - } else { if (dest_name == reply_name) { /* @@ -2332,22 +2374,20 @@ ipc_kmsg_copyin_header( reply_entry = dest_entry; assert(reply_type != 0); /* because name not null */ - /* - * Do the joint copyin of the dest disposition and + /* + * Do the joint copyin of the dest disposition and * reply disposition from the one entry/port. */ kr = ipc_right_copyin_two(space, dest_name, dest_entry, - dest_type, reply_type, - &dest_port, &dest_soright, - &release_port); + dest_type, reply_type, + &dest_port, &dest_soright, + &release_port); if (kr == KERN_INVALID_CAPABILITY) { goto invalid_reply; } else if (kr != KERN_SUCCESS) { goto invalid_dest; } reply_port = dest_port; - - } else { /* * Handle destination and reply independently, as @@ -2404,9 +2444,9 @@ ipc_kmsg_copyin_header( * copyin the destination. 
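
A substantive change threaded through this and the following hunks swaps ipc_right_copyin()'s boolean dead-name toggle (TRUE/FALSE) for explicit IPC_RIGHT_COPYIN_FLAGS_* values. In miniature, with the flag names taken from the diff and a toy body standing in for the real right lookup (the exact semantics of the send-once flag are an assumption here):

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t copyin_flags_t;
	#define COPYIN_FLAGS_NONE                 0x0u
	#define COPYIN_FLAGS_DEADOK               0x1u
	#define COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE 0x2u

	/* New-style call: the caller's tolerance for dead names is spelled out
	 * at the call site instead of being an opaque TRUE/FALSE argument. */
	static int
	right_copyin(copyin_flags_t flags, bool name_is_dead, bool is_send_once)
	{
		if (name_is_dead) {
			if (flags & COPYIN_FLAGS_DEADOK) {
				return 0;
			}
			if (is_send_once && (flags & COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE)) {
				return 0;
			}
			return -1;
		}
		return 0;
	}
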
*/ kr = ipc_right_copyin(space, dest_name, dest_entry, - dest_type, FALSE, - &dest_port, &dest_soright, - &release_port, &assertcnt); + dest_type, IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE, + &dest_port, &dest_soright, + &release_port, &assertcnt); assert(assertcnt == 0); if (kr != KERN_SUCCESS) { goto invalid_dest; @@ -2420,9 +2460,9 @@ ipc_kmsg_copyin_header( */ if (MACH_PORT_VALID(reply_name)) { kr = ipc_right_copyin(space, reply_name, reply_entry, - reply_type, TRUE, - &reply_port, &reply_soright, - &release_port, &assertcnt); + reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK, + &reply_port, &reply_soright, + &release_port, &assertcnt); assert(assertcnt == 0); assert(kr == KERN_SUCCESS); } else { @@ -2437,11 +2477,11 @@ ipc_kmsg_copyin_header( */ if (IE_NULL != voucher_entry) { kr = ipc_right_copyin(space, voucher_name, voucher_entry, - voucher_type, FALSE, - (ipc_object_t *)&voucher_port, - &voucher_soright, - &voucher_release_port, - &assertcnt); + voucher_type, IPC_RIGHT_COPYIN_FLAGS_NONE, + (ipc_object_t *)&voucher_port, + &voucher_soright, + &voucher_release_port, + &assertcnt); assert(assertcnt == 0); assert(KERN_SUCCESS == kr); assert(IP_VALID(voucher_port)); @@ -2458,8 +2498,9 @@ ipc_kmsg_copyin_header( * but reply and voucher must be distinct entries. */ assert(IE_NULL != dest_entry); - if (IE_NULL != reply_entry) + if (IE_NULL != reply_entry) { assert(reply_entry != voucher_entry); + } if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) { ipc_entry_dealloc(space, dest_name, dest_entry); @@ -2494,7 +2535,7 @@ ipc_kmsg_copyin_header( * early (send may NOT have failed), but better than missing. We assure * we won't miss by forcing MACH_SEND_ALWAYS if we got past arming. */ - if (((*optionp & MACH_SEND_NOTIFY) != 0) && + if (((*optionp & MACH_SEND_NOTIFY) != 0) && dest_type != MACH_MSG_TYPE_PORT_SEND_ONCE && dest_entry != IE_NULL && dest_entry->ie_request != IE_REQ_NONE) { ipc_port_t dport = (ipc_port_t)dest_port; @@ -2504,17 +2545,18 @@ ipc_kmsg_copyin_header( if (ip_active(dport) && dport->ip_receiver != ipc_space_kernel) { if (ip_full(dport)) { #if IMPORTANCE_INHERITANCE - needboost = ipc_port_request_sparm(dport, dest_name, - dest_entry->ie_request, - *optionp, - override); - if (needboost == FALSE) + needboost = ipc_port_request_sparm(dport, dest_name, + dest_entry->ie_request, + *optionp, + override); + if (needboost == FALSE) { ip_unlock(dport); + } #else ipc_port_request_sparm(dport, dest_name, - dest_entry->ie_request, - *optionp, - override); + dest_entry->ie_request, + *optionp, + override); ip_unlock(dport); #endif /* IMPORTANCE_INHERITANCE */ } else { @@ -2529,7 +2571,7 @@ ipc_kmsg_copyin_header( is_write_unlock(space); #if IMPORTANCE_INHERITANCE - /* + /* * If our request is the first boosting send-possible * notification this cycle, push the boost down the * destination port. @@ -2560,7 +2602,6 @@ ipc_kmsg_copyin_header( * qos, and apply any override before we enqueue the kmsg. 
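
Per this comment, the voucher move is followed by capturing the kmsg's QoS and applying any send-time override before the message is enqueued. A toy version of that capture, in which an override may raise but never lower the effective value (types simplified):

	#include <stdint.h>

	typedef uint32_t qos_t;

	struct toy_qos {
		qos_t base;     /* requested QoS at send time */
		qos_t override; /* effective value the queue will sort by */
	};

	/* Record the base QoS, then fold in an optional override: overrides
	 * are one-directional and can only boost the message. */
	static void
	capture_qos(struct toy_qos *q, qos_t base, qos_t send_override)
	{
		q->base = base;
		q->override = base;
		if (send_override > q->override) {
			q->override = send_override;
		}
	}
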
*/ if (IP_VALID(voucher_port)) { - kmsg->ikm_voucher = voucher_port; voucher_type = MACH_MSG_TYPE_MOVE_SEND; } @@ -2572,19 +2613,22 @@ ipc_kmsg_copyin_header( /* capture the qos value(s) for the kmsg */ ipc_kmsg_set_qos(kmsg, *optionp, override); - if (release_port != IP_NULL) + if (release_port != IP_NULL) { ip_release(release_port); + } - if (voucher_release_port != IP_NULL) + if (voucher_release_port != IP_NULL) { ip_release(voucher_release_port); + } return MACH_MSG_SUCCESS; invalid_reply: is_write_unlock(space); - if (release_port != IP_NULL) + if (release_port != IP_NULL) { ip_release(release_port); + } assert(voucher_port == IP_NULL); assert(voucher_soright == IP_NULL); @@ -2597,11 +2641,13 @@ invalid_reply: invalid_dest: is_write_unlock(space); - if (release_port != IP_NULL) + if (release_port != IP_NULL) { ip_release(release_port); + } - if (reply_soright != IP_NULL) + if (reply_soright != IP_NULL) { ipc_notify_port_deleted(reply_soright, reply_name); + } assert(voucher_port == IP_NULL); assert(voucher_soright == IP_NULL); @@ -2619,7 +2665,7 @@ mach_msg_descriptor_t *ipc_kmsg_copyin_port_descriptor( mach_msg_return_t *mr); void ipc_print_type_name( - int type_name); + int type_name); mach_msg_descriptor_t * ipc_kmsg_copyin_port_descriptor( @@ -2631,42 +2677,41 @@ ipc_kmsg_copyin_port_descriptor( mach_msg_option_t *optionp, mach_msg_return_t *mr) { - volatile mach_msg_legacy_port_descriptor_t *user_dsc = user_dsc_in; - mach_msg_type_name_t user_disp; - mach_msg_type_name_t result_disp; - mach_port_name_t name; - ipc_object_t object; - - user_disp = user_dsc->disposition; - result_disp = ipc_object_copyin_type(user_disp); - - name = (mach_port_name_t)user_dsc->name; - if (MACH_PORT_VALID(name)) { - - kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object); - if (kr != KERN_SUCCESS) { + volatile mach_msg_legacy_port_descriptor_t *user_dsc = user_dsc_in; + mach_msg_type_name_t user_disp; + mach_msg_type_name_t result_disp; + mach_port_name_t name; + ipc_object_t object; + + user_disp = user_dsc->disposition; + result_disp = ipc_object_copyin_type(user_disp); + + name = (mach_port_name_t)user_dsc->name; + if (MACH_PORT_VALID(name)) { + kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object); + if (kr != KERN_SUCCESS) { if ((*optionp & MACH_SEND_KERNEL) == 0) { mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT); } - *mr = MACH_SEND_INVALID_RIGHT; - return NULL; - } - - if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) && - ipc_port_check_circularity((ipc_port_t) object, - (ipc_port_t) dest)) { - kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR; - } - dsc->name = (ipc_port_t) object; - } else { - dsc->name = CAST_MACH_NAME_TO_PORT(name); - } - dsc->disposition = result_disp; - dsc->type = MACH_MSG_PORT_DESCRIPTOR; - - dsc->pad_end = 0; // debug, unnecessary - - return (mach_msg_descriptor_t *)(user_dsc_in+1); + *mr = MACH_SEND_INVALID_RIGHT; + return NULL; + } + + if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) && + ipc_port_check_circularity((ipc_port_t) object, + (ipc_port_t) dest)) { + kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR; + } + dsc->name = (ipc_port_t) object; + } else { + dsc->name = CAST_MACH_NAME_TO_PORT(name); + } + dsc->disposition = result_disp; + dsc->type = MACH_MSG_PORT_DESCRIPTOR; + + dsc->pad_end = 0; // debug, unnecessary + + return (mach_msg_descriptor_t *)(user_dsc_in + 1); } mach_msg_descriptor_t * ipc_kmsg_copyin_ool_descriptor( @@ -2692,97 +2737,95 @@ ipc_kmsg_copyin_ool_descriptor( __unused 
mach_msg_option_t *optionp, mach_msg_return_t *mr) { - vm_size_t length; - boolean_t dealloc; - mach_msg_copy_options_t copy_options; - mach_vm_offset_t addr; - mach_msg_descriptor_type_t dsc_type; - - if (is_64bit) { - mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - - addr = (mach_vm_offset_t) user_ool_dsc->address; - length = user_ool_dsc->size; - dealloc = user_ool_dsc->deallocate; - copy_options = user_ool_dsc->copy; - dsc_type = user_ool_dsc->type; - - user_dsc = (typeof(user_dsc))(user_ool_dsc+1); - } else { - mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - - addr = CAST_USER_ADDR_T(user_ool_dsc->address); - dealloc = user_ool_dsc->deallocate; - copy_options = user_ool_dsc->copy; - dsc_type = user_ool_dsc->type; - length = user_ool_dsc->size; - - user_dsc = (typeof(user_dsc))(user_ool_dsc+1); - } - - dsc->size = (mach_msg_size_t)length; - dsc->deallocate = dealloc; - dsc->copy = copy_options; - dsc->type = dsc_type; - - if (length == 0) { - dsc->address = NULL; - } else if ((length >= MSG_OOL_SIZE_SMALL) && - (copy_options == MACH_MSG_PHYSICAL_COPY) && !dealloc) { - - /* - * If the request is a physical copy and the source - * is not being deallocated, then allocate space - * in the kernel's pageable ipc copy map and copy - * the data in. The semantics guarantee that the - * data will have been physically copied before - * the send operation terminates. Thus if the data - * is not being deallocated, we must be prepared - * to page if the region is sufficiently large. - */ - if (copyin(addr, (char *)*paddr, length)) { - *mr = MACH_SEND_INVALID_MEMORY; - return NULL; - } - - /* - * The kernel ipc copy map is marked no_zero_fill. - * If the transfer is not a page multiple, we need - * to zero fill the balance. - */ - if (!page_aligned(length)) { - (void) memset((void *) (*paddr + length), 0, - round_page(length) - length); - } - if (vm_map_copyin(ipc_kernel_copy_map, (vm_map_address_t)*paddr, - (vm_map_size_t)length, TRUE, copy) != KERN_SUCCESS) { - *mr = MACH_MSG_VM_KERNEL; - return NULL; - } - dsc->address = (void *)*copy; - *paddr += round_page(length); - *space_needed -= round_page(length); - } else { - - /* - * Make a vm_map_copy_t of the of the data. If the - * data is small, this will do an optimized physical - * copy. Otherwise, it will do a virtual copy. - * - * NOTE: A virtual copy is OK if the original is being - * deallocted, even if a physical copy was requested. - */ - kern_return_t kr = vm_map_copyin(map, addr, - (vm_map_size_t)length, dealloc, copy); - if (kr != KERN_SUCCESS) { - *mr = (kr == KERN_RESOURCE_SHORTAGE) ? 
- MACH_MSG_VM_KERNEL : - MACH_SEND_INVALID_MEMORY; - return NULL; - } - dsc->address = (void *)*copy; - } - return user_dsc; + vm_size_t length; + boolean_t dealloc; + mach_msg_copy_options_t copy_options; + mach_vm_offset_t addr; + mach_msg_descriptor_type_t dsc_type; + + if (is_64bit) { + mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + + addr = (mach_vm_offset_t) user_ool_dsc->address; + length = user_ool_dsc->size; + dealloc = user_ool_dsc->deallocate; + copy_options = user_ool_dsc->copy; + dsc_type = user_ool_dsc->type; + + user_dsc = (typeof(user_dsc))(user_ool_dsc + 1); + } else { + mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + + addr = CAST_USER_ADDR_T(user_ool_dsc->address); + dealloc = user_ool_dsc->deallocate; + copy_options = user_ool_dsc->copy; + dsc_type = user_ool_dsc->type; + length = user_ool_dsc->size; + + user_dsc = (typeof(user_dsc))(user_ool_dsc + 1); + } + + dsc->size = (mach_msg_size_t)length; + dsc->deallocate = dealloc; + dsc->copy = copy_options; + dsc->type = dsc_type; + + if (length == 0) { + dsc->address = NULL; + } else if ((length >= MSG_OOL_SIZE_SMALL) && + (copy_options == MACH_MSG_PHYSICAL_COPY) && !dealloc) { + /* + * If the request is a physical copy and the source + * is not being deallocated, then allocate space + * in the kernel's pageable ipc copy map and copy + * the data in. The semantics guarantee that the + * data will have been physically copied before + * the send operation terminates. Thus if the data + * is not being deallocated, we must be prepared + * to page if the region is sufficiently large. + */ + if (copyin(addr, (char *)*paddr, length)) { + *mr = MACH_SEND_INVALID_MEMORY; + return NULL; + } + + /* + * The kernel ipc copy map is marked no_zero_fill. + * If the transfer is not a page multiple, we need + * to zero fill the balance. + */ + if (!page_aligned(length)) { + (void) memset((void *) (*paddr + length), 0, + round_page(length) - length); + } + if (vm_map_copyin(ipc_kernel_copy_map, (vm_map_address_t)*paddr, + (vm_map_size_t)length, TRUE, copy) != KERN_SUCCESS) { + *mr = MACH_MSG_VM_KERNEL; + return NULL; + } + dsc->address = (void *)*copy; + *paddr += round_page(length); + *space_needed -= round_page(length); + } else { + /* + * Make a vm_map_copy_t of the of the data. If the + * data is small, this will do an optimized physical + * copy. Otherwise, it will do a virtual copy. + * + * NOTE: A virtual copy is OK if the original is being + * deallocted, even if a physical copy was requested. + */ + kern_return_t kr = vm_map_copyin(map, addr, + (vm_map_size_t)length, dealloc, copy); + if (kr != KERN_SUCCESS) { + *mr = (kr == KERN_RESOURCE_SHORTAGE) ? 
+ MACH_MSG_VM_KERNEL : + MACH_SEND_INVALID_MEMORY; + return NULL; + } + dsc->address = (void *)*copy; + } + return user_dsc; } mach_msg_descriptor_t * ipc_kmsg_copyin_ool_ports_descriptor( @@ -2808,138 +2851,140 @@ ipc_kmsg_copyin_ool_ports_descriptor( mach_msg_option_t *optionp, mach_msg_return_t *mr) { - void *data; - ipc_object_t *objects; - unsigned int i; - mach_vm_offset_t addr; - mach_msg_type_name_t user_disp; - mach_msg_type_name_t result_disp; - mach_msg_type_number_t count; - mach_msg_copy_options_t copy_option; - boolean_t deallocate; - mach_msg_descriptor_type_t type; - vm_size_t ports_length, names_length; - - if (is_64bit) { - mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - - addr = (mach_vm_offset_t)user_ool_dsc->address; - count = user_ool_dsc->count; - deallocate = user_ool_dsc->deallocate; - copy_option = user_ool_dsc->copy; - user_disp = user_ool_dsc->disposition; - type = user_ool_dsc->type; - - user_dsc = (typeof(user_dsc))(user_ool_dsc+1); - } else { - mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - - addr = CAST_USER_ADDR_T(user_ool_dsc->address); - count = user_ool_dsc->count; - deallocate = user_ool_dsc->deallocate; - copy_option = user_ool_dsc->copy; - user_disp = user_ool_dsc->disposition; - type = user_ool_dsc->type; - - user_dsc = (typeof(user_dsc))(user_ool_dsc+1); - } - - dsc->deallocate = deallocate; - dsc->copy = copy_option; - dsc->type = type; - dsc->count = count; - dsc->address = NULL; /* for now */ - - result_disp = ipc_object_copyin_type(user_disp); - dsc->disposition = result_disp; - - /* We always do a 'physical copy', but you have to specify something valid */ - if (copy_option != MACH_MSG_PHYSICAL_COPY && - copy_option != MACH_MSG_VIRTUAL_COPY) { - *mr = MACH_SEND_INVALID_TYPE; - return NULL; - } - - /* calculate length of data in bytes, rounding up */ - - if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) { - *mr = MACH_SEND_TOO_LARGE; - return NULL; - } - - if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) { - *mr = MACH_SEND_TOO_LARGE; - return NULL; - } - - if (ports_length == 0) { - return user_dsc; - } - - data = kalloc(ports_length); - - if (data == NULL) { - *mr = MACH_SEND_NO_BUFFER; - return NULL; - } - + void *data; + ipc_object_t *objects; + unsigned int i; + mach_vm_offset_t addr; + mach_msg_type_name_t user_disp; + mach_msg_type_name_t result_disp; + mach_msg_type_number_t count; + mach_msg_copy_options_t copy_option; + boolean_t deallocate; + mach_msg_descriptor_type_t type; + vm_size_t ports_length, names_length; + + if (is_64bit) { + mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + + addr = (mach_vm_offset_t)user_ool_dsc->address; + count = user_ool_dsc->count; + deallocate = user_ool_dsc->deallocate; + copy_option = user_ool_dsc->copy; + user_disp = user_ool_dsc->disposition; + type = user_ool_dsc->type; + + user_dsc = (typeof(user_dsc))(user_ool_dsc + 1); + } else { + mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + + addr = CAST_USER_ADDR_T(user_ool_dsc->address); + count = user_ool_dsc->count; + deallocate = user_ool_dsc->deallocate; + copy_option = user_ool_dsc->copy; + user_disp = user_ool_dsc->disposition; + type = user_ool_dsc->type; + + user_dsc = (typeof(user_dsc))(user_ool_dsc + 1); + } + + dsc->deallocate = deallocate; + dsc->copy = copy_option; + dsc->type = type; + dsc->count = count; + dsc->address = NULL; /* for now */ + + result_disp = 
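The no_zero_fill note in the physical-copy branch above hides a small piece of arithmetic: the copy map hands back whole pages, so the tail of the last page must be cleared by hand. A standalone sketch of that balance calculation, assuming 4 KiB pages (the kernel uses its own round_page()/page_aligned() macros instead):

#include <stdint.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096u
#define SKETCH_ROUND_PAGE(x) (((x) + SKETCH_PAGE_SIZE - 1) & ~(size_t)(SKETCH_PAGE_SIZE - 1))

/* After copying `length` bytes into page-granular storage, zero the
 * remainder of the final page so no stale bytes ride along. */
void
zero_fill_balance(char *buf, size_t length)
{
	size_t rounded = SKETCH_ROUND_PAGE(length);
	if (rounded != length) {
		memset(buf + length, 0, rounded - length);
	}
}

int
main(void)
{
	static char buf[2 * SKETCH_PAGE_SIZE];
	/* pretend 5000 bytes were copied in; bytes 5000..8191 get cleared */
	zero_fill_balance(buf, 5000);
	return 0;
}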
ipc_object_copyin_type(user_disp); + dsc->disposition = result_disp; + + /* We always do a 'physical copy', but you have to specify something valid */ + if (copy_option != MACH_MSG_PHYSICAL_COPY && + copy_option != MACH_MSG_VIRTUAL_COPY) { + *mr = MACH_SEND_INVALID_TYPE; + return NULL; + } + + /* calculate length of data in bytes, rounding up */ + + if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) { + *mr = MACH_SEND_TOO_LARGE; + return NULL; + } + + if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) { + *mr = MACH_SEND_TOO_LARGE; + return NULL; + } + + if (ports_length == 0) { + return user_dsc; + } + + data = kalloc(ports_length); + + if (data == NULL) { + *mr = MACH_SEND_NO_BUFFER; + return NULL; + } + #ifdef __LP64__ - mach_port_name_t *names = &((mach_port_name_t *)data)[count]; + mach_port_name_t *names = &((mach_port_name_t *)data)[count]; #else - mach_port_name_t *names = ((mach_port_name_t *)data); + mach_port_name_t *names = ((mach_port_name_t *)data); #endif - if (copyinmap(map, addr, names, names_length) != KERN_SUCCESS) { - kfree(data, ports_length); - *mr = MACH_SEND_INVALID_MEMORY; - return NULL; - } + if (copyinmap(map, addr, names, names_length) != KERN_SUCCESS) { + kfree(data, ports_length); + *mr = MACH_SEND_INVALID_MEMORY; + return NULL; + } - if (deallocate) { - (void) mach_vm_deallocate(map, addr, (mach_vm_size_t)ports_length); - } + if (deallocate) { + (void) mach_vm_deallocate(map, addr, (mach_vm_size_t)ports_length); + } - objects = (ipc_object_t *) data; - dsc->address = data; + objects = (ipc_object_t *) data; + dsc->address = data; - for ( i = 0; i < count; i++) { - mach_port_name_t name = names[i]; - ipc_object_t object; + for (i = 0; i < count; i++) { + mach_port_name_t name = names[i]; + ipc_object_t object; - if (!MACH_PORT_VALID(name)) { - objects[i] = (ipc_object_t)CAST_MACH_NAME_TO_PORT(name); - continue; - } + if (!MACH_PORT_VALID(name)) { + objects[i] = (ipc_object_t)CAST_MACH_NAME_TO_PORT(name); + continue; + } - kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object); + kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object); - if (kr != KERN_SUCCESS) { - unsigned int j; + if (kr != KERN_SUCCESS) { + unsigned int j; - for(j = 0; j < i; j++) { - object = objects[j]; - if (IPC_OBJECT_VALID(object)) - ipc_object_destroy(object, result_disp); - } - kfree(data, ports_length); - dsc->address = NULL; + for (j = 0; j < i; j++) { + object = objects[j]; + if (IPC_OBJECT_VALID(object)) { + ipc_object_destroy(object, result_disp); + } + } + kfree(data, ports_length); + dsc->address = NULL; if ((*optionp & MACH_SEND_KERNEL) == 0) { mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT); } - *mr = MACH_SEND_INVALID_RIGHT; - return NULL; - } + *mr = MACH_SEND_INVALID_RIGHT; + return NULL; + } - if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && - ipc_port_check_circularity( - (ipc_port_t) object, - (ipc_port_t) dest)) - kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR; + if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && + ipc_port_check_circularity( + (ipc_port_t) object, + (ipc_port_t) dest)) { + kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR; + } - objects[i] = object; - } + objects[i] = object; + } - return user_dsc; + return user_dsc; } /* @@ -2966,140 +3011,140 @@ ipc_kmsg_copyin_ool_ports_descriptor( mach_msg_return_t ipc_kmsg_copyin_body( - ipc_kmsg_t kmsg, - ipc_space_t space, + ipc_kmsg_t kmsg, + ipc_space_t space, vm_map_t map, mach_msg_option_t *optionp) { 
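The os_mul_overflow calls above are XNU's checked-arithmetic helpers; computing count * sizeof(...) unchecked is exactly how undersized buffers get allocated. A sketch of the same guard using the compiler builtin such helpers wrap (assuming GCC or Clang):

#include <stdint.h>
#include <stdio.h>

/* Reject a port-array size instead of letting it wrap, mirroring the
 * MACH_SEND_TOO_LARGE paths above. A 32-bit result type makes the
 * wraparound easy to demonstrate. */
int
checked_mul32(uint32_t count, uint32_t elem_size, uint32_t *out)
{
	return __builtin_mul_overflow(count, elem_size, out) ? -1 : 0;
}

int
main(void)
{
	uint32_t n;
	if (checked_mul32(0xFFFFFFFFu, 8u, &n) != 0) {
		puts("overflow rejected, as with MACH_SEND_TOO_LARGE");
	}
	return 0;
}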
- ipc_object_t dest; - mach_msg_body_t *body; - mach_msg_descriptor_t *daddr, *naddr; - mach_msg_descriptor_t *user_addr, *kern_addr; - mach_msg_type_number_t dsc_count; - boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS); - boolean_t complex = FALSE; - vm_size_t space_needed = 0; - vm_offset_t paddr = 0; - vm_map_copy_t copy = VM_MAP_COPY_NULL; - mach_msg_type_number_t i; - mach_msg_return_t mr = MACH_MSG_SUCCESS; - - vm_size_t descriptor_size = 0; - - mach_msg_type_number_t total_ool_port_count = 0; - - /* - * Determine if the target is a kernel port. - */ - dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port; - body = (mach_msg_body_t *) (kmsg->ikm_header + 1); - naddr = (mach_msg_descriptor_t *) (body + 1); - - dsc_count = body->msgh_descriptor_count; - if (dsc_count == 0) - return MACH_MSG_SUCCESS; + ipc_object_t dest; + mach_msg_body_t *body; + mach_msg_descriptor_t *daddr, *naddr; + mach_msg_descriptor_t *user_addr, *kern_addr; + mach_msg_type_number_t dsc_count; + boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS); + boolean_t complex = FALSE; + vm_size_t space_needed = 0; + vm_offset_t paddr = 0; + vm_map_copy_t copy = VM_MAP_COPY_NULL; + mach_msg_type_number_t i; + mach_msg_return_t mr = MACH_MSG_SUCCESS; + + vm_size_t descriptor_size = 0; + + mach_msg_type_number_t total_ool_port_count = 0; - /* - * Make an initial pass to determine kernal VM space requirements for - * physical copies and possible contraction of the descriptors from - * processes with pointers larger than the kernel's. - */ - daddr = NULL; - for (i = 0; i < dsc_count; i++) { - mach_msg_size_t size; - mach_msg_type_number_t ool_port_count = 0; + /* + * Determine if the target is a kernel port. + */ + dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port; + body = (mach_msg_body_t *) (kmsg->ikm_header + 1); + naddr = (mach_msg_descriptor_t *) (body + 1); - daddr = naddr; - - /* make sure the descriptor fits in the message */ - if (is_task_64bit) { - switch (daddr->type.type) { - case MACH_MSG_OOL_DESCRIPTOR: - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_PORTS_DESCRIPTOR: - descriptor_size += 16; - naddr = (typeof(naddr))((vm_offset_t)daddr + 16); - break; - default: - descriptor_size += 12; - naddr = (typeof(naddr))((vm_offset_t)daddr + 12); - break; - } - } else { - descriptor_size += 12; - naddr = (typeof(naddr))((vm_offset_t)daddr + 12); - } - - if (naddr > (mach_msg_descriptor_t *) - ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size)) { - mr = MACH_SEND_MSG_TOO_SMALL; - goto clean_message; + dsc_count = body->msgh_descriptor_count; + if (dsc_count == 0) { + return MACH_MSG_SUCCESS; } - switch (daddr->type.type) { - case MACH_MSG_OOL_DESCRIPTOR: - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - size = (is_task_64bit) ? - ((mach_msg_ool_descriptor64_t *)daddr)->size : - daddr->out_of_line.size; - - if (daddr->out_of_line.copy != MACH_MSG_PHYSICAL_COPY && - daddr->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) { - /* - * Invalid copy option - */ - mr = MACH_SEND_INVALID_TYPE; - goto clean_message; - } + /* + * Make an initial pass to determine kernel VM space requirements for + * physical copies and possible contraction of the descriptors from + * processes with pointers larger than the kernel's.
+ */ + daddr = NULL; + for (i = 0; i < dsc_count; i++) { + mach_msg_size_t size; + mach_msg_type_number_t ool_port_count = 0; - if ((size >= MSG_OOL_SIZE_SMALL) && - (daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) && - !(daddr->out_of_line.deallocate)) { + daddr = naddr; - /* - * Out-of-line memory descriptor, accumulate kernel - * memory requirements - */ - if (space_needed + round_page(size) <= space_needed) { - /* Overflow dectected */ - mr = MACH_MSG_VM_KERNEL; - goto clean_message; + /* make sure the descriptor fits in the message */ + if (is_task_64bit) { + switch (daddr->type.type) { + case MACH_MSG_OOL_DESCRIPTOR: + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_PORTS_DESCRIPTOR: + descriptor_size += 16; + naddr = (typeof(naddr))((vm_offset_t)daddr + 16); + break; + default: + descriptor_size += 12; + naddr = (typeof(naddr))((vm_offset_t)daddr + 12); + break; + } + } else { + descriptor_size += 12; + naddr = (typeof(naddr))((vm_offset_t)daddr + 12); } - space_needed += round_page(size); - if (space_needed > ipc_kmsg_max_vm_space) { - /* Per message kernel memory limit exceeded */ - mr = MACH_MSG_VM_KERNEL; - goto clean_message; - } - } - break; - case MACH_MSG_PORT_DESCRIPTOR: - if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) { - /* Overflow detected */ - mr = MACH_SEND_TOO_LARGE; - goto clean_message; - } - break; - case MACH_MSG_OOL_PORTS_DESCRIPTOR: - ool_port_count = (is_task_64bit) ? - ((mach_msg_ool_ports_descriptor64_t *)daddr)->count : - daddr->ool_ports.count; - - if (os_add_overflow(total_ool_port_count, ool_port_count, &total_ool_port_count)) { - /* Overflow detected */ - mr = MACH_SEND_TOO_LARGE; + if (naddr > (mach_msg_descriptor_t *) + ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size)) { + mr = MACH_SEND_MSG_TOO_SMALL; + goto clean_message; } - if (ool_port_count > (ipc_kmsg_max_vm_space/sizeof(mach_port_t))) { - /* Per message kernel memory limit exceeded */ - mr = MACH_SEND_TOO_LARGE; - goto clean_message; + switch (daddr->type.type) { + case MACH_MSG_OOL_DESCRIPTOR: + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + size = (is_task_64bit) ? + ((mach_msg_ool_descriptor64_t *)daddr)->size : + daddr->out_of_line.size; + + if (daddr->out_of_line.copy != MACH_MSG_PHYSICAL_COPY && + daddr->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) { + /* + * Invalid copy option + */ + mr = MACH_SEND_INVALID_TYPE; + goto clean_message; + } + + if ((size >= MSG_OOL_SIZE_SMALL) && + (daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) && + !(daddr->out_of_line.deallocate)) { + /* + * Out-of-line memory descriptor, accumulate kernel + * memory requirements + */ + if (space_needed + round_page(size) <= space_needed) { + /* Overflow detected */ + mr = MACH_MSG_VM_KERNEL; + goto clean_message; + } + + space_needed += round_page(size); + if (space_needed > ipc_kmsg_max_vm_space) { + /* Per message kernel memory limit exceeded */ + mr = MACH_MSG_VM_KERNEL; + goto clean_message; + } + } + break; + case MACH_MSG_PORT_DESCRIPTOR: + if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) { + /* Overflow detected */ + mr = MACH_SEND_TOO_LARGE; + goto clean_message; + } + break; + case MACH_MSG_OOL_PORTS_DESCRIPTOR: + ool_port_count = (is_task_64bit) ?
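The sizing rule in this first pass is compact enough to state directly: a 64-bit sender's OOL, OOL-volatile, and OOL-ports descriptors occupy 16 bytes each in user form, and every other case is 12. A self-contained sketch of the accumulation (the descriptor type constants here are placeholders, not the mach/message.h values):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum sketch_dsc_type { SK_PORT, SK_OOL, SK_OOL_VOLATILE, SK_OOL_PORTS }; /* placeholder values */

/* Sum the user-visible bytes of a descriptor array, the same
 * 12-vs-16 rule used above to size the later in-place expansion. */
size_t
user_descriptor_bytes(const enum sketch_dsc_type *types, size_t n, bool task_is_64bit)
{
	size_t total = 0;
	for (size_t i = 0; i < n; i++) {
		bool wide = task_is_64bit &&
		    (types[i] == SK_OOL || types[i] == SK_OOL_VOLATILE || types[i] == SK_OOL_PORTS);
		total += wide ? 16 : 12;
	}
	return total;
}

int
main(void)
{
	enum sketch_dsc_type t[] = { SK_PORT, SK_OOL };
	printf("%zu\n", user_descriptor_bytes(t, 2, true)); /* 12 + 16 = 28 */
	return 0;
}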
+ ((mach_msg_ool_ports_descriptor64_t *)daddr)->count : + daddr->ool_ports.count; + + if (os_add_overflow(total_ool_port_count, ool_port_count, &total_ool_port_count)) { + /* Overflow detected */ + mr = MACH_SEND_TOO_LARGE; + goto clean_message; + } + + if (ool_port_count > (ipc_kmsg_max_vm_space / sizeof(mach_port_t))) { + /* Per message kernel memory limit exceeded */ + mr = MACH_SEND_TOO_LARGE; + goto clean_message; + } + break; } - break; } - } /* Sending more than 16383 rights in one message seems crazy */ if (total_ool_port_count >= (MACH_PORT_UREFS_MAX / 4)) { @@ -3107,79 +3152,79 @@ ipc_kmsg_copyin_body( goto clean_message; } - /* - * Allocate space in the pageable kernel ipc copy map for all the - * ool data that is to be physically copied. Map is marked wait for - * space. - */ - if (space_needed) { - if (vm_allocate_kernel(ipc_kernel_copy_map, &paddr, space_needed, - VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC) != KERN_SUCCESS) { - mr = MACH_MSG_VM_KERNEL; - goto clean_message; - } - } - - /* user_addr = just after base as it was copied in */ - user_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t)); - - /* Shift the mach_msg_base_t down to make room for dsc_count*16bytes of descriptors */ - if(descriptor_size != 16*dsc_count) { - vm_offset_t dsc_adjust = 16*dsc_count - descriptor_size; - - memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t)); - kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust); - - /* Update the message size for the larger in-kernel representation */ - kmsg->ikm_header->msgh_size += (mach_msg_size_t)dsc_adjust; - } - - - /* kern_addr = just after base after it has been (conditionally) moved */ - kern_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t)); - - /* handle the OOL regions and port descriptors. */ - for(i=0;itype.type) { - case MACH_MSG_PORT_DESCRIPTOR: - user_addr = ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t *)kern_addr, - (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, optionp, &mr); - kern_addr++; - complex = TRUE; - break; - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_DESCRIPTOR: - user_addr = ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t *)kern_addr, - user_addr, is_task_64bit, &paddr, ©, &space_needed, map, optionp, &mr); - kern_addr++; - complex = TRUE; - break; - case MACH_MSG_OOL_PORTS_DESCRIPTOR: - user_addr = ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t *)kern_addr, - user_addr, is_task_64bit, map, space, dest, kmsg, optionp, &mr); - kern_addr++; - complex = TRUE; - break; - default: - /* Invalid descriptor */ - mr = MACH_SEND_INVALID_TYPE; - break; - } - - if (MACH_MSG_SUCCESS != mr) { - /* clean from start of message descriptors to i */ - ipc_kmsg_clean_partial(kmsg, i, - (mach_msg_descriptor_t *)((mach_msg_base_t *)kmsg->ikm_header + 1), - paddr, space_needed); - goto out; - } - } /* End of loop */ - - if (!complex) { - kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_COMPLEX; - } - out: - return mr; + /* + * Allocate space in the pageable kernel ipc copy map for all the + * ool data that is to be physically copied. Map is marked wait for + * space. 
+ */ + if (space_needed) { + if (vm_allocate_kernel(ipc_kernel_copy_map, &paddr, space_needed, + VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC) != KERN_SUCCESS) { + mr = MACH_MSG_VM_KERNEL; + goto clean_message; + } + } + + /* user_addr = just after base as it was copied in */ + user_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t)); + + /* Shift the mach_msg_base_t down to make room for dsc_count*16bytes of descriptors */ + if (descriptor_size != 16 * dsc_count) { + vm_offset_t dsc_adjust = 16 * dsc_count - descriptor_size; + + memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t)); + kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust); + + /* Update the message size for the larger in-kernel representation */ + kmsg->ikm_header->msgh_size += (mach_msg_size_t)dsc_adjust; + } + + + /* kern_addr = just after base after it has been (conditionally) moved */ + kern_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t)); + + /* handle the OOL regions and port descriptors. */ + for (i = 0; i < dsc_count; i++) { + switch (user_addr->type.type) { + case MACH_MSG_PORT_DESCRIPTOR: + user_addr = ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t *)kern_addr, + (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, optionp, &mr); + kern_addr++; + complex = TRUE; + break; + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: + user_addr = ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t *)kern_addr, + user_addr, is_task_64bit, &paddr, ©, &space_needed, map, optionp, &mr); + kern_addr++; + complex = TRUE; + break; + case MACH_MSG_OOL_PORTS_DESCRIPTOR: + user_addr = ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t *)kern_addr, + user_addr, is_task_64bit, map, space, dest, kmsg, optionp, &mr); + kern_addr++; + complex = TRUE; + break; + default: + /* Invalid descriptor */ + mr = MACH_SEND_INVALID_TYPE; + break; + } + + if (MACH_MSG_SUCCESS != mr) { + /* clean from start of message descriptors to i */ + ipc_kmsg_clean_partial(kmsg, i, + (mach_msg_descriptor_t *)((mach_msg_base_t *)kmsg->ikm_header + 1), + paddr, space_needed); + goto out; + } + } /* End of loop */ + + if (!complex) { + kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_COMPLEX; + } +out: + return mr; clean_message: /* no descriptors have been copied in yet */ @@ -3214,50 +3259,50 @@ clean_message: mach_msg_return_t ipc_kmsg_copyin( - ipc_kmsg_t kmsg, - ipc_space_t space, - vm_map_t map, + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, mach_msg_priority_t override, - mach_msg_option_t *optionp) + mach_msg_option_t *optionp) { - mach_msg_return_t mr; + mach_msg_return_t mr; - kmsg->ikm_header->msgh_bits &= MACH_MSGH_BITS_USER; + kmsg->ikm_header->msgh_bits &= MACH_MSGH_BITS_USER; - mr = ipc_kmsg_copyin_header(kmsg, space, override, optionp); + mr = ipc_kmsg_copyin_header(kmsg, space, override, optionp); - if (mr != MACH_MSG_SUCCESS) - return mr; + if (mr != MACH_MSG_SUCCESS) { + return mr; + } + + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM((uintptr_t)kmsg), + (uintptr_t)kmsg->ikm_header->msgh_bits, + (uintptr_t)kmsg->ikm_header->msgh_id, + VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(kmsg->ikm_voucher)), + 0); + + DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_copyin header:\n%.8x\n%.8x\n%p\n%p\n%p\n%.8x\n", + kmsg->ikm_header->msgh_size, + 
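The memmove shift above works because the kernel buffer was sized earlier, in the buffer-allocation path, with room for the worst case of 16 bytes per descriptor: the header slides down by the deficit, leaving space to expand each descriptor in place. A sketch of just the adjustment arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Deficit between the kernel's fixed 16-byte descriptor slots and
 * the (possibly smaller) user layout; the header moves down by this. */
size_t
expansion_adjust(size_t dsc_count, size_t descriptor_size)
{
	size_t kernel_size = 16 * dsc_count;
	assert(descriptor_size <= kernel_size);
	return kernel_size - descriptor_size; /* zero when layouts already match */
}

int
main(void)
{
	/* e.g. three 12-byte descriptors from a 32-bit task: 48 - 36 = 12 */
	printf("%zu\n", expansion_adjust(3, 36));
	return 0;
}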
kmsg->ikm_header->msgh_bits, + kmsg->ikm_header->msgh_remote_port, + kmsg->ikm_header->msgh_local_port, + kmsg->ikm_voucher, + kmsg->ikm_header->msgh_id); + + if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) { + return MACH_MSG_SUCCESS; + } - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_MSG_SEND) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM((uintptr_t)kmsg), - (uintptr_t)kmsg->ikm_header->msgh_bits, - (uintptr_t)kmsg->ikm_header->msgh_id, - VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(kmsg->ikm_voucher)), - 0); - - DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_copyin header:\n%.8x\n%.8x\n%p\n%p\n%p\n%.8x\n", - kmsg->ikm_header->msgh_size, - kmsg->ikm_header->msgh_bits, - kmsg->ikm_header->msgh_remote_port, - kmsg->ikm_header->msgh_local_port, - kmsg->ikm_voucher, - kmsg->ikm_header->msgh_id); - - if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) - return MACH_MSG_SUCCESS; - mr = ipc_kmsg_copyin_body( kmsg, space, map, optionp); /* unreachable if !DEBUG */ __unreachable_ok_push - if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) - { + if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { kprintf("body:\n"); uint32_t i; - for(i=0;i*4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t));i++) - { - kprintf("%.4x\n",((uint32_t *)(kmsg->ikm_header + 1))[i]); + for (i = 0; i * 4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t)); i++) { + kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]); } } __unreachable_ok_pop @@ -3280,7 +3325,7 @@ ipc_kmsg_copyin( mach_msg_return_t ipc_kmsg_copyin_from_kernel( - ipc_kmsg_t kmsg) + ipc_kmsg_t kmsg) { mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits; mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits); @@ -3289,12 +3334,14 @@ ipc_kmsg_copyin_from_kernel( ipc_object_t local = (ipc_object_t) kmsg->ikm_header->msgh_local_port; /* translate the destination and reply ports */ - if (!IO_VALID(remote)) + if (!IO_VALID(remote)) { return MACH_SEND_INVALID_DEST; + } ipc_object_copyin_from_kernel(remote, rname); - if (IO_VALID(local)) + if (IO_VALID(local)) { ipc_object_copyin_from_kernel(local, lname); + } /* * The common case is a complex message with no reply port, @@ -3302,119 +3349,120 @@ ipc_kmsg_copyin_from_kernel( */ if (bits == (MACH_MSGH_BITS_COMPLEX | - MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) { + MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) { bits = (MACH_MSGH_BITS_COMPLEX | - MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0)); + MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0)); kmsg->ikm_header->msgh_bits = bits; } else { bits = (MACH_MSGH_BITS_OTHER(bits) | - MACH_MSGH_BITS(ipc_object_copyin_type(rname), - ipc_object_copyin_type(lname))); + MACH_MSGH_BITS(ipc_object_copyin_type(rname), + ipc_object_copyin_type(lname))); kmsg->ikm_header->msgh_bits = bits; - if ((bits & MACH_MSGH_BITS_COMPLEX) == 0) + if ((bits & MACH_MSGH_BITS_COMPLEX) == 0) { return MACH_MSG_SUCCESS; + } } - { - mach_msg_descriptor_t *saddr; - mach_msg_body_t *body; - mach_msg_type_number_t i, count; - - body = (mach_msg_body_t *) (kmsg->ikm_header + 1); - saddr = (mach_msg_descriptor_t *) (body + 1); - count = body->msgh_descriptor_count; - - for (i = 0; i < count; i++, saddr++) { - - switch (saddr->type.type) { - - case MACH_MSG_PORT_DESCRIPTOR: { - mach_msg_type_name_t name; - ipc_object_t object; - mach_msg_port_descriptor_t *dsc; - - dsc = &saddr->port; - - /* this is really the type SEND, SEND_ONCE, etc. 
*/ - name = dsc->disposition; - object = (ipc_object_t) dsc->name; - dsc->disposition = ipc_object_copyin_type(name); - - if (!IO_VALID(object)) { - break; - } - - ipc_object_copyin_from_kernel(object, name); - - /* CDY avoid circularity when the destination is also */ - /* the kernel. This check should be changed into an */ - /* assert when the new kobject model is in place since*/ - /* ports will not be used in kernel to kernel chats */ - - if (((ipc_port_t)remote)->ip_receiver != ipc_space_kernel) { - if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && - ipc_port_check_circularity((ipc_port_t) object, - (ipc_port_t) remote)) { - kmsg->ikm_header->msgh_bits |= - MACH_MSGH_BITS_CIRCULAR; - } - } - break; - } - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_DESCRIPTOR: { - /* - * The sender should supply ready-made memory, i.e. - * a vm_map_copy_t, so we don't need to do anything. - */ - break; - } - case MACH_MSG_OOL_PORTS_DESCRIPTOR: { - ipc_object_t *objects; - unsigned int j; - mach_msg_type_name_t name; - mach_msg_ool_ports_descriptor_t *dsc; - - dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports; - - /* this is really the type SEND, SEND_ONCE, etc. */ - name = dsc->disposition; - dsc->disposition = ipc_object_copyin_type(name); - - objects = (ipc_object_t *) dsc->address; - - for ( j = 0; j < dsc->count; j++) { - ipc_object_t object = objects[j]; - - if (!IO_VALID(object)) - continue; - - ipc_object_copyin_from_kernel(object, name); - - if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && - ipc_port_check_circularity( - (ipc_port_t) object, - (ipc_port_t) remote)) - kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR; - } - break; - } - default: { -#if MACH_ASSERT - panic("ipc_kmsg_copyin_from_kernel: bad descriptor"); -#endif /* MACH_ASSERT */ + { + mach_msg_descriptor_t *saddr; + mach_msg_body_t *body; + mach_msg_type_number_t i, count; + + body = (mach_msg_body_t *) (kmsg->ikm_header + 1); + saddr = (mach_msg_descriptor_t *) (body + 1); + count = body->msgh_descriptor_count; + + for (i = 0; i < count; i++, saddr++) { + switch (saddr->type.type) { + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_type_name_t name; + ipc_object_t object; + mach_msg_port_descriptor_t *dsc; + + dsc = &saddr->port; + + /* this is really the type SEND, SEND_ONCE, etc. */ + name = dsc->disposition; + object = (ipc_object_t) dsc->name; + dsc->disposition = ipc_object_copyin_type(name); + + if (!IO_VALID(object)) { + break; + } + + ipc_object_copyin_from_kernel(object, name); + + /* CDY avoid circularity when the destination is also */ + /* the kernel. This check should be changed into an */ + /* assert when the new kobject model is in place since*/ + /* ports will not be used in kernel to kernel chats */ + + if (((ipc_port_t)remote)->ip_receiver != ipc_space_kernel) { + if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && + ipc_port_check_circularity((ipc_port_t) object, + (ipc_port_t) remote)) { + kmsg->ikm_header->msgh_bits |= + MACH_MSGH_BITS_CIRCULAR; + } + } + break; + } + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: { + /* + * The sender should supply ready-made memory, i.e. + * a vm_map_copy_t, so we don't need to do anything. + */ + break; + } + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + ipc_object_t *objects; + unsigned int j; + mach_msg_type_name_t name; + mach_msg_ool_ports_descriptor_t *dsc; + + dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports; + + /* this is really the type SEND, SEND_ONCE, etc. 
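The circularity guard referenced in the CDY comment prevents a message from creating an unreachable cycle of ports, for example a receive right ultimately enqueued inside itself. A deliberately simplified model of the test (the real ipc_port_check_circularity also takes locks, observes a global lock order, and records circular state on the port):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy port with only the destination link the walk cares about. */
struct sketch_port { struct sketch_port *destination; };

/* Moving `moving`'s receive right toward `dest` is circular when
 * dest's destination chain already leads back to `moving`. */
bool
would_be_circular(struct sketch_port *moving, struct sketch_port *dest)
{
	for (struct sketch_port *p = dest; p != NULL; p = p->destination) {
		if (p == moving) {
			return true; /* caller sets MACH_MSGH_BITS_CIRCULAR */
		}
	}
	return false;
}

int
main(void)
{
	struct sketch_port a = { NULL }, b = { &a };
	printf("%d\n", would_be_circular(&a, &b)); /* 1: b already leads to a */
	return 0;
}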
*/ + name = dsc->disposition; + dsc->disposition = ipc_object_copyin_type(name); + + objects = (ipc_object_t *) dsc->address; + + for (j = 0; j < dsc->count; j++) { + ipc_object_t object = objects[j]; + + if (!IO_VALID(object)) { + continue; + } + + ipc_object_copyin_from_kernel(object, name); + + if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && + ipc_port_check_circularity( + (ipc_port_t) object, + (ipc_port_t) remote)) { + kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR; + } + } + break; + } + default: { +#if MACH_ASSERT + panic("ipc_kmsg_copyin_from_kernel: bad descriptor"); +#endif /* MACH_ASSERT */ + } + } } - } } - } - return MACH_MSG_SUCCESS; + return MACH_MSG_SUCCESS; } #if IKM_SUPPORT_LEGACY mach_msg_return_t ipc_kmsg_copyin_from_kernel_legacy( - ipc_kmsg_t kmsg) + ipc_kmsg_t kmsg) { mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits; mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits); @@ -3423,12 +3471,14 @@ ipc_kmsg_copyin_from_kernel_legacy( ipc_object_t local = (ipc_object_t) kmsg->ikm_header->msgh_local_port; /* translate the destination and reply ports */ - if (!IO_VALID(remote)) + if (!IO_VALID(remote)) { return MACH_SEND_INVALID_DEST; + } ipc_object_copyin_from_kernel(remote, rname); - if (IO_VALID(local)) + if (IO_VALID(local)) { ipc_object_copyin_from_kernel(local, lname); + } /* * The common case is a complex message with no reply port, @@ -3436,152 +3486,154 @@ ipc_kmsg_copyin_from_kernel_legacy( */ if (bits == (MACH_MSGH_BITS_COMPLEX | - MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) { + MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) { bits = (MACH_MSGH_BITS_COMPLEX | - MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0)); + MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0)); kmsg->ikm_header->msgh_bits = bits; } else { bits = (MACH_MSGH_BITS_OTHER(bits) | - MACH_MSGH_BITS(ipc_object_copyin_type(rname), - ipc_object_copyin_type(lname))); + MACH_MSGH_BITS(ipc_object_copyin_type(rname), + ipc_object_copyin_type(lname))); kmsg->ikm_header->msgh_bits = bits; - if ((bits & MACH_MSGH_BITS_COMPLEX) == 0) + if ((bits & MACH_MSGH_BITS_COMPLEX) == 0) { return MACH_MSG_SUCCESS; + } } - { - mach_msg_legacy_descriptor_t *saddr; - mach_msg_descriptor_t *daddr; - mach_msg_body_t *body; - mach_msg_type_number_t i, count; - - body = (mach_msg_body_t *) (kmsg->ikm_header + 1); - saddr = (typeof(saddr)) (body + 1); - count = body->msgh_descriptor_count; - - if(count) { - vm_offset_t dsc_adjust = 4*count; - memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t)); - kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust); - /* Update the message size for the larger in-kernel representation */ - kmsg->ikm_header->msgh_size += dsc_adjust; - } - daddr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t)); - - for (i = 0; i < count; i++, saddr++, daddr++) { - switch (saddr->type.type) { - - case MACH_MSG_PORT_DESCRIPTOR: { - mach_msg_type_name_t name; - ipc_object_t object; - mach_msg_legacy_port_descriptor_t *dsc; - mach_msg_port_descriptor_t *dest_dsc; - - dsc = (typeof(dsc))&saddr->port; - dest_dsc = &daddr->port; - - /* this is really the type SEND, SEND_ONCE, etc. 
*/ - name = dsc->disposition; - object = (ipc_object_t) CAST_MACH_NAME_TO_PORT(dsc->name); - dest_dsc->disposition = ipc_object_copyin_type(name); - dest_dsc->name = (mach_port_t)object; - dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR; - - if (!IO_VALID(object)) { - break; - } - - ipc_object_copyin_from_kernel(object, name); - - /* CDY avoid circularity when the destination is also */ - /* the kernel. This check should be changed into an */ - /* assert when the new kobject model is in place since*/ - /* ports will not be used in kernel to kernel chats */ - - if (((ipc_port_t)remote)->ip_receiver != ipc_space_kernel) { - if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && - ipc_port_check_circularity((ipc_port_t) object, - (ipc_port_t) remote)) { - kmsg->ikm_header->msgh_bits |= - MACH_MSGH_BITS_CIRCULAR; - } - } - break; - } - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_DESCRIPTOR: { - /* The sender should supply ready-made memory, i.e. a vm_map_copy_t - * so we don't need to do anything special. */ - - mach_msg_ool_descriptor32_t *source_dsc = &saddr->out_of_line32; - mach_msg_ool_descriptor_t *dest_dsc = (typeof(dest_dsc))&daddr->out_of_line; - - vm_offset_t address = source_dsc->address; - vm_size_t size = source_dsc->size; - boolean_t deallocate = source_dsc->deallocate; - mach_msg_copy_options_t copy = source_dsc->copy; - mach_msg_descriptor_type_t type = source_dsc->type; - - dest_dsc->address = (void *)address; - dest_dsc->size = size; - dest_dsc->deallocate = deallocate; - dest_dsc->copy = copy; - dest_dsc->type = type; - break; - } - case MACH_MSG_OOL_PORTS_DESCRIPTOR: { - ipc_object_t *objects; - unsigned int j; - mach_msg_type_name_t name; - mach_msg_ool_ports_descriptor_t *dest_dsc; - - mach_msg_ool_ports_descriptor32_t *source_dsc = &saddr->ool_ports32; - dest_dsc = (typeof(dest_dsc))&daddr->ool_ports; - - boolean_t deallocate = source_dsc->deallocate; - mach_msg_copy_options_t copy = source_dsc->copy; - mach_msg_size_t port_count = source_dsc->count; - mach_msg_type_name_t disposition = source_dsc->disposition; - - /* this is really the type SEND, SEND_ONCE, etc. 
*/ - name = disposition; - disposition = ipc_object_copyin_type(name); - - objects = (ipc_object_t *) (uintptr_t)source_dsc->address; - - for ( j = 0; j < port_count; j++) { - ipc_object_t object = objects[j]; - - if (!IO_VALID(object)) - continue; - - ipc_object_copyin_from_kernel(object, name); - - if ((disposition == MACH_MSG_TYPE_PORT_RECEIVE) && - ipc_port_check_circularity( - (ipc_port_t) object, - (ipc_port_t) remote)) - kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR; - } - - dest_dsc->address = objects; - dest_dsc->deallocate = deallocate; - dest_dsc->copy = copy; - dest_dsc->disposition = disposition; - dest_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; - dest_dsc->count = port_count; - break; - } - default: { -#if MACH_ASSERT - panic("ipc_kmsg_copyin_from_kernel: bad descriptor"); -#endif /* MACH_ASSERT */ + { + mach_msg_legacy_descriptor_t *saddr; + mach_msg_descriptor_t *daddr; + mach_msg_body_t *body; + mach_msg_type_number_t i, count; + + body = (mach_msg_body_t *) (kmsg->ikm_header + 1); + saddr = (typeof(saddr))(body + 1); + count = body->msgh_descriptor_count; + + if (count) { + vm_offset_t dsc_adjust = 4 * count; + memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t)); + kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust); + /* Update the message size for the larger in-kernel representation */ + kmsg->ikm_header->msgh_size += dsc_adjust; + } + daddr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t)); + + for (i = 0; i < count; i++, saddr++, daddr++) { + switch (saddr->type.type) { + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_type_name_t name; + ipc_object_t object; + mach_msg_legacy_port_descriptor_t *dsc; + mach_msg_port_descriptor_t *dest_dsc; + + dsc = (typeof(dsc)) & saddr->port; + dest_dsc = &daddr->port; + + /* this is really the type SEND, SEND_ONCE, etc. */ + name = dsc->disposition; + object = (ipc_object_t) CAST_MACH_NAME_TO_PORT(dsc->name); + dest_dsc->disposition = ipc_object_copyin_type(name); + dest_dsc->name = (mach_port_t)object; + dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR; + + if (!IO_VALID(object)) { + break; + } + + ipc_object_copyin_from_kernel(object, name); + + /* CDY avoid circularity when the destination is also */ + /* the kernel. This check should be changed into an */ + /* assert when the new kobject model is in place since*/ + /* ports will not be used in kernel to kernel chats */ + + if (((ipc_port_t)remote)->ip_receiver != ipc_space_kernel) { + if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) && + ipc_port_check_circularity((ipc_port_t) object, + (ipc_port_t) remote)) { + kmsg->ikm_header->msgh_bits |= + MACH_MSGH_BITS_CIRCULAR; + } + } + break; + } + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: { + /* The sender should supply ready-made memory, i.e. a vm_map_copy_t + * so we don't need to do anything special. 
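The dsc_adjust = 4 * count step above has a simple origin: a legacy (32-bit user) descriptor carries a 4-byte port name where the kernel form carries a pointer-sized field, and nothing else changes size, so each descriptor grows by exactly four bytes. A sketch of that size difference (field names and packing here are illustrative, not the mach/message.h bit-field layouts):

#include <stdint.h>

struct legacy_dsc_sketch { uint32_t name; uint32_t disposition_and_type; uint32_t pad; };      /* 12 bytes */
struct kernel_dsc_sketch { uint64_t name_ptr; uint32_t disposition_and_type; uint32_t pad; };  /* 16 bytes */

_Static_assert(sizeof(struct kernel_dsc_sketch) - sizeof(struct legacy_dsc_sketch) == 4,
    "kernel form is exactly 4 bytes wider per descriptor");

int
main(void)
{
	return 0;
}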
*/ + + mach_msg_ool_descriptor32_t *source_dsc = &saddr->out_of_line32; + mach_msg_ool_descriptor_t *dest_dsc = (typeof(dest_dsc)) & daddr->out_of_line; + + vm_offset_t address = source_dsc->address; + vm_size_t size = source_dsc->size; + boolean_t deallocate = source_dsc->deallocate; + mach_msg_copy_options_t copy = source_dsc->copy; + mach_msg_descriptor_type_t type = source_dsc->type; + + dest_dsc->address = (void *)address; + dest_dsc->size = size; + dest_dsc->deallocate = deallocate; + dest_dsc->copy = copy; + dest_dsc->type = type; + break; + } + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + ipc_object_t *objects; + unsigned int j; + mach_msg_type_name_t name; + mach_msg_ool_ports_descriptor_t *dest_dsc; + + mach_msg_ool_ports_descriptor32_t *source_dsc = &saddr->ool_ports32; + dest_dsc = (typeof(dest_dsc)) & daddr->ool_ports; + + boolean_t deallocate = source_dsc->deallocate; + mach_msg_copy_options_t copy = source_dsc->copy; + mach_msg_size_t port_count = source_dsc->count; + mach_msg_type_name_t disposition = source_dsc->disposition; + + /* this is really the type SEND, SEND_ONCE, etc. */ + name = disposition; + disposition = ipc_object_copyin_type(name); + + objects = (ipc_object_t *) (uintptr_t)source_dsc->address; + + for (j = 0; j < port_count; j++) { + ipc_object_t object = objects[j]; + + if (!IO_VALID(object)) { + continue; + } + + ipc_object_copyin_from_kernel(object, name); + + if ((disposition == MACH_MSG_TYPE_PORT_RECEIVE) && + ipc_port_check_circularity( + (ipc_port_t) object, + (ipc_port_t) remote)) { + kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR; + } + } + + dest_dsc->address = objects; + dest_dsc->deallocate = deallocate; + dest_dsc->copy = copy; + dest_dsc->disposition = disposition; + dest_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; + dest_dsc->count = port_count; + break; + } + default: { +#if MACH_ASSERT + panic("ipc_kmsg_copyin_from_kernel: bad descriptor"); +#endif /* MACH_ASSERT */ + } + } } - } } - } - return MACH_MSG_SUCCESS; + return MACH_MSG_SUCCESS; } #endif /* IKM_SUPPORT_LEGACY */ @@ -3598,7 +3650,7 @@ ipc_kmsg_copyin_from_kernel_legacy( * Nothing locked. * Returns: * MACH_MSG_SUCCESS Copied out port rights. - * MACH_RCV_INVALID_NOTIFY + * MACH_RCV_INVALID_NOTIFY * Notify is non-null and doesn't name a receive right. * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.) * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE @@ -3614,8 +3666,8 @@ ipc_kmsg_copyin_from_kernel_legacy( mach_msg_return_t ipc_kmsg_copyout_header( ipc_kmsg_t kmsg, - ipc_space_t space, - mach_msg_option_t option) + ipc_space_t space, + mach_msg_option_t option) { mach_msg_header_t *msg = kmsg->ikm_header; mach_msg_bits_t mbits = msg->msgh_bits; @@ -3630,293 +3682,299 @@ ipc_kmsg_copyout_header( */ ipc_port_spnotify(dest); - { - mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits); - mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits); - mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits); - ipc_port_t reply = msg->msgh_local_port; - ipc_port_t release_reply_port = IP_NULL; - mach_port_name_t dest_name, reply_name; - - ipc_port_t voucher = kmsg->ikm_voucher; - ipc_port_t release_voucher_port = IP_NULL; - mach_port_name_t voucher_name; - - uint32_t entries_held = 0; - boolean_t need_write_lock = FALSE; - kern_return_t kr; - - /* - * Reserve any potentially needed entries in the target space. - * We'll free any unused before unlocking the space. 
- */ - if (IP_VALID(reply)) { - entries_held++; - need_write_lock = TRUE; - } - if (IP_VALID(voucher)) { - assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); + { + mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits); + mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits); + mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits); + ipc_port_t reply = msg->msgh_local_port; + ipc_port_t release_reply_port = IP_NULL; + mach_port_name_t dest_name, reply_name; - if ((option & MACH_RCV_VOUCHER) != 0) - entries_held++; - need_write_lock = TRUE; - } + ipc_port_t voucher = kmsg->ikm_voucher; + ipc_port_t release_voucher_port = IP_NULL; + mach_port_name_t voucher_name; - if (need_write_lock) { + uint32_t entries_held = 0; + boolean_t need_write_lock = FALSE; + kern_return_t kr; - is_write_lock(space); + /* + * Reserve any potentially needed entries in the target space. + * We'll free any unused before unlocking the space. + */ + if (IP_VALID(reply)) { + entries_held++; + need_write_lock = TRUE; + } + if (IP_VALID(voucher)) { + assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); - while(entries_held) { - if (!is_active(space)) { - is_write_unlock(space); - return (MACH_RCV_HEADER_ERROR| - MACH_MSG_IPC_SPACE); + if ((option & MACH_RCV_VOUCHER) != 0) { + entries_held++; } - - kr = ipc_entries_hold(space, entries_held); - if (KERN_SUCCESS == kr) - break; - - kr = ipc_entry_grow_table(space, ITS_SIZE_NONE); - if (KERN_SUCCESS != kr) - return(MACH_RCV_HEADER_ERROR| - MACH_MSG_IPC_SPACE); - /* space was unlocked and relocked - retry */ + need_write_lock = TRUE; } - /* Handle reply port. */ - if (IP_VALID(reply)) { - ipc_entry_t entry; + if (need_write_lock) { + is_write_lock(space); - /* Is there already an entry we can use? */ - if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) && - ipc_right_reverse(space, (ipc_object_t) reply, &reply_name, &entry)) { - /* reply port is locked and active */ - assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE); - } else { - ip_lock(reply); - if (!ip_active(reply)) { - ip_unlock(reply); - - release_reply_port = reply; - reply = IP_DEAD; - reply_name = MACH_PORT_DEAD; - goto done_with_reply; + while (entries_held) { + if (!is_active(space)) { + is_write_unlock(space); + return MACH_RCV_HEADER_ERROR | + MACH_MSG_IPC_SPACE; } - - /* claim a held entry for the reply port */ - assert(entries_held > 0); - entries_held--; - ipc_entry_claim(space, &reply_name, &entry); - assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE); - assert(entry->ie_object == IO_NULL); - entry->ie_object = (ipc_object_t) reply; - } - - /* space and reply port are locked and active */ - ip_reference(reply); /* hold onto the reply port */ - kr = ipc_right_copyout(space, reply_name, entry, - reply_type, TRUE, (ipc_object_t) reply); - assert(kr == KERN_SUCCESS); - /* reply port is unlocked */ - } else - reply_name = CAST_MACH_PORT_TO_NAME(reply); - - done_with_reply: - - /* Handle voucher port. 
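The entry-reservation loop above follows a classic reserve-then-claim shape: hold all the table slots the copyout might need before touching anything, and if the table is full, grow it (which drops and retakes the space lock) and retry; only a failed grow aborts. A compact sketch of the pattern with invented names, not the ipc_entries_hold/ipc_entry_grow_table signatures:

#include <stdio.h>

static unsigned free_slots = 1; /* toy table state */

static int
table_hold(unsigned n) /* succeeds only if n slots are free */
{
	return n <= free_slots ? 0 : -1;
}

static int
table_grow(void) /* stands in for unlock-grow-relock */
{
	free_slots += 8;
	return 0;
}

int
reserve_entries(unsigned needed)
{
	for (;;) {
		if (table_hold(needed) == 0) {
			return 0;  /* slots guaranteed; claim them later */
		}
		if (table_grow() != 0) {
			return -1; /* maps to MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE */
		}
		/* lock was dropped during growth - re-check and retry */
	}
}

int
main(void)
{
	printf("%d\n", reserve_entries(4)); /* grows once, then succeeds */
	return 0;
}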
*/ - if (voucher_type != MACH_MSGH_BITS_ZERO) { - assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); + kr = ipc_entries_hold(space, entries_held); + if (KERN_SUCCESS == kr) { + break; + } - if (!IP_VALID(voucher)) { - if ((option & MACH_RCV_VOUCHER) == 0) { - voucher_type = MACH_MSGH_BITS_ZERO; + kr = ipc_entry_grow_table(space, ITS_SIZE_NONE); + if (KERN_SUCCESS != kr) { + return MACH_RCV_HEADER_ERROR | + MACH_MSG_IPC_SPACE; } - voucher_name = MACH_PORT_NULL; - goto done_with_voucher; + /* space was unlocked and relocked - retry */ } - - /* clear voucher from its hiding place back in the kmsg */ - kmsg->ikm_voucher = IP_NULL; - if ((option & MACH_RCV_VOUCHER) != 0) { + /* Handle reply port. */ + if (IP_VALID(reply)) { ipc_entry_t entry; - if (ipc_right_reverse(space, (ipc_object_t) voucher, - &voucher_name, &entry)) { - /* voucher port locked */ - assert(entry->ie_bits & MACH_PORT_TYPE_SEND); + /* Is there already an entry we can use? */ + if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) && + ipc_right_reverse(space, (ipc_object_t) reply, &reply_name, &entry)) { + /* reply port is locked and active */ + assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE); } else { + ip_lock(reply); + if (!ip_active(reply)) { + ip_unlock(reply); + + release_reply_port = reply; + reply = IP_DEAD; + reply_name = MACH_PORT_DEAD; + goto done_with_reply; + } + + /* claim a held entry for the reply port */ assert(entries_held > 0); entries_held--; - ipc_entry_claim(space, &voucher_name, &entry); + ipc_entry_claim(space, &reply_name, &entry); assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE); - assert(entry->ie_object == IO_NULL); - entry->ie_object = (ipc_object_t) voucher; - ip_lock(voucher); + assert(entry->ie_object == IO_NULL); + entry->ie_object = (ipc_object_t) reply; } - /* space is locked and active */ - - assert(ip_active(voucher)); - assert(ip_kotype(voucher) == IKOT_VOUCHER); - kr = ipc_right_copyout(space, voucher_name, entry, - MACH_MSG_TYPE_MOVE_SEND, TRUE, - (ipc_object_t) voucher); - /* voucher port is unlocked */ + + /* space and reply port are locked and active */ + ip_reference(reply); /* hold onto the reply port */ + + kr = ipc_right_copyout(space, reply_name, entry, + reply_type, TRUE, (ipc_object_t) reply); + assert(kr == KERN_SUCCESS); + /* reply port is unlocked */ } else { - voucher_type = MACH_MSGH_BITS_ZERO; - release_voucher_port = voucher; - voucher_name = MACH_PORT_NULL; + reply_name = CAST_MACH_PORT_TO_NAME(reply); } - } else { - voucher_name = msg->msgh_voucher_port; - } - done_with_voucher: +done_with_reply: - ip_lock(dest); - is_write_unlock(space); + /* Handle voucher port. */ + if (voucher_type != MACH_MSGH_BITS_ZERO) { + assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); - } else { - /* - * No reply or voucher port! This is an easy case. - * We only need to have the space locked - * when locking the destination. 
- */ + if (!IP_VALID(voucher)) { + if ((option & MACH_RCV_VOUCHER) == 0) { + voucher_type = MACH_MSGH_BITS_ZERO; + } + voucher_name = MACH_PORT_NULL; + goto done_with_voucher; + } - is_read_lock(space); - if (!is_active(space)) { - is_read_unlock(space); - return MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE; - } + /* clear voucher from its hiding place back in the kmsg */ + kmsg->ikm_voucher = IP_NULL; + + if ((option & MACH_RCV_VOUCHER) != 0) { + ipc_entry_t entry; + + if (ipc_right_reverse(space, (ipc_object_t) voucher, + &voucher_name, &entry)) { + /* voucher port locked */ + assert(entry->ie_bits & MACH_PORT_TYPE_SEND); + } else { + assert(entries_held > 0); + entries_held--; + ipc_entry_claim(space, &voucher_name, &entry); + assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE); + assert(entry->ie_object == IO_NULL); + entry->ie_object = (ipc_object_t) voucher; + ip_lock(voucher); + } + /* space is locked and active */ + + assert(ip_active(voucher)); + assert(ip_kotype(voucher) == IKOT_VOUCHER); + kr = ipc_right_copyout(space, voucher_name, entry, + MACH_MSG_TYPE_MOVE_SEND, TRUE, + (ipc_object_t) voucher); + /* voucher port is unlocked */ + } else { + voucher_type = MACH_MSGH_BITS_ZERO; + release_voucher_port = voucher; + voucher_name = MACH_PORT_NULL; + } + } else { + voucher_name = msg->msgh_voucher_port; + } - ip_lock(dest); - is_read_unlock(space); +done_with_voucher: - reply_name = CAST_MACH_PORT_TO_NAME(reply); + ip_lock(dest); + is_write_unlock(space); + } else { + /* + * No reply or voucher port! This is an easy case. + * We only need to have the space locked + * when locking the destination. + */ - if (voucher_type != MACH_MSGH_BITS_ZERO) { - assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); - if ((option & MACH_RCV_VOUCHER) == 0) { - voucher_type = MACH_MSGH_BITS_ZERO; + is_read_lock(space); + if (!is_active(space)) { + is_read_unlock(space); + return MACH_RCV_HEADER_ERROR | MACH_MSG_IPC_SPACE; + } + + ip_lock(dest); + is_read_unlock(space); + + reply_name = CAST_MACH_PORT_TO_NAME(reply); + + if (voucher_type != MACH_MSGH_BITS_ZERO) { + assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); + if ((option & MACH_RCV_VOUCHER) == 0) { + voucher_type = MACH_MSGH_BITS_ZERO; + } + voucher_name = MACH_PORT_NULL; + } else { + voucher_name = msg->msgh_voucher_port; } - voucher_name = MACH_PORT_NULL; - } else { - voucher_name = msg->msgh_voucher_port; } - } - /* - * At this point, the space is unlocked and the destination - * port is locked. (Lock taken while space was locked.) - * reply_name is taken care of; we still need dest_name. - * We still hold a ref for reply (if it is valid). - * - * If the space holds receive rights for the destination, - * we return its name for the right. Otherwise the task - * managed to destroy or give away the receive right between - * receiving the message and this copyout. If the destination - * is dead, return MACH_PORT_DEAD, and if the receive right - * exists somewhere else (another space, in transit) - * return MACH_PORT_NULL. - * - * Making this copyout operation atomic with the previous - * copyout of the reply port is a bit tricky. If there was - * no real reply port (it wasn't IP_VALID) then this isn't - * an issue. If the reply port was dead at copyout time, - * then we are OK, because if dest is dead we serialize - * after the death of both ports and if dest is alive - * we serialize after reply died but before dest's (later) death. - * So assume reply was alive when we copied it out. 
If dest - * is alive, then we are OK because we serialize before - * the ports' deaths. So assume dest is dead when we look at it. - * If reply dies/died after dest, then we are OK because - * we serialize after dest died but before reply dies. - * So the hard case is when reply is alive at copyout, - * dest is dead at copyout, and reply died before dest died. - * In this case pretend that dest is still alive, so - * we serialize while both ports are alive. - * - * Because the space lock is held across the copyout of reply - * and locking dest, the receive right for dest can't move - * in or out of the space while the copyouts happen, so - * that isn't an atomicity problem. In the last hard case - * above, this implies that when dest is dead that the - * space couldn't have had receive rights for dest at - * the time reply was copied-out, so when we pretend - * that dest is still alive, we can return MACH_PORT_NULL. - * - * If dest == reply, then we have to make it look like - * either both copyouts happened before the port died, - * or both happened after the port died. This special - * case works naturally if the timestamp comparison - * is done correctly. - */ + /* + * At this point, the space is unlocked and the destination + * port is locked. (Lock taken while space was locked.) + * reply_name is taken care of; we still need dest_name. + * We still hold a ref for reply (if it is valid). + * + * If the space holds receive rights for the destination, + * we return its name for the right. Otherwise the task + * managed to destroy or give away the receive right between + * receiving the message and this copyout. If the destination + * is dead, return MACH_PORT_DEAD, and if the receive right + * exists somewhere else (another space, in transit) + * return MACH_PORT_NULL. + * + * Making this copyout operation atomic with the previous + * copyout of the reply port is a bit tricky. If there was + * no real reply port (it wasn't IP_VALID) then this isn't + * an issue. If the reply port was dead at copyout time, + * then we are OK, because if dest is dead we serialize + * after the death of both ports and if dest is alive + * we serialize after reply died but before dest's (later) death. + * So assume reply was alive when we copied it out. If dest + * is alive, then we are OK because we serialize before + * the ports' deaths. So assume dest is dead when we look at it. + * If reply dies/died after dest, then we are OK because + * we serialize after dest died but before reply dies. + * So the hard case is when reply is alive at copyout, + * dest is dead at copyout, and reply died before dest died. + * In this case pretend that dest is still alive, so + * we serialize while both ports are alive. + * + * Because the space lock is held across the copyout of reply + * and locking dest, the receive right for dest can't move + * in or out of the space while the copyouts happen, so + * that isn't an atomicity problem. In the last hard case + * above, this implies that when dest is dead that the + * space couldn't have had receive rights for dest at + * the time reply was copied-out, so when we pretend + * that dest is still alive, we can return MACH_PORT_NULL. + * + * If dest == reply, then we have to make it look like + * either both copyouts happened before the port died, + * or both happened after the port died. This special + * case works naturally if the timestamp comparison + * is done correctly. 
+ */ - if (ip_active(dest)) { - ipc_object_copyout_dest(space, (ipc_object_t) dest, - dest_type, &dest_name); - /* dest is unlocked */ + if (ip_active(dest)) { + ipc_object_copyout_dest(space, (ipc_object_t) dest, + dest_type, &dest_name); + /* dest is unlocked */ + } else { + ipc_port_timestamp_t timestamp; - } else { - ipc_port_timestamp_t timestamp; + timestamp = dest->ip_timestamp; + ip_unlock(dest); + ip_release(dest); - timestamp = dest->ip_timestamp; - ip_unlock(dest); - ip_release(dest); + if (IP_VALID(reply)) { + ip_lock(reply); + if (ip_active(reply) || + IP_TIMESTAMP_ORDER(timestamp, + reply->ip_timestamp)) { + dest_name = MACH_PORT_DEAD; + } else { + dest_name = MACH_PORT_NULL; + } + ip_unlock(reply); + } else { + dest_name = MACH_PORT_DEAD; + } + } if (IP_VALID(reply)) { - ip_lock(reply); - if (ip_active(reply) || - IP_TIMESTAMP_ORDER(timestamp, - reply->ip_timestamp)) - dest_name = MACH_PORT_DEAD; - else - dest_name = MACH_PORT_NULL; - ip_unlock(reply); - } else - dest_name = MACH_PORT_DEAD; - } + ip_release(reply); + } - if (IP_VALID(reply)) - ip_release(reply); + if (IP_VALID(release_reply_port)) { + if (reply_type == MACH_MSG_TYPE_PORT_SEND_ONCE) { + ipc_port_release_sonce(release_reply_port); + } else { + ipc_port_release_send(release_reply_port); + } + } - if (IP_VALID(release_reply_port)) { - if (reply_type == MACH_MSG_TYPE_PORT_SEND_ONCE) - ipc_port_release_sonce(release_reply_port); - else - ipc_port_release_send(release_reply_port); - } + if (IP_VALID(release_voucher_port)) { + ipc_port_release_send(release_voucher_port); + } - if (IP_VALID(release_voucher_port)) - ipc_port_release_send(release_voucher_port); + if ((option & MACH_RCV_VOUCHER) != 0) { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM((uintptr_t)kmsg), + (uintptr_t)kmsg->ikm_header->msgh_bits, + (uintptr_t)kmsg->ikm_header->msgh_id, + VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)), + 0); + } else { + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM((uintptr_t)kmsg), + (uintptr_t)kmsg->ikm_header->msgh_bits, + (uintptr_t)kmsg->ikm_header->msgh_id, + VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)), + 0); + } - if ((option & MACH_RCV_VOUCHER) != 0) { - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM((uintptr_t)kmsg), - (uintptr_t)kmsg->ikm_header->msgh_bits, - (uintptr_t)kmsg->ikm_header->msgh_id, - VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)), - 0); - } else { - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM((uintptr_t)kmsg), - (uintptr_t)kmsg->ikm_header->msgh_bits, - (uintptr_t)kmsg->ikm_header->msgh_id, - VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)), - 0); - } - - msg->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type, - voucher_type, mbits); - msg->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name); - msg->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name); - msg->msgh_voucher_port = voucher_name; - } - - return MACH_MSG_SUCCESS; + msg->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type, + voucher_type, mbits); + msg->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name); + msg->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name); + msg->msgh_voucher_port = voucher_name; + } + + return MACH_MSG_SUCCESS; } /* @@ -3938,10 +3996,10 @@ 
ipc_kmsg_copyout_header( mach_msg_return_t ipc_kmsg_copyout_object( - ipc_space_t space, - ipc_object_t object, - mach_msg_type_name_t msgt_name, - mach_port_name_t *namep) + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + mach_port_name_t *namep) { kern_return_t kr; @@ -3954,15 +4012,16 @@ ipc_kmsg_copyout_object( if (kr != KERN_SUCCESS) { ipc_object_destroy(object, msgt_name); - if (kr == KERN_INVALID_CAPABILITY) + if (kr == KERN_INVALID_CAPABILITY) { *namep = MACH_PORT_DEAD; - else { + } else { *namep = MACH_PORT_NULL; - if (kr == KERN_RESOURCE_SHORTAGE) + if (kr == KERN_RESOURCE_SHORTAGE) { return MACH_MSG_IPC_KERNEL; - else + } else { return MACH_MSG_IPC_SPACE; + } } } @@ -3971,48 +4030,47 @@ ipc_kmsg_copyout_object( mach_msg_descriptor_t * ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc, - mach_msg_descriptor_t *user_dsc, - ipc_space_t space, - kern_return_t *mr); + mach_msg_descriptor_t *user_dsc, + ipc_space_t space, + kern_return_t *mr); mach_msg_descriptor_t * ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc, - mach_msg_descriptor_t *dest_dsc, - ipc_space_t space, - kern_return_t *mr) + mach_msg_descriptor_t *dest_dsc, + ipc_space_t space, + kern_return_t *mr) { - mach_port_t port; - mach_port_name_t name; - mach_msg_type_name_t disp; - - - /* Copyout port right carried in the message */ - port = dsc->port.name; - disp = dsc->port.disposition; - *mr |= ipc_kmsg_copyout_object(space, - (ipc_object_t)port, - disp, - &name); - - if(current_task() == kernel_task) - { - mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc; - user_dsc--; // point to the start of this port descriptor - bzero((void *)user_dsc, sizeof(*user_dsc)); - user_dsc->name = CAST_MACH_NAME_TO_PORT(name); - user_dsc->disposition = disp; - user_dsc->type = MACH_MSG_PORT_DESCRIPTOR; - dest_dsc = (typeof(dest_dsc))user_dsc; - } else { - mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc; - user_dsc--; // point to the start of this port descriptor - bzero((void *)user_dsc, sizeof(*user_dsc)); - user_dsc->name = CAST_MACH_PORT_TO_NAME(name); - user_dsc->disposition = disp; - user_dsc->type = MACH_MSG_PORT_DESCRIPTOR; - dest_dsc = (typeof(dest_dsc))user_dsc; - } - - return (mach_msg_descriptor_t *)dest_dsc; + mach_port_t port; + mach_port_name_t name; + mach_msg_type_name_t disp; + + + /* Copyout port right carried in the message */ + port = dsc->port.name; + disp = dsc->port.disposition; + *mr |= ipc_kmsg_copyout_object(space, + (ipc_object_t)port, + disp, + &name); + + if (current_task() == kernel_task) { + mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc; + user_dsc--; // point to the start of this port descriptor + bzero((void *)user_dsc, sizeof(*user_dsc)); + user_dsc->name = CAST_MACH_NAME_TO_PORT(name); + user_dsc->disposition = disp; + user_dsc->type = MACH_MSG_PORT_DESCRIPTOR; + dest_dsc = (typeof(dest_dsc))user_dsc; + } else { + mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc; + user_dsc--; // point to the start of this port descriptor + bzero((void *)user_dsc, sizeof(*user_dsc)); + user_dsc->name = CAST_MACH_PORT_TO_NAME(name); + user_dsc->disposition = disp; + user_dsc->type = MACH_MSG_PORT_DESCRIPTOR; + dest_dsc = (typeof(dest_dsc))user_dsc; + } + + return (mach_msg_descriptor_t *)dest_dsc; } mach_msg_descriptor_t * @@ -4020,247 +4078,251 @@ ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descrip mach_msg_descriptor_t * 
ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr) { - vm_map_copy_t copy; - vm_map_address_t rcv_addr; - mach_msg_copy_options_t copy_options; - vm_map_size_t size; - mach_msg_descriptor_type_t dsc_type; - - //SKIP_PORT_DESCRIPTORS(saddr, sdsc_count); - - copy = (vm_map_copy_t)dsc->address; - size = (vm_map_size_t)dsc->size; - copy_options = dsc->copy; - assert(copy_options != MACH_MSG_KALLOC_COPY_T); - dsc_type = dsc->type; - - if (copy != VM_MAP_COPY_NULL) { - kern_return_t kr; + vm_map_copy_t copy; + vm_map_address_t rcv_addr; + mach_msg_copy_options_t copy_options; + vm_map_size_t size; + mach_msg_descriptor_type_t dsc_type; + + //SKIP_PORT_DESCRIPTORS(saddr, sdsc_count); + + copy = (vm_map_copy_t)dsc->address; + size = (vm_map_size_t)dsc->size; + copy_options = dsc->copy; + assert(copy_options != MACH_MSG_KALLOC_COPY_T); + dsc_type = dsc->type; + + if (copy != VM_MAP_COPY_NULL) { + kern_return_t kr; + + rcv_addr = 0; + if (vm_map_copy_validate_size(map, copy, &size) == FALSE) { + panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p", + dsc, dsc->size, (unsigned long long)copy->size, copy); + } + kr = vm_map_copyout_size(map, &rcv_addr, copy, size); + if (kr != KERN_SUCCESS) { + if (kr == KERN_RESOURCE_SHORTAGE) { + *mr |= MACH_MSG_VM_KERNEL; + } else { + *mr |= MACH_MSG_VM_SPACE; + } + vm_map_copy_discard(copy); + rcv_addr = 0; + size = 0; + } + } else { + rcv_addr = 0; + size = 0; + } - rcv_addr = 0; - if (vm_map_copy_validate_size(map, copy, &size) == FALSE) - panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p", - dsc, dsc->size, (unsigned long long)copy->size, copy); - kr = vm_map_copyout_size(map, &rcv_addr, copy, size); - if (kr != KERN_SUCCESS) { - if (kr == KERN_RESOURCE_SHORTAGE) - *mr |= MACH_MSG_VM_KERNEL; - else - *mr |= MACH_MSG_VM_SPACE; - vm_map_copy_discard(copy); - rcv_addr = 0; - size = 0; - } - } else { - rcv_addr = 0; - size = 0; - } - - /* - * Now update the descriptor as the user would see it. - * This may require expanding the descriptor to the user - * visible size. There is already space allocated for - * this in what naddr points to. - */ - if(current_task() == kernel_task) - { - mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - user_ool_dsc--; - bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); - - user_ool_dsc->address = (void *)(uintptr_t)rcv_addr; - user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? - TRUE : FALSE; - user_ool_dsc->copy = copy_options; - user_ool_dsc->type = dsc_type; - user_ool_dsc->size = (mach_msg_size_t)size; - - user_dsc = (typeof(user_dsc))user_ool_dsc; - } else if (is_64bit) { - mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - user_ool_dsc--; - bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); - - user_ool_dsc->address = rcv_addr; - user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? - TRUE : FALSE; - user_ool_dsc->copy = copy_options; - user_ool_dsc->type = dsc_type; - user_ool_dsc->size = (mach_msg_size_t)size; - - user_dsc = (typeof(user_dsc))user_ool_dsc; - } else { - mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - user_ool_dsc--; - bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); - - user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr); - user_ool_dsc->size = (mach_msg_size_t)size; - user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? 
- TRUE : FALSE; - user_ool_dsc->copy = copy_options; - user_ool_dsc->type = dsc_type; - - user_dsc = (typeof(user_dsc))user_ool_dsc; - } - return user_dsc; + /* + * Now update the descriptor as the user would see it. + * This may require expanding the descriptor to the user + * visible size. There is already space allocated for + * this in what naddr points to. + */ + if (current_task() == kernel_task) { + mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + user_ool_dsc--; + bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); + + user_ool_dsc->address = (void *)(uintptr_t)rcv_addr; + user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? + TRUE : FALSE; + user_ool_dsc->copy = copy_options; + user_ool_dsc->type = dsc_type; + user_ool_dsc->size = (mach_msg_size_t)size; + + user_dsc = (typeof(user_dsc))user_ool_dsc; + } else if (is_64bit) { + mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + user_ool_dsc--; + bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); + + user_ool_dsc->address = rcv_addr; + user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? + TRUE : FALSE; + user_ool_dsc->copy = copy_options; + user_ool_dsc->type = dsc_type; + user_ool_dsc->size = (mach_msg_size_t)size; + + user_dsc = (typeof(user_dsc))user_ool_dsc; + } else { + mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + user_ool_dsc--; + bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); + + user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr); + user_ool_dsc->size = (mach_msg_size_t)size; + user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? + TRUE : FALSE; + user_ool_dsc->copy = copy_options; + user_ool_dsc->type = dsc_type; + + user_dsc = (typeof(user_dsc))user_ool_dsc; + } + return user_dsc; } mach_msg_descriptor_t * ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc, - mach_msg_descriptor_t *user_dsc, - int is_64bit, - vm_map_t map, - ipc_space_t space, - ipc_kmsg_t kmsg, - mach_msg_return_t *mr); + mach_msg_descriptor_t *user_dsc, + int is_64bit, + vm_map_t map, + ipc_space_t space, + ipc_kmsg_t kmsg, + mach_msg_return_t *mr); mach_msg_descriptor_t * ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc, - mach_msg_descriptor_t *user_dsc, - int is_64bit, - vm_map_t map, - ipc_space_t space, - ipc_kmsg_t kmsg, - mach_msg_return_t *mr) + mach_msg_descriptor_t *user_dsc, + int is_64bit, + vm_map_t map, + ipc_space_t space, + ipc_kmsg_t kmsg, + mach_msg_return_t *mr) { - mach_vm_offset_t rcv_addr = 0; - mach_msg_type_name_t disp; - mach_msg_type_number_t count, i; - vm_size_t ports_length, names_length; - - mach_msg_copy_options_t copy_options = MACH_MSG_VIRTUAL_COPY; + mach_vm_offset_t rcv_addr = 0; + mach_msg_type_name_t disp; + mach_msg_type_number_t count, i; + vm_size_t ports_length, names_length; - //SKIP_PORT_DESCRIPTORS(saddr, sdsc_count); + mach_msg_copy_options_t copy_options = MACH_MSG_VIRTUAL_COPY; - count = dsc->count; - disp = dsc->disposition; - ports_length = count * sizeof(mach_port_t); - names_length = count * sizeof(mach_port_name_t); + //SKIP_PORT_DESCRIPTORS(saddr, sdsc_count); - if (ports_length != 0 && dsc->address != 0) { + count = dsc->count; + disp = dsc->disposition; + ports_length = count * sizeof(mach_port_t); + names_length = count * sizeof(mach_port_name_t); - /* - * Check to see if there is an overwrite descriptor - * specified in the scatter list for this ool data. - * The descriptor has already been verified. 
- */ + if (ports_length != 0 && dsc->address != 0) { + /* + * Check to see if there is an overwrite descriptor + * specified in the scatter list for this ool data. + * The descriptor has already been verified. + */ #if 0 - if (saddr != MACH_MSG_DESCRIPTOR_NULL) { - if (differs) { - OTHER_OOL_DESCRIPTOR *scatter_dsc; - - scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr; - rcv_addr = (mach_vm_offset_t) scatter_dsc->address; - copy_options = scatter_dsc->copy; - } else { - mach_msg_ool_descriptor_t *scatter_dsc; - - scatter_dsc = &saddr->out_of_line; - rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address); - copy_options = scatter_dsc->copy; - } - INCREMENT_SCATTER(saddr, sdsc_count, differs); - } + if (saddr != MACH_MSG_DESCRIPTOR_NULL) { + if (differs) { + OTHER_OOL_DESCRIPTOR *scatter_dsc; + + scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr; + rcv_addr = (mach_vm_offset_t) scatter_dsc->address; + copy_options = scatter_dsc->copy; + } else { + mach_msg_ool_descriptor_t *scatter_dsc; + + scatter_dsc = &saddr->out_of_line; + rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address); + copy_options = scatter_dsc->copy; + } + INCREMENT_SCATTER(saddr, sdsc_count, differs); + } #endif - if (copy_options == MACH_MSG_VIRTUAL_COPY) { - /* - * Dynamically allocate the region - */ - vm_tag_t tag; - if (vm_kernel_map_is_kernel(map)) tag = VM_KERN_MEMORY_IPC; - else tag = VM_MEMORY_MACH_MSG; - - kern_return_t kr; - if ((kr = mach_vm_allocate_kernel(map, &rcv_addr, - (mach_vm_size_t)names_length, - VM_FLAGS_ANYWHERE, tag)) != KERN_SUCCESS) { - ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc); - rcv_addr = 0; - - if (kr == KERN_RESOURCE_SHORTAGE){ - *mr |= MACH_MSG_VM_KERNEL; - } else { - *mr |= MACH_MSG_VM_SPACE; - } - } - } - - /* - * Handle the port rights and copy out the names - * for those rights out to user-space. - */ - if (rcv_addr != 0) { - mach_port_t *objects = (mach_port_t *) dsc->address; - mach_port_name_t *names = (mach_port_name_t *) dsc->address; - - /* copyout port rights carried in the message */ - - for ( i = 0; i < count ; i++) { - ipc_object_t object = (ipc_object_t)objects[i]; - - *mr |= ipc_kmsg_copyout_object(space, object, - disp, &names[i]); - } - - /* copyout to memory allocated above */ - void *data = dsc->address; - if (copyoutmap(map, data, rcv_addr, names_length) != KERN_SUCCESS) - *mr |= MACH_MSG_VM_SPACE; - kfree(data, ports_length); - } - } else { - rcv_addr = 0; - } - - /* - * Now update the descriptor based on the information - * calculated above. - */ - if(current_task() == kernel_task) { - mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - user_ool_dsc--; - bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); - - user_ool_dsc->address = (void *)(uintptr_t)rcv_addr; - user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? - TRUE : FALSE; - user_ool_dsc->copy = copy_options; - user_ool_dsc->disposition = disp; - user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; - user_ool_dsc->count = count; - - user_dsc = (typeof(user_dsc))user_ool_dsc; - } if (is_64bit) { - mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - user_ool_dsc--; - bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); - - user_ool_dsc->address = rcv_addr; - user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? 
- TRUE : FALSE; - user_ool_dsc->copy = copy_options; - user_ool_dsc->disposition = disp; - user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; - user_ool_dsc->count = count; - - user_dsc = (typeof(user_dsc))user_ool_dsc; - } else { - mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; - user_ool_dsc--; - bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); - - user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr); - user_ool_dsc->count = count; - user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? - TRUE : FALSE; - user_ool_dsc->copy = copy_options; - user_ool_dsc->disposition = disp; - user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; - - user_dsc = (typeof(user_dsc))user_ool_dsc; - } - return user_dsc; + if (copy_options == MACH_MSG_VIRTUAL_COPY) { + /* + * Dynamically allocate the region + */ + vm_tag_t tag; + if (vm_kernel_map_is_kernel(map)) { + tag = VM_KERN_MEMORY_IPC; + } else { + tag = VM_MEMORY_MACH_MSG; + } + + kern_return_t kr; + if ((kr = mach_vm_allocate_kernel(map, &rcv_addr, + (mach_vm_size_t)names_length, + VM_FLAGS_ANYWHERE, tag)) != KERN_SUCCESS) { + ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc); + rcv_addr = 0; + + if (kr == KERN_RESOURCE_SHORTAGE) { + *mr |= MACH_MSG_VM_KERNEL; + } else { + *mr |= MACH_MSG_VM_SPACE; + } + } + } + + /* + * Handle the port rights and copy out the names + * for those rights out to user-space. + */ + if (rcv_addr != 0) { + mach_port_t *objects = (mach_port_t *) dsc->address; + mach_port_name_t *names = (mach_port_name_t *) dsc->address; + + /* copyout port rights carried in the message */ + + for (i = 0; i < count; i++) { + ipc_object_t object = (ipc_object_t)objects[i]; + + *mr |= ipc_kmsg_copyout_object(space, object, + disp, &names[i]); + } + + /* copyout to memory allocated above */ + void *data = dsc->address; + if (copyoutmap(map, data, rcv_addr, names_length) != KERN_SUCCESS) { + *mr |= MACH_MSG_VM_SPACE; + } + kfree(data, ports_length); + } + } else { + rcv_addr = 0; + } + + /* + * Now update the descriptor based on the information + * calculated above. + */ + if (current_task() == kernel_task) { + mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + user_ool_dsc--; + bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); + + user_ool_dsc->address = (void *)(uintptr_t)rcv_addr; + user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? + TRUE : FALSE; + user_ool_dsc->copy = copy_options; + user_ool_dsc->disposition = disp; + user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; + user_ool_dsc->count = count; + + user_dsc = (typeof(user_dsc))user_ool_dsc; + } else if (is_64bit) { + mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + user_ool_dsc--; + bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); + + user_ool_dsc->address = rcv_addr; + user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? + TRUE : FALSE; + user_ool_dsc->copy = copy_options; + user_ool_dsc->disposition = disp; + user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; + user_ool_dsc->count = count; + + user_dsc = (typeof(user_dsc))user_ool_dsc; + } else { + mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc; + user_ool_dsc--; + bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc)); + + user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr); + user_ool_dsc->count = count; + user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ? 
+ TRUE : FALSE; + user_ool_dsc->copy = copy_options; + user_ool_dsc->disposition = disp; + user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR; + + user_dsc = (typeof(user_dsc))user_ool_dsc; + } + return user_dsc; } /* @@ -4284,67 +4346,65 @@ ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc, mach_msg_return_t ipc_kmsg_copyout_body( - ipc_kmsg_t kmsg, - ipc_space_t space, - vm_map_t map, - mach_msg_body_t *slist) + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist) { - mach_msg_body_t *body; - mach_msg_descriptor_t *kern_dsc, *user_dsc; - mach_msg_descriptor_t *saddr; - mach_msg_type_number_t dsc_count, sdsc_count; - int i; - mach_msg_return_t mr = MACH_MSG_SUCCESS; - boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS); - - body = (mach_msg_body_t *) (kmsg->ikm_header + 1); - dsc_count = body->msgh_descriptor_count; - kern_dsc = (mach_msg_descriptor_t *) (body + 1); - /* Point user_dsc just after the end of all the descriptors */ - user_dsc = &kern_dsc[dsc_count]; - - /* Do scatter list setup */ - if (slist != MACH_MSG_BODY_NULL) { - panic("Scatter lists disabled"); - saddr = (mach_msg_descriptor_t *) (slist + 1); - sdsc_count = slist->msgh_descriptor_count; - } - else { - saddr = MACH_MSG_DESCRIPTOR_NULL; - sdsc_count = 0; - } - - /* Now process the descriptors */ - for (i = dsc_count-1; i >= 0; i--) { - switch (kern_dsc[i].type.type) { - - case MACH_MSG_PORT_DESCRIPTOR: - user_dsc = ipc_kmsg_copyout_port_descriptor(&kern_dsc[i], user_dsc, space, &mr); - break; - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_DESCRIPTOR : - user_dsc = ipc_kmsg_copyout_ool_descriptor( - (mach_msg_ool_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, &mr); - break; - case MACH_MSG_OOL_PORTS_DESCRIPTOR : - user_dsc = ipc_kmsg_copyout_ool_ports_descriptor( - (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, space, kmsg, &mr); - break; - default : { - panic("untyped IPC copyout body: invalid message descriptor"); - } - } - } - - if(user_dsc != kern_dsc) { - vm_offset_t dsc_adjust = (vm_offset_t)user_dsc - (vm_offset_t)kern_dsc; - memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t)); - kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust); - /* Update the message size for the smaller user representation */ - kmsg->ikm_header->msgh_size -= (mach_msg_size_t)dsc_adjust; - } - - return mr; + mach_msg_body_t *body; + mach_msg_descriptor_t *kern_dsc, *user_dsc; + mach_msg_descriptor_t *saddr; + mach_msg_type_number_t dsc_count, sdsc_count; + int i; + mach_msg_return_t mr = MACH_MSG_SUCCESS; + boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS); + + body = (mach_msg_body_t *) (kmsg->ikm_header + 1); + dsc_count = body->msgh_descriptor_count; + kern_dsc = (mach_msg_descriptor_t *) (body + 1); + /* Point user_dsc just after the end of all the descriptors */ + user_dsc = &kern_dsc[dsc_count]; + + /* Do scatter list setup */ + if (slist != MACH_MSG_BODY_NULL) { + panic("Scatter lists disabled"); + saddr = (mach_msg_descriptor_t *) (slist + 1); + sdsc_count = slist->msgh_descriptor_count; + } else { + saddr = MACH_MSG_DESCRIPTOR_NULL; + sdsc_count = 0; + } + + /* Now process the descriptors */ + for (i = dsc_count - 1; i >= 0; i--) { + switch (kern_dsc[i].type.type) { + case MACH_MSG_PORT_DESCRIPTOR: + user_dsc = ipc_kmsg_copyout_port_descriptor(&kern_dsc[i], user_dsc, space, &mr); + break; + case 
MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: + user_dsc = ipc_kmsg_copyout_ool_descriptor( + (mach_msg_ool_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, &mr); + break; + case MACH_MSG_OOL_PORTS_DESCRIPTOR: + user_dsc = ipc_kmsg_copyout_ool_ports_descriptor( + (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, space, kmsg, &mr); + break; + default: { + panic("untyped IPC copyout body: invalid message descriptor"); + } + } + } + + if (user_dsc != kern_dsc) { + vm_offset_t dsc_adjust = (vm_offset_t)user_dsc - (vm_offset_t)kern_dsc; + memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t)); + kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust); + /* Update the message size for the smaller user representation */ + kmsg->ikm_header->msgh_size -= (mach_msg_size_t)dsc_adjust; + } + + return mr; } /* @@ -4362,45 +4422,45 @@ ipc_kmsg_copyout_body( mach_msg_size_t ipc_kmsg_copyout_size( - ipc_kmsg_t kmsg, - vm_map_t map) + ipc_kmsg_t kmsg, + vm_map_t map) { - mach_msg_size_t send_size; + mach_msg_size_t send_size; - send_size = kmsg->ikm_header->msgh_size; + send_size = kmsg->ikm_header->msgh_size; - boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS); + boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS); #if defined(__LP64__) send_size -= LEGACY_HEADER_SIZE_DELTA; #endif - if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) { - - mach_msg_body_t *body; - mach_msg_descriptor_t *saddr, *eaddr; - - body = (mach_msg_body_t *) (kmsg->ikm_header + 1); - saddr = (mach_msg_descriptor_t *) (body + 1); - eaddr = saddr + body->msgh_descriptor_count; - - for ( ; saddr < eaddr; saddr++ ) { - switch (saddr->type.type) { - case MACH_MSG_OOL_DESCRIPTOR: - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_PORTS_DESCRIPTOR: - if(!is_task_64bit) - send_size -= DESC_SIZE_ADJUSTMENT; - break; - case MACH_MSG_PORT_DESCRIPTOR: - send_size -= DESC_SIZE_ADJUSTMENT; - break; - default: - break; - } - } - } - return send_size; + if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) { + mach_msg_body_t *body; + mach_msg_descriptor_t *saddr, *eaddr; + + body = (mach_msg_body_t *) (kmsg->ikm_header + 1); + saddr = (mach_msg_descriptor_t *) (body + 1); + eaddr = saddr + body->msgh_descriptor_count; + + for (; saddr < eaddr; saddr++) { + switch (saddr->type.type) { + case MACH_MSG_OOL_DESCRIPTOR: + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_PORTS_DESCRIPTOR: + if (!is_task_64bit) { + send_size -= DESC_SIZE_ADJUSTMENT; + } + break; + case MACH_MSG_PORT_DESCRIPTOR: + send_size -= DESC_SIZE_ADJUSTMENT; + break; + default: + break; + } + } + } + return send_size; } /* @@ -4421,11 +4481,11 @@ ipc_kmsg_copyout_size( mach_msg_return_t ipc_kmsg_copyout( - ipc_kmsg_t kmsg, - ipc_space_t space, - vm_map_t map, - mach_msg_body_t *slist, - mach_msg_option_t option) + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist, + mach_msg_option_t option) { mach_msg_return_t mr; @@ -4437,8 +4497,9 @@ ipc_kmsg_copyout( if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) { mr = ipc_kmsg_copyout_body(kmsg, space, map, slist); - if (mr != MACH_MSG_SUCCESS) + if (mr != MACH_MSG_SUCCESS) { mr |= MACH_RCV_BODY_ERROR; + } } return mr; @@ -4466,10 +4527,10 @@ ipc_kmsg_copyout( mach_msg_return_t ipc_kmsg_copyout_pseudo( - ipc_kmsg_t kmsg, - ipc_space_t space, - vm_map_t map, - mach_msg_body_t *slist) + ipc_kmsg_t kmsg, + ipc_space_t 
space, + vm_map_t map, + mach_msg_body_t *slist) { mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits; ipc_object_t dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port; @@ -4500,7 +4561,7 @@ ipc_kmsg_copyout_pseudo( #endif mr = (ipc_kmsg_copyout_object(space, dest, dest_type, &dest_name) | - ipc_kmsg_copyout_object(space, reply, reply_type, &reply_name)); + ipc_kmsg_copyout_object(space, reply, reply_type, &reply_name)); kmsg->ikm_header->msgh_bits = mbits & MACH_MSGH_BITS_USER; kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(dest_name); @@ -4513,7 +4574,7 @@ ipc_kmsg_copyout_pseudo( mr |= ipc_kmsg_copyout_object(space, voucher, voucher_type, &voucher_name); kmsg->ikm_header->msgh_voucher_port = voucher_name; } - + if (mbits & MACH_MSGH_BITS_COMPLEX) { mr |= ipc_kmsg_copyout_body(kmsg, space, map, slist); } @@ -4532,8 +4593,8 @@ ipc_kmsg_copyout_pseudo( void ipc_kmsg_copyout_dest( - ipc_kmsg_t kmsg, - ipc_space_t space) + ipc_kmsg_t kmsg, + ipc_space_t space) { mach_msg_bits_t mbits; ipc_object_t dest; @@ -4570,8 +4631,9 @@ ipc_kmsg_copyout_dest( if (IO_VALID(reply)) { ipc_object_destroy(reply, reply_type); reply_name = MACH_PORT_NULL; - } else + } else { reply_name = CAST_MACH_PORT_TO_NAME(reply); + } if (IO_VALID(voucher)) { assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); @@ -4582,7 +4644,7 @@ ipc_kmsg_copyout_dest( } kmsg->ikm_header->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type, - voucher_type, mbits); + voucher_type, mbits); kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name); kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name); kmsg->ikm_header->msgh_voucher_port = voucher_name; @@ -4591,8 +4653,8 @@ ipc_kmsg_copyout_dest( mach_msg_body_t *body; body = (mach_msg_body_t *) (kmsg->ikm_header + 1); - ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count, - (mach_msg_descriptor_t *)(body + 1)); + ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count, + (mach_msg_descriptor_t *)(body + 1)); } } @@ -4611,8 +4673,8 @@ ipc_kmsg_copyout_dest( void ipc_kmsg_copyout_to_kernel( - ipc_kmsg_t kmsg, - ipc_space_t space) + ipc_kmsg_t kmsg, + ipc_space_t space) { ipc_object_t dest; mach_port_t reply; @@ -4637,9 +4699,32 @@ ipc_kmsg_copyout_to_kernel( dest_name = MACH_PORT_DEAD; } + /* + * While MIG kernel users don't receive vouchers, the + * msgh_voucher_port field is intended to be round-tripped through the + * kernel if there is no voucher disposition set. Here we check for a + * non-zero voucher disposition, and consume the voucher send right as + * there is no possible way to specify MACH_RCV_VOUCHER semantics. + */ + mach_msg_type_name_t voucher_type; + voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits); + if (voucher_type != MACH_MSGH_BITS_ZERO) { + assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); + /* + * someone managed to send this kernel routine a message with + * a voucher in it. Cleanup the reference in + * kmsg->ikm_voucher. 
+ */ + if (IP_VALID(kmsg->ikm_voucher)) { + ipc_port_release_send(kmsg->ikm_voucher); + } + kmsg->ikm_voucher = IP_NULL; + kmsg->ikm_header->msgh_voucher_port = 0; + } + kmsg->ikm_header->msgh_bits = - (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) | - MACH_MSGH_BITS(reply_type, dest_type)); + (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) | + MACH_MSGH_BITS(reply_type, dest_type)); kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name); kmsg->ikm_header->msgh_remote_port = reply; } @@ -4647,17 +4732,17 @@ ipc_kmsg_copyout_to_kernel( #if IKM_SUPPORT_LEGACY void ipc_kmsg_copyout_to_kernel_legacy( - ipc_kmsg_t kmsg, - ipc_space_t space) + ipc_kmsg_t kmsg, + ipc_space_t space) { ipc_object_t dest; - ipc_object_t reply; + mach_port_t reply; mach_msg_type_name_t dest_type; mach_msg_type_name_t reply_type; - mach_port_name_t dest_name, reply_name; + mach_port_name_t dest_name; dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port; - reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port; + reply = kmsg->ikm_header->msgh_local_port; dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits); reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits); @@ -4673,93 +4758,106 @@ ipc_kmsg_copyout_to_kernel_legacy( dest_name = MACH_PORT_DEAD; } - reply_name = CAST_MACH_PORT_TO_NAME(reply); + mach_msg_type_name_t voucher_type; + voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits); + if (voucher_type != MACH_MSGH_BITS_ZERO) { + assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND); + assert(IP_VALID(kmsg->ikm_voucher)); + /* + * someone managed to send this kernel routine a message with + * a voucher in it. Cleanup the reference in + * kmsg->ikm_voucher. + */ + ipc_port_release_send(kmsg->ikm_voucher); + kmsg->ikm_voucher = IP_NULL; + kmsg->ikm_header->msgh_voucher_port = 0; + } kmsg->ikm_header->msgh_bits = - (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) | - MACH_MSGH_BITS(reply_type, dest_type)); + (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) | + MACH_MSGH_BITS(reply_type, dest_type)); kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name); - kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name); + kmsg->ikm_header->msgh_remote_port = reply; + + mach_msg_descriptor_t *saddr; + mach_msg_legacy_descriptor_t *daddr; + mach_msg_type_number_t i, count = ((mach_msg_base_t *)kmsg->ikm_header)->body.msgh_descriptor_count; + saddr = (mach_msg_descriptor_t *) (((mach_msg_base_t *)kmsg->ikm_header) + 1); + saddr = &saddr[count - 1]; + daddr = (mach_msg_legacy_descriptor_t *)&saddr[count]; + daddr--; + + vm_offset_t dsc_adjust = 0; + + for (i = 0; i < count; i++, saddr--, daddr--) { + switch (saddr->type.type) { + case MACH_MSG_PORT_DESCRIPTOR: { + mach_msg_port_descriptor_t *dsc = &saddr->port; + mach_msg_legacy_port_descriptor_t *dest_dsc = &daddr->port; + + mach_port_t name = dsc->name; + mach_msg_type_name_t disposition = dsc->disposition; + + dest_dsc->name = CAST_MACH_PORT_TO_NAME(name); + dest_dsc->disposition = disposition; + dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR; + break; + } + case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: + case MACH_MSG_OOL_DESCRIPTOR: { + /* The sender should supply ready-made memory, i.e. a vm_map_copy_t + * so we don't need to do anything special. 
*/ + + mach_msg_ool_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->out_of_line; - mach_msg_descriptor_t *saddr; - mach_msg_legacy_descriptor_t *daddr; - mach_msg_type_number_t i, count = ((mach_msg_base_t *)kmsg->ikm_header)->body.msgh_descriptor_count; - saddr = (mach_msg_descriptor_t *) (((mach_msg_base_t *)kmsg->ikm_header) + 1); - saddr = &saddr[count-1]; - daddr = (mach_msg_legacy_descriptor_t *)&saddr[count]; - daddr--; - - vm_offset_t dsc_adjust = 0; - - for (i = 0; i < count; i++, saddr--, daddr--) { - switch (saddr->type.type) { - case MACH_MSG_PORT_DESCRIPTOR: { - mach_msg_port_descriptor_t *dsc = &saddr->port; - mach_msg_legacy_port_descriptor_t *dest_dsc = &daddr->port; - - mach_port_t name = dsc->name; - mach_msg_type_name_t disposition = dsc->disposition; - - dest_dsc->name = CAST_MACH_PORT_TO_NAME(name); - dest_dsc->disposition = disposition; - dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR; - break; - } - case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_DESCRIPTOR: { - /* The sender should supply ready-made memory, i.e. a vm_map_copy_t - * so we don't need to do anything special. */ - - mach_msg_ool_descriptor_t *source_dsc = (typeof(source_dsc))&saddr->out_of_line; - - mach_msg_ool_descriptor32_t *dest_dsc = &daddr->out_of_line32; - - vm_offset_t address = (vm_offset_t)source_dsc->address; - vm_size_t size = source_dsc->size; - boolean_t deallocate = source_dsc->deallocate; - mach_msg_copy_options_t copy = source_dsc->copy; - mach_msg_descriptor_type_t type = source_dsc->type; - - dest_dsc->address = address; - dest_dsc->size = size; - dest_dsc->deallocate = deallocate; - dest_dsc->copy = copy; - dest_dsc->type = type; - break; - } - case MACH_MSG_OOL_PORTS_DESCRIPTOR: { - mach_msg_ool_ports_descriptor_t *source_dsc = (typeof(source_dsc))&saddr->ool_ports; - - mach_msg_ool_ports_descriptor32_t *dest_dsc = &daddr->ool_ports32; - - vm_offset_t address = (vm_offset_t)source_dsc->address; - vm_size_t port_count = source_dsc->count; - boolean_t deallocate = source_dsc->deallocate; - mach_msg_copy_options_t copy = source_dsc->copy; - mach_msg_descriptor_type_t type = source_dsc->type; - - dest_dsc->address = address; - dest_dsc->count = port_count; - dest_dsc->deallocate = deallocate; - dest_dsc->copy = copy; - dest_dsc->type = type; - break; - } - default: { -#if MACH_ASSERT - panic("ipc_kmsg_copyin_from_kernel: bad descriptor"); -#endif /* MACH_ASSERT */ - } - } - } - - if(count) { - dsc_adjust = 4*count; - memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t)); - kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust); - /* Update the message size for the smaller user representation */ - kmsg->ikm_header->msgh_size -= dsc_adjust; - } + mach_msg_ool_descriptor32_t *dest_dsc = &daddr->out_of_line32; + + vm_offset_t address = (vm_offset_t)source_dsc->address; + vm_size_t size = source_dsc->size; + boolean_t deallocate = source_dsc->deallocate; + mach_msg_copy_options_t copy = source_dsc->copy; + mach_msg_descriptor_type_t type = source_dsc->type; + + dest_dsc->address = address; + dest_dsc->size = size; + dest_dsc->deallocate = deallocate; + dest_dsc->copy = copy; + dest_dsc->type = type; + break; + } + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + mach_msg_ool_ports_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->ool_ports; + + mach_msg_ool_ports_descriptor32_t *dest_dsc = &daddr->ool_ports32; + + vm_offset_t address = (vm_offset_t)source_dsc->address; + vm_size_t port_count = 
source_dsc->count; + boolean_t deallocate = source_dsc->deallocate; + mach_msg_copy_options_t copy = source_dsc->copy; + mach_msg_descriptor_type_t type = source_dsc->type; + + dest_dsc->address = address; + dest_dsc->count = port_count; + dest_dsc->deallocate = deallocate; + dest_dsc->copy = copy; + dest_dsc->type = type; + break; + } + default: { +#if MACH_ASSERT + panic("ipc_kmsg_copyin_from_kernel: bad descriptor"); +#endif /* MACH_ASSERT */ + } + } + } + + if (count) { + dsc_adjust = 4 * count; + memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t)); + kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust); + /* Update the message size for the smaller user representation */ + kmsg->ikm_header->msgh_size -= dsc_adjust; + } } #endif /* IKM_SUPPORT_LEGACY */ @@ -4768,7 +4866,7 @@ ipc_kmsg_copyout_to_kernel_legacy( * Just sets those parts of the trailer that aren't set up at allocation time. */ static void -ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit) +ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit) { if (is64bit) { mach_msg_max_trailer64_t *out = (mach_msg_max_trailer64_t*)_out; @@ -4787,22 +4885,22 @@ ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit #endif /* __arm64__ */ mach_msg_trailer_size_t -ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused, - mach_msg_option_t option, thread_t thread, - mach_port_seqno_t seqno, boolean_t minimal_trailer, - mach_vm_offset_t context) +ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused, + mach_msg_option_t option, thread_t thread, + mach_port_seqno_t seqno, boolean_t minimal_trailer, + mach_vm_offset_t context) { mach_msg_max_trailer_t *trailer; #ifdef __arm64__ mach_msg_max_trailer_t tmp_trailer; /* This accommodates U64, and we'll munge */ void *real_trailer_out = (void*)(mach_msg_max_trailer_t *) - ((vm_offset_t)kmsg->ikm_header + - round_msg(kmsg->ikm_header->msgh_size)); + ((vm_offset_t)kmsg->ikm_header + + round_msg(kmsg->ikm_header->msgh_size)); - /* + /* * Populate scratch with initial values set up at message allocation time. - * After, we reinterpret the space in the message as the right type + * After, we reinterpret the space in the message as the right type * of trailer for the address space in question. */ bcopy(real_trailer_out, &tmp_trailer, MAX_TRAILER_SIZE); @@ -4810,8 +4908,8 @@ ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused, #else /* __arm64__ */ (void)thread; trailer = (mach_msg_max_trailer_t *) - ((vm_offset_t)kmsg->ikm_header + - round_msg(kmsg->ikm_header->msgh_size)); + ((vm_offset_t)kmsg->ikm_header + + round_msg(kmsg->ikm_header->msgh_size)); #endif /* __arm64__ */ if (!(option & MACH_RCV_TRAILER_MASK)) { @@ -4822,7 +4920,7 @@ ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused, trailer->msgh_context = context; trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option); - if (minimal_trailer) { + if (minimal_trailer) { goto done; } @@ -4836,7 +4934,7 @@ ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused, * and a send right to copyout to the receiver. 
*/ - if (option & MACH_RCV_TRAILER_ELEMENTS (MACH_RCV_TRAILER_LABELS)) { + if (option & MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_LABELS)) { trailer->msgh_labels.sender = 0; } diff --git a/osfmk/ipc/ipc_kmsg.h b/osfmk/ipc/ipc_kmsg.h index f5598615f..74c31f1b4 100644 --- a/osfmk/ipc/ipc_kmsg.h +++ b/osfmk/ipc/ipc_kmsg.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -70,7 +70,7 @@ * Definitions for kernel messages. */ -#ifndef _IPC_IPC_KMSG_H_ +#ifndef _IPC_IPC_KMSG_H_ #define _IPC_IPC_KMSG_H_ #include @@ -115,45 +115,45 @@ struct ipc_kmsg { }; #if defined(__i386__) || defined(__arm__) -#define IKM_SUPPORT_LEGACY 1 +#define IKM_SUPPORT_LEGACY 1 #else -#define IKM_SUPPORT_LEGACY 0 +#define IKM_SUPPORT_LEGACY 0 #endif -#define IKM_OVERHEAD (sizeof(struct ipc_kmsg)) +#define IKM_OVERHEAD (sizeof(struct ipc_kmsg)) -#define ikm_plus_overhead(size) ((mach_msg_size_t)((size) + IKM_OVERHEAD)) -#define ikm_less_overhead(size) ((mach_msg_size_t)((size) - IKM_OVERHEAD)) +#define ikm_plus_overhead(size) ((mach_msg_size_t)((size) + IKM_OVERHEAD)) +#define ikm_less_overhead(size) ((mach_msg_size_t)((size) - IKM_OVERHEAD)) /* * XXX For debugging. */ -#define IKM_BOGUS ((ipc_kmsg_t) 0xffffff10) +#define IKM_BOGUS ((ipc_kmsg_t) 0xffffff10) /* * The size of the kernel message buffers that will be cached. 
* IKM_SAVED_KMSG_SIZE includes overhead; IKM_SAVED_MSG_SIZE doesn't. */ extern zone_t ipc_kmsg_zone; -#define IKM_SAVED_KMSG_SIZE 256 -#define IKM_SAVED_MSG_SIZE ikm_less_overhead(IKM_SAVED_KMSG_SIZE) +#define IKM_SAVED_KMSG_SIZE 256 +#define IKM_SAVED_MSG_SIZE ikm_less_overhead(IKM_SAVED_KMSG_SIZE) -#define ikm_prealloc_inuse_port(kmsg) \ +#define ikm_prealloc_inuse_port(kmsg) \ ((kmsg)->ikm_prealloc) -#define ikm_prealloc_inuse(kmsg) \ +#define ikm_prealloc_inuse(kmsg) \ ((kmsg)->ikm_prealloc != IP_NULL) -#define ikm_prealloc_set_inuse(kmsg, port) \ -MACRO_BEGIN \ - assert((port) != IP_NULL); \ - (kmsg)->ikm_prealloc = (port); \ - ip_reference(port); \ +#define ikm_prealloc_set_inuse(kmsg, port) \ +MACRO_BEGIN \ + assert((port) != IP_NULL); \ + (kmsg)->ikm_prealloc = (port); \ + ip_reference(port); \ MACRO_END -#define ikm_prealloc_clear_inuse(kmsg, port) \ -MACRO_BEGIN \ - (kmsg)->ikm_prealloc = IP_NULL; \ +#define ikm_prealloc_clear_inuse(kmsg, port) \ +MACRO_BEGIN \ + (kmsg)->ikm_prealloc = IP_NULL; \ MACRO_END #if MACH_FLIPC @@ -162,7 +162,7 @@ MACRO_END #define ikm_flipc_init(kmsg) #endif -#define ikm_init(kmsg, size) \ +#define ikm_init(kmsg, size) \ MACRO_BEGIN \ (kmsg)->ikm_size = (size); \ (kmsg)->ikm_prealloc = IP_NULL; \ @@ -175,21 +175,21 @@ MACRO_END #define ikm_qos_init(kmsg) \ MACRO_BEGIN \ - (kmsg)->ikm_qos = MACH_MSG_PRIORITY_UNSPECIFIED; \ - (kmsg)->ikm_qos_override = MACH_MSG_PRIORITY_UNSPECIFIED; \ + (kmsg)->ikm_qos = MACH_MSG_PRIORITY_UNSPECIFIED; \ + (kmsg)->ikm_qos_override = MACH_MSG_PRIORITY_UNSPECIFIED; \ MACRO_END -#define ikm_check_init(kmsg, size) \ -MACRO_BEGIN \ - assert((kmsg)->ikm_size == (size)); \ - assert((kmsg)->ikm_prev == IKM_BOGUS); \ - assert((kmsg)->ikm_next == IKM_BOGUS); \ +#define ikm_check_init(kmsg, size) \ +MACRO_BEGIN \ + assert((kmsg)->ikm_size == (size)); \ + assert((kmsg)->ikm_prev == IKM_BOGUS); \ + assert((kmsg)->ikm_next == IKM_BOGUS); \ MACRO_END -#define ikm_set_header(kmsg, mtsize) \ -MACRO_BEGIN \ - (kmsg)->ikm_header = (mach_msg_header_t *) \ - ((vm_offset_t)((kmsg) + 1) + (kmsg)->ikm_size - (mtsize)); \ +#define ikm_set_header(kmsg, mtsize) \ +MACRO_BEGIN \ + (kmsg)->ikm_header = (mach_msg_header_t *) \ + ((vm_offset_t)((kmsg) + 1) + (kmsg)->ikm_size - (mtsize)); \ MACRO_END struct ipc_kmsg_queue { @@ -198,28 +198,28 @@ struct ipc_kmsg_queue { typedef struct ipc_kmsg_queue *ipc_kmsg_queue_t; -#define IKMQ_NULL ((ipc_kmsg_queue_t) 0) +#define IKMQ_NULL ((ipc_kmsg_queue_t) 0) /* * Exported interfaces */ -#define ipc_kmsg_queue_init(queue) \ -MACRO_BEGIN \ - (queue)->ikmq_base = IKM_NULL; \ +#define ipc_kmsg_queue_init(queue) \ +MACRO_BEGIN \ + (queue)->ikmq_base = IKM_NULL; \ MACRO_END -#define ipc_kmsg_queue_empty(queue) ((queue)->ikmq_base == IKM_NULL) +#define ipc_kmsg_queue_empty(queue) ((queue)->ikmq_base == IKM_NULL) /* Enqueue a kmsg */ extern void ipc_kmsg_enqueue( - ipc_kmsg_queue_t queue, - ipc_kmsg_t kmsg); + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg); extern boolean_t ipc_kmsg_enqueue_qos( - ipc_kmsg_queue_t queue, - ipc_kmsg_t kmsg); + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg); extern boolean_t ipc_kmsg_override_qos( ipc_kmsg_queue_t queue, @@ -232,32 +232,32 @@ extern ipc_kmsg_t ipc_kmsg_dequeue( /* Pull a kmsg out of a queue */ extern void ipc_kmsg_rmqueue( - ipc_kmsg_queue_t queue, - ipc_kmsg_t kmsg); + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg); /* Pull the (given) first kmsg out of a queue */ extern void ipc_kmsg_rmqueue_first( - ipc_kmsg_queue_t queue, + ipc_kmsg_queue_t queue, ipc_kmsg_t kmsg); -#define 
ipc_kmsg_queue_first(queue) ((queue)->ikmq_base) +#define ipc_kmsg_queue_first(queue) ((queue)->ikmq_base) /* Return the kmsg following the given kmsg */ extern ipc_kmsg_t ipc_kmsg_queue_next( - ipc_kmsg_queue_t queue, - ipc_kmsg_t kmsg); + ipc_kmsg_queue_t queue, + ipc_kmsg_t kmsg); /* Allocate a kernel message */ extern ipc_kmsg_t ipc_kmsg_alloc( - mach_msg_size_t size); + mach_msg_size_t size); /* Free a kernel message buffer */ extern void ipc_kmsg_free( - ipc_kmsg_t kmsg); + ipc_kmsg_t kmsg); /* Destroy kernel message */ extern void ipc_kmsg_destroy( - ipc_kmsg_t kmsg); + ipc_kmsg_t kmsg); /* Enqueue kernel message for deferred destruction */ extern boolean_t ipc_kmsg_delayed_destroy( @@ -268,144 +268,143 @@ extern void ipc_kmsg_reap_delayed(void); /* Preallocate a kernel message buffer */ extern ipc_kmsg_t ipc_kmsg_prealloc( - mach_msg_size_t size); + mach_msg_size_t size); /* bind a preallocated message buffer to a port */ extern void ipc_kmsg_set_prealloc( - ipc_kmsg_t kmsg, - ipc_port_t port); + ipc_kmsg_t kmsg, + ipc_port_t port); /* Clear preallocated message buffer binding */ extern void ipc_kmsg_clear_prealloc( - ipc_kmsg_t kmsg, - ipc_port_t port); + ipc_kmsg_t kmsg, + ipc_port_t port); /* Allocate a kernel message buffer and copy a user message to the buffer */ extern mach_msg_return_t ipc_kmsg_get( - mach_vm_address_t msg_addr, - mach_msg_size_t size, - ipc_kmsg_t *kmsgp); + mach_vm_address_t msg_addr, + mach_msg_size_t size, + ipc_kmsg_t *kmsgp); /* Allocate a kernel message buffer and copy a kernel message to the buffer */ extern mach_msg_return_t ipc_kmsg_get_from_kernel( - mach_msg_header_t *msg, - mach_msg_size_t size, - ipc_kmsg_t *kmsgp); + mach_msg_header_t *msg, + mach_msg_size_t size, + ipc_kmsg_t *kmsgp); /* Send a message to a port */ extern mach_msg_return_t ipc_kmsg_send( - ipc_kmsg_t kmsg, - mach_msg_option_t option, - mach_msg_timeout_t timeout_val); + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_msg_timeout_t timeout_val); /* Copy a kernel message buffer to a user message */ extern mach_msg_return_t ipc_kmsg_put( - ipc_kmsg_t kmsg, - mach_msg_option_t option, - mach_vm_address_t rcv_addr, - mach_msg_size_t rcv_size, - mach_msg_size_t trailer_size, - mach_msg_size_t *size); + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_vm_address_t rcv_addr, + mach_msg_size_t rcv_size, + mach_msg_size_t trailer_size, + mach_msg_size_t *size); /* Copy a kernel message buffer to a kernel message */ extern void ipc_kmsg_put_to_kernel( - mach_msg_header_t *msg, - ipc_kmsg_t kmsg, - mach_msg_size_t size); + mach_msg_header_t *msg, + ipc_kmsg_t kmsg, + mach_msg_size_t size); /* Copyin port rights in the header of a message */ extern mach_msg_return_t ipc_kmsg_copyin_header( ipc_kmsg_t kmsg, - ipc_space_t space, + ipc_space_t space, mach_msg_priority_t override, - mach_msg_option_t *optionp); + mach_msg_option_t *optionp); /* Copyin port rights and out-of-line memory from a user message */ extern mach_msg_return_t ipc_kmsg_copyin( - ipc_kmsg_t kmsg, - ipc_space_t space, - vm_map_t map, + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, mach_msg_priority_t override, - mach_msg_option_t *optionp); + mach_msg_option_t *optionp); /* Copyin port rights and out-of-line memory from a kernel message */ extern mach_msg_return_t ipc_kmsg_copyin_from_kernel( - ipc_kmsg_t kmsg); + ipc_kmsg_t kmsg); #if IKM_SUPPORT_LEGACY extern mach_msg_return_t ipc_kmsg_copyin_from_kernel_legacy( - ipc_kmsg_t kmsg); + ipc_kmsg_t kmsg); #endif /* Copyout port rights in the header of a 
message */ extern mach_msg_return_t ipc_kmsg_copyout_header( ipc_kmsg_t kmsg, - ipc_space_t space, - mach_msg_option_t option); + ipc_space_t space, + mach_msg_option_t option); /* Copyout a port right returning a name */ extern mach_msg_return_t ipc_kmsg_copyout_object( - ipc_space_t space, - ipc_object_t object, - mach_msg_type_name_t msgt_name, - mach_port_name_t *namep); + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + mach_port_name_t *namep); /* Copyout the header and body to a user message */ extern mach_msg_return_t ipc_kmsg_copyout( - ipc_kmsg_t kmsg, - ipc_space_t space, - vm_map_t map, - mach_msg_body_t *slist, - mach_msg_option_t option); + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist, + mach_msg_option_t option); /* Copyout port rights and out-of-line memory from the body of a message */ extern mach_msg_return_t ipc_kmsg_copyout_body( - ipc_kmsg_t kmsg, - ipc_space_t space, - vm_map_t map, - mach_msg_body_t *slist); + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist); /* Copyout port rights and out-of-line memory to a user message, - not reversing the ports in the header */ + * not reversing the ports in the header */ extern mach_msg_return_t ipc_kmsg_copyout_pseudo( - ipc_kmsg_t kmsg, - ipc_space_t space, - vm_map_t map, - mach_msg_body_t *slist); + ipc_kmsg_t kmsg, + ipc_space_t space, + vm_map_t map, + mach_msg_body_t *slist); /* Compute size of message as copied out to the specified space/map */ extern mach_msg_size_t ipc_kmsg_copyout_size( - ipc_kmsg_t kmsg, - vm_map_t map); + ipc_kmsg_t kmsg, + vm_map_t map); /* Copyout the destination port in the message */ -extern void ipc_kmsg_copyout_dest( - ipc_kmsg_t kmsg, - ipc_space_t space); +extern void ipc_kmsg_copyout_dest( + ipc_kmsg_t kmsg, + ipc_space_t space); /* kernel's version of ipc_kmsg_copyout_dest */ extern void ipc_kmsg_copyout_to_kernel( - ipc_kmsg_t kmsg, - ipc_space_t space); + ipc_kmsg_t kmsg, + ipc_space_t space); #if IKM_SUPPORT_LEGACY extern void ipc_kmsg_copyout_to_kernel_legacy( - ipc_kmsg_t kmsg, - ipc_space_t space); + ipc_kmsg_t kmsg, + ipc_space_t space); #endif extern mach_msg_trailer_size_t -ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space, - mach_msg_option_t option, thread_t thread, - mach_port_seqno_t seqno, boolean_t minimal_trailer, - mach_vm_offset_t context); +ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space, + mach_msg_option_t option, thread_t thread, + mach_port_seqno_t seqno, boolean_t minimal_trailer, + mach_vm_offset_t context); #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD) extern void ipc_kmsg_trace_send(ipc_kmsg_t kmsg, - mach_msg_option_t option); + mach_msg_option_t option); #else -#define ipc_kmsg_trace_send(a,b) do { } while (0) +#define ipc_kmsg_trace_send(a, b) do { } while (0) #endif -#endif /* _IPC_IPC_KMSG_H_ */ - +#endif /* _IPC_IPC_KMSG_H_ */ diff --git a/osfmk/ipc/ipc_machdep.h b/osfmk/ipc/ipc_machdep.h index 52e9533e1..db0e28cfb 100644 --- a/osfmk/ipc/ipc_machdep.h +++ b/osfmk/ipc/ipc_machdep.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,47 +38,47 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.6.1 1995/01/06 19:45:43 devrcs - * mk6 CR668 - 1.3b26 merge - * new file for mk6 - * [1994/10/12 22:19:20 dwm] + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:19:20 dwm] * * Revision 1.1.3.1 1994/05/06 18:47:26 tmt - * Merge this file in from the osc1.3dec tree. - * [1994/03/30 21:33:42 berube] - * Created from mk80. - * [93/10/05 bruel] - * + * Merge this file in from the osc1.3dec tree. + * [1994/03/30 21:33:42 berube] + * Created from mk80. + * [93/10/05 bruel] + * * $EndLog$ */ /* CMU_HIST */ /* * Revision 2.2 93/01/14 17:32:59 danner - * Created. - * [92/12/10 af] + * Created. + * [92/12/10 af] */ /* CMU_END_HIST */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -88,12 +88,11 @@ */ /* 64 bit machines */ -#if defined(__alpha) -#define PORT_T_SIZE_IN_BITS 64 +#if defined(__alpha) +#define PORT_T_SIZE_IN_BITS 64 #endif /* default, 32 bit machines */ -#if !defined(PORT_T_SIZE_IN_BITS) -#define PORT_T_SIZE_IN_BITS 32 +#if !defined(PORT_T_SIZE_IN_BITS) +#define PORT_T_SIZE_IN_BITS 32 #endif - diff --git a/osfmk/ipc/ipc_mqueue.c b/osfmk/ipc/ipc_mqueue.c index 685950c90..2a6642598 100644 --- a/osfmk/ipc/ipc_mqueue.c +++ b/osfmk/ipc/ipc_mqueue.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -68,7 +68,7 @@ * is included in support of clause 2.2 (b) of the Apple Public License, * Version 2.0. 
*/ - + #include #include @@ -78,7 +78,7 @@ #include #include #include -#include /* XXX - for mach_msg_receive_continue */ +#include /* XXX - for mach_msg_receive_continue */ #include #include #include @@ -100,10 +100,10 @@ #include -extern char *proc_name_address(void *p); +extern char *proc_name_address(void *p); -int ipc_mqueue_full; /* address is event for queue space */ -int ipc_mqueue_rcv; /* address is event for message arrival */ +int ipc_mqueue_full; /* address is event for queue space */ +int ipc_mqueue_rcv; /* address is event for message arrival */ /* forward declarations */ void ipc_mqueue_receive_results(wait_result_t result); @@ -119,13 +119,13 @@ static void ipc_mqueue_peek_on_thread( */ void ipc_mqueue_init( - ipc_mqueue_t mqueue, - boolean_t is_set) + ipc_mqueue_t mqueue, + boolean_t is_set) { if (is_set) { waitq_set_init(&mqueue->imq_set_queue, - SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST, - NULL, NULL); + SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, + NULL, NULL); } else { waitq_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO | SYNC_POLICY_PORT); ipc_kmsg_queue_init(&mqueue->imq_messages); @@ -140,15 +140,17 @@ ipc_mqueue_init( klist_init(&mqueue->imq_klist); } -void ipc_mqueue_deinit( - ipc_mqueue_t mqueue) +void +ipc_mqueue_deinit( + ipc_mqueue_t mqueue) { boolean_t is_set = imq_is_set(mqueue); - if (is_set) + if (is_set) { waitq_set_deinit(&mqueue->imq_set_queue); - else + } else { waitq_deinit(&mqueue->imq_wait_queue); + } } /* @@ -164,8 +166,7 @@ void imq_reserve_and_lock(ipc_mqueue_t mq, uint64_t *reserved_prepost) { *reserved_prepost = waitq_prepost_reserve(&mq->imq_wait_queue, 0, - WAITQ_KEEP_LOCKED); - + WAITQ_KEEP_LOCKED); } @@ -200,14 +201,13 @@ imq_release_and_unlock(ipc_mqueue_t mq, uint64_t reserved_prepost) boolean_t ipc_mqueue_member( - ipc_mqueue_t port_mqueue, - ipc_mqueue_t set_mqueue) + ipc_mqueue_t port_mqueue, + ipc_mqueue_t set_mqueue) { struct waitq *port_waitq = &port_mqueue->imq_wait_queue; struct waitq_set *set_waitq = &set_mqueue->imq_set_queue; return waitq_member(port_waitq, set_waitq); - } /* @@ -219,8 +219,8 @@ ipc_mqueue_member( kern_return_t ipc_mqueue_remove( - ipc_mqueue_t mqueue, - ipc_mqueue_t set_mqueue) + ipc_mqueue_t mqueue, + ipc_mqueue_t set_mqueue) { struct waitq *mq_waitq = &mqueue->imq_wait_queue; struct waitq_set *set_waitq = &set_mqueue->imq_set_queue; @@ -238,7 +238,7 @@ ipc_mqueue_remove( * mqueue unlocked and set links deallocated */ void -ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue) +ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue) { struct waitq *mq_waitq = &mqueue->imq_wait_queue; kern_return_t kr; @@ -261,7 +261,7 @@ ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue) * mqueue unlocked all set links deallocated */ void -ipc_mqueue_remove_all(ipc_mqueue_t mqueue) +ipc_mqueue_remove_all(ipc_mqueue_t mqueue) { struct waitq_set *mq_setq = &mqueue->imq_set_queue; @@ -285,16 +285,16 @@ ipc_mqueue_remove_all(ipc_mqueue_t mqueue) */ kern_return_t ipc_mqueue_add( - ipc_mqueue_t port_mqueue, - ipc_mqueue_t set_mqueue, - uint64_t *reserved_link, - uint64_t *reserved_prepost) + ipc_mqueue_t port_mqueue, + ipc_mqueue_t set_mqueue, + uint64_t *reserved_link, + uint64_t *reserved_prepost) { struct waitq *port_waitq = &port_mqueue->imq_wait_queue; struct waitq_set *set_waitq = &set_mqueue->imq_set_queue; ipc_kmsg_queue_t kmsgq; ipc_kmsg_t kmsg, next; - kern_return_t kr; + kern_return_t kr; assert(reserved_link && *reserved_link != 0); assert(waitqs_is_linked(set_waitq)); @@ -318,8 +318,8 @@ ipc_mqueue_add( */ kmsgq = &port_mqueue->imq_messages; 
 	for (kmsg = ipc_kmsg_queue_first(kmsgq);
-	     kmsg != IKM_NULL;
-	     kmsg = next) {
+	    kmsg != IKM_NULL;
+	    kmsg = next) {
 		next = ipc_kmsg_queue_next(kmsgq, kmsg);
 
 		for (;;) {
@@ -328,15 +328,16 @@ ipc_mqueue_add(
 			spl_t th_spl;
 
 			th = waitq_wakeup64_identify_locked(
-						port_waitq,
-						IPC_MQUEUE_RECEIVE,
-						THREAD_AWAKENED, &th_spl,
-						reserved_prepost, WAITQ_ALL_PRIORITIES,
-						WAITQ_KEEP_LOCKED);
+				port_waitq,
+				IPC_MQUEUE_RECEIVE,
+				THREAD_AWAKENED, &th_spl,
+				reserved_prepost, WAITQ_ALL_PRIORITIES,
+				WAITQ_KEEP_LOCKED);
 			/* waitq/mqueue still locked, thread locked */
 
-			if (th == THREAD_NULL)
+			if (th == THREAD_NULL) {
 				goto leave;
+			}
 
 			/*
 			 * If the receiver waited with a facility not directly
@@ -353,8 +354,8 @@ ipc_mqueue_add(
 				 * if there are any actual receivers
 				 */
 				ipc_mqueue_peek_on_thread(port_mqueue,
-							  th->ith_option,
-							  th);
+				    th->ith_option,
+				    th);
 			}
 			thread_unlock(th);
 			splx(th_spl);
@@ -371,7 +372,7 @@ ipc_mqueue_add(
 			 */
 			msize = ipc_kmsg_copyout_size(kmsg, th->map);
 			if (th->ith_rsize <
-			    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(th), th->ith_option))) {
+			    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(th), th->ith_option))) {
 				th->ith_state = MACH_RCV_TOO_LARGE;
 				th->ith_msize = msize;
 				if (th->ith_option & MACH_RCV_LARGE) {
@@ -404,13 +405,14 @@ ipc_mqueue_add(
 			thread_unlock(th);
 			splx(th_spl);
 #if MACH_FLIPC
-			if (MACH_NODE_VALID(node) && FPORT_VALID(port_mqueue->imq_fport))
-				flipc_msg_ack(node, port_mqueue, TRUE);
+			if (MACH_NODE_VALID(node) && FPORT_VALID(port_mqueue->imq_fport)) {
+				flipc_msg_ack(node, port_mqueue, TRUE);
+			}
 #endif
 			break;	/* go to next message */
 		}
 	}
- leave:
+leave:
 	imq_unlock(port_mqueue);
 	return KERN_SUCCESS;
 }
@@ -422,12 +424,12 @@ ipc_mqueue_add(
  *	Conditions:
  *		The message queue is locked.
  */
-
 void
 ipc_mqueue_changed(
-	ipc_mqueue_t		mqueue)
+	ipc_space_t             space,
+	ipc_mqueue_t            mqueue)
 {
-	if (IMQ_KLIST_VALID(mqueue)) {
+	if (IMQ_KLIST_VALID(mqueue) && SLIST_FIRST(&mqueue->imq_klist)) {
 		/*
 		 * Indicate that this message queue is vanishing
 		 *
@@ -442,17 +444,29 @@ ipc_mqueue_changed(
 		 *
 		 * Fortunately, we really don't need this linkage anymore after this
 		 * point as EV_VANISHED / EV_EOF will be the last thing delivered ever.
+		 *
+		 * Note: we don't have the space lock here; however, this covers the
+		 * case when a task is terminating the space, triggering
+		 * several knote_vanish() calls.
+		 *
+		 * We don't need the lock to observe that the space is inactive, as
+		 * we just deactivated it on the same thread.
+		 *
+		 * We still need to call knote_vanish() so that the knote is
+		 * marked with EV_VANISHED or EV_EOF, so that the detach step
+		 * in filt_machportdetach is skipped correctly.
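
Review context: with the new signature every caller of
ipc_mqueue_changed() must now supply the space that holds the receive
right; the call sites are not in these hunks, and presumably pass the
port's receiver space. Condensed, the rewritten body is

	if (IMQ_KLIST_VALID(mqueue) && SLIST_FIRST(&mqueue->imq_klist)) {
		assert(space);
		knote_vanish(&mqueue->imq_klist, is_active(space));
		klist_init(&mqueue->imq_klist);
	}
	/* then wake all waiting receivers with THREAD_RESTART, as before */

Two things changed: the extra SLIST_FIRST() test means an empty klist
no longer takes the knote_vanish() path at all, and the new
is_active(space) argument hands knote_vanish() the liveness fact that
the comment above justifies observing without the space lock.
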
 		 */
-		knote_vanish(&mqueue->imq_klist);
+		assert(space);
+		knote_vanish(&mqueue->imq_klist, is_active(space));
 		klist_init(&mqueue->imq_klist);
 	}
 
 	waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
-				  IPC_MQUEUE_RECEIVE,
-				  THREAD_RESTART,
-				  NULL,
-				  WAITQ_ALL_PRIORITIES,
-				  WAITQ_KEEP_LOCKED);
+	    IPC_MQUEUE_RECEIVE,
+	    THREAD_RESTART,
+	    NULL,
+	    WAITQ_ALL_PRIORITIES,
+	    WAITQ_KEEP_LOCKED);
 }
 
@@ -477,9 +491,9 @@ ipc_mqueue_changed(
  */
 mach_msg_return_t
 ipc_mqueue_send(
-	ipc_mqueue_t		mqueue,
-	ipc_kmsg_t		kmsg,
-	mach_msg_option_t	option,
+	ipc_mqueue_t            mqueue,
+	ipc_kmsg_t              kmsg,
+	mach_msg_option_t       option,
 	mach_msg_timeout_t send_timeout)
 {
 	int wresult;
@@ -492,9 +506,9 @@ ipc_mqueue_send(
 	 */
 	if (!imq_full(mqueue) ||
 	    (!imq_full_kernel(mqueue) &&
-	     ((option & MACH_SEND_ALWAYS) ||
-	      (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
-	       MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
+	    ((option & MACH_SEND_ALWAYS) ||
+	    (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
+	    MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
 		mqueue->imq_msgcount++;
 		assert(mqueue->imq_msgcount > 0);
 		imq_unlock(mqueue);
@@ -518,16 +532,17 @@
 	}
 	mqueue->imq_fullwaiters = TRUE;
 
-	if (option & MACH_SEND_TIMEOUT)
-		clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
-	else
+	if (option & MACH_SEND_TIMEOUT) {
+		clock_interval_to_deadline(send_timeout, 1000 * NSEC_PER_USEC, &deadline);
+	} else {
 		deadline = 0;
+	}
 
 	thread_set_pending_block_hint(cur_thread, kThreadWaitPortSend);
 
 	send_turnstile = turnstile_prepare((uintptr_t)port,
-					   port_send_turnstile_address(port),
-					   TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
+	    port_send_turnstile_address(port),
+	    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
 
 	/* Check if the port in is in transit, get the destination port's turnstile */
 	if (ip_active(port) &&
@@ -539,19 +554,19 @@
 	}
 
 	turnstile_update_inheritor(send_turnstile, inheritor,
-		TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
+	    TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
 
 	wresult = waitq_assert_wait64_leeway(
-				&send_turnstile->ts_waitq,
-				IPC_MQUEUE_FULL,
-				THREAD_ABORTSAFE,
-				TIMEOUT_URGENCY_USER_NORMAL,
-				deadline,
-				TIMEOUT_NO_LEEWAY);
+		&send_turnstile->ts_waitq,
+		IPC_MQUEUE_FULL,
+		THREAD_ABORTSAFE,
+		TIMEOUT_URGENCY_USER_NORMAL,
+		deadline,
+		TIMEOUT_NO_LEEWAY);
 
 	imq_unlock(mqueue);
 
 	turnstile_update_inheritor_complete(send_turnstile,
-		TURNSTILE_INTERLOCK_NOT_HELD);
+	    TURNSTILE_INTERLOCK_NOT_HELD);
 
 	if (wresult == THREAD_WAITING) {
 		wresult = thread_block(THREAD_CONTINUE_NULL);
@@ -567,7 +582,6 @@
 	turnstile_cleanup();
 
 	switch (wresult) {
-
 	case THREAD_AWAKENED:
 		/*
 		 * we can proceed - inherited msgcount from waker
@@ -607,7 +621,8 @@
  *	The message queue is not locked.
  *	The caller holds a reference on the message queue.
*/ -extern void ipc_mqueue_override_send( +extern void +ipc_mqueue_override_send( ipc_mqueue_t mqueue, mach_msg_priority_t override) { @@ -621,11 +636,17 @@ extern void ipc_mqueue_override_send( ipc_kmsg_t first = ipc_kmsg_queue_first(&mqueue->imq_messages); if (first && ipc_kmsg_override_qos(&mqueue->imq_messages, first, override)) { - if (IMQ_KLIST_VALID(mqueue)) + ipc_port_t port = ip_from_mq(mqueue); + if (ip_active(port) && + port->ip_receiver_name != MACH_PORT_NULL && + is_active(port->ip_receiver) && + IMQ_KLIST_VALID(mqueue)) { KNOTE(&mqueue->imq_klist, 0); + } } - if (!first) + if (!first) { full_queue_empty = TRUE; + } } imq_unlock(mqueue); @@ -665,7 +686,7 @@ ipc_mqueue_release_msgcount(ipc_mqueue_t port_mq, ipc_mqueue_t set_mq) port_mq->imq_msgcount--; if (!imq_full(port_mq) && port_mq->imq_fullwaiters && - send_turnstile != TURNSTILE_NULL) { + send_turnstile != TURNSTILE_NULL) { /* * boost the priority of the awoken thread * (WAITQ_PROMOTE_PRIORITY) to ensure it uses @@ -680,9 +701,9 @@ ipc_mqueue_release_msgcount(ipc_mqueue_t port_mq, ipc_mqueue_t set_mq) * high priority threads trying to send to this port. */ if (waitq_wakeup64_one(&send_turnstile->ts_waitq, - IPC_MQUEUE_FULL, - THREAD_AWAKENED, - WAITQ_PROMOTE_PRIORITY) != KERN_SUCCESS) { + IPC_MQUEUE_FULL, + THREAD_AWAKENED, + WAITQ_PROMOTE_PRIORITY) != KERN_SUCCESS) { port_mq->imq_fullwaiters = FALSE; } else { /* gave away our slot - add reference back */ @@ -739,16 +760,15 @@ ipc_mqueue_post( mach_msg_size_t msize; receiver = waitq_wakeup64_identify_locked(waitq, - IPC_MQUEUE_RECEIVE, - THREAD_AWAKENED, - &th_spl, - &reserved_prepost, - WAITQ_ALL_PRIORITIES, - WAITQ_KEEP_LOCKED); + IPC_MQUEUE_RECEIVE, + THREAD_AWAKENED, + &th_spl, + &reserved_prepost, + WAITQ_ALL_PRIORITIES, + WAITQ_KEEP_LOCKED); /* waitq still locked, thread locked */ if (receiver == THREAD_NULL) { - /* * no receivers; queue kmsg if space still reserved * Reservations are cancelled when the port goes inactive. @@ -766,8 +786,14 @@ ipc_mqueue_post( */ if (mqueue->imq_msgcount > 0) { if (ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg)) { - if (IMQ_KLIST_VALID(mqueue)) + /* if the space is dead there is no point calling KNOTE */ + ipc_port_t port = ip_from_mq(mqueue); + if (ip_active(port) && + port->ip_receiver_name != MACH_PORT_NULL && + is_active(port->ip_receiver) && + IMQ_KLIST_VALID(mqueue)) { KNOTE(&mqueue->imq_klist, 0); + } } break; } @@ -816,9 +842,9 @@ ipc_mqueue_post( * If the message is too large or the scatter list is too small * the thread we wake up will get that as its status. 
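
Both KNOTE() sites touched above (the send-possible override and the
no-receiver enqueue path in ipc_mqueue_post()) now gate the wakeup on
the same liveness test; a sketch assembled from the two hunks, not a
separate change:

	ipc_port_t port = ip_from_mq(mqueue);

	if (ip_active(port) &&
	    port->ip_receiver_name != MACH_PORT_NULL &&
	    is_active(port->ip_receiver) &&
	    IMQ_KLIST_VALID(mqueue)) {
		KNOTE(&mqueue->imq_klist, 0);
	}

As the new comment in the second hunk puts it, if the space is dead
there is no point calling KNOTE; testing ip_receiver_name first also
ensures ip_receiver refers to a real space before is_active()
dereferences it.
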
*/ - msize = ipc_kmsg_copyout_size(kmsg, receiver->map); + msize = ipc_kmsg_copyout_size(kmsg, receiver->map); if (receiver->ith_rsize < - (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(receiver), receiver->ith_option))) { + (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(receiver), receiver->ith_option))) { receiver->ith_msize = msize; receiver->ith_state = MACH_RCV_TOO_LARGE; } else { @@ -844,8 +870,9 @@ ipc_mqueue_post( ipc_mqueue_release_msgcount(mqueue, IMQ_NULL); #if MACH_FLIPC - if (MACH_NODE_VALID(node) && FPORT_VALID(mqueue->imq_fport)) - flipc_msg_ack(node, mqueue, TRUE); + if (MACH_NODE_VALID(node) && FPORT_VALID(mqueue->imq_fport)) { + flipc_msg_ack(node, mqueue, TRUE); + } #endif break; } @@ -866,8 +893,9 @@ out_unlock: /* clear the waitq boost we may have been given */ waitq_clear_promotion_locked(&mqueue->imq_wait_queue, current_thread()); imq_release_and_unlock(mqueue, reserved_prepost); - if (destroy_msg) + if (destroy_msg) { ipc_kmsg_destroy(kmsg); + } current_task()->messages_sent++; return; @@ -877,8 +905,8 @@ out_unlock: /* static */ void ipc_mqueue_receive_results(wait_result_t saved_wait_result) { - thread_t self = current_thread(); - mach_msg_option_t option = self->ith_option; + thread_t self = current_thread(); + mach_msg_option_t option = self->ith_option; /* * why did we wake up? @@ -974,28 +1002,31 @@ ipc_mqueue_receive( imq_lock(mqueue); wresult = ipc_mqueue_receive_on_thread(mqueue, option, max_size, - rcv_timeout, interruptible, - self); + rcv_timeout, interruptible, + self); /* mqueue unlocked */ - if (wresult == THREAD_NOT_WAITING) + if (wresult == THREAD_NOT_WAITING) { return; + } if (wresult == THREAD_WAITING) { counter((interruptible == THREAD_ABORTSAFE) ? - c_ipc_mqueue_receive_block_user++ : - c_ipc_mqueue_receive_block_kernel++); + c_ipc_mqueue_receive_block_user++ : + c_ipc_mqueue_receive_block_kernel++); - if (self->ith_continuation) + if (self->ith_continuation) { thread_block(ipc_mqueue_receive_continue); - /* NOTREACHED */ + } + /* NOTREACHED */ wresult = thread_block(THREAD_CONTINUE_NULL); } ipc_mqueue_receive_results(wresult); } -static int mqueue_process_prepost_receive(void *ctx, struct waitq *waitq, - struct waitq_set *wqset) +static int +mqueue_process_prepost_receive(void *ctx, struct waitq *waitq, + struct waitq_set *wqset) { ipc_mqueue_t port_mq, *pmq_ptr; @@ -1006,8 +1037,9 @@ static int mqueue_process_prepost_receive(void *ctx, struct waitq *waitq, * If there are no messages on this queue, skip it and remove * it from the prepost list */ - if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) + if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) { return WQ_ITERATE_INVALIDATE_CONTINUE; + } /* * There are messages waiting on this port. @@ -1015,8 +1047,9 @@ static int mqueue_process_prepost_receive(void *ctx, struct waitq *waitq, * waitq locked. 
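
The iterator contract used by mqueue_process_prepost_receive() above,
condensed; the return codes are taken from the hunks, with their
semantics inferred from the names and the surrounding comments:

	if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) {
		/* stale prepost: drop it from the set, keep iterating */
		return WQ_ITERATE_INVALIDATE_CONTINUE;
	}
	*pmq_ptr = port_mq;                  /* report the message-bearing port */
	return WQ_ITERATE_BREAK_KEEP_LOCKED; /* stop; hand port_mq back locked */

This is how ipc_mqueue_receive_on_thread() pulls exactly one
message-bearing port out of a port set before deciding to peek or
select on it.
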
*/ pmq_ptr = (ipc_mqueue_t *)ctx; - if (pmq_ptr) + if (pmq_ptr) { *pmq_ptr = port_mq; + } return WQ_ITERATE_BREAK_KEEP_LOCKED; } @@ -1042,7 +1075,7 @@ ipc_mqueue_receive_on_thread( thread_t thread) { wait_result_t wresult; - uint64_t deadline; + uint64_t deadline; struct turnstile *rcv_turnstile = TURNSTILE_NULL; turnstile_inheritor_t inheritor = NULL; @@ -1065,8 +1098,8 @@ ipc_mqueue_receive_on_thread( ipc_mqueue_t port_mq = IMQ_NULL; (void)waitq_set_iterate_preposts(&mqueue->imq_set_queue, - &port_mq, - mqueue_process_prepost_receive); + &port_mq, + mqueue_process_prepost_receive); if (port_mq != IMQ_NULL) { /* @@ -1084,11 +1117,12 @@ ipc_mqueue_receive_on_thread( * Continue on to handling the message with just * the port mqueue locked. */ - if (option & MACH_PEEK_MSG) + if (option & MACH_PEEK_MSG) { ipc_mqueue_peek_on_thread(port_mq, option, thread); - else + } else { ipc_mqueue_select_on_thread(port_mq, mqueue, option, - max_size, thread); + max_size, thread); + } imq_unlock(port_mq); return THREAD_NOT_WAITING; @@ -1101,17 +1135,18 @@ ipc_mqueue_receive_on_thread( */ kmsgs = &mqueue->imq_messages; if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) { - if (option & MACH_PEEK_MSG) + if (option & MACH_PEEK_MSG) { ipc_mqueue_peek_on_thread(mqueue, option, thread); - else + } else { ipc_mqueue_select_on_thread(mqueue, IMQ_NULL, option, - max_size, thread); + max_size, thread); + } imq_unlock(mqueue); return THREAD_NOT_WAITING; } } else { panic("Unknown mqueue type 0x%x: likely memory corruption!\n", - mqueue->imq_wait_queue.waitq_type); + mqueue->imq_wait_queue.waitq_type); } /* @@ -1131,15 +1166,17 @@ ipc_mqueue_receive_on_thread( thread->ith_rsize = max_size; thread->ith_msize = 0; - if (option & MACH_PEEK_MSG) + if (option & MACH_PEEK_MSG) { thread->ith_state = MACH_PEEK_IN_PROGRESS; - else + } else { thread->ith_state = MACH_RCV_IN_PROGRESS; + } - if (option & MACH_RCV_TIMEOUT) - clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline); - else + if (option & MACH_RCV_TIMEOUT) { + clock_interval_to_deadline(rcv_timeout, 1000 * NSEC_PER_USEC, &deadline); + } else { deadline = 0; + } /* * Threads waiting on a port (not portset) @@ -1161,28 +1198,29 @@ ipc_mqueue_receive_on_thread( if (imq_is_queue(mqueue)) { ipc_port_t port = ip_from_mq(mqueue); rcv_turnstile = turnstile_prepare((uintptr_t)port, - port_rcv_turnstile_address(port), - TURNSTILE_NULL, TURNSTILE_SYNC_IPC); + port_rcv_turnstile_address(port), + TURNSTILE_NULL, TURNSTILE_SYNC_IPC); if (port->ip_specialreply) { inheritor = ipc_port_get_special_reply_port_inheritor(port); } turnstile_update_inheritor(rcv_turnstile, inheritor, - (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_DELAYED_UPDATE)); + (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_DELAYED_UPDATE)); } thread_set_pending_block_hint(thread, kThreadWaitPortReceive); wresult = waitq_assert_wait64_locked(&mqueue->imq_wait_queue, - IPC_MQUEUE_RECEIVE, - interruptible, - TIMEOUT_URGENCY_USER_NORMAL, - deadline, - TIMEOUT_NO_LEEWAY, - thread); + IPC_MQUEUE_RECEIVE, + interruptible, + TIMEOUT_URGENCY_USER_NORMAL, + deadline, + TIMEOUT_NO_LEEWAY, + thread); /* preposts should be detected above, not here */ - if (wresult == THREAD_AWAKENED) + if (wresult == THREAD_AWAKENED) { panic("ipc_mqueue_receive_on_thread: sleep walking"); + } imq_unlock(mqueue); @@ -1247,10 +1285,10 @@ ipc_mqueue_peek_on_thread( */ void ipc_mqueue_select_on_thread( - ipc_mqueue_t port_mq, - ipc_mqueue_t set_mq, - mach_msg_option_t option, - mach_msg_size_t max_size, + ipc_mqueue_t port_mq, + ipc_mqueue_t 
set_mq, + mach_msg_option_t option, + mach_msg_size_t max_size, thread_t thread) { ipc_kmsg_t kmsg; @@ -1285,8 +1323,9 @@ ipc_mqueue_select_on_thread( ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg); #if MACH_FLIPC - if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port_mq->imq_fport)) - flipc_msg_ack(kmsg->ikm_node, port_mq, TRUE); + if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port_mq->imq_fport)) { + flipc_msg_ack(kmsg->ikm_node, port_mq, TRUE); + } #endif ipc_mqueue_release_msgcount(port_mq, set_mq); thread->ith_seqno = port_mq->imq_seqno++; @@ -1312,11 +1351,11 @@ ipc_mqueue_select_on_thread( */ unsigned ipc_mqueue_peek_locked(ipc_mqueue_t mq, - mach_port_seqno_t * seqnop, - mach_msg_size_t * msg_sizep, - mach_msg_id_t * msg_idp, - mach_msg_max_trailer_t * msg_trailerp, - ipc_kmsg_t *kmsgp) + mach_port_seqno_t * seqnop, + mach_msg_size_t * msg_sizep, + mach_msg_id_t * msg_idp, + mach_msg_max_trailer_t * msg_trailerp, + ipc_kmsg_t *kmsgp) { ipc_kmsg_queue_t kmsgq; ipc_kmsg_t kmsg; @@ -1326,17 +1365,19 @@ ipc_mqueue_peek_locked(ipc_mqueue_t mq, assert(!imq_is_set(mq)); seqno = 0; - if (seqnop != NULL) + if (seqnop != NULL) { seqno = *seqnop; + } if (seqno == 0) { seqno = mq->imq_seqno; msgoff = 0; } else if (seqno >= mq->imq_seqno && - seqno < mq->imq_seqno + mq->imq_msgcount) { + seqno < mq->imq_seqno + mq->imq_msgcount) { msgoff = seqno - mq->imq_seqno; - } else + } else { goto out; + } /* look for the message that would match that seqno */ kmsgq = &mq->imq_messages; @@ -1344,23 +1385,29 @@ ipc_mqueue_peek_locked(ipc_mqueue_t mq, while (msgoff-- && kmsg != IKM_NULL) { kmsg = ipc_kmsg_queue_next(kmsgq, kmsg); } - if (kmsg == IKM_NULL) + if (kmsg == IKM_NULL) { goto out; + } /* found one - return the requested info */ - if (seqnop != NULL) + if (seqnop != NULL) { *seqnop = seqno; - if (msg_sizep != NULL) + } + if (msg_sizep != NULL) { *msg_sizep = kmsg->ikm_header->msgh_size; - if (msg_idp != NULL) + } + if (msg_idp != NULL) { *msg_idp = kmsg->ikm_header->msgh_id; - if (msg_trailerp != NULL) + } + if (msg_trailerp != NULL) { memcpy(msg_trailerp, - (mach_msg_max_trailer_t *)((vm_offset_t)kmsg->ikm_header + - round_msg(kmsg->ikm_header->msgh_size)), - sizeof(mach_msg_max_trailer_t)); - if (kmsgp != NULL) + (mach_msg_max_trailer_t *)((vm_offset_t)kmsg->ikm_header + + round_msg(kmsg->ikm_header->msgh_size)), + sizeof(mach_msg_max_trailer_t)); + } + if (kmsgp != NULL) { *kmsgp = kmsg; + } res = 1; @@ -1384,18 +1431,18 @@ out: */ unsigned ipc_mqueue_peek(ipc_mqueue_t mq, - mach_port_seqno_t * seqnop, - mach_msg_size_t * msg_sizep, - mach_msg_id_t * msg_idp, - mach_msg_max_trailer_t * msg_trailerp, - ipc_kmsg_t *kmsgp) + mach_port_seqno_t * seqnop, + mach_msg_size_t * msg_sizep, + mach_msg_id_t * msg_idp, + mach_msg_max_trailer_t * msg_trailerp, + ipc_kmsg_t *kmsgp) { unsigned res; imq_lock(mq); res = ipc_mqueue_peek_locked(mq, seqnop, msg_sizep, msg_idp, - msg_trailerp, kmsgp); + msg_trailerp, kmsgp); imq_unlock(mq); return res; @@ -1414,7 +1461,8 @@ ipc_mqueue_peek(ipc_mqueue_t mq, * (and potentially invalid!) * */ -void ipc_mqueue_release_peek_ref(ipc_mqueue_t mq) +void +ipc_mqueue_release_peek_ref(ipc_mqueue_t mq) { assert(!imq_is_set(mq)); assert(imq_held(mq)); @@ -1441,8 +1489,9 @@ void ipc_mqueue_release_peek_ref(ipc_mqueue_t mq) * queue is checked. If a message wasn't there before we entered here, no need * to find it (if we do, great). 
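
Worked example for the seqno window in ipc_mqueue_peek_locked() above:
a caller-supplied seqno is honored only when it falls inside
[imq_seqno, imq_seqno + imq_msgcount), and the matching message is
then reached by walking msgoff = seqno - imq_seqno links from the head
of imq_messages. With imq_seqno == 10 and imq_msgcount == 3, seqnos 10
through 12 are peekable (12 is two hops from the head); any other
nonzero value takes the early "out" exit without reporting a message,
and seqno == 0 means "peek the current head".
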
*/ -static int mqueue_peek_iterator(void *ctx, struct waitq *waitq, - struct waitq_set *wqset) +static int +mqueue_peek_iterator(void *ctx, struct waitq *waitq, + struct waitq_set *wqset) { ipc_mqueue_t port_mq = (ipc_mqueue_t)waitq; ipc_kmsg_queue_t kmsgs = &port_mq->imq_messages; @@ -1450,9 +1499,9 @@ static int mqueue_peek_iterator(void *ctx, struct waitq *waitq, (void)ctx; (void)wqset; - if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) + if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) { return WQ_ITERATE_BREAK; /* break out of the prepost iteration */ - + } return WQ_ITERATE_CONTINUE; } @@ -1478,15 +1527,16 @@ ipc_mqueue_set_peek(ipc_mqueue_t mq) * as invalid. In that case, even though we don't have messages, we * have an end-of-life event to deliver. */ - if (!imq_is_valid(mq)) + if (!imq_is_valid(mq)) { return 1; + } ret = waitq_set_iterate_preposts(&mq->imq_set_queue, NULL, - mqueue_peek_iterator); + mqueue_peek_iterator); imq_unlock(mq); - return (ret == WQ_ITERATE_BREAK); + return ret == WQ_ITERATE_BREAK; } /* @@ -1548,8 +1598,9 @@ ipc_mqueue_set_gather_member_names( assert(IP_VALID(port)); if (ip_active(port) && waitq_member(&mq->imq_wait_queue, wqset)) { - if (actual < maxnames) + if (actual < maxnames) { names[actual] = mq->imq_receiver_name; + } actual++; } } @@ -1567,7 +1618,7 @@ out: * Purpose: * Destroy a (non-set) message queue. * Set any blocked senders running. - * Destroy the kmsgs in the queue. + * Destroy the kmsgs in the queue. * Conditions: * mqueue locked * Receivers were removed when the receive right was "changed" @@ -1591,9 +1642,9 @@ ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue) if (send_turnstile != TURNSTILE_NULL) { waitq_wakeup64_all(&send_turnstile->ts_waitq, - IPC_MQUEUE_FULL, - THREAD_RESTART, - WAITQ_ALL_PRIORITIES); + IPC_MQUEUE_FULL, + THREAD_RESTART, + WAITQ_ALL_PRIORITIES); } /* @@ -1603,13 +1654,15 @@ ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue) kmqueue = &mqueue->imq_messages; while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) { #if MACH_FLIPC - if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(mqueue->imq_fport)) - flipc_msg_ack(kmsg->ikm_node, mqueue, TRUE); + if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(mqueue->imq_fport)) { + flipc_msg_ack(kmsg->ikm_node, mqueue, TRUE); + } #endif boolean_t first; first = ipc_kmsg_delayed_destroy(kmsg); - if (first) + if (first) { reap = first; + } } /* @@ -1647,22 +1700,21 @@ ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue) void ipc_mqueue_set_qlimit( - ipc_mqueue_t mqueue, - mach_port_msgcount_t qlimit) + ipc_mqueue_t mqueue, + mach_port_msgcount_t qlimit) { + assert(qlimit <= MACH_PORT_QLIMIT_MAX); - assert(qlimit <= MACH_PORT_QLIMIT_MAX); - - /* wake up senders allowed by the new qlimit */ - imq_lock(mqueue); - if (qlimit > mqueue->imq_qlimit) { - mach_port_msgcount_t i, wakeup; - struct turnstile *send_turnstile = port_send_turnstile(ip_from_mq(mqueue)); + /* wake up senders allowed by the new qlimit */ + imq_lock(mqueue); + if (qlimit > mqueue->imq_qlimit) { + mach_port_msgcount_t i, wakeup; + struct turnstile *send_turnstile = port_send_turnstile(ip_from_mq(mqueue)); - /* caution: wakeup, qlimit are unsigned */ - wakeup = qlimit - mqueue->imq_qlimit; + /* caution: wakeup, qlimit are unsigned */ + wakeup = qlimit - mqueue->imq_qlimit; - for (i = 0; i < wakeup; i++) { + for (i = 0; i < wakeup; i++) { /* * boost the priority of the awoken thread * (WAITQ_PROMOTE_PRIORITY) to ensure it uses @@ -1672,14 +1724,14 @@ ipc_mqueue_set_qlimit( */ if (send_turnstile == TURNSTILE_NULL || 
waitq_wakeup64_one(&send_turnstile->ts_waitq, - IPC_MQUEUE_FULL, - THREAD_AWAKENED, - WAITQ_PROMOTE_PRIORITY) == KERN_NOT_WAITING) { + IPC_MQUEUE_FULL, + THREAD_AWAKENED, + WAITQ_PROMOTE_PRIORITY) == KERN_NOT_WAITING) { mqueue->imq_fullwaiters = FALSE; break; } mqueue->imq_msgcount++; /* give it to the awakened thread */ - } + } } mqueue->imq_qlimit = qlimit; imq_unlock(mqueue); @@ -1694,8 +1746,8 @@ ipc_mqueue_set_qlimit( */ void ipc_mqueue_set_seqno( - ipc_mqueue_t mqueue, - mach_port_seqno_t seqno) + ipc_mqueue_t mqueue, + mach_port_seqno_t seqno) { imq_lock(mqueue); mqueue->imq_seqno = seqno; @@ -1722,10 +1774,10 @@ ipc_mqueue_set_seqno( mach_msg_return_t ipc_mqueue_copyin( - ipc_space_t space, - mach_port_name_t name, - ipc_mqueue_t *mqueuep, - ipc_object_t *objectp) + ipc_space_t space, + mach_port_name_t name, + ipc_mqueue_t *mqueuep, + ipc_object_t *objectp) { ipc_entry_t entry; ipc_object_t object; @@ -1757,7 +1809,6 @@ ipc_mqueue_copyin( assert(port->ip_receiver == space); is_read_unlock(space); mqueue = &port->ip_messages; - } else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) { ipc_pset_t pset; diff --git a/osfmk/ipc/ipc_mqueue.h b/osfmk/ipc/ipc_mqueue.h index 05d952ef3..140ce1dfd 100644 --- a/osfmk/ipc/ipc_mqueue.h +++ b/osfmk/ipc/ipc_mqueue.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * Definitions for message queues. 
*/ -#ifndef _IPC_IPC_MQUEUE_H_ +#ifndef _IPC_IPC_MQUEUE_H_ #define _IPC_IPC_MQUEUE_H_ #include @@ -84,18 +84,18 @@ typedef struct ipc_mqueue { union { struct { - struct waitq waitq; - struct ipc_kmsg_queue messages; - mach_port_seqno_t seqno; - mach_port_name_t receiver_name; - uint16_t msgcount; - uint16_t qlimit; + struct waitq waitq; + struct ipc_kmsg_queue messages; + mach_port_seqno_t seqno; + mach_port_name_t receiver_name; + uint16_t msgcount; + uint16_t qlimit; #if MACH_FLIPC - struct flipc_port *fport; // Null for local port, or ptr to flipc port + struct flipc_port *fport; // Null for local port, or ptr to flipc port #endif } port; struct { - struct waitq_set setq; + struct waitq_set setq; } pset; } data; union { @@ -104,7 +104,7 @@ typedef struct ipc_mqueue { }; } *ipc_mqueue_t; -#define IMQ_NULL ((ipc_mqueue_t) 0) +#define IMQ_NULL ((ipc_mqueue_t) 0) /* * When a receive right is in flight, before it can ever be registered with @@ -119,38 +119,38 @@ typedef struct ipc_mqueue { #define IMQ_INHERITOR(imq) ((struct turnstile *)((imq)->imq_inheritor ^ 1)) #define IMQ_SET_INHERITOR(imq, inheritor) \ MACRO_BEGIN \ - assert(((imq)->imq_inheritor & 1) || SLIST_EMPTY(&(imq)->imq_klist)); \ - ((imq)->imq_inheritor = (uintptr_t)(inheritor) | 1); \ + assert(((imq)->imq_inheritor & 1) || SLIST_EMPTY(&(imq)->imq_klist)); \ + ((imq)->imq_inheritor = (uintptr_t)(inheritor) | 1); \ MACRO_END -#define imq_wait_queue data.port.waitq -#define imq_messages data.port.messages -#define imq_msgcount data.port.msgcount -#define imq_qlimit data.port.qlimit -#define imq_seqno data.port.seqno -#define imq_receiver_name data.port.receiver_name +#define imq_wait_queue data.port.waitq +#define imq_messages data.port.messages +#define imq_msgcount data.port.msgcount +#define imq_qlimit data.port.qlimit +#define imq_seqno data.port.seqno +#define imq_receiver_name data.port.receiver_name #if MACH_FLIPC -#define imq_fport data.port.fport +#define imq_fport data.port.fport #endif /* * we can use the 'eventmask' bits of the waitq b/c * they are only used by global queues */ -#define imq_fullwaiters data.port.waitq.waitq_eventmask -#define imq_in_pset data.port.waitq.waitq_set_id -#define imq_preposts data.port.waitq.waitq_prepost_id +#define imq_fullwaiters data.port.waitq.waitq_eventmask +#define imq_in_pset data.port.waitq.waitq_set_id +#define imq_preposts data.port.waitq.waitq_prepost_id -#define imq_set_queue data.pset.setq -#define imq_is_set(mq) waitqs_is_set(&(mq)->imq_set_queue) -#define imq_is_queue(mq) waitq_is_queue(&(mq)->imq_wait_queue) -#define imq_is_valid(mq) waitq_is_valid(&(mq)->imq_wait_queue) +#define imq_set_queue data.pset.setq +#define imq_is_set(mq) waitqs_is_set(&(mq)->imq_set_queue) +#define imq_is_queue(mq) waitq_is_queue(&(mq)->imq_wait_queue) +#define imq_is_valid(mq) waitq_is_valid(&(mq)->imq_wait_queue) -#define imq_lock(mq) waitq_lock(&(mq)->imq_wait_queue) -#define imq_lock_try(mq) waitq_lock_try(&(mq)->imq_wait_queue) -#define imq_unlock(mq) waitq_unlock(&(mq)->imq_wait_queue) -#define imq_held(mq) waitq_held(&(mq)->imq_wait_queue) -#define imq_valid(mq) waitq_valid(&(mq)->imq_wait_queue) +#define imq_lock(mq) waitq_lock(&(mq)->imq_wait_queue) +#define imq_lock_try(mq) waitq_lock_try(&(mq)->imq_wait_queue) +#define imq_unlock(mq) waitq_unlock(&(mq)->imq_wait_queue) +#define imq_held(mq) waitq_held(&(mq)->imq_wait_queue) +#define imq_valid(mq) waitq_valid(&(mq)->imq_wait_queue) /* * Get an ipc_mqueue pointer from a waitq pointer. 
These are traditionally the @@ -158,31 +158,31 @@ MACRO_END * member positions - it should allow the waitq to move around in either the * port-set mqueue or the port mqueue independently. */ -#define imq_from_waitq(waitq) (waitq_is_set(waitq) ? \ - ((struct ipc_mqueue *)((void *)( \ - (uintptr_t)(waitq) - \ - __offsetof(struct ipc_mqueue, imq_set_queue)) \ - )) : \ - ((struct ipc_mqueue *)((void *)( \ - (uintptr_t)(waitq) - \ - __offsetof(struct ipc_mqueue, imq_wait_queue)) \ - )) \ - ) +#define imq_from_waitq(waitq) (waitq_is_set(waitq) ? \ + ((struct ipc_mqueue *)((void *)( \ + (uintptr_t)(waitq) - \ + __offsetof(struct ipc_mqueue, imq_set_queue)) \ + )) : \ + ((struct ipc_mqueue *)((void *)( \ + (uintptr_t)(waitq) - \ + __offsetof(struct ipc_mqueue, imq_wait_queue)) \ + )) \ + ) extern void imq_reserve_and_lock(ipc_mqueue_t mq, - uint64_t *reserved_prepost); + uint64_t *reserved_prepost); extern void imq_release_and_unlock(ipc_mqueue_t mq, - uint64_t reserved_prepost); + uint64_t reserved_prepost); -#define imq_full(mq) ((mq)->imq_msgcount >= (mq)->imq_qlimit) -#define imq_full_kernel(mq) ((mq)->imq_msgcount >= MACH_PORT_QLIMIT_KERNEL) +#define imq_full(mq) ((mq)->imq_msgcount >= (mq)->imq_qlimit) +#define imq_full_kernel(mq) ((mq)->imq_msgcount >= MACH_PORT_QLIMIT_KERNEL) extern int ipc_mqueue_full; // extern int ipc_mqueue_rcv; -#define IPC_MQUEUE_FULL CAST_EVENT64_T(&ipc_mqueue_full) -#define IPC_MQUEUE_RECEIVE NO_EVENT64 +#define IPC_MQUEUE_FULL CAST_EVENT64_T(&ipc_mqueue_full) +#define IPC_MQUEUE_RECEIVE NO_EVENT64 /* * Exported interfaces @@ -190,59 +190,60 @@ extern int ipc_mqueue_full; /* Initialize a newly-allocated message queue */ extern void ipc_mqueue_init( - ipc_mqueue_t mqueue, - boolean_t is_set); + ipc_mqueue_t mqueue, + boolean_t is_set); /* de-initialize / cleanup an mqueue (specifically waitq resources) */ extern void ipc_mqueue_deinit( - ipc_mqueue_t mqueue); + ipc_mqueue_t mqueue); /* destroy an mqueue */ extern boolean_t ipc_mqueue_destroy_locked( - ipc_mqueue_t mqueue); + ipc_mqueue_t mqueue); /* Wake up receivers waiting in a message queue */ extern void ipc_mqueue_changed( - ipc_mqueue_t mqueue); + ipc_space_t space, + ipc_mqueue_t mqueue); /* Add the specific mqueue as a member of the set */ extern kern_return_t ipc_mqueue_add( - ipc_mqueue_t mqueue, - ipc_mqueue_t set_mqueue, - uint64_t *reserved_link, - uint64_t *reserved_prepost); + ipc_mqueue_t mqueue, + ipc_mqueue_t set_mqueue, + uint64_t *reserved_link, + uint64_t *reserved_prepost); /* Check to see if mqueue is member of set_mqueue */ extern boolean_t ipc_mqueue_member( - ipc_mqueue_t mqueue, - ipc_mqueue_t set_mqueue); + ipc_mqueue_t mqueue, + ipc_mqueue_t set_mqueue); /* Remove an mqueue from a specific set */ extern kern_return_t ipc_mqueue_remove( - ipc_mqueue_t mqueue, - ipc_mqueue_t set_mqueue); + ipc_mqueue_t mqueue, + ipc_mqueue_t set_mqueue); /* Remove an mqueue from all sets */ extern void ipc_mqueue_remove_from_all( - ipc_mqueue_t mqueue); + ipc_mqueue_t mqueue); /* Remove all the members of the specifiied set */ extern void ipc_mqueue_remove_all( - ipc_mqueue_t mqueue); + ipc_mqueue_t mqueue); /* Send a message to a port */ extern mach_msg_return_t ipc_mqueue_send( - ipc_mqueue_t mqueue, - ipc_kmsg_t kmsg, - mach_msg_option_t option, + ipc_mqueue_t mqueue, + ipc_kmsg_t kmsg, + mach_msg_option_t option, mach_msg_timeout_t timeout_val); /* check for queue send queue full of a port */ extern mach_msg_return_t ipc_mqueue_preflight_send( - ipc_mqueue_t mqueue, - ipc_kmsg_t kmsg, - 
mach_msg_option_t option, - mach_msg_timeout_t timeout_val); + ipc_mqueue_t mqueue, + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_msg_timeout_t timeout_val); /* Set a [send-possible] override on the mqueue */ extern void ipc_mqueue_override_send( @@ -251,21 +252,21 @@ extern void ipc_mqueue_override_send( /* Deliver message to message queue or waiting receiver */ extern void ipc_mqueue_post( - ipc_mqueue_t mqueue, - ipc_kmsg_t kmsg, - mach_msg_option_t option); + ipc_mqueue_t mqueue, + ipc_kmsg_t kmsg, + mach_msg_option_t option); /* Receive a message from a message queue */ extern void ipc_mqueue_receive( - ipc_mqueue_t mqueue, - mach_msg_option_t option, - mach_msg_size_t max_size, - mach_msg_timeout_t timeout_val, + ipc_mqueue_t mqueue, + mach_msg_option_t option, + mach_msg_size_t max_size, + mach_msg_timeout_t timeout_val, int interruptible); /* Receive a message from a message queue using a specified thread */ extern wait_result_t ipc_mqueue_receive_on_thread( - ipc_mqueue_t mqueue, + ipc_mqueue_t mqueue, mach_msg_option_t option, mach_msg_size_t max_size, mach_msg_timeout_t rcv_timeout, @@ -274,71 +275,71 @@ extern wait_result_t ipc_mqueue_receive_on_thread( /* Continuation routine for message receive */ extern void ipc_mqueue_receive_continue( - void *param, - wait_result_t wresult); + void *param, + wait_result_t wresult); /* Select a message from a queue and try to post it to ourself */ extern void ipc_mqueue_select_on_thread( - ipc_mqueue_t port_mq, - ipc_mqueue_t set_mq, - mach_msg_option_t option, - mach_msg_size_t max_size, + ipc_mqueue_t port_mq, + ipc_mqueue_t set_mq, + mach_msg_option_t option, + mach_msg_size_t max_size, thread_t thread); /* Peek into a messaqe queue to see if there are messages */ extern unsigned ipc_mqueue_peek( - ipc_mqueue_t mqueue, - mach_port_seqno_t *msg_seqnop, - mach_msg_size_t *msg_sizep, - mach_msg_id_t *msg_idp, - mach_msg_max_trailer_t *msg_trailerp, - ipc_kmsg_t *kmsgp); + ipc_mqueue_t mqueue, + mach_port_seqno_t *msg_seqnop, + mach_msg_size_t *msg_sizep, + mach_msg_id_t *msg_idp, + mach_msg_max_trailer_t *msg_trailerp, + ipc_kmsg_t *kmsgp); /* Peek into a locked messaqe queue to see if there are messages */ extern unsigned ipc_mqueue_peek_locked( - ipc_mqueue_t mqueue, - mach_port_seqno_t *msg_seqnop, - mach_msg_size_t *msg_sizep, - mach_msg_id_t *msg_idp, - mach_msg_max_trailer_t *msg_trailerp, - ipc_kmsg_t *kmsgp); + ipc_mqueue_t mqueue, + mach_port_seqno_t *msg_seqnop, + mach_msg_size_t *msg_sizep, + mach_msg_id_t *msg_idp, + mach_msg_max_trailer_t *msg_trailerp, + ipc_kmsg_t *kmsgp); /* Peek into a messaqe queue set to see if there are queues with messages */ extern unsigned ipc_mqueue_set_peek( - ipc_mqueue_t mqueue); + ipc_mqueue_t mqueue); /* Release an mqueue/port reference that was granted by MACH_PEEK_MSG */ extern void ipc_mqueue_release_peek_ref( - ipc_mqueue_t mqueue); + ipc_mqueue_t mqueue); /* Gather the names of member port for a given set */ extern void ipc_mqueue_set_gather_member_names( - ipc_space_t space, - ipc_mqueue_t set_mq, - ipc_entry_num_t maxnames, - mach_port_name_t *names, - ipc_entry_num_t *actualp); + ipc_space_t space, + ipc_mqueue_t set_mq, + ipc_entry_num_t maxnames, + mach_port_name_t *names, + ipc_entry_num_t *actualp); /* Clear a message count reservation */ extern void ipc_mqueue_release_msgcount( - ipc_mqueue_t port_mq, - ipc_mqueue_t set_mq); + ipc_mqueue_t port_mq, + ipc_mqueue_t set_mq); /* Change a queue limit */ extern void ipc_mqueue_set_qlimit( - ipc_mqueue_t mqueue, - 
mach_port_msgcount_t qlimit); + ipc_mqueue_t mqueue, + mach_port_msgcount_t qlimit); /* Change a queue's sequence number */ extern void ipc_mqueue_set_seqno( - ipc_mqueue_t mqueue, - mach_port_seqno_t seqno); + ipc_mqueue_t mqueue, + mach_port_seqno_t seqno); /* Convert a name in a space to a message queue */ extern mach_msg_return_t ipc_mqueue_copyin( - ipc_space_t space, - mach_port_name_t name, - ipc_mqueue_t *mqueuep, - ipc_object_t *objectp); + ipc_space_t space, + mach_port_name_t name, + ipc_mqueue_t *mqueuep, + ipc_object_t *objectp); -#endif /* _IPC_IPC_MQUEUE_H_ */ +#endif /* _IPC_IPC_MQUEUE_H_ */ diff --git a/osfmk/ipc/ipc_notify.c b/osfmk/ipc/ipc_notify.c index 44e6ed6d9..1730c5b41 100644 --- a/osfmk/ipc/ipc_notify.c +++ b/osfmk/ipc/ipc_notify.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -81,8 +81,8 @@ void ipc_notify_port_deleted( - ipc_port_t port, - mach_port_name_t name) + ipc_port_t port, + mach_port_name_t name) { (void)mach_notify_port_deleted(port, name); /* send-once right consumed */ @@ -99,8 +99,8 @@ ipc_notify_port_deleted( void ipc_notify_send_possible( - ipc_port_t port, - mach_port_name_t name) + ipc_port_t port, + mach_port_name_t name) { (void)mach_notify_send_possible(port, name); /* send-once right consumed */ @@ -120,8 +120,8 @@ ipc_notify_send_possible( void ipc_notify_port_destroyed( - ipc_port_t port, - ipc_port_t right) + ipc_port_t port, + ipc_port_t right) { mach_notify_port_destroyed(port, right); /* send-once and receive rights consumed */ @@ -138,8 +138,8 @@ ipc_notify_port_destroyed( void ipc_notify_no_senders( - ipc_port_t port, - mach_port_mscount_t mscount) + ipc_port_t port, + mach_port_mscount_t mscount) { (void)mach_notify_no_senders(port, mscount); /* send-once right consumed */ @@ -156,7 +156,7 @@ ipc_notify_no_senders( void ipc_notify_send_once( - ipc_port_t port) + ipc_port_t port) { ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_SR_NONE, FALSE); @@ -175,8 +175,8 @@ ipc_notify_send_once( void ipc_notify_dead_name( - ipc_port_t port, - mach_port_name_t name) + ipc_port_t port, + mach_port_name_t name) { (void)mach_notify_dead_name(port, name); /* send-once right consumed */ diff --git a/osfmk/ipc/ipc_notify.h b/osfmk/ipc/ipc_notify.h index 0d87a6ec0..4147cf58d 100644 --- a/osfmk/ipc/ipc_notify.h +++ b/osfmk/ipc/ipc_notify.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,40 +63,40 @@ * Declarations of notification-sending functions. */ -#ifndef _IPC_IPC_NOTIFY_H_ +#ifndef _IPC_IPC_NOTIFY_H_ #define _IPC_IPC_NOTIFY_H_ /* - * Exported interfaces + * Exported interfaces */ /* Send a port-deleted notification */ extern void ipc_notify_port_deleted( - ipc_port_t port, - mach_port_name_t name); + ipc_port_t port, + mach_port_name_t name); /* Send a send-possible notification */ extern void ipc_notify_send_possible( - ipc_port_t port, - mach_port_name_t name); + ipc_port_t port, + mach_port_name_t name); /* Send a port-destroyed notification */ extern void ipc_notify_port_destroyed( - ipc_port_t port, - ipc_port_t right); + ipc_port_t port, + ipc_port_t right); /* Send a no-senders notification */ extern void ipc_notify_no_senders( - ipc_port_t port, - mach_port_mscount_t mscount); + ipc_port_t port, + mach_port_mscount_t mscount); /* Send a send-once notification */ extern void ipc_notify_send_once( - ipc_port_t port); + ipc_port_t port); /* Send a dead-name notification */ extern void ipc_notify_dead_name( - ipc_port_t port, - mach_port_name_t name); + ipc_port_t port, + mach_port_name_t name); -#endif /* _IPC_IPC_NOTIFY_H_ */ +#endif /* _IPC_IPC_NOTIFY_H_ */ diff --git a/osfmk/ipc/ipc_object.c b/osfmk/ipc/ipc_object.c index 6b40e4761..27a1cca4a 100644 --- a/osfmk/ipc/ipc_object.c +++ b/osfmk/ipc/ipc_object.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -104,7 +104,7 @@ zone_t ipc_object_zones[IOT_NUMBER]; void ipc_object_reference( - ipc_object_t object) + ipc_object_t object) { io_reference(object); } @@ -117,7 +117,7 @@ ipc_object_reference( void ipc_object_release( - ipc_object_t object) + ipc_object_t object) { io_release(object); } @@ -137,18 +137,19 @@ ipc_object_release( */ kern_return_t ipc_object_translate( - ipc_space_t space, - mach_port_name_t name, - mach_port_right_t right, - ipc_object_t *objectp) + ipc_space_t space, + mach_port_name_t name, + mach_port_right_t right, + ipc_object_t *objectp) { ipc_entry_t entry; ipc_object_t object; kern_return_t kr; kr = ipc_right_lookup_read(space, name, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is read-locked and active */ if ((entry->ie_bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) { @@ -182,13 +183,13 @@ ipc_object_translate( kern_return_t ipc_object_translate_two( - ipc_space_t space, - mach_port_name_t name1, - mach_port_right_t right1, - ipc_object_t *objectp1, - mach_port_name_t name2, - mach_port_right_t right2, - ipc_object_t *objectp2) + ipc_space_t space, + mach_port_name_t name1, + mach_port_right_t right1, + ipc_object_t *objectp1, + mach_port_name_t name2, + mach_port_right_t right2, + ipc_object_t *objectp2) { ipc_entry_t entry1; ipc_entry_t entry2; @@ -196,8 +197,9 @@ ipc_object_translate_two( kern_return_t kr; kr = ipc_right_lookup_two_read(space, name1, &entry1, name2, &entry2); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is read-locked and active */ if ((entry1->ie_bits & MACH_PORT_TYPE(right1)) == MACH_PORT_TYPE_NONE) { @@ -241,15 +243,16 @@ ipc_object_translate_two( kern_return_t ipc_object_alloc_dead( - ipc_space_t space, - mach_port_name_t *namep) + ipc_space_t space, + mach_port_name_t *namep) { ipc_entry_t entry; kern_return_t kr; kr = ipc_entry_alloc(space, namep, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is write-locked */ /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */ @@ -276,19 +279,21 @@ ipc_object_alloc_dead( kern_return_t ipc_object_alloc_dead_name( - ipc_space_t space, - mach_port_name_t name) + ipc_space_t space, + mach_port_name_t name) { ipc_entry_t entry; kern_return_t kr; kr = ipc_entry_alloc_name(space, name, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is write-locked */ - if (ipc_right_inuse(space, name, entry)) + if (ipc_right_inuse(space, name, entry)) { return KERN_NAME_EXISTS; + } /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */ @@ -305,7 +310,7 @@ ipc_object_alloc_dead_name( * Allocate an object. * Conditions: * Nothing locked. If successful, the object is returned locked. - * The space is write locked on successful return. + * The space is write locked on successful return. * The caller doesn't get a reference for the object. * Returns: * KERN_SUCCESS The object is allocated. 
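
The two translate functions reformatted above share one
lookup-and-check shape; a condensed sketch (the KERN_INVALID_RIGHT
return and the object lock/space unlock choreography are elided by
these hunks and assumed here from the functions' stated conditions):

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */
	if ((entry->ie_bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;   /* assumed: not shown in the hunk */
	}

ipc_object_translate_two() runs the same bits test against both
entries before taking any object lock, so a failure on either name
leaves nothing locked but the (read) space, which it then drops.
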
@@ -316,12 +321,12 @@ ipc_object_alloc_dead_name( kern_return_t ipc_object_alloc( - ipc_space_t space, - ipc_object_type_t otype, - mach_port_type_t type, - mach_port_urefs_t urefs, - mach_port_name_t *namep, - ipc_object_t *objectp) + ipc_space_t space, + ipc_object_type_t otype, + mach_port_type_t type, + mach_port_urefs_t urefs, + mach_port_name_t *namep, + ipc_object_t *objectp) { ipc_object_t object; ipc_entry_t entry; @@ -333,8 +338,9 @@ ipc_object_alloc( assert(urefs <= MACH_PORT_UREFS_MAX); object = io_alloc(otype); - if (object == IO_NULL) + if (object == IO_NULL) { return KERN_RESOURCE_SHORTAGE; + } if (otype == IOT_PORT) { ipc_port_t port = (ipc_port_t)object; @@ -384,12 +390,12 @@ ipc_object_alloc( kern_return_t ipc_object_alloc_name( - ipc_space_t space, - ipc_object_type_t otype, - mach_port_type_t type, - mach_port_urefs_t urefs, - mach_port_name_t name, - ipc_object_t *objectp) + ipc_space_t space, + ipc_object_type_t otype, + mach_port_type_t type, + mach_port_urefs_t urefs, + mach_port_name_t name, + ipc_object_t *objectp) { ipc_object_t object; ipc_entry_t entry; @@ -401,8 +407,9 @@ ipc_object_alloc_name( assert(urefs <= MACH_PORT_UREFS_MAX); object = io_alloc(otype); - if (object == IO_NULL) + if (object == IO_NULL) { return KERN_RESOURCE_SHORTAGE; + } if (otype == IOT_PORT) { ipc_port_t port = (ipc_port_t)object; @@ -449,27 +456,26 @@ ipc_object_alloc_name( mach_msg_type_name_t ipc_object_copyin_type( - mach_msg_type_name_t msgt_name) + mach_msg_type_name_t msgt_name) { switch (msgt_name) { - - case MACH_MSG_TYPE_MOVE_RECEIVE: + case MACH_MSG_TYPE_MOVE_RECEIVE: return MACH_MSG_TYPE_PORT_RECEIVE; - case MACH_MSG_TYPE_MOVE_SEND_ONCE: - case MACH_MSG_TYPE_MAKE_SEND_ONCE: + case MACH_MSG_TYPE_MOVE_SEND_ONCE: + case MACH_MSG_TYPE_MAKE_SEND_ONCE: return MACH_MSG_TYPE_PORT_SEND_ONCE; - case MACH_MSG_TYPE_MOVE_SEND: - case MACH_MSG_TYPE_MAKE_SEND: - case MACH_MSG_TYPE_COPY_SEND: + case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MAKE_SEND: + case MACH_MSG_TYPE_COPY_SEND: return MACH_MSG_TYPE_PORT_SEND; - case MACH_MSG_TYPE_DISPOSE_RECEIVE: - case MACH_MSG_TYPE_DISPOSE_SEND: - case MACH_MSG_TYPE_DISPOSE_SEND_ONCE: - /* fall thru */ - default: + case MACH_MSG_TYPE_DISPOSE_RECEIVE: + case MACH_MSG_TYPE_DISPOSE_SEND: + case MACH_MSG_TYPE_DISPOSE_SEND_ONCE: + /* fall thru */ + default: return MACH_MSG_TYPE_PORT_NONE; } } @@ -491,10 +497,10 @@ ipc_object_copyin_type( kern_return_t ipc_object_copyin( - ipc_space_t space, - mach_port_name_t name, - mach_msg_type_name_t msgt_name, - ipc_object_t *objectp) + ipc_space_t space, + mach_port_name_t name, + mach_msg_type_name_t msgt_name, + ipc_object_t *objectp) { ipc_entry_t entry; ipc_port_t soright; @@ -509,18 +515,20 @@ ipc_object_copyin( */ kr = ipc_right_lookup_write(space, name, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is write-locked and active */ release_port = IP_NULL; kr = ipc_right_copyin(space, name, entry, - msgt_name, TRUE, - objectp, &soright, - &release_port, - &assertcnt); - if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + msgt_name, IPC_RIGHT_COPYIN_FLAGS_DEADOK, + objectp, &soright, + &release_port, + &assertcnt); + if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) { ipc_entry_dealloc(space, name, entry); + } is_write_unlock(space); #if IMPORTANCE_INHERITANCE @@ -529,11 +537,13 @@ ipc_object_copyin( } #endif /* IMPORTANCE_INHERITANCE */ - if (release_port != IP_NULL) + if (release_port != IP_NULL) { ip_release(release_port); + } - if ((kr == KERN_SUCCESS) 
&& (soright != IP_NULL)) + if ((kr == KERN_SUCCESS) && (soright != IP_NULL)) { ipc_notify_port_deleted(soright, name); + } return kr; } @@ -568,13 +578,13 @@ ipc_object_copyin( void ipc_object_copyin_from_kernel( - ipc_object_t object, - mach_msg_type_name_t msgt_name) + ipc_object_t object, + mach_msg_type_name_t msgt_name) { assert(IO_VALID(object)); switch (msgt_name) { - case MACH_MSG_TYPE_MOVE_RECEIVE: { + case MACH_MSG_TYPE_MOVE_RECEIVE: { ipc_port_t port = (ipc_port_t) object; ip_lock(port); @@ -592,9 +602,9 @@ ipc_object_copyin_from_kernel( imq_unlock(&port->ip_messages); ip_unlock(port); break; - } + } - case MACH_MSG_TYPE_COPY_SEND: { + case MACH_MSG_TYPE_COPY_SEND: { ipc_port_t port = (ipc_port_t) object; ip_lock(port); @@ -605,16 +615,16 @@ ipc_object_copyin_from_kernel( ip_reference(port); ip_unlock(port); break; - } + } - case MACH_MSG_TYPE_MAKE_SEND: { + case MACH_MSG_TYPE_MAKE_SEND: { ipc_port_t port = (ipc_port_t) object; ip_lock(port); if (ip_active(port)) { assert(port->ip_receiver_name != MACH_PORT_NULL); assert((port->ip_receiver == ipc_space_kernel) || - (port->ip_receiver->is_node_id != HOST_LOCAL_NODE)); + (port->ip_receiver->is_node_id != HOST_LOCAL_NODE)); port->ip_mscount++; } @@ -622,15 +632,15 @@ ipc_object_copyin_from_kernel( ip_reference(port); ip_unlock(port); break; - } + } - case MACH_MSG_TYPE_MOVE_SEND: { + case MACH_MSG_TYPE_MOVE_SEND: { /* move naked send right into the message */ assert(((ipc_port_t)object)->ip_srights); break; - } + } - case MACH_MSG_TYPE_MAKE_SEND_ONCE: { + case MACH_MSG_TYPE_MAKE_SEND_ONCE: { ipc_port_t port = (ipc_port_t) object; ip_lock(port); @@ -641,15 +651,15 @@ ipc_object_copyin_from_kernel( ip_reference(port); ip_unlock(port); break; - } + } - case MACH_MSG_TYPE_MOVE_SEND_ONCE: { + case MACH_MSG_TYPE_MOVE_SEND_ONCE: { /* move naked send-once right into the message */ - assert(((ipc_port_t)object)->ip_sorights); + assert(((ipc_port_t)object)->ip_sorights); break; - } + } - default: + default: panic("ipc_object_copyin_from_kernel: strange rights"); } } @@ -667,26 +677,26 @@ ipc_object_copyin_from_kernel( void ipc_object_destroy( - ipc_object_t object, - mach_msg_type_name_t msgt_name) + ipc_object_t object, + mach_msg_type_name_t msgt_name) { assert(IO_VALID(object)); assert(io_otype(object) == IOT_PORT); switch (msgt_name) { - case MACH_MSG_TYPE_PORT_SEND: + case MACH_MSG_TYPE_PORT_SEND: ipc_port_release_send((ipc_port_t) object); break; - case MACH_MSG_TYPE_PORT_SEND_ONCE: + case MACH_MSG_TYPE_PORT_SEND_ONCE: ipc_notify_send_once((ipc_port_t) object); break; - case MACH_MSG_TYPE_PORT_RECEIVE: + case MACH_MSG_TYPE_PORT_RECEIVE: ipc_port_release_receive((ipc_port_t) object); break; - default: + default: panic("ipc_object_destroy: strange rights"); } } @@ -703,26 +713,27 @@ ipc_object_destroy( void ipc_object_destroy_dest( - ipc_object_t object, - mach_msg_type_name_t msgt_name) + ipc_object_t object, + mach_msg_type_name_t msgt_name) { assert(IO_VALID(object)); assert(io_otype(object) == IOT_PORT); switch (msgt_name) { - case MACH_MSG_TYPE_PORT_SEND: + case MACH_MSG_TYPE_PORT_SEND: ipc_port_release_send((ipc_port_t) object); break; - case MACH_MSG_TYPE_PORT_SEND_ONCE: - if (io_active(object) && - !ip_full_kernel((ipc_port_t) object)) + case MACH_MSG_TYPE_PORT_SEND_ONCE: + if (io_active(object) && + !ip_full_kernel((ipc_port_t) object)) { ipc_notify_send_once((ipc_port_t) object); - else + } else { ipc_port_release_sonce((ipc_port_t) object); + } break; - default: + default: panic("ipc_object_destroy_dest: strange rights"); } } 
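
Two substantive details ride along with the brace and indentation
churn above. First, ipc_object_copyin() now passes the named flag
IPC_RIGHT_COPYIN_FLAGS_DEADOK to ipc_right_copyin() where a bare TRUE
used to sit. Second, ipc_object_destroy_dest() keeps its send-once
special case, worth spelling out as it reads in the hunk:

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		if (io_active(object) &&
		    !ip_full_kernel((ipc_port_t) object)) {
			/* live destination: deliver the send-once notification */
			ipc_notify_send_once((ipc_port_t) object);
		} else {
			/* dead port, or kernel queue full: just drop the right */
			ipc_port_release_sonce((ipc_port_t) object);
		}
		break;

Either way the send-once right is consumed; the notification is only
generated when the destination can still accept it.
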
@@ -746,11 +757,11 @@ ipc_object_destroy_dest( kern_return_t ipc_object_copyout( - ipc_space_t space, - ipc_object_t object, - mach_msg_type_name_t msgt_name, - boolean_t overflow, - mach_port_name_t *namep) + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + mach_port_name_t *namep) { struct knote *kn = current_thread()->ith_knote; mach_port_name_t name; @@ -762,7 +773,7 @@ ipc_object_copyout( if (ITH_KNOTE_VALID(kn, msgt_name)) { filt_machport_turnstile_prepare_lazily(kn, - msgt_name, (ipc_port_t)object); + msgt_name, (ipc_port_t)object); } is_write_lock(space); @@ -774,7 +785,7 @@ ipc_object_copyout( } if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) && - ipc_right_reverse(space, object, &name, &entry)) { + ipc_right_reverse(space, object, &name, &entry)) { /* object is locked and active */ assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE); @@ -787,9 +798,9 @@ ipc_object_copyout( /* unlocks/locks space, so must start again */ kr = ipc_entry_grow_table(space, ITS_SIZE_NONE); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; /* space is unlocked */ - + } continue; } @@ -811,13 +822,14 @@ ipc_object_copyout( /* space is write-locked and active, object is locked and active */ kr = ipc_right_copyout(space, name, entry, - msgt_name, overflow, object); + msgt_name, overflow, object); /* object is unlocked */ is_write_unlock(space); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { *namep = name; + } return kr; } @@ -842,11 +854,11 @@ ipc_object_copyout( kern_return_t ipc_object_copyout_name( - ipc_space_t space, - ipc_object_t object, - mach_msg_type_name_t msgt_name, - boolean_t overflow, - mach_port_name_t name) + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + mach_port_name_t name) { mach_port_name_t oname; ipc_entry_t oentry; @@ -864,12 +876,13 @@ ipc_object_copyout_name( if (ITH_KNOTE_VALID(kn, msgt_name)) { filt_machport_turnstile_prepare_lazily(kn, - msgt_name, (ipc_port_t)object); + msgt_name, (ipc_port_t)object); } kr = ipc_entry_alloc_name(space, name, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is write-locked and active */ if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) && @@ -879,8 +892,9 @@ ipc_object_copyout_name( if (name != oname) { io_unlock(object); - if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) + if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE) { ipc_entry_dealloc(space, name, entry); + } is_write_unlock(space); return KERN_RIGHT_EXISTS; @@ -889,8 +903,9 @@ ipc_object_copyout_name( assert(entry == oentry); assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE); } else { - if (ipc_right_inuse(space, name, entry)) + if (ipc_right_inuse(space, name, entry)) { return KERN_NAME_EXISTS; + } assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE); assert(entry->ie_object == IO_NULL); @@ -936,7 +951,7 @@ ipc_object_copyout_name( #endif /* IMPORTANCE_INHERITANCE */ kr = ipc_right_copyout(space, name, entry, - msgt_name, overflow, object); + msgt_name, overflow, object); /* object is unlocked */ is_write_unlock(space); @@ -969,10 +984,10 @@ ipc_object_copyout_name( void ipc_object_copyout_dest( - ipc_space_t space, - ipc_object_t object, - mach_msg_type_name_t msgt_name, - mach_port_name_t *namep) + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + mach_port_name_t *namep) { mach_port_name_t name; @@ -989,15 +1004,16 @@ ipc_object_copyout_dest( */ switch (msgt_name) { - 
case MACH_MSG_TYPE_PORT_SEND: { + case MACH_MSG_TYPE_PORT_SEND: { ipc_port_t port = (ipc_port_t) object; ipc_port_t nsrequest = IP_NULL; mach_port_mscount_t mscount; - if (port->ip_receiver == space) + if (port->ip_receiver == space) { name = port->ip_receiver_name; - else + } else { name = MACH_PORT_NULL; + } assert(port->ip_srights > 0); if (--port->ip_srights == 0 && @@ -1007,12 +1023,13 @@ ipc_object_copyout_dest( mscount = port->ip_mscount; ip_unlock(port); ipc_notify_no_senders(nsrequest, mscount); - } else + } else { ip_unlock(port); + } break; - } + } - case MACH_MSG_TYPE_PORT_SEND_ONCE: { + case MACH_MSG_TYPE_PORT_SEND_ONCE: { ipc_port_t port = (ipc_port_t) object; assert(port->ip_sorights > 0); @@ -1041,9 +1058,9 @@ ipc_object_copyout_dest( } break; - } + } - default: + default: panic("ipc_object_copyout_dest: strange rights"); name = MACH_PORT_DEAD; } @@ -1067,16 +1084,17 @@ ipc_object_copyout_dest( kern_return_t ipc_object_rename( - ipc_space_t space, - mach_port_name_t oname, - mach_port_name_t nname) + ipc_space_t space, + mach_port_name_t oname, + mach_port_name_t nname) { ipc_entry_t oentry, nentry; kern_return_t kr; - + kr = ipc_entry_alloc_name(space, nname, &nentry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is write-locked and active */ @@ -1105,10 +1123,10 @@ ipc_object_rename( */ void io_free( - unsigned int otype, - ipc_object_t object) + unsigned int otype, + ipc_object_t object) { - ipc_port_t port; + ipc_port_t port; if (otype == IOT_PORT) { port = (ipc_port_t) object; diff --git a/osfmk/ipc/ipc_object.h b/osfmk/ipc/ipc_object.h index 17e5abc02..2e23f5681 100644 --- a/osfmk/ipc/ipc_object.h +++ b/osfmk/ipc/ipc_object.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
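
The MACH_MSG_TYPE_PORT_SEND arm just above shows the no-senders protocol in full: the last send right to die while a no-senders request is armed snapshots ip_mscount, drops the port lock, and only then posts ipc_notify_no_senders. A minimal user-space sketch of that accounting, with toy names throughout, a callback standing in for the notification, and a pthread mutex for ip_lock (none of this is kernel API):

#include <pthread.h>
#include <stdio.h>

typedef void (*no_senders_fn)(unsigned mscount);

struct toy_port {
    pthread_mutex_t lock;       /* stands in for ip_lock */
    unsigned srights;           /* outstanding send rights */
    unsigned mscount;           /* make-send count */
    no_senders_fn nsrequest;    /* armed no-senders request, or NULL */
};

/* Release one send right; the notification fires outside the lock,
 * just as the kernel drops ip_lock before ipc_notify_no_senders(). */
static void toy_release_send(struct toy_port *p)
{
    no_senders_fn notify = NULL;
    unsigned mscount = 0;

    pthread_mutex_lock(&p->lock);
    if (--p->srights == 0 && p->nsrequest != NULL) {
        notify = p->nsrequest;      /* consume the armed request */
        p->nsrequest = NULL;
        mscount = p->mscount;       /* snapshot while still locked */
    }
    pthread_mutex_unlock(&p->lock);

    if (notify) {
        notify(mscount);            /* no locks held: safe to call out */
    }
}

static void on_no_senders(unsigned mscount)
{
    printf("no more senders (mscount %u)\n", mscount);
}

int main(void)
{
    struct toy_port p = { PTHREAD_MUTEX_INITIALIZER, 2, 7, on_no_senders };
    toy_release_send(&p);   /* 2 -> 1: nothing to do */
    toy_release_send(&p);   /* 1 -> 0: notification fires */
    return 0;
}
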
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -69,7 +69,7 @@ * Definitions for IPC objects, for which tasks have capabilities. */ -#ifndef _IPC_IPC_OBJECT_H_ +#ifndef _IPC_IPC_OBJECT_H_ #define _IPC_IPC_OBJECT_H_ #include @@ -81,7 +81,7 @@ #include #include -typedef natural_t ipc_object_refs_t; /* for ipc/ipc_object.h */ +typedef natural_t ipc_object_refs_t; /* for ipc/ipc_object.h */ typedef natural_t ipc_object_bits_t; typedef natural_t ipc_object_type_t; @@ -90,7 +90,7 @@ typedef natural_t ipc_object_type_t; * structures, and (Noto Bene!) pointers to either of these or the * ipc_object at the head of these are freely cast back and forth; hence * the ipc_object MUST BE FIRST in the ipc_common_data. - * + * * If the RPC implementation enabled user-mode code to use kernel-level * data structures (as ours used to), this peculiar structuring would * avoid having anything in user code depend on the kernel configuration @@ -99,7 +99,7 @@ typedef natural_t ipc_object_type_t; struct ipc_object { ipc_object_bits_t io_bits; ipc_object_refs_t io_references; - lck_spin_t io_lock_data; + lck_spin_t io_lock_data; }; /* @@ -117,9 +117,9 @@ struct ipc_object_header { /* * Legacy defines. Should use IPC_OBJECT_NULL, etc... */ -#define IO_NULL ((ipc_object_t) 0) -#define IO_DEAD ((ipc_object_t) ~0UL) -#define IO_VALID(io) (((io) != IO_NULL) && ((io) != IO_DEAD)) +#define IO_NULL ((ipc_object_t) 0) +#define IO_DEAD ((ipc_object_t) ~0UL) +#define IO_VALID(io) (((io) != IO_NULL) && ((io) != IO_DEAD)) /* * IPC steals the high-order bits from the kotype to use @@ -130,34 +130,34 @@ struct ipc_object_header { * to IO_BITS_PORT_INFO must be coordinated with bitfield * definitions in ipc_port.h. */ -#define IO_BITS_PORT_INFO 0x0000f000 /* stupid port tricks */ -#define IO_BITS_KOTYPE 0x00000fff /* used by the object */ -#define IO_BITS_OTYPE 0x7fff0000 /* determines a zone */ -#define IO_BITS_ACTIVE 0x80000000 /* is object alive? */ +#define IO_BITS_PORT_INFO 0x0000f000 /* stupid port tricks */ +#define IO_BITS_KOTYPE 0x00000fff /* used by the object */ +#define IO_BITS_OTYPE 0x7fff0000 /* determines a zone */ +#define IO_BITS_ACTIVE 0x80000000 /* is object alive? */ -#define io_active(io) (((io)->io_bits & IO_BITS_ACTIVE) != 0) +#define io_active(io) (((io)->io_bits & IO_BITS_ACTIVE) != 0) -#define io_otype(io) (((io)->io_bits & IO_BITS_OTYPE) >> 16) -#define io_kotype(io) ((io)->io_bits & IO_BITS_KOTYPE) +#define io_otype(io) (((io)->io_bits & IO_BITS_OTYPE) >> 16) +#define io_kotype(io) ((io)->io_bits & IO_BITS_KOTYPE) -#define io_makebits(active, otype, kotype) \ +#define io_makebits(active, otype, kotype) \ (((active) ? 
IO_BITS_ACTIVE : 0) | ((otype) << 16) | (kotype)) /* * Object types: ports, port sets, kernel-loaded ports */ -#define IOT_PORT 0 -#define IOT_PORT_SET 1 -#define IOT_NUMBER 2 /* number of types used */ +#define IOT_PORT 0 +#define IOT_PORT_SET 1 +#define IOT_NUMBER 2 /* number of types used */ extern zone_t ipc_object_zones[IOT_NUMBER]; -#define io_alloc(otype) \ - ((ipc_object_t) zalloc(ipc_object_zones[(otype)])) +#define io_alloc(otype) \ + ((ipc_object_t) zalloc(ipc_object_zones[(otype)])) -extern void io_free( - unsigned int otype, - ipc_object_t object); +extern void io_free( + unsigned int otype, + ipc_object_t object); /* * Here we depend on the ipc_object being first within the kernel struct @@ -167,13 +167,13 @@ extern void io_free( lck_spin_init(&(io)->io_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define io_lock_destroy(io) \ lck_spin_destroy(&(io)->io_lock_data, &ipc_lck_grp) -#define io_lock(io) \ - lck_spin_lock(&(io)->io_lock_data) -#define io_lock_try(io) \ - lck_spin_try_lock(&(io)->io_lock_data) +#define io_lock(io) \ + lck_spin_lock_grp(&(io)->io_lock_data, &ipc_lck_grp) +#define io_lock_try(io) \ + lck_spin_try_lock_grp(&(io)->io_lock_data, &ipc_lck_grp) #define io_lock_held_kdp(io) \ kdp_lck_spin_is_acquired(&(io)->io_lock_data) -#define io_unlock(io) \ +#define io_unlock(io) \ lck_spin_unlock(&(io)->io_lock_data) #define _VOLATILE_ volatile @@ -190,11 +190,12 @@ extern void io_free( * structs, because the io_references field is the first word of the struct, * and zfree modifies that to point to the next free zone element. */ -#define IO_MAX_REFERENCES \ +#define IO_MAX_REFERENCES \ (unsigned)(~0 ^ (1 << (sizeof(int)*BYTE_SIZE - 1))) static inline void -io_reference(ipc_object_t io) { +io_reference(ipc_object_t io) +{ ipc_object_refs_t new_io_references; ipc_object_refs_t old_io_references; @@ -208,12 +209,13 @@ io_reference(ipc_object_t io) { break; } } while (OSCompareAndSwap(old_io_references, new_io_references, - &((io)->io_references)) == FALSE); + &((io)->io_references)) == FALSE); } static inline void -io_release(ipc_object_t io) { +io_release(ipc_object_t io) +{ ipc_object_refs_t new_io_references; ipc_object_refs_t old_io_references; @@ -227,9 +229,9 @@ io_release(ipc_object_t io) { break; } } while (OSCompareAndSwap(old_io_references, new_io_references, - &((io)->io_references)) == FALSE); + &((io)->io_references)) == FALSE); - /* If we just removed the last reference count */ + /* If we just removed the last reference count */ if (1 == old_io_references) { /* Free the object */ io_free(io_otype((io)), (io)); @@ -243,7 +245,7 @@ io_release(ipc_object_t io) { */ struct label; -extern struct label *io_getlabel (ipc_object_t obj); +extern struct label *io_getlabel(ipc_object_t obj); #define io_unlocklabel(obj) /* @@ -252,111 +254,111 @@ extern struct label *io_getlabel (ipc_object_t obj); /* Take a reference to an object */ extern void ipc_object_reference( - ipc_object_t object); + ipc_object_t object); /* Release a reference to an object */ extern void ipc_object_release( - ipc_object_t object); + ipc_object_t object); /* Look up an object in a space */ extern kern_return_t ipc_object_translate( - ipc_space_t space, - mach_port_name_t name, - mach_port_right_t right, - ipc_object_t *objectp); + ipc_space_t space, + mach_port_name_t name, + mach_port_right_t right, + ipc_object_t *objectp); /* Look up two objects in a space, locking them in the order described */ extern kern_return_t ipc_object_translate_two( - ipc_space_t space, - mach_port_name_t name1, - 
mach_port_right_t right1, - ipc_object_t *objectp1, - mach_port_name_t name2, - mach_port_right_t right2, - ipc_object_t *objectp2); + ipc_space_t space, + mach_port_name_t name1, + mach_port_right_t right1, + ipc_object_t *objectp1, + mach_port_name_t name2, + mach_port_right_t right2, + ipc_object_t *objectp2); /* Allocate a dead-name entry */ extern kern_return_t ipc_object_alloc_dead( - ipc_space_t space, - mach_port_name_t *namep); + ipc_space_t space, + mach_port_name_t *namep); /* Allocate a dead-name entry, with a specific name */ extern kern_return_t ipc_object_alloc_dead_name( - ipc_space_t space, - mach_port_name_t name); + ipc_space_t space, + mach_port_name_t name); /* Allocate an object */ extern kern_return_t ipc_object_alloc( - ipc_space_t space, - ipc_object_type_t otype, - mach_port_type_t type, - mach_port_urefs_t urefs, - mach_port_name_t *namep, - ipc_object_t *objectp); + ipc_space_t space, + ipc_object_type_t otype, + mach_port_type_t type, + mach_port_urefs_t urefs, + mach_port_name_t *namep, + ipc_object_t *objectp); /* Allocate an object, with a specific name */ extern kern_return_t ipc_object_alloc_name( - ipc_space_t space, - ipc_object_type_t otype, - mach_port_type_t type, - mach_port_urefs_t urefs, - mach_port_name_t name, - ipc_object_t *objectp); + ipc_space_t space, + ipc_object_type_t otype, + mach_port_type_t type, + mach_port_urefs_t urefs, + mach_port_name_t name, + ipc_object_t *objectp); /* Convert a send type name to a received type name */ extern mach_msg_type_name_t ipc_object_copyin_type( - mach_msg_type_name_t msgt_name); + mach_msg_type_name_t msgt_name); /* Copyin a capability from a space */ extern kern_return_t ipc_object_copyin( - ipc_space_t space, - mach_port_name_t name, - mach_msg_type_name_t msgt_name, - ipc_object_t *objectp); + ipc_space_t space, + mach_port_name_t name, + mach_msg_type_name_t msgt_name, + ipc_object_t *objectp); /* Copyin a naked capability from the kernel */ extern void ipc_object_copyin_from_kernel( - ipc_object_t object, - mach_msg_type_name_t msgt_name); + ipc_object_t object, + mach_msg_type_name_t msgt_name); /* Destroy a naked capability */ extern void ipc_object_destroy( - ipc_object_t object, - mach_msg_type_name_t msgt_name); + ipc_object_t object, + mach_msg_type_name_t msgt_name); /* Destroy a naked destination capability */ extern void ipc_object_destroy_dest( - ipc_object_t object, - mach_msg_type_name_t msgt_name); + ipc_object_t object, + mach_msg_type_name_t msgt_name); /* Copyout a capability, placing it into a space */ extern kern_return_t ipc_object_copyout( - ipc_space_t space, - ipc_object_t object, - mach_msg_type_name_t msgt_name, - boolean_t overflow, - mach_port_name_t *namep); + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + mach_port_name_t *namep); /* Copyout a capability with a name, placing it into a space */ extern kern_return_t ipc_object_copyout_name( - ipc_space_t space, - ipc_object_t object, - mach_msg_type_name_t msgt_name, - boolean_t overflow, - mach_port_name_t name); + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + mach_port_name_t name); /* Translate/consume the destination right of a message */ extern void ipc_object_copyout_dest( - ipc_space_t space, - ipc_object_t object, - mach_msg_type_name_t msgt_name, - mach_port_name_t *namep); + ipc_space_t space, + ipc_object_t object, + mach_msg_type_name_t msgt_name, + mach_port_name_t *namep); /* Rename an entry in a space 
*/ extern kern_return_t ipc_object_rename( - ipc_space_t space, - mach_port_name_t oname, - mach_port_name_t nname); + ipc_space_t space, + mach_port_name_t oname, + mach_port_name_t nname); -#endif /* _IPC_IPC_OBJECT_H_ */ +#endif /* _IPC_IPC_OBJECT_H_ */ diff --git a/osfmk/ipc/ipc_port.c b/osfmk/ipc/ipc_port.c index 823abe3a7..ee9a7571e 100644 --- a/osfmk/ipc/ipc_port.c +++ b/osfmk/ipc/ipc_port.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
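
The io_reference/io_release inlines in ipc_object.h above implement lock-free reference counting with an OSCompareAndSwap retry loop, and io_release frees the object when it observes the old count at exactly 1. The same shape in portable C11 atomics; this is a sketch, not the kernel code, and the names and assertion bound are illustrative:

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define TOY_MAX_REFERENCES 0x7fffffffu   /* cap, like IO_MAX_REFERENCES */

/* Take a reference: load, bump, publish only if nobody raced us. */
static void toy_reference(_Atomic unsigned *refs)
{
    unsigned old_refs, new_refs;
    do {
        old_refs = atomic_load(refs);
        assert(old_refs > 0 && old_refs < TOY_MAX_REFERENCES);
        new_refs = old_refs + 1;
    } while (!atomic_compare_exchange_weak(refs, &old_refs, new_refs));
}

/* Drop a reference; returns 1 when the caller removed the last one
 * and must free the object (the io_free() call in io_release()). */
static int toy_release(_Atomic unsigned *refs)
{
    unsigned old_refs, new_refs;
    do {
        old_refs = atomic_load(refs);
        assert(old_refs > 0 && old_refs <= TOY_MAX_REFERENCES);
        new_refs = old_refs - 1;
    } while (!atomic_compare_exchange_weak(refs, &old_refs, new_refs));
    return old_refs == 1;
}

int main(void)
{
    _Atomic unsigned refs = 1;
    toy_reference(&refs);
    (void)toy_release(&refs);
    printf("freed: %s\n", toy_release(&refs) ? "yes" : "no");
    return 0;
}
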
*/ @@ -98,21 +98,21 @@ #include -decl_lck_spin_data(, ipc_port_multiple_lock_data) -ipc_port_timestamp_t ipc_port_timestamp_data; +decl_lck_spin_data(, ipc_port_multiple_lock_data) +ipc_port_timestamp_t ipc_port_timestamp_data; int ipc_portbt; -#if MACH_ASSERT -void ipc_port_init_debug( - ipc_port_t port, - uintptr_t *callstack, - unsigned int callstack_max); +#if MACH_ASSERT +void ipc_port_init_debug( + ipc_port_t port, + uintptr_t *callstack, + unsigned int callstack_max); + +void ipc_port_callstack_init_debug( + uintptr_t *callstack, + unsigned int callstack_max); -void ipc_port_callstack_init_debug( - uintptr_t *callstack, - unsigned int callstack_max); - -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ void ipc_port_release(ipc_port_t port) @@ -154,22 +154,22 @@ ipc_port_timestamp(void) #if IMPORTANCE_INHERITANCE kern_return_t ipc_port_request_alloc( - ipc_port_t port, - mach_port_name_t name, - ipc_port_t soright, - boolean_t send_possible, - boolean_t immediate, - ipc_port_request_index_t *indexp, - boolean_t *importantp) + ipc_port_t port, + mach_port_name_t name, + ipc_port_t soright, + boolean_t send_possible, + boolean_t immediate, + ipc_port_request_index_t *indexp, + boolean_t *importantp) #else kern_return_t ipc_port_request_alloc( - ipc_port_t port, - mach_port_name_t name, - ipc_port_t soright, - boolean_t send_possible, - boolean_t immediate, - ipc_port_request_index_t *indexp) + ipc_port_t port, + mach_port_name_t name, + ipc_port_t soright, + boolean_t send_possible, + boolean_t immediate, + ipc_port_request_index_t *indexp) #endif /* IMPORTANCE_INHERITANCE */ { ipc_port_request_t ipr, table; @@ -186,19 +186,21 @@ ipc_port_request_alloc( table = port->ip_requests; - if (table == IPR_NULL) + if (table == IPR_NULL) { return KERN_NO_SPACE; + } index = table->ipr_next; - if (index == 0) + if (index == 0) { return KERN_NO_SPACE; + } ipr = &table[index]; assert(ipr->ipr_name == MACH_PORT_NULL); table->ipr_next = ipr->ipr_next; ipr->ipr_name = name; - + if (send_possible) { mask |= IPR_SOR_SPREQ_MASK; if (immediate) { @@ -241,8 +243,8 @@ ipc_port_request_alloc( kern_return_t ipc_port_request_grow( - ipc_port_t port, - ipc_table_elems_t target_size) + ipc_port_t port, + ipc_table_elems_t target_size) { ipc_table_size_t its; ipc_port_request_t otable, ntable; @@ -250,17 +252,18 @@ ipc_port_request_grow( assert(ip_active(port)); otable = port->ip_requests; - if (otable == IPR_NULL) + if (otable == IPR_NULL) { its = &ipc_table_requests[0]; - else + } else { its = otable->ipr_size + 1; + } if (target_size != ITS_SIZE_NONE) { if ((otable != IPR_NULL) && (target_size <= otable->ipr_size->its_size)) { ip_unlock(port); return KERN_SUCCESS; - } + } while ((its->its_size) && (its->its_size < target_size)) { its++; } @@ -289,7 +292,7 @@ ipc_port_request_grow( */ if (ip_active(port) && (port->ip_requests == otable) && - ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) { + ((otable == IPR_NULL) || (otable->ipr_size + 1 == its))) { ipc_table_size_t oits; ipc_table_elems_t osize, nsize; ipc_port_request_index_t free, i; @@ -302,8 +305,8 @@ ipc_port_request_grow( free = otable->ipr_next; (void) memcpy((void *)(ntable + 1), - (const void *)(otable + 1), - (osize - 1) * sizeof(struct ipc_port_request)); + (const void *)(otable + 1), + (osize - 1) * sizeof(struct ipc_port_request)); } else { osize = 1; oits = 0; @@ -331,7 +334,7 @@ ipc_port_request_grow( if (otable != IPR_NULL) { it_requests_free(oits, otable); - } + } } else { ip_unlock(port); ip_release(port); @@ -340,7 +343,7 @@ 
ipc_port_request_grow( return KERN_SUCCESS; } - + /* * Routine: ipc_port_request_sparm * Purpose: @@ -354,9 +357,9 @@ ipc_port_request_grow( boolean_t ipc_port_request_sparm( - ipc_port_t port, - __assert_only mach_port_name_t name, - ipc_port_request_index_t index, + ipc_port_t port, + __assert_only mach_port_name_t name, + ipc_port_request_index_t index, mach_msg_option_t option, mach_msg_priority_t override) { @@ -364,7 +367,7 @@ ipc_port_request_sparm( ipc_port_request_t ipr, table; assert(ip_active(port)); - + table = port->ip_requests; assert(table != IPR_NULL); @@ -386,13 +389,13 @@ ipc_port_request_sparm( (port->ip_impdonation != 0) && (port->ip_spimportant == 0) && (((option & MACH_SEND_IMPORTANCE) != 0) || - (task_is_importance_donor(current_task())))) { + (task_is_importance_donor(current_task())))) { return TRUE; } #else return TRUE; #endif /* IMPORTANCE_INHERITANCE */ - } + } } return FALSE; } @@ -407,15 +410,15 @@ ipc_port_request_sparm( */ mach_port_type_t ipc_port_request_type( - ipc_port_t port, - __assert_only mach_port_name_t name, - ipc_port_request_index_t index) + ipc_port_t port, + __assert_only mach_port_name_t name, + ipc_port_request_index_t index) { ipc_port_request_t ipr, table; mach_port_type_t type = 0; table = port->ip_requests; - assert (table != IPR_NULL); + assert(table != IPR_NULL); assert(index != IE_REQ_NONE); ipr = &table[index]; @@ -446,9 +449,9 @@ ipc_port_request_type( ipc_port_t ipc_port_request_cancel( - ipc_port_t port, - __assert_only mach_port_name_t name, - ipc_port_request_index_t index) + ipc_port_t port, + __assert_only mach_port_name_t name, + ipc_port_request_index_t index) { ipc_port_request_t ipr, table; ipc_port_t request = IP_NULL; @@ -457,7 +460,7 @@ ipc_port_request_cancel( table = port->ip_requests; assert(table != IPR_NULL); - assert (index != IE_REQ_NONE); + assert(index != IE_REQ_NONE); ipr = &table[index]; assert(ipr->ipr_name == name); request = IPR_SOR_PORT(ipr->ipr_soright); @@ -484,9 +487,9 @@ ipc_port_request_cancel( void ipc_port_pdrequest( - ipc_port_t port, - ipc_port_t notify, - ipc_port_t *previousp) + ipc_port_t port, + ipc_port_t notify, + ipc_port_t *previousp) { ipc_port_t previous; @@ -513,10 +516,10 @@ ipc_port_pdrequest( void ipc_port_nsrequest( - ipc_port_t port, - mach_port_mscount_t sync, - ipc_port_t notify, - ipc_port_t *previousp) + ipc_port_t port, + mach_port_mscount_t sync, + ipc_port_t notify, + ipc_port_t *previousp) { ipc_port_t previous; mach_port_mscount_t mscount; @@ -553,16 +556,16 @@ ipc_port_nsrequest( * whether the caller needs to reap kmsg structures that should * be destroyed (by calling ipc_kmsg_reap_delayed) * - * If should_destroy is FALSE, this always returns FALSE + * If should_destroy is FALSE, this always returns FALSE */ boolean_t ipc_port_clear_receiver( - ipc_port_t port, - boolean_t should_destroy) + ipc_port_t port, + boolean_t should_destroy) { - ipc_mqueue_t mqueue = &port->ip_messages; - boolean_t reap_messages = FALSE; + ipc_mqueue_t mqueue = &port->ip_messages; + boolean_t reap_messages = FALSE; /* * Pull ourselves out of any sets to which we belong. @@ -579,20 +582,33 @@ ipc_port_clear_receiver( * Also clear the mscount and seqno. 
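
ipc_port_request_grow above is the canonical grow-outside-the-lock dance: size the new table, drop the port lock to allocate it, relock, and install it only if the port is still active and nobody replaced the table in the interim; otherwise free the new table and try again. The race-safe shape, reduced to a toy growable array (hypothetical names, pthread mutex in place of ip_lock):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct toy {
    pthread_mutex_t lock;
    int   *table;
    size_t size;
};

/* Grow t->table to at least want entries; returns 0 on success.
 * Entered with t->lock held; drops it around the allocation and
 * revalidates before installing, like ipc_port_request_grow(). */
static int toy_grow(struct toy *t, size_t want)
{
    for (;;) {
        size_t osize = t->size;
        if (osize >= want)
            return 0;                /* another thread grew it: done */

        pthread_mutex_unlock(&t->lock);
        int *ntable = calloc(want, sizeof(*ntable));
        pthread_mutex_lock(&t->lock);

        if (ntable == NULL)
            return -1;               /* KERN_RESOURCE_SHORTAGE */

        if (t->size == osize) {      /* still the table we sized against? */
            if (osize > 0)
                memcpy(ntable, t->table, osize * sizeof(*ntable));
            free(t->table);
            t->table = ntable;
            t->size = want;
            return 0;
        }
        free(ntable);                /* lost the race; recheck and retry */
    }
}

int main(void)
{
    struct toy t = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
    pthread_mutex_lock(&t.lock);
    int rc = toy_grow(&t, 8);
    pthread_mutex_unlock(&t.lock);
    free(t.table);
    return rc;
}
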
*/ imq_lock(mqueue); - ipc_mqueue_changed(mqueue); + if (port->ip_receiver_name) { + ipc_mqueue_changed(port->ip_receiver, mqueue); + } else { + ipc_mqueue_changed(NULL, mqueue); + } port->ip_mscount = 0; mqueue->imq_seqno = 0; port->ip_context = port->ip_guarded = port->ip_strict_guard = 0; if (should_destroy) { /* - * Mark the mqueue invalid, preventing further send/receive + * Mark the port and mqueue invalid, preventing further send/receive * operations from succeeding. It's important for this to be * done under the same lock hold as the ipc_mqueue_changed * call to avoid additional threads blocking on an mqueue * that's being destroyed. + * + * The port active bit needs to be guarded under mqueue lock for + * turnstiles */ + port->ip_object.io_bits &= ~IO_BITS_ACTIVE; + port->ip_timestamp = ipc_port_timestamp(); reap_messages = ipc_mqueue_destroy_locked(mqueue); + } else { + /* make port be in limbo */ + port->ip_receiver_name = MACH_PORT_NULL; + port->ip_destination = IP_NULL; } imq_unlock(&port->ip_messages); @@ -609,9 +625,9 @@ ipc_port_clear_receiver( void ipc_port_init( - ipc_port_t port, - ipc_space_t space, - mach_port_name_t name) + ipc_port_t port, + ipc_space_t space, + mach_port_name_t name) { /* port->ip_kobject doesn't have to be initialized */ @@ -646,7 +662,7 @@ ipc_port_init( port->ip_send_turnstile = TURNSTILE_NULL; ipc_mqueue_init(&port->ip_messages, - FALSE /* !set */); + FALSE /* !set */); } /* @@ -665,9 +681,9 @@ ipc_port_init( kern_return_t ipc_port_alloc( - ipc_space_t space, - mach_port_name_t *namep, - ipc_port_t *portp) + ipc_space_t space, + mach_port_name_t *namep, + ipc_port_t *portp) { ipc_port_t port; mach_port_name_t name; @@ -677,12 +693,13 @@ ipc_port_alloc( uintptr_t buf[IP_CALLSTACK_MAX]; ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX); #endif /* MACH_ASSERT */ - + kr = ipc_object_alloc(space, IOT_PORT, - MACH_PORT_TYPE_RECEIVE, 0, - &name, (ipc_object_t *) &port); - if (kr != KERN_SUCCESS) + MACH_PORT_TYPE_RECEIVE, 0, + &name, (ipc_object_t *) &port); + if (kr != KERN_SUCCESS) { return kr; + } /* port and space are locked */ ipc_port_init(port, space, name); @@ -716,9 +733,9 @@ ipc_port_alloc( kern_return_t ipc_port_alloc_name( - ipc_space_t space, - mach_port_name_t name, - ipc_port_t *portp) + ipc_space_t space, + mach_port_name_t name, + ipc_port_t *portp) { ipc_port_t port; kern_return_t kr; @@ -726,13 +743,14 @@ ipc_port_alloc_name( #if MACH_ASSERT uintptr_t buf[IP_CALLSTACK_MAX]; ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX); -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ kr = ipc_object_alloc_name(space, IOT_PORT, - MACH_PORT_TYPE_RECEIVE, 0, - name, (ipc_object_t *) &port); - if (kr != KERN_SUCCESS) + MACH_PORT_TYPE_RECEIVE, 0, + name, (ipc_object_t *) &port); + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked */ @@ -740,7 +758,7 @@ ipc_port_alloc_name( #if MACH_ASSERT ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX); -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ *portp = port; @@ -748,7 +766,7 @@ ipc_port_alloc_name( } /* - * Routine: ipc_port_spnotify + * Routine: ipc_port_spnotify * Purpose: * Generate send-possible port notifications. * Conditions: @@ -756,7 +774,7 @@ ipc_port_alloc_name( */ void ipc_port_spnotify( - ipc_port_t port) + ipc_port_t port) { ipc_port_request_index_t index = 0; ipc_table_elems_t size = 0; @@ -765,11 +783,12 @@ ipc_port_spnotify( * If the port has no send-possible request * armed, don't bother to lock the port. 
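
The reworked ipc_port_clear_receiver above now performs three updates under one mqueue-lock hold: waking waiters via ipc_mqueue_changed, resetting mscount/seqno, and either clearing the active bit (destroy) or parking the port in limbo (receiver name and destination nulled). Keeping them in a single critical section means a woken waiter can never observe a half-transitioned port, which is why the new comment insists the active bit be guarded by the mqueue lock for turnstiles. The invariant as a toy state machine (illustrative names only, not kernel API):

#include <pthread.h>
#include <stdbool.h>

enum toy_state { RECEIVING, IN_LIMBO, DESTROYED };

struct toy_port {
    pthread_mutex_t mq_lock;      /* stands in for imq_lock */
    pthread_cond_t  waiters;      /* threads blocked in receive */
    enum toy_state  state;
    unsigned        mscount, seqno;
};

/* Detach the receive right. should_destroy mirrors the boolean_t
 * parameter: true tears the port down, false parks it in limbo. */
static void toy_clear_receiver(struct toy_port *p, bool should_destroy)
{
    pthread_mutex_lock(&p->mq_lock);
    pthread_cond_broadcast(&p->waiters);   /* ipc_mqueue_changed() */
    p->mscount = 0;
    p->seqno = 0;
    /* The state flips under the same hold, so a woken waiter that
     * re-acquires mq_lock always sees the final state. */
    p->state = should_destroy ? DESTROYED : IN_LIMBO;
    pthread_mutex_unlock(&p->mq_lock);
}

int main(void)
{
    struct toy_port p = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
        RECEIVING, 3, 9
    };
    toy_clear_receiver(&p, false);   /* port is now in limbo */
    return p.state == IN_LIMBO ? 0 : 1;
}
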
*/ - if (port->ip_sprequests == 0) + if (port->ip_sprequests == 0) { return; + } ip_lock(port); - + #if IMPORTANCE_INHERITANCE if (port->ip_spimportant != 0) { port->ip_spimportant = 0; @@ -797,8 +816,9 @@ revalidate: * no need to go beyond table size when first * we entered - those are future notifications. */ - if (size == 0) + if (size == 0) { size = requests->ipr_size->its_size; + } /* no need to backtrack either */ while (++index < size) { @@ -824,7 +844,7 @@ revalidate: } /* - * Routine: ipc_port_dnnotify + * Routine: ipc_port_dnnotify * Purpose: * Generate dead name notifications for * all outstanding dead-name and send- @@ -836,7 +856,7 @@ revalidate: */ void ipc_port_dnnotify( - ipc_port_t port) + ipc_port_t port) { ipc_port_request_t requests = port->ip_requests; @@ -905,13 +925,15 @@ ipc_port_destroy(ipc_port_t port) /* Otherwise, nothing to drop */ } else { assertcnt = port->ip_impcount; - if (pdrequest != IP_NULL) + if (pdrequest != IP_NULL) { /* mark in limbo for the journey */ port->ip_tempowner = 1; + } } - if (top) + if (top) { self->ith_assertions = assertcnt; + } #endif /* IMPORTANCE_INHERITANCE */ if (pdrequest != IP_NULL) { @@ -922,17 +944,11 @@ ipc_port_destroy(ipc_port_t port) /* we assume the ref for pdrequest */ port->ip_pdrequest = IP_NULL; - - /* make port be in limbo */ - imq_lock(&port->ip_messages); - port->ip_receiver_name = MACH_PORT_NULL; - port->ip_destination = IP_NULL; - imq_unlock(&port->ip_messages); ip_unlock(port); if (special_reply) { ipc_port_adjust_special_reply_port(port, - IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE); + IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE); } /* consumes our refs for port and pdrequest */ ipc_notify_port_destroyed(pdrequest, port); @@ -940,11 +956,6 @@ ipc_port_destroy(ipc_port_t port) goto drop_assertions; } - /* port active bit needs to be guarded under mqueue lock for turnstiles */ - imq_lock(&port->ip_messages); - port->ip_object.io_bits &= ~IO_BITS_ACTIVE; - port->ip_timestamp = ipc_port_timestamp(); - imq_unlock(&port->ip_messages); nsrequest = port->ip_nsrequest; /* @@ -958,7 +969,7 @@ ipc_port_destroy(ipc_port_t port) * port/mqueue that's been destroyed. */ boolean_t reap_msgs = FALSE; - reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks mqueue inactive */ + reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks port and mqueue inactive */ assert(port->ip_in_pset == 0); assert(port->ip_mscount == 0); @@ -990,19 +1001,20 @@ ipc_port_destroy(ipc_port_t port) /* unlink the kmsg from special reply port */ if (special_reply) { ipc_port_adjust_special_reply_port(port, - IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE); + IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE); } /* throw away no-senders request */ - if (nsrequest != IP_NULL) + if (nsrequest != IP_NULL) { ipc_notify_send_once(nsrequest); /* consumes ref */ - + } /* * Reap any kmsg objects waiting to be destroyed. * This must be done after we've released the port lock. 
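
The destroy path above queues messages for destruction while locked (ipc_mqueue_destroy_locked returns whether there is anything to reap) and runs ipc_kmsg_reap_delayed only once every lock is dropped, because tearing a kmsg down can itself take locks. The defer-then-reap split in miniature; the list and names here are invented for illustration:

#include <pthread.h>
#include <stdlib.h>

struct kmsg {
    struct kmsg *next;
    /* ... payload ... */
};

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static struct kmsg *queued;          /* messages on the port, locked */

/* Phase 1 (locked): unlink everything, destroy nothing. */
static struct kmsg *destroy_queue_locked(void)
{
    struct kmsg *batch = queued;
    queued = NULL;
    return batch;
}

/* Phase 2 (unlocked): the reap. Freeing here may take other
 * locks, which is exactly why it cannot run in phase 1. */
static void reap_delayed(struct kmsg *batch)
{
    while (batch) {
        struct kmsg *next = batch->next;
        free(batch);
        batch = next;
    }
}

int main(void)
{
    pthread_mutex_lock(&port_lock);
    queued = calloc(1, sizeof(struct kmsg));
    struct kmsg *batch = destroy_queue_locked();
    pthread_mutex_unlock(&port_lock);
    reap_delayed(batch);             /* no locks held */
    return 0;
}
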
*/ - if (reap_msgs) + if (reap_msgs) { ipc_kmsg_reap_delayed(); + } mqueue = &port->ip_messages; @@ -1016,7 +1028,7 @@ ipc_port_destroy(ipc_port_t port) ip_release(port); /* consume caller's ref */ - drop_assertions: +drop_assertions: #if IMPORTANCE_INHERITANCE if (release_imp_task != IIT_NULL) { if (assertcnt > 0) { @@ -1026,7 +1038,6 @@ ipc_port_destroy(ipc_port_t port) ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt); } ipc_importance_task_release(release_imp_task); - } else if (assertcnt > 0) { if (top) { self->ith_assertions = 0; @@ -1058,8 +1069,8 @@ ipc_port_destroy(ipc_port_t port) boolean_t ipc_port_check_circularity( - ipc_port_t port, - ipc_port_t dest) + ipc_port_t port, + ipc_port_t dest) { #if IMPORTANCE_INHERITANCE /* adjust importance counts at the same time */ @@ -1070,8 +1081,9 @@ ipc_port_check_circularity( assert(port != IP_NULL); assert(dest != IP_NULL); - if (port == dest) + if (port == dest) { return TRUE; + } base = dest; /* Check if destination needs a turnstile */ @@ -1085,8 +1097,9 @@ ipc_port_check_circularity( if (ip_lock_try(dest)) { if (!ip_active(dest) || (dest->ip_receiver_name != MACH_PORT_NULL) || - (dest->ip_destination == IP_NULL)) + (dest->ip_destination == IP_NULL)) { goto not_circular; + } /* dest is in transit; further checking necessary */ @@ -1106,8 +1119,9 @@ ipc_port_check_circularity( if (!ip_active(base) || (base->ip_receiver_name != MACH_PORT_NULL) || - (base->ip_destination == IP_NULL)) + (base->ip_destination == IP_NULL)) { break; + } base = base->ip_destination; } @@ -1168,11 +1182,11 @@ not_circular: struct turnstile *send_turnstile = TURNSTILE_NULL; if (port_send_turnstile(port)) { send_turnstile = turnstile_prepare((uintptr_t)port, - port_send_turnstile_address(port), - TURNSTILE_NULL, TURNSTILE_SYNC_IPC); + port_send_turnstile_address(port), + TURNSTILE_NULL, TURNSTILE_SYNC_IPC); turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest), - (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); + (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); /* update complete and turnstile complete called after dropping all locks */ } @@ -1185,8 +1199,9 @@ not_circular: for (;;) { ipc_port_t next; - if (dest == base) + if (dest == base) { break; + } /* port is in transit */ @@ -1201,8 +1216,8 @@ not_circular: /* base is not in transit */ assert(!ip_active(base) || - (base->ip_receiver_name != MACH_PORT_NULL) || - (base->ip_destination == IP_NULL)); + (base->ip_receiver_name != MACH_PORT_NULL) || + (base->ip_destination == IP_NULL)); ip_unlock(base); @@ -1264,7 +1279,6 @@ retry_alloc: if (port_send_turnstile(port) == NULL || port_send_turnstile(port)->ts_port_ref == 0) { - if (turnstile == TURNSTILE_NULL) { imq_unlock(&port->ip_messages); turnstile = turnstile_alloc(); @@ -1272,8 +1286,8 @@ retry_alloc: } send_turnstile = turnstile_prepare((uintptr_t)port, - port_send_turnstile_address(port), - turnstile, TURNSTILE_SYNC_IPC); + port_send_turnstile_address(port), + turnstile, TURNSTILE_SYNC_IPC); turnstile = TURNSTILE_NULL; /* @@ -1291,7 +1305,7 @@ retry_alloc: inheritor = ipc_port_get_inheritor(port); } turnstile_update_inheritor(send_turnstile, inheritor, - TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE); + TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE); /* turnstile complete will be called in ipc_port_send_turnstile_complete */ } @@ -1301,7 +1315,7 @@ retry_alloc: if (send_turnstile) { turnstile_update_inheritor_complete(send_turnstile, - 
TURNSTILE_INTERLOCK_NOT_HELD); + TURNSTILE_INTERLOCK_NOT_HELD); } if (turnstile != TURNSTILE_NULL) { turnstile_deallocate(turnstile); @@ -1329,7 +1343,7 @@ ipc_port_send_turnstile_complete(ipc_port_t port) port_send_turnstile(port)->ts_port_ref--; if (port_send_turnstile(port)->ts_port_ref == 0) { turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), - &turnstile); + &turnstile); assert(turnstile != TURNSTILE_NULL); } imq_unlock(&port->ip_messages); @@ -1518,11 +1532,11 @@ ipc_port_adjust_special_reply_port_locked( /* Check if the special reply port is marked non-special */ if (special_reply_port->ip_specialreply == 0 || - special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) { + special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) { if (get_turnstile) { turnstile_complete((uintptr_t)special_reply_port, - port_rcv_turnstile_address(special_reply_port), - NULL); + port_rcv_turnstile_address(special_reply_port), + NULL); } imq_unlock(&special_reply_port->ip_messages); ip_unlock(special_reply_port); @@ -1551,7 +1565,7 @@ ipc_port_adjust_special_reply_port_locked( special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT) { if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) { inheritor = filt_machport_stash_port(kn, special_reply_port, - &sync_link_state); + &sync_link_state); } } } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) { @@ -1592,14 +1606,14 @@ ipc_port_adjust_special_reply_port_locked( /* Get thread's turnstile donated to special reply port */ if (get_turnstile) { turnstile_complete((uintptr_t)special_reply_port, - port_rcv_turnstile_address(special_reply_port), - NULL); + port_rcv_turnstile_address(special_reply_port), + NULL); } else { ts = ipc_port_rcv_turnstile(special_reply_port); if (ts) { turnstile_reference(ts); turnstile_update_inheritor(ts, inheritor, - (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); + (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); } } @@ -1697,7 +1711,7 @@ ipc_port_impcount_delta( mach_port_delta_t delta, ipc_port_t __unused base) { - mach_port_delta_t absdelta; + mach_port_delta_t absdelta; if (!ip_active(port)) { return 0; @@ -1709,7 +1723,7 @@ ipc_port_impcount_delta( return delta; } - absdelta = 0 - delta; + absdelta = 0 - delta; if (port->ip_impcount >= absdelta) { port->ip_impcount -= absdelta; return delta; @@ -1730,11 +1744,10 @@ ipc_port_impcount_delta( target_pid = -1; } printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), " - "dropping %d assertion(s) but port only has %d remaining.\n", - port->ip_receiver_name, - target_pid, target_procname, - absdelta, port->ip_impcount); - + "dropping %d assertion(s) but port only has %d remaining.\n", + port->ip_receiver_name, + target_pid, target_procname, + absdelta, port->ip_impcount); } else if (base != IP_NULL) { task_t target_task = base->ip_receiver->is_task; ipc_importance_task_t target_imp = target_task->task_imp_base; @@ -1749,12 +1762,12 @@ ipc_port_impcount_delta( target_pid = -1; } printf("Over-release of importance assertions for port 0x%lx " - "enqueued on port 0x%x with receiver pid %d (%s), " - "dropping %d assertion(s) but port only has %d remaining.\n", - (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port), - base->ip_receiver_name, - target_pid, target_procname, - absdelta, port->ip_impcount); + "enqueued on port 0x%x with receiver pid %d (%s), " + "dropping %d assertion(s) but port only has %d remaining.\n", + (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port), + 
base->ip_receiver_name, + target_pid, target_procname, + absdelta, port->ip_impcount); } #endif @@ -1782,18 +1795,19 @@ ipc_port_impcount_delta( boolean_t ipc_port_importance_delta_internal( - ipc_port_t port, - natural_t options, - mach_port_delta_t *deltap, - ipc_importance_task_t *imp_task) + ipc_port_t port, + natural_t options, + mach_port_delta_t *deltap, + ipc_importance_task_t *imp_task) { ipc_port_t next, base; boolean_t dropped = FALSE; *imp_task = IIT_NULL; - if (*deltap == 0) + if (*deltap == 0) { return FALSE; + } assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE); @@ -1803,17 +1817,15 @@ ipc_port_importance_delta_internal( if (ip_active(port) && port->ip_destination != IP_NULL && port->ip_receiver_name == MACH_PORT_NULL) { - dropped = TRUE; ip_unlock(port); ipc_port_multiple_lock(); /* massive serialization */ ip_lock(base); - while(ip_active(base) && - base->ip_destination != IP_NULL && - base->ip_receiver_name == MACH_PORT_NULL) { - + while (ip_active(base) && + base->ip_destination != IP_NULL && + base->ip_receiver_name == MACH_PORT_NULL) { base = base->ip_destination; ip_lock(base); } @@ -1833,10 +1845,11 @@ ipc_port_importance_delta_internal( */ if (options & IPID_OPTION_SENDPOSSIBLE) { assert(*deltap == 1); - if (port->ip_sprequests && port->ip_spimportant == 0) + if (port->ip_sprequests && port->ip_spimportant == 0) { port->ip_spimportant = 1; - else + } else { *deltap = 0; + } } /* unlock down to the base, adjusting boost(s) at each level */ @@ -1857,10 +1870,10 @@ ipc_port_importance_delta_internal( /* find the task (if any) to boost according to the base */ if (ip_active(base)) { if (base->ip_tempowner != 0) { - if (IIT_NULL != base->ip_imp_task) + if (IIT_NULL != base->ip_imp_task) { *imp_task = base->ip_imp_task; + } /* otherwise don't boost */ - } else if (base->ip_receiver_name != MACH_PORT_NULL) { ipc_space_t space = base->ip_receiver; @@ -1908,27 +1921,30 @@ ipc_port_importance_delta_internal( boolean_t ipc_port_importance_delta( - ipc_port_t port, - natural_t options, - mach_port_delta_t delta) + ipc_port_t port, + natural_t options, + mach_port_delta_t delta) { ipc_importance_task_t imp_task = IIT_NULL; boolean_t dropped; dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task); - if (IIT_NULL == imp_task || delta == 0) + if (IIT_NULL == imp_task || delta == 0) { return dropped; + } - if (!dropped) + if (!dropped) { ip_unlock(port); + } assert(ipc_importance_task_is_any_receiver_type(imp_task)); - if (delta > 0) + if (delta > 0) { ipc_importance_task_hold_internal_assertion(imp_task, delta); - else + } else { ipc_importance_task_drop_internal_assertion(imp_task, -delta); + } ipc_importance_task_release(imp_task); return TRUE; @@ -1942,15 +1958,15 @@ ipc_port_importance_delta( * Returns IP_NULL if name doesn't denote a receive right. * Conditions: * The space must be locked (read or write) and active. - * Being the active space, we can rely on thread server_id + * Being the active space, we can rely on thread server_id * context to give us the proper server level sub-order * within the space. 
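
ipc_port_impcount_delta above never lets an importance over-release drive ip_impcount negative: it clamps the drop to what the port actually holds, logs the discrepancy (the printf paths just shown), and returns the delta it really applied so callers stay balanced. The clamping arithmetic on its own, with types simplified to int for the sketch:

#include <stdio.h>

/* Apply a signed assertion delta to *impcount, clamping a
 * too-large drop the way ipc_port_impcount_delta() does.
 * Returns the delta actually applied. */
static int impcount_delta(int *impcount, int delta)
{
    if (delta >= 0) {                 /* boosts always apply */
        *impcount += delta;
        return delta;
    }
    int absdelta = -delta;
    if (*impcount >= absdelta) {      /* normal balanced drop */
        *impcount -= absdelta;
        return delta;
    }
    /* Over-release: drop what we have, report the clamp. */
    int applied = *impcount;
    fprintf(stderr, "over-release: dropping %d, port only has %d\n",
        absdelta, applied);
    *impcount = 0;
    return -applied;
}

int main(void)
{
    int impcount = 3;
    impcount_delta(&impcount, 2);                 /* 3 -> 5 */
    int applied = impcount_delta(&impcount, -9);  /* clamped to -5 */
    printf("impcount %d, applied %d\n", impcount, applied);
    return 0;
}
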
*/ ipc_port_t ipc_port_lookup_notify( - ipc_space_t space, - mach_port_name_t name) + ipc_space_t space, + mach_port_name_t name) { ipc_port_t port; ipc_entry_t entry; @@ -1958,10 +1974,12 @@ ipc_port_lookup_notify( assert(is_active(space)); entry = ipc_entry_lookup(space, name); - if (entry == IE_NULL) + if (entry == IE_NULL) { return IP_NULL; - if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) + } + if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) { return IP_NULL; + } __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object); assert(port != IP_NULL); @@ -1988,7 +2006,7 @@ ipc_port_lookup_notify( */ ipc_port_t ipc_port_make_send_locked( - ipc_port_t port) + ipc_port_t port) { assert(ip_active(port)); port->ip_mscount++; @@ -2005,11 +2023,11 @@ ipc_port_make_send_locked( ipc_port_t ipc_port_make_send( - ipc_port_t port) + ipc_port_t port) { - - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return port; + } ip_lock(port); if (ip_active(port)) { @@ -2037,12 +2055,13 @@ ipc_port_make_send( ipc_port_t ipc_port_copy_send( - ipc_port_t port) + ipc_port_t port) { ipc_port_t sright; - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return port; + } ip_lock(port); if (ip_active(port)) { @@ -2051,8 +2070,9 @@ ipc_port_copy_send( ip_reference(port); port->ip_srights++; sright = port; - } else + } else { sright = IP_DEAD; + } ip_unlock(port); return sright; @@ -2069,8 +2089,8 @@ ipc_port_copy_send( mach_port_name_t ipc_port_copyout_send( - ipc_port_t sright, - ipc_space_t space) + ipc_port_t sright, + ipc_space_t space) { mach_port_name_t name; @@ -2078,17 +2098,19 @@ ipc_port_copyout_send( kern_return_t kr; kr = ipc_object_copyout(space, (ipc_object_t) sright, - MACH_MSG_TYPE_PORT_SEND, TRUE, &name); + MACH_MSG_TYPE_PORT_SEND, TRUE, &name); if (kr != KERN_SUCCESS) { ipc_port_release_send(sright); - if (kr == KERN_INVALID_CAPABILITY) + if (kr == KERN_INVALID_CAPABILITY) { name = MACH_PORT_DEAD; - else + } else { name = MACH_PORT_NULL; + } } - } else + } else { name = CAST_MACH_PORT_TO_NAME(sright); + } return name; } @@ -2104,25 +2126,27 @@ ipc_port_copyout_send( mach_port_name_t ipc_port_copyout_name_send( - ipc_port_t sright, - ipc_space_t space, + ipc_port_t sright, + ipc_space_t space, mach_port_name_t name) { if (IP_VALID(sright)) { kern_return_t kr; kr = ipc_object_copyout_name(space, (ipc_object_t) sright, - MACH_MSG_TYPE_PORT_SEND, TRUE, name); + MACH_MSG_TYPE_PORT_SEND, TRUE, name); if (kr != KERN_SUCCESS) { ipc_port_release_send(sright); - if (kr == KERN_INVALID_CAPABILITY) + if (kr == KERN_INVALID_CAPABILITY) { name = MACH_PORT_DEAD; - else + } else { name = MACH_PORT_NULL; + } } - } else + } else { name = CAST_MACH_PORT_TO_NAME(sright); + } return name; } @@ -2138,13 +2162,14 @@ ipc_port_copyout_name_send( void ipc_port_release_send( - ipc_port_t port) + ipc_port_t port) { ipc_port_t nsrequest = IP_NULL; mach_port_mscount_t mscount; - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return; + } ip_lock(port); @@ -2185,7 +2210,7 @@ ipc_port_release_send( ipc_port_t ipc_port_make_sonce_locked( - ipc_port_t port) + ipc_port_t port) { assert(ip_active(port)); port->ip_sorights++; @@ -2203,10 +2228,11 @@ ipc_port_make_sonce_locked( ipc_port_t ipc_port_make_sonce( - ipc_port_t port) + ipc_port_t port) { - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return port; + } ip_lock(port); if (ip_active(port)) { @@ -2235,10 +2261,11 @@ ipc_port_make_sonce( void ipc_port_release_sonce( - ipc_port_t port) + ipc_port_t port) { - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return; + } 
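
ipc_port_copyout_send above collapses copyout failure into the returned name itself: KERN_INVALID_CAPABILITY (the space is dead) becomes MACH_PORT_DEAD, any other failure becomes MACH_PORT_NULL, and the send right is consumed either way. That mapping as a standalone helper; the constants and types here are stand-ins, not the Mach definitions:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t toy_name_t;
typedef int toy_kr_t;

#define TOY_SUCCESS             0
#define TOY_INVALID_CAPABILITY  20          /* stand-in for KERN_INVALID_CAPABILITY */
#define TOY_PORT_NULL           ((toy_name_t)0)
#define TOY_PORT_DEAD           ((toy_name_t)~0u)

/* Fold a copyout result into the returned name, mirroring
 * ipc_port_copyout_send(): callers get a name or a sentinel,
 * never a kern_return_t to propagate. */
static toy_name_t fold_copyout(toy_kr_t kr, toy_name_t name)
{
    if (kr == TOY_SUCCESS)
        return name;
    return (kr == TOY_INVALID_CAPABILITY) ? TOY_PORT_DEAD : TOY_PORT_NULL;
}

int main(void)
{
    printf("0x%x\n", fold_copyout(TOY_INVALID_CAPABILITY, 0x1234));
    return 0;
}
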
ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_SR_NONE, FALSE); @@ -2266,12 +2293,13 @@ ipc_port_release_sonce( void ipc_port_release_receive( - ipc_port_t port) + ipc_port_t port) { ipc_port_t dest; - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return; + } ip_lock(port); assert(ip_active(port)); @@ -2298,18 +2326,19 @@ ipc_port_release_receive( ipc_port_t ipc_port_alloc_special( - ipc_space_t space) + ipc_space_t space) { ipc_port_t port; __IGNORE_WCASTALIGN(port = (ipc_port_t) io_alloc(IOT_PORT)); - if (port == IP_NULL) + if (port == IP_NULL) { return IP_NULL; + } #if MACH_ASSERT uintptr_t buf[IP_CALLSTACK_MAX]; ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX); -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ bzero((char *)port, sizeof(*port)); io_lock_init(&port->ip_object); @@ -2320,7 +2349,7 @@ ipc_port_alloc_special( #if MACH_ASSERT ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX); -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ return port; } @@ -2336,8 +2365,8 @@ ipc_port_alloc_special( void ipc_port_dealloc_special( - ipc_port_t port, - __assert_only ipc_space_t space) + ipc_port_t port, + __assert_only ipc_space_t space) { ip_lock(port); assert(ip_active(port)); @@ -2372,7 +2401,7 @@ ipc_port_dealloc_special( */ void ipc_port_finalize( - ipc_port_t port) + ipc_port_t port) { ipc_port_request_t requests = port->ip_requests; @@ -2390,10 +2419,10 @@ ipc_port_finalize( } ipc_mqueue_deinit(&port->ip_messages); - -#if MACH_ASSERT + +#if MACH_ASSERT ipc_port_track_dealloc(port); -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ } /* @@ -2512,7 +2541,7 @@ kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, } } -#if MACH_ASSERT +#if MACH_ASSERT #include /* @@ -2521,23 +2550,23 @@ kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, * deallocation is intercepted via io_free. 
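
The MACH_ASSERT section beginning here stamps every port with its creating thread, a creation timestamp, and, only when the ipc_portbt boot-arg is set, a creation backtrace, so the costly capture stays opt-in. A user-space analogue gated by an environment variable instead of a boot-arg; this assumes a platform with POSIX <execinfo.h>, and the names are invented:

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CALLSTACK_MAX 16

static int portbt = -1;   /* -1: not yet parsed (the boot-arg lookup) */

static void callstack_init(void *stack[CALLSTACK_MAX])
{
    memset(stack, 0, CALLSTACK_MAX * sizeof(void *));  /* always zeroed */
    if (portbt < 0)
        portbt = getenv("TOY_PORTBT") != NULL;         /* parse once */
    if (portbt)
        backtrace(stack, CALLSTACK_MAX);               /* opt-in capture */
}

int main(void)
{
    void *stack[CALLSTACK_MAX];
    callstack_init(stack);
    printf("captured %s\n", stack[0] ? "a backtrace" : "nothing (gate off)");
    return 0;
}
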
*/ #if 0 -queue_head_t port_alloc_queue; -lck_spin_t port_alloc_queue_lock; +queue_head_t port_alloc_queue; +lck_spin_t port_alloc_queue_lock; #endif -unsigned long port_count = 0; -unsigned long port_count_warning = 20000; -unsigned long port_timestamp = 0; +unsigned long port_count = 0; +unsigned long port_count_warning = 20000; +unsigned long port_timestamp = 0; -void db_port_stack_trace( - ipc_port_t port); -void db_ref( - int refs); -int db_port_walk( - unsigned int verbose, - unsigned int display, - unsigned int ref_search, - unsigned int ref_target); +void db_port_stack_trace( + ipc_port_t port); +void db_ref( + int refs); +int db_port_walk( + unsigned int verbose, + unsigned int display, + unsigned int ref_search, + unsigned int ref_target); /* * Initialize global state needed for run-time @@ -2551,8 +2580,9 @@ ipc_port_debug_init(void) lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr); #endif - if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof (ipc_portbt))) + if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof(ipc_portbt))) { ipc_portbt = 0; + } } #ifdef MACH_BSD @@ -2565,33 +2595,37 @@ extern int proc_pid(struct proc*); */ void ipc_port_init_debug( - ipc_port_t port, - uintptr_t *callstack, - unsigned int callstack_max) + ipc_port_t port, + uintptr_t *callstack, + unsigned int callstack_max) { - unsigned int i; + unsigned int i; port->ip_thread = current_thread(); port->ip_timetrack = port_timestamp++; - for (i = 0; i < callstack_max; ++i) - port->ip_callstack[i] = callstack[i]; - for (i = 0; i < IP_NSPARES; ++i) - port->ip_spares[i] = 0; + for (i = 0; i < callstack_max; ++i) { + port->ip_callstack[i] = callstack[i]; + } + for (i = 0; i < IP_NSPARES; ++i) { + port->ip_spares[i] = 0; + } #ifdef MACH_BSD task_t task = current_task(); if (task != TASK_NULL) { struct proc* proc = (struct proc*) get_bsdtask_info(task); - if (proc) + if (proc) { port->ip_spares[0] = proc_pid(proc); + } } #endif /* MACH_BSD */ #if 0 lck_spin_lock(&port_alloc_queue_lock); ++port_count; - if (port_count_warning > 0 && port_count >= port_count_warning) + if (port_count_warning > 0 && port_count >= port_count_warning) { assert(port_count < port_count_warning); + } queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links); lck_spin_unlock(&port_alloc_queue_lock); #endif @@ -2608,17 +2642,19 @@ ipc_port_init_debug( */ void ipc_port_callstack_init_debug( - uintptr_t *callstack, - unsigned int callstack_max) + uintptr_t *callstack, + unsigned int callstack_max) { - unsigned int i; + unsigned int i; /* guarantee the callstack is initialized */ - for (i=0; i < callstack_max; i++) - callstack[i] = 0; + for (i = 0; i < callstack_max; i++) { + callstack[i] = 0; + } - if (ipc_portbt) + if (ipc_portbt) { machine_callstack(callstack, callstack_max); + } } /* @@ -2629,13 +2665,13 @@ ipc_port_callstack_init_debug( #if 1 void ipc_port_track_dealloc( - __unused ipc_port_t port) + __unused ipc_port_t port) { } #else void ipc_port_track_dealloc( - ipc_port_t port) + ipc_port_t port) { lck_spin_lock(&port_alloc_queue_lock); assert(port_count > 0); @@ -2646,4 +2682,4 @@ ipc_port_track_dealloc( #endif -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ diff --git a/osfmk/ipc/ipc_port.h b/osfmk/ipc/ipc_port.h index 16addb831..971f77821 100644 --- a/osfmk/ipc/ipc_port.h +++ b/osfmk/ipc/ipc_port.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -69,7 +69,7 @@ * Definitions for ports. */ -#ifndef _IPC_IPC_PORT_H_ +#ifndef _IPC_IPC_PORT_H_ #define _IPC_IPC_PORT_H_ #ifdef MACH_KERNEL_PRIVATE @@ -111,7 +111,6 @@ typedef unsigned int ipc_port_timestamp_t; struct ipc_port { - /* * Initial sub-structure in common with ipc_pset * First element is an ipc_object second is a @@ -145,64 +144,64 @@ struct ipc_port { mach_vm_address_t ip_context; - natural_t ip_sprequests:1, /* send-possible requests outstanding */ - ip_spimportant:1, /* ... at least one is importance donating */ - ip_impdonation:1, /* port supports importance donation */ - ip_tempowner:1, /* dont give donations to current receiver */ - ip_guarded:1, /* port guarded (use context value as guard) */ - ip_strict_guard:1, /* Strict guarding; Prevents user manipulation of context values directly */ - ip_specialreply:1, /* port is a special reply port */ - ip_sync_link_state:3, /* link the special reply port to destination port/ Workloop */ - ip_impcount:22; /* number of importance donations in nested queue */ + natural_t ip_sprequests:1, /* send-possible requests outstanding */ + ip_spimportant:1, /* ... 
at least one is importance donating */ + ip_impdonation:1, /* port supports importance donation */ + ip_tempowner:1, /* dont give donations to current receiver */ + ip_guarded:1, /* port guarded (use context value as guard) */ + ip_strict_guard:1, /* Strict guarding; Prevents user manipulation of context values directly */ + ip_specialreply:1, /* port is a special reply port */ + ip_sync_link_state:3, /* link the special reply port to destination port/ Workloop */ + ip_impcount:22; /* number of importance donations in nested queue */ mach_port_mscount_t ip_mscount; mach_port_rights_t ip_srights; mach_port_rights_t ip_sorights; -#if MACH_ASSERT -#define IP_NSPARES 4 -#define IP_CALLSTACK_MAX 16 +#if MACH_ASSERT +#define IP_NSPARES 4 +#define IP_CALLSTACK_MAX 16 /* queue_chain_t ip_port_links;*//* all allocated ports */ - thread_t ip_thread; /* who made me? thread context */ - unsigned long ip_timetrack; /* give an idea of "when" created */ - uintptr_t ip_callstack[IP_CALLSTACK_MAX]; /* stack trace */ - unsigned long ip_spares[IP_NSPARES]; /* for debugging */ -#endif /* MACH_ASSERT */ + thread_t ip_thread; /* who made me? thread context */ + unsigned long ip_timetrack; /* give an idea of "when" created */ + uintptr_t ip_callstack[IP_CALLSTACK_MAX]; /* stack trace */ + unsigned long ip_spares[IP_NSPARES]; /* for debugging */ +#endif /* MACH_ASSERT */ #if DEVELOPMENT || DEBUG - uint8_t ip_srp_lost_link:1, /* special reply port turnstile link chain broken */ - ip_srp_msg_sent:1; /* special reply port msg sent */ + uint8_t ip_srp_lost_link:1, /* special reply port turnstile link chain broken */ + ip_srp_msg_sent:1; /* special reply port msg sent */ #endif }; -#define ip_references ip_object.io_references -#define ip_bits ip_object.io_bits +#define ip_references ip_object.io_references +#define ip_bits ip_object.io_bits -#define ip_receiver_name ip_messages.imq_receiver_name -#define ip_in_pset ip_messages.imq_in_pset +#define ip_receiver_name ip_messages.imq_receiver_name +#define ip_in_pset ip_messages.imq_in_pset -#define ip_receiver data.receiver -#define ip_destination data.destination -#define ip_timestamp data.timestamp +#define ip_receiver data.receiver +#define ip_destination data.destination +#define ip_timestamp data.timestamp -#define ip_kobject kdata.kobject -#define ip_imp_task kdata.imp_task -#define ip_sync_inheritor_port kdata.sync_inheritor_port -#define ip_sync_inheritor_knote kdata.sync_inheritor_knote -#define ip_sync_inheritor_ts kdata.sync_inheritor_ts +#define ip_kobject kdata.kobject +#define ip_imp_task kdata.imp_task +#define ip_sync_inheritor_port kdata.sync_inheritor_port +#define ip_sync_inheritor_knote kdata.sync_inheritor_knote +#define ip_sync_inheritor_ts kdata.sync_inheritor_ts -#define ip_premsg kdata2.premsg -#define ip_send_turnstile kdata2.send_turnstile -#define ip_dealloc_elm kdata2.dealloc_elm +#define ip_premsg kdata2.premsg +#define ip_send_turnstile kdata2.send_turnstile +#define ip_dealloc_elm kdata2.dealloc_elm -#define port_send_turnstile(port) (IP_PREALLOC(port) ? (port)->ip_premsg->ikm_turnstile : (port)->ip_send_turnstile) +#define port_send_turnstile(port) (IP_PREALLOC(port) ? 
(port)->ip_premsg->ikm_turnstile : (port)->ip_send_turnstile) #define set_port_send_turnstile(port, value) \ MACRO_BEGIN \ if (IP_PREALLOC(port)) { \ - (port)->ip_premsg->ikm_turnstile = (value); \ + (port)->ip_premsg->ikm_turnstile = (value); \ } else { \ - (port)->ip_send_turnstile = (value); \ + (port)->ip_send_turnstile = (value); \ } \ MACRO_END @@ -245,31 +244,31 @@ MACRO_END #define PORT_SYNC_LINK_WORKLOOP_STASH (0x3) #define PORT_SYNC_LINK_NO_LINKAGE (0x4) -#define IP_NULL IPC_PORT_NULL -#define IP_DEAD IPC_PORT_DEAD -#define IP_VALID(port) IPC_PORT_VALID(port) +#define IP_NULL IPC_PORT_NULL +#define IP_DEAD IPC_PORT_DEAD +#define IP_VALID(port) IPC_PORT_VALID(port) -#define ip_active(port) io_active(&(port)->ip_object) -#define ip_lock_init(port) io_lock_init(&(port)->ip_object) -#define ip_lock(port) io_lock(&(port)->ip_object) -#define ip_lock_try(port) io_lock_try(&(port)->ip_object) -#define ip_lock_held_kdp(port) io_lock_held_kdp(&(port)->ip_object) -#define ip_unlock(port) io_unlock(&(port)->ip_object) +#define ip_active(port) io_active(&(port)->ip_object) +#define ip_lock_init(port) io_lock_init(&(port)->ip_object) +#define ip_lock(port) io_lock(&(port)->ip_object) +#define ip_lock_try(port) io_lock_try(&(port)->ip_object) +#define ip_lock_held_kdp(port) io_lock_held_kdp(&(port)->ip_object) +#define ip_unlock(port) io_unlock(&(port)->ip_object) -#define ip_reference(port) io_reference(&(port)->ip_object) -#define ip_release(port) io_release(&(port)->ip_object) +#define ip_reference(port) io_reference(&(port)->ip_object) +#define ip_release(port) io_release(&(port)->ip_object) /* get an ipc_port pointer from an ipc_mqueue pointer */ -#define ip_from_mq(mq) \ - __container_of(mq, struct ipc_port, ip_messages) +#define ip_from_mq(mq) \ + __container_of(mq, struct ipc_port, ip_messages) -#define ip_reference_mq(mq) ip_reference(ip_from_mq(mq)) -#define ip_release_mq(mq) ip_release(ip_from_mq(mq)) +#define ip_reference_mq(mq) ip_reference(ip_from_mq(mq)) +#define ip_release_mq(mq) ip_release(ip_from_mq(mq)) -#define ip_kotype(port) io_kotype(&(port)->ip_object) +#define ip_kotype(port) io_kotype(&(port)->ip_object) -#define ip_full_kernel(port) imq_full_kernel(&(port)->ip_messages) -#define ip_full(port) imq_full(&(port)->ip_messages) +#define ip_full_kernel(port) imq_full_kernel(&(port)->ip_messages) +#define ip_full(port) imq_full(&(port)->ip_messages) /* * JMM - Preallocation flag @@ -279,20 +278,20 @@ MACRO_END * sends by critical system threads (which may be needed to free memory and * therefore cannot be blocked waiting for memory themselves). 
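
ip_from_mq above recovers the owning ipc_port from a pointer to its embedded ip_messages queue with __container_of, which is plain offsetof arithmetic, valid precisely because the mqueue is embedded inside the port. A freestanding rendition (one common way to write the macro; the structs are toys):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_mqueue { int seqno; };

struct toy_port {
    unsigned refs;
    struct toy_mqueue messages;   /* embedded, like ip_messages */
};

int main(void)
{
    struct toy_port port = { 1, { 42 } };
    struct toy_mqueue *mq = &port.messages;

    /* Walk back from the member to its container, as ip_from_mq does. */
    struct toy_port *p = container_of(mq, struct toy_port, messages);
    printf("recovered port refs=%u seqno=%d\n", p->refs, p->messages.seqno);
    return p == &port ? 0 : 1;
}
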
*/ -#define IP_BIT_PREALLOC 0x00008000 /* preallocated mesg */ -#define IP_PREALLOC(port) ((port)->ip_bits & IP_BIT_PREALLOC) +#define IP_BIT_PREALLOC 0x00008000 /* preallocated mesg */ +#define IP_PREALLOC(port) ((port)->ip_bits & IP_BIT_PREALLOC) -#define IP_SET_PREALLOC(port, kmsg) \ -MACRO_BEGIN \ - (port)->ip_bits |= IP_BIT_PREALLOC; \ - (port)->ip_premsg = (kmsg); \ +#define IP_SET_PREALLOC(port, kmsg) \ +MACRO_BEGIN \ + (port)->ip_bits |= IP_BIT_PREALLOC; \ + (port)->ip_premsg = (kmsg); \ MACRO_END -#define IP_CLEAR_PREALLOC(port, kmsg) \ -MACRO_BEGIN \ - assert((port)->ip_premsg == kmsg); \ - (port)->ip_bits &= ~IP_BIT_PREALLOC; \ - (port)->ip_premsg = IKM_NULL; \ +#define IP_CLEAR_PREALLOC(port, kmsg) \ +MACRO_BEGIN \ + assert((port)->ip_premsg == kmsg); \ + (port)->ip_bits &= ~IP_BIT_PREALLOC; \ + (port)->ip_premsg = IKM_NULL; \ MACRO_END /* JMM - address alignment/packing for LP64 */ @@ -308,25 +307,25 @@ struct ipc_port_request { } name; }; -#define ipr_next notify.index -#define ipr_size name.size +#define ipr_next notify.index +#define ipr_size name.size -#define ipr_soright notify.port -#define ipr_name name.name +#define ipr_soright notify.port +#define ipr_name name.name /* * Use the low bits in the ipr_soright to specify the request type */ -#define IPR_SOR_SPARM_MASK 1 /* send-possible armed */ -#define IPR_SOR_SPREQ_MASK 2 /* send-possible requested */ -#define IPR_SOR_SPBIT_MASK 3 /* combo */ -#define IPR_SOR_SPARMED(sor) (((uintptr_t)(sor) & IPR_SOR_SPARM_MASK) != 0) -#define IPR_SOR_SPREQ(sor) (((uintptr_t)(sor) & IPR_SOR_SPREQ_MASK) != 0) -#define IPR_SOR_PORT(sor) ((ipc_port_t)((uintptr_t)(sor) & ~IPR_SOR_SPBIT_MASK)) -#define IPR_SOR_MAKE(p,m) ((ipc_port_t)((uintptr_t)(p) | (m))) +#define IPR_SOR_SPARM_MASK 1 /* send-possible armed */ +#define IPR_SOR_SPREQ_MASK 2 /* send-possible requested */ +#define IPR_SOR_SPBIT_MASK 3 /* combo */ +#define IPR_SOR_SPARMED(sor) (((uintptr_t)(sor) & IPR_SOR_SPARM_MASK) != 0) +#define IPR_SOR_SPREQ(sor) (((uintptr_t)(sor) & IPR_SOR_SPREQ_MASK) != 0) +#define IPR_SOR_PORT(sor) ((ipc_port_t)((uintptr_t)(sor) & ~IPR_SOR_SPBIT_MASK)) +#define IPR_SOR_MAKE(p, m) ((ipc_port_t)((uintptr_t)(p) | (m))) -extern lck_grp_t ipc_lck_grp; -extern lck_attr_t ipc_lck_attr; +extern lck_grp_t ipc_lck_grp; +extern lck_attr_t ipc_lck_attr; /* * Taking the ipc_port_multiple lock grants the privilege @@ -336,14 +335,14 @@ extern lck_attr_t ipc_lck_attr; extern lck_spin_t ipc_port_multiple_lock_data; -#define ipc_port_multiple_lock_init() \ - lck_spin_init(&ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr) +#define ipc_port_multiple_lock_init() \ + lck_spin_init(&ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr) -#define ipc_port_multiple_lock() \ - lck_spin_lock(&ipc_port_multiple_lock_data) +#define ipc_port_multiple_lock() \ + lck_spin_lock_grp(&ipc_port_multiple_lock_data, &ipc_lck_grp) -#define ipc_port_multiple_unlock() \ - lck_spin_unlock(&ipc_port_multiple_lock_data) +#define ipc_port_multiple_unlock() \ + lck_spin_unlock(&ipc_port_multiple_lock_data) /* * The port timestamp facility provides timestamps @@ -363,56 +362,56 @@ extern ipc_port_timestamp_t ipc_port_timestamp(void); * as long as one and two aren't too far apart. 
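 *
 * For illustration (values made up): the signed-difference comparison in
 * IP_TIMESTAMP_ORDER below stays correct across counter wraparound as long
 * as the two stamps are less than 2^31 apart:
 *
 *	ipc_port_timestamp_t one = 0xFFFFFFF0;     -- taken just before wrap
 *	ipc_port_timestamp_t two = 0x00000010;     -- taken just after wrap
 *	assert(IP_TIMESTAMP_ORDER(one, two));      -- (int)(one - two) < 0, so "one" is older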
*/ -#define IP_TIMESTAMP_ORDER(one, two) ((int) ((one) - (two)) < 0) +#define IP_TIMESTAMP_ORDER(one, two) ((int) ((one) - (two)) < 0) -#define ipc_port_translate_receive(space, name, portp) \ - ipc_object_translate((space), (name), \ - MACH_PORT_RIGHT_RECEIVE, \ - (ipc_object_t *) (portp)) +#define ipc_port_translate_receive(space, name, portp) \ + ipc_object_translate((space), (name), \ + MACH_PORT_RIGHT_RECEIVE, \ + (ipc_object_t *) (portp)) -#define ipc_port_translate_send(space, name, portp) \ - ipc_object_translate((space), (name), \ - MACH_PORT_RIGHT_SEND, \ - (ipc_object_t *) (portp)) +#define ipc_port_translate_send(space, name, portp) \ + ipc_object_translate((space), (name), \ + MACH_PORT_RIGHT_SEND, \ + (ipc_object_t *) (portp)) /* Allocate a notification request slot */ #if IMPORTANCE_INHERITANCE extern kern_return_t ipc_port_request_alloc( - ipc_port_t port, - mach_port_name_t name, - ipc_port_t soright, - boolean_t send_possible, - boolean_t immediate, - ipc_port_request_index_t *indexp, - boolean_t *importantp); + ipc_port_t port, + mach_port_name_t name, + ipc_port_t soright, + boolean_t send_possible, + boolean_t immediate, + ipc_port_request_index_t *indexp, + boolean_t *importantp); #else extern kern_return_t ipc_port_request_alloc( - ipc_port_t port, - mach_port_name_t name, - ipc_port_t soright, - boolean_t send_possible, - boolean_t immediate, - ipc_port_request_index_t *indexp); + ipc_port_t port, + mach_port_name_t name, + ipc_port_t soright, + boolean_t send_possible, + boolean_t immediate, + ipc_port_request_index_t *indexp); #endif /* IMPORTANCE_INHERITANCE */ /* Grow one of a port's tables of notification requests */ extern kern_return_t ipc_port_request_grow( - ipc_port_t port, - ipc_table_elems_t target_size); + ipc_port_t port, + ipc_table_elems_t target_size); /* Return the type(s) of notification requests outstanding */ extern mach_port_type_t ipc_port_request_type( - ipc_port_t port, - mach_port_name_t name, - ipc_port_request_index_t index); + ipc_port_t port, + mach_port_name_t name, + ipc_port_request_index_t index); /* Cancel a notification request and return the send-once right */ extern ipc_port_t ipc_port_request_cancel( - ipc_port_t port, - mach_port_name_t name, - ipc_port_request_index_t index); + ipc_port_t port, + mach_port_name_t name, + ipc_port_request_index_t index); /* Arm any delayed send-possible notification */ extern boolean_t ipc_port_request_sparm( @@ -423,83 +422,83 @@ extern boolean_t ipc_port_request_sparm( mach_msg_priority_t override); /* Macros for manipulating a port's dead-name notification requests */ -#define ipc_port_request_rename(port, index, oname, nname) \ -MACRO_BEGIN \ - ipc_port_request_t ipr, table; \ - \ - assert(ip_active(port)); \ - \ - table = port->ip_requests; \ - assert(table != IPR_NULL); \ - \ - ipr = &table[index]; \ - assert(ipr->ipr_name == oname); \ - \ - ipr->ipr_name = nname; \ +#define ipc_port_request_rename(port, index, oname, nname) \ +MACRO_BEGIN \ + ipc_port_request_t ipr, table; \ + \ + assert(ip_active(port)); \ + \ + table = port->ip_requests; \ + assert(table != IPR_NULL); \ + \ + ipr = &table[index]; \ + assert(ipr->ipr_name == oname); \ + \ + ipr->ipr_name = nname; \ MACRO_END /* Make a port-deleted request */ extern void ipc_port_pdrequest( - ipc_port_t port, - ipc_port_t notify, - ipc_port_t *previousp); + ipc_port_t port, + ipc_port_t notify, + ipc_port_t *previousp); /* Make a no-senders request */ extern void ipc_port_nsrequest( - ipc_port_t port, - mach_port_mscount_t sync, -
ipc_port_t notify, - ipc_port_t *previousp); - -#define ipc_port_set_mscount(port, mscount) \ -MACRO_BEGIN \ - assert(ip_active(port)); \ - \ - (port)->ip_mscount = (mscount); \ + ipc_port_t port, + mach_port_mscount_t sync, + ipc_port_t notify, + ipc_port_t *previousp); + +#define ipc_port_set_mscount(port, mscount) \ +MACRO_BEGIN \ + assert(ip_active(port)); \ + \ + (port)->ip_mscount = (mscount); \ MACRO_END /* Prepare a receive right for transmission/destruction */ extern boolean_t ipc_port_clear_receiver( - ipc_port_t port, - boolean_t should_destroy); + ipc_port_t port, + boolean_t should_destroy); /* Initialize a newly-allocated port */ extern void ipc_port_init( - ipc_port_t port, - ipc_space_t space, - mach_port_name_t name); + ipc_port_t port, + ipc_space_t space, + mach_port_name_t name); /* Allocate a port */ extern kern_return_t ipc_port_alloc( - ipc_space_t space, - mach_port_name_t *namep, - ipc_port_t *portp); + ipc_space_t space, + mach_port_name_t *namep, + ipc_port_t *portp); /* Allocate a port, with a specific name */ extern kern_return_t ipc_port_alloc_name( - ipc_space_t space, - mach_port_name_t name, - ipc_port_t *portp); + ipc_space_t space, + mach_port_name_t name, + ipc_port_t *portp); /* Generate dead name notifications */ extern void ipc_port_dnnotify( - ipc_port_t port); + ipc_port_t port); /* Generate send-possible notifications */ extern void ipc_port_spnotify( - ipc_port_t port); + ipc_port_t port); /* Destroy a port */ extern void ipc_port_destroy( - ipc_port_t port); + ipc_port_t port); -/* Check if queueing "port" in a message for "dest" would create a circular - group of ports and messages */ +/* Check if queueing "port" in a message for "dest" would create a circular + * group of ports and messages */ extern boolean_t ipc_port_check_circularity( - ipc_port_t port, - ipc_port_t dest); + ipc_port_t port, + ipc_port_t dest); #if IMPORTANCE_INHERITANCE @@ -519,8 +518,8 @@ ipc_port_link_special_reply_port( #define IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE 0x2 #define IPC_PORT_ADJUST_SR_LINK_WORKLOOP 0x4 -#define IPC_PORT_ADJUST_SR_RECEIVED_MSG 0x8 -#define IPC_PORT_ADJUST_SR_ENABLE_EVENT 0x10 +#define IPC_PORT_ADJUST_SR_RECEIVED_MSG 0x8 +#define IPC_PORT_ADJUST_SR_ENABLE_EVENT 0x10 void reset_ip_srp_bits(ipc_port_t special_reply_port); @@ -567,52 +566,52 @@ ipc_port_rcv_turnstile(ipc_port_t port); /* apply importance delta to port only */ extern mach_port_delta_t ipc_port_impcount_delta( - ipc_port_t port, - mach_port_delta_t delta, - ipc_port_t base); + ipc_port_t port, + mach_port_delta_t delta, + ipc_port_t base); /* apply importance delta to port, and return task importance for update */ extern boolean_t ipc_port_importance_delta_internal( - ipc_port_t port, - natural_t options, - mach_port_delta_t *deltap, - ipc_importance_task_t *imp_task); + ipc_port_t port, + natural_t options, + mach_port_delta_t *deltap, + ipc_importance_task_t *imp_task); /* Apply an importance delta to a port and reflect change in receiver task */ extern boolean_t ipc_port_importance_delta( - ipc_port_t port, - natural_t options, - mach_port_delta_t delta); + ipc_port_t port, + natural_t options, + mach_port_delta_t delta); #endif /* IMPORTANCE_INHERITANCE */ /* Make a send-once notify port from a receive right */ extern ipc_port_t ipc_port_lookup_notify( - ipc_space_t space, - mach_port_name_t name); + ipc_space_t space, + mach_port_name_t name); /* Make a naked send right from a receive right - port locked and active */ extern ipc_port_t ipc_port_make_send_locked( - ipc_port_t 
port); + ipc_port_t port); /* Make a naked send right from a receive right */ extern ipc_port_t ipc_port_make_send( - ipc_port_t port); + ipc_port_t port); /* Make a naked send right from another naked send right */ extern ipc_port_t ipc_port_copy_send( - ipc_port_t port); + ipc_port_t port); /* Copyout a naked send right */ extern mach_port_name_t ipc_port_copyout_send( - ipc_port_t sright, - ipc_space_t space); + ipc_port_t sright, + ipc_space_t space); /* Copyout a naked send right to given name */ extern mach_port_name_t ipc_port_copyout_name_send( - ipc_port_t sright, - ipc_space_t space, + ipc_port_t sright, + ipc_space_t space, mach_port_name_t name); #endif /* MACH_KERNEL_PRIVATE */ @@ -621,7 +620,7 @@ extern mach_port_name_t ipc_port_copyout_name_send( /* Release a (valid) naked send right */ extern void ipc_port_release_send( - ipc_port_t port); + ipc_port_t port); extern void ipc_port_reference( ipc_port_t port); @@ -635,55 +634,55 @@ extern void ipc_port_release( /* Make a naked send-once right from a locked and active receive right */ extern ipc_port_t ipc_port_make_sonce_locked( - ipc_port_t port); + ipc_port_t port); /* Make a naked send-once right from a receive right */ extern ipc_port_t ipc_port_make_sonce( - ipc_port_t port); + ipc_port_t port); /* Release a naked send-once right */ extern void ipc_port_release_sonce( - ipc_port_t port); + ipc_port_t port); /* Release a naked (in limbo or in transit) receive right */ extern void ipc_port_release_receive( - ipc_port_t port); + ipc_port_t port); /* finalize the destruction of a port before it gets freed */ extern void ipc_port_finalize( - ipc_port_t port); + ipc_port_t port); /* Allocate a port in a special space */ extern ipc_port_t ipc_port_alloc_special( - ipc_space_t space); + ipc_space_t space); /* Deallocate a port in a special space */ extern void ipc_port_dealloc_special( - ipc_port_t port, - ipc_space_t space); + ipc_port_t port, + ipc_space_t space); -#if MACH_ASSERT +#if MACH_ASSERT /* Track low-level port deallocation */ extern void ipc_port_track_dealloc( - ipc_port_t port); + ipc_port_t port); /* Initialize general port debugging state */ extern void ipc_port_debug_init(void); -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ extern struct turnstile *ipc_port_get_inheritor( ipc_port_t port); -#define ipc_port_alloc_kernel() \ - ipc_port_alloc_special(ipc_space_kernel) -#define ipc_port_dealloc_kernel(port) \ - ipc_port_dealloc_special((port), ipc_space_kernel) +#define ipc_port_alloc_kernel() \ + ipc_port_alloc_special(ipc_space_kernel) +#define ipc_port_dealloc_kernel(port) \ + ipc_port_dealloc_special((port), ipc_space_kernel) -#define ipc_port_alloc_reply() \ - ipc_port_alloc_special(ipc_space_reply) -#define ipc_port_dealloc_reply(port) \ - ipc_port_dealloc_special((port), ipc_space_reply) +#define ipc_port_alloc_reply() \ + ipc_port_alloc_special(ipc_space_reply) +#define ipc_port_dealloc_reply(port) \ + ipc_port_dealloc_special((port), ipc_space_reply) #endif /* MACH_KERNEL_PRIVATE */ -#endif /* _IPC_IPC_PORT_H_ */ +#endif /* _IPC_IPC_PORT_H_ */ diff --git a/osfmk/ipc/ipc_pset.c b/osfmk/ipc/ipc_pset.c index 8a8e12979..c14e98a79 100644 --- a/osfmk/ipc/ipc_pset.c +++ b/osfmk/ipc/ipc_pset.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -94,17 +94,17 @@ kern_return_t ipc_pset_alloc( - ipc_space_t space, - mach_port_name_t *namep, - ipc_pset_t *psetp) + ipc_space_t space, + mach_port_name_t *namep, + ipc_pset_t *psetp) { ipc_pset_t pset; mach_port_name_t name; kern_return_t kr; kr = ipc_object_alloc(space, IOT_PORT_SET, - MACH_PORT_TYPE_PORT_SET, 0, - &name, (ipc_object_t *) &pset); + MACH_PORT_TYPE_PORT_SET, 0, + &name, (ipc_object_t *) &pset); if (kr != KERN_SUCCESS) { return kr; } @@ -134,16 +134,16 @@ ipc_pset_alloc( kern_return_t ipc_pset_alloc_name( - ipc_space_t space, - mach_port_name_t name, - ipc_pset_t *psetp) + ipc_space_t space, + mach_port_name_t name, + ipc_pset_t *psetp) { ipc_pset_t pset; kern_return_t kr; kr = ipc_object_alloc_name(space, IOT_PORT_SET, - MACH_PORT_TYPE_PORT_SET, 0, - name, (ipc_object_t *) &pset); + MACH_PORT_TYPE_PORT_SET, 0, + name, (ipc_object_t *) &pset); if (kr != KERN_SUCCESS) { return kr; } @@ -202,12 +202,12 @@ ipc_pset_alloc_special( */ boolean_t ipc_pset_member( - ipc_pset_t pset, - ipc_port_t port) + ipc_pset_t pset, + ipc_port_t port) { assert(ip_active(port)); - return (ipc_mqueue_member(&port->ip_messages, &pset->ips_messages)); + return ipc_mqueue_member(&port->ip_messages, &pset->ips_messages); } @@ -222,10 +222,10 @@ ipc_pset_member( kern_return_t ipc_pset_add( - ipc_pset_t pset, - ipc_port_t port, - uint64_t *reserved_link, - uint64_t *reserved_prepost) + ipc_pset_t pset, + ipc_port_t port, + uint64_t *reserved_link, + uint64_t *reserved_prepost) { kern_return_t kr; @@ -233,7 +233,7 @@ ipc_pset_add( assert(ip_active(port)); kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages, - reserved_link, reserved_prepost); + reserved_link, reserved_prepost); return kr; } @@ -252,15 +252,16 @@ ipc_pset_add( kern_return_t ipc_pset_remove( - ipc_pset_t pset, - ipc_port_t port) + ipc_pset_t pset, + ipc_port_t port) { kern_return_t kr; assert(ip_active(port)); - - if (port->ip_in_pset == 0) + + if (port->ip_in_pset == 0) { return KERN_NOT_IN_SET; + } kr = ipc_mqueue_remove(&port->ip_messages, &pset->ips_messages); @@ -286,8 +287,9 @@ ipc_pset_lazy_allocate( ipc_pset_t pset; kr = ipc_right_lookup_read(space, psname, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is read-locked and active */ if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) { @@ -326,12 +328,13 @@ ipc_pset_lazy_allocate( kern_return_t ipc_pset_remove_from_all( - ipc_port_t port) + ipc_port_t port) { - if (port->ip_in_pset == 0) + if (port->ip_in_pset == 0) { return KERN_NOT_IN_SET; + } - /* + /* * Remove the port's mqueue from all sets */ ipc_mqueue_remove_from_all(&port->ip_messages); @@ -351,7 +354,8 @@ ipc_pset_remove_from_all( void ipc_pset_destroy( - ipc_pset_t pset) + ipc_space_t space, + ipc_pset_t pset) { assert(ips_active(pset)); @@ -368,7 +372,7 @@ ipc_pset_destroy( * discover the change. 
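 *
 * Every caller in this change now supplies the owning space, so a destroy
 * site looks like this sketch (mirroring the call sites in ipc_right.c):
 *
 *	ips_lock(pset);
 *	assert(ips_active(pset));
 *	ipc_pset_destroy(space, pset);             -- consumes the ref, unlocks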
*/ imq_lock(&pset->ips_messages); - ipc_mqueue_changed(&pset->ips_messages); + ipc_mqueue_changed(space, &pset->ips_messages); imq_unlock(&pset->ips_messages); ipc_mqueue_deinit(&pset->ips_messages); @@ -443,14 +447,20 @@ filt_machport_stash_port(struct knote *kn, ipc_port_t port, int *link) struct turnstile *ts = filt_machport_kqueue_turnstile(kn); if (!ts) { - if (link) *link = PORT_SYNC_LINK_NO_LINKAGE; + if (link) { + *link = PORT_SYNC_LINK_NO_LINKAGE; + } } else if (kn->kn_ext[3] == 0) { ip_reference(port); kn->kn_ext[3] = (uintptr_t)port; - if (link) *link = PORT_SYNC_LINK_WORKLOOP_KNOTE; + if (link) { + *link = PORT_SYNC_LINK_WORKLOOP_KNOTE; + } } else { ts = (struct turnstile *)kn->kn_hook; - if (link) *link = PORT_SYNC_LINK_WORKLOOP_STASH; + if (link) { + *link = PORT_SYNC_LINK_WORKLOOP_STASH; + } } return ts; @@ -483,24 +493,25 @@ filt_machport_stashed_special_reply_port_turnstile(ipc_port_t port) */ void filt_machport_turnstile_prepare_lazily( - struct knote *kn, - mach_msg_type_name_t msgt_name, - ipc_port_t port) + struct knote *kn, + mach_msg_type_name_t msgt_name, + ipc_port_t port) { /* This is called from within filt_machportprocess */ assert((kn->kn_status & KN_SUPPRESSED) && (kn->kn_status & KN_LOCKED)); struct turnstile *ts = filt_machport_kqueue_turnstile(kn); - if (ts == TURNSTILE_NULL || kn->kn_ext[3] == 0 || kn->kn_hook) + if (ts == TURNSTILE_NULL || kn->kn_ext[3] == 0 || kn->kn_hook) { return; + } if ((msgt_name == MACH_MSG_TYPE_PORT_SEND_ONCE && port->ip_specialreply) || - (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE)) { + (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE)) { struct turnstile *kn_ts = turnstile_alloc(); kn_ts = turnstile_prepare((uintptr_t)kn, - (struct turnstile **)&kn->kn_hook, kn_ts, TURNSTILE_KNOTE); + (struct turnstile **)&kn->kn_hook, kn_ts, TURNSTILE_KNOTE); turnstile_update_inheritor(kn_ts, ts, - TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE); + TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE); turnstile_cleanup(); } } @@ -531,9 +542,9 @@ filt_machport_turnstile_complete(struct knote *kn) * neuter the linkage. */ if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE && - port->ip_sync_inheritor_knote == kn) { + port->ip_sync_inheritor_knote == kn) { ipc_port_adjust_special_reply_port_locked(port, NULL, - (IPC_PORT_ADJUST_SR_NONE | IPC_PORT_ADJUST_SR_ENABLE_EVENT), FALSE); + (IPC_PORT_ADJUST_SR_NONE | IPC_PORT_ADJUST_SR_ENABLE_EVENT), FALSE); } else { ip_unlock(port); } @@ -558,7 +569,7 @@ filt_machport_turnstile_complete(struct knote *kn) } if (ts) { turnstile_update_inheritor(ts, TURNSTILE_INHERITOR_NULL, - TURNSTILE_IMMEDIATE_UPDATE); + TURNSTILE_IMMEDIATE_UPDATE); turnstile_reference(ts); } imq_unlock(mqueue); @@ -578,7 +589,7 @@ filt_machport_turnstile_complete(struct knote *kn) ts = kn->kn_hook; turnstile_update_inheritor(ts, TURNSTILE_INHERITOR_NULL, - TURNSTILE_IMMEDIATE_UPDATE); + TURNSTILE_IMMEDIATE_UPDATE); turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD); turnstile_complete((uintptr_t)kn, (struct turnstile **)&kn->kn_hook, &ts); @@ -591,8 +602,8 @@ filt_machport_turnstile_complete(struct knote *kn) static int filt_machportattach( - struct knote *kn, - __unused struct kevent_internal_s *kev) + struct knote *kn, + __unused struct kevent_internal_s *kev) { mach_port_name_t name = (mach_port_name_t)kn->kn_kevent.ident; uint64_t wq_link_id = waitq_link_reserve(NULL); @@ -681,7 +692,6 @@ check_lookup: * need an indication of their fired state to be returned * from the attach operation. 
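 *
 * For context, a hedged userspace sketch (not part of this file) of the
 * registration this attach path services; the ext[0]/ext[1] buffer handling
 * matches the direct-receive logic in filt_machportprocess():
 *
 *	struct kevent64_s ev = {
 *		.ident  = port_name,               -- receive right or port set
 *		.filter = EVFILT_MACHPORT,
 *		.flags  = EV_ADD | EV_ENABLE,
 *		.fflags = MACH_RCV_MSG,            -- ask for direct receive
 *		.ext    = { msg_buf_addr, msg_buf_size },
 *	};
 *	kevent64(kq, &ev, 1, NULL, 0, 0, NULL);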
*/ - } else if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) { ipc_port_t port; @@ -714,7 +724,7 @@ check_lookup: if (send_turnstile) { turnstile_reference(send_turnstile); turnstile_update_inheritor(send_turnstile, turnstile, - (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); + (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE)); } if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) { @@ -724,7 +734,7 @@ check_lookup: is_read_unlock(space); if (send_turnstile) { turnstile_update_inheritor_complete(send_turnstile, - TURNSTILE_INTERLOCK_NOT_HELD); + TURNSTILE_INTERLOCK_NOT_HELD); turnstile_deallocate(send_turnstile); } @@ -733,7 +743,7 @@ check_lookup: is_read_unlock(space); error = ENOTSUP; } - } else { + } else { error = ENOENT; } @@ -781,8 +791,8 @@ filt_machportdetach( if (send_turnstile) { turnstile_reference(send_turnstile); turnstile_update_inheritor(send_turnstile, - ipc_port_get_inheritor(port), - TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE); + ipc_port_get_inheritor(port), + TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE); } } @@ -792,7 +802,7 @@ filt_machportdetach( if (send_turnstile) { turnstile_update_inheritor_complete(send_turnstile, - TURNSTILE_INTERLOCK_NOT_HELD); + TURNSTILE_INTERLOCK_NOT_HELD); turnstile_deallocate(send_turnstile); } @@ -819,19 +829,9 @@ filt_machportdetach( * the message is to be direct-received, we adjust the * QoS of the knote according to the requested and override * QoS of that first message. - * - * NOTE_REVOKE events are a legacy way to indicate that the port/portset - * was deallocated or left the current Mach portspace (modern technique - * is with an EV_VANISHED protocol). If we see NOTE_REVOKE, deliver an - * EV_EOF event for these changes (hopefully it will get delivered before - * the port name recycles to the same generation count and someone tries - * to re-register a kevent for it or the events are udata-specific - - * avoiding a conflict). */ static int -filt_machportevent( - struct knote *kn, - long hint) +filt_machportevent(struct knote *kn, long hint __assert_only) { ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue; ipc_kmsg_t first; @@ -839,11 +839,8 @@ filt_machportevent( /* mqueue locked by caller */ assert(imq_held(mqueue)); - - if (hint == NOTE_REVOKE) { - kn->kn_flags |= EV_EOF | EV_ONESHOT; - result = FILTER_ACTIVE | FILTER_RESET_EVENT_QOS; - } else if (imq_is_valid(mqueue)) { + assert(hint != NOTE_REVOKE); + if (imq_is_valid(mqueue)) { assert(!imq_is_set(mqueue)); if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) { result = FILTER_ACTIVE | filt_machport_adjust_qos(kn, first); @@ -908,7 +905,7 @@ filt_machportprocess( wait_result_t wresult; mach_msg_option_t option; mach_vm_address_t addr; - mach_msg_size_t size; + mach_msg_size_t size; /* Capture current state */ *kev = kn->kn_kevent; @@ -924,8 +921,8 @@ filt_machportprocess( * provided, just force a MACH_RCV_TOO_LARGE to detect the * name of the port and size of the waiting message.
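 *
 * Probe sketch (condensed from the code below): with no buffer, every
 * pending message is "too large", and the failure itself carries the
 * answers that get copied out further down:
 *
 *	option = MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY;
 *	size = 0;                                  -- no buffer, receive must fail
 *	...
 *	kev->ext[1] = self->ith_msize;             -- size of the waiting message
 *	kev->data = self->ith_receiver_name;       -- member port, if a set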
*/ - option = kn->kn_sfflags & (MACH_RCV_MSG|MACH_RCV_LARGE|MACH_RCV_LARGE_IDENTITY| - MACH_RCV_TRAILER_MASK|MACH_RCV_VOUCHER); + option = kn->kn_sfflags & (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | + MACH_RCV_TRAILER_MASK | MACH_RCV_VOUCHER); if (option & MACH_RCV_MSG) { addr = (mach_vm_address_t) kn->kn_ext[0]; @@ -941,8 +938,9 @@ filt_machportprocess( addr = (mach_vm_address_t)process_data->fp_data_out; size = (mach_msg_size_t)process_data->fp_data_resid; option |= (MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY); - if (process_data->fp_flags & KEVENT_FLAG_STACK_DATA) + if (process_data->fp_flags & KEVENT_FLAG_STACK_DATA) { option |= MACH_RCV_STACK; + } } } else { /* just detect the port name (if a set) and size of the first message */ @@ -975,12 +973,12 @@ filt_machportprocess( self->ith_knote = kn; wresult = ipc_mqueue_receive_on_thread( - mqueue, - option, - size, /* max_size */ - 0, /* immediate timeout */ - THREAD_INTERRUPTIBLE, - self); + mqueue, + option, + size, /* max_size */ + 0, /* immediate timeout */ + THREAD_INTERRUPTIBLE, + self); /* mqueue unlocked */ /* @@ -1024,10 +1022,11 @@ filt_machportprocess( */ if (kev->fflags == MACH_RCV_TOO_LARGE) { kev->ext[1] = self->ith_msize; - if (option & MACH_RCV_LARGE_IDENTITY) + if (option & MACH_RCV_LARGE_IDENTITY) { kev->data = self->ith_receiver_name; - else + } else { kev->data = MACH_PORT_NULL; + } } else { kev->ext[1] = size; kev->data = MACH_PORT_NULL; @@ -1047,7 +1046,7 @@ filt_machportprocess( } else { assert(option & MACH_RCV_STACK); kev->ext[0] = process_data->fp_data_out + - process_data->fp_data_resid; + process_data->fp_data_resid; } } @@ -1060,7 +1059,7 @@ filt_machportprocess( */ if (kev->fflags == MACH_MSG_SUCCESS) { kev->ext[2] = ((uint64_t)self->ith_qos << 32) | - (uint64_t)self->ith_qos_override; + (uint64_t)self->ith_qos_override; } return FILTER_ACTIVE; diff --git a/osfmk/ipc/ipc_pset.h b/osfmk/ipc/ipc_pset.h index 42008febf..f0e5df942 100644 --- a/osfmk/ipc/ipc_pset.h +++ b/osfmk/ipc/ipc_pset.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * Definitions for port sets. */ -#ifndef _IPC_IPC_PSET_H_ +#ifndef _IPC_IPC_PSET_H_ #define _IPC_IPC_PSET_H_ #include @@ -75,73 +75,73 @@ #include struct ipc_pset { - /* * Initial sub-structure in common with all ipc_objects. */ - struct ipc_object ips_object; - struct ipc_mqueue ips_messages; + struct ipc_object ips_object; + struct ipc_mqueue ips_messages; }; -#define ips_references ips_object.io_references +#define ips_references ips_object.io_references -#define ips_active(pset) io_active(&(pset)->ips_object) -#define ips_lock(pset) io_lock(&(pset)->ips_object) -#define ips_lock_try(pset) io_lock_try(&(pset)->ips_object) -#define ips_lock_held_kdp(pset) io_lock_held_kdp(&(pset)->ips_object) -#define ips_unlock(pset) io_unlock(&(pset)->ips_object) -#define ips_reference(pset) io_reference(&(pset)->ips_object) -#define ips_release(pset) io_release(&(pset)->ips_object) +#define ips_active(pset) io_active(&(pset)->ips_object) +#define ips_lock(pset) io_lock(&(pset)->ips_object) +#define ips_lock_try(pset) io_lock_try(&(pset)->ips_object) +#define ips_lock_held_kdp(pset) io_lock_held_kdp(&(pset)->ips_object) +#define ips_unlock(pset) io_unlock(&(pset)->ips_object) +#define ips_reference(pset) io_reference(&(pset)->ips_object) +#define ips_release(pset) io_release(&(pset)->ips_object) /* get an ipc_pset pointer from an ipc_mqueue pointer */ -#define ips_from_mq(mq) \ - __container_of(mq, struct ipc_pset, ips_messages) +#define ips_from_mq(mq) \ + __container_of(mq, struct ipc_pset, ips_messages) /* Allocate a port set */ extern kern_return_t ipc_pset_alloc( - ipc_space_t space, - mach_port_name_t *namep, - ipc_pset_t *psetp); + ipc_space_t space, + mach_port_name_t *namep, + ipc_pset_t *psetp); /* Allocate a port set, with a specific name */ extern kern_return_t ipc_pset_alloc_name( - ipc_space_t space, - mach_port_name_t name, - ipc_pset_t *psetp); + ipc_space_t space, + mach_port_name_t name, + ipc_pset_t *psetp); /* Allocate a port set in a special space */ extern ipc_pset_t ipc_pset_alloc_special( - ipc_space_t space); + ipc_space_t space); /* Add a port to a port set */ extern kern_return_t ipc_pset_add( - ipc_pset_t pset, - ipc_port_t port, - uint64_t *reserved_link, - uint64_t *reserved_prepost); + ipc_pset_t pset, + ipc_port_t port, + uint64_t *reserved_link, + uint64_t *reserved_prepost); /* determine if port is a member of set */ extern boolean_t ipc_pset_member( - ipc_pset_t pset, - ipc_port_t port); + ipc_pset_t pset, + ipc_port_t port); /* Remove a port from a port set */ extern kern_return_t ipc_pset_remove( - ipc_pset_t pset, - ipc_port_t port); + 
ipc_pset_t pset, + ipc_port_t port); /* lazily initialize the wqset of a port set */ extern kern_return_t ipc_pset_lazy_allocate( - ipc_space_t space, + ipc_space_t space, mach_port_name_t psname); /* Remove a port from all its current port sets */ extern kern_return_t ipc_pset_remove_from_all( - ipc_port_t port); + ipc_port_t port); /* Destroy a port_set */ extern void ipc_pset_destroy( - ipc_pset_t pset); + ipc_space_t space, + ipc_pset_t pset); #if MACH_KERNEL_PRIVATE extern struct turnstile *filt_machport_kqueue_turnstile( @@ -152,7 +152,7 @@ extern struct turnstile *filt_machport_stashed_special_reply_port_turnstile( extern void filt_machport_turnstile_prepare_lazily( struct knote *kn, - mach_msg_type_name_t msgt_name, + mach_msg_type_name_t msgt_name, ipc_port_t port); extern struct turnstile *filt_machport_stash_port( @@ -161,4 +161,4 @@ extern struct turnstile *filt_machport_stash_port( int *link); #endif -#endif /* _IPC_IPC_PSET_H_ */ +#endif /* _IPC_IPC_PSET_H_ */ diff --git a/osfmk/ipc/ipc_right.c b/osfmk/ipc/ipc_right.c index d1925e69c..08a79c300 100644 --- a/osfmk/ipc/ipc_right.c +++ b/osfmk/ipc/ipc_right.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -104,9 +104,9 @@ kern_return_t ipc_right_lookup_write( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t *entryp) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t *entryp) { ipc_entry_t entry; @@ -143,11 +143,11 @@ ipc_right_lookup_write( kern_return_t ipc_right_lookup_two_write( - ipc_space_t space, - mach_port_name_t name1, - ipc_entry_t *entryp1, - mach_port_name_t name2, - ipc_entry_t *entryp2) + ipc_space_t space, + mach_port_name_t name1, + ipc_entry_t *entryp1, + mach_port_name_t name2, + ipc_entry_t *entryp2) { ipc_entry_t entry1; ipc_entry_t entry2; @@ -190,10 +190,10 @@ ipc_right_lookup_two_write( boolean_t ipc_right_reverse( - ipc_space_t space, - ipc_object_t object, - mach_port_name_t *namep, - ipc_entry_t *entryp) + ipc_space_t space, + ipc_object_t object, + mach_port_name_t *namep, + ipc_entry_t *entryp) { ipc_port_t port; mach_port_name_t name; @@ -262,12 +262,12 @@ ipc_right_reverse( kern_return_t ipc_right_request_alloc( - ipc_space_t space, - mach_port_name_t name, - boolean_t immediate, - boolean_t send_possible, - ipc_port_t notify, - ipc_port_t *previousp) + ipc_space_t space, + mach_port_name_t name, + boolean_t immediate, + boolean_t send_possible, + ipc_port_t notify, + ipc_port_t *previousp) { ipc_port_request_index_t prev_request; ipc_port_t previous = IP_NULL; @@ -282,11 +282,12 @@ ipc_right_request_alloc( ipc_port_t port = IP_NULL; kr = ipc_right_lookup_write(space, name, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is write-locked and active */ - + prev_request = entry->ie_request; /* if nothing to do or undo, we're done */ @@ -303,13 +304,14 @@ ipc_right_request_alloc( port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (!ipc_right_check(space, port, name, entry)) { + if (!ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { /* port is locked and active */ /* if no new request, just cancel previous */ if (notify == IP_NULL) { - if (prev_request != IE_REQ_NONE) + if (prev_request != IE_REQ_NONE) { previous = ipc_port_request_cancel(port, name, prev_request); + } ip_unlock(port); entry->ie_request = IE_REQ_NONE; ipc_entry_modified(space, name, entry); @@ -323,9 +325,10 @@ ipc_right_request_alloc( */ if (send_possible && immediate && ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) || - port->ip_receiver == ipc_space_kernel || !ip_full(port))) { - if (prev_request != IE_REQ_NONE) + port->ip_receiver == ipc_space_kernel || !ip_full(port))) { + if (prev_request != IE_REQ_NONE) { previous = ipc_port_request_cancel(port, name, prev_request); + } ip_unlock(port); entry->ie_request = IE_REQ_NONE; ipc_entry_modified(space, name, entry); @@ -339,17 +342,18 @@ ipc_right_request_alloc( * If there is a previous request, free it. Any subsequent * allocation cannot fail, thus assuring an atomic swap. 
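 *
 * Shape of the swap (condensed from the code below): cancel first, then
 * allocate; a full table grows and the loop restarts, so no caller ever
 * observes a cancelled-but-unreplaced slot:
 *
 *	previous = ipc_port_request_cancel(port, name, prev_request);
 *	kr = ipc_port_request_alloc(port, name, notify, ...);
 *	if (kr != KERN_SUCCESS) {
 *		kr = ipc_port_request_grow(port, ITS_SIZE_NONE);
 *		continue;                          -- retry from the top
 *	}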
*/ - if (prev_request != IE_REQ_NONE) + if (prev_request != IE_REQ_NONE) { previous = ipc_port_request_cancel(port, name, prev_request); + } #if IMPORTANCE_INHERITANCE kr = ipc_port_request_alloc(port, name, notify, - send_possible, immediate, - &new_request, &needboost); + send_possible, immediate, + &new_request, &needboost); #else kr = ipc_port_request_alloc(port, name, notify, - send_possible, immediate, - &new_request); + send_possible, immediate, + &new_request); #endif /* IMPORTANCE_INHERITANCE */ if (kr != KERN_SUCCESS) { assert(previous == IP_NULL); @@ -358,8 +362,9 @@ ipc_right_request_alloc( kr = ipc_port_request_grow(port, ITS_SIZE_NONE); /* port is unlocked */ - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } continue; } @@ -372,16 +377,16 @@ ipc_right_request_alloc( #if IMPORTANCE_INHERITANCE if (needboost == TRUE) { - if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) + if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) { ip_unlock(port); + } } else #endif /* IMPORTANCE_INHERITANCE */ - ip_unlock(port); + ip_unlock(port); break; } /* entry may have changed to dead-name by ipc_right_check() */ - } /* treat send_possible requests as immediate w.r.t. dead-name */ if (send_possible && immediate && dead) { assert(urefs > 0); /* leave urefs pegged to maximum if it overflowed */ - if (urefs < MACH_PORT_UREFS_MAX) + if (urefs < MACH_PORT_UREFS_MAX) { (entry->ie_bits)++; /* increment urefs */ - + } ipc_entry_modified(space, name, entry); is_write_unlock(space); - if (port != IP_NULL) + if (port != IP_NULL) { ip_release(port); + } ipc_notify_dead_name(notify, name); previous = IP_NULL; @@ -412,8 +418,9 @@ ipc_right_request_alloc( is_write_unlock(space); - if (port != IP_NULL) + if (port != IP_NULL) { ip_release(port); + } return kr; } @@ -434,18 +441,19 @@ ipc_right_request_alloc( ipc_port_t ipc_right_request_cancel( - __unused ipc_space_t space, - ipc_port_t port, - mach_port_name_t name, - ipc_entry_t entry) + __unused ipc_space_t space, + ipc_port_t port, + mach_port_name_t name, + ipc_entry_t entry) { ipc_port_t previous; assert(ip_active(port)); assert(port == (ipc_port_t) entry->ie_object); - if (entry->ie_request == IE_REQ_NONE) + if (entry->ie_request == IE_REQ_NONE) { return IP_NULL; + } previous = ipc_port_request_cancel(port, name, entry->ie_request); entry->ie_request = IE_REQ_NONE; @@ -465,9 +473,9 @@ ipc_right_request_cancel( boolean_t ipc_right_inuse( - ipc_space_t space, - __unused mach_port_name_t name, - ipc_entry_t entry) + ipc_space_t space, + __unused mach_port_name_t name, + ipc_entry_t entry) { if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE) { is_write_unlock(space); @@ -480,10 +488,12 @@ ipc_right_inuse( * Routine: ipc_right_check * Purpose: * Check if the port has died. If it has, + * and IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not + * passed and it is not a send-once right, then * clean up the entry and return TRUE. * Conditions: * The space is write-locked; the port is not locked. - * If returns FALSE, the port is also locked and active. + * If returns FALSE, the port is also locked. * Otherwise, entry is converted to a dead name.
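 *
 * Typical caller shape (sketch, mirroring the call sites in this file):
 *
 *	if (ipc_right_check(space, port, name, entry,
 *	        IPC_RIGHT_COPYIN_FLAGS_NONE)) {
 *		goto dead_name;                    -- entry rewritten as a dead name
 *	}
 *	-- port is locked here; with ALLOW_DEAD_SEND_ONCE it may be inactive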
* * Caller is responsible for a reference to port if it @@ -492,10 +502,11 @@ ipc_right_inuse( boolean_t ipc_right_check( - ipc_space_t space, - ipc_port_t port, - mach_port_name_t name, - ipc_entry_t entry) + ipc_space_t space, + ipc_port_t port, + mach_port_name_t name, + ipc_entry_t entry, + ipc_right_copyin_flags_t flags) { ipc_entry_bits_t bits; @@ -503,8 +514,12 @@ ipc_right_check( assert(port == (ipc_port_t) entry->ie_object); ip_lock(port); - if (ip_active(port)) + if (ip_active(port) || + ((flags & IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) && + entry->ie_request == IE_REQ_NONE && + (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) { return FALSE; + } /* this was either a pure send right or a send-once right */ @@ -513,16 +528,16 @@ ipc_right_check( assert(IE_BITS_UREFS(bits) > 0); if (bits & MACH_PORT_TYPE_SEND) { - assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND); + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND); assert(IE_BITS_UREFS(bits) > 0); assert(port->ip_srights > 0); port->ip_srights--; - } else { - assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE); - assert(IE_BITS_UREFS(bits) == 1); + } else { + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE); + assert(IE_BITS_UREFS(bits) == 1); assert(port->ip_sorights > 0); port->ip_sorights--; - } + } ip_unlock(port); /* @@ -534,8 +549,8 @@ ipc_right_check( } /* convert entry to dead name */ - bits = (bits &~ IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME; - + bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME; + /* * If there was a notification request outstanding on this * name, and the port went dead, that notification @@ -555,10 +570,11 @@ ipc_right_check( if (entry->ie_request != IE_REQ_NONE) { if (ipc_port_request_type(port, name, entry->ie_request) != 0) { /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { bits++; /* increment urefs */ + } } - entry->ie_request = IE_REQ_NONE; + entry->ie_request = IE_REQ_NONE; } entry->ie_bits = bits; entry->ie_object = IO_NULL; @@ -578,9 +594,9 @@ ipc_right_check( void ipc_right_terminate( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry) { ipc_entry_bits_t bits; mach_port_type_t type; @@ -600,12 +616,12 @@ ipc_right_terminate( */ switch (type) { - case MACH_PORT_TYPE_DEAD_NAME: + case MACH_PORT_TYPE_DEAD_NAME: assert(entry->ie_request == IE_REQ_NONE); assert(entry->ie_object == IO_NULL); break; - case MACH_PORT_TYPE_PORT_SET: { + case MACH_PORT_TYPE_PORT_SET: { ipc_pset_t pset = (ipc_pset_t) entry->ie_object; assert(entry->ie_request == IE_REQ_NONE); @@ -613,14 +629,14 @@ ipc_right_terminate( ips_lock(pset); assert(ips_active(pset)); - ipc_pset_destroy(pset); /* consumes ref, unlocks */ + ipc_pset_destroy(space, pset); /* consumes ref, unlocks */ break; - } + } - case MACH_PORT_TYPE_SEND: - case MACH_PORT_TYPE_RECEIVE: - case MACH_PORT_TYPE_SEND_RECEIVE: - case MACH_PORT_TYPE_SEND_ONCE: { + case MACH_PORT_TYPE_SEND: + case MACH_PORT_TYPE_RECEIVE: + case MACH_PORT_TYPE_SEND_RECEIVE: + case MACH_PORT_TYPE_SEND_ONCE: { ipc_port_t port = (ipc_port_t) entry->ie_object; ipc_port_t request; ipc_port_t nsrequest = IP_NULL; @@ -635,8 +651,8 @@ ipc_right_terminate( break; } - request = ipc_right_request_cancel_macro(space, port, - name, entry); + request = ipc_right_request_cancel_macro(space, port, + name, entry); if (type & MACH_PORT_TYPE_SEND) { assert(port->ip_srights > 0); @@ 
-655,7 +671,6 @@ ipc_right_terminate( assert(port->ip_receiver == space); ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */ - } else if (type & MACH_PORT_TYPE_SEND_ONCE) { assert(port->ip_sorights > 0); ip_unlock(port); @@ -665,18 +680,20 @@ ipc_right_terminate( assert(port->ip_receiver != space); ip_unlock(port); - ip_release(port); + ip_release(port); } - if (nsrequest != IP_NULL) + if (nsrequest != IP_NULL) { ipc_notify_no_senders(nsrequest, mscount); + } - if (request != IP_NULL) + if (request != IP_NULL) { ipc_notify_port_deleted(request, name); + } break; - } + } - default: + default: panic("ipc_right_terminate: strange type - 0x%x", type); } } @@ -694,11 +711,11 @@ ipc_right_terminate( kern_return_t ipc_right_destroy( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - boolean_t check_guard, - uint64_t guard) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + boolean_t check_guard, + uint64_t guard) { ipc_entry_bits_t bits; mach_port_type_t type; @@ -710,7 +727,7 @@ ipc_right_destroy( assert(is_active(space)); switch (type) { - case MACH_PORT_TYPE_DEAD_NAME: + case MACH_PORT_TYPE_DEAD_NAME: assert(entry->ie_request == IE_REQ_NONE); assert(entry->ie_object == IO_NULL); @@ -718,7 +735,7 @@ ipc_right_destroy( is_write_unlock(space); break; - case MACH_PORT_TYPE_PORT_SET: { + case MACH_PORT_TYPE_PORT_SET: { ipc_pset_t pset = (ipc_pset_t) entry->ie_object; assert(entry->ie_request == IE_REQ_NONE); @@ -731,14 +748,14 @@ ipc_right_destroy( is_write_unlock(space); assert(ips_active(pset)); - ipc_pset_destroy(pset); /* consumes ref, unlocks */ + ipc_pset_destroy(space, pset); /* consumes ref, unlocks */ break; - } + } - case MACH_PORT_TYPE_SEND: - case MACH_PORT_TYPE_RECEIVE: - case MACH_PORT_TYPE_SEND_RECEIVE: - case MACH_PORT_TYPE_SEND_ONCE: { + case MACH_PORT_TYPE_SEND: + case MACH_PORT_TYPE_RECEIVE: + case MACH_PORT_TYPE_SEND_RECEIVE: + case MACH_PORT_TYPE_SEND_ONCE: { ipc_port_t port = (ipc_port_t) entry->ie_object; ipc_port_t nsrequest = IP_NULL; mach_port_mscount_t mscount = 0; @@ -746,9 +763,10 @@ ipc_right_destroy( assert(port != IP_NULL); - if (type == MACH_PORT_TYPE_SEND) + if (type == MACH_PORT_TYPE_SEND) { ipc_hash_delete(space, (ipc_object_t) port, - name, entry); + name, entry); + } ip_lock(port); @@ -773,7 +791,7 @@ ipc_right_destroy( is_write_unlock(space); /* Raise mach port guard exception */ mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY); - return KERN_INVALID_RIGHT; + return KERN_INVALID_RIGHT; } @@ -799,7 +817,6 @@ ipc_right_destroy( assert(port->ip_receiver == space); ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */ - } else if (type & MACH_PORT_TYPE_SEND_ONCE) { assert(port->ip_sorights > 0); ip_unlock(port); @@ -812,17 +829,19 @@ ipc_right_destroy( ip_release(port); } - if (nsrequest != IP_NULL) + if (nsrequest != IP_NULL) { ipc_notify_no_senders(nsrequest, mscount); + } - if (request != IP_NULL) + if (request != IP_NULL) { ipc_notify_port_deleted(request, name); + } break; - } + } - default: + default: panic("ipc_right_destroy: strange type"); } @@ -845,9 +864,9 @@ ipc_right_destroy( kern_return_t ipc_right_dealloc( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry) { ipc_port_t port = IP_NULL; ipc_entry_bits_t bits; @@ -860,7 +879,7 @@ ipc_right_dealloc( assert(is_active(space)); switch (type) { - case MACH_PORT_TYPE_PORT_SET: { + case MACH_PORT_TYPE_PORT_SET: { ipc_pset_t pset; 
assert(IE_BITS_UREFS(bits) == 0); @@ -876,12 +895,12 @@ ipc_right_dealloc( assert(ips_active(pset)); is_write_unlock(space); - ipc_pset_destroy(pset); /* consumes ref, unlocks */ + ipc_pset_destroy(space, pset); /* consumes ref, unlocks */ break; - } + } - case MACH_PORT_TYPE_DEAD_NAME: { - dead_name: + case MACH_PORT_TYPE_DEAD_NAME: { +dead_name: assert(IE_BITS_UREFS(bits) > 0); assert(entry->ie_request == IE_REQ_NONE); @@ -891,19 +910,21 @@ ipc_right_dealloc( ipc_entry_dealloc(space, name, entry); } else { /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) - entry->ie_bits = bits-1; /* decrement urefs */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { + entry->ie_bits = bits - 1; /* decrement urefs */ + } ipc_entry_modified(space, name, entry); } is_write_unlock(space); /* release any port that got converted to dead name below */ - if (port != IP_NULL) + if (port != IP_NULL) { ip_release(port); + } break; - } + } - case MACH_PORT_TYPE_SEND_ONCE: { + case MACH_PORT_TYPE_SEND_ONCE: { ipc_port_t request; assert(IE_BITS_UREFS(bits) == 1); @@ -911,8 +932,7 @@ ipc_right_dealloc( port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (ipc_right_check(space, port, name, entry)) { - + if (ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { bits = entry->ie_bits; assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); goto dead_name; /* it will release port */ @@ -931,12 +951,13 @@ ipc_right_dealloc( ipc_notify_send_once(port); - if (request != IP_NULL) + if (request != IP_NULL) { ipc_notify_port_deleted(request, name); + } break; - } + } - case MACH_PORT_TYPE_SEND: { + case MACH_PORT_TYPE_SEND: { ipc_port_t request = IP_NULL; ipc_port_t nsrequest = IP_NULL; mach_port_mscount_t mscount = 0; @@ -947,7 +968,7 @@ ipc_right_dealloc( port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (ipc_right_check(space, port, name, entry)) { + if (ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { bits = entry->ie_bits; assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); goto dead_name; /* it will release port */ @@ -966,34 +987,36 @@ ipc_right_dealloc( } request = ipc_right_request_cancel_macro(space, port, - name, entry); + name, entry); ipc_hash_delete(space, (ipc_object_t) port, - name, entry); + name, entry); ip_unlock(port); entry->ie_object = IO_NULL; ipc_entry_dealloc(space, name, entry); is_write_unlock(space); ip_release(port); - } else { ip_unlock(port); /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) - entry->ie_bits = bits-1; /* decrement urefs */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { + entry->ie_bits = bits - 1; /* decrement urefs */ + } ipc_entry_modified(space, name, entry); is_write_unlock(space); } - if (nsrequest != IP_NULL) + if (nsrequest != IP_NULL) { ipc_notify_no_senders(nsrequest, mscount); + } - if (request != IP_NULL) + if (request != IP_NULL) { ipc_notify_port_deleted(request, name); + } break; - } + } - case MACH_PORT_TYPE_SEND_RECEIVE: { + case MACH_PORT_TYPE_SEND_RECEIVE: { ipc_port_t nsrequest = IP_NULL; mach_port_mscount_t mscount = 0; @@ -1017,12 +1040,12 @@ ipc_right_dealloc( } } - entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK | - MACH_PORT_TYPE_SEND); + entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | + MACH_PORT_TYPE_SEND); } else { /* if urefs are pegged due to overflow, leave them pegged */ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { - entry->ie_bits = bits-1; /* 
decrement urefs */ + entry->ie_bits = bits - 1; /* decrement urefs */ } } ip_unlock(port); @@ -1030,12 +1053,13 @@ ipc_right_dealloc( ipc_entry_modified(space, name, entry); is_write_unlock(space); - if (nsrequest != IP_NULL) + if (nsrequest != IP_NULL) { ipc_notify_no_senders(nsrequest, mscount); + } break; - } + } - default: + default: is_write_unlock(space); mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT); return KERN_INVALID_RIGHT; @@ -1060,11 +1084,11 @@ ipc_right_dealloc( kern_return_t ipc_right_delta( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_port_right_t right, - mach_port_delta_t delta) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_right_t right, + mach_port_delta_t delta) { ipc_port_t port = IP_NULL; ipc_entry_bits_t bits; @@ -1085,7 +1109,7 @@ ipc_right_delta( /* Rights-specific restrictions and operations. */ switch (right) { - case MACH_PORT_RIGHT_PORT_SET: { + case MACH_PORT_RIGHT_PORT_SET: { ipc_pset_t pset; if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) { @@ -1097,11 +1121,13 @@ ipc_right_delta( assert(IE_BITS_UREFS(bits) == 0); assert(entry->ie_request == IE_REQ_NONE); - if (delta == 0) + if (delta == 0) { goto success; + } - if (delta != -1) + if (delta != -1) { goto invalid_value; + } pset = (ipc_pset_t) entry->ie_object; assert(pset != IPS_NULL); @@ -1113,11 +1139,11 @@ ipc_right_delta( assert(ips_active(pset)); is_write_unlock(space); - ipc_pset_destroy(pset); /* consumes ref, unlocks */ + ipc_pset_destroy(space, pset); /* consumes ref, unlocks */ break; - } + } - case MACH_PORT_RIGHT_RECEIVE: { + case MACH_PORT_RIGHT_RECEIVE: { ipc_port_t request = IP_NULL; if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) { @@ -1125,11 +1151,13 @@ ipc_right_delta( goto invalid_right; } - if (delta == 0) + if (delta == 0) { goto success; + } - if (delta != -1) + if (delta != -1) { goto invalid_value; + } port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); @@ -1144,9 +1172,9 @@ ipc_right_delta( assert(ip_active(port)); assert(port->ip_receiver_name == name); assert(port->ip_receiver == space); - + /* Mach Port Guard Checking */ - if(port->ip_guarded) { + if (port->ip_guarded) { uint64_t portguard = port->ip_context; ip_unlock(port); is_write_unlock(space); @@ -1154,10 +1182,10 @@ ipc_right_delta( mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS); goto guard_failure; } - + if (bits & MACH_PORT_TYPE_SEND) { assert(IE_BITS_TYPE(bits) == - MACH_PORT_TYPE_SEND_RECEIVE); + MACH_PORT_TYPE_SEND_RECEIVE); assert(IE_BITS_UREFS(bits) > 0); assert(port->ip_srights > 0); @@ -1190,8 +1218,9 @@ ipc_right_delta( if (entry->ie_request) { entry->ie_request = IE_REQ_NONE; /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { bits++; /* increment urefs */ + } } entry->ie_bits = bits; entry->ie_object = IO_NULL; @@ -1202,24 +1231,26 @@ ipc_right_delta( assert(IE_BITS_UREFS(bits) == 0); request = ipc_right_request_cancel_macro(space, port, - name, entry); + name, entry); entry->ie_object = IO_NULL; ipc_entry_dealloc(space, name, entry); } is_write_unlock(space); - ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */ + ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */ - if (request != IP_NULL) + if (request != IP_NULL) { ipc_notify_port_deleted(request, name); + } break; - } + } - case MACH_PORT_RIGHT_SEND_ONCE: { + case MACH_PORT_RIGHT_SEND_ONCE: { ipc_port_t request; - if 
((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) + if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) { goto invalid_right; + } assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE); assert(IE_BITS_UREFS(bits) == 1); @@ -1227,7 +1258,7 @@ ipc_right_delta( port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (ipc_right_check(space, port, name, entry)) { + if (ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE)); mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT); goto invalid_right; @@ -1256,21 +1287,21 @@ ipc_right_delta( ipc_notify_send_once(port); - if (request != IP_NULL) + if (request != IP_NULL) { ipc_notify_port_deleted(request, name); + } break; - } + } - case MACH_PORT_RIGHT_DEAD_NAME: { + case MACH_PORT_RIGHT_DEAD_NAME: { ipc_port_t relport = IP_NULL; mach_port_urefs_t urefs; if (bits & MACH_PORT_TYPE_SEND_RIGHTS) { - port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (!ipc_right_check(space, port, name, entry)) { + if (!ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { /* port is locked and active */ ip_unlock(port); port = IP_NULL; @@ -1290,7 +1321,7 @@ ipc_right_delta( assert(entry->ie_object == IO_NULL); assert(entry->ie_request == IE_REQ_NONE); - if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) || + if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) || delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) { goto invalid_value; } @@ -1303,11 +1334,13 @@ ipc_right_delta( * only a delta removing all refs at once can change it */ - if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) + if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) { delta = 0; + } } else { - if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) + if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) { goto invalid_value; + } if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) { /* leave urefs pegged to maximum if it overflowed */ delta = MACH_PORT_UREFS_MAX - urefs; @@ -1323,13 +1356,14 @@ ipc_right_delta( is_write_unlock(space); - if (relport != IP_NULL) + if (relport != IP_NULL) { ip_release(relport); + } break; - } + } - case MACH_PORT_RIGHT_SEND: { + case MACH_PORT_RIGHT_SEND: { mach_port_urefs_t urefs; ipc_port_t request = IP_NULL; ipc_port_t nsrequest = IP_NULL; @@ -1349,7 +1383,7 @@ ipc_right_delta( port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (ipc_right_check(space, port, name, entry)) { + if (ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0); goto invalid_right; } @@ -1357,7 +1391,7 @@ ipc_right_delta( assert(port->ip_srights > 0); - if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) || + if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) || delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) { ip_unlock(port); goto invalid_value; @@ -1371,8 +1405,9 @@ ipc_right_delta( * only a delta removing all refs at once can change it */ - if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) + if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) { delta = 0; + } } else { if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) { ip_unlock(port); @@ -1398,19 +1433,19 @@ ipc_right_delta( assert(port->ip_receiver == space); ip_unlock(port); assert(IE_BITS_TYPE(bits) == - MACH_PORT_TYPE_SEND_RECEIVE); + MACH_PORT_TYPE_SEND_RECEIVE); - entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK| - MACH_PORT_TYPE_SEND); + entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | + MACH_PORT_TYPE_SEND); 
ipc_entry_modified(space, name, entry); } else { assert(IE_BITS_TYPE(bits) == - MACH_PORT_TYPE_SEND); + MACH_PORT_TYPE_SEND); request = ipc_right_request_cancel_macro(space, port, - name, entry); + name, entry); ipc_hash_delete(space, (ipc_object_t) port, - name, entry); + name, entry); ip_unlock(port); port_to_release = port; @@ -1428,50 +1463,54 @@ ipc_right_delta( is_write_unlock(space); - if (port_to_release != IP_NULL) + if (port_to_release != IP_NULL) { ip_release(port_to_release); + } - if (nsrequest != IP_NULL) + if (nsrequest != IP_NULL) { ipc_notify_no_senders(nsrequest, mscount); + } - if (request != IP_NULL) + if (request != IP_NULL) { ipc_notify_port_deleted(request, name); + } break; - } + } - case MACH_PORT_RIGHT_LABELH: + case MACH_PORT_RIGHT_LABELH: goto invalid_right; - default: + default: panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p", - right, name, (void *)entry, (void *)space); + right, name, (void *)entry, (void *)space); } return KERN_SUCCESS; - success: +success: is_write_unlock(space); return KERN_SUCCESS; - invalid_right: +invalid_right: is_write_unlock(space); - if (port != IP_NULL) + if (port != IP_NULL) { ip_release(port); + } return KERN_INVALID_RIGHT; - invalid_value: +invalid_value: is_write_unlock(space); mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE); return KERN_INVALID_VALUE; - guard_failure: +guard_failure: return KERN_INVALID_RIGHT; } /* * Routine: ipc_right_destruct * Purpose: - * Deallocates the receive right and modifies the + * Deallocates the receive right and modifies the * user-reference count for the send rights as requested. * Conditions: * The space is write-locked, and is unlocked upon return. @@ -1484,11 +1523,11 @@ ipc_right_delta( kern_return_t ipc_right_destruct( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_port_delta_t srdelta, - uint64_t guard) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_delta_t srdelta, + uint64_t guard) { ipc_port_t port = IP_NULL; ipc_entry_bits_t bits; @@ -1509,8 +1548,9 @@ ipc_right_destruct( return KERN_INVALID_RIGHT; } - if (srdelta > 0) + if (srdelta > 0) { goto invalid_value; + } port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); @@ -1521,7 +1561,7 @@ ipc_right_destruct( assert(port->ip_receiver == space); /* Mach Port Guard Checking */ - if(port->ip_guarded && (guard != port->ip_context)) { + if (port->ip_guarded && (guard != port->ip_context)) { uint64_t portguard = port->ip_context; ip_unlock(port); is_write_unlock(space); @@ -1537,7 +1577,6 @@ ipc_right_destruct( */ if (srdelta) { - assert(port->ip_srights > 0); urefs = IE_BITS_UREFS(bits); @@ -1556,8 +1595,9 @@ ipc_right_destruct( * urefs are pegged due to an overflow * only a delta removing all refs at once can change it */ - if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) + if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) { srdelta = 0; + } } if ((urefs + srdelta) == 0) { @@ -1569,8 +1609,8 @@ ipc_right_destruct( } } assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE); - entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK| - MACH_PORT_TYPE_SEND); + entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | + MACH_PORT_TYPE_SEND); } else { entry->ie_bits = bits + srdelta; } @@ -1614,8 +1654,9 @@ ipc_right_destruct( bits |= MACH_PORT_TYPE_DEAD_NAME; if (entry->ie_request) { entry->ie_request = IE_REQ_NONE; - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { bits++; /* increment urefs 
*/ + } } entry->ie_bits = bits; entry->ie_object = IO_NULL; @@ -1625,7 +1666,7 @@ ipc_right_destruct( assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE); assert(IE_BITS_UREFS(bits) == 0); request = ipc_right_request_cancel_macro(space, port, - name, entry); + name, entry); entry->ie_object = IO_NULL; ipc_entry_dealloc(space, name, entry); } @@ -1633,17 +1674,19 @@ ipc_right_destruct( /* Unlock space */ is_write_unlock(space); - if (nsrequest != IP_NULL) + if (nsrequest != IP_NULL) { ipc_notify_no_senders(nsrequest, mscount); + } - ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */ + ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */ - if (request != IP_NULL) + if (request != IP_NULL) { ipc_notify_port_deleted(request, name); - + } + return KERN_SUCCESS; - - invalid_value: + +invalid_value: is_write_unlock(space); mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE); return KERN_INVALID_VALUE; @@ -1663,11 +1706,11 @@ ipc_right_destruct( kern_return_t ipc_right_info( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_port_type_t *typep, - mach_port_urefs_t *urefsp) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_type_t *typep, + mach_port_urefs_t *urefsp) { ipc_port_t port; ipc_entry_bits_t bits; @@ -1688,16 +1731,16 @@ ipc_right_info( ip_unlock(port); } is_write_unlock(space); - } else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) { /* * validate port is still alive - if so, get request * types while we still have it locked. Otherwise, * recapture the (now dead) bits. */ - if (!ipc_right_check(space, port, name, entry)) { - if (request != IE_REQ_NONE) + if (!ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { + if (request != IE_REQ_NONE) { type |= ipc_port_request_type(port, name, request); + } ip_unlock(port); is_write_unlock(space); } else { @@ -1727,47 +1770,53 @@ ipc_right_info( boolean_t ipc_right_copyin_check( - __assert_only ipc_space_t space, - __unused mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_name) + __assert_only ipc_space_t space, + __unused mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name) { ipc_entry_bits_t bits; ipc_port_t port; - bits= entry->ie_bits; + bits = entry->ie_bits; assert(is_active(space)); switch (msgt_name) { - case MACH_MSG_TYPE_MAKE_SEND: - if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + case MACH_MSG_TYPE_MAKE_SEND: + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) { return FALSE; + } break; - case MACH_MSG_TYPE_MAKE_SEND_ONCE: - if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + case MACH_MSG_TYPE_MAKE_SEND_ONCE: + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) { return FALSE; + } break; - case MACH_MSG_TYPE_MOVE_RECEIVE: - if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + case MACH_MSG_TYPE_MOVE_RECEIVE: + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) { return FALSE; - if (io_kotype(entry->ie_object) != IKOT_NONE) + } + if (io_kotype(entry->ie_object) != IKOT_NONE) { return FALSE; + } port = (ipc_port_t) entry->ie_object; - if (port->ip_specialreply) + if (port->ip_specialreply) { return FALSE; + } break; - case MACH_MSG_TYPE_COPY_SEND: - case MACH_MSG_TYPE_MOVE_SEND: - case MACH_MSG_TYPE_MOVE_SEND_ONCE: { - - if (bits & MACH_PORT_TYPE_DEAD_NAME) + case MACH_MSG_TYPE_COPY_SEND: + case MACH_MSG_TYPE_MOVE_SEND: + case MACH_MSG_TYPE_MOVE_SEND_ONCE: { + if (bits & MACH_PORT_TYPE_DEAD_NAME) { break; + } - if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) + if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) { return FALSE; + } 
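/*
 * For orientation, an illustrative sketch (not part of this change): the
 * msgt_name dispositions vetted by this switch are the values user space
 * encodes in a message header. Assuming `dest` names a send right and
 * `reply` a receive right in the caller's space:
 *
 *     #include <mach/mach.h>
 *
 *     static kern_return_t
 *     send_handshake(mach_port_t dest, mach_port_t reply)
 *     {
 *         mach_msg_header_t msg = {
 *             // Copy the caller's send right to the destination and
 *             // mint a send-once reply right from the receive right.
 *             .msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
 *                 MACH_MSG_TYPE_MAKE_SEND_ONCE),
 *             .msgh_size = sizeof(msg),
 *             .msgh_remote_port = dest,
 *             .msgh_local_port = reply,
 *             .msgh_id = 0x1000,    // arbitrary example id
 *         };
 *         return mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0,
 *             MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
 *     }
 *
 * Each disposition is checked here before ipc_right_copyin() actually
 * moves or copies the named right.
 */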
port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); @@ -1782,17 +1831,19 @@ ipc_right_copyin_check( } if (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE) { - if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) + if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) { return FALSE; + } } else { - if ((bits & MACH_PORT_TYPE_SEND) == 0) + if ((bits & MACH_PORT_TYPE_SEND) == 0) { return FALSE; + } } break; - } + } - default: + default: panic("ipc_right_copyin_check: strange rights"); } @@ -1824,31 +1875,32 @@ ipc_right_copyin_check( kern_return_t ipc_right_copyin( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_name, - boolean_t deadok, - ipc_object_t *objectp, - ipc_port_t *sorightp, - ipc_port_t *releasep, - int *assertcntp) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + ipc_right_copyin_flags_t flags, + ipc_object_t *objectp, + ipc_port_t *sorightp, + ipc_port_t *releasep, + int *assertcntp) { ipc_entry_bits_t bits; ipc_port_t port; *releasep = IP_NULL; *assertcntp = 0; + boolean_t deadok = (flags & IPC_RIGHT_COPYIN_FLAGS_DEADOK) ? TRUE : FALSE; bits = entry->ie_bits; assert(is_active(space)); switch (msgt_name) { - case MACH_MSG_TYPE_MAKE_SEND: { - - if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + case MACH_MSG_TYPE_MAKE_SEND: { + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) { goto invalid_right; + } port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); @@ -1866,12 +1918,12 @@ ipc_right_copyin( *objectp = (ipc_object_t) port; *sorightp = IP_NULL; break; - } - - case MACH_MSG_TYPE_MAKE_SEND_ONCE: { + } - if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + case MACH_MSG_TYPE_MAKE_SEND_ONCE: { + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) { goto invalid_right; + } port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); @@ -1888,13 +1940,14 @@ ipc_right_copyin( *objectp = (ipc_object_t) port; *sorightp = IP_NULL; break; - } + } - case MACH_MSG_TYPE_MOVE_RECEIVE: { + case MACH_MSG_TYPE_MOVE_RECEIVE: { ipc_port_t request = IP_NULL; - if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) + if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) { goto invalid_right; + } /* * Disallow moving receive-right kobjects, e.g. 
mk_timer ports @@ -1923,29 +1976,25 @@ ipc_right_copyin( if (bits & MACH_PORT_TYPE_SEND) { assert(IE_BITS_TYPE(bits) == - MACH_PORT_TYPE_SEND_RECEIVE); + MACH_PORT_TYPE_SEND_RECEIVE); assert(IE_BITS_UREFS(bits) > 0); assert(port->ip_srights > 0); ipc_hash_insert(space, (ipc_object_t) port, - name, entry); + name, entry); ip_reference(port); } else { assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE); assert(IE_BITS_UREFS(bits) == 0); request = ipc_right_request_cancel_macro(space, port, - name, entry); + name, entry); entry->ie_object = IO_NULL; } - entry->ie_bits = bits &~ MACH_PORT_TYPE_RECEIVE; + entry->ie_bits = bits & ~MACH_PORT_TYPE_RECEIVE; ipc_entry_modified(space, name, entry); (void)ipc_port_clear_receiver(port, FALSE); /* don't destroy the port/mqueue */ - imq_lock(&port->ip_messages); - port->ip_receiver_name = MACH_PORT_NULL; - port->ip_destination = IP_NULL; - imq_unlock(&port->ip_messages); #if IMPORTANCE_INHERITANCE /* @@ -1971,24 +2020,25 @@ ipc_right_copyin( *objectp = (ipc_object_t) port; *sorightp = request; break; - } - - case MACH_MSG_TYPE_COPY_SEND: { + } - if (bits & MACH_PORT_TYPE_DEAD_NAME) + case MACH_MSG_TYPE_COPY_SEND: { + if (bits & MACH_PORT_TYPE_DEAD_NAME) { goto copy_dead; + } /* allow for dead send-once rights */ - if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) + if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) { goto invalid_right; + } assert(IE_BITS_UREFS(bits) > 0); port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (ipc_right_check(space, port, name, entry)) { + if (ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { bits = entry->ie_bits; *releasep = port; goto copy_dead; @@ -2012,25 +2062,27 @@ ipc_right_copyin( *objectp = (ipc_object_t) port; *sorightp = IP_NULL; break; - } + } - case MACH_MSG_TYPE_MOVE_SEND: { + case MACH_MSG_TYPE_MOVE_SEND: { ipc_port_t request = IP_NULL; - if (bits & MACH_PORT_TYPE_DEAD_NAME) + if (bits & MACH_PORT_TYPE_DEAD_NAME) { goto move_dead; + } /* allow for dead send-once rights */ - if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) + if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) { goto invalid_right; + } assert(IE_BITS_UREFS(bits) > 0); port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (ipc_right_check(space, port, name, entry)) { + if (ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { bits = entry->ie_bits; *releasep = port; goto move_dead; @@ -2052,28 +2104,29 @@ ipc_right_copyin( assert(port->ip_receiver_name == name); assert(port->ip_receiver == space); assert(IE_BITS_TYPE(bits) == - MACH_PORT_TYPE_SEND_RECEIVE); + MACH_PORT_TYPE_SEND_RECEIVE); ip_reference(port); } else { assert(IE_BITS_TYPE(bits) == - MACH_PORT_TYPE_SEND); + MACH_PORT_TYPE_SEND); request = ipc_right_request_cancel_macro(space, port, - name, entry); + name, entry); ipc_hash_delete(space, (ipc_object_t) port, - name, entry); + name, entry); entry->ie_object = IO_NULL; /* transfer entry's reference to caller */ } - entry->ie_bits = bits &~ - (IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND); + entry->ie_bits = bits & ~ + (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND); } else { port->ip_srights++; ip_reference(port); /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) - entry->ie_bits = bits-1; /* decrement urefs */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { + entry->ie_bits = bits - 1; /* decrement urefs */ + } } ipc_entry_modified(space, name, entry); @@ -2082,30 +2135,37 @@ ipc_right_copyin( *objectp = (ipc_object_t) port; 
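/*
 * Call-shape sketch (hypothetical caller, not part of this change): the
 * former boolean `deadok` parameter is folded into the new
 * ipc_right_copyin_flags_t argument, so the dead-name allowance and the
 * new allow-dead-send-once behavior travel together:
 *
 *     kr = ipc_right_copyin(space, name, entry,
 *         MACH_MSG_TYPE_MOVE_SEND_ONCE,
 *         IPC_RIGHT_COPYIN_FLAGS_DEADOK |
 *         IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE,
 *         &object, &soright, &release_port, &assertcnt);
 *
 * With the ALLOW_DEAD_SEND_ONCE flag, ipc_right_check() can let a
 * send-once right to a dead port (with no dead-name request) copy in as
 * though the port had died just after the copyin, per the comment in the
 * MOVE_SEND_ONCE case below.
 */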
*sorightp = request; break; - } + } - case MACH_MSG_TYPE_MOVE_SEND_ONCE: { + case MACH_MSG_TYPE_MOVE_SEND_ONCE: { ipc_port_t request; - if (bits & MACH_PORT_TYPE_DEAD_NAME) + if (bits & MACH_PORT_TYPE_DEAD_NAME) { goto move_dead; + } /* allow for dead send rights */ - if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) + if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) { goto invalid_right; + } assert(IE_BITS_UREFS(bits) > 0); port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (ipc_right_check(space, port, name, entry)) { + if (ipc_right_check(space, port, name, entry, flags)) { bits = entry->ie_bits; *releasep = port; goto move_dead; } - /* port is locked and active */ + /* + * port is locked, but may not be active: + * Allow copyin of inactive ports with no dead name request and treat it + * as if the copyin of the port was successful and port became inactive + * later. + */ if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) { assert(bits & MACH_PORT_TYPE_SEND); @@ -2123,55 +2183,56 @@ ipc_right_copyin( ip_unlock(port); entry->ie_object = IO_NULL; - entry->ie_bits = bits &~ - (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND_ONCE); + entry->ie_bits = bits & ~ + (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND_ONCE); ipc_entry_modified(space, name, entry); *objectp = (ipc_object_t) port; *sorightp = request; break; - } + } - default: - invalid_right: + default: +invalid_right: return KERN_INVALID_RIGHT; } return KERN_SUCCESS; - copy_dead: +copy_dead: assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); assert(IE_BITS_UREFS(bits) > 0); assert(entry->ie_request == IE_REQ_NONE); assert(entry->ie_object == 0); - if (!deadok) + if (!deadok) { goto invalid_right; + } *objectp = IO_DEAD; *sorightp = IP_NULL; return KERN_SUCCESS; - move_dead: +move_dead: assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); assert(IE_BITS_UREFS(bits) > 0); assert(entry->ie_request == IE_REQ_NONE); assert(entry->ie_object == 0); - if (!deadok) + if (!deadok) { goto invalid_right; + } if (IE_BITS_UREFS(bits) == 1) { bits &= ~MACH_PORT_TYPE_DEAD_NAME; } /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) - entry->ie_bits = bits-1; /* decrement urefs */ - + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { + entry->ie_bits = bits - 1; /* decrement urefs */ + } ipc_entry_modified(space, name, entry); *objectp = IO_DEAD; *sorightp = IP_NULL; return KERN_SUCCESS; - } /* @@ -2186,12 +2247,12 @@ ipc_right_copyin( void ipc_right_copyin_undo( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_name, - ipc_object_t object, - ipc_port_t soright) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + ipc_object_t object, + ipc_port_t soright) { ipc_entry_bits_t bits; @@ -2200,24 +2261,23 @@ ipc_right_copyin_undo( assert(is_active(space)); assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || - (msgt_name == MACH_MSG_TYPE_COPY_SEND) || - (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); + (msgt_name == MACH_MSG_TYPE_COPY_SEND) || + (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); if (soright != IP_NULL) { assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || - (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); + (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); assert(object != IO_DEAD); - entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) | - MACH_PORT_TYPE_DEAD_NAME | 2); - + entry->ie_bits = ((bits & ~IE_BITS_RIGHT_MASK) | + MACH_PORT_TYPE_DEAD_NAME | 2); } else if 
(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE) { assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || - (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); + (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE)); - entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) | - MACH_PORT_TYPE_DEAD_NAME | 1); + entry->ie_bits = ((bits & ~IE_BITS_RIGHT_MASK) | + MACH_PORT_TYPE_DEAD_NAME | 1); } else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME) { assert(object == IO_DEAD); assert(IE_BITS_UREFS(bits) > 0); @@ -2225,12 +2285,13 @@ ipc_right_copyin_undo( if (msgt_name != MACH_MSG_TYPE_COPY_SEND) { assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX); /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) - entry->ie_bits = bits+1; /* increment urefs */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { + entry->ie_bits = bits + 1; /* increment urefs */ + } } } else { assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || - (msgt_name == MACH_MSG_TYPE_COPY_SEND)); + (msgt_name == MACH_MSG_TYPE_COPY_SEND)); assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND); assert(object != IO_DEAD); assert(entry->ie_object == object); @@ -2239,8 +2300,9 @@ ipc_right_copyin_undo( if (msgt_name != MACH_MSG_TYPE_COPY_SEND) { assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX); /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) - entry->ie_bits = bits+1; /* increment urefs */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { + entry->ie_bits = bits + 1; /* increment urefs */ + } } /* @@ -2249,14 +2311,15 @@ ipc_right_copyin_undo( */ (void) ipc_right_check(space, (ipc_port_t) object, - name, entry); + name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE); /* object is dead so it is not locked */ } ipc_entry_modified(space, name, entry); /* release the reference acquired by copyin */ - if (object != IO_DEAD) + if (object != IO_DEAD) { io_release(object); + } } /* @@ -2275,12 +2338,12 @@ ipc_right_copyin_undo( static kern_return_t ipc_right_copyin_two_move_sends( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - ipc_object_t *objectp, - ipc_port_t *sorightp, - ipc_port_t *releasep) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + ipc_object_t *objectp, + ipc_port_t *sorightp, + ipc_port_t *releasep) { ipc_entry_bits_t bits; mach_port_urefs_t urefs; @@ -2293,17 +2356,19 @@ ipc_right_copyin_two_move_sends( bits = entry->ie_bits; - if ((bits & MACH_PORT_TYPE_SEND) == 0) + if ((bits & MACH_PORT_TYPE_SEND) == 0) { goto invalid_right; + } urefs = IE_BITS_UREFS(bits); - if (urefs < 2) + if (urefs < 2) { goto invalid_right; + } port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - if (ipc_right_check(space, port, name, entry)) { + if (ipc_right_check(space, port, name, entry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { *releasep = port; goto invalid_right; } @@ -2316,7 +2381,7 @@ ipc_right_copyin_two_move_sends( assert(port->ip_receiver_name == name); assert(port->ip_receiver == space); assert(IE_BITS_TYPE(bits) == - MACH_PORT_TYPE_SEND_RECEIVE); + MACH_PORT_TYPE_SEND_RECEIVE); port->ip_srights++; ip_reference(port); @@ -2325,22 +2390,23 @@ ipc_right_copyin_two_move_sends( assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND); request = ipc_right_request_cancel_macro(space, port, - name, entry); + name, entry); port->ip_srights++; ip_reference(port); ipc_hash_delete(space, (ipc_object_t) port, - name, entry); + name, entry); entry->ie_object = IO_NULL; } - entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND); + 
entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND); } else { port->ip_srights += 2; ip_reference(port); ip_reference(port); /* if urefs are pegged due to overflow, leave them pegged */ - if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) - entry->ie_bits = bits-2; /* decrement urefs */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { + entry->ie_bits = bits - 2; /* decrement urefs */ + } } ipc_entry_modified(space, name, entry); @@ -2350,7 +2416,7 @@ ipc_right_copyin_two_move_sends( *sorightp = request; return KERN_SUCCESS; - invalid_right: +invalid_right: return KERN_INVALID_RIGHT; } @@ -2371,21 +2437,21 @@ ipc_right_copyin_two_move_sends( */ kern_return_t ipc_right_copyin_two( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_one, - mach_msg_type_name_t msgt_two, - ipc_object_t *objectp, - ipc_port_t *sorightp, - ipc_port_t *releasep) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_one, + mach_msg_type_name_t msgt_two, + ipc_object_t *objectp, + ipc_port_t *sorightp, + ipc_port_t *releasep) { kern_return_t kr; int assertcnt = 0; assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one)); assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two)); - + /* * Pre-validate the second disposition is possible all by itself. @@ -2425,9 +2491,9 @@ ipc_right_copyin_two( ipc_object_t object_two; kr = ipc_right_copyin(space, name, entry, - msgt_one, FALSE, - objectp, sorightp, releasep, - &assertcnt); + msgt_one, IPC_RIGHT_COPYIN_FLAGS_NONE, + objectp, sorightp, releasep, + &assertcnt); assert(assertcnt == 0); if (kr != KERN_SUCCESS) { return kr; @@ -2444,30 +2510,28 @@ ipc_right_copyin_two( * receive right. */ kr = ipc_right_copyin(space, name, entry, - msgt_two, FALSE, - &object_two, sorightp, releasep, - &assertcnt); + msgt_two, IPC_RIGHT_COPYIN_FLAGS_NONE, + &object_two, sorightp, releasep, + &assertcnt); assert(assertcnt == 0); assert(kr == KERN_SUCCESS); assert(*sorightp == IP_NULL); assert(*releasep == IP_NULL); assert(object_two == *objectp); assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE); - } else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) && - (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) { + (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) { /* * This is an easy case. Just use our * handy-dandy special-purpose copyin call * to get two send rights for the price of one. 
*/ kr = ipc_right_copyin_two_move_sends(space, name, entry, - objectp, sorightp, - releasep); + objectp, sorightp, + releasep); if (kr != KERN_SUCCESS) { return kr; } - } else { mach_msg_type_name_t msgt_name; @@ -2486,9 +2550,9 @@ ipc_right_copyin_two( } kr = ipc_right_copyin(space, name, entry, - msgt_name, FALSE, - objectp, sorightp, releasep, - &assertcnt); + msgt_name, IPC_RIGHT_COPYIN_FLAGS_NONE, + objectp, sorightp, releasep, + &assertcnt); assert(assertcnt == 0); if (kr != KERN_SUCCESS) { return kr; @@ -2529,12 +2593,12 @@ ipc_right_copyin_two( kern_return_t ipc_right_copyout( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_name, - __unused boolean_t overflow, - ipc_object_t object) + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + __unused boolean_t overflow, + ipc_object_t object) { ipc_entry_bits_t bits; ipc_port_t port; @@ -2549,7 +2613,7 @@ ipc_right_copyout( port = (ipc_port_t) object; switch (msgt_name) { - case MACH_MSG_TYPE_PORT_SEND_ONCE: + case MACH_MSG_TYPE_PORT_SEND_ONCE: assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); assert(IE_BITS_UREFS(bits) == 0); @@ -2557,7 +2621,7 @@ ipc_right_copyout( if (port->ip_specialreply) { ipc_port_adjust_special_reply_port_locked(port, - current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE); + current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE); /* port unlocked on return */ } else { ip_unlock(port); @@ -2567,7 +2631,7 @@ ipc_right_copyout( ipc_entry_modified(space, name, entry); break; - case MACH_MSG_TYPE_PORT_SEND: + case MACH_MSG_TYPE_PORT_SEND: assert(port->ip_srights > 0); if (bits & MACH_PORT_TYPE_SEND) { @@ -2593,7 +2657,6 @@ ipc_right_copyout( port->ip_srights--; ip_unlock(port); ip_release(port); - } else if (bits & MACH_PORT_TYPE_RECEIVE) { assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE); assert(IE_BITS_UREFS(bits) == 0); @@ -2601,7 +2664,6 @@ ipc_right_copyout( /* transfer send right to entry, consume ref */ ip_unlock(port); ip_release(port); - } else { assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); assert(IE_BITS_UREFS(bits) == 0); @@ -2612,14 +2674,14 @@ ipc_right_copyout( /* entry is locked holding ref, so can use port */ ipc_hash_insert(space, (ipc_object_t) port, - name, entry); + name, entry); } entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */ ipc_entry_modified(space, name, entry); break; - case MACH_MSG_TYPE_PORT_RECEIVE: { + case MACH_MSG_TYPE_PORT_RECEIVE: { ipc_port_t dest; turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL; struct turnstile *ts = TURNSTILE_NULL; @@ -2652,7 +2714,7 @@ ipc_right_copyout( } turnstile_reference(ts); turnstile_update_inheritor(ts, inheritor, - (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE)); + (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE)); } imq_unlock(&port->ip_messages); @@ -2700,9 +2762,9 @@ ipc_right_copyout( ip_release(dest); } break; - } + } - default: + default: panic("ipc_right_copyout: strange rights"); } return KERN_SUCCESS; @@ -2723,11 +2785,11 @@ ipc_right_copyout( kern_return_t ipc_right_rename( - ipc_space_t space, - mach_port_name_t oname, - ipc_entry_t oentry, - mach_port_name_t nname, - ipc_entry_t nentry) + ipc_space_t space, + mach_port_name_t oname, + ipc_entry_t oentry, + mach_port_name_t nname, + ipc_entry_t nentry) { ipc_port_request_index_t request = oentry->ie_request; ipc_entry_bits_t bits = oentry->ie_bits; @@ -2751,7 +2813,7 @@ ipc_right_rename( port = 
(ipc_port_t) object; assert(port != IP_NULL); - if (ipc_right_check(space, port, oname, oentry)) { + if (ipc_right_check(space, port, oname, oentry, IPC_RIGHT_COPYIN_FLAGS_NONE)) { request = IE_REQ_NONE; object = IO_NULL; bits = oentry->ie_bits; @@ -2775,7 +2837,7 @@ ipc_right_rename( nentry->ie_object = object; switch (IE_BITS_TYPE(bits)) { - case MACH_PORT_TYPE_SEND: { + case MACH_PORT_TYPE_SEND: { ipc_port_t port; port = (ipc_port_t) object; @@ -2784,13 +2846,13 @@ ipc_right_rename( /* remember, there are no other share entries possible */ /* or we can't do the rename. Therefore we do not need */ /* to check the other subspaces */ - ipc_hash_delete(space, (ipc_object_t) port, oname, oentry); + ipc_hash_delete(space, (ipc_object_t) port, oname, oentry); ipc_hash_insert(space, (ipc_object_t) port, nname, nentry); break; - } + } - case MACH_PORT_TYPE_RECEIVE: - case MACH_PORT_TYPE_SEND_RECEIVE: { + case MACH_PORT_TYPE_RECEIVE: + case MACH_PORT_TYPE_SEND_RECEIVE: { ipc_port_t port; port = (ipc_port_t) object; @@ -2806,9 +2868,9 @@ ipc_right_rename( imq_unlock(&port->ip_messages); ip_unlock(port); break; - } + } - case MACH_PORT_TYPE_PORT_SET: { + case MACH_PORT_TYPE_PORT_SET: { ipc_pset_t pset; pset = (ipc_pset_t) object; @@ -2819,13 +2881,13 @@ ipc_right_rename( ips_unlock(pset); break; - } + } - case MACH_PORT_TYPE_SEND_ONCE: - case MACH_PORT_TYPE_DEAD_NAME: + case MACH_PORT_TYPE_SEND_ONCE: + case MACH_PORT_TYPE_DEAD_NAME: break; - default: + default: panic("ipc_right_rename: strange rights"); } @@ -2835,8 +2897,9 @@ ipc_right_rename( ipc_entry_modified(space, nname, nentry); is_write_unlock(space); - if (release_port != IP_NULL) + if (release_port != IP_NULL) { ip_release(release_port); + } return KERN_SUCCESS; } diff --git a/osfmk/ipc/ipc_right.h b/osfmk/ipc/ipc_right.h index a3d4af17e..d995aef3a 100644 --- a/osfmk/ipc/ipc_right.h +++ b/osfmk/ipc/ipc_right.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. 
- * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,169 +63,177 @@ * Declarations of functions to manipulate IPC capabilities. */ -#ifndef _IPC_IPC_RIGHT_H_ -#define _IPC_IPC_RIGHT_H_ +#ifndef _IPC_IPC_RIGHT_H_ +#define _IPC_IPC_RIGHT_H_ #include #include #include #include -#define ipc_right_lookup_read ipc_right_lookup_write -#define ipc_right_lookup_two_read ipc_right_lookup_two_write +#define ipc_right_lookup_read ipc_right_lookup_write +#define ipc_right_lookup_two_read ipc_right_lookup_two_write + +typedef uint32_t ipc_right_copyin_flags_t; + +#define IPC_RIGHT_COPYIN_FLAGS_NONE 0x0 +#define IPC_RIGHT_COPYIN_FLAGS_DEADOK 0x1 +#define IPC_RIGHT_COPYIN_FLAGS_RESERVED 0x2 +#define IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE 0x4 /* allow copyin of a send once right to a dead port with no dead name requests */ /* Find an entry in a space, given the name */ extern kern_return_t ipc_right_lookup_write( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t *entryp); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t *entryp); /* Find two entries in a space, given two names */ extern kern_return_t ipc_right_lookup_two_write( - ipc_space_t space, - mach_port_name_t name1, - ipc_entry_t *entryp1, - mach_port_name_t name2, - ipc_entry_t *entryp2); + ipc_space_t space, + mach_port_name_t name1, + ipc_entry_t *entryp1, + mach_port_name_t name2, + ipc_entry_t *entryp2); /* Translate (space, object) -> (name, entry) */ extern boolean_t ipc_right_reverse( - ipc_space_t space, - ipc_object_t object, - mach_port_name_t *namep, - ipc_entry_t *entryp); + ipc_space_t space, + ipc_object_t object, + mach_port_name_t *namep, + ipc_entry_t *entryp); /* Make a notification request, returning the previous send-once right */ extern kern_return_t ipc_right_request_alloc( - ipc_space_t space, - mach_port_name_t name, - boolean_t immediate, - boolean_t send_possible, - ipc_port_t notify, - ipc_port_t *previousp); + ipc_space_t space, + mach_port_name_t name, + boolean_t immediate, + boolean_t send_possible, + ipc_port_t notify, + ipc_port_t *previousp); /* Cancel a notification request and return the send-once right */ extern ipc_port_t ipc_right_request_cancel( - ipc_space_t space, - ipc_port_t port, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + ipc_port_t port, + mach_port_name_t name, + ipc_entry_t entry); -#define ipc_right_request_cancel_macro(space, port, name, entry) \ - ((entry->ie_request == IE_REQ_NONE) ? IP_NULL : \ - ipc_right_request_cancel((space), (port), (name), (entry))) +#define ipc_right_request_cancel_macro(space, port, name, entry) \ + ((entry->ie_request == IE_REQ_NONE) ? 
IP_NULL : \ + ipc_right_request_cancel((space), (port), (name), (entry))) /* Check if an entry is being used */ extern boolean_t ipc_right_inuse( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); /* Check if the port has died */ extern boolean_t ipc_right_check( - ipc_space_t space, - ipc_port_t port, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + ipc_port_t port, + mach_port_name_t name, + ipc_entry_t entry, + ipc_right_copyin_flags_t flags); /* Clean up an entry in a dead space */ extern void ipc_right_terminate( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); /* Destroy an entry in a space */ extern kern_return_t ipc_right_destroy( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - boolean_t check_guard, - uint64_t guard); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + boolean_t check_guard, + uint64_t guard); /* Release a send/send-once/dead-name user reference */ extern kern_return_t ipc_right_dealloc( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry); /* Modify the user-reference count for a right */ extern kern_return_t ipc_right_delta( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_port_right_t right, - mach_port_delta_t delta); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_right_t right, + mach_port_delta_t delta); /* Destroy a receive right; Modify ref count for send rights */ extern kern_return_t ipc_right_destruct( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_port_delta_t srdelta, - uint64_t guard); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_delta_t srdelta, + uint64_t guard); /* Retrieve information about a right */ extern kern_return_t ipc_right_info( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_port_type_t *typep, - mach_port_urefs_t *urefsp); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_port_type_t *typep, + mach_port_urefs_t *urefsp); /* Check if a subsequent ipc_right_copyin would succeed */ extern boolean_t ipc_right_copyin_check( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_name); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name); /* Copyin a capability from a space */ extern kern_return_t ipc_right_copyin( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_name, - boolean_t deadok, - ipc_object_t *objectp, - ipc_port_t *sorightp, - ipc_port_t *releasep, - int *assertcntp); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + ipc_right_copyin_flags_t flags, + ipc_object_t *objectp, + ipc_port_t *sorightp, + ipc_port_t *releasep, + int *assertcntp); /* Undo the effects of an ipc_right_copyin */ extern void ipc_right_copyin_undo( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_name, - ipc_object_t object, - ipc_port_t soright); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + ipc_object_t object, + ipc_port_t soright); /* Copyin a pair of dispositions from a space */ extern 
kern_return_t ipc_right_copyin_two( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_one, - mach_msg_type_name_t msgt_two, - ipc_object_t *objectp, - ipc_port_t *sorightp, - ipc_port_t *releasep); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_one, + mach_msg_type_name_t msgt_two, + ipc_object_t *objectp, + ipc_port_t *sorightp, + ipc_port_t *releasep); /* Copyout a capability to a space */ extern kern_return_t ipc_right_copyout( - ipc_space_t space, - mach_port_name_t name, - ipc_entry_t entry, - mach_msg_type_name_t msgt_name, - boolean_t overflow, - ipc_object_t object); + ipc_space_t space, + mach_port_name_t name, + ipc_entry_t entry, + mach_msg_type_name_t msgt_name, + boolean_t overflow, + ipc_object_t object); /* Rename a capability */ extern kern_return_t ipc_right_rename( - ipc_space_t space, - mach_port_name_t oname, - ipc_entry_t oentry, - mach_port_name_t nname, - ipc_entry_t nentry); + ipc_space_t space, + mach_port_name_t oname, + ipc_entry_t oentry, + mach_port_name_t nname, + ipc_entry_t nentry); -#endif /* _IPC_IPC_RIGHT_H_ */ +#endif /* _IPC_IPC_RIGHT_H_ */ diff --git a/osfmk/ipc/ipc_space.c b/osfmk/ipc/ipc_space.c index 9760d042e..290c71673 100644 --- a/osfmk/ipc/ipc_space.c +++ b/osfmk/ipc/ipc_space.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -105,31 +105,31 @@ ipc_space_t ipc_space_reply; void ipc_space_reference( - ipc_space_t space) + ipc_space_t space) { is_reference(space); } void ipc_space_release( - ipc_space_t space) + ipc_space_t space) { is_release(space); } -/* Routine: ipc_space_get_rollpoint - * Purpose: - * Generate a new gencount rollover point from a space's entropy pool +/* Routine: ipc_space_get_rollpoint + * Purpose: + * Generate a new gencount rollover point from a space's entropy pool */ ipc_entry_bits_t ipc_space_get_rollpoint( - ipc_space_t space) + ipc_space_t space) { return random_bool_gen_bits( - &space->bool_gen, - &space->is_entropy[0], - IS_ENTROPY_CNT, - IE_BITS_ROLL_BITS); + &space->bool_gen, + &space->is_entropy[0], + IS_ENTROPY_CNT, + IE_BITS_ROLL_BITS); } /* @@ -144,10 +144,10 @@ ipc_space_get_rollpoint( */ void ipc_space_rand_freelist( - ipc_space_t space, - ipc_entry_t table, - mach_port_index_t bottom, - mach_port_index_t top) + ipc_space_t space, + ipc_entry_t table, + mach_port_index_t bottom, + mach_port_index_t top) { int at_start = (bottom == 0); #ifdef CONFIG_SEMI_RANDOM_ENTRIES @@ -177,15 +177,15 @@ ipc_space_rand_freelist( * doesn't break programs that might have (sad) hard-coded values for * certain port names. */ - if (at_start && total++ < NUM_SEQ_ENTRIES) + if (at_start && total++ < NUM_SEQ_ENTRIES) { which = 0; - else + } else #endif - which = random_bool_gen_bits( - &space->bool_gen, - &space->is_entropy[0], - IS_ENTROPY_CNT, - 1); + which = random_bool_gen_bits( + &space->bool_gen, + &space->is_entropy[0], + IS_ENTROPY_CNT, + 1); mach_port_index_t next; if (which) { @@ -200,15 +200,17 @@ ipc_space_rand_freelist( * The entry's gencount will roll over on its first allocation, at which * point a random rollover will be set for the entry. 
*/ - entry->ie_bits = IE_BITS_GEN_MASK; + entry->ie_bits = IE_BITS_GEN_MASK; entry->ie_next = next; entry->ie_object = IO_NULL; + entry->ie_dist = 0; entry->ie_index = 0; curr = next; } table[curr].ie_next = 0; table[curr].ie_object = IO_NULL; table[curr].ie_index = 0; + table[curr].ie_dist = 0; table[curr].ie_bits = IE_BITS_GEN_MASK; /* The freelist head should always have generation number set to 0 */ @@ -234,16 +236,17 @@ ipc_space_rand_freelist( kern_return_t ipc_space_create( - ipc_table_size_t initial, - ipc_space_t *spacep) + ipc_table_size_t initial, + ipc_space_t *spacep) { ipc_space_t space; ipc_entry_t table; ipc_entry_num_t new_size; space = is_alloc(); - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_RESOURCE_SHORTAGE; + } table = it_entries_alloc(initial); if (table == IE_NULL) { @@ -262,10 +265,11 @@ ipc_space_create( is_lock_init(space); space->is_bits = 2; /* 2 refs, active, not growing */ + space->is_table_hashed = 0; space->is_table_size = new_size; space->is_table_free = new_size - 1; space->is_table = table; - space->is_table_next = initial+1; + space->is_table_next = initial + 1; space->is_task = NULL; space->is_low_mod = new_size; space->is_high_mod = 0; @@ -292,13 +296,14 @@ ipc_space_create( kern_return_t ipc_space_create_special( - ipc_space_t *spacep) + ipc_space_t *spacep) { ipc_space_t space; space = is_alloc(); - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_RESOURCE_SHORTAGE; + } is_lock_init(space); @@ -334,10 +339,11 @@ ipc_space_clean( * we must wait until they finish and figure * out the space died. */ - retry: +retry: is_write_lock(space); - while (is_growing(space)) + while (is_growing(space)) { is_write_sleep(space); + } if (!is_active(space)) { is_write_unlock(space); @@ -357,19 +363,19 @@ ipc_space_clean( type = IE_BITS_TYPE(entry->ie_bits); if (type != MACH_PORT_TYPE_NONE) { - mach_port_name_t name = MACH_PORT_MAKE(index, - IE_BITS_GEN(entry->ie_bits)); + mach_port_name_t name = MACH_PORT_MAKE(index, + IE_BITS_GEN(entry->ie_bits)); ipc_right_destroy(space, name, entry, FALSE, 0); /* unlocks space */ goto retry; } } - /* + /* * JMM - Now the table is cleaned out. We don't bother shrinking the * size of the table at this point, but we probably should if it is * really large. */ - + is_write_unlock(space); } @@ -385,7 +391,7 @@ ipc_space_clean( void ipc_space_terminate( - ipc_space_t space) + ipc_space_t space) { ipc_entry_t table; ipc_entry_num_t size; @@ -405,8 +411,9 @@ ipc_space_terminate( * we must wait until they finish and figure * out the space died. */ - while (is_growing(space)) + while (is_growing(space)) { is_write_sleep(space); + } is_write_unlock(space); @@ -427,12 +434,12 @@ ipc_space_terminate( mach_port_name_t name; name = MACH_PORT_MAKE(index, - IE_BITS_GEN(entry->ie_bits)); + IE_BITS_GEN(entry->ie_bits)); ipc_right_terminate(space, name, entry); } } - it_entries_free(space->is_table_next-1, table); + it_entries_free(space->is_table_next - 1, table); space->is_table_size = 0; space->is_table_free = 0; @@ -443,5 +450,3 @@ ipc_space_terminate( */ is_release(space); } - - diff --git a/osfmk/ipc/ipc_space.h b/osfmk/ipc/ipc_space.h index 06fdf3e79..c0a2d1d15 100644 --- a/osfmk/ipc/ipc_space.h +++ b/osfmk/ipc/ipc_space.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -69,7 +69,7 @@ * Definitions for IPC spaces of capabilities. 
*/ -#ifndef _IPC_IPC_SPACE_H_ +#ifndef _IPC_IPC_SPACE_H_ #define _IPC_IPC_SPACE_H_ @@ -107,39 +107,40 @@ */ typedef natural_t ipc_space_refs_t; -#define IS_REFS_MAX 0x0fffffff -#define IS_INACTIVE 0x40000000 /* space is inactive */ -#define IS_GROWING 0x20000000 /* space is growing */ -#define IS_ENTROPY_CNT 1 /* per-space entropy pool size */ +#define IS_REFS_MAX 0x0fffffff +#define IS_INACTIVE 0x40000000 /* space is inactive */ +#define IS_GROWING 0x20000000 /* space is growing */ +#define IS_ENTROPY_CNT 1 /* per-space entropy pool size */ struct ipc_space { - lck_spin_t is_lock_data; - ipc_space_refs_t is_bits; /* holds refs, active, growing */ - ipc_entry_num_t is_table_size; /* current size of table */ - ipc_entry_num_t is_table_free; /* count of free elements */ - ipc_entry_t is_table; /* an array of entries */ + lck_spin_t is_lock_data; + ipc_space_refs_t is_bits; /* holds refs, active, growing */ + ipc_entry_num_t is_table_size; /* current size of table */ + ipc_entry_num_t is_table_hashed;/* count of hashed elements */ + ipc_entry_num_t is_table_free; /* count of free elements */ + ipc_entry_t is_table; /* an array of entries */ task_t is_task; /* associated task */ struct ipc_table_size *is_table_next; /* info for larger table */ - ipc_entry_num_t is_low_mod; /* lowest modified entry during growth */ - ipc_entry_num_t is_high_mod; /* highest modified entry during growth */ + ipc_entry_num_t is_low_mod; /* lowest modified entry during growth */ + ipc_entry_num_t is_high_mod; /* highest modified entry during growth */ struct bool_gen bool_gen; /* state for boolean RNG */ unsigned int is_entropy[IS_ENTROPY_CNT]; /* pool of entropy taken from RNG */ - int is_node_id; /* HOST_LOCAL_NODE, or remote node if proxy space */ + int is_node_id; /* HOST_LOCAL_NODE, or remote node if proxy space */ }; -#define IS_NULL ((ipc_space_t) 0) -#define IS_INSPECT_NULL ((ipc_space_inspect_t) 0) +#define IS_NULL ((ipc_space_t) 0) +#define IS_INSPECT_NULL ((ipc_space_inspect_t) 0) -#define is_active(is) (((is)->is_bits & IS_INACTIVE) != IS_INACTIVE) +#define is_active(is) (((is)->is_bits & IS_INACTIVE) != IS_INACTIVE) -static inline void +static inline void is_mark_inactive(ipc_space_t is) { assert(is_active(is)); OSBitOrAtomic(IS_INACTIVE, &is->is_bits); } -#define is_growing(is) (((is)->is_bits & IS_GROWING) == IS_GROWING) +#define is_growing(is) (((is)->is_bits & IS_GROWING) == IS_GROWING) static inline void is_start_growing(ipc_space_t is) @@ -149,7 +150,7 @@ is_start_growing(ipc_space_t is) } static inline void -is_done_growing(ipc_space_t is) +is_done_growing(ipc_space_t is) { assert(is_growing(is)); OSBitAndAtomic(~IS_GROWING, &is->is_bits); @@ -157,40 +158,42 @@ is_done_growing(ipc_space_t is) extern zone_t ipc_space_zone; -#define is_alloc() ((ipc_space_t) zalloc(ipc_space_zone)) -#define is_free(is) zfree(ipc_space_zone, (is)) +#define is_alloc() ((ipc_space_t) zalloc(ipc_space_zone)) +#define is_free(is) zfree(ipc_space_zone, (is)) extern ipc_space_t ipc_space_kernel; extern ipc_space_t ipc_space_reply; -#if DIPC +#if DIPC extern ipc_space_t ipc_space_remote; -#endif /* DIPC */ -#if DIPC +#endif /* DIPC */ +#if DIPC extern ipc_space_t default_pager_space; -#endif /* DIPC */ +#endif /* DIPC */ -extern lck_grp_t ipc_lck_grp; -extern lck_attr_t ipc_lck_attr; +extern lck_grp_t ipc_lck_grp; +extern lck_attr_t ipc_lck_attr; -#define is_lock_init(is) lck_spin_init(&(is)->is_lock_data, &ipc_lck_grp, &ipc_lck_attr) -#define is_lock_destroy(is) lck_spin_destroy(&(is)->is_lock_data, &ipc_lck_grp) 
+#define is_lock_init(is) lck_spin_init(&(is)->is_lock_data, &ipc_lck_grp, &ipc_lck_attr) +#define is_lock_destroy(is) lck_spin_destroy(&(is)->is_lock_data, &ipc_lck_grp) -#define is_read_lock(is) lck_spin_lock(&(is)->is_lock_data) -#define is_read_unlock(is) lck_spin_unlock(&(is)->is_lock_data) -#define is_read_sleep(is) lck_spin_sleep(&(is)->is_lock_data, \ - LCK_SLEEP_DEFAULT, \ - (event_t)(is), \ - THREAD_UNINT) +#define is_read_lock(is) lck_spin_lock_grp(&(is)->is_lock_data, &ipc_lck_grp) +#define is_read_unlock(is) lck_spin_unlock(&(is)->is_lock_data) +#define is_read_sleep(is) lck_spin_sleep_grp(&(is)->is_lock_data, \ + LCK_SLEEP_DEFAULT, \ + (event_t)(is), \ + THREAD_UNINT, \ + &ipc_lck_grp) -#define is_write_lock(is) lck_spin_lock(&(is)->is_lock_data) -#define is_write_lock_try(is) lck_spin_try_lock(&(is)->is_lock_data) -#define is_write_unlock(is) lck_spin_unlock(&(is)->is_lock_data) -#define is_write_sleep(is) lck_spin_sleep(&(is)->is_lock_data, \ - LCK_SLEEP_DEFAULT, \ - (event_t)(is), \ - THREAD_UNINT) +#define is_write_lock(is) lck_spin_lock_grp(&(is)->is_lock_data, &ipc_lck_grp) +#define is_write_lock_try(is) lck_spin_try_lock_grp(&(is)->is_lock_data, &ipc_lck_grp) +#define is_write_unlock(is) lck_spin_unlock(&(is)->is_lock_data) +#define is_write_sleep(is) lck_spin_sleep_grp(&(is)->is_lock_data, \ + LCK_SLEEP_DEFAULT, \ + (event_t)(is), \ + THREAD_UNINT, \ + &ipc_lck_grp) -#define is_refs(is) ((is)->is_bits & IS_REFS_MAX) +#define is_refs(is) ((is)->is_bits & IS_REFS_MAX) static inline void is_reference(ipc_space_t is) @@ -201,43 +204,44 @@ is_reference(ipc_space_t is) static inline void -is_release(ipc_space_t is) { +is_release(ipc_space_t is) +{ assert(is_refs(is) > 0); - /* If we just removed the last reference count */ - if ( 1 == (OSDecrementAtomic(&(is->is_bits)) & IS_REFS_MAX)) { + /* If we just removed the last reference count */ + if (1 == (OSDecrementAtomic(&(is->is_bits)) & IS_REFS_MAX)) { assert(!is_active(is)); is_lock_destroy(is); is_free(is); } } - -#define current_space_fast() (current_task_fast()->itk_space) -#define current_space() (current_space_fast()) + +#define current_space_fast() (current_task_fast()->itk_space) +#define current_space() (current_space_fast()) /* Create a special IPC space */ extern kern_return_t ipc_space_create_special( - ipc_space_t *spacep); + ipc_space_t *spacep); /* Create a new IPC space */ extern kern_return_t ipc_space_create( - ipc_table_size_t initial, - ipc_space_t *spacep); + ipc_table_size_t initial, + ipc_space_t *spacep); /* Mark a space as dead and cleans up the entries*/ extern void ipc_space_terminate( - ipc_space_t space); + ipc_space_t space); /* Clean up the entries - but leave the space alive */ extern void ipc_space_clean( - ipc_space_t space); + ipc_space_t space); /* Permute the order of a range within an IPC space */ extern void ipc_space_rand_freelist( - ipc_space_t space, - ipc_entry_t table, - mach_port_index_t bottom, - mach_port_index_t top); + ipc_space_t space, + ipc_entry_t table, + mach_port_index_t bottom, + mach_port_index_t top); /* Generate a new gencount rollover point from a space's entropy pool */ extern ipc_entry_bits_t ipc_space_get_rollpoint(ipc_space_t space); @@ -247,17 +251,17 @@ extern ipc_entry_bits_t ipc_space_get_rollpoint(ipc_space_t space); #ifdef __APPLE_API_UNSTABLE #ifndef MACH_KERNEL_PRIVATE -extern ipc_space_t current_space(void); +extern ipc_space_t current_space(void); #endif /* !MACH_KERNEL_PRIVATE */ #endif /* __APPLE_API_UNSTABLE */ /* Take a reference on a space */ 
extern void ipc_space_reference( - ipc_space_t space); + ipc_space_t space); /* Release a reference on a space */ extern void ipc_space_release( - ipc_space_t space); + ipc_space_t space); -#endif /* _IPC_IPC_SPACE_H_ */ +#endif /* _IPC_IPC_SPACE_H_ */ diff --git a/osfmk/ipc/ipc_table.c b/osfmk/ipc/ipc_table.c index 76bc0254a..f903101ff 100644 --- a/osfmk/ipc/ipc_table.c +++ b/osfmk/ipc/ipc_table.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes.
*/ @@ -79,10 +79,10 @@ unsigned int ipc_table_requests_size = 64; static void ipc_table_fill( - ipc_table_size_t its, /* array to fill */ - unsigned int num, /* size of array */ - unsigned int min, /* at least this many elements */ - vm_size_t elemsize) /* size of elements */ + ipc_table_size_t its, /* array to fill */ + unsigned int num, /* size of array */ + unsigned int min, /* at least this many elements */ + vm_size_t elemsize) /* size of elements */ { unsigned int index; vm_size_t minsize = min * elemsize; @@ -92,8 +92,8 @@ ipc_table_fill( /* first use powers of two, up to the page size */ for (index = 0, size = 1; - (index < num) && (size < PAGE_MAX_SIZE); - size <<= 1) { + (index < num) && (size < PAGE_MAX_SIZE); + size <<= 1) { if (size >= minsize) { its[index].its_size = (ipc_table_elems_t)(size / elemsize); index++; @@ -106,15 +106,16 @@ ipc_table_fill( unsigned int period; for (period = 0; - (period < 15) && (index < num); - period++, size += incrsize) { + (period < 15) && (index < num); + period++, size += incrsize) { if (size >= minsize) { its[index].its_size = (ipc_table_elems_t)(size / elemsize); index++; } } - if (incrsize < (vm_size_t)(PAGE_MAX_SIZE << 3)) + if (incrsize < (vm_size_t)(PAGE_MAX_SIZE << 3)) { incrsize <<= 1; + } } } @@ -122,26 +123,29 @@ void ipc_table_init(void) { ipc_table_entries = (ipc_table_size_t) - kalloc(sizeof(struct ipc_table_size) * - ipc_table_entries_size); + kalloc(sizeof(struct ipc_table_size) * + ipc_table_entries_size); assert(ipc_table_entries != ITS_NULL); ipc_table_fill(ipc_table_entries, ipc_table_entries_size - 1, - 16, sizeof(struct ipc_entry)); + 16, sizeof(struct ipc_entry)); /* the last two elements should have the same size */ ipc_table_entries[ipc_table_entries_size - 1].its_size = - ipc_table_entries[ipc_table_entries_size - 2].its_size; + ipc_table_entries[ipc_table_entries_size - 2].its_size; + /* make sure the robin hood hashing in ipc hash will work */ + assert(ipc_table_entries[ipc_table_entries_size - 1].its_size <= + IPC_ENTRY_INDEX_MAX); ipc_table_requests = (ipc_table_size_t) - kalloc(sizeof(struct ipc_table_size) * - ipc_table_requests_size); + kalloc(sizeof(struct ipc_table_size) * + ipc_table_requests_size); assert(ipc_table_requests != ITS_NULL); ipc_table_fill(ipc_table_requests, ipc_table_requests_size - 1, - 2, sizeof(struct ipc_port_request)); + 2, sizeof(struct ipc_port_request)); /* the last element should have zero size */ @@ -160,8 +164,9 @@ ipc_table_init(void) unsigned int ipc_table_max_entries(void) { - if (!ipc_table_entries || ipc_table_entries_size < 2) + if (!ipc_table_entries || ipc_table_entries_size < 2) { return 0; + } return (unsigned int)ipc_table_entries[ipc_table_entries_size - 1].its_size; } @@ -177,8 +182,9 @@ ipc_table_max_entries(void) unsigned int ipc_table_max_requests(void) { - if (!ipc_table_requests || ipc_table_requests_size < 2) + if (!ipc_table_requests || ipc_table_requests_size < 2) { return 0; + } return (unsigned int)ipc_table_requests[ipc_table_requests_size - 2].its_size; } @@ -193,7 +199,7 @@ ipc_table_max_requests(void) void * ipc_table_alloc( - vm_size_t size) + vm_size_t size) { return kalloc(size); } @@ -209,8 +215,8 @@ ipc_table_alloc( void ipc_table_free( - vm_size_t size, - void * table) + vm_size_t size, + void * table) { kfree(table, size); } diff --git a/osfmk/ipc/ipc_table.h b/osfmk/ipc/ipc_table.h index 2b092bbca..28e79b356 100644 --- a/osfmk/ipc/ipc_table.h +++ b/osfmk/ipc/ipc_table.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,8 +64,8 @@ * and dead-name requests (ipc_port_request_t). 
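The ipc_table_fill() hunk above encodes the growth policy behind these tables: candidate sizes double until a table spans one page, then grow by an arithmetic progression in 15-step periods whose increment itself doubles, capped at PAGE_MAX_SIZE << 3. A minimal userspace sketch of that curve, assuming a 16 KB PAGE_MAX_SIZE and a 24-byte element purely as stand-in values (neither is taken from this patch):

#include <stdio.h>

#define PAGE_MAX_SIZE 16384UL           /* stand-in; real value is per-arch */

int main(void)
{
    unsigned index = 0, num = 64;
    unsigned long elemsize = 24, minsize = 16 * elemsize, size, incr;

    /* phase 1: powers of two, up to one page */
    for (size = 1; index < num && size < PAGE_MAX_SIZE; size <<= 1) {
        if (size >= minsize)
            printf("slot %2u: %lu entries\n", index++, size / elemsize);
    }
    /* phase 2: arithmetic progression; increment doubles each period */
    for (incr = PAGE_MAX_SIZE; index < num;) {
        for (unsigned p = 0; p < 15 && index < num; p++, size += incr) {
            if (size >= minsize)
                printf("slot %2u: %lu entries\n", index++, size / elemsize);
        }
        if (incr < (PAGE_MAX_SIZE << 3))
            incr <<= 1;
    }
    return 0;
}

The effect is that small spaces reallocate cheaply and often while large spaces grow in big but bounded steps, which is also why the assert added in ipc_table_init() can bound the largest table against IPC_ENTRY_INDEX_MAX.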
*/ -#ifndef _IPC_IPC_TABLE_H_ -#define _IPC_IPC_TABLE_H_ +#ifndef _IPC_IPC_TABLE_H_ +#define _IPC_IPC_TABLE_H_ #include #include @@ -102,7 +102,7 @@ */ struct ipc_table_size { - ipc_table_elems_t its_size; /* number of elements in table */ + ipc_table_elems_t its_size; /* number of elements in table */ }; extern ipc_table_size_t ipc_table_entries; @@ -119,33 +119,33 @@ extern void ipc_table_init(void); /* Allocate a table */ extern void * ipc_table_alloc( - vm_size_t size); + vm_size_t size); /* Free a table */ extern void ipc_table_free( - vm_size_t size, - void * table); + vm_size_t size, + void * table); -#define it_entries_alloc(its) \ - ((ipc_entry_t) \ +#define it_entries_alloc(its) \ + ((ipc_entry_t) \ ipc_table_alloc((its)->its_size * sizeof(struct ipc_entry))) -#define it_entries_free(its, table) \ - ipc_table_free((its)->its_size * sizeof(struct ipc_entry), \ - (void *)(table)) +#define it_entries_free(its, table) \ + ipc_table_free((its)->its_size * sizeof(struct ipc_entry), \ + (void *)(table)) -#define it_requests_alloc(its) \ - ((ipc_port_request_t) \ - ipc_table_alloc((its)->its_size * \ - sizeof(struct ipc_port_request))) +#define it_requests_alloc(its) \ + ((ipc_port_request_t) \ + ipc_table_alloc((its)->its_size * \ + sizeof(struct ipc_port_request))) -#define it_requests_free(its, table) \ - ipc_table_free((its)->its_size * \ - sizeof(struct ipc_port_request), \ - (void *)(table)) +#define it_requests_free(its, table) \ + ipc_table_free((its)->its_size * \ + sizeof(struct ipc_port_request), \ + (void *)(table)) extern unsigned int ipc_table_max_entries(void); extern unsigned int ipc_table_max_requests(void); -#endif /* _IPC_IPC_TABLE_H_ */ +#endif /* _IPC_IPC_TABLE_H_ */ diff --git a/osfmk/ipc/ipc_types.h b/osfmk/ipc/ipc_types.h index 5523bcede..eaf5a3798 100644 --- a/osfmk/ipc/ipc_types.h +++ b/osfmk/ipc/ipc_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,23 +36,23 @@ * port.h and mach_types.h for in-kernel entities. 
*/ -#ifndef _IPC_IPC_TYPES_H_ -#define _IPC_IPC_TYPES_H_ +#ifndef _IPC_IPC_TYPES_H_ +#define _IPC_IPC_TYPES_H_ #include #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE -typedef natural_t ipc_table_index_t; /* index into tables */ -typedef natural_t ipc_table_elems_t; /* size of tables */ +typedef natural_t ipc_table_index_t; /* index into tables */ +typedef natural_t ipc_table_elems_t; /* size of tables */ typedef natural_t ipc_entry_bits_t; -typedef ipc_table_elems_t ipc_entry_num_t; /* number of entries */ +typedef ipc_table_elems_t ipc_entry_num_t; /* number of entries */ typedef ipc_table_index_t ipc_port_request_index_t; -typedef mach_port_name_t mach_port_index_t; /* index values */ -typedef mach_port_name_t mach_port_gen_t; /* generation numbers */ +typedef mach_port_name_t mach_port_index_t; /* index values */ +typedef mach_port_name_t mach_port_gen_t; /* generation numbers */ typedef struct ipc_entry *ipc_entry_t; @@ -62,38 +62,38 @@ typedef struct ipc_pset *ipc_pset_t; typedef struct ipc_kmsg *ipc_kmsg_t; typedef uint8_t sync_qos_count_t; -#define IE_NULL ((ipc_entry_t) 0) +#define IE_NULL ((ipc_entry_t) 0) -#define ITS_NULL ((ipc_table_size_t) 0) -#define ITS_SIZE_NONE ((ipc_table_elems_t) -1) -#define IPR_NULL ((ipc_port_request_t) 0) -#define IPS_NULL ((ipc_pset_t) 0) -#define IKM_NULL ((ipc_kmsg_t) 0) +#define ITS_NULL ((ipc_table_size_t) 0) +#define ITS_SIZE_NONE ((ipc_table_elems_t) -1) +#define IPR_NULL ((ipc_port_request_t) 0) +#define IPS_NULL ((ipc_pset_t) 0) +#define IKM_NULL ((ipc_kmsg_t) 0) -typedef void (*mach_msg_continue_t)(mach_msg_return_t); /* after wakeup */ -#define MACH_MSG_CONTINUE_NULL ((mach_msg_continue_t) 0) +typedef void (*mach_msg_continue_t)(mach_msg_return_t); /* after wakeup */ +#define MACH_MSG_CONTINUE_NULL ((mach_msg_continue_t) 0) typedef struct ipc_importance_elem *ipc_importance_elem_t; -#define IIE_NULL ((ipc_importance_elem_t)0) +#define IIE_NULL ((ipc_importance_elem_t)0) typedef struct ipc_importance_task *ipc_importance_task_t; -#define IIT_NULL ((ipc_importance_task_t)0) +#define IIT_NULL ((ipc_importance_task_t)0) typedef struct ipc_importance_inherit *ipc_importance_inherit_t; -#define III_NULL ((ipc_importance_inherit_t)0) +#define III_NULL ((ipc_importance_inherit_t)0) -#else /* MACH_KERNEL_PRIVATE */ +#else /* MACH_KERNEL_PRIVATE */ -struct ipc_object ; +struct ipc_object; -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -typedef struct ipc_object *ipc_object_t; +typedef struct ipc_object *ipc_object_t; -#define IPC_OBJECT_NULL ((ipc_object_t) 0) -#define IPC_OBJECT_DEAD ((ipc_object_t)~0) -#define IPC_OBJECT_VALID(io) (((io) != IPC_OBJECT_NULL) && \ - ((io) != IPC_OBJECT_DEAD)) +#define IPC_OBJECT_NULL ((ipc_object_t) 0) +#define IPC_OBJECT_DEAD ((ipc_object_t)~0) +#define IPC_OBJECT_VALID(io) (((io) != IPC_OBJECT_NULL) && \ + ((io) != IPC_OBJECT_DEAD)) -#endif /* _IPC_IPC_TYPES_H_ */ +#endif /* _IPC_IPC_TYPES_H_ */ diff --git a/osfmk/ipc/ipc_voucher.c b/osfmk/ipc/ipc_voucher.c index 4e7b4b950..ff6da5605 100644 --- a/osfmk/ipc/ipc_voucher.c +++ b/osfmk/ipc/ipc_voucher.c @@ -67,11 +67,11 @@ static uint32_t ivht_count = 0; lck_spin_init(&ivht_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define ivht_lock_destroy() \ lck_spin_destroy(&ivht_lock_data, &ipc_lck_grp) -#define ivht_lock() \ - lck_spin_lock(&ivht_lock_data) -#define ivht_lock_try() \ - lck_spin_try_lock(&ivht_lock_data) -#define ivht_unlock() \ +#define ivht_lock() \ + lck_spin_lock_grp(&ivht_lock_data, &ipc_lck_grp) +#define 
ivht_lock_try() \ + lck_spin_try_lock_grp(&ivht_lock_data, &ipc_lck_grp) +#define ivht_unlock() \ lck_spin_unlock(&ivht_lock_data) /* @@ -83,17 +83,17 @@ static uint32_t ivht_count = 0; */ static iv_index_t ivgt_keys_in_use = MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN; static ipc_voucher_global_table_element iv_global_table[MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN]; -static lck_spin_t ivgt_lock_data; +static lck_spin_t ivgt_lock_data; #define ivgt_lock_init() \ lck_spin_init(&ivgt_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define ivgt_lock_destroy() \ lck_spin_destroy(&ivgt_lock_data, &ipc_lck_grp) -#define ivgt_lock() \ - lck_spin_lock(&ivgt_lock_data) -#define ivgt_lock_try() \ - lck_spin_try_lock(&ivgt_lock_data) -#define ivgt_unlock() \ +#define ivgt_lock() \ + lck_spin_lock_grp(&ivgt_lock_data, &ipc_lck_grp) +#define ivgt_lock_try() \ + lck_spin_try_lock_grp(&ivgt_lock_data, &ipc_lck_grp) +#define ivgt_unlock() \ lck_spin_unlock(&ivgt_lock_data) ipc_voucher_t iv_alloc(iv_index_t entries); @@ -155,32 +155,33 @@ static inline iv_index_t iv_key_to_index(mach_voucher_attr_key_t key) { if (MACH_VOUCHER_ATTR_KEY_ALL == key || - MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN < key) + MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN < key) { return IV_UNUSED_KEYINDEX; + } return (iv_index_t)key - 1; } static inline mach_voucher_attr_key_t iv_index_to_key(iv_index_t key_index) { - if (MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN > key_index) + if (MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN > key_index) { return iv_global_table[key_index].ivgte_key; + } return MACH_VOUCHER_ATTR_KEY_NONE; - } static void ivace_release(iv_index_t key_index, iv_index_t value_index); -static void ivace_lookup_values(iv_index_t key_index, iv_index_t value_index, - mach_voucher_attr_value_handle_array_t values, - mach_voucher_attr_value_handle_array_size_t *count); +static void ivace_lookup_values(iv_index_t key_index, iv_index_t value_index, + mach_voucher_attr_value_handle_array_t values, + mach_voucher_attr_value_handle_array_size_t *count); static iv_index_t iv_lookup(ipc_voucher_t, iv_index_t); - + static void ivgt_lookup(iv_index_t, - boolean_t, - ipc_voucher_attr_manager_t *, - ipc_voucher_attr_control_t *); + boolean_t, + ipc_voucher_attr_manager_t *, + ipc_voucher_attr_control_t *); static kern_return_t ipc_voucher_prepare_processing_recipe( @@ -193,7 +194,7 @@ ipc_voucher_prepare_processing_recipe( #if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) || defined(MACH_VOUCHER_ATTR_KEY_TEST) void user_data_attr_manager_init(void); -#endif +#endif void ipc_voucher_init(void) @@ -203,21 +204,22 @@ ipc_voucher_init(void) iv_index_t i; ipc_voucher_zone = zinit(sizeof(struct ipc_voucher), - ipc_voucher_max * sizeof(struct ipc_voucher), - sizeof(struct ipc_voucher), - "ipc vouchers"); + ipc_voucher_max * sizeof(struct ipc_voucher), + sizeof(struct ipc_voucher), + "ipc vouchers"); zone_change(ipc_voucher_zone, Z_NOENCRYPT, TRUE); ipc_voucher_attr_control_zone = zinit(sizeof(struct ipc_voucher_attr_control), - attr_manager_max * sizeof(struct ipc_voucher_attr_control), - sizeof(struct ipc_voucher_attr_control), - "ipc voucher attr controls"); + attr_manager_max * sizeof(struct ipc_voucher_attr_control), + sizeof(struct ipc_voucher_attr_control), + "ipc voucher attr controls"); zone_change(ipc_voucher_attr_control_zone, Z_NOENCRYPT, TRUE); /* initialize voucher hash */ ivht_lock_init(); - for (i = 0; i < IV_HASH_BUCKETS; i++) + for (i = 0; i < IV_HASH_BUCKETS; i++) { queue_init(&ivht_bucket[i]); + } /* initialize global table locking */ ivgt_lock_init(); @@ -235,9 +237,10 
@@ iv_alloc(iv_index_t entries) iv = (ipc_voucher_t)zalloc(ipc_voucher_zone); - if (IV_NULL == iv) + if (IV_NULL == iv) { return IV_NULL; - + } + os_ref_init(&iv->iv_refs, &iv_refgrp); iv->iv_sum = 0; iv->iv_hash = 0; @@ -260,10 +263,11 @@ iv_alloc(iv_index_t entries) } /* initialize the table entries */ - for (i=0; i < iv->iv_table_size; i++) + for (i = 0; i < iv->iv_table_size; i++) { iv->iv_table[i] = IV_UNUSED_VALINDEX; - - return (iv); + } + + return iv; } /* @@ -275,9 +279,9 @@ iv_alloc(iv_index_t entries) * they are immutable once references are distributed. */ static void -iv_set(ipc_voucher_t iv, - iv_index_t key_index, - iv_index_t value_index) +iv_set(ipc_voucher_t iv, + iv_index_t key_index, + iv_index_t value_index) { assert(key_index < iv->iv_table_size); iv->iv_table[key_index] = value_index; @@ -300,9 +304,8 @@ iv_dealloc(ipc_voucher_t iv, boolean_t unhash) ivht_count--; ivht_unlock(); - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_VOUCHER_DESTROY) | DBG_FUNC_NONE, - VM_KERNEL_ADDRPERM((uintptr_t)iv), 0, ivht_count, 0, 0); - + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_DESTROY) | DBG_FUNC_NONE, + VM_KERNEL_ADDRPERM((uintptr_t)iv), 0, ivht_count, 0, 0); } else { os_ref_count_t cnt __assert_only = os_ref_release(&iv->iv_refs); assert(cnt == 0); @@ -328,10 +331,11 @@ iv_dealloc(ipc_voucher_t iv, boolean_t unhash) iv_set(iv, i, ~0); #endif } - - if (iv->iv_table != iv->iv_inline_table) - kfree(iv->iv_table, - iv->iv_table_size * sizeof(*iv->iv_table)); + + if (iv->iv_table != iv->iv_inline_table) { + kfree(iv->iv_table, + iv->iv_table_size * sizeof(*iv->iv_table)); + } zfree(ipc_voucher_zone, iv); } @@ -347,8 +351,9 @@ iv_dealloc(ipc_voucher_t iv, boolean_t unhash) static inline iv_index_t iv_lookup(ipc_voucher_t iv, iv_index_t key_index) { - if (key_index < iv->iv_table_size) + if (key_index < iv->iv_table_size) { return iv->iv_table[key_index]; + } return IV_UNUSED_VALINDEX; } @@ -367,7 +372,7 @@ iv_lookup(ipc_voucher_t iv, iv_index_t key_index) */ uintptr_t unsafe_convert_port_to_voucher( - ipc_port_t port) + ipc_port_t port) { if (IP_VALID(port)) { uintptr_t voucher = (uintptr_t) port->ip_kobject; @@ -377,8 +382,9 @@ unsafe_convert_port_to_voucher( * port, and if it is a true voucher port, that reference * keeps the voucher bound to the port (and active). */ - if (ip_kotype(port) == IKOT_VOUCHER) - return (voucher); + if (ip_kotype(port) == IKOT_VOUCHER) { + return voucher; + } } return (uintptr_t)IV_NULL; } @@ -395,7 +401,7 @@ unsafe_convert_port_to_voucher( */ ipc_voucher_t convert_port_to_voucher( - ipc_port_t port) + ipc_port_t port) { if (IP_VALID(port)) { ipc_voucher_t voucher = (ipc_voucher_t) port->ip_kobject; @@ -405,13 +411,14 @@ convert_port_to_voucher( * port, and if it is a true voucher port, that reference * keeps the voucher bound to the port (and active). 
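Both unsafe_convert_port_to_voucher() and convert_port_to_voucher() above lean on the same invariant: a port's ip_kobject pointer is only meaningful while ip_kotype(port) still reads IKOT_VOUCHER, and a caller that wants the object beyond the port lock must take its own reference first. A self-contained toy of that guard, with stand-in types rather than the kernel's (illustration only):

#include <assert.h>
#include <stddef.h>

typedef enum { IKOT_NONE, IKOT_VOUCHER } ikot_t;
typedef struct { int refs; } voucher_t;
typedef struct { ikot_t kotype; void *kobject; } port_t;

static voucher_t *
port_to_voucher(port_t *port)
{
    if (port == NULL || port->kotype != IKOT_VOUCHER)
        return NULL;                /* wrong kobject type: refuse */
    voucher_t *v = port->kobject;
    v->refs++;                      /* pin it before the port lock drops */
    return v;
}

int main(void)
{
    voucher_t v = { .refs = 1 };
    port_t p = { IKOT_VOUCHER, &v };
    assert(port_to_voucher(&p) == &v && v.refs == 2);
    p.kotype = IKOT_NONE;           /* retyped port: lookup must fail */
    assert(port_to_voucher(&p) == NULL);
    return 0;
}

The unsafe_ variant skips only the reference step and returns a raw value, suitable as an opaque token but never for dereference.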
*/ - if (ip_kotype(port) != IKOT_VOUCHER) + if (ip_kotype(port) != IKOT_VOUCHER) { return IV_NULL; + } assert(ip_active(port)); ipc_voucher_reference(voucher); - return (voucher); + return voucher; } return IV_NULL; } @@ -427,7 +434,7 @@ convert_port_to_voucher( ipc_voucher_t convert_port_name_to_voucher( - mach_port_name_t voucher_name) + mach_port_name_t voucher_name) { ipc_voucher_t iv; kern_return_t kr; @@ -435,8 +442,9 @@ convert_port_name_to_voucher( if (MACH_PORT_VALID(voucher_name)) { kr = ipc_port_translate_send(current_space(), voucher_name, &port); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { return IV_NULL; + } iv = convert_port_to_voucher(port); ip_unlock(port); @@ -449,8 +457,9 @@ convert_port_name_to_voucher( void ipc_voucher_reference(ipc_voucher_t voucher) { - if (IPC_VOUCHER_NULL == voucher) + if (IPC_VOUCHER_NULL == voucher) { return; + } iv_reference(voucher); } @@ -458,8 +467,9 @@ ipc_voucher_reference(ipc_voucher_t voucher) void ipc_voucher_release(ipc_voucher_t voucher) { - if (IPC_VOUCHER_NULL != voucher) + if (IPC_VOUCHER_NULL != voucher) { iv_release(voucher); + } } /* @@ -468,7 +478,7 @@ ipc_voucher_release(ipc_voucher_t voucher) * Called whenever the Mach port system detects no-senders * on the voucher port. * - * Each time the send-right count goes positive, a no-senders + * Each time the send-right count goes positive, a no-senders * notification is armed (and a voucher reference is donated). * So, each notification that comes in must release a voucher * reference. If more send rights have been added since it @@ -495,10 +505,11 @@ ipc_voucher_notify(mach_msg_header_t *msg) ipc_port_t convert_voucher_to_port(ipc_voucher_t voucher) { - ipc_port_t port, send; + ipc_port_t port, send; - if (IV_NULL == voucher) - return (IP_NULL); + if (IV_NULL == voucher) { + return IP_NULL; + } assert(os_ref_get_count(&voucher->iv_refs) > 0); @@ -517,7 +528,7 @@ convert_voucher_to_port(ipc_voucher_t voucher) assert(port->ip_kobject == (ipc_kobject_t)voucher); } } - + ip_lock(port); assert(ip_active(port)); send = ipc_port_make_send_locked(port); @@ -535,7 +546,7 @@ convert_voucher_to_port(ipc_voucher_t voucher) ip_unlock(port); ipc_voucher_release(voucher); } - return (send); + return send; } #define ivace_reset_data(ivace_elem, next_index) { \ @@ -571,32 +582,33 @@ ivac_alloc(iv_index_t key_index) ivac = (ipc_voucher_attr_control_t)zalloc(ipc_voucher_attr_control_zone); - if (IVAC_NULL == ivac) + if (IVAC_NULL == ivac) { return IVAC_NULL; - + } + os_ref_init(&ivac->ivac_refs, &ivac_refgrp); ivac->ivac_is_growing = FALSE; ivac->ivac_port = IP_NULL; /* start with just the inline table */ - table = (ivac_entry_t) kalloc(IVAC_ENTRIES_MIN * sizeof(ivac_entry)); + table = (ivac_entry_t) kalloc(IVAC_ENTRIES_MIN * sizeof(ivac_entry)); ivac->ivac_table = table; ivac->ivac_table_size = IVAC_ENTRIES_MIN; ivac->ivac_init_table_size = IVAC_ENTRIES_MIN; for (i = 0; i < ivac->ivac_table_size; i++) { - ivace_reset_data(&table[i], i+1); + ivace_reset_data(&table[i], i + 1); } /* the default table entry is never on freelist */ table[0].ivace_next = IV_HASH_END; table[0].ivace_free = FALSE; - table[i-1].ivace_next = IV_FREELIST_END; + table[i - 1].ivace_next = IV_FREELIST_END; ivac->ivac_freelist = 1; ivac_lock_init(ivac); ivac->ivac_key_index = key_index; - return (ivac); + return ivac; } - + void ivac_dealloc(ipc_voucher_attr_control_t ivac) @@ -627,8 +639,9 @@ ivac_dealloc(ipc_voucher_attr_control_t ivac) ivgt_unlock(); /* release the reference held on the resource manager */ - if 
(IVAM_NULL != ivam) + if (IVAM_NULL != ivam) { (ivam->ivam_release)(ivam); + } /* * if a port was allocated for this voucher, @@ -649,9 +662,11 @@ ivac_dealloc(ipc_voucher_attr_control_t ivac) * table. */ #ifdef MACH_DEBUG - for (i = 0; i < ivac->ivac_table_size; i++) - if (ivac->ivac_table[i].ivace_refs != 0) + for (i = 0; i < ivac->ivac_table_size; i++) { + if (ivac->ivac_table[i].ivace_refs != 0) { panic("deallocing a resource manager with live refs to its attr values\n"); + } + } #endif kfree(ivac->ivac_table, ivac->ivac_table_size * sizeof(*ivac->ivac_table)); ivac_lock_destroy(ivac); @@ -681,7 +696,7 @@ ipc_voucher_attr_control_release(ipc_voucher_attr_control_t control) */ ipc_voucher_attr_control_t convert_port_to_voucher_attr_control( - ipc_port_t port) + ipc_port_t port) { if (IP_VALID(port)) { ipc_voucher_attr_control_t ivac = (ipc_voucher_attr_control_t) port->ip_kobject; @@ -692,13 +707,14 @@ convert_port_to_voucher_attr_control( * that reference keeps the voucher bound to the port * (and active). */ - if (ip_kotype(port) != IKOT_VOUCHER_ATTR_CONTROL) + if (ip_kotype(port) != IKOT_VOUCHER_ATTR_CONTROL) { return IVAC_NULL; + } assert(ip_active(port)); ivac_reference(ivac); - return (ivac); + return ivac; } return IVAC_NULL; } @@ -731,10 +747,11 @@ ipc_voucher_attr_control_notify(mach_msg_header_t *msg) ipc_port_t convert_voucher_attr_control_to_port(ipc_voucher_attr_control_t control) { - ipc_port_t port, send; + ipc_port_t port, send; - if (IVAC_NULL == control) - return (IP_NULL); + if (IVAC_NULL == control) { + return IP_NULL; + } /* create a port if needed */ port = control->ivac_port; @@ -751,8 +768,9 @@ convert_voucher_attr_control_to_port(ipc_voucher_attr_control_t control) assert(ip_kotype(port) == IKOT_VOUCHER_ATTR_CONTROL); assert(port->ip_kobject == (ipc_kobject_t)control); } - } else + } else { ip_lock(port); + } assert(ip_active(port)); send = ipc_port_make_send_locked(port); @@ -770,7 +788,7 @@ convert_voucher_attr_control_to_port(ipc_voucher_attr_control_t control) ip_unlock(port); ivac_release(control); } - return (send); + return send; } /* @@ -778,10 +796,10 @@ convert_voucher_attr_control_to_port(ipc_voucher_attr_control_t control) */ static void ivace_lookup_values( - iv_index_t key_index, - iv_index_t value_index, - mach_voucher_attr_value_handle_array_t values, - mach_voucher_attr_value_handle_array_size_t *count) + iv_index_t key_index, + iv_index_t value_index, + mach_voucher_attr_value_handle_array_t values, + mach_voucher_attr_value_handle_array_size_t *count) { ipc_voucher_attr_control_t ivac; ivac_entry_t ivace; @@ -845,19 +863,19 @@ ivac_grow_table(ipc_voucher_attr_control_t ivac) assert(new_size < IVAC_ENTRIES_MAX); new_table = kalloc(sizeof(ivac_entry) * new_size); - if (!new_table){ + if (!new_table) { panic("Failed to grow ivac table to size %d\n", new_size); return; } /* setup the free list for new entries */ for (i = old_size; i < new_size; i++) { - ivace_reset_data(&new_table[i], i+1); + ivace_reset_data(&new_table[i], i + 1); } ivac_lock(ivac); - - for (i = 0; i < ivac->ivac_table_size; i++){ + + for (i = 0; i < ivac->ivac_table_size; i++) { ivace_copy_data(&ivac->ivac_table[i], &new_table[i]); } @@ -865,14 +883,14 @@ ivac_grow_table(ipc_voucher_attr_control_t ivac) ivac->ivac_table = new_table; ivac->ivac_table_size = new_size; - + /* adding new free entries at head of freelist */ ivac->ivac_table[new_size - 1].ivace_next = ivac->ivac_freelist; ivac->ivac_freelist = old_size; ivac->ivac_is_growing = 0; ivac_wakeup(ivac); - if (old_table){ + 
if (old_table) { ivac_unlock(ivac); kfree(old_table, old_size * sizeof(ivac_entry)); ivac_lock(ivac); @@ -888,14 +906,15 @@ ivac_grow_table(ipc_voucher_attr_control_t ivac) */ static void ivace_reference_by_index( - iv_index_t key_index, - iv_index_t val_index) + iv_index_t key_index, + iv_index_t val_index) { ipc_voucher_attr_control_t ivac; ivac_entry_t ivace; - if (IV_UNUSED_VALINDEX == val_index) + if (IV_UNUSED_VALINDEX == val_index) { return; + } ivgt_lookup(key_index, FALSE, NULL, &ivac); assert(IVAC_NULL != ivac); @@ -926,8 +945,8 @@ ivace_reference_by_index( */ static iv_index_t ivace_reference_by_value( - ipc_voucher_attr_control_t ivac, - mach_voucher_attr_value_handle_t value, + ipc_voucher_attr_control_t ivac, + mach_voucher_attr_value_handle_t value, mach_voucher_attr_value_flags_t flag) { ivac_entry_t ivace = IVACE_NULL; @@ -938,7 +957,7 @@ ivace_reference_by_value( return IV_UNUSED_VALINDEX; } - ivac_lock(ivac); + ivac_lock(ivac); restart: hash_index = IV_HASH_VAL(ivac->ivac_init_table_size, value); index = ivac->ivac_table[hash_index].ivace_index; @@ -947,15 +966,16 @@ restart: ivace = &ivac->ivac_table[index]; assert(!ivace->ivace_free); - if (ivace->ivace_value == value) + if (ivace->ivace_value == value) { break; + } assert(ivace->ivace_next != index); index = ivace->ivace_next; } /* found it? */ - if (index != IV_HASH_END) { + if (index != IV_HASH_END) { /* only add reference on non-persistent value */ if (!ivace->ivace_persist) { ivace->ivace_refs++; @@ -1000,11 +1020,12 @@ restart: * Release a reference on the given pair. * * Conditions: called with nothing locked, as it may cause - * callouts and/or messaging to the resource + * callouts and/or messaging to the resource * manager. */ -static void ivace_release( - iv_index_t key_index, +static void +ivace_release( + iv_index_t key_index, iv_index_t value_index) { ipc_voucher_attr_control_t ivac; @@ -1017,8 +1038,9 @@ static void ivace_release( kern_return_t kr; /* cant release the default value */ - if (IV_UNUSED_VALINDEX == value_index) + if (IV_UNUSED_VALINDEX == value_index) { return; + } ivgt_lookup(key_index, FALSE, &ivam, &ivac); assert(IVAC_NULL != ivac); @@ -1058,7 +1080,7 @@ static void ivace_release( ivace->ivace_releasing = TRUE; value = ivace->ivace_value; - redrive: +redrive: assert(value == ivace->ivace_value); assert(!ivace->ivace_free); made = ivace->ivace_made; @@ -1080,18 +1102,20 @@ static void ivace_release( * re-drive the release. 
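The re-drive dance that follows is the subtle part of ivace_release(): the manager callout runs with the cache unlocked, so the entry's ivace_made count is sampled before the callout and compared after relocking; any drift means new references were handed out concurrently and teardown must retry or be abandoned. A single-threaded toy of the pattern, with the race simulated inside the callout and every name a simplified stand-in:

#include <assert.h>

typedef struct { unsigned made, refs; int freed; } slot_t;

/* fake manager callout; optionally simulates a racing new reference */
static int
manager_release(slot_t *s, int *race)
{
    if (*race) { s->made++; *race = 0; }
    return 0;                       /* KERN_SUCCESS analogue */
}

static void
release_slot(slot_t *s, int race)
{
redrive:
    ;
    unsigned made = s->made;        /* sampled under the (notional) lock */
    /* ... lock dropped here in the real code ... */
    int kr = manager_release(s, &race);
    /* ... lock retaken ... */
    if (s->made != made) {          /* drift: someone re-referenced */
        if (kr == 0)
            s->made -= made;        /* manager let go of what it saw */
        if (s->refs == 0)
            goto redrive;           /* still unreferenced: try again */
        return;                     /* the new holder finishes teardown */
    }
    s->freed = 1;                   /* no drift: safe to reclaim the slot */
}

int main(void)
{
    slot_t s = { .made = 3, .refs = 0, .freed = 0 };
    release_slot(&s, 1);            /* one simulated race, then a clean run */
    assert(s.freed && s.made == 1);
    return 0;
}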
*/ if (ivace->ivace_made != made) { - if (KERN_SUCCESS == kr) + if (KERN_SUCCESS == kr) { ivace->ivace_made -= made; + } - if (0 == ivace->ivace_refs) + if (0 == ivace->ivace_refs) { goto redrive; + } ivace->ivace_releasing = FALSE; ivac_unlock(ivac); return; } else { /* - * If the manager returned FAILURE, someone took a + * If the manager returned FAILURE, someone took a * reference on the value but have not updated the ivace, * release the lock and return since thread who got * the new reference will update the ivace and will have @@ -1155,16 +1179,17 @@ static void ivace_release( */ static void ivgt_lookup(iv_index_t key_index, - boolean_t take_reference, - ipc_voucher_attr_manager_t *manager, - ipc_voucher_attr_control_t *control) + boolean_t take_reference, + ipc_voucher_attr_manager_t *manager, + ipc_voucher_attr_control_t *control) { ipc_voucher_attr_control_t ivac; if (key_index < MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN) { ivgt_lock(); - if (NULL != manager) + if (NULL != manager) { *manager = iv_global_table[key_index].ivgte_manager; + } ivac = iv_global_table[key_index].ivgte_control; if (IVAC_NULL != ivac) { assert(key_index == ivac->ivac_key_index); @@ -1174,18 +1199,21 @@ ivgt_lookup(iv_index_t key_index, } } ivgt_unlock(); - if (NULL != control) + if (NULL != control) { *control = ivac; + } } else { - if (NULL != manager) + if (NULL != manager) { *manager = IVAM_NULL; - if (NULL != control) + } + if (NULL != control) { *control = IVAC_NULL; + } } } /* - * Routine: ipc_replace_voucher_value + * Routine: ipc_replace_voucher_value * Purpose: * Replace the value with the results of * running the supplied command through the resource @@ -1196,11 +1224,11 @@ ivgt_lookup(iv_index_t key_index, */ static kern_return_t ipc_replace_voucher_value( - ipc_voucher_t voucher, - mach_voucher_attr_key_t key, - mach_voucher_attr_recipe_command_t command, - ipc_voucher_t prev_voucher, - mach_voucher_attr_content_t content, + ipc_voucher_t voucher, + mach_voucher_attr_key_t key, + mach_voucher_attr_recipe_command_t command, + ipc_voucher_t prev_voucher, + mach_voucher_attr_content_t content, mach_voucher_attr_content_size_t content_size) { mach_voucher_attr_value_handle_t previous_vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED]; @@ -1215,15 +1243,16 @@ ipc_replace_voucher_value( iv_index_t val_index; iv_index_t key_index; kern_return_t kr; - + /* * Get the manager for this key_index. * Returns a reference on the control. */ key_index = iv_key_to_index(key); ivgt_lookup(key_index, TRUE, &ivam, &ivac); - if (IVAM_NULL == ivam) + if (IVAM_NULL == ivam) { return KERN_INVALID_ARGUMENT; + } /* save the current value stored in the forming voucher */ save_val_index = iv_lookup(voucher, key_index); @@ -1235,26 +1264,27 @@ ipc_replace_voucher_value( * in the forming voucher. */ prev_val_index = (IV_NULL != prev_voucher) ? 
- iv_lookup(prev_voucher, key_index) : - save_val_index; + iv_lookup(prev_voucher, key_index) : + save_val_index; ivace_lookup_values(key_index, prev_val_index, - previous_vals, &previous_vals_count); + previous_vals, &previous_vals_count); /* Call out to resource manager to get new value */ new_value_voucher = IV_NULL; kr = (ivam->ivam_get_value)( - ivam, key, command, - previous_vals, previous_vals_count, - content, content_size, - &new_value, &new_flag, &new_value_voucher); + ivam, key, command, + previous_vals, previous_vals_count, + content, content_size, + &new_value, &new_flag, &new_value_voucher); if (KERN_SUCCESS != kr) { ivac_release(ivac); return kr; } /* TODO: value insertion from returned voucher */ - if (IV_NULL != new_value_voucher) + if (IV_NULL != new_value_voucher) { iv_release(new_value_voucher); + } /* * Find or create a slot in the table associated @@ -1272,12 +1302,12 @@ ipc_replace_voucher_value( * as was there before. */ ivace_release(key_index, save_val_index); - + return KERN_SUCCESS; } /* - * Routine: ipc_directly_replace_voucher_value + * Routine: ipc_directly_replace_voucher_value * Purpose: * Replace the value with the value-handle * supplied directly by the attribute manager. @@ -1288,24 +1318,25 @@ ipc_replace_voucher_value( */ static kern_return_t ipc_directly_replace_voucher_value( - ipc_voucher_t voucher, - mach_voucher_attr_key_t key, - mach_voucher_attr_value_handle_t new_value) + ipc_voucher_t voucher, + mach_voucher_attr_key_t key, + mach_voucher_attr_value_handle_t new_value) { ipc_voucher_attr_manager_t ivam; ipc_voucher_attr_control_t ivac; iv_index_t save_val_index; iv_index_t val_index; iv_index_t key_index; - + /* * Get the manager for this key_index. * Returns a reference on the control. */ key_index = iv_key_to_index(key); ivgt_lookup(key_index, TRUE, &ivam, &ivac); - if (IVAM_NULL == ivam) + if (IVAM_NULL == ivam) { return KERN_INVALID_ARGUMENT; + } /* save the current value stored in the forming voucher */ save_val_index = iv_lookup(voucher, key_index); @@ -1317,7 +1348,7 @@ ipc_directly_replace_voucher_value( * we find a matching existing value. */ val_index = ivace_reference_by_value(ivac, new_value, - MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE); + MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE); iv_set(voucher, key_index, val_index); /* @@ -1327,26 +1358,25 @@ ipc_directly_replace_voucher_value( * as was there before. */ ivace_release(key_index, save_val_index); - + return KERN_SUCCESS; } static kern_return_t ipc_execute_voucher_recipe_command( - ipc_voucher_t voucher, - mach_voucher_attr_key_t key, - mach_voucher_attr_recipe_command_t command, - ipc_voucher_t prev_iv, - mach_voucher_attr_content_t content, - mach_voucher_attr_content_size_t content_size, - boolean_t key_priv) + ipc_voucher_t voucher, + mach_voucher_attr_key_t key, + mach_voucher_attr_recipe_command_t command, + ipc_voucher_t prev_iv, + mach_voucher_attr_content_t content, + mach_voucher_attr_content_size_t content_size, + boolean_t key_priv) { iv_index_t prev_val_index; iv_index_t val_index; kern_return_t kr; switch (command) { - /* * MACH_VOUCHER_ATTR_COPY * Copy the attribute(s) from the previous voucher to the new @@ -1355,22 +1385,24 @@ ipc_execute_voucher_recipe_command( * voucher. */ case MACH_VOUCHER_ATTR_COPY: - + /* no recipe data on a copy */ - if (0 < content_size) + if (0 < content_size) { return KERN_INVALID_ARGUMENT; + } /* nothing to copy from? 
- done */ - if (IV_NULL == prev_iv) + if (IV_NULL == prev_iv) { return KERN_SUCCESS; + } if (MACH_VOUCHER_ATTR_KEY_ALL == key) { iv_index_t limit, j; /* reconcile possible difference in voucher sizes */ limit = (prev_iv->iv_table_size < voucher->iv_table_size) ? - prev_iv->iv_table_size : - voucher->iv_table_size; + prev_iv->iv_table_size : + voucher->iv_table_size; /* wildcard matching */ for (j = 0; j < limit; j++) { @@ -1388,8 +1420,9 @@ ipc_execute_voucher_recipe_command( /* copy just one key */ key_index = iv_key_to_index(key); - if (ivgt_keys_in_use < key_index) + if (ivgt_keys_in_use < key_index) { return KERN_INVALID_ARGUMENT; + } /* release old value being replaced */ val_index = iv_lookup(voucher, key_index); @@ -1412,16 +1445,17 @@ ipc_execute_voucher_recipe_command( */ case MACH_VOUCHER_ATTR_REMOVE: /* no recipe data on a remove */ - if (0 < content_size) + if (0 < content_size) { return KERN_INVALID_ARGUMENT; + } if (MACH_VOUCHER_ATTR_KEY_ALL == key) { iv_index_t limit, j; /* reconcile possible difference in voucher sizes */ limit = (IV_NULL == prev_iv) ? voucher->iv_table_size : - ((prev_iv->iv_table_size < voucher->iv_table_size) ? - prev_iv->iv_table_size : voucher->iv_table_size); + ((prev_iv->iv_table_size < voucher->iv_table_size) ? + prev_iv->iv_table_size : voucher->iv_table_size); /* wildcard matching */ for (j = 0; j < limit; j++) { @@ -1430,8 +1464,9 @@ ipc_execute_voucher_recipe_command( /* If not matched in previous, skip */ if (IV_NULL != prev_iv) { prev_val_index = iv_lookup(prev_iv, j); - if (val_index != prev_val_index) + if (val_index != prev_val_index) { continue; + } } /* release and clear */ ivace_release(j, val_index); @@ -1442,16 +1477,18 @@ ipc_execute_voucher_recipe_command( /* copy just one key */ key_index = iv_key_to_index(key); - if (ivgt_keys_in_use < key_index) + if (ivgt_keys_in_use < key_index) { return KERN_INVALID_ARGUMENT; + } val_index = iv_lookup(voucher, key_index); /* If not matched in previous, skip */ if (IV_NULL != prev_iv) { prev_val_index = iv_lookup(prev_iv, key_index); - if (val_index != prev_val_index) + if (val_index != prev_val_index) { break; + } } /* release and clear */ @@ -1470,17 +1507,20 @@ ipc_execute_voucher_recipe_command( if (key_priv) { mach_voucher_attr_value_handle_t new_value; - if (sizeof(mach_voucher_attr_value_handle_t) != content_size) + if (sizeof(mach_voucher_attr_value_handle_t) != content_size) { return KERN_INVALID_ARGUMENT; - + } + new_value = *(mach_voucher_attr_value_handle_t *)(void *)content; kr = ipc_directly_replace_voucher_value(voucher, - key, - new_value); - if (KERN_SUCCESS != kr) + key, + new_value); + if (KERN_SUCCESS != kr) { return kr; - } else + } + } else { return KERN_INVALID_CAPABILITY; + } break; /* @@ -1488,19 +1528,20 @@ ipc_execute_voucher_recipe_command( * Redeem the attribute(s) from the previous voucher for a possibly * new value in the new voucher. A wildcard key is an acceptable value, * indicating a desire to redeem all the values. - */ + */ case MACH_VOUCHER_ATTR_REDEEM: if (MACH_VOUCHER_ATTR_KEY_ALL == key) { iv_index_t limit, j; /* reconcile possible difference in voucher sizes */ - if (IV_NULL != prev_iv) + if (IV_NULL != prev_iv) { limit = (prev_iv->iv_table_size < voucher->iv_table_size) ? 
- prev_iv->iv_table_size : - voucher->iv_table_size; - else + prev_iv->iv_table_size : + voucher->iv_table_size; + } else { limit = voucher->iv_table_size; + } /* wildcard matching */ for (j = 0; j < limit; j++) { @@ -1509,22 +1550,24 @@ ipc_execute_voucher_recipe_command( j_key = iv_index_to_key(j); /* skip non-existent managers */ - if (MACH_VOUCHER_ATTR_KEY_NONE == j_key) + if (MACH_VOUCHER_ATTR_KEY_NONE == j_key) { continue; + } /* get the new value from redeem (skip empty previous) */ kr = ipc_replace_voucher_value(voucher, - j_key, - command, - prev_iv, - content, - content_size); - if (KERN_SUCCESS != kr) + j_key, + command, + prev_iv, + content, + content_size); + if (KERN_SUCCESS != kr) { return kr; + } } break; } - /* fall thru for single key redemption */ + /* fall thru for single key redemption */ /* * DEFAULT: @@ -1534,13 +1577,14 @@ ipc_execute_voucher_recipe_command( */ default: kr = ipc_replace_voucher_value(voucher, - key, - command, - prev_iv, - content, - content_size); - if (KERN_SUCCESS != kr) + key, + command, + prev_iv, + content, + content_size); + if (KERN_SUCCESS != kr) { return kr; + } break; } @@ -1548,7 +1592,7 @@ ipc_execute_voucher_recipe_command( } /* - * Routine: iv_checksum + * Routine: iv_checksum * Purpose: * Compute the voucher sum. This is more position- * relevant than many other checksums - important for @@ -1562,13 +1606,13 @@ iv_checksum(ipc_voucher_t voucher, boolean_t *emptyp) boolean_t empty = TRUE; if (0 < voucher->iv_table_size) { iv_index_t i = voucher->iv_table_size - 1; - + do { iv_index_t v = voucher->iv_table[i]; - c = c << 3 | c >> (32 - 3); /* rotate */ - c = ~c; /* invert */ + c = c << 3 | c >> (32 - 3); /* rotate */ + c = ~c; /* invert */ if (0 < v) { - c += v; /* add in */ + c += v; /* add in */ empty = FALSE; } } while (0 < i--); @@ -1578,7 +1622,7 @@ iv_checksum(ipc_voucher_t voucher, boolean_t *emptyp) } /* - * Routine: iv_dedup + * Routine: iv_dedup * Purpose: * See if the set of values represented by this new voucher * already exist in another voucher. If so return a reference @@ -1593,7 +1637,7 @@ static ipc_voucher_t iv_dedup(ipc_voucher_t new_iv) { boolean_t empty; - iv_index_t sum; + iv_index_t sum; iv_index_t hash; ipc_voucher_t iv; @@ -1616,20 +1660,26 @@ iv_dedup(ipc_voucher_t new_iv) iv_index_t i; assert(iv->iv_table_size <= new_iv->iv_table_size); - + /* and common entries match... */ - for (i = 0; i < iv->iv_table_size; i++) - if (iv->iv_table[i] != new_iv->iv_table[i]) + for (i = 0; i < iv->iv_table_size; i++) { + if (iv->iv_table[i] != new_iv->iv_table[i]) { break; - if (i < iv->iv_table_size) + } + } + if (i < iv->iv_table_size) { continue; + } /* and all extra entries in new one are unused... */ - while (i < new_iv->iv_table_size) - if (new_iv->iv_table[i++] != IV_UNUSED_VALINDEX) + while (i < new_iv->iv_table_size) { + if (new_iv->iv_table[i++] != IV_UNUSED_VALINDEX) { break; - if (i < new_iv->iv_table_size) + } + } + if (i < new_iv->iv_table_size) { continue; + } /* ... we found a match... 
*/ @@ -1710,25 +1760,25 @@ iv_dedup(ipc_voucher_t new_iv) size_t remainder = payload_size % PAYLOAD_PER_TRACEPOINT; if (remainder) { bzero((uint8_t*)payload + payload_size, - PAYLOAD_PER_TRACEPOINT - remainder); + PAYLOAD_PER_TRACEPOINT - remainder); } } KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_CREATE), - voucher_addr, new_iv->iv_table_size, ivht_count, - payload_size); + voucher_addr, new_iv->iv_table_size, ivht_count, + payload_size); uintptr_t index = 0; while (attr_tracepoints_needed--) { KDBG(MACHDBG_CODE(DBG_MACH_IPC, - MACH_IPC_VOUCHER_CREATE_ATTR_DATA), payload[index], - payload[index + 1], payload[index + 2], - payload[index + 3]); + MACH_IPC_VOUCHER_CREATE_ATTR_DATA), payload[index], + payload[index + 1], payload[index + 2], + payload[index + 3]); index += 4; } } else { KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_CREATE), - voucher_addr, new_iv->iv_table_size, ivht_count); + voucher_addr, new_iv->iv_table_size, ivht_count); } } #endif /* KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD */ @@ -1737,7 +1787,7 @@ iv_dedup(ipc_voucher_t new_iv) } /* - * Routine: ipc_create_mach_voucher + * Routine: ipc_create_mach_voucher * Purpose: * Create a new mach voucher and initialize it with the * value(s) created by having the appropriate resource @@ -1750,9 +1800,9 @@ iv_dedup(ipc_voucher_t new_iv) */ kern_return_t ipc_create_mach_voucher( - ipc_voucher_attr_raw_recipe_array_t recipes, - ipc_voucher_attr_raw_recipe_array_size_t recipe_size, - ipc_voucher_t *new_voucher) + ipc_voucher_attr_raw_recipe_array_t recipes, + ipc_voucher_attr_raw_recipe_array_size_t recipe_size, + ipc_voucher_t *new_voucher) { ipc_voucher_attr_recipe_t sub_recipe; ipc_voucher_attr_recipe_size_t recipe_used = 0; @@ -1767,12 +1817,12 @@ ipc_create_mach_voucher( /* allocate a voucher */ voucher = iv_alloc(ivgt_keys_in_use); - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_RESOURCE_SHORTAGE; + } /* iterate over the recipe items */ while (0 < recipe_size - recipe_used) { - if (recipe_size - recipe_used < sizeof(*sub_recipe)) { kr = KERN_INVALID_ARGUMENT; break; @@ -1787,14 +1837,15 @@ ipc_create_mach_voucher( recipe_used += sizeof(*sub_recipe) + sub_recipe->content_size; kr = ipc_execute_voucher_recipe_command(voucher, - sub_recipe->key, - sub_recipe->command, - sub_recipe->previous_voucher, - sub_recipe->content, - sub_recipe->content_size, - FALSE); - if (KERN_SUCCESS != kr) + sub_recipe->key, + sub_recipe->command, + sub_recipe->previous_voucher, + sub_recipe->content, + sub_recipe->content_size, + FALSE); + if (KERN_SUCCESS != kr) { break; + } } if (KERN_SUCCESS == kr) { @@ -1807,7 +1858,7 @@ ipc_create_mach_voucher( } /* - * Routine: ipc_voucher_attr_control_create_mach_voucher + * Routine: ipc_voucher_attr_control_create_mach_voucher * Purpose: * Create a new mach voucher and initialize it with the * value(s) created by having the appropriate resource @@ -1828,10 +1879,10 @@ ipc_create_mach_voucher( */ kern_return_t ipc_voucher_attr_control_create_mach_voucher( - ipc_voucher_attr_control_t control, - ipc_voucher_attr_raw_recipe_array_t recipes, - ipc_voucher_attr_raw_recipe_array_size_t recipe_size, - ipc_voucher_t *new_voucher) + ipc_voucher_attr_control_t control, + ipc_voucher_attr_raw_recipe_array_t recipes, + ipc_voucher_attr_raw_recipe_array_size_t recipe_size, + ipc_voucher_t *new_voucher) { mach_voucher_attr_key_t control_key; ipc_voucher_attr_recipe_t sub_recipe; @@ -1839,8 +1890,9 @@ ipc_voucher_attr_control_create_mach_voucher( ipc_voucher_t voucher = IV_NULL; kern_return_t kr = 
KERN_SUCCESS; - if (IPC_VOUCHER_ATTR_CONTROL_NULL == control) + if (IPC_VOUCHER_ATTR_CONTROL_NULL == control) { return KERN_INVALID_CAPABILITY; + } /* if nothing to do ... */ if (0 == recipe_size) { @@ -1850,14 +1902,14 @@ ipc_voucher_attr_control_create_mach_voucher( /* allocate new voucher */ voucher = iv_alloc(ivgt_keys_in_use); - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_RESOURCE_SHORTAGE; + } control_key = iv_index_to_key(control->ivac_key_index); /* iterate over the recipe items */ while (0 < recipe_size - recipe_used) { - if (recipe_size - recipe_used < sizeof(*sub_recipe)) { kr = KERN_INVALID_ARGUMENT; break; @@ -1872,14 +1924,15 @@ ipc_voucher_attr_control_create_mach_voucher( recipe_used += sizeof(*sub_recipe) + sub_recipe->content_size; kr = ipc_execute_voucher_recipe_command(voucher, - sub_recipe->key, - sub_recipe->command, - sub_recipe->previous_voucher, - sub_recipe->content, - sub_recipe->content_size, - (sub_recipe->key == control_key)); - if (KERN_SUCCESS != kr) + sub_recipe->key, + sub_recipe->command, + sub_recipe->previous_voucher, + sub_recipe->content, + sub_recipe->content_size, + (sub_recipe->key == control_key)); + if (KERN_SUCCESS != kr) { break; + } } if (KERN_SUCCESS == kr) { @@ -1892,7 +1945,7 @@ ipc_voucher_attr_control_create_mach_voucher( } /* - * ipc_register_well_known_mach_voucher_attr_manager + * ipc_register_well_known_mach_voucher_attr_manager * * Register the resource manager responsible for a given key value. */ @@ -1900,23 +1953,26 @@ kern_return_t ipc_register_well_known_mach_voucher_attr_manager( ipc_voucher_attr_manager_t manager, mach_voucher_attr_value_handle_t default_value, - mach_voucher_attr_key_t key, + mach_voucher_attr_key_t key, ipc_voucher_attr_control_t *control) { ipc_voucher_attr_control_t new_control; iv_index_t key_index; iv_index_t hash_index; - if (IVAM_NULL == manager) + if (IVAM_NULL == manager) { return KERN_INVALID_ARGUMENT; + } key_index = iv_key_to_index(key); - if (IV_UNUSED_KEYINDEX == key_index) + if (IV_UNUSED_KEYINDEX == key_index) { return KERN_INVALID_ARGUMENT; + } new_control = ivac_alloc(key_index); - if (IVAC_NULL == new_control) + if (IVAC_NULL == new_control) { return KERN_RESOURCE_SHORTAGE; + } /* insert the default value into slot 0 */ new_control->ivac_table[IV_UNUSED_VALINDEX].ivace_value = default_value; @@ -1943,7 +1999,7 @@ ipc_register_well_known_mach_voucher_attr_manager( new_control->ivac_table[hash_index].ivace_index = IV_UNUSED_VALINDEX; ivgt_unlock(); - + /* return the reference on the new cache control to the caller */ *control = new_control; @@ -1951,7 +2007,7 @@ ipc_register_well_known_mach_voucher_attr_manager( } /* - * Routine: mach_voucher_extract_attr_content + * Routine: mach_voucher_extract_attr_content * Purpose: * Extract the content for a given pair. 
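Both ipc_create_mach_voucher() and ipc_voucher_attr_control_create_mach_voucher() above walk the same packed, variable-length recipe array: a fixed sub-recipe header followed by content_size bytes, cursor advanced by sizeof(*sub_recipe) + content_size, with header and content each validated against the remaining buffer before use. A userspace sketch of that walk, using a stand-in header layout rather than the real mach_voucher_attr_recipe_data_t:

#include <stdint.h>
#include <string.h>

typedef struct {
    uint32_t key;
    uint32_t command;
    uint32_t content_size;          /* content_size bytes follow */
} recipe_hdr_t;

static int
walk_recipes(const uint8_t *buf, uint32_t size)
{
    uint32_t used = 0;

    while (size - used > 0) {
        recipe_hdr_t hdr;

        if (size - used < sizeof(hdr))
            return -1;              /* truncated header */
        memcpy(&hdr, buf + used, sizeof(hdr));   /* alignment-safe read */
        if (hdr.content_size > size - used - sizeof(hdr))
            return -1;              /* content overruns the buffer */
        /* ... execute one sub-recipe here ... */
        used += sizeof(hdr) + hdr.content_size;
    }
    return 0;
}

The two bounds tests mirror the kernel's checks that break out with KERN_INVALID_ARGUMENT before any sub-recipe content is touched.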
* @@ -1966,10 +2022,10 @@ ipc_register_well_known_mach_voucher_attr_manager( */ kern_return_t mach_voucher_extract_attr_content( - ipc_voucher_t voucher, - mach_voucher_attr_key_t key, - mach_voucher_attr_content_t content, - mach_voucher_attr_content_size_t *in_out_size) + ipc_voucher_t voucher, + mach_voucher_attr_key_t key, + mach_voucher_attr_content_t content, + mach_voucher_attr_content_size_t *in_out_size) { mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED]; mach_voucher_attr_value_handle_array_size_t vals_count; @@ -1980,8 +2036,9 @@ mach_voucher_extract_attr_content( kern_return_t kr; - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_INVALID_ARGUMENT; + } key_index = iv_key_to_index(key); @@ -2007,20 +2064,20 @@ mach_voucher_extract_attr_content( * for this value_index. */ ivace_lookup_values(key_index, value_index, - vals, &vals_count); + vals, &vals_count); assert(0 < vals_count); /* callout to manager */ - - kr = (manager->ivam_extract_content)(manager, key, - vals, vals_count, - &command, - content, in_out_size); + + kr = (manager->ivam_extract_content)(manager, key, + vals, vals_count, + &command, + content, in_out_size); return kr; } /* - * Routine: mach_voucher_extract_attr_recipe + * Routine: mach_voucher_extract_attr_recipe * Purpose: * Extract a recipe for a given pair. * @@ -2035,10 +2092,10 @@ mach_voucher_extract_attr_content( */ kern_return_t mach_voucher_extract_attr_recipe( - ipc_voucher_t voucher, - mach_voucher_attr_key_t key, - mach_voucher_attr_raw_recipe_t raw_recipe, - mach_voucher_attr_raw_recipe_size_t *in_out_size) + ipc_voucher_t voucher, + mach_voucher_attr_key_t key, + mach_voucher_attr_raw_recipe_t raw_recipe, + mach_voucher_attr_raw_recipe_size_t *in_out_size) { mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED]; mach_voucher_attr_value_handle_array_size_t vals_count; @@ -2049,8 +2106,9 @@ mach_voucher_extract_attr_recipe( kern_return_t kr; - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_INVALID_ARGUMENT; + } key_index = iv_key_to_index(key); @@ -2060,8 +2118,9 @@ mach_voucher_extract_attr_recipe( return KERN_SUCCESS; } - if (*in_out_size < sizeof(*recipe)) + if (*in_out_size < sizeof(*recipe)) { return KERN_NO_SPACE; + } recipe = (mach_voucher_attr_recipe_t)(void *)raw_recipe; recipe->key = key; @@ -2085,17 +2144,17 @@ mach_voucher_extract_attr_recipe( * for this value_index. */ ivace_lookup_values(key_index, value_index, - vals, &vals_count); + vals, &vals_count); assert(0 < vals_count); /* callout to manager */ - kr = (manager->ivam_extract_content)(manager, key, - vals, vals_count, - &recipe->command, - recipe->content, &recipe->content_size); + kr = (manager->ivam_extract_content)(manager, key, + vals, vals_count, + &recipe->command, + recipe->content, &recipe->content_size); if (KERN_SUCCESS == kr) { - assert(*in_out_size - sizeof(*recipe) >= recipe->content_size); - *in_out_size = sizeof(*recipe) + recipe->content_size; + assert(*in_out_size - sizeof(*recipe) >= recipe->content_size); + *in_out_size = sizeof(*recipe) + recipe->content_size; } return kr; @@ -2104,27 +2163,28 @@ mach_voucher_extract_attr_recipe( /* - * Routine: mach_voucher_extract_all_attr_recipes + * Routine: mach_voucher_extract_all_attr_recipes * Purpose: * Extract all the (non-default) contents for a given voucher, - * building up a recipe that could be provided to a future + * building up a recipe that could be provided to a future * voucher creation call. 
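The extract routines here share one in/out size convention: the caller passes buffer capacity in *in_out_size, the routine fails with KERN_NO_SPACE if even the fixed header cannot fit, offers the remaining bytes to the manager callout, and on success rewrites *in_out_size to the bytes actually produced. A compact userspace sketch of the convention (names and the error constant are stand-ins):

#include <stdint.h>
#include <string.h>

enum { TOY_SUCCESS = 0, TOY_NO_SPACE = 1 };

typedef struct { uint32_t content_size; /* content follows */ } toy_hdr_t;

static int
toy_extract(const uint8_t *src, uint32_t src_len,
    uint8_t *buf, uint32_t *in_out_size)
{
    toy_hdr_t hdr;

    if (*in_out_size < sizeof(hdr))
        return TOY_NO_SPACE;                    /* header cannot even fit */

    hdr.content_size = *in_out_size - sizeof(hdr);  /* offer the remainder */
    if (hdr.content_size > src_len)
        hdr.content_size = src_len;             /* "manager" shrinks to fit */

    memcpy(buf, &hdr, sizeof(hdr));
    memcpy(buf + sizeof(hdr), src, hdr.content_size);
    *in_out_size = sizeof(hdr) + hdr.content_size;  /* bytes produced */
    return TOY_SUCCESS;
}

The assert the patch reflows in mach_voucher_extract_attr_recipe() states exactly this invariant: whatever size the callout reports back must still fit inside the capacity the caller originally offered.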
- * Conditions: + * Conditions: * Nothing locked (may invoke user-space). * Caller holds a reference on the supplied voucher. */ kern_return_t mach_voucher_extract_all_attr_recipes( - ipc_voucher_t voucher, - mach_voucher_attr_raw_recipe_array_t recipes, - mach_voucher_attr_raw_recipe_array_size_t *in_out_size) + ipc_voucher_t voucher, + mach_voucher_attr_raw_recipe_array_t recipes, + mach_voucher_attr_raw_recipe_array_size_t *in_out_size) { mach_voucher_attr_recipe_size_t recipe_size = *in_out_size; mach_voucher_attr_recipe_size_t recipe_used = 0; iv_index_t key_index; - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_INVALID_ARGUMENT; + } for (key_index = 0; key_index < voucher->iv_table_size; key_index++) { mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED]; @@ -2138,11 +2198,13 @@ mach_voucher_extract_all_attr_recipes( /* don't output anything for a default value */ value_index = iv_lookup(voucher, key_index); - if (IV_UNUSED_VALINDEX == value_index) + if (IV_UNUSED_VALINDEX == value_index) { continue; + } - if (recipe_size - recipe_used < sizeof(*recipe)) + if (recipe_size - recipe_used < sizeof(*recipe)) { return KERN_NO_SPACE; + } /* * Get the manager for this key_index. The @@ -2164,7 +2226,7 @@ mach_voucher_extract_all_attr_recipes( * for this value_index. */ ivace_lookup_values(key_index, value_index, - vals, &vals_count); + vals, &vals_count); assert(0 < vals_count); key = iv_index_to_key(key_index); @@ -2174,12 +2236,13 @@ mach_voucher_extract_all_attr_recipes( recipe->content_size = content_size; /* callout to manager */ - kr = (manager->ivam_extract_content)(manager, key, - vals, vals_count, - &recipe->command, - recipe->content, &recipe->content_size); - if (KERN_SUCCESS != kr) + kr = (manager->ivam_extract_content)(manager, key, + vals, vals_count, + &recipe->command, + recipe->content, &recipe->content_size); + if (KERN_SUCCESS != kr) { return kr; + } assert(recipe->content_size <= content_size); recipe_used += sizeof(*recipe) + recipe->content_size; @@ -2190,10 +2253,10 @@ mach_voucher_extract_all_attr_recipes( } /* - * Routine: mach_voucher_debug_info + * Routine: mach_voucher_debug_info * Purpose: * Extract all the (non-default) contents for a given mach port name, - * building up a recipe that could be provided to a future + * building up a recipe that could be provided to a future * voucher creation call. * Conditions: * Nothing locked (may invoke user-space). 
@@ -2202,20 +2265,20 @@ mach_voucher_extract_all_attr_recipes( #if !(DEVELOPMENT || DEBUG) kern_return_t mach_voucher_debug_info( - ipc_space_t __unused space, - mach_port_name_t __unused voucher_name, - mach_voucher_attr_raw_recipe_array_t __unused recipes, - mach_voucher_attr_raw_recipe_array_size_t __unused *in_out_size) + ipc_space_t __unused space, + mach_port_name_t __unused voucher_name, + mach_voucher_attr_raw_recipe_array_t __unused recipes, + mach_voucher_attr_raw_recipe_array_size_t __unused *in_out_size) { return KERN_NOT_SUPPORTED; } #else kern_return_t mach_voucher_debug_info( - ipc_space_t space, - mach_port_name_t voucher_name, - mach_voucher_attr_raw_recipe_array_t recipes, - mach_voucher_attr_raw_recipe_array_size_t *in_out_size) + ipc_space_t space, + mach_port_name_t voucher_name, + mach_voucher_attr_raw_recipe_array_t recipes, + mach_voucher_attr_raw_recipe_array_size_t *in_out_size) { ipc_voucher_t voucher = IPC_VOUCHER_NULL; kern_return_t kr; @@ -2226,8 +2289,9 @@ mach_voucher_debug_info( } kr = ipc_port_translate_send(space, voucher_name, &port); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { return KERN_INVALID_ARGUMENT; + } voucher = convert_port_to_voucher(port); ip_unlock(port); @@ -2243,7 +2307,7 @@ mach_voucher_debug_info( #endif /* - * Routine: mach_voucher_attr_command + * Routine: mach_voucher_attr_command * Purpose: * Invoke an attribute-specific command through this voucher. * @@ -2256,13 +2320,13 @@ mach_voucher_debug_info( */ kern_return_t mach_voucher_attr_command( - ipc_voucher_t voucher, - mach_voucher_attr_key_t key, - mach_voucher_attr_command_t command, - mach_voucher_attr_content_t in_content, - mach_voucher_attr_content_size_t in_content_size, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *out_content_size) + ipc_voucher_t voucher, + mach_voucher_attr_key_t key, + mach_voucher_attr_command_t command, + mach_voucher_attr_content_t in_content, + mach_voucher_attr_content_size_t in_content_size, + mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_size_t *out_content_size) { mach_voucher_attr_value_handle_t vals[MACH_VOUCHER_ATTR_VALUE_MAX_NESTED]; mach_voucher_attr_value_handle_array_size_t vals_count; @@ -2273,8 +2337,9 @@ mach_voucher_attr_command( kern_return_t kr; - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_INVALID_ARGUMENT; + } key_index = iv_key_to_index(key); @@ -2299,14 +2364,14 @@ mach_voucher_attr_command( */ value_index = iv_lookup(voucher, key_index); ivace_lookup_values(key_index, value_index, - vals, &vals_count); + vals, &vals_count); /* callout to manager */ - kr = (manager->ivam_command)(manager, key, - vals, vals_count, - command, - in_content, in_content_size, - out_content, out_content_size); + kr = (manager->ivam_command)(manager, key, + vals, vals_count, + command, + in_content, in_content_size, + out_content, out_content_size); /* release reference on control */ ivac_release(control); @@ -2315,7 +2380,7 @@ mach_voucher_attr_command( } /* - * Routine: mach_voucher_attr_control_get_values + * Routine: mach_voucher_attr_control_get_values * Purpose: * For a given voucher, get the value handle associated with the * specified attribute manager. 
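The get-values routine below is the data model in miniature: a voucher stores, per attribute key, only an index into that key's attribute-control cache, and the cache slot holds the manager's opaque value handle, with index 0 permanently reserved for the unused/default value. A self-contained toy of the two-level lookup (names and sizes are stand-ins, not kernel types):

#include <assert.h>
#include <stdint.h>

#define KEYS    4
#define UNUSED  0                   /* slot 0 = shared default value */

typedef struct { uint32_t table[KEYS]; } toy_voucher_t;
typedef struct { uint64_t handle; uint32_t refs; } cache_slot_t;

static uint64_t
get_value(const toy_voucher_t *v, const cache_slot_t *cache,
    uint32_t key_index)
{
    uint32_t vi = (key_index < KEYS) ? v->table[key_index] : UNUSED;
    return cache[vi].handle;        /* out-of-range keys read the default */
}

int main(void)
{
    cache_slot_t cache[8] = { [0] = { 0, 1 }, [3] = { 0xabcd, 1 } };
    toy_voucher_t v = { .table = { UNUSED, 3, UNUSED, UNUSED } };

    assert(get_value(&v, cache, 1) == 0xabcd);  /* key 1 -> cache slot 3 */
    assert(get_value(&v, cache, 2) == 0);       /* unset key -> default */
    return 0;
}

Keeping only indexes in the voucher itself is what makes iv_dedup()'s whole-table compare and checksum cheap.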
@@ -2329,26 +2394,29 @@ mach_voucher_attr_control_get_values( { iv_index_t key_index, value_index; - if (IPC_VOUCHER_ATTR_CONTROL_NULL == control) + if (IPC_VOUCHER_ATTR_CONTROL_NULL == control) { return KERN_INVALID_CAPABILITY; + } - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_INVALID_ARGUMENT; + } - if (0 == *in_out_size) + if (0 == *in_out_size) { return KERN_SUCCESS; + } key_index = control->ivac_key_index; assert(os_ref_get_count(&voucher->iv_refs) > 0); value_index = iv_lookup(voucher, key_index); ivace_lookup_values(key_index, value_index, - out_values, in_out_size); + out_values, in_out_size); return KERN_SUCCESS; } /* - * Routine: mach_voucher_attr_control_create_mach_voucher + * Routine: mach_voucher_attr_control_create_mach_voucher * Purpose: * Create a new mach voucher and initialize it by processing the * supplied recipe(s). @@ -2376,8 +2444,9 @@ mach_voucher_attr_control_create_mach_voucher( ipc_voucher_t voucher = IV_NULL; kern_return_t kr = KERN_SUCCESS; - if (IPC_VOUCHER_ATTR_CONTROL_NULL == control) + if (IPC_VOUCHER_ATTR_CONTROL_NULL == control) { return KERN_INVALID_CAPABILITY; + } /* if nothing to do ... */ if (0 == recipe_size) { @@ -2387,8 +2456,9 @@ mach_voucher_attr_control_create_mach_voucher( /* allocate new voucher */ voucher = iv_alloc(ivgt_keys_in_use); - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_RESOURCE_SHORTAGE; + } control_key = iv_index_to_key(control->ivac_key_index); @@ -2417,16 +2487,17 @@ mach_voucher_attr_control_create_mach_voucher( } kr = ipc_execute_voucher_recipe_command(voucher, - sub_recipe->key, - sub_recipe->command, - prev_iv, - sub_recipe->content, - sub_recipe->content_size, - (sub_recipe->key == control_key)); + sub_recipe->key, + sub_recipe->command, + prev_iv, + sub_recipe->content, + sub_recipe->content_size, + (sub_recipe->key == control_key)); ipc_voucher_release(prev_iv); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { break; + } } if (KERN_SUCCESS == kr) { @@ -2439,7 +2510,7 @@ mach_voucher_attr_control_create_mach_voucher( } /* - * Routine: host_create_mach_voucher + * Routine: host_create_mach_voucher * Purpose: * Create a new mach voucher and initialize it by processing the * supplied recipe(s). @@ -2463,8 +2534,9 @@ host_create_mach_voucher( ipc_voucher_t voucher = IV_NULL; kern_return_t kr = KERN_SUCCESS; - if (host == HOST_NULL) + if (host == HOST_NULL) { return KERN_INVALID_ARGUMENT; + } /* if nothing to do ... */ if (0 == recipe_size) { @@ -2474,8 +2546,9 @@ host_create_mach_voucher( /* allocate new voucher */ voucher = iv_alloc(ivgt_keys_in_use); - if (IV_NULL == voucher) + if (IV_NULL == voucher) { return KERN_RESOURCE_SHORTAGE; + } /* iterate over the recipe items */ while (0 < recipe_size - recipe_used) { @@ -2502,16 +2575,17 @@ host_create_mach_voucher( } kr = ipc_execute_voucher_recipe_command(voucher, - sub_recipe->key, - sub_recipe->command, - prev_iv, - sub_recipe->content, - sub_recipe->content_size, - FALSE); + sub_recipe->key, + sub_recipe->command, + prev_iv, + sub_recipe->content, + sub_recipe->content_size, + FALSE); ipc_voucher_release(prev_iv); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { break; + } } if (KERN_SUCCESS == kr) { @@ -2524,10 +2598,10 @@ host_create_mach_voucher( } /* - * Routine: host_register_well_known_mach_voucher_attr_manager + * Routine: host_register_well_known_mach_voucher_attr_manager * Purpose: * Register the user-level resource manager responsible for a given - * key value. + * key value. 
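host_create_mach_voucher() above is also the userspace entry point via its MIG stub. A hedged sketch of a minimal caller; the header name and the all-keys COPY recipe follow common open-source usage, not anything this patch itself guarantees:

#include <mach/mach.h>

/* Clone the attributes of a previous voucher (or create an effectively
 * empty one when prev is MACH_VOUCHER_NULL) via a single COPY sub-recipe. */
kern_return_t
clone_voucher(mach_voucher_t prev, mach_voucher_t *out)
{
    mach_voucher_attr_recipe_data_t recipe = {
        .key              = MACH_VOUCHER_ATTR_KEY_ALL,
        .command          = MACH_VOUCHER_ATTR_COPY,
        .previous_voucher = prev,
        .content_size     = 0,
    };

    return host_create_mach_voucher(mach_host_self(),
        (mach_voucher_attr_raw_recipe_array_t)&recipe,
        sizeof(recipe), out);
}

In-kernel, the loop above would see one sub-recipe, dispatch MACH_VOUCHER_ATTR_COPY for the wildcard key, and hand the resulting voucher to iv_dedup().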
* Conditions: * The manager port passed in has to be converted/wrapped * in an ipc_voucher_attr_manager_t structure and then call the @@ -2537,14 +2611,15 @@ host_create_mach_voucher( */ kern_return_t host_register_well_known_mach_voucher_attr_manager( - host_t host, + host_t host, mach_voucher_attr_manager_t __unused manager, mach_voucher_attr_value_handle_t __unused default_value, - mach_voucher_attr_key_t __unused key, + mach_voucher_attr_key_t __unused key, ipc_voucher_attr_control_t __unused *control) { - if (HOST_NULL == host) + if (HOST_NULL == host) { return KERN_INVALID_HOST; + } #if 1 return KERN_NOT_SUPPORTED; @@ -2563,18 +2638,19 @@ host_register_well_known_mach_voucher_attr_manager( proxy = mvam_alloc(manager); kr = ipc_register_well_known_mach_voucher_attr_manager(&proxy->mvam_manager, - default_value, - key, - control); - if (KERN_SUCCESS != kr) + default_value, + key, + control); + if (KERN_SUCCESS != kr) { mvam_release(proxy); + } return kr; #endif } /* - * Routine: host_register_mach_voucher_attr_manager + * Routine: host_register_mach_voucher_attr_manager * Purpose: * Register the user-space resource manager and return a * dynamically allocated key. @@ -2585,14 +2661,15 @@ host_register_well_known_mach_voucher_attr_manager( */ kern_return_t host_register_mach_voucher_attr_manager( - host_t host, + host_t host, mach_voucher_attr_manager_t __unused manager, mach_voucher_attr_value_handle_t __unused default_value, - mach_voucher_attr_key_t __unused *key, + mach_voucher_attr_key_t __unused *key, ipc_voucher_attr_control_t __unused *control) { - if (HOST_NULL == host) + if (HOST_NULL == host) { return KERN_INVALID_HOST; + } return KERN_NOT_SUPPORTED; } @@ -2609,7 +2686,7 @@ ipc_get_pthpriority_from_kmsg_voucher( { ipc_voucher_t pthread_priority_voucher; mach_voucher_attr_raw_recipe_size_t content_size = - sizeof(mach_voucher_attr_recipe_data_t) + sizeof(ipc_pthread_priority_value_t); + sizeof(mach_voucher_attr_recipe_data_t) + sizeof(ipc_pthread_priority_value_t); uint8_t content_data[content_size]; mach_voucher_attr_recipe_t cur_content; kern_return_t kr = KERN_SUCCESS; @@ -2620,9 +2697,9 @@ ipc_get_pthpriority_from_kmsg_voucher( pthread_priority_voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject; kr = mach_voucher_extract_attr_recipe(pthread_priority_voucher, - MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, - content_data, - &content_size); + MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, + content_data, + &content_size); if (kr != KERN_SUCCESS) { return kr; } @@ -2651,8 +2728,8 @@ void ipc_voucher_send_preprocessing(ipc_kmsg_t kmsg) { uint8_t recipes[(MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN + 1) * sizeof(ipc_voucher_attr_recipe_data_t)]; - ipc_voucher_attr_raw_recipe_array_size_t recipe_size = (MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN + 1) * - sizeof(ipc_voucher_attr_recipe_data_t); + ipc_voucher_attr_raw_recipe_array_size_t recipe_size = (MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN + 1) * + sizeof(ipc_voucher_attr_recipe_data_t); ipc_voucher_t pre_processed_voucher; ipc_voucher_t voucher_to_send; kern_return_t kr; @@ -2666,9 +2743,9 @@ ipc_voucher_send_preprocessing(ipc_kmsg_t kmsg) pre_processed_voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject; kr = ipc_voucher_prepare_processing_recipe(pre_processed_voucher, - (mach_voucher_attr_raw_recipe_array_t)recipes, - &recipe_size, MACH_VOUCHER_ATTR_SEND_PREPROCESS, - IVAM_FLAGS_SUPPORT_SEND_PREPROCESS, &need_preprocessing); + (mach_voucher_attr_raw_recipe_array_t)recipes, + &recipe_size, MACH_VOUCHER_ATTR_SEND_PREPROCESS, + 
IVAM_FLAGS_SUPPORT_SEND_PREPROCESS, &need_preprocessing); assert(KERN_SUCCESS == kr); /* @@ -2676,8 +2753,8 @@ ipc_voucher_send_preprocessing(ipc_kmsg_t kmsg) */ if (need_preprocessing) { kr = ipc_create_mach_voucher(recipes, - recipe_size, - &voucher_to_send); + recipe_size, + &voucher_to_send); assert(KERN_SUCCESS == kr); ipc_port_release_send(kmsg->ikm_voucher); kmsg->ikm_voucher = convert_voucher_to_port(voucher_to_send); @@ -2695,19 +2772,19 @@ ipc_voucher_send_preprocessing(ipc_kmsg_t kmsg) */ void ipc_voucher_receive_postprocessing( - ipc_kmsg_t kmsg, - mach_msg_option_t option) + ipc_kmsg_t kmsg, + mach_msg_option_t option) { uint8_t recipes[(MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN + 1) * sizeof(ipc_voucher_attr_recipe_data_t)]; - ipc_voucher_attr_raw_recipe_array_size_t recipe_size = (MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN + 1) * - sizeof(ipc_voucher_attr_recipe_data_t); + ipc_voucher_attr_raw_recipe_array_size_t recipe_size = (MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN + 1) * + sizeof(ipc_voucher_attr_recipe_data_t); ipc_voucher_t recv_voucher; ipc_voucher_t sent_voucher; kern_return_t kr; int need_postprocessing = FALSE; if ((option & MACH_RCV_VOUCHER) == 0 || (!IP_VALID(kmsg->ikm_voucher)) || - current_task() == kernel_task) { + current_task() == kernel_task) { return; } @@ -2715,9 +2792,9 @@ ipc_voucher_receive_postprocessing( sent_voucher = (ipc_voucher_t)kmsg->ikm_voucher->ip_kobject; kr = ipc_voucher_prepare_processing_recipe(sent_voucher, - (mach_voucher_attr_raw_recipe_array_t)recipes, - &recipe_size, MACH_VOUCHER_ATTR_AUTO_REDEEM, - IVAM_FLAGS_SUPPORT_RECEIVE_POSTPROCESS, &need_postprocessing); + (mach_voucher_attr_raw_recipe_array_t)recipes, + &recipe_size, MACH_VOUCHER_ATTR_AUTO_REDEEM, + IVAM_FLAGS_SUPPORT_RECEIVE_POSTPROCESS, &need_postprocessing); assert(KERN_SUCCESS == kr); @@ -2726,8 +2803,8 @@ ipc_voucher_receive_postprocessing( */ if (need_postprocessing) { kr = ipc_create_mach_voucher(recipes, - recipe_size, - &recv_voucher); + recipe_size, + &recv_voucher); assert(KERN_SUCCESS == kr); /* swap the voucher port (and set voucher bits in case it didn't already exist) */ kmsg->ikm_header->msgh_bits |= (MACH_MSG_TYPE_MOVE_SEND << 16); @@ -2756,13 +2833,15 @@ ipc_voucher_prepare_processing_recipe( ipc_voucher_attr_raw_recipe_array_size_t recipe_used = 0; iv_index_t key_index; ipc_voucher_attr_recipe_t recipe; - - if (IV_NULL == voucher) + + if (IV_NULL == voucher) { return KERN_INVALID_ARGUMENT; - + } + /* Setup a recipe to copy all attributes. */ - if (recipe_size < sizeof(*recipe)) - return KERN_NO_SPACE; + if (recipe_size < sizeof(*recipe)) { + return KERN_NO_SPACE; + } *need_processing = FALSE; recipe = (ipc_voucher_attr_recipe_t)(void *)&recipes[recipe_used]; @@ -2779,14 +2858,16 @@ ipc_voucher_prepare_processing_recipe( /* don't output anything for a default value */ value_index = iv_lookup(voucher, key_index); - if (IV_UNUSED_VALINDEX == value_index) + if (IV_UNUSED_VALINDEX == value_index) { continue; + } - if (recipe_size - recipe_used < sizeof(*recipe)) + if (recipe_size - recipe_used < sizeof(*recipe)) { return KERN_NO_SPACE; + } recipe = (ipc_voucher_attr_recipe_t)(void *)&recipes[recipe_used]; - + /* * Get the manager for this key_index. 
The * existence of a non-default value for this @@ -2800,9 +2881,10 @@ ipc_voucher_prepare_processing_recipe( } /* Check if the supported flag is set in the manager */ - if ((manager->ivam_flags & flags) == 0) + if ((manager->ivam_flags & flags) == 0) { continue; - + } + key = iv_index_to_key(key_index); recipe->key = key; @@ -2854,9 +2936,9 @@ mach_generate_activity_id( } activity_id = generate_activity_id(args->count); - kr = copyout(&activity_id, args->activity_id, sizeof (activity_id)); + kr = copyout(&activity_id, args->activity_id, sizeof(activity_id)); - return (kr); + return kr; } #if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) || defined(MACH_VOUCHER_ATTR_KEY_TEST) @@ -2864,15 +2946,15 @@ mach_generate_activity_id( /* * Build-in a simple User Data Resource Manager */ -#define USER_DATA_MAX_DATA (16*1024) +#define USER_DATA_MAX_DATA (16*1024) struct user_data_value_element { - mach_voucher_attr_value_reference_t e_made; - mach_voucher_attr_content_size_t e_size; - iv_index_t e_sum; - iv_index_t e_hash; - queue_chain_t e_hash_link; - uint8_t e_data[]; + mach_voucher_attr_value_reference_t e_made; + mach_voucher_attr_content_size_t e_size; + iv_index_t e_sum; + iv_index_t e_hash; + queue_chain_t e_hash_link; + uint8_t e_data[]; }; typedef struct user_data_value_element *user_data_element_t; @@ -2890,74 +2972,74 @@ static lck_spin_t user_data_lock_data; lck_spin_init(&user_data_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define user_data_lock_destroy() \ lck_spin_destroy(&user_data_lock_data, &ipc_lck_grp) -#define user_data_lock() \ - lck_spin_lock(&user_data_lock_data) -#define user_data_lock_try() \ - lck_spin_try_lock(&user_data_lock_data) -#define user_data_unlock() \ +#define user_data_lock() \ + lck_spin_lock_grp(&user_data_lock_data, &ipc_lck_grp) +#define user_data_lock_try() \ + lck_spin_try_lock_grp(&user_data_lock_data, &ipc_lck_grp) +#define user_data_unlock() \ lck_spin_unlock(&user_data_lock_data) static kern_return_t user_data_release_value( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_key_t key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_key_t key, + mach_voucher_attr_value_handle_t value, + mach_voucher_attr_value_reference_t sync); static kern_return_t user_data_get_value( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_key_t key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t prev_values, - mach_voucher_attr_value_handle_array_size_t prev_value_count, - mach_voucher_attr_content_t content, - mach_voucher_attr_content_size_t content_size, - mach_voucher_attr_value_handle_t *out_value, - mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_key_t key, + mach_voucher_attr_recipe_command_t command, + mach_voucher_attr_value_handle_array_t prev_values, + mach_voucher_attr_value_handle_array_size_t prev_value_count, + mach_voucher_attr_content_t content, + mach_voucher_attr_content_size_t content_size, + mach_voucher_attr_value_handle_t *out_value, + mach_voucher_attr_value_flags_t *out_flags, + ipc_voucher_t *out_value_voucher); static kern_return_t user_data_extract_content( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_key_t key, - mach_voucher_attr_value_handle_array_t values, - mach_voucher_attr_value_handle_array_size_t value_count, - mach_voucher_attr_recipe_command_t *out_command, - mach_voucher_attr_content_t 
out_content, - mach_voucher_attr_content_size_t *in_out_content_size); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_key_t key, + mach_voucher_attr_value_handle_array_t values, + mach_voucher_attr_value_handle_array_size_t value_count, + mach_voucher_attr_recipe_command_t *out_command, + mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_size_t *in_out_content_size); static kern_return_t user_data_command( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_key_t key, - mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, - mach_voucher_attr_command_t command, - mach_voucher_attr_content_t in_content, - mach_voucher_attr_content_size_t in_content_size, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *out_content_size); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_key_t key, + mach_voucher_attr_value_handle_array_t values, + mach_msg_type_number_t value_count, + mach_voucher_attr_command_t command, + mach_voucher_attr_content_t in_content, + mach_voucher_attr_content_size_t in_content_size, + mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_size_t *out_content_size); static void user_data_release( - ipc_voucher_attr_manager_t manager); + ipc_voucher_attr_manager_t manager); struct ipc_voucher_attr_manager user_data_manager = { - .ivam_release_value = user_data_release_value, - .ivam_get_value = user_data_get_value, - .ivam_extract_content = user_data_extract_content, - .ivam_command = user_data_command, - .ivam_release = user_data_release, - .ivam_flags = IVAM_FLAGS_NONE, + .ivam_release_value = user_data_release_value, + .ivam_get_value = user_data_get_value, + .ivam_extract_content = user_data_extract_content, + .ivam_command = user_data_command, + .ivam_release = user_data_release, + .ivam_flags = IVAM_FLAGS_NONE, }; ipc_voucher_attr_control_t user_data_control; ipc_voucher_attr_control_t test_control; #if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) && defined(MACH_VOUCHER_ATTR_KEY_TEST) -#define USER_DATA_ASSERT_KEY(key) \ - assert(MACH_VOUCHER_ATTR_KEY_USER_DATA == (key) || \ +#define USER_DATA_ASSERT_KEY(key) \ + assert(MACH_VOUCHER_ATTR_KEY_USER_DATA == (key) || \ MACH_VOUCHER_ATTR_KEY_TEST == (key)); #elif defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) #define USER_DATA_ASSERT_KEY(key) assert(MACH_VOUCHER_ATTR_KEY_USER_DATA == (key)) @@ -2966,7 +3048,7 @@ ipc_voucher_attr_control_t test_control; #endif /* - * Routine: user_data_release_value + * Routine: user_data_release_value * Purpose: * Release a made reference on a specific value managed by * this voucher attribute manager. @@ -2976,15 +3058,15 @@ ipc_voucher_attr_control_t test_control; */ static kern_return_t user_data_release_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_t value, + mach_voucher_attr_value_reference_t sync) { user_data_element_t elem; iv_index_t hash; - assert (&user_data_manager == manager); + assert(&user_data_manager == manager); USER_DATA_ASSERT_KEY(key); elem = (user_data_element_t)value; @@ -3004,28 +3086,28 @@ user_data_release_value( } /* - * Routine: user_data_checksum + * Routine: user_data_checksum * Purpose: * Provide a rudimentary checksum for the data presented * to these voucher attribute managers. 
*/ static iv_index_t user_data_checksum( - mach_voucher_attr_content_t content, - mach_voucher_attr_content_size_t content_size) + mach_voucher_attr_content_t content, + mach_voucher_attr_content_size_t content_size) { mach_voucher_attr_content_size_t i; iv_index_t cksum = 0; - for(i = 0; i < content_size; i++, content++) { + for (i = 0; i < content_size; i++, content++) { cksum = (cksum << 8) ^ (cksum + *(unsigned char *)content); } - return (~cksum); + return ~cksum; } /* - * Routine: user_data_dedup + * Routine: user_data_dedup * Purpose: * See if the content represented by this request already exists * in another user data element. If so return a made reference @@ -3038,10 +3120,10 @@ user_data_checksum( */ static user_data_element_t user_data_dedup( - mach_voucher_attr_content_t content, - mach_voucher_attr_content_size_t content_size) + mach_voucher_attr_content_t content, + mach_voucher_attr_content_size_t content_size) { - iv_index_t sum; + iv_index_t sum; iv_index_t hash; user_data_element_t elem; user_data_element_t alloc = NULL; @@ -3049,7 +3131,7 @@ user_data_dedup( sum = user_data_checksum(content, content_size); hash = USER_DATA_HASH_BUCKET(sum); - retry: +retry: user_data_lock(); queue_iterate(&user_data_bucket[hash], elem, user_data_element_t, e_hash_link) { assert(elem->e_hash == hash); @@ -3059,19 +3141,23 @@ user_data_dedup( iv_index_t i; /* and all data matches */ - for (i = 0; i < content_size; i++) - if (elem->e_data[i] != content[i]) + for (i = 0; i < content_size; i++) { + if (elem->e_data[i] != content[i]) { break; - if (i < content_size) + } + } + if (i < content_size) { continue; + } /* ... we found a match... */ elem->e_made++; user_data_unlock(); - if (NULL != alloc) + if (NULL != alloc) { kfree(alloc, sizeof(*alloc) + content_size); + } return elem; } @@ -3097,20 +3183,20 @@ user_data_dedup( static kern_return_t user_data_get_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t prev_values, - mach_voucher_attr_value_handle_array_size_t prev_value_count, - mach_voucher_attr_content_t content, - mach_voucher_attr_content_size_t content_size, - mach_voucher_attr_value_handle_t *out_value, - mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_recipe_command_t command, + mach_voucher_attr_value_handle_array_t prev_values, + mach_voucher_attr_value_handle_array_size_t prev_value_count, + mach_voucher_attr_content_t content, + mach_voucher_attr_content_size_t content_size, + mach_voucher_attr_value_handle_t *out_value, + mach_voucher_attr_value_flags_t *out_flags, + ipc_voucher_t *out_value_voucher) { user_data_element_t elem; - assert (&user_data_manager == manager); + assert(&user_data_manager == manager); USER_DATA_ASSERT_KEY(key); /* never an out voucher */ @@ -3118,7 +3204,6 @@ user_data_get_value( *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE; switch (command) { - case MACH_VOUCHER_ATTR_REDEEM: /* redeem of previous values is the value */ @@ -3135,8 +3220,9 @@ user_data_get_value( return KERN_SUCCESS; case MACH_VOUCHER_ATTR_USER_DATA_STORE: - if (USER_DATA_MAX_DATA < content_size) + if (USER_DATA_MAX_DATA < content_size) { return KERN_RESOURCE_SHORTAGE; + } /* empty is the default */ if (0 == content_size) { @@ -3156,28 +3242,29 @@ user_data_get_value( static kern_return_t 
user_data_extract_content( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t values, - mach_voucher_attr_value_handle_array_size_t value_count, - mach_voucher_attr_recipe_command_t *out_command, - mach_voucher_attr_content_t out_content, - mach_voucher_attr_content_size_t *in_out_content_size) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_array_t values, + mach_voucher_attr_value_handle_array_size_t value_count, + mach_voucher_attr_recipe_command_t *out_command, + mach_voucher_attr_content_t out_content, + mach_voucher_attr_content_size_t *in_out_content_size) { mach_voucher_attr_content_size_t size = 0; user_data_element_t elem; unsigned int i; - assert (&user_data_manager == manager); + assert(&user_data_manager == manager); USER_DATA_ASSERT_KEY(key); /* concatenate the stored data items */ - for (i = 0; i < value_count ; i++) { + for (i = 0; i < value_count && *in_out_content_size > 0; i++) { elem = (user_data_element_t)values[i]; assert(USER_DATA_MAX_DATA >= elem->e_size); - if (size + elem->e_size > *in_out_content_size) + if (size + elem->e_size > *in_out_content_size) { return KERN_NO_SPACE; + } memcpy(&out_content[size], elem->e_data, elem->e_size); size += elem->e_size; @@ -3189,27 +3276,28 @@ user_data_extract_content( static kern_return_t user_data_command( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t __unused values, - mach_msg_type_number_t __unused value_count, - mach_voucher_attr_command_t __unused command, - mach_voucher_attr_content_t __unused in_content, - mach_voucher_attr_content_size_t __unused in_content_size, - mach_voucher_attr_content_t __unused out_content, - mach_voucher_attr_content_size_t __unused *out_content_size) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_array_t __unused values, + mach_msg_type_number_t __unused value_count, + mach_voucher_attr_command_t __unused command, + mach_voucher_attr_content_t __unused in_content, + mach_voucher_attr_content_size_t __unused in_content_size, + mach_voucher_attr_content_t __unused out_content, + mach_voucher_attr_content_size_t __unused *out_content_size) { - assert (&user_data_manager == manager); + assert(&user_data_manager == manager); USER_DATA_ASSERT_KEY(key); return KERN_FAILURE; } static void user_data_release( - ipc_voucher_attr_manager_t manager) + ipc_voucher_attr_manager_t manager) { - if (manager != &user_data_manager) + if (manager != &user_data_manager) { return; + } panic("Voucher user-data manager released"); } @@ -3221,35 +3309,38 @@ user_data_attr_manager_init() { kern_return_t kr; -#if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) +#if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) if ((user_data_manager_inited & 0x1) != 0x1) { kr = ipc_register_well_known_mach_voucher_attr_manager(&user_data_manager, - (mach_voucher_attr_value_handle_t)0, - MACH_VOUCHER_ATTR_KEY_USER_DATA, - &user_data_control); - if (KERN_SUCCESS != kr) + (mach_voucher_attr_value_handle_t)0, + MACH_VOUCHER_ATTR_KEY_USER_DATA, + &user_data_control); + if (KERN_SUCCESS != kr) { printf("Voucher user-data manager register(USER-DATA) returned %d", kr); - else + } else { user_data_manager_inited |= 0x1; + } } #endif #if defined(MACH_VOUCHER_ATTR_KEY_TEST) if ((user_data_manager_inited & 0x2) != 0x2) { 
kr = ipc_register_well_known_mach_voucher_attr_manager(&user_data_manager, - (mach_voucher_attr_value_handle_t)0, - MACH_VOUCHER_ATTR_KEY_TEST, - &test_control); - if (KERN_SUCCESS != kr) + (mach_voucher_attr_value_handle_t)0, + MACH_VOUCHER_ATTR_KEY_TEST, + &test_control); + if (KERN_SUCCESS != kr) { printf("Voucher user-data manager register(TEST) returned %d", kr); - else + } else { user_data_manager_inited |= 0x2; + } } #endif #if defined(MACH_VOUCHER_ATTR_KEY_USER_DATA) || defined(MACH_VOUCHER_ATTR_KEY_TEST) int i; - for (i=0; i < USER_DATA_HASH_BUCKETS; i++) + for (i = 0; i < USER_DATA_HASH_BUCKETS; i++) { queue_init(&user_data_bucket[i]); + } user_data_lock_init(); #endif diff --git a/osfmk/ipc/ipc_voucher.h b/osfmk/ipc/ipc_voucher.h index b306487a4..248fda4c4 100644 --- a/osfmk/ipc/ipc_voucher.h +++ b/osfmk/ipc/ipc_voucher.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _IPC_IPC_VOUCHER_H_ -#define _IPC_IPC_VOUCHER_H_ +#ifndef _IPC_IPC_VOUCHER_H_ +#define _IPC_IPC_VOUCHER_H_ #include #include @@ -42,8 +42,8 @@ #include /* locking */ -extern lck_grp_t ipc_lck_grp; -extern lck_attr_t ipc_lck_attr; +extern lck_grp_t ipc_lck_grp; +extern lck_attr_t ipc_lck_attr; extern void ipc_voucher_init(void); @@ -51,12 +51,12 @@ extern void ipc_voucher_init(void); typedef mach_voucher_attr_value_handle_t iv_value_handle_t; typedef mach_voucher_attr_value_reference_t iv_value_refs_t; -typedef natural_t iv_index_t; -#define IV_UNUSED_VALINDEX ((iv_index_t) 0) -#define IV_UNUSED_KEYINDEX ((iv_index_t) ~0) +typedef natural_t iv_index_t; +#define IV_UNUSED_VALINDEX ((iv_index_t) 0) +#define IV_UNUSED_KEYINDEX ((iv_index_t) ~0) -typedef iv_index_t *iv_entry_t; -#define IVE_NULL ((iv_entry_t) 0) +typedef iv_index_t *iv_entry_t; +#define IVE_NULL ((iv_entry_t) 0) #define IV_ENTRIES_INLINE MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN @@ -68,17 +68,17 @@ typedef iv_index_t *iv_entry_t; * (which themselves are reference counted). 
*/ struct ipc_voucher { - iv_index_t iv_hash; /* checksum hash */ - iv_index_t iv_sum; /* checksum of values */ - os_refcnt_t iv_refs; /* reference count */ - iv_index_t iv_table_size; /* size of the voucher table */ - iv_index_t iv_inline_table[IV_ENTRIES_INLINE]; - iv_entry_t iv_table; /* table of voucher attr entries */ - ipc_port_t iv_port; /* port representing the voucher */ - queue_chain_t iv_hash_link; /* link on hash chain */ + iv_index_t iv_hash; /* checksum hash */ + iv_index_t iv_sum; /* checksum of values */ + os_refcnt_t iv_refs; /* reference count */ + iv_index_t iv_table_size; /* size of the voucher table */ + iv_index_t iv_inline_table[IV_ENTRIES_INLINE]; + iv_entry_t iv_table; /* table of voucher attr entries */ + ipc_port_t iv_port; /* port representing the voucher */ + queue_chain_t iv_hash_link; /* link on hash chain */ }; -#define IV_NULL IPC_VOUCHER_NULL +#define IV_NULL IPC_VOUCHER_NULL /* @@ -88,7 +88,7 @@ struct ipc_voucher { * returned resource manager attribute values. Each value only appears * once in the table. If a value is returned more than once by the * resource manager, the voucher system will increase the reference - * on the previous value. + * on the previous value. * * The voucher itself contains one entry per key, that indexes into * this table. @@ -108,7 +108,7 @@ struct ipc_voucher { * manager accepted the return, and the get-value response raced the * release's reply, the newly made references will look like an extension * of the old value's cache lifetime, rather than a new one. Dropping - * that new lifetime's references to zero would result in a second + * that new lifetime's references to zero would result in a second * release callback to the resource manager - this time with the wrong * "made" reference count. We avoid the race with this flag. 
*/ @@ -116,10 +116,10 @@ struct ipc_voucher { struct ivac_entry_s { iv_value_handle_t ivace_value; iv_value_refs_t ivace_layered:1, /* layered effective entry */ - ivace_releasing:1, /* release in progress */ - ivace_free:1, /* on freelist */ - ivace_persist:1, /* Persist the entry, don't count made refs */ - ivace_refs:28; /* reference count */ + ivace_releasing:1, /* release in progress */ + ivace_free:1, /* on freelist */ + ivace_persist:1, /* Persist the entry, don't count made refs */ + ivace_refs:28; /* reference count */ union { iv_value_refs_t ivaceu_made; /* made count (non-layered) */ iv_index_t ivaceu_layer; /* next effective layer (layered) */ @@ -142,14 +142,14 @@ typedef ivac_entry *ivac_entry_t; struct ipc_voucher_attr_control { os_refcnt_t ivac_refs; - boolean_t ivac_is_growing; /* is the table being grown */ - ivac_entry_t ivac_table; /* table of voucher attr value entries */ - iv_index_t ivac_table_size; /* size of the attr value table */ - iv_index_t ivac_init_table_size; /* size of the attr value table */ - iv_index_t ivac_freelist; /* index of the first free element */ - ipc_port_t ivac_port; /* port for accessing the cache control */ + boolean_t ivac_is_growing; /* is the table being grown */ + ivac_entry_t ivac_table; /* table of voucher attr value entries */ + iv_index_t ivac_table_size; /* size of the attr value table */ + iv_index_t ivac_init_table_size; /* size of the attr value table */ + iv_index_t ivac_freelist; /* index of the first free element */ + ipc_port_t ivac_port; /* port for accessing the cache control */ lck_spin_t ivac_lock_data; - iv_index_t ivac_key_index; /* key index for this value */ + iv_index_t ivac_key_index; /* key index for this value */ }; typedef ipc_voucher_attr_control_t iv_attr_control_t; @@ -164,16 +164,16 @@ extern kern_return_t ipc_get_pthpriority_from_kmsg_voucher(ipc_kmsg_t kmsg, ipc_ lck_spin_init(&(ivac)->ivac_lock_data, &ipc_lck_grp, &ipc_lck_attr) #define ivac_lock_destroy(ivac) \ lck_spin_destroy(&(ivac)->ivac_lock_data, &ipc_lck_grp) -#define ivac_lock(ivac) \ - lck_spin_lock(&(ivac)->ivac_lock_data) -#define ivac_lock_try(ivac) \ - lck_spin_try_lock(&(ivac)->ivac_lock_data) -#define ivac_unlock(ivac) \ +#define ivac_lock(ivac) \ + lck_spin_lock_grp(&(ivac)->ivac_lock_data, &ipc_lck_grp) +#define ivac_lock_try(ivac) \ + lck_spin_try_lock_grp(&(ivac)->ivac_lock_data, &ipc_lck_grp) +#define ivac_unlock(ivac) \ lck_spin_unlock(&(ivac)->ivac_lock_data) -#define ivac_sleep(ivac) lck_spin_sleep(&(ivac)->ivac_lock_data, \ - LCK_SLEEP_DEFAULT, \ - (event_t)(ivac), \ - THREAD_UNINT) +#define ivac_sleep(ivac) lck_spin_sleep_grp(&(ivac)->ivac_lock_data, \ + LCK_SLEEP_DEFAULT, \ + (event_t)(ivac), \ + THREAD_UNINT, &ipc_lck_grp) #define ivac_wakeup(ivac) thread_wakeup((event_t)(ivac)) extern void ivac_dealloc(ipc_voucher_attr_control_t ivac); @@ -181,16 +181,18 @@ extern void ivac_dealloc(ipc_voucher_attr_control_t ivac); static inline void ivac_reference(ipc_voucher_attr_control_t ivac) { - if (ivac == IVAC_NULL) + if (ivac == IVAC_NULL) { return; + } os_ref_retain(&ivac->ivac_refs); } static inline void ivac_release(ipc_voucher_attr_control_t ivac) { - if (IVAC_NULL == ivac) + if (IVAC_NULL == ivac) { return; + } if (os_ref_release(&ivac->ivac_refs) == 0) { ivac_dealloc(ivac); @@ -211,9 +213,9 @@ ivac_release(ipc_voucher_attr_control_t ivac) * find the index by key). 
*/ typedef struct ipc_voucher_global_table_element { - ipc_voucher_attr_manager_t ivgte_manager; - ipc_voucher_attr_control_t ivgte_control; - mach_voucher_attr_key_t ivgte_key; + ipc_voucher_attr_manager_t ivgte_manager; + ipc_voucher_attr_control_t ivgte_control; + mach_voucher_attr_key_t ivgte_key; } ipc_voucher_global_table_element; typedef ipc_voucher_global_table_element *ipc_voucher_global_table_element_t; @@ -228,11 +230,11 @@ typedef ipc_voucher_global_table_element *ipc_voucher_global_table_element_t; */ #pragma pack(1) typedef struct ipc_voucher_attr_recipe_data { - mach_voucher_attr_key_t key; + mach_voucher_attr_key_t key; mach_voucher_attr_recipe_command_t command; - ipc_voucher_t previous_voucher; - mach_voucher_attr_content_size_t content_size; - uint8_t content[]; + ipc_voucher_t previous_voucher; + mach_voucher_attr_content_size_t content_size; + uint8_t content[]; } ipc_voucher_attr_recipe_data_t; typedef ipc_voucher_attr_recipe_data_t *ipc_voucher_attr_recipe_t; typedef mach_msg_type_number_t ipc_voucher_attr_recipe_size_t; @@ -255,50 +257,50 @@ typedef mach_msg_type_number_t ipc_voucher_attr_raw_recipe_array_size_t; */ typedef kern_return_t (*ipc_voucher_attr_manager_release_value_t)(ipc_voucher_attr_manager_t, - mach_voucher_attr_key_t, - mach_voucher_attr_value_handle_t, - mach_voucher_attr_value_reference_t); + mach_voucher_attr_key_t, + mach_voucher_attr_value_handle_t, + mach_voucher_attr_value_reference_t); typedef kern_return_t (*ipc_voucher_attr_manager_get_value_t)(ipc_voucher_attr_manager_t, - mach_voucher_attr_key_t, - mach_voucher_attr_recipe_command_t, - mach_voucher_attr_value_handle_array_t, - mach_voucher_attr_value_handle_array_size_t, - mach_voucher_attr_content_t, - mach_voucher_attr_content_size_t, - mach_voucher_attr_value_handle_t *, - mach_voucher_attr_value_flags_t *, - ipc_voucher_t *); + mach_voucher_attr_key_t, + mach_voucher_attr_recipe_command_t, + mach_voucher_attr_value_handle_array_t, + mach_voucher_attr_value_handle_array_size_t, + mach_voucher_attr_content_t, + mach_voucher_attr_content_size_t, + mach_voucher_attr_value_handle_t *, + mach_voucher_attr_value_flags_t *, + ipc_voucher_t *); typedef kern_return_t (*ipc_voucher_attr_manager_extract_content_t)(ipc_voucher_attr_manager_t, - mach_voucher_attr_key_t, - mach_voucher_attr_value_handle_array_t, - mach_voucher_attr_value_handle_array_size_t, - mach_voucher_attr_recipe_command_t *, - mach_voucher_attr_content_t, - mach_voucher_attr_content_size_t *); + mach_voucher_attr_key_t, + mach_voucher_attr_value_handle_array_t, + mach_voucher_attr_value_handle_array_size_t, + mach_voucher_attr_recipe_command_t *, + mach_voucher_attr_content_t, + mach_voucher_attr_content_size_t *); typedef kern_return_t (*ipc_voucher_attr_manager_command_t)(ipc_voucher_attr_manager_t, - mach_voucher_attr_key_t, - mach_voucher_attr_value_handle_array_t, - mach_voucher_attr_value_handle_array_size_t, - mach_voucher_attr_command_t, - mach_voucher_attr_content_t, - mach_voucher_attr_content_size_t, - mach_voucher_attr_content_t, - mach_voucher_attr_content_size_t *); + mach_voucher_attr_key_t, + mach_voucher_attr_value_handle_array_t, + mach_voucher_attr_value_handle_array_size_t, + mach_voucher_attr_command_t, + mach_voucher_attr_content_t, + mach_voucher_attr_content_size_t, + mach_voucher_attr_content_t, + mach_voucher_attr_content_size_t *); typedef void (*ipc_voucher_attr_manager_release_t)(ipc_voucher_attr_manager_t); typedef uint32_t ipc_voucher_attr_manager_flags; struct ipc_voucher_attr_manager { - 
ipc_voucher_attr_manager_release_value_t ivam_release_value; - ipc_voucher_attr_manager_get_value_t ivam_get_value; - ipc_voucher_attr_manager_extract_content_t ivam_extract_content; - ipc_voucher_attr_manager_command_t ivam_command; - ipc_voucher_attr_manager_release_t ivam_release; - ipc_voucher_attr_manager_flags ivam_flags; + ipc_voucher_attr_manager_release_value_t ivam_release_value; + ipc_voucher_attr_manager_get_value_t ivam_get_value; + ipc_voucher_attr_manager_extract_content_t ivam_extract_content; + ipc_voucher_attr_manager_command_t ivam_command; + ipc_voucher_attr_manager_release_t ivam_release; + ipc_voucher_attr_manager_flags ivam_flags; }; #define IVAM_FLAGS_NONE 0 @@ -309,83 +311,83 @@ __BEGIN_DECLS /* DEBUG/TRACE Convert from a port to a voucher */ extern uintptr_t unsafe_convert_port_to_voucher( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a voucher */ extern ipc_voucher_t convert_port_to_voucher( - ipc_port_t port); + ipc_port_t port); /* Convert from a port name to an ipc_voucher */ extern ipc_voucher_t convert_port_name_to_voucher( - mach_port_name_t name); + mach_port_name_t name); /* add a reference to the specified voucher */ extern void ipc_voucher_reference( - ipc_voucher_t voucher); + ipc_voucher_t voucher); /* drop the voucher reference picked up above */ extern void ipc_voucher_release( - ipc_voucher_t voucher); + ipc_voucher_t voucher); /* deliver voucher notifications */ extern void ipc_voucher_notify( - mach_msg_header_t *msg); + mach_msg_header_t *msg); /* Convert from a voucher to a port */ extern ipc_port_t convert_voucher_to_port( - ipc_voucher_t voucher); + ipc_voucher_t voucher); /* convert from a voucher attribute control to a port */ extern ipc_port_t convert_voucher_attr_control_to_port( - ipc_voucher_attr_control_t control); + ipc_voucher_attr_control_t control); /* add a reference to the specified voucher */ extern void ipc_voucher_attr_control_reference( - ipc_voucher_attr_control_t control); + ipc_voucher_attr_control_t control); /* drop the reference picked up above */ extern void ipc_voucher_attr_control_release( - ipc_voucher_attr_control_t control); + ipc_voucher_attr_control_t control); /* deliver voucher control notifications */ extern void ipc_voucher_attr_control_notify( - mach_msg_header_t *msg); + mach_msg_header_t *msg); /* convert from a port to a voucher attribute control */ extern ipc_voucher_attr_control_t convert_port_to_voucher_attr_control( - ipc_port_t port); + ipc_port_t port); /* * In-kernel equivalents to the user syscalls */ extern kern_return_t ipc_create_mach_voucher( - ipc_voucher_attr_raw_recipe_array_t recipes, - ipc_voucher_attr_raw_recipe_array_size_t recipe_size, - ipc_voucher_t *new_voucher); + ipc_voucher_attr_raw_recipe_array_t recipes, + ipc_voucher_attr_raw_recipe_array_size_t recipe_size, + ipc_voucher_t *new_voucher); extern kern_return_t ipc_voucher_attr_control_create_mach_voucher( - ipc_voucher_attr_control_t control, - ipc_voucher_attr_raw_recipe_array_t recipes, - ipc_voucher_attr_raw_recipe_array_size_t recipe_size, - ipc_voucher_t *new_voucher); + ipc_voucher_attr_control_t control, + ipc_voucher_attr_raw_recipe_array_t recipes, + ipc_voucher_attr_raw_recipe_array_size_t recipe_size, + ipc_voucher_t *new_voucher); -extern kern_return_t +extern kern_return_t ipc_register_well_known_mach_voucher_attr_manager( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_value_handle_t default_value, - mach_voucher_attr_key_t key, - ipc_voucher_attr_control_t *control); + 
ipc_voucher_attr_manager_t manager, + mach_voucher_attr_value_handle_t default_value, + mach_voucher_attr_key_t key, + ipc_voucher_attr_control_t *control); extern kern_return_t ipc_register_mach_voucher_attr_manager( - ipc_voucher_attr_manager_t manager, - mach_voucher_attr_value_handle_t default_value, - mach_voucher_attr_key_t *key, - ipc_voucher_attr_control_t *control); + ipc_voucher_attr_manager_t manager, + mach_voucher_attr_value_handle_t default_value, + mach_voucher_attr_key_t *key, + ipc_voucher_attr_control_t *control); __END_DECLS - -#endif /* _IPC_IPC_VOUCHER_H_ */ + +#endif /* _IPC_IPC_VOUCHER_H_ */ diff --git a/osfmk/ipc/mach_debug.c b/osfmk/ipc/mach_debug.c index a7b47831b..19df67bf7 100644 --- a/osfmk/ipc/mach_debug.c +++ b/osfmk/ipc/mach_debug.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -106,29 +106,31 @@ #if !MACH_IPC_DEBUG kern_return_t mach_port_get_srights( - __unused ipc_space_t space, - __unused mach_port_name_t name, - __unused mach_port_rights_t *srightsp) + __unused ipc_space_t space, + __unused mach_port_name_t name, + __unused mach_port_rights_t *srightsp) { - return KERN_FAILURE; + return KERN_FAILURE; } #else kern_return_t mach_port_get_srights( - ipc_space_t space, - mach_port_name_t name, - mach_port_rights_t *srightsp) + ipc_space_t space, + mach_port_name_t name, + mach_port_rights_t *srightsp) { ipc_port_t port; kern_return_t kr; mach_port_rights_t srights; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ srights = port->ip_srights; @@ -156,23 +158,23 @@ mach_port_get_srights( #if !MACH_IPC_DEBUG kern_return_t mach_port_space_info( - __unused ipc_space_t space, - __unused ipc_info_space_t *infop, - __unused ipc_info_name_array_t *tablep, - __unused mach_msg_type_number_t *tableCntp, + __unused ipc_space_t space, + __unused ipc_info_space_t *infop, + __unused ipc_info_name_array_t *tablep, + __unused mach_msg_type_number_t *tableCntp, __unused ipc_info_tree_name_array_t *treep, - __unused mach_msg_type_number_t *treeCntp) + __unused mach_msg_type_number_t *treeCntp) { - return KERN_FAILURE; + return KERN_FAILURE; } #else kern_return_t mach_port_space_info( - ipc_space_t space, - ipc_info_space_t *infop, - ipc_info_name_array_t *tablep, - mach_msg_type_number_t *tableCntp, - __unused ipc_info_tree_name_array_t *treep, + ipc_space_t space, + ipc_info_space_t *infop, + ipc_info_name_array_t *tablep, + mach_msg_type_number_t *tableCntp, + __unused ipc_info_tree_name_array_t *treep, __unused mach_msg_type_number_t *treeCntp) { ipc_info_name_t *table_info; @@ -185,8 +187,9 @@ mach_port_space_info( vm_map_copy_t copy; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } #if !(DEVELOPMENT || DEBUG) && CONFIG_MACF const boolean_t dbg_ok = (mac_task_check_expose_task(kernel_task) == 0); @@ -202,32 +205,34 @@ mach_port_space_info( is_read_lock(space); if (!is_active(space)) { is_read_unlock(space); - if (table_size != 0) + if (table_size != 0) { kmem_free(ipc_kernel_map, - table_addr, table_size); + table_addr, table_size); + } return KERN_INVALID_TASK; } table_size_needed = - vm_map_round_page((space->is_table_size - * sizeof(ipc_info_name_t)), - VM_MAP_PAGE_MASK(ipc_kernel_map)); + vm_map_round_page((space->is_table_size + * sizeof(ipc_info_name_t)), + VM_MAP_PAGE_MASK(ipc_kernel_map)); - if (table_size_needed == table_size) + if (table_size_needed == table_size) { break; + } is_read_unlock(space); if (table_size != table_size_needed) { - if (table_size != 0) + if (table_size != 0) { kmem_free(ipc_kernel_map, table_addr, table_size); - kr = kmem_alloc(ipc_kernel_map, &table_addr, table_size_needed, VM_KERN_MEMORY_IPC); + } + kr = kmem_alloc(ipc_kernel_map, &table_addr, table_size_needed, VM_KERN_MEMORY_IPC); if (kr != KERN_SUCCESS) { return KERN_RESOURCE_SHORTAGE; } table_size = table_size_needed; } - } /* space is read-locked and active; we have enough wired memory */ @@ -272,20 +277,21 @@ mach_port_space_info( vm_size_t used_table_size; used_table_size = infop->iis_table_size * sizeof(ipc_info_name_t); - if (table_size > used_table_size) + if (table_size > used_table_size) { bzero((char *)&table_info[infop->iis_table_size], - table_size - 
used_table_size); + table_size - used_table_size); + } kr = vm_map_unwire( ipc_kernel_map, vm_map_trunc_page(table_addr, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(table_addr + table_size, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE); assert(kr == KERN_SUCCESS); - kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr, - (vm_map_size_t)used_table_size, TRUE, &copy); + kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr, + (vm_map_size_t)used_table_size, TRUE, &copy); assert(kr == KERN_SUCCESS); *tablep = (ipc_info_name_t *)copy; *tableCntp = infop->iis_table_size; @@ -316,19 +322,20 @@ mach_port_space_info( #if !MACH_IPC_DEBUG kern_return_t mach_port_space_basic_info( - __unused ipc_space_t space, - __unused ipc_info_space_basic_t *infop) + __unused ipc_space_t space, + __unused ipc_info_space_basic_t *infop) { - return KERN_FAILURE; + return KERN_FAILURE; } #else kern_return_t mach_port_space_basic_info( - ipc_space_t space, - ipc_info_space_basic_t *infop) + ipc_space_t space, + ipc_info_space_basic_t *infop) { - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } is_read_lock(space); @@ -369,31 +376,33 @@ mach_port_space_basic_info( #if !MACH_IPC_DEBUG kern_return_t mach_port_dnrequest_info( - __unused ipc_space_t space, - __unused mach_port_name_t name, - __unused unsigned int *totalp, - __unused unsigned int *usedp) + __unused ipc_space_t space, + __unused mach_port_name_t name, + __unused unsigned int *totalp, + __unused unsigned int *usedp) { - return KERN_FAILURE; + return KERN_FAILURE; } #else kern_return_t mach_port_dnrequest_info( - ipc_space_t space, - mach_port_name_t name, - unsigned int *totalp, - unsigned int *usedp) + ipc_space_t space, + mach_port_name_t name, + unsigned int *totalp, + unsigned int *usedp) { unsigned int total, used; ipc_port_t port; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ if (port->ip_requests == IPR_NULL) { @@ -406,11 +415,12 @@ mach_port_dnrequest_info( total = requests->ipr_size->its_size; for (index = 1, used = 0; - index < total; index++) { + index < total; index++) { ipc_port_request_t ipr = &requests[index]; - if (ipr->ipr_name != MACH_PORT_NULL) + if (ipr->ipr_name != MACH_PORT_NULL) { used++; + } } } ip_unlock(port); @@ -443,32 +453,34 @@ mach_port_dnrequest_info( #if !MACH_IPC_DEBUG kern_return_t mach_port_kobject( - __unused ipc_space_t space, - __unused mach_port_name_t name, - __unused natural_t *typep, - __unused mach_vm_address_t *addrp) + __unused ipc_space_t space, + __unused mach_port_name_t name, + __unused natural_t *typep, + __unused mach_vm_address_t *addrp) { - return KERN_FAILURE; + return KERN_FAILURE; } #else kern_return_t mach_port_kobject( - ipc_space_t space, - mach_port_name_t name, - natural_t *typep, - mach_vm_address_t *addrp) + ipc_space_t space, + mach_port_name_t name, + natural_t *typep, + mach_vm_address_t *addrp) { ipc_entry_t entry; ipc_port_t port; kern_return_t kr; mach_vm_address_t kaddr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } kr = ipc_right_lookup_read(space, name, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* space is read-locked and active */ if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) { @@
-492,11 +504,11 @@ mach_port_kobject( ip_unlock(port); #if (DEVELOPMENT || DEBUG) - if (0 != kaddr && is_ipc_kobject(*typep)) + if (0 != kaddr && is_ipc_kobject(*typep)) { *addrp = VM_KERNEL_UNSLIDE_OR_PERM(kaddr); - else + } else #endif - *addrp = 0; + *addrp = 0; return KERN_SUCCESS; } @@ -522,20 +534,20 @@ mach_port_kobject( #if !MACH_IPC_DEBUG kern_return_t mach_port_kernel_object( - __unused ipc_space_t space, - __unused mach_port_name_t name, - __unused unsigned int *typep, - __unused unsigned int *addrp) + __unused ipc_space_t space, + __unused mach_port_name_t name, + __unused unsigned int *typep, + __unused unsigned int *addrp) { - return KERN_FAILURE; + return KERN_FAILURE; } #else kern_return_t mach_port_kernel_object( - ipc_space_t space, - mach_port_name_t name, - unsigned int *typep, - unsigned int *addrp) + ipc_space_t space, + mach_port_name_t name, + unsigned int *typep, + unsigned int *addrp) { mach_vm_address_t addr = 0; kern_return_t kr; @@ -551,24 +563,28 @@ kern_return_t mach_port_special_reply_port_reset_link( ipc_space_t space, mach_port_name_t name, - boolean_t *srp_lost_link) + boolean_t *srp_lost_link) { ipc_port_t port; kern_return_t kr; thread_t thread = current_thread(); - if (space != current_space()) + if (space != current_space()) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_NAME; + } - if (!IP_VALID(thread->ith_special_reply_port)) + if (!IP_VALID(thread->ith_special_reply_port)) { return KERN_INVALID_VALUE; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } if (thread->ith_special_reply_port != port) { ip_unlock(port); @@ -586,9 +602,9 @@ mach_port_special_reply_port_reset_link( #else kern_return_t mach_port_special_reply_port_reset_link( - __unused ipc_space_t space, - __unused mach_port_name_t name, - __unused boolean_t *srp_lost_link) + __unused ipc_space_t space, + __unused mach_port_name_t name, + __unused boolean_t *srp_lost_link) { return KERN_NOT_SUPPORTED; } diff --git a/osfmk/ipc/mach_kernelrpc.c b/osfmk/ipc/mach_kernelrpc.c index d251ab95c..52bf7b11c 100644 --- a/osfmk/ipc/mach_kernelrpc.c +++ b/osfmk/ipc/mach_kernelrpc.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -45,20 +45,24 @@ _kernelrpc_mach_vm_allocate_trap(struct _kernelrpc_mach_vm_allocate_trap_args *a task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } - if (copyin(args->addr, (char *)&addr, sizeof (addr))) + if (copyin(args->addr, (char *)&addr, sizeof(addr))) { goto done; + } rv = mach_vm_allocate_external(task->map, &addr, args->size, args->flags); - if (rv == KERN_SUCCESS) - rv = copyout(&addr, args->addr, sizeof (addr)); - + if (rv == KERN_SUCCESS) { + rv = copyout(&addr, args->addr, sizeof(addr)); + } + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -67,15 +71,17 @@ _kernelrpc_mach_vm_deallocate_trap(struct _kernelrpc_mach_vm_deallocate_args *ar task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_vm_deallocate(task->map, args->address, args->size); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -84,16 +90,18 @@ _kernelrpc_mach_vm_protect_trap(struct _kernelrpc_mach_vm_protect_args *args) task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_vm_protect(task->map, args->address, args->size, args->set_maximum, args->new_protection); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -103,22 +111,26 @@ _kernelrpc_mach_vm_map_trap(struct _kernelrpc_mach_vm_map_trap_args *args) task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } - if (copyin(args->addr, (char *)&addr, sizeof (addr))) + if (copyin(args->addr, (char *)&addr, sizeof(addr))) { goto done; + } rv = mach_vm_map_external(task->map, &addr, args->size, args->mask, args->flags, - IPC_PORT_NULL, 0, FALSE, args->cur_protection, VM_PROT_ALL, - VM_INHERIT_DEFAULT); - if (rv == KERN_SUCCESS) - rv = copyout(&addr, args->addr, sizeof (addr)); + IPC_PORT_NULL, 0, FALSE, args->cur_protection, VM_PROT_ALL, + VM_INHERIT_DEFAULT); + if (rv == KERN_SUCCESS) { + rv = copyout(&addr, args->addr, sizeof(addr)); + } done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -129,23 +141,27 @@ _kernelrpc_mach_vm_purgable_control_trap( task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } - if (copyin(args->state, (char *)&state, sizeof (state))) + if (copyin(args->state, (char *)&state, sizeof(state))) { goto done; + } rv = mach_vm_purgable_control(task->map, - args->address, - args->control, - &state); - if (rv == KERN_SUCCESS) - rv = copyout(&state, args->state, sizeof (state)); - + args->address, + args->control, + &state); + if (rv == KERN_SUCCESS) { + rv = copyout(&state, args->state, sizeof(state)); + } + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -155,18 +171,21 @@ _kernelrpc_mach_port_allocate_trap(struct _kernelrpc_mach_port_allocate_args *ar mach_port_name_t name; int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_allocate(task->itk_space, 
args->right, &name); - if (rv == KERN_SUCCESS) - rv = copyout(&name, args->name, sizeof (name)); + if (rv == KERN_SUCCESS) { + rv = copyout(&name, args->name, sizeof(name)); + } + - done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -175,15 +194,17 @@ _kernelrpc_mach_port_destroy_trap(struct _kernelrpc_mach_port_destroy_args *args task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_destroy(task->itk_space, args->name); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -192,15 +213,17 @@ _kernelrpc_mach_port_deallocate_trap(struct _kernelrpc_mach_port_deallocate_args task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_deallocate(task->itk_space, args->name); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -209,15 +232,17 @@ _kernelrpc_mach_port_mod_refs_trap(struct _kernelrpc_mach_port_mod_refs_args *ar task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_mod_refs(task->itk_space, args->name, args->right, args->delta); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } @@ -227,15 +252,17 @@ _kernelrpc_mach_port_move_member_trap(struct _kernelrpc_mach_port_move_member_ar task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_move_member(task->itk_space, args->member, args->after); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -246,13 +273,15 @@ _kernelrpc_mach_port_insert_right_trap(struct _kernelrpc_mach_port_insert_right_ mach_msg_type_name_t disp; int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = ipc_object_copyin(task->itk_space, args->poly, args->polyPoly, (ipc_object_t *)&port); - if (rv != KERN_SUCCESS) + if (rv != KERN_SUCCESS) { goto done; + } disp = ipc_object_copyin_type(args->polyPoly); rv = mach_port_insert_right(task->itk_space, args->name, port, disp); @@ -261,11 +290,12 @@ _kernelrpc_mach_port_insert_right_trap(struct _kernelrpc_mach_port_insert_right_ ipc_object_destroy((ipc_object_t)port, disp); } } - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -275,16 +305,17 @@ _kernelrpc_mach_port_get_attributes_trap(struct _kernelrpc_mach_port_get_attribu int rv = MACH_SEND_INVALID_DEST; mach_msg_type_number_t count; - if (task != current_task()) + if (task != current_task()) { goto done; + } // MIG does not define the type or size of the mach_port_info_t out array // anywhere, so derive them from the field in the generated reply struct #define MACH_PORT_INFO_OUT (((__Reply__mach_port_get_attributes_t*)NULL)->port_info_out) #define MACH_PORT_INFO_STACK_LIMIT 80 // current size is 68 == 17 * sizeof(integer_t) _Static_assert(sizeof(MACH_PORT_INFO_OUT) < MACH_PORT_INFO_STACK_LIMIT, - "mach_port_info_t has grown significantly, reevaluate stack usage"); - const mach_msg_type_number_t max_count = (sizeof(MACH_PORT_INFO_OUT)/sizeof(MACH_PORT_INFO_OUT[0])); + 
"mach_port_info_t has grown significantly, reevaluate stack usage"); + const mach_msg_type_number_t max_count = (sizeof(MACH_PORT_INFO_OUT) / sizeof(MACH_PORT_INFO_OUT[0])); typeof(MACH_PORT_INFO_OUT[0]) info[max_count]; /* @@ -297,19 +328,23 @@ _kernelrpc_mach_port_get_attributes_trap(struct _kernelrpc_mach_port_get_attribu rv = MACH_SEND_INVALID_DATA; goto done; } - if (count > max_count) + if (count > max_count) { count = max_count; + } rv = mach_port_get_attributes(task->itk_space, args->name, args->flavor, info, &count); - if (rv == KERN_SUCCESS) + if (rv == KERN_SUCCESS) { rv = copyout(&count, CAST_USER_ADDR_T(args->count), sizeof(count)); - if (rv == KERN_SUCCESS && count > 0) + } + if (rv == KERN_SUCCESS && count > 0) { rv = copyout(info, CAST_USER_ADDR_T(args->info), count * sizeof(info[0])); + } done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -318,15 +353,17 @@ _kernelrpc_mach_port_insert_member_trap(struct _kernelrpc_mach_port_insert_membe task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_insert_member(task->itk_space, args->name, args->pset); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } @@ -336,15 +373,17 @@ _kernelrpc_mach_port_extract_member_trap(struct _kernelrpc_mach_port_extract_mem task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_extract_member(task->itk_space, args->name, args->pset); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -355,22 +394,25 @@ _kernelrpc_mach_port_construct_trap(struct _kernelrpc_mach_port_construct_args * int rv = MACH_SEND_INVALID_DEST; mach_port_options_t options; - if (copyin(args->options, (char *)&options, sizeof (options))) { + if (copyin(args->options, (char *)&options, sizeof(options))) { rv = MACH_SEND_INVALID_DATA; goto done; } - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_construct(task->itk_space, &options, args->context, &name); - if (rv == KERN_SUCCESS) - rv = copyout(&name, args->name, sizeof (name)); + if (rv == KERN_SUCCESS) { + rv = copyout(&name, args->name, sizeof(name)); + } done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -379,15 +421,17 @@ _kernelrpc_mach_port_destruct_trap(struct _kernelrpc_mach_port_destruct_args *ar task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_destruct(task->itk_space, args->name, args->srdelta, args->guard); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -396,15 +440,17 @@ _kernelrpc_mach_port_guard_trap(struct _kernelrpc_mach_port_guard_args *args) task_t task = port_name_to_task(args->target); int rv = MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_guard(task->itk_space, args->name, args->guard, args->strict); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } int @@ -413,15 +459,17 @@ _kernelrpc_mach_port_unguard_trap(struct _kernelrpc_mach_port_unguard_args *args task_t task = port_name_to_task(args->target); int rv = 
MACH_SEND_INVALID_DEST; - if (task != current_task()) + if (task != current_task()) { goto done; + } rv = mach_port_unguard(task->itk_space, args->name, args->guard); - + done: - if (task) + if (task) { task_deallocate(task); - return (rv); + } + return rv; } kern_return_t @@ -433,13 +481,15 @@ host_create_mach_voucher_trap(struct host_create_mach_voucher_args *args) mach_port_name_t voucher_name = 0; kern_return_t kr = 0; - if (host == HOST_NULL) + if (host == HOST_NULL) { return MACH_SEND_INVALID_DEST; + } - if (args->recipes_size < 0) + if (args->recipes_size < 0) { return KERN_INVALID_ARGUMENT; - else if (args->recipes_size > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) + } else if (args->recipes_size > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) { return MIG_ARRAY_TOO_LARGE; + } if (args->recipes_size < MACH_VOUCHER_TRAP_STACK_LIMIT) { /* keep small recipes on the stack for speed */ @@ -484,31 +534,36 @@ mach_voucher_extract_attr_recipe_trap(struct mach_voucher_extract_attr_recipe_ar kern_return_t kr = KERN_SUCCESS; mach_msg_type_number_t sz = 0; - if (copyin(args->recipe_size, (void *)&sz, sizeof(sz))) + if (copyin(args->recipe_size, (void *)&sz, sizeof(sz))) { return KERN_MEMORY_ERROR; + } - if (sz > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) + if (sz > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) { return MIG_ARRAY_TOO_LARGE; + } voucher = convert_port_name_to_voucher(args->voucher_name); - if (voucher == IV_NULL) + if (voucher == IV_NULL) { return MACH_SEND_INVALID_DEST; + } mach_msg_type_number_t max_sz = sz; if (sz < MACH_VOUCHER_TRAP_STACK_LIMIT) { /* keep small recipes on the stack for speed */ uint8_t krecipe[sz]; + bzero(krecipe, sz); if (copyin(CAST_USER_ADDR_T(args->recipe), (void *)krecipe, sz)) { kr = KERN_MEMORY_ERROR; goto done; } kr = mach_voucher_extract_attr_recipe(voucher, args->key, - (mach_voucher_attr_raw_recipe_t)krecipe, &sz); + (mach_voucher_attr_raw_recipe_t)krecipe, &sz); assert(sz <= max_sz); - if (kr == KERN_SUCCESS && sz > 0) + if (kr == KERN_SUCCESS && sz > 0) { kr = copyout(krecipe, CAST_USER_ADDR_T(args->recipe), sz); + } } else { uint8_t *krecipe = kalloc((vm_size_t)max_sz); if (!krecipe) { @@ -523,16 +578,18 @@ mach_voucher_extract_attr_recipe_trap(struct mach_voucher_extract_attr_recipe_ar } kr = mach_voucher_extract_attr_recipe(voucher, args->key, - (mach_voucher_attr_raw_recipe_t)krecipe, &sz); + (mach_voucher_attr_raw_recipe_t)krecipe, &sz); assert(sz <= max_sz); - if (kr == KERN_SUCCESS && sz > 0) + if (kr == KERN_SUCCESS && sz > 0) { kr = copyout(krecipe, CAST_USER_ADDR_T(args->recipe), sz); + } kfree(krecipe, (vm_size_t)max_sz); } - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { kr = copyout(&sz, args->recipe_size, sizeof(sz)); + } done: ipc_voucher_release(voucher); diff --git a/osfmk/ipc/mach_msg.c b/osfmk/ipc/mach_msg.c index d17cb24c3..c66b20303 100644 --- a/osfmk/ipc/mach_msg.c +++ b/osfmk/ipc/mach_msg.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -122,30 +122,30 @@ */ mach_msg_return_t mach_msg_send( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t send_size, - mach_msg_timeout_t send_timeout, - mach_port_name_t notify); + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_timeout_t send_timeout, + mach_port_name_t notify); mach_msg_return_t mach_msg_receive( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t rcv_size, - mach_port_name_t rcv_name, - mach_msg_timeout_t rcv_timeout, - void (*continuation)(mach_msg_return_t), - mach_msg_size_t slist_size); + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t rcv_timeout, + void (*continuation)(mach_msg_return_t), + mach_msg_size_t slist_size); mach_msg_return_t msg_receive_error( - ipc_kmsg_t kmsg, - mach_msg_option_t option, - mach_vm_address_t rcv_addr, - mach_msg_size_t rcv_size, - mach_port_seqno_t seqno, - ipc_space_t space, - mach_msg_size_t *out_size); + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_vm_address_t rcv_addr, + mach_msg_size_t rcv_size, + mach_port_seqno_t seqno, + ipc_space_t space, + mach_msg_size_t *out_size); static mach_msg_return_t mach_msg_rcv_link_special_reply_port( @@ -161,8 +161,8 @@ audit_token_t KERNEL_AUDIT_TOKEN = KERNEL_AUDIT_TOKEN_VALUE; mach_msg_format_0_trailer_t trailer_template = { /* mach_msg_trailer_type_t */ MACH_MSG_TRAILER_FORMAT_0, /* mach_msg_trailer_size_t */ MACH_MSG_TRAILER_MINIMUM_SIZE, - /* mach_port_seqno_t */ 0, - /* security_token_t */ KERNEL_SECURITY_TOKEN_VALUE + /* mach_port_seqno_t */ 0, + /* security_token_t */ KERNEL_SECURITY_TOKEN_VALUE }; /* @@ -193,50 +193,52 @@ mach_msg_format_0_trailer_t trailer_template = { mach_msg_return_t mach_msg_send( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t send_size, - 
mach_msg_timeout_t send_timeout, - mach_msg_priority_t override) + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_timeout_t send_timeout, + mach_msg_priority_t override) { ipc_space_t space = current_space(); vm_map_t map = current_map(); ipc_kmsg_t kmsg; mach_msg_return_t mr; - mach_msg_size_t msg_and_trailer_size; - mach_msg_max_trailer_t *trailer; + mach_msg_size_t msg_and_trailer_size; + mach_msg_max_trailer_t *trailer; option |= MACH_SEND_KERNEL; if ((send_size & 3) || send_size < sizeof(mach_msg_header_t) || - (send_size < sizeof(mach_msg_base_t) && (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX))) + (send_size < sizeof(mach_msg_base_t) && (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX))) { return MACH_SEND_MSG_TOO_SMALL; + } - if (send_size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE) + if (send_size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE) { return MACH_SEND_TOO_LARGE; - - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + } + + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); msg_and_trailer_size = send_size + MAX_TRAILER_SIZE; kmsg = ipc_kmsg_alloc(msg_and_trailer_size); if (kmsg == IKM_NULL) { - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_NO_BUFFER); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_NO_BUFFER); return MACH_SEND_NO_BUFFER; } - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE, - (uintptr_t)0, /* this should only be called from the kernel! */ - VM_KERNEL_ADDRPERM((uintptr_t)kmsg), - 0, 0, - 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE, + (uintptr_t)0, /* this should only be called from the kernel! */ + VM_KERNEL_ADDRPERM((uintptr_t)kmsg), + 0, 0, + 0); (void) memcpy((void *) kmsg->ikm_header, (const void *) msg, send_size); kmsg->ikm_header->msgh_size = send_size; - /* + /* * reserve for the trailer the largest space (MAX_TRAILER_SIZE) * However, the internal size field of the trailer (msgh_trailer_size) * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize @@ -252,35 +254,34 @@ mach_msg_send( if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } mr = ipc_kmsg_send(kmsg, option, send_timeout); if (mr != MACH_MSG_SUCCESS) { - mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL); - (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, - kmsg->ikm_header->msgh_size); - ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL); + (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, + kmsg->ikm_header->msgh_size); + ipc_kmsg_free(kmsg); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); } return mr; } -/* +/* * message header as seen at user-space * (for MACH_RCV_LARGE/IDENTITY updating) */ -typedef struct -{ - mach_msg_bits_t msgh_bits; - mach_msg_size_t msgh_size; - mach_port_name_t msgh_remote_port; - mach_port_name_t msgh_local_port; - mach_msg_size_t msgh_reserved; - mach_msg_id_t msgh_id; +typedef struct{ + mach_msg_bits_t msgh_bits; + mach_msg_size_t msgh_size; + mach_port_name_t msgh_remote_port; + mach_port_name_t msgh_local_port; + mach_msg_size_t msgh_reserved; + mach_msg_id_t msgh_id; } mach_msg_user_header_t; /* 
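Before allocating a kmsg, mach_msg_send() rejects send sizes that are not 4-byte aligned, smaller than a bare header, smaller than a header plus body when the complex bit is set, or too large to leave room for the maximum trailer. A userland replica of those checks, useful when building messages by hand; MAX_TRAILER_SIZE is kernel-private, so this sketch derives it from mach_msg_max_trailer_t the same way the kernel does.

#include <mach/message.h>

static mach_msg_return_t
check_send_size(const mach_msg_header_t *msg, mach_msg_size_t send_size)
{
	const mach_msg_size_t max_trailer =
	    (mach_msg_size_t)sizeof(mach_msg_max_trailer_t);

	if ((send_size & 3) ||                        /* must be word aligned */
	    send_size < sizeof(mach_msg_header_t) ||  /* room for the header */
	    (send_size < sizeof(mach_msg_base_t) &&   /* complex => header + body */
	    (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX))) {
		return MACH_SEND_MSG_TOO_SMALL;
	}

	if (send_size > MACH_MSG_SIZE_MAX - max_trailer) {
		return MACH_SEND_TOO_LARGE;
	}

	return MACH_MSG_SUCCESS;
}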
@@ -331,9 +332,7 @@ mach_msg_receive_results( io_release(object); if (mr != MACH_MSG_SUCCESS) { - if (mr == MACH_RCV_TOO_LARGE) { - /* * If the receive operation occurs with MACH_RCV_LARGE set * then no message was extracted from the queue, and the size @@ -342,10 +341,8 @@ mach_msg_receive_results( * header. */ if (option & MACH_RCV_LARGE) { - if ((option & MACH_RCV_STACK) == 0 && rcv_size >= offsetof(mach_msg_user_header_t, msgh_reserved)) { - /* * We need to inform the user-level code that it needs more * space. The value for how much space was returned in the @@ -354,28 +351,31 @@ mach_msg_receive_results( */ if (option & MACH_RCV_LARGE_IDENTITY) { if (copyout((char *) &self->ith_receiver_name, - rcv_addr + offsetof(mach_msg_user_header_t, msgh_local_port), - sizeof(mach_port_name_t))) + rcv_addr + offsetof(mach_msg_user_header_t, msgh_local_port), + sizeof(mach_port_name_t))) { mr = MACH_RCV_INVALID_DATA; + } } if (copyout((char *) &self->ith_msize, - rcv_addr + offsetof(mach_msg_user_header_t, msgh_size), - sizeof(mach_msg_size_t))) + rcv_addr + offsetof(mach_msg_user_header_t, msgh_size), + sizeof(mach_msg_size_t))) { mr = MACH_RCV_INVALID_DATA; + } } } else { - /* discard importance in message */ ipc_importance_clean(kmsg); if (msg_receive_error(kmsg, option, rcv_addr, rcv_size, seqno, space, &size) - == MACH_RCV_INVALID_DATA) + == MACH_RCV_INVALID_DATA) { mr = MACH_RCV_INVALID_DATA; + } } } - if (sizep) + if (sizep) { *sizep = size; + } return mr; } @@ -391,24 +391,25 @@ mach_msg_receive_results( /* auto redeem the voucher in the message */ ipc_voucher_receive_postprocessing(kmsg, option); - trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, self, seqno, FALSE, - kmsg->ikm_header->msgh_remote_port->ip_context); - + trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, self, seqno, FALSE, + kmsg->ikm_header->msgh_remote_port->ip_context); + mr = ipc_kmsg_copyout(kmsg, space, map, MACH_MSG_BODY_NULL, option); if (mr != MACH_MSG_SUCCESS) { - /* already received importance, so have to undo that here */ ipc_importance_unreceive(kmsg, option); /* if we had a body error copyout what we have, otherwise a simple header/trailer */ - if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) { - if (ipc_kmsg_put(kmsg, option, rcv_addr, rcv_size, trailer_size, &size) == MACH_RCV_INVALID_DATA) + if ((mr & ~MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) { + if (ipc_kmsg_put(kmsg, option, rcv_addr, rcv_size, trailer_size, &size) == MACH_RCV_INVALID_DATA) { mr = MACH_RCV_INVALID_DATA; + } } else { - if (msg_receive_error(kmsg, option, rcv_addr, rcv_size, seqno, space, &size) - == MACH_RCV_INVALID_DATA) + if (msg_receive_error(kmsg, option, rcv_addr, rcv_size, seqno, space, &size) + == MACH_RCV_INVALID_DATA) { mr = MACH_RCV_INVALID_DATA; + } } } else { /* capture ksmg QoS values to the thread continuation state */ @@ -417,8 +418,9 @@ mach_msg_receive_results( mr = ipc_kmsg_put(kmsg, option, rcv_addr, rcv_size, trailer_size, &size); } - if (sizep) + if (sizep) { *sizep = size; + } return mr; } @@ -441,12 +443,12 @@ mach_msg_receive_results( */ mach_msg_return_t mach_msg_receive( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t rcv_size, - mach_port_name_t rcv_name, - mach_msg_timeout_t rcv_timeout, - void (*continuation)(mach_msg_return_t), + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t rcv_timeout, + void (*continuation)(mach_msg_return_t), __unused mach_msg_size_t slist_size) { thread_t self = 
current_thread(); @@ -456,7 +458,7 @@ mach_msg_receive( mach_msg_return_t mr; mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object); - if (mr != MACH_MSG_SUCCESS) { + if (mr != MACH_MSG_SUCCESS) { return mr; } /* hold ref for object */ @@ -470,8 +472,9 @@ mach_msg_receive( self->ith_knote = ITH_KNOTE_NULL; ipc_mqueue_receive(mqueue, option, rcv_size, rcv_timeout, THREAD_ABORTSAFE); - if ((option & MACH_RCV_TIMEOUT) && rcv_timeout == 0) + if ((option & MACH_RCV_TIMEOUT) && rcv_timeout == 0) { thread_poll_yield(self); + } return mach_msg_receive_results(NULL); } @@ -481,10 +484,11 @@ mach_msg_receive_continue(void) mach_msg_return_t mr; thread_t self = current_thread(); - if (self->ith_state == MACH_PEEK_READY) + if (self->ith_state == MACH_PEEK_READY) { mr = MACH_PEEK_READY; - else + } else { mr = mach_msg_receive_results(NULL); + } (*self->ith_continuation)(mr); } @@ -503,14 +507,14 @@ mach_msg_return_t mach_msg_overwrite_trap( struct mach_msg_overwrite_trap_args *args) { - mach_vm_address_t msg_addr = args->msg; - mach_msg_option_t option = args->option; - mach_msg_size_t send_size = args->send_size; - mach_msg_size_t rcv_size = args->rcv_size; - mach_port_name_t rcv_name = args->rcv_name; - mach_msg_timeout_t msg_timeout = args->timeout; + mach_vm_address_t msg_addr = args->msg; + mach_msg_option_t option = args->option; + mach_msg_size_t send_size = args->send_size; + mach_msg_size_t rcv_size = args->rcv_size; + mach_port_name_t rcv_name = args->rcv_name; + mach_msg_timeout_t msg_timeout = args->timeout; mach_msg_priority_t override = args->override; - mach_vm_address_t rcv_msg_addr = args->rcv_msg; + mach_vm_address_t rcv_msg_addr = args->rcv_msg; __unused mach_port_seqno_t temp_seqno = 0; mach_msg_return_t mr = MACH_MSG_SUCCESS; @@ -523,26 +527,26 @@ mach_msg_overwrite_trap( ipc_space_t space = current_space(); ipc_kmsg_t kmsg; - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); mr = ipc_kmsg_get(msg_addr, send_size, &kmsg); if (mr != MACH_MSG_SUCCESS) { - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE, - (uintptr_t)msg_addr, - VM_KERNEL_ADDRPERM((uintptr_t)kmsg), - 0, 0, - 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE, + (uintptr_t)msg_addr, + VM_KERNEL_ADDRPERM((uintptr_t)kmsg), + 0, 0, + 0); mr = ipc_kmsg_copyin(kmsg, space, map, override, &option); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } @@ -551,10 +555,9 @@ mach_msg_overwrite_trap( if (mr != MACH_MSG_SUCCESS) { mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map, MACH_MSG_BODY_NULL); (void) ipc_kmsg_put(kmsg, option, msg_addr, send_size, 0, NULL); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } - } if (option & MACH_RCV_MSG) { @@ -575,17 +578,18 @@ mach_msg_overwrite_trap( __IGNORE_WCASTALIGN(special_reply_port = (ipc_port_t) object); /* link the special reply port to the destination */ mr = mach_msg_rcv_link_special_reply_port(special_reply_port, - (mach_port_name_t)override); + (mach_port_name_t)override); if (mr != 
MACH_MSG_SUCCESS) { io_release(object); return mr; } } - if (rcv_msg_addr != (mach_vm_address_t)0) + if (rcv_msg_addr != (mach_vm_address_t)0) { self->ith_msg_addr = rcv_msg_addr; - else + } else { self->ith_msg_addr = msg_addr; + } self->ith_object = object; self->ith_rsize = rcv_size; self->ith_msize = 0; @@ -595,8 +599,9 @@ mach_msg_overwrite_trap( self->ith_knote = ITH_KNOTE_NULL; ipc_mqueue_receive(mqueue, option, rcv_size, msg_timeout, THREAD_ABORTSAFE); - if ((option & MACH_RCV_TIMEOUT) && msg_timeout == 0) + if ((option & MACH_RCV_TIMEOUT) && msg_timeout == 0) { thread_poll_yield(self); + } return mach_msg_receive_results(NULL); } @@ -631,8 +636,8 @@ mach_msg_rcv_link_special_reply_port( } kr = ipc_object_copyin(current_space(), - dest_name_port, MACH_MSG_TYPE_COPY_SEND, - (ipc_object_t *) &dest_port); + dest_name_port, MACH_MSG_TYPE_COPY_SEND, + (ipc_object_t *) &dest_port); /* * The receive right of dest port might have gone away, @@ -640,7 +645,7 @@ mach_msg_rcv_link_special_reply_port( */ if (kr == KERN_SUCCESS && IP_VALID(dest_port)) { ipc_port_link_special_reply_port(special_reply_port, - dest_port); + dest_port); /* release the send right */ ipc_port_release_send(dest_port); @@ -679,16 +684,15 @@ mach_msg_receive_results_complete(ipc_object_t object) * Don't clear the ip_srp_msg_sent bit if... */ if (!((self->ith_state == MACH_RCV_TOO_LARGE && self->ith_option & MACH_RCV_LARGE) || //msg was too large and the next receive will get it - self->ith_state == MACH_RCV_INTERRUPTED || - self->ith_state == MACH_RCV_TIMED_OUT || - self->ith_state == MACH_RCV_PORT_CHANGED || - self->ith_state == MACH_PEEK_READY)) { - + self->ith_state == MACH_RCV_INTERRUPTED || + self->ith_state == MACH_RCV_TIMED_OUT || + self->ith_state == MACH_RCV_PORT_CHANGED || + self->ith_state == MACH_PEEK_READY)) { flags |= IPC_PORT_ADJUST_SR_RECEIVED_MSG; } ipc_port_adjust_special_reply_port(port, - flags, get_turnstile); + flags, get_turnstile); /* thread now has a turnstile */ } @@ -709,10 +713,10 @@ mach_msg_trap( kern_return_t kr; args->rcv_msg = (mach_vm_address_t)0; - kr = mach_msg_overwrite_trap(args); + kr = mach_msg_overwrite_trap(args); return kr; } - + /* * Routine: msg_receive_error [internal] @@ -728,51 +732,52 @@ mach_msg_trap( * MACH_MSG_SUCCESS minimal header/trailer copied * MACH_RCV_INVALID_DATA copyout to user buffer failed */ - + mach_msg_return_t msg_receive_error( - ipc_kmsg_t kmsg, - mach_msg_option_t option, - mach_vm_address_t rcv_addr, - mach_msg_size_t rcv_size, - mach_port_seqno_t seqno, - ipc_space_t space, - mach_msg_size_t *sizep) + ipc_kmsg_t kmsg, + mach_msg_option_t option, + mach_vm_address_t rcv_addr, + mach_msg_size_t rcv_size, + mach_port_seqno_t seqno, + ipc_space_t space, + mach_msg_size_t *sizep) { - mach_vm_address_t context; + mach_vm_address_t context; mach_msg_trailer_size_t trailer_size; - mach_msg_max_trailer_t *trailer; + mach_msg_max_trailer_t *trailer; context = kmsg->ikm_header->msgh_remote_port->ip_context; /* * Copy out the destination port in the message. - * Destroy all other rights and memory in the message. + * Destroy all other rights and memory in the message. */ ipc_kmsg_copyout_dest(kmsg, space); /* * Build a minimal message with the requested trailer. 
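The MACH_RCV_TOO_LARGE handling in mach_msg_receive_results() above is what makes the userland grow-and-retry idiom work: with MACH_RCV_LARGE set, the message stays queued and the kernel copies the required size back into msgh_size (and, with MACH_RCV_LARGE_IDENTITY, the receive-right name into msgh_local_port). A sketch of the consumer side; receive_large() is a hypothetical helper, and the trailer padding on retry is a conservative choice, not mandated by the kernel.

#include <stdlib.h>
#include <mach/mach.h>

static mach_msg_header_t *
receive_large(mach_port_name_t port)
{
	mach_msg_size_t size = (mach_msg_size_t)sizeof(mach_msg_empty_rcv_t);
	mach_msg_header_t *msg = malloc(size);
	mach_msg_return_t mr;

	mr = mach_msg(msg, MACH_RCV_MSG | MACH_RCV_LARGE, 0, size,
	    port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	if (mr == MACH_RCV_TOO_LARGE) {
		/* msgh_size now reports the space the kernel needs;
		 * pad for the trailer to be safe (error handling elided) */
		size = msg->msgh_size +
		    (mach_msg_size_t)sizeof(mach_msg_max_trailer_t);
		msg = realloc(msg, size);
		mr = mach_msg(msg, MACH_RCV_MSG, 0, size,
		    port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
	return (mr == MACH_MSG_SUCCESS) ? msg : (free(msg), NULL);
}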
*/ - trailer = (mach_msg_max_trailer_t *) - ((vm_offset_t)kmsg->ikm_header + - round_msg(sizeof(mach_msg_header_t))); + trailer = (mach_msg_max_trailer_t *) + ((vm_offset_t)kmsg->ikm_header + + round_msg(sizeof(mach_msg_header_t))); kmsg->ikm_header->msgh_size = sizeof(mach_msg_header_t); - bcopy( (char *)&trailer_template, - (char *)trailer, - sizeof(trailer_template)); + bcopy((char *)&trailer_template, + (char *)trailer, + sizeof(trailer_template)); - trailer_size = ipc_kmsg_add_trailer(kmsg, space, - option, current_thread(), seqno, - TRUE, context); + trailer_size = ipc_kmsg_add_trailer(kmsg, space, + option, current_thread(), seqno, + TRUE, context); /* * Copy the message to user space and return the size * (note that ipc_kmsg_put may also adjust the actual * size copied out to user-space). */ - if (ipc_kmsg_put(kmsg, option, rcv_addr, rcv_size, trailer_size, sizep) == MACH_RCV_INVALID_DATA) - return(MACH_RCV_INVALID_DATA); - else - return(MACH_MSG_SUCCESS); + if (ipc_kmsg_put(kmsg, option, rcv_addr, rcv_size, trailer_size, sizep) == MACH_RCV_INVALID_DATA) { + return MACH_RCV_INVALID_DATA; + } else { + return MACH_MSG_SUCCESS; + } } diff --git a/osfmk/ipc/mach_port.c b/osfmk/ipc/mach_port.c index 7d88c5481..41089c941 100644 --- a/osfmk/ipc/mach_port.c +++ b/osfmk/ipc/mach_port.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
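msg_receive_error() above is why a failed receive still hands userland a parseable buffer: the destination right is copied out, every other right and memory region is destroyed, and a minimal header plus the requested trailer is returned. Body errors come back with resource flags OR'd into the return code, which is what the (mr & ~MACH_MSG_MASK) test in mach_msg_receive_results() normalizes. A hedged userland sketch of splitting such a composite code; explain_rcv_error() is illustrative, not a system routine.

#include <stdio.h>
#include <mach/message.h>

static void
explain_rcv_error(mach_msg_return_t mr)
{
	/* high bits select the error; MACH_MSG_MASK carries flags
	 * naming the resource that could not be copied out */
	mach_msg_return_t code  = mr & ~MACH_MSG_MASK;
	mach_msg_return_t flags = mr & MACH_MSG_MASK;

	if (code == MACH_RCV_BODY_ERROR) {
		printf("body error, resource flags 0x%x\n", (unsigned)flags);
	} else {
		printf("receive error 0x%x\n", (unsigned)code);
	}
}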
*/ @@ -65,7 +65,7 @@ /* * File: ipc/mach_port.c * Author: Rich Draves - * Date: 1989 + * Date: 1989 * * Exported kernel calls. See mach/mach_port.defs. */ @@ -108,27 +108,27 @@ * Forward declarations */ void mach_port_names_helper( - ipc_port_timestamp_t timestamp, - ipc_entry_t entry, - mach_port_name_t name, - mach_port_name_t *names, - mach_port_type_t *types, - ipc_entry_num_t *actualp); + ipc_port_timestamp_t timestamp, + ipc_entry_t entry, + mach_port_name_t name, + mach_port_name_t *names, + mach_port_type_t *types, + ipc_entry_num_t *actualp); void mach_port_gst_helper( - ipc_pset_t pset, - ipc_entry_num_t maxnames, - mach_port_name_t *names, - ipc_entry_num_t *actualp); + ipc_pset_t pset, + ipc_entry_num_t maxnames, + mach_port_name_t *names, + ipc_entry_num_t *actualp); /* Needs port locked */ void mach_port_get_status_helper( - ipc_port_t port, - mach_port_status_t *status); + ipc_port_t port, + mach_port_status_t *status); /* Zeroed template of qos flags */ -static mach_port_qos_t qos_template; +static mach_port_qos_t qos_template; /* * Routine: mach_port_names_helper @@ -141,12 +141,12 @@ static mach_port_qos_t qos_template; void mach_port_names_helper( - ipc_port_timestamp_t timestamp, - ipc_entry_t entry, - mach_port_name_t name, - mach_port_name_t *names, - mach_port_type_t *types, - ipc_entry_num_t *actualp) + ipc_port_timestamp_t timestamp, + ipc_entry_t entry, + mach_port_name_t name, + mach_port_name_t *names, + mach_port_type_t *types, + ipc_entry_num_t *actualp) { ipc_entry_bits_t bits; ipc_port_request_index_t request; @@ -167,7 +167,6 @@ mach_port_names_helper( type |= ipc_port_request_type(port, name, request); ip_unlock(port); } - } else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) { mach_port_type_t reqtype; @@ -175,8 +174,8 @@ mach_port_names_helper( ip_lock(port); reqtype = (request != IE_REQ_NONE) ? - ipc_port_request_type(port, name, request) : 0; - + ipc_port_request_type(port, name, request) : 0; + /* * If the port is alive, or was alive when the mach_port_names * started, then return that fact. 
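mach_port_names(), whose reworked body follows, fills two wired kernel buffers and hands them back as out-of-line vm_map_copy arrays, so the caller owns the pages and must vm_deallocate() them. A minimal, runnable caller:

#include <stdio.h>
#include <mach/mach.h>

int
main(void)
{
	mach_port_name_array_t names = NULL;
	mach_port_type_array_t types = NULL;
	mach_msg_type_number_t ncnt = 0, tcnt = 0;

	kern_return_t kr = mach_port_names(mach_task_self(),
	    &names, &ncnt, &types, &tcnt);
	if (kr != KERN_SUCCESS)
		return 1;

	for (mach_msg_type_number_t i = 0; i < ncnt; i++)
		printf("0x%x type 0x%x\n", names[i], types[i]);

	/* the arrays arrive as out-of-line VM; release them */
	vm_deallocate(mach_task_self(), (vm_address_t)names,
	    ncnt * sizeof(names[0]));
	vm_deallocate(mach_task_self(), (vm_address_t)types,
	    tcnt * sizeof(types[0]));
	return 0;
}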
Otherwise, pretend we found @@ -188,8 +187,9 @@ mach_port_names_helper( bits &= ~(IE_BITS_TYPE_MASK); bits |= MACH_PORT_TYPE_DEAD_NAME; /* account for additional reference for dead-name notification */ - if (reqtype != 0) + if (reqtype != 0) { bits++; + } } ip_unlock(port); } @@ -199,7 +199,7 @@ mach_port_names_helper( actual = *actualp; names[actual] = name; types[actual] = type; - *actualp = actual+1; + *actualp = actual + 1; } /* @@ -221,32 +221,33 @@ mach_port_names_helper( kern_return_t mach_port_names( - ipc_space_t space, - mach_port_name_t **namesp, - mach_msg_type_number_t *namesCnt, - mach_port_type_t **typesp, - mach_msg_type_number_t *typesCnt) + ipc_space_t space, + mach_port_name_t **namesp, + mach_msg_type_number_t *namesCnt, + mach_port_type_t **typesp, + mach_msg_type_number_t *typesCnt) { ipc_entry_t table; ipc_entry_num_t tsize; mach_port_index_t index; - ipc_entry_num_t actual; /* this many names */ - ipc_port_timestamp_t timestamp; /* logical time of this operation */ + ipc_entry_num_t actual; /* this many names */ + ipc_port_timestamp_t timestamp; /* logical time of this operation */ mach_port_name_t *names; mach_port_type_t *types; kern_return_t kr; - vm_size_t size; /* size of allocated memory */ - vm_offset_t addr1; /* allocated memory, for names */ - vm_offset_t addr2; /* allocated memory, for types */ - vm_map_copy_t memory1; /* copied-in memory, for names */ - vm_map_copy_t memory2; /* copied-in memory, for types */ + vm_size_t size; /* size of allocated memory */ + vm_offset_t addr1; /* allocated memory, for names */ + vm_offset_t addr2; /* allocated memory, for types */ + vm_map_copy_t memory1; /* copied-in memory, for names */ + vm_map_copy_t memory2; /* copied-in memory, for types */ /* safe simplifying assumption */ static_assert(sizeof(mach_port_name_t) == sizeof(mach_port_type_t)); - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } size = 0; @@ -270,8 +271,9 @@ mach_port_names( (bound * sizeof(mach_port_name_t)), VM_MAP_PAGE_MASK(ipc_kernel_map)); - if (size_needed <= size) + if (size_needed <= size) { break; + } is_read_unlock(space); @@ -282,8 +284,9 @@ mach_port_names( size = size_needed; kr = vm_allocate_kernel(ipc_kernel_map, &addr1, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return KERN_RESOURCE_SHORTAGE; + } kr = vm_allocate_kernel(ipc_kernel_map, &addr2, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); if (kr != KERN_SUCCESS) { @@ -296,10 +299,10 @@ mach_port_names( kr = vm_map_wire_kernel( ipc_kernel_map, vm_map_trunc_page(addr1, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr1 + size, - VM_MAP_PAGE_MASK(ipc_kernel_map)), - VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_IPC, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr1, size); @@ -310,10 +313,10 @@ mach_port_names( kr = vm_map_wire_kernel( ipc_kernel_map, vm_map_trunc_page(addr2, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr2 + size, - VM_MAP_PAGE_MASK(ipc_kernel_map)), - VM_PROT_READ|VM_PROT_WRITE, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); if (kr != KERN_SUCCESS) { @@ -321,7 +324,6 @@ mach_port_names( kmem_free(ipc_kernel_map, addr2, size); return KERN_RESOURCE_SHORTAGE; } - } /* space is read-locked and active */ @@ -343,7 +345,7 @@ mach_port_names( name = 
MACH_PORT_MAKE(index, IE_BITS_GEN(bits)); mach_port_names_helper(timestamp, entry, name, names, - types, &actual); + types, &actual); } } @@ -363,8 +365,8 @@ mach_port_names( size_used = actual * sizeof(mach_port_name_t); vm_size_used = - vm_map_round_page(size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)); + vm_map_round_page(size_used, + VM_MAP_PAGE_MASK(ipc_kernel_map)); /* * Make used memory pageable and get it into @@ -374,34 +376,34 @@ mach_port_names( kr = vm_map_unwire( ipc_kernel_map, vm_map_trunc_page(addr1, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr1 + vm_size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_unwire( ipc_kernel_map, vm_map_trunc_page(addr2, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr2 + vm_size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr1, - (vm_map_size_t)size_used, TRUE, &memory1); + (vm_map_size_t)size_used, TRUE, &memory1); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr2, - (vm_map_size_t)size_used, TRUE, &memory2); + (vm_map_size_t)size_used, TRUE, &memory2); assert(kr == KERN_SUCCESS); if (vm_size_used != size) { kmem_free(ipc_kernel_map, - addr1 + vm_size_used, size - vm_size_used); + addr1 + vm_size_used, size - vm_size_used); kmem_free(ipc_kernel_map, - addr2 + vm_size_used, size - vm_size_used); + addr2 + vm_size_used, size - vm_size_used); } } @@ -437,19 +439,21 @@ mach_port_names( kern_return_t mach_port_type( - ipc_space_t space, - mach_port_name_t name, - mach_port_type_t *typep) + ipc_space_t space, + mach_port_name_t name, + mach_port_type_t *typep) { mach_port_urefs_t urefs; ipc_entry_t entry; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (name == MACH_PORT_NULL) + if (name == MACH_PORT_NULL) { return KERN_INVALID_NAME; + } if (name == MACH_PORT_DEAD) { *typep = MACH_PORT_TYPE_DEAD_NAME; @@ -467,8 +471,8 @@ mach_port_type( /* space is unlocked */ #if 1 - /* JMM - workaround rdar://problem/9121297 (CF being too picky on these bits). */ - *typep &= ~(MACH_PORT_TYPE_SPREQUEST | MACH_PORT_TYPE_SPREQUEST_DELAYED); + /* JMM - workaround rdar://problem/9121297 (CF being too picky on these bits). 
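Note the preserved workaround in mach_port_type() just below: the MACH_PORT_TYPE_SPREQUEST bits are masked out of the result for rdar://problem/9121297, so callers never observe them even when a send-possible request is armed. A short runnable example of querying a right's type:

#include <stdio.h>
#include <mach/mach.h>

int
main(void)
{
	mach_port_t port;
	mach_port_type_t type;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	mach_port_type(mach_task_self(), port, &type);

	/* per the workaround, MACH_PORT_TYPE_SPREQUEST* never shows up here */
	if (type & MACH_PORT_TYPE_RECEIVE)
		printf("0x%x holds a receive right\n", port);

	/* drop the receive right */
	mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
	return 0;
}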
*/ + *typep &= ~(MACH_PORT_TYPE_SPREQUEST | MACH_PORT_TYPE_SPREQUEST_DELAYED); #endif return kr; @@ -496,9 +500,9 @@ mach_port_type( kern_return_t mach_port_rename( - __unused ipc_space_t space, - __unused mach_port_name_t oname, - __unused mach_port_name_t nname) + __unused ipc_space_t space, + __unused mach_port_name_t oname, + __unused mach_port_name_t nname) { return KERN_NOT_SUPPORTED; } @@ -541,21 +545,22 @@ mach_port_rename( kern_return_t mach_port_allocate_name( - ipc_space_t space, - mach_port_right_t right, - mach_port_name_t name) + ipc_space_t space, + mach_port_right_t right, + mach_port_name_t name) { - kern_return_t kr; - mach_port_qos_t qos = qos_template; + kern_return_t kr; + mach_port_qos_t qos = qos_template; qos.name = TRUE; - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_VALUE; + } - kr = mach_port_allocate_full (space, right, MACH_PORT_NULL, - &qos, &name); - return (kr); + kr = mach_port_allocate_full(space, right, MACH_PORT_NULL, + &qos, &name); + return kr; } /* @@ -578,24 +583,24 @@ mach_port_allocate_name( kern_return_t mach_port_allocate( - ipc_space_t space, - mach_port_right_t right, - mach_port_name_t *namep) + ipc_space_t space, + mach_port_right_t right, + mach_port_name_t *namep) { - kern_return_t kr; - mach_port_qos_t qos = qos_template; + kern_return_t kr; + mach_port_qos_t qos = qos_template; - kr = mach_port_allocate_full (space, right, MACH_PORT_NULL, - &qos, namep); - return (kr); + kr = mach_port_allocate_full(space, right, MACH_PORT_NULL, + &qos, namep); + return kr; } /* * Routine: mach_port_allocate_qos [kernel call] * Purpose: - * Allocates a right, with qos options, in a space. Like - * mach_port_allocate_name, except that the implementation - * picks a name for the right. The name may be any legal name + * Allocates a right, with qos options, in a space. Like + * mach_port_allocate_name, except that the implementation + * picks a name for the right. The name may be any legal name * in the space that doesn't currently denote a right. * Conditions: * Nothing locked. 
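mach_port_allocate_name(), mach_port_allocate(), and mach_port_allocate_qos() all stamp a copy of the zeroed qos_template and funnel into mach_port_allocate_full(), whose switch handles the three allocatable right types. One userland call per arm of that switch:

#include <stdio.h>
#include <mach/mach.h>

int
main(void)
{
	mach_port_name_t rcv, pset, dead;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &rcv);
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &pset);
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_DEAD_NAME, &dead);

	printf("receive 0x%x, pset 0x%x, dead name 0x%x\n", rcv, pset, dead);
	return 0;
}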
@@ -611,18 +616,19 @@ mach_port_allocate( kern_return_t mach_port_allocate_qos( - ipc_space_t space, - mach_port_right_t right, - mach_port_qos_t *qosp, - mach_port_name_t *namep) + ipc_space_t space, + mach_port_right_t right, + mach_port_qos_t *qosp, + mach_port_name_t *namep) { - kern_return_t kr; + kern_return_t kr; - if (qosp->name) + if (qosp->name) { return KERN_INVALID_ARGUMENT; - kr = mach_port_allocate_full (space, right, MACH_PORT_NULL, - qosp, namep); - return (kr); + } + kr = mach_port_allocate_full(space, right, MACH_PORT_NULL, + qosp, namep); + return kr; } /* @@ -646,24 +652,27 @@ mach_port_allocate_qos( kern_return_t mach_port_allocate_full( - ipc_space_t space, - mach_port_right_t right, - mach_port_t proto, - mach_port_qos_t *qosp, - mach_port_name_t *namep) + ipc_space_t space, + mach_port_right_t right, + mach_port_t proto, + mach_port_qos_t *qosp, + mach_port_name_t *namep) { - ipc_kmsg_t kmsg = IKM_NULL; - kern_return_t kr; + ipc_kmsg_t kmsg = IKM_NULL; + kern_return_t kr; - if (space == IS_NULL) - return (KERN_INVALID_TASK); + if (space == IS_NULL) { + return KERN_INVALID_TASK; + } - if (proto != MACH_PORT_NULL) - return (KERN_INVALID_VALUE); + if (proto != MACH_PORT_NULL) { + return KERN_INVALID_VALUE; + } if (qosp->name) { - if (!MACH_PORT_VALID (*namep)) - return (KERN_INVALID_VALUE); + if (!MACH_PORT_VALID(*namep)) { + return KERN_INVALID_VALUE; + } } if (qosp->prealloc) { @@ -673,59 +682,63 @@ mach_port_allocate_full( mach_msg_size_t size = qosp->len + MAX_TRAILER_SIZE; if (right != MACH_PORT_RIGHT_RECEIVE) { - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } kmsg = (ipc_kmsg_t)ipc_kmsg_prealloc(size); if (kmsg == IKM_NULL) { - return (KERN_RESOURCE_SHORTAGE); + return KERN_RESOURCE_SHORTAGE; } } } switch (right) { - case MACH_PORT_RIGHT_RECEIVE: - { - ipc_port_t port; + case MACH_PORT_RIGHT_RECEIVE: + { + ipc_port_t port; - if (qosp->name) + if (qosp->name) { kr = ipc_port_alloc_name(space, *namep, &port); - else + } else { kr = ipc_port_alloc(space, namep, &port); + } if (kr == KERN_SUCCESS) { - if (kmsg != IKM_NULL) + if (kmsg != IKM_NULL) { ipc_kmsg_set_prealloc(kmsg, port); + } ip_unlock(port); - - } else if (kmsg != IKM_NULL) + } else if (kmsg != IKM_NULL) { ipc_kmsg_free(kmsg); + } break; - } + } - case MACH_PORT_RIGHT_PORT_SET: - { - ipc_pset_t pset; + case MACH_PORT_RIGHT_PORT_SET: + { + ipc_pset_t pset; - if (qosp->name) + if (qosp->name) { kr = ipc_pset_alloc_name(space, *namep, &pset); - else + } else { kr = ipc_pset_alloc(space, namep, &pset); - if (kr == KERN_SUCCESS) + } + if (kr == KERN_SUCCESS) { ips_unlock(pset); + } break; - } + } - case MACH_PORT_RIGHT_DEAD_NAME: + case MACH_PORT_RIGHT_DEAD_NAME: kr = ipc_object_alloc_dead(space, namep); break; - default: + default: kr = KERN_INVALID_VALUE; break; } - return (kr); + return kr; } /* @@ -747,17 +760,19 @@ mach_port_allocate_full( kern_return_t mach_port_destroy( - ipc_space_t space, - mach_port_name_t name) + ipc_space_t space, + mach_port_name_t name) { ipc_entry_t entry; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_SUCCESS; + } kr = ipc_right_lookup_write(space, name, &entry); if (kr != KERN_SUCCESS) { @@ -790,17 +805,19 @@ mach_port_destroy( kern_return_t mach_port_deallocate( - ipc_space_t space, - mach_port_name_t name) + ipc_space_t space, + mach_port_name_t name) { ipc_entry_t entry; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { 
return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_SUCCESS; + } kr = ipc_right_lookup_write(space, name, &entry); if (kr != KERN_SUCCESS) { @@ -832,24 +849,26 @@ mach_port_deallocate( kern_return_t mach_port_get_refs( - ipc_space_t space, - mach_port_name_t name, - mach_port_right_t right, - mach_port_urefs_t *urefsp) + ipc_space_t space, + mach_port_name_t name, + mach_port_right_t right, + mach_port_urefs_t *urefsp) { mach_port_type_t type; mach_port_urefs_t urefs; ipc_entry_t entry; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (right >= MACH_PORT_RIGHT_NUMBER) + if (right >= MACH_PORT_RIGHT_NUMBER) { return KERN_INVALID_VALUE; + } if (!MACH_PORT_VALID(name)) { - if (right == MACH_PORT_RIGHT_SEND || + if (right == MACH_PORT_RIGHT_SEND || right == MACH_PORT_RIGHT_SEND_ONCE) { *urefsp = 1; return KERN_SUCCESS; @@ -867,31 +886,33 @@ mach_port_get_refs( kr = ipc_right_info(space, name, entry, &type, &urefs); /* space is unlocked */ - if (kr != KERN_SUCCESS) - return kr; + if (kr != KERN_SUCCESS) { + return kr; + } - if (type & MACH_PORT_TYPE(right)) + if (type & MACH_PORT_TYPE(right)) { switch (right) { - case MACH_PORT_RIGHT_SEND_ONCE: + case MACH_PORT_RIGHT_SEND_ONCE: assert(urefs == 1); - /* fall-through */ + /* fall-through */ - case MACH_PORT_RIGHT_PORT_SET: - case MACH_PORT_RIGHT_RECEIVE: + case MACH_PORT_RIGHT_PORT_SET: + case MACH_PORT_RIGHT_RECEIVE: *urefsp = 1; break; - case MACH_PORT_RIGHT_DEAD_NAME: - case MACH_PORT_RIGHT_SEND: + case MACH_PORT_RIGHT_DEAD_NAME: + case MACH_PORT_RIGHT_SEND: assert(urefs > 0); *urefsp = urefs; break; - default: + default: panic("mach_port_get_refs: strange rights"); } - else + } else { *urefsp = 0; + } return kr; } @@ -918,24 +939,27 @@ mach_port_get_refs( kern_return_t mach_port_mod_refs( - ipc_space_t space, - mach_port_name_t name, - mach_port_right_t right, - mach_port_delta_t delta) + ipc_space_t space, + mach_port_name_t name, + mach_port_right_t right, + mach_port_delta_t delta) { ipc_entry_t entry; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (right >= MACH_PORT_RIGHT_NUMBER) + if (right >= MACH_PORT_RIGHT_NUMBER) { return KERN_INVALID_VALUE; + } if (!MACH_PORT_VALID(name)) { if (right == MACH_PORT_RIGHT_SEND || - right == MACH_PORT_RIGHT_SEND_ONCE) + right == MACH_PORT_RIGHT_SEND_ONCE) { return KERN_SUCCESS; + } return KERN_INVALID_NAME; } @@ -947,7 +971,7 @@ mach_port_mod_refs( /* space is write-locked and active */ - kr = ipc_right_delta(space, name, entry, right, delta); /* unlocks */ + kr = ipc_right_delta(space, name, entry, right, delta); /* unlocks */ return kr; } @@ -992,25 +1016,27 @@ mach_port_mod_refs( kern_return_t mach_port_peek( - ipc_space_t space, - mach_port_name_t name, - mach_msg_trailer_type_t trailer_type, - mach_port_seqno_t *seqnop, - mach_msg_size_t *msg_sizep, - mach_msg_id_t *msg_idp, - mach_msg_trailer_info_t trailer_infop, - mach_msg_type_number_t *trailer_sizep) + ipc_space_t space, + mach_port_name_t name, + mach_msg_trailer_type_t trailer_type, + mach_port_seqno_t *seqnop, + mach_msg_size_t *msg_sizep, + mach_msg_id_t *msg_idp, + mach_msg_trailer_info_t trailer_infop, + mach_msg_type_number_t *trailer_sizep) { ipc_port_t port; kern_return_t kr; boolean_t found; mach_msg_max_trailer_t max_trailer; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return 
KERN_INVALID_RIGHT; + } /* * We don't allow anything greater than the audit trailer - to avoid @@ -1027,20 +1053,21 @@ mach_port_peek( kr = ipc_port_translate_receive(space, name, &port); if (kr != KERN_SUCCESS) { mach_port_guard_exception(name, 0, 0, - ((KERN_INVALID_NAME == kr) ? - kGUARD_EXC_INVALID_NAME : - kGUARD_EXC_INVALID_RIGHT)); + ((KERN_INVALID_NAME == kr) ? + kGUARD_EXC_INVALID_NAME : + kGUARD_EXC_INVALID_RIGHT)); return kr; } /* Port locked and active */ found = ipc_mqueue_peek(&port->ip_messages, seqnop, - msg_sizep, msg_idp, &max_trailer, NULL); + msg_sizep, msg_idp, &max_trailer, NULL); ip_unlock(port); - if (found != TRUE) + if (found != TRUE) { return KERN_FAILURE; + } max_trailer.msgh_seqno = *seqnop; memcpy(trailer_infop, &max_trailer, *trailer_sizep); @@ -1064,22 +1091,25 @@ mach_port_peek( kern_return_t mach_port_set_mscount( - ipc_space_t space, - mach_port_name_t name, - mach_port_mscount_t mscount) + ipc_space_t space, + mach_port_name_t name, + mach_port_mscount_t mscount) { ipc_port_t port; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ ipc_port_set_mscount(port, mscount); @@ -1104,22 +1134,25 @@ mach_port_set_mscount( kern_return_t mach_port_set_seqno( - ipc_space_t space, - mach_port_name_t name, - mach_port_seqno_t seqno) + ipc_space_t space, + mach_port_name_t name, + mach_port_seqno_t seqno) { ipc_port_t port; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ ipc_mqueue_set_seqno(&port->ip_messages, seqno); @@ -1144,30 +1177,34 @@ mach_port_set_seqno( kern_return_t mach_port_get_context( - ipc_space_t space, - mach_port_name_t name, - mach_vm_address_t *context) + ipc_space_t space, + mach_port_name_t name, + mach_vm_address_t *context) { ipc_port_t port; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* Port locked and active */ /* For strictly guarded ports, return empty context (which acts as guard) */ - if (port->ip_strict_guard) + if (port->ip_strict_guard) { *context = 0; - else + } else { *context = port->ip_context; + } ip_unlock(port); return KERN_SUCCESS; @@ -1190,25 +1227,28 @@ mach_port_get_context( kern_return_t mach_port_set_context( - ipc_space_t space, - mach_port_name_t name, - mach_vm_address_t context) + ipc_space_t space, + mach_port_name_t name, + mach_vm_address_t context) { ipc_port_t port; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ - if(port->ip_strict_guard) { + if (port->ip_strict_guard) { 
uint64_t portguard = port->ip_context; ip_unlock(port); /* For strictly guarded ports, disallow overwriting context; Raise Exception */ @@ -1241,26 +1281,28 @@ mach_port_set_context( kern_return_t mach_port_get_set_status( - ipc_space_t space, - mach_port_name_t name, - mach_port_name_t **members, - mach_msg_type_number_t *membersCnt) + ipc_space_t space, + mach_port_name_t name, + mach_port_name_t **members, + mach_msg_type_number_t *membersCnt) { - ipc_entry_num_t actual; /* this many members */ - ipc_entry_num_t maxnames; /* space for this many members */ + ipc_entry_num_t actual; /* this many members */ + ipc_entry_num_t maxnames; /* space for this many members */ kern_return_t kr; - vm_size_t size; /* size of allocated memory */ - vm_offset_t addr; /* allocated memory */ - vm_map_copy_t memory; /* copied-in memory */ + vm_size_t size; /* size of allocated memory */ + vm_offset_t addr; /* allocated memory */ + vm_map_copy_t memory; /* copied-in memory */ - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } - size = VM_MAP_PAGE_SIZE(ipc_kernel_map); /* initial guess */ + size = VM_MAP_PAGE_SIZE(ipc_kernel_map); /* initial guess */ for (;;) { mach_port_name_t *names; @@ -1268,13 +1310,14 @@ mach_port_get_set_status( ipc_pset_t pset; kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return KERN_RESOURCE_SHORTAGE; + } /* can't fault while we hold locks */ kr = vm_map_wire_kernel(ipc_kernel_map, addr, addr + size, - VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); + VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); assert(kr == KERN_SUCCESS); kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_PORT_SET, &psobj); @@ -1286,7 +1329,7 @@ mach_port_get_set_status( /* just use a portset reference from here on out */ __IGNORE_WCASTALIGN(pset = (ipc_pset_t) psobj); ips_reference(pset); - ips_unlock(pset); + ips_unlock(pset); names = (mach_port_name_t *) addr; maxnames = (ipc_entry_num_t)(size / sizeof(mach_port_name_t)); @@ -1296,15 +1339,16 @@ mach_port_get_set_status( /* release the portset reference */ ips_release(pset); - if (actual <= maxnames) + if (actual <= maxnames) { break; + } /* didn't have enough memory; allocate more */ kmem_free(ipc_kernel_map, addr, size); size = vm_map_round_page( (actual * sizeof(mach_port_name_t)), - VM_MAP_PAGE_MASK(ipc_kernel_map)) + - VM_MAP_PAGE_SIZE(ipc_kernel_map); + VM_MAP_PAGE_MASK(ipc_kernel_map)) + + VM_MAP_PAGE_SIZE(ipc_kernel_map); } if (actual == 0) { @@ -1328,19 +1372,20 @@ mach_port_get_set_status( kr = vm_map_unwire( ipc_kernel_map, vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr + vm_size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, - (vm_map_size_t)size_used, TRUE, &memory); + (vm_map_size_t)size_used, TRUE, &memory); assert(kr == KERN_SUCCESS); - if (vm_size_used != size) + if (vm_size_used != size) { kmem_free(ipc_kernel_map, - addr + vm_size_used, size - vm_size_used); + addr + vm_size_used, size - vm_size_used); + } } *members = (mach_port_name_t *) memory; @@ -1371,9 +1416,9 @@ mach_port_get_set_status( kern_return_t mach_port_move_member( - ipc_space_t space, - mach_port_name_t member, - mach_port_name_t 
after) + ipc_space_t space, + mach_port_name_t member, + mach_port_name_t after) { ipc_entry_t entry; ipc_port_t port; @@ -1382,11 +1427,13 @@ mach_port_move_member( uint64_t wq_link_id = 0; uint64_t wq_reserved_prepost = 0; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(member)) + if (!MACH_PORT_VALID(member)) { return KERN_INVALID_RIGHT; + } if (after == MACH_PORT_DEAD) { return KERN_INVALID_RIGHT; @@ -1404,15 +1451,17 @@ mach_port_move_member( */ wq_link_id = waitq_link_reserve(NULL); wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, - WAITQ_DONT_LOCK); + WAITQ_DONT_LOCK); kr = ipc_pset_lazy_allocate(space, after); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto done; + } } kr = ipc_right_lookup_read(space, member, &entry); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto done; + } /* space is read-locked and active */ if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) { @@ -1424,9 +1473,9 @@ mach_port_move_member( __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object); assert(port != IP_NULL); - if (after == MACH_PORT_NULL) + if (after == MACH_PORT_NULL) { nset = IPS_NULL; - else { + } else { entry = ipc_entry_lookup(space, after); if (entry == IE_NULL) { is_read_unlock(space); @@ -1455,7 +1504,7 @@ mach_port_move_member( ip_unlock(port); is_read_unlock(space); - done: +done: /* * on success the ipc_pset_add() will consume the wq_link_id @@ -1513,58 +1562,64 @@ mach_port_move_member( kern_return_t mach_port_request_notification( - ipc_space_t space, - mach_port_name_t name, - mach_msg_id_t id, - mach_port_mscount_t sync, - ipc_port_t notify, - ipc_port_t *previousp) + ipc_space_t space, + mach_port_name_t name, + mach_msg_id_t id, + mach_port_mscount_t sync, + ipc_port_t notify, + ipc_port_t *previousp) { kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (notify == IP_DEAD) + if (notify == IP_DEAD) { return KERN_INVALID_CAPABILITY; + } -#if NOTYET +#if NOTYET /* * Requesting notifications on RPC ports is an error. 
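mach_port_move_member() pre-reserves the waitq link and prepost objects before taking any space lock, because those allocations may block. From userland the call is simple; a receive right belongs to at most one set, and passing MACH_PORT_NULL as the destination removes it from whatever set it is in:

#include <mach/mach.h>

int
main(void)
{
	mach_port_t port, pset;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &pset);

	/* add 'port' to 'pset'; this also removes it from any prior set */
	mach_port_move_member(mach_task_self(), port, pset);

	/* MACH_PORT_NULL as 'after' takes it back out of every set */
	mach_port_move_member(mach_task_self(), port, MACH_PORT_NULL);
	return 0;
}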
*/ { ipc_port_t port; - ipc_entry_t entry; + ipc_entry_t entry; - kr = ipc_right_lookup_write(space, name, &entry); - if (kr != KERN_SUCCESS) + kr = ipc_right_lookup_write(space, name, &entry); + if (kr != KERN_SUCCESS) { return kr; + } port = (ipc_port_t) entry->ie_object; if (port->ip_subsystem != NULL) { is_write_unlock(space); - panic("mach_port_request_notification: on RPC port!!"); + panic("mach_port_request_notification: on RPC port!!"); return KERN_INVALID_CAPABILITY; } is_write_unlock(space); } -#endif /* NOTYET */ +#endif /* NOTYET */ switch (id) { - case MACH_NOTIFY_PORT_DESTROYED: { + case MACH_NOTIFY_PORT_DESTROYED: { ipc_port_t port, previous; - if (sync != 0) + if (sync != 0) { return KERN_INVALID_VALUE; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ /* you cannot register for port death notifications on a kobject */ @@ -1578,54 +1633,58 @@ mach_port_request_notification( *previousp = previous; break; - } + } - case MACH_NOTIFY_NO_SENDERS: { + case MACH_NOTIFY_NO_SENDERS: { ipc_port_t port; - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ ipc_port_nsrequest(port, sync, notify, previousp); /* port is unlocked */ break; - } + } - case MACH_NOTIFY_SEND_POSSIBLE: + case MACH_NOTIFY_SEND_POSSIBLE: - if (!MACH_PORT_VALID(name)) { - return KERN_INVALID_ARGUMENT; + if (!MACH_PORT_VALID(name)) { + return KERN_INVALID_ARGUMENT; } kr = ipc_right_request_alloc(space, name, sync != 0, - TRUE, notify, previousp); - if (kr != KERN_SUCCESS) + TRUE, notify, previousp); + if (kr != KERN_SUCCESS) { return kr; + } break; - case MACH_NOTIFY_DEAD_NAME: + case MACH_NOTIFY_DEAD_NAME: - if (!MACH_PORT_VALID(name)) { + if (!MACH_PORT_VALID(name)) { /* * Already dead. * Should do immediate delivery check - * will do that in the near future. 
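The MACH_NOTIFY_NO_SENDERS arm below routes through ipc_port_nsrequest(). The usual userland idiom arms the notification on the receive right itself with a made send-once right; sync is the make-send count threshold, so passing 1 defers the notification until at least one send right has been minted and later destroyed. A sketch, assuming the standard <mach/notify.h> definitions:

#include <mach/mach.h>
#include <mach/notify.h>

int
main(void)
{
	mach_port_t port, previous = MACH_PORT_NULL;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);

	/* deliver MACH_NOTIFY_NO_SENDERS to the port itself once the
	 * send-right count returns to zero; sync = 1 keeps a fresh,
	 * never-sent-to port from firing immediately */
	mach_port_request_notification(mach_task_self(), port,
	    MACH_NOTIFY_NO_SENDERS, 1, port,
	    MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);

	if (previous != MACH_PORT_NULL)
		mach_port_deallocate(mach_task_self(), previous);
	return 0;
}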
*/ - return KERN_INVALID_ARGUMENT; + return KERN_INVALID_ARGUMENT; } kr = ipc_right_request_alloc(space, name, sync != 0, - FALSE, notify, previousp); - if (kr != KERN_SUCCESS) + FALSE, notify, previousp); + if (kr != KERN_SUCCESS) { return kr; + } break; - default: + default: return KERN_INVALID_VALUE; } @@ -1655,23 +1714,26 @@ mach_port_request_notification( kern_return_t mach_port_insert_right( - ipc_space_t space, - mach_port_name_t name, - ipc_port_t poly, - mach_msg_type_name_t polyPoly) + ipc_space_t space, + mach_port_name_t name, + ipc_port_t poly, + mach_msg_type_name_t polyPoly) { - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } if (!MACH_PORT_VALID(name) || - !MACH_MSG_TYPE_PORT_ANY_RIGHT(polyPoly)) + !MACH_MSG_TYPE_PORT_ANY_RIGHT(polyPoly)) { return KERN_INVALID_VALUE; + } - if (!IO_VALID((ipc_object_t) poly)) + if (!IO_VALID((ipc_object_t) poly)) { return KERN_INVALID_CAPABILITY; + } - return ipc_object_copyout_name(space, (ipc_object_t) poly, - polyPoly, FALSE, name); + return ipc_object_copyout_name(space, (ipc_object_t) poly, + polyPoly, FALSE, name); } /* @@ -1692,19 +1754,21 @@ mach_port_insert_right( kern_return_t mach_port_extract_right( - ipc_space_t space, - mach_port_name_t name, - mach_msg_type_name_t msgt_name, - ipc_port_t *poly, - mach_msg_type_name_t *polyPoly) + ipc_space_t space, + mach_port_name_t name, + mach_msg_type_name_t msgt_name, + ipc_port_t *poly, + mach_msg_type_name_t *polyPoly) { kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_MSG_TYPE_PORT_ANY(msgt_name)) + if (!MACH_MSG_TYPE_PORT_ANY(msgt_name)) { return KERN_INVALID_VALUE; + } if (!MACH_PORT_VALID(name)) { /* @@ -1717,8 +1781,9 @@ mach_port_extract_right( kr = ipc_object_copyin(space, name, msgt_name, (ipc_object_t *) poly); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { *polyPoly = ipc_object_copyin_type(msgt_name); + } return kr; } @@ -1732,9 +1797,10 @@ mach_port_extract_right( * Returns: * None. 
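mach_port_insert_right() and mach_port_extract_right() are the two directions of moving a right across a space boundary by name. Minting a send right under the same name as the receive right is the common server setup:

#include <mach/mach.h>

int
main(void)
{
	mach_port_t port;
	mach_port_t out;
	mach_msg_type_name_t acquired;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);

	/* mint a send right under the same name as the receive right */
	mach_port_insert_right(mach_task_self(), port, port,
	    MACH_MSG_TYPE_MAKE_SEND);

	/* pull a send right back out by name, the extract direction */
	mach_port_extract_right(mach_task_self(), port,
	    MACH_MSG_TYPE_COPY_SEND, &out, &acquired);
	return 0;
}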
*/ -void mach_port_get_status_helper( - ipc_port_t port, - mach_port_status_t *statusp) +void +mach_port_get_status_helper( + ipc_port_t port, + mach_port_status_t *statusp) { imq_lock(&port->ip_messages); /* don't leak set IDs, just indicate that the port is in one or not */ @@ -1772,97 +1838,109 @@ void mach_port_get_status_helper( kern_return_t mach_port_get_attributes( - ipc_space_t space, - mach_port_name_t name, - int flavor, - mach_port_info_t info, - mach_msg_type_number_t *count) + ipc_space_t space, + mach_port_name_t name, + int flavor, + mach_port_info_t info, + mach_msg_type_number_t *count) { ipc_port_t port; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - switch (flavor) { - case MACH_PORT_LIMITS_INFO: { - mach_port_limits_t *lp = (mach_port_limits_t *)info; + switch (flavor) { + case MACH_PORT_LIMITS_INFO: { + mach_port_limits_t *lp = (mach_port_limits_t *)info; - if (*count < MACH_PORT_LIMITS_INFO_COUNT) - return KERN_FAILURE; + if (*count < MACH_PORT_LIMITS_INFO_COUNT) { + return KERN_FAILURE; + } - if (!MACH_PORT_VALID(name)) { + if (!MACH_PORT_VALID(name)) { *count = 0; break; } - - kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) - return kr; - /* port is locked and active */ - lp->mpl_qlimit = port->ip_messages.imq_qlimit; - *count = MACH_PORT_LIMITS_INFO_COUNT; - ip_unlock(port); - break; - } + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) { + return kr; + } + /* port is locked and active */ + + lp->mpl_qlimit = port->ip_messages.imq_qlimit; + *count = MACH_PORT_LIMITS_INFO_COUNT; + ip_unlock(port); + break; + } - case MACH_PORT_RECEIVE_STATUS: { + case MACH_PORT_RECEIVE_STATUS: { mach_port_status_t *statusp = (mach_port_status_t *)info; - - if (*count < MACH_PORT_RECEIVE_STATUS_COUNT) + + if (*count < MACH_PORT_RECEIVE_STATUS_COUNT) { return KERN_FAILURE; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ mach_port_get_status_helper(port, statusp); *count = MACH_PORT_RECEIVE_STATUS_COUNT; ip_unlock(port); break; } - + case MACH_PORT_DNREQUESTS_SIZE: { - ipc_port_request_t table; - - if (*count < MACH_PORT_DNREQUESTS_SIZE_COUNT) - return KERN_FAILURE; + ipc_port_request_t table; + + if (*count < MACH_PORT_DNREQUESTS_SIZE_COUNT) { + return KERN_FAILURE; + } if (!MACH_PORT_VALID(name)) { *(int *)info = 0; break; } - kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) - return kr; - /* port is locked and active */ - + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) { + return kr; + } + /* port is locked and active */ + table = port->ip_requests; - if (table == IPR_NULL) + if (table == IPR_NULL) { *(int *)info = 0; - else + } else { *(int *)info = table->ipr_size->its_size; - *count = MACH_PORT_DNREQUESTS_SIZE_COUNT; - ip_unlock(port); + } + *count = MACH_PORT_DNREQUESTS_SIZE_COUNT; + ip_unlock(port); break; } case MACH_PORT_INFO_EXT: { mach_port_info_ext_t *mp_info = (mach_port_info_ext_t *)info; - if (*count < MACH_PORT_INFO_EXT_COUNT) + if (*count < MACH_PORT_INFO_EXT_COUNT) { return KERN_FAILURE; - - if (!MACH_PORT_VALID(name)) + } + + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; - + } + kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != 
KERN_SUCCESS) { return kr; + } /* port is locked and active */ mach_port_get_status_helper(port, &mp_info->mpie_status); mp_info->mpie_boost_cnt = port->ip_impcount; @@ -1871,81 +1949,91 @@ mach_port_get_attributes( break; } - default: + default: return KERN_INVALID_ARGUMENT; - /*NOTREACHED*/ - } + /*NOTREACHED*/ + } return KERN_SUCCESS; } kern_return_t mach_port_set_attributes( - ipc_space_t space, - mach_port_name_t name, - int flavor, - mach_port_info_t info, - mach_msg_type_number_t count) + ipc_space_t space, + mach_port_name_t name, + int flavor, + mach_port_info_t info, + mach_msg_type_number_t count) { ipc_port_t port; kern_return_t kr; - - if (space == IS_NULL) + + if (space == IS_NULL) { return KERN_INVALID_TASK; + } + + switch (flavor) { + case MACH_PORT_LIMITS_INFO: { + mach_port_limits_t *mplp = (mach_port_limits_t *)info; + + if (count < MACH_PORT_LIMITS_INFO_COUNT) { + return KERN_FAILURE; + } + + if (mplp->mpl_qlimit > MACH_PORT_QLIMIT_MAX) { + return KERN_INVALID_VALUE; + } - switch (flavor) { - - case MACH_PORT_LIMITS_INFO: { - mach_port_limits_t *mplp = (mach_port_limits_t *)info; - - if (count < MACH_PORT_LIMITS_INFO_COUNT) - return KERN_FAILURE; - - if (mplp->mpl_qlimit > MACH_PORT_QLIMIT_MAX) - return KERN_INVALID_VALUE; - - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } - kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) - return kr; - /* port is locked and active */ + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) { + return kr; + } + /* port is locked and active */ - ipc_mqueue_set_qlimit(&port->ip_messages, mplp->mpl_qlimit); - ip_unlock(port); - break; - } + ipc_mqueue_set_qlimit(&port->ip_messages, mplp->mpl_qlimit); + ip_unlock(port); + break; + } case MACH_PORT_DNREQUESTS_SIZE: { - if (count < MACH_PORT_DNREQUESTS_SIZE_COUNT) - return KERN_FAILURE; + if (count < MACH_PORT_DNREQUESTS_SIZE_COUNT) { + return KERN_FAILURE; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; - - kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) - return kr; - /* port is locked and active */ - + } + + kr = ipc_port_translate_receive(space, name, &port); + if (kr != KERN_SUCCESS) { + return kr; + } + /* port is locked and active */ + kr = ipc_port_request_grow(port, *(int *)info); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } break; } case MACH_PORT_TEMPOWNER: - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } ipc_importance_task_t release_imp_task = IIT_NULL; natural_t assertcnt = 0; kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked and active */ - /* + /* * don't allow temp-owner importance donation if user * associated it with a kobject already (timer, host_notify target), * or is a special reply port. 
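The MACH_PORT_LIMITS_INFO flavor pairs get and set: reads return the receive queue limit, and writes validate the count and reject mpl_qlimit values above MACH_PORT_QLIMIT_MAX before calling ipc_mqueue_set_qlimit(). A runnable round-trip:

#include <stdio.h>
#include <mach/mach.h>

int
main(void)
{
	mach_port_t port;
	mach_port_limits_t limits;
	mach_msg_type_number_t count = MACH_PORT_LIMITS_INFO_COUNT;

	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);

	mach_port_get_attributes(mach_task_self(), port, MACH_PORT_LIMITS_INFO,
	    (mach_port_info_t)&limits, &count);
	printf("queue limit %u\n", limits.mpl_qlimit);

	/* raise the queue limit; anything above MACH_PORT_QLIMIT_MAX
	 * comes back KERN_INVALID_VALUE, per the check above */
	limits.mpl_qlimit = MACH_PORT_QLIMIT_MAX;
	mach_port_set_attributes(mach_task_self(), port, MACH_PORT_LIMITS_INFO,
	    (mach_port_info_t)&limits, MACH_PORT_LIMITS_INFO_COUNT);
	return 0;
}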
@@ -1973,8 +2061,9 @@ mach_port_set_attributes( /* drop assertions from previous destination task */ if (release_imp_task != IIT_NULL) { assert(ipc_importance_task_is_any_receiver_type(release_imp_task)); - if (assertcnt > 0) + if (assertcnt > 0) { ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt); + } ipc_importance_task_release(release_imp_task); } else if (assertcnt > 0) { release_imp_task = current_task()->task_imp_base; @@ -1984,8 +2073,9 @@ mach_port_set_attributes( } } #else - if (release_imp_task != IIT_NULL) + if (release_imp_task != IIT_NULL) { ipc_importance_task_release(release_imp_task); + } #endif /* IMPORTANCE_INHERITANCE */ break; @@ -1993,14 +2083,16 @@ mach_port_set_attributes( #if IMPORTANCE_INHERITANCE case MACH_PORT_DENAP_RECEIVER: case MACH_PORT_IMPORTANCE_RECEIVER: - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_RIGHT; + } kr = ipc_port_translate_receive(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } - /* + /* * don't allow importance donation if user associated * it with a kobject already (timer, host_notify target), * or is a special reply port. @@ -2017,10 +2109,10 @@ mach_port_set_attributes( break; #endif /* IMPORTANCE_INHERITANCE */ - default: + default: return KERN_INVALID_ARGUMENT; - /*NOTREACHED*/ - } + /*NOTREACHED*/ + } return KERN_SUCCESS; } @@ -2045,9 +2137,9 @@ mach_port_set_attributes( kern_return_t mach_port_insert_member( - ipc_space_t space, - mach_port_name_t name, - mach_port_name_t psname) + ipc_space_t space, + mach_port_name_t name, + mach_port_name_t psname) { ipc_object_t obj; ipc_object_t psobj; @@ -2055,37 +2147,41 @@ mach_port_insert_member( uint64_t wq_link_id; uint64_t wq_reserved_prepost; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname)) + if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname)) { return KERN_INVALID_RIGHT; + } wq_link_id = waitq_link_reserve(NULL); wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, - WAITQ_DONT_LOCK); + WAITQ_DONT_LOCK); kr = ipc_pset_lazy_allocate(space, psname); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto done; + } - kr = ipc_object_translate_two(space, - name, MACH_PORT_RIGHT_RECEIVE, &obj, - psname, MACH_PORT_RIGHT_PORT_SET, &psobj); - if (kr != KERN_SUCCESS) + kr = ipc_object_translate_two(space, + name, MACH_PORT_RIGHT_RECEIVE, &obj, + psname, MACH_PORT_RIGHT_PORT_SET, &psobj); + if (kr != KERN_SUCCESS) { goto done; + } /* obj and psobj are locked (and were locked in that order) */ assert(psobj != IO_NULL); assert(obj != IO_NULL); __IGNORE_WCASTALIGN(kr = ipc_pset_add((ipc_pset_t)psobj, (ipc_port_t)obj, - &wq_link_id, &wq_reserved_prepost)); + &wq_link_id, &wq_reserved_prepost)); io_unlock(psobj); io_unlock(obj); - done: +done: /* on success, wq_link_id is reset to 0, so this is always safe */ waitq_link_release(wq_link_id); waitq_prepost_release_reserve(wq_reserved_prepost); @@ -2113,25 +2209,28 @@ mach_port_insert_member( kern_return_t mach_port_extract_member( - ipc_space_t space, - mach_port_name_t name, - mach_port_name_t psname) + ipc_space_t space, + mach_port_name_t name, + mach_port_name_t psname) { ipc_object_t psobj; ipc_object_t obj; kern_return_t kr; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname)) + if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname)) { return KERN_INVALID_RIGHT; + } - kr = 
ipc_object_translate_two(space, - name, MACH_PORT_RIGHT_RECEIVE, &obj, - psname, MACH_PORT_RIGHT_PORT_SET, &psobj); - if (kr != KERN_SUCCESS) + kr = ipc_object_translate_two(space, + name, MACH_PORT_RIGHT_RECEIVE, &obj, + psname, MACH_PORT_RIGHT_PORT_SET, &psobj); + if (kr != KERN_SUCCESS) { return kr; + } /* obj and psobj are both locked (and were locked in that order) */ assert(psobj != IO_NULL); @@ -2152,13 +2251,14 @@ mach_port_extract_member( */ kern_return_t task_set_port_space( - ipc_space_t space, - int table_entries) + ipc_space_t space, + int table_entries) { kern_return_t kr; - - if (space == IS_NULL) + + if (space == IS_NULL) { return KERN_INVALID_TASK; + } is_write_lock(space); @@ -2168,8 +2268,9 @@ task_set_port_space( } kr = ipc_entry_grow_table(space, table_entries); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { is_write_unlock(space); + } return kr; } @@ -2185,12 +2286,13 @@ task_set_port_space( */ static kern_return_t mach_port_guard_locked( - ipc_port_t port, - uint64_t guard, - boolean_t strict) + ipc_port_t port, + uint64_t guard, + boolean_t strict) { - if (port->ip_context) + if (port->ip_context) { return KERN_INVALID_ARGUMENT; + } port->ip_context = guard; port->ip_guarded = 1; @@ -2211,9 +2313,9 @@ mach_port_guard_locked( */ static kern_return_t mach_port_unguard_locked( - ipc_port_t port, - mach_port_name_t name, - uint64_t guard) + ipc_port_t port, + mach_port_name_t name, + uint64_t guard) { /* Port locked and active */ if (!port->ip_guarded) { @@ -2246,10 +2348,10 @@ mach_port_unguard_locked( */ void mach_port_guard_exception( - mach_port_name_t name, - __unused uint64_t inguard, - uint64_t portguard, - unsigned reason) + mach_port_name_t name, + __unused uint64_t inguard, + uint64_t portguard, + unsigned reason) { mach_exception_code_t code = 0; EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT); @@ -2273,7 +2375,7 @@ mach_port_guard_exception( void mach_port_guard_ast(thread_t t, - mach_exception_data_type_t code, mach_exception_data_type_t subcode) + mach_exception_data_type_t code, mach_exception_data_type_t subcode) { unsigned int reason = EXC_GUARD_DECODE_GUARD_FLAVOR(code); task_t task = t->task; @@ -2282,9 +2384,9 @@ mach_port_guard_ast(thread_t t, assert(task != kernel_task); switch (reason) { - /* - * Fatal Mach port guards - always delivered synchronously - */ + /* + * Fatal Mach port guards - always delivered synchronously + */ case kGUARD_EXC_DESTROY: case kGUARD_EXC_MOD_REFS: case kGUARD_EXC_SET_CONTEXT: @@ -2350,22 +2452,24 @@ mach_port_guard_ast(thread_t t, kern_return_t mach_port_construct( - ipc_space_t space, - mach_port_options_t *options, - uint64_t context, - mach_port_name_t *name) + ipc_space_t space, + mach_port_options_t *options, + uint64_t context, + mach_port_name_t *name) { - kern_return_t kr; - ipc_port_t port; + kern_return_t kr; + ipc_port_t port; - if (space == IS_NULL) - return (KERN_INVALID_TASK); + if (space == IS_NULL) { + return KERN_INVALID_TASK; + } /* Allocate a new port in the IPC space */ kr = ipc_port_alloc(space, name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; - + } + /* Port locked and active */ if (options->flags & MPO_CONTEXT_AS_GUARD) { kr = mach_port_guard_locked(port, (uint64_t) context, (options->flags & MPO_STRICT)); @@ -2374,7 +2478,7 @@ mach_port_construct( } else { port->ip_context = context; } - + /* Unlock port */ ip_unlock(port); @@ -2382,37 +2486,43 @@ mach_port_construct( if (options->flags & MPO_QLIMIT) { kr = mach_port_set_attributes(space, *name, 
MACH_PORT_LIMITS_INFO, - (mach_port_info_t)&options->mpl, sizeof(options->mpl)/sizeof(int)); - if (kr != KERN_SUCCESS) - goto cleanup; + (mach_port_info_t)&options->mpl, sizeof(options->mpl) / sizeof(int)); + if (kr != KERN_SUCCESS) { + goto cleanup; + } } if (options->flags & MPO_TEMPOWNER) { kr = mach_port_set_attributes(space, *name, MACH_PORT_TEMPOWNER, NULL, 0); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto cleanup; + } } if (options->flags & MPO_IMPORTANCE_RECEIVER) { kr = mach_port_set_attributes(space, *name, MACH_PORT_IMPORTANCE_RECEIVER, NULL, 0); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto cleanup; + } } if (options->flags & MPO_DENAP_RECEIVER) { kr = mach_port_set_attributes(space, *name, MACH_PORT_DENAP_RECEIVER, NULL, 0); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto cleanup; + } } if (options->flags & MPO_INSERT_SEND_RIGHT) { kr = ipc_object_copyin(space, *name, MACH_MSG_TYPE_MAKE_SEND, (ipc_object_t *)&port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto cleanup; + } kr = mach_port_insert_right(space, *name, port, MACH_MSG_TYPE_PORT_SEND); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto cleanup; + } } return KERN_SUCCESS; @@ -2442,19 +2552,21 @@ cleanup: kern_return_t mach_port_destruct( - ipc_space_t space, - mach_port_name_t name, - mach_port_delta_t srdelta, - uint64_t guard) + ipc_space_t space, + mach_port_name_t name, + mach_port_delta_t srdelta, + uint64_t guard) { - kern_return_t kr; - ipc_entry_t entry; + kern_return_t kr; + ipc_entry_t entry; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_NAME; + } /* Remove reference for receive right */ kr = ipc_right_lookup_write(space, name, &entry); @@ -2463,7 +2575,7 @@ mach_port_destruct( return kr; } /* space is write-locked and active */ - kr = ipc_right_destruct(space, name, entry, srdelta, guard); /* unlocks */ + kr = ipc_right_destruct(space, name, entry, srdelta, guard); /* unlocks */ return kr; } @@ -2485,27 +2597,29 @@ mach_port_destruct( */ kern_return_t mach_port_guard( - ipc_space_t space, - mach_port_name_t name, - uint64_t guard, - boolean_t strict) + ipc_space_t space, + mach_port_name_t name, + uint64_t guard, + boolean_t strict) { - kern_return_t kr; - ipc_port_t port; + kern_return_t kr; + ipc_port_t port; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_NAME; + } /* Guard can be applied only to receive rights */ kr = ipc_port_translate_receive(space, name, &port); if (kr != KERN_SUCCESS) { mach_port_guard_exception(name, 0, 0, - ((KERN_INVALID_NAME == kr) ? - kGUARD_EXC_INVALID_NAME : - kGUARD_EXC_INVALID_RIGHT)); + ((KERN_INVALID_NAME == kr) ? + kGUARD_EXC_INVALID_NAME : + kGUARD_EXC_INVALID_RIGHT)); return kr; } @@ -2537,26 +2651,27 @@ mach_port_guard( */ kern_return_t mach_port_unguard( - ipc_space_t space, - mach_port_name_t name, - uint64_t guard) + ipc_space_t space, + mach_port_name_t name, + uint64_t guard) { - - kern_return_t kr; - ipc_port_t port; + kern_return_t kr; + ipc_port_t port; - if (space == IS_NULL) + if (space == IS_NULL) { return KERN_INVALID_TASK; + } - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_NAME; + } kr = ipc_port_translate_receive(space, name, &port); if (kr != KERN_SUCCESS) { mach_port_guard_exception(name, 0, 0, - ((KERN_INVALID_NAME == kr) ? 
- kGUARD_EXC_INVALID_NAME : - kGUARD_EXC_INVALID_RIGHT)); + ((KERN_INVALID_NAME == kr) ? + kGUARD_EXC_INVALID_NAME : + kGUARD_EXC_INVALID_RIGHT)); return kr; } @@ -2566,4 +2681,3 @@ mach_port_unguard( return kr; } - diff --git a/osfmk/ipc/mig_log.c b/osfmk/ipc/mig_log.c index 86b8ae672..3ede23f98 100644 --- a/osfmk/ipc/mig_log.c +++ b/osfmk/ipc/mig_log.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:29 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,26 +38,26 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.2.6.1 1994/09/23 02:14:23 ezf - * change marker to not FREE - * [1994/09/22 21:31:33 ezf] + * change marker to not FREE + * [1994/09/22 21:31:33 ezf] * * Revision 1.2.2.4 1993/08/03 18:29:18 gm - * CR9596: Change KERNEL to MACH_KERNEL. - * [1993/08/02 16:11:07 gm] - * + * CR9596: Change KERNEL to MACH_KERNEL. + * [1993/08/02 16:11:07 gm] + * * Revision 1.2.2.3 1993/07/22 16:18:15 rod - * Add ANSI prototypes. CR #9523. - * [1993/07/22 13:34:22 rod] - * + * Add ANSI prototypes. CR #9523. + * [1993/07/22 13:34:22 rod] + * * Revision 1.2.2.2 1993/06/09 02:33:38 gm - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 21:11:41 jeffc] - * + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:11:41 jeffc] + * * Revision 1.2 1993/04/19 16:23:26 devrcs - * Untyped ipc merge: - * Support for logging and tracing within the MIG stubs - * [1993/02/24 14:49:29 travos] - * + * Untyped ipc merge: + * Support for logging and tracing within the MIG stubs + * [1993/02/24 14:49:29 travos] + * * $EndLog$ */ @@ -75,51 +75,53 @@ int mig_tracing, mig_errors, mig_full_tracing; * * At the moment, there is only a printf, which is * activated through the runtime switch: - * mig_tracing to call MigEventTracer - * mig_errors to call MigEventErrors - * For this to work, MIG has to run with the -L option, + * mig_tracing to call MigEventTracer + * mig_errors to call MigEventErrors + * For this to work, MIG has to run with the -L option, * and the mig_debug flags has to be selected * * In the future, it will be possible to collect infos * on the use of MACH IPC with an application similar * to netstat. 
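As the comment above notes, these logging hooks stay dormant until the mig_tracing / mig_errors globals are set and the stubs were generated with mig -L. A stub instrumented that way ends up containing guarded calls shaped roughly like the sketch below; this is not generated output, and the `who`/`what` values are placeholders for the mig_who_t / mig_which_event_t enumerators declared in mig_log.h:

    /* Sketch of the tracing call a `mig -L` stub would emit (illustrative). */
    static void
    trace_request(mig_who_t who, mig_which_event_t what, mach_msg_header_t *InP)
    {
        if (mig_tracing) {
            MigEventTracer(who, what, InP->msgh_id, InP->msgh_size,
                0 /* kpd */, 0 /* retcode */, 0 /* ports */,
                0 /* oolports */, 0 /* ool */, __FILE__, __LINE__);
        }
    }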
- * + * * A new option will be generated accordingly to the * kernel configuration rules, e.g * #include - */ + */ void MigEventTracer( - mig_who_t who, - mig_which_event_t what, - mach_msg_id_t msgh_id, - unsigned int size, - unsigned int kpd, - unsigned int retcode, - unsigned int ports, - unsigned int oolports, - unsigned int ool, - char *file, - unsigned int line) + mig_who_t who, + mig_which_event_t what, + mach_msg_id_t msgh_id, + unsigned int size, + unsigned int kpd, + unsigned int retcode, + unsigned int ports, + unsigned int oolports, + unsigned int ool, + char *file, + unsigned int line) { - printf("%d|%d|%d", who, what, msgh_id); - if (mig_full_tracing) - printf(" -- sz%d|kpd%d|ret(0x%x)|p%d|o%d|op%d|%s, %d", - size, kpd, retcode, ports, oolports, ool, file, line); - printf("\n"); + printf("%d|%d|%d", who, what, msgh_id); + if (mig_full_tracing) { + printf(" -- sz%d|kpd%d|ret(0x%x)|p%d|o%d|op%d|%s, %d", + size, kpd, retcode, ports, oolports, ool, file, line); + } + printf("\n"); } void MigEventErrors( - mig_who_t who, - mig_which_error_t what, - void *par, - char *file, - unsigned int line) + mig_who_t who, + mig_which_error_t what, + void *par, + char *file, + unsigned int line) { - if (what == MACH_MSG_ERROR_UNKNOWN_ID) - printf("%d|%d|%d -- %s %d\n", who, what, *(int *)par, file, line); - else - printf("%d|%d|%s -- %s %d\n", who, what, (char *)par, file, line); + if (what == MACH_MSG_ERROR_UNKNOWN_ID) { + printf("%d|%d|%d -- %s %d\n", who, what, *(int *)par, file, line); + } else { + printf("%d|%d|%s -- %s %d\n", who, what, (char *)par, file, line); + } } diff --git a/osfmk/ipc/port.h b/osfmk/ipc/port.h index 7e25b7f05..5b24a9885 100644 --- a/osfmk/ipc/port.h +++ b/osfmk/ipc/port.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
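The MACH_PORT_UREFS_OVERFLOW / MACH_PORT_UREFS_UNDERFLOW macros reformatted just below are pure arithmetic guards against wrapping the 16-bit uref count. A small worked example, with illustrative values (MACH_PORT_UREFS_MAX is (1 << 16) - 1 = 65535):

    #include <assert.h>

    static void
    urefs_macro_examples(void)
    {
        mach_port_urefs_t urefs = MACH_PORT_UREFS_MAX - 1;   /* 65534 */

        /* (2 > 0) && (65534 + 2 >= 65535): adding 2 urefs would overflow */
        assert(MACH_PORT_UREFS_OVERFLOW(urefs, 2));

        /* (-3 < 0) && (3 > 2): dropping 3 urefs when only 2 are held */
        assert(MACH_PORT_UREFS_UNDERFLOW(2, -3));
    }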
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,12 +63,12 @@ * Implementation specific complement to mach/port.h. */ -#ifndef _IPC_PORT_H_ +#ifndef _IPC_PORT_H_ #define _IPC_PORT_H_ #include -#define MACH_PORT_NGEN(name) MACH_PORT_MAKE(0, MACH_PORT_GEN(name)) +#define MACH_PORT_NGEN(name) MACH_PORT_MAKE(0, MACH_PORT_GEN(name)) /* * Typedefs for code cleanliness. These must all have @@ -76,22 +76,22 @@ */ -#define MACH_PORT_UREFS_MAX ((mach_port_urefs_t) ((1 << 16) - 1)) +#define MACH_PORT_UREFS_MAX ((mach_port_urefs_t) ((1 << 16) - 1)) -#define MACH_PORT_UREFS_OVERFLOW(urefs, delta) \ - (((delta) > 0) && \ - ((((urefs) + (delta)) <= (urefs)) || \ - (((urefs) + (delta)) >= MACH_PORT_UREFS_MAX))) +#define MACH_PORT_UREFS_OVERFLOW(urefs, delta) \ + (((delta) > 0) && \ + ((((urefs) + (delta)) <= (urefs)) || \ + (((urefs) + (delta)) >= MACH_PORT_UREFS_MAX))) -#define MACH_PORT_UREFS_UNDERFLOW(urefs, delta) \ - (((delta) < 0) && (((mach_port_urefs_t)-(delta)) > (urefs))) +#define MACH_PORT_UREFS_UNDERFLOW(urefs, delta) \ + (((delta) < 0) && (((mach_port_urefs_t)-(delta)) > (urefs))) __BEGIN_DECLS extern void mach_port_guard_exception( - mach_port_name_t name, - uint64_t inguard, - uint64_t portguard, - unsigned reason); + mach_port_name_t name, + uint64_t inguard, + uint64_t portguard, + unsigned reason); __END_DECLS -#endif /* _IPC_PORT_H_ */ +#endif /* _IPC_PORT_H_ */ diff --git a/osfmk/kdp/kdp.h b/osfmk/kdp/kdp.h index 51f77134a..22d7078de 100644 --- a/osfmk/kdp/kdp.h +++ b/osfmk/kdp/kdp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,11 +34,11 @@ void kdp_raise_exception( - unsigned int exception, - unsigned int code, - unsigned int subcode, - void *saved_state -); + unsigned int exception, + unsigned int code, + unsigned int subcode, + void *saved_state + ); /* Reset debugger state. 
*/ diff --git a/osfmk/kdp/kdp_callout.h b/osfmk/kdp/kdp_callout.h index dab21ae3a..9646da05f 100644 --- a/osfmk/kdp/kdp_callout.h +++ b/osfmk/kdp/kdp_callout.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -43,4 +43,3 @@ typedef void (*kdp_callout_fn_t)(void *arg, kdp_event_t event); * non-trivial service. */ extern void kdp_register_callout(kdp_callout_fn_t fn, void *arg); - diff --git a/osfmk/kdp/kdp_core.c b/osfmk/kdp/kdp_core.c index d214c8e04..08edfb7c6 100644 --- a/osfmk/kdp/kdp_core.c +++ b/osfmk/kdp/kdp_core.c @@ -850,7 +850,7 @@ pmap_traverse_present_mappings(pmap_t __unused pmap, #if defined(__x86_64__) /* Try to skip by 2MB if possible */ - if (((vcur & PDMASK) == 0) && cpu_64bit) { + if ((vcur & PDMASK) == 0) { pd_entry_t *pde; pde = pmap_pde(pmap, vcur); if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) { diff --git a/osfmk/kdp/kdp_core.h b/osfmk/kdp/kdp_core.h index 7e0b17cfd..d69d92b5f 100644 --- a/osfmk/kdp/kdp_core.h +++ b/osfmk/kdp/kdp_core.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,8 +30,8 @@ * 8 Aug. 
2003 - Created (Derek Kumar) */ -/* Various protocol definitions - * for the core transfer protocol, which is a variant of TFTP +/* Various protocol definitions + * for the core transfer protocol, which is a variant of TFTP */ #ifndef __KDP_CORE_H #define __KDP_CORE_H @@ -43,45 +43,45 @@ /* * Packet types. */ -#define KDP_RRQ 1 /* read request */ -#define KDP_WRQ 2 /* write request */ -#define KDP_DATA 3 /* data packet */ -#define KDP_ACK 4 /* acknowledgement */ -#define KDP_ERROR 5 /* error code */ +#define KDP_RRQ 1 /* read request */ +#define KDP_WRQ 2 /* write request */ +#define KDP_DATA 3 /* data packet */ +#define KDP_ACK 4 /* acknowledgement */ +#define KDP_ERROR 5 /* error code */ #define KDP_SEEK 6 /* Seek to specified offset */ #define KDP_EOF 7 /* signal end of file */ #define KDP_FLUSH 8 /* flush outstanding data */ -#define KDP_FEATURE_MASK_STRING "features" +#define KDP_FEATURE_MASK_STRING "features" -enum {KDP_FEATURE_LARGE_CRASHDUMPS = 1, KDP_FEATURE_LARGE_PKT_SIZE = 2}; -extern uint32_t kdp_feature_large_crashdumps, kdp_feature_large_pkt_size; +enum {KDP_FEATURE_LARGE_CRASHDUMPS = 1, KDP_FEATURE_LARGE_PKT_SIZE = 2}; +extern uint32_t kdp_feature_large_crashdumps, kdp_feature_large_pkt_size; -struct corehdr { - short th_opcode; /* packet type */ +struct corehdr { + short th_opcode; /* packet type */ union { - unsigned int tu_block; /* block # */ - unsigned int tu_code; /* error code */ - char tu_rpl[1]; /* request packet payload */ + unsigned int tu_block; /* block # */ + unsigned int tu_code; /* error code */ + char tu_rpl[1]; /* request packet payload */ } th_u; - char th_data[0]; /* data or error string */ + char th_data[0]; /* data or error string */ }__attribute__((packed)); -#define th_block th_u.tu_block -#define th_code th_u.tu_code -#define th_stuff th_u.tu_rpl -#define th_msg th_data +#define th_block th_u.tu_block +#define th_code th_u.tu_code +#define th_stuff th_u.tu_rpl +#define th_msg th_data /* * Error codes. 
*/ -#define EUNDEF 0 /* not defined */ -#define ENOTFOUND 1 /* file not found */ -#define EACCESS 2 /* access violation */ -#define ENOSPACE 3 /* disk full or allocation exceeded */ -#define EBADOP 4 /* illegal TFTP operation */ -#define EBADID 5 /* unknown transfer ID */ -#define EEXISTS 6 /* file already exists */ -#define ENOUSER 7 /* no such user */ +#define EUNDEF 0 /* not defined */ +#define ENOTFOUND 1 /* file not found */ +#define EACCESS 2 /* access violation */ +#define ENOSPACE 3 /* disk full or allocation exceeded */ +#define EBADOP 4 /* illegal TFTP operation */ +#define EBADID 5 /* unknown transfer ID */ +#define EEXISTS 6 /* file already exists */ +#define ENOUSER 7 /* no such user */ #define CORE_REMOTE_PORT 1069 /* hardwired, we can't really query the services file */ @@ -124,11 +124,11 @@ void panic_spin_shmcon(void); #endif /* CONFIG_EMBEDDED */ -void kdp_panic_dump (void); +void kdp_panic_dump(void); void begin_panic_transfer(void); -void abort_panic_transfer (void); +void abort_panic_transfer(void); void kdp_set_dump_info(const uint32_t flags, const char *file, const char *destip, - const char *routerip, const uint32_t port); + const char *routerip, const uint32_t port); void kdp_get_dump_info(kdp_dumpinfo_reply_t *rp); enum kern_dump_type { @@ -145,11 +145,11 @@ boolean_t dumped_kernel_core(void); struct corehdr *create_panic_header(unsigned int request, const char *corename, unsigned length, unsigned block); -int kdp_send_crashdump_pkt(unsigned int request, char *corename, - uint64_t length, void *panic_data); +int kdp_send_crashdump_pkt(unsigned int request, char *corename, + uint64_t length, void *panic_data); -int kdp_send_crashdump_data(unsigned int request, char *corename, - uint64_t length, void * txstart); +int kdp_send_crashdump_data(unsigned int request, char *corename, + uint64_t length, void * txstart); void kern_collectth_state_size(uint64_t * tstate_count, uint64_t * tstate_size); @@ -175,9 +175,9 @@ int kern_dump_seek_to_next_file(void *kdp_core_out_varss, uint64_t next_file_off extern boolean_t efi_valid_page(ppnum_t ppn); #if defined(__x86_64__) -#define EFI_VALID_PAGE(x) efi_valid_page(x) +#define EFI_VALID_PAGE(x) efi_valid_page(x) #elif defined(__arm__) || defined(__arm64__) -#define EFI_VALID_PAGE(x) (FALSE) +#define EFI_VALID_PAGE(x) (FALSE) #endif /* defined (__x86_64__) */ #endif /* PRIVATE */ diff --git a/osfmk/kdp/kdp_dyld.h b/osfmk/kdp/kdp_dyld.h index 91110b6d7..b363b896c 100644 --- a/osfmk/kdp/kdp_dyld.h +++ b/osfmk/kdp/kdp_dyld.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
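The corehdr layout in kdp_core.h above mirrors a TFTP header: th_opcode selects the packet type, and the union holds either a block number, an error code, or request payload, with th_block/th_code/th_stuff as accessor macros. create_panic_header() is the real constructor; the sketch below only illustrates the field relationships, assuming kdp_core.h is included:

    /* Illustrative: stamp a KDP_DATA header for block `blk`. */
    static void
    stamp_data_packet(struct corehdr *ch, unsigned int blk)
    {
        ch->th_opcode = KDP_DATA;   /* data packet */
        ch->th_block  = blk;        /* th_block expands to th_u.tu_block */
        /* payload bytes follow in ch->th_data[] */
    }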
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -43,35 +43,35 @@ typedef struct user32_dyld_uuid_info kernel_uuid_info; #endif struct user32_dyld_image_info { - user32_addr_t imageLoadAddress; /* base address image is mapped int */ - user32_addr_t imageFilePath; /* path dyld used to load the image */ - user32_ulong_t imageFileModDate; /* time_t of image file */ + user32_addr_t imageLoadAddress; /* base address image is mapped int */ + user32_addr_t imageFilePath; /* path dyld used to load the image */ + user32_ulong_t imageFileModDate; /* time_t of image file */ }; struct user64_dyld_image_info { - user64_addr_t imageLoadAddress; /* base address image is mapped int */ - user64_addr_t imageFilePath; /* path dyld used to load the image */ - user64_ulong_t imageFileModDate; /* time_t of image file */ + user64_addr_t imageLoadAddress; /* base address image is mapped int */ + user64_addr_t imageFilePath; /* path dyld used to load the image */ + user64_ulong_t imageFileModDate; /* time_t of image file */ }; -// FIXME: dyld is in C++, and some of the fields in dyld_all_image_infos are C++ +// FIXME: dyld is in C++, and some of the fields in dyld_all_image_infos are C++ // native booleans. There must be a better way... typedef uint8_t dyld_bool; struct user32_dyld_all_image_infos { - uint32_t version; - uint32_t infoArrayCount; - user32_addr_t infoArray; - user32_addr_t notification; - dyld_bool processDetachedFromSharedRegion; - dyld_bool libSystemInitialized; - user32_addr_t dyldImageLoadAddress; - user32_addr_t jitInfo; - user32_addr_t dyldVersion; - user32_addr_t errorMessage; - user32_addr_t terminationFlags; - user32_addr_t coreSymbolicationShmPage; - user32_addr_t systemOrderFlag; + uint32_t version; + uint32_t infoArrayCount; + user32_addr_t infoArray; + user32_addr_t notification; + dyld_bool processDetachedFromSharedRegion; + dyld_bool libSystemInitialized; + user32_addr_t dyldImageLoadAddress; + user32_addr_t jitInfo; + user32_addr_t dyldVersion; + user32_addr_t errorMessage; + user32_addr_t terminationFlags; + user32_addr_t coreSymbolicationShmPage; + user32_addr_t systemOrderFlag; user32_size_t uuidArrayCount; // dyld defines this as a uintptr_t despite it being a count user32_addr_t uuidArray; user32_addr_t dyldAllImageInfosAddress; @@ -92,24 +92,24 @@ struct user32_dyld_all_image_infos { uint64_t timestamp; user32_addr_t reserved[14]; /* the following fields are only in version 16 (macOS 10.13, iOS 12.0) and later */ - user32_addr_t compact_dyld_image_info_addr; - user32_size_t compact_dyld_image_info_size; + user32_addr_t compact_dyld_image_info_addr; + user32_size_t compact_dyld_image_info_size; }; struct user64_dyld_all_image_infos { - uint32_t version; - uint32_t infoArrayCount; - user64_addr_t infoArray; - user64_addr_t notification; - dyld_bool processDetachedFromSharedRegion; - dyld_bool libSystemInitialized; - user64_addr_t dyldImageLoadAddress; - user64_addr_t jitInfo; - user64_addr_t dyldVersion; - user64_addr_t errorMessage; - user64_addr_t terminationFlags; - user64_addr_t coreSymbolicationShmPage; - user64_addr_t systemOrderFlag; + uint32_t version; + uint32_t infoArrayCount; + user64_addr_t infoArray; + user64_addr_t notification; + dyld_bool processDetachedFromSharedRegion; + dyld_bool libSystemInitialized; + user64_addr_t dyldImageLoadAddress; + user64_addr_t jitInfo; + user64_addr_t dyldVersion; + user64_addr_t errorMessage; + user64_addr_t 
terminationFlags; + user64_addr_t coreSymbolicationShmPage; + user64_addr_t systemOrderFlag; user64_size_t uuidArrayCount; // dyld defines this as a uintptr_t despite it being a count user64_addr_t uuidArray; user64_addr_t dyldAllImageInfosAddress; @@ -130,7 +130,6 @@ struct user64_dyld_all_image_infos { uint64_t timestamp; user64_addr_t reserved[14]; /* the following fields are only in version 16 (macOS 10.13, iOS 12.0) and later */ - user64_addr_t compact_dyld_image_info_addr; - user64_size_t compact_dyld_image_info_size; + user64_addr_t compact_dyld_image_info_addr; + user64_size_t compact_dyld_image_info_size; }; - diff --git a/osfmk/kdp/kdp_en_debugger.h b/osfmk/kdp/kdp_en_debugger.h index c8a99822f..edb879981 100644 --- a/osfmk/kdp/kdp_en_debugger.h +++ b/osfmk/kdp/kdp_en_debugger.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,12 +31,11 @@ */ typedef void (*kdp_send_t)(void * pkt, unsigned int pkt_len); -typedef void (*kdp_receive_t)(void * pkt, unsigned int * pkt_len, - unsigned int timeout); +typedef void (*kdp_receive_t)(void * pkt, unsigned int * pkt_len, + unsigned int timeout); -void +void kdp_register_send_receive(kdp_send_t send, kdp_receive_t receive); void kdp_unregister_send_receive(kdp_send_t send, kdp_receive_t receive); - diff --git a/osfmk/kdp/kdp_internal.h b/osfmk/kdp/kdp_internal.h index 9168ac7a8..356429b67 100644 --- a/osfmk/kdp/kdp_internal.h +++ b/osfmk/kdp/kdp_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
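kdp_en_debugger.h above is the entire contract an ethernet driver must satisfy to host the kernel debugger: two polled-mode callbacks registered via kdp_register_send_receive() once the interface is usable. A minimal sketch of a hypothetical driver (function names are invented for illustration):

    /* Transmit one KDP packet by polling the hardware; the debugger runs
     * with interrupts disabled, so this must not sleep or wait on them. */
    static void
    my_en_debugger_tx(void *pkt, unsigned int pkt_len)
    {
        /* ... place pkt on the TX ring and spin until it is sent ... */
    }

    /* Poll for an inbound packet for up to `timeout`; on timeout the
     * driver reports no data by setting *pkt_len to 0. */
    static void
    my_en_debugger_rx(void *pkt, unsigned int *pkt_len, unsigned int timeout)
    {
        /* ... poll the RX ring, copy into pkt, set *pkt_len ... */
    }

    static void
    my_en_attach_debugger(void)
    {
        kdp_register_send_receive(my_en_debugger_tx, my_en_debugger_rx);
    }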
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -39,22 +39,22 @@ #include typedef struct { - void *saved_state; - thread_t kdp_thread; - int kdp_cpu; - uint32_t session_key; - unsigned int conn_seq; - unsigned short reply_port; - unsigned short exception_port; - boolean_t is_conn; - boolean_t is_halted; - unsigned char exception_seq; - boolean_t exception_ack_needed; + void *saved_state; + thread_t kdp_thread; + int kdp_cpu; + uint32_t session_key; + unsigned int conn_seq; + unsigned short reply_port; + unsigned short exception_port; + boolean_t is_conn; + boolean_t is_halted; + unsigned char exception_seq; + boolean_t exception_ack_needed; } kdp_glob_t; -extern kdp_glob_t kdp; +extern kdp_glob_t kdp; -extern volatile int kdp_flag; +extern volatile int kdp_flag; extern int noresume_on_disconnect; extern char kdp_kernelversion_string[256]; @@ -64,107 +64,107 @@ extern char kdp_kernelversion_string[256]; #define KDP_BP_DIS 0x4 #define KDP_GETC_ENA 0x8 #define KDP_PANIC_DUMP_ENABLED 0x10 -#define PANIC_CORE_ON_NMI 0x20 +#define PANIC_CORE_ON_NMI 0x20 #define DBG_POST_CORE 0x40 #define PANIC_LOG_DUMP 0x80 #define REBOOT_POST_CORE 0x100 #define SYSTEM_LOG_DUMP 0x200 typedef boolean_t (*kdp_dispatch_t) ( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); extern boolean_t kdp_packet( - unsigned char *, - int *, - unsigned short * -); + unsigned char *, + int *, + unsigned short * + ); extern boolean_t -kdp_remove_all_breakpoints (void); +kdp_remove_all_breakpoints(void); extern void kdp_exception( - unsigned char *, - int *, - unsigned short *, - unsigned int, - unsigned int, - unsigned int -); + unsigned char *, + int *, + unsigned short *, + unsigned int, + unsigned int, + unsigned int + ); extern boolean_t kdp_exception_ack( - unsigned char *, - int -); + unsigned char *, + int + ); extern void kdp_panic( - const char *msg -); + const char *msg + ); extern void kdp_machine_reboot( - void -); + void + ); extern void kdp_us_spin( - int usec -); + int usec + ); extern int kdp_intr_disbl( - void -); + void + ); extern void kdp_intr_enbl( - int s -); + int s + ); extern kdp_error_t kdp_machine_read_regs( - unsigned int cpu, - unsigned int flavor, - char *data, - int *size -); + unsigned int cpu, + unsigned int flavor, + char *data, + int *size + ); extern kdp_error_t kdp_machine_write_regs( - unsigned int cpu, - unsigned int flavor, - char *data, - int *size -); + unsigned int cpu, + unsigned int flavor, + char *data, + int *size + ); extern void kdp_machine_hostinfo( - kdp_hostinfo_t *hostinfo -); + kdp_hostinfo_t *hostinfo + ); extern void kdp_sync_cache( - void -); + void + ); /* Return a byte array that can be byte-copied to a memory address * to trap into the debugger. 
Must be 4 bytes or less in the current @@ -174,38 +174,37 @@ kdp_sync_cache( void kdp_machine_get_breakinsn( - uint8_t *bytes, - uint32_t *size -); + uint8_t *bytes, + uint32_t *size + ); extern void kdp_ml_enter_debugger( void -); + ); mach_vm_size_t -kdp_machine_vm_read( mach_vm_address_t, caddr_t, mach_vm_size_t); + kdp_machine_vm_read( mach_vm_address_t, caddr_t, mach_vm_size_t); mach_vm_size_t -kdp_machine_vm_write( caddr_t, mach_vm_address_t, mach_vm_size_t); + kdp_machine_vm_write( caddr_t, mach_vm_address_t, mach_vm_size_t); mach_vm_size_t -kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t /* data */, - uint16_t /* lcpu */); + kdp_machine_phys_read(kdp_readphysmem64_req_t * rq, caddr_t /* data */, + uint16_t /* lcpu */); mach_vm_size_t -kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t /* data */, - uint16_t /* lcpu */); + kdp_machine_phys_write(kdp_writephysmem64_req_t * rq, caddr_t /* data */, + uint16_t /* lcpu */); int -kdp_machine_ioport_read(kdp_readioport_req_t *, caddr_t /* data */, uint16_t /* lcpu */); + kdp_machine_ioport_read(kdp_readioport_req_t *, caddr_t /* data */, uint16_t /* lcpu */); int -kdp_machine_ioport_write(kdp_writeioport_req_t *, caddr_t /* data */, uint16_t /* lcpu */); + kdp_machine_ioport_write(kdp_writeioport_req_t *, caddr_t /* data */, uint16_t /* lcpu */); int -kdp_machine_msr64_read(kdp_readmsr64_req_t *, caddr_t /* data */, uint16_t /* lcpu */); + kdp_machine_msr64_read(kdp_readmsr64_req_t *, caddr_t /* data */, uint16_t /* lcpu */); int -kdp_machine_msr64_write(kdp_writemsr64_req_t *, caddr_t /* data */, uint16_t /* lcpu */); - + kdp_machine_msr64_write(kdp_writemsr64_req_t *, caddr_t /* data */, uint16_t /* lcpu */); diff --git a/osfmk/kdp/kdp_private.h b/osfmk/kdp/kdp_private.h index 07e5123ff..f62c920b3 100644 --- a/osfmk/kdp/kdp_private.h +++ b/osfmk/kdp/kdp_private.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,173 +32,173 @@ static boolean_t kdp_unknown( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_connect( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_disconnect( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_reattach( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_hostinfo( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_suspend( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_readregs( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_writeregs( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_version( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_kernelversion( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_regions( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_maxbytes( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_readmem( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_readmem64( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_readphysmem64( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_writemem( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_writemem64( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_writephysmem64( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_resumecpus( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); -static boolean_t +static boolean_t kdp_breakpoint_set( - kdp_pkt_t *, - int *, - unsigned short *t -); + kdp_pkt_t *, + int *, + unsigned short *t + ); -static boolean_t +static boolean_t kdp_breakpoint64_set( - kdp_pkt_t *, - int *, - unsigned short *t -); + kdp_pkt_t *, + int *, + unsigned short *t + ); static boolean_t kdp_breakpoint_remove( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_breakpoint64_remove( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_reboot( - kdp_pkt_t *, - int *, - unsigned short * -); + kdp_pkt_t *, + int *, + unsigned short * + ); static boolean_t kdp_readioport(kdp_pkt_t *, int *, unsigned short *); diff --git a/osfmk/kdp/kdp_protocol.h b/osfmk/kdp/kdp_protocol.h index 8cc612288..1b17ed0e0 100644 --- a/osfmk/kdp/kdp_protocol.h +++ b/osfmk/kdp/kdp_protocol.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
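Every handler declared in kdp_private.h above shares the kdp_dispatch_t shape — (kdp_pkt_t *, int *, unsigned short *) returning boolean_t — which is what lets kdp.c demultiplex requests through a single table indexed by the kdp_req_t value (defined further down in kdp_protocol.h). A sketch of that pattern, with the table abbreviated:

    static kdp_dispatch_t dispatch_table[] = {
        /* indexed by (request - KDP_CONNECT) */
        kdp_connect,
        kdp_disconnect,
        kdp_hostinfo,
        /* ... one entry per kdp_req_t value ... */
    };

    /* Every request variant begins with a kdp_hdr_t, so the header can be
     * read through a cast before picking a handler (illustrative). */
    static boolean_t
    kdp_demux(kdp_pkt_t *pkt, int *plen, unsigned short *reply_port)
    {
        kdp_hdr_t *hdr = (kdp_hdr_t *)pkt;
        return (*dispatch_table[hdr->request - KDP_CONNECT])(pkt, plen,
                   reply_port);
    }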
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,9 +35,9 @@ #ifdef MACH_KERNEL_PRIVATE -#include +#include #include -#include +#include #endif #ifdef KDP_PROXY_PACK_SUPPORT @@ -50,12 +50,12 @@ /* * Retransmit parameters */ -#if DDEBUG_DEBUG || DEBUG_DEBUG -#define KDP_REXMIT_SECS 20 /* rexmit if no ack in 3 secs */ -#else /* DDEBUG_DEBUG || DEBUG_DEBUG */ -#define KDP_REXMIT_SECS 3 /* rexmit if no ack in 3 secs */ -#endif /* DDEBUG_DEBUG || DEBUG_DEBUG */ -#define KDP_REXMIT_TRIES 8 /* xmit 8 times, then give up */ +#if DDEBUG_DEBUG || DEBUG_DEBUG +#define KDP_REXMIT_SECS 20 /* rexmit if no ack in 3 secs */ +#else /* DDEBUG_DEBUG || DEBUG_DEBUG */ +#define KDP_REXMIT_SECS 3 /* rexmit if no ack in 3 secs */ +#endif /* DDEBUG_DEBUG || DEBUG_DEBUG */ +#define KDP_REXMIT_TRIES 8 /* xmit 8 times, then give up */ /* @@ -63,50 +63,50 @@ * Remote will resume unless KDP requests is received within this * many seconds after an attention (nmi) packet is sent. */ -#define KDP_MAX_ATTN_WAIT 30 /* wait max of 30 seconds */ +#define KDP_MAX_ATTN_WAIT 30 /* wait max of 30 seconds */ /* * Well-known UDP port, debugger side. * FIXME: This is what the 68K guys use, but beats me how they chose it... */ -#define KDP_REMOTE_PORT 41139 /* pick one and register it */ +#define KDP_REMOTE_PORT 41139 /* pick one and register it */ /* * UDP ports, KDB side. 5 port numbers are reserved for each port (request * and exception). This allows multiple KDBs to run on one host. 
*/ -#define UDP_HOST_COMM_BASE 41140 -#define UDP_HOST_EXCEP_BASE 41145 -#define NUM_UDP_HOST_PORTS 5 +#define UDP_HOST_COMM_BASE 41140 +#define UDP_HOST_EXCEP_BASE 41145 +#define NUM_UDP_HOST_PORTS 5 /* * Requests */ typedef enum { /* connection oriented requests */ - KDP_CONNECT, KDP_DISCONNECT, + KDP_CONNECT, KDP_DISCONNECT, /* obtaining client info */ - KDP_HOSTINFO, KDP_VERSION, KDP_MAXBYTES, - + KDP_HOSTINFO, KDP_VERSION, KDP_MAXBYTES, + /* memory access */ - KDP_READMEM, KDP_WRITEMEM, - + KDP_READMEM, KDP_WRITEMEM, + /* register access */ - KDP_READREGS, KDP_WRITEREGS, - + KDP_READREGS, KDP_WRITEREGS, + /* executable image info */ - KDP_LOAD, KDP_IMAGEPATH, - + KDP_LOAD, KDP_IMAGEPATH, + /* execution control */ - KDP_SUSPEND, KDP_RESUMECPUS, - + KDP_SUSPEND, KDP_RESUMECPUS, + /* exception and termination notification, NOT true requests */ - KDP_EXCEPTION, KDP_TERMINATION, + KDP_EXCEPTION, KDP_TERMINATION, /* breakpoint control */ KDP_BREAKPOINT_SET, KDP_BREAKPOINT_REMOVE, - + /* vm regions */ KDP_REGIONS, @@ -117,45 +117,45 @@ typedef enum { KDP_HOSTREBOOT, /* memory access (64-bit wide addresses). Version 11 protocol */ - KDP_READMEM64, KDP_WRITEMEM64, + KDP_READMEM64, KDP_WRITEMEM64, /* breakpoint control (64-bit wide addresses). Version 11 protocol */ KDP_BREAKPOINT64_SET, KDP_BREAKPOINT64_REMOVE, - + /* kernel version string, like "xnu-1234.5~6". Version 11 protocol */ KDP_KERNELVERSION, - + /* physical memory access (64-bit wide addresses). Version 12 protocol */ - KDP_READPHYSMEM64, KDP_WRITEPHYSMEM64, + KDP_READPHYSMEM64, KDP_WRITEPHYSMEM64, - /* ioport access (8-, 16-, and 32-bit) */ - KDP_READIOPORT, KDP_WRITEIOPORT, + /* ioport access (8-, 16-, and 32-bit) */ + KDP_READIOPORT, KDP_WRITEIOPORT, - /* msr access (64-bit) */ - KDP_READMSR64, KDP_WRITEMSR64, + /* msr access (64-bit) */ + KDP_READMSR64, KDP_WRITEMSR64, - /* get/dump panic/corefile info */ - KDP_DUMPINFO, + /* get/dump panic/corefile info */ + KDP_DUMPINFO, /* keep this last */ KDP_INVALID_REQUEST } kdp_req_t; typedef enum { - KDP_DUMPINFO_GETINFO = 0x00000000, - KDP_DUMPINFO_SETINFO = 0x00000001, - KDP_DUMPINFO_CORE = 0x00000102, - KDP_DUMPINFO_PANICLOG = 0x00000103, - KDP_DUMPINFO_SYSTEMLOG = 0x00000104, - KDP_DUMPINFO_DISABLE = 0x00000105, - KDP_DUMPINFO_MASK = 0x00000FFF, - KDP_DUMPINFO_DUMP = 0x00000100, - - KDP_DUMPINFO_REBOOT = 0x10000000, - KDP_DUMPINFO_NORESUME = 0x20000000, - KDP_DUMPINFO_RESUME = 0x00000000, /* default behaviour */ - KDP_DUMPINFO_NOINTR = 0x40000000, /* don't interrupt */ - KDP_DUMPINFO_INTR = 0x00000000, /* default behaviour */ + KDP_DUMPINFO_GETINFO = 0x00000000, + KDP_DUMPINFO_SETINFO = 0x00000001, + KDP_DUMPINFO_CORE = 0x00000102, + KDP_DUMPINFO_PANICLOG = 0x00000103, + KDP_DUMPINFO_SYSTEMLOG = 0x00000104, + KDP_DUMPINFO_DISABLE = 0x00000105, + KDP_DUMPINFO_MASK = 0x00000FFF, + KDP_DUMPINFO_DUMP = 0x00000100, + + KDP_DUMPINFO_REBOOT = 0x10000000, + KDP_DUMPINFO_NORESUME = 0x20000000, + KDP_DUMPINFO_RESUME = 0x00000000, /* default behaviour */ + KDP_DUMPINFO_NOINTR = 0x40000000, /* don't interrupt */ + KDP_DUMPINFO_INTR = 0x00000000, /* default behaviour */ } kdp_dumpinfo_t; /* @@ -165,11 +165,11 @@ typedef enum { * need to be reflected in kgmacros as well. 
*/ typedef struct { - kdp_req_t request:7; /* kdp_req_t, request type */ - unsigned is_reply:1; /* 0 => request, 1 => reply */ - unsigned seq:8; /* sequence number within session */ - unsigned len:16; /* length of entire pkt including hdr */ - unsigned key; /* session key */ + kdp_req_t request:7; /* kdp_req_t, request type */ + unsigned is_reply:1; /* 0 => request, 1 => reply */ + unsigned seq:8; /* sequence number within session */ + unsigned len:16; /* length of entire pkt including hdr */ + unsigned key; /* session key */ } KDP_PACKED kdp_hdr_t; /* @@ -179,19 +179,19 @@ typedef enum { KDPERR_NO_ERROR = 0, KDPERR_ALREADY_CONNECTED, KDPERR_BAD_NBYTES, - KDPERR_BADFLAVOR, /* bad flavor in w/r regs */ - KDPERR_BAD_ACCESS, /* memory reference failure */ + KDPERR_BADFLAVOR, /* bad flavor in w/r regs */ + KDPERR_BAD_ACCESS, /* memory reference failure */ KDPERR_MAX_BREAKPOINTS = 100, KDPERR_BREAKPOINT_NOT_FOUND = 101, KDPERR_BREAKPOINT_ALREADY_SET = 102 } kdp_error_t; -#if defined(__x86_64__) -#define KDPERR_ACCESS(_req,_ret) \ +#if defined(__x86_64__) +#define KDPERR_ACCESS(_req, _ret) \ (((_req) == (uint32_t)(_ret)) ? KDPERR_NO_ERROR : KDPERR_BAD_ACCESS) #else -#define KDPERR_ACCESS(req,cnt) (KDPERR_NO_ERROR) +#define KDPERR_ACCESS(req, cnt) (KDPERR_NO_ERROR) #endif /* x86_64 */ @@ -202,338 +202,338 @@ typedef enum { /* * KDP_CONNECT */ -typedef struct { /* KDP_CONNECT request */ - kdp_hdr_t hdr; - uint16_t req_reply_port; /* udp port which to send replies */ - uint16_t exc_note_port; /* udp port which to send exc notes */ - char greeting[0]; /* "greetings", nul-terminated */ +typedef struct { /* KDP_CONNECT request */ + kdp_hdr_t hdr; + uint16_t req_reply_port; /* udp port which to send replies */ + uint16_t exc_note_port; /* udp port which to send exc notes */ + char greeting[0]; /* "greetings", nul-terminated */ } KDP_PACKED kdp_connect_req_t; -typedef struct { /* KDP_CONNECT reply */ - kdp_hdr_t hdr; - kdp_error_t error; +typedef struct { /* KDP_CONNECT reply */ + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED kdp_connect_reply_t; /* * KDP_DISCONNECT */ -typedef struct { /* KDP_DISCONNECT request */ - kdp_hdr_t hdr; +typedef struct { /* KDP_DISCONNECT request */ + kdp_hdr_t hdr; } KDP_PACKED kdp_disconnect_req_t; -typedef struct { /* KDP_DISCONNECT reply */ - kdp_hdr_t hdr; +typedef struct { /* KDP_DISCONNECT reply */ + kdp_hdr_t hdr; } KDP_PACKED kdp_disconnect_reply_t; /* * KDP_REATTACH */ typedef struct { - kdp_hdr_t hdr; - uint16_t req_reply_port; /* udp port which to send replies */ + kdp_hdr_t hdr; + uint16_t req_reply_port; /* udp port which to send replies */ } KDP_PACKED kdp_reattach_req_t; /* * KDP_HOSTINFO */ -typedef struct { /* KDP_HOSTINFO request */ - kdp_hdr_t hdr; +typedef struct { /* KDP_HOSTINFO request */ + kdp_hdr_t hdr; } KDP_PACKED kdp_hostinfo_req_t; typedef struct { - uint32_t cpus_mask; /* bit is 1 if cpu present */ - uint32_t cpu_type; - uint32_t cpu_subtype; + uint32_t cpus_mask; /* bit is 1 if cpu present */ + uint32_t cpu_type; + uint32_t cpu_subtype; } KDP_PACKED kdp_hostinfo_t; -typedef struct { /* KDP_HOSTINFO reply */ - kdp_hdr_t hdr; - kdp_hostinfo_t hostinfo; +typedef struct { /* KDP_HOSTINFO reply */ + kdp_hdr_t hdr; + kdp_hostinfo_t hostinfo; } KDP_PACKED kdp_hostinfo_reply_t; /* * KDP_VERSION */ -typedef struct { /* KDP_VERSION request */ - kdp_hdr_t hdr; +typedef struct { /* KDP_VERSION request */ + kdp_hdr_t hdr; } KDP_PACKED kdp_version_req_t; -#define KDP_FEATURE_BP 0x1 /* local breakpoint support */ +#define KDP_FEATURE_BP 0x1 /* local 
breakpoint support */ -typedef struct { /* KDP_VERSION reply */ - kdp_hdr_t hdr; - uint32_t version; - uint32_t feature; - uint32_t pad0; - uint32_t pad1; +typedef struct { /* KDP_VERSION reply */ + kdp_hdr_t hdr; + uint32_t version; + uint32_t feature; + uint32_t pad0; + uint32_t pad1; } KDP_PACKED kdp_version_reply_t; -#define VM_PROT_VOLATILE ((vm_prot_t) 0x08) /* not cacheable */ -#define VM_PROT_SPARSE ((vm_prot_t) 0x10) /* sparse addr space */ +#define VM_PROT_VOLATILE ((vm_prot_t) 0x08) /* not cacheable */ +#define VM_PROT_SPARSE ((vm_prot_t) 0x10) /* sparse addr space */ /* * KDP_REGIONS */ -typedef struct { /* KDP_REGIONS request */ - kdp_hdr_t hdr; +typedef struct { /* KDP_REGIONS request */ + kdp_hdr_t hdr; } KDP_PACKED kdp_regions_req_t; typedef struct { - uint32_t address; - uint32_t nbytes; - uint32_t protection; /* vm_prot_t */ + uint32_t address; + uint32_t nbytes; + uint32_t protection; /* vm_prot_t */ } KDP_PACKED kdp_region_t; -typedef struct { /* KDP_REGIONS reply */ - kdp_hdr_t hdr; - uint32_t nregions; - kdp_region_t regions[0]; +typedef struct { /* KDP_REGIONS reply */ + kdp_hdr_t hdr; + uint32_t nregions; + kdp_region_t regions[0]; } KDP_PACKED kdp_regions_reply_t; /* * KDP_MAXBYTES */ -typedef struct { /* KDP_MAXBYTES request */ - kdp_hdr_t hdr; +typedef struct { /* KDP_MAXBYTES request */ + kdp_hdr_t hdr; } KDP_PACKED kdp_maxbytes_req_t; -typedef struct { /* KDP_MAXBYTES reply */ - kdp_hdr_t hdr; - uint32_t max_bytes; +typedef struct { /* KDP_MAXBYTES reply */ + kdp_hdr_t hdr; + uint32_t max_bytes; } KDP_PACKED kdp_maxbytes_reply_t; /* * KDP_READMEM */ -typedef struct { /* KDP_READMEM request */ - kdp_hdr_t hdr; - uint32_t address; - uint32_t nbytes; +typedef struct { /* KDP_READMEM request */ + kdp_hdr_t hdr; + uint32_t address; + uint32_t nbytes; } KDP_PACKED kdp_readmem_req_t; -typedef struct { /* KDP_READMEM reply */ - kdp_hdr_t hdr; - kdp_error_t error; - char data[0]; +typedef struct { /* KDP_READMEM reply */ + kdp_hdr_t hdr; + kdp_error_t error; + char data[0]; } KDP_PACKED kdp_readmem_reply_t; /* * KDP_READMEM64 */ -typedef struct { /* KDP_READMEM64 request */ - kdp_hdr_t hdr; - uint64_t address; - uint32_t nbytes; +typedef struct { /* KDP_READMEM64 request */ + kdp_hdr_t hdr; + uint64_t address; + uint32_t nbytes; } KDP_PACKED kdp_readmem64_req_t; -typedef struct { /* KDP_READMEM64 reply */ - kdp_hdr_t hdr; - kdp_error_t error; - char data[0]; +typedef struct { /* KDP_READMEM64 reply */ + kdp_hdr_t hdr; + kdp_error_t error; + char data[0]; } KDP_PACKED kdp_readmem64_reply_t; /* * KDP_READPHYSMEM64 */ -typedef struct { /* KDP_READPHYSMEM64 request */ - kdp_hdr_t hdr; - uint64_t address; - uint32_t nbytes; +typedef struct { /* KDP_READPHYSMEM64 request */ + kdp_hdr_t hdr; + uint64_t address; + uint32_t nbytes; uint16_t lcpu; } KDP_PACKED kdp_readphysmem64_req_t; -typedef struct { /* KDP_READPHYSMEM64 reply */ - kdp_hdr_t hdr; - kdp_error_t error; - char data[0]; +typedef struct { /* KDP_READPHYSMEM64 reply */ + kdp_hdr_t hdr; + kdp_error_t error; + char data[0]; } KDP_PACKED kdp_readphysmem64_reply_t; /* * KDP_WRITEMEM */ -typedef struct { /* KDP_WRITEMEM request */ - kdp_hdr_t hdr; - uint32_t address; - uint32_t nbytes; - char data[0]; +typedef struct { /* KDP_WRITEMEM request */ + kdp_hdr_t hdr; + uint32_t address; + uint32_t nbytes; + char data[0]; } KDP_PACKED kdp_writemem_req_t; -typedef struct { /* KDP_WRITEMEM reply */ - kdp_hdr_t hdr; - kdp_error_t error; +typedef struct { /* KDP_WRITEMEM reply */ + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED 
kdp_writemem_reply_t; /* * KDP_WRITEMEM64 */ -typedef struct { /* KDP_WRITEMEM64 request */ - kdp_hdr_t hdr; - uint64_t address; - uint32_t nbytes; - char data[0]; +typedef struct { /* KDP_WRITEMEM64 request */ + kdp_hdr_t hdr; + uint64_t address; + uint32_t nbytes; + char data[0]; } KDP_PACKED kdp_writemem64_req_t; -typedef struct { /* KDP_WRITEMEM64 reply */ - kdp_hdr_t hdr; - kdp_error_t error; +typedef struct { /* KDP_WRITEMEM64 reply */ + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED kdp_writemem64_reply_t; /* * KDP_WRITEPHYSMEM64 */ -typedef struct { /* KDP_WRITEPHYSMEM64 request */ - kdp_hdr_t hdr; - uint64_t address; - uint32_t nbytes; +typedef struct { /* KDP_WRITEPHYSMEM64 request */ + kdp_hdr_t hdr; + uint64_t address; + uint32_t nbytes; uint16_t lcpu; - char data[0]; + char data[0]; } KDP_PACKED kdp_writephysmem64_req_t; -typedef struct { /* KDP_WRITEPHYSMEM64 reply */ - kdp_hdr_t hdr; - kdp_error_t error; +typedef struct { /* KDP_WRITEPHYSMEM64 reply */ + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED kdp_writephysmem64_reply_t; /* * KDP_WRITEIOPORT */ -typedef struct { /* KDP_WRITEIOPORT request */ - kdp_hdr_t hdr; - uint16_t lcpu; - uint16_t address; - uint16_t nbytes; - char data[0]; +typedef struct { /* KDP_WRITEIOPORT request */ + kdp_hdr_t hdr; + uint16_t lcpu; + uint16_t address; + uint16_t nbytes; + char data[0]; } KDP_PACKED kdp_writeioport_req_t; -typedef struct { /* KDP_WRITEIOPORT reply */ - kdp_hdr_t hdr; - kdp_error_t error; +typedef struct { /* KDP_WRITEIOPORT reply */ + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED kdp_writeioport_reply_t; /* * KDP_READIOPORT */ -typedef struct { /* KDP_READIOPORT request */ - kdp_hdr_t hdr; - uint16_t lcpu; - uint16_t address; - uint16_t nbytes; +typedef struct { /* KDP_READIOPORT request */ + kdp_hdr_t hdr; + uint16_t lcpu; + uint16_t address; + uint16_t nbytes; } KDP_PACKED kdp_readioport_req_t; -typedef struct { /* KDP_READIOPORT reply */ - kdp_hdr_t hdr; - kdp_error_t error; - char data[0]; +typedef struct { /* KDP_READIOPORT reply */ + kdp_hdr_t hdr; + kdp_error_t error; + char data[0]; } KDP_PACKED kdp_readioport_reply_t; /* * KDP_WRITEMSR64 */ -typedef struct { /* KDP_WRITEMSR64 request */ - kdp_hdr_t hdr; - uint32_t address; - uint16_t lcpu; - char data[0]; +typedef struct { /* KDP_WRITEMSR64 request */ + kdp_hdr_t hdr; + uint32_t address; + uint16_t lcpu; + char data[0]; } KDP_PACKED kdp_writemsr64_req_t; -typedef struct { /* KDP_WRITEMSR64 reply */ - kdp_hdr_t hdr; - kdp_error_t error; +typedef struct { /* KDP_WRITEMSR64 reply */ + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED kdp_writemsr64_reply_t; /* * KDP_READMSR64 */ -typedef struct { /* KDP_READMSR64 request */ - kdp_hdr_t hdr; - uint32_t address; - uint16_t lcpu; +typedef struct { /* KDP_READMSR64 request */ + kdp_hdr_t hdr; + uint32_t address; + uint16_t lcpu; } KDP_PACKED kdp_readmsr64_req_t; -typedef struct { /* KDP_READMSR64 reply */ - kdp_hdr_t hdr; - kdp_error_t error; - char data[0]; +typedef struct { /* KDP_READMSR64 reply */ + kdp_hdr_t hdr; + kdp_error_t error; + char data[0]; } KDP_PACKED kdp_readmsr64_reply_t; /* * KDP_READREGS */ -typedef struct { /* KDP_READREGS request */ - kdp_hdr_t hdr; - uint32_t cpu; - uint32_t flavor; +typedef struct { /* KDP_READREGS request */ + kdp_hdr_t hdr; + uint32_t cpu; + uint32_t flavor; } KDP_PACKED kdp_readregs_req_t; -typedef struct { /* KDP_READREGS reply */ - kdp_hdr_t hdr; - kdp_error_t error; /* could be KDPERR_BADFLAVOR */ - char data[0]; +typedef struct { /* KDP_READREGS reply */ + kdp_hdr_t 
hdr; + kdp_error_t error; /* could be KDPERR_BADFLAVOR */ + char data[0]; } KDP_PACKED kdp_readregs_reply_t; /* * KDP_WRITEREGS */ -typedef struct { /* KDP_WRITEREGS request */ - kdp_hdr_t hdr; - uint32_t cpu; - uint32_t flavor; - char data[0]; +typedef struct { /* KDP_WRITEREGS request */ + kdp_hdr_t hdr; + uint32_t cpu; + uint32_t flavor; + char data[0]; } KDP_PACKED kdp_writeregs_req_t; -typedef struct { /* KDP_WRITEREGS reply */ - kdp_hdr_t hdr; - kdp_error_t error; +typedef struct { /* KDP_WRITEREGS reply */ + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED kdp_writeregs_reply_t; /* * KDP_LOAD */ -typedef struct { /* KDP_LOAD request */ - kdp_hdr_t hdr; - char file_args[0]; +typedef struct { /* KDP_LOAD request */ + kdp_hdr_t hdr; + char file_args[0]; } KDP_PACKED kdp_load_req_t; -typedef struct { /* KDP_LOAD reply */ - kdp_hdr_t hdr; - kdp_error_t error; +typedef struct { /* KDP_LOAD reply */ + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED kdp_load_reply_t; /* * KDP_IMAGEPATH */ -typedef struct { /* KDP_IMAGEPATH request */ - kdp_hdr_t hdr; +typedef struct { /* KDP_IMAGEPATH request */ + kdp_hdr_t hdr; } KDP_PACKED kdp_imagepath_req_t; -typedef struct { /* KDP_IMAGEPATH reply */ - kdp_hdr_t hdr; - char path[0]; +typedef struct { /* KDP_IMAGEPATH reply */ + kdp_hdr_t hdr; + char path[0]; } KDP_PACKED kdp_imagepath_reply_t; /* * KDP_SUSPEND */ -typedef struct { /* KDP_SUSPEND request */ - kdp_hdr_t hdr; +typedef struct { /* KDP_SUSPEND request */ + kdp_hdr_t hdr; } KDP_PACKED kdp_suspend_req_t; -typedef struct { /* KDP_SUSPEND reply */ - kdp_hdr_t hdr; +typedef struct { /* KDP_SUSPEND reply */ + kdp_hdr_t hdr; } KDP_PACKED kdp_suspend_reply_t; /* * KDP_RESUMECPUS */ -typedef struct { /* KDP_RESUMECPUS request */ - kdp_hdr_t hdr; - uint32_t cpu_mask; +typedef struct { /* KDP_RESUMECPUS request */ + kdp_hdr_t hdr; + uint32_t cpu_mask; } KDP_PACKED kdp_resumecpus_req_t; -typedef struct { /* KDP_RESUMECPUS reply */ - kdp_hdr_t hdr; +typedef struct { /* KDP_RESUMECPUS reply */ + kdp_hdr_t hdr; } KDP_PACKED kdp_resumecpus_reply_t; /* @@ -541,13 +541,13 @@ typedef struct { /* KDP_RESUMECPUS reply */ */ typedef struct { - kdp_hdr_t hdr; - uint32_t address; + kdp_hdr_t hdr; + uint32_t address; } KDP_PACKED kdp_breakpoint_req_t; typedef struct { - kdp_hdr_t hdr; - kdp_error_t error; + kdp_hdr_t hdr; + kdp_error_t error; } KDP_PACKED kdp_breakpoint_reply_t; /* @@ -556,7 +556,7 @@ typedef struct { typedef struct { kdp_hdr_t hdr; - uint64_t address; + uint64_t address; } KDP_PACKED kdp_breakpoint64_req_t; typedef struct { @@ -569,37 +569,37 @@ typedef struct { * (Exception notifications are not requests, and in fact travel from * the remote debugger to the gdb agent KDB.) */ -typedef struct { /* exc. info for one cpu */ - uint32_t cpu; +typedef struct { /* exc. 
info for one cpu */ + uint32_t cpu; /* * Following info is defined as * per */ - uint32_t exception; - uint32_t code; - uint32_t subcode; + uint32_t exception; + uint32_t code; + uint32_t subcode; } KDP_PACKED kdp_exc_info_t; -typedef struct { /* KDP_EXCEPTION notification */ - kdp_hdr_t hdr; - uint32_t n_exc_info; - kdp_exc_info_t exc_info[0]; +typedef struct { /* KDP_EXCEPTION notification */ + kdp_hdr_t hdr; + uint32_t n_exc_info; + kdp_exc_info_t exc_info[0]; } KDP_PACKED kdp_exception_t; -typedef struct { /* KDP_EXCEPTION acknowledgement */ - kdp_hdr_t hdr; +typedef struct { /* KDP_EXCEPTION acknowledgement */ + kdp_hdr_t hdr; } KDP_PACKED kdp_exception_ack_t; /* * KDP_KERNELVERSION */ -typedef struct { /* KDP_KERNELVERSION request */ - kdp_hdr_t hdr; +typedef struct { /* KDP_KERNELVERSION request */ + kdp_hdr_t hdr; } KDP_PACKED kdp_kernelversion_req_t; -typedef struct { /* KDP_KERNELVERSION reply */ - kdp_hdr_t hdr; - char version[0]; +typedef struct { /* KDP_KERNELVERSION reply */ + kdp_hdr_t hdr; + char version[0]; } KDP_PACKED kdp_kernelversion_reply_t; @@ -607,38 +607,38 @@ typedef struct { /* KDP_KERNELVERSION reply */ * Child termination messages */ typedef enum { - KDP_FAULT = 0, /* child took fault (internal use) */ - KDP_EXIT, /* child exited */ - KDP_POWEROFF, /* child power-off */ - KDP_REBOOT, /* child reboot */ - KDP_COMMAND_MODE /* child exit to mon command_mode */ + KDP_FAULT = 0, /* child took fault (internal use) */ + KDP_EXIT, /* child exited */ + KDP_POWEROFF, /* child power-off */ + KDP_REBOOT, /* child reboot */ + KDP_COMMAND_MODE /* child exit to mon command_mode */ } kdp_termination_code_t; -typedef struct { /* KDP_TERMINATION notification */ - kdp_hdr_t hdr; - uint32_t term_code; /* kdp_termination_code_t */ - uint32_t exit_code; +typedef struct { /* KDP_TERMINATION notification */ + kdp_hdr_t hdr; + uint32_t term_code; /* kdp_termination_code_t */ + uint32_t exit_code; } KDP_PACKED kdp_termination_t; typedef struct { - kdp_hdr_t hdr; + kdp_hdr_t hdr; } KDP_PACKED kdp_termination_ack_t; /* * KDP_DUMPINFO */ -typedef struct { /* KDP_DUMPINFO request */ - kdp_hdr_t hdr; - char name[50]; +typedef struct { /* KDP_DUMPINFO request */ + kdp_hdr_t hdr; + char name[50]; char destip[16]; char routerip[16]; uint32_t port; kdp_dumpinfo_t type; } KDP_PACKED kdp_dumpinfo_req_t; -typedef struct { /* KDP_DUMPINFO reply */ - kdp_hdr_t hdr; - char name[50]; +typedef struct { /* KDP_DUMPINFO reply */ + kdp_hdr_t hdr; + char name[50]; char destip[16]; char routerip[16]; uint32_t port; @@ -647,68 +647,68 @@ typedef struct { /* KDP_DUMPINFO reply */ typedef union { - kdp_hdr_t hdr; - kdp_connect_req_t connect_req; - kdp_connect_reply_t connect_reply; - kdp_disconnect_req_t disconnect_req; - kdp_disconnect_reply_t disconnect_reply; - kdp_hostinfo_req_t hostinfo_req; - kdp_hostinfo_reply_t hostinfo_reply; - kdp_version_req_t version_req; - kdp_version_reply_t version_reply; - kdp_maxbytes_req_t maxbytes_req; - kdp_maxbytes_reply_t maxbytes_reply; - kdp_readmem_req_t readmem_req; - kdp_readmem_reply_t readmem_reply; - kdp_readmem64_req_t readmem64_req; - kdp_readmem64_reply_t readmem64_reply; - kdp_readphysmem64_req_t readphysmem64_req; - kdp_readphysmem64_reply_t readphysmem64_reply; - kdp_writemem_req_t writemem_req; - kdp_writemem_reply_t writemem_reply; - kdp_writemem64_req_t writemem64_req; - kdp_writemem64_reply_t writemem64_reply; - kdp_writephysmem64_req_t writephysmem64_req; - kdp_writephysmem64_reply_t writephysmem64_reply; - kdp_readregs_req_t readregs_req; - 
kdp_readregs_reply_t readregs_reply; - kdp_writeregs_req_t writeregs_req; - kdp_writeregs_reply_t writeregs_reply; - kdp_load_req_t load_req; - kdp_load_reply_t load_reply; - kdp_imagepath_req_t imagepath_req; - kdp_imagepath_reply_t imagepath_reply; - kdp_suspend_req_t suspend_req; - kdp_suspend_reply_t suspend_reply; - kdp_resumecpus_req_t resumecpus_req; - kdp_resumecpus_reply_t resumecpus_reply; - kdp_exception_t exception; - kdp_exception_ack_t exception_ack; - kdp_termination_t termination; - kdp_termination_ack_t termination_ack; - kdp_breakpoint_req_t breakpoint_req; - kdp_breakpoint_reply_t breakpoint_reply; - kdp_breakpoint64_req_t breakpoint64_req; - kdp_breakpoint64_reply_t breakpoint64_reply; - kdp_reattach_req_t reattach_req; - kdp_regions_req_t regions_req; - kdp_regions_reply_t regions_reply; - kdp_kernelversion_req_t kernelversion_req; - kdp_kernelversion_reply_t kernelversion_reply; - kdp_readioport_req_t readioport_req; - kdp_readioport_reply_t readioport_reply; - kdp_writeioport_req_t writeioport_req; - kdp_writeioport_reply_t writeioport_reply; - kdp_readmsr64_req_t readmsr64_req; - kdp_readmsr64_reply_t readmsr64_reply; - kdp_writemsr64_req_t writemsr64_req; - kdp_writemsr64_reply_t writemsr64_reply; - kdp_dumpinfo_req_t dumpinfo_req; - kdp_dumpinfo_reply_t dumpinfo_reply; + kdp_hdr_t hdr; + kdp_connect_req_t connect_req; + kdp_connect_reply_t connect_reply; + kdp_disconnect_req_t disconnect_req; + kdp_disconnect_reply_t disconnect_reply; + kdp_hostinfo_req_t hostinfo_req; + kdp_hostinfo_reply_t hostinfo_reply; + kdp_version_req_t version_req; + kdp_version_reply_t version_reply; + kdp_maxbytes_req_t maxbytes_req; + kdp_maxbytes_reply_t maxbytes_reply; + kdp_readmem_req_t readmem_req; + kdp_readmem_reply_t readmem_reply; + kdp_readmem64_req_t readmem64_req; + kdp_readmem64_reply_t readmem64_reply; + kdp_readphysmem64_req_t readphysmem64_req; + kdp_readphysmem64_reply_t readphysmem64_reply; + kdp_writemem_req_t writemem_req; + kdp_writemem_reply_t writemem_reply; + kdp_writemem64_req_t writemem64_req; + kdp_writemem64_reply_t writemem64_reply; + kdp_writephysmem64_req_t writephysmem64_req; + kdp_writephysmem64_reply_t writephysmem64_reply; + kdp_readregs_req_t readregs_req; + kdp_readregs_reply_t readregs_reply; + kdp_writeregs_req_t writeregs_req; + kdp_writeregs_reply_t writeregs_reply; + kdp_load_req_t load_req; + kdp_load_reply_t load_reply; + kdp_imagepath_req_t imagepath_req; + kdp_imagepath_reply_t imagepath_reply; + kdp_suspend_req_t suspend_req; + kdp_suspend_reply_t suspend_reply; + kdp_resumecpus_req_t resumecpus_req; + kdp_resumecpus_reply_t resumecpus_reply; + kdp_exception_t exception; + kdp_exception_ack_t exception_ack; + kdp_termination_t termination; + kdp_termination_ack_t termination_ack; + kdp_breakpoint_req_t breakpoint_req; + kdp_breakpoint_reply_t breakpoint_reply; + kdp_breakpoint64_req_t breakpoint64_req; + kdp_breakpoint64_reply_t breakpoint64_reply; + kdp_reattach_req_t reattach_req; + kdp_regions_req_t regions_req; + kdp_regions_reply_t regions_reply; + kdp_kernelversion_req_t kernelversion_req; + kdp_kernelversion_reply_t kernelversion_reply; + kdp_readioport_req_t readioport_req; + kdp_readioport_reply_t readioport_reply; + kdp_writeioport_req_t writeioport_req; + kdp_writeioport_reply_t writeioport_reply; + kdp_readmsr64_req_t readmsr64_req; + kdp_readmsr64_reply_t readmsr64_reply; + kdp_writemsr64_req_t writemsr64_req; + kdp_writemsr64_reply_t writemsr64_reply; + kdp_dumpinfo_req_t dumpinfo_req; + kdp_dumpinfo_reply_t dumpinfo_reply; } 
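/*
 * The union completed just below (kdp_pkt_t) lets the receive path parse a
 * datagram once as a kdp_hdr_t and then view the same bytes as the matching
 * request type. A minimal dispatch sketch; the handler functions are
 * hypothetical, only the member names come from this union:
 *
 *   kdp_pkt_t *p = (kdp_pkt_t *)rx_buf;
 *   switch (p->hdr.request) {
 *   case KDP_HOSTINFO:
 *       handle_hostinfo(&p->hostinfo_req);     // hypothetical handler
 *       break;
 *   case KDP_READMEM64:
 *       handle_readmem64(&p->readmem64_req);   // hypothetical handler
 *       break;
 *   default:
 *       break;
 *   }
 */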
kdp_pkt_t; -#define MAX_KDP_PKT_SIZE 1200 /* max packet size */ -#define MAX_KDP_DATA_SIZE 1024 /* max r/w data per packet */ +#define MAX_KDP_PKT_SIZE 1200 /* max packet size */ +#define MAX_KDP_DATA_SIZE 1024 /* max r/w data per packet */ /* * Support relatively small request/responses here. @@ -717,9 +717,9 @@ typedef union { */ #define KDP_MANUAL_PACKET_SIZE 128 struct kdp_manual_pkt { - unsigned char data[KDP_MANUAL_PACKET_SIZE]; - unsigned int len; - boolean_t input; + unsigned char data[KDP_MANUAL_PACKET_SIZE]; + unsigned int len; + boolean_t input; } KDP_PACKED; #ifdef KDP_PROXY_PACK_SUPPORT diff --git a/osfmk/kdp/kdp_serial.c b/osfmk/kdp/kdp_serial.c index 0bf85a9b3..c4e013d52 100644 --- a/osfmk/kdp/kdp_serial.c +++ b/osfmk/kdp/kdp_serial.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include "kdp_serial.h" @@ -41,18 +41,19 @@ static uint32_t dsCRC; static bool dsHaveCRC; -static void kdp_serial_out(unsigned char byte, void (*outFunc)(char)) +static void +kdp_serial_out(unsigned char byte, void (*outFunc)(char)) { //need to escape '\n' because the kernel serial output turns it into a cr/lf - if(byte == SKDP_START_CHAR || byte == SKDP_END_CHAR || byte == SKDP_ESC_CHAR || byte == '\n') - { + if (byte == SKDP_START_CHAR || byte == SKDP_END_CHAR || byte == SKDP_ESC_CHAR || byte == '\n') { outFunc(SKDP_ESC_CHAR); byte = ~byte; } outFunc(byte); } -void kdp_serialize_packet(unsigned char *packet, unsigned int len, void (*outFunc)(char)) +void +kdp_serialize_packet(unsigned char *packet, unsigned int len, void (*outFunc)(char)) { unsigned int index; unsigned char byte; @@ -61,8 +62,8 @@ void kdp_serialize_packet(unsigned char *packet, unsigned int len, void (*outFun // insert the CRC between back to back STARTs which is compatible with old clients crc = (uint32_t) z_crc32(0, packet, len); outFunc(SKDP_START_CHAR); - kdp_serial_out((crc >> 0), outFunc); - kdp_serial_out((crc >> 8), outFunc); + kdp_serial_out((crc >> 0), outFunc); + kdp_serial_out((crc >> 8), outFunc); kdp_serial_out((crc >> 16), outFunc); kdp_serial_out((crc >> 24), outFunc); @@ -74,76 +75,68 @@ void kdp_serialize_packet(unsigned char *packet, unsigned int len, void (*outFun outFunc(SKDP_END_CHAR); } -unsigned char *kdp_unserialize_packet(unsigned char byte, unsigned int *len) +unsigned char * +kdp_unserialize_packet(unsigned char byte, unsigned int *len) { uint32_t crc; - switch(dsState) - { - case DS_WAITSTART: - if(byte == SKDP_START_CHAR) - { + switch (dsState) { + case DS_WAITSTART: + if (byte == SKDP_START_CHAR) { // printf("got start char\n"); - dsState = DS_READING; - dsPos = 0; - *len = SERIALIZE_READING; - dsHaveCRC = false; - return 0; - } - *len = SERIALIZE_WAIT_START; - break; - case DS_READING: - if(byte == SKDP_ESC_CHAR) - { - dsState = DS_ESCAPED; - *len = SERIALIZE_READING; - return 0; - } - if(byte == SKDP_START_CHAR) - { - if (dsPos >= 4) - { - dsHaveCRC = true; - dsCRC = dsBuffer[0] | (dsBuffer[1] << 8) | (dsBuffer[2] << 16) | (dsBuffer[3] << 24); - } - //else printf("unexpected start char, resetting\n"); - dsPos = 0; - *len = SERIALIZE_READING; - return 0; + dsState = DS_READING; + dsPos = 0; + *len = SERIALIZE_READING; + dsHaveCRC = false; + return 0; + } + *len = SERIALIZE_WAIT_START; + break; + case DS_READING: + if (byte == SKDP_ESC_CHAR) { + dsState = DS_ESCAPED; + *len = SERIALIZE_READING; + return 0; + } + if (byte == SKDP_START_CHAR) { + if (dsPos >= 4) { + dsHaveCRC = true; + dsCRC = dsBuffer[0] | (dsBuffer[1] << 8) | (dsBuffer[2] << 16) | (dsBuffer[3] << 24); } - if(byte == SKDP_END_CHAR) - { - dsState = DS_WAITSTART; - if (dsHaveCRC) - { - crc = (uint32_t) z_crc32(0, &dsBuffer[0], dsPos); - if (crc != dsCRC) - { + //else printf("unexpected start char, resetting\n"); + dsPos = 0; + *len = SERIALIZE_READING; + return 0; + } + if (byte == SKDP_END_CHAR) { + dsState = DS_WAITSTART; + if (dsHaveCRC) { + crc = (uint32_t) z_crc32(0, &dsBuffer[0], dsPos); + if (crc != dsCRC) { // printf("bad packet crc 0x%x != 0x%x\n", crc, dsCRC); - dsPos = 0; - *len = SERIALIZE_WAIT_START; - return 0; - } + dsPos = 0; + *len = SERIALIZE_WAIT_START; + return 0; } - *len = dsPos; - dsPos = 0; - return dsBuffer; } - dsBuffer[dsPos++] = byte; - break; - case DS_ESCAPED: + *len = dsPos; + dsPos = 0; + return dsBuffer; + } + dsBuffer[dsPos++] = byte; + 
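/*
 * The wire framing being decoded here is:
 *   SKDP_START_CHAR | crc32 (4 bytes, LSB first, each escaped as needed)
 *                   | escaped payload | SKDP_END_CHAR
 * where SKDP_START_CHAR, SKDP_END_CHAR, SKDP_ESC_CHAR and '\n' never appear
 * raw inside a frame: the sender emits SKDP_ESC_CHAR followed by the byte's
 * complement, and the DS_ESCAPED state just below undoes that. A sketch of
 * the matching byte encoder, mirroring kdp_serial_out():
 *
 *   static void emit_byte(unsigned char b, void (*put)(char))
 *   {
 *       if (b == SKDP_START_CHAR || b == SKDP_END_CHAR ||
 *           b == SKDP_ESC_CHAR || b == '\n') {
 *           put(SKDP_ESC_CHAR);
 *           b = (unsigned char)~b;   // receiver complements it back
 *       }
 *       put((char)b);
 *   }
 */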
break; + case DS_ESCAPED: // printf("unescaping %02x to %02x\n", byte, ~byte); - dsBuffer[dsPos++] = ~byte; - dsState = DS_READING; - *len = SERIALIZE_READING; - break; + dsBuffer[dsPos++] = ~byte; + dsState = DS_READING; + *len = SERIALIZE_READING; + break; } - if(dsPos == sizeof(dsBuffer)) //too much data...forget this packet - { + if (dsPos == sizeof(dsBuffer)) { //too much data...forget this packet dsState = DS_WAITSTART; dsPos = 0; *len = SERIALIZE_WAIT_START; } - + return 0; } diff --git a/osfmk/kdp/kdp_serial.h b/osfmk/kdp/kdp_serial.h index 68dc30129..96f34aa5a 100644 --- a/osfmk/kdp/kdp_serial.h +++ b/osfmk/kdp/kdp_serial.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KDP_SERIAL_H_ diff --git a/osfmk/kdp/kdp_udp.c b/osfmk/kdp/kdp_udp.c index 1a84b2bfc..c8636563a 100644 --- a/osfmk/kdp/kdp_udp.c +++ b/osfmk/kdp/kdp_udp.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -66,7 +66,7 @@ /* we just want the link status flags, so undef KERNEL_PRIVATE for this * header file. 
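 * The header in question is <net/if_media.h>: it provides the IFM_AVALID
 * and IFM_ACTIVE bits that the LINK_UP_STATUS mask below is built from,
 * and this #undef/#define pairing imports only its public portion.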
*/ #undef KERNEL_PRIVATE -#include <net/if_media.h> +#include <net/if_media.h> #define KERNEL_PRIVATE #include @@ -85,7 +85,7 @@ extern int inet_aton(const char *, struct kdp_in_addr *); /* in libkern */ extern char *inet_ntoa_r(struct kdp_in_addr ina, char *buf, size_t buflen); /* in libkern */ -#define DO_ALIGN 1 /* align all packet data accesses */ +#define DO_ALIGN 1 /* align all packet data accesses */ #define KDP_SERIAL_IPADDR 0xABADBABE /* IP address used for serial KDP */ #define LINK_UP_STATUS (IFM_AVALID | IFM_ACTIVE) @@ -100,41 +100,41 @@ static u_short ip_id; /* ip packet ctr, for ids */ * UDP protocol implementation. * Per RFC 768, August, 1980. */ -#define UDP_TTL 60 /* deflt time to live for UDP packets */ +#define UDP_TTL 60 /* deflt time to live for UDP packets */ static int udp_ttl = UDP_TTL; -static unsigned char exception_seq; +static unsigned char exception_seq; struct kdp_ipovly { - uint32_t ih_next, ih_prev; /* for protocol sequence q's */ - u_char ih_x1; /* (unused) */ - u_char ih_pr; /* protocol */ - short ih_len; /* protocol length */ - struct kdp_in_addr ih_src; /* source internet address */ - struct kdp_in_addr ih_dst; /* destination internet address */ + uint32_t ih_next, ih_prev; /* for protocol sequence q's */ + u_char ih_x1; /* (unused) */ + u_char ih_pr; /* protocol */ + short ih_len; /* protocol length */ + struct kdp_in_addr ih_src; /* source internet address */ + struct kdp_in_addr ih_dst; /* destination internet address */ }; struct kdp_udphdr { - u_short uh_sport; /* source port */ - u_short uh_dport; /* destination port */ - short uh_ulen; /* udp length */ - u_short uh_sum; /* udp checksum */ + u_short uh_sport; /* source port */ + u_short uh_dport; /* destination port */ + short uh_ulen; /* udp length */ + u_short uh_sum; /* udp checksum */ }; struct kdp_udpiphdr { - struct kdp_ipovly ui_i; /* overlaid ip structure */ - struct kdp_udphdr ui_u; /* udp header */ + struct kdp_ipovly ui_i; /* overlaid ip structure */ + struct kdp_udphdr ui_u; /* udp header */ }; -#define ui_next ui_i.ih_next -#define ui_prev ui_i.ih_prev -#define ui_x1 ui_i.ih_x1 -#define ui_pr ui_i.ih_pr -#define ui_len ui_i.ih_len -#define ui_src ui_i.ih_src -#define ui_dst ui_i.ih_dst -#define ui_sport ui_u.uh_sport -#define ui_dport ui_u.uh_dport -#define ui_ulen ui_u.uh_ulen -#define ui_sum ui_u.uh_sum +#define ui_next ui_i.ih_next +#define ui_prev ui_i.ih_prev +#define ui_x1 ui_i.ih_x1 +#define ui_pr ui_i.ih_pr +#define ui_len ui_i.ih_len +#define ui_src ui_i.ih_src +#define ui_dst ui_i.ih_dst +#define ui_sport ui_u.uh_sport +#define ui_dport ui_u.uh_dport +#define ui_ulen ui_u.uh_ulen +#define ui_sum ui_u.uh_sum struct kdp_ip { union { @@ -142,38 +142,38 @@ struct kdp_ip { struct { unsigned int #ifdef __LITTLE_ENDIAN__ - ip_xhl:4, /* header length */ - ip_xv:4, /* version */ - ip_xtos:8, /* type of service */ - ip_xlen:16; /* total length */ + ip_xhl:4, /* header length */ + ip_xv:4, /* version */ + ip_xtos:8, /* type of service */ + ip_xlen:16; /* total length */ #endif #ifdef __BIG_ENDIAN__ - ip_xv:4, /* version */ - ip_xhl:4, /* header length */ - ip_xtos:8, /* type of service */ - ip_xlen:16; /* total length */ + ip_xv:4, /* version */ + ip_xhl:4, /* header length */ + ip_xtos:8, /* type of service */ + ip_xlen:16; /* total length */ #endif } ip_x; } ip_vhltl; - u_short ip_id; /* identification */ - short ip_off; /* fragment offset field */ -#define IP_DF 0x4000 /* dont fragment flag */ -#define IP_MF 0x2000 /* more fragments flag */ -#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */ - u_char ip_ttl;
/* time to live */ - u_char ip_p; /* protocol */ - u_short ip_sum; /* checksum */ - struct kdp_in_addr ip_src,ip_dst; /* source and dest address */ + u_short ip_id; /* identification */ + short ip_off; /* fragment offset field */ +#define IP_DF 0x4000 /* dont fragment flag */ +#define IP_MF 0x2000 /* more fragments flag */ +#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */ + u_char ip_ttl; /* time to live */ + u_char ip_p; /* protocol */ + u_short ip_sum; /* checksum */ + struct kdp_in_addr ip_src, ip_dst; /* source and dest address */ }; -#define ip_v ip_vhltl.ip_x.ip_xv -#define ip_hl ip_vhltl.ip_x.ip_xhl -#define ip_tos ip_vhltl.ip_x.ip_xtos -#define ip_len ip_vhltl.ip_x.ip_xlen +#define ip_v ip_vhltl.ip_x.ip_xv +#define ip_hl ip_vhltl.ip_x.ip_xhl +#define ip_tos ip_vhltl.ip_x.ip_xtos +#define ip_len ip_vhltl.ip_x.ip_xlen -#define IPPROTO_UDP 17 -#define IPVERSION 4 +#define IPPROTO_UDP 17 +#define IPVERSION 4 -#define ETHERTYPE_IP 0x0800 /* IP protocol */ +#define ETHERTYPE_IP 0x0800 /* IP protocol */ /* * Ethernet Address Resolution Protocol. @@ -183,70 +183,70 @@ struct kdp_ip { * RFC 826. */ -#define ETHERTYPE_ARP 0x0806 /* Addr. resolution protocol */ +#define ETHERTYPE_ARP 0x0806 /* Addr. resolution protocol */ struct kdp_arphdr { u_short ar_hrd; /* format of hardware address */ -#define ARPHRD_ETHER 1 /* ethernet hardware format */ -#define ARPHRD_FRELAY 15 /* frame relay hardware format */ +#define ARPHRD_ETHER 1 /* ethernet hardware format */ +#define ARPHRD_FRELAY 15 /* frame relay hardware format */ u_short ar_pro; /* format of protocol address */ u_char ar_hln; /* length of hardware address */ u_char ar_pln; /* length of protocol address */ u_short ar_op; /* one of: */ -#define ARPOP_REQUEST 1 /* request to resolve address */ -#define ARPOP_REPLY 2 /* response to previous request */ -#define ARPOP_REVREQUEST 3 /* request protocol address given hardware */ -#define ARPOP_REVREPLY 4 /* response giving protocol address */ -#define ARPOP_INVREQUEST 8 /* request to identify peer */ -#define ARPOP_INVREPLY 9 /* response identifying peer */ +#define ARPOP_REQUEST 1 /* request to resolve address */ +#define ARPOP_REPLY 2 /* response to previous request */ +#define ARPOP_REVREQUEST 3 /* request protocol address given hardware */ +#define ARPOP_REVREPLY 4 /* response giving protocol address */ +#define ARPOP_INVREQUEST 8 /* request to identify peer */ +#define ARPOP_INVREPLY 9 /* response identifying peer */ }; struct kdp_ether_arp { - struct kdp_arphdr ea_hdr; /* fixed-size header */ + struct kdp_arphdr ea_hdr; /* fixed-size header */ u_char arp_sha[ETHER_ADDR_LEN]; /* sender hardware address */ - u_char arp_spa[4]; /* sender protocol address */ + u_char arp_spa[4]; /* sender protocol address */ u_char arp_tha[ETHER_ADDR_LEN]; /* target hardware address */ - u_char arp_tpa[4]; /* target protocol address */ + u_char arp_tpa[4]; /* target protocol address */ }; -#define arp_hrd ea_hdr.ar_hrd -#define arp_pro ea_hdr.ar_pro -#define arp_hln ea_hdr.ar_hln -#define arp_pln ea_hdr.ar_pln -#define arp_op ea_hdr.ar_op +#define arp_hrd ea_hdr.ar_hrd +#define arp_pro ea_hdr.ar_pro +#define arp_hln ea_hdr.ar_hln +#define arp_pln ea_hdr.ar_pln +#define arp_op ea_hdr.ar_op -#define ETHERMTU 1500 -#define ETHERHDRSIZE 14 -#define ETHERCRC 4 -#define KDP_MAXPACKET (ETHERHDRSIZE + ETHERMTU + ETHERCRC) +#define ETHERMTU 1500 +#define ETHERHDRSIZE 14 +#define ETHERCRC 4 +#define KDP_MAXPACKET (ETHERHDRSIZE + ETHERMTU + ETHERCRC) static struct { - unsigned char data[KDP_MAXPACKET]; - unsigned 
int off, len; - boolean_t input; + unsigned char data[KDP_MAXPACKET]; + unsigned int off, len; + boolean_t input; } pkt, saved_reply; struct kdp_manual_pkt manual_pkt; struct { struct { - struct kdp_in_addr in; - struct kdp_ether_addr ea; + struct kdp_in_addr in; + struct kdp_ether_addr ea; } loc; struct { - struct kdp_in_addr in; - struct kdp_ether_addr ea; + struct kdp_in_addr in; + struct kdp_ether_addr ea; } rmt; } adr; static const char *exception_message[] = { "Unknown", - "Memory access", /* EXC_BAD_ACCESS */ - "Failed instruction", /* EXC_BAD_INSTRUCTION */ - "Arithmetic", /* EXC_ARITHMETIC */ - "Emulation", /* EXC_EMULATION */ - "Software", /* EXC_SOFTWARE */ - "Breakpoint" /* EXC_BREAKPOINT */ + "Memory access", /* EXC_BAD_ACCESS */ + "Failed instruction", /* EXC_BAD_INSTRUCTION */ + "Arithmetic", /* EXC_ARITHMETIC */ + "Emulation", /* EXC_EMULATION */ + "Software", /* EXC_SOFTWARE */ + "Breakpoint" /* EXC_BREAKPOINT */ }; volatile int kdp_flag = 0; @@ -270,7 +270,7 @@ static void *kdp_current_ifp; static void kdp_handler( void *); -static uint32_t panic_server_ip = 0; +static uint32_t panic_server_ip = 0; static uint32_t parsed_router_ip = 0; static uint32_t router_ip = 0; static uint32_t target_ip = 0; @@ -284,10 +284,10 @@ static unsigned int panicd_port = CORE_REMOTE_PORT; static struct kdp_ether_addr etherbroadcastaddr = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; -static struct kdp_ether_addr router_mac = {{0, 0, 0 , 0, 0, 0}}; -static struct kdp_ether_addr destination_mac = {{0, 0, 0 , 0, 0, 0}}; -static struct kdp_ether_addr temp_mac = {{0, 0, 0 , 0, 0, 0}}; -static struct kdp_ether_addr current_resolved_MAC = {{0, 0, 0 , 0, 0, 0}}; +static struct kdp_ether_addr router_mac = {{0, 0, 0, 0, 0, 0}}; +static struct kdp_ether_addr destination_mac = {{0, 0, 0, 0, 0, 0}}; +static struct kdp_ether_addr temp_mac = {{0, 0, 0, 0, 0, 0}}; +static struct kdp_ether_addr current_resolved_MAC = {{0, 0, 0, 0, 0, 0}}; static boolean_t flag_panic_dump_in_progress = FALSE; static boolean_t flag_router_mac_initialized = FALSE; @@ -313,26 +313,26 @@ __private_extern__ volatile unsigned int flag_kdp_trigger_reboot = 0; extern unsigned int disableConsoleOutput; -extern void kdp_call(void); +extern void kdp_call(void); -void * kdp_get_interface(void); +void * kdp_get_interface(void); void kdp_set_gateway_mac(void *gatewaymac); -void kdp_set_ip_and_mac_addresses(struct kdp_in_addr *ipaddr, struct kdp_ether_addr *); -void kdp_set_interface(void *interface, const struct kdp_ether_addr *macaddr); +void kdp_set_ip_and_mac_addresses(struct kdp_in_addr *ipaddr, struct kdp_ether_addr *); +void kdp_set_interface(void *interface, const struct kdp_ether_addr *macaddr); -void kdp_disable_arp(void); -static void kdp_arp_reply(struct kdp_ether_arp *); -static void kdp_process_arp_reply(struct kdp_ether_arp *); -static boolean_t kdp_arp_resolve(uint32_t, struct kdp_ether_addr *); +void kdp_disable_arp(void); +static void kdp_arp_reply(struct kdp_ether_arp *); +static void kdp_process_arp_reply(struct kdp_ether_arp *); +static boolean_t kdp_arp_resolve(uint32_t, struct kdp_ether_addr *); -static volatile unsigned kdp_reentry_deadline; +static volatile unsigned kdp_reentry_deadline; static uint32_t kdp_crashdump_feature_mask = KDP_FEATURE_LARGE_CRASHDUMPS | KDP_FEATURE_LARGE_PKT_SIZE; uint32_t kdp_feature_large_crashdumps, kdp_feature_large_pkt_size; char kdp_kernelversion_string[256]; -static boolean_t gKDPDebug = FALSE; +static boolean_t gKDPDebug = FALSE; #define KDP_DEBUG(...) 
if (gKDPDebug) printf(__VA_ARGS__); @@ -343,15 +343,17 @@ uint64_t kdp_max_superblock_dump_time = 0; uint64_t kdp_superblock_dump_time = 0; uint64_t kdp_superblock_dump_start_time = 0; static thread_call_t -kdp_timer_call; + kdp_timer_call; static void -kdp_ml_enter_debugger_wrapper(__unused void *param0, __unused void *param1) { +kdp_ml_enter_debugger_wrapper(__unused void *param0, __unused void *param1) +{ kdp_ml_enter_debugger(); } static void -kdp_timer_callout_init(void) { +kdp_timer_callout_init(void) +{ kdp_timer_call = thread_call_allocate(kdp_ml_enter_debugger_wrapper, NULL); } @@ -362,12 +364,14 @@ wait_for_link(void) { static int first = 0; - if (!kdp_en_linkstatus) + if (!kdp_en_linkstatus) { return; + } while (((*kdp_en_linkstatus)() & LINK_UP_STATUS) != LINK_UP_STATUS) { - if (first) + if (first) { continue; + } first = 1; printf("Waiting for link to become available.\n"); @@ -408,86 +412,102 @@ kdp_unregister_link(__unused kdp_link_t link, __unused kdp_mode_t mode) void kdp_register_send_receive( - kdp_send_t send, - kdp_receive_t receive) + kdp_send_t send, + kdp_receive_t receive) { - unsigned int debug = 0; + unsigned int debug = 0; - PE_parse_boot_argn("debug", &debug, sizeof (debug)); + PE_parse_boot_argn("debug", &debug, sizeof(debug)); #if defined(__arm__) || defined(__arm64__) { uint32_t debug_flags; - if (!PE_i_can_has_debugger(&debug_flags)) + if (!PE_i_can_has_debugger(&debug_flags)) { debug = 0; + } } #endif - if (!debug) + if (!debug) { return; + } kdp_en_send_pkt = send; kdp_en_recv_pkt = receive; - if (debug & DB_KDP_BP_DIS) - kdp_flag |= KDP_BP_DIS; - if (debug & DB_KDP_GETC_ENA) - kdp_flag |= KDP_GETC_ENA; - if (debug & DB_ARP) + if (debug & DB_KDP_BP_DIS) { + kdp_flag |= KDP_BP_DIS; + } + if (debug & DB_KDP_GETC_ENA) { + kdp_flag |= KDP_GETC_ENA; + } + if (debug & DB_ARP) { kdp_flag |= KDP_ARP; + } - if (debug & DB_KERN_DUMP_ON_PANIC) + if (debug & DB_KERN_DUMP_ON_PANIC) { kdp_flag |= KDP_PANIC_DUMP_ENABLED; - if (debug & DB_KERN_DUMP_ON_NMI) + } + if (debug & DB_KERN_DUMP_ON_NMI) { kdp_flag |= PANIC_CORE_ON_NMI; + } - if (debug & DB_DBG_POST_CORE) + if (debug & DB_DBG_POST_CORE) { kdp_flag |= DBG_POST_CORE; + } - if (debug & DB_PANICLOG_DUMP) + if (debug & DB_PANICLOG_DUMP) { kdp_flag |= PANIC_LOG_DUMP; + } kdp_corezip_disabled = (0 != (debug & DB_DISABLE_GZIP_CORE)); - if (PE_parse_boot_argn("_panicd_ip", panicd_ip_str, sizeof (panicd_ip_str))) + if (PE_parse_boot_argn("_panicd_ip", panicd_ip_str, sizeof(panicd_ip_str))) { panicd_specified = TRUE; + } - if ((debug & DB_REBOOT_POST_CORE) && (panicd_specified == TRUE)) + if ((debug & DB_REBOOT_POST_CORE) && (panicd_specified == TRUE)) { kdp_flag |= REBOOT_POST_CORE; + } - if (PE_parse_boot_argn("_router_ip", router_ip_str, sizeof (router_ip_str))) + if (PE_parse_boot_argn("_router_ip", router_ip_str, sizeof(router_ip_str))) { router_specified = TRUE; + } - if (!PE_parse_boot_argn("panicd_port", &panicd_port, sizeof (panicd_port))) + if (!PE_parse_boot_argn("panicd_port", &panicd_port, sizeof(panicd_port))) { panicd_port = CORE_REMOTE_PORT; + } - if (PE_parse_boot_argn("_panicd_corename", &corename_str, sizeof (corename_str))) + if (PE_parse_boot_argn("_panicd_corename", &corename_str, sizeof(corename_str))) { corename_specified = TRUE; + } kdp_flag |= KDP_READY; current_debugger = KDP_CUR_DB; if ((kdp_current_ip_address != 0) && halt_in_debugger) { - kdp_call(); - halt_in_debugger=0; + kdp_call(); + halt_in_debugger = 0; } } void kdp_unregister_send_receive( - __unused kdp_send_t send, - __unused 
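/*
 * Illustrative registration from a polled-mode network driver, using the
 * KPI defined above. The driver function names and the exact callback
 * signatures are assumptions for the sketch, not taken from this file:
 *
 *   static void my_en_send(void *pkt, unsigned int len);        // assumed
 *   static void my_en_recv(void *pkt, unsigned int *len,
 *                          unsigned int timeout_ms);            // assumed
 *   ...
 *   kdp_register_send_receive(my_en_send, my_en_recv);
 *
 * Note the call is a no-op unless the "debug" boot-arg enables kernel
 * debugging, as the PE_parse_boot_argn() check above shows.
 */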
kdp_receive_t receive) + __unused kdp_send_t send, + __unused kdp_receive_t receive) { - if (current_debugger == KDP_CUR_DB) + if (current_debugger == KDP_CUR_DB) { current_debugger = NO_CUR_DB; + } kdp_flag &= ~KDP_READY; kdp_en_send_pkt = NULL; kdp_en_recv_pkt = NULL; } static void -kdp_schedule_debugger_reentry(unsigned interval) { +kdp_schedule_debugger_reentry(unsigned interval) +{ uint64_t deadline;; clock_interval_to_deadline(interval, 1000 * 1000, &deadline); @@ -496,53 +516,54 @@ kdp_schedule_debugger_reentry(unsigned interval) { static void enaddr_copy( - void *src, - void *dst -) + void *src, + void *dst + ) { - bcopy((char *)src, (char *)dst, sizeof (struct kdp_ether_addr)); + bcopy((char *)src, (char *)dst, sizeof(struct kdp_ether_addr)); } static unsigned short ip_sum( - unsigned char *c, - unsigned int hlen + unsigned char *c, + unsigned int hlen ) { - unsigned int high, low, sum; - + unsigned int high, low, sum; + high = low = 0; while (hlen-- > 0) { low += c[1] + c[3]; high += c[0] + c[2]; - - c += sizeof (int); + + c += sizeof(int); } - + sum = (high << 8) + low; sum = (sum >> 16) + (sum & 65535); - - return (sum > 65535 ? sum - 65535 : sum); + + return sum > 65535 ? sum - 65535 : sum; } static void kdp_reply( - unsigned short reply_port, - const boolean_t sideband - ) + unsigned short reply_port, + const boolean_t sideband + ) { - struct kdp_udpiphdr aligned_ui, *ui = &aligned_ui; - struct kdp_ip aligned_ip, *ip = &aligned_ip; - struct kdp_in_addr tmp_ipaddr; - struct kdp_ether_addr tmp_enaddr; - struct kdp_ether_header *eh = NULL; - - if (!pkt.input) + struct kdp_udpiphdr aligned_ui, *ui = &aligned_ui; + struct kdp_ip aligned_ip, *ip = &aligned_ip; + struct kdp_in_addr tmp_ipaddr; + struct kdp_ether_addr tmp_enaddr; + struct kdp_ether_header *eh = NULL; + + if (!pkt.input) { kdp_panic("kdp_reply"); - - pkt.off -= (unsigned int)sizeof (struct kdp_udpiphdr); + } + + pkt.off -= (unsigned int)sizeof(struct kdp_udpiphdr); -#if DO_ALIGN +#if DO_ALIGN bcopy((char *)&pkt.data[pkt.off], (char *)ui, sizeof(*ui)); #else ui = (struct kdp_udpiphdr *)&pkt.data[pkt.off]; @@ -550,7 +571,7 @@ kdp_reply( ui->ui_next = ui->ui_prev = 0; ui->ui_x1 = 0; ui->ui_pr = IPPROTO_UDP; - ui->ui_len = htons((u_short)pkt.len + sizeof (struct kdp_udphdr)); + ui->ui_len = htons((u_short)pkt.len + sizeof(struct kdp_udphdr)); tmp_ipaddr = ui->ui_src; ui->ui_src = ui->ui_dst; ui->ui_dst = tmp_ipaddr; @@ -564,54 +585,57 @@ kdp_reply( #else ip = (struct kdp_ip *)&pkt.data[pkt.off]; #endif - ip->ip_len = htons(sizeof (struct kdp_udpiphdr) + pkt.len); + ip->ip_len = htons(sizeof(struct kdp_udpiphdr) + pkt.len); ip->ip_v = IPVERSION; ip->ip_id = htons(ip_id++); - ip->ip_hl = sizeof (struct kdp_ip) >> 2; + ip->ip_hl = sizeof(struct kdp_ip) >> 2; ip->ip_ttl = udp_ttl; ip->ip_sum = 0; ip->ip_sum = htons(~ip_sum((unsigned char *)ip, ip->ip_hl)); #if DO_ALIGN bcopy((char *)ip, (char *)&pkt.data[pkt.off], sizeof(*ip)); #endif - - pkt.len += (unsigned int)sizeof (struct kdp_udpiphdr); - - pkt.off -= (unsigned int)sizeof (struct kdp_ether_header); - + + pkt.len += (unsigned int)sizeof(struct kdp_udpiphdr); + + pkt.off -= (unsigned int)sizeof(struct kdp_ether_header); + eh = (struct kdp_ether_header *)&pkt.data[pkt.off]; enaddr_copy(eh->ether_shost, &tmp_enaddr); enaddr_copy(eh->ether_dhost, eh->ether_shost); enaddr_copy(&tmp_enaddr, eh->ether_dhost); eh->ether_type = htons(ETHERTYPE_IP); - - pkt.len += (unsigned int)sizeof (struct kdp_ether_header); - + + pkt.len += (unsigned int)sizeof(struct kdp_ether_header); + // 
save reply for possible retransmission assert(pkt.len <= KDP_MAXPACKET); - if (!sideband) + if (!sideband) { bcopy((char *)&pkt, (char *)&saved_reply, sizeof(saved_reply)); + } kdp_send_data(&pkt.data[pkt.off], pkt.len); // increment expected sequence number - if (!sideband) + if (!sideband) { exception_seq++; + } } static void kdp_send( - unsigned short remote_port -) + unsigned short remote_port + ) { - struct kdp_udpiphdr aligned_ui, *ui = &aligned_ui; - struct kdp_ip aligned_ip, *ip = &aligned_ip; - struct kdp_ether_header *eh; + struct kdp_udpiphdr aligned_ui, *ui = &aligned_ui; + struct kdp_ip aligned_ip, *ip = &aligned_ip; + struct kdp_ether_header *eh; - if (pkt.input) + if (pkt.input) { kdp_panic("kdp_send"); + } - pkt.off -= (unsigned int)sizeof (struct kdp_udpiphdr); + pkt.off -= (unsigned int)sizeof(struct kdp_udpiphdr); #if DO_ALIGN bcopy((char *)&pkt.data[pkt.off], (char *)ui, sizeof(*ui)); @@ -621,7 +645,7 @@ kdp_send( ui->ui_next = ui->ui_prev = 0; ui->ui_x1 = 0; ui->ui_pr = IPPROTO_UDP; - ui->ui_len = htons((u_short)pkt.len + sizeof (struct kdp_udphdr)); + ui->ui_len = htons((u_short)pkt.len + sizeof(struct kdp_udphdr)); ui->ui_src = adr.loc.in; ui->ui_dst = adr.rmt.in; ui->ui_sport = htons(KDP_REMOTE_PORT); @@ -634,10 +658,10 @@ kdp_send( #else ip = (struct kdp_ip *)&pkt.data[pkt.off]; #endif - ip->ip_len = htons(sizeof (struct kdp_udpiphdr) + pkt.len); + ip->ip_len = htons(sizeof(struct kdp_udpiphdr) + pkt.len); ip->ip_v = IPVERSION; ip->ip_id = htons(ip_id++); - ip->ip_hl = sizeof (struct kdp_ip) >> 2; + ip->ip_hl = sizeof(struct kdp_ip) >> 2; ip->ip_ttl = udp_ttl; ip->ip_sum = 0; ip->ip_sum = htons(~ip_sum((unsigned char *)ip, ip->ip_hl)); @@ -645,16 +669,16 @@ kdp_send( bcopy((char *)ip, (char *)&pkt.data[pkt.off], sizeof(*ip)); #endif - pkt.len += (unsigned int)sizeof (struct kdp_udpiphdr); + pkt.len += (unsigned int)sizeof(struct kdp_udpiphdr); - pkt.off -= (unsigned int)sizeof (struct kdp_ether_header); + pkt.off -= (unsigned int)sizeof(struct kdp_ether_header); eh = (struct kdp_ether_header *)&pkt.data[pkt.off]; enaddr_copy(&adr.loc.ea, eh->ether_shost); enaddr_copy(&adr.rmt.ea, eh->ether_dhost); eh->ether_type = htons(ETHERTYPE_IP); - pkt.len += (unsigned int)sizeof (struct kdp_ether_header); + pkt.len += (unsigned int)sizeof(struct kdp_ether_header); kdp_send_data(&pkt.data[pkt.off], pkt.len); } @@ -664,58 +688,62 @@ debugger_if_necessary(void) { if ((current_debugger == KDP_CUR_DB) && halt_in_debugger) { kdp_call(); - halt_in_debugger=0; + halt_in_debugger = 0; } } /* We don't interpret this pointer, we just give it to the bsd stack - so it can decide when to set the MAC and IP info. We'll - early initialize the MAC/IP info if we can so that we can use - KDP early in boot. These values may subsequently get over-written - when the interface gets initialized for real. -*/ + * so it can decide when to set the MAC and IP info. We'll + * early initialize the MAC/IP info if we can so that we can use + * KDP early in boot. These values may subsequently get over-written + * when the interface gets initialized for real. 
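 * For example (address value hypothetical), a static debugger IP can be
 * forced from the boot command line with
 *   kdp_ip_addr=192.168.2.2
 * otherwise kdp_set_interface() below falls back to the address saved in
 * the _kdp_ipstr NVRAM property on a previous boot.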
+ */ void kdp_set_interface(void *ifp, const struct kdp_ether_addr *macaddr) { char kdpstr[80]; - struct kdp_in_addr addr = { 0 }; - unsigned int len; - + struct kdp_in_addr addr = { 0 }; + unsigned int len; + kdp_current_ifp = ifp; - if (PE_parse_boot_argn("kdp_ip_addr", kdpstr, sizeof(kdpstr))) { - /* look for a static ip address */ - if (inet_aton(kdpstr, &addr) == FALSE) - goto done; + if (PE_parse_boot_argn("kdp_ip_addr", kdpstr, sizeof(kdpstr))) { + /* look for a static ip address */ + if (inet_aton(kdpstr, &addr) == FALSE) { + goto done; + } - goto config_network; - } + goto config_network; + } - /* use saved ip address */ - save_ip_in_nvram = TRUE; + /* use saved ip address */ + save_ip_in_nvram = TRUE; - len = sizeof(kdpstr); - if (PEReadNVRAMProperty("_kdp_ipstr", kdpstr, &len) == FALSE) - goto done; + len = sizeof(kdpstr); + if (PEReadNVRAMProperty("_kdp_ipstr", kdpstr, &len) == FALSE) { + goto done; + } - kdpstr[len < sizeof(kdpstr) ? len : sizeof(kdpstr) - 1] = '\0'; - if (inet_aton(kdpstr, &addr) == FALSE) - goto done; + kdpstr[len < sizeof(kdpstr) ? len : sizeof(kdpstr) - 1] = '\0'; + if (inet_aton(kdpstr, &addr) == FALSE) { + goto done; + } config_network: - kdp_current_ip_address = addr.s_addr; - if (macaddr) - kdp_current_mac_address = *macaddr; - - /* we can't drop into the debugger at this point because the - link will likely not be up. when getDebuggerLinkStatus() support gets - added to the appropriate network drivers, adding the - following will enable this capability: - debugger_if_necessary(); - */ + kdp_current_ip_address = addr.s_addr; + if (macaddr) { + kdp_current_mac_address = *macaddr; + } + + /* we can't drop into the debugger at this point because the + * link will likely not be up. when getDebuggerLinkStatus() support gets + * added to the appropriate network drivers, adding the + * following will enable this capability: + * debugger_if_necessary(); + */ done: - return; + return; } void * @@ -724,18 +752,19 @@ kdp_get_interface(void) return kdp_current_ifp; } -void +void kdp_set_ip_and_mac_addresses( - struct kdp_in_addr *ipaddr, - struct kdp_ether_addr *macaddr) + struct kdp_in_addr *ipaddr, + struct kdp_ether_addr *macaddr) { static uint64_t last_time = (uint64_t) -1; static uint64_t throttle_val = 0; uint64_t cur_time; char addr[16]; - if (kdp_current_ip_address == ipaddr->s_addr) + if (kdp_current_ip_address == ipaddr->s_addr) { goto done; + } /* don't replace if serial debugging is configured */ if (!KDP_SERIAL_ENABLED() || @@ -744,21 +773,24 @@ kdp_set_ip_and_mac_addresses( kdp_current_ip_address = ipaddr->s_addr; } - if (save_ip_in_nvram == FALSE) + if (save_ip_in_nvram == FALSE) { goto done; + } - if (inet_ntoa_r(*ipaddr, addr, sizeof(addr)) == NULL) + if (inet_ntoa_r(*ipaddr, addr, sizeof(addr)) == NULL) { goto done; + } /* throttle writes if needed */ - if (!throttle_val) + if (!throttle_val) { nanoseconds_to_absolutetime(KDP_THROTTLE_VALUE, &throttle_val); + } cur_time = mach_absolute_time(); if (last_time == (uint64_t) -1 || ((cur_time - last_time) > throttle_val)) { PEWriteNVRAMProperty("_kdp_ipstr", addr, - (const unsigned int) strlen(addr)); + (const unsigned int) strlen(addr)); } last_time = cur_time; @@ -771,15 +803,15 @@ kdp_set_gateway_mac(void *gatewaymac) { router_mac = *(struct kdp_ether_addr *)gatewaymac; flag_router_mac_initialized = TRUE; -} +} -struct kdp_ether_addr +struct kdp_ether_addr kdp_get_mac_addr(void) { return kdp_current_mac_address; } -unsigned int +unsigned int kdp_get_ip_address(void) { return (unsigned 
int)kdp_current_ip_address; @@ -794,13 +826,13 @@ kdp_disable_arp(void) static void kdp_arp_dispatch(void) { - struct kdp_ether_arp aligned_ea, *ea = &aligned_ea; - unsigned arp_header_offset; + struct kdp_ether_arp aligned_ea, *ea = &aligned_ea; + unsigned arp_header_offset; arp_header_offset = (unsigned)sizeof(struct kdp_ether_header) + pkt.off; memcpy((void *)ea, (void *)&pkt.data[arp_header_offset], sizeof(*ea)); - switch(ntohs(ea->arp_op)) { + switch (ntohs(ea->arp_op)) { case ARPOP_REQUEST: kdp_arp_reply(ea); break; @@ -816,12 +848,14 @@ static void kdp_process_arp_reply(struct kdp_ether_arp *ea) { /* Are we interested in ARP replies? */ - if (flag_arp_resolved == TRUE) + if (flag_arp_resolved == TRUE) { return; + } /* Did we receive a reply from the right source? */ - if (((struct kdp_in_addr *)(ea->arp_spa))->s_addr != target_ip) - return; + if (((struct kdp_in_addr *)(ea->arp_spa))->s_addr != target_ip) { + return; + } flag_arp_resolved = TRUE; current_resolved_MAC = *(struct kdp_ether_addr *) (ea->arp_sha); @@ -833,35 +867,37 @@ kdp_process_arp_reply(struct kdp_ether_arp *ea) * is set. */ -static void +static void kdp_arp_reply(struct kdp_ether_arp *ea) { - struct kdp_ether_header *eh; + struct kdp_ether_header *eh; - struct kdp_in_addr isaddr, itaddr, myaddr; - struct kdp_ether_addr my_enaddr; + struct kdp_in_addr isaddr, itaddr, myaddr; + struct kdp_ether_addr my_enaddr; eh = (struct kdp_ether_header *)&pkt.data[pkt.off]; pkt.off += (unsigned int)sizeof(struct kdp_ether_header); - if(ntohs(ea->arp_op) != ARPOP_REQUEST) - return; + if (ntohs(ea->arp_op) != ARPOP_REQUEST) { + return; + } myaddr.s_addr = kdp_get_ip_address(); my_enaddr = kdp_get_mac_addr(); if ((ntohl(myaddr.s_addr) == 0) || ((my_enaddr.ether_addr_octet[0] & 0xff) == 0 - && (my_enaddr.ether_addr_octet[1] & 0xff) == 0 - && (my_enaddr.ether_addr_octet[2] & 0xff) == 0 - && (my_enaddr.ether_addr_octet[3] & 0xff) == 0 - && (my_enaddr.ether_addr_octet[4] & 0xff) == 0 - && (my_enaddr.ether_addr_octet[5] & 0xff) == 0 - )) + && (my_enaddr.ether_addr_octet[1] & 0xff) == 0 + && (my_enaddr.ether_addr_octet[2] & 0xff) == 0 + && (my_enaddr.ether_addr_octet[3] & 0xff) == 0 + && (my_enaddr.ether_addr_octet[4] & 0xff) == 0 + && (my_enaddr.ether_addr_octet[5] & 0xff) == 0 + )) { return; + } - (void)memcpy((void *)&isaddr, (void *)ea->arp_spa, sizeof (isaddr)); - (void)memcpy((void *)&itaddr, (void *)ea->arp_tpa, sizeof (itaddr)); + (void)memcpy((void *)&isaddr, (void *)ea->arp_spa, sizeof(isaddr)); + (void)memcpy((void *)&itaddr, (void *)ea->arp_tpa, sizeof(itaddr)); if (itaddr.s_addr == myaddr.s_addr) { (void)memcpy((void *)ea->arp_tha, (void *)ea->arp_sha, sizeof(ea->arp_sha)); @@ -871,12 +907,12 @@ kdp_arp_reply(struct kdp_ether_arp *ea) (void)memcpy((void *)ea->arp_spa, (void *) &itaddr, sizeof(ea->arp_spa)); ea->arp_op = htons(ARPOP_REPLY); - ea->arp_pro = htons(ETHERTYPE_IP); + ea->arp_pro = htons(ETHERTYPE_IP); (void)memcpy(eh->ether_dhost, ea->arp_tha, sizeof(eh->ether_dhost)); (void)memcpy(eh->ether_shost, &my_enaddr, sizeof(eh->ether_shost)); eh->ether_type = htons(ETHERTYPE_ARP); (void)memcpy(&pkt.data[pkt.off], ea, sizeof(*ea)); - pkt.off -= (unsigned int)sizeof (struct kdp_ether_header); + pkt.off -= (unsigned int)sizeof(struct kdp_ether_header); /* pkt.len is still the length we want, ether_header+ether_arp */ kdp_send_data(&pkt.data[pkt.off], pkt.len); } @@ -885,16 +921,17 @@ kdp_arp_reply(struct kdp_ether_arp *ea) static void kdp_poll(void) { - struct kdp_ether_header *eh = NULL; - struct kdp_udpiphdr aligned_ui, 
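/*
 * The aligned_* locals here are the DO_ALIGN idiom used throughout this
 * file: header bytes can sit at odd offsets inside pkt.data, so they are
 * bcopy()'d into naturally aligned stack copies before their fields are
 * touched (and copied back if modified). Sketch of the idiom, with names
 * reused from this function:
 *
 *   struct kdp_udpiphdr aligned_ui, *ui = &aligned_ui;
 *   bcopy((char *)&pkt.data[pkt.off], (char *)ui, sizeof(*ui));
 *   // ... ui->ui_pr, ui->ui_dport, etc. can now be read safely ...
 *
 * A direct cast of &pkt.data[pkt.off] could fault on strict-alignment
 * CPUs; DO_ALIGN trades a small copy for portable access.
 */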
*ui = &aligned_ui; - struct kdp_ip aligned_ip, *ip = &aligned_ip; - static int msg_printed; + struct kdp_ether_header *eh = NULL; + struct kdp_udpiphdr aligned_ui, *ui = &aligned_ui; + struct kdp_ip aligned_ip, *ip = &aligned_ip; + static int msg_printed; - if (pkt.input) + if (pkt.input) { kdp_panic("kdp_poll"); - + } + if (!kdp_en_recv_pkt || !kdp_en_send_pkt) { - if( msg_printed == 0) { + if (msg_printed == 0) { msg_printed = 1; printf("kdp_poll: no debugger device\n"); } @@ -902,14 +939,15 @@ kdp_poll(void) } pkt.off = pkt.len = 0; - kdp_receive_data(pkt.data, &pkt.len, 3/* ms */); + kdp_receive_data(pkt.data, &pkt.len, 3 /* ms */); - if (pkt.len == 0) + if (pkt.len == 0) { return; + } if (pkt.len >= sizeof(struct kdp_ether_header)) { - eh = (struct kdp_ether_header *)&pkt.data[pkt.off]; - + eh = (struct kdp_ether_header *)&pkt.data[pkt.off]; + if (kdp_flag & KDP_ARP) { if (ntohs(eh->ether_type) == ETHERTYPE_ARP) { kdp_arp_dispatch(); @@ -918,10 +956,11 @@ kdp_poll(void) } } - if (pkt.len < (sizeof (struct kdp_ether_header) + sizeof (struct kdp_udpiphdr))) + if (pkt.len < (sizeof(struct kdp_ether_header) + sizeof(struct kdp_udpiphdr))) { return; + } - pkt.off += (unsigned int)sizeof (struct kdp_ether_header); + pkt.off += (unsigned int)sizeof(struct kdp_ether_header); if (ntohs(eh->ether_type) != ETHERTYPE_IP) { return; } @@ -934,34 +973,33 @@ kdp_poll(void) ip = (struct kdp_ip *)&pkt.data[pkt.off]; #endif - pkt.off += (unsigned int)sizeof (struct kdp_udpiphdr); + pkt.off += (unsigned int)sizeof(struct kdp_udpiphdr); if (ui->ui_pr != IPPROTO_UDP) { return; } - - if (ip->ip_hl > (sizeof (struct kdp_ip) >> 2)) { + + if (ip->ip_hl > (sizeof(struct kdp_ip) >> 2)) { return; } if (ntohs(ui->ui_dport) != KDP_REMOTE_PORT) { - if (panicd_port == (ntohs(ui->ui_dport)) && + if (panicd_port == (ntohs(ui->ui_dport)) && flag_panic_dump_in_progress) { last_panic_port = ui->ui_sport; - } - else + } else { return; + } } - /* If we receive a kernel debugging packet whilst a - * core dump is in progress, abort the transfer and - * enter the debugger if not told otherwise. + /* If we receive a kernel debugging packet whilst a + * core dump is in progress, abort the transfer and + * enter the debugger if not told otherwise. */ - else - if (flag_panic_dump_in_progress) { - if (!flag_dont_abort_panic_dump) { - abort_panic_transfer(); - } - return; + else if (flag_panic_dump_in_progress) { + if (!flag_dont_abort_panic_dump) { + abort_panic_transfer(); } + return; + } if (!kdp.is_conn && !flag_panic_dump_in_progress) { enaddr_copy(eh->ether_dhost, &adr.loc.ea); @@ -974,7 +1012,7 @@ kdp_poll(void) /* * Calculate kdp packet length. 
*/ - pkt.len = ntohs((u_short)ui->ui_ulen) - (unsigned int)sizeof (struct kdp_udphdr); + pkt.len = ntohs((u_short)ui->ui_ulen) - (unsigned int)sizeof(struct kdp_udphdr); pkt.input = TRUE; } @@ -986,10 +1024,10 @@ kdp_poll(void) static void transmit_ARP_request(uint32_t ip_addr) { - struct kdp_ether_header *eh = (struct kdp_ether_header *) &pkt.data[0]; - struct kdp_ether_arp *ea = (struct kdp_ether_arp *) &pkt.data[sizeof(struct kdp_ether_header)]; + struct kdp_ether_header *eh = (struct kdp_ether_header *) &pkt.data[0]; + struct kdp_ether_arp *ea = (struct kdp_ether_arp *) &pkt.data[sizeof(struct kdp_ether_header)]; - KDP_DEBUG("Transmitting ARP request\n"); + KDP_DEBUG("Transmitting ARP request\n"); /* Populate the ether_header */ eh->ether_type = htons(ETHERTYPE_ARP); enaddr_copy(&kdp_current_mac_address, eh->ether_shost); @@ -1048,7 +1086,7 @@ TRANSMIT_RETRY: *resolved_MAC = current_resolved_MAC; return TRUE; } - + if (!flag_panic_dump_in_progress || pkt.input) { /* we received a debugging packet, bail*/ printf("Received a debugger packet,transferring control to debugger\n"); /* Indicate that we should wait in the debugger when we return */ @@ -1066,17 +1104,18 @@ TRANSMIT_RETRY: static void kdp_handler( - void *saved_state -) + void *saved_state + ) { - unsigned short reply_port; - kdp_hdr_t aligned_hdr, *hdr = &aligned_hdr; + unsigned short reply_port; + kdp_hdr_t aligned_hdr, *hdr = &aligned_hdr; kdp.saved_state = saved_state; // see comment in kdp_raise_exception do { - while (!pkt.input) + while (!pkt.input) { kdp_poll(); + } #if DO_ALIGN bcopy((char *)&pkt.data[pkt.off], (char *)hdr, sizeof(*hdr)); @@ -1089,24 +1128,25 @@ kdp_handler( goto again; } - if (hdr->request == KDP_REATTACH) + if (hdr->request == KDP_REATTACH) { exception_seq = hdr->seq; + } // check for retransmitted request if (hdr->seq == (exception_seq - 1)) { /* retransmit last reply */ kdp_send_data(&saved_reply.data[saved_reply.off], - saved_reply.len); + saved_reply.len); goto again; } else if ((hdr->seq != exception_seq) && - (hdr->request != KDP_CONNECT)) { + (hdr->request != KDP_CONNECT)) { printf("kdp: bad sequence %d (want %d)\n", - hdr->seq, exception_seq); + hdr->seq, exception_seq); goto again; } /* This is a manual side-channel to the main KDP protocol. - * A client like GDB/kgmacros can manually construct + * A client like GDB/kgmacros can manually construct * a request, set the input flag, issue a dummy KDP request, * and then manually collect the result */ @@ -1117,24 +1157,24 @@ kdp_handler( /* process */ int packet_length = manual_pkt.len; kdp_packet((unsigned char *)&manual_pkt.data, - &packet_length, - &manual_port_unused); + &packet_length, + &manual_port_unused); manual_pkt.len = packet_length; } manual_pkt.input = 0; } if (kdp_packet((unsigned char*)&pkt.data[pkt.off], - (int *)&pkt.len, - (unsigned short *)&reply_port)) { + (int *)&pkt.len, + (unsigned short *)&reply_port)) { boolean_t sideband = FALSE; /* if it's an already connected error message, - * send a sideband reply for that. for successful connects, - * make sure the sequence number is correct. */ + * send a sideband reply for that. for successful connects, + * make sure the sequence number is correct. 
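 * A sideband reply is still transmitted, but as kdp_reply() above shows
 * it is neither saved for retransmission nor allowed to advance
 * exception_seq; both of those happen only on the normal (!sideband) path.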
*/ if (hdr->request == KDP_CONNECT) { kdp_connect_reply_t *rp = - (kdp_connect_reply_t *) &pkt.data[pkt.off]; + (kdp_connect_reply_t *) &pkt.data[pkt.off]; kdp_error_t err = rp->error; if (err == KDPERR_NO_ERROR) { @@ -1155,9 +1195,9 @@ again: static void kdp_connection_wait(void) { - unsigned short reply_port; - struct kdp_ether_addr kdp_mac_addr = kdp_get_mac_addr(); - unsigned int ip_addr = ntohl(kdp_get_ip_address()); + unsigned short reply_port; + struct kdp_ether_addr kdp_mac_addr = kdp_get_mac_addr(); + unsigned int ip_addr = ntohl(kdp_get_ip_address()); /* * Do both a printf() and a kprintf() of the MAC and IP so that @@ -1170,34 +1210,34 @@ kdp_connection_wait(void) kprintf("Using serial KDP.\n"); } else { printf("ethernet MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", - kdp_mac_addr.ether_addr_octet[0] & 0xff, - kdp_mac_addr.ether_addr_octet[1] & 0xff, - kdp_mac_addr.ether_addr_octet[2] & 0xff, - kdp_mac_addr.ether_addr_octet[3] & 0xff, - kdp_mac_addr.ether_addr_octet[4] & 0xff, - kdp_mac_addr.ether_addr_octet[5] & 0xff); + kdp_mac_addr.ether_addr_octet[0] & 0xff, + kdp_mac_addr.ether_addr_octet[1] & 0xff, + kdp_mac_addr.ether_addr_octet[2] & 0xff, + kdp_mac_addr.ether_addr_octet[3] & 0xff, + kdp_mac_addr.ether_addr_octet[4] & 0xff, + kdp_mac_addr.ether_addr_octet[5] & 0xff); kprintf("ethernet MAC address: %02x:%02x:%02x:%02x:%02x:%02x\n", - kdp_mac_addr.ether_addr_octet[0] & 0xff, - kdp_mac_addr.ether_addr_octet[1] & 0xff, - kdp_mac_addr.ether_addr_octet[2] & 0xff, - kdp_mac_addr.ether_addr_octet[3] & 0xff, - kdp_mac_addr.ether_addr_octet[4] & 0xff, - kdp_mac_addr.ether_addr_octet[5] & 0xff); + kdp_mac_addr.ether_addr_octet[0] & 0xff, + kdp_mac_addr.ether_addr_octet[1] & 0xff, + kdp_mac_addr.ether_addr_octet[2] & 0xff, + kdp_mac_addr.ether_addr_octet[3] & 0xff, + kdp_mac_addr.ether_addr_octet[4] & 0xff, + kdp_mac_addr.ether_addr_octet[5] & 0xff); printf("ip address: %d.%d.%d.%d\n", - (ip_addr & 0xff000000) >> 24, - (ip_addr & 0xff0000) >> 16, - (ip_addr & 0xff00) >> 8, - (ip_addr & 0xff)); + (ip_addr & 0xff000000) >> 24, + (ip_addr & 0xff0000) >> 16, + (ip_addr & 0xff00) >> 8, + (ip_addr & 0xff)); kprintf("ip address: %d.%d.%d.%d\n", - (ip_addr & 0xff000000) >> 24, - (ip_addr & 0xff0000) >> 16, - (ip_addr & 0xff00) >> 8, - (ip_addr & 0xff)); + (ip_addr & 0xff000000) >> 24, + (ip_addr & 0xff0000) >> 16, + (ip_addr & 0xff00) >> 8, + (ip_addr & 0xff)); } - + printf("\nWaiting for remote debugger connection.\n"); kprintf("\nWaiting for remote debugger connection.\n"); @@ -1206,7 +1246,7 @@ kdp_connection_wait(void) #endif if (reattach_wait == 0) { - if((kdp_flag & KDP_GETC_ENA) && (0 != kdp_getc())) { + if ((kdp_flag & KDP_GETC_ENA) && (0 != kdp_getc())) { printf("Options..... Type\n"); printf("------------ ----\n"); printf("continue.... 'c'\n"); @@ -1215,15 +1255,15 @@ kdp_connection_wait(void) } else { reattach_wait = 0; } - + exception_seq = 0; do { kdp_hdr_t aligned_hdr, *hdr = &aligned_hdr; - + while (!pkt.input) { if (kdp_flag & KDP_GETC_ENA) { - switch(kdp_getc()) { + switch (kdp_getc()) { case 'c': printf("Continuing...\n"); return; @@ -1233,7 +1273,7 @@ kdp_connection_wait(void) break; default: break; - } + } } kdp_poll(); } @@ -1248,46 +1288,48 @@ kdp_connection_wait(void) /* should not return! 
*/ } if (((hdr->request == KDP_CONNECT) || (hdr->request == KDP_REATTACH)) && - !hdr->is_reply && (hdr->seq == exception_seq)) { - if (kdp_packet((unsigned char *)&pkt.data[pkt.off], - (int *)&pkt.len, - (unsigned short *)&reply_port)) - kdp_reply(reply_port, FALSE); - if (hdr->request == KDP_REATTACH) { + !hdr->is_reply && (hdr->seq == exception_seq)) { + if (kdp_packet((unsigned char *)&pkt.data[pkt.off], + (int *)&pkt.len, + (unsigned short *)&reply_port)) { + kdp_reply(reply_port, FALSE); + } + if (hdr->request == KDP_REATTACH) { reattach_wait = 0; - hdr->request=KDP_DISCONNECT; + hdr->request = KDP_DISCONNECT; exception_seq = 0; } } pkt.input = FALSE; } while (!kdp.is_conn); - - if (current_debugger == KDP_CUR_DB) - active_debugger=1; + + if (current_debugger == KDP_CUR_DB) { + active_debugger = 1; + } printf("Connected to remote debugger.\n"); kprintf("Connected to remote debugger.\n"); } static void kdp_send_exception( - unsigned int exception, - unsigned int code, - unsigned int subcode -) + unsigned int exception, + unsigned int code, + unsigned int subcode + ) { - unsigned short remote_port; - unsigned int timeout_count = 100; - unsigned int poll_timeout; + unsigned short remote_port; + unsigned int timeout_count = 100; + unsigned int poll_timeout; do { - pkt.off = sizeof (struct kdp_ether_header) + sizeof (struct kdp_udpiphdr); + pkt.off = sizeof(struct kdp_ether_header) + sizeof(struct kdp_udpiphdr); kdp_exception((unsigned char *)&pkt.data[pkt.off], - (int *)&pkt.len, - (unsigned short *)&remote_port, - (unsigned int)exception, - (unsigned int)code, - (unsigned int)subcode); + (int *)&pkt.len, + (unsigned short *)&remote_port, + (unsigned int)exception, + (unsigned int)code, + (unsigned int)subcode); kdp_send(remote_port); @@ -1305,31 +1347,33 @@ kdp_send_exception( pkt.input = FALSE; - if (kdp.exception_ack_needed) + if (kdp.exception_ack_needed) { kdp_us_spin(250000); - + } } while (kdp.exception_ack_needed && timeout_count--); if (kdp.exception_ack_needed) { // give up & disconnect printf("kdp: exception ack timeout\n"); - if (current_debugger == KDP_CUR_DB) - active_debugger=0; + if (current_debugger == KDP_CUR_DB) { + active_debugger = 0; + } kdp_reset(); } } -static void +static void kdp_debugger_loop( - unsigned int exception, - unsigned int code, - unsigned int subcode, - void *saved_state) + unsigned int exception, + unsigned int code, + unsigned int subcode, + void *saved_state) { - int index; + int index; - if (saved_state == 0) + if (saved_state == 0) { printf("kdp_raise_exception with NULL state\n"); + } index = exception; if (exception != EXC_BREAKPOINT) { @@ -1337,8 +1381,8 @@ kdp_debugger_loop( index = 0; } printf("%s exception (%x,%x,%x)\n", - exception_message[index], - exception, code, subcode); + exception_message[index], + exception, code, subcode); } kdp_sync_cache(); @@ -1350,29 +1394,32 @@ kdp_debugger_loop( kdp.kdp_cpu = cpu_number(); kdp.kdp_thread = current_thread(); - if (kdp_en_setmode) + if (kdp_en_setmode) { (*kdp_en_setmode)(TRUE); /* enabling link mode */ - - if (pkt.input) + } + if (pkt.input) { kdp_panic("kdp_raise_exception"); + } if (((kdp_flag & KDP_PANIC_DUMP_ENABLED) - || (kdp_flag & PANIC_LOG_DUMP)) + || (kdp_flag & PANIC_LOG_DUMP)) && panic_active()) { kdp_panic_dump(); - if (kdp_flag & REBOOT_POST_CORE && dumped_kernel_core()) + if (kdp_flag & REBOOT_POST_CORE && dumped_kernel_core()) { kdp_machine_reboot(); + } } else { if ((kdp_flag & PANIC_CORE_ON_NMI) && !panic_active() - && !kdp.is_conn) { - + && !kdp.is_conn) { 
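
Reviewer note: the dump-policy branches rebraced above and below reduce to a small decision: reboot when REBOOT_POST_CORE is set and a core was actually written, otherwise stay in the debugger only if DBG_POST_CORE asks for it. A hedged sketch with made-up flag bits; the real bit assignments live in the debug headers and are not reproduced here:

#include <stdbool.h>

#define F_REBOOT_POST_CORE 0x1   /* illustrative bits only */
#define F_DBG_POST_CORE    0x2

enum after_dump { AD_REBOOT, AD_ENTER_DEBUGGER, AD_RESUME };

/* What to do once a panic/NMI core dump attempt has finished. */
static enum after_dump
post_dump_action(unsigned flags, bool core_dumped)
{
	if ((flags & F_REBOOT_POST_CORE) && core_dumped) {
		return AD_REBOOT;
	}
	return (flags & F_DBG_POST_CORE) ? AD_ENTER_DEBUGGER : AD_RESUME;
}
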
disableConsoleOutput = FALSE; kdp_panic_dump(); - if (kdp_flag & REBOOT_POST_CORE && dumped_kernel_core()) + if (kdp_flag & REBOOT_POST_CORE && dumped_kernel_core()) { kdp_machine_reboot(); + } - if (!(kdp_flag & DBG_POST_CORE)) + if (!(kdp_flag & DBG_POST_CORE)) { goto exit_debugger_loop; + } } } @@ -1389,10 +1436,9 @@ again: } if (kdp.is_conn) { - kdp.is_halted = TRUE; /* XXX */ + kdp.is_halted = TRUE; /* XXX */ kdp_handler(saved_state); - if (!kdp.is_conn) - { + if (!kdp.is_conn) { kdp_remove_all_breakpoints(); printf("Remote debugger disconnected.\n"); } @@ -1405,8 +1451,9 @@ again: if (1 == kdp_trigger_core_dump) { kdp_flag |= KDP_PANIC_DUMP_ENABLED; kdp_panic_dump(); - if (kdp_flag & REBOOT_POST_CORE && dumped_kernel_core()) + if (kdp_flag & REBOOT_POST_CORE && dumped_kernel_core()) { kdp_machine_reboot(); + } kdp_trigger_core_dump = 0; } @@ -1429,12 +1476,14 @@ again: kdp_sync_cache(); - if (reattach_wait == 1) + if (reattach_wait == 1) { goto again; + } exit_debugger_loop: - if (kdp_en_setmode) + if (kdp_en_setmode) { (*kdp_en_setmode)(FALSE); /* link cleanup */ + } } void @@ -1449,21 +1498,21 @@ kdp_reset(void) } struct corehdr * -create_panic_header(unsigned int request, const char *corename, - unsigned length, unsigned int block) +create_panic_header(unsigned int request, const char *corename, + unsigned length, unsigned int block) { - struct kdp_udpiphdr aligned_ui, *ui = &aligned_ui; - struct kdp_ip aligned_ip, *ip = &aligned_ip; - struct kdp_ether_header *eh; - struct corehdr *coreh; - const char *mode = "octet"; - char modelen = strlen(mode) + 1; + struct kdp_udpiphdr aligned_ui, *ui = &aligned_ui; + struct kdp_ip aligned_ip, *ip = &aligned_ip; + struct kdp_ether_header *eh; + struct corehdr *coreh; + const char *mode = "octet"; + char modelen = strlen(mode) + 1; - size_t fmask_size = sizeof(KDP_FEATURE_MASK_STRING) + sizeof(kdp_crashdump_feature_mask); + size_t fmask_size = sizeof(KDP_FEATURE_MASK_STRING) + sizeof(kdp_crashdump_feature_mask); - pkt.off = sizeof (struct kdp_ether_header); - pkt.len = (unsigned int)(length + ((request == KDP_WRQ) ? modelen + fmask_size : 0) + - (corename ? (strlen(corename) + 1 ): 0) + sizeof(struct corehdr)); + pkt.off = sizeof(struct kdp_ether_header); + pkt.len = (unsigned int)(length + ((request == KDP_WRQ) ? modelen + fmask_size : 0) + + (corename ? 
(strlen(corename) + 1): 0) + sizeof(struct corehdr)); #if DO_ALIGN bcopy((char *)&pkt.data[pkt.off], (char *)ui, sizeof(*ui)); @@ -1473,7 +1522,7 @@ create_panic_header(unsigned int request, const char *corename, ui->ui_next = ui->ui_prev = 0; ui->ui_x1 = 0; ui->ui_pr = IPPROTO_UDP; - ui->ui_len = htons((u_short)pkt.len + sizeof (struct kdp_udphdr)); + ui->ui_len = htons((u_short)pkt.len + sizeof(struct kdp_udphdr)); ui->ui_src.s_addr = (uint32_t)kdp_current_ip_address; /* Already in network byte order via inet_aton() */ ui->ui_dst.s_addr = panic_server_ip; @@ -1487,31 +1536,31 @@ create_panic_header(unsigned int request, const char *corename, #else ip = (struct kdp_ip *)&pkt.data[pkt.off]; #endif - ip->ip_len = htons(sizeof (struct kdp_udpiphdr) + pkt.len); + ip->ip_len = htons(sizeof(struct kdp_udpiphdr) + pkt.len); ip->ip_v = IPVERSION; ip->ip_id = htons(ip_id++); - ip->ip_hl = sizeof (struct kdp_ip) >> 2; + ip->ip_hl = sizeof(struct kdp_ip) >> 2; ip->ip_ttl = udp_ttl; ip->ip_sum = 0; ip->ip_sum = htons(~ip_sum((unsigned char *)ip, ip->ip_hl)); #if DO_ALIGN bcopy((char *)ip, (char *)&pkt.data[pkt.off], sizeof(*ip)); #endif - - pkt.len += (unsigned int)sizeof (struct kdp_udpiphdr); - pkt.off += (unsigned int)sizeof (struct kdp_udpiphdr); - + pkt.len += (unsigned int)sizeof(struct kdp_udpiphdr); + + pkt.off += (unsigned int)sizeof(struct kdp_udpiphdr); + coreh = (struct corehdr *) &pkt.data[pkt.off]; coreh->th_opcode = htons((u_short)request); - + if (request == KDP_WRQ) { char *cp; cp = coreh->th_u.tu_rpl; - cp += strlcpy (cp, corename, KDP_MAXPACKET); + cp += strlcpy(cp, corename, KDP_MAXPACKET); *cp++ = '\0'; - cp += strlcpy (cp, mode, KDP_MAXPACKET - strlen(corename)); + cp += strlcpy(cp, mode, KDP_MAXPACKET - strlen(corename)); *cp++ = '\0'; cp += strlcpy(cp, KDP_FEATURE_MASK_STRING, sizeof(KDP_FEATURE_MASK_STRING)); *cp++ = '\0'; /* Redundant */ @@ -1524,15 +1573,15 @@ create_panic_header(unsigned int request, const char *corename, coreh->th_block = htonl((unsigned int) block); } - pkt.off -= (unsigned int)sizeof (struct kdp_udpiphdr); - pkt.off -= (unsigned int)sizeof (struct kdp_ether_header); + pkt.off -= (unsigned int)sizeof(struct kdp_udpiphdr); + pkt.off -= (unsigned int)sizeof(struct kdp_ether_header); eh = (struct kdp_ether_header *)&pkt.data[pkt.off]; enaddr_copy(&kdp_current_mac_address, eh->ether_shost); enaddr_copy(&destination_mac, eh->ether_dhost); eh->ether_type = htons(ETHERTYPE_IP); - - pkt.len += (unsigned int)sizeof (struct kdp_ether_header); + + pkt.len += (unsigned int)sizeof(struct kdp_ether_header); return coreh; } @@ -1542,18 +1591,18 @@ kdp_send_crashdump_seek(char *corename, uint64_t seek_off) int panic_error; if (kdp_feature_large_crashdumps) { - panic_error = kdp_send_crashdump_pkt(KDP_SEEK, corename, - sizeof(seek_off), - &seek_off); + panic_error = kdp_send_crashdump_pkt(KDP_SEEK, corename, + sizeof(seek_off), + &seek_off); } else { uint32_t off = (uint32_t) seek_off; - panic_error = kdp_send_crashdump_pkt(KDP_SEEK, corename, - sizeof(off), &off); + panic_error = kdp_send_crashdump_pkt(KDP_SEEK, corename, + sizeof(off), &off); } if (panic_error < 0) { - printf ("kdp_send_crashdump_pkt failed with error %d\n", - panic_error); + printf("kdp_send_crashdump_pkt failed with error %d\n", + panic_error); return panic_error; } @@ -1562,7 +1611,7 @@ kdp_send_crashdump_seek(char *corename, uint64_t seek_off) int kdp_send_crashdump_data(unsigned int request, char *corename, - uint64_t length, void * txstart) + uint64_t length, void * txstart) { int panic_error = 
0; @@ -1570,12 +1619,14 @@ kdp_send_crashdump_data(unsigned int request, char *corename, uint64_t chunk = MIN(kdp_crashdump_pkt_size, length); panic_error = kdp_send_crashdump_pkt(request, corename, chunk, - txstart); + txstart); if (panic_error < 0) { - printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error); + printf("kdp_send_crashdump_pkt failed with error %d\n", panic_error); return panic_error; } - if (!txstart) break; + if (!txstart) { + break; + } txstart = (void *)(((uintptr_t) txstart) + chunk); length -= chunk; } @@ -1585,8 +1636,8 @@ kdp_send_crashdump_data(unsigned int request, char *corename, uint32_t kdp_crashdump_short_pkt; int -kdp_send_crashdump_pkt(unsigned int request, char *corename, - uint64_t length, void *panic_data) +kdp_send_crashdump_pkt(unsigned int request, char *corename, + uint64_t length, void *panic_data) { int poll_count; struct corehdr *th = NULL; @@ -1600,25 +1651,27 @@ kdp_send_crashdump_pkt(unsigned int request, char *corename, tretries = rretries = 0; poll_count = KDP_CRASHDUMP_POLL_COUNT; pkt.off = pkt.len = 0; - if (request == KDP_WRQ) /* longer timeout for initial request */ + if (request == KDP_WRQ) { /* longer timeout for initial request */ poll_count += 1000; + } TRANSMIT_RETRY: tretries++; - if (tretries >=15) { + if (tretries >= 15) { /* The crashdump server is unreachable for some reason. This could be a network * issue or, if we've been especially unfortunate, we've hit Radar 2760413, * which is a long standing problem with the IOKit polled mode network driver * shim which can prevent transmits/receives completely. */ - printf ("Cannot contact panic server, timing out.\n"); - return (-3); + printf("Cannot contact panic server, timing out.\n"); + return -3; } - if (tretries > 2) + if (tretries > 2) { printf("TX retry #%d ", tretries ); - + } + th = create_panic_header(request, corename, (unsigned)length, panic_block); if (request == KDP_DATA) { @@ -1627,8 +1680,8 @@ TRANSMIT_RETRY: * bits aren't confusing. 
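
Reviewer note: the comment above ends by explaining why short KDP_DATA chunks are padded, and the memset in the next hunk fills the tail with 'Y' so stale buffer bytes cannot masquerade as dump data. That operation isolated as a hedged sketch (helper name illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Pad the unused tail of a fixed-size crashdump packet with a marker byte. */
static void
pad_short_chunk(uint8_t *pkt_data, size_t length, size_t pkt_size)
{
	if (length < pkt_size) {
		memset(pkt_data + length, 'Y', pkt_size - length);
	}
}
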
*/ if (length < kdp_crashdump_pkt_size) { kdp_crashdump_short_pkt++; - memset(th->th_data + length, 'Y', - kdp_crashdump_pkt_size - (uint32_t) length); + memset(th->th_data + length, 'Y', + kdp_crashdump_pkt_size - (uint32_t) length); } if (!kdp_machine_vm_read((mach_vm_address_t)(uintptr_t)panic_data, (caddr_t) th->th_data, length)) { @@ -1641,12 +1694,12 @@ TRANSMIT_RETRY: } } } - } - else if (request == KDP_SEEK) { - if (kdp_feature_large_crashdumps) + } else if (request == KDP_SEEK) { + if (kdp_feature_large_crashdumps) { *(uint64_t *) th->th_data = OSSwapHostToBigInt64((*(uint64_t *) panic_data)); - else + } else { *(unsigned int *) th->th_data = htonl(*(unsigned int *) panic_data); + } } kdp_send_data(&pkt.data[pkt.off], pkt.len); @@ -1660,17 +1713,16 @@ RECEIVE_RETRY: if (pkt.input) { pkt.input = FALSE; - + th = (struct corehdr *) &pkt.data[pkt.off]; if (request == KDP_WRQ) { uint16_t opcode64 = ntohs(th->th_opcode); - uint16_t features64 = (opcode64 & 0xFF00)>>8; + uint16_t features64 = (opcode64 & 0xFF00) >> 8; if ((opcode64 & 0xFF) == KDP_ACK) { kdp_feature_large_crashdumps = features64 & KDP_FEATURE_LARGE_CRASHDUMPS; if (features64 & KDP_FEATURE_LARGE_PKT_SIZE) { kdp_feature_large_pkt_size = 1; - } - else { + } else { kdp_feature_large_pkt_size = 0; kdp_crashdump_pkt_size = 512; } @@ -1686,24 +1738,25 @@ RECEIVE_RETRY: goto TRANSMIT_RETRY; } else if (ntohl(th->th_block) == (panic_block - 1)) { printf("RX retry "); - if (++rretries > 1) + if (++rretries > 1) { goto TRANSMIT_RETRY; - else + } else { goto RECEIVE_RETRY; + } } } } else if (!flag_panic_dump_in_progress) { /* we received a debugging packet, bail*/ - printf("Received a debugger packet,transferring control to debugger\n"); - /* Configure that if not set ..*/ - kdp_flag |= DBG_POST_CORE; - return (-2); - } else { /* We timed out */ - if (0 == poll_count) { - poll_count = 1000; - kdp_us_spin ((tretries%4) * panic_timeout); /* capped linear backoff */ - goto TRANSMIT_RETRY; - } + printf("Received a debugger packet,transferring control to debugger\n"); + /* Configure that if not set ..*/ + kdp_flag |= DBG_POST_CORE; + return -2; + } else { /* We timed out */ + if (0 == poll_count) { + poll_count = 1000; + kdp_us_spin((tretries % 4) * panic_timeout); /* capped linear backoff */ + goto TRANSMIT_RETRY; } + } if (!(++panic_block % SBLOCKSZ)) { uint64_t ctime; @@ -1711,10 +1764,12 @@ RECEIVE_RETRY: ctime = mach_absolute_time(); kdp_superblock_dump_time = ctime - kdp_superblock_dump_start_time; kdp_superblock_dump_start_time = ctime; - if (kdp_superblock_dump_time > kdp_max_superblock_dump_time) + if (kdp_superblock_dump_time > kdp_max_superblock_dump_time) { kdp_max_superblock_dump_time = kdp_superblock_dump_time; - if (kdp_superblock_dump_time < kdp_min_superblock_dump_time) + } + if (kdp_superblock_dump_time < kdp_min_superblock_dump_time) { kdp_min_superblock_dump_time = kdp_superblock_dump_time; + } } if (request == KDP_EOF) { @@ -1726,10 +1781,10 @@ RECEIVE_RETRY: return KERN_SUCCESS; } -static int -isdigit (char c) +static int +isdigit(char c) { - return ((c > 47) && (c < 58)); + return (c > 47) && (c < 58); } /* Horrid hack to extract xnu version if possible - a much cleaner approach @@ -1741,7 +1796,7 @@ isdigit (char c) /* 2006: Incorporated a change from Darwin user P. Lovell to extract * the minor kernel version numbers from the version string. 
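
Reviewer note: the parser reformatted in the next hunk scans past the "xnu-" marker, keeps digits and dots, and trims any trailing period. A self-contained user-space re-creation under those assumptions (helper name illustrative, buffer handling simplified):

#include <ctype.h>
#include <string.h>

/* Copy "xnu-" plus the dotted numeric tail of a kernel version string.
 * Returns 0 on success, -1 when no "xnu-" marker is present. */
static int
extract_xnu_version(const char *version, char *out, size_t outsz)
{
	const char *p = strstr(version, "xnu-");
	size_t i = 0;

	if (p == NULL || outsz == 0) {
		return -1;
	}
	while (i + 1 < outsz && p[i] != '\0' &&
	    (i < 4 || isdigit((unsigned char)p[i]) || p[i] == '.')) {
		out[i] = p[i];
		i++;
	}
	if (i > 0 && out[i - 1] == '.') {
		i--;            /* drop a trailing period, as the kernel does */
	}
	out[i] = '\0';
	return 0;
}

Feeding it a hypothetical banner such as "root:xnu-1234.5.6~7/RELEASE_ARM64" should yield "xnu-1234.5.6".
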
*/ -static int +static int kdp_get_xnu_version(char *versionbuf) { char *versionpos; @@ -1755,14 +1810,16 @@ kdp_get_xnu_version(char *versionbuf) versionpos = strnstr(versionbuf, "xnu-", 115); if (versionpos) { strncpy(vstr, versionpos, sizeof(vstr)); - vstr[sizeof(vstr)-1] = '\0'; + vstr[sizeof(vstr) - 1] = '\0'; vptr = vstr + 4; /* Begin after "xnu-" */ - while (*vptr && (isdigit(*vptr) || *vptr == '.')) + while (*vptr && (isdigit(*vptr) || *vptr == '.')) { vptr++; + } *vptr = '\0'; /* Remove trailing period, if any */ - if (*(--vptr) == '.') + if (*(--vptr) == '.') { *vptr = '\0'; + } retval = 0; } } @@ -1771,9 +1828,9 @@ kdp_get_xnu_version(char *versionbuf) } void -kdp_set_dump_info(const uint32_t flags, const char *filename, - const char *destipstr, const char *routeripstr, - const uint32_t port) +kdp_set_dump_info(const uint32_t flags, const char *filename, + const char *destipstr, const char *routeripstr, + const uint32_t port) { uint32_t cmd; @@ -1794,41 +1851,46 @@ kdp_set_dump_info(const uint32_t flags, const char *filename, corename_specified = FALSE; } - if (port) + if (port) { panicd_port = port; + } - /* on a disconnect, should we stay in KDP or not? */ - noresume_on_disconnect = (flags & KDP_DUMPINFO_NORESUME) ? 1 : 0; + /* on a disconnect, should we stay in KDP or not? */ + noresume_on_disconnect = (flags & KDP_DUMPINFO_NORESUME) ? 1 : 0; - if ((flags & KDP_DUMPINFO_DUMP) == 0) + if ((flags & KDP_DUMPINFO_DUMP) == 0) { return; + } /* the rest of the commands can modify kdp_flags */ cmd = flags & KDP_DUMPINFO_MASK; - if (cmd == KDP_DUMPINFO_DISABLE) { + if (cmd == KDP_DUMPINFO_DISABLE) { kdp_flag &= ~KDP_PANIC_DUMP_ENABLED; panicd_specified = 0; kdp_trigger_core_dump = 0; return; - } + } kdp_flag &= ~REBOOT_POST_CORE; - if (flags & KDP_DUMPINFO_REBOOT) - kdp_flag |= REBOOT_POST_CORE; + if (flags & KDP_DUMPINFO_REBOOT) { + kdp_flag |= REBOOT_POST_CORE; + } kdp_flag &= ~PANIC_LOG_DUMP; - if (cmd == KDP_DUMPINFO_PANICLOG) - kdp_flag |= PANIC_LOG_DUMP; - + if (cmd == KDP_DUMPINFO_PANICLOG) { + kdp_flag |= PANIC_LOG_DUMP; + } + kdp_flag &= ~SYSTEM_LOG_DUMP; - if (cmd == KDP_DUMPINFO_SYSTEMLOG) - kdp_flag |= SYSTEM_LOG_DUMP; + if (cmd == KDP_DUMPINFO_SYSTEMLOG) { + kdp_flag |= SYSTEM_LOG_DUMP; + } /* trigger a dump */ kdp_flag |= DBG_POST_CORE; - flag_dont_abort_panic_dump = (flags & KDP_DUMPINFO_NOINTR) ? - TRUE : FALSE; + flag_dont_abort_panic_dump = (flags & KDP_DUMPINFO_NOINTR) ? 
+ TRUE : FALSE; reattach_wait = 1; disableConsoleOutput = 0; @@ -1838,41 +1900,46 @@ kdp_set_dump_info(const uint32_t flags, const char *filename, void kdp_get_dump_info(kdp_dumpinfo_reply_t *rp) { - if (panicd_specified) + if (panicd_specified) { strlcpy(rp->destip, panicd_ip_str, sizeof(rp->destip)); - else + } else { rp->destip[0] = '\0'; + } - if (router_specified) + if (router_specified) { strlcpy(rp->routerip, router_ip_str, sizeof(rp->routerip)); - else + } else { rp->routerip[0] = '\0'; + } - if (corename_specified) + if (corename_specified) { strlcpy(rp->name, corename_str, sizeof(rp->name)); - else + } else { rp->name[0] = '\0'; + } rp->port = panicd_port; rp->type = 0; - if (!panicd_specified) + if (!panicd_specified) { rp->type |= KDP_DUMPINFO_DISABLE; - else if (kdp_flag & PANIC_LOG_DUMP) + } else if (kdp_flag & PANIC_LOG_DUMP) { rp->type |= KDP_DUMPINFO_PANICLOG; - else + } else { rp->type |= KDP_DUMPINFO_CORE; + } - if (noresume_on_disconnect) + if (noresume_on_disconnect) { rp->type |= KDP_DUMPINFO_NORESUME; + } } /* Primary dispatch routine for the system dump */ -void +void kdp_panic_dump(void) { char coreprefix[10]; @@ -1880,13 +1947,13 @@ kdp_panic_dump(void) int panic_error; uint64_t abstime; - uint32_t current_ip = ntohl((uint32_t)kdp_current_ip_address); + uint32_t current_ip = ntohl((uint32_t)kdp_current_ip_address); if (flag_panic_dump_in_progress) { kdb_printf("System dump aborted.\n"); goto panic_dump_exit; } - + printf("Entering system dump routine\n"); if (!kdp_en_recv_pkt || !kdp_en_send_pkt) { @@ -1901,35 +1968,38 @@ kdp_panic_dump(void) flag_panic_dump_in_progress = TRUE; - if (pkt.input) + if (pkt.input) { kdp_panic("kdp_panic_dump: unexpected pending input packet"); + } kdp_get_xnu_version((char *) &pkt.data[0]); if (!corename_specified) { coresuffix[0] = 0; /* Panic log bit takes precedence over core dump bit */ - if ((debugger_panic_str != (char *) 0) && (kdp_flag & PANIC_LOG_DUMP)) + if ((debugger_panic_str != (char *) 0) && (kdp_flag & PANIC_LOG_DUMP)) { strlcpy(coreprefix, "paniclog", sizeof(coreprefix)); - else if (kdp_flag & SYSTEM_LOG_DUMP) + } else if (kdp_flag & SYSTEM_LOG_DUMP) { strlcpy(coreprefix, "systemlog", sizeof(coreprefix)); - else { + } else { strlcpy(coreprefix, "core", sizeof(coreprefix)); - if (!kdp_corezip_disabled) strlcpy(coresuffix, ".gz", sizeof(coresuffix)); + if (!kdp_corezip_disabled) { + strlcpy(coresuffix, ".gz", sizeof(coresuffix)); + } } abstime = mach_absolute_time(); pkt.data[20] = '\0'; - snprintf (corename_str, - sizeof(corename_str), - "%s-%s-%d.%d.%d.%d-%x%s", - coreprefix, &pkt.data[0], - (current_ip & 0xff000000) >> 24, - (current_ip & 0xff0000) >> 16, - (current_ip & 0xff00) >> 8, - (current_ip & 0xff), - (unsigned int) (abstime & 0xffffffff), - coresuffix); + snprintf(corename_str, + sizeof(corename_str), + "%s-%s-%d.%d.%d.%d-%x%s", + coreprefix, &pkt.data[0], + (current_ip & 0xff000000) >> 24, + (current_ip & 0xff0000) >> 16, + (current_ip & 0xff00) >> 8, + (current_ip & 0xff), + (unsigned int) (abstime & 0xffffffff), + coresuffix); } if (0 == inet_aton(panicd_ip_str, (struct kdp_in_addr *) &panic_server_ip)) { @@ -1944,7 +2014,9 @@ kdp_panic_dump(void) kdb_printf("Resolved %s's (or proxy's) link level address\n", panicd_ip_str); destination_mac = temp_mac; } else { - if (!flag_panic_dump_in_progress) goto panic_dump_exit; + if (!flag_panic_dump_in_progress) { + goto panic_dump_exit; + } if (router_specified) { if (0 == inet_aton(router_ip_str, (struct kdp_in_addr *) &parsed_router_ip)) { 
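
Reviewer note: this stretch of kdp_panic_dump() tries to resolve the panic server's link-level address directly and, failing that, falls back to a configured router or proxy. The two-step strategy as a hedged sketch; the callback stands in for the inet_aton()/ARP plumbing and is not xnu's API:

#include <stdbool.h>
#include <stdint.h>

typedef bool (*resolve_fn)(uint32_t ip, uint8_t mac_out[6]);

/* Resolve a MAC for the dump target: server first, then optional router. */
static bool
resolve_dump_target(uint32_t server_ip, bool have_router, uint32_t router_ip,
    resolve_fn resolve, uint8_t mac_out[6])
{
	if (resolve(server_ip, mac_out)) {
		return true;    /* direct or proxy-ARP answer */
	}
	if (have_router && resolve(router_ip, mac_out)) {
		return true;    /* transmit via the router instead */
	}
	return false;
}
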
kdb_printf("inet_aton() failed interpreting %s as an IP\n", router_ip_str); @@ -1958,7 +2030,9 @@ kdp_panic_dump(void) } } - if (!flag_panic_dump_in_progress) goto panic_dump_exit; + if (!flag_panic_dump_in_progress) { + goto panic_dump_exit; + } kdb_printf("Transmitting packets to link level address: %02x:%02x:%02x:%02x:%02x:%02x\n", destination_mac.ether_addr_octet[0] & 0xff, @@ -1969,34 +2043,34 @@ kdp_panic_dump(void) destination_mac.ether_addr_octet[5] & 0xff); kdb_printf("Kernel map size is %llu\n", (unsigned long long) get_vmmap_size(kernel_map)); - kdb_printf("Sending write request for %s\n", corename_str); + kdb_printf("Sending write request for %s\n", corename_str); - if ((panic_error = kdp_send_crashdump_pkt(KDP_WRQ, corename_str, 0 , NULL)) < 0) { - kdb_printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error); + if ((panic_error = kdp_send_crashdump_pkt(KDP_WRQ, corename_str, 0, NULL)) < 0) { + kdb_printf("kdp_send_crashdump_pkt failed with error %d\n", panic_error); goto panic_dump_exit; } /* Just the panic log requested */ if ((debugger_panic_str != (char *) 0) && (kdp_flag & PANIC_LOG_DUMP)) { kdb_printf_unbuffered("Transmitting panic log, please wait: "); - kdp_send_crashdump_data(KDP_DATA, corename_str, - debug_buf_ptr - debug_buf_base, - debug_buf_base); - kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0)); + kdp_send_crashdump_data(KDP_DATA, corename_str, + debug_buf_ptr - debug_buf_base, + debug_buf_base); + kdp_send_crashdump_pkt(KDP_EOF, NULL, 0, ((void *) 0)); printf("Please file a bug report on this panic, if possible.\n"); goto panic_dump_exit; } - + /* maybe we wanted the systemlog */ - if (kdp_flag & SYSTEM_LOG_DUMP) { + if (kdp_flag & SYSTEM_LOG_DUMP) { long start_off = msgbufp->msg_bufx; - long len; + long len; kdb_printf_unbuffered("Transmitting system log, please wait: "); if (start_off >= msgbufp->msg_bufr) { len = msgbufp->msg_size - start_off; - kdp_send_crashdump_data(KDP_DATA, corename_str, len, - msgbufp->msg_bufc + start_off); + kdp_send_crashdump_data(KDP_DATA, corename_str, len, + msgbufp->msg_bufc + start_off); /* seek to remove trailing bytes */ kdp_send_crashdump_seek(corename_str, len); start_off = 0; @@ -2005,12 +2079,12 @@ kdp_panic_dump(void) if (start_off != msgbufp->msg_bufr) { len = msgbufp->msg_bufr - start_off; kdp_send_crashdump_data(KDP_DATA, corename_str, len, - msgbufp->msg_bufc + start_off); + msgbufp->msg_bufc + start_off); } - kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0)); + kdp_send_crashdump_pkt(KDP_EOF, NULL, 0, ((void *) 0)); goto panic_dump_exit; - } + } /* We want a core dump if we're here */ kern_dump(KERN_DUMP_NET); @@ -2027,7 +2101,7 @@ begin_panic_transfer(void) flag_panic_dump_in_progress = TRUE; } -void +void abort_panic_transfer(void) { flag_panic_dump_in_progress = FALSE; @@ -2046,21 +2120,21 @@ kdp_serial_send(void *rpkt, unsigned int rpkt_len) kdp_serialize_packet((unsigned char *)rpkt, rpkt_len, pal_serial_putc_nocr); } -static void +static void kdp_serial_receive(void *rpkt, unsigned int *rpkt_len, unsigned int timeout) { int readkar; uint64_t now, deadline; - + clock_interval_to_deadline(timeout, 1000 * 1000 /* milliseconds */, &deadline); // printf("rx\n"); - for(clock_get_uptime(&now); now < deadline; clock_get_uptime(&now)) { + for (clock_get_uptime(&now); now < deadline; clock_get_uptime(&now)) { readkar = pal_serial_getc(); - if(readkar >= 0) { + if (readkar >= 0) { unsigned char *packet; // printf("got char %02x\n", readkar); - if((packet = 
kdp_unserialize_packet(readkar,rpkt_len))) { + if ((packet = kdp_unserialize_packet(readkar, rpkt_len))) { memcpy(rpkt, packet, *rpkt_len); return; } @@ -2072,11 +2146,13 @@ kdp_serial_receive(void *rpkt, unsigned int *rpkt_len, unsigned int timeout) static boolean_t kdp_serial_setmode(boolean_t active) { - if (active == FALSE) /* leaving KDP */ + if (active == FALSE) { /* leaving KDP */ return TRUE; + } - if (!needs_serial_init) + if (!needs_serial_init) { return TRUE; + } pal_serial_init(); needs_serial_init = FALSE; @@ -2084,7 +2160,7 @@ kdp_serial_setmode(boolean_t active) } -static void +static void kdp_serial_callout(__unused void *arg, kdp_event_t event) { /* @@ -2098,14 +2174,13 @@ kdp_serial_callout(__unused void *arg, kdp_event_t event) * reinitialization. */ - switch (event) - { - case KDP_EVENT_PANICLOG: - case KDP_EVENT_ENTER: - break; - case KDP_EVENT_EXIT: - needs_serial_init = TRUE; - break; + switch (event) { + case KDP_EVENT_PANICLOG: + case KDP_EVENT_ENTER: + break; + case KDP_EVENT_EXIT: + needs_serial_init = TRUE; + break; } } @@ -2131,16 +2206,17 @@ kdp_init(void) #if defined(__x86_64__) || defined(__arm__) || defined(__arm64__) if (vm_kernel_slide) { - char KASLR_stext[19]; + char KASLR_stext[19]; strlcat(kdp_kernelversion_string, "; stext=", sizeof(kdp_kernelversion_string)); snprintf(KASLR_stext, sizeof(KASLR_stext), "%p", (void *) vm_kernel_stext); strlcat(kdp_kernelversion_string, KASLR_stext, sizeof(kdp_kernelversion_string)); } #endif - if (debug_boot_arg & DB_REBOOT_POST_CORE) + if (debug_boot_arg & DB_REBOOT_POST_CORE) { kdp_flag |= REBOOT_POST_CORE; -#if defined(__x86_64__) + } +#if defined(__x86_64__) kdp_machine_init(); #endif @@ -2157,13 +2233,15 @@ kdp_init(void) boolean_t kdp_not_serial = kdp_match_name_found ? (strncmp(kdpname, "serial", sizeof(kdpname))) : TRUE; #if CONFIG_EMBEDDED - //respect any custom debugger boot-args - if(kdp_match_name_found && kdp_not_serial) + //respect any custom debugger boot-args + if (kdp_match_name_found && kdp_not_serial) { return; + } #else /* CONFIG_EMBEDDED */ - // serial must be explicitly requested - if(!kdp_match_name_found || kdp_not_serial) + // serial must be explicitly requested + if (!kdp_match_name_found || kdp_not_serial) { return; + } #endif /* CONFIG_EMBEDDED */ #if CONFIG_EMBEDDED @@ -2177,9 +2255,9 @@ kdp_init(void) kprintf("Initializing serial KDP\n"); kdp_register_callout(kdp_serial_callout, NULL); - kdp_register_link(NULL, kdp_serial_setmode); + kdp_register_link(NULL, kdp_serial_setmode); kdp_register_send_receive(kdp_serial_send, kdp_serial_receive); - + /* fake up an ip and mac for early serial debugging */ macaddr.ether_addr_octet[0] = 's'; macaddr.ether_addr_octet[1] = 'e'; @@ -2189,13 +2267,13 @@ kdp_init(void) macaddr.ether_addr_octet[5] = 'l'; ipaddr.s_addr = KDP_SERIAL_IPADDR; kdp_set_ip_and_mac_addresses(&ipaddr, &macaddr); - + #endif /* CONFIG_SERIAL_KDP */ } #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ void -kdp_init(void) +kdp_init(void) { } #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ @@ -2215,21 +2293,25 @@ uint32_t kdp_stack_snapshot_bytes_traced(void); void kdp_register_send_receive(__unused void *send, __unused void *receive) -{} +{ +} void kdp_unregister_send_receive(__unused void *send, __unused void *receive) -{} +{ +} void * kdp_get_interface( void) { - return(void *)0; + return (void *)0; } unsigned int kdp_get_ip_address(void ) -{ return 0; } +{ + return 0; +} struct kdp_ether_addr kdp_get_mac_addr(void) @@ -2239,23 +2321,30 @@ kdp_get_mac_addr(void) void 
kdp_set_ip_and_mac_addresses( - __unused struct kdp_in_addr *ipaddr, - __unused struct kdp_ether_addr *macaddr) -{} + __unused struct kdp_in_addr *ipaddr, + __unused struct kdp_ether_addr *macaddr) +{ +} void kdp_set_gateway_mac(__unused void *gatewaymac) -{} +{ +} void kdp_set_interface(__unused void *ifp) -{} +{ +} -void kdp_register_link(__unused kdp_link_t link, __unused kdp_mode_t mode) -{} +void +kdp_register_link(__unused kdp_link_t link, __unused kdp_mode_t mode) +{ +} -void kdp_unregister_link(__unused kdp_link_t link, __unused kdp_mode_t mode) -{} +void +kdp_unregister_link(__unused kdp_link_t link, __unused kdp_mode_t mode) +{ +} #endif /* !(MACH_KDP && CONFIG_KDP_INTERACTIVE_DEBUGGING) */ @@ -2265,19 +2354,19 @@ extern __attribute__((noreturn)) void panic_spin_forever(void); __attribute__((noreturn)) void kdp_raise_exception( - __unused unsigned int exception, - __unused unsigned int code, - __unused unsigned int subcode, - __unused void *saved_state - ) + __unused unsigned int exception, + __unused unsigned int code, + __unused unsigned int subcode, + __unused void *saved_state + ) #else void kdp_raise_exception( - unsigned int exception, - unsigned int code, - unsigned int subcode, - void *saved_state - ) + unsigned int exception, + unsigned int code, + unsigned int subcode, + void *saved_state + ) #endif { #if CONFIG_EMBEDDED diff --git a/osfmk/kdp/kdp_udp.h b/osfmk/kdp/kdp_udp.h index 6c587d274..eb551eff2 100644 --- a/osfmk/kdp/kdp_udp.h +++ b/osfmk/kdp/kdp_udp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,13 +37,13 @@ #include struct kdp_in_addr { - uint32_t s_addr; + uint32_t s_addr; }; #define ETHER_ADDR_LEN 6 struct kdp_ether_addr { - u_char ether_addr_octet[ETHER_ADDR_LEN]; + u_char ether_addr_octet[ETHER_ADDR_LEN]; }; typedef struct kdp_ether_addr enet_addr_t; @@ -52,9 +52,9 @@ extern struct kdp_ether_addr kdp_get_mac_addr(void); unsigned int kdp_get_ip_address(void); struct kdp_ether_header { - u_char ether_dhost[ETHER_ADDR_LEN]; - u_char ether_shost[ETHER_ADDR_LEN]; - u_short ether_type; + u_char ether_dhost[ETHER_ADDR_LEN]; + u_char ether_shost[ETHER_ADDR_LEN]; + u_short ether_type; }; typedef struct kdp_ether_header ether_header_t; @@ -69,7 +69,7 @@ typedef struct kdp_ether_header ether_header_t; */ typedef uint32_t (*kdp_link_t)(void); typedef boolean_t (*kdp_mode_t)(boolean_t); -void kdp_register_link(kdp_link_t link, kdp_mode_t mode); -void kdp_unregister_link(kdp_link_t link, kdp_mode_t mode); +void kdp_register_link(kdp_link_t link, kdp_mode_t mode); +void kdp_unregister_link(kdp_link_t link, kdp_mode_t mode); #endif /* __KDP_UDP_H */ diff --git a/osfmk/kdp/ml/arm/kdp_machdep.c b/osfmk/kdp/ml/arm/kdp_machdep.c index 4e7fa0639..1e1cb028c 100644 --- a/osfmk/kdp/ml/arm/kdp_machdep.c +++ b/osfmk/kdp/ml/arm/kdp_machdep.c @@ -53,31 +53,31 @@ void halt_all_cpus(boolean_t); void kdp_call(void); int kdp_getc(void); int machine_trace_thread(thread_t thread, - char * tracepos, - char * tracebound, - int nframes, - boolean_t user_p, - boolean_t trace_fp, - uint32_t * thread_trace_flags); + char * tracepos, + char * tracebound, + int nframes, + boolean_t user_p, + boolean_t trace_fp, + uint32_t * thread_trace_flags); int machine_trace_thread64(thread_t thread, - char * tracepos, - char * tracebound, - int nframes, - boolean_t user_p, - boolean_t trace_fp, - uint32_t * thread_trace_flags, - uint64_t *sp); + char * tracepos, + char * tracebound, + int nframes, + boolean_t user_p, + boolean_t trace_fp, + uint32_t * thread_trace_flags, + uint64_t *sp); void kdp_trap(unsigned int, struct arm_saved_state * saved_state); extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags); extern void machine_trace_thread_clear_validation_cache(void); -extern vm_map_t kernel_map; +extern vm_map_t kernel_map; #if CONFIG_KDP_INTERACTIVE_DEBUGGING void kdp_exception( - unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode) + unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode) { struct { kdp_exception_t pkt; @@ -115,13 +115,15 @@ kdp_exception_ack(unsigned char * pkt, int len) kdp_exception_ack_t aligned_pkt; kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt; - if ((unsigned)len < sizeof(*rq)) - return (FALSE); + if ((unsigned)len < sizeof(*rq)) { + return FALSE; + } bcopy((char *)pkt, (char *)rq, sizeof(*rq)); - if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) - return (FALSE); + if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) { + return FALSE; + } dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq)); @@ -129,7 +131,7 @@ kdp_exception_ack(unsigned char * pkt, int len) kdp.exception_ack_needed = FALSE; kdp.exception_seq++; } - return (TRUE); + return TRUE; } static void @@ -258,13 +260,14 @@ kdp_panic(const char * msg) { printf("kdp panic: %s\n", msg); while (1) { - }; + } + ; } int 
kdp_intr_disbl(void) { - return (splhigh()); + return splhigh(); } void @@ -288,7 +291,7 @@ kdp_call(void) int kdp_getc(void) { - return (cnmaygetc()); + return cnmaygetc(); } void @@ -321,14 +324,14 @@ int kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu) { #pragma unused(rq, data, lcpu) - return 0; + return 0; } int kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu) { #pragma unused(rq, data, lcpu) - return 0; + return 0; } #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */ @@ -340,12 +343,14 @@ kdp_trap(unsigned int exception, struct arm_saved_state * saved_state) #if defined(__arm__) if (saved_state->cpsr & PSR_TF) { unsigned short instr = *((unsigned short *)(saved_state->pc)); - if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF))) + if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF))) { saved_state->pc += 2; + } } else { unsigned int instr = *((unsigned int *)(saved_state->pc)); - if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) + if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) { saved_state->pc += 4; + } } #elif defined(__arm64__) @@ -358,8 +363,9 @@ kdp_trap(unsigned int exception, struct arm_saved_state * saved_state) * traps to the debugger, we should identify both variants and * increment for both of them. */ - if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) + if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) { set_saved_state_pc(saved_state, get_saved_state_pc(saved_state) + 4); + } #else #error Unknown architecture. #endif @@ -378,12 +384,12 @@ typedef uint32_t uint32_align2_t __attribute__((aligned(2))); int machine_trace_thread(thread_t thread, - char * tracepos, - char * tracebound, - int nframes, - boolean_t user_p, - boolean_t trace_fp, - uint32_t * thread_trace_flags) + char * tracepos, + char * tracebound, + int nframes, + boolean_t user_p, + boolean_t trace_fp, + uint32_t * thread_trace_flags) { uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos; @@ -403,7 +409,7 @@ machine_trace_thread(thread_t thread, nframes = (tracebound > tracepos) ? 
MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0; if (!nframes) { - return (0); + return 0; } framecount = 0; @@ -454,7 +460,6 @@ machine_trace_thread(thread_t thread, } for (; framecount < nframes; framecount++) { - *tracebuf++ = prevlr; if (trace_fp) { *tracebuf++ = (uint32_t)fp; @@ -475,9 +480,8 @@ machine_trace_thread(thread_t thread, if (fp < stacklimit_bottom) { break; } - /* Stack grows downward */ + /* Stack grows downward */ if (fp < prevfp) { - boolean_t prev_in_interrupt_stack = FALSE; if (!user_p) { @@ -488,26 +492,27 @@ machine_trace_thread(thread_t thread, */ int cpu; int max_cpu = ml_get_max_cpu_number(); - - for (cpu=0; cpu <= max_cpu; cpu++) { + + for (cpu = 0; cpu <= max_cpu; cpu++) { cpu_data_t *target_cpu_datap; - + target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if(target_cpu_datap == (cpu_data_t *)NULL) + if (target_cpu_datap == (cpu_data_t *)NULL) { continue; - - if (prevfp >= (target_cpu_datap->intstack_top-INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) { + } + + if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) { prev_in_interrupt_stack = TRUE; break; } #if defined(__arm__) - if (prevfp >= (target_cpu_datap->fiqstack_top-FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) { + if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) { prev_in_interrupt_stack = TRUE; break; } #elif defined(__arm64__) - if (prevfp >= (target_cpu_datap->excepstack_top-EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) { + if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) { prev_in_interrupt_stack = TRUE; break; } @@ -553,22 +558,21 @@ machine_trace_thread(thread_t thread, *thread_trace_flags |= kThreadTruncatedBT; } } - } /* Reset the target pmap */ machine_trace_thread_clear_validation_cache(); - return ((int)(((char *)tracebuf) - tracepos)); + return (int)(((char *)tracebuf) - tracepos); } int machine_trace_thread64(thread_t thread, - char * tracepos, - char * tracebound, - int nframes, - boolean_t user_p, - boolean_t trace_fp, - uint32_t * thread_trace_flags, - uint64_t *sp_out) + char * tracepos, + char * tracebound, + int nframes, + boolean_t user_p, + boolean_t trace_fp, + uint32_t * thread_trace_flags, + uint64_t *sp_out) { #pragma unused(sp_out) #if defined(__arm__) @@ -595,7 +599,7 @@ machine_trace_thread64(thread_t thread, nframes = (tracebound > tracepos) ? 
MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0; if (!nframes) { - return (0); + return 0; } framecount = 0; @@ -637,7 +641,6 @@ machine_trace_thread64(thread_t thread, } for (; framecount < nframes; framecount++) { - *tracebuf++ = prevlr; if (trace_fp) { *tracebuf++ = fp; @@ -675,24 +678,25 @@ machine_trace_thread64(thread_t thread, int cpu; int max_cpu = ml_get_max_cpu_number(); - for (cpu=0; cpu <= max_cpu; cpu++) { + for (cpu = 0; cpu <= max_cpu; cpu++) { cpu_data_t *target_cpu_datap; target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; - if(target_cpu_datap == (cpu_data_t *)NULL) + if (target_cpu_datap == (cpu_data_t *)NULL) { continue; + } - if (prevfp >= (target_cpu_datap->intstack_top-INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) { + if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) { switched_stacks = TRUE; break; } #if defined(__arm__) - if (prevfp >= (target_cpu_datap->fiqstack_top-FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) { + if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) { switched_stacks = TRUE; break; } #elif defined(__arm64__) - if (prevfp >= (target_cpu_datap->excepstack_top-EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) { + if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) { switched_stacks = TRUE; break; } @@ -734,11 +738,10 @@ machine_trace_thread64(thread_t thread, *thread_trace_flags |= kThreadTruncatedBT; } } - } /* Reset the target pmap */ machine_trace_thread_clear_validation_cache(); - return ((int)(((char *)tracebuf) - tracepos)); + return (int)(((char *)tracebuf) - tracepos); #else #error Unknown architecture. #endif @@ -747,6 +750,5 @@ machine_trace_thread64(thread_t thread, void kdp_ml_enter_debugger(void) { - __asm__ volatile(".long 0xe7ffdefe"); + __asm__ volatile (".long 0xe7ffdefe"); } - diff --git a/osfmk/kdp/ml/arm/kdp_vm.c b/osfmk/kdp/ml/arm/kdp_vm.c index 82f5307c7..7a26f78dd 100644 --- a/osfmk/kdp/ml/arm/kdp_vm.c +++ b/osfmk/kdp/ml/arm/kdp_vm.c @@ -48,7 +48,7 @@ pmap_t kdp_pmap = 0; boolean_t kdp_trans_off; -boolean_t kdp_read_io = 0; +boolean_t kdp_read_io = 0; pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va); @@ -57,30 +57,29 @@ pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va); */ pmap_paddr_t kdp_vtophys( - pmap_t pmap, - vm_offset_t va) + pmap_t pmap, + vm_offset_t va) { pmap_paddr_t pa; ppnum_t pp; /* Ensure that the provided va resides within the provided pmap range. */ - if(!pmap || ((pmap != kernel_pmap) && ((va < pmap->min) || (va >= pmap->max)))) - { + if (!pmap || ((pmap != kernel_pmap) && ((va < pmap->min) || (va >= pmap->max)))) { #ifdef KDP_VTOPHYS_DEBUG printf("kdp_vtophys(%08x, %016lx) not in range %08x .. %08x\n", (unsigned int) pmap, - (unsigned long) va, - (unsigned int) (pmap ? pmap->min : 0), - (unsigned int) (pmap ? pmap->max : 0)); + (unsigned long) va, + (unsigned int) (pmap ? pmap->min : 0), + (unsigned int) (pmap ? 
pmap->max : 0)); #endif - return 0; /* Just return if no translation */ + return 0; /* Just return if no translation */ } - pp = pmap_find_phys(pmap, va); /* Get the page number */ - if (!pp) - return 0; /* Just return if no translation */ - - pa = ((pmap_paddr_t) pp << PAGE_SHIFT) | (va & PAGE_MASK); /* Insert page offset */ - return (pa); + pp = pmap_find_phys(pmap, va); /* Get the page number */ + if (!pp) { + return 0; /* Just return if no translation */ + } + pa = ((pmap_paddr_t) pp << PAGE_SHIFT) | (va & PAGE_MASK); /* Insert page offset */ + return pa; } @@ -98,7 +97,7 @@ kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len) { addr64_t cur_virt_src, cur_virt_dst; addr64_t cur_phys_src, cur_phys_dst; - mach_vm_size_t resid, cnt; + mach_vm_size_t resid, cnt; pmap_t pmap; #ifdef KDP_VM_READ_DEBUG @@ -117,40 +116,44 @@ kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len) ret = kdp_machine_phys_read(&rq, dst, 0 /* unused */); return ret; } else { - resid = len; - if (kdp_pmap) - pmap = kdp_pmap; /* If special pmap, use it */ - else - pmap = kernel_pmap; /* otherwise, use kernel's */ - + if (kdp_pmap) { + pmap = kdp_pmap; /* If special pmap, use it */ + } else { + pmap = kernel_pmap; /* otherwise, use kernel's */ + } while (resid != 0) { /* * Always translate the destination using the * kernel_pmap. */ - if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) + if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) { goto exit; + } - if ((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0) + if ((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0) { goto exit; + } /* Attempt to ensure that there are valid translations for src and dst. */ - if (!kdp_read_io && ((!pmap_valid_address(cur_phys_dst)) || (!pmap_valid_address(cur_phys_src)))) + if (!kdp_read_io && ((!pmap_valid_address(cur_phys_dst)) || (!pmap_valid_address(cur_phys_src)))) { goto exit; + } - cnt = ARM_PGBYTES - (cur_virt_src & PAGE_MASK); /* Get length left on - * page */ - if (cnt > (ARM_PGBYTES - (cur_virt_dst & PAGE_MASK))) + cnt = ARM_PGBYTES - (cur_virt_src & PAGE_MASK); /* Get length left on + * page */ + if (cnt > (ARM_PGBYTES - (cur_virt_dst & PAGE_MASK))) { cnt = ARM_PGBYTES - (cur_virt_dst & PAGE_MASK); + } - if (cnt > resid) + if (cnt > resid) { cnt = resid; + } #ifdef KDP_VM_READ_DEBUG kprintf("kdp_machine_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n", - pmap, cur_virt_src, cur_phys_src); + pmap, cur_virt_src, cur_phys_src); #endif bcopy_phys(cur_phys_src, cur_phys_dst, cnt); @@ -163,7 +166,7 @@ exit: #ifdef KDP_VM_READ_DEBUG kprintf("kdp_machine_vm_read: ret %08X\n", len - resid); #endif - return (len - resid); + return len - resid; } mach_vm_size_t @@ -171,7 +174,7 @@ kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst, uint16_t lcpu __ { mach_vm_address_t src = rq->address; mach_vm_size_t len = rq->nbytes; - + addr64_t cur_virt_dst; addr64_t cur_phys_src, cur_phys_dst; mach_vm_size_t resid = len; @@ -185,29 +188,31 @@ kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst, uint16_t lcpu __ cur_phys_src = (addr64_t) src; while (resid != 0) { - - if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) + if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) { goto exit; + } /* Get length left on page */ - + cnt_src = ARM_PGBYTES - (cur_phys_src & PAGE_MASK); cnt_dst = ARM_PGBYTES - (cur_phys_dst & PAGE_MASK); - if (cnt_src > cnt_dst) + if (cnt_src > cnt_dst) { cnt = cnt_dst; - else + } else { cnt = 
cnt_src; - if (cnt > resid) + } + if (cnt > resid) { cnt = resid; - - bcopy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */ + } + + bcopy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */ cur_phys_src += cnt; cur_virt_dst += cnt; resid -= cnt; } exit: - return (len - resid); + return len - resid; } /* @@ -230,31 +235,36 @@ kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len) resid = len; while (resid != 0) { - if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) + if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) { goto exit; + } - if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) + if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) { goto exit; + } /* Attempt to ensure that there are valid translations for src and dst. */ /* No support for enabling writes for an invalid translation at the moment. */ - if ((!pmap_valid_address(cur_phys_dst)) || (!pmap_valid_address(cur_phys_src))) + if ((!pmap_valid_address(cur_phys_dst)) || (!pmap_valid_address(cur_phys_src))) { goto exit; + } cnt_src = ((cur_phys_src + ARM_PGBYTES) & (-ARM_PGBYTES)) - cur_phys_src; cnt_dst = ((cur_phys_dst + ARM_PGBYTES) & (-ARM_PGBYTES)) - cur_phys_dst; - if (cnt_src > cnt_dst) + if (cnt_src > cnt_dst) { cnt = cnt_dst; - else + } else { cnt = cnt_src; - if (cnt > resid) + } + if (cnt > resid) { cnt = resid; + } #ifdef KDP_VM_WRITE_DEBUG printf("kdp_vm_write: cur_phys_src %x cur_phys_src %x len %x - %08X %08X\n", src, dst, cnt); #endif - bcopy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */ + bcopy_phys(cur_phys_src, cur_phys_dst, cnt); /* Copy stuff over */ flush_dcache64(cur_phys_dst, (unsigned int)cnt, TRUE); invalidate_icache64(cur_phys_dst, (unsigned int)cnt, TRUE); @@ -263,93 +273,91 @@ kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len) resid -= cnt; } exit: - return (len - resid); + return len - resid; } mach_vm_size_t kdp_machine_phys_write(kdp_writephysmem64_req_t *rq __unused, caddr_t src __unused, - uint16_t lcpu __unused) + uint16_t lcpu __unused) { - return 0; /* unimplemented */ + return 0; /* unimplemented */ } void kern_collectth_state_size(uint64_t * tstate_count, uint64_t * tstate_size) { - uint64_t count = ml_get_max_cpu_number() + 1; + uint64_t count = ml_get_max_cpu_number() + 1; - *tstate_count = count; - *tstate_size = sizeof(struct thread_command) - + (sizeof(arm_state_hdr_t) + *tstate_count = count; + *tstate_size = sizeof(struct thread_command) + + (sizeof(arm_state_hdr_t) #if defined(__arm64__) - + ARM_THREAD_STATE64_COUNT * sizeof(uint32_t)); + + ARM_THREAD_STATE64_COUNT * sizeof(uint32_t)); #else - + ARM_THREAD_STATE32_COUNT * sizeof(uint32_t)); + + ARM_THREAD_STATE32_COUNT * sizeof(uint32_t)); #endif } void kern_collectth_state(thread_t thread __unused, void *buffer, uint64_t size, void ** iter) { - cpu_data_entry_t *cpuentryp = *iter; - if (cpuentryp == NULL) - cpuentryp = &CpuDataEntries[0]; + cpu_data_entry_t *cpuentryp = *iter; + if (cpuentryp == NULL) { + cpuentryp = &CpuDataEntries[0]; + } - if (cpuentryp == &CpuDataEntries[ml_get_max_cpu_number()]) - *iter = NULL; - else - *iter = cpuentryp + 1; + if (cpuentryp == &CpuDataEntries[ml_get_max_cpu_number()]) { + *iter = NULL; + } else { + *iter = cpuentryp + 1; + } - struct cpu_data *cpudatap = cpuentryp->cpu_data_vaddr; + struct cpu_data *cpudatap = cpuentryp->cpu_data_vaddr; - struct thread_command *tc = (struct thread_command *)buffer; - arm_state_hdr_t *hdr = (arm_state_hdr_t *)(void 
*)(tc + 1); + struct thread_command *tc = (struct thread_command *)buffer; + arm_state_hdr_t *hdr = (arm_state_hdr_t *)(void *)(tc + 1); #if defined(__arm64__) - hdr->flavor = ARM_THREAD_STATE64; - hdr->count = ARM_THREAD_STATE64_COUNT; - arm_thread_state64_t *state = (arm_thread_state64_t *)(void *)(hdr + 1); + hdr->flavor = ARM_THREAD_STATE64; + hdr->count = ARM_THREAD_STATE64_COUNT; + arm_thread_state64_t *state = (arm_thread_state64_t *)(void *)(hdr + 1); #else - hdr->flavor = ARM_THREAD_STATE; - hdr->count = ARM_THREAD_STATE_COUNT; - arm_thread_state_t *state = (arm_thread_state_t *)(void *)(hdr + 1); + hdr->flavor = ARM_THREAD_STATE; + hdr->count = ARM_THREAD_STATE_COUNT; + arm_thread_state_t *state = (arm_thread_state_t *)(void *)(hdr + 1); #endif - tc->cmd = LC_THREAD; - tc->cmdsize = (uint32_t) size; + tc->cmd = LC_THREAD; + tc->cmdsize = (uint32_t) size; - if ((cpudatap != NULL) && (cpudatap->halt_status == CPU_HALTED_WITH_STATE)) { - *state = cpudatap->halt_state; - return; - } + if ((cpudatap != NULL) && (cpudatap->halt_status == CPU_HALTED_WITH_STATE)) { + *state = cpudatap->halt_state; + return; + } - if ((cpudatap == NULL) || (cpudatap->cpu_processor == NULL) || (cpudatap->cpu_processor->active_thread == NULL)) { - bzero(state, hdr->count * sizeof(uint32_t)); - return; - } + if ((cpudatap == NULL) || (cpudatap->cpu_processor == NULL) || (cpudatap->cpu_processor->active_thread == NULL)) { + bzero(state, hdr->count * sizeof(uint32_t)); + return; + } - vm_offset_t kstackptr = (vm_offset_t) cpudatap->cpu_processor->active_thread->machine.kstackptr; - arm_saved_state_t *saved_state = (arm_saved_state_t *) kstackptr; + vm_offset_t kstackptr = (vm_offset_t) cpudatap->cpu_processor->active_thread->machine.kstackptr; + arm_saved_state_t *saved_state = (arm_saved_state_t *) kstackptr; #if defined(__arm64__) - state->fp = saved_state->ss_64.fp; - state->lr = saved_state->ss_64.lr; - state->sp = saved_state->ss_64.sp; - state->pc = saved_state->ss_64.pc; - state->cpsr = saved_state->ss_64.cpsr; - bcopy(&saved_state->ss_64.x[0], &state->x[0], sizeof(state->x)); + state->fp = saved_state->ss_64.fp; + state->lr = saved_state->ss_64.lr; + state->sp = saved_state->ss_64.sp; + state->pc = saved_state->ss_64.pc; + state->cpsr = saved_state->ss_64.cpsr; + bcopy(&saved_state->ss_64.x[0], &state->x[0], sizeof(state->x)); #else /* __arm64__ */ - state->lr = saved_state->lr; - state->sp = saved_state->sp; - state->pc = saved_state->pc; - state->cpsr = saved_state->cpsr; - bcopy(&saved_state->r[0], &state->r[0], sizeof(state->r)); + state->lr = saved_state->lr; + state->sp = saved_state->sp; + state->pc = saved_state->pc; + state->cpsr = saved_state->cpsr; + bcopy(&saved_state->r[0], &state->r[0], sizeof(state->r)); #endif /* !__arm64__ */ - - } - - diff --git a/osfmk/kdp/ml/i386/kdp_x86_common.c b/osfmk/kdp/ml/i386/kdp_x86_common.c index a8b69d57b..934bce867 100644 --- a/osfmk/kdp/ml/i386/kdp_x86_common.c +++ b/osfmk/kdp/ml/i386/kdp_x86_common.c @@ -2,7 +2,7 @@ * Copyright (c) 2008-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -61,7 +61,7 @@ * A (potentially valid) physical address is not a kernel address * i.e. it'a a user address. */ -#define IS_PHYS_ADDR(addr) IS_USERADDR64_CANONICAL(addr) +#define IS_PHYS_ADDR(addr) IS_USERADDR64_CANONICAL(addr) boolean_t kdp_read_io; boolean_t kdp_trans_off; @@ -70,6 +70,8 @@ pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va); pmap_t kdp_pmap = 0; +kdp_jtag_coredump_t kdp_jtag_coredump; + pmap_paddr_t kdp_vtophys( pmap_t pmap, @@ -79,11 +81,13 @@ kdp_vtophys( ppnum_t pp; pp = pmap_find_phys(pmap, va); - if(!pp) return 0; - + if (!pp) { + return 0; + } + pa = ((pmap_paddr_t)pp << PAGE_SHIFT) | (va & PAGE_MASK); - return(pa); + return pa; } mach_vm_size_t @@ -114,62 +118,70 @@ kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len) * source (cur_virt_src); otherwise, the source is translated using the * kernel_pmap. */ - if (kdp_pmap) + if (kdp_pmap) { src_pmap = kdp_pmap; + } while (resid != 0) { if (!(cur_phys_src = kdp_vtophys(src_pmap, - cur_virt_src))) + cur_virt_src))) { goto exit; + } /* Always translate the destination buffer using the kernel_pmap */ - if(!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) + if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) { goto exit; + } /* Validate physical page numbers unless kdp_read_io is set */ - if (kdp_read_io == FALSE) - if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) + if (kdp_read_io == FALSE) { + if (!pmap_valid_page(i386_btop(cur_phys_dst)) || !pmap_valid_page(i386_btop(cur_phys_src))) { goto exit; + } + } /* Get length left on page */ cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK); cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK); - if (cnt_src > cnt_dst) + if (cnt_src > cnt_dst) { cnt = cnt_dst; - else + } else { cnt = cnt_src; - if (cnt > resid) + } + if (cnt > resid) { cnt = resid; + } /* Do a physical copy */ if (EFAULT == ml_copy_phys(cur_phys_src, - cur_phys_dst, - (vm_size_t)cnt)) + cur_phys_dst, + (vm_size_t)cnt)) { goto exit; + } cur_virt_src += cnt; cur_virt_dst += cnt; resid -= cnt; } exit: - return (len - resid); + return len - resid; } mach_vm_size_t kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst, - uint16_t lcpu) + uint16_t lcpu) { mach_vm_address_t src = rq->address; mach_vm_size_t len = rq->nbytes; - + addr64_t cur_virt_dst; addr64_t cur_phys_dst, cur_phys_src; mach_vm_size_t resid = len; mach_vm_size_t cnt = 0, cnt_src, cnt_dst; - if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) { - return (mach_vm_size_t) - kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst); - } + if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) { + return (mach_vm_size_t) + kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_read, rq, dst); + } #ifdef KDP_VM_READ_DEBUG printf("kdp_phys_read: src %llx dst %p len %llx\n", src, (void *)dst, len); @@ -179,41 +191,44 @@ 
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst, cur_phys_src = (addr64_t)src; while (resid != 0) { - - if(!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) + if (!(cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst))) { goto exit; + } /* Get length left on page */ cnt_src = PAGE_SIZE - (cur_phys_src & PAGE_MASK); cnt_dst = PAGE_SIZE - (cur_phys_dst & PAGE_MASK); - if (cnt_src > cnt_dst) + if (cnt_src > cnt_dst) { cnt = cnt_dst; - else + } else { cnt = cnt_src; - if (cnt > resid) + } + if (cnt > resid) { cnt = resid; + } - /* Do a physical copy; use ml_copy_phys() in the event this is - * a short read with potential side effects. - */ + /* Do a physical copy; use ml_copy_phys() in the event this is + * a short read with potential side effects. + */ if (EFAULT == ml_copy_phys(cur_phys_src, - cur_phys_dst, - (vm_size_t)cnt)) + cur_phys_dst, + (vm_size_t)cnt)) { goto exit; + } cur_phys_src += cnt; cur_virt_dst += cnt; resid -= cnt; } exit: - return (len - resid); + return len - resid; } /* - * + * */ mach_vm_size_t kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len) -{ +{ addr64_t cur_virt_src, cur_virt_dst; addr64_t cur_phys_src, cur_phys_dst; unsigned resid, cnt, cnt_src, cnt_dst; @@ -228,51 +243,55 @@ kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len) resid = (unsigned)len; while (resid != 0) { - if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) + if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) { goto exit; + } - if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) + if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) { goto exit; + } /* Copy as many bytes as possible without crossing a page */ cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK)); - if (cnt_src > cnt_dst) + if (cnt_src > cnt_dst) { cnt = cnt_dst; - else + } else { cnt = cnt_src; - if (cnt > resid) + } + if (cnt > resid) { cnt = resid; + } - if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) - goto exit; /* Copy stuff over */ - - cur_virt_src +=cnt; - cur_virt_dst +=cnt; + if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) { + goto exit; /* Copy stuff over */ + } + cur_virt_src += cnt; + cur_virt_dst += cnt; resid -= cnt; } exit: - return (len - resid); + return len - resid; } /* - * + * */ mach_vm_size_t kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src, - uint16_t lcpu) -{ + uint16_t lcpu) +{ mach_vm_address_t dst = rq->address; mach_vm_size_t len = rq->nbytes; addr64_t cur_virt_src; addr64_t cur_phys_src, cur_phys_dst; unsigned resid, cnt, cnt_src, cnt_dst; - if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) { - return (mach_vm_size_t) - kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src); - } + if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) { + return (mach_vm_size_t) + kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_phys_write, rq, src); + } #ifdef KDP_VM_WRITE_DEBUG printf("kdp_phys_write: src %p dst %llx len %llx - %08X %08X\n", (void *)src, dst, len, ((unsigned int *)src)[0], ((unsigned int *)src)[1]); @@ -284,30 +303,33 @@ kdp_machine_phys_write(kdp_writephysmem64_req_t *rq, caddr_t src, resid = (unsigned)len; while (resid != 0) { - if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) + if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) { goto exit; + } /* Copy as many bytes as possible 
without crossing a page */ cnt_src = (unsigned)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); cnt_dst = (unsigned)(PAGE_SIZE - (cur_phys_dst & PAGE_MASK)); - if (cnt_src > cnt_dst) + if (cnt_src > cnt_dst) { cnt = cnt_dst; - else + } else { cnt = cnt_src; - if (cnt > resid) + } + if (cnt > resid) { cnt = resid; + } - if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) - goto exit; /* Copy stuff over */ - - cur_virt_src +=cnt; - cur_phys_dst +=cnt; + if (EFAULT == ml_copy_phys(cur_phys_src, cur_phys_dst, cnt)) { + goto exit; /* Copy stuff over */ + } + cur_virt_src += cnt; + cur_phys_dst += cnt; resid -= cnt; } exit: - return (len - resid); + return len - resid; } int @@ -318,10 +340,9 @@ kdp_machine_ioport_read(kdp_readioport_req_t *rq, caddr_t data, uint16_t lcpu) if ((lcpu != KDP_CURRENT_LCPU) && (lcpu != cpu_number())) { return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_read, rq, data); - } + } - switch (size) - { + switch (size) { case 1: *((uint8_t *) data) = inb(addr); break; @@ -348,8 +369,7 @@ kdp_machine_ioport_write(kdp_writeioport_req_t *rq, caddr_t data, uint16_t lcpu) return (int) kdp_x86_xcpu_invoke(lcpu, (kdp_x86_xcpu_func_t)kdp_machine_ioport_write, rq, data); } - switch (size) - { + switch (size) { case 1: outb(addr, *((uint8_t *) data)); break; @@ -403,20 +423,18 @@ vm_map_offset_t debugger_window_kva; */ void -kdp_machine_init(void) { - if (debug_boot_arg == 0) - return; - +kdp_map_debug_pagetable_window(void) +{ vm_map_entry_t e; kern_return_t kr; kr = vm_map_find_space(kernel_map, - &debugger_window_kva, - PAGE_SIZE, 0, - 0, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_OSFMK, - &e); + &debugger_window_kva, + PAGE_SIZE, 0, + 0, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_OSFMK, + &e); if (kr != KERN_SUCCESS) { panic("%s: vm_map_find_space failed with %d\n", __FUNCTION__, kr); @@ -432,5 +450,31 @@ kdp_machine_init(void) { } } +/* initialize kdp_jtag_coredump with data needed for JTAG coredump extraction */ +void +kdp_jtag_coredump_init(void) +{ + kdp_jtag_coredump.version = (uint64_t) KDP_JTAG_COREDUMP_VERSION_1; + kdp_jtag_coredump.kernel_map_start = (uint64_t) kernel_map->min_offset; + kdp_jtag_coredump.kernel_map_end = (uint64_t) kernel_map->max_offset; + kdp_jtag_coredump.kernel_pmap_pml4 = (uint64_t) kernel_pmap->pm_pml4; + kdp_jtag_coredump.pmap_memory_regions = (uint64_t) &pmap_memory_regions; + kdp_jtag_coredump.pmap_memory_region_count = (uint64_t) pmap_memory_region_count; + kdp_jtag_coredump.pmap_memory_region_t_size = (uint64_t) sizeof(pmap_memory_region_t); + kdp_jtag_coredump.physmap_base = (uint64_t) &physmap_base; + + /* update signature last so that JTAG can trust that structure has valid data */ + kdp_jtag_coredump.signature = (uint64_t) KDP_JTAG_COREDUMP_SIGNATURE; +} +void +kdp_machine_init(void) +{ + if (debug_boot_arg == 0) { + return; + } + + kdp_map_debug_pagetable_window(); + kdp_jtag_coredump_init(); +} diff --git a/osfmk/kdp/ml/i386/kdp_x86_common.h b/osfmk/kdp/ml/i386/kdp_x86_common.h index ec9a0bfeb..e7efb5906 100644 --- a/osfmk/kdp/ml/i386/kdp_x86_common.h +++ b/osfmk/kdp/ml/i386/kdp_x86_common.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
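kdp_jtag_coredump_init() publishes this structure for an external JTAG probe, which is why the signature is stored only after every other field: a probe that polls memory and finds the signature can take the remaining fields as valid. The signature constant itself is just ASCII; a quick self-contained check:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t sig = 0x434f524544554d50ULL; /* KDP_JTAG_COREDUMP_SIGNATURE */
        for (int shift = 56; shift >= 0; shift -= 8) {
            putchar((int)((sig >> shift) & 0xff)); /* prints "COREDUMP" */
        }
        putchar('\n');
        return 0;
    }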
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,4 +33,26 @@ #include #include +/* data required for JTAG extraction of coredump */ +typedef struct _kdp_jtag_coredump_t { + uint64_t signature; + uint64_t version; + uint64_t kernel_map_start; + uint64_t kernel_map_end; + uint64_t kernel_pmap_pml4; + uint64_t pmap_memory_regions; + uint64_t pmap_memory_region_count; + uint64_t pmap_memory_region_t_size; + uint64_t physmap_base; +} kdp_jtag_coredump_t; + +/* signature used to verify kdp_jtag_coredump_t structure */ +#define KDP_JTAG_COREDUMP_SIGNATURE 0x434f524544554d50 + +/* version of kdp_jtag_coredump_t structure */ +#define KDP_JTAG_COREDUMP_VERSION_1 1 + +void kdp_map_debug_pagetable_window(void); +void kdp_jtag_coredump_init(void); + #endif /* _KDP_X86_COMMON_H_ */ diff --git a/osfmk/kdp/ml/x86_64/kdp_machdep.c b/osfmk/kdp/ml/x86_64/kdp_machdep.c index 5cfc3be33..0dc052a3e 100644 --- a/osfmk/kdp/ml/x86_64/kdp_machdep.c +++ b/osfmk/kdp/ml/x86_64/kdp_machdep.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
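On the probe side, extraction presumably begins by locating this header and refusing to dereference any of the pointers it carries until the header checks out. A hypothetical reader-side validation, restating the layout locally for self-containment (validate_jtag_header() is illustrative, not part of xnu):

    #include <stdbool.h>
    #include <stdint.h>

    #define KDP_JTAG_COREDUMP_SIGNATURE 0x434f524544554d50ULL
    #define KDP_JTAG_COREDUMP_VERSION_1 1

    typedef struct {
        uint64_t signature;
        uint64_t version;
        uint64_t kernel_map_start;
        uint64_t kernel_map_end;
        uint64_t kernel_pmap_pml4;
        uint64_t pmap_memory_regions;
        uint64_t pmap_memory_region_count;
        uint64_t pmap_memory_region_t_size;
        uint64_t physmap_base;
    } kdp_jtag_coredump_t;

    /* Hypothetical: accept the header only once the kernel has finished
     * filling it in (the signature is written last, so it gates the rest). */
    static bool
    validate_jtag_header(const kdp_jtag_coredump_t *h)
    {
        return h->signature == KDP_JTAG_COREDUMP_SIGNATURE &&
               h->version >= KDP_JTAG_COREDUMP_VERSION_1 &&
               h->kernel_map_start < h->kernel_map_end;
    }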
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #include #include #include @@ -60,231 +60,231 @@ extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_ extern void machine_trace_thread_clear_validation_cache(void); extern vm_map_t kernel_map; -void print_saved_state(void *); -void kdp_call(void); -int kdp_getc(void); -void kdp_getstate(x86_thread_state64_t *); -void kdp_setstate(x86_thread_state64_t *); -void kdp_print_phys(int); +void print_saved_state(void *); +void kdp_call(void); +int kdp_getc(void); +void kdp_getstate(x86_thread_state64_t *); +void kdp_setstate(x86_thread_state64_t *); unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len); void kdp_exception( - unsigned char *pkt, - int *len, - unsigned short *remote_port, - unsigned int exception, - unsigned int code, - unsigned int subcode -) + unsigned char *pkt, + int *len, + unsigned short *remote_port, + unsigned int exception, + unsigned int code, + unsigned int subcode + ) { - kdp_exception_t *rq = (kdp_exception_t *)pkt; - - rq->hdr.request = KDP_EXCEPTION; - rq->hdr.is_reply = 0; - rq->hdr.seq = kdp.exception_seq; - rq->hdr.key = 0; - rq->hdr.len = sizeof (*rq); - - rq->n_exc_info = 1; - rq->exc_info[0].cpu = 0; - rq->exc_info[0].exception = exception; - rq->exc_info[0].code = code; - rq->exc_info[0].subcode = subcode; - - rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t); - - bcopy((char *)rq, (char *)pkt, rq->hdr.len); - - kdp.exception_ack_needed = TRUE; - - *remote_port = kdp.exception_port; - *len = rq->hdr.len; + kdp_exception_t *rq = (kdp_exception_t *)pkt; + + rq->hdr.request = KDP_EXCEPTION; + rq->hdr.is_reply = 0; + rq->hdr.seq = kdp.exception_seq; + rq->hdr.key = 0; + rq->hdr.len = sizeof(*rq); + + rq->n_exc_info = 1; + rq->exc_info[0].cpu = 0; + rq->exc_info[0].exception = exception; + rq->exc_info[0].code = code; + rq->exc_info[0].subcode = subcode; + + rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t); + + bcopy((char *)rq, (char *)pkt, rq->hdr.len); + + kdp.exception_ack_needed = TRUE; + + *remote_port = kdp.exception_port; + *len = rq->hdr.len; } boolean_t kdp_exception_ack( - unsigned char *pkt, - int len -) + unsigned char *pkt, + int len + ) { - kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt; - - if (((unsigned int) len) < sizeof (*rq)) - return(FALSE); - - if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) - return(FALSE); - - dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq)); - - if (rq->hdr.seq == kdp.exception_seq) { - kdp.exception_ack_needed = FALSE; - kdp.exception_seq++; - } - return(TRUE); + kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt; + + if (((unsigned int) len) < sizeof(*rq)) { + return FALSE; + } + + if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) { + return FALSE; + } + + dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq)); + + if (rq->hdr.seq == kdp.exception_seq) { + kdp.exception_ack_needed = FALSE; + kdp.exception_seq++; + } + return TRUE; } void kdp_getstate( - x86_thread_state64_t *state -) + x86_thread_state64_t *state + ) { - x86_saved_state64_t *saved_state; - - saved_state = (x86_saved_state64_t *)kdp.saved_state; - - state->rax = saved_state->rax; - state->rbx = saved_state->rbx; - state->rcx = saved_state->rcx; - state->rdx = saved_state->rdx; - state->rdi = saved_state->rdi; - state->rsi = saved_state->rsi; - state->rbp = saved_state->rbp; - - state->r8 = saved_state->r8; - state->r9 = saved_state->r9; - state->r10 = saved_state->r10; - 
state->r11 = saved_state->r11; - state->r12 = saved_state->r12; - state->r13 = saved_state->r13; - state->r14 = saved_state->r14; - state->r15 = saved_state->r15; - - state->rsp = saved_state->isf.rsp; - state->rflags = saved_state->isf.rflags; - state->rip = saved_state->isf.rip; - - state->cs = saved_state->isf.cs; - state->fs = saved_state->fs; - state->gs = saved_state->gs; + x86_saved_state64_t *saved_state; + + saved_state = (x86_saved_state64_t *)kdp.saved_state; + + state->rax = saved_state->rax; + state->rbx = saved_state->rbx; + state->rcx = saved_state->rcx; + state->rdx = saved_state->rdx; + state->rdi = saved_state->rdi; + state->rsi = saved_state->rsi; + state->rbp = saved_state->rbp; + + state->r8 = saved_state->r8; + state->r9 = saved_state->r9; + state->r10 = saved_state->r10; + state->r11 = saved_state->r11; + state->r12 = saved_state->r12; + state->r13 = saved_state->r13; + state->r14 = saved_state->r14; + state->r15 = saved_state->r15; + + state->rsp = saved_state->isf.rsp; + state->rflags = saved_state->isf.rflags; + state->rip = saved_state->isf.rip; + + state->cs = saved_state->isf.cs; + state->fs = saved_state->fs; + state->gs = saved_state->gs; } void kdp_setstate( - x86_thread_state64_t *state -) + x86_thread_state64_t *state + ) { - x86_saved_state64_t *saved_state; - - saved_state = (x86_saved_state64_t *)kdp.saved_state; - saved_state->rax = state->rax; - saved_state->rbx = state->rbx; - saved_state->rcx = state->rcx; - saved_state->rdx = state->rdx; - saved_state->rdi = state->rdi; - saved_state->rsi = state->rsi; - saved_state->rbp = state->rbp; - saved_state->r8 = state->r8; - saved_state->r9 = state->r9; - saved_state->r10 = state->r10; - saved_state->r11 = state->r11; - saved_state->r12 = state->r12; - saved_state->r13 = state->r13; - saved_state->r14 = state->r14; - saved_state->r15 = state->r15; - - saved_state->isf.rflags = state->rflags; - saved_state->isf.rsp = state->rsp; - saved_state->isf.rip = state->rip; - - saved_state->fs = (uint32_t)state->fs; - saved_state->gs = (uint32_t)state->gs; + x86_saved_state64_t *saved_state; + + saved_state = (x86_saved_state64_t *)kdp.saved_state; + saved_state->rax = state->rax; + saved_state->rbx = state->rbx; + saved_state->rcx = state->rcx; + saved_state->rdx = state->rdx; + saved_state->rdi = state->rdi; + saved_state->rsi = state->rsi; + saved_state->rbp = state->rbp; + saved_state->r8 = state->r8; + saved_state->r9 = state->r9; + saved_state->r10 = state->r10; + saved_state->r11 = state->r11; + saved_state->r12 = state->r12; + saved_state->r13 = state->r13; + saved_state->r14 = state->r14; + saved_state->r15 = state->r15; + + saved_state->isf.rflags = state->rflags; + saved_state->isf.rsp = state->rsp; + saved_state->isf.rip = state->rip; + + saved_state->fs = (uint32_t)state->fs; + saved_state->gs = (uint32_t)state->gs; } kdp_error_t kdp_machine_read_regs( - __unused unsigned int cpu, - unsigned int flavor, - char *data, - int *size -) + __unused unsigned int cpu, + unsigned int flavor, + char *data, + int *size + ) { - static x86_float_state64_t null_fpstate; - - switch (flavor) { - - case x86_THREAD_STATE64: - dprintf(("kdp_readregs THREAD_STATE64\n")); - kdp_getstate((x86_thread_state64_t *)data); - *size = sizeof (x86_thread_state64_t); - return KDPERR_NO_ERROR; - - case x86_FLOAT_STATE64: - dprintf(("kdp_readregs THREAD_FPSTATE64\n")); - *(x86_float_state64_t *)data = null_fpstate; - *size = sizeof (x86_float_state64_t); - return KDPERR_NO_ERROR; - - default: - dprintf(("kdp_readregs bad flavor %d\n", 
flavor)); - *size = 0; - return KDPERR_BADFLAVOR; - } + static x86_float_state64_t null_fpstate; + + switch (flavor) { + case x86_THREAD_STATE64: + dprintf(("kdp_readregs THREAD_STATE64\n")); + kdp_getstate((x86_thread_state64_t *)data); + *size = sizeof(x86_thread_state64_t); + return KDPERR_NO_ERROR; + + case x86_FLOAT_STATE64: + dprintf(("kdp_readregs THREAD_FPSTATE64\n")); + *(x86_float_state64_t *)data = null_fpstate; + *size = sizeof(x86_float_state64_t); + return KDPERR_NO_ERROR; + + default: + dprintf(("kdp_readregs bad flavor %d\n", flavor)); + *size = 0; + return KDPERR_BADFLAVOR; + } } kdp_error_t kdp_machine_write_regs( - __unused unsigned int cpu, - unsigned int flavor, - char *data, - __unused int *size -) + __unused unsigned int cpu, + unsigned int flavor, + char *data, + __unused int *size + ) { - switch (flavor) { - - case x86_THREAD_STATE64: - dprintf(("kdp_writeregs THREAD_STATE64\n")); - kdp_setstate((x86_thread_state64_t *)data); - return KDPERR_NO_ERROR; - - case x86_FLOAT_STATE64: - dprintf(("kdp_writeregs THREAD_FPSTATE64\n")); - return KDPERR_NO_ERROR; - - default: - dprintf(("kdp_writeregs bad flavor %d\n", flavor)); - return KDPERR_BADFLAVOR; - } + switch (flavor) { + case x86_THREAD_STATE64: + dprintf(("kdp_writeregs THREAD_STATE64\n")); + kdp_setstate((x86_thread_state64_t *)data); + return KDPERR_NO_ERROR; + + case x86_FLOAT_STATE64: + dprintf(("kdp_writeregs THREAD_FPSTATE64\n")); + return KDPERR_NO_ERROR; + + default: + dprintf(("kdp_writeregs bad flavor %d\n", flavor)); + return KDPERR_BADFLAVOR; + } } void kdp_machine_hostinfo( - kdp_hostinfo_t *hostinfo -) + kdp_hostinfo_t *hostinfo + ) { - int i; + int i; - hostinfo->cpus_mask = 0; + hostinfo->cpus_mask = 0; - for (i = 0; i < machine_info.max_cpus; i++) { - if (cpu_data_ptr[i] == NULL) - continue; - - hostinfo->cpus_mask |= (1 << i); - } + for (i = 0; i < machine_info.max_cpus; i++) { + if (cpu_data_ptr[i] == NULL) { + continue; + } - hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64; - hostinfo->cpu_subtype = cpuid_cpusubtype(); + hostinfo->cpus_mask |= (1 << i); + } + + hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64; + hostinfo->cpu_subtype = cpuid_cpusubtype(); } void kdp_panic( - const char *msg -) + const char *msg + ) { - kprintf("kdp panic: %s\n", msg); - __asm__ volatile("hlt"); + kprintf("kdp panic: %s\n", msg); + __asm__ volatile ("hlt"); } int kdp_intr_disbl(void) { - return splhigh(); + return splhigh(); } void @@ -296,182 +296,160 @@ kdp_intr_enbl(int s) int kdp_getc(void) { - return cnmaygetc(); + return cnmaygetc(); } void kdp_us_spin(int usec) { - delay(usec/100); + delay(usec / 100); } -void print_saved_state(void *state) +void +print_saved_state(void *state) { - x86_saved_state64_t *saved_state; + x86_saved_state64_t *saved_state; - saved_state = state; + saved_state = state; kprintf("pc = 0x%llx\n", saved_state->isf.rip); kprintf("cr2= 0x%llx\n", saved_state->cr2); kprintf("rp = TODO FIXME\n"); kprintf("sp = %p\n", saved_state); - } void kdp_sync_cache(void) { - return; /* No op here. */ + return; /* No op here. 
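kdp_machine_hostinfo() reports the populated logical CPUs as a bitmask: bit i is set exactly when cpu_data_ptr[i] is non-NULL. A fully populated four-CPU box yields cpus_mask == 0xF; a machine with only slots 0 and 2 populated yields 0x5. The same shape in isolation (cpu_present[] stands in for the cpu_data_ptr[] check):

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumes max_cpus <= 32, matching the 32-bit mask width. */
    static uint32_t
    cpus_mask(const bool *cpu_present, int max_cpus)
    {
        uint32_t mask = 0;
        for (int i = 0; i < max_cpus; i++) {
            if (cpu_present[i]) {
                mask |= (1u << i);
            }
        }
        return mask;
    }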
*/ } void kdp_call(void) { - __asm__ volatile ("int $3"); /* Let the processor do the work */ + __asm__ volatile ("int $3"); /* Let the processor do the work */ } typedef struct _cframe_t { - struct _cframe_t *prev; - unsigned caller; - unsigned args[0]; + struct _cframe_t *prev; + unsigned caller; + unsigned args[0]; } cframe_t; -extern pt_entry_t *DMAP2; -extern caddr_t DADDR2; - -void -kdp_print_phys(int src) +boolean_t +kdp_i386_trap( + unsigned int trapno, + x86_saved_state64_t *saved_state, + kern_return_t result, + vm_offset_t va + ) { - unsigned int *iptr; - int i; - - *(int *) DMAP2 = 0x63 | (src & 0xfffff000); - invlpg((uintptr_t) DADDR2); - iptr = (unsigned int *) DADDR2; - for (i = 0; i < 100; i++) { - kprintf("0x%x ", *iptr++); - if ((i % 8) == 0) - kprintf("\n"); + unsigned int exception, code, subcode = 0; + boolean_t prev_interrupts_state; + + if (trapno != T_INT3 && trapno != T_DEBUG) { + kprintf("Debugger: Unexpected kernel trap number: " + "0x%x, RIP: 0x%llx, CR2: 0x%llx\n", + trapno, saved_state->isf.rip, saved_state->cr2); + if (!kdp.is_conn) { + return FALSE; + } } - kprintf("\n"); - *(int *) DMAP2 = 0; -} + prev_interrupts_state = ml_set_interrupts_enabled(FALSE); + disable_preemption(); -boolean_t -kdp_i386_trap( - unsigned int trapno, - x86_saved_state64_t *saved_state, - kern_return_t result, - vm_offset_t va -) -{ - unsigned int exception, code, subcode = 0; - boolean_t prev_interrupts_state; - - if (trapno != T_INT3 && trapno != T_DEBUG) { - kprintf("Debugger: Unexpected kernel trap number: " - "0x%x, RIP: 0x%llx, CR2: 0x%llx\n", - trapno, saved_state->isf.rip, saved_state->cr2); - if (!kdp.is_conn) - return FALSE; - } - - prev_interrupts_state = ml_set_interrupts_enabled(FALSE); - disable_preemption(); - - if (saved_state->isf.rflags & EFL_TF) { - enable_preemption_no_check(); - } - - switch (trapno) { - - case T_DIVIDE_ERROR: - exception = EXC_ARITHMETIC; - code = EXC_I386_DIVERR; - break; - - case T_OVERFLOW: - exception = EXC_SOFTWARE; - code = EXC_I386_INTOFLT; - break; - - case T_OUT_OF_BOUNDS: - exception = EXC_ARITHMETIC; - code = EXC_I386_BOUNDFLT; - break; - - case T_INVALID_OPCODE: - exception = EXC_BAD_INSTRUCTION; - code = EXC_I386_INVOPFLT; - break; - - case T_SEGMENT_NOT_PRESENT: - exception = EXC_BAD_INSTRUCTION; - code = EXC_I386_SEGNPFLT; - subcode = (unsigned int)saved_state->isf.err; - break; - - case T_STACK_FAULT: - exception = EXC_BAD_INSTRUCTION; - code = EXC_I386_STKFLT; - subcode = (unsigned int)saved_state->isf.err; - break; - - case T_GENERAL_PROTECTION: - exception = EXC_BAD_INSTRUCTION; - code = EXC_I386_GPFLT; - subcode = (unsigned int)saved_state->isf.err; - break; - - case T_PAGE_FAULT: - exception = EXC_BAD_ACCESS; - code = result; - subcode = (unsigned int)va; - break; - - case T_WATCHPOINT: - exception = EXC_SOFTWARE; - code = EXC_I386_ALIGNFLT; - break; - - case T_DEBUG: - case T_INT3: - exception = EXC_BREAKPOINT; - code = EXC_I386_BPTFLT; - break; - - default: - exception = EXC_BAD_INSTRUCTION; - code = trapno; - break; - } - - if (current_cpu_datap()->cpu_fatal_trap_state) { - current_cpu_datap()->cpu_post_fatal_trap_state = saved_state; - saved_state = current_cpu_datap()->cpu_fatal_trap_state; - } - - handle_debugger_trap(exception, code, subcode, saved_state); - - enable_preemption(); - ml_set_interrupts_enabled(prev_interrupts_state); - - /* If the instruction single step bit is set, disable kernel preemption - */ - if (saved_state->isf.rflags & EFL_TF) { - disable_preemption(); - } - - return TRUE; + if 
(saved_state->isf.rflags & EFL_TF) { + enable_preemption_no_check(); + } + + switch (trapno) { + case T_DIVIDE_ERROR: + exception = EXC_ARITHMETIC; + code = EXC_I386_DIVERR; + break; + + case T_OVERFLOW: + exception = EXC_SOFTWARE; + code = EXC_I386_INTOFLT; + break; + + case T_OUT_OF_BOUNDS: + exception = EXC_ARITHMETIC; + code = EXC_I386_BOUNDFLT; + break; + + case T_INVALID_OPCODE: + exception = EXC_BAD_INSTRUCTION; + code = EXC_I386_INVOPFLT; + break; + + case T_SEGMENT_NOT_PRESENT: + exception = EXC_BAD_INSTRUCTION; + code = EXC_I386_SEGNPFLT; + subcode = (unsigned int)saved_state->isf.err; + break; + + case T_STACK_FAULT: + exception = EXC_BAD_INSTRUCTION; + code = EXC_I386_STKFLT; + subcode = (unsigned int)saved_state->isf.err; + break; + + case T_GENERAL_PROTECTION: + exception = EXC_BAD_INSTRUCTION; + code = EXC_I386_GPFLT; + subcode = (unsigned int)saved_state->isf.err; + break; + + case T_PAGE_FAULT: + exception = EXC_BAD_ACCESS; + code = result; + subcode = (unsigned int)va; + break; + + case T_WATCHPOINT: + exception = EXC_SOFTWARE; + code = EXC_I386_ALIGNFLT; + break; + + case T_DEBUG: + case T_INT3: + exception = EXC_BREAKPOINT; + code = EXC_I386_BPTFLT; + break; + + default: + exception = EXC_BAD_INSTRUCTION; + code = trapno; + break; + } + + if (current_cpu_datap()->cpu_fatal_trap_state) { + current_cpu_datap()->cpu_post_fatal_trap_state = saved_state; + saved_state = current_cpu_datap()->cpu_fatal_trap_state; + } + + handle_debugger_trap(exception, code, subcode, saved_state); + + enable_preemption(); + ml_set_interrupts_enabled(prev_interrupts_state); + + /* If the instruction single step bit is set, disable kernel preemption + */ + if (saved_state->isf.rflags & EFL_TF) { + disable_preemption(); + } + + return TRUE; } void kdp_machine_get_breakinsn( - uint8_t *bytes, - uint32_t *size -) + uint8_t *bytes, + uint32_t *size + ) { bytes[0] = 0xcc; *size = 1; @@ -481,12 +459,12 @@ kdp_machine_get_breakinsn( int machine_trace_thread(thread_t thread, - char * tracepos, - char * tracebound, - int nframes, - boolean_t user_p, - boolean_t trace_fp, - uint32_t * thread_trace_flags) + char * tracepos, + char * tracebound, + int nframes, + boolean_t user_p, + boolean_t trace_fp, + uint32_t * thread_trace_flags) { uint32_t * tracebuf = (uint32_t *)tracepos; uint32_t framesize = (trace_fp ? 2 : 1) * sizeof(uint32_t); @@ -503,20 +481,19 @@ machine_trace_thread(thread_t thread, nframes = (tracebound > tracepos) ? 
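kdp_machine_get_breakinsn() hands the debugger stub the single-byte x86 INT3 opcode, 0xCC. A remote debugger plants a software breakpoint by saving the original byte and overwriting it, then puts the byte back when the breakpoint is cleared; a minimal sketch of that bookkeeping (names are illustrative; in KDP the actual stores go through the memory-write request handlers shown earlier):

    #include <stdint.h>

    typedef struct {
        uintptr_t addr;   /* breakpoint address */
        uint8_t   saved;  /* original byte at addr */
    } sw_breakpoint_t;

    static const uint8_t BREAK_INSN = 0xCC; /* INT3 */

    static void
    bp_arm(sw_breakpoint_t *bp, uint8_t *text)
    {
        bp->saved = *text;    /* remember what we clobber */
        *text = BREAK_INSN;   /* trap into the debugger on execution */
    }

    static void
    bp_disarm(const sw_breakpoint_t *bp, uint8_t *text)
    {
        *text = bp->saved;    /* restore the original instruction byte */
    }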
MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0; if (user_p) { - x86_saved_state32_t *iss32; - + x86_saved_state32_t *iss32; + iss32 = USER_REGS32(thread); prev_eip = iss32->eip; stackptr = iss32->ebp; stacklimit = 0xffffffff; bt_vm_map = thread->task->map; - } - else + } else { panic("32-bit trace attempted on 64-bit kernel"); + } for (framecount = 0; framecount < nframes; framecount++) { - *tracebuf++ = prev_eip; if (trace_fp) { *tracebuf++ = stackptr; @@ -531,7 +508,7 @@ machine_trace_thread(thread_t thread, if (stackptr & 0x0000003) { break; } - + if (stackptr <= prevsp) { break; } @@ -550,7 +527,7 @@ machine_trace_thread(thread_t thread, } prev_eip = *(uint32_t *)kern_virt_addr; - + prevsp = stackptr; kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags); @@ -564,14 +541,14 @@ machine_trace_thread(thread_t thread, } } } - + machine_trace_thread_clear_validation_cache(); return (uint32_t) (((char *) tracebuf) - tracepos); } -#define RETURN_OFFSET64 8 +#define RETURN_OFFSET64 8 /* Routine to encapsulate the 64-bit address read hack*/ unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len) @@ -581,13 +558,13 @@ machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len) int machine_trace_thread64(thread_t thread, - char * tracepos, - char * tracebound, - int nframes, - boolean_t user_p, - boolean_t trace_fp, - uint32_t * thread_trace_flags, - uint64_t *sp) + char * tracepos, + char * tracebound, + int nframes, + boolean_t user_p, + boolean_t trace_fp, + uint32_t * thread_trace_flags, + uint64_t *sp) { uint64_t * tracebuf = (uint64_t *)tracepos; unsigned framesize = (trace_fp ? 2 : 1) * sizeof(addr64_t); @@ -603,16 +580,15 @@ machine_trace_thread64(thread_t thread, nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0; if (user_p) { - x86_saved_state64_t *iss64; + x86_saved_state64_t *iss64; iss64 = USER_REGS64(thread); prev_rip = iss64->isf.rip; stackptr = iss64->rbp; bt_vm_map = thread->task->map; - if (sp && user_p) { - *sp = iss64->isf.rsp; - } - } - else { + if (sp && user_p) { + *sp = iss64->isf.rsp; + } + } else { stackptr = STACK_IKS(thread->kernel_stack)->k_rbp; prev_rip = STACK_IKS(thread->kernel_stack)->k_rip; prev_rip = VM_KERNEL_UNSLIDE(prev_rip); @@ -620,7 +596,6 @@ machine_trace_thread64(thread_t thread, } for (framecount = 0; framecount < nframes; framecount++) { - *tracebuf++ = prev_rip; if (trace_fp) { *tracebuf++ = stackptr; @@ -671,5 +646,5 @@ machine_trace_thread64(thread_t thread, void kdp_ml_enter_debugger(void) { - __asm__ __volatile__("int3"); + __asm__ __volatile__ ("int3"); } diff --git a/osfmk/kdp/ml/x86_64/kdp_vm.c b/osfmk/kdp/ml/x86_64/kdp_vm.c index 8f5403edf..8b102b787 100644 --- a/osfmk/kdp/ml/x86_64/kdp_vm.c +++ b/osfmk/kdp/ml/x86_64/kdp_vm.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
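Both tracers walk the conventional frame-pointer chain: each frame holds the caller's frame pointer in the saved %rbp slot and the return address RETURN_OFFSET64 bytes above it. The sanity checks (pointer alignment, strictly increasing frame pointers, staying within translatable memory) are what keep a corrupted chain from looping or wandering. A self-contained sketch over a stack image already copied into local memory; the kernel versions instead translate each frame through machine_trace_thread_get_kva():

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define RETURN_OFFSET64 8

    /* Walk a 64-bit frame chain inside 'stack', which mirrors the address
     * range [base, base + len). Returns the number of return addresses
     * written to out[]. */
    static int
    walk_fp_chain(const uint8_t *stack, uint64_t base, size_t len,
        uint64_t fp, uint64_t *out, int max)
    {
        int n = 0;
        uint64_t prev_fp = 0;

        while (n < max) {
            if (fp & 0x7) {                          /* must stay aligned */
                break;
            }
            if (fp <= prev_fp) {                     /* must move up the stack */
                break;
            }
            if (fp < base || fp + 16 > base + len) { /* must stay in the image */
                break;
            }
            const uint8_t *frame = stack + (fp - base);
            uint64_t next_fp, ret;
            memcpy(&next_fp, frame, sizeof(next_fp));
            memcpy(&ret, frame + RETURN_OFFSET64, sizeof(ret));
            if (ret == 0) {
                break;
            }
            out[n++] = ret;
            prev_fp = fp;
            fp = next_fp;
        }
        return n;
    }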
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -37,9 +37,9 @@ #include #include -int kdp_dump_trap(int type, x86_saved_state64_t *regs); +int kdp_dump_trap(int type, x86_saved_state64_t *regs); -static const x86_state_hdr_t thread_flavor_array [] = { +static const x86_state_hdr_t thread_flavor_array[] = { {x86_THREAD_STATE64, x86_THREAD_STATE64_COUNT} }; @@ -49,9 +49,10 @@ kern_collectth_state_size(uint64_t * tstate_count, uint64_t * ptstate_size) unsigned int i; uint64_t tstate_size = 0; - for (i = 0; i < sizeof(thread_flavor_array)/sizeof(thread_flavor_array[0]); i++) + for (i = 0; i < sizeof(thread_flavor_array) / sizeof(thread_flavor_array[0]); i++) { tstate_size += sizeof(x86_state_hdr_t) + (thread_flavor_array[i].count * sizeof(int)); + } *tstate_count = 1; *ptstate_size = sizeof(struct thread_command) + tstate_size; @@ -60,20 +61,21 @@ kern_collectth_state_size(uint64_t * tstate_count, uint64_t * ptstate_size) void kern_collectth_state(thread_t thread, void *buffer, uint64_t size, void ** iter) { - size_t hoffset; - uint64_t tstate_size, tstate_count; - unsigned int i; - struct thread_command *tc; - + size_t hoffset; + uint64_t tstate_size, tstate_count; + unsigned int i; + struct thread_command *tc; + *iter = NULL; /* * Fill in thread command structure. */ hoffset = 0; - - if (hoffset + sizeof(struct thread_command) > size) + + if (hoffset + sizeof(struct thread_command) > size) { return; + } kern_collectth_state_size(&tstate_count, &tstate_size); tc = (struct thread_command *) ((uintptr_t)buffer + hoffset); @@ -85,18 +87,19 @@ kern_collectth_state(thread_t thread, void *buffer, uint64_t size, void ** iter) * the appropriate thread state struct for each * thread state flavor. */ - for (i = 0; i < sizeof(thread_flavor_array)/sizeof(thread_flavor_array[0]); i++) { - - if (hoffset + sizeof(x86_state_hdr_t) > size) + for (i = 0; i < sizeof(thread_flavor_array) / sizeof(thread_flavor_array[0]); i++) { + if (hoffset + sizeof(x86_state_hdr_t) > size) { return; + } *(x86_state_hdr_t *)((uintptr_t)buffer + hoffset) = thread_flavor_array[i]; hoffset += sizeof(x86_state_hdr_t); - if (hoffset + thread_flavor_array[i].count*sizeof(int) > size) + if (hoffset + thread_flavor_array[i].count * sizeof(int) > size) { return; + } /* Locate and obtain the non-volatile register context * for this kernel thread. 
This should ideally be @@ -130,7 +133,7 @@ kern_collectth_state(thread_t thread, void *buffer, uint64_t size, void ** iter) tstate->cs = cpstate->isf.cs; tstate->fs = cpstate->fs; tstate->gs = cpstate->gs; - } else if ((kstack = thread->kernel_stack) != 0){ + } else if ((kstack = thread->kernel_stack) != 0) { struct x86_kernel_state *iks = STACK_IKS(kstack); tstate->rbx = iks->k_rbx; tstate->rsp = iks->k_rsp; @@ -144,10 +147,10 @@ kern_collectth_state(thread_t thread, void *buffer, uint64_t size, void ** iter) } else { void *tstate = (void *)((uintptr_t)buffer + hoffset); - bzero(tstate, thread_flavor_array[i].count*sizeof(int)); + bzero(tstate, thread_flavor_array[i].count * sizeof(int)); } - hoffset += thread_flavor_array[i].count*sizeof(int); + hoffset += thread_flavor_array[i].count * sizeof(int); } } @@ -158,10 +161,10 @@ kern_collectth_state(thread_t thread, void *buffer, uint64_t size, void ** iter) int kdp_dump_trap( int type, - __unused x86_saved_state64_t *saved_state) + __unused x86_saved_state64_t *saved_state) { - printf ("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type); - kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0)); + printf("An unexpected trap (type %d) occurred during the system dump, terminating.\n", type); + kdp_send_crashdump_pkt(KDP_EOF, NULL, 0, ((void *) 0)); abort_panic_transfer(); kdp_flag &= ~KDP_PANIC_DUMP_ENABLED; kdp_flag &= ~PANIC_CORE_ON_NMI; @@ -170,5 +173,5 @@ kdp_dump_trap( kdp_reset(); kdp_raise_exception(EXC_BAD_ACCESS, 0, 0, kdp.saved_state); - return( 0 ); + return 0; } diff --git a/osfmk/kdp/processor_core.c b/osfmk/kdp/processor_core.c index e1c40e141..6050ad502 100644 --- a/osfmk/kdp/processor_core.c +++ b/osfmk/kdp/processor_core.c @@ -37,8 +37,8 @@ #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING -#define roundup(x, y) ((((x) % (y)) == 0) ? \ - (x) : ((x) + ((y) - ((x) % (y))))) +#define roundup(x, y) ((((x) % (y)) == 0) ? 
\ + (x) : ((x) + ((y) - ((x) % (y))))) /* * The processor_core_context structure describes the current @@ -95,30 +95,36 @@ struct kern_coredump_core *kernel_helper = NULL; static struct kern_coredump_core * kern_register_coredump_helper_internal(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks, - void *refcon, const char *core_description, boolean_t xnu_callback, boolean_t is64bit, - uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype) + void *refcon, const char *core_description, boolean_t xnu_callback, boolean_t is64bit, + uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype) { struct kern_coredump_core *core_helper = NULL; kern_coredump_callback_config *core_callbacks = NULL; - if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) + if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) { return NULL; - if (kc_callbacks == NULL) - return NULL;; - if (core_description == NULL) + } + if (kc_callbacks == NULL) { + return NULL; + } + ; + if (core_description == NULL) { return NULL; + } if (kc_callbacks->kcc_coredump_get_summary == NULL || - kc_callbacks->kcc_coredump_save_segment_descriptions == NULL || - kc_callbacks->kcc_coredump_save_segment_data == NULL || - kc_callbacks->kcc_coredump_save_thread_state == NULL || - kc_callbacks->kcc_coredump_save_sw_vers == NULL) + kc_callbacks->kcc_coredump_save_segment_descriptions == NULL || + kc_callbacks->kcc_coredump_save_segment_data == NULL || + kc_callbacks->kcc_coredump_save_thread_state == NULL || + kc_callbacks->kcc_coredump_save_sw_vers == NULL) { return NULL; + } #if !defined(__LP64__) /* We don't support generating 64-bit cores on 32-bit platforms */ - if (is64bit) + if (is64bit) { return NULL; + } #endif core_helper = kalloc(sizeof(*core_helper)); @@ -161,15 +167,17 @@ kern_register_coredump_helper_internal(int kern_coredump_config_vers, kern_cored kern_return_t kern_register_coredump_helper(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks, - void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic, - cpu_type_t cpu_type, cpu_subtype_t cpu_subtype) + void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic, + cpu_type_t cpu_type, cpu_subtype_t cpu_subtype) { - if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) + if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) { return KERN_RESOURCE_SHORTAGE; + } if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, FALSE, - is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) + is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) { return KERN_INVALID_ARGUMENT; + } return KERN_SUCCESS; } @@ -184,8 +192,9 @@ kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks) #endif if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", TRUE, is64bit, - _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) + _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) { return KERN_FAILURE; + } return KERN_SUCCESS; } @@ -195,16 +204,17 @@ kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks) */ static int coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count, - uint64_t thread_count, uint64_t thread_state_size, - uint64_t misc_bytes_count, void *context) + uint64_t thread_count, uint64_t thread_state_size, + 
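The roundup() macro rounds its first argument up to the next multiple of the second (integer arguments assumed), leaving exact multiples alone. A quick self-check of the arithmetic:

    #include <assert.h>

    #define roundup(x, y) ((((x) % (y)) == 0) ? \
            (x) : ((x) + ((y) - ((x) % (y)))))

    int
    main(void)
    {
        assert(roundup(0, 4096) == 0);
        assert(roundup(1, 4096) == 4096);
        assert(roundup(4096, 4096) == 4096);
        assert(roundup(4097, 4096) == 8192);
        return 0;
    }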
uint64_t misc_bytes_count, void *context) { processor_core_context *core_context = (processor_core_context *)context; uint32_t sizeofcmds = 0, numcmds = 0; int ret = 0; if (!core_segment_count || !core_byte_count || !thread_count || !thread_state_size - || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) + || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) { return KERN_INVALID_ARGUMENT; + } /* Initialize core_context */ core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count; @@ -217,15 +227,15 @@ coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count, #if defined(__LP64__) if (core_context->core_is64bit) { sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) + - (core_context->core_threads_remaining * core_context->core_thread_state_size) + - /* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE); + (core_context->core_threads_remaining * core_context->core_thread_state_size) + + /* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE); core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64); } else #endif /* defined(__LP64__) */ { sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) + - (core_context->core_threads_remaining * core_context->core_thread_state_size) + - /* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE); + (core_context->core_threads_remaining * core_context->core_thread_state_size) + + /* TODO: LC_NOTE */ 0 + sizeof(struct ident_command) + KERN_COREDUMP_VERSIONSTRINGMAXSIZE); core_context->core_header_size = sizeofcmds + sizeof(struct mach_header); } @@ -234,7 +244,7 @@ coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count, core_context->core_cur_foffset = round_page(core_context->core_header_size); numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + /* TODO: LC_NOTE */ 0 + - 1 /* ident command */); + 1 /* ident command */); /* * Reset the zstream and other output context before writing any data out. 
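coredump_save_summary() sizes the entire Mach-O header before any data is written: one segment command per memory segment, one LC_THREAD blob per thread, and one ident command plus a fixed-size version-string reservation, all behind the mach header; file data then begins at the next page boundary (core_cur_foffset = round_page(header size)). The same arithmetic in isolation for the 64-bit case (vers_max stands in for KERN_COREDUMP_VERSIONSTRINGMAXSIZE, whose value is defined elsewhere in the kern_coredump headers):

    #include <stdint.h>
    #include <mach-o/loader.h>

    static uint32_t
    core_header_size_64(uint64_t nsegs, uint64_t nthreads,
        uint64_t thread_state_size, uint32_t vers_max)
    {
        uint32_t sizeofcmds = (uint32_t)(
            nsegs * sizeof(struct segment_command_64) +
            nthreads * thread_state_size +
            sizeof(struct ident_command) + vers_max);
        return (uint32_t)(sizeofcmds + sizeof(struct mach_header_64));
    }

numcmds is the matching count: nsegs + nthreads + 1 for the ident command.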
We do this here @@ -259,7 +269,7 @@ coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count, ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_summary() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n", - core_context->core_outvars, sizeof(core_header), &core_header, ret); + core_context->core_outvars, sizeof(core_header), &core_header, ret); return ret; } @@ -281,7 +291,7 @@ coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count, ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_summary() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n", - core_context->core_outvars, sizeof(core_header), &core_header, ret); + core_context->core_outvars, sizeof(core_header), &core_header, ret); return ret; } @@ -296,7 +306,7 @@ coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count, */ static int coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end, - void *context) + void *context) { processor_core_context *core_context = (processor_core_context *)context; int ret; @@ -304,13 +314,13 @@ coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end, if (seg_end <= seg_start) { kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n", - seg_start, seg_end, context, seg_start, seg_end); + seg_start, seg_end, context, seg_start, seg_end); return KERN_INVALID_ARGUMENT; } if (core_context->core_segments_remaining == 0) { kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : coredump_save_segment_descriptions() called too many times, %llu segment descriptions already recorded\n", - seg_start, seg_end, context, core_context->core_segment_count); + seg_start, seg_end, context, core_context->core_segment_count); return KERN_INVALID_ARGUMENT; } @@ -321,7 +331,7 @@ coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end, if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) { kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n", - seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count); + seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count); return KERN_NO_SPACE; } @@ -339,8 +349,8 @@ coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end, ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu. 
kdp_core_output(%p, %lu, %p) returned error %d\n", - seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining, - core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret); + seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining, + core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret); return ret; } @@ -352,13 +362,13 @@ coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end, if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) { kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n", - seg_start, seg_end, context, seg_start, seg_end); + seg_start, seg_end, context, seg_start, seg_end); return KERN_INVALID_ARGUMENT; } if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) { kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n", - seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count); + seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count); return KERN_NO_SPACE; } @@ -376,8 +386,8 @@ coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end, ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_segment_descriptions(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned error 0x%x\n", - seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining, - core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret); + seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining, + core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret); return ret; } @@ -405,20 +415,20 @@ coredump_save_thread_state(void *thread_state, void *context) if (tc->cmd != LC_THREAD) { kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : found %d expected LC_THREAD (%d)\n", - thread_state, context, tc->cmd, LC_THREAD); + thread_state, context, tc->cmd, LC_THREAD); return KERN_INVALID_ARGUMENT; } if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) { kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : ran out of space to save threads with %llu of %llu remaining\n", - thread_state, context, core_context->core_threads_remaining, core_context->core_thread_count); + thread_state, context, core_context->core_threads_remaining, core_context->core_thread_count); return KERN_NO_SPACE; } ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_thread_state(%p, %p) : failed to write thread data : kdp_core_output(%p, %llu, %p) returned 0x%x\n", - thread_state, context, core_context->core_outvars, core_context->core_thread_state_size, thread_state, ret); + thread_state, context, core_context->core_outvars, core_context->core_thread_state_size, thread_state, ret); return ret; } @@ 
-437,13 +447,13 @@ coredump_save_sw_vers(void *sw_vers, uint64_t length, void *context) if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) { kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : called with invalid length %llu\n", - sw_vers, length, context, length); + sw_vers, length, context, length); return KERN_INVALID_ARGUMENT; } if (core_context->core_cur_hoffset + sizeof(struct ident_command) + length > core_context->core_header_size) { kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : ran out of space to save data\n", - sw_vers, length, context); + sw_vers, length, context); return KERN_NO_SPACE; } @@ -452,14 +462,14 @@ coredump_save_sw_vers(void *sw_vers, uint64_t length, void *context) ret = kdp_core_output(core_context->core_outvars, sizeof(struct ident_command), (caddr_t)&ident); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write ident command : kdp_core_output(%p, %lu, %p) returned 0x%x\n", - sw_vers, length, context, core_context->core_outvars, sizeof(struct ident_command), &ident, ret); + sw_vers, length, context, core_context->core_outvars, sizeof(struct ident_command), &ident, ret); return ret; } ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)sw_vers); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write version string : kdp_core_output(%p, %llu, %p) returned 0x%x\n", - sw_vers, length, context, core_context->core_outvars, length, sw_vers, ret); + sw_vers, length, context, core_context->core_outvars, length, sw_vers, ret); return ret; } @@ -468,7 +478,7 @@ coredump_save_sw_vers(void *sw_vers, uint64_t length, void *context) ret = kdp_core_output(core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), NULL); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_sw_vers(%p, %llu, %p) : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n", - sw_vers, length, context, core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), ret); + sw_vers, length, context, core_context->core_outvars, (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length), ret); return ret; } } @@ -486,15 +496,15 @@ coredump_save_segment_data(void *seg_data, uint64_t length, void *context) if (length > core_context->core_segment_bytes_remaining) { kern_coredump_log(context, "coredump_save_segment_data(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", - seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining, - core_context->core_segment_bytes_remaining); + seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining, + core_context->core_segment_bytes_remaining); return KERN_INVALID_ARGUMENT; } ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data); if (ret != KERN_SUCCESS) { kern_coredump_log(context, "coredump_save_segment_data(%p, %llu, %p) : failed to write data (%llu bytes remaining) :%d\n", - seg_data, length, context, core_context->core_segment_bytes_remaining, ret); + seg_data, length, context, core_context->core_segment_bytes_remaining, ret); return ret; } @@ -528,7 +538,7 @@ kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_cor if (ret == KERN_NODE_DOWN) { kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n"); return KERN_SUCCESS; - } else if (ret != 
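coredump_save_sw_vers() consumes the same amount of space regardless of the string it is given: the ident command, then 'length' bytes of version string, then zero fill out to KERN_COREDUMP_VERSIONSTRINGMAXSIZE, which keeps the offsets computed by coredump_save_summary() valid. The on-disk shape, sketched as a struct (the cmdsize assignment is outside this hunk, and the size constant here is an assumed value for the sketch):

    #include <mach-o/loader.h>

    #define KERN_COREDUMP_VERSIONSTRINGMAXSIZE 256 /* assumed for the sketch */

    struct ident_record_sketch {
        struct ident_command hdr;                      /* cmd = LC_IDENT */
        char vers[KERN_COREDUMP_VERSIONSTRINGMAXSIZE]; /* string + zero fill */
    };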
KERN_SUCCESS) { + } else if (ret != KERN_SUCCESS) { kern_coredump_log(&context, "(kern_coredump_routine) : coredump_init failed with %d\n", ret); return ret; } @@ -548,7 +558,7 @@ kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_cor /* Save the segment descriptions for the segments to be included */ ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions, - &context); + &context); if (ret != KERN_SUCCESS) { kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_descriptions failed with %d\n", ret); return ret; @@ -556,7 +566,7 @@ kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_cor if (context.core_segments_remaining != 0) { kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n", - context.core_segments_remaining, context.core_segment_count); + context.core_segments_remaining, context.core_segment_count); return KERN_FAILURE; } @@ -570,7 +580,7 @@ kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_cor if (context.core_thread_state_size) { char threadstatebuf[context.core_thread_state_size]; ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state, - &context); + &context); if (ret != KERN_SUCCESS) { kern_coredump_log(&context, "(kern_coredump_routine) : save_thread_state failed with %d\n", ret); return ret; @@ -579,7 +589,7 @@ kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_cor if (context.core_threads_remaining != 0) { kern_coredump_log(&context, "(kern_coredump_routine) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n", - context.core_threads_remaining, context.core_thread_count); + context.core_threads_remaining, context.core_thread_count); return KERN_FAILURE; } @@ -596,7 +606,7 @@ kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_cor ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL); if (ret != KERN_SUCCESS) { kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n", - context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret); + context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret); return ret; } @@ -609,7 +619,7 @@ kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_cor if (context.core_segment_bytes_remaining != 0) { kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n", - context.core_segment_bytes_remaining, context.core_segment_byte_total); + context.core_segment_bytes_remaining, context.core_segment_byte_total); return KERN_FAILURE; } @@ -619,13 +629,13 @@ kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_cor ret = kdp_core_output(context.core_outvars, 0, NULL); if (ret != KERN_SUCCESS) { kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", - context.core_outvars, ret); + context.core_outvars, ret); return ret; } 
kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes) overall uncompressed file length %llu bytes.", - current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count, - (context.core_thread_count * context.core_thread_state_size), context.core_file_length); + current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count, + (context.core_thread_count * context.core_thread_state_size), context.core_file_length); if (core_begin_offset) { /* If we're writing to disk (we have a begin offset, we need to update the header */ @@ -684,7 +694,7 @@ kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_ return KERN_FAILURE; } - cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset , &prev_core_length, &header_update_failed); + cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, &header_update_failed); if (cur_ret != KERN_SUCCESS) { // As long as we didn't fail while updating the header for the raw file, we should be able to try // to capture other corefiles. @@ -711,8 +721,8 @@ kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_ kern_return_t kern_register_coredump_helper(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks, void* refcon, - const char *core_description, boolean_t is64bit, uint32_t mh_magic, - cpu_type_t cpu_type, cpu_subtype_t cpu_subtype) + const char *core_description, boolean_t is64bit, uint32_t mh_magic, + cpu_type_t cpu_type, cpu_subtype_t cpu_subtype) { #pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype) return KERN_NOT_SUPPORTED; diff --git a/osfmk/kdp/processor_core.h b/osfmk/kdp/processor_core.h index d0fd89a60..46bf717d3 100644 --- a/osfmk/kdp/processor_core.h +++ b/osfmk/kdp/processor_core.h @@ -58,7 +58,7 @@ __BEGIN_DECLS * error). */ -void kern_coredump_log(void *context, const char *string, ...) __printflike(2,3); +void kern_coredump_log(void *context, const char *string, ...) __printflike(2, 3); /* * The core_save_summary callback is provided with the call to the kcc_coredump_get_summary @@ -75,8 +75,8 @@ void kern_coredump_log(void *context, const char *string, ...) __printflike(2,3) * context -- Passed to kcc_coredump_get_summary_routine */ typedef kern_return_t (*core_save_summary_cb)(uint64_t core_segment_count, uint64_t core_byte_count, - uint64_t thread_count, uint64_t thread_state_size, - uint64_t misc_bytes_count, void *context); + uint64_t thread_count, uint64_t thread_state_size, + uint64_t misc_bytes_count, void *context); /* * The core_save_segment_descriptions callback is provided with the call to the @@ -93,7 +93,7 @@ typedef kern_return_t (*core_save_summary_cb)(uint64_t core_segment_count, uint6 * context -- Passed to kcc_coredump_save_segment_descriptions routine */ typedef kern_return_t (*core_save_segment_descriptions_cb)(uint64_t seg_start, uint64_t seg_end, - void *context); + void *context); /* * The core_save_thread_state callback is provided with the call to the * kcc_coredump_save_thread_state routine that was registered. 
@@ -149,14 +149,14 @@ typedef kern_return_t (*core_save_segment_data_cb)(void *seg_data, uint64_t leng typedef kern_return_t (*core_save_misc_data_cb)(void *misc_data, uint64_t length, void *context); typedef struct { - kern_return_t (*kcc_coredump_init)(void *refcon, void *context); /* OPTIONAL -- return KERN_NODE_DOWN if the co-processor should be skipped */ - kern_return_t (*kcc_coredump_get_summary)(void *refcon, core_save_summary_cb callback, void *context); - kern_return_t (*kcc_coredump_save_segment_descriptions)(void *refcon, core_save_segment_descriptions_cb callback, void *context); - kern_return_t (*kcc_coredump_save_thread_state)(void *refcon, void *buf, core_save_thread_state_cb callback, void *context); - kern_return_t (*kcc_coredump_save_sw_vers)(void *refcon, core_save_sw_vers_cb callback, void *context); - kern_return_t (*kcc_coredump_save_segment_data)(void *refcon, core_save_segment_data_cb callback, void *context); - kern_return_t (*kcc_coredump_save_misc_data)(void *refcon, core_save_misc_data_cb callback, void *context); /* OPTIONAL */ - /* End of version 1 */ + kern_return_t (*kcc_coredump_init)(void *refcon, void *context); /* OPTIONAL -- return KERN_NODE_DOWN if the co-processor should be skipped */ + kern_return_t (*kcc_coredump_get_summary)(void *refcon, core_save_summary_cb callback, void *context); + kern_return_t (*kcc_coredump_save_segment_descriptions)(void *refcon, core_save_segment_descriptions_cb callback, void *context); + kern_return_t (*kcc_coredump_save_thread_state)(void *refcon, void *buf, core_save_thread_state_cb callback, void *context); + kern_return_t (*kcc_coredump_save_sw_vers)(void *refcon, core_save_sw_vers_cb callback, void *context); + kern_return_t (*kcc_coredump_save_segment_data)(void *refcon, core_save_segment_data_cb callback, void *context); + kern_return_t (*kcc_coredump_save_misc_data)(void *refcon, core_save_misc_data_cb callback, void *context); /* OPTIONAL */ + /* End of version 1 */ } kern_coredump_callback_config; #define KERN_COREDUMP_MAX_CORES MACH_CORE_FILEHEADER_MAXFILES @@ -170,7 +170,7 @@ typedef struct { * structure, a description of the core should be provided -- i.e.: AP */ kern_return_t kern_register_coredump_helper(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks, void *refcon, - const char *core_description, boolean_t is64bit, uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype); + const char *core_description, boolean_t is64bit, uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype); #if PRIVATE diff --git a/osfmk/kern/Makefile b/osfmk/kern/Makefile index bf4cc9197..c58d9a7f1 100644 --- a/osfmk/kern/Makefile +++ b/osfmk/kern/Makefile @@ -19,9 +19,11 @@ PRIVATE_DATAFILES = \ debug.h \ ecc.h \ block_hint.h \ + lock_stat.h \ monotonic.h \ arithmetic_128.h \ - turnstile.h + turnstile.h \ + remote_time.h EXPORT_FILES = \ affinity.h \ @@ -47,9 +49,11 @@ EXPORT_FILES = \ ledger.h \ lock.h \ locks.h \ + lock_group.h \ host.h \ mach_param.h \ macro_help.h \ + mpqueue.h \ page_decrypt.h \ pms.h \ policy_internal.h \ @@ -75,7 +79,6 @@ PRIVATE_EXPORT_FILES = \ mach_node_link.h \ copyout_shim.h - XNU_ONLY_EXPORTS = \ cpu_quiesce.h \ ipc_kobject.h \ diff --git a/osfmk/kern/affinity.c b/osfmk/kern/affinity.c index bcce9af9b..c04c05f30 100644 --- a/osfmk/kern/affinity.c +++ b/osfmk/kern/affinity.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. 
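A co-processor driver opts into kernel coredumps by filling in a kern_coredump_callback_config and registering it; at panic time kern_do_coredump() then drives the callbacks in the order documented above. A hedged sketch of a minimal helper (every my_* name is an illustrative stub for a hypothetical co-processor with one segment and one thread; a real helper must stream genuine data through each callback):

    #include <kdp/processor_core.h>
    #include <mach-o/loader.h>

    static kern_return_t
    my_get_summary(void *refcon, core_save_summary_cb cb, void *ctx)
    {
        (void)refcon;
        /* 1 segment of 1 MiB, 1 thread, 512-byte thread blob, no misc data */
        return cb(1, 0x100000, 1, 512, 0, ctx);
    }

    static kern_return_t
    my_save_segdescs(void *refcon, core_save_segment_descriptions_cb cb, void *ctx)
    {
        (void)refcon;
        return cb(0x80000000, 0x80100000, ctx); /* [start, end) of the segment */
    }

    static kern_return_t
    my_save_threads(void *refcon, void *buf, core_save_thread_state_cb cb, void *ctx)
    {
        (void)refcon;
        /* A real helper fills 'buf' (core_thread_state_size bytes) with an
         * LC_THREAD command plus register flavors before calling back. */
        return cb(buf, ctx);
    }

    static kern_return_t
    my_save_vers(void *refcon, core_save_sw_vers_cb cb, void *ctx)
    {
        (void)refcon;
        return cb("my-coproc-1.0", 14, ctx); /* length includes the NUL */
    }

    static kern_return_t
    my_save_segdata(void *refcon, core_save_segment_data_cb cb, void *ctx)
    {
        (void)refcon;
        (void)cb;
        (void)ctx;
        /* A real helper streams the segment bytes through cb(data, len, ctx). */
        return KERN_SUCCESS;
    }

    static kern_coredump_callback_config my_cfg = {
        .kcc_coredump_get_summary               = my_get_summary,
        .kcc_coredump_save_segment_descriptions = my_save_segdescs,
        .kcc_coredump_save_thread_state         = my_save_threads,
        .kcc_coredump_save_sw_vers              = my_save_vers,
        .kcc_coredump_save_segment_data         = my_save_segdata,
        /* kcc_coredump_init and kcc_coredump_save_misc_data are optional */
    };

    /* e.g. from the driver's start routine:
     * kern_register_coredump_helper(KERN_COREDUMP_CONFIG_VERSION, &my_cfg,
     *     NULL, "my-coproc", TRUE, MH_MAGIC_64,
     *     CPU_TYPE_ARM64, CPU_SUBTYPE_ARM64_ALL);
     */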
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,9 +37,9 @@ * shared by a task family, this controls affinity tag lookup and * allocation; it anchors all affinity sets in one namespace * - affinity set: - * anchors all threads with membership of this affinity set + * anchors all threads with membership of this affinity set * and which share an affinity tag in the owning namespace. - * + * * Locking: * - The task lock protects the creation of an affinity namespace. * - The affinity namespace mutex protects the inheritance of a namespace @@ -48,20 +48,20 @@ * - The thread mutex protects a thread's affinity set membership, but in * addition, the thread_lock is taken to write thread->affinity_set since this * field (representing the active affinity set) is read by the scheduler. - * + * * The lock ordering is: task lock, thread mutex, namespace mutex, thread lock. */ #if AFFINITY_DEBUG -#define DBG(x...) kprintf("DBG: " x) +#define DBG(x...) kprintf("DBG: " x) #else #define DBG(x...) #endif struct affinity_space { - lck_mtx_t aspc_lock; - uint32_t aspc_task_count; - queue_head_t aspc_affinities; + lck_mtx_t aspc_lock; + uint32_t aspc_task_count; + queue_head_t aspc_affinities; }; typedef struct affinity_space *affinity_space_t; @@ -85,23 +85,23 @@ static affinity_set_t affinity_set_remove(affinity_set_t aset, thread_t thread); * more important than pset affinity. */ #if CONFIG_EMBEDDED -boolean_t affinity_sets_enabled = FALSE; -int affinity_sets_mapping = 0; +boolean_t affinity_sets_enabled = FALSE; +int affinity_sets_mapping = 0; #else /* !CONFIG_EMBEDDED */ -boolean_t affinity_sets_enabled = TRUE; -int affinity_sets_mapping = 1; +boolean_t affinity_sets_enabled = TRUE; +int affinity_sets_mapping = 1; #endif /* !CONFIG_EMBEDDED */ boolean_t thread_affinity_is_supported(void) { - return (ml_get_max_affinity_sets() != 0); + return ml_get_max_affinity_sets() != 0; } /* - * thread_affinity_get() - * Return the affinity tag for a thread. + * thread_affinity_get() + * Return the affinity tag for a thread. * Called with the thread mutex held. */ uint32_t @@ -109,27 +109,28 @@ thread_affinity_get(thread_t thread) { uint32_t tag; - if (thread->affinity_set != NULL) + if (thread->affinity_set != NULL) { tag = thread->affinity_set->aset_tag; - else + } else { tag = THREAD_AFFINITY_TAG_NULL; + } return tag; } /* - * thread_affinity_set() + * thread_affinity_set() * Place a thread in an affinity set identified by a tag. * Called with thread referenced but not locked.
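/*
 * Userspace counterpart to the tag machinery above: threads opt into a
 * shared affinity tag with THREAD_AFFINITY_POLICY from
 * <mach/thread_policy.h>, which is what ultimately drives
 * thread_affinity_set(). A minimal sketch; the worker pairing and the
 * tag value are invented for illustration.
 */
#include <mach/mach.h>
#include <mach/thread_policy.h>
#include <pthread.h>
#include <stdio.h>

static void
hint_affinity_tag(integer_t tag)
{
    thread_affinity_policy_data_t policy = { .affinity_tag = tag };
    kern_return_t kr = thread_policy_set(pthread_mach_thread_np(pthread_self()),
        THREAD_AFFINITY_POLICY, (thread_policy_t)&policy,
        THREAD_AFFINITY_POLICY_COUNT);
    if (kr != KERN_SUCCESS) {
        /* e.g. unsupported when ml_get_max_affinity_sets() == 0 */
        fprintf(stderr, "thread_policy_set: %d\n", kr);
    }
}

static void *
worker(void *arg)
{
    /* Both halves of a producer/consumer pair pass the same tag, advising
     * the scheduler to co-locate them (a cache-sharing hint). */
    hint_affinity_tag((integer_t)(uintptr_t)arg);
    return NULL;
}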
*/ kern_return_t thread_affinity_set(thread_t thread, uint32_t tag) { - affinity_set_t aset; - affinity_set_t empty_aset = NULL; - affinity_space_t aspc; - affinity_space_t new_aspc = NULL; + affinity_set_t aset; + affinity_set_t empty_aset = NULL; + affinity_space_t aspc; + affinity_space_t new_aspc = NULL; DBG("thread_affinity_set(%p,%u)\n", thread, tag); @@ -138,8 +139,9 @@ thread_affinity_set(thread_t thread, uint32_t tag) if (aspc == NULL) { task_unlock(thread->task); new_aspc = affinity_space_alloc(); - if (new_aspc == NULL) + if (new_aspc == NULL) { return KERN_RESOURCE_SHORTAGE; + } task_lock(thread->task); if (thread->task->affinity_space == NULL) { thread->task->affinity_space = new_aspc; @@ -148,8 +150,9 @@ thread_affinity_set(thread_t thread, uint32_t tag) aspc = thread->task->affinity_space; } task_unlock(thread->task); - if (new_aspc) + if (new_aspc) { affinity_space_free(new_aspc); + } thread_mtx_lock(thread); if (!thread->active) { @@ -165,7 +168,7 @@ thread_affinity_set(thread_t thread, uint32_t tag) * Remove thread from current affinity set */ DBG("thread_affinity_set(%p,%u) removing from aset %p\n", - thread, tag, aset); + thread, tag, aset); empty_aset = affinity_set_remove(aset, thread); } @@ -176,7 +179,7 @@ thread_affinity_set(thread_t thread, uint32_t tag) * Add thread to existing affinity set */ DBG("thread_affinity_set(%p,%u) found aset %p\n", - thread, tag, aset); + thread, tag, aset); } else { /* * Use the new affinity set, add this thread @@ -194,7 +197,7 @@ thread_affinity_set(thread_t thread, uint32_t tag) } } DBG("thread_affinity_set(%p,%u) (re-)using aset %p\n", - thread, tag, aset); + thread, tag, aset); aset->aset_tag = tag; affinity_set_place(aspc, aset); } @@ -208,11 +211,13 @@ thread_affinity_set(thread_t thread, uint32_t tag) * If we wound up not using an empty aset we created, * free it here. */ - if (empty_aset != NULL) + if (empty_aset != NULL) { affinity_set_free(empty_aset); + } - if (thread == current_thread()) - thread_block(THREAD_CONTINUE_NULL); + if (thread == current_thread()) { + thread_block(THREAD_CONTINUE_NULL); + } return KERN_SUCCESS; } @@ -224,7 +229,7 @@ thread_affinity_set(thread_t thread, uint32_t tag) void task_affinity_create(task_t parent_task, task_t child_task) { - affinity_space_t aspc = parent_task->affinity_space; + affinity_space_t aspc = parent_task->affinity_space; DBG("task_affinity_create(%p,%p)\n", parent_task, child_task); @@ -245,12 +250,12 @@ task_affinity_create(task_t parent_task, task_t child_task) * Called from task_deallocate() when there's a namespace to dereference. 
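/*
 * thread_affinity_set() above follows the classic optimistic-allocation
 * pattern: drop the task lock, allocate the namespace, retake the lock,
 * recheck, and free the spare if another thread installed one first. A
 * standalone pthread rendition of the same shape; container_t/widget_t
 * and the -1 error code are invented.
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct { int value; } widget_t;

typedef struct {
    pthread_mutex_t lock;
    widget_t *widget;
} container_t;

static int
ensure_widget(container_t *c)
{
    widget_t *fresh = NULL;

    pthread_mutex_lock(&c->lock);
    if (c->widget == NULL) {
        pthread_mutex_unlock(&c->lock); /* never allocate under the lock */
        fresh = calloc(1, sizeof(*fresh));
        if (fresh == NULL) {
            return -1;                  /* cf. KERN_RESOURCE_SHORTAGE */
        }
        pthread_mutex_lock(&c->lock);
        if (c->widget == NULL) {        /* recheck after reacquiring */
            c->widget = fresh;
            fresh = NULL;               /* ownership transferred */
        }
    }
    pthread_mutex_unlock(&c->lock);
    free(fresh);                        /* no-op unless we lost the race */
    return 0;
}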
*/ void -task_affinity_deallocate(task_t task) +task_affinity_deallocate(task_t task) { - affinity_space_t aspc = task->affinity_space; + affinity_space_t aspc = task->affinity_space; DBG("task_affinity_deallocate(%p) aspc %p task_count %d\n", - task, aspc, aspc->aspc_task_count); + task, aspc, aspc->aspc_task_count); lck_mtx_lock(&aspc->aspc_lock); if (--(aspc->aspc_task_count) == 0) { @@ -270,13 +275,13 @@ task_affinity_deallocate(task_t task) */ kern_return_t task_affinity_info( - task_t task, - task_info_t task_info_out, - mach_msg_type_number_t *task_info_count) + task_t task, + task_info_t task_info_out, + mach_msg_type_number_t *task_info_count) { - affinity_set_t aset; - affinity_space_t aspc; - task_affinity_tag_info_t info; + affinity_set_t aset; + affinity_space_t aspc; + task_affinity_tag_info_t info; *task_info_count = TASK_AFFINITY_TAG_INFO_COUNT; info = (task_affinity_tag_info_t) task_info_out; @@ -289,14 +294,16 @@ task_affinity_info( if (aspc) { lck_mtx_lock(&aspc->aspc_lock); queue_iterate(&aspc->aspc_affinities, - aset, affinity_set_t, aset_affinities) { + aset, affinity_set_t, aset_affinities) { info->set_count++; if (info->min == THREAD_AFFINITY_TAG_NULL || - aset->aset_tag < (uint32_t) info->min) + aset->aset_tag < (uint32_t) info->min) { info->min = aset->aset_tag; + } if (info->max == THREAD_AFFINITY_TAG_NULL || - aset->aset_tag > (uint32_t) info->max) + aset->aset_tag > (uint32_t) info->max) { info->max = aset->aset_tag; + } } info->task_count = aspc->aspc_task_count; lck_mtx_unlock(&aspc->aspc_lock); @@ -312,8 +319,8 @@ task_affinity_info( void thread_affinity_dup(thread_t parent, thread_t child) { - affinity_set_t aset; - affinity_space_t aspc; + affinity_set_t aset; + affinity_space_t aspc; thread_mtx_lock(parent); aset = parent->affinity_set; @@ -335,15 +342,15 @@ thread_affinity_dup(thread_t parent, thread_t child) } /* - * thread_affinity_terminate() + * thread_affinity_terminate() * Remove thread from any affinity set. * Called with the thread mutex locked. */ void thread_affinity_terminate(thread_t thread) { - affinity_set_t aset = thread->affinity_set; - affinity_space_t aspc; + affinity_set_t aset = thread->affinity_set; + affinity_space_t aspc; DBG("thread_affinity_terminate(%p)\n", thread); @@ -363,21 +370,23 @@ thread_affinity_terminate(thread_t thread) void thread_affinity_exec(thread_t thread) { - if (thread->affinity_set != AFFINITY_SET_NULL) + if (thread->affinity_set != AFFINITY_SET_NULL) { thread_affinity_terminate(thread); + } } /* * Create an empty affinity namespace data structure. */ static affinity_space_t -affinity_space_alloc(void) +affinity_space_alloc(void) { - affinity_space_t aspc; + affinity_space_t aspc; aspc = (affinity_space_t) kalloc(sizeof(struct affinity_space)); - if (aspc == NULL) + if (aspc == NULL) { return NULL; + } lck_mtx_init(&aspc->aspc_lock, &task_lck_grp, &task_lck_attr); queue_init(&aspc->aspc_affinities); @@ -406,13 +415,14 @@ affinity_space_free(affinity_space_t aspc) * entering it into a list anchored by the owning task. 
*/ static affinity_set_t -affinity_set_alloc(void) +affinity_set_alloc(void) { - affinity_set_t aset; + affinity_set_t aset; aset = (affinity_set_t) kalloc(sizeof(struct affinity_set)); - if (aset == NULL) + if (aset == NULL) { return NULL; + } aset->aset_thread_count = 0; queue_init(&aset->aset_affinities); @@ -445,11 +455,11 @@ affinity_set_free(affinity_set_t aset) static void affinity_set_add(affinity_set_t aset, thread_t thread) { - spl_t s; + spl_t s; DBG("affinity_set_add(%p,%p)\n", aset, thread); queue_enter(&aset->aset_threads, - thread, thread_t, affinity_threads); + thread, thread_t, affinity_threads); aset->aset_thread_count++; s = splsched(); thread_lock(thread); @@ -465,7 +475,7 @@ affinity_set_add(affinity_set_t aset, thread_t thread) static affinity_set_t affinity_set_remove(affinity_set_t aset, thread_t thread) { - spl_t s; + spl_t s; s = splsched(); thread_lock(thread); @@ -475,10 +485,10 @@ affinity_set_remove(affinity_set_t aset, thread_t thread) aset->aset_thread_count--; queue_remove(&aset->aset_threads, - thread, thread_t, affinity_threads); + thread, thread_t, affinity_threads); if (queue_empty(&aset->aset_threads)) { queue_remove(&aset->aset_space->aspc_affinities, - aset, affinity_set_t, aset_affinities); + aset, affinity_set_t, aset_affinities); assert(aset->aset_thread_count == 0); aset->aset_tag = THREAD_AFFINITY_TAG_NULL; aset->aset_num = 0; @@ -499,13 +509,13 @@ affinity_set_remove(affinity_set_t aset, thread_t thread) static affinity_set_t affinity_set_find(affinity_space_t space, uint32_t tag) { - affinity_set_t aset; + affinity_set_t aset; queue_iterate(&space->aspc_affinities, - aset, affinity_set_t, aset_affinities) { + aset, affinity_set_t, aset_affinities) { if (aset->aset_tag == tag) { DBG("affinity_set_find(%p,%u) finds %p\n", - space, tag, aset); + space, tag, aset); return aset; } } @@ -523,25 +533,27 @@ affinity_set_find(affinity_space_t space, uint32_t tag) static void affinity_set_place(affinity_space_t aspc, affinity_set_t new_aset) { - unsigned int num_cpu_asets = ml_get_max_affinity_sets(); - unsigned int set_occupancy[num_cpu_asets]; - unsigned int i; - unsigned int i_least_occupied; - affinity_set_t aset; + unsigned int num_cpu_asets = ml_get_max_affinity_sets(); + unsigned int set_occupancy[num_cpu_asets]; + unsigned int i; + unsigned int i_least_occupied; + affinity_set_t aset; - for (i = 0; i < num_cpu_asets; i++) + for (i = 0; i < num_cpu_asets; i++) { set_occupancy[i] = 0; + } /* * Scan the affinity sets calculating the number of sets * that occupy the available physical affinities. */ queue_iterate(&aspc->aspc_affinities, - aset, affinity_set_t, aset_affinities) { - if(aset->aset_num < num_cpu_asets) + aset, affinity_set_t, aset_affinities) { + if (aset->aset_num < num_cpu_asets) { set_occupancy[aset->aset_num]++; - else + } else { panic("aset_num = %d in %s\n", aset->aset_num, __FUNCTION__); + } } /* @@ -551,18 +563,20 @@ affinity_set_place(affinity_space_t aspc, affinity_set_t new_aset) * [(unsigned int)aspc % 127] % num_cpu_asets * unless this mapping policy is overridden.
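/*
 * Standalone rendition of the placement policy implemented by
 * affinity_set_place() above: take the first empty slot, otherwise the
 * least-occupied one, scanning from a start slot derived from the
 * namespace address so different namespaces spread out. Runnable sketch;
 * the function name and sample data are invented.
 */
#include <assert.h>
#include <stdint.h>

static unsigned int
pick_affinity_slot(const unsigned int occupancy[], unsigned int nslots,
    uintptr_t namespace_cookie, int randomize_start)
{
    unsigned int start = randomize_start ?
        (unsigned int)((namespace_cookie % 127) % nslots) : 0;
    unsigned int best = start;

    for (unsigned int i = 0; i < nslots; i++) {
        unsigned int j = (start + i) % nslots;
        if (occupancy[j] == 0) {
            return j;                   /* first empty slot wins */
        }
        if (occupancy[j] < occupancy[best]) {
            best = j;
        }
    }
    return best;                        /* otherwise, the least occupied */
}

int
main(void)
{
    unsigned int occ[4] = { 2, 0, 1, 3 };
    /* 0xdead % 127 == 109, 109 % 4 == 1, and slot 1 is empty. */
    assert(pick_affinity_slot(occ, 4, (uintptr_t)0xdead, 1) == 1);
    return 0;
}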
*/ - if (affinity_sets_mapping == 0) + if (affinity_sets_mapping == 0) { i_least_occupied = 0; - else + } else { i_least_occupied = (unsigned int)(((uintptr_t)aspc % 127) % num_cpu_asets); + } for (i = 0; i < num_cpu_asets; i++) { - unsigned int j = (i_least_occupied + i) % num_cpu_asets; + unsigned int j = (i_least_occupied + i) % num_cpu_asets; if (set_occupancy[j] == 0) { i_least_occupied = j; break; } - if (set_occupancy[j] < set_occupancy[i_least_occupied]) + if (set_occupancy[j] < set_occupancy[i_least_occupied]) { i_least_occupied = j; + } } new_aset->aset_num = i_least_occupied; new_aset->aset_pset = ml_affinity_to_pset(i_least_occupied); @@ -570,7 +584,7 @@ affinity_set_place(affinity_space_t aspc, affinity_set_t new_aset) /* Add the new affinity set to the group */ new_aset->aset_space = aspc; queue_enter(&aspc->aspc_affinities, - new_aset, affinity_set_t, aset_affinities); + new_aset, affinity_set_t, aset_affinities); DBG("affinity_set_place(%p,%p) selected affinity %u pset %p\n", aspc, new_aset, new_aset->aset_num, new_aset->aset_pset); diff --git a/osfmk/kern/affinity.h b/osfmk/kern/affinity.h index 98f46cae7..6c1cb2d47 100644 --- a/osfmk/kern/affinity.h +++ b/osfmk/kern/affinity.h @@ -2,7 +2,7 @@ * Copyright (c) 2007-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -#ifndef _KERN_AFFINITY_H_ -#define _KERN_AFFINITY_H_ +#ifndef _KERN_AFFINITY_H_ +#define _KERN_AFFINITY_H_ -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -44,34 +44,34 @@ * Affinity sets are used to advise (hint) thread placement. 
*/ struct affinity_set { - struct affinity_space *aset_space; /* namespace */ - queue_chain_t aset_affinities; /* links affinities in group */ - queue_head_t aset_threads; /* threads in affinity set */ - uint32_t aset_thread_count; /* num threads in set */ - uint32_t aset_tag; /* user-assigned tag */ - uint32_t aset_num; /* kernel-assigned affinity */ - processor_set_t aset_pset; /* processor set */ + struct affinity_space *aset_space; /* namespace */ + queue_chain_t aset_affinities; /* links affinities in group */ + queue_head_t aset_threads; /* threads in affinity set */ + uint32_t aset_thread_count; /* num threads in set */ + uint32_t aset_tag; /* user-assigned tag */ + uint32_t aset_num; /* kernel-assigned affinity */ + processor_set_t aset_pset; /* processor set */ }; -extern boolean_t thread_affinity_is_supported(void); -extern void thread_affinity_dup(thread_t parent, thread_t child); -extern void thread_affinity_terminate(thread_t thread); -extern void task_affinity_create( - task_t, - task_t); -extern void task_affinity_deallocate( - task_t); -extern kern_return_t task_affinity_info( - task_t, - task_info_t, - mach_msg_type_number_t *); +extern boolean_t thread_affinity_is_supported(void); +extern void thread_affinity_dup(thread_t parent, thread_t child); +extern void thread_affinity_terminate(thread_t thread); +extern void task_affinity_create( + task_t, + task_t); +extern void task_affinity_deallocate( + task_t); +extern kern_return_t task_affinity_info( + task_t, + task_info_t, + mach_msg_type_number_t *); -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -extern kern_return_t thread_affinity_set(thread_t thread, uint32_t tag); -extern uint32_t thread_affinity_get(thread_t thread); -extern void thread_affinity_exec(thread_t thread); +extern kern_return_t thread_affinity_set(thread_t thread, uint32_t tag); +extern uint32_t thread_affinity_get(thread_t thread); +extern void thread_affinity_exec(thread_t thread); -#endif /* _KERN_AFFINITY_H_ */ +#endif /* _KERN_AFFINITY_H_ */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/kern/arithmetic_128.h b/osfmk/kern/arithmetic_128.h index 8cff16692..550e5546a 100644 --- a/osfmk/kern/arithmetic_128.h +++ b/osfmk/kern/arithmetic_128.h @@ -2,14 +2,14 @@ * Copyright (c) 1999, 2003, 2006, 2007, 2010 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,15 +17,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ /* * Code duplicated from Libc/gen/nanosleep.c */ -#ifndef _ARITHMETIC_128_H_ -#define _ARITHMETIC_128_H_ +#ifndef _ARITHMETIC_128_H_ +#define _ARITHMETIC_128_H_ #include @@ -42,52 +42,53 @@ multi_overflow(uint64_t a, uint64_t b) #else typedef struct { - uint64_t high; - uint64_t low; + uint64_t high; + uint64_t low; } uint128_data_t; /* 128-bit addition: acc += add */ static __inline void add128_128(uint128_data_t *acc, uint128_data_t *add) { - acc->high += add->high; - acc->low += add->low; - if(acc->low < add->low) - acc->high++; // carry + acc->high += add->high; + acc->low += add->low; + if (acc->low < add->low) { + acc->high++; // carry + } } /* 64x64 -> 128 bit multiplication */ static __inline void mul64x64(uint64_t x, uint64_t y, uint128_data_t *prod) { - uint128_data_t add; - /* - * Split the two 64-bit multiplicands into 32-bit parts: - * x => 2^32 * x1 + x2 - * y => 2^32 * y1 + y2 - */ - uint32_t x1 = (uint32_t)(x >> 32); - uint32_t x2 = (uint32_t)x; - uint32_t y1 = (uint32_t)(y >> 32); - uint32_t y2 = (uint32_t)y; - /* - * direct multiplication: - * x * y => 2^64 * (x1 * y1) + 2^32 (x1 * y2 + x2 * y1) + (x2 * y2) - * The first and last terms are direct assignmenet into the uint128_t - * structure. Then we add the middle two terms separately, to avoid - * 64-bit overflow. (We could use the Karatsuba algorithm to save - * one multiply, but it is harder to deal with 64-bit overflows.) - */ - prod->high = (uint64_t)x1 * (uint64_t)y1; - prod->low = (uint64_t)x2 * (uint64_t)y2; - add.low = (uint64_t)x1 * (uint64_t)y2; - add.high = (add.low >> 32); - add.low <<= 32; - add128_128(prod, &add); - add.low = (uint64_t)x2 * (uint64_t)y1; - add.high = (add.low >> 32); - add.low <<= 32; - add128_128(prod, &add); + uint128_data_t add; + /* + * Split the two 64-bit multiplicands into 32-bit parts: + * x => 2^32 * x1 + x2 + * y => 2^32 * y1 + y2 + */ + uint32_t x1 = (uint32_t)(x >> 32); + uint32_t x2 = (uint32_t)x; + uint32_t y1 = (uint32_t)(y >> 32); + uint32_t y2 = (uint32_t)y; + /* + * direct multiplication: + * x * y => 2^64 * (x1 * y1) + 2^32 (x1 * y2 + x2 * y1) + (x2 * y2) + * The first and last terms are direct assignmenet into the uint128_t + * structure. Then we add the middle two terms separately, to avoid + * 64-bit overflow. (We could use the Karatsuba algorithm to save + * one multiply, but it is harder to deal with 64-bit overflows.) + */ + prod->high = (uint64_t)x1 * (uint64_t)y1; + prod->low = (uint64_t)x2 * (uint64_t)y2; + add.low = (uint64_t)x1 * (uint64_t)y2; + add.high = (add.low >> 32); + add.low <<= 32; + add128_128(prod, &add); + add.low = (uint64_t)x2 * (uint64_t)y1; + add.high = (add.low >> 32); + add.low <<= 32; + add128_128(prod, &add); } static __inline uint64_t @@ -99,4 +100,4 @@ multi_overflow(uint64_t a, uint64_t b) } #endif /* __LP64__ */ -#endif /* _ARITHMETIC_128_H_ */ +#endif /* _ARITHMETIC_128_H_ */ diff --git a/osfmk/kern/assert.h b/osfmk/kern/assert.h index f35eadc43..e6da6c5a3 100644 --- a/osfmk/kern/assert.h +++ b/osfmk/kern/assert.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,58 +22,58 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _KERN_ASSERT_H_ -#define _KERN_ASSERT_H_ +#ifndef _KERN_ASSERT_H_ +#define _KERN_ASSERT_H_ /* assert.h 4.2 85/01/21 */ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #endif __BEGIN_DECLS /* Assert error */ -extern void Assert( - const char *file, - int line, - const char *expression) __attribute__((noinline)); +extern void Assert( + const char *file, + int line, + const char *expression) __attribute__((noinline)); extern int kext_assertions_enable; @@ -82,10 +82,10 @@ extern int kext_assertions_enable; __END_DECLS #ifndef APPLE_KEXT_ASSERTIONS -#define APPLE_KEXT_ASSERTIONS 0 +#define APPLE_KEXT_ASSERTIONS 0 #endif -#if MACH_ASSERT +#if MACH_ASSERT #define assert(ex) \ (__builtin_expect(!!((ex)), 1L) ? (void)0 : Assert(__FILE__, __LINE__, # ex)) @@ -93,7 +93,7 @@ __END_DECLS (__builtin_expect(!!((ex)), 1L) ? (void)0 : __Panic("%s:%d Assertion failed: %s : " fmt, __FILE__, __LINE__, # ex, ##args)) #define __assert_only -#elif APPLE_KEXT_ASSERTIONS && !XNU_KERNEL_PRIVATE /* MACH_ASSERT */ +#elif APPLE_KEXT_ASSERTIONS && !XNU_KERNEL_PRIVATE /* MACH_ASSERT */ #define assert(ex) \ (__builtin_expect(!!(((!kext_assertions_enable) || (ex))), 1L) ? (void)0 : Assert(__FILE__, __LINE__, # ex)) @@ -101,13 +101,13 @@ __END_DECLS (__builtin_expect(!!(((!kext_assertions_enable) || (ex))), 1L) ? 
(void)0 : __Panic("%s:%d Assertion failed: %s : " fmt, __FILE__, __LINE__, # ex, ##args)) #define __assert_only -#else /* APPLE_KEXT_ASSERTIONS && !XNU_KERNEL_PRIVATE */ +#else /* APPLE_KEXT_ASSERTIONS && !XNU_KERNEL_PRIVATE */ #define assert(ex) ((void)0) #define assertf(ex, fmt, args...) ((void)0) #define __assert_only __unused -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ /* * static_assert is a C11 / C++0x / C++1z feature. @@ -127,19 +127,19 @@ __END_DECLS #endif #else #if !defined(__cpp_static_assert) - /* pre C++11 support */ +/* pre C++11 support */ #define _STATIC_ASSERT_OVERLOADED_MACRO(_1, _2, NAME, ...) NAME #define static_assert(...) _STATIC_ASSERT_OVERLOADED_MACRO(__VA_ARGS__, _static_assert_2_args, _static_assert_1_arg)(__VA_ARGS__) #define _static_assert_2_args(ex, str) _Static_assert((ex), str) #define _static_assert_1_arg(ex) _Static_assert((ex), #ex) #else - /* - * C++11 only supports the 2 argument version of static_assert. - * C++1z has added support for the 1 argument version. - */ +/* + * C++11 only supports the 2 argument version of static_assert. + * C++1z has added support for the 1 argument version. + */ #define _static_assert_1_arg(ex) static_assert((ex), #ex) #endif #endif -#endif /* _KERN_ASSERT_H_ */ +#endif /* _KERN_ASSERT_H_ */ diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c index a75d45dff..21fb3f554 100644 --- a/osfmk/kern/ast.c +++ b/osfmk/kern/ast.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
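/*
 * Back in arithmetic_128.h above: mul64x64() splits each 64-bit operand
 * into 32-bit halves and sums the partial products. A quick host-side
 * check of that identity against the compiler's native __uint128_t
 * (assumes clang or gcc on an LP64 host; the test values are arbitrary):
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint64_t x = 0xFEDCBA9876543210ULL, y = 0x0F1E2D3C4B5A6978ULL;
    uint64_t x1 = x >> 32, x2 = (uint32_t)x;    /* x == 2^32*x1 + x2 */
    uint64_t y1 = y >> 32, y2 = (uint32_t)y;    /* y == 2^32*y1 + y2 */

    /* x*y == 2^64*(x1*y1) + 2^32*(x1*y2 + x2*y1) + x2*y2 */
    __uint128_t p = ((__uint128_t)(x1 * y1) << 64)
        + ((__uint128_t)(x1 * y2) << 32)
        + ((__uint128_t)(x2 * y1) << 32)
        + (__uint128_t)(x2 * y2);

    assert(p == (__uint128_t)x * y);
    printf("high=0x%016llx low=0x%016llx\n",
        (unsigned long long)(p >> 64), (unsigned long long)(uint64_t)p);
    return 0;
}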
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -107,8 +107,9 @@ ast_taken_kernel(void) * It's possible for this to be called after AST_URGENT * has already been handled, due to races in enable_preemption */ - if (ast_peek(AST_URGENT) != AST_URGENT) + if (ast_peek(AST_URGENT) != AST_URGENT) { return; + } /* * Don't preempt if the thread is already preparing to block. @@ -239,7 +240,9 @@ ast_taken_user(void) if (reasons & AST_KEVENT) { thread_ast_clear(thread, AST_KEVENT); uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0); - if (bits) kevent_ast(thread, bits); + if (bits) { + kevent_ast(thread, bits); + } } #if CONFIG_TELEMETRY @@ -280,7 +283,7 @@ ast_taken_user(void) /* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */ thread_lock(thread); - preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM)); + preemption_reasons = csw_check(thread, current_processor(), (preemption_reasons & AST_QUANTUM)); thread_unlock(thread); #if CONFIG_SCHED_SFI @@ -322,58 +325,6 @@ ast_taken_user(void) assert(thread->rwlock_count == 0); } -/* - * Handle preemption IPI or IPI in response to setting an AST flag - * Triggered by cause_ast_check - * Called at splsched - */ -void -ast_check(processor_t processor) -{ - if (processor->state != PROCESSOR_RUNNING && - processor->state != PROCESSOR_SHUTDOWN) - return; - - thread_t thread = processor->active_thread; - - assert(thread == current_thread()); - - thread_lock(thread); - - /* - * Propagate thread ast to processor. - * (handles IPI in response to setting AST flag) - */ - ast_propagate(thread); - - boolean_t needs_callout = false; - processor->current_pri = thread->sched_pri; - processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread); - processor->current_recommended_pset_type = recommended_pset_type(thread); - perfcontrol_class_t thread_class = thread_get_perfcontrol_class(thread); - if (thread_class != processor->current_perfctl_class) { - /* We updated the perfctl class of this thread from another core. - * Since we dont do CLPC callouts from another core, do a callout - * here to let CLPC know that the currently running thread has a new - * class. - */ - needs_callout = true; - } - processor->current_perfctl_class = thread_class; - - ast_t preempt; - - if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE) - ast_on(preempt); - - thread_unlock(thread); - - if (needs_callout) { - machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE, - mach_approximate_time(), 0, thread); - } -} - /* * Set AST flags on current processor * Called at splsched @@ -456,5 +407,3 @@ ast_dtrace_on(void) { ast_on(AST_DTRACE); } - - diff --git a/osfmk/kern/ast.h b/osfmk/kern/ast.h index 1fd6916ab..23fe59218 100644 --- a/osfmk/kern/ast.h +++ b/osfmk/kern/ast.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -108,39 +108,39 @@ typedef uint32_t ast_t; * TODO: Split the context switch and return-to-user AST namespaces * NOTE: Some of these are exported as the 'reason' code in scheduler tracepoints */ -#define AST_PREEMPT 0x01 -#define AST_QUANTUM 0x02 -#define AST_URGENT 0x04 -#define AST_HANDOFF 0x08 -#define AST_YIELD 0x10 -#define AST_APC 0x20 /* migration APC hook */ -#define AST_LEDGER 0x40 -#define AST_BSD 0x80 -#define AST_KPERF 0x100 /* kernel profiling */ -#define AST_MACF 0x200 /* MACF user ret pending */ +#define AST_PREEMPT 0x01 +#define AST_QUANTUM 0x02 +#define AST_URGENT 0x04 +#define AST_HANDOFF 0x08 +#define AST_YIELD 0x10 +#define AST_APC 0x20 /* migration APC hook */ +#define AST_LEDGER 0x40 +#define AST_BSD 0x80 +#define AST_KPERF 0x100 /* kernel profiling */ +#define AST_MACF 0x200 /* MACF user ret pending */ /* 0x400, 0x800 unused */ -#define AST_GUARD 0x1000 -#define AST_TELEMETRY_USER 0x2000 /* telemetry sample requested on interrupt from userspace */ -#define AST_TELEMETRY_KERNEL 0x4000 /* telemetry sample requested on interrupt from kernel */ -#define AST_TELEMETRY_PMI 0x8000 /* telemetry sample requested on PMI */ -#define AST_SFI 0x10000 /* Evaluate if SFI wait is needed before return to userspace */ -#define AST_DTRACE 0x20000 -#define AST_TELEMETRY_IO 0x40000 /* telemetry sample requested for I/O */ -#define AST_KEVENT 0x80000 +#define AST_GUARD 0x1000 +#define AST_TELEMETRY_USER 0x2000 /* telemetry sample requested on interrupt from userspace */ +#define AST_TELEMETRY_KERNEL 0x4000 /* telemetry sample requested on interrupt from kernel */ +#define AST_TELEMETRY_PMI 0x8000 /* telemetry sample requested on PMI */ +#define AST_SFI 0x10000 /* Evaluate if SFI wait is needed before return to userspace */ +#define AST_DTRACE 0x20000 +#define AST_TELEMETRY_IO 0x40000 /* telemetry sample requested for I/O */ +#define AST_KEVENT 0x80000 #define AST_REBALANCE 0x100000 /* thread context switched due to rebalancing */ #define AST_UNQUIESCE 0x200000 /* catch unquiesced processor before returning to userspace */ -#define AST_NONE 0x00 -#define AST_ALL (~AST_NONE) +#define AST_NONE 0x00 +#define AST_ALL (~AST_NONE) -#define AST_SCHEDULING (AST_PREEMPTION | AST_YIELD | AST_HANDOFF) -#define AST_PREEMPTION (AST_PREEMPT | AST_QUANTUM | AST_URGENT) +#define AST_SCHEDULING (AST_PREEMPTION | AST_YIELD | AST_HANDOFF) +#define AST_PREEMPTION (AST_PREEMPT | AST_QUANTUM | AST_URGENT) #define AST_TELEMETRY_ALL (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL | \ - AST_TELEMETRY_PMI | AST_TELEMETRY_IO) + AST_TELEMETRY_PMI | AST_TELEMETRY_IO) /* Per-thread ASTs follow the thread at context-switch time. */ -#define AST_PER_THREAD (AST_APC | AST_BSD | AST_MACF | AST_LEDGER | AST_GUARD | AST_TELEMETRY_ALL | AST_KEVENT) +#define AST_PER_THREAD (AST_APC | AST_BSD | AST_MACF | AST_LEDGER | AST_GUARD | AST_TELEMETRY_ALL | AST_KEVENT) /* Handle AST_URGENT detected while in the kernel */ extern void ast_taken_kernel(void); diff --git a/osfmk/kern/audit_sessionport.c b/osfmk/kern/audit_sessionport.c index 9d3bd7ab9..d67c3edaf 100644 --- a/osfmk/kern/audit_sessionport.c +++ b/osfmk/kern/audit_sessionport.c @@ -39,14 +39,14 @@ * Description: Obtain a send right for given audit session. * * Parameters: *aia_p Audit session information to assosiate with - * the new port. - * *sessionport Pointer to the current session port. This may - * actually be set to IPC_PORT_NULL. + * the new port. + * *sessionport Pointer to the current session port. This may + * actually be set to IPC_PORT_NULL. 
+ * + * Returns: !NULL Resulting send right. + * NULL Failed to allocate port (due to lack of memory + * resources). * - * Returns: !NULL Resulting send right. - * NULL Failed to allocate port (due to lack of memory - * resources). - * Assumptions: Caller holds a reference on the session during the call. * If there were no outstanding send rights against the port, * hold a reference on the session and arm a new no-senders @@ -66,11 +66,13 @@ audit_session_mksend(struct auditinfo_addr *aia_p, ipc_port_t *sessionport) port = *sessionport; if (!IP_VALID(port)) { ipc_port_t new_port = ipc_port_alloc_kernel(); - if (!IP_VALID(new_port)) + if (!IP_VALID(new_port)) { return new_port; + } ipc_kobject_set(new_port, (ipc_kobject_t)aia_p, IKOT_AU_SESSIONPORT); - if (!OSCompareAndSwapPtr(port, new_port, sessionport)) + if (!OSCompareAndSwapPtr(port, new_port, sessionport)) { ipc_port_dealloc_kernel(new_port); + } port = *sessionport; } @@ -101,7 +103,7 @@ audit_session_mksend(struct auditinfo_addr *aia_p, ipc_port_t *sessionport) } } - return (sendport); + return sendport; } @@ -109,11 +111,11 @@ audit_session_mksend(struct auditinfo_addr *aia_p, ipc_port_t *sessionport) * audit_session_porttoaia * * Description: Obtain the audit session info associated with the given port. - + * * Parameters: port A Mach port. * * Returns: NULL The given Mach port did not reference audit - * session info. + * session info. * !NULL The audit session info that is associated with * the Mach port. * @@ -133,7 +135,7 @@ audit_session_porttoaia(ipc_port_t port) ip_unlock(port); } - return (aia_p); + return aia_p; } @@ -201,7 +203,7 @@ audit_session_portdestroy(ipc_port_t *sessionport) ipc_port_t port = *sessionport; if (IP_VALID(port)) { - assert (ip_active(port)); + assert(ip_active(port)); assert(IKOT_AU_SESSIONPORT == ip_kotype(port)); ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE); ipc_port_dealloc_kernel(port); diff --git a/osfmk/kern/backtrace.c b/osfmk/kern/backtrace.c index 0588970f0..d99787543 100644 --- a/osfmk/kern/backtrace.c +++ b/osfmk/kern/backtrace.c @@ -57,7 +57,7 @@ backtrace(uintptr_t *bt, uint32_t max_frames) * inlined, it doesn't record the frame of the function it's inside (because * there's no stack frame). */ -uint32_t __attribute__((noinline,not_tail_called)) +uint32_t __attribute__((noinline, not_tail_called)) backtrace_frame(uintptr_t *bt, uint32_t max_frames, void *start_frame) { thread_t thread = current_thread(); @@ -94,8 +94,7 @@ backtrace_frame(uintptr_t *bt, uint32_t max_frames, void *start_frame) */ in_valid_stack = IN_STK_BOUNDS(next_fp); - if (next_fp == NULL || !in_valid_stack) - { + if (next_fp == NULL || !in_valid_stack) { break; } @@ -224,15 +223,15 @@ backtrace_interrupted(uintptr_t *bt, uint32_t max_frames) int backtrace_user(uintptr_t *bt, uint32_t max_frames, uint32_t *frames_out, - bool *user_64_out) + bool *user_64_out) { return backtrace_thread_user(current_thread(), bt, max_frames, frames_out, - user_64_out); + user_64_out); } int backtrace_thread_user(void *thread, uintptr_t *bt, uint32_t max_frames, - uint32_t *frames_out, bool *user_64_out) + uint32_t *frames_out, bool *user_64_out) { bool user_64; uintptr_t pc, fp, next_fp; diff --git a/osfmk/kern/backtrace.h b/osfmk/kern/backtrace.h index 246ca5a83..8bdafcddb 100644 --- a/osfmk/kern/backtrace.h +++ b/osfmk/kern/backtrace.h @@ -40,7 +40,7 @@ __BEGIN_DECLS * bt. Returns the number of return addresses stored. 
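/*
 * Sketch: capturing the current kernel call stack with backtrace() as
 * declared above. The helper name and the 16-frame cap are invented;
 * kprintf() is the same logging call the DBG() macro in affinity.c uses.
 */
#include <kern/backtrace.h>

static void
dump_my_stack(void)
{
    uintptr_t frames[16];
    uint32_t n = backtrace(frames, 16);

    for (uint32_t i = 0; i < n; i++) {
        kprintf("frame[%u]: %p\n", i, (void *)frames[i]);
    }
}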
*/ uint32_t backtrace(uintptr_t *bt, uint32_t max_frames) - __attribute__((noinline)); +__attribute__((noinline)); /* * Backtrace the current thread starting at the frame pointer start_fp, storing @@ -48,7 +48,7 @@ uint32_t backtrace(uintptr_t *bt, uint32_t max_frames) * addresses stored. */ uint32_t backtrace_frame(uintptr_t *bt, uint32_t max_frames, void *start_frame) - __attribute__((noinline,not_tail_called)); +__attribute__((noinline, not_tail_called)); /* * Backtrace the kernel stack of the context that was interrupted, storing up @@ -70,7 +70,7 @@ uint32_t backtrace_interrupted(uintptr_t *bt, uint32_t max_frames); * Must not be called from interrupt context or with interrupts disabled. */ int backtrace_user(uintptr_t *bt, uint32_t max_frames, uint32_t *frames_out, - bool *user_64_out); + bool *user_64_out); /* * Backtrace the user stack of the given thread, storing up to max_frames return @@ -82,7 +82,7 @@ int backtrace_user(uintptr_t *bt, uint32_t max_frames, uint32_t *frames_out, * Must not be called from interrupt context or with interrupts disabled. */ int backtrace_thread_user(void *thread, uintptr_t *bt, uint32_t max_frames, - uint32_t *frames_out, bool *user_64_out); + uint32_t *frames_out, bool *user_64_out); __END_DECLS diff --git a/osfmk/kern/bits.h b/osfmk/kern/bits.h index 13ce948d2..47db873e1 100644 --- a/osfmk/kern/bits.h +++ b/osfmk/kern/bits.h @@ -35,17 +35,17 @@ #include #include -typedef unsigned int uint; +typedef unsigned int uint; -#define BIT(b) (1ULL << (b)) +#define BIT(b) (1ULL << (b)) -#define mask(width) (width >= 64 ? -1 : (BIT(width) - 1)) -#define extract(x, shift, width) ((((uint64_t)(x)) >> (shift)) & mask(width)) -#define bits(x, hi, lo) extract((x), (lo), (hi) - (lo) + 1) +#define mask(width) (width >= 64 ? -1 : (BIT(width) - 1)) +#define extract(x, shift, width) ((((uint64_t)(x)) >> (shift)) & mask(width)) +#define bits(x, hi, lo) extract((x), (lo), (hi) - (lo) + 1) -#define bit_set(x, b) ((x) |= BIT(b)) -#define bit_clear(x, b) ((x) &= ~BIT(b)) -#define bit_test(x, b) ((bool)((x) & BIT(b))) +#define bit_set(x, b) ((x) |= BIT(b)) +#define bit_clear(x, b) ((x) &= ~BIT(b)) +#define bit_test(x, b) ((bool)((x) & BIT(b))) inline static uint64_t bit_ror64(uint64_t bitmap, uint n) @@ -53,11 +53,11 @@ bit_ror64(uint64_t bitmap, uint n) #if defined(__arm64__) uint64_t result; uint64_t _n = (uint64_t)n; - asm volatile("ror %0, %1, %2" : "=r" (result) : "r" (bitmap), "r" (_n)); + asm volatile ("ror %0, %1, %2" : "=r" (result) : "r" (bitmap), "r" (_n)); return result; #else n = n & 63; - return ((bitmap >> n) | (bitmap << (64 - n))); + return (bitmap >> n) | (bitmap << (64 - n)); #endif } @@ -68,7 +68,7 @@ bit_rol64(uint64_t bitmap, uint n) return bit_ror64(bitmap, 64U - n); #else n = n & 63; - return ((bitmap << n) | (bitmap >> (64 - n))); + return (bitmap << n) | (bitmap >> (64 - n)); #endif } @@ -76,18 +76,18 @@ bit_rol64(uint64_t bitmap, uint n) inline static bool bit_clear_if_set(uint64_t bitmap, int bit) { - bool bit_is_set = bit_test(bitmap, bit); - bit_clear(bitmap, bit); - return bit_is_set; + bool bit_is_set = bit_test(bitmap, bit); + bit_clear(bitmap, bit); + return bit_is_set; } /* Non-atomically set the bit and returns whether the bit value was changed */ inline static bool bit_set_if_clear(uint64_t bitmap, int bit) { - bool bit_is_set = bit_test(bitmap, bit); - bit_set(bitmap, bit); - return !bit_is_set; + bool bit_is_set = bit_test(bitmap, bit); + bit_set(bitmap, bit); + return !bit_is_set; } /* Returns the most significant '1' bit, or -1 if all 
zeros */ @@ -96,7 +96,7 @@ bit_first(uint64_t bitmap) { #if defined(__arm64__) int64_t result; - asm volatile("clz %0, %1" : "=r" (result) : "r" (bitmap)); + asm volatile ("clz %0, %1" : "=r" (result) : "r" (bitmap)); return 63 - (int)result; #else return (bitmap == 0) ? -1 : 63 - __builtin_clzll(bitmap); @@ -168,13 +168,13 @@ bit_ceiling(uint64_t n) } /* If n is a power of 2, bit_log2(n) == bit_floor(n) == bit_ceiling(n) */ -#define bit_log2(n) bit_floor((uint64_t)(n)) +#define bit_log2(n) bit_floor((uint64_t)(n)) -typedef _Atomic uint64_t bitmap_t; +typedef uint64_t bitmap_t; inline static bool -atomic_bit_set(bitmap_t *map, int n, int mem_order) +atomic_bit_set(_Atomic bitmap_t *map, int n, int mem_order) { bitmap_t prev; prev = __c11_atomic_fetch_or(map, BIT(n), mem_order); @@ -182,7 +182,7 @@ atomic_bit_set(bitmap_t *map, int n, int mem_order) } inline static bool -atomic_bit_clear(bitmap_t *map, int n, int mem_order) +atomic_bit_clear(_Atomic bitmap_t *map, int n, int mem_order) { bitmap_t prev; prev = __c11_atomic_fetch_and(map, ~BIT(n), mem_order); @@ -190,10 +190,10 @@ atomic_bit_clear(bitmap_t *map, int n, int mem_order) } -#define BITMAP_LEN(n) (((uint)(n) + 63) >> 6) /* Round to 64bit bitmap_t */ -#define BITMAP_SIZE(n) (size_t)(BITMAP_LEN(n) << 3) /* Round to 64bit bitmap_t, then convert to bytes */ -#define bitmap_bit(n) bits(n, 5, 0) -#define bitmap_index(n) bits(n, 63, 6) +#define BITMAP_LEN(n) (((uint)(n) + 63) >> 6) /* Round to 64bit bitmap_t */ +#define BITMAP_SIZE(n) (size_t)(BITMAP_LEN(n) << 3) /* Round to 64bit bitmap_t, then convert to bytes */ +#define bitmap_bit(n) bits(n, 5, 0) +#define bitmap_index(n) bits(n, 63, 6) inline static bitmap_t * bitmap_zero(bitmap_t *map, uint nbits) @@ -238,13 +238,13 @@ bitmap_clear(bitmap_t *map, uint n) } inline static bool -atomic_bitmap_set(bitmap_t *map, uint n, int mem_order) +atomic_bitmap_set(_Atomic bitmap_t *map, uint n, int mem_order) { return atomic_bit_set(&map[bitmap_index(n)], bitmap_bit(n), mem_order); } inline static bool -atomic_bitmap_clear(bitmap_t *map, uint n, int mem_order) +atomic_bitmap_clear(_Atomic bitmap_t *map, uint n, int mem_order) { return atomic_bit_clear(&map[bitmap_index(n)], bitmap_bit(n), mem_order); } diff --git a/osfmk/kern/block_hint.h b/osfmk/kern/block_hint.h index f379d0850..7f351fe98 100644 --- a/osfmk/kern/block_hint.h +++ b/osfmk/kern/block_hint.h @@ -26,11 +26,11 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _KERN_BLOCK_HINT_H_ +#ifndef _KERN_BLOCK_HINT_H_ #define _KERN_BLOCK_HINT_H_ typedef enum thread_snapshot_wait_flags { - kThreadWaitNone = 0x00, + kThreadWaitNone = 0x00, kThreadWaitKernelMutex = 0x01, kThreadWaitPortReceive = 0x02, kThreadWaitPortSetReceive = 0x03, @@ -51,7 +51,7 @@ typedef enum thread_snapshot_wait_flags { } __attribute__((packed)) block_hint_t; _Static_assert(sizeof(block_hint_t) <= sizeof(short), - "block_hint_t must fit within a short"); + "block_hint_t must fit within a short"); #ifdef XNU_KERNEL_PRIVATE diff --git a/osfmk/kern/bsd_kern.c b/osfmk/kern/bsd_kern.c index b28d396c2..add2c1d51 100644 --- a/osfmk/kern/bsd_kern.c +++ b/osfmk/kern/bsd_kern.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -57,13 +57,13 @@ /* BSD KERN COMPONENT INTERFACE */ extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */ - + thread_t get_firstthread(task_t); int get_task_userstop(task_t); int get_thread_userstop(thread_t); boolean_t current_thread_aborted(void); -void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *); -kern_return_t get_signalact(task_t , thread_t *, int); +void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *); +kern_return_t get_signalact(task_t, thread_t *, int); int fill_task_rusage(task_t task, rusage_info_current *ri); int fill_task_io_rusage(task_t task, rusage_info_current *ri); int fill_task_qos_rusage(task_t task, rusage_info_current *ri); @@ -83,12 +83,14 @@ extern void psignal(void *, int); /* * */ -void *get_bsdtask_info(task_t t) +void * +get_bsdtask_info(task_t t) { - return(t->bsd_info); + return t->bsd_info; } -void task_bsdtask_kill(task_t t) +void +task_bsdtask_kill(task_t t) { void * bsd_info = get_bsdtask_info(t); if (bsd_info != NULL) { @@ -98,34 +100,50 @@ void task_bsdtask_kill(task_t t) /* * */ -void *get_bsdthreadtask_info(thread_t th) +void * +get_bsdthreadtask_info(thread_t th) { - return(th->task != TASK_NULL ? th->task->bsd_info : NULL); + return th->task != TASK_NULL ? th->task->bsd_info : NULL; } /* * */ -void set_bsdtask_info(task_t t,void * v) +void +set_bsdtask_info(task_t t, void * v) { - t->bsd_info=v; + t->bsd_info = v; } /* * */ -void *get_bsdthread_info(thread_t th) +void * +get_bsdthread_info(thread_t th) +{ + return th->uthread; +} + +#if defined(__x86_64__) +/* + * Returns non-zero if the thread has a non-NULL task + * and that task has an LDT. + */ +int +thread_task_has_ldt(thread_t th) { - return(th->uthread); + return th->task && th->task->i386_ldt != 0; } +#endif /* __x86_64__ */ /* * XXX */ -int get_thread_lock_count(thread_t th); /* forced forward */ -int get_thread_lock_count(thread_t th) +int get_thread_lock_count(thread_t th); /* forced forward */ +int +get_thread_lock_count(thread_t th) { - return(th->mutex_count); + return th->mutex_count; } /* @@ -134,41 +152,44 @@ int get_thread_lock_count(thread_t th) * can't go away, so we make sure it is still active after * retrieving the first thread for extra safety. 
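/*
 * Sketch: the bitmap helpers from bits.h above after the type change, with
 * _Atomic now spelled at the declaration site instead of baked into
 * bitmap_t. The 128-bit capacity and the "pending" use case are invented,
 * and the return value is read per the bit_set_if_clear() contract quoted
 * above ("whether the bit value was changed"); that reading is an
 * assumption here, since the atomic variants' return statements are
 * elided by the hunk context.
 */
#include <kern/bits.h>

#define MY_NBITS 128    /* hypothetical capacity */

static _Atomic bitmap_t my_pending[BITMAP_LEN(MY_NBITS)];

static bool
mark_pending(uint n)
{
    /* true only for the first marker since the last clear */
    return atomic_bitmap_set(my_pending, n, __ATOMIC_ACQ_REL);
}

static bool
ack_pending(uint n)
{
    return atomic_bitmap_clear(my_pending, n, __ATOMIC_ACQ_REL);
}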
*/ -thread_t get_firstthread(task_t task) +thread_t +get_firstthread(task_t task) { - thread_t thread = (thread_t)(void *)queue_first(&task->threads); + thread_t thread = (thread_t)(void *)queue_first(&task->threads); - if (queue_end(&task->threads, (queue_entry_t)thread)) + if (queue_end(&task->threads, (queue_entry_t)thread)) { thread = THREAD_NULL; + } - if (!task->active) - return (THREAD_NULL); + if (!task->active) { + return THREAD_NULL; + } - return (thread); + return thread; } kern_return_t get_signalact( - task_t task, - thread_t *result_out, - int setast) + task_t task, + thread_t *result_out, + int setast) { - kern_return_t result = KERN_SUCCESS; - thread_t inc, thread = THREAD_NULL; + kern_return_t result = KERN_SUCCESS; + thread_t inc, thread = THREAD_NULL; task_lock(task); if (!task->active) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } for (inc = (thread_t)(void *)queue_first(&task->threads); - !queue_end(&task->threads, (queue_entry_t)inc); ) { + !queue_end(&task->threads, (queue_entry_t)inc);) { thread_mtx_lock(inc); if (inc->active && - (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) { + (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) { thread = inc; break; } @@ -177,48 +198,50 @@ get_signalact( inc = (thread_t)(void *)queue_next(&inc->task_threads); } - if (result_out) + if (result_out) { *result_out = thread; + } if (thread) { - if (setast) + if (setast) { act_set_astbsd(thread); + } thread_mtx_unlock(thread); - } - else + } else { result = KERN_FAILURE; + } task_unlock(task); - return (result); + return result; } kern_return_t check_actforsig( - task_t task, - thread_t thread, - int setast) + task_t task, + thread_t thread, + int setast) { - kern_return_t result = KERN_FAILURE; - thread_t inc; + kern_return_t result = KERN_FAILURE; + thread_t inc; task_lock(task); if (!task->active) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } for (inc = (thread_t)(void *)queue_first(&task->threads); - !queue_end(&task->threads, (queue_entry_t)inc); ) { + !queue_end(&task->threads, (queue_entry_t)inc);) { if (inc == thread) { thread_mtx_lock(inc); - if (inc->active && - (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) { + if (inc->active && + (inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) { result = KERN_SUCCESS; break; } @@ -231,20 +254,22 @@ check_actforsig( } if (result == KERN_SUCCESS) { - if (setast) + if (setast) { act_set_astbsd(thread); + } thread_mtx_unlock(thread); } task_unlock(task); - return (result); + return result; } -ledger_t get_task_ledger(task_t t) +ledger_t +get_task_ledger(task_t t) { - return(t->ledger); + return t->ledger; } /* @@ -253,17 +278,20 @@ ledger_t get_task_ledger(task_t t) * the map could be switched for the task (and freed) before * we go to return it here. 
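/*
 * Sketch: the task->threads walk used by get_signalact(), check_actforsig(),
 * and get_task_numactivethreads() above, rewritten with the queue_iterate()
 * macro (as affinity.c does) instead of the explicit
 * queue_first()/queue_end()/queue_next() loop. Illustrative only; the
 * function name is invented.
 */
static int
count_active_threads(task_t task)
{
    thread_t th;
    int n = 0;

    task_lock(task);
    queue_iterate(&task->threads, th, thread_t, task_threads) {
        if (th->active) {
            n++;
        }
    }
    task_unlock(task);

    return n;
}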
*/ -vm_map_t get_task_map(task_t t) +vm_map_t +get_task_map(task_t t) { - return(t->map); + return t->map; } -vm_map_t get_task_map_reference(task_t t) +vm_map_t +get_task_map_reference(task_t t) { vm_map_t m; - if (t == NULL) + if (t == NULL) { return VM_MAP_NULL; + } task_lock(t); if (!t->active) { @@ -279,40 +307,44 @@ vm_map_t get_task_map_reference(task_t t) /* * */ -ipc_space_t get_task_ipcspace(task_t t) +ipc_space_t +get_task_ipcspace(task_t t) { - return(t->itk_space); + return t->itk_space; } -int get_task_numactivethreads(task_t task) +int +get_task_numactivethreads(task_t task) { - thread_t inc; - int num_active_thr=0; + thread_t inc; + int num_active_thr = 0; task_lock(task); for (inc = (thread_t)(void *)queue_first(&task->threads); - !queue_end(&task->threads, (queue_entry_t)inc); inc = (thread_t)(void *)queue_next(&inc->task_threads)) - { - if(inc->active) + !queue_end(&task->threads, (queue_entry_t)inc); inc = (thread_t)(void *)queue_next(&inc->task_threads)) { + if (inc->active) { num_active_thr++; + } } task_unlock(task); return num_active_thr; } -int get_task_numacts(task_t t) +int +get_task_numacts(task_t t) { - return(t->thread_count); + return t->thread_count; } /* does this machine need 64bit register set for signal handler */ -int is_64signalregset(void) +int +is_64signalregset(void) { if (task_has_64Bit_data(current_task())) { - return(1); + return 1; } - return(0); + return 0; } /* @@ -325,8 +357,9 @@ swap_task_map(task_t task, thread_t thread, vm_map_t map) vm_map_t old_map; boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE; - if (task != thread->task) + if (task != thread->task) { panic("swap_task_map"); + } task_lock(task); mp_disable_preemption(); @@ -336,16 +369,12 @@ swap_task_map(task_t task, thread_t thread, vm_map_t map) vm_commit_pagezero_status(map); if (doswitch) { -#if defined(__arm__) || defined(__arm64__) - PMAP_SWITCH_USER(thread, map, cpu_number()) -#else - pmap_switch(map->pmap); -#endif + PMAP_SWITCH_USER(thread, map, cpu_number()); } mp_enable_preemption(); task_unlock(task); -#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0 +#if defined(__x86_64__) && NCOPY_WINDOWS > 0 inval_copy_windows(thread); #endif @@ -359,39 +388,44 @@ swap_task_map(task_t task, thread_t thread, vm_map_t map) * the map could be switched for the task (and freed) before * we go to return it here. */ -pmap_t get_task_pmap(task_t t) +pmap_t +get_task_pmap(task_t t) { - return(t->map->pmap); + return t->map->pmap; } /* * */ -uint64_t get_task_resident_size(task_t task) +uint64_t +get_task_resident_size(task_t task) { vm_map_t map; - + map = (task == kernel_task) ? kernel_map: task->map; - return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64); + return (uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64; } -uint64_t get_task_compressed(task_t task) +uint64_t +get_task_compressed(task_t task) { vm_map_t map; - + map = (task == kernel_task) ? kernel_map: task->map; - return((uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64); + return (uint64_t)pmap_compressed(map->pmap) * PAGE_SIZE_64; } -uint64_t get_task_resident_max(task_t task) +uint64_t +get_task_resident_max(task_t task) { vm_map_t map; - + map = (task == kernel_task) ? 
kernel_map: task->map; - return((uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64); + return (uint64_t)pmap_resident_max(map->pmap) * PAGE_SIZE_64; } -uint64_t get_task_purgeable_size(task_t task) +uint64_t +get_task_purgeable_size(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; @@ -417,14 +451,15 @@ uint64_t get_task_purgeable_size(task_t task) /* * */ -uint64_t get_task_phys_footprint(task_t task) +uint64_t +get_task_phys_footprint(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.phys_footprint, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; @@ -434,14 +469,15 @@ uint64_t get_task_phys_footprint(task_t task) /* * */ -uint64_t get_task_phys_footprint_interval_max(task_t task, int reset) +uint64_t +get_task_phys_footprint_interval_max(task_t task, int reset) { kern_return_t ret; ledger_amount_t max; ret = ledger_get_interval_max(task->ledger, task_ledgers.phys_footprint, &max, reset); - if(KERN_SUCCESS == ret) { + if (KERN_SUCCESS == ret) { return max; } @@ -452,14 +488,15 @@ uint64_t get_task_phys_footprint_interval_max(task_t task, int reset) /* * */ -uint64_t get_task_phys_footprint_lifetime_max(task_t task) +uint64_t +get_task_phys_footprint_lifetime_max(task_t task) { kern_return_t ret; ledger_amount_t max; ret = ledger_get_lifetime_max(task->ledger, task_ledgers.phys_footprint, &max); - if(KERN_SUCCESS == ret) { + if (KERN_SUCCESS == ret) { return max; } @@ -469,7 +506,8 @@ uint64_t get_task_phys_footprint_lifetime_max(task_t task) /* * */ -uint64_t get_task_phys_footprint_limit(task_t task) +uint64_t +get_task_phys_footprint_limit(task_t task) { kern_return_t ret; ledger_amount_t max; @@ -482,158 +520,170 @@ uint64_t get_task_phys_footprint_limit(task_t task) return 0; } -uint64_t get_task_internal(task_t task) +uint64_t +get_task_internal(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.internal, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; } -uint64_t get_task_internal_compressed(task_t task) +uint64_t +get_task_internal_compressed(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.internal_compressed, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; } -uint64_t get_task_purgeable_nonvolatile(task_t task) +uint64_t +get_task_purgeable_nonvolatile(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; } -uint64_t get_task_purgeable_nonvolatile_compressed(task_t task) +uint64_t +get_task_purgeable_nonvolatile_compressed(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.purgeable_nonvolatile_compressed, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; } -uint64_t get_task_alternate_accounting(task_t task) +uint64_t +get_task_alternate_accounting(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit 
- debit; } return 0; } -uint64_t get_task_alternate_accounting_compressed(task_t task) +uint64_t +get_task_alternate_accounting_compressed(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.alternate_accounting_compressed, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; } -uint64_t get_task_page_table(task_t task) +uint64_t +get_task_page_table(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.page_table, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; } -uint64_t get_task_iokit_mapped(task_t task) +uint64_t +get_task_iokit_mapped(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; ret = ledger_get_entries(task->ledger, task_ledgers.iokit_mapped, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; } -uint64_t get_task_network_nonvolatile(task_t task) +uint64_t +get_task_network_nonvolatile(task_t task) { - kern_return_t ret; - ledger_amount_t credit, debit; + kern_return_t ret; + ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile, &credit, &debit); - if (KERN_SUCCESS == ret) { - return (credit - debit); - } + ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile, &credit, &debit); + if (KERN_SUCCESS == ret) { + return credit - debit; + } - return 0; + return 0; } -uint64_t get_task_network_nonvolatile_compressed(task_t task) +uint64_t +get_task_network_nonvolatile_compressed(task_t task) { - kern_return_t ret; - ledger_amount_t credit, debit; + kern_return_t ret; + ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile_compressed, &credit, &debit); - if (KERN_SUCCESS == ret) { - return (credit - debit); - } + ret = ledger_get_entries(task->ledger, task_ledgers.network_nonvolatile_compressed, &credit, &debit); + if (KERN_SUCCESS == ret) { + return credit - debit; + } - return 0; + return 0; } -uint64_t get_task_wired_mem(task_t task) +uint64_t +get_task_wired_mem(task_t task) { - kern_return_t ret; - ledger_amount_t credit, debit; + kern_return_t ret; + ledger_amount_t credit, debit; - ret = ledger_get_entries(task->ledger, task_ledgers.wired_mem, &credit, &debit); - if (KERN_SUCCESS == ret) { - return (credit - debit); - } + ret = ledger_get_entries(task->ledger, task_ledgers.wired_mem, &credit, &debit); + if (KERN_SUCCESS == ret) { + return credit - debit; + } - return 0; + return 0; } -uint64_t get_task_cpu_time(task_t task) +uint64_t +get_task_cpu_time(task_t task) { kern_return_t ret; ledger_amount_t credit, debit; - + ret = ledger_get_entries(task->ledger, task_ledgers.cpu_time, &credit, &debit); if (KERN_SUCCESS == ret) { - return (credit - debit); + return credit - debit; } return 0; @@ -642,9 +692,10 @@ uint64_t get_task_cpu_time(task_t task) /* * */ -task_t get_threadtask(thread_t th) +task_t +get_threadtask(thread_t th) { - return(th->task); + return th->task; } /* @@ -652,9 +703,9 @@ task_t get_threadtask(thread_t th) */ vm_map_offset_t get_map_min( - vm_map_t map) + vm_map_t map) { - return(vm_map_min(map)); + return vm_map_min(map); } /* @@ -662,80 +713,84 @@ get_map_min( */ vm_map_offset_t get_map_max( - vm_map_t map) + vm_map_t map) { - return(vm_map_max(map)); + return vm_map_max(map); } vm_map_size_t get_vmmap_size( - vm_map_t map) + 
vm_map_t map) { - return(map->size); + return map->size; } #if CONFIG_COREDUMP static int get_vmsubmap_entries( - vm_map_t map, - vm_object_offset_t start, - vm_object_offset_t end) + vm_map_t map, + vm_object_offset_t start, + vm_object_offset_t end) { - int total_entries = 0; - vm_map_entry_t entry; + int total_entries = 0; + vm_map_entry_t entry; - if (not_in_kdp) - vm_map_lock(map); + if (not_in_kdp) { + vm_map_lock(map); + } entry = vm_map_first_entry(map); - while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) { + while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) { entry = entry->vme_next; } - while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { - if(entry->is_sub_map) { - total_entries += - get_vmsubmap_entries(VME_SUBMAP(entry), - VME_OFFSET(entry), - (VME_OFFSET(entry) + - entry->vme_end - - entry->vme_start)); + while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { + if (entry->is_sub_map) { + total_entries += + get_vmsubmap_entries(VME_SUBMAP(entry), + VME_OFFSET(entry), + (VME_OFFSET(entry) + + entry->vme_end - + entry->vme_start)); } else { total_entries += 1; } entry = entry->vme_next; } - if (not_in_kdp) - vm_map_unlock(map); - return(total_entries); + if (not_in_kdp) { + vm_map_unlock(map); + } + return total_entries; } int get_vmmap_entries( - vm_map_t map) + vm_map_t map) { - int total_entries = 0; - vm_map_entry_t entry; + int total_entries = 0; + vm_map_entry_t entry; - if (not_in_kdp) - vm_map_lock(map); + if (not_in_kdp) { + vm_map_lock(map); + } entry = vm_map_first_entry(map); - while(entry != vm_map_to_entry(map)) { - if(entry->is_sub_map) { - total_entries += - get_vmsubmap_entries(VME_SUBMAP(entry), - VME_OFFSET(entry), - (VME_OFFSET(entry) + - entry->vme_end - - entry->vme_start)); + while (entry != vm_map_to_entry(map)) { + if (entry->is_sub_map) { + total_entries += + get_vmsubmap_entries(VME_SUBMAP(entry), + VME_OFFSET(entry), + (VME_OFFSET(entry) + + entry->vme_end - + entry->vme_start)); } else { total_entries += 1; } entry = entry->vme_next; } - if (not_in_kdp) - vm_map_unlock(map); - return(total_entries); + if (not_in_kdp) { + vm_map_unlock(map); + } + return total_entries; } #endif /* CONFIG_COREDUMP */ @@ -749,7 +804,7 @@ int get_task_userstop( task_t task) { - return(task->user_stop_count); + return task->user_stop_count; } /* @@ -759,7 +814,7 @@ int get_thread_userstop( thread_t th) { - return(th->user_stop_count); + return th->user_stop_count; } /* @@ -769,17 +824,17 @@ boolean_t get_task_pidsuspended( task_t task) { - return (task->pidsuspended); + return task->pidsuspended; } /* * */ -boolean_t +boolean_t get_task_frozen( task_t task) { - return (task->frozen); + return task->frozen; } /* @@ -789,7 +844,7 @@ boolean_t thread_should_abort( thread_t th) { - return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT); + return (th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT; } /* @@ -801,20 +856,22 @@ thread_should_abort( * qualifies. 
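The get_task_* accounting getters normalized earlier in this hunk all reduce to the same ledger read; a minimal sketch of that shared shape, assuming the task_ledger_balance() name (it does not exist in this patch):

	/* Sketch only: every getter above is this pattern specialized to one
	 * task_ledgers entry; a failed ledger read reports 0 rather than an error. */
	static uint64_t
	task_ledger_balance(task_t task, int entry)
	{
		kern_return_t ret;
		ledger_amount_t credit, debit;

		ret = ledger_get_entries(task->ledger, entry, &credit, &debit);
		if (KERN_SUCCESS == ret) {
			return credit - debit;
		}
		return 0;
	}

	/* e.g. get_task_internal(task) == task_ledger_balance(task, task_ledgers.internal) */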
*/ boolean_t -current_thread_aborted ( - void) +current_thread_aborted( + void) { thread_t th = current_thread(); spl_t s; if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT && - (th->options & TH_OPT_INTMASK) != THREAD_UNINT) - return (TRUE); + (th->options & TH_OPT_INTMASK) != THREAD_UNINT) { + return TRUE; + } if (th->sched_flags & TH_SFLAG_ABORTSAFELY) { s = splsched(); thread_lock(th); - if (th->sched_flags & TH_SFLAG_ABORTSAFELY) + if (th->sched_flags & TH_SFLAG_ABORTSAFELY) { th->sched_flags &= ~TH_SFLAG_ABORTED_MASK; + } thread_unlock(th); splx(s); } @@ -826,16 +883,16 @@ current_thread_aborted ( */ void task_act_iterate_wth_args( - task_t task, - void (*func_callback)(thread_t, void *), - void *func_arg) + task_t task, + void (*func_callback)(thread_t, void *), + void *func_arg) { - thread_t inc; + thread_t inc; task_lock(task); for (inc = (thread_t)(void *)queue_first(&task->threads); - !queue_end(&task->threads, (queue_entry_t)inc); ) { + !queue_end(&task->threads, (queue_entry_t)inc);) { (void) (*func_callback)(inc, func_arg); inc = (thread_t)(void *)queue_next(&inc->task_threads); } @@ -862,11 +919,11 @@ fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo) ptinfo->pti_virtual_size = map->size; ptinfo->pti_resident_size = - (mach_vm_size_t)(pmap_resident_count(map->pmap)) - * PAGE_SIZE_64; + (mach_vm_size_t)(pmap_resident_count(map->pmap)) + * PAGE_SIZE_64; ptinfo->pti_policy = ((task != kernel_task)? - POLICY_TIMESHARE: POLICY_RR); + POLICY_TIMESHARE: POLICY_RR); tinfo.threads_user = tinfo.threads_system = 0; tinfo.total_user = task->total_user_time; @@ -876,14 +933,16 @@ fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo) uint64_t tval; spl_t x; - if (thread->options & TH_OPT_IDLE_THREAD) + if (thread->options & TH_OPT_IDLE_THREAD) { continue; + } x = splsched(); thread_lock(thread); - if ((thread->state & TH_RUN) == TH_RUN) + if ((thread->state & TH_RUN) == TH_RUN) { numrunning++; + } cswitch += thread->c_switch; tval = timer_grab(&thread->user_timer); tinfo.threads_user += tval; @@ -911,7 +970,7 @@ fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo) ptinfo->pti_total_user = tinfo.total_user; ptinfo->pti_threads_system = tinfo.threads_system; ptinfo->pti_threads_user = tinfo.threads_user; - + ptinfo->pti_faults = task->faults; ptinfo->pti_pageins = task->pageins; ptinfo->pti_cow_faults = task->cow_faults; @@ -927,11 +986,11 @@ fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo) task_unlock(task); } -int +int fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp) { thread_t thact; - int err=0; + int err = 0; mach_msg_type_number_t count; thread_basic_info_data_t basic_info; kern_return_t kret; @@ -940,15 +999,13 @@ fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_t task_lock(task); for (thact = (thread_t)(void *)queue_first(&task->threads); - !queue_end(&task->threads, (queue_entry_t)thact); ) { + !queue_end(&task->threads, (queue_entry_t)thact);) { addr = (thuniqueid) ? 
thact->thread_id : thact->machine.cthread_self; - if (addr == thaddr) - { - + if (addr == thaddr) { count = THREAD_BASIC_INFO_COUNT; if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) { err = 1; - goto out; + goto out; } ptinfo->pth_user_time = ((basic_info.user_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.user_time.microseconds * (integer_t)NSEC_PER_USEC)); ptinfo->pth_system_time = ((basic_info.system_time.seconds * (integer_t)NSEC_PER_SEC) + (basic_info.system_time.microseconds * (integer_t)NSEC_PER_USEC)); @@ -961,12 +1018,13 @@ fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_t ptinfo->pth_curpri = thact->sched_pri; ptinfo->pth_priority = thact->base_pri; ptinfo->pth_maxpriority = thact->max_priority; - - if ((vpp != NULL) && (thact->uthread != NULL)) + + if ((vpp != NULL) && (thact->uthread != NULL)) { bsd_threadcdir(thact->uthread, vpp, vidp); - bsd_getthreadname(thact->uthread,ptinfo->pth_name); + } + bsd_getthreadname(thact->uthread, ptinfo->pth_name); err = 0; - goto out; + goto out; } thact = (thread_t)(void *)queue_next(&thact->task_threads); } @@ -974,13 +1032,13 @@ fill_taskthreadinfo(task_t task, uint64_t thaddr, bool thuniqueid, struct proc_t out: task_unlock(task); - return(err); + return err; } int fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid) { - int numthr=0; + int numthr = 0; thread_t thact; uint64_t * uptr; uint64_t thaddr; @@ -990,29 +1048,29 @@ fill_taskthreadlist(task_t task, void * buffer, int thcount, bool thuniqueid) task_lock(task); for (thact = (thread_t)(void *)queue_first(&task->threads); - !queue_end(&task->threads, (queue_entry_t)thact); ) { + !queue_end(&task->threads, (queue_entry_t)thact);) { thaddr = (thuniqueid) ? thact->thread_id : thact->machine.cthread_self; *uptr++ = thaddr; numthr++; - if (numthr >= thcount) + if (numthr >= thcount) { goto out; + } thact = (thread_t)(void *)queue_next(&thact->task_threads); } out: task_unlock(task); return (int)(numthr * sizeof(uint64_t)); - } int get_numthreads(task_t task) { - return(task->thread_count); + return task->thread_count; } /* - * Gather the various pieces of info about the designated task, + * Gather the various pieces of info about the designated task, * and collect it all into a single rusage_info. 
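Note that fill_taskthreadlist() above returns a byte count, not an entry count; a hypothetical caller (not from this patch) sizing and decoding the buffer accordingly:

	/* Illustrative only: ids must hold at least max_ids entries, and the
	 * return value of fill_taskthreadlist() is in bytes. */
	static int
	copy_thread_ids(task_t task, uint64_t *ids, int max_ids, bool unique)
	{
		int nbytes = fill_taskthreadlist(task, ids, max_ids, unique);
		/* ids[i] is thread_id when unique is true, cthread_self otherwise */
		return nbytes / (int)sizeof(uint64_t);
	}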
*/ int @@ -1030,16 +1088,16 @@ fill_task_rusage(task_t task, rusage_info_current *ri) ri->ri_system_time = powerinfo.total_system; ledger_get_balance(task->ledger, task_ledgers.phys_footprint, - (ledger_amount_t *)&ri->ri_phys_footprint); + (ledger_amount_t *)&ri->ri_phys_footprint); ledger_get_balance(task->ledger, task_ledgers.phys_mem, - (ledger_amount_t *)&ri->ri_resident_size); + (ledger_amount_t *)&ri->ri_resident_size); ledger_get_balance(task->ledger, task_ledgers.wired_mem, - (ledger_amount_t *)&ri->ri_wired_size); + (ledger_amount_t *)&ri->ri_wired_size); ri->ri_pageins = task->pageins; task_unlock(task); - return (0); + return 0; } void @@ -1064,7 +1122,7 @@ fill_task_io_rusage(task_t task, rusage_info_current *ri) ri->ri_diskio_byteswritten = 0; } task_unlock(task); - return (0); + return 0; } int @@ -1077,8 +1135,9 @@ fill_task_qos_rusage(task_t task, rusage_info_current *ri) /* Rollup QoS time of all the threads to task */ queue_iterate(&task->threads, thread, thread_t, task_threads) { - if (thread->options & TH_OPT_IDLE_THREAD) + if (thread->options & TH_OPT_IDLE_THREAD) { continue; + } thread_update_qos_cpu_time(thread); } @@ -1091,7 +1150,7 @@ fill_task_qos_rusage(task_t task, rusage_info_current *ri) ri->ri_cpu_time_qos_user_interactive = task->cpu_time_eqos_stats.cpu_time_qos_user_interactive; task_unlock(task); - return (0); + return 0; } void @@ -1118,14 +1177,14 @@ fill_task_monotonic_rusage(task_t task, rusage_info_current *ri) uint64_t get_task_logical_writes(task_t task) { - assert(task != TASK_NULL); - struct ledger_entry_info lei; + assert(task != TASK_NULL); + struct ledger_entry_info lei; - task_lock(task); - ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei); + task_lock(task); + ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei); - task_unlock(task); - return lei.lei_balance; + task_unlock(task); + return lei.lei_balance; } uint64_t diff --git a/osfmk/kern/btlog.c b/osfmk/kern/btlog.c index a15aef980..584be02cf 100644 --- a/osfmk/kern/btlog.c +++ b/osfmk/kern/btlog.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -43,12 +43,12 @@ * and to maintain the linked list of active records * in chronological order. 
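The record/element split described in the comment below is easiest to see from the client side; here is a hedged usage sketch, where capture_backtrace() and the operation code are illustrative stand-ins rather than names from this patch:

	/* Sketch of a btlog client in the style of the zone-leak logger. */
	btlog_t *log = btlog_create(8192 /* records */, BTLOG_MAX_DEPTH /* frames */,
	    FALSE /* let btlog age out old entries itself */);
	if (log != NULL) {
		void *bt[BTLOG_MAX_DEPTH] = { NULL };
		size_t depth = capture_backtrace(bt, BTLOG_MAX_DEPTH); /* hypothetical helper */

		/* One record per unique stack; this element is linked onto it. */
		btlog_add_entry(log, element, 1 /* illustrative "alloc" op */, bt, depth);
	}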
*/ -#define BTLOG_MAX_RECORDS (0xFFFFFF /* 16777215 */) +#define BTLOG_MAX_RECORDS (0xFFFFFF /* 16777215 */ ) #define BTLOG_RECORDINDEX_NONE (0xFFFFFF) /* * Each record is a stack with a reference count and a list of - * log elements that refer to it. + * log elements that refer to it. * * Each log element is placed in a hash bucket that is contained * within the btlog structure. It contains the index to the record @@ -63,72 +63,72 @@ #define ELEMENT_HASH_BUCKET_COUNT (256) #define BTLOG_HASHELEMINDEX_NONE BTLOG_RECORDINDEX_NONE -#define ZELEMS_DEFAULT (8000) -size_t zelems_count = 0; +#define ZELEMS_DEFAULT (8000) +size_t zelems_count = 0; typedef uint32_t btlog_recordindex_t; /* only 24 bits used */ /* * Queue head for the queue of elements connected to a particular record (stack). - * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode. + * For quick removal of the oldest element referencing the least popular stack. Useful for LEAKS mode. */ -TAILQ_HEAD(_element_record_queue, btlog_element); +TAILQ_HEAD(_element_record_queue, btlog_element); -/* +/* * Queue head for the queue of elements that hash to the same bucket. - * For quick removal of the oldest element ever logged. Useful for CORRUPTION mode where we use only bucket i.e. FIFO. + * For quick removal of the oldest element ever logged. Useful for CORRUPTION mode where we use only bucket i.e. FIFO. */ TAILQ_HEAD(_element_hash_queue, btlog_element); typedef struct btlog_record { - btlog_recordindex_t next:24, - operation:8; - uint32_t ref_count; - uint32_t bthash; - struct _element_record_queue element_record_queue; - void *bt[]; /* variable sized, based on btlog_t params */ + btlog_recordindex_t next:24, + operation:8; + uint32_t ref_count; + uint32_t bthash; + struct _element_record_queue element_record_queue; + void *bt[];/* variable sized, based on btlog_t params */ } btlog_record_t; typedef struct btlog_element { - btlog_recordindex_t recindex:24, - operation:8; + btlog_recordindex_t recindex:24, + operation:8; uintptr_t elem; TAILQ_ENTRY(btlog_element) element_record_link; /* Links to other elements pointing to the same stack. */ TAILQ_ENTRY(btlog_element) element_hash_link; /* Links to other elements in the same hash chain. - * During LEAKS mode, this is used as a singly-linked list because - * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads. - * - * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list. - */ + * During LEAKS mode, this is used as a singly-linked list because + * we don't want to initialize ELEMENT_HASH_BUCKET_COUNT heads. + * + * During CORRUPTION mode with a single hash chain, this is used as a doubly-linked list. + */ } btlog_element_t; struct btlog { - vm_address_t btlog_buffer; /* all memory for this btlog_t */ - vm_size_t btlog_buffersize; - - uintptr_t btrecords; /* use btlog_recordindex_t to lookup */ - size_t btrecord_btdepth; /* BT entries per record */ - size_t btrecord_size; - - btlog_recordindex_t head; /* active record list */ - btlog_recordindex_t tail; - btlog_recordindex_t activerecord; - btlog_recordindex_t freelist_records; - - size_t active_record_count; - size_t active_element_count; - btlog_element_t *freelist_elements; - union { - btlog_element_t **elem_recindex_hashtbl; /* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */ - struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: We use a single hash bucket i.e. 
queue */ - } elem_linkage_un; - - decl_simple_lock_data(,btlog_lock); - boolean_t caller_will_remove_entries_for_element; /* If TRUE, this means that the caller is interested in keeping track of abandoned / leaked elements. - * And so they want to be in charge of explicitly removing elements. Depending on this variable we - * will choose what kind of data structure to use for the elem_linkage_un union above. - */ + vm_address_t btlog_buffer; /* all memory for this btlog_t */ + vm_size_t btlog_buffersize; + + uintptr_t btrecords; /* use btlog_recordindex_t to lookup */ + size_t btrecord_btdepth;/* BT entries per record */ + size_t btrecord_size; + + btlog_recordindex_t head; /* active record list */ + btlog_recordindex_t tail; + btlog_recordindex_t activerecord; + btlog_recordindex_t freelist_records; + + size_t active_record_count; + size_t active_element_count; + btlog_element_t *freelist_elements; + union { + btlog_element_t **elem_recindex_hashtbl; /* LEAKS mode: We use an array of ELEMENT_HASH_BUCKET_COUNT buckets. */ + struct _element_hash_queue *element_hash_queue; /* CORRUPTION mode: We use a single hash bucket i.e. queue */ + } elem_linkage_un; + + decl_simple_lock_data(, btlog_lock); + boolean_t caller_will_remove_entries_for_element;/* If TRUE, this means that the caller is interested in keeping track of abandoned / leaked elements. + * And so they want to be in charge of explicitly removing elements. Depending on this variable we + * will choose what kind of data structure to use for the elem_linkage_un union above. + */ }; extern boolean_t vm_kernel_ready; @@ -146,10 +146,10 @@ btlog_element_t* btlog_get_elem_from_freelist(btlog_t *btlog); uint32_t lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btcount) { - btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE; - btlog_record_t *record = NULL; - size_t i = 0; - boolean_t stack_matched = TRUE; + btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE; + btlog_record_t *record = NULL; + size_t i = 0; + boolean_t stack_matched = TRUE; assert(btcount); assert(bt); @@ -158,9 +158,8 @@ lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btc record = lookup_btrecord(btlog, recindex); while (recindex != BTLOG_RECORDINDEX_NONE) { assert(record->bthash); - assert(! TAILQ_EMPTY(&record->element_record_queue)); + assert(!TAILQ_EMPTY(&record->element_record_queue)); if (record->bthash == md5_hash) { - /* * Make sure that the incoming stack actually matches the * stack in this record. 
Since we only save off a @@ -184,7 +183,7 @@ lookup_btrecord_byhash(btlog_t *btlog, uint32_t md5_hash, void *bt[], size_t btc } } - for (i=0; i < MIN(btcount, btlog->btrecord_btdepth); i++) { + for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) { if (record->bt[i] != bt[i]) { stack_matched = FALSE; goto next; @@ -220,7 +219,7 @@ calculate_hashidx_for_element(uintptr_t elem, btlog_t *btlog) static void btlog_lock(btlog_t *btlog) { - simple_lock(&btlog->btlog_lock); + simple_lock(&btlog->btlog_lock, LCK_GRP_NULL); } static void btlog_unlock(btlog_t *btlog) @@ -230,8 +229,8 @@ btlog_unlock(btlog_t *btlog) btlog_t * btlog_create(size_t numrecords, - size_t record_btdepth, - boolean_t caller_will_remove_entries_for_element) + size_t record_btdepth, + boolean_t caller_will_remove_entries_for_element) { btlog_t *btlog; vm_size_t buffersize_needed = 0, elemsize_needed = 0; @@ -241,27 +240,31 @@ btlog_create(size_t numrecords, size_t btrecord_size = 0; uintptr_t free_elem = 0, next_free_elem = 0; - if (vm_kernel_ready && !kmem_alloc_ready) + if (vm_kernel_ready && !kmem_alloc_ready) { return NULL; + } - if (numrecords > BTLOG_MAX_RECORDS) + if (numrecords > BTLOG_MAX_RECORDS) { return NULL; + } - if (numrecords == 0) + if (numrecords == 0) { return NULL; + } - if (record_btdepth > BTLOG_MAX_DEPTH) + if (record_btdepth > BTLOG_MAX_DEPTH) { return NULL; + } /* btlog_record_t is variable-sized, calculate needs now */ btrecord_size = sizeof(btlog_record_t) - + sizeof(void *) * record_btdepth; + + sizeof(void *) * record_btdepth; buffersize_needed = sizeof(btlog_t) + numrecords * btrecord_size; buffersize_needed = round_page(buffersize_needed); - + if (zelems_count == 0) { - zelems_count = ((max_mem + (1024*1024*1024) /*GB*/) >> 30) * ZELEMS_DEFAULT; + zelems_count = ((max_mem + (1024 * 1024 * 1024) /*GB*/) >> 30) * ZELEMS_DEFAULT; if (PE_parse_boot_argn("zelems", &zelems_count, sizeof(zelems_count)) == TRUE) { /* @@ -276,12 +279,13 @@ btlog_create(size_t numrecords, /* since rounding to a page size might hold more, recalculate */ numrecords = MIN(BTLOG_MAX_RECORDS, - (buffersize_needed - sizeof(btlog_t))/btrecord_size); + (buffersize_needed - sizeof(btlog_t)) / btrecord_size); if (kmem_alloc_ready) { ret = kmem_alloc(kernel_map, &buffer, buffersize_needed, VM_KERN_MEMORY_DIAG); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return NULL; + } ret = kmem_alloc(kernel_map, &elem_buffer, elemsize_needed, VM_KERN_MEMORY_DIAG); if (ret != KERN_SUCCESS) { @@ -304,7 +308,6 @@ btlog_create(size_t numrecords, elem_buffer = 0; return NULL; } - } else { buffer = (vm_address_t)pmap_steal_memory(buffersize_needed); elem_buffer = (vm_address_t)pmap_steal_memory(elemsize_needed); @@ -341,13 +344,13 @@ btlog_create(size_t numrecords, btlog->active_record_count = 0; btlog->activerecord = BTLOG_RECORDINDEX_NONE; - for (i=0; i < ELEMENT_HASH_BUCKET_COUNT; i++) { - btlog->elem_linkage_un.elem_recindex_hashtbl[i]=0; + for (i = 0; i < ELEMENT_HASH_BUCKET_COUNT; i++) { + btlog->elem_linkage_un.elem_recindex_hashtbl[i] = 0; } /* populate freelist_records with all records in order */ btlog->freelist_records = 0; - for (i=0; i < (numrecords - 1); i++) { + for (i = 0; i < (numrecords - 1); i++) { btlog_record_t *rec = lookup_btrecord(btlog, i); rec->next = (btlog_recordindex_t)(i + 1); } @@ -356,8 +359,7 @@ btlog_create(size_t numrecords, /* populate freelist_elements with all elements in order */ free_elem = (uintptr_t)btlog->freelist_elements; - for (i=0; i < (zelems_count - 1); i++) { - + for (i = 0; i < 
(zelems_count - 1); i++) { next_free_elem = free_elem + sizeof(btlog_element_t); *(uintptr_t*)free_elem = next_free_elem; free_elem = next_free_elem; @@ -371,7 +373,7 @@ btlog_create(size_t numrecords, static btlog_recordindex_t btlog_get_record_from_freelist(btlog_t *btlog) { - btlog_recordindex_t recindex = btlog->freelist_records; + btlog_recordindex_t recindex = btlog->freelist_records; if (recindex == BTLOG_RECORDINDEX_NONE) { /* nothing on freelist */ @@ -405,7 +407,7 @@ btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex) record->next = btlog->freelist_records; btlog->freelist_records = recindex; - + if (btlog->head == BTLOG_RECORDINDEX_NONE) { /* active list is now empty, update tail */ btlog->tail = BTLOG_RECORDINDEX_NONE; @@ -437,15 +439,14 @@ btlog_add_record_to_freelist(btlog_t *btlog, btlog_recordindex_t recindex) static void btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict) { - btlog_recordindex_t recindex = btlog->head; - btlog_record_t *record = NULL; - btlog_element_t *recelem = NULL; + btlog_recordindex_t recindex = btlog->head; + btlog_record_t *record = NULL; + btlog_element_t *recelem = NULL; if (recindex == BTLOG_RECORDINDEX_NONE) { /* nothing on active list */ panic("BTLog: Eviction requested on btlog (0x%lx) with an empty active list.\n", (uintptr_t) btlog); } else { - while (num_elements_to_evict) { /* * LEAKS: reap the oldest element within the record with the lowest refs. @@ -453,18 +454,17 @@ btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict) */ if (btlog->caller_will_remove_entries_for_element) { - uint32_t max_refs_threshold = UINT32_MAX; - btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0; + uint32_t max_refs_threshold = UINT32_MAX; + btlog_recordindex_t precindex = 0, prev_evictindex = 0, evict_index = 0; prev_evictindex = evict_index = btlog->head; - precindex = recindex = btlog->head; + precindex = recindex = btlog->head; while (recindex != BTLOG_RECORDINDEX_NONE) { - - record = lookup_btrecord(btlog, recindex); + record = lookup_btrecord(btlog, recindex); if (btlog->activerecord == recindex || record->ref_count > max_refs_threshold) { - /* skip this record */ + /* skip this record */ } else { prev_evictindex = precindex; evict_index = recindex; @@ -480,11 +480,10 @@ btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict) recindex = evict_index; assert(recindex != BTLOG_RECORDINDEX_NONE); - record = lookup_btrecord(btlog, recindex); - + record = lookup_btrecord(btlog, recindex); + recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue); } else { - recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue); recindex = recelem->recindex; record = lookup_btrecord(btlog, recindex); @@ -495,26 +494,24 @@ btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict) */ while (recelem && num_elements_to_evict) { - TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link); if (btlog->caller_will_remove_entries_for_element) { + btlog_element_t *prev_hashelem = NULL, *hashelem = NULL; + uint32_t hashidx = 0; - btlog_element_t *prev_hashelem = NULL, *hashelem = NULL; - uint32_t hashidx = 0; - hashidx = calculate_hashidx_for_element(~recelem->elem, btlog); prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx]; while (hashelem != NULL) { - if (hashelem == recelem) + if (hashelem == recelem) { break; - else { + } else { prev_hashelem = hashelem; hashelem = 
TAILQ_NEXT(hashelem, element_hash_link); } } - + if (hashelem == NULL) { panic("BTLog: Missing hashelem for element list of record 0x%lx\n", (uintptr_t) record); } @@ -525,7 +522,6 @@ btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict) btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link); } } else { - TAILQ_REMOVE(btlog->elem_linkage_un.element_hash_queue, recelem, element_hash_link); } @@ -539,9 +535,8 @@ btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict) record->ref_count--; if (record->ref_count == 0) { - btlog_add_record_to_freelist(btlog, recindex); - + /* * LEAKS: All done with this record. Need the next least popular record. * CORRUPTION: We don't care about records. We'll just pick the next oldest element. @@ -555,7 +550,6 @@ btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict) if (btlog->caller_will_remove_entries_for_element) { recelem = TAILQ_LAST(&record->element_record_queue, _element_record_queue); } else { - recelem = TAILQ_LAST(btlog->elem_linkage_un.element_hash_queue, _element_hash_queue); recindex = recelem->recindex; record = lookup_btrecord(btlog, recindex); @@ -569,7 +563,6 @@ btlog_evict_elements_from_record(btlog_t *btlog, int num_elements_to_evict) static void btlog_append_record_to_activelist(btlog_t *btlog, btlog_recordindex_t recindex) { - assert(recindex != BTLOG_RECORDINDEX_NONE); if (btlog->head == BTLOG_RECORDINDEX_NONE) { @@ -617,27 +610,28 @@ btlog_add_elem_to_freelist(btlog_t *btlog, btlog_element_t *elem) void btlog_add_entry(btlog_t *btlog, - void *element, - uint8_t operation, - void *bt[], - size_t btcount) + void *element, + uint8_t operation, + void *bt[], + size_t btcount) { - btlog_recordindex_t recindex = 0; - btlog_record_t *record = NULL; - size_t i; - u_int32_t md5_buffer[4]; - MD5_CTX btlog_ctx; - uint32_t hashidx = 0; + btlog_recordindex_t recindex = 0; + btlog_record_t *record = NULL; + size_t i; + u_int32_t md5_buffer[4]; + MD5_CTX btlog_ctx; + uint32_t hashidx = 0; - btlog_element_t *hashelem = NULL; + btlog_element_t *hashelem = NULL; - if (g_crypto_funcs == NULL) + if (g_crypto_funcs == NULL) { return; + } btlog_lock(btlog); MD5Init(&btlog_ctx); - for (i=0; i < MIN(btcount, btlog->btrecord_btdepth); i++) { + for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) { MD5Update(&btlog_ctx, (u_char *) &bt[i], sizeof(bt[i])); } MD5Final((u_char *) &md5_buffer, &btlog_ctx); @@ -645,7 +639,6 @@ btlog_add_entry(btlog_t *btlog, recindex = lookup_btrecord_byhash(btlog, md5_buffer[0], bt, btcount); if (recindex != BTLOG_RECORDINDEX_NONE) { - record = lookup_btrecord(btlog, recindex); record->ref_count++; assert(record->operation == operation); @@ -655,7 +648,7 @@ retry: recindex = btlog_get_record_from_freelist(btlog); if (recindex == BTLOG_RECORDINDEX_NONE) { /* Use the first active record (FIFO age-out) */ - btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t))/sizeof(btlog_element_t))); + btlog_evict_elements_from_record(btlog, ((2 * sizeof(btlog_record_t)) / sizeof(btlog_element_t))); goto retry; } @@ -668,7 +661,7 @@ retry: record->ref_count = 1; TAILQ_INIT(&record->element_record_queue); - for (i=0; i < MIN(btcount, btlog->btrecord_btdepth); i++) { + for (i = 0; i < MIN(btcount, btlog->btrecord_btdepth); i++) { record->bt[i] = bt[i]; } @@ -695,7 +688,6 @@ retry: if (btlog->caller_will_remove_entries_for_element) { TAILQ_NEXT(hashelem, element_hash_link) = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx]; 
btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = hashelem; - } else { TAILQ_INSERT_HEAD(btlog->elem_linkage_un.element_hash_queue, hashelem, element_hash_link); } @@ -709,20 +701,21 @@ retry: void btlog_remove_entries_for_element(btlog_t *btlog, - void *element) + void *element) { - btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE; - btlog_record_t *record = NULL; - uint32_t hashidx = 0; - - btlog_element_t *prev_hashelem = NULL, *hashelem = NULL; + btlog_recordindex_t recindex = BTLOG_RECORDINDEX_NONE; + btlog_record_t *record = NULL; + uint32_t hashidx = 0; + + btlog_element_t *prev_hashelem = NULL, *hashelem = NULL; if (btlog->caller_will_remove_entries_for_element == FALSE) { panic("Explicit removal of entry is not permitted for this btlog (%p).\n", btlog); } - if (g_crypto_funcs == NULL) + if (g_crypto_funcs == NULL) { return; + } btlog_lock(btlog); @@ -730,28 +723,26 @@ btlog_remove_entries_for_element(btlog_t *btlog, prev_hashelem = hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx]; while (hashelem != NULL) { - if (~hashelem->elem == (uintptr_t)element) + if (~hashelem->elem == (uintptr_t)element) { break; - else { + } else { prev_hashelem = hashelem; hashelem = TAILQ_NEXT(hashelem, element_hash_link); } } if (hashelem) { - - btlog_element_t *recelem = NULL; + btlog_element_t *recelem = NULL; if (prev_hashelem != hashelem) { TAILQ_NEXT(prev_hashelem, element_hash_link) = TAILQ_NEXT(hashelem, element_hash_link); } else { - btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx] = TAILQ_NEXT(hashelem, element_hash_link); } recindex = hashelem->recindex; record = lookup_btrecord(btlog, recindex); - + recelem = hashelem; TAILQ_REMOVE(&record->element_record_queue, recelem, element_record_link); @@ -774,65 +765,69 @@ btlog_remove_entries_for_element(btlog_t *btlog, void btlog_copy_backtraces_for_elements(btlog_t * btlog, - uintptr_t * instances, - uint32_t * countp, - uint32_t zoneSize, - leak_site_proc proc, - void * refCon) + uintptr_t * instances, + uint32_t * countp, + uint32_t zoneSize, + leak_site_proc proc, + void * refCon) { - btlog_recordindex_t recindex; - btlog_record_t * record; - btlog_element_t * hashelem; - uint32_t hashidx, idx, dups, numSites, siteCount; + btlog_recordindex_t recindex; + btlog_record_t * record; + btlog_element_t * hashelem; + uint32_t hashidx, idx, dups, numSites, siteCount; uintptr_t element, site; - uint32_t count; + uint32_t count; btlog_lock(btlog); - count = *countp; - for (numSites = 0, idx = 0; idx < count; idx++) - { - element = instances[idx]; - - if (kInstanceFlagReferenced & element) continue; - element = INSTANCE_PUT(element) & ~kInstanceFlags; - - site = 0; - hashidx = calculate_hashidx_for_element(element, btlog); - hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx]; - while (hashelem != NULL) - { - if (~hashelem->elem == element) break; - hashelem = TAILQ_NEXT(hashelem, element_hash_link); - } - if (hashelem) - { - recindex = hashelem->recindex; - site = (uintptr_t) lookup_btrecord(btlog, recindex); - } - if (site) element = (site | kInstanceFlagReferenced); - instances[numSites] = INSTANCE_PUT(element); - numSites++; - } - - for (idx = 0; idx < numSites; idx++) - { - site = instances[idx]; - if (!site) continue; - if (!(kInstanceFlagReferenced & site)) continue; - for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) - { - if (instances[dups] == site) - { - siteCount++; - instances[dups] = 0; - } - } - record = (typeof(record)) (INSTANCE_PUT(site) & ~kInstanceFlags); - (*proc)(refCon, 
siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth); - } - - *countp = numSites; + count = *countp; + for (numSites = 0, idx = 0; idx < count; idx++) { + element = instances[idx]; + + if (kInstanceFlagReferenced & element) { + continue; + } + element = INSTANCE_PUT(element) & ~kInstanceFlags; + + site = 0; + hashidx = calculate_hashidx_for_element(element, btlog); + hashelem = btlog->elem_linkage_un.elem_recindex_hashtbl[hashidx]; + while (hashelem != NULL) { + if (~hashelem->elem == element) { + break; + } + hashelem = TAILQ_NEXT(hashelem, element_hash_link); + } + if (hashelem) { + recindex = hashelem->recindex; + site = (uintptr_t) lookup_btrecord(btlog, recindex); + } + if (site) { + element = (site | kInstanceFlagReferenced); + } + instances[numSites] = INSTANCE_PUT(element); + numSites++; + } + + for (idx = 0; idx < numSites; idx++) { + site = instances[idx]; + if (!site) { + continue; + } + if (!(kInstanceFlagReferenced & site)) { + continue; + } + for (siteCount = 1, dups = (idx + 1); dups < numSites; dups++) { + if (instances[dups] == site) { + siteCount++; + instances[dups] = 0; + } + } + record = (typeof(record))(INSTANCE_PUT(site) & ~kInstanceFlags); + (*proc)(refCon, siteCount, zoneSize, (uintptr_t *) &record->bt[0], (uint32_t) btlog->btrecord_btdepth); + } + + *countp = numSites; btlog_unlock(btlog); } @@ -848,7 +843,7 @@ get_btlog_records_count(btlog_t *btlog) if (btlog->btlog_buffersize < sizeof(btlog_t)) { return 0; } - return ((btlog->btlog_buffersize - sizeof(btlog_t))/btlog->btrecord_size); + return (btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size; } /* @@ -863,13 +858,13 @@ get_btlog_records(btlog_t *btlog, zone_btrecord_t *records, unsigned int *numrec unsigned int count, recs_copied, frame; zone_btrecord_t *current_rec; btlog_record_t *zstack_record; - btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE; + btlog_recordindex_t zstack_index = BTLOG_RECORDINDEX_NONE; btlog_lock(btlog); count = 0; if (btlog->btlog_buffersize > sizeof(btlog_t)) { - count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t))/btlog->btrecord_size); + count = (unsigned int)((btlog->btlog_buffersize - sizeof(btlog_t)) / btlog->btrecord_size); } /* Copy out only as many records as the pre-allocated buffer size permits. */ if (count > *numrecs) { diff --git a/osfmk/kern/btlog.h b/osfmk/kern/btlog.h index 3930703ab..549ea48c3 100644 --- a/osfmk/kern/btlog.h +++ b/osfmk/kern/btlog.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _KERN_BTLOG_H_ +#ifndef _KERN_BTLOG_H_ #define _KERN_BTLOG_H_ #include @@ -35,7 +35,7 @@ #include #include -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE /* * The btlog subsystem allows for fast unobtrusive backtraces @@ -63,40 +63,40 @@ * lower levels, when used by the zone allocator logging code. */ -#define BTLOG_MAX_DEPTH 15 +#define BTLOG_MAX_DEPTH 15 struct btlog; typedef struct btlog btlog_t; extern btlog_t *btlog_create(size_t numrecords, - size_t record_btdepth, - boolean_t caller_will_remove_entries_for_element); + size_t record_btdepth, + boolean_t caller_will_remove_entries_for_element); extern void btlog_add_entry(btlog_t *btlog, - void *element, - uint8_t operation, - void *bt[], - size_t btcount); + void *element, + uint8_t operation, + void *bt[], + size_t btcount); extern void btlog_remove_entries_for_element(btlog_t *btlog, - void *element); + void *element); #if DEBUG || DEVELOPMENT void btlog_copy_backtraces_for_elements(btlog_t * btlog, - uintptr_t * instances, - uint32_t * count, - uint32_t zoneSize, - leak_site_proc proc, - void * refCon); + uintptr_t * instances, + uint32_t * count, + uint32_t zoneSize, + leak_site_proc proc, + void * refCon); size_t get_btlog_records_count(btlog_t *btlog); void get_btlog_records(btlog_t *btlog, - zone_btrecord_t *records, - unsigned int *numrecs); + zone_btrecord_t *records, + unsigned int *numrecs); #endif /* DEBUG || DEVELOPMENT */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -#endif /* _KERN_BTLOG_H_ */ +#endif /* _KERN_BTLOG_H_ */ diff --git a/osfmk/kern/build_config.c b/osfmk/kern/build_config.c index a1261d715..b011dfbe1 100644 --- a/osfmk/kern/build_config.c +++ b/osfmk/kern/build_config.c @@ -37,4 +37,3 @@ kern_config_is_development(void) return false; #endif } - diff --git a/osfmk/kern/call_entry.h b/osfmk/kern/call_entry.h index dede1bffb..979e57a8f 100644 --- a/osfmk/kern/call_entry.h +++ b/osfmk/kern/call_entry.h @@ -2,7 +2,7 @@ * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,81 +36,82 @@ #include #if !CONFIG_EMBEDDED -#define TIMER_TRACE 1 +#define TIMER_TRACE 1 #endif -typedef void *call_entry_param_t; -typedef void (*call_entry_func_t)( - call_entry_param_t param0, - call_entry_param_t param1); +typedef void *call_entry_param_t; +typedef void (*call_entry_func_t)( + call_entry_param_t param0, + call_entry_param_t param1); typedef struct call_entry { - queue_chain_t q_link; - queue_head_t *queue; - call_entry_func_t func; - call_entry_param_t param0; - call_entry_param_t param1; - uint64_t deadline; + queue_chain_t q_link; + queue_head_t *queue; + call_entry_func_t func; + call_entry_param_t param0; + call_entry_param_t param1; + uint64_t deadline; #if TIMER_TRACE - uint64_t entry_time; + uint64_t entry_time; #endif } call_entry_data_t; -typedef struct call_entry *call_entry_t; +typedef struct call_entry *call_entry_t; #ifdef MACH_KERNEL_PRIVATE -#define call_entry_setup(entry, pfun, p0) \ -MACRO_BEGIN \ - (entry)->func = (call_entry_func_t)(pfun); \ - (entry)->param0 = (call_entry_param_t)(p0); \ - (entry)->queue = NULL; \ - (entry)->deadline = 0; \ - queue_chain_init((entry)->q_link); \ +#define call_entry_setup(entry, pfun, p0) \ +MACRO_BEGIN \ + (entry)->func = (call_entry_func_t)(pfun); \ + (entry)->param0 = (call_entry_param_t)(p0); \ + (entry)->queue = NULL; \ + (entry)->deadline = 0; \ + queue_chain_init((entry)->q_link); \ MACRO_END -#define qe(x) ((queue_entry_t)(x)) -#define CE(x) ((call_entry_t)(x)) +#define qe(x) ((queue_entry_t)(x)) +#define CE(x) ((call_entry_t)(x)) static __inline__ queue_head_t * call_entry_enqueue_tail( - call_entry_t entry, - queue_t queue) + call_entry_t entry, + queue_t queue) { - queue_t old_queue = entry->queue; + queue_t old_queue = entry->queue; - if (old_queue != NULL) + if (old_queue != NULL) { re_queue_tail(queue, &entry->q_link); - else + } else { enqueue_tail(queue, &entry->q_link); + } - entry->queue = queue; + entry->queue = queue; - return (old_queue); + return old_queue; } static __inline__ queue_head_t * call_entry_dequeue( - call_entry_t entry) + call_entry_t entry) { - queue_t old_queue = entry->queue; + queue_t old_queue = entry->queue; if (old_queue != NULL) { (void)remque(qe(entry)); entry->queue = NULL; } - return (old_queue); + return old_queue; } static __inline__ queue_head_t * call_entry_enqueue_deadline( - call_entry_t entry, - queue_head_t *queue, - uint64_t deadline) + call_entry_t entry, + queue_head_t *queue, + uint64_t deadline) { - queue_t old_queue = entry->queue; - call_entry_t current; + queue_t old_queue = entry->queue; + call_entry_t current; if (old_queue != queue || entry->deadline < deadline) { if (old_queue == NULL) { @@ -133,8 +134,7 @@ call_entry_enqueue_deadline( current = CE(queue_next(qe(current))); } insque(qe(entry), qe(current)); - } - else if (deadline < entry->deadline) { + } else if (deadline < entry->deadline) { current = CE(queue_prev(qe(entry))); (void)remque(qe(entry)); @@ -152,7 +152,7 @@ call_entry_enqueue_deadline( entry->queue = queue; entry->deadline = deadline; - return (old_queue); + return old_queue; } #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/clock.c b/osfmk/kern/clock.c index 2cd05c562..578a7f6a6 100644 --- a/osfmk/kern/clock.c +++ b/osfmk/kern/clock.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -86,33 +86,34 @@ #include #include -uint32_t hz_tick_interval = 1; +uint32_t hz_tick_interval = 1; static uint64_t has_monotonic_clock = 0; -decl_simple_lock_data(,clock_lock) +decl_simple_lock_data(, clock_lock) lck_grp_attr_t * settime_lock_grp_attr; lck_grp_t * settime_lock_grp; lck_attr_t * settime_lock_attr; lck_mtx_t settime_lock; -#define clock_lock() \ - simple_lock(&clock_lock) +#define clock_lock() \ + simple_lock(&clock_lock, LCK_GRP_NULL) -#define clock_unlock() \ +#define clock_unlock() \ simple_unlock(&clock_lock) -#define clock_lock_init() \ +#define clock_lock_init() \ simple_lock_init(&clock_lock, 0) #ifdef kdp_simple_lock_is_acquired -boolean_t kdp_clock_is_locked() +boolean_t +kdp_clock_is_locked() { return kdp_simple_lock_is_acquired(&clock_lock); } #endif struct bintime { - time_t sec; + time_t sec; uint64_t frac; }; @@ -123,8 +124,9 @@ bintime_addx(struct bintime *_bt, uint64_t _x) _u = _bt->frac; _bt->frac += _x; - if (_u > _bt->frac) + if (_u > _bt->frac) { _bt->sec++; + } } static __inline void @@ -134,14 +136,15 @@ bintime_subx(struct bintime *_bt, uint64_t _x) _u = _bt->frac; _bt->frac -= _x; - if (_u < _bt->frac) + if (_u < _bt->frac) { _bt->sec--; + } } static __inline void bintime_addns(struct bintime *bt, uint64_t ns) { - bt->sec += ns/ (uint64_t)NSEC_PER_SEC; + bt->sec += ns / (uint64_t)NSEC_PER_SEC; ns = ns % (uint64_t)NSEC_PER_SEC; if (ns) { /* 18446744073 = int(2^64 / NSEC_PER_SEC) */ @@ -153,7 +156,7 @@ bintime_addns(struct bintime *bt, uint64_t ns) static __inline void bintime_subns(struct bintime *bt, uint64_t ns) { - bt->sec -= ns/ (uint64_t)NSEC_PER_SEC; + bt->sec -= ns / (uint64_t)NSEC_PER_SEC; ns = ns % (uint64_t)NSEC_PER_SEC; if (ns) { /* 18446744073 = int(2^64 / NSEC_PER_SEC) */ @@ -165,19 +168,20 @@ bintime_subns(struct bintime *bt, uint64_t ns) static __inline void bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns) { - uint64_t uxns = (xns > 0)?(uint64_t )xns:(uint64_t)-xns; + uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns; uint64_t ns = multi_overflow(a, uxns); if (xns > 0) { - if (ns) + if (ns) { bintime_addns(bt, ns); + } ns = (a * uxns) / (uint64_t)NSEC_PER_SEC; bintime_addx(bt, ns); - } - else{ - if (ns) + } else { + if (ns) { bintime_subns(bt, ns); + } ns = (a * uxns) / (uint64_t)NSEC_PER_SEC; - bintime_subx(bt,ns); + bintime_subx(bt, ns); } } @@ -189,8 +193,9 @@ bintime_add(struct bintime *_bt, const struct bintime *_bt2) _u = _bt->frac; _bt->frac += 
_bt2->frac; - if (_u > _bt->frac) + if (_u > _bt->frac) { _bt->sec++; + } _bt->sec += _bt2->sec; } @@ -201,15 +206,15 @@ bintime_sub(struct bintime *_bt, const struct bintime *_bt2) _u = _bt->frac; _bt->frac -= _bt2->frac; - if (_u < _bt->frac) + if (_u < _bt->frac) { _bt->sec--; + } _bt->sec -= _bt2->sec; } static __inline void clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt) { - _bt->sec = *secs; /* 18446744073709 = int(2^64 / 1000000) */ _bt->frac = *microsecs * (uint64_t)18446744073709LL; @@ -218,7 +223,6 @@ clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bin static __inline void bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs) { - *secs = _bt->sec; *microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32; } @@ -226,7 +230,6 @@ bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *micr static __inline void bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs) { - *secs = _bt->sec; *nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32; } @@ -240,8 +243,8 @@ bintime2absolutetime(const struct bintime *_bt, uint64_t *abs) } struct latched_time { - uint64_t monotonic_time_usec; - uint64_t mach_time; + uint64_t monotonic_time_usec; + uint64_t mach_time; }; extern int @@ -255,19 +258,19 @@ kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, s * TOD <- bintime + delta*scale * * where : - * bintime is a cumulative offset that includes bootime and scaled time elapsed betweed bootime and last scale update. + * bintime is a cumulative offset that includes bootime and scaled time elapsed betweed bootime and last scale update. * delta is ticks elapsed since last scale update. * scale is computed according to an adjustment provided by ntp_kern. 
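Restated as code, the update rule above mirrors the patch's own get_scaled_time() and its callers; current_tod() is an illustrative name, and clock_lock must be held:

	static struct bintime
	current_tod(void)   /* sketch; not a function added by this patch */
	{
		uint64_t now   = mach_absolute_time();
		uint64_t delta = now - clock_calend.offset_count; /* ticks since last rescale */

		/* delta*scale, in (sec, 2^-64 sec) fixed point */
		struct bintime bt = scale_delta(delta, clock_calend.tick_scale_x,
		    clock_calend.s_scale_ns, clock_calend.s_adj_nsx);

		bintime_add(&bt, &clock_calend.bintime);          /* + cumulative offset */
		return bt;                                        /* TOD */
	}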
*/ static struct clock_calend { - uint64_t s_scale_ns; /* scale to apply for each second elapsed, it converts in ns */ - int64_t s_adj_nsx; /* additional adj to apply for each second elapsed, it is expressed in 64 bit frac of ns */ - uint64_t tick_scale_x; /* scale to apply for each tick elapsed, it converts in 64 bit frac of s */ - uint64_t offset_count; /* abs time from which apply current scales */ - struct bintime offset; /* cumulative offset expressed in (sec, 64 bits frac of a second) */ - struct bintime bintime; /* cumulative offset (it includes bootime) expressed in (sec, 64 bits frac of a second) */ - struct bintime boottime; /* boot time expressed in (sec, 64 bits frac of a second) */ - struct bintime basesleep; + uint64_t s_scale_ns; /* scale to apply for each second elapsed, it converts in ns */ + int64_t s_adj_nsx; /* additional adj to apply for each second elapsed, it is expressed in 64 bit frac of ns */ + uint64_t tick_scale_x; /* scale to apply for each tick elapsed, it converts in 64 bit frac of s */ + uint64_t offset_count; /* abs time from which apply current scales */ + struct bintime offset; /* cumulative offset expressed in (sec, 64 bits frac of a second) */ + struct bintime bintime; /* cumulative offset (it includes bootime) expressed in (sec, 64 bits frac of a second) */ + struct bintime boottime; /* boot time expressed in (sec, 64 bits frac of a second) */ + struct bintime basesleep; } clock_calend; static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */ @@ -282,7 +285,7 @@ static void print_all_clock_variables_internal(const char *, struct clock_calend #define print_all_clock_variables_internal(...) do { } while (0) #endif -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* @@ -296,9 +299,9 @@ static void print_all_clock_variables_internal(const char *, struct clock_calend * is cleared atomically (by using a 1 bit add). 
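A sketch of the consumer side this comment implies (DTrace's actual reader lives in dtrace_glue.c); the odd-generation convention below is inferred from the "1 bit add" description and is not code from this patch:

	static boolean_t
	calend_snapshot_nowait(struct clock_calend *out) /* illustrative name */
	{
		for (int i = 0; i < 2; i++) {
			uint32_t gen = flipflop[i].gen;
			if (gen & 1) {
				continue;               /* low bit set: update in flight */
			}
			*out = flipflop[i].calend;
			if (flipflop[i].gen == gen) {
				return TRUE;            /* generation stable across copy */
			}
		}
		return FALSE;                           /* both copies in motion */
	}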
*/ static struct unlocked_clock_calend { - struct clock_calend calend; /* copy of calendar */ - uint32_t gen; /* generation count */ -} flipflop[ 2]; + struct clock_calend calend; /* copy of calendar */ + uint32_t gen; /* generation count */ +} flipflop[2]; static void clock_track_calend_nowait(void); @@ -311,22 +314,22 @@ void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadlin static uint64_t clock_boottime; static uint32_t clock_boottime_usec; -#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \ -MACRO_BEGIN \ - if (((rfrac) += (frac)) >= (unit)) { \ - (rfrac) -= (unit); \ - (rsecs) += 1; \ - } \ - (rsecs) += (secs); \ +#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \ +MACRO_BEGIN \ + if (((rfrac) += (frac)) >= (unit)) { \ + (rfrac) -= (unit); \ + (rsecs) += 1; \ + } \ + (rsecs) += (secs); \ MACRO_END -#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \ -MACRO_BEGIN \ - if ((int)((rfrac) -= (frac)) < 0) { \ - (rfrac) += (unit); \ - (rsecs) -= 1; \ - } \ - (rsecs) -= (secs); \ +#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \ +MACRO_BEGIN \ + if ((int)((rfrac) -= (frac)) < 0) { \ + (rfrac) += (unit); \ + (rsecs) -= 1; \ + } \ + (rsecs) -= (secs); \ MACRO_END /* @@ -337,7 +340,6 @@ MACRO_END void clock_config(void) { - clock_lock_init(); settime_lock_grp_attr = lck_grp_attr_alloc_init(); @@ -374,7 +376,7 @@ clock_init(void) void clock_timebase_init(void) { - uint64_t abstime; + uint64_t abstime; nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime); hz_tick_interval = (uint32_t)abstime; @@ -391,14 +393,14 @@ kern_return_t mach_timebase_info_trap( struct mach_timebase_info_trap_args *args) { - mach_vm_address_t out_info_addr = args->info; - mach_timebase_info_data_t info = {}; + mach_vm_address_t out_info_addr = args->info; + mach_timebase_info_data_t info = {}; clock_timebase_info(&info); - copyout((void *)&info, out_info_addr, sizeof (info)); + copyout((void *)&info, out_info_addr, sizeof(info)); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -413,8 +415,8 @@ mach_timebase_info_trap( */ void clock_get_calendar_microtime( - clock_sec_t *secs, - clock_usec_t *microsecs) + clock_sec_t *secs, + clock_usec_t *microsecs) { clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL); } @@ -479,7 +481,7 @@ get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* * Keep it as additional adjustment for the next sec. */ frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment)); - *s_adj_nsx = (frac>0)? frac << 32 : -( (-frac) << 32); + *s_adj_nsx = (frac > 0)? frac << 32 : -((-frac) << 32); return; } @@ -506,18 +508,18 @@ scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec. */ if (delta > ticks_per_sec) { - sec = (delta/ticks_per_sec); + sec = (delta / ticks_per_sec); new_ns = sec * s_scale_ns; bintime_addns(&bt, new_ns); if (s_adj_nsx) { if (sec == 1) { /* shortcut, no overflow can occur */ - if (s_adj_nsx > 0) - bintime_addx(&bt, (uint64_t)s_adj_nsx/ (uint64_t)NSEC_PER_SEC); - else - bintime_subx(&bt, (uint64_t)-s_adj_nsx/ (uint64_t)NSEC_PER_SEC); - } - else{ + if (s_adj_nsx > 0) { + bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC); + } else { + bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC); + } + } else { /* * s_adj_nsx is 64 bit frac of ns. * sec*s_adj_nsx might overflow in int64_t. 
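The bintime helpers here are 64.64 fixed point; the conversion used by bintime2usclock() keeps only the top 32 fraction bits, which this small standalone check demonstrates:

	#include <stdint.h>
	#include <stdio.h>

	/* usec = (1e6 * (frac >> 32)) >> 32, i.e. 1e6 * frac/2^64 in 32.32 arithmetic */
	static uint32_t
	frac_to_usec(uint64_t frac)
	{
		return (uint32_t)((UINT64_C(1000000) * (uint32_t)(frac >> 32)) >> 32);
	}

	int
	main(void)
	{
		printf("%u\n", frac_to_usec(UINT64_C(1) << 63)); /* half second -> 500000 */
		return 0;
	}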
@@ -527,10 +529,10 @@ scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t } } delta = (delta % ticks_per_sec); - } + } over = multi_overflow(tick_scale_x, delta); - if(over){ + if (over) { bt.sec += over; } @@ -564,16 +566,17 @@ get_scaled_time(uint64_t now) static void clock_get_calendar_absolute_and_microtime_locked( - clock_sec_t *secs, - clock_usec_t *microsecs, - uint64_t *abstime) + clock_sec_t *secs, + clock_usec_t *microsecs, + uint64_t *abstime) { uint64_t now; struct bintime bt; now = mach_absolute_time(); - if (abstime) + if (abstime) { *abstime = now; + } bt = get_scaled_time(now); bintime_add(&bt, &clock_calend.bintime); @@ -582,16 +585,17 @@ clock_get_calendar_absolute_and_microtime_locked( static void clock_get_calendar_absolute_and_nanotime_locked( - clock_sec_t *secs, - clock_usec_t *nanosecs, - uint64_t *abstime) + clock_sec_t *secs, + clock_usec_t *nanosecs, + uint64_t *abstime) { uint64_t now; struct bintime bt; now = mach_absolute_time(); - if (abstime) + if (abstime) { *abstime = now; + } bt = get_scaled_time(now); bintime_add(&bt, &clock_calend.bintime); @@ -608,11 +612,11 @@ clock_get_calendar_absolute_and_nanotime_locked( */ void clock_get_calendar_absolute_and_microtime( - clock_sec_t *secs, - clock_usec_t *microsecs, - uint64_t *abstime) + clock_sec_t *secs, + clock_usec_t *microsecs, + uint64_t *abstime) { - spl_t s; + spl_t s; s = splclock(); clock_lock(); @@ -635,10 +639,10 @@ clock_get_calendar_absolute_and_microtime( */ void clock_get_calendar_nanotime( - clock_sec_t *secs, - clock_nsec_t *nanosecs) + clock_sec_t *secs, + clock_nsec_t *nanosecs) { - spl_t s; + spl_t s; s = splclock(); clock_lock(); @@ -662,21 +666,21 @@ clock_get_calendar_nanotime( */ void clock_gettimeofday( - clock_sec_t *secs, - clock_usec_t *microsecs) + clock_sec_t *secs, + clock_usec_t *microsecs) { clock_gettimeofday_and_absolute_time(secs, microsecs, NULL); } void clock_gettimeofday_and_absolute_time( - clock_sec_t *secs, - clock_usec_t *microsecs, - uint64_t *mach_time) + clock_sec_t *secs, + clock_usec_t *microsecs, + uint64_t *mach_time) { - uint64_t now; - spl_t s; - struct bintime bt; + uint64_t now; + spl_t s; + struct bintime bt; s = splclock(); clock_lock(); @@ -710,19 +714,19 @@ clock_gettimeofday_and_absolute_time( */ void clock_set_calendar_microtime( - clock_sec_t secs, - clock_usec_t microsecs) + clock_sec_t secs, + clock_usec_t microsecs) { - uint64_t absolutesys; - clock_sec_t newsecs; - clock_sec_t oldsecs; - clock_usec_t newmicrosecs; - clock_usec_t oldmicrosecs; - uint64_t commpage_value; - spl_t s; - struct bintime bt; - clock_sec_t deltasecs; - clock_usec_t deltamicrosecs; + uint64_t absolutesys; + clock_sec_t newsecs; + clock_sec_t oldsecs; + clock_usec_t newmicrosecs; + clock_usec_t oldmicrosecs; + uint64_t commpage_value; + spl_t s; + struct bintime bt; + clock_sec_t deltasecs; + clock_usec_t deltamicrosecs; newsecs = secs; newmicrosecs = microsecs; @@ -753,9 +757,9 @@ clock_set_calendar_microtime( #if DEVELOPMENT || DEBUG if (g_should_log_clock_adjustments) { os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n", - __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys); + __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys); os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n", - __func__, (unsigned long)secs, microsecs ); + __func__, (unsigned long)secs, microsecs ); } #endif @@ -865,7 +869,6 @@ clock_get_calendar_uptime(clock_sec_t *secs) void clock_update_calendar(void) { - uint64_t now, delta; struct 
bintime bt; spl_t s; @@ -902,7 +905,7 @@ clock_update_calendar(void) os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment); } #endif - + /* * recomputing scale factors. */ @@ -917,13 +920,14 @@ clock_update_calendar(void) clock_unlock(); splx(s); - print_all_clock_variables(__func__, NULL,NULL,NULL,NULL, &calend_cp); + print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp); } #if DEVELOPMENT || DEBUG -void print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp) +void +print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp) { clock_sec_t offset_secs; clock_usec_t offset_microsecs; @@ -931,42 +935,44 @@ void print_all_clock_variables_internal(const char* func, struct clock_calend* c clock_usec_t bintime_microsecs; clock_sec_t bootime_secs; clock_usec_t bootime_microsecs; - - if (!g_should_log_clock_adjustments) - return; + + if (!g_should_log_clock_adjustments) { + return; + } bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs); bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs); bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs); os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n", - func , clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx, - clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count); + func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx, + clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count); os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n", - func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac, - (unsigned long)offset_secs, offset_microsecs); + func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac, + (unsigned long)offset_secs, offset_microsecs); os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n", - func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac, - (unsigned long)bintime_secs, bintime_microsecs); + func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac, + (unsigned long)bintime_secs, bintime_microsecs); os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n", - func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac, - (unsigned long)bootime_secs, bootime_microsecs); + func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac, + (unsigned long)bootime_secs, bootime_microsecs); clock_sec_t basesleep_secs; - clock_usec_t basesleep_microsecs; - + clock_usec_t basesleep_microsecs; + bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs); os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n", - func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac, - (unsigned long)basesleep_secs, basesleep_microsecs); - + func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac, + (unsigned long)basesleep_secs, basesleep_microsecs); } -void print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp) +void +print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp) { - if 
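/*
 * The bintime2usclock()/bintime2nsclock() calls used by this logging
 * convert the binary fraction back to decimal sub-seconds. A plausible
 * sketch following the FreeBSD bintime2timeval idiom (only the top 32
 * fraction bits are used, keeping the multiply within 64 bits):
 *
 *	static inline void
 *	bintime2usclock(const struct bintime *bt,
 *	    clock_sec_t *secs, clock_usec_t *microsecs)
 *	{
 *		*secs = (clock_sec_t)bt->sec;
 *		*microsecs = (clock_usec_t)
 *		    (((uint64_t)USEC_PER_SEC * (uint32_t)(bt->frac >> 32)) >> 32);
 *	}
 */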
(!g_should_log_clock_adjustments) + if (!g_should_log_clock_adjustments) { return; + } struct bintime bt; clock_sec_t wall_secs; @@ -975,7 +981,7 @@ void print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_us uint64_t delta; if (pmu_secs) { - os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec); + os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec); } if (sys_secs) { os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec); @@ -984,14 +990,14 @@ void print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_us print_all_clock_variables_internal(func, clock_calend_cp); now = mach_absolute_time(); - delta = now - clock_calend_cp->offset_count; + delta = now - clock_calend_cp->offset_count; - bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx); + bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx); bintime_add(&bt, &clock_calend_cp->bintime); bintime2usclock(&bt, &wall_secs, &wall_microsecs); os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n", - func, (unsigned long)wall_secs, wall_microsecs, now); + func, (unsigned long)wall_secs, wall_microsecs, now); } @@ -1009,20 +1015,20 @@ void print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_us void clock_initialize_calendar(void) { - clock_sec_t sys; // sleepless time since boot in seconds - clock_sec_t secs; // Current UTC time - clock_sec_t utc_offset_secs; // Difference in current UTC time and sleepless time since boot - clock_usec_t microsys; - clock_usec_t microsecs; - clock_usec_t utc_offset_microsecs; - spl_t s; - struct bintime bt; - struct bintime monotonic_bt; - struct latched_time monotonic_time; - uint64_t monotonic_usec_total; + clock_sec_t sys; // sleepless time since boot in seconds + clock_sec_t secs; // Current UTC time + clock_sec_t utc_offset_secs; // Difference in current UTC time and sleepless time since boot + clock_usec_t microsys; + clock_usec_t microsecs; + clock_usec_t utc_offset_microsecs; + spl_t s; + struct bintime bt; + struct bintime monotonic_bt; + struct latched_time monotonic_time; + uint64_t monotonic_usec_total; clock_sec_t sys2, monotonic_sec; - clock_usec_t microsys2, monotonic_usec; - size_t size; + clock_usec_t microsys2, monotonic_usec; + size_t size; //Get the UTC time and corresponding sys time PEGetUTCTimeOfDay(&secs, µsecs); @@ -1062,7 +1068,7 @@ clock_initialize_calendar(void) */ if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) { os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less then sys time, (%lu s %d u) UTC (%lu s %d u) sys\n", - __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys); + __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys); secs = utc_offset_secs = sys; microsecs = utc_offset_microsecs = microsys; } @@ -1094,7 +1100,6 @@ clock_initialize_calendar(void) clock_calend.s_adj_nsx = 0; if (has_monotonic_clock) { - monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC; monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC; @@ -1115,13 +1120,13 @@ clock_initialize_calendar(void) clock_unlock(); splx(s); - print_all_clock_variables(__func__, &secs, µsecs, &sys, µsys, &clock_calend_cp); + print_all_clock_variables(__func__, &secs, µsecs, &sys, µsys, &clock_calend_cp); /* * Send host notifications. 
*/ host_notify_calendar_change(); - + #if CONFIG_DTRACE clock_track_calend_nowait(); #endif @@ -1131,22 +1136,22 @@ clock_initialize_calendar(void) void clock_wakeup_calendar(void) { - clock_sec_t wake_sys_sec; + clock_sec_t wake_sys_sec; clock_usec_t wake_sys_usec; - clock_sec_t wake_sec; - clock_usec_t wake_usec; + clock_sec_t wake_sec; + clock_usec_t wake_usec; clock_sec_t wall_time_sec; clock_usec_t wall_time_usec; - clock_sec_t diff_sec; - clock_usec_t diff_usec; + clock_sec_t diff_sec; + clock_usec_t diff_usec; clock_sec_t var_s; clock_usec_t var_us; - spl_t s; - struct bintime bt, last_sleep_bt; + spl_t s; + struct bintime bt, last_sleep_bt; struct latched_time monotonic_time; - uint64_t monotonic_usec_total; - uint64_t wake_abs; - size_t size; + uint64_t monotonic_usec_total; + uint64_t wake_abs; + size_t size; /* * If the platform has the monotonic clock use that to @@ -1174,7 +1179,6 @@ clock_wakeup_calendar(void) * it is doing it only througth the settimeofday interface. */ if (has_monotonic_clock) { - #if DEVELOPMENT || DEBUG /* * Just for debugging, get the wake UTC time. @@ -1204,15 +1208,15 @@ clock_wakeup_calendar(void) } #if DEVELOPMENT || DEBUG - os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs); - if (has_monotonic_clock) { - os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us); - } + os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs); + if (has_monotonic_clock) { + os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us); + } #endif /* DEVELOPMENT || DEBUG */ s = splclock(); clock_lock(); - + commpage_disable_timestamp(); #if DEVELOPMENT || DEBUG @@ -1255,7 +1259,6 @@ clock_wakeup_calendar(void) */ if ((bt.sec > clock_calend.basesleep.sec) || ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) { - //last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs) last_sleep_bt = bt; bintime_sub(&last_sleep_bt, &clock_calend.basesleep); @@ -1272,7 +1275,6 @@ clock_wakeup_calendar(void) bintime2usclock(&last_sleep_bt, &var_s, &var_us); os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us); - } else { bintime2usclock(&clock_calend.basesleep, &var_s, &var_us); os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater then current monotonic-sys time(%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec); @@ -1287,7 +1289,7 @@ clock_wakeup_calendar(void) bintime_add(&bt, &clock_calend.bintime); bintime2usclock(&bt, &wall_time_sec, &wall_time_usec); - if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec) ) { + if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) { os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec); mach_absolutetime_last_sleep = 0; @@ -1315,12 +1317,12 @@ clock_wakeup_calendar(void) } done: KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_CLOCK,MACH_EPOCH_CHANGE) | DBG_FUNC_NONE, - (uintptr_t) mach_absolutetime_last_sleep, - (uintptr_t) mach_absolutetime_asleep, - 
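/*
 * The sleep accounting in clock_wakeup_calendar() reduces to interval
 * algebra: the monotonic (continuous) clock keeps counting across sleep
 * while mach_absolute_time() stops, so the gap between the two grows by
 * exactly the time slept. A sketch with hypothetical names (the first
 * gap is clock_calend.basesleep; the second is the bt compared against
 * it above):
 *
 *	gap_at_last_wake = mono_then - abs_then;
 *	gap_now          = mono_now  - abs_now;
 *	time_slept       = gap_now - gap_at_last_wake;
 *	mach_absolutetime_asleep += time_slept;   // feeds continuous time
 */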
(uintptr_t) (mach_absolutetime_last_sleep >> 32), - (uintptr_t) (mach_absolutetime_asleep >> 32), - 0); + MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE) | DBG_FUNC_NONE, + (uintptr_t) mach_absolutetime_last_sleep, + (uintptr_t) mach_absolutetime_asleep, + (uintptr_t) (mach_absolutetime_last_sleep >> 32), + (uintptr_t) (mach_absolutetime_asleep >> 32), + 0); commpage_update_mach_continuous_time(mach_absolutetime_asleep); adjust_cont_time_thread_calls(); @@ -1354,10 +1356,10 @@ done: */ void clock_get_boottime_nanotime( - clock_sec_t *secs, - clock_nsec_t *nanosecs) + clock_sec_t *secs, + clock_nsec_t *nanosecs) { - spl_t s; + spl_t s; s = splclock(); clock_lock(); @@ -1376,10 +1378,10 @@ clock_get_boottime_nanotime( */ void clock_get_boottime_microtime( - clock_sec_t *secs, - clock_usec_t *microsecs) + clock_sec_t *secs, + clock_usec_t *microsecs) { - spl_t s; + spl_t s; s = splclock(); clock_lock(); @@ -1397,8 +1399,8 @@ clock_get_boottime_microtime( */ static void mach_wait_until_continue( - __unused void *parameter, - wait_result_t wresult) + __unused void *parameter, + wait_result_t wresult) { thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS); /*NOTREACHED*/ @@ -1410,32 +1412,34 @@ mach_wait_until_continue( * Parameters: args->deadline Amount of time to wait * * Returns: 0 Success - * !0 Not success + * !0 Not success * */ kern_return_t mach_wait_until_trap( - struct mach_wait_until_trap_args *args) + struct mach_wait_until_trap_args *args) { - uint64_t deadline = args->deadline; - wait_result_t wresult; + uint64_t deadline = args->deadline; + wait_result_t wresult; wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE, - TIMEOUT_URGENCY_USER_NORMAL, deadline, 0); - if (wresult == THREAD_WAITING) + TIMEOUT_URGENCY_USER_NORMAL, deadline, 0); + if (wresult == THREAD_WAITING) { wresult = thread_block(mach_wait_until_continue); + } - return ((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS); + return (wresult == THREAD_INTERRUPTED)? 
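/*
 * User space reaches this trap through mach_wait_until() from
 * <mach/mach_time.h>. A minimal sketch of a ~100 ms absolute-deadline
 * sleep (standard public API, shown only for illustration):
 *
 *	#include <mach/mach_time.h>
 *
 *	mach_timebase_info_data_t tb;
 *	mach_timebase_info(&tb);
 *	uint64_t ns = 100ULL * 1000 * 1000;
 *	uint64_t ticks = ns * tb.denom / tb.numer;
 *	kern_return_t kr = mach_wait_until(mach_absolute_time() + ticks);
 *	// kr == KERN_ABORTED if the wait was interrupted
 */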
KERN_ABORTED: KERN_SUCCESS; } void clock_delay_until( - uint64_t deadline) + uint64_t deadline) { - uint64_t now = mach_absolute_time(); + uint64_t now = mach_absolute_time(); - if (now >= deadline) + if (now >= deadline) { return; + } _clock_delay_until_deadline(deadline - now, deadline); } @@ -1446,8 +1450,8 @@ clock_delay_until( */ void _clock_delay_until_deadline( - uint64_t interval, - uint64_t deadline) + uint64_t interval, + uint64_t deadline) { _clock_delay_until_deadline_with_leeway(interval, deadline, 0); } @@ -1458,17 +1462,17 @@ _clock_delay_until_deadline( */ void _clock_delay_until_deadline_with_leeway( - uint64_t interval, - uint64_t deadline, - uint64_t leeway) + uint64_t interval, + uint64_t deadline, + uint64_t leeway) { - - if (interval == 0) + if (interval == 0) { return; + } - if ( ml_delay_should_spin(interval) || - get_preemption_level() != 0 || - ml_get_interrupts_enabled() == FALSE ) { + if (ml_delay_should_spin(interval) || + get_preemption_level() != 0 || + ml_get_interrupts_enabled() == FALSE) { machine_delay_until(interval, deadline); } else { /* @@ -1488,10 +1492,10 @@ _clock_delay_until_deadline_with_leeway( void delay_for_interval( - uint32_t interval, - uint32_t scale_factor) + uint32_t interval, + uint32_t scale_factor) { - uint64_t abstime; + uint64_t abstime; clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime); @@ -1500,12 +1504,12 @@ delay_for_interval( void delay_for_interval_with_leeway( - uint32_t interval, - uint32_t leeway, - uint32_t scale_factor) + uint32_t interval, + uint32_t leeway, + uint32_t scale_factor) { - uint64_t abstime_interval; - uint64_t abstime_leeway; + uint64_t abstime_interval; + uint64_t abstime_leeway; clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval); clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway); @@ -1515,7 +1519,7 @@ delay_for_interval_with_leeway( void delay( - int usec) + int usec) { delay_for_interval((usec < 0)? 
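/*
 * clock_deadline_for_periodic_event() below advances a deadline by a
 * fixed interval without accumulating drift, re-basing on "now" only
 * when the caller has fallen behind. Sketch of a 10 ms periodic loop
 * (illustrative caller, not part of this file):
 *
 *	uint64_t interval, deadline = 0;
 *
 *	clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &interval);
 *	for (;;) {
 *		clock_deadline_for_periodic_event(interval,
 *		    mach_absolute_time(), &deadline);
 *		// block until `deadline`, then do the periodic work
 *	}
 */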
-usec: usec, NSEC_PER_USEC); } @@ -1525,11 +1529,11 @@ delay( */ void clock_interval_to_deadline( - uint32_t interval, - uint32_t scale_factor, - uint64_t *result) + uint32_t interval, + uint32_t scale_factor, + uint64_t *result) { - uint64_t abstime; + uint64_t abstime; clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime); @@ -1538,32 +1542,32 @@ clock_interval_to_deadline( void clock_absolutetime_interval_to_deadline( - uint64_t abstime, - uint64_t *result) + uint64_t abstime, + uint64_t *result) { *result = mach_absolute_time() + abstime; } void clock_continuoustime_interval_to_deadline( - uint64_t conttime, - uint64_t *result) + uint64_t conttime, + uint64_t *result) { *result = mach_continuous_time() + conttime; } void clock_get_uptime( - uint64_t *result) + uint64_t *result) { *result = mach_absolute_time(); } void clock_deadline_for_periodic_event( - uint64_t interval, - uint64_t abstime, - uint64_t *deadline) + uint64_t interval, + uint64_t abstime, + uint64_t *deadline) { assert(interval != 0); @@ -1573,21 +1577,22 @@ clock_deadline_for_periodic_event( *deadline = abstime + interval; abstime = mach_absolute_time(); - if (*deadline <= abstime) + if (*deadline <= abstime) { *deadline = abstime + interval; + } } } uint64_t mach_continuous_time(void) { - while(1) { + while (1) { uint64_t read1 = mach_absolutetime_asleep; uint64_t absolute = mach_absolute_time(); OSMemoryBarrier(); uint64_t read2 = mach_absolutetime_asleep; - if(__builtin_expect(read1 == read2, 1)) { + if (__builtin_expect(read1 == read2, 1)) { return absolute + read1; } } @@ -1596,13 +1601,13 @@ mach_continuous_time(void) uint64_t mach_continuous_approximate_time(void) { - while(1) { + while (1) { uint64_t read1 = mach_absolutetime_asleep; uint64_t absolute = mach_approximate_time(); OSMemoryBarrier(); uint64_t read2 = mach_absolutetime_asleep; - if(__builtin_expect(read1 == read2, 1)) { + if (__builtin_expect(read1 == read2, 1)) { return absolute + read1; } } @@ -1612,28 +1617,31 @@ mach_continuous_approximate_time(void) * continuoustime_to_absolutetime * Must be called with interrupts disabled * Returned value is only valid until the next update to - * mach_continuous_time + * mach_continuous_time */ uint64_t -continuoustime_to_absolutetime(uint64_t conttime) { - if (conttime <= mach_absolutetime_asleep) +continuoustime_to_absolutetime(uint64_t conttime) +{ + if (conttime <= mach_absolutetime_asleep) { return 0; - else + } else { return conttime - mach_absolutetime_asleep; + } } /* * absolutetime_to_continuoustime * Must be called with interrupts disabled * Returned value is only valid until the next update to - * mach_continuous_time + * mach_continuous_time */ uint64_t -absolutetime_to_continuoustime(uint64_t abstime) { +absolutetime_to_continuoustime(uint64_t abstime) +{ return abstime + mach_absolutetime_asleep; } -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * clock_get_calendar_nanotime_nowait @@ -1649,16 +1657,16 @@ absolutetime_to_continuoustime(uint64_t abstime) { */ void clock_get_calendar_nanotime_nowait( - clock_sec_t *secs, - clock_nsec_t *nanosecs) + clock_sec_t *secs, + clock_nsec_t *nanosecs) { int i = 0; - uint64_t now; + uint64_t now; struct unlocked_clock_calend stable; struct bintime bt; for (;;) { - stable = flipflop[i]; /* take snapshot */ + stable = flipflop[i]; /* take snapshot */ /* * Use a barrier instructions to ensure atomicity. 
We AND @@ -1673,8 +1681,9 @@ clock_get_calendar_nanotime_nowait( * and if we caught it at a good time, it will be equal (and * our snapshot is threfore stable). */ - if (flipflop[i].gen == stable.gen) + if (flipflop[i].gen == stable.gen) { break; + } /* Switch to the other element of the flipflop, and try again. */ i ^= 1; @@ -1689,7 +1698,7 @@ clock_get_calendar_nanotime_nowait( bintime2nsclock(&bt, secs, nanosecs); } -static void +static void clock_track_calend_nowait(void) { int i; @@ -1717,5 +1726,4 @@ clock_track_calend_nowait(void) } } -#endif /* CONFIG_DTRACE */ - +#endif /* CONFIG_DTRACE */ diff --git a/osfmk/kern/clock.h b/osfmk/kern/clock.h index 3671e5efe..d6d9b82ab 100644 --- a/osfmk/kern/clock.h +++ b/osfmk/kern/clock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -31,8 +31,8 @@ /* */ -#ifndef _KERN_CLOCK_H_ -#define _KERN_CLOCK_H_ +#ifndef _KERN_CLOCK_H_ +#define _KERN_CLOCK_H_ #include #include @@ -46,19 +46,19 @@ #include -#ifdef __LP64__ +#ifdef __LP64__ -typedef unsigned long clock_sec_t; -typedef unsigned int clock_usec_t, clock_nsec_t; +typedef unsigned long clock_sec_t; +typedef unsigned int clock_usec_t, clock_nsec_t; -#else /* __LP64__ */ +#else /* __LP64__ */ -typedef uint32_t clock_sec_t; -typedef uint32_t clock_usec_t, clock_nsec_t; +typedef uint32_t clock_sec_t; +typedef uint32_t clock_usec_t, clock_nsec_t; -#endif /* __LP64__ */ +#endif /* __LP64__ */ -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include @@ -66,188 +66,188 @@ typedef uint32_t clock_usec_t, clock_nsec_t; * Clock operations list structure. Contains vectors to machine * dependent clock routines. 
*/ -struct clock_ops { - int (*c_config)(void); /* configuration */ +struct clock_ops { + int (*c_config)(void); /* configuration */ - int (*c_init)(void); /* initialize */ + int (*c_init)(void); /* initialize */ - kern_return_t (*c_gettime)( /* get time */ - mach_timespec_t *cur_time); + kern_return_t (*c_gettime)( /* get time */ + mach_timespec_t *cur_time); - kern_return_t (*c_getattr)( /* get attributes */ - clock_flavor_t flavor, - clock_attr_t attr, - mach_msg_type_number_t *count); + kern_return_t (*c_getattr)( /* get attributes */ + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); }; -typedef const struct clock_ops *clock_ops_t; -typedef struct clock_ops clock_ops_data_t; +typedef const struct clock_ops *clock_ops_t; +typedef struct clock_ops clock_ops_data_t; /* * Actual clock object data structure. Contains the machine * dependent operations list and clock operation ports. */ -struct clock { - clock_ops_t cl_ops; /* operations list */ - struct ipc_port *cl_service; /* service port */ - struct ipc_port *cl_control; /* control port */ +struct clock { + clock_ops_t cl_ops; /* operations list */ + struct ipc_port *cl_service; /* service port */ + struct ipc_port *cl_control; /* control port */ }; -typedef struct clock clock_data_t; +typedef struct clock clock_data_t; /* * Configure the clock system. */ -extern void clock_config(void); -extern void clock_oldconfig(void); +extern void clock_config(void); +extern void clock_oldconfig(void); /* * Initialize the clock system. */ -extern void clock_init(void); -extern void clock_oldinit(void); +extern void clock_init(void); +extern void clock_oldinit(void); -extern void clock_timebase_init(void); +extern void clock_timebase_init(void); /* * Initialize the clock ipc service facility. 
*/ -extern void clock_service_create(void); +extern void clock_service_create(void); extern void clock_gettimeofday_set_commpage( - uint64_t abstime, - uint64_t sec, - uint64_t frac, - uint64_t scale, - uint64_t tick_per_sec); + uint64_t abstime, + uint64_t sec, + uint64_t frac, + uint64_t scale, + uint64_t tick_per_sec); -extern void machine_delay_until(uint64_t interval, - uint64_t deadline); +extern void machine_delay_until(uint64_t interval, + uint64_t deadline); -extern uint32_t hz_tick_interval; +extern uint32_t hz_tick_interval; -extern void nanotime_to_absolutetime( - clock_sec_t secs, - clock_nsec_t nanosecs, - uint64_t *result); +extern void nanotime_to_absolutetime( + clock_sec_t secs, + clock_nsec_t nanosecs, + uint64_t *result); #endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -extern void clock_adjtime( - long *secs, - int *microsecs); +extern void clock_adjtime( + long *secs, + int *microsecs); -extern void clock_initialize_calendar(void); +extern void clock_initialize_calendar(void); -extern void clock_wakeup_calendar(void); +extern void clock_wakeup_calendar(void); -extern void clock_update_calendar(void); +extern void clock_update_calendar(void); -extern void clock_get_calendar_uptime(clock_sec_t *secs); +extern void clock_get_calendar_uptime(clock_sec_t *secs); -extern void clock_gettimeofday_new(clock_sec_t *secs, - clock_usec_t *microsecs); -extern void clock_gettimeofday( - clock_sec_t *secs, - clock_usec_t *microsecs); +extern void clock_gettimeofday_new(clock_sec_t *secs, + clock_usec_t *microsecs); +extern void clock_gettimeofday( + clock_sec_t *secs, + clock_usec_t *microsecs); -extern void clock_gettimeofday_and_absolute_time( - clock_sec_t *secs, - clock_usec_t *microsecs, - uint64_t *absolute_time); +extern void clock_gettimeofday_and_absolute_time( + clock_sec_t *secs, + clock_usec_t *microsecs, + uint64_t *absolute_time); -extern void clock_set_calendar_microtime( - clock_sec_t secs, - clock_usec_t microsecs); +extern void clock_set_calendar_microtime( + clock_sec_t secs, + clock_usec_t microsecs); -extern void clock_get_boottime_nanotime( - clock_sec_t *secs, - clock_nsec_t *nanosecs); +extern void clock_get_boottime_nanotime( + clock_sec_t *secs, + clock_nsec_t *nanosecs); -extern void clock_get_boottime_microtime( - clock_sec_t *secs, - clock_nsec_t *microsecs); +extern void clock_get_boottime_microtime( + clock_sec_t *secs, + clock_nsec_t *microsecs); -extern void absolutetime_to_microtime( - uint64_t abstime, - clock_sec_t *secs, - clock_usec_t *microsecs); +extern void absolutetime_to_microtime( + uint64_t abstime, + clock_sec_t *secs, + clock_usec_t *microsecs); -extern void clock_deadline_for_periodic_event( - uint64_t interval, - uint64_t abstime, - uint64_t *deadline); +extern void clock_deadline_for_periodic_event( + uint64_t interval, + uint64_t abstime, + uint64_t *deadline); -#if CONFIG_DTRACE +#if CONFIG_DTRACE -extern void clock_get_calendar_nanotime_nowait( - clock_sec_t *secs, - clock_nsec_t *nanosecs); +extern void clock_get_calendar_nanotime_nowait( + clock_sec_t *secs, + clock_nsec_t *nanosecs); -#endif /* CONFIG_DTRACE */ +#endif /* CONFIG_DTRACE */ boolean_t kdp_clock_is_locked(void); -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -extern void clock_get_calendar_microtime( - clock_sec_t *secs, - clock_usec_t *microsecs); +extern void clock_get_calendar_microtime( + clock_sec_t *secs, + clock_usec_t *microsecs); -extern void clock_get_calendar_absolute_and_microtime( 
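/*
 * Typical in-kernel use of the accessors declared in this header
 * (illustrative):
 *
 *	clock_sec_t  secs;
 *	clock_usec_t usecs;
 *
 *	clock_get_calendar_microtime(&secs, &usecs);  // wall-clock time
 *	clock_get_system_microtime(&secs, &usecs);    // time since boot,
 *	                                              // excluding sleep
 */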
- clock_sec_t *secs, - clock_usec_t *microsecs, - uint64_t *abstime); +extern void clock_get_calendar_absolute_and_microtime( + clock_sec_t *secs, + clock_usec_t *microsecs, + uint64_t *abstime); -extern void clock_get_calendar_nanotime( - clock_sec_t *secs, - clock_nsec_t *nanosecs); +extern void clock_get_calendar_nanotime( + clock_sec_t *secs, + clock_nsec_t *nanosecs); -extern void clock_get_system_microtime( - clock_sec_t *secs, - clock_usec_t *microsecs); +extern void clock_get_system_microtime( + clock_sec_t *secs, + clock_usec_t *microsecs); -extern void clock_get_system_nanotime( - clock_sec_t *secs, - clock_nsec_t *nanosecs); +extern void clock_get_system_nanotime( + clock_sec_t *secs, + clock_nsec_t *nanosecs); -extern void clock_timebase_info( - mach_timebase_info_t info); +extern void clock_timebase_info( + mach_timebase_info_t info); -extern void clock_get_uptime( - uint64_t *result); +extern void clock_get_uptime( + uint64_t *result); -extern void clock_interval_to_deadline( - uint32_t interval, - uint32_t scale_factor, - uint64_t *result); +extern void clock_interval_to_deadline( + uint32_t interval, + uint32_t scale_factor, + uint64_t *result); -extern void clock_interval_to_absolutetime_interval( - uint32_t interval, - uint32_t scale_factor, - uint64_t *result); +extern void clock_interval_to_absolutetime_interval( + uint32_t interval, + uint32_t scale_factor, + uint64_t *result); -extern void clock_absolutetime_interval_to_deadline( - uint64_t abstime, - uint64_t *result); +extern void clock_absolutetime_interval_to_deadline( + uint64_t abstime, + uint64_t *result); -extern void clock_continuoustime_interval_to_deadline( - uint64_t abstime, - uint64_t *result); +extern void clock_continuoustime_interval_to_deadline( + uint64_t abstime, + uint64_t *result); -extern void clock_delay_until( - uint64_t deadline); +extern void clock_delay_until( + uint64_t deadline); -extern void absolutetime_to_nanoseconds( - uint64_t abstime, - uint64_t *result); +extern void absolutetime_to_nanoseconds( + uint64_t abstime, + uint64_t *result); -extern void nanoseconds_to_absolutetime( - uint64_t nanoseconds, - uint64_t *result); +extern void nanoseconds_to_absolutetime( + uint64_t nanoseconds, + uint64_t *result); /* * Absolute <-> Continuous Time conversion routines @@ -262,77 +262,77 @@ extern void nanoseconds_to_absolutetime( * is less the amount of the time the system spent asleep and /must/ be * handled. */ -extern uint64_t absolutetime_to_continuoustime( - uint64_t abstime); -extern uint64_t continuoustime_to_absolutetime( - uint64_t conttime); +extern uint64_t absolutetime_to_continuoustime( + uint64_t abstime); +extern uint64_t continuoustime_to_absolutetime( + uint64_t conttime); extern uint64_t mach_absolutetime_asleep; extern uint64_t mach_absolutetime_last_sleep; -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE /* * Obsolete interfaces. 
*/ -#ifndef __LP64__ - -#define MACH_TIMESPEC_SEC_MAX (0 - 1) -#define MACH_TIMESPEC_NSEC_MAX (NSEC_PER_SEC - 1) - -#define MACH_TIMESPEC_MAX ((mach_timespec_t) { \ - MACH_TIMESPEC_SEC_MAX, \ - MACH_TIMESPEC_NSEC_MAX } ) -#define MACH_TIMESPEC_ZERO ((mach_timespec_t) { 0, 0 } ) - -#define ADD_MACH_TIMESPEC_NSEC(t1, nsec) \ - do { \ - (t1)->tv_nsec += (clock_res_t)(nsec); \ - if ((clock_res_t)(nsec) > 0 && \ - (t1)->tv_nsec >= NSEC_PER_SEC) { \ - (t1)->tv_nsec -= NSEC_PER_SEC; \ - (t1)->tv_sec += 1; \ - } \ - else if ((clock_res_t)(nsec) < 0 && \ - (t1)->tv_nsec < 0) { \ - (t1)->tv_nsec += NSEC_PER_SEC; \ - (t1)->tv_sec -= 1; \ - } \ +#ifndef __LP64__ + +#define MACH_TIMESPEC_SEC_MAX (0 - 1) +#define MACH_TIMESPEC_NSEC_MAX (NSEC_PER_SEC - 1) + +#define MACH_TIMESPEC_MAX ((mach_timespec_t) { \ + MACH_TIMESPEC_SEC_MAX, \ + MACH_TIMESPEC_NSEC_MAX } ) +#define MACH_TIMESPEC_ZERO ((mach_timespec_t) { 0, 0 } ) + +#define ADD_MACH_TIMESPEC_NSEC(t1, nsec) \ + do { \ + (t1)->tv_nsec += (clock_res_t)(nsec); \ + if ((clock_res_t)(nsec) > 0 && \ + (t1)->tv_nsec >= NSEC_PER_SEC) { \ + (t1)->tv_nsec -= NSEC_PER_SEC; \ + (t1)->tv_sec += 1; \ + } \ + else if ((clock_res_t)(nsec) < 0 && \ + (t1)->tv_nsec < 0) { \ + (t1)->tv_nsec += NSEC_PER_SEC; \ + (t1)->tv_sec -= 1; \ + } \ } while (0) #include /* Use mach_absolute_time() */ -extern mach_timespec_t clock_get_system_value(void) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, __IPHONE_2_0, __IPHONE_6_0); +extern mach_timespec_t clock_get_system_value(void) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, __IPHONE_2_0, __IPHONE_6_0); -extern mach_timespec_t clock_get_calendar_value(void) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, __IPHONE_2_0, __IPHONE_6_0); +extern mach_timespec_t clock_get_calendar_value(void) __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0, __MAC_10_8, __IPHONE_2_0, __IPHONE_6_0); -#else /* __LP64__ */ +#else /* __LP64__ */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -#define MACH_TIMESPEC_ZERO ((mach_timespec_t) { 0, 0 } ) +#define MACH_TIMESPEC_ZERO ((mach_timespec_t) { 0, 0 } ) -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -#endif /* __LP64__ */ +#endif /* __LP64__ */ -extern void delay_for_interval( - uint32_t interval, - uint32_t scale_factor); +extern void delay_for_interval( + uint32_t interval, + uint32_t scale_factor); -extern void delay_for_interval_with_leeway( - uint32_t interval, - uint32_t leeway, - uint32_t scale_factor); +extern void delay_for_interval_with_leeway( + uint32_t interval, + uint32_t leeway, + uint32_t scale_factor); -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE extern void delay(int usec); -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ __END_DECLS -#endif /* _KERN_CLOCK_H_ */ +#endif /* _KERN_CLOCK_H_ */ diff --git a/osfmk/kern/clock_oldops.c b/osfmk/kern/clock_oldops.c index c77d40f28..430a2da53 100644 --- a/osfmk/kern/clock_oldops.c +++ b/osfmk/kern/clock_oldops.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -67,90 +67,90 @@ * list and entered in time priority order into the active alarm * chain of the target clock. */ -struct alarm { - struct alarm *al_next; /* next alarm in chain */ - struct alarm *al_prev; /* previous alarm in chain */ - int al_status; /* alarm status */ - mach_timespec_t al_time; /* alarm time */ - struct { /* message alarm data */ - int type; /* alarm type */ - ipc_port_t port; /* alarm port */ +struct alarm { + struct alarm *al_next; /* next alarm in chain */ + struct alarm *al_prev; /* previous alarm in chain */ + int al_status; /* alarm status */ + mach_timespec_t al_time; /* alarm time */ + struct { /* message alarm data */ + int type; /* alarm type */ + ipc_port_t port; /* alarm port */ mach_msg_type_name_t - port_type; /* alarm port type */ - struct clock *clock; /* alarm clock */ - void *data; /* alarm data */ + port_type; /* alarm port type */ + struct clock *clock; /* alarm clock */ + void *data; /* alarm data */ } al_alrm; -#define al_type al_alrm.type -#define al_port al_alrm.port -#define al_port_type al_alrm.port_type -#define al_clock al_alrm.clock -#define al_data al_alrm.data - long al_seqno; /* alarm sequence number */ +#define al_type al_alrm.type +#define al_port al_alrm.port +#define al_port_type al_alrm.port_type +#define al_clock al_alrm.clock +#define al_data al_alrm.data + long al_seqno; /* alarm sequence number */ }; -typedef struct alarm alarm_data_t; +typedef struct alarm alarm_data_t; /* alarm status */ -#define ALARM_FREE 0 /* alarm is on free list */ -#define ALARM_SLEEP 1 /* active clock_sleep() */ -#define ALARM_CLOCK 2 /* active clock_alarm() */ -#define ALARM_DONE 4 /* alarm has expired */ +#define ALARM_FREE 0 /* alarm is on free list */ +#define ALARM_SLEEP 1 /* active clock_sleep() */ +#define ALARM_CLOCK 2 /* active clock_alarm() */ +#define ALARM_DONE 4 /* alarm has expired */ /* local data declarations */ -decl_simple_lock_data(static,alarm_lock) /* alarm synchronization */ -static struct zone *alarm_zone; /* zone for user alarms */ -static struct alarm *alrmfree; /* alarm free list pointer */ -static struct alarm *alrmdone; /* alarm done list pointer */ -static struct alarm *alrmlist; -static long alrm_seqno; /* uniquely identifies alarms */ -static thread_call_data_t alarm_done_call; -static timer_call_data_t alarm_expire_timer; +decl_simple_lock_data(static, alarm_lock) /* alarm synchronization */ +static struct zone *alarm_zone; /* zone for user alarms */ +static struct alarm *alrmfree; /* alarm free list pointer */ +static struct alarm *alrmdone; /* alarm done list pointer */ +static struct alarm *alrmlist; +static long alrm_seqno; /* uniquely identifies alarms */ +static 
thread_call_data_t alarm_done_call; +static timer_call_data_t alarm_expire_timer; -extern struct clock clock_list[]; -extern int clock_count; +extern struct clock clock_list[]; +extern int clock_count; -static void post_alarm( - alarm_t alarm); +static void post_alarm( + alarm_t alarm); -static void set_alarm( - mach_timespec_t *alarm_time); +static void set_alarm( + mach_timespec_t *alarm_time); -static int check_time( - alarm_type_t alarm_type, - mach_timespec_t *alarm_time, - mach_timespec_t *clock_time); +static int check_time( + alarm_type_t alarm_type, + mach_timespec_t *alarm_time, + mach_timespec_t *clock_time); -static void alarm_done(void); +static void alarm_done(void); -static void alarm_expire(void); +static void alarm_expire(void); -static kern_return_t clock_sleep_internal( - clock_t clock, - sleep_type_t sleep_type, - mach_timespec_t *sleep_time); +static kern_return_t clock_sleep_internal( + clock_t clock, + sleep_type_t sleep_type, + mach_timespec_t *sleep_time); -int rtclock_init(void); +int rtclock_init(void); -kern_return_t rtclock_gettime( - mach_timespec_t *cur_time); +kern_return_t rtclock_gettime( + mach_timespec_t *cur_time); -kern_return_t rtclock_getattr( - clock_flavor_t flavor, - clock_attr_t attr, - mach_msg_type_number_t *count); +kern_return_t rtclock_getattr( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); SECURITY_READ_ONLY_EARLY(struct clock_ops) sysclk_ops = { - NULL, rtclock_init, + NULL, rtclock_init, rtclock_gettime, rtclock_getattr, }; -kern_return_t calend_gettime( - mach_timespec_t *cur_time); +kern_return_t calend_gettime( + mach_timespec_t *cur_time); -kern_return_t calend_getattr( - clock_flavor_t flavor, - clock_attr_t attr, - mach_msg_type_number_t *count); +kern_return_t calend_getattr( + clock_flavor_t flavor, + clock_attr_t attr, + mach_msg_type_number_t *count); SECURITY_READ_ONLY_EARLY(struct clock_ops) calend_ops = { NULL, NULL, @@ -161,32 +161,31 @@ SECURITY_READ_ONLY_EARLY(struct clock_ops) calend_ops = { /* * List of clock devices. */ -SECURITY_READ_ONLY_LATE(struct clock) clock_list[] = { - +SECURITY_READ_ONLY_LATE(struct clock) clock_list[] = { /* SYSTEM_CLOCK */ { &sysclk_ops, 0, 0 }, /* CALENDAR_CLOCK */ { &calend_ops, 0, 0 } }; -int clock_count = sizeof(clock_list) / sizeof(clock_list[0]); +int clock_count = sizeof(clock_list) / sizeof(clock_list[0]); /* * Macros to lock/unlock clock system. */ -#define LOCK_ALARM(s) \ - s = splclock(); \ - simple_lock(&alarm_lock); +#define LOCK_ALARM(s) \ + s = splclock(); \ + simple_lock(&alarm_lock, LCK_GRP_NULL); -#define UNLOCK_ALARM(s) \ - simple_unlock(&alarm_lock); \ +#define UNLOCK_ALARM(s) \ + simple_unlock(&alarm_lock); \ splx(s); void clock_oldconfig(void) { - clock_t clock; - int i; + clock_t clock; + int i; simple_lock_init(&alarm_lock, 0); thread_call_setup(&alarm_done_call, (thread_call_func_t)alarm_done, NULL); @@ -198,8 +197,9 @@ clock_oldconfig(void) for (i = 0; i < clock_count; i++) { clock = &clock_list[i]; if (clock->cl_ops && clock->cl_ops->c_config) { - if ((*clock->cl_ops->c_config)() == 0) + if ((*clock->cl_ops->c_config)() == 0) { clock->cl_ops = NULL; + } } } @@ -210,16 +210,17 @@ clock_oldconfig(void) void clock_oldinit(void) { - clock_t clock; - int i; + clock_t clock; + int i; /* * Initialize basic clock structures. 
*/ for (i = 0; i < clock_count; i++) { clock = &clock_list[i]; - if (clock->cl_ops && clock->cl_ops->c_init) + if (clock->cl_ops && clock->cl_ops->c_init) { (*clock->cl_ops->c_init)(); + } } } @@ -229,8 +230,8 @@ clock_oldinit(void) void clock_service_create(void) { - clock_t clock; - int i; + clock_t clock; + int i; /* * Initialize ipc clock services. @@ -248,7 +249,7 @@ clock_service_create(void) * initialization. */ i = sizeof(struct alarm); - alarm_zone = zinit(i, (4096/i)*i, 10*i, "alarms"); + alarm_zone = zinit(i, (4096 / i) * i, 10 * i, "alarms"); } /* @@ -256,19 +257,20 @@ clock_service_create(void) */ kern_return_t host_get_clock_service( - host_t host, - clock_id_t clock_id, - clock_t *clock) /* OUT */ + host_t host, + clock_id_t clock_id, + clock_t *clock) /* OUT */ { if (host == HOST_NULL || clock_id < 0 || clock_id >= clock_count) { *clock = CLOCK_NULL; - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } *clock = &clock_list[clock_id]; - if ((*clock)->cl_ops == 0) - return (KERN_FAILURE); - return (KERN_SUCCESS); + if ((*clock)->cl_ops == 0) { + return KERN_FAILURE; + } + return KERN_SUCCESS; } /* @@ -276,20 +278,21 @@ host_get_clock_service( */ kern_return_t host_get_clock_control( - host_priv_t host_priv, - clock_id_t clock_id, - clock_t *clock) /* OUT */ + host_priv_t host_priv, + clock_id_t clock_id, + clock_t *clock) /* OUT */ { if (host_priv == HOST_PRIV_NULL || - clock_id < 0 || clock_id >= clock_count) { + clock_id < 0 || clock_id >= clock_count) { *clock = CLOCK_NULL; - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } *clock = &clock_list[clock_id]; - if ((*clock)->cl_ops == 0) - return (KERN_FAILURE); - return (KERN_SUCCESS); + if ((*clock)->cl_ops == 0) { + return KERN_FAILURE; + } + return KERN_SUCCESS; } /* @@ -297,40 +300,41 @@ host_get_clock_control( */ kern_return_t clock_get_time( - clock_t clock, - mach_timespec_t *cur_time) /* OUT */ + clock_t clock, + mach_timespec_t *cur_time) /* OUT */ { - if (clock == CLOCK_NULL) - return (KERN_INVALID_ARGUMENT); - return ((*clock->cl_ops->c_gettime)(cur_time)); + if (clock == CLOCK_NULL) { + return KERN_INVALID_ARGUMENT; + } + return (*clock->cl_ops->c_gettime)(cur_time); } kern_return_t rtclock_gettime( - mach_timespec_t *time) /* OUT */ + mach_timespec_t *time) /* OUT */ { - clock_sec_t secs; - clock_nsec_t nsecs; + clock_sec_t secs; + clock_nsec_t nsecs; clock_get_system_nanotime(&secs, &nsecs); time->tv_sec = (unsigned int)secs; time->tv_nsec = nsecs; - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t calend_gettime( - mach_timespec_t *time) /* OUT */ + mach_timespec_t *time) /* OUT */ { - clock_sec_t secs; - clock_nsec_t nsecs; + clock_sec_t secs; + clock_nsec_t nsecs; clock_get_calendar_nanotime(&secs, &nsecs); time->tv_sec = (unsigned int)secs; time->tv_nsec = nsecs; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -338,69 +342,71 @@ calend_gettime( */ kern_return_t clock_get_attributes( - clock_t clock, - clock_flavor_t flavor, - clock_attr_t attr, /* OUT */ - mach_msg_type_number_t *count) /* IN/OUT */ + clock_t clock, + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ { - if (clock == CLOCK_NULL) - return (KERN_INVALID_ARGUMENT); - if (clock->cl_ops->c_getattr) - return (clock->cl_ops->c_getattr(flavor, attr, count)); - return (KERN_FAILURE); + if (clock == CLOCK_NULL) { + return KERN_INVALID_ARGUMENT; + } + if (clock->cl_ops->c_getattr) { + return clock->cl_ops->c_getattr(flavor, attr, count); + } + return 
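/*
 * These routines back the long-deprecated user-space host clock
 * service. The classic calling pattern, shown for illustration
 * (superseded by clock_gettime() and mach_absolute_time()):
 *
 *	#include <mach/clock.h>
 *	#include <mach/mach.h>
 *
 *	clock_serv_t cclock;
 *	mach_timespec_t mts;
 *
 *	host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
 *	clock_get_time(cclock, &mts);     // seconds + nanoseconds
 *	mach_port_deallocate(mach_task_self(), cclock);
 */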
KERN_FAILURE; } kern_return_t rtclock_getattr( - clock_flavor_t flavor, - clock_attr_t attr, /* OUT */ - mach_msg_type_number_t *count) /* IN/OUT */ + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ { - if (*count != 1) - return (KERN_FAILURE); + if (*count != 1) { + return KERN_FAILURE; + } switch (flavor) { - - case CLOCK_GET_TIME_RES: /* >0 res */ - case CLOCK_ALARM_CURRES: /* =0 no alarm */ + case CLOCK_GET_TIME_RES: /* >0 res */ + case CLOCK_ALARM_CURRES: /* =0 no alarm */ case CLOCK_ALARM_MINRES: case CLOCK_ALARM_MAXRES: *(clock_res_t *) attr = NSEC_PER_SEC / 100; break; default: - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t calend_getattr( - clock_flavor_t flavor, - clock_attr_t attr, /* OUT */ - mach_msg_type_number_t *count) /* IN/OUT */ + clock_flavor_t flavor, + clock_attr_t attr, /* OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ { - if (*count != 1) - return (KERN_FAILURE); + if (*count != 1) { + return KERN_FAILURE; + } switch (flavor) { - - case CLOCK_GET_TIME_RES: /* >0 res */ + case CLOCK_GET_TIME_RES: /* >0 res */ *(clock_res_t *) attr = NSEC_PER_SEC / 100; break; - case CLOCK_ALARM_CURRES: /* =0 no alarm */ + case CLOCK_ALARM_CURRES: /* =0 no alarm */ case CLOCK_ALARM_MINRES: case CLOCK_ALARM_MAXRES: *(clock_res_t *) attr = 0; break; default: - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -408,12 +414,13 @@ calend_getattr( */ kern_return_t clock_set_time( - clock_t clock, -__unused mach_timespec_t new_time) + clock_t clock, + __unused mach_timespec_t new_time) { - if (clock == CLOCK_NULL) - return (KERN_INVALID_ARGUMENT); - return (KERN_FAILURE); + if (clock == CLOCK_NULL) { + return KERN_INVALID_ARGUMENT; + } + return KERN_FAILURE; } /* @@ -421,14 +428,15 @@ __unused mach_timespec_t new_time) */ kern_return_t clock_set_attributes( - clock_t clock, -__unused clock_flavor_t flavor, -__unused clock_attr_t attr, -__unused mach_msg_type_number_t count) + clock_t clock, + __unused clock_flavor_t flavor, + __unused clock_attr_t attr, + __unused mach_msg_type_number_t count) { - if (clock == CLOCK_NULL) - return (KERN_INVALID_ARGUMENT); - return (KERN_FAILURE); + if (clock == CLOCK_NULL) { + return KERN_INVALID_ARGUMENT; + } + return KERN_FAILURE; } /* @@ -436,24 +444,27 @@ __unused mach_msg_type_number_t count) */ kern_return_t clock_alarm( - clock_t clock, - alarm_type_t alarm_type, - mach_timespec_t alarm_time, - ipc_port_t alarm_port, - mach_msg_type_name_t alarm_port_type) + clock_t clock, + alarm_type_t alarm_type, + mach_timespec_t alarm_time, + ipc_port_t alarm_port, + mach_msg_type_name_t alarm_port_type) { - alarm_t alarm; - mach_timespec_t clock_time; - int chkstat; - kern_return_t reply_code; - spl_t s; - - if (clock == CLOCK_NULL) - return (KERN_INVALID_ARGUMENT); - if (clock != &clock_list[SYSTEM_CLOCK]) - return (KERN_FAILURE); - if (IP_VALID(alarm_port) == 0) - return (KERN_INVALID_CAPABILITY); + alarm_t alarm; + mach_timespec_t clock_time; + int chkstat; + kern_return_t reply_code; + spl_t s; + + if (clock == CLOCK_NULL) { + return KERN_INVALID_ARGUMENT; + } + if (clock != &clock_list[SYSTEM_CLOCK]) { + return KERN_FAILURE; + } + if (IP_VALID(alarm_port) == 0) { + return KERN_INVALID_CAPABILITY; + } /* * Check alarm parameters. If parameters are invalid, @@ -464,8 +475,8 @@ clock_alarm( if (chkstat <= 0) { reply_code = (chkstat < 0 ? 
KERN_INVALID_VALUE : KERN_SUCCESS); clock_alarm_reply(alarm_port, alarm_port_type, - reply_code, alarm_type, clock_time); - return (KERN_SUCCESS); + reply_code, alarm_type, clock_time); + return KERN_SUCCESS; } /* @@ -476,12 +487,13 @@ clock_alarm( if ((alarm = alrmfree) == 0) { UNLOCK_ALARM(s); alarm = (alarm_t) zalloc(alarm_zone); - if (alarm == 0) - return (KERN_RESOURCE_SHORTAGE); + if (alarm == 0) { + return KERN_RESOURCE_SHORTAGE; + } LOCK_ALARM(s); - } - else + } else { alrmfree = alarm->al_next; + } alarm->al_status = ALARM_CLOCK; alarm->al_time = alarm_time; @@ -493,7 +505,7 @@ clock_alarm( post_alarm(alarm); UNLOCK_ALARM(s); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -506,22 +518,23 @@ kern_return_t clock_sleep_trap( struct clock_sleep_trap_args *args) { - mach_port_name_t clock_name = args->clock_name; - sleep_type_t sleep_type = args->sleep_type; - int sleep_sec = args->sleep_sec; - int sleep_nsec = args->sleep_nsec; - mach_vm_address_t wakeup_time_addr = args->wakeup_time; - clock_t clock; - mach_timespec_t swtime = {}; - kern_return_t rvalue; + mach_port_name_t clock_name = args->clock_name; + sleep_type_t sleep_type = args->sleep_type; + int sleep_sec = args->sleep_sec; + int sleep_nsec = args->sleep_nsec; + mach_vm_address_t wakeup_time_addr = args->wakeup_time; + clock_t clock; + mach_timespec_t swtime = {}; + kern_return_t rvalue; /* * Convert the trap parameters. */ - if (clock_name == MACH_PORT_NULL) + if (clock_name == MACH_PORT_NULL) { clock = &clock_list[SYSTEM_CLOCK]; - else + } else { clock = port_name_to_clock(clock_name); + } swtime.tv_sec = sleep_sec; swtime.tv_nsec = sleep_nsec; @@ -537,26 +550,28 @@ clock_sleep_trap( if (rvalue != KERN_INVALID_ARGUMENT && rvalue != KERN_FAILURE) { copyout((char *)&swtime, wakeup_time_addr, sizeof(mach_timespec_t)); } - return (rvalue); -} + return rvalue; +} static kern_return_t clock_sleep_internal( - clock_t clock, - sleep_type_t sleep_type, - mach_timespec_t *sleep_time) + clock_t clock, + sleep_type_t sleep_type, + mach_timespec_t *sleep_time) { - alarm_t alarm; - mach_timespec_t clock_time; - kern_return_t rvalue; - int chkstat; - spl_t s; - - if (clock == CLOCK_NULL) - return (KERN_INVALID_ARGUMENT); + alarm_t alarm; + mach_timespec_t clock_time; + kern_return_t rvalue; + int chkstat; + spl_t s; + + if (clock == CLOCK_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (clock != &clock_list[SYSTEM_CLOCK]) - return (KERN_FAILURE); + if (clock != &clock_list[SYSTEM_CLOCK]) { + return KERN_FAILURE; + } /* * Check sleep parameters. If parameters are invalid @@ -565,8 +580,9 @@ clock_sleep_internal( (*clock->cl_ops->c_gettime)(&clock_time); chkstat = check_time(sleep_type, sleep_time, &clock_time); - if (chkstat < 0) - return (KERN_INVALID_VALUE); + if (chkstat < 0) { + return KERN_INVALID_VALUE; + } rvalue = KERN_SUCCESS; if (chkstat > 0) { wait_result_t wait_result; @@ -579,12 +595,13 @@ clock_sleep_internal( if ((alarm = alrmfree) == 0) { UNLOCK_ALARM(s); alarm = (alarm_t) zalloc(alarm_zone); - if (alarm == 0) - return (KERN_RESOURCE_SHORTAGE); + if (alarm == 0) { + return KERN_RESOURCE_SHORTAGE; + } LOCK_ALARM(s); - } - else + } else { alrmfree = alarm->al_next; + } /* * Wait for alarm to occur. 
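Both clock_alarm() and clock_sleep_internal() allocate with the same idiom seen in this hunk: pop a struct alarm from the locked free list, and only when the list is empty drop the alarm lock across zalloc() (which may block), then re-take it. A minimal sketch of the idiom as a hypothetical helper (names follow the declarations earlier in this file; this helper is not part of the original source):

    static alarm_t
    alarm_get(spl_t *s)
    {
        alarm_t alarm;

        LOCK_ALARM(*s);
        if ((alarm = alrmfree) == NULL) {
            UNLOCK_ALARM(*s);               /* zalloc() may block */
            alarm = (alarm_t) zalloc(alarm_zone);
            if (alarm == NULL) {
                return NULL;                /* -> KERN_RESOURCE_SHORTAGE */
            }
            LOCK_ALARM(*s);
        } else {
            alrmfree = alarm->al_next;      /* pop the free list */
        }
        return alarm;
    }

The brief unlock window is safe here because the freshly allocated alarm is used regardless of how the free list changed in the meantime.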
@@ -606,8 +623,9 @@ clock_sleep_internal( LOCK_ALARM(s); if (alarm->al_status != ALARM_DONE) { assert(wait_result != THREAD_AWAKENED); - if (((alarm->al_prev)->al_next = alarm->al_next) != NULL) + if (((alarm->al_prev)->al_next = alarm->al_next) != NULL) { (alarm->al_next)->al_prev = alarm->al_prev; + } rvalue = KERN_ABORTED; } *sleep_time = alarm->al_time; @@ -620,11 +638,11 @@ clock_sleep_internal( alarm->al_next = alrmfree; alrmfree = alarm; UNLOCK_ALARM(s); - } - else + } else { *sleep_time = clock_time; + } - return (rvalue); + return rvalue; } /* @@ -633,12 +651,12 @@ clock_sleep_internal( static void alarm_expire(void) { - clock_t clock; - alarm_t alrm1; - alarm_t alrm2; - mach_timespec_t clock_time; - mach_timespec_t *alarm_time; - spl_t s; + clock_t clock; + alarm_t alrm1; + alarm_t alrm2; + mach_timespec_t clock_time; + mach_timespec_t *alarm_time; + spl_t s; clock = &clock_list[SYSTEM_CLOCK]; (*clock->cl_ops->c_gettime)(&clock_time); @@ -651,15 +669,17 @@ alarm_expire(void) alrm1 = (alarm_t)&alrmlist; while ((alrm2 = alrm1->al_next) != NULL) { alarm_time = &alrm2->al_time; - if (CMP_MACH_TIMESPEC(alarm_time, &clock_time) > 0) + if (CMP_MACH_TIMESPEC(alarm_time, &clock_time) > 0) { break; + } /* * Alarm has expired, so remove it from the * clock alarm list. - */ - if ((alrm1->al_next = alrm2->al_next) != NULL) + */ + if ((alrm1->al_next = alrm2->al_next) != NULL) { (alrm1->al_next)->al_prev = alrm1; + } /* * If a clock_sleep() alarm, wakeup the thread @@ -671,18 +691,18 @@ alarm_expire(void) alrm2->al_time = clock_time; thread_wakeup((event_t)alrm2); } - - /* + /* * If a clock_alarm() alarm, place the alarm on * the alarm done list and schedule the alarm * delivery mechanism. */ else { assert(alrm2->al_status == ALARM_CLOCK); - if ((alrm2->al_next = alrmdone) != NULL) + if ((alrm2->al_next = alrmdone) != NULL) { alrmdone->al_prev = alrm2; - else + } else { thread_call_enter(&alarm_done_call); + } alrm2->al_prev = (alarm_t)&alrmdone; alrmdone = alrm2; alrm2->al_status = ALARM_DONE; @@ -693,22 +713,24 @@ alarm_expire(void) /* * Setup to expire for the next pending alarm. */ - if (alrm2) + if (alrm2) { set_alarm(alarm_time); + } UNLOCK_ALARM(s); } static void alarm_done(void) { - alarm_t alrm; - kern_return_t code; - spl_t s; + alarm_t alrm; + kern_return_t code; + spl_t s; LOCK_ALARM(s); while ((alrm = alrmdone) != NULL) { - if ((alrmdone = alrm->al_next) != NULL) + if ((alrmdone = alrm->al_next) != NULL) { alrmdone->al_prev = (alarm_t)&alrmdone; + } UNLOCK_ALARM(s); code = (alrm->al_status == ALARM_DONE? 
KERN_SUCCESS: KERN_ABORTED); @@ -716,16 +738,16 @@ alarm_done(void) /* Deliver message to designated port */ if (IP_VALID(alrm->al_port)) { clock_alarm_reply(alrm->al_port, alrm->al_port_type, code, - alrm->al_type, alrm->al_time); + alrm->al_type, alrm->al_time); } LOCK_ALARM(s); alrm->al_status = ALARM_FREE; alrm->al_next = alrmfree; alrmfree = alrm; - } - else + } else { panic("clock_alarm_deliver"); + } } UNLOCK_ALARM(s); @@ -738,11 +760,11 @@ alarm_done(void) */ static void post_alarm( - alarm_t alarm) + alarm_t alarm) { - alarm_t alrm1, alrm2; - mach_timespec_t *alarm_time; - mach_timespec_t *queue_time; + alarm_t alrm1, alrm2; + mach_timespec_t *alarm_time; + mach_timespec_t *queue_time; /* * Traverse alarm list until queue time is greater @@ -752,29 +774,32 @@ post_alarm( alrm1 = (alarm_t)&alrmlist; while ((alrm2 = alrm1->al_next) != NULL) { queue_time = &alrm2->al_time; - if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0) + if (CMP_MACH_TIMESPEC(queue_time, alarm_time) > 0) { break; + } alrm1 = alrm2; } alrm1->al_next = alarm; alarm->al_next = alrm2; alarm->al_prev = alrm1; - if (alrm2) + if (alrm2) { alrm2->al_prev = alarm; + } /* * If the inserted alarm is the 'earliest' alarm, * reset the device layer alarm time accordingly. */ - if (alrmlist == alarm) + if (alrmlist == alarm) { set_alarm(alarm_time); + } } static void set_alarm( - mach_timespec_t *alarm_time) + mach_timespec_t *alarm_time) { - uint64_t abstime; + uint64_t abstime; nanotime_to_absolutetime(alarm_time->tv_sec, alarm_time->tv_nsec, &abstime); timer_call_enter_with_leeway(&alarm_expire_timer, NULL, abstime, 0, TIMER_CALL_USER_NORMAL, FALSE); @@ -788,31 +813,34 @@ set_alarm( */ static int check_time( - alarm_type_t alarm_type, - mach_timespec_t *alarm_time, - mach_timespec_t *clock_time) + alarm_type_t alarm_type, + mach_timespec_t *alarm_time, + mach_timespec_t *clock_time) { - int result; + int result; - if (BAD_ALRMTYPE(alarm_type)) - return (-1); - if (BAD_MACH_TIMESPEC(alarm_time)) - return (-1); - if ((alarm_type & ALRMTYPE) == TIME_RELATIVE) + if (BAD_ALRMTYPE(alarm_type)) { + return -1; + } + if (BAD_MACH_TIMESPEC(alarm_time)) { + return -1; + } + if ((alarm_type & ALRMTYPE) == TIME_RELATIVE) { ADD_MACH_TIMESPEC(alarm_time, clock_time); + } result = CMP_MACH_TIMESPEC(alarm_time, clock_time); - return ((result >= 0)? result: 0); + return (result >= 0)? result: 0; } -#ifndef __LP64__ +#ifndef __LP64__ mach_timespec_t clock_get_system_value(void) { - clock_t clock = &clock_list[SYSTEM_CLOCK]; - mach_timespec_t value; + clock_t clock = &clock_list[SYSTEM_CLOCK]; + mach_timespec_t value; (void) (*clock->cl_ops->c_gettime)(&value); @@ -822,12 +850,12 @@ clock_get_system_value(void) mach_timespec_t clock_get_calendar_value(void) { - clock_t clock = &clock_list[CALENDAR_CLOCK]; - mach_timespec_t value = MACH_TIMESPEC_ZERO; + clock_t clock = &clock_list[CALENDAR_CLOCK]; + mach_timespec_t value = MACH_TIMESPEC_ZERO; (void) (*clock->cl_ops->c_gettime)(&value); return value; } -#endif /* __LP64__ */ +#endif /* __LP64__ */ diff --git a/osfmk/kern/coalition.c b/osfmk/kern/coalition.c index 26f9d33a4..0db480817 100644 --- a/osfmk/kern/coalition.c +++ b/osfmk/kern/coalition.c @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -56,7 +56,7 @@ task_t coalition_get_leader(coalition_t coal); int coalition_get_task_count(coalition_t coal); uint64_t coalition_get_page_count(coalition_t coal, int *ntasks); int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order, - int *pid_list, int list_sz); + int *pid_list, int list_sz); /* defined in task.c */ extern ledger_template_t task_ledger_template; @@ -76,7 +76,7 @@ lck_grp_t coalitions_lck_grp; lck_grp_attr_t coalitions_lck_grp_attr; /* coalitions_list_lock protects coalition_count, coalitions queue, next_coalition_id. */ -decl_lck_mtx_data(static,coalitions_list_lock); +decl_lck_mtx_data(static, coalitions_list_lock); static uint64_t coalition_count; static uint64_t coalition_next_id = 1; static queue_head_t coalitions_q; @@ -86,9 +86,10 @@ coalition_t corpse_coalition[COALITION_NUM_TYPES]; zone_t coalition_zone; -static const char *coal_type_str(int type) +static const char * +coal_type_str(int type) { - switch(type) { + switch (type) { case COALITION_TYPE_RESOURCE: return "RESOURCE"; case COALITION_TYPE_JETSAM: @@ -162,10 +163,10 @@ static void i_coal_resource_dealloc(coalition_t coal); static kern_return_t i_coal_resource_adopt_task(coalition_t coal, task_t task); static kern_return_t i_coal_resource_remove_task(coalition_t coal, task_t task); static kern_return_t i_coal_resource_set_taskrole(coalition_t coal, - task_t task, int role); + task_t task, int role); static int i_coal_resource_get_taskrole(coalition_t coal, task_t task); static void i_coal_resource_iterate_tasks(coalition_t coal, void *ctx, - void (*callback)(coalition_t, void *, task_t)); + void (*callback)(coalition_t, void *, task_t)); /* * Ensure COALITION_NUM_THREAD_QOS_TYPES defined in mach/coalition.h still @@ -189,8 +190,8 @@ struct i_resource_coalition { uint64_t task_count; /* tasks that have started in this coalition */ uint64_t dead_task_count; /* tasks that have exited in this coalition; - subtract from task_count to get count - of "active" tasks */ + * subtract from task_count to get count + * of "active" tasks */ /* * Count the length of time this coalition had at least one active task. * This can be a 'denominator' to turn e.g. cpu_time to %cpu. 
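As the i_resource_coalition comments spell out, task_count and dead_task_count only ever grow, so their difference is the live membership, and the nonempty-time accumulator the comment describes (time_nonempty in the full source) is the natural denominator for turning accumulated CPU time into utilization. A sketch of that arithmetic (hypothetical helpers, not from this file; both time values are assumed to be in the same mach absolute-time units):

    /* Active tasks = started - exited; both counters are monotonic. */
    static uint64_t
    coal_active_tasks(const struct i_resource_coalition *cr)
    {
        return cr->task_count - cr->dead_task_count;
    }

    /* Percent CPU over the window in which the coalition was nonempty. */
    static uint64_t
    coal_percent_cpu(uint64_t cpu_time, uint64_t time_nonempty)
    {
        return time_nonempty ? (cpu_time * 100) / time_nonempty : 0;
    }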
@@ -210,10 +211,10 @@ static void i_coal_jetsam_dealloc(coalition_t coal); static kern_return_t i_coal_jetsam_adopt_task(coalition_t coal, task_t task); static kern_return_t i_coal_jetsam_remove_task(coalition_t coal, task_t task); static kern_return_t i_coal_jetsam_set_taskrole(coalition_t coal, - task_t task, int role); + task_t task, int role); static int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task); static void i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx, - void (*callback)(coalition_t, void *, task_t)); + void (*callback)(coalition_t, void *, task_t)); struct i_jetsam_coalition { task_t leader; @@ -233,29 +234,29 @@ struct coalition { uint32_t role; /* default task role (background, adaptive, interactive, etc) */ uint32_t ref_count; /* Number of references to the memory containing this struct */ uint32_t active_count; /* Number of members of (tasks in) the - coalition, plus vouchers referring - to the coalition */ + * coalition, plus vouchers referring + * to the coalition */ uint32_t focal_task_count; /* Number of TASK_FOREGROUND_APPLICATION tasks in the coalition */ uint32_t nonfocal_task_count; /* Number of TASK_BACKGROUND_APPLICATION tasks in the coalition */ /* coalition flags */ uint32_t privileged : 1; /* Members of this coalition may create - and manage coalitions and may posix_spawn - processes into selected coalitions */ + * and manage coalitions and may posix_spawn + * processes into selected coalitions */ /* ast? */ /* voucher */ uint32_t termrequested : 1; /* launchd has requested termination when coalition becomes empty */ uint32_t terminated : 1; /* coalition became empty and spawns are now forbidden */ uint32_t reaped : 1; /* reaped, invisible to userspace, but waiting for ref_count to go to zero */ uint32_t notified : 1; /* no-more-processes notification was sent via special port */ - uint32_t efficient : 1; /* launchd has marked the coalition as efficient */ + uint32_t efficient : 1; /* launchd has marked the coalition as efficient */ #if DEVELOPMENT || DEBUG uint32_t should_notify : 1; /* should this coalition send notifications (default: yes) */ #endif queue_chain_t coalitions; /* global list of coalitions */ - decl_lck_mtx_data(,lock) /* Coalition lock. */ + decl_lck_mtx_data(, lock) /* Coalition lock. 
*/ /* put coalition type-specific structures here */ union { @@ -269,7 +270,7 @@ struct coalition { * these must be kept in the order specified in coalition.h */ static const struct coalition_type -s_coalition_types[COALITION_NUM_TYPES] = { + s_coalition_types[COALITION_NUM_TYPES] = { { COALITION_TYPE_RESOURCE, 1, @@ -341,9 +342,10 @@ i_coal_resource_init(coalition_t coal, boolean_t privileged) (void)privileged; assert(coal && coal->type == COALITION_TYPE_RESOURCE); coal->r.ledger = ledger_instantiate(task_ledger_template, - LEDGER_CREATE_ACTIVE_ENTRIES); - if (coal->r.ledger == NULL) + LEDGER_CREATE_ACTIVE_ENTRIES); + if (coal->r.ledger == NULL) { return KERN_RESOURCE_SHORTAGE; + } queue_init(&coal->r.tasks); @@ -370,8 +372,8 @@ i_coal_resource_adopt_task(coalition_t coal, task_t task) if (cr->task_count < cr->dead_task_count) { panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)", - __func__, coal, coal->id, coal_type_str(coal->type), - cr->task_count, cr->dead_task_count); + __func__, coal, coal->id, coal_type_str(coal->type), + cr->task_count, cr->dead_task_count); } /* If moving from 0->1 active tasks */ @@ -383,8 +385,8 @@ i_coal_resource_adopt_task(coalition_t coal, task_t task) enqueue_tail(&cr->tasks, &task->task_coalition[COALITION_TYPE_RESOURCE]); coal_dbg("Added PID:%d to id:%llu, task_count:%llu, dead_count:%llu, nonempty_time:%llu", - task_pid(task), coal->id, cr->task_count, cr->dead_task_count, - cr->last_became_nonempty_time); + task_pid(task), coal->id, cr->task_count, cr->dead_task_count, + cr->last_became_nonempty_time); return KERN_SUCCESS; } @@ -407,7 +409,7 @@ i_coal_resource_remove_task(coalition_t coal, task_t task) if (cr->task_count < cr->dead_task_count) { panic("%s: coalition %p id:%llu type:%s task_count(%llu) < dead_task_count(%llu)", - __func__, coal, coal->id, coal_type_str(coal->type), cr->task_count, cr->dead_task_count); + __func__, coal, coal->id, coal_type_str(coal->type), cr->task_count, cr->dead_task_count); } /* If moving from 1->0 active tasks */ @@ -440,14 +442,14 @@ i_coal_resource_remove_task(coalition_t coal, task_t task) queue_chain_init(task->task_coalition[COALITION_TYPE_RESOURCE]); coal_dbg("removed PID:%d from id:%llu, task_count:%llu, dead_count:%llu", - task_pid(task), coal->id, cr->task_count, cr->dead_task_count); + task_pid(task), coal->id, cr->task_count, cr->dead_task_count); return KERN_SUCCESS; } static kern_return_t i_coal_resource_set_taskrole(__unused coalition_t coal, - __unused task_t task, __unused int role) + __unused task_t task, __unused int role) { return KERN_SUCCESS; } @@ -460,8 +462,9 @@ i_coal_resource_get_taskrole(__unused coalition_t coal, __unused task_t task) assert(coal && coal->type == COALITION_TYPE_RESOURCE); qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) { - if (t == task) + if (t == task) { return COALITION_TASKROLE_UNDEF; + } } return -1; @@ -474,7 +477,7 @@ i_coal_resource_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coal assert(coal && coal->type == COALITION_TYPE_RESOURCE); qe_foreach_element(t, &coal->r.tasks, task_coalition[COALITION_TYPE_RESOURCE]) - callback(coal, ctx, t); + callback(coal, ctx, t); } kern_return_t @@ -484,8 +487,9 @@ coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us ledger_amount_t credit, debit; int i; - if (coal->type != COALITION_TYPE_RESOURCE) + if (coal->type != COALITION_TYPE_RESOURCE) { return KERN_INVALID_ARGUMENT; + } /* Return KERN_INVALID_ARGUMENT for Corpse 
coalition */ for (i = 0; i < COALITION_NUM_TYPES; i++) { @@ -495,8 +499,9 @@ coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us } ledger_t sum_ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES); - if (sum_ledger == LEDGER_NULL) + if (sum_ledger == LEDGER_NULL) { return KERN_RESOURCE_SHORTAGE; + } coalition_lock(coal); @@ -583,7 +588,7 @@ coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us /* Copy the totals out of sum_ledger */ kr = ledger_get_entries(sum_ledger, task_ledgers.cpu_time, - &credit, &debit); + &credit, &debit); if (kr != KERN_SUCCESS) { credit = 0; } @@ -594,14 +599,14 @@ coalition_resource_usage_internal(coalition_t coal, struct coalition_resource_us cru_out->energy_billed_to_others = (uint64_t)energy_billed_to_others; kr = ledger_get_entries(sum_ledger, task_ledgers.interrupt_wakeups, - &credit, &debit); + &credit, &debit); if (kr != KERN_SUCCESS) { credit = 0; } cru_out->interrupt_wakeups = credit; kr = ledger_get_entries(sum_ledger, task_ledgers.platform_idle_wakeups, - &credit, &debit); + &credit, &debit); if (kr != KERN_SUCCESS) { credit = 0; } @@ -640,7 +645,7 @@ i_coal_jetsam_init(coalition_t coal, boolean_t privileged) assert(coal && coal->type == COALITION_TYPE_JETSAM); (void)privileged; - coal->j.leader= TASK_NULL; + coal->j.leader = TASK_NULL; queue_head_init(coal->j.extensions); queue_head_init(coal->j.services); queue_head_init(coal->j.other); @@ -674,7 +679,7 @@ i_coal_jetsam_adopt_task(coalition_t coal, task_t task) /* put each task initially in the "other" list */ enqueue_tail(&cj->other, &task->task_coalition[COALITION_TYPE_JETSAM]); coal_dbg("coalition %lld adopted PID:%d as UNDEF", - coal->id, task_pid(task)); + coal->id, task_pid(task)); return KERN_SUCCESS; } @@ -686,7 +691,7 @@ i_coal_jetsam_remove_task(coalition_t coal, task_t task) assert(task->coalition[COALITION_TYPE_JETSAM] == coal); coal_dbg("removing PID:%d from coalition id:%lld", - task_pid(task), coal->id); + task_pid(task), coal->id); if (task == coal->j.leader) { coal->j.leader = NULL; @@ -715,11 +720,11 @@ i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role) switch (role) { case COALITION_TASKROLE_LEADER: coal_dbg("setting PID:%d as LEADER of %lld", - task_pid(task), coal->id); + task_pid(task), coal->id); if (cj->leader != TASK_NULL) { /* re-queue the exiting leader onto the "other" list */ coal_dbg(" re-queue existing leader (%d) as OTHER", - task_pid(cj->leader)); + task_pid(cj->leader)); re_queue_tail(&cj->other, &cj->leader->task_coalition[COALITION_TYPE_JETSAM]); } /* @@ -734,12 +739,12 @@ i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role) break; case COALITION_TASKROLE_XPC: coal_dbg("setting PID:%d as XPC in %lld", - task_pid(task), coal->id); + task_pid(task), coal->id); q = (queue_t)&cj->services; break; case COALITION_TASKROLE_EXT: coal_dbg("setting PID:%d as EXT in %lld", - task_pid(task), coal->id); + task_pid(task), coal->id); q = (queue_t)&cj->extensions; break; case COALITION_TASKROLE_NONE: @@ -751,12 +756,12 @@ i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role) */ if (task->coalition[COALITION_TYPE_JETSAM] != coal) { panic("%s: task %p attempting to set role %d " - "in coalition %p to which it does not belong!", __func__, task, role, coal); + "in coalition %p to which it does not belong!", __func__, task, role, coal); } - /* fall through */ + /* fall through */ case COALITION_TASKROLE_UNDEF: coal_dbg("setting PID:%d as UNDEF in %lld", - 
task_pid(task), coal->id); + task_pid(task), coal->id); q = (queue_t)&cj->other; break; default: @@ -764,8 +769,9 @@ i_coal_jetsam_set_taskrole(coalition_t coal, task_t task, int role) return KERN_INVALID_ARGUMENT; } - if (q != NULL) + if (q != NULL) { re_queue_tail(q, &task->task_coalition[COALITION_TYPE_JETSAM]); + } return KERN_SUCCESS; } @@ -781,22 +787,26 @@ i_coal_jetsam_get_taskrole(coalition_t coal, task_t task) cj = &coal->j; - if (task == cj->leader) + if (task == cj->leader) { return COALITION_TASKROLE_LEADER; + } qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM]) { - if (t == task) + if (t == task) { return COALITION_TASKROLE_XPC; + } } qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM]) { - if (t == task) + if (t == task) { return COALITION_TASKROLE_EXT; + } } qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM]) { - if (t == task) + if (t == task) { return COALITION_TASKROLE_UNDEF; + } } /* task not in the coalition?! */ @@ -813,17 +823,18 @@ i_coal_jetsam_iterate_tasks(coalition_t coal, void *ctx, void (*callback)(coalit cj = &coal->j; - if (cj->leader) + if (cj->leader) { callback(coal, ctx, cj->leader); + } qe_foreach_element(t, &cj->services, task_coalition[COALITION_TYPE_JETSAM]) - callback(coal, ctx, t); + callback(coal, ctx, t); qe_foreach_element(t, &cj->extensions, task_coalition[COALITION_TYPE_JETSAM]) - callback(coal, ctx, t); + callback(coal, ctx, t); qe_foreach_element(t, &cj->other, task_coalition[COALITION_TYPE_JETSAM]) - callback(coal, ctx, t); + callback(coal, ctx, t); } @@ -844,12 +855,14 @@ coalition_create_internal(int type, int role, boolean_t privileged, coalition_t kern_return_t kr; struct coalition *new_coal; - if (type < 0 || type > COALITION_TYPE_MAX) + if (type < 0 || type > COALITION_TYPE_MAX) { return KERN_INVALID_ARGUMENT; + } new_coal = (struct coalition *)zalloc(coalition_zone); - if (new_coal == COALITION_NULL) + if (new_coal == COALITION_NULL) { return KERN_RESOURCE_SHORTAGE; + } bzero(new_coal, sizeof(*new_coal)); new_coal->type = type; @@ -878,7 +891,7 @@ coalition_create_internal(int type, int role, boolean_t privileged, coalition_t enqueue_tail(&coalitions_q, &new_coal->coalitions); KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_NEW), - new_coal->id, new_coal->type); + new_coal->id, new_coal->type); lck_mtx_unlock(&coalitions_list_lock); coal_dbg("id:%llu, type:%s", new_coal->id, coal_type_str(new_coal->type)); @@ -904,8 +917,8 @@ coalition_release(coalition_t coal) #endif /* COALITION_DEBUG */ coal_dbg("id:%llu type:%s ref_count:%u active_count:%u%s", - coal->id, coal_type_str(coal->type), rc, ac, - rc <= 0 ? ", will deallocate now" : ""); + coal->id, coal_type_str(coal->type), rc, ac, + rc <= 0 ? 
", will deallocate now" : ""); if (coal->ref_count > 0) { coalition_unlock(coal); @@ -919,7 +932,7 @@ coalition_release(coalition_t coal) assert(coal->focal_task_count == 0); assert(coal->nonfocal_task_count == 0); KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_FREE), - coal->id, coal->type); + coal->id, coal->type); coal_call(coal, dealloc); @@ -983,7 +996,7 @@ coalition_find_by_id(uint64_t cid) if (coal->ref_count == 0) { panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n", - coal, coal->id, coal_type_str(coal->type), coal->active_count); + coal, coal->id, coal_type_str(coal->type), coal->active_count); } coal->ref_count++; #if COALITION_DEBUG @@ -994,7 +1007,7 @@ coalition_find_by_id(uint64_t cid) lck_mtx_unlock(&coalitions_list_lock); coal_dbg("id:%llu type:%s ref_count:%u", - coal->id, coal_type_str(coal->type), rc); + coal->id, coal_type_str(coal->type), rc); return coal; } @@ -1034,7 +1047,7 @@ coalition_find_and_activate_by_id(uint64_t cid) if (coal->ref_count == 0) { panic("resurrecting coalition %p id:%llu type:%s, active_count:%u\n", - coal, coal->id, coal_type_str(coal->type), coal->active_count); + coal, coal->id, coal_type_str(coal->type), coal->active_count); } coal->ref_count++; @@ -1049,7 +1062,7 @@ coalition_find_and_activate_by_id(uint64_t cid) lck_mtx_unlock(&coalitions_list_lock); coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u", - coal->id, coal_type_str(coal->type), rc, ac); + coal->id, coal_type_str(coal->type), rc, ac); return coal; } @@ -1065,10 +1078,11 @@ task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]) { int i; for (i = 0; i < COALITION_NUM_TYPES; i++) { - if (task->coalition[i]) + if (task->coalition[i]) { ids[i] = task->coalition[i]->id; - else + } else { ids[i] = 0; + } } } @@ -1082,7 +1096,7 @@ task_coalition_roles(task_t task, int roles[COALITION_NUM_TYPES]) if (task->coalition[i]) { coalition_lock(task->coalition[i]); roles[i] = coal_call(task->coalition[i], - get_taskrole, task); + get_taskrole, task); coalition_unlock(task->coalition[i]); } else { roles[i] = COALITION_TASKROLE_NONE; @@ -1124,22 +1138,27 @@ coalition_is_privileged(coalition_t coal) boolean_t task_is_in_privileged_coalition(task_t task, int type) { - if (type < 0 || type > COALITION_TYPE_MAX) + if (type < 0 || type > COALITION_TYPE_MAX) { return FALSE; - if (unrestrict_coalition_syscalls) + } + if (unrestrict_coalition_syscalls) { return TRUE; - if (!task->coalition[type]) + } + if (!task->coalition[type]) { return FALSE; + } return task->coalition[type]->privileged; } -void task_coalition_update_gpu_stats(task_t task, uint64_t gpu_ns_delta) +void +task_coalition_update_gpu_stats(task_t task, uint64_t gpu_ns_delta) { coalition_t coal; assert(task != TASK_NULL); - if (gpu_ns_delta == 0) + if (gpu_ns_delta == 0) { return; + } coal = task->coalition[COALITION_TYPE_RESOURCE]; assert(coal != COALITION_NULL); @@ -1149,61 +1168,71 @@ void task_coalition_update_gpu_stats(task_t task, uint64_t gpu_ns_delta) coalition_unlock(coal); } -boolean_t task_coalition_adjust_focal_count(task_t task, int count, uint32_t *new_count) +boolean_t +task_coalition_adjust_focal_count(task_t task, int count, uint32_t *new_count) { coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING]; - if (coal == COALITION_NULL) - return FALSE; + if (coal == COALITION_NULL) { + return FALSE; + } *new_count = hw_atomic_add(&coal->focal_task_count, count); assert(*new_count != UINT32_MAX); return TRUE; } -uint32_t task_coalition_focal_count(task_t task) 
+uint32_t +task_coalition_focal_count(task_t task) { coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING]; - if (coal == COALITION_NULL) - return 0; + if (coal == COALITION_NULL) { + return 0; + } return coal->focal_task_count; } -boolean_t task_coalition_adjust_nonfocal_count(task_t task, int count, uint32_t *new_count) +boolean_t +task_coalition_adjust_nonfocal_count(task_t task, int count, uint32_t *new_count) { coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING]; - if (coal == COALITION_NULL) - return FALSE; + if (coal == COALITION_NULL) { + return FALSE; + } *new_count = hw_atomic_add(&coal->nonfocal_task_count, count); assert(*new_count != UINT32_MAX); return TRUE; } -uint32_t task_coalition_nonfocal_count(task_t task) +uint32_t +task_coalition_nonfocal_count(task_t task) { coalition_t coal = task->coalition[COALITION_FOCAL_TASKS_ACCOUNTING]; - if (coal == COALITION_NULL) - return 0; + if (coal == COALITION_NULL) { + return 0; + } return coal->nonfocal_task_count; } -void coalition_set_efficient(coalition_t coal) +void +coalition_set_efficient(coalition_t coal) { - coalition_lock(coal); - coal->efficient = TRUE; - coalition_unlock(coal); + coalition_lock(coal); + coal->efficient = TRUE; + coalition_unlock(coal); } -void coalition_for_each_task(coalition_t coal, void *ctx, - void (*callback)(coalition_t, void *, task_t)) +void +coalition_for_each_task(coalition_t coal, void *ctx, + void (*callback)(coalition_t, void *, task_t)) { assert(coal != COALITION_NULL); coal_dbg("iterating tasks in coalition %p id:%llu type:%s, active_count:%u", - coal, coal->id, coal_type_str(coal->type), coal->active_count); + coal, coal->id, coal_type_str(coal->type), coal->active_count); coalition_lock(coal); @@ -1255,7 +1284,7 @@ coalition_remove_active(coalition_t coal) coalition_unlock(coal); coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u,%s", - cid, coal_type_str(ct), rc, ac, do_notify ? " NOTIFY" : " "); + cid, coal_type_str(ct), rc, ac, do_notify ? 
" NOTIFY" : " "); if (do_notify) { coalition_notify_user(notify_id, notify_flags); @@ -1308,8 +1337,9 @@ coalition_adopt_task_internal(coalition_t coal, task_t task) } kr = coal_call(coal, adopt_task, task); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { goto out_unlock; + } coal->active_count++; @@ -1327,13 +1357,13 @@ out_unlock: if (get_task_uniqueid(task) != UINT64_MAX) { /* On 32-bit targets, uniqueid will get truncated to 32 bits */ KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_ADOPT), - coal->id, get_task_uniqueid(task)); + coal->id, get_task_uniqueid(task)); } coalition_unlock(coal); coal_dbg("task:%d, id:%llu type:%s ref_count:%u, kr=%d", - task_pid(task), cid, coal_type_str(ct), rc, kr); + task_pid(task), cid, coal_type_str(ct), rc, kr); return kr; } @@ -1344,8 +1374,9 @@ coalition_remove_task_internal(task_t task, int type) coalition_t coal = task->coalition[type]; - if (!coal) + if (!coal) { return KERN_SUCCESS; + } assert(coal->type == (uint32_t)type); @@ -1360,11 +1391,11 @@ coalition_remove_task_internal(task_t task, int type) int ct = coal->type; #endif KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_COALITION, MACH_COALITION_REMOVE), - coal->id, get_task_uniqueid(task)); + coal->id, get_task_uniqueid(task)); coalition_unlock(coal); coal_dbg("id:%llu type:%s ref_count:%u, active_count:%u, kr=%d", - cid, coal_type_str(ct), rc, ac, kr); + cid, coal_type_str(ct), rc, ac, kr); coalition_remove_active(coal); @@ -1382,23 +1413,28 @@ coalitions_adopt_task(coalition_t *coals, task_t task) int i; kern_return_t kr; - if (!coals || coals[COALITION_TYPE_RESOURCE] == COALITION_NULL) + if (!coals || coals[COALITION_TYPE_RESOURCE] == COALITION_NULL) { return KERN_INVALID_ARGUMENT; + } /* verify that the incoming coalitions are what they say they are */ - for (i = 0; i < COALITION_NUM_TYPES; i++) - if (coals[i] && coals[i]->type != (uint32_t)i) + for (i = 0; i < COALITION_NUM_TYPES; i++) { + if (coals[i] && coals[i]->type != (uint32_t)i) { return KERN_INVALID_ARGUMENT; + } + } for (i = 0; i < COALITION_NUM_TYPES; i++) { kr = KERN_SUCCESS; - if (coals[i]) + if (coals[i]) { kr = coalition_adopt_task_internal(coals[i], task); + } if (kr != KERN_SUCCESS) { /* dis-associate any coalitions that just adopted this task */ while (--i >= 0) { - if (task->coalition[i]) + if (task->coalition[i]) { coalition_remove_task_internal(task, i); + } } break; } @@ -1448,15 +1484,17 @@ task_release_coalitions(task_t task) * that type (given in the coalitions parameter) then set the role of * the task within that that coalition. 
*/ -kern_return_t coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES], - task_t task, int roles[COALITION_NUM_TYPES]) +kern_return_t +coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES], + task_t task, int roles[COALITION_NUM_TYPES]) { kern_return_t kr = KERN_SUCCESS; int i; for (i = 0; i < COALITION_NUM_TYPES; i++) { - if (!coalitions[i]) + if (!coalitions[i]) { continue; + } coalition_lock(coalitions[i]); kr = coal_call(coalitions[i], set_taskrole, task, roles[i]); coalition_unlock(coalitions[i]); @@ -1576,11 +1614,13 @@ coalition_reap_internal(coalition_t coal) } #if DEVELOPMENT || DEBUG -int coalition_should_notify(coalition_t coal) +int +coalition_should_notify(coalition_t coal) { int should; - if (!coal) + if (!coal) { return -1; + } coalition_lock(coal); should = coal->should_notify; coalition_unlock(coal); @@ -1588,10 +1628,12 @@ int coalition_should_notify(coalition_t coal) return should; } -void coalition_set_notify(coalition_t coal, int notify) +void +coalition_set_notify(coalition_t coal, int notify) { - if (!coal) + if (!coal) { return; + } coalition_lock(coal); coal->should_notify = !!notify; coalition_unlock(coal); @@ -1606,20 +1648,20 @@ coalitions_init(void) const struct coalition_type *ctype; coalition_zone = zinit( - sizeof(struct coalition), - CONFIG_COALITION_MAX * sizeof(struct coalition), - COALITION_CHUNK * sizeof(struct coalition), - "coalitions"); + sizeof(struct coalition), + CONFIG_COALITION_MAX * sizeof(struct coalition), + COALITION_CHUNK * sizeof(struct coalition), + "coalitions"); zone_change(coalition_zone, Z_NOENCRYPT, TRUE); queue_head_init(coalitions_q); if (!PE_parse_boot_argn("unrestrict_coalition_syscalls", &unrestrict_coalition_syscalls, - sizeof (unrestrict_coalition_syscalls))) { + sizeof(unrestrict_coalition_syscalls))) { unrestrict_coalition_syscalls = 0; } if (!PE_parse_boot_argn("tg_adaptive", &merge_adaptive_coalitions, - sizeof (merge_adaptive_coalitions))) { + sizeof(merge_adaptive_coalitions))) { merge_adaptive_coalitions = 0; } @@ -1638,18 +1680,21 @@ coalitions_init(void) !ctype->adopt_task || !ctype->remove_task) { panic("%s: Malformed coalition type %s(%d) in slot for type:%s(%d)", - __func__, coal_type_str(ctype->type), ctype->type, coal_type_str(i), i); + __func__, coal_type_str(ctype->type), ctype->type, coal_type_str(i), i); } - if (!ctype->has_default) + if (!ctype->has_default) { continue; + } kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, TRUE, &init_coalition[ctype->type]); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { panic("%s: could not create init %s coalition: kr:%d", - __func__, coal_type_str(i), kr); + __func__, coal_type_str(i), kr); + } kr = coalition_create_internal(ctype->type, COALITION_ROLE_SYSTEM, FALSE, &corpse_coalition[ctype->type]); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { panic("%s: could not create corpse %s coalition: kr:%d", - __func__, coal_type_str(i), kr); + __func__, coal_type_str(i), kr); + } } /* "Leak" our reference to the global object */ @@ -1659,8 +1704,9 @@ coalitions_init(void) * BSD Kernel interface functions * */ -static void coalition_fill_procinfo(struct coalition *coal, - struct procinfo_coalinfo *coalinfo) +static void +coalition_fill_procinfo(struct coalition *coal, + struct procinfo_coalinfo *coalinfo) { coalinfo->coalition_id = coal->id; coalinfo->coalition_type = coal->type; @@ -1668,7 +1714,8 @@ static void coalition_fill_procinfo(struct coalition *coal, } -int coalitions_get_list(int type, struct procinfo_coalinfo 
*coal_list, int list_sz) +int +coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_sz) { int ncoals = 0; struct coalition *coal; @@ -1676,8 +1723,9 @@ int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_ lck_mtx_lock(&coalitions_list_lock); qe_foreach_element(coal, &coalitions_q, coalitions) { if (!coal->reaped && (type < 0 || type == (int)coal->type)) { - if (coal_list && ncoals < list_sz) + if (coal_list && ncoals < list_sz) { coalition_fill_procinfo(coal, &coal_list[ncoals]); + } ++ncoals; } } @@ -1690,60 +1738,72 @@ int coalitions_get_list(int type, struct procinfo_coalinfo *coal_list, int list_ * Jetsam coalition interface * */ -boolean_t coalition_is_leader(task_t task, int coal_type, coalition_t *coal) +boolean_t +coalition_is_leader(task_t task, int coal_type, coalition_t *coal) { coalition_t c; boolean_t ret; - if (coal) /* handle the error cases gracefully */ + if (coal) { /* handle the error cases gracefully */ *coal = COALITION_NULL; + } - if (!task) + if (!task) { return FALSE; + } - if (coal_type > COALITION_TYPE_MAX) + if (coal_type > COALITION_TYPE_MAX) { return FALSE; + } c = task->coalition[coal_type]; - if (!c) + if (!c) { return FALSE; + } assert((int)c->type == coal_type); coalition_lock(c); - if (coal) + if (coal) { *coal = c; + } ret = FALSE; - if (c->type == COALITION_TYPE_JETSAM && c->j.leader == task) + if (c->type == COALITION_TYPE_JETSAM && c->j.leader == task) { ret = TRUE; + } coalition_unlock(c); return ret; } -kern_return_t coalition_iterate_stackshot(coalition_iterate_fn_t callout, void *arg, uint32_t coalition_type) +kern_return_t +coalition_iterate_stackshot(coalition_iterate_fn_t callout, void *arg, uint32_t coalition_type) { coalition_t coal; int i = 0; qe_foreach_element(coal, &coalitions_q, coalitions) { - if (coal == NULL || !ml_validate_nofault((vm_offset_t)coal, sizeof(struct coalition))) + if (coal == NULL || !ml_validate_nofault((vm_offset_t)coal, sizeof(struct coalition))) { return KERN_FAILURE; + } - if (coalition_type == coal->type) + if (coalition_type == coal->type) { callout(arg, i++, coal); + } } return KERN_SUCCESS; } -task_t kdp_coalition_get_leader(coalition_t coal) +task_t +kdp_coalition_get_leader(coalition_t coal) { - if (!coal) + if (!coal) { return TASK_NULL; + } if (coal->type == COALITION_TYPE_JETSAM) { return coal->j.leader; @@ -1751,20 +1811,24 @@ task_t kdp_coalition_get_leader(coalition_t coal) return TASK_NULL; } -task_t coalition_get_leader(coalition_t coal) +task_t +coalition_get_leader(coalition_t coal) { task_t leader = TASK_NULL; - if (!coal) + if (!coal) { return TASK_NULL; + } coalition_lock(coal); - if (coal->type != COALITION_TYPE_JETSAM) + if (coal->type != COALITION_TYPE_JETSAM) { goto out_unlock; + } leader = coal->j.leader; - if (leader != TASK_NULL) + if (leader != TASK_NULL) { task_reference(leader); + } out_unlock: coalition_unlock(coal); @@ -1772,28 +1836,31 @@ out_unlock: } -int coalition_get_task_count(coalition_t coal) +int +coalition_get_task_count(coalition_t coal) { int ntasks = 0; struct queue_entry *qe; - if (!coal) + if (!coal) { return 0; + } coalition_lock(coal); switch (coal->type) { case COALITION_TYPE_RESOURCE: qe_foreach(qe, &coal->r.tasks) - ntasks++; + ntasks++; break; case COALITION_TYPE_JETSAM: - if (coal->j.leader) + if (coal->j.leader) { ntasks++; + } qe_foreach(qe, &coal->j.other) - ntasks++; + ntasks++; qe_foreach(qe, &coal->j.extensions) - ntasks++; + ntasks++; qe_foreach(qe, &coal->j.services) - ntasks++; + ntasks++; break; default: 
break; @@ -1804,7 +1871,8 @@ int coalition_get_task_count(coalition_t coal) } -static uint64_t i_get_list_footprint(queue_t list, int type, int *ntasks) +static uint64_t +i_get_list_footprint(queue_t list, int type, int *ntasks) { task_t task; uint64_t bytes = 0; @@ -1812,22 +1880,25 @@ static uint64_t i_get_list_footprint(queue_t list, int type, int *ntasks) qe_foreach_element(task, list, task_coalition[type]) { bytes += get_task_phys_footprint(task); coal_dbg(" [%d] task_pid:%d, type:%d, footprint:%lld", - *ntasks, task_pid(task), type, bytes); + *ntasks, task_pid(task), type, bytes); *ntasks += 1; } return bytes; } -uint64_t coalition_get_page_count(coalition_t coal, int *ntasks) +uint64_t +coalition_get_page_count(coalition_t coal, int *ntasks) { uint64_t bytes = 0; int num_tasks = 0; - if (ntasks) + if (ntasks) { *ntasks = 0; - if (!coal) + } + if (!coal) { return bytes; + } coalition_lock(coal); @@ -1850,8 +1921,9 @@ uint64_t coalition_get_page_count(coalition_t coal, int *ntasks) coalition_unlock(coal); - if (ntasks) + if (ntasks) { *ntasks = num_tasks; + } return bytes / PAGE_SIZE_64; } @@ -1872,7 +1944,8 @@ typedef int (*cmpfunc_t)(const void *a, const void *b); extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp); -static int dflt_cmp(const void *a, const void *b) +static int +dflt_cmp(const void *a, const void *b) { const struct coal_sort_s *csA = (const struct coal_sort_s *)a; const struct coal_sort_s *csB = (const struct coal_sort_s *)b; @@ -1880,14 +1953,16 @@ static int dflt_cmp(const void *a, const void *b) /* * if both A and B are equal, use a memory descending sort */ - if (csA->usr_order == csB->usr_order) + if (csA->usr_order == csB->usr_order) { return (int)((int64_t)csB->bytes - (int64_t)csA->bytes); + } /* otherwise, return the relationship between user specified orders */ - return (csA->usr_order - csB->usr_order); + return csA->usr_order - csB->usr_order; } -static int mem_asc_cmp(const void *a, const void *b) +static int +mem_asc_cmp(const void *a, const void *b) { const struct coal_sort_s *csA = (const struct coal_sort_s *)a; const struct coal_sort_s *csB = (const struct coal_sort_s *)b; @@ -1895,7 +1970,8 @@ static int mem_asc_cmp(const void *a, const void *b) return (int)((int64_t)csA->bytes - (int64_t)csB->bytes); } -static int mem_dec_cmp(const void *a, const void *b) +static int +mem_dec_cmp(const void *a, const void *b) { const struct coal_sort_s *csA = (const struct coal_sort_s *)a; const struct coal_sort_s *csB = (const struct coal_sort_s *)b; @@ -1903,35 +1979,39 @@ static int mem_dec_cmp(const void *a, const void *b) return (int)((int64_t)csB->bytes - (int64_t)csA->bytes); } -static int usr_asc_cmp(const void *a, const void *b) +static int +usr_asc_cmp(const void *a, const void *b) { const struct coal_sort_s *csA = (const struct coal_sort_s *)a; const struct coal_sort_s *csB = (const struct coal_sort_s *)b; - return (csA->usr_order - csB->usr_order); + return csA->usr_order - csB->usr_order; } -static int usr_dec_cmp(const void *a, const void *b) +static int +usr_dec_cmp(const void *a, const void *b) { const struct coal_sort_s *csA = (const struct coal_sort_s *)a; const struct coal_sort_s *csB = (const struct coal_sort_s *)b; - return (csB->usr_order - csA->usr_order); + return csB->usr_order - csA->usr_order; } /* avoid dynamic allocation in this path */ #define MAX_SORTED_PIDS 80 -static int coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list, - struct coal_sort_s *sort_array, int array_sz) +static int 
+coalition_get_sort_list(coalition_t coal, int sort_order, queue_t list, + struct coal_sort_s *sort_array, int array_sz) { int ntasks = 0; task_t task; assert(sort_array != NULL); - if (array_sz <= 0) + if (array_sz <= 0) { return 0; + } if (!list) { /* @@ -1940,13 +2020,14 @@ static int coalition_get_sort_list(coalition_t coal, int sort_order, queue_t lis * to investigate the leader process */ if (coal->type != COALITION_TYPE_JETSAM || - coal->j.leader == TASK_NULL) + coal->j.leader == TASK_NULL) { return 0; + } sort_array[0].pid = task_pid(coal->j.leader); switch (sort_order) { case COALITION_SORT_DEFAULT: sort_array[0].usr_order = 0; - /* fall-through */ + /* fall-through */ case COALITION_SORT_MEM_ASC: case COALITION_SORT_MEM_DEC: sort_array[0].bytes = get_task_phys_footprint(coal->j.leader); @@ -1964,7 +2045,7 @@ static int coalition_get_sort_list(coalition_t coal, int sort_order, queue_t lis qe_foreach_element(task, list, task_coalition[coal->type]) { if (ntasks >= array_sz) { printf("WARNING: more than %d pids in coalition %llu\n", - MAX_SORTED_PIDS, coal->id); + MAX_SORTED_PIDS, coal->id); break; } @@ -1973,7 +2054,7 @@ static int coalition_get_sort_list(coalition_t coal, int sort_order, queue_t lis switch (sort_order) { case COALITION_SORT_DEFAULT: sort_array[ntasks].usr_order = 0; - /* fall-through */ + /* fall-through */ case COALITION_SORT_MEM_ASC: case COALITION_SORT_MEM_DEC: sort_array[ntasks].bytes = get_task_phys_footprint(task); @@ -1992,20 +2073,21 @@ static int coalition_get_sort_list(coalition_t coal, int sort_order, queue_t lis return ntasks; } -int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order, - int *pid_list, int list_sz) +int +coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order, + int *pid_list, int list_sz) { struct i_jetsam_coalition *cj; int ntasks = 0; cmpfunc_t cmp_func = NULL; - struct coal_sort_s sort_array[MAX_SORTED_PIDS] = { {0,0,0} }; /* keep to < 2k */ + struct coal_sort_s sort_array[MAX_SORTED_PIDS] = { {0, 0, 0} }; /* keep to < 2k */ if (!coal || !(rolemask & COALITION_ROLEMASK_ALLROLES) || !pid_list || list_sz < 1) { coal_dbg("Invalid parameters: coal:%p, type:%d, rolemask:0x%x, " - "pid_list:%p, list_sz:%d", coal, coal ? coal->type : -1, - rolemask, pid_list, list_sz); + "pid_list:%p, list_sz:%d", coal, coal ? 
coal->type : -1, + rolemask, pid_list, list_sz); return -EINVAL; } @@ -2036,45 +2118,51 @@ int coalition_get_pid_list(coalition_t coal, uint32_t rolemask, int sort_order, if (coal->type == COALITION_TYPE_RESOURCE) { ntasks += coalition_get_sort_list(coal, sort_order, &coal->r.tasks, - sort_array, MAX_SORTED_PIDS); + sort_array, MAX_SORTED_PIDS); goto unlock_coal; } cj = &coal->j; - if (rolemask & COALITION_ROLEMASK_UNDEF) + if (rolemask & COALITION_ROLEMASK_UNDEF) { ntasks += coalition_get_sort_list(coal, sort_order, &cj->other, - sort_array + ntasks, - MAX_SORTED_PIDS - ntasks); + sort_array + ntasks, + MAX_SORTED_PIDS - ntasks); + } - if (rolemask & COALITION_ROLEMASK_XPC) + if (rolemask & COALITION_ROLEMASK_XPC) { ntasks += coalition_get_sort_list(coal, sort_order, &cj->services, - sort_array + ntasks, - MAX_SORTED_PIDS - ntasks); + sort_array + ntasks, + MAX_SORTED_PIDS - ntasks); + } - if (rolemask & COALITION_ROLEMASK_EXT) + if (rolemask & COALITION_ROLEMASK_EXT) { ntasks += coalition_get_sort_list(coal, sort_order, &cj->extensions, - sort_array + ntasks, - MAX_SORTED_PIDS - ntasks); + sort_array + ntasks, + MAX_SORTED_PIDS - ntasks); + } - if (rolemask & COALITION_ROLEMASK_LEADER) + if (rolemask & COALITION_ROLEMASK_LEADER) { ntasks += coalition_get_sort_list(coal, sort_order, NULL, - sort_array + ntasks, - MAX_SORTED_PIDS - ntasks); + sort_array + ntasks, + MAX_SORTED_PIDS - ntasks); + } unlock_coal: coalition_unlock(coal); /* sort based on the chosen criterion (no sense sorting 1 item) */ - if (cmp_func && ntasks > 1) + if (cmp_func && ntasks > 1) { qsort(sort_array, ntasks, sizeof(struct coal_sort_s), cmp_func); + } for (int i = 0; i < ntasks; i++) { - if (i >= list_sz) + if (i >= list_sz) { break; + } coal_dbg(" [%d] PID:%d, footprint:%lld, usr_order:%d", - i, sort_array[i].pid, sort_array[i].bytes, - sort_array[i].usr_order); + i, sort_array[i].pid, sort_array[i].bytes, + sort_array[i].usr_order); pid_list[i] = sort_array[i].pid; } diff --git a/osfmk/kern/coalition.h b/osfmk/kern/coalition.h index 195ba05be..29da7719c 100644 --- a/osfmk/kern/coalition.h +++ b/osfmk/kern/coalition.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -56,7 +56,7 @@ void task_release_coalitions(task_t task); * */ kern_return_t coalitions_set_roles(coalition_t coalitions[COALITION_NUM_TYPES], - task_t task, int roles[COALITION_NUM_TYPES]); + task_t task, int roles[COALITION_NUM_TYPES]); uint64_t coalition_id(coalition_t coal); void task_coalition_ids(task_t task, uint64_t ids[COALITION_NUM_TYPES]); @@ -69,13 +69,13 @@ uint32_t task_coalition_focal_count(task_t task); boolean_t task_coalition_adjust_nonfocal_count(task_t task, int count, uint32_t *new_count); uint32_t task_coalition_nonfocal_count(task_t task); struct thread_group *task_coalition_get_thread_group(task_t task); -void coalition_set_thread_group(coalition_t coal, struct thread_group *tg); +void coalition_set_thread_group(coalition_t coal, struct thread_group *tg); struct thread_group *kdp_coalition_get_thread_group(coalition_t coal); struct thread_group *coalition_get_thread_group(coalition_t coal); void task_coalition_thread_group_focal_update(task_t task); void coalition_for_each_task(coalition_t coal, void *ctx, - void (*callback)(coalition_t, void *, task_t)); + void (*callback)(coalition_t, void *, task_t)); void coalition_set_efficient(coalition_t coal); @@ -143,34 +143,39 @@ void coalition_set_notify(coalition_t coal, int notify); #else /* !CONFIG_COALITIONS */ -static inline void task_coalition_update_gpu_stats(__unused task_t task, - __unused uint64_t gpu_ns_delta) +static inline void +task_coalition_update_gpu_stats(__unused task_t task, + __unused uint64_t gpu_ns_delta) { return; } -static inline boolean_t task_coalition_adjust_focal_count(__unused task_t task, - __unused int count, - __unused uint32_t *new_count) +static inline boolean_t +task_coalition_adjust_focal_count(__unused task_t task, + __unused int count, + __unused uint32_t *new_count) { return FALSE; } -static inline boolean_t task_coalition_adjust_nonfocal_count(__unused task_t task, - __unused int count, - __unused uint32_t *new_count) +static inline boolean_t +task_coalition_adjust_nonfocal_count(__unused task_t task, + __unused int count, + __unused uint32_t *new_count) { return FALSE; } -static inline uint32_t task_coalition_focal_count(__unused task_t task) +static inline uint32_t +task_coalition_focal_count(__unused task_t task) { return 0; } -static inline void coalition_for_each_task(__unused coalition_t coal, - __unused void *ctx, - __unused void (*callback)(coalition_t, void *, task_t)) +static inline void +coalition_for_each_task(__unused coalition_t coal, + __unused void *ctx, + __unused void (*callback)(coalition_t, void *, task_t)) { return; } diff --git a/osfmk/kern/copyout_shim.c b/osfmk/kern/copyout_shim.c index ea553dc6a..0b399e84d 100644 --- a/osfmk/kern/copyout_shim.c +++ b/osfmk/kern/copyout_shim.c @@ -40,60 +40,60 @@ #if (DEVELOPMENT || DEBUG) -copyout_shim_fn_t copyout_shim_fn=NULL; -unsigned co_src_flags=0; +copyout_shim_fn_t copyout_shim_fn = NULL; +unsigned co_src_flags = 0; #endif -kern_return_t register_copyout_shim(void (*fn)(const void *,user_addr_t,vm_size_t,unsigned co_src),unsigned types) +kern_return_t +register_copyout_shim(void (*fn)(const void *, user_addr_t, vm_size_t, unsigned co_src), unsigned types) { #if (DEVELOPMENT || DEBUG) - int copyout_shim_enabled=0; - - if(!fn) - { - /* unregistration is always allowed */ - copyout_shim_fn=NULL; - return KERN_SUCCESS; - } - - if(copyout_shim_fn) - { - //need to unregister first before registering a new one. 
- return KERN_FAILURE; - } - - if(!PE_parse_boot_argn("enable_copyout_shim",&copyout_shim_enabled,sizeof(copyout_shim_enabled)) || !copyout_shim_enabled) - { - return KERN_FAILURE; - } - + int copyout_shim_enabled = 0; - co_src_flags=types; - copyout_shim_fn=fn; - return KERN_SUCCESS; + if (!fn) { + /* unregistration is always allowed */ + copyout_shim_fn = NULL; + return KERN_SUCCESS; + } + + if (copyout_shim_fn) { + //need to unregister first before registering a new one. + return KERN_FAILURE; + } + + if (!PE_parse_boot_argn("enable_copyout_shim", &copyout_shim_enabled, sizeof(copyout_shim_enabled)) || !copyout_shim_enabled) { + return KERN_FAILURE; + } + + + co_src_flags = types; + copyout_shim_fn = fn; + return KERN_SUCCESS; #else - UNUSED_IN_RELEASE(fn); - UNUSED_IN_RELEASE(types); - return KERN_FAILURE; + UNUSED_IN_RELEASE(fn); + UNUSED_IN_RELEASE(types); + return KERN_FAILURE; #endif } -void *cos_kernel_unslide(const void *ptr) +void * +cos_kernel_unslide(const void *ptr) { #if (DEVELOPMENT || DEBUG) - return (void *)(VM_KERNEL_UNSLIDE(ptr)); + return (void *)(VM_KERNEL_UNSLIDE(ptr)); #else - UNUSED_IN_RELEASE(ptr); - return NULL; + UNUSED_IN_RELEASE(ptr); + return NULL; #endif } -void *cos_kernel_reslide(const void *ptr) +void * +cos_kernel_reslide(const void *ptr) { #if (DEVELOPMENT || DEBUG) - return (void *)(VM_KERNEL_SLIDE(ptr)); + return (void *)(VM_KERNEL_SLIDE(ptr)); #else - UNUSED_IN_RELEASE(ptr); - return NULL; + UNUSED_IN_RELEASE(ptr); + return NULL; #endif } diff --git a/osfmk/kern/copyout_shim.h b/osfmk/kern/copyout_shim.h index 200cd4515..06a65ac9d 100644 --- a/osfmk/kern/copyout_shim.h +++ b/osfmk/kern/copyout_shim.h @@ -46,7 +46,7 @@ #define CO_SRC_MSG (1<<1) //copyoutmsg() called #define CO_SRC_PHYS (1<<2) //copyio(COPYOUTPHYS,...) called -typedef void (*copyout_shim_fn_t)(const void *,user_addr_t,vm_size_t,unsigned co_src); +typedef void (*copyout_shim_fn_t)(const void *, user_addr_t, vm_size_t, unsigned co_src); #ifdef MACH_KERNEL_PRIVATE #if(DEVELOPMENT || DEBUG) && (COPYOUT_SHIM > 0) @@ -56,26 +56,26 @@ extern unsigned co_src_flags; // void call_copyout_shim(const void *kernel_addr,user_addr_t user_addr,vm_size_t nbytes,int copy_type,int copyout_flavors); -#define CALL_COPYOUT_SHIM_NRML(ka,ua,nb) \ +#define CALL_COPYOUT_SHIM_NRML(ka, ua, nb) \ if(copyout_shim_fn && (co_src_flags & CO_SRC_NORMAL)) {copyout_shim_fn(ka,ua,nb,CO_SRC_NORMAL); } -#define CALL_COPYOUT_SHIM_MSG(ka,ua,nb) \ +#define CALL_COPYOUT_SHIM_MSG(ka, ua, nb) \ if(copyout_shim_fn && (co_src_flags & CO_SRC_MSG)){copyout_shim_fn(ka,ua,nb,CO_SRC_MSG); } - -#define CALL_COPYOUT_SHIM_PHYS(ka,ua,nb) \ + +#define CALL_COPYOUT_SHIM_PHYS(ka, ua, nb) \ if(copyout_shim_fn && (co_src_flags & CO_SRC_PHYS)){copyout_shim_fn(ka,ua,nb,CO_SRC_PHYS); } #else - //Make these calls disappear if we're RELEASE or if COPYOUT_SHIM didn't get built -#define CALL_COPYOUT_SHIM_NRML(ka,ua,nb) -#define CALL_COPYOUT_SHIM_MSG(ka,ua,nb) -#define CALL_COPYOUT_SHIM_PHYS(ka,ua,nb) +//Make these calls disappear if we're RELEASE or if COPYOUT_SHIM didn't get built +#define CALL_COPYOUT_SHIM_NRML(ka, ua, nb) +#define CALL_COPYOUT_SHIM_MSG(ka, ua, nb) +#define CALL_COPYOUT_SHIM_PHYS(ka, ua, nb) #endif /* (DEVELOPMENT || DEBUG) && (COPYOUT_SHIM > 0) */ #endif /* MACH_KERNEL_PRIVATE */ kern_return_t -register_copyout_shim(copyout_shim_fn_t copyout_shim_fn,unsigned co_src_flags); +register_copyout_shim(copyout_shim_fn_t copyout_shim_fn, unsigned co_src_flags); #define unregister_copyout_shim() register_copyout_shim(NULL,0) diff --git
a/osfmk/kern/counters.c b/osfmk/kern/counters.c index bac65ebd7..2e56e413c 100644 --- a/osfmk/kern/counters.c +++ b/osfmk/kern/counters.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -66,7 +66,7 @@ * This makes them easier to examine with ddb. */ -#if MACH_COUNTERS +#if MACH_COUNTERS mach_counter_t c_action_thread_block = 0; mach_counter_t c_ast_taken_block = 0; mach_counter_t c_dev_io_blocks = 0; @@ -105,4 +105,4 @@ mach_counter_t c_vm_pageout_block = 0; mach_counter_t c_vm_pageout_scan_block = 0; mach_counter_t c_vm_fault_retry_on_w_prot = 0; mach_counter_t c_vm_fault_wait_on_unlock = 0; -#endif /* MACH_COUNTERS */ +#endif /* MACH_COUNTERS */ diff --git a/osfmk/kern/counters.h b/osfmk/kern/counters.h index f42a0a703..e0f9aaea6 100644 --- a/osfmk/kern/counters.h +++ b/osfmk/kern/counters.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _KERN_COUNTERS_ -#define _KERN_COUNTERS_ +#ifndef _KERN_COUNTERS_ +#define _KERN_COUNTERS_ #include @@ -69,17 +69,17 @@ * Use counter_always() for non-conditional counters. */ -#define counter_always(code) code +#define counter_always(code) code -#if MACH_COUNTERS +#if MACH_COUNTERS -#define counter(code) counter_always(code) +#define counter(code) counter_always(code) -#else /* MACH_COUNTERS */ +#else /* MACH_COUNTERS */ #define counter(code) -#endif /* MACH_COUNTERS */ +#endif /* MACH_COUNTERS */ /* * We define the counters with individual integers, @@ -89,7 +89,7 @@ typedef unsigned int mach_counter_t; -#if MACH_COUNTERS +#if MACH_COUNTERS extern mach_counter_t c_action_thread_block; extern mach_counter_t c_ast_taken_block; extern mach_counter_t c_dev_io_blocks; @@ -128,7 +128,6 @@ extern mach_counter_t c_vm_map_simplify_entry_called; extern mach_counter_t c_vm_page_wait_block; extern mach_counter_t c_vm_pageout_block; extern mach_counter_t c_vm_pageout_scan_block; -#endif /* MACH_COUNTERS */ - -#endif /* _KERN_COUNTERS_ */ +#endif /* MACH_COUNTERS */ +#endif /* _KERN_COUNTERS_ */ diff --git a/osfmk/kern/cpu_data.h b/osfmk/kern/cpu_data.h index 58d4ecad8..c7d28d316 100644 --- a/osfmk/kern/cpu_data.h +++ b/osfmk/kern/cpu_data.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,39 +22,39 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -#ifndef _KERN_CPU_DATA_H_ -#define _KERN_CPU_DATA_H_ +#ifndef _KERN_CPU_DATA_H_ +#define _KERN_CPU_DATA_H_ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS -extern void _disable_preemption(void); -extern void _enable_preemption(void); +extern void _disable_preemption(void); +extern void _enable_preemption(void); #ifndef MACHINE_PREEMPTION_MACROS -#define disable_preemption() _disable_preemption() -#define enable_preemption() _enable_preemption() +#define disable_preemption() _disable_preemption() +#define enable_preemption() _enable_preemption() #endif __END_DECLS -#endif /* _KERN_CPU_DATA_H_ */ +#endif /* _KERN_CPU_DATA_H_ */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/kern/cpu_number.h b/osfmk/kern/cpu_number.h index 2894c24a9..2b5bcad5d 100644 --- a/osfmk/kern/cpu_number.h +++ b/osfmk/kern/cpu_number.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,47 +22,47 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -#ifndef _KERN_CPU_NUMBER_H_ -#define _KERN_CPU_NUMBER_H_ +#ifndef _KERN_CPU_NUMBER_H_ +#define _KERN_CPU_NUMBER_H_ -extern int master_cpu; +extern int master_cpu; #include -#endif /* _KERN_CPU_NUMBER_H_ */ +#endif /* _KERN_CPU_NUMBER_H_ */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/kern/cpu_quiesce.c b/osfmk/kern/cpu_quiesce.c index 977f5e50f..bd04dc7da 100644 --- a/osfmk/kern/cpu_quiesce.c +++ b/osfmk/kern/cpu_quiesce.c @@ -122,14 +122,15 @@ void cpu_quiescent_counter_set_min_interval_us(uint32_t new_value_us) { /* clamp to something vaguely sane */ - if (new_value_us > CPU_CHECKIN_MIN_INTERVAL_MAX_US) + if (new_value_us > CPU_CHECKIN_MIN_INTERVAL_MAX_US) { new_value_us = CPU_CHECKIN_MIN_INTERVAL_MAX_US; + } cpu_checkin_min_interval_us = new_value_us; uint64_t abstime = 0; clock_interval_to_absolutetime_interval(cpu_checkin_min_interval_us, - NSEC_PER_USEC, &abstime); + NSEC_PER_USEC, &abstime); cpu_checkin_min_interval = abstime; } @@ -179,10 +180,10 @@ cpu_quiescent_counter_join(__unused uint64_t ctime) __assert_only int cpuid = processor->cpu_id; assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_NONE || - processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_LEFT); + processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_LEFT); assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) & - (cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid))) == 0); + (cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid))) == 0); processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_PENDING_JOIN; @@ -207,7 +208,7 @@ cpu_quiescent_counter_ast(void) /* We had better not already be joined. */ assert((os_atomic_load(&cpu_quiescing_checkin_state, relaxed) & - (cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid))) == 0); + (cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid))) == 0); /* * No release barrier needed because we have no prior state to publish. 
@@ -230,9 +231,9 @@ cpu_quiescent_counter_ast(void) checkin_mask_t old_mask, new_mask; os_atomic_rmw_loop(&cpu_quiescing_checkin_state, old_mask, new_mask, acquire, { if (old_mask == 0) { - new_mask = old_mask | cpu_expected_bit(cpuid); + new_mask = old_mask | cpu_expected_bit(cpuid); } else { - new_mask = old_mask | cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid); + new_mask = old_mask | cpu_expected_bit(cpuid) | cpu_checked_in_bit(cpuid); } }); } @@ -255,7 +256,7 @@ cpu_quiescent_counter_leave(uint64_t ctime) int cpuid = processor->cpu_id; assert(processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_JOINED || - processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_PENDING_JOIN); + processor->cpu_quiesce_state == CPU_QUIESCE_COUNTER_PENDING_JOIN); /* We no longer need the cpu_quiescent_counter_ast callback to be armed */ ast_off(AST_UNQUIESCE); @@ -272,7 +273,7 @@ cpu_quiescent_counter_leave(uint64_t ctime) checkin_mask_t mask = cpu_checked_in_bit(cpuid) | cpu_expected_bit(cpuid); checkin_mask_t orig_state = os_atomic_and_orig(&cpu_quiescing_checkin_state, - ~mask, acq_rel); + ~mask, acq_rel); assert((orig_state & cpu_expected_bit(cpuid))); @@ -310,12 +311,14 @@ cpu_quiescent_counter_checkin(uint64_t ctime) assert(processor->cpu_quiesce_state != CPU_QUIESCE_COUNTER_NONE); /* If we're not joined yet, we don't need to check in */ - if (__probable(processor->cpu_quiesce_state != CPU_QUIESCE_COUNTER_JOINED)) + if (__probable(processor->cpu_quiesce_state != CPU_QUIESCE_COUNTER_JOINED)) { return; + } /* If we've checked in recently, we don't need to check in yet. */ - if (__probable((ctime - processor->cpu_quiesce_last_checkin) <= cpu_checkin_min_interval)) + if (__probable((ctime - processor->cpu_quiesce_last_checkin) <= cpu_checkin_min_interval)) { return; + } processor->cpu_quiesce_last_checkin = ctime; @@ -332,13 +335,13 @@ cpu_quiescent_counter_checkin(uint64_t ctime) } checkin_mask_t orig_state = os_atomic_or_orig(&cpu_quiescing_checkin_state, - cpu_checked_in_bit(cpuid), acq_rel); + cpu_checked_in_bit(cpuid), acq_rel); checkin_mask_t new_state = orig_state | cpu_checked_in_bit(cpuid); if (cpu_quiescent_counter_needs_commit(new_state)) { assertf(!cpu_quiescent_counter_needs_commit(orig_state), - "old: 0x%lx, new: 0x%lx", orig_state, new_state); + "old: 0x%lx, new: 0x%lx", orig_state, new_state); cpu_quiescent_counter_commit(ctime); } } @@ -361,4 +364,3 @@ cpu_quiescent_counter_assert_ast(void) assert((state & cpu_expected_bit(cpuid))); } #endif /* MACH_ASSERT */ - diff --git a/osfmk/kern/cpu_quiesce.h b/osfmk/kern/cpu_quiesce.h index 324a2b038..1c9537042 100644 --- a/osfmk/kern/cpu_quiesce.h +++ b/osfmk/kern/cpu_quiesce.h @@ -61,21 +61,38 @@ extern void cpu_quiescent_counter_set_min_interval_us(uint32_t new_value); /* stub routines for platforms without the counter */ -static inline void cpu_quiescent_counter_join(__unused uint64_t ctime) { } -static inline void cpu_quiescent_counter_leave(__unused uint64_t ctime) { } -static inline void cpu_quiescent_counter_checkin(__unused uint64_t ctime) { } -static inline void cpu_quiescent_counter_ast(void) { } -static inline void cpu_quiescent_counter_init(void) { } +static inline void +cpu_quiescent_counter_join(__unused uint64_t ctime) +{ +} +static inline void +cpu_quiescent_counter_leave(__unused uint64_t ctime) +{ +} +static inline void +cpu_quiescent_counter_checkin(__unused uint64_t ctime) +{ +} +static inline void +cpu_quiescent_counter_ast(void) +{ +} +static inline void +cpu_quiescent_counter_init(void) +{ +} #endif /* 
CONFIG_QUIESCE_COUNTER */ #if MACH_ASSERT && CONFIG_QUIESCE_COUNTER extern void cpu_quiescent_counter_assert_ast(void); #else -static inline void cpu_quiescent_counter_assert_ast(void) { } +static inline void +cpu_quiescent_counter_assert_ast(void) +{ +} #endif #endif /* XNU_KERNEL_PRIVATE */ #endif /* _KERN_CPU_QUIESCE_H_ */ - diff --git a/osfmk/kern/cs_blobs.h b/osfmk/kern/cs_blobs.h index cafafcaff..cf83fb96e 100644 --- a/osfmk/kern/cs_blobs.h +++ b/osfmk/kern/cs_blobs.h @@ -48,10 +48,10 @@ #define CS_ENTITLEMENTS_VALIDATED 0x00004000 /* code signature permits restricted entitlements */ #define CS_NVRAM_UNRESTRICTED 0x00008000 /* has com.apple.rootless.restricted-nvram-variables.heritable entitlement */ -#define CS_RUNTIME 0x00010000 /* Apply hardened runtime policies */ +#define CS_RUNTIME 0x00010000 /* Apply hardened runtime policies */ #define CS_ALLOWED_MACHO (CS_ADHOC | CS_HARD | CS_KILL | CS_CHECK_EXPIRATION | \ - CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV | CS_RUNTIME) + CS_RESTRICT | CS_ENFORCEMENT | CS_REQUIRE_LV | CS_RUNTIME) #define CS_EXEC_SET_HARD 0x00100000 /* set CS_HARD on any exec'ed process */ #define CS_EXEC_SET_KILL 0x00200000 /* set CS_KILL on any exec'ed process */ @@ -72,33 +72,33 @@ /* executable segment flags */ -#define CS_EXECSEG_MAIN_BINARY 0x1 /* executable segment denotes main binary */ -#define CS_EXECSEG_ALLOW_UNSIGNED 0x10 /* allow unsigned pages (for debugging) */ -#define CS_EXECSEG_DEBUGGER 0x20 /* main binary is debugger */ -#define CS_EXECSEG_JIT 0x40 /* JIT enabled */ -#define CS_EXECSEG_SKIP_LV 0x80 /* OBSOLETE: skip library validation */ -#define CS_EXECSEG_CAN_LOAD_CDHASH 0x100 /* can bless cdhash for execution */ -#define CS_EXECSEG_CAN_EXEC_CDHASH 0x200 /* can execute blessed cdhash */ +#define CS_EXECSEG_MAIN_BINARY 0x1 /* executable segment denotes main binary */ +#define CS_EXECSEG_ALLOW_UNSIGNED 0x10 /* allow unsigned pages (for debugging) */ +#define CS_EXECSEG_DEBUGGER 0x20 /* main binary is debugger */ +#define CS_EXECSEG_JIT 0x40 /* JIT enabled */ +#define CS_EXECSEG_SKIP_LV 0x80 /* OBSOLETE: skip library validation */ +#define CS_EXECSEG_CAN_LOAD_CDHASH 0x100 /* can bless cdhash for execution */ +#define CS_EXECSEG_CAN_EXEC_CDHASH 0x200 /* can execute blessed cdhash */ /* * Magic numbers used by Code Signing */ enum { - CSMAGIC_REQUIREMENT = 0xfade0c00, /* single Requirement blob */ - CSMAGIC_REQUIREMENTS = 0xfade0c01, /* Requirements vector (internal requirements) */ - CSMAGIC_CODEDIRECTORY = 0xfade0c02, /* CodeDirectory blob */ + CSMAGIC_REQUIREMENT = 0xfade0c00, /* single Requirement blob */ + CSMAGIC_REQUIREMENTS = 0xfade0c01, /* Requirements vector (internal requirements) */ + CSMAGIC_CODEDIRECTORY = 0xfade0c02, /* CodeDirectory blob */ CSMAGIC_EMBEDDED_SIGNATURE = 0xfade0cc0, /* embedded form of signature data */ - CSMAGIC_EMBEDDED_SIGNATURE_OLD = 0xfade0b02, /* XXX */ - CSMAGIC_EMBEDDED_ENTITLEMENTS = 0xfade7171, /* embedded entitlements */ + CSMAGIC_EMBEDDED_SIGNATURE_OLD = 0xfade0b02, /* XXX */ + CSMAGIC_EMBEDDED_ENTITLEMENTS = 0xfade7171, /* embedded entitlements */ CSMAGIC_DETACHED_SIGNATURE = 0xfade0cc1, /* multi-arch collection of embedded signatures */ - CSMAGIC_BLOBWRAPPER = 0xfade0b01, /* CMS Signature, among other things */ + CSMAGIC_BLOBWRAPPER = 0xfade0b01, /* CMS Signature, among other things */ CS_SUPPORTSSCATTER = 0x20100, CS_SUPPORTSTEAMID = 0x20200, CS_SUPPORTSCODELIMIT64 = 0x20300, CS_SUPPORTSEXECSEG = 0x20400, - CSSLOT_CODEDIRECTORY = 0, /* slot index for CodeDirectory */ + CSSLOT_CODEDIRECTORY = 0, /* slot 
index for CodeDirectory */ CSSLOT_INFOSLOT = 1, CSSLOT_REQUIREMENTS = 2, CSSLOT_RESOURCEDIR = 3, @@ -106,15 +106,15 @@ enum { CSSLOT_ENTITLEMENTS = 5, CSSLOT_ALTERNATE_CODEDIRECTORIES = 0x1000, /* first alternate CodeDirectory, if any */ - CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5, /* max number of alternate CD slots */ + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX = 5, /* max number of alternate CD slots */ CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT = CSSLOT_ALTERNATE_CODEDIRECTORIES + CSSLOT_ALTERNATE_CODEDIRECTORY_MAX, /* one past the last */ - CSSLOT_SIGNATURESLOT = 0x10000, /* CMS Signature */ + CSSLOT_SIGNATURESLOT = 0x10000, /* CMS Signature */ CSSLOT_IDENTIFICATIONSLOT = 0x10001, CSSLOT_TICKETSLOT = 0x10002, - CSTYPE_INDEX_REQUIREMENTS = 0x00000002, /* compat with amfi */ - CSTYPE_INDEX_ENTITLEMENTS = 0x00000005, /* compat with amfi */ + CSTYPE_INDEX_REQUIREMENTS = 0x00000002, /* compat with amfi */ + CSTYPE_INDEX_ENTITLEMENTS = 0x00000005, /* compat with amfi */ CS_HASHTYPE_SHA1 = 1, CS_HASHTYPE_SHA256 = 2, @@ -125,7 +125,7 @@ enum { CS_SHA256_LEN = 32, CS_SHA256_TRUNCATED_LEN = 20, - CS_CDHASH_LEN = 20, /* always - larger hashes are truncated */ + CS_CDHASH_LEN = 20, /* always - larger hashes are truncated */ CS_HASH_MAX_SIZE = 48, /* max size of the hash we'll support */ /* @@ -143,40 +143,40 @@ enum { * C form of a CodeDirectory. */ typedef struct __CodeDirectory { - uint32_t magic; /* magic number (CSMAGIC_CODEDIRECTORY) */ - uint32_t length; /* total length of CodeDirectory blob */ - uint32_t version; /* compatibility version */ - uint32_t flags; /* setup and mode flags */ - uint32_t hashOffset; /* offset of hash slot element at index zero */ - uint32_t identOffset; /* offset of identifier string */ - uint32_t nSpecialSlots; /* number of special hash slots */ - uint32_t nCodeSlots; /* number of ordinary (code) hash slots */ - uint32_t codeLimit; /* limit to main image signature range */ - uint8_t hashSize; /* size of each hash in bytes */ - uint8_t hashType; /* type of hash (cdHashType* constants) */ - uint8_t platform; /* platform identifier; zero if not platform binary */ - uint8_t pageSize; /* log2(page size in bytes); 0 => infinite */ - uint32_t spare2; /* unused (must be zero) */ + uint32_t magic; /* magic number (CSMAGIC_CODEDIRECTORY) */ + uint32_t length; /* total length of CodeDirectory blob */ + uint32_t version; /* compatibility version */ + uint32_t flags; /* setup and mode flags */ + uint32_t hashOffset; /* offset of hash slot element at index zero */ + uint32_t identOffset; /* offset of identifier string */ + uint32_t nSpecialSlots; /* number of special hash slots */ + uint32_t nCodeSlots; /* number of ordinary (code) hash slots */ + uint32_t codeLimit; /* limit to main image signature range */ + uint8_t hashSize; /* size of each hash in bytes */ + uint8_t hashType; /* type of hash (cdHashType* constants) */ + uint8_t platform; /* platform identifier; zero if not platform binary */ + uint8_t pageSize; /* log2(page size in bytes); 0 => infinite */ + uint32_t spare2; /* unused (must be zero) */ char end_earliest[0]; /* Version 0x20100 */ - uint32_t scatterOffset; /* offset of optional scatter vector */ + uint32_t scatterOffset; /* offset of optional scatter vector */ char end_withScatter[0]; /* Version 0x20200 */ - uint32_t teamOffset; /* offset of optional team identifier */ + uint32_t teamOffset; /* offset of optional team identifier */ char end_withTeam[0]; /* Version 0x20300 */ - uint32_t spare3; /* unused (must be zero) */ - uint64_t codeLimit64; /* limit to main image 
signature range, 64 bits */ + uint32_t spare3; /* unused (must be zero) */ + uint64_t codeLimit64; /* limit to main image signature range, 64 bits */ char end_withCodeLimit64[0]; /* Version 0x20400 */ - uint64_t execSegBase; /* offset of executable segment */ - uint64_t execSegLimit; /* limit of executable segment */ - uint64_t execSegFlags; /* executable segment flags */ + uint64_t execSegBase; /* offset of executable segment */ + uint64_t execSegLimit; /* limit of executable segment */ + uint64_t execSegFlags; /* executable segment flags */ char end_withExecSeg[0]; /* followed by dynamic content as located by offset fields above */ @@ -188,33 +188,33 @@ __attribute__ ((aligned(1))); */ typedef struct __BlobIndex { - uint32_t type; /* type of entry */ - uint32_t offset; /* offset of entry */ + uint32_t type; /* type of entry */ + uint32_t offset; /* offset of entry */ } CS_BlobIndex __attribute__ ((aligned(1))); typedef struct __SC_SuperBlob { - uint32_t magic; /* magic number */ - uint32_t length; /* total length of SuperBlob */ - uint32_t count; /* number of index entries following */ - CS_BlobIndex index[]; /* (count) entries */ + uint32_t magic; /* magic number */ + uint32_t length; /* total length of SuperBlob */ + uint32_t count; /* number of index entries following */ + CS_BlobIndex index[]; /* (count) entries */ /* followed by Blobs in no particular order as indicated by offsets in index */ } CS_SuperBlob __attribute__ ((aligned(1))); #define KERNEL_HAVE_CS_GENERICBLOB 1 typedef struct __SC_GenericBlob { - uint32_t magic; /* magic number */ - uint32_t length; /* total length of blob */ + uint32_t magic; /* magic number */ + uint32_t length; /* total length of blob */ char data[]; } CS_GenericBlob __attribute__ ((aligned(1))); typedef struct __SC_Scatter { - uint32_t count; // number of pages; zero for sentinel (only) - uint32_t base; // first page number - uint64_t targetOffset; // offset in target - uint64_t spare; // reserved + uint32_t count; // number of pages; zero for sentinel (only) + uint32_t base; // first page number + uint64_t targetOffset; // offset in target + uint64_t spare; // reserved } SC_Scatter __attribute__ ((aligned(1))); diff --git a/osfmk/kern/debug.c b/osfmk/kern/debug.c index d58ac47f7..dd7a28996 100644 --- a/osfmk/kern/debug.c +++ b/osfmk/kern/debug.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
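Stepping back to the cs_blobs.h structures above before the debug.c hunks continue: they are declared aligned(1) because they are parsed straight out of untrusted signature data, and all multi-byte fields are stored big-endian. A minimal sketch of locating the CodeDirectory inside an embedded signature follows; it assumes the CS_CodeDirectory typedef from the full header (the typedef name is cut off in the hunk above) and uses ntohl() where kernel code would use its own byte-swap primitives, with bounds checks reduced to the essentials.

#include <arpa/inet.h>	/* ntohl(); kernel code would use OSSwapBigToHostInt32 */
#include <stddef.h>
#include <stdint.h>

/* Walk a CS_SuperBlob and return the first CodeDirectory, or NULL. */
static const CS_CodeDirectory *
find_code_directory(const CS_SuperBlob *sb, size_t blob_size)
{
	if (blob_size < sizeof(*sb) ||
	    ntohl(sb->magic) != CSMAGIC_EMBEDDED_SIGNATURE) {
		return NULL;
	}
	uint32_t count = ntohl(sb->count);
	if (count > (blob_size - sizeof(*sb)) / sizeof(CS_BlobIndex)) {
		return NULL;	/* index table would overrun the blob */
	}
	for (uint32_t i = 0; i < count; i++) {
		uint32_t off = ntohl(sb->index[i].offset);
		if (off >= blob_size ||
		    blob_size - off < sizeof(CS_CodeDirectory)) {
			continue;	/* entry points outside the blob */
		}
		const CS_CodeDirectory *cd =
		    (const CS_CodeDirectory *)((const uint8_t *)sb + off);
		if (ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY) {
			return cd;
		}
	}
	return NULL;
}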
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -114,13 +114,16 @@ extern volatile struct xnu_hw_shmem_dbg_command_info *hwsd_info; extern int vsnprintf(char *, size_t, const char *, va_list); #endif -unsigned int halt_in_debugger = 0; -unsigned int current_debugger = 0; -unsigned int active_debugger = 0; -unsigned int panicDebugging = FALSE; -unsigned int kdebug_serial = FALSE; -unsigned int kernel_debugger_entry_count = 0; +unsigned int halt_in_debugger = 0; +unsigned int current_debugger = 0; +unsigned int active_debugger = 0; +unsigned int panicDebugging = FALSE; +unsigned int kdebug_serial = FALSE; +unsigned int kernel_debugger_entry_count = 0; +#if !defined (__x86_64__) +struct additional_panic_data_buffer *panic_data_buffers = NULL; +#endif #if defined(__arm__) #define TRAP_DEBUGGER __asm__ volatile("trap") @@ -136,9 +139,9 @@ unsigned int kernel_debugger_entry_count = 0; #endif #if defined(__i386__) || defined(__x86_64__) -#define panic_stop() pmCPUHalt(PM_HALT_PANIC) +#define panic_stop() pmCPUHalt(PM_HALT_PANIC) #else -#define panic_stop() panic_spin_forever() +#define panic_stop() panic_spin_forever() #endif #define CPUDEBUGGEROP PROCESSOR_DATA(current_processor(), debugger_state).db_current_op @@ -153,12 +156,12 @@ unsigned int kernel_debugger_entry_count = 0; #define CPUPANICCALLER PROCESSOR_DATA(current_processor(), debugger_state).db_panic_caller #if DEVELOPMENT || DEBUG -#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested) \ -MACRO_BEGIN \ - if (requested) { \ - volatile int *badpointer = (int *)4; \ - *badpointer = 0; \ - } \ +#define DEBUGGER_DEBUGGING_NESTED_PANIC_IF_REQUESTED(requested) \ +MACRO_BEGIN \ + if (requested) { \ + volatile int *badpointer = (int *)4; \ + *badpointer = 0; \ + } \ MACRO_END #endif /* DEVELOPMENT || DEBUG */ @@ -171,7 +174,7 @@ const char *debugger_message = NULL; unsigned long debugger_panic_caller = 0; void panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsigned int reason, void *ctx, - uint64_t panic_options_mask, void *panic_data, unsigned long panic_caller); + uint64_t panic_options_mask, void *panic_data, unsigned long panic_caller); static void kdp_machine_reboot_type(unsigned int type); __attribute__((noreturn)) void panic_spin_forever(void); extern kern_return_t do_stackshot(void); @@ -249,9 +252,9 @@ size_t panic_disk_error_description_size = sizeof(panic_disk_error_description); extern unsigned int write_trace_on_panic; int kext_assertions_enable = 
#if DEBUG || DEVELOPMENT - TRUE; + TRUE; #else - FALSE; + FALSE; #endif void @@ -275,10 +278,10 @@ panic_init(void) */ debug_boot_arg = 0; #if ((CONFIG_EMBEDDED && MACH_KDP) || defined(__x86_64__)) - if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg))) { + if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof(debug_boot_arg))) { #if DEVELOPMENT || DEBUG if (debug_boot_arg & DB_HALT) { - halt_in_debugger=1; + halt_in_debugger = 1; } #endif @@ -314,7 +317,6 @@ panic_init(void) debugger_is_panic = FALSE; } #endif - } #if defined (__x86_64__) @@ -375,7 +377,7 @@ DebuggerLock() return; } - while(!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) { + while (!atomic_compare_exchange_strong(&debugger_cpu, &debugger_exp_cpu, my_cpu)) { debugger_exp_cpu = DEBUGGER_NO_CPU; } @@ -424,8 +426,8 @@ DebuggerResumeOtherCores() static void DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_panic_str, - va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr, - boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller) + va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr, + boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller) { CPUDEBUGGEROP = db_op; @@ -438,8 +440,9 @@ DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_pani CPUPANICCALLER = db_panic_caller; } else if (CPUDEBUGGERCOUNT > 1 && db_panic_str != NULL) { kprintf("Nested panic detected:"); - if (db_panic_str != NULL) + if (db_panic_str != NULL) { _doprnt(db_panic_str, db_panic_args, PE_kputc, 0); + } } CPUDEBUGGERSYNC = db_proceed_on_sync_failure; @@ -457,15 +460,15 @@ DebuggerSaveState(debugger_op db_op, const char *db_message, const char *db_pani */ kern_return_t DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str, - va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr, - boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller) + va_list *db_panic_args, uint64_t db_panic_options, void *db_panic_data_ptr, + boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller) { kern_return_t ret; assert(ml_get_interrupts_enabled() == FALSE); DebuggerSaveState(db_op, db_message, db_panic_str, db_panic_args, - db_panic_options, db_panic_data_ptr, - db_proceed_on_sync_failure, db_panic_caller); + db_panic_options, db_panic_data_ptr, + db_proceed_on_sync_failure, db_panic_caller); TRAP_DEBUGGER; @@ -478,10 +481,10 @@ DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_ void __attribute__((noinline)) Assert( - const char *file, - int line, - const char *expression - ) + const char *file, + int line, + const char *expression + ) { if (!mach_assert) { kprintf("%s:%d non-fatal Assertion: %s", file, line, expression); @@ -500,7 +503,7 @@ Debugger(const char *message) void DebuggerWithContext(unsigned int reason, void *ctx, const char *message, - uint64_t debugger_options_mask) + uint64_t debugger_options_mask) { spl_t previous_interrupts_state; boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers; @@ -519,7 +522,7 @@ DebuggerWithContext(unsigned int reason, void *ctx, const char *message, if (!in_panic_kprintf) { in_panic_kprintf = TRUE; kprintf("Detected nested debugger entry count exceeding %d\n", - NESTEDDEBUGGERENTRYMAX); + NESTEDDEBUGGERENTRYMAX); in_panic_kprintf = FALSE; } @@ -538,13 +541,13 @@ DebuggerWithContext(unsigned int reason, void *ctx, const char *message, if (ctx != NULL) 
{ DebuggerSaveState(DBOP_DEBUGGER, message, - NULL, NULL, debugger_options_mask, NULL, TRUE, 0); + NULL, NULL, debugger_options_mask, NULL, TRUE, 0); handle_debugger_trap(reason, 0, 0, ctx); DebuggerSaveState(DBOP_NONE, NULL, NULL, - NULL, 0, NULL, FALSE, 0); + NULL, 0, NULL, FALSE, 0); } else { DebuggerTrapWithState(DBOP_DEBUGGER, message, - NULL, NULL, debugger_options_mask, NULL, TRUE, 0); + NULL, NULL, debugger_options_mask, NULL, TRUE, 0); } CPUDEBUGGERCOUNT--; @@ -570,8 +573,9 @@ kdp_register_callout(kdp_callout_fn_t fn, void * arg) struct kdp_callout * list_head; kcp = kalloc(sizeof(*kcp)); - if (kcp == NULL) + if (kcp == NULL) { panic("kdp_register_callout() kalloc failed"); + } kcp->callout_fn = fn; kcp->callout_arg = arg; @@ -587,7 +591,7 @@ kdp_register_callout(kdp_callout_fn_t fn, void * arg) static void kdp_callouts(kdp_event_t event) { - struct kdp_callout *kcp = kdp_callout_list; + struct kdp_callout *kcp = kdp_callout_list; while (kcp) { if (!kcp->callout_in_progress) { @@ -599,6 +603,46 @@ kdp_callouts(kdp_event_t event) } } +#if !defined (__x86_64__) +/* + * Register an additional buffer with data to include in the panic log + * + * tracks supporting more than one buffer + * + * Note that producer_name and buf should never be de-allocated as we reference these during panic. + */ +void +register_additional_panic_data_buffer(const char *producer_name, void *buf, int len) +{ + if (panic_data_buffers != NULL) { + panic("register_additional_panic_data_buffer called with buffer already registered"); + } + + if (producer_name == NULL || (strlen(producer_name) == 0)) { + panic("register_additional_panic_data_buffer called with invalid producer_name"); + } + + if (buf == NULL) { + panic("register_additional_panic_data_buffer called with invalid buffer pointer"); + } + + if ((len <= 0) || (len > ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN)) { + panic("register_additional_panic_data_buffer called with invalid length"); + } + + struct additional_panic_data_buffer *new_panic_data_buffer = kalloc(sizeof(struct additional_panic_data_buffer)); + new_panic_data_buffer->producer_name = producer_name; + new_panic_data_buffer->buf = buf; + new_panic_data_buffer->len = len; + + if (!OSCompareAndSwapPtr(NULL, new_panic_data_buffer, &panic_data_buffers)) { + panic("register_additional_panic_data_buffer called with buffer already registered"); + } + + return; +} +#endif /* !defined (__x86_64__) */ + /* * An overview of the xnu panic path: * @@ -628,7 +672,7 @@ panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_options_mas va_start(panic_str_args, str); panic_trap_to_debugger(str, &panic_str_args, reason, ctx, (debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK), - NULL, (unsigned long)(char *)__builtin_return_address(0)); + NULL, (unsigned long)(char *)__builtin_return_address(0)); va_end(panic_str_args); } @@ -653,10 +697,9 @@ panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_opti va_start(panic_str_args, str); panic_trap_to_debugger(str, &panic_str_args, reason, ctx, ((debugger_options_mask & ~DEBUGGER_INTERNAL_OPTIONS_MASK) | DEBUGGER_INTERNAL_OPTION_THREAD_BACKTRACE), - thread, (unsigned long)(char *)__builtin_return_address(0)); + thread, (unsigned long)(char *)__builtin_return_address(0)); va_end(panic_str_args); - } #endif /* defined (__x86_64__) */ @@ -664,10 +707,15 @@ panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_opti #pragma clang diagnostic ignored "-Wmissing-noreturn" void panic_trap_to_debugger(const char 
*panic_format_str, va_list *panic_args, unsigned int reason, void *ctx, - uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller) + uint64_t panic_options_mask, void *panic_data_ptr, unsigned long panic_caller) { #pragma clang diagnostic pop +#if defined(__x86_64__) && (DEVELOPMENT || DEBUG) + /* Turn off I/O tracing once we've panicked */ + mmiotrace_enabled = 0; +#endif + if (ml_wants_panic_trap_to_debugger()) { ml_panic_trap_to_debugger(panic_format_str, panic_args, reason, ctx, panic_options_mask, panic_caller); @@ -689,7 +737,7 @@ panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsign if (!in_panic_kprintf) { in_panic_kprintf = TRUE; kprintf("Detected nested debugger entry count exceeding %d\n", - NESTEDDEBUGGERENTRYMAX); + NESTEDDEBUGGERENTRYMAX); in_panic_kprintf = FALSE; } @@ -705,8 +753,9 @@ panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsign #endif #if CONFIG_EMBEDDED - if (PE_arm_debug_panic_hook) + if (PE_arm_debug_panic_hook) { PE_arm_debug_panic_hook(panic_format_str); + } #endif #if defined (__x86_64__) @@ -737,8 +786,8 @@ panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsign * state on the current CPU and then jump to handle_debugger_trap. */ DebuggerSaveState(DBOP_PANIC, "panic", - panic_format_str, panic_args, - panic_options_mask, panic_data_ptr, TRUE, panic_caller); + panic_format_str, panic_args, + panic_options_mask, panic_data_ptr, TRUE, panic_caller); handle_debugger_trap(reason, 0, 0, ctx); } @@ -746,11 +795,11 @@ panic_trap_to_debugger(const char *panic_format_str, va_list *panic_args, unsign /* * Signal to fastsim that it should open debug ports (nop on hardware) */ - __asm__ volatile("HINT 0x45"); + __asm__ volatile ("HINT 0x45"); #endif /* defined(__arm64__) */ DebuggerTrapWithState(DBOP_PANIC, "panic", panic_format_str, - panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller); + panic_args, panic_options_mask, panic_data_ptr, TRUE, panic_caller); /* * Not reached. @@ -764,7 +813,8 @@ panic_spin_forever() { paniclog_append_noflush("\nPlease go to https://panic.apple.com to report this panic\n"); - for (;;) { } + for (;;) { + } } static void @@ -814,7 +864,7 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned } if ((debugger_current_op == DBOP_PANIC) || - ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) { + ((debugger_current_op == DBOP_DEBUGGER) && debugger_is_panic)) { /* * Attempt to notify listeners once and only once that we've started * panicking. Only do this for Debugger() calls if we're treating @@ -879,8 +929,9 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned #endif /* DEBUGGER_OPTION_PANICLOGANDREBOOT is used for two finger resets on embedded so we get a paniclog */ - if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) + if (debugger_panic_options & DEBUGGER_OPTION_PANICLOGANDREBOOT) { PEHaltRestart(kPEPanicRestartCPU); + } } #if CONFIG_KDP_INTERACTIVE_DEBUGGING @@ -890,7 +941,7 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned * allows us to persist any data that's stored in the panic log. 
*/ if ((debugger_panic_options & DEBUGGER_OPTION_SKIP_LOCAL_COREDUMP) && - (debug_boot_arg & DB_REBOOT_POST_CORE)) { + (debug_boot_arg & DB_REBOOT_POST_CORE)) { kdp_machine_reboot_type(kPEPanicRestartCPU); } @@ -902,7 +953,7 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned if (!kdp_has_polled_corefile()) { if (debug_boot_arg & (DB_KERN_DUMP_ON_PANIC | DB_KERN_DUMP_ON_NMI)) { paniclog_append_noflush("skipping local kernel core because core file could not be opened prior to panic (error : 0x%x)", - kdp_polled_corefile_error()); + kdp_polled_corefile_error()); #if CONFIG_EMBEDDED panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED; paniclog_flush(); @@ -940,7 +991,7 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned * or if option to ignore failures is set. */ if ((debug_boot_arg & DB_REBOOT_POST_CORE) && - ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) { + ((ret == 0) || (debugger_panic_options & DEBUGGER_OPTION_ATTEMPTCOREDUMPANDREBOOT))) { kdp_machine_reboot_type(kPEPanicRestartCPU); } } @@ -956,11 +1007,11 @@ debugger_collect_diagnostics(unsigned int exception, unsigned int code, unsigned /* * Only return if we entered via Debugger and it's safe to return * (we halted the other cores successfully, this isn't a nested panic, etc) - */ + */ if (debugger_current_op == DBOP_DEBUGGER && - debugger_safe_to_return && - kernel_debugger_entry_count == 1 && - !debugger_is_panic) { + debugger_safe_to_return && + kernel_debugger_entry_count == 1 && + !debugger_is_panic) { return; } } @@ -1026,7 +1077,7 @@ handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int sub } else { /* Not safe to return from a nested panic/debugger call */ if (debugger_current_op == DBOP_PANIC || - debugger_current_op == DBOP_DEBUGGER) { + debugger_current_op == DBOP_DEBUGGER) { debugger_safe_to_return = FALSE; } @@ -1062,7 +1113,7 @@ handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int sub } else if (debugger_current_op == DBOP_STACKSHOT) { CPUDEBUGGERRET = do_stackshot(); #if PGO - } else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) { + } else if (debugger_current_op == DBOP_RESET_PGO_COUNTERS) { CPUDEBUGGERRET = do_pgo_reset_counters(); #endif } else { @@ -1100,18 +1151,19 @@ handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int sub return; } -__attribute__((noinline,not_tail_called)) -void log(__unused int level, char *fmt, ...) +__attribute__((noinline, not_tail_called)) +void +log(__unused int level, char *fmt, ...) { void *caller = __builtin_return_address(0); - va_list listp; - va_list listp2; + va_list listp; + va_list listp2; #ifdef lint level++; #endif /* lint */ -#ifdef MACH_BSD +#ifdef MACH_BSD va_start(listp, fmt); va_copy(listp2, listp); @@ -1138,39 +1190,40 @@ void log(__unused int level, char *fmt, ...) 
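For illustration, a hypothetical consumer of the register_additional_panic_data_buffer() call added above (non-x86_64 only, per the surrounding #if). The producer name "mydriver" and the buffer size are illustrative; as the hunk's comment requires, both the name and the buffer are static and never freed, because they are dereferenced at panic time.

#define MYDRIVER_PANIC_BUF_LEN 64	/* must not exceed ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN */

static char mydriver_panic_buf[MYDRIVER_PANIC_BUF_LEN];	/* static: read during panic */

static void
mydriver_init_panic_data(void)
{
	/* Stash whatever state should appear in the panic log. */
	snprintf(mydriver_panic_buf, sizeof(mydriver_panic_buf), "state=init");

	/* Panics on invalid arguments or if a buffer is already registered. */
	register_additional_panic_data_buffer("mydriver",
	    mydriver_panic_buf, sizeof(mydriver_panic_buf));
}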
*/ boolean_t -oslog_is_safe(void) { - return (kernel_debugger_entry_count == 0 && - not_in_kdp == 1 && - get_preemption_level() == 0 && - ml_get_interrupts_enabled() == TRUE); +oslog_is_safe(void) +{ + return kernel_debugger_entry_count == 0 && + not_in_kdp == 1 && + get_preemption_level() == 0 && + ml_get_interrupts_enabled() == TRUE; } boolean_t debug_mode_active(void) { - return ((0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp)); + return (0 != kernel_debugger_entry_count != 0) || (0 == not_in_kdp); } void debug_putc(char c) { if ((debug_buf_size != 0) && - ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) { - *debug_buf_ptr=c; + ((debug_buf_ptr - debug_buf_base) < (int)debug_buf_size)) { + *debug_buf_ptr = c; debug_buf_ptr++; } } #if defined (__x86_64__) struct pasc { - unsigned a: 7; - unsigned b: 7; - unsigned c: 7; - unsigned d: 7; - unsigned e: 7; - unsigned f: 7; - unsigned g: 7; - unsigned h: 7; + unsigned a: 7; + unsigned b: 7; + unsigned c: 7; + unsigned d: 7; + unsigned e: 7; + unsigned f: 7; + unsigned g: 7; + unsigned h: 7; } __attribute__((packed)); typedef struct pasc pasc_t; @@ -1182,25 +1235,24 @@ typedef struct pasc pasc_t; int packA(char *inbuf, uint32_t length, uint32_t buflen) { - unsigned int i, j = 0; - pasc_t pack; - - length = MIN(((length + 7) & ~7), buflen); - - for (i = 0; i < length; i+=8) - { - pack.a = inbuf[i]; - pack.b = inbuf[i+1]; - pack.c = inbuf[i+2]; - pack.d = inbuf[i+3]; - pack.e = inbuf[i+4]; - pack.f = inbuf[i+5]; - pack.g = inbuf[i+6]; - pack.h = inbuf[i+7]; - bcopy ((char *) &pack, inbuf + j, 7); - j += 7; - } - return j; + unsigned int i, j = 0; + pasc_t pack; + + length = MIN(((length + 7) & ~7), buflen); + + for (i = 0; i < length; i += 8) { + pack.a = inbuf[i]; + pack.b = inbuf[i + 1]; + pack.c = inbuf[i + 2]; + pack.d = inbuf[i + 3]; + pack.e = inbuf[i + 4]; + pack.f = inbuf[i + 5]; + pack.g = inbuf[i + 6]; + pack.h = inbuf[i + 7]; + bcopy((char *) &pack, inbuf + j, 7); + j += 7; + } + return j; } void @@ -1208,19 +1260,19 @@ unpackA(char *inbuf, uint32_t length) { pasc_t packs; unsigned i = 0; - length = (length * 8)/7; + length = (length * 8) / 7; while (i < length) { - packs = *(pasc_t *)&inbuf[i]; - bcopy(&inbuf[i+7], &inbuf[i+8], MAX(0, (int) (length - i - 8))); - inbuf[i++] = packs.a; - inbuf[i++] = packs.b; - inbuf[i++] = packs.c; - inbuf[i++] = packs.d; - inbuf[i++] = packs.e; - inbuf[i++] = packs.f; - inbuf[i++] = packs.g; - inbuf[i++] = packs.h; + packs = *(pasc_t *)&inbuf[i]; + bcopy(&inbuf[i + 7], &inbuf[i + 8], MAX(0, (int) (length - i - 8))); + inbuf[i++] = packs.a; + inbuf[i++] = packs.b; + inbuf[i++] = packs.c; + inbuf[i++] = packs.d; + inbuf[i++] = packs.e; + inbuf[i++] = packs.f; + inbuf[i++] = packs.g; + inbuf[i++] = packs.h; } } #endif /* defined (__x86_64__) */ @@ -1228,55 +1280,68 @@ unpackA(char *inbuf, uint32_t length) extern void *proc_name_address(void *p); static void -panic_display_process_name(void) { +panic_display_process_name(void) +{ /* because of scoping issues len(p_comm) from proc_t is hard coded here */ char proc_name[17] = "Unknown"; task_t ctask = 0; void *cbsd_info = 0; - if (ml_nofault_copy((vm_offset_t)¤t_thread()->task, (vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t)) - if(ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(cbsd_info)) == sizeof(cbsd_info)) - if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0)) + if (ml_nofault_copy((vm_offset_t)¤t_thread()->task, 
(vm_offset_t) &ctask, sizeof(task_t)) == sizeof(task_t)) { + if (ml_nofault_copy((vm_offset_t)&ctask->bsd_info, (vm_offset_t)&cbsd_info, sizeof(cbsd_info)) == sizeof(cbsd_info)) { + if (cbsd_info && (ml_nofault_copy((vm_offset_t) proc_name_address(cbsd_info), (vm_offset_t) &proc_name, sizeof(proc_name)) > 0)) { proc_name[sizeof(proc_name) - 1] = '\0'; + } + } + } paniclog_append_noflush("\nBSD process name corresponding to current thread: %s\n", proc_name); } unsigned -panic_active(void) { - return ((debugger_panic_str != (char *) 0)); +panic_active(void) +{ + return debugger_panic_str != (char *) 0; } void -populate_model_name(char *model_string) { +populate_model_name(char *model_string) +{ strlcpy(model_name, model_string, sizeof(model_name)); } void -panic_display_model_name(void) { +panic_display_model_name(void) +{ char tmp_model_name[sizeof(model_name)]; - if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) + if (ml_nofault_copy((vm_offset_t) &model_name, (vm_offset_t) &tmp_model_name, sizeof(model_name)) != sizeof(model_name)) { return; + } tmp_model_name[sizeof(tmp_model_name) - 1] = '\0'; - if (tmp_model_name[0] != 0) + if (tmp_model_name[0] != 0) { paniclog_append_noflush("System model name: %s\n", tmp_model_name); + } } void -panic_display_kernel_uuid(void) { +panic_display_kernel_uuid(void) +{ char tmp_kernel_uuid[sizeof(kernel_uuid_string)]; - if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) + if (ml_nofault_copy((vm_offset_t) &kernel_uuid_string, (vm_offset_t) &tmp_kernel_uuid, sizeof(kernel_uuid_string)) != sizeof(kernel_uuid_string)) { return; + } - if (tmp_kernel_uuid[0] != '\0') + if (tmp_kernel_uuid[0] != '\0') { paniclog_append_noflush("Kernel UUID: %s\n", tmp_kernel_uuid); + } } void -panic_display_kernel_aslr(void) { +panic_display_kernel_aslr(void) +{ if (vm_kernel_slide) { paniclog_append_noflush("Kernel slide: 0x%016lx\n", (unsigned long) vm_kernel_slide); paniclog_append_noflush("Kernel text base: %p\n", (void *) vm_kernel_stext); @@ -1284,23 +1349,25 @@ panic_display_kernel_aslr(void) { } void -panic_display_hibb(void) { +panic_display_hibb(void) +{ #if defined(__i386__) || defined (__x86_64__) paniclog_append_noflush("__HIB text base: %p\n", (void *) vm_hib_base); #endif } static void -panic_display_uptime(void) { - uint64_t uptime; +panic_display_uptime(void) +{ + uint64_t uptime; absolutetime_to_nanoseconds(mach_absolute_time(), &uptime); paniclog_append_noflush("\nSystem uptime in nanoseconds: %llu\n", uptime); } static void -panic_display_disk_errors(void) { - +panic_display_disk_errors(void) +{ if (panic_disk_error_description[0]) { panic_disk_error_description[sizeof(panic_disk_error_description) - 1] = '\0'; paniclog_append_noflush("Root disk errors: \"%s\"\n", panic_disk_error_description); @@ -1313,16 +1380,19 @@ extern char osversion[]; static volatile uint32_t config_displayed = 0; __private_extern__ void -panic_display_system_configuration(boolean_t launchd_exit) { - - if (!launchd_exit) panic_display_process_name(); +panic_display_system_configuration(boolean_t launchd_exit) +{ + if (!launchd_exit) { + panic_display_process_name(); + } if (OSCompareAndSwap(0, 1, &config_displayed)) { char buf[256]; - if (!launchd_exit && strlcpy(buf, PE_boot_args(), sizeof(buf))) + if (!launchd_exit && strlcpy(buf, PE_boot_args(), sizeof(buf))) { paniclog_append_noflush("Boot args: %s\n", buf); + } 
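The panic_display_* helpers being reformatted here all follow one defensive pattern: copy the datum into a local buffer with ml_nofault_copy(), verify the full length arrived, NUL-terminate, and only then print. A condensed sketch of that pattern, using the same kernel primitives as the functions above but with a hypothetical global some_global_string:

extern char some_global_string[32];	/* hypothetical datum to report */

static void
panic_display_some_string(void)
{
	char tmp[sizeof(some_global_string)];

	/* A short copy means the source was unmapped or unreadable; print
	 * nothing rather than risk a second fault inside the panic path. */
	if (ml_nofault_copy((vm_offset_t)&some_global_string, (vm_offset_t)&tmp,
	    sizeof(tmp)) != sizeof(tmp)) {
		return;
	}
	tmp[sizeof(tmp) - 1] = '\0';
	if (tmp[0] != '\0') {
		paniclog_append_noflush("Some string: %s\n", tmp);
	}
}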
paniclog_append_noflush("\nMac OS version:\n%s\n", (osversion[0] != 0) ? osversion : "Not yet set"); - paniclog_append_noflush("\nKernel version:\n%s\n",version); + paniclog_append_noflush("\nKernel version:\n%s\n", version); panic_display_kernel_uuid(); if (!launchd_exit) { panic_display_kernel_aslr(); @@ -1342,11 +1412,11 @@ panic_display_system_configuration(boolean_t launchd_exit) { } } -extern unsigned int stack_total; +extern unsigned int stack_total; extern unsigned long long stack_allocs; #if defined (__x86_64__) -extern unsigned int inuse_ptepages_count; +extern unsigned int inuse_ptepages_count; extern long long alloc_ptepages_count; #endif @@ -1357,24 +1427,23 @@ extern vm_size_t panic_kext_memory_size; __private_extern__ void panic_display_zprint() { - if(panic_include_zprint == TRUE) { - - unsigned int i; - struct zone zone_copy; + if (panic_include_zprint == TRUE) { + unsigned int i; + struct zone zone_copy; paniclog_append_noflush("%-20s %10s %10s\n", "Zone Name", "Cur Size", "Free Size"); for (i = 0; i < num_zones; i++) { - if(ml_nofault_copy((vm_offset_t)(&zone_array[i]), (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) { - if(zone_copy.cur_size > (1024*1024)) { - paniclog_append_noflush("%-20s %10lu %10lu\n",zone_copy.zone_name, (uintptr_t)zone_copy.cur_size,(uintptr_t)(zone_copy.countfree * zone_copy.elem_size)); + if (ml_nofault_copy((vm_offset_t)(&zone_array[i]), (vm_offset_t)&zone_copy, sizeof(struct zone)) == sizeof(struct zone)) { + if (zone_copy.cur_size > (1024 * 1024)) { + paniclog_append_noflush("%-20s %10lu %10lu\n", zone_copy.zone_name, (uintptr_t)zone_copy.cur_size, (uintptr_t)(zone_copy.countfree * zone_copy.elem_size)); } - } + } } paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks", (uintptr_t)(kernel_stack_size * stack_total)); #if defined (__x86_64__) - paniclog_append_noflush("%-20s %10lu\n", "PageTables",(uintptr_t)(PAGE_SIZE * inuse_ptepages_count)); + paniclog_append_noflush("%-20s %10lu\n", "PageTables", (uintptr_t)(PAGE_SIZE * inuse_ptepages_count)); #endif paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large", (uintptr_t)kalloc_large_total); @@ -1392,7 +1461,7 @@ panic_display_zprint() #if CONFIG_ECC_LOGGING __private_extern__ void -panic_display_ecc_errors() +panic_display_ecc_errors() { uint32_t count = ecc_log_get_correction_count(); @@ -1403,7 +1472,7 @@ panic_display_ecc_errors() #endif /* CONFIG_ECC_LOGGING */ #if CONFIG_ZLEAKS -extern boolean_t panic_include_ztrace; +extern boolean_t panic_include_ztrace; extern struct ztrace* top_ztrace; void panic_print_symbol_name(vm_address_t search); @@ -1414,18 +1483,18 @@ void panic_print_symbol_name(vm_address_t search); __private_extern__ void panic_display_ztrace(void) { - if(panic_include_ztrace == TRUE) { + if (panic_include_ztrace == TRUE) { unsigned int i = 0; - boolean_t keepsyms = FALSE; + boolean_t keepsyms = FALSE; - PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms)); + PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms)); struct ztrace top_ztrace_copy; - + /* Make sure not to trip another panic if there's something wrong with memory */ - if(ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) { + if (ml_nofault_copy((vm_offset_t)top_ztrace, (vm_offset_t)&top_ztrace_copy, sizeof(struct ztrace)) == sizeof(struct ztrace)) { paniclog_append_noflush("\nBacktrace suspected of leaking: (outstanding bytes: %lu)\n", (uintptr_t)top_ztrace_copy.zt_size); /* Print the backtrace addresses 
*/ - for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH) ; i++) { + for (i = 0; (i < top_ztrace_copy.zt_depth && i < MAX_ZTRACE_DEPTH); i++) { paniclog_append_noflush("%p ", top_ztrace_copy.zt_stack[i]); if (keepsyms) { panic_print_symbol_name((vm_address_t)top_ztrace_copy.zt_stack[i]); @@ -1434,8 +1503,7 @@ panic_display_ztrace(void) } /* Print any kexts in that backtrace, along with their link addresses so we can properly blame them */ kmod_panic_dump((vm_offset_t *)&top_ztrace_copy.zt_stack[0], top_ztrace_copy.zt_depth); - } - else { + } else { paniclog_append_noflush("\nCan't access top_ztrace...\n"); } paniclog_append_noflush("\n"); @@ -1455,7 +1523,9 @@ telemetry_gather(user_addr_t buffer __unused, uint32_t *length __unused, boolean uint32_t kern_feature_overrides = 0; -boolean_t kern_feature_override(uint32_t fmask) { +boolean_t +kern_feature_override(uint32_t fmask) +{ if (kern_feature_overrides == 0) { uint32_t fdisables = 0; /* Expected to be first invoked early, in a single-threaded @@ -1468,5 +1538,5 @@ boolean_t kern_feature_override(uint32_t fmask) { kern_feature_overrides |= KF_INITIALIZED; } } - return ((kern_feature_overrides & fmask) == fmask); + return (kern_feature_overrides & fmask) == fmask; } diff --git a/osfmk/kern/debug.h b/osfmk/kern/debug.h index 1ad189d54..7e82f1b34 100644 --- a/osfmk/kern/debug.h +++ b/osfmk/kern/debug.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
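kern_feature_override() above gives boot-time control over kernel features: a boot-arg mask is latched on first use, and any feature whose bits are fully present in the mask reports as overridden. A hypothetical call site follows; KF_EXAMPLE_OVRD is an illustrative bit, not one of the real KF_* masks.

#define KF_EXAMPLE_OVRD (1U << 7)	/* hypothetical feature bit */

void
example_feature_init(void)
{
	/* Skip bring-up entirely if the boot-arg mask disables this feature. */
	if (kern_feature_override(KF_EXAMPLE_OVRD)) {
		return;
	}
	/* ... normal feature initialization ... */
}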
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _KERN_DEBUG_H_ +#ifndef _KERN_DEBUG_H_ #define _KERN_DEBUG_H_ #include @@ -46,183 +46,179 @@ #ifdef __APPLE_API_UNSTABLE struct thread_snapshot { - uint32_t snapshot_magic; - uint32_t nkern_frames; - uint32_t nuser_frames; - uint64_t wait_event; - uint64_t continuation; - uint64_t thread_id; - uint64_t user_time; - uint64_t system_time; - int32_t state; - int32_t priority; /* static priority */ - int32_t sched_pri; /* scheduled (current) priority */ - int32_t sched_flags; /* scheduler flags */ - char ss_flags; - char ts_qos; /* effective qos */ - char ts_rqos; /* requested qos */ - char ts_rqos_override; /* requested qos override */ - char io_tier; - char _reserved[3]; /* pad for 4 byte alignement packing */ + uint32_t snapshot_magic; + uint32_t nkern_frames; + uint32_t nuser_frames; + uint64_t wait_event; + uint64_t continuation; + uint64_t thread_id; + uint64_t user_time; + uint64_t system_time; + int32_t state; + int32_t priority; /* static priority */ + int32_t sched_pri; /* scheduled (current) priority */ + int32_t sched_flags; /* scheduler flags */ + char ss_flags; + char ts_qos; /* effective qos */ + char ts_rqos; /* requested qos */ + char ts_rqos_override; /* requested qos override */ + char io_tier; + char _reserved[3]; /* pad for 4 byte alignement packing */ /* * I/O Statistics * XXX: These fields must be together */ - uint64_t disk_reads_count; - uint64_t disk_reads_size; - uint64_t disk_writes_count; - uint64_t disk_writes_size; - uint64_t io_priority_count[STACKSHOT_IO_NUM_PRIORITIES]; - uint64_t io_priority_size[STACKSHOT_IO_NUM_PRIORITIES]; - uint64_t paging_count; - uint64_t paging_size; - uint64_t non_paging_count; - uint64_t non_paging_size; - uint64_t data_count; - uint64_t data_size; - uint64_t metadata_count; - uint64_t metadata_size; + uint64_t disk_reads_count; + uint64_t disk_reads_size; + uint64_t disk_writes_count; + uint64_t disk_writes_size; + uint64_t io_priority_count[STACKSHOT_IO_NUM_PRIORITIES]; + uint64_t io_priority_size[STACKSHOT_IO_NUM_PRIORITIES]; + uint64_t paging_count; + uint64_t paging_size; + uint64_t non_paging_count; + uint64_t non_paging_size; + uint64_t data_count; + uint64_t data_size; + uint64_t metadata_count; + uint64_t metadata_size; /* XXX: I/O Statistics end */ - uint64_t voucher_identifier; /* obfuscated voucher identifier */ - uint64_t total_syscalls; - char pth_name[STACKSHOT_MAX_THREAD_NAME_SIZE]; - + uint64_t voucher_identifier; /* obfuscated voucher identifier */ + uint64_t total_syscalls; + char pth_name[STACKSHOT_MAX_THREAD_NAME_SIZE]; } __attribute__((packed)); /* old, non kcdata format */ struct task_snapshot { uint32_t snapshot_magic; int32_t pid; - uint64_t uniqueid; - uint64_t user_time_in_terminated_threads; - uint64_t system_time_in_terminated_threads; - uint8_t shared_cache_identifier[16]; - uint64_t shared_cache_slide; - uint32_t nloadinfos; - int suspend_count; - int task_size; /* pages */ - int faults; /* number of page faults */ - int pageins; /* number of actual pageins */ - int cow_faults; /* number of copy-on-write faults */ - uint32_t ss_flags; - uint64_t p_start_sec; /* from the bsd proc struct */ - uint64_t p_start_usec; /* from the bsd proc struct */ - - /* + uint64_t uniqueid; + uint64_t user_time_in_terminated_threads; + uint64_t system_time_in_terminated_threads; + uint8_t shared_cache_identifier[16]; + uint64_t shared_cache_slide; + uint32_t nloadinfos; + int suspend_count; + int task_size; /* pages */ + int faults; /* number of page faults */ + 
int pageins; /* number of actual pageins */ + int cow_faults; /* number of copy-on-write faults */ + uint32_t ss_flags; + uint64_t p_start_sec; /* from the bsd proc struct */ + uint64_t p_start_usec; /* from the bsd proc struct */ + + /* * We restrict ourselves to a statically defined * (current as of 2009) length for the * p_comm string, due to scoping issues (osfmk/bsd and user/kernel * binary compatibility). */ - char p_comm[17]; - uint32_t was_throttled; - uint32_t did_throttle; - uint32_t latency_qos; + char p_comm[17]; + uint32_t was_throttled; + uint32_t did_throttle; + uint32_t latency_qos; /* * I/O Statistics * XXX: These fields must be together. */ - uint64_t disk_reads_count; - uint64_t disk_reads_size; - uint64_t disk_writes_count; - uint64_t disk_writes_size; - uint64_t io_priority_count[STACKSHOT_IO_NUM_PRIORITIES]; - uint64_t io_priority_size[STACKSHOT_IO_NUM_PRIORITIES]; - uint64_t paging_count; - uint64_t paging_size; - uint64_t non_paging_count; - uint64_t non_paging_size; - uint64_t data_count; - uint64_t data_size; - uint64_t metadata_count; - uint64_t metadata_size; + uint64_t disk_reads_count; + uint64_t disk_reads_size; + uint64_t disk_writes_count; + uint64_t disk_writes_size; + uint64_t io_priority_count[STACKSHOT_IO_NUM_PRIORITIES]; + uint64_t io_priority_size[STACKSHOT_IO_NUM_PRIORITIES]; + uint64_t paging_count; + uint64_t paging_size; + uint64_t non_paging_count; + uint64_t non_paging_size; + uint64_t data_count; + uint64_t data_size; + uint64_t metadata_count; + uint64_t metadata_size; /* XXX: I/O Statistics end */ - uint32_t donating_pid_count; - + uint32_t donating_pid_count; } __attribute__ ((packed)); struct micro_snapshot { - uint32_t snapshot_magic; - uint32_t ms_cpu; /* cpu number this snapshot was recorded on */ - uint64_t ms_time; /* time at sample (seconds) */ - uint64_t ms_time_microsecs; - uint8_t ms_flags; - uint16_t ms_opaque_flags; /* managed by external entity, e.g. fdrmicrod */ + uint32_t snapshot_magic; + uint32_t ms_cpu; /* cpu number this snapshot was recorded on */ + uint64_t ms_time; /* time at sample (seconds) */ + uint64_t ms_time_microsecs; + uint8_t ms_flags; + uint16_t ms_opaque_flags; /* managed by external entity, e.g. fdrmicrod */ } __attribute__ ((packed)); /* * mirrors the dyld_cache_header struct defined in dyld_cache_format.h from dyld source code */ -struct _dyld_cache_header -{ - char magic[16]; // e.g. 
"dyld_v0 i386" - uint32_t mappingOffset; // file offset to first dyld_cache_mapping_info - uint32_t mappingCount; // number of dyld_cache_mapping_info entries - uint32_t imagesOffset; // file offset to first dyld_cache_image_info - uint32_t imagesCount; // number of dyld_cache_image_info entries - uint64_t dyldBaseAddress; // base address of dyld when cache was built - uint64_t codeSignatureOffset; // file offset of code signature blob - uint64_t codeSignatureSize; // size of code signature blob (zero means to end of file) - uint64_t slideInfoOffset; // file offset of kernel slid info - uint64_t slideInfoSize; // size of kernel slid info - uint64_t localSymbolsOffset; // file offset of where local symbols are stored - uint64_t localSymbolsSize; // size of local symbols information - uint8_t uuid[16]; // unique value for each shared cache file - uint64_t cacheType; // 0 for development, 1 for production - uint32_t branchPoolsOffset; // file offset to table of uint64_t pool addresses - uint32_t branchPoolsCount; // number of uint64_t entries - uint64_t accelerateInfoAddr; // (unslid) address of optimization info - uint64_t accelerateInfoSize; // size of optimization info - uint64_t imagesTextOffset; // file offset to first dyld_cache_image_text_info - uint64_t imagesTextCount; // number of dyld_cache_image_text_info entries - uint64_t dylibsImageGroupAddr; // (unslid) address of ImageGroup for dylibs in this cache - uint64_t dylibsImageGroupSize; // size of ImageGroup for dylibs in this cache - uint64_t otherImageGroupAddr; // (unslid) address of ImageGroup for other OS dylibs - uint64_t otherImageGroupSize; // size of oImageGroup for other OS dylibs - uint64_t progClosuresAddr; // (unslid) address of list of program launch closures - uint64_t progClosuresSize; // size of list of program launch closures - uint64_t progClosuresTrieAddr; // (unslid) address of trie of indexes into program launch closures - uint64_t progClosuresTrieSize; // size of trie of indexes into program launch closures - uint32_t platform; // platform number (macOS=1, etc) - uint32_t formatVersion : 8, // dyld3::closure::kFormatVersion - dylibsExpectedOnDisk : 1, // dyld should expect the dylib exists on disk and to compare inode/mtime to see if cache is valid - simulator : 1, // for simulator of specified platform - locallyBuiltCache : 1, // 0 for B&I built cache, 1 for locally built cache - padding : 21; // TBD +struct _dyld_cache_header { + char magic[16]; // e.g. 
"dyld_v0 i386" + uint32_t mappingOffset; // file offset to first dyld_cache_mapping_info + uint32_t mappingCount; // number of dyld_cache_mapping_info entries + uint32_t imagesOffset; // file offset to first dyld_cache_image_info + uint32_t imagesCount; // number of dyld_cache_image_info entries + uint64_t dyldBaseAddress; // base address of dyld when cache was built + uint64_t codeSignatureOffset;// file offset of code signature blob + uint64_t codeSignatureSize; // size of code signature blob (zero means to end of file) + uint64_t slideInfoOffset; // file offset of kernel slid info + uint64_t slideInfoSize; // size of kernel slid info + uint64_t localSymbolsOffset; // file offset of where local symbols are stored + uint64_t localSymbolsSize; // size of local symbols information + uint8_t uuid[16]; // unique value for each shared cache file + uint64_t cacheType; // 0 for development, 1 for production + uint32_t branchPoolsOffset; // file offset to table of uint64_t pool addresses + uint32_t branchPoolsCount; // number of uint64_t entries + uint64_t accelerateInfoAddr; // (unslid) address of optimization info + uint64_t accelerateInfoSize; // size of optimization info + uint64_t imagesTextOffset; // file offset to first dyld_cache_image_text_info + uint64_t imagesTextCount; // number of dyld_cache_image_text_info entries + uint64_t dylibsImageGroupAddr;// (unslid) address of ImageGroup for dylibs in this cache + uint64_t dylibsImageGroupSize;// size of ImageGroup for dylibs in this cache + uint64_t otherImageGroupAddr;// (unslid) address of ImageGroup for other OS dylibs + uint64_t otherImageGroupSize;// size of oImageGroup for other OS dylibs + uint64_t progClosuresAddr; // (unslid) address of list of program launch closures + uint64_t progClosuresSize; // size of list of program launch closures + uint64_t progClosuresTrieAddr;// (unslid) address of trie of indexes into program launch closures + uint64_t progClosuresTrieSize;// size of trie of indexes into program launch closures + uint32_t platform; // platform number (macOS=1, etc) + uint32_t formatVersion : 8,// dyld3::closure::kFormatVersion + dylibsExpectedOnDisk : 1, // dyld should expect the dylib exists on disk and to compare inode/mtime to see if cache is valid + simulator : 1, // for simulator of specified platform + locallyBuiltCache : 1, // 0 for B&I built cache, 1 for locally built cache + padding : 21; // TBD }; /* * mirrors the dyld_cache_image_text_info struct defined in dyld_cache_format.h from dyld source code */ -struct _dyld_cache_image_text_info -{ - uuid_t uuid; - uint64_t loadAddress; // unslid address of start of __TEXT - uint32_t textSegmentSize; - uint32_t pathOffset; // offset from start of cache file +struct _dyld_cache_image_text_info { + uuid_t uuid; + uint64_t loadAddress; // unslid address of start of __TEXT + uint32_t textSegmentSize; + uint32_t pathOffset; // offset from start of cache file }; enum micro_snapshot_flags { - kInterruptRecord = 0x1, - kTimerArmingRecord = 0x2, - kUserMode = 0x4, /* interrupted usermode, or armed by usermode */ - kIORecord = 0x8, - kPMIRecord = 0x10, + kInterruptRecord = 0x1, + kTimerArmingRecord = 0x2, + kUserMode = 0x4, /* interrupted usermode, or armed by usermode */ + kIORecord = 0x8, + kPMIRecord = 0x10, }; /* * Flags used in the following assortment of snapshots. 
*/ enum generic_snapshot_flags { - kUser64_p = 0x1, - kKernel64_p = 0x2 + kUser64_p = 0x1, + kKernel64_p = 0x2 }; #define VM_PRESSURE_TIME_WINDOW 5 /* seconds */ @@ -245,7 +241,7 @@ enum { STACKSHOT_ENABLE_BT_FAULTING = 0x20000, STACKSHOT_COLLECT_DELTA_SNAPSHOT = 0x40000, /* Include the layout of the system shared cache */ - STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT = 0x80000, + STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT = 0x80000, /* * Kernel consumers of stackshot (via stack_snapshot_from_kernel) can ask * that we try to take the stackshot lock, and fail if we don't get it. @@ -277,6 +273,8 @@ enum { boolean_t kern_feature_override(uint32_t fmask); +#define EMBEDDED_PANIC_HEADER_OSVERSION_LEN 32 + /* * Any updates to this header should be also updated in astris as it can not * grab this header from the SDK. @@ -299,12 +297,14 @@ struct embedded_panic_header { union { struct { uint64_t eph_x86_power_state:8, - eph_x86_efi_boot_state:8, - eph_x86_system_state:8, - eph_x86_unused_bits:40; + eph_x86_efi_boot_state:8, + eph_x86_system_state:8, + eph_x86_unused_bits:40; }; // anonymous struct to group the bitfields together. uint64_t eph_x86_do_not_use; /* Used for offsetof/sizeof when parsing header */ }; + char eph_os_version[EMBEDDED_PANIC_HEADER_OSVERSION_LEN]; + char eph_macos_version[EMBEDDED_PANIC_HEADER_OSVERSION_LEN]; } __attribute__((packed)); #define EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE 0x01 @@ -358,7 +358,7 @@ struct macos_panic_header { __BEGIN_DECLS -extern void panic(const char *string, ...) __printflike(1,2); +extern void panic(const char *string, ...) __printflike(1, 2); __END_DECLS @@ -374,7 +374,7 @@ __END_DECLS #if DKPR /* * For the DEBUG kernel, support the following: - * sysctl -w debug.kprint_syscall= + * sysctl -w debug.kprint_syscall= * sysctl -w debug.kprint_syscall_process= * should be an OR of the masks below * for UNIX, MACH, MDEP, or IPC. This debugging aid @@ -384,14 +384,14 @@ __END_DECLS */ extern int debug_kprint_syscall; extern int debug_kprint_current_process(const char **namep); -#define DEBUG_KPRINT_SYSCALL_PREDICATE_INTERNAL(mask, namep) \ +#define DEBUG_KPRINT_SYSCALL_PREDICATE_INTERNAL(mask, namep) \ ( (debug_kprint_syscall & (mask)) && debug_kprint_current_process(namep) ) -#define DEBUG_KPRINT_SYSCALL_MASK(mask, fmt, args...) do { \ - const char *dks_name = NULL; \ - if (DEBUG_KPRINT_SYSCALL_PREDICATE_INTERNAL(mask, &dks_name)) { \ - kprintf("[%s%s%p]" fmt, dks_name ? dks_name : "", \ - dks_name ? "@" : "", current_thread(), args); \ - } \ +#define DEBUG_KPRINT_SYSCALL_MASK(mask, fmt, args...) do { \ + const char *dks_name = NULL; \ + if (DEBUG_KPRINT_SYSCALL_PREDICATE_INTERNAL(mask, &dks_name)) { \ + kprintf("[%s%s%p]" fmt, dks_name ? dks_name : "", \ + dks_name ? "@" : "", current_thread(), args); \ + } \ } while (0) #else /* !DEBUG */ #define DEBUG_KPRINT_SYSCALL_PREDICATE_INTERNAL(mask, namep) (0) @@ -405,45 +405,45 @@ enum { DEBUG_KPRINT_SYSCALL_IPC_MASK = 1 << 3 }; -#define DEBUG_KPRINT_SYSCALL_PREDICATE(mask) \ +#define DEBUG_KPRINT_SYSCALL_PREDICATE(mask) \ DEBUG_KPRINT_SYSCALL_PREDICATE_INTERNAL(mask, NULL) -#define DEBUG_KPRINT_SYSCALL_UNIX(fmt, args...) \ +#define DEBUG_KPRINT_SYSCALL_UNIX(fmt, args...) \ DEBUG_KPRINT_SYSCALL_MASK(DEBUG_KPRINT_SYSCALL_UNIX_MASK,fmt,args) -#define DEBUG_KPRINT_SYSCALL_MACH(fmt, args...) \ +#define DEBUG_KPRINT_SYSCALL_MACH(fmt, args...) \ DEBUG_KPRINT_SYSCALL_MASK(DEBUG_KPRINT_SYSCALL_MACH_MASK,fmt,args) -#define DEBUG_KPRINT_SYSCALL_MDEP(fmt, args...) \ +#define DEBUG_KPRINT_SYSCALL_MDEP(fmt, args...) 
\ DEBUG_KPRINT_SYSCALL_MASK(DEBUG_KPRINT_SYSCALL_MDEP_MASK,fmt,args) -#define DEBUG_KPRINT_SYSCALL_IPC(fmt, args...) \ +#define DEBUG_KPRINT_SYSCALL_IPC(fmt, args...) \ DEBUG_KPRINT_SYSCALL_MASK(DEBUG_KPRINT_SYSCALL_IPC_MASK,fmt,args) /* Debug boot-args */ -#define DB_HALT 0x1 +#define DB_HALT 0x1 //#define DB_PRT 0x2 -- obsolete -#define DB_NMI 0x4 -#define DB_KPRT 0x8 -#define DB_KDB 0x10 +#define DB_NMI 0x4 +#define DB_KPRT 0x8 +#define DB_KDB 0x10 #define DB_ARP 0x40 #define DB_KDP_BP_DIS 0x80 //#define DB_LOG_PI_SCRN 0x100 -- obsolete #define DB_KDP_GETC_ENA 0x200 -#define DB_KERN_DUMP_ON_PANIC 0x400 /* Trigger core dump on panic*/ -#define DB_KERN_DUMP_ON_NMI 0x800 /* Trigger core dump on NMI */ -#define DB_DBG_POST_CORE 0x1000 /*Wait in debugger after NMI core */ -#define DB_PANICLOG_DUMP 0x2000 /* Send paniclog on panic,not core*/ -#define DB_REBOOT_POST_CORE 0x4000 /* Attempt to reboot after - * post-panic crashdump/paniclog - * dump. - */ -#define DB_NMI_BTN_ENA 0x8000 /* Enable button to directly trigger NMI */ -#define DB_PRT_KDEBUG 0x10000 /* kprintf KDEBUG traces */ +#define DB_KERN_DUMP_ON_PANIC 0x400 /* Trigger core dump on panic*/ +#define DB_KERN_DUMP_ON_NMI 0x800 /* Trigger core dump on NMI */ +#define DB_DBG_POST_CORE 0x1000 /*Wait in debugger after NMI core */ +#define DB_PANICLOG_DUMP 0x2000 /* Send paniclog on panic,not core*/ +#define DB_REBOOT_POST_CORE 0x4000 /* Attempt to reboot after + * post-panic crashdump/paniclog + * dump. + */ +#define DB_NMI_BTN_ENA 0x8000 /* Enable button to directly trigger NMI */ +#define DB_PRT_KDEBUG 0x10000 /* kprintf KDEBUG traces */ #define DB_DISABLE_LOCAL_CORE 0x20000 /* ignore local kernel core dump support */ #define DB_DISABLE_GZIP_CORE 0x40000 /* don't gzip kernel core dumps */ #define DB_DISABLE_CROSS_PANIC 0x80000 /* x86 only - don't trigger cross panics. Only - * necessary to enable x86 kernel debugging on - * configs with a dev-fused co-processor running - * release bridgeOS. - */ + * necessary to enable x86 kernel debugging on + * configs with a dev-fused co-processor running + * release bridgeOS. 
+ */ #define DB_REBOOT_ALWAYS 0x100000 /* Don't wait for debugger connection */ /* @@ -481,11 +481,16 @@ void panic_with_options(unsigned int reason, void *ctx, uint64_t debugger_option void Debugger(const char * message); void populate_model_name(char *); +#if !defined (__x86_64__) +/* Note that producer_name and buf should never be de-allocated as we reference these during panic */ +void register_additional_panic_data_buffer(const char *producer_name, void *buf, int len); +#endif + unsigned panic_active(void); __END_DECLS -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ #if XNU_KERNEL_PRIVATE @@ -495,6 +500,17 @@ struct thread; void panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, struct thread* th, const char *str, ...); #endif +/* limit the max size to a reasonable length */ +#define ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN 64 + +struct additional_panic_data_buffer { + const char *producer_name; + void *buf; + int len; +}; + +extern struct additional_panic_data_buffer *panic_data_buffers; + boolean_t oslog_is_safe(void); boolean_t debug_mode_active(void); boolean_t stackshot_active(void); @@ -518,7 +534,7 @@ extern "C" { #endif kern_return_t stack_snapshot_from_kernel(int pid, void *buf, uint32_t size, uint32_t flags, - uint64_t delta_since_timestamp, unsigned *bytes_traced); + uint64_t delta_since_timestamp, unsigned *bytes_traced); #ifdef __cplusplus } #endif @@ -529,16 +545,16 @@ extern boolean_t coprocessor_paniclog_flush; extern boolean_t extended_debug_log_enabled;; #endif /* !CONFIG_EMBEDDED */ -extern char *debug_buf_base; +extern char *debug_buf_base; -extern char kernel_uuid_string[]; -extern char panic_disk_error_description[]; -extern size_t panic_disk_error_description_size; +extern char kernel_uuid_string[]; +extern char panic_disk_error_description[]; +extern size_t panic_disk_error_description_size; -extern unsigned char *kernel_uuid; -extern unsigned int debug_boot_arg; +extern unsigned char *kernel_uuid; +extern unsigned int debug_boot_arg; #if DEVELOPMENT || DEBUG -extern boolean_t debug_boot_arg_inited; +extern boolean_t debug_boot_arg_inited; #endif extern boolean_t kernelcache_uuid_valid; @@ -549,38 +565,38 @@ extern uuid_string_t kernelcache_uuid_string; extern "C" { #endif -extern boolean_t doprnt_hide_pointers; +extern boolean_t doprnt_hide_pointers; #ifdef __cplusplus } #endif -extern unsigned int halt_in_debugger; /* pending halt in debugger after boot */ +extern unsigned int halt_in_debugger; /* pending halt in debugger after boot */ extern unsigned int current_debugger; #define NO_CUR_DB 0x0 #define KDP_CUR_DB 0x1 -extern unsigned int active_debugger; -extern unsigned int kernel_debugger_entry_count; +extern unsigned int active_debugger; +extern unsigned int kernel_debugger_entry_count; -extern unsigned int panicDebugging; -extern unsigned int kdebug_serial; +extern unsigned int panicDebugging; +extern unsigned int kdebug_serial; -extern const char *debugger_panic_str; +extern const char *debugger_panic_str; extern char *debug_buf_ptr; extern unsigned int debug_buf_size; -extern void debug_log_init(void); -extern void debug_putc(char); +extern void debug_log_init(void); +extern void debug_putc(char); -extern void panic_init(void); +extern void panic_init(void); #if defined (__x86_64__) extern void extended_debug_log_init(void); -int packA(char *inbuf, uint32_t length, uint32_t buflen); -void unpackA(char *inbuf, uint32_t length); +int packA(char *inbuf, uint32_t length, uint32_t buflen); +void unpackA(char 
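[Editor's aside: the additional panic data buffer introduced above is walked at panic time, so the header's comment warns that the producer name and buffer must never be deallocated. A hedged sketch of a driver-side registration — the driver hook, producer string, and buffer contents are illustrative; only the prototype and the 64-byte cap come from the header above.]

#include <stdio.h>

#define MY_PANIC_BUF_LEN 64  /* equals ADDITIONAL_PANIC_DATA_BUFFER_MAX_LEN above */

/* Prototype as declared above (non-x86_64 configs only). */
void register_additional_panic_data_buffer(const char *producer_name, void *buf, int len);

/* Static storage: neither the name nor the buffer may ever be freed,
 * since the panic path references them directly. */
static char my_panic_buf[MY_PANIC_BUF_LEN];

void my_driver_register_panic_buf(void);  /* hypothetical driver hook */

void
my_driver_register_panic_buf(void)
{
	snprintf(my_panic_buf, sizeof(my_panic_buf), "state=%d", 0);
	register_additional_panic_data_buffer("my_driver", my_panic_buf,
	    (int)sizeof(my_panic_buf));
}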
*inbuf, uint32_t length); #if DEVELOPMENT || DEBUG #define PANIC_STACKSHOT_BUFSIZE (1024 * 1024) @@ -590,19 +606,19 @@ extern size_t panic_stackshot_len; #endif /* DEVELOPMENT || DEBUG */ #endif /* defined (__x86_64__) */ -void SavePanicInfo(const char *message, void *panic_data, uint64_t panic_options); +void SavePanicInfo(const char *message, void *panic_data, uint64_t panic_options); void paniclog_flush(void); -void panic_display_system_configuration(boolean_t launchd_exit); -void panic_display_zprint(void); -void panic_display_kernel_aslr(void); -void panic_display_hibb(void); -void panic_display_model_name(void); -void panic_display_kernel_uuid(void); +void panic_display_system_configuration(boolean_t launchd_exit); +void panic_display_zprint(void); +void panic_display_kernel_aslr(void); +void panic_display_hibb(void); +void panic_display_model_name(void); +void panic_display_kernel_uuid(void); #if CONFIG_ZLEAKS -void panic_display_ztrace(void); +void panic_display_ztrace(void); #endif /* CONFIG_ZLEAKS */ #if CONFIG_ECC_LOGGING -void panic_display_ecc_errors(void); +void panic_display_ecc_errors(void); #endif /* CONFIG_ECC_LOGGING */ /* @@ -625,7 +641,7 @@ typedef enum { } debugger_op; kern_return_t DebuggerTrapWithState(debugger_op db_op, const char *db_message, const char *db_panic_str, va_list *db_panic_args, - uint64_t db_panic_options, void *db_panic_data_ptr, boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller); + uint64_t db_panic_options, void *db_panic_data_ptr, boolean_t db_proceed_on_sync_failure, unsigned long db_panic_caller); void handle_debugger_trap(unsigned int exception, unsigned int code, unsigned int subcode, void *state); void DebuggerWithContext(unsigned int reason, void *ctx, const char *message, uint64_t debugger_options_mask); @@ -633,18 +649,17 @@ void DebuggerWithContext(unsigned int reason, void *ctx, const char *message, ui #if DEBUG || DEVELOPMENT /* leak pointer scan definitions */ -enum -{ - kInstanceFlagAddress = 0x01UL, - kInstanceFlagReferenced = 0x02UL, - kInstanceFlags = 0x03UL +enum{ + kInstanceFlagAddress = 0x01UL, + kInstanceFlagReferenced = 0x02UL, + kInstanceFlags = 0x03UL }; #define INSTANCE_GET(x) ((x) & ~kInstanceFlags) #define INSTANCE_PUT(x) ((x) ^ ~kInstanceFlags) typedef void (*leak_site_proc)(void * refCon, uint32_t siteCount, uint32_t zoneSize, - uintptr_t * backtrace, uint32_t btCount); + uintptr_t * backtrace, uint32_t btCount); #ifdef __cplusplus extern "C" { @@ -666,4 +681,4 @@ kdp_is_in_zone(void *addr, const char *zone_name); #endif /* DEBUG || DEVELOPMENT */ #endif /* XNU_KERNEL_PRIVATE */ -#endif /* _KERN_DEBUG_H_ */ +#endif /* _KERN_DEBUG_H_ */ diff --git a/osfmk/kern/ecc.h b/osfmk/kern/ecc.h index f37bf64bc..4e13838d0 100644 --- a/osfmk/kern/ecc.h +++ b/osfmk/kern/ecc.h @@ -9,7 +9,7 @@ __BEGIN_DECLS * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -18,10 +18,10 @@ __BEGIN_DECLS * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
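[Editor's aside: the leak-scan enum above tags instance pointers by stashing two flag bits in the low bits of an aligned address, and INSTANCE_GET() masks them back off. A tiny self-contained demonstration of that tagging scheme; the sample address is made up.]

#include <stdint.h>
#include <stdio.h>

enum {
	kInstanceFlagAddress    = 0x01UL,
	kInstanceFlagReferenced = 0x02UL,
	kInstanceFlags          = 0x03UL
};

#define INSTANCE_GET(x) ((x) & ~kInstanceFlags)

int
main(void)
{
	/* Allocations are at least 4-byte aligned, so the low 2 bits are free. */
	uintptr_t addr = 0x1000;
	uintptr_t tagged = addr | kInstanceFlagAddress | kInstanceFlagReferenced;

	printf("tagged=0x%lx untagged=0x%lx referenced=%d\n",
	    (unsigned long)tagged, (unsigned long)INSTANCE_GET(tagged),
	    (tagged & kInstanceFlagReferenced) != 0);
	return 0;
}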
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -29,27 +29,27 @@ __BEGIN_DECLS * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#define ECC_EVENT_INFO_DATA_ENTRIES 8 +#define ECC_EVENT_INFO_DATA_ENTRIES 8 struct ecc_event { - uint8_t id; // ID of memory (e.g. L2C), platform-specific - uint8_t count; // Of uint64_t's used, starting at index 0 - uint64_t data[ECC_EVENT_INFO_DATA_ENTRIES] __attribute__((aligned(8))); // Event-specific data + uint8_t id; // ID of memory (e.g. L2C), platform-specific + uint8_t count; // Of uint64_t's used, starting at index 0 + uint64_t data[ECC_EVENT_INFO_DATA_ENTRIES] __attribute__((aligned(8))); // Event-specific data }; #ifdef KERNEL_PRIVATE -extern kern_return_t ecc_log_record_event(const struct ecc_event *ev); -#endif +extern kern_return_t ecc_log_record_event(const struct ecc_event *ev); +#endif #ifdef XNU_KERNEL_PRIVATE -extern void ecc_log_init(void); -extern kern_return_t ecc_log_get_next_event(struct ecc_event *ev); -extern uint32_t ecc_log_get_correction_count(void); -#endif +extern void ecc_log_init(void); +extern kern_return_t ecc_log_get_next_event(struct ecc_event *ev); +extern uint32_t ecc_log_get_correction_count(void); +#endif __END_DECLS diff --git a/osfmk/kern/ecc_logging.c b/osfmk/kern/ecc_logging.c index d62ab817c..eb227cb6e 100644 --- a/osfmk/kern/ecc_logging.c +++ b/osfmk/kern/ecc_logging.c @@ -1,30 +1,30 @@ /* -* Copyright (c) 2013 Apple Inc. All rights reserved. -* -* @APPLE_OSREFERENCE_LICENSE_HEADER_START@ -* -* This file contains Original Code and/or Modifications of Original Code -* as defined in and that are subject to the Apple Public Source License -* Version 2.0 (the 'License'). You may not use this file except in -* compliance with the License. The rights granted to you under the License -* may not be used to create, or enable the creation or redistribution of, -* unlawful or unlicensed copies of an Apple operating system, or to -* circumvent, violate, or enable the circumvention or violation of, any -* terms of an Apple operating system software license agreement. -* -* Please obtain a copy of the License at -* http://www.opensource.apple.com/apsl/ and read it before using this file. -* -* The Original Code and all software distributed under the License are -* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER -* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, -* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, -* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. -* Please see the License for the specific language governing rights and -* limitations under the License. -* -* @APPLE_OSREFERENCE_LICENSE_HEADER_END@ -*/ + * Copyright (c) 2013 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ #include #include #include @@ -39,14 +39,14 @@ * * Circular buffer of events. When we fill up, drop data. */ -#define ECC_EVENT_BUFFER_COUNT 5 -struct ecc_event ecc_data[ECC_EVENT_BUFFER_COUNT]; -static uint32_t ecc_data_next_read; -static uint32_t ecc_data_next_write; -static boolean_t ecc_data_empty = TRUE; // next read == next write : empty or full? -static lck_grp_t *ecc_data_lock_group; -static lck_spin_t ecc_data_lock; -static uint32_t ecc_correction_count; +#define ECC_EVENT_BUFFER_COUNT 5 +struct ecc_event ecc_data[ECC_EVENT_BUFFER_COUNT]; +static uint32_t ecc_data_next_read; +static uint32_t ecc_data_next_write; +static boolean_t ecc_data_empty = TRUE; // next read == next write : empty or full? +static lck_grp_t *ecc_data_lock_group; +static lck_spin_t ecc_data_lock; +static uint32_t ecc_correction_count; void ecc_log_init() @@ -76,7 +76,7 @@ ecc_log_record_event(const struct ecc_event *ev) ecc_correction_count++; - if (ecc_data_next_read == ecc_data_next_write && !ecc_data_empty) { + if (ecc_data_next_read == ecc_data_next_write && !ecc_data_empty) { lck_spin_unlock(&ecc_data_lock); splx(x); return KERN_FAILURE; @@ -102,7 +102,7 @@ ecc_log_get_next_event(struct ecc_event *ev) x = splhigh(); lck_spin_lock(&ecc_data_lock); - if (ecc_data_empty) { + if (ecc_data_empty) { assert(ecc_data_next_write == ecc_data_next_read); lck_spin_unlock(&ecc_data_lock); diff --git a/osfmk/kern/energy_perf.c b/osfmk/kern/energy_perf.c index 20e42be88..9c62ec4e9 100644 --- a/osfmk/kern/energy_perf.c +++ b/osfmk/kern/energy_perf.c @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
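[Editor's aside: the ECC event log above is a ring buffer that disambiguates full from empty with a flag (ecc_data_empty) instead of wasting a slot, which is why record-event fails with KERN_FAILURE when read == write and the flag is clear. The same discipline in miniature — plain C, no locking, illustrative element type.]

#include <stdbool.h>
#include <stdint.h>

#define RING_COUNT 5

static uint64_t ring[RING_COUNT];
static uint32_t next_read, next_write;
static bool ring_empty = true;  /* read == write means empty *or* full */

/* Returns false when the buffer is full, mirroring KERN_FAILURE above. */
static bool
ring_put(uint64_t v)
{
	if (next_read == next_write && !ring_empty) {
		return false;           /* full: drop the event */
	}
	ring[next_write] = v;
	next_write = (next_write + 1) % RING_COUNT;
	ring_empty = false;
	return true;
}

static bool
ring_get(uint64_t *out)
{
	if (ring_empty) {
		return false;           /* nothing queued */
	}
	*out = ring[next_read];
	next_read = (next_read + 1) % RING_COUNT;
	ring_empty = (next_read == next_write);
	return true;
}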
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -32,24 +32,32 @@ #include #include -void gpu_describe(__unused gpu_descriptor_t gdesc) { +void +gpu_describe(__unused gpu_descriptor_t gdesc) +{ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ENERGY_PERF, 1), gdesc->gpu_id, gdesc->gpu_max_domains, 0, 0, 0); } -uint64_t gpu_accumulate_time(__unused uint32_t scope, __unused uint32_t gpu_id, __unused uint32_t gpu_domain, __unused uint64_t gpu_accumulated_ns, __unused uint64_t gpu_tstamp_ns) { +uint64_t +gpu_accumulate_time(__unused uint32_t scope, __unused uint32_t gpu_id, __unused uint32_t gpu_domain, __unused uint64_t gpu_accumulated_ns, __unused uint64_t gpu_tstamp_ns) +{ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ENERGY_PERF, 2), scope, gpu_id, gpu_domain, gpu_accumulated_ns, gpu_tstamp_ns); ml_gpu_stat_update(gpu_accumulated_ns); return 0; } -static uint64_t io_rate_update_cb_default(__unused uint64_t io_rate_flags, __unused uint64_t read_ops_delta, __unused uint64_t write_ops_delta, __unused uint64_t read_bytes_delta, __unused uint64_t write_bytes_delta) { +static uint64_t +io_rate_update_cb_default(__unused uint64_t io_rate_flags, __unused uint64_t read_ops_delta, __unused uint64_t write_ops_delta, __unused uint64_t read_bytes_delta, __unused uint64_t write_bytes_delta) +{ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_ENERGY_PERF, 3), io_rate_flags, read_ops_delta, write_ops_delta, read_bytes_delta, write_bytes_delta); return 0; } io_rate_update_callback_t io_rate_update_cb = io_rate_update_cb_default; -void io_rate_update_register(io_rate_update_callback_t io_rate_update_cb_new) { +void +io_rate_update_register(io_rate_update_callback_t io_rate_update_cb_new) +{ if (io_rate_update_cb_new != NULL) { io_rate_update_cb = io_rate_update_cb_new; } else { @@ -57,17 +65,23 @@ void io_rate_update_register(io_rate_update_callback_t io_rate_update_cb_new) { } } -uint64_t io_rate_update(uint64_t io_rate_flags, uint64_t read_ops_delta, uint64_t write_ops_delta, uint64_t read_bytes_delta, uint64_t write_bytes_delta) { +uint64_t +io_rate_update(uint64_t io_rate_flags, uint64_t read_ops_delta, uint64_t write_ops_delta, uint64_t read_bytes_delta, uint64_t write_bytes_delta) +{ return io_rate_update_cb(io_rate_flags, read_ops_delta, write_ops_delta, read_bytes_delta, write_bytes_delta); } -static uint64_t gpu_set_fceiling_cb_default(__unused uint32_t gfr, __unused uint64_t gfp) { +static uint64_t +gpu_set_fceiling_cb_default(__unused uint32_t gfr, __unused uint64_t gfp) +{ return 0ULL; } gpu_set_fceiling_t gpu_set_fceiling_cb = gpu_set_fceiling_cb_default; -void gpu_fceiling_cb_register(gpu_set_fceiling_t gnewcb) { +void +gpu_fceiling_cb_register(gpu_set_fceiling_t gnewcb) +{ if (gnewcb != NULL) { gpu_set_fceiling_cb = gnewcb; } else { @@ -75,12 +89,13 @@ void gpu_fceiling_cb_register(gpu_set_fceiling_t gnewcb) { } } -void gpu_submission_telemetry( +void +gpu_submission_telemetry( __unused uint64_t gpu_ncmds, __unused uint64_t gpu_noutstanding_avg, __unused uint64_t gpu_busy_ns_total, __unused uint64_t gpu_cycles, __unused uint64_t gpu_telemetry_valid_flags, - __unused uint64_t gpu_telemetry_misc) { - + __unused uint64_t gpu_telemetry_misc) +{ } diff --git a/osfmk/kern/energy_perf.h b/osfmk/kern/energy_perf.h index c6b1d40a1..78267daf5 100644 --- a/osfmk/kern/energy_perf.h +++ b/osfmk/kern/energy_perf.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. 
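[Editor's aside: energy_perf.c above uses a simple registration pattern — a function-pointer global initialized to a no-op default, with NULL meaning "restore the default", as in io_rate_update_register() and gpu_fceiling_cb_register(). A minimal standalone sketch of the pattern; the callback signature is simplified.]

#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*io_cb_t)(uint64_t flags);

static uint64_t
io_cb_default(uint64_t flags)
{
	(void)flags;
	return 0;   /* no-op until a real consumer registers */
}

static io_cb_t io_cb = io_cb_default;

/* NULL unregisters by restoring the default, as in io_rate_update_register(). */
static void
io_cb_register(io_cb_t newcb)
{
	io_cb = (newcb != NULL) ? newcb : io_cb_default;
}

static uint64_t
my_cb(uint64_t flags)
{
	printf("io flags 0x%llx\n", (unsigned long long)flags);
	return 1;
}

int
main(void)
{
	io_cb_register(my_cb);
	io_cb(0x2);           /* dispatches to my_cb */
	io_cb_register(NULL); /* back to the default */
	return 0;
}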
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/kern/exc_guard.h b/osfmk/kern/exc_guard.h index dbdb3f193..18ec56256 100644 --- a/osfmk/kern/exc_guard.h +++ b/osfmk/kern/exc_guard.h @@ -2,7 +2,7 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * Copyright (c) 1988 Carnegie-Mellon University @@ -96,7 +96,7 @@ * +----------------------------------------------------------------+ */ -#define GUARD_TYPE_FD 0x2 /* guarded file descriptor */ +#define GUARD_TYPE_FD 0x2 /* guarded file descriptor */ /* * User generated guards use the exception codes this: @@ -112,7 +112,7 @@ * +----------------------------------------------------------------+ */ -#define GUARD_TYPE_USER 0x3 /* Userland assertions */ +#define GUARD_TYPE_USER 0x3 /* Userland assertions */ /* * Vnode guards use the exception codes like this: @@ -128,7 +128,7 @@ * +----------------------------------------------------------------+ */ -#define GUARD_TYPE_VN 0x4 /* guarded vnode */ +#define GUARD_TYPE_VN 0x4 /* guarded vnode */ /* * VM guards use the exception codes like this: @@ -144,7 +144,7 @@ * +----------------------------------------------------------------+ */ -#define GUARD_TYPE_VIRT_MEMORY 0x5 /* VM operation violating guard */ +#define GUARD_TYPE_VIRT_MEMORY 0x5 /* VM operation violating guard */ #ifdef KERNEL @@ -153,7 +153,7 @@ #define EXC_GUARD_ENCODE_FLAVOR(code, flavor) \ ((code) |= (((uint64_t)(flavor) & 0x1fffffffull) << 32)) #define EXC_GUARD_ENCODE_TARGET(code, target) \ - ((code) |= (((uint64_t)(target) & 0xffffffffull))) + ((code) |= (((uint64_t)(target) & 0xffffffffull))) #endif /* KERNEL */ diff --git a/osfmk/kern/exc_resource.h b/osfmk/kern/exc_resource.h index 21d0d0b6d..5b16b8db2 100644 --- a/osfmk/kern/exc_resource.h +++ b/osfmk/kern/exc_resource.h @@ -2,7 +2,7 @@ * Copyright (c) 2011-2012 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * Copyright (c) 1988 Carnegie-Mellon University @@ -58,15 +58,15 @@ (((code) >> 58) & 0x7ULL) /* EXC_RESOURCE Types */ -#define RESOURCE_TYPE_CPU 1 -#define RESOURCE_TYPE_WAKEUPS 2 -#define RESOURCE_TYPE_MEMORY 3 -#define RESOURCE_TYPE_IO 4 -#define RESOURCE_TYPE_THREADS 5 +#define RESOURCE_TYPE_CPU 1 +#define RESOURCE_TYPE_WAKEUPS 2 +#define RESOURCE_TYPE_MEMORY 3 +#define RESOURCE_TYPE_IO 4 +#define RESOURCE_TYPE_THREADS 5 /* RESOURCE_TYPE_CPU flavors */ -#define FLAVOR_CPU_MONITOR 1 -#define FLAVOR_CPU_MONITOR_FATAL 2 +#define FLAVOR_CPU_MONITOR 1 +#define FLAVOR_CPU_MONITOR_FATAL 2 /* * RESOURCE_TYPE_CPU exception code & subcode. 
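[Editor's aside: an EXC_GUARD code is one 64-bit word — guard type in the top bits, a 29-bit flavor at bit 32, a 32-bit target in the low word. The FLAVOR and TARGET encoders below are copied from the header above; the TYPE encoder is an assumption reconstructed from the header's bit diagram (type in bits 61..63). A worked sketch with made-up flavor/target values.]

#include <inttypes.h>
#include <stdio.h>

#define GUARD_TYPE_FD 0x2

/* ENCODE_FLAVOR/ENCODE_TARGET copied from the header; ENCODE_TYPE assumed
 * from the documented layout (type in bits 61..63). */
#define EXC_GUARD_ENCODE_TYPE(code, type) \
	((code) |= (((uint64_t)(type) & 0x7ull) << 61))
#define EXC_GUARD_ENCODE_FLAVOR(code, flavor) \
	((code) |= (((uint64_t)(flavor) & 0x1fffffffull) << 32))
#define EXC_GUARD_ENCODE_TARGET(code, target) \
	((code) |= (((uint64_t)(target) & 0xffffffffull)))

int
main(void)
{
	uint64_t code = 0;
	EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_FD);
	EXC_GUARD_ENCODE_FLAVOR(code, 1);   /* illustrative flavor bit */
	EXC_GUARD_ENCODE_TARGET(code, 7);   /* e.g. the guarded fd number */
	printf("EXC_GUARD code = 0x%016" PRIx64 "\n", code);
	return 0;
}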
@@ -100,7 +100,7 @@ /* RESOURCE_TYPE_WAKEUPS flavors */ -#define FLAVOR_WAKEUPS_MONITOR 1 +#define FLAVOR_WAKEUPS_MONITOR 1 /* * RESOURCE_TYPE_WAKEUPS exception code & subcode. @@ -134,7 +134,7 @@ ((subcode) & 0xFFFFFULL) /* RESOURCE_TYPE_MEMORY flavors */ -#define FLAVOR_HIGH_WATERMARK 1 +#define FLAVOR_HIGH_WATERMARK 1 /* * RESOURCE_TYPE_MEMORY / FLAVOR_HIGH_WATERMARK @@ -162,13 +162,13 @@ ((code) & 0x1FFFULL) /* RESOURCE_TYPE_IO flavors */ -#define FLAVOR_IO_PHYSICAL_WRITES 1 -#define FLAVOR_IO_LOGICAL_WRITES 2 +#define FLAVOR_IO_PHYSICAL_WRITES 1 +#define FLAVOR_IO_LOGICAL_WRITES 2 /* * RESOURCE_TYPE_IO exception code & subcode. * - * This is sent by the kernel when a task crosses its + * This is sent by the kernel when a task crosses its * I/O limits. * * code: @@ -189,11 +189,11 @@ /* RESOURCE_TYPE_IO decoding macros */ #define EXC_RESOURCE_IO_DECODE_INTERVAL(code) \ - (((code) >> 15) & 0x1FFFFULL) + (((code) >> 15) & 0x1FFFFULL) #define EXC_RESOURCE_IO_DECODE_LIMIT(code) \ - ((code) & 0x7FFFULL) + ((code) & 0x7FFFULL) #define EXC_RESOURCE_IO_OBSERVED(subcode) \ - ((subcode) & 0x7FFFULL) + ((subcode) & 0x7FFFULL) /* diff --git a/osfmk/kern/exception.c b/osfmk/kern/exception.c index 9a67b727b..d6d6ffbf2 100644 --- a/osfmk/kern/exception.c +++ b/osfmk/kern/exception.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
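[Editor's aside: decoding an EXC_RESOURCE code is pure bit surgery. A sketch pulling the I/O interval and limit out of a code word with the macros above — the sample code/subcode values are made up, and a real code word also carries type and flavor in its high bits, omitted here.]

#include <stdint.h>
#include <stdio.h>

/* Decoders copied from exc_resource.h above. */
#define EXC_RESOURCE_IO_DECODE_INTERVAL(code) (((code) >> 15) & 0x1FFFFULL)
#define EXC_RESOURCE_IO_DECODE_LIMIT(code)    ((code) & 0x7FFFULL)
#define EXC_RESOURCE_IO_OBSERVED(subcode)     ((subcode) & 0x7FFFULL)

int
main(void)
{
	/* Illustrative values only: interval 30, limit 75, observed 91. */
	uint64_t code = (30ULL << 15) | 75ULL;
	uint64_t subcode = 91ULL;

	printf("interval=%llu limit=%llu observed=%llu\n",
	    (unsigned long long)EXC_RESOURCE_IO_DECODE_INTERVAL(code),
	    (unsigned long long)EXC_RESOURCE_IO_DECODE_LIMIT(code),
	    (unsigned long long)EXC_RESOURCE_IO_OBSERVED(subcode));
	return 0;
}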
*/ @@ -103,23 +103,23 @@ unsigned long c_tsk_exc_raise_state_id = 0; /* forward declarations */ kern_return_t exception_deliver( - thread_t thread, - exception_type_t exception, - mach_exception_data_t code, + thread_t thread, + exception_type_t exception, + mach_exception_data_t code, mach_msg_type_number_t codeCnt, struct exception_action *excp, - lck_mtx_t *mutex); + lck_mtx_t *mutex); static kern_return_t check_exc_receiver_dependency( - exception_type_t exception, - struct exception_action *excp, + exception_type_t exception, + struct exception_action *excp, lck_mtx_t *mutex); #ifdef MACH_BSD kern_return_t bsd_exception( - exception_type_t exception, - mach_exception_data_t code, + exception_type_t exception, + mach_exception_data_t code, mach_msg_type_number_t codeCnt); #endif /* MACH_BSD */ @@ -135,21 +135,21 @@ kern_return_t bsd_exception( * Returns: * KERN_SUCCESS if the exception was handled */ -kern_return_t +kern_return_t exception_deliver( - thread_t thread, - exception_type_t exception, - mach_exception_data_t code, + thread_t thread, + exception_type_t exception, + mach_exception_data_t code, mach_msg_type_number_t codeCnt, struct exception_action *excp, - lck_mtx_t *mutex) + lck_mtx_t *mutex) { - ipc_port_t exc_port = IPC_PORT_NULL; - exception_data_type_t small_code[EXCEPTION_CODE_MAX]; - int code64; - int behavior; - int flavor; - kern_return_t kr; + ipc_port_t exc_port = IPC_PORT_NULL; + exception_data_type_t small_code[EXCEPTION_CODE_MAX]; + int code64; + int behavior; + int flavor; + kern_return_t kr; task_t task; ipc_port_t thread_port = IPC_PORT_NULL, task_port = IPC_PORT_NULL; @@ -157,19 +157,22 @@ exception_deliver( * Save work if we are terminating. * Just go back to our AST handler. */ - if (!thread->active && !thread->inspection) + if (!thread->active && !thread->inspection) { return KERN_SUCCESS; + } /* * If there are no exception actions defined for this entity, * we can't deliver here. 
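[Editor's aside: exception_deliver() below dispatches on the registered behavior — EXCEPTION_STATE sends thread state only, EXCEPTION_DEFAULT sends thread/task ports, EXCEPTION_STATE_IDENTITY sends both. A toy dispatcher showing the shape of that switch; the enum values follow mach/exception_types.h, and the strings are just labels.]

#include <stdio.h>

enum behavior {
	EXCEPTION_DEFAULT        = 1,
	EXCEPTION_STATE          = 2,
	EXCEPTION_STATE_IDENTITY = 3
};

static const char *
deliver_kind(enum behavior b)
{
	switch (b) {
	case EXCEPTION_STATE:
		return "raise_state: thread state, no ports";
	case EXCEPTION_DEFAULT:
		return "raise: thread and task ports, no state";
	case EXCEPTION_STATE_IDENTITY:
		return "raise_state_identity: ports and state";
	default:
		return "bad exception behavior!";
	}
}

int
main(void)
{
	printf("%s\n", deliver_kind(EXCEPTION_STATE_IDENTITY));
	return 0;
}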
*/ - if (excp == NULL) + if (excp == NULL) { return KERN_FAILURE; + } assert(exception < EXC_TYPES_COUNT); - if (exception >= EXC_TYPES_COUNT) + if (exception >= EXC_TYPES_COUNT) { return KERN_FAILURE; + } excp = &excp[exception]; @@ -192,7 +195,7 @@ exception_deliver( lck_mtx_unlock(mutex); return KERN_FAILURE; } - ip_reference(exc_port); + ip_reference(exc_port); exc_port->ip_srights++; ip_unlock(exc_port); @@ -228,15 +231,13 @@ exception_deliver( if (behavior != EXCEPTION_STATE) { if (thread != current_thread() || exception == EXC_CORPSE_NOTIFY) { - task_reference(task); task_port = convert_task_to_port(task); /* task ref consumed */ thread_reference(thread); thread_port = convert_thread_to_port(thread); /* thread ref consumed */ - } - else { + } else { task_port = retrieve_task_self_fast(thread->task); thread_port = retrieve_thread_self_fast(thread); } @@ -250,33 +251,33 @@ exception_deliver( c_thr_exc_raise_state++; state_cnt = _MachineStateCount[flavor]; kr = thread_getstatus_to_user(thread, flavor, - (thread_state_t)state, - &state_cnt); + (thread_state_t)state, + &state_cnt); if (kr == KERN_SUCCESS) { if (code64) { - kr = mach_exception_raise_state(exc_port, - exception, - code, - codeCnt, - &flavor, - state, state_cnt, - state, &state_cnt); + kr = mach_exception_raise_state(exc_port, + exception, + code, + codeCnt, + &flavor, + state, state_cnt, + state, &state_cnt); } else { kr = exception_raise_state(exc_port, exception, - small_code, - codeCnt, - &flavor, - state, state_cnt, - state, &state_cnt); + small_code, + codeCnt, + &flavor, + state, state_cnt, + state, &state_cnt); } if (kr == KERN_SUCCESS) { - if (exception != EXC_CORPSE_NOTIFY) + if (exception != EXC_CORPSE_NOTIFY) { kr = thread_setstatus_from_user(thread, flavor, - (thread_state_t)state, - state_cnt); + (thread_state_t)state, + state_cnt); + } goto out_release_right; } - } goto out_release_right; @@ -286,18 +287,18 @@ exception_deliver( c_thr_exc_raise++; if (code64) { kr = mach_exception_raise(exc_port, - thread_port, - task_port, - exception, - code, - codeCnt); + thread_port, + task_port, + exception, + code, + codeCnt); } else { kr = exception_raise(exc_port, - thread_port, - task_port, - exception, - small_code, - codeCnt); + thread_port, + task_port, + exception, + small_code, + codeCnt); } goto out_release_right; @@ -309,48 +310,48 @@ exception_deliver( c_thr_exc_raise_state_id++; state_cnt = _MachineStateCount[flavor]; kr = thread_getstatus_to_user(thread, flavor, - (thread_state_t)state, - &state_cnt); + (thread_state_t)state, + &state_cnt); if (kr == KERN_SUCCESS) { if (code64) { kr = mach_exception_raise_state_identity( - exc_port, - thread_port, - task_port, - exception, - code, - codeCnt, - &flavor, - state, state_cnt, - state, &state_cnt); + exc_port, + thread_port, + task_port, + exception, + code, + codeCnt, + &flavor, + state, state_cnt, + state, &state_cnt); } else { kr = exception_raise_state_identity(exc_port, - thread_port, - task_port, - exception, - small_code, - codeCnt, - &flavor, - state, state_cnt, - state, &state_cnt); + thread_port, + task_port, + exception, + small_code, + codeCnt, + &flavor, + state, state_cnt, + state, &state_cnt); } if (kr == KERN_SUCCESS) { - if (exception != EXC_CORPSE_NOTIFY) + if (exception != EXC_CORPSE_NOTIFY) { kr = thread_setstatus_from_user(thread, flavor, - (thread_state_t)state, - state_cnt); + (thread_state_t)state, + state_cnt); + } goto out_release_right; } - } goto out_release_right; } default: - panic ("bad exception behavior!"); - return KERN_FAILURE; 
+ panic("bad exception behavior!"); + return KERN_FAILURE; }/* switch */ out_release_right: @@ -392,16 +393,18 @@ check_exc_receiver_dependency( { kern_return_t retval = KERN_SUCCESS; - if (excp == NULL || exception != EXC_CRASH) + if (excp == NULL || exception != EXC_CRASH) { return retval; + } task_t task = current_task(); lck_mtx_lock(mutex); ipc_port_t xport = excp[exception].port; - if ( IP_VALID(xport) - && ip_active(xport) - && task->itk_space == xport->ip_receiver) + if (IP_VALID(xport) + && ip_active(xport) + && task->itk_space == xport->ip_receiver) { retval = KERN_FAILURE; + } lck_mtx_unlock(mutex); return retval; } @@ -422,15 +425,15 @@ check_exc_receiver_dependency( */ kern_return_t exception_triage_thread( - exception_type_t exception, - mach_exception_data_t code, + exception_type_t exception, + mach_exception_data_t code, mach_msg_type_number_t codeCnt, - thread_t thread) + thread_t thread) { - task_t task; - host_priv_t host_priv; - lck_mtx_t *mutex; - kern_return_t kr = KERN_FAILURE; + task_t task; + host_priv_t host_priv; + lck_mtx_t *mutex; + kern_return_t kr = KERN_FAILURE; assert(exception != EXC_RPC_ALERT); @@ -450,11 +453,11 @@ exception_triage_thread( * Try to raise the exception at the activation level. */ mutex = &thread->mutex; - if (KERN_SUCCESS == check_exc_receiver_dependency(exception, thread->exc_actions, mutex)) - { + if (KERN_SUCCESS == check_exc_receiver_dependency(exception, thread->exc_actions, mutex)) { kr = exception_deliver(thread, exception, code, codeCnt, thread->exc_actions, mutex); - if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) + if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) { goto out; + } } /* @@ -462,11 +465,11 @@ exception_triage_thread( */ task = thread->task; mutex = &task->itk_lock_data; - if (KERN_SUCCESS == check_exc_receiver_dependency(exception, task->exc_actions, mutex)) - { + if (KERN_SUCCESS == check_exc_receiver_dependency(exception, task->exc_actions, mutex)) { kr = exception_deliver(thread, exception, code, codeCnt, task->exc_actions, mutex); - if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) + if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) { goto out; + } } /* @@ -475,17 +478,18 @@ exception_triage_thread( host_priv = host_priv_self(); mutex = &host_priv->lock; - if (KERN_SUCCESS == check_exc_receiver_dependency(exception, host_priv->exc_actions, mutex)) - { + if (KERN_SUCCESS == check_exc_receiver_dependency(exception, host_priv->exc_actions, mutex)) { kr = exception_deliver(thread, exception, code, codeCnt, host_priv->exc_actions, mutex); - if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) + if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) { goto out; + } } out: if ((exception != EXC_CRASH) && (exception != EXC_RESOURCE) && - (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) + (exception != EXC_GUARD) && (exception != EXC_CORPSE_NOTIFY)) { thread_exception_return(); + } return kr; } @@ -504,8 +508,8 @@ out: */ kern_return_t exception_triage( - exception_type_t exception, - mach_exception_data_t code, + exception_type_t exception, + mach_exception_data_t code, mach_msg_type_number_t codeCnt) { thread_t thread = current_thread(); @@ -514,14 +518,14 @@ exception_triage( kern_return_t bsd_exception( - exception_type_t exception, - mach_exception_data_t code, + exception_type_t exception, + mach_exception_data_t code, mach_msg_type_number_t codeCnt) { - task_t task; - lck_mtx_t *mutex; - thread_t self = current_thread(); - kern_return_t kr; + task_t task; + lck_mtx_t *mutex; + thread_t self = 
current_thread(); + kern_return_t kr; /* * Maybe the task level will handle it. @@ -531,9 +535,10 @@ bsd_exception( kr = exception_deliver(self, exception, code, codeCnt, task->exc_actions, mutex); - if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) - return(KERN_SUCCESS); - return(KERN_FAILURE); + if (kr == KERN_SUCCESS || kr == MACH_RCV_PORT_DIED) { + return KERN_SUCCESS; + } + return KERN_FAILURE; } @@ -541,11 +546,12 @@ bsd_exception( * Raise an exception on a task. * This should tell launchd to launch Crash Reporter for this task. */ -kern_return_t task_exception_notify(exception_type_t exception, - mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode) +kern_return_t +task_exception_notify(exception_type_t exception, + mach_exception_data_type_t exccode, mach_exception_data_type_t excsubcode) { - mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; - wait_interrupt_t wsave; + mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; + wait_interrupt_t wsave; kern_return_t kr = KERN_SUCCESS; code[0] = exccode; @@ -562,39 +568,38 @@ kern_return_t task_exception_notify(exception_type_t exception, * Handle interface for special performance monitoring * This is a special case of the host exception handler */ -kern_return_t sys_perf_notify(thread_t thread, int pid) +kern_return_t +sys_perf_notify(thread_t thread, int pid) { - host_priv_t hostp; - ipc_port_t xport; - wait_interrupt_t wsave; - kern_return_t ret; + host_priv_t hostp; + ipc_port_t xport; + wait_interrupt_t wsave; + kern_return_t ret; - hostp = host_priv_self(); /* Get the host privileged ports */ - mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; - code[0] = 0xFF000001; /* Set terminate code */ - code[1] = pid; /* Pass out the pid */ + hostp = host_priv_self(); /* Get the host privileged ports */ + mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; + code[0] = 0xFF000001; /* Set terminate code */ + code[1] = pid; /* Pass out the pid */ struct task *task = thread->task; - xport = hostp->exc_actions[EXC_RPC_ALERT].port; + xport = hostp->exc_actions[EXC_RPC_ALERT].port; /* Make sure we're not catching our own exception */ if (!IP_VALID(xport) || - !ip_active(xport) || - task->itk_space == xport->data.receiver) { - - return(KERN_FAILURE); + !ip_active(xport) || + task->itk_space == xport->data.receiver) { + return KERN_FAILURE; } - wsave = thread_interrupt_level(THREAD_UNINT); + wsave = thread_interrupt_level(THREAD_UNINT); ret = exception_deliver( - thread, - EXC_RPC_ALERT, - code, - 2, - hostp->exc_actions, - &hostp->lock); + thread, + EXC_RPC_ALERT, + code, + 2, + hostp->exc_actions, + &hostp->lock); (void)thread_interrupt_level(wsave); - return(ret); + return ret; } - diff --git a/osfmk/kern/exception.h b/osfmk/kern/exception.h index 7ab0fcca3..0f5a81eff 100644 --- a/osfmk/kern/exception.h +++ b/osfmk/kern/exception.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
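[Editor's aside: exception_triage_thread() above walks a fixed escalation chain — thread exception actions, then the task's, then the host's — stopping at the first server that successfully handles the exception. A compact sketch of that control flow; simplified, no locking, illustrative types.]

#include <stdbool.h>
#include <stdio.h>

typedef enum { LEVEL_THREAD, LEVEL_TASK, LEVEL_HOST, LEVEL_COUNT } level_t;

/* Stand-in for exception_deliver(): true means the handler consumed it. */
static bool
deliver(level_t lvl, int exception)
{
	(void)exception;
	return lvl == LEVEL_TASK;   /* pretend a task-level handler exists */
}

static bool
triage(int exception)
{
	for (level_t lvl = LEVEL_THREAD; lvl < LEVEL_COUNT; lvl++) {
		if (deliver(lvl, exception)) {
			return true;    /* first successful delivery wins */
		}
	}
	return false;               /* nobody claimed it */
}

int
main(void)
{
	printf("handled=%d\n", triage(1));
	return 0;
}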
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -43,30 +43,30 @@ * There are arrays of these maintained at the activation, task, and host. */ struct exception_action { - struct ipc_port *port; /* exception port */ - thread_state_flavor_t flavor; /* state flavor to send */ - exception_behavior_t behavior; /* exception type to raise */ - boolean_t privileged; /* survives ipc_task_reset */ - struct label *label; /* MAC label associated with action */ + struct ipc_port *port; /* exception port */ + thread_state_flavor_t flavor; /* state flavor to send */ + exception_behavior_t behavior; /* exception type to raise */ + boolean_t privileged; /* survives ipc_task_reset */ + struct label *label; /* MAC label associated with action */ }; /* Make an up-call to a thread's exception server */ extern kern_return_t exception_triage( - exception_type_t exception, - mach_exception_data_t code, - mach_msg_type_number_t codeCnt); + exception_type_t exception, + mach_exception_data_t code, + mach_msg_type_number_t codeCnt); extern kern_return_t exception_triage_thread( - exception_type_t exception, - mach_exception_data_t code, + exception_type_t exception, + mach_exception_data_t code, mach_msg_type_number_t codeCnt, - thread_t thread); + thread_t thread); /* Notify system performance monitor */ extern kern_return_t sys_perf_notify(thread_t thread, int pid); /* Notify crash reporter */ extern kern_return_t task_exception_notify(exception_type_t exception, - mach_exception_data_type_t code, mach_exception_data_type_t subcode); + mach_exception_data_type_t code, mach_exception_data_type_t subcode); -#endif /* _KERN_EXCEPTION_H_ */ +#endif /* _KERN_EXCEPTION_H_ */ diff --git a/osfmk/kern/extmod_statistics.c b/osfmk/kern/extmod_statistics.c index 4eb26d1ce..58aa462dd 100644 --- a/osfmk/kern/extmod_statistics.c +++ b/osfmk/kern/extmod_statistics.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -74,8 +74,9 @@ extmod_statistics_incr_task_for_pid(task_t target) { task_t ctask = current_task(); - if ((ctask == kernel_task) || (target == TASK_NULL)) + if ((ctask == kernel_task) || (target == TASK_NULL)) { return; + } if (target != ctask) { ctask->extmod_statistics.task_for_pid_caller_count++; @@ -90,13 +91,15 @@ extmod_statistics_incr_thread_set_state(thread_t target) task_t ctask = current_task(); task_t ttask; - if ((ctask == kernel_task) || (target == THREAD_NULL)) + if ((ctask == kernel_task) || (target == THREAD_NULL)) { return; + } ttask = get_threadtask(target); - if (ttask == TASK_NULL) + if (ttask == TASK_NULL) { return; + } if (ttask != ctask) { ctask->extmod_statistics.thread_set_state_caller_count++; @@ -110,8 +113,9 @@ extmod_statistics_incr_thread_create(task_t target) { task_t ctask = current_task(); - if ((ctask == kernel_task) || (target == TASK_NULL)) + if ((ctask == kernel_task) || (target == TASK_NULL)) { return; + } if (target != ctask) { ctask->extmod_statistics.thread_creation_caller_count++; diff --git a/osfmk/kern/extmod_statistics.h b/osfmk/kern/extmod_statistics.h index 4aa18c42a..e518e3e5c 100644 --- a/osfmk/kern/extmod_statistics.h +++ b/osfmk/kern/extmod_statistics.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,7 +33,7 @@ * */ -#ifndef _KERN_EXTMOD_STATISTICS_H_ +#ifndef _KERN_EXTMOD_STATISTICS_H_ #define _KERN_EXTMOD_STATISTICS_H_ #include @@ -43,4 +43,4 @@ extern void extmod_statistics_incr_task_for_pid(task_t target); extern void extmod_statistics_incr_thread_set_state(thread_t target); extern void extmod_statistics_incr_thread_create(task_t target); -#endif /* _KERN_EXTMOD_STATISTICS_H_ */ +#endif /* _KERN_EXTMOD_STATISTICS_H_ */ diff --git a/osfmk/kern/gzalloc.c b/osfmk/kern/gzalloc.c index 64d0ba9eb..465fed0fe 100644 --- a/osfmk/kern/gzalloc.c +++ b/osfmk/kern/gzalloc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
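[Editor's aside: the extmod counters above record externally driven modifications from the caller's side, skipping the self-modification case. A toy version of that bookkeeping — the target-side counter here is an assumption added for symmetry; only the caller-side increments are visible in the hunks above.]

#include <stdint.h>
#include <stdio.h>

struct extmod_stats {
	uint64_t thread_set_state_caller_count;  /* I modified someone else */
	uint64_t thread_set_state_count;         /* someone else modified me (assumed) */
};

struct task { struct extmod_stats extmod_statistics; };

static void
incr_thread_set_state(struct task *ctask, struct task *ttask)
{
	if (ttask != ctask) {  /* self-modification is not "external" */
		ctask->extmod_statistics.thread_set_state_caller_count++;
		ttask->extmod_statistics.thread_set_state_count++;
	}
}

int
main(void)
{
	struct task debugger = {{0, 0}}, debuggee = {{0, 0}};
	incr_thread_set_state(&debugger, &debuggee);
	printf("caller=%llu target=%llu\n",
	    (unsigned long long)debugger.extmod_statistics.thread_set_state_caller_count,
	    (unsigned long long)debuggee.extmod_statistics.thread_set_state_count);
	return 0;
}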
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,7 +37,7 @@ * gzalloc_size=: target all zones with elements of bytes * gzalloc_min=: target zones with elements >= size * gzalloc_max=: target zones with elements <= size - * gzalloc_min/max can be specified in conjunction to target a range of + * gzalloc_min/max can be specified in conjunction to target a range of * sizes * gzalloc_fc_size=: number of zone elements (effectively page * multiple sized) to retain in the free VA cache. This cache is evicted @@ -100,7 +100,7 @@ extern boolean_t vm_kernel_ready, kmem_ready; boolean_t gzalloc_mode = FALSE; uint32_t pdzalloc_count, pdzfree_count; -#define GZALLOC_MIN_DEFAULT (1024) +#define GZALLOC_MIN_DEFAULT (1024) #define GZDEADZONE ((zone_t) 0xDEAD201E) #define GZALLOC_SIGNATURE (0xABADCAFE) #define GZALLOC_RESERVE_SIZE_DEFAULT (2 * 1024 * 1024) @@ -136,21 +136,29 @@ extern zone_t vm_page_zone; static zone_t gztrackzone = NULL; static char gznamedzone[MAX_ZONE_NAME] = ""; -void gzalloc_reconfigure(__unused zone_t z) { +void +gzalloc_reconfigure(__unused zone_t z) +{ /* Nothing for now */ } -boolean_t gzalloc_enabled(void) { +boolean_t +gzalloc_enabled(void) +{ return gzalloc_mode; } -static inline boolean_t gzalloc_tracked(zone_t z) { - return (gzalloc_mode && - (((z->elem_size >= gzalloc_min) && (z->elem_size <= gzalloc_max)) || (z == gztrackzone)) && - (z->gzalloc_exempt == 0)); +static inline boolean_t +gzalloc_tracked(zone_t z) +{ + return gzalloc_mode && + (((z->elem_size >= gzalloc_min) && (z->elem_size <= gzalloc_max)) || (z == gztrackzone)) && + (z->gzalloc_exempt == 0); } -void gzalloc_zone_init(zone_t z) { +void +gzalloc_zone_init(zone_t z) +{ if (gzalloc_mode) { bzero(&z->gz, sizeof(z->gz)); @@ -165,10 +173,11 @@ void gzalloc_zone_init(zone_t z) { /* If the VM/kmem system aren't yet configured, carve * out the free element cache structure directly from the * gzalloc_reserve supplied by the pmap layer. - */ + */ if (!kmem_ready) { - if (gzalloc_reserve_size < gzfcsz) + if (gzalloc_reserve_size < gzfcsz) { panic("gzalloc reserve exhausted"); + } z->gz.gzfc = (vm_offset_t *)gzalloc_reserve; gzalloc_reserve += gzfcsz; @@ -186,7 +195,9 @@ void gzalloc_zone_init(zone_t z) { } /* Called by zdestroy() to dump the free cache elements so the zone count can drop to zero. 
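[Editor's aside: gzalloc_tracked() above targets a zone when its element size falls inside the [gzalloc_min, gzalloc_max] window (or it is the named tracked zone) and the zone is not exempt. A standalone sketch of the predicate; the struct and harness are illustrative, the defaults follow the boot-arg parsing above.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool gzalloc_mode = true;
static uint32_t gzalloc_min = 1024;   /* GZALLOC_MIN_DEFAULT above */
static uint32_t gzalloc_max = ~0U;

struct zone { uint32_t elem_size; bool gzalloc_exempt; bool is_tracked_zone; };

static bool
gzalloc_tracked(const struct zone *z)
{
	return gzalloc_mode &&
	       ((z->elem_size >= gzalloc_min && z->elem_size <= gzalloc_max) ||
	       z->is_tracked_zone) &&
	       !z->gzalloc_exempt;
}

int
main(void)
{
	struct zone small = { 64, false, false }, big = { 4096, false, false };
	printf("small tracked=%d big tracked=%d\n",
	    gzalloc_tracked(&small), gzalloc_tracked(&big));
	return 0;
}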
*/ -void gzalloc_empty_free_cache(zone_t zone) { +void +gzalloc_empty_free_cache(zone_t zone) +{ if (__improbable(gzalloc_tracked(zone))) { kern_return_t kr; int freed_elements = 0; @@ -212,10 +223,10 @@ void gzalloc_empty_free_cache(zone_t zone) { free_addr = ((vm_offset_t *)gzfc_copy)[index]; if (free_addr && free_addr >= gzalloc_map_min && free_addr < gzalloc_map_max) { kr = vm_map_remove( - gzalloc_map, - free_addr, - free_addr + rounded_size + (1 * PAGE_SIZE), - VM_MAP_REMOVE_KUNWIRE); + gzalloc_map, + free_addr, + free_addr + rounded_size + (1 * PAGE_SIZE), + VM_MAP_REMOVE_KUNWIRE); if (kr != KERN_SUCCESS) { panic("gzalloc_empty_free_cache: vm_map_remove: %p, 0x%x", (void *)free_addr, kr); } @@ -241,10 +252,12 @@ void gzalloc_empty_free_cache(zone_t zone) { } } -void gzalloc_configure(void) { +void +gzalloc_configure(void) +{ char temp_buf[16]; - if (PE_parse_boot_argn("-gzalloc_mode", temp_buf, sizeof (temp_buf))) { + if (PE_parse_boot_argn("-gzalloc_mode", temp_buf, sizeof(temp_buf))) { gzalloc_mode = TRUE; gzalloc_min = GZALLOC_MIN_DEFAULT; gzalloc_max = ~0U; @@ -257,8 +270,9 @@ void gzalloc_configure(void) { if (PE_parse_boot_argn("gzalloc_max", &gzalloc_max, sizeof(gzalloc_max))) { gzalloc_mode = TRUE; - if (gzalloc_min == ~0U) + if (gzalloc_min == ~0U) { gzalloc_min = 0; + } } if (PE_parse_boot_argn("gzalloc_size", &gzalloc_size, sizeof(gzalloc_size))) { @@ -268,11 +282,11 @@ void gzalloc_configure(void) { (void)PE_parse_boot_argn("gzalloc_fc_size", &gzfc_size, sizeof(gzfc_size)); - if (PE_parse_boot_argn("-gzalloc_wp", temp_buf, sizeof (temp_buf))) { + if (PE_parse_boot_argn("-gzalloc_wp", temp_buf, sizeof(temp_buf))) { gzalloc_prot = VM_PROT_READ; } - if (PE_parse_boot_argn("-gzalloc_uf_mode", temp_buf, sizeof (temp_buf))) { + if (PE_parse_boot_argn("-gzalloc_uf_mode", temp_buf, sizeof(temp_buf))) { gzalloc_uf_mode = TRUE; gzalloc_guard = KMA_GUARD_FIRST; } @@ -283,7 +297,7 @@ void gzalloc_configure(void) { (void) PE_parse_boot_argn("gzalloc_zscale", &gzalloc_zonemap_scale, sizeof(gzalloc_zonemap_scale)); - if (PE_parse_boot_argn("-gzalloc_noconsistency", temp_buf, sizeof (temp_buf))) { + if (PE_parse_boot_argn("-gzalloc_noconsistency", temp_buf, sizeof(temp_buf))) { gzalloc_consistency_checks = FALSE; } @@ -299,8 +313,9 @@ void gzalloc_configure(void) { gzalloc_mode = TRUE; } #endif - if (PE_parse_boot_argn("-nogzalloc_mode", temp_buf, sizeof (temp_buf))) + if (PE_parse_boot_argn("-nogzalloc_mode", temp_buf, sizeof(temp_buf))) { gzalloc_mode = FALSE; + } if (gzalloc_mode) { gzalloc_reserve_size = GZALLOC_RESERVE_SIZE_DEFAULT; @@ -308,7 +323,9 @@ void gzalloc_configure(void) { } } -void gzalloc_init(vm_size_t max_zonemap_size) { +void +gzalloc_init(vm_size_t max_zonemap_size) +{ kern_return_t retval; if (gzalloc_mode) { @@ -317,9 +334,9 @@ void gzalloc_init(vm_size_t max_zonemap_size) { vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; vmk_flags.vmkf_permanent = TRUE; retval = kmem_suballoc(kernel_map, &gzalloc_map_min, (max_zonemap_size * gzalloc_zonemap_scale), - FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE, - &gzalloc_map); - + FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE, + &gzalloc_map); + if (retval != KERN_SUCCESS) { panic("zone_init: kmem_suballoc(gzalloc_map, 0x%lx, %u) failed", max_zonemap_size, gzalloc_zonemap_scale); } @@ -328,17 +345,17 @@ void gzalloc_init(vm_size_t max_zonemap_size) { } vm_offset_t -gzalloc_alloc(zone_t zone, boolean_t canblock) { +gzalloc_alloc(zone_t zone, boolean_t canblock) +{ vm_offset_t addr = 0; if 
(__improbable(gzalloc_tracked(zone))) { - if (get_preemption_level() != 0) { if (canblock == TRUE) { pdzalloc_count++; - } - else + } else { return 0; + } } vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE); @@ -350,8 +367,9 @@ gzalloc_alloc(zone_t zone, boolean_t canblock) { /* Early allocations are supplied directly from the * reserve. */ - if (gzalloc_reserve_size < (rounded_size + PAGE_SIZE)) + if (gzalloc_reserve_size < (rounded_size + PAGE_SIZE)) { panic("gzalloc reserve exhausted"); + } gzaddr = gzalloc_reserve; /* No guard page for these early allocations, just * waste an additional page. @@ -359,15 +377,14 @@ gzalloc_alloc(zone_t zone, boolean_t canblock) { gzalloc_reserve += rounded_size + PAGE_SIZE; gzalloc_reserve_size -= rounded_size + PAGE_SIZE; OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc); - } - else { + } else { kern_return_t kr = kernel_memory_allocate(gzalloc_map, - &gzaddr, rounded_size + (1*PAGE_SIZE), + &gzaddr, rounded_size + (1 * PAGE_SIZE), 0, KMA_KOBJECT | KMA_ATOMIC | gzalloc_guard, VM_KERN_MEMORY_OSFMK); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d", (uint64_t)rounded_size, kr); - + } } if (gzalloc_uf_mode) { @@ -420,7 +437,9 @@ gzalloc_alloc(zone_t zone, boolean_t canblock) { return addr; } -boolean_t gzalloc_free(zone_t zone, void *addr) { +boolean_t +gzalloc_free(zone_t zone, void *addr) +{ boolean_t gzfreed = FALSE; kern_return_t kr; @@ -463,8 +482,9 @@ boolean_t gzalloc_free(zone_t zone, void *addr) { panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", addr, GZALLOC_SIGNATURE, gzh->gzsig); } - if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE)) + if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE)) { panic("%s: Mismatched zone or under/overflow, current zone: %p, recorded zone: %p, address: %p", __FUNCTION__, zone, gzh->gzone, (void *)addr); + } /* Partially redundant given the zone check, but may flag header corruption */ if (gzh->gzsize != zone->elem_size) { panic("Mismatched zfree or under/overflow for zone %p, recorded size: 0x%x, element size: 0x%x, address: %p\n", zone, gzh->gzsize, (uint32_t) zone->elem_size, (void *)addr); @@ -497,7 +517,7 @@ boolean_t gzalloc_free(zone_t zone, void *addr) { } if (get_preemption_level() != 0) { - pdzfree_count++; + pdzfree_count++; } if (gzfc_size) { @@ -510,8 +530,9 @@ boolean_t gzalloc_free(zone_t zone, void *addr) { saddr + rounded_size + (1 * PAGE_SIZE), gzalloc_prot, FALSE); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { panic("%s: vm_map_protect: %p, 0x%x", __FUNCTION__, (void *)saddr, kr); + } } else { free_addr = saddr; } @@ -546,8 +567,9 @@ boolean_t gzalloc_free(zone_t zone, void *addr) { free_addr, free_addr + rounded_size + (1 * PAGE_SIZE), VM_MAP_REMOVE_KUNWIRE); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr); + } // TODO: sysctl-ize for quick reference OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed); OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted); @@ -558,7 +580,9 @@ boolean_t gzalloc_free(zone_t zone, void *addr) { return gzfreed; } -boolean_t gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz) { +boolean_t +gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz) +{ uintptr_t a = (uintptr_t)gzaddr; if (__improbable(gzalloc_mode && (a >= gzalloc_map_min) && (a < gzalloc_map_max))) { gzhdr_t *gzh; @@ -574,7 +598,6 @@ boolean_t 
gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz) { /* Locate the gzalloc metadata adjoining the element */ if (gzalloc_uf_mode == TRUE) { - /* In underflow detection mode, locate the map entry describing * the element, and then locate the copy of the gzalloc * header at the trailing edge of the range. @@ -587,9 +610,9 @@ boolean_t gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz) { */ uint32_t *p = (uint32_t*) gzvme->vme_start; while (p < (uint32_t *) gzvme->vme_end) { - if (*p == GZALLOC_SIGNATURE) + if (*p == GZALLOC_SIGNATURE) { break; - else { + } else { p++; } } diff --git a/osfmk/kern/hibernate.c b/osfmk/kern/hibernate.c index 0bbc73d49..9e0cd1e96 100644 --- a/osfmk/kern/hibernate.c +++ b/osfmk/kern/hibernate.c @@ -2,7 +2,7 @@ * Copyright (c) 2004-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
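[Editor's aside: in underflow mode the gzalloc header sits at the trailing edge of the mapping, so gzalloc_element_size() above scans forward one 32-bit word at a time until it hits GZALLOC_SIGNATURE. The scan in isolation; the signature value is the one defined above.]

#include <stddef.h>
#include <stdint.h>

#define GZALLOC_SIGNATURE 0xABADCAFE

/* Returns a pointer to the signature word within [start, end), or NULL,
 * mirroring the loop in gzalloc_element_size(). */
static uint32_t *
find_signature(uint32_t *start, uint32_t *end)
{
	for (uint32_t *p = start; p < end; p++) {
		if (*p == GZALLOC_SIGNATURE) {
			return p;
		}
	}
	return NULL;
}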
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -44,113 +44,111 @@ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -boolean_t need_to_unlock_decompressor = FALSE; +boolean_t need_to_unlock_decompressor = FALSE; -kern_return_t +kern_return_t hibernate_alloc_page_lists( - hibernate_page_list_t ** page_list_ret, - hibernate_page_list_t ** page_list_wired_ret, - hibernate_page_list_t ** page_list_pal_ret) + hibernate_page_list_t ** page_list_ret, + hibernate_page_list_t ** page_list_wired_ret, + hibernate_page_list_t ** page_list_pal_ret) { - kern_return_t retval = KERN_SUCCESS; - - hibernate_page_list_t * page_list = NULL; - hibernate_page_list_t * page_list_wired = NULL; - hibernate_page_list_t * page_list_pal = NULL; - - page_list = hibernate_page_list_allocate(TRUE); - if (!page_list) { - - retval = KERN_RESOURCE_SHORTAGE; - goto done; - } - page_list_wired = hibernate_page_list_allocate(FALSE); - if (!page_list_wired) - { - kfree(page_list, page_list->list_size); - - retval = KERN_RESOURCE_SHORTAGE; - goto done; - } - page_list_pal = hibernate_page_list_allocate(FALSE); - if (!page_list_pal) - { - kfree(page_list, page_list->list_size); - kfree(page_list_wired, page_list_wired->list_size); - - retval = KERN_RESOURCE_SHORTAGE; - goto done; - } - *page_list_ret = page_list; - *page_list_wired_ret = page_list_wired; - *page_list_pal_ret = page_list_pal; + kern_return_t retval = KERN_SUCCESS; + + hibernate_page_list_t * page_list = NULL; + hibernate_page_list_t * page_list_wired = NULL; + hibernate_page_list_t * page_list_pal = NULL; + + page_list = hibernate_page_list_allocate(TRUE); + if (!page_list) { + retval = KERN_RESOURCE_SHORTAGE; + goto done; + } + page_list_wired = hibernate_page_list_allocate(FALSE); + if (!page_list_wired) { + kfree(page_list, page_list->list_size); + + retval = KERN_RESOURCE_SHORTAGE; + goto done; + } + page_list_pal = hibernate_page_list_allocate(FALSE); + if (!page_list_pal) { + kfree(page_list, page_list->list_size); + kfree(page_list_wired, page_list_wired->list_size); + + retval = KERN_RESOURCE_SHORTAGE; + goto done; + } + *page_list_ret = page_list; + *page_list_wired_ret = page_list_wired; + *page_list_pal_ret = page_list_pal; done: - return (retval); - + return retval; } extern int sync_internal(void); -kern_return_t +kern_return_t hibernate_setup(IOHibernateImageHeader * header, - boolean_t vmflush, - hibernate_page_list_t * page_list __unused, - hibernate_page_list_t * page_list_wired __unused, - hibernate_page_list_t * page_list_pal __unused) + boolean_t vmflush, + hibernate_page_list_t * page_list __unused, + hibernate_page_list_t * page_list_wired __unused, + hibernate_page_list_t * page_list_pal __unused) { - kern_return_t retval = KERN_SUCCESS; + kern_return_t retval = KERN_SUCCESS; + + hibernate_create_paddr_map(); - hibernate_create_paddr_map(); + hibernate_reset_stats(); - hibernate_reset_stats(); - - if (vmflush && VM_CONFIG_COMPRESSOR_IS_PRESENT) { - - sync_internal(); + if (vmflush && VM_CONFIG_COMPRESSOR_IS_PRESENT) { + sync_internal(); - vm_decompressor_lock(); - need_to_unlock_decompressor = TRUE; + vm_decompressor_lock(); + need_to_unlock_decompressor = TRUE; - hibernate_flush_memory(); - } + hibernate_flush_memory(); + } - // no failures hereafter + // no failures hereafter - hibernate_processor_setup(header); + hibernate_processor_setup(header); - HIBLOG("hibernate_alloc_pages act %d, inact %d, anon %d, throt %d, spec %d, wire %d, wireinit %d\n", - vm_page_active_count, 
vm_page_inactive_count, - vm_page_anonymous_count, vm_page_throttled_count, vm_page_speculative_count, + HIBLOG("hibernate_alloc_pages act %d, inact %d, anon %d, throt %d, spec %d, wire %d, wireinit %d\n", + vm_page_active_count, vm_page_inactive_count, + vm_page_anonymous_count, vm_page_throttled_count, vm_page_speculative_count, vm_page_wire_count, vm_page_wire_count_initial); - if (retval != KERN_SUCCESS && need_to_unlock_decompressor == TRUE) { - need_to_unlock_decompressor = FALSE; - vm_decompressor_unlock(); - } - return (retval); + if (retval != KERN_SUCCESS && need_to_unlock_decompressor == TRUE) { + need_to_unlock_decompressor = FALSE; + vm_decompressor_unlock(); + } + return retval; } -kern_return_t +kern_return_t hibernate_teardown(hibernate_page_list_t * page_list, - hibernate_page_list_t * page_list_wired, - hibernate_page_list_t * page_list_pal) + hibernate_page_list_t * page_list_wired, + hibernate_page_list_t * page_list_pal) { - hibernate_free_gobble_pages(); - - if (page_list) - kfree(page_list, page_list->list_size); - if (page_list_wired) - kfree(page_list_wired, page_list_wired->list_size); - if (page_list_pal) - kfree(page_list_pal, page_list_pal->list_size); - - if (VM_CONFIG_COMPRESSOR_IS_PRESENT) { - if (need_to_unlock_decompressor == TRUE) { - need_to_unlock_decompressor = FALSE; - vm_decompressor_unlock(); - } - vm_compressor_delay_trim(); - } - return (KERN_SUCCESS); + hibernate_free_gobble_pages(); + + if (page_list) { + kfree(page_list, page_list->list_size); + } + if (page_list_wired) { + kfree(page_list_wired, page_list_wired->list_size); + } + if (page_list_pal) { + kfree(page_list_pal, page_list_pal->list_size); + } + + if (VM_CONFIG_COMPRESSOR_IS_PRESENT) { + if (need_to_unlock_decompressor == TRUE) { + need_to_unlock_decompressor = FALSE; + vm_decompressor_unlock(); + } + vm_compressor_delay_trim(); + } + return KERN_SUCCESS; } diff --git a/osfmk/kern/host.c b/osfmk/kern/host.c index 0f4fe2fb9..3d3e853ef 100644 --- a/osfmk/kern/host.c +++ b/osfmk/kern/host.c @@ -89,7 +89,7 @@ #include #include #include -#include // mach_node_port_changed() +#include // mach_node_port_changed() #include #include @@ -117,8 +117,9 @@ host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_t void * addr; unsigned int count, i; - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_ARGUMENT); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_ARGUMENT; + } assert(host_priv == &realhost); @@ -126,17 +127,19 @@ host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_t assert(count != 0); addr = kalloc((vm_size_t)(count * sizeof(mach_port_t))); - if (addr == 0) - return (KERN_RESOURCE_SHORTAGE); + if (addr == 0) { + return KERN_RESOURCE_SHORTAGE; + } tp = (processor_t *)addr; *tp++ = processor = processor_list; if (count > 1) { - simple_lock(&processor_list_lock); + simple_lock(&processor_list_lock, LCK_GRP_NULL); - for (i = 1; i < count; i++) + for (i = 1; i < count; i++) { *tp++ = processor = processor->processor_list; + } simple_unlock(&processor_list_lock); } @@ -146,17 +149,19 @@ host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_t /* do the conversion that Mig should handle */ tp = (processor_t *)addr; - for (i = 0; i < count; i++) + for (i = 0; i < count; i++) { ((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]); + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * 
count) { - if (host == HOST_NULL) - return (KERN_INVALID_ARGUMENT); + if (host == HOST_NULL) { + return KERN_INVALID_ARGUMENT; + } switch (flavor) { case HOST_BASIC_INFO: { @@ -166,14 +171,19 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num /* * Basic information about this host. */ - if (*count < HOST_BASIC_INFO_OLD_COUNT) - return (KERN_FAILURE); + if (*count < HOST_BASIC_INFO_OLD_COUNT) { + return KERN_FAILURE; + } basic_info = (host_basic_info_t)info; basic_info->memory_size = machine_info.memory_size; basic_info->max_cpus = machine_info.max_cpus; +#if defined(__x86_64__) + basic_info->avail_cpus = processor_avail_count_user; +#else basic_info->avail_cpus = processor_avail_count; +#endif master_id = master_processor->cpu_id; basic_info->cpu_type = slot_type(master_id); basic_info->cpu_subtype = slot_subtype(master_id); @@ -182,7 +192,11 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num basic_info->cpu_threadtype = slot_threadtype(master_id); basic_info->physical_cpu = machine_info.physical_cpu; basic_info->physical_cpu_max = machine_info.physical_cpu_max; +#if defined(__x86_64__) + basic_info->logical_cpu = basic_info->avail_cpus; +#else basic_info->logical_cpu = machine_info.logical_cpu; +#endif basic_info->logical_cpu_max = machine_info.logical_cpu_max; basic_info->max_mem = machine_info.max_mem; @@ -191,7 +205,7 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num *count = HOST_BASIC_INFO_OLD_COUNT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } case HOST_SCHED_INFO: { @@ -202,8 +216,9 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num /* * Return scheduler information. */ - if (*count < HOST_SCHED_INFO_COUNT) - return (KERN_FAILURE); + if (*count < HOST_SCHED_INFO_COUNT) { + return KERN_FAILURE; + } sched_info = (host_sched_info_t)info; @@ -214,25 +229,27 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num *count = HOST_SCHED_INFO_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; } case HOST_RESOURCE_SIZES: { /* * Return sizes of kernel data structures */ - if (*count < HOST_RESOURCE_SIZES_COUNT) - return (KERN_FAILURE); + if (*count < HOST_RESOURCE_SIZES_COUNT) { + return KERN_FAILURE; + } /* XXX Fail until ledgers are implemented */ - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } case HOST_PRIORITY_INFO: { host_priority_info_t priority_info; - if (*count < HOST_PRIORITY_INFO_COUNT) - return (KERN_FAILURE); + if (*count < HOST_PRIORITY_INFO_COUNT) { + return KERN_FAILURE; + } priority_info = (host_priority_info_t)info; @@ -247,7 +264,7 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num *count = HOST_PRIORITY_INFO_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -256,14 +273,15 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num case HOST_MACH_MSG_TRAP: case HOST_SEMAPHORE_TRAPS: { *count = 0; - return (KERN_SUCCESS); + return KERN_SUCCESS; } case HOST_CAN_HAS_DEBUGGER: { host_can_has_debugger_info_t can_has_debugger_info; - if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) - return (KERN_FAILURE); + if (*count < HOST_CAN_HAS_DEBUGGER_COUNT) { + return KERN_FAILURE; + } can_has_debugger_info = (host_can_has_debugger_info_t)info; can_has_debugger_info->can_has_debugger = PE_i_can_has_debugger(NULL); @@ -273,19 +291,21 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num } case 
HOST_VM_PURGABLE: { - if (*count < HOST_VM_PURGABLE_COUNT) - return (KERN_FAILURE); + if (*count < HOST_VM_PURGABLE_COUNT) { + return KERN_FAILURE; + } vm_purgeable_stats((vm_purgeable_info_t)info, NULL); *count = HOST_VM_PURGABLE_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; } case HOST_DEBUG_INFO_INTERNAL: { #if DEVELOPMENT || DEBUG - if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) - return (KERN_FAILURE); + if (*count < HOST_DEBUG_INFO_INTERNAL_COUNT) { + return KERN_FAILURE; + } host_debug_info_internal_t debug_info = (host_debug_info_internal_t)info; bzero(debug_info, sizeof(host_debug_info_internal_data_t)); @@ -301,9 +321,9 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num #if CONFIG_CSR debug_info->config_csr = 1; #endif - return (KERN_SUCCESS); + return KERN_SUCCESS; #else /* DEVELOPMENT || DEBUG */ - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; #endif } @@ -313,8 +333,9 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num /* * Basic information about this host. */ - if (*count < HOST_PREFERRED_USER_ARCH_COUNT) - return (KERN_FAILURE); + if (*count < HOST_PREFERRED_USER_ARCH_COUNT) { + return KERN_FAILURE; + } user_arch_info = (host_preferred_user_arch_t)info; @@ -329,10 +350,10 @@ host_info(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_type_num *count = HOST_PREFERRED_USER_ARCH_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; } - default: return (KERN_INVALID_ARGUMENT); + default: return KERN_INVALID_ARGUMENT; } } @@ -343,15 +364,17 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty { uint32_t i; - if (host == HOST_NULL) - return (KERN_INVALID_HOST); + if (host == HOST_NULL) { + return KERN_INVALID_HOST; + } switch (flavor) { case HOST_LOAD_INFO: { host_load_info_t load_info; - if (*count < HOST_LOAD_INFO_COUNT) - return (KERN_FAILURE); + if (*count < HOST_LOAD_INFO_COUNT) { + return KERN_FAILURE; + } load_info = (host_load_info_t)info; @@ -359,7 +382,7 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty bcopy((char *)mach_factor, (char *)load_info->mach_factor, sizeof mach_factor); *count = HOST_LOAD_INFO_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; } case HOST_VM_INFO: { @@ -369,15 +392,16 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty vm_statistics_t stat32; mach_msg_type_number_t original_count; - if (*count < HOST_VM_INFO_REV0_COUNT) - return (KERN_FAILURE); + if (*count < HOST_VM_INFO_REV0_COUNT) { + return KERN_FAILURE; + } processor = processor_list; stat = &PROCESSOR_DATA(processor, vm_stat); host_vm_stat = *stat; if (processor_count > 1) { - simple_lock(&processor_list_lock); + simple_lock(&processor_list_lock, LCK_GRP_NULL); while ((processor = processor->processor_list) != NULL) { stat = &PROCESSOR_DATA(processor, vm_stat); @@ -446,15 +470,16 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty /* rev3 changed some of the fields to be 64-bit*/ - return (KERN_SUCCESS); + return KERN_SUCCESS; } case HOST_CPU_LOAD_INFO: { processor_t processor; host_cpu_load_info_t cpu_load_info; - if (*count < HOST_CPU_LOAD_INFO_COUNT) - return (KERN_FAILURE); + if (*count < HOST_CPU_LOAD_INFO_COUNT) { + return KERN_FAILURE; + } #define GET_TICKS_VALUE(state, ticks) \ MACRO_BEGIN cpu_load_info->cpu_ticks[(state)] += (uint32_t)(ticks / hz_tick_interval); \ @@ -469,7 +494,7 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, 
mach_msg_ty cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0; cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0; - simple_lock(&processor_list_lock); + simple_lock(&processor_list_lock, LCK_GRP_NULL); for (processor = processor_list; processor != NULL; processor = processor->processor_list) { timer_t idle_state; @@ -494,7 +519,7 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty /* Processor is non-idle, so idle timer should be accurate */ GET_TICKS_VALUE_FROM_TIMER(processor, CPU_STATE_IDLE, idle_state); } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) || - (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) { + (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) { /* Idle timer is being updated concurrently, second stamp is good enough */ GET_TICKS_VALUE(CPU_STATE_IDLE, idle_time_snapshot2); } else { @@ -511,12 +536,12 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty *count = HOST_CPU_LOAD_INFO_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; } case HOST_EXPIRED_TASK_INFO: { if (*count < TASK_POWER_INFO_COUNT) { - return (KERN_FAILURE); + return KERN_FAILURE; } task_power_info_t tinfo1 = (task_power_info_t)info; @@ -533,8 +558,7 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty tinfo1->total_system = dead_task_statistics.total_system_time; if (*count < TASK_POWER_INFO_V2_COUNT) { *count = TASK_POWER_INFO_COUNT; - } - else if (*count >= TASK_POWER_INFO_V2_COUNT) { + } else if (*count >= TASK_POWER_INFO_V2_COUNT) { tinfo2->gpu_energy.task_gpu_utilisation = dead_task_statistics.task_gpu_ns; #if defined(__arm__) || defined(__arm64__) tinfo2->task_energy = dead_task_statistics.task_energy; @@ -544,9 +568,9 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty *count = TASK_POWER_INFO_V2_COUNT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } - default: return (KERN_INVALID_ARGUMENT); + default: return KERN_INVALID_ARGUMENT; } } @@ -561,17 +585,17 @@ uint64_t host_statistics_time_window; static lck_mtx_t host_statistics_lck; static lck_grp_t* host_statistics_lck_grp; -#define HOST_VM_INFO64_REV0 0 -#define HOST_VM_INFO64_REV1 1 -#define HOST_EXTMOD_INFO64_REV0 2 -#define HOST_LOAD_INFO_REV0 3 -#define HOST_VM_INFO_REV0 4 -#define HOST_VM_INFO_REV1 5 -#define HOST_VM_INFO_REV2 6 -#define HOST_CPU_LOAD_INFO_REV0 7 -#define HOST_EXPIRED_TASK_INFO_REV0 8 -#define HOST_EXPIRED_TASK_INFO_REV1 9 -#define NUM_HOST_INFO_DATA_TYPES 10 +#define HOST_VM_INFO64_REV0 0 +#define HOST_VM_INFO64_REV1 1 +#define HOST_EXTMOD_INFO64_REV0 2 +#define HOST_LOAD_INFO_REV0 3 +#define HOST_VM_INFO_REV0 4 +#define HOST_VM_INFO_REV1 5 +#define HOST_VM_INFO_REV2 6 +#define HOST_CPU_LOAD_INFO_REV0 7 +#define HOST_EXPIRED_TASK_INFO_REV0 8 +#define HOST_EXPIRED_TASK_INFO_REV1 9 +#define NUM_HOST_INFO_DATA_TYPES 10 static vm_statistics64_data_t host_vm_info64_rev0 = {}; static vm_statistics64_data_t host_vm_info64_rev1 = {}; @@ -617,102 +641,101 @@ host_statistics_init(void) static void cache_host_statistics(int index, host_info64_t info) { - if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) - return; + if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) { + return; + } - task_t task = current_task(); - if (task->t_flags & TF_PLATFORM) - return; + task_t task = current_task(); + if (task->t_flags & TF_PLATFORM) { + return; + } - memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t)); - 
return; + memcpy((void *)g_host_stats_cache[index].data, info, g_host_stats_cache[index].count * sizeof(integer_t)); + return; } static void get_cached_info(int index, host_info64_t info, mach_msg_type_number_t* count) { - if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) { - *count = 0; - return; - } + if (index < 0 || index >= NUM_HOST_INFO_DATA_TYPES) { + *count = 0; + return; + } - *count = g_host_stats_cache[index].count; - memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t)); + *count = g_host_stats_cache[index].count; + memcpy(info, (void *)g_host_stats_cache[index].data, g_host_stats_cache[index].count * sizeof(integer_t)); } static int get_host_info_data_index(bool is_stat64, host_flavor_t flavor, mach_msg_type_number_t* count, kern_return_t* ret) { switch (flavor) { - - case HOST_VM_INFO64: - if (!is_stat64){ - *ret = KERN_INVALID_ARGUMENT; - return -1; - } - if (*count < HOST_VM_INFO64_REV0_COUNT) { - *ret = KERN_FAILURE; - return -1; - } - if (*count >= HOST_VM_INFO64_REV1_COUNT) { - return HOST_VM_INFO64_REV1; - } - return HOST_VM_INFO64_REV0; + case HOST_VM_INFO64: + if (!is_stat64) { + *ret = KERN_INVALID_ARGUMENT; + return -1; + } + if (*count < HOST_VM_INFO64_REV0_COUNT) { + *ret = KERN_FAILURE; + return -1; + } + if (*count >= HOST_VM_INFO64_REV1_COUNT) { + return HOST_VM_INFO64_REV1; + } + return HOST_VM_INFO64_REV0; case HOST_EXTMOD_INFO64: - if (!is_stat64){ - *ret = KERN_INVALID_ARGUMENT; - return -1; - } - if (*count < HOST_EXTMOD_INFO64_COUNT) { - *ret = KERN_FAILURE; - return -1; - } - return HOST_EXTMOD_INFO64_REV0; + if (!is_stat64) { + *ret = KERN_INVALID_ARGUMENT; + return -1; + } + if (*count < HOST_EXTMOD_INFO64_COUNT) { + *ret = KERN_FAILURE; + return -1; + } + return HOST_EXTMOD_INFO64_REV0; case HOST_LOAD_INFO: - if (*count < HOST_LOAD_INFO_COUNT) { - *ret = KERN_FAILURE; - return -1; - } - return HOST_LOAD_INFO_REV0; + if (*count < HOST_LOAD_INFO_COUNT) { + *ret = KERN_FAILURE; + return -1; + } + return HOST_LOAD_INFO_REV0; case HOST_VM_INFO: - if (*count < HOST_VM_INFO_REV0_COUNT) { - *ret = KERN_FAILURE; - return -1; - } - if (*count >= HOST_VM_INFO_REV2_COUNT) { - return HOST_VM_INFO_REV2; - } - if (*count >= HOST_VM_INFO_REV1_COUNT) { - return HOST_VM_INFO_REV1; - } - return HOST_VM_INFO_REV0; + if (*count < HOST_VM_INFO_REV0_COUNT) { + *ret = KERN_FAILURE; + return -1; + } + if (*count >= HOST_VM_INFO_REV2_COUNT) { + return HOST_VM_INFO_REV2; + } + if (*count >= HOST_VM_INFO_REV1_COUNT) { + return HOST_VM_INFO_REV1; + } + return HOST_VM_INFO_REV0; case HOST_CPU_LOAD_INFO: - if (*count < HOST_CPU_LOAD_INFO_COUNT) { - *ret = KERN_FAILURE; - return -1; - } - return HOST_CPU_LOAD_INFO_REV0; + if (*count < HOST_CPU_LOAD_INFO_COUNT) { + *ret = KERN_FAILURE; + return -1; + } + return HOST_CPU_LOAD_INFO_REV0; case HOST_EXPIRED_TASK_INFO: - if (*count < TASK_POWER_INFO_COUNT){ - *ret = KERN_FAILURE; - return -1; - } - if (*count >= TASK_POWER_INFO_V2_COUNT){ - return HOST_EXPIRED_TASK_INFO_REV1; - } - return HOST_EXPIRED_TASK_INFO_REV0; + if (*count < TASK_POWER_INFO_COUNT) { + *ret = KERN_FAILURE; + return -1; + } + if (*count >= TASK_POWER_INFO_V2_COUNT) { + return HOST_EXPIRED_TASK_INFO_REV1; + } + return HOST_EXPIRED_TASK_INFO_REV0; default: - *ret = KERN_INVALID_ARGUMENT; - return -1; - + *ret = KERN_INVALID_ARGUMENT; + return -1; } - } static bool @@ -735,8 +758,9 @@ rate_limit_host_statistics(bool is_stat64, host_flavor_t flavor, host_info64_t i /* there is a cache for every flavor */ int index = 
get_host_info_data_index(is_stat64, flavor, count, ret); - if (index == -1) + if (index == -1) { goto out; + } *pindex = index; lck_mtx_lock(&host_statistics_lck); @@ -770,8 +794,9 @@ host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_ms { uint32_t i; - if (host == HOST_NULL) - return (KERN_INVALID_HOST); + if (host == HOST_NULL) { + return KERN_INVALID_HOST; + } switch (flavor) { case HOST_VM_INFO64: /* We were asked to get vm_statistics64 */ @@ -783,15 +808,16 @@ host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_ms unsigned int local_q_internal_count; unsigned int local_q_external_count; - if (*count < HOST_VM_INFO64_REV0_COUNT) - return (KERN_FAILURE); + if (*count < HOST_VM_INFO64_REV0_COUNT) { + return KERN_FAILURE; + } processor = processor_list; stat = &PROCESSOR_DATA(processor, vm_stat); host_vm_stat = *stat; if (processor_count > 1) { - simple_lock(&processor_list_lock); + simple_lock(&processor_list_lock, LCK_GRP_NULL); while ((processor = processor->processor_list) != NULL) { stat = &PROCESSOR_DATA(processor, vm_stat); @@ -878,26 +904,27 @@ host_statistics64(host_t host, host_flavor_t flavor, host_info64_t info, mach_ms *count = HOST_VM_INFO64_REV1_COUNT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } case HOST_EXTMOD_INFO64: /* We were asked to get vm_statistics64 */ { vm_extmod_statistics_t out_extmod_statistics; - if (*count < HOST_EXTMOD_INFO64_COUNT) - return (KERN_FAILURE); + if (*count < HOST_EXTMOD_INFO64_COUNT) { + return KERN_FAILURE; + } out_extmod_statistics = (vm_extmod_statistics_t)info; *out_extmod_statistics = host_extmod_statistics; *count = HOST_EXTMOD_INFO64_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; } default: /* If we didn't recognize the flavor, send to host_statistics */ - return (host_statistics(host, flavor, (host_info_t)info, count)); + return host_statistics(host, flavor, (host_info_t)info, count); } } @@ -907,19 +934,23 @@ host_statistics64_from_user(host_t host, host_flavor_t flavor, host_info64_t inf kern_return_t ret = KERN_SUCCESS; int index; - if (host == HOST_NULL) - return (KERN_INVALID_HOST); + if (host == HOST_NULL) { + return KERN_INVALID_HOST; + } - if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) + if (rate_limit_host_statistics(TRUE, flavor, info, count, &ret, &index)) { return ret; + } - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return ret; + } ret = host_statistics64(host, flavor, info, count); - if (ret == KERN_SUCCESS) + if (ret == KERN_SUCCESS) { cache_host_statistics(index, info); + } return ret; } @@ -930,19 +961,23 @@ host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, kern_return_t ret = KERN_SUCCESS; int index; - if (host == HOST_NULL) - return (KERN_INVALID_HOST); + if (host == HOST_NULL) { + return KERN_INVALID_HOST; + } - if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) + if (rate_limit_host_statistics(FALSE, flavor, info, count, &ret, &index)) { return ret; + } - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return ret; + } ret = host_statistics(host, flavor, info, count); - if (ret == KERN_SUCCESS) + if (ret == KERN_SUCCESS) { cache_host_statistics(index, info); + } return ret; } @@ -954,14 +989,14 @@ host_statistics_from_user(host_t host, host_flavor_t flavor, host_info64_t info, kern_return_t host_priv_statistics(host_priv_t host_priv, host_flavor_t flavor, host_info_t info, mach_msg_type_number_t * count) { - return (host_statistics((host_t)host_priv, 
flavor, info, count)); + return host_statistics((host_t)host_priv, flavor, info, count); } kern_return_t set_sched_stats_active(boolean_t active) { sched_stats_active = active; - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -969,19 +1004,19 @@ uint64_t get_pages_grabbed_count(void) { processor_t processor; - uint64_t pages_grabbed_count = 0; + uint64_t pages_grabbed_count = 0; - simple_lock(&processor_list_lock); + simple_lock(&processor_list_lock, LCK_GRP_NULL); processor = processor_list; while (processor) { - pages_grabbed_count += PROCESSOR_DATA(processor, page_grab_count); + pages_grabbed_count += PROCESSOR_DATA(processor, page_grab_count); processor = processor->processor_list; } simple_unlock(&processor_list_lock); - return(pages_grabbed_count); + return pages_grabbed_count; } @@ -991,14 +1026,14 @@ get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count) processor_t processor; if (!sched_stats_active) { - return (KERN_FAILURE); + return KERN_FAILURE; } - simple_lock(&processor_list_lock); + simple_lock(&processor_list_lock, LCK_GRP_NULL); if (*count < (processor_count + 1) * sizeof(struct _processor_statistics_np)) { /* One for RT */ simple_unlock(&processor_list_lock); - return (KERN_FAILURE); + return KERN_FAILURE; } processor = processor_list; @@ -1033,18 +1068,19 @@ get_sched_statistics(struct _processor_statistics_np * out, uint32_t * count) out++; *count += (uint32_t)sizeof(struct _processor_statistics_np); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t host_page_size(host_t host, vm_size_t * out_page_size) { - if (host == HOST_NULL) - return (KERN_INVALID_ARGUMENT); + if (host == HOST_NULL) { + return KERN_INVALID_ARGUMENT; + } *out_page_size = PAGE_SIZE; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1056,12 +1092,13 @@ extern char version[]; kern_return_t host_kernel_version(host_t host, kernel_version_t out_version) { - if (host == HOST_NULL) - return (KERN_INVALID_ARGUMENT); + if (host == HOST_NULL) { + return KERN_INVALID_ARGUMENT; + } (void)strncpy(out_version, version, sizeof(kernel_version_t)); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1074,8 +1111,9 @@ host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_lis { void * addr; - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_ARGUMENT); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_ARGUMENT; + } /* * Allocate memory. 
Can be pageable because it won't be @@ -1083,8 +1121,9 @@ host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_lis */ addr = kalloc((vm_size_t)sizeof(mach_port_t)); - if (addr == 0) - return (KERN_RESOURCE_SHORTAGE); + if (addr == 0) { + return KERN_RESOURCE_SHORTAGE; + } /* do the conversion that Mig should handle */ *((ipc_port_t *)addr) = convert_pset_name_to_port(&pset0); @@ -1092,7 +1131,7 @@ host_processor_sets(host_priv_t host_priv, processor_set_name_array_t * pset_lis *pset_list = (processor_set_array_t)addr; *count = 1; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1106,12 +1145,12 @@ host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, proces if (host_priv == HOST_PRIV_NULL || pset_name == PROCESSOR_SET_NULL) { *pset = PROCESSOR_SET_NULL; - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } *pset = pset_name; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1123,10 +1162,10 @@ host_processor_set_priv(host_priv_t host_priv, processor_set_t pset_name, proces */ kern_return_t host_processor_info(host_t host, - processor_flavor_t flavor, - natural_t * out_pcount, - processor_info_array_t * out_array, - mach_msg_type_number_t * out_array_count) + processor_flavor_t flavor, + natural_t * out_pcount, + processor_info_array_t * out_array, + mach_msg_type_number_t * out_array_count) { kern_return_t result; processor_t processor; @@ -1138,12 +1177,14 @@ host_processor_info(host_t host, vm_size_t size, needed; vm_map_copy_t copy; - if (host == HOST_NULL) - return (KERN_INVALID_ARGUMENT); + if (host == HOST_NULL) { + return KERN_INVALID_ARGUMENT; + } result = processor_info_count(flavor, &icount); - if (result != KERN_SUCCESS) - return (result); + if (result != KERN_SUCCESS) { + return result; + } pcount = processor_count; assert(pcount != 0); @@ -1151,8 +1192,9 @@ host_processor_info(host_t host, needed = pcount * icount * sizeof(natural_t); size = vm_map_round_page(needed, VM_MAP_PAGE_MASK(ipc_kernel_map)); result = kmem_alloc(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC); - if (result != KERN_SUCCESS) - return (KERN_RESOURCE_SHORTAGE); + if (result != KERN_SUCCESS) { + return KERN_RESOURCE_SHORTAGE; + } info = (processor_info_t)addr; processor = processor_list; @@ -1161,12 +1203,12 @@ host_processor_info(host_t host, result = processor_info(processor, flavor, &thost, info, &tcount); if (result != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr, size); - return (result); + return result; } if (pcount > 1) { for (i = 1; i < pcount; i++) { - simple_lock(&processor_list_lock); + simple_lock(&processor_list_lock, LCK_GRP_NULL); processor = processor->processor_list; simple_unlock(&processor_list_lock); @@ -1175,16 +1217,17 @@ host_processor_info(host_t host, result = processor_info(processor, flavor, &thost, info, &tcount); if (result != KERN_SUCCESS) { kmem_free(ipc_kernel_map, addr, size); - return (result); + return result; } } } - if (size != needed) + if (size != needed) { bzero((char *)addr + needed, size - needed); + } result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)), - vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE); + vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE); assert(result == KERN_SUCCESS); result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, ©); assert(result == KERN_SUCCESS); @@ -1193,7 +1236,7 @@ host_processor_info(host_t host, *out_array = 
(processor_info_array_t)copy; *out_array_count = pcount * icount; - return (KERN_SUCCESS); + return KERN_SUCCESS; } static bool @@ -1212,12 +1255,14 @@ kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) { ipc_port_t old_port; - if (!is_valid_host_special_port(id)) + if (!is_valid_host_special_port(id)) { panic("attempted to set invalid special port %d", id); + } #if !MACH_FLIPC - if (id == HOST_NODE_PORT) - return (KERN_NOT_SUPPORTED); + if (id == HOST_NODE_PORT) { + return KERN_NOT_SUPPORTED; + } #endif host_lock(host_priv); @@ -1226,13 +1271,15 @@ kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) host_unlock(host_priv); #if MACH_FLIPC - if (id == HOST_NODE_PORT) + if (id == HOST_NODE_PORT) { mach_node_port_changed(); + } #endif - if (IP_VALID(old_port)) + if (IP_VALID(old_port)) { ipc_port_release_send(old_port); - return (KERN_SUCCESS); + } + return KERN_SUCCESS; } /* @@ -1241,13 +1288,14 @@ kernel_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) kern_return_t kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp) { - if (!is_valid_host_special_port(id)) + if (!is_valid_host_special_port(id)) { panic("attempted to get invalid special port %d", id); + } host_lock(host_priv); *portp = host_priv->special[id]; host_unlock(host_priv); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1262,15 +1310,17 @@ kernel_get_special_port(host_priv_t host_priv, int id, ipc_port_t * portp) kern_return_t host_set_special_port(host_priv_t host_priv, int id, ipc_port_t port) { - if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) - return (KERN_INVALID_ARGUMENT); + if (host_priv == HOST_PRIV_NULL || id <= HOST_MAX_SPECIAL_KERNEL_PORT || id > HOST_MAX_SPECIAL_PORT) { + return KERN_INVALID_ARGUMENT; + } #if CONFIG_MACF - if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) - return (KERN_NO_ACCESS); + if (mac_task_check_set_host_special_port(current_task(), id, port) != 0) { + return KERN_NO_ACCESS; + } #endif - return (kernel_set_special_port(host_priv, id, port)); + return kernel_set_special_port(host_priv, id, port); } /* @@ -1287,15 +1337,16 @@ host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port { ipc_port_t port; - if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) - return (KERN_INVALID_ARGUMENT); + if (host_priv == HOST_PRIV_NULL || id == HOST_SECURITY_PORT || id > HOST_MAX_SPECIAL_PORT || id < HOST_MIN_SPECIAL_PORT) { + return KERN_INVALID_ARGUMENT; + } host_lock(host_priv); port = realhost.special[id]; *portp = ipc_port_copy_send(port); host_unlock(host_priv); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1306,43 +1357,45 @@ host_get_special_port(host_priv_t host_priv, __unused int node, int id, ipc_port kern_return_t host_get_io_master(host_t host, io_master_t * io_masterp) { - if (host == HOST_NULL) - return (KERN_INVALID_ARGUMENT); + if (host == HOST_NULL) { + return KERN_INVALID_ARGUMENT; + } - return (host_get_io_master_port(host_priv_self(), io_masterp)); + return host_get_io_master_port(host_priv_self(), io_masterp); } host_t host_self(void) { - return (&realhost); + return &realhost; } host_priv_t host_priv_self(void) { - return (&realhost); + return &realhost; } host_security_t host_security_self(void) { - return (&realhost); + return &realhost; } kern_return_t host_set_atm_diagnostic_flag(host_priv_t host_priv, uint32_t 
diagnostic_flag) { - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_ARGUMENT); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_ARGUMENT; + } assert(host_priv == &realhost); #if CONFIG_ATM - return (atm_set_diagnostic_config(diagnostic_flag)); + return atm_set_diagnostic_config(diagnostic_flag); #else (void)diagnostic_flag; - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; #endif } @@ -1350,8 +1403,9 @@ kern_return_t host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config) { #if CONFIG_EMBEDDED - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_ARGUMENT); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_ARGUMENT; + } assert(host_priv == &realhost); @@ -1360,10 +1414,10 @@ host_set_multiuser_config_flags(host_priv_t host_priv, uint32_t multiuser_config * if a value is written to the commpage word. */ commpage_update_multiuser_config(multiuser_config | kIsMultiUserDevice); - return (KERN_SUCCESS); + return KERN_SUCCESS; #else (void)host_priv; (void)multiuser_config; - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; #endif } diff --git a/osfmk/kern/host.h b/osfmk/kern/host.h index 1b4dca21b..480eb4bf8 100644 --- a/osfmk/kern/host.h +++ b/osfmk/kern/host.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
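
Every flavor handled by the host_info() and host_statistics() hunks above follows the same Mach contract: the caller passes the capacity of its buffer in *count (in integer_t units), the routine returns KERN_FAILURE if that is smaller than the flavor's *_COUNT constant, fills in as much as the advertised revision allows, and writes the number of slots actually used back through *count. A toy version of that contract, with invented types and names:

    #include <stdint.h>
    #include <string.h>

    typedef int32_t  integer_t;
    typedef uint32_t msg_number_t;

    struct toy_info { integer_t mem_mb, max_cpus, avail_cpus; };
    #define TOY_INFO_COUNT \
        ((msg_number_t)(sizeof(struct toy_info) / sizeof(integer_t)))

    static int toy_host_info(integer_t *info, msg_number_t *count)
    {
        if (*count < TOY_INFO_COUNT)
            return -1;                  /* buffer too small: KERN_FAILURE */

        struct toy_info ti = { 16384, 8, 8 };
        memcpy(info, &ti, sizeof(ti));  /* fill the flavor's fields */
        *count = TOY_INFO_COUNT;        /* report slots actually written */
        return 0;
    }

Newer revisions extend the struct, which is why the hunks above compare *count against several *_COUNT constants and serve the largest revision the caller's buffer can hold.
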
*/ @@ -61,13 +61,13 @@ * */ -#ifndef _KERN_HOST_H_ +#ifndef _KERN_HOST_H_ #define _KERN_HOST_H_ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -76,18 +76,18 @@ #include #include -struct host { - decl_lck_mtx_data(,lock) /* lock to protect exceptions */ +struct host { + decl_lck_mtx_data(, lock) /* lock to protect exceptions */ ipc_port_t special[HOST_MAX_SPECIAL_PORT + 1]; struct exception_action exc_actions[EXC_TYPES_COUNT]; }; -typedef struct host host_data_t; +typedef struct host host_data_t; -extern host_data_t realhost; +extern host_data_t realhost; -#define host_lock(host) lck_mtx_lock(&(host)->lock) -#define host_unlock(host) lck_mtx_unlock(&(host)->lock) +#define host_lock(host) lck_mtx_lock(&(host)->lock) +#define host_unlock(host) lck_mtx_unlock(&(host)->lock) extern vm_extmod_statistics_data_t host_extmod_statistics; @@ -106,7 +106,7 @@ typedef struct { extern expired_task_statistics_t dead_task_statistics; -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ /* * Access routines for inside the kernel. @@ -114,10 +114,10 @@ extern expired_task_statistics_t dead_task_statistics; __BEGIN_DECLS -extern host_t host_self(void); -extern host_priv_t host_priv_self(void); -extern host_security_t host_security_self(void); +extern host_t host_self(void); +extern host_priv_t host_priv_self(void); +extern host_security_t host_security_self(void); __END_DECLS -#endif /* _KERN_HOST_H_ */ +#endif /* _KERN_HOST_H_ */ diff --git a/osfmk/kern/host_notify.c b/osfmk/kern/host_notify.c index a69f109b4..27c8bc750 100644 --- a/osfmk/kern/host_notify.c +++ b/osfmk/kern/host_notify.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
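
struct host above hides its mutex behind the host_lock()/host_unlock() macros; the HOST_VM_INFO paths earlier in host.c use the same discipline with processor_list_lock, seeding the total from the first processor's private counters and then adding the rest in under the lock. A thread-based sketch of that read-side aggregation (the per-slot layout is an assumption, not XNU's):

    #include <pthread.h>
    #include <stdint.h>

    #define NCPU 8

    struct vm_stat { uint64_t faults; uint64_t pageins; };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct vm_stat per_cpu[NCPU];

    static struct vm_stat sum_vm_stat(void)
    {
        struct vm_stat total = per_cpu[0];  /* first slot seeds the total */

        pthread_mutex_lock(&list_lock);     /* stable view of the others  */
        for (int i = 1; i < NCPU; i++) {
            total.faults  += per_cpu[i].faults;
            total.pageins += per_cpu[i].pageins;
        }
        pthread_mutex_unlock(&list_lock);
        return total;
    }
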
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,34 +37,35 @@ #include "mach/host_notify_reply.h" -decl_lck_mtx_data(,host_notify_lock) +decl_lck_mtx_data(, host_notify_lock) -lck_mtx_ext_t host_notify_lock_ext; -lck_grp_t host_notify_lock_grp; -lck_attr_t host_notify_lock_attr; -static lck_grp_attr_t host_notify_lock_grp_attr; -static zone_t host_notify_zone; +lck_mtx_ext_t host_notify_lock_ext; +lck_grp_t host_notify_lock_grp; +lck_attr_t host_notify_lock_attr; +static lck_grp_attr_t host_notify_lock_grp_attr; +static zone_t host_notify_zone; -static queue_head_t host_notify_queue[HOST_NOTIFY_TYPE_MAX+1]; +static queue_head_t host_notify_queue[HOST_NOTIFY_TYPE_MAX + 1]; -static mach_msg_id_t host_notify_replyid[HOST_NOTIFY_TYPE_MAX+1] = - { HOST_CALENDAR_CHANGED_REPLYID, - HOST_CALENDAR_SET_REPLYID }; +static mach_msg_id_t host_notify_replyid[HOST_NOTIFY_TYPE_MAX + 1] = +{ HOST_CALENDAR_CHANGED_REPLYID, + HOST_CALENDAR_SET_REPLYID }; struct host_notify_entry { - queue_chain_t entries; - ipc_port_t port; + queue_chain_t entries; + ipc_port_t port; }; -typedef struct host_notify_entry *host_notify_t; +typedef struct host_notify_entry *host_notify_t; void host_notify_init(void) { - int i; + int i; - for (i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++) + for (i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++) { queue_init(&host_notify_queue[i]); + } lck_grp_attr_setdefault(&host_notify_lock_grp_attr); lck_grp_init(&host_notify_lock_grp, "host_notify", &host_notify_lock_grp_attr); @@ -72,31 +73,35 @@ host_notify_init(void) lck_mtx_init_ext(&host_notify_lock, &host_notify_lock_ext, &host_notify_lock_grp, &host_notify_lock_attr); - i = sizeof (struct host_notify_entry); + i = sizeof(struct host_notify_entry); host_notify_zone = - zinit(i, (4096 * i), (16 * i), "host_notify"); + zinit(i, (4096 * i), (16 * i), "host_notify"); } kern_return_t host_request_notification( - host_t host, - host_flavor_t notify_type, - ipc_port_t port) + host_t host, + host_flavor_t notify_type, + ipc_port_t port) { - host_notify_t entry; + host_notify_t entry; - if (host == HOST_NULL) - return (KERN_INVALID_ARGUMENT); + if (host == HOST_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (!IP_VALID(port)) - return (KERN_INVALID_CAPABILITY); + if (!IP_VALID(port)) { + return KERN_INVALID_CAPABILITY; + } - if (notify_type > HOST_NOTIFY_TYPE_MAX || notify_type < 0) - return (KERN_INVALID_ARGUMENT); + if (notify_type > HOST_NOTIFY_TYPE_MAX || notify_type < 0) { + return KERN_INVALID_ARGUMENT; + } entry = (host_notify_t)zalloc(host_notify_zone); - if (entry == NULL) - return (KERN_RESOURCE_SHORTAGE); + if (entry == NULL) { + return KERN_RESOURCE_SHORTAGE; + } lck_mtx_lock(&host_notify_lock); @@ -107,7 +112,7 @@ host_request_notification( lck_mtx_unlock(&host_notify_lock); zfree(host_notify_zone, entry); - return (KERN_FAILURE); + return KERN_FAILURE; } entry->port = port; @@ -117,14 +122,14 @@ host_request_notification( enqueue_tail(&host_notify_queue[notify_type], (queue_entry_t)entry); lck_mtx_unlock(&host_notify_lock); - return (KERN_SUCCESS); + return KERN_SUCCESS; } void host_notify_port_destroy( - ipc_port_t port) + ipc_port_t port) { - host_notify_t entry; + host_notify_t entry; lck_mtx_lock(&host_notify_lock); @@ -150,17 +155,17 @@ host_notify_port_destroy( static void host_notify_all( - host_flavor_t notify_type, - mach_msg_header_t *msg, - mach_msg_size_t msg_size) + host_flavor_t notify_type, + mach_msg_header_t *msg, + mach_msg_size_t msg_size) { - queue_t notify_queue = &host_notify_queue[notify_type]; + queue_t 
notify_queue = &host_notify_queue[notify_type]; lck_mtx_lock(&host_notify_lock); if (!queue_empty(notify_queue)) { - queue_head_t send_queue; - host_notify_t entry; + queue_head_t send_queue; + host_notify_t entry; send_queue = *notify_queue; queue_init(notify_queue); @@ -175,7 +180,7 @@ host_notify_all( msg->msgh_id = host_notify_replyid[notify_type]; while ((entry = (host_notify_t)dequeue(&send_queue)) != NULL) { - ipc_port_t port; + ipc_port_t port; port = entry->port; assert(port != IP_NULL); @@ -203,15 +208,15 @@ host_notify_all( void host_notify_calendar_change(void) { - __Request__host_calendar_changed_t msg; + __Request__host_calendar_changed_t msg; - host_notify_all(HOST_NOTIFY_CALENDAR_CHANGE, &msg.Head, sizeof (msg)); + host_notify_all(HOST_NOTIFY_CALENDAR_CHANGE, &msg.Head, sizeof(msg)); } void host_notify_calendar_set(void) { - __Request__host_calendar_set_t msg; + __Request__host_calendar_set_t msg; - host_notify_all(HOST_NOTIFY_CALENDAR_SET, &msg.Head, sizeof (msg)); + host_notify_all(HOST_NOTIFY_CALENDAR_SET, &msg.Head, sizeof(msg)); } diff --git a/osfmk/kern/host_notify.h b/osfmk/kern/host_notify.h index 12846b3dd..adfffc5e9 100644 --- a/osfmk/kern/host_notify.h +++ b/osfmk/kern/host_notify.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,19 +34,19 @@ * Created. */ -#ifndef _KERN_HOST_NOTIFY_H_ -#define _KERN_HOST_NOTIFY_H_ +#ifndef _KERN_HOST_NOTIFY_H_ +#define _KERN_HOST_NOTIFY_H_ #ifdef MACH_KERNEL_PRIVATE #include -void host_notify_port_destroy( - ipc_port_t port); +void host_notify_port_destroy( + ipc_port_t port); -void host_notify_calendar_change(void); -void host_notify_calendar_set(void); +void host_notify_calendar_change(void); +void host_notify_calendar_set(void); -void host_notify_init(void); +void host_notify_init(void); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/host_statistics.h b/osfmk/kern/host_statistics.h index cbccf8c22..aa7e4a5da 100644 --- a/osfmk/kern/host_statistics.h +++ b/osfmk/kern/host_statistics.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
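
host_notify_all() above is careful about lock scope: it moves the entire registration queue onto a local head and reinitializes the shared queue while holding host_notify_lock, then drops the lock before walking the detached entries and sending a message to each port, so the potentially slow sends never run under the lock. The same splice-and-drain move on a minimal singly linked list:

    #include <pthread.h>
    #include <stddef.h>

    struct entry { struct entry *next; /* payload lives here */ };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *q_head;

    static void notify_all(void (*send)(struct entry *))
    {
        pthread_mutex_lock(&q_lock);
        struct entry *batch = q_head;   /* splice the whole queue out     */
        q_head = NULL;                  /* later registrations start fresh */
        pthread_mutex_unlock(&q_lock);

        while (batch != NULL) {         /* drain with no lock held */
            struct entry *e = batch;
            batch = e->next;
            send(e);                    /* may block; that is now safe */
        }
    }
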
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -35,7 +35,7 @@ * */ -#ifndef _KERN_HOST_STATISTICS_H_ +#ifndef _KERN_HOST_STATISTICS_H_ #define _KERN_HOST_STATISTICS_H_ #include @@ -45,14 +45,14 @@ extern uint64_t get_pages_grabbed_count(void); -#define VM_STAT_INCR(event) \ -MACRO_BEGIN \ - OSAddAtomic64(1, (SInt64 *) (&(PROCESSOR_DATA(current_processor(), vm_stat).event))); \ +#define VM_STAT_INCR(event) \ +MACRO_BEGIN \ + OSAddAtomic64(1, (SInt64 *) (&(PROCESSOR_DATA(current_processor(), vm_stat).event))); \ MACRO_END -#define VM_STAT_INCR_BY(event, amount) \ -MACRO_BEGIN \ - OSAddAtomic64((amount), (SInt64 *) (&(PROCESSOR_DATA(current_processor(), vm_stat).event))); \ +#define VM_STAT_INCR_BY(event, amount) \ +MACRO_BEGIN \ + OSAddAtomic64((amount), (SInt64 *) (&(PROCESSOR_DATA(current_processor(), vm_stat).event))); \ MACRO_END -#endif /* _KERN_HOST_STATISTICS_H_ */ +#endif /* _KERN_HOST_STATISTICS_H_ */ diff --git a/osfmk/kern/hv_support.c b/osfmk/kern/hv_support.c index 6803be30a..683076b2f 100644 --- a/osfmk/kern/hv_support.c +++ b/osfmk/kern/hv_support.c @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
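
The reindented VM_STAT_INCR/VM_STAT_INCR_BY macros above bump a statistic in the current processor's private vm_stat copy with a 64-bit atomic add, so writers never contend on a shared cache line; readers pay instead, summing the per-CPU copies as the HOST_VM_INFO hunks earlier do. A C11 rendering of the increment side, with an invented per-slot layout:

    #include <stdatomic.h>
    #include <stdint.h>

    #define NCPU 8

    struct vm_stat { _Atomic int64_t faults; _Atomic int64_t zero_fills; };
    static struct vm_stat per_cpu_stat[NCPU];

    /* Analogue of VM_STAT_INCR(event): one atomic add on this CPU's copy. */
    #define STAT_INCR(cpu, event) \
        atomic_fetch_add_explicit(&per_cpu_stat[(cpu)].event, 1, \
                                  memory_order_relaxed)

    #define STAT_INCR_BY(cpu, event, amount) \
        atomic_fetch_add_explicit(&per_cpu_stat[(cpu)].event, (amount), \
                                  memory_order_relaxed)

A thread running on CPU 3 would record a fault with STAT_INCR(3, faults); the read side sums all NCPU copies, as in the aggregation sketch earlier.
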
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -42,12 +42,12 @@ int hv_support_available = 0; /* callbacks for tasks/threads with associated hv objects */ hv_callbacks_t hv_callbacks = { - .dispatch = NULL, /* thread is being dispatched for execution */ - .preempt = NULL, /* thread is being preempted */ - .suspend = NULL, /* system is being suspended */ - .thread_destroy = NULL, /* thread is being destroyed */ - .task_destroy = NULL, /* task is being destroyed */ - .volatile_state = NULL, /* thread state is becoming volatile */ + .dispatch = NULL, /* thread is being dispatched for execution */ + .preempt = NULL, /* thread is being preempted */ + .suspend = NULL, /* system is being suspended */ + .thread_destroy = NULL, /* thread is being destroyed */ + .task_destroy = NULL, /* task is being destroyed */ + .volatile_state = NULL, /* thread state is becoming volatile */ }; /* trap tables for hv_*_trap syscalls */ @@ -68,7 +68,8 @@ static lck_mtx_t *hv_support_lck_mtx = NULL; /* hv_support boot initialization */ void -hv_support_init(void) { +hv_support_init(void) +{ #if defined(__x86_64__) && CONFIG_VMX hv_support_available = vmx_hv_support(); #endif @@ -82,38 +83,44 @@ hv_support_init(void) { /* returns true if hv_support is available on this machine */ int -hv_get_support(void) { +hv_get_support(void) +{ return hv_support_available; } /* associate an hv object with the current task */ void -hv_set_task_target(void *target) { +hv_set_task_target(void *target) +{ current_task()->hv_task_target = target; } /* associate an hv object with the current thread */ void -hv_set_thread_target(void *target) { +hv_set_thread_target(void *target) +{ current_thread()->hv_thread_target = target; } /* get hv object associated with the current task */ void* -hv_get_task_target(void) { +hv_get_task_target(void) +{ return current_task()->hv_task_target; } /* get hv object associated with the current thread */ void* -hv_get_thread_target(void) { +hv_get_thread_target(void) +{ return current_thread()->hv_thread_target; } /* test if a given thread state may be volatile between dispatch - and preemption */ + * and preemption */ int -hv_get_volatile_state(hv_volatile_state_t state) { +hv_get_volatile_state(hv_volatile_state_t state) +{ int is_volatile = 0; #if (defined(__x86_64__)) @@ -128,13 +135,13 @@ hv_get_volatile_state(hv_volatile_state_t state) { /* register a list of trap handlers for the hv_*_trap syscalls */ kern_return_t hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps, - unsigned trap_count) + unsigned trap_count) { hv_trap_table_t *trap_table = &hv_trap_table[trap_type]; kern_return_t kr = KERN_FAILURE; lck_mtx_lock(hv_support_lck_mtx); - if (trap_table->trap_count == 0) { + if (trap_table->trap_count == 0) { trap_table->traps = traps; OSMemoryBarrier(); trap_table->trap_count = trap_count; @@ -147,7 +154,8 @@ hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps, /* release hv_*_trap traps */ void -hv_release_traps(hv_trap_type_t trap_type) { +hv_release_traps(hv_trap_type_t trap_type) +{ hv_trap_table_t *trap_table = &hv_trap_table[trap_type]; lck_mtx_lock(hv_support_lck_mtx); @@ -158,13 +166,14 @@ hv_release_traps(hv_trap_type_t trap_type) { } /* register callbacks for certain task/thread events for tasks/threads with - associated hv objects */ + * associated hv objects */ kern_return_t -hv_set_callbacks(hv_callbacks_t callbacks) { +hv_set_callbacks(hv_callbacks_t callbacks) +{ kern_return_t kr = KERN_FAILURE; lck_mtx_lock(hv_support_lck_mtx); - if (hv_callbacks_enabled 
== 0) { + if (hv_callbacks_enabled == 0) { hv_callbacks = callbacks; hv_callbacks_enabled = 1; kr = KERN_SUCCESS; @@ -176,8 +185,9 @@ hv_set_callbacks(hv_callbacks_t callbacks) { /* release callbacks for task/thread events */ void -hv_release_callbacks(void) { - lck_mtx_lock(hv_support_lck_mtx); +hv_release_callbacks(void) +{ + lck_mtx_lock(hv_support_lck_mtx); hv_callbacks = (hv_callbacks_t) { .dispatch = NULL, .preempt = NULL, @@ -194,24 +204,29 @@ hv_release_callbacks(void) { /* system suspend notification */ void -hv_suspend(void) { +hv_suspend(void) +{ if (hv_callbacks_enabled) { hv_callbacks.suspend(); } } /* dispatch hv_task_trap/hv_thread_trap syscalls to trap handlers, - fail for invalid index or absence of trap handlers, trap handler is - responsible for validating targets */ -#define HV_TRAP_DISPATCH(type, index, target, argument)\ + * fail for invalid index or absence of trap handlers, trap handler is + * responsible for validating targets */ +#define HV_TRAP_DISPATCH(type, index, target, argument) \ ((__probable(index < hv_trap_table[type].trap_count)) ? \ - hv_trap_table[type].traps[index](target, argument) \ - : KERN_INVALID_ARGUMENT) + hv_trap_table[type].traps[index](target, argument) \ + : KERN_INVALID_ARGUMENT) -kern_return_t hv_task_trap(uint64_t index, uint64_t arg) { +kern_return_t +hv_task_trap(uint64_t index, uint64_t arg) +{ return HV_TRAP_DISPATCH(HV_TASK_TRAP, index, hv_get_task_target(), arg); } -kern_return_t hv_thread_trap(uint64_t index, uint64_t arg) { +kern_return_t +hv_thread_trap(uint64_t index, uint64_t arg) +{ return HV_TRAP_DISPATCH(HV_THREAD_TRAP, index, hv_get_thread_target(), arg); } diff --git a/osfmk/kern/hv_support.h b/osfmk/kern/hv_support.h index fb2bfe55e..72d5bd2cf 100644 --- a/osfmk/kern/hv_support.h +++ b/osfmk/kern/hv_support.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
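
hv_set_traps() above fills in the table pointer first, issues OSMemoryBarrier(), and only then stores the count, while HV_TRAP_DISPATCH() checks the count before indexing; a racing hv_task_trap() therefore either sees a zero count and fails cleanly, or sees a count whose table pointer is already visible. In C11 terms that is a release store paired with an acquire load — a sketch, not the kernel's code:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef int (*trap_fn)(void *target, uint64_t arg);

    static const trap_fn *g_traps;          /* written before the count    */
    static _Atomic unsigned g_trap_count;   /* nonzero only once published */

    static void publish_traps(const trap_fn *traps, unsigned count)
    {
        g_traps = traps;                              /* 1: fill pointer  */
        atomic_store_explicit(&g_trap_count, count,   /* 2: release count */
                              memory_order_release);
    }

    static int dispatch(unsigned index, void *target, uint64_t arg)
    {
        unsigned n = atomic_load_explicit(&g_trap_count,
                                          memory_order_acquire);
        if (index >= n)
            return -1;               /* KERN_INVALID_ARGUMENT in the hunk */
        return g_traps[index](target, arg);  /* pointer guaranteed visible */
    }
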
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -73,7 +73,7 @@ extern void *hv_get_task_target(void); extern void *hv_get_thread_target(void); extern int hv_get_volatile_state(hv_volatile_state_t state); extern kern_return_t hv_set_traps(hv_trap_type_t trap_type, - const hv_trap_t *traps, unsigned trap_count); + const hv_trap_t *traps, unsigned trap_count); extern void hv_release_traps(hv_trap_type_t trap_type); extern kern_return_t hv_set_callbacks(hv_callbacks_t callbacks); extern void hv_release_callbacks(void); diff --git a/osfmk/kern/ipc_clock.c b/osfmk/kern/ipc_clock.c index 04cefb66e..502f876a4 100644 --- a/osfmk/kern/ipc_clock.c +++ b/osfmk/kern/ipc_clock.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -53,18 +53,20 @@ */ void ipc_clock_init( - clock_t clock) + clock_t clock) { - ipc_port_t port; + ipc_port_t port; port = ipc_port_alloc_kernel(); - if (port == IP_NULL) + if (port == IP_NULL) { panic("ipc_clock_init"); + } clock->cl_service = port; port = ipc_port_alloc_kernel(); - if (port == IP_NULL) + if (port == IP_NULL) { panic("ipc_clock_init"); + } clock->cl_control = port; } @@ -75,12 +77,12 @@ ipc_clock_init( */ void ipc_clock_enable( - clock_t clock) + clock_t clock) { ipc_kobject_set(clock->cl_service, - (ipc_kobject_t) clock, IKOT_CLOCK); + (ipc_kobject_t) clock, IKOT_CLOCK); ipc_kobject_set(clock->cl_control, - (ipc_kobject_t) clock, IKOT_CLOCK_CTRL); + (ipc_kobject_t) clock, IKOT_CLOCK_CTRL); } /* @@ -94,20 +96,20 @@ ipc_clock_enable( */ clock_t convert_port_to_clock( - ipc_port_t port) + ipc_port_t port) { - clock_t clock = CLOCK_NULL; + clock_t clock = CLOCK_NULL; if (IP_VALID(port)) { ip_lock(port); if (ip_active(port) && ((ip_kotype(port) == IKOT_CLOCK) || - (ip_kotype(port) == IKOT_CLOCK_CTRL))) { + (ip_kotype(port) == IKOT_CLOCK_CTRL))) { clock = (clock_t) port->ip_kobject; } ip_unlock(port); } - return (clock); + return clock; } /* @@ -121,9 +123,9 @@ convert_port_to_clock( */ clock_t convert_port_to_clock_ctrl( - ipc_port_t port) + ipc_port_t port) { - clock_t clock = CLOCK_NULL; + clock_t clock = CLOCK_NULL; if (IP_VALID(port)) { ip_lock(port); @@ -133,7 +135,7 @@ convert_port_to_clock_ctrl( } ip_unlock(port); } - return (clock); + return clock; } /* @@ -146,12 +148,12 @@ convert_port_to_clock_ctrl( */ ipc_port_t convert_clock_to_port( - clock_t clock) + clock_t clock) { - ipc_port_t port; + ipc_port_t port; port = ipc_port_make_send(clock->cl_service); - return (port); + return port; } /* @@ -164,12 +166,12 @@ convert_clock_to_port( */ ipc_port_t convert_clock_ctrl_to_port( - clock_t clock) + clock_t clock) { - ipc_port_t port; + ipc_port_t port; port = ipc_port_make_send(clock->cl_control); - return (port); + return port; } /* @@ -181,17 +183,20 @@ clock_t port_name_to_clock( mach_port_name_t clock_name) { - clock_t clock = CLOCK_NULL; - ipc_space_t space; - ipc_port_t port; + clock_t clock = CLOCK_NULL; + ipc_space_t space; + ipc_port_t port; - if (clock_name == 0) - return (clock); + if (clock_name == 0) { + return clock; + } space = current_space(); - if (ipc_port_translate_send(space, clock_name, &port) != KERN_SUCCESS) - return (clock); - if (ip_active(port) && (ip_kotype(port) == IKOT_CLOCK)) + if (ipc_port_translate_send(space, clock_name, &port) != KERN_SUCCESS) { + return clock; + } + if (ip_active(port) && (ip_kotype(port) == IKOT_CLOCK)) { clock = (clock_t) port->ip_kobject; + } ip_unlock(port); - return (clock); + return clock; } diff --git a/osfmk/kern/ipc_host.c b/osfmk/kern/ipc_host.c index ca2960559..c3b4a4516 100644 --- a/osfmk/kern/ipc_host.c +++ b/osfmk/kern/ipc_host.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
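/*
 * Illustrative sketch (not part of the upstream diff): every conversion
 * routine reformatted in ipc_clock.c above follows the same shape --
 * IP_VALID() check, ip_lock(), ip_kotype() test, then read ip_kobject.
 * A hypothetical in-kernel caller resolving a clock from a port name:
 */
static kern_return_t
my_resolve_clock(mach_port_name_t name)        /* hypothetical */
{
	clock_t clock = port_name_to_clock(name);
	if (clock == CLOCK_NULL) {
		/* invalid name, inactive port, or not an IKOT_CLOCK kobject */
		return KERN_INVALID_ARGUMENT;
	}
	/* clocks are static kernel objects, so no reference was taken */
	return KERN_SUCCESS;
}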
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -92,12 +92,13 @@ ref_pset_port_locked( * ipc_host_init: set up various things. */ -extern lck_grp_t host_notify_lock_grp; -extern lck_attr_t host_notify_lock_attr; +extern lck_grp_t host_notify_lock_grp; +extern lck_attr_t host_notify_lock_attr; -void ipc_host_init(void) +void +ipc_host_init(void) { - ipc_port_t port; + ipc_port_t port; int i; lck_mtx_init(&realhost.lock, &host_notify_lock_grp, &host_notify_lock_attr); @@ -106,43 +107,46 @@ void ipc_host_init(void) * Allocate and set up the two host ports. */ port = ipc_port_alloc_kernel(); - if (port == IP_NULL) + if (port == IP_NULL) { panic("ipc_host_init"); + } ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_SECURITY); kernel_set_special_port(&realhost, HOST_SECURITY_PORT, - ipc_port_make_send(port)); + ipc_port_make_send(port)); port = ipc_port_alloc_kernel(); - if (port == IP_NULL) + if (port == IP_NULL) { panic("ipc_host_init"); + } ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST); kernel_set_special_port(&realhost, HOST_PORT, - ipc_port_make_send(port)); + ipc_port_make_send(port)); port = ipc_port_alloc_kernel(); - if (port == IP_NULL) + if (port == IP_NULL) { panic("ipc_host_init"); + } ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV); kernel_set_special_port(&realhost, HOST_PRIV_PORT, - ipc_port_make_send(port)); + ipc_port_make_send(port)); /* the rest of the special ports will be set up later */ bzero(&realhost.exc_actions[0], sizeof(realhost.exc_actions[0])); for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { - realhost.exc_actions[i].port = IP_NULL; - /* The mac framework is not yet initialized, so we defer - * initializing the labels to later, when they are set - * for the first time. 
*/ - realhost.exc_actions[i].label = NULL; - /* initialize the entire exception action struct */ - realhost.exc_actions[i].behavior = 0; - realhost.exc_actions[i].flavor = 0; - realhost.exc_actions[i].privileged = FALSE; - }/* for */ + realhost.exc_actions[i].port = IP_NULL; + /* The mac framework is not yet initialized, so we defer + * initializing the labels to later, when they are set + * for the first time. */ + realhost.exc_actions[i].label = NULL; + /* initialize the entire exception action struct */ + realhost.exc_actions[i].behavior = 0; + realhost.exc_actions[i].flavor = 0; + realhost.exc_actions[i].privileged = FALSE; + } /* for */ /* * Set up ipc for default processor set. @@ -191,13 +195,14 @@ host_self_trap( void ipc_processor_init( - processor_t processor) + processor_t processor) { - ipc_port_t port; + ipc_port_t port; port = ipc_port_alloc_kernel(); - if (port == IP_NULL) + if (port == IP_NULL) { panic("ipc_processor_init"); + } processor->processor_self = port; } @@ -208,14 +213,14 @@ ipc_processor_init( */ void ipc_processor_enable( - processor_t processor) + processor_t processor) { - ipc_port_t myport; + ipc_port_t myport; myport = processor->processor_self; ipc_kobject_set(myport, (ipc_kobject_t) processor, IKOT_PROCESSOR); } - + /* * ipc_pset_init: * @@ -224,18 +229,20 @@ ipc_processor_enable( void ipc_pset_init( - processor_set_t pset) + processor_set_t pset) { - ipc_port_t port; + ipc_port_t port; port = ipc_port_alloc_kernel(); - if (port == IP_NULL) + if (port == IP_NULL) { panic("ipc_pset_init"); + } pset->pset_self = port; port = ipc_port_alloc_kernel(); - if (port == IP_NULL) + if (port == IP_NULL) { panic("ipc_pset_init"); + } pset->pset_name_self = port; } @@ -246,7 +253,7 @@ ipc_pset_init( */ void ipc_pset_enable( - processor_set_t pset) + processor_set_t pset) { ipc_kobject_set(pset->pset_self, (ipc_kobject_t) pset, IKOT_PSET); ipc_kobject_set(pset->pset_name_self, (ipc_kobject_t) pset, IKOT_PSET_NAME); @@ -259,15 +266,16 @@ ipc_pset_enable( */ kern_return_t processor_set_default( - host_t host, - processor_set_t *pset) + host_t host, + processor_set_t *pset) { - if (host == HOST_NULL) - return(KERN_INVALID_ARGUMENT); + if (host == HOST_NULL) { + return KERN_INVALID_ARGUMENT; + } *pset = &pset0; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -281,7 +289,7 @@ processor_set_default( host_t convert_port_to_host( - ipc_port_t port) + ipc_port_t port) { host_t host = HOST_NULL; @@ -306,15 +314,16 @@ convert_port_to_host( host_t convert_port_to_host_priv( - ipc_port_t port) + ipc_port_t port) { host_t host = HOST_NULL; if (IP_VALID(port)) { ip_lock(port); if (ip_active(port) && - (ip_kotype(port) == IKOT_HOST_PRIV)) + (ip_kotype(port) == IKOT_HOST_PRIV)) { host = (host_t) port->ip_kobject; + } ip_unlock(port); } @@ -333,15 +342,16 @@ convert_port_to_host_priv( processor_t convert_port_to_processor( - ipc_port_t port) + ipc_port_t port) { processor_t processor = PROCESSOR_NULL; if (IP_VALID(port)) { ip_lock(port); if (ip_active(port) && - (ip_kotype(port) == IKOT_PROCESSOR)) + (ip_kotype(port) == IKOT_PROCESSOR)) { processor = (processor_t) port->ip_kobject; + } ip_unlock(port); } @@ -360,7 +370,7 @@ convert_port_to_processor( processor_set_t convert_port_to_pset( - ipc_port_t port) + ipc_port_t port) { boolean_t r; processor_set_t pset = PROCESSOR_SET_NULL; @@ -386,7 +396,7 @@ convert_port_to_pset( processor_set_name_t convert_port_to_pset_name( - ipc_port_t port) + ipc_port_t port) { boolean_t r; processor_set_t pset = PROCESSOR_SET_NULL; @@ -407,15 
+417,15 @@ ref_pset_port_locked(ipc_port_t port, boolean_t matchn, processor_set_t *ppset) pset = PROCESSOR_SET_NULL; if (ip_active(port) && - ((ip_kotype(port) == IKOT_PSET) || - (matchn && (ip_kotype(port) == IKOT_PSET_NAME)))) { + ((ip_kotype(port) == IKOT_PSET) || + (matchn && (ip_kotype(port) == IKOT_PSET_NAME)))) { pset = (processor_set_t) port->ip_kobject; } *ppset = pset; ip_unlock(port); - return (TRUE); + return TRUE; } /* @@ -429,7 +439,7 @@ ref_pset_port_locked(ipc_port_t port, boolean_t matchn, processor_set_t *ppset) ipc_port_t convert_host_to_port( - host_t host) + host_t host) { ipc_port_t port; @@ -449,12 +459,13 @@ convert_host_to_port( ipc_port_t convert_processor_to_port( - processor_t processor) + processor_t processor) { ipc_port_t port = processor->processor_self; - if (port != IP_NULL) + if (port != IP_NULL) { port = ipc_port_make_send(port); + } return port; } @@ -470,12 +481,13 @@ convert_processor_to_port( ipc_port_t convert_pset_to_port( - processor_set_t pset) + processor_set_t pset) { ipc_port_t port = pset->pset_self; - if (port != IP_NULL) + if (port != IP_NULL) { port = ipc_port_make_send(port); + } return port; } @@ -492,12 +504,13 @@ convert_pset_to_port( ipc_port_t convert_pset_name_to_port( - processor_set_name_t pset) + processor_set_name_t pset) { ipc_port_t port = pset->pset_name_self; - if (port != IP_NULL) + if (port != IP_NULL) { port = ipc_port_make_send(port); + } return port; } @@ -520,8 +533,9 @@ convert_port_to_host_security( if (IP_VALID(port)) { ip_lock(port); if (ip_active(port) && - (ip_kotype(port) == IKOT_HOST_SECURITY)) + (ip_kotype(port) == IKOT_HOST_SECURITY)) { host = (host_t) port->ip_kobject; + } ip_unlock(port); } @@ -546,19 +560,19 @@ convert_port_to_host_security( */ kern_return_t host_set_exception_ports( - host_priv_t host_priv, - exception_mask_t exception_mask, - ipc_port_t new_port, - exception_behavior_t new_behavior, - thread_state_flavor_t new_flavor) + host_priv_t host_priv, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor) { - int i; - ipc_port_t old_port[EXC_TYPES_COUNT]; + int i; + ipc_port_t old_port[EXC_TYPES_COUNT]; #if CONFIG_MACF struct label *deferred_labels[EXC_TYPES_COUNT]; struct label *new_label; -#endif +#endif if (host_priv == HOST_PRIV_NULL) { return KERN_INVALID_ARGUMENT; @@ -584,12 +598,14 @@ host_set_exception_ports( * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in * osfmk/mach/ARCHITECTURE/thread_status.h */ - if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) - return (KERN_INVALID_ARGUMENT); + if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) { + return KERN_INVALID_ARGUMENT; + } #if CONFIG_MACF - if (mac_task_check_set_host_exception_ports(current_task(), exception_mask) != 0) + if (mac_task_check_set_host_exception_ports(current_task(), exception_mask) != 0) { return KERN_NO_ACCESS; + } new_label = mac_exc_create_label_for_current_proc(); @@ -617,13 +633,13 @@ host_set_exception_ports( if ((exception_mask & (1 << i)) #if CONFIG_MACF - && mac_exc_update_action_label(&host_priv->exc_actions[i], new_label) == 0 + && mac_exc_update_action_label(&host_priv->exc_actions[i], new_label) == 0 #endif - ) { + ) { old_port[i] = host_priv->exc_actions[i].port; host_priv->exc_actions[i].port = - ipc_port_copy_send(new_port); + ipc_port_copy_send(new_port); host_priv->exc_actions[i].behavior = new_behavior; host_priv->exc_actions[i].flavor = new_flavor; } else { @@ -639,10 +655,11 @@ 
host_set_exception_ports( #if CONFIG_MACF mac_exc_free_label(new_label); #endif - + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { - if (IP_VALID(old_port[i])) + if (IP_VALID(old_port[i])) { ipc_port_release_send(old_port[i]); + } #if CONFIG_MACF if (deferred_labels[i] != NULL) { /* Deferred label went unused: Another thread has completed the lazy initialization. */ @@ -650,10 +667,11 @@ host_set_exception_ports( } #endif } - if (IP_VALID(new_port)) /* consume send right */ + if (IP_VALID(new_port)) { /* consume send right */ ipc_port_release_send(new_port); + } - return KERN_SUCCESS; + return KERN_SUCCESS; } /* @@ -676,24 +694,25 @@ host_set_exception_ports( */ kern_return_t host_get_exception_ports( - host_priv_t host_priv, + host_priv_t host_priv, exception_mask_t exception_mask, - exception_mask_array_t masks, - mach_msg_type_number_t * CountCnt, - exception_port_array_t ports, + exception_mask_array_t masks, + mach_msg_type_number_t * CountCnt, + exception_port_array_t ports, exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors ) + thread_state_flavor_array_t flavors ) { - unsigned int i, j, count; + unsigned int i, j, count; - if (host_priv == HOST_PRIV_NULL) + if (host_priv == HOST_PRIV_NULL) { return KERN_INVALID_ARGUMENT; + } if (exception_mask & ~EXC_MASK_VALID) { return KERN_INVALID_ARGUMENT; } - assert (host_priv == &realhost); + assert(host_priv == &realhost); host_lock(host_priv); @@ -707,9 +726,8 @@ host_get_exception_ports( * set corresponding mask for this exception. */ if (host_priv->exc_actions[i].port == ports[j] && - host_priv->exc_actions[i].behavior == behaviors[j] - && host_priv->exc_actions[i].flavor == flavors[j]) - { + host_priv->exc_actions[i].behavior == behaviors[j] + && host_priv->exc_actions[i].flavor == flavors[j]) { masks[j] |= (1 << i); break; } @@ -717,7 +735,7 @@ host_get_exception_ports( if (j == count) { masks[j] = (1 << i); ports[j] = - ipc_port_copy_send(host_priv->exc_actions[i].port); + ipc_port_copy_send(host_priv->exc_actions[i].port); behaviors[j] = host_priv->exc_actions[i].behavior; flavors[j] = host_priv->exc_actions[i].flavor; count++; @@ -735,29 +753,30 @@ host_get_exception_ports( kern_return_t host_swap_exception_ports( - host_priv_t host_priv, - exception_mask_t exception_mask, - ipc_port_t new_port, - exception_behavior_t new_behavior, - thread_state_flavor_t new_flavor, - exception_mask_array_t masks, - mach_msg_type_number_t * CountCnt, - exception_port_array_t ports, + host_priv_t host_priv, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor, + exception_mask_array_t masks, + mach_msg_type_number_t * CountCnt, + exception_port_array_t ports, exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors ) + thread_state_flavor_array_t flavors ) { - unsigned int i, - j, - count; - ipc_port_t old_port[EXC_TYPES_COUNT]; + unsigned int i, + j, + count; + ipc_port_t old_port[EXC_TYPES_COUNT]; #if CONFIG_MACF struct label *deferred_labels[EXC_TYPES_COUNT]; struct label *new_label; -#endif +#endif - if (host_priv == HOST_PRIV_NULL) + if (host_priv == HOST_PRIV_NULL) { return KERN_INVALID_ARGUMENT; + } if (exception_mask & ~EXC_MASK_VALID) { return KERN_INVALID_ARGUMENT; @@ -774,15 +793,17 @@ host_swap_exception_ports( } } - if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) - return (KERN_INVALID_ARGUMENT); + if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) { + return KERN_INVALID_ARGUMENT; + } 
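/*
 * Illustrative sketch (not part of the upstream diff): from user space the
 * host_set_exception_ports() routine above is reached over the host-priv
 * port via MIG. A minimal privileged caller redirecting EXC_BAD_ACCESS
 * host-wide might look like this; the mask and flavor pass the same
 * EXC_MASK_VALID / VALID_THREAD_STATE_FLAVOR checks shown above. The
 * wrapper name is hypothetical; error handling is elided.
 */
kern_return_t
my_install_host_handler(host_priv_t host_priv, mach_port_t handler)
{
	return host_set_exception_ports(host_priv, EXC_MASK_BAD_ACCESS,
	    handler, EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}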
#if CONFIG_MACF - if (mac_task_check_set_host_exception_ports(current_task(), exception_mask) != 0) + if (mac_task_check_set_host_exception_ports(current_task(), exception_mask) != 0) { return KERN_NO_ACCESS; + } new_label = mac_exc_create_label_for_current_proc(); - + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { if (host_priv->exc_actions[i].label == NULL) { deferred_labels[i] = mac_exc_create_label(); @@ -795,7 +816,7 @@ host_swap_exception_ports( host_lock(host_priv); assert(EXC_TYPES_COUNT > FIRST_EXCEPTION); - for (count=0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; i++) { + for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; i++) { #if CONFIG_MACF if (host_priv->exc_actions[i].label == NULL) { // Lazy initialization (see ipc_port_init). @@ -806,18 +827,17 @@ host_swap_exception_ports( if ((exception_mask & (1 << i)) #if CONFIG_MACF - && mac_exc_update_action_label(&host_priv->exc_actions[i], new_label) == 0 + && mac_exc_update_action_label(&host_priv->exc_actions[i], new_label) == 0 #endif - ) { + ) { for (j = 0; j < count; j++) { /* * search for an identical entry, if found * set corresponding mask for this exception. */ if (host_priv->exc_actions[i].port == ports[j] && - host_priv->exc_actions[i].behavior == behaviors[j] - && host_priv->exc_actions[i].flavor == flavors[j]) - { + host_priv->exc_actions[i].behavior == behaviors[j] + && host_priv->exc_actions[i].flavor == flavors[j]) { masks[j] |= (1 << i); break; } @@ -825,14 +845,14 @@ host_swap_exception_ports( if (j == count) { masks[j] = (1 << i); ports[j] = - ipc_port_copy_send(host_priv->exc_actions[i].port); + ipc_port_copy_send(host_priv->exc_actions[i].port); behaviors[j] = host_priv->exc_actions[i].behavior; flavors[j] = host_priv->exc_actions[i].flavor; count++; } old_port[i] = host_priv->exc_actions[i].port; host_priv->exc_actions[i].port = - ipc_port_copy_send(new_port); + ipc_port_copy_send(new_port); host_priv->exc_actions[i].behavior = new_behavior; host_priv->exc_actions[i].flavor = new_flavor; } else { @@ -844,13 +864,14 @@ host_swap_exception_ports( #if CONFIG_MACF mac_exc_free_label(new_label); #endif - + /* * Consume send rights without any lock held. */ while (--i >= FIRST_EXCEPTION) { - if (IP_VALID(old_port[i])) + if (IP_VALID(old_port[i])) { ipc_port_release_send(old_port[i]); + } #if CONFIG_MACF if (deferred_labels[i] != NULL) { mac_exc_free_label(deferred_labels[i]); // Label unused. @@ -858,8 +879,9 @@ host_swap_exception_ports( #endif } - if (IP_VALID(new_port)) /* consume send right */ + if (IP_VALID(new_port)) { /* consume send right */ ipc_port_release_send(new_port); + } *CountCnt = count; return KERN_SUCCESS; diff --git a/osfmk/kern/ipc_host.h b/osfmk/kern/ipc_host.h index e34d69806..6f4aafa57 100644 --- a/osfmk/kern/ipc_host.h +++ b/osfmk/kern/ipc_host.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _KERN_IPC_HOST_H_ -#define _KERN_IPC_HOST_H_ +#ifndef _KERN_IPC_HOST_H_ +#define _KERN_IPC_HOST_H_ #include #include @@ -67,43 +67,43 @@ extern void ipc_host_init(void); /* Initialize ipc access to processor by allocating a port */ extern void ipc_processor_init( - processor_t processor); + processor_t processor); /* Enable ipc control of processor by setting port object */ extern void ipc_processor_enable( - processor_t processor); + processor_t processor); /* Initialize ipc control of a processor set */ extern void ipc_pset_init( - processor_set_t pset); + processor_set_t pset); /* Enable ipc access to a processor set */ extern void ipc_pset_enable( - processor_set_t pset); + processor_set_t pset); /* Initialize ipc control of a clock */ extern void ipc_clock_init( - clock_t clock); + clock_t clock); /* Enable ipc access to a clock */ extern void ipc_clock_enable( - clock_t clock); + clock_t clock); /* Convert from a port to a clock */ extern clock_t convert_port_to_clock( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a clock control */ extern clock_t convert_port_to_clock_ctrl( - ipc_port_t port); + ipc_port_t port); /* Convert from a clock to a port */ extern ipc_port_t convert_clock_to_port( - clock_t clock); + clock_t clock); /* Convert from a clock control to a port */ extern ipc_port_t convert_clock_ctrl_to_port( - clock_t clock); + clock_t clock); /* Convert from a clock name to a clock pointer */ extern clock_t port_name_to_clock( @@ -111,47 +111,47 @@ extern clock_t port_name_to_clock( /* Convert from a port to a host */ extern host_t convert_port_to_host( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a host privilege port */ extern host_t convert_port_to_host_priv( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a host paging port */ extern host_t convert_port_to_host_paging( - ipc_port_t port); + ipc_port_t port); /* Convert from a host to a port */ extern ipc_port_t convert_host_to_port( - host_t 
host); + host_t host); /* Convert from a port to a processor */ extern processor_t convert_port_to_processor( - ipc_port_t port); + ipc_port_t port); /* Convert from a processor to a port */ extern ipc_port_t convert_processor_to_port( - processor_t processor); + processor_t processor); /* Convert from a port to a processor set */ extern processor_set_t convert_port_to_pset( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a processor set name */ extern processor_set_t convert_port_to_pset_name( - ipc_port_t port); + ipc_port_t port); /* Convert from a processor set to a port */ extern ipc_port_t convert_pset_to_port( - processor_set_t processor); + processor_set_t processor); /* Convert from a processor set name to a port */ extern ipc_port_t convert_pset_name_to_port( - processor_set_t processor); + processor_set_t processor); /* Convert from a port to a host security port */ extern host_t convert_port_to_host_security( - ipc_port_t port); + ipc_port_t port); -#endif /* _KERN_IPC_HOST_H_ */ +#endif /* _KERN_IPC_HOST_H_ */ diff --git a/osfmk/kern/ipc_kobject.c b/osfmk/kern/ipc_kobject.c index 2a216b2fb..2d63117cf 100644 --- a/osfmk/kern/ipc_kobject.c +++ b/osfmk/kern/ipc_kobject.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -110,16 +110,16 @@ #include -#if CONFIG_AUDIT +#if CONFIG_AUDIT #include #endif #if MACH_MACHINE_ROUTINES #include -#endif /* MACH_MACHINE_ROUTINES */ -#if XK_PROXY +#endif /* MACH_MACHINE_ROUTINES */ +#if XK_PROXY #include -#endif /* XK_PROXY */ +#endif /* XK_PROXY */ #include #include @@ -149,14 +149,14 @@ extern int proc_pid(void *p); */ boolean_t ipc_kobject_notify( - mach_msg_header_t *request_header, - mach_msg_header_t *reply_header); + mach_msg_header_t *request_header, + mach_msg_header_t *reply_header); typedef struct { - mach_msg_id_t num; - mig_routine_t routine; + mach_msg_id_t num; + mig_routine_t routine; int size; -#if MACH_COUNTERS +#if MACH_COUNTERS mach_counter_t callcount; #endif } mig_hash_t; @@ -165,7 +165,7 @@ typedef struct { #define MIG_HASH(x) (x) #ifndef max -#define max(a,b) (((a) > (b)) ? (a) : (b)) +#define max(a, b) (((a) > (b)) ? (a) : (b)) #endif /* max */ static mig_hash_t mig_buckets[MAX_MIG_ENTRIES]; @@ -175,16 +175,16 @@ static mach_msg_size_t mig_reply_size = sizeof(mig_reply_error_t); const struct mig_subsystem *mig_e[] = { - (const struct mig_subsystem *)&mach_vm_subsystem, - (const struct mig_subsystem *)&mach_port_subsystem, - (const struct mig_subsystem *)&mach_host_subsystem, - (const struct mig_subsystem *)&host_priv_subsystem, - (const struct mig_subsystem *)&host_security_subsystem, - (const struct mig_subsystem *)&clock_subsystem, - (const struct mig_subsystem *)&clock_priv_subsystem, - (const struct mig_subsystem *)&processor_subsystem, - (const struct mig_subsystem *)&processor_set_subsystem, - (const struct mig_subsystem *)&is_iokit_subsystem, + (const struct mig_subsystem *)&mach_vm_subsystem, + (const struct mig_subsystem *)&mach_port_subsystem, + (const struct mig_subsystem *)&mach_host_subsystem, + (const struct mig_subsystem *)&host_priv_subsystem, + (const struct mig_subsystem *)&host_security_subsystem, + (const struct mig_subsystem *)&clock_subsystem, + (const struct mig_subsystem *)&clock_priv_subsystem, + (const struct mig_subsystem *)&processor_subsystem, + (const struct mig_subsystem *)&processor_set_subsystem, + (const struct mig_subsystem *)&is_iokit_subsystem, (const struct mig_subsystem *)&lock_set_subsystem, (const struct mig_subsystem *)&task_subsystem, (const struct mig_subsystem *)&thread_act_subsystem, @@ -197,58 +197,60 @@ const struct mig_subsystem *mig_e[] = { (const struct mig_subsystem *)&memory_entry_subsystem, #if XK_PROXY - (const struct mig_subsystem *)&do_uproxy_xk_uproxy_subsystem, + (const struct mig_subsystem *)&do_uproxy_xk_uproxy_subsystem, #endif /* XK_PROXY */ #if MACH_MACHINE_ROUTINES - (const struct mig_subsystem *)&MACHINE_SUBSYSTEM, + (const struct mig_subsystem *)&MACHINE_SUBSYSTEM, #endif /* MACH_MACHINE_ROUTINES */ #if MCMSG && iPSC860 (const struct mig_subsystem *)&mcmsg_info_subsystem, #endif /* MCMSG && iPSC860 */ - (const struct mig_subsystem *)&catch_exc_subsystem, - (const struct mig_subsystem *)&catch_mach_exc_subsystem, - + (const struct mig_subsystem *)&catch_exc_subsystem, + (const struct mig_subsystem *)&catch_mach_exc_subsystem, }; void mig_init(void) { - unsigned int i, n = sizeof(mig_e)/sizeof(const struct mig_subsystem *); - int howmany; - mach_msg_id_t j, pos, nentry, range; - - for (i = 0; i < n; i++) { - range = mig_e[i]->end - mig_e[i]->start; - if (!mig_e[i]->start || range < 0) - panic("the msgh_ids in mig_e[] aren't valid!"); - - for (j = 0; j < range; j++) { - if (mig_e[i]->routine[j].stub_routine) { - /* Only put real entries in the table */ - nentry = j + 
mig_e[i]->start; - for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1; - mig_buckets[pos].num; - pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) { - if (mig_buckets[pos].num == nentry) { - printf("message id = %d\n", nentry); - panic("multiple entries with the same msgh_id"); - } - if (howmany == MAX_MIG_ENTRIES) - panic("the mig dispatch table is too small"); - } - - mig_buckets[pos].num = nentry; - mig_buckets[pos].routine = mig_e[i]->routine[j].stub_routine; - if (mig_e[i]->routine[j].max_reply_msg) - mig_buckets[pos].size = mig_e[i]->routine[j].max_reply_msg; - else - mig_buckets[pos].size = mig_e[i]->maxsize; - - mig_table_max_displ = max(howmany, mig_table_max_displ); - } + unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_subsystem *); + int howmany; + mach_msg_id_t j, pos, nentry, range; + + for (i = 0; i < n; i++) { + range = mig_e[i]->end - mig_e[i]->start; + if (!mig_e[i]->start || range < 0) { + panic("the msgh_ids in mig_e[] aren't valid!"); + } + + for (j = 0; j < range; j++) { + if (mig_e[i]->routine[j].stub_routine) { + /* Only put real entries in the table */ + nentry = j + mig_e[i]->start; + for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1; + mig_buckets[pos].num; + pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) { + if (mig_buckets[pos].num == nentry) { + printf("message id = %d\n", nentry); + panic("multiple entries with the same msgh_id"); + } + if (howmany == MAX_MIG_ENTRIES) { + panic("the mig dispatch table is too small"); + } + } + + mig_buckets[pos].num = nentry; + mig_buckets[pos].routine = mig_e[i]->routine[j].stub_routine; + if (mig_e[i]->routine[j].max_reply_msg) { + mig_buckets[pos].size = mig_e[i]->routine[j].max_reply_msg; + } else { + mig_buckets[pos].size = mig_e[i]->maxsize; + } + + mig_table_max_displ = max(howmany, mig_table_max_displ); + } + } } - } - printf("mig_table_max_displ = %d\n", mig_table_max_displ); + printf("mig_table_max_displ = %d\n", mig_table_max_displ); } @@ -264,7 +266,7 @@ mig_init(void) ipc_kmsg_t ipc_kobject_server( - ipc_kmsg_t request, + ipc_kmsg_t request, mach_msg_option_t __unused option) { mach_msg_size_t reply_size; @@ -283,26 +285,26 @@ ipc_kobject_server( * Find out corresponding mig_hash entry if any */ { - unsigned int i = (unsigned int)MIG_HASH(request_msgh_id); - int max_iter = mig_table_max_displ; - - do { - ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES]; - } while (request_msgh_id != ptr->num && ptr->num && --max_iter); - - if (!ptr->routine || request_msgh_id != ptr->num) { - ptr = (mig_hash_t *)0; - reply_size = mig_reply_size; - } else { - reply_size = ptr->size; -#if MACH_COUNTER - ptr->callcount++; + unsigned int i = (unsigned int)MIG_HASH(request_msgh_id); + int max_iter = mig_table_max_displ; + + do { + ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES]; + } while (request_msgh_id != ptr->num && ptr->num && --max_iter); + + if (!ptr->routine || request_msgh_id != ptr->num) { + ptr = (mig_hash_t *)0; + reply_size = mig_reply_size; + } else { + reply_size = ptr->size; +#if MACH_COUNTER + ptr->callcount++; #endif - } + } } /* round up for trailer size */ - reply_size += MAX_TRAILER_SIZE; + reply_size += MAX_TRAILER_SIZE; reply = ipc_kmsg_alloc(reply_size); if (reply == IKM_NULL) { @@ -316,28 +318,28 @@ ipc_kobject_server( * Initialize reply message. */ { -#define InP ((mach_msg_header_t *) request->ikm_header) -#define OutP ((mig_reply_error_t *) reply->ikm_header) - - /* - * MIG should really assure no data leakage - - * but until it does, pessimistically zero the - * whole reply buffer. 
- */ - bzero((void *)OutP, reply_size); - - OutP->NDR = NDR_record; - OutP->Head.msgh_size = sizeof(mig_reply_error_t); - - OutP->Head.msgh_bits = - MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0); - OutP->Head.msgh_remote_port = InP->msgh_local_port; - OutP->Head.msgh_local_port = MACH_PORT_NULL; - OutP->Head.msgh_voucher_port = MACH_PORT_NULL; - OutP->Head.msgh_id = InP->msgh_id + 100; - -#undef InP -#undef OutP +#define InP ((mach_msg_header_t *) request->ikm_header) +#define OutP ((mig_reply_error_t *) reply->ikm_header) + + /* + * MIG should really assure no data leakage - + * but until it does, pessimistically zero the + * whole reply buffer. + */ + bzero((void *)OutP, reply_size); + + OutP->NDR = NDR_record; + OutP->Head.msgh_size = sizeof(mig_reply_error_t); + + OutP->Head.msgh_bits = + MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0); + OutP->Head.msgh_remote_port = InP->msgh_local_port; + OutP->Head.msgh_local_port = MACH_PORT_NULL; + OutP->Head.msgh_voucher_port = MACH_PORT_NULL; + OutP->Head.msgh_id = InP->msgh_id + 100; + +#undef InP +#undef OutP } /* @@ -346,50 +348,49 @@ ipc_kobject_server( */ ipc_kmsg_trace_send(request, option); { - if (ptr) { - /* - * Check if the port is a task port, if its a task port then - * snapshot the task exec token before the mig routine call. - */ - ipc_port_t port = request->ikm_header->msgh_remote_port; - if (IP_VALID(port) && ip_kotype(port) == IKOT_TASK) { - task = convert_port_to_task_with_exec_token(port, &exec_token); - } + if (ptr) { + /* + * Check if the port is a task port, if its a task port then + * snapshot the task exec token before the mig routine call. + */ + ipc_port_t port = request->ikm_header->msgh_remote_port; + if (IP_VALID(port) && ip_kotype(port) == IKOT_TASK) { + task = convert_port_to_task_with_exec_token(port, &exec_token); + } - (*ptr->routine)(request->ikm_header, reply->ikm_header); + (*ptr->routine)(request->ikm_header, reply->ikm_header); - /* Check if the exec token changed during the mig routine */ - if (task != TASK_NULL) { - if (exec_token != task->exec_token) { - exec_token_changed = TRUE; + /* Check if the exec token changed during the mig routine */ + if (task != TASK_NULL) { + if (exec_token != task->exec_token) { + exec_token_changed = TRUE; + } + task_deallocate(task); } - task_deallocate(task); - } - kernel_task->messages_received++; - } - else { - if (!ipc_kobject_notify(request->ikm_header, reply->ikm_header)){ + kernel_task->messages_received++; + } else { + if (!ipc_kobject_notify(request->ikm_header, reply->ikm_header)) { #if DEVELOPMENT || DEBUG - printf("ipc_kobject_server: bogus kernel message, id=%d\n", - request->ikm_header->msgh_id); -#endif /* DEVELOPMENT || DEBUG */ - _MIG_MSGID_INVALID(request->ikm_header->msgh_id); - - ((mig_reply_error_t *) reply->ikm_header)->RetCode - = MIG_BAD_ID; + printf("ipc_kobject_server: bogus kernel message, id=%d\n", + request->ikm_header->msgh_id); +#endif /* DEVELOPMENT || DEBUG */ + _MIG_MSGID_INVALID(request->ikm_header->msgh_id); + + ((mig_reply_error_t *) reply->ikm_header)->RetCode + = MIG_BAD_ID; + } else { + kernel_task->messages_received++; + } } - else - kernel_task->messages_received++; - } - kernel_task->messages_sent++; + kernel_task->messages_sent++; } /* * Destroy destination. The following code differs from * ipc_object_destroy in that we release the send-once * right instead of generating a send-once notification - * (which would bring us here again, creating a loop). 
+ * (which would bring us here again, creating a loop). * It also differs in that we only expect send or * send-once rights, never receive rights. * @@ -398,16 +399,16 @@ ipc_kobject_server( */ destp = (ipc_port_t *) &request->ikm_header->msgh_remote_port; switch (MACH_MSGH_BITS_REMOTE(request->ikm_header->msgh_bits)) { - case MACH_MSG_TYPE_PORT_SEND: - ipc_port_release_send(*destp); - break; - - case MACH_MSG_TYPE_PORT_SEND_ONCE: - ipc_port_release_sonce(*destp); - break; - - default: - panic("ipc_kobject_server: strange destination rights"); + case MACH_MSG_TYPE_PORT_SEND: + ipc_port_release_send(*destp); + break; + + case MACH_MSG_TYPE_PORT_SEND_ONCE: + ipc_port_release_sonce(*destp); + break; + + default: + panic("ipc_kobject_server: strange destination rights"); } *destp = IP_NULL; @@ -417,16 +418,17 @@ ipc_kobject_server( */ if (IP_VALID(request->ikm_voucher)) { assert(MACH_MSG_TYPE_PORT_SEND == - MACH_MSGH_BITS_VOUCHER(request->ikm_header->msgh_bits)); + MACH_MSGH_BITS_VOUCHER(request->ikm_header->msgh_bits)); ipc_port_release_send(request->ikm_voucher); request->ikm_voucher = IP_NULL; } - if (!(reply->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) && - ((mig_reply_error_t *) reply->ikm_header)->RetCode != KERN_SUCCESS) - kr = ((mig_reply_error_t *) reply->ikm_header)->RetCode; - else + if (!(reply->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) && + ((mig_reply_error_t *) reply->ikm_header)->RetCode != KERN_SUCCESS) { + kr = ((mig_reply_error_t *) reply->ikm_header)->RetCode; + } else { kr = KERN_SUCCESS; + } if ((kr == KERN_SUCCESS) || (kr == MIG_NO_REPLY)) { /* @@ -437,7 +439,6 @@ ipc_kobject_server( * to free the kmsg. */ ipc_kmsg_free(request); - } else { /* * The message contents of the request are intact. @@ -474,8 +475,8 @@ ipc_kobject_server( */ #if DEVELOPMENT || DEBUG printf("%s: refusing to send reply to kobject %d port (id:%d)\n", - __func__, ip_kotype(replyp), request_msgh_id); -#endif /* DEVELOPMENT || DEBUG */ + __func__, ip_kotype(replyp), request_msgh_id); +#endif /* DEVELOPMENT || DEBUG */ ipc_kmsg_destroy(reply); return IKM_NULL; } @@ -496,23 +497,23 @@ ipc_kobject_server( * Initialize the new reply message. 
*/ { -#define OutP_new ((mig_reply_error_t *) new_reply->ikm_header) -#define OutP_old ((mig_reply_error_t *) reply->ikm_header) +#define OutP_new ((mig_reply_error_t *) new_reply->ikm_header) +#define OutP_old ((mig_reply_error_t *) reply->ikm_header) - bzero((void *)OutP_new, reply_size); + bzero((void *)OutP_new, reply_size); - OutP_new->NDR = OutP_old->NDR; - OutP_new->Head.msgh_size = sizeof(mig_reply_error_t); - OutP_new->Head.msgh_bits = OutP_old->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX; - OutP_new->Head.msgh_remote_port = OutP_old->Head.msgh_remote_port; - OutP_new->Head.msgh_local_port = MACH_PORT_NULL; - OutP_new->Head.msgh_voucher_port = MACH_PORT_NULL; - OutP_new->Head.msgh_id = OutP_old->Head.msgh_id; + OutP_new->NDR = OutP_old->NDR; + OutP_new->Head.msgh_size = sizeof(mig_reply_error_t); + OutP_new->Head.msgh_bits = OutP_old->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX; + OutP_new->Head.msgh_remote_port = OutP_old->Head.msgh_remote_port; + OutP_new->Head.msgh_local_port = MACH_PORT_NULL; + OutP_new->Head.msgh_voucher_port = MACH_PORT_NULL; + OutP_new->Head.msgh_id = OutP_old->Head.msgh_id; - /* Set the error as KERN_INVALID_TASK */ - OutP_new->RetCode = KERN_INVALID_TASK; + /* Set the error as KERN_INVALID_TASK */ + OutP_new->RetCode = KERN_INVALID_TASK; -#undef OutP_new +#undef OutP_new #undef OutP_old } @@ -526,12 +527,12 @@ ipc_kobject_server( reply = new_reply; } - trailer = (mach_msg_format_0_trailer_t *) - ((vm_offset_t)reply->ikm_header + (int)reply->ikm_header->msgh_size); + trailer = (mach_msg_format_0_trailer_t *) + ((vm_offset_t)reply->ikm_header + (int)reply->ikm_header->msgh_size); - trailer->msgh_sender = KERNEL_SECURITY_TOKEN; - trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; - trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; + trailer->msgh_sender = KERNEL_SECURITY_TOKEN; + trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0; + trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; return reply; } @@ -549,9 +550,9 @@ ipc_kobject_server( */ void ipc_kobject_set( - ipc_port_t port, - ipc_kobject_t kobject, - ipc_kobject_type_t type) + ipc_port_t port, + ipc_kobject_t kobject, + ipc_kobject_type_t type) { ip_lock(port); ipc_kobject_set_atomically(port, kobject, type); @@ -560,15 +561,15 @@ ipc_kobject_set( void ipc_kobject_set_atomically( - ipc_port_t port, - ipc_kobject_t kobject, - ipc_kobject_type_t type) + ipc_port_t port, + ipc_kobject_t kobject, + ipc_kobject_type_t type) { assert(type == IKOT_NONE || ip_active(port)); -#if MACH_ASSERT +#if MACH_ASSERT port->ip_spares[2] = (port->ip_bits & IO_BITS_KOTYPE); -#endif /* MACH_ASSERT */ - port->ip_bits = (port->ip_bits &~ IO_BITS_KOTYPE) | type; +#endif /* MACH_ASSERT */ + port->ip_bits = (port->ip_bits & ~IO_BITS_KOTYPE) | type; port->ip_kobject = kobject; } @@ -588,10 +589,9 @@ ipc_kobject_set_atomically( void ipc_kobject_destroy( - ipc_port_t port) + ipc_port_t port) { switch (ip_kotype(port)) { - case IKOT_TIMER: mk_timer_port_destroy(port); break; @@ -621,7 +621,7 @@ ipc_kobject_notify( ((mig_reply_error_t *) reply_header)->RetCode = MIG_NO_REPLY; trailer = (mach_msg_max_trailer_t *) - ((vm_offset_t)request_header + request_header->msgh_size); + ((vm_offset_t)request_header + request_header->msgh_size); /* * The kobject notification is privileged and can change the @@ -629,95 +629,93 @@ ipc_kobject_notify( * that the message wasn't faked! 
*/ if (0 != bcmp(&trailer->msgh_audit, &KERNEL_AUDIT_TOKEN, - sizeof(trailer->msgh_audit))) { + sizeof(trailer->msgh_audit))) { return FALSE; } if (0 != bcmp(&trailer->msgh_sender, &KERNEL_SECURITY_TOKEN, - sizeof(trailer->msgh_sender))) { + sizeof(trailer->msgh_sender))) { return FALSE; } switch (request_header->msgh_id) { - case MACH_NOTIFY_NO_SENDERS: - switch (ip_kotype(port)) { - case IKOT_VOUCHER: - ipc_voucher_notify(request_header); - return TRUE; - - case IKOT_VOUCHER_ATTR_CONTROL: - ipc_voucher_attr_control_notify(request_header); - return TRUE; - - case IKOT_SEMAPHORE: - semaphore_notify(request_header); - return TRUE; - - case IKOT_TASK: - task_port_notify(request_header); - return TRUE; - - case IKOT_NAMED_ENTRY: - ip_lock(port); - - /* - * Bring the sequence number and mscount in - * line with ipc_port_destroy assertion. - */ - port->ip_mscount = 0; - port->ip_messages.imq_seqno = 0; - ipc_port_destroy(port); /* releases lock */ - return TRUE; - - case IKOT_UPL: - upl_no_senders( - request_header->msgh_remote_port, - (mach_port_mscount_t) - ((mach_no_senders_notification_t *) - request_header)->not_count); - reply_header->msgh_remote_port = MACH_PORT_NULL; - return TRUE; - -#if CONFIG_AUDIT - case IKOT_AU_SESSIONPORT: - audit_session_nosenders(request_header); - return TRUE; + case MACH_NOTIFY_NO_SENDERS: + switch (ip_kotype(port)) { + case IKOT_VOUCHER: + ipc_voucher_notify(request_header); + return TRUE; + + case IKOT_VOUCHER_ATTR_CONTROL: + ipc_voucher_attr_control_notify(request_header); + return TRUE; + + case IKOT_SEMAPHORE: + semaphore_notify(request_header); + return TRUE; + + case IKOT_TASK: + task_port_notify(request_header); + return TRUE; + + case IKOT_NAMED_ENTRY: + ip_lock(port); + + /* + * Bring the sequence number and mscount in + * line with ipc_port_destroy assertion. 
+ */ + port->ip_mscount = 0; + port->ip_messages.imq_seqno = 0; + ipc_port_destroy(port); /* releases lock */ + return TRUE; + + case IKOT_UPL: + upl_no_senders( + request_header->msgh_remote_port, + (mach_port_mscount_t) + ((mach_no_senders_notification_t *) + request_header)->not_count); + reply_header->msgh_remote_port = MACH_PORT_NULL; + return TRUE; + +#if CONFIG_AUDIT + case IKOT_AU_SESSIONPORT: + audit_session_nosenders(request_header); + return TRUE; #endif - case IKOT_FILEPORT: - fileport_notify(request_header); - return TRUE; - - case IKOT_WORK_INTERVAL: - work_interval_port_notify(request_header); - return TRUE; + case IKOT_FILEPORT: + fileport_notify(request_header); + return TRUE; - } + case IKOT_WORK_INTERVAL: + work_interval_port_notify(request_header); + return TRUE; + } break; - case MACH_NOTIFY_PORT_DELETED: - case MACH_NOTIFY_PORT_DESTROYED: - case MACH_NOTIFY_SEND_ONCE: - case MACH_NOTIFY_DEAD_NAME: + case MACH_NOTIFY_PORT_DELETED: + case MACH_NOTIFY_PORT_DESTROYED: + case MACH_NOTIFY_SEND_ONCE: + case MACH_NOTIFY_DEAD_NAME: break; - default: + default: return FALSE; } switch (ip_kotype(port)) { - #ifdef IOKIT - case IKOT_IOKIT_OBJECT: - case IKOT_IOKIT_CONNECT: - case IKOT_IOKIT_IDENT: - { - return iokit_notify(request_header); - } + case IKOT_IOKIT_OBJECT: + case IKOT_IOKIT_CONNECT: + case IKOT_IOKIT_IDENT: + { + return iokit_notify(request_header); + } #endif - case IKOT_TASK_RESUME: - { - return task_suspension_notify(request_header); - } + case IKOT_TASK_RESUME: + { + return task_suspension_notify(request_header); + } - default: - return FALSE; - } + default: + return FALSE; + } } diff --git a/osfmk/kern/ipc_kobject.h b/osfmk/kern/ipc_kobject.h index 28db4e47d..95f150776 100644 --- a/osfmk/kern/ipc_kobject.h +++ b/osfmk/kern/ipc_kobject.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -74,7 +74,7 @@ #include #endif /* MACH_KERNEL_PRIVATE */ -#ifndef _KERN_IPC_KOBJECT_H_ +#ifndef _KERN_IPC_KOBJECT_H_ #define _KERN_IPC_KOBJECT_H_ #ifdef KERNEL_PRIVATE @@ -87,47 +87,47 @@ #include #include -typedef natural_t ipc_kobject_type_t; - -#define IKOT_NONE 0 -#define IKOT_THREAD 1 -#define IKOT_TASK 2 -#define IKOT_HOST 3 -#define IKOT_HOST_PRIV 4 -#define IKOT_PROCESSOR 5 -#define IKOT_PSET 6 -#define IKOT_PSET_NAME 7 -#define IKOT_TIMER 8 -#define IKOT_PAGING_REQUEST 9 -#define IKOT_MIG 10 -#define IKOT_MEMORY_OBJECT 11 -#define IKOT_XMM_PAGER 12 -#define IKOT_XMM_KERNEL 13 -#define IKOT_XMM_REPLY 14 -#define IKOT_UND_REPLY 15 -#define IKOT_HOST_NOTIFY 16 -#define IKOT_HOST_SECURITY 17 -#define IKOT_LEDGER 18 -#define IKOT_MASTER_DEVICE 19 -#define IKOT_TASK_NAME 20 -#define IKOT_SUBSYSTEM 21 -#define IKOT_IO_DONE_QUEUE 22 -#define IKOT_SEMAPHORE 23 -#define IKOT_LOCK_SET 24 -#define IKOT_CLOCK 25 -#define IKOT_CLOCK_CTRL 26 -#define IKOT_IOKIT_IDENT 27 -#define IKOT_NAMED_ENTRY 28 -#define IKOT_IOKIT_CONNECT 29 -#define IKOT_IOKIT_OBJECT 30 -#define IKOT_UPL 31 -#define IKOT_MEM_OBJ_CONTROL 32 -#define IKOT_AU_SESSIONPORT 33 -#define IKOT_FILEPORT 34 -#define IKOT_LABELH 35 -#define IKOT_TASK_RESUME 36 -#define IKOT_VOUCHER 37 -#define IKOT_VOUCHER_ATTR_CONTROL 38 +typedef natural_t ipc_kobject_type_t; + +#define IKOT_NONE 0 +#define IKOT_THREAD 1 +#define IKOT_TASK 2 +#define IKOT_HOST 3 +#define IKOT_HOST_PRIV 4 +#define IKOT_PROCESSOR 5 +#define IKOT_PSET 6 +#define IKOT_PSET_NAME 7 +#define IKOT_TIMER 8 +#define IKOT_PAGING_REQUEST 9 +#define IKOT_MIG 10 +#define IKOT_MEMORY_OBJECT 11 +#define IKOT_XMM_PAGER 12 +#define IKOT_XMM_KERNEL 13 +#define IKOT_XMM_REPLY 14 +#define IKOT_UND_REPLY 15 +#define IKOT_HOST_NOTIFY 16 +#define IKOT_HOST_SECURITY 17 +#define IKOT_LEDGER 18 +#define IKOT_MASTER_DEVICE 19 +#define IKOT_TASK_NAME 20 +#define IKOT_SUBSYSTEM 21 +#define IKOT_IO_DONE_QUEUE 22 +#define IKOT_SEMAPHORE 23 +#define IKOT_LOCK_SET 24 +#define IKOT_CLOCK 25 +#define IKOT_CLOCK_CTRL 26 +#define IKOT_IOKIT_IDENT 27 +#define IKOT_NAMED_ENTRY 28 +#define IKOT_IOKIT_CONNECT 29 +#define IKOT_IOKIT_OBJECT 30 +#define IKOT_UPL 31 +#define IKOT_MEM_OBJ_CONTROL 32 +#define IKOT_AU_SESSIONPORT 33 +#define IKOT_FILEPORT 34 +#define IKOT_LABELH 35 +#define IKOT_TASK_RESUME 36 +#define IKOT_VOUCHER 37 +#define IKOT_VOUCHER_ATTR_CONTROL 38 #define IKOT_WORK_INTERVAL 39 #define IKOT_UX_HANDLER 40 @@ -136,10 +136,10 @@ typedef natural_t ipc_kobject_type_t; * Please keep ipc/ipc_object.c:ikot_print_array up to date. 
*/ #define IKOT_UNKNOWN 41 /* magic catchall */ -#define IKOT_MAX_TYPE (IKOT_UNKNOWN+1) /* # of IKOT_ types */ +#define IKOT_MAX_TYPE (IKOT_UNKNOWN+1) /* # of IKOT_ types */ -#define is_ipc_kobject(ikot) ((ikot) != IKOT_NONE) +#define is_ipc_kobject(ikot) ((ikot) != IKOT_NONE) #ifdef MACH_KERNEL_PRIVATE @@ -149,30 +149,29 @@ typedef natural_t ipc_kobject_type_t; */ /* Dispatch a kernel server function */ -extern ipc_kmsg_t ipc_kobject_server( - ipc_kmsg_t request, - mach_msg_option_t option); +extern ipc_kmsg_t ipc_kobject_server( + ipc_kmsg_t request, + mach_msg_option_t option); /* Make a port represent a kernel object of the given type */ -extern void ipc_kobject_set( - ipc_port_t port, - ipc_kobject_t kobject, - ipc_kobject_type_t type); +extern void ipc_kobject_set( + ipc_port_t port, + ipc_kobject_t kobject, + ipc_kobject_type_t type); -extern void ipc_kobject_set_atomically( - ipc_port_t port, - ipc_kobject_t kobject, - ipc_kobject_type_t type); +extern void ipc_kobject_set_atomically( + ipc_port_t port, + ipc_kobject_t kobject, + ipc_kobject_type_t type); /* Release any kernel object resources associated with a port */ -extern void ipc_kobject_destroy( - ipc_port_t port); +extern void ipc_kobject_destroy( + ipc_port_t port); -#define null_conversion(port) (port) +#define null_conversion(port) (port) #endif /* MACH_KERNEL_PRIVATE */ #endif /* KERNEL_PRIVATE */ #endif /* _KERN_IPC_KOBJECT_H_ */ - diff --git a/osfmk/kern/ipc_mig.c b/osfmk/kern/ipc_mig.c index 818854bf6..4770d8b87 100644 --- a/osfmk/kern/ipc_mig.c +++ b/osfmk/kern/ipc_mig.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
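/*
 * Illustrative sketch (not part of the upstream diff): the IKOT_* constants
 * and externs retabbed above support one recurring pattern -- allocate a
 * kernel port, then bind a kobject and type to it so ipc_kobject_server()
 * can dispatch on ip_kotype(). This mirrors ipc_clock_init()/
 * ipc_clock_enable() earlier in the patch; the wrapper name is hypothetical.
 */
static ipc_port_t
my_make_kobject_port(ipc_kobject_t kobject, ipc_kobject_type_t type)
{
	ipc_port_t port = ipc_port_alloc_kernel();
	if (port == IP_NULL) {
		panic("my_make_kobject_port");
	}
	/* stores the type in ip_bits and the object in ip_kobject */
	ipc_kobject_set(port, kobject, type);
	return port;
}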
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -108,31 +108,31 @@ mach_msg_receive_results_complete(ipc_object_t object); #undef mach_msg_send_from_kernel mach_msg_return_t mach_msg_send_from_kernel( - mach_msg_header_t *msg, - mach_msg_size_t send_size); + mach_msg_header_t *msg, + mach_msg_size_t send_size); mach_msg_return_t mach_msg_send_from_kernel( - mach_msg_header_t *msg, - mach_msg_size_t send_size) + mach_msg_header_t *msg, + mach_msg_size_t send_size) { ipc_kmsg_t kmsg; mach_msg_return_t mr; - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg); if (mr != MACH_MSG_SUCCESS) { - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } mr = ipc_kmsg_copyin_from_kernel_legacy(kmsg); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; - } + } /* * respect the thread's SEND_IMPORTANCE option to allow importance @@ -140,13 +140,14 @@ mach_msg_send_from_kernel( * (11938665 & 23925818) */ mach_msg_option_t option = MACH_SEND_KERNEL_DEFAULT; - if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) + if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) { option &= ~MACH_SEND_NOIMPORTANCE; + } mr = ipc_kmsg_send(kmsg, option, MACH_MSG_TIMEOUT_NONE); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); } return mr; @@ -156,24 +157,24 @@ mach_msg_send_from_kernel( mach_msg_return_t mach_msg_send_from_kernel_proper( - mach_msg_header_t *msg, - mach_msg_size_t send_size) + mach_msg_header_t *msg, + mach_msg_size_t send_size) { ipc_kmsg_t kmsg; mach_msg_return_t mr; - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg); if (mr != MACH_MSG_SUCCESS) { - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } mr = ipc_kmsg_copyin_from_kernel(kmsg); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } @@ -183,13 +184,14 @@ mach_msg_send_from_kernel_proper( * (11938665 & 23925818) */ mach_msg_option_t option = MACH_SEND_KERNEL_DEFAULT; - if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) + if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) { option &= ~MACH_SEND_NOIMPORTANCE; + } mr = ipc_kmsg_send(kmsg, option, MACH_MSG_TIMEOUT_NONE); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | 
DBG_FUNC_END, mr); } return mr; @@ -197,26 +199,26 @@ mach_msg_send_from_kernel_proper( mach_msg_return_t mach_msg_send_from_kernel_with_options( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_option_t option, - mach_msg_timeout_t timeout_val) + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_option_t option, + mach_msg_timeout_t timeout_val) { ipc_kmsg_t kmsg; mach_msg_return_t mr; - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg); if (mr != MACH_MSG_SUCCESS) { - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } mr = ipc_kmsg_copyin_from_kernel(kmsg); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } @@ -228,18 +230,19 @@ mach_msg_send_from_kernel_with_options( * or the thread's SEND_IMPORTANCE option has been set. * (11938665 & 23925818) */ - if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) + if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) { option &= ~MACH_SEND_NOIMPORTANCE; - else if ((option & MACH_SEND_IMPORTANCE) == 0) + } else if ((option & MACH_SEND_IMPORTANCE) == 0) { option |= MACH_SEND_NOIMPORTANCE; + } mr = ipc_kmsg_send(kmsg, option, timeout_val); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); } - + return mr; } @@ -248,26 +251,26 @@ mach_msg_send_from_kernel_with_options( mach_msg_return_t mach_msg_send_from_kernel_with_options_legacy( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_option_t option, - mach_msg_timeout_t timeout_val) + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_option_t option, + mach_msg_timeout_t timeout_val) { ipc_kmsg_t kmsg; mach_msg_return_t mr; - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg); if (mr != MACH_MSG_SUCCESS) { - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } mr = ipc_kmsg_copyin_from_kernel_legacy(kmsg); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } @@ -277,18 +280,19 @@ mach_msg_send_from_kernel_with_options_legacy( * threads in importance-donating tasks. 
* (11938665 & 23925818) */ - if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) + if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) { option &= ~MACH_SEND_NOIMPORTANCE; - else + } else { option |= MACH_SEND_NOIMPORTANCE; + } mr = ipc_kmsg_send(kmsg, option, timeout_val); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); } - + return mr; } @@ -309,47 +313,47 @@ mach_msg_send_from_kernel_with_options_legacy( * MACH_RCV_PORT_DIED The reply port was deallocated. */ -mach_msg_return_t mach_msg_rpc_from_kernel_body(mach_msg_header_t *msg, - mach_msg_size_t send_size, mach_msg_size_t rcv_size, boolean_t legacy); +mach_msg_return_t mach_msg_rpc_from_kernel_body(mach_msg_header_t *msg, + mach_msg_size_t send_size, mach_msg_size_t rcv_size, boolean_t legacy); #if IKM_SUPPORT_LEGACY #undef mach_msg_rpc_from_kernel mach_msg_return_t mach_msg_rpc_from_kernel( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size); + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size); mach_msg_return_t mach_msg_rpc_from_kernel( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size) + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size) { - return mach_msg_rpc_from_kernel_body(msg, send_size, rcv_size, TRUE); + return mach_msg_rpc_from_kernel_body(msg, send_size, rcv_size, TRUE); } #endif /* IKM_SUPPORT_LEGACY */ mach_msg_return_t mach_msg_rpc_from_kernel_proper( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size) + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size) { - return mach_msg_rpc_from_kernel_body(msg, send_size, rcv_size, FALSE); + return mach_msg_rpc_from_kernel_body(msg, send_size, rcv_size, FALSE); } mach_msg_return_t mach_msg_rpc_from_kernel_body( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size, + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, #if !IKM_SUPPORT_LEGACY __unused #endif - boolean_t legacy) + boolean_t legacy) { thread_t self = current_thread(); ipc_port_t reply; @@ -359,11 +363,11 @@ mach_msg_rpc_from_kernel_body( assert(msg->msgh_local_port == MACH_PORT_NULL); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); mr = ipc_kmsg_get_from_kernel(msg, send_size, &kmsg); if (mr != MACH_MSG_SUCCESS) { - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } @@ -371,29 +375,31 @@ mach_msg_rpc_from_kernel_body( if (reply == IP_NULL) { reply = ipc_port_alloc_reply(); if ((reply == IP_NULL) || - (self->ith_rpc_reply != IP_NULL)) + (self->ith_rpc_reply != IP_NULL)) { panic("mach_msg_rpc_from_kernel"); + } self->ith_rpc_reply = reply; } /* insert send-once right for the reply port */ kmsg->ikm_header->msgh_local_port = reply; kmsg->ikm_header->msgh_bits |= - MACH_MSGH_BITS(0, MACH_MSG_TYPE_MAKE_SEND_ONCE); + MACH_MSGH_BITS(0, MACH_MSG_TYPE_MAKE_SEND_ONCE); #if IKM_SUPPORT_LEGACY - if(legacy) - mr = ipc_kmsg_copyin_from_kernel_legacy(kmsg); - else - mr = ipc_kmsg_copyin_from_kernel(kmsg); + if (legacy) { + mr = ipc_kmsg_copyin_from_kernel_legacy(kmsg); + } else { + mr = ipc_kmsg_copyin_from_kernel(kmsg); + } #else 
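All of the kernel-send variants touched above adjust the option word the same way before calling ipc_kmsg_send; the hunks only add braces, but the rule is easy to isolate. A pure-function restatement of the mach_msg_send_from_kernel_with_options variant (the flag values here are illustrative, not the real mach_msg option bits):

#include <assert.h>
#include <stdbool.h>

typedef unsigned int mach_msg_option_t;

#define MACH_SEND_NOIMPORTANCE 0x1u  /* flag values are illustrative */
#define MACH_SEND_IMPORTANCE   0x2u

/* A thread that opted into importance donation may donate; otherwise the
 * send is marked no-importance unless the caller explicitly asked for
 * importance. */
static mach_msg_option_t
normalize_importance(mach_msg_option_t option, bool thread_may_donate)
{
	if (thread_may_donate) {
		option &= ~MACH_SEND_NOIMPORTANCE;
	} else if ((option & MACH_SEND_IMPORTANCE) == 0) {
		option |= MACH_SEND_NOIMPORTANCE;
	}
	return option;
}

int main(void)
{
	assert(normalize_importance(0, false) == MACH_SEND_NOIMPORTANCE);
	assert(normalize_importance(MACH_SEND_IMPORTANCE, false) == MACH_SEND_IMPORTANCE);
	assert(normalize_importance(MACH_SEND_NOIMPORTANCE, true) == 0);
	return 0;
}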
- mr = ipc_kmsg_copyin_from_kernel(kmsg); + mr = ipc_kmsg_copyin_from_kernel(kmsg); #endif - if (mr != MACH_MSG_SUCCESS) { - ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); - return mr; - } + if (mr != MACH_MSG_SUCCESS) { + ipc_kmsg_free(kmsg); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + return mr; + } /* * respect the thread's SEND_IMPORTANCE option to force importance @@ -401,13 +407,14 @@ mach_msg_rpc_from_kernel_body( * (11938665 & 23925818) */ mach_msg_option_t option = MACH_SEND_KERNEL_DEFAULT; - if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) + if (current_thread()->options & TH_OPT_SEND_IMPORTANCE) { option &= ~MACH_SEND_NOIMPORTANCE; + } mr = ipc_kmsg_send(kmsg, option, MACH_MSG_TIMEOUT_NONE); if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_destroy(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } @@ -429,10 +436,10 @@ mach_msg_rpc_from_kernel_body( mqueue = &reply->ip_messages; ipc_mqueue_receive(mqueue, - MACH_MSG_OPTION_NONE, - MACH_MSG_SIZE_MAX, - MACH_MSG_TIMEOUT_NONE, - THREAD_INTERRUPTIBLE); + MACH_MSG_OPTION_NONE, + MACH_MSG_SIZE_MAX, + MACH_MSG_TIMEOUT_NONE, + THREAD_INTERRUPTIBLE); mr = self->ith_state; kmsg = self->ith_kmsg; @@ -441,10 +448,9 @@ mach_msg_rpc_from_kernel_body( __IGNORE_WCASTALIGN(object = (ipc_object_t) reply); mach_msg_receive_results_complete(object); - if (mr == MACH_MSG_SUCCESS) - { + if (mr == MACH_MSG_SUCCESS) { break; - } + } assert(mr == MACH_RCV_INTERRUPTED); @@ -453,25 +459,24 @@ mach_msg_rpc_from_kernel_body( if (self->ast & AST_APC) { ipc_port_dealloc_reply(reply); self->ith_rpc_reply = IP_NULL; - return(mr); + return mr; } } - /* + /* * Check to see how much of the message/trailer can be received. * We chose the maximum trailer that will fit, since we don't * have options telling us which trailer elements the caller needed. */ if (rcv_size >= kmsg->ikm_header->msgh_size) { mach_msg_format_0_trailer_t *trailer = (mach_msg_format_0_trailer_t *) - ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size); + ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size); if (rcv_size >= kmsg->ikm_header->msgh_size + MAX_TRAILER_SIZE) { /* Enough room for a maximum trailer */ trailer->msgh_trailer_size = MAX_TRAILER_SIZE; - } - else if (rcv_size < kmsg->ikm_header->msgh_size + - trailer->msgh_trailer_size) { + } else if (rcv_size < kmsg->ikm_header->msgh_size + + trailer->msgh_trailer_size) { /* no room for even the basic (default) trailer */ trailer->msgh_trailer_size = 0; } @@ -489,12 +494,13 @@ mach_msg_rpc_from_kernel_body( * as they are. 
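The trailer sizing at the end of mach_msg_rpc_from_kernel_body picks the largest trailer that fits the caller's buffer, keeps the preset default when only that fits, and drops the trailer when not even the default does. A standalone restatement with illustrative sizes (the real MAX_TRAILER_SIZE and mach_msg_trailer_t are ABI-defined):

#include <assert.h>

typedef unsigned int mach_msg_size_t;

#define MAX_TRAILER_SIZE     72u  /* illustrative; real value is ABI-defined */
#define DEFAULT_TRAILER_SIZE  8u  /* stands in for sizeof(mach_msg_trailer_t) */

/* Assumes rcv_size >= msgh_size, which the caller has already checked. */
static mach_msg_size_t
fit_trailer(mach_msg_size_t rcv_size, mach_msg_size_t msgh_size)
{
	mach_msg_size_t trailer = DEFAULT_TRAILER_SIZE;   /* preset default */

	if (rcv_size >= msgh_size + MAX_TRAILER_SIZE) {
		trailer = MAX_TRAILER_SIZE;        /* max trailer fits */
	} else if (rcv_size < msgh_size + trailer) {
		trailer = 0;                       /* not even the default fits */
	}
	return trailer;
}

int main(void)
{
	assert(fit_trailer(200, 100) == MAX_TRAILER_SIZE);
	assert(fit_trailer(110, 100) == DEFAULT_TRAILER_SIZE);
	assert(fit_trailer(100, 100) == 0);
	return 0;
}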
*/ #if IKM_SUPPORT_LEGACY - if(legacy) - ipc_kmsg_copyout_to_kernel_legacy(kmsg, ipc_space_reply); - else - ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply); + if (legacy) { + ipc_kmsg_copyout_to_kernel_legacy(kmsg, ipc_space_reply); + } else { + ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply); + } #else - ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply); + ipc_kmsg_copyout_to_kernel(kmsg, ipc_space_reply); #endif ipc_kmsg_put_to_kernel(msg, kmsg, rcv_size); return mr; @@ -511,7 +517,7 @@ mach_msg_rpc_from_kernel_body( void mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg) { - mach_msg_bits_t mbits = msg->msgh_bits; + mach_msg_bits_t mbits = msg->msgh_bits; ipc_object_t object; object = (ipc_object_t) msg->msgh_remote_port; @@ -526,7 +532,7 @@ mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg) */ /* MIG kernel users don't receive vouchers */ - assert(!MACH_PORT_VALID(msg->msgh_voucher_port)); + assert(!MACH_MSGH_BITS_VOUCHER(mbits)); /* For simple messages, we're done */ if ((mbits & MACH_MSGH_BITS_COMPLEX) == 0) { @@ -538,9 +544,8 @@ mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg) mach_msg_descriptor_t *daddr = (mach_msg_descriptor_t *)(body + 1); mach_msg_size_t i; - for (i = 0 ; i < body->msgh_descriptor_count; i++, daddr++ ) { + for (i = 0; i < body->msgh_descriptor_count; i++, daddr++) { switch (daddr->type.type) { - case MACH_MSG_PORT_DESCRIPTOR: { mach_msg_port_descriptor_t *dsc = &daddr->port; if (IO_VALID((ipc_object_t) dsc->name)) { @@ -549,7 +554,7 @@ mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg) break; } case MACH_MSG_OOL_VOLATILE_DESCRIPTOR: - case MACH_MSG_OOL_DESCRIPTOR : { + case MACH_MSG_OOL_DESCRIPTOR: { mach_msg_ool_descriptor_t *dsc = (mach_msg_ool_descriptor_t *)&daddr->out_of_line; @@ -560,12 +565,12 @@ mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg) } break; } - case MACH_MSG_OOL_PORTS_DESCRIPTOR : { - ipc_object_t *objects; - mach_msg_type_number_t j; - mach_msg_ool_ports_descriptor_t *dsc; + case MACH_MSG_OOL_PORTS_DESCRIPTOR: { + ipc_object_t *objects; + mach_msg_type_number_t j; + mach_msg_ool_ports_descriptor_t *dsc; - dsc = (mach_msg_ool_ports_descriptor_t *)&daddr->ool_ports; + dsc = (mach_msg_ool_ports_descriptor_t *)&daddr->ool_ports; objects = (ipc_object_t *) dsc->address; if (dsc->count == 0) { @@ -581,7 +586,7 @@ mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg) kfree(dsc->address, (vm_size_t) dsc->count * sizeof(mach_port_t)); break; } - default : + default: break; } } @@ -605,15 +610,15 @@ mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg) mach_msg_return_t mach_msg_overwrite( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size, - mach_port_name_t rcv_name, - __unused mach_msg_timeout_t msg_timeout, - mach_msg_priority_t override, - __unused mach_msg_header_t *rcv_msg, - __unused mach_msg_size_t rcv_msg_size) + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + __unused mach_msg_timeout_t msg_timeout, + mach_msg_priority_t override, + __unused mach_msg_header_t *rcv_msg, + __unused mach_msg_size_t rcv_msg_size) { ipc_space_t space = current_space(); vm_map_t map = current_map(); @@ -623,37 +628,39 @@ mach_msg_overwrite( mach_msg_trailer_size_t trailer_size; if (option & MACH_SEND_MSG) { - mach_msg_size_t msg_and_trailer_size; - mach_msg_max_trailer_t *max_trailer; + mach_msg_size_t msg_and_trailer_size; + mach_msg_max_trailer_t 
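One change in the mach_msg_destroy_from_kernel_proper hunk above is not cosmetic: the voucher assertion now tests MACH_MSGH_BITS_VOUCHER(mbits) rather than MACH_PORT_VALID(msg->msgh_voucher_port), keying off the header's disposition bits instead of the raw port name. The rest of the routine is the descriptor walk, whose shape is sketched below with simplified stand-in types (the real descriptors and their release paths differ):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the descriptors walked by
 * mach_msg_destroy_from_kernel_proper. */
typedef enum { DESC_PORT, DESC_OOL, DESC_OOL_PORTS } desc_type_t;

typedef struct {
	desc_type_t type;
	void *payload;          /* port right, OOL buffer, or port array */
	size_t count;           /* only meaningful for DESC_OOL_PORTS */
} desc_t;

static void port_destroy(void *p) { printf("destroy port %p\n", p); }

/* One pass over the body's descriptors, releasing whatever each one
 * carries - the same shape as the switch in the kernel routine. */
static void destroy_descriptors(desc_t *d, size_t n)
{
	for (size_t i = 0; i < n; i++, d++) {
		switch (d->type) {
		case DESC_PORT:
			if (d->payload) port_destroy(d->payload);
			break;
		case DESC_OOL:
			free(d->payload);  /* kernel uses vm_map_copy_discard */
			break;
		case DESC_OOL_PORTS:
			for (size_t j = 0; j < d->count; j++)
				port_destroy(((void **)d->payload)[j]);
			free(d->payload);  /* kernel uses kfree */
			break;
		}
	}
}

int main(void)
{
	desc_t d[2] = {
		{ DESC_PORT, (void *)0x1, 0 },
		{ DESC_OOL,  malloc(16),  0 },
	};
	destroy_descriptors(d, 2);
	return 0;
}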
*max_trailer; if ((send_size & 3) || send_size < sizeof(mach_msg_header_t) || - (send_size < sizeof(mach_msg_body_t) && (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX))) + (send_size < sizeof(mach_msg_body_t) && (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX))) { return MACH_SEND_MSG_TOO_SMALL; + } - if (send_size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE) + if (send_size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE) { return MACH_SEND_TOO_LARGE; + } - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_START); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START); msg_and_trailer_size = send_size + MAX_TRAILER_SIZE; kmsg = ipc_kmsg_alloc(msg_and_trailer_size); if (kmsg == IKM_NULL) { - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_NO_BUFFER); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_NO_BUFFER); return MACH_SEND_NO_BUFFER; } - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE, - (uintptr_t)0, /* this should only be called from the kernel! */ - VM_KERNEL_ADDRPERM((uintptr_t)kmsg), - 0, 0, - 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE, + (uintptr_t)0, /* this should only be called from the kernel! */ + VM_KERNEL_ADDRPERM((uintptr_t)kmsg), + 0, 0, + 0); (void) memcpy((void *) kmsg->ikm_header, (const void *) msg, send_size); kmsg->ikm_header->msgh_size = send_size; - /* + /* * Reserve for the trailer the largest space (MAX_TRAILER_SIZE) * However, the internal size field of the trailer (msgh_trailer_size) * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize @@ -669,13 +676,13 @@ mach_msg_overwrite( if (mr != MACH_MSG_SUCCESS) { ipc_kmsg_free(kmsg); - KDBG(MACHDBG_CODE(DBG_MACH_IPC,MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); + KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, mr); return mr; } do { mr = ipc_kmsg_send(kmsg, MACH_MSG_OPTION_NONE, MACH_MSG_TIMEOUT_NONE); - } while (mr == MACH_SEND_INTERRUPTED); + } while (mr == MACH_SEND_INTERRUPTED); assert(mr == MACH_MSG_SUCCESS); } @@ -688,32 +695,33 @@ mach_msg_overwrite( ipc_mqueue_t mqueue; mr = ipc_mqueue_copyin(space, rcv_name, - &mqueue, &object); - if (mr != MACH_MSG_SUCCESS) + &mqueue, &object); + if (mr != MACH_MSG_SUCCESS) { return mr; + } /* hold ref for object */ self->ith_continuation = (void (*)(mach_msg_return_t))0; ipc_mqueue_receive(mqueue, - MACH_MSG_OPTION_NONE, - MACH_MSG_SIZE_MAX, - MACH_MSG_TIMEOUT_NONE, - THREAD_ABORTSAFE); + MACH_MSG_OPTION_NONE, + MACH_MSG_SIZE_MAX, + MACH_MSG_TIMEOUT_NONE, + THREAD_ABORTSAFE); mr = self->ith_state; kmsg = self->ith_kmsg; seqno = self->ith_seqno; mach_msg_receive_results_complete(object); io_release(object); - } while (mr == MACH_RCV_INTERRUPTED); - if (mr != MACH_MSG_SUCCESS) + if (mr != MACH_MSG_SUCCESS) { return mr; + } trailer_size = ipc_kmsg_add_trailer(kmsg, space, option, current_thread(), seqno, TRUE, - kmsg->ikm_header->msgh_remote_port->ip_context); + kmsg->ikm_header->msgh_remote_port->ip_context); if (rcv_size < (kmsg->ikm_header->msgh_size + trailer_size)) { ipc_kmsg_copyout_dest(kmsg, space); @@ -724,9 +732,9 @@ mach_msg_overwrite( mr = ipc_kmsg_copyout(kmsg, space, map, MACH_MSG_BODY_NULL, option); if (mr != MACH_MSG_SUCCESS) { - if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) { + if ((mr & ~MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) { ipc_kmsg_put_to_kernel(msg, kmsg, - kmsg->ikm_header->msgh_size + trailer_size); + kmsg->ikm_header->msgh_size + trailer_size); } else { 
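The send path of mach_msg_overwrite validates send_size with a subtraction rather than an addition, so the bound check itself cannot overflow before rejecting an oversized message. A minimal sketch of the guard, with an illustrative bound (the real MACH_MSG_SIZE_MAX is ABI-defined):

#include <assert.h>
#include <stdint.h>

#define MACH_MSG_SIZE_MAX UINT32_MAX   /* illustrative bound */
#define MAX_TRAILER_SIZE  72u

/* The subtraction form avoids computing send_size + MAX_TRAILER_SIZE,
 * which could wrap before the comparison gets a chance to reject it. */
static int send_size_ok(uint32_t send_size)
{
	return send_size <= MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE;
}

int main(void)
{
	assert(send_size_ok(128));
	assert(!send_size_ok(UINT32_MAX - 8));
	return 0;
}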
ipc_kmsg_copyout_dest(kmsg, space); (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, sizeof *msg); @@ -737,7 +745,7 @@ mach_msg_overwrite( } (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, - kmsg->ikm_header->msgh_size + trailer_size); + kmsg->ikm_header->msgh_size + trailer_size); ipc_kmsg_free(kmsg); } @@ -753,7 +761,7 @@ mach_msg_overwrite( mach_port_t mig_get_reply_port(void) { - return (MACH_PORT_NULL); + return MACH_PORT_NULL; } /* @@ -771,7 +779,7 @@ mig_dealloc_reply_port( /* * Routine: mig_put_reply_port * Purpose: - * Called by client side interfaces after each RPC to + * Called by client side interfaces after each RPC to * let the client recycle the reply port if it wishes. */ void @@ -784,35 +792,39 @@ mig_put_reply_port( * mig_strncpy.c - by Joshua Block * * mig_strncp -- Bounded string copy. Does what the library routine strncpy - * OUGHT to do: Copies the (null terminated) string in src into dest, a + * OUGHT to do: Copies the (null terminated) string in src into dest, a * buffer of length len. Assures that the copy is still null terminated * and doesn't overflow the buffer, truncating the copy if necessary. * * Parameters: - * + * * dest - Pointer to destination buffer. - * + * * src - Pointer to source string. - * + * * len - Length of destination buffer. */ -int +int mig_strncpy( - char *dest, - const char *src, - int len) + char *dest, + const char *src, + int len) { - int i = 0; - - if (len > 0) - if (dest != NULL) { - if (src != NULL) - for (i=1; i 0) { + if (dest != NULL) { + if (src != NULL) { + for (i = 1; i < len; i++) { + if (!(*dest++ = *src++)) { + return i; + } + } + } + *dest = '\0'; + } } - return i; + return i; } /* @@ -833,9 +845,9 @@ mig_strncpy( */ int mig_strncpy_zerofill( - char *dest, - const char *src, - int len) + char *dest, + const char *src, + int len) { int i = 0; boolean_t terminated = FALSE; @@ -870,15 +882,15 @@ mig_strncpy_zerofill( void * mig_user_allocate( - vm_size_t size) + vm_size_t size) { return (char *)kalloc(size); } void mig_user_deallocate( - char *data, - vm_size_t size) + char *data, + vm_size_t size) { kfree(data, size); } @@ -891,11 +903,12 @@ mig_user_deallocate( */ kern_return_t mig_object_init( - mig_object_t mig_object, - const IMIGObject *interface) + mig_object_t mig_object, + const IMIGObject *interface) { - if (mig_object == MIG_OBJECT_NULL) + if (mig_object == MIG_OBJECT_NULL) { return KERN_INVALID_ARGUMENT; + } mig_object->pVtbl = (const IMIGObjectVtbl *)interface; mig_object->port = MACH_PORT_NULL; return KERN_SUCCESS; @@ -914,7 +927,7 @@ mig_object_init( */ void mig_object_destroy( - __assert_only mig_object_t mig_object) + __assert_only mig_object_t mig_object) { assert(mig_object->port == MACH_PORT_NULL); return; @@ -930,7 +943,7 @@ mig_object_destroy( */ void mig_object_reference( - mig_object_t mig_object) + mig_object_t mig_object) { assert(mig_object != MIG_OBJECT_NULL); mig_object->pVtbl->AddRef((IMIGObject *)mig_object); @@ -946,7 +959,7 @@ mig_object_reference( */ void mig_object_deallocate( - mig_object_t mig_object) + mig_object_t mig_object) { assert(mig_object != MIG_OBJECT_NULL); mig_object->pVtbl->Release((IMIGObject *)mig_object); @@ -966,33 +979,34 @@ mig_object_deallocate( */ ipc_port_t convert_mig_object_to_port( - mig_object_t mig_object) + mig_object_t mig_object) { - ipc_port_t port; - boolean_t deallocate = TRUE; + ipc_port_t port; + boolean_t deallocate = TRUE; - if (mig_object == MIG_OBJECT_NULL) + if (mig_object == MIG_OBJECT_NULL) { return IP_NULL; + } port = 
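The reflowed mig_strncpy keeps its documented contract: copy at most len-1 characters, always NUL-terminate when dest is writable, and return the number of bytes written including the terminator (len on truncation). A standalone restatement under a hypothetical name, with the edge cases exercised:

#include <assert.h>
#include <string.h>

/* Bounded copy that is always NUL-terminated when it writes at all.
 * Returns bytes written, including the terminator; returns len when the
 * source had to be truncated. */
static int
bounded_copy(char *dest, const char *src, int len)
{
	int i = 0;

	if (len > 0 && dest != NULL) {
		if (src != NULL) {
			for (i = 1; i < len; i++) {
				if (!(*dest++ = *src++)) {
					return i;
				}
			}
		}
		*dest = '\0';
	}
	return i;
}

int main(void)
{
	char buf[8];

	assert(bounded_copy(buf, "hi", sizeof buf) == 3);  /* 'h','i','\0' */
	assert(bounded_copy(buf, "a long string", sizeof buf) == sizeof buf);
	assert(buf[7] == '\0');             /* truncated but terminated */
	assert(strcmp(buf, "a long ") == 0);
	return 0;
}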
mig_object->port; while ((port == IP_NULL) || - ((port = ipc_port_make_send(port)) == IP_NULL)) { - ipc_port_t previous; + ((port = ipc_port_make_send(port)) == IP_NULL)) { + ipc_port_t previous; /* * Either the port was never set up, or it was just * deallocated out from under us by the no-senders * processing. In either case, we must: * Attempt to make one - * Arrange for no senders + * Arrange for no senders * Try to atomically register it with the object * Destroy it if we are raced. */ port = ipc_port_alloc_kernel(); ip_lock(port); ipc_kobject_set_atomically(port, - (ipc_kobject_t) mig_object, - IKOT_MIG); + (ipc_kobject_t) mig_object, + IKOT_MIG); /* make a sonce right for the notification */ port->ip_sorights++; @@ -1004,7 +1018,7 @@ convert_mig_object_to_port( assert(previous == IP_NULL); if (OSCompareAndSwapPtr((void *)IP_NULL, (void *)port, - (void * volatile *)&mig_object->port)) { + (void * volatile *)&mig_object->port)) { deallocate = FALSE; } else { ipc_port_dealloc_kernel(port); @@ -1012,10 +1026,11 @@ convert_mig_object_to_port( } } - if (deallocate) + if (deallocate) { mig_object->pVtbl->Release((IMIGObject *)mig_object); + } - return (port); + return port; } @@ -1034,14 +1049,15 @@ convert_mig_object_to_port( */ mig_object_t convert_port_to_mig_object( - ipc_port_t port, - const MIGIID *iid) + ipc_port_t port, + const MIGIID *iid) { - mig_object_t mig_object; - void *ppv; + mig_object_t mig_object; + void *ppv; - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return NULL; + } ip_lock(port); if (!ip_active(port) || (ip_kotype(port) != IKOT_MIG)) { @@ -1075,19 +1091,19 @@ convert_port_to_mig_object( boolean_t mig_object_no_senders( - ipc_port_t port, - mach_port_mscount_t mscount) + ipc_port_t port, + mach_port_mscount_t mscount) { - mig_object_t mig_object; + mig_object_t mig_object; ip_lock(port); if (port->ip_mscount > mscount) { - ipc_port_t previous; + ipc_port_t previous; /* * Somebody created new send rights while the * notification was in-flight. Just create a - * new send-once right and re-register with + * new send-once right and re-register with * the new (higher) mscount threshold. */ /* make a sonce right for the notification */ @@ -1097,7 +1113,7 @@ mig_object_no_senders( /* port unlocked */ assert(previous == IP_NULL); - return (FALSE); + return FALSE; } /* @@ -1113,13 +1129,13 @@ mig_object_no_senders( port->ip_mscount = 0; port->ip_messages.imq_seqno = 0; ipc_port_destroy(port); /* releases lock */ - + /* * Release the port's reference on the object. */ mig_object->pVtbl->Release((IMIGObject *)mig_object); - return (TRUE); -} + return TRUE; +} /* * Kernel implementation of the notification chain for MIG object diff --git a/osfmk/kern/ipc_mig.h b/osfmk/kern/ipc_mig.h index cf1af4da4..3fb5a8cba 100644 --- a/osfmk/kern/ipc_mig.h +++ b/osfmk/kern/ipc_mig.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
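convert_mig_object_to_port lazily creates the object's port and publishes it with OSCompareAndSwapPtr; a loser of that race deallocates its freshly made port and adopts the winner's. A C11-atomics sketch of the same publish-or-discard pattern (simplified: the kernel loop also retries when the published port is torn down under it by no-senders processing):

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef struct { int id; } port_t;

static port_t *port_alloc(void) { return calloc(1, sizeof(port_t)); }
static void port_dealloc(port_t *p) { free(p); }

/* Whoever wins the compare-and-swap installs its fresh port; a loser
 * destroys its copy and uses the winner's. */
static port_t *
get_port(_Atomic(port_t *) *slot)
{
	port_t *port = atomic_load(slot);

	while (port == NULL) {
		port_t *fresh = port_alloc();
		port_t *expected = NULL;

		if (atomic_compare_exchange_strong(slot, &expected, fresh)) {
			return fresh;        /* we won the race */
		}
		port_dealloc(fresh);         /* raced: discard ours */
		port = expected;             /* ...and take the winner's */
	}
	return port;
}

int main(void)
{
	_Atomic(port_t *) slot = NULL;
	port_t *a = get_port(&slot);
	port_t *b = get_port(&slot);

	assert(a == b);
	port_dealloc(a);
	return 0;
}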
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifndef _KERN_IPC_MIG_H_ -#define _KERN_IPC_MIG_H_ +#ifndef _KERN_IPC_MIG_H_ +#define _KERN_IPC_MIG_H_ #include #include @@ -39,7 +39,7 @@ #include -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE #include @@ -49,152 +49,152 @@ */ #ifdef _MIG_TRACE_PARAMETERS_ -#define __BeforeRcvCallTrace(msgid,arg1,arg2,arg3,arg4) \ +#define __BeforeRcvCallTrace(msgid, arg1, arg2, arg3, arg4) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ - KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ - (unsigned int)(arg1), \ - (unsigned int)(arg2), \ - (unsigned int)(arg3), \ - (unsigned int)(arg4), \ - (unsigned int)(0)); - -#define __AfterRcvCallTrace(msgid,arg1,arg2,arg3,arg4) \ + KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ + (unsigned int)(arg1), \ + (unsigned int)(arg2), \ + (unsigned int)(arg3), \ + (unsigned int)(arg4), \ + (unsigned int)(0)); + +#define __AfterRcvCallTrace(msgid, arg1, arg2, arg3, arg4) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ - KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ - (unsigned int)(arg1), \ - (unsigned int)(arg2), \ - (unsigned int)(arg3), \ - (unsigned int)(arg4), \ - (unsigned int)(0)); - -#define __BeforeSimpleCallTrace(msgid,arg1,arg2,arg3,arg4) \ + KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ + (unsigned int)(arg1), \ + (unsigned int)(arg2), \ + (unsigned int)(arg3), \ + (unsigned int)(arg4), \ + (unsigned int)(0)); + +#define __BeforeSimpleCallTrace(msgid, arg1, arg2, arg3, arg4) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ - KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ - (unsigned int)(arg1), \ - (unsigned int)(arg2), \ - (unsigned int)(arg3), \ - (unsigned int)(arg4), \ - (unsigned int)(0)); - -#define __AfterSimpleCallTrace(msgid,arg1,arg2,arg3,arg4) \ + KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ + (unsigned int)(arg1), \ + (unsigned int)(arg2), \ + (unsigned int)(arg3), \ + (unsigned int)(arg4), \ + (unsigned int)(0)); + +#define __AfterSimpleCallTrace(msgid, arg1, arg2, arg3, arg4) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ - (unsigned int)(arg1), \ - (unsigned int)(arg2), \ - (unsigned int)(arg3), \ - (unsigned int)(arg4), \ - (unsigned int)(0)); + (unsigned int)(arg1), \ + (unsigned int)(arg2), \ + (unsigned int)(arg3), \ + (unsigned int)(arg4), \ + (unsigned int)(0)); #else /* !_MIG_TRACE_PARAMETERS_ */ -#define __BeforeRcvRpc(msgid, _NAME_) \ +#define __BeforeRcvRpc(msgid, _NAME_) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ - KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0)); - -#define __AfterRcvRpc(msgid, _NAME_) \ + KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)); + +#define __AfterRcvRpc(msgid, _NAME_) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0)); + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + 
(unsigned int)(0), \ + (unsigned int)(0)); -#define __BeforeRcvSimple(msgid, _NAME_) \ +#define __BeforeRcvSimple(msgid, _NAME_) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ - KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0)); - -#define __AfterRcvSimple(msgid, _NAME_) \ + KDBG_MIGCODE(msgid) | DBG_FUNC_START, \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)); + +#define __AfterRcvSimple(msgid, _NAME_) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ - KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0)); + KDBG_MIGCODE(msgid) | DBG_FUNC_END, \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)); #endif /* !_MIG_TRACE_PARAMETERS_ */ -#define _MIG_MSGID_INVALID(msgid) \ +#define _MIG_MSGID_INVALID(msgid) \ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, \ - MACHDBG_CODE(DBG_MACH_MSGID_INVALID, (msgid)), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0), \ - (unsigned int)(0)) + MACHDBG_CODE(DBG_MACH_MSGID_INVALID, (msgid)), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0), \ + (unsigned int)(0)) -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ __BEGIN_DECLS /* Send a message from the kernel */ extern mach_msg_return_t mach_msg_send_from_kernel_proper( - mach_msg_header_t *msg, - mach_msg_size_t send_size); + mach_msg_header_t *msg, + mach_msg_size_t send_size); #define mach_msg_send_from_kernel mach_msg_send_from_kernel_proper extern mach_msg_return_t mach_msg_rpc_from_kernel_proper( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size); + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size); #define mach_msg_rpc_from_kernel mach_msg_rpc_from_kernel_proper extern void mach_msg_destroy_from_kernel_proper( - mach_msg_header_t *msg); + mach_msg_header_t *msg); #define mach_msg_destroy_from_kernel mach_msg_destroy_from_kernel_proper #ifdef XNU_KERNEL_PRIVATE extern mach_msg_return_t mach_msg_send_from_kernel_with_options_legacy( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_option_t option, - mach_msg_timeout_t timeout_val); + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_option_t option, + mach_msg_timeout_t timeout_val); #endif /* XNU_KERNEL_PRIVATE */ extern mach_msg_return_t mach_msg_send_from_kernel_with_options( - mach_msg_header_t *msg, - mach_msg_size_t send_size, - mach_msg_option_t option, - mach_msg_timeout_t timeout_val); + mach_msg_header_t *msg, + mach_msg_size_t send_size, + mach_msg_option_t option, + mach_msg_timeout_t timeout_val); __END_DECLS -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE extern void mach_msg_receive_continue(void); /* Initialize kernel server dispatch table */ -extern void mig_init(void); +extern void mig_init(void); /* * Kernel implementation of the MIG object base class * * Conforms to the MIGObjectInterface defined in * Ports are automatically allocated for the duration of outstanding - * cross-task references and then released. + * cross-task references and then released. 
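ipc_mig.h steers callers to the _proper entry points by macro (mach_msg_send_from_kernel expands to mach_msg_send_from_kernel_proper), while ipc_mig.c #undefs the macro so the legacy symbol still exists for already-built clients. A sketch of that rename-with-compatibility pattern, folded into one file with hypothetical names:

#include <assert.h>

/* Header side: code that spells the old name compiles against _proper. */
int frobnicate_proper(int x);
#define frobnicate frobnicate_proper

static int caller(void) { return frobnicate(20); }  /* calls _proper */

/* Implementation side: #undef so the old symbol can still be defined
 * for previously built binaries (here it deliberately differs by one
 * so the test can tell the two apart). */
#undef frobnicate
int frobnicate(int x);
int frobnicate(int x) { return frobnicate_proper(x) + 1; }

int frobnicate_proper(int x) { return x + 1; }

int main(void)
{
	assert(caller() == 21);        /* macro routed to _proper */
	assert(frobnicate(20) == 22);  /* legacy symbol still exists */
	return 0;
}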
*/ typedef struct mig_object { - const IMIGObjectVtbl *pVtbl; /* our interface def */ - mach_port_t port; /* our port pointer */ + const IMIGObjectVtbl *pVtbl; /* our interface def */ + mach_port_t port; /* our port pointer */ } mig_object_data_t; @@ -205,34 +205,34 @@ typedef struct mig_object { * chain and deliver the appropriate notification. */ typedef struct mig_notify_object { - const IMIGNotifyObjectVtbl *pVtbl; /* our interface def */ - mach_port_t port; /* our port pointer */ + const IMIGNotifyObjectVtbl *pVtbl; /* our interface def */ + mach_port_t port; /* our port pointer */ } mig_notify_object_data_t; extern kern_return_t mig_object_init( - mig_object_t mig_object, - const IMIGObject *interface); + mig_object_t mig_object, + const IMIGObject *interface); extern void mig_object_destroy( - mig_object_t mig_object); + mig_object_t mig_object); extern void mig_object_reference( - mig_object_t mig_object); + mig_object_t mig_object); extern void mig_object_deallocate( - mig_object_t mig_object); + mig_object_t mig_object); extern ipc_port_t convert_mig_object_to_port( - mig_object_t mig_object); + mig_object_t mig_object); extern mig_object_t convert_port_to_mig_object( - ipc_port_t port, - const MIGIID *iid); + ipc_port_t port, + const MIGIID *iid); boolean_t mig_object_no_senders( - ipc_port_t port, - mach_port_mscount_t mscount); + ipc_port_t port, + mach_port_mscount_t mscount); #endif /* MACH_KERNEL_PRIVATE */ -#endif /* _KERN_IPC_MIG_H_ */ +#endif /* _KERN_IPC_MIG_H_ */ diff --git a/osfmk/kern/ipc_misc.c b/osfmk/kern/ipc_misc.c index 6690db410..655d385e8 100644 --- a/osfmk/kern/ipc_misc.c +++ b/osfmk/kern/ipc_misc.c @@ -44,10 +44,10 @@ extern void fileport_releasefg(struct fileglob *); * Description: Obtain a send right for the given fileglob, which must be * referenced. * - * Parameters: fg A fileglob. + * Parameters: fg A fileglob. * - * Returns: Port of type IKOT_FILEPORT with fileglob set as its kobject. - * Port is returned with a send right. + * Returns: Port of type IKOT_FILEPORT with fileglob set as its kobject. + * Port is returned with a send right. */ ipc_port_t fileport_alloc(struct fileglob *fg) @@ -95,12 +95,14 @@ fileport_port_to_fileglob(ipc_port_t port) { struct fileglob *fg = NULL; - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return NULL; + } ip_lock(port); - if (ip_active(port) && IKOT_FILEPORT == ip_kotype(port)) + if (ip_active(port) && IKOT_FILEPORT == ip_kotype(port)) { fg = (void *)port->ip_kobject; + } ip_unlock(port); return fg; @@ -111,8 +113,8 @@ fileport_port_to_fileglob(ipc_port_t port) * fileport_notify * * Description: Handle a no-senders notification for a fileport. Unless - * the message is spoofed, destroys the port and releases - * its reference on the fileglob. + * the message is spoofed, destroys the port and releases + * its reference on the fileglob. * * Parameters: msg A Mach no-senders notification message. 
*/ @@ -123,19 +125,23 @@ fileport_notify(mach_msg_header_t *msg) ipc_port_t port = notification->not_header.msgh_remote_port; struct fileglob *fg = NULL; - if (!IP_VALID(port)) + if (!IP_VALID(port)) { panic("Invalid port passed to fileport_notify()\n"); + } ip_lock(port); fg = (struct fileglob *)port->ip_kobject; - if (!ip_active(port)) + if (!ip_active(port)) { panic("Inactive port passed to fileport_notify()\n"); - if (ip_kotype(port) != IKOT_FILEPORT) + } + if (ip_kotype(port) != IKOT_FILEPORT) { panic("Port of type other than IKOT_FILEPORT passed to fileport_notify()\n"); - if (fg == NULL) + } + if (fg == NULL) { panic("fileport without an assocated fileglob\n"); + } if (port->ip_srights == 0) { ip_unlock(port); @@ -160,8 +166,8 @@ fileport_notify(mach_msg_header_t *msg) */ kern_return_t fileport_invoke(task_t task, mach_port_name_t name, - int (*action)(mach_port_name_t, struct fileglob *, void *), - void *arg, int *rval) + int (*action)(mach_port_name_t, struct fileglob *, void *), + void *arg, int *rval) { kern_return_t kr; ipc_port_t fileport; @@ -169,15 +175,17 @@ fileport_invoke(task_t task, mach_port_name_t name, kr = ipc_object_copyin(task->itk_space, name, MACH_MSG_TYPE_COPY_SEND, (ipc_object_t *)&fileport); - if (kr != KERN_SUCCESS) - return (kr); + if (kr != KERN_SUCCESS) { + return kr; + } - if ((fg = fileport_port_to_fileglob(fileport)) != NULL) + if ((fg = fileport_port_to_fileglob(fileport)) != NULL) { *rval = (*action)(name, fg, arg); - else + } else { kr = KERN_FAILURE; + } ipc_port_release_send(fileport); - return (kr); + return kr; } /* @@ -191,14 +199,14 @@ fileport_invoke(task_t task, mach_port_name_t name, * and (c) if we could ask for port names by kobject type. Not * clear that it's worth all that complexity, though. * - * Parameters: task The target task + * Parameters: task The target task * action The function to invoke on each fileport * arg Anonymous pointer to caller state. */ kern_return_t fileport_walk(task_t task, - int (*action)(mach_port_name_t, struct fileglob *, void *arg), - void *arg) + int (*action)(mach_port_name_t, struct fileglob *, void *arg), + void *arg) { mach_port_name_t *names; mach_msg_type_number_t ncnt, tcnt; @@ -217,24 +225,26 @@ fileport_walk(task_t task, kr = mach_port_names(task->itk_space, (mach_port_name_t **)&map_copy_names, &ncnt, (mach_port_type_t **)&map_copy_types, &tcnt); - if (kr != KERN_SUCCESS) - return (kr); + if (kr != KERN_SUCCESS) { + return kr; + } vm_map_copy_discard(map_copy_types); kr = vm_map_copyout(ipc_kernel_map, &map_names, map_copy_names); if (kr != KERN_SUCCESS) { vm_map_copy_discard(map_copy_names); - return (kr); + return kr; } names = (mach_port_name_t *)(uintptr_t)map_names; - for (rval = 0, i = 0; i < ncnt; i++) + for (rval = 0, i = 0; i < ncnt; i++) { if (fileport_invoke(task, names[i], action, arg, - &rval) == KERN_SUCCESS && -1 == rval) - break; /* early termination clause */ - + &rval) == KERN_SUCCESS && -1 == rval) { + break; /* early termination clause */ + } + } vm_deallocate(ipc_kernel_map, - (vm_address_t)names, ncnt * sizeof (*names)); - return (KERN_SUCCESS); + (vm_address_t)names, ncnt * sizeof(*names)); + return KERN_SUCCESS; } diff --git a/osfmk/kern/ipc_sync.c b/osfmk/kern/ipc_sync.c index 941bd1839..d09b42157 100644 --- a/osfmk/kern/ipc_sync.c +++ b/osfmk/kern/ipc_sync.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
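fileport_walk above hands every port name in the task's space to the caller's action and honors an early-termination clause: an action that reports -1 through rval stops the walk. The contract in isolation, with hypothetical types:

#include <assert.h>
#include <stddef.h>

typedef unsigned int port_name_t;

/* Invoke the action on each name; stop as soon as it returns -1
 * (the early termination clause). */
static void
walk(const port_name_t *names, size_t n,
    int (*action)(port_name_t, void *), void *arg)
{
	for (size_t i = 0; i < n; i++) {
		if (action(names[i], arg) == -1) {
			break;
		}
	}
}

static int find_first_even(port_name_t name, void *arg)
{
	if (name % 2 == 0) {
		*(port_name_t *)arg = name;
		return -1;              /* found it: stop walking */
	}
	return 0;
}

int main(void)
{
	port_name_t names[] = { 3, 7, 10, 12 };
	port_name_t hit = 0;

	walk(names, 4, find_first_even, &hit);
	assert(hit == 10);
	return 0;
}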
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ #include @@ -54,8 +54,8 @@ */ kern_return_t port_name_to_semaphore( - mach_port_name_t name, - semaphore_t *semaphorep) + mach_port_name_t name, + semaphore_t *semaphorep) { ipc_port_t kern_port; kern_return_t kr; @@ -66,7 +66,7 @@ port_name_to_semaphore( } kr = ipc_object_translate(current_space(), name, MACH_PORT_RIGHT_SEND, - (ipc_object_t *) &kern_port); + (ipc_object_t *) &kern_port); if (kr != KERN_SUCCESS) { *semaphorep = SEMAPHORE_NULL; return kr; @@ -97,9 +97,8 @@ port_name_to_semaphore( * Port may or may not be locked. */ semaphore_t -convert_port_to_semaphore (ipc_port_t port) +convert_port_to_semaphore(ipc_port_t port) { - if (IP_VALID(port)) { semaphore_t semaphore; @@ -112,7 +111,7 @@ convert_port_to_semaphore (ipc_port_t port) assert(ip_active(port)); semaphore = (semaphore_t) port->ip_kobject; semaphore_reference(semaphore); - return (semaphore); + return semaphore; } } return SEMAPHORE_NULL; @@ -131,12 +130,13 @@ convert_port_to_semaphore (ipc_port_t port) * all extant send rights collectively. */ ipc_port_t -convert_semaphore_to_port (semaphore_t semaphore) +convert_semaphore_to_port(semaphore_t semaphore) { ipc_port_t port, send; - if (semaphore == SEMAPHORE_NULL) - return (IP_NULL); + if (semaphore == SEMAPHORE_NULL) { + return IP_NULL; + } /* caller is donating a reference */ port = semaphore->port; @@ -172,7 +172,7 @@ convert_semaphore_to_port (semaphore_t semaphore) ip_unlock(port); semaphore_dereference(semaphore); } - return (send); + return send; } /* @@ -204,14 +204,13 @@ semaphore_notify(mach_msg_header_t *msg) } lock_set_t -convert_port_to_lock_set (__unused ipc_port_t port) +convert_port_to_lock_set(__unused ipc_port_t port) { - return (LOCK_SET_NULL); + return LOCK_SET_NULL; } ipc_port_t -convert_lock_set_to_port (__unused lock_set_t lock_set) +convert_lock_set_to_port(__unused lock_set_t lock_set) { - return (IP_NULL); + return IP_NULL; } - diff --git a/osfmk/kern/ipc_sync.h b/osfmk/kern/ipc_sync.h index c9fca597b..674dcfd3f 100644 --- a/osfmk/kern/ipc_sync.h +++ b/osfmk/kern/ipc_sync.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
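convert_semaphore_to_port documents that the caller donates a reference: on every path the routine either folds that reference into the returned send right or drops it before returning. A refcounting sketch of the donation convention (ksem_t and the helpers are stand-ins, not the kernel types):

#include <assert.h>
#include <stdlib.h>

typedef struct { int refs; } ksem_t;    /* hypothetical stand-in */

static ksem_t *ksem_make(void)
{
	ksem_t *s = malloc(sizeof *s);
	s->refs = 1;
	return s;
}

static void ksem_ref(ksem_t *s)   { s->refs++; }
static void ksem_unref(ksem_t *s) { if (--s->refs == 0) free(s); }

/* Caller donates one reference; if we don't keep it, we must drop it. */
static void *convert_to_handle(ksem_t *s, int keep)
{
	if (!keep) {
		ksem_unref(s);          /* donated reference not kept */
		return NULL;
	}
	return s;                   /* handle owns the donated reference */
}

int main(void)
{
	ksem_t *s = ksem_make();

	ksem_ref(s);                             /* reference to donate */
	assert(convert_to_handle(s, 0) == NULL);
	assert(s->refs == 1);                    /* our own ref survives */
	ksem_unref(s);
	return 0;
}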
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ #ifndef _KERN_IPC_SYNC_H_ #define _KERN_IPC_SYNC_H_ @@ -36,14 +36,14 @@ #include #include -extern semaphore_t convert_port_to_semaphore (ipc_port_t port); -extern ipc_port_t convert_semaphore_to_port (semaphore_t semaphore); +extern semaphore_t convert_port_to_semaphore(ipc_port_t port); +extern ipc_port_t convert_semaphore_to_port(semaphore_t semaphore); extern kern_return_t port_name_to_semaphore( - mach_port_name_t name, - semaphore_t *semaphore); + mach_port_name_t name, + semaphore_t *semaphore); extern void semaphore_notify(mach_msg_header_t *msg); -lock_set_t convert_port_to_lock_set (ipc_port_t port); -ipc_port_t convert_lock_set_to_port (lock_set_t lock_set); +lock_set_t convert_port_to_lock_set(ipc_port_t port); +ipc_port_t convert_lock_set_to_port(lock_set_t lock_set); #endif /* _KERN_IPC_SYNC_H_ */ diff --git a/osfmk/kern/ipc_tt.c b/osfmk/kern/ipc_tt.c index eefc9e013..03fdc53bf 100644 --- a/osfmk/kern/ipc_tt.c +++ b/osfmk/kern/ipc_tt.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. 
- * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -122,8 +122,8 @@ kern_return_t task_conversion_eval(task_t caller, task_t victim); void ipc_task_init( - task_t task, - task_t parent) + task_t task, + task_t parent) { ipc_space_t space; ipc_port_t kport; @@ -133,18 +133,21 @@ ipc_task_init( kr = ipc_space_create(&ipc_table_entries[0], &space); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { panic("ipc_task_init"); + } space->is_task = task; kport = ipc_port_alloc_kernel(); - if (kport == IP_NULL) + if (kport == IP_NULL) { panic("ipc_task_init"); + } nport = ipc_port_alloc_kernel(); - if (nport == IP_NULL) + if (nport == IP_NULL) { panic("ipc_task_init"); + } itk_lock_init(task); task->itk_self = kport; @@ -170,18 +173,17 @@ ipc_task_init( #endif /* always zero-out the first (unused) array element */ - bzero(&task->exc_actions[0], sizeof(task->exc_actions[0])); + if (parent == TASK_NULL) { ipc_port_t port; - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { task->exc_actions[i].port = IP_NULL; task->exc_actions[i].flavor = 0; task->exc_actions[i].behavior = 0; task->exc_actions[i].privileged = FALSE; }/* for */ - + kr = host_get_host_port(host_priv_self(), &port); assert(kr == KERN_SUCCESS); task->itk_host = port; @@ -191,47 +193,49 @@ ipc_task_init( task->itk_gssd = IP_NULL; task->itk_task_access = IP_NULL; - for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) { task->itk_registered[i] = IP_NULL; + } } else { itk_lock(parent); assert(parent->itk_self != IP_NULL); /* inherit registered ports */ - for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) { task->itk_registered[i] = - ipc_port_copy_send(parent->itk_registered[i]); + ipc_port_copy_send(parent->itk_registered[i]); + } /* inherit exception and bootstrap ports */ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { - task->exc_actions[i].port = - ipc_port_copy_send(parent->exc_actions[i].port); - task->exc_actions[i].flavor = - parent->exc_actions[i].flavor; - task->exc_actions[i].behavior = - parent->exc_actions[i].behavior; - task->exc_actions[i].privileged = - parent->exc_actions[i].privileged; + task->exc_actions[i].port = + ipc_port_copy_send(parent->exc_actions[i].port); + task->exc_actions[i].flavor = + parent->exc_actions[i].flavor; + task->exc_actions[i].behavior = + parent->exc_actions[i].behavior; + task->exc_actions[i].privileged = + parent->exc_actions[i].privileged; #if CONFIG_MACF - mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i); + mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i); #endif }/* for */ task->itk_host = - ipc_port_copy_send(parent->itk_host); + ipc_port_copy_send(parent->itk_host); task->itk_bootstrap = - ipc_port_copy_send(parent->itk_bootstrap); + ipc_port_copy_send(parent->itk_bootstrap); task->itk_seatbelt = - ipc_port_copy_send(parent->itk_seatbelt); + ipc_port_copy_send(parent->itk_seatbelt); task->itk_gssd = - 
ipc_port_copy_send(parent->itk_gssd); + ipc_port_copy_send(parent->itk_gssd); task->itk_task_access = - ipc_port_copy_send(parent->itk_task_access); + ipc_port_copy_send(parent->itk_task_access); itk_unlock(parent); } @@ -247,18 +251,20 @@ ipc_task_init( void ipc_task_enable( - task_t task) + task_t task) { ipc_port_t kport; ipc_port_t nport; itk_lock(task); kport = task->itk_self; - if (kport != IP_NULL) + if (kport != IP_NULL) { ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK); + } nport = task->itk_nself; - if (nport != IP_NULL) + if (nport != IP_NULL) { ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME); + } itk_unlock(task); } @@ -272,7 +278,7 @@ ipc_task_enable( void ipc_task_disable( - task_t task) + task_t task) { ipc_port_t kport; ipc_port_t nport; @@ -280,11 +286,13 @@ ipc_task_disable( itk_lock(task); kport = task->itk_self; - if (kport != IP_NULL) + if (kport != IP_NULL) { ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); + } nport = task->itk_nself; - if (nport != IP_NULL) + if (nport != IP_NULL) { ipc_kobject_set(nport, IKO_NULL, IKOT_NONE); + } rport = task->itk_resume; if (rport != IP_NULL) { @@ -294,7 +302,7 @@ ipc_task_disable( * * There are still outstanding suspensions on this task, * even as it is being torn down. Disconnect the task - * from the rport, thereby "orphaning" the rport. The rport + * from the rport, thereby "orphaning" the rport. The rport * itself will go away only when the last suspension holder * destroys his SO right to it -- when he either * exits, or tries to actually use that last SO right to @@ -316,11 +324,11 @@ ipc_task_disable( void ipc_task_terminate( - task_t task) + task_t task) { ipc_port_t kport; ipc_port_t nport; - ipc_port_t rport; + ipc_port_t rport; int i; itk_lock(task); @@ -344,8 +352,9 @@ ipc_task_terminate( /* release the naked send rights */ - if (IP_VALID(task->itk_sself)) + if (IP_VALID(task->itk_sself)) { ipc_port_release_send(task->itk_sself); + } for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { if (IP_VALID(task->exc_actions[i].port)) { @@ -356,33 +365,42 @@ ipc_task_terminate( #endif } - if (IP_VALID(task->itk_host)) + if (IP_VALID(task->itk_host)) { ipc_port_release_send(task->itk_host); + } - if (IP_VALID(task->itk_bootstrap)) + if (IP_VALID(task->itk_bootstrap)) { ipc_port_release_send(task->itk_bootstrap); + } - if (IP_VALID(task->itk_seatbelt)) + if (IP_VALID(task->itk_seatbelt)) { ipc_port_release_send(task->itk_seatbelt); - - if (IP_VALID(task->itk_gssd)) + } + + if (IP_VALID(task->itk_gssd)) { ipc_port_release_send(task->itk_gssd); + } - if (IP_VALID(task->itk_task_access)) + if (IP_VALID(task->itk_task_access)) { ipc_port_release_send(task->itk_task_access); + } - if (IP_VALID(task->itk_debug_control)) + if (IP_VALID(task->itk_debug_control)) { ipc_port_release_send(task->itk_debug_control); + } - for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) - if (IP_VALID(task->itk_registered[i])) + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) { + if (IP_VALID(task->itk_registered[i])) { ipc_port_release_send(task->itk_registered[i]); + } + } /* destroy the kernel ports */ ipc_port_dealloc_kernel(kport); ipc_port_dealloc_kernel(nport); - if (rport != IP_NULL) - ipc_port_dealloc_kernel(rport); + if (rport != IP_NULL) { + ipc_port_dealloc_kernel(rport); + } itk_lock_destroy(task); } @@ -401,7 +419,7 @@ ipc_task_terminate( void ipc_task_reset( - task_t task) + task_t task) { ipc_port_t old_kport, new_kport; ipc_port_t old_sself; @@ -412,10 +430,11 @@ ipc_task_reset( /* Fresh label to unset credentials in existing 
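In ipc_task_init, a child with a live parent inherits its registered, exception, host, bootstrap, seatbelt, gssd, and task-access ports by taking a fresh send right to each while the parent's itk lock is held. A reduced model of that step (port_t, copy_send, and the slot array are stand-ins):

#include <assert.h>
#include <pthread.h>
#include <stddef.h>

#define NSLOTS 3

typedef struct { int srights; } port_t;

typedef struct {
	pthread_mutex_t lock;
	port_t *slot[NSLOTS];   /* inheritable rights, e.g. itk_registered */
} task_ipc_t;

/* Stand-in for ipc_port_copy_send: a new send right to the same port. */
static port_t *copy_send(port_t *p)
{
	if (p != NULL) {
		p->srights++;
	}
	return p;
}

/* Under the parent's lock, give the child its own send right for every
 * registered port. */
static void inherit(task_ipc_t *child, task_ipc_t *parent)
{
	pthread_mutex_lock(&parent->lock);
	for (int i = 0; i < NSLOTS; i++) {
		child->slot[i] = copy_send(parent->slot[i]);
	}
	pthread_mutex_unlock(&parent->lock);
}

int main(void)
{
	port_t host = { .srights = 1 };
	task_ipc_t parent = { PTHREAD_MUTEX_INITIALIZER, { &host, NULL, NULL } };
	task_ipc_t child  = { PTHREAD_MUTEX_INITIALIZER, { NULL } };

	inherit(&child, &parent);
	assert(child.slot[0] == &host && host.srights == 2);
	return 0;
}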
labels. */ struct label *unset_label = mac_exc_create_label(); #endif - + new_kport = ipc_port_alloc_kernel(); - if (new_kport == IP_NULL) + if (new_kport == IP_NULL) { panic("ipc_task_reset"); + } itk_lock(task); @@ -458,12 +477,12 @@ ipc_task_reset( task->exc_actions[i].port = IP_NULL; } }/* for */ - + if (IP_VALID(task->itk_debug_control)) { ipc_port_release_send(task->itk_debug_control); } task->itk_debug_control = IP_NULL; - + itk_unlock(task); #if CONFIG_MACF @@ -472,8 +491,9 @@ ipc_task_reset( /* release the naked send rights */ - if (IP_VALID(old_sself)) + if (IP_VALID(old_sself)) { ipc_port_release_send(old_sself); + } for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { if (IP_VALID(old_exc_actions[i])) { @@ -495,13 +515,14 @@ ipc_task_reset( void ipc_thread_init( - thread_t thread) + thread_t thread) { - ipc_port_t kport; + ipc_port_t kport; kport = ipc_port_alloc_kernel(); - if (kport == IP_NULL) + if (kport == IP_NULL) { panic("ipc_thread_init"); + } thread->ith_self = kport; thread->ith_sself = ipc_port_make_send(kport); @@ -521,7 +542,7 @@ ipc_thread_init( void ipc_thread_init_exc_actions( - thread_t thread) + thread_t thread) { assert(thread->exc_actions == NULL); @@ -537,7 +558,7 @@ ipc_thread_init_exc_actions( void ipc_thread_destroy_exc_actions( - thread_t thread) + thread_t thread) { if (thread->exc_actions != NULL) { #if CONFIG_MACF @@ -546,20 +567,21 @@ ipc_thread_destroy_exc_actions( } #endif - kfree(thread->exc_actions, - sizeof(struct exception_action) * EXC_TYPES_COUNT); + kfree(thread->exc_actions, + sizeof(struct exception_action) * EXC_TYPES_COUNT); thread->exc_actions = NULL; } } void ipc_thread_disable( - thread_t thread) + thread_t thread) { - ipc_port_t kport = thread->ith_self; + ipc_port_t kport = thread->ith_self; - if (kport != IP_NULL) + if (kport != IP_NULL) { ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); + } } /* @@ -572,22 +594,24 @@ ipc_thread_disable( void ipc_thread_terminate( - thread_t thread) + thread_t thread) { - ipc_port_t kport = thread->ith_self; + ipc_port_t kport = thread->ith_self; if (kport != IP_NULL) { - int i; + int i; - if (IP_VALID(thread->ith_sself)) + if (IP_VALID(thread->ith_sself)) { ipc_port_release_send(thread->ith_sself); + } thread->ith_sself = thread->ith_self = IP_NULL; if (thread->exc_actions != NULL) { for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { - if (IP_VALID(thread->exc_actions[i].port)) + if (IP_VALID(thread->exc_actions[i].port)) { ipc_port_release_send(thread->exc_actions[i].port); + } } ipc_thread_destroy_exc_actions(thread); } @@ -606,8 +630,9 @@ ipc_thread_terminate( assert(ipc_kmsg_queue_empty(&thread->ith_messages)); - if (thread->ith_rpc_reply != IP_NULL) + if (thread->ith_rpc_reply != IP_NULL) { ipc_port_dealloc_reply(thread->ith_rpc_reply); + } thread->ith_rpc_reply = IP_NULL; } @@ -617,7 +642,7 @@ ipc_thread_terminate( * Purpose: * Reset the IPC state for a given Mach thread when * its task enters an elevated security context. - * Both the thread port and its exception ports have + * Both the thread port and its exception ports have * to be reset. Its RPC reply port cannot have any * rights outstanding, so it should be fine. 
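ipc_task_reset (and ipc_task_terminate before it) follow a consistent teardown shape: detach and remember the old ports while holding itk_lock, drop the lock, then release the naked send rights outside it. A pthread sketch of that collect-then-release discipline:

#include <pthread.h>
#include <stdio.h>
#include <stddef.h>

typedef struct { const char *name; } port_t;

static void release_send(port_t *p) { printf("release %s\n", p->name); }

struct task_ipc {
	pthread_mutex_t lock;
	port_t *special[4];
};

/* Detach everything while the lock is held; do the (potentially heavier)
 * right releases only after dropping it. */
static void teardown(struct task_ipc *t)
{
	port_t *old[4];

	pthread_mutex_lock(&t->lock);
	for (int i = 0; i < 4; i++) {
		old[i] = t->special[i];
		t->special[i] = NULL;
	}
	pthread_mutex_unlock(&t->lock);

	for (int i = 0; i < 4; i++) {
		if (old[i] != NULL) {
			release_send(old[i]);
		}
	}
}

int main(void)
{
	port_t host = { "host" };
	struct task_ipc t = { PTHREAD_MUTEX_INITIALIZER, { &host } };

	teardown(&t);
	return 0;
}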
* Conditions: @@ -626,21 +651,22 @@ ipc_thread_terminate( void ipc_thread_reset( - thread_t thread) + thread_t thread) { ipc_port_t old_kport, new_kport; ipc_port_t old_sself; ipc_port_t old_exc_actions[EXC_TYPES_COUNT]; - boolean_t has_old_exc_actions = FALSE; - int i; + boolean_t has_old_exc_actions = FALSE; + int i; #if CONFIG_MACF struct label *new_label = mac_exc_create_label(); #endif - + new_kport = ipc_port_alloc_kernel(); - if (new_kport == IP_NULL) + if (new_kport == IP_NULL) { panic("ipc_task_reset"); + } thread_mtx_lock(thread); @@ -666,7 +692,7 @@ ipc_thread_reset( /* * Only ports that were set by root-owned processes - * (privileged ports) should survive + * (privileged ports) should survive */ if (thread->exc_actions != NULL) { has_old_exc_actions = TRUE; @@ -678,7 +704,7 @@ ipc_thread_reset( mac_exc_update_action_label(thread->exc_actions + i, new_label); #endif old_exc_actions[i] = thread->exc_actions[i].port; - thread->exc_actions[i].port = IP_NULL; + thread->exc_actions[i].port = IP_NULL; } } } @@ -688,11 +714,12 @@ ipc_thread_reset( #if CONFIG_MACF mac_exc_free_label(new_label); #endif - + /* release the naked send rights */ - if (IP_VALID(old_sself)) + if (IP_VALID(old_sself)) { ipc_port_release_send(old_sself); + } if (has_old_exc_actions) { for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { @@ -725,7 +752,7 @@ ipc_thread_reset( ipc_port_t retrieve_task_self_fast( - task_t task) + task_t task) { ipc_port_t port; @@ -742,8 +769,9 @@ retrieve_task_self_fast( ip_reference(port); port->ip_srights++; ip_unlock(port); - } else + } else { port = ipc_port_copy_send(port); + } itk_unlock(task); return port; @@ -763,7 +791,7 @@ retrieve_task_self_fast( ipc_port_t retrieve_thread_self_fast( - thread_t thread) + thread_t thread) { ipc_port_t port; @@ -781,9 +809,9 @@ retrieve_thread_self_fast( ip_reference(port); port->ip_srights++; ip_unlock(port); - } - else + } else { port = ipc_port_copy_send(port); + } thread_mtx_unlock(thread); @@ -837,7 +865,6 @@ thread_self_trap( sright = retrieve_thread_self_fast(thread); name = ipc_port_copyout_send(sright, task->itk_space); return name; - } /* @@ -860,10 +887,11 @@ mach_reply_port( kern_return_t kr; kr = ipc_port_alloc(current_task()->itk_space, &name, &port); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { ip_unlock(port); - else + } else { name = MACH_PORT_NULL; + } return name; } @@ -975,7 +1003,7 @@ ipc_port_unbind_special_reply_port( thread->ith_special_reply_port = NULL; ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL, - IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY, FALSE); + IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY, FALSE); /* port unlocked */ ip_release(special_reply_port); @@ -998,36 +1026,37 @@ ipc_port_unbind_special_reply_port( kern_return_t thread_get_special_port( - thread_t thread, - int which, - ipc_port_t *portp) + thread_t thread, + int which, + ipc_port_t *portp) { - kern_return_t result = KERN_SUCCESS; - ipc_port_t *whichp; + kern_return_t result = KERN_SUCCESS; + ipc_port_t *whichp; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } switch (which) { - case THREAD_KERNEL_PORT: whichp = &thread->ith_sself; break; default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - thread_mtx_lock(thread); + thread_mtx_lock(thread); - if (thread->active) + if (thread->active) { *portp = ipc_port_copy_send(*whichp); - else + } else { result = KERN_FAILURE; + } thread_mtx_unlock(thread); - return (result); + 
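/*
 * Minimal user-space sketch of the two calls serviced above (standard Mach
 * interfaces assumed; helper name is hypothetical):
 */
#include <mach/mach.h>

static void
special_port_demo(void)
{
    mach_port_t tself = mach_thread_self();  /* takes a send right */
    mach_port_t reply = mach_reply_port();   /* fresh receive right */
    mach_port_t kport = MACH_PORT_NULL;

    if (thread_get_special_port(tself, THREAD_KERNEL_PORT,
        &kport) == KERN_SUCCESS) {
        /* kport is a send right copied from the thread's ith_sself */
        mach_port_deallocate(mach_task_self(), kport);
    }
    mach_port_mod_refs(mach_task_self(), reply,
        MACH_PORT_RIGHT_RECEIVE, -1);        /* drop the reply port */
    mach_port_deallocate(mach_task_self(), tself);
}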
return result; } /* @@ -1047,24 +1076,24 @@ thread_get_special_port( kern_return_t thread_set_special_port( - thread_t thread, - int which, - ipc_port_t port) + thread_t thread, + int which, + ipc_port_t port) { - kern_return_t result = KERN_SUCCESS; - ipc_port_t *whichp, old = IP_NULL; + kern_return_t result = KERN_SUCCESS; + ipc_port_t *whichp, old = IP_NULL; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } switch (which) { - case THREAD_KERNEL_PORT: whichp = &thread->ith_sself; break; default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } thread_mtx_lock(thread); @@ -1072,16 +1101,17 @@ thread_set_special_port( if (thread->active) { old = *whichp; *whichp = port; - } - else + } else { result = KERN_FAILURE; + } thread_mtx_unlock(thread); - if (IP_VALID(old)) + if (IP_VALID(old)) { ipc_port_release_send(old); + } - return (result); + return result; } /* @@ -1100,14 +1130,15 @@ thread_set_special_port( kern_return_t task_get_special_port( - task_t task, - int which, - ipc_port_t *portp) + task_t task, + int which, + ipc_port_t *portp) { ipc_port_t port; - if (task == TASK_NULL) + if (task == TASK_NULL) { return KERN_INVALID_ARGUMENT; + } itk_lock(task); if (task->itk_self == IP_NULL) { @@ -1145,7 +1176,7 @@ task_get_special_port( break; default: - itk_unlock(task); + itk_unlock(task); return KERN_INVALID_ARGUMENT; } itk_unlock(task); @@ -1167,48 +1198,49 @@ task_get_special_port( * KERN_INVALID_ARGUMENT The task is null. * KERN_FAILURE The task/space is dead. * KERN_INVALID_ARGUMENT Invalid special port. - * KERN_NO_ACCESS Attempted overwrite of seatbelt port. + * KERN_NO_ACCESS Attempted overwrite of seatbelt port. */ kern_return_t task_set_special_port( - task_t task, - int which, - ipc_port_t port) + task_t task, + int which, + ipc_port_t port) { ipc_port_t *whichp; ipc_port_t old; - if (task == TASK_NULL) + if (task == TASK_NULL) { return KERN_INVALID_ARGUMENT; + } switch (which) { case TASK_KERNEL_PORT: - whichp = &task->itk_sself; - break; + whichp = &task->itk_sself; + break; case TASK_HOST_PORT: - whichp = &task->itk_host; - break; + whichp = &task->itk_host; + break; case TASK_BOOTSTRAP_PORT: - whichp = &task->itk_bootstrap; - break; + whichp = &task->itk_bootstrap; + break; case TASK_SEATBELT_PORT: - whichp = &task->itk_seatbelt; - break; + whichp = &task->itk_seatbelt; + break; case TASK_ACCESS_PORT: - whichp = &task->itk_task_access; - break; + whichp = &task->itk_task_access; + break; case TASK_DEBUG_CONTROL_PORT: - whichp = &task->itk_debug_control; - break; + whichp = &task->itk_debug_control; + break; default: - return KERN_INVALID_ARGUMENT; + return KERN_INVALID_ARGUMENT; }/* switch */ itk_lock(task); @@ -1218,18 +1250,19 @@ task_set_special_port( } /* do not allow overwrite of seatbelt or task access ports */ - if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which) - && IP_VALID(*whichp)) { - itk_unlock(task); - return KERN_NO_ACCESS; + if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which) + && IP_VALID(*whichp)) { + itk_unlock(task); + return KERN_NO_ACCESS; } old = *whichp; *whichp = port; itk_unlock(task); - if (IP_VALID(old)) + if (IP_VALID(old)) { ipc_port_release_send(old); + } return KERN_SUCCESS; } @@ -1256,26 +1289,29 @@ task_set_special_port( kern_return_t mach_ports_register( - task_t task, - mach_port_array_t memory, - mach_msg_type_number_t portsCnt) + task_t task, + mach_port_array_t memory, + mach_msg_type_number_t portsCnt) { ipc_port_t 
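/*
 * User-space sketch of the task special-port accessors above (standard Mach
 * interfaces assumed; parameter is hypothetical). Note the one-way latch:
 * once TASK_SEATBELT_PORT or TASK_ACCESS_PORT holds a valid port, a second
 * set returns KERN_NO_ACCESS.
 */
#include <mach/mach.h>

static kern_return_t
task_port_demo(mach_port_t sandbox_port)
{
    mach_port_t bootstrap = MACH_PORT_NULL;

    (void)task_get_special_port(mach_task_self(),
        TASK_BOOTSTRAP_PORT, &bootstrap);

    /* First set succeeds; a later set of a valid port yields KERN_NO_ACCESS. */
    return task_set_special_port(mach_task_self(),
        TASK_SEATBELT_PORT, sandbox_port);
}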
ports[TASK_PORT_REGISTER_MAX]; unsigned int i; if ((task == TASK_NULL) || (portsCnt > TASK_PORT_REGISTER_MAX) || - (portsCnt && memory == NULL)) + (portsCnt && memory == NULL)) { return KERN_INVALID_ARGUMENT; + } /* * Pad the port rights with nulls. */ - for (i = 0; i < portsCnt; i++) + for (i = 0; i < portsCnt; i++) { ports[i] = memory[i]; - for (; i < TASK_PORT_REGISTER_MAX; i++) + } + for (; i < TASK_PORT_REGISTER_MAX; i++) { ports[i] = IP_NULL; + } itk_lock(task); if (task->itk_self == IP_NULL) { @@ -1298,18 +1334,21 @@ mach_ports_register( itk_unlock(task); - for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) - if (IP_VALID(ports[i])) + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) { + if (IP_VALID(ports[i])) { ipc_port_release_send(ports[i]); + } + } /* * Now that the operation is known to be successful, * we can free the memory. */ - if (portsCnt != 0) + if (portsCnt != 0) { kfree(memory, - (vm_size_t) (portsCnt * sizeof(mach_port_t))); + (vm_size_t) (portsCnt * sizeof(mach_port_t))); + } return KERN_SUCCESS; } @@ -1330,23 +1369,25 @@ mach_ports_register( kern_return_t mach_ports_lookup( - task_t task, - mach_port_array_t *portsp, - mach_msg_type_number_t *portsCnt) + task_t task, + mach_port_array_t *portsp, + mach_msg_type_number_t *portsCnt) { void *memory; vm_size_t size; ipc_port_t *ports; int i; - if (task == TASK_NULL) + if (task == TASK_NULL) { return KERN_INVALID_ARGUMENT; + } size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t)); memory = kalloc(size); - if (memory == 0) + if (memory == 0) { return KERN_RESOURCE_SHORTAGE; + } itk_lock(task); if (task->itk_self == IP_NULL) { @@ -1363,8 +1404,9 @@ mach_ports_lookup( * is wired, we won't fault while holding the task lock. */ - for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) { ports[i] = ipc_port_copy_send(task->itk_registered[i]); + } itk_unlock(task); @@ -1454,7 +1496,7 @@ convert_port_to_locked_task(ipc_port_t port) */ if (task_lock_try(task)) { ip_unlock(port); - return(task); + return task; } try_failed_count++; @@ -1476,7 +1518,7 @@ convert_port_to_locked_task(ipc_port_t port) task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port) { - int try_failed_count = 0; + int try_failed_count = 0; while (IP_VALID(port)) { task_inspect_t task; @@ -1516,7 +1558,7 @@ convert_port_to_locked_task_inspect(ipc_port_t port) */ task_t convert_port_to_task( - ipc_port_t port) + ipc_port_t port) { return convert_port_to_task_with_exec_token(port, NULL); } @@ -1533,16 +1575,16 @@ convert_port_to_task( */ task_t convert_port_to_task_with_exec_token( - ipc_port_t port, - uint32_t *exec_token) + ipc_port_t port, + uint32_t *exec_token) { - task_t task = TASK_NULL; + task_t task = TASK_NULL; if (IP_VALID(port)) { ip_lock(port); - if ( ip_active(port) && - ip_kotype(port) == IKOT_TASK ) { + if (ip_active(port) && + ip_kotype(port) == IKOT_TASK) { task_t ct = current_task(); task = (task_t)port->ip_kobject; assert(task != TASK_NULL); @@ -1561,7 +1603,7 @@ convert_port_to_task_with_exec_token( ip_unlock(port); } - return (task); + return task; } /* @@ -1575,16 +1617,16 @@ convert_port_to_task_with_exec_token( */ task_name_t convert_port_to_task_name( - ipc_port_t port) + ipc_port_t port) { - task_name_t task = TASK_NULL; + task_name_t task = TASK_NULL; if (IP_VALID(port)) { ip_lock(port); - if ( ip_active(port) && - (ip_kotype(port) == IKOT_TASK || - ip_kotype(port) == IKOT_TASK_NAME)) { + if (ip_active(port) && + (ip_kotype(port) == IKOT_TASK || + ip_kotype(port) == IKOT_TASK_NAME)) { 
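/*
 * Sketch of the registered-ports pair above from user space (standard Mach
 * interfaces assumed; helper name and 'svc' are hypothetical). The kernel
 * keeps at most TASK_PORT_REGISTER_MAX (3) slots and pads unspecified slots
 * with IP_NULL, as the code shows.
 */
#include <mach/mach.h>

static void
registered_ports_demo(mach_port_t svc)
{
    mach_port_t regv[1] = { svc };
    mach_port_array_t out = NULL;
    mach_msg_type_number_t outCnt = 0;

    (void)mach_ports_register(mach_task_self(), regv, 1);
    (void)mach_ports_lookup(mach_task_self(), &out, &outCnt);
    /* out[] now holds send rights copied from itk_registered[]; the array
     * itself arrives as out-of-line memory the caller must vm_deallocate(). */
}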
task = (task_name_t)port->ip_kobject; assert(task != TASK_NAME_NULL); @@ -1594,7 +1636,7 @@ convert_port_to_task_name( ip_unlock(port); } - return (task); + return task; } /* @@ -1608,14 +1650,14 @@ convert_port_to_task_name( */ task_inspect_t convert_port_to_task_inspect( - ipc_port_t port) + ipc_port_t port) { task_inspect_t task = TASK_INSPECT_NULL; if (IP_VALID(port)) { ip_lock(port); - if (ip_active(port) && + if (ip_active(port) && ip_kotype(port) == IKOT_TASK) { task = (task_inspect_t)port->ip_kobject; assert(task != TASK_INSPECT_NULL); @@ -1626,7 +1668,7 @@ convert_port_to_task_inspect( ip_unlock(port); } - return (task); + return task; } /* @@ -1640,15 +1682,15 @@ convert_port_to_task_inspect( */ task_suspension_token_t convert_port_to_task_suspension_token( - ipc_port_t port) + ipc_port_t port) { - task_suspension_token_t task = TASK_NULL; + task_suspension_token_t task = TASK_NULL; if (IP_VALID(port)) { ip_lock(port); - if ( ip_active(port) && - ip_kotype(port) == IKOT_TASK_RESUME) { + if (ip_active(port) && + ip_kotype(port) == IKOT_TASK_RESUME) { task = (task_suspension_token_t)port->ip_kobject; assert(task != TASK_NULL); @@ -1658,7 +1700,7 @@ convert_port_to_task_suspension_token( ip_unlock(port); } - return (task); + return task; } /* @@ -1672,25 +1714,26 @@ convert_port_to_task_suspension_token( */ ipc_space_t convert_port_to_space( - ipc_port_t port) + ipc_port_t port) { ipc_space_t space; task_t task; task = convert_port_to_locked_task(port); - if (task == TASK_NULL) + if (task == TASK_NULL) { return IPC_SPACE_NULL; + } if (!task->active) { task_unlock(task); return IPC_SPACE_NULL; } - + space = task->itk_space; is_reference(space); task_unlock(task); - return (space); + return space; } /* @@ -1704,15 +1747,16 @@ convert_port_to_space( */ ipc_space_inspect_t convert_port_to_space_inspect( - ipc_port_t port) + ipc_port_t port) { ipc_space_inspect_t space; task_inspect_t task; task = convert_port_to_locked_task_inspect(port); - if (task == TASK_INSPECT_NULL) + if (task == TASK_INSPECT_NULL) { return IPC_SPACE_INSPECT_NULL; + } if (!task->active) { task_unlock(task); @@ -1737,21 +1781,22 @@ convert_port_to_space_inspect( vm_map_t convert_port_to_map( - ipc_port_t port) + ipc_port_t port) { task_t task; vm_map_t map; task = convert_port_to_locked_task(port); - - if (task == TASK_NULL) + + if (task == TASK_NULL) { return VM_MAP_NULL; + } if (!task->active) { task_unlock(task); return VM_MAP_NULL; } - + map = task->map; vm_map_reference_swap(map); task_unlock(task); @@ -1771,9 +1816,9 @@ convert_port_to_map( thread_t convert_port_to_thread( - ipc_port_t port) + ipc_port_t port) { - thread_t thread = THREAD_NULL; + thread_t thread = THREAD_NULL; if (IP_VALID(port)) { ip_lock(port); @@ -1795,7 +1840,7 @@ convert_port_to_thread( ip_unlock(port); } - return (thread); + return thread; } /* @@ -1809,7 +1854,7 @@ convert_port_to_thread( */ thread_inspect_t convert_port_to_thread_inspect( - ipc_port_t port) + ipc_port_t port) { thread_inspect_t thread = THREAD_INSPECT_NULL; @@ -1860,24 +1905,26 @@ convert_thread_inspect_to_port(thread_inspect_t thread) */ thread_t port_name_to_thread( - mach_port_name_t name) + mach_port_name_t name) { - thread_t thread = THREAD_NULL; - ipc_port_t kport; + thread_t thread = THREAD_NULL; + ipc_port_t kport; if (MACH_PORT_VALID(name)) { if (ipc_object_copyin(current_space(), name, - MACH_MSG_TYPE_COPY_SEND, - (ipc_object_t *)&kport) != KERN_SUCCESS) - return (THREAD_NULL); + MACH_MSG_TYPE_COPY_SEND, + (ipc_object_t *)&kport) != KERN_SUCCESS) { + return 
THREAD_NULL; + } thread = convert_port_to_thread(kport); - - if (IP_VALID(kport)) + + if (IP_VALID(kport)) { ipc_port_release_send(kport); + } } - return (thread); + return thread; } task_t @@ -1890,15 +1937,17 @@ port_name_to_task( if (MACH_PORT_VALID(name)) { kr = ipc_object_copyin(current_space(), name, - MACH_MSG_TYPE_COPY_SEND, - (ipc_object_t *) &kern_port); - if (kr != KERN_SUCCESS) + MACH_MSG_TYPE_COPY_SEND, + (ipc_object_t *) &kern_port); + if (kr != KERN_SUCCESS) { return TASK_NULL; + } task = convert_port_to_task(kern_port); - if (IP_VALID(kern_port)) + if (IP_VALID(kern_port)) { ipc_port_release_send(kern_port); + } } return task; } @@ -1913,15 +1962,17 @@ port_name_to_task_inspect( if (MACH_PORT_VALID(name)) { kr = ipc_object_copyin(current_space(), name, - MACH_MSG_TYPE_COPY_SEND, - (ipc_object_t *)&kern_port); - if (kr != KERN_SUCCESS) + MACH_MSG_TYPE_COPY_SEND, + (ipc_object_t *)&kern_port); + if (kr != KERN_SUCCESS) { return TASK_NULL; + } ti = convert_port_to_task_inspect(kern_port); - if (IP_VALID(kern_port)) + if (IP_VALID(kern_port)) { ipc_port_release_send(kern_port); + } } return ti; } @@ -1938,7 +1989,6 @@ host_t port_name_to_host( mach_port_name_t name) { - host_t host = HOST_NULL; kern_return_t kr; ipc_port_t port; @@ -1958,23 +2008,24 @@ port_name_to_host( * Purpose: * Convert from a task to a port. * Consumes a task ref; produces a naked send right - * which may be invalid. + * which may be invalid. * Conditions: * Nothing locked. */ ipc_port_t convert_task_to_port( - task_t task) + task_t task) { ipc_port_t port; itk_lock(task); - if (task->itk_self != IP_NULL) + if (task->itk_self != IP_NULL) { port = ipc_port_make_send(task->itk_self); - else + } else { port = IP_NULL; + } itk_unlock(task); @@ -1994,7 +2045,7 @@ convert_task_to_port( */ ipc_port_t convert_task_inspect_to_port( - task_inspect_t task) + task_inspect_t task) { task_deallocate(task); @@ -2006,13 +2057,13 @@ convert_task_inspect_to_port( * Purpose: * Convert from a task suspension token to a port. * Consumes a task suspension token ref; produces a naked send-once right - * which may be invalid. + * which may be invalid. * Conditions: * Nothing locked. */ ipc_port_t convert_task_suspension_token_to_port( - task_suspension_token_t task) + task_suspension_token_t task) { ipc_port_t port; @@ -2050,22 +2101,23 @@ convert_task_suspension_token_to_port( * Purpose: * Convert from a task name ref to a port. * Consumes a task name ref; produces a naked send right - * which may be invalid. + * which may be invalid. * Conditions: * Nothing locked. 
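/*
 * Kernel-side sketch of the conversion conventions in this file (the helper
 * is hypothetical, for illustration only): port_name_to_task() copies in and
 * then releases the send right, leaving the caller one task reference;
 * convert_task_to_port() consumes that reference and yields a naked send
 * right suitable for ipc_port_copyout_send().
 */
static mach_port_name_t
task_port_round_trip(mach_port_name_t name)
{
    task_t task = port_name_to_task(name);      /* takes a task reference */

    if (task == TASK_NULL) {
        return MACH_PORT_NULL;
    }
    /* convert_task_to_port() consumes the task reference. */
    return ipc_port_copyout_send(convert_task_to_port(task),
               current_space());
}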
*/ ipc_port_t convert_task_name_to_port( - task_name_t task_name) + task_name_t task_name) { ipc_port_t port; itk_lock(task_name); - if (task_name->itk_nself != IP_NULL) + if (task_name->itk_nself != IP_NULL) { port = ipc_port_make_send(task_name->itk_nself); - else + } else { port = IP_NULL; + } itk_unlock(task_name); task_name_deallocate(task_name); @@ -2084,22 +2136,23 @@ convert_task_name_to_port( ipc_port_t convert_thread_to_port( - thread_t thread) + thread_t thread) { - ipc_port_t port; + ipc_port_t port; thread_mtx_lock(thread); - if (thread->ith_self != IP_NULL) + if (thread->ith_self != IP_NULL) { port = ipc_port_make_send(thread->ith_self); - else + } else { port = IP_NULL; + } thread_mtx_unlock(thread); thread_deallocate(thread); - return (port); + return port; } /* @@ -2112,10 +2165,11 @@ convert_thread_to_port( void space_deallocate( - ipc_space_t space) + ipc_space_t space) { - if (space != IS_NULL) + if (space != IS_NULL) { is_release(space); + } } /* @@ -2128,10 +2182,11 @@ space_deallocate( void space_inspect_deallocate( - ipc_space_inspect_t space) + ipc_space_inspect_t space) { - if (space != IS_INSPECT_NULL) + if (space != IS_INSPECT_NULL) { is_release((ipc_space_t)space); + } } /* @@ -2154,57 +2209,59 @@ space_inspect_deallocate( kern_return_t thread_set_exception_ports( - thread_t thread, - exception_mask_t exception_mask, - ipc_port_t new_port, - exception_behavior_t new_behavior, - thread_state_flavor_t new_flavor) + thread_t thread, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor) { - ipc_port_t old_port[EXC_TYPES_COUNT]; + ipc_port_t old_port[EXC_TYPES_COUNT]; boolean_t privileged = current_task()->sec_token.val[0] == 0; - register int i; + register int i; #if CONFIG_MACF struct label *new_label; #endif - - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); - if (exception_mask & ~EXC_MASK_VALID) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } + + if (exception_mask & ~EXC_MASK_VALID) { + return KERN_INVALID_ARGUMENT; + } if (IP_VALID(new_port)) { switch (new_behavior & ~MACH_EXCEPTION_CODES) { - case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } } - /* + /* * Check the validity of the thread_state_flavor by calling the * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in * osfmk/mach/ARCHITECTURE/thread_status.h */ - if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) - return (KERN_INVALID_ARGUMENT); + if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) { + return KERN_INVALID_ARGUMENT; + } #if CONFIG_MACF new_label = mac_exc_create_label_for_current_proc(); #endif - + thread_mtx_lock(thread); if (!thread->active) { thread_mtx_unlock(thread); - return (KERN_FAILURE); + return KERN_FAILURE; } if (thread->exc_actions == NULL) { @@ -2213,17 +2270,17 @@ thread_set_exception_ports( for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if ((exception_mask & (1 << i)) #if CONFIG_MACF - && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0 + && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0 #endif - ) { + ) { old_port[i] = thread->exc_actions[i].port; thread->exc_actions[i].port = ipc_port_copy_send(new_port); thread->exc_actions[i].behavior = new_behavior; thread->exc_actions[i].flavor = new_flavor; thread->exc_actions[i].privileged = 
privileged; - } - else + } else { old_port[i] = IP_NULL; + } } thread_mtx_unlock(thread); @@ -2231,49 +2288,53 @@ thread_set_exception_ports( #if CONFIG_MACF mac_exc_free_label(new_label); #endif - - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) - if (IP_VALID(old_port[i])) + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { + if (IP_VALID(old_port[i])) { ipc_port_release_send(old_port[i]); + } + } - if (IP_VALID(new_port)) /* consume send right */ + if (IP_VALID(new_port)) { /* consume send right */ ipc_port_release_send(new_port); + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t task_set_exception_ports( - task_t task, - exception_mask_t exception_mask, - ipc_port_t new_port, - exception_behavior_t new_behavior, - thread_state_flavor_t new_flavor) + task_t task, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor) { - ipc_port_t old_port[EXC_TYPES_COUNT]; + ipc_port_t old_port[EXC_TYPES_COUNT]; boolean_t privileged = current_task()->sec_token.val[0] == 0; - register int i; + register int i; #if CONFIG_MACF struct label *new_label; -#endif +#endif - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (exception_mask & ~EXC_MASK_VALID) - return (KERN_INVALID_ARGUMENT); + if (exception_mask & ~EXC_MASK_VALID) { + return KERN_INVALID_ARGUMENT; + } if (IP_VALID(new_port)) { switch (new_behavior & ~MACH_EXCEPTION_CODES) { - case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } } @@ -2282,36 +2343,37 @@ task_set_exception_ports( * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in * osfmk/mach/ARCHITECTURE/thread_status.h */ - if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) - return (KERN_INVALID_ARGUMENT); + if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) { + return KERN_INVALID_ARGUMENT; + } #if CONFIG_MACF new_label = mac_exc_create_label_for_current_proc(); #endif - + itk_lock(task); if (task->itk_self == IP_NULL) { itk_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if ((exception_mask & (1 << i)) #if CONFIG_MACF - && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0 + && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0 #endif - ) { + ) { old_port[i] = task->exc_actions[i].port; task->exc_actions[i].port = - ipc_port_copy_send(new_port); + ipc_port_copy_send(new_port); task->exc_actions[i].behavior = new_behavior; task->exc_actions[i].flavor = new_flavor; task->exc_actions[i].privileged = privileged; - } - else + } else { old_port[i] = IP_NULL; + } } itk_unlock(task); @@ -2319,15 +2381,18 @@ task_set_exception_ports( #if CONFIG_MACF mac_exc_free_label(new_label); #endif - - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) - if (IP_VALID(old_port[i])) + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { + if (IP_VALID(old_port[i])) { ipc_port_release_send(old_port[i]); + } + } - if (IP_VALID(new_port)) /* consume send right */ + if (IP_VALID(new_port)) { /* consume send right */ ipc_port_release_send(new_port); + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -2359,46 +2424,48 @@ task_set_exception_ports( kern_return_t thread_swap_exception_ports( - thread_t thread, - exception_mask_t exception_mask, - ipc_port_t new_port, - 
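/*
 * User-space sketch of installing a task-wide exception handler via the
 * routine above (standard Mach interfaces assumed; helper name and exc_port
 * setup are hypothetical):
 */
#include <mach/mach.h>

static kern_return_t
install_handler(mach_port_t exc_port)
{
    /* exc_port must carry a send right; the kernel copies it into
     * exc_actions[] for every exception type selected by the mask. */
    return task_set_exception_ports(mach_task_self(),
        EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
        exc_port,
        (exception_behavior_t)(EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES),
        THREAD_STATE_NONE);
}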
exception_behavior_t new_behavior, - thread_state_flavor_t new_flavor, - exception_mask_array_t masks, - mach_msg_type_number_t *CountCnt, - exception_port_array_t ports, - exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors) + thread_t thread, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor, + exception_mask_array_t masks, + mach_msg_type_number_t *CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) { - ipc_port_t old_port[EXC_TYPES_COUNT]; + ipc_port_t old_port[EXC_TYPES_COUNT]; boolean_t privileged = current_task()->sec_token.val[0] == 0; - unsigned int i, j, count; + unsigned int i, j, count; #if CONFIG_MACF struct label *new_label; #endif - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (exception_mask & ~EXC_MASK_VALID) - return (KERN_INVALID_ARGUMENT); + if (exception_mask & ~EXC_MASK_VALID) { + return KERN_INVALID_ARGUMENT; + } if (IP_VALID(new_port)) { switch (new_behavior & ~MACH_EXCEPTION_CODES) { - case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } } - if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) - return (KERN_INVALID_ARGUMENT); + if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) { + return KERN_INVALID_ARGUMENT; + } #if CONFIG_MACF new_label = mac_exc_create_label_for_current_proc(); @@ -2409,7 +2476,7 @@ thread_swap_exception_ports( if (!thread->active) { thread_mtx_unlock(thread); - return (KERN_FAILURE); + return KERN_FAILURE; } if (thread->exc_actions == NULL) { @@ -2420,17 +2487,17 @@ thread_swap_exception_ports( for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) { if ((exception_mask & (1 << i)) #if CONFIG_MACF - && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0 + && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0 #endif - ) { + ) { for (j = 0; j < count; ++j) { /* * search for an identical entry, if found * set corresponding mask for this exception. 
*/ - if ( thread->exc_actions[i].port == ports[j] && - thread->exc_actions[i].behavior == behaviors[j] && - thread->exc_actions[i].flavor == flavors[j] ) { + if (thread->exc_actions[i].port == ports[j] && + thread->exc_actions[i].behavior == behaviors[j] && + thread->exc_actions[i].flavor == flavors[j]) { masks[j] |= (1 << i); break; } @@ -2450,9 +2517,9 @@ thread_swap_exception_ports( thread->exc_actions[i].behavior = new_behavior; thread->exc_actions[i].flavor = new_flavor; thread->exc_actions[i].privileged = privileged; - } - else + } else { old_port[i] = IP_NULL; + } } thread_mtx_unlock(thread); @@ -2460,90 +2527,94 @@ thread_swap_exception_ports( #if CONFIG_MACF mac_exc_free_label(new_label); #endif - + while (--i >= FIRST_EXCEPTION) { - if (IP_VALID(old_port[i])) + if (IP_VALID(old_port[i])) { ipc_port_release_send(old_port[i]); + } } - if (IP_VALID(new_port)) /* consume send right */ + if (IP_VALID(new_port)) { /* consume send right */ ipc_port_release_send(new_port); + } *CountCnt = count; - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t task_swap_exception_ports( - task_t task, - exception_mask_t exception_mask, - ipc_port_t new_port, - exception_behavior_t new_behavior, - thread_state_flavor_t new_flavor, - exception_mask_array_t masks, - mach_msg_type_number_t *CountCnt, - exception_port_array_t ports, - exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors) + task_t task, + exception_mask_t exception_mask, + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor, + exception_mask_array_t masks, + mach_msg_type_number_t *CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) { - ipc_port_t old_port[EXC_TYPES_COUNT]; + ipc_port_t old_port[EXC_TYPES_COUNT]; boolean_t privileged = current_task()->sec_token.val[0] == 0; - unsigned int i, j, count; + unsigned int i, j, count; #if CONFIG_MACF struct label *new_label; -#endif - - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); +#endif + + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (exception_mask & ~EXC_MASK_VALID) - return (KERN_INVALID_ARGUMENT); + if (exception_mask & ~EXC_MASK_VALID) { + return KERN_INVALID_ARGUMENT; + } if (IP_VALID(new_port)) { switch (new_behavior & ~MACH_EXCEPTION_CODES) { - case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } } - if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) - return (KERN_INVALID_ARGUMENT); + if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) { + return KERN_INVALID_ARGUMENT; + } #if CONFIG_MACF new_label = mac_exc_create_label_for_current_proc(); #endif - + itk_lock(task); if (task->itk_self == IP_NULL) { itk_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } assert(EXC_TYPES_COUNT > FIRST_EXCEPTION); for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) { if ((exception_mask & (1 << i)) #if CONFIG_MACF - && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0 + && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0 #endif - ) { + ) { for (j = 0; j < count; j++) { /* * search for an identical entry, if found * set corresponding mask for this exception. 
*/ - if ( task->exc_actions[i].port == ports[j] && - task->exc_actions[i].behavior == behaviors[j] && - task->exc_actions[i].flavor == flavors[j] ) { + if (task->exc_actions[i].port == ports[j] && + task->exc_actions[i].behavior == behaviors[j] && + task->exc_actions[i].flavor == flavors[j]) { masks[j] |= (1 << i); break; } @@ -2559,13 +2630,13 @@ task_swap_exception_ports( old_port[i] = task->exc_actions[i].port; - task->exc_actions[i].port = ipc_port_copy_send(new_port); + task->exc_actions[i].port = ipc_port_copy_send(new_port); task->exc_actions[i].behavior = new_behavior; task->exc_actions[i].flavor = new_flavor; task->exc_actions[i].privileged = privileged; - } - else + } else { old_port[i] = IP_NULL; + } } itk_unlock(task); @@ -2573,18 +2644,20 @@ task_swap_exception_ports( #if CONFIG_MACF mac_exc_free_label(new_label); #endif - + while (--i >= FIRST_EXCEPTION) { - if (IP_VALID(old_port[i])) + if (IP_VALID(old_port[i])) { ipc_port_release_send(old_port[i]); + } } - if (IP_VALID(new_port)) /* consume send right */ + if (IP_VALID(new_port)) { /* consume send right */ ipc_port_release_send(new_port); + } *CountCnt = count; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -2608,28 +2681,30 @@ task_swap_exception_ports( kern_return_t thread_get_exception_ports( - thread_t thread, - exception_mask_t exception_mask, - exception_mask_array_t masks, - mach_msg_type_number_t *CountCnt, - exception_port_array_t ports, - exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors) + thread_t thread, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t *CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) { - unsigned int i, j, count; + unsigned int i, j, count; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (exception_mask & ~EXC_MASK_VALID) - return (KERN_INVALID_ARGUMENT); + if (exception_mask & ~EXC_MASK_VALID) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); if (!thread->active) { thread_mtx_unlock(thread); - return (KERN_FAILURE); + return KERN_FAILURE; } count = 0; @@ -2645,9 +2720,9 @@ thread_get_exception_ports( * search for an identical entry, if found * set corresponding mask for this exception. 
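/*
 * Sketch of the swap variant above: atomically install a new handler while
 * capturing the old ones so they can be restored later (standard Mach
 * interfaces assumed; helper name is hypothetical, error handling elided):
 */
#include <mach/mach.h>

static void
swap_handler_demo(mach_port_t new_port)
{
    exception_mask_t masks[EXC_TYPES_COUNT];
    mach_msg_type_number_t count = EXC_TYPES_COUNT;
    mach_port_t old_ports[EXC_TYPES_COUNT];
    exception_behavior_t old_behaviors[EXC_TYPES_COUNT];
    thread_state_flavor_t old_flavors[EXC_TYPES_COUNT];

    (void)task_swap_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS,
        new_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE,
        masks, &count, old_ports, old_behaviors, old_flavors);

    /* Identical old entries are coalesced: each masks[j] may cover several
     * exception types that shared one (port, behavior, flavor) triple. */
}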
*/ - if ( thread->exc_actions[i].port == ports[j] && - thread->exc_actions[i].behavior ==behaviors[j] && - thread->exc_actions[i].flavor == flavors[j] ) { + if (thread->exc_actions[i].port == ports[j] && + thread->exc_actions[i].behavior == behaviors[j] && + thread->exc_actions[i].flavor == flavors[j]) { masks[j] |= (1 << i); break; } @@ -2659,8 +2734,9 @@ thread_get_exception_ports( behaviors[j] = thread->exc_actions[i].behavior; flavors[j] = thread->exc_actions[i].flavor; ++count; - if (count >= *CountCnt) + if (count >= *CountCnt) { break; + } } } } @@ -2670,33 +2746,35 @@ done: *CountCnt = count; - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t task_get_exception_ports( - task_t task, - exception_mask_t exception_mask, - exception_mask_array_t masks, - mach_msg_type_number_t *CountCnt, - exception_port_array_t ports, - exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors) + task_t task, + exception_mask_t exception_mask, + exception_mask_array_t masks, + mach_msg_type_number_t *CountCnt, + exception_port_array_t ports, + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) { - unsigned int i, j, count; + unsigned int i, j, count; - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (exception_mask & ~EXC_MASK_VALID) - return (KERN_INVALID_ARGUMENT); + if (exception_mask & ~EXC_MASK_VALID) { + return KERN_INVALID_ARGUMENT; + } itk_lock(task); if (task->itk_self == IP_NULL) { itk_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } count = 0; @@ -2708,9 +2786,9 @@ task_get_exception_ports( * search for an identical entry, if found * set corresponding mask for this exception. */ - if ( task->exc_actions[i].port == ports[j] && - task->exc_actions[i].behavior == behaviors[j] && - task->exc_actions[i].flavor == flavors[j] ) { + if (task->exc_actions[i].port == ports[j] && + task->exc_actions[i].behavior == behaviors[j] && + task->exc_actions[i].flavor == flavors[j]) { masks[j] |= (1 << i); break; } @@ -2722,8 +2800,9 @@ task_get_exception_ports( behaviors[j] = task->exc_actions[i].behavior; flavors[j] = task->exc_actions[i].flavor; ++count; - if (count > *CountCnt) + if (count > *CountCnt) { break; + } } } } @@ -2732,5 +2811,5 @@ task_get_exception_ports( *CountCnt = count; - return (KERN_SUCCESS); + return KERN_SUCCESS; } diff --git a/osfmk/kern/ipc_tt.h b/osfmk/kern/ipc_tt.h index c59fdf40a..ce6d746e3 100644 --- a/osfmk/kern/ipc_tt.h +++ b/osfmk/kern/ipc_tt.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
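/*
 * Read-only counterpart (sketch): enumerate the current task handlers the
 * way a debugger would, using the getter defined above (helper name is
 * hypothetical):
 */
#include <mach/mach.h>

static void
dump_handlers(void)
{
    exception_mask_t masks[EXC_TYPES_COUNT];
    mach_msg_type_number_t count = EXC_TYPES_COUNT;
    mach_port_t ports[EXC_TYPES_COUNT];
    exception_behavior_t behaviors[EXC_TYPES_COUNT];
    thread_state_flavor_t flavors[EXC_TYPES_COUNT];

    if (task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
        masks, &count, ports, behaviors, flavors) == KERN_SUCCESS) {
        /* 'count' entries returned; masks[j] groups the exception types
         * that share ports[j]/behaviors[j]/flavors[j]. */
    }
}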
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _KERN_IPC_TT_H_ +#ifndef _KERN_IPC_TT_H_ #define _KERN_IPC_TT_H_ #include @@ -74,71 +74,71 @@ /* Initialize a task's IPC state */ extern void ipc_task_init( - task_t task, - task_t parent); + task_t task, + task_t parent); /* Enable a task for IPC access */ extern void ipc_task_enable( - task_t task); + task_t task); /* Disable IPC access to a task */ extern void ipc_task_disable( - task_t task); + task_t task); /* Clear out a task's IPC state */ extern void ipc_task_reset( - task_t task); + task_t task); /* Clean up and destroy a task's IPC state */ extern void ipc_task_terminate( - task_t task); + task_t task); /* Initialize a thread's IPC state */ extern void ipc_thread_init( - thread_t thread); + thread_t thread); extern void ipc_thread_init_exc_actions( - thread_t thread); + thread_t thread); extern void ipc_thread_destroy_exc_actions( - thread_t thread); + thread_t thread); /* Disable IPC access to a thread */ extern void ipc_thread_disable( - thread_t thread); + thread_t thread); /* Clean up and destroy a thread's IPC state */ extern void ipc_thread_terminate( - thread_t thread); + thread_t thread); /* Clear out a thread's IPC state */ extern void ipc_thread_reset( - thread_t thread); + thread_t thread); /* Return a send right for the task's user-visible self port */ extern ipc_port_t retrieve_task_self_fast( - task_t task); + task_t task); /* Return a send right for the thread's user-visible self port */ extern ipc_port_t retrieve_thread_self_fast( - thread_t thread); + thread_t thread); /* Convert from a port to a task name */ extern task_name_t convert_port_to_task_name( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a task inspect */ extern task_inspect_t convert_port_to_task_inspect( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a task */ extern task_t convert_port_to_task( - ipc_port_t port); + ipc_port_t port); extern task_t convert_port_to_task_with_exec_token( - ipc_port_t port, - uint32_t *exec_token); + 
ipc_port_t port, + uint32_t *exec_token); extern task_t port_name_to_task( mach_port_name_t name); @@ -154,35 +154,35 @@ extern boolean_t ref_task_port_locked( /* Convert from a port to a space */ extern ipc_space_t convert_port_to_space( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a space inspection right */ extern ipc_space_inspect_t convert_port_to_space_inspect( - ipc_port_t port); + ipc_port_t port); extern boolean_t ref_space_port_locked( ipc_port_t port, ipc_space_t *pspace); /* Convert from a port to a map */ extern vm_map_t convert_port_to_map( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a thread */ -extern thread_t convert_port_to_thread( - ipc_port_t port); +extern thread_t convert_port_to_thread( + ipc_port_t port); /* Convert from a port to a thread inspect */ -extern thread_inspect_t convert_port_to_thread_inspect( - ipc_port_t port); +extern thread_inspect_t convert_port_to_thread_inspect( + ipc_port_t port); -extern thread_t port_name_to_thread( - mach_port_name_t port_name); +extern thread_t port_name_to_thread( + mach_port_name_t port_name); /* Deallocate a space ref produced by convert_port_to_space */ extern void space_deallocate( - ipc_space_t space); + ipc_space_t space); extern void space_inspect_deallocate( - ipc_space_inspect_t space); + ipc_space_inspect_t space); -#endif /* _KERN_IPC_TT_H_ */ +#endif /* _KERN_IPC_TT_H_ */ diff --git a/osfmk/kern/kalloc.c b/osfmk/kern/kalloc.c index 6527654f2..63b8aaffb 100644 --- a/osfmk/kern/kalloc.c +++ b/osfmk/kern/kalloc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -91,7 +91,7 @@ zone_t kalloc_zone(vm_size_t); vm_map_t kalloc_map; vm_size_t kalloc_max; vm_size_t kalloc_max_prerounded; -vm_size_t kalloc_kernmap_size; /* size of kallocs that can come from kernel map */ +vm_size_t kalloc_kernmap_size; /* size of kallocs that can come from kernel map */ /* how many times we couldn't allocate out of kalloc_map and fell back to kernel_map */ unsigned long kalloc_fallback_count; @@ -102,16 +102,16 @@ vm_size_t kalloc_large_max; vm_size_t kalloc_largest_allocated = 0; uint64_t kalloc_large_sum; -int kalloc_fake_zone_index = -1; /* index of our fake zone in statistics arrays */ +int kalloc_fake_zone_index = -1; /* index of our fake zone in statistics arrays */ -vm_offset_t kalloc_map_min; -vm_offset_t kalloc_map_max; +vm_offset_t kalloc_map_min; +vm_offset_t kalloc_map_max; -#ifdef MUTEX_ZONE +#ifdef MUTEX_ZONE /* * Diagnostic code to track mutexes separately rather than via the 2^ zones */ - zone_t lck_mtx_zone; +zone_t lck_mtx_zone; #endif static void @@ -147,73 +147,164 @@ KALLOC_ZINFO_SFREE(vm_size_t bytes) #define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN) #define KiB(x) (1024 * (x)) +/* + * The k_zone_config table defines the configuration of zones on various platforms. + * The currently defined list of zones and their per-CPU caching behavior are as + * follows (X:zone not present; N:zone present no cpu-caching; Y:zone present with cpu-caching): + * + * Size macOS(64-bit) embedded(32-bit) embedded(64-bit) + *-------- ---------------- ---------------- ---------------- + * + * 8 X Y X + * 16 Y Y Y + * 24 X Y X + * 32 Y Y Y + * 40 X Y X + * 48 Y Y Y + * 64 Y Y Y + * 72 X Y X + * 80 Y X Y + * 88 X Y X + * 96 Y X Y + * 112 X Y X + * 128 Y Y Y + * 160 Y X Y + * 192 Y Y Y + * 224 Y X Y + * 256 Y Y Y + * 288 Y Y Y + * 368 Y X Y + * 384 X Y X + * 400 Y X Y + * 440 X Y X + * 512 Y Y Y + * 576 Y N N + * 768 Y N N + * 1024 Y Y Y + * 1152 N N N + * 1280 N N N + * 1536 X N X + * 1664 N X N + * 2048 Y N N + * 2128 X N X + * 3072 X N X + * 4096 Y N N + * 6144 N N N + * 8192 Y N N + * 16384 N N N + * 32768 N N N + * + */ static const struct kalloc_zone_config { + bool kzc_caching; int kzc_size; const char *kzc_name; } k_zone_config[] = { -#define KZC_ENTRY(SIZE) { .kzc_size = (SIZE), .kzc_name = "kalloc." #SIZE } +#define KZC_ENTRY(SIZE, caching) { .kzc_caching = (caching), .kzc_size = (SIZE), .kzc_name = "kalloc." 
#SIZE } + +#if CONFIG_EMBEDDED #if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4 - /* 64-bit targets, generally */ - KZC_ENTRY(16), - KZC_ENTRY(32), - KZC_ENTRY(48), - KZC_ENTRY(64), - KZC_ENTRY(80), - KZC_ENTRY(96), - KZC_ENTRY(128), - KZC_ENTRY(160), - KZC_ENTRY(192), - KZC_ENTRY(224), - KZC_ENTRY(256), - KZC_ENTRY(288), - KZC_ENTRY(368), - KZC_ENTRY(400), - KZC_ENTRY(512), - KZC_ENTRY(576), - KZC_ENTRY(768), - KZC_ENTRY(1024), - KZC_ENTRY(1152), - KZC_ENTRY(1280), - KZC_ENTRY(1664), - KZC_ENTRY(2048), + /* Zone config for embedded 64-bit platforms */ + KZC_ENTRY(16, true), + KZC_ENTRY(32, true), + KZC_ENTRY(48, true), + KZC_ENTRY(64, true), + KZC_ENTRY(80, true), + KZC_ENTRY(96, true), + KZC_ENTRY(128, true), + KZC_ENTRY(160, true), + KZC_ENTRY(192, true), + KZC_ENTRY(224, true), + KZC_ENTRY(256, true), + KZC_ENTRY(288, true), + KZC_ENTRY(368, true), + KZC_ENTRY(400, true), + KZC_ENTRY(512, true), + KZC_ENTRY(576, false), + KZC_ENTRY(768, false), + KZC_ENTRY(1024, true), + KZC_ENTRY(1152, false), + KZC_ENTRY(1280, false), + KZC_ENTRY(1664, false), + KZC_ENTRY(2048, false), + KZC_ENTRY(4096, false), + KZC_ENTRY(6144, false), + KZC_ENTRY(8192, false), + KZC_ENTRY(16384, false), + KZC_ENTRY(32768, false), + #elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3 - /* 32-bit targets, generally */ - KZC_ENTRY(8), - KZC_ENTRY(16), - KZC_ENTRY(24), - KZC_ENTRY(32), - KZC_ENTRY(40), - KZC_ENTRY(48), - KZC_ENTRY(64), - KZC_ENTRY(72), - KZC_ENTRY(88), - KZC_ENTRY(112), - KZC_ENTRY(128), - KZC_ENTRY(192), - KZC_ENTRY(256), - KZC_ENTRY(288), - KZC_ENTRY(384), - KZC_ENTRY(440), - KZC_ENTRY(512), - KZC_ENTRY(576), - KZC_ENTRY(768), - KZC_ENTRY(1024), - KZC_ENTRY(1152), - KZC_ENTRY(1536), - KZC_ENTRY(2048), - KZC_ENTRY(2128), - KZC_ENTRY(3072), + /* Zone config for embedded 32-bit platforms */ + KZC_ENTRY(8, true), + KZC_ENTRY(16, true), + KZC_ENTRY(24, true), + KZC_ENTRY(32, true), + KZC_ENTRY(40, true), + KZC_ENTRY(48, true), + KZC_ENTRY(64, true), + KZC_ENTRY(72, true), + KZC_ENTRY(88, true), + KZC_ENTRY(112, true), + KZC_ENTRY(128, true), + KZC_ENTRY(192, true), + KZC_ENTRY(256, true), + KZC_ENTRY(288, true), + KZC_ENTRY(384, true), + KZC_ENTRY(440, true), + KZC_ENTRY(512, true), + KZC_ENTRY(576, false), + KZC_ENTRY(768, false), + KZC_ENTRY(1024, true), + KZC_ENTRY(1152, false), + KZC_ENTRY(1280, false), + KZC_ENTRY(1536, false), + KZC_ENTRY(2048, false), + KZC_ENTRY(2128, false), + KZC_ENTRY(3072, false), + KZC_ENTRY(4096, false), + KZC_ENTRY(6144, false), + KZC_ENTRY(8192, false), + KZC_ENTRY(16384, false), + KZC_ENTRY(32768, false), + #else #error missing or invalid zone size parameters for kalloc #endif - /* all configurations get these zones */ - KZC_ENTRY(4096), - KZC_ENTRY(6144), - KZC_ENTRY(8192), - KZC_ENTRY(16384), - KZC_ENTRY(32768), +#else /* CONFIG_EMBEDDED */ + + /* Zone config for macOS 64-bit platforms */ + KZC_ENTRY(16, true), + KZC_ENTRY(32, true), + KZC_ENTRY(48, true), + KZC_ENTRY(64, true), + KZC_ENTRY(80, true), + KZC_ENTRY(96, true), + KZC_ENTRY(128, true), + KZC_ENTRY(160, true), + KZC_ENTRY(192, true), + KZC_ENTRY(224, true), + KZC_ENTRY(256, true), + KZC_ENTRY(288, true), + KZC_ENTRY(368, true), + KZC_ENTRY(400, true), + KZC_ENTRY(512, true), + KZC_ENTRY(576, true), + KZC_ENTRY(768, true), + KZC_ENTRY(1024, true), + KZC_ENTRY(1152, false), + KZC_ENTRY(1280, false), + KZC_ENTRY(1664, false), + KZC_ENTRY(2048, true), + KZC_ENTRY(4096, true), + KZC_ENTRY(6144, false), + KZC_ENTRY(8192, true), + KZC_ENTRY(16384, false), + KZC_ENTRY(32768, false), + +#endif /* 
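/*
 * For reference, a sketch of what one table entry expands to under the
 * KZC_ENTRY macro defined above:
 *
 *     KZC_ENTRY(512, true)
 *       => { .kzc_caching = true, .kzc_size = 512, .kzc_name = "kalloc.512" }
 *
 * The kzc_caching flag is what kalloc_init() later feeds to
 * zone_change(..., Z_CACHING_ENABLED, TRUE) for that zone.
 */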
CONFIG_EMBEDDED */ + #undef KZC_ENTRY }; @@ -226,13 +317,13 @@ static const struct kalloc_zone_config { * for them in one dereference. */ -#define INDEX_ZDLUT(size) \ - (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN) -#define N_K_ZDLUT (2048 / KALLOC_MINALIGN) - /* covers sizes [0 .. 2048 - KALLOC_MINALIGN] */ -#define MAX_SIZE_ZDLUT ((N_K_ZDLUT - 1) * KALLOC_MINALIGN) +#define INDEX_ZDLUT(size) \ + (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN) +#define N_K_ZDLUT (2048 / KALLOC_MINALIGN) +/* covers sizes [0 .. 2048 - KALLOC_MINALIGN] */ +#define MAX_SIZE_ZDLUT ((N_K_ZDLUT - 1) * KALLOC_MINALIGN) -static int8_t k_zone_dlut[N_K_ZDLUT]; /* table of indices into k_zone[] */ +static int8_t k_zone_dlut[N_K_ZDLUT]; /* table of indices into k_zone[] */ /* * If there's no hit in the DLUT, then start searching from k_zindex_start. @@ -248,8 +339,8 @@ static zone_t k_zone[MAX_K_ZONE]; lck_grp_t kalloc_lck_grp; lck_mtx_t kalloc_lock; -#define kalloc_spin_lock() lck_mtx_lock_spin(&kalloc_lock) -#define kalloc_unlock() lck_mtx_unlock(&kalloc_lock) +#define kalloc_spin_lock() lck_mtx_lock_spin(&kalloc_lock) +#define kalloc_unlock() lck_mtx_unlock(&kalloc_lock) /* OSMalloc local data declarations */ @@ -259,14 +350,14 @@ queue_head_t OSMalloc_tag_list; lck_grp_t *OSMalloc_tag_lck_grp; lck_mtx_t OSMalloc_tag_lock; -#define OSMalloc_tag_spin_lock() lck_mtx_lock_spin(&OSMalloc_tag_lock) -#define OSMalloc_tag_unlock() lck_mtx_unlock(&OSMalloc_tag_lock) +#define OSMalloc_tag_spin_lock() lck_mtx_lock_spin(&OSMalloc_tag_lock) +#define OSMalloc_tag_unlock() lck_mtx_unlock(&OSMalloc_tag_lock) /* OSMalloc forward declarations */ void OSMalloc_init(void); -void OSMalloc_Tagref(OSMallocTag tag); -void OSMalloc_Tagrele(OSMallocTag tag); +void OSMalloc_Tagref(OSMallocTag tag); +void OSMalloc_Tagrele(OSMallocTag tag); /* * Initialize the memory allocator. This should be called only @@ -285,30 +376,33 @@ kalloc_init( vm_size_t size, kalloc_map_size; vm_map_kernel_flags_t vmk_flags; - /* - * Scale the kalloc_map_size to physical memory size: stay below + /* + * Scale the kalloc_map_size to physical memory size: stay below * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel). 
*/ kalloc_map_size = (vm_size_t)(sane_size >> 5); #if !__LP64__ - if (kalloc_map_size > KALLOC_MAP_SIZE_MAX) + if (kalloc_map_size > KALLOC_MAP_SIZE_MAX) { kalloc_map_size = KALLOC_MAP_SIZE_MAX; + } #endif /* !__LP64__ */ - if (kalloc_map_size < KALLOC_MAP_SIZE_MIN) + if (kalloc_map_size < KALLOC_MAP_SIZE_MIN) { kalloc_map_size = KALLOC_MAP_SIZE_MIN; + } vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; vmk_flags.vmkf_permanent = TRUE; retval = kmem_suballoc(kernel_map, &min, kalloc_map_size, - FALSE, - (VM_FLAGS_ANYWHERE), - vmk_flags, - VM_KERN_MEMORY_KALLOC, - &kalloc_map); + FALSE, + (VM_FLAGS_ANYWHERE), + vmk_flags, + VM_KERN_MEMORY_KALLOC, + &kalloc_map); - if (retval != KERN_SUCCESS) + if (retval != KERN_SUCCESS) { panic("kalloc_init: kmem_suballoc failed"); + } kalloc_map_min = min; kalloc_map_max = min + kalloc_map_size - 1; @@ -321,7 +415,7 @@ kalloc_init( */ kalloc_max = PAGE_SIZE << 2; if (kalloc_max < KiB(16)) { - kalloc_max = KiB(16); + kalloc_max = KiB(16); } assert(kalloc_max <= KiB(64)); /* assumption made in size arrays */ @@ -342,9 +436,14 @@ kalloc_init( */ zone_change(k_zone[i], Z_CALLERACCT, FALSE); #if VM_MAX_TAG_ZONES - if (zone_tagging_on) zone_change(k_zone[i], Z_TAGS_ENABLED, TRUE); + if (zone_tagging_on) { + zone_change(k_zone[i], Z_TAGS_ENABLED, TRUE); + } #endif zone_change(k_zone[i], Z_KASAN_QUARANTINE, FALSE); + if (k_zone_config[i].kzc_caching) { + zone_change(k_zone[i], Z_CACHING_ENABLED, TRUE); + } } /* @@ -354,8 +453,9 @@ kalloc_init( for (int i = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) { int zindex = 0; - while ((vm_size_t)k_zone_config[zindex].kzc_size < size) + while ((vm_size_t)k_zone_config[zindex].kzc_size < size) { zindex++; + } if (i == N_K_ZDLUT) { k_zindex_start = zindex; @@ -379,24 +479,22 @@ kalloc_init( int zindex; if (testsize < MAX_SIZE_ZDLUT) { - compare += 1; /* 'if' (T) */ + compare += 1; /* 'if' (T) */ long dindex = INDEX_ZDLUT(testsize); zindex = (int)k_zone_dlut[dindex]; - } else if (testsize < kalloc_max_prerounded) { - - compare += 2; /* 'if' (F), 'if' (T) */ + compare += 2; /* 'if' (F), 'if' (T) */ zindex = k_zindex_start; while ((vm_size_t)k_zone_config[zindex].kzc_size < testsize) { zindex++; - compare++; /* 'while' (T) */ + compare++; /* 'while' (T) */ } - compare++; /* 'while' (F) */ - } else - break; /* not zone-backed */ - + compare++; /* 'while' (F) */ + } else { + break; /* not zone-backed */ + } zone_t z = k_zone[zindex]; printf("kalloc_init: req size %4lu: %11s took %d compare%s\n", (unsigned long)testsize, z->zone_name, compare, @@ -407,8 +505,8 @@ kalloc_init( lck_grp_init(&kalloc_lck_grp, "kalloc.large", LCK_GRP_ATTR_NULL); lck_mtx_init(&kalloc_lock, &kalloc_lck_grp, LCK_ATTR_NULL); OSMalloc_init(); -#ifdef MUTEX_ZONE - lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx"); +#ifdef MUTEX_ZONE + lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024 * 256, 4096, "lck_mtx"); #endif } @@ -421,7 +519,7 @@ get_zone_dlut(vm_size_t size) { long dindex = INDEX_ZDLUT(size); int zindex = (int)k_zone_dlut[dindex]; - return (k_zone[zindex]); + return k_zone[zindex]; } /* As above, but linear search k_zone_config[] for the next zone that fits. 
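/*
 * Worked example of the direct lookup table built above (a sketch, assuming
 * the 64-bit macOS table, where KALLOC_MINALIGN is 16):
 *
 *     kalloc(200):
 *       INDEX_ZDLUT(200) = (200 + 16 - 1) / 16 = 13
 *       k_zone_dlut[13] was filled in for size 13 * 16 = 208, i.e. the
 *       first zone with kzc_size >= 208, so the request lands in
 *       kalloc.224 with a single table dereference -- no linear search.
 *
 * Requests at or above MAX_SIZE_ZDLUT fall through to get_zone_search(),
 * starting at k_zindex_start, exactly as the diagnostic loop above counts.
 */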
*/ @@ -431,37 +529,38 @@ get_zone_search(vm_size_t size, int zindex) { assert(size < kalloc_max_prerounded); - while ((vm_size_t)k_zone_config[zindex].kzc_size < size) + while ((vm_size_t)k_zone_config[zindex].kzc_size < size) { zindex++; + } assert(zindex < MAX_K_ZONE && (vm_size_t)k_zone_config[zindex].kzc_size < kalloc_max); - return (k_zone[zindex]); + return k_zone[zindex]; } static vm_size_t vm_map_lookup_kalloc_entry_locked( - vm_map_t map, - void *addr) + vm_map_t map, + void *addr) { boolean_t ret; vm_map_entry_t vm_entry = NULL; - + ret = vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry); if (!ret) { - panic("Attempting to lookup/free an address not allocated via kalloc! (vm_map_lookup_entry() failed map: %p, addr: %p)\n", - map, addr); + panic("Attempting to lookup/free an address not allocated via kalloc! (vm_map_lookup_entry() failed map: %p, addr: %p)\n", + map, addr); } if (vm_entry->vme_start != (vm_map_offset_t)addr) { panic("Attempting to lookup/free the middle of a kalloc'ed element! (map: %p, addr: %p, entry: %p)\n", - map, addr, vm_entry); + map, addr, vm_entry); } if (!vm_entry->vme_atomic) { panic("Attempting to lookup/free an address not managed by kalloc! (map: %p, addr: %p, entry: %p)\n", - map, addr, vm_entry); + map, addr, vm_entry); } - return (vm_entry->vme_end - vm_entry->vme_start); + return vm_entry->vme_end - vm_entry->vme_start; } #if KASAN_KALLOC @@ -478,10 +577,10 @@ kalloc_size(void *addr) #else vm_size_t kalloc_size( - void *addr) + void *addr) { - vm_map_t map; - vm_size_t size; + vm_map_t map; + vm_size_t size; size = zone_element_size(addr, NULL); if (size) { @@ -501,32 +600,33 @@ kalloc_size( vm_size_t kalloc_bucket_size( - vm_size_t size) + vm_size_t size) { - zone_t z; - vm_map_t map; - + zone_t z; + vm_map_t map; + if (size < MAX_SIZE_ZDLUT) { z = get_zone_dlut(size); return z->elem_size; - } - + } + if (size < kalloc_max_prerounded) { z = get_zone_search(size, k_zindex_start); return z->elem_size; } - if (size >= kalloc_kernmap_size) + if (size >= kalloc_kernmap_size) { map = kernel_map; - else + } else { map = kalloc_map; - + } + return vm_map_round_page(size, VM_MAP_PAGE_MASK(map)); } #if KASAN_KALLOC vm_size_t -kfree_addr(void *addr) +(kfree_addr)(void *addr) { vm_size_t origsz = kalloc_size(addr); kfree(addr, origsz); @@ -534,13 +634,13 @@ kfree_addr(void *addr) } #else vm_size_t -kfree_addr( - void *addr) +(kfree_addr)( + void *addr) { vm_map_t map; vm_size_t size = 0; - kern_return_t ret; - zone_t z; + kern_return_t ret; + zone_t z; size = zone_element_size(addr, &z); if (size) { @@ -561,18 +661,18 @@ kfree_addr( vm_map_lock(map); size = vm_map_lookup_kalloc_entry_locked(map, addr); ret = vm_map_remove_locked(map, - vm_map_trunc_page((vm_map_offset_t)addr, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page((vm_map_offset_t)addr + size, - VM_MAP_PAGE_MASK(map)), - VM_MAP_REMOVE_KUNWIRE); + vm_map_trunc_page((vm_map_offset_t)addr, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page((vm_map_offset_t)addr + size, + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_KUNWIRE); if (ret != KERN_SUCCESS) { panic("vm_map_remove_locked() failed for kalloc vm_entry! 
addr: %p, map: %p ret: %d\n", - addr, map, ret); + addr, map, ret); } vm_map_unlock(map); DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, size, void*, addr); - + kalloc_spin_lock(); kalloc_large_total -= size; kalloc_large_inuse--; @@ -585,9 +685,9 @@ kfree_addr( void * kalloc_canblock( - vm_size_t * psize, - boolean_t canblock, - vm_allocation_site_t * site) + vm_size_t * psize, + boolean_t canblock, + vm_allocation_site_t * site) { zone_t z; vm_size_t size; @@ -603,11 +703,11 @@ kalloc_canblock( size = kasan_alloc_resize(req_size); #endif - if (size < MAX_SIZE_ZDLUT) + if (size < MAX_SIZE_ZDLUT) { z = get_zone_dlut(size); - else if (size < kalloc_max_prerounded) + } else if (size < kalloc_max_prerounded) { z = get_zone_search(size, k_zindex_start); - else { + } else { /* * If size is too large for a zone, then use kmem_alloc. * (We use kmem_alloc instead of kmem_alloc_kobject so that @@ -617,7 +717,7 @@ kalloc_canblock( /* kmem_alloc could block so we return if noblock */ if (!canblock) { - return(NULL); + return NULL; } #if KASAN_KALLOC @@ -626,23 +726,27 @@ kalloc_canblock( assert(size >= MAX_SIZE_ZDLUT && size >= kalloc_max_prerounded); #endif - if (size >= kalloc_kernmap_size) - alloc_map = kernel_map; - else + if (size >= kalloc_kernmap_size) { + alloc_map = kernel_map; + } else { alloc_map = kalloc_map; + } - if (site) tag = vm_tag_alloc(site); + if (site) { + tag = vm_tag_alloc(site); + } if (kmem_alloc_flags(alloc_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS) { if (alloc_map != kernel_map) { if (kalloc_fallback_count++ == 0) { printf("%s: falling back to kernel_map\n", __func__); } - if (kmem_alloc_flags(kernel_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS) + if (kmem_alloc_flags(kernel_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS) { addr = NULL; - } - else + } + } else { addr = NULL; + } } if (addr != NULL) { @@ -651,15 +755,17 @@ kalloc_canblock( * Thread-safe version of the workaround for 4740071 * (a double FREE()) */ - if (size > kalloc_largest_allocated) + if (size > kalloc_largest_allocated) { kalloc_largest_allocated = size; + } - kalloc_large_inuse++; - kalloc_large_total += size; + kalloc_large_inuse++; + kalloc_large_total += size; kalloc_large_sum += size; - if (kalloc_large_total > kalloc_large_max) - kalloc_large_max = kalloc_large_total; + if (kalloc_large_total > kalloc_large_max) { + kalloc_large_max = kalloc_large_total; + } kalloc_unlock(); @@ -672,22 +778,24 @@ kalloc_canblock( *psize = round_page(size); #endif DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, *psize, void*, addr); - return(addr); + return addr; } #ifdef KALLOC_DEBUG - if (size > z->elem_size) + if (size > z->elem_size) { panic("%s: z %p (%s) but requested size %lu", __func__, z, z->zone_name, (unsigned long)size); + } #endif assert(size <= z->elem_size); #if VM_MAX_TAG_ZONES - if (z->tags && site) - { + if (z->tags && site) { tag = vm_tag_alloc(site); - if (!canblock && !vm_allocation_zone_totals[tag]) tag = VM_KERN_MEMORY_KALLOC; - } + if (!canblock && !vm_allocation_zone_totals[tag]) { + tag = VM_KERN_MEMORY_KALLOC; + } + } #endif addr = zalloc_canblock_tag(z, canblock, size, tag); @@ -708,18 +816,18 @@ kalloc_canblock( void * kalloc_external( - vm_size_t size); + vm_size_t size); void * kalloc_external( - vm_size_t size) + vm_size_t size) { - return( kalloc_tag_bt(size, VM_KERN_MEMORY_KALLOC) ); + return kalloc_tag_bt(size, VM_KERN_MEMORY_KALLOC); } void -kfree( - void *data, - vm_size_t size) +(kfree)( + void *data, + vm_size_t size) 
{ zone_t z; @@ -737,17 +845,18 @@ kfree( } #endif - if (size < MAX_SIZE_ZDLUT) + if (size < MAX_SIZE_ZDLUT) { z = get_zone_dlut(size); - else if (size < kalloc_max_prerounded) + } else if (size < kalloc_max_prerounded) { z = get_zone_search(size, k_zindex_start); - else { + } else { /* if size was too large for a zone, then use kmem_free */ vm_map_t alloc_map = kernel_map; - if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max)) + if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max)) { alloc_map = kalloc_map; + } if (size > kalloc_largest_allocated) { panic("kfree: size %lu > kalloc_largest_allocated %lu", (unsigned long)size, (unsigned long)kalloc_largest_allocated); } @@ -769,9 +878,10 @@ kfree( /* free to the appropriate zone */ #ifdef KALLOC_DEBUG - if (size > z->elem_size) + if (size > z->elem_size) { panic("%s: z %p (%s) but requested size %lu", __func__, z, z->zone_name, (unsigned long)size); + } #endif assert(size <= z->elem_size); #if !KASAN_KALLOC @@ -785,11 +895,13 @@ zone_t kalloc_zone( vm_size_t size) { - if (size < MAX_SIZE_ZDLUT) - return (get_zone_dlut(size)); - if (size <= kalloc_max) - return (get_zone_search(size, k_zindex_start)); - return (ZONE_NULL); + if (size < MAX_SIZE_ZDLUT) { + return get_zone_dlut(size); + } + if (size <= kalloc_max) { + return get_zone_search(size, k_zindex_start); + } + return ZONE_NULL; } #endif @@ -805,8 +917,8 @@ OSMalloc_init( OSMallocTag OSMalloc_Tagalloc( - const char *str, - uint32_t flags) + const char *str, + uint32_t flags) { OSMallocTag OSMTag; @@ -814,8 +926,9 @@ OSMalloc_Tagalloc( bzero((void *)OSMTag, sizeof(*OSMTag)); - if (flags & OSMT_PAGEABLE) + if (flags & OSMT_PAGEABLE) { OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE; + } OSMTag->OSMT_refcnt = 1; @@ -825,130 +938,141 @@ OSMalloc_Tagalloc( enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag); OSMalloc_tag_unlock(); OSMTag->OSMT_state = OSMT_VALID; - return(OSMTag); + return OSMTag; } void OSMalloc_Tagref( - OSMallocTag tag) + OSMallocTag tag) { - if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) + if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) { panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state); + } (void)hw_atomic_add(&tag->OSMT_refcnt, 1); } void OSMalloc_Tagrele( - OSMallocTag tag) + OSMallocTag tag) { - if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) + if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) { panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state); + } if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) { - if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) { + if (hw_compare_and_store(OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, &tag->OSMT_state)) { OSMalloc_tag_spin_lock(); (void)remque((queue_entry_t)tag); OSMalloc_tag_unlock(); - kfree((void*)tag, sizeof(*tag)); - } else + kfree(tag, sizeof(*tag)); + } else { panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name); + } } } void OSMalloc_Tagfree( - OSMallocTag tag) + OSMallocTag tag) { - if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) + if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID | OSMT_RELEASED, &tag->OSMT_state)) { panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", tag->OSMT_name, tag->OSMT_state); + } if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) { OSMalloc_tag_spin_lock(); (void)remque((queue_entry_t)tag); OSMalloc_tag_unlock(); - 
kfree((void*)tag, sizeof(*tag)); + kfree(tag, sizeof(*tag)); } } void * OSMalloc( - uint32_t size, - OSMallocTag tag) + uint32_t size, + OSMallocTag tag) { - void *addr=NULL; - kern_return_t kr; + void *addr = NULL; + kern_return_t kr; OSMalloc_Tagref(tag); if ((tag->OSMT_attr & OSMT_PAGEABLE) && (size & ~PAGE_MASK)) { - if ((kr = kmem_alloc_pageable_external(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS) + if ((kr = kmem_alloc_pageable_external(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS) { addr = NULL; - } else + } + } else { addr = kalloc_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC); + } - if (!addr) + if (!addr) { OSMalloc_Tagrele(tag); + } - return(addr); + return addr; } void * OSMalloc_nowait( - uint32_t size, - OSMallocTag tag) + uint32_t size, + OSMallocTag tag) { - void *addr=NULL; + void *addr = NULL; - if (tag->OSMT_attr & OSMT_PAGEABLE) - return(NULL); + if (tag->OSMT_attr & OSMT_PAGEABLE) { + return NULL; + } OSMalloc_Tagref(tag); /* XXX: use non-blocking kalloc for now */ addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC); - if (addr == NULL) + if (addr == NULL) { OSMalloc_Tagrele(tag); + } - return(addr); + return addr; } void * OSMalloc_noblock( - uint32_t size, - OSMallocTag tag) + uint32_t size, + OSMallocTag tag) { - void *addr=NULL; + void *addr = NULL; - if (tag->OSMT_attr & OSMT_PAGEABLE) - return(NULL); + if (tag->OSMT_attr & OSMT_PAGEABLE) { + return NULL; + } OSMalloc_Tagref(tag); addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC); - if (addr == NULL) + if (addr == NULL) { OSMalloc_Tagrele(tag); + } - return(addr); + return addr; } void OSFree( - void *addr, - uint32_t size, - OSMallocTag tag) + void *addr, + uint32_t size, + OSMallocTag tag) { if ((tag->OSMT_attr & OSMT_PAGEABLE) && (size & ~PAGE_MASK)) { kmem_free(kernel_map, (vm_offset_t)addr, size); - } else - kfree((void *)addr, size); + } else { + kfree(addr, size); + } OSMalloc_Tagrele(tag); } uint32_t OSMalloc_size( - void *addr) + void *addr) { return (uint32_t)kalloc_size(addr); } - diff --git a/osfmk/kern/kalloc.h b/osfmk/kern/kalloc.h index 3c6f5804b..0a1d56917 100644 --- a/osfmk/kern/kalloc.h +++ b/osfmk/kern/kalloc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _KERN_KALLOC_H_ +#ifndef _KERN_KALLOC_H_ #define _KERN_KALLOC_H_ #include @@ -70,21 +70,21 @@ __BEGIN_DECLS extern void * kalloc_canblock( - vm_size_t * size, - boolean_t canblock, - vm_allocation_site_t * site); + vm_size_t * size, + boolean_t canblock, + vm_allocation_site_t * site); extern vm_size_t kalloc_size( - void * addr); + void * addr); extern vm_size_t kfree_addr( - void * addr); + void * addr); extern vm_size_t kalloc_bucket_size( - vm_size_t size); + vm_size_t size); #define kalloc(size) \ ({ VM_ALLOC_SITE_STATIC(0, 0); \ @@ -141,44 +141,67 @@ kalloc_bucket_size( -extern void kfree(void *data, - vm_size_t size); +extern void kfree(void *data, + vm_size_t size); + +#define kfree(data, size) \ +_Pragma("clang diagnostic push") \ +_Pragma("clang diagnostic ignored \"-Wshadow\"") \ + do { \ + _Static_assert(sizeof (data) == sizeof (void *) || sizeof (data) == sizeof (mach_vm_address_t), "data is not a pointer"); \ + void *__tmp_addr = (void *) data; \ + vm_size_t __tmp_size = size; \ + data = (__typeof__(data)) NULL; \ + (kfree)(__tmp_addr, __tmp_size); \ + } while (0) \ +_Pragma("clang diagnostic pop") + +#define kfree_addr(addr) \ +_Pragma("clang diagnostic push") \ +_Pragma("clang diagnostic ignored \"-Wshadow\"") \ + do { \ + _Static_assert(sizeof (addr) == sizeof (void *) || sizeof (addr) == sizeof (mach_vm_address_t), "addr is not a pointer"); \ + void *__tmp_addr = (void *) addr; \ + addr = (__typeof__(addr)) NULL; \ + (kfree_addr)(__tmp_addr); \ + } while (0) \ +_Pragma("clang diagnostic pop") #else /* XNU_KERNEL_PRIVATE */ -extern void *kalloc(vm_size_t size) __attribute__((alloc_size(1))); +extern void *kalloc(vm_size_t size) __attribute__((alloc_size(1))); -extern void *kalloc_noblock(vm_size_t size) __attribute__((alloc_size(1))); +extern void *kalloc_noblock(vm_size_t size) __attribute__((alloc_size(1))); -extern void kfree(void *data, - vm_size_t size); +extern void kfree(void *data, + vm_size_t size); #endif /* !XNU_KERNEL_PRIVATE */ __END_DECLS -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE -extern void kalloc_init(void); +extern void kalloc_init(void); -extern void kalloc_fake_zone_init( int ); +extern void kalloc_fake_zone_init( int ); -extern void kalloc_fake_zone_info( - int *count, - vm_size_t *cur_size, - vm_size_t *max_size, - vm_size_t *elem_size, - vm_size_t *alloc_size, - uint64_t *sum_size, - int *collectable, - int *exhaustable, - int *caller_acct); +extern void kalloc_fake_zone_info( + int *count, + vm_size_t *cur_size, + vm_size_t *max_size, + vm_size_t *elem_size, + vm_size_t 
*alloc_size, + uint64_t *sum_size, + int *collectable, + int *exhaustable, + int *caller_acct); extern vm_size_t kalloc_max_prerounded; extern vm_size_t kalloc_large_total; -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -#endif /* _KERN_KALLOC_H_ */ +#endif /* _KERN_KALLOC_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/kern/kcdata.h b/osfmk/kern/kcdata.h index e36c55352..85cf4998b 100644 --- a/osfmk/kern/kcdata.h +++ b/osfmk/kern/kcdata.h @@ -305,7 +305,7 @@ kcs_get_elem_size(kcdata_subtype_descriptor_t d) { if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) { /* size is composed as ((count &0xffff)<<16 | (elem_size & 0xffff)) */ - return (uint32_t)((d->kcs_elem_size & 0xffff) * ((d->kcs_elem_size & 0xffff0000)>>16)); + return (uint32_t)((d->kcs_elem_size & 0xffff) * ((d->kcs_elem_size & 0xffff0000) >> 16)); } return d->kcs_elem_size; } @@ -313,8 +313,9 @@ kcs_get_elem_size(kcdata_subtype_descriptor_t d) static inline uint32_t kcs_get_elem_count(kcdata_subtype_descriptor_t d) { - if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) + if (d->kcs_flags & KCS_SUBTYPE_FLAGS_ARRAY) { return (d->kcs_elem_size >> 16) & 0xffff; + } return 1; } @@ -323,12 +324,11 @@ kcs_set_elem_size(kcdata_subtype_descriptor_t d, uint32_t size, uint32_t count) { if (count > 1) { /* means we are setting up an array */ - if (size > 0xffff || count > 0xffff) + if (size > 0xffff || count > 0xffff) { return -1; //invalid argument + } d->kcs_elem_size = ((count & 0xffff) << 16 | (size & 0xffff)); - } - else - { + } else { d->kcs_elem_size = size; } return 0; @@ -367,9 +367,9 @@ struct kcdata_type_definition { #define KCDATA_TYPE_TYPEDEFINTION 0x12u /* Meta type that describes a type on the fly. */ #define KCDATA_TYPE_CONTAINER_BEGIN \ 0x13u /* Container type which has corresponding CONTAINER_END header. \ - * KCDATA_TYPE_CONTAINER_BEGIN has type in the data segment. \ - * Both headers have (uint64_t) ID for matching up nested data. \ - */ + * KCDATA_TYPE_CONTAINER_BEGIN has type in the data segment. \ + * Both headers have (uint64_t) ID for matching up nested data. 
\ + */ #define KCDATA_TYPE_CONTAINER_END 0x14u #define KCDATA_TYPE_ARRAY_PAD0 0x20u /* Array of data with 0 byte of padding*/ @@ -423,14 +423,14 @@ struct kcdata_type_definition { /* next type range number available 0x1060 */ /**************** definitions for XNUPOST *********************/ -#define XNUPOST_KCTYPE_TESTCONFIG 0x1040 +#define XNUPOST_KCTYPE_TESTCONFIG 0x1040 /**************** definitions for stackshot *********************/ /* This value must always match IO_NUM_PRIORITIES defined in thread_info.h */ -#define STACKSHOT_IO_NUM_PRIORITIES 4 +#define STACKSHOT_IO_NUM_PRIORITIES 4 /* This value must always match MAXTHREADNAMESIZE used in bsd */ -#define STACKSHOT_MAX_THREAD_NAME_SIZE 64 +#define STACKSHOT_MAX_THREAD_NAME_SIZE 64 /* * NOTE: Please update kcdata/libkdd/kcdtypes.c if you make any changes @@ -485,35 +485,35 @@ struct stack_snapshot_frame32 { }; struct stack_snapshot_frame64 { - uint64_t lr; - uint64_t sp; + uint64_t lr; + uint64_t sp; }; struct dyld_uuid_info_32 { - uint32_t imageLoadAddress; /* base address image is mapped at */ - uuid_t imageUUID; + uint32_t imageLoadAddress; /* base address image is mapped at */ + uuid_t imageUUID; }; struct dyld_uuid_info_64 { - uint64_t imageLoadAddress; /* XXX image slide */ - uuid_t imageUUID; + uint64_t imageLoadAddress; /* XXX image slide */ + uuid_t imageUUID; }; struct dyld_uuid_info_64_v2 { - uint64_t imageLoadAddress; /* XXX image slide */ - uuid_t imageUUID; - /* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */ - uint64_t imageSlidBaseAddress; /* slid base address of image */ + uint64_t imageLoadAddress; /* XXX image slide */ + uuid_t imageUUID; + /* end of version 1 of dyld_uuid_info_64. sizeof v1 was 24 */ + uint64_t imageSlidBaseAddress; /* slid base address of image */ }; struct user32_dyld_uuid_info { - uint32_t imageLoadAddress; /* base address image is mapped into */ - uuid_t imageUUID; /* UUID of image */ + uint32_t imageLoadAddress; /* base address image is mapped into */ + uuid_t imageUUID; /* UUID of image */ }; struct user64_dyld_uuid_info { - uint64_t imageLoadAddress; /* base address image is mapped into */ - uuid_t imageUUID; /* UUID of image */ + uint64_t imageLoadAddress; /* base address image is mapped into */ + uuid_t imageUUID; /* UUID of image */ }; enum task_snapshot_flags { @@ -561,22 +561,22 @@ enum thread_snapshot_flags { }; struct mem_and_io_snapshot { - uint32_t snapshot_magic; - uint32_t free_pages; - uint32_t active_pages; - uint32_t inactive_pages; - uint32_t purgeable_pages; - uint32_t wired_pages; - uint32_t speculative_pages; - uint32_t throttled_pages; - uint32_t filebacked_pages; - uint32_t compressions; - uint32_t decompressions; - uint32_t compressor_size; - int32_t busy_buffer_count; - uint32_t pages_wanted; - uint32_t pages_reclaimed; - uint8_t pages_wanted_reclaimed_valid; // did mach_vm_pressure_monitor succeed? + uint32_t snapshot_magic; + uint32_t free_pages; + uint32_t active_pages; + uint32_t inactive_pages; + uint32_t purgeable_pages; + uint32_t wired_pages; + uint32_t speculative_pages; + uint32_t throttled_pages; + uint32_t filebacked_pages; + uint32_t compressions; + uint32_t decompressions; + uint32_t compressor_size; + int32_t busy_buffer_count; + uint32_t pages_wanted; + uint32_t pages_reclaimed; + uint8_t pages_wanted_reclaimed_valid; // did mach_vm_pressure_monitor succeed? 
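	/*
	 * The record is packed because consumers decode it straight out of a
	 * raw kcdata byte stream rather than from an aligned object.  A
	 * reader might do, roughly (a sketch, not a kernel API; use() is
	 * illustrative):
	 *
	 *	struct mem_and_io_snapshot snap;
	 *	memcpy(&snap, kcdata_iter_payload(iter), sizeof(snap));
	 *	if (snap.pages_wanted_reclaimed_valid) {
	 *		use(snap.pages_wanted, snap.pages_reclaimed);
	 *	}
	 */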
} __attribute__((packed)); /* SS_TH_* macros are for ths_state */ @@ -727,8 +727,7 @@ struct thread_delta_snapshot_v3 { uint64_t tds_effective_policy; } __attribute__ ((packed)); -struct io_stats_snapshot -{ +struct io_stats_snapshot { /* * I/O Statistics * XXX: These fields must be together. @@ -748,7 +747,6 @@ struct io_stats_snapshot uint64_t ss_metadata_count; uint64_t ss_metadata_size; /* XXX: I/O Statistics end */ - } __attribute__ ((packed)); struct task_snapshot_v2 { @@ -810,10 +808,10 @@ struct stackshot_fault_stats { } __attribute__((packed)); typedef struct stackshot_thread_waitinfo { - uint64_t owner; /* The thread that owns the object */ - uint64_t waiter; /* The thread that's waiting on the object */ - uint64_t context; /* A context uniquely identifying the object */ - uint8_t wait_type; /* The type of object that the thread is waiting on */ + uint64_t owner; /* The thread that owns the object */ + uint64_t waiter; /* The thread that's waiting on the object */ + uint64_t context; /* A context uniquely identifying the object */ + uint8_t wait_type; /* The type of object that the thread is waiting on */ } __attribute__((packed)) thread_waitinfo_t; #define STACKSHOT_WAITOWNER_KERNEL (UINT64_MAX - 1) @@ -841,12 +839,12 @@ struct stack_snapshot_stacktop { /* FIXME some of these types aren't clean (fixed width, packed, and defined *here*) */ struct crashinfo_proc_uniqidentifierinfo { - uint8_t p_uuid[16]; /* UUID of the main executable */ - uint64_t p_uniqueid; /* 64 bit unique identifier for process */ - uint64_t p_puniqueid; /* unique identifier for process's parent */ - uint64_t p_reserve2; /* reserved for future use */ - uint64_t p_reserve3; /* reserved for future use */ - uint64_t p_reserve4; /* reserved for future use */ + uint8_t p_uuid[16]; /* UUID of the main executable */ + uint64_t p_uniqueid; /* 64 bit unique identifier for process */ + uint64_t p_puniqueid; /* unique identifier for process's parent */ + uint64_t p_reserve2; /* reserved for future use */ + uint64_t p_reserve3; /* reserved for future use */ + uint64_t p_reserve4; /* reserved for future use */ } __attribute__((packed)); #define TASK_CRASHINFO_BEGIN KCDATA_BUFFER_BEGIN_CRASHINFO @@ -861,7 +859,7 @@ struct crashinfo_proc_uniqidentifierinfo { #define TASK_CRASHINFO_PID 0x805 #define TASK_CRASHINFO_PPID 0x806 #define TASK_CRASHINFO_RUSAGE 0x807 /* struct rusage DEPRECATED do not use. - This struct has longs in it */ + * This struct has longs in it */ #define TASK_CRASHINFO_RUSAGE_INFO 0x808 /* struct rusage_info_v3 from resource.h */ #define TASK_CRASHINFO_PROC_NAME 0x809 /* char * */ #define TASK_CRASHINFO_PROC_STARTTIME 0x80B /* struct timeval64 */ @@ -912,10 +910,10 @@ struct crashinfo_proc_uniqidentifierinfo { #define EXIT_REASON_DISPATCH_QUEUE_NO 0x1006 struct exit_reason_snapshot { - uint32_t ers_namespace; - uint64_t ers_code; - /* end of version 1 of exit_reason_snapshot. sizeof v1 was 12 */ - uint64_t ers_flags; + uint32_t ers_namespace; + uint64_t ers_code; + /* end of version 1 of exit_reason_snapshot. 
sizeof v1 was 12 */ + uint64_t ers_flags; } __attribute__((packed)); #define EXIT_REASON_CODESIG_PATH_MAX 1024 @@ -951,7 +949,9 @@ typedef struct kcdata_iter { static inline -kcdata_iter_t kcdata_iter(void *buffer, unsigned long size) { +kcdata_iter_t +kcdata_iter(void *buffer, unsigned long size) +{ kcdata_iter_t iter; iter.item = (kcdata_item_t) buffer; iter.end = (void*) (((uintptr_t)buffer) + size); @@ -962,7 +962,9 @@ static inline kcdata_iter_t kcdata_iter_unsafe(void *buffer) __attribute__((deprecated)); static inline -kcdata_iter_t kcdata_iter_unsafe(void *buffer) { +kcdata_iter_t +kcdata_iter_unsafe(void *buffer) +{ kcdata_iter_t iter; iter.item = (kcdata_item_t) buffer; iter.end = (void*) (uintptr_t) ~0; @@ -972,15 +974,19 @@ kcdata_iter_t kcdata_iter_unsafe(void *buffer) { static const kcdata_iter_t kcdata_invalid_iter = { .item = 0, .end = 0 }; static inline -int kcdata_iter_valid(kcdata_iter_t iter) { +int +kcdata_iter_valid(kcdata_iter_t iter) +{ return - ( (uintptr_t)iter.item + sizeof(struct kcdata_item) <= (uintptr_t)iter.end ) && - ( (uintptr_t)iter.item + sizeof(struct kcdata_item) + iter.item->size <= (uintptr_t)iter.end); + ((uintptr_t)iter.item + sizeof(struct kcdata_item) <= (uintptr_t)iter.end) && + ((uintptr_t)iter.item + sizeof(struct kcdata_item) + iter.item->size <= (uintptr_t)iter.end); } static inline -kcdata_iter_t kcdata_iter_next(kcdata_iter_t iter) { +kcdata_iter_t +kcdata_iter_next(kcdata_iter_t iter) +{ iter.item = (kcdata_item_t) (((uintptr_t)iter.item) + sizeof(struct kcdata_item) + (iter.item->size)); return iter; } @@ -988,10 +994,11 @@ kcdata_iter_t kcdata_iter_next(kcdata_iter_t iter) { static inline uint32_t kcdata_iter_type(kcdata_iter_t iter) { - if ((iter.item->type & ~0xfu) == KCDATA_TYPE_ARRAY_PAD0) + if ((iter.item->type & ~0xfu) == KCDATA_TYPE_ARRAY_PAD0) { return KCDATA_TYPE_ARRAY; - else + } else { return iter.item->type; + } } static inline uint32_t @@ -1012,9 +1019,8 @@ static inline int kcdata_iter_is_legacy_item(kcdata_iter_t iter, uint32_t legacy_size) { uint32_t legacy_size_padded = legacy_size + kcdata_calc_padding(legacy_size); - return (iter.item->size == legacy_size_padded && - (iter.item->flags & (KCDATA_FLAGS_STRUCT_PADDING_MASK | KCDATA_FLAGS_STRUCT_HAS_PADDING)) == 0); - + return iter.item->size == legacy_size_padded && + (iter.item->flags & (KCDATA_FLAGS_STRUCT_PADDING_MASK | KCDATA_FLAGS_STRUCT_HAS_PADDING)) == 0; } static inline uint32_t @@ -1044,10 +1050,11 @@ kcdata_iter_size(kcdata_iter_t iter) } not_legacy: default: - if (iter.item->size < kcdata_flags_get_padding(iter.item->flags)) + if (iter.item->size < kcdata_flags_get_padding(iter.item->flags)) { return 0; - else + } else { return iter.item->size - kcdata_flags_get_padding(iter.item->flags); + } } } @@ -1058,18 +1065,24 @@ kcdata_iter_flags(kcdata_iter_t iter) } static inline -void * kcdata_iter_payload(kcdata_iter_t iter) { +void * +kcdata_iter_payload(kcdata_iter_t iter) +{ return &iter.item->data; } static inline -uint32_t kcdata_iter_array_elem_type(kcdata_iter_t iter) { +uint32_t +kcdata_iter_array_elem_type(kcdata_iter_t iter) +{ return (iter.item->flags >> 32) & UINT32_MAX; } static inline -uint32_t kcdata_iter_array_elem_count(kcdata_iter_t iter) { +uint32_t +kcdata_iter_array_elem_count(kcdata_iter_t iter) +{ return (iter.item->flags) & UINT32_MAX; } @@ -1083,8 +1096,9 @@ uint32_t kcdata_iter_array_elem_count(kcdata_iter_t iter) { static inline uint32_t -kcdata_iter_array_size_switch(kcdata_iter_t iter) { - switch(kcdata_iter_array_elem_type(iter)) { 
+kcdata_iter_array_size_switch(kcdata_iter_t iter) +{ + switch (kcdata_iter_array_elem_type(iter)) { case KCDATA_TYPE_LIBRARY_LOADINFO: return sizeof(struct dyld_uuid_info_32); case KCDATA_TYPE_LIBRARY_LOADINFO64: @@ -1099,8 +1113,8 @@ kcdata_iter_array_size_switch(kcdata_iter_t iter) { return sizeof(int32_t); case STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT: return sizeof(struct thread_delta_snapshot_v2); - // This one is only here to make some unit tests work. It should be OK to - // remove. + // This one is only here to make some unit tests work. It should be OK to + // remove. case TASK_CRASHINFO_CRASHED_THREADID: return sizeof(uint64_t); default: @@ -1109,54 +1123,70 @@ kcdata_iter_array_size_switch(kcdata_iter_t iter) { } static inline -int kcdata_iter_array_valid(kcdata_iter_t iter) { - if (!kcdata_iter_valid(iter)) +int +kcdata_iter_array_valid(kcdata_iter_t iter) +{ + if (!kcdata_iter_valid(iter)) { return 0; - if (kcdata_iter_type(iter) != KCDATA_TYPE_ARRAY) + } + if (kcdata_iter_type(iter) != KCDATA_TYPE_ARRAY) { return 0; - if (kcdata_iter_array_elem_count(iter) == 0) + } + if (kcdata_iter_array_elem_count(iter) == 0) { return iter.item->size == 0; + } if (iter.item->type == KCDATA_TYPE_ARRAY) { uint32_t elem_size = kcdata_iter_array_size_switch(iter); - if (elem_size == 0) + if (elem_size == 0) { return 0; + } /* sizes get aligned to the nearest 16. */ return - kcdata_iter_array_elem_count(iter) <= iter.item->size / elem_size && - iter.item->size % kcdata_iter_array_elem_count(iter) < 16; + kcdata_iter_array_elem_count(iter) <= iter.item->size / elem_size && + iter.item->size % kcdata_iter_array_elem_count(iter) < 16; } else { return - (iter.item->type & 0xf) <= iter.item->size && - kcdata_iter_array_elem_count(iter) <= iter.item->size - (iter.item->type & 0xf) && - (iter.item->size - (iter.item->type & 0xf)) % kcdata_iter_array_elem_count(iter) == 0; + (iter.item->type & 0xf) <= iter.item->size && + kcdata_iter_array_elem_count(iter) <= iter.item->size - (iter.item->type & 0xf) && + (iter.item->size - (iter.item->type & 0xf)) % kcdata_iter_array_elem_count(iter) == 0; } } static inline -uint32_t kcdata_iter_array_elem_size(kcdata_iter_t iter) { - if (iter.item->type == KCDATA_TYPE_ARRAY) +uint32_t +kcdata_iter_array_elem_size(kcdata_iter_t iter) +{ + if (iter.item->type == KCDATA_TYPE_ARRAY) { return kcdata_iter_array_size_switch(iter); - if (kcdata_iter_array_elem_count(iter) == 0) + } + if (kcdata_iter_array_elem_count(iter) == 0) { return 0; + } return (iter.item->size - (iter.item->type & 0xf)) / kcdata_iter_array_elem_count(iter); } static inline -int kcdata_iter_container_valid(kcdata_iter_t iter) { +int +kcdata_iter_container_valid(kcdata_iter_t iter) +{ return - kcdata_iter_valid(iter) && - kcdata_iter_type(iter) == KCDATA_TYPE_CONTAINER_BEGIN && - iter.item->size >= sizeof(uint32_t); + kcdata_iter_valid(iter) && + kcdata_iter_type(iter) == KCDATA_TYPE_CONTAINER_BEGIN && + iter.item->size >= sizeof(uint32_t); } static inline -uint32_t kcdata_iter_container_type(kcdata_iter_t iter) { - return * (uint32_t *) kcdata_iter_payload(iter); +uint32_t +kcdata_iter_container_type(kcdata_iter_t iter) +{ + return *(uint32_t *) kcdata_iter_payload(iter); } static inline -uint64_t kcdata_iter_container_id(kcdata_iter_t iter) { +uint64_t +kcdata_iter_container_id(kcdata_iter_t iter) +{ return iter.item->flags; } @@ -1170,22 +1200,27 @@ kcdata_iter_find_type(kcdata_iter_t iter, uint32_t type) { KCDATA_ITER_FOREACH(iter) { - if (kcdata_iter_type(iter) == type) + if (kcdata_iter_type(iter) 
== type) { return iter; + } } return kcdata_invalid_iter; } static inline -int kcdata_iter_data_with_desc_valid(kcdata_iter_t iter, uint32_t minsize) { +int +kcdata_iter_data_with_desc_valid(kcdata_iter_t iter, uint32_t minsize) +{ return - kcdata_iter_valid(iter) && - kcdata_iter_size(iter) >= KCDATA_DESC_MAXLEN + minsize && - ((char*)kcdata_iter_payload(iter))[KCDATA_DESC_MAXLEN-1] == 0; + kcdata_iter_valid(iter) && + kcdata_iter_size(iter) >= KCDATA_DESC_MAXLEN + minsize && + ((char*)kcdata_iter_payload(iter))[KCDATA_DESC_MAXLEN - 1] == 0; } static inline -char *kcdata_iter_string(kcdata_iter_t iter, uint32_t offset) { +char * +kcdata_iter_string(kcdata_iter_t iter, uint32_t offset) +{ if (offset > kcdata_iter_size(iter)) { return NULL; } @@ -1198,13 +1233,18 @@ char *kcdata_iter_string(kcdata_iter_t iter, uint32_t offset) { } } -static inline void kcdata_iter_get_data_with_desc(kcdata_iter_t iter, char **desc_ptr, void **data_ptr, uint32_t *size_ptr) { - if (desc_ptr) +static inline void +kcdata_iter_get_data_with_desc(kcdata_iter_t iter, char **desc_ptr, void **data_ptr, uint32_t *size_ptr) +{ + if (desc_ptr) { *desc_ptr = (char *)kcdata_iter_payload(iter); - if (data_ptr) + } + if (data_ptr) { *data_ptr = (void *)((uintptr_t)kcdata_iter_payload(iter) + KCDATA_DESC_MAXLEN); - if (size_ptr) + } + if (size_ptr) { *size_ptr = kcdata_iter_size(iter) - KCDATA_DESC_MAXLEN; + } } #endif diff --git a/osfmk/kern/kern_cdata.c b/osfmk/kern/kern_cdata.c index 71a2368f6..91c29df10 100644 --- a/osfmk/kern/kern_cdata.c +++ b/osfmk/kern/kern_cdata.c @@ -45,9 +45,10 @@ static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data * num_items items of known types with overall length payload_size. * * NOTE: This function will not give an accurate estimate for buffers that will - * contain unknown types (those with string descriptions). + * contain unknown types (those with string descriptions). 
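 *
 * Worked example, assuming (as in the body below) that each item costs
 * a 16-byte kcdata_item header plus at most 15 bytes of alignment
 * padding: 10 items carrying 1024 bytes of payload are estimated at
 *
 *	10*15 (padding) + 10*16 (headers) + 2*16 (begin/end) + 1024 = 1366
 *
 * bytes; a deliberate upper bound, not an exact fit.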
*/ -uint32_t kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size) +uint32_t +kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size) { /* * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding @@ -59,7 +60,8 @@ uint32_t kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t paylo return max_padding_bytes + item_description_bytes + begin_and_end_marker_bytes + payload_size; } -kcdata_descriptor_t kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags) +kcdata_descriptor_t +kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags) { kcdata_descriptor_t data = NULL; mach_vm_address_t user_addr = 0; @@ -75,7 +77,7 @@ kcdata_descriptor_t kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, un data->kcd_length = size; /* Initialize the BEGIN header */ - if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)){ + if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) { kcdata_memory_destroy(data); return NULL; } @@ -83,7 +85,8 @@ kcdata_descriptor_t kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, un return data; } -kern_return_t kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags) +kern_return_t +kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags) { mach_vm_address_t user_addr = 0; @@ -100,7 +103,8 @@ kern_return_t kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_addres return kcdata_get_memory_addr(data, data_type, 0, &user_addr); } -void *kcdata_memory_get_begin_addr(kcdata_descriptor_t data) +void * +kcdata_memory_get_begin_addr(kcdata_descriptor_t data) { if (data == NULL) { return NULL; @@ -109,7 +113,8 @@ void *kcdata_memory_get_begin_addr(kcdata_descriptor_t data) return (void *)data->kcd_addr_begin; } -uint64_t kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd) +uint64_t +kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd) { assert(kcd != NULL); return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item); @@ -118,7 +123,8 @@ uint64_t kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd) /* * Free up the memory associated with kcdata */ -kern_return_t kcdata_memory_destroy(kcdata_descriptor_t data) +kern_return_t +kcdata_memory_destroy(kcdata_descriptor_t data) { if (!data) { return KERN_INVALID_ARGUMENT; @@ -178,12 +184,13 @@ kcdata_write_buffer_end(kcdata_descriptor_t data) * Desc: internal function with flags field. 
See documentation for kcdata_get_memory_addr for details */ -static kern_return_t kcdata_get_memory_addr_with_flavor( - kcdata_descriptor_t data, - uint32_t type, - uint32_t size, - uint64_t flags, - mach_vm_address_t *user_addr) +static kern_return_t +kcdata_get_memory_addr_with_flavor( + kcdata_descriptor_t data, + uint32_t type, + uint32_t size, + uint64_t flags, + mach_vm_address_t *user_addr) { kern_return_t kr; struct kcdata_item info; @@ -205,20 +212,22 @@ static kern_return_t kcdata_get_memory_addr_with_flavor( /* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */ if (total_size + sizeof(info) > data->kcd_length || - data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) { + data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) { return KERN_RESOURCE_SHORTAGE; } kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info)); - if (kr) + if (kr) { return kr; + } data->kcd_addr_end += sizeof(info); if (padding) { kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding); - if (kr) + if (kr) { return kr; + } } *user_addr = data->kcd_addr_end; @@ -245,12 +254,13 @@ static kern_return_t kcdata_get_memory_addr_with_flavor( * returns: mach_vm_address_t address in user memory for copyout(). */ -kern_return_t kcdata_get_memory_addr_for_array( - kcdata_descriptor_t data, - uint32_t type_of_element, - uint32_t size_of_element, - uint32_t count, - mach_vm_address_t *user_addr) +kern_return_t +kcdata_get_memory_addr_for_array( + kcdata_descriptor_t data, + uint32_t type_of_element, + uint32_t size_of_element, + uint32_t count, + mach_vm_address_t *user_addr) { /* for arrays we record the number of padding bytes as the low-order 4 bits * of the type field. KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */ @@ -272,22 +282,25 @@ kern_return_t kcdata_get_memory_addr_for_array( * returns: return value of kcdata_get_memory_addr() */ -kern_return_t kcdata_add_container_marker( - kcdata_descriptor_t data, - uint32_t header_type, - uint32_t container_type, - uint64_t identifier) +kern_return_t +kcdata_add_container_marker( + kcdata_descriptor_t data, + uint32_t header_type, + uint32_t container_type, + uint64_t identifier) { mach_vm_address_t user_addr; kern_return_t kr; assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN); uint32_t data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0; kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } - if (data_size) + if (data_size) { kr = kcdata_memcpy(data, user_addr, &container_type, data_size); + } return kr; } @@ -323,11 +336,13 @@ kcdata_undo_add_container_begin(kcdata_descriptor_t data) * returns: KERN_NO_ACCESS if copyout fails. 
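 *
 * Typical call pattern, as used by the writers below (e.g.
 * kcdata_add_type_definition()):
 *
 *	mach_vm_address_t uaddr = 0;
 *	kr = kcdata_get_memory_addr(data, type, size, &uaddr);
 *	if (kr == KERN_SUCCESS) {
 *		kr = kcdata_memcpy(data, uaddr, &payload, size);
 *	}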
*/ -kern_return_t kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size) +kern_return_t +kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size) { if (data->kcd_flags & KCFLAG_USE_COPYOUT) { - if (copyout(src_addr, dst_addr, size)) + if (copyout(src_addr, dst_addr, size)) { return KERN_NO_ACCESS; + } } else { memcpy((void *)dst_addr, src_addr, size); } @@ -347,8 +362,9 @@ kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size while (size) { uint32_t block_size = MIN(size, 16); kr = copyout(&zeros, dst_addr, block_size); - if (kr) + if (kr) { return KERN_NO_ACCESS; + } size -= block_size; } return KERN_SUCCESS; @@ -370,12 +386,13 @@ kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size * returns: return code from kcdata_get_memory_addr in case of failure. */ -kern_return_t kcdata_add_type_definition( - kcdata_descriptor_t data, - uint32_t type_id, - char *type_name, - struct kcdata_subtype_descriptor *elements_array_addr, - uint32_t elements_count) +kern_return_t +kcdata_add_type_definition( + kcdata_descriptor_t data, + uint32_t type_id, + char *type_name, + struct kcdata_subtype_descriptor *elements_array_addr, + uint32_t elements_count) { kern_return_t kr = KERN_SUCCESS; struct kcdata_type_definition kc_type_definition; @@ -383,8 +400,9 @@ kern_return_t kcdata_add_type_definition( uint32_t total_size = sizeof(struct kcdata_type_definition); bzero(&kc_type_definition, sizeof(kc_type_definition)); - if (strlen(type_name) >= KCDATA_DESC_MAXLEN) + if (strlen(type_name) >= KCDATA_DESC_MAXLEN) { return KERN_INVALID_ARGUMENT; + } strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN); kc_type_definition.kct_num_elements = elements_count; kc_type_definition.kct_type_identifier = type_id; @@ -392,13 +410,16 @@ kern_return_t kcdata_add_type_definition( total_size += elements_count * sizeof(struct kcdata_subtype_descriptor); /* record number of padding bytes as lower 4 bits of flags */ if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size, - kcdata_calc_padding(total_size), &user_addr))) + kcdata_calc_padding(total_size), &user_addr))) { return kr; - if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) + } + if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) { return kr; + } user_addr += sizeof(struct kcdata_type_definition); - if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) + if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) { return kr; + } return kr; } @@ -420,8 +441,9 @@ struct _uint32_with_description_data { kern_return_t kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description) { - if (strlen(description) >= KCDATA_DESC_MAXLEN) + if (strlen(description) >= KCDATA_DESC_MAXLEN) { return KERN_INVALID_ARGUMENT; + } kern_return_t kr = 0; mach_vm_address_t user_addr; @@ -433,26 +455,30 @@ kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, save_data.data = data; kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr); - if (kr != 
KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) { - if (copyout(&save_data, user_addr, size_req)) + if (copyout(&save_data, user_addr, size_req)) { return KERN_NO_ACCESS; + } } else { memcpy((void *)user_addr, &save_data, size_req); } return KERN_SUCCESS; } -kern_return_t kcdata_add_uint32_with_description( - kcdata_descriptor_t data_desc, - uint32_t data, - const char *description) +kern_return_t +kcdata_add_uint32_with_description( + kcdata_descriptor_t data_desc, + uint32_t data, + const char *description) { assert(strlen(description) < KCDATA_DESC_MAXLEN); - if (strlen(description) >= KCDATA_DESC_MAXLEN) + if (strlen(description) >= KCDATA_DESC_MAXLEN) { return KERN_INVALID_ARGUMENT; + } kern_return_t kr = 0; mach_vm_address_t user_addr; struct _uint32_with_description_data save_data; @@ -463,11 +489,13 @@ kern_return_t kcdata_add_uint32_with_description( save_data.data = data; kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) { - if (copyout(&save_data, user_addr, size_req)) + if (copyout(&save_data, user_addr, size_req)) { return KERN_NO_ACCESS; + } } else { memcpy((void *)user_addr, &save_data, size_req); } diff --git a/osfmk/kern/kern_cdata.h b/osfmk/kern/kern_cdata.h index 39739d76e..398e8a122 100644 --- a/osfmk/kern/kern_cdata.h +++ b/osfmk/kern/kern_cdata.h @@ -52,7 +52,7 @@ #define KCDATA_ITEM_DATA_PTR(item) kcdata_iter_payload(KCDATA_ITEM_ITER(item)) #define KCDATA_ITEM_FIND_TYPE(itemx, type) (kcdata_iter_find_type(KCDATA_ITEM_ITER(itemx), type).item) #define kcdata_get_container_type(buffer) kcdata_iter_container_type(KCDATA_ITEM_ITER(buffer)) -#define kcdata_get_data_with_desc(buf,desc,data) kcdata_iter_get_data_with_desc(KCDATA_ITEM_ITER(buf),desc,data,NULL) +#define kcdata_get_data_with_desc(buf, desc, data) kcdata_iter_get_data_with_desc(KCDATA_ITEM_ITER(buf),desc,data,NULL) /* Do not use these macros! 
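 * They merely wrap the kcdata_iter_* API; new readers should walk the
 * buffer with the iterator directly, e.g. (a sketch; my_type and
 * handle() stand in for the caller's record type and logic):
 *
 *	kcdata_iter_t iter = kcdata_iter(buf, size);
 *	KCDATA_ITER_FOREACH(iter) {
 *		if (kcdata_iter_type(iter) == my_type) {
 *			handle(kcdata_iter_payload(iter), kcdata_iter_size(iter));
 *		}
 *	}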
*/ #ifdef KERNEL @@ -75,15 +75,15 @@ typedef struct kcdata_descriptor * kcdata_descriptor_t; kcdata_descriptor_t kcdata_memory_alloc_init(mach_vm_address_t crash_data_p, unsigned data_type, unsigned size, unsigned flags); kern_return_t kcdata_memory_static_init( - kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags); + kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags); kern_return_t kcdata_memory_destroy(kcdata_descriptor_t data); kern_return_t kcdata_add_container_marker(kcdata_descriptor_t data, uint32_t header_type, uint32_t container_type, uint64_t identifier); kern_return_t kcdata_add_type_definition(kcdata_descriptor_t data, - uint32_t type_id, - char * type_name, - struct kcdata_subtype_descriptor * elements_array_addr, - uint32_t elements_count); + uint32_t type_id, + char * type_name, + struct kcdata_subtype_descriptor * elements_array_addr, + uint32_t elements_count); kern_return_t kcdata_add_uint64_with_description(kcdata_descriptor_t crashinfo, uint64_t data, const char * description); kern_return_t kcdata_add_uint32_with_description(kcdata_descriptor_t crashinfo, uint32_t data, const char * description); @@ -105,7 +105,7 @@ kern_return_t kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr kern_return_t kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size); kern_return_t kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr); kern_return_t kcdata_get_memory_addr_for_array( - kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, mach_vm_address_t * user_addr); + kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, mach_vm_address_t * user_addr); #endif /* KERNEL */ #endif /* _KERN_CDATA_H_ */ diff --git a/osfmk/kern/kern_monotonic.c b/osfmk/kern/kern_monotonic.c index 75315a764..9837913e1 100644 --- a/osfmk/kern/kern_monotonic.c +++ b/osfmk/kern/kern_monotonic.c @@ -83,7 +83,7 @@ mt_fixed_thread_counts(thread_t thread, uint64_t *counts_out) */ spin: start_gen = atomic_load_explicit(&thread->t_monotonic.mth_gen, - memory_order_acquire); + memory_order_acquire); retry: if (start_gen & 1) { spins++; @@ -107,7 +107,7 @@ retry: * again. */ end_gen = atomic_load_explicit(&thread->t_monotonic.mth_gen, - memory_order_acquire); + memory_order_acquire); if (end_gen != start_gen) { retries++; if (retries > MAXRETRIES) { @@ -147,7 +147,7 @@ mt_update_thread(thread_t thread) * even. */ __assert_only uint64_t enter_gen = atomic_fetch_add_explicit( - &thread->t_monotonic.mth_gen, 1, memory_order_release); + &thread->t_monotonic.mth_gen, 1, memory_order_release); /* * Should not have pre-empted a modification to the counts. */ @@ -163,7 +163,7 @@ mt_update_thread(thread_t thread) * before and after reading don't match. */ __assert_only uint64_t exit_gen = atomic_fetch_add_explicit( - &thread->t_monotonic.mth_gen, 1, memory_order_release); + &thread->t_monotonic.mth_gen, 1, memory_order_release); /* * Make sure no other writers came through behind us. 
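 * (mth_gen acts as a seqcount: this writer bumps it to an odd value on
 * entry and back to even on exit, so the lock-free reader in
 * mt_fixed_thread_counts() can detect a torn snapshot.  The reader's
 * retry loop, reduced to a sketch, with &mth_gen standing for
 * &thread->t_monotonic.mth_gen:
 *
 *	uint64_t gen;
 *	do {
 *		gen = atomic_load_explicit(&mth_gen, memory_order_acquire);
 *		// copy the counter snapshot
 *	} while ((gen & 1) ||
 *	    gen != atomic_load_explicit(&mth_gen, memory_order_acquire));
 * )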
*/ @@ -185,11 +185,11 @@ mt_sched_update(thread_t thread) KDBG_RELEASE(MT_KDBG_IC_CPU_CSWITCH, #ifdef MT_CORE_INSTRS - mtc->mtc_counts[MT_CORE_INSTRS], + mtc->mtc_counts[MT_CORE_INSTRS], #else /* defined(MT_CORE_INSTRS) */ - 0, + 0, #endif /* !defined(MT_CORE_INSTRS) */ - mtc->mtc_counts[MT_CORE_CYCLES]); + mtc->mtc_counts[MT_CORE_CYCLES]); } } @@ -262,12 +262,12 @@ mt_mtc_update_count(struct mt_cpu *mtc, unsigned int ctr) if (snap < mtc->mtc_snaps[ctr]) { if (mt_debug) { kprintf("monotonic: cpu %d: thread %#llx: " - "retrograde counter %u value: %llu, last read = %llu\n", - cpu_number(), thread_tid(current_thread()), ctr, snap, - mtc->mtc_snaps[ctr]); + "retrograde counter %u value: %llu, last read = %llu\n", + cpu_number(), thread_tid(current_thread()), ctr, snap, + mtc->mtc_snaps[ctr]); } (void)atomic_fetch_add_explicit(&mt_retrograde, 1, - memory_order_relaxed); + memory_order_relaxed); mtc->mtc_snaps[ctr] = snap; return 0; } @@ -297,7 +297,7 @@ mt_fixed_counts_internal(uint64_t *counts, uint64_t *counts_since) void mt_mtc_update_fixed_counts(struct mt_cpu *mtc, uint64_t *counts, - uint64_t *counts_since) + uint64_t *counts_since) { if (!mt_core_supported) { return; @@ -540,7 +540,7 @@ uint64_t mt_core_reset_values[MT_CORE_NFIXED] = { 0 }; int mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn handler, - void *ctx) + void *ctx) { assert(ctr < MT_CORE_NFIXED); @@ -557,6 +557,9 @@ mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn handler, int error = mt_microstackshot_start_arch(period); if (error) { + mt_microstackshot_ctr = 0; + mt_microstackshot_pmi_handler = NULL; + mt_microstackshot_ctx = NULL; return error; } @@ -573,4 +576,3 @@ mt_microstackshot_stop(void) return 0; } - diff --git a/osfmk/kern/kern_stackshot.c b/osfmk/kern/kern_stackshot.c index 28d6270fa..05ab16ce9 100644 --- a/osfmk/kern/kern_stackshot.c +++ b/osfmk/kern/kern_stackshot.c @@ -113,43 +113,43 @@ void * stackshot_snapbuf = NULL; /* Used by stack_snapshot2 (to be removed) */ __private_extern__ void stackshot_init( void ); static boolean_t memory_iszero(void *addr, size_t size); #if CONFIG_TELEMETRY -kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval); +kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval); #endif -uint32_t get_stackshot_estsize(uint32_t prev_size_hint); -kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, - size_t stackshot_config_size, boolean_t stackshot_from_user); -kern_return_t do_stackshot(void *); -void kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags, kcdata_descriptor_t data_p, uint64_t since_timestamp); +uint32_t get_stackshot_estsize(uint32_t prev_size_hint); +kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, + size_t stackshot_config_size, boolean_t stackshot_from_user); +kern_return_t do_stackshot(void *); +void kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags, kcdata_descriptor_t data_p, uint64_t since_timestamp); boolean_t stackshot_thread_is_idle_worker_unsafe(thread_t thread); -static int kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t *pBytesTraced); -uint32_t kdp_stack_snapshot_bytes_traced(void); -static void kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap); -static boolean_t kdp_copyin(vm_map_t map, uint64_t uaddr, void 
*dest, size_t size, boolean_t try_fault, uint32_t *kdp_fault_result); -static boolean_t kdp_copyin_word(task_t task, uint64_t addr, uint64_t *result, boolean_t try_fault, uint32_t *kdp_fault_results); -static uint64_t proc_was_throttled_from_task(task_t task); -static void stackshot_thread_wait_owner_info(thread_t thread, thread_waitinfo_t * waitinfo); -static int stackshot_thread_has_valid_waitinfo(thread_t thread); +static int kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t *pBytesTraced); +uint32_t kdp_stack_snapshot_bytes_traced(void); +static void kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap); +static boolean_t kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, boolean_t try_fault, uint32_t *kdp_fault_result); +static boolean_t kdp_copyin_word(task_t task, uint64_t addr, uint64_t *result, boolean_t try_fault, uint32_t *kdp_fault_results); +static uint64_t proc_was_throttled_from_task(task_t task); +static void stackshot_thread_wait_owner_info(thread_t thread, thread_waitinfo_t * waitinfo); +static int stackshot_thread_has_valid_waitinfo(thread_t thread); #if CONFIG_COALITIONS -static void stackshot_coalition_jetsam_count(void *arg, int i, coalition_t coal); -static void stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal); +static void stackshot_coalition_jetsam_count(void *arg, int i, coalition_t coal); +static void stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal); #endif /* CONFIG_COALITIONS */ -extern uint32_t workqueue_get_pwq_state_kdp(void *proc); +extern uint32_t workqueue_get_pwq_state_kdp(void *proc); -extern int proc_pid(void *p); -extern uint64_t proc_uniqueid(void *p); -extern uint64_t proc_was_throttled(void *p); -extern uint64_t proc_did_throttle(void *p); -extern int proc_exiting(void *p); -extern int proc_in_teardown(void *p); -static uint64_t proc_did_throttle_from_task(task_t task); -extern void proc_name_kdp(task_t task, char * buf, int size); -extern int proc_threadname_kdp(void * uth, char * buf, size_t size); -extern void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime); -extern int memorystatus_get_pressure_status_kdp(void); -extern void memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit); +extern int proc_pid(void *p); +extern uint64_t proc_uniqueid(void *p); +extern uint64_t proc_was_throttled(void *p); +extern uint64_t proc_did_throttle(void *p); +extern int proc_exiting(void *p); +extern int proc_in_teardown(void *p); +static uint64_t proc_did_throttle_from_task(task_t task); +extern void proc_name_kdp(task_t task, char * buf, int size); +extern int proc_threadname_kdp(void * uth, char * buf, size_t size); +extern void proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime); +extern int memorystatus_get_pressure_status_kdp(void); +extern void memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit); extern int count_busy_buffers(void); /* must track with declaration in bsd/sys/buf_internal.h */ extern void bcopy_phys(addr64_t, addr64_t, vm_size_t); @@ -197,7 +197,7 @@ typedef struct task_snapshot *task_snapshot_t; #if CONFIG_KDP_INTERACTIVE_DEBUGGING extern kdp_send_t kdp_en_send_pkt; -#endif +#endif /* * Globals to support machine_trace_thread_get_kva. 
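 * The idea, in sketch form (only validate_next_addr below is the real
 * name; cached_page and cached_kva are illustrative): remember the last
 * VA-to-KVA translation and reuse it while the stack walk stays on the
 * same page,
 *
 *	if (!validate_next_addr && trunc_page(addr) == cached_page)
 *		return cached_kva + (addr - cached_page);
 *
 * forcing revalidation whenever validate_next_addr is set between
 * threads.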
@@ -212,14 +212,16 @@ static boolean_t validate_next_addr = TRUE; static lck_grp_t *stackshot_subsys_lck_grp; static lck_grp_attr_t *stackshot_subsys_lck_grp_attr; static lck_attr_t *stackshot_subsys_lck_attr; -static lck_mtx_t stackshot_subsys_mutex; +static lck_mtx_t stackshot_subsys_mutex; #define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex) #define STACKSHOT_SUBSYS_TRY_LOCK() lck_mtx_try_lock(&stackshot_subsys_mutex) #define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex) -#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64 * 1024 * 1024) -#define SANE_TRACEBUF_SIZE (8 * 1024 * 1024) +#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64ULL * 1024ULL * 1024ULL) +#define SANE_TRACEBUF_SIZE (8ULL * 1024ULL * 1024ULL) + +#define TRACEBUF_SIZE_PER_GB (1024ULL * 1024ULL) SECURITY_READ_ONLY_LATE(static uint32_t) max_tracebuf_size = SANE_TRACEBUF_SIZE; @@ -232,6 +234,10 @@ SECURITY_READ_ONLY_LATE(static uint32_t) max_tracebuf_size = SANE_TRACEBUF_SIZE; #define STACKSHOT_SUPP_SIZE (16 * 1024) /* Minimum stackshot size */ #define TASK_UUID_AVG_SIZE (16 * sizeof(uuid_t)) /* Average space consumed by UUIDs/task */ +#ifndef ROUNDUP +#define ROUNDUP(x, y) ((((x)+(y)-1)/(y))*(y)) +#endif + /* * Initialize the mutex governing access to the stack snapshot subsystem * and other stackshot related bits. @@ -250,29 +256,32 @@ stackshot_init( void ) lck_mtx_init(&stackshot_subsys_mutex, stackshot_subsys_lck_grp, stackshot_subsys_lck_attr); clock_timebase_info(&timebase); - fault_stats.sfs_system_max_fault_time = ((KDP_FAULT_PATH_MAX_TIME_PER_STACKSHOT_NSECS * timebase.denom)/ timebase.numer); + fault_stats.sfs_system_max_fault_time = ((KDP_FAULT_PATH_MAX_TIME_PER_STACKSHOT_NSECS * timebase.denom) / timebase.numer); + + max_tracebuf_size = MAX(max_tracebuf_size, (ROUNDUP(max_mem, (1024ULL * 1024ULL * 1024ULL)) / TRACEBUF_SIZE_PER_GB)); PE_parse_boot_argn("stackshot_maxsz", &max_tracebuf_size, sizeof(max_tracebuf_size)); } -/* - * Method for grabbing timer values safely, in the sense that no infinite loop will occur - * Certain flavors of the timer_grab function, which would seem to be the thing to use, - * can loop infinitely if called while the timer is in the process of being updated. - * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of - * the timer using this method. This seems insoluble, since stackshot runs in a context - * where the timer might be half-updated, and has no way of yielding control just long - * enough to finish the update. +/* + * Method for grabbing timer values safely, in the sense that no infinite loop will occur + * Certain flavors of the timer_grab function, which would seem to be the thing to use, + * can loop infinitely if called while the timer is in the process of being updated. + * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of + * the timer using this method. This seems insoluble, since stackshot runs in a context + * where the timer might be half-updated, and has no way of yielding control just long + * enough to finish the update. 
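 *
 * Concretely, on a !__LP64__ kernel: if the timer rolls over from
 * 0x00000001ffffffff to 0x0000000200000000 between the high_bits and
 * low_bits loads below, the reader returns 0x0000000100000000, the old
 * top half paired with the new bottom half, roughly 2^32 ticks low.
 * Stackshot accepts that rare tear rather than risk spinning forever.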
*/ -static uint64_t safe_grab_timer_value(struct timer *t) +static uint64_t +safe_grab_timer_value(struct timer *t) { #if defined(__LP64__) - return t->all_bits; + return t->all_bits; #else - uint64_t time = t->high_bits; /* endian independent grab */ - time = (time << 32) | t->low_bits; - return time; + uint64_t time = t->high_bits; /* endian independent grab */ + time = (time << 32) | t->low_bits; + return time; #endif } @@ -280,10 +289,10 @@ static uint64_t safe_grab_timer_value(struct timer *t) * Called with interrupts disabled after stackshot context has been * initialized. Updates stack_snapshot_ret. */ -static kern_return_t +static kern_return_t stackshot_trap() { - kern_return_t rv; + kern_return_t rv; #if defined(__x86_64__) /* @@ -314,7 +323,7 @@ stackshot_trap() #if defined(__x86_64__) mp_rendezvous_unlock(); #endif - return (rv); + return rv; } @@ -350,10 +359,10 @@ stack_snapshot_from_kernel(int pid, void *buf, uint32_t size, uint32_t flags, ui struct kcdata_descriptor kcdata; uint32_t hdr_tag = (flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) ? - KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT : KCDATA_BUFFER_BEGIN_STACKSHOT; + KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT : KCDATA_BUFFER_BEGIN_STACKSHOT; error = kcdata_memory_static_init(&kcdata, (mach_vm_address_t)buf, hdr_tag, size, - KCFLAG_USE_MEMCOPY | KCFLAG_NO_AUTO_ENDBUFFER); + KCFLAG_USE_MEMCOPY | KCFLAG_NO_AUTO_ENDBUFFER); if (error) { goto out; } @@ -421,13 +430,12 @@ stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flag bytes_traced = tracebuf_size; error = telemetry_gather(tracebuf, &bytes_traced, - (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE); + (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE); *retval = (int)bytes_traced; goto unlock_exit; } if (flags & STACKSHOT_GET_BOOT_PROFILE) { - if (tracebuf_size > SANE_BOOTPROFILE_TRACEBUF_SIZE) { error = KERN_INVALID_ARGUMENT; goto unlock_exit; @@ -484,12 +492,12 @@ get_stackshot_estsize(uint32_t prev_size_hint) static kern_return_t stackshot_remap_buffer(void *stackshotbuf, uint32_t bytes_traced, uint64_t out_buffer_addr, uint64_t out_size_addr) { - int error = 0; - mach_vm_offset_t stackshotbuf_user_addr = (mach_vm_offset_t)NULL; - vm_prot_t cur_prot, max_prot; + int error = 0; + mach_vm_offset_t stackshotbuf_user_addr = (mach_vm_offset_t)NULL; + vm_prot_t cur_prot, max_prot; error = mach_vm_remap_kernel(get_task_map(current_task()), &stackshotbuf_user_addr, bytes_traced, 0, - VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE, kernel_map, (mach_vm_offset_t)stackshotbuf, FALSE, &cur_prot, &max_prot, VM_INHERIT_DEFAULT); + VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE, kernel_map, (mach_vm_offset_t)stackshotbuf, FALSE, &cur_prot, &max_prot, VM_INHERIT_DEFAULT); /* * If the call to mach_vm_remap fails, we return the appropriate converted error */ @@ -526,15 +534,15 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi int size_to_free = 0; /* Parsed arguments */ - uint64_t out_buffer_addr; - uint64_t out_size_addr; - int pid = -1; - uint32_t flags; - uint64_t since_timestamp; - uint32_t size_hint = 0; - - if(stackshot_config == NULL) { - return KERN_INVALID_ARGUMENT; + uint64_t out_buffer_addr; + uint64_t out_size_addr; + int pid = -1; + uint32_t flags; + uint64_t since_timestamp; + uint32_t size_hint = 0; + + if (stackshot_config == NULL) { + return KERN_INVALID_ARGUMENT; } #if DEVELOPMENT || DEBUG /* TBD: ask stackshot clients to avoid issuing stackshots in this @@ -546,33 +554,33 @@ kern_stack_snapshot_internal(int 
stackshot_config_version, void *stackshot_confi #endif switch (stackshot_config_version) { - case STACKSHOT_CONFIG_TYPE: - if (stackshot_config_size != sizeof(stackshot_config_t)) { - return KERN_INVALID_ARGUMENT; - } - stackshot_config_t *config = (stackshot_config_t *) stackshot_config; - out_buffer_addr = config->sc_out_buffer_addr; - out_size_addr = config->sc_out_size_addr; - pid = config->sc_pid; - flags = config->sc_flags; - since_timestamp = config->sc_delta_timestamp; - if (config->sc_size <= max_tracebuf_size) { - size_hint = config->sc_size; - } - break; - default: - return KERN_NOT_SUPPORTED; - } - - /* - * Currently saving a kernel buffer and trylock are only supported from the - * internal/KEXT API. - */ - if (stackshot_from_user) { - if (flags & (STACKSHOT_TRYLOCK | STACKSHOT_SAVE_IN_KERNEL_BUFFER | STACKSHOT_FROM_PANIC)) { - return KERN_NO_ACCESS; - } - } else { + case STACKSHOT_CONFIG_TYPE: + if (stackshot_config_size != sizeof(stackshot_config_t)) { + return KERN_INVALID_ARGUMENT; + } + stackshot_config_t *config = (stackshot_config_t *) stackshot_config; + out_buffer_addr = config->sc_out_buffer_addr; + out_size_addr = config->sc_out_size_addr; + pid = config->sc_pid; + flags = config->sc_flags; + since_timestamp = config->sc_delta_timestamp; + if (config->sc_size <= max_tracebuf_size) { + size_hint = config->sc_size; + } + break; + default: + return KERN_NOT_SUPPORTED; + } + + /* + * Currently saving a kernel buffer and trylock are only supported from the + * internal/KEXT API. + */ + if (stackshot_from_user) { + if (flags & (STACKSHOT_TRYLOCK | STACKSHOT_SAVE_IN_KERNEL_BUFFER | STACKSHOT_FROM_PANIC)) { + return KERN_NO_ACCESS; + } + } else { if (!(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) { return KERN_NOT_SUPPORTED; } @@ -617,9 +625,9 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi goto error_exit; } error = stackshot_remap_buffer(kernel_stackshot_buf, kernel_stackshot_buf_size, - out_buffer_addr, out_size_addr); + out_buffer_addr, out_size_addr); /* - * If we successfully remapped the buffer into the user's address space, we + * If we successfully remapped the buffer into the user's address space, we * set buf_to_free and size_to_free so the prior kernel mapping will be removed * and then clear the kernel stackshot pointer and associated size. */ @@ -629,7 +637,7 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi kernel_stackshot_buf = NULL; kernel_stackshot_buf_size = 0; } - + goto error_exit; } @@ -658,7 +666,7 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi uint32_t hdr_tag = (flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) ? 
KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT : KCDATA_BUFFER_BEGIN_STACKSHOT; kcdata_p = kcdata_memory_alloc_init((mach_vm_address_t)stackshotbuf, hdr_tag, stackshotbuf_size, - KCFLAG_USE_MEMCOPY | KCFLAG_NO_AUTO_ENDBUFFER); + KCFLAG_USE_MEMCOPY | KCFLAG_NO_AUTO_ENDBUFFER); stackshot_duration_outer = NULL; uint64_t time_start = mach_absolute_time(); @@ -752,7 +760,7 @@ error_exit: if (stackshotbuf != NULL) { kmem_free(kernel_map, (vm_offset_t)stackshotbuf, stackshotbuf_size); } - if (buf_to_free != NULL) { + if (buf_to_free != NULL) { kmem_free(kernel_map, (vm_offset_t)buf_to_free, size_to_free); } STACKSHOT_SUBSYS_UNLOCK(); @@ -764,7 +772,7 @@ error_exit: */ void kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint32_t flags, - kcdata_descriptor_t data_p, uint64_t since_timestamp) + kcdata_descriptor_t data_p, uint64_t since_timestamp) { uint64_t microsecs = 0, secs = 0; clock_get_calendar_microtime((clock_sec_t *)&secs, (clock_usec_t *)&microsecs); @@ -794,7 +802,7 @@ panic_stackshot_reset_state() boolean_t stackshot_active() { - return (stackshot_kcdata_p != NULL); + return stackshot_kcdata_p != NULL; } uint32_t @@ -803,12 +811,14 @@ kdp_stack_snapshot_bytes_traced(void) return stack_snapshot_bytes_traced; } -static boolean_t memory_iszero(void *addr, size_t size) +static boolean_t +memory_iszero(void *addr, size_t size) { char *data = (char *)addr; - for (size_t i = 0; i < size; i++){ - if (data[i] != 0) + for (size_t i = 0; i < size; i++) { + if (data[i] != 0) { return FALSE; + } } return TRUE; } @@ -821,12 +831,12 @@ static boolean_t memory_iszero(void *addr, size_t size) */ #define kcd_exit_on_error(action) \ do { \ - if (KERN_SUCCESS != (error = (action))) { \ - if (error == KERN_RESOURCE_SHORTAGE) { \ - error = KERN_INSUFFICIENT_BUFFER_SIZE; \ - } \ - goto error_exit; \ - } \ + if (KERN_SUCCESS != (error = (action))) { \ + if (error == KERN_RESOURCE_SHORTAGE) { \ + error = KERN_INSUFFICIENT_BUFFER_SIZE; \ + } \ + goto error_exit; \ + } \ } while (0); /* end kcd_exit_on_error */ static uint64_t @@ -835,45 +845,59 @@ kcdata_get_task_ss_flags(task_t task) uint64_t ss_flags = 0; boolean_t task_64bit_addr = task_has_64Bit_addr(task); - if (task_64bit_addr) + if (task_64bit_addr) { ss_flags |= kUser64_p; - if (!task->active || task_is_a_corpse(task) || proc_exiting(task->bsd_info)) + } + if (!task->active || task_is_a_corpse(task) || proc_exiting(task->bsd_info)) { ss_flags |= kTerminatedSnapshot; - if (task->pidsuspended) + } + if (task->pidsuspended) { ss_flags |= kPidSuspended; - if (task->frozen) + } + if (task->frozen) { ss_flags |= kFrozen; - if (task->effective_policy.tep_darwinbg == 1) + } + if (task->effective_policy.tep_darwinbg == 1) { ss_flags |= kTaskDarwinBG; - if (task->requested_policy.trp_role == TASK_FOREGROUND_APPLICATION) + } + if (task->requested_policy.trp_role == TASK_FOREGROUND_APPLICATION) { ss_flags |= kTaskIsForeground; - if (task->requested_policy.trp_boosted == 1) + } + if (task->requested_policy.trp_boosted == 1) { ss_flags |= kTaskIsBoosted; - if (task->effective_policy.tep_sup_active == 1) + } + if (task->effective_policy.tep_sup_active == 1) { ss_flags |= kTaskIsSuppressed; + } #if CONFIG_MEMORYSTATUS boolean_t dirty = FALSE, dirty_tracked = FALSE, allow_idle_exit = FALSE; memorystatus_proc_flags_unsafe(task->bsd_info, &dirty, &dirty_tracked, &allow_idle_exit); - if (dirty) + if (dirty) { ss_flags |= kTaskIsDirty; - if (dirty_tracked) + } + if (dirty_tracked) { ss_flags |= kTaskIsDirtyTracked; - if (allow_idle_exit) 
{ ss_flags |= kTaskAllowIdleExit; + } #endif - if (task->effective_policy.tep_tal_engaged) + if (task->effective_policy.tep_tal_engaged) { ss_flags |= kTaskTALEngaged; + } ss_flags |= (0x7 & workqueue_get_pwq_state_kdp(task->bsd_info)) << 17; #if IMPORTANCE_INHERITANCE if (task->task_imp_base) { - if (task->task_imp_base->iit_donor) + if (task->task_imp_base->iit_donor) { ss_flags |= kTaskIsImpDonor; - if (task->task_imp_base->iit_live_donor) + } + if (task->task_imp_base->iit_live_donor) { ss_flags |= kTaskIsLiveImpDonor; + } } #endif return ss_flags; @@ -962,7 +986,7 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla if (task_64bit_addr) { struct user64_dyld_all_image_infos task_image_infos; if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos, - sizeof(struct user64_dyld_all_image_infos), should_fault, &kdp_fault_results)) { + sizeof(struct user64_dyld_all_image_infos), should_fault, &kdp_fault_results)) { uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount; uuid_info_addr = task_image_infos.uuidArray; if (task_image_infos.version >= DYLD_ALL_IMAGE_INFOS_TIMESTAMP_MINIMUM_VERSION) { @@ -972,7 +996,7 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla } else { struct user32_dyld_all_image_infos task_image_infos; if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos, - sizeof(struct user32_dyld_all_image_infos), should_fault, &kdp_fault_results)) { + sizeof(struct user32_dyld_all_image_infos), should_fault, &kdp_fault_results)) { uuid_info_count = task_image_infos.uuidArrayCount; uuid_info_addr = task_image_infos.uuidArray; if (task_image_infos.version >= DYLD_ALL_IMAGE_INFOS_TIMESTAMP_MINIMUM_VERSION) { @@ -1004,7 +1028,7 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size; kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, (task_64bit_addr ? KCDATA_TYPE_LIBRARY_LOADINFO64 : KCDATA_TYPE_LIBRARY_LOADINFO), - uuid_info_size, uuid_info_count, &out_addr)); + uuid_info_size, uuid_info_count, &out_addr)); /* Copy in the UUID info array * It may be nonresident, in which case just fix up nloadinfos to 0 in the task_snap @@ -1012,12 +1036,10 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla if (have_pmap && !kdp_copyin(task->map, uuid_info_addr, (void *)out_addr, uuid_info_array_size, should_fault, &kdp_fault_results)) { bzero((void *)out_addr, uuid_info_array_size); } - } else if (task_pid == 0 && uuid_info_count > 0 && uuid_info_count < MAX_LOADINFOS) { uintptr_t image_load_address; do { - #if CONFIG_EMBEDDED if (kernelcache_uuid_valid && !save_kextloadinfo_p) { kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_KERNELCACHE_LOADINFO, sizeof(struct dyld_uuid_info_64), &out_addr)); @@ -1034,26 +1056,26 @@ kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint32_t trace_fla } kcd_exit_on_error(kcdata_get_memory_addr_for_array( - kcd, (sizeof(kernel_uuid_info) == sizeof(struct user64_dyld_uuid_info)) ? KCDATA_TYPE_LIBRARY_LOADINFO64 - : KCDATA_TYPE_LIBRARY_LOADINFO, - sizeof(kernel_uuid_info), uuid_info_count, &out_addr)); + kcd, (sizeof(kernel_uuid_info) == sizeof(struct user64_dyld_uuid_info)) ? 
KCDATA_TYPE_LIBRARY_LOADINFO64 + : KCDATA_TYPE_LIBRARY_LOADINFO, + sizeof(kernel_uuid_info), uuid_info_count, &out_addr)); kernel_uuid_info *uuid_info_array = (kernel_uuid_info *)out_addr; image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext); uuid_info_array[0].imageLoadAddress = image_load_address; stackshot_memcpy(&uuid_info_array[0].imageUUID, kernel_uuid, sizeof(uuid_t)); - if (save_kextloadinfo_p && - ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader)) && - ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]), - gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) { + if (save_kextloadinfo_p && + ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader)) && + ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]), + gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) { uint32_t kexti; - for (kexti=0 ; kexti < gLoadedKextSummaries->numSummaries; kexti++) { + for (kexti = 0; kexti < gLoadedKextSummaries->numSummaries; kexti++) { image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address); uuid_info_array[kexti + 1].imageLoadAddress = image_load_address; stackshot_memcpy(&uuid_info_array[kexti + 1].imageUUID, &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t)); } } - } while(0); + } while (0); } error_exit: @@ -1095,7 +1117,7 @@ kcdata_record_task_iostats(kcdata_descriptor_t kcd, task_t task) _iostat->ss_metadata_size = task->task_io_stats->metadata.size; _iostat->ss_data_count = (task->task_io_stats->total_io.count - task->task_io_stats->metadata.count); _iostat->ss_data_size = (task->task_io_stats->total_io.size - task->task_io_stats->metadata.size); - for(int i = 0; i < IO_NUM_PRIORITIES; i++) { + for (int i = 0; i < IO_NUM_PRIORITIES; i++) { _iostat->ss_io_priority_count[i] = task->task_io_stats->io_priority[i].count; _iostat->ss_io_priority_size[i] = task->task_io_stats->io_priority[i].size; } @@ -1136,7 +1158,7 @@ kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t trace boolean_t collect_asid = ((trace_flags & STACKSHOT_ASID) != 0); #endif boolean_t collect_pagetables = ((trace_flags & STACKSHOT_PAGE_TABLES) != 0); - + kern_return_t error = KERN_SUCCESS; mach_vm_address_t out_addr = 0; @@ -1170,7 +1192,7 @@ kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t trace cur_tsnap->ts_pageins = task->pageins; cur_tsnap->ts_cow_faults = task->cow_faults; cur_tsnap->ts_latency_qos = (task->effective_policy.tep_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ? 
- LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.tep_latency_qos); + LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.tep_latency_qos); cur_tsnap->ts_pid = task_pid; #if __arm__ || __arm64__ @@ -1208,13 +1230,12 @@ kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t trace stackshot_memcpy((void*)out_addr, &jetsam_coal_id, sizeof(jetsam_coal_id)); } #endif /* CONFIG_COALITIONS */ - } - else { + } else { cur_tsnap->ts_p_comm[0] = '\0'; #if IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG) if (task->task_imp_base != NULL) { stackshot_strlcpy(cur_tsnap->ts_p_comm, &task->task_imp_base->iit_procname[0], - MIN((int)sizeof(task->task_imp_base->iit_procname), (int)sizeof(cur_tsnap->ts_p_comm))); + MIN((int)sizeof(task->task_imp_base->iit_procname), (int)sizeof(cur_tsnap->ts_p_comm))); } #endif /* IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG) */ } @@ -1273,9 +1294,9 @@ kcdata_record_task_delta_snapshot(kcdata_descriptor_t kcd, task_t task, uint32_t cur_tsnap->tds_cow_faults = task->cow_faults; cur_tsnap->tds_was_throttled = (uint32_t)proc_was_throttled_from_task(task); cur_tsnap->tds_did_throttle = (uint32_t)proc_did_throttle_from_task(task); - cur_tsnap->tds_latency_qos = (task-> effective_policy.tep_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) - ? LATENCY_QOS_TIER_UNSPECIFIED - : ((0xFF << 16) | task-> effective_policy.tep_latency_qos); + cur_tsnap->tds_latency_qos = (task->effective_policy.tep_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) + ? LATENCY_QOS_TIER_UNSPECIFIED + : ((0xFF << 16) | task->effective_policy.tep_latency_qos); #if __arm__ || __arm64__ if (collect_asid && have_pmap) { @@ -1318,7 +1339,7 @@ kcdata_record_thread_iostats(kcdata_descriptor_t kcd, thread_t thread) _iostat->ss_metadata_size = thread->thread_io_stats->metadata.size; _iostat->ss_data_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->metadata.count); _iostat->ss_data_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->metadata.size); - for(int i = 0; i < IO_NUM_PRIORITIES; i++) { + for (int i = 0; i < IO_NUM_PRIORITIES; i++) { _iostat->ss_io_priority_count[i] = thread->thread_io_stats->io_priority[i].count; _iostat->ss_io_priority_size[i] = thread->thread_io_stats->io_priority[i].size; } @@ -1330,7 +1351,7 @@ error_exit: static kern_return_t kcdata_record_thread_snapshot( - kcdata_descriptor_t kcd, thread_t thread, task_t task, uint32_t trace_flags, boolean_t have_pmap, boolean_t thread_on_core) + kcdata_descriptor_t kcd, thread_t thread, task_t task, uint32_t trace_flags, boolean_t have_pmap, boolean_t thread_on_core) { boolean_t dispatch_p = ((trace_flags & STACKSHOT_GET_DQ) != 0); boolean_t active_kthreads_only_p = ((trace_flags & STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY) != 0); @@ -1359,10 +1380,11 @@ kcdata_record_thread_snapshot( cur_thread_snap->ths_continuation = VM_KERNEL_UNSLIDE(thread->continuation); cur_thread_snap->ths_total_syscalls = thread->syscalls_mach + thread->syscalls_unix; - if (IPC_VOUCHER_NULL != thread->ith_voucher) + if (IPC_VOUCHER_NULL != thread->ith_voucher) { cur_thread_snap->ths_voucher_identifier = VM_KERNEL_ADDRPERM(thread->ith_voucher); - else + } else { cur_thread_snap->ths_voucher_identifier = 0; + } cur_thread_snap->ths_dqserialnum = 0; if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) { @@ -1394,20 +1416,27 @@ kcdata_record_thread_snapshot( } cur_thread_snap->ths_ss_flags = 0; - if (thread->thread_tag & THREAD_TAG_MAINTHREAD) + if 
(thread->thread_tag & THREAD_TAG_MAINTHREAD) { cur_thread_snap->ths_ss_flags |= kThreadMain; - if (thread->effective_policy.thep_darwinbg) + } + if (thread->effective_policy.thep_darwinbg) { cur_thread_snap->ths_ss_flags |= kThreadDarwinBG; - if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) + } + if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) { cur_thread_snap->ths_ss_flags |= kThreadIOPassive; - if (thread->suspend_count > 0) + } + if (thread->suspend_count > 0) { cur_thread_snap->ths_ss_flags |= kThreadSuspended; - if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) + } + if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) { cur_thread_snap->ths_ss_flags |= kGlobalForcedIdle; - if (thread_on_core) + } + if (thread_on_core) { cur_thread_snap->ths_ss_flags |= kThreadOnCore; - if (stackshot_thread_is_idle_worker_unsafe(thread)) + } + if (stackshot_thread_is_idle_worker_unsafe(thread)) { cur_thread_snap->ths_ss_flags |= kThreadIdleWorker; + } /* make sure state flags defined in kcdata.h still match internal flags */ static_assert(SS_TH_WAIT == TH_WAIT); @@ -1427,7 +1456,7 @@ kcdata_record_thread_snapshot( cur_thread_snap->ths_eqos = thread->effective_policy.thep_qos; cur_thread_snap->ths_rqos = thread->requested_policy.thrp_qos; cur_thread_snap->ths_rqos_override = MAX(thread->requested_policy.thrp_qos_override, - thread->requested_policy.thrp_qos_workq_override); + thread->requested_policy.thrp_qos_workq_override); cur_thread_snap->ths_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO); cur_thread_snap->ths_thread_t = VM_KERNEL_UNSLIDE_OR_PERM(thread); @@ -1464,12 +1493,12 @@ kcdata_record_thread_snapshot( uint64_t sp = 0; out_addr = (mach_vm_address_t)kcd_end_address(kcd); saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, TRUE, - trace_fp_p, &thread_snapshot_flags, &sp); + trace_fp_p, &thread_snapshot_flags, &sp); if (saved_count > 0) { int frame_size = trace_fp_p ? sizeof(struct stack_snapshot_frame64) : sizeof(uint64_t); kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_USER_STACKFRAME64 - : STACKSHOT_KCTYPE_USER_STACKLR64, - frame_size, saved_count / frame_size, &out_addr)); + : STACKSHOT_KCTYPE_USER_STACKLR64, + frame_size, saved_count / frame_size, &out_addr)); cur_thread_snap->ths_ss_flags |= kUser64_p; } #if __x86_64__ @@ -1488,12 +1517,12 @@ kcdata_record_thread_snapshot( } else { out_addr = (mach_vm_address_t)kcd_end_address(kcd); saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, TRUE, trace_fp_p, - &thread_snapshot_flags); + &thread_snapshot_flags); if (saved_count > 0) { int frame_size = trace_fp_p ? sizeof(struct stack_snapshot_frame32) : sizeof(uint32_t); kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_USER_STACKFRAME - : STACKSHOT_KCTYPE_USER_STACKLR, - frame_size, saved_count / frame_size, &out_addr)); + : STACKSHOT_KCTYPE_USER_STACKLR, + frame_size, saved_count / frame_size, &out_addr)); } } @@ -1510,23 +1539,23 @@ kcdata_record_thread_snapshot( #if defined(__LP64__) out_addr = (mach_vm_address_t)kcd_end_address(kcd); saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, FALSE, trace_fp_p, - &thread_snapshot_flags, NULL); + &thread_snapshot_flags, NULL); if (saved_count > 0) { int frame_size = trace_fp_p ? 
sizeof(struct stack_snapshot_frame64) : sizeof(uint64_t); cur_thread_snap->ths_ss_flags |= kKernel64_p; kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_KERN_STACKFRAME64 - : STACKSHOT_KCTYPE_KERN_STACKLR64, - frame_size, saved_count / frame_size, &out_addr)); + : STACKSHOT_KCTYPE_KERN_STACKLR64, + frame_size, saved_count / frame_size, &out_addr)); } #else out_addr = (mach_vm_address_t)kcd_end_address(kcd); saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, FALSE, trace_fp_p, - &thread_snapshot_flags); + &thread_snapshot_flags); if (saved_count > 0) { int frame_size = trace_fp_p ? sizeof(struct stack_snapshot_frame32) : sizeof(uint32_t); kcd_exit_on_error( - kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_KERN_STACKFRAME : STACKSHOT_KCTYPE_KERN_STACKLR, - frame_size, saved_count / frame_size, &out_addr)); + kcdata_get_memory_addr_for_array(kcd, trace_fp_p ? STACKSHOT_KCTYPE_KERN_STACKFRAME : STACKSHOT_KCTYPE_KERN_STACKLR, + frame_size, saved_count / frame_size, &out_addr)); } #endif if (thread_snapshot_flags != 0) { @@ -1559,24 +1588,31 @@ static int kcdata_record_thread_delta_snapshot(struct thread_delta_snapshot_v3 * cur_thread_snap, thread_t thread, boolean_t thread_on_core) { cur_thread_snap->tds_thread_id = thread_tid(thread); - if (IPC_VOUCHER_NULL != thread->ith_voucher) + if (IPC_VOUCHER_NULL != thread->ith_voucher) { cur_thread_snap->tds_voucher_identifier = VM_KERNEL_ADDRPERM(thread->ith_voucher); - else + } else { cur_thread_snap->tds_voucher_identifier = 0; + } cur_thread_snap->tds_ss_flags = 0; - if (thread->effective_policy.thep_darwinbg) + if (thread->effective_policy.thep_darwinbg) { cur_thread_snap->tds_ss_flags |= kThreadDarwinBG; - if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) + } + if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) { cur_thread_snap->tds_ss_flags |= kThreadIOPassive; - if (thread->suspend_count > 0) + } + if (thread->suspend_count > 0) { cur_thread_snap->tds_ss_flags |= kThreadSuspended; - if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) + } + if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) { cur_thread_snap->tds_ss_flags |= kGlobalForcedIdle; - if (thread_on_core) + } + if (thread_on_core) { cur_thread_snap->tds_ss_flags |= kThreadOnCore; - if (stackshot_thread_is_idle_worker_unsafe(thread)) + } + if (stackshot_thread_is_idle_worker_unsafe(thread)) { cur_thread_snap->tds_ss_flags |= kThreadIdleWorker; + } cur_thread_snap->tds_last_made_runnable_time = thread->last_made_runnable_time; cur_thread_snap->tds_state = thread->state; @@ -1586,7 +1622,7 @@ kcdata_record_thread_delta_snapshot(struct thread_delta_snapshot_v3 * cur_thread cur_thread_snap->tds_eqos = thread->effective_policy.thep_qos; cur_thread_snap->tds_rqos = thread->requested_policy.thrp_qos; cur_thread_snap->tds_rqos_override = MAX(thread->requested_policy.thrp_qos_override, - thread->requested_policy.thrp_qos_workq_override); + thread->requested_policy.thrp_qos_workq_override); cur_thread_snap->tds_io_tier = proc_get_effective_thread_policy(thread, TASK_POLICY_IO); static_assert(sizeof(thread->effective_policy) == sizeof(uint64_t)); @@ -1635,8 +1671,7 @@ classify_thread(thread_t thread, boolean_t * thread_on_core_p, uint32_t trace_fl } } -struct stackshot_context -{ +struct stackshot_context { int pid; uint32_t trace_flags; }; @@ -1693,13 +1728,12 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) /* Trace everything, 
unless a process was specified */ if ((ctx->pid == -1) || (ctx->pid == task_pid)) { - /* add task snapshot marker */ kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, - STACKSHOT_KCCONTAINER_TASK, task_uniqueid)); + STACKSHOT_KCCONTAINER_TASK, task_uniqueid)); if (!collect_delta_stackshot || (task_start_abstime == 0) || - (task_start_abstime > stack_snapshot_delta_since_timestamp)) { + (task_start_abstime > stack_snapshot_delta_since_timestamp)) { kcd_exit_on_error(kcdata_record_task_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, &task_snap_ss_flags)); } else { task_delta_stackshot = TRUE; @@ -1717,8 +1751,9 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) goto error_exit; } - if (active_kthreads_only_p && thread->kernel_stack == 0) + if (active_kthreads_only_p && thread->kernel_stack == 0) { continue; + } thread_uniqueid = thread_tid(thread); @@ -1729,13 +1764,13 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) case tc_full_snapshot: /* add thread marker */ kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN, - STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid)); + STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid)); kcd_exit_on_error( kcdata_record_thread_snapshot(stackshot_kcdata_p, thread, task, ctx->trace_flags, have_pmap, thread_on_core)); /* mark end of thread snapshot data */ kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, - STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid)); + STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid)); some_thread_ran = TRUE; break; @@ -1748,8 +1783,9 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) /* We want to report owner information regardless of whether a thread * has changed since the last delta, whether it's a normal stackshot, * or whether it's nonrunnable */ - if (save_owner_info && stackshot_thread_has_valid_waitinfo(thread)) + if (save_owner_info && stackshot_thread_has_valid_waitinfo(thread)) { num_waitinfo_threads++; + } } struct thread_delta_snapshot_v3 * delta_snapshots = NULL; @@ -1757,8 +1793,8 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) if (num_delta_thread_snapshots > 0) { kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT, - sizeof(struct thread_delta_snapshot_v3), - num_delta_thread_snapshots, &out_addr)); + sizeof(struct thread_delta_snapshot_v3), + num_delta_thread_snapshots, &out_addr)); delta_snapshots = (struct thread_delta_snapshot_v3 *)out_addr; } @@ -1766,7 +1802,7 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) if (num_nonrunnable_threads > 0) { kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_NONRUNNABLE_TIDS, - sizeof(uint64_t), num_nonrunnable_threads, &out_addr)); + sizeof(uint64_t), num_nonrunnable_threads, &out_addr)); nonrunnable_tids = (uint64_t *)out_addr; } @@ -1775,15 +1811,16 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) if (num_waitinfo_threads > 0) { kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_WAITINFO, - sizeof(thread_waitinfo_t), num_waitinfo_threads, &out_addr)); + sizeof(thread_waitinfo_t), num_waitinfo_threads, &out_addr)); thread_waitinfo = (thread_waitinfo_t *)out_addr; } if (num_delta_thread_snapshots > 0 || num_nonrunnable_threads > 0 || num_waitinfo_threads > 0) { 
queue_iterate(&task->threads, thread, thread_t, task_threads) { - if (active_kthreads_only_p && thread->kernel_stack == 0) + if (active_kthreads_only_p && thread->kernel_stack == 0) { continue; + } /* If we want owner info, we should capture it regardless of its classification */ if (save_owner_info && stackshot_thread_has_valid_waitinfo(thread)) { @@ -1802,7 +1839,7 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) case tc_delta_snapshot: kcd_exit_on_error(kcdata_record_thread_delta_snapshot(&delta_snapshots[current_delta_snapshot_index++], - thread, thread_on_core)); + thread, thread_on_core)); break; } } @@ -1810,11 +1847,11 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) #if DEBUG || DEVELOPMENT if (current_delta_snapshot_index != num_delta_thread_snapshots) { panic("delta thread snapshot count mismatch while capturing snapshots for task %p. expected %d, found %d", task, - num_delta_thread_snapshots, current_delta_snapshot_index); + num_delta_thread_snapshots, current_delta_snapshot_index); } if (current_waitinfo_index != num_waitinfo_threads) { panic("thread wait info count mismatch while capturing snapshots for task %p. expected %d, found %d", task, - num_waitinfo_threads, current_waitinfo_index); + num_waitinfo_threads, current_waitinfo_index); } #endif } @@ -1823,14 +1860,15 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) if (save_donating_pids_p) { kcd_exit_on_error( ((((mach_vm_address_t)kcd_end_address(stackshot_kcdata_p) + (TASK_IMP_WALK_LIMIT * sizeof(int32_t))) < - (mach_vm_address_t)kcd_max_address(stackshot_kcdata_p)) - ? KERN_SUCCESS - : KERN_RESOURCE_SHORTAGE)); + (mach_vm_address_t)kcd_max_address(stackshot_kcdata_p)) + ? KERN_SUCCESS + : KERN_RESOURCE_SHORTAGE)); saved_count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS, - (void *)kcd_end_address(stackshot_kcdata_p), TASK_IMP_WALK_LIMIT); - if (saved_count > 0) + (void *)kcd_end_address(stackshot_kcdata_p), TASK_IMP_WALK_LIMIT); + if (saved_count > 0) { kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_DONATING_PIDS, - sizeof(int32_t), saved_count, &out_addr)); + sizeof(int32_t), saved_count, &out_addr)); + } } #endif @@ -1847,7 +1885,7 @@ kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task) } /* mark end of task snapshot data */ kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_TASK, - task_uniqueid)); + task_uniqueid)); } error_exit: @@ -1888,8 +1926,9 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac fault_stats.sfs_stopped_faulting = (uint8_t) FALSE; } - if (sizeof(void *) == 8) + if (sizeof(void *) == 8) { system_state_flags |= kKernel64_p; + } if (stackshot_kcdata_p == NULL || pBytesTraced == NULL) { error = KERN_INVALID_ARGUMENT; @@ -1945,10 +1984,10 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac /* record system level shared cache load info (if available) */ if (!collect_delta_stackshot && init_task_shared_region && - ml_validate_nofault((vm_offset_t)init_task_shared_region, sizeof(struct vm_shared_region))) { + ml_validate_nofault((vm_offset_t)init_task_shared_region, sizeof(struct vm_shared_region))) { struct dyld_uuid_info_64_v2 *sys_shared_cache_info = NULL; kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, - sizeof(struct dyld_uuid_info_64_v2), &out_addr)); + sizeof(struct 
dyld_uuid_info_64_v2), &out_addr)); sys_shared_cache_info = (struct dyld_uuid_info_64_v2 *)out_addr; stackshot_memcpy(sys_shared_cache_info->imageUUID, &init_task_shared_region->sr_uuid, sizeof(init_task_shared_region->sr_uuid)); @@ -1961,13 +2000,13 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac * (which is only when the system is using a custom shared cache). */ if (init_task_shared_region->sr_images && ml_validate_nofault((vm_offset_t)init_task_shared_region->sr_images, - (init_task_shared_region->sr_images_count * sizeof(struct dyld_uuid_info_64)))) { + (init_task_shared_region->sr_images_count * sizeof(struct dyld_uuid_info_64)))) { assert(init_task_shared_region->sr_images_count != 0); kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT, - sizeof(struct dyld_uuid_info_64), - init_task_shared_region->sr_images_count, &out_addr)); + sizeof(struct dyld_uuid_info_64), + init_task_shared_region->sr_images_count, &out_addr)); stackshot_memcpy((void*)out_addr, init_task_shared_region->sr_images, - (init_task_shared_region->sr_images_count * sizeof(struct dyld_uuid_info_64))); + (init_task_shared_region->sr_images_count * sizeof(struct dyld_uuid_info_64))); } } } @@ -1997,7 +2036,6 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac error = KERN_FAILURE; goto error_exit; } - } #else trace_flags &= ~(STACKSHOT_SAVE_JETSAM_COALITIONS); @@ -2010,8 +2048,9 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac queue_iterate(&tasks, task, task_t, tasks) { error = kdp_stackshot_record_task(&ctx, task); - if (error) + if (error) { goto error_exit; + } } /* * Iterate over the tasks in the terminated tasks list. 
We only inspect @@ -2024,14 +2063,15 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac { if (task->bsd_info && !proc_in_teardown(task->bsd_info)) { error = kdp_stackshot_record_task(&ctx, task); - if (error) + if (error) { goto error_exit; + } } } if (use_fault_path) { kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_FAULT_STATS, - sizeof(struct stackshot_fault_stats), &out_addr)); + sizeof(struct stackshot_fault_stats), &out_addr)); stackshot_memcpy((void*)out_addr, &fault_stats, sizeof(struct stackshot_fault_stats)); } @@ -2039,7 +2079,7 @@ kdp_stackshot_kcdata_format(int pid, uint32_t trace_flags, uint32_t * pBytesTrac abs_time_end = mach_absolute_time(); #if DEVELOPMENT || DEBUG kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_DURATION, - sizeof(struct stackshot_duration), &out_addr)); + sizeof(struct stackshot_duration), &out_addr)); struct stackshot_duration * stackshot_duration = (struct stackshot_duration *)out_addr; stackshot_duration->stackshot_duration = (abs_time_end - abs_time); stackshot_duration->stackshot_duration_outer = 0; @@ -2076,9 +2116,10 @@ proc_was_throttled_from_task(task_t task) { uint64_t was_throttled = 0; - if (task->bsd_info) + if (task->bsd_info) { was_throttled = proc_was_throttled(task->bsd_info); - + } + return was_throttled; } @@ -2087,9 +2128,10 @@ proc_did_throttle_from_task(task_t task) { uint64_t did_throttle = 0; - if (task->bsd_info) + if (task->bsd_info) { did_throttle = proc_did_throttle(task->bsd_info); - + } + return did_throttle; } @@ -2140,7 +2182,7 @@ kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap) memio_snap->compressor_size = VM_PAGE_COMPRESSOR_COUNT; kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted); - if ( ! 
kErr ) { + if (!kErr) { memio_snap->pages_wanted = (uint32_t)pages_wanted; memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed; memio_snap->pages_wanted_reclaimed_valid = 1; @@ -2163,7 +2205,7 @@ stackshot_memcpy(void *dst, const void *src, size_t len) } } else #endif - memcpy(dst, src, len); + memcpy(dst, src, len); } size_t @@ -2172,10 +2214,10 @@ stackshot_strlcpy(char *dst, const char *src, size_t maxlen) const size_t srclen = strlen(src); if (srclen < maxlen) { - stackshot_memcpy(dst, src, srclen+1); + stackshot_memcpy(dst, src, srclen + 1); } else if (maxlen != 0) { - stackshot_memcpy(dst, src, maxlen-1); - dst[maxlen-1] = '\0'; + stackshot_memcpy(dst, src, maxlen - 1); + dst[maxlen - 1] = '\0'; } return srclen; @@ -2200,8 +2242,9 @@ kdp_find_phys(vm_map_t map, vm_offset_t target_addr, boolean_t try_fault, uint32 cur_phys_addr = kdp_vtophys(map->pmap, target_addr); if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) { if (!try_fault || fault_stats.sfs_stopped_faulting) { - if (kdp_fault_results) + if (kdp_fault_results) { *kdp_fault_results |= KDP_FAULT_RESULT_PAGED_OUT; + } return 0; } @@ -2221,14 +2264,16 @@ kdp_find_phys(vm_map_t map, vm_offset_t target_addr, boolean_t try_fault, uint32 cur_phys_addr += (target_addr & PAGE_MASK); if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) { - if (kdp_fault_results) + if (kdp_fault_results) { *kdp_fault_results |= (KDP_FAULT_RESULT_TRIED_FAULT | KDP_FAULT_RESULT_PAGED_OUT); + } return 0; } - if (kdp_fault_results) + if (kdp_fault_results) { *kdp_fault_results |= KDP_FAULT_RESULT_FAULTED_IN; + } fault_stats.sfs_pages_faulted_in++; } else { @@ -2288,11 +2333,11 @@ kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, boolean_t try_ * On embedded the panic buffer is mapped as device memory and doesn't allow * unaligned accesses. To prevent these, we copy over bytes individually here. */ - if (panic_stackshot) + if (panic_stackshot) { stackshot_memcpy(kvaddr, (const void *)phystokv(phys_src), cur_size); - else + } else #endif /* CONFIG_EMBEDDED */ - bcopy_phys(phys_src, phys_dest, cur_size); + bcopy_phys(phys_src, phys_dest, cur_size); } else { break; } @@ -2302,7 +2347,7 @@ kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, boolean_t try_ rem -= cur_size; } - return (rem == 0); + return rem == 0; } kern_return_t @@ -2345,7 +2390,6 @@ machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t cur_target_page = atop(cur_target_addr); if ((cur_target_page != prev_target_page) || validate_next_addr) { - /* * Alright; it wasn't our previous page. So * we must validate that there is a page @@ -2407,7 +2451,7 @@ stackshot_thread_is_idle_worker_unsafe(thread_t thread) * struct. See parkit() in kern/kern_support.c in libpthread. 
*/ return (thread->state & TH_WAIT) && - (thread->block_hint == kThreadWaitParkedWorkQueue); + (thread->block_hint == kThreadWaitParkedWorkQueue); } #if CONFIG_COALITIONS @@ -2422,8 +2466,9 @@ stackshot_coalition_jetsam_count(void *arg, int i, coalition_t coal) static void stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal) { - if (coalition_type(coal) != COALITION_TYPE_JETSAM) + if (coalition_type(coal) != COALITION_TYPE_JETSAM) { return; + } struct jetsam_coalition_snapshot *coalitions = (struct jetsam_coalition_snapshot*)arg; struct jetsam_coalition_snapshot *jcs = &coalitions[i]; @@ -2431,21 +2476,26 @@ stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal) jcs->jcs_id = coalition_id(coal); jcs->jcs_flags = 0; - if (coalition_term_requested(coal)) + if (coalition_term_requested(coal)) { jcs->jcs_flags |= kCoalitionTermRequested; - if (coalition_is_terminated(coal)) + } + if (coalition_is_terminated(coal)) { jcs->jcs_flags |= kCoalitionTerminated; - if (coalition_is_reaped(coal)) + } + if (coalition_is_reaped(coal)) { jcs->jcs_flags |= kCoalitionReaped; - if (coalition_is_privileged(coal)) + } + if (coalition_is_privileged(coal)) { jcs->jcs_flags |= kCoalitionPrivileged; + } leader = kdp_coalition_get_leader(coal); - if (leader) + if (leader) { jcs->jcs_leader_task_uniqueid = get_task_uniqueid(leader); - else + } else { jcs->jcs_leader_task_uniqueid = 0; + } } #endif /* CONFIG_COALITIONS */ @@ -2454,24 +2504,25 @@ stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal) static int stackshot_thread_has_valid_waitinfo(thread_t thread) { - if (!(thread->state & TH_WAIT)) + if (!(thread->state & TH_WAIT)) { return 0; + } switch (thread->block_hint) { - // If set to None or is a parked work queue, ignore it - case kThreadWaitParkedWorkQueue: - case kThreadWaitNone: - return 0; - // There is a short window where the pthread kext removes a thread - // from its ksyn wait queue before waking the thread up - case kThreadWaitPThreadMutex: - case kThreadWaitPThreadRWLockRead: - case kThreadWaitPThreadRWLockWrite: - case kThreadWaitPThreadCondVar: - return (kdp_pthread_get_thread_kwq(thread) != NULL); - // All other cases are valid block hints if in a wait state - default: - return 1; + // If set to None or is a parked work queue, ignore it + case kThreadWaitParkedWorkQueue: + case kThreadWaitNone: + return 0; + // There is a short window where the pthread kext removes a thread + // from its ksyn wait queue before waking the thread up + case kThreadWaitPThreadMutex: + case kThreadWaitPThreadRWLockRead: + case kThreadWaitPThreadRWLockWrite: + case kThreadWaitPThreadCondVar: + return kdp_pthread_get_thread_kwq(thread) != NULL; + // All other cases are valid block hints if in a wait state + default: + return 1; } } @@ -2481,42 +2532,41 @@ stackshot_thread_wait_owner_info(thread_t thread, thread_waitinfo_t *waitinfo) waitinfo->waiter = thread_tid(thread); waitinfo->wait_type = thread->block_hint; switch (waitinfo->wait_type) { - case kThreadWaitKernelMutex: - kdp_lck_mtx_find_owner(thread->waitq, thread->wait_event, waitinfo); - break; - case kThreadWaitPortReceive: - kdp_mqueue_recv_find_owner(thread->waitq, thread->wait_event, waitinfo); - break; - case kThreadWaitPortSend: - kdp_mqueue_send_find_owner(thread->waitq, thread->wait_event, waitinfo); - break; - case kThreadWaitSemaphore: - kdp_sema_find_owner(thread->waitq, thread->wait_event, waitinfo); - break; - case kThreadWaitUserLock: - kdp_ulock_find_owner(thread->waitq, thread->wait_event, 
waitinfo); - break; - case kThreadWaitKernelRWLockRead: - case kThreadWaitKernelRWLockWrite: - case kThreadWaitKernelRWLockUpgrade: - kdp_rwlck_find_owner(thread->waitq, thread->wait_event, waitinfo); - break; - case kThreadWaitPThreadMutex: - case kThreadWaitPThreadRWLockRead: - case kThreadWaitPThreadRWLockWrite: - case kThreadWaitPThreadCondVar: - kdp_pthread_find_owner(thread, waitinfo); - break; - case kThreadWaitWorkloopSyncWait: - kdp_workloop_sync_wait_find_owner(thread, thread->wait_event, waitinfo); - break; - case kThreadWaitOnProcess: - kdp_wait4_find_process(thread, thread->wait_event, waitinfo); - break; - default: - waitinfo->owner = 0; - waitinfo->context = 0; - break; + case kThreadWaitKernelMutex: + kdp_lck_mtx_find_owner(thread->waitq, thread->wait_event, waitinfo); + break; + case kThreadWaitPortReceive: + kdp_mqueue_recv_find_owner(thread->waitq, thread->wait_event, waitinfo); + break; + case kThreadWaitPortSend: + kdp_mqueue_send_find_owner(thread->waitq, thread->wait_event, waitinfo); + break; + case kThreadWaitSemaphore: + kdp_sema_find_owner(thread->waitq, thread->wait_event, waitinfo); + break; + case kThreadWaitUserLock: + kdp_ulock_find_owner(thread->waitq, thread->wait_event, waitinfo); + break; + case kThreadWaitKernelRWLockRead: + case kThreadWaitKernelRWLockWrite: + case kThreadWaitKernelRWLockUpgrade: + kdp_rwlck_find_owner(thread->waitq, thread->wait_event, waitinfo); + break; + case kThreadWaitPThreadMutex: + case kThreadWaitPThreadRWLockRead: + case kThreadWaitPThreadRWLockWrite: + case kThreadWaitPThreadCondVar: + kdp_pthread_find_owner(thread, waitinfo); + break; + case kThreadWaitWorkloopSyncWait: + kdp_workloop_sync_wait_find_owner(thread, thread->wait_event, waitinfo); + break; + case kThreadWaitOnProcess: + kdp_wait4_find_process(thread, thread->wait_event, waitinfo); + break; + default: + waitinfo->owner = 0; + waitinfo->context = 0; + break; } } - diff --git a/osfmk/kern/kern_types.h b/osfmk/kern/kern_types.h index 8308df68d..556f67273 100644 --- a/osfmk/kern/kern_types.h +++ b/osfmk/kern/kern_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,66 +22,66 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifndef _KERN_KERN_TYPES_H_ -#define _KERN_KERN_TYPES_H_ +#ifndef _KERN_KERN_TYPES_H_ +#define _KERN_KERN_TYPES_H_ #include #include #include -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef MACH_KERNEL_PRIVATE +#ifndef MACH_KERNEL_PRIVATE -struct zone ; +struct zone; #ifndef __LP64__ -struct wait_queue { unsigned int opaque[2]; uintptr_t opaquep[2]; } ; +struct wait_queue { unsigned int opaque[2]; uintptr_t opaquep[2]; }; #else struct wait_queue { unsigned char opaque[32]; }; #endif -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -typedef struct zone *zone_t; -#define ZONE_NULL ((zone_t) 0) +typedef struct zone *zone_t; +#define ZONE_NULL ((zone_t) 0) -typedef struct wait_queue *wait_queue_t; -#define WAIT_QUEUE_NULL ((wait_queue_t) 0) -#define SIZEOF_WAITQUEUE sizeof(struct wait_queue) +typedef struct wait_queue *wait_queue_t; +#define WAIT_QUEUE_NULL ((wait_queue_t) 0) +#define SIZEOF_WAITQUEUE sizeof(struct wait_queue) -typedef vm_offset_t ipc_kobject_t; -#define IKO_NULL ((ipc_kobject_t) 0) +typedef vm_offset_t ipc_kobject_t; +#define IKO_NULL ((ipc_kobject_t) 0) -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -typedef void *event_t; /* wait event */ -#define NO_EVENT ((event_t) 0) +typedef void *event_t; /* wait event */ +#define NO_EVENT ((event_t) 0) -typedef uint64_t event64_t; /* 64 bit wait event */ -#define NO_EVENT64 ((event64_t) 0) -#define CAST_EVENT64_T(a_ptr) ((event64_t)((uintptr_t)(a_ptr))) +typedef uint64_t event64_t; /* 64 bit wait event */ +#define NO_EVENT64 ((event64_t) 0) +#define CAST_EVENT64_T(a_ptr) ((event64_t)((uintptr_t)(a_ptr))) /* * Possible wait_result_t values. */ typedef int wait_result_t; -#define THREAD_WAITING -1 /* thread is waiting */ -#define THREAD_AWAKENED 0 /* normal wakeup */ -#define THREAD_TIMED_OUT 1 /* timeout expired */ -#define THREAD_INTERRUPTED 2 /* aborted/interrupted */ -#define THREAD_RESTART 3 /* restart operation entirely */ +#define THREAD_WAITING -1 /* thread is waiting */ +#define THREAD_AWAKENED 0 /* normal wakeup */ +#define THREAD_TIMED_OUT 1 /* timeout expired */ +#define THREAD_INTERRUPTED 2 /* aborted/interrupted */ +#define THREAD_RESTART 3 /* restart operation entirely */ #define THREAD_NOT_WAITING 10 /* thread didn't need to wait */ -typedef void (*thread_continue_t)(void *, wait_result_t); -#define THREAD_CONTINUE_NULL ((thread_continue_t) 0) +typedef void (*thread_continue_t)(void *, wait_result_t); +#define THREAD_CONTINUE_NULL ((thread_continue_t) 0) /* * Interruptible flag for waits. 
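/*
 * Illustrative aside, not part of the patch: a minimal sketch of how the
 * wait_result_t values and thread_continue_t type realigned above are
 * consumed through the Mach wait KPI (assumes <kern/sched_prim.h> for
 * assert_wait() and thread_block(); the event pointer and function name
 * are hypothetical).
 */
static void
example_wait_for_event(void *my_event)
{
	wait_result_t wr;

	assert_wait((event_t)my_event, THREAD_UNINT);
	wr = thread_block(THREAD_CONTINUE_NULL);

	switch (wr) {
	case THREAD_AWAKENED:
		/* normal wakeup: another thread ran thread_wakeup(my_event) */
		break;
	case THREAD_RESTART:
		/* restart the operation entirely, per the definitions above */
		break;
	default:
		break;
	}
}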
@@ -173,52 +173,52 @@ typedef int wait_interrupt_t; #define THREAD_WAIT_NOREPORT (THREAD_WAIT_NOREPORT_KERNEL | THREAD_WAIT_NOREPORT_USER) typedef int wait_timeout_urgency_t; -#define TIMEOUT_URGENCY_SYS_NORMAL 0x00 /* use default leeway thresholds for system */ -#define TIMEOUT_URGENCY_SYS_CRITICAL 0x01 /* use critical leeway thresholds for system */ -#define TIMEOUT_URGENCY_SYS_BACKGROUND 0x02 /* use background leeway thresholds for system */ +#define TIMEOUT_URGENCY_SYS_NORMAL 0x00 /* use default leeway thresholds for system */ +#define TIMEOUT_URGENCY_SYS_CRITICAL 0x01 /* use critical leeway thresholds for system */ +#define TIMEOUT_URGENCY_SYS_BACKGROUND 0x02 /* use background leeway thresholds for system */ -#define TIMEOUT_URGENCY_USER_MASK 0x10 /* mask to identify user timeout urgency classes */ -#define TIMEOUT_URGENCY_USER_NORMAL 0x10 /* use default leeway thresholds for user */ -#define TIMEOUT_URGENCY_USER_CRITICAL 0x11 /* use critical leeway thresholds for user */ -#define TIMEOUT_URGENCY_USER_BACKGROUND 0x12 /* use background leeway thresholds for user */ +#define TIMEOUT_URGENCY_USER_MASK 0x10 /* mask to identify user timeout urgency classes */ +#define TIMEOUT_URGENCY_USER_NORMAL 0x10 /* use default leeway thresholds for user */ +#define TIMEOUT_URGENCY_USER_CRITICAL 0x11 /* use critical leeway thresholds for user */ +#define TIMEOUT_URGENCY_USER_BACKGROUND 0x12 /* use background leeway thresholds for user */ -#define TIMEOUT_URGENCY_MASK 0x13 /* mask to identify timeout urgency */ +#define TIMEOUT_URGENCY_MASK 0x13 /* mask to identify timeout urgency */ -#define TIMEOUT_URGENCY_LEEWAY 0x20 /* don't ignore provided leeway value */ +#define TIMEOUT_URGENCY_LEEWAY 0x20 /* don't ignore provided leeway value */ -#define TIMEOUT_URGENCY_FIRST_AVAIL 0x40 /* first available bit outside of urgency mask/leeway */ -#define TIMEOUT_URGENCY_RATELIMITED 0x80 +#define TIMEOUT_URGENCY_FIRST_AVAIL 0x40 /* first available bit outside of urgency mask/leeway */ +#define TIMEOUT_URGENCY_RATELIMITED 0x80 /* * Timeout and deadline tokens for waits. * The following tokens define common values for leeway and deadline parameters. */ -#define TIMEOUT_NO_LEEWAY (0ULL) -#define TIMEOUT_WAIT_FOREVER (0ULL) +#define TIMEOUT_NO_LEEWAY (0ULL) +#define TIMEOUT_WAIT_FOREVER (0ULL) -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE /* * n.b. 
this is defined in thread_call.h, but in the TIMEOUT_URGENCY flags space: * #define THREAD_CALL_CONTINUOUS 0x100 */ -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include -typedef struct clock *clock_t; +typedef struct clock *clock_t; -typedef struct mig_object *mig_object_t; -#define MIG_OBJECT_NULL ((mig_object_t) 0) +typedef struct mig_object *mig_object_t; +#define MIG_OBJECT_NULL ((mig_object_t) 0) -typedef struct mig_notify *mig_notify_t; -#define MIG_NOTIFY_NULL ((mig_notify_t) 0) +typedef struct mig_notify *mig_notify_t; +#define MIG_NOTIFY_NULL ((mig_notify_t) 0) -typedef struct pset_node *pset_node_t; -#define PSET_NODE_NULL ((pset_node_t) 0) +typedef struct pset_node *pset_node_t; +#define PSET_NODE_NULL ((pset_node_t) 0) -typedef struct affinity_set *affinity_set_t; -#define AFFINITY_SET_NULL ((affinity_set_t) 0) +typedef struct affinity_set *affinity_set_t; +#define AFFINITY_SET_NULL ((affinity_set_t) 0) typedef struct run_queue *run_queue_t; #define RUN_QUEUE_NULL ((run_queue_t) 0) @@ -226,61 +226,61 @@ typedef struct run_queue *run_queue_t; typedef struct grrr_run_queue *grrr_run_queue_t; #define GRRR_RUN_QUEUE_NULL ((grrr_run_queue_t) 0) -typedef struct grrr_group *grrr_group_t; -#define GRRR_GROUP_NULL ((grrr_group_t) 0) +typedef struct grrr_group *grrr_group_t; +#define GRRR_GROUP_NULL ((grrr_group_t) 0) #if defined(CONFIG_SCHED_MULTIQ) typedef struct sched_group *sched_group_t; #define SCHED_GROUP_NULL ((sched_group_t) 0) #endif /* defined(CONFIG_SCHED_MULTIQ) */ -#else /* MACH_KERNEL_PRIVATE */ +#else /* MACH_KERNEL_PRIVATE */ -struct wait_queue_set ; -struct _wait_queue_link ; +struct wait_queue_set; +struct _wait_queue_link; -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -typedef struct wait_queue_set *wait_queue_set_t; -#define WAIT_QUEUE_SET_NULL ((wait_queue_set_t)0) -#define SIZEOF_WAITQUEUE_SET wait_queue_set_size() +typedef struct wait_queue_set *wait_queue_set_t; +#define WAIT_QUEUE_SET_NULL ((wait_queue_set_t)0) +#define SIZEOF_WAITQUEUE_SET wait_queue_set_size() -typedef struct _wait_queue_link *wait_queue_link_t; -#define WAIT_QUEUE_LINK_NULL ((wait_queue_link_t)0) -#define SIZEOF_WAITQUEUE_LINK wait_queue_link_size() +typedef struct _wait_queue_link *wait_queue_link_t; +#define WAIT_QUEUE_LINK_NULL ((wait_queue_link_t)0) +#define SIZEOF_WAITQUEUE_LINK wait_queue_link_size() -typedef struct perfcontrol_state *perfcontrol_state_t; -#define PERFCONTROL_STATE_NULL ((perfcontrol_state_t)0) +typedef struct perfcontrol_state *perfcontrol_state_t; +#define PERFCONTROL_STATE_NULL ((perfcontrol_state_t)0) /* * Enum to define the event which caused the CLPC callout */ typedef enum perfcontrol_event { - /* - * Thread State Update Events - * Used to indicate events that update properties for - * a given thread. These events are passed as part of the - * sched_perfcontrol_state_update_t callout - */ - QUANTUM_EXPIRY = 1, - THREAD_GROUP_UPDATE = 2, - PERFCONTROL_ATTR_UPDATE = 3, - /* - * Context Switch Events - * Used to indicate events that switch from one thread - * to the other. These events are passed as part of the - * sched_perfcontrol_csw_t callout. - */ - CONTEXT_SWITCH = 10, - IDLE = 11 + /* + * Thread State Update Events + * Used to indicate events that update properties for + * a given thread. 
These events are passed as part of the + * sched_perfcontrol_state_update_t callout + */ + QUANTUM_EXPIRY = 1, + THREAD_GROUP_UPDATE = 2, + PERFCONTROL_ATTR_UPDATE = 3, + /* + * Context Switch Events + * Used to indicate events that switch from one thread + * to the other. These events are passed as part of the + * sched_perfcontrol_csw_t callout. + */ + CONTEXT_SWITCH = 10, + IDLE = 11 } perfcontrol_event; -/* +/* * Flags for the sched_perfcontrol_csw_t & sched_perfcontrol_state_update_t * callouts. * Currently defined flags are: - * PERFCONTROL_CALLOUT_WAKE_UNSAFE - Flag to indicate its unsafe to - * do a wakeup as part of this callout. If this is set, it + * PERFCONTROL_CALLOUT_WAKE_UNSAFE - Flag to indicate its unsafe to + * do a wakeup as part of this callout. If this is set, it * indicates that the scheduler holds a spinlock which might be needed * in the wakeup path. In that case CLPC should do a thread_call * instead of a direct wakeup to run their workloop thread. @@ -289,29 +289,29 @@ typedef enum perfcontrol_event { /* * Enum to define the perfcontrol class for thread. - * thread_get_perfcontrol_class() takes the thread's - * priority, QoS, urgency etc. into consideration and + * thread_get_perfcontrol_class() takes the thread's + * priority, QoS, urgency etc. into consideration and * produces a value in this enum. */ typedef enum perfcontrol_class { - /* Idle thread */ - PERFCONTROL_CLASS_IDLE = 1, - /* Kernel thread */ - PERFCONTROL_CLASS_KERNEL = 2, - /* Realtime Thread */ - PERFCONTROL_CLASS_REALTIME = 3, - /* Background Thread */ - PERFCONTROL_CLASS_BACKGROUND = 4, - /* Utility Thread */ - PERFCONTROL_CLASS_UTILITY = 5, - /* Non-UI Thread (Default/Legacy) */ - PERFCONTROL_CLASS_NONUI = 6, - /* UI Thread (UI/IN) */ - PERFCONTROL_CLASS_UI = 7, - /* Above UI Thread */ - PERFCONTROL_CLASS_ABOVEUI = 8, + /* Idle thread */ + PERFCONTROL_CLASS_IDLE = 1, + /* Kernel thread */ + PERFCONTROL_CLASS_KERNEL = 2, + /* Realtime Thread */ + PERFCONTROL_CLASS_REALTIME = 3, + /* Background Thread */ + PERFCONTROL_CLASS_BACKGROUND = 4, + /* Utility Thread */ + PERFCONTROL_CLASS_UTILITY = 5, + /* Non-UI Thread (Default/Legacy) */ + PERFCONTROL_CLASS_NONUI = 6, + /* UI Thread (UI/IN) */ + PERFCONTROL_CLASS_UI = 7, + /* Above UI Thread */ + PERFCONTROL_CLASS_ABOVEUI = 8, } perfcontrol_class_t; -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -#endif /* _KERN_KERN_TYPES_H_ */ +#endif /* _KERN_KERN_TYPES_H_ */ diff --git a/osfmk/kern/kext_alloc.c b/osfmk/kern/kext_alloc.c index 02ef41fba..3d5702d35 100644 --- a/osfmk/kern/kext_alloc.c +++ b/osfmk/kern/kext_alloc.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -60,145 +60,143 @@ static mach_vm_offset_t kext_post_boot_base = 0; * kernel's text segment. To ensure this happens, we snag 2GB of kernel VM * as early as possible for kext allocations. */ -void +void kext_alloc_init(void) { #if CONFIG_KEXT_BASEMENT - kern_return_t rval = 0; - kernel_segment_command_t *text = NULL; - kernel_segment_command_t *prelinkTextSegment = NULL; - mach_vm_offset_t text_end, text_start; - mach_vm_size_t text_size; - mach_vm_size_t kext_alloc_size; - - /* Determine the start of the kernel's __TEXT segment and determine the - * lower bound of the allocated submap for kext allocations. - */ - - text = getsegbyname(SEG_TEXT); - text_start = vm_map_trunc_page(text->vmaddr, - VM_MAP_PAGE_MASK(kernel_map)); - text_start &= ~((512ULL * 1024 * 1024 * 1024) - 1); - text_end = vm_map_round_page(text->vmaddr + text->vmsize, - VM_MAP_PAGE_MASK(kernel_map)); - text_size = text_end - text_start; - - kext_alloc_base = KEXT_ALLOC_BASE(text_end); - kext_alloc_size = KEXT_ALLOC_SIZE(text_size); - kext_alloc_max = kext_alloc_base + kext_alloc_size; - - /* Post boot kext allocation will start after the prelinked kexts */ - prelinkTextSegment = getsegbyname("__PRELINK_TEXT"); - if (prelinkTextSegment) { - /* use kext_post_boot_base to start allocations past all the prelinked - * kexts - */ - kext_post_boot_base = - vm_map_round_page(kext_alloc_base + prelinkTextSegment->vmsize, - VM_MAP_PAGE_MASK(kernel_map)); - } - else { - kext_post_boot_base = kext_alloc_base; - } - - /* Allocate the sub block of the kernel map */ - rval = kmem_suballoc(kernel_map, (vm_offset_t *) &kext_alloc_base, - kext_alloc_size, /* pageable */ TRUE, - VM_FLAGS_FIXED|VM_FLAGS_OVERWRITE, - VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_KEXT, - &g_kext_map); - if (rval != KERN_SUCCESS) { - panic("kext_alloc_init: kmem_suballoc failed 0x%x\n", rval); - } - - if ((kext_alloc_base + kext_alloc_size) > kext_alloc_max) { - panic("kext_alloc_init: failed to get first 2GB\n"); - } - - if (kernel_map->min_offset > kext_alloc_base) { - kernel_map->min_offset = kext_alloc_base; - } - - printf("kext submap [0x%lx - 0x%lx], kernel text [0x%lx - 0x%lx]\n", - VM_KERNEL_UNSLIDE(kext_alloc_base), - VM_KERNEL_UNSLIDE(kext_alloc_max), - VM_KERNEL_UNSLIDE(text->vmaddr), - VM_KERNEL_UNSLIDE(text->vmaddr + text->vmsize)); + kern_return_t rval = 0; + kernel_segment_command_t *text = NULL; + kernel_segment_command_t *prelinkTextSegment = NULL; + mach_vm_offset_t text_end, text_start; + mach_vm_size_t text_size; + mach_vm_size_t kext_alloc_size; + + /* Determine the start of the kernel's __TEXT segment and determine the + * lower bound of the allocated submap for kext allocations. 
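 *
 * Worked example with an illustrative address: 512GB is 1ULL << 39, so the
 * mask below clears the low 39 bits, e.g.
 *	0xffffff8000200000 & ~((512ULL * 1024 * 1024 * 1024) - 1)
 *	    == 0xffffff8000000000
 * i.e. text_start is truncated down to a 512 GiB boundary before text_size
 * and the kext basement bounds are derived from it.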
+ */ + + text = getsegbyname(SEG_TEXT); + text_start = vm_map_trunc_page(text->vmaddr, + VM_MAP_PAGE_MASK(kernel_map)); + text_start &= ~((512ULL * 1024 * 1024 * 1024) - 1); + text_end = vm_map_round_page(text->vmaddr + text->vmsize, + VM_MAP_PAGE_MASK(kernel_map)); + text_size = text_end - text_start; + + kext_alloc_base = KEXT_ALLOC_BASE(text_end); + kext_alloc_size = KEXT_ALLOC_SIZE(text_size); + kext_alloc_max = kext_alloc_base + kext_alloc_size; + + /* Post boot kext allocation will start after the prelinked kexts */ + prelinkTextSegment = getsegbyname("__PRELINK_TEXT"); + if (prelinkTextSegment) { + /* use kext_post_boot_base to start allocations past all the prelinked + * kexts + */ + kext_post_boot_base = + vm_map_round_page(kext_alloc_base + prelinkTextSegment->vmsize, + VM_MAP_PAGE_MASK(kernel_map)); + } else { + kext_post_boot_base = kext_alloc_base; + } + + /* Allocate the sub block of the kernel map */ + rval = kmem_suballoc(kernel_map, (vm_offset_t *) &kext_alloc_base, + kext_alloc_size, /* pageable */ TRUE, + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, + VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_KEXT, + &g_kext_map); + if (rval != KERN_SUCCESS) { + panic("kext_alloc_init: kmem_suballoc failed 0x%x\n", rval); + } + + if ((kext_alloc_base + kext_alloc_size) > kext_alloc_max) { + panic("kext_alloc_init: failed to get first 2GB\n"); + } + + if (kernel_map->min_offset > kext_alloc_base) { + kernel_map->min_offset = kext_alloc_base; + } + + printf("kext submap [0x%lx - 0x%lx], kernel text [0x%lx - 0x%lx]\n", + VM_KERNEL_UNSLIDE(kext_alloc_base), + VM_KERNEL_UNSLIDE(kext_alloc_max), + VM_KERNEL_UNSLIDE(text->vmaddr), + VM_KERNEL_UNSLIDE(text->vmaddr + text->vmsize)); #else - g_kext_map = kernel_map; - kext_alloc_base = VM_MIN_KERNEL_ADDRESS; - kext_alloc_max = VM_MAX_KERNEL_ADDRESS; + g_kext_map = kernel_map; + kext_alloc_base = VM_MIN_KERNEL_ADDRESS; + kext_alloc_max = VM_MAX_KERNEL_ADDRESS; #endif /* CONFIG_KEXT_BASEMENT */ } kern_return_t kext_alloc(vm_offset_t *_addr, vm_size_t size, boolean_t fixed) { - kern_return_t rval = 0; + kern_return_t rval = 0; #if CONFIG_KEXT_BASEMENT - mach_vm_offset_t addr = (fixed) ? *_addr : kext_post_boot_base; + mach_vm_offset_t addr = (fixed) ? *_addr : kext_post_boot_base; #else - mach_vm_offset_t addr = (fixed) ? *_addr : kext_alloc_base; + mach_vm_offset_t addr = (fixed) ? *_addr : kext_alloc_base; #endif - int flags = (fixed) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE; - + int flags = (fixed) ? VM_FLAGS_FIXED : VM_FLAGS_ANYWHERE; + #if CONFIG_KEXT_BASEMENT - /* Allocate the kext virtual memory - * 10608884 - use mach_vm_map since we want VM_FLAGS_ANYWHERE allocated past - * kext_post_boot_base (when possible). mach_vm_allocate will always - * start at 0 into the map no matter what you pass in addr. We want non - * fixed (post boot) kext allocations to start looking for free space - * just past where prelinked kexts have loaded. - */ - rval = mach_vm_map_kernel(g_kext_map, - &addr, - size, - 0, - flags, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_KEXT, - MACH_PORT_NULL, - 0, - TRUE, - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); - if (rval != KERN_SUCCESS) { - printf("mach_vm_map failed - %d\n", rval); - goto finish; - } + /* Allocate the kext virtual memory + * 10608884 - use mach_vm_map since we want VM_FLAGS_ANYWHERE allocated past + * kext_post_boot_base (when possible). mach_vm_allocate will always + * start at 0 into the map no matter what you pass in addr. 
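The hint selection is the crux of the comment above: mach_vm_map() honors the passed-in address as the starting point of a VM_FLAGS_ANYWHERE search, while mach_vm_allocate() always searches from the bottom of the map. A small sketch of the fixed-versus-anywhere decision, with hypothetical stand-in names and flag values:

#include <stdbool.h>
#include <stdint.h>

#define FLAG_FIXED    0x1 /* stand-in for VM_FLAGS_FIXED */
#define FLAG_ANYWHERE 0x2 /* stand-in for VM_FLAGS_ANYWHERE */

struct alloc_hint {
	uint64_t addr; /* where the VM search starts */
	int flags;
};

/* Fixed requests keep the caller's address; anywhere requests begin at
 * the post-boot base so new kexts land past the prelinked ones. */
struct alloc_hint
choose_kext_hint(bool fixed, uint64_t requested, uint64_t post_boot_base)
{
	struct alloc_hint h;
	h.addr = fixed ? requested : post_boot_base;
	h.flags = fixed ? FLAG_FIXED : FLAG_ANYWHERE;
	return h;
}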
We want non + * fixed (post boot) kext allocations to start looking for free space + * just past where prelinked kexts have loaded. + */ + rval = mach_vm_map_kernel(g_kext_map, + &addr, + size, + 0, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_KEXT, + MACH_PORT_NULL, + 0, + TRUE, + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); + if (rval != KERN_SUCCESS) { + printf("mach_vm_map failed - %d\n", rval); + goto finish; + } #else - rval = mach_vm_allocate_kernel(g_kext_map, &addr, size, flags, VM_KERN_MEMORY_KEXT); - if (rval != KERN_SUCCESS) { - printf("vm_allocate failed - %d\n", rval); - goto finish; - } + rval = mach_vm_allocate_kernel(g_kext_map, &addr, size, flags, VM_KERN_MEMORY_KEXT); + if (rval != KERN_SUCCESS) { + printf("vm_allocate failed - %d\n", rval); + goto finish; + } #endif - /* Check that the memory is reachable by kernel text */ - if ((addr + size) > kext_alloc_max) { - kext_free((vm_offset_t)addr, size); - rval = KERN_INVALID_ADDRESS; - goto finish; - } + /* Check that the memory is reachable by kernel text */ + if ((addr + size) > kext_alloc_max) { + kext_free((vm_offset_t)addr, size); + rval = KERN_INVALID_ADDRESS; + goto finish; + } - *_addr = (vm_offset_t)addr; - rval = KERN_SUCCESS; + *_addr = (vm_offset_t)addr; + rval = KERN_SUCCESS; #if KASAN - kasan_notify_address(addr, size); + kasan_notify_address(addr, size); #endif finish: - return rval; + return rval; } -void +void kext_free(vm_offset_t addr, vm_size_t size) { - kern_return_t rval; + kern_return_t rval; - rval = mach_vm_deallocate(g_kext_map, addr, size); - assert(rval == KERN_SUCCESS); + rval = mach_vm_deallocate(g_kext_map, addr, size); + assert(rval == KERN_SUCCESS); } - diff --git a/osfmk/kern/kext_alloc.h b/osfmk/kern/kext_alloc.h index 0b4c67d15..79bbbb8d3 100644 --- a/osfmk/kern/kext_alloc.h +++ b/osfmk/kern/kext_alloc.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KEXT_ALLOC_H_ @@ -42,4 +42,3 @@ void kext_free(vm_offset_t addr, vm_size_t size); __END_DECLS #endif /* _KEXT_ALLOC_H_ */ - diff --git a/osfmk/kern/kmod.c b/osfmk/kern/kmod.c index ac38bac82..287a21382 100644 --- a/osfmk/kern/kmod.c +++ b/osfmk/kern/kmod.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -32,7 +32,7 @@ * Version 2.0. */ /* - * Copyright (c) 1999 Apple Inc. All rights reserved. + * Copyright (c) 1999 Apple Inc. All rights reserved. * * HISTORY * @@ -53,93 +53,93 @@ #include /********************************************************************* -********************************************************************** -*** KMOD INTERFACE DEPRECATED AS OF SNOWLEOPARD *** -********************************************************************** -********************************************************************** -* Except for kmod_get_info(), which continues to work for K32 with -* 32-bit clients, all remaining functions in this module remain -* for symbol linkage or MIG support only, -* and return KERN_NOT_SUPPORTED. -* -* Some kernel-internal portions have been moved to -* libkern/OSKextLib.cpp and libkern/c++/OSKext.cpp. -**********************************************************************/ + ********************************************************************** + *** KMOD INTERFACE DEPRECATED AS OF SNOWLEOPARD *** + ********************************************************************** + ********************************************************************** + * Except for kmod_get_info(), which continues to work for K32 with + * 32-bit clients, all remaining functions in this module remain + * for symbol linkage or MIG support only, + * and return KERN_NOT_SUPPORTED. + * + * Some kernel-internal portions have been moved to + * libkern/OSKextLib.cpp and libkern/c++/OSKext.cpp. 
+ **********************************************************************/ // bsd/sys/proc.h extern void proc_selfname(char * buf, int size); #define NOT_SUPPORTED_USER64() \ do { \ - char procname[64] = "unknown"; \ - proc_selfname(procname, sizeof(procname)); \ - printf("%s is not supported for 64-bit clients (called from %s)\n", \ - __FUNCTION__, procname); \ + char procname[64] = "unknown"; \ + proc_selfname(procname, sizeof(procname)); \ + printf("%s is not supported for 64-bit clients (called from %s)\n", \ + __FUNCTION__, procname); \ } while (0) #define NOT_SUPPORTED_KERNEL() \ do { \ - char procname[64] = "unknown"; \ - proc_selfname(procname, sizeof(procname)); \ - printf("%s is not supported on this kernel architecture (called from %s)\n", \ - __FUNCTION__, procname); \ + char procname[64] = "unknown"; \ + proc_selfname(procname, sizeof(procname)); \ + printf("%s is not supported on this kernel architecture (called from %s)\n", \ + __FUNCTION__, procname); \ } while (0) #define KMOD_MIG_UNUSED __unused /********************************************************************* -* Old MIG routines that are no longer supported. -********************************************************************** -* We have to keep these around for ppc, i386, and x86_64. A 32-bit -* user-space client might call into the 64-bit kernel. Only -* kmod_get_info() retains a functional implementation (ppc/i386). -**********************************************************************/ + * Old MIG routines that are no longer supported. + ********************************************************************** + * We have to keep these around for ppc, i386, and x86_64. A 32-bit + * user-space client might call into the 64-bit kernel. Only + * kmod_get_info() retains a functional implementation (ppc/i386). 
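Every routine that follows is the same shim: log the stale caller, then fail with KERN_NOT_SUPPORTED. A standalone sketch of the pattern, with proc_selfname() replaced by a hypothetical stand-in and the return code assumed to match <mach/kern_return.h>:

#include <stdio.h>

typedef int kern_return_t;
#define KERN_NOT_SUPPORTED 46 /* assumed value; real code uses <mach/kern_return.h> */

/* Hypothetical stand-in for the kernel's proc_selfname(). */
static void
selfname_stub(char *buf, int size)
{
	snprintf(buf, (size_t)size, "legacy_tool");
}

#define NOT_SUPPORTED_STUB() do { \
	char procname[64] = "unknown"; \
	selfname_stub(procname, sizeof(procname)); \
	printf("%s is not supported (called from %s)\n", __func__, procname); \
} while (0)

kern_return_t
kmod_create_stub(void)
{
	NOT_SUPPORTED_STUB();
	return KERN_NOT_SUPPORTED;
}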
+ **********************************************************************/ kern_return_t kmod_create( - host_priv_t host_priv __unused, - vm_address_t addr __unused, - kmod_t * id __unused) + host_priv_t host_priv __unused, + vm_address_t addr __unused, + kmod_t * id __unused) { - NOT_SUPPORTED_KERNEL(); - return KERN_NOT_SUPPORTED; + NOT_SUPPORTED_KERNEL(); + return KERN_NOT_SUPPORTED; } /********************************************************************/ kern_return_t kmod_destroy( - host_priv_t host_priv __unused, - kmod_t id __unused) + host_priv_t host_priv __unused, + kmod_t id __unused) { - NOT_SUPPORTED_KERNEL(); - return KERN_NOT_SUPPORTED; + NOT_SUPPORTED_KERNEL(); + return KERN_NOT_SUPPORTED; } /********************************************************************/ kern_return_t kmod_control( - host_priv_t host_priv __unused, - kmod_t id __unused, - kmod_control_flavor_t flavor __unused, - kmod_args_t * data __unused, - mach_msg_type_number_t * dataCount __unused) + host_priv_t host_priv __unused, + kmod_t id __unused, + kmod_control_flavor_t flavor __unused, + kmod_args_t * data __unused, + mach_msg_type_number_t * dataCount __unused) { - NOT_SUPPORTED_KERNEL(); - return KERN_NOT_SUPPORTED; + NOT_SUPPORTED_KERNEL(); + return KERN_NOT_SUPPORTED; }; /********************************************************************/ kern_return_t kmod_get_info( - host_t host __unused, - kmod_info_array_t * kmod_list KMOD_MIG_UNUSED, - mach_msg_type_number_t * kmodCount KMOD_MIG_UNUSED); + host_t host __unused, + kmod_info_array_t * kmod_list KMOD_MIG_UNUSED, + mach_msg_type_number_t * kmodCount KMOD_MIG_UNUSED); kern_return_t kmod_get_info( - host_t host __unused, - kmod_info_array_t * kmod_list KMOD_MIG_UNUSED, - mach_msg_type_number_t * kmodCount KMOD_MIG_UNUSED) + host_t host __unused, + kmod_info_array_t * kmod_list KMOD_MIG_UNUSED, + mach_msg_type_number_t * kmodCount KMOD_MIG_UNUSED) { - NOT_SUPPORTED_KERNEL(); - return KERN_NOT_SUPPORTED; + NOT_SUPPORTED_KERNEL(); + return KERN_NOT_SUPPORTED; } diff --git a/osfmk/kern/kpc.h b/osfmk/kern/kpc.h index 3af897184..b59a37b7d 100644 --- a/osfmk/kern/kpc.h +++ b/osfmk/kern/kpc.h @@ -56,20 +56,20 @@ __BEGIN_DECLS #define KPC_ALL_CPUS (1u << 31) /* action id setters/getters */ -#define FIXED_ACTIONID(ctr) (kpc_actionid[(ctr)]) -#define CONFIGURABLE_ACTIONID(ctr) (kpc_actionid[(ctr) + kpc_fixed_count()]) +#define FIXED_ACTIONID(ctr) (kpc_actionid[(ctr)]) +#define CONFIGURABLE_ACTIONID(ctr) (kpc_actionid[(ctr) + kpc_fixed_count()]) /* reload counter setters/getters */ -#define FIXED_RELOAD(ctr) (current_cpu_datap()->cpu_kpc_reload[(ctr)]) -#define FIXED_RELOAD_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_reload[(ctr)]) -#define CONFIGURABLE_RELOAD(ctr) (current_cpu_datap()->cpu_kpc_reload[(ctr) + kpc_fixed_count()]) -#define CONFIGURABLE_RELOAD_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_reload[(ctr) + kpc_fixed_count()]) +#define FIXED_RELOAD(ctr) (current_cpu_datap()->cpu_kpc_reload[(ctr)]) +#define FIXED_RELOAD_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_reload[(ctr)]) +#define CONFIGURABLE_RELOAD(ctr) (current_cpu_datap()->cpu_kpc_reload[(ctr) + kpc_fixed_count()]) +#define CONFIGURABLE_RELOAD_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_reload[(ctr) + kpc_fixed_count()]) /* shadow counter setters/getters */ -#define FIXED_SHADOW(ctr) (current_cpu_datap()->cpu_kpc_shadow[(ctr)]) -#define FIXED_SHADOW_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_shadow[(ctr)]) -#define CONFIGURABLE_SHADOW(ctr) (current_cpu_datap()->cpu_kpc_shadow[(ctr) + kpc_fixed_count()]) -#define 
CONFIGURABLE_SHADOW_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_shadow[(ctr) + kpc_fixed_count()]) +#define FIXED_SHADOW(ctr) (current_cpu_datap()->cpu_kpc_shadow[(ctr)]) +#define FIXED_SHADOW_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_shadow[(ctr)]) +#define CONFIGURABLE_SHADOW(ctr) (current_cpu_datap()->cpu_kpc_shadow[(ctr) + kpc_fixed_count()]) +#define CONFIGURABLE_SHADOW_CPU(cpu, ctr) (cpu_datap(cpu)->cpu_kpc_shadow[(ctr) + kpc_fixed_count()]) /** * Callback for notification when PMCs are acquired/released by a task. The @@ -120,12 +120,12 @@ extern int kpc_get_pmu_version(void); extern int kpc_set_running(uint32_t classes); /* Read CPU counters */ -extern int kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes, - int *curcpu, uint64_t *buf); +extern int kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes, + int *curcpu, uint64_t *buf); /* Read shadow counters */ extern int kpc_get_shadow_counters( boolean_t all_cpus, uint32_t classes, - int *curcpu, uint64_t *buf ); + int *curcpu, uint64_t *buf ); /* Read current thread's counter accumulations */ extern int kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf); @@ -203,9 +203,9 @@ extern int kpc_disable_whitelist( int val ); * This is a deprecated function used by old Power Managers, new Power Managers * should use the @em kpc_reserve_pm_counters() function. This function actually * calls @em kpc_reserve_pm_counters() with the following arguments: - * - handler = handler - * - pmc_mask = 0x83 - * - custom_config = TRUE + * - handler = handler + * - pmc_mask = 0x83 + * - custom_config = TRUE * * See @em kpc_reserve_pm_counters() for more details about the return value. */ @@ -233,7 +233,7 @@ extern boolean_t kpc_register_pm_handler(void (*handler)(boolean_t)); * Manager can start using the reserved PMCs. */ extern boolean_t kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler, - boolean_t custom_config); + boolean_t custom_config); /* * Unregister the Power Manager as a PMCs user, and release the previously @@ -293,9 +293,9 @@ struct kpc_config_remote { /* handler for mp operations */ struct kpc_running_remote { - uint32_t classes; /* classes to run */ - uint64_t cfg_target_mask; /* configurable counters selected */ - uint64_t cfg_state_mask; /* configurable counters new state */ + uint32_t classes; /* classes to run */ + uint64_t cfg_target_mask; /* configurable counters selected */ + uint64_t cfg_state_mask; /* configurable counters new state */ }; /* handler for mp operations */ @@ -340,13 +340,12 @@ extern uint64_t kpc_get_configurable_pmc_mask(uint32_t classes); /* Interface for kexts to publish a kpc interface */ -struct kpc_driver -{ +struct kpc_driver { uint32_t (*get_classes)(void); uint32_t (*get_running)(void); int (*set_running)(uint32_t classes); - int (*get_cpu_counters)(boolean_t all_cpus, uint32_t classes, - int *curcpu, uint64_t *buf); + int (*get_cpu_counters)(boolean_t all_cpus, uint32_t classes, + int *curcpu, uint64_t *buf); int (*get_curthread_counters)(uint32_t *inoutcount, uint64_t *buf); uint32_t (*get_counter_count)(uint32_t classes); uint32_t (*get_config_count)(uint32_t classes); diff --git a/osfmk/kern/kpc_common.c b/osfmk/kern/kpc_common.c index 53f382ec4..1208bc2b8 100644 --- a/osfmk/kern/kpc_common.c +++ b/osfmk/kern/kpc_common.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. 
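The reload/shadow accessors above all index a single flat per-CPU array: fixed counters occupy the low slots and configurable counters follow at an offset of kpc_fixed_count(). A tiny model of that layout, with hypothetical counter counts:

#include <stdint.h>

#define NFIXED 2 /* hypothetical kpc_fixed_count() */
#define NCONF  4 /* hypothetical kpc_configurable_count() */

/* One flat array per CPU, mirroring cpu_kpc_reload indexing. */
static uint64_t cpu_kpc_reload_model[NFIXED + NCONF];

uint64_t *
fixed_reload_model(uint32_t ctr)
{
	return &cpu_kpc_reload_model[ctr];
}

uint64_t *
configurable_reload_model(uint32_t ctr)
{
	/* configurable counters sit just after the fixed ones */
	return &cpu_kpc_reload_model[ctr + NFIXED];
}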
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -49,7 +49,7 @@ uint32_t kpc_actionid[KPC_MAX_COUNTERS]; #define COUNTERBUF_SIZE_PER_CPU (KPC_MAX_COUNTERS * sizeof(uint64_t)) #define COUNTERBUF_SIZE (machine_info.logical_cpu_max * \ - COUNTERBUF_SIZE_PER_CPU) + COUNTERBUF_SIZE_PER_CPU) /* locks */ static lck_grp_attr_t *kpc_config_lckgrp_attr = NULL; @@ -91,21 +91,25 @@ kpc_register_cpu(struct cpu_data *cpu_data) * Buffers allocated through kpc_counterbuf_alloc() are large enough to * store all PMCs values from all CPUs. This mimics the userspace API. * This does not suit well with the per-CPU kpc buffers, since: - * 1. Buffers don't need to be this large. - * 2. The actual number of CPUs is not known at this point. + * 1. Buffers don't need to be this large. + * 2. The actual number of CPUs is not known at this point. * * CPUs are asked to callout into kpc when being registered, we'll * allocate the memory here. */ - if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) + if ((cpu_data->cpu_kpc_buf[0] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) { goto error; - if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) + } + if ((cpu_data->cpu_kpc_buf[1] = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) { goto error; - if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) + } + if ((cpu_data->cpu_kpc_shadow = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) { goto error; - if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) + } + if ((cpu_data->cpu_kpc_reload = kalloc(COUNTERBUF_SIZE_PER_CPU)) == NULL) { goto error; + } memset(cpu_data->cpu_kpc_buf[0], 0, COUNTERBUF_SIZE_PER_CPU); memset(cpu_data->cpu_kpc_buf[1], 0, COUNTERBUF_SIZE_PER_CPU); @@ -136,7 +140,7 @@ kpc_unregister_cpu(struct cpu_data *cpu_data) kfree(cpu_data->cpu_kpc_shadow, COUNTERBUF_SIZE_PER_CPU); cpu_data->cpu_kpc_shadow = NULL; } - if (cpu_data->cpu_kpc_reload != NULL) { + if (cpu_data->cpu_kpc_reload != NULL) { kfree(cpu_data->cpu_kpc_reload, COUNTERBUF_SIZE_PER_CPU); cpu_data->cpu_kpc_reload = NULL; } @@ -149,10 +153,11 @@ kpc_task_set_forced_all_ctrs(task_t task, boolean_t state) assert(task); task_lock(task); - if (state) + if (state) { task->t_kpc |= TASK_KPC_FORCED_ALL_CTRS; - else + } else { task->t_kpc &= ~TASK_KPC_FORCED_ALL_CTRS; + } task_unlock(task); } @@ -173,12 +178,14 @@ kpc_force_all_ctrs(task_t task, int val) * Refuse to do the operation if the counters are already forced by * another task. 
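kpc_register_cpu() above is a textbook allocate-then-unwind sequence, and the restyled hunk adds the braces the new kernel style requires. A userspace sketch of the same pattern, with calloc() standing in for kalloc() plus memset():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct cpu_bufs {
	uint64_t *buf[2];
	uint64_t *shadow;
	uint64_t *reload;
};

int
register_cpu_bufs(struct cpu_bufs *c, size_t size)
{
	memset(c, 0, sizeof(*c));
	if ((c->buf[0] = calloc(1, size)) == NULL) {
		goto error;
	}
	if ((c->buf[1] = calloc(1, size)) == NULL) {
		goto error;
	}
	if ((c->shadow = calloc(1, size)) == NULL) {
		goto error;
	}
	if ((c->reload = calloc(1, size)) == NULL) {
		goto error;
	}
	return 0;

error:
	/* free(NULL) is a no-op, so whatever did allocate is safe to release */
	free(c->buf[0]);
	free(c->buf[1]);
	free(c->shadow);
	return -1;
}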
*/ - if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) + if (kpc_get_force_all_ctrs() && !kpc_task_get_forced_all_ctrs(task)) { return EACCES; + } /* nothing to do if the state is not changing */ - if (old_state == new_state) + if (old_state == new_state) { return 0; + } /* notify the power manager */ if (kpc_pm_handler) { @@ -250,8 +257,9 @@ kpc_controls_counter(uint32_t ctr) assert(ctr < (kpc_fixed_count() + kpc_configurable_count())); - if (ctr < kpc_fixed_count()) + if (ctr < kpc_fixed_count()) { return kpc_controls_fixed_counters(); + } /* * By default kpc manages all PMCs, but if the Power Manager registered @@ -260,8 +268,9 @@ kpc_controls_counter(uint32_t ctr) * force_all_ctrs. */ pmc_mask = (1ULL << (ctr - kpc_fixed_count())); - if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) + if ((pmc_mask & kpc_pm_pmc_mask) && kpc_pm_has_custom_config && !force_all_ctrs) { return FALSE; + } return TRUE; } @@ -272,16 +281,19 @@ kpc_get_running(void) uint64_t pmc_mask = 0; uint32_t cur_state = 0; - if (kpc_is_running_fixed()) + if (kpc_is_running_fixed()) { cur_state |= KPC_CLASS_FIXED_MASK; + } pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK); - if (kpc_is_running_configurable(pmc_mask)) + if (kpc_is_running_configurable(pmc_mask)) { cur_state |= KPC_CLASS_CONFIGURABLE_MASK; + } pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK); - if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) + if ((pmc_mask != 0) && kpc_is_running_configurable(pmc_mask)) { cur_state |= KPC_CLASS_POWER_MASK; + } return cur_state; } @@ -290,7 +302,7 @@ kpc_get_running(void) int kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf) { - int enabled=0, offset=0; + int enabled = 0, offset = 0; uint64_t pmc_mask = 0ULL; assert(buf); @@ -298,8 +310,9 @@ kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf) enabled = ml_set_interrupts_enabled(FALSE); /* grab counters and CPU number as close as possible */ - if (curcpu) + if (curcpu) { *curcpu = current_processor()->cpu_id; + } if (classes & KPC_CLASS_FIXED_MASK) { kpc_get_fixed_counters(&buf[offset]); @@ -326,7 +339,7 @@ kpc_get_curcpu_counters(uint32_t classes, int *curcpu, uint64_t *buf) /* generic counter reading function, public api */ int kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes, - int *curcpu, uint64_t *buf) + int *curcpu, uint64_t *buf) { assert(buf); @@ -335,15 +348,16 @@ kpc_get_cpu_counters(boolean_t all_cpus, uint32_t classes, * CPUs is architecture dependent. This allows kpc to make the most of * the platform if memory mapped registers is supported. 
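That read path splits behind one public entry point, so callers never care whether the platform can sample remote PMCs. A minimal model of the kpc_get_cpu_counters() dispatch, with trivial stand-in readers:

#include <stdbool.h>
#include <stdint.h>

typedef int (*counter_reader_t)(uint32_t classes, int *curcpu, uint64_t *buf);

/* Stand-ins for kpc_get_all_cpus_counters()/kpc_get_curcpu_counters(). */
static int
read_all_cpus(uint32_t classes, int *curcpu, uint64_t *buf)
{
	(void)classes; (void)curcpu; (void)buf;
	return 0;
}

static int
read_curcpu(uint32_t classes, int *curcpu, uint64_t *buf)
{
	(void)classes; (void)curcpu; (void)buf;
	return 0;
}

int
get_cpu_counters_model(bool all_cpus, uint32_t classes, int *curcpu, uint64_t *buf)
{
	counter_reader_t rd = all_cpus ? read_all_cpus : read_curcpu;
	return rd(classes, curcpu, buf);
}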
*/ - if (all_cpus) + if (all_cpus) { return kpc_get_all_cpus_counters(classes, curcpu, buf); - else + } else { return kpc_get_curcpu_counters(classes, curcpu, buf); + } } int kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes, - int *curcpu, uint64_t *buf) + int *curcpu, uint64_t *buf) { int curcpu_id = current_processor()->cpu_id; uint32_t cfg_count = kpc_configurable_count(), offset = 0; @@ -355,13 +369,15 @@ kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes, enabled = ml_set_interrupts_enabled(FALSE); curcpu_id = current_processor()->cpu_id; - if (curcpu) + if (curcpu) { *curcpu = curcpu_id; + } for (int cpu = 0; cpu < machine_info.logical_cpu_max; ++cpu) { /* filter if the caller did not request all cpus */ - if (!all_cpus && (cpu != curcpu_id)) + if (!all_cpus && (cpu != curcpu_id)) { continue; + } if (classes & KPC_CLASS_FIXED_MASK) { uint32_t count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK); @@ -372,17 +388,21 @@ kpc_get_shadow_counters(boolean_t all_cpus, uint32_t classes, if (classes & KPC_CLASS_CONFIGURABLE_MASK) { pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK); - for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) - if ((1ULL << cfg_ctr) & pmc_mask) + for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) { + if ((1ULL << cfg_ctr) & pmc_mask) { buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr); + } + } } if (classes & KPC_CLASS_POWER_MASK) { pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK); - for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) - if ((1ULL << cfg_ctr) & pmc_mask) + for (uint32_t cfg_ctr = 0; cfg_ctr < cfg_count; ++cfg_ctr) { + if ((1ULL << cfg_ctr) & pmc_mask) { buf[offset++] = CONFIGURABLE_SHADOW_CPU(cpu, cfg_ctr); + } + } } } @@ -396,8 +416,9 @@ kpc_get_counter_count(uint32_t classes) { uint32_t count = 0; - if (classes & KPC_CLASS_FIXED_MASK) + if (classes & KPC_CLASS_FIXED_MASK) { count += kpc_fixed_count(); + } if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) { uint64_t pmc_msk = kpc_get_configurable_pmc_mask(classes); @@ -413,16 +434,18 @@ kpc_get_config_count(uint32_t classes) { uint32_t count = 0; - if (classes & KPC_CLASS_FIXED_MASK) + if (classes & KPC_CLASS_FIXED_MASK) { count += kpc_fixed_config_count(); + } if (classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) { uint64_t pmc_mask = kpc_get_configurable_pmc_mask(classes); count += kpc_configurable_config_count(pmc_mask); } - if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients()) + if ((classes & KPC_CLASS_RAWPMU_MASK) && !kpc_multiple_clients()) { count += kpc_rawpmu_config_count(); + } return count; } @@ -451,13 +474,11 @@ kpc_get_config(uint32_t classes, kpc_config_t *current_config) count += kpc_get_config_count(KPC_CLASS_POWER_MASK); } - if (classes & KPC_CLASS_RAWPMU_MASK) - { + if (classes & KPC_CLASS_RAWPMU_MASK) { // Client shouldn't ask for config words that aren't available. // Most likely, they'd misinterpret the returned buffer if we // allowed this. 
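The copy-out loops above all share one idiom: walk every counter, test its bit in the class's PMC mask, and pack matching values densely into the output buffer. Extracted as a standalone helper:

#include <stdint.h>

/* Copies only the counters whose bit is set in pmc_mask; returns how
 * many values were written. Mirrors the CONFIGURABLE_SHADOW_CPU loops. */
uint32_t
copy_masked(uint64_t *dst, const uint64_t *src, uint32_t count, uint64_t pmc_mask)
{
	uint32_t offset = 0;
	for (uint32_t ctr = 0; ctr < count; ++ctr) {
		if ((1ULL << ctr) & pmc_mask) {
			dst[offset++] = src[ctr];
		}
	}
	return offset;
}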
- if( kpc_multiple_clients() ) - { + if (kpc_multiple_clients()) { return EPERM; } kpc_get_rawpmu_config(¤t_config[count]); @@ -485,16 +506,16 @@ kpc_set_config(uint32_t classes, kpc_config_t *configv) /* no clients have the right to modify both classes */ if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) && - (classes & (KPC_CLASS_POWER_MASK))) - { + (classes & (KPC_CLASS_POWER_MASK))) { return EPERM; } lck_mtx_lock(&kpc_config_lock); /* translate the power class for the machine layer */ - if (classes & KPC_CLASS_POWER_MASK) + if (classes & KPC_CLASS_POWER_MASK) { mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK; + } ret = kpc_set_config_arch( &mp_config ); @@ -567,8 +588,7 @@ kpc_set_period(uint32_t classes, uint64_t *val) /* no clients have the right to modify both classes */ if ((classes & (KPC_CLASS_CONFIGURABLE_MASK)) && - (classes & (KPC_CLASS_POWER_MASK))) - { + (classes & (KPC_CLASS_POWER_MASK))) { return EPERM; } @@ -587,8 +607,9 @@ kpc_set_period(uint32_t classes, uint64_t *val) #endif /* translate the power class for the machine layer */ - if (classes & KPC_CLASS_POWER_MASK) + if (classes & KPC_CLASS_POWER_MASK) { mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK; + } kprintf("setting period %u\n", classes); kpc_set_period_arch( &mp_config ); @@ -601,7 +622,7 @@ kpc_set_period(uint32_t classes, uint64_t *val) int kpc_get_period(uint32_t classes, uint64_t *val) { - uint32_t count = 0 ; + uint32_t count = 0; uint64_t pmc_mask = 0ULL; assert(val); @@ -611,8 +632,9 @@ kpc_get_period(uint32_t classes, uint64_t *val) if (classes & KPC_CLASS_FIXED_MASK) { /* convert reload values to periods */ count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK); - for (uint32_t i = 0; i < count; ++i) + for (uint32_t i = 0; i < count; ++i) { *val++ = kpc_fixed_max() - FIXED_RELOAD(i); + } } if (classes & KPC_CLASS_CONFIGURABLE_MASK) { @@ -620,9 +642,11 @@ kpc_get_period(uint32_t classes, uint64_t *val) /* convert reload values to periods */ count = kpc_configurable_count(); - for (uint32_t i = 0; i < count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < count; ++i) { + if ((1ULL << i) & pmc_mask) { *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i); + } + } } if (classes & KPC_CLASS_POWER_MASK) { @@ -630,9 +654,11 @@ kpc_get_period(uint32_t classes, uint64_t *val) /* convert reload values to periods */ count = kpc_configurable_count(); - for (uint32_t i = 0; i < count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < count; ++i) { + if ((1ULL << i) & pmc_mask) { *val++ = kpc_configurable_max() - CONFIGURABLE_RELOAD(i); + } + } } lck_mtx_unlock(&kpc_config_lock); @@ -654,7 +680,7 @@ kpc_set_actionid(uint32_t classes, uint32_t *val) if (classes & KPC_CLASS_FIXED_MASK) { count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK); - memcpy(&FIXED_ACTIONID(0), val, count*sizeof(uint32_t)); + memcpy(&FIXED_ACTIONID(0), val, count * sizeof(uint32_t)); val += count; } @@ -662,18 +688,22 @@ kpc_set_actionid(uint32_t classes, uint32_t *val) pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK); count = kpc_configurable_count(); - for (uint32_t i = 0; i < count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < count; ++i) { + if ((1ULL << i) & pmc_mask) { CONFIGURABLE_ACTIONID(i) = *val++; + } + } } if (classes & KPC_CLASS_POWER_MASK) { pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK); count = kpc_configurable_count(); - for (uint32_t i = 0; i < count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < count; ++i) { + if ((1ULL << i) & 
pmc_mask) { CONFIGURABLE_ACTIONID(i) = *val++; + } + } } lck_mtx_unlock(&kpc_config_lock); @@ -681,7 +711,8 @@ kpc_set_actionid(uint32_t classes, uint32_t *val) return 0; } -int kpc_get_actionid(uint32_t classes, uint32_t *val) +int +kpc_get_actionid(uint32_t classes, uint32_t *val) { uint32_t count = 0; uint64_t pmc_mask = 0ULL; @@ -692,7 +723,7 @@ int kpc_get_actionid(uint32_t classes, uint32_t *val) if (classes & KPC_CLASS_FIXED_MASK) { count = kpc_get_counter_count(KPC_CLASS_FIXED_MASK); - memcpy(val, &FIXED_ACTIONID(0), count*sizeof(uint32_t)); + memcpy(val, &FIXED_ACTIONID(0), count * sizeof(uint32_t)); val += count; } @@ -700,24 +731,27 @@ int kpc_get_actionid(uint32_t classes, uint32_t *val) pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_CONFIGURABLE_MASK); count = kpc_configurable_count(); - for (uint32_t i = 0; i < count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < count; ++i) { + if ((1ULL << i) & pmc_mask) { *val++ = CONFIGURABLE_ACTIONID(i); + } + } } if (classes & KPC_CLASS_POWER_MASK) { pmc_mask = kpc_get_configurable_pmc_mask(KPC_CLASS_POWER_MASK); count = kpc_configurable_count(); - for (uint32_t i = 0; i < count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < count; ++i) { + if ((1ULL << i) & pmc_mask) { *val++ = CONFIGURABLE_ACTIONID(i); + } + } } lck_mtx_unlock(&kpc_config_lock); return 0; - } int @@ -725,15 +759,16 @@ kpc_set_running(uint32_t classes) { uint32_t all_cfg_classes = KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK; struct kpc_running_remote mp_config = { - .classes = classes, .cfg_target_mask= 0ULL, .cfg_state_mask = 0ULL + .classes = classes, .cfg_target_mask = 0ULL, .cfg_state_mask = 0ULL }; /* target all available PMCs */ mp_config.cfg_target_mask = kpc_get_configurable_pmc_mask(all_cfg_classes); /* translate the power class for the machine layer */ - if (classes & KPC_CLASS_POWER_MASK) + if (classes & KPC_CLASS_POWER_MASK) { mp_config.classes |= KPC_CLASS_CONFIGURABLE_MASK; + } /* generate the state of each configurable PMCs */ mp_config.cfg_state_mask = kpc_get_configurable_pmc_mask(classes); @@ -749,7 +784,7 @@ kpc_register_pm_handler(kpc_pm_handler_t handler) boolean_t kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler, - boolean_t custom_config) + boolean_t custom_config) { uint64_t all_mask = (1ULL << kpc_configurable_count()) - 1; uint64_t req_mask = 0ULL; @@ -768,7 +803,7 @@ kpc_reserve_pm_counters(uint64_t pmc_mask, kpc_pm_handler_t handler, kpc_pm_handler = handler; printf("kpc: pm registered pmc_mask=%llx custom_config=%d\n", - req_mask, custom_config); + req_mask, custom_config); /* post-condition */ { @@ -812,8 +847,7 @@ kpc_get_configurable_pmc_mask(uint32_t classes) /* not configurable classes or no configurable counters */ if (((classes & (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK)) == 0) || - (configurable_count == 0)) - { + (configurable_count == 0)) { goto exit; } @@ -821,30 +855,30 @@ kpc_get_configurable_pmc_mask(uint32_t classes) all_cfg_pmcs_mask = (1ULL << configurable_count) - 1; if (classes & KPC_CLASS_CONFIGURABLE_MASK) { - if (force_all_ctrs == TRUE) + if (force_all_ctrs == TRUE) { cfg_mask |= all_cfg_pmcs_mask; - else + } else { cfg_mask |= (~kpc_pm_pmc_mask) & all_cfg_pmcs_mask; + } } /* * The power class exists iff: - * - No tasks acquired all PMCs - * - PM registered and uses kpc to interact with PMCs + * - No tasks acquired all PMCs + * - PM registered and uses kpc to interact with PMCs */ if ((force_all_ctrs == FALSE) && (kpc_pm_handler != NULL) && 
(kpc_pm_has_custom_config == FALSE) && - (classes & KPC_CLASS_POWER_MASK)) - { + (classes & KPC_CLASS_POWER_MASK)) { pwr_mask |= kpc_pm_pmc_mask & all_cfg_pmcs_mask; } exit: /* post-conditions */ - assert( ((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 ); - assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count() ); - assert( (cfg_mask & pwr_mask) == 0ULL ); + assert(((cfg_mask | pwr_mask) & (~all_cfg_pmcs_mask)) == 0 ); + assert( kpc_popcount(cfg_mask | pwr_mask) <= kpc_configurable_count()); + assert((cfg_mask & pwr_mask) == 0ULL ); return cfg_mask | pwr_mask; } diff --git a/osfmk/kern/kpc_thread.c b/osfmk/kern/kpc_thread.c index 248ea3c56..aa8edd434 100644 --- a/osfmk/kern/kpc_thread.c +++ b/osfmk/kern/kpc_thread.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -83,10 +83,11 @@ kpc_get_thread_counting(void) lck_mtx_unlock(&kpc_thread_lock); - if( kpc_threads_counting_tmp ) + if (kpc_threads_counting_tmp) { return kpc_thread_classes_tmp; - else + } else { return 0; + } } int @@ -98,14 +99,11 @@ kpc_set_thread_counting(uint32_t classes) count = kpc_get_counter_count(classes); - if( (classes == 0) - || (count == 0) ) - { + if ((classes == 0) + || (count == 0)) { /* shut down */ kpc_threads_counting = FALSE; - } - else - { + } else { /* stash the config */ kpc_thread_classes = classes; @@ -117,14 +115,13 @@ kpc_set_thread_counting(uint32_t classes) kpc_threads_counting = TRUE; /* and schedule an AST for this thread... */ - if( !current_thread()->kpc_buf ) - { + if (!current_thread()->kpc_buf) { current_thread()->kperf_flags |= T_KPC_ALLOC; act_set_kperf(current_thread()); } } - kpc_off_cpu_update(); + kpc_off_cpu_update(); lck_mtx_unlock(&kpc_thread_lock); return 0; @@ -141,17 +138,18 @@ kpc_update_thread_counters( thread_t thread ) cpu = current_cpu_datap(); /* 1. stash current PMCs into latest CPU block */ - kpc_get_cpu_counters( FALSE, kpc_thread_classes, - NULL, cpu->cpu_kpc_buf[1] ); + kpc_get_cpu_counters( FALSE, kpc_thread_classes, + NULL, cpu->cpu_kpc_buf[1] ); /* 2. 
apply delta to old thread */ - if( thread->kpc_buf ) - for( i = 0; i < kpc_thread_classes_count; i++ ) + if (thread->kpc_buf) { + for (i = 0; i < kpc_thread_classes_count; i++) { thread->kpc_buf[i] += cpu->cpu_kpc_buf[1][i] - cpu->cpu_kpc_buf[0][i]; + } + } /* schedule any necessary allocations */ - if( !current_thread()->kpc_buf ) - { + if (!current_thread()->kpc_buf) { current_thread()->kperf_flags |= T_KPC_ALLOC; act_set_kperf(current_thread()); } @@ -170,21 +168,23 @@ kpc_get_curthread_counters(uint32_t *inoutcount, uint64_t *buf) boolean_t enabled; /* buffer too small :( */ - if( *inoutcount < kpc_thread_classes_count ) + if (*inoutcount < kpc_thread_classes_count) { return EINVAL; + } /* copy data and actual size */ - if( !thread->kpc_buf ) + if (!thread->kpc_buf) { return EINVAL; + } enabled = ml_set_interrupts_enabled(FALSE); /* snap latest version of counters for this thread */ - kpc_update_thread_counters( current_thread() ); - + kpc_update_thread_counters( current_thread()); + /* copy out */ - memcpy( buf, thread->kpc_buf, - kpc_thread_classes_count * sizeof(*buf) ); + memcpy( buf, thread->kpc_buf, + kpc_thread_classes_count * sizeof(*buf)); *inoutcount = kpc_thread_classes_count; ml_set_interrupts_enabled(enabled); @@ -210,8 +210,9 @@ void kpc_thread_create(thread_t thread) { /* nothing to do if we're not counting */ - if(!kpc_threads_counting) + if (!kpc_threads_counting) { return; + } /* give the new thread a counterbuf */ thread->kpc_buf = kpc_counterbuf_alloc(); @@ -223,8 +224,9 @@ kpc_thread_destroy(thread_t thread) uint64_t *buf = NULL; /* usual case: no kpc buf, just return */ - if( !thread->kpc_buf ) + if (!thread->kpc_buf) { return; + } /* otherwise, don't leak */ buf = thread->kpc_buf; @@ -237,6 +239,7 @@ void kpc_thread_ast_handler( thread_t thread ) { /* see if we want an alloc */ - if( thread->kperf_flags & T_KPC_ALLOC ) + if (thread->kperf_flags & T_KPC_ALLOC) { thread->kpc_buf = kpc_counterbuf_alloc(); + } } diff --git a/osfmk/kern/ledger.c b/osfmk/kern/ledger.c index 481378d2f..7f7bba139 100644 --- a/osfmk/kern/ledger.c +++ b/osfmk/kern/ledger.c @@ -2,7 +2,7 @@ * Copyright (c) 2010-2018 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -52,20 +52,20 @@ * Ledger entry flags. Bits in second nibble (masked by 0xF0) are used for * ledger actions (LEDGER_ACTION_BLOCK, etc). 
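The delta bookkeeping in kpc_update_thread_counters() above charges the outgoing thread with the change in each counter since the last snapshot. Isolated into a helper, with the two per-CPU snapshots modeled as plain arrays:

#include <stdint.h>

#define NCTRS 4 /* hypothetical per-thread counter count */

/* thread_buf accumulates (latest - previous) for each counter, exactly
 * as thread->kpc_buf[i] += cpu_kpc_buf[1][i] - cpu_kpc_buf[0][i] does. */
void
charge_thread(uint64_t *thread_buf, const uint64_t *snap_old, const uint64_t *snap_new)
{
	for (int i = 0; i < NCTRS; i++) {
		thread_buf[i] += snap_new[i] - snap_old[i];
	}
}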
*/ -#define LF_ENTRY_ACTIVE 0x0001 /* entry is active if set */ -#define LF_WAKE_NEEDED 0x0100 /* one or more threads are asleep */ -#define LF_WAKE_INPROGRESS 0x0200 /* the wait queue is being processed */ -#define LF_REFILL_SCHEDULED 0x0400 /* a refill timer has been set */ -#define LF_REFILL_INPROGRESS 0x0800 /* the ledger is being refilled */ -#define LF_CALLED_BACK 0x1000 /* callback was called for balance in deficit */ -#define LF_WARNED 0x2000 /* callback was called for balance warning */ -#define LF_TRACKING_MAX 0x4000 /* track max balance. Exclusive w.r.t refill */ -#define LF_PANIC_ON_NEGATIVE 0x8000 /* panic if it goes negative */ -#define LF_TRACK_CREDIT_ONLY 0x10000 /* only update "credit" */ +#define LF_ENTRY_ACTIVE 0x0001 /* entry is active if set */ +#define LF_WAKE_NEEDED 0x0100 /* one or more threads are asleep */ +#define LF_WAKE_INPROGRESS 0x0200 /* the wait queue is being processed */ +#define LF_REFILL_SCHEDULED 0x0400 /* a refill timer has been set */ +#define LF_REFILL_INPROGRESS 0x0800 /* the ledger is being refilled */ +#define LF_CALLED_BACK 0x1000 /* callback was called for balance in deficit */ +#define LF_WARNED 0x2000 /* callback was called for balance warning */ +#define LF_TRACKING_MAX 0x4000 /* track max balance. Exclusive w.r.t refill */ +#define LF_PANIC_ON_NEGATIVE 0x8000 /* panic if it goes negative */ +#define LF_TRACK_CREDIT_ONLY 0x10000 /* only update "credit" */ /* Determine whether a ledger entry exists and has been initialized and active */ -#define ENTRY_VALID(l, e) \ - (((l) != NULL) && ((e) >= 0) && ((e) < (l)->l_size) && \ +#define ENTRY_VALID(l, e) \ + (((l) != NULL) && ((e) >= 0) && ((e) < (l)->l_size) && \ (((l)->l_entries[e].le_flags & LF_ENTRY_ACTIVE) == LF_ENTRY_ACTIVE)) #define ASSERT(a) assert(a) @@ -73,26 +73,26 @@ #ifdef LEDGER_DEBUG int ledger_debug = 0; -#define lprintf(a) if (ledger_debug) { \ +#define lprintf(a) if (ledger_debug) { \ printf("%lld ", abstime_to_nsecs(mach_absolute_time() / 1000000)); \ - printf a ; \ + printf a ; \ } #else -#define lprintf(a) +#define lprintf(a) #endif struct ledger_callback { - ledger_callback_t lc_func; - const void *lc_param0; - const void *lc_param1; + ledger_callback_t lc_func; + const void *lc_param0; + const void *lc_param1; }; struct entry_template { - char et_key[LEDGER_NAME_MAX]; - char et_group[LEDGER_NAME_MAX]; - char et_units[LEDGER_NAME_MAX]; - uint32_t et_flags; - struct ledger_callback *et_callback; + char et_key[LEDGER_NAME_MAX]; + char et_group[LEDGER_NAME_MAX]; + char et_units[LEDGER_NAME_MAX]; + uint32_t et_flags; + struct ledger_callback *et_callback; }; lck_grp_t ledger_lck_grp; @@ -108,29 +108,29 @@ lck_grp_t ledger_lck_grp; * to extract a value from the table - i.e., 2 or 3 memory references. 
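ENTRY_VALID() packs three checks into one macro: the ledger exists, the index is in range, and the entry has LF_ENTRY_ACTIVE set. The same predicate as a plain function, over simplified stand-in structs:

#include <stdbool.h>
#include <stdint.h>

#define LF_ENTRY_ACTIVE 0x0001

struct entry_model { uint32_t flags; };
struct ledger_model { int size; struct entry_model *entries; };

bool
entry_valid(const struct ledger_model *l, int e)
{
	return l != NULL && e >= 0 && e < l->size &&
	    (l->entries[e].flags & LF_ENTRY_ACTIVE) == LF_ENTRY_ACTIVE;
}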
*/ struct ledger_template { - const char *lt_name; - int lt_refs; - int lt_cnt; - int lt_table_size; - volatile uint32_t lt_inuse; - lck_mtx_t lt_lock; - zone_t lt_zone; - bool lt_initialized; - struct entry_template *lt_entries; + const char *lt_name; + int lt_refs; + int lt_cnt; + int lt_table_size; + volatile uint32_t lt_inuse; + lck_mtx_t lt_lock; + zone_t lt_zone; + bool lt_initialized; + struct entry_template *lt_entries; }; -#define template_lock(template) lck_mtx_lock(&(template)->lt_lock) -#define template_unlock(template) lck_mtx_unlock(&(template)->lt_lock) +#define template_lock(template) lck_mtx_lock(&(template)->lt_lock) +#define template_unlock(template) lck_mtx_unlock(&(template)->lt_lock) -#define TEMPLATE_INUSE(s, t) { \ - s = splsched(); \ - while (OSCompareAndSwap(0, 1, &((t)->lt_inuse))) \ - ; \ +#define TEMPLATE_INUSE(s, t) { \ + s = splsched(); \ + while (OSCompareAndSwap(0, 1, &((t)->lt_inuse))) \ + ; \ } -#define TEMPLATE_IDLE(s, t) { \ - (t)->lt_inuse = 0; \ - splx(s); \ +#define TEMPLATE_IDLE(s, t) { \ + (t)->lt_inuse = 0; \ + splx(s); \ } static int ledger_cnt = 0; @@ -141,7 +141,7 @@ static uint32_t flag_set(volatile uint32_t *flags, uint32_t bit); static uint32_t flag_clear(volatile uint32_t *flags, uint32_t bit); static void ledger_entry_check_new_balance(thread_t thread, ledger_t ledger, - int entry, struct ledger_entry *le); + int entry, struct ledger_entry *le); #if 0 static void @@ -160,7 +160,7 @@ abstime_to_nsecs(uint64_t abstime) uint64_t nsecs; absolutetime_to_nanoseconds(abstime, &nsecs); - return (nsecs); + return nsecs; } static uint64_t @@ -169,13 +169,13 @@ nsecs_to_abstime(uint64_t nsecs) uint64_t abstime; nanoseconds_to_absolutetime(nsecs, &abstime); - return (abstime); + return abstime; } void ledger_init(void) { - lck_grp_init(&ledger_lck_grp, "ledger", LCK_GRP_ATTR_NULL); + lck_grp_init(&ledger_lck_grp, "ledger", LCK_GRP_ATTR_NULL); } ledger_template_t @@ -183,9 +183,10 @@ ledger_template_create(const char *name) { ledger_template_t template; - template = (ledger_template_t)kalloc(sizeof (*template)); - if (template == NULL) - return (NULL); + template = (ledger_template_t)kalloc(sizeof(*template)); + if (template == NULL) { + return NULL; + } template->lt_name = name; template->lt_refs = 1; @@ -196,13 +197,13 @@ ledger_template_create(const char *name) lck_mtx_init(&template->lt_lock, &ledger_lck_grp, LCK_ATTR_NULL); template->lt_entries = (struct entry_template *) - kalloc(sizeof (struct entry_template) * template->lt_table_size); + kalloc(sizeof(struct entry_template) * template->lt_table_size); if (template->lt_entries == NULL) { - kfree(template, sizeof (*template)); + kfree(template, sizeof(*template)); template = NULL; } - return (template); + return template; } void @@ -212,8 +213,9 @@ ledger_template_dereference(ledger_template_t template) template->lt_refs--; template_unlock(template); - if (template->lt_refs == 0) - kfree(template, sizeof (*template)); + if (template->lt_refs == 0) { + kfree(template, sizeof(*template)); + } } /* @@ -229,8 +231,9 @@ ledger_entry_add(ledger_template_t template, const char *key, int idx; struct entry_template *et; - if ((key == NULL) || (strlen(key) >= LEDGER_NAME_MAX) || (template->lt_zone != NULL)) - return (-1); + if ((key == NULL) || (strlen(key) >= LEDGER_NAME_MAX) || (template->lt_zone != NULL)) { + return -1; + } template_lock(template); @@ -276,7 +279,7 @@ ledger_entry_add(ledger_template_t template, const char *key, idx = template->lt_cnt++; template_unlock(template); - return (idx); + 
return idx; } @@ -285,14 +288,15 @@ ledger_entry_setactive(ledger_t ledger, int entry) { struct ledger_entry *le; - if ((ledger == NULL) || (entry < 0) || (entry >= ledger->l_size)) - return (KERN_INVALID_ARGUMENT); + if ((ledger == NULL) || (entry < 0) || (entry >= ledger->l_size)) { + return KERN_INVALID_ARGUMENT; + } le = &ledger->l_entries[entry]; if ((le->le_flags & LF_ENTRY_ACTIVE) == 0) { flag_set(&le->le_flags, LF_ENTRY_ACTIVE); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -302,16 +306,19 @@ ledger_key_lookup(ledger_template_t template, const char *key) int idx; template_lock(template); - for (idx = 0; idx < template->lt_cnt; idx++) + for (idx = 0; idx < template->lt_cnt; idx++) { if (template->lt_entries != NULL && - (strcmp(key, template->lt_entries[idx].et_key) == 0)) + (strcmp(key, template->lt_entries[idx].et_key) == 0)) { break; + } + } - if (idx >= template->lt_cnt) + if (idx >= template->lt_cnt) { idx = -1; + } template_unlock(template); - return (idx); + return idx; } /* @@ -326,8 +333,8 @@ ledger_template_complete(ledger_template_t template) size_t ledger_size; ledger_size = sizeof(struct ledger) + (template->lt_cnt * sizeof(struct ledger_entry)); template->lt_zone = zinit(ledger_size, CONFIG_TASK_MAX * ledger_size, - ledger_size, - template->lt_name); + ledger_size, + template->lt_name); template->lt_initialized = true; } @@ -389,36 +396,38 @@ ledger_instantiate(ledger_template_t template, int entry_type) le->le_flags = et->et_flags; /* make entry inactive by removing active bit */ - if (entry_type == LEDGER_CREATE_INACTIVE_ENTRIES) + if (entry_type == LEDGER_CREATE_INACTIVE_ENTRIES) { flag_clear(&le->le_flags, LF_ENTRY_ACTIVE); + } /* * If template has a callback, this entry is opted-in, * by default. */ - if (et->et_callback != NULL) + if (et->et_callback != NULL) { flag_set(&le->le_flags, LEDGER_ACTION_CALLBACK); + } le->le_credit = 0; le->le_debit = 0; le->le_limit = LEDGER_LIMIT_INFINITY; - le->le_warn_level = LEDGER_LIMIT_INFINITY; + le->le_warn_level = LEDGER_LIMIT_INFINITY; le->_le.le_refill.le_refill_period = 0; le->_le.le_refill.le_last_refill = 0; } template_unlock(template); - return (ledger); + return ledger; } static uint32_t flag_set(volatile uint32_t *flags, uint32_t bit) { - return (OSBitOrAtomic(bit, flags)); + return OSBitOrAtomic(bit, flags); } static uint32_t flag_clear(volatile uint32_t *flags, uint32_t bit) { - return (OSBitAndAtomic(~bit, flags)); + return OSBitAndAtomic(~bit, flags); } /* @@ -427,17 +436,19 @@ flag_clear(volatile uint32_t *flags, uint32_t bit) kern_return_t ledger_reference(ledger_t ledger) { - if (!LEDGER_VALID(ledger)) - return (KERN_INVALID_ARGUMENT); + if (!LEDGER_VALID(ledger)) { + return KERN_INVALID_ARGUMENT; + } os_ref_retain(&ledger->l_refs); - return (KERN_SUCCESS); + return KERN_SUCCESS; } int ledger_reference_count(ledger_t ledger) { - if (!LEDGER_VALID(ledger)) - return (-1); + if (!LEDGER_VALID(ledger)) { + return -1; + } return os_ref_get_count(&ledger->l_refs); } @@ -449,8 +460,9 @@ ledger_reference_count(ledger_t ledger) kern_return_t ledger_dereference(ledger_t ledger) { - if (!LEDGER_VALID(ledger)) - return (KERN_INVALID_ARGUMENT); + if (!LEDGER_VALID(ledger)) { + return KERN_INVALID_ARGUMENT; + } if (os_ref_release(&ledger->l_refs) == 0) { if (ledger->l_template->lt_zone) { @@ -460,7 +472,7 @@ ledger_dereference(ledger_t ledger) } } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -482,9 +494,10 @@ warn_level_exceeded(struct ledger_entry *le) * use positive limits. 
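ledger_template_complete() above sizes the zone so each instantiated ledger is a single allocation: a header followed by one entry per template slot. A sketch of that sizing, using simplified stand-in layouts:

#include <stddef.h>
#include <stdio.h>

struct le_model { long credit, debit, limit; };
struct ledger_model2 { int size; struct le_model entries[]; }; /* flexible array */

int
main(void)
{
	int lt_cnt = 12; /* hypothetical number of template entries */
	size_t ledger_size = sizeof(struct ledger_model2)
	    + (size_t)lt_cnt * sizeof(struct le_model);
	printf("each ledger zone element: %zu bytes\n", ledger_size);
	return 0;
}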
*/ balance = le->le_credit - le->le_debit; - if ((le->le_warn_level != LEDGER_LIMIT_INFINITY) && (balance > le->le_warn_level)) - return (1); - return (0); + if ((le->le_warn_level != LEDGER_LIMIT_INFINITY) && (balance > le->le_warn_level)) { + return 1; + } + return 0; } /* @@ -502,12 +515,14 @@ limit_exceeded(struct ledger_entry *le) } balance = le->le_credit - le->le_debit; - if ((le->le_limit <= 0) && (balance < le->le_limit)) - return (1); + if ((le->le_limit <= 0) && (balance < le->le_limit)) { + return 1; + } - if ((le->le_limit > 0) && (balance > le->le_limit)) - return (1); - return (0); + if ((le->le_limit > 0) && (balance > le->le_limit)) { + return 1; + } + return 0; } static inline struct ledger_callback * @@ -520,7 +535,7 @@ entry_get_callback(ledger_t ledger, int entry) callback = ledger->l_template->lt_entries[entry].et_callback; TEMPLATE_IDLE(s, ledger->l_template); - return (callback); + return callback; } /* @@ -605,16 +620,18 @@ ledger_refill(uint64_t now, ledger_t ledger, int entry) * OK, it's been a long time. Do a divide to figure out * how long. */ - if (elapsed > 0) + if (elapsed > 0) { periods = (now - le->_le.le_refill.le_last_refill) / period; + } balance = le->le_credit - le->le_debit; due = periods * le->le_limit; - if (balance - due < 0) + if (balance - due < 0) { due = balance; + } - assertf(due >= 0,"now=%llu, ledger=%p, entry=%d, balance=%lld, due=%lld", now, ledger, entry, balance, due); + assertf(due >= 0, "now=%llu, ledger=%p, entry=%d, balance=%lld, due=%lld", now, ledger, entry, balance, due); OSAddAtomic64(due, &le->le_debit); @@ -625,26 +642,28 @@ ledger_refill(uint64_t now, ledger_t ledger, int entry) * Otherwise set it to the time at which it last should have been * fully refilled. */ - if (balance == due) + if (balance == due) { le->_le.le_refill.le_last_refill = now; - else + } else { le->_le.le_refill.le_last_refill += (le->_le.le_refill.le_refill_period * periods); + } flag_clear(&le->le_flags, LF_REFILL_INPROGRESS); lprintf(("Refill %lld %lld->%lld\n", periods, balance, balance - due)); - if (!limit_exceeded(le)) + if (!limit_exceeded(le)) { ledger_limit_entry_wakeup(le); + } } void ledger_entry_check_new_balance(thread_t thread, ledger_t ledger, - int entry, struct ledger_entry *le) + int entry, struct ledger_entry *le) { if (le->le_flags & LF_TRACKING_MAX) { ledger_amount_t balance = le->le_credit - le->le_debit; - if (balance > le->_le._le_max.le_lifetime_max){ + if (balance > le->_le._le_max.le_lifetime_max) { le->_le._le_max.le_lifetime_max = balance; } @@ -660,8 +679,9 @@ ledger_entry_check_new_balance(thread_t thread, ledger_t ledger, assert(!(le->le_flags & LF_TRACKING_MAX)); uint64_t now = mach_absolute_time(); - if ((now - le->_le.le_refill.le_last_refill) > le->_le.le_refill.le_refill_period) + if ((now - le->_le.le_refill.le_last_refill) > le->_le.le_refill.le_refill_period) { ledger_refill(now, ledger, entry); + } } if (limit_exceeded(le)) { @@ -687,8 +707,9 @@ ledger_entry_check_new_balance(thread_t thread, ledger_t ledger, * If there are any threads blocked on this entry, now would * be a good time to wake them up. */ - if (le->le_flags & LF_WAKE_NEEDED) + if (le->le_flags & LF_WAKE_NEEDED) { ledger_limit_entry_wakeup(le); + } if (le->le_flags & LEDGER_ACTION_CALLBACK) { /* @@ -696,24 +717,24 @@ ledger_entry_check_new_balance(thread_t thread, ledger_t ledger, * the ledger's balance crosses into or out of the warning * level. */ - if (warn_level_exceeded(le)) { - /* - * This ledger's balance is above the warning level. 
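The refill arithmetic in ledger_refill() above is worth a worked example: count whole elapsed periods, compute the amount due back, and clamp so the entry is never handed back more than its current balance. With concrete (made-up) numbers:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t now = 10000, last_refill = 4000, period = 1500;
	int64_t credit = 900, debit = 200, limit = 250;

	int64_t periods = (now - last_refill) / period; /* 4 whole periods */
	int64_t balance = credit - debit;               /* 700 */
	int64_t due = periods * limit;                  /* 1000 */

	if (balance - due < 0) {
		due = balance; /* never hand back more than the balance */
	}
	debit += due; /* OSAddAtomic64(due, &le->le_debit) in the kernel */

	/* prints: periods=4 due=700 new balance=0 */
	printf("periods=%lld due=%lld new balance=%lld\n",
	    (long long)periods, (long long)due, (long long)(credit - debit));
	return 0;
}

Because the balance here exactly equals the amount due, the kernel takes the branch that resets le_last_refill to now rather than advancing it by whole periods.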
- */ - if ((le->le_flags & LF_WARNED) == 0) { - /* - * If we are above the warning level and - * have not yet invoked the callback, - * set the AST so it can be done before returning - * to userland. - */ + if (warn_level_exceeded(le)) { + /* + * This ledger's balance is above the warning level. + */ + if ((le->le_flags & LF_WARNED) == 0) { + /* + * If we are above the warning level and + * have not yet invoked the callback, + * set the AST so it can be done before returning + * to userland. + */ act_set_astledger_async(thread); } } else { /* * This ledger's balance is below the warning level. */ - if (le->le_flags & LF_WARNED) { + if (le->le_flags & LF_WARNED) { /* * If we are below the warning level and * the LF_WARNED flag is still set, we need @@ -730,10 +751,10 @@ ledger_entry_check_new_balance(thread_t thread, ledger_t ledger, if ((le->le_flags & LF_PANIC_ON_NEGATIVE) && (le->le_credit < le->le_debit)) { panic("ledger_entry_check_new_balance(%p,%d): negative ledger %p credit:%lld debit:%lld balance:%lld\n", - ledger, entry, le, - le->le_credit, - le->le_debit, - le->le_credit - le->le_debit); + ledger, entry, le, + le->le_credit, + le->le_debit, + le->le_credit - le->le_debit); } } @@ -755,11 +776,13 @@ ledger_credit_thread(thread_t thread, ledger_t ledger, int entry, ledger_amount_ ledger_amount_t old, new; struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry) || (amount < 0)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry) || (amount < 0)) { + return KERN_INVALID_VALUE; + } - if (amount == 0) - return (KERN_SUCCESS); + if (amount == 0) { + return KERN_SUCCESS; + } le = &ledger->l_entries[entry]; @@ -771,7 +794,7 @@ ledger_credit_thread(thread_t thread, ledger_t ledger, int entry, ledger_amount_ ledger_entry_check_new_balance(thread, ledger, entry, le); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -811,7 +834,7 @@ ledger_rollup(ledger_t to_ledger, ledger_t from_ledger) ledger_rollup_entry(to_ledger, from_ledger, i); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* Add one ledger entry value to another. @@ -829,15 +852,15 @@ ledger_rollup_entry(ledger_t to_ledger, ledger_t from_ledger, int entry) from_le = &from_ledger->l_entries[entry]; to_le = &to_ledger->l_entries[entry]; OSAddAtomic64(from_le->le_credit, &to_le->le_credit); - OSAddAtomic64(from_le->le_debit, &to_le->le_debit); + OSAddAtomic64(from_le->le_debit, &to_le->le_debit); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* * Zero the balance of a ledger by adding to its credit or debit, whichever is smaller. - * Note that some clients of ledgers (notably, task wakeup statistics) require that + * Note that some clients of ledgers (notably, task wakeup statistics) require that * le_credit only ever increase as a function of ledger_credit(). 
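The retry loop that follows can be modeled in userspace with C11 atomics: raise the smaller of credit/debit to meet the larger, and restart from the top if another thread moved either value first. OSCompareAndSwap64() is played here by atomic_compare_exchange_strong(); the LF_TRACK_CREDIT_ONLY special case is omitted:

#include <stdatomic.h>
#include <stdint.h>

void
zero_balance_model(_Atomic int64_t *credit, _Atomic int64_t *debit)
{
top:;
	int64_t c = atomic_load(credit);
	int64_t d = atomic_load(debit);

	if (c > d) {
		/* expected old debit is d; raise it to c or retry */
		if (!atomic_compare_exchange_strong(debit, &d, c)) {
			goto top;
		}
	} else if (c < d) {
		if (!atomic_compare_exchange_strong(credit, &c, d)) {
			goto top;
		}
	}
	/* c == d: the balance is already zero */
}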
*/ kern_return_t @@ -846,8 +869,9 @@ ledger_zero_balance(ledger_t ledger, int entry) struct ledger_entry *le; ledger_amount_t debit, credit; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } le = &ledger->l_entries[entry]; @@ -862,16 +886,18 @@ top: } lprintf(("%p zeroed %lld->%lld\n", current_thread(), le->le_credit, 0)); } else if (credit > debit) { - if (!OSCompareAndSwap64(debit, credit, &le->le_debit)) + if (!OSCompareAndSwap64(debit, credit, &le->le_debit)) { goto top; + } lprintf(("%p zeroed %lld->%lld\n", current_thread(), le->le_debit, le->le_credit)); } else if (credit < debit) { - if (!OSCompareAndSwap64(credit, debit, &le->le_credit)) + if (!OSCompareAndSwap64(credit, debit, &le->le_credit)) { goto top; + } lprintf(("%p zeroed %lld->%lld\n", current_thread(), le->le_credit, le->le_debit)); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -879,15 +905,16 @@ ledger_get_limit(ledger_t ledger, int entry, ledger_amount_t *limit) { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } le = &ledger->l_entries[entry]; *limit = le->le_limit; lprintf(("ledger_get_limit: %lld\n", *limit)); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -895,17 +922,18 @@ ledger_get_limit(ledger_t ledger, int entry, ledger_amount_t *limit) * current balance, so the change doesn't affect the thread until the * next refill. * - * warn_level: If non-zero, causes the callback to be invoked when + * warn_level: If non-zero, causes the callback to be invoked when * the balance exceeds this level. Specified as a percentage [of the limit]. */ kern_return_t ledger_set_limit(ledger_t ledger, int entry, ledger_amount_t limit, - uint8_t warn_level_percentage) + uint8_t warn_level_percentage) { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } lprintf(("ledger_set_limit: %lld\n", limit)); le = &ledger->l_entries[entry]; @@ -925,7 +953,7 @@ ledger_set_limit(ledger_t ledger, int entry, ledger_amount_t limit, le->_le.le_refill.le_last_refill = 0; } flag_clear(&le->le_flags, LF_CALLED_BACK); - flag_clear(&le->le_flags, LF_WARNED); + flag_clear(&le->le_flags, LF_WARNED); ledger_limit_entry_wakeup(le); if (warn_level_percentage != 0) { @@ -937,48 +965,48 @@ ledger_set_limit(ledger_t ledger, int entry, ledger_amount_t limit, le->le_warn_level = LEDGER_LIMIT_INFINITY; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } #if CONFIG_LEDGER_INTERVAL_MAX kern_return_t ledger_get_interval_max(ledger_t ledger, int entry, - ledger_amount_t *max_interval_balance, int reset) + ledger_amount_t *max_interval_balance, int reset) { struct ledger_entry *le; le = &ledger->l_entries[entry]; if (!ENTRY_VALID(ledger, entry) || !(le->le_flags & LF_TRACKING_MAX)) { - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } *max_interval_balance = le->_le._le_max.le_interval_max; lprintf(("ledger_get_interval_max: %lld%s\n", *max_interval_balance, - (reset) ? " --> 0" : "")); + (reset) ? 
" --> 0" : "")); if (reset) { le->_le._le_max.le_interval_max = 0; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } #endif /* CONFIG_LEDGER_INTERVAL_MAX */ kern_return_t ledger_get_lifetime_max(ledger_t ledger, int entry, - ledger_amount_t *max_lifetime_balance) + ledger_amount_t *max_lifetime_balance) { struct ledger_entry *le; le = &ledger->l_entries[entry]; if (!ENTRY_VALID(ledger, entry) || !(le->le_flags & LF_TRACKING_MAX)) { - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } *max_lifetime_balance = le->_le._le_max.le_lifetime_max; lprintf(("ledger_get_lifetime_max: %lld\n", *max_lifetime_balance)); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -986,24 +1014,24 @@ ledger_get_lifetime_max(ledger_t ledger, int entry, */ kern_return_t ledger_track_maximum(ledger_template_t template, int entry, - __unused int period_in_secs) + __unused int period_in_secs) { template_lock(template); if ((entry < 0) || (entry >= template->lt_cnt)) { - template_unlock(template); - return (KERN_INVALID_VALUE); + template_unlock(template); + return KERN_INVALID_VALUE; } /* Refill is incompatible with max tracking. */ if (template->lt_entries[entry].et_flags & LF_REFILL_SCHEDULED) { - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } template->lt_entries[entry].et_flags |= LF_TRACKING_MAX; template_unlock(template); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1013,14 +1041,14 @@ ledger_panic_on_negative(ledger_template_t template, int entry) if ((entry < 0) || (entry >= template->lt_cnt)) { template_unlock(template); - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } template->lt_entries[entry].et_flags |= LF_PANIC_ON_NEGATIVE; template_unlock(template); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1030,14 +1058,14 @@ ledger_track_credit_only(ledger_template_t template, int entry) if ((entry < 0) || (entry >= template->lt_cnt)) { template_unlock(template); - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } template->lt_entries[entry].et_flags |= LF_TRACK_CREDIT_ONLY; template_unlock(template); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1045,16 +1073,17 @@ ledger_track_credit_only(ledger_template_t template, int entry) */ kern_return_t ledger_set_callback(ledger_template_t template, int entry, - ledger_callback_t func, const void *param0, const void *param1) + ledger_callback_t func, const void *param0, const void *param1) { struct entry_template *et; struct ledger_callback *old_cb, *new_cb; - if ((entry < 0) || (entry >= template->lt_cnt)) - return (KERN_INVALID_VALUE); + if ((entry < 0) || (entry >= template->lt_cnt)) { + return KERN_INVALID_VALUE; + } if (func) { - new_cb = (struct ledger_callback *)kalloc(sizeof (*new_cb)); + new_cb = (struct ledger_callback *)kalloc(sizeof(*new_cb)); new_cb->lc_func = func; new_cb->lc_param0 = param0; new_cb->lc_param1 = param1; @@ -1067,10 +1096,11 @@ ledger_set_callback(ledger_template_t template, int entry, old_cb = et->et_callback; et->et_callback = new_cb; template_unlock(template); - if (old_cb) - kfree(old_cb, sizeof (*old_cb)); + if (old_cb) { + kfree(old_cb, sizeof(*old_cb)); + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1083,8 +1113,9 @@ ledger_set_callback(ledger_template_t template, int entry, kern_return_t ledger_disable_callback(ledger_t ledger, int entry) { - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } /* * le_warn_level is used to indicate 
*if* this ledger has a warning configured, @@ -1094,7 +1125,7 @@ ledger_disable_callback(ledger_t ledger, int entry) */ ledger->l_entries[entry].le_warn_level = LEDGER_LIMIT_INFINITY; flag_clear(&ledger->l_entries[entry].le_flags, LEDGER_ACTION_CALLBACK); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1107,13 +1138,14 @@ ledger_disable_callback(ledger_t ledger, int entry) kern_return_t ledger_enable_callback(ledger_t ledger, int entry) { - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } assert(entry_get_callback(ledger, entry) != NULL); flag_set(&ledger->l_entries[entry].le_flags, LEDGER_ACTION_CALLBACK); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1126,13 +1158,14 @@ ledger_get_period(ledger_t ledger, int entry, uint64_t *period) { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } le = &ledger->l_entries[entry]; *period = abstime_to_nsecs(le->_le.le_refill.le_refill_period); lprintf(("ledger_get_period: %llx\n", *period)); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1144,8 +1177,9 @@ ledger_set_period(ledger_t ledger, int entry, uint64_t period) struct ledger_entry *le; lprintf(("ledger_set_period: %llx\n", period)); - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } le = &ledger->l_entries[entry]; @@ -1159,7 +1193,7 @@ ledger_set_period(ledger_t ledger, int entry, uint64_t period) /* * Refill is incompatible with rolling max tracking. */ - return (KERN_INVALID_VALUE); + return KERN_INVALID_VALUE; } le->_le.le_refill.le_refill_period = nsecs_to_abstime(period); @@ -1175,7 +1209,7 @@ ledger_set_period(ledger_t ledger, int entry, uint64_t period) flag_set(&le->le_flags, LF_REFILL_SCHEDULED); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1186,36 +1220,39 @@ ledger_disable_refill(ledger_t ledger, int entry) { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } le = &ledger->l_entries[entry]; flag_clear(&le->le_flags, LF_REFILL_SCHEDULED); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t ledger_get_actions(ledger_t ledger, int entry, int *actions) { - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } *actions = ledger->l_entries[entry].le_flags & LEDGER_ACTION_MASK; - lprintf(("ledger_get_actions: %#x\n", *actions)); - return (KERN_SUCCESS); + lprintf(("ledger_get_actions: %#x\n", *actions)); + return KERN_SUCCESS; } kern_return_t ledger_set_action(ledger_t ledger, int entry, int action) { lprintf(("ledger_set_action: %#x\n", action)); - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_VALUE); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_VALUE; + } flag_set(&ledger->l_entries[entry].le_flags, action); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1224,11 +1261,13 @@ ledger_debit_thread(thread_t thread, ledger_t ledger, int entry, ledger_amount_t struct ledger_entry *le; ledger_amount_t old, new; - if (!ENTRY_VALID(ledger, entry) || (amount < 0)) - return (KERN_INVALID_ARGUMENT); + if (!ENTRY_VALID(ledger, entry) || (amount < 0)) { + return KERN_INVALID_ARGUMENT; + } - if (amount == 0) - return (KERN_SUCCESS); + if (amount == 
0) { + return KERN_SUCCESS; + } le = &ledger->l_entries[entry]; @@ -1246,7 +1285,7 @@ ledger_debit_thread(thread_t thread, ledger_t ledger, int entry, ledger_amount_t ledger_entry_check_new_balance(thread, ledger, entry, le); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1264,13 +1303,13 @@ ledger_debit_nocheck(ledger_t ledger, int entry, ledger_amount_t amount) void ledger_ast(thread_t thread) { - struct ledger *l = thread->t_ledger; - struct ledger *thl; - uint32_t block; - uint64_t now; - uint8_t task_flags; - uint8_t task_percentage; - uint64_t task_interval; + struct ledger *l = thread->t_ledger; + struct ledger *thl; + uint32_t block; + uint64_t now; + uint8_t task_flags; + uint8_t task_percentage; + uint64_t task_interval; kern_return_t ret; task_t task = thread->task; @@ -1297,9 +1336,9 @@ top: */ if (((task_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) != 0) && ((thread->options & TH_OPT_PRVT_CPULIMIT) == 0)) { - uint8_t percentage; + uint8_t percentage; uint64_t interval; - int action; + int action; thread_get_cpulimit(&action, &percentage, &interval); @@ -1313,7 +1352,7 @@ top: assert((thread->options & TH_OPT_PROC_CPULIMIT) != 0); } } else if (((task_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) && - (thread->options & TH_OPT_PROC_CPULIMIT)) { + (thread->options & TH_OPT_PROC_CPULIMIT)) { assert((thread->options & TH_OPT_PRVT_CPULIMIT) == 0); /* @@ -1327,9 +1366,10 @@ top: /* * If the task or thread is being terminated, let's just get on with it */ - if ((l == NULL) || !task->active || task->halting || !thread->active) + if ((l == NULL) || !task->active || task->halting || !thread->active) { return; - + } + /* * Examine all entries in deficit to see which might be eligible for * an automatic refill, which require callbacks to be issued, and @@ -1357,12 +1397,14 @@ top: if (block) { if (LEDGER_VALID(thl)) { ret = ledger_perform_blocking(thl); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { goto top; + } } ret = ledger_perform_blocking(l); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { goto top; + } } /* block */ } @@ -1413,28 +1455,32 @@ ledger_check_needblock(ledger_t l, uint64_t now) if ((le->_le.le_refill.le_last_refill + le->_le.le_refill.le_refill_period) > now) { ledger_refill(now, l, i); - if (limit_exceeded(le) == FALSE) + if (limit_exceeded(le) == FALSE) { continue; + } } } - if (le->le_flags & LEDGER_ACTION_BLOCK) + if (le->le_flags & LEDGER_ACTION_BLOCK) { block = 1; - if ((le->le_flags & LEDGER_ACTION_CALLBACK) == 0) + } + if ((le->le_flags & LEDGER_ACTION_CALLBACK) == 0) { continue; + } - /* - * If the LEDGER_ACTION_CALLBACK flag is on, we expect there to - * be a registered callback.
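/*
 * The flag_set()/LF_CALLED_BACK test completed just below is a one-shot
 * gate: an atomic fetch-OR returns the previous flag word, so exactly one
 * caller observes the bit clear and invokes the callback. A minimal C11
 * sketch of the same idiom, with invented toy_* names rather than the
 * kernel's flag helpers:
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_CALLED_BACK 0x1u

static _Atomic uint32_t toy_flags;

static void
toy_fire_once(void)
{
	/* fetch_or returns the old value; only the first caller sees 0 */
	uint32_t prev = atomic_fetch_or(&toy_flags, TOY_CALLED_BACK);
	if (prev & TOY_CALLED_BACK) {
		return;         /* somebody already fired the callback */
	}
	printf("callback invoked exactly once\n");
}

int
main(void)
{
	toy_fire_once();        /* invokes */
	toy_fire_once();        /* skipped */
	return 0;
}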
+ */ assert(lc != NULL); flags = flag_set(&le->le_flags, LF_CALLED_BACK); /* Callback has already been called */ - if (flags & LF_CALLED_BACK) + if (flags & LF_CALLED_BACK) { continue; + } lc->lc_func(FALSE, lc->lc_param0, lc->lc_param1); } - return(block); + return block; } @@ -1449,24 +1495,27 @@ ledger_perform_blocking(ledger_t l) for (i = 0; i < l->l_size; i++) { le = &l->l_entries[i]; if ((!limit_exceeded(le)) || - ((le->le_flags & LEDGER_ACTION_BLOCK) == 0)) + ((le->le_flags & LEDGER_ACTION_BLOCK) == 0)) { continue; + } assert(!(le->le_flags & LF_TRACKING_MAX)); /* Prepare to sleep until the resource is refilled */ ret = assert_wait_deadline(le, THREAD_INTERRUPTIBLE, le->_le.le_refill.le_last_refill + le->_le.le_refill.le_refill_period); - if (ret != THREAD_WAITING) - return(KERN_SUCCESS); + if (ret != THREAD_WAITING) { + return KERN_SUCCESS; + } /* Mark that somebody is waiting on this entry */ flag_set(&le->le_flags, LF_WAKE_NEEDED); ret = thread_block_reason(THREAD_CONTINUE_NULL, NULL, AST_LEDGER); - if (ret != THREAD_AWAKENED) - return(KERN_SUCCESS); + if (ret != THREAD_AWAKENED) { + return KERN_SUCCESS; + } /* * The world may have changed while we were asleep. @@ -1474,9 +1523,9 @@ ledger_perform_blocking(ledger_t l) * deficit. Or maybe we're supposed to die now. * Go back to the top and reevaluate. */ - return(KERN_FAILURE); + return KERN_FAILURE; } - return(KERN_SUCCESS); + return KERN_SUCCESS; } @@ -1486,15 +1535,16 @@ ledger_get_entries(ledger_t ledger, int entry, ledger_amount_t *credit, { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_ARGUMENT); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_ARGUMENT; + } le = &ledger->l_entries[entry]; *credit = le->le_credit; *debit = le->le_debit; - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1502,14 +1552,15 @@ ledger_reset_callback_state(ledger_t ledger, int entry) { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_ARGUMENT); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_ARGUMENT; + } le = &ledger->l_entries[entry]; flag_clear(&le->le_flags, LF_CALLED_BACK); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1517,14 +1568,15 @@ ledger_disable_panic_on_negative(ledger_t ledger, int entry) { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_ARGUMENT); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_ARGUMENT; + } le = &ledger->l_entries[entry]; flag_clear(&le->le_flags, LF_PANIC_ON_NEGATIVE); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1532,8 +1584,9 @@ ledger_get_panic_on_negative(ledger_t ledger, int entry, int *panic_on_negative) { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_ARGUMENT); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_ARGUMENT; + } le = &ledger->l_entries[entry]; @@ -1543,7 +1596,7 @@ ledger_get_panic_on_negative(ledger_t ledger, int entry, int *panic_on_negative) *panic_on_negative = FALSE; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1551,8 +1604,9 @@ ledger_get_balance(ledger_t ledger, int entry, ledger_amount_t *balance) { struct ledger_entry *le; - if (!ENTRY_VALID(ledger, entry)) - return (KERN_INVALID_ARGUMENT); + if (!ENTRY_VALID(ledger, entry)) { + return KERN_INVALID_ARGUMENT; + } le = &ledger->l_entries[entry]; @@ -1564,7 +1618,7 @@ ledger_get_balance(ledger_t ledger, int entry, ledger_amount_t *balance) *balance = le->le_credit - 
le->le_debit; - return (KERN_SUCCESS); + return KERN_SUCCESS; } int @@ -1580,21 +1634,24 @@ ledger_template_info(void **buf, int *len) * caller's as the source. */ l = current_task()->ledger; - if ((*len < 0) || (l == NULL)) - return (EINVAL); - - if (*len > l->l_size) - *len = l->l_size; - lti = kalloc((*len) * sizeof (struct ledger_template_info)); - if (lti == NULL) - return (ENOMEM); + if ((*len < 0) || (l == NULL)) { + return EINVAL; + } + + if (*len > l->l_size) { + *len = l->l_size; + } + lti = kalloc((*len) * sizeof(struct ledger_template_info)); + if (lti == NULL) { + return ENOMEM; + } *buf = lti; template_lock(l->l_template); et = l->l_template->lt_entries; for (i = 0; i < *len; i++) { - memset(lti, 0, sizeof (*lti)); + memset(lti, 0, sizeof(*lti)); strlcpy(lti->lti_name, et->et_key, LEDGER_NAME_MAX); strlcpy(lti->lti_group, et->et_group, LEDGER_NAME_MAX); strlcpy(lti->lti_units, et->et_units, LEDGER_NAME_MAX); @@ -1603,25 +1660,25 @@ ledger_template_info(void **buf, int *len) } template_unlock(l->l_template); - return (0); + return 0; } static void ledger_fill_entry_info(struct ledger_entry *le, - struct ledger_entry_info *lei, - uint64_t now) + struct ledger_entry_info *lei, + uint64_t now) { - assert(le != NULL); + assert(le != NULL); assert(lei != NULL); - memset(lei, 0, sizeof (*lei)); + memset(lei, 0, sizeof(*lei)); lei->lei_limit = le->le_limit; lei->lei_credit = le->le_credit; lei->lei_debit = le->le_debit; lei->lei_balance = lei->lei_credit - lei->lei_debit; - lei->lei_refill_period = (le->le_flags & LF_REFILL_SCHEDULED) ? - abstime_to_nsecs(le->_le.le_refill.le_refill_period) : 0; + lei->lei_refill_period = (le->le_flags & LF_REFILL_SCHEDULED) ? + abstime_to_nsecs(le->_le.le_refill.le_refill_period) : 0; lei->lei_last_refill = abstime_to_nsecs(now - le->_le.le_refill.le_last_refill); } @@ -1634,14 +1691,17 @@ ledger_get_task_entry_info_multiple(task_t task, void **buf, int *len) int i; ledger_t l; - if ((*len < 0) || ((l = task->ledger) == NULL)) - return (EINVAL); + if ((*len < 0) || ((l = task->ledger) == NULL)) { + return EINVAL; + } - if (*len > l->l_size) - *len = l->l_size; - lei = kalloc((*len) * sizeof (struct ledger_entry_info)); - if (lei == NULL) - return (ENOMEM); + if (*len > l->l_size) { + *len = l->l_size; + } + lei = kalloc((*len) * sizeof(struct ledger_entry_info)); + if (lei == NULL) { + return ENOMEM; + } *buf = lei; le = l->l_entries; @@ -1652,13 +1712,13 @@ ledger_get_task_entry_info_multiple(task_t task, void **buf, int *len) lei++; } - return (0); + return 0; } void ledger_get_entry_info(ledger_t ledger, - int entry, - struct ledger_entry_info *lei) + int entry, + struct ledger_entry_info *lei) { uint64_t now = mach_absolute_time(); @@ -1676,15 +1736,16 @@ ledger_info(task_t task, struct ledger_info *info) { ledger_t l; - if ((l = task->ledger) == NULL) - return (ENOENT); + if ((l = task->ledger) == NULL) { + return ENOENT; + } - memset(info, 0, sizeof (*info)); + memset(info, 0, sizeof(*info)); strlcpy(info->li_name, l->l_template->lt_name, LEDGER_NAME_MAX); info->li_id = l->l_id; info->li_entries = l->l_size; - return (0); + return 0; } #ifdef LEDGER_DEBUG @@ -1695,12 +1756,14 @@ ledger_limit(task_t task, struct ledger_limit_args *args) int64_t limit; int idx; - if ((l = task->ledger) == NULL) - return (EINVAL); + if ((l = task->ledger) == NULL) { + return EINVAL; + } idx = ledger_key_lookup(l->l_template, args->lla_name); - if ((idx < 0) || (idx >= l->l_size)) - return (EINVAL); + if ((idx < 0) || (idx >= l->l_size)) { + return EINVAL; + } /* 
* XXX - this doesn't really seem like the right place to have @@ -1714,7 +1777,7 @@ ledger_limit(task_t task, struct ledger_limit_args *args) if (args->lla_refill_period) { /* - * If a refill is scheduled, then the limit is + * If a refill is scheduled, then the limit is * specified as a percentage of one CPU. The * syscall specifies the refill period in terms of * milliseconds, so we need to convert to nsecs. @@ -1739,11 +1802,12 @@ ledger_limit(task_t task, struct ledger_limit_args *args) lprintf(("%s limited to %lld\n", args->lla_name, limit)); } - if (args->lla_refill_period > 0) + if (args->lla_refill_period > 0) { ledger_set_period(l, idx, args->lla_refill_period); + } ledger_set_limit(l, idx, limit); flag_set(&l->l_entries[idx].le_flags, LEDGER_ACTION_BLOCK); - return (0); + return 0; } #endif diff --git a/osfmk/kern/ledger.h b/osfmk/kern/ledger.h index 55faa7f52..3e3e6c323 100644 --- a/osfmk/kern/ledger.h +++ b/osfmk/kern/ledger.h @@ -2,7 +2,7 @@ * Copyright (c) 2010-2018 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -32,31 +32,31 @@ #ifndef _KERN_LEDGER_H_ #define _KERN_LEDGER_H_ -#include /* ledger_t */ +#include /* ledger_t */ #ifdef MACH_KERNEL_PRIVATE #include #endif /* MACH_KERNEL_PRIVATE */ -#define LEDGER_INFO 0 -#define LEDGER_ENTRY_INFO 1 -#define LEDGER_TEMPLATE_INFO 2 -#define LEDGER_LIMIT 3 +#define LEDGER_INFO 0 +#define LEDGER_ENTRY_INFO 1 +#define LEDGER_TEMPLATE_INFO 2 +#define LEDGER_LIMIT 3 /* LEDGER_MAX_CMD always tracks the index of the last ledger command. 
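/*
 * A sketch of the percentage-of-one-CPU arithmetic described in the
 * comment above, assuming the conventional reading: with a refill period
 * of P milliseconds and a limit of N percent, the entry may accumulate
 * N% of P worth of CPU time (in nanoseconds) per period. The constant and
 * names here are illustrative stand-ins, not the kernel's exact formula.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_NSEC_PER_MSEC 1000000ULL

static uint64_t
toy_cpu_limit_nsecs(uint64_t refill_period_msecs, uint64_t percent)
{
	/* N% of one CPU over the refill period, expressed in nanoseconds */
	return refill_period_msecs * TOY_NSEC_PER_MSEC * percent / 100;
}

int
main(void)
{
	/* 50% of one CPU over a 100 ms window => 50 ms of CPU time */
	printf("%llu ns\n",
	    (unsigned long long)toy_cpu_limit_nsecs(100, 50));
	return 0;
}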
*/ -#define LEDGER_MAX_CMD LEDGER_LIMIT +#define LEDGER_MAX_CMD LEDGER_LIMIT -#define LEDGER_NAME_MAX 32 +#define LEDGER_NAME_MAX 32 struct ledger_info { - char li_name[LEDGER_NAME_MAX]; - int64_t li_id; - int64_t li_entries; + char li_name[LEDGER_NAME_MAX]; + int64_t li_id; + int64_t li_entries; }; struct ledger_template_info { - char lti_name[LEDGER_NAME_MAX]; - char lti_group[LEDGER_NAME_MAX]; - char lti_units[LEDGER_NAME_MAX]; + char lti_name[LEDGER_NAME_MAX]; + char lti_group[LEDGER_NAME_MAX]; + char lti_units[LEDGER_NAME_MAX]; }; #ifdef MACH_KERNEL_PRIVATE @@ -104,37 +104,37 @@ struct ledger { #endif /* MACH_KERNEL_PRIVATE */ struct ledger_entry_info { - int64_t lei_balance; - int64_t lei_credit; - int64_t lei_debit; - uint64_t lei_limit; - uint64_t lei_refill_period; /* In nanoseconds */ - uint64_t lei_last_refill; /* Time since last refill */ + int64_t lei_balance; + int64_t lei_credit; + int64_t lei_debit; + uint64_t lei_limit; + uint64_t lei_refill_period; /* In nanoseconds */ + uint64_t lei_last_refill; /* Time since last refill */ }; struct ledger_limit_args { - char lla_name[LEDGER_NAME_MAX]; - uint64_t lla_limit; - uint64_t lla_refill_period; + char lla_name[LEDGER_NAME_MAX]; + uint64_t lla_limit; + uint64_t lla_refill_period; }; -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE typedef struct ledger_template *ledger_template_t; -#define LEDGER_VALID(ledger) (ledger != LEDGER_NULL) +#define LEDGER_VALID(ledger) (ledger != LEDGER_NULL) /* Action to take when a ledger goes into deficit */ -#define LEDGER_ACTION_IGNORE 0x0000 -#define LEDGER_ACTION_BLOCK 0x0010 -#define LEDGER_ACTION_CALLBACK 0x0020 -#define LEDGER_ACTION_MASK 0x00f0 +#define LEDGER_ACTION_IGNORE 0x0000 +#define LEDGER_ACTION_BLOCK 0x0010 +#define LEDGER_ACTION_CALLBACK 0x0020 +#define LEDGER_ACTION_MASK 0x00f0 /* * Types of warnings that trigger a callback. 
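/*
 * The LEDGER_ACTION_* constants nearby occupy one nibble of the entry's
 * flag word, so retrieving them is a mask-and-test, as ledger_get_actions()
 * does. A small standalone sketch (flag values copied from the header;
 * the toy flag word itself is invented):
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_ACTION_IGNORE   0x0000
#define TOY_ACTION_BLOCK    0x0010
#define TOY_ACTION_CALLBACK 0x0020
#define TOY_ACTION_MASK     0x00f0

int
main(void)
{
	uint32_t le_flags = TOY_ACTION_BLOCK | TOY_ACTION_CALLBACK | 0x1;
	uint32_t actions = le_flags & TOY_ACTION_MASK;

	if (actions & TOY_ACTION_BLOCK) {
		printf("deficit blocks the thread\n");
	}
	if (actions & TOY_ACTION_CALLBACK) {
		printf("deficit invokes the registered callback\n");
	}
	return 0;
}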
*/ -#define LEDGER_WARNING_ROSE_ABOVE 1 -#define LEDGER_WARNING_DIPPED_BELOW 2 +#define LEDGER_WARNING_ROSE_ABOVE 1 +#define LEDGER_WARNING_DIPPED_BELOW 2 typedef void (*ledger_callback_t)(int warning, const void * param0, const void *param1); @@ -145,59 +145,59 @@ extern void ledger_template_dereference(ledger_template_t template); extern int ledger_entry_add(ledger_template_t template, const char *key, const char *group, const char *units); extern kern_return_t ledger_set_callback(ledger_template_t template, int entry, - ledger_callback_t callback, const void *param0, const void *param1); + ledger_callback_t callback, const void *param0, const void *param1); extern kern_return_t ledger_track_maximum(ledger_template_t template, int entry, - int period_in_secs); + int period_in_secs); extern kern_return_t ledger_panic_on_negative(ledger_template_t template, - int entry); + int entry); extern kern_return_t ledger_track_credit_only(ledger_template_t template, - int entry); + int entry); extern int ledger_key_lookup(ledger_template_t template, const char *key); /* value of entry type */ -#define LEDGER_CREATE_ACTIVE_ENTRIES 0 -#define LEDGER_CREATE_INACTIVE_ENTRIES 1 +#define LEDGER_CREATE_ACTIVE_ENTRIES 0 +#define LEDGER_CREATE_INACTIVE_ENTRIES 1 extern ledger_t ledger_instantiate(ledger_template_t template, int entry_type); extern void ledger_template_complete(ledger_template_t template); extern void ledger_template_complete_secure_alloc(ledger_template_t template); extern kern_return_t ledger_disable_callback(ledger_t ledger, int entry); extern kern_return_t ledger_enable_callback(ledger_t ledger, int entry); extern kern_return_t ledger_get_limit(ledger_t ledger, int entry, - ledger_amount_t *limit); + ledger_amount_t *limit); extern kern_return_t ledger_set_limit(ledger_t ledger, int entry, - ledger_amount_t limit, uint8_t warn_level_percentage); + ledger_amount_t limit, uint8_t warn_level_percentage); #if CONFIG_LEDGER_INTERVAL_MAX extern kern_return_t ledger_get_interval_max(ledger_t ledger, int entry, - ledger_amount_t *max_interval_balance, int reset); + ledger_amount_t *max_interval_balance, int reset); #endif /* CONFIG_LEDGER_INTERVAL_MAX */ extern kern_return_t ledger_get_lifetime_max(ledger_t ledger, int entry, - ledger_amount_t *max_lifetime_balance); + ledger_amount_t *max_lifetime_balance); extern kern_return_t ledger_get_actions(ledger_t ledger, int entry, int *actions); extern kern_return_t ledger_set_action(ledger_t ledger, int entry, int action); extern kern_return_t ledger_get_period(ledger_t ledger, int entry, - uint64_t *period); + uint64_t *period); extern kern_return_t ledger_set_period(ledger_t ledger, int entry, - uint64_t period); + uint64_t period); extern kern_return_t ledger_disable_refill(ledger_t l, int entry); extern kern_return_t ledger_entry_setactive(ledger_t ledger, int entry); extern void ledger_check_new_balance(thread_t thread, ledger_t ledger, int entry); extern kern_return_t ledger_credit(ledger_t ledger, int entry, - ledger_amount_t amount); + ledger_amount_t amount); extern kern_return_t ledger_credit_nocheck(ledger_t ledger, int entry, - ledger_amount_t amount); + ledger_amount_t amount); extern kern_return_t ledger_debit(ledger_t ledger, int entry, - ledger_amount_t amount); + ledger_amount_t amount); extern kern_return_t ledger_debit_nocheck(ledger_t ledger, int entry, - ledger_amount_t amount); + ledger_amount_t amount); extern kern_return_t ledger_credit_thread(thread_t thread, ledger_t ledger, - int entry, ledger_amount_t amount); + int entry, 
ledger_amount_t amount); extern kern_return_t ledger_debit_thread(thread_t thread, ledger_t ledger, - int entry, ledger_amount_t amount); + int entry, ledger_amount_t amount); extern kern_return_t ledger_zero_balance(ledger_t ledger, int entry); extern kern_return_t ledger_get_entries(ledger_t ledger, int entry, - ledger_amount_t *credit, ledger_amount_t *debit); + ledger_amount_t *credit, ledger_amount_t *debit); extern kern_return_t ledger_get_balance(ledger_t ledger, int entry, - ledger_amount_t *balance); + ledger_amount_t *balance); extern kern_return_t ledger_reset_callback_state(ledger_t ledger, int entry); extern kern_return_t ledger_disable_panic_on_negative(ledger_t ledger, int entry); extern kern_return_t ledger_get_panic_on_negative(ledger_t ledger, int entry, int *panic_on_negative); @@ -217,15 +217,15 @@ extern int ledger_limit(task_t task, struct ledger_limit_args *args); #endif extern int ledger_info(task_t task, struct ledger_info *info); -extern int +extern int ledger_get_task_entry_info_multiple(task_t task, void **buf, int *len); extern void ledger_get_entry_info(ledger_t ledger, int entry, - struct ledger_entry_info *lei); + struct ledger_entry_info *lei); extern int ledger_template_info(void **buf, int *len); #endif /* KERNEL_PRIVATE */ -#endif /* _KERN_LEDGER_H_ */ +#endif /* _KERN_LEDGER_H_ */ diff --git a/osfmk/kern/lock.h b/osfmk/kern/lock.h index 27aa5d017..a589db4b5 100644 --- a/osfmk/kern/lock.h +++ b/osfmk/kern/lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
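/*
 * The externs above describe a two-level design: a ledger_template_t
 * carries the entry metadata (names, groups, callbacks) shared by many
 * tasks, while each instantiated ledger_t carries only the counters. A
 * runnable userland toy of that shape, with invented toy_* types; it is
 * a sketch of the design, not the kernel-private API itself.
 */
#include <stdio.h>
#include <stdlib.h>

#define TOY_NAME_MAX 32

struct toy_template {           /* shared, set up once */
	int  count;
	char names[4][TOY_NAME_MAX];
};

struct toy_ledger {             /* per "task": balances only */
	const struct toy_template *tmpl;
	long balances[4];
};

static int
toy_entry_add(struct toy_template *t, const char *name)
{
	if (t->count >= 4) {
		return -1;      /* template full */
	}
	snprintf(t->names[t->count], TOY_NAME_MAX, "%s", name);
	return t->count++;
}

static struct toy_ledger *
toy_instantiate(const struct toy_template *t)
{
	/* Cheap: an instance is just zeroed counters plus a back pointer. */
	struct toy_ledger *l = calloc(1, sizeof(*l));
	if (l != NULL) {
		l->tmpl = t;
	}
	return l;
}

int
main(void)
{
	struct toy_template t = { 0 };
	int cpu = toy_entry_add(&t, "cpu_time");
	struct toy_ledger *l = toy_instantiate(&t);

	l->balances[cpu] += 42;         /* analogous to ledger_credit() */
	printf("%s = %ld\n", t.names[cpu], l->balances[cpu]);
	free(l);
	return 0;
}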
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,15 +61,15 @@ * Higher Level Locking primitives definitions */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _KERN_LOCK_H_ -#define _KERN_LOCK_H_ +#ifndef _KERN_LOCK_H_ +#define _KERN_LOCK_H_ #include #warning This header is deprecated. Use instead. -#endif /* _KERN_LOCK_H_ */ +#endif /* _KERN_LOCK_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/kern/lock_group.h b/osfmk/kern/lock_group.h new file mode 100644 index 000000000..56472c560 --- /dev/null +++ b/osfmk/kern/lock_group.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2018 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#ifndef _KERN_LOCK_GROUP_H +#define _KERN_LOCK_GROUP_H + +#include +#include + +#define LCK_GRP_NULL (lck_grp_t *)0 + + +typedef unsigned int lck_type_t; + +#define LCK_TYPE_SPIN 1 +#define LCK_TYPE_MTX 2 +#define LCK_TYPE_RW 3 + +#if XNU_KERNEL_PRIVATE + +/* + * Arguments wrapped in LCK_GRP_ARG() will be elided + * when LOCK_STATS is not set. 
+ * + * Arguments wrapped with LCK_GRP_PROBEARG() will be + * NULL when LOCK_STATS is not set + */ +#if LOCK_STATS +#define LCK_GRP_ARG(expr) ,expr +#define LCK_GRP_PROBEARG(grp) grp +#else +#define LCK_GRP_ARG(expr) +#define LCK_GRP_PROBEARG(grp) LCK_GRP_NULL +#endif /* LOCK_STATS */ + +typedef struct _lck_grp_stat_ { + uint64_t lgs_count; + uint32_t lgs_enablings; +#if CONFIG_DTRACE + /* + * Protected by dtrace_lock + */ + uint32_t lgs_probeid; + uint64_t lgs_limit; +#endif /* CONFIG_DTRACE */ +} lck_grp_stat_t; + +typedef struct _lck_grp_stats_ { +#if LOCK_STATS + lck_grp_stat_t lgss_spin_held; + lck_grp_stat_t lgss_spin_miss; + lck_grp_stat_t lgss_spin_spin; +#endif /* LOCK_STATS */ + + lck_grp_stat_t lgss_mtx_held; + lck_grp_stat_t lgss_mtx_direct_wait; + lck_grp_stat_t lgss_mtx_miss; + lck_grp_stat_t lgss_mtx_wait; +} lck_grp_stats_t; + +#define LCK_GRP_MAX_NAME 64 + +typedef struct _lck_grp_ { + queue_chain_t lck_grp_link; + uint32_t lck_grp_refcnt; + uint32_t lck_grp_spincnt; + uint32_t lck_grp_mtxcnt; + uint32_t lck_grp_rwcnt; + uint32_t lck_grp_attr; + char lck_grp_name[LCK_GRP_MAX_NAME]; + lck_grp_stats_t lck_grp_stats; +} lck_grp_t; + +#else +typedef struct _lck_grp_ lck_grp_t; +#endif /* XNU_KERNEL_PRIVATE */ + +#ifdef MACH_KERNEL_PRIVATE +typedef struct _lck_grp_attr_ { + uint32_t grp_attr_val; +} lck_grp_attr_t; + +extern lck_grp_attr_t LockDefaultGroupAttr; + +#define LCK_GRP_ATTR_STAT 0x1 +#define LCK_GRP_ATTR_TIME_STAT 0x2 + +#else +typedef struct __lck_grp_attr__ lck_grp_attr_t; +#endif /* MACH_KERNEL_PRIVATE */ + +#define LCK_GRP_ATTR_NULL (lck_grp_attr_t *)0 + +__BEGIN_DECLS + +extern lck_grp_attr_t *lck_grp_attr_alloc_init( + void); + +extern void lck_grp_attr_setdefault( + lck_grp_attr_t *attr); + +extern void lck_grp_attr_setstat( + lck_grp_attr_t *attr); + +extern void lck_grp_attr_free( + lck_grp_attr_t *attr); + +extern lck_grp_t *lck_grp_alloc_init( + const char* grp_name, + lck_grp_attr_t *attr); + +extern void lck_grp_free( + lck_grp_t *grp); + +__END_DECLS + +#ifdef MACH_KERNEL_PRIVATE +extern void lck_grp_init( + lck_grp_t *grp, + const char* grp_name, + lck_grp_attr_t *attr); + +extern void lck_grp_reference( + lck_grp_t *grp); + +extern void lck_grp_deallocate( + lck_grp_t *grp); + +extern void lck_grp_lckcnt_incr( + lck_grp_t *grp, + lck_type_t lck_type); + +extern void lck_grp_lckcnt_decr( + lck_grp_t *grp, + lck_type_t lck_type); + +#endif /* MACH_KERNEL_PRIVATE */ + +#endif /* _KERN_LOCK_GROUP_H */ diff --git a/osfmk/kern/lock_stat.h b/osfmk/kern/lock_stat.h new file mode 100644 index 000000000..e89732566 --- /dev/null +++ b/osfmk/kern/lock_stat.h @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2018 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. 
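/*
 * LCK_GRP_ARG() above makes a trailing parameter disappear entirely at
 * compile time: the comma lives inside the macro, so both the parameter
 * declaration and the argument at call sites vanish when the macro is
 * empty. A standalone sketch of the technique with an invented TOY_STATS
 * switch standing in for LOCK_STATS:
 */
#include <stdio.h>

#define TOY_STATS 1

#if TOY_STATS
#define TOY_GRP_ARG(expr)       , expr
#define TOY_GRP_PROBEARG(grp)   grp
#else
#define TOY_GRP_ARG(expr)               /* argument elided */
#define TOY_GRP_PROBEARG(grp)   NULL
#endif

struct toy_grp { const char *name; };

/* With TOY_STATS=0 this collapses to toy_lock(void *lock). */
static void
toy_lock(void *lock TOY_GRP_ARG(struct toy_grp *grp))
{
	(void)lock;
	/* PROBEARG never names grp when the parameter does not exist. */
	struct toy_grp *g = TOY_GRP_PROBEARG(grp);
	printf("locking for group %s\n", g ? g->name : "(none)");
}

int
main(void)
{
	struct toy_grp g = { "toy" };
	toy_lock((void *)0x1 TOY_GRP_ARG(&g));  /* call sites elide too */
	return 0;
}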
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#ifndef _KERN_LOCKSTAT_H +#define _KERN_LOCKSTAT_H +#include +#include +#include + +/* + * N.B.: On x86, statistics are currently recorded for all indirect mutexes. + * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained + * as a 64-bit quantity (the new x86 specific statistics are also maintained + * as 32-bit quantities). + * + * + * Enable this preprocessor define to record the first miss alone + * By default, we count every miss, hence multiple misses may be + * recorded for a single lock acquire attempt via lck_mtx_lock + */ +#undef LOG_FIRST_MISS_ALONE + +/* + * This preprocessor define controls whether the R-M-W update of the + * per-group statistics elements are atomic (LOCK-prefixed) + * Enabled by default. + */ +#define ATOMIC_STAT_UPDATES 1 + +/* + * DTrace lockstat probe definitions + * + * Spinlocks + */ +#define LS_LCK_SPIN_LOCK_ACQUIRE 0 +#define LS_LCK_SPIN_LOCK_SPIN 1 +#define LS_LCK_SPIN_UNLOCK_RELEASE 2 + +/* + * Mutexes can also have interlock-spin events, which are + * unique to our lock implementation. + */ +#define LS_LCK_MTX_LOCK_ACQUIRE 3 +#define LS_LCK_MTX_LOCK_BLOCK 5 +#define LS_LCK_MTX_LOCK_SPIN 6 +#define LS_LCK_MTX_LOCK_ILK_SPIN 7 +#define LS_LCK_MTX_TRY_LOCK_ACQUIRE 8 +#define LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE 9 +#define LS_LCK_MTX_UNLOCK_RELEASE 10 + +#define LS_LCK_MTX_LOCK_SPIN_ACQUIRE 39 +/* + * Provide a parallel set for indirect mutexes + */ +#define LS_LCK_MTX_EXT_LOCK_ACQUIRE 17 +#define LS_LCK_MTX_EXT_LOCK_BLOCK 18 +#define LS_LCK_MTX_EXT_LOCK_SPIN 19 +#define LS_LCK_MTX_EXT_LOCK_ILK_SPIN 20 +#define LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE 21 +#define LS_LCK_MTX_EXT_UNLOCK_RELEASE 22 + +/* + * Reader-writer locks support a blocking upgrade primitive, as + * well as the possibility of spinning on the interlock. + */ +#define LS_LCK_RW_LOCK_SHARED_ACQUIRE 23 +#define LS_LCK_RW_LOCK_SHARED_BLOCK 24 +#define LS_LCK_RW_LOCK_SHARED_SPIN 25 + +#define LS_LCK_RW_LOCK_EXCL_ACQUIRE 26 +#define LS_LCK_RW_LOCK_EXCL_BLOCK 27 +#define LS_LCK_RW_LOCK_EXCL_SPIN 28 + +#define LS_LCK_RW_DONE_RELEASE 29 + +#define LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE 30 +#define LS_LCK_RW_TRY_LOCK_SHARED_SPIN 31 + +#define LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE 32 +#define LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN 33 + +#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE 34 +#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN 35 +#define LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK 36 + +#define LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE 37 +#define LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN 38 + +#define LS_NPROBES 40 +#define LS_LCK_INVALID LS_NPROBES + +#if CONFIG_DTRACE +extern uint32_t lockstat_probemap[LS_NPROBES]; +extern void (*lockstat_probe)(uint32_t, uint64_t, uint64_t, + uint64_t, uint64_t, uint64_t); +/* + * Macros to record lockstat probes. 
+ */ +#define LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3) \ + { \ + uint32_t id; \ + if (__improbable(id = lockstat_probemap[(probe)])) { \ + (*lockstat_probe)(id, (uintptr_t)(lp), (arg0), \ + (arg1), (arg2), (arg3)); \ + } \ + } +#define LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD4(probe, lp, arg0, arg1, arg2, arg3) +#define LOCKSTAT_RECORD__(probe, lp, arg0, arg1, arg2, arg3, ...) LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3) +#define LOCKSTAT_RECORD(probe, lp, ...) LOCKSTAT_RECORD__(probe, lp, ##__VA_ARGS__, 0, 0, 0, 0) +#else +#define LOCKSTAT_RECORD() +#endif /* CONFIG_DTRACE */ + +/* + * Time threshold before dtrace lockstat spin + * probes are triggered + */ +extern uint64_t dtrace_spin_threshold; + +#if CONFIG_DTRACE +void lockprof_invoke(lck_grp_t*, lck_grp_stat_t*, uint64_t); +#endif /* CONFIG_DTRACE */ + +static inline void +lck_grp_stat_enable(lck_grp_stat_t *stat) +{ + stat->lgs_enablings++; +} + +static inline void +lck_grp_stat_disable(lck_grp_stat_t *stat) +{ + stat->lgs_enablings--; +} + +#if MACH_KERNEL_PRIVATE +#if LOCK_STATS + +static inline void +lck_grp_inc_stats(lck_grp_t *grp, lck_grp_stat_t *stat) +{ + if (__improbable(stat->lgs_enablings)) { + uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed); +#if CONFIG_DTRACE + if (__improbable(stat->lgs_limit && (val % (stat->lgs_limit)) == 0)) { + lockprof_invoke(grp, stat, val); + } +#else +#pragma unused(val) +#endif /* CONFIG_DTRACE */ + } +} + +static inline void +lck_grp_inc_time_stats(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t time) +{ + if (__improbable(stat->lgs_enablings)) { + uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed); +#if CONFIG_DTRACE + if (__improbable(stat->lgs_limit)) { + while (__improbable(time > stat->lgs_limit)) { + time -= stat->lgs_limit; + lockprof_invoke(grp, stat, val); + } + if (__improbable(((val % stat->lgs_limit) + time) > stat->lgs_limit)) { + lockprof_invoke(grp, stat, val); + } + } +#else +#pragma unused(val) +#endif /* CONFIG_DTRACE */ + } +} + +#endif /* LOCK_STATS */ + +static inline void +lck_grp_spin_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp)) +{ +#pragma unused(lock) +#if CONFIG_DTRACE + LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp)); +#endif +#if LOCK_STATS + if (!grp) { + return; + } + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_held; + lck_grp_inc_stats(grp, stat); +#endif /* LOCK_STATS */ +} + +static inline void +lck_grp_spin_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp)) +{ +#pragma unused(lock) +#if LOCK_STATS + if (!grp) { + return; + } + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_miss; + lck_grp_inc_stats(grp, stat); +#endif /* LOCK_STATS */ +} + +static inline void +lck_grp_spin_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time) +{ +#pragma unused(lock, time) +#if CONFIG_DTRACE + if (time > dtrace_spin_threshold) { + LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp)); + } +#endif /* CONFIG_DTRACE */ +#if LOCK_STATS + if (!grp) { + return; + } + lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_spin; + lck_grp_inc_time_stats(grp, stat, time); +#endif /* LOCK_STATS */ +} + +static inline boolean_t +lck_grp_spin_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp)) +{ +#pragma unused(lock) + boolean_t enabled = FALSE; +#if CONFIG_DTRACE + enabled |= lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0; +#endif /* CONFIG_DTRACE */ +#if LOCK_STATS + enabled |= (grp && 
grp->lck_grp_stats.lgss_spin_spin.lgs_enablings); +#endif /* LOCK_STATS */ + return enabled; +} + +static void inline +lck_grp_mtx_inc_stats( + uint64_t* stat) +{ +#if ATOMIC_STAT_UPDATES + os_atomic_inc(stat, relaxed); +#else + (*stat)++; +#endif /* ATOMIC_STAT_UPDATES */ +} + +static void inline +lck_grp_mtx_update_miss( + struct _lck_mtx_ext_ *lock, + int *first_miss) +{ +#pragma unused(first_miss) +#if LOG_FIRST_MISS_ALONE + if ((*first_miss & 1) == 0) { +#endif /* LOG_FIRST_MISS_ALONE */ + uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_miss.lgs_count; + lck_grp_mtx_inc_stats(stat); + +#if LOG_FIRST_MISS_ALONE + *first_miss |= 1; +} +#endif /* LOG_FIRST_MISS_ALONE */ +} + +static void inline +lck_grp_mtx_update_direct_wait( + struct _lck_mtx_ext_ *lock) +{ + uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_direct_wait.lgs_count; + lck_grp_mtx_inc_stats(stat); +} + +static void inline +lck_grp_mtx_update_wait( + struct _lck_mtx_ext_ *lock, + int *first_miss) +{ +#pragma unused(first_miss) +#if LOG_FIRST_MISS_ALONE + if ((*first_miss & 2) == 0) { +#endif /* LOG_FIRST_MISS_ALONE */ + uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_wait.lgs_count; + lck_grp_mtx_inc_stats(stat); + +#if LOG_FIRST_MISS_ALONE + *first_miss |= 2; +} +#endif /* LOG_FIRST_MISS_ALONE */ +} + +static void inline +lck_grp_mtx_update_held( + struct _lck_mtx_ext_ *lock) +{ + uint64_t* stat = &lock->lck_mtx_grp->lck_grp_stats.lgss_mtx_held.lgs_count; + lck_grp_mtx_inc_stats(stat); +} +#endif /* MACH_KERNEL_PRIVATE */ +#endif /* _KERN_LOCKSTAT_H */ diff --git a/osfmk/kern/locks.c b/osfmk/kern/locks.c index 04b5dd239..04106709b 100644 --- a/osfmk/kern/locks.c +++ b/osfmk/kern/locks.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION.
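/*
 * LOCKSTAT_RECORD() in the header above pads a variable argument list out
 * to exactly four arguments by appending zeros and letting a fixed-arity
 * helper drop the surplus. The same trick, reduced to a single-helper
 * printf toy (the kernel uses two helper levels and ##__VA_ARGS__; this
 * reduction is ISO C99):
 */
#include <stdio.h>

#define TOY4(a, b, c, d)         printf("%d %d %d %d\n", a, b, c, d)
#define TOY_PAD(a, b, c, d, ...) TOY4(a, b, c, d)
#define TOY_RECORD(...)          TOY_PAD(__VA_ARGS__, 0, 0, 0, 0)

int
main(void)
{
	TOY_RECORD(1);            /* prints: 1 0 0 0 */
	TOY_RECORD(1, 2, 3);      /* prints: 1 2 3 0 */
	TOY_RECORD(1, 2, 3, 4);   /* prints: 1 2 3 4 */
	return 0;
}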
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,6 +64,7 @@ #include #include +#include #include #include #include @@ -78,53 +79,43 @@ #include -#if CONFIG_DTRACE -/* - * We need only enough declarations from the BSD-side to be able to - * test if our probe is active, and to call __dtrace_probe(). Setting - * NEED_DTRACE_DEFS gets a local copy of those definitions pulled in. - */ -#define NEED_DTRACE_DEFS -#include <../bsd/sys/lockstat.h> -#endif - -#define LCK_MTX_SLEEP_CODE 0 -#define LCK_MTX_SLEEP_DEADLINE_CODE 1 -#define LCK_MTX_LCK_WAIT_CODE 2 -#define LCK_MTX_UNLCK_WAKEUP_CODE 3 +#define LCK_MTX_SLEEP_CODE 0 +#define LCK_MTX_SLEEP_DEADLINE_CODE 1 +#define LCK_MTX_LCK_WAIT_CODE 2 +#define LCK_MTX_UNLCK_WAKEUP_CODE 3 #if MACH_LDEBUG -#define ALIGN_TEST(p,t) do{if((uintptr_t)p&(sizeof(t)-1)) __builtin_trap();}while(0) +#define ALIGN_TEST(p, t) do{if((uintptr_t)p&(sizeof(t)-1)) __builtin_trap();}while(0) #else -#define ALIGN_TEST(p,t) do{}while(0) +#define ALIGN_TEST(p, t) do{}while(0) #endif /* Silence the volatile to _Atomic cast warning */ -#define ATOMIC_CAST(t,p) ((_Atomic t*)(uintptr_t)(p)) +#define ATOMIC_CAST(t, p) ((_Atomic t*)(uintptr_t)(p)) /* Enforce program order of loads and stores. */ #define ordered_load(target, type) \ - __c11_atomic_load((_Atomic type *)(target), memory_order_relaxed) + __c11_atomic_load((_Atomic type *)(target), memory_order_relaxed) #define ordered_store(target, type, value) \ - __c11_atomic_store((_Atomic type *)(target), value, memory_order_relaxed) + __c11_atomic_store((_Atomic type *)(target), value, memory_order_relaxed) -#define ordered_load_hw(lock) ordered_load(&(lock)->lock_data, uintptr_t) -#define ordered_store_hw(lock, value) ordered_store(&(lock)->lock_data, uintptr_t, (value)) +#define ordered_load_hw(lock) ordered_load(&(lock)->lock_data, uintptr_t) +#define ordered_store_hw(lock, value) ordered_store(&(lock)->lock_data, uintptr_t, (value)) -#define NOINLINE __attribute__((noinline)) +#define NOINLINE __attribute__((noinline)) -static queue_head_t lck_grp_queue; -static unsigned int lck_grp_cnt; +queue_head_t lck_grp_queue; +unsigned int lck_grp_cnt; -decl_lck_mtx_data(static,lck_grp_lock) +decl_lck_mtx_data(, lck_grp_lock) static lck_mtx_ext_t lck_grp_lock_ext; SECURITY_READ_ONLY_LATE(boolean_t) spinlock_timeout_panic = TRUE; -lck_grp_attr_t LockDefaultGroupAttr; -lck_grp_t LockCompatGroup; -lck_attr_t LockDefaultLckAttr; +lck_grp_attr_t LockDefaultGroupAttr; +lck_grp_t LockCompatGroup; +lck_attr_t LockDefaultLckAttr; #if CONFIG_DTRACE && __SMP__ #if defined (__x86_64__) @@ -135,11 +126,13 @@ uint64_t dtrace_spin_threshold = LOCK_PANIC_TIMEOUT / 1000000; // 500ns #endif uintptr_t -unslide_for_kdebug(void* object) { - if (__improbable(kdebug_enable)) +unslide_for_kdebug(void* object) +{ + if (__improbable(kdebug_enable)) { return VM_KERNEL_UNSLIDE_OR_PERM(object); - else + } else { return 0; + } } /* @@ -153,38 +146,43 @@ lck_mod_init( /* * Obtain "lcks" options:this currently controls lock statistics */ - if (!PE_parse_boot_argn("lcks", &LcksOpts, sizeof (LcksOpts))) + if 
(!PE_parse_boot_argn("lcks", &LcksOpts, sizeof(LcksOpts))) { LcksOpts = 0; + } #if (DEVELOPMENT || DEBUG) && defined(__x86_64__) - if (!PE_parse_boot_argn("-disable_mtx_chk", &LckDisablePreemptCheck, sizeof (LckDisablePreemptCheck))) + if (!PE_parse_boot_argn("-disable_mtx_chk", &LckDisablePreemptCheck, sizeof(LckDisablePreemptCheck))) { LckDisablePreemptCheck = 0; + } #endif /* (DEVELOPMENT || DEBUG) && defined(__x86_64__) */ queue_init(&lck_grp_queue); - - /* + + /* * Need to bootstrap the LockCompatGroup instead of calling lck_grp_init() here. This avoids * grabbing the lck_grp_lock before it is initialized. */ - + bzero(&LockCompatGroup, sizeof(lck_grp_t)); (void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME); - - if (LcksOpts & enaLkStat) - LockCompatGroup.lck_grp_attr = LCK_GRP_ATTR_STAT; - else - LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE; - + + LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE; + if (LcksOpts & enaLkStat) { + LockCompatGroup.lck_grp_attr |= LCK_GRP_ATTR_STAT; + } + if (LcksOpts & enaLkTimeStat) { + LockCompatGroup.lck_grp_attr |= LCK_GRP_ATTR_TIME_STAT; + } + LockCompatGroup.lck_grp_refcnt = 1; - + enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup); lck_grp_cnt = 1; - + lck_grp_attr_setdefault(&LockDefaultGroupAttr); lck_attr_setdefault(&LockDefaultLckAttr); - + lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr); } @@ -192,16 +190,17 @@ lck_mod_init( * Routine: lck_grp_attr_alloc_init */ -lck_grp_attr_t * +lck_grp_attr_t * lck_grp_attr_alloc_init( void) { - lck_grp_attr_t *attr; + lck_grp_attr_t *attr; - if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0) + if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0) { lck_grp_attr_setdefault(attr); + } - return(attr); + return attr; } @@ -211,34 +210,35 @@ lck_grp_attr_alloc_init( void lck_grp_attr_setdefault( - lck_grp_attr_t *attr) + lck_grp_attr_t *attr) { - if (LcksOpts & enaLkStat) + if (LcksOpts & enaLkStat) { attr->grp_attr_val = LCK_GRP_ATTR_STAT; - else + } else { attr->grp_attr_val = 0; + } } /* - * Routine: lck_grp_attr_setstat + * Routine: lck_grp_attr_setstat */ void lck_grp_attr_setstat( - lck_grp_attr_t *attr) + lck_grp_attr_t *attr) { (void)hw_atomic_or(&attr->grp_attr_val, LCK_GRP_ATTR_STAT); } /* - * Routine: lck_grp_attr_free + * Routine: lck_grp_attr_free */ void lck_grp_attr_free( - lck_grp_attr_t *attr) + lck_grp_attr_t *attr) { kfree(attr, sizeof(lck_grp_attr_t)); } @@ -250,15 +250,16 @@ lck_grp_attr_free( lck_grp_t * lck_grp_alloc_init( - const char* grp_name, - lck_grp_attr_t *attr) + const char* grp_name, + lck_grp_attr_t *attr) { - lck_grp_t *grp; + lck_grp_t *grp; - if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0) + if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0) { lck_grp_init(grp, grp_name, attr); + } - return(grp); + return grp; } /* @@ -275,12 +276,36 @@ lck_grp_init(lck_grp_t * grp, const char * grp_name, lck_grp_attr_t * attr) (void)strlcpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME); - if (attr != LCK_GRP_ATTR_NULL) + if (attr != LCK_GRP_ATTR_NULL) { grp->lck_grp_attr = attr->grp_attr_val; - else if (LcksOpts & enaLkStat) - grp->lck_grp_attr = LCK_GRP_ATTR_STAT; - else - grp->lck_grp_attr = LCK_ATTR_NONE; + } else { + grp->lck_grp_attr = 0; + if (LcksOpts & enaLkStat) { + grp->lck_grp_attr |= LCK_GRP_ATTR_STAT; + } + if (LcksOpts & enaLkTimeStat) { + grp->lck_grp_attr |= LCK_GRP_ATTR_TIME_STAT; + } + } + + if (grp->lck_grp_attr & LCK_GRP_ATTR_STAT) { + 
lck_grp_stats_t *stats = &grp->lck_grp_stats; + +#if LOCK_STATS + lck_grp_stat_enable(&stats->lgss_spin_held); + lck_grp_stat_enable(&stats->lgss_spin_miss); +#endif /* LOCK_STATS */ + + lck_grp_stat_enable(&stats->lgss_mtx_held); + lck_grp_stat_enable(&stats->lgss_mtx_miss); + lck_grp_stat_enable(&stats->lgss_mtx_direct_wait); + } + if (grp->lck_grp_attr & LCK_GRP_ATTR_TIME_STAT) { +#if LOCK_STATS + lck_grp_stats_t *stats = &grp->lck_grp_stats; + lck_grp_stat_enable(&stats->lgss_spin_spin); +#endif /* LOCK_STATS */ + } grp->lck_grp_refcnt = 1; @@ -291,12 +316,12 @@ lck_grp_init(lck_grp_t * grp, const char * grp_name, lck_grp_attr_t * attr) } /* - * Routine: lck_grp_free + * Routine: lck_grp_free */ void lck_grp_free( - lck_grp_t *grp) + lck_grp_t *grp) { lck_mtx_lock(&lck_grp_lock); lck_grp_cnt--; @@ -307,27 +332,28 @@ lck_grp_free( /* - * Routine: lck_grp_reference + * Routine: lck_grp_reference */ void lck_grp_reference( - lck_grp_t *grp) + lck_grp_t *grp) { (void)hw_atomic_add(&grp->lck_grp_refcnt, 1); } /* - * Routine: lck_grp_deallocate + * Routine: lck_grp_deallocate */ void lck_grp_deallocate( - lck_grp_t *grp) + lck_grp_t *grp) { - if (hw_atomic_sub(&grp->lck_grp_refcnt, 1) == 0) - kfree(grp, sizeof(lck_grp_t)); + if (hw_atomic_sub(&grp->lck_grp_refcnt, 1) == 0) { + kfree(grp, sizeof(lck_grp_t)); + } } /* @@ -336,10 +362,10 @@ lck_grp_deallocate( void lck_grp_lckcnt_incr( - lck_grp_t *grp, - lck_type_t lck_type) + lck_grp_t *grp, + lck_type_t lck_type) { - unsigned int *lckcnt; + unsigned int *lckcnt; switch (lck_type) { case LCK_TYPE_SPIN: @@ -364,11 +390,11 @@ lck_grp_lckcnt_incr( void lck_grp_lckcnt_decr( - lck_grp_t *grp, - lck_type_t lck_type) + lck_grp_t *grp, + lck_type_t lck_type) { - unsigned int *lckcnt; - int updated; + unsigned int *lckcnt; + int updated; switch (lck_type) { case LCK_TYPE_SPIN: @@ -397,12 +423,13 @@ lck_attr_t * lck_attr_alloc_init( void) { - lck_attr_t *attr; + lck_attr_t *attr; - if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0) + if ((attr = (lck_attr_t *)kalloc(sizeof(lck_attr_t))) != 0) { lck_attr_setdefault(attr); + } - return(attr); + return attr; } @@ -412,23 +439,24 @@ lck_attr_alloc_init( void lck_attr_setdefault( - lck_attr_t *attr) + lck_attr_t *attr) { #if __arm__ || __arm64__ /* : Using LCK_ATTR_DEBUG here causes panic at boot time for arm */ attr->lck_attr_val = LCK_ATTR_NONE; #elif __i386__ || __x86_64__ #if !DEBUG - if (LcksOpts & enaLkDeb) - attr->lck_attr_val = LCK_ATTR_DEBUG; - else - attr->lck_attr_val = LCK_ATTR_NONE; + if (LcksOpts & enaLkDeb) { + attr->lck_attr_val = LCK_ATTR_DEBUG; + } else { + attr->lck_attr_val = LCK_ATTR_NONE; + } #else - attr->lck_attr_val = LCK_ATTR_DEBUG; -#endif /* !DEBUG */ + attr->lck_attr_val = LCK_ATTR_DEBUG; +#endif /* !DEBUG */ #else #error Unknown architecture.
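/*
 * lck_grp_reference()/lck_grp_deallocate() in the hunk above are a classic
 * atomic reference count: increment to take a reference, decrement and
 * free when the count hits zero. A C11 userland sketch of the same shape;
 * note hw_atomic_sub() returns the new value, while C11 fetch_sub returns
 * the old one, so the termination test differs. Names are invented.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_grp {
	_Atomic unsigned refcnt;
	char name[64];
};

static struct toy_grp *
toy_grp_alloc(const char *name)
{
	struct toy_grp *g = calloc(1, sizeof(*g));
	if (g != NULL) {
		atomic_init(&g->refcnt, 1);     /* creator holds one ref */
		snprintf(g->name, sizeof(g->name), "%s", name);
	}
	return g;
}

static void
toy_grp_reference(struct toy_grp *g)
{
	atomic_fetch_add(&g->refcnt, 1);
}

static void
toy_grp_deallocate(struct toy_grp *g)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&g->refcnt, 1) == 1) {
		free(g);
	}
}

int
main(void)
{
	struct toy_grp *g = toy_grp_alloc("toy");
	toy_grp_reference(g);   /* second holder */
	toy_grp_deallocate(g);  /* drops to 1, still alive */
	toy_grp_deallocate(g);  /* drops to 0, freed */
	return 0;
}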
-#endif /* __arm__ */ +#endif /* __arm__ */ } @@ -437,7 +465,7 @@ lck_attr_setdefault( */ void lck_attr_setdebug( - lck_attr_t *attr) + lck_attr_t *attr) { (void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG); } @@ -447,7 +475,7 @@ lck_attr_setdebug( */ void lck_attr_cleardebug( - lck_attr_t *attr) + lck_attr_t *attr) { (void)hw_atomic_and(&attr->lck_attr_val, ~LCK_ATTR_DEBUG); } @@ -458,7 +486,7 @@ lck_attr_cleardebug( */ void lck_attr_rw_shared_priority( - lck_attr_t *attr) + lck_attr_t *attr) { (void)hw_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY); } @@ -469,7 +497,7 @@ lck_attr_rw_shared_priority( */ void lck_attr_free( - lck_attr_t *attr) + lck_attr_t *attr) { kfree(attr, sizeof(lck_attr_t)); } @@ -493,87 +521,93 @@ hw_lock_init(hw_lock_t lock) * preemption disabled. */ -#if __SMP__ +#if __SMP__ static unsigned int NOINLINE -hw_lock_lock_contended(hw_lock_t lock, uintptr_t data, uint64_t timeout, boolean_t do_panic) +hw_lock_lock_contended(hw_lock_t lock, uintptr_t data, uint64_t timeout, boolean_t do_panic LCK_GRP_ARG(lck_grp_t *grp)) { - uint64_t end = 0; - uintptr_t holder = lock->lock_data; - int i; + uint64_t end = 0; + uintptr_t holder = lock->lock_data; + int i; - if (timeout == 0) + if (timeout == 0) { timeout = LOCK_PANIC_TIMEOUT; -#if CONFIG_DTRACE - uint64_t begin; - boolean_t dtrace_enabled = lockstat_probemap[LS_LCK_SPIN_LOCK_SPIN] != 0; - if (__improbable(dtrace_enabled)) + } +#if CONFIG_DTRACE || LOCK_STATS + uint64_t begin = 0; + boolean_t stat_enabled = lck_grp_spin_spin_enabled(lock LCK_GRP_ARG(grp)); +#endif /* CONFIG_DTRACE || LOCK_STATS */ + +#if LOCK_STATS || CONFIG_DTRACE + if (__improbable(stat_enabled)) { begin = mach_absolute_time(); -#endif - for ( ; ; ) { + } +#endif /* LOCK_STATS || CONFIG_DTRACE */ + for (;;) { for (i = 0; i < LOCK_SNOOP_SPINS; i++) { cpu_pause(); #if (!__ARM_ENABLE_WFE_) || (LOCK_PRETEST) holder = ordered_load_hw(lock); - if (holder != 0) + if (holder != 0) { continue; + } #endif if (atomic_compare_exchange(&lock->lock_data, 0, data, memory_order_acquire_smp, TRUE)) { -#if CONFIG_DTRACE - if (__improbable(dtrace_enabled)) { - uint64_t spintime = mach_absolute_time() - begin; - if (spintime > dtrace_spin_threshold) - LOCKSTAT_RECORD2(LS_LCK_SPIN_LOCK_SPIN, lock, spintime, dtrace_spin_threshold); +#if CONFIG_DTRACE || LOCK_STATS + if (__improbable(stat_enabled)) { + lck_grp_spin_update_spin(lock LCK_GRP_ARG(grp), mach_absolute_time() - begin); } -#endif + lck_grp_spin_update_miss(lock LCK_GRP_ARG(grp)); +#endif /* CONFIG_DTRACE || LOCK_STATS */ return 1; } } if (end == 0) { end = ml_get_timebase() + timeout; - } - else if (ml_get_timebase() >= end) + } else if (ml_get_timebase() >= end) { break; + } } if (do_panic) { // Capture the actual time spent blocked, which may be higher than the timeout // if a misbehaving interrupt stole this thread's CPU time. 
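The contended path just shown follows a common pattern: spin in short bursts, arm the deadline lazily after the first missed burst, and re-check it between bursts; the panic below fires only when do_panic is set. A simplified standalone model of that loop, with C11 atomics standing in for the kernel's atomic_compare_exchange, and now()/SNOOP_SPINS as stand-ins for ml_get_timebase()/LOCK_SNOOP_SPINS:

    #include <stdatomic.h>
    #include <stdint.h>

    extern uint64_t now(void);          /* stand-in for ml_get_timebase() */
    #define SNOOP_SPINS 128             /* stand-in for LOCK_SNOOP_SPINS */

    static int
    spin_until(atomic_uintptr_t *lock, uintptr_t self, uint64_t timeout_ticks)
    {
        uint64_t end = 0;
        for (;;) {
            for (int i = 0; i < SNOOP_SPINS; i++) {
                uintptr_t expected = 0;
                /* acquire on success, mirroring memory_order_acquire_smp */
                if (atomic_compare_exchange_weak_explicit(lock, &expected, self,
                    memory_order_acquire, memory_order_relaxed)) {
                    return 1;           /* lock acquired */
                }
            }
            if (end == 0) {
                end = now() + timeout_ticks;   /* armed only after the first burst */
            } else if (now() >= end) {
                return 0;                      /* timed out */
            }
        }
    }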
panic("Spinlock timeout after %llu ticks, %p = %lx", - (ml_get_timebase() - end + timeout), lock, holder); + (ml_get_timebase() - end + timeout), lock, holder); } return 0; } -#endif // __SMP__ +#endif // __SMP__ static inline void -hw_lock_lock_internal(hw_lock_t lock, thread_t thread) +hw_lock_lock_internal(hw_lock_t lock, thread_t thread LCK_GRP_ARG(lck_grp_t *grp)) { - uintptr_t state; + uintptr_t state; state = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK; -#if __SMP__ +#if __SMP__ -#if LOCK_PRETEST - if (ordered_load_hw(lock)) +#if LOCK_PRETEST + if (ordered_load_hw(lock)) { goto contended; -#endif // LOCK_PRETEST + } +#endif // LOCK_PRETEST if (atomic_compare_exchange(&lock->lock_data, 0, state, - memory_order_acquire_smp, TRUE)) { + memory_order_acquire_smp, TRUE)) { goto end; } -#if LOCK_PRETEST +#if LOCK_PRETEST contended: -#endif // LOCK_PRETEST - hw_lock_lock_contended(lock, state, 0, spinlock_timeout_panic); +#endif // LOCK_PRETEST + hw_lock_lock_contended(lock, state, 0, spinlock_timeout_panic LCK_GRP_ARG(grp)); end: -#else // __SMP__ - if (lock->lock_data) +#else // __SMP__ + if (lock->lock_data) { panic("Spinlock held %p", lock); + } lock->lock_data = state; -#endif // __SMP__ -#if CONFIG_DTRACE - LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, 0); -#endif +#endif // __SMP__ + lck_grp_spin_update_held(lock LCK_GRP_ARG(grp)); + return; } @@ -584,11 +618,11 @@ end: * return with preemption disabled. */ void -hw_lock_lock(hw_lock_t lock) +(hw_lock_lock)(hw_lock_t lock LCK_GRP_ARG(lck_grp_t *grp)) { thread_t thread = current_thread(); disable_preemption_for_thread(thread); - hw_lock_lock_internal(lock, thread); + hw_lock_lock_internal(lock, thread LCK_GRP_ARG(grp)); } /* @@ -597,12 +631,13 @@ hw_lock_lock(hw_lock_t lock) * Acquire lock, spinning until it becomes available. */ void -hw_lock_lock_nopreempt(hw_lock_t lock) +(hw_lock_lock_nopreempt)(hw_lock_t lock LCK_GRP_ARG(lck_grp_t *grp)) { thread_t thread = current_thread(); - if (__improbable(!preemption_disabled_for_thread(thread))) + if (__improbable(!preemption_disabled_for_thread(thread))) { panic("Attempt to take no-preempt spinlock %p in preemptible context", lock); - hw_lock_lock_internal(lock, thread); + } + hw_lock_lock_internal(lock, thread LCK_GRP_ARG(grp)); } /* @@ -612,43 +647,44 @@ hw_lock_lock_nopreempt(hw_lock_t lock) * Timeout is in mach_absolute_time ticks, return with * preemption disabled. 
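One caller-side detail from this comment: the timeout is in mach_absolute_time ticks, not nanoseconds, so callers convert first. A hedged sketch (my_lock is hypothetical; nanoseconds_to_absolutetime and NSEC_PER_MSEC are the existing Mach helpers):

    uint64_t timeout_ticks;
    nanoseconds_to_absolutetime(100 * NSEC_PER_MSEC, &timeout_ticks); /* ~100 ms */

    if (!hw_lock_to(&my_lock, timeout_ticks)) {
        /* Timed out: lock not taken; preemption is still disabled, so back out. */
        enable_preemption();
    } else {
        /* critical section */
        hw_lock_unlock(&my_lock);      /* also re-enables preemption */
    }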
*/ -unsigned int -hw_lock_to(hw_lock_t lock, uint64_t timeout) +unsigned +int +(hw_lock_to)(hw_lock_t lock, uint64_t timeout LCK_GRP_ARG(lck_grp_t *grp)) { - thread_t thread; - uintptr_t state; + thread_t thread; + uintptr_t state; unsigned int success = 0; thread = current_thread(); disable_preemption_for_thread(thread); state = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK; -#if __SMP__ +#if __SMP__ -#if LOCK_PRETEST - if (ordered_load_hw(lock)) +#if LOCK_PRETEST + if (ordered_load_hw(lock)) { goto contended; -#endif // LOCK_PRETEST + } +#endif // LOCK_PRETEST if (atomic_compare_exchange(&lock->lock_data, 0, state, - memory_order_acquire_smp, TRUE)) { + memory_order_acquire_smp, TRUE)) { success = 1; goto end; } -#if LOCK_PRETEST +#if LOCK_PRETEST contended: -#endif // LOCK_PRETEST - success = hw_lock_lock_contended(lock, state, timeout, FALSE); +#endif // LOCK_PRETEST + success = hw_lock_lock_contended(lock, state, timeout, FALSE LCK_GRP_ARG(grp)); end: -#else // __SMP__ +#else // __SMP__ (void)timeout; if (ordered_load_hw(lock) == 0) { ordered_store_hw(lock, state); success = 1; } -#endif // __SMP__ -#if CONFIG_DTRACE - if (success) - LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, 0); -#endif +#endif // __SMP__ + if (success) { + lck_grp_spin_update_held(lock LCK_GRP_ARG(grp)); + } return success; } @@ -658,52 +694,56 @@ end: * returns with preemption disabled on success. */ static inline unsigned int -hw_lock_try_internal(hw_lock_t lock, thread_t thread) +hw_lock_try_internal(hw_lock_t lock, thread_t thread LCK_GRP_ARG(lck_grp_t *grp)) { - int success = 0; + int success = 0; -#if __SMP__ -#if LOCK_PRETEST - if (ordered_load_hw(lock)) +#if __SMP__ +#if LOCK_PRETEST + if (ordered_load_hw(lock)) { goto failed; -#endif // LOCK_PRETEST + } +#endif // LOCK_PRETEST success = atomic_compare_exchange(&lock->lock_data, 0, LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK, - memory_order_acquire_smp, FALSE); + memory_order_acquire_smp, FALSE); #else if (lock->lock_data == 0) { lock->lock_data = LCK_MTX_THREAD_TO_STATE(thread) | PLATFORM_LCK_ILOCK; success = 1; } -#endif // __SMP__ +#endif // __SMP__ -#if LOCK_PRETEST +#if LOCK_PRETEST failed: -#endif // LOCK_PRETEST -#if CONFIG_DTRACE - if (success) - LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, 0); -#endif +#endif // LOCK_PRETEST + if (success) { + lck_grp_spin_update_held(lock LCK_GRP_ARG(grp)); + } return success; } -unsigned int -hw_lock_try(hw_lock_t lock) +unsigned +int +(hw_lock_try)(hw_lock_t lock LCK_GRP_ARG(lck_grp_t *grp)) { thread_t thread = current_thread(); disable_preemption_for_thread(thread); - unsigned int success = hw_lock_try_internal(lock, thread); - if (!success) + unsigned int success = hw_lock_try_internal(lock, thread LCK_GRP_ARG(grp)); + if (!success) { enable_preemption(); + } return success; } -unsigned int -hw_lock_try_nopreempt(hw_lock_t lock) +unsigned +int +(hw_lock_try_nopreempt)(hw_lock_t lock LCK_GRP_ARG(lck_grp_t *grp)) { thread_t thread = current_thread(); - if (__improbable(!preemption_disabled_for_thread(thread))) + if (__improbable(!preemption_disabled_for_thread(thread))) { panic("Attempt to test no-preempt spinlock %p in preemptible context", lock); - return hw_lock_try_internal(lock, thread); + } + return hw_lock_try_internal(lock, thread LCK_GRP_ARG(grp)); } /* @@ -718,24 +758,25 @@ hw_lock_unlock_internal(hw_lock_t lock) #if __arm__ || __arm64__ // ARM tests are only for open-source exclusion set_event(); -#endif // __arm__ || __arm64__ -#if CONFIG_DTRACE +#endif // __arm__ || __arm64__ 
+#if CONFIG_DTRACE LOCKSTAT_RECORD(LS_LCK_SPIN_UNLOCK_RELEASE, lock, 0); #endif /* CONFIG_DTRACE */ } void -hw_lock_unlock(hw_lock_t lock) +(hw_lock_unlock)(hw_lock_t lock) { hw_lock_unlock_internal(lock); enable_preemption(); } void -hw_lock_unlock_nopreempt(hw_lock_t lock) +(hw_lock_unlock_nopreempt)(hw_lock_t lock) { - if (__improbable(!preemption_disabled_for_thread(current_thread()))) + if (__improbable(!preemption_disabled_for_thread(current_thread()))) { panic("Attempt to release no-preempt spinlock %p in preemptible context", lock); + } hw_lock_unlock_internal(lock); } @@ -746,65 +787,77 @@ hw_lock_unlock_nopreempt(hw_lock_t lock) unsigned int hw_lock_held(hw_lock_t lock) { - return (ordered_load_hw(lock) != 0); + return ordered_load_hw(lock) != 0; } /* * Routine: lck_spin_sleep */ wait_result_t -lck_spin_sleep( - lck_spin_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible) +lck_spin_sleep_grp( + lck_spin_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible, + lck_grp_t *grp) { - wait_result_t res; - - if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) + wait_result_t res; + + if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) { panic("Invalid lock sleep action %x\n", lck_sleep_action); + } res = assert_wait(event, interruptible); if (res == THREAD_WAITING) { lck_spin_unlock(lck); res = thread_block(THREAD_CONTINUE_NULL); - if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) - lck_spin_lock(lck); - } - else - if (lck_sleep_action & LCK_SLEEP_UNLOCK) + if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) { + lck_spin_lock_grp(lck, grp); + } + } else if (lck_sleep_action & LCK_SLEEP_UNLOCK) { lck_spin_unlock(lck); + } return res; } +wait_result_t +lck_spin_sleep( + lck_spin_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible) +{ + return lck_spin_sleep_grp(lck, lck_sleep_action, event, interruptible, LCK_GRP_NULL); +} /* * Routine: lck_spin_sleep_deadline */ wait_result_t lck_spin_sleep_deadline( - lck_spin_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible, - uint64_t deadline) + lck_spin_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible, + uint64_t deadline) { wait_result_t res; - if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) + if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) { panic("Invalid lock sleep action %x\n", lck_sleep_action); + } res = assert_wait_deadline(event, interruptible, deadline); if (res == THREAD_WAITING) { lck_spin_unlock(lck); res = thread_block(THREAD_CONTINUE_NULL); - if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) + if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) { lck_spin_lock(lck); - } - else - if (lck_sleep_action & LCK_SLEEP_UNLOCK) + } + } else if (lck_sleep_action & LCK_SLEEP_UNLOCK) { lck_spin_unlock(lck); + } return res; } @@ -814,19 +867,20 @@ lck_spin_sleep_deadline( */ wait_result_t lck_mtx_sleep( - lck_mtx_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible) + lck_mtx_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible) { - wait_result_t res; - thread_t thread = current_thread(); - + wait_result_t res; + thread_t thread = current_thread(); + KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_CODE) | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0); + VM_KERNEL_UNSLIDE_OR_PERM(lck), 
(int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0); - if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) + if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) { panic("Invalid lock sleep action %x\n", lck_sleep_action); + } if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) { /* @@ -843,17 +897,17 @@ lck_mtx_sleep( lck_mtx_unlock(lck); res = thread_block(THREAD_CONTINUE_NULL); if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) { - if ((lck_sleep_action & LCK_SLEEP_SPIN)) + if ((lck_sleep_action & LCK_SLEEP_SPIN)) { lck_mtx_lock_spin(lck); - else if ((lck_sleep_action & LCK_SLEEP_SPIN_ALWAYS)) + } else if ((lck_sleep_action & LCK_SLEEP_SPIN_ALWAYS)) { lck_mtx_lock_spin_always(lck); - else + } else { lck_mtx_lock(lck); + } } - } - else - if (lck_sleep_action & LCK_SLEEP_UNLOCK) + } else if (lck_sleep_action & LCK_SLEEP_UNLOCK) { lck_mtx_unlock(lck); + } if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) { if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { @@ -873,20 +927,21 @@ lck_mtx_sleep( */ wait_result_t lck_mtx_sleep_deadline( - lck_mtx_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible, - uint64_t deadline) + lck_mtx_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible, + uint64_t deadline) { wait_result_t res; - thread_t thread = current_thread(); + thread_t thread = current_thread(); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_SLEEP_DEADLINE_CODE) | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0); + VM_KERNEL_UNSLIDE_OR_PERM(lck), (int)lck_sleep_action, VM_KERNEL_UNSLIDE_OR_PERM(event), (int)interruptible, 0); - if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) + if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) { panic("Invalid lock sleep action %x\n", lck_sleep_action); + } if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) { /* @@ -900,15 +955,15 @@ lck_mtx_sleep_deadline( lck_mtx_unlock(lck); res = thread_block(THREAD_CONTINUE_NULL); if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) { - if ((lck_sleep_action & LCK_SLEEP_SPIN)) + if ((lck_sleep_action & LCK_SLEEP_SPIN)) { lck_mtx_lock_spin(lck); - else + } else { lck_mtx_lock(lck); + } } - } - else - if (lck_sleep_action & LCK_SLEEP_UNLOCK) + } else if (lck_sleep_action & LCK_SLEEP_UNLOCK) { lck_mtx_unlock(lck); + } if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) { if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { @@ -977,37 +1032,39 @@ lck_mtx_sleep_deadline( * ARM mutex contention logic could avoid taking the thread lock */ void -lck_mtx_lock_wait ( - lck_mtx_t *lck, - thread_t holder) +lck_mtx_lock_wait( + lck_mtx_t *lck, + thread_t holder) { - thread_t self = current_thread(); - lck_mtx_t *mutex; + thread_t self = current_thread(); + lck_mtx_t *mutex; __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); -#if CONFIG_DTRACE - uint64_t sleep_start = 0; +#if CONFIG_DTRACE + uint64_t sleep_start = 0; if (lockstat_probemap[LS_LCK_MTX_LOCK_BLOCK] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_BLOCK]) { sleep_start = mach_absolute_time(); } #endif - if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) + if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) { mutex = lck; - else + } else { mutex = &lck->lck_mtx_ptr->lck_mtx; + } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_START, - trace_lck, (uintptr_t)thread_tid(thread), 0, 0, 0); + trace_lck, 
(uintptr_t)thread_tid(thread), 0, 0, 0); spl_t s = splsched(); thread_lock(holder); assert_promotions_invariant(holder); - if ((holder->sched_flags & TH_SFLAG_DEPRESS) == 0) + if ((holder->sched_flags & TH_SFLAG_DEPRESS) == 0) { assert(holder->sched_pri >= mutex->lck_mtx_pri); + } integer_t priority = self->sched_pri; priority = MAX(priority, self->base_pri); @@ -1031,16 +1088,18 @@ lck_mtx_lock_wait ( assert(holder->promotions > 0); assert(holder->promotion_priority >= priority); - if ((holder->sched_flags & TH_SFLAG_DEPRESS) == 0) + if ((holder->sched_flags & TH_SFLAG_DEPRESS) == 0) { assert(holder->sched_pri >= mutex->lck_mtx_pri); + } assert_promotions_invariant(holder); thread_unlock(holder); splx(s); - if (mutex->lck_mtx_pri < priority) + if (mutex->lck_mtx_pri < priority) { mutex->lck_mtx_pri = priority; + } if (self->waiting_for_mutex == NULL) { self->waiting_for_mutex = mutex; @@ -1058,7 +1117,7 @@ lck_mtx_lock_wait ( assert(mutex->lck_mtx_waiters > 0); KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_LCK_WAIT_CODE) | DBG_FUNC_END, 0, 0, 0, 0, 0); -#if CONFIG_DTRACE +#if CONFIG_DTRACE /* * Record the DTrace lockstat probe for blocking, block time * measured from when we were entered. @@ -1076,7 +1135,7 @@ lck_mtx_lock_wait ( } /* - * Routine: lck_mtx_lock_acquire + * Routine: lck_mtx_lock_acquire * * Invoked on acquiring the mutex when there is * contention. @@ -1087,16 +1146,17 @@ lck_mtx_lock_wait ( */ int lck_mtx_lock_acquire( - lck_mtx_t *lck) + lck_mtx_t *lck) { - thread_t thread = current_thread(); - lck_mtx_t *mutex; - integer_t priority; + thread_t thread = current_thread(); + lck_mtx_t *mutex; + integer_t priority; - if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) + if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) { mutex = lck; - else + } else { mutex = &lck->lck_mtx_ptr->lck_mtx; + } /* * If waiting_for_mutex is set, then this thread was previously blocked waiting on this lock @@ -1138,8 +1198,9 @@ lck_mtx_lock_acquire( assert_promotions_invariant(thread); - if (thread->was_promoted_on_wakeup) + if (thread->was_promoted_on_wakeup) { assert(thread->promotions > 0); + } if (priority) { if (thread->promotions++ == 0) { @@ -1157,33 +1218,26 @@ lck_mtx_lock_acquire( if (thread->was_promoted_on_wakeup) { thread->was_promoted_on_wakeup = 0; - if (--thread->promotions == 0) + if (--thread->promotions == 0) { sched_thread_unpromote(thread, trace_lck); + } } assert_promotions_invariant(thread); - if (priority && (thread->sched_flags & TH_SFLAG_DEPRESS) == 0) + if (priority && (thread->sched_flags & TH_SFLAG_DEPRESS) == 0) { assert(thread->sched_pri >= priority); + } thread_unlock(thread); splx(s); } -#if CONFIG_DTRACE - if (lockstat_probemap[LS_LCK_MTX_LOCK_ACQUIRE] || lockstat_probemap[LS_LCK_MTX_EXT_LOCK_ACQUIRE]) { - if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) { - LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, lck, 0); - } else { - LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, lck, 0); - } - } -#endif - return (mutex->lck_mtx_waiters); + return mutex->lck_mtx_waiters; } /* - * Routine: lck_mtx_unlock_wakeup + * Routine: lck_mtx_unlock_wakeup * * Invoked on unlock when there is contention. * @@ -1194,24 +1248,26 @@ lck_mtx_lock_acquire( * This means that here we may do extra unneeded wakeups. 
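Before the wakeup side below, the consumer view is worth a glance: lck_mtx_sleep plus thread_wakeup is the standard condition-wait idiom built on these primitives. A hedged sketch with hypothetical names (my_mtx and a boolean_t data_ready):

    /* Waiter: the mutex is dropped atomically while blocked, retaken on return. */
    lck_mtx_lock(&my_mtx);
    while (!data_ready) {
        (void) lck_mtx_sleep(&my_mtx, LCK_SLEEP_DEFAULT,
            (event_t)&data_ready, THREAD_UNINT);
    }
    lck_mtx_unlock(&my_mtx);

    /* Waker: publish the state change under the mutex, then wake the sleepers. */
    lck_mtx_lock(&my_mtx);
    data_ready = TRUE;
    thread_wakeup((event_t)&data_ready);
    lck_mtx_unlock(&my_mtx);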
*/ void -lck_mtx_unlock_wakeup ( - lck_mtx_t *lck, - thread_t holder) +lck_mtx_unlock_wakeup( + lck_mtx_t *lck, + thread_t holder) { - thread_t thread = current_thread(); - lck_mtx_t *mutex; + thread_t thread = current_thread(); + lck_mtx_t *mutex; __kdebug_only uintptr_t trace_lck = unslide_for_kdebug(lck); - if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) + if (lck->lck_mtx_tag != LCK_MTX_TAG_INDIRECT) { mutex = lck; - else + } else { mutex = &lck->lck_mtx_ptr->lck_mtx; + } - if (thread != holder) + if (thread != holder) { panic("lck_mtx_unlock_wakeup: mutex %p holder %p\n", mutex, holder); + } KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_MTX_UNLCK_WAKEUP_CODE) | DBG_FUNC_START, - trace_lck, (uintptr_t)thread_tid(thread), 0, 0, 0); + trace_lck, (uintptr_t)thread_tid(thread), 0, 0, 0); assert(mutex->lck_mtx_waiters > 0); assert(thread->was_promoted_on_wakeup == 0); @@ -1221,10 +1277,11 @@ lck_mtx_unlock_wakeup ( * The waiters count does not precisely match the number of threads on the waitqueue, * therefore we cannot assert that we actually wake up a thread here */ - if (mutex->lck_mtx_waiters > 1) + if (mutex->lck_mtx_waiters > 1) { thread_wakeup_one_with_pri(LCK_MTX_EVENT(lck), lck->lck_mtx_pri); - else + } else { thread_wakeup_one(LCK_MTX_EVENT(lck)); + } /* When mutex->lck_mtx_pri is set, it means I as the owner have a promotion. */ if (mutex->lck_mtx_pri) { @@ -1235,8 +1292,9 @@ lck_mtx_unlock_wakeup ( assert_promotions_invariant(thread); - if (--thread->promotions == 0) + if (--thread->promotions == 0) { sched_thread_unpromote(thread, trace_lck); + } assert_promotions_invariant(thread); @@ -1295,17 +1353,17 @@ lck_mtx_wakeup_adjust_pri(thread_t thread, integer_t priority) /* - * Routine: mutex_pause + * Routine: mutex_pause * * Called by former callers of simple_lock_pause().
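The hunk below reindents mutex_pause and its backoff table; the clamping it performs is simple enough to model directly (standalone sketch; the constants are copied from the table that follows):

    #include <stdint.h>

    static const uint32_t backoffs_usec[8] = { 10, 50, 100, 200, 400, 600, 800, 1000 };

    /* Mirrors mutex_pause(): collision counts beyond the table reuse the last bucket. */
    static uint32_t
    backoff_for(uint32_t collisions)
    {
        if (collisions >= 8) {          /* MAX_COLLISION */
            collisions = 8 - 1;
        }
        return backoffs_usec[collisions];
    }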
*/ -#define MAX_COLLISION_COUNTS 32 -#define MAX_COLLISION 8 +#define MAX_COLLISION_COUNTS 32 +#define MAX_COLLISION 8 unsigned int max_collision_count[MAX_COLLISION_COUNTS]; uint32_t collision_backoffs[MAX_COLLISION] = { - 10, 50, 100, 200, 400, 600, 800, 1000 + 10, 50, 100, 200, 400, 600, 800, 1000 }; @@ -1313,14 +1371,16 @@ void mutex_pause(uint32_t collisions) { wait_result_t wait_result; - uint32_t back_off; + uint32_t back_off; - if (collisions >= MAX_COLLISION_COUNTS) - collisions = MAX_COLLISION_COUNTS - 1; + if (collisions >= MAX_COLLISION_COUNTS) { + collisions = MAX_COLLISION_COUNTS - 1; + } max_collision_count[collisions]++; - if (collisions >= MAX_COLLISION) - collisions = MAX_COLLISION - 1; + if (collisions >= MAX_COLLISION) { + collisions = MAX_COLLISION - 1; + } back_off = collision_backoffs[collisions]; wait_result = assert_wait_timeout((event_t)mutex_pause, THREAD_UNINT, back_off, NSEC_PER_USEC); @@ -1336,23 +1396,24 @@ unsigned int mutex_yield_no_wait = 0; void lck_mtx_yield( - lck_mtx_t *lck) + lck_mtx_t *lck) { - int waiters; - + int waiters; + #if DEBUG lck_mtx_assert(lck, LCK_MTX_ASSERT_OWNED); #endif /* DEBUG */ - - if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT) - waiters = lck->lck_mtx_ptr->lck_mtx.lck_mtx_waiters; - else - waiters = lck->lck_mtx_waiters; - - if ( !waiters) { - mutex_yield_no_wait++; + + if (lck->lck_mtx_tag == LCK_MTX_TAG_INDIRECT) { + waiters = lck->lck_mtx_ptr->lck_mtx.lck_mtx_waiters; + } else { + waiters = lck->lck_mtx_waiters; + } + + if (!waiters) { + mutex_yield_no_wait++; } else { - mutex_yield_wait++; + mutex_yield_wait++; lck_mtx_unlock(lck); mutex_pause(0); lck_mtx_lock(lck); @@ -1365,17 +1426,18 @@ lck_mtx_yield( */ wait_result_t lck_rw_sleep( - lck_rw_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible) + lck_rw_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible) { - wait_result_t res; - lck_rw_type_t lck_rw_type; - thread_t thread = current_thread(); + wait_result_t res; + lck_rw_type_t lck_rw_type; + thread_t thread = current_thread(); - if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) + if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) { panic("Invalid lock sleep action %x\n", lck_sleep_action); + } if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) { /* @@ -1394,17 +1456,17 @@ lck_rw_sleep( lck_rw_type = lck_rw_done(lck); res = thread_block(THREAD_CONTINUE_NULL); if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) { - if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE))) + if (!(lck_sleep_action & (LCK_SLEEP_SHARED | LCK_SLEEP_EXCLUSIVE))) { lck_rw_lock(lck, lck_rw_type); - else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE) + } else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE) { lck_rw_lock_exclusive(lck); - else + } else { lck_rw_lock_shared(lck); + } } - } - else - if (lck_sleep_action & LCK_SLEEP_UNLOCK) + } else if (lck_sleep_action & LCK_SLEEP_UNLOCK) { (void)lck_rw_done(lck); + } if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) { if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { @@ -1426,18 +1488,19 @@ lck_rw_sleep( */ wait_result_t lck_rw_sleep_deadline( - lck_rw_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible, - uint64_t deadline) + lck_rw_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible, + uint64_t deadline) { wait_result_t res; - lck_rw_type_t lck_rw_type; - thread_t thread = current_thread(); + 
lck_rw_type_t lck_rw_type; + thread_t thread = current_thread(); - if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) + if ((lck_sleep_action & ~LCK_SLEEP_MASK) != 0) { panic("Invalid lock sleep action %x\n", lck_sleep_action); + } if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) { thread->rwlock_count++; @@ -1448,17 +1511,17 @@ lck_rw_sleep_deadline( lck_rw_type = lck_rw_done(lck); res = thread_block(THREAD_CONTINUE_NULL); if (!(lck_sleep_action & LCK_SLEEP_UNLOCK)) { - if (!(lck_sleep_action & (LCK_SLEEP_SHARED|LCK_SLEEP_EXCLUSIVE))) + if (!(lck_sleep_action & (LCK_SLEEP_SHARED | LCK_SLEEP_EXCLUSIVE))) { lck_rw_lock(lck, lck_rw_type); - else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE) + } else if (lck_sleep_action & LCK_SLEEP_EXCLUSIVE) { lck_rw_lock_exclusive(lck); - else + } else { lck_rw_lock_shared(lck); + } } - } - else - if (lck_sleep_action & LCK_SLEEP_UNLOCK) + } else if (lck_sleep_action & LCK_SLEEP_UNLOCK) { (void)lck_rw_done(lck); + } if (lck_sleep_action & LCK_SLEEP_PROMOTED_PRI) { if ((thread->rwlock_count-- == 1 /* field now 0 */) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { @@ -1529,7 +1592,8 @@ lck_rw_sleep_deadline( * lck_rw_clear_promotion: Undo priority promotions when the last RW * lock is released by a thread (if a promotion was active) */ -void lck_rw_clear_promotion(thread_t thread, uintptr_t trace_obj) +void +lck_rw_clear_promotion(thread_t thread, uintptr_t trace_obj) { assert(thread->rwlock_count == 0); @@ -1537,8 +1601,9 @@ void lck_rw_clear_promotion(thread_t thread, uintptr_t trace_obj) spl_t s = splsched(); thread_lock(thread); - if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) + if (thread->sched_flags & TH_SFLAG_RW_PROMOTED) { sched_thread_unpromote_reason(thread, TH_SFLAG_RW_PROMOTED, trace_obj); + } thread_unlock(thread); splx(s); @@ -1553,43 +1618,46 @@ void lck_rw_clear_promotion(thread_t thread, uintptr_t trace_obj) void lck_rw_set_promotion_locked(thread_t thread) { - if (LcksOpts & disLkRWPrio) + if (LcksOpts & disLkRWPrio) { return; + } assert(thread->rwlock_count > 0); - if (!(thread->sched_flags & TH_SFLAG_RW_PROMOTED)) + if (!(thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { sched_thread_promote_reason(thread, TH_SFLAG_RW_PROMOTED, 0); + } } kern_return_t host_lockgroup_info( - host_t host, - lockgroup_info_array_t *lockgroup_infop, - mach_msg_type_number_t *lockgroup_infoCntp) + host_t host, + lockgroup_info_array_t *lockgroup_infop, + mach_msg_type_number_t *lockgroup_infoCntp) { - lockgroup_info_t *lockgroup_info_base; - lockgroup_info_t *lockgroup_info; - vm_offset_t lockgroup_info_addr; - vm_size_t lockgroup_info_size; - vm_size_t lockgroup_info_vmsize; - lck_grp_t *lck_grp; - unsigned int i; - vm_map_copy_t copy; - kern_return_t kr; - - if (host == HOST_NULL) + lockgroup_info_t *lockgroup_info_base; + lockgroup_info_t *lockgroup_info; + vm_offset_t lockgroup_info_addr; + vm_size_t lockgroup_info_size; + vm_size_t lockgroup_info_vmsize; + lck_grp_t *lck_grp; + unsigned int i; + vm_map_copy_t copy; + kern_return_t kr; + + if (host == HOST_NULL) { return KERN_INVALID_HOST; + } lck_mtx_lock(&lck_grp_lock); lockgroup_info_size = lck_grp_cnt * sizeof(*lockgroup_info); lockgroup_info_vmsize = round_page(lockgroup_info_size); kr = kmem_alloc_pageable(ipc_kernel_map, - &lockgroup_info_addr, lockgroup_info_vmsize, VM_KERN_MEMORY_IPC); + &lockgroup_info_addr, lockgroup_info_vmsize, VM_KERN_MEMORY_IPC); if (kr != KERN_SUCCESS) { lck_mtx_unlock(&lck_grp_lock); - return(kr); + return kr; } lockgroup_info_base = (lockgroup_info_t *) lockgroup_info_addr; @@ 
-1597,35 +1665,22 @@ host_lockgroup_info( lockgroup_info = lockgroup_info_base; for (i = 0; i < lck_grp_cnt; i++) { - lockgroup_info->lock_spin_cnt = lck_grp->lck_grp_spincnt; - lockgroup_info->lock_spin_util_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_util_cnt; - lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cnt; - lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_miss_cnt; - lockgroup_info->lock_spin_held_max = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_max; - lockgroup_info->lock_spin_held_cum = lck_grp->lck_grp_stat.lck_grp_spin_stat.lck_grp_spin_held_cum; - + lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt; lockgroup_info->lock_mtx_cnt = lck_grp->lck_grp_mtxcnt; - lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt; - lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt; - lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt; - lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt; - lockgroup_info->lock_mtx_held_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max; - lockgroup_info->lock_mtx_held_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cum; - lockgroup_info->lock_mtx_wait_max = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_max; - lockgroup_info->lock_mtx_wait_cum = lck_grp->lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cum; - lockgroup_info->lock_rw_cnt = lck_grp->lck_grp_rwcnt; - lockgroup_info->lock_rw_util_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_util_cnt; - lockgroup_info->lock_rw_held_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cnt; - lockgroup_info->lock_rw_miss_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_miss_cnt; - lockgroup_info->lock_rw_wait_cnt = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cnt; - lockgroup_info->lock_rw_held_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_max; - lockgroup_info->lock_rw_held_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_held_cum; - lockgroup_info->lock_rw_wait_max = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_max; - lockgroup_info->lock_rw_wait_cum = lck_grp->lck_grp_stat.lck_grp_rw_stat.lck_grp_rw_wait_cum; +#if LOCK_STATS + lockgroup_info->lock_spin_held_cnt = lck_grp->lck_grp_stats.lgss_spin_held.lgs_count; + lockgroup_info->lock_spin_miss_cnt = lck_grp->lck_grp_stats.lgss_spin_miss.lgs_count; +#endif /* LOCK_STATS */ - (void) strncpy(lockgroup_info->lockgroup_name,lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME); + // Historically on x86, held was used for "direct wait" and util for "held" + lockgroup_info->lock_mtx_util_cnt = lck_grp->lck_grp_stats.lgss_mtx_held.lgs_count; + lockgroup_info->lock_mtx_held_cnt = lck_grp->lck_grp_stats.lgss_mtx_direct_wait.lgs_count; + lockgroup_info->lock_mtx_miss_cnt = lck_grp->lck_grp_stats.lgss_mtx_miss.lgs_count; + lockgroup_info->lock_mtx_wait_cnt = lck_grp->lck_grp_stats.lgss_mtx_wait.lgs_count; + + (void) strncpy(lockgroup_info->lockgroup_name, lck_grp->lck_grp_name, LOCKGROUP_MAX_NAME); lck_grp = (lck_grp_t *)(queue_next((queue_entry_t)(lck_grp))); lockgroup_info++; @@ -1634,70 +1689,70 @@ host_lockgroup_info( *lockgroup_infoCntp = lck_grp_cnt; lck_mtx_unlock(&lck_grp_lock); - if (lockgroup_info_size != lockgroup_info_vmsize) + if (lockgroup_info_size != lockgroup_info_vmsize) { 
bzero((char *)lockgroup_info, lockgroup_info_vmsize - lockgroup_info_size); + } kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr, - (vm_map_size_t)lockgroup_info_size, TRUE, &copy); + (vm_map_size_t)lockgroup_info_size, TRUE, &copy); assert(kr == KERN_SUCCESS); *lockgroup_infop = (lockgroup_info_t *) copy; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* * Atomic primitives, prototyped in kern/simple_lock.h * Noret versions are more efficient on some architectures */ - + uint32_t hw_atomic_add(volatile uint32_t *dest, uint32_t delt) { - ALIGN_TEST(dest,uint32_t); - return __c11_atomic_fetch_add(ATOMIC_CAST(uint32_t,dest), delt, memory_order_relaxed) + delt; + ALIGN_TEST(dest, uint32_t); + return __c11_atomic_fetch_add(ATOMIC_CAST(uint32_t, dest), delt, memory_order_relaxed) + delt; } uint32_t hw_atomic_sub(volatile uint32_t *dest, uint32_t delt) { - ALIGN_TEST(dest,uint32_t); - return __c11_atomic_fetch_sub(ATOMIC_CAST(uint32_t,dest), delt, memory_order_relaxed) - delt; + ALIGN_TEST(dest, uint32_t); + return __c11_atomic_fetch_sub(ATOMIC_CAST(uint32_t, dest), delt, memory_order_relaxed) - delt; } uint32_t hw_atomic_or(volatile uint32_t *dest, uint32_t mask) { - ALIGN_TEST(dest,uint32_t); - return __c11_atomic_fetch_or(ATOMIC_CAST(uint32_t,dest), mask, memory_order_relaxed) | mask; + ALIGN_TEST(dest, uint32_t); + return __c11_atomic_fetch_or(ATOMIC_CAST(uint32_t, dest), mask, memory_order_relaxed) | mask; } void hw_atomic_or_noret(volatile uint32_t *dest, uint32_t mask) { - ALIGN_TEST(dest,uint32_t); - __c11_atomic_fetch_or(ATOMIC_CAST(uint32_t,dest), mask, memory_order_relaxed); + ALIGN_TEST(dest, uint32_t); + __c11_atomic_fetch_or(ATOMIC_CAST(uint32_t, dest), mask, memory_order_relaxed); } uint32_t hw_atomic_and(volatile uint32_t *dest, uint32_t mask) { - ALIGN_TEST(dest,uint32_t); - return __c11_atomic_fetch_and(ATOMIC_CAST(uint32_t,dest), mask, memory_order_relaxed) & mask; + ALIGN_TEST(dest, uint32_t); + return __c11_atomic_fetch_and(ATOMIC_CAST(uint32_t, dest), mask, memory_order_relaxed) & mask; } void hw_atomic_and_noret(volatile uint32_t *dest, uint32_t mask) { - ALIGN_TEST(dest,uint32_t); - __c11_atomic_fetch_and(ATOMIC_CAST(uint32_t,dest), mask, memory_order_relaxed); + ALIGN_TEST(dest, uint32_t); + __c11_atomic_fetch_and(ATOMIC_CAST(uint32_t, dest), mask, memory_order_relaxed); } uint32_t hw_compare_and_store(uint32_t oldval, uint32_t newval, volatile uint32_t *dest) { - ALIGN_TEST(dest,uint32_t); - return __c11_atomic_compare_exchange_strong(ATOMIC_CAST(uint32_t,dest), &oldval, newval, - memory_order_acq_rel_smp, memory_order_relaxed); + ALIGN_TEST(dest, uint32_t); + return __c11_atomic_compare_exchange_strong(ATOMIC_CAST(uint32_t, dest), &oldval, newval, + memory_order_acq_rel_smp, memory_order_relaxed); } - diff --git a/osfmk/kern/locks.h b/osfmk/kern/locks.h index 4db3c40f5..dd5f3a54a 100644 --- a/osfmk/kern/locks.h +++ b/osfmk/kern/locks.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement.
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,265 +22,167 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _KERN_LOCKS_H_ #define _KERN_LOCKS_H_ -#include -#include -#include -#include -#include -#include - -#ifdef MACH_KERNEL_PRIVATE -#include - -extern void lck_mod_init( - void); +#include +#include +#include +#include +#include +#include +#include -typedef unsigned int lck_type_t; +#ifdef MACH_KERNEL_PRIVATE +#include -#define LCK_TYPE_SPIN 1 -#define LCK_TYPE_MTX 2 -#define LCK_TYPE_RW 3 +extern void lck_mod_init( + void); #endif -typedef unsigned int lck_sleep_action_t; +typedef unsigned int lck_sleep_action_t; -#define LCK_SLEEP_DEFAULT 0x00 /* Release the lock while waiting for the event, then reclaim */ - /* RW locks are returned in the same mode */ -#define LCK_SLEEP_UNLOCK 0x01 /* Release the lock and return unheld */ -#define LCK_SLEEP_SHARED 0x02 /* Reclaim the lock in shared mode (RW only) */ -#define LCK_SLEEP_EXCLUSIVE 0x04 /* Reclaim the lock in exclusive mode (RW only) */ -#define LCK_SLEEP_SPIN 0x08 /* Reclaim the lock in spin mode (mutex only) */ -#define LCK_SLEEP_PROMOTED_PRI 0x10 /* Sleep at a promoted priority */ -#define LCK_SLEEP_SPIN_ALWAYS 0x20 /* Reclaim the lock in spin-always mode (mutex only) */ +#define LCK_SLEEP_DEFAULT 0x00 /* Release the lock while waiting for the event, then reclaim */ +/* RW locks are returned in the same mode */ +#define LCK_SLEEP_UNLOCK 0x01 /* Release the lock and return unheld */ +#define LCK_SLEEP_SHARED 0x02 /* Reclaim the lock in shared mode (RW only) */ +#define LCK_SLEEP_EXCLUSIVE 0x04 /* Reclaim the lock in exclusive mode (RW only) */ +#define LCK_SLEEP_SPIN 0x08 /* Reclaim the lock in spin mode (mutex only) */ +#define LCK_SLEEP_PROMOTED_PRI 0x10 /* Sleep at a promoted priority */ +#define LCK_SLEEP_SPIN_ALWAYS 0x20 /* Reclaim the lock in spin-always mode (mutex only) */ -#define LCK_SLEEP_MASK 0x3f /* Valid actions */ +#define LCK_SLEEP_MASK 0x3f /* Valid actions */ -#ifdef MACH_KERNEL_PRIVATE - -typedef struct { - uint64_t lck_grp_spin_util_cnt; - uint64_t lck_grp_spin_held_cnt; - uint64_t lck_grp_spin_miss_cnt; - uint64_t lck_grp_spin_held_max; - uint64_t lck_grp_spin_held_cum; -} lck_grp_spin_stat_t; - -typedef struct { - uint64_t lck_grp_mtx_util_cnt; - /* On x86, this is used as the "direct wait" count */ - uint64_t lck_grp_mtx_held_cnt; - uint64_t lck_grp_mtx_miss_cnt; - uint64_t lck_grp_mtx_wait_cnt; - /* Rest currently unused */ - uint64_t lck_grp_mtx_held_max; - uint64_t lck_grp_mtx_held_cum; - uint64_t lck_grp_mtx_wait_max; - uint64_t lck_grp_mtx_wait_cum; -} lck_grp_mtx_stat_t; - -typedef struct { - uint64_t lck_grp_rw_util_cnt; - uint64_t lck_grp_rw_held_cnt; - uint64_t lck_grp_rw_miss_cnt; - uint64_t lck_grp_rw_wait_cnt; - uint64_t lck_grp_rw_held_max; - uint64_t lck_grp_rw_held_cum; - uint64_t lck_grp_rw_wait_max; - uint64_t lck_grp_rw_wait_cum; -} lck_grp_rw_stat_t; - -typedef struct _lck_grp_stat_ { - lck_grp_spin_stat_t lck_grp_spin_stat; - lck_grp_mtx_stat_t lck_grp_mtx_stat; - lck_grp_rw_stat_t lck_grp_rw_stat; -} 
lck_grp_stat_t; - -#define LCK_GRP_MAX_NAME 64 - -typedef struct _lck_grp_ { - queue_chain_t lck_grp_link; - uint32_t lck_grp_refcnt; - uint32_t lck_grp_spincnt; - uint32_t lck_grp_mtxcnt; - uint32_t lck_grp_rwcnt; - uint32_t lck_grp_attr; - char lck_grp_name[LCK_GRP_MAX_NAME]; - lck_grp_stat_t lck_grp_stat; -} lck_grp_t; - -#define lck_grp_miss lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt -#define lck_grp_held lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt -#define lck_grp_util lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt -#define lck_grp_wait lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt -#define lck_grp_direct_wait lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt - -#define LCK_GRP_NULL (lck_grp_t *)0 -#else -typedef struct __lck_grp__ lck_grp_t; -#endif +#ifdef MACH_KERNEL_PRIVATE +typedef struct _lck_attr_ { + unsigned int lck_attr_val; +} lck_attr_t; -#ifdef MACH_KERNEL_PRIVATE -typedef struct _lck_grp_attr_ { - uint32_t grp_attr_val; -} lck_grp_attr_t; +extern lck_attr_t LockDefaultLckAttr; -extern lck_grp_attr_t LockDefaultGroupAttr; +#define LCK_ATTR_NONE 0 -#define LCK_GRP_ATTR_STAT 0x1 +#define LCK_ATTR_DEBUG 0x00000001 +#define LCK_ATTR_RW_SHARED_PRIORITY 0x00010000 #else -typedef struct __lck_grp_attr__ lck_grp_attr_t; +typedef struct __lck_attr__ lck_attr_t; #endif -#define LCK_GRP_ATTR_NULL (lck_grp_attr_t *)0 +#define LCK_ATTR_NULL (lck_attr_t *)0 __BEGIN_DECLS -extern lck_grp_attr_t *lck_grp_attr_alloc_init( - void); - -extern void lck_grp_attr_setdefault( - lck_grp_attr_t *attr); - -extern void lck_grp_attr_setstat( - lck_grp_attr_t *attr); - -extern void lck_grp_attr_free( - lck_grp_attr_t *attr); - -extern lck_grp_t *lck_grp_alloc_init( - const char* grp_name, - lck_grp_attr_t *attr); - -__END_DECLS - -#ifdef MACH_KERNEL_PRIVATE -extern void lck_grp_init( - lck_grp_t *grp, - const char* grp_name, - lck_grp_attr_t *attr); - -extern void lck_grp_reference( - lck_grp_t *grp); - -extern void lck_grp_deallocate( - lck_grp_t *grp); - -extern void lck_grp_lckcnt_incr( - lck_grp_t *grp, - lck_type_t lck_type); - -extern void lck_grp_lckcnt_decr( - lck_grp_t *grp, - lck_type_t lck_type); -#endif +extern lck_attr_t *lck_attr_alloc_init( + void); -__BEGIN_DECLS +extern void lck_attr_setdefault( + lck_attr_t *attr); -extern void lck_grp_free( - lck_grp_t *grp); +extern void lck_attr_setdebug( + lck_attr_t *attr); -__END_DECLS +extern void lck_attr_cleardebug( + lck_attr_t *attr); -#ifdef MACH_KERNEL_PRIVATE -typedef struct _lck_attr_ { - unsigned int lck_attr_val; -} lck_attr_t; +#ifdef XNU_KERNEL_PRIVATE -extern lck_attr_t LockDefaultLckAttr; +typedef union { + uint16_t tcurnext; + struct { + uint8_t cticket; + uint8_t nticket; + }; +} lck_ticket_internal; -#define LCK_ATTR_NONE 0 +typedef struct { + lck_ticket_internal tu; + uintptr_t lck_owner; +} lck_ticket_t; -#define LCK_ATTR_DEBUG 0x00000001 -#define LCK_ATTR_RW_SHARED_PRIORITY 0x00010000 +void lck_ticket_init(lck_ticket_t *tlock); +void lck_ticket_lock(lck_ticket_t *tlock); +void lck_ticket_unlock(lck_ticket_t *tlock); +void lck_ticket_assert_owned(lck_ticket_t *tlock); -#else -typedef struct __lck_attr__ lck_attr_t; +extern void lck_attr_rw_shared_priority( + lck_attr_t *attr); #endif -#define LCK_ATTR_NULL (lck_attr_t *)0 - -__BEGIN_DECLS +extern void lck_attr_free( + lck_attr_t *attr); -extern lck_attr_t *lck_attr_alloc_init( - void); +#define decl_lck_spin_data(class, name) class lck_spin_t name; -extern void lck_attr_setdefault( - lck_attr_t *attr); +extern lck_spin_t *lck_spin_alloc_init( + 
lck_grp_t *grp, + lck_attr_t *attr); -extern void lck_attr_setdebug( - lck_attr_t *attr); +extern void lck_spin_init( + lck_spin_t *lck, + lck_grp_t *grp, + lck_attr_t *attr); -extern void lck_attr_cleardebug( - lck_attr_t *attr); +extern void lck_spin_lock( + lck_spin_t *lck); -#ifdef XNU_KERNEL_PRIVATE -extern void lck_attr_rw_shared_priority( - lck_attr_t *attr); -#endif +extern void lck_spin_lock_grp( + lck_spin_t *lck, + lck_grp_t *grp); -extern void lck_attr_free( - lck_attr_t *attr); +extern void lck_spin_unlock( + lck_spin_t *lck); -#define decl_lck_spin_data(class,name) class lck_spin_t name; +extern void lck_spin_destroy( + lck_spin_t *lck, + lck_grp_t *grp); -extern lck_spin_t *lck_spin_alloc_init( - lck_grp_t *grp, - lck_attr_t *attr); +extern void lck_spin_free( + lck_spin_t *lck, + lck_grp_t *grp); -extern void lck_spin_init( - lck_spin_t *lck, - lck_grp_t *grp, - lck_attr_t *attr); +extern wait_result_t lck_spin_sleep( + lck_spin_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible); -extern void lck_spin_lock( - lck_spin_t *lck); +extern wait_result_t lck_spin_sleep_grp( + lck_spin_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible, + lck_grp_t *grp); -extern void lck_spin_unlock( - lck_spin_t *lck); +extern wait_result_t lck_spin_sleep_deadline( + lck_spin_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible, + uint64_t deadline); -extern void lck_spin_destroy( - lck_spin_t *lck, - lck_grp_t *grp); +#ifdef KERNEL_PRIVATE -extern void lck_spin_free( - lck_spin_t *lck, - lck_grp_t *grp); +extern void lck_spin_lock_nopreempt( lck_spin_t *lck); +extern void lck_spin_lock_nopreempt_grp( lck_spin_t *lck, lck_grp_t *grp); -extern wait_result_t lck_spin_sleep( - lck_spin_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible); +extern void lck_spin_unlock_nopreempt( lck_spin_t *lck); -extern wait_result_t lck_spin_sleep_deadline( - lck_spin_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible, - uint64_t deadline); +extern boolean_t lck_spin_try_lock_grp( lck_spin_t *lck, lck_grp_t *grp); -#ifdef KERNEL_PRIVATE +extern boolean_t lck_spin_try_lock( lck_spin_t *lck); -extern void lck_spin_lock_nopreempt( lck_spin_t *lck); - -extern void lck_spin_unlock_nopreempt( lck_spin_t *lck); - -extern boolean_t lck_spin_try_lock( lck_spin_t *lck); - -extern boolean_t lck_spin_try_lock_nopreempt( lck_spin_t *lck); +extern boolean_t lck_spin_try_lock_nopreempt( lck_spin_t *lck); +extern boolean_t lck_spin_try_lock_nopreempt_grp( lck_spin_t *lck, lck_grp_t *grp); /* NOT SAFE: To be used only by kernel debugger to avoid deadlock. 
*/ -extern boolean_t kdp_lck_spin_is_acquired( lck_spin_t *lck); +extern boolean_t kdp_lck_spin_is_acquired( lck_spin_t *lck); struct _lck_mtx_ext_; extern void lck_mtx_init_ext(lck_mtx_t *lck, struct _lck_mtx_ext_ *lck_ext, @@ -289,256 +191,255 @@ extern void lck_mtx_init_ext(lck_mtx_t *lck, struct _lck_mtx_ext_ *lck_ext, #endif -#define decl_lck_mtx_data(class,name) class lck_mtx_t name; +#define decl_lck_mtx_data(class, name) class lck_mtx_t name; -extern lck_mtx_t *lck_mtx_alloc_init( - lck_grp_t *grp, - lck_attr_t *attr); +extern lck_mtx_t *lck_mtx_alloc_init( + lck_grp_t *grp, + lck_attr_t *attr); -extern void lck_mtx_init( - lck_mtx_t *lck, - lck_grp_t *grp, - lck_attr_t *attr); -extern void lck_mtx_lock( - lck_mtx_t *lck); +extern void lck_mtx_init( + lck_mtx_t *lck, + lck_grp_t *grp, + lck_attr_t *attr); +extern void lck_mtx_lock( + lck_mtx_t *lck); -extern void lck_mtx_unlock( - lck_mtx_t *lck); +extern void lck_mtx_unlock( + lck_mtx_t *lck); -extern void lck_mtx_destroy( - lck_mtx_t *lck, - lck_grp_t *grp); +extern void lck_mtx_destroy( + lck_mtx_t *lck, + lck_grp_t *grp); -extern void lck_mtx_free( - lck_mtx_t *lck, - lck_grp_t *grp); +extern void lck_mtx_free( + lck_mtx_t *lck, + lck_grp_t *grp); -extern wait_result_t lck_mtx_sleep( - lck_mtx_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible); +extern wait_result_t lck_mtx_sleep( + lck_mtx_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible); -extern wait_result_t lck_mtx_sleep_deadline( - lck_mtx_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible, - uint64_t deadline); +extern wait_result_t lck_mtx_sleep_deadline( + lck_mtx_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible, + uint64_t deadline); #if DEVELOPMENT || DEBUG -extern void erase_all_test_mtx_stats(void); -extern int get_test_mtx_stats_string(char* buffer, int buffer_size); -extern void lck_mtx_test_init(void); -extern void lck_mtx_test_lock(void); -extern void lck_mtx_test_unlock(void); -extern int lck_mtx_test_mtx_uncontended(int iter, char* buffer, int buffer_size); -extern int lck_mtx_test_mtx_contended(int iter, char* buffer, int buffer_size); -extern int lck_mtx_test_mtx_uncontended_loop_time(int iter, char* buffer, int buffer_size); -extern int lck_mtx_test_mtx_contended_loop_time(int iter, char* buffer, int buffer_size); +extern void erase_all_test_mtx_stats(void); +extern int get_test_mtx_stats_string(char* buffer, int buffer_size); +extern void lck_mtx_test_init(void); +extern void lck_mtx_test_lock(void); +extern void lck_mtx_test_unlock(void); +extern int lck_mtx_test_mtx_uncontended(int iter, char* buffer, int buffer_size); +extern int lck_mtx_test_mtx_contended(int iter, char* buffer, int buffer_size); +extern int lck_mtx_test_mtx_uncontended_loop_time(int iter, char* buffer, int buffer_size); +extern int lck_mtx_test_mtx_contended_loop_time(int iter, char* buffer, int buffer_size); #endif +#ifdef KERNEL_PRIVATE -#ifdef KERNEL_PRIVATE +extern boolean_t lck_mtx_try_lock( + lck_mtx_t *lck); -extern boolean_t lck_mtx_try_lock( - lck_mtx_t *lck); +extern void mutex_pause(uint32_t); -extern void mutex_pause(uint32_t); +extern void lck_mtx_yield( + lck_mtx_t *lck); -extern void lck_mtx_yield ( - lck_mtx_t *lck); +extern boolean_t lck_mtx_try_lock_spin( + lck_mtx_t *lck); -extern boolean_t lck_mtx_try_lock_spin( - lck_mtx_t *lck); +extern void lck_mtx_lock_spin( + lck_mtx_t 
*lck); -extern void lck_mtx_lock_spin( - lck_mtx_t *lck); +extern boolean_t kdp_lck_mtx_lock_spin_is_acquired( + lck_mtx_t *lck); -extern boolean_t kdp_lck_mtx_lock_spin_is_acquired( - lck_mtx_t *lck); +extern void lck_mtx_convert_spin( + lck_mtx_t *lck); -extern void lck_mtx_convert_spin( - lck_mtx_t *lck); +extern void lck_mtx_lock_spin_always( + lck_mtx_t *lck); -extern void lck_mtx_lock_spin_always( - lck_mtx_t *lck); +extern boolean_t lck_mtx_try_lock_spin_always( + lck_mtx_t *lck); -extern boolean_t lck_mtx_try_lock_spin_always( - lck_mtx_t *lck); +#define lck_mtx_unlock_always(l) lck_mtx_unlock(l) -#define lck_mtx_unlock_always(l) lck_mtx_unlock(l) +extern void lck_spin_assert( + lck_spin_t *lck, + unsigned int type); -extern void lck_spin_assert( - lck_spin_t *lck, - unsigned int type); +extern boolean_t kdp_lck_rw_lock_is_acquired_exclusive( + lck_rw_t *lck); -extern boolean_t kdp_lck_rw_lock_is_acquired_exclusive( - lck_rw_t *lck); +#endif /* KERNEL_PRIVATE */ -#endif /* KERNEL_PRIVATE */ - -extern void lck_mtx_assert( - lck_mtx_t *lck, - unsigned int type); +extern void lck_mtx_assert( + lck_mtx_t *lck, + unsigned int type); #if MACH_ASSERT -#define LCK_MTX_ASSERT(lck,type) lck_mtx_assert((lck),(type)) -#define LCK_SPIN_ASSERT(lck,type) lck_spin_assert((lck),(type)) -#define LCK_RW_ASSERT(lck,type) lck_rw_assert((lck),(type)) +#define LCK_MTX_ASSERT(lck, type) lck_mtx_assert((lck),(type)) +#define LCK_SPIN_ASSERT(lck, type) lck_spin_assert((lck),(type)) +#define LCK_RW_ASSERT(lck, type) lck_rw_assert((lck),(type)) #else /* MACH_ASSERT */ -#define LCK_MTX_ASSERT(lck,type) -#define LCK_SPIN_ASSERT(lck,type) -#define LCK_RW_ASSERT(lck,type) +#define LCK_MTX_ASSERT(lck, type) +#define LCK_SPIN_ASSERT(lck, type) +#define LCK_RW_ASSERT(lck, type) #endif /* MACH_ASSERT */ #if DEBUG -#define LCK_MTX_ASSERT_DEBUG(lck,type) lck_mtx_assert((lck),(type)) -#define LCK_SPIN_ASSERT_DEBUG(lck,type) lck_spin_assert((lck),(type)) -#define LCK_RW_ASSERT_DEBUG(lck,type) lck_rw_assert((lck),(type)) +#define LCK_MTX_ASSERT_DEBUG(lck, type) lck_mtx_assert((lck),(type)) +#define LCK_SPIN_ASSERT_DEBUG(lck, type) lck_spin_assert((lck),(type)) +#define LCK_RW_ASSERT_DEBUG(lck, type) lck_rw_assert((lck),(type)) #else /* DEBUG */ -#define LCK_MTX_ASSERT_DEBUG(lck,type) -#define LCK_SPIN_ASSERT_DEBUG(lck,type) -#define LCK_RW_ASSERT_DEBUG(lck,type) +#define LCK_MTX_ASSERT_DEBUG(lck, type) +#define LCK_SPIN_ASSERT_DEBUG(lck, type) +#define LCK_RW_ASSERT_DEBUG(lck, type) #endif /* DEBUG */ __END_DECLS -#define LCK_ASSERT_OWNED 1 -#define LCK_ASSERT_NOTOWNED 2 +#define LCK_ASSERT_OWNED 1 +#define LCK_ASSERT_NOTOWNED 2 -#define LCK_MTX_ASSERT_OWNED LCK_ASSERT_OWNED -#define LCK_MTX_ASSERT_NOTOWNED LCK_ASSERT_NOTOWNED +#define LCK_MTX_ASSERT_OWNED LCK_ASSERT_OWNED +#define LCK_MTX_ASSERT_NOTOWNED LCK_ASSERT_NOTOWNED -#ifdef MACH_KERNEL_PRIVATE -extern void lck_mtx_lock_wait( - lck_mtx_t *lck, - thread_t holder); +#ifdef MACH_KERNEL_PRIVATE +extern void lck_mtx_lock_wait( + lck_mtx_t *lck, + thread_t holder); -extern int lck_mtx_lock_acquire( - lck_mtx_t *lck); +extern int lck_mtx_lock_acquire( + lck_mtx_t *lck); -extern void lck_mtx_unlock_wakeup( - lck_mtx_t *lck, - thread_t holder); +extern void lck_mtx_unlock_wakeup( + lck_mtx_t *lck, + thread_t holder); -extern boolean_t lck_mtx_ilk_unlock( - lck_mtx_t *lck); +extern boolean_t lck_mtx_ilk_unlock( + lck_mtx_t *lck); -extern boolean_t lck_mtx_ilk_try_lock( - lck_mtx_t *lck); +extern boolean_t lck_mtx_ilk_try_lock( + lck_mtx_t *lck); extern void 
lck_mtx_wakeup_adjust_pri(thread_t thread, integer_t priority); #endif -#define decl_lck_rw_data(class,name) class lck_rw_t name; +#define decl_lck_rw_data(class, name) class lck_rw_t name; -typedef unsigned int lck_rw_type_t; +typedef unsigned int lck_rw_type_t; -#define LCK_RW_TYPE_SHARED 0x01 -#define LCK_RW_TYPE_EXCLUSIVE 0x02 +#define LCK_RW_TYPE_SHARED 0x01 +#define LCK_RW_TYPE_EXCLUSIVE 0x02 #ifdef XNU_KERNEL_PRIVATE -#define LCK_RW_ASSERT_SHARED 0x01 -#define LCK_RW_ASSERT_EXCLUSIVE 0x02 -#define LCK_RW_ASSERT_HELD 0x03 -#define LCK_RW_ASSERT_NOTHELD 0x04 +#define LCK_RW_ASSERT_SHARED 0x01 +#define LCK_RW_ASSERT_EXCLUSIVE 0x02 +#define LCK_RW_ASSERT_HELD 0x03 +#define LCK_RW_ASSERT_NOTHELD 0x04 #endif __BEGIN_DECLS -extern lck_rw_t *lck_rw_alloc_init( - lck_grp_t *grp, - lck_attr_t *attr); +extern lck_rw_t *lck_rw_alloc_init( + lck_grp_t *grp, + lck_attr_t *attr); -extern void lck_rw_init( - lck_rw_t *lck, - lck_grp_t *grp, - lck_attr_t *attr); +extern void lck_rw_init( + lck_rw_t *lck, + lck_grp_t *grp, + lck_attr_t *attr); -extern void lck_rw_lock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type); +extern void lck_rw_lock( + lck_rw_t *lck, + lck_rw_type_t lck_rw_type); -extern void lck_rw_unlock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type); +extern void lck_rw_unlock( + lck_rw_t *lck, + lck_rw_type_t lck_rw_type); -extern void lck_rw_lock_shared( - lck_rw_t *lck); +extern void lck_rw_lock_shared( + lck_rw_t *lck); -extern void lck_rw_unlock_shared( - lck_rw_t *lck); +extern void lck_rw_unlock_shared( + lck_rw_t *lck); -extern boolean_t lck_rw_lock_yield_shared( - lck_rw_t *lck, - boolean_t force_yield); +extern boolean_t lck_rw_lock_yield_shared( + lck_rw_t *lck, + boolean_t force_yield); -extern void lck_rw_lock_exclusive( - lck_rw_t *lck); +extern void lck_rw_lock_exclusive( + lck_rw_t *lck); -extern void lck_rw_unlock_exclusive( - lck_rw_t *lck); +extern void lck_rw_unlock_exclusive( + lck_rw_t *lck); -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE /* * CAUTION * read-write locks do not have a concept of ownership, so lck_rw_assert() * merely asserts that someone is holding the lock, not necessarily the caller. 
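A hedged usage sketch of the read-write interface declared here, including the upgrade path whose failure mode trips people up (on a failed shared-to-exclusive upgrade the shared hold has already been dropped); my_rwlock and needs_update are hypothetical:

    lck_rw_lock_shared(&my_rwlock);
    if (needs_update) {
        if (!lck_rw_lock_shared_to_exclusive(&my_rwlock)) {
            /* Upgrade failed: the shared hold is gone; reacquire exclusively
             * and re-validate needs_update, since the state may have changed. */
            lck_rw_lock_exclusive(&my_rwlock);
        }
        /* modify the protected state here */
        lck_rw_lock_exclusive_to_shared(&my_rwlock);
    }
    lck_rw_unlock_shared(&my_rwlock);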
*/ -extern void lck_rw_assert( - lck_rw_t *lck, - unsigned int type); +extern void lck_rw_assert( + lck_rw_t *lck, + unsigned int type); extern void lck_rw_clear_promotion(thread_t thread, uintptr_t trace_obj); extern void lck_rw_set_promotion_locked(thread_t thread); uintptr_t unslide_for_kdebug(void* object); -#endif +#endif /* XNU_KERNEL_PRIVATE */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -extern lck_rw_type_t lck_rw_done( - lck_rw_t *lck); +extern lck_rw_type_t lck_rw_done( + lck_rw_t *lck); #endif -extern void lck_rw_destroy( - lck_rw_t *lck, - lck_grp_t *grp); +extern void lck_rw_destroy( + lck_rw_t *lck, + lck_grp_t *grp); -extern void lck_rw_free( - lck_rw_t *lck, - lck_grp_t *grp); +extern void lck_rw_free( + lck_rw_t *lck, + lck_grp_t *grp); -extern wait_result_t lck_rw_sleep( - lck_rw_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible); +extern wait_result_t lck_rw_sleep( + lck_rw_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible); -extern wait_result_t lck_rw_sleep_deadline( - lck_rw_t *lck, - lck_sleep_action_t lck_sleep_action, - event_t event, - wait_interrupt_t interruptible, - uint64_t deadline); +extern wait_result_t lck_rw_sleep_deadline( + lck_rw_t *lck, + lck_sleep_action_t lck_sleep_action, + event_t event, + wait_interrupt_t interruptible, + uint64_t deadline); -extern boolean_t lck_rw_lock_shared_to_exclusive( - lck_rw_t *lck); +extern boolean_t lck_rw_lock_shared_to_exclusive( + lck_rw_t *lck); -extern void lck_rw_lock_exclusive_to_shared( - lck_rw_t *lck); +extern void lck_rw_lock_exclusive_to_shared( + lck_rw_t *lck); -extern boolean_t lck_rw_try_lock( - lck_rw_t *lck, - lck_rw_type_t lck_rw_type); +extern boolean_t lck_rw_try_lock( + lck_rw_t *lck, + lck_rw_type_t lck_rw_type); -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -extern boolean_t lck_rw_try_lock_shared( - lck_rw_t *lck); +extern boolean_t lck_rw_try_lock_shared( + lck_rw_t *lck); -extern boolean_t lck_rw_try_lock_exclusive( - lck_rw_t *lck); +extern boolean_t lck_rw_try_lock_exclusive( + lck_rw_t *lck); #endif __END_DECLS diff --git a/osfmk/kern/ltable.c b/osfmk/kern/ltable.c index 43eb19510..7c5b79a24 100644 --- a/osfmk/kern/ltable.c +++ b/osfmk/kern/ltable.c @@ -35,8 +35,8 @@ #include -#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align))) -#define ROUNDDOWN(x,y) (((x)/(y))*(y)) +#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align))) +#define ROUNDDOWN(x, y) (((x)/(y))*(y)) /* ---------------------------------------------------------------------- * @@ -68,9 +68,10 @@ lt_elem_idx(struct link_table *table, uint32_t idx) { int slab_idx = idx / table->slab_elem; struct lt_elem *slab = table->table[slab_idx]; - if (!slab) + if (!slab) { panic("Invalid index:%d slab:%d (NULL) for table:%p\n", - idx, slab_idx, table); + idx, slab_idx, table); + } assert(slab->lt_id.idx <= idx && (slab->lt_id.idx + table->slab_elem) > idx); return lt_elem_ofst_slab(slab, table->slab_msk, (idx - slab->lt_id.idx) * table->elem_sz); } @@ -81,9 +82,10 @@ lt_elem_idx(struct link_table *table, uint32_t idx) { uint32_t ofst = idx * table->elem_sz; struct lt_elem *slab = table->table[ofst >> table->slab_shift]; - if (!slab) + if (!slab) { panic("Invalid index:%d slab:%d (NULL) for table:%p\n", - idx, (ofst >> table->slab_shift), table); + idx, (ofst >> table->slab_shift), table); + } assert(slab->lt_id.idx <= idx && (slab->lt_id.idx + table->slab_elem) > idx); return lt_elem_ofst_slab(slab, table->slab_msk, ofst); } @@ 
-97,11 +99,13 @@ lt_elem_in_range(struct lt_elem *elem, struct link_table *table) assert(base != NULL); while (*base != NULL) { uintptr_t b = (uintptr_t)(*base); - if (e >= b && e < b + table->slab_sz) + if (e >= b && e < b + table->slab_sz) { return 1; + } base++; - if ((uintptr_t)base >= (uintptr_t)table->table + PAGE_SIZE) + if ((uintptr_t)base >= (uintptr_t)table->table + PAGE_SIZE) { return 0; + } } return 0; } @@ -112,12 +116,13 @@ lt_elem_in_range(struct lt_elem *elem, struct link_table *table) * * NOTE: this does _not_ get or put a reference on 'elem' */ -void lt_elem_invalidate(struct lt_elem *elem) +void +lt_elem_invalidate(struct lt_elem *elem) { uint32_t __assert_only old = OSBitAndAtomic(~LT_BITS_VALID, &elem->lt_bits); OSMemoryBarrier(); assert(((lt_bits_type(old) != LT_RESERVED) && (old & LT_BITS_VALID)) || - ((lt_bits_type(old) == LT_RESERVED) && !(old & LT_BITS_VALID))); + ((lt_bits_type(old) == LT_RESERVED) && !(old & LT_BITS_VALID))); } /** @@ -125,20 +130,22 @@ void lt_elem_invalidate(struct lt_elem *elem) * * NOTE: this does _not_ get or put a reference on 'elem' */ -void lt_elem_mkvalid(struct lt_elem *elem) +void +lt_elem_mkvalid(struct lt_elem *elem) { uint32_t __assert_only old = OSBitOrAtomic(LT_BITS_VALID, &elem->lt_bits); OSMemoryBarrier(); assert(!(old & LT_BITS_VALID)); } -static void lt_elem_set_type(struct lt_elem *elem, int type) +static void +lt_elem_set_type(struct lt_elem *elem, int type) { uint32_t old_bits, new_bits; do { old_bits = elem->lt_bits; new_bits = (old_bits & ~LT_BITS_TYPE) | - ((type & LT_BITS_TYPE_MASK) << LT_BITS_TYPE_SHIFT); + ((type & LT_BITS_TYPE_MASK) << LT_BITS_TYPE_SHIFT); } while (OSCompareAndSwap(old_bits, new_bits, &elem->lt_bits) == FALSE); OSMemoryBarrier(); } @@ -149,19 +156,22 @@ static void lt_elem_set_type(struct lt_elem *elem, int type) * * Called once at system boot */ -void ltable_bootstrap(void) +void +ltable_bootstrap(void) { static int s_is_bootstrapped = 0; uint32_t tmp32 = 0; - if (s_is_bootstrapped) + if (s_is_bootstrapped) { return; + } s_is_bootstrapped = 1; g_lt_max_tbl_size = DEFAULT_MAX_TABLE_SIZE; - if (PE_parse_boot_argn("lt_tbl_size", &tmp32, sizeof(tmp32)) == TRUE) + if (PE_parse_boot_argn("lt_tbl_size", &tmp32, sizeof(tmp32)) == TRUE) { g_lt_max_tbl_size = (vm_size_t)P2ROUNDUP(tmp32, PAGE_SIZE); + } lck_grp_init(&g_lt_lck_grp, "link_table_locks", LCK_GRP_ATTR_NULL); } @@ -170,9 +180,10 @@ void ltable_bootstrap(void) * ltable_init: initialize a link table with given parameters * */ -void ltable_init(struct link_table *table, const char *name, - uint32_t max_tbl_elem, uint32_t elem_sz, - ltable_poison_func poison) +void +ltable_init(struct link_table *table, const char *name, + uint32_t max_tbl_elem, uint32_t elem_sz, + ltable_poison_func poison) { kern_return_t kr; uint32_t slab_sz, slab_shift, slab_msk, slab_elem; @@ -182,9 +193,10 @@ void ltable_init(struct link_table *table, const char *name, #ifndef CONFIG_LTABLE_STATS /* the element size _must_ be a power of two! 
*/ - if ((elem_sz & (elem_sz - 1)) != 0) + if ((elem_sz & (elem_sz - 1)) != 0) { panic("elem_sz:%d for table:'%s' must be a power of two!", - elem_sz, name); + elem_sz, name); + } #endif /* @@ -192,10 +204,11 @@ void ltable_init(struct link_table *table, const char *name, * for the table's element slabs */ kr = kernel_memory_allocate(kernel_map, (vm_offset_t *)&base, - PAGE_SIZE, 0, KMA_NOPAGEWAIT, VM_KERN_MEMORY_LTABLE); - if (kr != KERN_SUCCESS) + PAGE_SIZE, 0, KMA_NOPAGEWAIT, VM_KERN_MEMORY_LTABLE); + if (kr != KERN_SUCCESS) { panic("Cannot initialize %s table: " - "kernel_memory_allocate failed:%d\n", name, kr); + "kernel_memory_allocate failed:%d\n", name, kr); + } memset(base, 0, PAGE_SIZE); /* @@ -209,8 +222,9 @@ void ltable_init(struct link_table *table, const char *name, /* system maximum table size divided by number of slots in a page */ slab_sz = (uint32_t)(max_tbl_sz / (PAGE_SIZE / (sizeof(void *)))); - if (slab_sz < PAGE_SIZE) + if (slab_sz < PAGE_SIZE) { slab_sz = PAGE_SIZE; + } /* make sure the slab size is a power of two */ slab_shift = 0; @@ -220,8 +234,9 @@ void ltable_init(struct link_table *table, const char *name, if ((slab_sz & bit) == slab_sz) { slab_shift = i; slab_msk = 0; - for (uint32_t j = 0; j < i; j++) + for (uint32_t j = 0; j < i; j++) { slab_msk |= (1 << j); + } break; } slab_sz &= ~bit; @@ -230,15 +245,16 @@ void ltable_init(struct link_table *table, const char *name, /* initialize the table's slab zone (for table growth) */ ltdbg("Initializing %s zone: slab:%d (%d,0x%x) max:%ld", - name, slab_sz, slab_shift, slab_msk, max_tbl_sz); + name, slab_sz, slab_shift, slab_msk, max_tbl_sz); slab_zone = zinit(slab_sz, max_tbl_sz, slab_sz, name); assert(slab_zone != ZONE_NULL); /* allocate the first slab and populate it */ base[0] = (struct lt_elem *)zalloc(slab_zone); - if (base[0] == NULL) + if (base[0] == NULL) { panic("Can't allocate a %s table slab from zone:%p", - name, slab_zone); + name, slab_zone); + } memset(base[0], 0, slab_sz); @@ -301,7 +317,8 @@ void ltable_init(struct link_table *table, const char *name, * table mutex is unlocked * calling thread can block */ -void ltable_grow(struct link_table *table, uint32_t min_free) +void +ltable_grow(struct link_table *table, uint32_t min_free) { struct lt_elem *slab, **slot; struct lt_elem *e = NULL, *first_new_elem, *last_new_elem; @@ -341,20 +358,22 @@ void ltable_grow(struct link_table *table, uint32_t min_free) return; } panic("No more room to grow table: %p (nelem: %d, used: %d)", - table, table->nelem, table->used_elem); + table, table->nelem, table->used_elem); } slot = table->next_free_slab; table->next_free_slab++; - if ((uintptr_t)table->next_free_slab >= (uintptr_t)table->table + PAGE_SIZE) + if ((uintptr_t)table->next_free_slab >= (uintptr_t)table->table + PAGE_SIZE) { table->next_free_slab = NULL; + } assert(*slot == NULL); /* allocate another slab */ slab = (struct lt_elem *)zalloc(table->slab_zone); - if (slab == NULL) + if (slab == NULL) { panic("Can't allocate a %s table (%p) slab from zone:%p", - table->slab_zone->zone_name, table, table->slab_zone); + table->slab_zone->zone_name, table, table->slab_zone); + } memset(slab, 0, table->slab_sz); @@ -362,8 +381,9 @@ void ltable_grow(struct link_table *table, uint32_t min_free) ltdbg_v(" init %d new links...", table->slab_elem); for (unsigned l = 0; l < table->slab_elem; l++) { uint32_t idx = l + table->nelem; - if (idx >= (LT_IDX_MAX - 1)) + if (idx >= (LT_IDX_MAX - 1)) { break; /* the last element of the last slab */ + } e = 
lt_elem_ofst_slab(slab, table->slab_msk, l * table->elem_sz); e->lt_id.idx = idx; e->lt_next_idx = idx + 1; @@ -375,10 +395,11 @@ void ltable_grow(struct link_table *table, uint32_t min_free) /* update table book keeping, and atomically swap the freelist head */ *slot = slab; - if (table->nelem + table->slab_elem >= LT_IDX_MAX) + if (table->nelem + table->slab_elem >= LT_IDX_MAX) { table->nelem = LT_IDX_MAX - 1; - else + } else { table->nelem += table->slab_elem; + } #if CONFIG_LTABLE_STATS table->nslabs += 1; @@ -393,7 +414,7 @@ void ltable_grow(struct link_table *table, uint32_t min_free) /* connect the existing free list to the end of the new free list */ last_new_elem->lt_next_idx = free_id.idx; while (OSCompareAndSwap64(free_id.id, first_new_elem->lt_id.id, - &table->free_list.id) == FALSE) { + &table->free_list.id) == FALSE) { OSMemoryBarrier(); free_id = table->free_list; last_new_elem->lt_next_idx = free_id.idx; @@ -435,8 +456,9 @@ ltable_nelem(struct link_table *table) * the table grows to meet the demand for 'nelem' element(s). */ __attribute__((noinline)) -struct lt_elem *ltable_alloc_elem(struct link_table *table, int type, - int nelem, int nattempts) +struct lt_elem * +ltable_alloc_elem(struct link_table *table, int type, + int nelem, int nattempts) { int nspins = 0, ntries = 0, nalloc = 0; uint32_t table_size; @@ -445,9 +467,10 @@ struct lt_elem *ltable_alloc_elem(struct link_table *table, int type, static const int max_retries = 500; - if (type != LT_ELEM && type != LT_LINK && type != LT_RESERVED) + if (type != LT_ELEM && type != LT_LINK && type != LT_RESERVED) { panic("link_table_aloc of invalid elem type:%d from table @%p", - type, table); + type, table); + } assert(nelem > 0); @@ -472,17 +495,20 @@ try_again: return NULL; } - if (table->used_elem + nelem >= table_size) + if (table->used_elem + nelem >= table_size) { panic("No more room to grow table: 0x%p size:%d, used:%d, requested elem:%d", - table, table_size, table->used_elem, nelem); - if (nelem == 1) + table, table_size, table->used_elem, nelem); + } + if (nelem == 1) { panic("Too many alloc retries: %d, table:%p, type:%d, nelem:%d", - ntries, table, type, nelem); + ntries, table, type, nelem); + } /* don't panic: try allocating one-at-a-time */ while (nelem > 0) { tmp = ltable_alloc_elem(table, type, 1, nattempts); - if (elem) + if (elem) { lt_elem_list_link(table, tmp, elem); + } elem = tmp; --nelem; } @@ -502,9 +528,10 @@ try_again: * We may have just raced with table growth: check * again to make sure there really isn't any space. */ - if (++nspins > 4) + if (++nspins > 4) { panic("Can't grow table %p with preemption" - " disabled!", table); + " disabled!", table); + } delay(1); goto try_again; } @@ -514,8 +541,9 @@ try_again: /* read this value only once before the CAS */ free_id = table->free_list; - if (free_id.idx >= table_size) + if (free_id.idx >= table_size) { goto try_again; + } /* * Find the item on the free list which will become the new free list @@ -525,8 +553,8 @@ try_again: * free list head with the one we've investigated. 
*/ for (struct lt_elem *next_elem = lt_elem_idx(table, free_id.idx); - nalloc < nelem; - nalloc++) { + nalloc < nelem; + nalloc++) { elem = next_elem; next_id.generation = 0; next_id.idx = next_elem->lt_next_idx; @@ -540,8 +568,9 @@ try_again: /* 'elem' points to the last element being allocated */ if (OSCompareAndSwap64(free_id.id, next_id.id, - &table->free_list.id) == FALSE) + &table->free_list.id) == FALSE) { goto try_again; + } /* load barrier */ OSMemoryBarrier(); @@ -566,15 +595,16 @@ try_again: * subsequently marks the element as valid, then the put * will simply drop the reference. */ - for (struct lt_elem *tmp = elem; ; ) { + for (struct lt_elem *tmp = elem;;) { assert(!lt_bits_valid(tmp->lt_bits) && - (lt_bits_refcnt(tmp->lt_bits) == 0)); + (lt_bits_refcnt(tmp->lt_bits) == 0)); --nalloc; tmp->lt_id.generation += 1; tmp->lt_bits = 1; lt_elem_set_type(tmp, type); - if (tmp->lt_next_idx == LT_IDX_MAX) + if (tmp->lt_next_idx == LT_IDX_MAX) { break; + } assert(tmp->lt_next_idx != LT_IDX_MAX); tmp = lt_elem_idx(table, tmp->lt_next_idx); } @@ -583,13 +613,16 @@ try_again: #if CONFIG_LTABLE_STATS uint64_t nreservations; table->nallocs += nelem; - if (type == LT_RESERVED) + if (type == LT_RESERVED) { OSIncrementAtomic64(&table->nreservations); + } nreservations = table->nreservations; - if (table->used_elem > table->max_used) + if (table->used_elem > table->max_used) { table->max_used = table->used_elem; - if (nreservations > table->max_reservations) + } + if (nreservations > table->max_reservations) { table->max_reservations = nreservations; + } table->avg_used = (table->avg_used + table->used_elem) / 2; table->avg_reservations = (table->avg_reservations + nreservations) / 2; #endif @@ -606,11 +639,12 @@ try_again: * is disconnected from any list to which it belongs, and its type is set to * 'type'. 
*/ -void ltable_realloc_elem(struct link_table *table, struct lt_elem *elem, int type) +void +ltable_realloc_elem(struct link_table *table, struct lt_elem *elem, int type) { (void)table; assert(lt_elem_in_range(elem, table) && - !lt_bits_valid(elem->lt_bits)); + !lt_bits_valid(elem->lt_bits)); #if CONFIG_LTABLE_STATS table->nreallocs += 1; @@ -648,40 +682,45 @@ void ltable_realloc_elem(struct link_table *table, struct lt_elem *elem, int typ * 'elem' is _not_ marked valid * 'elem' has a reference count of 0 */ -static void ltable_free_elem(struct link_table *table, struct lt_elem *elem) +static void +ltable_free_elem(struct link_table *table, struct lt_elem *elem) { struct ltable_id next_id; assert(lt_elem_in_range(elem, table) && - !lt_bits_valid(elem->lt_bits) && - (lt_bits_refcnt(elem->lt_bits) == 0)); + !lt_bits_valid(elem->lt_bits) && + (lt_bits_refcnt(elem->lt_bits) == 0)); OSDecrementAtomic(&table->used_elem); #if CONFIG_LTABLE_STATS table->avg_used = (table->avg_used + table->used_elem) / 2; - if (lt_bits_type(elem->lt_bits) == LT_RESERVED) + if (lt_bits_type(elem->lt_bits) == LT_RESERVED) { OSDecrementAtomic64(&table->nreservations); + } table->avg_reservations = (table->avg_reservations + table->nreservations) / 2; #endif elem->lt_bits = 0; - if (table->poison) + if (table->poison) { (table->poison)(table, elem); + } again: next_id = table->free_list; - if (next_id.idx >= table->nelem) + if (next_id.idx >= table->nelem) { elem->lt_next_idx = LT_IDX_MAX; - else + } else { elem->lt_next_idx = next_id.idx; + } /* store barrier */ OSMemoryBarrier(); if (OSCompareAndSwap64(next_id.id, elem->lt_id.id, - &table->free_list.id) == FALSE) + &table->free_list.id) == FALSE) { goto again; + } } @@ -695,7 +734,8 @@ again: * NOTE: if the table element pointed to by 'id' is marked as invalid, * this function will return NULL. */ -struct lt_elem *ltable_get_elem(struct link_table *table, uint64_t id) +struct lt_elem * +ltable_get_elem(struct link_table *table, uint64_t id) { struct lt_elem *elem; uint32_t idx, bits, new_bits; @@ -707,22 +747,25 @@ struct lt_elem *ltable_get_elem(struct link_table *table, uint64_t id) idx = ((struct ltable_id *)&id)->idx; - if (idx >= table->nelem) + if (idx >= table->nelem) { panic("id:0x%llx : idx:%d > %d", id, idx, table->nelem); + } elem = lt_elem_idx(table, idx); /* verify the validity by taking a reference on the table object */ bits = elem->lt_bits; - if (!lt_bits_valid(bits)) + if (!lt_bits_valid(bits)) { return NULL; + } /* * do a pre-verify on the element ID to potentially * avoid 2 compare-and-swaps */ - if (elem->lt_id.id != id) + if (elem->lt_id.id != id) { return NULL; + } new_bits = bits + 1; @@ -749,9 +792,9 @@ struct lt_elem *ltable_get_elem(struct link_table *table, uint64_t id) /* check to see that our reference is to the same generation! */ if (elem->lt_id.id != id) { /* - ltdbg("ID:0x%llx table generation (%d) != %d", - id, elem->lt_id.generation, - ((struct ltable_id *)&id)->generation); + * ltdbg("ID:0x%llx table generation (%d) != %d", + * id, elem->lt_id.generation, + * ((struct ltable_id *)&id)->generation); */ ltable_put_elem(table, elem); return NULL; @@ -769,7 +812,8 @@ struct lt_elem *ltable_get_elem(struct link_table *table, uint64_t id) * when the reference count goes to 0 AND the element has been marked as * invalid. 
*/ -void ltable_put_elem(struct link_table *table, struct lt_elem *elem) +void +ltable_put_elem(struct link_table *table, struct lt_elem *elem) { uint32_t bits, new_bits; @@ -795,8 +839,9 @@ void ltable_put_elem(struct link_table *table, struct lt_elem *elem) * if this was the last reference, and it was marked as invalid, * then we can add this link object back to the free list */ - if (!lt_bits_valid(new_bits) && (lt_bits_refcnt(new_bits) == 0)) + if (!lt_bits_valid(new_bits) && (lt_bits_refcnt(new_bits) == 0)) { ltable_free_elem(table, elem); + } return; } @@ -820,7 +865,8 @@ void ltable_put_elem(struct link_table *table, struct lt_elem *elem) * results in: parent->child * however this could also result in: parent->...->child */ -int lt_elem_list_link(struct link_table *table, struct lt_elem *parent, struct lt_elem *child) +int +lt_elem_list_link(struct link_table *table, struct lt_elem *parent, struct lt_elem *child) { int nelem = 1; @@ -851,30 +897,34 @@ int lt_elem_list_link(struct link_table *table, struct lt_elem *parent, struct l * It does _not_ take an extra reference on the object: the list implicitly * holds that reference. */ -struct lt_elem *lt_elem_list_first(struct link_table *table, uint64_t id) +struct lt_elem * +lt_elem_list_first(struct link_table *table, uint64_t id) { uint32_t idx; struct lt_elem *elem = NULL; - if (id == 0) + if (id == 0) { return NULL; + } idx = ((struct ltable_id *)&id)->idx; - if (idx > table->nelem) + if (idx > table->nelem) { panic("Invalid element for id:0x%llx", id); + } elem = lt_elem_idx(table, idx); /* invalid element: reserved ID was probably already reallocated */ - if (elem->lt_id.id != id) + if (elem->lt_id.id != id) { return NULL; + } /* the returned element should _not_ be marked valid! */ if (lt_bits_valid(elem->lt_bits) || lt_bits_type(elem->lt_bits) != LT_RESERVED || lt_bits_refcnt(elem->lt_bits) != 1) { panic("Valid/unreserved element %p (0x%x) in reserved list", - elem, elem->lt_bits); + elem, elem->lt_bits); } return elem; @@ -886,14 +936,17 @@ struct lt_elem *lt_elem_list_first(struct link_table *table, uint64_t id) * * Note that this will return NULL if 'elem' is actually the end of the list. */ -struct lt_elem *lt_elem_list_next(struct link_table *table, struct lt_elem *head) +struct lt_elem * +lt_elem_list_next(struct link_table *table, struct lt_elem *head) { struct lt_elem *elem; - if (!head) + if (!head) { return NULL; - if (head->lt_next_idx >= table->nelem) + } + if (head->lt_next_idx >= table->nelem) { return NULL; + } elem = lt_elem_idx(table, head->lt_next_idx); assert(lt_elem_in_range(elem, table)); @@ -909,12 +962,14 @@ struct lt_elem *lt_elem_list_next(struct link_table *table, struct lt_elem *head * the list), and return the element subsequent to 'elem' in the list * (which could be NULL) */ -struct lt_elem *lt_elem_list_break(struct link_table *table, struct lt_elem *elem) +struct lt_elem * +lt_elem_list_break(struct link_table *table, struct lt_elem *elem) { struct lt_elem *next; - if (!elem) + if (!elem) { return NULL; + } next = lt_elem_list_next(table, elem); elem->lt_next_idx = LT_IDX_MAX; @@ -931,12 +986,14 @@ struct lt_elem *lt_elem_list_break(struct link_table *table, struct lt_elem *ele * returned object. A realloc is done to reset the type of the object, but it * is still left invalid. 
*/ -struct lt_elem *lt_elem_list_pop(struct link_table *table, uint64_t *id, int type) +struct lt_elem * +lt_elem_list_pop(struct link_table *table, uint64_t *id, int type) { struct lt_elem *first, *next; - if (!id || *id == 0) + if (!id || *id == 0) { return NULL; + } /* pop an item off the reserved stack */ @@ -947,10 +1004,11 @@ struct lt_elem *lt_elem_list_pop(struct link_table *table, uint64_t *id, int typ } next = lt_elem_list_next(table, first); - if (next) + if (next) { *id = next->lt_id.id; - else + } else { *id = 0; + } ltable_realloc_elem(table, first, type); @@ -964,28 +1022,32 @@ struct lt_elem *lt_elem_list_pop(struct link_table *table, uint64_t *id, int typ * to 'table' as free elements. The 'type' parameter is used in development * kernels to assert that all elements on the list are of the given type. */ -int lt_elem_list_release(struct link_table *table, struct lt_elem *head, - int __assert_only type) +int +lt_elem_list_release(struct link_table *table, struct lt_elem *head, + int __assert_only type) { struct lt_elem *elem; struct ltable_id free_id; int nelem = 0; - if (!head) + if (!head) { return 0; + } - for (elem = head; ; ) { + for (elem = head;;) { assert(lt_elem_in_range(elem, table)); assert(!lt_bits_valid(elem->lt_bits) && (lt_bits_refcnt(elem->lt_bits) == 1)); assert(lt_bits_type(elem->lt_bits) == type); nelem++; elem->lt_bits = 0; - if (table->poison) + if (table->poison) { (table->poison)(table, elem); + } - if (elem->lt_next_idx == LT_IDX_MAX) + if (elem->lt_next_idx == LT_IDX_MAX) { break; + } assert(elem->lt_next_idx < table->nelem); elem = lt_elem_idx(table, elem->lt_next_idx); } @@ -999,16 +1061,18 @@ int lt_elem_list_release(struct link_table *table, struct lt_elem *head, again: free_id = table->free_list; - if (free_id.idx >= table->nelem) + if (free_id.idx >= table->nelem) { elem->lt_next_idx = LT_IDX_MAX; - else + } else { elem->lt_next_idx = free_id.idx; + } /* store barrier */ OSMemoryBarrier(); if (OSCompareAndSwap64(free_id.id, head->lt_id.id, - &table->free_list.id) == FALSE) + &table->free_list.id) == FALSE) { goto again; + } OSAddAtomic(-nelem, &table->used_elem); return nelem; diff --git a/osfmk/kern/ltable.h b/osfmk/kern/ltable.h index c95743f10..9b1e47c72 100644 --- a/osfmk/kern/ltable.h +++ b/osfmk/kern/ltable.h @@ -31,23 +31,23 @@ #include #if CONFIG_LTABLE_DEBUG -#define ltdbg(fmt,...) \ +#define ltdbg(fmt, ...) \ printf("LT[%s]: " fmt "\n", __func__, ## __VA_ARGS__) #else -#define ltdbg(fmt,...) do { } while (0) +#define ltdbg(fmt, ...) do { } while (0) #endif #ifdef LTABLE_VERBOSE_DEBUG -#define ltdbg_v(fmt,...) \ +#define ltdbg_v(fmt, ...) \ printf("LT[v:%s]: " fmt "\n", __func__, ## __VA_ARGS__) #else -#define ltdbg_v(fmt,...) do { } while (0) +#define ltdbg_v(fmt, ...) do { } while (0) #endif -#define ltinfo(fmt,...) \ +#define ltinfo(fmt, ...) \ printf("LT[%s]: " fmt "\n", __func__, ## __VA_ARGS__) -#define lterr(fmt,...) \ +#define lterr(fmt, ...) 
\ printf("LT[%s] ERROR: " fmt "\n", __func__, ## __VA_ARGS__) @@ -67,7 +67,7 @@ struct ltable_id { * enforce a particular memory layout */ uint64_t idx:18, /* allows indexing up to 8MB of 32byte objects */ - generation:46; + generation:46; }; }; }; @@ -177,8 +177,8 @@ extern void ltable_bootstrap(void); * */ extern void ltable_init(struct link_table *table, const char *name, - uint32_t max_tbl_elem, uint32_t elem_sz, - ltable_poison_func poison); + uint32_t max_tbl_elem, uint32_t elem_sz, + ltable_poison_func poison); /** @@ -205,7 +205,7 @@ extern void ltable_grow(struct link_table *table, uint32_t min_free); */ extern __attribute__((noinline)) struct lt_elem *ltable_alloc_elem(struct link_table *table, int type, - int nelem, int nattempts); + int nelem, int nattempts); #if DEVELOPMENT || DEBUG @@ -226,7 +226,7 @@ int ltable_nelem(struct link_table *table); * 'type'. */ extern void ltable_realloc_elem(struct link_table *table, - struct lt_elem *elem, int type); + struct lt_elem *elem, int type); /** @@ -288,7 +288,7 @@ extern void lt_elem_mkvalid(struct lt_elem *elem); * however this could also result in: parent->...->child */ extern int lt_elem_list_link(struct link_table *table, - struct lt_elem *parent, struct lt_elem *child); + struct lt_elem *parent, struct lt_elem *child); /** @@ -309,7 +309,7 @@ extern struct lt_elem *lt_elem_list_first(struct link_table *table, uint64_t id) * Note that this will return NULL if 'elem' is actually the end of the list. */ extern struct lt_elem *lt_elem_list_next(struct link_table *table, - struct lt_elem *elem); + struct lt_elem *elem); /** @@ -320,7 +320,7 @@ extern struct lt_elem *lt_elem_list_next(struct link_table *table, * (which could be NULL) */ extern struct lt_elem *lt_elem_list_break(struct link_table *table, - struct lt_elem *elem); + struct lt_elem *elem); /** @@ -333,7 +333,7 @@ extern struct lt_elem *lt_elem_list_break(struct link_table *table, * is still left invalid. */ extern struct lt_elem *lt_elem_list_pop(struct link_table *table, - uint64_t *id, int type); + uint64_t *id, int type); /** @@ -344,11 +344,12 @@ extern struct lt_elem *lt_elem_list_pop(struct link_table *table, * kernels to assert that all elements on the list are of the given type. */ extern int lt_elem_list_release(struct link_table *table, - struct lt_elem *head, - int __assert_only type); + struct lt_elem *head, + int __assert_only type); -static inline int lt_elem_list_release_id(struct link_table *table, - uint64_t id, int type) +static inline int +lt_elem_list_release_id(struct link_table *table, + uint64_t id, int type) { return lt_elem_list_release(table, lt_elem_list_first(table, id), type); } diff --git a/osfmk/kern/mach_node.c b/osfmk/kern/mach_node.c index d342f3b48..c4e8347f1 100644 --- a/osfmk/kern/mach_node.c +++ b/osfmk/kern/mach_node.c @@ -58,36 +58,36 @@ #include -#include // OSAddAtomic64(), OSCompareAndSwap() +#include // OSAddAtomic64(), OSCompareAndSwap() #include // OSHostByteOrder() #pragma pack(4) -#define MNL_NAME_TABLE_SIZE (256) // Hash is evenly distributed, so ^2 is ok -#define MNL_NAME_HASH(name) (name % MNL_NAME_TABLE_SIZE) +#define MNL_NAME_TABLE_SIZE (256) // Hash is evenly distributed, so ^2 is ok +#define MNL_NAME_HASH(name) (name % MNL_NAME_TABLE_SIZE) /*** Visible outside mach_node layer ***/ -mach_node_id_t localnode_id = -1; // This node's FLIPC id. +mach_node_id_t localnode_id = -1; // This node's FLIPC id. 
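The ltable.c and ltable.h hunks above are mostly mechanical restyling (brace insertion and continuation-line reindentation), but they run straight through the most delicate code in the subsystem: the lock-free free list that ltable_alloc_elem() and ltable_free_elem() maintain with OSCompareAndSwap64() over the 64-bit {idx, generation} head declared in ltable.h. The following is a minimal user-space sketch of that scheme for readers tracing the diff; it is not part of the patch, the demo_* names are invented, and C11 stdatomic stands in for the libkern primitives.

#include <stdatomic.h>
#include <stdint.h>

#define DEMO_NELEM	64	/* one small slab, for illustration */

struct demo_id {
	union {
		uint64_t id;			/* whole-word view, CAS'd below */
		struct {
			uint64_t idx:18;	/* slot index, as in struct ltable_id */
			uint64_t generation:46;	/* changes whenever a slot is recycled */
		};
	};
};

struct demo_elem {
	struct demo_id	lt_id;		/* this slot's current {idx, generation} */
	uint32_t	lt_next_idx;	/* free-list link, by index rather than pointer */
};

/* Push a freed element: it becomes the new head, publishing its full id. */
static void
demo_freelist_push(_Atomic uint64_t *head, struct demo_elem *e)
{
	uint64_t old = atomic_load(head);
	do {
		struct demo_id h = { .id = old };
		e->lt_next_idx = (uint32_t)h.idx;
	} while (!atomic_compare_exchange_weak(head, &old, e->lt_id.id));
}

/* Pop the head element, retrying whenever another thread wins the race. */
static struct demo_elem *
demo_freelist_pop(_Atomic uint64_t *head, struct demo_elem *slab)
{
	for (;;) {
		struct demo_id h = { .id = atomic_load(head) };
		if (h.idx >= DEMO_NELEM) {
			return NULL;		/* free list is empty */
		}
		struct demo_elem *e = &slab[h.idx];
		struct demo_id next = { .idx = e->lt_next_idx, .generation = 0 };
		/*
		 * The CAS succeeds only if the head still holds the exact
		 * {idx, generation} pair we read. A slot that was popped and
		 * pushed back in the meantime re-enters the list under a new
		 * generation, so a stale head value can never match by
		 * accident -- the classic ABA failure of index-based stacks.
		 */
		uint64_t expect = h.id;
		if (atomic_compare_exchange_strong(head, &expect, next.id)) {
			e->lt_id.generation += 1;	/* retire the old id, as the
							 * kernel does at alloc time */
			return e;
		}
	}
}

The 46-bit generation is the whole trick: each allocate/free cycle changes the id a slot will publish the next time it is pushed, so a thread holding a stale head always fails its compare-and-swap and retries instead of corrupting the list.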
#if MACH_FLIPC -mach_node_t localnode; // This node's mach_node_t struct +mach_node_t localnode; // This node's mach_node_t struct /*** Private to mach_node layer ***/ -static int mach_nodes_to_publish; -static mach_node_t mach_node_table[MACH_NODES_MAX]; +static int mach_nodes_to_publish; +static mach_node_t mach_node_table[MACH_NODES_MAX]; static lck_spin_t mach_node_table_lock_data; #define MACH_NODE_TABLE_LOCK() lck_spin_lock(&mach_node_table_lock_data) #define MACH_NODE_TABLE_UNLOCK() lck_spin_unlock(&mach_node_table_lock_data) #define MACH_NODE_TABLE_LOCK_INIT() lck_spin_init(&mach_node_table_lock_data, \ - &ipc_lck_grp, &ipc_lck_attr) + &ipc_lck_grp, &ipc_lck_attr) -static volatile SInt64 mnl_name_next; -static queue_head_t mnl_name_table[MNL_NAME_TABLE_SIZE]; +static volatile SInt64 mnl_name_next; +static queue_head_t mnl_name_table[MNL_NAME_TABLE_SIZE]; static lck_spin_t mnl_name_table_lock_data; #define MNL_NAME_TABLE_LOCK() lck_spin_lock(&mnl_name_table_lock_data) #define MNL_NAME_TABLE_UNLOCK() lck_spin_unlock(&mnl_name_table_lock_data) #define MNL_NAME_TABLE_LOCK_INIT() lck_spin_init(&mnl_name_table_lock_data, \ - &ipc_lck_grp, &ipc_lck_attr) + &ipc_lck_grp, &ipc_lck_attr) static void mach_node_init(void); static void mnl_name_table_init(void); @@ -105,43 +105,43 @@ static kern_return_t mach_node_register(mach_node_t node); void mach_node_init(void) { - mach_node_id_t node_id = 0; // TODO: Read from device tree? + mach_node_id_t node_id = 0; // TODO: Read from device tree? if (OSCompareAndSwap((UInt32)(HOST_LOCAL_NODE), - (UInt32)node_id, - &localnode_id)) { + (UInt32)node_id, + &localnode_id)) { printf("mach_node_init(): localnode_id=%d of %d\n", - localnode_id, MACH_NODES_MAX); + localnode_id, MACH_NODES_MAX); mach_node_table_init(); mnl_name_table_init(); flipc_init(); - } // TODO: else block until init is finished (init completion race) + } // TODO: else block until init is finished (init completion race) } void mach_node_table_init(void) { - MACH_NODE_TABLE_LOCK_INIT(); - MACH_NODE_TABLE_LOCK(); + MACH_NODE_TABLE_LOCK_INIT(); + MACH_NODE_TABLE_LOCK(); - /* Start with an enpty node table. */ - bzero(mach_node_table, sizeof(mach_node_t) * MACH_NODES_MAX); - mach_nodes_to_publish = 0; + /* Start with an empty node table. 
*/ + bzero(mach_node_table, sizeof(mach_node_t) * MACH_NODES_MAX); + mach_nodes_to_publish = 0; - /* Allocate localnode's struct */ - localnode = mach_node_for_id_locked(localnode_id, 1, 1); - assert(MACH_NODE_VALID(localnode)); + /* Allocate localnode's struct */ + localnode = mach_node_for_id_locked(localnode_id, 1, 1); + assert(MACH_NODE_VALID(localnode)); - MACH_NODE_TABLE_UNLOCK(); + MACH_NODE_TABLE_UNLOCK(); - /* Set up localnode's struct */ - bzero(localnode, sizeof(localnode)); - localnode->info.datamodel = LOCAL_DATA_MODEL; - localnode->info.byteorder = OSHostByteOrder(); - localnode->info.proto_vers_min = MNL_PROTOCOL_V1; - localnode->info.proto_vers_max = MNL_PROTOCOL_V1; - localnode->proto_vers = MNL_PROTOCOL_V1; - localnode->published = 0; - localnode->active = 1; + /* Set up localnode's struct */ + bzero(localnode, sizeof(localnode)); + localnode->info.datamodel = LOCAL_DATA_MODEL; + localnode->info.byteorder = OSHostByteOrder(); + localnode->info.proto_vers_min = MNL_PROTOCOL_V1; + localnode->info.proto_vers_max = MNL_PROTOCOL_V1; + localnode->proto_vers = MNL_PROTOCOL_V1; + localnode->published = 0; + localnode->active = 1; MACH_NODE_UNLOCK(localnode); } @@ -156,46 +156,46 @@ mach_node_table_init(void) void mach_node_publish(mach_node_t node) { - kern_return_t kr; - - if (!MACH_NODE_VALID(node) || (!node->active) || (node->published)) - return; // node is invalid or not suitable for publication - - ipc_port_t bs_port = localnode->bootstrap_port; - if (!IP_VALID(bs_port)) - return; // No bootstrap server to notify! - - /* Node is suitable and server is present, so make registration message */ - struct mach_node_server_register_msg msg; - - msg.node_header.header.msgh_remote_port = bs_port; - msg.node_header.header.msgh_size = sizeof(msg); - msg.node_header.header.msgh_local_port = MACH_PORT_NULL; - msg.node_header.header.msgh_voucher_port = MACH_PORT_NULL; - msg.node_header.header.msgh_id = MACH_NODE_SERVER_MSG_ID; - msg.node_header.node_id = node->info.node_id; - msg.node_header.options = 0; - msg.datamodel = node->info.datamodel; - msg.byteorder = node->info.byteorder; - - if (node == localnode) { - msg.node_header.identifier = MACH_NODE_SM_REG_LOCAL; - msg.node_header.header.msgh_bits = - MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, 0); - } else { - msg.node_header.identifier = MACH_NODE_SM_REG_REMOTE; - msg.node_header.header.msgh_local_port = node->bootstrap_port; - msg.node_header.header.msgh_bits = MACH_MSGH_BITS_SET - (MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND, 0, 0); - } - - kr = mach_msg_send_from_kernel_proper(&msg.node_header.header, - sizeof (msg)); - if (kr == KERN_SUCCESS) { - node->published = 1; - mach_nodes_to_publish--; - } - printf("mach_node_publish(%d)=%d\n", node->info.node_id, kr); + kern_return_t kr; + + if (!MACH_NODE_VALID(node) || (!node->active) || (node->published)) { + return; // node is invalid or not suitable for publication + } + ipc_port_t bs_port = localnode->bootstrap_port; + if (!IP_VALID(bs_port)) { + return; // No bootstrap server to notify! 
+ } + /* Node is suitable and server is present, so make registration message */ + struct mach_node_server_register_msg msg; + + msg.node_header.header.msgh_remote_port = bs_port; + msg.node_header.header.msgh_size = sizeof(msg); + msg.node_header.header.msgh_local_port = MACH_PORT_NULL; + msg.node_header.header.msgh_voucher_port = MACH_PORT_NULL; + msg.node_header.header.msgh_id = MACH_NODE_SERVER_MSG_ID; + msg.node_header.node_id = node->info.node_id; + msg.node_header.options = 0; + msg.datamodel = node->info.datamodel; + msg.byteorder = node->info.byteorder; + + if (node == localnode) { + msg.node_header.identifier = MACH_NODE_SM_REG_LOCAL; + msg.node_header.header.msgh_bits = + MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, 0); + } else { + msg.node_header.identifier = MACH_NODE_SM_REG_REMOTE; + msg.node_header.header.msgh_local_port = node->bootstrap_port; + msg.node_header.header.msgh_bits = MACH_MSGH_BITS_SET + (MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND, 0, 0); + } + + kr = mach_msg_send_from_kernel_proper(&msg.node_header.header, + sizeof(msg)); + if (kr == KERN_SUCCESS) { + node->published = 1; + mach_nodes_to_publish--; + } + printf("mach_node_publish(%d)=%d\n", node->info.node_id, kr); } /* Called whenever the node special port changes */ @@ -203,49 +203,52 @@ void mach_node_port_changed(void) { ipc_port_t bs_port; - + mach_node_init(); // Lazy init of mach_node layer - + /* Cleanup previous bootstrap port if necessary */ - MACH_NODE_LOCK(localnode); - flipc_node_retire(localnode); + MACH_NODE_LOCK(localnode); + flipc_node_retire(localnode); bs_port = localnode->bootstrap_port; if (IP_VALID(bs_port)) { localnode->bootstrap_port = IP_NULL; // TODO: destroy send right to outgoing bs_port } - + kernel_get_special_port(host_priv_self(), HOST_NODE_PORT, &bs_port); assert(IP_VALID(bs_port)); - localnode->bootstrap_port = bs_port; - flipc_node_prepare(localnode); - MACH_NODE_UNLOCK(localnode); + localnode->bootstrap_port = bs_port; + flipc_node_prepare(localnode); + MACH_NODE_UNLOCK(localnode); /* Cleanup the publication state of all nodes in the table */ MACH_NODE_TABLE_LOCK(); // TODO: Signup for bootstrap port death notifications localnode->active = 1; - + mach_nodes_to_publish = 0; - + int n; - for (n=0; npublished = 0; - if (np->active == 1) + if (np->active == 1) { mach_nodes_to_publish++; + } } - + mach_node_publish(localnode); // Always publish local node first - - for (n=0; ninfo.node_id = node_id; - } - return node; + mach_node_t node = MACH_NODE_ALLOC(); + if (MACH_NODE_VALID(node)) { + bzero(node, sizeof(struct mach_node)); + MACH_NODE_LOCK_INIT(node); + node->info.node_id = node_id; + } + return node; } @@ -269,132 +272,136 @@ mach_node_alloc_init(mach_node_id_t node_id) * registers it with the mach_node and flipc (if flipc is enabled) layers. 
*/ kern_return_t -mach_node_register(mach_node_t node) -{ - assert(MACH_NODE_VALID(node)); - mach_node_id_t nid = node->info.node_id; - assert(MACH_NODE_ID_VALID(nid)); - - kern_return_t kr; - ipc_space_t proxy_space = IS_NULL; - ipc_pset_t pp_set = IPS_NULL; // pset for proxy ports - ipc_port_t bs_port = MACH_PORT_NULL; - ipc_port_t ack_port = MACH_PORT_NULL; - - printf("mach_node_register(%d)\n", nid); - - /* TODO: Support non-native byte order and data models */ - if ((node->info.byteorder != OSHostByteOrder()) || - (node->info.datamodel != LOCAL_DATA_MODEL)) { - printf("mach_node_register: unsupported byte order (%d) or width (%d)", - node->info.byteorder, node->info.datamodel); - return KERN_INVALID_ARGUMENT; - } - - /* Create the space that holds all local rights assigned to */ - kr = ipc_space_create_special(&proxy_space); - if (kr != KERN_SUCCESS) - goto out; - proxy_space->is_node_id = nid; - - /* Create the bootstrap proxy port for this remote node */ - bs_port = ipc_port_alloc_special(proxy_space); - if (bs_port == MACH_PORT_NULL) { - kr = KERN_RESOURCE_SHORTAGE; - goto out; - } - - /* Create the control (ack) port for this remote node */ - ack_port = ipc_port_alloc_special(proxy_space); - if (ack_port == MACH_PORT_NULL) { - kr = KERN_RESOURCE_SHORTAGE; - goto out; - } - - /* Create the set that holds all proxy ports for this remote node */ - pp_set = ipc_pset_alloc_special(proxy_space); - if (pp_set == IPS_NULL) { - kr = KERN_RESOURCE_SHORTAGE; - goto out; - } - - waitq_set_lazy_init_link(pp_set); - /* Add the bootstrap port to the proxy port set */ - uint64_t wq_link_id = waitq_link_reserve(NULL); - uint64_t wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, - WAITQ_DONT_LOCK); - ips_lock(pp_set); - ip_lock(bs_port); - ipc_pset_add(pp_set, - bs_port, - &wq_link_id, - &wq_reserved_prepost); - ip_unlock(bs_port); - ips_unlock(pp_set); - - waitq_link_release(wq_link_id); - waitq_prepost_release_reserve(wq_reserved_prepost); - - /* Add the control port to the proxy port set */ - wq_link_id = waitq_link_reserve(NULL); - wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, - WAITQ_DONT_LOCK); - ips_lock(pp_set); - ip_lock(ack_port); - ipc_pset_add(pp_set, - ack_port, - &wq_link_id, - &wq_reserved_prepost); - ip_unlock(ack_port); - ips_unlock(pp_set); - - waitq_link_release(wq_link_id); - waitq_prepost_release_reserve(wq_reserved_prepost); - - // Setup mach_node struct - node->published = 0; - node->active = 1; - node->proxy_space = proxy_space; - node->proxy_port_set = pp_set; - node->bootstrap_port = bs_port; - node->proto_vers = node->info.proto_vers_max; - node->control_port = ack_port; - - // Place new mach_node struct into node table - MACH_NODE_TABLE_LOCK(); - - mach_node_t old_node = mach_node_table[nid]; - if (!MACH_NODE_VALID(old_node) || (old_node->dead)) { - node->antecedent = old_node; - flipc_node_prepare(node); - mach_node_table[nid] = node; - mach_nodes_to_publish++; - mach_node_publish(node); - kr = KERN_SUCCESS; - } else { - printf("mach_node_register: id %d already active!", nid); - kr = KERN_FAILURE; - } - MACH_NODE_TABLE_UNLOCK(); +mach_node_register(mach_node_t node) +{ + assert(MACH_NODE_VALID(node)); + mach_node_id_t nid = node->info.node_id; + assert(MACH_NODE_ID_VALID(nid)); + + kern_return_t kr; + ipc_space_t proxy_space = IS_NULL; + ipc_pset_t pp_set = IPS_NULL; // pset for proxy ports + ipc_port_t bs_port = MACH_PORT_NULL; + ipc_port_t ack_port = MACH_PORT_NULL; + + printf("mach_node_register(%d)\n", nid); + + /* TODO: Support non-native byte order 
and data models */ + if ((node->info.byteorder != OSHostByteOrder()) || + (node->info.datamodel != LOCAL_DATA_MODEL)) { + printf("mach_node_register: unsupported byte order (%d) or width (%d)", + node->info.byteorder, node->info.datamodel); + return KERN_INVALID_ARGUMENT; + } + + /* Create the space that holds all local rights assigned to */ + kr = ipc_space_create_special(&proxy_space); + if (kr != KERN_SUCCESS) { + goto out; + } + proxy_space->is_node_id = nid; + + /* Create the bootstrap proxy port for this remote node */ + bs_port = ipc_port_alloc_special(proxy_space); + if (bs_port == MACH_PORT_NULL) { + kr = KERN_RESOURCE_SHORTAGE; + goto out; + } + + /* Create the control (ack) port for this remote node */ + ack_port = ipc_port_alloc_special(proxy_space); + if (ack_port == MACH_PORT_NULL) { + kr = KERN_RESOURCE_SHORTAGE; + goto out; + } + + /* Create the set that holds all proxy ports for this remote node */ + pp_set = ipc_pset_alloc_special(proxy_space); + if (pp_set == IPS_NULL) { + kr = KERN_RESOURCE_SHORTAGE; + goto out; + } + + waitq_set_lazy_init_link(pp_set); + /* Add the bootstrap port to the proxy port set */ + uint64_t wq_link_id = waitq_link_reserve(NULL); + uint64_t wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, + WAITQ_DONT_LOCK); + ips_lock(pp_set); + ip_lock(bs_port); + ipc_pset_add(pp_set, + bs_port, + &wq_link_id, + &wq_reserved_prepost); + ip_unlock(bs_port); + ips_unlock(pp_set); + + waitq_link_release(wq_link_id); + waitq_prepost_release_reserve(wq_reserved_prepost); + + /* Add the control port to the proxy port set */ + wq_link_id = waitq_link_reserve(NULL); + wq_reserved_prepost = waitq_prepost_reserve(NULL, 10, + WAITQ_DONT_LOCK); + ips_lock(pp_set); + ip_lock(ack_port); + ipc_pset_add(pp_set, + ack_port, + &wq_link_id, + &wq_reserved_prepost); + ip_unlock(ack_port); + ips_unlock(pp_set); + + waitq_link_release(wq_link_id); + waitq_prepost_release_reserve(wq_reserved_prepost); + + // Setup mach_node struct + node->published = 0; + node->active = 1; + node->proxy_space = proxy_space; + node->proxy_port_set = pp_set; + node->bootstrap_port = bs_port; + node->proto_vers = node->info.proto_vers_max; + node->control_port = ack_port; + + // Place new mach_node struct into node table + MACH_NODE_TABLE_LOCK(); + + mach_node_t old_node = mach_node_table[nid]; + if (!MACH_NODE_VALID(old_node) || (old_node->dead)) { + node->antecedent = old_node; + flipc_node_prepare(node); + mach_node_table[nid] = node; + mach_nodes_to_publish++; + mach_node_publish(node); + kr = KERN_SUCCESS; + } else { + printf("mach_node_register: id %d already active!", nid); + kr = KERN_FAILURE; + } + MACH_NODE_TABLE_UNLOCK(); out: - if (kr != KERN_SUCCESS) { // Dispose of whatever we allocated - if (pp_set) { - ips_lock(pp_set); - ipc_pset_destroy(pp_set); - } + if (kr != KERN_SUCCESS) { // Dispose of whatever we allocated + if (pp_set) { + ips_lock(pp_set); + ipc_pset_destroy(proxy_space, pp_set); + } - if (bs_port) - ipc_port_dealloc_special(bs_port, proxy_space); + if (bs_port) { + ipc_port_dealloc_special(bs_port, proxy_space); + } - if (ack_port) - ipc_port_dealloc_special(ack_port, proxy_space); + if (ack_port) { + ipc_port_dealloc_special(ack_port, proxy_space); + } - if (proxy_space) - ipc_space_terminate(proxy_space); - } + if (proxy_space) { + ipc_space_terminate(proxy_space); + } + } - return kr; + return kr; } @@ -409,27 +416,29 @@ out: * Note: This function must be called with the node table lock held! 
*/ mach_node_t -mach_node_for_id_locked(mach_node_id_t node_id, - boolean_t alloc_if_dead, - boolean_t alloc_if_absent) +mach_node_for_id_locked(mach_node_id_t node_id, + boolean_t alloc_if_dead, + boolean_t alloc_if_absent) { - if ((node_id < 0) || (node_id >= MACH_NODES_MAX)) + if ((node_id < 0) || (node_id >= MACH_NODES_MAX)) { return MACH_NODE_NULL; + } mach_node_t node = mach_node_table[node_id]; - - if ( (!MACH_NODE_VALID(node) && alloc_if_absent) || - (MACH_NODE_VALID(node) && node->dead && alloc_if_dead) ) { + + if ((!MACH_NODE_VALID(node) && alloc_if_absent) || + (MACH_NODE_VALID(node) && node->dead && alloc_if_dead)) { node = mach_node_alloc_init(node_id); if (MACH_NODE_VALID(node)) { node->antecedent = mach_node_table[node_id]; mach_node_table[node_id] = node; } } - - if (MACH_NODE_VALID(node)) + + if (MACH_NODE_VALID(node)) { MACH_NODE_LOCK(node); - + } + return node; } @@ -453,7 +462,7 @@ mnl_name_alloc(void) void mnl_name_free(mnl_name_t name __unused) { - ; // Nothing to do for now since we don't recycle mnl names. + ; // Nothing to do for now since we don't recycle mnl names. } @@ -462,15 +471,16 @@ mnl_name_free(mnl_name_t name __unused) void mnl_name_table_init(void) { - MNL_NAME_TABLE_LOCK_INIT(); - MNL_NAME_TABLE_LOCK(); - + MNL_NAME_TABLE_LOCK_INIT(); + MNL_NAME_TABLE_LOCK(); + // Set the first name to this node's bootstrap name mnl_name_next = localnode_id + MACH_NODES_MAX; - - for (int i=0; iname == name) + if (obj->name == name) { break; + } } } return obj; @@ -514,11 +525,12 @@ mnl_obj_t mnl_obj_remove(mnl_name_t name) { mnl_obj_t obj = MNL_OBJ_NULL; - + if (name != MNL_NAME_NULL) { qe_foreach_element_safe(obj, &mnl_name_table[MNL_NAME_HASH(name)], links) { - if (obj->name == name) + if (obj->name == name) { remqueue(&obj->links); + } } } return obj; @@ -528,28 +540,30 @@ mnl_obj_remove(mnl_name_t name) /* Insert an object into the local node's hash table. If the name of the * provided object is MNL_NAME_NULL then a new mnl_name is allocated and * assigned to the object. 
- * Returns KERN_SUCCESS if obj was added to hash table - * Returns KERN_INVALID_ARGUMENT if obj is invalid - * Returns KERN_NAME_EXISTS if obj's name already exists in hash table + * Returns KERN_SUCCESS if obj was added to hash table + * Returns KERN_INVALID_ARGUMENT if obj is invalid + * Returns KERN_NAME_EXISTS if obj's name already exists in hash table */ kern_return_t mnl_obj_insert(mnl_obj_t obj) { - if (!MNL_OBJ_VALID(obj)) + if (!MNL_OBJ_VALID(obj)) { return KERN_INVALID_ARGUMENT; - + } + MNL_NAME_TABLE_LOCK(); - + if (!MNL_NAME_VALID(obj->name)) { // obj is unnammed, so lets allocate a fresh one obj->name = mnl_name_alloc(); } - + enqueue(&mnl_name_table[MNL_NAME_HASH(obj->name)], &obj->links); MNL_NAME_TABLE_UNLOCK(); - if(obj->name >= (MACH_NODES_MAX<<1)) + if (obj->name >= (MACH_NODES_MAX << 1)) { panic("Unexpected MNL_NAME %lld in obj %p", obj->name, obj); + } return KERN_SUCCESS; } @@ -571,7 +585,7 @@ mnl_obj_insert(mnl_obj_t obj) */ mnl_msg_t mnl_msg_alloc(int payload, - uint32_t flags __unused) + uint32_t flags __unused) { mnl_msg_t msg = kalloc(MNL_MSG_SIZE + payload); @@ -592,10 +606,11 @@ mnl_msg_alloc(int payload, */ void mnl_msg_free(mnl_msg_t msg, - uint32_t flags __unused) + uint32_t flags __unused) { - if (MNL_MSG_VALID(msg)) + if (MNL_MSG_VALID(msg)) { kfree(msg, MNL_MSG_SIZE + msg->size); + } } @@ -615,14 +630,15 @@ mnl_msg_free(mnl_msg_t msg, */ mnl_node_info_t mnl_instantiate(mach_node_id_t nid, - uint32_t flags __unused) + uint32_t flags __unused) { - mach_node_init(); // Lazy init of mach_node layer + mach_node_init(); // Lazy init of mach_node layer - if ((nid==localnode_id) || !MACH_NODE_ID_VALID(nid)) - return MNL_NODE_NULL; + if ((nid == localnode_id) || !MACH_NODE_ID_VALID(nid)) { + return MNL_NODE_NULL; + } - return (mnl_node_info_t)mach_node_alloc_init(nid); + return (mnl_node_info_t)mach_node_alloc_init(nid); } /* The link driver calls mnl_register() to complete the node registration @@ -640,12 +656,13 @@ mnl_instantiate(mach_node_id_t nid, */ kern_return_t mnl_register(mnl_node_info_t node, - uint32_t flags __unused) + uint32_t flags __unused) { - if (MNL_NODE_VALID(node) && (node->node_id != localnode_id)) - return mach_node_register((mach_node_t)node); + if (MNL_NODE_VALID(node) && (node->node_id != localnode_id)) { + return mach_node_register((mach_node_t)node); + } - return KERN_INVALID_ARGUMENT; + return KERN_INVALID_ARGUMENT; } @@ -667,25 +684,25 @@ mnl_register(mnl_node_info_t node, */ kern_return_t mnl_set_link_state(mnl_node_info_t node, - int link, - uint32_t flags __unused) + int link, + uint32_t flags __unused) { - kern_return_t kr; + kern_return_t kr; mach_node_t mnode = (mach_node_t)node; - if (!MACH_NODE_VALID(mnode) || !(link & MNL_LINK_UP) || (link & mnode->link)) - return KERN_INVALID_ARGUMENT; // bad node, or bad link argument - - MACH_NODE_LOCK(mnode); + if (!MACH_NODE_VALID(mnode) || !(link & MNL_LINK_UP) || (link & mnode->link)) { + return KERN_INVALID_ARGUMENT; // bad node, or bad link argument + } + MACH_NODE_LOCK(mnode); - if (mnode->dead) { + if (mnode->dead) { kr = KERN_NODE_DOWN; - } else { - mnode->link |= link; - kr = KERN_SUCCESS; - } + } else { + mnode->link |= link; + kr = KERN_SUCCESS; + } - MACH_NODE_UNLOCK(mnode); + MACH_NODE_UNLOCK(mnode); return kr; } @@ -709,17 +726,17 @@ mnl_set_link_state(mnl_node_info_t node, */ kern_return_t mnl_terminate(mnl_node_info_t node, - uint32_t flags __unused) + uint32_t flags __unused) { kern_return_t kr = KERN_SUCCESS; mach_node_t mnode = (mach_node_t)node; - if 
(!MACH_NODE_VALID(mnode)) - return KERN_INVALID_ARGUMENT; // bad node - + if (!MACH_NODE_VALID(mnode)) { + return KERN_INVALID_ARGUMENT; // bad node + } MACH_NODE_LOCK(mnode); if (mnode->dead) { - kr = KERN_NODE_DOWN; // node is already terminated + kr = KERN_NODE_DOWN; // node is already terminated goto unlock; } @@ -730,12 +747,12 @@ mnl_terminate(mnl_node_info_t node, flipc_node_retire(mnode); - // Wake any threads sleeping on the proxy port set - if (mnode->proxy_port_set != IPS_NULL) { - ips_lock(mnode->proxy_port_set); - ipc_pset_destroy(mnode->proxy_port_set); - mnode->proxy_port_set = IPS_NULL; - } + // Wake any threads sleeping on the proxy port set + if (mnode->proxy_port_set != IPS_NULL) { + ips_lock(mnode->proxy_port_set); + ipc_pset_destroy(mnode->proxy_space, mnode->proxy_port_set); + mnode->proxy_port_set = IPS_NULL; + } // TODO: Inform node name server (if registered) of termination @@ -756,8 +773,8 @@ unlock: */ void mnl_msg_from_node(mnl_node_info_t node __unused, - mnl_msg_t msg, - uint32_t flags __unused) + mnl_msg_t msg, + uint32_t flags __unused) { assert(MNL_MSG_VALID(msg)); assert(MACH_NODE_ID_VALID(msg->node_id)); @@ -770,16 +787,15 @@ mnl_msg_from_node(mnl_node_info_t node __unused, */ switch (msg->sub) { + case MACH_NODE_SUB_FLIPC: + flipc_msg_from_node((mach_node_t)node, msg, flags); + break; - case MACH_NODE_SUB_FLIPC: - flipc_msg_from_node((mach_node_t)node, msg, flags); - break; - - default: + default: #if DEBUG - PE_enter_debugger("mnl_msg_from_node(): Invalid subsystem"); + PE_enter_debugger("mnl_msg_from_node(): Invalid subsystem"); #endif - break; + break; } } @@ -796,12 +812,12 @@ mnl_msg_from_node(mnl_node_info_t node __unused, */ mnl_msg_t mnl_msg_to_node(mnl_node_info_t node __unused, - uint32_t flags __unused) + uint32_t flags __unused) { assert(MNL_NODE_VALID(node)); #if DEBUG - thread_set_thread_name(current_thread(), "MNL_Link"); + thread_set_thread_name(current_thread(), "MNL_Link"); #endif return flipc_msg_to_remote_node((mach_node_t)node, 0); @@ -818,24 +834,24 @@ mnl_msg_to_node(mnl_node_info_t node __unused, */ void mnl_msg_complete(mnl_node_info_t node __unused, - mnl_msg_t msg, - uint32_t flags) + mnl_msg_t msg, + uint32_t flags) { - switch (msg->sub) { - case MACH_NODE_SUB_NODE: - mnl_msg_free(msg, flags); - break; + switch (msg->sub) { + case MACH_NODE_SUB_NODE: + mnl_msg_free(msg, flags); + break; - case MACH_NODE_SUB_FLIPC: - flipc_msg_free(msg, flags); - break; + case MACH_NODE_SUB_FLIPC: + flipc_msg_free(msg, flags); + break; - default: + default: #if DEBUG - PE_enter_debugger("mnl_msg_complete(): Invalid subsystem"); + PE_enter_debugger("mnl_msg_complete(): Invalid subsystem"); #endif - break; - } + break; + } } #else // MACH_FLIPC not configured, so provide KPI stubs @@ -843,61 +859,61 @@ mnl_msg_complete(mnl_node_info_t node __unused, mnl_msg_t mnl_msg_alloc(int payload __unused, uint32_t flags __unused) { - return MNL_MSG_NULL; + return MNL_MSG_NULL; } void mnl_msg_free(mnl_msg_t msg __unused, uint32_t flags __unused) { - return; + return; } mnl_node_info_t mnl_instantiate(mach_node_id_t nid __unused, uint32_t flags __unused) { - return MNL_NODE_NULL; + return MNL_NODE_NULL; } kern_return_t mnl_register(mnl_node_info_t node __unused, uint32_t flags __unused) { - return KERN_FAILURE; + return KERN_FAILURE; } kern_return_t mnl_set_link_state(mnl_node_info_t node __unused, - int link __unused, - uint32_t flags __unused) + int link __unused, + uint32_t flags __unused) { - return KERN_FAILURE; + return KERN_FAILURE; } kern_return_t 
mnl_terminate(mnl_node_info_t node __unused, uint32_t flags __unused) { - return KERN_FAILURE; + return KERN_FAILURE; } void mnl_msg_from_node(mnl_node_info_t node __unused, - mnl_msg_t msg __unused, - uint32_t flags __unused) + mnl_msg_t msg __unused, + uint32_t flags __unused) { - return; + return; } mnl_msg_t mnl_msg_to_node(mnl_node_info_t node __unused, uint32_t flags __unused) { - return MNL_MSG_NULL; + return MNL_MSG_NULL; } void mnl_msg_complete(mnl_node_info_t node __unused, - mnl_msg_t msg __unused, - uint32_t flags __unused) + mnl_msg_t msg __unused, + uint32_t flags __unused) { - return; + return; } #endif // MACH_FLIPC diff --git a/osfmk/kern/mach_node.h b/osfmk/kern/mach_node.h index 99bf01128..1dd7adb05 100644 --- a/osfmk/kern/mach_node.h +++ b/osfmk/kern/mach_node.h @@ -49,10 +49,10 @@ */ #pragma pack(4) typedef struct mach_node_server_msg { - mach_msg_header_t header; - uint32_t identifier; // See FLIPC_SM_* defines - uint32_t options; // Currently unused - uint32_t node_id; // Node number + mach_msg_header_t header; + uint32_t identifier; // See FLIPC_SM_* defines + uint32_t options; // Currently unused + uint32_t node_id; // Node number } *mach_node_server_msg_t; #pragma pack() @@ -60,9 +60,9 @@ typedef struct mach_node_server_msg { * layer to the node bootstrap server. */ typedef struct mach_node_server_register_msg { - struct mach_node_server_msg node_header; - uint8_t datamodel; // 1==ILP32, 2==LP64; matches dtrace - uint8_t byteorder; // Uses defines from libkern/OSByteOrder.h + struct mach_node_server_msg node_header; + uint8_t datamodel; // 1==ILP32, 2==LP64; matches dtrace + uint8_t byteorder; // Uses defines from libkern/OSByteOrder.h } *mach_node_server_register_msg_t; #pragma pack() @@ -103,28 +103,28 @@ typedef struct flipc_node *flipc_node_t; // Defined in ipc/flipc.h typedef struct mach_node *mach_node_t; struct mach_node { - /* Static node details, provided by the link driver at registration */ - struct mnl_node_info info; - - lck_spin_t node_lock_data; - - /* Flags and status word */ - uint32_t link:2; // See MNL_LINK* defines - uint32_t published:1; // True if node server has send-right - uint32_t active:1; // True if node is up and ready - uint32_t suspended:1; // True if node is active but sleeping - uint32_t dead:1; // True if node is dead - uint32_t _reserved:26; // Fill out the 32b flags field - - /* port/space/set */ - ipc_space_t proxy_space; // Kernel special space for proxy rights - ipc_pset_t proxy_port_set; // All proxy ports are in this set - ipc_port_t bootstrap_port; // Port for which "noded" holds rcv right - ipc_port_t control_port; // For control & ack/nak messages - - /* Misc */ - int proto_vers; // Protocol version in use for this node - mach_node_t antecedent; // Pointer to prior encarnation of this node id + /* Static node details, provided by the link driver at registration */ + struct mnl_node_info info; + + lck_spin_t node_lock_data; + + /* Flags and status word */ + uint32_t link:2; // See MNL_LINK* defines + uint32_t published:1;// True if node server has send-right + uint32_t active:1; // True if node is up and ready + uint32_t suspended:1;// True if node is active but sleeping + uint32_t dead:1; // True if node is dead + uint32_t _reserved:26;// Fill out the 32b flags field + + /* port/space/set */ + ipc_space_t proxy_space;// Kernel special space for proxy rights + ipc_pset_t proxy_port_set;// All proxy ports are in this set + ipc_port_t bootstrap_port;// Port for which "noded" holds rcv right + ipc_port_t control_port;// For 
control & ack/nak messages + + /* Misc */ + int proto_vers; // Protocol version in use for this node + mach_node_t antecedent; // Pointer to prior incarnation of this node id }; extern mach_node_t localnode; // This node's mach_node_t struct @@ -136,9 +136,9 @@ extern mach_node_t localnode; // This node's mach_node_t struct #define MACH_NODE_FREE(node) kfree(node, MACH_NODE_SIZE) #define MACH_NODE_LOCK_INIT(np) lck_spin_init(&(np)->node_lock_data, \ - &ipc_lck_grp, &ipc_lck_attr) + &ipc_lck_grp, &ipc_lck_attr) #define MACH_NODE_LOCK_DESTROY(np) lck_spin_destroy(&(np)->node_lock_data, \ - &ipc_lck_grp) + &ipc_lck_grp) #define MACH_NODE_LOCK(np) lck_spin_lock(&(np)->node_lock_data) #define MACH_NODE_UNLOCK(np) lck_spin_unlock(&(np)->node_lock_data) @@ -152,8 +152,8 @@ extern mach_node_t localnode; // This node's mach_node_t struct */ mach_node_t mach_node_for_id_locked(mach_node_id_t node_id, - boolean_t alloc_if_dead, - boolean_t alloc_if_absent); + boolean_t alloc_if_dead, + boolean_t alloc_if_absent); /*** Mach Node Link Name Section @@ -190,8 +190,8 @@ extern void mnl_name_free(mnl_name_t name); * allocate and free the actual objects being stored. */ typedef struct mnl_obj { - queue_chain_t links; // List of mnk_name_obj (See kern/queue.h "Method 1") - mnl_name_t name; // Unique mnl_name + queue_chain_t links;// List of mnl_name_obj (See kern/queue.h "Method 1") + mnl_name_t name;// Unique mnl_name } *mnl_obj_t; #define MNL_OBJ_NULL ((mnl_obj_t) 0UL) @@ -256,4 +256,3 @@ __END_DECLS #endif // MACH_FLIPC && MACH_KERNEL_PRIVATE #endif // _KERN_MACH_NODE_H_ - diff --git a/osfmk/kern/mach_node_link.h b/osfmk/kern/mach_node_link.h index b5da5334d..a848987db 100644 --- a/osfmk/kern/mach_node_link.h +++ b/osfmk/kern/mach_node_link.h @@ -61,12 +61,12 @@ extern mach_node_id_t localnode_id; // This node's unique id. * links are brought up. */ typedef struct mnl_node_info { - mach_node_id_t node_id; // The node ID of this node - uint8_t datamodel; // 1==ILP32, 2==LP64 (matches dtrace) - uint8_t byteorder; // See libkern/OSByteOrder.h - uint32_t proto_vers_min; // Oldest MNL protocol vers node can accept - uint32_t proto_vers_max; // Newest MNL protocol vers node can accept -} __attribute__ ((aligned (8))) *mnl_node_info_t; + mach_node_id_t node_id; // The node ID of this node + uint8_t datamodel; // 1==ILP32, 2==LP64 (matches dtrace) + uint8_t byteorder; // See libkern/OSByteOrder.h + uint32_t proto_vers_min;// Oldest MNL protocol vers node can accept + uint32_t proto_vers_max;// Newest MNL protocol vers node can accept +} __attribute__ ((aligned(8))) * mnl_node_info_t; #define MNL_NODE_NULL ((mnl_node_info_t) 0UL) #define MNL_NODE_VALID(n) ((n) != MNL_NODE_NULL) @@ -95,15 +95,15 @@ typedef uint64_t mnl_name_t; * to/from the mach_node layer without any introspection or byte reordering. 
*/ typedef struct mnl_msg { - uint8_t sub; // 8b subsystem code - uint8_t cmd; // 8b command code - uint8_t qos; // 8b TODO: Doesn't do anything yet - uint8_t flags; // 8b Command-specific flag byte - uint32_t node_id; // 32b id of node that originated message - mnl_name_t object; // 64b object ref (use is determined by sub & cmd) - uint32_t options; // 32b Currently unused - uint32_t size; // 32b Number of bytes that follow mnl_msg header -} __attribute__((__packed__)) *mnl_msg_t; + uint8_t sub; // 8b subsystem code + uint8_t cmd; // 8b command code + uint8_t qos; // 8b TODO: Doesn't do anything yet + uint8_t flags; // 8b Command-specific flag byte + uint32_t node_id;// 32b id of node that originated message + mnl_name_t object; // 64b object ref (use is determined by sub & cmd) + uint32_t options;// 32b Currently unused + uint32_t size; // 32b Number of bytes that follow mnl_msg header +} __attribute__((__packed__)) * mnl_msg_t; /* Allocate a mnl_msg struct plus additional payload. Link drivers are not @@ -152,7 +152,7 @@ void mnl_msg_free(mnl_msg_t msg, uint32_t flags); * *: Pointer to a new mnl_node struct */ mnl_node_info_t mnl_instantiate(mach_node_id_t nid, - uint32_t flags); + uint32_t flags); /* The link driver calls mnl_register() to complete the node registration @@ -169,7 +169,7 @@ mnl_node_info_t mnl_instantiate(mach_node_id_t nid, * KERN_*: Values returned from underlying functions */ kern_return_t mnl_register(mnl_node_info_t node, - uint32_t flags); + uint32_t flags); /* The link driver calls this to report that the link has been raised in one @@ -189,8 +189,8 @@ kern_return_t mnl_register(mnl_node_info_t node, * KERN_*: Values returned from underlying functions. */ kern_return_t mnl_set_link_state(mnl_node_info_t node, - int link, - uint32_t flags); + int link, + uint32_t flags); #define MNL_LINK_DOWN (0UL) #define MNL_LINK_RX (1UL) @@ -216,7 +216,7 @@ kern_return_t mnl_set_link_state(mnl_node_info_t node, * KERN_*: Values returned from underlying functions. */ kern_return_t mnl_terminate(mnl_node_info_t node, - uint32_t flags); + uint32_t flags); /* The link driver calls this to deliver an incoming message. Note that the @@ -229,8 +229,8 @@ kern_return_t mnl_terminate(mnl_node_info_t node, * flags Currently unused; 0 should be passed */ void mnl_msg_from_node(mnl_node_info_t node, - mnl_msg_t msg, - uint32_t flags); + mnl_msg_t msg, + uint32_t flags); /* The link driver calls this to fetch the next message to transmit. @@ -244,7 +244,7 @@ void mnl_msg_from_node(mnl_node_info_t node, * flags Currently unused; 0 should be passed */ mnl_msg_t mnl_msg_to_node(mnl_node_info_t node, - uint32_t flags); + uint32_t flags); /* The link driver calls this to indicate that the specified msg buffer has @@ -256,8 +256,8 @@ mnl_msg_t mnl_msg_to_node(mnl_node_info_t node, * flags Currently unused; 0 should be passed */ void mnl_msg_complete(mnl_node_info_t node, - mnl_msg_t msg, - uint32_t flags); + mnl_msg_t msg, + uint32_t flags); __END_DECLS diff --git a/osfmk/kern/mach_param.h b/osfmk/kern/mach_param.h index a89e08fc3..7e1bc5c3d 100644 --- a/osfmk/kern/mach_param.h +++ b/osfmk/kern/mach_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,29 +64,29 @@ * */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -#ifndef _KERN_MACH_PARAM_H_ +#ifndef _KERN_MACH_PARAM_H_ #define _KERN_MACH_PARAM_H_ extern int thread_max, task_threadmax, task_max; -#define THREAD_CHUNK 64 /* Allocation chunk */ -#define TASK_CHUNK 64 /* Allocation chunk */ +#define THREAD_CHUNK 64 /* Allocation chunk */ +#define TASK_CHUNK 64 /* Allocation chunk */ -#define PORT_MAX ((task_max * 3 + thread_max) /* kernel */ \ - + (thread_max * 2) /* user */ \ - + 40000) /* slop for objects */ - /* Number of ports, system-wide */ +#define PORT_MAX ((task_max * 3 + thread_max) /* kernel */ \ + + (thread_max * 2) /* user */ \ + + 40000) /* slop for objects */ +/* Number of ports, system-wide */ -#define SET_MAX (task_max + (thread_max * 2) + 200) - /* Max number of port sets */ +#define SET_MAX (task_max + (thread_max * 2) + 200) +/* Max number of port sets */ -#define SPACE_MAX (task_max + 5) /* Max number of IPC spaces */ +#define SPACE_MAX (task_max + 5) /* Max number of IPC spaces */ -#define SEMAPHORE_MAX (PORT_MAX >> 1) /* Maximum number of semaphores */ +#define SEMAPHORE_MAX (PORT_MAX >> 1) /* Maximum number of semaphores */ -#endif /* _KERN_MACH_PARAM_H_ */ +#endif /* _KERN_MACH_PARAM_H_ */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/kern/machine.c b/osfmk/kern/machine.c index 43d69835c..a61ee71db 100644 --- a/osfmk/kern/machine.c +++ b/osfmk/kern/machine.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2009 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. 
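 *
 * Editor's note (not part of the patch): to make the sizing formulas from
 * mach_param.h above concrete, here is a worked example with illustrative
 * tunables task_max = 1024 and thread_max = 2560; the asserts simply
 * restate the arithmetic.
 */

static void
example_mach_param_sizing(void)
{
	int task_max = 1024, thread_max = 2560;     /* illustrative values only */

	int port_max = (task_max * 3 + thread_max)  /* kernel */
	    + (thread_max * 2)                      /* user */
	    + 40000;                                /* slop for objects */
	int set_max = task_max + (thread_max * 2) + 200;

	assert(port_max == 50752);                  /* 5632 + 5120 + 40000 */
	assert(set_max == 6344);                    /* 1024 + 5120 + 200 */
}

/*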
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -99,15 +99,20 @@ extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t); #endif +#if defined(__x86_64__) +#include +#include +#endif + /* * Exported variables: */ -struct machine_info machine_info; +struct machine_info machine_info; /* Forwards */ -void processor_doshutdown( - processor_t processor); +void processor_doshutdown( + processor_t processor); /* * processor_up: @@ -117,10 +122,10 @@ void processor_doshutdown( */ void processor_up( - processor_t processor) + processor_t processor) { - processor_set_t pset; - spl_t s; + processor_set_t pset; + spl_t s; s = splsched(); init_ast_check(processor); @@ -129,60 +134,65 @@ processor_up( ++pset->online_processor_count; pset_update_processor_state(pset, processor, PROCESSOR_RUNNING); (void)hw_atomic_add(&processor_avail_count, 1); + if (processor->is_recommended) { + (void)hw_atomic_add(&processor_avail_count_user, 1); + } commpage_update_active_cpus(); pset_unlock(pset); ml_cpu_up(); splx(s); #if CONFIG_DTRACE - if (dtrace_cpu_state_changed_hook) + if (dtrace_cpu_state_changed_hook) { (*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE); + } #endif } #include kern_return_t host_reboot( - host_priv_t host_priv, - int options) + host_priv_t host_priv, + int options) { - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_HOST); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_HOST; + } assert(host_priv == &realhost); #if DEVELOPMENT || DEBUG if (options & HOST_REBOOT_DEBUGGER) { Debugger("Debugger"); - return (KERN_SUCCESS); + return KERN_SUCCESS; } #endif - if (options & HOST_REBOOT_UPSDELAY) { - // UPS power cutoff path - PEHaltRestart( kPEUPSDelayHaltCPU ); - } else { - halt_all_cpus(!(options & HOST_REBOOT_HALT)); - } + if (options & HOST_REBOOT_UPSDELAY) { + // UPS power cutoff path + PEHaltRestart( kPEUPSDelayHaltCPU ); + } else { + halt_all_cpus(!(options & HOST_REBOOT_HALT)); + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t processor_assign( - __unused processor_t processor, - __unused processor_set_t new_pset, - __unused boolean_t wait) + __unused processor_t processor, + __unused processor_set_t new_pset, + __unused boolean_t wait) { - return (KERN_FAILURE); + return KERN_FAILURE; } kern_return_t processor_shutdown( - processor_t processor) + processor_t processor) { - processor_set_t pset; - spl_t s; + processor_set_t pset; + spl_t s; s = splsched(); pset = processor->processor_set; @@ -194,7 +204,7 @@ processor_shutdown( pset_unlock(pset); splx(s); - return (KERN_SUCCESS); + return KERN_SUCCESS; } if (processor->state == PROCESSOR_START) { @@ -204,7 +214,7 @@ processor_shutdown( pset_unlock(pset); splx(s); - return (KERN_FAILURE); + return KERN_FAILURE; } /* @@ -225,11 +235,10 @@ processor_shutdown( pset_unlock(pset); splx(s); - return (KERN_SUCCESS); + return KERN_SUCCESS; } pset_update_processor_state(pset, processor, PROCESSOR_SHUTDOWN); - pset_unlock(pset); processor_doshutdown(processor); @@ -237,7 +246,7 @@ processor_shutdown( cpu_exit_wait(processor->cpu_id); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -245,11 +254,11 @@ processor_shutdown( */ void processor_doshutdown( - processor_t processor) + processor_t processor) { - thread_t old_thread, self = current_thread(); - processor_t prev; - processor_set_t pset; + thread_t old_thread, self = current_thread(); + processor_t prev; + processor_set_t pset; /* * Get onto the processor to shutdown @@ -260,8 +269,9 @@ processor_doshutdown( assert(processor->state == PROCESSOR_SHUTDOWN); #if CONFIG_DTRACE - if 
(dtrace_cpu_state_changed_hook) + if (dtrace_cpu_state_changed_hook) { (*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE); + } #endif ml_cpu_down(); @@ -278,6 +288,9 @@ processor_doshutdown( pset_update_processor_state(pset, processor, PROCESSOR_OFF_LINE); --pset->online_processor_count; (void)hw_atomic_sub(&processor_avail_count, 1); + if (processor->is_recommended) { + (void)hw_atomic_sub(&processor_avail_count_user, 1); + } commpage_update_active_cpus(); SCHED(processor_queue_shutdown)(processor); /* pset lock dropped */ @@ -313,7 +326,7 @@ processor_doshutdown( */ void processor_offline( - processor_t processor) + processor_t processor) { assert(processor == current_processor()); assert(processor->active_thread == current_thread()); @@ -323,8 +336,9 @@ processor_offline( if (!new_thread->kernel_stack) { /* the idle thread has a reserved stack, so this will never fail */ - if (!stack_alloc_try(new_thread)) + if (!stack_alloc_try(new_thread)) { panic("processor_offline"); + } } processor->active_thread = new_thread; @@ -344,9 +358,9 @@ processor_offline( timer_stop(PROCESSOR_DATA(processor, current_state), ctime); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE, - old_thread->reason, (uintptr_t)thread_tid(new_thread), - old_thread->sched_pri, new_thread->sched_pri, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE, + old_thread->reason, (uintptr_t)thread_tid(new_thread), + old_thread->sched_pri, new_thread->sched_pri, 0); machine_set_current_thread(new_thread); @@ -363,12 +377,13 @@ processor_offline( kern_return_t host_get_boot_info( - host_priv_t host_priv, - kernel_boot_info_t boot_info) + host_priv_t host_priv, + kernel_boot_info_t boot_info) { const char *src = ""; - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_HOST); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_HOST; + } assert(host_priv == &realhost); @@ -377,17 +392,20 @@ host_get_boot_info( * standardized strings generated from boot string. 
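 *
 * Editor's note (not part of the patch): a hedged user-space sketch of
 * calling this routine through its MIG interface. It requires the
 * privileged host port, so it is illustrative only; unprivileged callers
 * get KERN_INVALID_HOST or cannot obtain the port at all.
 */

#include <mach/mach.h>
#include <stdio.h>

static void
example_print_boot_info(host_priv_t host_priv)
{
	kernel_boot_info_t info;    /* fixed KERNEL_BOOT_INFO_MAX-byte buffer */

	if (host_get_boot_info(host_priv, info) == KERN_SUCCESS) {
		printf("boot info: %s\n", info);
	}
}

/*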
*/ src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX); - if (src != boot_info) + if (src != boot_info) { (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX); + } - return (KERN_SUCCESS); + return KERN_SUCCESS; } #if CONFIG_DTRACE #include #endif -unsigned long long ml_io_read(uintptr_t vaddr, int size) { +unsigned long long +ml_io_read(uintptr_t vaddr, int size) +{ unsigned long long result = 0; unsigned char s1; unsigned short s2; @@ -396,66 +414,228 @@ unsigned long long ml_io_read(uintptr_t vaddr, int size) { uint64_t sabs, eabs; boolean_t istate, timeread = FALSE; #if DEVELOPMENT || DEBUG - pmap_verify_noncacheable(vaddr); + extern uint64_t simulate_stretched_io; + uintptr_t paddr = pmap_verify_noncacheable(vaddr); #endif /* x86_64 DEVELOPMENT || DEBUG */ if (__improbable(reportphyreaddelayabs != 0)) { istate = ml_set_interrupts_enabled(FALSE); sabs = mach_absolute_time(); timeread = TRUE; } + +#if DEVELOPMENT || DEBUG + if (__improbable(timeread && simulate_stretched_io)) { + sabs -= simulate_stretched_io; + } +#endif /* x86_64 DEVELOPMENT || DEBUG */ + #endif /* x86_64 */ switch (size) { - case 1: + case 1: s1 = *(volatile unsigned char *)vaddr; result = s1; break; - case 2: + case 2: s2 = *(volatile unsigned short *)vaddr; result = s2; break; - case 4: + case 4: result = *(volatile unsigned int *)vaddr; break; case 8: result = *(volatile unsigned long long *)vaddr; break; default: - panic("Invalid size %d for ml_io_read(%p)\n", size, (void *)vaddr); + panic("Invalid size %d for ml_io_read(%p)", size, (void *)vaddr); break; - } + } #if defined(__x86_64__) if (__improbable(timeread == TRUE)) { eabs = mach_absolute_time(); - (void)ml_set_interrupts_enabled(istate); + +#if DEVELOPMENT || DEBUG + iotrace(IOTRACE_IO_READ, vaddr, paddr, size, result, sabs, eabs - sabs); +#endif if (__improbable((eabs - sabs) > reportphyreaddelayabs)) { +#if !(DEVELOPMENT || DEBUG) + uintptr_t paddr = kvtophys(vaddr); +#endif + + (void)ml_set_interrupts_enabled(istate); + if (phyreadpanic && (machine_timeout_suspended() == FALSE)) { - panic("Read from IO virtual addr 0x%lx took %llu ns, result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", vaddr, (eabs - sabs), result, sabs, eabs, reportphyreaddelayabs); + panic_io_port_read(); + panic("Read from IO vaddr 0x%lx paddr 0x%lx took %llu ns, " + "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", + vaddr, paddr, (eabs - sabs), result, sabs, eabs, + reportphyreaddelayabs); + } + + if (reportphyreadosbt) { + OSReportWithBacktrace("ml_io_read(v=%p, p=%p) size %d result 0x%llx " + "took %lluus", + (void *)vaddr, (void *)paddr, size, result, + (eabs - sabs) / NSEC_PER_USEC); } #if CONFIG_DTRACE - DTRACE_PHYSLAT3(physread, uint64_t, (eabs - sabs), - uint64_t, vaddr, uint32_t, size); + DTRACE_PHYSLAT5(physioread, uint64_t, (eabs - sabs), + uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, result); #endif /* CONFIG_DTRACE */ + } else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) { +#if !(DEVELOPMENT || DEBUG) + uintptr_t paddr = kvtophys(vaddr); +#endif + + KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_READ), + (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, result); + + (void)ml_set_interrupts_enabled(istate); + } else { + (void)ml_set_interrupts_enabled(istate); } } #endif /* x86_64 */ return result; } -unsigned int ml_io_read8(uintptr_t vaddr) { +unsigned int +ml_io_read8(uintptr_t vaddr) +{ return (unsigned) ml_io_read(vaddr, 1); } -unsigned int ml_io_read16(uintptr_t vaddr) { +unsigned int 
+ml_io_read16(uintptr_t vaddr) +{ return (unsigned) ml_io_read(vaddr, 2); } -unsigned int ml_io_read32(uintptr_t vaddr) { +unsigned int +ml_io_read32(uintptr_t vaddr) +{ return (unsigned) ml_io_read(vaddr, 4); } -unsigned long long ml_io_read64(uintptr_t vaddr) { +unsigned long long +ml_io_read64(uintptr_t vaddr) +{ return ml_io_read(vaddr, 8); } + +/* ml_io_write* */ + +void +ml_io_write(uintptr_t vaddr, uint64_t val, int size) +{ +#if defined(__x86_64__) + uint64_t sabs, eabs; + boolean_t istate, timewrite = FALSE; +#if DEVELOPMENT || DEBUG + extern uint64_t simulate_stretched_io; + uintptr_t paddr = pmap_verify_noncacheable(vaddr); +#endif /* x86_64 DEVELOPMENT || DEBUG */ + if (__improbable(reportphywritedelayabs != 0)) { + istate = ml_set_interrupts_enabled(FALSE); + sabs = mach_absolute_time(); + timewrite = TRUE; + } + +#if DEVELOPMENT || DEBUG + if (__improbable(timewrite && simulate_stretched_io)) { + sabs -= simulate_stretched_io; + } +#endif /* x86_64 DEVELOPMENT || DEBUG */ +#endif /* x86_64 */ + + switch (size) { + case 1: + *(volatile uint8_t *)vaddr = (uint8_t)val; + break; + case 2: + *(volatile uint16_t *)vaddr = (uint16_t)val; + break; + case 4: + *(volatile uint32_t *)vaddr = (uint32_t)val; + break; + case 8: + *(volatile uint64_t *)vaddr = (uint64_t)val; + break; + default: + panic("Invalid size %d for ml_io_write(%p, 0x%llx)", size, (void *)vaddr, val); + break; + } + +#if defined(__x86_64__) + if (__improbable(timewrite == TRUE)) { + eabs = mach_absolute_time(); + +#if DEVELOPMENT || DEBUG + iotrace(IOTRACE_IO_WRITE, vaddr, paddr, size, val, sabs, eabs - sabs); +#endif + + if (__improbable((eabs - sabs) > reportphywritedelayabs)) { +#if !(DEVELOPMENT || DEBUG) + uintptr_t paddr = kvtophys(vaddr); +#endif + + (void)ml_set_interrupts_enabled(istate); + + if (phywritepanic && (machine_timeout_suspended() == FALSE)) { + panic_io_port_read(); + panic("Write to IO vaddr %p paddr %p val 0x%llx took %llu ns," + " (start: %llu, end: %llu), ceiling: %llu", + (void *)vaddr, (void *)paddr, val, (eabs - sabs), sabs, eabs, + reportphywritedelayabs); + } + + if (reportphywriteosbt) { + OSReportWithBacktrace("ml_io_write size %d (v=%p, p=%p, 0x%llx) " + "took %lluus", + size, (void *)vaddr, (void *)paddr, val, (eabs - sabs) / NSEC_PER_USEC); + } +#if CONFIG_DTRACE + DTRACE_PHYSLAT5(physiowrite, uint64_t, (eabs - sabs), + uint64_t, vaddr, uint32_t, size, uint64_t, paddr, uint64_t, val); +#endif /* CONFIG_DTRACE */ + } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) { +#if !(DEVELOPMENT || DEBUG) + uintptr_t paddr = kvtophys(vaddr); +#endif + + KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_MMIO_WRITE), + (eabs - sabs), VM_KERNEL_UNSLIDE_OR_PERM(vaddr), paddr, val); + + (void)ml_set_interrupts_enabled(istate); + } else { + (void)ml_set_interrupts_enabled(istate); + } + } +#endif /* x86_64 */ +} + +void +ml_io_write8(uintptr_t vaddr, uint8_t val) +{ + ml_io_write(vaddr, val, 1); +} + +void +ml_io_write16(uintptr_t vaddr, uint16_t val) +{ + ml_io_write(vaddr, val, 2); +} + +void +ml_io_write32(uintptr_t vaddr, uint32_t val) +{ + ml_io_write(vaddr, val, 4); +} + +void +ml_io_write64(uintptr_t vaddr, uint64_t val) +{ + ml_io_write(vaddr, val, 8); +} diff --git a/osfmk/kern/machine.h b/osfmk/kern/machine.h index a4d4cbcdb..9dbb6eb2b 100644 --- a/osfmk/kern/machine.h +++ b/osfmk/kern/machine.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. 
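 *
 * Editor's note (not part of the patch): a minimal sketch of the MMIO
 * accessor pattern implemented by the ml_io read/write families above.
 * The device layout (doorbell at +0x00, status at +0x04) and the uncached
 * mapping handed in as `base` are hypothetical.
 */

static uint32_t
example_ring_doorbell(uintptr_t base)
{
	/* Both accesses go through the timed accessors, so stalls beyond
	 * reportphyreaddelayabs / reportphywritedelayabs are reported,
	 * traced, or panic per the logic above. */
	ml_io_write32(base + 0x00, 1);
	return ml_io_read32(base + 0x04);
}

/*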
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifndef _KERN_MACHINE_H_ -#define _KERN_MACHINE_H_ +#ifndef _KERN_MACHINE_H_ +#define _KERN_MACHINE_H_ #include #include @@ -39,18 +39,19 @@ #include +#include /* * Machine support declarations. */ -extern void processor_up( - processor_t processor); +extern void processor_up( + processor_t processor); -extern void processor_offline( - processor_t processor); +extern void processor_offline( + processor_t processor); -extern void processor_start_thread(void *machine_param); +extern void processor_start_thread(void *machine_param); /* * Must be implemented in machine dependent code. @@ -58,39 +59,42 @@ extern void processor_start_thread(void *machine_param); /* Initialize machine dependent ast code */ extern void init_ast_check( - processor_t processor); + processor_t processor); /* Cause check for ast */ extern void cause_ast_check( - processor_t processor); + processor_t processor); extern kern_return_t cpu_control( - int slot_num, - processor_info_t info, - unsigned int count); + int slot_num, + processor_info_t info, + unsigned int count); -extern void cpu_sleep(void); +extern void cpu_sleep(void); extern kern_return_t cpu_start( - int slot_num); + int slot_num); extern void cpu_exit_wait( - int slot_num); + int slot_num); + +extern boolean_t cpu_can_exit( + int slot_num); extern kern_return_t cpu_info( - processor_flavor_t flavor, - int slot_num, - processor_info_t info, - unsigned int *count); + processor_flavor_t flavor, + int slot_num, + processor_info_t info, + unsigned int *count); extern kern_return_t cpu_info_count( - processor_flavor_t flavor, - unsigned int *count); + processor_flavor_t flavor, + unsigned int *count); -extern thread_t machine_processor_shutdown( - thread_t thread, - void (*doshutdown)(processor_t), - processor_t processor); +extern thread_t machine_processor_shutdown( + thread_t thread, + void (*doshutdown)(processor_t), + processor_t processor); extern void machine_idle(void); @@ -98,32 +102,32 @@ extern void machine_track_platform_idle(boolean_t); /* Signals a processor to bring it out of idle */ extern void machine_signal_idle( - processor_t processor); + processor_t processor); /* Signals a processor to bring it out of idle unless canceled */ extern void machine_signal_idle_deferred( - processor_t processor); + processor_t processor); /* Cancels an outstanding machine_signal_idle_deferred, if this is supported */ extern void machine_signal_idle_cancel( - processor_t 
processor); + processor_t processor); extern void halt_cpu(void); extern void halt_all_cpus( - boolean_t reboot); + boolean_t reboot); extern char *machine_boot_info( - char *buf, - vm_size_t buf_len); + char *buf, + vm_size_t buf_len); /* * Machine-dependent routine to fill in an array with up to callstack_max * levels of return pc information. */ extern void machine_callstack( - uintptr_t *buf, - vm_size_t callstack_max); + uintptr_t *buf, + vm_size_t callstack_max); extern void consider_machine_collect(void); @@ -132,17 +136,18 @@ extern void consider_machine_collect(void); * CPU power management about context switches */ -extern void machine_thread_going_on_core(thread_t new_thread, - int urgency, - uint64_t sched_latency, - uint64_t same_pri_latency, - uint64_t dispatch_time); +extern void machine_thread_going_on_core(thread_t new_thread, + thread_urgency_t urgency, + uint64_t sched_latency, + uint64_t same_pri_latency, + uint64_t dispatch_time); -extern void machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch); +extern void machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, + uint64_t last_dispatch, boolean_t thread_runnable); extern void machine_max_runnable_latency(uint64_t bg_max_latency, - uint64_t default_max_latency, - uint64_t realtime_max_latency); + uint64_t default_max_latency, + uint64_t realtime_max_latency); extern void machine_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_args); @@ -150,16 +155,16 @@ extern void machine_work_interval_notify(thread_t thread, struct kern_work_inter extern void machine_perfcontrol_deadline_passed(uint64_t deadline); extern void machine_switch_perfcontrol_context(perfcontrol_event event, - uint64_t timestamp, - uint32_t flags, - uint64_t new_thread_same_pri_latency, - thread_t old, - thread_t new); + uint64_t timestamp, + uint32_t flags, + uint64_t new_thread_same_pri_latency, + thread_t old, + thread_t new); extern void machine_switch_perfcontrol_state_update(perfcontrol_event event, - uint64_t timestamp, - uint32_t flags, - thread_t thread); + uint64_t timestamp, + uint32_t flags, + thread_t thread); -#endif /* _KERN_MACHINE_H_ */ +#endif /* _KERN_MACHINE_H_ */ diff --git a/osfmk/kern/macro_help.h b/osfmk/kern/macro_help.h index 159b43237..03d52e5a9 100644 --- a/osfmk/kern/macro_help.h +++ b/osfmk/kern/macro_help.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,24 +60,24 @@ * * Provide help in making lint-free macro routines * - */ + */ -#ifndef _KERN_MACRO_HELP_H_ -#define _KERN_MACRO_HELP_H_ +#ifndef _KERN_MACRO_HELP_H_ +#define _KERN_MACRO_HELP_H_ #include -#ifdef lint -boolean_t NEVER; -boolean_t ALWAYS; -#else /* lint */ -#define NEVER FALSE -#define ALWAYS TRUE -#endif /* lint */ +#ifdef lint +boolean_t NEVER; +boolean_t ALWAYS; +#else /* lint */ +#define NEVER FALSE +#define ALWAYS TRUE +#endif /* lint */ -#define MACRO_BEGIN do { -#define MACRO_END } while (NEVER) +#define MACRO_BEGIN do { +#define MACRO_END } while (NEVER) -#define MACRO_RETURN if (ALWAYS) return +#define MACRO_RETURN if (ALWAYS) return -#endif /* _KERN_MACRO_HELP_H_ */ +#endif /* _KERN_MACRO_HELP_H_ */ diff --git a/osfmk/kern/memset_s.c b/osfmk/kern/memset_s.c index f13d0f6cd..37b89450d 100644 --- a/osfmk/kern/memset_s.c +++ b/osfmk/kern/memset_s.c @@ -45,19 +45,22 @@ memset_s(void *s, size_t smax, int c, size_t n) { int err = 0; - if (s == NULL) return EINVAL; - if (smax > RSIZE_MAX) return E2BIG; + if (s == NULL) { + return EINVAL; + } + if (smax > RSIZE_MAX) { + return E2BIG; + } if (n > smax) { n = smax; err = EOVERFLOW; } - /* + /* * secure_memset is defined in assembly, we therefore - * expect that the compiler will not inline the call. + * expect that the compiler will not inline the call. */ secure_memset(s, c, n); return err; } - diff --git a/osfmk/kern/misc_protos.h b/osfmk/kern/misc_protos.h index 74a0a7d63..c1dee4267 100644 --- a/osfmk/kern/misc_protos.h +++ b/osfmk/kern/misc_protos.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -35,8 +35,8 @@ * Version 2.0. */ -#ifndef _MISC_PROTOS_H_ -#define _MISC_PROTOS_H_ +#ifndef _MISC_PROTOS_H_ +#define _MISC_PROTOS_H_ #include #include @@ -49,33 +49,33 @@ #include #ifndef MIN -#define MIN(a,b) (((a)<(b))?(a):(b)) +#define MIN(a, b) (((a)<(b))?(a):(b)) #endif /* MIN */ #ifndef MAX -#define MAX(a,b) (((a)>(b))?(a):(b)) +#define MAX(a, b) (((a)>(b))?(a):(b)) #endif /* MAX */ /* Set a bit in a bit array */ extern void setbit( - int which, - int *bitmap); + int which, + int *bitmap); /* Clear a bit in a bit array */ extern void clrbit( - int which, - int *bitmap); + int which, + int *bitmap); /* Find the first set bit in a bit array */ extern int ffsbit( - int *bitmap); + int *bitmap); extern int ffs( - unsigned int mask); + unsigned int mask); extern int ffsll( unsigned long long mask); /* Find the last set bit in a bit array */ extern int fls( - unsigned int mask); + unsigned int mask); extern int flsll( unsigned long long mask); @@ -83,8 +83,8 @@ extern int flsll( * Test if indicated bit is set in bit string. */ extern int testbit( - int which, - int *bitmap); + int which, + int *bitmap); /* Move an aligned 32 or 64-bit word from user space to kernel space * using a single read instruction @@ -124,12 +124,12 @@ extern void copy_window_fault(thread_t, vm_map_t, int); extern int copyin_validate(const user_addr_t, uintptr_t, vm_size_t); extern int copyout_validate(uintptr_t, const user_addr_t, vm_size_t); -extern int sscanf(const char *input, const char *fmt, ...) __scanflike(2,3); +extern int sscanf(const char *input, const char *fmt, ...) __scanflike(2, 3); -/* sprintf() is being deprecated. Please use snprintf() instead. */ +/* sprintf() is being deprecated. Please use snprintf() instead. */ extern integer_t sprintf(char *buf, const char *fmt, ...) __deprecated; -extern int printf(const char *format, ...) __printflike(1,2); +extern int printf(const char *format, ...) __printflike(1, 2); extern int vprintf(const char *format, va_list ap); #if KERNEL_PRIVATE @@ -144,46 +144,46 @@ int _consume_printf_args(int, ...); #endif #endif -extern int paniclog_append_noflush(const char *format, ...) __printflike(1,2); +extern int paniclog_append_noflush(const char *format, ...) __printflike(1, 2); -extern int kdb_printf(const char *format, ...) __printflike(1,2); +extern int kdb_printf(const char *format, ...) __printflike(1, 2); -extern int kdb_log(const char *format, ...) __printflike(1,2); +extern int kdb_log(const char *format, ...) __printflike(1, 2); -extern int kdb_printf_unbuffered(const char *format, ...) __printflike(1,2); +extern int kdb_printf_unbuffered(const char *format, ...) __printflike(1, 2); extern void printf_init(void); -extern int snprintf(char *, size_t, const char *, ...) __printflike(3,4); +extern int snprintf(char *, size_t, const char *, ...) 
__printflike(3, 4); extern void log(int level, char *fmt, ...); -void +void _doprnt( - const char *fmt, - va_list *argp, - void (*putc)(char), - int radix); + const char *fmt, + va_list *argp, + void (*putc)(char), + int radix); void _doprnt_log( - const char *fmt, - va_list *argp, - void (*putc)(char), - int radix); + const char *fmt, + va_list *argp, + void (*putc)(char), + int radix); int __doprnt( - const char *fmt, - va_list argp, - void (*putc)(int, void *), + const char *fmt, + va_list argp, + void (*putc)(int, void *), void *arg, - int radix, - int is_log); + int radix, + int is_log); extern void safe_gets( - char *str, - int maxlen); + char *str, + int maxlen); extern void cnputcusr(char); @@ -214,28 +214,28 @@ extern int cngetc(void); extern int cnmaygetc(void); extern int _setjmp( - jmp_buf_t *jmp_buf); + jmp_buf_t *jmp_buf); extern int _longjmp( - jmp_buf_t *jmp_buf, - int value); + jmp_buf_t *jmp_buf, + int value); extern void bootstrap_create(void); -#if DIPC -extern boolean_t no_bootstrap_task(void); -extern ipc_port_t get_root_master_device_port(void); -#endif /* DIPC */ +#if DIPC +extern boolean_t no_bootstrap_task(void); +extern ipc_port_t get_root_master_device_port(void); +#endif /* DIPC */ -extern kern_return_t kernel_set_special_port( - host_priv_t host_priv, - int which, - ipc_port_t port); +extern kern_return_t kernel_set_special_port( + host_priv_t host_priv, + int which, + ipc_port_t port); -extern kern_return_t kernel_get_special_port( - host_priv_t host_priv, - int which, - ipc_port_t *portp); +extern kern_return_t kernel_get_special_port( + host_priv_t host_priv, + int which, + ipc_port_t *portp); user_addr_t get_useraddr(void); @@ -244,4 +244,4 @@ struct kmod_info_t; extern uint64_t early_random(void); -#endif /* _MISC_PROTOS_H_ */ +#endif /* _MISC_PROTOS_H_ */ diff --git a/osfmk/kern/mk_sp.c b/osfmk/kern/mk_sp.c index af9f05cb8..37181373d 100644 --- a/osfmk/kern/mk_sp.c +++ b/osfmk/kern/mk_sp.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ /* The routines in this module are all obsolete */ @@ -64,39 +64,40 @@ /* * thread_set_policy * - * Set scheduling policy and parameters, both base and limit, for + * Set scheduling policy and parameters, both base and limit, for * the given thread. Policy can be any policy implemented by the - * processor set, whether enabled or not. + * processor set, whether enabled or not. 
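 *
 * Editor's note (not part of the patch): an illustrative call of this
 * obsolete interface with the timeshare policy; the priority values are
 * assumptions, and pset0 is the only processor set the routine accepts.
 */

static kern_return_t
example_set_timeshare(thread_t thread)
{
	policy_timeshare_base_data_t  base  = { .base_priority = 31 };
	policy_timeshare_limit_data_t limit = { .max_priority  = 63 };

	return thread_set_policy(thread, &pset0, POLICY_TIMESHARE,
	    (policy_base_t)&base, POLICY_TIMESHARE_BASE_COUNT,
	    (policy_limit_t)&limit, POLICY_TIMESHARE_LIMIT_COUNT);
}

/*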
*/ kern_return_t thread_set_policy( - thread_t thread, - processor_set_t pset, - policy_t policy, - policy_base_t base, - mach_msg_type_number_t base_count, - policy_limit_t limit, - mach_msg_type_number_t limit_count) + thread_t thread, + processor_set_t pset, + policy_t policy, + policy_base_t base, + mach_msg_type_number_t base_count, + policy_limit_t limit, + mach_msg_type_number_t limit_count) { - int max, bas; - kern_return_t result = KERN_SUCCESS; + int max, bas; + kern_return_t result = KERN_SUCCESS; - if ( thread == THREAD_NULL || - pset == PROCESSOR_SET_NULL || pset != &pset0) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL || + pset == PROCESSOR_SET_NULL || pset != &pset0) { + return KERN_INVALID_ARGUMENT; + } - if (invalid_policy(policy)) - return(KERN_INVALID_ARGUMENT); + if (invalid_policy(policy)) { + return KERN_INVALID_ARGUMENT; + } switch (policy) { - case POLICY_RR: { - policy_rr_base_t rr_base = (policy_rr_base_t) base; - policy_rr_limit_t rr_limit = (policy_rr_limit_t) limit; + policy_rr_base_t rr_base = (policy_rr_base_t) base; + policy_rr_limit_t rr_limit = (policy_rr_limit_t) limit; - if ( base_count != POLICY_RR_BASE_COUNT || - limit_count != POLICY_RR_LIMIT_COUNT ) { + if (base_count != POLICY_RR_BASE_COUNT || + limit_count != POLICY_RR_LIMIT_COUNT) { result = KERN_INVALID_ARGUMENT; break; } @@ -113,11 +114,11 @@ thread_set_policy( case POLICY_FIFO: { - policy_fifo_base_t fifo_base = (policy_fifo_base_t) base; - policy_fifo_limit_t fifo_limit = (policy_fifo_limit_t) limit; + policy_fifo_base_t fifo_base = (policy_fifo_base_t) base; + policy_fifo_limit_t fifo_limit = (policy_fifo_limit_t) limit; - if ( base_count != POLICY_FIFO_BASE_COUNT || - limit_count != POLICY_FIFO_LIMIT_COUNT) { + if (base_count != POLICY_FIFO_BASE_COUNT || + limit_count != POLICY_FIFO_LIMIT_COUNT) { result = KERN_INVALID_ARGUMENT; break; } @@ -134,12 +135,12 @@ thread_set_policy( case POLICY_TIMESHARE: { - policy_timeshare_base_t ts_base = (policy_timeshare_base_t) base; - policy_timeshare_limit_t ts_limit = - (policy_timeshare_limit_t) limit; + policy_timeshare_base_t ts_base = (policy_timeshare_base_t) base; + policy_timeshare_limit_t ts_limit = + (policy_timeshare_limit_t) limit; - if ( base_count != POLICY_TIMESHARE_BASE_COUNT || - limit_count != POLICY_TIMESHARE_LIMIT_COUNT ) { + if (base_count != POLICY_TIMESHARE_BASE_COUNT || + limit_count != POLICY_TIMESHARE_LIMIT_COUNT) { result = KERN_INVALID_ARGUMENT; break; } @@ -159,59 +160,59 @@ thread_set_policy( } if (result != KERN_SUCCESS) { - return (result); + return result; } /* Note that we do not pass on max priority. */ if (result == KERN_SUCCESS) { - result = thread_set_mode_and_absolute_pri(thread, policy, bas); + result = thread_set_mode_and_absolute_pri(thread, policy, bas); } - return (result); + return result; } /* - * thread_policy + * thread_policy * * Set scheduling policy and parameters, both base and limit, for * the given thread. Policy must be a policy which is enabled for the - * processor set. Change contained threads if requested. + * processor set. Change contained threads if requested. 
*/ kern_return_t thread_policy( - thread_t thread, - policy_t policy, - policy_base_t base, - mach_msg_type_number_t count, - boolean_t set_limit) + thread_t thread, + policy_t policy, + policy_base_t base, + mach_msg_type_number_t count, + boolean_t set_limit) { - kern_return_t result = KERN_SUCCESS; - processor_set_t pset = &pset0; - policy_limit_t limit = NULL; - int limcount = 0; - policy_rr_limit_data_t rr_limit; - policy_fifo_limit_data_t fifo_limit; - policy_timeshare_limit_data_t ts_limit; - - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + kern_return_t result = KERN_SUCCESS; + processor_set_t pset = &pset0; + policy_limit_t limit = NULL; + int limcount = 0; + policy_rr_limit_data_t rr_limit; + policy_fifo_limit_data_t fifo_limit; + policy_timeshare_limit_data_t ts_limit; + + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); - if ( invalid_policy(policy) || - ((POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO) & policy) == 0 ) { + if (invalid_policy(policy) || + ((POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO) & policy) == 0) { thread_mtx_unlock(thread); - return (KERN_INVALID_POLICY); + return KERN_INVALID_POLICY; } if (set_limit) { /* - * Set scheduling limits to base priority. + * Set scheduling limits to base priority. */ switch (policy) { - case POLICY_RR: { policy_rr_base_t rr_base; @@ -267,15 +268,12 @@ thread_policy( result = KERN_INVALID_POLICY; break; } - - } - else { + } else { /* * Use current scheduling limits. Ensure that the * new base priority will not exceed current limits. */ switch (policy) { - case POLICY_RR: { policy_rr_base_t rr_base; @@ -346,14 +344,14 @@ thread_policy( result = KERN_INVALID_POLICY; break; } - } thread_mtx_unlock(thread); - if (result == KERN_SUCCESS) - result = thread_set_policy(thread, pset, - policy, base, count, limit, limcount); + if (result == KERN_SUCCESS) { + result = thread_set_policy(thread, pset, + policy, base, count, limit, limcount); + } - return(result); + return result; } diff --git a/osfmk/kern/mk_timer.c b/osfmk/kern/mk_timer.c index f968d1898..883a1e31b 100644 --- a/osfmk/kern/mk_timer.c +++ b/osfmk/kern/mk_timer.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -42,42 +42,45 @@ #include +#include #include #include -static zone_t mk_timer_zone; +static zone_t mk_timer_zone; static mach_port_qos_t mk_timer_qos = { - FALSE, TRUE, 0, sizeof (mk_timer_expire_msg_t) + FALSE, TRUE, 0, sizeof(mk_timer_expire_msg_t) }; -static void mk_timer_expire( - void *p0, - void *p1); +static void mk_timer_expire( + void *p0, + void *p1); mach_port_name_t mk_timer_create_trap( __unused struct mk_timer_create_trap_args *args) { - mk_timer_t timer; - ipc_space_t myspace = current_space(); - mach_port_name_t name = MACH_PORT_NULL; - ipc_port_t port; - kern_return_t result; + mk_timer_t timer; + ipc_space_t myspace = current_space(); + mach_port_name_t name = MACH_PORT_NULL; + ipc_port_t port; + kern_return_t result; timer = (mk_timer_t)zalloc(mk_timer_zone); - if (timer == NULL) - return (MACH_PORT_NULL); + if (timer == NULL) { + return MACH_PORT_NULL; + } result = mach_port_allocate_qos(myspace, MACH_PORT_RIGHT_RECEIVE, - &mk_timer_qos, &name); - if (result == KERN_SUCCESS) + &mk_timer_qos, &name); + if (result == KERN_SUCCESS) { result = ipc_port_translate_receive(myspace, name, &port); + } if (result != KERN_SUCCESS) { zfree(mk_timer_zone, timer); - return (MACH_PORT_NULL); + return MACH_PORT_NULL; } simple_lock_init(&timer->lock, 0); @@ -92,28 +95,29 @@ mk_timer_create_trap( ip_reference(port); ip_unlock(port); - return (name); + return name; } void mk_timer_port_destroy( - ipc_port_t port) + ipc_port_t port) { - mk_timer_t timer = NULL; + mk_timer_t timer = NULL; ip_lock(port); if (ip_kotype(port) == IKOT_TIMER) { timer = (mk_timer_t)port->ip_kobject; assert(timer != NULL); ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE); - simple_lock(&timer->lock); + simple_lock(&timer->lock, LCK_GRP_NULL); assert(timer->port == port); } ip_unlock(port); if (timer != NULL) { - if (thread_call_cancel(&timer->call_entry)) + if (thread_call_cancel(&timer->call_entry)) { timer->active--; + } timer->is_armed = FALSE; timer->is_dead = TRUE; @@ -132,7 +136,7 @@ mk_timer_port_destroy( void mk_timer_init(void) { - int s = sizeof (mk_timer_data_t); + int s = sizeof(mk_timer_data_t); assert(!(mk_timer_zone != NULL)); @@ -143,13 +147,13 @@ mk_timer_init(void) static void mk_timer_expire( - void *p0, - __unused void *p1) + void *p0, + __unused void *p1) { - mk_timer_t timer = p0; - ipc_port_t port; + mk_timer_t timer = p0; + ipc_port_t port; - simple_lock(&timer->lock); + simple_lock(&timer->lock, LCK_GRP_NULL); if (timer->active > 1) { timer->active--; @@ -162,7 +166,7 @@ mk_timer_expire( assert(timer->active == 1); while (timer->is_armed && timer->active == 1) { - mk_timer_expire_msg_t msg; + mk_timer_expire_msg_t msg; timer->is_armed = FALSE; simple_unlock(&timer->lock); @@ -176,9 +180,9 @@ mk_timer_expire( msg.unused[0] = msg.unused[1] = msg.unused[2] = 0; - (void) mach_msg_send_from_kernel_proper(&msg.header, sizeof (msg)); + (void) mach_msg_send_from_kernel_proper(&msg.header, sizeof(msg)); - simple_lock(&timer->lock); + simple_lock(&timer->lock, LCK_GRP_NULL); } if (--timer->active == 0 && timer->is_dead) { @@ -201,32 +205,32 @@ mk_timer_expire( * * * Returns: 0 Success - * !0 Not success + * !0 Not success * */ kern_return_t mk_timer_destroy_trap( struct mk_timer_destroy_trap_args *args) { - mach_port_name_t name = args->name; - ipc_space_t myspace = current_space(); - ipc_port_t port; - kern_return_t result; + mach_port_name_t name = args->name; + ipc_space_t myspace = current_space(); + ipc_port_t port; + kern_return_t 
result; result = ipc_port_translate_receive(myspace, name, &port); - if (result != KERN_SUCCESS) - return (result); + if (result != KERN_SUCCESS) { + return result; + } if (ip_kotype(port) == IKOT_TIMER) { ip_unlock(port); result = mach_port_destroy(myspace, name); - } - else { + } else { ip_unlock(port); result = KERN_INVALID_ARGUMENT; } - return (result); + return result; } /* @@ -239,26 +243,28 @@ mk_timer_destroy_trap( * * * Returns: 0 Success - * !0 Not success + * !0 Not success * */ static kern_return_t -mk_timer_arm_trap_internal(mach_port_name_t name, uint64_t expire_time, uint64_t mk_leeway, uint64_t mk_timer_flags) { - mk_timer_t timer; - ipc_space_t myspace = current_space(); - ipc_port_t port; - kern_return_t result; +mk_timer_arm_trap_internal(mach_port_name_t name, uint64_t expire_time, uint64_t mk_leeway, uint64_t mk_timer_flags) +{ + mk_timer_t timer; + ipc_space_t myspace = current_space(); + ipc_port_t port; + kern_return_t result; result = ipc_port_translate_receive(myspace, name, &port); - if (result != KERN_SUCCESS) - return (result); + if (result != KERN_SUCCESS) { + return result; + } if (ip_kotype(port) == IKOT_TIMER) { timer = (mk_timer_t)port->ip_kobject; assert(timer != NULL); - simple_lock(&timer->lock); + simple_lock(&timer->lock, LCK_GRP_NULL); assert(timer->port == port); ip_unlock(port); @@ -277,14 +283,14 @@ mk_timer_arm_trap_internal(mach_port_name_t name, uint64_t expire_time, uint64_t } if (!thread_call_enter_delayed_with_leeway( - &timer->call_entry, NULL, - expire_time, mk_leeway, tcflags)) { - + &timer->call_entry, NULL, + expire_time, mk_leeway, tcflags)) { timer->active++; } } else { - if (!thread_call_enter1(&timer->call_entry, NULL)) + if (!thread_call_enter1(&timer->call_entry, NULL)) { timer->active++; + } } } @@ -293,16 +299,18 @@ mk_timer_arm_trap_internal(mach_port_name_t name, uint64_t expire_time, uint64_t ip_unlock(port); result = KERN_INVALID_ARGUMENT; } - return (result); + return result; } kern_return_t -mk_timer_arm_trap(struct mk_timer_arm_trap_args *args) { +mk_timer_arm_trap(struct mk_timer_arm_trap_args *args) +{ return mk_timer_arm_trap_internal(args->name, args->expire_time, 0, MK_TIMER_NORMAL); } kern_return_t -mk_timer_arm_leeway_trap(struct mk_timer_arm_leeway_trap_args *args) { +mk_timer_arm_leeway_trap(struct mk_timer_arm_leeway_trap_args *args) +{ return mk_timer_arm_trap_internal(args->name, args->expire_time, args->mk_leeway, args->mk_timer_flags); } @@ -316,51 +324,54 @@ mk_timer_arm_leeway_trap(struct mk_timer_arm_leeway_trap_args *args) { * * * Returns: 0 Success - * !0 Not success + * !0 Not success * */ kern_return_t mk_timer_cancel_trap( struct mk_timer_cancel_trap_args *args) { - mach_port_name_t name = args->name; - mach_vm_address_t result_time_addr = args->result_time; - uint64_t armed_time = 0; - mk_timer_t timer; - ipc_space_t myspace = current_space(); - ipc_port_t port; - kern_return_t result; + mach_port_name_t name = args->name; + mach_vm_address_t result_time_addr = args->result_time; + uint64_t armed_time = 0; + mk_timer_t timer; + ipc_space_t myspace = current_space(); + ipc_port_t port; + kern_return_t result; result = ipc_port_translate_receive(myspace, name, &port); - if (result != KERN_SUCCESS) - return (result); + if (result != KERN_SUCCESS) { + return result; + } if (ip_kotype(port) == IKOT_TIMER) { timer = (mk_timer_t)port->ip_kobject; assert(timer != NULL); - simple_lock(&timer->lock); + simple_lock(&timer->lock, LCK_GRP_NULL); assert(timer->port == port); ip_unlock(port); if (timer->is_armed) 
{ armed_time = timer->call_entry.tc_call.deadline; - if (thread_call_cancel(&timer->call_entry)) + if (thread_call_cancel(&timer->call_entry)) { timer->active--; + } timer->is_armed = FALSE; } simple_unlock(&timer->lock); - } - else { + } else { ip_unlock(port); result = KERN_INVALID_ARGUMENT; } - if (result == KERN_SUCCESS) - if ( result_time_addr != 0 && - copyout((void *)&armed_time, result_time_addr, - sizeof (armed_time)) != 0 ) + if (result == KERN_SUCCESS) { + if (result_time_addr != 0 && + copyout((void *)&armed_time, result_time_addr, + sizeof(armed_time)) != 0) { result = KERN_FAILURE; + } + } - return (result); + return result; } diff --git a/osfmk/kern/mk_timer.h b/osfmk/kern/mk_timer.h index 56904a9b3..01af9ed5a 100644 --- a/osfmk/kern/mk_timer.h +++ b/osfmk/kern/mk_timer.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,8 +34,8 @@ * Created. 
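 *
 * Editor's note (not part of the patch): the traps above are fronted by
 * user-level wrappers (mk_timer_create/arm/destroy). A hedged sketch of a
 * one-shot timer, assuming a 1:1 mach_absolute_time()-to-nanoseconds
 * timebase for brevity.
 */

#include <mach/mach.h>
#include <mach/mk_timer.h>
#include <mach/mach_time.h>

static void
example_one_shot_timer(void)
{
	mach_port_name_t timer = mk_timer_create();
	mk_timer_expire_msg_t msg;

	/* Arm roughly 10ms out, then block for the expiration message that
	 * mk_timer_expire() above sends to the timer's receive right. */
	mk_timer_arm(timer, mach_absolute_time() + 10 * NSEC_PER_MSEC);
	mach_msg(&msg.header, MACH_RCV_MSG, 0, sizeof(msg), timer,
	    MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);

	mk_timer_destroy(timer);
}

/*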
*/ -#ifndef _KERN_MK_TIMER_H_ -#define _KERN_MK_TIMER_H_ +#ifndef _KERN_MK_TIMER_H_ +#define _KERN_MK_TIMER_H_ #ifdef MACH_KERNEL_PRIVATE #include @@ -43,20 +43,20 @@ #include struct mk_timer { - decl_simple_lock_data(,lock); - thread_call_data_t call_entry; - uint32_t is_dead:1, - is_armed:1; - int active; - ipc_port_t port; + decl_simple_lock_data(, lock); + thread_call_data_t call_entry; + uint32_t is_dead:1, + is_armed:1; + int active; + ipc_port_t port; }; -typedef struct mk_timer *mk_timer_t, mk_timer_data_t; +typedef struct mk_timer *mk_timer_t, mk_timer_data_t; -void mk_timer_port_destroy( - ipc_port_t port); +void mk_timer_port_destroy( + ipc_port_t port); -void mk_timer_init(void); +void mk_timer_init(void); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/monotonic.h b/osfmk/kern/monotonic.h index 65fd27140..e8fcde164 100644 --- a/osfmk/kern/monotonic.h +++ b/osfmk/kern/monotonic.h @@ -149,7 +149,7 @@ void mt_stackshot_task(task_t task, uint64_t *instrs, uint64_t *cycles); */ typedef void (*mt_pmi_fn)(bool user_mode, void *ctx); int mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn fn, - void *ctx); + void *ctx); int mt_microstackshot_stop(void); __END_DECLS diff --git a/osfmk/kern/mpqueue.h b/osfmk/kern/mpqueue.h new file mode 100644 index 000000000..0c966c67b --- /dev/null +++ b/osfmk/kern/mpqueue.h @@ -0,0 +1,75 @@ +#ifndef _KERN_MPQUEUE_H +#define _KERN_MPQUEUE_H +#include + +__BEGIN_DECLS + +#ifdef MACH_KERNEL_PRIVATE + +/*----------------------------------------------------------------*/ +/* + * Define macros for queues with locks. + */ +struct mpqueue_head { + struct queue_entry head; /* header for queue */ + uint64_t earliest_soft_deadline; + uint64_t count; + lck_mtx_t lock_data; +#if defined(__i386__) || defined(__x86_64__) + lck_mtx_ext_t lock_data_ext; +#endif +}; + +typedef struct mpqueue_head mpqueue_head_t; + +#define round_mpq(size) (size) + + +#if defined(__i386__) || defined(__x86_64__) + +#define mpqueue_init(q, lck_grp, lck_attr) \ +MACRO_BEGIN \ + queue_init(&(q)->head); \ + lck_mtx_init_ext(&(q)->lock_data, \ + &(q)->lock_data_ext, \ + lck_grp, \ + lck_attr); \ + (q)->earliest_soft_deadline = UINT64_MAX; \ + (q)->count = 0; \ +MACRO_END + +#else + +#define mpqueue_init(q, lck_grp, lck_attr) \ +MACRO_BEGIN \ + queue_init(&(q)->head); \ + lck_mtx_init(&(q)->lock_data, \ + lck_grp, \ + lck_attr); \ +MACRO_END +#endif + + +#define mpenqueue_tail(q, elt) \ +MACRO_BEGIN \ + lck_mtx_lock_spin_always(&(q)->lock_data); \ + enqueue_tail(&(q)->head, elt); \ + lck_mtx_unlock_always(&(q)->lock_data); \ +MACRO_END + +#define mpdequeue_head(q, elt) \ +MACRO_BEGIN \ + lck_mtx_lock_spin_always(&(q)->lock_data); \ + if (queue_empty(&(q)->head)) \ + *(elt) = 0; \ + else \ + *(elt) = dequeue_head(&(q)->head); \ + lck_mtx_unlock_always(&(q)->lock_data); \ +MACRO_END + +#endif /* MACH_KERNEL_PRIVATE */ + +__END_DECLS + + +#endif /* _KERN_QUEUE_H */ diff --git a/osfmk/kern/page_decrypt.c b/osfmk/kern/page_decrypt.c index 9914fe9d0..c95ce3e7f 100644 --- a/osfmk/kern/page_decrypt.c +++ b/osfmk/kern/page_decrypt.c @@ -2,7 +2,7 @@ * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,7 +37,6 @@ static dsmos_page_transform_hook_t dsmos_hook = NULL; void dsmos_page_transform_hook(dsmos_page_transform_hook_t hook) { - printf("DSMOS has arrived\n"); /* set the hook now - new callers will run with it */ dsmos_hook = hook; @@ -55,12 +54,13 @@ dsmos_page_transform(const void* from, void *to, unsigned long long src_offset, } return KERN_ABORTED; } - return (*dsmos_hook) (from, to, src_offset, ops); + return (*dsmos_hook)(from, to, src_offset, ops); } -text_crypter_create_hook_t text_crypter_create=NULL; -void text_crypter_create_hook_set(text_crypter_create_hook_t hook) +text_crypter_create_hook_t text_crypter_create = NULL; +void +text_crypter_create_hook_set(text_crypter_create_hook_t hook) { - text_crypter_create=hook; + text_crypter_create = hook; } diff --git a/osfmk/kern/page_decrypt.h b/osfmk/kern/page_decrypt.h index 3860f9b39..07fa9e1f6 100644 --- a/osfmk/kern/page_decrypt.h +++ b/osfmk/kern/page_decrypt.h @@ -2,7 +2,7 @@ * Copyright (c) 2005-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,38 +22,38 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
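 *
 * Editor's note (not part of the patch): a sketch of driving the locked
 * queue macros from the new osfmk/kern/mpqueue.h above; the element type
 * and the lock group/attr arguments are assumptions.
 */

struct example_item {
	queue_chain_t links;    /* linkage first, so head dequeues cast cleanly */
	int           payload;
};

static struct mpqueue_head example_q;

static void
example_mpqueue(lck_grp_t *grp, lck_attr_t *attr, struct example_item *item)
{
	queue_entry_t elt;

	mpqueue_init(&example_q, grp, attr);

	mpenqueue_tail(&example_q, &item->links);  /* append under the queue mutex */
	mpdequeue_head(&example_q, &elt);          /* elt is set to 0 when empty */
}

/*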
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _KERN_PAGE_DECRYPT_H #define _KERN_PAGE_DECRYPT_H #include -/* - * Interface for DSMOS +/* + * Interface for DSMOS */ -typedef int (*dsmos_page_transform_hook_t) (const void *,void*, unsigned long long, void *); -extern void dsmos_page_transform_hook(dsmos_page_transform_hook_t hook); /* exported */ +typedef int (*dsmos_page_transform_hook_t) (const void *, void*, unsigned long long, void *); +extern void dsmos_page_transform_hook(dsmos_page_transform_hook_t hook); /* exported */ -extern int dsmos_page_transform(const void *,void*, unsigned long long, void*); +extern int dsmos_page_transform(const void *, void*, unsigned long long, void*); /* - *Interface for text decryption family + * Interface for text decryption family */ struct pager_crypt_info { - /* Decrypt one page */ - int (*page_decrypt)(const void *src_vaddr, void *dst_vaddr, - unsigned long long src_offset, void *crypt_ops); - /* Pager using this crypter terminates - crypt module not needed anymore */ - void (*crypt_end)(void *crypt_ops); - /* Private data for the crypter */ - void *crypt_ops; - volatile int crypt_refcnt; + /* Decrypt one page */ + int (*page_decrypt)(const void *src_vaddr, void *dst_vaddr, + unsigned long long src_offset, void *crypt_ops); + /* Pager using this crypter terminates - crypt module not needed anymore */ + void (*crypt_end)(void *crypt_ops); + /* Private data for the crypter */ + void *crypt_ops; + volatile int crypt_refcnt; }; typedef struct pager_crypt_info pager_crypt_info_t; @@ -64,11 +64,11 @@ struct crypt_file_data { }; typedef struct crypt_file_data crypt_file_data_t; -typedef int (*text_crypter_create_hook_t)(struct pager_crypt_info *crypt_info, - const char *id, void *crypt_data); +typedef int (*text_crypter_create_hook_t)(struct pager_crypt_info *crypt_info, + const char *id, void *crypt_data); extern void text_crypter_create_hook_set(text_crypter_create_hook_t hook); extern text_crypter_create_hook_t text_crypter_create; -#endif /* _KERN_PAGE_DECRYPT_H */ +#endif /* _KERN_PAGE_DECRYPT_H */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/kern/pms.h b/osfmk/kern/pms.h index 990c71b2a..a6f165b33 100644 --- a/osfmk/kern/pms.h +++ b/osfmk/kern/pms.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
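The DSMOS interface declared in page_decrypt.h above is a single registration point: until a hook is installed, dsmos_page_transform() fails with KERN_ABORTED. A hedged sketch of the contract follows; the identity transform is purely illustrative, not Apple's actual decryption hook.

static int
example_identity_transform(const void *from, void *to,
    unsigned long long src_offset __unused, void *ops __unused)
{
	/* a real hook decrypts the page; a straight copy is illustrative */
	memcpy(to, from, PAGE_SIZE);
	return KERN_SUCCESS;
}

static void
example_register_dsmos_hook(void)
{
	dsmos_page_transform_hook(example_identity_transform);
}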
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,53 +35,53 @@ #define HalfwayToForever 0x7FFFFFFFFFFFFFFFULL #define century 790560000000000ULL -typedef void (*pmsSetFunc_t)(uint32_t, uint32_t, uint32_t); /* Function used to set hardware power state */ -typedef uint32_t (*pmsQueryFunc_t)(uint32_t, uint32_t); /* Function used to query hardware power state */ +typedef void (*pmsSetFunc_t)(uint32_t, uint32_t, uint32_t); /* Function used to set hardware power state */ +typedef uint32_t (*pmsQueryFunc_t)(uint32_t, uint32_t); /* Function used to query hardware power state */ typedef struct pmsStat { - uint64_t stTime[2]; /* Total time until switch to next step */ - uint32_t stCnt[2]; /* Number of times switched to next step */ + uint64_t stTime[2]; /* Total time until switch to next step */ + uint32_t stCnt[2]; /* Number of times switched to next step */ } pmsStat; typedef struct pmsDef { - uint64_t pmsLimit; /* Max time in this state in microseconds */ - uint32_t pmsStepID; /* Unique ID for this step */ - uint32_t pmsSetCmd; /* Command to select power state */ -#define pmsCngXClk 0x80000000 /* Change external clock */ -#define pmsXUnk 0x7F /* External clock unknown */ -#define pmsXClk 0x7F000000 /* External clock frequency */ -#define pmsCngCPU 0x00800000 /* Change CPU parameters */ -#define pmsSync 0x00400000 /* Make changes synchronously, i.e., spin until delay finished */ -#define pmsMustCmp 0x00200000 /* Delay must complete before next change */ -#define pmsCPU 0x001F0000 /* CPU frequency */ -#define pmsCPUUnk 0x1F /* CPU frequency unknown */ -#define pmsCngVolt 0x00008000 /* Change voltage */ -#define pmsVoltage 0x00007F00 /* Voltage */ -#define pmsVoltUnk 0x7F /* Voltage unknown */ -#define pmsPowerID 0x000000FF /* Identify power state to HW */ + uint64_t pmsLimit; /* Max time in this state in microseconds */ + uint32_t pmsStepID; /* Unique ID for this step */ + uint32_t pmsSetCmd; /* Command to select power state */ +#define pmsCngXClk 0x80000000 /* Change external clock */ +#define pmsXUnk 0x7F /* External clock unknown */ +#define pmsXClk 0x7F000000 /* External clock frequency */ +#define pmsCngCPU 0x00800000 /* Change CPU parameters */ +#define pmsSync 0x00400000 /* Make changes synchronously, i.e., spin until delay finished */ +#define pmsMustCmp 0x00200000 /* Delay must complete before next change */ +#define pmsCPU 0x001F0000 /* CPU frequency */ +#define pmsCPUUnk 0x1F /* CPU frequency unknown */ +#define pmsCngVolt 0x00008000 /* Change voltage */ +#define pmsVoltage 0x00007F00 /* Voltage */ +#define pmsVoltUnk 0x7F /* Voltage unknown */ +#define pmsPowerID 0x000000FF /* Identify power state to HW */ /* Special commands - various things */ -#define pmsDelay 0xFFFFFFFD /* Delayed step, no processor or platform changes. Timer expiration causes transition to pmsTDelay */ -#define pmsParkIt 0xFFFFFFFF /* Enters the parked state. No processor or platform changes. Timers cancelled */ -#define pmsCInit ((pmsXUnk << 24) | (pmsCPUUnk << 16) | (pmsVoltUnk << 8)) /* Initial current set command value */ +#define pmsDelay 0xFFFFFFFD /* Delayed step, no processor or platform changes. Timer expiration causes transition to pmsTDelay */ +#define pmsParkIt 0xFFFFFFFF /* Enters the parked state. No processor or platform changes. Timers cancelled */ +#define pmsCInit ((pmsXUnk << 24) | (pmsCPUUnk << 16) | (pmsVoltUnk << 8)) /* Initial current set command value */ /* Note: pmsSetFuncInd is an index into a table of function pointers and pmsSetFunc is the address * of a function. 
Initially, when you create a step table, this field is set as an index into * a table of function addresses that gets passed as a parameter to pmsBuild. When pmsBuild * internalizes the step and function tables, it converts the index to the function address. */ union sf { - pmsSetFunc_t pmsSetFunc; /* Function used to set platform power state */ - uint32_t pmsSetFuncInd; /* Index to function in function table */ + pmsSetFunc_t pmsSetFunc; /* Function used to set platform power state */ + uint32_t pmsSetFuncInd; /* Index to function in function table */ } sf; - uint32_t pmsDown; /* Next state if going lower */ - uint32_t pmsNext; /* Normal next state */ - uint32_t pmsTDelay; /* State if command was pmsDelay and timer expired */ + uint32_t pmsDown; /* Next state if going lower */ + uint32_t pmsNext; /* Normal next state */ + uint32_t pmsTDelay; /* State if command was pmsDelay and timer expired */ } pmsDef; typedef struct pmsCtl { - pmsStat (*pmsStats)[pmsMaxStates]; /* Pointer to statistics information, 0 if not enabled */ - pmsDef *pmsDefs[pmsMaxStates]; /* Indexed pointers to steps */ + pmsStat(*pmsStats)[pmsMaxStates]; /* Pointer to statistics information, 0 if not enabled */ + pmsDef *pmsDefs[pmsMaxStates]; /* Indexed pointers to steps */ } pmsCtl; /* @@ -90,62 +90,62 @@ typedef struct pmsCtl { */ typedef struct pmsd { - uint32_t pmsState; /* Current power management state */ - uint32_t pmsCSetCmd; /* Current select command */ - uint64_t pmsPop; /* Time of next step */ - uint64_t pmsStamp; /* Time of transition to current state */ - uint64_t pmsTime; /* Total time in this state */ + uint32_t pmsState; /* Current power management state */ + uint32_t pmsCSetCmd; /* Current select command */ + uint64_t pmsPop; /* Time of next step */ + uint64_t pmsStamp; /* Time of transition to current state */ + uint64_t pmsTime; /* Total time in this state */ } pmsd; /* * Required power management step programs */ - + enum { - pmsIdle = 0, /* Power state in idle loop */ - pmsNorm = 1, /* Normal step - usually low power */ - pmsNormHigh = 2, /* Highest power in normal step */ - pmsBoost = 3, /* Boost/overdrive step */ - pmsLow = 4, /* Lowest non-idle power state, no transitions */ - pmsHigh = 5, /* Power step for full on, no transitions */ - pmsPrepCng = 6, /* Prepare for step table change */ - pmsPrepSleep = 7, /* Prepare for sleep */ - pmsOverTemp = 8, /* Machine is too hot */ - pmsEnterNorm = 9, /* Enter into the normal step program */ - pmsFree = 10, /* First available empty step */ - pmsStartUp = 0xFFFFFFFE, /* Start stepping */ - pmsParked = 0xFFFFFFFF /* Power parked - used when changing stepping table */ + pmsIdle = 0, /* Power state in idle loop */ + pmsNorm = 1, /* Normal step - usually low power */ + pmsNormHigh = 2, /* Highest power in normal step */ + pmsBoost = 3, /* Boost/overdrive step */ + pmsLow = 4, /* Lowest non-idle power state, no transitions */ + pmsHigh = 5, /* Power step for full on, no transitions */ + pmsPrepCng = 6, /* Prepare for step table change */ + pmsPrepSleep = 7, /* Prepare for sleep */ + pmsOverTemp = 8, /* Machine is too hot */ + pmsEnterNorm = 9, /* Enter into the normal step program */ + pmsFree = 10, /* First available empty step */ + pmsStartUp = 0xFFFFFFFE, /* Start stepping */ + pmsParked = 0xFFFFFFFF /* Power parked - used when changing stepping table */ }; /* * Power Management Stepper Control requests */ - + enum { - pmsCPark = 0, /* Parks the stepper */ - pmsCStart = 1, /* Starts normal steppping */ - pmsCFLow = 2, /* Forces low power */ - pmsCFHigh = 3, /* 
Forces high power */ - pmsCCnfg = 4, /* Loads new stepper program */ - pmsCQuery = 5, /* Query current step and state */ - pmsCExperimental = 6, /* Enter experimental mode */ + pmsCPark = 0, /* Parks the stepper */ + pmsCStart = 1, /* Starts normal steppping */ + pmsCFLow = 2, /* Forces low power */ + pmsCFHigh = 3, /* Forces high power */ + pmsCCnfg = 4, /* Loads new stepper program */ + pmsCQuery = 5, /* Query current step and state */ + pmsCExperimental = 6, /* Enter experimental mode */ pmsGCtls = 7, pmsGStats = 8, pmsCVID = 9, - pmsCFree = 10 /* Next control command to be assigned */ + pmsCFree = 10 /* Next control command to be assigned */ }; /* * User request control structure passed to sysctl */ typedef struct { - uint32_t request; /* stepper control request */ - uint32_t reqsize; /* size of data */ - void *reqaddr; /* read/write data buffer */ + uint32_t request; /* stepper control request */ + uint32_t reqsize; /* size of data */ + void *reqaddr; /* read/write data buffer */ } pmsctl_t; -extern pmsCtl pmsCtls; /* Power Management Stepper control */ -extern uint32_t pmsBroadcastWait; /* Number of outstanding broadcasts */ +extern pmsCtl pmsCtls; /* Power Management Stepper control */ +extern uint32_t pmsBroadcastWait; /* Number of outstanding broadcasts */ extern int pmsInstalled; extern int pmsExperimental; @@ -179,7 +179,7 @@ extern kern_return_t pmsBuild(pmsDef *pd, uint32_t pdsize, pmsSetFunc_t *functab extern void pmsRun(uint32_t nstep); extern void pmsPark(void); extern void pmsStart(void); -extern kern_return_t pmsCPULoadVIDTable(uint16_t *tablep, int nstates); /* i386 only */ +extern kern_return_t pmsCPULoadVIDTable(uint16_t *tablep, int nstates); /* i386 only */ extern kern_return_t pmsCPUSetPStateLimit(uint32_t limit); #ifdef __cplusplus } diff --git a/osfmk/kern/policy_internal.h b/osfmk/kern/policy_internal.h index 3e2814408..0a2e47e35 100644 --- a/osfmk/kern/policy_internal.h +++ b/osfmk/kern/policy_internal.h @@ -89,7 +89,7 @@ extern kern_return_t task_importance(task_t task, integer_t importance); /* unused 0x2B */ #define TASK_POLICY_TERMINATED 0x2C #define TASK_POLICY_NEW_SOCKETS_BG 0x2D -/* unused 0x2E */ +#define TASK_POLICY_SUP_ACTIVE 0x2E #define TASK_POLICY_LATENCY_QOS 0x2F #define TASK_POLICY_THROUGH_QOS 0x30 #define TASK_POLICY_WATCHERS_BG 0x31 @@ -134,12 +134,12 @@ extern int proc_task_role_to_darwin_role(int task_role); /* Functions used by kern_exec.c */ extern void task_set_main_thread_qos(task_t task, thread_t main_thread); extern void proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role, - ipc_port_t * portwatch_ports, int portwatch_count); + ipc_port_t * portwatch_ports, int portwatch_count); extern void proc_inherit_task_role(task_t new_task, task_t old_task); /* IO Throttle tiers */ #define THROTTLE_LEVEL_NONE -1 -#define THROTTLE_LEVEL_TIER0 0 /* IOPOL_NORMAL, IOPOL_DEFAULT, IOPOL_PASSIVE */ +#define THROTTLE_LEVEL_TIER0 0 /* IOPOL_NORMAL, IOPOL_DEFAULT, IOPOL_PASSIVE */ #define THROTTLE_LEVEL_THROTTLED 1 #define THROTTLE_LEVEL_TIER1 1 /* IOPOL_STANDARD */ @@ -169,10 +169,10 @@ extern void proc_apply_task_networkbg(void * bsd_info, thread_t thread); /* Functions used by pthread_shims.c */ extern int proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, - int override_qos, boolean_t first_override_for_resource, - user_addr_t resource, int resource_type); + int override_qos, boolean_t first_override_for_resource, + user_addr_t resource, int resource_type); extern int proc_thread_qos_remove_override(task_t 
task, thread_t thread, uint64_t tid, - user_addr_t resource, int resource_type); + user_addr_t resource, int resource_type); extern void thread_reset_workq_qos(thread_t thread, uint32_t qos); extern void thread_set_workq_override(thread_t thread, uint32_t qos); @@ -221,9 +221,9 @@ extern boolean_t proc_task_is_tal(task_t task); #define TASK_POLICY_RESOURCE_ATTRIBUTE_DEFAULT TASK_POLICY_RESOURCE_ATTRIBUTE_NONE extern int proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, - uint64_t *intervalp, uint64_t *deadlinep); + uint64_t *intervalp, uint64_t *deadlinep); extern int proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, - uint64_t interval, uint64_t deadline, int cpumon_entitled); + uint64_t interval, uint64_t deadline, int cpumon_entitled); extern int task_suspend_cpumon(task_t task); extern int task_resume_cpumon(task_t task); extern int proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled); @@ -242,7 +242,7 @@ extern void task_clear_used_for_purging(task_t task); extern int task_importance_estimate(task_t task); extern kern_return_t thread_policy_set_internal(thread_t thread, thread_policy_flavor_t flavor, - thread_policy_t policy_info, mach_msg_type_number_t count); + thread_policy_t policy_info, mach_msg_type_number_t count); extern boolean_t thread_recompute_user_promotion_locked(thread_t thread); extern thread_qos_t thread_user_promotion_qos_for_pri(int priority); @@ -273,14 +273,14 @@ extern thread_qos_t thread_get_requested_qos(thread_t thread, int *relpri); typedef struct task_pend_token { uint32_t tpt_update_sockets :1, - tpt_update_timers :1, - tpt_update_watchers :1, - tpt_update_live_donor :1, - tpt_update_coal_sfi :1, - tpt_update_throttle :1, - tpt_update_thread_sfi :1, - tpt_force_recompute_pri :1, - tpt_update_tg_ui_flag :1; + tpt_update_timers :1, + tpt_update_watchers :1, + tpt_update_live_donor :1, + tpt_update_coal_sfi :1, + tpt_update_throttle :1, + tpt_update_thread_sfi :1, + tpt_force_recompute_pri :1, + tpt_update_tg_ui_flag :1; } *task_pend_token_t; extern void task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token); @@ -355,41 +355,41 @@ extern void thread_policy_update_tasklocked(thread_t thread, integer_t priority, #include "mach/resource_notify.h" /* from MIG */ /*! @function send_resource_violation - @abstract send usage monitor violation notification - - @param violator the task (process) violating its CPU budget - @param ledger_info the entry tracking the resource limit - @param flags see constants for type in sys/reason.h - - @result KERN_SUCCESS if the message was sent - - @discussion - send_resource_violation() calls the corresponding MIG routine - over the host special RESOURCE_NOTIFY port. -*/ + * @abstract send usage monitor violation notification + * + * @param violator the task (process) violating its CPU budget + * @param ledger_info the entry tracking the resource limit + * @param flags see constants for type in sys/reason.h + * + * @result KERN_SUCCESS if the message was sent + * + * @discussion + * send_resource_violation() calls the corresponding MIG routine + * over the host special RESOURCE_NOTIFY port. + */ kern_return_t send_resource_violation(typeof(send_cpu_usage_violation), - task_t violator, - struct ledger_entry_info *ledger_info, - resource_notify_flags_t flags); + task_t violator, + struct ledger_entry_info *ledger_info, + resource_notify_flags_t flags); /*! 
@function trace_resource_violation - @abstract trace violations on K32/64 - - @param code the (K64) DBG_MACH_RESOURCE trace code - @param ledger_info the entry tracking the resource limit - - @discussion - Trace observed usage and corresponding limit on K32 or K64. On - K32, a pair of trace points are used. The low nibble of the K32 - trace points must start at double the low nibble of the provided - K64 trace point. For example: - #define LOGWRITES_VIOLATED 0x022 - ... - #define LOGWRITES_VIOLATED_K32A 0x024 - #define LOGWRITES_VIOLATED_K32B 0x025 -*/ + * @abstract trace violations on K32/64 + * + * @param code the (K64) DBG_MACH_RESOURCE trace code + * @param ledger_info the entry tracking the resource limit + * + * @discussion + * Trace observed usage and corresponding limit on K32 or K64. On + * K32, a pair of trace points are used. The low nibble of the K32 + * trace points must start at double the low nibble of the provided + * K64 trace point. For example: + #define LOGWRITES_VIOLATED 0x022 + * ... + #define LOGWRITES_VIOLATED_K32A 0x024 + #define LOGWRITES_VIOLATED_K32B 0x025 + */ void trace_resource_violation(uint16_t code, - struct ledger_entry_info *ledger_info); + struct ledger_entry_info *ledger_info); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/printf.c b/osfmk/kern/printf.c index c8abeb624..9fb14d262 100644 --- a/osfmk/kern/printf.c +++ b/osfmk/kern/printf.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
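The task_pend_token bit-field reindented in policy_internal.h above supports a deferred-update idiom: code holding the task lock records which heavyweight updates it owes, then settles them after dropping the lock. A hedged sketch with illustrative locking and mutation; only the token fields and the completion routine come from the header.

static void
example_defer_policy_updates(task_t task)
{
	struct task_pend_token pend_token = {};

	task_lock(task);
	/* ... mutate policy state under the lock, recording side effects ... */
	pend_token.tpt_update_sockets = 1;
	pend_token.tpt_update_throttle = 1;
	task_unlock(task);

	/* perform the recorded updates without holding the task lock */
	task_policy_update_complete_unlocked(task, &pend_token);
}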
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -183,7 +183,7 @@ #define isdigit(d) ((d) >= '0' && (d) <= '9') #define Ctod(c) ((c) - '0') -#define MAXBUF (sizeof(long long int) * 8) /* enough for binary */ +#define MAXBUF (sizeof(long long int) * 8) /* enough for binary */ static char digs[] = "0123456789abcdef"; #if CONFIG_NO_PRINTF_STRINGS @@ -191,478 +191,486 @@ static char digs[] = "0123456789abcdef"; #undef printf #endif -int _consume_printf_args(int a __unused, ...) +int +_consume_printf_args(int a __unused, ...) { - return 0; + return 0; } -void _consume_kprintf_args(int a __unused, ...) +void +_consume_kprintf_args(int a __unused, ...) { } static int printnum( - unsigned long long int u, /* number to print */ - int base, - void (*putc)(int, void *), + unsigned long long int u, /* number to print */ + int base, + void (*putc)(int, void *), void *arg) { - char buf[MAXBUF]; /* build number here */ - char * p = &buf[MAXBUF-1]; + char buf[MAXBUF]; /* build number here */ + char * p = &buf[MAXBUF - 1]; int nprinted = 0; do { - *p-- = digs[u % base]; - u /= base; + *p-- = digs[u % base]; + u /= base; } while (u != 0); while (++p != &buf[MAXBUF]) { - (*putc)(*p, arg); - nprinted++; + (*putc)(*p, arg); + nprinted++; } return nprinted; } -boolean_t _doprnt_truncates = FALSE; +boolean_t _doprnt_truncates = FALSE; -#if (DEVELOPMENT || DEBUG) -boolean_t doprnt_hide_pointers = FALSE; +#if (DEVELOPMENT || DEBUG) +boolean_t doprnt_hide_pointers = FALSE; #else -boolean_t doprnt_hide_pointers = TRUE; +boolean_t doprnt_hide_pointers = TRUE; #endif int __doprnt( - const char *fmt, - va_list argp, - /* character output routine */ - void (*putc)(int, void *arg), + const char *fmt, + va_list argp, + /* character output routine */ + void (*putc)(int, void *arg), void *arg, - int radix, /* default radix - for '%r' */ - int is_log) + int radix, /* default radix - for '%r' */ + int is_log) { - int length; - int prec; - boolean_t ladjust; - char padc; - long long n; - unsigned long long u; - int plus_sign; - int sign_char; - boolean_t altfmt, truncate; - int base; - char c; - int capitals; - int long_long; + int length; + int prec; + boolean_t ladjust; + char padc; + long long n; + unsigned long long u; + int plus_sign; + int sign_char; + boolean_t altfmt, truncate; + int base; + char c; + int capitals; + int long_long; int nprinted = 0; while ((c = *fmt) != '\0') { - if (c != '%') { - (*putc)(c, arg); - nprinted++; - fmt++; - continue; - } - - fmt++; - - long_long = 0; - length = 0; - prec = -1; - ladjust = FALSE; - padc = ' '; - plus_sign = 0; - sign_char = 0; - altfmt = FALSE; - - while (TRUE) { - c = *fmt; - if (c == '#') { - altfmt = TRUE; - } - else if (c == '-') { - ladjust = TRUE; - } - else if (c == '+') { - plus_sign = '+'; - } - else if (c == ' ') { - if (plus_sign == 0) - plus_sign = ' '; + if (c != '%') { + (*putc)(c, arg); + nprinted++; + fmt++; + continue; } - else - break; - fmt++; - } - if (c == '0') { - padc = '0'; - c = *++fmt; - } + fmt++; - if (isdigit(c)) { - while(isdigit(c)) { - length = 10 * length + Ctod(c); - c = *++fmt; + long_long = 0; + length = 0; + prec = -1; + ladjust = FALSE; + padc = ' '; + plus_sign = 0; + sign_char = 0; + altfmt = FALSE; + + while (TRUE) { + 
c = *fmt; + if (c == '#') { + altfmt = TRUE; + } else if (c == '-') { + ladjust = TRUE; + } else if (c == '+') { + plus_sign = '+'; + } else if (c == ' ') { + if (plus_sign == 0) { + plus_sign = ' '; + } + } else { + break; + } + fmt++; } - } - else if (c == '*') { - length = va_arg(argp, int); - c = *++fmt; - if (length < 0) { - ladjust = !ladjust; - length = -length; + + if (c == '0') { + padc = '0'; + c = *++fmt; } - } - if (c == '.') { - c = *++fmt; if (isdigit(c)) { - prec = 0; - while(isdigit(c)) { - prec = 10 * prec + Ctod(c); + while (isdigit(c)) { + length = 10 * length + Ctod(c); + c = *++fmt; + } + } else if (c == '*') { + length = va_arg(argp, int); c = *++fmt; - } + if (length < 0) { + ladjust = !ladjust; + length = -length; + } } - else if (c == '*') { - prec = va_arg(argp, int); - c = *++fmt; + + if (c == '.') { + c = *++fmt; + if (isdigit(c)) { + prec = 0; + while (isdigit(c)) { + prec = 10 * prec + Ctod(c); + c = *++fmt; + } + } else if (c == '*') { + prec = va_arg(argp, int); + c = *++fmt; + } } - } - if (c == 'l') { - c = *++fmt; /* need it if sizeof(int) < sizeof(long) */ - if (sizeof(int) 32; p++) { - (*putc)(c, arg); - nprinted++; - } - nprinted += printnum((unsigned)( (u>>(j-1)) & ((2<<(i-j))-1)), - base, putc, arg); + if (u == 0) { + break; } - else if (u & (1<<(i-1))) { - if (any) - (*putc)(',', arg); - else { - (*putc)('<', arg); - any = TRUE; - } - nprinted++; - for (; (c = *p) > 32; p++) { - (*putc)(c, arg); - nprinted++; - } + + any = FALSE; + while ((i = *p++) != '\0') { + if (*fmt == 'B') { + i = 33 - i; + } + if (*p <= 32) { + /* + * Bit field + */ + int j; + if (any) { + (*putc)(',', arg); + } else { + (*putc)('<', arg); + any = TRUE; + } + nprinted++; + j = *p++; + if (*fmt == 'B') { + j = 32 - j; + } + for (; (c = *p) > 32; p++) { + (*putc)(c, arg); + nprinted++; + } + nprinted += printnum((unsigned)((u >> (j - 1)) & ((2 << (i - j)) - 1)), + base, putc, arg); + } else if (u & (1 << (i - 1))) { + if (any) { + (*putc)(',', arg); + } else { + (*putc)('<', arg); + any = TRUE; + } + nprinted++; + for (; (c = *p) > 32; p++) { + (*putc)(c, arg); + nprinted++; + } + } else { + for (; *p > 32; p++) { + continue; + } + } } - else { - for (; *p > 32; p++) - continue; + if (any) { + (*putc)('>', arg); + nprinted++; } - } - if (any) { - (*putc)('>', arg); - nprinted++; - } - break; + break; } case 'c': - c = va_arg(argp, int); - (*putc)(c, arg); - nprinted++; - break; + c = va_arg(argp, int); + (*putc)(c, arg); + nprinted++; + break; case 's': { - const char *p; - const char *p2; + const char *p; + const char *p2; - if (prec == -1) - prec = 0x7fffffff; /* MAXINT */ + if (prec == -1) { + prec = 0x7fffffff; /* MAXINT */ + } + p = va_arg(argp, char *); - p = va_arg(argp, char *); + if (p == NULL) { + p = ""; + } - if (p == NULL) - p = ""; + if (length > 0 && !ladjust) { + n = 0; + p2 = p; - if (length > 0 && !ladjust) { - n = 0; - p2 = p; + for (; *p != '\0' && n < prec; p++) { + n++; + } - for (; *p != '\0' && n < prec; p++) - n++; + p = p2; - p = p2; + while (n < length) { + (*putc)(' ', arg); + n++; + nprinted++; + } + } - while (n < length) { - (*putc)(' ', arg); - n++; - nprinted++; + n = 0; + + while ((n < prec) && (!(length > 0 && n >= length))) { + if (*p == '\0') { + break; + } + (*putc)(*p++, arg); + nprinted++; + n++; } - } - - n = 0; - - while ((n < prec) && (!(length > 0 && n >= length))) { - if (*p == '\0') { - break; - } - (*putc)(*p++, arg); - nprinted++; - n++; - } - - if (n < length && ladjust) { - while (n < length) { - (*putc)(' ', arg); - n++; - 
nprinted++; + + if (n < length && ladjust) { + while (n < length) { + (*putc)(' ', arg); + n++; + nprinted++; + } } - } - break; + break; } case 'o': - truncate = _doprnt_truncates; + truncate = _doprnt_truncates; case 'O': - base = 8; - goto print_unsigned; + base = 8; + goto print_unsigned; case 'D': { - unsigned char *up; - char *q, *p; - + unsigned char *up; + char *q, *p; + up = (unsigned char *)va_arg(argp, unsigned char *); p = (char *)va_arg(argp, char *); - if (length == -1) + if (length == -1) { length = 16; - while(length--) { + } + while (length--) { (*putc)(digs[(*up >> 4)], arg); (*putc)(digs[(*up & 0x0f)], arg); nprinted += 2; up++; if (length) { - for (q=p;*q;q++) { + for (q = p; *q; q++) { (*putc)(*q, arg); nprinted++; - } + } } } break; } case 'd': - truncate = _doprnt_truncates; - base = 10; - goto print_signed; + truncate = _doprnt_truncates; + base = 10; + goto print_signed; case 'u': - truncate = _doprnt_truncates; + truncate = _doprnt_truncates; case 'U': - base = 10; - goto print_unsigned; + base = 10; + goto print_unsigned; case 'p': - altfmt = TRUE; - if (sizeof(int)= 0) { - u = n; - sign_char = plus_sign; - } - else { - u = -n; - sign_char = '-'; - } - goto print_num; - - print_unsigned: - if (long_long) { - u = va_arg(argp, unsigned long long); - } else { - u = va_arg(argp, unsigned int); - } - goto print_num; - - print_num: - { - char buf[MAXBUF]; /* build number here */ - char * p = &buf[MAXBUF-1]; - static char digits[] = "0123456789abcdef0123456789ABCDEF"; - const char *prefix = NULL; - - if (truncate) u = (long long)((int)(u)); - - if (doprnt_hide_pointers && is_log) { - const char str[] = ""; - const char* strp = str; - int strl = sizeof(str) - 1; - - - if (u >= VM_MIN_KERNEL_AND_KEXT_ADDRESS && u <= VM_MAX_KERNEL_ADDRESS) { - while(*strp != '\0') { - (*putc)(*strp, arg); - strp++; - } - nprinted += strl; - break; + base = radix; + goto print_unsigned; + +print_signed: + if (long_long) { + n = va_arg(argp, long long); + } else { + n = va_arg(argp, int); } - } - - if (u != 0 && altfmt) { - if (base == 8) - prefix = "0"; - else if (base == 16) - prefix = "0x"; - } - - do { - /* Print in the correct case */ - *p-- = digits[(u % base)+capitals]; - u /= base; - } while (u != 0); - - length -= (int)(&buf[MAXBUF-1] - p); - if (sign_char) - length--; - if (prefix) - length -= (int)strlen(prefix); - - if (padc == ' ' && !ladjust) { - /* blank padding goes before prefix */ - while (--length >= 0) { - (*putc)(' ', arg); - nprinted++; - } - } - if (sign_char) { - (*putc)(sign_char, arg); - nprinted++; - } - if (prefix) { - while (*prefix) { - (*putc)(*prefix++, arg); - nprinted++; + if (n >= 0) { + u = n; + sign_char = plus_sign; + } else { + u = -n; + sign_char = '-'; } - } - if (padc == '0') { - /* zero padding goes after sign and prefix */ - while (--length >= 0) { - (*putc)('0', arg); - nprinted++; - } - } - while (++p != &buf[MAXBUF]) { - (*putc)(*p, arg); - nprinted++; - } - - if (ladjust) { - while (--length >= 0) { - (*putc)(' ', arg); - nprinted++; + goto print_num; + +print_unsigned: + if (long_long) { + u = va_arg(argp, unsigned long long); + } else { + u = va_arg(argp, unsigned int); + } + goto print_num; + +print_num: + { + char buf[MAXBUF];/* build number here */ + char * p = &buf[MAXBUF - 1]; + static char digits[] = "0123456789abcdef0123456789ABCDEF"; + const char *prefix = NULL; + + if (truncate) { + u = (long long)((int)(u)); + } + + if (doprnt_hide_pointers && is_log) { + const char str[] = ""; + const char* strp = str; + int strl = sizeof(str) - 1; 
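/*
 * Editorial annotation (hedged, not part of the patch): this is the
 * pointer-redaction path. When doprnt_hide_pointers is set and the
 * output is destined for the log (is_log), any value printed through
 * print_num that lies within [VM_MIN_KERNEL_AND_KEXT_ADDRESS,
 * VM_MAX_KERNEL_ADDRESS] is replaced by the fixed placeholder string
 * declared just above, keeping kernel addresses out of user-visible
 * logs. DEVELOPMENT and DEBUG kernels default the flag to FALSE, as
 * set earlier in this file.
 */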
+ + + if (u >= VM_MIN_KERNEL_AND_KEXT_ADDRESS && u <= VM_MAX_KERNEL_ADDRESS) { + while (*strp != '\0') { + (*putc)(*strp, arg); + strp++; + } + nprinted += strl; + break; + } + } + + if (u != 0 && altfmt) { + if (base == 8) { + prefix = "0"; + } else if (base == 16) { + prefix = "0x"; + } + } + + do { + /* Print in the correct case */ + *p-- = digits[(u % base) + capitals]; + u /= base; + } while (u != 0); + + length -= (int)(&buf[MAXBUF - 1] - p); + if (sign_char) { + length--; + } + if (prefix) { + length -= (int)strlen(prefix); + } + + if (padc == ' ' && !ladjust) { + /* blank padding goes before prefix */ + while (--length >= 0) { + (*putc)(' ', arg); + nprinted++; + } + } + if (sign_char) { + (*putc)(sign_char, arg); + nprinted++; + } + if (prefix) { + while (*prefix) { + (*putc)(*prefix++, arg); + nprinted++; + } + } + if (padc == '0') { + /* zero padding goes after sign and prefix */ + while (--length >= 0) { + (*putc)('0', arg); + nprinted++; + } + } + while (++p != &buf[MAXBUF]) { + (*putc)(*p, arg); + nprinted++; + } + + if (ladjust) { + while (--length >= 0) { + (*putc)(' ', arg); + nprinted++; + } + } + break; } - } - break; - } case '\0': - fmt--; - break; + fmt--; + break; default: - (*putc)(c, arg); - nprinted++; - } - fmt++; + (*putc)(c, arg); + nprinted++; + } + fmt++; } return nprinted; @@ -671,45 +679,42 @@ __doprnt( static void dummy_putc(int ch, void *arg) { - void (*real_putc)(char) = arg; - - real_putc(ch); + void (*real_putc)(char) = arg; + + real_putc(ch); } -void +void _doprnt( - const char *fmt, - va_list *argp, - /* character output routine */ - void (*putc)(char), - int radix) /* default radix - for '%r' */ + const char *fmt, + va_list *argp, + /* character output routine */ + void (*putc)(char), + int radix) /* default radix - for '%r' */ { - __doprnt(fmt, *argp, dummy_putc, putc, radix, FALSE); + __doprnt(fmt, *argp, dummy_putc, putc, radix, FALSE); } -void +void _doprnt_log( - const char *fmt, - va_list *argp, - /* character output routine */ - void (*putc)(char), - int radix) /* default radix - for '%r' */ + const char *fmt, + va_list *argp, + /* character output routine */ + void (*putc)(char), + int radix) /* default radix - for '%r' */ { - __doprnt(fmt, *argp, dummy_putc, putc, radix, TRUE); + __doprnt(fmt, *argp, dummy_putc, putc, radix, TRUE); } -#if MP_PRINTF -boolean_t new_printf_cpu_number = FALSE; -#endif /* MP_PRINTF */ +#if MP_PRINTF +boolean_t new_printf_cpu_number = FALSE; +#endif /* MP_PRINTF */ -decl_simple_lock_data(,printf_lock) -decl_simple_lock_data(,bsd_log_spinlock) +decl_simple_lock_data(, printf_lock) +decl_simple_lock_data(, bsd_log_spinlock) -/* - * Defined here to allow lock group to be statically allocated. 
- */ -static lck_grp_t oslog_stream_lock_grp; -decl_lck_spin_data(,oslog_stream_lock) +lck_grp_t oslog_stream_lock_grp; +decl_lck_spin_data(, oslog_stream_lock) void oslog_lock_init(void); extern void bsd_log_init(void); @@ -717,7 +722,6 @@ void bsd_log_lock(void); void bsd_log_unlock(void); void - printf_init(void) { /* @@ -731,7 +735,7 @@ printf_init(void) void bsd_log_lock(void) { - simple_lock(&bsd_log_spinlock); + simple_lock(&bsd_log_spinlock, LCK_GRP_NULL); } void @@ -750,8 +754,8 @@ oslog_lock_init(void) /* derived from boot_gets */ void safe_gets( - char *str, - int maxlen) + char *str, + int maxlen) { char *lp; int c; @@ -766,7 +770,7 @@ safe_gets( printf("\n"); *lp++ = 0; return; - + case '\b': case '#': case '\177': @@ -787,8 +791,7 @@ safe_gets( if (lp < strmax) { *lp++ = c; printf("%c", c); - } - else { + } else { printf("%c", '\007'); /* beep */ } } @@ -802,12 +805,14 @@ void conslog_putc( char c) { - if (!disableConsoleOutput) + if (!disableConsoleOutput) { cnputc(c); + } -#ifdef MACH_BSD - if (!kernel_debugger_entry_count) +#ifdef MACH_BSD + if (!kernel_debugger_entry_count) { log_putc(c); + } #endif } @@ -815,8 +820,9 @@ void cons_putc_locked( char c) { - if (!disableConsoleOutput) + if (!disableConsoleOutput) { cnputc(c); + } } static int @@ -850,7 +856,7 @@ vprintf_internal(const char *fmt, va_list ap_in, void *caller) return 0; } -__attribute__((noinline,not_tail_called)) +__attribute__((noinline, not_tail_called)) int printf(const char *fmt, ...) { @@ -864,7 +870,7 @@ printf(const char *fmt, ...) return ret; } -__attribute__((noinline,not_tail_called)) +__attribute__((noinline, not_tail_called)) int vprintf(const char *fmt, va_list ap) { @@ -874,25 +880,29 @@ vprintf(const char *fmt, va_list ap) void consdebug_putc(char c) { - if (!disableConsoleOutput) + if (!disableConsoleOutput) { cnputc(c); + } debug_putc(c); - if (!console_is_serial() && !disable_serial_output) + if (!console_is_serial() && !disable_serial_output) { PE_kputc(c); + } } void consdebug_putc_unbuffered(char c) { - if (!disableConsoleOutput) + if (!disableConsoleOutput) { cnputc_unbuffered(c); + } debug_putc(c); - if (!console_is_serial() && !disable_serial_output) - PE_kputc(c); + if (!console_is_serial() && !disable_serial_output) { + PE_kputc(c); + } } void @@ -910,7 +920,7 @@ consdebug_log(char c) int paniclog_append_noflush(const char *fmt, ...) { - va_list listp; + va_list listp; va_start(listp, fmt); _doprnt_log(fmt, &listp, consdebug_putc, 16); @@ -922,7 +932,7 @@ paniclog_append_noflush(const char *fmt, ...) int kdb_printf(const char *fmt, ...) { - va_list listp; + va_list listp; va_start(listp, fmt); _doprnt_log(fmt, &listp, consdebug_putc, 16); @@ -938,7 +948,7 @@ kdb_printf(const char *fmt, ...) int kdb_log(const char *fmt, ...) { - va_list listp; + va_list listp; va_start(listp, fmt); _doprnt(fmt, &listp, consdebug_log, 16); @@ -954,7 +964,7 @@ kdb_log(const char *fmt, ...) int kdb_printf_unbuffered(const char *fmt, ...) { - va_list listp; + va_list listp; va_start(listp, fmt); _doprnt(fmt, &listp, consdebug_putc_unbuffered, 16); @@ -978,9 +988,9 @@ copybyte(int c, void *arg) * We pass a double pointer, so that we can increment * the inside pointer. */ - char** p = arg; /* cast outside pointer */ - **p = c; /* store character */ - (*p)++; /* increment inside pointer */ + char** p = arg; /* cast outside pointer */ + **p = c; /* store character */ + (*p)++; /* increment inside pointer */ } /* @@ -990,14 +1000,14 @@ copybyte(int c, void *arg) int sprintf(char *buf, const char *fmt, ...) 
{ - va_list listp; + va_list listp; char *copybyte_str; - va_start(listp, fmt); - copybyte_str = buf; - __doprnt(fmt, listp, copybyte, ©byte_str, 16, FALSE); - va_end(listp); + va_start(listp, fmt); + copybyte_str = buf; + __doprnt(fmt, listp, copybyte, ©byte_str, 16, FALSE); + va_end(listp); *copybyte_str = '\0'; - return (int)strlen(buf); + return (int)strlen(buf); } #endif /* !CONFIG_EMBEDDED */ diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c index 40eb17242..26c60c043 100644 --- a/osfmk/kern/priority.c +++ b/osfmk/kern/priority.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
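The copybyte() helper and sprintf() at the end of printf.c above illustrate the engine's callback design: __doprnt() emits characters one at a time through a putc(int, void *) sink, so a buffer writer only needs to advance a char** cursor. A hedged restatement with illustrative example_* names:

static void
example_copybyte(int c, void *arg)
{
	char **cursor = arg;    /* outside pointer to the write position */

	**cursor = (char)c;     /* store the character */
	(*cursor)++;            /* advance the inside pointer */
}

static int
example_sprintf(char *buf, const char *fmt, ...)
{
	va_list listp;
	char *cursor = buf;

	va_start(listp, fmt);
	__doprnt(fmt, listp, example_copybyte, &cursor, 16, FALSE);
	va_end(listp);
	*cursor = '\0';

	return (int)(cursor - buf);
}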
*/ @@ -97,15 +97,13 @@ static void sched_update_thread_bucket(thread_t thread); void thread_quantum_expire( - timer_call_param_t p0, - timer_call_param_t p1) + timer_call_param_t p0, + timer_call_param_t p1) { - processor_t processor = p0; - thread_t thread = p1; - ast_t preempt; - uint64_t ctime; - int urgency; - uint64_t ignore1, ignore2; + processor_t processor = p0; + thread_t thread = p1; + ast_t preempt; + uint64_t ctime; assert(processor == current_processor()); assert(thread == current_thread()); @@ -130,7 +128,7 @@ thread_quantum_expire( ledger_credit(thread->t_threadledger, thread_ledgers.cpu_time, thread->quantum_remaining); if (thread->t_bankledger) { ledger_credit(thread->t_bankledger, bank_ledgers.cpu_time, - (thread->quantum_remaining - thread->t_deduct_bank_ledger_time)); + (thread->quantum_remaining - thread->t_deduct_bank_ledger_time)); } thread->t_deduct_bank_ledger_time = 0; @@ -165,8 +163,8 @@ thread_quantum_expire( new_computation = ctime - thread->computation_epoch; new_computation += thread->computation_metered; if (new_computation > max_unsafe_computation) { - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE)|DBG_FUNC_NONE, - (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_FAILSAFE) | DBG_FUNC_NONE, + (uintptr_t)thread->sched_pri, (uintptr_t)thread->sched_mode, 0, 0, 0); thread->safe_release = ctime + sched_safe_duration; @@ -177,15 +175,15 @@ thread_quantum_expire( /* * Recompute scheduled priority if appropriate. */ - if (SCHED(can_update_priority)(thread)) + if (SCHED(can_update_priority)(thread)) { SCHED(update_priority)(thread); - else + } else { SCHED(lightweight_update_priority)(thread); + } - if (thread->sched_mode != TH_MODE_REALTIME) + if (thread->sched_mode != TH_MODE_REALTIME) { SCHED(quantum_expire)(thread); - - processor_state_update_from_thread(processor, thread); + } /* * This quantum is up, give this thread another. 
@@ -218,11 +216,13 @@ thread_quantum_expire( */ ast_t check_reason = AST_QUANTUM; - if (thread->task == kernel_task) + if (thread->task == kernel_task) { check_reason |= AST_URGENT; + } - if ((preempt = csw_check(processor, check_reason)) != AST_NONE) + if ((preempt = csw_check(thread, processor, check_reason)) != AST_NONE) { ast_on(preempt); + } /* * AST_KEVENT does not send an IPI when setting the AST, @@ -234,21 +234,22 @@ thread_quantum_expire( thread_unlock(thread); timer_call_quantum_timer_enter(&processor->quantum_timer, thread, - processor->quantum_end, ctime); + processor->quantum_end, ctime); /* Tell platform layer that we are still running this thread */ - urgency = thread_get_urgency(thread, &ignore1, &ignore2); + thread_urgency_t urgency = thread_get_urgency(thread, NULL, NULL); machine_thread_going_on_core(thread, urgency, 0, 0, ctime); machine_switch_perfcontrol_state_update(QUANTUM_EXPIRY, ctime, - 0, thread); + 0, thread); #if defined(CONFIG_SCHED_TIMESHARE_CORE) sched_timeshare_consider_maintenance(ctime); #endif /* CONFIG_SCHED_TIMESHARE_CORE */ #if __arm__ || __arm64__ - if (thread->sched_mode == TH_MODE_REALTIME) + if (thread->sched_mode == TH_MODE_REALTIME) { sched_consider_recommended_cores(ctime, thread); + } #endif /* __arm__ || __arm64__ */ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_QUANTUM_EXPIRED) | DBG_FUNC_END, preempt, 0, 0, 0, 0); @@ -270,10 +271,11 @@ sched_set_thread_base_priority(thread_t thread, int priority) assert(priority >= MINPRI); uint64_t ctime = 0; - if (thread->sched_mode == TH_MODE_REALTIME) + if (thread->sched_mode == TH_MODE_REALTIME) { assert(priority <= BASEPRI_RTQUEUES); - else + } else { assert(priority < BASEPRI_RTQUEUES); + } int old_base_pri = thread->base_pri; thread->base_pri = priority; @@ -287,17 +289,17 @@ sched_set_thread_base_priority(thread_t thread, int priority) assert(thread->last_made_runnable_time == THREAD_NOT_RUNNABLE); } - /* - * Currently the perfcontrol_attr depends on the base pri of the - * thread. Therefore, we use this function as the hook for the - * perfcontrol callout. + /* + * Currently the perfcontrol_attr depends on the base pri of the + * thread. Therefore, we use this function as the hook for the + * perfcontrol callout. 
*/ if (thread == current_thread() && old_base_pri != priority) { if (!ctime) { - ctime = mach_approximate_time(); + ctime = mach_approximate_time(); } machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE, - ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread); + ctime, PERFCONTROL_CALLOUT_WAKE_UNSAFE, thread); } sched_update_thread_bucket(thread); @@ -327,8 +329,9 @@ thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options) int priority = thread->base_pri; - if (sched_mode == TH_MODE_TIMESHARE) + if (sched_mode == TH_MODE_TIMESHARE) { priority = SCHED(compute_timeshare_priority)(thread); + } if (sched_flags & TH_SFLAG_DEPRESS) { /* thread_yield_internal overrides kernel mutex promotion */ @@ -342,19 +345,23 @@ thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options) if (sched_flags & TH_SFLAG_PROMOTED) { priority = MAX(priority, thread->promotion_priority); - if (sched_mode != TH_MODE_REALTIME) + if (sched_mode != TH_MODE_REALTIME) { priority = MIN(priority, MAXPRI_PROMOTE); + } } if (sched_flags & TH_SFLAG_PROMOTE_REASON_MASK) { - if (sched_flags & TH_SFLAG_RW_PROMOTED) + if (sched_flags & TH_SFLAG_RW_PROMOTED) { priority = MAX(priority, MINPRI_RWLOCK); + } - if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) + if (sched_flags & TH_SFLAG_WAITQ_PROMOTED) { priority = MAX(priority, MINPRI_WAITQ); + } - if (sched_flags & TH_SFLAG_EXEC_PROMOTED) + if (sched_flags & TH_SFLAG_EXEC_PROMOTED) { priority = MAX(priority, MINPRI_EXEC); + } } } @@ -364,10 +371,10 @@ thread_recompute_sched_pri(thread_t thread, set_sched_pri_options_t options) void sched_default_quantum_expire(thread_t thread __unused) { - /* - * No special behavior when a timeshare, fixed, or realtime thread - * uses up its entire quantum - */ + /* + * No special behavior when a timeshare, fixed, or realtime thread + * uses up its entire quantum + */ } #if defined(CONFIG_SCHED_TIMESHARE_CORE) @@ -399,15 +406,17 @@ lightweight_update_priority(thread_t thread) * during contention for processor * resources. */ - if (thread->pri_shift < INT8_MAX) + if (thread->pri_shift < INT8_MAX) { thread->sched_usage += delta; + } thread->cpu_delta += delta; priority = sched_compute_timeshare_priority(thread); - if (priority != thread->sched_pri) + if (priority != thread->sched_pri) { thread_recompute_sched_pri(thread, SETPRI_LAZY); + } } } @@ -419,16 +428,16 @@ lightweight_update_priority(thread_t thread) * +/- is determined by the sign of shift 2. 
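 *
 * Hedged worked example (editorial annotation, not original source):
 * entry {1, 3} gives usage' = (usage >> 1) + (usage >> 3) = 0.625 * usage,
 * and entry {2, -7} gives (usage >> 2) - (usage >> 7) ~= 0.242 * usage;
 * entry n of sched_decay_shifts below approximates the per-tick
 * exponential decay factor (5/8)^n that update_priority() applies for
 * n elapsed scheduler ticks.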
*/ struct shift_data { - int shift1; - int shift2; + int shift1; + int shift2; }; -#define SCHED_DECAY_TICKS 32 -static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = { - {1,1},{1,3},{1,-3},{2,-7},{3,5},{3,-5},{4,-8},{5,7}, - {5,-7},{6,-10},{7,10},{7,-9},{8,-11},{9,12},{9,-11},{10,-13}, - {11,14},{11,-13},{12,-15},{13,17},{13,-15},{14,-17},{15,19},{16,18}, - {16,-19},{17,22},{18,20},{18,-20},{19,26},{20,22},{20,-22},{21,-27} +#define SCHED_DECAY_TICKS 32 +static struct shift_data sched_decay_shifts[SCHED_DECAY_TICKS] = { + {1, 1}, {1, 3}, {1, -3}, {2, -7}, {3, 5}, {3, -5}, {4, -8}, {5, 7}, + {5, -7}, {6, -10}, {7, 10}, {7, -9}, {8, -11}, {9, 12}, {9, -11}, {10, -13}, + {11, 14}, {11, -13}, {12, -15}, {13, 17}, {13, -15}, {14, -17}, {15, 19}, {16, 18}, + {16, -19}, {17, 22}, {18, 20}, {18, -20}, {19, 26}, {20, 22}, {20, -22}, {21, -27} }; /* @@ -478,10 +487,11 @@ sched_compute_timeshare_priority(thread_t thread) /* start with base priority */ int priority = thread->base_pri - (thread->sched_usage >> thread->pri_shift); - if (priority < MINPRI_USER) + if (priority < MINPRI_USER) { priority = MINPRI_USER; - else if (priority > MAXPRI_KERNEL) + } else if (priority > MAXPRI_KERNEL) { priority = MAXPRI_KERNEL; + } return priority; } @@ -497,12 +507,13 @@ sched_compute_timeshare_priority(thread_t thread) */ boolean_t can_update_priority( - thread_t thread) + thread_t thread) { - if (sched_tick == thread->sched_stamp) - return (FALSE); - else - return (TRUE); + if (sched_tick == thread->sched_stamp) { + return FALSE; + } else { + return TRUE; + } } /* @@ -514,7 +525,7 @@ can_update_priority( */ void update_priority( - thread_t thread) + thread_t thread) { uint32_t ticks, delta; @@ -524,8 +535,9 @@ update_priority( thread->sched_stamp += ticks; /* If requested, accelerate aging of sched_usage */ - if (sched_decay_usage_age_factor > 1) + if (sched_decay_usage_age_factor > 1) { ticks *= sched_decay_usage_age_factor; + } /* * Gather cpu usage data. @@ -534,11 +546,12 @@ update_priority( if (ticks < SCHED_DECAY_TICKS) { /* * Accumulate timesharing usage only during contention for processor - * resources. Use the pri_shift from the previous tick window to + * resources. Use the pri_shift from the previous tick window to * determine if the system was in a contended state. */ - if (thread->pri_shift < INT8_MAX) + if (thread->pri_shift < INT8_MAX) { thread->sched_usage += delta; + } thread->cpu_usage += delta + thread->cpu_delta; thread->cpu_delta = 0; @@ -547,14 +560,14 @@ update_priority( if (shiftp->shift2 > 0) { thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) + - (thread->cpu_usage >> shiftp->shift2); + (thread->cpu_usage >> shiftp->shift2); thread->sched_usage = (thread->sched_usage >> shiftp->shift1) + - (thread->sched_usage >> shiftp->shift2); + (thread->sched_usage >> shiftp->shift2); } else { thread->cpu_usage = (thread->cpu_usage >> shiftp->shift1) - - (thread->cpu_usage >> -(shiftp->shift2)); + (thread->cpu_usage >> -(shiftp->shift2)); thread->sched_usage = (thread->sched_usage >> shiftp->shift1) - - (thread->sched_usage >> -(shiftp->shift2)); + (thread->sched_usage >> -(shiftp->shift2)); } } else { thread->cpu_usage = thread->cpu_delta = 0; @@ -579,8 +592,9 @@ update_priority( thread->pri_shift = sched_pri_shifts[thread->th_sched_bucket]; /* Recompute scheduled priority if appropriate. 
*/ - if (thread->sched_mode == TH_MODE_TIMESHARE) + if (thread->sched_mode == TH_MODE_TIMESHARE) { thread_recompute_sched_pri(thread, SETPRI_LAZY); + } } #endif /* CONFIG_SCHED_TIMESHARE_CORE */ @@ -597,7 +611,7 @@ static void sched_incr_bucket(sched_bucket_t bucket) { assert(bucket >= TH_BUCKET_FIXPRI && - bucket <= TH_BUCKET_SHARE_BG); + bucket <= TH_BUCKET_SHARE_BG); hw_atomic_add(&sched_run_buckets[bucket], 1); } @@ -606,7 +620,7 @@ static void sched_decr_bucket(sched_bucket_t bucket) { assert(bucket >= TH_BUCKET_FIXPRI && - bucket <= TH_BUCKET_SHARE_BG); + bucket <= TH_BUCKET_SHARE_BG); assert(sched_run_buckets[bucket] > 0); @@ -618,7 +632,7 @@ sched_decr_bucket(sched_bucket_t bucket) uint32_t sched_run_incr(thread_t thread) { - assert((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN); + assert((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN); uint32_t new_count = hw_atomic_add(&sched_run_buckets[TH_BUCKET_RUN], 1); @@ -630,7 +644,7 @@ sched_run_incr(thread_t thread) uint32_t sched_run_decr(thread_t thread) { - assert((thread->state & (TH_RUN|TH_IDLE)) != TH_RUN); + assert((thread->state & (TH_RUN | TH_IDLE)) != TH_RUN); sched_decr_bucket(thread->th_sched_bucket); @@ -652,14 +666,15 @@ sched_update_thread_bucket(thread_t thread) break; case TH_MODE_TIMESHARE: - if (thread->base_pri > BASEPRI_DEFAULT) + if (thread->base_pri > BASEPRI_DEFAULT) { new_bucket = TH_BUCKET_SHARE_FG; - else if (thread->base_pri > BASEPRI_UTILITY) + } else if (thread->base_pri > BASEPRI_UTILITY) { new_bucket = TH_BUCKET_SHARE_DF; - else if (thread->base_pri > MAXPRI_THROTTLE) + } else if (thread->base_pri > MAXPRI_THROTTLE) { new_bucket = TH_BUCKET_SHARE_UT; - else + } else { new_bucket = TH_BUCKET_SHARE_BG; + } break; default: @@ -671,7 +686,7 @@ sched_update_thread_bucket(thread_t thread) thread->th_sched_bucket = new_bucket; thread->pri_shift = sched_pri_shifts[new_bucket]; - if ((thread->state & (TH_RUN|TH_IDLE)) == TH_RUN) { + if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) { sched_decr_bucket(old_bucket); sched_incr_bucket(new_bucket); } @@ -715,8 +730,9 @@ sched_thread_mode_demote(thread_t thread, uint32_t reason) assert(reason & TH_SFLAG_DEMOTED_MASK); assert((thread->sched_flags & reason) != reason); - if (thread->policy_reset) + if (thread->policy_reset) { return; + } if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) { /* Another demotion reason is already active */ @@ -736,8 +752,9 @@ sched_thread_mode_demote(thread_t thread, uint32_t reason) thread_recompute_priority(thread); - if (removed) + if (removed) { thread_run_queue_reinsert(thread, SCHED_TAILQ); + } } /* @@ -767,8 +784,9 @@ sched_thread_mode_undemote(thread_t thread, uint32_t reason) thread_recompute_priority(thread); - if (removed) + if (removed) { thread_run_queue_reinsert(thread, SCHED_TAILQ); + } } /* @@ -781,8 +799,8 @@ sched_thread_mode_undemote(thread_t thread, uint32_t reason) */ void sched_thread_promote_to_pri(thread_t thread, - int priority, - __kdebug_only uintptr_t trace_obj /* already unslid */) + int priority, + __kdebug_only uintptr_t trace_obj /* already unslid */) { assert((thread->sched_flags & TH_SFLAG_PROMOTED) != TH_SFLAG_PROMOTED); assert(thread->promotion_priority == 0); @@ -790,7 +808,7 @@ sched_thread_promote_to_pri(thread_t thread, assert(priority > 0); KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTED), - thread_tid(thread), trace_obj, priority); + thread_tid(thread), trace_obj, priority); thread->sched_flags |= TH_SFLAG_PROMOTED; thread->promotion_priority = priority; @@ -808,8 +826,8 @@ 
sched_thread_promote_to_pri(thread_t thread, */ void sched_thread_update_promotion_to_pri(thread_t thread, - int priority, - __kdebug_only uintptr_t trace_obj /* already unslid */) + int priority, + __kdebug_only uintptr_t trace_obj /* already unslid */) { assert(thread->promotions > 0); assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED); @@ -818,7 +836,7 @@ sched_thread_update_promotion_to_pri(thread_t thread, if (thread->promotion_priority < priority) { KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_PROMOTED_UPDATE), - thread_tid(thread), trace_obj, priority); + thread_tid(thread), trace_obj, priority); thread->promotion_priority = priority; thread_recompute_sched_pri(thread, SETPRI_DEFAULT); @@ -833,13 +851,13 @@ sched_thread_update_promotion_to_pri(thread_t thread, */ void sched_thread_unpromote(thread_t thread, - __kdebug_only uintptr_t trace_obj /* already unslid */) + __kdebug_only uintptr_t trace_obj /* already unslid */) { assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED); assert(thread->promotion_priority > 0); KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_UNPROMOTED), - thread_tid(thread), trace_obj, 0); + thread_tid(thread), trace_obj, 0); thread->sched_flags &= ~TH_SFLAG_PROMOTED; thread->promotion_priority = 0; @@ -851,11 +869,13 @@ sched_thread_unpromote(thread_t thread, void assert_promotions_invariant(thread_t thread) { - if (thread->promotions > 0) + if (thread->promotions > 0) { assert((thread->sched_flags & TH_SFLAG_PROMOTED) == TH_SFLAG_PROMOTED); + } - if (thread->promotions == 0) + if (thread->promotions == 0) { assert((thread->sched_flags & TH_SFLAG_PROMOTED) != TH_SFLAG_PROMOTED); + } } /* @@ -869,8 +889,8 @@ assert_promotions_invariant(thread_t thread) */ void sched_thread_promote_reason(thread_t thread, - uint32_t reason, - __kdebug_only uintptr_t trace_obj /* already unslid */) + uint32_t reason, + __kdebug_only uintptr_t trace_obj /* already unslid */) { assert(reason & TH_SFLAG_PROMOTE_REASON_MASK); assert((thread->sched_flags & reason) != reason); @@ -878,18 +898,18 @@ sched_thread_promote_reason(thread_t thread, switch (reason) { case TH_SFLAG_RW_PROMOTED: KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_PROMOTE), - thread_tid(thread), thread->sched_pri, - thread->base_pri, trace_obj); + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); break; case TH_SFLAG_WAITQ_PROMOTED: KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_PROMOTE), - thread_tid(thread), thread->sched_pri, - thread->base_pri, trace_obj); + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); break; case TH_SFLAG_EXEC_PROMOTED: KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_PROMOTE), - thread_tid(thread), thread->sched_pri, - thread->base_pri, trace_obj); + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); break; } @@ -906,8 +926,8 @@ sched_thread_promote_reason(thread_t thread, */ void sched_thread_unpromote_reason(thread_t thread, - uint32_t reason, - __kdebug_only uintptr_t trace_obj /* already unslid */) + uint32_t reason, + __kdebug_only uintptr_t trace_obj /* already unslid */) { assert(reason & TH_SFLAG_PROMOTE_REASON_MASK); assert((thread->sched_flags & reason) == reason); @@ -915,18 +935,18 @@ sched_thread_unpromote_reason(thread_t thread, switch (reason) { case TH_SFLAG_RW_PROMOTED: KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_RW_DEMOTE), - thread_tid(thread), thread->sched_pri, - thread->base_pri, trace_obj); + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); break; case TH_SFLAG_WAITQ_PROMOTED: 
KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAITQ_DEMOTE), - thread_tid(thread), thread->sched_pri, - thread->base_pri, trace_obj); + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); break; case TH_SFLAG_EXEC_PROMOTED: KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_EXEC_DEMOTE), - thread_tid(thread), thread->sched_pri, - thread->base_pri, trace_obj); + thread_tid(thread), thread->sched_pri, + thread->base_pri, trace_obj); break; } @@ -934,5 +954,3 @@ sched_thread_unpromote_reason(thread_t thread, thread_recompute_sched_pri(thread, SETPRI_DEFAULT); } - - diff --git a/osfmk/kern/priority_queue.c b/osfmk/kern/priority_queue.c index 5314d6034..85ee093ae 100644 --- a/osfmk/kern/priority_queue.c +++ b/osfmk/kern/priority_queue.c @@ -31,7 +31,7 @@ #ifdef __LP64__ static_assert(PRIORITY_QUEUE_ENTRY_CHILD_BITS >= VM_KERNEL_POINTER_SIGNIFICANT_BITS, - "Priority Queue child pointer packing failed"); + "Priority Queue child pointer packing failed"); #endif priority_queue_entry_t @@ -77,7 +77,7 @@ pqueue_pair_meld(priority_queue_entry_t elt, priority_queue_compare_fn_t cmp_fn) void pqueue_destroy(struct priority_queue *q, size_t offset, - void (^callback)(void *e)) + void (^callback)(void *e)) { assert(callback != NULL); priority_queue_entry_t head = pqueue_unpack_root(q); @@ -87,7 +87,9 @@ pqueue_destroy(struct priority_queue *q, size_t offset, priority_queue_entry_t child_list = pqueue_entry_unpack_child(head); if (child_list) { tail->next = child_list; - while (tail->next) tail = tail->next; + while (tail->next) { + tail = tail->next; + } } priority_queue_entry_t elt = head; diff --git a/osfmk/kern/priority_queue.h b/osfmk/kern/priority_queue.h index ff9836b0c..dcb7d76a8 100644 --- a/osfmk/kern/priority_queue.h +++ b/osfmk/kern/priority_queue.h @@ -178,7 +178,7 @@ typedef struct priority_queue_entry { * comparision result to indicate relative ordering of elements according to the heap type */ typedef int (^priority_queue_compare_fn_t)(struct priority_queue_entry *e1, - struct priority_queue_entry *e2); + struct priority_queue_entry *e2); /* * Standard comparision routines for max and min heap. @@ -191,21 +191,21 @@ priority_queue_element_builtin_key_compare(priority_queue_entry_t e1, priority_q } #define priority_heap_make_comparator(name1, name2, type, field, ...) \ - (^int(priority_queue_entry_t __e1, priority_queue_entry_t __e2){ \ - type *name1 = pqe_element_fast(__e1, type, field); \ - type *name2 = pqe_element_fast(__e2, type, field); \ - __VA_ARGS__; \ - }) + (^int(priority_queue_entry_t __e1, priority_queue_entry_t __e2){ \ + type *name1 = pqe_element_fast(__e1, type, field); \ + type *name2 = pqe_element_fast(__e2, type, field); \ + __VA_ARGS__; \ + }) #define PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE \ - (^int(priority_queue_entry_t e1, priority_queue_entry_t e2){ \ - return -priority_queue_element_builtin_key_compare(e1, e2); \ - }) + (^int(priority_queue_entry_t e1, priority_queue_entry_t e2){ \ + return -priority_queue_element_builtin_key_compare(e1, e2); \ + }) #define PRIORITY_QUEUE_SCHED_PRI_MIN_HEAP_COMPARE \ - (^int(priority_queue_entry_t e1, priority_queue_entry_t e2){ \ - return priority_queue_element_builtin_key_compare(e1, e2); \ - }) + (^int(priority_queue_entry_t e1, priority_queue_entry_t e2){ \ + return priority_queue_element_builtin_key_compare(e1, e2); \ + }) /* * Helper routines for packing/unpacking the child pointer in heap nodes. 
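 *
 * (Hedged usage sketch, not part of this header, for the comparator macros
 * above: "struct my_elem" and its "weight" field are hypothetical. The
 * macro binds the two container pointers to the names given and splices the
 * trailing statement in as the comparison body; assuming the builtin
 * convention above, a positive result promotes the first element toward
 * the root.)
 *
 *	struct my_elem {
 *		int                         weight;	// hypothetical payload
 *		struct priority_queue_entry link;	// pairing-heap linkage
 *	};
 *
 *	priority_queue_compare_fn_t cmp =
 *	    priority_heap_make_comparator(a, b, struct my_elem, link,
 *	        return b->weight - a->weight);	// smaller weight rises: min-heap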
@@ -266,21 +266,21 @@ struct priority_queue { * containing qe */ #define pqe_element(qe, type, field) ({ \ - priority_queue_entry_t _tmp_entry = (qe); \ - _tmp_entry ? pqe_element_fast(_tmp_entry, type, field) : ((type *)NULL); \ + priority_queue_entry_t _tmp_entry = (qe); \ + _tmp_entry ? pqe_element_fast(_tmp_entry, type, field) : ((type *)NULL); \ }) #define pqueue_has_generic_keys(p) \ - (((p)->pq_root_packed & PRIORITY_QUEUE_GENERIC_KEY) != 0) + (((p)->pq_root_packed & PRIORITY_QUEUE_GENERIC_KEY) != 0) #define pqueue_has_builtin_keys(p) \ - (((p)->pq_root_packed & PRIORITY_QUEUE_GENERIC_KEY) == 0) + (((p)->pq_root_packed & PRIORITY_QUEUE_GENERIC_KEY) == 0) #define pqueue_is_min_heap(p) \ - (((p)->pq_root_packed & PRIORITY_QUEUE_MIN_HEAP) != 0) + (((p)->pq_root_packed & PRIORITY_QUEUE_MIN_HEAP) != 0) #define pqueue_is_max_heap(p) \ - (((p)->pq_root_packed & PRIORITY_QUEUE_MIN_HEAP) == 0) + (((p)->pq_root_packed & PRIORITY_QUEUE_MIN_HEAP) == 0) /* * Macro: pqueue_pack_root @@ -293,8 +293,8 @@ struct priority_queue { */ #define pqueue_pack_root(q, root_ptr) \ MACRO_BEGIN \ - uintptr_t __flags = (q)->pq_root_packed & PRIORITY_QUEUE_ROOT_FLAGS_MASK; \ - (q)->pq_root_packed = (uintptr_t)(root_ptr) | __flags; \ + uintptr_t __flags = (q)->pq_root_packed & PRIORITY_QUEUE_ROOT_FLAGS_MASK; \ + (q)->pq_root_packed = (uintptr_t)(root_ptr) | __flags; \ MACRO_END /* @@ -308,7 +308,7 @@ MACRO_END * */ #define pqueue_unpack_root(q) \ - ((priority_queue_entry_t)((q)->pq_root_packed & PRIORITY_QUEUE_ROOT_POINTER_MASK)) + ((priority_queue_entry_t)((q)->pq_root_packed & PRIORITY_QUEUE_ROOT_POINTER_MASK)) /* * Macro: pqueue_list_remove @@ -333,8 +333,9 @@ pqueue_list_remove(priority_queue_entry_t elt) elt->prev->next = elt->next; } /* Update prev for next element in list */ - if (elt->next != NULL) + if (elt->next != NULL) { elt->next->prev = elt->prev; + } } /* @@ -353,7 +354,7 @@ pqueue_list_remove(priority_queue_entry_t elt) */ static inline priority_queue_entry_t pqueue_merge(priority_queue_entry_t subtree_a, priority_queue_entry_t subtree_b, - priority_queue_compare_fn_t cmp_fn) + priority_queue_compare_fn_t cmp_fn) { priority_queue_entry_t merge_result = NULL; if (subtree_a == NULL) { @@ -370,8 +371,9 @@ pqueue_merge(priority_queue_entry_t subtree_a, priority_queue_entry_t subtree_b, /* Insert the child as the first element in the parent's child list */ child->next = pqueue_entry_unpack_child(parent); child->prev = parent; - if (pqueue_entry_unpack_child(parent) != NULL) + if (pqueue_entry_unpack_child(parent) != NULL) { pqueue_entry_unpack_child(parent)->prev = child; + } /* Create the parent child relationship */ pqueue_entry_pack_child(parent, child); parent->next = NULL; @@ -415,7 +417,7 @@ pqueue_pair_meld(priority_queue_entry_t e, priority_queue_compare_fn_t cmp_fn); */ static inline void pqueue_update_key(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_key_t new_key) + priority_queue_key_t new_key) { if (pqueue_has_builtin_keys(que)) { assert(new_key <= UINT8_MAX); @@ -439,10 +441,12 @@ pqueue_update_key(struct priority_queue *que, priority_queue_entry_t elt, */ static inline priority_queue_entry_t pqueue_remove_root(struct priority_queue *que, priority_queue_entry_t old_root, - priority_queue_compare_fn_t cmp_fn) + priority_queue_compare_fn_t cmp_fn) { priority_queue_entry_t new_root = pqueue_entry_unpack_child(old_root); - if (new_root) new_root = pqueue_pair_meld(new_root, cmp_fn); + if (new_root) { + new_root = pqueue_pair_meld(new_root, cmp_fn); + } 
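	/*
	 * (Descriptive note: this is the pairing-heap delete step. The removed
	 * root's child list is melded pairwise back into a single subtree,
	 * which is then packed in as the new root; the cost is amortized
	 * O(log n) per removal.)
	 */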
pqueue_pack_root(que, new_root); return old_root; } @@ -461,7 +465,7 @@ pqueue_remove_root(struct priority_queue *que, priority_queue_entry_t old_root, */ static inline priority_queue_entry_t pqueue_remove_non_root(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_compare_fn_t cmp_fn) + priority_queue_compare_fn_t cmp_fn) { priority_queue_entry_t child, new_root; @@ -503,8 +507,8 @@ pqueue_remove_non_root(struct priority_queue *que, priority_queue_entry_t elt, * None */ void -pqueue_destroy(struct priority_queue *q, size_t offset, - void (^callback)(void *e)); + pqueue_destroy(struct priority_queue *q, size_t offset, + void (^callback)(void *e)); /* * Priority Queue functionality routines @@ -531,8 +535,8 @@ pqueue_destroy(struct priority_queue *q, size_t offset, * elt */ #define priority_queue_entry_key(q, elt) ({ \ - assert(pqueue_has_builtin_keys(q)); \ - (priority_queue_key_t)((elt)->key); \ + assert(pqueue_has_builtin_keys(q)); \ + (priority_queue_key_t)((elt)->key); \ }) /* @@ -551,8 +555,8 @@ pqueue_destroy(struct priority_queue *q, size_t offset, */ #define priority_queue_init(q, flags) \ MACRO_BEGIN \ - pqueue_pack_root((q), NULL); \ - (q)->pq_root_packed = (flags); \ + pqueue_pack_root((q), NULL); \ + (q)->pq_root_packed = (flags); \ MACRO_END /* @@ -567,10 +571,10 @@ MACRO_END */ #define priority_queue_entry_init(qe) \ MACRO_BEGIN \ - (qe)->next = NULL; \ - (qe)->prev = NULL; \ - pqueue_entry_pack_child((qe), NULL); \ - (qe)->key = PRIORITY_QUEUE_KEY_NONE; \ + (qe)->next = NULL; \ + (qe)->prev = NULL; \ + pqueue_entry_pack_child((qe), NULL); \ + (qe)->key = PRIORITY_QUEUE_KEY_NONE; \ MACRO_END /* @@ -588,7 +592,7 @@ MACRO_END */ static inline boolean_t priority_queue_insert(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) + priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) { priority_queue_entry_t new_root; @@ -612,7 +616,7 @@ priority_queue_insert(struct priority_queue *que, priority_queue_entry_t elt, */ static inline boolean_t priority_queue_remove(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_compare_fn_t cmp_fn) + priority_queue_compare_fn_t cmp_fn) { if (elt == pqueue_unpack_root(que)) { pqueue_remove_root(que, elt, cmp_fn); @@ -651,7 +655,7 @@ priority_queue_remove(struct priority_queue *que, priority_queue_entry_t elt, */ static inline boolean_t priority_queue_entry_decrease(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) + priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) { boolean_t was_root = priority_queue_remove(que, elt, cmp_fn); /* Insert it back in the heap; insertion also causes the priority update in the element */ @@ -685,7 +689,7 @@ priority_queue_entry_decrease(struct priority_queue *que, priority_queue_entry_t */ static inline boolean_t priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t elt, - priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) + priority_queue_key_t new_key, priority_queue_compare_fn_t cmp_fn) { if (elt == pqueue_unpack_root(que)) { pqueue_update_key(que, elt, new_key); @@ -719,8 +723,8 @@ priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t * max element */ #define priority_queue_max(q, type, field) ({ \ - assert(pqueue_is_max_heap(q)); \ - pqe_element(pqueue_unpack_root(q), type, field); \ + assert(pqueue_is_max_heap(q)); \ + 
pqe_element(pqueue_unpack_root(q), type, field); \ }) /* @@ -737,8 +741,8 @@ priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t * min element */ #define priority_queue_min(q, type, field) ({ \ - assert(pqueue_is_min_heap(que)); \ - priority_queue_entry_key(pqueue_unpack_root(q), type, field); \ + assert(pqueue_is_min_heap(que)); \ + priority_queue_entry_key(pqueue_unpack_root(q), type, field); \ }) /* @@ -752,8 +756,8 @@ priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t * max key */ #define priority_queue_max_key(q) ({ \ - assert(pqueue_is_max_heap(q)); \ - priority_queue_entry_key(q, pqueue_unpack_root(q)); \ + assert(pqueue_is_max_heap(q)); \ + priority_queue_entry_key(q, pqueue_unpack_root(q)); \ }) /* @@ -767,8 +771,8 @@ priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t * min key */ #define priority_queue_min_key(q) ({ \ - assert(pqueue_is_min_heap(q)); \ - priority_queue_entry_key(pqueue_unpack_root(q)); \ + assert(pqueue_is_min_heap(q)); \ + priority_queue_entry_key(pqueue_unpack_root(q)); \ }) /* @@ -785,8 +789,8 @@ priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t * max element */ #define priority_queue_remove_max(q, type, field, cmp_fn) ({ \ - assert(pqueue_is_max_heap(q)); \ - pqe_element(pqueue_remove_root(q, pqueue_unpack_root(q), cmp_fn), type, field); \ + assert(pqueue_is_max_heap(q)); \ + pqe_element(pqueue_remove_root(q, pqueue_unpack_root(q), cmp_fn), type, field); \ }) /* @@ -803,8 +807,8 @@ priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t * min element */ #define priority_queue_remove_min(q, type, field, cmp_fn) ({ \ - assert(pqueue_is_min_heap(que)); \ - pqe_element(pqueue_remove_root(q, pqueue_unpack_root(q), cmp_fn), type, field); \ + assert(pqueue_is_min_heap(que)); \ + pqe_element(pqueue_remove_root(q, pqueue_unpack_root(q), cmp_fn), type, field); \ }) /* @@ -825,7 +829,7 @@ priority_queue_entry_increase(struct priority_queue *que, priority_queue_entry_t * None */ #define priority_queue_destroy(q, type, field, callback, ...) \ - pqueue_destroy(q, offsetof(type, field), callback, ##__VA_ARGS__) + pqueue_destroy(q, offsetof(type, field), callback, ##__VA_ARGS__) __END_DECLS diff --git a/osfmk/kern/processor.c b/osfmk/kern/processor.c index 479094c30..486efc100 100644 --- a/osfmk/kern/processor.c +++ b/osfmk/kern/processor.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -92,32 +92,35 @@ #include #include -struct processor_set pset0; -struct pset_node pset_node0; -decl_simple_lock_data(static,pset_node_lock) +struct processor_set pset0; +struct pset_node pset_node0; +decl_simple_lock_data(static, pset_node_lock) + +lck_grp_t pset_lck_grp; -queue_head_t tasks; -queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */ -queue_head_t corpse_tasks; -int tasks_count; -int terminated_tasks_count; -queue_head_t threads; -int threads_count; -decl_lck_mtx_data(,tasks_threads_lock) -decl_lck_mtx_data(,tasks_corpse_lock) +queue_head_t tasks; +queue_head_t terminated_tasks; /* To be used ONLY for stackshot. */ +queue_head_t corpse_tasks; +int tasks_count; +int terminated_tasks_count; +queue_head_t threads; +int threads_count; +decl_lck_mtx_data(, tasks_threads_lock) +decl_lck_mtx_data(, tasks_corpse_lock) -processor_t processor_list; -unsigned int processor_count; -static processor_t processor_list_tail; -decl_simple_lock_data(,processor_list_lock) +processor_t processor_list; +unsigned int processor_count; +static processor_t processor_list_tail; +decl_simple_lock_data(, processor_list_lock) -uint32_t processor_avail_count; +uint32_t processor_avail_count; +uint32_t processor_avail_count_user; -processor_t master_processor; -int master_cpu = 0; -boolean_t sched_stats_active = FALSE; +processor_t master_processor; +int master_cpu = 0; +boolean_t sched_stats_active = FALSE; -processor_t processor_array[MAX_SCHED_CPUS] = { 0 }; +processor_t processor_array[MAX_SCHED_CPUS] = { 0 }; #if defined(CONFIG_XNUPOST) kern_return_t ipi_test(void); @@ -150,15 +153,18 @@ ipi_test() } #endif /* defined(CONFIG_XNUPOST) */ +int sched_enable_smt = 1; void processor_bootstrap(void) { - pset_init(&pset0, &pset_node0); - pset_node0.psets = &pset0; + lck_grp_init(&pset_lck_grp, "pset", LCK_GRP_ATTR_NULL); simple_lock_init(&pset_node_lock, 0); + pset_node0.psets = &pset0; + pset_init(&pset0, &pset_node0); + queue_init(&tasks); queue_init(&terminated_tasks); queue_init(&threads); @@ -178,11 +184,11 @@ processor_bootstrap(void) */ void processor_init( - processor_t processor, - int cpu_id, - processor_set_t pset) + processor_t processor, + int cpu_id, + processor_set_t pset) { - spl_t s; + spl_t s; if (processor != master_processor) { /* Scheduler state for master_processor initialized in sched_init() */ @@ -210,24 +216,26 @@ processor_init( processor->processor_list = NULL; 
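	/*
	 * (Descriptive note: quiescent-counter state starts out as
	 * CPU_QUIESCE_COUNTER_NONE with no recorded check-in, and the
	 * processor defaults to not being forced idle; see must_idle below.)
	 */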
processor->cpu_quiesce_state = CPU_QUIESCE_COUNTER_NONE; processor->cpu_quiesce_last_checkin = 0; + processor->must_idle = false; s = splsched(); pset_lock(pset); bit_set(pset->cpu_bitmask, cpu_id); - if (pset->cpu_set_count++ == 0) + if (pset->cpu_set_count++ == 0) { pset->cpu_set_low = pset->cpu_set_hi = cpu_id; - else { + } else { pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low; pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi; } pset_unlock(pset); splx(s); - simple_lock(&processor_list_lock); - if (processor_list == NULL) + simple_lock(&processor_list_lock, LCK_GRP_NULL); + if (processor_list == NULL) { processor_list = processor; - else + } else { processor_list_tail->processor_list = processor; + } processor_list_tail = processor; processor_count++; processor_array[cpu_id] = processor; @@ -236,8 +244,8 @@ processor_init( void processor_set_primary( - processor_t processor, - processor_t primary) + processor_t processor, + processor_t primary) { assert(processor->processor_primary == primary || processor->processor_primary == processor); /* Re-adjust primary point for this (possibly) secondary processor */ @@ -256,43 +264,58 @@ processor_set_primary( processor->is_SMT = TRUE; processor_set_t pset = processor->processor_set; - atomic_bit_clear(&pset->primary_map, processor->cpu_id, memory_order_relaxed); + spl_t s = splsched(); + pset_lock(pset); + bit_clear(pset->primary_map, processor->cpu_id); + pset_unlock(pset); + splx(s); } } processor_set_t processor_pset( - processor_t processor) + processor_t processor) { - return (processor->processor_set); + return processor->processor_set; } void processor_state_update_idle(processor_t processor) { - processor->current_pri = IDLEPRI; - processor->current_sfi_class = SFI_CLASS_KERNEL; - processor->current_recommended_pset_type = PSET_SMP; - processor->current_perfctl_class = PERFCONTROL_CLASS_IDLE; + processor->current_pri = IDLEPRI; + processor->current_sfi_class = SFI_CLASS_KERNEL; + processor->current_recommended_pset_type = PSET_SMP; + processor->current_perfctl_class = PERFCONTROL_CLASS_IDLE; + processor->current_urgency = THREAD_URGENCY_NONE; + processor->current_is_NO_SMT = false; + processor->current_is_bound = false; } void processor_state_update_from_thread(processor_t processor, thread_t thread) { - processor->current_pri = thread->sched_pri; - processor->current_sfi_class = thread->sfi_class; - processor->current_recommended_pset_type = recommended_pset_type(thread); - processor->current_perfctl_class = thread_get_perfcontrol_class(thread); + processor->current_pri = thread->sched_pri; + processor->current_sfi_class = thread->sfi_class; + processor->current_recommended_pset_type = recommended_pset_type(thread); + processor->current_perfctl_class = thread_get_perfcontrol_class(thread); + processor->current_urgency = thread_get_urgency(thread, NULL, NULL); +#if DEBUG || DEVELOPMENT + processor->current_is_NO_SMT = (thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT); +#else + processor->current_is_NO_SMT = (thread->sched_flags & TH_SFLAG_NO_SMT); +#endif + processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL; } void -processor_state_update_explicit(processor_t processor, int pri, sfi_class_id_t sfi_class, - pset_cluster_type_t pset_type, perfcontrol_class_t perfctl_class) +processor_state_update_explicit(processor_t processor, int pri, sfi_class_id_t sfi_class, + pset_cluster_type_t pset_type, perfcontrol_class_t perfctl_class, 
thread_urgency_t urgency) { - processor->current_pri = pri; - processor->current_sfi_class = sfi_class; - processor->current_recommended_pset_type = pset_type; - processor->current_perfctl_class = perfctl_class; + processor->current_pri = pri; + processor->current_sfi_class = sfi_class; + processor->current_recommended_pset_type = pset_type; + processor->current_perfctl_class = perfctl_class; + processor->current_urgency = urgency; } pset_node_t @@ -303,29 +326,31 @@ pset_node_root(void) processor_set_t pset_create( - pset_node_t node) + pset_node_t node) { /* some schedulers do not support multiple psets */ - if (SCHED(multiple_psets_enabled) == FALSE) + if (SCHED(multiple_psets_enabled) == FALSE) { return processor_pset(master_processor); + } - processor_set_t *prev, pset = kalloc(sizeof (*pset)); + processor_set_t *prev, pset = kalloc(sizeof(*pset)); if (pset != PROCESSOR_SET_NULL) { pset_init(pset, node); - simple_lock(&pset_node_lock); + simple_lock(&pset_node_lock, LCK_GRP_NULL); prev = &node->psets; - while (*prev != PROCESSOR_SET_NULL) + while (*prev != PROCESSOR_SET_NULL) { prev = &(*prev)->pset_list; + } *prev = pset; simple_unlock(&pset_node_lock); } - return (pset); + return pset; } /* @@ -337,22 +362,24 @@ pset_find( uint32_t cluster_id, processor_set_t default_pset) { - simple_lock(&pset_node_lock); + simple_lock(&pset_node_lock, LCK_GRP_NULL); pset_node_t node = &pset_node0; processor_set_t pset = NULL; do { pset = node->psets; while (pset != NULL) { - if (pset->pset_cluster_id == cluster_id) + if (pset->pset_cluster_id == cluster_id) { break; + } pset = pset->pset_list; } } while ((node = node->node_list) != NULL); simple_unlock(&pset_node_lock); - if (pset == NULL) + if (pset == NULL) { return default_pset; - return (pset); + } + return pset; } /* @@ -360,8 +387,8 @@ pset_find( */ void pset_init( - processor_set_t pset, - pset_node_t node) + processor_set_t pset, + pset_node_t node) { if (pset != &pset0) { /* Scheduler state for pset0 initialized in sched_init() */ @@ -381,7 +408,8 @@ pset_init( for (uint i = PROCESSOR_SHUTDOWN; i < PROCESSOR_STATE_LEN; i++) { pset->cpu_state_map[i] = 0; } - pset->pending_AST_cpu_mask = 0; + pset->pending_AST_URGENT_cpu_mask = 0; + pset->pending_AST_PREEMPT_cpu_mask = 0; #if defined(CONFIG_SCHED_DEFERRED_AST) pset->pending_deferred_AST_cpu_mask = 0; #endif @@ -393,15 +421,18 @@ pset_init( pset->node = node; pset->pset_cluster_type = PSET_SMP; pset->pset_cluster_id = 0; + + simple_lock(&pset_node_lock, LCK_GRP_NULL); + node->pset_count++; + simple_unlock(&pset_node_lock); } kern_return_t processor_info_count( - processor_flavor_t flavor, - mach_msg_type_number_t *count) + processor_flavor_t flavor, + mach_msg_type_number_t *count) { switch (flavor) { - case PROCESSOR_BASIC_INFO: *count = PROCESSOR_BASIC_INFO_COUNT; break; @@ -411,64 +442,71 @@ processor_info_count( break; default: - return (cpu_info_count(flavor, count)); + return cpu_info_count(flavor, count); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t processor_info( - processor_t processor, - processor_flavor_t flavor, - host_t *host, - processor_info_t info, - mach_msg_type_number_t *count) + processor_t processor, + processor_flavor_t flavor, + host_t *host, + processor_info_t info, + mach_msg_type_number_t *count) { - int cpu_id, state; - kern_return_t result; + int cpu_id, state; + kern_return_t result; - if (processor == PROCESSOR_NULL) - return (KERN_INVALID_ARGUMENT); + if (processor == PROCESSOR_NULL) { + return KERN_INVALID_ARGUMENT; + } cpu_id = 
processor->cpu_id; switch (flavor) { - case PROCESSOR_BASIC_INFO: { - processor_basic_info_t basic_info; + processor_basic_info_t basic_info; - if (*count < PROCESSOR_BASIC_INFO_COUNT) - return (KERN_FAILURE); + if (*count < PROCESSOR_BASIC_INFO_COUNT) { + return KERN_FAILURE; + } basic_info = (processor_basic_info_t) info; basic_info->cpu_type = slot_type(cpu_id); basic_info->cpu_subtype = slot_subtype(cpu_id); state = processor->state; - if (state == PROCESSOR_OFF_LINE) + if (state == PROCESSOR_OFF_LINE +#if defined(__x86_64__) + || !processor->is_recommended +#endif + ) { basic_info->running = FALSE; - else + } else { basic_info->running = TRUE; + } basic_info->slot_num = cpu_id; - if (processor == master_processor) + if (processor == master_processor) { basic_info->is_master = TRUE; - else + } else { basic_info->is_master = FALSE; + } *count = PROCESSOR_BASIC_INFO_COUNT; *host = &realhost; - return (KERN_SUCCESS); + return KERN_SUCCESS; } case PROCESSOR_CPU_LOAD_INFO: { - processor_cpu_load_info_t cpu_load_info; - timer_t idle_state; - uint64_t idle_time_snapshot1, idle_time_snapshot2; - uint64_t idle_time_tstamp1, idle_time_tstamp2; + processor_cpu_load_info_t cpu_load_info; + timer_t idle_state; + uint64_t idle_time_snapshot1, idle_time_snapshot2; + uint64_t idle_time_tstamp1, idle_time_tstamp2; /* * We capture the accumulated idle time twice over @@ -481,18 +519,19 @@ processor_info( * data. */ - if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT) - return (KERN_FAILURE); + if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT) { + return KERN_FAILURE; + } cpu_load_info = (processor_cpu_load_info_t) info; if (precise_user_kernel_time) { cpu_load_info->cpu_ticks[CPU_STATE_USER] = - (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval); + (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval); cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = - (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval); + (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval); } else { uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) + - timer_grab(&PROCESSOR_DATA(processor, system_state)); + timer_grab(&PROCESSOR_DATA(processor, system_state)); cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval); cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0; @@ -513,54 +552,56 @@ processor_info( */ if (PROCESSOR_DATA(processor, current_state) != idle_state) { cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = - (uint32_t)(idle_time_snapshot1 / hz_tick_interval); + (uint32_t)(idle_time_snapshot1 / hz_tick_interval); } else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) || - (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))){ + (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) { /* Idle timer is being updated concurrently, second stamp is good enough */ cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = - (uint32_t)(idle_time_snapshot2 / hz_tick_interval); + (uint32_t)(idle_time_snapshot2 / hz_tick_interval); } else { /* * Idle timer may be very stale. 
Fortunately we have established * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging */ idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1; - + cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = - (uint32_t)(idle_time_snapshot1 / hz_tick_interval); + (uint32_t)(idle_time_snapshot1 / hz_tick_interval); } cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0; - *count = PROCESSOR_CPU_LOAD_INFO_COUNT; - *host = &realhost; + *count = PROCESSOR_CPU_LOAD_INFO_COUNT; + *host = &realhost; - return (KERN_SUCCESS); + return KERN_SUCCESS; } default: - result = cpu_info(flavor, cpu_id, info, count); - if (result == KERN_SUCCESS) - *host = &realhost; + result = cpu_info(flavor, cpu_id, info, count); + if (result == KERN_SUCCESS) { + *host = &realhost; + } - return (result); + return result; } } kern_return_t processor_start( - processor_t processor) + processor_t processor) { - processor_set_t pset; - thread_t thread; - kern_return_t result; - spl_t s; + processor_set_t pset; + thread_t thread; + kern_return_t result; + spl_t s; - if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL) - return (KERN_INVALID_ARGUMENT); + if (processor == PROCESSOR_NULL || processor->processor_set == PROCESSOR_SET_NULL) { + return KERN_INVALID_ARGUMENT; + } if (processor == master_processor) { - processor_t prev; + processor_t prev; prev = thread_bind(processor); thread_block(THREAD_CONTINUE_NULL); @@ -569,7 +610,20 @@ processor_start( thread_bind(prev); - return (result); + return result; + } + + bool scheduler_disable = false; + + if ((processor->processor_primary != processor) && (sched_enable_smt == 0)) { + if (cpu_can_exit(processor->cpu_id)) { + return KERN_SUCCESS; + } + /* + * This secondary SMT processor must start in order to service interrupts, + * so instead it will be disabled at the scheduler level. + */ + scheduler_disable = true; } s = splsched(); @@ -579,7 +633,7 @@ processor_start( pset_unlock(pset); splx(s); - return (KERN_FAILURE); + return KERN_FAILURE; } pset_update_processor_state(pset, processor, PROCESSOR_START); @@ -598,7 +652,7 @@ processor_start( pset_unlock(pset); splx(s); - return (result); + return result; } } @@ -607,8 +661,8 @@ processor_start( * has never been started. Create a dedicated * start up thread. 
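	 *	(The thread is created below at MAXPRI_KERNEL running
	 *	processor_start_thread, giving the newly started CPU
	 *	something to dispatch as soon as it comes online.)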
*/ - if ( processor->active_thread == THREAD_NULL && - processor->next_thread == THREAD_NULL ) { + if (processor->active_thread == THREAD_NULL && + processor->next_thread == THREAD_NULL) { result = kernel_thread_create((thread_continue_t)processor_start_thread, NULL, MAXPRI_KERNEL, &thread); if (result != KERN_SUCCESS) { s = splsched(); @@ -617,7 +671,7 @@ processor_start( pset_unlock(pset); splx(s); - return (result); + return result; } s = splsched(); @@ -632,8 +686,9 @@ processor_start( thread_deallocate(thread); } - if (processor->processor_self == IP_NULL) + if (processor->processor_self == IP_NULL) { ipc_processor_init(processor); + } result = cpu_start(processor->cpu_id); if (result != KERN_SUCCESS) { @@ -643,127 +698,222 @@ processor_start( pset_unlock(pset); splx(s); - return (result); + return result; + } + if (scheduler_disable) { + assert(processor->processor_primary != processor); + sched_processor_enable(processor, FALSE); } ipc_processor_enable(processor); - return (KERN_SUCCESS); + return KERN_SUCCESS; } + kern_return_t processor_exit( - processor_t processor) + processor_t processor) +{ + if (processor == PROCESSOR_NULL) { + return KERN_INVALID_ARGUMENT; + } + + return processor_shutdown(processor); +} + + +kern_return_t +processor_start_from_user( + processor_t processor) +{ + kern_return_t ret; + + if (processor == PROCESSOR_NULL) { + return KERN_INVALID_ARGUMENT; + } + + if (!cpu_can_exit(processor->cpu_id)) { + ret = sched_processor_enable(processor, TRUE); + } else { + ret = processor_start(processor); + } + + return ret; +} + +kern_return_t +processor_exit_from_user( + processor_t processor) +{ + kern_return_t ret; + + if (processor == PROCESSOR_NULL) { + return KERN_INVALID_ARGUMENT; + } + + if (!cpu_can_exit(processor->cpu_id)) { + ret = sched_processor_enable(processor, FALSE); + } else { + ret = processor_shutdown(processor); + } + + return ret; +} + +kern_return_t +enable_smt_processors(bool enable) { - if (processor == PROCESSOR_NULL) - return(KERN_INVALID_ARGUMENT); + if (machine_info.logical_cpu_max == machine_info.physical_cpu_max) { + /* Not an SMT system */ + return KERN_INVALID_ARGUMENT; + } + + int ncpus = machine_info.logical_cpu_max; + + for (int i = 1; i < ncpus; i++) { + processor_t processor = processor_array[i]; + + if (processor->processor_primary != processor) { + if (enable) { + processor_start_from_user(processor); + } else { /* Disable */ + processor_exit_from_user(processor); + } + } + } + +#define BSD_HOST 1 + host_basic_info_data_t hinfo; + mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; + kern_return_t kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count); + if (kret != KERN_SUCCESS) { + return kret; + } + + if (enable && (hinfo.logical_cpu != hinfo.logical_cpu_max)) { + return KERN_FAILURE; + } + + if (!enable && (hinfo.logical_cpu != hinfo.physical_cpu)) { + return KERN_FAILURE; + } - return(processor_shutdown(processor)); + return KERN_SUCCESS; } kern_return_t processor_control( - processor_t processor, - processor_info_t info, - mach_msg_type_number_t count) + processor_t processor, + processor_info_t info, + mach_msg_type_number_t count) { - if (processor == PROCESSOR_NULL) - return(KERN_INVALID_ARGUMENT); + if (processor == PROCESSOR_NULL) { + return KERN_INVALID_ARGUMENT; + } - return(cpu_control(processor->cpu_id, info, count)); + return cpu_control(processor->cpu_id, info, count); } - + kern_return_t processor_set_create( - __unused host_t host, - __unused processor_set_t *new_set, - __unused 
processor_set_t *new_name) + __unused host_t host, + __unused processor_set_t *new_set, + __unused processor_set_t *new_name) { - return(KERN_FAILURE); + return KERN_FAILURE; } kern_return_t processor_set_destroy( - __unused processor_set_t pset) + __unused processor_set_t pset) { - return(KERN_FAILURE); + return KERN_FAILURE; } kern_return_t processor_get_assignment( - processor_t processor, - processor_set_t *pset) + processor_t processor, + processor_set_t *pset) { int state; - if (processor == PROCESSOR_NULL) - return(KERN_INVALID_ARGUMENT); + if (processor == PROCESSOR_NULL) { + return KERN_INVALID_ARGUMENT; + } state = processor->state; - if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE) - return(KERN_FAILURE); + if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE) { + return KERN_FAILURE; + } *pset = &pset0; - return(KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t processor_set_info( - processor_set_t pset, - int flavor, - host_t *host, - processor_set_info_t info, - mach_msg_type_number_t *count) + processor_set_t pset, + int flavor, + host_t *host, + processor_set_info_t info, + mach_msg_type_number_t *count) { - if (pset == PROCESSOR_SET_NULL) - return(KERN_INVALID_ARGUMENT); + if (pset == PROCESSOR_SET_NULL) { + return KERN_INVALID_ARGUMENT; + } if (flavor == PROCESSOR_SET_BASIC_INFO) { - processor_set_basic_info_t basic_info; + processor_set_basic_info_t basic_info; - if (*count < PROCESSOR_SET_BASIC_INFO_COUNT) - return(KERN_FAILURE); + if (*count < PROCESSOR_SET_BASIC_INFO_COUNT) { + return KERN_FAILURE; + } basic_info = (processor_set_basic_info_t) info; +#if defined(__x86_64__) + basic_info->processor_count = processor_avail_count_user; +#else basic_info->processor_count = processor_avail_count; +#endif basic_info->default_policy = POLICY_TIMESHARE; *count = PROCESSOR_SET_BASIC_INFO_COUNT; *host = &realhost; - return(KERN_SUCCESS); - } - else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) { - policy_timeshare_base_t ts_base; + return KERN_SUCCESS; + } else if (flavor == PROCESSOR_SET_TIMESHARE_DEFAULT) { + policy_timeshare_base_t ts_base; - if (*count < POLICY_TIMESHARE_BASE_COUNT) - return(KERN_FAILURE); + if (*count < POLICY_TIMESHARE_BASE_COUNT) { + return KERN_FAILURE; + } ts_base = (policy_timeshare_base_t) info; ts_base->base_priority = BASEPRI_DEFAULT; *count = POLICY_TIMESHARE_BASE_COUNT; *host = &realhost; - return(KERN_SUCCESS); - } - else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) { - policy_fifo_base_t fifo_base; + return KERN_SUCCESS; + } else if (flavor == PROCESSOR_SET_FIFO_DEFAULT) { + policy_fifo_base_t fifo_base; - if (*count < POLICY_FIFO_BASE_COUNT) - return(KERN_FAILURE); + if (*count < POLICY_FIFO_BASE_COUNT) { + return KERN_FAILURE; + } fifo_base = (policy_fifo_base_t) info; fifo_base->base_priority = BASEPRI_DEFAULT; *count = POLICY_FIFO_BASE_COUNT; *host = &realhost; - return(KERN_SUCCESS); - } - else if (flavor == PROCESSOR_SET_RR_DEFAULT) { - policy_rr_base_t rr_base; + return KERN_SUCCESS; + } else if (flavor == PROCESSOR_SET_RR_DEFAULT) { + policy_rr_base_t rr_base; - if (*count < POLICY_RR_BASE_COUNT) - return(KERN_FAILURE); + if (*count < POLICY_RR_BASE_COUNT) { + return KERN_FAILURE; + } rr_base = (policy_rr_base_t) info; rr_base->base_priority = BASEPRI_DEFAULT; @@ -771,86 +921,88 @@ processor_set_info( *count = POLICY_RR_BASE_COUNT; *host = &realhost; - return(KERN_SUCCESS); - } - else if (flavor == PROCESSOR_SET_TIMESHARE_LIMITS) { - policy_timeshare_limit_t ts_limit; + return KERN_SUCCESS; + } else if (flavor 
== PROCESSOR_SET_TIMESHARE_LIMITS) { + policy_timeshare_limit_t ts_limit; - if (*count < POLICY_TIMESHARE_LIMIT_COUNT) - return(KERN_FAILURE); + if (*count < POLICY_TIMESHARE_LIMIT_COUNT) { + return KERN_FAILURE; + } ts_limit = (policy_timeshare_limit_t) info; ts_limit->max_priority = MAXPRI_KERNEL; *count = POLICY_TIMESHARE_LIMIT_COUNT; *host = &realhost; - return(KERN_SUCCESS); - } - else if (flavor == PROCESSOR_SET_FIFO_LIMITS) { - policy_fifo_limit_t fifo_limit; + return KERN_SUCCESS; + } else if (flavor == PROCESSOR_SET_FIFO_LIMITS) { + policy_fifo_limit_t fifo_limit; - if (*count < POLICY_FIFO_LIMIT_COUNT) - return(KERN_FAILURE); + if (*count < POLICY_FIFO_LIMIT_COUNT) { + return KERN_FAILURE; + } fifo_limit = (policy_fifo_limit_t) info; fifo_limit->max_priority = MAXPRI_KERNEL; *count = POLICY_FIFO_LIMIT_COUNT; *host = &realhost; - return(KERN_SUCCESS); - } - else if (flavor == PROCESSOR_SET_RR_LIMITS) { - policy_rr_limit_t rr_limit; + return KERN_SUCCESS; + } else if (flavor == PROCESSOR_SET_RR_LIMITS) { + policy_rr_limit_t rr_limit; - if (*count < POLICY_RR_LIMIT_COUNT) - return(KERN_FAILURE); + if (*count < POLICY_RR_LIMIT_COUNT) { + return KERN_FAILURE; + } rr_limit = (policy_rr_limit_t) info; rr_limit->max_priority = MAXPRI_KERNEL; *count = POLICY_RR_LIMIT_COUNT; *host = &realhost; - return(KERN_SUCCESS); - } - else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) { - int *enabled; + return KERN_SUCCESS; + } else if (flavor == PROCESSOR_SET_ENABLED_POLICIES) { + int *enabled; - if (*count < (sizeof(*enabled)/sizeof(int))) - return(KERN_FAILURE); + if (*count < (sizeof(*enabled) / sizeof(int))) { + return KERN_FAILURE; + } enabled = (int *) info; *enabled = POLICY_TIMESHARE | POLICY_RR | POLICY_FIFO; - *count = sizeof(*enabled)/sizeof(int); + *count = sizeof(*enabled) / sizeof(int); *host = &realhost; - return(KERN_SUCCESS); + return KERN_SUCCESS; } *host = HOST_NULL; - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } /* * processor_set_statistics * - * Returns scheduling statistics for a processor set. + * Returns scheduling statistics for a processor set. */ -kern_return_t +kern_return_t processor_set_statistics( processor_set_t pset, int flavor, processor_set_info_t info, - mach_msg_type_number_t *count) + mach_msg_type_number_t *count) { - if (pset == PROCESSOR_SET_NULL || pset != &pset0) - return (KERN_INVALID_PROCESSOR_SET); + if (pset == PROCESSOR_SET_NULL || pset != &pset0) { + return KERN_INVALID_PROCESSOR_SET; + } if (flavor == PROCESSOR_SET_LOAD_INFO) { processor_set_load_info_t load_info; - if (*count < PROCESSOR_SET_LOAD_INFO_COUNT) - return(KERN_FAILURE); + if (*count < PROCESSOR_SET_LOAD_INFO_COUNT) { + return KERN_FAILURE; + } load_info = (processor_set_load_info_t) info; @@ -861,10 +1013,10 @@ processor_set_statistics( load_info->thread_count = threads_count; *count = PROCESSOR_SET_LOAD_INFO_COUNT; - return(KERN_SUCCESS); + return KERN_SUCCESS; } - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } /* @@ -872,15 +1024,15 @@ processor_set_statistics( * * Specify max priority permitted on processor set. This affects * newly created and assigned threads. Optionally change existing - * ones. + * ones. 
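 *	(Not supported in this kernel: the body below ignores its
 *	arguments and returns KERN_INVALID_ARGUMENT.)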
*/ kern_return_t processor_set_max_priority( - __unused processor_set_t pset, - __unused int max_priority, - __unused boolean_t change_threads) + __unused processor_set_t pset, + __unused int max_priority, + __unused boolean_t change_threads) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } /* @@ -891,10 +1043,10 @@ processor_set_max_priority( kern_return_t processor_set_policy_enable( - __unused processor_set_t pset, - __unused int policy) + __unused processor_set_t pset, + __unused int policy) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } /* @@ -905,11 +1057,11 @@ processor_set_policy_enable( */ kern_return_t processor_set_policy_disable( - __unused processor_set_t pset, - __unused int policy, - __unused boolean_t change_threads) + __unused processor_set_t pset, + __unused int policy, + __unused boolean_t change_threads) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } /* @@ -919,7 +1071,7 @@ processor_set_policy_disable( */ kern_return_t processor_set_things( - processor_set_t pset, + processor_set_t pset, void **thing_list, mach_msg_type_number_t *count, int type) @@ -939,8 +1091,9 @@ processor_set_things( void *addr, *newaddr; vm_size_t size, size_needed; - if (pset == PROCESSOR_SET_NULL || pset != &pset0) - return (KERN_INVALID_ARGUMENT); + if (pset == PROCESSOR_SET_NULL || pset != &pset0) { + return KERN_INVALID_ARGUMENT; + } task_size = 0; task_size_needed = 0; @@ -956,49 +1109,55 @@ processor_set_things( lck_mtx_lock(&tasks_threads_lock); /* do we have the memory we need? */ - if (type == PSET_THING_THREAD) + if (type == PSET_THING_THREAD) { thread_size_needed = threads_count * sizeof(void *); + } #if !CONFIG_MACF else #endif - task_size_needed = tasks_count * sizeof(void *); + task_size_needed = tasks_count * sizeof(void *); if (task_size_needed <= task_size && - thread_size_needed <= thread_size) + thread_size_needed <= thread_size) { break; + } /* unlock and allocate more memory */ lck_mtx_unlock(&tasks_threads_lock); /* grow task array */ if (task_size_needed > task_size) { - if (task_size != 0) + if (task_size != 0) { kfree(task_list, task_size); + } assert(task_size_needed > 0); task_size = task_size_needed; task_list = (task_t *)kalloc(task_size); if (task_list == NULL) { - if (thread_size != 0) + if (thread_size != 0) { kfree(thread_list, thread_size); - return (KERN_RESOURCE_SHORTAGE); + } + return KERN_RESOURCE_SHORTAGE; } } /* grow thread array */ if (thread_size_needed > thread_size) { - if (thread_size != 0) + if (thread_size != 0) { kfree(thread_list, thread_size); + } assert(thread_size_needed > 0); thread_size = thread_size_needed; thread_list = (thread_t *)kalloc(thread_size); if (thread_list == 0) { - if (task_size != 0) + if (task_size != 0) { kfree(task_list, task_size); - return (KERN_RESOURCE_SHORTAGE); + } + return KERN_RESOURCE_SHORTAGE; } } } @@ -1008,36 +1167,36 @@ processor_set_things( /* If we need it, get the thread list */ if (type == PSET_THING_THREAD) { for (thread = (thread_t)queue_first(&threads); - !queue_end(&threads, (queue_entry_t)thread); - thread = (thread_t)queue_next(&thread->threads)) { + !queue_end(&threads, (queue_entry_t)thread); + thread = (thread_t)queue_next(&thread->threads)) { #if defined(SECURE_KERNEL) if (thread->task != kernel_task) { #endif - thread_reference_internal(thread); - thread_list[actual_threads++] = thread; + thread_reference_internal(thread); + thread_list[actual_threads++] = thread; #if defined(SECURE_KERNEL) - } + } #endif } } #if !CONFIG_MACF - 
else { + else { #endif - /* get a list of the tasks */ - for (task = (task_t)queue_first(&tasks); - !queue_end(&tasks, (queue_entry_t)task); - task = (task_t)queue_next(&task->tasks)) { + /* get a list of the tasks */ + for (task = (task_t)queue_first(&tasks); + !queue_end(&tasks, (queue_entry_t)task); + task = (task_t)queue_next(&task->tasks)) { #if defined(SECURE_KERNEL) - if (task != kernel_task) { + if (task != kernel_task) { #endif - task_reference_internal(task); - task_list[actual_tasks++] = task; + task_reference_internal(task); + task_list[actual_tasks++] = task; #if defined(SECURE_KERNEL) - } + } #endif - } -#if !CONFIG_MACF } +#if !CONFIG_MACF +} #endif lck_mtx_unlock(&tasks_threads_lock); @@ -1057,7 +1216,6 @@ processor_set_things( task_size_needed = actual_tasks * sizeof(void *); if (type == PSET_THING_THREAD) { - /* for each thread (if any), make sure it's task is in the allowed list */ for (i = used = 0; i < actual_threads; i++) { boolean_t found_task = FALSE; @@ -1069,17 +1227,19 @@ processor_set_things( break; } } - if (found_task) + if (found_task) { thread_list[used++] = thread_list[i]; - else + } else { thread_deallocate(thread_list[i]); + } } actual_threads = used; thread_size_needed = actual_threads * sizeof(void *); /* done with the task list */ - for (i = 0; i < actual_tasks; i++) + for (i = 0; i < actual_tasks; i++) { task_deallocate(task_list[i]); + } kfree(task_list, task_size); task_size = 0; actual_tasks = 0; @@ -1091,8 +1251,9 @@ processor_set_things( if (actual_threads == 0) { /* no threads available to return */ assert(task_size == 0); - if (thread_size != 0) + if (thread_size != 0) { kfree(thread_list, thread_size); + } *thing_list = NULL; *count = 0; return KERN_SUCCESS; @@ -1104,12 +1265,13 @@ processor_set_things( if (actual_tasks == 0) { /* no tasks available to return */ assert(thread_size == 0); - if (task_size != 0) + if (task_size != 0) { kfree(task_list, task_size); + } *thing_list = NULL; *count = 0; return KERN_SUCCESS; - } + } size_needed = actual_tasks * sizeof(void *); size = task_size; addr = task_list; @@ -1120,14 +1282,16 @@ processor_set_things( newaddr = kalloc(size_needed); if (newaddr == 0) { for (i = 0; i < actual_tasks; i++) { - if (type == PSET_THING_THREAD) + if (type == PSET_THING_THREAD) { thread_deallocate(thread_list[i]); - else + } else { task_deallocate(task_list[i]); + } } - if (size) + if (size) { kfree(addr, size); - return (KERN_RESOURCE_SHORTAGE); + } + return KERN_RESOURCE_SHORTAGE; } bcopy((void *) addr, (void *) newaddr, size_needed); @@ -1140,7 +1304,7 @@ processor_set_things( *thing_list = (void **)addr; *count = (unsigned int)size / sizeof(void *); - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -1151,20 +1315,22 @@ processor_set_things( */ kern_return_t processor_set_tasks( - processor_set_t pset, - task_array_t *task_list, - mach_msg_type_number_t *count) + processor_set_t pset, + task_array_t *task_list, + mach_msg_type_number_t *count) { kern_return_t ret; mach_msg_type_number_t i; ret = processor_set_things(pset, (void **)task_list, count, PSET_THING_TASK); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return ret; + } /* do the conversion that Mig should handle */ - for (i = 0; i < *count; i++) + for (i = 0; i < *count; i++) { (*task_list)[i] = (task_t)convert_task_to_port((*task_list)[i]); + } return KERN_SUCCESS; } @@ -1176,38 +1342,40 @@ processor_set_tasks( #if defined(SECURE_KERNEL) kern_return_t processor_set_threads( - __unused processor_set_t pset, - __unused thread_array_t *thread_list, 
- __unused mach_msg_type_number_t *count) + __unused processor_set_t pset, + __unused thread_array_t *thread_list, + __unused mach_msg_type_number_t *count) { - return KERN_FAILURE; + return KERN_FAILURE; } #elif defined(CONFIG_EMBEDDED) kern_return_t processor_set_threads( - __unused processor_set_t pset, - __unused thread_array_t *thread_list, - __unused mach_msg_type_number_t *count) + __unused processor_set_t pset, + __unused thread_array_t *thread_list, + __unused mach_msg_type_number_t *count) { - return KERN_NOT_SUPPORTED; + return KERN_NOT_SUPPORTED; } #else kern_return_t processor_set_threads( - processor_set_t pset, - thread_array_t *thread_list, - mach_msg_type_number_t *count) + processor_set_t pset, + thread_array_t *thread_list, + mach_msg_type_number_t *count) { kern_return_t ret; mach_msg_type_number_t i; ret = processor_set_things(pset, (void **)thread_list, count, PSET_THING_THREAD); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return ret; + } /* do the conversion that Mig should handle */ - for (i = 0; i < *count; i++) + for (i = 0; i < *count; i++) { (*thread_list)[i] = (thread_t)convert_thread_to_port((*thread_list)[i]); + } return KERN_SUCCESS; } #endif @@ -1221,20 +1389,20 @@ processor_set_threads( */ kern_return_t processor_set_policy_control( - __unused processor_set_t pset, - __unused int flavor, - __unused processor_set_info_t policy_info, - __unused mach_msg_type_number_t count, - __unused boolean_t change) + __unused processor_set_t pset, + __unused int flavor, + __unused processor_set_info_t policy_info, + __unused mach_msg_type_number_t count, + __unused boolean_t change) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } #undef pset_deallocate void pset_deallocate(processor_set_t pset); void pset_deallocate( -__unused processor_set_t pset) + __unused processor_set_t pset) { return; } @@ -1243,7 +1411,7 @@ __unused processor_set_t pset) void pset_reference(processor_set_t pset); void pset_reference( -__unused processor_set_t pset) + __unused processor_set_t pset) { return; } diff --git a/osfmk/kern/processor.h b/osfmk/kern/processor.h index 646ea801c..223aae3b7 100644 --- a/osfmk/kern/processor.h +++ b/osfmk/kern/processor.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,8 +60,8 @@ * processor.h: Processor and processor-related definitions. */ -#ifndef _KERN_PROCESSOR_H_ -#define _KERN_PROCESSOR_H_ +#ifndef _KERN_PROCESSOR_H_ +#define _KERN_PROCESSOR_H_ #include #include @@ -69,7 +69,7 @@ #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -130,13 +131,13 @@ */ #endif -#define PROCESSOR_OFF_LINE 0 /* Not available */ -#define PROCESSOR_SHUTDOWN 1 /* Going off-line */ -#define PROCESSOR_START 2 /* Being started */ -/* 3 Formerly Inactive (unavailable) */ -#define PROCESSOR_IDLE 4 /* Idle (available) */ -#define PROCESSOR_DISPATCHING 5 /* Dispatching (idle -> active) */ -#define PROCESSOR_RUNNING 6 /* Normal execution */ +#define PROCESSOR_OFF_LINE 0 /* Not available */ +#define PROCESSOR_SHUTDOWN 1 /* Going off-line */ +#define PROCESSOR_START 2 /* Being started */ +/* 3 Formerly Inactive (unavailable) */ +#define PROCESSOR_IDLE 4 /* Idle (available) */ +#define PROCESSOR_DISPATCHING 5 /* Dispatching (idle -> active) */ +#define PROCESSOR_RUNNING 6 /* Normal execution */ #define PROCESSOR_STATE_LEN (PROCESSOR_RUNNING+1) typedef enum { @@ -156,23 +157,29 @@ struct processor_set { cpumap_t recommended_bitmask; cpumap_t cpu_state_map[PROCESSOR_STATE_LEN]; cpumap_t primary_map; - +#define SCHED_PSET_TLOCK (1) #if __SMP__ - decl_simple_lock_data(,sched_lock) /* lock for above */ +#if defined(SCHED_PSET_TLOCK) + /* TODO: reorder struct for temporal cache locality */ + __attribute__((aligned(128))) lck_ticket_t sched_lock; +#else /* SCHED_PSET_TLOCK*/ + __attribute__((aligned(128))) simple_lock_data_t sched_lock; +#endif /* SCHED_PSET_TLOCK*/ #endif #if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ) - struct run_queue pset_runq; /* runq for this processor set */ + struct run_queue pset_runq; /* runq for this processor set */ #endif - struct rt_queue rt_runq; /* realtime runq for this processor set */ + struct rt_queue rt_runq; /* realtime runq for this processor set */ #if defined(CONFIG_SCHED_TRADITIONAL) - int pset_runq_bound_count; - /* # of threads in runq bound to any processor in pset */ + int pset_runq_bound_count; + /* # of threads in runq bound to any processor in pset */ #endif /* CPUs that have been sent an unacknowledged remote AST for scheduling purposes */ - cpumap_t pending_AST_cpu_mask; + cpumap_t pending_AST_URGENT_cpu_mask; + cpumap_t pending_AST_PREEMPT_cpu_mask; #if defined(CONFIG_SCHED_DEFERRED_AST) /* * A separate mask, for ASTs that we may be able to cancel. 
This is dependent on @@ -185,36 +192,37 @@ struct processor_set { * of spurious ASTs in the system, and let processors spend longer periods in * IDLE. */ - cpumap_t pending_deferred_AST_cpu_mask; + cpumap_t pending_deferred_AST_cpu_mask; #endif - cpumap_t pending_spill_cpu_mask; + cpumap_t pending_spill_cpu_mask; - struct ipc_port * pset_self; /* port for operations */ - struct ipc_port * pset_name_self; /* port for information */ + struct ipc_port * pset_self; /* port for operations */ + struct ipc_port * pset_name_self; /* port for information */ - processor_set_t pset_list; /* chain of associated psets */ - pset_node_t node; - uint32_t pset_cluster_id; - pset_cluster_type_t pset_cluster_type; + processor_set_t pset_list; /* chain of associated psets */ + pset_node_t node; + uint32_t pset_cluster_id; + pset_cluster_type_t pset_cluster_type; }; -extern struct processor_set pset0; +extern struct processor_set pset0; struct pset_node { - processor_set_t psets; /* list of associated psets */ + processor_set_t psets; /* list of associated psets */ + uint32_t pset_count; /* count of associated psets */ - pset_node_t nodes; /* list of associated subnodes */ - pset_node_t node_list; /* chain of associated nodes */ + pset_node_t nodes; /* list of associated subnodes */ + pset_node_t node_list; /* chain of associated nodes */ - pset_node_t parent; + pset_node_t parent; }; -extern struct pset_node pset_node0; +extern struct pset_node pset_node0; -extern queue_head_t tasks, terminated_tasks, threads, corpse_tasks; /* Terminated tasks are ONLY for stackshot */ -extern int tasks_count, terminated_tasks_count, threads_count; -decl_lck_mtx_data(extern,tasks_threads_lock) -decl_lck_mtx_data(extern,tasks_corpse_lock) +extern queue_head_t tasks, terminated_tasks, threads, corpse_tasks; /* Terminated tasks are ONLY for stackshot */ +extern int tasks_count, terminated_tasks_count, threads_count; +decl_lck_mtx_data(extern, tasks_threads_lock) +decl_lck_mtx_data(extern, tasks_corpse_lock) struct processor { int state; /* See above */ @@ -224,159 +232,184 @@ struct processor { struct thread *next_thread; /* next thread when dispatched */ struct thread *idle_thread; /* this processor's idle thread. 
*/ - processor_set_t processor_set; /* assigned set */ + processor_set_t processor_set; /* assigned set */ - int current_pri; /* priority of current thread */ - sfi_class_id_t current_sfi_class; /* SFI class of current thread */ - perfcontrol_class_t current_perfctl_class; /* Perfcontrol class for current thread */ - int starting_pri; /* priority of current thread as it was when scheduled */ - pset_cluster_type_t current_recommended_pset_type; /* Cluster type recommended for current thread */ - int cpu_id; /* platform numeric id */ + int current_pri; /* priority of current thread */ + sfi_class_id_t current_sfi_class; /* SFI class of current thread */ + perfcontrol_class_t current_perfctl_class; /* Perfcontrol class for current thread */ + pset_cluster_type_t current_recommended_pset_type; /* Cluster type recommended for current thread */ + thread_urgency_t current_urgency; /* cached urgency of current thread */ + bool current_is_NO_SMT; /* cached TH_SFLAG_NO_SMT of current thread */ + bool current_is_bound; /* current thread is bound to this processor */ + + int starting_pri; /* priority of current thread as it was when scheduled */ + int cpu_id; /* platform numeric id */ cpu_quiescent_state_t cpu_quiesce_state; uint64_t cpu_quiesce_last_checkin; - timer_call_data_t quantum_timer; /* timer for quantum expiration */ - uint64_t quantum_end; /* time when current quantum ends */ - uint64_t last_dispatch; /* time of last dispatch */ + timer_call_data_t quantum_timer; /* timer for quantum expiration */ + uint64_t quantum_end; /* time when current quantum ends */ + uint64_t last_dispatch; /* time of last dispatch */ - uint64_t kperf_last_sample_time; /* time of last kperf sample */ + uint64_t kperf_last_sample_time; /* time of last kperf sample */ - uint64_t deadline; /* current deadline */ + uint64_t deadline; /* current deadline */ bool first_timeslice; /* has the quantum expired since context switch */ + bool must_idle; /* Needs to be forced idle as next selected thread is allowed on this processor */ + + processor_t processor_primary; /* pointer to primary processor for + * secondary SMT processors, or a pointer + * to ourselves for primaries or non-SMT */ + processor_t processor_secondary; #if defined(CONFIG_SCHED_TRADITIONAL) || defined(CONFIG_SCHED_MULTIQ) - struct run_queue runq; /* runq for this processor */ + struct run_queue runq; /* runq for this processor */ #endif #if defined(CONFIG_SCHED_TRADITIONAL) - int runq_bound_count; /* # of threads bound to this processor */ + int runq_bound_count; /* # of threads bound to this processor */ #endif #if defined(CONFIG_SCHED_GRRR) - struct grrr_run_queue grrr_runq; /* Group Ratio Round-Robin runq */ + struct grrr_run_queue grrr_runq; /* Group Ratio Round-Robin runq */ #endif + struct ipc_port * processor_self; /* port for operations */ - processor_t processor_primary; /* pointer to primary processor for - * secondary SMT processors, or a pointer - * to ourselves for primaries or non-SMT */ - processor_t processor_secondary; - struct ipc_port * processor_self; /* port for operations */ - - processor_t processor_list; /* all existing processors */ - processor_data_t processor_data; /* per-processor data */ + processor_t processor_list; /* all existing processors */ + processor_data_t processor_data; /* per-processor data */ }; -extern processor_t processor_list; -decl_simple_lock_data(extern,processor_list_lock) +extern processor_t processor_list; +decl_simple_lock_data(extern, processor_list_lock) #define MAX_SCHED_CPUS 64 /* Maximum number of 
CPUs supported by the scheduler. bits.h:bitmap_*() macros need to be used to support greater than 64 */ extern processor_t processor_array[MAX_SCHED_CPUS]; /* array indexed by cpuid */ -extern uint32_t processor_avail_count; +extern uint32_t processor_avail_count; +extern uint32_t processor_avail_count_user; -extern processor_t master_processor; +extern processor_t master_processor; -extern boolean_t sched_stats_active; +extern boolean_t sched_stats_active; -extern processor_t current_processor(void); +extern processor_t current_processor(void); /* Lock macros, always acquired and released with interrupts disabled (splsched()) */ +extern lck_grp_t pset_lck_grp; + #if __SMP__ -#define pset_lock(p) simple_lock(&(p)->sched_lock) -#define pset_unlock(p) simple_unlock(&(p)->sched_lock) -#define pset_lock_init(p) simple_lock_init(&(p)->sched_lock, 0) +#if defined(SCHED_PSET_TLOCK) +#define pset_lock_init(p) lck_ticket_init(&(p)->sched_lock) +#define pset_lock(p) lck_ticket_lock(&(p)->sched_lock) +#define pset_unlock(p) lck_ticket_unlock(&(p)->sched_lock) +#define pset_assert_locked(p) lck_ticket_assert_owned(&(p)->sched_lock) +#else /* SCHED_PSET_TLOCK*/ +#define pset_lock(p) simple_lock(&(p)->sched_lock, &pset_lck_grp) +#define pset_unlock(p) simple_unlock(&(p)->sched_lock) +#define pset_lock_init(p) simple_lock_init(&(p)->sched_lock, 0) #if defined(__arm__) || defined(__arm64__) #define pset_assert_locked(p) LCK_SPIN_ASSERT(&(p)->sched_lock, LCK_ASSERT_OWNED) -#else +#else /* arm || arm64 */ /* pset_lock() should be converted to use lck_spin_lock() instead of simple_lock() */ #define pset_assert_locked(p) do { (void)p; } while(0) -#endif - -#define rt_lock_lock(p) simple_lock(&SCHED(rt_runq)(p)->rt_lock) -#define rt_lock_unlock(p) simple_unlock(&SCHED(rt_runq)(p)->rt_lock) -#define rt_lock_init(p) simple_lock_init(&SCHED(rt_runq)(p)->rt_lock, 0) -#else -#define pset_lock(p) do { (void)p; } while(0) -#define pset_unlock(p) do { (void)p; } while(0) -#define pset_lock_init(p) do { (void)p; } while(0) +#endif /* !arm && !arm64 */ +#endif /* !SCHED_PSET_TLOCK */ +#define rt_lock_lock(p) simple_lock(&SCHED(rt_runq)(p)->rt_lock, &pset_lck_grp) +#define rt_lock_unlock(p) simple_unlock(&SCHED(rt_runq)(p)->rt_lock) +#define rt_lock_init(p) simple_lock_init(&SCHED(rt_runq)(p)->rt_lock, 0) +#else /* !SMP */ +#define pset_lock(p) do { (void)p; } while(0) +#define pset_unlock(p) do { (void)p; } while(0) +#define pset_lock_init(p) do { (void)p; } while(0) #define pset_assert_locked(p) do { (void)p; } while(0) -#define rt_lock_lock(p) do { (void)p; } while(0) -#define rt_lock_unlock(p) do { (void)p; } while(0) -#define rt_lock_init(p) do { (void)p; } while(0) -#endif +#define rt_lock_lock(p) do { (void)p; } while(0) +#define rt_lock_unlock(p) do { (void)p; } while(0) +#define rt_lock_init(p) do { (void)p; } while(0) +#endif /* SMP */ + +extern void processor_bootstrap(void); + +extern void processor_init( + processor_t processor, + int cpu_id, + processor_set_t processor_set); -extern void processor_bootstrap(void); +extern void processor_set_primary( + processor_t processor, + processor_t primary); -extern void processor_init( - processor_t processor, - int cpu_id, - processor_set_t processor_set); +extern kern_return_t processor_shutdown( + processor_t processor); -extern void processor_set_primary( - processor_t processor, - processor_t primary); +extern kern_return_t processor_start_from_user( + processor_t processor); +extern kern_return_t processor_exit_from_user( + processor_t processor); -extern
kern_return_t processor_shutdown( - processor_t processor); +kern_return_t +sched_processor_enable(processor_t processor, boolean_t enable); -extern void processor_queue_shutdown( - processor_t processor); +extern void processor_queue_shutdown( + processor_t processor); -extern processor_set_t processor_pset( - processor_t processor); -extern pset_node_t pset_node_root(void); +extern processor_set_t processor_pset( + processor_t processor); -extern processor_set_t pset_create( - pset_node_t node); +extern pset_node_t pset_node_root(void); -extern void pset_init( - processor_set_t pset, - pset_node_t node); +extern processor_set_t pset_create( + pset_node_t node); + +extern void pset_init( + processor_set_t pset, + pset_node_t node); extern processor_set_t pset_find( - uint32_t cluster_id, - processor_set_t default_pset); + uint32_t cluster_id, + processor_set_t default_pset); -extern kern_return_t processor_info_count( - processor_flavor_t flavor, - mach_msg_type_number_t *count); +extern kern_return_t processor_info_count( + processor_flavor_t flavor, + mach_msg_type_number_t *count); #define pset_deallocate(x) #define pset_reference(x) -extern void machine_run_count( - uint32_t count); +extern void machine_run_count( + uint32_t count); -extern processor_t machine_choose_processor( - processor_set_t pset, - processor_t processor); +extern processor_t machine_choose_processor( + processor_set_t pset, + processor_t processor); -#define next_pset(p) (((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets) +#define next_pset(p) (((p)->pset_list != PROCESSOR_SET_NULL)? (p)->pset_list: (p)->node->psets) -#define PSET_THING_TASK 0 -#define PSET_THING_THREAD 1 +#define PSET_THING_TASK 0 +#define PSET_THING_THREAD 1 -extern kern_return_t processor_set_things( - processor_set_t pset, - void **thing_list, - mach_msg_type_number_t *count, - int type); +extern kern_return_t processor_set_things( + processor_set_t pset, + void **thing_list, + mach_msg_type_number_t *count, + int type); extern pset_cluster_type_t recommended_pset_type(thread_t thread); inline static bool pset_is_recommended(processor_set_t pset) { - return ((pset->recommended_bitmask & pset->cpu_bitmask) != 0); + return (pset->recommended_bitmask & pset->cpu_bitmask) != 0; } extern void processor_state_update_idle(processor_t processor); extern void processor_state_update_from_thread(processor_t processor, thread_t thread); extern void processor_state_update_explicit(processor_t processor, int pri, - sfi_class_id_t sfi_class, pset_cluster_type_t pset_type, - perfcontrol_class_t perfctl_class); + sfi_class_id_t sfi_class, pset_cluster_type_t pset_type, + perfcontrol_class_t perfctl_class, thread_urgency_t urgency); #define PSET_LOAD_NUMERATOR_SHIFT 16 #define PSET_LOAD_FRACTIONAL_SHIFT 4 @@ -409,29 +442,34 @@ pset_update_processor_state(processor_set_t pset, processor_t processor, uint ne if ((old_state == PROCESSOR_RUNNING) || (new_state == PROCESSOR_RUNNING)) { sched_update_pset_load_average(pset); + if (new_state == PROCESSOR_RUNNING) { + assert(processor == current_processor()); + } } } -#else /* MACH_KERNEL_PRIVATE */ +#else /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS -extern void pset_deallocate( - processor_set_t pset); +extern void pset_deallocate( + processor_set_t pset); -extern void pset_reference( - processor_set_t pset); +extern void pset_reference( + processor_set_t pset); __END_DECLS -#endif /* MACH_KERNEL_PRIVATE */ +#endif /*
MACH_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE __BEGIN_DECLS -extern unsigned int processor_count; -extern processor_t cpu_to_processor(int cpu); +extern unsigned int processor_count; +extern processor_t cpu_to_processor(int cpu); + +extern kern_return_t enable_smt_processors(bool enable); __END_DECLS #endif /* KERNEL_PRIVATE */ -#endif /* _KERN_PROCESSOR_H_ */ +#endif /* _KERN_PROCESSOR_H_ */ diff --git a/osfmk/kern/processor_data.c b/osfmk/kern/processor_data.c index a62dbdb14..01a738675 100644 --- a/osfmk/kern/processor_data.c +++ b/osfmk/kern/processor_data.c @@ -2,7 +2,7 @@ * Copyright (c) 2003-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -37,9 +37,9 @@ void processor_data_init( - processor_t processor) + processor_t processor) { - (void)memset(&processor->processor_data, 0, sizeof (processor_data_t)); + (void)memset(&processor->processor_data, 0, sizeof(processor_data_t)); timer_init(&PROCESSOR_DATA(processor, idle_state)); timer_init(&PROCESSOR_DATA(processor, system_state)); diff --git a/osfmk/kern/processor_data.h b/osfmk/kern/processor_data.h index 8e70723bc..6c2f21ec5 100644 --- a/osfmk/kern/processor_data.h +++ b/osfmk/kern/processor_data.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
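Stepping back to the pset_lock() family defined above: per the lock-macro comment, the pset sched_lock is only ever taken and released with interrupts disabled. A minimal sketch of the intended pattern follows; examine_pset_cpus() is a hypothetical helper invented for illustration, while splsched()/splx(), the pset_lock()/pset_assert_locked()/pset_unlock() macros, and the cpu_bitmask field are the ones from the header:

static uint64_t
examine_pset_cpus(processor_set_t pset)
{
        spl_t    s;
        uint64_t cpus;

        s = splsched();           /* interrupts off before taking the lock */
        pset_lock(pset);          /* ticket lock or simple lock, per SCHED_PSET_TLOCK */
        pset_assert_locked(pset);

        cpus = pset->cpu_bitmask; /* a sched_lock-protected field */

        pset_unlock(pset);
        splx(s);                  /* restore the previous interrupt level */

        return cpus;
}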
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -43,58 +43,58 @@ #include struct processor_sched_statistics { - uint32_t csw_count; - uint32_t preempt_count; - uint32_t preempted_rt_count; - uint32_t preempted_by_rt_count; - uint32_t rt_sched_count; - uint32_t interrupt_count; - uint32_t ipi_count; - uint32_t timer_pop_count; - uint32_t idle_transitions; - uint32_t quantum_timer_expirations; + uint32_t csw_count; + uint32_t preempt_count; + uint32_t preempted_rt_count; + uint32_t preempted_by_rt_count; + uint32_t rt_sched_count; + uint32_t interrupt_count; + uint32_t ipi_count; + uint32_t timer_pop_count; + uint32_t idle_transitions; + uint32_t quantum_timer_expirations; }; struct processor_data { /* Processor state statistics */ - timer_data_t idle_state; - timer_data_t system_state; - timer_data_t user_state; + timer_data_t idle_state; + timer_data_t system_state; + timer_data_t user_state; - timer_t current_state; /* points to processor's idle, system, or user state timer */ + timer_t current_state; /* points to processor's idle, system, or user state timer */ /* Thread execution timers */ - timer_t thread_timer; /* points to current thread's user or system timer */ - timer_t kernel_timer; /* points to current thread's system_timer */ + timer_t thread_timer; /* points to current thread's user or system timer */ + timer_t kernel_timer; /* points to current thread's system_timer */ /* Kernel stack cache */ struct stack_cache { - vm_offset_t free; - unsigned int count; - } stack_cache; + vm_offset_t free; + unsigned int count; + } stack_cache; /* VM event counters */ - vm_statistics64_data_t vm_stat; + vm_statistics64_data_t vm_stat; /* waitq prepost cache */ -#define WQP_CACHE_MAX 50 +#define WQP_CACHE_MAX 50 struct wqp_cache { - uint64_t head; - unsigned int avail; + uint64_t head; + unsigned int avail; } wqp_cache; - int start_color; - unsigned long page_grab_count; - void *free_pages; + int start_color; + unsigned long page_grab_count; + void *free_pages; struct processor_sched_statistics sched_stats; - uint64_t timer_call_ttd; /* current timer call time-to-deadline */ - uint64_t wakeups_issued_total; /* Count of thread wakeups issued - * by this processor - */ + uint64_t timer_call_ttd; /* current timer call time-to-deadline */ + uint64_t wakeups_issued_total; /* Count of thread wakeups issued + * by this processor + */ struct debugger_state { debugger_op db_current_op; const char *db_message; - const char *db_panic_str; + const char *db_panic_str; va_list *db_panic_args; uint64_t db_panic_options; void *db_panic_data_ptr; @@ -105,47 +105,47 @@ struct processor_data { } debugger_state; }; -typedef struct processor_data processor_data_t; +typedef struct processor_data processor_data_t; -#define PROCESSOR_DATA(processor, member) \ - (processor)->processor_data.member +#define PROCESSOR_DATA(processor, member) \ + (processor)->processor_data.member -extern void processor_data_init( - processor_t processor); +extern void processor_data_init( + processor_t processor); -#define SCHED_STATS_INTERRUPT(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.interrupt_count++; \ - } \ -MACRO_END +#define SCHED_STATS_INTERRUPT(p) \ +MACRO_BEGIN \ + if (__builtin_expect(sched_stats_active, 0)) { \ + (p)->processor_data.sched_stats.interrupt_count++; \ + } \ +MACRO_END -#define SCHED_STATS_TIMER_POP(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.timer_pop_count++; \ - } \ +#define 
SCHED_STATS_TIMER_POP(p) \ +MACRO_BEGIN \ + if (__builtin_expect(sched_stats_active, 0)) { \ + (p)->processor_data.sched_stats.timer_pop_count++; \ + } \ MACRO_END -#define SCHED_STATS_IPI(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.ipi_count++; \ - } \ +#define SCHED_STATS_IPI(p) \ +MACRO_BEGIN \ + if (__builtin_expect(sched_stats_active, 0)) { \ + (p)->processor_data.sched_stats.ipi_count++; \ + } \ MACRO_END -#define SCHED_STATS_CPU_IDLE_START(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.idle_transitions++; \ - } \ +#define SCHED_STATS_CPU_IDLE_START(p) \ +MACRO_BEGIN \ + if (__builtin_expect(sched_stats_active, 0)) { \ + (p)->processor_data.sched_stats.idle_transitions++; \ + } \ MACRO_END -#define SCHED_STATS_QUANTUM_TIMER_EXPIRATION(p) \ -MACRO_BEGIN \ - if (__builtin_expect(sched_stats_active, 0)) { \ - (p)->processor_data.sched_stats.quantum_timer_expirations++; \ - } \ +#define SCHED_STATS_QUANTUM_TIMER_EXPIRATION(p) \ +MACRO_BEGIN \ + if (__builtin_expect(sched_stats_active, 0)) { \ + (p)->processor_data.sched_stats.quantum_timer_expirations++; \ + } \ MACRO_END #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/queue.h b/osfmk/kern/queue.h index ee2f141c6..6af62629f 100644 --- a/osfmk/kern/queue.h +++ b/osfmk/kern/queue.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,27 +22,27 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
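Each SCHED_STATS_* macro above expands to a counter increment guarded by sched_stats_active, with __builtin_expect(..., 0) marking the counting path as unlikely so the disabled case stays cheap. A sketch of a call site; handle_interrupt_sketch() is hypothetical, while SCHED_STATS_INTERRUPT() and current_processor() are declared above:

static void
handle_interrupt_sketch(void)
{
        /* bumps processor_data.sched_stats.interrupt_count, but only
         * when sched_stats_active is nonzero */
        SCHED_STATS_INTERRUPT(current_processor());

        /* ... actual interrupt dispatch would follow ... */
}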
- * + * * Carnegie Mellon requests users of this software to return to * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU @@ -64,8 +64,8 @@ * */ -#ifndef _KERN_QUEUE_H_ -#define _KERN_QUEUE_H_ +#ifndef _KERN_QUEUE_H_ +#define _KERN_QUEUE_H_ #include #include @@ -83,110 +83,110 @@ __BEGIN_DECLS * (there is also a third way maintained in bsd/sys/queue.h) * * Both methods use a common queue head and linkage pattern: - * The head of a queue is declared as: - * queue_head_t q_head; + * The head of a queue is declared as: + * queue_head_t q_head; * - * Elements in this queue are chained together using - * struct queue_entry objects embedded within a structure: - * struct some_data { - * int field1; - * int field2; - * ... - * queue_chain_t link; - * ... - * int last_field; - * }; - * struct some_data is referred to as the queue "element." - * (note that queue_chain_t is typedef'd to struct queue_entry) + * Elements in this queue are chained together using + * struct queue_entry objects embedded within a structure: + * struct some_data { + * int field1; + * int field2; + * ... + * queue_chain_t link; + * ... + * int last_field; + * }; + * struct some_data is referred to as the queue "element." + * (note that queue_chain_t is typedef'd to struct queue_entry) * * IMPORTANT: The two queue iteration methods described below are not * compatible with one another. You must choose one and be careful * to use only the supported APIs for that method. * * Method 1: chaining of queue_chain_t (linkage chains) - * This method uses the next and prev pointers of the struct queue_entry - * linkage object embedded in a queue element to point to the next or - * previous queue_entry structure in the chain. The head of the queue - * (the queue_head_t object) will point to the first and last - * struct queue_entry object, and both the next and prev pointer will - * point back to the head if the queue is empty. + * This method uses the next and prev pointers of the struct queue_entry + * linkage object embedded in a queue element to point to the next or + * previous queue_entry structure in the chain. The head of the queue + * (the queue_head_t object) will point to the first and last + * struct queue_entry object, and both the next and prev pointer will + * point back to the head if the queue is empty. * - * This method is the most flexible method of chaining objects together - * as it allows multiple chains through a given object, by embedding - * multiple queue_chain_t objects in the structure, while simultaneously - * providing fast removal and insertion into the queue using only - * struct queue_entry object pointers. + * This method is the most flexible method of chaining objects together + * as it allows multiple chains through a given object, by embedding + * multiple queue_chain_t objects in the structure, while simultaneously + * providing fast removal and insertion into the queue using only + * struct queue_entry object pointers. 
* - * ++ Valid APIs for this style queue ++ - * ------------------------------------- - * [C] queue_init - * [C] queue_first - * [C] queue_next - * [C] queue_last - * [C] queue_prev - * [C] queue_end - * [C] queue_empty + * ++ Valid APIs for this style queue ++ + * ------------------------------------- + * [C] queue_init + * [C] queue_first + * [C] queue_next + * [C] queue_last + * [C] queue_prev + * [C] queue_end + * [C] queue_empty * - * [1] enqueue - * [1] dequeue - * [1] enqueue_head - * [1] enqueue_tail - * [1] dequeue_head - * [1] dequeue_tail - * [1] remqueue - * [1] insque - * [1] remque - * [1] re_queue_head - * [1] re_queue_tail - * [1] movqueue - * [1] qe_element - * [1] qe_foreach - * [1] qe_foreach_safe - * [1] qe_foreach_element - * [1] qe_foreach_element_safe + * [1] enqueue + * [1] dequeue + * [1] enqueue_head + * [1] enqueue_tail + * [1] dequeue_head + * [1] dequeue_tail + * [1] remqueue + * [1] insque + * [1] remque + * [1] re_queue_head + * [1] re_queue_tail + * [1] movqueue + * [1] qe_element + * [1] qe_foreach + * [1] qe_foreach_safe + * [1] qe_foreach_element + * [1] qe_foreach_element_safe * * Method 2: chaining of elements (element chains) - * This method uses the next and prev pointers of the struct queue_entry - * linkage object embedded in a queue element to point to the next or - * previous queue element (not another queue_entry). The head of the - * queue will point to the first and last queue element (struct some_data - * from the above example) NOT the embedded queue_entry structure. The - * first queue element will have a prev pointer that points to the - * queue_head_t, and the last queue element will have a next pointer - * that points to the queue_head_t. + * This method uses the next and prev pointers of the struct queue_entry + * linkage object embedded in a queue element to point to the next or + * previous queue element (not another queue_entry). The head of the + * queue will point to the first and last queue element (struct some_data + * from the above example) NOT the embedded queue_entry structure. The + * first queue element will have a prev pointer that points to the + * queue_head_t, and the last queue element will have a next pointer + * that points to the queue_head_t. * - * This method requires knowledge of the queue_head_t of the queue on - * which an element resides in order to remove the element. Iterating - * through the elements of the queue is also more cumbersome because - * a check against the head pointer plus a cast then offset operation - * must be performed at each step of the iteration. + * This method requires knowledge of the queue_head_t of the queue on + * which an element resides in order to remove the element. Iterating + * through the elements of the queue is also more cumbersome because + * a check against the head pointer plus a cast then offset operation + * must be performed at each step of the iteration. 
* - * ++ Valid APIs for this style queue ++ - * ------------------------------------- - * [C] queue_init - * [C] queue_first - * [C] queue_next - * [C] queue_last - * [C] queue_prev - * [C] queue_end - * [C] queue_empty + * ++ Valid APIs for this style queue ++ + * ------------------------------------- + * [C] queue_init + * [C] queue_first + * [C] queue_next + * [C] queue_last + * [C] queue_prev + * [C] queue_end + * [C] queue_empty * - * [2] queue_enter - * [2] queue_enter_first - * [2] queue_insert_before - * [2] queue_insert_after - * [2] queue_field - * [2] queue_remove - * [2] queue_remove_first - * [2] queue_remove_last - * [2] queue_assign - * [2] queue_new_head - * [2] queue_iterate + * [2] queue_enter + * [2] queue_enter_first + * [2] queue_insert_before + * [2] queue_insert_after + * [2] queue_field + * [2] queue_remove + * [2] queue_remove_first + * [2] queue_remove_last + * [2] queue_assign + * [2] queue_new_head + * [2] queue_iterate * * Legend: - * [C] -> API common to both methods - * [1] -> API used only in method 1 (linkage chains) - * [2] -> API used only in method 2 (element chains) + * [C] -> API common to both methods + * [1] -> API used only in method 1 (linkage chains) + * [2] -> API used only in method 2 (element chains) */ /* @@ -194,26 +194,26 @@ __BEGIN_DECLS */ struct queue_entry { - struct queue_entry *next; /* next element */ - struct queue_entry *prev; /* previous element */ + struct queue_entry *next; /* next element */ + struct queue_entry *prev; /* previous element */ #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) /* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers * are 32-bit: * Since this type is so often cast to various 64-bit aligned types * aligning it to 64-bits will avoid -wcast-align without needing - * to disable it entirely. The impact on memory footprint should be + * to disable it entirely. The impact on memory footprint should be * negligible. */ -} __attribute__ ((aligned (8))); +} __attribute__ ((aligned(8))); #else }; #endif -typedef struct queue_entry *queue_t; -typedef struct queue_entry queue_head_t; -typedef struct queue_entry queue_chain_t; -typedef struct queue_entry *queue_entry_t; +typedef struct queue_entry *queue_t; +typedef struct queue_entry queue_head_t; +typedef struct queue_entry queue_chain_t; +typedef struct queue_entry *queue_entry_t; /* * enqueue puts "elt" on the "queue". @@ -221,31 +221,35 @@ typedef struct queue_entry *queue_entry_t; * remqueue removes the specified "elt" from its queue. 
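To ground Method 1 (linkage chains), here is a minimal sketch using the same struct some_data layout as the comment above; sum_elements() is invented for illustration, while enqueue_tail() and qe_foreach_element() are Method 1 APIs from the list earlier, and qe_element() recovers the containing element by subtracting the embedded field's offset:

struct some_data {
        int           value;
        queue_chain_t link;        /* embedded linkage object */
};

static int
sum_elements(queue_head_t *q, struct some_data *d)
{
        struct some_data *elem;
        int total = 0;

        enqueue_tail(q, &d->link); /* Method 1 passes the queue_entry_t */

        /* walks the queue_entry_t chain; each entry is converted back
         * to its containing struct some_data via qe_element() */
        qe_foreach_element(elem, q, link) {
                total += elem->value;
        }
        return total;
}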
*/ -#define enqueue(queue,elt) enqueue_tail(queue, elt) -#define dequeue(queue) dequeue_head(queue) +#define enqueue(queue, elt) enqueue_tail(queue, elt) +#define dequeue(queue) dequeue_head(queue) #ifdef XNU_KERNEL_PRIVATE #include -static inline void __QUEUE_ELT_VALIDATE(queue_entry_t elt) { - queue_entry_t elt_next, elt_prev; - +static inline void +__QUEUE_ELT_VALIDATE(queue_entry_t elt) +{ + queue_entry_t elt_next, elt_prev; + if (__improbable(elt == (queue_entry_t)0)) { panic("Invalid queue element %p", elt); } - + elt_next = elt->next; elt_prev = elt->prev; - + if (__improbable(elt_next == (queue_entry_t)0 || elt_prev == (queue_entry_t)0)) { panic("Invalid queue element pointers for %p: next %p prev %p", elt, elt_next, elt_prev); } if (__improbable(elt_next->prev != elt || elt_prev->next != elt)) { - panic("Invalid queue element linkage for %p: next %p next->prev %p prev %p prev->next %p", - elt, elt_next, elt_next->prev, elt_prev, elt_prev->next); + panic("Invalid queue element linkage for %p: next %p next->prev %p prev %p prev->next %p", + elt, elt_next, elt_next->prev, elt_prev, elt_prev->next); } } -static inline void __DEQUEUE_ELT_CLEANUP(queue_entry_t elt) { +static inline void +__DEQUEUE_ELT_CLEANUP(queue_entry_t elt) +{ (elt)->next = (queue_entry_t) 0; (elt)->prev = (queue_entry_t) 0; } @@ -256,10 +260,10 @@ static inline void __DEQUEUE_ELT_CLEANUP(queue_entry_t elt) { static __inline__ void enqueue_head( - queue_t que, - queue_entry_t elt) + queue_t que, + queue_entry_t elt) { - queue_entry_t old_head; + queue_entry_t old_head; __QUEUE_ELT_VALIDATE((queue_entry_t)que); old_head = que->next; @@ -271,10 +275,10 @@ enqueue_head( static __inline__ void enqueue_tail( - queue_t que, - queue_entry_t elt) + queue_t que, + queue_entry_t elt) { - queue_entry_t old_tail; + queue_entry_t old_tail; __QUEUE_ELT_VALIDATE((queue_entry_t)que); old_tail = que->prev; @@ -286,10 +290,10 @@ enqueue_tail( static __inline__ queue_entry_t dequeue_head( - queue_t que) + queue_t que) { - queue_entry_t elt = (queue_entry_t) 0; - queue_entry_t new_head; + queue_entry_t elt = (queue_entry_t) 0; + queue_entry_t new_head; if (que->next != que) { elt = que->next; @@ -300,15 +304,15 @@ dequeue_head( __DEQUEUE_ELT_CLEANUP(elt); } - return (elt); + return elt; } static __inline__ queue_entry_t dequeue_tail( - queue_t que) + queue_t que) { - queue_entry_t elt = (queue_entry_t) 0; - queue_entry_t new_tail; + queue_entry_t elt = (queue_entry_t) 0; + queue_entry_t new_tail; if (que->prev != que) { elt = que->prev; @@ -319,14 +323,14 @@ dequeue_tail( __DEQUEUE_ELT_CLEANUP(elt); } - return (elt); + return elt; } static __inline__ void remqueue( - queue_entry_t elt) + queue_entry_t elt) { - queue_entry_t next_elt, prev_elt; + queue_entry_t next_elt, prev_elt; __QUEUE_ELT_VALIDATE(elt); next_elt = elt->next; @@ -338,10 +342,10 @@ remqueue( static __inline__ void insque( - queue_entry_t entry, - queue_entry_t pred) + queue_entry_t entry, + queue_entry_t pred) { - queue_entry_t successor; + queue_entry_t successor; __QUEUE_ELT_VALIDATE(pred); successor = pred->next; @@ -355,7 +359,7 @@ static __inline__ void remque( queue_entry_t elt) { - queue_entry_t next_elt, prev_elt; + queue_entry_t next_elt, prev_elt; __QUEUE_ELT_VALIDATE(elt); next_elt = elt->next; @@ -379,7 +383,7 @@ remque( static __inline__ void re_queue_head(queue_t que, queue_entry_t elt) { - queue_entry_t n_elt, p_elt; + queue_entry_t n_elt, p_elt; __QUEUE_ELT_VALIDATE(elt); __QUEUE_ELT_VALIDATE((queue_entry_t)que); @@ -412,7 +416,7 @@ 
re_queue_head(queue_t que, queue_entry_t elt) static __inline__ void re_queue_tail(queue_t que, queue_entry_t elt) { - queue_entry_t n_elt, p_elt; + queue_entry_t n_elt, p_elt; __QUEUE_ELT_VALIDATE(elt); __QUEUE_ELT_VALIDATE((queue_entry_t)que); @@ -445,7 +449,7 @@ re_queue_tail(queue_t que, queue_entry_t elt) * Note: * Do not use pointer types for */ -#define qe_element(qe, type, field) \ +#define qe_element(qe, type, field) \ ((type *)((void *)((char *)(qe) - __offsetof(type, field)))) /* @@ -537,7 +541,7 @@ re_queue_tail(queue_t que, queue_entry_t elt) queue_entry_t _tmp_entry = dequeue_head((head)); \ type *_tmp_element = (type*) NULL; \ if (_tmp_entry != (queue_entry_t) NULL) \ - _tmp_element = qe_element(_tmp_entry, type, field); \ + _tmp_element = qe_element(_tmp_entry, type, field); \ _tmp_element; \ }) @@ -546,7 +550,7 @@ re_queue_tail(queue_t que, queue_entry_t elt) queue_entry_t _tmp_entry = dequeue_tail((head)); \ type *_tmp_element = (type*) NULL; \ if (_tmp_entry != (queue_entry_t) NULL) \ - _tmp_element = qe_element(_tmp_entry, type, field); \ + _tmp_element = qe_element(_tmp_entry, type, field); \ _tmp_element; \ }) @@ -555,7 +559,7 @@ re_queue_tail(queue_t que, queue_entry_t elt) queue_entry_t _tmp_entry = queue_first((head)); \ type *_tmp_element = (type*) NULL; \ if (_tmp_entry != (queue_entry_t) head) \ - _tmp_element = qe_element(_tmp_entry, type, field); \ + _tmp_element = qe_element(_tmp_entry, type, field); \ _tmp_element; \ }) @@ -564,7 +568,7 @@ re_queue_tail(queue_t que, queue_entry_t elt) queue_entry_t _tmp_entry = queue_last((head)); \ type *_tmp_element = (type*) NULL; \ if (_tmp_entry != (queue_entry_t) head) \ - _tmp_element = qe_element(_tmp_entry, type, field); \ + _tmp_element = qe_element(_tmp_entry, type, field); \ _tmp_element; \ }) @@ -578,8 +582,8 @@ re_queue_tail(queue_t que, queue_entry_t elt) * void queue_init(q) * queue_t q; \* MODIFIED *\ */ -#define queue_init(q) \ -MACRO_BEGIN \ +#define queue_init(q) \ +MACRO_BEGIN \ (q)->next = (q);\ (q)->prev = (q);\ MACRO_END @@ -614,7 +618,7 @@ MACRO_END * queue_entry_t queue_first(q) * queue_t q; \* IN *\ */ -#define queue_first(q) ((q)->next) +#define queue_first(q) ((q)->next) /* * Macro: queue_next @@ -624,7 +628,7 @@ MACRO_END * queue_entry_t queue_next(qc) * queue_t qc; */ -#define queue_next(qc) ((qc)->next) +#define queue_next(qc) ((qc)->next) /* * Macro: queue_last @@ -634,7 +638,7 @@ MACRO_END * queue_entry_t queue_last(q) * queue_t q; \* IN *\ */ -#define queue_last(q) ((q)->prev) +#define queue_last(q) ((q)->prev) /* * Macro: queue_prev @@ -644,7 +648,7 @@ MACRO_END * queue_entry_t queue_prev(qc) * queue_t qc; */ -#define queue_prev(qc) ((qc)->prev) +#define queue_prev(qc) ((qc)->prev) /* * Macro: queue_end @@ -656,7 +660,7 @@ MACRO_END * queue_t q; * queue_entry_t qe; */ -#define queue_end(q, qe) ((q) == (qe)) +#define queue_end(q, qe) ((q) == (qe)) /* * Macro: queue_empty @@ -666,7 +670,7 @@ MACRO_END * boolean_t queue_empty(q) * queue_t q; */ -#define queue_empty(q) queue_end((q), queue_first(q)) +#define queue_empty(q) queue_end((q), queue_first(q)) /* * Function: movqueue @@ -686,7 +690,7 @@ MACRO_END static __inline__ void movqueue(queue_t _old, queue_t _new) { - queue_entry_t next_elt, prev_elt; + queue_entry_t next_elt, prev_elt; __QUEUE_ELT_VALIDATE((queue_entry_t)_old); @@ -739,22 +743,22 @@ movqueue(queue_t _old, queue_t _new) * could cause stackshot to trip over an inconsistent queue during * iteration. 
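For contrast, a Method 2 (element chain) sketch reusing the illustrative struct some_data; here the type argument is the element's pointer type, the head and linkage pointers reference elements rather than entries, and the two methods must never be mixed on one queue. method2_sketch() and drain_sketch() are hypothetical helpers; queue_enter(), queue_iterate(), queue_remove(), queue_empty(), and queue_remove_first() are the Method 2 and common APIs from this header:

static void
method2_sketch(queue_head_t *q, struct some_data *d)
{
        struct some_data *elem;

        queue_enter(q, d, struct some_data *, link); /* append at the tail */

        queue_iterate(q, elem, struct some_data *, link) {
                if (elem->value < 0) {
                        queue_remove(q, elem, struct some_data *, link);
                        break; /* linkage was NULLed, so stop iterating */
                }
        }
}

static void
drain_sketch(queue_head_t *q)
{
        struct some_data *elem;

        while (!queue_empty(q)) {
                queue_remove_first(q, elem, struct some_data *, link);
                /* ... dispose of elem here ... */
        }
}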
*/ -#define queue_enter(head, elt, type, field) \ -MACRO_BEGIN \ - queue_entry_t __prev; \ - \ - __prev = (head)->prev; \ - (elt)->field.prev = __prev; \ - (elt)->field.next = head; \ - __compiler_barrier(); \ - if ((head) == __prev) { \ - (head)->next = (queue_entry_t) (elt); \ - } \ - else { \ - ((type)(void *)__prev)->field.next = \ - (queue_entry_t)(elt); \ - } \ - (head)->prev = (queue_entry_t) elt; \ +#define queue_enter(head, elt, type, field) \ +MACRO_BEGIN \ + queue_entry_t __prev; \ + \ + __prev = (head)->prev; \ + (elt)->field.prev = __prev; \ + (elt)->field.next = head; \ + __compiler_barrier(); \ + if ((head) == __prev) { \ + (head)->next = (queue_entry_t) (elt); \ + } \ + else { \ + ((type)(void *)__prev)->field.next = \ + (queue_entry_t)(elt); \ + } \ + (head)->prev = (queue_entry_t) elt; \ MACRO_END /* @@ -770,21 +774,21 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_enter_first(head, elt, type, field) \ -MACRO_BEGIN \ - queue_entry_t __next; \ - \ - __next = (head)->next; \ - if ((head) == __next) { \ - (head)->prev = (queue_entry_t) (elt); \ - } \ - else { \ - ((type)(void *)__next)->field.prev = \ - (queue_entry_t)(elt); \ - } \ - (elt)->field.next = __next; \ - (elt)->field.prev = head; \ - (head)->next = (queue_entry_t) elt; \ +#define queue_enter_first(head, elt, type, field) \ +MACRO_BEGIN \ + queue_entry_t __next; \ + \ + __next = (head)->next; \ + if ((head) == __next) { \ + (head)->prev = (queue_entry_t) (elt); \ + } \ + else { \ + ((type)(void *)__next)->field.prev = \ + (queue_entry_t)(elt); \ + } \ + (elt)->field.next = __next; \ + (elt)->field.prev = head; \ + (head)->next = (queue_entry_t) elt; \ MACRO_END /* @@ -801,34 +805,34 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_insert_before(head, elt, cur, type, field) \ -MACRO_BEGIN \ - queue_entry_t __prev; \ - \ - if ((head) == (queue_entry_t)(cur)) { \ - (elt)->field.next = (head); \ - if ((head)->next == (head)) { /* only element */ \ - (elt)->field.prev = (head); \ - (head)->next = (queue_entry_t)(elt); \ - } else { /* last element */ \ - __prev = (elt)->field.prev = (head)->prev; \ - ((type)(void *)__prev)->field.next = \ - (queue_entry_t)(elt); \ - } \ - (head)->prev = (queue_entry_t)(elt); \ - } else { \ - (elt)->field.next = (queue_entry_t)(cur); \ - if ((head)->next == (queue_entry_t)(cur)) { \ - /* first element */ \ - (elt)->field.prev = (head); \ - (head)->next = (queue_entry_t)(elt); \ - } else { /* middle element */ \ - __prev = (elt)->field.prev = (cur)->field.prev; \ - ((type)(void *)__prev)->field.next = \ - (queue_entry_t)(elt); \ - } \ - (cur)->field.prev = (queue_entry_t)(elt); \ - } \ +#define queue_insert_before(head, elt, cur, type, field) \ +MACRO_BEGIN \ + queue_entry_t __prev; \ + \ + if ((head) == (queue_entry_t)(cur)) { \ + (elt)->field.next = (head); \ + if ((head)->next == (head)) { /* only element */ \ + (elt)->field.prev = (head); \ + (head)->next = (queue_entry_t)(elt); \ + } else { /* last element */ \ + __prev = (elt)->field.prev = (head)->prev; \ + ((type)(void *)__prev)->field.next = \ + (queue_entry_t)(elt); \ + } \ + (head)->prev = (queue_entry_t)(elt); \ + } else { \ + (elt)->field.next = (queue_entry_t)(cur); \ + if ((head)->next == (queue_entry_t)(cur)) { \ + /* first element */ \ + (elt)->field.prev = (head); \ + (head)->next = (queue_entry_t)(elt); \ + } else { /* middle element */ \ + __prev = (elt)->field.prev = (cur)->field.prev; \ + 
((type)(void *)__prev)->field.next = \ + (queue_entry_t)(elt); \ + } \ + (cur)->field.prev = (queue_entry_t)(elt); \ + } \ MACRO_END /* @@ -845,34 +849,34 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_insert_after(head, elt, cur, type, field) \ -MACRO_BEGIN \ - queue_entry_t __next; \ - \ - if ((head) == (queue_entry_t)(cur)) { \ - (elt)->field.prev = (head); \ - if ((head)->next == (head)) { /* only element */ \ - (elt)->field.next = (head); \ - (head)->prev = (queue_entry_t)(elt); \ - } else { /* first element */ \ - __next = (elt)->field.next = (head)->next; \ - ((type)(void *)__next)->field.prev = \ - (queue_entry_t)(elt); \ - } \ - (head)->next = (queue_entry_t)(elt); \ - } else { \ - (elt)->field.prev = (queue_entry_t)(cur); \ - if ((head)->prev == (queue_entry_t)(cur)) { \ - /* last element */ \ - (elt)->field.next = (head); \ - (head)->prev = (queue_entry_t)(elt); \ - } else { /* middle element */ \ - __next = (elt)->field.next = (cur)->field.next; \ - ((type)(void *)__next)->field.prev = \ - (queue_entry_t)(elt); \ - } \ - (cur)->field.next = (queue_entry_t)(elt); \ - } \ +#define queue_insert_after(head, elt, cur, type, field) \ +MACRO_BEGIN \ + queue_entry_t __next; \ + \ + if ((head) == (queue_entry_t)(cur)) { \ + (elt)->field.prev = (head); \ + if ((head)->next == (head)) { /* only element */ \ + (elt)->field.next = (head); \ + (head)->prev = (queue_entry_t)(elt); \ + } else { /* first element */ \ + __next = (elt)->field.next = (head)->next; \ + ((type)(void *)__next)->field.prev = \ + (queue_entry_t)(elt); \ + } \ + (head)->next = (queue_entry_t)(elt); \ + } else { \ + (elt)->field.prev = (queue_entry_t)(cur); \ + if ((head)->prev == (queue_entry_t)(cur)) { \ + /* last element */ \ + (elt)->field.next = (head); \ + (head)->prev = (queue_entry_t)(elt); \ + } else { /* middle element */ \ + __next = (elt)->field.next = (cur)->field.next; \ + ((type)(void *)__next)->field.prev = \ + (queue_entry_t)(elt); \ + } \ + (cur)->field.next = (queue_entry_t)(elt); \ + } \ MACRO_END /* @@ -883,8 +887,8 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_field(head, thing, type, field) \ - (((head) == (thing)) ? (head) : &((type)(void *)(thing))->field) +#define queue_field(head, thing, type, field) \ + (((head) == (thing)) ? 
(head) : &((type)(void *)(thing))->field) /* * Macro: queue_remove @@ -896,25 +900,25 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_remove(head, elt, type, field) \ -MACRO_BEGIN \ - queue_entry_t __next, __prev; \ - \ - __next = (elt)->field.next; \ - __prev = (elt)->field.prev; \ - \ - if ((head) == __next) \ - (head)->prev = __prev; \ - else \ - ((type)(void *)__next)->field.prev = __prev; \ - \ - if ((head) == __prev) \ - (head)->next = __next; \ - else \ - ((type)(void *)__prev)->field.next = __next; \ - \ - (elt)->field.next = NULL; \ - (elt)->field.prev = NULL; \ +#define queue_remove(head, elt, type, field) \ +MACRO_BEGIN \ + queue_entry_t __next, __prev; \ + \ + __next = (elt)->field.next; \ + __prev = (elt)->field.prev; \ + \ + if ((head) == __next) \ + (head)->prev = __prev; \ + else \ + ((type)(void *)__next)->field.prev = __prev; \ + \ + if ((head) == __prev) \ + (head)->next = __next; \ + else \ + ((type)(void *)__prev)->field.next = __next; \ + \ + (elt)->field.next = NULL; \ + (elt)->field.prev = NULL; \ MACRO_END /* @@ -928,21 +932,21 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_remove_first(head, entry, type, field) \ -MACRO_BEGIN \ - queue_entry_t __next; \ - \ - (entry) = (type)(void *) ((head)->next); \ - __next = (entry)->field.next; \ - \ - if ((head) == __next) \ - (head)->prev = (head); \ - else \ - ((type)(void *)(__next))->field.prev = (head); \ - (head)->next = __next; \ - \ - (entry)->field.next = NULL; \ - (entry)->field.prev = NULL; \ +#define queue_remove_first(head, entry, type, field) \ +MACRO_BEGIN \ + queue_entry_t __next; \ + \ + (entry) = (type)(void *) ((head)->next); \ + __next = (entry)->field.next; \ + \ + if ((head) == __next) \ + (head)->prev = (head); \ + else \ + ((type)(void *)(__next))->field.prev = (head); \ + (head)->next = __next; \ + \ + (entry)->field.next = NULL; \ + (entry)->field.prev = NULL; \ MACRO_END /* @@ -956,21 +960,21 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_remove_last(head, entry, type, field) \ -MACRO_BEGIN \ - queue_entry_t __prev; \ - \ - (entry) = (type)(void *) ((head)->prev); \ - __prev = (entry)->field.prev; \ - \ - if ((head) == __prev) \ - (head)->next = (head); \ - else \ - ((type)(void *)(__prev))->field.next = (head); \ - (head)->prev = __prev; \ - \ - (entry)->field.next = NULL; \ - (entry)->field.prev = NULL; \ +#define queue_remove_last(head, entry, type, field) \ +MACRO_BEGIN \ + queue_entry_t __prev; \ + \ + (entry) = (type)(void *) ((head)->prev); \ + __prev = (entry)->field.prev; \ + \ + if ((head) == __prev) \ + (head)->next = (head); \ + else \ + ((type)(void *)(__prev))->field.next = (head); \ + (head)->prev = __prev; \ + \ + (entry)->field.next = NULL; \ + (entry)->field.prev = NULL; \ MACRO_END /* @@ -978,11 +982,11 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_assign(to, from, type, field) \ -MACRO_BEGIN \ - ((type)(void *)((from)->prev))->field.next = (to); \ - ((type)(void *)((from)->next))->field.prev = (to); \ - *to = *from; \ +#define queue_assign(to, from, type, field) \ +MACRO_BEGIN \ + ((type)(void *)((from)->prev))->field.next = (to); \ + ((type)(void *)((from)->next))->field.prev = (to); \ + *to = *from; \ MACRO_END /* @@ -998,17 +1002,17 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration 
(element chains) */ -#define queue_new_head(old, new, type, field) \ -MACRO_BEGIN \ - if (!queue_empty(old)) { \ - *(new) = *(old); \ - ((type)(void *)((new)->next))->field.prev = \ - (new); \ - ((type)(void *)((new)->prev))->field.next = \ - (new); \ - } else { \ - queue_init(new); \ - } \ +#define queue_new_head(old, new, type, field) \ +MACRO_BEGIN \ + if (!queue_empty(old)) { \ + *(new) = *(old); \ + ((type)(void *)((new)->next))->field.prev = \ + (new); \ + ((type)(void *)((new)->prev))->field.next = \ + (new); \ + } else { \ + queue_init(new); \ + } \ MACRO_END /* @@ -1026,78 +1030,12 @@ MACRO_END * Note: * This should only be used with Method 2 queue iteration (element chains) */ -#define queue_iterate(head, elt, type, field) \ - for ((elt) = (type)(void *) queue_first(head); \ - !queue_end((head), (queue_entry_t)(elt)); \ +#define queue_iterate(head, elt, type, field) \ + for ((elt) = (type)(void *) queue_first(head); \ + !queue_end((head), (queue_entry_t)(elt)); \ (elt) = (type)(void *) queue_next(&(elt)->field)) -#ifdef MACH_KERNEL_PRIVATE - -#include - -/*----------------------------------------------------------------*/ -/* - * Define macros for queues with locks. - */ -struct mpqueue_head { - struct queue_entry head; /* header for queue */ - uint64_t earliest_soft_deadline; - uint64_t count; - lck_mtx_t lock_data; -#if defined(__i386__) || defined(__x86_64__) - lck_mtx_ext_t lock_data_ext; -#endif -}; - -typedef struct mpqueue_head mpqueue_head_t; - -#define round_mpq(size) (size) - - -#if defined(__i386__) || defined(__x86_64__) - -#define mpqueue_init(q, lck_grp, lck_attr) \ -MACRO_BEGIN \ - queue_init(&(q)->head); \ - lck_mtx_init_ext(&(q)->lock_data, \ - &(q)->lock_data_ext, \ - lck_grp, \ - lck_attr); \ - (q)->earliest_soft_deadline = UINT64_MAX; \ - (q)->count = 0; \ -MACRO_END - -#else - -#define mpqueue_init(q, lck_grp, lck_attr) \ -MACRO_BEGIN \ - queue_init(&(q)->head); \ - lck_mtx_init(&(q)->lock_data, \ - lck_grp, \ - lck_attr); \ -MACRO_END -#endif - - -#define mpenqueue_tail(q, elt) \ -MACRO_BEGIN \ - lck_mtx_lock_spin_always(&(q)->lock_data); \ - enqueue_tail(&(q)->head, elt); \ - lck_mtx_unlock_always(&(q)->lock_data); \ -MACRO_END - -#define mpdequeue_head(q, elt) \ -MACRO_BEGIN \ - lck_mtx_lock_spin_always(&(q)->lock_data); \ - if (queue_empty(&(q)->head)) \ - *(elt) = 0; \ - else \ - *(elt) = dequeue_head(&(q)->head); \ - lck_mtx_unlock_always(&(q)->lock_data); \ -MACRO_END - -#endif /* MACH_KERNEL_PRIVATE */ __END_DECLS -#endif /* _KERN_QUEUE_H_ */ +#endif /* _KERN_QUEUE_H_ */ diff --git a/osfmk/kern/remote_time.c b/osfmk/kern/remote_time.c new file mode 100644 index 000000000..11022ed4d --- /dev/null +++ b/osfmk/kern/remote_time.c @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2017 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if CONFIG_MACH_BRIDGE_SEND_TIME + +uint32_t bt_enable_flag = 0; +lck_spin_t *bt_maintenance_lock = NULL; +_Atomic uint32_t bt_init_flag = 0; + +void mach_bridge_timer_maintenance(void); +void mach_bridge_timer_init(void); +uint32_t mach_bridge_timer_enable(uint32_t new_value, int change); + +/* + * When CONFIG_MACH_BRIDGE_SEND_TIME is defined, it is expected + * that a machine-specific timestamp sending routine such as + * void mach_bridge_send_timestamp(uint64_t); has also been defined. + */ +extern void mach_bridge_send_timestamp(uint64_t); + +void +mach_bridge_timer_maintenance() +{ + if (!bt_init_flag) { + return; + } + + lck_spin_lock(bt_maintenance_lock); + if (!bt_enable_flag) { + goto done; + } + mach_bridge_send_timestamp(0); + +done: + lck_spin_unlock(bt_maintenance_lock); +} + +/* + * This function should be called only once from the callback + * registration function + */ +void +mach_bridge_timer_init(void) +{ + assert(!bt_init_flag); + /* Initialize the lock */ + static lck_grp_t *bt_lck_grp = NULL; + bt_lck_grp = lck_grp_alloc_init("bridgetimestamp", LCK_GRP_ATTR_NULL); + bt_maintenance_lock = lck_spin_alloc_init(bt_lck_grp, NULL); +} + +/* + * If change = 0, return the current value of bridge_timer_enable + * If change = 1, update bridge_timer_enable and return the updated + * value + */ +uint32_t +mach_bridge_timer_enable(uint32_t new_value, int change) +{ + uint32_t current_value = 0; + assert(bt_init_flag == 1); + lck_spin_lock(bt_maintenance_lock); + if (change) { + bt_enable_flag = new_value; + } + current_value = bt_enable_flag; + lck_spin_unlock(bt_maintenance_lock); + return current_value; +} + +#endif /* CONFIG_MACH_BRIDGE_SEND_TIME */ + +#if CONFIG_MACH_BRIDGE_RECV_TIME +#include + +/* + * functions used by machine-specific code + * that implements CONFIG_MACH_BRIDGE_RECV_TIME + */ +void mach_bridge_add_timestamp(uint64_t remote_timestamp, uint64_t local_timestamp); +void bt_calibration_thread_start(void); +lck_spin_t *ts_conversion_lock = NULL; + +/* function called by sysctl */ +struct bt_params bt_params_get_latest(void); + +/* + * Platform specific bridge time receiving interface. + * These variables should be exported by the platform specific time receiving code. 
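As the comment above notes, mach_bridge_timer_enable() doubles as a getter and a setter depending on its change argument. A short sketch of both uses; bridge_timer_toggle_sketch() is a hypothetical caller, and per the assert inside the function itself, bt_init_flag must already be set:

static void
bridge_timer_toggle_sketch(void)
{
        /* change == 0: read bt_enable_flag without modifying it */
        uint32_t cur = mach_bridge_timer_enable(0, 0);

        /* change == 1: store the new value and read back the result */
        uint32_t updated = mach_bridge_timer_enable(cur ? 0 : 1, 1);
        assert(updated != cur);
}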
+ */ +extern lck_spin_t *bt_spin_lock; +extern _Atomic uint32_t bt_init_flag; + +static uint64_t received_local_timestamp = 0; +static uint64_t received_remote_timestamp = 0; +/* + * Buffer the previous timestamp pairs and rate. + * It is protected by the ts_conversion_lock. + */ +#define BT_PARAMS_COUNT 10 +static struct bt_params bt_params_hist[BT_PARAMS_COUNT] = {}; +static int bt_params_idx = -1; + +static inline void +bt_params_add(struct bt_params *params) +{ + lck_spin_assert(ts_conversion_lock, LCK_ASSERT_OWNED); + + bt_params_idx = (bt_params_idx + 1) % BT_PARAMS_COUNT; + bt_params_hist[bt_params_idx] = *params; +} + +static inline struct bt_params* +bt_params_find(uint64_t local_ts) +{ + lck_spin_assert(ts_conversion_lock, LCK_ASSERT_OWNED); + + int idx = bt_params_idx; + if (idx < 0) { + return NULL; + } + do { + if (local_ts >= bt_params_hist[idx].base_local_ts) { + return &bt_params_hist[idx]; + } + if (--idx < 0) { + idx = BT_PARAMS_COUNT - 1; + } + } while (idx != bt_params_idx); + + return NULL; +} + +struct bt_params +bt_params_get_latest(void) +{ + struct bt_params latest_params = {}; + + /* Check if ts_conversion_lock has been initialized */ + if (atomic_load(&bt_init_flag)) { + lck_spin_lock(ts_conversion_lock); + if (bt_params_idx >= 0) { + latest_params = bt_params_hist[bt_params_idx]; + } + lck_spin_unlock(ts_conversion_lock); + } + return latest_params; +} + +/* + * Conditions: bt_spin_lock held and called from primary interrupt context + */ +void +mach_bridge_add_timestamp(uint64_t remote_timestamp, uint64_t local_timestamp) +{ + lck_spin_assert(bt_spin_lock, LCK_ASSERT_OWNED); + + /* sleep/wake might return the same mach_absolute_time as the previous timestamp pair */ + if ((received_local_timestamp == local_timestamp) || + (received_remote_timestamp == remote_timestamp)) { + return; + } + + received_local_timestamp = local_timestamp; + received_remote_timestamp = remote_timestamp; + thread_wakeup((event_t)bt_params_hist); +} + +static double +mach_bridge_compute_rate(uint64_t new_local_ts, uint64_t new_remote_ts, + uint64_t old_local_ts, uint64_t old_remote_ts) +{ + int64_t rdiff = (int64_t)new_remote_ts - (int64_t)old_remote_ts; + int64_t ldiff = (int64_t)new_local_ts - (int64_t)old_local_ts; + double calc_rate = ((double)rdiff) / ldiff; + return calc_rate; +} + +#define MAX_RECALCULATE_COUNT 8 +#define CUMULATIVE_RATE_DECAY_CONSTANT 0.01 +#define CUMULATIVE_RATE_WEIGHT 0.99 +#define INITIAL_RATE 1.0 +#define MIN_INITIAL_SAMPLE_COUNT 10 +#define MAX_INITIAL_SAMPLE_COUNT 50 +#define MAX_SKIP_RESET_COUNT 2 +#define MIN_LOCAL_TS_DISTANCE_NS 100000000 /* 100 ms */ +#define MAX_LOCAL_TS_DISTANCE_NS 350000000 /* 350 ms */ +#define TS_PAIR_MISMATCH_THRESHOLD_NS 50000000 /* 50 ms */ +#define MAX_TS_PAIR_MISMATCHES 5 +#define MAX_TS_PAIR_MISMATCH_RESET_COUNT 3 +#define MIN_OBSERVED_RATE 0.8 +#define MAX_OBSERVED_RATE 1.2 + +static void +bt_calibration_thread(void) +{ + static uint64_t prev_local_ts = 0, prev_remote_ts = 0, curr_local_ts = 0, curr_remote_ts = 0; + static uint64_t prev_received_local_ts = 0, prev_received_remote_ts = 0; + static double cumulative_rate = INITIAL_RATE; + static uint32_t initial_sample_count = 1; + static uint32_t max_initial_sample_count = MAX_INITIAL_SAMPLE_COUNT; + static uint32_t skip_reset_count = MAX_SKIP_RESET_COUNT; + int recalculate_count = 1; + static bool reset = false; + bool sleep = false; + static bool skip_rcv_ts = false; + static uint64_t ts_pair_mismatch = 0; + static uint32_t ts_pair_mismatch_reset_count = 0; + spl_t s = 
splsched(); + lck_spin_lock(bt_spin_lock); + if (!received_remote_timestamp) { + if (PE_parse_boot_argn("rt_ini_count", &max_initial_sample_count, + sizeof(uint32_t)) == TRUE) { + if (max_initial_sample_count < MIN_INITIAL_SAMPLE_COUNT) { + max_initial_sample_count = MIN_INITIAL_SAMPLE_COUNT; + } + } + /* Nothing to do the first time */ + goto block; + } + /* + * The values in bt_params are recalculated every time a new timestamp + * pair is received. First, both timestamps are converted to nanoseconds. + * The current and previous timestamp pairs are used to compute the + * observed_rate of the two clocks w.r.t. each other. For the first + * MIN_INITIAL_SAMPLE_COUNT pairs, the cumulative_rate is a simple + * average of the observed_rate. For later pairs, the cumulative_rate + * is updated using an exponential moving average of the observed_rate. + * The current and bt_params' base timestamp pairs are used to compute + * the rate_from_base. This value ensures that the bt_params base + * timestamp pair curve doesn't stay parallel to the observed timestamp + * pair curve, but rather moves in the direction of the observed timestamp curve. + * The bt_params.rate is computed as a weighted average of the cumulative_rate + * and the rate_from_base. For each current local timestamp, the remote_time + * is predicted using the previous values of bt_params. After computing the new + * bt_params.rate, bt_params.base_remote_ts is set to this predicted value + * and bt_params.base_local_ts is set to the current local timestamp. + */ +recalculate: + assertf(recalculate_count <= MAX_RECALCULATE_COUNT, "bt_calibration_thread: recalculate \ + invocation exceeds MAX_RECALCULATE_COUNT"); + + if ((received_remote_timestamp == BT_RESET_SENTINEL_TS) || (received_remote_timestamp == BT_WAKE_SENTINEL_TS)) { + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_RESET_TS), received_local_timestamp, received_remote_timestamp, 1); + reset = true; + skip_reset_count = MAX_SKIP_RESET_COUNT; + ts_pair_mismatch_reset_count = 0; + goto block; + } else if (received_remote_timestamp == BT_SLEEP_SENTINEL_TS) { + sleep = true; + } else if (!received_local_timestamp) { + /* If the local timestamp isn't accurately captured, the received value will be ignored */ + skip_rcv_ts = true; + goto block; + } + + /* Keep a copy of the prev timestamps to compute distance */ + prev_received_local_ts = curr_local_ts; + prev_received_remote_ts = curr_remote_ts; + + uint64_t curr_local_abs = received_local_timestamp; + absolutetime_to_nanoseconds(curr_local_abs, &curr_local_ts); + curr_remote_ts = received_remote_timestamp; + + /* Prevent unusual rate changes caused by delayed timestamps */ + uint64_t local_diff = curr_local_ts - prev_received_local_ts; + if (!(reset || sleep) && ((local_diff < MIN_LOCAL_TS_DISTANCE_NS) || + (!skip_rcv_ts && (local_diff > MAX_LOCAL_TS_DISTANCE_NS)))) { + /* Skip the current timestamp */ + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_SKIP_TS), curr_local_ts, curr_remote_ts, + prev_received_local_ts); + goto block; + } else { + skip_rcv_ts = false; + /* Use the prev copy of timestamps only if the distance is acceptable */ + prev_local_ts = prev_received_local_ts; + prev_remote_ts = prev_received_remote_ts; + } + lck_spin_unlock(bt_spin_lock); + splx(s); + + struct bt_params bt_params = {}; + + lck_spin_lock(ts_conversion_lock); + if (reset) { + if (skip_reset_count > 0) { + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_SKIP_TS), curr_local_ts, curr_remote_ts, + prev_local_ts, skip_reset_count); + 
skip_reset_count--; + goto skip_reset; + } + bt_params.base_local_ts = curr_local_ts; + bt_params.base_remote_ts = curr_remote_ts; + bt_params.rate = cumulative_rate; + prev_local_ts = 0; + prev_remote_ts = 0; + ts_pair_mismatch = 0; + initial_sample_count = 1; + reset = false; + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_RESET_TS), curr_local_ts, curr_remote_ts, 2); + } else if (sleep) { + absolutetime_to_nanoseconds(mach_absolute_time(), &bt_params.base_local_ts); + bt_params.base_remote_ts = 0; + bt_params.rate = 0; + sleep = false; + } else { + struct bt_params bt_params_snapshot = {}; + if (bt_params_idx >= 0) { + bt_params_snapshot = bt_params_hist[bt_params_idx]; + } + lck_spin_unlock(ts_conversion_lock); + if (bt_params_snapshot.rate == 0.0) { + /* + * The rate should never be 0 because we always expect a reset/wake + * sentinel after sleep, followed by valid timestamp pair data that + * will be handled by the reset clause (above). However, we should + * not rely on a paired version of the remote OS - we could actually + * be running a completely different OS! Treat a timestamp after + * a sleep as a reset condition. + */ + reset = true; + skip_reset_count = MAX_SKIP_RESET_COUNT; + ts_pair_mismatch_reset_count = 0; + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_RESET_TS), curr_local_ts, curr_remote_ts, 3); + s = splsched(); + lck_spin_lock(bt_spin_lock); + goto block; + } + + /* Check if the predicted remote timestamp is within the expected current remote timestamp range */ + uint64_t pred_remote_ts = mach_bridge_compute_timestamp(curr_local_ts, &bt_params_snapshot); + uint64_t diff = 0; + if (initial_sample_count >= max_initial_sample_count) { + if (pred_remote_ts > curr_remote_ts) { + diff = pred_remote_ts - curr_remote_ts; + } else { + diff = curr_remote_ts - pred_remote_ts; + } + if (diff > TS_PAIR_MISMATCH_THRESHOLD_NS) { + ts_pair_mismatch++; + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_TS_MISMATCH), curr_local_ts, + curr_remote_ts, pred_remote_ts, ts_pair_mismatch); + } else { + ts_pair_mismatch = 0; + } + if (ts_pair_mismatch > MAX_TS_PAIR_MISMATCHES) { +#if (DEVELOPMENT || DEBUG) + if (ts_pair_mismatch_reset_count == MAX_TS_PAIR_MISMATCH_RESET_COUNT) { + panic("remote_time: timestamp pair mismatch exceeded limit"); + } +#endif /* (DEVELOPMENT || DEBUG) */ + reset = true; + ts_pair_mismatch_reset_count++; + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_RESET_TS), curr_local_ts, curr_remote_ts, 4); + s = splsched(); + lck_spin_lock(bt_spin_lock); + goto block; + } + } + double observed_rate, rate_from_base, new_rate; + observed_rate = mach_bridge_compute_rate(curr_local_ts, curr_remote_ts, prev_local_ts, prev_remote_ts); + /* Log bad observed rates and skip the timestamp pair */ + if ((observed_rate < MIN_OBSERVED_RATE) || (observed_rate > MAX_OBSERVED_RATE)) { + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_OBSV_RATE), *(uint64_t *)((void *)&observed_rate)); + ts_pair_mismatch = ts_pair_mismatch > 0 ? 
(ts_pair_mismatch - 1) : 0; + s = splsched(); + lck_spin_lock(bt_spin_lock); + goto block; + } + if (initial_sample_count <= MIN_INITIAL_SAMPLE_COUNT) { + initial_sample_count++; + cumulative_rate = cumulative_rate + (observed_rate - cumulative_rate) / initial_sample_count; + } else { + if (initial_sample_count < max_initial_sample_count) { + initial_sample_count++; + } + cumulative_rate = cumulative_rate + CUMULATIVE_RATE_DECAY_CONSTANT * (observed_rate - cumulative_rate); + } + rate_from_base = mach_bridge_compute_rate(curr_local_ts, curr_remote_ts, bt_params_snapshot.base_local_ts, + bt_params_snapshot.base_remote_ts); + new_rate = CUMULATIVE_RATE_WEIGHT * cumulative_rate + (1 - CUMULATIVE_RATE_WEIGHT) * rate_from_base; + /* + * Acquire the lock first to ensure that bt_params.base_local_ts is always + * greater than the last value of now captured by mach_bridge_remote_time. + * This ensures that we always use the same parameters to compute remote + * timestamp for a given local timestamp. + */ + lck_spin_lock(ts_conversion_lock); + absolutetime_to_nanoseconds(mach_absolute_time(), &bt_params.base_local_ts); + bt_params.base_remote_ts = mach_bridge_compute_timestamp(bt_params.base_local_ts, &bt_params_snapshot); + bt_params.rate = new_rate; + } + bt_params_add(&bt_params); + commpage_set_remotetime_params(bt_params.rate, bt_params.base_local_ts, bt_params.base_remote_ts); + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_TS_PARAMS), bt_params.base_local_ts, + bt_params.base_remote_ts, *(uint64_t *)((void *)&bt_params.rate)); + +skip_reset: + lck_spin_unlock(ts_conversion_lock); + + s = splsched(); + lck_spin_lock(bt_spin_lock); + /* Check if a new timestamp pair was received */ + if (received_local_timestamp != curr_local_abs) { + recalculate_count++; + goto recalculate; + } +block: + assert_wait((event_t)bt_params_hist, THREAD_UNINT); + lck_spin_unlock(bt_spin_lock); + splx(s); + thread_block((thread_continue_t)bt_calibration_thread); +} + +void +bt_calibration_thread_start(void) +{ + thread_t thread; + kern_return_t result = kernel_thread_start_priority((thread_continue_t)bt_calibration_thread, + NULL, BASEPRI_KERNEL, &thread); + if (result != KERN_SUCCESS) { + panic("mach_bridge_add_timestamp: thread_timestamp_calibration"); + } + thread_deallocate(thread); +} + +#endif /* CONFIG_MACH_BRIDGE_RECV_TIME */ + +/** + * mach_bridge_remote_time + * + * This function is used to predict the remote CPU's clock time, given + * the local time. + * + * If local_timestamp = 0, then the remote_timestamp is calculated + * corresponding to the current mach_absolute_time. Monotonicity of + * predicted time is guaranteed only for recent local_timestamp values + * less than the current mach_absolute_time by up to 1 second. + * + * If CONFIG_MACH_BRIDGE_SEND_TIME is true, then the function is compiled + * for the remote CPU. If CONFIG_MACH_BRIDGE_RECV_TIME is true, then the + * function is compiled for the local CPU. Both config options cannot + * be true simultaneously. + */ +uint64_t +mach_bridge_remote_time(uint64_t local_timestamp) +{ +#if defined(CONFIG_MACH_BRIDGE_SEND_TIME) +#if !defined(CONFIG_MACH_BRIDGE_RECV_TIME) + /* only send side of the bridge is defined: no translation needed */ + if (!local_timestamp) { + return mach_absolute_time(); + } + return 0; +#else +#error "You cannot define both sides of the bridge!"
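To make the rate update above easier to follow, here is a minimal standalone sketch of the same arithmetic: a simple running average over the first few samples, an exponential moving average afterwards, and a weighted blend with the rate measured against the current base pair. The sketch_ names and constant values are illustrative assumptions; the kernel's actual CUMULATIVE_RATE_DECAY_CONSTANT, CUMULATIVE_RATE_WEIGHT and sample-count thresholds are defined elsewhere in remote_time.c and are not visible in this hunk.

#include <stdint.h>

#define SKETCH_DECAY_CONSTANT 0.1 /* assumed EWMA decay for cumulative_rate */
#define SKETCH_RATE_WEIGHT 0.6    /* assumed weight of cumulative_rate in the blend */

/* One calibration step; sample_count is the 1-based index of this sample. */
static double
sketch_update_rate(double *cumulative_rate, double observed_rate,
    double rate_from_base, uint32_t sample_count, uint32_t min_samples)
{
    if (sample_count <= min_samples) {
        /* simple running average over the initial samples */
        *cumulative_rate += (observed_rate - *cumulative_rate) / sample_count;
    } else {
        /* exponential moving average once enough samples have arrived */
        *cumulative_rate += SKETCH_DECAY_CONSTANT * (observed_rate - *cumulative_rate);
    }
    /*
     * Blend with the rate measured from the current base pair so the
     * prediction curve bends toward the observed timestamps instead of
     * staying parallel to them.
     */
    return SKETCH_RATE_WEIGHT * *cumulative_rate +
        (1 - SKETCH_RATE_WEIGHT) * rate_from_base;
}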
+#endif /* !defined(CONFIG_MACH_BRIDGE_RECV_TIME) */ +#else +#if !defined(CONFIG_MACH_BRIDGE_RECV_TIME) + /* neither the send nor the receive side of the bridge is defined: echo the input */ + return local_timestamp; +#else + if (!atomic_load(&bt_init_flag)) { + return 0; + } + + lck_spin_lock(ts_conversion_lock); + uint64_t now = mach_absolute_time(); + + uint64_t remote_timestamp = 0; + uint64_t local_timestamp_ns = 0; + if (!local_timestamp) { + local_timestamp = now; + } else if (local_timestamp > now) { + goto out_unlock; + } + absolutetime_to_nanoseconds(local_timestamp, &local_timestamp_ns); + struct bt_params *params = bt_params_find(local_timestamp_ns); + remote_timestamp = mach_bridge_compute_timestamp(local_timestamp_ns, params); + +out_unlock: + lck_spin_unlock(ts_conversion_lock); + KDBG(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_BRIDGE_REMOTE_TIME), local_timestamp, remote_timestamp, now); + return remote_timestamp; +#endif /* !defined(CONFIG_MACH_BRIDGE_RECV_TIME) */ +#endif /* defined(CONFIG_MACH_BRIDGE_SEND_TIME) */ +} diff --git a/osfmk/kern/remote_time.h b/osfmk/kern/remote_time.h new file mode 100644 index 000000000..dc1d04a61 --- /dev/null +++ b/osfmk/kern/remote_time.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License.
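For reference, the translation performed by mach_bridge_remote_time() above is a single linear model; a worked example under assumed parameters: with rate = 1.000001, base_local_ts = 1,000,000,000 ns and base_remote_ts = 2,000,000,000 ns, a local timestamp 500 ms past the base maps to roughly 2,000,000,000 + 500,000,500 ns. A standalone restatement follows (sketch_predict_remote() is illustrative, not a kernel API; the real path goes through bt_params_find() and mach_bridge_compute_timestamp() under ts_conversion_lock, as shown above):

#include <stdint.h>

/* remote = base_remote + rate * (local - base_local), in nanoseconds */
static uint64_t
sketch_predict_remote(uint64_t local_ns, double rate,
    uint64_t base_local_ns, uint64_t base_remote_ns)
{
    int64_t delta = (int64_t)local_ns - (int64_t)base_local_ns;
    return base_remote_ns + (uint64_t)(int64_t)(rate * (double)delta);
}

/*
 * sketch_predict_remote(1500000000ULL, 1.000001, 1000000000ULL, 2000000000ULL)
 * yields approximately 2500000500 (give or take a nanosecond of floating-point
 * rounding): the 1 ppm rate difference adds ~500 ns over a 500 ms span.
 */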
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef REMOTE_TIME_H +#define REMOTE_TIME_H + +#include <stdint.h> +#include <sys/cdefs.h> +#include <os/overflow.h> + +__BEGIN_DECLS +/* bt_params is an ABI for tracing tools */ +struct bt_params { + double rate; + uint64_t base_local_ts; + uint64_t base_remote_ts; +}; + +/* local_ts_ns should be in nanoseconds */ +static inline uint64_t +mach_bridge_compute_timestamp(uint64_t local_ts_ns, struct bt_params *params) +{ + if (!params || params->rate == 0.0) { + return 0; + } + /* + * Formula to compute remote_timestamp + * remote_timestamp = (bt_params.rate * (local_ts_ns - bt_params.base_local_ts)) + * + bt_params.base_remote_ts + */ + int64_t remote_ts = 0; + int64_t rate_prod = 0; + rate_prod = (int64_t)(params->rate * (double)((int64_t)local_ts_ns - (int64_t)params->base_local_ts)); + if (os_add_overflow((int64_t)params->base_remote_ts, rate_prod, &remote_ts)) { + return 0; + } + + return (uint64_t)remote_ts; +} + +uint64_t mach_bridge_remote_time(uint64_t); +__END_DECLS + +#endif /* REMOTE_TIME_H */ diff --git a/osfmk/kern/sched.h b/osfmk/kern/sched.h index 43211cb9a..f3c1c88a2 100644 --- a/osfmk/kern/sched.h +++ b/osfmk/kern/sched.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in
*/ @@ -64,7 +64,7 @@ * */ -#ifndef _KERN_SCHED_H_ +#ifndef _KERN_SCHED_H_ #define _KERN_SCHED_H_ #include @@ -77,12 +77,12 @@ #include #include -#define NRQS 128 /* 128 levels per run queue */ +#define NRQS 128 /* 128 levels per run queue */ -#define MAXPRI (NRQS-1) -#define MINPRI 0 /* lowest legal priority schedulable */ -#define IDLEPRI MINPRI /* idle thread priority */ -#define NOPRI -1 +#define MAXPRI (NRQS-1) +#define MINPRI 0 /* lowest legal priority schedulable */ +#define IDLEPRI MINPRI /* idle thread priority */ +#define NOPRI -1 /* * High-level priority assignments @@ -141,31 +141,31 @@ ************************************************************************* */ -#define BASEPRI_RTQUEUES (BASEPRI_REALTIME + 1) /* 97 */ -#define BASEPRI_REALTIME (MAXPRI - (NRQS / 4) + 1) /* 96 */ +#define BASEPRI_RTQUEUES (BASEPRI_REALTIME + 1) /* 97 */ +#define BASEPRI_REALTIME (MAXPRI - (NRQS / 4) + 1) /* 96 */ -#define MAXPRI_KERNEL (BASEPRI_REALTIME - 1) /* 95 */ -#define BASEPRI_PREEMPT_HIGH (BASEPRI_PREEMPT + 1) /* 93 */ -#define BASEPRI_PREEMPT (MAXPRI_KERNEL - 3) /* 92 */ -#define BASEPRI_VM (BASEPRI_PREEMPT - 1) /* 91 */ +#define MAXPRI_KERNEL (BASEPRI_REALTIME - 1) /* 95 */ +#define BASEPRI_PREEMPT_HIGH (BASEPRI_PREEMPT + 1) /* 93 */ +#define BASEPRI_PREEMPT (MAXPRI_KERNEL - 3) /* 92 */ +#define BASEPRI_VM (BASEPRI_PREEMPT - 1) /* 91 */ -#define BASEPRI_KERNEL (MINPRI_KERNEL + 1) /* 81 */ -#define MINPRI_KERNEL (MAXPRI_KERNEL - (NRQS / 8) + 1) /* 80 */ +#define BASEPRI_KERNEL (MINPRI_KERNEL + 1) /* 81 */ +#define MINPRI_KERNEL (MAXPRI_KERNEL - (NRQS / 8) + 1) /* 80 */ -#define MAXPRI_RESERVED (MINPRI_KERNEL - 1) /* 79 */ -#define BASEPRI_GRAPHICS (MAXPRI_RESERVED - 3) /* 76 */ -#define MINPRI_RESERVED (MAXPRI_RESERVED - (NRQS / 8) + 1) /* 64 */ +#define MAXPRI_RESERVED (MINPRI_KERNEL - 1) /* 79 */ +#define BASEPRI_GRAPHICS (MAXPRI_RESERVED - 3) /* 76 */ +#define MINPRI_RESERVED (MAXPRI_RESERVED - (NRQS / 8) + 1) /* 64 */ -#define MAXPRI_USER (MINPRI_RESERVED - 1) /* 63 */ -#define BASEPRI_CONTROL (BASEPRI_DEFAULT + 17) /* 48 */ -#define BASEPRI_FOREGROUND (BASEPRI_DEFAULT + 16) /* 47 */ -#define BASEPRI_BACKGROUND (BASEPRI_DEFAULT + 15) /* 46 */ -#define BASEPRI_USER_INITIATED (BASEPRI_DEFAULT + 6) /* 37 */ -#define BASEPRI_DEFAULT (MAXPRI_USER - (NRQS / 4)) /* 31 */ -#define MAXPRI_SUPPRESSED (BASEPRI_DEFAULT - 3) /* 28 */ -#define BASEPRI_UTILITY (BASEPRI_DEFAULT - 11) /* 20 */ -#define MAXPRI_THROTTLE (MINPRI + 4) /* 4 */ -#define MINPRI_USER MINPRI /* 0 */ +#define MAXPRI_USER (MINPRI_RESERVED - 1) /* 63 */ +#define BASEPRI_CONTROL (BASEPRI_DEFAULT + 17) /* 48 */ +#define BASEPRI_FOREGROUND (BASEPRI_DEFAULT + 16) /* 47 */ +#define BASEPRI_BACKGROUND (BASEPRI_DEFAULT + 15) /* 46 */ +#define BASEPRI_USER_INITIATED (BASEPRI_DEFAULT + 6) /* 37 */ +#define BASEPRI_DEFAULT (MAXPRI_USER - (NRQS / 4)) /* 31 */ +#define MAXPRI_SUPPRESSED (BASEPRI_DEFAULT - 3) /* 28 */ +#define BASEPRI_UTILITY (BASEPRI_DEFAULT - 11) /* 20 */ +#define MAXPRI_THROTTLE (MINPRI + 4) /* 4 */ +#define MINPRI_USER MINPRI /* 0 */ #define DEPRESSPRI (MINPRI) /* depress priority */ @@ -177,10 +177,10 @@ /* Type used for thread->sched_mode and saved_mode */ typedef enum { - TH_MODE_NONE = 0, /* unassigned, usually for saved_mode only */ - TH_MODE_REALTIME, /* time constraints supplied */ - TH_MODE_FIXED, /* use fixed priorities, no decay */ - TH_MODE_TIMESHARE, /* use timesharing algorithm */ + TH_MODE_NONE = 0, /* unassigned, usually for saved_mode only */ + TH_MODE_REALTIME, /* time constraints supplied */ + 
TH_MODE_FIXED, /* use fixed priorities, no decay */ + TH_MODE_TIMESHARE, /* use timesharing algorithm */ } sched_mode_t; /* Buckets used for load calculation */ @@ -200,45 +200,45 @@ typedef enum { #define invalid_pri(pri) ((pri) < MINPRI || (pri) > MAXPRI) struct runq_stats { - uint64_t count_sum; - uint64_t last_change_timestamp; + uint64_t count_sum; + uint64_t last_change_timestamp; }; #if defined(CONFIG_SCHED_TIMESHARE_CORE) || defined(CONFIG_SCHED_PROTO) struct run_queue { - int highq; /* highest runnable queue */ - bitmap_t bitmap[BITMAP_LEN(NRQS)]; /* run queue bitmap array */ - int count; /* # of threads total */ - int urgency; /* level of preemption urgency */ - queue_head_t queues[NRQS]; /* one for each priority */ + int highq; /* highest runnable queue */ + bitmap_t bitmap[BITMAP_LEN(NRQS)]; /* run queue bitmap array */ + int count; /* # of threads total */ + int urgency; /* level of preemption urgency */ + queue_head_t queues[NRQS]; /* one for each priority */ - struct runq_stats runq_stats; + struct runq_stats runq_stats; }; inline static void rq_bitmap_set(bitmap_t *map, u_int n) { - assert(n < NRQS); + assert(n < NRQS); bitmap_set(map, n); } inline static void rq_bitmap_clear(bitmap_t *map, u_int n) { - assert(n < NRQS); + assert(n < NRQS); bitmap_clear(map, n); } #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) || defined(CONFIG_SCHED_PROTO) */ struct rt_queue { - _Atomic int count; /* # of threads total */ - queue_head_t queue; /* all runnable RT threads */ + _Atomic int count; /* # of threads total */ + queue_head_t queue; /* all runnable RT threads */ #if __SMP__ - decl_simple_lock_data(,rt_lock) + decl_simple_lock_data(, rt_lock) #endif - struct runq_stats runq_stats; + struct runq_stats runq_stats; }; typedef struct rt_queue *rt_queue_t; @@ -248,41 +248,41 @@ typedef struct rt_queue *rt_queue_t; * We map standard Mach priorities to an abstract scale that more properly * indicates how we want processor time allocated under contention. 
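As a cross-check, every numeric comment in the priority table earlier in this header follows from NRQS = 128. A standalone C11 sketch that verifies a few of the band boundaries at compile time (not part of the kernel build; the defines are restated here only so the asserts are self-contained):

#define NRQS 128
#define MAXPRI (NRQS - 1)                                   /* 127 */
#define BASEPRI_REALTIME (MAXPRI - (NRQS / 4) + 1)          /* 127 - 32 + 1 = 96 */
#define BASEPRI_RTQUEUES (BASEPRI_REALTIME + 1)             /* 97 */
#define MAXPRI_KERNEL (BASEPRI_REALTIME - 1)                /* 95 */
#define MINPRI_KERNEL (MAXPRI_KERNEL - (NRQS / 8) + 1)      /* 95 - 16 + 1 = 80 */
#define MAXPRI_RESERVED (MINPRI_KERNEL - 1)                 /* 79 */
#define MINPRI_RESERVED (MAXPRI_RESERVED - (NRQS / 8) + 1)  /* 64 */
#define MAXPRI_USER (MINPRI_RESERVED - 1)                   /* 63 */
#define BASEPRI_DEFAULT (MAXPRI_USER - (NRQS / 4))          /* 31 */

_Static_assert(MAXPRI == 127, "128 levels, numbered 0..127");
_Static_assert(BASEPRI_REALTIME == 96 && BASEPRI_RTQUEUES == 97, "realtime band");
_Static_assert(MAXPRI_KERNEL == 95 && MINPRI_KERNEL == 80, "kernel band");
_Static_assert(MAXPRI_RESERVED == 79 && MINPRI_RESERVED == 64, "reserved band");
_Static_assert(MAXPRI_USER == 63 && BASEPRI_DEFAULT == 31, "user band");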
*/ -typedef uint8_t grrr_proportional_priority_t; +typedef uint8_t grrr_proportional_priority_t; typedef uint8_t grrr_group_index_t; -#define NUM_GRRR_PROPORTIONAL_PRIORITIES 256 +#define NUM_GRRR_PROPORTIONAL_PRIORITIES 256 #define MAX_GRRR_PROPORTIONAL_PRIORITY ((grrr_proportional_priority_t)255) #if 0 -#define NUM_GRRR_GROUPS 8 /* log(256) */ +#define NUM_GRRR_GROUPS 8 /* log(256) */ #endif -#define NUM_GRRR_GROUPS 64 /* 256/4 */ +#define NUM_GRRR_GROUPS 64 /* 256/4 */ struct grrr_group { - queue_chain_t priority_order; /* next greatest weight group */ - grrr_proportional_priority_t minpriority; - grrr_group_index_t index; + queue_chain_t priority_order; /* next greatest weight group */ + grrr_proportional_priority_t minpriority; + grrr_group_index_t index; - queue_head_t clients; - int count; - uint32_t weight; + queue_head_t clients; + int count; + uint32_t weight; #if 0 - uint32_t deferred_removal_weight; + uint32_t deferred_removal_weight; #endif - uint32_t work; - thread_t current_client; + uint32_t work; + thread_t current_client; }; struct grrr_run_queue { - int count; - uint32_t last_rescale_tick; - struct grrr_group groups[NUM_GRRR_GROUPS]; - queue_head_t sorted_group_list; - uint32_t weight; - grrr_group_t current_group; - + int count; + uint32_t last_rescale_tick; + struct grrr_group groups[NUM_GRRR_GROUPS]; + queue_head_t sorted_group_list; + uint32_t weight; + grrr_group_t current_group; + struct runq_stats runq_stats; }; @@ -304,26 +304,31 @@ void sched_group_destroy(sched_group_t sched_group); */ /* Handle quantum expiration for an executing thread */ -extern void thread_quantum_expire( - timer_call_param_t processor, - timer_call_param_t thread); +extern void thread_quantum_expire( + timer_call_param_t processor, + timer_call_param_t thread); /* Context switch check for current processor */ -extern ast_t csw_check(processor_t processor, - ast_t check_reason); +extern ast_t csw_check( + thread_t thread, + processor_t processor, + ast_t check_reason); + +/* Check for pending ASTs */ +extern void ast_check(processor_t processor); extern void sched_update_generation_count(void); #if defined(CONFIG_SCHED_TIMESHARE_CORE) -extern uint32_t std_quantum, min_std_quantum; -extern uint32_t std_quantum_us; +extern uint32_t std_quantum, min_std_quantum; +extern uint32_t std_quantum_us; #endif /* CONFIG_SCHED_TIMESHARE_CORE */ extern uint32_t thread_depress_time; extern uint32_t default_timeshare_computation; extern uint32_t default_timeshare_constraint; -extern uint32_t max_rt_quantum, min_rt_quantum; +extern uint32_t max_rt_quantum, min_rt_quantum; extern int default_preemption_rate; extern int default_bg_preemption_rate; @@ -335,32 +340,32 @@ extern int default_bg_preemption_rate; * Aging may be deferred during periods where all processors are idle * and cumulatively applied during periods of activity. 
*/ -#define SCHED_TICK_SHIFT 3 -#define SCHED_TICK_MAX_DELTA (8) +#define SCHED_TICK_SHIFT 3 +#define SCHED_TICK_MAX_DELTA (8) -extern unsigned sched_tick; -extern uint32_t sched_tick_interval; +extern unsigned sched_tick; +extern uint32_t sched_tick_interval; #endif /* CONFIG_SCHED_TIMESHARE_CORE */ -extern uint64_t sched_one_second_interval; +extern uint64_t sched_one_second_interval; /* Periodic computation of various averages */ extern void compute_sched_load(void); -extern void compute_averages(uint64_t); +extern void compute_averages(uint64_t); -extern void compute_averunnable( - void *nrun); +extern void compute_averunnable( + void *nrun); -extern void compute_stack_target( - void *arg); +extern void compute_stack_target( + void *arg); -extern void compute_pageout_gc_throttle( - void *arg); +extern void compute_pageout_gc_throttle( + void *arg); -extern void compute_pmap_gc_throttle( - void *arg); +extern void compute_pmap_gc_throttle( + void *arg); /* * Conversion factor from usage @@ -369,24 +374,24 @@ extern void compute_pmap_gc_throttle( #if defined(CONFIG_SCHED_TIMESHARE_CORE) #define MAX_LOAD (NRQS - 1) -extern uint32_t sched_pri_shifts[TH_BUCKET_MAX]; -extern uint32_t sched_fixed_shift; -extern int8_t sched_load_shifts[NRQS]; -extern uint32_t sched_decay_usage_age_factor; +extern uint32_t sched_pri_shifts[TH_BUCKET_MAX]; +extern uint32_t sched_fixed_shift; +extern int8_t sched_load_shifts[NRQS]; +extern uint32_t sched_decay_usage_age_factor; void sched_timeshare_consider_maintenance(uint64_t ctime); #endif /* CONFIG_SCHED_TIMESHARE_CORE */ void sched_consider_recommended_cores(uint64_t ctime, thread_t thread); -extern int32_t sched_poll_yield_shift; -extern uint64_t sched_safe_duration; +extern int32_t sched_poll_yield_shift; +extern uint64_t sched_safe_duration; -extern uint32_t sched_load_average, sched_mach_factor; +extern uint32_t sched_load_average, sched_mach_factor; -extern uint32_t avenrun[3], mach_factor[3]; +extern uint32_t avenrun[3], mach_factor[3]; -extern uint64_t max_unsafe_computation; -extern uint64_t max_poll_computation; +extern uint64_t max_unsafe_computation; +extern uint64_t max_poll_computation; extern volatile uint32_t sched_run_buckets[TH_BUCKET_MAX]; @@ -396,12 +401,12 @@ extern uint32_t sched_run_decr(thread_t thread); /* * thread_timer_delta macro takes care of both thread timers. */ -#define thread_timer_delta(thread, delta) \ -MACRO_BEGIN \ - (delta) = (typeof(delta))timer_delta(&(thread)->system_timer, \ - &(thread)->system_timer_save); \ - (delta) += (typeof(delta))timer_delta(&(thread)->user_timer, \ - &(thread)->user_timer_save); \ +#define thread_timer_delta(thread, delta) \ +MACRO_BEGIN \ + (delta) = (typeof(delta))timer_delta(&(thread)->system_timer, \ + &(thread)->system_timer_save); \ + (delta) += (typeof(delta))timer_delta(&(thread)->user_timer, \ + &(thread)->user_timer_save); \ MACRO_END -#endif /* _KERN_SCHED_H_ */ +#endif /* _KERN_SCHED_H_ */ diff --git a/osfmk/kern/sched_average.c b/osfmk/kern/sched_average.c index e7b24bb0d..709803b9e 100644 --- a/osfmk/kern/sched_average.c +++ b/osfmk/kern/sched_average.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -74,22 +74,22 @@ #include -uint32_t avenrun[3] = {0, 0, 0}; -uint32_t mach_factor[3] = {0, 0, 0}; +uint32_t avenrun[3] = {0, 0, 0}; +uint32_t mach_factor[3] = {0, 0, 0}; -uint32_t sched_load_average, sched_mach_factor; +uint32_t sched_load_average, sched_mach_factor; #if defined(CONFIG_SCHED_TIMESHARE_CORE) /* * Values are scaled by LOAD_SCALE, defined in processor_info.h */ -#define base(n) ((n) << SCHED_TICK_SHIFT) -#define frac(n) (((base(n) - 1) * LOAD_SCALE) / base(n)) +#define base(n) ((n) << SCHED_TICK_SHIFT) +#define frac(n) (((base(n) - 1) * LOAD_SCALE) / base(n)) -static uint32_t fract[3] = { - frac(5), /* 5 second average */ - frac(30), /* 30 second average */ - frac(60), /* 1 minute average */ +static uint32_t fract[3] = { + frac(5), /* 5 second average */ + frac(30), /* 30 second average */ + frac(60), /* 1 minute average */ }; #undef base @@ -97,16 +97,16 @@ static uint32_t fract[3] = { #endif /* CONFIG_SCHED_TIMESHARE_CORE */ -static unsigned int sched_nrun; +static unsigned int sched_nrun; -typedef void (*sched_avg_comp_t)( - void *param); +typedef void (*sched_avg_comp_t)( + void *param); static struct sched_average { - sched_avg_comp_t comp; - void *param; - int period; /* in seconds */ - uint64_t deadline; + sched_avg_comp_t comp; + void *param; + int period; /* in seconds */ + uint64_t deadline; } sched_average[] = { { compute_averunnable, &sched_nrun, 5, 0 }, { compute_stack_target, NULL, 5, 1 }, @@ -118,48 +118,48 @@ static struct sched_average { { NULL, NULL, 0, 0 } }; -typedef struct sched_average *sched_average_t; +typedef struct sched_average *sched_average_t; /* * Scheduler load calculation algorithm * - * The scheduler load values provide an estimate of the number of runnable - * timeshare threads in the system at various priority bands. The load - * ultimately affects the priority shifts applied to all threads in a band - * causing them to timeshare with other threads in the system. The load is + * The scheduler load values provide an estimate of the number of runnable + * timeshare threads in the system at various priority bands. The load + * ultimately affects the priority shifts applied to all threads in a band + * causing them to timeshare with other threads in the system. The load is * maintained in buckets, with each bucket corresponding to a priority band. * - * Each runnable thread on the system contributes its load to its priority - * band and to the bands above it. The contribution of a thread to the bands - * above it is not strictly 1:1 and is weighted based on the priority band - * of the thread. The rules of thread load contribution to each of its higher + * Each runnable thread on the system contributes its load to its priority + * band and to the bands above it. The contribution of a thread to the bands + * above it is not strictly 1:1 and is weighted based on the priority band + * of the thread. The rules of thread load contribution to each of its higher * bands are as follows: * * - DF threads: Up to (2 * NCPUs) threads * - UT threads: Up to NCPUs threads * - BG threads: Up to 1 thread * - * To calculate the load values, the various run buckets are sampled (every + * sched_load_compute_interval_abs) and the weighted contributions of the - * lower bucket threads are added.
The resultant value is plugged into an - * exponentially weighted moving average formula: - * new-load = alpha * old-load + (1 - alpha) * run-bucket-sample-count - * (where alpha < 1) - * The calculations for the scheduler load are done using fixpoint math with - * a scale factor of 16 to avoid expensive divides and floating point - * operations. The final load values are a smooth curve representative of + * lower bucket threads are added. The resultant value is plugged into an + * exponentially weighted moving average formula: + * new-load = alpha * old-load + (1 - alpha) * run-bucket-sample-count + * (where alpha < 1) + * The calculations for the scheduler load are done using fixpoint math with + * a scale factor of 16 to avoid expensive divides and floating point + * operations. The final load values are a smooth curve representative of * the actual number of runnable threads in a priority band. */ /* Maintains the current (scaled for fixpoint) load in various buckets */ uint32_t sched_load[TH_BUCKET_MAX]; -/* - * Alpha factor for the EWMA algorithm. The current values are chosen as - * 6:10 ("old load":"new samples") to make sure the scheduler reacts fast - * enough to changing system load but does not see too many spikes from bursty - * activity. The current values ensure that the scheduler would converge - * to the latest load in 2-3 sched_load_compute_interval_abs intervals +/* + * Alpha factor for the EWMA algorithm. The current values are chosen as + * 6:10 ("old load":"new samples") to make sure the scheduler reacts fast + * enough to changing system load but does not see too many spikes from bursty + * activity. The current values ensure that the scheduler would converge + * to the latest load in 2-3 sched_load_compute_interval_abs intervals * (which amounts to ~30-45ms with current values). */ #define SCHED_LOAD_EWMA_ALPHA_OLD 6 @@ -168,10 +168,10 @@ uint32_t sched_load[TH_BUCKET_MAX]; static_assert((SCHED_LOAD_EWMA_ALPHA_OLD + SCHED_LOAD_EWMA_ALPHA_NEW) == (1ul << SCHED_LOAD_EWMA_ALPHA_SHIFT)); /* For fixpoint EWMA, roundup the load to make it converge */ -#define SCHED_LOAD_EWMA_ROUNDUP(load) (((load) & (1ul << (SCHED_LOAD_EWMA_ALPHA_SHIFT - 1))) != 0) +#define SCHED_LOAD_EWMA_ROUNDUP(load) (((load) & (1ul << (SCHED_LOAD_EWMA_ALPHA_SHIFT - 1))) != 0) /* Macro to convert scaled sched load to a real load value */ -#define SCHED_LOAD_EWMA_UNSCALE(load) (((load) >> SCHED_LOAD_EWMA_ALPHA_SHIFT) + SCHED_LOAD_EWMA_ROUNDUP(load)) +#define SCHED_LOAD_EWMA_UNSCALE(load) (((load) >> SCHED_LOAD_EWMA_ALPHA_SHIFT) + SCHED_LOAD_EWMA_ROUNDUP(load)) /* * Routine to capture the latest runnable counts and update sched_load */ @@ -201,9 +201,9 @@ compute_sched_load(void) uint32_t nfixpri = load_now[TH_BUCKET_FIXPRI]; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LOAD) | DBG_FUNC_NONE, - load_now[TH_BUCKET_FIXPRI], (load_now[TH_BUCKET_SHARE_FG] + load_now[TH_BUCKET_SHARE_DF]), - load_now[TH_BUCKET_SHARE_BG], load_now[TH_BUCKET_SHARE_UT], 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LOAD) | DBG_FUNC_NONE, + load_now[TH_BUCKET_FIXPRI], (load_now[TH_BUCKET_SHARE_FG] + load_now[TH_BUCKET_SHARE_DF]), + load_now[TH_BUCKET_SHARE_BG], load_now[TH_BUCKET_SHARE_UT], 0); /* * Compute the timeshare priority conversion factor based on loading. @@ -213,20 +213,21 @@ compute_sched_load(void) * is broken, so truncate values in these cases.
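A minimal sketch of the fixed-point EWMA described above, assuming the 6:10 ("old":"new") alpha split and a shift of 4, consistent with the static_assert that the two weights sum to 1 << shift (i.e. the scale factor of 16 mentioned in the comment). The helpers are an illustrative restatement, not the kernel's exact update line:

#include <stdint.h>

#define SKETCH_ALPHA_OLD 6
#define SKETCH_ALPHA_NEW 10
#define SKETCH_ALPHA_SHIFT 4 /* SKETCH_ALPHA_OLD + SKETCH_ALPHA_NEW == 1 << 4 */

/* Fold one run-bucket sample into the scaled load value. */
static uint32_t
sketch_ewma_step(uint32_t scaled_load, uint32_t sample)
{
    return (scaled_load * SKETCH_ALPHA_OLD +
        (sample << SKETCH_ALPHA_SHIFT) * SKETCH_ALPHA_NEW) >> SKETCH_ALPHA_SHIFT;
}

/* Unscale with the round-up bit, mirroring SCHED_LOAD_EWMA_UNSCALE above. */
static uint32_t
sketch_ewma_unscale(uint32_t scaled_load)
{
    uint32_t roundup = (scaled_load & (1u << (SKETCH_ALPHA_SHIFT - 1))) != 0;
    return (scaled_load >> SKETCH_ALPHA_SHIFT) + roundup;
}

For a steady sample s, the truncated iteration settles at or just below 16 * s; the round-up bit compensates for that truncation, so the unscaled value still converges to s, which is exactly the role of SCHED_LOAD_EWMA_ROUNDUP.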
*/ uint32_t timeshare_threads = (nthreads - nfixpri); - for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG ; i++) { - if (load_now[i] > timeshare_threads) + for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG; i++) { + if (load_now[i] > timeshare_threads) { load_now[i] = timeshare_threads; + } } - /* - * Default threads contribute up to (NCPUS * 2) of load to FG threads + /* + * Default threads contribute up to (NCPUS * 2) of load to FG threads */ if (load_now[TH_BUCKET_SHARE_DF] <= (ncpus * 2)) { load_now[TH_BUCKET_SHARE_FG] += load_now[TH_BUCKET_SHARE_DF]; } else { load_now[TH_BUCKET_SHARE_FG] += (ncpus * 2); } - + /* * Utility threads contribute up to NCPUS of load to FG & DF threads */ @@ -255,18 +256,20 @@ compute_sched_load(void) * Zero load results in an out-of-range shift count. */ - for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG ; i++) { + for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG; i++) { uint32_t bucket_load = 0; if (load_now[i] > ncpus) { /* Normalize the load to number of CPUs */ - if (ncpus > 1) + if (ncpus > 1) { bucket_load = load_now[i] / ncpus; - else + } else { bucket_load = load_now[i]; + } - if (bucket_load > MAX_LOAD) + if (bucket_load > MAX_LOAD) { bucket_load = MAX_LOAD; + } } /* Plug the load values into the EWMA algorithm to calculate (scaled for fixpoint) sched_load */ sched_load[i] = (sched_load[i] * SCHED_LOAD_EWMA_ALPHA_OLD) + ((bucket_load << SCHED_LOAD_EWMA_ALPHA_SHIFT) * SCHED_LOAD_EWMA_ALPHA_NEW); @@ -274,20 +277,19 @@ compute_sched_load(void) } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LOAD_EFFECTIVE) | DBG_FUNC_NONE, - SCHED_LOAD_EWMA_UNSCALE(sched_load[TH_BUCKET_SHARE_FG]), SCHED_LOAD_EWMA_UNSCALE(sched_load[TH_BUCKET_SHARE_DF]), - SCHED_LOAD_EWMA_UNSCALE(sched_load[TH_BUCKET_SHARE_UT]), SCHED_LOAD_EWMA_UNSCALE(sched_load[TH_BUCKET_SHARE_BG]), 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LOAD_EFFECTIVE) | DBG_FUNC_NONE, + SCHED_LOAD_EWMA_UNSCALE(sched_load[TH_BUCKET_SHARE_FG]), SCHED_LOAD_EWMA_UNSCALE(sched_load[TH_BUCKET_SHARE_DF]), + SCHED_LOAD_EWMA_UNSCALE(sched_load[TH_BUCKET_SHARE_UT]), SCHED_LOAD_EWMA_UNSCALE(sched_load[TH_BUCKET_SHARE_BG]), 0); } void compute_averages(uint64_t stdelta) { - uint32_t nthreads = sched_run_buckets[TH_BUCKET_RUN] - 1; uint32_t ncpus = processor_avail_count; - + /* Update the global pri_shifts based on the latest values */ - for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG ; i++) { + for (uint32_t i = TH_BUCKET_SHARE_FG; i <= TH_BUCKET_SHARE_BG; i++) { uint32_t bucket_load = SCHED_LOAD_EWMA_UNSCALE(sched_load[i]); sched_pri_shifts[i] = sched_fixed_shift - sched_load_shifts[bucket_load]; } @@ -304,10 +306,11 @@ compute_averages(uint64_t stdelta) uint32_t average_now = nthreads * LOAD_SCALE; uint32_t factor_now; - if (nthreads > ncpus) + if (nthreads > ncpus) { factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1); - else + } else { factor_now = (ncpus - nthreads) * LOAD_SCALE; + } /* * For those statistics that formerly relied on being recomputed @@ -325,10 +328,10 @@ compute_averages(uint64_t stdelta) for (uint32_t index = 0; index < stdelta; index++) { for (uint32_t i = 0; i < 3; i++) { mach_factor[i] = ((mach_factor[i] * fract[i]) + - (factor_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE; + (factor_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE; avenrun[i] = ((avenrun[i] * fract[i]) + - (average_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE; + (average_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE; } } diff --git
a/osfmk/kern/sched_dualq.c b/osfmk/kern/sched_dualq.c index 855d75853..e7f506c06 100644 --- a/osfmk/kern/sched_dualq.c +++ b/osfmk/kern/sched_dualq.c @@ -105,7 +105,7 @@ const struct sched_dispatch_table sched_dualq_dispatch = { .pset_init = sched_dualq_pset_init, .maintenance_continuation = sched_timeshare_maintenance_continue, .choose_thread = sched_dualq_choose_thread, - .steal_thread_enabled = TRUE, + .steal_thread_enabled = sched_steal_thread_enabled, .steal_thread = sched_dualq_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, .choose_processor = choose_processor, @@ -146,19 +146,22 @@ const struct sched_dispatch_table sched_dualq_dispatch = { }; __attribute__((always_inline)) -static inline run_queue_t dualq_main_runq(processor_t processor) +static inline run_queue_t +dualq_main_runq(processor_t processor) { return &processor->processor_set->pset_runq; } __attribute__((always_inline)) -static inline run_queue_t dualq_bound_runq(processor_t processor) +static inline run_queue_t +dualq_bound_runq(processor_t processor) { return &processor->runq; } __attribute__((always_inline)) -static inline run_queue_t dualq_runq_for_thread(processor_t processor, thread_t thread) +static inline run_queue_t +dualq_runq_for_thread(processor_t processor, thread_t thread) { if (thread->bound_processor == PROCESSOR_NULL) { return dualq_main_runq(processor); @@ -171,10 +174,11 @@ static inline run_queue_t dualq_runq_for_thread(processor_t processor, thread_t static sched_mode_t sched_dualq_initial_thread_sched_mode(task_t parent_task) { - if (parent_task == kernel_task) + if (parent_task == kernel_task) { return TH_MODE_FIXED; - else + } else { return TH_MODE_TIMESHARE; + } } static void @@ -189,25 +193,31 @@ sched_dualq_pset_init(processor_set_t pset) run_queue_init(&pset->pset_runq); } +extern int sched_allow_NO_SMT_threads; static void sched_dualq_init(void) { sched_timeshare_init(); + + if (PE_parse_boot_argn("disable_NO_SMT_threads", NULL, 0)) { + sched_allow_NO_SMT_threads = 0; + } } static thread_t sched_dualq_choose_thread( - processor_t processor, - int priority, - __unused ast_t reason) + processor_t processor, + int priority, + __unused ast_t reason) { run_queue_t main_runq = dualq_main_runq(processor); run_queue_t bound_runq = dualq_bound_runq(processor); run_queue_t chosen_runq; if (bound_runq->highq < priority && - main_runq->highq < priority) + main_runq->highq < priority) { return THREAD_NULL; + } if (bound_runq->count && main_runq->count) { if (bound_runq->highq >= main_runq->highq) { @@ -220,7 +230,47 @@ sched_dualq_choose_thread( } else if (main_runq->count) { chosen_runq = main_runq; } else { - return (THREAD_NULL); + return THREAD_NULL; + } + + if (chosen_runq == bound_runq) { + return run_queue_dequeue(chosen_runq, SCHED_HEADQ); + } + + if (processor->is_SMT) { + thread_t potential_thread = run_queue_dequeue(chosen_runq, SCHED_PEEK | SCHED_HEADQ); + if (potential_thread == THREAD_NULL) { + return THREAD_NULL; + } + if (processor->processor_primary != processor) { + /* + * Secondary processor may not run a NO_SMT thread, + * nor any thread if the primary is running a NO_SMT thread. 
+ */ + if (thread_no_smt(potential_thread)) { + processor->must_idle = true; + return THREAD_NULL; + } + processor_t primary = processor->processor_primary; + if (primary->state == PROCESSOR_RUNNING) { + if (processor_active_thread_no_smt(primary)) { + processor->must_idle = true; + return THREAD_NULL; + } + } + } else if (processor->processor_secondary != PROCESSOR_NULL) { + processor_t secondary = processor->processor_secondary; + /* + * Primary processor may not run a NO_SMT thread if + * its secondary is running a bound thread. + */ + if (secondary->state == PROCESSOR_RUNNING) { + if (thread_no_smt(potential_thread) && secondary->current_is_bound) { + processor->must_idle = true; + return THREAD_NULL; + } + } + } } return run_queue_dequeue(chosen_runq, SCHED_HEADQ); @@ -228,9 +278,9 @@ sched_dualq_choose_thread( static boolean_t sched_dualq_processor_enqueue( - processor_t processor, - thread_t thread, - integer_t options) + processor_t processor, + thread_t thread, + integer_t options) { run_queue_t rq = dualq_runq_for_thread(processor, thread); boolean_t result; @@ -238,13 +288,13 @@ sched_dualq_processor_enqueue( result = run_queue_enqueue(rq, thread, options); thread->runq = processor; - return (result); + return result; } static boolean_t sched_dualq_processor_queue_empty(processor_t processor) { - return dualq_main_runq(processor)->count == 0 && + return dualq_main_runq(processor)->count == 0 && dualq_bound_runq(processor)->count == 0; } @@ -255,7 +305,7 @@ sched_dualq_processor_csw_check(processor_t processor) int pri; if (sched_dualq_thread_avoid_processor(processor, current_thread())) { - return (AST_PREEMPT | AST_URGENT); + return AST_PREEMPT | AST_URGENT; } run_queue_t main_runq = dualq_main_runq(processor); @@ -272,12 +322,14 @@ sched_dualq_processor_csw_check(processor_t processor) } if (has_higher) { - if (main_runq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); + if (main_runq->urgency > 0) { + return AST_PREEMPT | AST_URGENT; + } + + if (bound_runq->urgency > 0) { + return AST_PREEMPT | AST_URGENT; + } - if (bound_runq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); - return AST_PREEMPT; } @@ -286,18 +338,19 @@ sched_dualq_processor_csw_check(processor_t processor) static boolean_t sched_dualq_processor_queue_has_priority(processor_t processor, - int priority, - boolean_t gte) + int priority, + boolean_t gte) { run_queue_t main_runq = dualq_main_runq(processor); run_queue_t bound_runq = dualq_bound_runq(processor); int qpri = MAX(main_runq->highq, bound_runq->highq); - if (gte) + if (gte) { return qpri >= priority; - else + } else { return qpri > priority; + } } static int @@ -311,10 +364,11 @@ sched_dualq_runq_stats_count_sum(processor_t processor) { uint64_t bound_sum = dualq_bound_runq(processor)->runq_stats.count_sum; - if (processor->cpu_id == processor->processor_set->cpu_set_low) + if (processor->cpu_id == processor->processor_set->cpu_set_low) { return bound_sum + dualq_main_runq(processor)->runq_stats.count_sum; - else + } else { return bound_sum; + } } static int sched_dualq_processor_bound_count(processor_t processor) @@ -346,7 +400,6 @@ sched_dualq_processor_queue_shutdown(processor_t processor) pset_unlock(pset); qe_foreach_element_safe(thread, &tqueue, runq_links) { - remqueue(&thread->runq_links); thread_lock(thread); @@ -359,8 +412,8 @@ sched_dualq_processor_queue_shutdown(processor_t processor) static boolean_t sched_dualq_processor_queue_remove( - processor_t processor, - thread_t thread) + processor_t processor, + thread_t thread) { run_queue_t 
rq; processor_set_t pset = processor->processor_set; @@ -375,8 +428,7 @@ sched_dualq_processor_queue_remove( * that run queue. */ run_queue_remove(rq, thread); - } - else { + } else { /* * The thread left the run queue before we could * lock the run queue. @@ -387,35 +439,34 @@ sched_dualq_processor_queue_remove( pset_unlock(pset); - return (processor != PROCESSOR_NULL); + return processor != PROCESSOR_NULL; } static thread_t sched_dualq_steal_thread(processor_set_t pset) { - processor_set_t nset, cset = pset; + processor_set_t cset = pset; + processor_set_t nset = next_pset(cset); thread_t thread; - do { + while (nset != pset) { + pset_unlock(cset); + cset = nset; + pset_lock(cset); + if (cset->pset_runq.count > 0) { + /* Need task_restrict logic here */ thread = run_queue_dequeue(&cset->pset_runq, SCHED_HEADQ); pset_unlock(cset); - return (thread); + return thread; } nset = next_pset(cset); - - if (nset != pset) { - pset_unlock(cset); - - cset = nset; - pset_lock(cset); - } - } while (nset != pset); + } pset_unlock(cset); - return (THREAD_NULL); + return THREAD_NULL; } static void @@ -444,8 +495,9 @@ sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context) pset_unlock(pset); splx(s); - if (restart_needed) + if (restart_needed) { break; + } thread = processor->idle_thread; if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) { @@ -458,7 +510,6 @@ sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context) /* Ok, we now have a collection of candidates -- fix them. */ thread_update_process_threads(); - } while (restart_needed); pset = &pset0; @@ -473,13 +524,13 @@ sched_dualq_thread_update_scan(sched_update_scan_context_t scan_context) pset_unlock(pset); splx(s); - if (restart_needed) + if (restart_needed) { break; + } } while ((pset = pset->pset_list) != NULL); /* Ok, we now have a collection of candidates -- fix them. */ thread_update_process_threads(); - } while (restart_needed); } @@ -489,14 +540,46 @@ extern int sched_allow_rt_smt; static bool sched_dualq_thread_avoid_processor(processor_t processor, thread_t thread) { + if (thread->bound_processor == processor) { + /* Thread is bound here */ + return false; + } + if (processor->processor_primary != processor) { /* * This is a secondary SMT processor. If the primary is running * a realtime thread, only allow realtime threads on the secondary. */ - if ((processor->processor_primary->current_pri >= BASEPRI_RTQUEUES) && ((thread->sched_pri < BASEPRI_RTQUEUES) || !sched_allow_rt_smt)) { + processor_t primary = processor->processor_primary; + if ((primary->current_pri >= BASEPRI_RTQUEUES) && ((thread->sched_pri < BASEPRI_RTQUEUES) || !sched_allow_rt_smt)) { return true; } + + /* NO_SMT threads are not allowed on secondary processors */ + if (thread_no_smt(thread)) { + return true; + } + + if (primary->state == PROCESSOR_RUNNING) { + if (processor_active_thread_no_smt(primary)) { + /* No threads allowed on secondary if primary has NO_SMT */ + return true; + } + } + } + + if (processor->processor_secondary != PROCESSOR_NULL) { + /* + * This is a primary SMT processor. If the secondary is running + * a bound thread, the primary may not run a NO_SMT thread. 
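The NO_SMT placement rules spread across sched_dualq_choose_thread() and sched_dualq_thread_avoid_processor() above reduce to a small predicate. A condensed sketch (sketch_avoid_processor() is illustrative only; thread_no_smt(), current_is_bound and the primary/secondary distinction are the names this diff actually uses):

#include <stdbool.h>

/* "sibling" is the other logical CPU on the same physical core. */
static bool
sketch_avoid_processor(bool on_secondary, bool sibling_running,
    bool sibling_no_smt, bool sibling_bound, bool thread_no_smt)
{
    if (on_secondary) {
        /* A secondary never runs NO_SMT threads... */
        if (thread_no_smt) {
            return true;
        }
        /* ...nor anything while the primary is running a NO_SMT thread. */
        if (sibling_running && sibling_no_smt) {
            return true;
        }
    } else if (sibling_running && sibling_bound && thread_no_smt) {
        /* A primary skips NO_SMT threads while its secondary runs a bound thread. */
        return true;
    }
    return false;
}

In sched_dualq_choose_thread() the same conditions additionally set processor->must_idle before returning THREAD_NULL, so the processor idles rather than dequeue a disallowed thread.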
+ */ + processor_t secondary = processor->processor_secondary; + + if (secondary->state == PROCESSOR_RUNNING) { + if (secondary->current_is_bound && thread_no_smt(thread)) { + return true; + } + } } return false; diff --git a/osfmk/kern/sched_grrr.c b/osfmk/kern/sched_grrr.c index 8cc140183..af61fd552 100644 --- a/osfmk/kern/sched_grrr.c +++ b/osfmk/kern/sched_grrr.c @@ -68,28 +68,28 @@ grrr_priority_mapping_init(void); static boolean_t grrr_enqueue( - grrr_run_queue_t rq, - thread_t thread); + grrr_run_queue_t rq, + thread_t thread); static thread_t grrr_select( - grrr_run_queue_t rq); + grrr_run_queue_t rq); static void grrr_remove( - grrr_run_queue_t rq, - thread_t thread); + grrr_run_queue_t rq, + thread_t thread); static void grrr_sorted_list_insert_group(grrr_run_queue_t rq, - grrr_group_t group); + grrr_group_t group); static void grrr_rescale_work(grrr_run_queue_t rq); static void -grrr_runqueue_init(grrr_run_queue_t runq); +grrr_runqueue_init(grrr_run_queue_t runq); /* Map Mach priorities to ones suitable for proportional sharing */ static grrr_proportional_priority_t grrr_priority_mapping[NRQS]; @@ -97,7 +97,7 @@ static grrr_proportional_priority_t grrr_priority_mapping[NRQS]; /* Map each proportional priority to its group */ static grrr_group_index_t grrr_group_mapping[NUM_GRRR_PROPORTIONAL_PRIORITIES]; -uint32_t grrr_rescale_tick; +uint32_t grrr_rescale_tick; #endif /* defined(CONFIG_SCHED_GRRR_CORE) */ @@ -119,43 +119,43 @@ static void sched_grrr_maintenance_continuation(void); static thread_t -sched_grrr_choose_thread(processor_t processor, - int priority, - ast_t reason); +sched_grrr_choose_thread(processor_t processor, + int priority, + ast_t reason); static thread_t -sched_grrr_steal_thread(processor_set_t pset); +sched_grrr_steal_thread(processor_set_t pset); static int sched_grrr_compute_priority(thread_t thread); static processor_t -sched_grrr_choose_processor( processor_set_t pset, - processor_t processor, - thread_t thread); +sched_grrr_choose_processor( processor_set_t pset, + processor_t processor, + thread_t thread); static boolean_t sched_grrr_processor_enqueue( - processor_t processor, - thread_t thread, - integer_t options); + processor_t processor, + thread_t thread, + integer_t options); static void sched_grrr_processor_queue_shutdown( - processor_t processor); + processor_t processor); static boolean_t sched_grrr_processor_queue_remove( - processor_t processor, - thread_t thread); + processor_t processor, + thread_t thread); static boolean_t -sched_grrr_processor_queue_empty(processor_t processor); +sched_grrr_processor_queue_empty(processor_t processor); static boolean_t -sched_grrr_processor_queue_has_priority(processor_t processor, - int priority, - boolean_t gte); +sched_grrr_processor_queue_has_priority(processor_t processor, + int priority, + boolean_t gte); static boolean_t sched_grrr_priority_is_urgent(int priority); @@ -170,22 +170,22 @@ static sched_mode_t sched_grrr_initial_thread_sched_mode(task_t parent_task); static boolean_t -sched_grrr_can_update_priority(thread_t thread); +sched_grrr_can_update_priority(thread_t thread); static void -sched_grrr_update_priority(thread_t thread); +sched_grrr_update_priority(thread_t thread); static void -sched_grrr_lightweight_update_priority(thread_t thread); +sched_grrr_lightweight_update_priority(thread_t thread); static int -sched_grrr_processor_runq_count(processor_t processor); +sched_grrr_processor_runq_count(processor_t processor); static uint64_t sched_grrr_processor_runq_stats_count_sum(processor_t 
processor); static int -sched_grrr_processor_bound_count(processor_t processor); +sched_grrr_processor_bound_count(processor_t processor); static void sched_grrr_thread_update_scan(sched_update_scan_context_t scan_context); @@ -198,7 +198,7 @@ const struct sched_dispatch_table sched_grrr_dispatch = { .pset_init = sched_grrr_pset_init, .maintenance_continuation = sched_grrr_maintenance_continuation, .choose_thread = sched_grrr_choose_thread, - .steal_thread_enabled = FALSE, + .steal_thread_enabled = sched_steal_thread_DISABLED, .steal_thread = sched_grrr_steal_thread, .compute_timeshare_priority = sched_grrr_compute_priority, .choose_processor = sched_grrr_choose_processor, @@ -238,18 +238,19 @@ const struct sched_dispatch_table sched_grrr_dispatch = { .thread_should_yield = sched_thread_should_yield, }; -extern int max_unsafe_quanta; +extern int max_unsafe_quanta; static uint32_t grrr_quantum_us; static uint32_t grrr_quantum; -static uint64_t sched_grrr_tick_deadline; +static uint64_t sched_grrr_tick_deadline; static void sched_grrr_init(void) { - if (default_preemption_rate < 1) + if (default_preemption_rate < 1) { default_preemption_rate = 100; + } grrr_quantum_us = (1000 * 1000) / default_preemption_rate; printf("standard grrr timeslicing quantum is %d us\n", grrr_quantum_us); @@ -260,11 +261,11 @@ sched_grrr_init(void) static void sched_grrr_timebase_init(void) { - uint64_t abstime; + uint64_t abstime; /* standard timeslicing quantum */ clock_interval_to_absolutetime_interval( - grrr_quantum_us, NSEC_PER_USEC, &abstime); + grrr_quantum_us, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); grrr_quantum = (uint32_t)abstime; @@ -274,7 +275,6 @@ sched_grrr_timebase_init(void) max_unsafe_computation = max_unsafe_quanta * grrr_quantum; sched_safe_duration = 2 * max_unsafe_quanta * grrr_quantum; - } static void @@ -291,7 +291,7 @@ sched_grrr_pset_init(processor_set_t pset __unused) static void sched_grrr_maintenance_continuation(void) { - uint64_t abstime = mach_absolute_time(); + uint64_t abstime = mach_absolute_time(); grrr_rescale_tick++; @@ -300,11 +300,12 @@ sched_grrr_maintenance_continuation(void) */ compute_averages(1); - if (sched_grrr_tick_deadline == 0) + if (sched_grrr_tick_deadline == 0) { sched_grrr_tick_deadline = abstime; + } - clock_deadline_for_periodic_event(10*sched_one_second_interval, abstime, - &sched_grrr_tick_deadline); + clock_deadline_for_periodic_event(10 * sched_one_second_interval, abstime, + &sched_grrr_tick_deadline); assert_wait_deadline((event_t)sched_grrr_maintenance_continuation, THREAD_UNINT, sched_grrr_tick_deadline); thread_block((thread_continue_t)sched_grrr_maintenance_continuation); @@ -312,17 +313,17 @@ sched_grrr_maintenance_continuation(void) } static thread_t -sched_grrr_choose_thread(processor_t processor, - int priority __unused, - ast_t reason __unused) +sched_grrr_choose_thread(processor_t processor, + int priority __unused, + ast_t reason __unused) { - grrr_run_queue_t rq = &processor->grrr_runq; + grrr_run_queue_t rq = &processor->grrr_runq; - return grrr_select(rq); + return grrr_select(rq); } static thread_t -sched_grrr_steal_thread(processor_set_t pset) +sched_grrr_steal_thread(processor_set_t pset) { pset_unlock(pset); @@ -336,21 +337,21 @@ sched_grrr_compute_priority(thread_t thread) } static processor_t -sched_grrr_choose_processor( processor_set_t pset, - processor_t processor, - thread_t thread) +sched_grrr_choose_processor( processor_set_t pset, + processor_t processor, + thread_t thread) { return 
choose_processor(pset, processor, thread); } static boolean_t sched_grrr_processor_enqueue( - processor_t processor, - thread_t thread, - integer_t options __unused) + processor_t processor, + thread_t thread, + integer_t options __unused) { - grrr_run_queue_t rq = &processor->grrr_runq; - boolean_t result; + grrr_run_queue_t rq = &processor->grrr_runq; + boolean_t result; result = grrr_enqueue(rq, thread); @@ -361,11 +362,11 @@ sched_grrr_processor_enqueue( static void sched_grrr_processor_queue_shutdown( - processor_t processor) + processor_t processor) { - processor_set_t pset = processor->processor_set; - thread_t thread; - queue_head_t tqueue, bqueue; + processor_set_t pset = processor->processor_set; + thread_t thread; + queue_head_t tqueue, bqueue; queue_init(&tqueue); queue_init(&bqueue); @@ -395,8 +396,8 @@ sched_grrr_processor_queue_shutdown( static boolean_t sched_grrr_processor_queue_remove( - processor_t processor, - thread_t thread) + processor_t processor, + thread_t thread) { processor_set_t pset = processor->processor_set; @@ -407,7 +408,7 @@ sched_grrr_processor_queue_remove( * Thread is on a run queue and we have a lock on * that run queue. */ - grrr_run_queue_t rq = &processor->grrr_runq; + grrr_run_queue_t rq = &processor->grrr_runq; grrr_remove(rq, thread); } else { @@ -421,11 +422,11 @@ sched_grrr_processor_queue_remove( pset_unlock(pset); - return (processor != PROCESSOR_NULL); + return processor != PROCESSOR_NULL; } static boolean_t -sched_grrr_processor_queue_empty(processor_t processor __unused) +sched_grrr_processor_queue_empty(processor_t processor __unused) { boolean_t result; @@ -435,17 +436,18 @@ sched_grrr_processor_queue_empty(processor_t processor __unused) } static boolean_t -sched_grrr_processor_queue_has_priority(processor_t processor, - int priority, - boolean_t gte __unused) +sched_grrr_processor_queue_has_priority(processor_t processor, + int priority, + boolean_t gte __unused) { - grrr_run_queue_t rq = &processor->grrr_runq; - unsigned int i; + grrr_run_queue_t rq = &processor->grrr_runq; + unsigned int i; i = grrr_group_mapping[grrr_priority_mapping[priority]]; - for ( ; i < NUM_GRRR_GROUPS; i++) { - if (rq->groups[i].count > 0) + for (; i < NUM_GRRR_GROUPS; i++) { + if (rq->groups[i].count > 0) { return TRUE; + } } return FALSE; @@ -455,14 +457,17 @@ sched_grrr_processor_queue_has_priority(processor_t processor, static boolean_t sched_grrr_priority_is_urgent(int priority) { - if (priority <= BASEPRI_FOREGROUND) + if (priority <= BASEPRI_FOREGROUND) { return FALSE; + } - if (priority < MINPRI_KERNEL) + if (priority < MINPRI_KERNEL) { return TRUE; + } - if (priority >= BASEPRI_PREEMPT) + if (priority >= BASEPRI_PREEMPT) { return TRUE; + } return FALSE; } @@ -470,12 +475,13 @@ sched_grrr_priority_is_urgent(int priority) static ast_t sched_grrr_processor_csw_check(processor_t processor) { - int count; + int count; count = sched_grrr_processor_runq_count(processor); - if (count > 0) + if (count > 0) { return AST_PREEMPT; + } return AST_NONE; } @@ -489,44 +495,45 @@ sched_grrr_initial_quantum_size(thread_t thread __unused) static sched_mode_t sched_grrr_initial_thread_sched_mode(task_t parent_task) { - if (parent_task == kernel_task) + if (parent_task == kernel_task) { return TH_MODE_FIXED; - else + } else { return TH_MODE_TIMESHARE; + } } static boolean_t -sched_grrr_can_update_priority(thread_t thread __unused) +sched_grrr_can_update_priority(thread_t thread __unused) { return FALSE; } static void -sched_grrr_update_priority(thread_t thread __unused) 
+sched_grrr_update_priority(thread_t thread __unused) { return; } static void -sched_grrr_lightweight_update_priority(thread_t thread __unused) +sched_grrr_lightweight_update_priority(thread_t thread __unused) { return; } static int -sched_grrr_processor_runq_count(processor_t processor) +sched_grrr_processor_runq_count(processor_t processor) { return processor->grrr_runq.count; } static uint64_t -sched_grrr_processor_runq_stats_count_sum(processor_t processor) +sched_grrr_processor_runq_stats_count_sum(processor_t processor) { return processor->grrr_runq.runq_stats.count_sum; } static int -sched_grrr_processor_bound_count(__unused processor_t processor) +sched_grrr_processor_bound_count(__unused processor_t processor) { return 0; } @@ -547,26 +554,27 @@ grrr_priority_mapping_init(void) unsigned int i; /* Map 0->0 up to 10->20 */ - for (i=0; i <= 10; i++) { - grrr_priority_mapping[i] = 2*i; + for (i = 0; i <= 10; i++) { + grrr_priority_mapping[i] = 2 * i; } /* Map user priorities 11->33 up to 51 -> 153 */ - for (i=11; i <= 51; i++) { - grrr_priority_mapping[i] = 3*i; + for (i = 11; i <= 51; i++) { + grrr_priority_mapping[i] = 3 * i; } /* Map high priorities 52->180 up to 127->255 */ - for (i=52; i <= 127; i++) { + for (i = 52; i <= 127; i++) { grrr_priority_mapping[i] = 128 + i; } for (i = 0; i < NUM_GRRR_PROPORTIONAL_PRIORITIES; i++) { - #if 0 unsigned j, k; /* Calculate log(i); */ - for (j=0, k=1; k <= i; j++, k *= 2); + for (j = 0, k = 1; k <= i; j++, k *= 2) { + ; + } #endif /* Groups of 4 */ @@ -618,7 +626,7 @@ grrr_intergroup_schedule(grrr_run_queue_t rq) thread = grrr_intragroup_schedule(group); - if ((group->work >= (UINT32_MAX-256)) || (rq->last_rescale_tick != grrr_rescale_tick)) { + if ((group->work >= (UINT32_MAX - 256)) || (rq->last_rescale_tick != grrr_rescale_tick)) { grrr_rescale_work(rq); } group->work++; @@ -653,7 +661,7 @@ grrr_intergroup_schedule(grrr_run_queue_t rq) } static void -grrr_runqueue_init(grrr_run_queue_t runq) +grrr_runqueue_init(grrr_run_queue_t runq) { grrr_group_index_t index; @@ -663,8 +671,8 @@ grrr_runqueue_init(grrr_run_queue_t runq) unsigned int prisearch; for (prisearch = 0; - prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES; - prisearch++) { + prisearch < NUM_GRRR_PROPORTIONAL_PRIORITIES; + prisearch++) { if (grrr_group_mapping[prisearch] == index) { runq->groups[index].minpriority = (grrr_proportional_priority_t)prisearch; break; @@ -700,12 +708,12 @@ grrr_rescale_work(grrr_run_queue_t rq) static boolean_t grrr_enqueue( - grrr_run_queue_t rq, - thread_t thread) + grrr_run_queue_t rq, + thread_t thread) { - grrr_proportional_priority_t gpriority; - grrr_group_index_t gindex; - grrr_group_t group; + grrr_proportional_priority_t gpriority; + grrr_group_index_t gindex; + grrr_group_t group; gpriority = grrr_priority_mapping[thread->sched_pri]; gindex = grrr_group_mapping[gpriority]; @@ -724,7 +732,7 @@ grrr_enqueue( } else { /* Insert before the current client */ if (group->current_client == THREAD_NULL || - queue_first(&group->clients) == (queue_entry_t)group->current_client) { + queue_first(&group->clients) == (queue_entry_t)group->current_client) { enqueue_head(&group->clients, (queue_entry_t)thread); } else { insque((queue_entry_t)thread, queue_prev((queue_entry_t)group->current_client)); @@ -746,15 +754,15 @@ grrr_enqueue( } static thread_t -grrr_select(grrr_run_queue_t rq) +grrr_select(grrr_run_queue_t rq) { - thread_t thread; + thread_t thread; thread = grrr_intergroup_schedule(rq); if (thread != THREAD_NULL) { - grrr_proportional_priority_t 
gpriority; - grrr_group_index_t gindex; - grrr_group_t group; + grrr_proportional_priority_t gpriority; + grrr_group_index_t gindex; + grrr_group_t group; gpriority = grrr_priority_mapping[thread->sched_pri]; gindex = grrr_group_mapping[gpriority]; @@ -789,12 +797,12 @@ grrr_select(grrr_run_queue_t rq) static void grrr_remove( - grrr_run_queue_t rq, - thread_t thread) + grrr_run_queue_t rq, + thread_t thread) { - grrr_proportional_priority_t gpriority; - grrr_group_index_t gindex; - grrr_group_t group; + grrr_proportional_priority_t gpriority; + grrr_group_index_t gindex; + grrr_group_t group; gpriority = grrr_priority_mapping[thread->sched_pri]; gindex = grrr_group_mapping[gpriority]; @@ -826,7 +834,7 @@ grrr_remove( static void grrr_sorted_list_insert_group(grrr_run_queue_t rq, - grrr_group_t group) + grrr_group_t group) { /* Simple insertion sort */ if (queue_empty(&rq->sorted_group_list)) { @@ -838,13 +846,13 @@ grrr_sorted_list_insert_group(grrr_run_queue_t rq, * element less than us, so we can insert before it */ search_group = (grrr_group_t)queue_first(&rq->sorted_group_list); - while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group) ) { - + while (!queue_end(&rq->sorted_group_list, (queue_entry_t)search_group)) { if (search_group->weight < group->weight) { /* we should be before this */ search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group); break; - } if (search_group->weight == group->weight) { + } + if (search_group->weight == group->weight) { /* Use group index as a tie breaker */ if (search_group->index < group->index) { search_group = (grrr_group_t)queue_prev((queue_entry_t)search_group); diff --git a/osfmk/kern/sched_multiq.c b/osfmk/kern/sched_multiq.c index fd945ba1c..9b4084777 100644 --- a/osfmk/kern/sched_multiq.c +++ b/osfmk/kern/sched_multiq.c @@ -178,14 +178,14 @@ typedef struct sched_entry { queue_chain_t entry_links; int16_t sched_pri; /* scheduled (current) priority */ int16_t runq; - int32_t pad; + int32_t pad; } *sched_entry_t; typedef run_queue_t entry_queue_t; /* A run queue that holds sched_entries instead of threads */ typedef run_queue_t group_runq_t; /* A run queue that is part of a sched_group */ #define SCHED_ENTRY_NULL ((sched_entry_t) 0) -#define MULTIQ_ERUNQ (-4) /* Indicates entry is on the main runq */ +#define MULTIQ_ERUNQ (-4) /* Indicates entry is on the main runq */ /* Each level in the run queue corresponds to one entry in the entries array */ struct sched_group { @@ -298,7 +298,7 @@ const struct sched_dispatch_table sched_multiq_dispatch = { .pset_init = sched_multiq_pset_init, .maintenance_continuation = sched_timeshare_maintenance_continue, .choose_thread = sched_multiq_choose_thread, - .steal_thread_enabled = FALSE, + .steal_thread_enabled = sched_steal_thread_DISABLED, .steal_thread = sched_multiq_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, .choose_processor = choose_processor, @@ -361,13 +361,13 @@ sched_multiq_init(void) } printf("multiq scheduler config: deep-drain %d, ceiling %d, depth limit %d, band limit %d, sanity check %d\n", - deep_drain, drain_ceiling, drain_depth_limit, drain_band_limit, multiq_sanity_check); + deep_drain, drain_ceiling, drain_depth_limit, drain_band_limit, multiq_sanity_check); sched_group_zone = zinit( - sizeof(struct sched_group), - task_max * sizeof(struct sched_group), - PAGE_SIZE, - "sched groups"); + sizeof(struct sched_group), + task_max * sizeof(struct sched_group), + PAGE_SIZE, + "sched groups"); zone_change(sched_group_zone, Z_NOENCRYPT, 
TRUE); zone_change(sched_group_zone, Z_NOCALLOUT, TRUE); @@ -397,10 +397,11 @@ sched_multiq_pset_init(processor_set_t pset) static sched_mode_t sched_multiq_initial_thread_sched_mode(task_t parent_task) { - if (parent_task == kernel_task) + if (parent_task == kernel_task) { return TH_MODE_FIXED; - else + } else { return TH_MODE_TIMESHARE; + } } sched_group_t @@ -408,8 +409,9 @@ sched_group_create(void) { sched_group_t sched_group; - if (!SCHED(sched_groups_enabled)) + if (!SCHED(sched_groups_enabled)) { return SCHED_GROUP_NULL; + } sched_group = (sched_group_t)zalloc(sched_group_zone); @@ -427,7 +429,7 @@ sched_group_create(void) num_sched_groups++; lck_mtx_unlock(&sched_groups_lock); - return (sched_group); + return sched_group; } void @@ -484,7 +486,7 @@ group_for_entry(sched_entry_t entry) sched_group_t group = (sched_group_t)(entry - entry->sched_pri); #pragma clang diagnostic pop return group; -} +} /* Peek at the head of the runqueue */ static sched_entry_t @@ -553,8 +555,9 @@ entry_queue_check_entry(entry_queue_t runq, sched_entry_t entry, int expected_pr q = &runq->queues[expected_pri]; qe_foreach_element(elem, q, entry_links) { - if (elem == entry) + if (elem == entry) { return; + } } panic("runq %p doesn't contain entry %p at pri %d", runq, entry, expected_pri); @@ -573,8 +576,9 @@ sched_group_check_thread(sched_group_t group, thread_t thread) q = &group->runq.queues[pri]; qe_foreach_element(elem, q, runq_links) { - if (elem == thread) + if (elem == thread) { return; + } } panic("group %p doesn't contain thread %p at pri %d", group, thread, pri); @@ -583,8 +587,9 @@ sched_group_check_thread(sched_group_t group, thread_t thread) static void global_check_entry_queue(entry_queue_t main_entryq) { - if (main_entryq->count == 0) + if (main_entryq->count == 0) { return; + } sched_entry_t entry = entry_queue_first_entry(main_entryq); @@ -605,8 +610,9 @@ global_check_entry_queue(entry_queue_t main_entryq) static void group_check_run_queue(entry_queue_t main_entryq, sched_group_t group) { - if (group->runq.count == 0) + if (group->runq.count == 0) { return; + } thread_t thread = group_first_thread(group); @@ -648,7 +654,7 @@ entry_queue_dequeue_entry(entry_queue_t rq) sched_entry->runq = 0; - return (sched_entry); + return sched_entry; } /* @@ -656,9 +662,9 @@ entry_queue_dequeue_entry(entry_queue_t rq) */ static boolean_t entry_queue_enqueue_entry( - entry_queue_t rq, - sched_entry_t entry, - integer_t options) + entry_queue_t rq, + sched_entry_t entry, + integer_t options) { int sched_pri = entry->sched_pri; queue_t queue = &rq->queues[sched_pri]; @@ -675,19 +681,21 @@ entry_queue_enqueue_entry( result = TRUE; } } else { - if (options & SCHED_TAILQ) + if (options & SCHED_TAILQ) { enqueue_tail(queue, &entry->entry_links); - else + } else { enqueue_head(queue, &entry->entry_links); + } } - if (SCHED(priority_is_urgent)(sched_pri)) + if (SCHED(priority_is_urgent)(sched_pri)) { rq->urgency++; + } SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); rq->count++; entry->runq = MULTIQ_ERUNQ; - return (result); + return result; } /* @@ -695,8 +703,8 @@ entry_queue_enqueue_entry( */ static void entry_queue_remove_entry( - entry_queue_t rq, - sched_entry_t entry) + entry_queue_t rq, + sched_entry_t entry) { int sched_pri = entry->sched_pri; @@ -725,9 +733,9 @@ entry_queue_remove_entry( static void entry_queue_change_entry( - entry_queue_t rq, - sched_entry_t entry, - integer_t options) + entry_queue_t rq, + sched_entry_t entry, + integer_t options) { int sched_pri = entry->sched_pri; queue_t queue 
= &rq->queues[sched_pri]; @@ -738,10 +746,11 @@ entry_queue_change_entry( } #endif - if (options & SCHED_TAILQ) + if (options & SCHED_TAILQ) { re_queue_tail(queue, &entry->entry_links); - else + } else { re_queue_head(queue, &entry->entry_links); + } } /* * The run queue must not be empty. @@ -750,9 +759,9 @@ entry_queue_change_entry( */ static thread_t group_run_queue_dequeue_thread( - group_runq_t rq, - integer_t *thread_pri, - boolean_t *queue_empty) + group_runq_t rq, + integer_t *thread_pri, + boolean_t *queue_empty) { thread_t thread; queue_t queue = &rq->queues[rq->highq]; @@ -787,10 +796,10 @@ group_run_queue_dequeue_thread( */ static boolean_t group_run_queue_enqueue_thread( - group_runq_t rq, - thread_t thread, - integer_t thread_pri, - integer_t options) + group_runq_t rq, + thread_t thread, + integer_t thread_pri, + integer_t options) { queue_t queue = &rq->queues[thread_pri]; boolean_t result = FALSE; @@ -807,17 +816,19 @@ group_run_queue_enqueue_thread( } result = TRUE; } else { - if (options & SCHED_TAILQ) + if (options & SCHED_TAILQ) { enqueue_tail(queue, &thread->runq_links); - else + } else { enqueue_head(queue, &thread->runq_links); + } } - if (SCHED(priority_is_urgent)(thread_pri)) + if (SCHED(priority_is_urgent)(thread_pri)) { rq->urgency++; + } SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); rq->count++; - return (result); + return result; } /* @@ -826,9 +837,9 @@ group_run_queue_enqueue_thread( */ static boolean_t group_run_queue_remove_thread( - group_runq_t rq, - thread_t thread, - integer_t thread_pri) + group_runq_t rq, + thread_t thread, + integer_t thread_pri) { boolean_t result = FALSE; @@ -920,8 +931,8 @@ sched_global_deep_drain_dequeue_thread(entry_queue_t main_entryq) static thread_t sched_group_dequeue_thread( - entry_queue_t main_entryq, - sched_group_t group) + entry_queue_t main_entryq, + sched_group_t group) { group_runq_t group_runq = &group->runq; boolean_t pri_level_empty = FALSE; @@ -941,9 +952,9 @@ sched_group_dequeue_thread( static void sched_group_remove_thread( - entry_queue_t main_entryq, - sched_group_t group, - thread_t thread) + entry_queue_t main_entryq, + sched_group_t group, + thread_t thread) { integer_t thread_pri = thread->sched_pri; sched_entry_t sched_entry = group_entry_for_pri(group, thread_pri); @@ -974,10 +985,10 @@ sched_group_remove_thread( static void sched_group_enqueue_thread( - entry_queue_t main_entryq, - sched_group_t group, - thread_t thread, - integer_t options) + entry_queue_t main_entryq, + sched_group_t group, + thread_t thread, + integer_t options) { #if defined(MULTIQ_SANITY_CHECK) if (multiq_sanity_check) { @@ -1017,18 +1028,19 @@ sched_group_enqueue_thread( */ static thread_t sched_multiq_choose_thread( - processor_t processor, - int priority, - ast_t reason) + processor_t processor, + int priority, + ast_t reason) { entry_queue_t main_entryq = multiq_main_entryq(processor); run_queue_t bound_runq = multiq_bound_runq(processor); boolean_t choose_bound_runq = FALSE; - if (bound_runq->highq < priority && - main_entryq->highq < priority) + if (bound_runq->highq < priority && + main_entryq->highq < priority) { return THREAD_NULL; + } if (bound_runq->count && main_entryq->count) { if (bound_runq->highq >= main_entryq->highq) { @@ -1041,7 +1053,7 @@ sched_multiq_choose_thread( } else if (main_entryq->count) { /* Use main runq */ } else { - return (THREAD_NULL); + return THREAD_NULL; } if (choose_bound_runq) { @@ -1085,21 +1097,24 @@ sched_multiq_choose_thread( * If there's something elsewhere above the depth 
limit, * don't pick a thread below the limit. */ - if (global_pri > drain_depth_limit && group_pri <= drain_depth_limit) + if (global_pri > drain_depth_limit && group_pri <= drain_depth_limit) { favor_group = FALSE; + } /* * If there's something at or above the ceiling, * don't favor the group. */ - if (global_pri >= drain_ceiling) + if (global_pri >= drain_ceiling) { favor_group = FALSE; + } /* * Don't go more than X steps below the global highest */ - if ((global_pri - group_pri) >= drain_band_limit) + if ((global_pri - group_pri) >= drain_band_limit) { favor_group = FALSE; + } } if (favor_group) { @@ -1131,9 +1146,9 @@ sched_multiq_choose_thread( */ static boolean_t sched_multiq_processor_enqueue( - processor_t processor, - thread_t thread, - integer_t options) + processor_t processor, + thread_t thread, + integer_t options) { boolean_t result; @@ -1149,12 +1164,12 @@ sched_multiq_processor_enqueue( } sched_group_enqueue_thread(multiq_main_entryq(processor), - thread->sched_group, - thread, options); + thread->sched_group, + thread, options); thread->runq = processor; - return (FALSE); + return FALSE; } /* @@ -1191,7 +1206,7 @@ static boolean_t sched_multiq_processor_queue_empty(processor_t processor) { return multiq_main_entryq(processor)->count == 0 && - multiq_bound_runq(processor)->count == 0; + multiq_bound_runq(processor)->count == 0; } static ast_t @@ -1201,7 +1216,7 @@ sched_multiq_processor_csw_check(processor_t processor) int pri; if (sched_multiq_thread_avoid_processor(processor, current_thread())) { - return (AST_PREEMPT | AST_URGENT); + return AST_PREEMPT | AST_URGENT; } entry_queue_t main_entryq = multiq_main_entryq(processor); @@ -1218,11 +1233,13 @@ sched_multiq_processor_csw_check(processor_t processor) } if (has_higher) { - if (main_entryq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); + if (main_entryq->urgency > 0) { + return AST_PREEMPT | AST_URGENT; + } - if (bound_runq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); + if (bound_runq->urgency > 0) { + return AST_PREEMPT | AST_URGENT; + } return AST_PREEMPT; } @@ -1232,19 +1249,20 @@ sched_multiq_processor_csw_check(processor_t processor) static boolean_t sched_multiq_processor_queue_has_priority( - processor_t processor, - int priority, - boolean_t gte) + processor_t processor, + int priority, + boolean_t gte) { run_queue_t main_runq = multiq_main_entryq(processor); run_queue_t bound_runq = multiq_bound_runq(processor); int qpri = MAX(main_runq->highq, bound_runq->highq); - if (gte) + if (gte) { return qpri >= priority; - else + } else { return qpri > priority; + } } static int @@ -1272,10 +1290,11 @@ sched_multiq_runq_stats_count_sum(processor_t processor) uint64_t bound_sum = multiq_bound_runq(processor)->runq_stats.count_sum; - if (processor->cpu_id == processor->processor_set->cpu_set_low) + if (processor->cpu_id == processor->processor_set->cpu_set_low) { return bound_sum + multiq_main_entryq(processor)->runq_stats.count_sum; - else + } else { return bound_sum; + } } static int @@ -1310,7 +1329,6 @@ sched_multiq_processor_queue_shutdown(processor_t processor) pset_unlock(pset); qe_foreach_element_safe(thread, &tqueue, runq_links) { - remqueue(&thread->runq_links); thread_lock(thread); @@ -1329,8 +1347,8 @@ sched_multiq_processor_queue_shutdown(processor_t processor) */ static boolean_t sched_multiq_processor_queue_remove( - processor_t processor, - thread_t thread) + processor_t processor, + thread_t thread) { boolean_t removed = FALSE; processor_set_t pset = processor->processor_set; @@ -1351,8 +1369,8 
@@ sched_multiq_processor_queue_remove( thread->runq = PROCESSOR_NULL; } else { sched_group_remove_thread(multiq_main_entryq(processor), - thread->sched_group, - thread); + thread->sched_group, + thread); } removed = TRUE; @@ -1368,7 +1386,7 @@ static thread_t sched_multiq_steal_thread(processor_set_t pset) { pset_unlock(pset); - return (THREAD_NULL); + return THREAD_NULL; } /* @@ -1381,19 +1399,20 @@ sched_multiq_steal_thread(processor_set_t pset) * Returns TRUE if retry is needed. */ static boolean_t -group_scan(entry_queue_t runq, sched_update_scan_context_t scan_context) { +group_scan(entry_queue_t runq, sched_update_scan_context_t scan_context) +{ int count = runq->count; int queue_index; assert(count >= 0); - if (count == 0) + if (count == 0) { return FALSE; + } for (queue_index = bitmap_first(runq->bitmap, NRQS); - queue_index >= 0; - queue_index = bitmap_next(runq->bitmap, queue_index)) { - + queue_index >= 0; + queue_index = bitmap_next(runq->bitmap, queue_index)) { sched_entry_t entry; qe_foreach_element(entry, &runq->queues[queue_index], entry_links) { @@ -1401,14 +1420,15 @@ group_scan(entry_queue_t runq, sched_update_scan_context_t scan_context) { sched_group_t group = group_for_entry(entry); if (group->runq.count > 0) { - if (runq_scan(&group->runq, scan_context)) - return (TRUE); + if (runq_scan(&group->runq, scan_context)) { + return TRUE; + } } count--; } } - return (FALSE); + return FALSE; } static void @@ -1437,8 +1457,9 @@ sched_multiq_thread_update_scan(sched_update_scan_context_t scan_context) pset_unlock(pset); splx(s); - if (restart_needed) + if (restart_needed) { break; + } thread = processor->idle_thread; if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) { @@ -1451,7 +1472,6 @@ sched_multiq_thread_update_scan(sched_update_scan_context_t scan_context) /* Ok, we now have a collection of candidates -- fix them. */ thread_update_process_threads(); - } while (restart_needed); pset = &pset0; @@ -1466,13 +1486,13 @@ sched_multiq_thread_update_scan(sched_update_scan_context_t scan_context) pset_unlock(pset); splx(s); - if (restart_needed) + if (restart_needed) { break; + } } while ((pset = pset->pset_list) != NULL); /* Ok, we now have a collection of candidates -- fix them. */ thread_update_process_threads(); - } while (restart_needed); } diff --git a/osfmk/kern/sched_prim.c b/osfmk/kern/sched_prim.c index 751b57417..e5a3d2e2e 100644 --- a/osfmk/kern/sched_prim.c +++ b/osfmk/kern/sched_prim.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
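group_scan above visits only the occupied priority levels of the entry queue by walking the run queue's occupancy bitmap with bitmap_first()/bitmap_next(), highest priority first. A sketch of that traversal over a single 64-bit word; the kernel's bitmap_t spans NRQS bits, so the one-word version here is purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for bitmap_first()/bitmap_next() from
     * osfmk/kern/bits.h: scan set bits from the highest index downward,
     * which is how the scheduler visits priority levels from highq down. */
    static int bitmap_first(uint64_t map)
    {
        return map ? 63 - __builtin_clzll(map) : -1;
    }

    static int bitmap_next(uint64_t map, int prev)
    {
        if (prev == 0) {
            return -1;
        }
        uint64_t below = map & ((1ULL << prev) - 1);
        return below ? 63 - __builtin_clzll(below) : -1;
    }

    int main(void)
    {
        /* Pretend priority levels 4, 31 and 60 have queued entries. */
        uint64_t runq_bitmap = (1ULL << 4) | (1ULL << 31) | (1ULL << 60);

        for (int pri = bitmap_first(runq_bitmap);
            pri >= 0;
            pri = bitmap_next(runq_bitmap, pri)) {
            printf("scan priority level %d\n", pri);
        }
        return 0;
    }

The payoff is that a scan touches only levels with queued entries (here 60, 31, 4) rather than all NRQS slots.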
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -125,83 +125,86 @@ #include #include -int rt_runq_count(processor_set_t pset) +int +rt_runq_count(processor_set_t pset) { - return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed); + return atomic_load_explicit(&SCHED(rt_runq)(pset)->count, memory_order_relaxed); } -void rt_runq_count_incr(processor_set_t pset) +void +rt_runq_count_incr(processor_set_t pset) { - atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed); + atomic_fetch_add_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed); } -void rt_runq_count_decr(processor_set_t pset) +void +rt_runq_count_decr(processor_set_t pset) { - atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed); + atomic_fetch_sub_explicit(&SCHED(rt_runq)(pset)->count, 1, memory_order_relaxed); } -#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */ -int default_preemption_rate = DEFAULT_PREEMPTION_RATE; +#define DEFAULT_PREEMPTION_RATE 100 /* (1/s) */ +int default_preemption_rate = DEFAULT_PREEMPTION_RATE; -#define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */ -int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE; +#define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */ +int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE; -#define MAX_UNSAFE_QUANTA 800 -int max_unsafe_quanta = MAX_UNSAFE_QUANTA; +#define MAX_UNSAFE_QUANTA 800 +int max_unsafe_quanta = MAX_UNSAFE_QUANTA; -#define MAX_POLL_QUANTA 2 -int max_poll_quanta = MAX_POLL_QUANTA; +#define MAX_POLL_QUANTA 2 +int max_poll_quanta = MAX_POLL_QUANTA; -#define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */ -int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT; +#define SCHED_POLL_YIELD_SHIFT 4 /* 1/16 */ +int sched_poll_yield_shift = SCHED_POLL_YIELD_SHIFT; -uint64_t max_poll_computation; +uint64_t max_poll_computation; -uint64_t max_unsafe_computation; -uint64_t sched_safe_duration; +uint64_t max_unsafe_computation; +uint64_t sched_safe_duration; #if defined(CONFIG_SCHED_TIMESHARE_CORE) -uint32_t std_quantum; -uint32_t min_std_quantum; -uint32_t bg_quantum; +uint32_t std_quantum; +uint32_t min_std_quantum; +uint32_t bg_quantum; -uint32_t std_quantum_us; -uint32_t bg_quantum_us; +uint32_t std_quantum_us; +uint32_t bg_quantum_us; #endif /* CONFIG_SCHED_TIMESHARE_CORE */ -uint32_t thread_depress_time; -uint32_t 
default_timeshare_computation; -uint32_t default_timeshare_constraint; +uint32_t thread_depress_time; +uint32_t default_timeshare_computation; +uint32_t default_timeshare_constraint; -uint32_t max_rt_quantum; -uint32_t min_rt_quantum; +uint32_t max_rt_quantum; +uint32_t min_rt_quantum; #if defined(CONFIG_SCHED_TIMESHARE_CORE) -unsigned sched_tick; -uint32_t sched_tick_interval; +unsigned sched_tick; +uint32_t sched_tick_interval; /* Timeshare load calculation interval (15ms) */ -uint32_t sched_load_compute_interval_us = 15000; -uint64_t sched_load_compute_interval_abs; -static _Atomic uint64_t sched_load_compute_deadline; +uint32_t sched_load_compute_interval_us = 15000; +uint64_t sched_load_compute_interval_abs; +static _Atomic uint64_t sched_load_compute_deadline; -uint32_t sched_pri_shifts[TH_BUCKET_MAX]; -uint32_t sched_fixed_shift; +uint32_t sched_pri_shifts[TH_BUCKET_MAX]; +uint32_t sched_fixed_shift; -uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */ +uint32_t sched_decay_usage_age_factor = 1; /* accelerate 5/8^n usage aging */ /* Allow foreground to decay past default to resolve inversions */ #define DEFAULT_DECAY_BAND_LIMIT ((BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2) -int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT; +int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT; /* Defaults for timer deadline profiling */ #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* Timers with deadlines <= - * 2ms */ + * 2ms */ #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* Timers with deadlines - <= 5ms */ + * <= 5ms */ uint64_t timer_deadline_tracking_bin_1; uint64_t timer_deadline_tracking_bin_2; @@ -210,18 +213,18 @@ uint64_t timer_deadline_tracking_bin_2; thread_t sched_maintenance_thread; -#if __arm__ || __arm64__ /* interrupts disabled lock to guard recommended cores state */ -decl_simple_lock_data(static,sched_recommended_cores_lock); -static void sched_recommended_cores_maintenance(void); -static void sched_update_recommended_cores(uint32_t recommended_cores); +decl_simple_lock_data(static, sched_recommended_cores_lock); +static uint64_t usercontrol_requested_recommended_cores = ALL_CORES_RECOMMENDED; +static void sched_update_recommended_cores(uint64_t recommended_cores); +#if __arm__ || __arm64__ +static void sched_recommended_cores_maintenance(void); uint64_t perfcontrol_failsafe_starvation_threshold; extern char *proc_name_address(struct proc *p); - #endif /* __arm__ || __arm64__ */ -uint64_t sched_one_second_interval; +uint64_t sched_one_second_interval; /* Forwards */ @@ -233,24 +236,26 @@ static void preempt_pri_init(void); #endif /* CONFIG_SCHED_TIMESHARE_CORE */ #if CONFIG_SCHED_IDLE_IN_PLACE -static thread_t thread_select_idle( - thread_t thread, - processor_t processor); +static thread_t thread_select_idle( + thread_t thread, + processor_t processor); #endif -thread_t processor_idle( - thread_t thread, - processor_t processor); +thread_t processor_idle( + thread_t thread, + processor_t processor); -ast_t -csw_check_locked( processor_t processor, - processor_set_t pset, - ast_t check_reason); +static ast_t +csw_check_locked( + thread_t thread, + processor_t processor, + processor_set_t pset, + ast_t check_reason); static void processor_setrun( - processor_t processor, - thread_t thread, - integer_t options); + processor_t processor, + thread_t thread, + integer_t options); static void sched_realtime_timebase_init(void); @@ -258,7 +263,7 @@ sched_realtime_timebase_init(void); static void 
sched_timer_deadline_tracking_init(void); -#if DEBUG +#if DEBUG extern int debug_task; #define TLOG(a, fmt, args...) if(debug_task & a) kprintf(fmt, ## args) #else @@ -267,15 +272,15 @@ extern int debug_task; static processor_t thread_bind_internal( - thread_t thread, - processor_t processor); + thread_t thread, + processor_t processor); static void sched_vm_group_maintenance(void); #if defined(CONFIG_SCHED_TIMESHARE_CORE) -int8_t sched_load_shifts[NRQS]; -bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS)]; +int8_t sched_load_shifts[NRQS]; +bitmap_t sched_preempt_pri[BITMAP_LEN(NRQS)]; #endif /* CONFIG_SCHED_TIMESHARE_CORE */ const struct sched_dispatch_table *sched_current_dispatch = NULL; @@ -306,7 +311,7 @@ static int cpu_throttle_enabled = 1; #if DEBUG -/* Since using the indirect function dispatch table has a negative impact on +/* Since using the indirect function dispatch table has a negative impact on * context switch performance, only allow DEBUG kernels to use that mechanism. */ static void @@ -315,7 +320,7 @@ sched_init_override(void) char sched_arg[SCHED_STRING_MAX_LENGTH] = { '\0' }; /* Check for runtime selection of the scheduler algorithm */ - if (!PE_parse_boot_argn("sched", sched_arg, sizeof (sched_arg))) { + if (!PE_parse_boot_argn("sched", sched_arg, sizeof(sched_arg))) { sched_arg[0] = '\0'; } if (strlen(sched_arg) > 0) { @@ -369,15 +374,15 @@ sched_init(void) if (!PE_parse_boot_argn("sched_pri_decay_limit", &sched_pri_decay_band_limit, sizeof(sched_pri_decay_band_limit))) { /* No boot-args, check in device tree */ if (!PE_get_default("kern.sched_pri_decay_limit", - &sched_pri_decay_band_limit, - sizeof(sched_pri_decay_band_limit))) { + &sched_pri_decay_band_limit, + sizeof(sched_pri_decay_band_limit))) { /* Allow decay all the way to normal limits */ sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT; } } kprintf("Setting scheduler priority decay band limit %d\n", sched_pri_decay_band_limit); - + if (PE_parse_boot_argn("sched_debug", &sched_debug_flags, sizeof(sched_debug_flags))) { kprintf("Scheduler: Debug flags 0x%08x\n", sched_debug_flags); } @@ -396,11 +401,11 @@ sched_init(void) void sched_timebase_init(void) { - uint64_t abstime; - + uint64_t abstime; + clock_interval_to_absolutetime_interval(1, NSEC_PER_SEC, &abstime); sched_one_second_interval = abstime; - + SCHED(timebase_init)(); sched_realtime_timebase_init(); } @@ -414,14 +419,16 @@ sched_timeshare_init(void) * Calculate the timeslicing quantum * in us. 
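The sched_pri_decay_limit lookup in sched_init above follows the usual tunable cascade: a boot-arg wins, otherwise a device-tree default, otherwise the compiled-in DEFAULT_DECAY_BAND_LIMIT. A user-space sketch of the same precedence, with getenv standing in for PE_parse_boot_argn and a stub for PE_get_default (both are stand-ins, not the real platform-expert API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* (BASEPRI_FOREGROUND - BASEPRI_DEFAULT) + 2, assuming the usual
     * 47 and 31; illustrative value only. */
    #define DEFAULT_DECAY_BAND_LIMIT 18

    /* Stand-in for PE_parse_boot_argn(): here, an environment variable. */
    static bool parse_boot_argn(const char *name, int *out)
    {
        const char *v = getenv(name);
        if (v == NULL) {
            return false;
        }
        *out = atoi(v);
        return true;
    }

    /* Stand-in for PE_get_default(): pretend the device tree is empty. */
    static bool get_default(const char *name, int *out)
    {
        (void)name; (void)out;
        return false;
    }

    int main(void)
    {
        int sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;

        /* Same precedence as sched_init(): boot-arg, then device tree,
         * then the compiled-in default. */
        if (!parse_boot_argn("sched_pri_decay_limit",
            &sched_pri_decay_band_limit)) {
            if (!get_default("kern.sched_pri_decay_limit",
                &sched_pri_decay_band_limit)) {
                sched_pri_decay_band_limit = DEFAULT_DECAY_BAND_LIMIT;
            }
        }
        printf("decay band limit %d\n", sched_pri_decay_band_limit);
        return 0;
    }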
*/ - if (default_preemption_rate < 1) + if (default_preemption_rate < 1) { default_preemption_rate = DEFAULT_PREEMPTION_RATE; + } std_quantum_us = (1000 * 1000) / default_preemption_rate; printf("standard timeslicing quantum is %d us\n", std_quantum_us); - if (default_bg_preemption_rate < 1) + if (default_bg_preemption_rate < 1) { default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE; + } bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate; printf("standard background quantum is %d us\n", bg_quantum_us); @@ -434,12 +441,12 @@ sched_timeshare_init(void) void sched_timeshare_timebase_init(void) { - uint64_t abstime; - uint32_t shift; + uint64_t abstime; + uint32_t shift; /* standard timeslicing quantum */ clock_interval_to_absolutetime_interval( - std_quantum_us, NSEC_PER_USEC, &abstime); + std_quantum_us, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); std_quantum = (uint32_t)abstime; @@ -450,31 +457,33 @@ sched_timeshare_timebase_init(void) /* quantum for background tasks */ clock_interval_to_absolutetime_interval( - bg_quantum_us, NSEC_PER_USEC, &abstime); + bg_quantum_us, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); bg_quantum = (uint32_t)abstime; /* scheduler tick interval */ clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT, - NSEC_PER_USEC, &abstime); + NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); sched_tick_interval = (uint32_t)abstime; /* timeshare load calculation interval & deadline initialization */ clock_interval_to_absolutetime_interval(sched_load_compute_interval_us, NSEC_PER_USEC, &sched_load_compute_interval_abs); - sched_load_compute_deadline = sched_load_compute_interval_abs; - + sched_load_compute_deadline = sched_load_compute_interval_abs; + /* * Compute conversion factor from usage to * timesharing priorities with 5/8 ** n aging. 
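The two divisions in sched_timeshare_init above are worth making concrete: the default 100 Hz preemption rate yields a 10 000 us standard quantum, and the 400 Hz background rate yields 2 500 us. The same arithmetic, guard included:

    #include <stdio.h>

    /* Defaults from sched_prim.c above. */
    #define DEFAULT_PREEMPTION_RATE    100 /* (1/s) */
    #define DEFAULT_BG_PREEMPTION_RATE 400 /* (1/s) */

    int main(void)
    {
        int default_preemption_rate = DEFAULT_PREEMPTION_RATE;
        int default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;

        /* Same guard and computation as sched_timeshare_init(). */
        if (default_preemption_rate < 1) {
            default_preemption_rate = DEFAULT_PREEMPTION_RATE;
        }
        int std_quantum_us = (1000 * 1000) / default_preemption_rate;

        if (default_bg_preemption_rate < 1) {
            default_bg_preemption_rate = DEFAULT_BG_PREEMPTION_RATE;
        }
        int bg_quantum_us = (1000 * 1000) / default_bg_preemption_rate;

        printf("standard timeslicing quantum is %d us\n", std_quantum_us);
        printf("standard background quantum is %d us\n", bg_quantum_us);
        return 0;
    }

The printf lines deliberately mirror the kernel's own boot messages, which report exactly these two values.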
*/ abstime = (abstime * 5) / 3; - for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) + for (shift = 0; abstime > BASEPRI_DEFAULT; ++shift) { abstime >>= 1; + } sched_fixed_shift = shift; - for (uint32_t i = 0 ; i < TH_BUCKET_MAX ; i++) + for (uint32_t i = 0; i < TH_BUCKET_MAX; i++) { sched_pri_shifts[i] = INT8_MAX; + } max_unsafe_computation = ((uint64_t)max_unsafe_quanta) * std_quantum; sched_safe_duration = 2 * ((uint64_t)max_unsafe_quanta) * std_quantum; @@ -485,7 +494,7 @@ sched_timeshare_timebase_init(void) default_timeshare_constraint = std_quantum; #if __arm__ || __arm64__ - perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval); + perfcontrol_failsafe_starvation_threshold = (2 * sched_tick_interval); #endif /* __arm__ || __arm64__ */ } @@ -540,10 +549,9 @@ sched_realtime_timebase_init(void) /* maximum rt computation (50 ms) */ clock_interval_to_absolutetime_interval( - 50, 1000*NSEC_PER_USEC, &abstime); + 50, 1000 * NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); max_rt_quantum = (uint32_t)abstime; - } void @@ -560,7 +568,21 @@ sched_thread_should_yield(processor_t processor, thread_t thread) { (void)thread; - return (!SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0); + return !SCHED(processor_queue_empty)(processor) || rt_runq_count(processor->processor_set) > 0; +} + +/* Default implementations of .steal_thread_enabled */ +bool +sched_steal_thread_DISABLED(processor_set_t pset) +{ + (void)pset; + return false; +} + +bool +sched_steal_thread_enabled(processor_set_t pset) +{ + return pset->node->pset_count > 1; } #if defined(CONFIG_SCHED_TIMESHARE_CORE) @@ -572,16 +594,16 @@ sched_thread_should_yield(processor_t processor, thread_t thread) static void load_shift_init(void) { - int8_t k, *p = sched_load_shifts; - uint32_t i, j; + int8_t k, *p = sched_load_shifts; + uint32_t i, j; - uint32_t sched_decay_penalty = 1; + uint32_t sched_decay_penalty = 1; - if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof (sched_decay_penalty))) { + if (PE_parse_boot_argn("sched_decay_penalty", &sched_decay_penalty, sizeof(sched_decay_penalty))) { kprintf("Overriding scheduler decay penalty %u\n", sched_decay_penalty); } - if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof (sched_decay_usage_age_factor))) { + if (PE_parse_boot_argn("sched_decay_usage_age_factor", &sched_decay_usage_age_factor, sizeof(sched_decay_usage_age_factor))) { kprintf("Overriding scheduler decay usage age factor %u\n", sched_decay_usage_age_factor); } @@ -609,8 +631,9 @@ load_shift_init(void) * array entries to be filled with smaller "k" values */ for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) { - for (j <<= 1; (i < j) && (i < NRQS); ++i) + for (j <<= 1; (i < j) && (i < NRQS); ++i) { *p++ = k; + } } } @@ -619,11 +642,13 @@ preempt_pri_init(void) { bitmap_t *p = sched_preempt_pri; - for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) + for (int i = BASEPRI_FOREGROUND; i < MINPRI_KERNEL; ++i) { bitmap_set(p, i); + } - for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) + for (int i = BASEPRI_PREEMPT; i <= MAXPRI; ++i) { bitmap_set(p, i); + } } #endif /* CONFIG_SCHED_TIMESHARE_CORE */ @@ -633,11 +658,11 @@ preempt_pri_init(void) */ void thread_timer_expire( - void *p0, - __unused void *p1) + void *p0, + __unused void *p1) { - thread_t thread = p0; - spl_t s; + thread_t thread = p0; + spl_t s; assert_thread_magic(thread); @@ -666,13 +691,13 @@ 
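The load_shift_init filling loop above gives sched_load_shifts[i] a value of roughly log2(i), stretched by sched_decay_penalty. A sketch that reproduces the loop and probes a few entries; the two entries below index 2 are initialized before this loop in the kernel, so they are simply zeroed here to keep the demo self-contained:

    #include <stdint.h>
    #include <stdio.h>

    #define NRQS 128 /* run-queue priority levels, as in the kernel */

    int main(void)
    {
        int8_t sched_load_shifts[NRQS];
        uint32_t sched_decay_penalty = 1; /* boot-arg default shown above */
        int8_t k, *p = sched_load_shifts;
        uint32_t i, j;

        /* Entries 0 and 1 are set up before the loop in the kernel;
         * zero them here so the demo stands alone. */
        *p++ = 0;
        *p++ = 0;

        /* Same filling loop as load_shift_init(): each doubling of the
         * load index advances k by one, so entry i is about log2(i). */
        for (i = 2, j = 1 << sched_decay_penalty, k = 1; i < NRQS; ++k) {
            for (j <<= 1; (i < j) && (i < NRQS); ++i) {
                *p++ = k;
            }
        }

        int probes[] = { 2, 3, 4, 8, 16, 64, 127 };
        for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++) {
            printf("load %3d -> shift %d\n", probes[i],
                sched_load_shifts[probes[i]]);
        }
        return 0;
    }

With the default penalty of 1 this prints shifts 1, 1, 2, 3, 4, 6, 6; a larger sched_decay_penalty widens the first band, making light loads decay priorities more aggressively.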
thread_timer_expire( */ boolean_t thread_unblock( - thread_t thread, - wait_result_t wresult) + thread_t thread, + wait_result_t wresult) { - boolean_t ready_for_runq = FALSE; - thread_t cthread = current_thread(); - uint32_t new_run_count; - int old_thread_state; + boolean_t ready_for_runq = FALSE; + thread_t cthread = current_thread(); + uint32_t new_run_count; + int old_thread_state; /* * Set wait_result. @@ -683,8 +708,9 @@ thread_unblock( * Cancel pending wait timer. */ if (thread->wait_timer_is_set) { - if (timer_call_cancel(&thread->wait_timer)) + if (timer_call_cancel(&thread->wait_timer)) { thread->wait_timer_active--; + } thread->wait_timer_is_set = FALSE; } @@ -694,7 +720,7 @@ thread_unblock( */ old_thread_state = thread->state; thread->state = (old_thread_state | TH_RUN) & - ~(TH_WAIT|TH_UNINT|TH_WAIT_REPORT); + ~(TH_WAIT | TH_UNINT | TH_WAIT_REPORT); if ((old_thread_state & TH_RUN) == 0) { uint64_t ctime = mach_approximate_time(); @@ -716,10 +742,11 @@ thread_unblock( */ #if CONFIG_SCHED_IDLE_IN_PLACE if (thread->state & TH_IDLE) { - processor_t processor = thread->last_processor; + processor_t processor = thread->last_processor; - if (processor != current_processor()) + if (processor != current_processor()) { machine_signal_idle(processor); + } } #else assert((thread->state & TH_IDLE) == 0); @@ -765,30 +792,29 @@ thread_unblock( uint64_t ttd = PROCESSOR_DATA(current_processor(), timer_call_ttd); if (ttd) { - if (ttd <= timer_deadline_tracking_bin_1) + if (ttd <= timer_deadline_tracking_bin_1) { thread->thread_timer_wakeups_bin_1++; - else - if (ttd <= timer_deadline_tracking_bin_2) - thread->thread_timer_wakeups_bin_2++; + } else if (ttd <= timer_deadline_tracking_bin_2) { + thread->thread_timer_wakeups_bin_2++; + } } ledger_credit_thread(thread, thread->t_ledger, - task_ledgers.interrupt_wakeups, 1); + task_ledgers.interrupt_wakeups, 1); if (pidle) { ledger_credit_thread(thread, thread->t_ledger, - task_ledgers.platform_idle_wakeups, 1); + task_ledgers.platform_idle_wakeups, 1); } - } else if (thread_get_tag_internal(cthread) & THREAD_TAG_CALLOUT) { /* TODO: what about an interrupt that does a wake taken on a callout thread? 
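The wakeup accounting in thread_unblock above buckets the waking timer's time-to-deadline (ttd) against the two tracking bins whose defaults appear earlier in this file, 2 ms and 5 ms. A sketch of the same classification:

    #include <stdint.h>
    #include <stdio.h>

    /* Defaults from sched_prim.c above (nanoseconds). */
    #define TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT 2000000 /* <= 2ms */
    #define TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT 5000000 /* <= 5ms */

    int main(void)
    {
        uint64_t bin_1 = TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT;
        uint64_t bin_2 = TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT;
        uint64_t bin_1_hits = 0, bin_2_hits = 0;

        /* Sample time-to-deadline values, in ns. */
        uint64_t samples[] = { 500000, 2000000, 3000000, 5000001, 0 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            uint64_t ttd = samples[i];
            /* Mirrors the branch in thread_unblock(): a ttd of zero
             * means the wakeup was not driven by a timer at all. */
            if (ttd) {
                if (ttd <= bin_1) {
                    bin_1_hits++;
                } else if (ttd <= bin_2) {
                    bin_2_hits++;
                }
            }
        }
        printf("bin_1 %llu, bin_2 %llu\n",
            (unsigned long long)bin_1_hits, (unsigned long long)bin_2_hits);
        return 0;
    }

Deadlines past the second bin, like the 5 000 001 ns sample, are counted in neither bucket; only short-deadline timer wakeups are tracked.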
*/ if (cthread->callout_woken_from_icontext) { ledger_credit_thread(thread, thread->t_ledger, - task_ledgers.interrupt_wakeups, 1); + task_ledgers.interrupt_wakeups, 1); thread->thread_callout_interrupt_wakeups++; if (cthread->callout_woken_from_platform_idle) { ledger_credit_thread(thread, thread->t_ledger, - task_ledgers.platform_idle_wakeups, 1); + task_ledgers.platform_idle_wakeups, 1); thread->thread_callout_platform_idle_wakeups++; } @@ -809,13 +835,13 @@ thread_unblock( #endif /* KPERF */ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result, - sched_run_buckets[TH_BUCKET_RUN], 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), thread->sched_pri, thread->wait_result, + sched_run_buckets[TH_BUCKET_RUN], 0); DTRACE_SCHED2(wakeup, struct thread *, thread, struct proc *, thread->task->bsd_info); - return (ready_for_runq); + return ready_for_runq; } /* @@ -833,8 +859,8 @@ thread_unblock( */ kern_return_t thread_go( - thread_t thread, - wait_result_t wresult) + thread_t thread, + wait_result_t wresult) { assert_thread_magic(thread); @@ -842,19 +868,19 @@ thread_go( assert(thread->wait_event == NO_EVENT64); assert(thread->waitq == NULL); - assert(!(thread->state & (TH_TERMINATE|TH_TERMINATE2))); + assert(!(thread->state & (TH_TERMINATE | TH_TERMINATE2))); assert(thread->state & TH_WAIT); if (thread_unblock(thread, wresult)) { -#if SCHED_TRACE_THREAD_WAKEUPS +#if SCHED_TRACE_THREAD_WAKEUPS backtrace(&thread->thread_wakeup_bt[0], - (sizeof(thread->thread_wakeup_bt)/sizeof(uintptr_t))); + (sizeof(thread->thread_wakeup_bt) / sizeof(uintptr_t))); #endif thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -869,13 +895,13 @@ thread_go( __private_extern__ wait_result_t thread_mark_wait_locked( - thread_t thread, - wait_interrupt_t interruptible_orig) + thread_t thread, + wait_interrupt_t interruptible_orig) { - boolean_t at_safe_point; - wait_interrupt_t interruptible = interruptible_orig; + boolean_t at_safe_point; + wait_interrupt_t interruptible = interruptible_orig; - assert(!(thread->state & (TH_WAIT|TH_IDLE|TH_UNINT|TH_TERMINATE2|TH_WAIT_REPORT))); + assert(!(thread->state & (TH_WAIT | TH_IDLE | TH_UNINT | TH_TERMINATE2 | TH_WAIT_REPORT))); /* * The thread may have certain types of interrupts/aborts masked @@ -884,18 +910,19 @@ thread_mark_wait_locked( * not be able to handle aborts at the moment). 
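The comment above describes a clamp, and the code that follows implements it: the caller's requested interruptibility is first reduced to the TH_OPT_INTMASK bits, then lowered to the thread's own ceiling if it asks for more than the thread currently tolerates. A sketch, assuming the conventional ordering THREAD_UNINT < THREAD_INTERRUPTIBLE < THREAD_ABORTSAFE (values illustrative):

    #include <stdio.h>

    /* Illustrative values; the ordering (uninterruptible lowest,
     * abort-safe highest) is what the clamp below relies on. */
    #define THREAD_UNINT         0
    #define THREAD_INTERRUPTIBLE 1
    #define THREAD_ABORTSAFE     2
    #define TH_OPT_INTMASK       0x3

    /* Mirrors the clamp at the top of thread_mark_wait_locked(). */
    static int clamp_interruptible(int requested, int thread_options)
    {
        int interruptible = requested & TH_OPT_INTMASK;
        if (interruptible > (thread_options & TH_OPT_INTMASK)) {
            interruptible = thread_options & TH_OPT_INTMASK;
        }
        return interruptible;
    }

    int main(void)
    {
        /* A thread that only permits plain interruptible waits... */
        int options = THREAD_INTERRUPTIBLE;

        /* ...downgrades an ABORTSAFE request to INTERRUPTIBLE. */
        printf("%d\n", clamp_interruptible(THREAD_ABORTSAFE, options));

        /* An UNINT request is never upgraded. */
        printf("%d\n", clamp_interruptible(THREAD_UNINT, options));
        return 0;
    }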
*/ interruptible &= TH_OPT_INTMASK; - if (interruptible > (thread->options & TH_OPT_INTMASK)) + if (interruptible > (thread->options & TH_OPT_INTMASK)) { interruptible = thread->options & TH_OPT_INTMASK; + } at_safe_point = (interruptible == THREAD_ABORTSAFE); - if ( interruptible == THREAD_UNINT || - !(thread->sched_flags & TH_SFLAG_ABORT) || - (!at_safe_point && - (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) { - - if ( !(thread->state & TH_TERMINATE)) + if (interruptible == THREAD_UNINT || + !(thread->sched_flags & TH_SFLAG_ABORT) || + (!at_safe_point && + (thread->sched_flags & TH_SFLAG_ABORTSAFELY))) { + if (!(thread->state & TH_TERMINATE)) { DTRACE_SCHED(sleep); + } int state_bits = TH_WAIT; if (!interruptible) { @@ -919,14 +946,15 @@ thread_mark_wait_locked( thread->block_hint = thread->pending_block_hint; thread->pending_block_hint = kThreadWaitNone; - return (thread->wait_result = THREAD_WAITING); + return thread->wait_result = THREAD_WAITING; } else { - if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) + if (thread->sched_flags & TH_SFLAG_ABORTSAFELY) { thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK; + } } thread->pending_block_hint = kThreadWaitNone; - return (thread->wait_result = THREAD_INTERRUPTED); + return thread->wait_result = THREAD_INTERRUPTED; } /* @@ -942,7 +970,7 @@ thread_mark_wait_locked( * Returns: * The old interrupt level for the thread. */ -__private_extern__ +__private_extern__ wait_interrupt_t thread_interrupt_level( wait_interrupt_t new_level) @@ -963,15 +991,16 @@ thread_interrupt_level( */ wait_result_t assert_wait( - event_t event, - wait_interrupt_t interruptible) + event_t event, + wait_interrupt_t interruptible) { - if (__improbable(event == NO_EVENT)) + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(event), 0, 0, 0, 0); struct waitq *waitq; waitq = global_eventq(event); @@ -985,25 +1014,26 @@ assert_wait( */ struct waitq * assert_wait_queue( - event_t event) + event_t event) { return global_eventq(event); } wait_result_t assert_wait_timeout( - event_t event, - wait_interrupt_t interruptible, - uint32_t interval, - uint32_t scale_factor) + event_t event, + wait_interrupt_t interruptible, + uint32_t interval, + uint32_t scale_factor) { - thread_t thread = current_thread(); - wait_result_t wresult; - uint64_t deadline; - spl_t s; + thread_t thread = current_thread(); + wait_result_t wresult; + uint64_t deadline; + spl_t s; - if (__improbable(event == NO_EVENT)) + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } struct waitq *waitq; waitq = global_eventq(event); @@ -1014,14 +1044,14 @@ assert_wait_timeout( clock_interval_to_deadline(interval, scale_factor, &deadline); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0); wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event), - interruptible, - TIMEOUT_URGENCY_SYS_NORMAL, - deadline, TIMEOUT_NO_LEEWAY, - thread); + interruptible, + TIMEOUT_URGENCY_SYS_NORMAL, + deadline, TIMEOUT_NO_LEEWAY, + thread); waitq_unlock(waitq); splx(s); @@ -1030,23 +1060,24 @@ 
assert_wait_timeout( wait_result_t assert_wait_timeout_with_leeway( - event_t event, - wait_interrupt_t interruptible, - wait_timeout_urgency_t urgency, - uint32_t interval, - uint32_t leeway, - uint32_t scale_factor) -{ - thread_t thread = current_thread(); - wait_result_t wresult; - uint64_t deadline; - uint64_t abstime; - uint64_t slop; - uint64_t now; - spl_t s; - - if (__improbable(event == NO_EVENT)) + event_t event, + wait_interrupt_t interruptible, + wait_timeout_urgency_t urgency, + uint32_t interval, + uint32_t leeway, + uint32_t scale_factor) +{ + thread_t thread = current_thread(); + wait_result_t wresult; + uint64_t deadline; + uint64_t abstime; + uint64_t slop; + uint64_t now; + spl_t s; + + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } now = mach_absolute_time(); clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime); @@ -1061,13 +1092,13 @@ assert_wait_timeout_with_leeway( waitq_lock(waitq); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0); wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event), - interruptible, - urgency, deadline, slop, - thread); + interruptible, + urgency, deadline, slop, + thread); waitq_unlock(waitq); splx(s); @@ -1076,16 +1107,17 @@ assert_wait_timeout_with_leeway( wait_result_t assert_wait_deadline( - event_t event, - wait_interrupt_t interruptible, - uint64_t deadline) + event_t event, + wait_interrupt_t interruptible, + uint64_t deadline) { - thread_t thread = current_thread(); - wait_result_t wresult; - spl_t s; + thread_t thread = current_thread(); + wait_result_t wresult; + spl_t s; - if (__improbable(event == NO_EVENT)) + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } struct waitq *waitq; waitq = global_eventq(event); @@ -1094,13 +1126,13 @@ assert_wait_deadline( waitq_lock(waitq); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0); wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event), - interruptible, - TIMEOUT_URGENCY_SYS_NORMAL, deadline, - TIMEOUT_NO_LEEWAY, thread); + interruptible, + TIMEOUT_URGENCY_SYS_NORMAL, deadline, + TIMEOUT_NO_LEEWAY, thread); waitq_unlock(waitq); splx(s); return wresult; @@ -1108,18 +1140,19 @@ assert_wait_deadline( wait_result_t assert_wait_deadline_with_leeway( - event_t event, - wait_interrupt_t interruptible, - wait_timeout_urgency_t urgency, - uint64_t deadline, - uint64_t leeway) + event_t event, + wait_interrupt_t interruptible, + wait_timeout_urgency_t urgency, + uint64_t deadline, + uint64_t leeway) { - thread_t thread = current_thread(); - wait_result_t wresult; - spl_t s; + thread_t thread = current_thread(); + wait_result_t wresult; + spl_t s; - if (__improbable(event == NO_EVENT)) + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } struct waitq *waitq; waitq = global_eventq(event); @@ -1128,13 +1161,13 @@ assert_wait_deadline_with_leeway( waitq_lock(waitq); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT)|DBG_FUNC_NONE, - 
VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_WAIT) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(event), interruptible, deadline, 0, 0); wresult = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event), - interruptible, - urgency, deadline, leeway, - thread); + interruptible, + urgency, deadline, leeway, + thread); waitq_unlock(waitq); splx(s); return wresult; @@ -1147,7 +1180,7 @@ assert_wait_deadline_with_leeway( * is needed to pull it out of userspace execution, or if executing in * the kernel, bring to a context switch boundary that would cause * thread state to be serialized in the thread PCB. - * + * * Thread locked, returns the same way. While locked, fields * like "state" cannot change. "runq" can change only from set to unset. */ @@ -1155,20 +1188,23 @@ static inline boolean_t thread_isoncpu(thread_t thread) { /* Not running or runnable */ - if (!(thread->state & TH_RUN)) - return (FALSE); + if (!(thread->state & TH_RUN)) { + return FALSE; + } /* Waiting on a runqueue, not currently running */ /* TODO: This is invalid - it can get dequeued without thread lock, but not context switched. */ - if (thread->runq != PROCESSOR_NULL) - return (FALSE); + if (thread->runq != PROCESSOR_NULL) { + return FALSE; + } /* * Thread does not have a stack yet * It could be on the stack alloc queue or preparing to be invoked */ - if (!thread->kernel_stack) - return (FALSE); + if (!thread->kernel_stack) { + return FALSE; + } /* * Thread must be running on a processor, or @@ -1178,7 +1214,7 @@ thread_isoncpu(thread_t thread) * of userspace and the processor has * context switched (and saved register state). */ - return (TRUE); + return TRUE; } /* @@ -1197,12 +1233,12 @@ thread_isoncpu(thread_t thread) */ boolean_t thread_stop( - thread_t thread, - boolean_t until_not_runnable) + thread_t thread, + boolean_t until_not_runnable) { - wait_result_t wresult; - spl_t s = splsched(); - boolean_t oncpu; + wait_result_t wresult; + spl_t s = splsched(); + boolean_t oncpu; wake_lock(thread); thread_lock(thread); @@ -1215,11 +1251,13 @@ thread_stop( wake_unlock(thread); splx(s); - if (wresult == THREAD_WAITING) + if (wresult == THREAD_WAITING) { wresult = thread_block(THREAD_CONTINUE_NULL); + } - if (wresult != THREAD_AWAKENED) - return (FALSE); + if (wresult != THREAD_AWAKENED) { + return FALSE; + } s = splsched(); wake_lock(thread); @@ -1229,9 +1267,9 @@ thread_stop( thread->state |= TH_SUSP; while ((oncpu = thread_isoncpu(thread)) || - (until_not_runnable && (thread->state & TH_RUN))) { - processor_t processor; - + (until_not_runnable && (thread->state & TH_RUN))) { + processor_t processor; + if (oncpu) { assert(thread->state & TH_RUN); processor = thread->chosen_processor; @@ -1245,12 +1283,13 @@ thread_stop( wake_unlock(thread); splx(s); - if (wresult == THREAD_WAITING) + if (wresult == THREAD_WAITING) { wresult = thread_block(THREAD_CONTINUE_NULL); + } if (wresult != THREAD_AWAKENED) { thread_unstop(thread); - return (FALSE); + return FALSE; } s = splsched(); @@ -1261,7 +1300,7 @@ thread_stop( thread_unlock(thread); wake_unlock(thread); splx(s); - + /* * We return with the thread unlocked. 
To prevent it from * transitioning to a runnable state (or from TH_RUN to @@ -1269,7 +1308,7 @@ thread_stop( * is stopped via an external means (such as an AST) */ - return (TRUE); + return TRUE; } /* @@ -1282,14 +1321,14 @@ thread_stop( */ void thread_unstop( - thread_t thread) + thread_t thread) { - spl_t s = splsched(); + spl_t s = splsched(); wake_lock(thread); thread_lock(thread); - assert((thread->state & (TH_RUN|TH_WAIT|TH_SUSP)) != TH_SUSP); + assert((thread->state & (TH_RUN | TH_WAIT | TH_SUSP)) != TH_SUSP); if (thread->state & TH_SUSP) { thread->state &= ~TH_SUSP; @@ -1319,13 +1358,13 @@ thread_unstop( */ void thread_wait( - thread_t thread, - boolean_t until_not_runnable) + thread_t thread, + boolean_t until_not_runnable) { - wait_result_t wresult; - boolean_t oncpu; - processor_t processor; - spl_t s = splsched(); + wait_result_t wresult; + boolean_t oncpu; + processor_t processor; + spl_t s = splsched(); wake_lock(thread); thread_lock(thread); @@ -1334,12 +1373,11 @@ thread_wait( * Wait until not running on a CPU. If stronger requirement * desired, wait until not runnable. Assumption: if thread is * on CPU, then TH_RUN is set, so we're not waiting in any case - * where the original, pure "TH_RUN" check would have let us + * where the original, pure "TH_RUN" check would have let us * finish. */ while ((oncpu = thread_isoncpu(thread)) || - (until_not_runnable && (thread->state & TH_RUN))) { - + (until_not_runnable && (thread->state & TH_RUN))) { if (oncpu) { assert(thread->state & TH_RUN); processor = thread->chosen_processor; @@ -1353,8 +1391,9 @@ thread_wait( wake_unlock(thread); splx(s); - if (wresult == THREAD_WAITING) + if (wresult == THREAD_WAITING) { thread_block(THREAD_CONTINUE_NULL); + } s = splsched(); wake_lock(thread); @@ -1384,40 +1423,44 @@ thread_wait( */ __private_extern__ kern_return_t clear_wait_internal( - thread_t thread, - wait_result_t wresult) + thread_t thread, + wait_result_t wresult) { - uint32_t i = LockTimeOutUsec; + uint32_t i = LockTimeOutUsec; struct waitq *waitq = thread->waitq; - + do { - if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) - return (KERN_FAILURE); + if (wresult == THREAD_INTERRUPTED && (thread->state & TH_UNINT)) { + return KERN_FAILURE; + } if (waitq != NULL) { if (!waitq_pull_thread_locked(waitq, thread)) { thread_unlock(thread); delay(1); - if (i > 0 && !machine_timeout_suspended()) + if (i > 0 && !machine_timeout_suspended()) { i--; + } thread_lock(thread); - if (waitq != thread->waitq) + if (waitq != thread->waitq) { return KERN_NOT_WAITING; + } continue; } } /* TODO: Can we instead assert TH_TERMINATE is not set? 
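thread_stop and thread_wait above share one shape: under the wake lock, test whether the target is still on a CPU (or still runnable), and if so register as a waiter on thread->wake_active, drop the locks, and block until the dispatcher wakes the waiters. A condition-variable analogue in user-space C; pthreads stands in for the wake lock and the assert_wait/thread_block pair, so this is an analogy, not the kernel mechanism:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy target-thread state guarded by a "wake lock". */
    struct target {
        pthread_mutex_t wake_lock;
        pthread_cond_t  wake_active; /* stands in for &thread->wake_active */
        bool            oncpu;
    };

    /* Analogue of thread_wait(..., until_not_runnable = FALSE):
     * loop until the target is observed off-CPU. */
    static void wait_until_off_cpu(struct target *t)
    {
        pthread_mutex_lock(&t->wake_lock);
        while (t->oncpu) {
            /* In the kernel this is assert_wait() + unlock +
             * thread_block(); a condvar collapses those steps. */
            pthread_cond_wait(&t->wake_active, &t->wake_lock);
        }
        pthread_mutex_unlock(&t->wake_lock);
    }

    /* Analogue of the dispatch path waking wake_active waiters. */
    static void mark_off_cpu(struct target *t)
    {
        pthread_mutex_lock(&t->wake_lock);
        t->oncpu = false;
        pthread_cond_broadcast(&t->wake_active);
        pthread_mutex_unlock(&t->wake_lock);
    }

    static void *worker(void *arg)
    {
        mark_off_cpu(arg);
        return NULL;
    }

    int main(void)
    {
        struct target t = {
            .wake_lock = PTHREAD_MUTEX_INITIALIZER,
            .wake_active = PTHREAD_COND_INITIALIZER,
            .oncpu = true,
        };
        pthread_t th;
        pthread_create(&th, NULL, worker, &t);
        wait_until_off_cpu(&t);
        pthread_join(th, NULL);
        printf("target is off CPU\n");
        return 0;
    }

Re-testing the predicate in a loop after every wakeup is what makes the kernel version safe against spurious or stale wakeups, and the condvar loop preserves that property.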
*/ - if ((thread->state & (TH_WAIT|TH_TERMINATE)) == TH_WAIT) - return (thread_go(thread, wresult)); - else - return (KERN_NOT_WAITING); + if ((thread->state & (TH_WAIT | TH_TERMINATE)) == TH_WAIT) { + return thread_go(thread, wresult); + } else { + return KERN_NOT_WAITING; + } } while (i > 0); panic("clear_wait_internal: deadlock: thread=%p, wq=%p, cpu=%d\n", - thread, waitq, cpu_number()); + thread, waitq, cpu_number()); - return (KERN_FAILURE); + return KERN_FAILURE; } @@ -1433,11 +1476,11 @@ clear_wait_internal( */ kern_return_t clear_wait( - thread_t thread, - wait_result_t result) + thread_t thread, + wait_result_t result) { kern_return_t ret; - spl_t s; + spl_t s; s = splsched(); thread_lock(thread); @@ -1457,19 +1500,21 @@ clear_wait( */ kern_return_t thread_wakeup_prim( - event_t event, - boolean_t one_thread, - wait_result_t result) + event_t event, + boolean_t one_thread, + wait_result_t result) { - if (__improbable(event == NO_EVENT)) + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } struct waitq *wq = global_eventq(event); - if (one_thread) + if (one_thread) { return waitq_wakeup64_one(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES); - else + } else { return waitq_wakeup64_all(wq, CAST_EVENT64_T(event), result, WAITQ_ALL_PRIORITIES); + } } /* @@ -1477,14 +1522,16 @@ thread_wakeup_prim( */ kern_return_t thread_wakeup_thread( - event_t event, - thread_t thread) + event_t event, + thread_t thread) { - if (__improbable(event == NO_EVENT)) + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } - if (__improbable(thread == THREAD_NULL)) + if (__improbable(thread == THREAD_NULL)) { panic("%s() called with THREAD_NULL", __func__); + } struct waitq *wq = global_eventq(event); @@ -1498,11 +1545,12 @@ thread_wakeup_thread( */ kern_return_t thread_wakeup_one_with_pri( - event_t event, - int priority) + event_t event, + int priority) { - if (__improbable(event == NO_EVENT)) + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } struct waitq *wq = global_eventq(event); @@ -1518,10 +1566,11 @@ thread_wakeup_one_with_pri( */ thread_t thread_wakeup_identify(event_t event, - int priority) + int priority) { - if (__improbable(event == NO_EVENT)) + if (__improbable(event == NO_EVENT)) { panic("%s() called with NO_EVENT", __func__); + } struct waitq *wq = global_eventq(event); @@ -1541,11 +1590,11 @@ thread_wakeup_identify(event_t event, */ processor_t thread_bind( - processor_t processor) + processor_t processor) { - thread_t self = current_thread(); - processor_t prev; - spl_t s; + thread_t self = current_thread(); + processor_t prev; + spl_t s; s = splsched(); thread_lock(self); @@ -1555,7 +1604,7 @@ thread_bind( thread_unlock(self); splx(s); - return (prev); + return prev; } /* @@ -1575,10 +1624,10 @@ thread_bind( static processor_t thread_bind_internal( - thread_t thread, - processor_t processor) + thread_t thread, + processor_t processor) { - processor_t prev; + processor_t prev; /* */ assert(thread->sched_pri < BASEPRI_RTQUEUES); @@ -1590,7 +1639,7 @@ thread_bind_internal( prev = thread->bound_processor; thread->bound_processor = processor; - return (prev); + return prev; } /* @@ -1622,7 +1671,7 @@ thread_bind_internal( * memorystatus_thread (95) */ #define MAX_VM_BIND_GROUP_COUNT (5) -decl_simple_lock_data(static,sched_vm_group_list_lock); +decl_simple_lock_data(static, sched_vm_group_list_lock); static thread_t sched_vm_group_thread_list[MAX_VM_BIND_GROUP_COUNT]; 
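clear_wait_internal above retries in a bounded loop when the thread cannot be pulled off its wait queue: drop the thread lock, delay(1), burn one unit of a budget derived from LockTimeOutUsec, relock, and re-check, panicking only when the budget runs out. A stripped-down sketch of that bounded-retry shape; try_pull and the budget value are hypothetical stand-ins:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Stand-in for LockTimeOutUsec: how many 1 us retries to tolerate. */
    #define LOCK_TIMEOUT_USEC 500000

    /* Hypothetical operation that can transiently fail while another
     * CPU holds the wait queue, like waitq_pull_thread_locked(). */
    static bool try_pull(int attempt)
    {
        return attempt >= 3; /* succeed on the fourth try, for the demo */
    }

    int main(void)
    {
        uint32_t i = LOCK_TIMEOUT_USEC;
        int attempt = 0;

        do {
            if (try_pull(attempt)) {
                printf("pulled after %d retries\n", attempt);
                return 0;
            }
            /* Mirrors clear_wait_internal(): back off briefly, burn
             * budget, and try again. */
            usleep(1);
            if (i > 0) {
                i--;
            }
            attempt++;
        } while (i > 0);

        /* The kernel panics here; a demo can just report it. */
        fprintf(stderr, "deadlock: retry budget exhausted\n");
        return 1;
    }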
static int sched_vm_group_thread_count; static boolean_t sched_vm_group_temporarily_unbound = FALSE; @@ -1635,7 +1684,7 @@ thread_vm_bind_group_add(void) thread_reference_internal(self); self->options |= TH_OPT_SCHED_VM_GROUP; - simple_lock(&sched_vm_group_list_lock); + simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL); assert(sched_vm_group_thread_count < MAX_VM_BIND_GROUP_COUNT); sched_vm_group_thread_list[sched_vm_group_thread_count++] = self; simple_unlock(&sched_vm_group_list_lock); @@ -1659,15 +1708,15 @@ sched_vm_group_maintenance(void) processor_t bind_target = PROCESSOR_NULL; /* Make sure nobody attempts to add new threads while we are enumerating them */ - simple_lock(&sched_vm_group_list_lock); + simple_lock(&sched_vm_group_list_lock, LCK_GRP_NULL); s = splsched(); - for (i=0; i < sched_vm_group_thread_count; i++) { + for (i = 0; i < sched_vm_group_thread_count; i++) { thread_t thread = sched_vm_group_thread_list[i]; assert(thread != THREAD_NULL); thread_lock(thread); - if ((thread->state & (TH_RUN|TH_WAIT)) == TH_RUN) { + if ((thread->state & (TH_RUN | TH_WAIT)) == TH_RUN) { if (thread->runq != PROCESSOR_NULL && thread->last_made_runnable_time < longtime) { high_latency_observed = TRUE; } else if (thread->runq == PROCESSOR_NULL) { @@ -1711,7 +1760,7 @@ sched_vm_group_maintenance(void) if (bind_target_changed) { s = splsched(); - for (i=0; i < sched_vm_group_thread_count; i++) { + for (i = 0; i < sched_vm_group_thread_count; i++) { thread_t thread = sched_vm_group_thread_list[i]; boolean_t removed; assert(thread != THREAD_NULL); @@ -1759,36 +1808,39 @@ int sched_smt_balance = 1; #if __SMP__ /* Invoked with pset locked, returns with pset unlocked */ void -sched_SMT_balance(processor_t cprocessor, processor_set_t cpset) { +sched_SMT_balance(processor_t cprocessor, processor_set_t cpset) +{ processor_t ast_processor = NULL; #if (DEVELOPMENT || DEBUG) - if (__improbable(sched_smt_balance == 0)) + if (__improbable(sched_smt_balance == 0)) { goto smt_balance_exit; + } #endif - + assert(cprocessor == current_processor()); - if (cprocessor->is_SMT == FALSE) + if (cprocessor->is_SMT == FALSE) { goto smt_balance_exit; + } processor_t sib_processor = cprocessor->processor_secondary ? cprocessor->processor_secondary : cprocessor->processor_primary; /* Determine if both this processor and its sibling are idle, * indicating an SMT rebalancing opportunity. */ - if (sib_processor->state != PROCESSOR_IDLE) + if (sib_processor->state != PROCESSOR_IDLE) { goto smt_balance_exit; + } processor_t sprocessor; sched_ipi_type_t ipi_type = SCHED_IPI_NONE; uint64_t running_secondary_map = (cpset->cpu_state_map[PROCESSOR_RUNNING] & - ~cpset->primary_map); + ~cpset->primary_map); for (int cpuid = lsb_first(running_secondary_map); cpuid >= 0; cpuid = lsb_next(running_secondary_map, cpuid)) { sprocessor = processor_array[cpuid]; if ((sprocessor->processor_primary->state == PROCESSOR_RUNNING) && (sprocessor->current_pri < BASEPRI_RTQUEUES)) { - ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL); if (ipi_type != SCHED_IPI_NONE) { assert(sprocessor != cprocessor); @@ -1815,9 +1867,35 @@ sched_SMT_balance(__unused processor_t cprocessor, processor_set_t cpset) } #endif /* __SMP__ */ +/* + * Called with pset locked, on a processor that is committing to run a new thread + * Will transition an idle or dispatching processor to running as it picks up + * the first new thread from the idle thread. 
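sched_SMT_balance above derives running_secondary_map by masking the pset's running-CPU map with the complement of its primary map, then walks the set bits with lsb_first()/lsb_next() looking for a running secondary whose primary sibling is also running. A sketch of that mask-and-iterate pattern over a 64-bit CPU map, where the bit index is the cpu id as in processor_array:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy equivalents of lsb_first()/lsb_next() from osfmk/kern/bits.h:
     * walk set bits from the lowest index upward. */
    static int lsb_first(uint64_t map)
    {
        return map ? __builtin_ctzll(map) : -1;
    }

    static int lsb_next(uint64_t map, int prev)
    {
        /* Demo assumes prev < 63, so the shift below never overflows. */
        uint64_t above = map & ~((2ULL << prev) - 1); /* drop bits <= prev */
        return above ? __builtin_ctzll(above) : -1;
    }

    int main(void)
    {
        /* 8-CPU pset: even cpu ids are primaries, odd ids secondaries. */
        uint64_t primary_map = 0x55; /* 0b01010101 */
        /* Suppose cpus 0, 1, 3 and 6 are currently PROCESSOR_RUNNING. */
        uint64_t running_map =
            (1ULL << 0) | (1ULL << 1) | (1ULL << 3) | (1ULL << 6);

        /* Same mask sched_SMT_balance() computes. */
        uint64_t running_secondary_map = running_map & ~primary_map;

        for (int cpuid = lsb_first(running_secondary_map);
            cpuid >= 0;
            cpuid = lsb_next(running_secondary_map, cpuid)) {
            printf("running secondary cpu %d\n", cpuid);
        }
        return 0;
    }

Here only cpus 1 and 3 survive the mask; the kernel then checks each one's primary sibling before deciding whether to send a rebalance IPI.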
+ */ +static void +pset_commit_processor_to_new_thread(processor_set_t pset, processor_t processor, thread_t new_thread) +{ + if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) { + assert(current_thread() == processor->idle_thread); + + /* + * Dispatching processor is now committed to running new_thread, + * so change its state to PROCESSOR_RUNNING. + */ + pset_update_processor_state(pset, processor, PROCESSOR_RUNNING); + } else { + assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN)); + } + + processor_state_update_from_thread(processor, new_thread); +} + static processor_t choose_processor_for_realtime_thread(processor_set_t pset); static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset); +static bool these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map); +static bool sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor); int sched_allow_rt_smt = 1; +int sched_avoid_cpu0 = 1; /* * thread_select: @@ -1828,26 +1906,38 @@ int sched_allow_rt_smt = 1; */ static thread_t thread_select(thread_t thread, - processor_t processor, - ast_t *reason) + processor_t processor, + ast_t *reason) { - processor_set_t pset = processor->processor_set; - thread_t new_thread = THREAD_NULL; + processor_set_t pset = processor->processor_set; + thread_t new_thread = THREAD_NULL; assert(processor == current_processor()); - assert((thread->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN); + assert((thread->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN); do { /* * Update the priority. */ - if (SCHED(can_update_priority)(thread)) + if (SCHED(can_update_priority)(thread)) { SCHED(update_priority)(thread); - - processor_state_update_from_thread(processor, thread); + } pset_lock(pset); + processor_state_update_from_thread(processor, thread); + +restart: + /* Acknowledge any pending IPIs here with pset lock held */ + bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); + bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); + +#if defined(CONFIG_SCHED_DEFERRED_AST) + bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id); +#endif + + bool secondary_can_only_run_realtime_thread = false; + assert(processor->state != PROCESSOR_OFF_LINE); if (!processor->is_recommended) { @@ -1871,7 +1961,12 @@ thread_select(thread_t thread, if ((pset->recommended_bitmask & pset->primary_map & pset->cpu_state_map[PROCESSOR_IDLE]) != 0) { goto idle; } - + + /* + * TODO: What if a secondary core beat an idle primary to waking up from an IPI? + * Should it dequeue immediately, or spin waiting for the primary to wake up? + */ + /* There are no idle primaries */ if (processor->processor_primary->current_pri >= BASEPRI_RTQUEUES) { @@ -1879,6 +1974,7 @@ thread_select(thread_t thread, if (!secondary_can_run_realtime_thread) { goto idle; } + secondary_can_only_run_realtime_thread = true; } } } @@ -1895,19 +1991,25 @@ thread_select(thread_t thread, */ /* i.e. not waiting, not TH_SUSP'ed */ - boolean_t still_running = ((thread->state & (TH_TERMINATE|TH_IDLE|TH_WAIT|TH_RUN|TH_SUSP)) == TH_RUN); + bool still_running = ((thread->state & (TH_TERMINATE | TH_IDLE | TH_WAIT | TH_RUN | TH_SUSP)) == TH_RUN); /* * Threads running on SMT processors are forced to context switch. Don't rebalance realtime threads. * TODO: This should check if it's worth it to rebalance, i.e. 'are there any idle primary processors' + * + * + * A yielding thread shouldn't be forced to context switch. 
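 */

/*
 * Illustrative sketch (editorial aside, not part of the patch): thread_select()
 * above acknowledges pending IPIs by clearing this CPU's bits in the pset's
 * pending-AST bitmaps while the pset lock is held, and the restart: label is
 * taken when the urgent bit reappears after the lock was dropped.  A toy
 * rendering with plain uint64_t masks (bit_clear/bit_test are kernel macros;
 * the toy_* names are invented):
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_ast_pset {
	uint64_t pending_urgent;	/* one bit per CPU in the pset */
	uint64_t pending_preempt;
};

static void
toy_ack_ipis(struct toy_ast_pset *p, int cpu_id)
{
	/* caller holds the pset lock */
	p->pending_urgent  &= ~(1ULL << cpu_id);
	p->pending_preempt &= ~(1ULL << cpu_id);
}

static bool
toy_must_restart(const struct toy_ast_pset *p, int cpu_id)
{
	/* someone selected this processor while the lock was dropped */
	return (p->pending_urgent & (1ULL << cpu_id)) != 0;
}

/*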
*/ - boolean_t needs_smt_rebalance = (thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor); - boolean_t affinity_mismatch = (thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset); + bool is_yielding = (*reason & AST_YIELD) == AST_YIELD; + + bool needs_smt_rebalance = !is_yielding && thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_primary != processor; - boolean_t bound_elsewhere = (thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor); + bool affinity_mismatch = thread->affinity_set != AFFINITY_SET_NULL && thread->affinity_set->aset_pset != pset; - boolean_t avoid_processor = (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)); + bool bound_elsewhere = thread->bound_processor != PROCESSOR_NULL && thread->bound_processor != processor; + + bool avoid_processor = !is_yielding && SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread); if (still_running && !needs_smt_rebalance && !affinity_mismatch && !bound_elsewhere && !avoid_processor) { /* @@ -1918,19 +2020,17 @@ thread_select(thread_t thread, */ if (thread->sched_pri >= BASEPRI_RTQUEUES && processor->first_timeslice) { if (rt_runq_count(pset) > 0) { - rt_lock_lock(pset); - + if (rt_runq_count(pset) > 0) { - - thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); - - if (next_rt->realtime.deadline < processor->deadline && - (next_rt->bound_processor == PROCESSOR_NULL || - next_rt->bound_processor == processor)) { - /* The next RT thread is better, so pick it off the runqueue. */ - goto pick_new_rt_thread; - } + thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); + + if (next_rt->realtime.deadline < processor->deadline && + (next_rt->bound_processor == PROCESSOR_NULL || + next_rt->bound_processor == processor)) { + /* The next RT thread is better, so pick it off the runqueue. */ + goto pick_new_rt_thread; + } } rt_lock_unlock(pset); @@ -1947,6 +2047,9 @@ thread_select(thread_t thread, if (rt_runq_count(pset) > 0) { next_rt_processor = choose_processor_for_realtime_thread(pset); if (next_rt_processor) { + if (next_rt_processor->state == PROCESSOR_IDLE) { + pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING); + } next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT); } } @@ -1956,7 +2059,7 @@ thread_select(thread_t thread, sched_ipi_perform(next_rt_processor, next_rt_ipi_type); } - return (thread); + return thread; } if ((rt_runq_count(pset) == 0) && @@ -1967,27 +2070,28 @@ thread_select(thread_t thread, sched_update_pset_load_average(pset); pset_unlock(pset); - return (thread); + return thread; } } else { /* * This processor must context switch. * If it's due to a rebalance, we should aggressively find this thread a new home. */ - if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) - *reason |= AST_REBALANCE; + if (needs_smt_rebalance || affinity_mismatch || bound_elsewhere || avoid_processor) { + *reason |= AST_REBALANCE; + } } /* OK, so we're not going to run the current thread. Look at the RT queue. 
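 */

/*
 * Illustrative sketch (editorial aside, not part of the patch): the decision
 * just made -- keep running the current thread or go pick another -- reduces
 * to one predicate over the flags computed above.  The field names below
 * mirror the locals purely for illustration.
 */
#include <stdbool.h>

struct toy_select_flags {
	bool still_running;		/* not waiting, not suspended */
	bool needs_smt_rebalance;	/* never set for a yielding thread */
	bool affinity_mismatch;
	bool bound_elsewhere;
	bool avoid_processor;
};

static bool
toy_keep_current_thread(const struct toy_select_flags *f)
{
	return f->still_running &&
	       !f->needs_smt_rebalance &&
	       !f->affinity_mismatch &&
	       !f->bound_elsewhere &&
	       !f->avoid_processor;
}

/*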
*/ - if (rt_runq_count(pset) > 0) { - + bool ok_to_run_realtime_thread = sched_ok_to_run_realtime_thread(pset, processor); + if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) { rt_lock_lock(pset); - if (rt_runq_count(pset) > 0) { + if ((rt_runq_count(pset) > 0) && ok_to_run_realtime_thread) { thread_t next_rt = qe_queue_first(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); if (__probable((next_rt->bound_processor == PROCESSOR_NULL || - (next_rt->bound_processor == processor)))) { + (next_rt->bound_processor == processor)))) { pick_new_rt_thread: new_thread = qe_dequeue_head(&SCHED(rt_runq)(pset)->queue, struct thread, runq_links); @@ -1996,7 +2100,8 @@ pick_new_rt_thread: rt_runq_count_decr(pset); processor->deadline = new_thread->realtime.deadline; - processor_state_update_from_thread(processor, new_thread); + + pset_commit_processor_to_new_thread(pset, processor, new_thread); rt_lock_unlock(pset); sched_update_pset_load_average(pset); @@ -2016,6 +2121,9 @@ pick_new_rt_thread: if (rt_runq_count(pset) > 0) { next_rt_processor = choose_processor_for_realtime_thread(pset); if (next_rt_processor) { + if (next_rt_processor->state == PROCESSOR_IDLE) { + pset_update_processor_state(pset, next_rt_processor, PROCESSOR_DISPATCHING); + } next_rt_ipi_type = sched_ipi_action(next_rt_processor, NULL, false, SCHED_IPI_EVENT_PREEMPT); } } @@ -2029,52 +2137,97 @@ pick_new_rt_thread: sched_ipi_perform(next_rt_processor, next_rt_ipi_type); } - return (new_thread); + return new_thread; } } rt_lock_unlock(pset); } + if (secondary_can_only_run_realtime_thread) { + goto idle; + } processor->deadline = UINT64_MAX; /* No RT threads, so let's look at the regular threads. */ if ((new_thread = SCHED(choose_thread)(processor, MINPRI, *reason)) != THREAD_NULL) { sched_update_pset_load_average(pset); - processor_state_update_from_thread(processor, new_thread); + + pset_commit_processor_to_new_thread(pset, processor, new_thread); + + processor_t ast_processor = PROCESSOR_NULL; + sched_ipi_type_t ipi_type = SCHED_IPI_NONE; + + processor_t sprocessor = processor->processor_secondary; + if ((sprocessor != NULL) && (sprocessor->state == PROCESSOR_RUNNING)) { + if (thread_no_smt(new_thread)) { + ipi_type = sched_ipi_action(sprocessor, NULL, false, SCHED_IPI_EVENT_SMT_REBAL); + ast_processor = sprocessor; + } + } pset_unlock(pset); - return (new_thread); + + if (ast_processor) { + sched_ipi_perform(ast_processor, ipi_type); + } + return new_thread; + } + + if (processor->must_idle) { + processor->must_idle = false; + goto idle; } #if __SMP__ - if (SCHED(steal_thread_enabled)) { + if (SCHED(steal_thread_enabled)(pset)) { /* * No runnable threads, attempt to steal * from other processors. Returns with pset lock dropped. */ if ((new_thread = SCHED(steal_thread)(pset)) != THREAD_NULL) { - return (new_thread); + /* + * Avoid taking the pset_lock unless it is necessary to change state. + * It's safe to read processor->state here, as only the current processor can change state + * from this point (interrupts are disabled and this processor is committed to run new_thread). 
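 */

/*
 * Illustrative sketch (editorial aside, not part of the patch): the steal
 * path above takes the pset lock only when the processor still has to
 * transition out of IDLE/DISPATCHING; otherwise it updates per-processor
 * state lock-free.  A compressed toy rendering (the lock functions are empty
 * stand-ins, all names invented):
 */
enum toy_steal_state { TOY_ST_IDLE, TOY_ST_DISPATCHING, TOY_ST_RUNNING };

static void toy_pset_lock(void)   { /* elided */ }
static void toy_pset_unlock(void) { /* elided */ }

static void
toy_commit_stolen_thread(enum toy_steal_state *state)
{
	if (*state == TOY_ST_IDLE || *state == TOY_ST_DISPATCHING) {
		toy_pset_lock();	/* a state change needs the pset lock */
		*state = TOY_ST_RUNNING;
		toy_pset_unlock();
	}
	/* else: already RUNNING, only per-processor fields are updated */
}

/*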
+ */ + if (processor->state == PROCESSOR_DISPATCHING || processor->state == PROCESSOR_IDLE) { + pset_lock(pset); + pset_commit_processor_to_new_thread(pset, processor, new_thread); + pset_unlock(pset); + } else { + assert((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_SHUTDOWN)); + processor_state_update_from_thread(processor, new_thread); + } + + return new_thread; } /* * If other threads have appeared, shortcut * around again. */ - if (!SCHED(processor_queue_empty)(processor) || rt_runq_count(pset) > 0) + if (!SCHED(processor_queue_empty)(processor) || (ok_to_run_realtime_thread && (rt_runq_count(pset) > 0))) { continue; + } pset_lock(pset); + + /* Someone selected this processor while we had dropped the lock */ + if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) { + goto restart; + } } #endif - idle: +idle: /* * Nothing is runnable, so set this processor idle if it * was running. */ - if (processor->state == PROCESSOR_RUNNING) { + if ((processor->state == PROCESSOR_RUNNING) || (processor->state == PROCESSOR_DISPATCHING)) { pset_update_processor_state(pset, processor, PROCESSOR_IDLE); + processor_state_update_idle(processor); } #if __SMP__ @@ -2088,11 +2241,13 @@ pick_new_rt_thread: /* * Choose idle thread if fast idle is not possible. */ - if (processor->processor_primary != processor) - return (processor->idle_thread); + if (processor->processor_primary != processor) { + return processor->idle_thread; + } - if ((thread->state & (TH_IDLE|TH_TERMINATE|TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES) - return (processor->idle_thread); + if ((thread->state & (TH_IDLE | TH_TERMINATE | TH_SUSP)) || !(thread->state & TH_WAIT) || thread->wake_active || thread->sched_pri >= BASEPRI_RTQUEUES) { + return processor->idle_thread; + } /* * Perform idling activities directly without a @@ -2102,7 +2257,7 @@ pick_new_rt_thread: new_thread = thread_select_idle(thread, processor); #else /* !CONFIG_SCHED_IDLE_IN_PLACE */ - + /* * Do a full context switch to idle so that the current * thread can start running on another processor without @@ -2111,10 +2266,9 @@ pick_new_rt_thread: new_thread = processor->idle_thread; #endif /* !CONFIG_SCHED_IDLE_IN_PLACE */ - } while (new_thread == THREAD_NULL); - return (new_thread); + return new_thread; } #if CONFIG_SCHED_IDLE_IN_PLACE @@ -2127,12 +2281,12 @@ pick_new_rt_thread: */ static thread_t thread_select_idle( - thread_t thread, - processor_t processor) + thread_t thread, + processor_t processor) { - thread_t new_thread; - uint64_t arg1, arg2; - int urgency; + thread_t new_thread; + uint64_t arg1, arg2; + int urgency; sched_run_decr(thread); @@ -2141,7 +2295,7 @@ thread_select_idle( /* Reload precise timing global policy to thread-local policy */ thread->precise_user_kernel_time = use_precise_user_kernel_time(thread); - + thread_unlock(thread); /* @@ -2155,7 +2309,7 @@ thread_select_idle( thread->last_run_time = processor->last_dispatch; processor_timer_switch_thread(processor->last_dispatch, - &processor->idle_thread->system_timer); + &processor->idle_thread->system_timer); PROCESSOR_DATA(processor, kernel_timer) = &processor->idle_thread->system_timer; @@ -2198,7 +2352,7 @@ thread_select_idle( thread_quantum_init(thread); processor->quantum_end = time_now + thread->quantum_remaining; timer_call_quantum_timer_enter(&processor->quantum_timer, - thread, processor->quantum_end, time_now); + thread, processor->quantum_end, time_now); processor->first_timeslice = TRUE; 
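
	/*
	 * Illustrative worked example (editorial aside, invented numbers):
	 * the timer armed just above fires at
	 *
	 *     quantum_end = time_now + quantum_remaining
	 *
	 * so with time_now = 1,000,000 and quantum_remaining = 10,000 ticks,
	 * the quantum expires at 1,010,000 and preemption is re-evaluated.
	 */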
thread->computation_epoch = time_now; @@ -2212,7 +2366,7 @@ thread_select_idle( sched_run_incr(thread); - return (new_thread); + return new_thread; } #endif /* CONFIG_SCHED_IDLE_IN_PLACE */ @@ -2232,15 +2386,15 @@ thread_select_idle( */ static boolean_t thread_invoke( - thread_t self, - thread_t thread, - ast_t reason) + thread_t self, + thread_t thread, + ast_t reason) { if (__improbable(get_preemption_level() != 0)) { int pl = get_preemption_level(); panic("thread_invoke: preemption_level %d, possible cause: %s", pl, (pl < 0 ? "unlocking an unlocked mutex or spinlock" : - "blocking while holding a spinlock, or within interrupt context")); + "blocking while holding a spinlock, or within interrupt context")); } thread_continue_t continuation = self->continuation; @@ -2254,8 +2408,9 @@ thread_invoke( #endif #if defined(CONFIG_SCHED_TIMESHARE_CORE) - if ((thread->state & TH_IDLE) == 0) + if ((thread->state & TH_IDLE) == 0) { sched_timeshare_consider_maintenance(ctime); + } #endif #if MONOTONIC @@ -2265,12 +2420,12 @@ thread_invoke( assert_thread_magic(self); assert(self == current_thread()); assert(self->runq == PROCESSOR_NULL); - assert((self->state & (TH_RUN|TH_TERMINATE2)) == TH_RUN); + assert((self->state & (TH_RUN | TH_TERMINATE2)) == TH_RUN); thread_lock(thread); assert_thread_magic(thread); - assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN); + assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN); assert(thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == current_processor()); assert(thread->runq == PROCESSOR_NULL); @@ -2279,21 +2434,23 @@ thread_invoke( /* Update SFI class based on other factors */ thread->sfi_class = sfi_thread_classify(thread); - + /* Update the same_pri_latency for the thread (used by perfcontrol callouts) */ thread->same_pri_latency = ctime - thread->last_basepri_change_time; - /* - * In case a base_pri update happened between the timestamp and - * taking the thread lock + /* + * In case a base_pri update happened between the timestamp and + * taking the thread lock */ - if (ctime <= thread->last_basepri_change_time) + if (ctime <= thread->last_basepri_change_time) { thread->same_pri_latency = ctime - thread->last_made_runnable_time; + } /* Allow realtime threads to hang onto a stack. */ - if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) + if ((self->sched_mode == TH_MODE_REALTIME) && !self->reserved_stack) { self->reserved_stack = self->kernel_stack; + } - /* Prepare for spin debugging */ + /* Prepare for spin debugging */ #if INTERRUPT_MASKED_DEBUG ml_spin_debug_clear(thread); #endif @@ -2305,8 +2462,9 @@ thread_invoke( * check to see whether we can exchange it with * that of the other thread. */ - if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) + if (self->kernel_stack == self->reserved_stack && !thread->reserved_stack) { goto need_stack; + } /* * Context switch by performing a stack handoff. 
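 */

/*
 * Illustrative sketch (editorial aside, not part of the patch): a stack
 * handoff donates the current kernel stack to the incoming thread instead of
 * switching stacks, which is only safe because the outgoing thread will
 * resume at a continuation function rather than in the middle of a stack
 * frame.  Toy model; the struct fields and names are invented:
 */
#include <stdbool.h>
#include <stddef.h>

typedef void (*toy_continuation_t)(void *param, int wait_result);

struct toy_hthread {
	void              *kernel_stack;
	toy_continuation_t continuation;	/* where this thread resumes */
	void              *parameter;
};

static bool
toy_stack_handoff(struct toy_hthread *self, struct toy_hthread *next)
{
	if (self->continuation == NULL || next->kernel_stack != NULL) {
		return false;		/* must do a full context switch */
	}
	next->kernel_stack = self->kernel_stack;	/* donate the stack */
	self->kernel_stack = NULL;			/* self is now stackless */
	return true;
}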
@@ -2319,8 +2477,9 @@ thread_invoke( processor_state_update_from_thread(processor, thread); if (thread->last_processor != processor && thread->last_processor != NULL) { - if (thread->last_processor->processor_set != processor->processor_set) + if (thread->last_processor->processor_set != processor->processor_set) { thread->ps_switch++; + } thread->p_switch++; } thread->last_processor = processor; @@ -2346,12 +2505,12 @@ thread_invoke( } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF)|DBG_FUNC_NONE, - self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF) | DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); if ((thread->chosen_processor != processor) && (thread->chosen_processor != PROCESSOR_NULL)) { - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0); + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0); } DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info); @@ -2389,8 +2548,7 @@ thread_invoke( assert(continuation); call_continuation(continuation, parameter, thread->wait_result, TRUE); /*NOTREACHED*/ - } - else if (thread == self) { + } else if (thread == self) { /* same thread but with continuation */ ast_context(self); counter(++c_thread_invoke_same); @@ -2402,8 +2560,8 @@ thread_invoke( #endif /* KPERF */ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, - self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); #if KASAN /* stack handoff to self - no thread_dispatch(), so clear the stack @@ -2428,7 +2586,7 @@ need_stack: counter(c_thread_invoke_misses++); thread_unlock(thread); thread_stack_enqueue(thread); - return (FALSE); + return FALSE; } } else if (thread == self) { ast_context(self); @@ -2436,10 +2594,10 @@ need_stack: thread_unlock(self); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, - self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE, + self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); - return (TRUE); + return TRUE; } } @@ -2449,10 +2607,11 @@ need_stack: processor = current_processor(); processor->active_thread = thread; processor_state_update_from_thread(processor, thread); - + if (thread->last_processor != processor && thread->last_processor != NULL) { - if (thread->last_processor->processor_set != processor->processor_set) + if (thread->last_processor->processor_set != processor->processor_set) { thread->ps_switch++; + } thread->p_switch++; } thread->last_processor = processor; @@ -2480,12 +2639,12 @@ need_stack: } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE, - self->reason, (uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE, + self->reason, 
(uintptr_t)thread_tid(thread), self->sched_pri, thread->sched_pri, 0); if ((thread->chosen_processor != processor) && (thread->chosen_processor != NULL)) { - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED)|DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0); + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_MOVED) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)thread->chosen_processor->cpu_id, 0, 0, 0); } DTRACE_SCHED2(off__cpu, struct thread *, thread, struct proc *, thread->task->bsd_info); @@ -2514,7 +2673,7 @@ need_stack: assert(continuation == self->continuation); thread = machine_switch_context(self, continuation, thread); assert(self == current_thread_volatile()); - TLOG(1,"thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread); + TLOG(1, "thread_invoke: returning machine_switch_context: self %p continuation %p thread %p\n", self, continuation, thread); DTRACE_SCHED(on__cpu); @@ -2534,7 +2693,7 @@ need_stack: /*NOTREACHED*/ } - return (TRUE); + return TRUE; } #if defined(CONFIG_SCHED_DEFERRED_AST) @@ -2550,11 +2709,11 @@ need_stack: */ static void pset_cancel_deferred_dispatch( - processor_set_t pset, - processor_t processor) + processor_set_t pset, + processor_t processor) { - processor_t active_processor = NULL; - uint32_t sampled_sched_run_count; + processor_t active_processor = NULL; + uint32_t sampled_sched_run_count; pset_lock(pset); sampled_sched_run_count = (volatile uint32_t) sched_run_buckets[TH_BUCKET_RUN]; @@ -2578,8 +2737,8 @@ pset_cancel_deferred_dispatch( if ((sampled_sched_run_count == 1) && (pset->pending_deferred_AST_cpu_mask)) { uint64_t dispatching_map = (pset->cpu_state_map[PROCESSOR_DISPATCHING] & - pset->pending_deferred_AST_cpu_mask & - ~pset->pending_AST_cpu_mask); + pset->pending_deferred_AST_cpu_mask & + ~pset->pending_AST_URGENT_cpu_mask); for (int cpuid = lsb_first(dispatching_map); cpuid >= 0; cpuid = lsb_next(dispatching_map, cpuid)) { active_processor = processor_array[cpuid]; /* @@ -2616,7 +2775,6 @@ pset_cancel_deferred_dispatch( bit_clear(pset->pending_deferred_AST_cpu_mask, active_processor->cpu_id); machine_signal_idle_cancel(active_processor); } - } } @@ -2628,14 +2786,14 @@ pset_cancel_deferred_dispatch( static void thread_csw_callout( - thread_t old, - thread_t new, - uint64_t timestamp) + thread_t old, + thread_t new, + uint64_t timestamp) { perfcontrol_event event = (new->state & TH_IDLE) ? IDLE : CONTEXT_SWITCH; uint64_t same_pri_latency = (new->state & TH_IDLE) ? 0 : new->same_pri_latency; - machine_switch_perfcontrol_context(event, timestamp, 0, - same_pri_latency, old, new); + machine_switch_perfcontrol_context(event, timestamp, 0, + same_pri_latency, old, new); } @@ -2654,29 +2812,29 @@ thread_csw_callout( */ void thread_dispatch( - thread_t thread, - thread_t self) + thread_t thread, + thread_t self) { - processor_t processor = self->last_processor; + processor_t processor = self->last_processor; assert(processor == current_processor()); assert(self == current_thread_volatile()); assert(thread != self); if (thread != THREAD_NULL) { - /* - * Do the perfcontrol callout for context switch. + /* + * Do the perfcontrol callout for context switch. 
* The reason we do this here is: - * - thread_dispatch() is called from various places that are not + * - thread_dispatch() is called from various places that are not * the direct context switch path for eg. processor shutdown etc. * So adding the callout here covers all those cases. - * - We want this callout as early as possible to be close + * - We want this callout as early as possible to be close * to the timestamp taken in thread_invoke() - * - We want to avoid holding the thread lock while doing the + * - We want to avoid holding the thread lock while doing the * callout * - We do not want to callout if "thread" is NULL. */ - thread_csw_callout(thread, self, processor->last_dispatch); + thread_csw_callout(thread, self, processor->last_dispatch); #if KASAN if (thread->continuation != NULL) { @@ -2700,21 +2858,23 @@ thread_dispatch( * If blocked at a continuation, discard * the stack. */ - if (thread->continuation != NULL && thread->kernel_stack != 0) + if (thread->continuation != NULL && thread->kernel_stack != 0) { stack_free(thread); + } if (thread->state & TH_IDLE) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), 0, thread->state, - sched_run_buckets[TH_BUCKET_RUN], 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), 0, thread->state, + sched_run_buckets[TH_BUCKET_RUN], 0); } else { int64_t consumed; int64_t remainder = 0; - if (processor->quantum_end > processor->last_dispatch) + if (processor->quantum_end > processor->last_dispatch) { remainder = processor->quantum_end - processor->last_dispatch; + } consumed = thread->quantum_remaining - remainder; @@ -2724,13 +2884,13 @@ thread_dispatch( * the individual thread. */ ledger_credit_thread(thread, thread->t_ledger, - task_ledgers.cpu_time, consumed); + task_ledgers.cpu_time, consumed); ledger_credit_thread(thread, thread->t_threadledger, - thread_ledgers.cpu_time, consumed); + thread_ledgers.cpu_time, consumed); if (thread->t_bankledger) { ledger_credit_thread(thread, thread->t_bankledger, - bank_ledgers.cpu_time, - (consumed - thread->t_deduct_bank_ledger_time)); + bank_ledgers.cpu_time, + (consumed - thread->t_deduct_bank_ledger_time)); } thread->t_deduct_bank_ledger_time = 0; } @@ -2743,8 +2903,9 @@ thread_dispatch( * Do this before checking starting_pri to avoid overpenalizing * repeated rwlock blockers. */ - if (__improbable(thread->rwlock_count != 0)) + if (__improbable(thread->rwlock_count != 0)) { lck_rw_set_promotion_locked(thread); + } boolean_t keep_quantum = processor->first_timeslice; @@ -2752,15 +2913,17 @@ thread_dispatch( * Treat a thread which has dropped priority since it got on core * as having expired its quantum. */ - if (processor->starting_pri > thread->sched_pri) + if (processor->starting_pri > thread->sched_pri) { keep_quantum = FALSE; + } /* Compute remainder of current quantum. */ if (keep_quantum && - processor->quantum_end > processor->last_dispatch) + processor->quantum_end > processor->last_dispatch) { thread->quantum_remaining = (uint32_t)remainder; - else + } else { thread->quantum_remaining = 0; + } if (thread->sched_mode == TH_MODE_REALTIME) { /* @@ -2788,7 +2951,7 @@ thread_dispatch( * If we are doing a direct handoff then * take the remainder of the quantum. 
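 */

/*
 * Illustrative sketch (editorial aside, not part of the patch): the
 * accounting above computes the CPU time the outgoing thread actually used
 * as its granted quantum minus whatever is still unexpired on the quantum
 * timer, and that amount is what gets credited to the ledgers.  Toy
 * arithmetic version (field names invented):
 */
#include <stdint.h>

struct toy_quantum {
	uint64_t quantum_end;		/* absolute expiry of current quantum */
	uint64_t last_dispatch;		/* timestamp of this context switch */
	uint32_t quantum_remaining;	/* ticks granted at last dispatch */
};

static int64_t
toy_cpu_consumed(const struct toy_quantum *q)
{
	int64_t remainder = 0;

	if (q->quantum_end > q->last_dispatch) {
		remainder = (int64_t)(q->quantum_end - q->last_dispatch);
	}
	/* e.g. granted 10000 ticks, 4000 unexpired -> consumed 6000 */
	return (int64_t)q->quantum_remaining - remainder;
}

/*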
*/ - if ((thread->reason & (AST_HANDOFF|AST_QUANTUM)) == AST_HANDOFF) { + if ((thread->reason & (AST_HANDOFF | AST_QUANTUM)) == AST_HANDOFF) { self->quantum_remaining = thread->quantum_remaining; thread->reason |= AST_QUANTUM; thread->quantum_remaining = 0; @@ -2816,7 +2979,7 @@ thread_dispatch( */ thread->last_made_runnable_time = thread->last_basepri_change_time = processor->last_dispatch; - machine_thread_going_off_core(thread, FALSE, processor->last_dispatch); + machine_thread_going_off_core(thread, FALSE, processor->last_dispatch, TRUE); ast_t reason = thread->reason; sched_options_t options = SCHED_NONE; @@ -2835,19 +2998,20 @@ thread_dispatch( } } - if (reason & AST_QUANTUM) + if (reason & AST_QUANTUM) { options |= SCHED_TAILQ; - else if (reason & AST_PREEMPT) + } else if (reason & AST_PREEMPT) { options |= SCHED_HEADQ; - else + } else { options |= (SCHED_PREEMPT | SCHED_TAILQ); + } thread_setrun(thread, options); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), thread->reason, thread->state, - sched_run_buckets[TH_BUCKET_RUN], 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), thread->reason, thread->state, + sched_run_buckets[TH_BUCKET_RUN], 0); if (thread->wake_active) { thread->wake_active = FALSE; @@ -2871,7 +3035,7 @@ thread_dispatch( * after explicit termination should add * the thread to the termination queue */ - if ((thread_state & (TH_TERMINATE|TH_TERMINATE2)) == TH_TERMINATE) { + if ((thread_state & (TH_TERMINATE | TH_TERMINATE2)) == TH_TERMINATE) { should_terminate = TRUE; thread_state |= TH_TERMINATE2; } @@ -2891,13 +3055,12 @@ thread_dispatch( thread->wait_sfi_begin_time = processor->last_dispatch; } #endif - - machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch); + machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch, FALSE); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), thread->reason, thread_state, - new_run_count, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_DISPATCH) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), thread->reason, thread_state, + new_run_count, 0); if (thread_state & TH_WAIT_REPORT) { (*thread->sched_call)(SCHED_CALL_BLOCK, thread); @@ -2914,8 +3077,9 @@ thread_dispatch( wake_unlock(thread); - if (should_terminate) + if (should_terminate) { thread_terminate_enqueue(thread); + } } } } @@ -2925,12 +3089,12 @@ thread_dispatch( /* Update (new) current thread and reprogram quantum timer */ thread_lock(self); - + if (!(self->state & TH_IDLE)) { uint64_t arg1, arg2; #if CONFIG_SCHED_SFI - ast_t new_ast; + ast_t new_ast; new_ast = sfi_thread_needs_ast(self, NULL); @@ -2940,8 +3104,8 @@ thread_dispatch( #endif assertf(processor->last_dispatch >= self->last_made_runnable_time, - "Non-monotonic time? dispatch at 0x%llx, runnable at 0x%llx", - processor->last_dispatch, self->last_made_runnable_time); + "Non-monotonic time? 
dispatch at 0x%llx, runnable at 0x%llx", + processor->last_dispatch, self->last_made_runnable_time); assert(self->last_made_runnable_time <= self->last_basepri_change_time); @@ -2964,7 +3128,7 @@ thread_dispatch( */ processor->quantum_end = processor->last_dispatch + self->quantum_remaining; timer_call_quantum_timer_enter(&processor->quantum_timer, self, - processor->quantum_end, processor->last_dispatch); + processor->quantum_end, processor->last_dispatch); processor->first_timeslice = TRUE; } else { @@ -2982,7 +3146,7 @@ thread_dispatch( thread_unlock(self); machine_thread_going_on_core(self, urgency, latency, self->same_pri_latency, - processor->last_dispatch); + processor->last_dispatch); #if defined(CONFIG_SCHED_DEFERRED_AST) /* @@ -3007,13 +3171,13 @@ thread_dispatch( * thread resumes, it will execute the continuation function * on a new kernel stack. */ -counter(mach_counter_t c_thread_block_calls = 0;) - +counter(mach_counter_t c_thread_block_calls = 0; ) + wait_result_t thread_block_reason( - thread_continue_t continuation, - void *parameter, - ast_t reason) + thread_continue_t continuation, + void *parameter, + ast_t reason) { thread_t self = current_thread(); processor_t processor; @@ -3027,8 +3191,9 @@ thread_block_reason( processor = current_processor(); /* If we're explicitly yielding, force a subsequent quantum */ - if (reason & AST_YIELD) + if (reason & AST_YIELD) { processor->first_timeslice = FALSE; + } /* We're handling all scheduling AST's */ ast_off(AST_SCHEDULING); @@ -3046,8 +3211,8 @@ thread_block_reason( if (self->state & ~(TH_RUN | TH_IDLE)) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_BLOCK), - reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_BLOCK), + reason, VM_KERNEL_UNSLIDE(continuation), 0, 0, 0); } do { @@ -3058,7 +3223,7 @@ thread_block_reason( splx(s); - return (self->wait_result); + return self->wait_result; } /* @@ -3068,15 +3233,15 @@ thread_block_reason( */ wait_result_t thread_block( - thread_continue_t continuation) + thread_continue_t continuation) { return thread_block_reason(continuation, NULL, AST_NONE); } wait_result_t thread_block_parameter( - thread_continue_t continuation, - void *parameter) + thread_continue_t continuation, + void *parameter) { return thread_block_reason(continuation, parameter, AST_NONE); } @@ -3093,12 +3258,16 @@ thread_block_parameter( */ int thread_run( - thread_t self, - thread_continue_t continuation, - void *parameter, - thread_t new_thread) + thread_t self, + thread_continue_t continuation, + void *parameter, + thread_t new_thread) { - ast_t reason = AST_HANDOFF; + ast_t reason = AST_NONE; + + if ((self->state & TH_IDLE) == 0) { + reason = AST_HANDOFF; + } self->continuation = continuation; self->parameter = parameter; @@ -3114,7 +3283,7 @@ thread_run( thread_unlock(self); } - return (self->wait_result); + return self->wait_result; } /* @@ -3125,7 +3294,7 @@ thread_run( */ void thread_continue( - thread_t thread) + thread_t thread) { thread_t self = current_thread(); thread_continue_t continuation; @@ -3145,12 +3314,12 @@ thread_continue( self->continuation = self->parameter = NULL; #if INTERRUPT_MASKED_DEBUG - /* Reset interrupt-masked spin debugging timeout */ - ml_spin_debug_clear(self); + /* Reset interrupt-masked spin debugging timeout */ + ml_spin_debug_clear(self); #endif TLOG(1, "thread_continue: calling call_continuation\n"); - + boolean_t enable_interrupts = thread != THREAD_NULL; call_continuation(continuation, parameter, 
self->wait_result, enable_interrupts); /*NOTREACHED*/ @@ -3169,10 +3338,11 @@ thread_quantum_init(thread_t thread) uint32_t sched_timeshare_initial_quantum_size(thread_t thread) { - if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) + if ((thread != THREAD_NULL) && thread->th_sched_bucket == TH_BUCKET_SHARE_BG) { return bg_quantum; - else + } else { return std_quantum; + } } /* @@ -3182,14 +3352,16 @@ sched_timeshare_initial_quantum_size(thread_t thread) */ void run_queue_init( - run_queue_t rq) + run_queue_t rq) { rq->highq = NOPRI; - for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) + for (u_int i = 0; i < BITMAP_LEN(NRQS); i++) { rq->bitmap[i] = 0; + } rq->urgency = rq->count = 0; - for (int i = 0; i < NRQS; i++) + for (int i = 0; i < NRQS; i++) { queue_init(&rq->queues[i]); + } } /* @@ -3203,12 +3375,21 @@ run_queue_init( */ thread_t run_queue_dequeue( - run_queue_t rq, - integer_t options) + run_queue_t rq, + integer_t options) { thread_t thread; queue_t queue = &rq->queues[rq->highq]; + if (options & SCHED_PEEK) { + if (options & SCHED_HEADQ) { + thread = qe_queue_first(queue, struct thread, runq_links); + } else { + thread = qe_queue_last(queue, struct thread, runq_links); + } + return thread; + } + if (options & SCHED_HEADQ) { thread = qe_dequeue_head(queue, struct thread, runq_links); } else { @@ -3242,9 +3423,9 @@ run_queue_dequeue( */ boolean_t run_queue_enqueue( - run_queue_t rq, - thread_t thread, - integer_t options) + run_queue_t rq, + thread_t thread, + integer_t options) { queue_t queue = &rq->queues[thread->sched_pri]; boolean_t result = FALSE; @@ -3260,17 +3441,19 @@ run_queue_enqueue( result = TRUE; } } else { - if (options & SCHED_TAILQ) + if (options & SCHED_TAILQ) { enqueue_tail(queue, &thread->runq_links); - else + } else { enqueue_head(queue, &thread->runq_links); + } } - if (SCHED(priority_is_urgent)(thread->sched_pri)) + if (SCHED(priority_is_urgent)(thread->sched_pri)) { rq->urgency++; + } SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); rq->count++; - return (result); + return result; } /* @@ -3282,8 +3465,8 @@ run_queue_enqueue( */ void run_queue_remove( - run_queue_t rq, - thread_t thread) + run_queue_t rq, + thread_t thread) { assert(thread->runq != PROCESSOR_NULL); assert_thread_magic(thread); @@ -3308,8 +3491,8 @@ run_queue_remove( void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context) { - spl_t s; - thread_t thread; + spl_t s; + thread_t thread; processor_set_t pset = &pset0; @@ -3357,8 +3540,9 @@ realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thre assert_thread_magic(iter_thread); if (deadline < iter_thread->realtime.deadline) { - if (iter == queue_first(queue)) + if (iter == queue_first(queue)) { preempt = TRUE; + } insque(&thread->runq_links, queue_prev(iter)); break; } else if (iter == queue_last(queue)) { @@ -3374,7 +3558,7 @@ realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thre rt_lock_unlock(pset); - return (preempt); + return preempt; } /* @@ -3387,8 +3571,8 @@ realtime_queue_insert(processor_t processor, processor_set_t pset, thread_t thre */ static void realtime_setrun( - processor_t processor, - thread_t thread) + processor_t processor, + thread_t thread) { processor_set_t pset = processor->processor_set; pset_assert_locked(pset); @@ -3404,9 +3588,8 @@ realtime_setrun( /* * Dispatch directly onto idle processor. 
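 */

/*
 * Illustrative sketch (editorial aside, not part of the patch):
 * realtime_queue_insert() above keeps the RT runqueue sorted by earliest
 * deadline and reports whether the new thread landed at the head, which is
 * the case where preemption is worth signalling.  A toy singly-linked
 * version of that insert (names invented):
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct toy_rt_thread {
	uint64_t              deadline;
	struct toy_rt_thread *next;
};

/* Returns true when the new thread became the head, i.e. should preempt. */
static bool
toy_rt_insert(struct toy_rt_thread **head, struct toy_rt_thread *t)
{
	struct toy_rt_thread **pp = head;

	while (*pp != NULL && (*pp)->deadline <= t->deadline) {
		pp = &(*pp)->next;	/* earlier deadlines stay in front */
	}
	t->next = *pp;
	*pp = t;
	return pp == head;
}

/*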
*/ - if ( (thread->bound_processor == processor) - && processor->state == PROCESSOR_IDLE) { - + if ((thread->bound_processor == processor) + && processor->state == PROCESSOR_IDLE) { processor->next_thread = thread; processor_state_update_from_thread(processor, thread); processor->deadline = thread->realtime.deadline; @@ -3418,12 +3601,13 @@ realtime_setrun( return; } - if (processor->current_pri < BASEPRI_RTQUEUES) + if (processor->current_pri < BASEPRI_RTQUEUES) { preempt = (AST_PREEMPT | AST_URGENT); - else if (thread->realtime.deadline < processor->deadline) + } else if (thread->realtime.deadline < processor->deadline) { preempt = (AST_PREEMPT | AST_URGENT); - else + } else { preempt = AST_NONE; + } realtime_queue_insert(processor, pset, thread); @@ -3447,6 +3631,14 @@ realtime_setrun( } else { if (processor == current_processor()) { ast_on(preempt); + + if ((preempt & AST_URGENT) == AST_URGENT) { + bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); + } + + if ((preempt & AST_PREEMPT) == AST_PREEMPT) { + bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); + } } else { ipi_type = sched_ipi_action(processor, thread, false, SCHED_IPI_EVENT_PREEMPT); } @@ -3460,111 +3652,116 @@ realtime_setrun( } -sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset, processor_t dst, - __unused sched_ipi_event_t event) +sched_ipi_type_t +sched_ipi_deferred_policy(processor_set_t pset, processor_t dst, + __unused sched_ipi_event_t event) { #if defined(CONFIG_SCHED_DEFERRED_AST) - if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) { - return SCHED_IPI_DEFERRED; - } + if (!bit_test(pset->pending_deferred_AST_cpu_mask, dst->cpu_id)) { + return SCHED_IPI_DEFERRED; + } #else /* CONFIG_SCHED_DEFERRED_AST */ - panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id); + panic("Request for deferred IPI on an unsupported platform; pset: %p CPU: %d", pset, dst->cpu_id); #endif /* CONFIG_SCHED_DEFERRED_AST */ - return SCHED_IPI_NONE; + return SCHED_IPI_NONE; } -sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event) +sched_ipi_type_t +sched_ipi_action(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event) { - sched_ipi_type_t ipi_type = SCHED_IPI_NONE; - assert(dst != NULL); + sched_ipi_type_t ipi_type = SCHED_IPI_NONE; + assert(dst != NULL); - processor_set_t pset = dst->processor_set; - if (current_processor() == dst) { - return SCHED_IPI_NONE; - } + processor_set_t pset = dst->processor_set; + if (current_processor() == dst) { + return SCHED_IPI_NONE; + } - if (bit_test(pset->pending_AST_cpu_mask, dst->cpu_id)) { - return SCHED_IPI_NONE; - } + if (bit_test(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id)) { + return SCHED_IPI_NONE; + } - ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event); - switch(ipi_type) { + ipi_type = SCHED(ipi_policy)(dst, thread, dst_idle, event); + switch (ipi_type) { case SCHED_IPI_NONE: - return SCHED_IPI_NONE; -#if defined(CONFIG_SCHED_DEFERRED_AST) + return SCHED_IPI_NONE; +#if defined(CONFIG_SCHED_DEFERRED_AST) case SCHED_IPI_DEFERRED: - bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id); - break; + bit_set(pset->pending_deferred_AST_cpu_mask, dst->cpu_id); + break; #endif /* CONFIG_SCHED_DEFERRED_AST */ default: - bit_set(pset->pending_AST_cpu_mask, dst->cpu_id); - break; - } - return ipi_type; + bit_set(pset->pending_AST_URGENT_cpu_mask, dst->cpu_id); + bit_set(pset->pending_AST_PREEMPT_cpu_mask, 
dst->cpu_id); + break; + } + return ipi_type; } -sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event) +sched_ipi_type_t +sched_ipi_policy(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event) { - sched_ipi_type_t ipi_type = SCHED_IPI_NONE; - boolean_t deferred_ipi_supported = false; - processor_set_t pset = dst->processor_set; + sched_ipi_type_t ipi_type = SCHED_IPI_NONE; + boolean_t deferred_ipi_supported = false; + processor_set_t pset = dst->processor_set; #if defined(CONFIG_SCHED_DEFERRED_AST) - deferred_ipi_supported = true; + deferred_ipi_supported = true; #endif /* CONFIG_SCHED_DEFERRED_AST */ - switch(event) { + switch (event) { case SCHED_IPI_EVENT_SPILL: case SCHED_IPI_EVENT_SMT_REBAL: case SCHED_IPI_EVENT_REBALANCE: case SCHED_IPI_EVENT_BOUND_THR: - /* - * The spill, SMT rebalance, rebalance and the bound thread - * scenarios use immediate IPIs always. - */ - ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE; - break; + /* + * The spill, SMT rebalance, rebalance and the bound thread + * scenarios use immediate IPIs always. + */ + ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE; + break; case SCHED_IPI_EVENT_PREEMPT: - /* In the preemption case, use immediate IPIs for RT threads */ - if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) { - ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE; - break; - } - - /* - * For Non-RT threads preemption, - * If the core is active, use immediate IPIs. - * If the core is idle, use deferred IPIs if supported; otherwise immediate IPI. - */ - if (deferred_ipi_supported && dst_idle) { - return sched_ipi_deferred_policy(pset, dst, event); - } - ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE; - break; + /* In the preemption case, use immediate IPIs for RT threads */ + if (thread && (thread->sched_pri >= BASEPRI_RTQUEUES)) { + ipi_type = dst_idle ? SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE; + break; + } + + /* + * For Non-RT threads preemption, + * If the core is active, use immediate IPIs. + * If the core is idle, use deferred IPIs if supported; otherwise immediate IPI. + */ + if (deferred_ipi_supported && dst_idle) { + return sched_ipi_deferred_policy(pset, dst, event); + } + ipi_type = dst_idle ? 
SCHED_IPI_IDLE : SCHED_IPI_IMMEDIATE; + break; default: - panic("Unrecognized scheduler IPI event type %d", event); - } - assert(ipi_type != SCHED_IPI_NONE); - return ipi_type; + panic("Unrecognized scheduler IPI event type %d", event); + } + assert(ipi_type != SCHED_IPI_NONE); + return ipi_type; } -void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi) +void +sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi) { - switch (ipi) { + switch (ipi) { case SCHED_IPI_NONE: - break; + break; case SCHED_IPI_IDLE: - machine_signal_idle(dst); - break; + machine_signal_idle(dst); + break; case SCHED_IPI_IMMEDIATE: - cause_ast_check(dst); - break; + cause_ast_check(dst); + break; case SCHED_IPI_DEFERRED: - machine_signal_idle_deferred(dst); - break; + machine_signal_idle_deferred(dst); + break; default: - panic("Unrecognized scheduler IPI type: %d", ipi); - } + panic("Unrecognized scheduler IPI type: %d", ipi); + } } #if defined(CONFIG_SCHED_TIMESHARE_CORE) @@ -3588,9 +3785,9 @@ priority_is_urgent(int priority) */ static void processor_setrun( - processor_t processor, - thread_t thread, - integer_t options) + processor_t processor, + thread_t thread, + integer_t options) { processor_set_t pset = processor->processor_set; pset_assert_locked(pset); @@ -3601,44 +3798,27 @@ processor_setrun( thread->chosen_processor = processor; - /* - * Dispatch directly onto idle processor. - */ - if ( (SCHED(direct_dispatch_to_idle_processors) || - thread->bound_processor == processor) - && processor->state == PROCESSOR_IDLE) { - - processor->next_thread = thread; - processor_state_update_from_thread(processor, thread); - processor->deadline = UINT64_MAX; - pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING); - - ipi_type = sched_ipi_action(processor, thread, true, SCHED_IPI_EVENT_BOUND_THR); - pset_unlock(pset); - sched_ipi_perform(processor, ipi_type); - return; - } - /* * Set preemption mode. */ #if defined(CONFIG_SCHED_DEFERRED_AST) /* TODO: Do we need to care about urgency (see rdar://problem/20136239)? */ #endif - if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) + if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) { preempt = (AST_PREEMPT | AST_URGENT); - else if(processor->active_thread && thread_eager_preemption(processor->active_thread)) + } else if (processor->active_thread && thread_eager_preemption(processor->active_thread)) { preempt = (AST_PREEMPT | AST_URGENT); - else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) { - if(SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) { + } else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) { + if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) { preempt = (options & SCHED_PREEMPT)? AST_PREEMPT: AST_NONE; } else { preempt = AST_NONE; } - } else + } else { preempt = (options & SCHED_PREEMPT)? 
AST_PREEMPT: AST_NONE; + } - if ((options & (SCHED_PREEMPT|SCHED_REBALANCE)) == (SCHED_PREEMPT|SCHED_REBALANCE)) { + if ((options & (SCHED_PREEMPT | SCHED_REBALANCE)) == (SCHED_PREEMPT | SCHED_REBALANCE)) { /* * Having gone to the trouble of forcing this thread off a less preferred core, * we should force the preferable core to reschedule immediately to give this @@ -3658,14 +3838,14 @@ processor_setrun( processor->deadline = UINT64_MAX; pset_update_processor_state(pset, processor, PROCESSOR_DISPATCHING); ipi_action = eExitIdle; - } else if ( processor->state == PROCESSOR_DISPATCHING) { + } else if (processor->state == PROCESSOR_DISPATCHING) { if ((processor->next_thread == THREAD_NULL) && (processor->current_pri < thread->sched_pri)) { processor_state_update_from_thread(processor, thread); processor->deadline = UINT64_MAX; } - } else if ( (processor->state == PROCESSOR_RUNNING || - processor->state == PROCESSOR_SHUTDOWN) && - (thread->sched_pri >= processor->current_pri)) { + } else if ((processor->state == PROCESSOR_RUNNING || + processor->state == PROCESSOR_SHUTDOWN) && + (thread->sched_pri >= processor->current_pri)) { ipi_action = eInterruptRunning; } } else { @@ -3673,11 +3853,10 @@ processor_setrun( * New thread is not important enough to preempt what is running, but * special processor states may need special handling */ - if (processor->state == PROCESSOR_SHUTDOWN && - thread->sched_pri >= processor->current_pri ) { + if (processor->state == PROCESSOR_SHUTDOWN && + thread->sched_pri >= processor->current_pri) { ipi_action = eInterruptRunning; } else if (processor->state == PROCESSOR_IDLE) { - processor->next_thread = THREAD_NULL; processor_state_update_from_thread(processor, thread); processor->deadline = UINT64_MAX; @@ -3688,19 +3867,32 @@ processor_setrun( } if (ipi_action != eDoNothing) { - if (processor == current_processor()) { - if (csw_check_locked(processor, pset, AST_NONE) != AST_NONE) - ast_on(preempt); - } else { - sched_ipi_event_t event = (options & SCHED_REBALANCE) ? SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT; - ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event); - } - } - pset_unlock(pset); - sched_ipi_perform(processor, ipi_type); -} + if (processor == current_processor()) { + if ((preempt = csw_check_locked(processor->active_thread, processor, pset, AST_NONE)) != AST_NONE) { + ast_on(preempt); + } -/* + if ((preempt & AST_URGENT) == AST_URGENT) { + bit_set(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); + } else { + bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); + } + + if ((preempt & AST_PREEMPT) == AST_PREEMPT) { + bit_set(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); + } else { + bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); + } + } else { + sched_ipi_event_t event = (options & SCHED_REBALANCE) ? 
SCHED_IPI_EVENT_REBALANCE : SCHED_IPI_EVENT_PREEMPT; + ipi_type = sched_ipi_action(processor, thread, (ipi_action == eExitIdle), event); + } + } + pset_unlock(pset); + sched_ipi_perform(processor, ipi_type); +} + +/* * choose_next_pset: * * Return the next sibling pset containing @@ -3711,15 +3903,15 @@ processor_setrun( */ static processor_set_t choose_next_pset( - processor_set_t pset) + processor_set_t pset) { - processor_set_t nset = pset; + processor_set_t nset = pset; do { nset = next_pset(nset); } while (nset->online_processor_count < 1 && nset != pset); - return (nset); + return nset; } /* @@ -3762,8 +3954,9 @@ choose_processor( if (pset->online_processor_count) { if ((processor == PROCESSOR_NULL) || (processor->processor_set == pset && processor->state == PROCESSOR_IDLE)) { processor_t mc_processor = machine_choose_processor(pset, processor); - if (mc_processor != PROCESSOR_NULL) + if (mc_processor != PROCESSOR_NULL) { processor = mc_processor->processor_primary; + } } } @@ -3778,39 +3971,42 @@ choose_processor( processor = PROCESSOR_NULL; } else if (!processor->is_recommended) { processor = PROCESSOR_NULL; + } else if ((thread->sched_pri >= BASEPRI_RTQUEUES) && !sched_ok_to_run_realtime_thread(pset, processor)) { + processor = PROCESSOR_NULL; } else { switch (processor->state) { - case PROCESSOR_START: - case PROCESSOR_SHUTDOWN: - case PROCESSOR_OFF_LINE: - /* - * Hint is for a processor that cannot support running new threads. - */ - processor = PROCESSOR_NULL; - break; - case PROCESSOR_IDLE: - /* - * Hint is for an idle processor. Assume it is no worse than any other - * idle processor. The platform layer had an opportunity to provide - * the "least cost idle" processor above. - */ - return (processor); - case PROCESSOR_RUNNING: - case PROCESSOR_DISPATCHING: - /* - * Hint is for an active CPU. This fast-path allows - * realtime threads to preempt non-realtime threads - * to regain their previous executing processor. - */ - if ((thread->sched_pri >= BASEPRI_RTQUEUES) && - (processor->current_pri < BASEPRI_RTQUEUES)) - return (processor); - - /* Otherwise, use hint as part of search below */ - break; - default: - processor = PROCESSOR_NULL; - break; + case PROCESSOR_START: + case PROCESSOR_SHUTDOWN: + case PROCESSOR_OFF_LINE: + /* + * Hint is for a processor that cannot support running new threads. + */ + processor = PROCESSOR_NULL; + break; + case PROCESSOR_IDLE: + /* + * Hint is for an idle processor. Assume it is no worse than any other + * idle processor. The platform layer had an opportunity to provide + * the "least cost idle" processor above. + */ + return processor; + case PROCESSOR_RUNNING: + case PROCESSOR_DISPATCHING: + /* + * Hint is for an active CPU. This fast-path allows + * realtime threads to preempt non-realtime threads + * to regain their previous executing processor. 
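 */

/*
 * Illustrative sketch (editorial aside, not part of the patch):
 * sched_ipi_policy() earlier in this hunk series reduces to a small decision
 * table -- spill/rebalance/bound-thread events and RT preemption always take
 * an immediate (or idle) IPI, while non-RT preemption of an idle core may
 * use a deferred IPI when the platform supports it.  Toy rendering (names
 * invented):
 */
#include <stdbool.h>

enum toy_ipi { TOY_IPI_NONE, TOY_IPI_IMMEDIATE, TOY_IPI_IDLE, TOY_IPI_DEFERRED };

static enum toy_ipi
toy_ipi_policy(bool dst_idle, bool rt_thread, bool preempt_event,
    bool deferred_supported)
{
	if (!preempt_event || rt_thread) {
		/* spill / SMT rebalance / rebalance / bound thread, or RT */
		return dst_idle ? TOY_IPI_IDLE : TOY_IPI_IMMEDIATE;
	}
	if (deferred_supported && dst_idle) {
		return TOY_IPI_DEFERRED;	/* coalesce idle-core wakeups */
	}
	return dst_idle ? TOY_IPI_IDLE : TOY_IPI_IMMEDIATE;
}

/*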
+ */ + if ((thread->sched_pri >= BASEPRI_RTQUEUES) && + (processor->current_pri < BASEPRI_RTQUEUES)) { + return processor; + } + + /* Otherwise, use hint as part of search below */ + break; + default: + processor = PROCESSOR_NULL; + break; } } } @@ -3831,11 +4027,12 @@ choose_processor( integer_t lowest_priority = MAXPRI + 1; integer_t lowest_secondary_priority = MAXPRI + 1; integer_t lowest_unpaired_primary_priority = MAXPRI + 1; + integer_t lowest_idle_secondary_priority = MAXPRI + 1; integer_t lowest_count = INT_MAX; uint64_t furthest_deadline = 1; processor_t lp_processor = PROCESSOR_NULL; processor_t lp_unpaired_primary_processor = PROCESSOR_NULL; - processor_t lp_unpaired_secondary_processor = PROCESSOR_NULL; + processor_t lp_idle_secondary_processor = PROCESSOR_NULL; processor_t lp_paired_secondary_processor = PROCESSOR_NULL; processor_t lc_processor = PROCESSOR_NULL; processor_t fd_processor = PROCESSOR_NULL; @@ -3857,19 +4054,30 @@ choose_processor( } do { - /* - * Choose an idle processor, in pset traversal order - */ + int cpuid; - uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & - pset->primary_map & - pset->recommended_bitmask & - ~pset->pending_AST_cpu_mask); + if (thread->sched_pri >= BASEPRI_RTQUEUES) { + processor = choose_processor_for_realtime_thread(pset); + if (processor) { + return processor; + } + } else { + /* + * Choose an idle processor, in pset traversal order + */ - int cpuid = lsb_first(idle_primary_map); - if (cpuid >= 0) { - processor = processor_array[cpuid]; - return processor; + uint64_t idle_primary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & + pset->primary_map & + pset->recommended_bitmask); + + /* there shouldn't be a pending AST if the processor is idle */ + assert((idle_primary_map & pset->pending_AST_URGENT_cpu_mask) == 0); + + cpuid = lsb_first(idle_primary_map); + if (cpuid >= 0) { + processor = processor_array[cpuid]; + return processor; + } } /* @@ -3878,18 +4086,27 @@ choose_processor( */ uint64_t active_map = ((pset->cpu_state_map[PROCESSOR_RUNNING] | pset->cpu_state_map[PROCESSOR_DISPATCHING]) & - pset->recommended_bitmask & - ~pset->pending_AST_cpu_mask); + pset->recommended_bitmask & + ~pset->pending_AST_URGENT_cpu_mask); + + if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE) { + active_map &= ~pset->pending_AST_PREEMPT_cpu_mask; + } + active_map = bit_ror64(active_map, (pset->last_chosen + 1)); for (int rotid = lsb_first(active_map); rotid >= 0; rotid = lsb_next(active_map, rotid)) { cpuid = ((rotid + pset->last_chosen + 1) & 63); processor = processor_array[cpuid]; integer_t cpri = processor->current_pri; - if (processor->processor_primary != processor) { - if (cpri < lowest_secondary_priority) { - lowest_secondary_priority = cpri; - lp_paired_secondary_processor = processor; + processor_t primary = processor->processor_primary; + if (primary != processor) { + /* If primary is running a NO_SMT thread, don't choose its secondary */ + if (!((primary->state == PROCESSOR_RUNNING) && processor_active_thread_no_smt(primary))) { + if (cpri < lowest_secondary_priority) { + lowest_secondary_priority = cpri; + lp_paired_secondary_processor = processor; + } } } else { if (cpri < lowest_priority) { @@ -3915,85 +4132,115 @@ choose_processor( * the idle primary would have short-circuited the loop above */ uint64_t idle_secondary_map = (pset->cpu_state_map[PROCESSOR_IDLE] & - ~pset->primary_map & - pset->recommended_bitmask & - ~pset->pending_AST_cpu_mask); + ~pset->primary_map & + pset->recommended_bitmask); + + /* there 
shouldn't be a pending AST if the processor is idle */ + assert((idle_secondary_map & pset->pending_AST_URGENT_cpu_mask) == 0); + assert((idle_secondary_map & pset->pending_AST_PREEMPT_cpu_mask) == 0); for (cpuid = lsb_first(idle_secondary_map); cpuid >= 0; cpuid = lsb_next(idle_secondary_map, cpuid)) { processor = processor_array[cpuid]; processor_t cprimary = processor->processor_primary; - if (!cprimary->is_recommended) { + integer_t primary_pri = cprimary->current_pri; + + /* + * TODO: This should also make the same decisions + * as secondary_can_run_realtime_thread + * + * TODO: Keep track of the pending preemption priority + * of the primary to make this more accurate. + */ + + /* If the primary is running a no-smt thread, then don't choose its secondary */ + if (cprimary->state == PROCESSOR_RUNNING && + processor_active_thread_no_smt(cprimary)) { continue; } - if (bit_test(pset->pending_AST_cpu_mask, cprimary->cpu_id)) { - continue; + + /* + * Find the idle secondary processor with the lowest priority primary + * + * We will choose this processor as a fallback if we find no better + * primary to preempt. + */ + if (primary_pri < lowest_idle_secondary_priority) { + lp_idle_secondary_processor = processor; + lowest_idle_secondary_priority = primary_pri; } - /* If the primary processor is offline or starting up, it's not a candidate for this path */ - if (cprimary->state == PROCESSOR_RUNNING || cprimary->state == PROCESSOR_DISPATCHING) { - integer_t primary_pri = cprimary->current_pri; + /* Find the the lowest priority active primary with idle secondary */ + if (primary_pri < lowest_unpaired_primary_priority) { + /* If the primary processor is offline or starting up, it's not a candidate for this path */ + if (cprimary->state != PROCESSOR_RUNNING && + cprimary->state != PROCESSOR_DISPATCHING) { + continue; + } - if (primary_pri < lowest_unpaired_primary_priority) { - lowest_unpaired_primary_priority = primary_pri; - lp_unpaired_primary_processor = cprimary; - lp_unpaired_secondary_processor = processor; + if (!cprimary->is_recommended) { + continue; } + + /* if the primary is pending preemption, don't try to re-preempt it */ + if (bit_test(pset->pending_AST_URGENT_cpu_mask, cprimary->cpu_id)) { + continue; + } + + if (SCHED(priority_is_urgent)(thread->sched_pri) == FALSE && + bit_test(pset->pending_AST_PREEMPT_cpu_mask, cprimary->cpu_id)) { + continue; + } + + lowest_unpaired_primary_priority = primary_pri; + lp_unpaired_primary_processor = cprimary; } } + /* + * We prefer preempting a primary processor over waking up its secondary. + * The secondary will then be woken up by the preempted thread. + */ + if (thread->sched_pri > lowest_unpaired_primary_priority) { + pset->last_chosen = lp_unpaired_primary_processor->cpu_id; + return lp_unpaired_primary_processor; + } - if (thread->sched_pri >= BASEPRI_RTQUEUES) { + /* + * We prefer preempting a lower priority active processor over directly + * waking up an idle secondary. + * The preempted thread will then find the idle secondary. + */ + if (thread->sched_pri > lowest_priority) { + pset->last_chosen = lp_processor->cpu_id; + return lp_processor; + } + if (thread->sched_pri >= BASEPRI_RTQUEUES) { /* * For realtime threads, the most important aspect is - * scheduling latency, so we attempt to assign threads - * to good preemption candidates (assuming an idle primary - * processor was not available above). 
+ * scheduling latency, so we will pick an active + * secondary processor in this pset, or preempt + * another RT thread with a further deadline before + * going to the next pset. */ - if (thread->sched_pri > lowest_unpaired_primary_priority) { - pset->last_chosen = lp_unpaired_primary_processor->cpu_id; - return lp_unpaired_primary_processor; - } - if (thread->sched_pri > lowest_priority) { - pset->last_chosen = lp_processor->cpu_id; - return lp_processor; - } if (sched_allow_rt_smt && (thread->sched_pri > lowest_secondary_priority)) { pset->last_chosen = lp_paired_secondary_processor->cpu_id; return lp_paired_secondary_processor; } - if (thread->realtime.deadline < furthest_deadline) - return fd_processor; - - /* - * If all primary and secondary CPUs are busy with realtime - * threads with deadlines earlier than us, move on to next - * pset. - */ - } - else { - if (thread->sched_pri > lowest_unpaired_primary_priority) { - pset->last_chosen = lp_unpaired_primary_processor->cpu_id; - return lp_unpaired_primary_processor; - } - if (thread->sched_pri > lowest_priority) { - pset->last_chosen = lp_processor->cpu_id; - return lp_processor; + if (thread->realtime.deadline < furthest_deadline) { + return fd_processor; } - - /* - * If all primary processor in this pset are running a higher - * priority thread, move on to next pset. Only when we have - * exhausted this search do we fall back to other heuristics. - */ } /* - * Move onto the next processor set. + * If all primary processors in this pset are running a higher + * priority thread, move on to next pset. Only when we have + * exhausted the search for primary processors do we + * fall back to secondaries. */ nset = next_pset(pset); @@ -4008,7 +4255,7 @@ choose_processor( /* * Make sure that we pick a running processor, * and that the correct processor set is locked. - * Since we may have unlock the candidate processor's + * Since we may have unlocked the candidate processor's * pset, it may have changed state. * * All primary processors are running a higher priority @@ -4017,11 +4264,10 @@ choose_processor( * primary, or the least busy primary. */ do { - /* lowest_priority is evaluated in the main loops above */ - if (lp_unpaired_secondary_processor != PROCESSOR_NULL) { - processor = lp_unpaired_secondary_processor; - lp_unpaired_secondary_processor = PROCESSOR_NULL; + if (lp_idle_secondary_processor != PROCESSOR_NULL) { + processor = lp_idle_secondary_processor; + lp_idle_secondary_processor = PROCESSOR_NULL; } else if (lp_paired_secondary_processor != PROCESSOR_NULL) { processor = lp_paired_secondary_processor; lp_paired_secondary_processor = PROCESSOR_NULL; @@ -4032,7 +4278,11 @@ choose_processor( /* * All processors are executing higher * priority threads, and the lowest_count - * candidate was not usable + * candidate was not usable, so we pick a processor + * to give this thread somewhere to be enqueued. + * + * TODO: Need tracepoint or something to show when this happens + * TODO: Prefer a processor in the original pset */ processor = master_processor; } @@ -4054,9 +4304,9 @@ choose_processor( * and that thread needs to be enqueued on its runqueue to run * when the processor is restarted. 
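[Editor's note: earlier in this hunk, furthest_deadline and fd_processor implement the realtime last resort described by the rewritten comment: while scanning, remember the core whose current realtime thread has the furthest deadline, and preempt it only if the incoming thread's deadline is strictly earlier, otherwise move on to the next pset. A hedged standalone sketch of that comparison; the struct and function names are illustrative, not the kernel's.

#include <stdint.h>
#include <stddef.h>

struct rt_cpu { uint64_t deadline; };   /* deadline of the RT thread on core */

/* Track the busy core with the furthest (largest) deadline during the scan,
 * then preempt it only if the new thread's deadline is strictly earlier. */
static struct rt_cpu *
edf_fallback(struct rt_cpu *cpus, size_t n, uint64_t new_deadline)
{
    uint64_t furthest = 0;
    struct rt_cpu *fd = NULL;

    for (size_t i = 0; i < n; i++) {
        if (cpus[i].deadline > furthest) {
            furthest = cpus[i].deadline;
            fd = &cpus[i];
        }
    }
    if (fd != NULL && new_deadline < furthest)
        return fd;    /* preempt the furthest-deadline core */
    return NULL;      /* every core has an earlier deadline; try the next pset */
}]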
*/ - if (processor != master_processor && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)) + if (processor != master_processor && (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)) { processor = PROCESSOR_NULL; - + } } while (processor == PROCESSOR_NULL); pset->last_chosen = processor->cpu_id; @@ -4074,20 +4324,21 @@ choose_processor( */ void thread_setrun( - thread_t thread, - integer_t options) + thread_t thread, + integer_t options) { - processor_t processor; - processor_set_t pset; + processor_t processor; + processor_set_t pset; - assert((thread->state & (TH_RUN|TH_WAIT|TH_UNINT|TH_TERMINATE|TH_TERMINATE2)) == TH_RUN); + assert((thread->state & (TH_RUN | TH_WAIT | TH_UNINT | TH_TERMINATE | TH_TERMINATE2)) == TH_RUN); assert(thread->runq == PROCESSOR_NULL); /* * Update priority if needed. */ - if (SCHED(can_update_priority)(thread)) + if (SCHED(can_update_priority)(thread)) { SCHED(update_priority)(thread); + } thread->sfi_class = sfi_thread_classify(thread); @@ -4108,8 +4359,8 @@ thread_setrun( processor = SCHED(choose_processor)(pset, PROCESSOR_NULL, thread); pset = processor->processor_set; - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0); + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0); } else if (thread->last_processor != PROCESSOR_NULL) { /* * Simple (last processor) affinity case. @@ -4120,8 +4371,8 @@ thread_setrun( processor = SCHED(choose_processor)(pset, processor, thread); pset = processor->processor_set; - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0); + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), thread->last_processor->cpu_id, processor->cpu_id, processor->state, 0); } else { /* * No Affinity case: @@ -4129,11 +4380,12 @@ thread_setrun( * Utilitize a per task hint to spread threads * among the available processor sets. 
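[Editor's note: the per-task hint described just above is a cheap spreading device: each unbound thread starts its pset search where the task's previous thread landed and advances round-robin, so one task's threads fan out across psets without any global load accounting. A small sketch of that shape, with choose_next_pset() reduced to a modulo increment; NPSETS and the pset struct are hypothetical.

#include <stddef.h>

#define NPSETS 4
struct pset { int id; };
static struct pset psets[NPSETS];

/* Hypothetical analog of the task pset-hint path in thread_setrun():
 * start from the task's hint (or the current pset), advance one, and
 * store the choice back so the task's next thread keeps spreading. */
static struct pset *
choose_pset_for_task(int *task_pset_hint, int current_pset)
{
    int start = (*task_pset_hint >= 0) ? *task_pset_hint : current_pset;
    int chosen = (start + 1) % NPSETS;    /* choose_next_pset() stand-in */

    *task_pset_hint = chosen;
    return &psets[chosen];
}]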
*/ - task_t task = thread->task; + task_t task = thread->task; pset = task->pset_hint; - if (pset == PROCESSOR_SET_NULL) + if (pset == PROCESSOR_SET_NULL) { pset = current_processor()->processor_set; + } pset = choose_next_pset(pset); pset_lock(pset); @@ -4142,8 +4394,8 @@ thread_setrun( pset = processor->processor_set; task->pset_hint = pset; - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0); + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)-1, processor->cpu_id, processor->state, 0); } } else { /* @@ -4155,8 +4407,8 @@ thread_setrun( pset = processor->processor_set; pset_lock(pset); - SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR)|DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0); + SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), (uintptr_t)-2, processor->cpu_id, processor->state, 0); } #else /* !__SMP__ */ /* Only one processor to choose */ @@ -4183,14 +4435,15 @@ thread_setrun( processor_set_t task_choose_pset( - task_t task) + task_t task) { - processor_set_t pset = task->pset_hint; + processor_set_t pset = task->pset_hint; - if (pset != PROCESSOR_SET_NULL) + if (pset != PROCESSOR_SET_NULL) { pset = choose_next_pset(pset); + } - return (pset); + return pset; } /* @@ -4201,22 +4454,33 @@ task_choose_pset( */ ast_t csw_check( - processor_t processor, - ast_t check_reason) + thread_t thread, + processor_t processor, + ast_t check_reason) { - processor_set_t pset = processor->processor_set; - ast_t result; + processor_set_t pset = processor->processor_set; + + assert(thread == processor->active_thread); pset_lock(pset); - /* If we were sent a remote AST and interrupted a running processor, acknowledge it here with pset lock held */ - bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id); + processor_state_update_from_thread(processor, thread); + + ast_t preempt = csw_check_locked(thread, processor, pset, check_reason); + + /* Acknowledge the IPI if we decided not to preempt */ + + if ((preempt & AST_URGENT) == 0) { + bit_clear(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id); + } - result = csw_check_locked(processor, pset, check_reason); + if ((preempt & AST_PREEMPT) == 0) { + bit_clear(pset->pending_AST_PREEMPT_cpu_mask, processor->cpu_id); + } pset_unlock(pset); - return result; + return preempt; } /* @@ -4225,23 +4489,24 @@ csw_check( */ ast_t csw_check_locked( - processor_t processor, - processor_set_t pset, - ast_t check_reason) + thread_t thread, + processor_t processor, + processor_set_t pset, + ast_t check_reason) { - ast_t result; - thread_t thread = processor->active_thread; + ast_t result; if (processor->first_timeslice) { - if (rt_runq_count(pset) > 0) - return (check_reason | AST_PREEMPT | AST_URGENT); - } - else { if (rt_runq_count(pset) > 0) { - if (BASEPRI_RTQUEUES > processor->current_pri) - return (check_reason | AST_PREEMPT | AST_URGENT); - else - return (check_reason | AST_PREEMPT); + return check_reason | AST_PREEMPT | AST_URGENT; + } + } else { + if (rt_runq_count(pset) > 0) { + if (BASEPRI_RTQUEUES > processor->current_pri) { + return check_reason | 
AST_PREEMPT | AST_URGENT; + } else { + return check_reason | AST_PREEMPT; + } } } @@ -4252,13 +4517,14 @@ csw_check_locked( * try to idle the processor and re-dispatch the thread to a recommended processor. */ if (!processor->is_recommended) { - return (check_reason | AST_PREEMPT | AST_URGENT); + return check_reason | AST_PREEMPT | AST_URGENT; } #endif result = SCHED(processor_csw_check)(processor); - if (result != AST_NONE) - return (check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE)); + if (result != AST_NONE) { + return check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE); + } #if __SMP__ /* @@ -4267,7 +4533,7 @@ csw_check_locked( * TODO: Should these set AST_REBALANCE? */ if (SCHED(avoid_processor_enabled) && SCHED(thread_avoid_processor)(processor, thread)) { - return (check_reason | AST_PREEMPT); + return check_reason | AST_PREEMPT; } /* @@ -4280,12 +4546,14 @@ csw_check_locked( */ if (processor->current_pri < BASEPRI_RTQUEUES && - processor->processor_primary != processor) - return (check_reason | AST_PREEMPT); + processor->processor_primary != processor) { + return check_reason | AST_PREEMPT; + } #endif - if (thread->state & TH_SUSP) - return (check_reason | AST_PREEMPT); + if (thread->state & TH_SUSP) { + return check_reason | AST_PREEMPT; + } #if CONFIG_SCHED_SFI /* @@ -4293,13 +4561,78 @@ csw_check_locked( * an SFI wait? */ result = sfi_thread_needs_ast(thread, NULL); - if (result != AST_NONE) - return (check_reason | result); + if (result != AST_NONE) { + return check_reason | result; + } #endif - return (AST_NONE); + return AST_NONE; } +/* + * Handle preemption IPI or IPI in response to setting an AST flag + * Triggered by cause_ast_check + * Called at splsched + */ +void +ast_check(processor_t processor) +{ + if (processor->state != PROCESSOR_RUNNING && + processor->state != PROCESSOR_SHUTDOWN) { + return; + } + + thread_t thread = processor->active_thread; + + assert(thread == current_thread()); + + thread_lock(thread); + + /* + * Propagate thread ast to processor. + * (handles IPI in response to setting AST flag) + */ + ast_propagate(thread); + + /* + * Stash the old urgency and perfctl values to find out if + * csw_check updates them. + */ + thread_urgency_t old_urgency = processor->current_urgency; + perfcontrol_class_t old_perfctl_class = processor->current_perfctl_class; + + ast_t preempt; + + if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) { + ast_on(preempt); + } + + if (old_urgency != processor->current_urgency) { + /* + * Urgency updates happen with the thread lock held (ugh). + * TODO: This doesn't notice QoS changes... + */ + uint64_t urgency_param1, urgency_param2; + + thread_urgency_t urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2); + thread_tell_urgency(urgency, urgency_param1, urgency_param2, 0, thread); + } + + thread_unlock(thread); + + if (old_perfctl_class != processor->current_perfctl_class) { + /* + * We updated the perfctl class of this thread from another core. + * Let CLPC know that the currently running thread has a new + * class. 
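[Editor's note: ast_check() above leans on a stash-and-compare pattern: it records current_urgency and current_perfctl_class before csw_check() runs, then issues the urgency callout and the CLPC update only when the recomputed values differ, which keeps the common no-change IPI cheap. The pattern in isolation, with hypothetical helpers standing in for csw_check() and the callouts.

typedef int urgency_t;
typedef int perfctl_class_t;

struct cpu_state {
    urgency_t       current_urgency;
    perfctl_class_t current_perfctl_class;
};

extern void recompute(struct cpu_state *cpu);    /* may update both fields */
extern void tell_urgency(urgency_t u);           /* assumed callout */
extern void tell_perfctl(perfctl_class_t c);     /* assumed callout */

static void
check_and_notify(struct cpu_state *cpu)
{
    /* Stash the old values; recompute() may overwrite them as a side effect. */
    urgency_t       old_urgency = cpu->current_urgency;
    perfctl_class_t old_class   = cpu->current_perfctl_class;

    recompute(cpu);

    /* Only pay for the callouts when something actually changed. */
    if (cpu->current_urgency != old_urgency)
        tell_urgency(cpu->current_urgency);
    if (cpu->current_perfctl_class != old_class)
        tell_perfctl(cpu->current_perfctl_class);
}]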
+ */ + + machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE, + mach_approximate_time(), 0, thread); + } +} + + /* * set_sched_pri: * @@ -4311,27 +4644,24 @@ csw_check_locked( */ void set_sched_pri( - thread_t thread, - int new_priority, - set_sched_pri_options_t options) + thread_t thread, + int new_priority, + set_sched_pri_options_t options) { - thread_t cthread = current_thread(); - boolean_t is_current_thread = (thread == cthread) ? TRUE : FALSE; - int curgency, nurgency; - uint64_t urgency_param1, urgency_param2; - boolean_t removed_from_runq = FALSE; - + bool is_current_thread = (thread == current_thread()); + bool removed_from_runq = false; bool lazy_update = ((options & SETPRI_LAZY) == SETPRI_LAZY); int old_priority = thread->sched_pri; /* If we're already at this priority, no need to mess with the runqueue */ - if (new_priority == old_priority) + if (new_priority == old_priority) { return; + } if (is_current_thread) { + assert(thread->state & TH_RUN); assert(thread->runq == PROCESSOR_NULL); - curgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2); } else { removed_from_runq = thread_run_queue_remove(thread); } @@ -4339,48 +4669,64 @@ set_sched_pri( thread->sched_pri = new_priority; KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHANGE_PRIORITY), - (uintptr_t)thread_tid(thread), - thread->base_pri, - thread->sched_pri, - thread->sched_usage, - 0); + (uintptr_t)thread_tid(thread), + thread->base_pri, + thread->sched_pri, + thread->sched_usage, + 0); + + if (removed_from_runq) { + thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ); + } else if (is_current_thread) { + processor_t processor = thread->last_processor; + assert(processor == current_processor()); + + thread_urgency_t old_urgency = processor->current_urgency; + + /* + * When dropping in priority, check if the thread no longer belongs on core. + * If a thread raises its own priority, don't aggressively rebalance it. + * + * + * csw_check does a processor_state_update_from_thread, but + * we should do our own if we're being lazy. + */ + if (!lazy_update && new_priority < old_priority) { + ast_t preempt; + + if ((preempt = csw_check(thread, processor, AST_NONE)) != AST_NONE) { + ast_on(preempt); + } + } else { + processor_state_update_from_thread(processor, thread); + } - if (is_current_thread) { - nurgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2); /* * set_sched_pri doesn't alter RT params. We expect direct base priority/QoS * class alterations from user space to occur relatively infrequently, hence * those are lazily handled. QoS classes have distinct priority bands, and QoS * inheritance is expected to involve priority changes. 
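[Editor's note: once the new priority is stored, the rewritten set_sched_pri() falls into one of three actions: a thread that was pulled off a runqueue is reinserted at its new priority; the running thread re-runs csw_check() only on a non-lazy priority drop (raising your own priority never forces a rebalance); and a thread running on another core just gets an AST poke. A condensed, hypothetical decision table, not the kernel function itself.

#include <stdbool.h>

typedef enum {
    PRI_ACTION_NONE,         /* nothing further to do                      */
    PRI_ACTION_REINSERT,     /* was on a runqueue: requeue at new priority */
    PRI_ACTION_CSW_CHECK,    /* on core and dropped: may not belong here   */
    PRI_ACTION_REMOTE_AST    /* running elsewhere: cause_ast_check() it    */
} pri_action_t;

static pri_action_t
sched_pri_action(bool removed_from_runq, bool is_current_thread,
                 bool running_elsewhere, int old_pri, int new_pri, bool lazy)
{
    if (removed_from_runq)
        return PRI_ACTION_REINSERT;
    if (is_current_thread)
        return (!lazy && new_pri < old_pri) ? PRI_ACTION_CSW_CHECK
                                            : PRI_ACTION_NONE;
    if (!lazy && running_elsewhere)
        return PRI_ACTION_REMOTE_AST;
    return PRI_ACTION_NONE;
}]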
*/ - uint64_t ctime = mach_approximate_time(); - if (nurgency != curgency) { - thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread); - } - machine_thread_going_on_core(thread, nurgency, 0, 0, ctime); - } + if (processor->current_urgency != old_urgency) { + uint64_t urgency_param1, urgency_param2; - if (removed_from_runq) - thread_run_queue_reinsert(thread, SCHED_PREEMPT | SCHED_TAILQ); - else if (thread->state & TH_RUN) { - processor_t processor = thread->last_processor; + thread_urgency_t new_urgency = thread_get_urgency(thread, + &urgency_param1, &urgency_param2); - if (is_current_thread) { - processor_state_update_from_thread(processor, thread); + thread_tell_urgency(new_urgency, urgency_param1, + urgency_param2, 0, thread); + } - /* - * When dropping in priority, check if the thread no longer belongs on core. - * If a thread raises its own priority, don't aggressively rebalance it. - * - */ - if (!lazy_update && new_priority < old_priority) { - ast_t preempt; + /* TODO: only call this if current_perfctl_class changed */ + uint64_t ctime = mach_approximate_time(); + machine_thread_going_on_core(thread, processor->current_urgency, 0, 0, ctime); + } else if (thread->state & TH_RUN) { + processor_t processor = thread->last_processor; - if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE) - ast_on(preempt); - } - } else if (!lazy_update && processor != PROCESSOR_NULL && - processor != current_processor() && processor->active_thread == thread) { + if (!lazy_update && + processor != PROCESSOR_NULL && + processor != current_processor() && + processor->active_thread == thread) { cause_ast_check(processor); } } @@ -4398,8 +4744,8 @@ set_sched_pri( * This may be different than the thread that was passed in. */ thread_t -thread_run_queue_remove_for_handoff(thread_t thread) { - +thread_run_queue_remove_for_handoff(thread_t thread) +{ thread_t pulled_thread = THREAD_NULL; thread_lock(thread); @@ -4416,9 +4762,9 @@ thread_run_queue_remove_for_handoff(thread_t thread) { processor_t processor = current_processor(); if (processor->current_pri < BASEPRI_RTQUEUES && thread->sched_pri < BASEPRI_RTQUEUES && (thread->bound_processor == PROCESSOR_NULL || thread->bound_processor == processor)) { - - if (thread_run_queue_remove(thread)) - pulled_thread = thread; + if (thread_run_queue_remove(thread)) { + pulled_thread = thread; + } } thread_unlock(thread); @@ -4446,12 +4792,12 @@ thread_run_queue_remove_for_handoff(thread_t thread) { */ boolean_t thread_run_queue_remove( - thread_t thread) + thread_t thread) { boolean_t removed = FALSE; processor_t processor = thread->runq; - if ((thread->state & (TH_RUN|TH_WAIT)) == TH_WAIT) { + if ((thread->state & (TH_RUN | TH_WAIT)) == TH_WAIT) { /* Thread isn't runnable */ assert(thread->runq == PROCESSOR_NULL); return FALSE; @@ -4496,7 +4842,7 @@ thread_run_queue_remove( rt_lock_unlock(pset); - return (removed); + return removed; } /* @@ -4518,67 +4864,100 @@ thread_run_queue_reinsert(thread_t thread, integer_t options) void sys_override_cpu_throttle(boolean_t enable_override) { - if (enable_override) + if (enable_override) { cpu_throttle_enabled = 0; - else + } else { cpu_throttle_enabled = 1; + } } -int +thread_urgency_t thread_get_urgency(thread_t thread, uint64_t *arg1, uint64_t *arg2) { + uint64_t urgency_param1 = 0, urgency_param2 = 0; + + thread_urgency_t urgency; + if (thread == NULL || (thread->state & TH_IDLE)) { - *arg1 = 0; - *arg2 = 0; + urgency_param1 = 0; + urgency_param2 = 0; - return (THREAD_URGENCY_NONE); + urgency = 
THREAD_URGENCY_NONE; } else if (thread->sched_mode == TH_MODE_REALTIME) { - *arg1 = thread->realtime.period; - *arg2 = thread->realtime.deadline; + urgency_param1 = thread->realtime.period; + urgency_param2 = thread->realtime.deadline; - return (THREAD_URGENCY_REAL_TIME); + urgency = THREAD_URGENCY_REAL_TIME; } else if (cpu_throttle_enabled && - ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) { + (thread->sched_pri <= MAXPRI_THROTTLE) && + (thread->base_pri <= MAXPRI_THROTTLE)) { /* - * Background urgency applied when thread priority is MAXPRI_THROTTLE or lower and thread is not promoted + * Threads that are running at low priority but are not + * tagged with a specific QoS are separated out from + * the "background" urgency. Performance management + * subsystem can decide to either treat these threads + * as normal threads or look at other signals like thermal + * levels for optimal power/perf tradeoffs for a platform. */ - *arg1 = thread->sched_pri; - *arg2 = thread->base_pri; + boolean_t thread_lacks_qos = (proc_get_effective_thread_policy(thread, TASK_POLICY_QOS) == THREAD_QOS_UNSPECIFIED); //thread_has_qos_policy(thread); + boolean_t task_is_suppressed = (proc_get_effective_task_policy(thread->task, TASK_POLICY_SUP_ACTIVE) == 0x1); - return (THREAD_URGENCY_BACKGROUND); - } else { - /* For otherwise unclassified threads, report throughput QoS - * parameters + /* + * Background urgency applied when thread priority is + * MAXPRI_THROTTLE or lower and thread is not promoted + * and thread has a QoS specified */ - *arg1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS); - *arg2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS); + urgency_param1 = thread->sched_pri; + urgency_param2 = thread->base_pri; + + if (thread_lacks_qos && !task_is_suppressed) { + urgency = THREAD_URGENCY_LOWPRI; + } else { + urgency = THREAD_URGENCY_BACKGROUND; + } + } else { + /* For otherwise unclassified threads, report throughput QoS parameters */ + urgency_param1 = proc_get_effective_thread_policy(thread, TASK_POLICY_THROUGH_QOS); + urgency_param2 = proc_get_effective_task_policy(thread->task, TASK_POLICY_THROUGH_QOS); + urgency = THREAD_URGENCY_NORMAL; + } - return (THREAD_URGENCY_NORMAL); + if (arg1 != NULL) { + *arg1 = urgency_param1; + } + if (arg2 != NULL) { + *arg2 = urgency_param2; } + + return urgency; } perfcontrol_class_t thread_get_perfcontrol_class(thread_t thread) { - /* Special case handling */ - if (thread->state & TH_IDLE) - return PERFCONTROL_CLASS_IDLE; - if (thread->task == kernel_task) - return PERFCONTROL_CLASS_KERNEL; - if (thread->sched_mode == TH_MODE_REALTIME) - return PERFCONTROL_CLASS_REALTIME; - - /* perfcontrol_class based on base_pri */ - if (thread->base_pri <= MAXPRI_THROTTLE) - return PERFCONTROL_CLASS_BACKGROUND; - else if (thread->base_pri <= BASEPRI_UTILITY) - return PERFCONTROL_CLASS_UTILITY; - else if (thread->base_pri <= BASEPRI_DEFAULT) - return PERFCONTROL_CLASS_NONUI; - else if (thread->base_pri <= BASEPRI_FOREGROUND) - return PERFCONTROL_CLASS_UI; - else - return PERFCONTROL_CLASS_ABOVEUI; + /* Special case handling */ + if (thread->state & TH_IDLE) { + return PERFCONTROL_CLASS_IDLE; + } + if (thread->task == kernel_task) { + return PERFCONTROL_CLASS_KERNEL; + } + if (thread->sched_mode == TH_MODE_REALTIME) { + return PERFCONTROL_CLASS_REALTIME; + } + + /* perfcontrol_class based on base_pri */ + if (thread->base_pri <= MAXPRI_THROTTLE) { + return PERFCONTROL_CLASS_BACKGROUND; + } else if 
(thread->base_pri <= BASEPRI_UTILITY) { + return PERFCONTROL_CLASS_UTILITY; + } else if (thread->base_pri <= BASEPRI_DEFAULT) { + return PERFCONTROL_CLASS_NONUI; + } else if (thread->base_pri <= BASEPRI_FOREGROUND) { + return PERFCONTROL_CLASS_UI; + } else { + return PERFCONTROL_CLASS_ABOVEUI; + } } /* @@ -4597,17 +4976,16 @@ thread_get_perfcontrol_class(thread_t thread) thread_t processor_idle( - thread_t thread, - processor_t processor) + thread_t thread, + processor_t processor) { - processor_set_t pset = processor->processor_set; - thread_t new_thread; - int state; + processor_set_t pset = processor->processor_set; + (void)splsched(); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_START, - (uintptr_t)thread_tid(thread), 0, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_START, + (uintptr_t)thread_tid(thread), 0, 0, 0, 0); SCHED_STATS_CPU_IDLE_START(processor); @@ -4627,32 +5005,38 @@ processor_idle( */ atomic_thread_fence(memory_order_acquire); - if (processor->state != PROCESSOR_IDLE) + if (processor->state != PROCESSOR_IDLE) { break; - if (bit_test(pset->pending_AST_cpu_mask, processor->cpu_id)) + } + if (bit_test(pset->pending_AST_URGENT_cpu_mask, processor->cpu_id)) { break; + } #if defined(CONFIG_SCHED_DEFERRED_AST) - if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) + if (bit_test(pset->pending_deferred_AST_cpu_mask, processor->cpu_id)) { break; + } #endif if (processor->is_recommended && (processor->processor_primary == processor)) { - if (rt_runq_count(pset)) + if (rt_runq_count(pset)) { break; + } } else { - if (SCHED(processor_bound_count)(processor)) + if (SCHED(processor_bound_count)(processor)) { break; + } } #if CONFIG_SCHED_IDLE_IN_PLACE if (thread != THREAD_NULL) { /* Did idle-in-place thread wake up */ - if ((thread->state & (TH_WAIT|TH_SUSP)) != TH_WAIT || thread->wake_active) + if ((thread->state & (TH_WAIT | TH_SUSP)) != TH_WAIT || thread->wake_active) { break; + } } #endif IDLE_KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -1, 0); machine_track_platform_idle(TRUE); @@ -4662,11 +5046,11 @@ processor_idle( (void)splsched(); - /* - * Check if we should call sched_timeshare_consider_maintenance() here. - * The CPU was woken out of idle due to an interrupt and we should do the - * call only if the processor is still idle. If the processor is non-idle, - * the threads running on the processor would do the call as part of + /* + * Check if we should call sched_timeshare_consider_maintenance() here. + * The CPU was woken out of idle due to an interrupt and we should do the + * call only if the processor is still idle. If the processor is non-idle, + * the threads running on the processor would do the call as part of * context swithing. 
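[Editor's note: the idle loop in this hunk only wakes for a short list of reasons: the processor left PROCESSOR_IDLE (someone dispatched it), an urgent remote AST arrived, a recommended primary sees realtime work, or a secondary sees processor-bound work. A skeleton of that structure with the pset and processor tests replaced by hypothetical predicates.

#include <stdbool.h>

/* Hypothetical predicates standing in for the pset/processor checks. */
extern bool still_idle(void);
extern bool urgent_ast_pending(void);
extern bool is_recommended_primary(void);
extern bool rt_work_available(void);
extern bool bound_work_available(void);
extern void platform_idle(void);       /* halt until the next interrupt */

static void
idle_loop(void)
{
    for (;;) {
        if (!still_idle())
            break;                     /* someone dispatched this cpu */
        if (urgent_ast_pending())
            break;                     /* remote preemption request */
        if (is_recommended_primary()) {
            if (rt_work_available())
                break;                 /* RT queue is served by primaries */
        } else {
            if (bound_work_available())
                break;                 /* secondaries take bound work only */
        }
        platform_idle();
    }
}]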
*/ if (processor->state == PROCESSOR_IDLE) { @@ -4674,14 +5058,15 @@ processor_idle( } IDLE_KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_NONE, (uintptr_t)thread_tid(thread), rt_runq_count(pset), SCHED(processor_runq_count)(processor), -2, 0); if (!SCHED(processor_queue_empty)(processor)) { /* Secondary SMT processors respond to directed wakeups * exclusively. Some platforms induce 'spurious' SMT wakeups. */ - if (processor->processor_primary == processor) - break; + if (processor->processor_primary == processor) { + break; + } } } @@ -4692,89 +5077,28 @@ processor_idle( cpu_quiescent_counter_join(ctime); - pset_lock(pset); + assert(processor->next_thread == NULL); - /* If we were sent a remote AST and came out of idle, acknowledge it here with pset lock held */ - bit_clear(pset->pending_AST_cpu_mask, processor->cpu_id); -#if defined(CONFIG_SCHED_DEFERRED_AST) - bit_clear(pset->pending_deferred_AST_cpu_mask, processor->cpu_id); -#endif - - state = processor->state; - if (state == PROCESSOR_DISPATCHING) { - /* - * Commmon case -- cpu dispatched. - */ - new_thread = processor->next_thread; - processor->next_thread = THREAD_NULL; - pset_update_processor_state(pset, processor, PROCESSOR_RUNNING); - - if ((new_thread != THREAD_NULL) && (SCHED(processor_queue_has_priority)(processor, new_thread->sched_pri, FALSE) || - (rt_runq_count(pset) > 0)) ) { - /* Something higher priority has popped up on the runqueue - redispatch this thread elsewhere */ - processor_state_update_idle(processor); - processor->deadline = UINT64_MAX; - - pset_unlock(pset); - - thread_lock(new_thread); - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REDISPATCH), (uintptr_t)thread_tid(new_thread), new_thread->sched_pri, rt_runq_count(pset), 0, 0); - thread_setrun(new_thread, SCHED_HEADQ); - thread_unlock(new_thread); + ast_t reason = AST_NONE; - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, - (uintptr_t)thread_tid(thread), state, 0, 0, 0); - - return (THREAD_NULL); - } - - sched_update_pset_load_average(pset); - - pset_unlock(pset); - - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, - (uintptr_t)thread_tid(thread), state, (uintptr_t)thread_tid(new_thread), 0, 0); - - return (new_thread); - - } else if (state == PROCESSOR_IDLE) { - pset_update_processor_state(pset, processor, PROCESSOR_RUNNING); - processor_state_update_idle(processor); - processor->deadline = UINT64_MAX; - - } else if (state == PROCESSOR_SHUTDOWN) { - /* - * Going off-line. Force a - * reschedule. - */ - if ((new_thread = processor->next_thread) != THREAD_NULL) { - processor->next_thread = THREAD_NULL; - processor_state_update_idle(processor); - processor->deadline = UINT64_MAX; - - pset_unlock(pset); - - thread_lock(new_thread); - thread_setrun(new_thread, SCHED_HEADQ); - thread_unlock(new_thread); + /* We're handling all scheduling AST's */ + ast_off(AST_SCHEDULING); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, - (uintptr_t)thread_tid(thread), state, 0, 0, 0); - - return (THREAD_NULL); - } - } + /* + * thread_select will move the processor from dispatching to running, + * or put it in idle if there's nothing to do. 
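[Editor's note: the comment above marks the substance of this rewrite: the idle path no longer re-implements dispatch (the old PROCESSOR_DISPATCHING state machine with its next_thread hand-off); it clears the scheduling ASTs it is about to service and runs the common selector. A hypothetical sketch of the new shape, with analogs for ast_off(AST_SCHEDULING) and thread_select().

typedef struct thread thread_t;

extern void      scheduling_asts_clear(void);   /* ast_off(AST_SCHEDULING) analog */
extern thread_t *select_next_thread(void);      /* thread_select() analog */

/* Idle hand-off: one dispatch path for idle wakeups and normal context
 * switches, instead of a duplicate state machine in processor_idle(). */
static thread_t *
idle_handoff(void)
{
    scheduling_asts_clear();       /* idle is consuming all scheduling ASTs */
    return select_next_thread();   /* moves the cpu to running, or back to idle */
}

Folding the idle wakeup into thread_select() removes a duplicated dispatch path, so higher-priority work that appears during the wakeup is handled by the same code that handles it everywhere else.]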
+ */ + thread_t current_thread = current_thread(); - pset_unlock(pset); + thread_lock(current_thread); + thread_t new_thread = thread_select(current_thread, processor, &reason); + thread_unlock(current_thread); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_IDLE) | DBG_FUNC_END, - (uintptr_t)thread_tid(thread), state, 0, 0, 0); - - return (THREAD_NULL); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE) | DBG_FUNC_END, + (uintptr_t)thread_tid(thread), processor->state, (uintptr_t)thread_tid(new_thread), reason, 0); + + return new_thread; } /* @@ -4785,8 +5109,8 @@ processor_idle( void idle_thread(void) { - processor_t processor = current_processor(); - thread_t new_thread; + processor_t processor = current_processor(); + thread_t new_thread; new_thread = processor_idle(THREAD_NULL, processor); if (new_thread != THREAD_NULL) { @@ -4800,16 +5124,17 @@ idle_thread(void) kern_return_t idle_thread_create( - processor_t processor) + processor_t processor) { - kern_return_t result; - thread_t thread; - spl_t s; - char name[MAXTHREADNAMESIZE]; + kern_return_t result; + thread_t thread; + spl_t s; + char name[MAXTHREADNAMESIZE]; result = kernel_thread_create((thread_continue_t)idle_thread, NULL, MAXPRI_KERNEL, &thread); - if (result != KERN_SUCCESS) - return (result); + if (result != KERN_SUCCESS) { + return result; + } snprintf(name, sizeof(name), "idle #%d", processor->cpu_id); thread_set_thread_name(thread, name); @@ -4826,7 +5151,7 @@ idle_thread_create( thread_deallocate(thread); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -4839,8 +5164,8 @@ idle_thread_create( void sched_startup(void) { - kern_return_t result; - thread_t thread; + kern_return_t result; + thread_t thread; simple_lock_init(&sched_vm_group_list_lock, 0); @@ -4850,8 +5175,9 @@ sched_startup(void) result = kernel_thread_start_priority((thread_continue_t)sched_init_thread, (void *)SCHED(maintenance_continuation), MAXPRI_KERNEL, &thread); - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { panic("sched_startup"); + } thread_deallocate(thread); @@ -4875,10 +5201,10 @@ static _Atomic uint64_t sched_perfcontrol_callback_deadline; #if defined(CONFIG_SCHED_TIMESHARE_CORE) -static volatile uint64_t sched_maintenance_deadline; -static uint64_t sched_tick_last_abstime; -static uint64_t sched_tick_delta; -uint64_t sched_tick_max_delta; +static volatile uint64_t sched_maintenance_deadline; +static uint64_t sched_tick_last_abstime; +static uint64_t sched_tick_delta; +uint64_t sched_tick_max_delta; /* @@ -4890,7 +5216,7 @@ uint64_t sched_tick_max_delta; void sched_timeshare_maintenance_continue(void) { - uint64_t sched_tick_ctime, late_time; + uint64_t sched_tick_ctime, late_time; struct sched_update_scan_context scan_context = { .earliest_bg_make_runnable_time = UINT64_MAX, @@ -4898,7 +5224,7 @@ sched_timeshare_maintenance_continue(void) .earliest_rt_make_runnable_time = UINT64_MAX }; - sched_tick_ctime = mach_absolute_time(); + sched_tick_ctime = mach_absolute_time(); if (__improbable(sched_tick_last_abstime == 0)) { sched_tick_last_abstime = sched_tick_ctime; @@ -4925,8 +5251,8 @@ sched_timeshare_maintenance_continue(void) sched_tick_max_delta = MAX(sched_tick_delta, sched_tick_max_delta); } - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE)|DBG_FUNC_START, - sched_tick_delta, late_time, 0, 0, 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_START, + sched_tick_delta, late_time, 0, 0, 0); /* Add a number of pseudo-ticks corresponding 
to the elapsed interval * This could be greater than 1 if substantial intervals where @@ -4954,13 +5280,13 @@ sched_timeshare_maintenance_continue(void) uint64_t ctime = mach_absolute_time(); uint64_t bg_max_latency = (ctime > scan_context.earliest_bg_make_runnable_time) ? - ctime - scan_context.earliest_bg_make_runnable_time : 0; + ctime - scan_context.earliest_bg_make_runnable_time : 0; uint64_t default_max_latency = (ctime > scan_context.earliest_normal_make_runnable_time) ? - ctime - scan_context.earliest_normal_make_runnable_time : 0; + ctime - scan_context.earliest_normal_make_runnable_time : 0; uint64_t realtime_max_latency = (ctime > scan_context.earliest_rt_make_runnable_time) ? - ctime - scan_context.earliest_rt_make_runnable_time : 0; + ctime - scan_context.earliest_rt_make_runnable_time : 0; machine_max_runnable_latency(bg_max_latency, default_max_latency, realtime_max_latency); @@ -4974,7 +5300,7 @@ sched_timeshare_maintenance_continue(void) sched_recommended_cores_maintenance(); #endif /* __arm__ || __arm64__ */ - + #if DEBUG || DEVELOPMENT #if __x86_64__ #include @@ -4984,8 +5310,8 @@ sched_timeshare_maintenance_continue(void) #endif /* DEBUG || DEVELOPMENT */ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_MAINTENANCE) | DBG_FUNC_END, - sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG], - sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0); + sched_pri_shifts[TH_BUCKET_SHARE_FG], sched_pri_shifts[TH_BUCKET_SHARE_BG], + sched_pri_shifts[TH_BUCKET_SHARE_UT], sched_pri_shifts[TH_BUCKET_SHARE_DF], 0); assert_wait((event_t)sched_timeshare_maintenance_continue, THREAD_UNINT); thread_block((thread_continue_t)sched_timeshare_maintenance_continue); @@ -5005,15 +5331,16 @@ static uint64_t sched_maintenance_wakeups; * no more than a comparison against the deadline in the common case. */ void -sched_timeshare_consider_maintenance(uint64_t ctime) { - +sched_timeshare_consider_maintenance(uint64_t ctime) +{ cpu_quiescent_counter_checkin(ctime); uint64_t deadline = sched_maintenance_deadline; if (__improbable(ctime >= deadline)) { - if (__improbable(current_thread() == sched_maintenance_thread)) + if (__improbable(current_thread() == sched_maintenance_thread)) { return; + } OSMemoryBarrier(); uint64_t ndeadline = ctime + sched_tick_interval; @@ -5029,7 +5356,7 @@ sched_timeshare_consider_maintenance(uint64_t ctime) { if (__improbable(load_compute_deadline && ctime >= load_compute_deadline)) { uint64_t new_deadline = 0; if (__c11_atomic_compare_exchange_strong(&sched_load_compute_deadline, &load_compute_deadline, new_deadline, - memory_order_relaxed, memory_order_relaxed)) { + memory_order_relaxed, memory_order_relaxed)) { compute_sched_load(); new_deadline = ctime + sched_load_compute_interval_abs; __c11_atomic_store(&sched_load_compute_deadline, new_deadline, memory_order_relaxed); @@ -5042,12 +5369,11 @@ sched_timeshare_consider_maintenance(uint64_t ctime) { if (__improbable(perf_deadline && ctime >= perf_deadline)) { /* CAS in 0, if success, make callback. Otherwise let the next context switch check again. 
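[Editor's note: both deadline checks in sched_timeshare_consider_maintenance() use the same lock-free claim idiom: the hot path is a relaxed load plus a compare, and once the deadline has passed, whichever caller wins the compare-and-swap (to 0, here) performs the work and publishes the next deadline. A runnable C11 sketch of the idiom; the names and interval are illustrative.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define INTERVAL 100                       /* illustrative tick interval */

static _Atomic uint64_t work_deadline = 100;

static void
do_periodic_work(uint64_t now)
{
    printf("work at %llu\n", (unsigned long long)now);
}

/* Called very frequently; cheap unless the deadline has actually passed. */
static void
consider_work(uint64_t now)
{
    uint64_t deadline = atomic_load_explicit(&work_deadline, memory_order_relaxed);

    if (deadline == 0 || now < deadline)
        return;

    /* CAS in 0: exactly one racing caller wins and runs the work;
     * losers see the updated value and fall through harmlessly. */
    if (atomic_compare_exchange_strong_explicit(&work_deadline, &deadline, 0,
            memory_order_relaxed, memory_order_relaxed)) {
        do_periodic_work(now);
        atomic_store_explicit(&work_deadline, now + INTERVAL,
            memory_order_relaxed);
    }
}

int main(void)
{
    for (uint64_t t = 0; t <= 300; t += 50)
        consider_work(t);                  /* fires at t = 100, 200, 300 */
    return 0;
}]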
*/ if (__c11_atomic_compare_exchange_strong(&sched_perfcontrol_callback_deadline, &perf_deadline, 0, - memory_order_relaxed, memory_order_relaxed)) { + memory_order_relaxed, memory_order_relaxed)) { machine_perfcontrol_deadline_passed(perf_deadline); } } #endif /* __arm64__ */ - } #endif /* CONFIG_SCHED_TIMESHARE_CORE */ @@ -5073,7 +5399,7 @@ sched_init_thread(void (*continuation)(void)) /* * thread_update_scan / runq_scan: * - * Scan the run queues to account for timesharing threads + * Scan the run queues to account for timesharing threads * which need to be updated. * * Scanner runs in two passes. Pass one squirrels likely @@ -5086,7 +5412,7 @@ sched_init_thread(void (*continuation)(void)) * disabling preemption for long periods. */ -#define THREAD_UPDATE_SIZE 128 +#define THREAD_UPDATE_SIZE 128 static thread_t thread_update_array[THREAD_UPDATE_SIZE]; static uint32_t thread_update_count = 0; @@ -5095,12 +5421,13 @@ static uint32_t thread_update_count = 0; boolean_t thread_update_add_thread(thread_t thread) { - if (thread_update_count == THREAD_UPDATE_SIZE) - return (FALSE); + if (thread_update_count == THREAD_UPDATE_SIZE) { + return FALSE; + } thread_update_array[thread_update_count++] = thread; thread_reference_internal(thread); - return (TRUE); + return TRUE; } void @@ -5108,7 +5435,7 @@ thread_update_process_threads(void) { assert(thread_update_count <= THREAD_UPDATE_SIZE); - for (uint32_t i = 0 ; i < thread_update_count ; i++) { + for (uint32_t i = 0; i < thread_update_count; i++) { thread_t thread = thread_update_array[i]; assert_thread_magic(thread); thread_update_array[i] = THREAD_NULL; @@ -5134,21 +5461,21 @@ thread_update_process_threads(void) */ boolean_t runq_scan( - run_queue_t runq, - sched_update_scan_context_t scan_context) + run_queue_t runq, + sched_update_scan_context_t scan_context) { int count = runq->count; int queue_index; assert(count >= 0); - if (count == 0) + if (count == 0) { return FALSE; + } for (queue_index = bitmap_first(runq->bitmap, NRQS); - queue_index >= 0; - queue_index = bitmap_next(runq->bitmap, queue_index)) { - + queue_index >= 0; + queue_index = bitmap_next(runq->bitmap, queue_index)) { thread_t thread; queue_t queue = &runq->queues[queue_index]; @@ -5158,8 +5485,9 @@ runq_scan( if (thread->sched_stamp != sched_tick && thread->sched_mode == TH_MODE_TIMESHARE) { - if (thread_update_add_thread(thread) == FALSE) + if (thread_update_add_thread(thread) == FALSE) { return TRUE; + } } if (cpu_throttle_enabled && ((thread->sched_pri <= MAXPRI_THROTTLE) && (thread->base_pri <= MAXPRI_THROTTLE))) { @@ -5181,13 +5509,13 @@ runq_scan( #endif /* CONFIG_SCHED_TIMESHARE_CORE */ boolean_t -thread_eager_preemption(thread_t thread) +thread_eager_preemption(thread_t thread) { - return ((thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0); + return (thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0; } void -thread_set_eager_preempt(thread_t thread) +thread_set_eager_preempt(thread_t thread) { spl_t x; processor_t p; @@ -5200,8 +5528,7 @@ thread_set_eager_preempt(thread_t thread) thread->sched_flags |= TH_SFLAG_EAGERPREEMPT; if (thread == current_thread()) { - - ast = csw_check(p, AST_NONE); + ast = csw_check(thread, p, AST_NONE); thread_unlock(thread); if (ast != AST_NONE) { (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast); @@ -5209,11 +5536,11 @@ thread_set_eager_preempt(thread_t thread) } else { p = thread->last_processor; - if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING && - p->active_thread == thread) { + if (p != PROCESSOR_NULL && p->state == 
PROCESSOR_RUNNING && + p->active_thread == thread) { cause_ast_check(p); } - + thread_unlock(thread); } @@ -5221,7 +5548,7 @@ thread_set_eager_preempt(thread_t thread) } void -thread_clear_eager_preempt(thread_t thread) +thread_clear_eager_preempt(thread_t thread) { spl_t x; @@ -5229,7 +5556,7 @@ thread_clear_eager_preempt(thread_t thread) thread_lock(thread); thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT; - + thread_unlock(thread); splx(x); } @@ -5242,7 +5569,7 @@ sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int othe { struct processor_sched_statistics *stats; boolean_t to_realtime = FALSE; - + stats = &processor->processor_data.sched_stats; stats->csw_count++; @@ -5256,17 +5583,16 @@ sched_stats_handle_csw(processor_t processor, int reasons, int selfpri, int othe if (selfpri >= BASEPRI_REALTIME) { stats->preempted_rt_count++; - } + } if (to_realtime) { stats->preempted_by_rt_count++; } - } } void -sched_stats_handle_runq_change(struct runq_stats *stats, int old_count) +sched_stats_handle_runq_change(struct runq_stats *stats, int old_count) { uint64_t timestamp = mach_absolute_time(); @@ -5280,23 +5606,24 @@ sched_stats_handle_runq_change(struct runq_stats *stats, int old_count) #undef thread_wakeup void thread_wakeup( - event_t x); + event_t x); void thread_wakeup( - event_t x) + event_t x) { - thread_wakeup_with_result(x, THREAD_AWAKENED); + thread_wakeup_with_result(x, THREAD_AWAKENED); } boolean_t preemption_enabled(void) { - return (get_preemption_level() == 0 && ml_get_interrupts_enabled()); + return get_preemption_level() == 0 && ml_get_interrupts_enabled(); } static void -sched_timer_deadline_tracking_init(void) { +sched_timer_deadline_tracking_init(void) +{ nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_1_DEFAULT, &timer_deadline_tracking_bin_1); nanoseconds_to_absolutetime(TIMER_DEADLINE_TRACKING_BIN_2_DEFAULT, &timer_deadline_tracking_bin_2); } @@ -5339,18 +5666,19 @@ sched_perfcontrol_update_recommended_cores(uint32_t recommended_cores) assert(preemption_enabled()); spl_t s = splsched(); - simple_lock(&sched_recommended_cores_lock); + simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL); perfcontrol_requested_recommended_cores = recommended_cores; perfcontrol_requested_recommended_core_count = __builtin_popcountll(recommended_cores); - if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) - sched_update_recommended_cores(perfcontrol_requested_recommended_cores); - else + if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) { + sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores); + } else { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE, - perfcontrol_requested_recommended_cores, - sched_maintenance_thread->last_made_runnable_time, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE, + perfcontrol_requested_recommended_cores, + sched_maintenance_thread->last_made_runnable_time, 0, 0, 0); + } simple_unlock(&sched_recommended_cores_lock); splx(s); @@ -5360,7 +5688,7 @@ void sched_override_recommended_cores_for_sleep(void) { spl_t s = splsched(); - simple_lock(&sched_recommended_cores_lock); + simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL); if (perfcontrol_sleep_override == false) { perfcontrol_sleep_override = true; @@ -5375,11 +5703,11 @@ void sched_restore_recommended_cores_after_sleep(void) { spl_t s = 
splsched(); - simple_lock(&sched_recommended_cores_lock); + simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL); if (perfcontrol_sleep_override == true) { perfcontrol_sleep_override = false; - sched_update_recommended_cores(perfcontrol_requested_recommended_cores); + sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores); } simple_unlock(&sched_recommended_cores_lock); @@ -5406,12 +5734,12 @@ sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread) if (__improbable(perfcontrol_failsafe_active == TRUE)) { /* keep track of how long the responsible thread runs */ - simple_lock(&sched_recommended_cores_lock); + simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL); if (perfcontrol_failsafe_active == TRUE && cur_thread->thread_id == perfcontrol_failsafe_tid) { perfcontrol_failsafe_thread_timer_last_seen = timer_grab(&cur_thread->user_timer) + - timer_grab(&cur_thread->system_timer); + timer_grab(&cur_thread->system_timer); } simple_unlock(&sched_recommended_cores_lock); @@ -5421,8 +5749,9 @@ sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread) } /* The failsafe won't help if there are no more processors to enable */ - if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) + if (__probable(perfcontrol_requested_recommended_core_count >= processor_count)) { return; + } uint64_t too_long_ago = ctime - perfcontrol_failsafe_starvation_threshold; @@ -5430,14 +5759,15 @@ sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread) thread_t m_thread = sched_maintenance_thread; /* If it doesn't look bad, nothing to see here */ - if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) + if (__probable(m_thread->last_made_runnable_time >= too_long_ago)) { return; + } /* It looks bad, take the lock to be sure */ thread_lock(m_thread); if (m_thread->runq == PROCESSOR_NULL || - (m_thread->state & (TH_RUN|TH_WAIT)) != TH_RUN || + (m_thread->state & (TH_RUN | TH_WAIT)) != TH_RUN || m_thread->last_made_runnable_time >= too_long_ago) { /* * Maintenance thread is either on cpu or blocked, and @@ -5463,7 +5793,7 @@ sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread) * TODO: Consider weird states like boot, sleep, or debugger */ - simple_lock(&sched_recommended_cores_lock); + simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL); if (perfcontrol_failsafe_active == TRUE) { simple_unlock(&sched_recommended_cores_lock); @@ -5471,8 +5801,8 @@ sched_consider_recommended_cores(uint64_t ctime, thread_t cur_thread) } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START, - perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_START, + perfcontrol_requested_recommended_cores, maintenance_runnable_time, 0, 0, 0); perfcontrol_failsafe_active = TRUE; perfcontrol_failsafe_activation_time = mach_absolute_time(); @@ -5510,8 +5840,9 @@ static void sched_recommended_cores_maintenance(void) { /* Common case - no failsafe, nothing to be done here */ - if (__probable(perfcontrol_failsafe_active == FALSE)) + if (__probable(perfcontrol_failsafe_active == FALSE)) { return; + } uint64_t ctime = mach_absolute_time(); @@ -5519,19 +5850,21 @@ sched_recommended_cores_maintenance(void) char p_name[FAILSAFE_NAME_LEN] = ""; spl_t s = splsched(); - simple_lock(&sched_recommended_cores_lock); + simple_lock(&sched_recommended_cores_lock, 
LCK_GRP_NULL); /* Check again, under the lock, to avoid races */ - if (perfcontrol_failsafe_active == FALSE) + if (perfcontrol_failsafe_active == FALSE) { goto out; + } /* * Ensure that the other cores get another few ticks to run some threads * If we don't have this hysteresis, the maintenance thread is the first * to run, and then it immediately kills the other cores */ - if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) + if ((ctime - perfcontrol_failsafe_activation_time) < perfcontrol_failsafe_starvation_threshold) { goto out; + } /* Capture some diagnostic state under the lock so we can print it out later */ @@ -5539,7 +5872,7 @@ sched_recommended_cores_maintenance(void) uint64_t tid = perfcontrol_failsafe_tid; uint64_t thread_usage = perfcontrol_failsafe_thread_timer_last_seen - - perfcontrol_failsafe_thread_timer_at_start; + perfcontrol_failsafe_thread_timer_at_start; uint32_t rec_cores_before = perfcontrol_failsafe_recommended_at_trigger; uint32_t rec_cores_after = perfcontrol_requested_recommended_cores; uint64_t failsafe_duration = ctime - perfcontrol_failsafe_activation_time; @@ -5553,10 +5886,10 @@ sched_recommended_cores_maintenance(void) perfcontrol_failsafe_active = FALSE; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_SCHED,MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END, - perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_END, + perfcontrol_requested_recommended_cores, failsafe_duration, 0, 0, 0); - sched_update_recommended_cores(perfcontrol_requested_recommended_cores); + sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores); out: simple_unlock(&sched_recommended_cores_lock); @@ -5572,14 +5905,50 @@ out: thread_usage_ms = thread_usage_ms / NSEC_PER_MSEC; printf("recommended core failsafe kicked in for %lld ms " - "likely due to %s[%d] thread 0x%llx spending " - "%lld ms on cpu at realtime priority - " - "new recommendation: 0x%x -> 0x%x\n", - failsafe_duration_ms, p_name, pid, tid, thread_usage_ms, - rec_cores_before, rec_cores_after); + "likely due to %s[%d] thread 0x%llx spending " + "%lld ms on cpu at realtime priority - " + "new recommendation: 0x%x -> 0x%x\n", + failsafe_duration_ms, p_name, pid, tid, thread_usage_ms, + rec_cores_before, rec_cores_after); + } +} + +#endif /* __arm__ || __arm64__ */ + +kern_return_t +sched_processor_enable(processor_t processor, boolean_t enable) +{ + assert(preemption_enabled()); + + spl_t s = splsched(); + simple_lock(&sched_recommended_cores_lock, LCK_GRP_NULL); + + if (enable) { + bit_set(usercontrol_requested_recommended_cores, processor->cpu_id); + } else { + bit_clear(usercontrol_requested_recommended_cores, processor->cpu_id); } + +#if __arm__ || __arm64__ + if ((perfcontrol_failsafe_active == false) && (perfcontrol_sleep_override == false)) { + sched_update_recommended_cores(perfcontrol_requested_recommended_cores & usercontrol_requested_recommended_cores); + } else { + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + MACHDBG_CODE(DBG_MACH_SCHED, MACH_REC_CORES_FAILSAFE) | DBG_FUNC_NONE, + perfcontrol_requested_recommended_cores, + sched_maintenance_thread->last_made_runnable_time, 0, 0, 0); + } +#else /* __arm__ || __arm64__ */ + sched_update_recommended_cores(usercontrol_requested_recommended_cores); +#endif /* !__arm__ || __arm64__ */ + + simple_unlock(&sched_recommended_cores_lock); + splx(s); + + return KERN_SUCCESS; } + /* * 
Apply a new recommended cores mask to the processors it affects * Runs after considering failsafes and such @@ -5592,26 +5961,32 @@ out: * interrupts disabled, sched_recommended_cores_lock is held */ static void -sched_update_recommended_cores(uint32_t recommended_cores) +sched_update_recommended_cores(uint64_t recommended_cores) { processor_set_t pset, nset; processor_t processor; uint64_t needs_exit_idle_mask = 0x0; + uint32_t avail_count; processor = processor_list; pset = processor->processor_set; - KDBG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START, - recommended_cores, perfcontrol_failsafe_active, 0, 0); + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_START, + recommended_cores, +#if __arm__ || __arm64__ + perfcontrol_failsafe_active, 0, 0); +#else /* __arm__ || __arm64__ */ + 0, 0, 0); +#endif /* ! __arm__ || __arm64__ */ - if (__builtin_popcount(recommended_cores) == 0) { + if (__builtin_popcountll(recommended_cores) == 0) { bit_set(recommended_cores, master_processor->cpu_id); /* add boot processor or we hang */ } /* First set recommended cores */ pset_lock(pset); + avail_count = 0; do { - nset = processor->processor_set; if (nset != pset) { pset_unlock(pset); @@ -5628,6 +6003,9 @@ sched_update_recommended_cores(uint32_t recommended_cores) bit_set(needs_exit_idle_mask, processor->cpu_id); } } + if (processor->state != PROCESSOR_OFF_LINE) { + avail_count++; + } } } while ((processor = processor->processor_list) != NULL); pset_unlock(pset); @@ -5638,7 +6016,6 @@ sched_update_recommended_cores(uint32_t recommended_cores) pset_lock(pset); do { - nset = processor->processor_set; if (nset != pset) { pset_unlock(pset); @@ -5671,6 +6048,12 @@ sched_update_recommended_cores(uint32_t recommended_cores) pset_lock(pset); } } while ((processor = processor->processor_list) != NULL); + + processor_avail_count_user = avail_count; +#if defined(__x86_64__) + commpage_update_active_cpus(); +#endif + pset_unlock(pset); /* Issue all pending IPIs now that the pset lock has been dropped */ @@ -5679,54 +6062,76 @@ sched_update_recommended_cores(uint32_t recommended_cores) machine_signal_idle(processor); } - KDBG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END, - needs_exit_idle_mask, 0, 0, 0); + KDBG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_UPDATE_REC_CORES) | DBG_FUNC_END, + needs_exit_idle_mask, 0, 0, 0); } -#endif /* __arm__ || __arm64__ */ -void thread_set_options(uint32_t thopt) { - spl_t x; - thread_t t = current_thread(); - - x = splsched(); - thread_lock(t); - - t->options |= thopt; - - thread_unlock(t); - splx(x); +void +thread_set_options(uint32_t thopt) +{ + spl_t x; + thread_t t = current_thread(); + + x = splsched(); + thread_lock(t); + + t->options |= thopt; + + thread_unlock(t); + splx(x); } -void thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint) { +void +thread_set_pending_block_hint(thread_t thread, block_hint_t block_hint) +{ thread->pending_block_hint = block_hint; } -uint32_t qos_max_parallelism(int qos, uint64_t options) +uint32_t +qos_max_parallelism(int qos, uint64_t options) { - return SCHED(qos_max_parallelism)(qos, options); + return SCHED(qos_max_parallelism)(qos, options); } -uint32_t sched_qos_max_parallelism(__unused int qos, uint64_t options) +uint32_t +sched_qos_max_parallelism(__unused int qos, uint64_t options) { - host_basic_info_data_t hinfo; - mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; - /* Query the machine layer for core information */ - __assert_only 
kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO, - (host_info_t)&hinfo, &count); - assert(kret == KERN_SUCCESS); + host_basic_info_data_t hinfo; + mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; + /* Query the machine layer for core information */ + __assert_only kern_return_t kret = host_info(host_self(), HOST_BASIC_INFO, + (host_info_t)&hinfo, &count); + assert(kret == KERN_SUCCESS); + + /* We would not want multiple realtime threads running on the + * same physical core; even for SMT capable machines. + */ + if (options & QOS_PARALLELISM_REALTIME) { + return hinfo.physical_cpu; + } + + if (options & QOS_PARALLELISM_COUNT_LOGICAL) { + return hinfo.logical_cpu; + } else { + return hinfo.physical_cpu; + } +} - /* We would not want multiple realtime threads running on the - * same physical core; even for SMT capable machines. - */ - if (options & QOS_PARALLELISM_REALTIME) { - return hinfo.physical_cpu; - } +int sched_allow_NO_SMT_threads = 1; +bool +thread_no_smt(thread_t thread) +{ +#if DEBUG || DEVELOPMENT + return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && ((thread->sched_flags & TH_SFLAG_NO_SMT) || (thread->task->t_flags & TF_NO_SMT)); +#else + return sched_allow_NO_SMT_threads && (thread->bound_processor == PROCESSOR_NULL) && (thread->sched_flags & TH_SFLAG_NO_SMT); +#endif +} - if (options & QOS_PARALLELISM_COUNT_LOGICAL) { - return hinfo.logical_cpu; - } else { - return hinfo.physical_cpu; - } +bool +processor_active_thread_no_smt(processor_t processor) +{ + return sched_allow_NO_SMT_threads && !processor->current_is_bound && processor->current_is_NO_SMT; } #if __arm64__ @@ -5745,12 +6150,14 @@ sched_perfcontrol_update_callback_deadline(uint64_t new_deadline) */ uint64_t old_deadline = __c11_atomic_load(&sched_perfcontrol_callback_deadline, - memory_order_relaxed); + memory_order_relaxed); while (!__c11_atomic_compare_exchange_weak(&sched_perfcontrol_callback_deadline, - &old_deadline, new_deadline, - memory_order_relaxed, memory_order_relaxed)); + &old_deadline, new_deadline, + memory_order_relaxed, memory_order_relaxed)) { + ; + } /* now old_deadline contains previous value, which might not be the same if it raced */ @@ -5776,9 +6183,20 @@ sched_update_pset_load_average(processor_set_t pset) static processor_t choose_processor_for_realtime_thread(processor_set_t pset) { - uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask & ~pset->pending_AST_cpu_mask); +#if defined(__x86_64__) + bool avoid_cpu0 = sched_avoid_cpu0 && bit_test(pset->cpu_bitmask, 0); +#else + const bool avoid_cpu0 = false; +#endif + + uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask & ~pset->pending_AST_URGENT_cpu_mask); + if (avoid_cpu0) { + cpu_map = bit_ror64(cpu_map, 1); + } + + for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) { + int cpuid = avoid_cpu0 ? 
((rotid + 1) & 63) : rotid; - for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) { processor_t processor = processor_array[cpuid]; if (processor->processor_primary != processor) { @@ -5798,7 +6216,6 @@ choose_processor_for_realtime_thread(processor_set_t pset) } return processor; - } if (!sched_allow_rt_smt) { @@ -5806,7 +6223,13 @@ choose_processor_for_realtime_thread(processor_set_t pset) } /* Consider secondary processors */ - for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) { + if (avoid_cpu0) { + /* Also avoid cpu1 */ + cpu_map = bit_ror64(cpu_map, 1); + } + for (int rotid = lsb_first(cpu_map); rotid >= 0; rotid = lsb_next(cpu_map, rotid)) { + int cpuid = avoid_cpu0 ? ((rotid + 2) & 63) : rotid; + processor_t processor = processor_array[cpuid]; if (processor->processor_primary == processor) { @@ -5826,7 +6249,6 @@ choose_processor_for_realtime_thread(processor_set_t pset) } return processor; - } return PROCESSOR_NULL; @@ -5836,15 +6258,18 @@ choose_processor_for_realtime_thread(processor_set_t pset) static bool all_available_primaries_are_running_realtime_threads(processor_set_t pset) { - uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask); + return these_processors_are_running_realtime_threads(pset, pset->primary_map); +} + +/* pset is locked */ +static bool +these_processors_are_running_realtime_threads(processor_set_t pset, uint64_t these_map) +{ + uint64_t cpu_map = (pset->cpu_bitmask & pset->recommended_bitmask) & these_map; for (int cpuid = lsb_first(cpu_map); cpuid >= 0; cpuid = lsb_next(cpu_map, cpuid)) { processor_t processor = processor_array[cpuid]; - if (processor->processor_primary != processor) { - continue; - } - if (processor->state == PROCESSOR_IDLE) { return false; } @@ -5872,4 +6297,72 @@ all_available_primaries_are_running_realtime_threads(processor_set_t pset) return true; } +static bool +sched_ok_to_run_realtime_thread(processor_set_t pset, processor_t processor) +{ + bool ok_to_run_realtime_thread = true; +#if defined(__x86_64__) + if (sched_avoid_cpu0 && processor->cpu_id == 0) { + ok_to_run_realtime_thread = these_processors_are_running_realtime_threads(pset, pset->primary_map & ~0x1); + } else if (sched_avoid_cpu0 && (processor->cpu_id == 1) && processor->is_SMT) { + ok_to_run_realtime_thread = sched_allow_rt_smt && these_processors_are_running_realtime_threads(pset, ~0x2); + } else if (processor->processor_primary != processor) { + ok_to_run_realtime_thread = sched_allow_rt_smt && all_available_primaries_are_running_realtime_threads(pset); + } +#else + (void)pset; + (void)processor; +#endif + return ok_to_run_realtime_thread; +} + +void +thread_set_no_smt(bool set) +{ + thread_t thread = current_thread(); + spl_t s = splsched(); + thread_lock(thread); + if (set) { + thread->sched_flags |= TH_SFLAG_NO_SMT; + } else { + thread->sched_flags &= ~TH_SFLAG_NO_SMT; + } + thread_unlock(thread); + splx(s); +} + +bool +thread_get_no_smt(void) +{ + return current_thread()->sched_flags & TH_SFLAG_NO_SMT; +} + +#if DEBUG || DEVELOPMENT +extern void sysctl_task_set_no_smt(char no_smt); +void +sysctl_task_set_no_smt(char no_smt) +{ + thread_t thread = current_thread(); + task_t task = thread->task; + + if (no_smt == '1') { + task->t_flags |= TF_NO_SMT; + } else { + task->t_flags &= ~TF_NO_SMT; + } +} + +extern char sysctl_task_get_no_smt(void); +char +sysctl_task_get_no_smt(void) +{ + thread_t thread = current_thread(); + task_t task = thread->task; + + if (task->t_flags & TF_NO_SMT) { + 
return '1'; + } + return '0'; +} +#endif diff --git a/osfmk/kern/sched_prim.h b/osfmk/kern/sched_prim.h index bd67f6869..12776a617 100644 --- a/osfmk/kern/sched_prim.h +++ b/osfmk/kern/sched_prim.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -63,7 +63,7 @@ * */ -#ifndef _KERN_SCHED_PRIM_H_ +#ifndef _KERN_SCHED_PRIM_H_ #define _KERN_SCHED_PRIM_H_ #include @@ -75,75 +75,80 @@ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE + +#include /* Initialization */ -extern void sched_init(void); +extern void sched_init(void); -extern void sched_startup(void); +extern void sched_startup(void); -extern void sched_timebase_init(void); +extern void sched_timebase_init(void); -extern void pset_rt_init(processor_set_t pset); +extern void pset_rt_init(processor_set_t pset); -extern void sched_rtglobal_init(processor_set_t pset); +extern void sched_rtglobal_init(processor_set_t pset); -extern rt_queue_t sched_rtglobal_runq(processor_set_t pset); +extern rt_queue_t sched_rtglobal_runq(processor_set_t pset); -extern void sched_rtglobal_queue_shutdown(processor_t processor); +extern void sched_rtglobal_queue_shutdown(processor_t processor); -extern int64_t sched_rtglobal_runq_count_sum(void); +extern int64_t sched_rtglobal_runq_count_sum(void); -extern void sched_check_spill(processor_set_t pset, thread_t thread); +extern void sched_check_spill(processor_set_t pset, thread_t thread); extern bool sched_thread_should_yield(processor_t processor, thread_t thread); +extern bool sched_steal_thread_DISABLED(processor_set_t pset); +extern bool sched_steal_thread_enabled(processor_set_t pset); + /* Force a preemption point for a thread and wait for it to stop running */ -extern boolean_t thread_stop( - thread_t thread, - boolean_t until_not_runnable); +extern boolean_t thread_stop( + thread_t thread, + boolean_t until_not_runnable); /* Release a previous stop request */ -extern void thread_unstop( - thread_t thread); +extern void thread_unstop( + thread_t thread); /* Wait for a thread to stop running */ -extern void thread_wait( - thread_t thread, - boolean_t until_not_runnable); +extern void thread_wait( + thread_t thread, + boolean_t until_not_runnable); /* Unblock thread on wake up */ -extern boolean_t thread_unblock( - thread_t thread, - wait_result_t wresult); +extern boolean_t thread_unblock( + thread_t thread, + wait_result_t wresult); /* Unblock and dispatch thread */ -extern kern_return_t thread_go( - thread_t thread, - wait_result_t wresult); +extern kern_return_t thread_go( + thread_t thread, + wait_result_t wresult); /* Handle threads at context switch */ -extern void thread_dispatch( - thread_t old_thread, - thread_t new_thread); +extern void thread_dispatch( + thread_t old_thread, + thread_t new_thread); /* Switch directly to a particular thread */ -extern int thread_run( - thread_t self, - thread_continue_t continuation, - void *parameter, - thread_t new_thread); +extern int thread_run( + thread_t self, + thread_continue_t continuation, + void *parameter, + thread_t new_thread); /* Resume thread with new stack */ -extern void thread_continue( - thread_t old_thread); +extern void thread_continue( + thread_t old_thread); /* Invoke continuation */ -extern void call_continuation( - thread_continue_t continuation, - void *parameter, - wait_result_t wresult, - boolean_t enable_interrupts); +extern void call_continuation( + thread_continue_t continuation, + void *parameter, + wait_result_t wresult, + boolean_t enable_interrupts); /* * Flags that can be passed to set_sched_pri @@ -156,24 +161,24 @@ typedef enum { /* Set the current scheduled priority */ extern void set_sched_pri( - thread_t thread, - int priority, - set_sched_pri_options_t options); + thread_t thread, + int priority, + set_sched_pri_options_t options); 
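Note: the sched_perfcontrol_update_callback_deadline hunk in sched_prim.c above is a textbook compare-and-swap update loop: load the current deadline, then retry a weak compare-exchange until the store lands, at which point old_deadline holds whatever value was actually displaced (which may differ from the first load if another CPU raced in between). A minimal standalone sketch of the same pattern, using portable C11 <stdatomic.h> in place of the kernel's __c11_atomic builtins; the names callback_deadline and update_deadline are illustrative, not xnu symbols:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t callback_deadline;

    /* Atomically publish a new deadline and return the value displaced.
     * compare_exchange_weak can fail spuriously or because another
     * thread won the race; either way it reloads 'old', so we loop. */
    static uint64_t
    update_deadline(uint64_t new_deadline)
    {
        uint64_t old = atomic_load_explicit(&callback_deadline, memory_order_relaxed);
        while (!atomic_compare_exchange_weak_explicit(&callback_deadline,
            &old, new_deadline, memory_order_relaxed, memory_order_relaxed)) {
            ; /* 'old' now holds the current value; retry the swap */
        }
        return old; /* previous deadline, possibly not the one first loaded */
    }

    int
    main(void)
    {
        update_deadline(1000);
        printf("displaced %llu\n", (unsigned long long)update_deadline(2000));
        return 0;
    }

The only change the patch itself makes to that loop is stylistic: the empty retry loop gains an explicit { ; } body, so the intent can no longer be mistaken for an accidental trailing semicolon.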
/* Set base priority of the specified thread */ -extern void sched_set_thread_base_priority( - thread_t thread, - int priority); +extern void sched_set_thread_base_priority( + thread_t thread, + int priority); /* Set the thread's true scheduling mode */ extern void sched_set_thread_mode(thread_t thread, - sched_mode_t mode); + sched_mode_t mode); /* Demote the true scheduler mode */ extern void sched_thread_mode_demote(thread_t thread, - uint32_t reason); + uint32_t reason); /* Un-demote the true scheduler mode */ extern void sched_thread_mode_undemote(thread_t thread, - uint32_t reason); + uint32_t reason); extern void sched_thread_promote_to_pri(thread_t thread, int priority, uintptr_t trace_obj); extern void sched_thread_update_promotion_to_pri(thread_t thread, int priority, uintptr_t trace_obj); @@ -189,44 +194,44 @@ void thread_recompute_priority(thread_t thread); /* Re-evaluate scheduled priority of thread (thread locked) */ extern void thread_recompute_sched_pri( - thread_t thread, - set_sched_pri_options_t options); + thread_t thread, + set_sched_pri_options_t options); /* Periodic scheduler activity */ -extern void sched_init_thread(void (*)(void)); +extern void sched_init_thread(void (*)(void)); /* Perform sched_tick housekeeping activities */ -extern boolean_t can_update_priority( - thread_t thread); +extern boolean_t can_update_priority( + thread_t thread); -extern void update_priority( - thread_t thread); +extern void update_priority( + thread_t thread); -extern void lightweight_update_priority( - thread_t thread); +extern void lightweight_update_priority( + thread_t thread); extern void sched_default_quantum_expire(thread_t thread); /* Idle processor thread */ -extern void idle_thread(void); +extern void idle_thread(void); -extern kern_return_t idle_thread_create( - processor_t processor); +extern kern_return_t idle_thread_create( + processor_t processor); /* Continuation return from syscall */ extern void thread_syscall_return( - kern_return_t ret); + kern_return_t ret); /* Context switch */ -extern wait_result_t thread_block_reason( - thread_continue_t continuation, - void *parameter, - ast_t reason); +extern wait_result_t thread_block_reason( + thread_continue_t continuation, + void *parameter, + ast_t reason); /* Reschedule thread for execution */ -extern void thread_setrun( - thread_t thread, - integer_t options); +extern void thread_setrun( + thread_t thread, + integer_t options); typedef enum { SCHED_NONE = 0x0, @@ -234,78 +239,78 @@ typedef enum { SCHED_HEADQ = 0x2, SCHED_PREEMPT = 0x4, SCHED_REBALANCE = 0x8, + SCHED_PEEK = 0x10, } sched_options_t; -extern processor_set_t task_choose_pset( - task_t task); +extern processor_set_t task_choose_pset( + task_t task); /* Bind the current thread to a particular processor */ -extern processor_t thread_bind( - processor_t processor); +extern processor_t thread_bind( + processor_t processor); /* Choose the best processor to run a thread */ -extern processor_t choose_processor( - processor_set_t pset, - processor_t processor, - thread_t thread); +extern processor_t choose_processor( + processor_set_t pset, + processor_t processor, + thread_t thread); extern void sched_SMT_balance( - processor_t processor, - processor_set_t pset); + processor_t processor, + processor_set_t pset); extern void thread_quantum_init( - thread_t thread); - -extern void run_queue_init( - run_queue_t runq); - -extern thread_t run_queue_dequeue( - run_queue_t runq, - integer_t options); - -extern boolean_t run_queue_enqueue( - run_queue_t runq, - thread_t 
thread, - integer_t options); - -extern void run_queue_remove( - run_queue_t runq, - thread_t thread); - -struct sched_update_scan_context -{ - uint64_t earliest_bg_make_runnable_time; - uint64_t earliest_normal_make_runnable_time; - uint64_t earliest_rt_make_runnable_time; + thread_t thread); + +extern void run_queue_init( + run_queue_t runq); + +extern thread_t run_queue_dequeue( + run_queue_t runq, + integer_t options); + +extern boolean_t run_queue_enqueue( + run_queue_t runq, + thread_t thread, + integer_t options); + +extern void run_queue_remove( + run_queue_t runq, + thread_t thread); + +struct sched_update_scan_context { + uint64_t earliest_bg_make_runnable_time; + uint64_t earliest_normal_make_runnable_time; + uint64_t earliest_rt_make_runnable_time; }; typedef struct sched_update_scan_context *sched_update_scan_context_t; -extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context); +extern void sched_rtglobal_runq_scan(sched_update_scan_context_t scan_context); -/* - * Enum to define various events which need IPIs. The IPI policy - * engine decides what kind of IPI to use based on destination +/* + * Enum to define various events which need IPIs. The IPI policy + * engine decides what kind of IPI to use based on destination * processor state, thread and one of the following scheduling events. */ typedef enum { SCHED_IPI_EVENT_BOUND_THR = 0x1, - SCHED_IPI_EVENT_PREEMPT = 0x2, + SCHED_IPI_EVENT_PREEMPT = 0x2, SCHED_IPI_EVENT_SMT_REBAL = 0x3, - SCHED_IPI_EVENT_SPILL = 0x4, + SCHED_IPI_EVENT_SPILL = 0x4, SCHED_IPI_EVENT_REBALANCE = 0x5, } sched_ipi_event_t; /* Enum to define various IPI types used by the scheduler */ typedef enum { - SCHED_IPI_NONE = 0x0, - SCHED_IPI_IMMEDIATE = 0x1, - SCHED_IPI_IDLE = 0x2, - SCHED_IPI_DEFERRED = 0x3, + SCHED_IPI_NONE = 0x0, + SCHED_IPI_IMMEDIATE = 0x1, + SCHED_IPI_IDLE = 0x2, + SCHED_IPI_DEFERRED = 0x3, } sched_ipi_type_t; /* The IPI policy engine behaves in the following manner: - * - All scheduler events which need an IPI invoke sched_ipi_action() with + * - All scheduler events which need an IPI invoke sched_ipi_action() with * the appropriate destination processor, thread and event. * - sched_ipi_action() performs basic checks, invokes the scheduler specific * ipi_policy routine and sets pending_AST bits based on the result. @@ -313,16 +318,16 @@ typedef enum { * routine which actually sends the appropriate IPI to the destination core. 
*/ extern sched_ipi_type_t sched_ipi_action(processor_t dst, thread_t thread, - boolean_t dst_idle, sched_ipi_event_t event); + boolean_t dst_idle, sched_ipi_event_t event); extern void sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi); /* sched_ipi_policy() is the global default IPI policy for all schedulers */ extern sched_ipi_type_t sched_ipi_policy(processor_t dst, thread_t thread, - boolean_t dst_idle, sched_ipi_event_t event); + boolean_t dst_idle, sched_ipi_event_t event); /* sched_ipi_deferred_policy() is the global default deferred IPI policy for all schedulers */ extern sched_ipi_type_t sched_ipi_deferred_policy(processor_set_t pset, - processor_t dst, sched_ipi_event_t event); + processor_t dst, sched_ipi_event_t event); #if defined(CONFIG_SCHED_TIMESHARE_CORE) @@ -342,63 +347,63 @@ extern int sched_compute_timeshare_priority(thread_t thread); #endif /* CONFIG_SCHED_TIMESHARE_CORE */ /* Remove thread from its run queue */ -extern boolean_t thread_run_queue_remove(thread_t thread); +extern boolean_t thread_run_queue_remove(thread_t thread); thread_t thread_run_queue_remove_for_handoff(thread_t thread); /* Put a thread back in the run queue after being yanked */ extern void thread_run_queue_reinsert(thread_t thread, integer_t options); -extern void thread_timer_expire( - void *thread, - void *p1); +extern void thread_timer_expire( + void *thread, + void *p1); -extern boolean_t thread_eager_preemption( - thread_t thread); +extern boolean_t thread_eager_preemption( + thread_t thread); extern boolean_t sched_generic_direct_dispatch_to_idle_processors; /* Set the maximum interrupt level for the thread */ __private_extern__ wait_interrupt_t thread_interrupt_level( - wait_interrupt_t interruptible); + wait_interrupt_t interruptible); __private_extern__ wait_result_t thread_mark_wait_locked( - thread_t thread, - wait_interrupt_t interruptible); + thread_t thread, + wait_interrupt_t interruptible); /* Wake up locked thread directly, passing result */ __private_extern__ kern_return_t clear_wait_internal( - thread_t thread, - wait_result_t result); + thread_t thread, + wait_result_t result); extern void sched_stats_handle_csw( - processor_t processor, - int reasons, - int selfpri, - int otherpri); + processor_t processor, + int reasons, + int selfpri, + int otherpri); extern void sched_stats_handle_runq_change( - struct runq_stats *stats, - int old_count); + struct runq_stats *stats, + int old_count); #if DEBUG -#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \ -do { \ - if (__builtin_expect(sched_stats_active, 0)) { \ - sched_stats_handle_csw((processor), \ - (reasons), (selfpri), (otherpri)); \ - } \ -} while (0) +#define SCHED_STATS_CSW(processor, reasons, selfpri, otherpri) \ +do { \ + if (__builtin_expect(sched_stats_active, 0)) { \ + sched_stats_handle_csw((processor), \ + (reasons), (selfpri), (otherpri)); \ + } \ +} while (0) -#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \ -do { \ - if (__builtin_expect(sched_stats_active, 0)) { \ - sched_stats_handle_runq_change((stats), \ - (old_count)); \ - } \ -} while (0) +#define SCHED_STATS_RUNQ_CHANGE(stats, old_count) \ +do { \ + if (__builtin_expect(sched_stats_active, 0)) { \ + sched_stats_handle_runq_change((stats), \ + (old_count)); \ + } \ +} while (0) #else /* DEBUG */ @@ -408,47 +413,28 @@ do { \ #endif /* DEBUG */ extern uint32_t sched_debug_flags; -#define SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS 0x00000001 -#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002 +#define 
SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS 0x00000001 +#define SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS 0x00000002 -#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do { \ - if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \ - KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \ - } \ +#define SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(...) do { \ + if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_PLATFORM_TRACEPOINTS)) { \ + KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \ + } \ } while(0) -#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do { \ - if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \ - KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \ - } \ +#define SCHED_DEBUG_CHOOSE_PROCESSOR_KERNEL_DEBUG_CONSTANT(...) do { \ + if (__improbable(sched_debug_flags & SCHED_DEBUG_FLAG_CHOOSE_PROCESSOR_TRACEPOINTS)) { \ + KERNEL_DEBUG_CONSTANT(__VA_ARGS__); \ + } \ } while(0) -#define THREAD_URGENCY_NONE 0 /* indicates that there is no currently runnable */ -#define THREAD_URGENCY_BACKGROUND 1 /* indicates that the thread is marked as a "background" thread */ -#define THREAD_URGENCY_NORMAL 2 /* indicates that the thread is marked as a "normal" thread */ -#define THREAD_URGENCY_REAL_TIME 3 /* indicates that the thread is marked as a "real-time" or urgent thread */ -#define THREAD_URGENCY_MAX 4 /* Marker */ -/* Returns the "urgency" of a thread (provided by scheduler) */ -extern int thread_get_urgency( - thread_t thread, - uint64_t *rt_period, - uint64_t *rt_deadline); - -/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */ -extern void thread_tell_urgency( - int urgency, - uint64_t rt_period, - uint64_t rt_deadline, - uint64_t sched_latency, - thread_t nthread); - /* Tells if there are "active" RT threads in the system (provided by CPU PM) */ -extern void active_rt_threads( - boolean_t active); +extern void active_rt_threads( + boolean_t active); /* Returns the perfcontrol attribute for the thread */ extern perfcontrol_class_t thread_get_perfcontrol_class( - thread_t thread); + thread_t thread); /* Generic routine for Non-AMP schedulers to calculate parallelism */ extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options); @@ -457,54 +443,54 @@ extern uint32_t sched_qos_max_parallelism(int qos, uint64_t options); __BEGIN_DECLS -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE /* Toggles a global override to turn off CPU Throttling */ -extern void sys_override_cpu_throttle(boolean_t enable_override); +extern void sys_override_cpu_throttle(boolean_t enable_override); /* ****************** Only exported until BSD stops using ******************** */ -extern void thread_vm_bind_group_add(void); +extern void thread_vm_bind_group_add(void); /* Wake up thread directly, passing result */ extern kern_return_t clear_wait( - thread_t thread, - wait_result_t result); + thread_t thread, + wait_result_t result); /* Start thread running */ -extern void thread_bootstrap_return(void) __attribute__((noreturn)); +extern void thread_bootstrap_return(void) __attribute__((noreturn)); /* Return from exception (BSD-visible interface) */ -extern void thread_exception_return(void) __dead2; +extern void thread_exception_return(void) __dead2; #define SCHED_STRING_MAX_LENGTH (48) /* String declaring the name of the current scheduler */ extern char sched_string[SCHED_STRING_MAX_LENGTH]; -extern thread_t port_name_to_thread_for_ulock(mach_port_name_t thread_name); +extern thread_t port_name_to_thread_for_ulock(mach_port_name_t 
thread_name); /* Attempt to context switch to a specific runnable thread */ extern wait_result_t thread_handoff_deallocate(thread_t thread); __attribute__((nonnull(1, 2))) extern void thread_handoff_parameter(thread_t thread, - thread_continue_t continuation, void *parameter) __dead2; + thread_continue_t continuation, void *parameter) __dead2; -extern struct waitq *assert_wait_queue(event_t event); +extern struct waitq *assert_wait_queue(event_t event); extern kern_return_t thread_wakeup_one_with_pri(event_t event, int priority); extern thread_t thread_wakeup_identify(event_t event, int priority); -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ #ifdef KERNEL_PRIVATE /* Set pending block hint for a particular object before we go into a wait state */ -extern void thread_set_pending_block_hint( - thread_t thread, - block_hint_t block_hint); +extern void thread_set_pending_block_hint( + thread_t thread, + block_hint_t block_hint); #define QOS_PARALLELISM_COUNT_LOGICAL 0x1 #define QOS_PARALLELISM_REALTIME 0x2 @@ -512,65 +498,65 @@ extern uint32_t qos_max_parallelism(int qos, uint64_t options); #endif /* KERNEL_PRIVATE */ #if XNU_KERNEL_PRIVATE -extern void thread_yield_with_continuation( - thread_continue_t continuation, - void *parameter) __dead2; +extern void thread_yield_with_continuation( + thread_continue_t continuation, + void *parameter) __dead2; #endif /* Context switch */ -extern wait_result_t thread_block( - thread_continue_t continuation); +extern wait_result_t thread_block( + thread_continue_t continuation); -extern wait_result_t thread_block_parameter( - thread_continue_t continuation, - void *parameter); +extern wait_result_t thread_block_parameter( + thread_continue_t continuation, + void *parameter); /* Declare thread will wait on a particular event */ -extern wait_result_t assert_wait( - event_t event, - wait_interrupt_t interruptible); +extern wait_result_t assert_wait( + event_t event, + wait_interrupt_t interruptible); /* Assert that the thread intends to wait with a timeout */ -extern wait_result_t assert_wait_timeout( - event_t event, - wait_interrupt_t interruptible, - uint32_t interval, - uint32_t scale_factor); +extern wait_result_t assert_wait_timeout( + event_t event, + wait_interrupt_t interruptible, + uint32_t interval, + uint32_t scale_factor); /* Assert that the thread intends to wait with an urgency, timeout and leeway */ -extern wait_result_t assert_wait_timeout_with_leeway( - event_t event, - wait_interrupt_t interruptible, - wait_timeout_urgency_t urgency, - uint32_t interval, - uint32_t leeway, - uint32_t scale_factor); - -extern wait_result_t assert_wait_deadline( - event_t event, - wait_interrupt_t interruptible, - uint64_t deadline); +extern wait_result_t assert_wait_timeout_with_leeway( + event_t event, + wait_interrupt_t interruptible, + wait_timeout_urgency_t urgency, + uint32_t interval, + uint32_t leeway, + uint32_t scale_factor); + +extern wait_result_t assert_wait_deadline( + event_t event, + wait_interrupt_t interruptible, + uint64_t deadline); /* Assert that the thread intends to wait with an urgency, deadline, and leeway */ -extern wait_result_t assert_wait_deadline_with_leeway( - event_t event, - wait_interrupt_t interruptible, - wait_timeout_urgency_t urgency, - uint64_t deadline, - uint64_t leeway); +extern wait_result_t assert_wait_deadline_with_leeway( + event_t event, + wait_interrupt_t interruptible, + wait_timeout_urgency_t urgency, + uint64_t deadline, + uint64_t leeway); /* Wake up thread (or threads) waiting on a 
particular event */ -extern kern_return_t thread_wakeup_prim( - event_t event, - boolean_t one_thread, - wait_result_t result); - -#define thread_wakeup(x) \ - thread_wakeup_prim((x), FALSE, THREAD_AWAKENED) -#define thread_wakeup_with_result(x, z) \ - thread_wakeup_prim((x), FALSE, (z)) -#define thread_wakeup_one(x) \ - thread_wakeup_prim((x), TRUE, THREAD_AWAKENED) +extern kern_return_t thread_wakeup_prim( + event_t event, + boolean_t one_thread, + wait_result_t result); + +#define thread_wakeup(x) \ + thread_wakeup_prim((x), FALSE, THREAD_AWAKENED) +#define thread_wakeup_with_result(x, z) \ + thread_wakeup_prim((x), FALSE, (z)) +#define thread_wakeup_one(x) \ + thread_wakeup_prim((x), TRUE, THREAD_AWAKENED) /* Wakeup the specified thread if it is waiting on this event */ extern kern_return_t thread_wakeup_thread(event_t event, thread_t thread); @@ -594,8 +580,8 @@ extern boolean_t preemption_enabled(void); #define SCHED(f) (sched_current_dispatch->f) #else /* DEBUG */ -/* - * For DEV & REL kernels, use a static dispatch table instead of +/* + * For DEV & REL kernels, use a static dispatch table instead of * using the indirect function table. */ extern const struct sched_dispatch_table sched_dualq_dispatch; @@ -605,31 +591,31 @@ extern const struct sched_dispatch_table sched_dualq_dispatch; struct sched_dispatch_table { const char *sched_name; - void (*init)(void); /* Init global state */ - void (*timebase_init)(void); /* Timebase-dependent initialization */ - void (*processor_init)(processor_t processor); /* Per-processor scheduler init */ - void (*pset_init)(processor_set_t pset); /* Per-processor set scheduler init */ + void (*init)(void); /* Init global state */ + void (*timebase_init)(void); /* Timebase-dependent initialization */ + void (*processor_init)(processor_t processor); /* Per-processor scheduler init */ + void (*pset_init)(processor_set_t pset); /* Per-processor set scheduler init */ - void (*maintenance_continuation)(void); /* Function called regularly */ + void (*maintenance_continuation)(void); /* Function called regularly */ /* * Choose a thread of greater or equal priority from the per-processor * runqueue for timeshare/fixed threads */ - thread_t (*choose_thread)( - processor_t processor, - int priority, - ast_t reason); + thread_t (*choose_thread)( + processor_t processor, + int priority, + ast_t reason); - /* True if scheduler supports stealing threads */ - boolean_t steal_thread_enabled; + /* True if scheduler supports stealing threads for this pset */ + bool (*steal_thread_enabled)(processor_set_t pset); /* * Steal a thread from another processor in the pset so that it can run * immediately */ - thread_t (*steal_thread)( - processor_set_t pset); + thread_t (*steal_thread)( + processor_set_t pset); /* * Compute priority for a timeshare thread based on base priority. @@ -639,99 +625,99 @@ struct sched_dispatch_table { /* * Pick the best processor for a thread (any kind of thread) to run on. 
*/ - processor_t (*choose_processor)( - processor_set_t pset, - processor_t processor, - thread_t thread); + processor_t (*choose_processor)( + processor_set_t pset, + processor_t processor, + thread_t thread); /* * Enqueue a timeshare or fixed priority thread onto the per-processor * runqueue */ boolean_t (*processor_enqueue)( - processor_t processor, - thread_t thread, - integer_t options); + processor_t processor, + thread_t thread, + integer_t options); /* Migrate threads away in preparation for processor shutdown */ void (*processor_queue_shutdown)( - processor_t processor); + processor_t processor); /* Remove the specific thread from the per-processor runqueue */ - boolean_t (*processor_queue_remove)( - processor_t processor, - thread_t thread); + boolean_t (*processor_queue_remove)( + processor_t processor, + thread_t thread); /* * Does the per-processor runqueue have any timeshare or fixed priority * threads on it? Called without pset lock held, so should * not assume immutability while executing. */ - boolean_t (*processor_queue_empty)(processor_t processor); + boolean_t (*processor_queue_empty)(processor_t processor); /* * Would this priority trigger an urgent preemption if it's sitting * on the per-processor runqueue? */ - boolean_t (*priority_is_urgent)(int priority); + boolean_t (*priority_is_urgent)(int priority); /* * Does the per-processor runqueue contain runnable threads that * should cause the currently-running thread to be preempted? */ - ast_t (*processor_csw_check)(processor_t processor); + ast_t (*processor_csw_check)(processor_t processor); /* * Does the per-processor runqueue contain a runnable thread * of > or >= priority, as a preflight for choose_thread() or other * thread selection */ - boolean_t (*processor_queue_has_priority)(processor_t processor, - int priority, - boolean_t gte); + boolean_t (*processor_queue_has_priority)(processor_t processor, + int priority, + boolean_t gte); /* Quantum size for the specified non-realtime thread. */ - uint32_t (*initial_quantum_size)(thread_t thread); - + uint32_t (*initial_quantum_size)(thread_t thread); + /* Scheduler mode for a new thread */ - sched_mode_t (*initial_thread_sched_mode)(task_t parent_task); + sched_mode_t (*initial_thread_sched_mode)(task_t parent_task); /* * Is it safe to call update_priority, which may change a thread's * runqueue or other state. This can be used to throttle changes * to dynamic priority. */ - boolean_t (*can_update_priority)(thread_t thread); + boolean_t (*can_update_priority)(thread_t thread); /* * Update both scheduled priority and other persistent state. * Side effects may including migration to another processor's runqueue. */ - void (*update_priority)(thread_t thread); + void (*update_priority)(thread_t thread); /* Lower overhead update to scheduled priority and state. */ - void (*lightweight_update_priority)(thread_t thread); + void (*lightweight_update_priority)(thread_t thread); /* Callback for non-realtime threads when the quantum timer fires */ - void (*quantum_expire)(thread_t thread); + void (*quantum_expire)(thread_t thread); /* * Runnable threads on per-processor runqueue. Should only * be used for relative comparisons of load between processors. 
*/ - int (*processor_runq_count)(processor_t processor); + int (*processor_runq_count)(processor_t processor); /* Aggregate runcount statistics for per-processor runqueue */ uint64_t (*processor_runq_stats_count_sum)(processor_t processor); - boolean_t (*processor_bound_count)(processor_t processor); + boolean_t (*processor_bound_count)(processor_t processor); - void (*thread_update_scan)(sched_update_scan_context_t scan_context); + void (*thread_update_scan)(sched_update_scan_context_t scan_context); /* - * Use processor->next_thread to pin a thread to an idle - * processor. If FALSE, threads are enqueued and can - * be stolen by other processors. - */ + * Use processor->next_thread to pin a thread to an idle + * processor. If FALSE, threads are enqueued and can + * be stolen by other processors. + */ boolean_t direct_dispatch_to_idle_processors; /* Supports more than one pset */ @@ -751,14 +737,14 @@ struct sched_dispatch_table { * Called with pset lock held, returns pset lock unlocked. */ void (*processor_balance)(processor_t processor, processor_set_t pset); - rt_queue_t (*rt_runq)(processor_set_t pset); - void (*rt_init)(processor_set_t pset); - void (*rt_queue_shutdown)(processor_t processor); - void (*rt_runq_scan)(sched_update_scan_context_t scan_context); - int64_t (*rt_runq_count_sum)(void); + rt_queue_t (*rt_runq)(processor_set_t pset); + void (*rt_init)(processor_set_t pset); + void (*rt_queue_shutdown)(processor_t processor); + void (*rt_runq_scan)(sched_update_scan_context_t scan_context); + int64_t (*rt_runq_count_sum)(void); uint32_t (*qos_max_parallelism)(int qos, uint64_t options); - void (*check_spill)(processor_set_t pset, thread_t thread); + void (*check_spill)(processor_set_t pset, thread_t thread); sched_ipi_type_t (*ipi_policy)(processor_t dst, thread_t thread, boolean_t dst_idle, sched_ipi_event_t event); bool (*thread_should_yield)(processor_t processor, thread_t thread); }; @@ -787,8 +773,8 @@ extern const struct sched_dispatch_table sched_grrr_dispatch; */ extern const struct sched_dispatch_table *sched_current_dispatch; -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ __END_DECLS -#endif /* _KERN_SCHED_PRIM_H_ */ +#endif /* _KERN_SCHED_PRIM_H_ */ diff --git a/osfmk/kern/sched_proto.c b/osfmk/kern/sched_proto.c index 1df45e7ab..f1297189d 100644 --- a/osfmk/kern/sched_proto.c +++ b/osfmk/kern/sched_proto.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -77,44 +77,44 @@ static void sched_proto_maintenance_continuation(void); static thread_t -sched_proto_choose_thread(processor_t processor, - int priority, - ast_t reason); +sched_proto_choose_thread(processor_t processor, + int priority, + ast_t reason); static thread_t -sched_proto_steal_thread(processor_set_t pset); +sched_proto_steal_thread(processor_set_t pset); static int sched_proto_compute_priority(thread_t thread); static processor_t -sched_proto_choose_processor( processor_set_t pset, - processor_t processor, - thread_t thread); +sched_proto_choose_processor( processor_set_t pset, + processor_t processor, + thread_t thread); static boolean_t sched_proto_processor_enqueue( - processor_t processor, - thread_t thread, - integer_t options); + processor_t processor, + thread_t thread, + integer_t options); static void sched_proto_processor_queue_shutdown( - processor_t processor); + processor_t processor); static boolean_t sched_proto_processor_queue_remove( - processor_t processor, - thread_t thread); + processor_t processor, + thread_t thread); static boolean_t -sched_proto_processor_queue_empty(processor_t processor); +sched_proto_processor_queue_empty(processor_t processor); static boolean_t -sched_proto_processor_queue_has_priority(processor_t processor, - int priority, - boolean_t gte); +sched_proto_processor_queue_has_priority(processor_t processor, + int priority, + boolean_t gte); static boolean_t sched_proto_priority_is_urgent(int priority); @@ -129,16 +129,16 @@ static sched_mode_t sched_proto_initial_thread_sched_mode(task_t parent_task); static boolean_t -sched_proto_can_update_priority(thread_t thread); +sched_proto_can_update_priority(thread_t thread); static void -sched_proto_update_priority(thread_t thread); +sched_proto_update_priority(thread_t thread); static void -sched_proto_lightweight_update_priority(thread_t thread); +sched_proto_lightweight_update_priority(thread_t thread); static void -sched_proto_quantum_expire(thread_t thread); +sched_proto_quantum_expire(thread_t thread); static int sched_proto_processor_runq_count(processor_t processor); @@ -161,7 +161,7 @@ const struct sched_dispatch_table sched_proto_dispatch = { .pset_init = sched_proto_pset_init, .maintenance_continuation = sched_proto_maintenance_continuation, .choose_thread = sched_proto_choose_thread, - .steal_thread_enabled = FALSE, + .steal_thread_enabled = sched_steal_thread_DISABLED, .steal_thread = sched_proto_steal_thread, .compute_timeshare_priority = sched_proto_compute_priority, .choose_processor = sched_proto_choose_processor, @@ -201,57 +201,56 @@ const struct sched_dispatch_table sched_proto_dispatch = { .thread_should_yield = sched_thread_should_yield, }; -static struct run_queue *global_runq; -static struct run_queue global_runq_storage; +static struct run_queue *global_runq; +static struct run_queue global_runq_storage; -#define GLOBAL_RUNQ ((processor_t)-2) -decl_simple_lock_data(static,global_runq_lock); +#define GLOBAL_RUNQ ((processor_t)-2) +decl_simple_lock_data(static, global_runq_lock); -extern int max_unsafe_quanta; +extern int max_unsafe_quanta; static uint32_t proto_quantum_us; static uint32_t proto_quantum; -static uint32_t runqueue_generation; +static uint32_t runqueue_generation; static processor_t proto_processor; -static uint64_t sched_proto_tick_deadline; -static uint32_t sched_proto_tick; +static uint64_t sched_proto_tick_deadline; +static uint32_t sched_proto_tick; static void sched_proto_init(void) { - 
proto_quantum_us = 10*1000; - + proto_quantum_us = 10 * 1000; + printf("standard proto timeslicing quantum is %d us\n", proto_quantum_us); simple_lock_init(&global_runq_lock, 0); global_runq = &global_runq_storage; run_queue_init(global_runq); runqueue_generation = 0; - + proto_processor = master_processor; } static void sched_proto_timebase_init(void) { - uint64_t abstime; + uint64_t abstime; /* standard timeslicing quantum */ clock_interval_to_absolutetime_interval( - proto_quantum_us, NSEC_PER_USEC, &abstime); + proto_quantum_us, NSEC_PER_USEC, &abstime); assert((abstime >> 32) == 0 && (uint32_t)abstime != 0); proto_quantum = (uint32_t)abstime; - + thread_depress_time = 1 * proto_quantum; default_timeshare_computation = proto_quantum / 2; default_timeshare_constraint = proto_quantum; - + max_unsafe_computation = max_unsafe_quanta * proto_quantum; sched_safe_duration = 2 * max_unsafe_quanta * proto_quantum; - } static void @@ -268,55 +267,57 @@ sched_proto_pset_init(processor_set_t pset __unused) static void sched_proto_maintenance_continuation(void) { - uint64_t abstime = mach_absolute_time(); - + uint64_t abstime = mach_absolute_time(); + sched_proto_tick++; - + /* Every 8 seconds, switch to another processor */ if ((sched_proto_tick & 0x7) == 0) { processor_t new_processor; - + new_processor = proto_processor->processor_list; - if (new_processor == PROCESSOR_NULL) + if (new_processor == PROCESSOR_NULL) { proto_processor = master_processor; - else + } else { proto_processor = new_processor; + } } - - + + /* * Compute various averages. */ compute_averages(1); - - if (sched_proto_tick_deadline == 0) + + if (sched_proto_tick_deadline == 0) { sched_proto_tick_deadline = abstime; - + } + clock_deadline_for_periodic_event(sched_one_second_interval, abstime, - &sched_proto_tick_deadline); - + &sched_proto_tick_deadline); + assert_wait_deadline((event_t)sched_proto_maintenance_continuation, THREAD_UNINT, sched_proto_tick_deadline); thread_block((thread_continue_t)sched_proto_maintenance_continuation); /*NOTREACHED*/ } static thread_t -sched_proto_choose_thread(processor_t processor, - int priority, - ast_t reason __unused) +sched_proto_choose_thread(processor_t processor, + int priority, + ast_t reason __unused) { - run_queue_t rq = global_runq; - queue_t queue; - int pri, count; - thread_t thread; - - - simple_lock(&global_runq_lock); - + run_queue_t rq = global_runq; + queue_t queue; + int pri, count; + thread_t thread; + + + simple_lock(&global_runq_lock, LCK_GRP_NULL); + queue = rq->queues + rq->highq; pri = rq->highq; count = rq->count; - + /* * Since we don't depress priorities, a high priority thread * may get selected over and over again. 
Put a runqueue @@ -326,15 +327,15 @@ sched_proto_choose_thread(processor_t processor, * perfect, especially if the number of runnable threads always * stays high, but is a workable approximation */ - + while (count > 0 && pri >= priority) { thread = (thread_t)queue_first(queue); while (!queue_end(queue, (queue_entry_t)thread)) { if ((thread->bound_processor == PROCESSOR_NULL || - thread->bound_processor == processor) && - runqueue_generation != thread->runqueue_generation) { + thread->bound_processor == processor) && + runqueue_generation != thread->runqueue_generation) { remqueue((queue_entry_t)thread); - + thread->runq = PROCESSOR_NULL; thread->runqueue_generation = runqueue_generation; SCHED_STATS_RUNQ_CHANGE(&rq->runq_stats, rq->count); @@ -343,31 +344,30 @@ sched_proto_choose_thread(processor_t processor, bitmap_clear(rq->bitmap, pri); rq->highq = bitmap_first(rq->bitmap, NRQS); } - + simple_unlock(&global_runq_lock); - return (thread); + return thread; } count--; - + thread = (thread_t)queue_next((queue_entry_t)thread); } - + queue--; pri--; } - + runqueue_generation++; - + simple_unlock(&global_runq_lock); - return (THREAD_NULL); + return THREAD_NULL; } static thread_t -sched_proto_steal_thread(processor_set_t pset) +sched_proto_steal_thread(processor_set_t pset) { pset_unlock(pset); - - return (THREAD_NULL); - + + return THREAD_NULL; } static int @@ -377,46 +377,46 @@ sched_proto_compute_priority(thread_t thread) } static processor_t -sched_proto_choose_processor( processor_set_t pset, - processor_t processor, - thread_t thread __unused) +sched_proto_choose_processor( processor_set_t pset, + processor_t processor, + thread_t thread __unused) { processor = proto_processor; - + /* * Check that the correct processor set is * returned locked. */ if (pset != processor->processor_set) { pset_unlock(pset); - + pset = processor->processor_set; pset_lock(pset); } - - return (processor); + + return processor; } static boolean_t sched_proto_processor_enqueue( - processor_t processor __unused, - thread_t thread, - integer_t options) + processor_t processor __unused, + thread_t thread, + integer_t options) { - run_queue_t rq = global_runq; - boolean_t result; - - simple_lock(&global_runq_lock); + run_queue_t rq = global_runq; + boolean_t result; + + simple_lock(&global_runq_lock, LCK_GRP_NULL); result = run_queue_enqueue(rq, thread, options); thread->runq = GLOBAL_RUNQ; simple_unlock(&global_runq_lock); - - return (result); + + return result; } static void sched_proto_processor_queue_shutdown( - processor_t processor) + processor_t processor) { /* With a global runqueue, just stop choosing this processor */ (void)processor; @@ -424,16 +424,16 @@ sched_proto_processor_queue_shutdown( static boolean_t sched_proto_processor_queue_remove( - processor_t processor, - thread_t thread) + processor_t processor, + thread_t thread) { - void * rqlock; - run_queue_t rq; + void * rqlock; + run_queue_t rq; rqlock = &global_runq_lock; rq = global_runq; - - simple_lock(rqlock); + + simple_lock(rqlock, LCK_GRP_NULL); if (processor == thread->runq) { /* * Thread is on a run queue and we have a lock on @@ -445,55 +445,55 @@ sched_proto_processor_queue_remove( if (SCHED(priority_is_urgent)(thread->sched_pri)) { rq->urgency--; assert(rq->urgency >= 0); } - + if (queue_empty(rq->queues + thread->sched_pri)) { /* update run queue status */ bitmap_clear(rq->bitmap, thread->sched_pri); rq->highq = bitmap_first(rq->bitmap, NRQS); } - + thread->runq = PROCESSOR_NULL; - } - else { + } else { /* * The thread left the run 
queue before we could - * lock the run queue. + * lock the run queue. */ assert(thread->runq == PROCESSOR_NULL); processor = PROCESSOR_NULL; } - + simple_unlock(rqlock); - - return (processor != PROCESSOR_NULL); + + return processor != PROCESSOR_NULL; } static boolean_t -sched_proto_processor_queue_empty(processor_t processor __unused) +sched_proto_processor_queue_empty(processor_t processor __unused) { boolean_t result; - + result = (global_runq->count == 0); - + return result; } static boolean_t -sched_proto_processor_queue_has_priority(processor_t processor __unused, - int priority, - boolean_t gte) +sched_proto_processor_queue_has_priority(processor_t processor __unused, + int priority, + boolean_t gte) { boolean_t result; - - simple_lock(&global_runq_lock); - if (gte) + simple_lock(&global_runq_lock, LCK_GRP_NULL); + + if (gte) { result = global_runq->highq >= priority; - else + } else { result = global_runq->highq > priority; + } simple_unlock(&global_runq_lock); - + return result; } @@ -501,37 +501,42 @@ sched_proto_processor_queue_has_priority(processor_t processor __unused, static boolean_t sched_proto_priority_is_urgent(int priority) { - if (priority <= BASEPRI_FOREGROUND) + if (priority <= BASEPRI_FOREGROUND) { return FALSE; - - if (priority < MINPRI_KERNEL) + } + + if (priority < MINPRI_KERNEL) { return TRUE; + } - if (priority >= BASEPRI_PREEMPT) + if (priority >= BASEPRI_PREEMPT) { return TRUE; - + } + return FALSE; } static ast_t sched_proto_processor_csw_check(processor_t processor) { - run_queue_t runq; - int count, urgency; - + run_queue_t runq; + int count, urgency; + runq = global_runq; count = runq->count; urgency = runq->urgency; - + if (count > 0) { - if (urgency > 0) - return (AST_PREEMPT | AST_URGENT); - + if (urgency > 0) { + return AST_PREEMPT | AST_URGENT; + } + return AST_PREEMPT; } - if (proto_processor != processor) + if (proto_processor != processor) { return AST_PREEMPT; + } return AST_NONE; } @@ -545,34 +550,32 @@ sched_proto_initial_quantum_size(thread_t thread __unused) static sched_mode_t sched_proto_initial_thread_sched_mode(task_t parent_task) { - if (parent_task == kernel_task) + if (parent_task == kernel_task) { return TH_MODE_FIXED; - else - return TH_MODE_TIMESHARE; + } else { + return TH_MODE_TIMESHARE; + } } static boolean_t -sched_proto_can_update_priority(thread_t thread __unused) +sched_proto_can_update_priority(thread_t thread __unused) { return FALSE; } static void -sched_proto_update_priority(thread_t thread __unused) +sched_proto_update_priority(thread_t thread __unused) { - } static void -sched_proto_lightweight_update_priority(thread_t thread __unused) +sched_proto_lightweight_update_priority(thread_t thread __unused) { - } static void sched_proto_quantum_expire(thread_t thread __unused) { - } static int @@ -582,7 +585,7 @@ sched_proto_processor_runq_count(processor_t processor) return global_runq->count; } else { return 0; - } + } } static uint64_t @@ -604,8 +607,4 @@ sched_proto_processor_bound_count(__unused processor_t processor) static void sched_proto_thread_update_scan(__unused sched_update_scan_context_t scan_context) { - } - - - diff --git a/osfmk/kern/sched_traditional.c b/osfmk/kern/sched_traditional.c index 02066c97e..e91504583 100644 --- a/osfmk/kern/sched_traditional.c +++ b/osfmk/kern/sched_traditional.c @@ -60,11 +60,14 @@ #include static boolean_t -sched_traditional_use_pset_runqueue = FALSE; + sched_traditional_use_pset_runqueue = FALSE; static void sched_traditional_init(void); +static bool 
+sched_traditional_steal_thread_enabled(processor_set_t pset); + static thread_t sched_traditional_steal_thread(processor_set_t pset); @@ -136,7 +139,7 @@ const struct sched_dispatch_table sched_traditional_dispatch = { .pset_init = sched_traditional_pset_init, .maintenance_continuation = sched_timeshare_maintenance_continue, .choose_thread = sched_traditional_choose_thread, - .steal_thread_enabled = TRUE, + .steal_thread_enabled = sched_traditional_steal_thread_enabled, .steal_thread = sched_traditional_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, .choose_processor = choose_processor, @@ -184,7 +187,7 @@ const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch .pset_init = sched_traditional_pset_init, .maintenance_continuation = sched_timeshare_maintenance_continue, .choose_thread = sched_traditional_choose_thread, - .steal_thread_enabled = TRUE, + .steal_thread_enabled = sched_steal_thread_enabled, .steal_thread = sched_traditional_steal_thread, .compute_timeshare_priority = sched_compute_timeshare_priority, .choose_processor = choose_processor, @@ -220,7 +223,7 @@ const struct sched_dispatch_table sched_traditional_with_pset_runqueue_dispatch .qos_max_parallelism = sched_qos_max_parallelism, .check_spill = sched_check_spill, - .ipi_policy = sched_ipi_policy, + .ipi_policy = sched_ipi_policy, .thread_should_yield = sched_thread_should_yield, }; @@ -256,49 +259,57 @@ sched_traditional_pset_init(processor_set_t pset) } __attribute__((always_inline)) -static inline run_queue_t runq_for_processor(processor_t processor) +static inline run_queue_t +runq_for_processor(processor_t processor) { - if (sched_traditional_use_pset_runqueue) + if (sched_traditional_use_pset_runqueue) { return &processor->processor_set->pset_runq; - else + } else { return &processor->runq; + } } __attribute__((always_inline)) -static inline void runq_consider_incr_bound_count(processor_t processor, - thread_t thread) +static inline void +runq_consider_incr_bound_count(processor_t processor, + thread_t thread) { - if (thread->bound_processor == PROCESSOR_NULL) + if (thread->bound_processor == PROCESSOR_NULL) { return; + } assert(thread->bound_processor == processor); - if (sched_traditional_use_pset_runqueue) + if (sched_traditional_use_pset_runqueue) { processor->processor_set->pset_runq_bound_count++; + } processor->runq_bound_count++; } __attribute__((always_inline)) -static inline void runq_consider_decr_bound_count(processor_t processor, - thread_t thread) +static inline void +runq_consider_decr_bound_count(processor_t processor, + thread_t thread) { - if (thread->bound_processor == PROCESSOR_NULL) + if (thread->bound_processor == PROCESSOR_NULL) { return; + } assert(thread->bound_processor == processor); - if (sched_traditional_use_pset_runqueue) + if (sched_traditional_use_pset_runqueue) { processor->processor_set->pset_runq_bound_count--; + } processor->runq_bound_count--; } static thread_t sched_traditional_choose_thread( - processor_t processor, - int priority, - __unused ast_t reason) + processor_t processor, + int priority, + __unused ast_t reason) { thread_t thread; @@ -322,9 +333,9 @@ sched_traditional_choose_thread( */ static thread_t sched_traditional_choose_thread_from_runq( - processor_t processor, - run_queue_t rq, - int priority) + processor_t processor, + run_queue_t rq, + int priority) { queue_t queue = rq->queues + rq->highq; int pri = rq->highq; @@ -349,7 +360,7 @@ sched_traditional_choose_thread_from_runq( rq->highq = 
bitmap_first(rq->bitmap, NRQS); } - return (thread); + return thread; } count--; @@ -359,16 +370,17 @@ sched_traditional_choose_thread_from_runq( queue--; pri--; } - return (THREAD_NULL); + return THREAD_NULL; } static sched_mode_t sched_traditional_initial_thread_sched_mode(task_t parent_task) { - if (parent_task == kernel_task) + if (parent_task == kernel_task) { return TH_MODE_FIXED; - else + } else { return TH_MODE_TIMESHARE; + } } /* @@ -385,8 +397,8 @@ sched_traditional_initial_thread_sched_mode(task_t parent_task) */ static boolean_t sched_traditional_processor_enqueue(processor_t processor, - thread_t thread, - integer_t options) + thread_t thread, + integer_t options) { run_queue_t rq = runq_for_processor(processor); boolean_t result; @@ -395,7 +407,7 @@ sched_traditional_processor_enqueue(processor_t processor, thread->runq = processor; runq_consider_incr_bound_count(processor, thread); - return (result); + return result; } static boolean_t @@ -445,8 +457,9 @@ sched_traditional_processor_csw_check(processor_t processor) } if (has_higher) { - if (runq->urgency > 0) - return (AST_PREEMPT | AST_URGENT); + if (runq->urgency > 0) { + return AST_PREEMPT | AST_URGENT; + } return AST_PREEMPT; } @@ -456,13 +469,14 @@ sched_traditional_processor_csw_check(processor_t processor) static boolean_t sched_traditional_processor_queue_has_priority(processor_t processor, - int priority, - boolean_t gte) + int priority, + boolean_t gte) { - if (gte) + if (gte) { return runq_for_processor(processor)->highq >= priority; - else + } else { return runq_for_processor(processor)->highq > priority; + } } static int @@ -480,10 +494,11 @@ sched_traditional_processor_runq_stats_count_sum(processor_t processor) static uint64_t sched_traditional_with_pset_runqueue_processor_runq_stats_count_sum(processor_t processor) { - if (processor->cpu_id == processor->processor_set->cpu_set_low) + if (processor->cpu_id == processor->processor_set->cpu_set_low) { return runq_for_processor(processor)->runq_stats.count_sum; - else + } else { return 0ULL; + } } static int @@ -558,23 +573,26 @@ sched_traditional_processor_queue_shutdown(processor_t processor) #if 0 static void run_queue_check( - run_queue_t rq, - thread_t thread) + run_queue_t rq, + thread_t thread) { queue_t q; queue_entry_t qe; - if (rq != thread->runq) + if (rq != thread->runq) { panic("run_queue_check: thread runq"); + } - if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI) + if (thread->sched_pri > MAXPRI || thread->sched_pri < MINPRI) { panic("run_queue_check: thread sched_pri"); + } q = &rq->queues[thread->sched_pri]; qe = queue_first(q); while (!queue_end(q, qe)) { - if (qe == (queue_entry_t)thread) + if (qe == (queue_entry_t)thread) { return; + } qe = queue_next(qe); } @@ -590,7 +608,7 @@ run_queue_check( */ static boolean_t sched_traditional_processor_queue_remove(processor_t processor, - thread_t thread) + thread_t thread) { processor_set_t pset; run_queue_t rq; @@ -607,8 +625,7 @@ sched_traditional_processor_queue_remove(processor_t processor, */ runq_consider_decr_bound_count(processor, thread); run_queue_remove(rq, thread); - } - else { + } else { /* * The thread left the run queue before we could * lock the run queue. 
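Note: the pattern repeated through these hunks, bitmap_clear(rq->bitmap, pri) followed by rq->highq = bitmap_first(rq->bitmap, NRQS), is what keeps a run queue's cached top priority honest: each priority level owns one bit that is set while its queue is non-empty, and highq is recomputed from the bitmap whenever a level drains, so checks like sched_traditional_processor_queue_has_priority stay O(1). A toy sketch of that bookkeeping, with hypothetical names (prio_map, map_set, map_clear) standing in for the kernel's run_queue and bitmap_first machinery:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NRQS 128 /* number of priority levels, as in the kernel */

    /* One bit per priority level; highq caches the highest set bit so
     * "what is the best runnable priority?" is a constant-time read. */
    struct prio_map {
        uint64_t bits[NRQS / 64];
        int highq; /* highest set priority, -1 if empty */
    };

    static int
    map_first(const struct prio_map *m)
    {
        for (int w = (NRQS / 64) - 1; w >= 0; w--) {
            if (m->bits[w]) {
                return w * 64 + 63 - __builtin_clzll(m->bits[w]);
            }
        }
        return -1;
    }

    static void
    map_set(struct prio_map *m, int pri)
    {
        m->bits[pri / 64] |= 1ULL << (pri % 64);
        if (pri > m->highq) {
            m->highq = pri;
        }
    }

    static void
    map_clear(struct prio_map *m, int pri)
    {
        m->bits[pri / 64] &= ~(1ULL << (pri % 64));
        m->highq = map_first(m); /* mirrors rq->highq = bitmap_first(...) */
    }

    int
    main(void)
    {
        struct prio_map m = { .highq = -1 };
        map_set(&m, 31);
        map_set(&m, 80);
        map_clear(&m, 80); /* e.g. the only priority-80 thread was dequeued */
        printf("highq = %d\n", m.highq); /* prints 31 */
        return 0;
    }

The design choice is the usual space/time trade: two words of bitmap buy a constant-time best-priority query at every context-switch check, at the cost of one extra bit update per enqueue or dequeue.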
@@ -619,7 +636,7 @@ sched_traditional_processor_queue_remove(processor_t processor, pset_unlock(pset); - return (processor != PROCESSOR_NULL); + return processor != PROCESSOR_NULL; } /* @@ -658,7 +675,7 @@ sched_traditional_steal_processor_thread(processor_t processor) rq->highq = bitmap_first(rq->bitmap, NRQS); } - return (thread); + return thread; } count--; @@ -668,7 +685,14 @@ sched_traditional_steal_processor_thread(processor_t processor) queue--; pri--; } - return (THREAD_NULL); + return THREAD_NULL; +} + +static bool +sched_traditional_steal_thread_enabled(processor_set_t pset) +{ + (void)pset; + return true; } /* @@ -690,7 +714,7 @@ sched_traditional_steal_thread(processor_set_t pset) do { uint64_t active_map = (pset->cpu_state_map[PROCESSOR_RUNNING] | - pset->cpu_state_map[PROCESSOR_DISPATCHING]); + pset->cpu_state_map[PROCESSOR_DISPATCHING]); for (int cpuid = lsb_first(active_map); cpuid >= 0; cpuid = lsb_next(active_map, cpuid)) { processor = processor_array[cpuid]; if (runq_for_processor(processor)->count > 0) { @@ -698,7 +722,7 @@ sched_traditional_steal_thread(processor_set_t pset) if (thread != THREAD_NULL) { pset_unlock(cset); - return (thread); + return thread; } } } @@ -715,7 +739,7 @@ sched_traditional_steal_thread(processor_set_t pset) pset_unlock(cset); - return (THREAD_NULL); + return THREAD_NULL; } static void @@ -743,8 +767,9 @@ sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context) pset_unlock(pset); splx(s); - if (restart_needed) + if (restart_needed) { break; + } thread = processor->idle_thread; if (thread != THREAD_NULL && thread->sched_stamp != sched_tick) { @@ -759,4 +784,3 @@ sched_traditional_thread_update_scan(sched_update_scan_context_t scan_context) thread_update_process_threads(); } while (restart_needed); } - diff --git a/osfmk/kern/sched_urgency.h b/osfmk/kern/sched_urgency.h new file mode 100644 index 000000000..811a6d77d --- /dev/null +++ b/osfmk/kern/sched_urgency.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef _KERN_SCHED_URGENCY_H_
+#define _KERN_SCHED_URGENCY_H_
+
+#ifdef MACH_KERNEL_PRIVATE
+
+#include
+
+typedef enum thread_urgency {
+	THREAD_URGENCY_NONE        = 0, /* processor is idle */
+	THREAD_URGENCY_BACKGROUND  = 1, /* "background" thread (i.e. min-power) */
+	THREAD_URGENCY_NORMAL      = 2, /* "normal" thread */
+	THREAD_URGENCY_REAL_TIME   = 3, /* "real-time" or urgent thread */
+	THREAD_URGENCY_LOWPRI      = 4, /* low priority but not "background" hint for performance management subsystem */
+	THREAD_URGENCY_MAX         = 5, /* Max */
+} thread_urgency_t;
+
+/* Returns the "urgency" of a thread (provided by scheduler) */
+extern thread_urgency_t thread_get_urgency(
+	thread_t        thread,
+	uint64_t        *rt_period,
+	uint64_t        *rt_deadline);
+
+/* Tells the "urgency" of the just scheduled thread (provided by CPU PM) */
+extern void thread_tell_urgency(
+	thread_urgency_t        urgency,
+	uint64_t        rt_period,
+	uint64_t        rt_deadline,
+	uint64_t        sched_latency,
+	thread_t        nthread);
+
+#endif /* MACH_KERNEL_PRIVATE */
+
+__END_DECLS
+
+#endif /* _KERN_SCHED_URGENCY_H_ */
diff --git a/osfmk/kern/sfi.c b/osfmk/kern/sfi.c
index 139a6798e..d9ee0728f 100644
--- a/osfmk/kern/sfi.c
+++ b/osfmk/kern/sfi.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2013 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -103,9 +103,9 @@ * \__ thread_lock */ -decl_simple_lock_data(static,sfi_lock); +decl_simple_lock_data(static, sfi_lock); static timer_call_data_t sfi_timer_call_entry; -volatile boolean_t sfi_is_enabled; +volatile boolean_t sfi_is_enabled; boolean_t sfi_window_is_set; uint64_t sfi_window_usecs; @@ -113,10 +113,10 @@ uint64_t sfi_window_interval; uint64_t sfi_next_off_deadline; typedef struct { - sfi_class_id_t class_id; - thread_continue_t class_continuation; - const char * class_name; - const char * class_ledger_name; + sfi_class_id_t class_id; + thread_continue_t class_continuation; + const char * class_name; + const char * class_ledger_name; } sfi_class_registration_t; /* @@ -131,58 +131,58 @@ typedef struct { static inline void _sfi_wait_cleanup(void); -#define SFI_CLASS_REGISTER(clsid, ledger_name) \ -static void __attribute__((noinline, noreturn)) \ +#define SFI_CLASS_REGISTER(clsid, ledger_name) \ +static void __attribute__((noinline, noreturn)) \ SFI_ ## clsid ## _THREAD_IS_WAITING(void *arg __unused, wait_result_t wret __unused) \ -{ \ - _sfi_wait_cleanup(); \ - thread_exception_return(); \ -} \ - \ -_Static_assert(SFI_CLASS_ ## clsid < MAX_SFI_CLASS_ID, "Invalid ID"); \ - \ -__attribute__((section("__DATA,__sfi_class_reg"), used)) \ -static sfi_class_registration_t SFI_ ## clsid ## _registration = { \ - .class_id = SFI_CLASS_ ## clsid, \ - .class_continuation = SFI_ ## clsid ## _THREAD_IS_WAITING, \ - .class_name = "SFI_CLASS_" # clsid, \ - .class_ledger_name = "SFI_CLASS_" # ledger_name, \ +{ \ + _sfi_wait_cleanup(); \ + thread_exception_return(); \ +} \ + \ +_Static_assert(SFI_CLASS_ ## clsid < MAX_SFI_CLASS_ID, "Invalid ID"); \ + \ +__attribute__((section("__DATA,__sfi_class_reg"), used)) \ +static sfi_class_registration_t SFI_ ## clsid ## _registration = { \ + .class_id = SFI_CLASS_ ## clsid, \ + .class_continuation = SFI_ ## clsid ## _THREAD_IS_WAITING, \ + .class_name = "SFI_CLASS_" # clsid, \ + .class_ledger_name = "SFI_CLASS_" # ledger_name, \ } /* SFI_CLASS_UNSPECIFIED not included here */ -SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE); -SFI_CLASS_REGISTER(DARWIN_BG, DARWIN_BG); -SFI_CLASS_REGISTER(APP_NAP, APP_NAP); -SFI_CLASS_REGISTER(MANAGED_FOCAL, MANAGED); -SFI_CLASS_REGISTER(MANAGED_NONFOCAL, MANAGED); -SFI_CLASS_REGISTER(UTILITY, UTILITY); -SFI_CLASS_REGISTER(DEFAULT_FOCAL, DEFAULT); -SFI_CLASS_REGISTER(DEFAULT_NONFOCAL, DEFAULT); -SFI_CLASS_REGISTER(LEGACY_FOCAL, LEGACY); -SFI_CLASS_REGISTER(LEGACY_NONFOCAL, LEGACY); -SFI_CLASS_REGISTER(USER_INITIATED_FOCAL, USER_INITIATED); -SFI_CLASS_REGISTER(USER_INITIATED_NONFOCAL, USER_INITIATED); -SFI_CLASS_REGISTER(USER_INTERACTIVE_FOCAL, USER_INTERACTIVE); +SFI_CLASS_REGISTER(MAINTENANCE, MAINTENANCE); +SFI_CLASS_REGISTER(DARWIN_BG, DARWIN_BG); +SFI_CLASS_REGISTER(APP_NAP, APP_NAP); +SFI_CLASS_REGISTER(MANAGED_FOCAL, MANAGED); +SFI_CLASS_REGISTER(MANAGED_NONFOCAL, MANAGED); +SFI_CLASS_REGISTER(UTILITY, UTILITY); +SFI_CLASS_REGISTER(DEFAULT_FOCAL, DEFAULT); +SFI_CLASS_REGISTER(DEFAULT_NONFOCAL, DEFAULT); +SFI_CLASS_REGISTER(LEGACY_FOCAL, LEGACY); +SFI_CLASS_REGISTER(LEGACY_NONFOCAL, LEGACY); +SFI_CLASS_REGISTER(USER_INITIATED_FOCAL, USER_INITIATED); +SFI_CLASS_REGISTER(USER_INITIATED_NONFOCAL, USER_INITIATED); +SFI_CLASS_REGISTER(USER_INTERACTIVE_FOCAL, USER_INTERACTIVE); SFI_CLASS_REGISTER(USER_INTERACTIVE_NONFOCAL, USER_INTERACTIVE); -SFI_CLASS_REGISTER(KERNEL, OPTED_OUT); -SFI_CLASS_REGISTER(OPTED_OUT, OPTED_OUT); +SFI_CLASS_REGISTER(KERNEL, OPTED_OUT); 
+SFI_CLASS_REGISTER(OPTED_OUT, OPTED_OUT); struct sfi_class_state { - uint64_t off_time_usecs; - uint64_t off_time_interval; + uint64_t off_time_usecs; + uint64_t off_time_interval; - timer_call_data_t on_timer; - uint64_t on_timer_deadline; - boolean_t on_timer_programmed; + timer_call_data_t on_timer; + uint64_t on_timer_deadline; + boolean_t on_timer_programmed; - boolean_t class_sfi_is_enabled; - volatile boolean_t class_in_on_phase; + boolean_t class_sfi_is_enabled; + volatile boolean_t class_in_on_phase; - struct waitq waitq; /* threads in ready state */ - thread_continue_t continuation; + struct waitq waitq; /* threads in ready state */ + thread_continue_t continuation; - const char * class_name; - const char * class_ledger_name; + const char * class_name; + const char * class_ledger_name; }; /* Static configuration performed in sfi_early_init() */ @@ -206,7 +206,6 @@ sfi_get_registration_data(unsigned long *count) sectdata = getsectdatafromheader(&_mh_execute_header, "__DATA", "__sfi_class_reg", §len); if (sectdata) { - if (sectlen % sizeof(sfi_class_registration_t) != 0) { /* corrupt data? */ panic("__sfi_class_reg section has invalid size %lu", sectlen); @@ -222,13 +221,14 @@ sfi_get_registration_data(unsigned long *count) } /* Called early in boot, when kernel is single-threaded */ -void sfi_early_init(void) +void +sfi_early_init(void) { unsigned long i, count; sfi_class_registration_t *registrations; registrations = sfi_get_registration_data(&count); - for (i=0; i < count; i++) { + for (i = 0; i < count; i++) { sfi_class_id_t class_id = registrations[i].class_id; assert(class_id < MAX_SFI_CLASS_ID); /* should be caught at compile-time */ @@ -245,7 +245,8 @@ void sfi_early_init(void) } } -void sfi_init(void) +void +sfi_init(void) { sfi_class_id_t i; kern_return_t kret; @@ -261,12 +262,12 @@ void sfi_init(void) if (sfi_classes[i].continuation) { timer_call_setup(&sfi_classes[i].on_timer, sfi_timer_per_class_on, (void *)(uintptr_t)i); sfi_classes[i].on_timer_programmed = FALSE; - - kret = waitq_init(&sfi_classes[i].waitq, SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ); + + kret = waitq_init(&sfi_classes[i].waitq, SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ); assert(kret == KERN_SUCCESS); } else { /* The only allowed gap is for SFI_CLASS_UNSPECIFIED */ - if(i != SFI_CLASS_UNSPECIFIED) { + if (i != SFI_CLASS_UNSPECIFIED) { panic("Gap in registered SFI classes"); } } @@ -311,20 +312,21 @@ sfi_ledger_entry_add(ledger_template_t template, sfi_class_id_t class_id) return ledger_entry_add(template, ledger_name, "sfi", "MATUs"); } -static void sfi_timer_global_off( +static void +sfi_timer_global_off( timer_call_param_t param0 __unused, timer_call_param_t param1 __unused) { - uint64_t now = mach_absolute_time(); - sfi_class_id_t i; - processor_set_t pset, nset; - processor_t processor; - uint32_t needs_cause_ast_mask = 0x0; - spl_t s; + uint64_t now = mach_absolute_time(); + sfi_class_id_t i; + processor_set_t pset, nset; + processor_t processor; + uint32_t needs_cause_ast_mask = 0x0; + spl_t s; s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); if (!sfi_is_enabled) { /* If SFI has been disabled, let all "on" timers drain naturally */ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_NONE, 1, 0, 0, 0, 0); @@ -340,7 +342,7 @@ static void sfi_timer_global_off( for (i = 0; i < MAX_SFI_CLASS_ID; i++) { if (sfi_classes[i].class_sfi_is_enabled) { uint64_t on_timer_deadline; - + sfi_classes[i].class_in_on_phase = FALSE; 
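			/*
			 * A simplified model of the window arithmetic around here
			 * (sketch only; the real deadlines come from
			 * mach_absolute_time() and clock_deadline_for_periodic_event()):
			 *
			 *     window start (this off-timer):  class_in_on_phase = FALSE
			 *     now + class off interval:       per-class on-timer fires,
			 *                                     class_in_on_phase = TRUE
			 *     now + window interval:          off-timer fires again
			 *
			 * sfi_set_window()/sfi_set_class_offtime() reject configurations
			 * where a class off interval would reach the end of the window,
			 * so every class is back on before the next off edge.
			 */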
sfi_classes[i].on_timer_programmed = TRUE; @@ -364,9 +366,9 @@ static void sfi_timer_global_off( /* Iterate over processors, call cause_ast_check() on ones running a thread that should be in an off phase */ processor = processor_list; pset = processor->processor_set; - + pset_lock(pset); - + do { nset = processor->processor_set; if (nset != pset) { @@ -395,15 +397,15 @@ static void sfi_timer_global_off( } /* Re-arm timer if still enabled */ - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); if (sfi_is_enabled) { clock_deadline_for_periodic_event(sfi_window_interval, - now, - &sfi_next_off_deadline); + now, + &sfi_next_off_deadline); timer_call_enter1(&sfi_timer_call_entry, - NULL, - sfi_next_off_deadline, - TIMER_CALL_SYS_CRITICAL); + NULL, + sfi_next_off_deadline, + TIMER_CALL_SYS_CRITICAL); } KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_OFF_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0); @@ -413,18 +415,19 @@ static void sfi_timer_global_off( splx(s); } -static void sfi_timer_per_class_on( +static void +sfi_timer_per_class_on( timer_call_param_t param0, timer_call_param_t param1 __unused) { sfi_class_id_t sfi_class_id = (sfi_class_id_t)(uintptr_t)param0; - struct sfi_class_state *sfi_class = &sfi_classes[sfi_class_id]; - kern_return_t kret; - spl_t s; + struct sfi_class_state *sfi_class = &sfi_classes[sfi_class_id]; + kern_return_t kret; + spl_t s; s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_START, sfi_class_id, 0, 0, 0, 0); @@ -438,8 +441,8 @@ static void sfi_timer_per_class_on( sfi_class->on_timer_programmed = FALSE; kret = waitq_wakeup64_all(&sfi_class->waitq, - CAST_EVENT64_T(sfi_class_id), - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); + CAST_EVENT64_T(sfi_class_id), + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_ON_TIMER) | DBG_FUNC_END, 0, 0, 0, 0, 0); @@ -450,19 +453,22 @@ static void sfi_timer_per_class_on( } -kern_return_t sfi_set_window(uint64_t window_usecs) +kern_return_t +sfi_set_window(uint64_t window_usecs) { - uint64_t interval, deadline; - uint64_t now = mach_absolute_time(); - sfi_class_id_t i; - spl_t s; - uint64_t largest_class_off_interval = 0; + uint64_t interval, deadline; + uint64_t now = mach_absolute_time(); + sfi_class_id_t i; + spl_t s; + uint64_t largest_class_off_interval = 0; - if (window_usecs < MIN_SFI_WINDOW_USEC) + if (window_usecs < MIN_SFI_WINDOW_USEC) { window_usecs = MIN_SFI_WINDOW_USEC; + } - if (window_usecs > UINT32_MAX) - return (KERN_INVALID_ARGUMENT); + if (window_usecs > UINT32_MAX) { + return KERN_INVALID_ARGUMENT; + } KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_WINDOW), window_usecs, 0, 0, 0, 0); @@ -471,7 +477,7 @@ kern_return_t sfi_set_window(uint64_t window_usecs) s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); /* Check that we are not bringing in the SFI window smaller than any class */ for (i = 0; i < MAX_SFI_CLASS_ID; i++) { @@ -487,7 +493,7 @@ kern_return_t sfi_set_window(uint64_t window_usecs) if (interval <= largest_class_off_interval) { simple_unlock(&sfi_lock); splx(s); - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } /* @@ -507,35 +513,36 @@ kern_return_t sfi_set_window(uint64_t window_usecs) sfi_is_enabled = TRUE; sfi_next_off_deadline = deadline; timer_call_enter1(&sfi_timer_call_entry, - NULL, - sfi_next_off_deadline, - 
TIMER_CALL_SYS_CRITICAL); + NULL, + sfi_next_off_deadline, + TIMER_CALL_SYS_CRITICAL); } else if (deadline >= sfi_next_off_deadline) { sfi_next_off_deadline = deadline; } else { sfi_next_off_deadline = deadline; timer_call_enter1(&sfi_timer_call_entry, - NULL, - sfi_next_off_deadline, - TIMER_CALL_SYS_CRITICAL); + NULL, + sfi_next_off_deadline, + TIMER_CALL_SYS_CRITICAL); } simple_unlock(&sfi_lock); splx(s); - return (KERN_SUCCESS); + return KERN_SUCCESS; } -kern_return_t sfi_window_cancel(void) +kern_return_t +sfi_window_cancel(void) { - spl_t s; + spl_t s; s = splsched(); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_WINDOW), 0, 0, 0, 0, 0); /* Disable globals so that global "off-timer" is not re-armed */ - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); sfi_window_is_set = FALSE; sfi_window_usecs = 0; sfi_window_interval = 0; @@ -545,7 +552,7 @@ kern_return_t sfi_window_cancel(void) splx(s); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* Defers SFI off and per-class on timers (if live) by the specified interval @@ -556,15 +563,16 @@ kern_return_t sfi_window_cancel(void) * alignment and congruency of the SFI/GFI periods can distort this to some extent. */ -kern_return_t sfi_defer(uint64_t sfi_defer_matus) +kern_return_t +sfi_defer(uint64_t sfi_defer_matus) { - spl_t s; + spl_t s; kern_return_t kr = KERN_FAILURE; s = splsched(); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_GLOBAL_DEFER), sfi_defer_matus, 0, 0, 0, 0); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); if (!sfi_is_enabled) { goto sfi_defer_done; } @@ -591,17 +599,18 @@ sfi_defer_done: splx(s); - return (kr); + return kr; } -kern_return_t sfi_get_window(uint64_t *window_usecs) +kern_return_t +sfi_get_window(uint64_t *window_usecs) { - spl_t s; - uint64_t off_window_us; + spl_t s; + uint64_t off_window_us; s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); off_window_us = sfi_window_usecs; @@ -610,24 +619,28 @@ kern_return_t sfi_get_window(uint64_t *window_usecs) *window_usecs = off_window_us; - return (KERN_SUCCESS); + return KERN_SUCCESS; } -kern_return_t sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usecs) +kern_return_t +sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_usecs) { - uint64_t interval; - spl_t s; - uint64_t off_window_interval; + uint64_t interval; + spl_t s; + uint64_t off_window_interval; - if (offtime_usecs < MIN_SFI_WINDOW_USEC) + if (offtime_usecs < MIN_SFI_WINDOW_USEC) { offtime_usecs = MIN_SFI_WINDOW_USEC; + } - if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) - return (KERN_INVALID_ARGUMENT); + if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) { + return KERN_INVALID_ARGUMENT; + } - if (offtime_usecs > UINT32_MAX) - return (KERN_INVALID_ARGUMENT); + if (offtime_usecs > UINT32_MAX) { + return KERN_INVALID_ARGUMENT; + } KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_SET_CLASS_OFFTIME), offtime_usecs, class_id, 0, 0, 0); @@ -635,14 +648,14 @@ kern_return_t sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_us s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); off_window_interval = sfi_window_interval; /* Check that we are not bringing in class off-time larger than the SFI window */ if (off_window_interval && (interval >= off_window_interval)) { simple_unlock(&sfi_lock); splx(s); - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } /* We never re-program the per-class on-timer, 
but rather just let it expire naturally */ @@ -658,30 +671,32 @@ kern_return_t sfi_set_class_offtime(sfi_class_id_t class_id, uint64_t offtime_us sfi_is_enabled = TRUE; sfi_next_off_deadline = mach_absolute_time() + sfi_window_interval; timer_call_enter1(&sfi_timer_call_entry, - NULL, - sfi_next_off_deadline, - TIMER_CALL_SYS_CRITICAL); + NULL, + sfi_next_off_deadline, + TIMER_CALL_SYS_CRITICAL); } simple_unlock(&sfi_lock); splx(s); - return (KERN_SUCCESS); + return KERN_SUCCESS; } -kern_return_t sfi_class_offtime_cancel(sfi_class_id_t class_id) +kern_return_t +sfi_class_offtime_cancel(sfi_class_id_t class_id) { - spl_t s; + spl_t s; - if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) - return (KERN_INVALID_ARGUMENT); + if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) { + return KERN_INVALID_ARGUMENT; + } s = splsched(); KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_CANCEL_CLASS_OFFTIME), class_id, 0, 0, 0, 0); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); /* We never re-program the per-class on-timer, but rather just let it expire naturally */ if (sfi_classes[class_id].class_sfi_is_enabled) { @@ -699,20 +714,22 @@ kern_return_t sfi_class_offtime_cancel(sfi_class_id_t class_id) splx(s); - return (KERN_SUCCESS); + return KERN_SUCCESS; } -kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_usecs) +kern_return_t +sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_usecs) { - uint64_t off_time_us; - spl_t s; + uint64_t off_time_us; + spl_t s; - if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) - return (0); + if (class_id == SFI_CLASS_UNSPECIFIED || class_id >= MAX_SFI_CLASS_ID) { + return 0; + } s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); off_time_us = sfi_classes[class_id].off_time_usecs; simple_unlock(&sfi_lock); @@ -720,7 +737,7 @@ kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_u *offtime_usecs = off_time_us; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -741,7 +758,8 @@ kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_u * Thread must be locked. Ultimately, the real decision to enter * SFI wait happens at the AST boundary. 
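 *
 * As a rough sketch, the checks below resolve in this order (simplified;
 * the code that follows is authoritative):
 *
 *     kernel task, not opted in        -> SFI_CLASS_KERNEL
 *     QoS MAINTENANCE                  -> SFI_CLASS_MAINTENANCE
 *     BG flag or QoS BACKGROUND        -> SFI_CLASS_DARWIN_BG
 *     managed task, by role and QoS    -> SFI_CLASS_MANAGED_{FOCAL,NONFOCAL}
 *     QoS UTILITY                      -> SFI_CLASS_UTILITY
 *     everything else, by role/QoS     -> {USER_INTERACTIVE, USER_INITIATED,
 *                                          LEGACY, DEFAULT}_{FOCAL,NONFOCAL}
 *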
*/ -sfi_class_id_t sfi_thread_classify(thread_t thread) +sfi_class_id_t +sfi_thread_classify(thread_t thread) { task_t task = thread->task; boolean_t is_kernel_thread = (task == kernel_task); @@ -760,8 +778,9 @@ sfi_class_id_t sfi_thread_classify(thread_t thread) return SFI_CLASS_KERNEL; } - if (thread_qos == THREAD_QOS_MAINTENANCE) + if (thread_qos == THREAD_QOS_MAINTENANCE) { return SFI_CLASS_MAINTENANCE; + } if (thread_bg || thread_qos == THREAD_QOS_BACKGROUND) { return SFI_CLASS_DARWIN_BG; @@ -795,13 +814,14 @@ sfi_class_id_t sfi_thread_classify(thread_t thread) case TASK_DEFAULT_APPLICATION: case TASK_UNSPECIFIED: /* Focal if the task is in a coalition with a FG/focal app */ - if (task_coalition_focal_count(thread->task) > 0) + if (task_coalition_focal_count(thread->task) > 0) { focal = TRUE; + } break; case TASK_THROTTLE_APPLICATION: case TASK_DARWINBG_APPLICATION: case TASK_NONUI_APPLICATION: - /* Definitely not focal */ + /* Definitely not focal */ default: break; } @@ -811,17 +831,19 @@ sfi_class_id_t sfi_thread_classify(thread_t thread) case THREAD_QOS_UNSPECIFIED: case THREAD_QOS_LEGACY: case THREAD_QOS_USER_INITIATED: - if (focal) + if (focal) { return SFI_CLASS_MANAGED_FOCAL; - else + } else { return SFI_CLASS_MANAGED_NONFOCAL; + } default: break; } } - if (thread_qos == THREAD_QOS_UTILITY) + if (thread_qos == THREAD_QOS_UTILITY) { return SFI_CLASS_UTILITY; + } /* * Classify threads in non-managed tasks @@ -854,7 +876,8 @@ sfi_class_id_t sfi_thread_classify(thread_t thread) /* * pset must be locked. */ -sfi_class_id_t sfi_processor_active_thread_classify(processor_t processor) +sfi_class_id_t +sfi_processor_active_thread_classify(processor_t processor) { return processor->current_sfi_class; } @@ -864,20 +887,23 @@ sfi_class_id_t sfi_processor_active_thread_classify(processor_t processor) * at the AST boundary, it will be fully evaluated whether we need to * perform an AST wait */ -ast_t sfi_thread_needs_ast(thread_t thread, sfi_class_id_t *out_class) +ast_t +sfi_thread_needs_ast(thread_t thread, sfi_class_id_t *out_class) { sfi_class_id_t class_id; class_id = sfi_thread_classify(thread); - if (out_class) + if (out_class) { *out_class = class_id; + } /* No lock taken, so a stale value may be used. */ - if (!sfi_classes[class_id].class_in_on_phase) + if (!sfi_classes[class_id].class_in_on_phase) { return AST_SFI; - else + } else { return AST_NONE; + } } /* @@ -888,31 +914,33 @@ ast_t sfi_thread_needs_ast(thread_t thread, sfi_class_id_t *out_class) * to evaluate if the current running thread at that * later point in time should be in an SFI wait. */ -ast_t sfi_processor_needs_ast(processor_t processor) +ast_t +sfi_processor_needs_ast(processor_t processor) { sfi_class_id_t class_id; class_id = sfi_processor_active_thread_classify(processor); /* No lock taken, so a stale value may be used. 
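 *
 * The worst case for reading class_in_on_phase without sfi_lock is a
 * spurious AST_SFI, which sfi_ast() discards after re-checking under
 * sfi_lock, or a briefly missed one, which the next AST boundary picks
 * up. Either way only the precision of the window edge is affected,
 * never correctness.
 *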
*/ - if (!sfi_classes[class_id].class_in_on_phase) + if (!sfi_classes[class_id].class_in_on_phase) { return AST_SFI; - else + } else { return AST_NONE; - + } } -static inline void _sfi_wait_cleanup(void) +static inline void +_sfi_wait_cleanup(void) { thread_t self = current_thread(); spl_t s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); sfi_class_id_t current_sfi_wait_class = self->sfi_wait_class; assert((SFI_CLASS_UNSPECIFIED < current_sfi_wait_class) && - (current_sfi_wait_class < MAX_SFI_CLASS_ID)); + (current_sfi_wait_class < MAX_SFI_CLASS_ID)); self->sfi_wait_class = SFI_CLASS_UNSPECIFIED; @@ -941,7 +969,7 @@ static inline void _sfi_wait_cleanup(void) assert(sfi_wait_time >= 0); ledger_credit(self->task->ledger, task_ledgers.sfi_wait_times[current_sfi_wait_class], - sfi_wait_time); + sfi_wait_time); #endif /* !CONFIG_EMBEDDED */ self->wait_sfi_begin_time = 0; @@ -954,18 +982,19 @@ static inline void _sfi_wait_cleanup(void) * We must take the sfi_lock to check whether we are in the "off" period * for the class, and if so, block. */ -void sfi_ast(thread_t thread) +void +sfi_ast(thread_t thread) { sfi_class_id_t class_id; - spl_t s; - struct sfi_class_state *sfi_class; - wait_result_t waitret; - boolean_t did_wait = FALSE; - thread_continue_t continuation; + spl_t s; + struct sfi_class_state *sfi_class; + wait_result_t waitret; + boolean_t did_wait = FALSE; + thread_continue_t continuation; s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); if (!sfi_is_enabled) { /* @@ -998,11 +1027,11 @@ void sfi_ast(thread_t thread) if (!sfi_class->class_in_on_phase) { /* Need to block thread in wait queue */ KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_THREAD_DEFER), - thread_tid(thread), class_id, 0, 0, 0); + thread_tid(thread), class_id, 0, 0, 0); waitret = waitq_assert_wait64(&sfi_class->waitq, - CAST_EVENT64_T(class_id), - THREAD_INTERRUPTIBLE | THREAD_WAIT_NOREPORT, 0); + CAST_EVENT64_T(class_id), + THREAD_INTERRUPTIBLE | THREAD_WAIT_NOREPORT, 0); if (waitret == THREAD_WAITING) { thread->sfi_wait_class = class_id; did_wait = TRUE; @@ -1024,16 +1053,17 @@ void sfi_ast(thread_t thread) } /* Thread must be unlocked */ -void sfi_reevaluate(thread_t thread) +void +sfi_reevaluate(thread_t thread) { kern_return_t kret; - spl_t s; + spl_t s; sfi_class_id_t class_id, current_class_id; - ast_t sfi_ast; + ast_t sfi_ast; s = splsched(); - simple_lock(&sfi_lock); + simple_lock(&sfi_lock, LCK_GRP_NULL); thread_lock(thread); sfi_ast = sfi_thread_needs_ast(thread, &class_id); @@ -1057,20 +1087,19 @@ void sfi_reevaluate(thread_t thread) */ if ((current_class_id = thread->sfi_wait_class) != SFI_CLASS_UNSPECIFIED) { - thread_unlock(thread); /* not needed anymore */ assert(current_class_id < MAX_SFI_CLASS_ID); if ((sfi_ast == AST_NONE) || (class_id != current_class_id)) { - struct sfi_class_state *sfi_class = &sfi_classes[current_class_id]; + struct sfi_class_state *sfi_class = &sfi_classes[current_class_id]; KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SFI, SFI_WAIT_CANCELED), thread_tid(thread), current_class_id, class_id, 0, 0); kret = waitq_wakeup64_thread(&sfi_class->waitq, - CAST_EVENT64_T(current_class_id), - thread, - THREAD_AWAKENED); + CAST_EVENT64_T(current_class_id), + thread, + THREAD_AWAKENED); assert(kret == KERN_SUCCESS || kret == KERN_NOT_WAITING); } } else { @@ -1081,14 +1110,14 @@ void sfi_reevaluate(thread_t thread) if ((thread->state & (TH_RUN | TH_IDLE)) == TH_RUN) { if (sfi_ast != AST_NONE) { - if (thread == 
current_thread()) + if (thread == current_thread()) { ast_on(sfi_ast); - else { + } else { processor_t processor = thread->last_processor; - + if (processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->active_thread == thread) { + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread) { cause_ast_check(processor); } else { /* @@ -1110,44 +1139,52 @@ void sfi_reevaluate(thread_t thread) #else /* !CONFIG_SCHED_SFI */ -kern_return_t sfi_set_window(uint64_t window_usecs __unused) +kern_return_t +sfi_set_window(uint64_t window_usecs __unused) { - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; } -kern_return_t sfi_window_cancel(void) +kern_return_t +sfi_window_cancel(void) { - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; } -kern_return_t sfi_get_window(uint64_t *window_usecs __unused) +kern_return_t +sfi_get_window(uint64_t *window_usecs __unused) { - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; } -kern_return_t sfi_set_class_offtime(sfi_class_id_t class_id __unused, uint64_t offtime_usecs __unused) +kern_return_t +sfi_set_class_offtime(sfi_class_id_t class_id __unused, uint64_t offtime_usecs __unused) { - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; } -kern_return_t sfi_class_offtime_cancel(sfi_class_id_t class_id __unused) +kern_return_t +sfi_class_offtime_cancel(sfi_class_id_t class_id __unused) { - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; } -kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id __unused, uint64_t *offtime_usecs __unused) +kern_return_t +sfi_get_class_offtime(sfi_class_id_t class_id __unused, uint64_t *offtime_usecs __unused) { - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; } -void sfi_reevaluate(thread_t thread __unused) +void +sfi_reevaluate(thread_t thread __unused) { return; } -sfi_class_id_t sfi_thread_classify(thread_t thread) +sfi_class_id_t +sfi_thread_classify(thread_t thread) { task_t task = thread->task; boolean_t is_kernel_thread = (task == kernel_task); diff --git a/osfmk/kern/sfi.h b/osfmk/kern/sfi.h index 7ac6259b3..effa349ef 100644 --- a/osfmk/kern/sfi.h +++ b/osfmk/kern/sfi.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -55,7 +55,7 @@ kern_return_t sfi_get_class_offtime(sfi_class_id_t class_id, uint64_t *offtime_u * Classifying a thread requires no special locks to be held (although attribute * changes that cause an inconsistent snapshot may cause a spurious AST). Final * evaluation will happen at the AST boundary with the thread locked. If possible, - * + * */ sfi_class_id_t sfi_thread_classify(thread_t thread); sfi_class_id_t sfi_processor_active_thread_classify(processor_t processor); diff --git a/osfmk/kern/simple_lock.h b/osfmk/kern/simple_lock.h index b66313f7f..258d323db 100644 --- a/osfmk/kern/simple_lock.h +++ b/osfmk/kern/simple_lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Copyright (C) 1998 Apple Computer * All Rights Reserved */ @@ -36,24 +36,24 @@ * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -65,123 +65,179 @@ * Atomic primitives and Simple Locking primitives definitions */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _KERN_SIMPLE_LOCK_H_ -#define _KERN_SIMPLE_LOCK_H_ +#ifndef _KERN_SIMPLE_LOCK_H_ +#define _KERN_SIMPLE_LOCK_H_ #include #include #include +#include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include -extern void hw_lock_init( - hw_lock_t); +extern void hw_lock_init( + hw_lock_t); + +#if LOCK_STATS +extern void hw_lock_lock( + hw_lock_t, + lck_grp_t*); + +extern void hw_lock_lock_nopreempt( + hw_lock_t, + lck_grp_t*); + +extern unsigned int hw_lock_to( + hw_lock_t, + uint64_t, + lck_grp_t*); -extern void hw_lock_lock( - hw_lock_t); +extern unsigned int hw_lock_try( + hw_lock_t, + lck_grp_t*); -extern void hw_lock_lock_nopreempt( - hw_lock_t); +extern unsigned int hw_lock_try_nopreempt( + hw_lock_t, + lck_grp_t*); -extern void hw_lock_unlock( - hw_lock_t); +#else -extern void hw_lock_unlock_nopreempt( - hw_lock_t); +extern void hw_lock_lock( + hw_lock_t); -extern unsigned int hw_lock_to( - hw_lock_t, - uint64_t); +#define hw_lock_lock(lck, grp) hw_lock_lock(lck) -extern unsigned int hw_lock_try( - hw_lock_t); +extern void hw_lock_lock_nopreempt( + hw_lock_t); +#define hw_lock_lock_nopreempt(lck, grp) hw_lock_lock_nopreempt(lck) -extern unsigned int hw_lock_try_nopreempt( - hw_lock_t); +extern unsigned int hw_lock_to( + hw_lock_t, + uint64_t); +#define hw_lock_to(lck, timeout, grp) hw_lock_to(lck, timeout) -extern unsigned int hw_lock_held( - hw_lock_t); -#endif /* MACH_KERNEL_PRIVATE */ +extern unsigned int hw_lock_try( + hw_lock_t); +#define hw_lock_try(lck, grp) hw_lock_try(lck) + +extern unsigned int hw_lock_try_nopreempt( + hw_lock_t); +#define hw_lock_try_nopreempt(lck, grp) hw_lock_try_nopreempt(lck) + + +#endif /* LOCK_STATS */ + +extern void hw_lock_unlock( + hw_lock_t); + +extern void hw_lock_unlock_nopreempt( + hw_lock_t); + +extern unsigned int hw_lock_held( + hw_lock_t); + +#endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS -extern uint32_t hw_atomic_add( - volatile uint32_t *dest, - uint32_t delt); +extern uint32_t hw_atomic_add( + volatile uint32_t *dest, + uint32_t delt); -extern uint32_t hw_atomic_sub( - volatile uint32_t *dest, - uint32_t delt); +extern uint32_t hw_atomic_sub( + volatile uint32_t *dest, + uint32_t delt); -extern uint32_t hw_atomic_or( - volatile uint32_t *dest, - uint32_t mask); +extern uint32_t hw_atomic_or( + volatile uint32_t *dest, + uint32_t mask); -extern uint32_t hw_atomic_and( - volatile uint32_t *dest, - uint32_t mask); +extern uint32_t hw_atomic_and( + volatile uint32_t *dest, + uint32_t mask); /* * Variant of hw_atomic_or which doesn't return a value; potentially * more efficient on some platforms. */ -extern void hw_atomic_or_noret( - volatile uint32_t *dest, - uint32_t mask); +extern void hw_atomic_or_noret( + volatile uint32_t *dest, + uint32_t mask); /* * Variant of hw_atomic_and which doesn't return a value; potentially * more efficient on some platforms. 
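 *
 * For example (sketch only; FLAG_BUSY and do_idle_work() are
 * illustrative, not part of this interface):
 *
 *     hw_atomic_and_noret(&flags, ~FLAG_BUSY);      // just clear the bit
 *     if (hw_atomic_and(&flags, ~FLAG_BUSY) == 0)   // clear and inspect
 *             do_idle_work();
 *
 * When the caller ignores the result, the _noret form lets the
 * implementation avoid materializing the new value in a register.
 *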
*/ -extern void hw_atomic_and_noret( - volatile uint32_t *dest, - uint32_t mask); +extern void hw_atomic_and_noret( + volatile uint32_t *dest, + uint32_t mask); + +extern uint32_t hw_compare_and_store( + uint32_t oldval, + uint32_t newval, + volatile uint32_t *dest); + +extern void hw_queue_atomic( + unsigned int *anchor, + unsigned int *elem, + unsigned int disp); + +extern void hw_queue_atomic_list( + unsigned int *anchor, + unsigned int *first, + unsigned int *last, + unsigned int disp); + +extern unsigned int *hw_dequeue_atomic( + unsigned int *anchor, + unsigned int disp); + +extern void usimple_lock_init( + usimple_lock_t, + unsigned short); + +#if LOCK_STATS +extern void usimple_lock( + usimple_lock_t, + lck_grp_t*); -extern uint32_t hw_compare_and_store( - uint32_t oldval, - uint32_t newval, - volatile uint32_t *dest); +extern unsigned int usimple_lock_try( + usimple_lock_t, + lck_grp_t*); -extern void hw_queue_atomic( - unsigned int *anchor, - unsigned int *elem, - unsigned int disp); +extern void usimple_lock_try_lock_loop( + usimple_lock_t, + lck_grp_t*); +#else +extern void usimple_lock( + usimple_lock_t); +#define usimple_lock(lck, grp) usimple_lock(lck) -extern void hw_queue_atomic_list( - unsigned int *anchor, - unsigned int *first, - unsigned int *last, - unsigned int disp); -extern unsigned int *hw_dequeue_atomic( - unsigned int *anchor, - unsigned int disp); +extern unsigned int usimple_lock_try( + usimple_lock_t); -extern void usimple_lock_init( - usimple_lock_t, - unsigned short); +#define usimple_lock_try(lck, grp) usimple_lock_try(lck) -extern void usimple_lock( - usimple_lock_t); +extern void usimple_lock_try_lock_loop( + usimple_lock_t); +#define usimple_lock_try_lock_loop(lck, grp) usimple_lock_try_lock_loop(lck) -extern void usimple_unlock( - usimple_lock_t); +#endif /* LOCK_STATS */ -extern unsigned int usimple_lock_try( - usimple_lock_t); +extern void usimple_unlock( + usimple_lock_t); -extern void usimple_lock_try_lock_loop( - usimple_lock_t); __END_DECLS -#define ETAP_NO_TRACE 0 -#define ETAP_IO_AHA 0 +#define ETAP_NO_TRACE 0 +#define ETAP_IO_AHA 0 /* * If we got to here and we still don't have simple_lock_init @@ -189,14 +245,14 @@ __END_DECLS * running on a true SMP, or need debug. */ #if !defined(simple_lock_init) -#define simple_lock_init(l,t) usimple_lock_init(l,t) -#define simple_lock(l) usimple_lock(l) -#define simple_unlock(l) usimple_unlock(l) -#define simple_lock_try(l) usimple_lock_try(l) -#define simple_lock_try_lock_loop(l) usimple_lock_try_lock_loop(l) -#define simple_lock_addr(l) (&(l)) +#define simple_lock_init(l, t) usimple_lock_init(l,t) +#define simple_lock(l, grp) usimple_lock(l, grp) +#define simple_unlock(l) usimple_unlock(l) +#define simple_lock_try(l, grp) usimple_lock_try(l, grp) +#define simple_lock_try_lock_loop(l, grp) usimple_lock_try_lock_loop(l, grp) +#define simple_lock_addr(l) (&(l)) #endif /* !defined(simple_lock_init) */ #endif /*!_KERN_SIMPLE_LOCK_H_*/ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/kern/smp.h b/osfmk/kern/smp.h index d4e099b97..d18db82a6 100644 --- a/osfmk/kern/smp.h +++ b/osfmk/kern/smp.h @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,17 +22,17 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _KERN_SMP_H_ -#define _KERN_SMP_H_ +#ifndef _KERN_SMP_H_ +#define _KERN_SMP_H_ -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -#endif /* _KERN_SMP_H_ */ +#endif /* _KERN_SMP_H_ */ diff --git a/osfmk/kern/spl.c b/osfmk/kern/spl.c index 190bde320..d2a26f335 100644 --- a/osfmk/kern/spl.c +++ b/osfmk/kern/spl.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -30,28 +30,28 @@ #include /* - * spl routines + * spl routines */ -__private_extern__ spl_t +__private_extern__ spl_t splhigh( void) { - return(ml_set_interrupts_enabled(FALSE)); + return ml_set_interrupts_enabled(FALSE); } __private_extern__ spl_t splsched( void) { - return(ml_set_interrupts_enabled(FALSE)); + return ml_set_interrupts_enabled(FALSE); } __private_extern__ spl_t splclock( void) { - return(ml_set_interrupts_enabled(FALSE)); + return ml_set_interrupts_enabled(FALSE); } __private_extern__ void diff --git a/osfmk/kern/spl.h b/osfmk/kern/spl.h index ac615ff05..3352c516f 100644 --- a/osfmk/kern/spl.h +++ b/osfmk/kern/spl.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,20 +22,20 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _KERN_SPL_H_ -#define _KERN_SPL_H_ +#ifndef _KERN_SPL_H_ +#define _KERN_SPL_H_ #include typedef unsigned spl_t; -#define splhigh() (spl_t) ml_set_interrupts_enabled(FALSE) -#define splsched() (spl_t) ml_set_interrupts_enabled(FALSE) -#define splclock() (spl_t) ml_set_interrupts_enabled(FALSE) -#define splx(x) (void) ml_set_interrupts_enabled(x) -#define spllo() (void) ml_set_interrupts_enabled(TRUE) +#define splhigh() (spl_t) ml_set_interrupts_enabled(FALSE) +#define splsched() (spl_t) ml_set_interrupts_enabled(FALSE) +#define splclock() (spl_t) ml_set_interrupts_enabled(FALSE) +#define splx(x) (void) ml_set_interrupts_enabled(x) +#define spllo() (void) ml_set_interrupts_enabled(TRUE) -#endif /* _KERN_SPL_H_ */ +#endif /* _KERN_SPL_H_ */ diff --git a/osfmk/kern/stack.c b/osfmk/kern/stack.c index c56a597bc..287458364 100644 --- a/osfmk/kern/stack.c +++ b/osfmk/kern/stack.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include @@ -54,46 +55,47 @@ * because stack_alloc_try/thread_invoke operate at splsched. 
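 *
 * At splsched interrupts are off and the thread cannot be preempted, so
 * the per-processor cache can be popped with no lock at all; only the
 * global free list takes stack_lock(). The fast path is essentially
 * (simplified from stack_alloc_try() below):
 *
 *     cache = &PROCESSOR_DATA(current_processor(), stack_cache);
 *     if ((stack = cache->free) != 0) {
 *             cache->free = stack_next(stack);
 *             cache->count--;
 *     }
 *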
*/ -decl_simple_lock_data(static,stack_lock_data) -#define stack_lock() simple_lock(&stack_lock_data) -#define stack_unlock() simple_unlock(&stack_lock_data) +decl_simple_lock_data(static, stack_lock_data) +#define stack_lock() simple_lock(&stack_lock_data, LCK_GRP_NULL) +#define stack_unlock() simple_unlock(&stack_lock_data) -#define STACK_CACHE_SIZE 2 +#define STACK_CACHE_SIZE 2 -static vm_offset_t stack_free_list; +static vm_offset_t stack_free_list; -static unsigned int stack_free_count, stack_free_hiwat; /* free list count */ -static unsigned int stack_hiwat; -unsigned int stack_total; /* current total count */ -unsigned long long stack_allocs; /* total count of allocations */ +static unsigned int stack_free_count, stack_free_hiwat; /* free list count */ +static unsigned int stack_hiwat; +unsigned int stack_total; /* current total count */ +unsigned long long stack_allocs; /* total count of allocations */ -static int stack_fake_zone_index = -1; /* index in zone_info array */ +static int stack_fake_zone_index = -1; /* index in zone_info array */ -static unsigned int stack_free_target; -static int stack_free_delta; +static unsigned int stack_free_target; +static int stack_free_delta; -static unsigned int stack_new_count; /* total new stack allocations */ +static unsigned int stack_new_count; /* total new stack allocations */ -static vm_offset_t stack_addr_mask; +static vm_offset_t stack_addr_mask; -unsigned int kernel_stack_pages; -vm_offset_t kernel_stack_size; -vm_offset_t kernel_stack_mask; -vm_offset_t kernel_stack_depth_max; +unsigned int kernel_stack_pages; +vm_offset_t kernel_stack_size; +vm_offset_t kernel_stack_mask; +vm_offset_t kernel_stack_depth_max; /* * The next field is at the base of the stack, * so the low end is left unsullied. 
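 *
 * In other words the free list is intrusive: the link to the next free
 * stack lives in the last pointer-sized word at the high (base) end of
 * the stack itself, so no separate list node is ever allocated, and the
 * low end, where an overflow would land, is never written. Push and pop
 * reduce to (sketch):
 *
 *     stack_next(stack) = stack_free_list;     // push
 *     stack_free_list = stack;
 *
 *     stack = stack_free_list;                 // pop
 *     stack_free_list = stack_next(stack);
 *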
*/ -#define stack_next(stack) \ +#define stack_next(stack) \ (*((vm_offset_t *)((stack) + kernel_stack_size) - 1)) static inline int log2(vm_offset_t size) { - int result; - for (result = 0; size > 0; result++) + int result; + for (result = 0; size > 0; result++) { size >>= 1; + } return result; } @@ -117,16 +119,17 @@ stack_init(void) kernel_stack_depth_max = 0; if (PE_parse_boot_argn("kernel_stack_pages", - &kernel_stack_pages, - sizeof (kernel_stack_pages))) { + &kernel_stack_pages, + sizeof(kernel_stack_pages))) { kernel_stack_size = kernel_stack_pages * PAGE_SIZE; printf("stack_init: kernel_stack_pages=%d kernel_stack_size=%p\n", - kernel_stack_pages, (void *) kernel_stack_size); + kernel_stack_pages, (void *) kernel_stack_size); } - if (kernel_stack_size < round_page(kernel_stack_size)) + if (kernel_stack_size < round_page(kernel_stack_size)) { panic("stack_init: stack size %p not a multiple of page size %d\n", - (void *) kernel_stack_size, PAGE_SIZE); + (void *) kernel_stack_size, PAGE_SIZE); + } stack_addr_mask = roundup_pow2(kernel_stack_size) - 1; kernel_stack_mask = ~stack_addr_mask; @@ -142,10 +145,10 @@ stack_init(void) static vm_offset_t stack_alloc_internal(void) { - vm_offset_t stack = 0; - spl_t s; - int flags = 0; - kern_return_t kr = KERN_SUCCESS; + vm_offset_t stack = 0; + spl_t s; + int flags = 0; + kern_return_t kr = KERN_SUCCESS; s = splsched(); stack_lock(); @@ -154,10 +157,10 @@ stack_alloc_internal(void) if (stack != 0) { stack_free_list = stack_next(stack); stack_free_count--; - } - else { - if (++stack_total > stack_hiwat) + } else { + if (++stack_total > stack_hiwat) { stack_hiwat = stack_total; + } stack_new_count++; } stack_free_delta--; @@ -165,7 +168,6 @@ stack_alloc_internal(void) splx(s); if (stack == 0) { - /* * Request guard pages on either side of the stack. 
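 * The resulting layout is roughly (sketch; each cell one or more pages):
 *
 *     +-------+--------------------------+-------+
 *     | guard |    kernel_stack_size     | guard |
 *     +-------+--------------------------+-------+
 *             ^ usable stack begins here
 *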
Ask * kernel_memory_allocate() for two extra pages to account @@ -174,12 +176,12 @@ stack_alloc_internal(void) flags = KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_KSTACK | KMA_KOBJECT | KMA_ZERO; kr = kernel_memory_allocate(kernel_map, &stack, - kernel_stack_size + (2*PAGE_SIZE), - stack_addr_mask, - flags, - VM_KERN_MEMORY_STACK); + kernel_stack_size + (2 * PAGE_SIZE), + stack_addr_mask, + flags, + VM_KERN_MEMORY_STACK); if (kr != KERN_SUCCESS) { - panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2*PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr); + panic("stack_alloc: kernel_memory_allocate(size:0x%llx, mask: 0x%llx, flags: 0x%x) failed with %d\n", (uint64_t)(kernel_stack_size + (2 * PAGE_SIZE)), (uint64_t)stack_addr_mask, flags, kr); } /* @@ -194,9 +196,8 @@ stack_alloc_internal(void) void stack_alloc( - thread_t thread) + thread_t thread) { - assert(thread->kernel_stack == 0); machine_stack_attach(thread, stack_alloc_internal()); } @@ -215,9 +216,9 @@ stack_handoff(thread_t from, thread_t to) */ void stack_free( - thread_t thread) + thread_t thread) { - vm_offset_t stack = machine_stack_detach(thread); + vm_offset_t stack = machine_stack_detach(thread); assert(stack); if (stack != thread->reserved_stack) { @@ -227,7 +228,7 @@ stack_free( void stack_free_reserved( - thread_t thread) + thread_t thread) { if (thread->reserved_stack != thread->kernel_stack) { stack_free_stack(thread->reserved_stack); @@ -236,10 +237,10 @@ stack_free_reserved( static void stack_free_stack( - vm_offset_t stack) + vm_offset_t stack) { - struct stack_cache *cache; - spl_t s; + struct stack_cache *cache; + spl_t s; #if KASAN_DEBUG /* Sanity check - stack should be unpoisoned by now */ @@ -252,13 +253,13 @@ stack_free_stack( stack_next(stack) = cache->free; cache->free = stack; cache->count++; - } - else { + } else { stack_lock(); stack_next(stack) = stack_free_list; stack_free_list = stack; - if (++stack_free_count > stack_free_hiwat) + if (++stack_free_count > stack_free_hiwat) { stack_free_hiwat = stack_free_count; + } stack_free_delta++; stack_unlock(); } @@ -277,18 +278,17 @@ stack_free_stack( */ boolean_t stack_alloc_try( - thread_t thread) + thread_t thread) { - struct stack_cache *cache; - vm_offset_t stack; + struct stack_cache *cache; + vm_offset_t stack; cache = &PROCESSOR_DATA(current_processor(), stack_cache); stack = cache->free; if (stack != 0) { cache->free = stack_next(stack); cache->count--; - } - else { + } else { if (stack_free_list != 0) { stack_lock(); stack = stack_free_list; @@ -303,13 +303,13 @@ stack_alloc_try( if (stack != 0 || (stack = thread->reserved_stack) != 0) { machine_stack_attach(thread, stack); - return (TRUE); + return TRUE; } - return (FALSE); + return FALSE; } -static unsigned int stack_collect_tick, last_stack_tick; +static unsigned int stack_collect_tick, last_stack_tick; /* * stack_collect: @@ -321,9 +321,9 @@ void stack_collect(void) { if (stack_collect_tick != last_stack_tick) { - unsigned int target; - vm_offset_t stack; - spl_t s; + unsigned int target; + vm_offset_t stack; + spl_t s; s = splsched(); stack_lock(); @@ -353,10 +353,11 @@ stack_collect(void) if (vm_map_remove( kernel_map, stack, - stack + kernel_stack_size+(2*PAGE_SIZE), + stack + kernel_stack_size + (2 * PAGE_SIZE), VM_MAP_REMOVE_KUNWIRE) - != KERN_SUCCESS) + != KERN_SUCCESS) { panic("stack_collect: vm_map_remove"); + } stack = 0; s = splsched(); @@ -384,18 +385,18 @@ stack_collect(void) */ void compute_stack_target( 
-__unused void *arg) + __unused void *arg) { - spl_t s; + spl_t s; s = splsched(); stack_lock(); - if (stack_free_target > 5) + if (stack_free_target > 5) { stack_free_target = (4 * stack_free_target) / 5; - else - if (stack_free_target > 0) + } else if (stack_free_target > 0) { stack_free_target--; + } stack_free_target += (stack_free_delta >= 0)? stack_free_delta: -stack_free_delta; @@ -414,12 +415,12 @@ stack_fake_zone_init(int zone_index) void stack_fake_zone_info(int *count, - vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size, - uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct) + vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size, + uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct) { - unsigned int total, hiwat, free; + unsigned int total, hiwat, free; unsigned long long all; - spl_t s; + spl_t s; s = splsched(); stack_lock(); @@ -443,12 +444,12 @@ stack_fake_zone_info(int *count, } /* OBSOLETE */ -void stack_privilege( - thread_t thread); +void stack_privilege( + thread_t thread); void stack_privilege( - __unused thread_t thread) + __unused thread_t thread) { /* OBSOLETE */ } @@ -458,15 +459,15 @@ stack_privilege( */ kern_return_t processor_set_stack_usage( - processor_set_t pset, - unsigned int *totalp, - vm_size_t *spacep, - vm_size_t *residentp, - vm_size_t *maxusagep, - vm_offset_t *maxstackp) + processor_set_t pset, + unsigned int *totalp, + vm_size_t *spacep, + vm_size_t *residentp, + vm_size_t *maxusagep, + vm_offset_t *maxstackp) { #if !MACH_DEBUG - return KERN_NOT_SUPPORTED; + return KERN_NOT_SUPPORTED; #else unsigned int total; vm_size_t maxusage; @@ -475,14 +476,15 @@ processor_set_stack_usage( thread_t *thread_list; thread_t thread; - unsigned int actual; /* this many things */ + unsigned int actual; /* this many things */ unsigned int i; vm_size_t size, size_needed; void *addr; - if (pset == PROCESSOR_SET_NULL || pset != &pset0) + if (pset == PROCESSOR_SET_NULL || pset != &pset0) { return KERN_INVALID_ARGUMENT; + } size = 0; addr = NULL; @@ -495,27 +497,30 @@ processor_set_stack_usage( /* do we have the memory we need? 
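 * This is the usual allocate-outside-the-lock dance: guess a buffer
 * size, take the lock, and if the population grew past the guess, drop
 * the lock, reallocate, and try again. Condensed (sketch of the loop
 * below):
 *
 *     for (;;) {
 *             lck_mtx_lock(&tasks_threads_lock);
 *             size_needed = actual * sizeof(thread_t);
 *             if (size_needed <= size)
 *                     break;
 *             lck_mtx_unlock(&tasks_threads_lock);
 *             kfree(addr, size);
 *             addr = kalloc(size = size_needed);
 *     }
 *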
*/ size_needed = actual * sizeof(thread_t); - if (size_needed <= size) + if (size_needed <= size) { break; + } lck_mtx_unlock(&tasks_threads_lock); - if (size != 0) + if (size != 0) { kfree(addr, size); + } assert(size_needed > 0); size = size_needed; addr = kalloc(size); - if (addr == 0) + if (addr == 0) { return KERN_RESOURCE_SHORTAGE; + } } /* OK, have memory and list is locked */ thread_list = (thread_t *) addr; for (i = 0, thread = (thread_t)(void *) queue_first(&threads); - !queue_end(&threads, (queue_entry_t) thread); - thread = (thread_t)(void *) queue_next(&thread->threads)) { + !queue_end(&threads, (queue_entry_t) thread); + thread = (thread_t)(void *) queue_next(&thread->threads)) { thread_reference_internal(thread); thread_list[i++] = thread; } @@ -531,14 +536,16 @@ processor_set_stack_usage( while (i > 0) { thread_t threadref = thread_list[--i]; - if (threadref->kernel_stack != 0) + if (threadref->kernel_stack != 0) { total++; + } thread_deallocate(threadref); } - if (size != 0) + if (size != 0) { kfree(addr, size); + } *totalp = total; *residentp = *spacep = total * round_page(kernel_stack_size); @@ -546,15 +553,17 @@ processor_set_stack_usage( *maxstackp = maxstack; return KERN_SUCCESS; -#endif /* MACH_DEBUG */ +#endif /* MACH_DEBUG */ } -vm_offset_t min_valid_stack_address(void) +vm_offset_t +min_valid_stack_address(void) { return (vm_offset_t)vm_map_min(kernel_map); } -vm_offset_t max_valid_stack_address(void) +vm_offset_t +max_valid_stack_address(void) { return (vm_offset_t)vm_map_max(kernel_map); } diff --git a/osfmk/kern/startup.c b/osfmk/kern/startup.c index c0693a117..199c83afc 100644 --- a/osfmk/kern/startup.c +++ b/osfmk/kern/startup.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -173,10 +173,10 @@ extern void vnguard_policy_init(void); #endif #include -static void kernel_bootstrap_thread(void); +static void kernel_bootstrap_thread(void); -static void load_context( - thread_t thread); +static void load_context( + thread_t thread); #if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0 extern void cpu_userwindow_init(int); extern void cpu_physwindow_init(int); @@ -184,7 +184,7 @@ extern void cpu_physwindow_init(int); #if CONFIG_ECC_LOGGING #include -#endif +#endif #if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX #include @@ -246,9 +246,9 @@ void kernel_early_bootstrap(void) { /* serverperfmode is needed by timer setup */ - if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) { - serverperfmode = 1; - } + if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof(serverperfmode))) { + serverperfmode = 1; + } lck_mod_init(); @@ -265,23 +265,22 @@ kernel_early_bootstrap(void) #endif } -extern boolean_t IORamDiskBSDRoot(void); -extern kern_return_t cpm_preallocate_early(void); void kernel_bootstrap(void) { - kern_return_t result; - thread_t thread; - char namep[16]; + kern_return_t result; + thread_t thread; + char namep[16]; printf("%s\n", version); /* log kernel version */ - if (PE_parse_boot_argn("-l", namep, sizeof (namep))) /* leaks logging */ + if (PE_parse_boot_argn("-l", namep, sizeof(namep))) { /* leaks logging */ log_leaks = 1; + } - PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs)); - PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof (wake_nkdbufs)); + PE_parse_boot_argn("trace", &new_nkdbufs, sizeof(new_nkdbufs)); + PE_parse_boot_argn("trace_wake", &wake_nkdbufs, sizeof(wake_nkdbufs)); PE_parse_boot_argn("trace_panic", &write_trace_on_panic, sizeof(write_trace_on_panic)); PE_parse_boot_arg_str("trace_typefilter", trace_typefilter, sizeof(trace_typefilter)); PE_parse_boot_argn("trace_wrap", &trace_wrap, sizeof(trace_wrap)); @@ -363,7 +362,7 @@ kernel_bootstrap(void) PMAP_ACTIVATE_KERNEL(master_cpu); kernel_bootstrap_log("mapping_free_prime"); - mapping_free_prime(); /* Load up with temporary mapping blocks */ + mapping_free_prime(); /* Load up with temporary mapping blocks */ kernel_bootstrap_log("machine_init"); machine_init(); @@ -418,13 +417,21 @@ kernel_bootstrap(void) /* * Create a kernel thread to execute the kernel bootstrap. 
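 *
 * kernel_thread_create() hands back a reference for the caller; since
 * the bootstrap path re-uses the static init thread rather than setting
 * a fresh one running, that reference is dropped explicitly with the
 * thread_deallocate() added below.
 *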
*/ + kernel_bootstrap_log("kernel_thread_create"); result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread); - if (result != KERN_SUCCESS) panic("kernel_bootstrap: result = %08X\n", result); + if (result != KERN_SUCCESS) { + panic("kernel_bootstrap: result = %08X\n", result); + } + + /* The static init_thread is re-used as the bootstrap thread */ + assert(thread == current_thread()); + /* TODO: do a proper thread_start() (without the thread_setrun()) */ thread->state = TH_RUN; thread->last_made_runnable_time = mach_absolute_time(); + thread_deallocate(thread); kernel_bootstrap_log("load_context - done"); @@ -447,7 +454,7 @@ uint64_t vm_kernel_addrhash_salt_ext; static void kernel_bootstrap_thread(void) { - processor_t processor = current_processor(); + processor_t processor = current_processor(); #define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */ kernel_bootstrap_thread_log("idle_thread_create"); @@ -487,12 +494,6 @@ kernel_bootstrap_thread(void) kernel_bootstrap_thread_log("thread_bind"); thread_bind(processor); -#if __arm64__ - if (IORamDiskBSDRoot()) { - cpm_preallocate_early(); - } -#endif /* __arm64__ */ - /* * Initialize ipc thread call support. */ @@ -529,15 +530,15 @@ kernel_bootstrap_thread(void) if (PE_i_can_has_debugger(NULL)) { unsigned int phys_carveout_mb = 0; if (PE_parse_boot_argn("phys_carveout_mb", &phys_carveout_mb, - sizeof(phys_carveout_mb)) && phys_carveout_mb > 0) { + sizeof(phys_carveout_mb)) && phys_carveout_mb > 0) { phys_carveout_size = phys_carveout_mb * 1024 * 1024; kern_return_t kr = kmem_alloc_contig(kernel_map, - (vm_offset_t *)&phys_carveout, phys_carveout_size, - VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT, - VM_KERN_MEMORY_DIAG); + (vm_offset_t *)&phys_carveout, phys_carveout_size, + VM_MAP_PAGE_MASK(kernel_map), 0, 0, KMA_NOPAGEWAIT, + VM_KERN_MEMORY_DIAG); if (kr != KERN_SUCCESS) { kprintf("failed to allocate %uMB for phys_carveout_mb: %u\n", - phys_carveout_mb, (unsigned int)kr); + phys_carveout_mb, (unsigned int)kr); } else { phys_carveout_pa = kvtophys((vm_offset_t)phys_carveout); } @@ -559,7 +560,7 @@ kernel_bootstrap_thread(void) #if CONFIG_ECC_LOGGING ecc_log_init(); -#endif +#endif #if HYPERVISOR hv_support_init(); @@ -579,7 +580,7 @@ kernel_bootstrap_thread(void) kdebug_init(new_nkdbufs, trace_typefilter, trace_wrap); -#ifdef MACH_BSD +#ifdef MACH_BSD kernel_bootstrap_log("bsd_early_init"); bsd_early_init(); #endif @@ -588,7 +589,7 @@ kernel_bootstrap_thread(void) ml_lockdown_init(); #endif -#ifdef IOKIT +#ifdef IOKIT kernel_bootstrap_log("PE_init_iokit"); PE_init_iokit(); #endif @@ -605,7 +606,7 @@ kernel_bootstrap_thread(void) // Reset interrupts masked timeout before we enable interrupts ml_spin_debug_clear_self(); #endif - (void) spllo(); /* Allow interruptions */ + (void) spllo(); /* Allow interruptions */ #if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0 /* @@ -653,7 +654,7 @@ kernel_bootstrap_thread(void) /* * Finalize protections on statically mapped pages now that comm page mapping is established. */ - arm_vm_prot_finalize(PE_state.bootArgs); + arm_vm_prot_finalize(PE_state.bootArgs); #endif #if CONFIG_SCHED_SFI @@ -695,7 +696,7 @@ kernel_bootstrap_thread(void) /* * Start the user bootstrap. 
*/ -#ifdef MACH_BSD +#ifdef MACH_BSD bsd_init(); #endif @@ -705,12 +706,19 @@ kernel_bootstrap_thread(void) */ kdebug_free_early_buf(); - serial_keyboard_init(); /* Start serial keyboard if wanted */ + serial_keyboard_init(); /* Start serial keyboard if wanted */ vm_page_init_local_q(); thread_bind(PROCESSOR_NULL); + /* + * Now that all CPUs are available to run threads, this is essentially + * a background thread. Take this opportunity to initialize and free + * any remaining vm_pages that were delayed earlier by pmap_startup(). + */ + vm_free_delayed_pages(); + /* * Become the pageout daemon. */ @@ -726,8 +734,8 @@ kernel_bootstrap_thread(void) void slave_main(void *machine_param) { - processor_t processor = current_processor(); - thread_t thread; + processor_t processor = current_processor(); + thread_t thread; /* * Use the idle processor thread if there @@ -737,8 +745,7 @@ slave_main(void *machine_param) thread = processor->idle_thread; thread->continuation = (thread_continue_t)processor_start_thread; thread->parameter = machine_param; - } - else { + } else { thread = processor->next_thread; processor->next_thread = THREAD_NULL; } @@ -757,8 +764,8 @@ slave_main(void *machine_param) void processor_start_thread(void *machine_param) { - processor_t processor = current_processor(); - thread_t self = current_thread(); + processor_t processor = current_processor(); + thread_t self = current_thread(); slave_machine_init(machine_param); @@ -766,8 +773,9 @@ processor_start_thread(void *machine_param) * If running the idle processor thread, * reenter the idle loop, else terminate. */ - if (self == processor->idle_thread) + if (self == processor->idle_thread) { thread_block((thread_continue_t)idle_thread); + } thread_terminate(self); /*NOTREACHED*/ @@ -780,9 +788,9 @@ processor_start_thread(void *machine_param) */ static void __attribute__((noreturn)) load_context( - thread_t thread) + thread_t thread) { - processor_t processor = current_processor(); + processor_t processor = current_processor(); #define load_context_kprintf(x...) /* kprintf("load_context: " x) */ @@ -801,23 +809,27 @@ load_context( * to have reserved stack. */ load_context_kprintf("thread %p, stack %lx, stackptr %lx\n", thread, - thread->kernel_stack, thread->machine.kstackptr); + thread->kernel_stack, thread->machine.kstackptr); if (!thread->kernel_stack) { load_context_kprintf("stack_alloc_try\n"); - if (!stack_alloc_try(thread)) + if (!stack_alloc_try(thread)) { panic("load_context"); + } } /* * The idle processor threads are not counted as * running for load calculations. */ - if (!(thread->state & TH_IDLE)) + if (!(thread->state & TH_IDLE)) { sched_run_incr(thread); + } processor->active_thread = thread; - processor_state_update_explicit(processor, thread->sched_pri, - SFI_CLASS_KERNEL, PSET_SMP, thread_get_perfcontrol_class(thread)); + processor_state_update_explicit(processor, thread->sched_pri, + SFI_CLASS_KERNEL, PSET_SMP, thread_get_perfcontrol_class(thread), THREAD_URGENCY_NONE); + processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL; + processor->current_is_NO_SMT = false; processor->starting_pri = thread->sched_pri; processor->deadline = UINT64_MAX; thread->last_processor = processor; @@ -835,6 +847,14 @@ load_context( PMAP_ACTIVATE_USER(thread, processor->cpu_id); load_context_kprintf("machine_load_context\n"); + +#if __arm__ || __arm64__ +#if __SMP__ + /* TODO: Should this be ordered? 
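+	 * If ordering did turn out to be required, one hypothetical shape for
+	 * it is a C11 release RMW (sketch only; machine_thread_flags is not
+	 * declared _Atomic in this tree):
+	 *
+	 *	atomic_fetch_or_explicit(&thread->machine.machine_thread_flags,
+	 *	    MACHINE_THREAD_FLAGS_ON_CPU, memory_order_release);
+	 *
+	 * As written below, the |= is a plain, unordered read-modify-write;
+	 * whether that is safe depends on who else reads these flags.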
*/ + thread->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU; +#endif /* __SMP__ */ +#endif /* __arm__ || __arm64__ */ + machine_load_context(thread); /*NOTREACHED*/ } @@ -847,19 +867,20 @@ scale_setup() typeof(task_max) task_max_base = task_max; /* Raise limits for servers with >= 16G */ - if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 *1024ULL))) { - scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 *1024ULL)); + if ((serverperfmode != 0) && ((uint64_t)sane_size >= (uint64_t)(16 * 1024 * 1024 * 1024ULL))) { + scale = (int)((uint64_t)sane_size / (uint64_t)(8 * 1024 * 1024 * 1024ULL)); /* limit to 128 G */ - if (scale > 16) + if (scale > 16) { scale = 16; + } task_max_base = 2500; - /* Raise limits for machines with >= 3GB */ - } else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 *1024ULL)) { - if ((uint64_t)sane_size < (uint64_t)(8 * 1024 * 1024 *1024ULL)) { + /* Raise limits for machines with >= 3GB */ + } else if ((uint64_t)sane_size >= (uint64_t)(3 * 1024 * 1024 * 1024ULL)) { + if ((uint64_t)sane_size < (uint64_t)(8 * 1024 * 1024 * 1024ULL)) { scale = 2; } else { /* limit to 64GB */ - scale = MIN(16, (int)((uint64_t)sane_size / (uint64_t)(4 * 1024 * 1024 *1024ULL))); + scale = MIN(16, (int)((uint64_t)sane_size / (uint64_t)(4 * 1024 * 1024 * 1024ULL))); } } @@ -867,16 +888,15 @@ scale_setup() if (scale != 0) { task_threadmax = task_max; - thread_max = task_max * 5; + thread_max = task_max * 5; } #endif bsd_scale_setup(scale); - + ipc_space_max = SPACE_MAX; ipc_port_max = PORT_MAX; ipc_pset_max = SET_MAX; semaphore_max = SEMAPHORE_MAX; } - diff --git a/osfmk/kern/startup.h b/osfmk/kern/startup.h index 4555687dc..946454e83 100644 --- a/osfmk/kern/startup.h +++ b/osfmk/kern/startup.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,17 +22,17 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -#ifndef _KERN_STARTUP_H_ -#define _KERN_STARTUP_H_ +#ifndef _KERN_STARTUP_H_ +#define _KERN_STARTUP_H_ #include __BEGIN_DECLS @@ -46,33 +46,33 @@ extern void kernel_early_bootstrap(void); extern void kernel_bootstrap(void); /* Initialize machine dependent stuff */ -extern void machine_init(void); +extern void machine_init(void); -extern void slave_main(void *machine_param); +extern void slave_main(void *machine_param); /* * The following must be implemented in machine dependent code. 
*/ /* Slave cpu initialization */ -extern void slave_machine_init(void *machine_param); +extern void slave_machine_init(void *machine_param); /* Device subsystem initialization */ -extern void device_service_create(void); +extern void device_service_create(void); -#ifdef MACH_BSD +#ifdef MACH_BSD /* BSD subsystem initialization */ -extern void bsd_init(void); +extern void bsd_init(void); extern void bsd_early_init(void); /* codesigning subsystem initialization */ -extern void cs_init(void); +extern void cs_init(void); -#endif /* MACH_BSD */ +#endif /* MACH_BSD */ __END_DECLS -#endif /* _KERN_STARTUP_H_ */ +#endif /* _KERN_STARTUP_H_ */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/kern/sync_lock.c b/osfmk/kern/sync_lock.c index 3479d0c7b..fe550ee59 100644 --- a/osfmk/kern/sync_lock.c +++ b/osfmk/kern/sync_lock.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License.
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ /* * File: kern/sync_lock.c @@ -57,19 +57,19 @@ /* * OBSOLETE: lock set routines are obsolete - */ + */ kern_return_t -lock_set_create ( - __unused task_t task, - __unused lock_set_t *new_lock_set, - __unused int n_ulocks, - __unused int policy) +lock_set_create( + __unused task_t task, + __unused lock_set_t *new_lock_set, + __unused int n_ulocks, + __unused int policy) { return KERN_FAILURE; } kern_return_t -lock_set_destroy ( +lock_set_destroy( __unused task_t task, __unused lock_set_t lock_set) { @@ -77,7 +77,7 @@ lock_set_destroy ( } kern_return_t -lock_acquire ( +lock_acquire( __unused lock_set_t lock_set, __unused int lock_id) { @@ -85,7 +85,7 @@ lock_acquire ( } kern_return_t -lock_release ( +lock_release( __unused lock_set_t lock_set, __unused int lock_id) { @@ -93,23 +93,23 @@ lock_release ( } kern_return_t -lock_try ( +lock_try( __unused lock_set_t lock_set, __unused int lock_id) { - return KERN_FAILURE; + return KERN_FAILURE; } kern_return_t -lock_make_stable ( +lock_make_stable( __unused lock_set_t lock_set, __unused int lock_id) { - return KERN_FAILURE; + return KERN_FAILURE; } kern_return_t -lock_handoff ( +lock_handoff( __unused lock_set_t lock_set, __unused int lock_id) { @@ -117,7 +117,7 @@ lock_handoff ( } kern_return_t -lock_handoff_accept ( +lock_handoff_accept( __unused lock_set_t lock_set, __unused int lock_id) { @@ -137,4 +137,3 @@ lock_set_dereference( { return; } - diff --git a/osfmk/kern/sync_lock.h b/osfmk/kern/sync_lock.h index fc83bb97d..bcbe466dd 100644 --- a/osfmk/kern/sync_lock.h +++ b/osfmk/kern/sync_lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ /* * File: kern/sync_lock.h @@ -41,15 +41,15 @@ #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include #include -extern void lock_set_reference (lock_set_t lock_set); -extern void lock_set_dereference (lock_set_t lock_set); +extern void lock_set_reference(lock_set_t lock_set); +extern void lock_set_dereference(lock_set_t lock_set); -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ #endif /* _KERN_SYNC_LOCK_H_ */ diff --git a/osfmk/kern/sync_sema.c b/osfmk/kern/sync_sema.c index 98f33ba8d..898a5e8bb 100644 --- a/osfmk/kern/sync_sema.c +++ b/osfmk/kern/sync_sema.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ /* * File: kern/sync_sema.c @@ -71,67 +71,67 @@ os_refgrp_decl(static, sema_refgrp, "semaphore", NULL); /* Forward declarations */ -kern_return_t +kern_return_t semaphore_wait_trap_internal( - mach_port_name_t name, - void (*caller_cont)(kern_return_t)); + mach_port_name_t name, + void (*caller_cont)(kern_return_t)); -kern_return_t +kern_return_t semaphore_wait_signal_trap_internal( - mach_port_name_t wait_name, - mach_port_name_t signal_name, - void (*caller_cont)(kern_return_t)); + mach_port_name_t wait_name, + mach_port_name_t signal_name, + void (*caller_cont)(kern_return_t)); -kern_return_t +kern_return_t semaphore_timedwait_trap_internal( - mach_port_name_t name, - unsigned int sec, - clock_res_t nsec, - void (*caller_cont)(kern_return_t)); + mach_port_name_t name, + unsigned int sec, + clock_res_t nsec, + void (*caller_cont)(kern_return_t)); -kern_return_t +kern_return_t semaphore_timedwait_signal_trap_internal( - mach_port_name_t wait_name, - mach_port_name_t signal_name, - unsigned int sec, - clock_res_t nsec, - void (*caller_cont)(kern_return_t)); + mach_port_name_t wait_name, + mach_port_name_t signal_name, + unsigned int sec, + clock_res_t nsec, + void (*caller_cont)(kern_return_t)); kern_return_t semaphore_signal_internal_trap(mach_port_name_t sema_name); kern_return_t semaphore_signal_internal( - semaphore_t semaphore, - thread_t thread, - int options); + semaphore_t semaphore, + thread_t thread, + int options); kern_return_t semaphore_convert_wait_result( - int wait_result); + int wait_result); void semaphore_wait_continue(void); static kern_return_t semaphore_wait_internal( - semaphore_t wait_semaphore, - semaphore_t signal_semaphore, - uint64_t deadline, - int option, - void (*caller_cont)(kern_return_t)); + semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + uint64_t deadline, + int option, + void (*caller_cont)(kern_return_t)); static __inline__ uint64_t semaphore_deadline( - unsigned int sec, - clock_res_t nsec) + unsigned int sec, + clock_res_t nsec) { - uint64_t abstime; + uint64_t abstime; - nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime); + nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime); clock_absolutetime_interval_to_deadline(abstime, &abstime); - return (abstime); + return abstime; } /* @@ -139,15 +139,15 @@ semaphore_deadline( * * Initialize the semaphore mechanisms. 
* Right now, we only need to initialize the semaphore zone. - */ + */ void semaphore_init(void) { - semaphore_zone = zinit(sizeof(struct semaphore), - semaphore_max * sizeof(struct semaphore), - sizeof(struct semaphore), - "semaphores"); - zone_change(semaphore_zone, Z_NOENCRYPT, TRUE); + semaphore_zone = zinit(sizeof(struct semaphore), + semaphore_max * sizeof(struct semaphore), + sizeof(struct semaphore), + "semaphores"); + zone_change(semaphore_zone, Z_NOENCRYPT, TRUE); } /* @@ -158,23 +158,25 @@ semaphore_init(void) */ kern_return_t semaphore_create( - task_t task, - semaphore_t *new_semaphore, - int policy, - int value) + task_t task, + semaphore_t *new_semaphore, + int policy, + int value) { - semaphore_t s = SEMAPHORE_NULL; - kern_return_t kret; + semaphore_t s = SEMAPHORE_NULL; + kern_return_t kret; *new_semaphore = SEMAPHORE_NULL; - if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) + if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) { return KERN_INVALID_ARGUMENT; + } - s = (semaphore_t) zalloc (semaphore_zone); + s = (semaphore_t) zalloc(semaphore_zone); - if (s == SEMAPHORE_NULL) - return KERN_RESOURCE_SHORTAGE; + if (s == SEMAPHORE_NULL) { + return KERN_RESOURCE_SHORTAGE; + } kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */ if (kret != KERN_SUCCESS) { @@ -185,7 +187,7 @@ semaphore_create( /* * Initialize the semaphore values. */ - s->port = IP_NULL; + s->port = IP_NULL; os_ref_init(&s->ref_count, &sema_refgrp); s->count = value; s->active = TRUE; @@ -203,7 +205,7 @@ semaphore_create( *new_semaphore = s; return KERN_SUCCESS; -} +} /* * Routine: semaphore_destroy_internal @@ -220,10 +222,10 @@ semaphore_create( */ static void semaphore_destroy_internal( - task_t task, - semaphore_t semaphore) + task_t task, + semaphore_t semaphore) { - int old_count; + int old_count; /* unlink semaphore from owning task */ assert(semaphore->owner == task); @@ -238,17 +240,17 @@ semaphore_destroy_internal( semaphore->active = FALSE; /* - * Wakeup blocked threads + * Wakeup blocked threads */ old_count = semaphore->count; semaphore->count = 0; if (old_count < 0) { waitq_wakeup64_all_locked(&semaphore->waitq, - SEMAPHORE_EVENT, - THREAD_RESTART, NULL, - WAITQ_ALL_PRIORITIES, - WAITQ_UNLOCK); + SEMAPHORE_EVENT, + THREAD_RESTART, NULL, + WAITQ_ALL_PRIORITIES, + WAITQ_UNLOCK); /* waitq/semaphore is unlocked */ } else { semaphore_unlock(semaphore); @@ -263,13 +265,14 @@ semaphore_destroy_internal( */ kern_return_t semaphore_destroy( - task_t task, - semaphore_t semaphore) + task_t task, + semaphore_t semaphore) { spl_t spl_level; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } if (task == TASK_NULL) { semaphore_dereference(semaphore); @@ -307,7 +310,7 @@ semaphore_destroy( void semaphore_destroy_all( - task_t task) + task_t task) { uint32_t count; spl_t spl_level; @@ -319,8 +322,9 @@ semaphore_destroy_all( semaphore = (semaphore_t) queue_first(&task->semaphore_list); - if (count == 0) + if (count == 0) { spl_level = splsched(); + } semaphore_lock(semaphore); semaphore_destroy_internal(task, semaphore); @@ -332,8 +336,9 @@ semaphore_destroy_all( splx(spl_level); } } - if (count != 0) + if (count != 0) { splx(spl_level); + } task_unlock(task); } @@ -341,15 +346,15 @@ semaphore_destroy_all( /* * Routine: semaphore_signal_internal * - * Signals the semaphore as direct. + * Signals the semaphore as direct. * Assumptions: * Semaphore is locked. 
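+ *
+ * For reference, the canonical internal invocation (this is what
+ * semaphore_signal() below boils down to):
+ *
+ *	kr = semaphore_signal_internal(semaphore, THREAD_NULL,
+ *	        SEMAPHORE_SIGNAL_PREPOST);
+ *	if (kr == KERN_NOT_WAITING)
+ *		kr = KERN_SUCCESS;	/- no waiter; count was pre-posted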
*/ kern_return_t semaphore_signal_internal( - semaphore_t semaphore, - thread_t thread, - int options) + semaphore_t semaphore, + thread_t thread, + int options) { kern_return_t kr; spl_t spl_level; @@ -366,11 +371,11 @@ semaphore_signal_internal( if (thread != THREAD_NULL) { if (semaphore->count < 0) { kr = waitq_wakeup64_thread_locked( - &semaphore->waitq, - SEMAPHORE_EVENT, - thread, - THREAD_AWAKENED, - WAITQ_UNLOCK); + &semaphore->waitq, + SEMAPHORE_EVENT, + thread, + THREAD_AWAKENED, + WAITQ_UNLOCK); /* waitq/semaphore is unlocked */ } else { kr = KERN_NOT_WAITING; @@ -378,7 +383,7 @@ semaphore_signal_internal( } splx(spl_level); return kr; - } + } if (options & SEMAPHORE_SIGNAL_ALL) { int old_count = semaphore->count; @@ -387,29 +392,30 @@ semaphore_signal_internal( if (old_count < 0) { semaphore->count = 0; /* always reset */ kr = waitq_wakeup64_all_locked( - &semaphore->waitq, - SEMAPHORE_EVENT, - THREAD_AWAKENED, NULL, - WAITQ_ALL_PRIORITIES, - WAITQ_UNLOCK); + &semaphore->waitq, + SEMAPHORE_EVENT, + THREAD_AWAKENED, NULL, + WAITQ_ALL_PRIORITIES, + WAITQ_UNLOCK); /* waitq / semaphore is unlocked */ } else { - if (options & SEMAPHORE_SIGNAL_PREPOST) + if (options & SEMAPHORE_SIGNAL_PREPOST) { semaphore->count++; + } kr = KERN_SUCCESS; semaphore_unlock(semaphore); } splx(spl_level); return kr; } - + if (semaphore->count < 0) { kr = waitq_wakeup64_one_locked( - &semaphore->waitq, - SEMAPHORE_EVENT, - THREAD_AWAKENED, NULL, - WAITQ_ALL_PRIORITIES, - WAITQ_KEEP_LOCKED); + &semaphore->waitq, + SEMAPHORE_EVENT, + THREAD_AWAKENED, NULL, + WAITQ_ALL_PRIORITIES, + WAITQ_KEEP_LOCKED); if (kr == KERN_SUCCESS) { semaphore_unlock(semaphore); splx(spl_level); @@ -438,19 +444,20 @@ semaphore_signal_internal( */ kern_return_t semaphore_signal_thread( - semaphore_t semaphore, - thread_t thread) + semaphore_t semaphore, + thread_t thread) { - kern_return_t ret; + kern_return_t ret; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } ret = semaphore_signal_internal(semaphore, - thread, - SEMAPHORE_OPTION_NONE); + thread, + SEMAPHORE_OPTION_NONE); return ret; -} +} /* * Routine: semaphore_signal_thread_trap @@ -463,27 +470,29 @@ semaphore_signal_thread_trap( { mach_port_name_t sema_name = args->signal_name; mach_port_name_t thread_name = args->thread_name; - semaphore_t semaphore; - thread_t thread; - kern_return_t kr; + semaphore_t semaphore; + thread_t thread; + kern_return_t kr; - /* + /* * MACH_PORT_NULL is not an error. It means that we want to * select any one thread that is already waiting, but not to * pre-post the semaphore. 
*/ if (thread_name != MACH_PORT_NULL) { thread = port_name_to_thread(thread_name); - if (thread == THREAD_NULL) + if (thread == THREAD_NULL) { return KERN_INVALID_ARGUMENT; - } else + } + } else { thread = THREAD_NULL; + } kr = port_name_to_semaphore(sema_name, &semaphore); if (kr == KERN_SUCCESS) { kr = semaphore_signal_internal(semaphore, - thread, - SEMAPHORE_OPTION_NONE); + thread, + SEMAPHORE_OPTION_NONE); semaphore_dereference(semaphore); } if (thread != THREAD_NULL) { @@ -507,18 +516,20 @@ semaphore_signal_thread_trap( */ kern_return_t semaphore_signal( - semaphore_t semaphore) + semaphore_t semaphore) { - kern_return_t kr; + kern_return_t kr; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } kr = semaphore_signal_internal(semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_PREPOST); - if (kr == KERN_NOT_WAITING) + THREAD_NULL, + SEMAPHORE_SIGNAL_PREPOST); + if (kr == KERN_NOT_WAITING) { return KERN_SUCCESS; + } return kr; } @@ -533,23 +544,24 @@ semaphore_signal_trap( { mach_port_name_t sema_name = args->signal_name; - return (semaphore_signal_internal_trap(sema_name)); + return semaphore_signal_internal_trap(sema_name); } kern_return_t semaphore_signal_internal_trap(mach_port_name_t sema_name) { - semaphore_t semaphore; + semaphore_t semaphore; kern_return_t kr; kr = port_name_to_semaphore(sema_name, &semaphore); if (kr == KERN_SUCCESS) { - kr = semaphore_signal_internal(semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_PREPOST); + kr = semaphore_signal_internal(semaphore, + THREAD_NULL, + SEMAPHORE_SIGNAL_PREPOST); semaphore_dereference(semaphore); - if (kr == KERN_NOT_WAITING) + if (kr == KERN_NOT_WAITING) { kr = KERN_SUCCESS; + } } return kr; } @@ -562,18 +574,20 @@ semaphore_signal_internal_trap(mach_port_name_t sema_name) */ kern_return_t semaphore_signal_all( - semaphore_t semaphore) + semaphore_t semaphore) { kern_return_t kr; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } kr = semaphore_signal_internal(semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_ALL); - if (kr == KERN_NOT_WAITING) + THREAD_NULL, + SEMAPHORE_SIGNAL_ALL); + if (kr == KERN_NOT_WAITING) { return KERN_SUCCESS; + } return kr; } @@ -587,17 +601,18 @@ semaphore_signal_all_trap( struct semaphore_signal_all_trap_args *args) { mach_port_name_t sema_name = args->signal_name; - semaphore_t semaphore; + semaphore_t semaphore; kern_return_t kr; kr = port_name_to_semaphore(sema_name, &semaphore); if (kr == KERN_SUCCESS) { kr = semaphore_signal_internal(semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_ALL); + THREAD_NULL, + SEMAPHORE_SIGNAL_ALL); semaphore_dereference(semaphore); - if (kr == KERN_NOT_WAITING) + if (kr == KERN_NOT_WAITING) { kr = KERN_SUCCESS; + } } return kr; } @@ -618,7 +633,7 @@ semaphore_convert_wait_result(int wait_result) case THREAD_TIMED_OUT: return KERN_OPERATION_TIMED_OUT; - + case THREAD_INTERRUPTED: return KERN_ABORTED; @@ -646,8 +661,9 @@ semaphore_wait_continue(void) assert(self->sth_waitsemaphore != SEMAPHORE_NULL); semaphore_dereference(self->sth_waitsemaphore); - if (self->sth_signalsemaphore != SEMAPHORE_NULL) + if (self->sth_signalsemaphore != SEMAPHORE_NULL) { semaphore_dereference(self->sth_signalsemaphore); + } assert(caller_cont != (void (*)(kern_return_t))0); (*caller_cont)(semaphore_convert_wait_result(wait_result)); @@ -666,15 +682,15 @@ semaphore_wait_continue(void) */ static kern_return_t semaphore_wait_internal( - semaphore_t wait_semaphore, - semaphore_t signal_semaphore, - 
uint64_t deadline, - int option, - void (*caller_cont)(kern_return_t)) + semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + uint64_t deadline, + int option, + void (*caller_cont)(kern_return_t)) { - int wait_result; - spl_t spl_level; - kern_return_t kr = KERN_ALREADY_WAITING; + int wait_result; + spl_t spl_level; + kern_return_t kr = KERN_ALREADY_WAITING; spl_level = splsched(); semaphore_lock(wait_semaphore); @@ -687,18 +703,18 @@ semaphore_wait_internal( } else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) { kr = KERN_OPERATION_TIMED_OUT; } else { - thread_t self = current_thread(); + thread_t self = current_thread(); wait_semaphore->count = -1; /* we don't keep an actual count */ thread_set_pending_block_hint(self, kThreadWaitSemaphore); (void)waitq_assert_wait64_locked( - &wait_semaphore->waitq, - SEMAPHORE_EVENT, - THREAD_ABORTSAFE, - TIMEOUT_URGENCY_USER_NORMAL, - deadline, TIMEOUT_NO_LEEWAY, - self); + &wait_semaphore->waitq, + SEMAPHORE_EVENT, + THREAD_ABORTSAFE, + TIMEOUT_URGENCY_USER_NORMAL, + deadline, TIMEOUT_NO_LEEWAY, + self); } semaphore_unlock(wait_semaphore); splx(spl_level); @@ -716,13 +732,13 @@ semaphore_wait_internal( * our intention to wait above). */ signal_kr = semaphore_signal_internal(signal_semaphore, - THREAD_NULL, - SEMAPHORE_SIGNAL_PREPOST); + THREAD_NULL, + SEMAPHORE_SIGNAL_PREPOST); - if (signal_kr == KERN_NOT_WAITING) + if (signal_kr == KERN_NOT_WAITING) { signal_kr = KERN_SUCCESS; - else if (signal_kr == KERN_TERMINATED) { - /* + } else if (signal_kr == KERN_TERMINATED) { + /* * Uh!Oh! The semaphore we were to signal died. * We have to get ourselves out of the wait in * case we get stuck here forever (it is assumed @@ -739,17 +755,19 @@ semaphore_wait_internal( clear_wait(self, THREAD_INTERRUPTED); kr = semaphore_convert_wait_result(self->wait_result); - if (kr == KERN_ABORTED) + if (kr == KERN_ABORTED) { kr = KERN_TERMINATED; + } } } - + /* * If we had an error, or we didn't really need to wait we can * return now that we have signalled the signal semaphore. */ - if (kr != KERN_ALREADY_WAITING) + if (kr != KERN_ALREADY_WAITING) { return kr; + } /* * Now, we can block. If the caller supplied a continuation @@ -765,12 +783,11 @@ semaphore_wait_internal( self->sth_waitsemaphore = wait_semaphore; self->sth_signalsemaphore = signal_semaphore; wait_result = thread_block((thread_continue_t)semaphore_wait_continue); - } - else { + } else { wait_result = thread_block(THREAD_CONTINUE_NULL); } - return (semaphore_convert_wait_result(wait_result)); + return semaphore_convert_wait_result(wait_result); } @@ -778,49 +795,49 @@ semaphore_wait_internal( * Routine: semaphore_wait * * Traditional (non-continuation) interface presented to - * in-kernel clients to wait on a semaphore. + * in-kernel clients to wait on a semaphore. 
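+ *
+ * A minimal caller-side sketch (illustrative only; "sem" stands for a
+ * semaphore_t the caller holds a reference on):
+ *
+ *	kern_return_t kr = semaphore_wait(sem);
+ *	if (kr == KERN_ABORTED)
+ *		the wait was interrupted; see
+ *		semaphore_convert_wait_result() for the full mapping.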
*/ kern_return_t semaphore_wait( - semaphore_t semaphore) -{ - - if (semaphore == SEMAPHORE_NULL) + semaphore_t semaphore) +{ + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } - return(semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - 0ULL, SEMAPHORE_OPTION_NONE, - (void (*)(kern_return_t))0)); + return semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + 0ULL, SEMAPHORE_OPTION_NONE, + (void (*)(kern_return_t))0); } kern_return_t semaphore_wait_noblock( - semaphore_t semaphore) -{ - - if (semaphore == SEMAPHORE_NULL) + semaphore_t semaphore) +{ + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } - return(semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - 0ULL, SEMAPHORE_TIMEOUT_NOBLOCK, - (void (*)(kern_return_t))0)); + return semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + 0ULL, SEMAPHORE_TIMEOUT_NOBLOCK, + (void (*)(kern_return_t))0); } kern_return_t semaphore_wait_deadline( - semaphore_t semaphore, - uint64_t deadline) -{ - - if (semaphore == SEMAPHORE_NULL) + semaphore_t semaphore, + uint64_t deadline) +{ + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; + } - return(semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - deadline, SEMAPHORE_OPTION_NONE, - (void (*)(kern_return_t))0)); + return semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + deadline, SEMAPHORE_OPTION_NONE, + (void (*)(kern_return_t))0); } /* @@ -834,25 +851,25 @@ kern_return_t semaphore_wait_trap( struct semaphore_wait_trap_args *args) { - return(semaphore_wait_trap_internal(args->wait_name, thread_syscall_return)); + return semaphore_wait_trap_internal(args->wait_name, thread_syscall_return); } kern_return_t semaphore_wait_trap_internal( - mach_port_name_t name, + mach_port_name_t name, void (*caller_cont)(kern_return_t)) -{ - semaphore_t semaphore; +{ + semaphore_t semaphore; kern_return_t kr; kr = port_name_to_semaphore(name, &semaphore); if (kr == KERN_SUCCESS) { kr = semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - 0ULL, SEMAPHORE_OPTION_NONE, - caller_cont); + SEMAPHORE_NULL, + 0ULL, SEMAPHORE_OPTION_NONE, + caller_cont); semaphore_dereference(semaphore); } return kr; @@ -862,34 +879,36 @@ semaphore_wait_trap_internal( * Routine: semaphore_timedwait * * Traditional (non-continuation) interface presented to - * in-kernel clients to wait on a semaphore with a timeout. + * in-kernel clients to wait on a semaphore with a timeout. * * A timeout of {0,0} is considered non-blocking. 
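+ *
+ * Illustrative values (not taken from the code below):
+ *
+ *	mach_timespec_t ts = { .tv_sec = 1, .tv_nsec = 0 };
+ *	kr = semaphore_timedwait(sem, ts);	blocks up to ~1s, then
+ *						returns KERN_OPERATION_TIMED_OUT
+ *
+ *	mach_timespec_t zero = { 0, 0 };
+ *	kr = semaphore_timedwait(sem, zero);	polls, via the
+ *						SEMAPHORE_TIMEOUT_NOBLOCK option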
*/ kern_return_t semaphore_timedwait( - semaphore_t semaphore, - mach_timespec_t wait_time) + semaphore_t semaphore, + mach_timespec_t wait_time) { - int option = SEMAPHORE_OPTION_NONE; - uint64_t deadline = 0; + int option = SEMAPHORE_OPTION_NONE; + uint64_t deadline = 0; - if (semaphore == SEMAPHORE_NULL) + if (semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; - - if(BAD_MACH_TIMESPEC(&wait_time)) + } + + if (BAD_MACH_TIMESPEC(&wait_time)) { return KERN_INVALID_VALUE; + } - if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) + if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) { option = SEMAPHORE_TIMEOUT_NOBLOCK; - else + } else { deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec); - - return (semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - deadline, option, - (void(*)(kern_return_t))0)); - + } + + return semaphore_wait_internal(semaphore, + SEMAPHORE_NULL, + deadline, option, + (void (*)(kern_return_t))0); } /* @@ -906,9 +925,8 @@ semaphore_timedwait( kern_return_t semaphore_timedwait_trap( struct semaphore_timedwait_trap_args *args) -{ - - return(semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return)); +{ + return semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return); } @@ -925,23 +943,25 @@ semaphore_timedwait_trap_internal( wait_time.tv_sec = sec; wait_time.tv_nsec = nsec; - if(BAD_MACH_TIMESPEC(&wait_time)) + if (BAD_MACH_TIMESPEC(&wait_time)) { return KERN_INVALID_VALUE; - + } + kr = port_name_to_semaphore(name, &semaphore); if (kr == KERN_SUCCESS) { - int option = SEMAPHORE_OPTION_NONE; - uint64_t deadline = 0; + int option = SEMAPHORE_OPTION_NONE; + uint64_t deadline = 0; - if (sec == 0 && nsec == 0) + if (sec == 0 && nsec == 0) { option = SEMAPHORE_TIMEOUT_NOBLOCK; - else + } else { deadline = semaphore_deadline(sec, nsec); + } kr = semaphore_wait_internal(semaphore, - SEMAPHORE_NULL, - deadline, option, - caller_cont); + SEMAPHORE_NULL, + deadline, option, + caller_cont); semaphore_dereference(semaphore); } return kr; @@ -957,29 +977,30 @@ semaphore_timedwait_trap_internal( */ kern_return_t semaphore_wait_signal( - semaphore_t wait_semaphore, - semaphore_t signal_semaphore) + semaphore_t wait_semaphore, + semaphore_t signal_semaphore) { - if (wait_semaphore == SEMAPHORE_NULL) + if (wait_semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; - - return(semaphore_wait_internal(wait_semaphore, - signal_semaphore, - 0ULL, SEMAPHORE_OPTION_NONE, - (void(*)(kern_return_t))0)); + } + + return semaphore_wait_internal(wait_semaphore, + signal_semaphore, + 0ULL, SEMAPHORE_OPTION_NONE, + (void (*)(kern_return_t))0); } /* * Trap: semaphore_wait_signal_trap * * Atomically register a wait on a semaphore and THEN signal - * another. This is the trap version from user space. + * another. This is the trap version from user space. 
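+ *
+ * The single-trap atomicity is the point: a client can signal a
+ * server's "request" semaphore while registering as a waiter on a
+ * "reply" semaphore, so the server cannot post the reply before the
+ * client is queued. (The request/reply naming is illustrative, not
+ * part of this interface.)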
*/ kern_return_t semaphore_wait_signal_trap( struct semaphore_wait_signal_trap_args *args) { - return(semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return)); + return semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return); } kern_return_t @@ -997,9 +1018,9 @@ semaphore_wait_signal_trap_internal( kr = port_name_to_semaphore(wait_name, &wait_semaphore); if (kr == KERN_SUCCESS) { kr = semaphore_wait_internal(wait_semaphore, - signal_semaphore, - 0ULL, SEMAPHORE_OPTION_NONE, - caller_cont); + signal_semaphore, + 0ULL, SEMAPHORE_OPTION_NONE, + caller_cont); semaphore_dereference(wait_semaphore); } semaphore_dereference(signal_semaphore); @@ -1019,41 +1040,44 @@ semaphore_wait_signal_trap_internal( */ kern_return_t semaphore_timedwait_signal( - semaphore_t wait_semaphore, - semaphore_t signal_semaphore, - mach_timespec_t wait_time) + semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + mach_timespec_t wait_time) { - int option = SEMAPHORE_OPTION_NONE; - uint64_t deadline = 0; + int option = SEMAPHORE_OPTION_NONE; + uint64_t deadline = 0; - if (wait_semaphore == SEMAPHORE_NULL) + if (wait_semaphore == SEMAPHORE_NULL) { return KERN_INVALID_ARGUMENT; - - if(BAD_MACH_TIMESPEC(&wait_time)) + } + + if (BAD_MACH_TIMESPEC(&wait_time)) { return KERN_INVALID_VALUE; + } - if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) + if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0) { option = SEMAPHORE_TIMEOUT_NOBLOCK; - else + } else { deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec); - - return(semaphore_wait_internal(wait_semaphore, - signal_semaphore, - deadline, option, - (void(*)(kern_return_t))0)); + } + + return semaphore_wait_internal(wait_semaphore, + signal_semaphore, + deadline, option, + (void (*)(kern_return_t))0); } /* * Trap: semaphore_timedwait_signal_trap * * Atomically register a timed wait on a semaphore and THEN signal - * another. This is the trap version from user space. + * another. This is the trap version from user space. 
*/ kern_return_t semaphore_timedwait_signal_trap( struct semaphore_timedwait_signal_trap_args *args) { - return(semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return)); + return semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return); } kern_return_t @@ -1071,25 +1095,27 @@ semaphore_timedwait_signal_trap_internal( wait_time.tv_sec = sec; wait_time.tv_nsec = nsec; - if(BAD_MACH_TIMESPEC(&wait_time)) + if (BAD_MACH_TIMESPEC(&wait_time)) { return KERN_INVALID_VALUE; - + } + kr = port_name_to_semaphore(signal_name, &signal_semaphore); if (kr == KERN_SUCCESS) { kr = port_name_to_semaphore(wait_name, &wait_semaphore); if (kr == KERN_SUCCESS) { - int option = SEMAPHORE_OPTION_NONE; - uint64_t deadline = 0; + int option = SEMAPHORE_OPTION_NONE; + uint64_t deadline = 0; - if (sec == 0 && nsec == 0) + if (sec == 0 && nsec == 0) { option = SEMAPHORE_TIMEOUT_NOBLOCK; - else + } else { deadline = semaphore_deadline(sec, nsec); + } kr = semaphore_wait_internal(wait_semaphore, - signal_semaphore, - deadline, option, - caller_cont); + signal_semaphore, + deadline, option, + caller_cont); semaphore_dereference(wait_semaphore); } semaphore_dereference(signal_semaphore); @@ -1106,7 +1132,7 @@ semaphore_timedwait_signal_trap_internal( */ void semaphore_reference( - semaphore_t semaphore) + semaphore_t semaphore) { os_ref_retain(&semaphore->ref_count); } @@ -1119,13 +1145,14 @@ semaphore_reference( */ void semaphore_dereference( - semaphore_t semaphore) + semaphore_t semaphore) { uint32_t collisions; spl_t spl_level; - if (semaphore == NULL) + if (semaphore == NULL) { return; + } if (os_ref_release(&semaphore->ref_count) > 0) { return; @@ -1154,7 +1181,7 @@ semaphore_dereference( task_t task = semaphore->owner; assert(task != TASK_NULL); - + if (task_lock_try(task)) { semaphore_destroy_internal(task, semaphore); /* semaphore unlocked */ @@ -1162,7 +1189,7 @@ semaphore_dereference( task_unlock(task); goto out; } - + /* failed to get out-of-order locks */ semaphore_unlock(semaphore); splx(spl_level); @@ -1173,7 +1200,7 @@ semaphore_dereference( semaphore_unlock(semaphore); splx(spl_level); - out: +out: zfree(semaphore_zone, semaphore); } @@ -1186,6 +1213,7 @@ kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_ assert(kdp_is_in_zone(sem, "semaphores")); waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port); - if (sem->owner) + if (sem->owner) { waitinfo->owner = pid_from_task(sem->owner); + } } diff --git a/osfmk/kern/sync_sema.h b/osfmk/kern/sync_sema.h index 144e925d3..bb2f0a174 100644 --- a/osfmk/kern/sync_sema.h +++ b/osfmk/kern/sync_sema.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ /* * File: kern/sync_sema.h @@ -49,13 +49,13 @@ #include typedef struct semaphore { - queue_chain_t task_link; /* chain of semaphores owned by a task */ - struct waitq waitq; /* queue of blocked threads & lock */ - task_t owner; /* task that owns semaphore */ - ipc_port_t port; /* semaphore port */ - os_refcnt_t ref_count; /* reference count */ - int count; /* current count value */ - boolean_t active; /* active status */ + queue_chain_t task_link; /* chain of semaphores owned by a task */ + struct waitq waitq; /* queue of blocked threads & lock */ + task_t owner; /* task that owns semaphore */ + ipc_port_t port; /* semaphore port */ + os_refcnt_t ref_count; /* reference count */ + int count; /* current count value */ + boolean_t active; /* active status */ } Semaphore; #define semaphore_lock(semaphore) waitq_lock(&(semaphore)->waitq) @@ -63,9 +63,9 @@ typedef struct semaphore { extern void semaphore_init(void); -extern void semaphore_reference(semaphore_t semaphore); -extern void semaphore_dereference(semaphore_t semaphore); -extern void semaphore_destroy_all(task_t task); +extern void semaphore_reference(semaphore_t semaphore); +extern void semaphore_dereference(semaphore_t semaphore); +extern void semaphore_destroy_all(task_t task); #endif /* MACH_KERNEL_PRIVATE */ diff --git a/osfmk/kern/syscall_emulation.c b/osfmk/kern/syscall_emulation.c index c067cb0db..b2de9dfb8 100644 --- a/osfmk/kern/syscall_emulation.c +++ b/osfmk/kern/syscall_emulation.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. 
- * + * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -81,10 +81,10 @@ */ kern_return_t task_set_emulation_vector( - __unused task_t task, - __unused int vector_start, - __unused emulation_vector_t emulation_vector, - __unused mach_msg_type_number_t emulation_vector_count) + __unused task_t task, + __unused int vector_start, + __unused emulation_vector_t emulation_vector, + __unused mach_msg_type_number_t emulation_vector_count) { return KERN_NOT_SUPPORTED; } @@ -97,10 +97,10 @@ task_set_emulation_vector( */ kern_return_t task_get_emulation_vector( - __unused task_t task, - __unused int *vector_start, /* out */ - __unused emulation_vector_t *emulation_vector, /* out */ - __unused mach_msg_type_number_t *emulation_vector_count)/* out */ + __unused task_t task, + __unused int *vector_start, /* out */ + __unused emulation_vector_t *emulation_vector, /* out */ + __unused mach_msg_type_number_t *emulation_vector_count)/* out */ { return KERN_NOT_SUPPORTED; } @@ -111,13 +111,9 @@ task_get_emulation_vector( */ kern_return_t task_set_emulation( - __unused task_t task, - __unused vm_offset_t routine_entry_pt, - __unused int routine_number) + __unused task_t task, + __unused vm_offset_t routine_entry_pt, + __unused int routine_number) { return KERN_NOT_SUPPORTED; } - - - - diff --git a/osfmk/kern/syscall_subr.c b/osfmk/kern/syscall_subr.c index 507d5ec35..0bb52436d 100644 --- a/osfmk/kern/syscall_subr.c +++ b/osfmk/kern/syscall_subr.c @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes.
*/ @@ -84,11 +84,11 @@ static void thread_depress_ms(mach_msg_timeout_t interval); */ kern_return_t pfz_exit( -__unused struct pfz_exit_args *args) + __unused struct pfz_exit_args *args) { /* For now, nothing special to do. We'll pick up the ASTs on kernel exit. */ - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -102,8 +102,8 @@ __unused struct pfz_exit_args *args) static void swtch_continue(void) { - processor_t myprocessor; - boolean_t result; + processor_t myprocessor; + boolean_t result; disable_preemption(); myprocessor = current_processor(); @@ -120,14 +120,14 @@ boolean_t swtch( __unused struct swtch_args *args) { - processor_t myprocessor; + processor_t myprocessor; disable_preemption(); myprocessor = current_processor(); if (!SCHED(thread_should_yield)(myprocessor, current_thread())) { mp_enable_preemption(); - return (FALSE); + return FALSE; } enable_preemption(); @@ -139,8 +139,8 @@ swtch( static void swtch_pri_continue(void) { - processor_t myprocessor; - boolean_t result; + processor_t myprocessor; + boolean_t result; thread_depress_abort(current_thread()); @@ -157,16 +157,16 @@ swtch_pri_continue(void) boolean_t swtch_pri( -__unused struct swtch_pri_args *args) + __unused struct swtch_pri_args *args) { - processor_t myprocessor; + processor_t myprocessor; disable_preemption(); myprocessor = current_processor(); if (!SCHED(thread_should_yield)(myprocessor, current_thread())) { mp_enable_preemption(); - return (FALSE); + return FALSE; } enable_preemption(); @@ -183,8 +183,9 @@ thread_switch_continue(void *parameter, __unused int ret) thread_t self = current_thread(); int option = (int)(intptr_t)parameter; - if (option == SWITCH_OPTION_DEPRESS || option == SWITCH_OPTION_OSLOCK_DEPRESS) + if (option == SWITCH_OPTION_DEPRESS || option == SWITCH_OPTION_OSLOCK_DEPRESS) { thread_depress_abort(self); + } ml_delay_on_yield(); @@ -201,17 +202,17 @@ kern_return_t thread_switch( struct thread_switch_args *args) { - thread_t thread = THREAD_NULL; - thread_t self = current_thread(); - mach_port_name_t thread_name = args->thread_name; - int option = args->option; - mach_msg_timeout_t option_time = args->option_time; - uint32_t scale_factor = NSEC_PER_MSEC; - boolean_t depress_option = FALSE; - boolean_t wait_option = FALSE; - wait_interrupt_t interruptible = THREAD_ABORTSAFE; - - /* + thread_t thread = THREAD_NULL; + thread_t self = current_thread(); + mach_port_name_t thread_name = args->thread_name; + int option = args->option; + mach_msg_timeout_t option_time = args->option_time; + uint32_t scale_factor = NSEC_PER_MSEC; + boolean_t depress_option = FALSE; + boolean_t wait_option = FALSE; + wait_interrupt_t interruptible = THREAD_ABORTSAFE; + + /* * Validate and process option. */ switch (option) { @@ -237,8 +238,8 @@ thread_switch( interruptible |= THREAD_WAIT_NOREPORT; break; default: - return (KERN_INVALID_ARGUMENT); - } + return KERN_INVALID_ARGUMENT; + } /* * Translate the port name if supplied. 
@@ -247,7 +248,7 @@ thread_switch( ipc_port_t port; if (ipc_port_translate_send(self->task->itk_space, - thread_name, &port) == KERN_SUCCESS) { + thread_name, &port) == KERN_SUCCESS) { ip_reference(port); ip_unlock(port); @@ -263,7 +264,6 @@ thread_switch( if (option == SWITCH_OPTION_OSLOCK_DEPRESS || option == SWITCH_OPTION_OSLOCK_WAIT) { if (thread != THREAD_NULL) { - if (thread->task != self->task) { /* * OSLock boosting only applies to other threads @@ -297,19 +297,20 @@ thread_switch( /* This may return a different thread if the target is pushing on something */ thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread); - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE, - thread_tid(thread), thread->state, - pulled_thread ? TRUE : FALSE, 0, 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE, + thread_tid(thread), thread->state, + pulled_thread ? TRUE : FALSE, 0, 0); if (pulled_thread != THREAD_NULL) { /* We can't be dropping the last ref here */ thread_deallocate_safe(thread); - if (wait_option) + if (wait_option) { assert_wait_timeout((event_t)assert_wait_timeout, interruptible, - option_time, scale_factor); - else if (depress_option) + option_time, scale_factor); + } else if (depress_option) { thread_depress_ms(option_time); + } thread_run(self, thread_switch_continue, (void *)(intptr_t)option, pulled_thread); __builtin_unreachable(); @@ -343,8 +344,8 @@ thread_switch( void thread_yield_with_continuation( - thread_continue_t continuation, - void *parameter) + thread_continue_t continuation, + void *parameter) { assert(continuation); thread_block_reason(continuation, parameter, AST_YIELD); @@ -366,7 +367,7 @@ port_name_to_thread_for_ulock(mach_port_name_t thread_name) ipc_port_t port; if (ipc_port_translate_send(self->task->itk_space, - thread_name, &port) == KERN_SUCCESS) { + thread_name, &port) == KERN_SUCCESS) { ip_reference(port); ip_unlock(port); @@ -398,7 +399,7 @@ port_name_to_thread_for_ulock(mach_port_name_t thread_name) */ static wait_result_t thread_handoff_internal(thread_t thread, thread_continue_t continuation, - void *parameter) + void *parameter) { thread_t deallocate_thread = THREAD_NULL; thread_t self = current_thread(); @@ -411,9 +412,9 @@ thread_handoff_internal(thread_t thread, thread_continue_t continuation, thread_t pulled_thread = thread_run_queue_remove_for_handoff(thread); - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED_THREAD_SWITCH)|DBG_FUNC_NONE, - thread_tid(thread), thread->state, - pulled_thread ? TRUE : FALSE, 0, 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_THREAD_SWITCH) | DBG_FUNC_NONE, + thread_tid(thread), thread->state, + pulled_thread ? 
TRUE : FALSE, 0, 0); if (pulled_thread != THREAD_NULL) { if (continuation == NULL) { @@ -443,7 +444,7 @@ thread_handoff_internal(thread_t thread, thread_continue_t continuation, void thread_handoff_parameter(thread_t thread, thread_continue_t continuation, - void *parameter) + void *parameter) { thread_handoff_internal(thread, continuation, parameter); panic("NULL continuation passed to %s", __func__); @@ -495,8 +496,9 @@ thread_depress_abstime(uint64_t interval) uint64_t deadline; clock_absolutetime_interval_to_deadline(interval, &deadline); - if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL)) + if (!timer_call_enter(&self->depress_timer, deadline, TIMER_CALL_USER_CRITICAL)) { self->depress_timer_active++; + } } } @@ -518,7 +520,7 @@ thread_depress_ms(mach_msg_timeout_t interval) */ void thread_depress_expire(void *p0, - __unused void *p1) + __unused void *p1) { thread_t thread = (thread_t)p0; @@ -570,8 +572,9 @@ thread_depress_abort(thread_t thread) kern_return_t thread_depress_abort_locked(thread_t thread) { - if ((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0) + if ((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) == 0) { return KERN_NOT_DEPRESSED; + } assert((thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != TH_SFLAG_DEPRESSED_MASK); @@ -579,8 +582,9 @@ thread_depress_abort_locked(thread_t thread) thread_recompute_sched_pri(thread, SETPRI_LAZY); - if (timer_call_cancel(&thread->depress_timer)) + if (timer_call_cancel(&thread->depress_timer)) { thread->depress_timer_active--; + } return KERN_SUCCESS; } @@ -596,14 +600,15 @@ thread_poll_yield(thread_t self) assert(self == current_thread()); assert((self->sched_flags & TH_SFLAG_DEPRESS) == 0); - if (self->sched_mode != TH_MODE_FIXED) + if (self->sched_mode != TH_MODE_FIXED) { return; + } spl_t s = splsched(); uint64_t abstime = mach_absolute_time(); uint64_t total_computation = abstime - - self->computation_epoch + self->computation_metered; + self->computation_epoch + self->computation_metered; if (total_computation >= max_poll_computation) { thread_lock(self); @@ -612,11 +617,12 @@ thread_poll_yield(thread_t self) self->computation_metered = 0; uint64_t yield_expiration = abstime + - (total_computation >> sched_poll_yield_shift); + (total_computation >> sched_poll_yield_shift); if (!timer_call_enter(&self->depress_timer, yield_expiration, - TIMER_CALL_USER_CRITICAL)) + TIMER_CALL_USER_CRITICAL)) { self->depress_timer_active++; + } self->sched_flags |= TH_SFLAG_POLLDEPRESS; thread_recompute_sched_pri(self, SETPRI_DEFAULT); @@ -638,7 +644,7 @@ thread_yield_internal(mach_msg_timeout_t ms) assert((self->sched_flags & TH_SFLAG_DEPRESSED_MASK) != TH_SFLAG_DEPRESSED_MASK); - processor_t myprocessor; + processor_t myprocessor; disable_preemption(); myprocessor = current_processor(); @@ -666,8 +672,8 @@ thread_yield_internal(mach_msg_timeout_t ms) void thread_yield_to_preemption() { - /* - * ast_pending() should ideally be called with interrupts disabled, but + /* + * ast_pending() should ideally be called with interrupts disabled, but * the check here is fine because csw_check() will do the right thing. 
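+ *
+ * (As implemented below: the pending-AST bits are read first, and
+ * splsched()/thread_lock() are only taken when a preemption-relevant
+ * bit is set, which keeps the common no-op path cheap.)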
*/ ast_t *pending_ast = ast_pending(); @@ -681,15 +687,14 @@ thread_yield_to_preemption() p = current_processor(); thread_lock(self); - ast = csw_check(p, AST_YIELD); + ast = csw_check(self, p, AST_YIELD); ast_on(ast); thread_unlock(self); if (ast != AST_NONE) { - (void)thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast); + (void)thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast); } splx(s); } } - diff --git a/osfmk/kern/syscall_subr.h b/osfmk/kern/syscall_subr.h index 6d0984aec..46b552a1e 100644 --- a/osfmk/kern/syscall_subr.h +++ b/osfmk/kern/syscall_subr.h @@ -68,4 +68,3 @@ extern void thread_depress_expire(void *thread, void *p1); extern void thread_poll_yield(thread_t self); #endif /* _KERN_SYSCALL_SUBR_H_ */ - diff --git a/osfmk/kern/syscall_sw.c b/osfmk/kern/syscall_sw.c index 65e3b2890..b6dee96ef 100644 --- a/osfmk/kern/syscall_sw.c +++ b/osfmk/kern/syscall_sw.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -101,283 +101,284 @@ int kern_invalid_debug = 0; #include #include -const mach_trap_t mach_trap_table[MACH_TRAP_TABLE_COUNT] = { -/* 0 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 1 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 2 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 3 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 4 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 5 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 6 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 7 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 8 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 9 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 10 */ MACH_TRAP(_kernelrpc_mach_vm_allocate_trap, 4, 5, munge_wwlw), -/* 11 */ MACH_TRAP(_kernelrpc_mach_vm_purgable_control_trap, 4, 5, munge_wlww), -/* 12 */ MACH_TRAP(_kernelrpc_mach_vm_deallocate_trap, 3, 5, munge_wll), -/* 13 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 14 */ MACH_TRAP(_kernelrpc_mach_vm_protect_trap, 5, 7, munge_wllww), -/* 15 */ MACH_TRAP(_kernelrpc_mach_vm_map_trap, 6, 8, munge_wwllww), -/* 16 */ MACH_TRAP(_kernelrpc_mach_port_allocate_trap, 3, 3, munge_www), -/* 17 */ MACH_TRAP(_kernelrpc_mach_port_destroy_trap, 2, 2, munge_ww), -/* 18 */ MACH_TRAP(_kernelrpc_mach_port_deallocate_trap, 2, 2, munge_ww), -/* 19 */ MACH_TRAP(_kernelrpc_mach_port_mod_refs_trap, 4, 4, munge_wwww), -/* 20 */ MACH_TRAP(_kernelrpc_mach_port_move_member_trap, 3, 3, munge_www), -/* 21 */ MACH_TRAP(_kernelrpc_mach_port_insert_right_trap, 4, 4, munge_wwww), -/* 22 */ MACH_TRAP(_kernelrpc_mach_port_insert_member_trap, 3, 3, munge_www), -/* 23 */ MACH_TRAP(_kernelrpc_mach_port_extract_member_trap, 3, 3, munge_www), -/* 24 */ MACH_TRAP(_kernelrpc_mach_port_construct_trap, 4, 5, munge_wwlw), -/* 25 */ MACH_TRAP(_kernelrpc_mach_port_destruct_trap, 4, 5, munge_wwwl), -/* 26 */ MACH_TRAP(mach_reply_port, 0, 0, NULL), -/* 27 */ MACH_TRAP(thread_self_trap, 0, 0, NULL), -/* 28 */ MACH_TRAP(task_self_trap, 0, 0, NULL), -/* 29 */ MACH_TRAP(host_self_trap, 0, 0, NULL), -/* 30 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 31 */ MACH_TRAP(mach_msg_trap, 7, 7, munge_wwwwwww), -/* 32 */ MACH_TRAP(mach_msg_overwrite_trap, 8, 8, munge_wwwwwwww), -/* 33 */ MACH_TRAP(semaphore_signal_trap, 1, 1, munge_w), -/* 34 */ MACH_TRAP(semaphore_signal_all_trap, 1, 1, munge_w), -/* 35 */ MACH_TRAP(semaphore_signal_thread_trap, 2, 2, munge_ww), -/* 36 */ MACH_TRAP(semaphore_wait_trap, 1, 1, munge_w), -/* 37 */ MACH_TRAP(semaphore_wait_signal_trap, 2, 2, munge_ww), -/* 38 */ MACH_TRAP(semaphore_timedwait_trap, 3, 3, munge_www), -/* 39 */ MACH_TRAP(semaphore_timedwait_signal_trap, 4, 4, munge_wwww), -/* 40 */ MACH_TRAP(_kernelrpc_mach_port_get_attributes_trap, 5, 5, munge_wwwww), -/* 41 */ MACH_TRAP(_kernelrpc_mach_port_guard_trap, 4, 5, munge_wwlw), -/* 42 */ MACH_TRAP(_kernelrpc_mach_port_unguard_trap, 3, 4, munge_wwl), -/* 43 */ MACH_TRAP(mach_generate_activity_id, 3, 3, munge_www), -/* 44 */ MACH_TRAP(task_name_for_pid, 3, 3, munge_www), -/* 45 */ MACH_TRAP(task_for_pid, 3, 3, munge_www), -/* 46 */ MACH_TRAP(pid_for_task, 2, 2, munge_ww), -/* 47 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 48 */ MACH_TRAP(macx_swapon, 4, 5, munge_lwww), -/* 49 */ MACH_TRAP(macx_swapoff, 2, 3, munge_lw), -/* 50 */ MACH_TRAP(thread_get_special_reply_port, 0, 0, NULL), -/* 51 */ MACH_TRAP(macx_triggers, 4, 4, munge_wwww), -/* 52 */ MACH_TRAP(macx_backing_store_suspend, 1, 1, munge_w), -/* 53 */ MACH_TRAP(macx_backing_store_recovery, 1, 1, munge_w), -/* 54 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 55 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 56 */ 
MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 57 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 58 */ MACH_TRAP(pfz_exit, 0, 0, NULL), -/* 59 */ MACH_TRAP(swtch_pri, 0, 0, NULL), -/* 60 */ MACH_TRAP(swtch, 0, 0, NULL), -/* 61 */ MACH_TRAP(thread_switch, 3, 3, munge_www), -/* 62 */ MACH_TRAP(clock_sleep_trap, 5, 5, munge_wwwww), -/* 63 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +const mach_trap_t mach_trap_table[MACH_TRAP_TABLE_COUNT] = { +/* 0 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 1 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 2 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 3 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 4 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 5 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 6 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 7 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 8 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 9 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 10 */ MACH_TRAP(_kernelrpc_mach_vm_allocate_trap, 4, 5, munge_wwlw), +/* 11 */ MACH_TRAP(_kernelrpc_mach_vm_purgable_control_trap, 4, 5, munge_wlww), +/* 12 */ MACH_TRAP(_kernelrpc_mach_vm_deallocate_trap, 3, 5, munge_wll), +/* 13 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 14 */ MACH_TRAP(_kernelrpc_mach_vm_protect_trap, 5, 7, munge_wllww), +/* 15 */ MACH_TRAP(_kernelrpc_mach_vm_map_trap, 6, 8, munge_wwllww), +/* 16 */ MACH_TRAP(_kernelrpc_mach_port_allocate_trap, 3, 3, munge_www), +/* 17 */ MACH_TRAP(_kernelrpc_mach_port_destroy_trap, 2, 2, munge_ww), +/* 18 */ MACH_TRAP(_kernelrpc_mach_port_deallocate_trap, 2, 2, munge_ww), +/* 19 */ MACH_TRAP(_kernelrpc_mach_port_mod_refs_trap, 4, 4, munge_wwww), +/* 20 */ MACH_TRAP(_kernelrpc_mach_port_move_member_trap, 3, 3, munge_www), +/* 21 */ MACH_TRAP(_kernelrpc_mach_port_insert_right_trap, 4, 4, munge_wwww), +/* 22 */ MACH_TRAP(_kernelrpc_mach_port_insert_member_trap, 3, 3, munge_www), +/* 23 */ MACH_TRAP(_kernelrpc_mach_port_extract_member_trap, 3, 3, munge_www), +/* 24 */ MACH_TRAP(_kernelrpc_mach_port_construct_trap, 4, 5, munge_wwlw), +/* 25 */ MACH_TRAP(_kernelrpc_mach_port_destruct_trap, 4, 5, munge_wwwl), +/* 26 */ MACH_TRAP(mach_reply_port, 0, 0, NULL), +/* 27 */ MACH_TRAP(thread_self_trap, 0, 0, NULL), +/* 28 */ MACH_TRAP(task_self_trap, 0, 0, NULL), +/* 29 */ MACH_TRAP(host_self_trap, 0, 0, NULL), +/* 30 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 31 */ MACH_TRAP(mach_msg_trap, 7, 7, munge_wwwwwww), +/* 32 */ MACH_TRAP(mach_msg_overwrite_trap, 8, 8, munge_wwwwwwww), +/* 33 */ MACH_TRAP(semaphore_signal_trap, 1, 1, munge_w), +/* 34 */ MACH_TRAP(semaphore_signal_all_trap, 1, 1, munge_w), +/* 35 */ MACH_TRAP(semaphore_signal_thread_trap, 2, 2, munge_ww), +/* 36 */ MACH_TRAP(semaphore_wait_trap, 1, 1, munge_w), +/* 37 */ MACH_TRAP(semaphore_wait_signal_trap, 2, 2, munge_ww), +/* 38 */ MACH_TRAP(semaphore_timedwait_trap, 3, 3, munge_www), +/* 39 */ MACH_TRAP(semaphore_timedwait_signal_trap, 4, 4, munge_wwww), +/* 40 */ MACH_TRAP(_kernelrpc_mach_port_get_attributes_trap, 5, 5, munge_wwwww), +/* 41 */ MACH_TRAP(_kernelrpc_mach_port_guard_trap, 4, 5, munge_wwlw), +/* 42 */ MACH_TRAP(_kernelrpc_mach_port_unguard_trap, 3, 4, munge_wwl), +/* 43 */ MACH_TRAP(mach_generate_activity_id, 3, 3, munge_www), +/* 44 */ MACH_TRAP(task_name_for_pid, 3, 3, munge_www), +/* 45 */ MACH_TRAP(task_for_pid, 3, 3, munge_www), +/* 46 */ MACH_TRAP(pid_for_task, 2, 2, munge_ww), +/* 47 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 48 */ MACH_TRAP(macx_swapon, 4, 5, munge_lwww), +/* 49 */ MACH_TRAP(macx_swapoff, 2, 3, munge_lw), +/* 50 */ MACH_TRAP(thread_get_special_reply_port, 0, 0, NULL), 
+/* 51 */ MACH_TRAP(macx_triggers, 4, 4, munge_wwww), +/* 52 */ MACH_TRAP(macx_backing_store_suspend, 1, 1, munge_w), +/* 53 */ MACH_TRAP(macx_backing_store_recovery, 1, 1, munge_w), +/* 54 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 55 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 56 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 57 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 58 */ MACH_TRAP(pfz_exit, 0, 0, NULL), +/* 59 */ MACH_TRAP(swtch_pri, 0, 0, NULL), +/* 60 */ MACH_TRAP(swtch, 0, 0, NULL), +/* 61 */ MACH_TRAP(thread_switch, 3, 3, munge_www), +/* 62 */ MACH_TRAP(clock_sleep_trap, 5, 5, munge_wwwww), +/* 63 */ MACH_TRAP(kern_invalid, 0, 0, NULL), /* traps 64 - 95 reserved (debo) */ -/* 64 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 65 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 66 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 67 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 68 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 69 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 70 */ MACH_TRAP(host_create_mach_voucher_trap, 4, 4, munge_wwww), -/* 71 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 72 */ MACH_TRAP(mach_voucher_extract_attr_recipe_trap, 4, 4, munge_wwww), -/* 73 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 74 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 75 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 76 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 77 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 78 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 79 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 80 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 81 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 82 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 83 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 84 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 85 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 86 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 87 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 88 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 89 */ MACH_TRAP(mach_timebase_info_trap, 1, 1, munge_w), -/* 90 */ MACH_TRAP(mach_wait_until_trap, 1, 2, munge_l), -/* 91 */ MACH_TRAP(mk_timer_create_trap, 0, 0, NULL), -/* 92 */ MACH_TRAP(mk_timer_destroy_trap, 1, 1, munge_w), -/* 93 */ MACH_TRAP(mk_timer_arm_trap, 2, 3, munge_wl), -/* 94 */ MACH_TRAP(mk_timer_cancel_trap, 2, 2, munge_ww), -/* 95 */ MACH_TRAP(mk_timer_arm_leeway_trap, 4, 6, munge_wlll), +/* 64 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 65 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 66 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 67 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 68 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 69 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 70 */ MACH_TRAP(host_create_mach_voucher_trap, 4, 4, munge_wwww), +/* 71 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 72 */ MACH_TRAP(mach_voucher_extract_attr_recipe_trap, 4, 4, munge_wwww), +/* 73 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 74 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 75 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 76 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 77 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 78 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 79 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 80 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 81 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 82 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 83 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 84 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 85 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 86 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 87 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 88 */ MACH_TRAP(kern_invalid, 0, 
0, NULL), +/* 89 */ MACH_TRAP(mach_timebase_info_trap, 1, 1, munge_w), +/* 90 */ MACH_TRAP(mach_wait_until_trap, 1, 2, munge_l), +/* 91 */ MACH_TRAP(mk_timer_create_trap, 0, 0, NULL), +/* 92 */ MACH_TRAP(mk_timer_destroy_trap, 1, 1, munge_w), +/* 93 */ MACH_TRAP(mk_timer_arm_trap, 2, 3, munge_wl), +/* 94 */ MACH_TRAP(mk_timer_cancel_trap, 2, 2, munge_ww), +/* 95 */ MACH_TRAP(mk_timer_arm_leeway_trap, 4, 6, munge_wlll), /* traps 64 - 95 reserved (debo) */ -/* 96 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 97 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 98 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 99 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* traps 100-107 reserved for iokit (esb) */ -/* 100 */ MACH_TRAP(iokit_user_client_trap, 8, 8, munge_wwwwwwww), -/* 101 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 102 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 103 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 104 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 105 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 106 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 107 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* traps 108-127 unused */ -/* 108 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 109 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 110 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 111 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 112 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 113 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 114 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 115 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 116 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 117 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 118 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 119 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 120 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 121 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 122 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 123 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 124 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 125 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 126 */ MACH_TRAP(kern_invalid, 0, 0, NULL), -/* 127 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 96 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 97 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 98 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 99 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* traps 100-107 reserved for iokit (esb) */ +/* 100 */ MACH_TRAP(iokit_user_client_trap, 8, 8, munge_wwwwwwww), +/* 101 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 102 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 103 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 104 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 105 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 106 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 107 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* traps 108-127 unused */ +/* 108 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 109 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 110 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 111 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 112 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 113 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 114 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 115 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 116 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 117 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 118 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 119 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 120 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 121 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 122 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 123 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 124 */ MACH_TRAP(kern_invalid, 
0, 0, NULL), +/* 125 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 126 */ MACH_TRAP(kern_invalid, 0, 0, NULL), +/* 127 */ MACH_TRAP(kern_invalid, 0, 0, NULL), }; const char * mach_syscall_name_table[MACH_TRAP_TABLE_COUNT] = { -/* 0 */ "kern_invalid", -/* 1 */ "kern_invalid", -/* 2 */ "kern_invalid", -/* 3 */ "kern_invalid", -/* 4 */ "kern_invalid", -/* 5 */ "kern_invalid", -/* 6 */ "kern_invalid", -/* 7 */ "kern_invalid", -/* 8 */ "kern_invalid", -/* 9 */ "kern_invalid", -/* 10 */ "_kernelrpc_mach_vm_allocate_trap", -/* 11 */ "kern_invalid", -/* 12 */ "_kernelrpc_mach_vm_deallocate_trap", -/* 13 */ "kern_invalid", -/* 14 */ "_kernelrpc_mach_vm_protect_trap", -/* 15 */ "_kernelrpc_mach_vm_map_trap", -/* 16 */ "_kernelrpc_mach_port_allocate_trap", -/* 17 */ "_kernelrpc_mach_port_destroy_trap", -/* 18 */ "_kernelrpc_mach_port_deallocate_trap", -/* 19 */ "_kernelrpc_mach_port_mod_refs_trap", -/* 20 */ "_kernelrpc_mach_port_move_member_trap", -/* 21 */ "_kernelrpc_mach_port_insert_right_trap", -/* 22 */ "_kernelrpc_mach_port_insert_member_trap", -/* 23 */ "_kernelrpc_mach_port_extract_member_trap", -/* 24 */ "_kernelrpc_mach_port_construct_trap", -/* 25 */ "_kernelrpc_mach_port_destruct_trap", -/* 26 */ "mach_reply_port", -/* 27 */ "thread_self_trap", -/* 28 */ "task_self_trap", -/* 29 */ "host_self_trap", -/* 30 */ "kern_invalid", -/* 31 */ "mach_msg_trap", -/* 32 */ "mach_msg_overwrite_trap", -/* 33 */ "semaphore_signal_trap", -/* 34 */ "semaphore_signal_all_trap", -/* 35 */ "semaphore_signal_thread_trap", -/* 36 */ "semaphore_wait_trap", -/* 37 */ "semaphore_wait_signal_trap", -/* 38 */ "semaphore_timedwait_trap", -/* 39 */ "semaphore_timedwait_signal_trap", -/* 40 */ "kern_invalid", -/* 41 */ "_kernelrpc_mach_port_guard_trap", -/* 42 */ "_kernelrpc_mach_port_unguard_trap", -/* 43 */ "mach_generate_activity_id", -/* 44 */ "task_name_for_pid", -/* 45 */ "task_for_pid", -/* 46 */ "pid_for_task", -/* 47 */ "kern_invalid", -/* 48 */ "macx_swapon", -/* 49 */ "macx_swapoff", -/* 50 */ "thread_get_special_reply_port", -/* 51 */ "macx_triggers", -/* 52 */ "macx_backing_store_suspend", -/* 53 */ "macx_backing_store_recovery", -/* 54 */ "kern_invalid", -/* 55 */ "kern_invalid", -/* 56 */ "kern_invalid", -/* 57 */ "kern_invalid", -/* 58 */ "pfz_exit", -/* 59 */ "swtch_pri", -/* 60 */ "swtch", -/* 61 */ "thread_switch", -/* 62 */ "clock_sleep_trap", -/* 63 */ "kern_invalid", +/* 0 */ "kern_invalid", +/* 1 */ "kern_invalid", +/* 2 */ "kern_invalid", +/* 3 */ "kern_invalid", +/* 4 */ "kern_invalid", +/* 5 */ "kern_invalid", +/* 6 */ "kern_invalid", +/* 7 */ "kern_invalid", +/* 8 */ "kern_invalid", +/* 9 */ "kern_invalid", +/* 10 */ "_kernelrpc_mach_vm_allocate_trap", +/* 11 */ "kern_invalid", +/* 12 */ "_kernelrpc_mach_vm_deallocate_trap", +/* 13 */ "kern_invalid", +/* 14 */ "_kernelrpc_mach_vm_protect_trap", +/* 15 */ "_kernelrpc_mach_vm_map_trap", +/* 16 */ "_kernelrpc_mach_port_allocate_trap", +/* 17 */ "_kernelrpc_mach_port_destroy_trap", +/* 18 */ "_kernelrpc_mach_port_deallocate_trap", +/* 19 */ "_kernelrpc_mach_port_mod_refs_trap", +/* 20 */ "_kernelrpc_mach_port_move_member_trap", +/* 21 */ "_kernelrpc_mach_port_insert_right_trap", +/* 22 */ "_kernelrpc_mach_port_insert_member_trap", +/* 23 */ "_kernelrpc_mach_port_extract_member_trap", +/* 24 */ "_kernelrpc_mach_port_construct_trap", +/* 25 */ "_kernelrpc_mach_port_destruct_trap", +/* 26 */ "mach_reply_port", +/* 27 */ "thread_self_trap", +/* 28 */ "task_self_trap", +/* 29 */ "host_self_trap", +/* 30 */ "kern_invalid", +/* 31 */ "mach_msg_trap", 
+/* 32 */ "mach_msg_overwrite_trap", +/* 33 */ "semaphore_signal_trap", +/* 34 */ "semaphore_signal_all_trap", +/* 35 */ "semaphore_signal_thread_trap", +/* 36 */ "semaphore_wait_trap", +/* 37 */ "semaphore_wait_signal_trap", +/* 38 */ "semaphore_timedwait_trap", +/* 39 */ "semaphore_timedwait_signal_trap", +/* 40 */ "kern_invalid", +/* 41 */ "_kernelrpc_mach_port_guard_trap", +/* 42 */ "_kernelrpc_mach_port_unguard_trap", +/* 43 */ "mach_generate_activity_id", +/* 44 */ "task_name_for_pid", +/* 45 */ "task_for_pid", +/* 46 */ "pid_for_task", +/* 47 */ "kern_invalid", +/* 48 */ "macx_swapon", +/* 49 */ "macx_swapoff", +/* 50 */ "thread_get_special_reply_port", +/* 51 */ "macx_triggers", +/* 52 */ "macx_backing_store_suspend", +/* 53 */ "macx_backing_store_recovery", +/* 54 */ "kern_invalid", +/* 55 */ "kern_invalid", +/* 56 */ "kern_invalid", +/* 57 */ "kern_invalid", +/* 58 */ "pfz_exit", +/* 59 */ "swtch_pri", +/* 60 */ "swtch", +/* 61 */ "thread_switch", +/* 62 */ "clock_sleep_trap", +/* 63 */ "kern_invalid", /* traps 64 - 95 reserved (debo) */ -/* 64 */ "kern_invalid", -/* 65 */ "kern_invalid", -/* 66 */ "kern_invalid", -/* 67 */ "kern_invalid", -/* 68 */ "kern_invalid", -/* 69 */ "kern_invalid", -/* 70 */ "host_create_mach_voucher_trap", -/* 71 */ "kern_invalid", -/* 72 */ "mach_voucher_extract_attr_recipe_trap", -/* 73 */ "kern_invalid", -/* 74 */ "kern_invalid", -/* 75 */ "kern_invalid", -/* 76 */ "kern_invalid", -/* 77 */ "kern_invalid", -/* 78 */ "kern_invalid", -/* 79 */ "kern_invalid", -/* 80 */ "kern_invalid", -/* 81 */ "kern_invalid", -/* 82 */ "kern_invalid", -/* 83 */ "kern_invalid", -/* 84 */ "kern_invalid", -/* 85 */ "kern_invalid", -/* 86 */ "kern_invalid", -/* 87 */ "kern_invalid", -/* 88 */ "kern_invalid", -/* 89 */ "mach_timebase_info_trap", -/* 90 */ "mach_wait_until_trap", -/* 91 */ "mk_timer_create_trap", -/* 92 */ "mk_timer_destroy_trap", -/* 93 */ "mk_timer_arm_trap", -/* 94 */ "mk_timer_cancel_trap", -/* 95 */ "kern_invalid", +/* 64 */ "kern_invalid", +/* 65 */ "kern_invalid", +/* 66 */ "kern_invalid", +/* 67 */ "kern_invalid", +/* 68 */ "kern_invalid", +/* 69 */ "kern_invalid", +/* 70 */ "host_create_mach_voucher_trap", +/* 71 */ "kern_invalid", +/* 72 */ "mach_voucher_extract_attr_recipe_trap", +/* 73 */ "kern_invalid", +/* 74 */ "kern_invalid", +/* 75 */ "kern_invalid", +/* 76 */ "kern_invalid", +/* 77 */ "kern_invalid", +/* 78 */ "kern_invalid", +/* 79 */ "kern_invalid", +/* 80 */ "kern_invalid", +/* 81 */ "kern_invalid", +/* 82 */ "kern_invalid", +/* 83 */ "kern_invalid", +/* 84 */ "kern_invalid", +/* 85 */ "kern_invalid", +/* 86 */ "kern_invalid", +/* 87 */ "kern_invalid", +/* 88 */ "kern_invalid", +/* 89 */ "mach_timebase_info_trap", +/* 90 */ "mach_wait_until_trap", +/* 91 */ "mk_timer_create_trap", +/* 92 */ "mk_timer_destroy_trap", +/* 93 */ "mk_timer_arm_trap", +/* 94 */ "mk_timer_cancel_trap", +/* 95 */ "kern_invalid", /* traps 64 - 95 reserved (debo) */ -/* 96 */ "kern_invalid", -/* 97 */ "kern_invalid", -/* 98 */ "kern_invalid", -/* 99 */ "kern_invalid", -/* traps 100-107 reserved for iokit (esb) */ -/* 100 */ "iokit_user_client_trap", -/* 101 */ "kern_invalid", -/* 102 */ "kern_invalid", -/* 103 */ "kern_invalid", -/* 104 */ "kern_invalid", -/* 105 */ "kern_invalid", -/* 106 */ "kern_invalid", -/* 107 */ "kern_invalid", -/* traps 108-127 unused */ -/* 108 */ "kern_invalid", -/* 109 */ "kern_invalid", -/* 110 */ "kern_invalid", -/* 111 */ "kern_invalid", -/* 112 */ "kern_invalid", -/* 113 */ "kern_invalid", -/* 114 */ "kern_invalid", -/* 115 */ 
"kern_invalid", -/* 116 */ "kern_invalid", -/* 117 */ "kern_invalid", -/* 118 */ "kern_invalid", -/* 119 */ "kern_invalid", -/* 120 */ "kern_invalid", -/* 121 */ "kern_invalid", -/* 122 */ "kern_invalid", -/* 123 */ "kern_invalid", -/* 124 */ "kern_invalid", -/* 125 */ "kern_invalid", -/* 126 */ "kern_invalid", -/* 127 */ "kern_invalid", +/* 96 */ "kern_invalid", +/* 97 */ "kern_invalid", +/* 98 */ "kern_invalid", +/* 99 */ "kern_invalid", +/* traps 100-107 reserved for iokit (esb) */ +/* 100 */ "iokit_user_client_trap", +/* 101 */ "kern_invalid", +/* 102 */ "kern_invalid", +/* 103 */ "kern_invalid", +/* 104 */ "kern_invalid", +/* 105 */ "kern_invalid", +/* 106 */ "kern_invalid", +/* 107 */ "kern_invalid", +/* traps 108-127 unused */ +/* 108 */ "kern_invalid", +/* 109 */ "kern_invalid", +/* 110 */ "kern_invalid", +/* 111 */ "kern_invalid", +/* 112 */ "kern_invalid", +/* 113 */ "kern_invalid", +/* 114 */ "kern_invalid", +/* 115 */ "kern_invalid", +/* 116 */ "kern_invalid", +/* 117 */ "kern_invalid", +/* 118 */ "kern_invalid", +/* 119 */ "kern_invalid", +/* 120 */ "kern_invalid", +/* 121 */ "kern_invalid", +/* 122 */ "kern_invalid", +/* 123 */ "kern_invalid", +/* 124 */ "kern_invalid", +/* 125 */ "kern_invalid", +/* 126 */ "kern_invalid", +/* 127 */ "kern_invalid", }; -int mach_trap_count = (sizeof(mach_trap_table) / sizeof(mach_trap_table[0])); +int mach_trap_count = (sizeof(mach_trap_table) / sizeof(mach_trap_table[0])); kern_return_t kern_invalid( __unused struct kern_invalid_args *args) { - if (kern_invalid_debug) Debugger("kern_invalid mach trap"); - return(KERN_INVALID_ARGUMENT); + if (kern_invalid_debug) { + Debugger("kern_invalid mach trap"); + } + return KERN_INVALID_ARGUMENT; } - diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c index 792daf7bc..708ef1787 100644 --- a/osfmk/kern/task.c +++ b/osfmk/kern/task.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. 
- * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -114,7 +114,7 @@ #include #include #include -#include /* for thread_wakeup */ +#include /* for thread_wakeup */ #include #include #include @@ -140,7 +140,7 @@ #include #include -#include /* for kernel_map, ipc_kernel_map */ +#include /* for kernel_map, ipc_kernel_map */ #include #include #include @@ -167,7 +167,7 @@ #include #endif -#include /* picks up ledger.h */ +#include /* picks up ledger.h */ #if CONFIG_MACF #include @@ -177,8 +177,8 @@ extern int kpc_force_all_ctrs(task_t, int); #endif -task_t kernel_task; -zone_t task_zone; +task_t kernel_task; +zone_t task_zone; lck_attr_t task_lck_attr; lck_grp_t task_lck_grp; lck_grp_attr_t task_lck_grp_attr; @@ -194,8 +194,8 @@ zinfo_usage_store_t tasks_tkm_private; zinfo_usage_store_t tasks_tkm_shared; /* A container to accumulate statistics for expired tasks */ -expired_task_statistics_t dead_task_statistics; -lck_spin_t dead_task_statistics_lock; +expired_task_statistics_t dead_task_statistics; +lck_spin_t dead_task_statistics_lock; ledger_template_t task_ledger_template = NULL; @@ -224,14 +224,17 @@ SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__( .interrupt_wakeups = -1, #if !CONFIG_EMBEDDED .sfi_wait_times = { 0 /* initialized at runtime */}, -#endif /* !CONFIG_EMBEDDED */ +#endif /* !CONFIG_EMBEDDED */ .cpu_time_billed_to_me = -1, .cpu_time_billed_to_others = -1, .physical_writes = -1, .logical_writes = -1, .energy_billed_to_me = -1, - .energy_billed_to_others = -1 -}; + .energy_billed_to_others = -1, + .pages_grabbed = -1, + .pages_grabbed_kern = -1, + .pages_grabbed_iopl = -1, + .pages_grabbed_upl = -1}; /* System sleep state */ boolean_t tasks_suspend_state; @@ -256,10 +259,10 @@ extern void bsd_copythreadname(void *dst_uth, void *src_uth); extern kern_return_t thread_resume(thread_t thread); // Warn tasks when they hit 80% of their memory limit. -#define PHYS_FOOTPRINT_WARNING_LEVEL 80 +#define PHYS_FOOTPRINT_WARNING_LEVEL 80 -#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT 150 /* wakeups per second */ -#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL 300 /* in seconds. */ +#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT 150 /* wakeups per second */ +#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL 300 /* in seconds. */ /* * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry. @@ -267,7 +270,7 @@ extern kern_return_t thread_resume(thread_t thread); * (ie when the task's wakeups rate exceeds 70% of the limit, start taking user * stacktraces, aka micro-stackshots) */ -#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER 70 +#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER 70 int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */ int task_wakeups_monitor_rate; /* In hz. 
Maximum allowable wakeups per task before EXC_RESOURCE is sent */ @@ -281,15 +284,15 @@ int max_task_footprint_warning_level = 0; /* Per-task limit warning percentage int max_task_footprint_mb = 0; /* Per-task limit on physical memory consumption in megabytes */ /* I/O Monitor Limits */ -#define IOMON_DEFAULT_LIMIT (20480ull) /* MB of logical/physical I/O */ -#define IOMON_DEFAULT_INTERVAL (86400ull) /* in seconds */ +#define IOMON_DEFAULT_LIMIT (20480ull) /* MB of logical/physical I/O */ +#define IOMON_DEFAULT_INTERVAL (86400ull) /* in seconds */ -uint64_t task_iomon_limit_mb; /* Per-task I/O monitor limit in MBs */ -uint64_t task_iomon_interval_secs; /* Per-task I/O monitor interval in secs */ +uint64_t task_iomon_limit_mb; /* Per-task I/O monitor limit in MBs */ +uint64_t task_iomon_interval_secs; /* Per-task I/O monitor interval in secs */ -#define IO_TELEMETRY_DEFAULT_LIMIT (10ll * 1024ll * 1024ll) -int64_t io_telemetry_limit; /* Threshold to take a microstackshot (0 indicated I/O telemetry is turned off) */ -int64_t global_logical_writes_count = 0; /* Global count for logical writes */ +#define IO_TELEMETRY_DEFAULT_LIMIT (10ll * 1024ll * 1024ll) +int64_t io_telemetry_limit; /* Threshold to take a microstackshot (0 indicated I/O telemetry is turned off) */ +int64_t global_logical_writes_count = 0; /* Global count for logical writes */ static boolean_t global_update_logical_writes(int64_t); #define TASK_MAX_THREAD_LIMIT 256 @@ -306,20 +309,20 @@ int hwm_user_cores = 0; /* high watermark violations generate user core files */ #endif #ifdef MACH_BSD -extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long); -extern int proc_pid(struct proc *p); -extern int proc_selfpid(void); +extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long); +extern int proc_pid(struct proc *p); +extern int proc_selfpid(void); extern struct proc *current_proc(void); -extern char *proc_name_address(struct proc *p); +extern char *proc_name_address(struct proc *p); extern uint64_t get_dispatchqueue_offset_from_proc(void *); extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize); extern void workq_proc_suspended(struct proc *p); extern void workq_proc_resumed(struct proc *p); #if CONFIG_MEMORYSTATUS -extern void proc_memstat_terminated(struct proc* p, boolean_t set); -extern void memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal); -extern void memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal); +extern void proc_memstat_terminated(struct proc* p, boolean_t set); +extern void memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal); +extern void memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal); extern boolean_t memorystatus_allowed_vm_map_fork(task_t task); #if DEVELOPMENT || DEBUG @@ -336,7 +339,7 @@ int exc_resource_threads_enabled; #if (DEVELOPMENT || DEBUG) && TASK_EXC_GUARD_DELIVER_CORPSE uint32_t task_exc_guard_default = TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_CORPSE | - TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE; + TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_CORPSE; #else uint32_t task_exc_guard_default = 0; #endif @@ -352,9 +355,9 @@ static void task_synchronizer_destroy_all(task_t task); void task_set_64bit( - task_t task, - boolean_t is_64bit, - boolean_t is_64bit_data) + task_t task, + 
boolean_t is_64bit, + boolean_t is_64bit_data) { #if defined(__i386__) || defined(__x86_64__) || defined(__arm64__) thread_t thread; @@ -379,13 +382,15 @@ task_set_64bit( * Switching to/from 64-bit register state. */ if (is_64bit_data) { - if (task_has_64Bit_data(task)) + if (task_has_64Bit_data(task)) { goto out; + } task_set_64Bit_data(task); } else { - if ( !task_has_64Bit_data(task)) + if (!task_has_64Bit_data(task)) { goto out; + } task_clear_64Bit_data(task); } @@ -442,8 +447,8 @@ task_get_64bit_data(task_t task) void task_set_platform_binary( - task_t task, - boolean_t is_platform) + task_t task, + boolean_t is_platform) { task_lock(task); if (is_platform) { @@ -460,17 +465,18 @@ task_set_platform_binary( */ bool task_set_ca_client_wi( - task_t task, - boolean_t set_or_clear) + task_t task, + boolean_t set_or_clear) { bool ret = true; task_lock(task); if (set_or_clear) { /* Tasks can have only one CA_CLIENT work interval */ - if (task->t_flags & TF_CA_CLIENT_WI) + if (task->t_flags & TF_CA_CLIENT_WI) { ret = false; - else + } else { task->t_flags |= TF_CA_CLIENT_WI; + } } else { task->t_flags &= ~TF_CA_CLIENT_WI; } @@ -480,33 +486,32 @@ task_set_ca_client_wi( void task_set_dyld_info( - task_t task, - mach_vm_address_t addr, - mach_vm_size_t size) + task_t task, + mach_vm_address_t addr, + mach_vm_size_t size) { task_lock(task); task->all_image_info_addr = addr; - task->all_image_info_size = size; - task_unlock(task); + task->all_image_info_size = size; + task_unlock(task); } void -task_atm_reset(__unused task_t task) { - +task_atm_reset(__unused task_t task) +{ #if CONFIG_ATM if (task->atm_context != NULL) { - atm_task_descriptor_destroy(task->atm_context); - task->atm_context = NULL; + atm_task_descriptor_destroy(task->atm_context); + task->atm_context = NULL; } #endif - } void -task_bank_reset(__unused task_t task) { - +task_bank_reset(__unused task_t task) +{ if (task->bank_context != NULL) { - bank_task_destroy(task); + bank_task_destroy(task); } } @@ -516,8 +521,8 @@ task_bank_reset(__unused task_t task) { * proc associated with the task. 
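
The task_set_ca_client_wi() hunk above is a test-and-set of a flag bit under the task lock: setting fails if TF_CA_CLIENT_WI is already set (a task may have only one CA_CLIENT work interval), while clearing always succeeds. A user-space sketch of the same pattern using a pthread mutex; the names are illustrative, not kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define TF_CA_CLIENT_WI 0x1

struct task_model {
    pthread_mutex_t lock;
    unsigned int    t_flags;
};

static bool
task_set_ca_client_wi_model(struct task_model *task, bool set_or_clear)
{
    bool ret = true;

    pthread_mutex_lock(&task->lock);
    if (set_or_clear) {
        /* Only one CA_CLIENT work interval per task. */
        if (task->t_flags & TF_CA_CLIENT_WI) {
            ret = false;
        } else {
            task->t_flags |= TF_CA_CLIENT_WI;
        }
    } else {
        task->t_flags &= ~TF_CA_CLIENT_WI;
    }
    pthread_mutex_unlock(&task->lock);
    return ret;
}

int
main(void)
{
    struct task_model t = { PTHREAD_MUTEX_INITIALIZER, 0 };
    printf("first set:  %d\n", task_set_ca_client_wi_model(&t, true));  /* 1 */
    printf("second set: %d\n", task_set_ca_client_wi_model(&t, true));  /* 0 */
    return 0;
}
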
*/ void -task_bank_init(__unused task_t task) { - +task_bank_init(__unused task_t task) +{ if (task->bank_context != NULL) { panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context); } @@ -643,11 +648,11 @@ task_is_halting(task_t task) #include static btlog_t *task_ref_btlog; -#define TASK_REF_OP_INCR 0x1 -#define TASK_REF_OP_DECR 0x2 +#define TASK_REF_OP_INCR 0x1 +#define TASK_REF_OP_DECR 0x2 -#define TASK_REF_NUM_RECORDS 100000 -#define TASK_REF_BTDEPTH 7 +#define TASK_REF_NUM_RECORDS 100000 +#define TASK_REF_BTDEPTH 7 void task_reference_internal(task_t task) @@ -659,7 +664,7 @@ task_reference_internal(task_t task) numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH); btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_INCR, - bt, numsaved); + bt, numsaved); } os_ref_count_t @@ -670,7 +675,7 @@ task_deallocate_internal(task_t task) numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH); btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_DECR, - bt, numsaved); + bt, numsaved); return os_ref_release(&task->ref_count); } @@ -680,7 +685,6 @@ task_deallocate_internal(task_t task) void task_init(void) { - lck_grp_attr_setdefault(&task_lck_grp_attr); lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr); lck_attr_setdefault(&task_lck_attr); @@ -688,10 +692,10 @@ task_init(void) lck_mtx_init(&tasks_corpse_lock, &task_lck_grp, &task_lck_attr); task_zone = zinit( - sizeof(struct task), - task_max * sizeof(struct task), - TASK_CHUNK * sizeof(struct task), - "tasks"); + sizeof(struct task), + task_max * sizeof(struct task), + TASK_CHUNK * sizeof(struct task), + "tasks"); zone_change(task_zone, Z_NOENCRYPT, TRUE); @@ -706,12 +710,12 @@ task_init(void) * Setting the boot-arg to 0 disables task limits. */ if (!PE_parse_boot_argn("max_task_pmem", &max_task_footprint_mb, - sizeof (max_task_footprint_mb))) { + sizeof(max_task_footprint_mb))) { /* * No limit was found in boot-args, so go look in the device tree. */ if (!PE_get_default("kern.max_task_pmem", &max_task_footprint_mb, - sizeof(max_task_footprint_mb))) { + sizeof(max_task_footprint_mb))) { /* * No limit was found in device tree. 
*/ @@ -722,12 +726,12 @@ task_init(void) if (max_task_footprint_mb != 0) { #if CONFIG_MEMORYSTATUS if (max_task_footprint_mb < 50) { - printf("Warning: max_task_pmem %d below minimum.\n", - max_task_footprint_mb); - max_task_footprint_mb = 50; + printf("Warning: max_task_pmem %d below minimum.\n", + max_task_footprint_mb); + max_task_footprint_mb = 50; } printf("Limiting task physical memory footprint to %d MB\n", - max_task_footprint_mb); + max_task_footprint_mb); max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024; // Convert MB to bytes @@ -771,8 +775,8 @@ task_init(void) #if DEVELOPMENT || DEBUG if (!PE_parse_boot_argn("exc_resource_threads", - &exc_resource_threads_enabled, - sizeof(exc_resource_threads_enabled))) { + &exc_resource_threads_enabled, + sizeof(exc_resource_threads_enabled))) { exc_resource_threads_enabled = 1; } PE_parse_boot_argn("task_exc_guard_default", @@ -782,40 +786,40 @@ task_init(void) #if CONFIG_COREDUMP if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores, - sizeof (hwm_user_cores))) { + sizeof(hwm_user_cores))) { hwm_user_cores = 0; } #endif proc_init_cpumon_params(); - if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof (task_wakeups_monitor_rate))) { + if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) { task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT; } - if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof (task_wakeups_monitor_interval))) { + if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) { task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL; } if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct, - sizeof (task_wakeups_monitor_ustackshots_trigger_pct))) { + sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) { task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER; } if (!PE_parse_boot_argn("disable_exc_resource", &disable_exc_resource, - sizeof (disable_exc_resource))) { + sizeof(disable_exc_resource))) { disable_exc_resource = 0; } - if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof (task_iomon_limit_mb))) { + if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) { task_iomon_limit_mb = IOMON_DEFAULT_LIMIT; } - if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof (task_iomon_interval_secs))) { + if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) { task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL; } - if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof (io_telemetry_limit))) { + if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) { io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT; } @@ -843,7 +847,7 @@ task_init(void) #else if (task_create_internal(TASK_NULL, NULL, FALSE, FALSE, FALSE, TF_NONE, TPF_NONE, &kernel_task) != KERN_SUCCESS) #endif - panic("task_init\n"); + { panic("task_init\n");} vm_map_deallocate(kernel_task->map); @@ -857,54 +861,57 @@ task_init(void) */ kern_return_t kernel_task_create( - __unused task_t parent_task, - __unused vm_offset_t map_base, - __unused vm_size_t map_size, - __unused task_t *child_task) + __unused 
task_t parent_task, + __unused vm_offset_t map_base, + __unused vm_size_t map_size, + __unused task_t *child_task) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } kern_return_t task_create( - task_t parent_task, - __unused ledger_port_array_t ledger_ports, - __unused mach_msg_type_number_t num_ledger_ports, - __unused boolean_t inherit_memory, - __unused task_t *child_task) /* OUT */ + task_t parent_task, + __unused ledger_port_array_t ledger_ports, + __unused mach_msg_type_number_t num_ledger_ports, + __unused boolean_t inherit_memory, + __unused task_t *child_task) /* OUT */ { - if (parent_task == TASK_NULL) - return(KERN_INVALID_ARGUMENT); + if (parent_task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } /* * No longer supported: too many calls assume that a task has a valid * process attached. */ - return(KERN_FAILURE); + return KERN_FAILURE; } kern_return_t host_security_create_task_token( - host_security_t host_security, - task_t parent_task, - __unused security_token_t sec_token, - __unused audit_token_t audit_token, - __unused host_priv_t host_priv, - __unused ledger_port_array_t ledger_ports, - __unused mach_msg_type_number_t num_ledger_ports, - __unused boolean_t inherit_memory, - __unused task_t *child_task) /* OUT */ -{ - if (parent_task == TASK_NULL) - return(KERN_INVALID_ARGUMENT); - - if (host_security == HOST_NULL) - return(KERN_INVALID_SECURITY); + host_security_t host_security, + task_t parent_task, + __unused security_token_t sec_token, + __unused audit_token_t audit_token, + __unused host_priv_t host_priv, + __unused ledger_port_array_t ledger_ports, + __unused mach_msg_type_number_t num_ledger_ports, + __unused boolean_t inherit_memory, + __unused task_t *child_task) /* OUT */ +{ + if (parent_task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } + + if (host_security == HOST_NULL) { + return KERN_INVALID_SECURITY; + } /* * No longer supported. */ - return(KERN_FAILURE); + return KERN_FAILURE; } /* @@ -931,32 +938,37 @@ host_security_create_task_token( * * iokit_mapped * IOKit mappings: The total size of all IOKit mappings in this task, regardless of - clean/dirty or internal/external state]. + * clean/dirty or internal/external state]. * * alternate_accounting * The number of internal dirty pages which are part of IOKit mappings. By definition, these pages * are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid * double counting. + * + * pages_grabbed + * pages_grabbed counts all page grabs in a task. It is also broken out into three subtypes + * which track UPL, IOPL and Kernel page grabs. 
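
A minimal model of the pages_grabbed accounting documented above: the total is always credited, one of the kern/iopl/upl breakouts is credited alongside it, and (as the later ledger_track_credit_only() hunk shows) none of these entries is ever debited. Plain C counters for illustration, not the kernel ledger API:

#include <stdint.h>
#include <stdio.h>

struct task_ledger_model {
    int64_t pages_grabbed;       /* all page grabs */
    int64_t pages_grabbed_kern;  /* kernel page grabs */
    int64_t pages_grabbed_iopl;  /* IOPL page grabs */
    int64_t pages_grabbed_upl;   /* UPL page grabs */
};

enum grab_kind { GRAB_KERN, GRAB_IOPL, GRAB_UPL };

static void
ledger_credit_pages(struct task_ledger_model *l, enum grab_kind kind,
    int64_t pages)
{
    l->pages_grabbed += pages;   /* total always credited, never debited */
    switch (kind) {
    case GRAB_KERN: l->pages_grabbed_kern += pages; break;
    case GRAB_IOPL: l->pages_grabbed_iopl += pages; break;
    case GRAB_UPL:  l->pages_grabbed_upl  += pages; break;
    }
}

int
main(void)
{
    struct task_ledger_model l = { 0, 0, 0, 0 };
    ledger_credit_pages(&l, GRAB_KERN, 3);
    ledger_credit_pages(&l, GRAB_UPL, 2);
    printf("total %lld (kern %lld, iopl %lld, upl %lld)\n",
        (long long)l.pages_grabbed, (long long)l.pages_grabbed_kern,
        (long long)l.pages_grabbed_iopl, (long long)l.pages_grabbed_upl);
    return 0;
}
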
*/ void init_task_ledgers(void) { ledger_template_t t; - + assert(task_ledger_template == NULL); assert(kernel_task == TASK_NULL); #if MACH_ASSERT PE_parse_boot_argn("pmap_ledgers_panic", - &pmap_ledgers_panic, - sizeof (pmap_ledgers_panic)); + &pmap_ledgers_panic, + sizeof(pmap_ledgers_panic)); PE_parse_boot_argn("pmap_ledgers_panic_leeway", - &pmap_ledgers_panic_leeway, - sizeof (pmap_ledgers_panic_leeway)); + &pmap_ledgers_panic_leeway, + sizeof(pmap_ledgers_panic_leeway)); #endif /* MACH_ASSERT */ - if ((t = ledger_template_create("Per-task ledger")) == NULL) + if ((t = ledger_template_create("Per-task ledger")) == NULL) { panic("couldn't create task ledger template"); + } task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns"); task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private", @@ -970,21 +982,25 @@ init_task_ledgers(void) task_ledgers.internal = ledger_entry_add(t, "internal", "physmem", "bytes"); task_ledgers.iokit_mapped = ledger_entry_add(t, "iokit_mapped", "mappings", - "bytes"); + "bytes"); task_ledgers.alternate_accounting = ledger_entry_add(t, "alternate_accounting", "physmem", - "bytes"); + "bytes"); task_ledgers.alternate_accounting_compressed = ledger_entry_add(t, "alternate_accounting_compressed", "physmem", - "bytes"); + "bytes"); task_ledgers.page_table = ledger_entry_add(t, "page_table", "physmem", "bytes"); task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem", - "bytes"); + "bytes"); task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem", - "bytes"); + "bytes"); task_ledgers.purgeable_volatile = ledger_entry_add(t, "purgeable_volatile", "physmem", "bytes"); task_ledgers.purgeable_nonvolatile = ledger_entry_add(t, "purgeable_nonvolatile", "physmem", "bytes"); task_ledgers.purgeable_volatile_compressed = ledger_entry_add(t, "purgeable_volatile_compress", "physmem", "bytes"); task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add(t, "purgeable_nonvolatile_compress", "physmem", "bytes"); + task_ledgers.pages_grabbed = ledger_entry_add(t, "pages_grabbed", "physmem", "count"); + task_ledgers.pages_grabbed_kern = ledger_entry_add(t, "pages_grabbed_kern", "physmem", "count"); + task_ledgers.pages_grabbed_iopl = ledger_entry_add(t, "pages_grabbed_iopl", "physmem", "count"); + task_ledgers.pages_grabbed_upl = ledger_entry_add(t, "pages_grabbed_upl", "physmem", "count"); task_ledgers.network_volatile = ledger_entry_add(t, "network_volatile", "physmem", "bytes"); task_ledgers.network_nonvolatile = ledger_entry_add(t, "network_nonvolatile", "physmem", "bytes"); @@ -992,10 +1008,10 @@ init_task_ledgers(void) task_ledgers.network_nonvolatile_compressed = ledger_entry_add(t, "network_nonvolatile_compressed", "physmem", "bytes"); task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power", - "count"); + "count"); task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power", - "count"); - + "count"); + #if CONFIG_SCHED_SFI sfi_class_id_t class_id, ledger_alias; for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) { @@ -1022,7 +1038,7 @@ init_task_ledgers(void) } } - assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID -1] != -1); + assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1); #endif /* CONFIG_SCHED_SFI */ task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns"); @@ -1074,6 +1090,10 @@ init_task_ledgers(void) ledger_track_credit_only(t, 
task_ledgers.purgeable_nonvolatile); ledger_track_credit_only(t, task_ledgers.purgeable_volatile_compressed); ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile_compressed); + ledger_track_credit_only(t, task_ledgers.pages_grabbed); + ledger_track_credit_only(t, task_ledgers.pages_grabbed_kern); + ledger_track_credit_only(t, task_ledgers.pages_grabbed_iopl); + ledger_track_credit_only(t, task_ledgers.pages_grabbed_upl); ledger_track_credit_only(t, task_ledgers.network_volatile); ledger_track_credit_only(t, task_ledgers.network_nonvolatile); @@ -1107,7 +1127,7 @@ init_task_ledgers(void) #endif /* CONFIG_MEMORYSTATUS */ ledger_set_callback(t, task_ledgers.interrupt_wakeups, - task_wakeups_rate_exceeded, NULL, NULL); + task_wakeups_rate_exceeded, NULL, NULL); ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL); ledger_set_callback(t, task_ledgers.logical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_LOGICAL_WRITES, NULL); @@ -1119,23 +1139,24 @@ os_refgrp_decl(static, task_refgrp, "task", NULL); kern_return_t task_create_internal( - task_t parent_task, - coalition_t *parent_coalitions __unused, - boolean_t inherit_memory, - __unused boolean_t is_64bit, + task_t parent_task, + coalition_t *parent_coalitions __unused, + boolean_t inherit_memory, + __unused boolean_t is_64bit, boolean_t is_64bit_data, - uint32_t t_flags, - uint32_t t_procflags, - task_t *child_task) /* OUT */ + uint32_t t_flags, + uint32_t t_procflags, + task_t *child_task) /* OUT */ { - task_t new_task; - vm_shared_region_t shared_region; - ledger_t ledger = NULL; + task_t new_task; + vm_shared_region_t shared_region; + ledger_t ledger = NULL; new_task = (task_t) zalloc(task_zone); - if (new_task == TASK_NULL) - return(KERN_RESOURCE_SHORTAGE); + if (new_task == TASK_NULL) { + return KERN_RESOURCE_SHORTAGE; + } /* one ref for just being alive; one for our caller */ os_ref_init_count(&new_task->ref_count, &task_refgrp, 2); @@ -1143,9 +1164,9 @@ task_create_internal( /* allocate with active entries */ assert(task_ledger_template != NULL); if ((ledger = ledger_instantiate(task_ledger_template, - LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) { + LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) { zfree(task_zone, new_task); - return(KERN_RESOURCE_SHORTAGE); + return KERN_RESOURCE_SHORTAGE; } @@ -1156,16 +1177,18 @@ task_create_internal( #endif /* if inherit_memory is true, parent_task MUST not be NULL */ - if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) + if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) { new_task->map = vm_map_fork(ledger, parent_task->map, 0); - else + } else { new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit), - (vm_map_offset_t)(VM_MIN_ADDRESS), - (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE); + (vm_map_offset_t)(VM_MIN_ADDRESS), + (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE); + } /* Inherit memlock limit from parent */ - if (parent_task) + if (parent_task) { vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit); + } lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr); queue_init(&new_task->threads); @@ -1184,7 +1207,7 @@ task_create_internal( new_task->exec_token = 0; new_task->task_exc_guard = task_exc_guard_default; - + #if CONFIG_ATM new_task->atm_context = NULL; #endif @@ -1275,22 +1298,29 @@ task_create_internal( shared_region = vm_shared_region_get(parent_task); vm_shared_region_set(new_task, shared_region); - if(task_has_64Bit_addr(parent_task)) { + if (task_has_64Bit_addr(parent_task)) { 
task_set_64Bit_addr(new_task); } - if(task_has_64Bit_data(parent_task)) { + if (task_has_64Bit_data(parent_task)) { task_set_64Bit_data(new_task); } new_task->all_image_info_addr = parent_task->all_image_info_addr; new_task->all_image_info_size = parent_task->all_image_info_size; - if (inherit_memory && parent_task->affinity_space) + if (inherit_memory && parent_task->affinity_space) { task_affinity_create(parent_task, new_task); + } new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task); +#if DEBUG || DEVELOPMENT + if (parent_task->t_flags & TF_NO_SMT) { + new_task->t_flags |= TF_NO_SMT; + } +#endif + new_task->priority = BASEPRI_DEFAULT; new_task->max_priority = MAXPRI_USER; @@ -1299,12 +1329,12 @@ task_create_internal( new_task->sec_token = KERNEL_SECURITY_TOKEN; new_task->audit_token = KERNEL_AUDIT_TOKEN; #ifdef __LP64__ - if(is_64bit) { + if (is_64bit) { task_set_64Bit_addr(new_task); } #endif - if(is_64bit_data) { + if (is_64bit_data) { task_set_64Bit_data(new_task); } @@ -1323,8 +1353,9 @@ task_create_internal( } bzero(new_task->coalition, sizeof(new_task->coalition)); - for (int i = 0; i < COALITION_NUM_TYPES; i++) + for (int i = 0; i < COALITION_NUM_TYPES; i++) { queue_chain_init(new_task->task_coalition[i]); + } /* Allocate I/O Statistics */ new_task->task_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info)); @@ -1444,20 +1475,20 @@ task_create_internal( */ new_task->t_flags &= ~(TF_DARKWAKE_MODE); - queue_init(&new_task->io_user_clients); + queue_init(&new_task->io_user_clients); ipc_task_enable(new_task); lck_mtx_lock(&tasks_threads_lock); queue_enter(&tasks, new_task, task_t, tasks); tasks_count++; - if (tasks_suspend_state) { - task_suspend_internal(new_task); - } + if (tasks_suspend_state) { + task_suspend_internal(new_task); + } lck_mtx_unlock(&tasks_threads_lock); *child_task = new_task; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1529,13 +1560,14 @@ int task_dropped_imp_count = 0; */ void task_deallocate( - task_t task) + task_t task) { ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups; os_ref_count_t refs; - if (task == TASK_NULL) - return; + if (task == TASK_NULL) { + return; + } refs = task_deallocate_internal(task); @@ -1548,8 +1580,9 @@ task_deallocate( * naturally (it may happen on a recursive task_deallocate() * from the ipc_importance_disconnect_task() call). 
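
The task_deallocate() path continuing below drops one reference and acts on the remaining count: more than one left means return, exactly one left triggers the importance-disconnect special case, and zero means tear the task down. A minimal C11-atomics model of that release pattern, with illustrative names rather than the os_refcnt API:

#include <stdatomic.h>
#include <stdio.h>

struct obj_model {
    atomic_uint ref_count;
};

static unsigned int
obj_release(struct obj_model *o)
{
    /* fetch_sub returns the prior value; the new count is prior - 1. */
    unsigned int refs = atomic_fetch_sub(&o->ref_count, 1) - 1;

    if (refs == 1) {
        /* Penultimate ref: hook for an importance-style disconnect. */
        printf("one reference left\n");
    } else if (refs == 0) {
        printf("last reference dropped: destroy\n");
    }
    return refs;
}

int
main(void)
{
    struct obj_model o = { 3 };
    while (obj_release(&o) > 0) {
    }
    return 0;
}
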
*/ - if (IIT_NULL != task->task_imp_base) + if (IIT_NULL != task->task_imp_base) { ipc_importance_disconnect_task(task); + } return; } #endif /* IMPORTANCE_INHERITANCE */ @@ -1573,8 +1606,9 @@ task_deallocate( */ task_bank_reset(task); - if (task->task_io_stats) + if (task->task_io_stats) { kfree(task->task_io_stats, sizeof(struct io_stat_info)); + } /* * Give the machine dependent code a chance @@ -1588,8 +1622,9 @@ task_deallocate( /* let iokit know */ iokit_task_terminate(task); - if (task->affinity_space) + if (task->affinity_space) { task_affinity_deallocate(task); + } #if MACH_ASSERT if (task->ledger != NULL && @@ -1605,19 +1640,19 @@ task_deallocate( if (task->task_volatile_objects != 0 || task->task_nonvolatile_objects != 0) { panic("task_deallocate(%p): " - "volatile_objects=%d nonvolatile_objects=%d\n", - task, - task->task_volatile_objects, - task->task_nonvolatile_objects); + "volatile_objects=%d nonvolatile_objects=%d\n", + task, + task->task_volatile_objects, + task->task_nonvolatile_objects); } vm_map_deallocate(task->map); is_release(task->itk_space); ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups, - &interrupt_wakeups, &debit); + &interrupt_wakeups, &debit); ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups, - &platform_idle_wakeups, &debit); + &platform_idle_wakeups, &debit); #if defined(CONFIG_SCHED_MULTIQ) sched_group_destroy(task->sched_group); @@ -1694,9 +1729,9 @@ task_deallocate( */ void task_name_deallocate( - task_name_t task_name) + task_name_t task_name) { - return(task_deallocate((task_t)task_name)); + return task_deallocate((task_t)task_name); } /* @@ -1706,9 +1741,9 @@ task_name_deallocate( */ void task_inspect_deallocate( - task_inspect_t task_inspect) + task_inspect_t task_inspect) { - return(task_deallocate((task_t)task_inspect)); + return task_deallocate((task_t)task_inspect); } /* @@ -1718,9 +1753,9 @@ task_inspect_deallocate( */ void task_suspension_token_deallocate( - task_suspension_token_t token) + task_suspension_token_t token) { - return(task_deallocate((task_t)token)); + return task_deallocate((task_t)token); } @@ -1756,7 +1791,7 @@ task_collect_crash_info( #if CONFIG_MACF free_label = label = mac_exc_create_label(); #endif - + task_lock(task); assert(is_corpse_fork || task->bsd_info != NULL); @@ -1779,8 +1814,8 @@ task_collect_crash_info( /* Do not get a corpse ref for corpse fork */ crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size, - is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF, - KCFLAG_USE_MEMCOPY); + is_corpse_fork ? 
0 : CORPSE_CRASHINFO_HAS_REF, + KCFLAG_USE_MEMCOPY); if (crash_data) { task_lock(task); crash_data_release = task->corpse_info; @@ -1832,8 +1867,9 @@ task_deliver_crash_notification( mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; ipc_port_t task_port, old_notify; - if (crash_info == NULL) + if (crash_info == NULL) { return KERN_FAILURE; + } task_lock(task); if (task_is_a_corpse_fork(task)) { @@ -1892,15 +1928,17 @@ task_deliver_crash_notification( kern_return_t task_terminate( - task_t task) + task_t task) { - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (task->bsd_info) - return (KERN_FAILURE); + if (task->bsd_info) { + return KERN_FAILURE; + } - return (task_terminate_internal(task)); + return task_terminate_internal(task); } #if MACH_ASSERT @@ -1912,19 +1950,19 @@ extern void proc_name_kdp(task_t t, char *buf, int size); static void __unused task_partial_reap(task_t task, __unused int pid) { - unsigned int reclaimed_resident = 0; - unsigned int reclaimed_compressed = 0; + unsigned int reclaimed_resident = 0; + unsigned int reclaimed_compressed = 0; uint64_t task_page_count; task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START), - pid, task_page_count, 0, 0, 0); + pid, task_page_count, 0, 0, 0); vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed); - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END), - pid, reclaimed_resident, reclaimed_compressed, 0, 0); + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END), + pid, reclaimed_resident, reclaimed_compressed, 0, 0); } kern_return_t @@ -1945,12 +1983,12 @@ task_mark_corpse(task_t task) #if CONFIG_MACF crash_label = mac_exc_create_label_for_proc((struct proc*)task->bsd_info); #endif - + kr = task_collect_crash_info(task, #if CONFIG_MACF - crash_label, + crash_label, #endif - FALSE); + FALSE); if (kr != KERN_SUCCESS) { goto out; } @@ -1978,7 +2016,7 @@ task_mark_corpse(task_t task) /* Add it to global corpse task list */ task_add_to_corpse_task_list(task); - + task_start_halt(task); thread_terminate_internal(self_thread); @@ -2073,13 +2111,13 @@ task_wait_till_threads_terminate_locked(task_t task) */ kern_return_t task_duplicate_map_and_threads( - task_t task, - void *p, - task_t new_task, - thread_t *thread_ret, - uint64_t **udata_buffer, - int *size, - int *num_udata) + task_t task, + void *p, + task_t new_task, + thread_t *thread_ret, + uint64_t **udata_buffer, + int *size, + int *num_udata) { kern_return_t kr = KERN_SUCCESS; int active; @@ -2122,14 +2160,13 @@ task_duplicate_map_and_threads( /* Check with VM if vm_map_fork is allowed for this task */ if (memorystatus_allowed_vm_map_fork(task)) { - /* Setup new task's vmmap, switch from parent task's map to it COW map */ oldmap = new_task->map; new_task->map = vm_map_fork(new_task->ledger, - task->map, - (VM_MAP_FORK_SHARE_IF_INHERIT_NONE | - VM_MAP_FORK_PRESERVE_PURGEABLE | - VM_MAP_FORK_CORPSE_FOOTPRINT)); + task->map, + (VM_MAP_FORK_SHARE_IF_INHERIT_NONE | + VM_MAP_FORK_PRESERVE_PURGEABLE | + VM_MAP_FORK_CORPSE_FOOTPRINT)); vm_map_deallocate(oldmap); /* copy ledgers that impact the memory footprint */ @@ -2177,7 +2214,6 @@ task_duplicate_map_and_threads( task_unlock(task); for (i = 0; i < array_count; i++) { - kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue); 
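
In the task_terminate_internal() hunk that follows, the two task locks are taken in address order (the task < self_task comparison decides which is locked first) so that two tasks terminating each other cannot deadlock on each other's locks; the task == self_task case matters because a task may terminate itself. A pthread sketch of that ordering rule; like the kernel code it compares object addresses, and the names are illustrative:

#include <pthread.h>
#include <stdio.h>

struct task_model {
    pthread_mutex_t lock;
};

static void
lock_pair_ordered(struct task_model *a, struct task_model *b)
{
    if (a == b) {
        pthread_mutex_lock(&a->lock);
    } else if (a < b) {           /* compare addresses, lower one first */
        pthread_mutex_lock(&a->lock);
        pthread_mutex_lock(&b->lock);
    } else {
        pthread_mutex_lock(&b->lock);
        pthread_mutex_lock(&a->lock);
    }
}

int
main(void)
{
    struct task_model t1 = { PTHREAD_MUTEX_INITIALIZER };
    struct task_model t2 = { PTHREAD_MUTEX_INITIALIZER };

    /* Every caller acquires the same mutex first, so no deadlock cycle. */
    lock_pair_ordered(&t1, &t2);
    pthread_mutex_unlock(&t2.lock);
    pthread_mutex_unlock(&t1.lock);
    printf("locked and unlocked in address order\n");
    return 0;
}
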
if (kr != KERN_SUCCESS) { break; @@ -2211,8 +2247,7 @@ task_duplicate_map_and_threads( /* return the first thread if we couldn't find the equivalent of current */ if (thread_return == THREAD_NULL) { thread_return = first_thread; - } - else if (first_thread != THREAD_NULL) { + } else if (first_thread != THREAD_NULL) { /* drop the extra ref returned by thread_create_with_continuation */ thread_deallocate(first_thread); } @@ -2243,18 +2278,18 @@ task_duplicate_map_and_threads( #if CONFIG_SECLUDED_MEMORY extern void task_set_can_use_secluded_mem_locked( - task_t task, - boolean_t can_use_secluded_mem); + task_t task, + boolean_t can_use_secluded_mem); #endif /* CONFIG_SECLUDED_MEMORY */ kern_return_t task_terminate_internal( - task_t task) + task_t task) { - thread_t thread, self; - task_t self_task; - boolean_t interrupt_save; - int pid = 0; + thread_t thread, self; + task_t self_task; + boolean_t interrupt_save; + int pid = 0; assert(task != kernel_task); @@ -2265,14 +2300,12 @@ task_terminate_internal( * Get the task locked and make sure that we are not racing * with someone else trying to terminate us. */ - if (task == self_task) + if (task == self_task) { task_lock(task); - else - if (task < self_task) { + } else if (task < self_task) { task_lock(task); task_lock(self_task); - } - else { + } else { task_lock(self_task); task_lock(task); } @@ -2297,10 +2330,11 @@ task_terminate_internal( * will get us to finalize the termination of ourselves. */ task_unlock(task); - if (self_task != task) + if (self_task != task) { task_unlock(self_task); + } - return (KERN_FAILURE); + return KERN_FAILURE; } if (task_corpse_pending_report(task)) { @@ -2311,14 +2345,16 @@ task_terminate_internal( * will get us to finish the path to death */ task_unlock(task); - if (self_task != task) + if (self_task != task) { task_unlock(self_task); + } - return (KERN_FAILURE); + return KERN_FAILURE; } - if (self_task != task) + if (self_task != task) { task_unlock(self_task); + } /* * Make sure the current thread does not get aborted out of @@ -2349,7 +2385,7 @@ task_terminate_internal( * Terminate each thread in the task. */ queue_iterate(&task->threads, thread, thread_t, task_threads) { - thread_terminate_internal(thread); + thread_terminate_internal(thread); } #ifdef MACH_BSD @@ -2361,16 +2397,16 @@ task_terminate_internal( task_unlock(task); proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE); + TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE); - /* Early object reap phase */ + /* Early object reap phase */ // PR-17045188: Revisit implementation // task_partial_reap(task, pid); #if CONFIG_EMBEDDED /* - * remove all task watchers + * remove all task watchers */ task_removewatchers(task); @@ -2389,17 +2425,17 @@ task_terminate_internal( #if 00 /* if some ledgers go negative on tear-down again... 
*/ ledger_disable_panic_on_negative(task->map->pmap->ledger, - task_ledgers.phys_footprint); + task_ledgers.phys_footprint); ledger_disable_panic_on_negative(task->map->pmap->ledger, - task_ledgers.internal); + task_ledgers.internal); ledger_disable_panic_on_negative(task->map->pmap->ledger, - task_ledgers.internal_compressed); + task_ledgers.internal_compressed); ledger_disable_panic_on_negative(task->map->pmap->ledger, - task_ledgers.iokit_mapped); + task_ledgers.iokit_mapped); ledger_disable_panic_on_negative(task->map->pmap->ledger, - task_ledgers.alternate_accounting); + task_ledgers.alternate_accounting); ledger_disable_panic_on_negative(task->map->pmap->ledger, - task_ledgers.alternate_accounting_compressed); + task_ledgers.alternate_accounting_compressed); #endif /* @@ -2423,26 +2459,26 @@ task_terminate_internal( char procname[17]; if (task->bsd_info && !task_is_exec_copy(task)) { pid = proc_pid(task->bsd_info); - proc_name_kdp(task, procname, sizeof (procname)); + proc_name_kdp(task, procname, sizeof(procname)); } else { pid = 0; - strlcpy(procname, "", sizeof (procname)); + strlcpy(procname, "", sizeof(procname)); } pmap_set_process(task->map->pmap, pid, procname); #endif /* MACH_ASSERT */ vm_map_remove(task->map, - task->map->min_offset, - task->map->max_offset, - /* - * Final cleanup: - * + no unnesting - * + remove immutable mappings - * + allow gaps in range - */ - (VM_MAP_REMOVE_NO_UNNESTING | - VM_MAP_REMOVE_IMMUTABLE | - VM_MAP_REMOVE_GAPS_OK)); + task->map->min_offset, + task->map->max_offset, + /* + * Final cleanup: + * + no unnesting + * + remove immutable mappings + * + allow gaps in range + */ + (VM_MAP_REMOVE_NO_UNNESTING | + VM_MAP_REMOVE_IMMUTABLE | + VM_MAP_REMOVE_GAPS_OK)); /* release our shared region */ vm_shared_region_set(task, NULL); @@ -2463,8 +2499,9 @@ task_terminate_internal( #if KPC /* force the task to release all ctrs */ - if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) + if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) { kpc_force_all_ctrs(task, 0); + } #endif /* KPC */ #if CONFIG_COALITIONS @@ -2479,7 +2516,7 @@ task_terminate_internal( */ task_deallocate(task); - return (KERN_SUCCESS); + return KERN_SUCCESS; } void @@ -2502,7 +2539,7 @@ tasks_system_suspend(boolean_t suspend) /* * task_start_halt: * - * Shut the current task down (except for the current thread) in + * Shut the current task down (except for the current thread) in * preparation for dramatic changes to the task (probably exec). * We hold the task and mark all other threads in the task for * termination. @@ -2527,8 +2564,9 @@ task_start_halt_locked(task_t task, boolean_t should_mark_corpse) self = current_thread(); - if (task != self->task && !task_is_a_corpse_fork(task)) - return (KERN_INVALID_ARGUMENT); + if (task != self->task && !task_is_a_corpse_fork(task)) { + return KERN_INVALID_ARGUMENT; + } if (task->halting || !task->active || !self->active) { /* @@ -2537,7 +2575,7 @@ task_start_halt_locked(task_t task, boolean_t should_mark_corpse) * so that we run our AST special handler to terminate * ourselves. */ - return (KERN_FAILURE); + return KERN_FAILURE; } task->halting = TRUE; @@ -2561,8 +2599,9 @@ task_start_halt_locked(task_t task, boolean_t should_mark_corpse) thread->inspection = TRUE; thread_mtx_unlock(thread); } - if (thread != self) + if (thread != self) { thread_terminate_internal(thread); + } } task->dispatchqueue_offset = dispatchqueue_offset; @@ -2626,16 +2665,16 @@ task_complete_halt(task_t task) * getting a new one. 
*/ vm_map_remove(task->map, task->map->min_offset, - task->map->max_offset, - /* - * Final cleanup: - * + no unnesting - * + remove immutable mappings - * + allow gaps in the range - */ - (VM_MAP_REMOVE_NO_UNNESTING | - VM_MAP_REMOVE_IMMUTABLE | - VM_MAP_REMOVE_GAPS_OK)); + task->map->max_offset, + /* + * Final cleanup: + * + no unnesting + * + remove immutable mappings + * + allow gaps in the range + */ + (VM_MAP_REMOVE_NO_UNNESTING | + VM_MAP_REMOVE_IMMUTABLE | + VM_MAP_REMOVE_GAPS_OK)); /* * Kick out any IOKitUser handles to the task. At best they're stale, @@ -2655,14 +2694,15 @@ task_complete_halt(task_t task) */ void task_hold_locked( - task_t task) + task_t task) { - thread_t thread; + thread_t thread; assert(task->active); - if (task->suspend_count++ > 0) + if (task->suspend_count++ > 0) { return; + } if (task->bsd_info) { workq_proc_suspended(task->bsd_info); @@ -2683,53 +2723,55 @@ task_hold_locked( * * Same as the internal routine above, except that is must lock * and verify that the task is active. This differs from task_suspend - * in that it places a kernel hold on the task rather than just a + * in that it places a kernel hold on the task rather than just a * user-level hold. This keeps users from over resuming and setting * it running out from under the kernel. * - * CONDITIONS: the caller holds a reference on the task + * CONDITIONS: the caller holds a reference on the task */ kern_return_t task_hold( - task_t task) + task_t task) { - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); if (!task->active) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } task_hold_locked(task); task_unlock(task); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t task_wait( - task_t task, - boolean_t until_not_runnable) + task_t task, + boolean_t until_not_runnable) { - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); if (!task->active) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } task_wait_locked(task, until_not_runnable); task_unlock(task); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -2742,10 +2784,10 @@ task_wait( */ void task_wait_locked( - task_t task, - boolean_t until_not_runnable) + task_t task, + boolean_t until_not_runnable) { - thread_t thread, self; + thread_t thread, self; assert(task->active); assert(task->suspend_count > 0); @@ -2758,8 +2800,9 @@ task_wait_locked( * the task. */ queue_iterate(&task->threads, thread, thread_t, task_threads) { - if (thread != self) + if (thread != self) { thread_wait(thread, until_not_runnable); + } } } @@ -2768,19 +2811,20 @@ task_wait_locked( * * Release a kernel hold on a task. * - * CONDITIONS: the task is locked and active + * CONDITIONS: the task is locked and active */ void task_release_locked( - task_t task) + task_t task) { - thread_t thread; + thread_t thread; assert(task->active); assert(task->suspend_count > 0); - if (--task->suspend_count > 0) + if (--task->suspend_count > 0) { return; + } if (task->bsd_info) { workq_proc_resumed(task->bsd_info); @@ -2799,44 +2843,46 @@ task_release_locked( * Same as the internal routine above, except that it must lock * and verify that the task is active. 
* - * CONDITIONS: The caller holds a reference to the task + * CONDITIONS: The caller holds a reference to the task */ kern_return_t task_release( - task_t task) + task_t task) { - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); if (!task->active) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } task_release_locked(task); task_unlock(task); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t task_threads( - task_t task, - thread_act_array_t *threads_out, - mach_msg_type_number_t *count) + task_t task, + thread_act_array_t *threads_out, + mach_msg_type_number_t *count) { - mach_msg_type_number_t actual; - thread_t *thread_list; - thread_t thread; - vm_size_t size, size_needed; - void *addr; - unsigned int i, j; + mach_msg_type_number_t actual; + thread_t *thread_list; + thread_t thread; + vm_size_t size, size_needed; + void *addr; + unsigned int i, j; - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } size = 0; addr = NULL; @@ -2845,31 +2891,35 @@ task_threads( if (!task->active) { task_unlock(task); - if (size != 0) + if (size != 0) { kfree(addr, size); + } - return (KERN_FAILURE); + return KERN_FAILURE; } actual = task->thread_count; /* do we have the memory we need? */ - size_needed = actual * sizeof (mach_port_t); - if (size_needed <= size) + size_needed = actual * sizeof(mach_port_t); + if (size_needed <= size) { break; + } /* unlock the task and allocate more memory */ task_unlock(task); - if (size != 0) + if (size != 0) { kfree(addr, size); + } assert(size_needed > 0); size = size_needed; addr = kalloc(size); - if (addr == 0) - return (KERN_RESOURCE_SHORTAGE); + if (addr == 0) { + return KERN_RESOURCE_SHORTAGE; + } } /* OK, have memory and the task is locked & active */ @@ -2878,7 +2928,7 @@ task_threads( i = j = 0; for (thread = (thread_t)queue_first(&task->threads); i < actual; - ++i, thread = (thread_t)queue_next(&thread->task_threads)) { + ++i, thread = (thread_t)queue_next(&thread->task_threads)) { thread_reference_internal(thread); thread_list[j++] = thread; } @@ -2886,7 +2936,7 @@ task_threads( assert(queue_end(&task->threads, (queue_entry_t)thread)); actual = j; - size_needed = actual * sizeof (mach_port_t); + size_needed = actual * sizeof(mach_port_t); /* can unlock task now that we've got the thread refs */ task_unlock(task); @@ -2897,10 +2947,10 @@ task_threads( *threads_out = NULL; *count = 0; - if (size != 0) + if (size != 0) { kfree(addr, size); - } - else { + } + } else { /* if we allocated too much, must copy */ if (size_needed < size) { @@ -2908,10 +2958,11 @@ task_threads( newaddr = kalloc(size_needed); if (newaddr == 0) { - for (i = 0; i < actual; ++i) + for (i = 0; i < actual; ++i) { thread_deallocate(thread_list[i]); + } kfree(addr, size); - return (KERN_RESOURCE_SHORTAGE); + return KERN_RESOURCE_SHORTAGE; } bcopy(addr, newaddr, size_needed); @@ -2924,25 +2975,26 @@ task_threads( /* do the conversion that Mig should handle */ - for (i = 0; i < actual; ++i) + for (i = 0; i < actual; ++i) { ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]); + } } - return (KERN_SUCCESS); + return KERN_SUCCESS; } -#define TASK_HOLD_NORMAL 0 -#define TASK_HOLD_PIDSUSPEND 1 -#define TASK_HOLD_LEGACY 2 -#define TASK_HOLD_LEGACY_ALL 3 +#define TASK_HOLD_NORMAL 0 +#define TASK_HOLD_PIDSUSPEND 1 +#define TASK_HOLD_LEGACY 2 +#define TASK_HOLD_LEGACY_ALL 3 static kern_return_t 
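For orientation, the four TASK_HOLD_* modes realigned above distinguish plain kernel holds, pid_suspend()-style holds, and the legacy task_suspend() holds that are counted separately in legacy_stop_count so a no-senders notification can release them all at once. A simplified sketch of how callers in this file pair the static helpers under the task lock (error paths omitted):

    /* Sketch: the hold/release discipline used by e.g. the pidsuspend path. */
    task_lock(task);
    kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);   /* bump user_stop_count, stop all threads */
    task_unlock(task);
    /* ... task remains stopped ... */
    task_lock(task);
    kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND); /* drop the count; resume when it hits zero */
    task_unlock(task);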
-place_task_hold ( +place_task_hold( task_t task, int mode) -{ +{ if (!task->active && !task_is_a_corpse(task)) { - return (KERN_FAILURE); + return KERN_FAILURE; } /* Return success for corpse task */ @@ -2951,7 +3003,7 @@ place_task_hold ( } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_IPC,MACH_TASK_SUSPEND) | DBG_FUNC_NONE, + MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND) | DBG_FUNC_NONE, task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id, task->user_stop_count, task->user_stop_count + 1, 0); @@ -2959,15 +3011,16 @@ place_task_hold ( current_task()->suspends_outstanding++; #endif - if (mode == TASK_HOLD_LEGACY) + if (mode == TASK_HOLD_LEGACY) { task->legacy_stop_count++; + } if (task->user_stop_count++ > 0) { /* * If the stop count was positive, the task is * already stopped and we can exit. */ - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -2978,37 +3031,36 @@ place_task_hold ( */ task_hold_locked(task); task_wait_locked(task, FALSE); - - return (KERN_SUCCESS); + + return KERN_SUCCESS; } static kern_return_t -release_task_hold ( - task_t task, - int mode) +release_task_hold( + task_t task, + int mode) { boolean_t release = FALSE; - + if (!task->active && !task_is_a_corpse(task)) { - return (KERN_FAILURE); + return KERN_FAILURE; } /* Return success for corpse task */ if (task_is_a_corpse(task)) { return KERN_SUCCESS; } - + if (mode == TASK_HOLD_PIDSUSPEND) { - if (task->pidsuspended == FALSE) { - return (KERN_FAILURE); - } - task->pidsuspended = FALSE; + if (task->pidsuspended == FALSE) { + return KERN_FAILURE; + } + task->pidsuspended = FALSE; } if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) { - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_IPC,MACH_TASK_RESUME) | DBG_FUNC_NONE, + MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE, task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id, task->user_stop_count, mode, task->legacy_stop_count); @@ -3030,23 +3082,25 @@ release_task_hold ( } task->legacy_stop_count = 0; } else { - if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) + if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) { task->legacy_stop_count--; - if (--task->user_stop_count == 0) + } + if (--task->user_stop_count == 0) { release = TRUE; + } } - } - else { - return (KERN_FAILURE); + } else { + return KERN_FAILURE; } /* * Release the task if necessary. */ - if (release) + if (release) { task_release_locked(task); - - return (KERN_SUCCESS); + } + + return KERN_SUCCESS; } @@ -3063,29 +3117,31 @@ release_task_hold ( * unique send-once right). * * Conditions: - * The caller holds a reference to the task + * The caller holds a reference to the task */ kern_return_t task_suspend( - task_t task) + task_t task) { - kern_return_t kr; - mach_port_t port, send, old_notify; - mach_port_name_t name; + kern_return_t kr; + mach_port_t port, send, old_notify; + mach_port_name_t name; - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); - /* + /* * Claim a send right on the task resume port, and request a no-senders - * notification on that port (if none outstanding). + * notification on that port (if none outstanding). 
*/ if (task->itk_resume == IP_NULL) { task->itk_resume = ipc_port_alloc_kernel(); - if (!IP_VALID(task->itk_resume)) + if (!IP_VALID(task->itk_resume)) { panic("failed to create resume port"); + } ipc_kobject_set(task->itk_resume, (ipc_kobject_t)task, IKOT_TASK_RESUME); } @@ -3122,34 +3178,35 @@ task_suspend( * deallocate the send right will auto-release the suspension. */ if ((kr = ipc_kmsg_copyout_object(current_task()->itk_space, (ipc_object_t)send, - MACH_MSG_TYPE_MOVE_SEND, &name)) != KERN_SUCCESS) { + MACH_MSG_TYPE_MOVE_SEND, &name)) != KERN_SUCCESS) { printf("warning: %s(%d) failed to copyout suspension token for pid %d with error: %d\n", - proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info), - task_pid(task), kr); - return (kr); + proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info), + task_pid(task), kr); + return kr; } - return (kr); + return kr; } /* * task_resume: * Release a user hold on a task. - * + * * Conditions: * The caller holds a reference to the task */ -kern_return_t +kern_return_t task_resume( - task_t task) + task_t task) { - kern_return_t kr; + kern_return_t kr; mach_port_name_t resume_port_name; - ipc_entry_t resume_port_entry; - ipc_space_t space = current_task()->itk_space; + ipc_entry_t resume_port_entry; + ipc_space_t space = current_task()->itk_space; - if (task == TASK_NULL || task == kernel_task ) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } /* release a legacy task hold */ task_lock(task); @@ -3165,17 +3222,19 @@ task_resume( * go ahead and drop all the rights, as someone either already released our holds or the task * is gone. */ - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { ipc_right_dealloc(space, resume_port_name, resume_port_entry); - else + } else { ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0); + } /* space unlocked */ } else { is_write_unlock(space); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { printf("warning: %s(%d) performed out-of-band resume on pid %d\n", - proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info), - task_pid(task)); + proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info), + task_pid(task)); + } } return kr; @@ -3188,15 +3247,16 @@ task_resume( kern_return_t task_suspend_internal(task_t task) { - kern_return_t kr; - - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + kern_return_t kr; + + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); kr = place_task_hold(task, TASK_HOLD_NORMAL); task_unlock(task); - return (kr); + return kr; } /* @@ -3205,15 +3265,15 @@ task_suspend_internal(task_t task) */ kern_return_t task_suspend2( - task_t task, + task_t task, task_suspension_token_t *suspend_token) { - kern_return_t kr; - + kern_return_t kr; + kr = task_suspend_internal(task); if (kr != KERN_SUCCESS) { *suspend_token = TASK_NULL; - return (kr); + return kr; } /* @@ -3224,7 +3284,7 @@ task_suspend2( task_reference_internal(task); *suspend_token = task; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -3233,17 +3293,18 @@ task_suspend2( */ kern_return_t task_resume_internal( - task_suspension_token_t task) + task_suspension_token_t task) { kern_return_t kr; - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } 
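The resume-port plumbing above is what makes user-visible suspensions token-based: task_suspend() hands the caller a send right whose destruction auto-resumes the task, and task_suspend2()/task_resume2() expose the same token explicitly. A hedged user-space usage sketch, assuming a valid task port obtained elsewhere (e.g. via task_for_pid()):

    #include <mach/mach.h>

    /* Sketch: suspend a task, inspect it, then resume via the token. */
    kern_return_t
    with_task_suspended(task_t target)
    {
        task_suspension_token_t token = MACH_PORT_NULL;
        kern_return_t kr = task_suspend2(target, &token);
        if (kr != KERN_SUCCESS) {
            return kr;              /* invalid or dead target */
        }
        /* ... examine the stopped task ... */
        return task_resume2(token); /* releases the hold along with the token */
    }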
task_lock(task); kr = release_task_hold(task, TASK_HOLD_NORMAL); task_unlock(task); - return (kr); + return kr; } /* @@ -3251,14 +3312,14 @@ task_resume_internal( */ kern_return_t task_resume2( - task_suspension_token_t task) + task_suspension_token_t task) { kern_return_t kr; kr = task_resume_internal(task); task_suspension_token_deallocate(task); - return (kr); + return kr; } boolean_t @@ -3268,11 +3329,10 @@ task_suspension_notify(mach_msg_header_t *request_header) task_t task = convert_port_to_task_suspension_token(port); mach_msg_type_number_t not_count; - if (task == TASK_NULL || task == kernel_task) + if (task == TASK_NULL || task == kernel_task) { return TRUE; /* nothing to do */ - + } switch (request_header->msgh_id) { - case MACH_NOTIFY_SEND_ONCE: /* release the hold held by this specific send-once right */ task_lock(task); @@ -3286,13 +3346,11 @@ task_suspension_notify(mach_msg_header_t *request_header) task_lock(task); ip_lock(port); if (port->ip_mscount == not_count) { - /* release all the [remaining] outstanding legacy holds */ assert(port->ip_nsrequest == IP_NULL); ip_unlock(port); release_task_hold(task, TASK_HOLD_LEGACY_ALL); task_unlock(task); - } else if (port->ip_nsrequest == IP_NULL) { ipc_port_t old_notify; @@ -3332,7 +3390,7 @@ task_pidsuspend_locked(task_t task) task->pidsuspended = FALSE; } out: - return(kr); + return kr; } @@ -3342,16 +3400,17 @@ out: * Suspends a task by placing a hold on its threads. * * Conditions: - * The caller holds a reference to the task + * The caller holds a reference to the task */ kern_return_t task_pidsuspend( - task_t task) + task_t task) { - kern_return_t kr; - - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + kern_return_t kr; + + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); @@ -3359,31 +3418,31 @@ task_pidsuspend( task_unlock(task); - return (kr); + return kr; } /* * task_pidresume: * Resumes a previously suspended task. 
- * + * * Conditions: * The caller holds a reference to the task */ -kern_return_t +kern_return_t task_pidresume( - task_t task) + task_t task) { - kern_return_t kr; + kern_return_t kr; - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); - + #if CONFIG_FREEZE while (task->changing_freeze_state) { - assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT); task_unlock(task); thread_block(THREAD_CONTINUE_NULL); @@ -3401,15 +3460,16 @@ task_pidresume( task_lock(task); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { task->frozen = FALSE; + } task->changing_freeze_state = FALSE; thread_wakeup(&task->changing_freeze_state); - + task_unlock(task); #endif - return (kr); + return kr; } @@ -3420,10 +3480,11 @@ extern void IOSleep(int); kern_return_t task_disconnect_page_mappings(task_t task) { - int n; + int n; - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } /* * this function is used to strip all of the mappings from @@ -3435,17 +3496,17 @@ task_disconnect_page_mappings(task_t task) * sweep (at least for a while - I've arbitrarily set the limit at * 100 sweeps to be re-looked at as we gain experience) to get a better * view into what areas within a page are being visited (as opposed to only - * seeing the first fault of a page after the task becomes + * seeing the first fault of a page after the task becomes * runnable)... in the future I may * try to block until awakened by a thread in this task * being made runnable, but for now we'll periodically poll from the * user level debug tool driving the sysctl */ for (n = 0; n < 100; n++) { - thread_t thread; - boolean_t runnable; - boolean_t do_unnest; - int page_count; + thread_t thread; + boolean_t runnable; + boolean_t do_unnest; + int page_count; runnable = FALSE; do_unnest = FALSE; @@ -3453,14 +3514,14 @@ task_disconnect_page_mappings(task_t task) task_lock(task); queue_iterate(&task->threads, thread, thread_t, task_threads) { - if (thread->state & TH_RUN) { runnable = TRUE; break; } } - if (n == 0) + if (n == 0) { task->task_disconnected_count++; + } if (task->task_unnested == FALSE) { if (runnable == TRUE) { @@ -3470,21 +3531,23 @@ task_disconnect_page_mappings(task_t task) } task_unlock(task); - if (runnable == FALSE) + if (runnable == FALSE) { break; + } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START, - task, do_unnest, task->task_disconnected_count, 0, 0); + task, do_unnest, task->task_disconnected_count, 0, 0); page_count = vm_map_disconnect_page_mappings(task->map, do_unnest); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END, - task, page_count, 0, 0, 0); + task, page_count, 0, 0, 0); - if ((n % 5) == 4) + if ((n % 5) == 4) { IOSleep(1); + } } - return (KERN_SUCCESS); + return KERN_SUCCESS; } #endif @@ -3498,10 +3561,10 @@ task_disconnect_page_mappings(task_t task) * Freeze a task. 
* * Conditions: - * The caller holds a reference to the task + * The caller holds a reference to the task */ -extern void vm_wake_compactor_swapper(void); -extern queue_head_t c_swapout_list_head; +extern void vm_wake_compactor_swapper(void); +extern queue_head_t c_swapout_list_head; kern_return_t task_freeze( @@ -3511,19 +3574,19 @@ task_freeze( uint32_t *clean_count, uint32_t *dirty_count, uint32_t dirty_budget, - uint32_t *shared_count, - int *freezer_error_code, - boolean_t eval_only) + uint32_t *shared_count, + int *freezer_error_code, + boolean_t eval_only) { kern_return_t kr = KERN_SUCCESS; - - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); while (task->changing_freeze_state) { - assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT); task_unlock(task); thread_block(THREAD_CONTINUE_NULL); @@ -3532,21 +3595,21 @@ task_freeze( } if (task->frozen) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } task->changing_freeze_state = TRUE; task_unlock(task); kr = vm_map_freeze(task->map, - purgeable_count, - wired_count, - clean_count, - dirty_count, - dirty_budget, - shared_count, - freezer_error_code, - eval_only); + purgeable_count, + wired_count, + clean_count, + dirty_count, + dirty_budget, + shared_count, + freezer_error_code, + eval_only); task_lock(task); @@ -3556,7 +3619,7 @@ task_freeze( task->changing_freeze_state = FALSE; thread_wakeup(&task->changing_freeze_state); - + task_unlock(task); if (VM_CONFIG_COMPRESSOR_IS_PRESENT && @@ -3572,7 +3635,7 @@ task_freeze( thread_wakeup((event_t)&c_swapout_list_head); } - return (kr); + return kr; } /* @@ -3581,19 +3644,19 @@ task_freeze( * Thaw a currently frozen task. 
* * Conditions: - * The caller holds a reference to the task + * The caller holds a reference to the task */ kern_return_t task_thaw( - task_t task) + task_t task) { - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); - - while (task->changing_freeze_state) { + while (task->changing_freeze_state) { assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT); task_unlock(task); thread_block(THREAD_CONTINUE_NULL); @@ -3602,36 +3665,38 @@ task_thaw( } if (!task->frozen) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } task->frozen = FALSE; - + task_unlock(task); - return (KERN_SUCCESS); + return KERN_SUCCESS; } #endif /* CONFIG_FREEZE */ kern_return_t host_security_set_task_token( - host_security_t host_security, - task_t task, - security_token_t sec_token, - audit_token_t audit_token, - host_priv_t host_priv) + host_security_t host_security, + task_t task, + security_token_t sec_token, + audit_token_t audit_token, + host_priv_t host_priv) { - ipc_port_t host_port; - kern_return_t kr; + ipc_port_t host_port; + kern_return_t kr; - if (task == TASK_NULL) - return(KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (host_security == HOST_NULL) - return(KERN_INVALID_SECURITY); + if (host_security == HOST_NULL) { + return KERN_INVALID_SECURITY; + } - task_lock(task); - task->sec_token = sec_token; + task_lock(task); + task->sec_token = sec_token; task->audit_token = audit_token; task_unlock(task); @@ -3643,7 +3708,7 @@ host_security_set_task_token( } assert(kr == KERN_SUCCESS); kr = task_set_special_port(task, TASK_HOST_PORT, host_port); - return(kr); + return kr; } kern_return_t @@ -3665,122 +3730,123 @@ task_send_trace_memory( */ kern_return_t task_set_info( - task_t task, - task_flavor_t flavor, - __unused task_info_t task_info_in, /* pointer to IN array */ - __unused mach_msg_type_number_t task_info_count) + task_t task, + task_flavor_t flavor, + __unused task_info_t task_info_in, /* pointer to IN array */ + __unused mach_msg_type_number_t task_info_count) { - if (task == TASK_NULL) - return(KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } switch (flavor) { - #if CONFIG_ATM - case TASK_TRACE_MEMORY_INFO: - { - if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT) - return (KERN_INVALID_ARGUMENT); - - assert(task_info_in != NULL); - task_trace_memory_info_t mem_info; - mem_info = (task_trace_memory_info_t) task_info_in; - kern_return_t kr = atm_register_trace_memory(task, - mem_info->user_memory_address, - mem_info->buffer_size); - return kr; + case TASK_TRACE_MEMORY_INFO: + { + if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; } + assert(task_info_in != NULL); + task_trace_memory_info_t mem_info; + mem_info = (task_trace_memory_info_t) task_info_in; + kern_return_t kr = atm_register_trace_memory(task, + mem_info->user_memory_address, + mem_info->buffer_size); + return kr; + } + #endif - default: - return (KERN_INVALID_ARGUMENT); + default: + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } int radar_20146450 = 1; kern_return_t task_info( - task_t task, - task_flavor_t flavor, - task_info_t task_info_out, - mach_msg_type_number_t *task_info_count) + task_t task, + task_flavor_t flavor, + task_info_t task_info_out, + mach_msg_type_number_t *task_info_count) { kern_return_t error = KERN_SUCCESS; 
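task_info() follows the usual Mach in/out count convention: the caller passes its buffer capacity in *task_info_count and gets back the count actually filled, which is why every flavor case below begins with a capacity check. A user-space sketch of a typical call:

    #include <mach/mach.h>
    #include <stdio.h>

    /* Sketch: query this process's resident size via MACH_TASK_BASIC_INFO. */
    void
    print_resident_size(void)
    {
        mach_task_basic_info_data_t info;
        mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;   /* capacity in */
        kern_return_t kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
            (task_info_t)&info, &count);                             /* filled count out */
        if (kr == KERN_SUCCESS) {
            printf("resident: %llu bytes\n", (unsigned long long)info.resident_size);
        }
    }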
- mach_msg_type_number_t original_task_info_count; + mach_msg_type_number_t original_task_info_count; - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } original_task_info_count = *task_info_count; task_lock(task); if ((task != current_task()) && (!task->active)) { task_unlock(task); - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } switch (flavor) { - case TASK_BASIC_INFO_32: case TASK_BASIC2_INFO_32: #if defined(__arm__) || defined(__arm64__) case TASK_BASIC_INFO_64: -#endif - { - task_basic_info_32_t basic_info; - vm_map_t map; - clock_sec_t secs; - clock_usec_t usecs; - - if (*task_info_count < TASK_BASIC_INFO_32_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; - } +#endif + { + task_basic_info_32_t basic_info; + vm_map_t map; + clock_sec_t secs; + clock_usec_t usecs; - basic_info = (task_basic_info_32_t)task_info_out; + if (*task_info_count < TASK_BASIC_INFO_32_COUNT) { + error = KERN_INVALID_ARGUMENT; + break; + } - map = (task == kernel_task)? kernel_map: task->map; - basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size; - if (flavor == TASK_BASIC2_INFO_32) { - /* - * The "BASIC2" flavor gets the maximum resident - * size instead of the current resident size... - */ - basic_info->resident_size = pmap_resident_max(map->pmap); - } else { - basic_info->resident_size = pmap_resident_count(map->pmap); - } - basic_info->resident_size *= PAGE_SIZE; + basic_info = (task_basic_info_32_t)task_info_out; - basic_info->policy = ((task != kernel_task)? - POLICY_TIMESHARE: POLICY_RR); - basic_info->suspend_count = task->user_stop_count; + map = (task == kernel_task)? kernel_map: task->map; + basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size; + if (flavor == TASK_BASIC2_INFO_32) { + /* + * The "BASIC2" flavor gets the maximum resident + * size instead of the current resident size... + */ + basic_info->resident_size = pmap_resident_max(map->pmap); + } else { + basic_info->resident_size = pmap_resident_count(map->pmap); + } + basic_info->resident_size *= PAGE_SIZE; - absolutetime_to_microtime(task->total_user_time, &secs, &usecs); - basic_info->user_time.seconds = - (typeof(basic_info->user_time.seconds))secs; - basic_info->user_time.microseconds = usecs; + basic_info->policy = ((task != kernel_task)? 
+ POLICY_TIMESHARE: POLICY_RR); + basic_info->suspend_count = task->user_stop_count; - absolutetime_to_microtime(task->total_system_time, &secs, &usecs); - basic_info->system_time.seconds = - (typeof(basic_info->system_time.seconds))secs; - basic_info->system_time.microseconds = usecs; + absolutetime_to_microtime(task->total_user_time, &secs, &usecs); + basic_info->user_time.seconds = + (typeof(basic_info->user_time.seconds))secs; + basic_info->user_time.microseconds = usecs; - *task_info_count = TASK_BASIC_INFO_32_COUNT; - break; - } + absolutetime_to_microtime(task->total_system_time, &secs, &usecs); + basic_info->system_time.seconds = + (typeof(basic_info->system_time.seconds))secs; + basic_info->system_time.microseconds = usecs; + + *task_info_count = TASK_BASIC_INFO_32_COUNT; + break; + } #if defined(__arm__) || defined(__arm64__) case TASK_BASIC_INFO_64_2: { - task_basic_info_64_2_t basic_info; - vm_map_t map; - clock_sec_t secs; - clock_usec_t usecs; + task_basic_info_64_2_t basic_info; + vm_map_t map; + clock_sec_t secs; + clock_usec_t usecs; if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } basic_info = (task_basic_info_64_2_t)task_info_out; @@ -3788,21 +3854,21 @@ task_info( map = (task == kernel_task)? kernel_map: task->map; basic_info->virtual_size = map->size; basic_info->resident_size = - (mach_vm_size_t)(pmap_resident_count(map->pmap)) - * PAGE_SIZE_64; + (mach_vm_size_t)(pmap_resident_count(map->pmap)) + * PAGE_SIZE_64; basic_info->policy = ((task != kernel_task)? - POLICY_TIMESHARE: POLICY_RR); + POLICY_TIMESHARE: POLICY_RR); basic_info->suspend_count = task->user_stop_count; absolutetime_to_microtime(task->total_user_time, &secs, &usecs); - basic_info->user_time.seconds = - (typeof(basic_info->user_time.seconds))secs; + basic_info->user_time.seconds = + (typeof(basic_info->user_time.seconds))secs; basic_info->user_time.microseconds = usecs; absolutetime_to_microtime(task->total_system_time, &secs, &usecs); basic_info->system_time.seconds = - (typeof(basic_info->system_time.seconds))secs; + (typeof(basic_info->system_time.seconds))secs; basic_info->system_time.microseconds = usecs; *task_info_count = TASK_BASIC_INFO_64_2_COUNT; @@ -3812,14 +3878,14 @@ task_info( #else /* defined(__arm__) || defined(__arm64__) */ case TASK_BASIC_INFO_64: { - task_basic_info_64_t basic_info; - vm_map_t map; - clock_sec_t secs; - clock_usec_t usecs; + task_basic_info_64_t basic_info; + vm_map_t map; + clock_sec_t secs; + clock_usec_t usecs; if (*task_info_count < TASK_BASIC_INFO_64_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } basic_info = (task_basic_info_64_t)task_info_out; @@ -3827,21 +3893,21 @@ task_info( map = (task == kernel_task)? kernel_map: task->map; basic_info->virtual_size = map->size; basic_info->resident_size = - (mach_vm_size_t)(pmap_resident_count(map->pmap)) - * PAGE_SIZE_64; + (mach_vm_size_t)(pmap_resident_count(map->pmap)) + * PAGE_SIZE_64; basic_info->policy = ((task != kernel_task)? 
- POLICY_TIMESHARE: POLICY_RR); + POLICY_TIMESHARE: POLICY_RR); basic_info->suspend_count = task->user_stop_count; absolutetime_to_microtime(task->total_user_time, &secs, &usecs); - basic_info->user_time.seconds = - (typeof(basic_info->user_time.seconds))secs; + basic_info->user_time.seconds = + (typeof(basic_info->user_time.seconds))secs; basic_info->user_time.microseconds = usecs; absolutetime_to_microtime(task->total_system_time, &secs, &usecs); basic_info->system_time.seconds = - (typeof(basic_info->system_time.seconds))secs; + (typeof(basic_info->system_time.seconds))secs; basic_info->system_time.microseconds = usecs; *task_info_count = TASK_BASIC_INFO_64_COUNT; @@ -3857,8 +3923,8 @@ task_info( clock_usec_t usecs; if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } basic_info = (mach_task_basic_info_t)task_info_out; @@ -3875,13 +3941,13 @@ task_info( (mach_vm_size_t)(pmap_resident_max(map->pmap)); basic_info->resident_size_max *= PAGE_SIZE_64; - basic_info->policy = ((task != kernel_task) ? - POLICY_TIMESHARE : POLICY_RR); + basic_info->policy = ((task != kernel_task) ? + POLICY_TIMESHARE : POLICY_RR); basic_info->suspend_count = task->user_stop_count; absolutetime_to_microtime(task->total_user_time, &secs, &usecs); - basic_info->user_time.seconds = + basic_info->user_time.seconds = (typeof(basic_info->user_time.seconds))secs; basic_info->user_time.microseconds = usecs; @@ -3896,12 +3962,12 @@ task_info( case TASK_THREAD_TIMES_INFO: { - task_thread_times_info_t times_info; - thread_t thread; + task_thread_times_info_t times_info; + thread_t thread; if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } times_info = (task_thread_times_info_t) task_info_out; @@ -3912,10 +3978,11 @@ task_info( queue_iterate(&task->threads, thread, thread_t, task_threads) { - time_value_t user_time, system_time; + time_value_t user_time, system_time; - if (thread->options & TH_OPT_IDLE_THREAD) + if (thread->options & TH_OPT_IDLE_THREAD) { continue; + } thread_read_times(thread, &user_time, &system_time, NULL); @@ -3929,8 +3996,8 @@ task_info( case TASK_ABSOLUTETIME_INFO: { - task_absolutetime_info_t info; - thread_t thread; + task_absolutetime_info_t info; + thread_t thread; if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -3945,11 +4012,12 @@ task_info( info->total_system = task->total_system_time; queue_iterate(&task->threads, thread, thread_t, task_threads) { - uint64_t tval; - spl_t x; + uint64_t tval; + spl_t x; - if (thread->options & TH_OPT_IDLE_THREAD) + if (thread->options & TH_OPT_IDLE_THREAD) { continue; + } x = splsched(); thread_lock(thread); @@ -3988,7 +4056,7 @@ task_info( * size of the expected result structure. */ #define TASK_LEGACY_DYLD_INFO_COUNT \ - offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t) + offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t) if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -4002,8 +4070,8 @@ task_info( /* only set format on output for those expecting it */ if (*task_info_count >= TASK_DYLD_INFO_COUNT) { info->all_image_info_format = task_has_64Bit_addr(task) ? 
- TASK_DYLD_ALL_IMAGE_INFO_64 : - TASK_DYLD_ALL_IMAGE_INFO_32 ; + TASK_DYLD_ALL_IMAGE_INFO_64 : + TASK_DYLD_ALL_IMAGE_INFO_32; *task_info_count = TASK_DYLD_INFO_COUNT; } else { *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT; @@ -4037,12 +4105,12 @@ task_info( case TASK_KERNELMEMORY_INFO: { - task_kernelmemory_info_t tkm_info; - ledger_amount_t credit, debit; + task_kernelmemory_info_t tkm_info; + ledger_amount_t credit, debit; if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } tkm_info = (task_kernelmemory_info_t) task_info_out; @@ -4104,7 +4172,6 @@ task_info( /* OBSOLETE */ case TASK_SCHED_FIFO_INFO: { - if (*task_info_count < POLICY_FIFO_BASE_COUNT) { error = KERN_INVALID_ARGUMENT; break; @@ -4117,7 +4184,7 @@ task_info( /* OBSOLETE */ case TASK_SCHED_RR_INFO: { - policy_rr_base_t rr_base; + policy_rr_base_t rr_base; uint32_t quantum_time; uint64_t quantum_ns; @@ -4137,7 +4204,7 @@ task_info( quantum_time = SCHED(initial_quantum_size)(THREAD_NULL); absolutetime_to_nanoseconds(quantum_time, &quantum_ns); - + rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000); *task_info_count = POLICY_RR_BASE_COUNT; @@ -4147,7 +4214,7 @@ task_info( /* OBSOLETE */ case TASK_SCHED_TIMESHARE_INFO: { - policy_timeshare_base_t ts_base; + policy_timeshare_base_t ts_base; if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -4169,11 +4236,11 @@ task_info( case TASK_SECURITY_TOKEN: { - security_token_t *sec_token_p; + security_token_t *sec_token_p; if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } sec_token_p = (security_token_t *) task_info_out; @@ -4183,14 +4250,14 @@ task_info( *task_info_count = TASK_SECURITY_TOKEN_COUNT; break; } - + case TASK_AUDIT_TOKEN: { - audit_token_t *audit_token_p; + audit_token_t *audit_token_p; if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } audit_token_p = (audit_token_t *) task_info_out; @@ -4200,19 +4267,19 @@ task_info( *task_info_count = TASK_AUDIT_TOKEN_COUNT; break; } - + case TASK_SCHED_INFO: error = KERN_INVALID_ARGUMENT; break; case TASK_EVENTS_INFO: { - task_events_info_t events_info; - thread_t thread; + task_events_info_t events_info; + thread_t thread; if (*task_info_count < TASK_EVENTS_INFO_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } events_info = (task_events_info_t) task_info_out; @@ -4229,7 +4296,7 @@ task_info( events_info->csw = task->c_switch; queue_iterate(&task->threads, thread, thread_t, task_threads) { - events_info->csw += thread->c_switch; + events_info->csw += thread->c_switch; events_info->syscalls_mach += thread->syscalls_mach; events_info->syscalls_unix += thread->syscalls_unix; } @@ -4241,8 +4308,8 @@ task_info( case TASK_AFFINITY_TAG_INFO: { if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } error = task_affinity_info(task, task_info_out, task_info_count); @@ -4273,12 +4340,12 @@ task_info( case TASK_VM_INFO: case TASK_VM_INFO_PURGEABLE: { - task_vm_info_t vm_info; - vm_map_t map; + task_vm_info_t vm_info; + vm_map_t map; if (*task_info_count < TASK_VM_INFO_REV0_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } vm_info = (task_vm_info_t)task_info_out; @@ -4335,14 
+4402,14 @@ task_info( * when the system is under memory pressure. */ vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT * - PAGE_SIZE); + PAGE_SIZE); } else { - mach_vm_size_t volatile_virtual_size; - mach_vm_size_t volatile_resident_size; - mach_vm_size_t volatile_compressed_size; - mach_vm_size_t volatile_pmap_size; - mach_vm_size_t volatile_compressed_pmap_size; - kern_return_t kr; + mach_vm_size_t volatile_virtual_size; + mach_vm_size_t volatile_resident_size; + mach_vm_size_t volatile_compressed_size; + mach_vm_size_t volatile_pmap_size; + mach_vm_size_t volatile_compressed_pmap_size; + kern_return_t kr; if (flavor == TASK_VM_INFO_PURGEABLE) { kr = vm_map_query_volatile( @@ -4354,15 +4421,15 @@ task_info( &volatile_compressed_pmap_size); if (kr == KERN_SUCCESS) { vm_info->purgeable_volatile_pmap = - volatile_pmap_size; + volatile_pmap_size; if (radar_20146450) { - vm_info->compressed -= - volatile_compressed_pmap_size; + vm_info->compressed -= + volatile_compressed_pmap_size; } vm_info->purgeable_volatile_resident = - volatile_resident_size; + volatile_resident_size; vm_info->purgeable_volatile_virtual = - volatile_virtual_size; + volatile_virtual_size; } } } @@ -4370,7 +4437,7 @@ task_info( if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) { vm_info->phys_footprint = - (mach_vm_size_t) get_task_phys_footprint(task); + (mach_vm_size_t) get_task_phys_footprint(task); *task_info_count = TASK_VM_INFO_REV1_COUNT; } if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) { @@ -4388,16 +4455,16 @@ task_info( case TASK_WAIT_STATE_INFO: { - /* - * Deprecated flavor. Currently allowing some results until all users + /* + * Deprecated flavor. Currently allowing some results until all users * stop calling it. The results may not be accurate. - */ - task_wait_state_info_t wait_state_info; + */ + task_wait_state_info_t wait_state_info; uint64_t total_sfi_ledger_val = 0; if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } wait_state_info = (task_wait_state_info_t) task_info_out; @@ -4409,23 +4476,23 @@ task_info( int i, prev_lentry = -1; int64_t val_credit, val_debit; - for (i = 0; i < MAX_SFI_CLASS_ID; i++){ - val_credit =0; + for (i = 0; i < MAX_SFI_CLASS_ID; i++) { + val_credit = 0; /* - * checking with prev_lentry != entry ensures adjacent classes + * checking with prev_lentry != entry ensures adjacent classes * which share the same ledger do not add wait times twice. * Note: Use ledger() call to get data for each individual sfi class. 
*/ if (prev_lentry != task_ledgers.sfi_wait_times[i] && - KERN_SUCCESS == ledger_get_entries(task->ledger, - task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) { + KERN_SUCCESS == ledger_get_entries(task->ledger, + task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) { total_sfi_ledger_val += val_credit; } prev_lentry = task_ledgers.sfi_wait_times[i]; } #endif /* CONFIG_SCHED_SFI */ - wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val; + wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val; *task_info_count = TASK_WAIT_STATE_INFO_COUNT; break; @@ -4433,7 +4500,7 @@ task_info( case TASK_VM_INFO_PURGEABLE_ACCOUNT: { #if DEVELOPMENT || DEBUG - pvm_account_info_t acnt_info; + pvm_account_info_t acnt_info; if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) { error = KERN_INVALID_ARGUMENT; @@ -4459,11 +4526,11 @@ task_info( } case TASK_FLAGS_INFO: { - task_flags_info_t flags_info; + task_flags_info_t flags_info; if (*task_info_count < TASK_FLAGS_INFO_COUNT) { - error = KERN_INVALID_ARGUMENT; - break; + error = KERN_INVALID_ARGUMENT; + break; } flags_info = (task_flags_info_t)task_info_out; @@ -4490,10 +4557,10 @@ task_info( } dbg_info = (task_debug_info_internal_t) task_info_out; dbg_info->ipc_space_size = 0; - if (task->itk_space){ + if (task->itk_space) { dbg_info->ipc_space_size = task->itk_space->is_table_size; } - + dbg_info->suspend_count = task->suspend_count; error = KERN_SUCCESS; @@ -4509,7 +4576,7 @@ task_info( } task_unlock(task); - return (error); + return error; } /* @@ -4527,18 +4594,19 @@ task_info( */ kern_return_t task_info_from_user( - mach_port_t task_port, - task_flavor_t flavor, - task_info_t task_info_out, - mach_msg_type_number_t *task_info_count) + mach_port_t task_port, + task_flavor_t flavor, + task_info_t task_info_out, + mach_msg_type_number_t *task_info_count) { task_t task; kern_return_t ret; - if (flavor == TASK_DYLD_INFO) + if (flavor == TASK_DYLD_INFO) { task = convert_port_to_task(task_port); - else + } else { task = convert_port_to_task_name(task_port); + } ret = task_info(task, flavor, task_info_out, task_info_count); @@ -4547,7 +4615,7 @@ task_info_from_user( return ret; } -/* +/* * task_power_info * * Returns power stats for the task. 
@@ -4555,20 +4623,20 @@ task_info_from_user( */ void task_power_info_locked( - task_t task, - task_power_info_t info, - gpu_energy_data_t ginfo, - task_power_info_v2_t infov2) + task_t task, + task_power_info_t info, + gpu_energy_data_t ginfo, + task_power_info_v2_t infov2) { - thread_t thread; - ledger_amount_t tmp; + thread_t thread; + ledger_amount_t tmp; task_lock_assert_owned(task); ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups, - (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp); + (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp); ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups, - (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp); + (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp); info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1; info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2; @@ -4592,11 +4660,12 @@ task_power_info_locked( } queue_iterate(&task->threads, thread, thread_t, task_threads) { - uint64_t tval; - spl_t x; + uint64_t tval; + spl_t x; - if (thread->options & TH_OPT_IDLE_THREAD) + if (thread->options & TH_OPT_IDLE_THREAD) { continue; + } x = splsched(); thread_lock(thread); @@ -4635,7 +4704,7 @@ task_power_info_locked( } } -/* +/* * task_gpu_utilisation * * Returns the total gpu time used by the all the threads of the task @@ -4643,7 +4712,7 @@ task_power_info_locked( */ uint64_t task_gpu_utilisation( - task_t task) + task_t task) { uint64_t gpu_time = 0; #if !CONFIG_EMBEDDED @@ -4669,7 +4738,7 @@ task_gpu_utilisation( return gpu_time; } -/* +/* * task_energy * * Returns the total energy used by the all the threads of the task @@ -4677,7 +4746,7 @@ task_gpu_utilisation( */ uint64_t task_energy( - task_t task) + task_t task) { uint64_t energy = 0; thread_t thread; @@ -4703,7 +4772,7 @@ uint64_t task_cpu_ptime( __unused task_t task) { - return 0; + return 0; } @@ -4712,7 +4781,7 @@ task_cpu_ptime( */ void task_update_cpu_time_qos_stats( - task_t task, + task_t task, uint64_t *eqos_stats, uint64_t *rqos_stats) { @@ -4755,11 +4824,12 @@ task_update_cpu_time_qos_stats( kern_return_t task_purgable_info( - task_t task, - task_purgable_info_t *stats) + task_t task, + task_purgable_info_t *stats) { - if (task == TASK_NULL || stats == NULL) + if (task == TASK_NULL || stats == NULL) { return KERN_INVALID_ARGUMENT; + } /* Take task reference */ task_reference(task); vm_purgeable_stats((vm_purgeable_info_t)stats, task); @@ -4770,26 +4840,26 @@ task_purgable_info( void task_vtimer_set( - task_t task, - integer_t which) + task_t task, + integer_t which) { - thread_t thread; - spl_t x; + thread_t thread; + spl_t x; task_lock(task); task->vtimers |= which; switch (which) { - case TASK_VTIMER_USER: queue_iterate(&task->threads, thread, thread_t, task_threads) { x = splsched(); thread_lock(thread); - if (thread->precise_user_kernel_time) + if (thread->precise_user_kernel_time) { thread->vtimer_user_save = timer_grab(&thread->user_timer); - else + } else { thread->vtimer_user_save = timer_grab(&thread->system_timer); + } thread_unlock(thread); splx(x); } @@ -4823,8 +4893,8 @@ task_vtimer_set( void task_vtimer_clear( - task_t task, - integer_t which) + task_t task, + integer_t which) { assert(task == current_task()); @@ -4837,15 +4907,15 @@ task_vtimer_clear( void task_vtimer_update( -__unused - task_t task, - integer_t which, - uint32_t *microsecs) + __unused + task_t task, + integer_t which, + uint32_t *microsecs) { - thread_t thread = current_thread(); - uint32_t tdelt = 0; - clock_sec_t 
secs = 0; - uint64_t tsum; + thread_t thread = current_thread(); + uint32_t tdelt = 0; + clock_sec_t secs = 0; + uint64_t tsum; assert(task == current_task()); @@ -4859,14 +4929,13 @@ __unused } switch (which) { - case TASK_VTIMER_USER: if (thread->precise_user_kernel_time) { tdelt = (uint32_t)timer_delta(&thread->user_timer, - &thread->vtimer_user_save); + &thread->vtimer_user_save); } else { tdelt = (uint32_t)timer_delta(&thread->system_timer, - &thread->vtimer_user_save); + &thread->vtimer_user_save); } absolutetime_to_microtime(tdelt, &secs, microsecs); break; @@ -4877,8 +4946,9 @@ __unused tdelt = (uint32_t)(tsum - thread->vtimer_prof_save); absolutetime_to_microtime(tdelt, &secs, microsecs); /* if the time delta is smaller than a usec, ignore */ - if (*microsecs != 0) + if (*microsecs != 0) { thread->vtimer_prof_save = tsum; + } break; case TASK_VTIMER_RLIM: @@ -4901,11 +4971,11 @@ __unused */ kern_return_t task_assign( - __unused task_t task, - __unused processor_set_t new_pset, - __unused boolean_t assign_threads) + __unused task_t task, + __unused processor_set_t new_pset, + __unused boolean_t assign_threads) { - return(KERN_FAILURE); + return KERN_FAILURE; } /* @@ -4915,10 +4985,10 @@ task_assign( */ kern_return_t task_assign_default( - task_t task, - boolean_t assign_threads) + task_t task, + boolean_t assign_threads) { - return (task_assign(task, &pset0, assign_threads)); + return task_assign(task, &pset0, assign_threads); } /* @@ -4928,11 +4998,12 @@ task_assign_default( */ kern_return_t task_get_assignment( - task_t task, - processor_set_t *pset) + task_t task, + processor_set_t *pset) { - if (!task || !task->active) + if (!task || !task->active) { return KERN_FAILURE; + } *pset = &pset0; @@ -4941,57 +5012,57 @@ task_get_assignment( uint64_t get_task_dispatchqueue_offset( - task_t task) + task_t task) { return task->dispatchqueue_offset; } /* - * task_policy + * task_policy * * Set scheduling policy and parameters, both base and limit, for * the given task. Policy must be a policy which is enabled for the - * processor set. Change contained threads if requested. + * processor set. Change contained threads if requested. */ kern_return_t task_policy( - __unused task_t task, - __unused policy_t policy_id, - __unused policy_base_t base, - __unused mach_msg_type_number_t count, - __unused boolean_t set_limit, - __unused boolean_t change) + __unused task_t task, + __unused policy_t policy_id, + __unused policy_base_t base, + __unused mach_msg_type_number_t count, + __unused boolean_t set_limit, + __unused boolean_t change) { - return(KERN_FAILURE); + return KERN_FAILURE; } /* * task_set_policy * - * Set scheduling policy and parameters, both base and limit, for + * Set scheduling policy and parameters, both base and limit, for * the given task. Policy can be any policy implemented by the * processor set, whether enabled or not. Change contained threads * if requested. 
*/ kern_return_t task_set_policy( - __unused task_t task, - __unused processor_set_t pset, - __unused policy_t policy_id, - __unused policy_base_t base, - __unused mach_msg_type_number_t base_count, - __unused policy_limit_t limit, - __unused mach_msg_type_number_t limit_count, - __unused boolean_t change) + __unused task_t task, + __unused processor_set_t pset, + __unused policy_t policy_id, + __unused policy_base_t base, + __unused mach_msg_type_number_t base_count, + __unused policy_limit_t limit, + __unused mach_msg_type_number_t limit_count, + __unused boolean_t change) { - return(KERN_FAILURE); + return KERN_FAILURE; } kern_return_t task_set_ras_pc( - __unused task_t task, - __unused vm_offset_t pc, - __unused vm_offset_t endpc) + __unused task_t task, + __unused vm_offset_t pc, + __unused vm_offset_t endpc) { return KERN_FAILURE; } @@ -5006,29 +5077,29 @@ task_synchronizer_destroy_all(task_t task) } /* - * Install default (machine-dependent) initial thread state + * Install default (machine-dependent) initial thread state * on the task. Subsequent thread creation will have this initial * state set on the thread by machine_thread_inherit_taskwide(). * Flavors and structures are exactly the same as those to thread_set_state() */ -kern_return_t +kern_return_t task_set_state( - task_t task, - int flavor, - thread_state_t state, + task_t task, + int flavor, + thread_state_t state, mach_msg_type_number_t state_count) { kern_return_t ret; if (task == TASK_NULL) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } task_lock(task); if (!task->active) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } ret = machine_task_set_state(task, flavor, state, state_count); @@ -5038,28 +5109,28 @@ task_set_state( } /* - * Examine the default (machine-dependent) initial thread state + * Examine the default (machine-dependent) initial thread state * on the task, as set by task_set_state(). Flavors and structures * are exactly the same as those passed to thread_get_state(). 
*/ -kern_return_t +kern_return_t task_get_state( - task_t task, - int flavor, + task_t task, + int flavor, thread_state_t state, mach_msg_type_number_t *state_count) { kern_return_t ret; if (task == TASK_NULL) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } task_lock(task); if (!task->active) { task_unlock(task); - return (KERN_FAILURE); + return KERN_FAILURE; } ret = machine_task_get_state(task, flavor, state, state_count); @@ -5069,15 +5140,16 @@ task_get_state( } -static kern_return_t __attribute__((noinline,not_tail_called)) +static kern_return_t __attribute__((noinline, not_tail_called)) PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND( mach_exception_code_t code, mach_exception_subcode_t subcode, void *reason) { #ifdef MACH_BSD - if (1 == proc_selfpid()) - return KERN_NOT_SUPPORTED; // initproc is immune + if (1 == proc_selfpid()) { + return KERN_NOT_SUPPORTED; // initproc is immune + } #endif mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = { [0] = code, @@ -5109,19 +5181,19 @@ task_violated_guard( boolean_t task_get_memlimit_is_active(task_t task) { - assert (task != NULL); + assert(task != NULL); if (task->memlimit_is_active == 1) { - return(TRUE); + return TRUE; } else { - return (FALSE); + return FALSE; } } void task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active) { - assert (task != NULL); + assert(task != NULL); if (memlimit_is_active) { task->memlimit_is_active = 1; @@ -5132,20 +5204,20 @@ task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active) boolean_t task_get_memlimit_is_fatal(task_t task) -{ +{ assert(task != NULL); if (task->memlimit_is_fatal == 1) { - return(TRUE); - } else { - return(FALSE); - } + return TRUE; + } else { + return FALSE; + } } void task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal) { - assert (task != NULL); + assert(task != NULL); if (memlimit_is_fatal) { task->memlimit_is_fatal = 1; @@ -5161,7 +5233,7 @@ task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active) assert(task == current_task()); - /* + /* * Returns true, if task has already triggered an exc_resource exception. */ @@ -5171,7 +5243,7 @@ task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active) triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE); } - return(triggered); + return triggered; } void @@ -5196,10 +5268,10 @@ task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active) void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal) { - task_t task = current_task(); - int pid = 0; - const char *procname = "unknown"; - mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; + task_t task = current_task(); + int pid = 0; + const char *procname = "unknown"; + mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; boolean_t send_sync_exc_resource = FALSE; #ifdef MACH_BSD @@ -5220,10 +5292,10 @@ PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, #endif #if CONFIG_COREDUMP if (hwm_user_cores) { - int error; - uint64_t starttime, end; - clock_sec_t secs = 0; - uint32_t microsecs = 0; + int error; + uint64_t starttime, end; + clock_sec_t secs = 0; + uint32_t microsecs = 0; starttime = mach_absolute_time(); /* @@ -5235,20 +5307,20 @@ PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error); } /* - * coredump() leaves the task suspended. 
- */ + * coredump() leaves the task suspended. + */ task_resume_internal(current_task()); end = mach_absolute_time(); absolutetime_to_microtime(end - starttime, &secs, &microsecs); printf("coredump of %s[%d] taken in %d secs %d microsecs\n", - proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs); + proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs); } #endif /* CONFIG_COREDUMP */ if (disable_exc_resource) { printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE " - "supressed by a boot-arg.\n", procname, pid, max_footprint_mb); + "supressed by a boot-arg.\n", procname, pid, max_footprint_mb); return; } @@ -5283,10 +5355,10 @@ PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, } else { if (audio_active) { printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE " - "supressed due to audio playback.\n", procname, pid, max_footprint_mb); + "supressed due to audio playback.\n", procname, pid, max_footprint_mb); } else { task_enqueue_exception_with_corpse(task, EXC_RESOURCE, - code, EXCEPTION_CODE_MAX, NULL); + code, EXCEPTION_CODE_MAX, NULL); } } @@ -5306,7 +5378,7 @@ task_footprint_exceeded(int warning, __unused const void *param0, __unused const { ledger_amount_t max_footprint, max_footprint_mb; task_t task; - boolean_t is_warning; + boolean_t is_warning; boolean_t memlimit_is_active; boolean_t memlimit_is_fatal; @@ -5315,19 +5387,19 @@ * Task memory limits only provide a warning on the way up. */ return; - } else if (warning == LEDGER_WARNING_ROSE_ABOVE) { - /* - * This task is in danger of violating a memory limit, - * It has exceeded a percentage level of the limit. - */ - is_warning = TRUE; - } else { - /* - * The task has exceeded the physical footprint limit. - * This is not a warning but a true limit violation. - */ - is_warning = FALSE; - } + } else if (warning == LEDGER_WARNING_ROSE_ABOVE) { + /* + * This task is in danger of violating a memory limit, + * It has exceeded a percentage level of the limit. + */ + is_warning = TRUE; + } else { + /* + * The task has exceeded the physical footprint limit. + * This is not a warning but a true limit violation. + */ + is_warning = FALSE; + } task = current_task(); @@ -5366,7 +5438,7 @@ task_set_phys_footprint_limit( boolean_t memlimit_is_fatal; if ((error = proc_check_footprint_priv())) { - return (KERN_NO_ACCESS); + return KERN_NO_ACCESS; } /* @@ -5397,7 +5469,7 @@ task_convert_phys_footprint_limit( /* nothing to convert */ *converted_limit_mb = limit_mb; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -5409,17 +5481,17 @@ task_set_phys_footprint_limit_internal( boolean_t memlimit_is_active, boolean_t memlimit_is_fatal) { - ledger_amount_t old; + ledger_amount_t old; ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old); - /* + /* * Check that limit >> 20 will not give an "unexpected" 32-bit * result. There are, however, implicit assumptions that -1 mb limit * equates to LEDGER_LIMIT_INFINITY. */ assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY)); - + if (old_limit_mb) { *old_limit_mb = (int)(old >> 20); } @@ -5429,19 +5501,19 @@ * Caller wishes to remove the limit. */ ledger_set_limit(task->ledger, task_ledgers.phys_footprint, - max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY, - max_task_footprint ? 
max_task_footprint_warning_level : 0); + max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY, + max_task_footprint ? max_task_footprint_warning_level : 0); task_lock(task); task_set_memlimit_is_active(task, memlimit_is_active); task_set_memlimit_is_fatal(task, memlimit_is_fatal); task_unlock(task); - return (KERN_SUCCESS); + return KERN_SUCCESS; } #ifdef CONFIG_NOMONITORS - return (KERN_SUCCESS); + return KERN_SUCCESS; #endif /* CONFIG_NOMONITORS */ task_lock(task); @@ -5453,42 +5525,42 @@ task_set_phys_footprint_limit_internal( * memlimit state is not changing */ task_unlock(task); - return(KERN_SUCCESS); + return KERN_SUCCESS; } task_set_memlimit_is_active(task, memlimit_is_active); task_set_memlimit_is_fatal(task, memlimit_is_fatal); ledger_set_limit(task->ledger, task_ledgers.phys_footprint, - (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL); + (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL); - if (task == current_task()) { - ledger_check_new_balance(current_thread(), task->ledger, - task_ledgers.phys_footprint); - } + if (task == current_task()) { + ledger_check_new_balance(current_thread(), task->ledger, + task_ledgers.phys_footprint); + } task_unlock(task); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t -task_get_phys_footprint_limit( +task_get_phys_footprint_limit( task_t task, int *limit_mb) { - ledger_amount_t limit; - + ledger_amount_t limit; + ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit); - /* + /* * Check that limit >> 20 will not give an "unexpected" signed, 32-bit * result. There are, however, implicit assumptions that -1 mb limit * equates to LEDGER_LIMIT_INFINITY. */ assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY)); *limit_mb = (int)(limit >> 20); - - return (KERN_SUCCESS); + + return KERN_SUCCESS; } #else /* CONFIG_MEMORYSTATUS */ kern_return_t @@ -5497,15 +5569,15 @@ task_set_phys_footprint_limit( __unused int new_limit_mb, __unused int *old_limit_mb) { - return (KERN_FAILURE); + return KERN_FAILURE; } kern_return_t -task_get_phys_footprint_limit( +task_get_phys_footprint_limit( __unused task_t task, __unused int *limit_mb) { - return (KERN_FAILURE); + return KERN_FAILURE; } #endif /* CONFIG_MEMORYSTATUS */ @@ -5525,43 +5597,50 @@ task_set_thread_limit(task_t task, uint16_t thread_limit) * are currently implemented in macros within the osfmk * component. Just export them as functions of the same name. 
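 *
 * For example, current_task() is normally the macro current_task_fast(),
 * i.e. current_thread()->task; the #undef/function pairs below re-export
 * each such macro as a real symbol so that code built without the macro
 * definitions can still link against it.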
*/ -boolean_t is_kerneltask(task_t t) +boolean_t +is_kerneltask(task_t t) { - if (t == kernel_task) - return (TRUE); + if (t == kernel_task) { + return TRUE; + } - return (FALSE); + return FALSE; } -boolean_t is_corpsetask(task_t t) +boolean_t +is_corpsetask(task_t t) { - return (task_is_a_corpse(t)); + return task_is_a_corpse(t); } #undef current_task task_t current_task(void); -task_t current_task(void) +task_t +current_task(void) { - return (current_task_fast()); + return current_task_fast(); } #undef task_reference void task_reference(task_t task); void task_reference( - task_t task) + task_t task) { - if (task != TASK_NULL) + if (task != TASK_NULL) { task_reference_internal(task); + } } /* defined in bsd/kern/kern_prot.c */ extern int get_audit_token_pid(audit_token_t *audit_token); -int task_pid(task_t task) +int +task_pid(task_t task) { - if (task) + if (task) { return get_audit_token_pid(&task->audit_token); + } return -1; } @@ -5603,10 +5682,11 @@ task_findtid(task_t task, uint64_t tid) task_unlock(task); - return (found_thread); + return found_thread; } -int pid_from_task(task_t task) +int +pid_from_task(task_t task) { int pid = -1; @@ -5646,8 +5726,8 @@ task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz) task_lock(task); if (*flags & WAKEMON_GET_PARAMS) { - ledger_amount_t limit; - uint64_t period; + ledger_amount_t limit; + uint64_t period; ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit); ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period); @@ -5669,7 +5749,7 @@ task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz) /* * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored. */ - task_unlock(task); + task_unlock(task); return KERN_SUCCESS; } @@ -5691,7 +5771,7 @@ task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz) #ifndef CONFIG_NOMONITORS ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval, - task_wakeups_monitor_ustackshots_trigger_pct); + task_wakeups_monitor_ustackshots_trigger_pct); ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC); ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups); #endif /* CONFIG_NOMONITORS */ @@ -5717,7 +5797,7 @@ void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1) { if (warning == LEDGER_WARNING_ROSE_ABOVE) { -#if CONFIG_TELEMETRY +#if CONFIG_TELEMETRY /* * This task is in danger of violating the wakeups monitor. Enable telemetry on this task * so there are micro-stackshots available if and when EXC_RESOURCE is triggered. @@ -5755,8 +5835,9 @@ SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void) #ifdef MACH_BSD pid = proc_selfpid(); - if (task->bsd_info != NULL) + if (task->bsd_info != NULL) { procname = proc_name_address(current_task()->bsd_info); + } #endif ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei); @@ -5772,17 +5853,17 @@ SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void) fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON; trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei); os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times " - "over ~%llu seconds, averaging %llu wakes / second and " - "violating a %slimit of %llu wakes over %llu seconds.\n", - procname, pid, - lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC, - lei.lei_last_refill == 0 ? 
0 : - (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill), - fatal ? "FATAL " : "", - lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC); + "over ~%llu seconds, averaging %llu wakes / second and " + "violating a %slimit of %llu wakes over %llu seconds.\n", + procname, pid, + lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC, + lei.lei_last_refill == 0 ? 0 : + (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill), + fatal ? "FATAL " : "", + lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC); kr = send_resource_violation(send_cpu_wakes_violation, task, &lei, - fatal ? kRNFatalLimitFlag : 0); + fatal ? kRNFatalLimitFlag : 0); if (kr) { printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr); } @@ -5790,28 +5871,28 @@ SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void) #ifdef EXC_RESOURCE_MONITORS if (disable_exc_resource) { printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE " - "supressed by a boot-arg\n", procname, pid); + "supressed by a boot-arg\n", procname, pid); return; } if (audio_active) { os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE " - "supressed due to audio playback\n", procname, pid); + "supressed due to audio playback\n", procname, pid); return; } if (lei.lei_last_refill == 0) { os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE " - "supressed due to lei.lei_last_refill = 0 \n", procname, pid); + "supressed due to lei.lei_last_refill = 0 \n", procname, pid); } code[0] = code[1] = 0; EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS); EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR); EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0], - NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period); + NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period); EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0], - lei.lei_last_refill); + lei.lei_last_refill); EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1], - NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill); + NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill); exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX); #endif /* EXC_RESOURCE_MONITORS */ @@ -5820,12 +5901,12 @@ SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void) } } -static boolean_t +static boolean_t global_update_logical_writes(int64_t io_delta) { int64_t old_count, new_count; boolean_t needs_telemetry; - + do { new_count = old_count = global_logical_writes_count; new_count += io_delta; @@ -5835,45 +5916,47 @@ global_update_logical_writes(int64_t io_delta) } else { needs_telemetry = FALSE; } - } while(!OSCompareAndSwap64(old_count, new_count, &global_logical_writes_count)); + } while (!OSCompareAndSwap64(old_count, new_count, &global_logical_writes_count)); return needs_telemetry; } -void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp) +void +task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp) { int64_t io_delta = 0; boolean_t needs_telemetry = FALSE; - if ((!task) || (!io_size) || (!vp)) + if ((!task) || (!io_size) || (!vp)) { return; - - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE, - task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0); + } + + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE, + task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0); DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, 
flags, vnode *, vp); - switch(flags) { - case TASK_WRITE_IMMEDIATE: - OSAddAtomic64(io_size, (SInt64 *)&(task->task_immediate_writes)); - ledger_credit(task->ledger, task_ledgers.logical_writes, io_size); - break; - case TASK_WRITE_DEFERRED: - OSAddAtomic64(io_size, (SInt64 *)&(task->task_deferred_writes)); - ledger_credit(task->ledger, task_ledgers.logical_writes, io_size); - break; - case TASK_WRITE_INVALIDATED: - OSAddAtomic64(io_size, (SInt64 *)&(task->task_invalidated_writes)); - ledger_debit(task->ledger, task_ledgers.logical_writes, io_size); - break; - case TASK_WRITE_METADATA: - OSAddAtomic64(io_size, (SInt64 *)&(task->task_metadata_writes)); - ledger_credit(task->ledger, task_ledgers.logical_writes, io_size); - break; + switch (flags) { + case TASK_WRITE_IMMEDIATE: + OSAddAtomic64(io_size, (SInt64 *)&(task->task_immediate_writes)); + ledger_credit(task->ledger, task_ledgers.logical_writes, io_size); + break; + case TASK_WRITE_DEFERRED: + OSAddAtomic64(io_size, (SInt64 *)&(task->task_deferred_writes)); + ledger_credit(task->ledger, task_ledgers.logical_writes, io_size); + break; + case TASK_WRITE_INVALIDATED: + OSAddAtomic64(io_size, (SInt64 *)&(task->task_invalidated_writes)); + ledger_debit(task->ledger, task_ledgers.logical_writes, io_size); + break; + case TASK_WRITE_METADATA: + OSAddAtomic64(io_size, (SInt64 *)&(task->task_metadata_writes)); + ledger_credit(task->ledger, task_ledgers.logical_writes, io_size); + break; } io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size); - if (io_telemetry_limit != 0) { + if (io_telemetry_limit != 0) { /* If io_telemetry_limit is 0, disable global updates and I/O telemetry */ needs_telemetry = global_update_logical_writes(io_delta); - if (needs_telemetry) { + if (needs_telemetry) { act_set_io_telemetry_ast(current_thread()); } } @@ -5889,14 +5972,13 @@ task_io_monitor_ctl(task_t task, uint32_t *flags) task_lock(task); if (*flags & IOMON_ENABLE) { - /* Configure the physical I/O ledger */ + /* Configure the physical I/O ledger */ ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0); ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC)); /* Configure the logical I/O ledger */ ledger_set_limit(ledger, task_ledgers.logical_writes, (task_iomon_limit_mb * 1024 * 1024), 0); ledger_set_period(ledger, task_ledgers.logical_writes, (task_iomon_interval_secs * NSEC_PER_SEC)); - } else if (*flags & IOMON_DISABLE) { /* * Caller wishes to disable I/O monitor on the task. @@ -5919,7 +6001,8 @@ task_io_rate_exceeded(int warning, const void *param0, __unused const void *para } } -void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor) +void __attribute__((noinline)) +SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor) { int pid = 0; task_t task = current_task(); @@ -5932,20 +6015,20 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO #ifdef MACH_BSD pid = proc_selfpid(); #endif - /* - * Get the ledger entry info. We need to do this before disabling the exception + /* + * Get the ledger entry info. We need to do this before disabling the exception * to get correct values for all fields. 
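 *
 * Rough guide to the fields consumed below, inferring units from the
 * logging code: lei_balance and lei_limit are byte counts and
 * lei_refill_period is in nanoseconds, so e.g. a task_iomon_limit_mb
 * of 1024 over a task_iomon_interval_secs of 86400 would surface here
 * as lei_limit = 1024 * 1024 * 1024 and
 * lei_refill_period = 86400 * NSEC_PER_SEC.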
*/ - switch(flavor) { - case FLAVOR_IO_PHYSICAL_WRITES: - ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei); - break; - case FLAVOR_IO_LOGICAL_WRITES: - ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei); - break; + switch (flavor) { + case FLAVOR_IO_PHYSICAL_WRITES: + ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei); + break; + case FLAVOR_IO_LOGICAL_WRITES: + ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei); + break; } - + /* * Disable the exception notification so we don't overwhelm * the listener with an endless stream of redundant exceptions. @@ -5958,7 +6041,7 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei); } os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n", - pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC)); + pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC)); kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone); if (kr) { @@ -5974,29 +6057,31 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024))); exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX); #endif /* EXC_RESOURCE_MONITORS */ -} +} /* Placeholders for the task set/get voucher interfaces */ -kern_return_t +kern_return_t task_get_mach_voucher( - task_t task, + task_t task, mach_voucher_selector_t __unused which, - ipc_voucher_t *voucher) + ipc_voucher_t *voucher) { - if (TASK_NULL == task) + if (TASK_NULL == task) { return KERN_INVALID_TASK; + } *voucher = NULL; return KERN_SUCCESS; } -kern_return_t +kern_return_t task_set_mach_voucher( - task_t task, - ipc_voucher_t __unused voucher) + task_t task, + ipc_voucher_t __unused voucher) { - if (TASK_NULL == task) + if (TASK_NULL == task) { return KERN_INVALID_TASK; + } return KERN_SUCCESS; } @@ -6017,7 +6102,8 @@ task_swap_mach_voucher( return KERN_NOT_SUPPORTED; } -void task_set_gpu_denied(task_t task, boolean_t denied) +void +task_set_gpu_denied(task_t task, boolean_t denied) { task_lock(task); @@ -6030,23 +6116,25 @@ void task_set_gpu_denied(task_t task, boolean_t denied) task_unlock(task); } -boolean_t task_is_gpu_denied(task_t task) +boolean_t +task_is_gpu_denied(task_t task) { /* We don't need the lock to read this flag */ return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE; } -uint64_t get_task_memory_region_count(task_t task) +uint64_t +get_task_memory_region_count(task_t task) { vm_map_t map; map = (task == kernel_task) ? 
kernel_map: task->map; - return((uint64_t)get_map_nentries(map)); + return (uint64_t)get_map_nentries(map); } static void kdebug_trace_dyld_internal(uint32_t base_code, - struct dyld_kernel_image_info *info) + struct dyld_kernel_image_info *info) { static_assert(sizeof(info->uuid) >= 16); @@ -6054,34 +6142,34 @@ kdebug_trace_dyld_internal(uint32_t base_code, uint64_t *uuid = (uint64_t *)&(info->uuid); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0], - uuid[1], info->load_addr, - (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32), - 0); + KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0], + uuid[1], info->load_addr, + (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32), + 0); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1), - (uint64_t)info->fsobjid.fid_objno | - ((uint64_t)info->fsobjid.fid_generation << 32), - 0, 0, 0, 0); + KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1), + (uint64_t)info->fsobjid.fid_objno | + ((uint64_t)info->fsobjid.fid_generation << 32), + 0, 0, 0, 0); #else /* defined(__LP64__) */ uint32_t *uuid = (uint32_t *)&(info->uuid); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0], - uuid[1], uuid[2], uuid[3], 0); + KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0], + uuid[1], uuid[2], uuid[3], 0); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3), - (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1], - info->fsobjid.fid_objno, 0); + KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3), + (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1], + info->fsobjid.fid_objno, 0); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4), - info->fsobjid.fid_generation, 0, 0, 0, 0); + KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4), + info->fsobjid.fid_generation, 0, 0, 0, 0); #endif /* !defined(__LP64__) */ } static kern_return_t kdebug_trace_dyld(task_t task, uint32_t base_code, - vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len) + vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len) { kern_return_t kr; dyld_kernel_image_info_array_t infos; @@ -6093,8 +6181,7 @@ kdebug_trace_dyld(task_t task, uint32_t base_code, } if (!kdebug_enable || - !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) - { + !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) { vm_map_copy_discard(infos_copy); return KERN_SUCCESS; } @@ -6121,35 +6208,35 @@ kdebug_trace_dyld(task_t task, uint32_t base_code, kern_return_t task_register_dyld_image_infos(task_t task, - dyld_kernel_image_info_array_t infos_copy, - mach_msg_type_number_t infos_len) + dyld_kernel_image_info_array_t infos_copy, + mach_msg_type_number_t infos_len) { return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A, - (vm_map_copy_t)infos_copy, infos_len); + (vm_map_copy_t)infos_copy, infos_len); } kern_return_t task_unregister_dyld_image_infos(task_t task, - dyld_kernel_image_info_array_t infos_copy, - mach_msg_type_number_t infos_len) + dyld_kernel_image_info_array_t infos_copy, + mach_msg_type_number_t infos_len) { return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A, - (vm_map_copy_t)infos_copy, infos_len); + (vm_map_copy_t)infos_copy, infos_len); } kern_return_t task_get_dyld_image_infos(__unused task_t task, - __unused dyld_kernel_image_info_array_t * dyld_images, - __unused 
mach_msg_type_number_t * dyld_imagesCnt) + __unused dyld_kernel_image_info_array_t * dyld_images, + __unused mach_msg_type_number_t * dyld_imagesCnt) { return KERN_NOT_SUPPORTED; } kern_return_t task_register_dyld_shared_cache_image_info(task_t task, - dyld_kernel_image_info_t cache_img, - __unused boolean_t no_cache, - __unused boolean_t private_cache) + dyld_kernel_image_info_t cache_img, + __unused boolean_t no_cache, + __unused boolean_t private_cache) { if (task == NULL || task != current_task()) { return KERN_INVALID_TASK; @@ -6161,21 +6248,21 @@ task_register_dyld_shared_cache_image_info(task_t task, kern_return_t task_register_dyld_set_dyld_state(__unused task_t task, - __unused uint8_t dyld_state) + __unused uint8_t dyld_state) { return KERN_NOT_SUPPORTED; } kern_return_t task_register_dyld_get_process_state(__unused task_t task, - __unused dyld_kernel_process_info_t * dyld_process_state) + __unused dyld_kernel_process_info_t * dyld_process_state) { return KERN_NOT_SUPPORTED; } kern_return_t task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor, - task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out) + task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out) { #if MONOTONIC task_t task = (task_t)task_insp; @@ -6229,8 +6316,8 @@ int num_tasks_can_use_secluded_mem = 0; void task_set_can_use_secluded_mem( - task_t task, - boolean_t can_use_secluded_mem) + task_t task, + boolean_t can_use_secluded_mem) { if (!task->task_could_use_secluded_mem) { return; @@ -6242,8 +6329,8 @@ task_set_can_use_secluded_mem( void task_set_can_use_secluded_mem_locked( - task_t task, - boolean_t can_use_secluded_mem) + task_t task, + boolean_t can_use_secluded_mem) { assert(task->task_could_use_secluded_mem); if (can_use_secluded_mem && @@ -6251,37 +6338,37 @@ task_set_can_use_secluded_mem_locked( !task->task_can_use_secluded_mem) { assert(num_tasks_can_use_secluded_mem >= 0); OSAddAtomic(+1, - (volatile SInt32 *)&num_tasks_can_use_secluded_mem); + (volatile SInt32 *)&num_tasks_can_use_secluded_mem); task->task_can_use_secluded_mem = TRUE; } else if (!can_use_secluded_mem && - task->task_can_use_secluded_mem) { + task->task_can_use_secluded_mem) { assert(num_tasks_can_use_secluded_mem > 0); OSAddAtomic(-1, - (volatile SInt32 *)&num_tasks_can_use_secluded_mem); + (volatile SInt32 *)&num_tasks_can_use_secluded_mem); task->task_can_use_secluded_mem = FALSE; } } void task_set_could_use_secluded_mem( - task_t task, - boolean_t could_use_secluded_mem) + task_t task, + boolean_t could_use_secluded_mem) { task->task_could_use_secluded_mem = could_use_secluded_mem; } void task_set_could_also_use_secluded_mem( - task_t task, - boolean_t could_also_use_secluded_mem) + task_t task, + boolean_t could_also_use_secluded_mem) { task->task_could_also_use_secluded_mem = could_also_use_secluded_mem; } boolean_t task_can_use_secluded_mem( - task_t task, - boolean_t is_alloc) + task_t task, + boolean_t is_alloc) { if (task->task_can_use_secluded_mem) { assert(task->task_could_use_secluded_mem); @@ -6312,7 +6399,7 @@ task_can_use_secluded_mem( boolean_t task_could_use_secluded_mem( - task_t task) + task_t task) { return task->task_could_use_secluded_mem; } @@ -6321,7 +6408,7 @@ task_could_use_secluded_mem( queue_head_t * task_io_user_clients(task_t task) { - return (&task->io_user_clients); + return &task->io_user_clients; } void @@ -6350,7 +6437,7 @@ void task_self_region_footprint_set( boolean_t newval) { - task_t curtask; + task_t curtask; curtask = current_task(); task_lock(curtask); @@ 
-6382,14 +6469,14 @@ boolean_t task_get_darkwake_mode(task_t task) { assert(task); - return ((task->t_flags & TF_DARKWAKE_MODE) != 0); + return (task->t_flags & TF_DARKWAKE_MODE) != 0; } #if __arm64__ void task_set_legacy_footprint( - task_t task, - boolean_t new_val) + task_t task, + boolean_t new_val) { task_lock(task); task->task_legacy_footprint = new_val; diff --git a/osfmk/kern/task.h b/osfmk/kern/task.h index 12a216230..00f16e0e4 100644 --- a/osfmk/kern/task.h +++ b/osfmk/kern/task.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -88,7 +88,7 @@ * Copyright (c) 2005 SPARTA, Inc. 
*/ -#ifndef _KERN_TASK_H_ +#ifndef _KERN_TASK_H_ #define _KERN_TASK_H_ #include @@ -101,7 +101,7 @@ #include #endif /* XNU_KERNEL_PRIVATE */ -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -134,113 +134,113 @@ #endif struct _cpu_time_qos_stats { - uint64_t cpu_time_qos_default; - uint64_t cpu_time_qos_maintenance; - uint64_t cpu_time_qos_background; - uint64_t cpu_time_qos_utility; - uint64_t cpu_time_qos_legacy; - uint64_t cpu_time_qos_user_initiated; - uint64_t cpu_time_qos_user_interactive; + uint64_t cpu_time_qos_default; + uint64_t cpu_time_qos_maintenance; + uint64_t cpu_time_qos_background; + uint64_t cpu_time_qos_utility; + uint64_t cpu_time_qos_legacy; + uint64_t cpu_time_qos_user_initiated; + uint64_t cpu_time_qos_user_interactive; }; #include struct task { /* Synchronization/destruction information */ - decl_lck_mtx_data(,lock) /* Task's lock */ - os_refcnt_t ref_count; /* Number of references to me */ - boolean_t active; /* Task has not been terminated */ - boolean_t halting; /* Task is being halted */ + decl_lck_mtx_data(, lock) /* Task's lock */ + os_refcnt_t ref_count; /* Number of references to me */ + boolean_t active; /* Task has not been terminated */ + boolean_t halting; /* Task is being halted */ /* Virtual timers */ - uint32_t vtimers; + uint32_t vtimers; /* Miscellaneous */ - vm_map_t map; /* Address space description */ - queue_chain_t tasks; /* global list of tasks */ + vm_map_t map; /* Address space description */ + queue_chain_t tasks; /* global list of tasks */ #if defined(CONFIG_SCHED_MULTIQ) sched_group_t sched_group; #endif /* defined(CONFIG_SCHED_MULTIQ) */ /* Threads in this task */ - queue_head_t threads; + queue_head_t threads; - processor_set_t pset_hint; - struct affinity_space *affinity_space; + processor_set_t pset_hint; + struct affinity_space *affinity_space; - int thread_count; - uint32_t active_thread_count; - int suspend_count; /* Internal scheduling only */ + int thread_count; + uint32_t active_thread_count; + int suspend_count; /* Internal scheduling only */ /* User-visible scheduling information */ - integer_t user_stop_count; /* outstanding stops */ - integer_t legacy_stop_count; /* outstanding legacy stops */ + integer_t user_stop_count; /* outstanding stops */ + integer_t legacy_stop_count; /* outstanding legacy stops */ - integer_t priority; /* base priority for threads */ - integer_t max_priority; /* maximum priority for threads */ + integer_t priority; /* base priority for threads */ + integer_t max_priority; /* maximum priority for threads */ - integer_t importance; /* priority offset (BSD 'nice' value) */ + integer_t importance; /* priority offset (BSD 'nice' value) */ /* Task security and audit tokens */ security_token_t sec_token; - audit_token_t audit_token; + audit_token_t audit_token; /* Statistics */ - uint64_t total_user_time; /* terminated threads only */ - uint64_t total_system_time; - uint64_t total_ptime; - uint64_t total_runnable_time; + uint64_t total_user_time; /* terminated threads only */ + uint64_t total_system_time; + uint64_t total_ptime; + uint64_t total_runnable_time; /* IPC structures */ - decl_lck_mtx_data(,itk_lock_data) - struct ipc_port *itk_self; /* not a right, doesn't hold ref */ - struct ipc_port *itk_nself; /* not a right, doesn't hold ref */ - struct ipc_port *itk_sself; /* a send right */ + decl_lck_mtx_data(, itk_lock_data) + struct ipc_port *itk_self; /* not a right, doesn't hold ref */ + struct ipc_port *itk_nself; /* not a right, doesn't hold ref */ + struct ipc_port 
*itk_sself; /* a send right */ struct exception_action exc_actions[EXC_TYPES_COUNT]; - /* a send right each valid element */ - struct ipc_port *itk_host; /* a send right */ - struct ipc_port *itk_bootstrap; /* a send right */ - struct ipc_port *itk_seatbelt; /* a send right */ - struct ipc_port *itk_gssd; /* yet another send right */ + /* a send right each valid element */ + struct ipc_port *itk_host; /* a send right */ + struct ipc_port *itk_bootstrap; /* a send right */ + struct ipc_port *itk_seatbelt; /* a send right */ + struct ipc_port *itk_gssd; /* yet another send right */ struct ipc_port *itk_debug_control; /* send right for debugmode communications */ - struct ipc_port *itk_task_access; /* and another send right */ - struct ipc_port *itk_resume; /* a receive right to resume this task */ + struct ipc_port *itk_task_access; /* and another send right */ + struct ipc_port *itk_resume; /* a receive right to resume this task */ struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX]; - /* all send rights */ + /* all send rights */ struct ipc_space *itk_space; - ledger_t ledger; + ledger_t ledger; /* Synchronizer ownership information */ - queue_head_t semaphore_list; /* list of owned semaphores */ - int semaphores_owned; /* number of semaphores owned */ + queue_head_t semaphore_list; /* list of owned semaphores */ + int semaphores_owned; /* number of semaphores owned */ - unsigned int priv_flags; /* privilege resource flags */ -#define VM_BACKING_STORE_PRIV 0x1 + unsigned int priv_flags; /* privilege resource flags */ +#define VM_BACKING_STORE_PRIV 0x1 MACHINE_TASK - + integer_t faults; /* faults counter */ - integer_t pageins; /* pageins counter */ - integer_t cow_faults; /* copy on write fault counter */ - integer_t messages_sent; /* messages sent counter */ - integer_t messages_received; /* messages received counter */ - integer_t syscalls_mach; /* mach system call counter */ - integer_t syscalls_unix; /* unix system call counter */ - uint32_t c_switch; /* total context switches */ - uint32_t p_switch; /* total processor switches */ - uint32_t ps_switch; /* total pset switches */ - -#ifdef MACH_BSD + integer_t pageins; /* pageins counter */ + integer_t cow_faults; /* copy on write fault counter */ + integer_t messages_sent; /* messages sent counter */ + integer_t messages_received; /* messages received counter */ + integer_t syscalls_mach; /* mach system call counter */ + integer_t syscalls_unix; /* unix system call counter */ + uint32_t c_switch; /* total context switches */ + uint32_t p_switch; /* total processor switches */ + uint32_t ps_switch; /* total pset switches */ + +#ifdef MACH_BSD void *bsd_info; -#endif - kcdata_descriptor_t corpse_info; - uint64_t crashed_thread_id; - queue_chain_t corpse_tasks; +#endif + kcdata_descriptor_t corpse_info; + uint64_t crashed_thread_id; + queue_chain_t corpse_tasks; #ifdef CONFIG_MACF - struct label * crash_label; + struct label * crash_label; #endif - struct vm_shared_region *shared_region; + struct vm_shared_region *shared_region; volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */ #define TF_NONE 0 #define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */ @@ -257,25 +257,26 @@ struct task { #define TF_PLATFORM 0x00000400 /* task is a platform binary */ #define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */ #define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */ +#define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads */ /* * Task is running 
within a 64-bit address space. */ -#define task_has_64Bit_addr(task) \ +#define task_has_64Bit_addr(task) \ (((task)->t_flags & TF_64B_ADDR) != 0) -#define task_set_64Bit_addr(task) \ +#define task_set_64Bit_addr(task) \ ((task)->t_flags |= TF_64B_ADDR) -#define task_clear_64Bit_addr(task) \ +#define task_clear_64Bit_addr(task) \ ((task)->t_flags &= ~TF_64B_ADDR) /* * Task is using 64-bit machine state. */ -#define task_has_64Bit_data(task) \ +#define task_has_64Bit_data(task) \ (((task)->t_flags & TF_64B_DATA) != 0) -#define task_set_64Bit_data(task) \ +#define task_set_64Bit_data(task) \ ((task)->t_flags |= TF_64B_DATA) -#define task_clear_64Bit_data(task) \ +#define task_clear_64Bit_data(task) \ ((task)->t_flags &= ~TF_64B_DATA) #define task_is_a_corpse(task) \ @@ -284,7 +285,7 @@ struct task { #define task_set_corpse(task) \ ((task)->t_flags |= TF_CORPSE) -#define task_corpse_pending_report(task) \ +#define task_corpse_pending_report(task) \ (((task)->t_flags & TF_PENDING_CORPSE) != 0) #define task_set_corpse_pending_report(task) \ @@ -293,7 +294,7 @@ struct task { #define task_clear_corpse_pending_report(task) \ ((task)->t_flags &= ~TF_PENDING_CORPSE) -#define task_is_a_corpse_fork(task) \ +#define task_is_a_corpse_fork(task) \ (((task)->t_flags & TF_CORPSE_FORK) != 0) uint32_t t_procflags; /* general-purpose task flags protected by proc_lock (PL) */ @@ -304,41 +305,41 @@ struct task { #define TPF_LOG_32BIT_TELEMETRY 0x00000004 /* task should log identifying information */ #endif -#define task_did_exec_internal(task) \ +#define task_did_exec_internal(task) \ (((task)->t_procflags & TPF_DID_EXEC) != 0) -#define task_is_exec_copy_internal(task) \ +#define task_is_exec_copy_internal(task) \ (((task)->t_procflags & TPF_EXEC_COPY) != 0) - mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */ - mach_vm_size_t all_image_info_size; /* section location and size */ + mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */ + mach_vm_size_t all_image_info_size; /* section location and size */ #if KPC -#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_kpc" signifying this task forced all counters */ +#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_kpc" signifying this task forced all counters */ uint32_t t_kpc; /* kpc flags */ #endif /* KPC */ boolean_t pidsuspended; /* pid_suspend called; no threads can execute */ boolean_t frozen; /* frozen; private resident pages committed to swap */ - boolean_t changing_freeze_state; /* in the process of freezing or thawing */ + boolean_t changing_freeze_state; /* in the process of freezing or thawing */ uint16_t policy_ru_cpu :4, - policy_ru_cpu_ext :4, - applied_ru_cpu :4, - applied_ru_cpu_ext :4; + policy_ru_cpu_ext :4, + applied_ru_cpu :4, + applied_ru_cpu_ext :4; uint8_t rusage_cpu_flags; - uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */ + uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */ uint8_t rusage_cpu_perthr_percentage; /* Per-thread CPU limit percentage */ #if MACH_ASSERT - int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */ + int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */ #endif - uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */ + uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */ uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */ uint64_t rusage_cpu_deadline; thread_call_t rusage_cpu_callt; #if CONFIG_EMBEDDED - queue_head_t task_watchers; /* app state watcher 
threads */ - int num_taskwatchers; - int watchapplying; + queue_head_t task_watchers; /* app state watcher threads */ + int num_taskwatchers; + int watchapplying; #endif /* CONFIG_EMBEDDED */ #if CONFIG_ATM @@ -347,10 +348,10 @@ struct task { struct bank_task *bank_context; /* pointer to per task bank structure */ #if IMPORTANCE_INHERITANCE - struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */ + struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */ #endif /* IMPORTANCE_INHERITANCE */ - vm_extmod_statistics_data_t extmod_statistics; + vm_extmod_statistics_data_t extmod_statistics; struct task_requested_policy requested_policy; struct task_effective_policy effective_policy; @@ -358,36 +359,36 @@ struct task { /* * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away. */ - uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */ - low_mem_notified_critical :1, /* critical low memory notification is sent to the task */ - purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */ - purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */ - low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */ - mem_notify_reserved :27; /* reserved for future use */ + uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */ + low_mem_notified_critical :1, /* critical low memory notification is sent to the task */ + purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */ + purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */ + low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */ + mem_notify_reserved :27; /* reserved for future use */ uint32_t memlimit_is_active :1, /* if set, use active attributes, otherwise use inactive attributes */ - memlimit_is_fatal :1, /* if set, exceeding current memlimit will prove fatal to the task */ - memlimit_active_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */ - memlimit_inactive_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */ - memlimit_attrs_reserved :28; /* reserved for future use */ - - io_stat_info_t task_io_stats; - uint64_t task_immediate_writes __attribute__((aligned(8))); - uint64_t task_deferred_writes __attribute__((aligned(8))); - uint64_t task_invalidated_writes __attribute__((aligned(8))); - uint64_t task_metadata_writes __attribute__((aligned(8))); - - /* + memlimit_is_fatal :1, /* if set, exceeding current memlimit will prove fatal to the task */ + memlimit_active_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */ + memlimit_inactive_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */ + memlimit_attrs_reserved :28; /* reserved for future use */ + + io_stat_info_t task_io_stats; + uint64_t task_immediate_writes __attribute__((aligned(8))); + uint64_t task_deferred_writes __attribute__((aligned(8))); + uint64_t task_invalidated_writes __attribute__((aligned(8))); + uint64_t task_metadata_writes __attribute__((aligned(8))); + + /* * The cpu_time_qos_stats fields are protected by the task lock */ - struct 
_cpu_time_qos_stats cpu_time_eqos_stats; - struct _cpu_time_qos_stats cpu_time_rqos_stats; + struct _cpu_time_qos_stats cpu_time_eqos_stats; + struct _cpu_time_qos_stats cpu_time_rqos_stats; /* Statistics accumulated for terminated threads from this task */ - uint32_t task_timer_wakeups_bin_1; - uint32_t task_timer_wakeups_bin_2; - uint64_t task_gpu_ns; - uint64_t task_energy; + uint32_t task_timer_wakeups_bin_1; + uint32_t task_timer_wakeups_bin_2; + uint64_t task_gpu_ns; + uint64_t task_energy; #if MONOTONIC /* Read and written under task_lock */ @@ -395,21 +396,21 @@ struct task { #endif /* MONOTONIC */ /* # of purgeable volatile VM objects owned by this task: */ - int task_volatile_objects; + int task_volatile_objects; /* # of purgeable but not volatile VM objects owned by this task: */ - int task_nonvolatile_objects; - boolean_t task_purgeable_disowning; - boolean_t task_purgeable_disowned; - queue_head_t task_objq; - decl_lck_mtx_data(,task_objq_lock) /* protects "task_objq" */ + int task_nonvolatile_objects; + boolean_t task_purgeable_disowning; + boolean_t task_purgeable_disowned; + queue_head_t task_objq; + decl_lck_mtx_data(, task_objq_lock) /* protects "task_objq" */ - unsigned int task_thread_limit:16; + unsigned int task_thread_limit:16; #if __arm64__ - unsigned int task_legacy_footprint:1; + unsigned int task_legacy_footprint:1; #endif /* __arm64__ */ - unsigned int task_region_footprint:1; - unsigned int task_has_crossed_thread_limit:1; - uint32_t exec_token; + unsigned int task_region_footprint:1; + unsigned int task_has_crossed_thread_limit:1; + uint32_t exec_token; /* * A task's coalition set is "adopted" in task_create_internal * and unset in task_deallocate_internal, so each array member @@ -417,13 +418,13 @@ struct task { * Note: these fields are protected by coalition->lock, * not the task lock. 
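 *
 * (As of this release COALITION_NUM_TYPES is 2, so the members being
 * adopted are coalition[COALITION_TYPE_RESOURCE] and
 * coalition[COALITION_TYPE_JETSAM]; see <mach/coalition.h>.)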
*/ - coalition_t coalition[COALITION_NUM_TYPES]; + coalition_t coalition[COALITION_NUM_TYPES]; queue_chain_t task_coalition[COALITION_NUM_TYPES]; uint64_t dispatchqueue_offset; #if DEVELOPMENT || DEBUG - boolean_t task_unnested; - int task_disconnected_count; + boolean_t task_unnested; + int task_disconnected_count; #endif #if HYPERVISOR @@ -431,10 +432,10 @@ struct task { #endif /* HYPERVISOR */ #if CONFIG_SECLUDED_MEMORY - uint8_t task_can_use_secluded_mem; - uint8_t task_could_use_secluded_mem; - uint8_t task_could_also_use_secluded_mem; - uint8_t task_suppressed_secluded; + uint8_t task_can_use_secluded_mem; + uint8_t task_could_use_secluded_mem; + uint8_t task_could_also_use_secluded_mem; + uint8_t task_suppressed_secluded; #endif /* CONFIG_SECLUDED_MEMORY */ uint32_t task_exc_guard; @@ -456,23 +457,23 @@ struct task { extern uint32_t task_exc_guard_default; extern kern_return_t -task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); + task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *); -#define task_lock(task) lck_mtx_lock(&(task)->lock) -#define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED) -#define task_lock_try(task) lck_mtx_try_lock(&(task)->lock) -#define task_unlock(task) lck_mtx_unlock(&(task)->lock) +#define task_lock(task) lck_mtx_lock(&(task)->lock) +#define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED) +#define task_lock_try(task) lck_mtx_try_lock(&(task)->lock) +#define task_unlock(task) lck_mtx_unlock(&(task)->lock) -#define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr) -#define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock) -#define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED) -#define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock) -#define task_objq_unlock(task) lck_mtx_unlock(&(task)->task_objq_lock) +#define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr) +#define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock) +#define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED) +#define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock) +#define task_objq_unlock(task) lck_mtx_unlock(&(task)->task_objq_lock) -#define itk_lock_init(task) lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr) -#define itk_lock_destroy(task) lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp) -#define itk_lock(task) lck_mtx_lock(&(task)->itk_lock_data) -#define itk_unlock(task) lck_mtx_unlock(&(task)->itk_lock_data) +#define itk_lock_init(task) lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr) +#define itk_lock_destroy(task) lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp) +#define itk_lock(task) lck_mtx_lock(&(task)->itk_lock_data) +#define itk_unlock(task) lck_mtx_unlock(&(task)->itk_lock_data) #define TASK_REFERENCE_LEAK_DEBUG 0 @@ -484,37 +485,37 @@ extern os_ref_count_t task_deallocate_internal(task_t task); #define task_deallocate_internal(task) os_ref_release(&(task)->ref_count) #endif -#define task_reference(task) \ -MACRO_BEGIN \ - if ((task) != TASK_NULL) \ - task_reference_internal(task); \ +#define task_reference(task) \ +MACRO_BEGIN \ + if ((task) != TASK_NULL) \ + task_reference_internal(task); \ MACRO_END -extern kern_return_t kernel_task_create( - task_t task, - 
vm_offset_t map_base, - vm_size_t map_size, - task_t *child); +extern kern_return_t kernel_task_create( + task_t task, + vm_offset_t map_base, + vm_size_t map_size, + task_t *child); /* Initialize task module */ -extern void task_init(void); +extern void task_init(void); /* coalition_init() calls this to initialize ledgers before task_init() */ -extern void init_task_ledgers(void); +extern void init_task_ledgers(void); -#define current_task_fast() (current_thread()->task) -#define current_task() current_task_fast() +#define current_task_fast() (current_thread()->task) +#define current_task() current_task_fast() extern lck_attr_t task_lck_attr; extern lck_grp_t task_lck_grp; -#else /* MACH_KERNEL_PRIVATE */ +#else /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS -extern task_t current_task(void); +extern task_t current_task(void); -extern void task_reference(task_t task); +extern void task_reference(task_t task); #define TF_NONE 0 #define TF_LRETURNWAIT 0x00000100 /* task is waiting for fork/posix_spawn/exec to complete */ @@ -526,24 +527,24 @@ extern void task_reference(task_t task); __END_DECLS -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE /* Hold all threads in a task */ -extern kern_return_t task_hold( - task_t task); +extern kern_return_t task_hold( + task_t task); /* Wait for task to stop running, either just to get off CPU or to cease being runnable */ -extern kern_return_t task_wait( - task_t task, - boolean_t until_not_runnable); +extern kern_return_t task_wait( + task_t task, + boolean_t until_not_runnable); /* Release hold on all threads in a task */ -extern kern_return_t task_release( - task_t task); +extern kern_return_t task_release( + task_t task); /* Suspend/resume a task where the kernel owns the suspend count */ extern kern_return_t task_suspend_internal( task_t task); @@ -551,162 +552,162 @@ extern kern_return_t task_resume_internal( task_t task); /* Suspends a task by placing a hold on its threads */ extern kern_return_t task_pidsuspend( - task_t task); + task_t task); extern kern_return_t task_pidsuspend_locked( - task_t task); + task_t task); /* Resumes a previously paused task */ extern kern_return_t task_pidresume( - task_t task); + task_t task); -extern kern_return_t task_send_trace_memory( - task_t task, - uint32_t pid, - uint64_t uniqueid); +extern kern_return_t task_send_trace_memory( + task_t task, + uint32_t pid, + uint64_t uniqueid); #if DEVELOPMENT || DEBUG -extern kern_return_t task_disconnect_page_mappings( - task_t task); +extern kern_return_t task_disconnect_page_mappings( + task_t task); #endif -extern void tasks_system_suspend(boolean_t suspend); +extern void tasks_system_suspend(boolean_t suspend); #if CONFIG_FREEZE /* Freeze a task's resident pages */ -extern kern_return_t task_freeze( - task_t task, - uint32_t *purgeable_count, - uint32_t *wired_count, - uint32_t *clean_count, - uint32_t *dirty_count, - uint32_t dirty_budget, - uint32_t *shared_count, - int *freezer_error_code, - boolean_t eval_only); +extern kern_return_t task_freeze( + task_t task, + uint32_t *purgeable_count, + uint32_t *wired_count, + uint32_t *clean_count, + uint32_t *dirty_count, + uint32_t dirty_budget, + uint32_t *shared_count, + int *freezer_error_code, + boolean_t eval_only); /* Thaw a currently frozen task */ -extern kern_return_t task_thaw( - task_t task); +extern kern_return_t task_thaw( + task_t task); #endif /* CONFIG_FREEZE */ /* Halt all other threads in the current task */ -extern 
kern_return_t task_start_halt( - task_t task); +extern kern_return_t task_start_halt( + task_t task); /* Wait for other threads to halt and free halting task resources */ -extern void task_complete_halt( - task_t task); - -extern kern_return_t task_terminate_internal( - task_t task); - -extern kern_return_t task_create_internal( - task_t parent_task, - coalition_t *parent_coalitions, - boolean_t inherit_memory, - boolean_t is_64bit, - boolean_t is_64bit_data, - uint32_t flags, - uint32_t procflags, - task_t *child_task); /* OUT */ - -extern kern_return_t task_info( - task_t task, - task_flavor_t flavor, - task_info_t task_info_out, - mach_msg_type_number_t *task_info_count); - -extern void task_power_info_locked( - task_t task, - task_power_info_t info, - gpu_energy_data_t gpu_energy, - task_power_info_v2_t infov2); - -extern uint64_t task_gpu_utilisation( - task_t task); - -extern uint64_t task_energy( - task_t task); - -extern uint64_t task_cpu_ptime( - task_t task); -extern void task_update_cpu_time_qos_stats( - task_t task, - uint64_t *eqos_stats, - uint64_t *rqos_stats); - -extern void task_vtimer_set( - task_t task, - integer_t which); - -extern void task_vtimer_clear( - task_t task, - integer_t which); - -extern void task_vtimer_update( - task_t task, - integer_t which, - uint32_t *microsecs); - -#define TASK_VTIMER_USER 0x01 -#define TASK_VTIMER_PROF 0x02 -#define TASK_VTIMER_RLIM 0x04 - -extern void task_set_64bit( - task_t task, - boolean_t is_64bit, - boolean_t is_64bit_data); - -extern boolean_t task_get_64bit_data( - task_t task); - -extern void task_set_platform_binary( - task_t task, - boolean_t is_platform); -extern bool task_set_ca_client_wi( - task_t task, - boolean_t ca_client_wi); - -extern void task_set_dyld_info( - task_t task, - mach_vm_address_t addr, - mach_vm_size_t size); +extern void task_complete_halt( + task_t task); + +extern kern_return_t task_terminate_internal( + task_t task); + +extern kern_return_t task_create_internal( + task_t parent_task, + coalition_t *parent_coalitions, + boolean_t inherit_memory, + boolean_t is_64bit, + boolean_t is_64bit_data, + uint32_t flags, + uint32_t procflags, + task_t *child_task); /* OUT */ + +extern kern_return_t task_info( + task_t task, + task_flavor_t flavor, + task_info_t task_info_out, + mach_msg_type_number_t *task_info_count); + +extern void task_power_info_locked( + task_t task, + task_power_info_t info, + gpu_energy_data_t gpu_energy, + task_power_info_v2_t infov2); + +extern uint64_t task_gpu_utilisation( + task_t task); + +extern uint64_t task_energy( + task_t task); + +extern uint64_t task_cpu_ptime( + task_t task); +extern void task_update_cpu_time_qos_stats( + task_t task, + uint64_t *eqos_stats, + uint64_t *rqos_stats); + +extern void task_vtimer_set( + task_t task, + integer_t which); + +extern void task_vtimer_clear( + task_t task, + integer_t which); + +extern void task_vtimer_update( + task_t task, + integer_t which, + uint32_t *microsecs); + +#define TASK_VTIMER_USER 0x01 +#define TASK_VTIMER_PROF 0x02 +#define TASK_VTIMER_RLIM 0x04 + +extern void task_set_64bit( + task_t task, + boolean_t is_64bit, + boolean_t is_64bit_data); + +extern boolean_t task_get_64bit_data( + task_t task); + +extern void task_set_platform_binary( + task_t task, + boolean_t is_platform); +extern bool task_set_ca_client_wi( + task_t task, + boolean_t ca_client_wi); + +extern void task_set_dyld_info( + task_t task, + mach_vm_address_t addr, + mach_vm_size_t size); /* Get number of activations in a task */ -extern int 
get_task_numacts( - task_t task); +extern int get_task_numacts( + task_t task); extern int get_task_numactivethreads(task_t task); struct label; extern kern_return_t task_collect_crash_info( - task_t task, + task_t task, #if CONFIG_MACF - struct label *crash_label, + struct label *crash_label, #endif - int is_corpse_fork); + int is_corpse_fork); void task_port_notify(mach_msg_header_t *msg); void task_wait_till_threads_terminate_locked(task_t task); /* JMM - should just be temporary (implementation in bsd_kern still) */ -extern void set_bsdtask_info(task_t,void *); +extern void set_bsdtask_info(task_t, void *); extern vm_map_t get_task_map_reference(task_t); -extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t); -extern pmap_t get_task_pmap(task_t); -extern uint64_t get_task_resident_size(task_t); -extern uint64_t get_task_compressed(task_t); -extern uint64_t get_task_resident_max(task_t); -extern uint64_t get_task_phys_footprint(task_t); +extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t); +extern pmap_t get_task_pmap(task_t); +extern uint64_t get_task_resident_size(task_t); +extern uint64_t get_task_compressed(task_t); +extern uint64_t get_task_resident_max(task_t); +extern uint64_t get_task_phys_footprint(task_t); #if CONFIG_LEDGER_INTERVAL_MAX -extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset); +extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset); #endif /* CONFIG_FOOTPRINT_INTERVAL_MAX */ -extern uint64_t get_task_phys_footprint_lifetime_max(task_t); -extern uint64_t get_task_phys_footprint_limit(task_t); -extern uint64_t get_task_purgeable_size(task_t); -extern uint64_t get_task_cpu_time(task_t); +extern uint64_t get_task_phys_footprint_lifetime_max(task_t); +extern uint64_t get_task_phys_footprint_limit(task_t); +extern uint64_t get_task_purgeable_size(task_t); +extern uint64_t get_task_cpu_time(task_t); extern uint64_t get_task_dispatchqueue_offset(task_t); extern uint64_t get_task_dispatchqueue_serialno_offset(task_t); extern uint64_t get_task_uniqueid(task_t task); @@ -740,22 +741,22 @@ extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit extern void task_set_thread_limit(task_t task, uint16_t thread_limit); -extern boolean_t is_kerneltask(task_t task); -extern boolean_t is_corpsetask(task_t task); +extern boolean_t is_kerneltask(task_t task); +extern boolean_t is_corpsetask(task_t task); extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast); extern kern_return_t machine_task_get_state( - task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t *state_count); + task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t *state_count); extern kern_return_t machine_task_set_state( - task_t task, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count); + task_t task, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count); extern void machine_task_terminate(task_t task); @@ -791,6 +792,10 @@ struct _task_ledger_indices { int logical_writes; int energy_billed_to_me; int energy_billed_to_others; + int pages_grabbed; + int pages_grabbed_kern; + int pages_grabbed_iopl; + int pages_grabbed_upl; }; extern struct _task_ledger_indices task_ledgers; @@ -831,15 +836,15 @@ extern queue_head_t * task_io_user_clients(task_t task); extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task); -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE 
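/*
 * Illustrative sketch (hypothetical caller, not part of this patch): a
 * KERNEL_PRIVATE client crediting a deferred logical write against the
 * issuing task might call, with vp the vnode being written:
 *
 *	task_update_logical_writes(current_task(), (uint32_t)io_size,
 *	    TASK_WRITE_DEFERRED, vp);
 *
 * using the TASK_WRITE_* flags defined further below.
 */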
-extern void *get_bsdtask_info(task_t); -extern void *get_bsdthreadtask_info(thread_t); +extern void *get_bsdtask_info(task_t); +extern void *get_bsdthreadtask_info(thread_t); extern void task_bsdtask_kill(task_t); extern vm_map_t get_task_map(task_t); -extern ledger_t get_task_ledger(task_t); +extern ledger_t get_task_ledger(task_t); extern boolean_t get_task_pidsuspended(task_t); extern boolean_t get_task_frozen(task_t); @@ -855,11 +860,11 @@ extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t extern boolean_t task_suspension_notify(mach_msg_header_t *); -#define TASK_WRITE_IMMEDIATE 0x1 -#define TASK_WRITE_DEFERRED 0x2 -#define TASK_WRITE_INVALIDATED 0x4 -#define TASK_WRITE_METADATA 0x8 -extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp); +#define TASK_WRITE_IMMEDIATE 0x1 +#define TASK_WRITE_DEFERRED 0x2 +#define TASK_WRITE_INVALIDATED 0x4 +#define TASK_WRITE_METADATA 0x8 +extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp); #if CONFIG_SECLUDED_MEMORY extern void task_set_can_use_secluded_mem( @@ -888,25 +893,25 @@ extern void task_set_legacy_footprint(task_t task, boolean_t new_val); extern struct label *get_task_crash_label(task_t task); #endif /* CONFIG_MACF */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -extern task_t kernel_task; +extern task_t kernel_task; -extern void task_deallocate( - task_t task); +extern void task_deallocate( + task_t task); -extern void task_name_deallocate( - task_name_t task_name); +extern void task_name_deallocate( + task_name_t task_name); -extern void task_inspect_deallocate( - task_inspect_t task_inspect); +extern void task_inspect_deallocate( + task_inspect_t task_inspect); -extern void task_suspension_token_deallocate( - task_suspension_token_t token); +extern void task_suspension_token_deallocate( + task_suspension_token_t token); extern boolean_t task_self_region_footprint(void); extern void task_self_region_footprint_set(boolean_t newval); __END_DECLS -#endif /* _KERN_TASK_H_ */ +#endif /* _KERN_TASK_H_ */ diff --git a/osfmk/kern/task_policy.c b/osfmk/kern/task_policy.c index f44ba4c84..2faf0f7cd 100644 --- a/osfmk/kern/task_policy.c +++ b/osfmk/kern/task_policy.c @@ -173,15 +173,15 @@ extern char * proc_name_address(void *p); extern char * proc_best_name(proc_t proc); extern int proc_pidpathinfo_internal(proc_t p, uint64_t arg, - char *buffer, uint32_t buffersize, - int32_t *retval); + char *buffer, uint32_t buffersize, + int32_t *retval); #endif /* MACH_BSD */ #if CONFIG_EMBEDDED /* TODO: make CONFIG_TASKWATCH */ /* Taskwatch related helper functions */ -static void set_thread_appbg(thread_t thread, int setbg,int importance); +static void set_thread_appbg(thread_t thread, int setbg, int importance); static void add_taskwatch_locked(task_t task, task_watch_t * twp); static void remove_taskwatch_locked(task_t task, task_watch_t * twp); static void task_watch_lock(void); @@ -265,39 +265,46 @@ const struct task_effective_policy default_task_effective_policy = {}; #define DEFAULT_CPUMON_INTERVAL (3 * 60) uint8_t proc_max_cpumon_percentage; -uint64_t proc_max_cpumon_interval; +uint64_t proc_max_cpumon_interval; kern_return_t -qos_latency_policy_validate(task_latency_qos_t ltier) { +qos_latency_policy_validate(task_latency_qos_t ltier) +{ if ((ltier != LATENCY_QOS_TIER_UNSPECIFIED) && - ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0))) + ((ltier > LATENCY_QOS_TIER_5) || (ltier < LATENCY_QOS_TIER_0))) { return 
KERN_INVALID_ARGUMENT; + } return KERN_SUCCESS; } kern_return_t -qos_throughput_policy_validate(task_throughput_qos_t ttier) { +qos_throughput_policy_validate(task_throughput_qos_t ttier) +{ if ((ttier != THROUGHPUT_QOS_TIER_UNSPECIFIED) && - ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0))) + ((ttier > THROUGHPUT_QOS_TIER_5) || (ttier < THROUGHPUT_QOS_TIER_0))) { return KERN_INVALID_ARGUMENT; + } return KERN_SUCCESS; } static kern_return_t -task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count) { - if (count < TASK_QOS_POLICY_COUNT) +task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count) +{ + if (count < TASK_QOS_POLICY_COUNT) { return KERN_INVALID_ARGUMENT; + } task_latency_qos_t ltier = qosinfo->task_latency_qos_tier; task_throughput_qos_t ttier = qosinfo->task_throughput_qos_tier; kern_return_t kr = qos_latency_policy_validate(ltier); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } kr = qos_throughput_policy_validate(ttier); @@ -305,17 +312,20 @@ task_qos_policy_validate(task_qos_policy_t qosinfo, mach_msg_type_number_t count } uint32_t -qos_extract(uint32_t qv) { - return (qv & 0xFF); +qos_extract(uint32_t qv) +{ + return qv & 0xFF; } uint32_t -qos_latency_policy_package(uint32_t qv) { +qos_latency_policy_package(uint32_t qv) +{ return (qv == LATENCY_QOS_TIER_UNSPECIFIED) ? LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | qv); } uint32_t -qos_throughput_policy_package(uint32_t qv) { +qos_throughput_policy_package(uint32_t qv) +{ return (qv == THROUGHPUT_QOS_TIER_UNSPECIFIED) ? THROUGHPUT_QOS_TIER_UNSPECIFIED : ((0xFE << 16) | qv); } @@ -324,64 +334,68 @@ qos_throughput_policy_package(uint32_t qv) { #define TASK_POLICY_SUPPRESSION_NONDONOR 0x4 /* TEMPORARY boot-arg controlling task_policy suppression (App Nap) */ static boolean_t task_policy_suppression_flags = TASK_POLICY_SUPPRESSION_IOTIER2 | - TASK_POLICY_SUPPRESSION_NONDONOR; + TASK_POLICY_SUPPRESSION_NONDONOR; kern_return_t task_policy_set( - task_t task, - task_policy_flavor_t flavor, - task_policy_t policy_info, - mach_msg_type_number_t count) + task_t task, + task_policy_flavor_t flavor, + task_policy_t policy_info, + mach_msg_type_number_t count) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } switch (flavor) { - case TASK_CATEGORY_POLICY: { task_category_policy_t info = (task_category_policy_t)policy_info; - if (count < TASK_CATEGORY_POLICY_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count < TASK_CATEGORY_POLICY_COUNT) { + return KERN_INVALID_ARGUMENT; + } #if CONFIG_EMBEDDED /* On embedded, you can't modify your own role. 
*/ - if (current_task() == task) - return (KERN_INVALID_ARGUMENT); + if (current_task() == task) { + return KERN_INVALID_ARGUMENT; + } #endif - switch(info->role) { - case TASK_FOREGROUND_APPLICATION: - case TASK_BACKGROUND_APPLICATION: - case TASK_DEFAULT_APPLICATION: + switch (info->role) { + case TASK_FOREGROUND_APPLICATION: + case TASK_BACKGROUND_APPLICATION: + case TASK_DEFAULT_APPLICATION: + proc_set_task_policy(task, + TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, + info->role); + break; + + case TASK_CONTROL_APPLICATION: + if (task != current_task() || task->sec_token.val[0] != 0) { + result = KERN_INVALID_ARGUMENT; + } else { proc_set_task_policy(task, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, - info->role); - break; - - case TASK_CONTROL_APPLICATION: - if (task != current_task() || task->sec_token.val[0] != 0) - result = KERN_INVALID_ARGUMENT; - else - proc_set_task_policy(task, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, - info->role); - break; - - case TASK_GRAPHICS_SERVER: - /* TODO: Restrict this role to FCFS */ - if (task != current_task() || task->sec_token.val[0] != 0) - result = KERN_INVALID_ARGUMENT; - else - proc_set_task_policy(task, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, - info->role); - break; - default: + TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, + info->role); + } + break; + + case TASK_GRAPHICS_SERVER: + /* TODO: Restrict this role to FCFS */ + if (task != current_task() || task->sec_token.val[0] != 0) { result = KERN_INVALID_ARGUMENT; - break; + } else { + proc_set_task_policy(task, + TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, + info->role); + } + break; + default: + result = KERN_INVALID_ARGUMENT; + break; } /* switch (info->role) */ break; @@ -394,16 +408,17 @@ task_policy_set( task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info; kern_return_t kr = task_qos_policy_validate(qosinfo, count); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier); uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier); proc_set_task_policy2(task, TASK_POLICY_ATTRIBUTE, - flavor == TASK_BASE_QOS_POLICY ? TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS : TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, - lqos, tqos); + flavor == TASK_BASE_QOS_POLICY ? 
TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS : TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS, + lqos, tqos); } break; @@ -412,8 +427,9 @@ task_policy_set( task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info; kern_return_t kr = task_qos_policy_validate(qosinfo, count); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } uint32_t lqos = qos_extract(qosinfo->task_latency_qos_tier); @@ -426,8 +442,9 @@ task_policy_set( task_qos_policy_t qosinfo = (task_qos_policy_t)policy_info; kern_return_t kr = task_qos_policy_validate(qosinfo, count); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } uint32_t tqos = qos_extract(qosinfo->task_throughput_qos_tier); @@ -448,8 +465,9 @@ task_policy_set( task_suppression_policy_t info = (task_suppression_policy_t)policy_info; - if (count < TASK_SUPPRESSION_POLICY_COUNT) - return (KERN_INVALID_ARGUMENT); + if (count < TASK_SUPPRESSION_POLICY_COUNT) { + return KERN_INVALID_ARGUMENT; + } struct task_qos_policy qosinfo; @@ -458,22 +476,24 @@ task_policy_set( kern_return_t kr = task_qos_policy_validate(&qosinfo, TASK_QOS_POLICY_COUNT); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* TEMPORARY disablement of task suppression */ if (info->active && - (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_DISABLE)) + (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_DISABLE)) { return KERN_SUCCESS; + } struct task_pend_token pend_token = {}; task_lock(task); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START, - proc_selfpid(), task_pid(task), trequested_0(task), - trequested_1(task), 0); + (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_START, + proc_selfpid(), task_pid(task), trequested_0(task), + trequested_1(task), 0); task->requested_policy.trp_sup_active = (info->active) ? 1 : 0; task->requested_policy.trp_sup_lowpri_cpu = (info->lowpri_cpu) ? 
1 : 0; @@ -486,9 +506,9 @@ task_policy_set( task_policy_update_locked(task, &pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END, - proc_selfpid(), task_pid(task), trequested_0(task), - trequested_1(task), 0); + (IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, info->active)) | DBG_FUNC_END, + proc_selfpid(), task_pid(task), trequested_0(task), + trequested_1(task), 0); task_unlock(task); @@ -504,30 +524,31 @@ task_policy_set( break; } - return (result); + return result; } /* Sets BSD 'nice' value on the task */ kern_return_t task_importance( - task_t task, - integer_t importance) + task_t task, + integer_t importance) { - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); if (!task->active) { task_unlock(task); - return (KERN_TERMINATED); + return KERN_TERMINATED; } if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) >= TASK_CONTROL_APPLICATION) { task_unlock(task); - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } task->importance = importance; @@ -540,33 +561,35 @@ task_importance( task_policy_update_complete_unlocked(task, &pend_token); - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t task_policy_get( - task_t task, - task_policy_flavor_t flavor, - task_policy_t policy_info, - mach_msg_type_number_t *count, - boolean_t *get_default) + task_t task, + task_policy_flavor_t flavor, + task_policy_t policy_info, + mach_msg_type_number_t *count, + boolean_t *get_default) { - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } switch (flavor) { - case TASK_CATEGORY_POLICY: { - task_category_policy_t info = (task_category_policy_t)policy_info; + task_category_policy_t info = (task_category_policy_t)policy_info; - if (*count < TASK_CATEGORY_POLICY_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*count < TASK_CATEGORY_POLICY_COUNT) { + return KERN_INVALID_ARGUMENT; + } - if (*get_default) + if (*get_default) { info->role = TASK_UNSPECIFIED; - else + } else { info->role = proc_get_task_policy(task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE); + } break; } @@ -575,8 +598,9 @@ task_policy_get( { task_qos_policy_t info = (task_qos_policy_t)policy_info; - if (*count < TASK_QOS_POLICY_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*count < TASK_QOS_POLICY_COUNT) { + return KERN_INVALID_ARGUMENT; + } if (*get_default) { info->task_latency_qos_tier = LATENCY_QOS_TIER_UNSPECIFIED; @@ -588,7 +612,6 @@ task_policy_get( info->task_latency_qos_tier = qos_latency_policy_package(value1); info->task_throughput_qos_tier = qos_throughput_policy_package(value2); - } else if (flavor == TASK_OVERRIDE_QOS_POLICY) { int value1, value2; @@ -605,12 +628,14 @@ task_policy_get( { task_policy_state_t info = (task_policy_state_t)policy_info; - if (*count < TASK_POLICY_STATE_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*count < TASK_POLICY_STATE_COUNT) { + return KERN_INVALID_ARGUMENT; + } /* Only root can get this info */ - if (current_task()->sec_token.val[0] != 0) + if (current_task()->sec_token.val[0] != 0) { return KERN_PROTECTION_FAILURE; + } if (*get_default) { info->requested = 0; @@ -654,8 +679,9 @@ task_policy_get( { task_suppression_policy_t info = (task_suppression_policy_t)policy_info; - if (*count < TASK_SUPPRESSION_POLICY_COUNT) - return (KERN_INVALID_ARGUMENT); + if 
(*count < TASK_SUPPRESSION_POLICY_COUNT) { + return KERN_INVALID_ARGUMENT; + } task_lock(task); @@ -685,10 +711,10 @@ task_policy_get( } default: - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -723,16 +749,16 @@ task_policy_create(task_t task, task_t parent_task) } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START, - task_pid(task), teffective_0(task), - teffective_1(task), task->priority, 0); + (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_START, + task_pid(task), teffective_0(task), + teffective_1(task), task->priority, 0); task_policy_update_internal_locked(task, TRUE, NULL); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END, - task_pid(task), teffective_0(task), - teffective_1(task), task->priority, 0); + (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK))) | DBG_FUNC_END, + task_pid(task), teffective_0(task), + teffective_1(task), task->priority, 0); task_importance_update_live_donor(task); } @@ -742,16 +768,16 @@ static void task_policy_update_locked(task_t task, task_pend_token_t pend_token) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK) | DBG_FUNC_START), - task_pid(task), teffective_0(task), - teffective_1(task), task->priority, 0); + (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK) | DBG_FUNC_START), + task_pid(task), teffective_0(task), + teffective_1(task), task->priority, 0); task_policy_update_internal_locked(task, FALSE, pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END, - task_pid(task), teffective_0(task), - teffective_1(task), task->priority, 0); + (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK)) | DBG_FUNC_END, + task_pid(task), teffective_0(task), + teffective_1(task), task->priority, 0); } /* @@ -793,51 +819,50 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t if (requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT || requested.trp_apptype == TASK_APPTYPE_APP_TAL) { - switch (next.tep_role) { - case TASK_FOREGROUND_APPLICATION: - /* Foreground apps get urgent scheduler priority */ - next.tep_qos_ui_is_urgent = 1; - next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; - break; - - case TASK_BACKGROUND_APPLICATION: - /* This is really 'non-focal but on-screen' */ - next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; - break; - - case TASK_DEFAULT_APPLICATION: - /* This is 'may render UI but we don't know if it's focal/nonfocal' */ - next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; - break; - - case TASK_NONUI_APPLICATION: - /* i.e. 'off-screen' */ - next.tep_qos_ceiling = THREAD_QOS_LEGACY; - break; - - case TASK_CONTROL_APPLICATION: - case TASK_GRAPHICS_SERVER: - next.tep_qos_ui_is_urgent = 1; - next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; - break; - - case TASK_THROTTLE_APPLICATION: - /* i.e. 'TAL launch' */ - next.tep_qos_ceiling = THREAD_QOS_UTILITY; - break; - - case TASK_DARWINBG_APPLICATION: - /* i.e. 
'DARWIN_BG throttled background application' */ - next.tep_qos_ceiling = THREAD_QOS_BACKGROUND; - break; - - case TASK_UNSPECIFIED: - default: - /* Apps that don't have an application role get - * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */ - next.tep_qos_ceiling = THREAD_QOS_LEGACY; - break; + case TASK_FOREGROUND_APPLICATION: + /* Foreground apps get urgent scheduler priority */ + next.tep_qos_ui_is_urgent = 1; + next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; + break; + + case TASK_BACKGROUND_APPLICATION: + /* This is really 'non-focal but on-screen' */ + next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; + break; + + case TASK_DEFAULT_APPLICATION: + /* This is 'may render UI but we don't know if it's focal/nonfocal' */ + next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; + break; + + case TASK_NONUI_APPLICATION: + /* i.e. 'off-screen' */ + next.tep_qos_ceiling = THREAD_QOS_LEGACY; + break; + + case TASK_CONTROL_APPLICATION: + case TASK_GRAPHICS_SERVER: + next.tep_qos_ui_is_urgent = 1; + next.tep_qos_ceiling = THREAD_QOS_UNSPECIFIED; + break; + + case TASK_THROTTLE_APPLICATION: + /* i.e. 'TAL launch' */ + next.tep_qos_ceiling = THREAD_QOS_UTILITY; + break; + + case TASK_DARWINBG_APPLICATION: + /* i.e. 'DARWIN_BG throttled background application' */ + next.tep_qos_ceiling = THREAD_QOS_BACKGROUND; + break; + + case TASK_UNSPECIFIED: + default: + /* Apps that don't have an application role get + * USER_INTERACTIVE and USER_INITIATED squashed to LEGACY */ + next.tep_qos_ceiling = THREAD_QOS_LEGACY; + break; } } else { /* Daemons get USER_INTERACTIVE squashed to USER_INITIATED */ @@ -856,37 +881,41 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t * Backgrounding due to apptype does. */ if (requested.trp_int_darwinbg || requested.trp_ext_darwinbg || - next.tep_role == TASK_DARWINBG_APPLICATION) + next.tep_role == TASK_DARWINBG_APPLICATION) { wants_watchersbg = wants_all_sockets_bg = wants_darwinbg = TRUE; + } /* * Deprecated TAL implementation for TAL apptype * Background TAL apps are throttled when TAL is enabled */ - if (requested.trp_apptype == TASK_APPTYPE_APP_TAL && - requested.trp_role == TASK_BACKGROUND_APPLICATION && - requested.trp_tal_enabled == 1) { + if (requested.trp_apptype == TASK_APPTYPE_APP_TAL && + requested.trp_role == TASK_BACKGROUND_APPLICATION && + requested.trp_tal_enabled == 1) { next.tep_tal_engaged = 1; } /* New TAL implementation based on TAL role alone, works for all apps */ - if ((requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT || - requested.trp_apptype == TASK_APPTYPE_APP_TAL) && - requested.trp_role == TASK_THROTTLE_APPLICATION) { + if ((requested.trp_apptype == TASK_APPTYPE_APP_DEFAULT || + requested.trp_apptype == TASK_APPTYPE_APP_TAL) && + requested.trp_role == TASK_THROTTLE_APPLICATION) { next.tep_tal_engaged = 1; } /* Adaptive daemons are DARWIN_BG unless boosted, and don't get network throttled. */ if (requested.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && - requested.trp_boosted == 0) + requested.trp_boosted == 0) { wants_darwinbg = TRUE; + } /* Background daemons are always DARWIN_BG, no exceptions, and don't get network throttled. 
*/ - if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) + if (requested.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) { wants_darwinbg = TRUE; + } - if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND || next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) + if (next.tep_qos_clamp == THREAD_QOS_BACKGROUND || next.tep_qos_clamp == THREAD_QOS_MAINTENANCE) { wants_darwinbg = TRUE; + } /* Calculate side effects of DARWIN_BG */ @@ -897,27 +926,33 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t next.tep_lowpri_cpu = 1; } - if (wants_all_sockets_bg) + if (wants_all_sockets_bg) { next.tep_all_sockets_bg = 1; + } - if (wants_watchersbg) + if (wants_watchersbg) { next.tep_watchers_bg = 1; + } /* Calculate low CPU priority */ boolean_t wants_lowpri_cpu = FALSE; - if (wants_darwinbg) + if (wants_darwinbg) { wants_lowpri_cpu = TRUE; + } - if (next.tep_tal_engaged) + if (next.tep_tal_engaged) { wants_lowpri_cpu = TRUE; + } - if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0) + if (requested.trp_sup_lowpri_cpu && requested.trp_boosted == 0) { wants_lowpri_cpu = TRUE; + } - if (wants_lowpri_cpu) + if (wants_lowpri_cpu) { next.tep_lowpri_cpu = 1; + } /* Calculate IO policy */ @@ -926,20 +961,25 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t int iopol = THROTTLE_LEVEL_TIER0; - if (wants_darwinbg) + if (wants_darwinbg) { iopol = MAX(iopol, requested.trp_bg_iotier); + } - if (requested.trp_apptype == TASK_APPTYPE_DAEMON_STANDARD) + if (requested.trp_apptype == TASK_APPTYPE_DAEMON_STANDARD) { iopol = MAX(iopol, proc_standard_daemon_tier); + } - if (requested.trp_sup_disk && requested.trp_boosted == 0) + if (requested.trp_sup_disk && requested.trp_boosted == 0) { iopol = MAX(iopol, proc_suppressed_disk_tier); + } - if (next.tep_tal_engaged) + if (next.tep_tal_engaged) { iopol = MAX(iopol, proc_tal_disk_tier); + } - if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) + if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) { iopol = MAX(iopol, thread_qos_policy_params.qos_iotier[next.tep_qos_clamp]); + } iopol = MAX(iopol, requested.trp_int_iotier); iopol = MAX(iopol, requested.trp_ext_iotier); @@ -948,59 +988,70 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t /* Calculate Passive IO policy */ - if (requested.trp_ext_iopassive || requested.trp_int_iopassive) + if (requested.trp_ext_iopassive || requested.trp_int_iopassive) { next.tep_io_passive = 1; + } /* Calculate suppression-active flag */ boolean_t appnap_transition = FALSE; - if (requested.trp_sup_active && requested.trp_boosted == 0) + if (requested.trp_sup_active && requested.trp_boosted == 0) { next.tep_sup_active = 1; + } - if (task->effective_policy.tep_sup_active != next.tep_sup_active) + if (task->effective_policy.tep_sup_active != next.tep_sup_active) { appnap_transition = TRUE; + } /* Calculate timer QOS */ int latency_qos = requested.trp_base_latency_qos; - if (requested.trp_sup_timer && requested.trp_boosted == 0) + if (requested.trp_sup_timer && requested.trp_boosted == 0) { latency_qos = requested.trp_sup_timer; + } - if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) + if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) { latency_qos = MAX(latency_qos, (int)thread_qos_policy_params.qos_latency_qos[next.tep_qos_clamp]); + } - if (requested.trp_over_latency_qos != 0) + if (requested.trp_over_latency_qos != 0) { latency_qos = requested.trp_over_latency_qos; + } /* Treat the windowserver special */ - if (requested.trp_role == 
TASK_GRAPHICS_SERVER) + if (requested.trp_role == TASK_GRAPHICS_SERVER) { latency_qos = proc_graphics_timer_qos; + } next.tep_latency_qos = latency_qos; /* Calculate throughput QOS */ int through_qos = requested.trp_base_through_qos; - if (requested.trp_sup_throughput && requested.trp_boosted == 0) + if (requested.trp_sup_throughput && requested.trp_boosted == 0) { through_qos = requested.trp_sup_throughput; + } - if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) + if (next.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) { through_qos = MAX(through_qos, (int)thread_qos_policy_params.qos_through_qos[next.tep_qos_clamp]); + } - if (requested.trp_over_through_qos != 0) + if (requested.trp_over_through_qos != 0) { through_qos = requested.trp_over_through_qos; + } next.tep_through_qos = through_qos; /* Calculate suppressed CPU priority */ - if (requested.trp_sup_cpu && requested.trp_boosted == 0) + if (requested.trp_sup_cpu && requested.trp_boosted == 0) { next.tep_suppressed_cpu = 1; + } /* * Calculate background sockets * Don't take into account boosting to limit transition frequency. */ - if (requested.trp_sup_bg_sockets){ + if (requested.trp_sup_bg_sockets) { next.tep_all_sockets_bg = 1; next.tep_new_sockets_bg = 1; } @@ -1010,25 +1061,25 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t /* Calculate 'live donor' status for live importance */ switch (requested.trp_apptype) { - case TASK_APPTYPE_APP_TAL: - case TASK_APPTYPE_APP_DEFAULT: - if (requested.trp_ext_darwinbg == 1 || - (next.tep_sup_active == 1 && - (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_NONDONOR)) || - next.tep_role == TASK_DARWINBG_APPLICATION) { - next.tep_live_donor = 0; - } else { - next.tep_live_donor = 1; - } - break; - - case TASK_APPTYPE_DAEMON_INTERACTIVE: - case TASK_APPTYPE_DAEMON_STANDARD: - case TASK_APPTYPE_DAEMON_ADAPTIVE: - case TASK_APPTYPE_DAEMON_BACKGROUND: - default: + case TASK_APPTYPE_APP_TAL: + case TASK_APPTYPE_APP_DEFAULT: + if (requested.trp_ext_darwinbg == 1 || + (next.tep_sup_active == 1 && + (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_NONDONOR)) || + next.tep_role == TASK_DARWINBG_APPLICATION) { next.tep_live_donor = 0; - break; + } else { + next.tep_live_donor = 1; + } + break; + + case TASK_APPTYPE_DAEMON_INTERACTIVE: + case TASK_APPTYPE_DAEMON_STANDARD: + case TASK_APPTYPE_DAEMON_ADAPTIVE: + case TASK_APPTYPE_DAEMON_BACKGROUND: + default: + next.tep_live_donor = 0; + break; } if (requested.trp_terminated) { @@ -1063,31 +1114,37 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t task->effective_policy = next; /* Don't do anything further to a half-formed task */ - if (in_create) + if (in_create) { return; + } - if (task == kernel_task) + if (task == kernel_task) { panic("Attempting to set task policy on kernel_task"); + } /* * Step 4: * Pend updates that can't be done while holding the task lock */ - if (prev.tep_all_sockets_bg != next.tep_all_sockets_bg) + if (prev.tep_all_sockets_bg != next.tep_all_sockets_bg) { pend_token->tpt_update_sockets = 1; + } /* Only re-scan the timer list if the qos level is getting less strong */ - if (prev.tep_latency_qos > next.tep_latency_qos) + if (prev.tep_latency_qos > next.tep_latency_qos) { pend_token->tpt_update_timers = 1; + } #if CONFIG_EMBEDDED - if (prev.tep_watchers_bg != next.tep_watchers_bg) + if (prev.tep_watchers_bg != next.tep_watchers_bg) { pend_token->tpt_update_watchers = 1; + } #endif /* CONFIG_EMBEDDED */ - if (prev.tep_live_donor != next.tep_live_donor) + 
if (prev.tep_live_donor != next.tep_live_donor) { pend_token->tpt_update_live_donor = 1; + } /* * Step 5: @@ -1100,33 +1157,36 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t * Check for the attributes that thread_policy_update_internal_locked() consults, * and trigger thread policy re-evaluation. */ - if (prev.tep_io_tier != next.tep_io_tier || - prev.tep_bg_iotier != next.tep_bg_iotier || - prev.tep_io_passive != next.tep_io_passive || - prev.tep_darwinbg != next.tep_darwinbg || - prev.tep_qos_clamp != next.tep_qos_clamp || - prev.tep_qos_ceiling != next.tep_qos_ceiling || - prev.tep_qos_ui_is_urgent != next.tep_qos_ui_is_urgent || - prev.tep_latency_qos != next.tep_latency_qos || - prev.tep_through_qos != next.tep_through_qos || - prev.tep_lowpri_cpu != next.tep_lowpri_cpu || - prev.tep_new_sockets_bg != next.tep_new_sockets_bg || - prev.tep_terminated != next.tep_terminated ) + if (prev.tep_io_tier != next.tep_io_tier || + prev.tep_bg_iotier != next.tep_bg_iotier || + prev.tep_io_passive != next.tep_io_passive || + prev.tep_darwinbg != next.tep_darwinbg || + prev.tep_qos_clamp != next.tep_qos_clamp || + prev.tep_qos_ceiling != next.tep_qos_ceiling || + prev.tep_qos_ui_is_urgent != next.tep_qos_ui_is_urgent || + prev.tep_latency_qos != next.tep_latency_qos || + prev.tep_through_qos != next.tep_through_qos || + prev.tep_lowpri_cpu != next.tep_lowpri_cpu || + prev.tep_new_sockets_bg != next.tep_new_sockets_bg || + prev.tep_terminated != next.tep_terminated) { update_threads = TRUE; + } /* * Check for the attributes that sfi_thread_classify() consults, * and trigger SFI re-evaluation. */ if (prev.tep_latency_qos != next.tep_latency_qos || - prev.tep_role != next.tep_role || - prev.tep_sfi_managed != next.tep_sfi_managed ) + prev.tep_role != next.tep_role || + prev.tep_sfi_managed != next.tep_sfi_managed) { update_sfi = TRUE; + } /* Reflect task role transitions into the coalition role counters */ if (prev.tep_role != next.tep_role) { - if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role, pend_token)) + if (task_policy_update_coalition_focal_tasks(task, prev.tep_role, next.tep_role, pend_token)) { update_sfi = TRUE; + } } boolean_t update_priority = FALSE; @@ -1142,15 +1202,15 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t max_priority = MAXPRI_SUPPRESSED; } else { switch (next.tep_role) { - case TASK_CONTROL_APPLICATION: - priority = BASEPRI_CONTROL; - break; - case TASK_GRAPHICS_SERVER: - priority = BASEPRI_GRAPHICS; - max_priority = MAXPRI_RESERVED; - break; - default: - break; + case TASK_CONTROL_APPLICATION: + priority = BASEPRI_CONTROL; + break; + case TASK_GRAPHICS_SERVER: + priority = BASEPRI_GRAPHICS; + max_priority = MAXPRI_RESERVED; + break; + default: + break; } /* factor in 'nice' value */ @@ -1163,17 +1223,18 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t max_priority = MIN(max_priority, qos_clamp_priority); } - if (priority > max_priority) + if (priority > max_priority) { priority = max_priority; - else if (priority < MINPRI) + } else if (priority < MINPRI) { priority = MINPRI; + } } assert(priority <= max_priority); /* avoid extra work if priority isn't changing */ - if (priority != task->priority || - max_priority != task->max_priority ) { + if (priority != task->priority || + max_priority != task->max_priority) { /* update the scheduling priority for the task */ task->max_priority = max_priority; task->priority = priority; @@ -1191,13 
+1252,15 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t queue_iterate(&task->threads, thread, thread_t, task_threads) { struct task_pend_token thread_pend_token = {}; - if (update_sfi) + if (update_sfi) { thread_pend_token.tpt_update_thread_sfi = 1; + } - if (update_priority || update_threads) + if (update_priority || update_threads) { thread_policy_update_tasklocked(thread, - task->priority, task->max_priority, - &thread_pend_token); + task->priority, task->max_priority, + &thread_pend_token); + } assert(!thread_pend_token.tpt_update_sockets); @@ -1214,7 +1277,6 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t */ if (appnap_transition == TRUE) { if (task->effective_policy.tep_sup_active == 1) { - memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), TRUE); } else { memorystatus_update_priority_for_appnap(((proc_t) task->bsd_info), FALSE); @@ -1228,13 +1290,13 @@ task_policy_update_internal_locked(task_t task, boolean_t in_create, task_pend_t */ static boolean_t task_policy_update_coalition_focal_tasks(task_t task, - int prev_role, - int next_role, - task_pend_token_t pend_token) + int prev_role, + int next_role, + task_pend_token_t pend_token) { boolean_t sfi_transition = FALSE; uint32_t new_count = 0; - + /* task moving into/out-of the foreground */ if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) { if (task_coalition_adjust_focal_count(task, 1, &new_count) && (new_count == 1)) { @@ -1250,15 +1312,18 @@ task_policy_update_coalition_focal_tasks(task_t task, /* task moving into/out-of background */ if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) { - if (task_coalition_adjust_nonfocal_count(task, 1, &new_count) && (new_count == 1)) + if (task_coalition_adjust_nonfocal_count(task, 1, &new_count) && (new_count == 1)) { sfi_transition = TRUE; + } } else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) { - if (task_coalition_adjust_nonfocal_count(task, -1, &new_count) && (new_count == 0)) + if (task_coalition_adjust_nonfocal_count(task, -1, &new_count) && (new_count == 0)) { sfi_transition = TRUE; + } } - if (sfi_transition) - pend_token->tpt_update_coal_sfi = 1; + if (sfi_transition) { + pend_token->tpt_update_coal_sfi = 1; + } return sfi_transition; } @@ -1274,8 +1339,9 @@ task_sfi_reevaluate_cb(coalition_t coal, void *ctx, task_t task) (void)coal; /* skip the task we're re-evaluating on behalf of: it's already updated */ - if (task == (task_t)ctx) + if (task == (task_t)ctx) { return; + } task_lock(task); @@ -1294,27 +1360,32 @@ void task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token) { #ifdef MACH_BSD - if (pend_token->tpt_update_sockets) + if (pend_token->tpt_update_sockets) { proc_apply_task_networkbg(task->bsd_info, THREAD_NULL); + } #endif /* MACH_BSD */ /* The timer throttle has been removed or reduced, we need to look for expired timers and fire them */ - if (pend_token->tpt_update_timers) + if (pend_token->tpt_update_timers) { ml_timer_evaluate(); + } #if CONFIG_EMBEDDED - if (pend_token->tpt_update_watchers) + if (pend_token->tpt_update_watchers) { apply_appstate_watchers(task); + } #endif /* CONFIG_EMBEDDED */ - if (pend_token->tpt_update_live_donor) + if (pend_token->tpt_update_live_donor) { task_importance_update_live_donor(task); + } #if CONFIG_SCHED_SFI /* use the resource coalition for SFI re-evaluation */ - if (pend_token->tpt_update_coal_sfi) 
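For orientation, the pend_token flow that the brace cleanups in this hunk keep intact: updates that must not run under the task lock are recorded in a task_pend_token while the lock is held, and task_policy_update_complete_unlocked() drains them afterwards. A condensed sketch of the pattern, calling the real functions from this file inside a hypothetical wrapper (set_policy_and_flush is for illustration only; it mirrors the body of proc_set_task_policy shown further down):

/*
 * Sketch of the two-phase update pattern used throughout task_policy.c:
 * mutate and re-derive policy under the task lock, collect deferred
 * side effects in a task_pend_token, then apply them unlocked.
 */
static void
set_policy_and_flush(task_t task, int category, int flavor, int value)
{
	struct task_pend_token pend_token = {};

	task_lock(task);
	proc_set_task_policy_locked(task, category, flavor, value, 0);
	task_policy_update_locked(task, &pend_token); /* sets tpt_* bits */
	task_unlock(task);

	/* drains tpt_update_sockets, tpt_update_timers, tpt_update_coal_sfi, ... */
	task_policy_update_complete_unlocked(task, &pend_token);
}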
+ if (pend_token->tpt_update_coal_sfi) { coalition_for_each_task(task->coalition[COALITION_TYPE_RESOURCE], - (void *)task, task_sfi_reevaluate_cb); + (void *)task, task_sfi_reevaluate_cb); + } #endif /* CONFIG_SCHED_SFI */ } @@ -1331,18 +1402,18 @@ task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token) */ void proc_set_task_policy(task_t task, - int category, - int flavor, - int value) + int category, + int flavor, + int value) { struct task_pend_token pend_token = {}; task_lock(task); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START, - task_pid(task), trequested_0(task), - trequested_1(task), value, 0); + (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START, + task_pid(task), trequested_0(task), + trequested_1(task), value, 0); proc_set_task_policy_locked(task, category, flavor, value, 0); @@ -1350,9 +1421,9 @@ proc_set_task_policy(task_t task, KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END, - task_pid(task), trequested_0(task), - trequested_1(task), tpending(&pend_token), 0); + (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END, + task_pid(task), trequested_0(task), + trequested_1(task), tpending(&pend_token), 0); task_unlock(task); @@ -1365,28 +1436,28 @@ proc_set_task_policy(task_t task, */ void proc_set_task_policy2(task_t task, - int category, - int flavor, - int value, - int value2) + int category, + int flavor, + int value, + int value2) { struct task_pend_token pend_token = {}; task_lock(task); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START, - task_pid(task), trequested_0(task), - trequested_1(task), value, 0); + (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_START, + task_pid(task), trequested_0(task), + trequested_1(task), value, 0); proc_set_task_policy_locked(task, category, flavor, value, value2); task_policy_update_locked(task, &pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END, - task_pid(task), trequested_0(task), - trequested_1(task), tpending(&pend_token), 0); + (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_TASK))) | DBG_FUNC_END, + task_pid(task), trequested_0(task), + trequested_1(task), tpending(&pend_token), 0); task_unlock(task); @@ -1401,111 +1472,113 @@ proc_set_task_policy2(task_t task, */ static void proc_set_task_policy_locked(task_t task, - int category, - int flavor, - int value, - int value2) + int category, + int flavor, + int value, + int value2) { int tier, passive; struct task_requested_policy requested = task->requested_policy; switch (flavor) { - /* Category: EXTERNAL and INTERNAL */ - case TASK_POLICY_DARWIN_BG: - if (category == TASK_POLICY_EXTERNAL) - requested.trp_ext_darwinbg = value; - else - requested.trp_int_darwinbg = value; - break; + case TASK_POLICY_DARWIN_BG: + if (category == TASK_POLICY_EXTERNAL) { + requested.trp_ext_darwinbg = value; + } else { + requested.trp_int_darwinbg = value; + } + break; - case TASK_POLICY_IOPOL: - proc_iopol_to_tier(value, &tier, &passive); - if (category == TASK_POLICY_EXTERNAL) { - requested.trp_ext_iotier = tier; - requested.trp_ext_iopassive = passive; - } else { - requested.trp_int_iotier = tier; - requested.trp_int_iopassive = passive; - } - break; + case TASK_POLICY_IOPOL: + proc_iopol_to_tier(value, &tier, &passive); + if (category == 
TASK_POLICY_EXTERNAL) { + requested.trp_ext_iotier = tier; + requested.trp_ext_iopassive = passive; + } else { + requested.trp_int_iotier = tier; + requested.trp_int_iopassive = passive; + } + break; - case TASK_POLICY_IO: - if (category == TASK_POLICY_EXTERNAL) - requested.trp_ext_iotier = value; - else - requested.trp_int_iotier = value; - break; + case TASK_POLICY_IO: + if (category == TASK_POLICY_EXTERNAL) { + requested.trp_ext_iotier = value; + } else { + requested.trp_int_iotier = value; + } + break; - case TASK_POLICY_PASSIVE_IO: - if (category == TASK_POLICY_EXTERNAL) - requested.trp_ext_iopassive = value; - else - requested.trp_int_iopassive = value; - break; + case TASK_POLICY_PASSIVE_IO: + if (category == TASK_POLICY_EXTERNAL) { + requested.trp_ext_iopassive = value; + } else { + requested.trp_int_iopassive = value; + } + break; /* Category: INTERNAL */ - case TASK_POLICY_DARWIN_BG_IOPOL: - assert(category == TASK_POLICY_INTERNAL); - proc_iopol_to_tier(value, &tier, &passive); - requested.trp_bg_iotier = tier; - break; + case TASK_POLICY_DARWIN_BG_IOPOL: + assert(category == TASK_POLICY_INTERNAL); + proc_iopol_to_tier(value, &tier, &passive); + requested.trp_bg_iotier = tier; + break; /* Category: ATTRIBUTE */ - case TASK_POLICY_TAL: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_tal_enabled = value; - break; + case TASK_POLICY_TAL: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_tal_enabled = value; + break; - case TASK_POLICY_BOOST: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_boosted = value; - break; + case TASK_POLICY_BOOST: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_boosted = value; + break; - case TASK_POLICY_ROLE: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_role = value; - break; + case TASK_POLICY_ROLE: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_role = value; + break; - case TASK_POLICY_TERMINATED: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_terminated = value; - break; + case TASK_POLICY_TERMINATED: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_terminated = value; + break; - case TASK_BASE_LATENCY_QOS_POLICY: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_base_latency_qos = value; - break; + case TASK_BASE_LATENCY_QOS_POLICY: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_base_latency_qos = value; + break; - case TASK_BASE_THROUGHPUT_QOS_POLICY: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_base_through_qos = value; - break; + case TASK_BASE_THROUGHPUT_QOS_POLICY: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_base_through_qos = value; + break; - case TASK_POLICY_SFI_MANAGED: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_sfi_managed = value; - break; + case TASK_POLICY_SFI_MANAGED: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_sfi_managed = value; + break; - case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_base_latency_qos = value; - requested.trp_base_through_qos = value2; - break; + case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_base_latency_qos = value; + requested.trp_base_through_qos = value2; + break; - case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.trp_over_latency_qos = value; - requested.trp_over_through_qos = value2; - break; + case 
TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.trp_over_latency_qos = value; + requested.trp_over_through_qos = value2; + break; - default: - panic("unknown task policy: %d %d %d %d", category, flavor, value, value2); - break; + default: + panic("unknown task policy: %d %d %d %d", category, flavor, value, value2); + break; } task->requested_policy = requested; @@ -1516,8 +1589,8 @@ proc_set_task_policy_locked(task_t task, */ int proc_get_task_policy(task_t task, - int category, - int flavor) + int category, + int flavor) { int value = 0; @@ -1526,47 +1599,51 @@ proc_get_task_policy(task_t task, struct task_requested_policy requested = task->requested_policy; switch (flavor) { - case TASK_POLICY_DARWIN_BG: - if (category == TASK_POLICY_EXTERNAL) - value = requested.trp_ext_darwinbg; - else - value = requested.trp_int_darwinbg; - break; - case TASK_POLICY_IOPOL: - if (category == TASK_POLICY_EXTERNAL) - value = proc_tier_to_iopol(requested.trp_ext_iotier, - requested.trp_ext_iopassive); - else - value = proc_tier_to_iopol(requested.trp_int_iotier, - requested.trp_int_iopassive); - break; - case TASK_POLICY_IO: - if (category == TASK_POLICY_EXTERNAL) - value = requested.trp_ext_iotier; - else - value = requested.trp_int_iotier; - break; - case TASK_POLICY_PASSIVE_IO: - if (category == TASK_POLICY_EXTERNAL) - value = requested.trp_ext_iopassive; - else - value = requested.trp_int_iopassive; - break; - case TASK_POLICY_DARWIN_BG_IOPOL: - assert(category == TASK_POLICY_ATTRIBUTE); - value = proc_tier_to_iopol(requested.trp_bg_iotier, 0); - break; - case TASK_POLICY_ROLE: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.trp_role; - break; - case TASK_POLICY_SFI_MANAGED: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.trp_sfi_managed; - break; - default: - panic("unknown policy_flavor %d", flavor); - break; + case TASK_POLICY_DARWIN_BG: + if (category == TASK_POLICY_EXTERNAL) { + value = requested.trp_ext_darwinbg; + } else { + value = requested.trp_int_darwinbg; + } + break; + case TASK_POLICY_IOPOL: + if (category == TASK_POLICY_EXTERNAL) { + value = proc_tier_to_iopol(requested.trp_ext_iotier, + requested.trp_ext_iopassive); + } else { + value = proc_tier_to_iopol(requested.trp_int_iotier, + requested.trp_int_iopassive); + } + break; + case TASK_POLICY_IO: + if (category == TASK_POLICY_EXTERNAL) { + value = requested.trp_ext_iotier; + } else { + value = requested.trp_int_iotier; + } + break; + case TASK_POLICY_PASSIVE_IO: + if (category == TASK_POLICY_EXTERNAL) { + value = requested.trp_ext_iopassive; + } else { + value = requested.trp_int_iopassive; + } + break; + case TASK_POLICY_DARWIN_BG_IOPOL: + assert(category == TASK_POLICY_ATTRIBUTE); + value = proc_tier_to_iopol(requested.trp_bg_iotier, 0); + break; + case TASK_POLICY_ROLE: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.trp_role; + break; + case TASK_POLICY_SFI_MANAGED: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.trp_sfi_managed; + break; + default: + panic("unknown policy_flavor %d", flavor); + break; } task_unlock(task); @@ -1579,31 +1656,31 @@ proc_get_task_policy(task_t task, */ void proc_get_task_policy2(task_t task, - __assert_only int category, - int flavor, - int *value1, - int *value2) + __assert_only int category, + int flavor, + int *value1, + int *value2) { task_lock(task); struct task_requested_policy requested = task->requested_policy; switch (flavor) { - case 
TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - *value1 = requested.trp_base_latency_qos; - *value2 = requested.trp_base_through_qos; - break; + case TASK_POLICY_BASE_LATENCY_AND_THROUGHPUT_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + *value1 = requested.trp_base_latency_qos; + *value2 = requested.trp_base_through_qos; + break; - case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - *value1 = requested.trp_over_latency_qos; - *value2 = requested.trp_over_through_qos; - break; + case TASK_POLICY_OVERRIDE_LATENCY_AND_THROUGHPUT_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + *value1 = requested.trp_over_latency_qos; + *value2 = requested.trp_over_through_qos; + break; - default: - panic("unknown policy_flavor %d", flavor); - break; + default: + panic("unknown policy_flavor %d", flavor); + break; } task_unlock(task); @@ -1623,68 +1700,76 @@ proc_get_task_policy2(task_t task, */ int proc_get_effective_task_policy(task_t task, - int flavor) + int flavor) { int value = 0; switch (flavor) { - case TASK_POLICY_DARWIN_BG: - /* - * This backs the KPI call proc_pidbackgrounded to find - * out if a pid is backgrounded. - * It is used to communicate state to the VM system, as well as - * prioritizing requests to the graphics system. - * Returns 1 for background mode, 0 for normal mode - */ - value = task->effective_policy.tep_darwinbg; - break; - case TASK_POLICY_ALL_SOCKETS_BG: - /* - * do_background_socket() calls this to determine what it should do to the proc's sockets - * Returns 1 for background mode, 0 for normal mode - * - * This consults both thread and task so un-DBGing a thread while the task is BG - * doesn't get you out of the network throttle. - */ - value = task->effective_policy.tep_all_sockets_bg; - break; - case TASK_POLICY_LATENCY_QOS: - /* - * timer arming calls into here to find out the timer coalescing level - * Returns a QoS tier (0-6) - */ - value = task->effective_policy.tep_latency_qos; - break; - case TASK_POLICY_THROUGH_QOS: - /* - * This value is passed into the urgency callout from the scheduler - * to the performance management subsystem. - * Returns a QoS tier (0-6) - */ - value = task->effective_policy.tep_through_qos; - break; - case TASK_POLICY_ROLE: - /* - * This controls various things that ask whether a process is foreground, - * like SFI, VM, access to GPU, etc - */ - value = task->effective_policy.tep_role; - break; - case TASK_POLICY_WATCHERS_BG: - /* - * This controls whether or not a thread watching this process should be BG. - */ - value = task->effective_policy.tep_watchers_bg; - break; - case TASK_POLICY_SFI_MANAGED: - /* - * This controls whether or not a process is targeted for specific control by thermald. - */ - value = task->effective_policy.tep_sfi_managed; - break; - default: - panic("unknown policy_flavor %d", flavor); - break; + case TASK_POLICY_DARWIN_BG: + /* + * This backs the KPI call proc_pidbackgrounded to find + * out if a pid is backgrounded. + * It is used to communicate state to the VM system, as well as + * prioritizing requests to the graphics system. 
+ * Returns 1 for background mode, 0 for normal mode + */ + value = task->effective_policy.tep_darwinbg; + break; + case TASK_POLICY_ALL_SOCKETS_BG: + /* + * do_background_socket() calls this to determine what it should do to the proc's sockets + * Returns 1 for background mode, 0 for normal mode + * + * This consults both thread and task so un-DBGing a thread while the task is BG + * doesn't get you out of the network throttle. + */ + value = task->effective_policy.tep_all_sockets_bg; + break; + case TASK_POLICY_SUP_ACTIVE: + /* + * Is the task in AppNap? This is used to determine the urgency + * that's passed to the performance management subsystem for threads + * that are running at a priority <= MAXPRI_THROTTLE. + */ + value = task->effective_policy.tep_sup_active; + break; + case TASK_POLICY_LATENCY_QOS: + /* + * timer arming calls into here to find out the timer coalescing level + * Returns a QoS tier (0-6) + */ + value = task->effective_policy.tep_latency_qos; + break; + case TASK_POLICY_THROUGH_QOS: + /* + * This value is passed into the urgency callout from the scheduler + * to the performance management subsystem. + * Returns a QoS tier (0-6) + */ + value = task->effective_policy.tep_through_qos; + break; + case TASK_POLICY_ROLE: + /* + * This controls various things that ask whether a process is foreground, + * like SFI, VM, access to GPU, etc + */ + value = task->effective_policy.tep_role; + break; + case TASK_POLICY_WATCHERS_BG: + /* + * This controls whether or not a thread watching this process should be BG. + */ + value = task->effective_policy.tep_watchers_bg; + break; + case TASK_POLICY_SFI_MANAGED: + /* + * This controls whether or not a process is targeted for specific control by thermald. + */ + value = task->effective_policy.tep_sfi_managed; + break; + default: + panic("unknown policy_flavor %d", flavor); + break; } return value; @@ -1703,25 +1788,25 @@ proc_iopol_to_tier(int iopolicy, int *tier, int *passive) *passive = 0; *tier = 0; switch (iopolicy) { - case IOPOL_IMPORTANT: - *tier = THROTTLE_LEVEL_TIER0; - break; - case IOPOL_PASSIVE: - *tier = THROTTLE_LEVEL_TIER0; - *passive = 1; - break; - case IOPOL_STANDARD: - *tier = THROTTLE_LEVEL_TIER1; - break; - case IOPOL_UTILITY: - *tier = THROTTLE_LEVEL_TIER2; - break; - case IOPOL_THROTTLE: - *tier = THROTTLE_LEVEL_TIER3; - break; - default: - panic("unknown I/O policy %d", iopolicy); - break; + case IOPOL_IMPORTANT: + *tier = THROTTLE_LEVEL_TIER0; + break; + case IOPOL_PASSIVE: + *tier = THROTTLE_LEVEL_TIER0; + *passive = 1; + break; + case IOPOL_STANDARD: + *tier = THROTTLE_LEVEL_TIER1; + break; + case IOPOL_UTILITY: + *tier = THROTTLE_LEVEL_TIER2; + break; + case IOPOL_THROTTLE: + *tier = THROTTLE_LEVEL_TIER3; + break; + default: + panic("unknown I/O policy %d", iopolicy); + break; } } @@ -1730,26 +1815,26 @@ proc_tier_to_iopol(int tier, int passive) { if (passive == 1) { switch (tier) { - case THROTTLE_LEVEL_TIER0: - return IOPOL_PASSIVE; - default: - panic("unknown passive tier %d", tier); - return IOPOL_DEFAULT; + case THROTTLE_LEVEL_TIER0: + return IOPOL_PASSIVE; + default: + panic("unknown passive tier %d", tier); + return IOPOL_DEFAULT; } } else { switch (tier) { - case THROTTLE_LEVEL_NONE: - case THROTTLE_LEVEL_TIER0: - return IOPOL_DEFAULT; - case THROTTLE_LEVEL_TIER1: - return IOPOL_STANDARD; - case THROTTLE_LEVEL_TIER2: - return IOPOL_UTILITY; - case THROTTLE_LEVEL_TIER3: - return IOPOL_THROTTLE; - default: - panic("unknown tier %d", tier); - return IOPOL_DEFAULT; + case THROTTLE_LEVEL_NONE: + case 
THROTTLE_LEVEL_TIER0: + return IOPOL_DEFAULT; + case THROTTLE_LEVEL_TIER1: + return IOPOL_STANDARD; + case THROTTLE_LEVEL_TIER2: + return IOPOL_UTILITY; + case THROTTLE_LEVEL_TIER3: + return IOPOL_THROTTLE; + default: + panic("unknown tier %d", tier); + return IOPOL_DEFAULT; } } } @@ -1760,29 +1845,29 @@ proc_darwin_role_to_task_role(int darwin_role, int* task_role) integer_t role = TASK_UNSPECIFIED; switch (darwin_role) { - case PRIO_DARWIN_ROLE_DEFAULT: - role = TASK_UNSPECIFIED; - break; - case PRIO_DARWIN_ROLE_UI_FOCAL: - role = TASK_FOREGROUND_APPLICATION; - break; - case PRIO_DARWIN_ROLE_UI: - role = TASK_DEFAULT_APPLICATION; - break; - case PRIO_DARWIN_ROLE_NON_UI: - role = TASK_NONUI_APPLICATION; - break; - case PRIO_DARWIN_ROLE_UI_NON_FOCAL: - role = TASK_BACKGROUND_APPLICATION; - break; - case PRIO_DARWIN_ROLE_TAL_LAUNCH: - role = TASK_THROTTLE_APPLICATION; - break; - case PRIO_DARWIN_ROLE_DARWIN_BG: - role = TASK_DARWINBG_APPLICATION; - break; - default: - return EINVAL; + case PRIO_DARWIN_ROLE_DEFAULT: + role = TASK_UNSPECIFIED; + break; + case PRIO_DARWIN_ROLE_UI_FOCAL: + role = TASK_FOREGROUND_APPLICATION; + break; + case PRIO_DARWIN_ROLE_UI: + role = TASK_DEFAULT_APPLICATION; + break; + case PRIO_DARWIN_ROLE_NON_UI: + role = TASK_NONUI_APPLICATION; + break; + case PRIO_DARWIN_ROLE_UI_NON_FOCAL: + role = TASK_BACKGROUND_APPLICATION; + break; + case PRIO_DARWIN_ROLE_TAL_LAUNCH: + role = TASK_THROTTLE_APPLICATION; + break; + case PRIO_DARWIN_ROLE_DARWIN_BG: + role = TASK_DARWINBG_APPLICATION; + break; + default: + return EINVAL; } *task_role = role; @@ -1794,21 +1879,21 @@ int proc_task_role_to_darwin_role(int task_role) { switch (task_role) { - case TASK_FOREGROUND_APPLICATION: - return PRIO_DARWIN_ROLE_UI_FOCAL; - case TASK_BACKGROUND_APPLICATION: - return PRIO_DARWIN_ROLE_UI_NON_FOCAL; - case TASK_NONUI_APPLICATION: - return PRIO_DARWIN_ROLE_NON_UI; - case TASK_DEFAULT_APPLICATION: - return PRIO_DARWIN_ROLE_UI; - case TASK_THROTTLE_APPLICATION: - return PRIO_DARWIN_ROLE_TAL_LAUNCH; - case TASK_DARWINBG_APPLICATION: - return PRIO_DARWIN_ROLE_DARWIN_BG; - case TASK_UNSPECIFIED: - default: - return PRIO_DARWIN_ROLE_DEFAULT; + case TASK_FOREGROUND_APPLICATION: + return PRIO_DARWIN_ROLE_UI_FOCAL; + case TASK_BACKGROUND_APPLICATION: + return PRIO_DARWIN_ROLE_UI_NON_FOCAL; + case TASK_NONUI_APPLICATION: + return PRIO_DARWIN_ROLE_NON_UI; + case TASK_DEFAULT_APPLICATION: + return PRIO_DARWIN_ROLE_UI; + case TASK_THROTTLE_APPLICATION: + return PRIO_DARWIN_ROLE_TAL_LAUNCH; + case TASK_DARWINBG_APPLICATION: + return PRIO_DARWIN_ROLE_DARWIN_BG; + case TASK_UNSPECIFIED: + default: + return PRIO_DARWIN_ROLE_DEFAULT; } } @@ -1823,68 +1908,68 @@ extern boolean_t ipc_importance_interactive_receiver; */ void proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role, - ipc_port_t * portwatch_ports, int portwatch_count) + ipc_port_t * portwatch_ports, int portwatch_count) { struct task_pend_token pend_token = {}; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START, - task_pid(task), trequested_0(task), trequested_1(task), - apptype, 0); + (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_START, + task_pid(task), trequested_0(task), trequested_1(task), + apptype, 0); switch (apptype) { - case TASK_APPTYPE_APP_TAL: - case TASK_APPTYPE_APP_DEFAULT: - /* Apps become donors via the 'live-donor' flag instead of the static donor flag */ - task_importance_mark_donor(task, FALSE); - task_importance_mark_live_donor(task, TRUE); - 
task_importance_mark_receiver(task, FALSE); + case TASK_APPTYPE_APP_TAL: + case TASK_APPTYPE_APP_DEFAULT: + /* Apps become donors via the 'live-donor' flag instead of the static donor flag */ + task_importance_mark_donor(task, FALSE); + task_importance_mark_live_donor(task, TRUE); + task_importance_mark_receiver(task, FALSE); #if CONFIG_EMBEDDED - task_importance_mark_denap_receiver(task, FALSE); + task_importance_mark_denap_receiver(task, FALSE); #else - /* Apps are de-nap recievers on desktop for suppression behaviors */ - task_importance_mark_denap_receiver(task, TRUE); + /* Apps are de-nap receivers on desktop for suppression behaviors */ + task_importance_mark_denap_receiver(task, TRUE); #endif /* CONFIG_EMBEDDED */ - break; + break; - case TASK_APPTYPE_DAEMON_INTERACTIVE: - task_importance_mark_donor(task, TRUE); - task_importance_mark_live_donor(task, FALSE); - - /* - * A boot arg controls whether interactive daemons are importance receivers. - * Normally, they are not. But for testing their behavior as an adaptive - * daemon, the boot-arg can be set. - * - * TODO: remove this when the interactive daemon audit period is over. - */ - task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver); - task_importance_mark_denap_receiver(task, FALSE); - break; + case TASK_APPTYPE_DAEMON_INTERACTIVE: + task_importance_mark_donor(task, TRUE); + task_importance_mark_live_donor(task, FALSE); - case TASK_APPTYPE_DAEMON_STANDARD: - task_importance_mark_donor(task, TRUE); - task_importance_mark_live_donor(task, FALSE); - task_importance_mark_receiver(task, FALSE); - task_importance_mark_denap_receiver(task, FALSE); - break; + /* + * A boot arg controls whether interactive daemons are importance receivers. + * Normally, they are not. But for testing their behavior as an adaptive + * daemon, the boot-arg can be set. + * + * TODO: remove this when the interactive daemon audit period is over.
+ */ + task_importance_mark_receiver(task, /* FALSE */ ipc_importance_interactive_receiver); + task_importance_mark_denap_receiver(task, FALSE); + break; - case TASK_APPTYPE_DAEMON_ADAPTIVE: - task_importance_mark_donor(task, FALSE); - task_importance_mark_live_donor(task, FALSE); - task_importance_mark_receiver(task, TRUE); - task_importance_mark_denap_receiver(task, FALSE); - break; + case TASK_APPTYPE_DAEMON_STANDARD: + task_importance_mark_donor(task, TRUE); + task_importance_mark_live_donor(task, FALSE); + task_importance_mark_receiver(task, FALSE); + task_importance_mark_denap_receiver(task, FALSE); + break; - case TASK_APPTYPE_DAEMON_BACKGROUND: - task_importance_mark_donor(task, FALSE); - task_importance_mark_live_donor(task, FALSE); - task_importance_mark_receiver(task, FALSE); - task_importance_mark_denap_receiver(task, FALSE); - break; + case TASK_APPTYPE_DAEMON_ADAPTIVE: + task_importance_mark_donor(task, FALSE); + task_importance_mark_live_donor(task, FALSE); + task_importance_mark_receiver(task, TRUE); + task_importance_mark_denap_receiver(task, FALSE); + break; - case TASK_APPTYPE_NONE: - break; + case TASK_APPTYPE_DAEMON_BACKGROUND: + task_importance_mark_donor(task, FALSE); + task_importance_mark_live_donor(task, FALSE); + task_importance_mark_receiver(task, FALSE); + task_importance_mark_denap_receiver(task, FALSE); + break; + + case TASK_APPTYPE_NONE: + break; } if (portwatch_ports != NULL && apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) { @@ -1940,9 +2025,9 @@ proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role, task_policy_update_complete_unlocked(task, &pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END, - task_pid(task), trequested_0(task), trequested_1(task), - task_is_importance_receiver(task), 0); + (IMPORTANCE_CODE(IMP_TASK_APPTYPE, apptype)) | DBG_FUNC_END, + task_pid(task), trequested_0(task), trequested_1(task), + task_is_importance_receiver(task), 0); } /* @@ -1950,7 +2035,7 @@ proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role, */ void proc_inherit_task_role(task_t new_task, - task_t old_task) + task_t old_task) { int role; @@ -1972,20 +2057,20 @@ task_compute_main_thread_qos(task_t task) int qos_clamp = task->requested_policy.trp_qos_clamp; switch (task->requested_policy.trp_apptype) { - case TASK_APPTYPE_APP_TAL: - case TASK_APPTYPE_APP_DEFAULT: - primordial_qos = THREAD_QOS_USER_INTERACTIVE; - break; + case TASK_APPTYPE_APP_TAL: + case TASK_APPTYPE_APP_DEFAULT: + primordial_qos = THREAD_QOS_USER_INTERACTIVE; + break; - case TASK_APPTYPE_DAEMON_INTERACTIVE: - case TASK_APPTYPE_DAEMON_STANDARD: - case TASK_APPTYPE_DAEMON_ADAPTIVE: - primordial_qos = THREAD_QOS_LEGACY; - break; + case TASK_APPTYPE_DAEMON_INTERACTIVE: + case TASK_APPTYPE_DAEMON_STANDARD: + case TASK_APPTYPE_DAEMON_ADAPTIVE: + primordial_qos = THREAD_QOS_LEGACY; + break; - case TASK_APPTYPE_DAEMON_BACKGROUND: - primordial_qos = THREAD_QOS_BACKGROUND; - break; + case TASK_APPTYPE_DAEMON_BACKGROUND: + primordial_qos = THREAD_QOS_BACKGROUND; + break; } if (task->bsd_info == initproc) { @@ -2055,38 +2140,47 @@ task_grab_latency_qos(task_t task) int proc_get_darwinbgstate(task_t task, uint32_t * flagsp) { - if (task->requested_policy.trp_ext_darwinbg) + if (task->requested_policy.trp_ext_darwinbg) { *flagsp |= PROC_FLAG_EXT_DARWINBG; + } - if (task->requested_policy.trp_int_darwinbg) + if (task->requested_policy.trp_int_darwinbg) { *flagsp |= PROC_FLAG_DARWINBG; + } #if CONFIG_EMBEDDED - if 
(task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) + if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_BACKGROUND) { *flagsp |= PROC_FLAG_IOS_APPLEDAEMON; + } - if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) + if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) { *flagsp |= PROC_FLAG_IOS_IMPPROMOTION; + } #endif /* CONFIG_EMBEDDED */ if (task->requested_policy.trp_apptype == TASK_APPTYPE_APP_DEFAULT || - task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) + task->requested_policy.trp_apptype == TASK_APPTYPE_APP_TAL) { *flagsp |= PROC_FLAG_APPLICATION; + } - if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) + if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE) { *flagsp |= PROC_FLAG_ADAPTIVE; + } if (task->requested_policy.trp_apptype == TASK_APPTYPE_DAEMON_ADAPTIVE && - task->requested_policy.trp_boosted == 1) + task->requested_policy.trp_boosted == 1) { *flagsp |= PROC_FLAG_ADAPTIVE_IMPORTANT; + } - if (task_is_importance_donor(task)) + if (task_is_importance_donor(task)) { *flagsp |= PROC_FLAG_IMPORTANCE_DONOR; + } - if (task->effective_policy.tep_sup_active) + if (task->effective_policy.tep_sup_active) { *flagsp |= PROC_FLAG_SUPPRESSED; + } - return(0); + return 0; } /* @@ -2097,7 +2191,7 @@ proc_get_darwinbgstate(task_t task, uint32_t * flagsp) * given tracepoint will emit either requested or effective data, but not both. * * A tracepoint may emit any of task, thread, or task & thread data. - * + * * The type of data emitted varies with pointer size. Where possible, both * task and thread data are emitted. In LP32 systems, the first and second * halves of either the task or thread data is emitted. @@ -2174,7 +2268,11 @@ teffective_1(task_t task) } /* dump pending for tracepoint */ -uint32_t tpending(task_pend_token_t pend_token) { return *(uint32_t*)(void*)(pend_token); } +uint32_t +tpending(task_pend_token_t pend_token) +{ + return *(uint32_t*)(void*)(pend_token); +} uint64_t task_requested_bitfield(task_t task) @@ -2188,19 +2286,19 @@ task_requested_bitfield(task_t task) bits |= (requested.trp_ext_iotier ? (((uint64_t)requested.trp_ext_iotier) << POLICY_REQ_EXT_IO_TIER_SHIFT) : 0); bits |= (requested.trp_int_iopassive ? POLICY_REQ_INT_PASSIVE_IO : 0); bits |= (requested.trp_ext_iopassive ? POLICY_REQ_EXT_PASSIVE_IO : 0); - bits |= (requested.trp_bg_iotier ? (((uint64_t)requested.trp_bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT) : 0); + bits |= (requested.trp_bg_iotier ? (((uint64_t)requested.trp_bg_iotier) << POLICY_REQ_BG_IOTIER_SHIFT) : 0); bits |= (requested.trp_terminated ? POLICY_REQ_TERMINATED : 0); bits |= (requested.trp_boosted ? POLICY_REQ_BOOSTED : 0); bits |= (requested.trp_tal_enabled ? POLICY_REQ_TAL_ENABLED : 0); bits |= (requested.trp_apptype ? (((uint64_t)requested.trp_apptype) << POLICY_REQ_APPTYPE_SHIFT) : 0); - bits |= (requested.trp_role ? (((uint64_t)requested.trp_role) << POLICY_REQ_ROLE_SHIFT) : 0); + bits |= (requested.trp_role ? (((uint64_t)requested.trp_role) << POLICY_REQ_ROLE_SHIFT) : 0); bits |= (requested.trp_sup_active ? POLICY_REQ_SUP_ACTIVE : 0); bits |= (requested.trp_sup_lowpri_cpu ? POLICY_REQ_SUP_LOWPRI_CPU : 0); bits |= (requested.trp_sup_cpu ? POLICY_REQ_SUP_CPU : 0); - bits |= (requested.trp_sup_timer ? (((uint64_t)requested.trp_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0); - bits |= (requested.trp_sup_throughput ? 
(((uint64_t)requested.trp_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT) : 0); + bits |= (requested.trp_sup_timer ? (((uint64_t)requested.trp_sup_timer) << POLICY_REQ_SUP_TIMER_THROTTLE_SHIFT) : 0); + bits |= (requested.trp_sup_throughput ? (((uint64_t)requested.trp_sup_throughput) << POLICY_REQ_SUP_THROUGHPUT_SHIFT) : 0); bits |= (requested.trp_sup_disk ? POLICY_REQ_SUP_DISK_THROTTLE : 0); bits |= (requested.trp_sup_bg_sockets ? POLICY_REQ_SUP_BG_SOCKETS : 0); @@ -2209,7 +2307,7 @@ task_requested_bitfield(task_t task) bits |= (requested.trp_base_through_qos ? (((uint64_t)requested.trp_base_through_qos) << POLICY_REQ_BASE_THROUGH_QOS_SHIFT) : 0); bits |= (requested.trp_over_through_qos ? (((uint64_t)requested.trp_over_through_qos) << POLICY_REQ_OVER_THROUGH_QOS_SHIFT) : 0); bits |= (requested.trp_sfi_managed ? POLICY_REQ_SFI_MANAGED : 0); - bits |= (requested.trp_qos_clamp ? (((uint64_t)requested.trp_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT) : 0); + bits |= (requested.trp_qos_clamp ? (((uint64_t)requested.trp_qos_clamp) << POLICY_REQ_QOS_CLAMP_SHIFT) : 0); return bits; } @@ -2234,7 +2332,7 @@ task_effective_bitfield(task_t task) bits |= (effective.tep_watchers_bg ? POLICY_EFF_WATCHERS_BG : 0); bits |= (effective.tep_sup_active ? POLICY_EFF_SUP_ACTIVE : 0); bits |= (effective.tep_suppressed_cpu ? POLICY_EFF_SUP_CPU : 0); - bits |= (effective.tep_role ? (((uint64_t)effective.tep_role) << POLICY_EFF_ROLE_SHIFT) : 0); + bits |= (effective.tep_role ? (((uint64_t)effective.tep_role) << POLICY_EFF_ROLE_SHIFT) : 0); bits |= (effective.tep_latency_qos ? (((uint64_t)effective.tep_latency_qos) << POLICY_EFF_LATENCY_QOS_SHIFT) : 0); bits |= (effective.tep_through_qos ? (((uint64_t)effective.tep_through_qos) << POLICY_EFF_THROUGH_QOS_SHIFT) : 0); bits |= (effective.tep_sfi_managed ? POLICY_EFF_SFI_MANAGED : 0); @@ -2248,16 +2346,15 @@ task_effective_bitfield(task_t task) * Resource usage and CPU related routines */ -int +int proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uint64_t *intervalp, uint64_t *deadlinep) { - int error = 0; int scope; task_lock(task); - + error = task_get_cpuusage(task, percentagep, intervalp, deadlinep, &scope); task_unlock(task); @@ -2272,7 +2369,7 @@ proc_get_task_ruse_cpu(task_t task, uint32_t *policyp, uint8_t *percentagep, uin *policyp = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE; } - return(error); + return error; } /* @@ -2290,13 +2387,11 @@ proc_init_cpumon_params(void) * device tree. */ if (!PE_parse_boot_argn("max_cpumon_percentage", &proc_max_cpumon_percentage, - sizeof (proc_max_cpumon_percentage))) - { + sizeof(proc_max_cpumon_percentage))) { uint64_t max_percentage = 0ULL; if (!PE_get_default("kern.max_cpumon_percentage", &max_percentage, - sizeof(max_percentage))) - { + sizeof(max_percentage))) { max_percentage = DEFAULT_CPUMON_PERCENTAGE; } @@ -2315,11 +2410,9 @@ proc_init_cpumon_params(void) * via boot-args and the device tree. 
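
The override chain in proc_init_cpumon_params() is three layers deep: an explicit boot-arg wins, otherwise the device-tree default applies, otherwise a compiled-in constant. A minimal userland sketch of the same layered lookup (illustrative only; getenv() stands in for the kernel-only PE_parse_boot_argn() and PE_get_default(), and the fallback constant here is invented):

#include <stdio.h>
#include <stdlib.h>

#define DEFAULT_CPUMON_PERCENTAGE 75 /* invented fallback, for illustration */

/* Layered lookup: "boot-arg" beats "device-tree default" beats the
 * compiled-in default. getenv() is only a stand-in for the kernel lookups. */
static unsigned long
max_cpumon_percentage(void)
{
    const char *s;

    if ((s = getenv("max_cpumon_percentage")) != NULL) {
        return strtoul(s, NULL, 0);   /* layer 1: explicit boot-arg */
    }
    if ((s = getenv("kern.max_cpumon_percentage")) != NULL) {
        return strtoul(s, NULL, 0);   /* layer 2: device-tree default */
    }
    return DEFAULT_CPUMON_PERCENTAGE; /* layer 3: compiled-in default */
}

int
main(void)
{
    unsigned long pct = max_cpumon_percentage();

    if (pct > 100) { /* sanity clamp for the demo only */
        pct = 100;
    }
    printf("effective CPU monitor cap: %lu%%\n", pct);
    return 0;
}

Whichever layer supplies the value, callers see a single effective cap, which is what lets the kernel tighten or loosen the monitor per device without a code change.
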
*/ if (!PE_parse_boot_argn("max_cpumon_interval", &proc_max_cpumon_interval, - sizeof (proc_max_cpumon_interval))) - { + sizeof(proc_max_cpumon_interval))) { if (!PE_get_default("kern.max_cpumon_interval", &proc_max_cpumon_interval, - sizeof(proc_max_cpumon_interval))) - { + sizeof(proc_max_cpumon_interval))) { proc_max_cpumon_interval = DEFAULT_CPUMON_INTERVAL; } } @@ -2328,8 +2421,8 @@ proc_init_cpumon_params(void) /* TEMPORARY boot arg to control App suppression */ PE_parse_boot_argn("task_policy_suppression_flags", - &task_policy_suppression_flags, - sizeof(task_policy_suppression_flags)); + &task_policy_suppression_flags, + sizeof(task_policy_suppression_flags)); /* adjust suppression disk policy if called for in boot arg */ if (task_policy_suppression_flags & TASK_POLICY_SUPPRESSION_IOTIER2) { @@ -2364,41 +2457,44 @@ proc_init_cpumon_params(void) * Currently, requesting notification via an exception is the only way to get per-thread scope for a * CPU limit. All other types of notifications force task-wide scope for the limit. */ -int +int proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_t interval, uint64_t deadline, - int cpumon_entitled) + int cpumon_entitled) { int error = 0; int scope; - /* - * Enforce the matrix of supported configurations for policy, percentage, and deadline. - */ - switch (policy) { - // If no policy is explicitly given, the default is to throttle. - case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE: + /* + * Enforce the matrix of supported configurations for policy, percentage, and deadline. + */ + switch (policy) { + // If no policy is explicitly given, the default is to throttle. + case TASK_POLICY_RESOURCE_ATTRIBUTE_NONE: case TASK_POLICY_RESOURCE_ATTRIBUTE_THROTTLE: - if (deadline != 0) - return (ENOTSUP); + if (deadline != 0) { + return ENOTSUP; + } scope = TASK_RUSECPU_FLAGS_PROC_LIMIT; break; case TASK_POLICY_RESOURCE_ATTRIBUTE_SUSPEND: case TASK_POLICY_RESOURCE_ATTRIBUTE_TERMINATE: case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_KQ: - if (percentage != 0) - return (ENOTSUP); + if (percentage != 0) { + return ENOTSUP; + } scope = TASK_RUSECPU_FLAGS_DEADLINE; break; - case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC: - if (deadline != 0) - return (ENOTSUP); + case TASK_POLICY_RESOURCE_ATTRIBUTE_NOTIFY_EXC: + if (deadline != 0) { + return ENOTSUP; + } scope = TASK_RUSECPU_FLAGS_PERTHR_LIMIT; #ifdef CONFIG_NOMONITORS - return (error); + return error; #endif /* CONFIG_NOMONITORS */ break; default: - return (EINVAL); + return EINVAL; } task_lock(task); @@ -2409,7 +2505,7 @@ proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_ } error = task_set_cpuusage(task, percentage, interval, deadline, scope, cpumon_entitled); task_unlock(task); - return(error); + return error; } /* TODO: get rid of these */ @@ -2422,7 +2518,7 @@ proc_set_task_ruse_cpu(task_t task, uint32_t policy, uint8_t percentage, uint64_ #define TASK_POLICY_RESOURCE_USAGE_COUNT 6 -int +int proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled) { int error = 0; @@ -2437,8 +2533,9 @@ proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled) } error = task_clear_cpuusage_locked(task, cpumon_entitled); - if (error != 0) - goto out; + if (error != 0) { + goto out; + } action = task->applied_ru_cpu; if (task->applied_ru_cpu_ext != TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) { @@ -2455,8 +2552,7 @@ proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled) out: task_unlock(task); out1: - return(error); - + return error; } /* used to apply resource limit related 
actions */ @@ -2465,24 +2561,25 @@ task_apply_resource_actions(task_t task, int type) { int action = TASK_POLICY_RESOURCE_ATTRIBUTE_NONE; void * bsdinfo = NULL; - + switch (type) { - case TASK_POLICY_CPU_RESOURCE_USAGE: - break; - case TASK_POLICY_WIREDMEM_RESOURCE_USAGE: - case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE: - case TASK_POLICY_DISK_RESOURCE_USAGE: - case TASK_POLICY_NETWORK_RESOURCE_USAGE: - case TASK_POLICY_POWER_RESOURCE_USAGE: - return(0); + case TASK_POLICY_CPU_RESOURCE_USAGE: + break; + case TASK_POLICY_WIREDMEM_RESOURCE_USAGE: + case TASK_POLICY_VIRTUALMEM_RESOURCE_USAGE: + case TASK_POLICY_DISK_RESOURCE_USAGE: + case TASK_POLICY_NETWORK_RESOURCE_USAGE: + case TASK_POLICY_POWER_RESOURCE_USAGE: + return 0; - default: - return(1); - }; + default: + return 1; + } + ; /* only cpu actions for now */ task_lock(task); - + if (task->applied_ru_cpu_ext == TASK_POLICY_RESOURCE_ATTRIBUTE_NONE) { /* apply action */ task->applied_ru_cpu_ext = task->policy_ru_cpu_ext; @@ -2495,10 +2592,11 @@ task_apply_resource_actions(task_t task, int type) bsdinfo = task->bsd_info; task_unlock(task); proc_apply_resource_actions(bsdinfo, TASK_POLICY_CPU_RESOURCE_USAGE, action); - } else + } else { task_unlock(task); + } - return(0); + return 0; } /* @@ -2529,7 +2627,7 @@ task_get_cpuusage(task_t task, uint8_t *percentagep, uint64_t *intervalp, uint64 *scope = 0; } - return(0); + return 0; } /* @@ -2540,7 +2638,7 @@ int task_suspend_cpumon(task_t task) { thread_t thread; - + task_lock_assert_owned(task); if ((task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) == 0) { @@ -2558,7 +2656,7 @@ task_suspend_cpumon(task_t task) /* * Suspend monitoring for the task, and propagate that change to each thread. */ - task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON); + task->rusage_cpu_flags &= ~(TASK_RUSECPU_FLAGS_PERTHR_LIMIT | TASK_RUSECPU_FLAGS_FATAL_CPUMON); queue_iterate(&task->threads, thread, thread_t, task_threads) { act_set_astledger(thread); } @@ -2577,13 +2675,15 @@ task_disable_cpumon(task_t task) task_lock_assert_owned(task); kret = task_suspend_cpumon(task); - if (kret) return kret; + if (kret) { + return kret; + } /* Once we clear these values, the monitor can't be resumed */ task->rusage_cpu_perthr_percentage = 0; task->rusage_cpu_perthr_interval = 0; - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -2594,7 +2694,7 @@ task_enable_cpumon_locked(task_t task) task_lock_assert_owned(task); if (task->rusage_cpu_perthr_percentage == 0 || - task->rusage_cpu_perthr_interval == 0) { + task->rusage_cpu_perthr_interval == 0) { return KERN_INVALID_ARGUMENT; } @@ -2624,8 +2724,8 @@ task_resume_cpumon(task_t task) /* duplicate values from bsd/sys/process_policy.h */ -#define PROC_POLICY_CPUMON_DISABLE 0xFF -#define PROC_POLICY_CPUMON_DEFAULTS 0xFE +#define PROC_POLICY_CPUMON_DISABLE 0xFF +#define PROC_POLICY_CPUMON_DEFAULTS 0xFE static int task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t deadline, int scope, int cpumon_entitled) @@ -2636,8 +2736,9 @@ task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t d lck_mtx_assert(&task->lock, LCK_MTX_ASSERT_OWNED); /* By default, refill once per second */ - if (interval == 0) + if (interval == 0) { interval = NSEC_PER_SEC; + } if (percentage != 0) { if (scope == TASK_RUSECPU_FLAGS_PERTHR_LIMIT) { @@ -2661,7 +2762,7 @@ task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t d * missing the required entitlement: * 
com.apple.private.kernel.override-cpumon * - * Instead, treat this as a request to reset its params + * Instead, treat this as a request to reset its params * back to the defaults. */ warn = TRUE; @@ -2682,10 +2783,10 @@ task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t d * - Leave the interval as-is, if there's already a per-thread * limit configured * - Use the system default. - */ + */ if (interval == -1ULL) { if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) { - interval = task->rusage_cpu_perthr_interval; + interval = task->rusage_cpu_perthr_interval; } else { interval = proc_max_cpumon_interval; } @@ -2695,18 +2796,18 @@ task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t d * Enforce global caps on CPU usage monitor here if the process is not * entitled to escape the global caps. */ - if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) { + if ((percentage > proc_max_cpumon_percentage) && (cpumon_entitled == 0)) { warn = TRUE; - percentage = proc_max_cpumon_percentage; - } + percentage = proc_max_cpumon_percentage; + } - if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) { + if ((interval > proc_max_cpumon_interval) && (cpumon_entitled == 0)) { warn = TRUE; - interval = proc_max_cpumon_interval; - } + interval = proc_max_cpumon_interval; + } if (warn) { - int pid = 0; + int pid = 0; const char *procname = "unknown"; #ifdef MACH_BSD @@ -2717,7 +2818,7 @@ task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t d #endif printf("process %s[%d] denied attempt to escape CPU monitor" - " (missing required entitlement).\n", procname, pid); + " (missing required entitlement).\n", procname, pid); } /* configure the limit values */ @@ -2770,7 +2871,7 @@ task_set_cpuusage(task_t task, uint8_t percentage, uint64_t interval, uint64_t d } } - return(0); + return 0; } int @@ -2782,7 +2883,7 @@ task_clear_cpuusage(task_t task, int cpumon_entitled) retval = task_clear_cpuusage_locked(task, cpumon_entitled); task_unlock(task); - return(retval); + return retval; } static int @@ -2792,7 +2893,7 @@ task_clear_cpuusage_locked(task_t task, int cpumon_entitled) /* cancel percentage handling if set */ if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PROC_LIMIT) { - task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT; + task->rusage_cpu_flags &= ~TASK_RUSECPU_FLAGS_PROC_LIMIT; ledger_set_limit(task->ledger, task_ledgers.cpu_time, LEDGER_LIMIT_INFINITY, 0); task->rusage_cpu_percentage = 0; task->rusage_cpu_interval = 0; @@ -2818,7 +2919,7 @@ task_clear_cpuusage_locked(task_t task, int cpumon_entitled) task_lock(task); } } - return(0); + return 0; } /* called by ledger unit to enforce action due to resource usage criteria being met */ @@ -2837,7 +2938,7 @@ task_action_cpuusage(thread_call_param_t param0, __unused thread_call_param_t pa #if CONFIG_EMBEDDED -lck_mtx_t task_watch_mtx; +lck_mtx_t task_watch_mtx; void task_watch_init(void) @@ -2862,7 +2963,6 @@ add_taskwatch_locked(task_t task, task_watch_t * twp) { queue_enter(&task->task_watchers, twp, task_watch_t *, tw_links); task->num_taskwatchers++; - } static void @@ -2873,7 +2973,7 @@ remove_taskwatch_locked(task_t task, task_watch_t * twp) } -int +int proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind) { thread_t target_thread = NULL; @@ -2882,8 +2982,9 @@ proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind) task_t task = TASK_NULL; target_thread = task_findtid(curtask, tid); - if 
(target_thread == NULL) + if (target_thread == NULL) { return ESRCH; + } /* holds thread reference */ if (bind != 0) { @@ -2907,7 +3008,7 @@ proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind) task_watch_lock(); - if (target_thread->taskwatch != NULL){ + if (target_thread->taskwatch != NULL) { /* already bound to another task */ task_watch_unlock(); @@ -2920,24 +3021,25 @@ proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind) setbg = proc_get_effective_task_policy(target_task, TASK_POLICY_WATCHERS_BG); - twp->tw_task = target_task; /* holds the task reference */ - twp->tw_thread = target_thread; /* holds the thread reference */ + twp->tw_task = target_task; /* holds the task reference */ + twp->tw_thread = target_thread; /* holds the thread reference */ twp->tw_state = setbg; twp->tw_importance = target_thread->importance; - + add_taskwatch_locked(target_task, twp); target_thread->taskwatch = twp; task_watch_unlock(); - if (setbg) + if (setbg) { set_thread_appbg(target_thread, setbg, INT_MIN); + } /* retain the thread reference as it is in twp */ target_thread = NULL; } else { - /* unbind */ + /* unbind */ task_watch_lock(); if ((twp = target_thread->taskwatch) != NULL) { task = twp->tw_task; @@ -2946,19 +3048,19 @@ proc_lf_pidbind(task_t curtask, uint64_t tid, task_t target_task, int bind) task_watch_unlock(); - task_deallocate(task); /* drop task ref in twp */ + task_deallocate(task); /* drop task ref in twp */ set_thread_appbg(target_thread, 0, twp->tw_importance); - thread_deallocate(target_thread); /* drop thread ref in twp */ + thread_deallocate(target_thread); /* drop thread ref in twp */ kfree(twp, sizeof(task_watch_t)); } else { task_watch_unlock(); - ret = 0; /* return success if it is not already bound */ + ret = 0; /* return success if it is not already bound */ goto out; } } out: - thread_deallocate(target_thread); /* drop thread ref acquired in this routine */ - return(ret); + thread_deallocate(target_thread); /* drop thread ref acquired in this routine */ + return ret; } static void @@ -2978,14 +3080,16 @@ apply_appstate_watchers(task_t task) retry: /* if no watchers on the list return */ - if ((numwatchers = task->num_taskwatchers) == 0) + if ((numwatchers = task->num_taskwatchers) == 0) { return; + } - threadlist = (thread_watchlist_t *)kalloc(numwatchers*sizeof(thread_watchlist_t)); - if (threadlist == NULL) + threadlist = (thread_watchlist_t *)kalloc(numwatchers * sizeof(thread_watchlist_t)); + if (threadlist == NULL) { return; + } - bzero(threadlist, numwatchers*sizeof(thread_watchlist_t)); + bzero(threadlist, numwatchers * sizeof(thread_watchlist_t)); task_watch_lock(); /*serialize application of app state changes */ @@ -2993,41 +3097,42 @@ retry: if (task->watchapplying != 0) { lck_mtx_sleep(&task_watch_mtx, LCK_SLEEP_DEFAULT, &task->watchapplying, THREAD_UNINT); task_watch_unlock(); - kfree(threadlist, numwatchers*sizeof(thread_watchlist_t)); + kfree(threadlist, numwatchers * sizeof(thread_watchlist_t)); goto retry; } if (numwatchers != task->num_taskwatchers) { task_watch_unlock(); - kfree(threadlist, numwatchers*sizeof(thread_watchlist_t)); + kfree(threadlist, numwatchers * sizeof(thread_watchlist_t)); goto retry; } - + setbg = proc_get_effective_task_policy(task, TASK_POLICY_WATCHERS_BG); task->watchapplying = 1; i = 0; queue_iterate(&task->task_watchers, twp, task_watch_t *, tw_links) { - threadlist[i].thread = twp->tw_thread; thread_reference(threadlist[i].thread); if (setbg != 0) { twp->tw_importance = 
twp->tw_thread->importance; threadlist[i].importance = INT_MIN; - } else + } else { threadlist[i].importance = twp->tw_importance; + } i++; - if (i > numwatchers) + if (i > numwatchers) { break; + } } task_watch_unlock(); - for (j = 0; j< i; j++) { + for (j = 0; j < i; j++) { set_thread_appbg(threadlist[j].thread, setbg, threadlist[j].importance); thread_deallocate(threadlist[j].thread); } - kfree(threadlist, numwatchers*sizeof(thread_watchlist_t)); + kfree(threadlist, numwatchers * sizeof(thread_watchlist_t)); task_watch_lock(); @@ -3066,14 +3171,16 @@ task_removewatchers(task_t task) task_watch_t * twp = NULL; retry: - if ((numwatchers = task->num_taskwatchers) == 0) + if ((numwatchers = task->num_taskwatchers) == 0) { return; + } - twplist = (task_watch_t **)kalloc(numwatchers*sizeof(task_watch_t *)); - if (twplist == NULL) + twplist = (task_watch_t **)kalloc(numwatchers * sizeof(task_watch_t *)); + if (twplist == NULL) { return; + } - bzero(twplist, numwatchers*sizeof(task_watch_t *)); + bzero(twplist, numwatchers * sizeof(task_watch_t *)); task_watch_lock(); if (task->num_taskwatchers == 0) { @@ -3083,31 +3190,30 @@ retry: if (numwatchers != task->num_taskwatchers) { task_watch_unlock(); - kfree(twplist, numwatchers*sizeof(task_watch_t *)); + kfree(twplist, numwatchers * sizeof(task_watch_t *)); numwatchers = 0; goto retry; } - + i = 0; - while((twp = (task_watch_t *)dequeue_head(&task->task_watchers)) != NULL) - { + while ((twp = (task_watch_t *)dequeue_head(&task->task_watchers)) != NULL) { twplist[i] = twp; - task->num_taskwatchers--; + task->num_taskwatchers--; - /* + /* * Since the linkage is removed and thread state cleanup is already set up, * remove the reference from the thread. */ - twp->tw_thread->taskwatch = NULL; /* removed linkage, clear thread holding ref */ + twp->tw_thread->taskwatch = NULL; /* removed linkage, clear thread holding ref */ i++; - if ((task->num_taskwatchers == 0) || (i > numwatchers)) + if ((task->num_taskwatchers == 0) || (i > numwatchers)) { break; + } } task_watch_unlock(); - for (j = 0; j< i; j++) { - + for (j = 0; j < i; j++) { twp = twplist[j]; /* remove thread and network bg */ set_thread_appbg(twp->tw_thread, 0, twp->tw_importance); @@ -3117,8 +3223,7 @@ retry: } out: - kfree(twplist, numwatchers*sizeof(task_watch_t *)); - + kfree(twplist, numwatchers * sizeof(task_watch_t *)); } #endif /* CONFIG_EMBEDDED */ @@ -3217,7 +3322,9 @@ task_importance_init_from_parent(__imp_only task_t new_task, __imp_only task_t p ipc_importance_task_t new_task_imp = IIT_NULL; new_task->task_imp_base = NULL; - if (!parent_task) return; + if (!parent_task) { + return; + } if (task_is_marked_importance_donor(parent_task)) { new_task_imp = ipc_importance_for_task(new_task, FALSE); @@ -3225,22 +3332,25 @@ task_importance_init_from_parent(__imp_only task_t new_task, __imp_only task_t p ipc_importance_task_mark_donor(new_task_imp, TRUE); } if (task_is_marked_live_importance_donor(parent_task)) { - if (IIT_NULL == new_task_imp) + if (IIT_NULL == new_task_imp) { new_task_imp = ipc_importance_for_task(new_task, FALSE); + } assert(IIT_NULL != new_task_imp); ipc_importance_task_mark_live_donor(new_task_imp, TRUE); } /* Do not inherit 'receiver' on fork, vfexec or true spawn */ if (task_is_exec_copy(new_task) && - task_is_marked_importance_receiver(parent_task)) { - if (IIT_NULL == new_task_imp) + task_is_marked_importance_receiver(parent_task)) { - if (IIT_NULL == new_task_imp) { new_task_imp = ipc_importance_for_task(new_task, FALSE); + } assert(IIT_NULL != new_task_imp); 
ipc_importance_task_mark_receiver(new_task_imp, TRUE); } if (task_is_marked_importance_denap_receiver(parent_task)) { - if (IIT_NULL == new_task_imp) + if (IIT_NULL == new_task_imp) { new_task_imp = ipc_importance_for_task(new_task, FALSE); + } assert(IIT_NULL != new_task_imp); ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE); } @@ -3262,20 +3372,20 @@ task_set_boost_locked(task_t task, boolean_t boost_active) { #if IMPORTANCE_TRACE KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_START), - proc_selfpid(), task_pid(task), trequested_0(task), trequested_1(task), 0); + proc_selfpid(), task_pid(task), trequested_0(task), trequested_1(task), 0); #endif /* IMPORTANCE_TRACE */ task->requested_policy.trp_boosted = boost_active; #if IMPORTANCE_TRACE - if (boost_active == TRUE){ + if (boost_active == TRUE) { DTRACE_BOOST2(boost, task_t, task, int, task_pid(task)); } else { DTRACE_BOOST2(unboost, task_t, task, int, task_pid(task)); } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_BOOST, (boost_active ? IMP_BOOSTED : IMP_UNBOOSTED)) | DBG_FUNC_END), - proc_selfpid(), task_pid(task), - trequested_0(task), trequested_1(task), 0); + proc_selfpid(), task_pid(task), + trequested_0(task), trequested_1(task), 0); #endif /* IMPORTANCE_TRACE */ } @@ -3301,8 +3411,9 @@ task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t boolean_t task_is_importance_donor(task_t task) { - if (task->task_imp_base == IIT_NULL) + if (task->task_imp_base == IIT_NULL) { return FALSE; + } return ipc_importance_task_is_donor(task->task_imp_base); } @@ -3312,8 +3423,9 @@ task_is_importance_donor(task_t task) boolean_t task_is_marked_importance_donor(task_t task) { - if (task->task_imp_base == IIT_NULL) + if (task->task_imp_base == IIT_NULL) { return FALSE; + } return ipc_importance_task_is_marked_donor(task->task_imp_base); } @@ -3323,8 +3435,9 @@ task_is_marked_importance_donor(task_t task) boolean_t task_is_marked_live_importance_donor(task_t task) { - if (task->task_imp_base == IIT_NULL) + if (task->task_imp_base == IIT_NULL) { return FALSE; + } return ipc_importance_task_is_marked_live_donor(task->task_imp_base); } @@ -3336,8 +3449,9 @@ task_is_marked_live_importance_donor(task_t task) boolean_t task_is_importance_receiver(task_t task) { - if (task->task_imp_base == IIT_NULL) + if (task->task_imp_base == IIT_NULL) { return FALSE; + } return ipc_importance_task_is_marked_receiver(task->task_imp_base); } @@ -3347,8 +3461,9 @@ task_is_importance_receiver(task_t task) boolean_t task_is_marked_importance_receiver(task_t task) { - if (task->task_imp_base == IIT_NULL) + if (task->task_imp_base == IIT_NULL) { return FALSE; + } return ipc_importance_task_is_marked_receiver(task->task_imp_base); } @@ -3359,8 +3474,9 @@ task_is_marked_importance_receiver(task_t task) boolean_t task_is_importance_denap_receiver(task_t task) { - if (task->task_imp_base == IIT_NULL) + if (task->task_imp_base == IIT_NULL) { return FALSE; + } return ipc_importance_task_is_denap_receiver(task->task_imp_base); } @@ -3370,8 +3486,9 @@ task_is_importance_denap_receiver(task_t task) boolean_t task_is_marked_importance_denap_receiver(task_t task) { - if (task->task_imp_base == IIT_NULL) + if (task->task_imp_base == IIT_NULL) { return FALSE; + } return ipc_importance_task_is_marked_denap_receiver(task->task_imp_base); } @@ -3382,10 +3499,11 @@ task_is_marked_importance_denap_receiver(task_t task) boolean_t 
task_is_importance_receiver_type(task_t task) { - if (task->task_imp_base == IIT_NULL) + if (task->task_imp_base == IIT_NULL) { return FALSE; - return (task_is_importance_receiver(task) || - task_is_importance_denap_receiver(task)); + } + return task_is_importance_receiver(task) || + task_is_importance_denap_receiver(task); } /* @@ -3433,12 +3551,12 @@ task_importance_hold_legacy_external_assertion(task_t target_task, uint32_t coun { ipc_importance_task_t task_imp; kern_return_t ret; - + /* must already have set up an importance */ task_imp = target_task->task_imp_base; if (IIT_NULL == task_imp) { return EOVERFLOW; - } + } ret = ipc_importance_task_hold_legacy_external_assertion(task_imp, count); return (KERN_SUCCESS != ret) ? ENOTSUP : 0; } @@ -3448,7 +3566,7 @@ task_importance_drop_file_lock_assertion(task_t target_task, uint32_t count) { ipc_importance_task_t task_imp; kern_return_t ret; - + /* must already have set up an importance */ task_imp = target_task->task_imp_base; if (IIT_NULL == task_imp) { @@ -3463,7 +3581,7 @@ task_importance_drop_legacy_external_assertion(task_t target_task, uint32_t coun { ipc_importance_task_t task_imp; kern_return_t ret; - + /* must already have set up an importance */ task_imp = target_task->task_imp_base; if (IIT_NULL == task_imp) { @@ -3522,15 +3640,16 @@ task_add_importance_watchport(task_t task, mach_port_t port, int *boostp) } if (IIT_NULL != release_imp_task) { - if (boost > 0) + if (boost > 0) { ipc_importance_task_drop_internal_assertion(release_imp_task, boost); + } // released_pid = task_pid(release_imp_task); /* TODO: Need ref-safe way to get pid */ ipc_importance_task_release(release_imp_task); } #if IMPORTANCE_TRACE KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_WATCHPORT, 0)) | DBG_FUNC_NONE, - proc_selfpid(), pid, boost, released_pid, 0); + proc_selfpid(), pid, boost, released_pid, 0); #endif /* IMPORTANCE_TRACE */ } @@ -3584,13 +3703,14 @@ task_has_been_notified(task_t task, int pressurelevel) if (task == NULL) { return FALSE; } - - if (pressurelevel == kVMPressureWarning) - return (task->low_mem_notified_warn ? TRUE : FALSE); - else if (pressurelevel == kVMPressureCritical) - return (task->low_mem_notified_critical ? TRUE : FALSE); - else + + if (pressurelevel == kVMPressureWarning) { + return task->low_mem_notified_warn ? TRUE : FALSE; + } else if (pressurelevel == kVMPressureCritical) { + return task->low_mem_notified_critical ? TRUE : FALSE; + } else { return TRUE; + } } @@ -3605,19 +3725,20 @@ task_used_for_purging(task_t task, int pressurelevel) if (task == NULL) { return FALSE; } - - if (pressurelevel == kVMPressureWarning) - return (task->purged_memory_warn ? TRUE : FALSE); - else if (pressurelevel == kVMPressureCritical) - return (task->purged_memory_critical ? TRUE : FALSE); - else + + if (pressurelevel == kVMPressureWarning) { + return task->purged_memory_warn ? TRUE : FALSE; + } else if (pressurelevel == kVMPressureCritical) { + return task->purged_memory_critical ? TRUE : FALSE; + } else { return TRUE; + } } /* * Mark the task as notified with memory notification. - * + * * Condition: task lock should be held while calling this function. 
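
Several of the helpers in this hunk document a precondition instead of taking the lock themselves: the caller already holds the task lock, and the helper only mutates state. A standalone sketch of that convention with invented names; the kernel expresses the same check as lck_mtx_assert(..., LCK_MTX_ASSERT_OWNED):

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* 'held' is a demo-only ownership flag so the setter can assert its
 * precondition; the kernel's lock primitives track this internally. */
struct obj {
    pthread_mutex_t lock;
    bool            held;
    int             notified_warn;
};

static void
obj_lock(struct obj *o)
{
    pthread_mutex_lock(&o->lock);
    o->held = true;
}

static void
obj_unlock(struct obj *o)
{
    o->held = false;
    pthread_mutex_unlock(&o->lock);
}

/* Condition: o->lock must be held by the caller. */
static void
obj_mark_notified(struct obj *o)
{
    assert(o->held);       /* analogue of lck_mtx_assert(..., OWNED) */
    o->notified_warn = 1;
}

int
main(void)
{
    struct obj o = { .lock = PTHREAD_MUTEX_INITIALIZER, .held = false };

    obj_lock(&o);
    obj_mark_notified(&o); /* legal: lock held across the call */
    obj_unlock(&o);
    printf("notified_warn=%d\n", o.notified_warn);
    return 0;
}
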
*/ void @@ -3626,11 +3747,12 @@ task_mark_has_been_notified(task_t task, int pressurelevel) if (task == NULL) { return; } - - if (pressurelevel == kVMPressureWarning) + + if (pressurelevel == kVMPressureWarning) { task->low_mem_notified_warn = 1; - else if (pressurelevel == kVMPressureCritical) + } else if (pressurelevel == kVMPressureCritical) { task->low_mem_notified_critical = 1; + } } @@ -3645,17 +3767,18 @@ task_mark_used_for_purging(task_t task, int pressurelevel) if (task == NULL) { return; } - - if (pressurelevel == kVMPressureWarning) + + if (pressurelevel == kVMPressureWarning) { task->purged_memory_warn = 1; - else if (pressurelevel == kVMPressureCritical) + } else if (pressurelevel == kVMPressureCritical) { task->purged_memory_critical = 1; + } } /* * Mark the task eligible for low memory notification. - * + * * Condition: task lock should be held while calling this function. */ void @@ -3664,11 +3787,12 @@ task_clear_has_been_notified(task_t task, int pressurelevel) if (task == NULL) { return; } - - if (pressurelevel == kVMPressureWarning) + + if (pressurelevel == kVMPressureWarning) { task->low_mem_notified_warn = 0; - else if (pressurelevel == kVMPressureCritical) + } else if (pressurelevel == kVMPressureCritical) { task->low_mem_notified_critical = 0; + } } @@ -3683,22 +3807,22 @@ task_clear_used_for_purging(task_t task) if (task == NULL) { return; } - + task->purged_memory_warn = 0; task->purged_memory_critical = 0; } /* - * Estimate task importance for purging its purgeable memory + * Estimate task importance for purging its purgeable memory * and low memory notification. - * + * * Importance is calculated in the following order of criteria: * -Task role : Background vs Foreground * -Boost status: Not boosted vs Boosted * -Darwin BG status. * - * Returns: Estimated task importance. Less important task will have lower + * Returns: Estimated task importance. Less important task will have lower * estimated importance. */ int @@ -3710,27 +3834,29 @@ task_importance_estimate(task_t task) return 0; } - if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) - task_importance += TASK_IMPORTANCE_FOREGROUND; + if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) { + task_importance += TASK_IMPORTANCE_FOREGROUND; + } + + if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0) { + task_importance += TASK_IMPORTANCE_NOTDARWINBG; + } - if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG) == 0) - task_importance += TASK_IMPORTANCE_NOTDARWINBG; - return task_importance; } boolean_t task_has_assertions(task_t task) { - return (task->task_imp_base->iit_assertcnt? TRUE : FALSE); + return task->task_imp_base->iit_assertcnt? 
TRUE : FALSE; } kern_return_t send_resource_violation(typeof(send_cpu_usage_violation) sendfunc, - task_t violator, - struct ledger_entry_info *linfo, - resource_notify_flags_t flags) + task_t violator, + struct ledger_entry_info *linfo, + resource_notify_flags_t flags) { #ifndef MACH_BSD return KERN_NOT_SUPPORTED; @@ -3760,10 +3886,12 @@ send_resource_violation(typeof(send_cpu_usage_violation) sendfunc, pid = task_pid(violator); if (flags & kRNFatalLimitFlag) { kr = proc_pidpathinfo_internal(proc, 0, proc_path, - sizeof(proc_path), NULL); + sizeof(proc_path), NULL); } task_unlock(violator); - if (kr) goto finish; + if (kr) { + goto finish; + } /* violation time ~ now */ clock_get_calendar_nanotime(&secs, &nsecs); @@ -3773,15 +3901,17 @@ send_resource_violation(typeof(send_cpu_usage_violation) sendfunc, /* send message */ kr = host_get_special_port(host_priv_self(), HOST_LOCAL_NODE, - HOST_RESOURCE_NOTIFY_PORT, &dstport); - if (kr) goto finish; + HOST_RESOURCE_NOTIFY_PORT, &dstport); + if (kr) { + goto finish; + } thread_set_honor_qlimit(curthread); kr = sendfunc(dstport, - procname, pid, proc_path, timestamp, - linfo->lei_balance, linfo->lei_last_refill, - linfo->lei_limit, linfo->lei_refill_period, - flags); + procname, pid, proc_path, timestamp, + linfo->lei_balance, linfo->lei_last_refill, + linfo->lei_limit, linfo->lei_refill_period, + flags); thread_clear_honor_qlimit(curthread); ipc_port_release_send(dstport); @@ -3800,18 +3930,18 @@ finish: #ifdef __LP64__ void trace_resource_violation(uint16_t code, - struct ledger_entry_info *linfo) + struct ledger_entry_info *linfo) { KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, code), - linfo->lei_balance, linfo->lei_last_refill, - linfo->lei_limit, linfo->lei_refill_period); + linfo->lei_balance, linfo->lei_last_refill, + linfo->lei_limit, linfo->lei_refill_period); } #else /* K32 */ /* TODO: create/find a trace_two_LLs() for K32 systems */ #define MASK32 0xffffffff void trace_resource_violation(uint16_t code, - struct ledger_entry_info *linfo) + struct ledger_entry_info *linfo) { int8_t lownibble = (code & 0x3) * 2; int16_t codeA = (code & 0xffc) | lownibble; @@ -3828,10 +3958,10 @@ trace_resource_violation(uint16_t code, int32_t refill_period_low = linfo->lei_refill_period & MASK32; KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeA), - balance_high, balance_low, - last_refill_high, last_refill_low); + balance_high, balance_low, + last_refill_high, last_refill_low); KERNEL_DBG_IST_SANE(KDBG_CODE(DBG_MACH, DBG_MACH_RESOURCE, codeB), - limit_high, limit_low, - refill_period_high, refill_period_low); + limit_high, limit_low, + refill_period_high, refill_period_low); } #endif /* K64/K32 */ diff --git a/osfmk/kern/task_swap.c b/osfmk/kern/task_swap.c index 02afda05d..42a731856 100644 --- a/osfmk/kern/task_swap.c +++ b/osfmk/kern/task_swap.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * File: kern/task_swap.c - * + * * Task residency management primitives implementation. */ #include @@ -47,7 +47,7 @@ #include #include -#include /* We use something from in here */ +#include /* We use something from in here */ /* * task_swappable: [exported] @@ -61,14 +61,16 @@ task_swappable( task_t task, __unused boolean_t make_swappable) { - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_ARGUMENT); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (task == TASK_NULL) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL) { + return KERN_INVALID_ARGUMENT; + } /* * We don't support swapping, this call is purely advisory. */ - return (KERN_SUCCESS); + return KERN_SUCCESS; } diff --git a/osfmk/kern/task_swap.h b/osfmk/kern/task_swap.h index e3b50d7fd..5972ca36e 100644 --- a/osfmk/kern/task_swap.h +++ b/osfmk/kern/task_swap.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:32 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,98 +38,98 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.4.1 1995/04/07 19:02:38 barbou - * Merged into mainline. - * [95/03/09 barbou] + * Merged into mainline. + * [95/03/09 barbou] * * Revision 1.1.2.2 1995/02/13 15:35:45 barbou - * Merged/ported to MK6. - * + * Merged/ported to MK6. + * * Revision 1.1.1.3 94/08/12 15:44:39 barbou - * VM Merge - Task Swapper. - * - * Changed host_priv_t into host_t. - * [94/07/28 barbou] - * + * VM Merge - Task Swapper. + * + * Changed host_priv_t into host_t. + * [94/07/28 barbou] + * * Revision 1.1.1.2 1994/07/28 15:33:46 barbou - * Copied from IK. - * + * Copied from IK. 
+ * * Revision 3.0.3.2 1994/01/20 19:53:01 chasb - * Remove excessively restrictive copyright notice - * [1994/01/20 17:50:40 chasb] - * + * Remove excessively restrictive copyright notice + * [1994/01/20 17:50:40 chasb] + * * Revision 3.0.3.1 1993/12/20 21:06:49 gupta - * Expanded C O P Y R I G H T - * [1993/12/17 22:19:22 gupta] - * + * Expanded C O P Y R I G H T + * [1993/12/17 22:19:22 gupta] + * * Revision 3.0 1992/12/31 22:08:24 ede - * Initial revision for OSF/1 R1.3 - * + * Initial revision for OSF/1 R1.3 + * * Revision 1.1.4.5 1992/03/16 18:02:52 gmf - * Add TASK_SW_ELIGIBLE flag to swap_flags; prototype - * task_swapout_eligible, task_swapout_ineligible. - * [1992/02/12 22:01:48 gmf] - * + * Add TASK_SW_ELIGIBLE flag to swap_flags; prototype + * task_swapout_eligible, task_swapout_ineligible. + * [1992/02/12 22:01:48 gmf] + * * Revision 1.1.4.4 1992/01/22 22:14:13 gmf - * Change prototype for task_swappable() to use host_priv_t - * instead of host_t. - * [1992/01/17 17:48:13 gmf] - * + * Change prototype for task_swappable() to use host_priv_t + * instead of host_t. + * [1992/01/17 17:48:13 gmf] + * * Revision 1.1.4.3 1991/12/10 17:20:55 gmf - * Add extern declaration for new thread. - * Changed TASK_SW_WAIT flag to TASK_SW_WANT_IN. - * [1991/12/10 16:19:10 gmf] - * + * Add extern declaration for new thread. + * Changed TASK_SW_WAIT flag to TASK_SW_WANT_IN. + * [1991/12/10 16:19:10 gmf] + * * Revision 1.1.4.2 1991/11/21 21:48:35 mmp - * initial task swapping code - * [1991/11/21 21:01:37 mmp] - * + * initial task swapping code + * [1991/11/21 21:01:37 mmp] + * * $EndLog$ */ -/* +/* * File: kern/task_swap.h - * + * * Task residency management primitives declarations. */ -#ifndef _KERN_TASK_SWAP_H_ -#define _KERN_TASK_SWAP_H_ +#ifndef _KERN_TASK_SWAP_H_ +#define _KERN_TASK_SWAP_H_ #include /* * swap states */ -#define TASK_SW_UNSWAPPABLE 1 /* not swappable */ -#define TASK_SW_IN 2 /* swapped in (resident) */ -#define TASK_SW_OUT 3 /* swapped out (non-resident) */ -#define TASK_SW_COMING_IN 4 /* about to be swapped in */ -#define TASK_SW_GOING_OUT 5 /* being swapped out */ +#define TASK_SW_UNSWAPPABLE 1 /* not swappable */ +#define TASK_SW_IN 2 /* swapped in (resident) */ +#define TASK_SW_OUT 3 /* swapped out (non-resident) */ +#define TASK_SW_COMING_IN 4 /* about to be swapped in */ +#define TASK_SW_GOING_OUT 5 /* being swapped out */ /* * swap flags */ -#define TASK_SW_MAKE_UNSWAPPABLE 0x01 /* make it unswappable */ -#define TASK_SW_WANT_IN 0x02 /* sleeping on state */ -#define TASK_SW_ELIGIBLE 0x04 /* eligible for swapping */ +#define TASK_SW_MAKE_UNSWAPPABLE 0x01 /* make it unswappable */ +#define TASK_SW_WANT_IN 0x02 /* sleeping on state */ +#define TASK_SW_ELIGIBLE 0x04 /* eligible for swapping */ /* * exported routines */ extern void task_swapper_init(void); extern kern_return_t task_swapin( - task_t, /* task */ - boolean_t); /* make_unswappable */ + task_t, /* task */ + boolean_t); /* make_unswappable */ extern kern_return_t task_swapout(task_t /* task */); extern void task_swapper(void); extern void task_swap_swapout_thread(void); extern void compute_vm_averages(void); extern kern_return_t task_swappable( - host_priv_t, /* host */ - task_t, /* task */ - boolean_t); /* swappable */ + host_priv_t, /* host */ + task_t, /* task */ + boolean_t); /* swappable */ extern void task_swapout_eligible(task_t /* task */); extern void task_swapout_ineligible(task_t /* task */); extern void swapout_ast(void); -#endif /* _KERN_TASK_SWAP_H_ */ +#endif /* _KERN_TASK_SWAP_H_ */ diff --git 
a/osfmk/kern/telemetry.c b/osfmk/kern/telemetry.c index 723d48f5b..7df3ce15b 100644 --- a/osfmk/kern/telemetry.c +++ b/osfmk/kern/telemetry.c @@ -60,20 +60,20 @@ #define TELEMETRY_DEBUG 0 -extern int proc_pid(void *); -extern char *proc_name_address(void *p); +extern int proc_pid(void *); +extern char *proc_name_address(void *p); extern uint64_t proc_uniqueid(void *p); extern uint64_t proc_was_throttled(void *p); extern uint64_t proc_did_throttle(void *p); -extern int proc_selfpid(void); +extern int proc_selfpid(void); extern boolean_t task_did_exec(task_t task); extern boolean_t task_is_exec_copy(task_t task); struct micro_snapshot_buffer { - vm_offset_t buffer; - uint32_t size; - uint32_t current_position; - uint32_t end_point; + vm_offset_t buffer; + uint32_t size; + uint32_t current_position; + uint32_t end_point; }; void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro_snapshot_buffer * current_buffer); @@ -83,12 +83,12 @@ int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark #define TELEMETRY_DEFAULT_BUFFER_SIZE (16*1024) #define TELEMETRY_MAX_BUFFER_SIZE (64*1024) -#define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification -#define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication +#define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification +#define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication -uint32_t telemetry_sample_rate = 0; -volatile boolean_t telemetry_needs_record = FALSE; -volatile boolean_t telemetry_needs_timer_arming_record = FALSE; +uint32_t telemetry_sample_rate = 0; +volatile boolean_t telemetry_needs_record = FALSE; +volatile boolean_t telemetry_needs_timer_arming_record = FALSE; /* * If TRUE, record micro-stackshot samples for all tasks. @@ -108,8 +108,8 @@ uint32_t telemetry_timestamp = 0; */ struct micro_snapshot_buffer telemetry_buffer = {0, 0, 0, 0}; -int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked? -int telemetry_buffer_notify_at = 0; +int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked? 
+int telemetry_buffer_notify_at = 0; lck_grp_t telemetry_lck_grp; lck_mtx_t telemetry_mtx; @@ -122,10 +122,11 @@ lck_mtx_t telemetry_pmi_mtx; #define TELEMETRY_PMI_LOCK() do { lck_mtx_lock(&telemetry_pmi_mtx); } while (0) #define TELEMETRY_PMI_UNLOCK() do { lck_mtx_unlock(&telemetry_pmi_mtx); } while (0) -void telemetry_init(void) +void +telemetry_init(void) { kern_return_t ret; - uint32_t telemetry_notification_leeway; + uint32_t telemetry_notification_leeway; lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL); lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL); @@ -135,8 +136,9 @@ void telemetry_init(void) telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE; } - if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE) + if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE) { telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE; + } ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size, VM_KERN_MEMORY_DIAG); if (ret != KERN_SUCCESS) { @@ -153,7 +155,7 @@ void telemetry_init(void) } if (telemetry_notification_leeway >= telemetry_buffer.size) { printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n", - telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY); + telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY); telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY; } telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway; @@ -166,18 +168,16 @@ void telemetry_init(void) * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args. */ if (!PE_parse_boot_argn("telemetry_sample_all_tasks", &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) { - #if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) telemetry_sample_all_tasks = FALSE; #else telemetry_sample_all_tasks = TRUE; #endif /* CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) */ - } kprintf("Telemetry: Sampling %stasks once per %u second%s\n", - (telemetry_sample_all_tasks) ? "all " : "", - telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s"); + (telemetry_sample_all_tasks) ? "all " : "", + telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s"); } /* @@ -281,14 +281,15 @@ telemetry_is_active(thread_t thread) * sample now. No need to do this one at the AST because we're already at * a safe place in this system call. */ -int telemetry_timer_event(__unused uint64_t deadline, __unused uint64_t interval, __unused uint64_t leeway) +int +telemetry_timer_event(__unused uint64_t deadline, __unused uint64_t interval, __unused uint64_t leeway) { if (telemetry_needs_timer_arming_record == TRUE) { telemetry_needs_timer_arming_record = FALSE; telemetry_take_sample(current_thread(), kTimerArmingRecord | kUserMode, &telemetry_buffer); } - return (0); + return 0; } #if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) @@ -299,7 +300,8 @@ telemetry_pmi_handler(bool user_mode, __unused void *ctx) } #endif /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */ -int telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period) +int +telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period) { #if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) static boolean_t sample_all_tasks_aside = FALSE; @@ -366,7 +368,8 @@ out: * Mark the current thread for an interrupt-based * telemetry record, to be sampled at the next AST boundary. 
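
telemetry_mark_curthread(), described in the comment above, follows a classic split: the interrupt path only sets cheap per-thread flags, and the expensive sample runs later at a safe point, the AST boundary. A hedged standalone sketch of that deferral with invented names (a C11 atomic stands in for the kernel's per-thread AST bits):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define AST_TELEMETRY_USER 0x1u
#define AST_TELEMETRY_PMI  0x2u

static atomic_uint pending_ast; /* per-thread in the kernel; global here */

/* Callable from "interrupt" context: only flips bits, does no real work. */
static void
mark_curthread(bool interrupted_userspace, bool pmi)
{
    unsigned bits = interrupted_userspace ? AST_TELEMETRY_USER : 0;

    if (pmi) {
        bits |= AST_TELEMETRY_PMI;
    }
    atomic_fetch_or(&pending_ast, bits);
}

/* A safe point, standing in for the AST boundary. */
static void
ast_boundary(void)
{
    unsigned bits = atomic_exchange(&pending_ast, 0);

    if (bits != 0) {
        printf("taking sample, flags=0x%x\n", bits); /* the expensive part */
    }
}

int
main(void)
{
    mark_curthread(true, false); /* the "interrupt" marks the thread... */
    ast_boundary();              /* ...and the sample happens here */
    return 0;
}
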
*/ -void telemetry_mark_curthread(boolean_t interrupted_userspace, boolean_t pmi) +void +telemetry_mark_curthread(boolean_t interrupted_userspace, boolean_t pmi) { uint32_t ast_bits = 0; thread_t thread = current_thread(); @@ -389,7 +392,8 @@ void telemetry_mark_curthread(boolean_t interrupted_userspace, boolean_t pmi) ast_propagate(thread); } -void compute_telemetry(void *arg __unused) +void +compute_telemetry(void *arg __unused) { if (telemetry_sample_all_tasks || (telemetry_active_tasks > 0)) { if ((++telemetry_timestamp) % telemetry_sample_rate == 0) { @@ -416,7 +420,8 @@ telemetry_notify_user(void) ipc_port_release_send(user_port); } -void telemetry_ast(thread_t thread, ast_t reasons) +void +telemetry_ast(thread_t thread, ast_t reasons) { assert((reasons & AST_TELEMETRY_ALL) != 0); @@ -426,7 +431,7 @@ void telemetry_ast(thread_t thread, ast_t reasons) } if (reasons & (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL)) { record_type |= (reasons & AST_TELEMETRY_PMI) ? kPMIRecord : - kInterruptRecord; + kInterruptRecord; } uint8_t user_telemetry = (reasons & AST_TELEMETRY_USER) ? kUserMode : 0; @@ -436,7 +441,8 @@ void telemetry_ast(thread_t thread, ast_t reasons) telemetry_take_sample(thread, microsnapshot_flags, &telemetry_buffer); } -void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro_snapshot_buffer * current_buffer) +void +telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro_snapshot_buffer * current_buffer) { task_t task; void *p; @@ -451,17 +457,19 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct uint32_t tmp = 0; boolean_t notify = FALSE; - if (thread == THREAD_NULL) + if (thread == THREAD_NULL) { return; + } task = thread->task; - if ((task == TASK_NULL) || (task == kernel_task) || task_did_exec(task) || task_is_exec_copy(task)) + if ((task == TASK_NULL) || (task == kernel_task) || task_did_exec(task) || task_is_exec_copy(task)) { return; + } /* telemetry_XXX accessed outside of lock for instrumentation only */ KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START, - microsnapshot_flags, telemetry_bytes_since_last_mark, 0, - (&telemetry_buffer != current_buffer)); + microsnapshot_flags, telemetry_bytes_since_last_mark, 0, + (&telemetry_buffer != current_buffer)); p = get_bsdtask_info(task); @@ -480,10 +488,10 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct /* * Find the actual [slid] address of the shared cache's UUID, and copy it in from userland. */ - int shared_cache_uuid_valid = 0; - uint64_t shared_cache_base_address; - struct _dyld_cache_header shared_cache_header; - uint64_t shared_cache_slide; + int shared_cache_uuid_valid = 0; + uint64_t shared_cache_base_address; + struct _dyld_cache_header shared_cache_header; + uint64_t shared_cache_slide; /* * Don't copy in the entire shared cache header; we only need the UUID. 
Calculate the @@ -493,8 +501,8 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct vm_shared_region_t sr = vm_shared_region_get(task); if (sr != NULL) { if ((vm_shared_region_start_address(sr, &shared_cache_base_address) == KERN_SUCCESS) && - (copyin(shared_cache_base_address + sc_header_uuid_offset, (char *)&shared_cache_header.uuid, - sizeof (shared_cache_header.uuid)) == 0)) { + (copyin(shared_cache_base_address + sc_header_uuid_offset, (char *)&shared_cache_header.uuid, + sizeof(shared_cache_header.uuid)) == 0)) { shared_cache_uuid_valid = 1; shared_cache_slide = vm_shared_region_get_slide(sr); } @@ -508,8 +516,8 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct * * XXX - make this common with kdp? */ - uint32_t uuid_info_count = 0; - mach_vm_address_t uuid_info_addr = 0; + uint32_t uuid_info_count = 0; + mach_vm_address_t uuid_info_addr = 0; if (task_has_64Bit_addr(task)) { struct user64_dyld_all_image_infos task_image_infos; if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) { @@ -543,7 +551,7 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct uint32_t uuid_info_size = (uint32_t)(task_has_64Bit_addr(thread->task) ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info)); uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size; - char *uuid_info_array = NULL; + char *uuid_info_array = NULL; if (uuid_info_count > 0) { if ((uuid_info_array = (char *)kalloc(uuid_info_array_size)) == NULL) { @@ -565,7 +573,7 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct * Look for a dispatch queue serial number, and copy it in from userland if present. */ uint64_t dqserialnum = 0; - int dqserialnum_valid = 0; + int dqserialnum_valid = 0; uint64_t dqkeyaddr = thread_dispatchqaddr(thread); if (dqkeyaddr != 0) { @@ -589,8 +597,9 @@ void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct * then we cannot take the sample. Meant to allow us to deallocate the window * buffer if it is disabled. 
*/ - if (!current_buffer->buffer) + if (!current_buffer->buffer) { goto cancel_sample; + } /* * We do the bulk of the operation under the telemetry lock, on assumption that @@ -646,7 +655,7 @@ copytobuffer: tsnap->user_time_in_terminated_threads = task->total_user_time; tsnap->system_time_in_terminated_threads = task->total_system_time; tsnap->suspend_count = task->suspend_count; - tsnap->task_size = (typeof(tsnap->task_size)) (get_task_phys_footprint(task) / PAGE_SIZE); + tsnap->task_size = (typeof(tsnap->task_size))(get_task_phys_footprint(task) / PAGE_SIZE); tsnap->faults = task->faults; tsnap->pageins = task->pageins; tsnap->cow_faults = task->cow_faults; @@ -691,7 +700,7 @@ copytobuffer: if (shared_cache_uuid_valid) { tsnap->shared_cache_slide = shared_cache_slide; - bcopy(shared_cache_header.uuid, tsnap->shared_cache_identifier, sizeof (shared_cache_header.uuid)); + bcopy(shared_cache_header.uuid, tsnap->shared_cache_identifier, sizeof(shared_cache_header.uuid)); } current_buffer->current_position += sizeof(struct task_snapshot); @@ -748,7 +757,7 @@ copytobuffer: thsnap->ts_qos = thread->effective_policy.thep_qos; thsnap->ts_rqos = thread->requested_policy.thrp_qos; thsnap->ts_rqos_override = MAX(thread->requested_policy.thrp_qos_override, - thread->requested_policy.thrp_qos_workq_override); + thread->requested_policy.thrp_qos_workq_override); if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) { thsnap->ss_flags |= kThreadDarwinBG; @@ -783,8 +792,8 @@ copytobuffer: } thsnap->ss_flags |= kHasDispatchSerial; - bcopy(&dqserialnum, (char *)current_buffer->buffer + current_buffer->current_position, sizeof (dqserialnum)); - current_buffer->current_position += sizeof (dqserialnum); + bcopy(&dqserialnum, (char *)current_buffer->buffer + current_buffer->current_position, sizeof(dqserialnum)); + current_buffer->current_position += sizeof(dqserialnum); } if (user64) { @@ -798,7 +807,7 @@ copytobuffer: * If we can't fit this entire stacktrace then cancel this record, wrap to the beginning, * and start again there so that we always store a full record. 
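 *
 * Worked example (illustrative numbers only): with size == 4096,
 * current_position == 4000 and framesize == 8, only (4096 - 4000) / 8 == 12
 * frames fit; a backtrace with btcount == 16 therefore pulls end_point back
 * to current_record_start, resets current_position to 0, and re-copies the
 * whole record from the top of the buffer via the copytobuffer label.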
*/ - if ((current_buffer->size - current_buffer->current_position)/framesize < btcount) { + if ((current_buffer->size - current_buffer->current_position) / framesize < btcount) { current_buffer->end_point = current_record_start; current_buffer->current_position = 0; if (current_record_start == 0) { @@ -808,7 +817,7 @@ copytobuffer: goto copytobuffer; } - for (bti=0; bti < btcount; bti++, current_buffer->current_position += framesize) { + for (bti = 0; bti < btcount; bti++, current_buffer->current_position += framesize) { if (framesize == 8) { *(uint64_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = frames[bti]; } else { @@ -842,8 +851,8 @@ cancel_sample: TELEMETRY_UNLOCK(); KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END, - notify, telemetry_bytes_since_last_mark, - current_buffer->current_position, current_buffer->end_point); + notify, telemetry_bytes_since_last_mark, + current_buffer->current_position, current_buffer->end_point); if (notify) { telemetry_notify_user(); @@ -877,19 +886,21 @@ log_telemetry_output(vm_offset_t buf, uint32_t pos, uint32_t sz) } #endif -int telemetry_gather(user_addr_t buffer, uint32_t *length, boolean_t mark) +int +telemetry_gather(user_addr_t buffer, uint32_t *length, boolean_t mark) { return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer); } -int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark, struct micro_snapshot_buffer * current_buffer) +int +telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark, struct micro_snapshot_buffer * current_buffer) { int result = 0; uint32_t oldest_record_offset; KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START, - mark, telemetry_bytes_since_last_mark, 0, - (&telemetry_buffer != current_buffer)); + mark, telemetry_bytes_since_last_mark, 0, + (&telemetry_buffer != current_buffer)); TELEMETRY_LOCK(); @@ -911,7 +922,6 @@ int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark do { if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) || ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) { - if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) { /* * There is no magic number at the start of the buffer, which means @@ -931,8 +941,9 @@ int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark break; } - if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) + if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) { break; + } /* * There are no alignment guarantees for micro-stackshot records, so we must search at each @@ -948,7 +959,7 @@ int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark if (oldest_record_offset != 0) { #if TELEMETRY_DEBUG log_telemetry_output(current_buffer->buffer, oldest_record_offset, - current_buffer->end_point - oldest_record_offset); + current_buffer->end_point - oldest_record_offset); #endif if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer, current_buffer->end_point - oldest_record_offset)) != 0) { @@ -979,10 +990,10 @@ out: TELEMETRY_UNLOCK(); KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END, - current_buffer->current_position, *length, - current_buffer->end_point, (&telemetry_buffer != current_buffer)); + current_buffer->current_position, *length, + 
current_buffer->end_point, (&telemetry_buffer != current_buffer)); - return (result); + return result; } /************************/ @@ -1014,18 +1025,18 @@ out: #define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */ -vm_offset_t bootprofile_buffer = 0; -uint32_t bootprofile_buffer_size = 0; -uint32_t bootprofile_buffer_current_position = 0; -uint32_t bootprofile_interval_ms = 0; -uint32_t bootprofile_stackshot_flags = 0; -uint64_t bootprofile_interval_abs = 0; -uint64_t bootprofile_next_deadline = 0; -uint32_t bootprofile_all_procs = 0; -char bootprofile_proc_name[17]; +vm_offset_t bootprofile_buffer = 0; +uint32_t bootprofile_buffer_size = 0; +uint32_t bootprofile_buffer_current_position = 0; +uint32_t bootprofile_interval_ms = 0; +uint32_t bootprofile_stackshot_flags = 0; +uint64_t bootprofile_interval_abs = 0; +uint64_t bootprofile_next_deadline = 0; +uint32_t bootprofile_all_procs = 0; +char bootprofile_proc_name[17]; uint64_t bootprofile_delta_since_timestamp = 0; -lck_grp_t bootprofile_lck_grp; -lck_mtx_t bootprofile_mtx; +lck_grp_t bootprofile_lck_grp; +lck_mtx_t bootprofile_mtx; enum { @@ -1035,7 +1046,7 @@ enum { } bootprofile_type = kBootProfileDisabled; -static timer_call_data_t bootprofile_timer_call_entry; +static timer_call_data_t bootprofile_timer_call_entry; #define BOOTPROFILE_LOCK() do { lck_mtx_lock(&bootprofile_mtx); } while(0) #define BOOTPROFILE_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&bootprofile_mtx) @@ -1045,7 +1056,8 @@ static void bootprofile_timer_call( timer_call_param_t param0, timer_call_param_t param1); -void bootprofile_init(void) +void +bootprofile_init(void) { kern_return_t ret; char type[32]; @@ -1057,8 +1069,9 @@ void bootprofile_init(void) bootprofile_buffer_size = 0; } - if (bootprofile_buffer_size > BOOTPROFILE_MAX_BUFFER_SIZE) + if (bootprofile_buffer_size > BOOTPROFILE_MAX_BUFFER_SIZE) { bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE; + } if (!PE_parse_boot_argn("bootprofile_interval_ms", &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) { bootprofile_interval_ms = 0; @@ -1099,21 +1112,21 @@ void bootprofile_init(void) } bzero((void *) bootprofile_buffer, bootprofile_buffer_size); - kprintf("Boot profile: Sampling %s once per %u ms at %s\n", bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms, - bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown")); + kprintf("Boot profile: Sampling %s once per %u ms at %s\n", bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms, + bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? 
"wake" : "unknown")); timer_call_setup(&bootprofile_timer_call_entry, - bootprofile_timer_call, - NULL); + bootprofile_timer_call, + NULL); if (bootprofile_type == kBootProfileStartTimerAtBoot) { bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs; timer_call_enter_with_leeway(&bootprofile_timer_call_entry, - NULL, - bootprofile_next_deadline, - 0, - TIMER_CALL_SYS_NORMAL, - FALSE); + NULL, + bootprofile_next_deadline, + 0, + TIMER_CALL_SYS_NORMAL, + FALSE); } } @@ -1123,11 +1136,11 @@ bootprofile_wake_from_sleep(void) if (bootprofile_type == kBootProfileStartTimerAtWake) { bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs; timer_call_enter_with_leeway(&bootprofile_timer_call_entry, - NULL, - bootprofile_next_deadline, - 0, - TIMER_CALL_SYS_NORMAL, - FALSE); + NULL, + bootprofile_next_deadline, + 0, + TIMER_CALL_SYS_NORMAL, + FALSE); } } @@ -1155,8 +1168,7 @@ bootprofile_timer_call( if ((current_task() != NULL) && (current_task()->bsd_info != NULL) && (0 == strncmp(bootprofile_proc_name, proc_name_address(current_task()->bsd_info), 17))) { pid_to_profile = proc_selfpid(); - } - else { + } else { /* * Process-specific boot profiling requested but the on-core process is * something else. Nothing to do here. @@ -1168,9 +1180,8 @@ bootprofile_timer_call( /* initiate a stackshot with whatever portion of the buffer is left */ if (bootprofile_buffer_current_position < bootprofile_buffer_size) { - uint32_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO - | STACKSHOT_GET_GLOBAL_MEM_STATS; + | STACKSHOT_GET_GLOBAL_MEM_STATS; #if __x86_64__ flags |= STACKSHOT_SAVE_KEXT_LOADINFO; #endif /* __x86_64__ */ @@ -1180,7 +1191,7 @@ bootprofile_timer_call( flags |= bootprofile_stackshot_flags; if ((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) && (bootprofile_delta_since_timestamp == 0)) { /* Can't take deltas until the first one */ - flags &= ~ STACKSHOT_COLLECT_DELTA_SNAPSHOT; + flags &= ~STACKSHOT_COLLECT_DELTA_SNAPSHOT; } uint64_t timestamp = 0; @@ -1189,8 +1200,8 @@ bootprofile_timer_call( } kern_return_t r = stack_snapshot_from_kernel( - pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position), - bootprofile_buffer_size - bootprofile_buffer_current_position, + pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position), + bootprofile_buffer_size - bootprofile_buffer_current_position, flags, bootprofile_delta_since_timestamp, &retbytes); /* @@ -1204,8 +1215,8 @@ bootprofile_timer_call( goto reprogram; } - if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT && - r == KERN_SUCCESS) { + if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT && + r == KERN_SUCCESS) { bootprofile_delta_since_timestamp = timestamp; } @@ -1227,17 +1238,18 @@ reprogram: } clock_deadline_for_periodic_event(bootprofile_interval_abs, - mach_absolute_time(), - &bootprofile_next_deadline); + mach_absolute_time(), + &bootprofile_next_deadline); timer_call_enter_with_leeway(&bootprofile_timer_call_entry, - NULL, - bootprofile_next_deadline, - 0, - TIMER_CALL_SYS_NORMAL, - FALSE); + NULL, + bootprofile_next_deadline, + 0, + TIMER_CALL_SYS_NORMAL, + FALSE); } -void bootprofile_get(void **buffer, uint32_t *length) +void +bootprofile_get(void **buffer, uint32_t *length) { BOOTPROFILE_LOCK(); *buffer = (void*) bootprofile_buffer; @@ -1245,7 +1257,8 @@ void bootprofile_get(void **buffer, uint32_t *length) BOOTPROFILE_UNLOCK(); } -int bootprofile_gather(user_addr_t buffer, uint32_t *length) 
+int +bootprofile_gather(user_addr_t buffer, uint32_t *length) { int result = 0; @@ -1275,5 +1288,5 @@ out: BOOTPROFILE_UNLOCK(); - return (result); + return result; } diff --git a/osfmk/kern/telemetry.h b/osfmk/kern/telemetry.h index 166b31c1a..751d6b829 100644 --- a/osfmk/kern/telemetry.h +++ b/osfmk/kern/telemetry.h @@ -2,7 +2,7 @@ * Copyright (c) 2012-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -60,7 +60,7 @@ extern void telemetry_ast(thread_t thread, uint32_t reasons); extern int telemetry_gather(user_addr_t buffer, uint32_t *length, boolean_t mark); extern void telemetry_mark_curthread(boolean_t interrupted_userspace, - boolean_t pmi); + boolean_t pmi); extern void telemetry_task_ctl(task_t task, uint32_t reason, int enable_disable); extern void telemetry_task_ctl_locked(task_t task, uint32_t reason, int enable_disable); diff --git a/osfmk/kern/test_lock.c b/osfmk/kern/test_lock.c index 08056068a..2e1069bb9 100644 --- a/osfmk/kern/test_lock.c +++ b/osfmk/kern/test_lock.c @@ -21,33 +21,33 @@ #include -static lck_mtx_t test_mtx; -static lck_grp_t test_mtx_grp; -static lck_grp_attr_t test_mtx_grp_attr; -static lck_attr_t test_mtx_attr; +static lck_mtx_t test_mtx; +static lck_grp_t test_mtx_grp; +static lck_grp_attr_t test_mtx_grp_attr; +static lck_attr_t test_mtx_attr; static lck_grp_t test_mtx_stats_grp; -static lck_grp_attr_t test_mtx_stats_grp_attr; -static lck_attr_t test_mtx_stats_attr; +static lck_grp_attr_t test_mtx_stats_grp_attr; +static lck_attr_t test_mtx_stats_attr; struct lck_mtx_test_stats_elem { - lck_spin_t lock; - uint64_t samples; - uint64_t avg; - uint64_t max; - uint64_t min; - uint64_t tot; + lck_spin_t lock; + uint64_t samples; + uint64_t avg; + uint64_t max; + uint64_t min; + uint64_t tot; }; -#define TEST_MTX_LOCK_STATS 0 -#define TEST_MTX_TRY_LOCK_STATS 1 -#define TEST_MTX_LOCK_SPIN_STATS 2 -#define TEST_MTX_LOCK_SPIN_ALWAYS_STATS 3 -#define TEST_MTX_TRY_LOCK_SPIN_STATS 4 -#define TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS 5 -#define TEST_MTX_UNLOCK_MTX_STATS 6 -#define TEST_MTX_UNLOCK_SPIN_STATS 7 -#define TEST_MTX_MAX_STATS 8 +#define TEST_MTX_LOCK_STATS 0 +#define TEST_MTX_TRY_LOCK_STATS 1 +#define TEST_MTX_LOCK_SPIN_STATS 2 +#define TEST_MTX_LOCK_SPIN_ALWAYS_STATS 3 +#define TEST_MTX_TRY_LOCK_SPIN_STATS 4 +#define TEST_MTX_TRY_LOCK_SPIN_ALWAYS_STATS 5 +#define TEST_MTX_UNLOCK_MTX_STATS 6 +#define TEST_MTX_UNLOCK_SPIN_STATS 7 +#define TEST_MTX_MAX_STATS 8 struct lck_mtx_test_stats_elem 
lck_mtx_test_stats[TEST_MTX_MAX_STATS]; atomic_bool enabled = TRUE; @@ -62,8 +62,8 @@ init_test_mtx_stats(void) lck_attr_setdefault(&test_mtx_stats_attr); atomic_store(&enabled, TRUE); - for(i = 0; i < TEST_MTX_MAX_STATS; i++){ - memset(&lck_mtx_test_stats[i], 0 , sizeof(struct lck_mtx_test_stats_elem)); + for (i = 0; i < TEST_MTX_MAX_STATS; i++) { + memset(&lck_mtx_test_stats[i], 0, sizeof(struct lck_mtx_test_stats_elem)); lck_mtx_test_stats[i].min = ~0; lck_spin_init(&lck_mtx_test_stats[i].lock, &test_mtx_stats_grp, &test_mtx_stats_attr); } @@ -87,10 +87,12 @@ update_test_mtx_stats( stat->samples++; stat->tot += elapsed; stat->avg = stat->tot / stat->samples; - if (stat->max < elapsed) + if (stat->max < elapsed) { stat->max = elapsed; - if (stat->min > elapsed) + } + if (stat->min > elapsed) { stat->min = elapsed; + } lck_spin_unlock(&stat->lock); } } @@ -247,10 +249,11 @@ lck_mtx_test_init(void) * This should be substituted with a version * of dispatch_once for kernel (rdar:39537874) */ - if (os_atomic_load(&first, acquire) >= 2) + if (os_atomic_load(&first, acquire) >= 2) { return; + } - if (os_atomic_cmpxchg(&first, 0, 1, relaxed)){ + if (os_atomic_cmpxchg(&first, 0, 1, relaxed)) { lck_grp_attr_setdefault(&test_mtx_grp_attr); lck_grp_init(&test_mtx_grp, "testlck_mtx", &test_mtx_grp_attr); lck_attr_setdefault(&test_mtx_attr); @@ -261,7 +264,9 @@ lck_mtx_test_init(void) os_atomic_inc(&first, release); } - while(os_atomic_load(&first, acquire) < 2); + while (os_atomic_load(&first, acquire) < 2) { + ; + } } void @@ -372,7 +377,7 @@ lck_mtx_test_unlock_spin(void) update_test_mtx_stats(start, mach_absolute_time(), TEST_MTX_UNLOCK_SPIN_STATS); } -#define WARMUP_ITER 1000 +#define WARMUP_ITER 1000 int lck_mtx_test_mtx_uncontended_loop_time( @@ -494,7 +499,6 @@ lck_mtx_test_mtx_uncontended_loop_time( string_off += ret; for (i = 0; i < TEST_MTX_MAX_STATS - 2; i++) { - ret = snprintf(&buffer[string_off], size, "total time %llu ns total run time %llu ns ", tot_time[i], run_time[i]); size -= ret; string_off += ret; @@ -629,7 +633,7 @@ lck_mtx_test_mtx_uncontended( lck_mtx_test_mtx_lock_uncontended(iter); lck_mtx_test_mtx_spin_uncontended(iter); - return get_test_mtx_stats_string(buffer,size); + return get_test_mtx_stats_string(buffer, size); } static int synch; @@ -659,41 +663,53 @@ test_mtx_lock_unlock_contended_thread( printf("Starting thread %p\n", current_thread()); - while(os_atomic_load(&info->other_thread, acquire) == NULL); + while (os_atomic_load(&info->other_thread, acquire) == NULL) { + ; + } other_thread = info->other_thread; printf("Other thread %p\n", other_thread); my_locked = &info->my_locked; - other_locked = info->other_locked; + other_locked = info->other_locked; *my_locked = 0; val = os_atomic_inc(&synch, relaxed); - while(os_atomic_load(&synch, relaxed) < 2); + while (os_atomic_load(&synch, relaxed) < 2) { + ; + } //warming up the test for (i = 0; i < WARMUP_ITER; i++) { lck_mtx_test_lock(); - os_atomic_xchg(my_locked, 1 , relaxed); + os_atomic_xchg(my_locked, 1, relaxed); if (i != WARMUP_ITER - 1) { - while(os_atomic_load(&other_thread->state, relaxed) & TH_RUN); - os_atomic_xchg(my_locked, 0 , relaxed); + while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) { + ; + } + os_atomic_xchg(my_locked, 0, relaxed); } lck_mtx_test_unlock(); - if (i != WARMUP_ITER - 1) - while(os_atomic_load(other_locked, relaxed) == 0); + if (i != WARMUP_ITER - 1) { + while (os_atomic_load(other_locked, relaxed) == 0) { + ; + } + } } printf("warmup done %p\n", current_thread()); 
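	/*
	 * The synch counter doubles as a two-thread phase barrier: each thread
	 * increments it once per phase and then spins until both arrivals are
	 * visible (< 2 at startup, < 4 after warmup, < 6 before the measured
	 * loop below).
	 */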
os_atomic_inc(&synch, relaxed); - while(os_atomic_load(&synch, relaxed) < 4); + while (os_atomic_load(&synch, relaxed) < 4) { + ; + } //erase statistics - if (val == 1) + if (val == 1) { erase_all_test_mtx_stats(); + } *my_locked = 0; /* @@ -701,21 +717,27 @@ test_mtx_lock_unlock_contended_thread( * concurrently. */ os_atomic_inc(&synch, relaxed); - while(os_atomic_load(&synch, relaxed) < 6); + while (os_atomic_load(&synch, relaxed) < 6) { + ; + } for (i = 0; i < iterations; i++) { lck_mtx_test_lock(); - os_atomic_xchg(my_locked, 1 , relaxed); + os_atomic_xchg(my_locked, 1, relaxed); if (i != iterations - 1) { - while(os_atomic_load(&other_thread->state, relaxed) & TH_RUN); - os_atomic_xchg(my_locked, 0 , relaxed); + while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) { + ; + } + os_atomic_xchg(my_locked, 0, relaxed); } lck_mtx_test_unlock_mtx(); - if (i != iterations - 1) - while(os_atomic_load(other_locked, relaxed) == 0); - + if (i != iterations - 1) { + while (os_atomic_load(other_locked, relaxed) == 0) { + ; + } + } } os_atomic_inc(&wait_barrier, relaxed); @@ -740,7 +762,7 @@ lck_mtx_test_mtx_contended( erase_all_test_mtx_stats(); targs[0].other_thread = NULL; - targs[1].other_thread = NULL; + targs[1].other_thread = NULL; result = kernel_thread_start((thread_continue_t)test_mtx_lock_unlock_contended_thread, &targs[0], &thread1); if (result != KERN_SUCCESS) { @@ -777,7 +799,7 @@ lck_mtx_test_mtx_contended( thread_deallocate(thread1); thread_deallocate(thread2); - return get_test_mtx_stats_string(buffer, buffer_size); + return get_test_mtx_stats_string(buffer, buffer_size); } static void @@ -793,7 +815,9 @@ test_mtx_lck_unlock_contended_loop_time_thread( printf("Starting thread %p\n", current_thread()); - while(os_atomic_load(&info->other_thread, acquire) == NULL); + while (os_atomic_load(&info->other_thread, acquire) == NULL) { + ; + } other_thread = info->other_thread; printf("Other thread %p\n", other_thread); @@ -803,28 +827,37 @@ test_mtx_lck_unlock_contended_loop_time_thread( *my_locked = 0; val = os_atomic_inc(&synch, relaxed); - while(os_atomic_load(&synch, relaxed) < 2); + while (os_atomic_load(&synch, relaxed) < 2) { + ; + } //warming up the test for (i = 0; i < WARMUP_ITER; i++) { lck_mtx_lock(&test_mtx); - os_atomic_xchg(my_locked, 1 , relaxed); + os_atomic_xchg(my_locked, 1, relaxed); if (i != WARMUP_ITER - 1) { - while(os_atomic_load(&other_thread->state, relaxed) & TH_RUN); - os_atomic_xchg(my_locked, 0 , relaxed); + while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) { + ; + } + os_atomic_xchg(my_locked, 0, relaxed); } lck_mtx_unlock(&test_mtx); - if (i != WARMUP_ITER - 1) - while(os_atomic_load(other_locked, relaxed) == 0); + if (i != WARMUP_ITER - 1) { + while (os_atomic_load(other_locked, relaxed) == 0) { + ; + } + } } printf("warmup done %p\n", current_thread()); os_atomic_inc(&synch, relaxed); - while(os_atomic_load(&synch, relaxed) < 4); + while (os_atomic_load(&synch, relaxed) < 4) { + ; + } *my_locked = 0; @@ -833,7 +866,9 @@ test_mtx_lck_unlock_contended_loop_time_thread( * concurrently. 
*/ os_atomic_inc(&synch, relaxed); - while(os_atomic_load(&synch, relaxed) < 6); + while (os_atomic_load(&synch, relaxed) < 6) { + ; + } if (val == 1) { start_loop_time_run = thread_get_runtime_self(); @@ -843,16 +878,21 @@ test_mtx_lck_unlock_contended_loop_time_thread( for (i = 0; i < iterations; i++) { lck_mtx_lock(&test_mtx); - os_atomic_xchg(my_locked, 1 , relaxed); + os_atomic_xchg(my_locked, 1, relaxed); if (i != iterations - 1) { - while(os_atomic_load(&other_thread->state, relaxed) & TH_RUN); - os_atomic_xchg(my_locked, 0 , relaxed); + while (os_atomic_load(&other_thread->state, relaxed) & TH_RUN) { + ; + } + os_atomic_xchg(my_locked, 0, relaxed); } lck_mtx_unlock(&test_mtx); - if (i != iterations - 1) - while(os_atomic_load(other_locked, relaxed) == 0); + if (i != iterations - 1) { + while (os_atomic_load(other_locked, relaxed) == 0) { + ; + } + } } if (val == 1) { @@ -929,4 +969,3 @@ lck_mtx_test_mtx_contended_loop_time( return ret; } - diff --git a/osfmk/kern/thread.c b/osfmk/kern/thread.c index 84e0277b0..7dfcb02b3 100644 --- a/osfmk/kern/thread.c +++ b/osfmk/kern/thread.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -147,6 +147,7 @@ #include + /* * Exported interfaces */ @@ -157,38 +158,38 @@ #include #include -static struct zone *thread_zone; -static lck_grp_attr_t thread_lck_grp_attr; -lck_attr_t thread_lck_attr; -lck_grp_t thread_lck_grp; +static struct zone *thread_zone; +static lck_grp_attr_t thread_lck_grp_attr; +lck_attr_t thread_lck_attr; +lck_grp_t thread_lck_grp; -struct zone *thread_qos_override_zone; +struct zone *thread_qos_override_zone; -decl_simple_lock_data(static,thread_stack_lock) -static queue_head_t thread_stack_queue; +decl_simple_lock_data(static, thread_stack_lock) +static queue_head_t thread_stack_queue; -decl_simple_lock_data(static,thread_terminate_lock) -static queue_head_t thread_terminate_queue; +decl_simple_lock_data(static, thread_terminate_lock) +static queue_head_t thread_terminate_queue; -static queue_head_t thread_deallocate_queue; +static queue_head_t thread_deallocate_queue; -static queue_head_t turnstile_deallocate_queue; +static queue_head_t turnstile_deallocate_queue; -static queue_head_t crashed_threads_queue; +static queue_head_t crashed_threads_queue; -static queue_head_t workq_deallocate_queue; +static queue_head_t workq_deallocate_queue; -decl_simple_lock_data(static,thread_exception_lock) -static queue_head_t thread_exception_queue; +decl_simple_lock_data(static, thread_exception_lock) +static queue_head_t thread_exception_queue; struct thread_exception_elt { - queue_chain_t elt; - exception_type_t exception_type; - task_t exception_task; - thread_t exception_thread; + queue_chain_t elt; + exception_type_t exception_type; + task_t exception_task; + thread_t exception_thread; }; -static struct thread thread_template, init_thread; +static struct thread thread_template, init_thread; static void thread_deallocate_enqueue(thread_t thread); static void thread_deallocate_complete(thread_t thread); @@ -205,10 +206,10 @@ extern char * proc_name_address(void *p); extern int disable_exc_resource; extern int audio_active; extern int debug_task; -int thread_max = CONFIG_THREAD_MAX; /* Max number of threads */ +int thread_max = CONFIG_THREAD_MAX; /* Max number of threads */ int task_threadmax = CONFIG_THREAD_MAX; -static uint64_t thread_unique_id = 100; +static uint64_t thread_unique_id = 100; struct _thread_ledger_indices thread_ledgers = { -1 }; static ledger_template_t thread_ledger_template = NULL; @@ -231,7 +232,7 @@ extern int exc_resource_threads_enabled; * (ie when any thread's CPU consumption exceeds 70% of the limit, start taking user * stacktraces, aka micro-stackshots) */ -#define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70 +#define CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT 70 int cpumon_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. 
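 * Defaults to CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT (70%); thread_init()
 * below overrides it from the cpumon_ustackshots_trigger_pct boot-arg when
 * one is present.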
*/ void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void); @@ -315,7 +316,7 @@ thread_bootstrap(void) #if MONOTONIC memset(&thread_template.t_monotonic, 0, - sizeof(thread_template.t_monotonic)); + sizeof(thread_template.t_monotonic)); #endif /* MONOTONIC */ thread_template.bound_processor = PROCESSOR_NULL; @@ -344,7 +345,7 @@ thread_bootstrap(void) thread_template.depress_timer_active = 0; thread_template.recover = (vm_offset_t)NULL; - + thread_template.map = VM_MAP_NULL; #if DEVELOPMENT || DEBUG thread_template.pmap_footprint_suspended = FALSE; @@ -380,7 +381,7 @@ thread_bootstrap(void) thread_template.t_page_creation_time = 0; thread_template.affinity_set = NULL; - + thread_template.syscalls_unix = 0; thread_template.syscalls_mach = 0; @@ -414,6 +415,9 @@ thread_bootstrap(void) init_thread = thread_template; + /* fiddle with init thread to skip asserts in set_sched_pri */ + init_thread.sched_pri = MAXPRI_KERNEL; + machine_set_current_thread(&init_thread); } @@ -423,10 +427,10 @@ void thread_init(void) { thread_zone = zinit( - sizeof(struct thread), - thread_max * sizeof(struct thread), - THREAD_CHUNK * sizeof(struct thread), - "threads"); + sizeof(struct thread), + thread_max * sizeof(struct thread), + THREAD_CHUNK * sizeof(struct thread), + "threads"); thread_qos_override_zone = zinit( sizeof(struct thread_qos_override), @@ -453,11 +457,11 @@ thread_init(void) machine_thread_init(); if (!PE_parse_boot_argn("cpumon_ustackshots_trigger_pct", &cpumon_ustackshots_trigger_pct, - sizeof (cpumon_ustackshots_trigger_pct))) { + sizeof(cpumon_ustackshots_trigger_pct))) { cpumon_ustackshots_trigger_pct = CPUMON_USTACKSHOTS_TRIGGER_DEFAULT_PCT; } - PE_parse_boot_argn("-qos-policy-allow", &allow_qos_policy_set, sizeof(allow_qos_policy_set)); + PE_parse_boot_argn("-qos-policy-allow", &allow_qos_policy_set, sizeof(allow_qos_policy_set)); init_thread_ledgers(); } @@ -465,7 +469,7 @@ thread_init(void) boolean_t thread_is_active(thread_t thread) { - return (thread->active); + return thread->active; } void @@ -500,8 +504,8 @@ thread_terminate_continue(void) void thread_terminate_self(void) { - thread_t thread = current_thread(); - task_t task; + thread_t thread = current_thread(); + task_t task; int threadcnt; pal_thread_terminate_self(thread); @@ -621,10 +625,11 @@ thread_terminate_self(void) delay(delay_us++); - if (delay_us > USEC_PER_SEC) + if (delay_us > USEC_PER_SEC) { panic("depress timer failed to inactivate!" - "thread: %p depress_timer_active: %d", - thread, thread->depress_timer_active); + "thread: %p depress_timer_active: %d", + thread, thread->depress_timer_active); + } s = splsched(); thread_lock(thread); @@ -637,8 +642,9 @@ thread_terminate_self(void) if (thread->wait_timer_is_set) { thread->wait_timer_is_set = FALSE; - if (timer_call_cancel(&thread->wait_timer)) + if (timer_call_cancel(&thread->wait_timer)) { thread->wait_timer_active--; + } } delay_us = 1; @@ -649,10 +655,11 @@ thread_terminate_self(void) delay(delay_us++); - if (delay_us > USEC_PER_SEC) + if (delay_us > USEC_PER_SEC) { panic("wait timer failed to inactivate!" 
- "thread: %p wait_timer_active: %d", - thread, thread->wait_timer_active); + "thread: %p wait_timer_active: %d", + thread, thread->wait_timer_active); + } s = splsched(); thread_lock(thread); @@ -720,9 +727,9 @@ thread_deallocate(thread_t thread) void thread_deallocate_complete( - thread_t thread) + thread_t thread) { - task_t task; + task_t task; assert_thread_magic(thread); @@ -730,8 +737,9 @@ thread_deallocate_complete( assert(thread_owned_workloops_count(thread) == 0); - if (!(thread->state & TH_TERMINATE2)) + if (!(thread->state & TH_TERMINATE2)) { panic("thread_deallocate: thread not properly terminated\n"); + } assert(thread->runq == PROCESSOR_NULL); @@ -754,23 +762,29 @@ thread_deallocate_complete( } #endif /* MACH_BSD */ - if (thread->t_ledger) + if (thread->t_ledger) { ledger_dereference(thread->t_ledger); - if (thread->t_threadledger) + } + if (thread->t_threadledger) { ledger_dereference(thread->t_threadledger); + } assert(thread->turnstile != TURNSTILE_NULL); - if (thread->turnstile) + if (thread->turnstile) { turnstile_deallocate(thread->turnstile); + } - if (IPC_VOUCHER_NULL != thread->ith_voucher) + if (IPC_VOUCHER_NULL != thread->ith_voucher) { ipc_voucher_release(thread->ith_voucher); + } - if (thread->thread_io_stats) + if (thread->thread_io_stats) { kfree(thread->thread_io_stats, sizeof(struct io_stat_info)); + } - if (thread->kernel_stack != 0) + if (thread->kernel_stack != 0) { stack_free(thread); + } lck_mtx_destroy(&thread->mutex, &thread_lck_grp); machine_thread_destroy(thread); @@ -789,7 +803,7 @@ void thread_starts_owning_workloop(thread_t thread) { atomic_fetch_add_explicit(&thread->kqwl_owning_count, 1, - memory_order_relaxed); + memory_order_relaxed); } void @@ -797,7 +811,7 @@ thread_ends_owning_workloop(thread_t thread) { __assert_only uint32_t count; count = atomic_fetch_sub_explicit(&thread->kqwl_owning_count, 1, - memory_order_relaxed); + memory_order_relaxed); assert(count > 0); } @@ -805,7 +819,7 @@ uint32_t thread_owned_workloops_count(thread_t thread) { return atomic_load_explicit(&thread->kqwl_owning_count, - memory_order_relaxed); + memory_order_relaxed); } /* @@ -815,9 +829,9 @@ thread_owned_workloops_count(thread_t thread) */ void thread_inspect_deallocate( - thread_inspect_t thread_inspect) + thread_inspect_t thread_inspect) { - return(thread_deallocate((thread_t)thread_inspect)); + return thread_deallocate((thread_t)thread_inspect); } /* @@ -833,7 +847,7 @@ thread_exception_daemon(void) thread_t thread; exception_type_t etype; - simple_lock(&thread_exception_lock); + simple_lock(&thread_exception_lock, LCK_GRP_NULL); while ((elt = (struct thread_exception_elt *)dequeue_head(&thread_exception_queue)) != NULL) { simple_unlock(&thread_exception_lock); @@ -842,7 +856,7 @@ thread_exception_daemon(void) thread = elt->exception_thread; assert_thread_magic(thread); - kfree(elt, sizeof (*elt)); + kfree(elt, sizeof(*elt)); /* wait for all the threads in the task to terminate */ task_lock(task); @@ -857,7 +871,7 @@ thread_exception_daemon(void) /* Deliver the notification, also clears the corpse. 
*/ task_deliver_crash_notification(task, thread, etype, 0); - simple_lock(&thread_exception_lock); + simple_lock(&thread_exception_lock, LCK_GRP_NULL); } assert_wait((event_t)&thread_exception_queue, THREAD_UNINT); @@ -873,17 +887,17 @@ thread_exception_daemon(void) */ void thread_exception_enqueue( - task_t task, - thread_t thread, + task_t task, + thread_t thread, exception_type_t etype) { assert(EXC_RESOURCE == etype || EXC_GUARD == etype); - struct thread_exception_elt *elt = kalloc(sizeof (*elt)); + struct thread_exception_elt *elt = kalloc(sizeof(*elt)); elt->exception_type = etype; elt->exception_task = task; elt->exception_thread = thread; - simple_lock(&thread_exception_lock); + simple_lock(&thread_exception_lock, LCK_GRP_NULL); enqueue_tail(&thread_exception_queue, (queue_entry_t)elt); simple_unlock(&thread_exception_lock); @@ -928,25 +942,25 @@ thread_copy_resource_info( static void thread_terminate_daemon(void) { - thread_t self, thread; - task_t task; + thread_t self, thread; + task_t task; self = current_thread(); self->options |= TH_OPT_SYSTEM_CRITICAL; (void)splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); thread_terminate_start: while ((thread = qe_dequeue_head(&thread_terminate_queue, struct thread, runq_links)) != THREAD_NULL) { assert_thread_magic(thread); - /* - * if marked for crash reporting, skip reaping. - * The corpse delivery thread will clear bit and enqueue + /* + * if marked for crash reporting, skip reaping. + * The corpse delivery thread will clear bit and enqueue * for reaping when done */ - if (thread->inspection){ + if (thread->inspection) { enqueue_tail(&crashed_threads_queue, &thread->runq_links); continue; } @@ -987,12 +1001,13 @@ thread_terminate_start: queue_remove(&task->threads, thread, thread_t, task_threads); task->thread_count--; - /* + /* * If the task is being halted, and there is only one thread * left in the task after this one, then wakeup that thread. 
*/ - if (task->thread_count == 1 && task->halting) + if (task->thread_count == 1 && task->halting) { thread_wakeup((event_t)&task->halting); + } task_unlock(task); @@ -1004,7 +1019,7 @@ thread_terminate_start: thread_deallocate(thread); (void)splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); } while ((thread = qe_dequeue_head(&thread_deallocate_queue, struct thread, runq_links)) != THREAD_NULL) { @@ -1016,19 +1031,18 @@ thread_terminate_start: thread_deallocate_complete(thread); (void)splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); } struct turnstile *turnstile; while ((turnstile = qe_dequeue_head(&turnstile_deallocate_queue, struct turnstile, ts_deallocate_link)) != TURNSTILE_NULL) { - simple_unlock(&thread_terminate_lock); (void)spllo(); turnstile_destroy(turnstile); (void)splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); } queue_entry_t qe; @@ -1044,7 +1058,7 @@ thread_terminate_start: workq_destroy((struct workqueue *)qe); (void)splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); } /* @@ -1053,8 +1067,9 @@ thread_terminate_start: */ if (!queue_empty(&thread_terminate_queue) || !queue_empty(&thread_deallocate_queue) || - !queue_empty(&turnstile_deallocate_queue)) + !queue_empty(&turnstile_deallocate_queue)) { goto thread_terminate_start; + } assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT); simple_unlock(&thread_terminate_lock); @@ -1074,11 +1089,11 @@ thread_terminate_start: */ void thread_terminate_enqueue( - thread_t thread) + thread_t thread) { KDBG_RELEASE(TRACE_DATA_THREAD_TERMINATE, thread->thread_id); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); enqueue_tail(&thread_terminate_queue, &thread->runq_links); simple_unlock(&thread_terminate_lock); @@ -1092,11 +1107,11 @@ thread_terminate_enqueue( */ static void thread_deallocate_enqueue( - thread_t thread) + thread_t thread) { spl_t s = splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); enqueue_tail(&thread_deallocate_queue, &thread->runq_links); simple_unlock(&thread_terminate_lock); @@ -1115,7 +1130,7 @@ turnstile_deallocate_enqueue( { spl_t s = splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); enqueue_tail(&turnstile_deallocate_queue, &turnstile->ts_deallocate_link); simple_unlock(&thread_terminate_lock); @@ -1134,7 +1149,7 @@ workq_deallocate_enqueue( { spl_t s = splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); /* * this is just to delay a zfree(), so we link the memory with no regards * for how the struct looks like. 
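 * (thread_terminate_daemon later dequeues each entry and passes it to
 * workq_destroy(), where the delayed zfree() happens.)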
@@ -1158,7 +1173,7 @@ thread_terminate_crashed_threads() boolean_t should_wake_terminate_queue = FALSE; spl_t s = splsched(); - simple_lock(&thread_terminate_lock); + simple_lock(&thread_terminate_lock, LCK_GRP_NULL); /* * loop through the crashed threads queue * to put any threads that are not being inspected anymore @@ -1190,11 +1205,11 @@ thread_terminate_crashed_threads() static void thread_stack_daemon(void) { - thread_t thread; - spl_t s; + thread_t thread; + spl_t s; s = splsched(); - simple_lock(&thread_stack_lock); + simple_lock(&thread_stack_lock, LCK_GRP_NULL); while ((thread = qe_dequeue_head(&thread_stack_queue, struct thread, runq_links)) != THREAD_NULL) { assert_thread_magic(thread); @@ -1205,14 +1220,14 @@ thread_stack_daemon(void) /* allocate stack with interrupts enabled so that we can call into VM */ stack_alloc(thread); - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0); - + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_END, thread_tid(thread), 0, 0, 0, 0); + s = splsched(); thread_lock(thread); thread_setrun(thread, SCHED_PREEMPT | SCHED_TAILQ); thread_unlock(thread); - simple_lock(&thread_stack_lock); + simple_lock(&thread_stack_lock, LCK_GRP_NULL); } assert_wait((event_t)&thread_stack_queue, THREAD_UNINT); @@ -1232,12 +1247,12 @@ thread_stack_daemon(void) */ void thread_stack_enqueue( - thread_t thread) + thread_t thread) { - KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0); + KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_WAIT) | DBG_FUNC_START, thread_tid(thread), 0, 0, 0, 0); assert_thread_magic(thread); - simple_lock(&thread_stack_lock); + simple_lock(&thread_stack_lock, LCK_GRP_NULL); enqueue_tail(&thread_stack_queue, &thread->runq_links); simple_unlock(&thread_stack_lock); @@ -1247,8 +1262,8 @@ thread_stack_enqueue( void thread_daemon_init(void) { - kern_return_t result; - thread_t thread = NULL; + kern_return_t result; + thread_t thread = NULL; simple_lock_init(&thread_terminate_lock, 0); queue_init(&thread_terminate_queue); @@ -1258,8 +1273,9 @@ thread_daemon_init(void) queue_init(&crashed_threads_queue); result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread); - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { panic("thread_daemon_init: thread_terminate_daemon"); + } thread_deallocate(thread); @@ -1267,8 +1283,9 @@ thread_daemon_init(void) queue_init(&thread_stack_queue); result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT_HIGH, &thread); - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { panic("thread_daemon_init: thread_stack_daemon"); + } thread_deallocate(thread); @@ -1276,16 +1293,17 @@ thread_daemon_init(void) queue_init(&thread_exception_queue); result = kernel_thread_start_priority((thread_continue_t)thread_exception_daemon, NULL, MINPRI_KERNEL, &thread); - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { panic("thread_daemon_init: thread_exception_daemon"); + } thread_deallocate(thread); } -#define TH_OPTION_NONE 0x00 -#define TH_OPTION_NOCRED 0x01 -#define TH_OPTION_NOSUSP 0x02 -#define TH_OPTION_WORKQ 0x04 +#define TH_OPTION_NONE 0x00 +#define TH_OPTION_NOCRED 0x01 +#define TH_OPTION_NOSUSP 0x02 +#define TH_OPTION_WORKQ 0x04 /* * Create a new thread. 
@@ -1295,28 +1313,31 @@ thread_daemon_init(void) */ static kern_return_t thread_create_internal( - task_t parent_task, - integer_t priority, - thread_continue_t continuation, - void *parameter, - int options, - thread_t *out_thread) + task_t parent_task, + integer_t priority, + thread_continue_t continuation, + void *parameter, + int options, + thread_t *out_thread) { - thread_t new_thread; - static thread_t first_thread; + thread_t new_thread; + static thread_t first_thread; /* * Allocate a thread and initialize static fields */ - if (first_thread == THREAD_NULL) + if (first_thread == THREAD_NULL) { new_thread = first_thread = current_thread(); - else + } else { new_thread = (thread_t)zalloc(thread_zone); - if (new_thread == THREAD_NULL) - return (KERN_RESOURCE_SHORTAGE); + } + if (new_thread == THREAD_NULL) { + return KERN_RESOURCE_SHORTAGE; + } - if (new_thread != first_thread) + if (new_thread != first_thread) { *new_thread = thread_template; + } os_ref_init_count(&new_thread->ref_count, &thread_refgrp, 2); @@ -1328,7 +1349,7 @@ thread_create_internal( #endif /* MACH_ASSERT */ zfree(thread_zone, new_thread); - return (KERN_RESOURCE_SHORTAGE); + return KERN_RESOURCE_SHORTAGE; } #endif /* MACH_BSD */ @@ -1348,7 +1369,7 @@ thread_create_internal( #endif /* MACH_ASSERT */ zfree(thread_zone, new_thread); - return (KERN_FAILURE); + return KERN_FAILURE; } new_thread->task = parent_task; @@ -1364,7 +1385,7 @@ thread_create_internal( new_thread->parameter = parameter; new_thread->inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE; priority_queue_init(&new_thread->inheritor_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + PRIORITY_QUEUE_BUILTIN_MAX_HEAP); /* Allocate I/O Statistics structure */ new_thread->thread_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info)); @@ -1379,7 +1400,7 @@ thread_create_internal( #if CONFIG_IOSCHED /* Clear out the I/O Scheduling info for AppleFSCompression */ new_thread->decmp_upl = NULL; -#endif /* CONFIG_IOSCHED */ +#endif /* CONFIG_IOSCHED */ #if DEVELOPMENT || DEBUG task_lock(parent_task); @@ -1393,8 +1414,7 @@ thread_create_internal( parent_task->task_has_crossed_thread_limit = TRUE; task_unlock(parent_task); SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(parent_task, thread_count); - } - else { + } else { task_unlock(parent_task); } #endif @@ -1429,9 +1449,12 @@ thread_create_internal( lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp); machine_thread_destroy(new_thread); zfree(thread_zone, new_thread); - return (KERN_FAILURE); + return KERN_FAILURE; } + /* Protected by the tasks_threads_lock */ + new_thread->thread_id = ++thread_unique_id; + /* New threads inherit any default state on the task */ machine_thread_inherit_taskwide(new_thread, parent_task); @@ -1447,8 +1470,7 @@ thread_create_internal( /* Instantiate a thread ledger. Do not fail thread creation if ledger creation fails. */ if ((new_thread->t_threadledger = ledger_instantiate(thread_ledger_template, - LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) { - + LEDGER_CREATE_INACTIVE_ENTRIES)) != LEDGER_NULL) { ledger_entry_setactive(new_thread->t_threadledger, thread_ledgers.cpu_time); } @@ -1457,8 +1479,9 @@ thread_create_internal( new_thread->t_deduct_bank_ledger_energy = 0; new_thread->t_ledger = new_thread->task->ledger; - if (new_thread->t_ledger) + if (new_thread->t_ledger) { ledger_reference(new_thread->t_ledger); + } #if defined(CONFIG_SCHED_MULTIQ) /* Cache the task's sched_group */ @@ -1482,8 +1505,9 @@ thread_create_internal( int new_priority = (priority < 0) ? 
parent_task->priority: priority; new_priority = (priority < 0)? parent_task->priority: priority; - if (new_priority > new_thread->max_priority) + if (new_priority > new_thread->max_priority) { new_priority = new_thread->max_priority; + } #if CONFIG_EMBEDDED if (new_priority < MAXPRI_THROTTLE) { new_priority = MAXPRI_THROTTLE; @@ -1500,8 +1524,9 @@ thread_create_internal( #endif /* defined(CONFIG_SCHED_TIMESHARE_CORE) */ #if CONFIG_EMBEDDED - if (parent_task->max_priority <= MAXPRI_THROTTLE) + if (parent_task->max_priority <= MAXPRI_THROTTLE) { sched_thread_mode_demote(new_thread, TH_SFLAG_THROTTLED); + } #endif /* CONFIG_EMBEDDED */ thread_policy_create(new_thread); @@ -1513,9 +1538,6 @@ thread_create_internal( /* So terminating threads don't need to take the task lock to decrement */ hw_atomic_add(&parent_task->active_thread_count, 1); - /* Protected by the tasks_threads_lock */ - new_thread->thread_id = ++thread_unique_id; - queue_enter(&threads, new_thread, thread_t, threads); threads_count++; @@ -1553,79 +1575,83 @@ thread_create_internal( args[2] = task_is_exec_copy(parent_task) ? 1 : 0; KDBG_RELEASE(TRACE_DATA_NEWTHREAD, (uintptr_t)thread_tid(new_thread), - args[1], args[2], args[3]); + args[1], args[2], args[3]); kdbg_trace_string(parent_task->bsd_info, &args[0], &args[1], - &args[2], &args[3]); + &args[2], &args[3]); KDBG_RELEASE(TRACE_STRING_NEWTHREAD, args[0], args[1], args[2], - args[3]); + args[3]); } DTRACE_PROC1(lwp__create, thread_t, *out_thread); - return (KERN_SUCCESS); + return KERN_SUCCESS; } static kern_return_t thread_create_internal2( - task_t task, - thread_t *new_thread, - boolean_t from_user, - thread_continue_t continuation) + task_t task, + thread_t *new_thread, + boolean_t from_user, + thread_continue_t continuation) { - kern_return_t result; - thread_t thread; + kern_return_t result; + thread_t thread; - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } result = thread_create_internal(task, -1, continuation, NULL, TH_OPTION_NONE, &thread); - if (result != KERN_SUCCESS) - return (result); + if (result != KERN_SUCCESS) { + return result; + } thread->user_stop_count = 1; thread_hold(thread); - if (task->suspend_count > 0) + if (task->suspend_count > 0) { thread_hold(thread); + } - if (from_user) + if (from_user) { extmod_statistics_incr_thread_create(task); + } task_unlock(task); lck_mtx_unlock(&tasks_threads_lock); - + *new_thread = thread; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* No prototype, since task_server.h has the _from_user version if KERNEL_SERVER */ kern_return_t thread_create( - task_t task, - thread_t *new_thread); + task_t task, + thread_t *new_thread); kern_return_t thread_create( - task_t task, - thread_t *new_thread) + task_t task, + thread_t *new_thread) { return thread_create_internal2(task, new_thread, FALSE, (thread_continue_t)thread_bootstrap_return); } kern_return_t thread_create_from_user( - task_t task, - thread_t *new_thread) + task_t task, + thread_t *new_thread) { return thread_create_internal2(task, new_thread, TRUE, (thread_continue_t)thread_bootstrap_return); } kern_return_t thread_create_with_continuation( - task_t task, - thread_t *new_thread, - thread_continue_t continuation) + task_t task, + thread_t *new_thread, + thread_continue_t continuation) { return thread_create_internal2(task, new_thread, FALSE, continuation); } @@ -1645,18 +1671,21 @@ thread_create_waiting_internal( kern_return_t result; 
thread_t thread; - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } result = thread_create_internal(task, -1, continuation, NULL, - options, &thread); - if (result != KERN_SUCCESS) - return (result); + options, &thread); + if (result != KERN_SUCCESS) { + return result; + } /* note no user_stop_count or thread_hold here */ - if (task->suspend_count > 0) + if (task->suspend_count > 0) { thread_hold(thread); + } thread_mtx_lock(thread); thread_set_pending_block_hint(thread, block_hint); @@ -1672,7 +1701,7 @@ thread_create_waiting_internal( *new_thread = thread; - return (KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t @@ -1683,7 +1712,7 @@ thread_create_waiting( thread_t *new_thread) { return thread_create_waiting_internal(task, continuation, event, - kThreadWaitNone, TH_OPTION_NONE, new_thread); + kThreadWaitNone, TH_OPTION_NONE, new_thread); } @@ -1693,31 +1722,34 @@ thread_create_running_internal2( int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, - thread_t *new_thread, - boolean_t from_user) + thread_t *new_thread, + boolean_t from_user) { kern_return_t result; - thread_t thread; + thread_t thread; - if (task == TASK_NULL || task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (task == TASK_NULL || task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } result = thread_create_internal(task, -1, - (thread_continue_t)thread_bootstrap_return, NULL, - TH_OPTION_NONE, &thread); - if (result != KERN_SUCCESS) - return (result); + (thread_continue_t)thread_bootstrap_return, NULL, + TH_OPTION_NONE, &thread); + if (result != KERN_SUCCESS) { + return result; + } - if (task->suspend_count > 0) + if (task->suspend_count > 0) { thread_hold(thread); + } if (from_user) { result = machine_thread_state_convert_from_user(thread, flavor, - new_state, new_state_count); + new_state, new_state_count); } if (result == KERN_SUCCESS) { result = machine_thread_set_state(thread, flavor, new_state, - new_state_count); + new_state_count); } if (result != KERN_SUCCESS) { task_unlock(task); @@ -1725,22 +1757,23 @@ thread_create_running_internal2( thread_terminate(thread); thread_deallocate(thread); - return (result); + return result; } thread_mtx_lock(thread); thread_start(thread); thread_mtx_unlock(thread); - if (from_user) + if (from_user) { extmod_statistics_incr_thread_create(task); + } task_unlock(task); lck_mtx_unlock(&tasks_threads_lock); *new_thread = thread; - return (result); + return result; } /* Prototype, see justification above */ @@ -1750,7 +1783,7 @@ thread_create_running( int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, - thread_t *new_thread); + thread_t *new_thread); kern_return_t thread_create_running( @@ -1758,7 +1791,7 @@ thread_create_running( int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, - thread_t *new_thread) + thread_t *new_thread) { return thread_create_running_internal2( task, flavor, new_state, new_state_count, @@ -1771,7 +1804,7 @@ thread_create_running_from_user( int flavor, thread_state_t new_state, mach_msg_type_number_t new_state_count, - thread_t *new_thread) + thread_t *new_thread) { return thread_create_running_internal2( task, flavor, new_state, new_state_count, @@ -1786,7 +1819,7 @@ thread_create_workq_waiting( { int options = TH_OPTION_NOCRED | TH_OPTION_NOSUSP | TH_OPTION_WORKQ; return thread_create_waiting_internal(task, continuation, NULL, - 
kThreadWaitParkedWorkQueue, options, new_thread); + kThreadWaitParkedWorkQueue, options, new_thread); } /* @@ -1797,19 +1830,20 @@ thread_create_workq_waiting( */ kern_return_t kernel_thread_create( - thread_continue_t continuation, - void *parameter, - integer_t priority, - thread_t *new_thread) + thread_continue_t continuation, + void *parameter, + integer_t priority, + thread_t *new_thread) { - kern_return_t result; - thread_t thread; - task_t task = kernel_task; + kern_return_t result; + thread_t thread; + task_t task = kernel_task; result = thread_create_internal(task, priority, continuation, parameter, - TH_OPTION_NOCRED | TH_OPTION_NONE, &thread); - if (result != KERN_SUCCESS) - return (result); + TH_OPTION_NOCRED | TH_OPTION_NONE, &thread); + if (result != KERN_SUCCESS) { + return result; + } task_unlock(task); lck_mtx_unlock(&tasks_threads_lock); @@ -1821,41 +1855,43 @@ kernel_thread_create( #endif thread->reserved_stack = thread->kernel_stack; -if(debug_task & 1) - kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation); + if (debug_task & 1) { + kprintf("kernel_thread_create: thread = %p continuation = %p\n", thread, continuation); + } *new_thread = thread; - return (result); + return result; } kern_return_t kernel_thread_start_priority( - thread_continue_t continuation, - void *parameter, - integer_t priority, - thread_t *new_thread) + thread_continue_t continuation, + void *parameter, + integer_t priority, + thread_t *new_thread) { - kern_return_t result; - thread_t thread; + kern_return_t result; + thread_t thread; result = kernel_thread_create(continuation, parameter, priority, &thread); - if (result != KERN_SUCCESS) - return (result); + if (result != KERN_SUCCESS) { + return result; + } - *new_thread = thread; + *new_thread = thread; thread_mtx_lock(thread); thread_start(thread); thread_mtx_unlock(thread); - return (result); + return result; } kern_return_t kernel_thread_start( - thread_continue_t continuation, - void *parameter, - thread_t *new_thread) + thread_continue_t continuation, + void *parameter, + thread_t *new_thread) { return kernel_thread_start_priority(continuation, parameter, -1, new_thread); } @@ -1865,18 +1901,19 @@ kernel_thread_start( static void retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info) { - int state, flags; + int state, flags; /* fill in info */ thread_read_times(thread, &basic_info->user_time, - &basic_info->system_time, NULL); + &basic_info->system_time, NULL); /* * Update lazy-evaluated scheduler info because someone wants it. */ - if (SCHED(can_update_priority)(thread)) + if (SCHED(can_update_priority)(thread)) { SCHED(update_priority)(thread); + } basic_info->sleep_time = 0; @@ -1888,44 +1925,44 @@ retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info) basic_info->cpu_usage = 0; #if defined(CONFIG_SCHED_TIMESHARE_CORE) if (sched_tick_interval) { - basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage - * TH_USAGE_SCALE) / sched_tick_interval); + basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage + * TH_USAGE_SCALE) / sched_tick_interval); basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5; } #endif - if (basic_info->cpu_usage > TH_USAGE_SCALE) + if (basic_info->cpu_usage > TH_USAGE_SCALE) { basic_info->cpu_usage = TH_USAGE_SCALE; + } basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)? 
- POLICY_TIMESHARE: POLICY_RR); + POLICY_TIMESHARE: POLICY_RR); flags = 0; - if (thread->options & TH_OPT_IDLE_THREAD) + if (thread->options & TH_OPT_IDLE_THREAD) { flags |= TH_FLAGS_IDLE; + } if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) { flags |= TH_FLAGS_GLOBAL_FORCED_IDLE; } - if (!thread->kernel_stack) + if (!thread->kernel_stack) { flags |= TH_FLAGS_SWAPPED; + } state = 0; - if (thread->state & TH_TERMINATE) + if (thread->state & TH_TERMINATE) { state = TH_STATE_HALTED; - else - if (thread->state & TH_RUN) + } else if (thread->state & TH_RUN) { state = TH_STATE_RUNNING; - else - if (thread->state & TH_UNINT) + } else if (thread->state & TH_UNINT) { state = TH_STATE_UNINTERRUPTIBLE; - else - if (thread->state & TH_SUSP) + } else if (thread->state & TH_SUSP) { state = TH_STATE_STOPPED; - else - if (thread->state & TH_WAIT) + } else if (thread->state & TH_WAIT) { state = TH_STATE_WAITING; + } basic_info->run_state = state; basic_info->flags = flags; @@ -1937,20 +1974,21 @@ retrieve_thread_basic_info(thread_t thread, thread_basic_info_t basic_info) kern_return_t thread_info_internal( - thread_t thread, - thread_flavor_t flavor, - thread_info_t thread_info_out, /* ptr to OUT array */ - mach_msg_type_number_t *thread_info_count) /*IN/OUT*/ + thread_t thread, + thread_flavor_t flavor, + thread_info_t thread_info_out, /* ptr to OUT array */ + mach_msg_type_number_t *thread_info_count) /*IN/OUT*/ { - spl_t s; + spl_t s; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } if (flavor == THREAD_BASIC_INFO) { - - if (*thread_info_count < THREAD_BASIC_INFO_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*thread_info_count < THREAD_BASIC_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } s = splsched(); thread_lock(thread); @@ -1962,14 +2000,13 @@ thread_info_internal( *thread_info_count = THREAD_BASIC_INFO_COUNT; - return (KERN_SUCCESS); - } - else - if (flavor == THREAD_IDENTIFIER_INFO) { - thread_identifier_info_t identifier_info; + return KERN_SUCCESS; + } else if (flavor == THREAD_IDENTIFIER_INFO) { + thread_identifier_info_t identifier_info; - if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } identifier_info = (thread_identifier_info_t) thread_info_out; @@ -1983,13 +2020,12 @@ thread_info_internal( thread_unlock(thread); splx(s); return KERN_SUCCESS; - } - else - if (flavor == THREAD_SCHED_TIMESHARE_INFO) { - policy_timeshare_info_t ts_info; + } else if (flavor == THREAD_SCHED_TIMESHARE_INFO) { + policy_timeshare_info_t ts_info; - if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } ts_info = (policy_timeshare_info_t)thread_info_out; @@ -1999,44 +2035,41 @@ thread_info_internal( if (thread->sched_mode != TH_MODE_TIMESHARE) { thread_unlock(thread); splx(s); - return (KERN_INVALID_POLICY); + return KERN_INVALID_POLICY; } ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0; if (ts_info->depressed) { ts_info->base_priority = DEPRESSPRI; ts_info->depress_priority = thread->base_pri; - } - else { + } else { ts_info->base_priority = thread->base_pri; ts_info->depress_priority = -1; } ts_info->cur_priority = thread->sched_pri; - ts_info->max_priority = thread->max_priority; + ts_info->max_priority = thread->max_priority; 
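	/*
	 * Note that a depressed thread reports DEPRESSPRI as base_priority and
	 * stashes its real base priority in depress_priority; otherwise
	 * depress_priority reads -1.
	 */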
thread_unlock(thread); splx(s); *thread_info_count = POLICY_TIMESHARE_INFO_COUNT; - return (KERN_SUCCESS); - } - else - if (flavor == THREAD_SCHED_FIFO_INFO) { - if (*thread_info_count < POLICY_FIFO_INFO_COUNT) - return (KERN_INVALID_ARGUMENT); + return KERN_SUCCESS; + } else if (flavor == THREAD_SCHED_FIFO_INFO) { + if (*thread_info_count < POLICY_FIFO_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } - return (KERN_INVALID_POLICY); - } - else - if (flavor == THREAD_SCHED_RR_INFO) { - policy_rr_info_t rr_info; + return KERN_INVALID_POLICY; + } else if (flavor == THREAD_SCHED_RR_INFO) { + policy_rr_info_t rr_info; uint32_t quantum_time; uint64_t quantum_ns; - if (*thread_info_count < POLICY_RR_INFO_COUNT) - return (KERN_INVALID_ARGUMENT); + if (*thread_info_count < POLICY_RR_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } rr_info = (policy_rr_info_t) thread_info_out; @@ -2047,15 +2080,14 @@ thread_info_internal( thread_unlock(thread); splx(s); - return (KERN_INVALID_POLICY); - } + return KERN_INVALID_POLICY; + } rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0; if (rr_info->depressed) { rr_info->base_priority = DEPRESSPRI; rr_info->depress_priority = thread->base_pri; - } - else { + } else { rr_info->base_priority = thread->base_pri; rr_info->depress_priority = -1; } @@ -2071,15 +2103,13 @@ thread_info_internal( *thread_info_count = POLICY_RR_INFO_COUNT; - return (KERN_SUCCESS); - } - else - if (flavor == THREAD_EXTENDED_INFO) { - thread_basic_info_data_t basic_info; - thread_extended_info_t extended_info = (thread_extended_info_t) thread_info_out; + return KERN_SUCCESS; + } else if (flavor == THREAD_EXTENDED_INFO) { + thread_basic_info_data_t basic_info; + thread_extended_info_t extended_info = (thread_extended_info_t) thread_info_out; if (*thread_info_count < THREAD_EXTENDED_INFO_COUNT) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } s = splsched(); @@ -2101,47 +2131,47 @@ thread_info_internal( extended_info->pth_priority = thread->base_pri; extended_info->pth_maxpriority = thread->max_priority; - bsd_getthreadname(thread->uthread,extended_info->pth_name); + bsd_getthreadname(thread->uthread, extended_info->pth_name); thread_unlock(thread); splx(s); *thread_info_count = THREAD_EXTENDED_INFO_COUNT; - return (KERN_SUCCESS); - } - else - if (flavor == THREAD_DEBUG_INFO_INTERNAL) { + return KERN_SUCCESS; + } else if (flavor == THREAD_DEBUG_INFO_INTERNAL) { #if DEVELOPMENT || DEBUG thread_debug_info_internal_t dbg_info; - if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) - return (KERN_NOT_SUPPORTED); + if (*thread_info_count < THREAD_DEBUG_INFO_INTERNAL_COUNT) { + return KERN_NOT_SUPPORTED; + } - if (thread_info_out == NULL) - return (KERN_INVALID_ARGUMENT); + if (thread_info_out == NULL) { + return KERN_INVALID_ARGUMENT; + } dbg_info = (thread_debug_info_internal_t) thread_info_out; dbg_info->page_creation_count = thread->t_page_creation_count; *thread_info_count = THREAD_DEBUG_INFO_INTERNAL_COUNT; - return (KERN_SUCCESS); + return KERN_SUCCESS; #endif /* DEVELOPMENT || DEBUG */ - return (KERN_NOT_SUPPORTED); + return KERN_NOT_SUPPORTED; } - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } void thread_read_times( - thread_t thread, - time_value_t *user_time, - time_value_t *system_time, - time_value_t *runnable_time) + thread_t thread, + time_value_t *user_time, + time_value_t *system_time, + time_value_t *runnable_time) { - clock_sec_t secs; - clock_usec_t usecs; - uint64_t tval_user, tval_system; + clock_sec_t 
secs; + clock_usec_t usecs; + uint64_t tval_user, tval_system; tval_user = timer_grab(&thread->user_timer); tval_system = timer_grab(&thread->system_timer); @@ -2173,7 +2203,8 @@ thread_read_times( } } -uint64_t thread_get_runtime_self(void) +uint64_t +thread_get_runtime_self(void) { boolean_t interrupt_state; uint64_t runtime; @@ -2194,10 +2225,10 @@ uint64_t thread_get_runtime_self(void) kern_return_t thread_assign( - __unused thread_t thread, - __unused processor_set_t new_pset) + __unused thread_t thread, + __unused processor_set_t new_pset) { - return (KERN_FAILURE); + return KERN_FAILURE; } /* @@ -2208,27 +2239,28 @@ thread_assign( */ kern_return_t thread_assign_default( - thread_t thread) + thread_t thread) { - return (thread_assign(thread, &pset0)); + return thread_assign(thread, &pset0); } /* * thread_get_assignment * * Return current assignment for this thread. - */ + */ kern_return_t thread_get_assignment( - thread_t thread, - processor_set_t *pset) + thread_t thread, + processor_set_t *pset) { - if (thread == NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == NULL) { + return KERN_INVALID_ARGUMENT; + } *pset = &pset0; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -2239,31 +2271,34 @@ thread_get_assignment( */ kern_return_t thread_wire_internal( - host_priv_t host_priv, - thread_t thread, - boolean_t wired, - boolean_t *prev_state) + host_priv_t host_priv, + thread_t thread, + boolean_t wired, + boolean_t *prev_state) { - if (host_priv == NULL || thread != current_thread()) - return (KERN_INVALID_ARGUMENT); + if (host_priv == NULL || thread != current_thread()) { + return KERN_INVALID_ARGUMENT; + } assert(host_priv == &realhost); - if (prev_state) - *prev_state = (thread->options & TH_OPT_VMPRIV) != 0; - - if (wired) { - if (!(thread->options & TH_OPT_VMPRIV)) - vm_page_free_reserve(1); /* XXX */ - thread->options |= TH_OPT_VMPRIV; + if (prev_state) { + *prev_state = (thread->options & TH_OPT_VMPRIV) != 0; } - else { - if (thread->options & TH_OPT_VMPRIV) - vm_page_free_reserve(-1); /* XXX */ - thread->options &= ~TH_OPT_VMPRIV; + + if (wired) { + if (!(thread->options & TH_OPT_VMPRIV)) { + vm_page_free_reserve(1); /* XXX */ + } + thread->options |= TH_OPT_VMPRIV; + } else { + if (thread->options & TH_OPT_VMPRIV) { + vm_page_free_reserve(-1); /* XXX */ + } + thread->options &= ~TH_OPT_VMPRIV; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -2274,11 +2309,11 @@ thread_wire_internal( */ kern_return_t thread_wire( - host_priv_t host_priv, - thread_t thread, - boolean_t wired) + host_priv_t host_priv, + thread_t thread, + boolean_t wired) { - return (thread_wire_internal(host_priv, thread, wired, NULL)); + return thread_wire_internal(host_priv, thread, wired, NULL); } @@ -2293,17 +2328,19 @@ set_vm_privilege(boolean_t privileged) { boolean_t was_vmpriv; - if (current_thread()->options & TH_OPT_VMPRIV) + if (current_thread()->options & TH_OPT_VMPRIV) { was_vmpriv = TRUE; - else + } else { was_vmpriv = FALSE; + } - if (privileged != FALSE) + if (privileged != FALSE) { current_thread()->options |= TH_OPT_VMPRIV; - else + } else { current_thread()->options &= ~TH_OPT_VMPRIV; + } - return (was_vmpriv); + return was_vmpriv; } void @@ -2318,7 +2355,6 @@ clear_thread_rwlock_boost(void) thread_t thread = current_thread(); if ((thread->rwlock_count-- == 1) && (thread->sched_flags & TH_SFLAG_RW_PROMOTED)) { - lck_rw_clear_promotion(thread, 0); } } @@ -2334,8 +2370,9 @@ thread_guard_violation(thread_t thread, assert(thread == current_thread()); /* don't set up the AST for 
kernel threads */ - if (thread->task == kernel_task) + if (thread->task == kernel_task) { return; + } spl_t s = splsched(); /* @@ -2357,7 +2394,7 @@ thread_guard_violation(thread_t thread, * * Handle AST_GUARD for a thread. This routine looks at the * state saved in the thread structure to determine the cause - * of this exception. Based on this value, it invokes the + * of this exception. Based on this value, it invokes the * appropriate routine which determines other exception related * info and raises the exception. */ @@ -2365,12 +2402,12 @@ void guard_ast(thread_t t) { const mach_exception_data_type_t - code = t->guard_exc_info.code, - subcode = t->guard_exc_info.subcode; + code = t->guard_exc_info.code, + subcode = t->guard_exc_info.subcode; t->guard_exc_info.code = 0; t->guard_exc_info.subcode = 0; - + switch (EXC_GUARD_DECODE_GUARD_TYPE(code)) { case GUARD_TYPE_NONE: /* lingering AST_GUARD on the processor? */ @@ -2398,7 +2435,7 @@ static void thread_cputime_callback(int warning, __unused const void *arg0, __unused const void *arg1) { if (warning == LEDGER_WARNING_ROSE_ABOVE) { -#if CONFIG_TELEMETRY +#if CONFIG_TELEMETRY /* * This thread is in danger of violating the CPU usage monitor. Enable telemetry * on the entire task so there are micro-stackshots available if and when @@ -2428,10 +2465,10 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) { int pid = 0; - task_t task = current_task(); + task_t task = current_task(); thread_t thread = current_thread(); uint64_t tid = thread->thread_id; - const char *procname = "unknown"; + const char *procname = "unknown"; time_value_t thread_total_time = {0, 0}; time_value_t thread_system_time; time_value_t thread_user_time; @@ -2441,14 +2478,14 @@ SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) uint32_t interval_sec; uint64_t interval_ns; uint64_t balance_ns; - boolean_t fatal = FALSE; - boolean_t send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */ - kern_return_t kr; + boolean_t fatal = FALSE; + boolean_t send_exc_resource = TRUE; /* in addition to RESOURCE_NOTIFY */ + kern_return_t kr; #ifdef EXC_RESOURCE_MONITORS - mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; + mach_exception_data_type_t code[EXCEPTION_CODE_MAX]; #endif /* EXC_RESOURCE_MONITORS */ - struct ledger_entry_info lei; + struct ledger_entry_info lei; assert(thread->t_threadledger != LEDGER_NULL); @@ -2461,15 +2498,17 @@ SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) send_exc_resource = TRUE; } /* Only one thread can be here at a time. Whichever makes it through - first will successfully suspend the monitor and proceed to send the - notification. Other threads will get an error trying to suspend the - monitor and give up on sending the notification. In the first release, - the monitor won't be resumed for a number of seconds, but we may - eventually need to handle low-latency resume. + * first will successfully suspend the monitor and proceed to send the + * notification. Other threads will get an error trying to suspend the + * monitor and give up on sending the notification. In the first release, + * the monitor won't be resumed for a number of seconds, but we may + * eventually need to handle low-latency resume. 
*/ kr = task_suspend_cpumon(task); task_unlock(task); - if (kr == KERN_INVALID_ARGUMENT) return; + if (kr == KERN_INVALID_ARGUMENT) { + return; + } #ifdef MACH_BSD pid = proc_selfpid(); @@ -2488,33 +2527,33 @@ SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) ledger_get_entry_info(thread->t_threadledger, thread_ledgers.cpu_time, &lei); /* credit/debit/balance/limit are in absolute time units; - the refill info is in nanoseconds. */ + * the refill info is in nanoseconds. */ absolutetime_to_nanoseconds(lei.lei_balance, &balance_ns); if (lei.lei_last_refill > 0) { - usage_percent = (uint32_t)((balance_ns*100ULL) / lei.lei_last_refill); + usage_percent = (uint32_t)((balance_ns * 100ULL) / lei.lei_last_refill); } /* TODO: show task total runtime (via TASK_ABSOLUTETIME_INFO)? */ printf("process %s[%d] thread %llu caught burning CPU! " - "It used more than %d%% CPU over %u seconds " - "(actual recent usage: %d%% over ~%llu seconds). " - "Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys) " - "ledger balance: %lld mabs credit: %lld mabs debit: %lld mabs " - "limit: %llu mabs period: %llu ns last refill: %llu ns%s.\n", - procname, pid, tid, - percentage, interval_sec, - usage_percent, - (lei.lei_last_refill + NSEC_PER_SEC/2) / NSEC_PER_SEC, - thread_total_time.seconds, thread_total_time.microseconds, - thread_user_time.seconds, thread_user_time.microseconds, - thread_system_time.seconds,thread_system_time.microseconds, - lei.lei_balance, lei.lei_credit, lei.lei_debit, - lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill, - (fatal ? " [fatal violation]" : "")); + "It used more than %d%% CPU over %u seconds " + "(actual recent usage: %d%% over ~%llu seconds). " + "Thread lifetime cpu usage %d.%06ds, (%d.%06d user, %d.%06d sys) " + "ledger balance: %lld mabs credit: %lld mabs debit: %lld mabs " + "limit: %llu mabs period: %llu ns last refill: %llu ns%s.\n", + procname, pid, tid, + percentage, interval_sec, + usage_percent, + (lei.lei_last_refill + NSEC_PER_SEC / 2) / NSEC_PER_SEC, + thread_total_time.seconds, thread_total_time.microseconds, + thread_user_time.seconds, thread_user_time.microseconds, + thread_system_time.seconds, thread_system_time.microseconds, + lei.lei_balance, lei.lei_credit, lei.lei_debit, + lei.lei_limit, lei.lei_refill_period, lei.lei_last_refill, + (fatal ? " [fatal violation]" : "")); /* - For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE. Once - we have logging parity, we will stop sending EXC_RESOURCE (24508922). + * For now, send RESOURCE_NOTIFY in parallel with EXC_RESOURCE. Once + * we have logging parity, we will stop sending EXC_RESOURCE (24508922). */ /* RESOURCE_NOTIFY MIG specifies nanoseconds of CPU time */ @@ -2522,7 +2561,7 @@ SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) absolutetime_to_nanoseconds(lei.lei_limit, &lei.lei_limit); trace_resource_violation(RMON_CPUUSAGE_VIOLATED, &lei); kr = send_resource_violation(send_cpu_usage_violation, task, &lei, - fatal ? kRNFatalLimitFlag : 0); + fatal ? kRNFatalLimitFlag : 0); if (kr) { printf("send_resource_violation(CPU usage, ...): error %#x\n", kr); } @@ -2531,15 +2570,15 @@ SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) if (send_exc_resource) { if (disable_exc_resource) { printf("process %s[%d] thread %llu caught burning CPU! " - "EXC_RESOURCE%s supressed by a boot-arg\n", - procname, pid, tid, fatal ? " (and termination)" : ""); + "EXC_RESOURCE%s supressed by a boot-arg\n", + procname, pid, tid, fatal ? 
" (and termination)" : ""); return; } if (audio_active) { printf("process %s[%d] thread %llu caught burning CPU! " - "EXC_RESOURCE & termination supressed due to audio playback\n", - procname, pid, tid); + "EXC_RESOURCE & termination supressed due to audio playback\n", + procname, pid, tid); return; } } @@ -2550,7 +2589,7 @@ SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_CPU); if (fatal) { EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR_FATAL); - }else { + } else { EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_CPU_MONITOR); } EXC_RESOURCE_CPUMONITOR_ENCODE_INTERVAL(code[0], interval_sec); @@ -2570,11 +2609,12 @@ SENDING_NOTIFICATION__THIS_THREAD_IS_CONSUMING_TOO_MUCH_CPU(void) } #if DEVELOPMENT || DEBUG -void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t task, int thread_count) +void __attribute__((noinline)) +SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(task_t task, int thread_count) { mach_exception_data_type_t code[EXCEPTION_CODE_MAX] = {0}; int pid = task_pid(task); - char procname[MAXCOMLEN+1] = "unknown"; + char procname[MAXCOMLEN + 1] = "unknown"; if (pid == 1) { /* @@ -2587,25 +2627,25 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(t if (disable_exc_resource) { printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE " - "supressed by a boot-arg. \n", procname, pid, thread_count); + "supressed by a boot-arg. \n", procname, pid, thread_count); return; } if (audio_active) { printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE " - "supressed due to audio playback.\n", procname, pid, thread_count); + "supressed due to audio playback.\n", procname, pid, thread_count); return; } if (exc_via_corpse_forking == 0) { printf("process %s[%d] crossed thread count high watermark (%d), EXC_RESOURCE " - "supressed due to corpse forking being disabled.\n", procname, pid, - thread_count); + "supressed due to corpse forking being disabled.\n", procname, pid, + thread_count); return; } printf("process %s[%d] crossed thread count high watermark (%d), sending " - "EXC_RESOURCE\n", procname, pid, thread_count); + "EXC_RESOURCE\n", procname, pid, thread_count); EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_THREADS); EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_THREADS_HIGH_WATERMARK); @@ -2615,30 +2655,32 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(t } #endif /* DEVELOPMENT || DEBUG */ -void thread_update_io_stats(thread_t thread, int size, int io_flags) +void +thread_update_io_stats(thread_t thread, int size, int io_flags) { int io_tier; - if (thread->thread_io_stats == NULL || thread->task->task_io_stats == NULL) + if (thread->thread_io_stats == NULL || thread->task->task_io_stats == NULL) { return; + } if (io_flags & DKIO_READ) { UPDATE_IO_STATS(thread->thread_io_stats->disk_reads, size); UPDATE_IO_STATS_ATOMIC(thread->task->task_io_stats->disk_reads, size); } - + if (io_flags & DKIO_META) { UPDATE_IO_STATS(thread->thread_io_stats->metadata, size); UPDATE_IO_STATS_ATOMIC(thread->task->task_io_stats->metadata, size); } - + if (io_flags & DKIO_PAGING) { UPDATE_IO_STATS(thread->thread_io_stats->paging, size); UPDATE_IO_STATS_ATOMIC(thread->task->task_io_stats->paging, size); } io_tier = ((io_flags & DKIO_TIER_MASK) >> DKIO_TIER_SHIFT); - assert (io_tier < IO_NUM_PRIORITIES); + assert(io_tier < IO_NUM_PRIORITIES); UPDATE_IO_STATS(thread->thread_io_stats->io_priority[io_tier], size); 
UPDATE_IO_STATS_ATOMIC(thread->task->task_io_stats->io_priority[io_tier], size); @@ -2654,21 +2696,23 @@ void thread_update_io_stats(thread_t thread, int size, int io_flags) } static void -init_thread_ledgers(void) { +init_thread_ledgers(void) +{ ledger_template_t t; int idx; - + assert(thread_ledger_template == NULL); - if ((t = ledger_template_create("Per-thread ledger")) == NULL) + if ((t = ledger_template_create("Per-thread ledger")) == NULL) { panic("couldn't create thread ledger template"); + } if ((idx = ledger_entry_add(t, "cpu_time", "sched", "ns")) < 0) { panic("couldn't create cpu_time entry for thread ledger template"); } if (ledger_set_callback(t, idx, thread_cputime_callback, NULL, NULL) < 0) { - panic("couldn't set thread ledger callback for cpu_time entry"); + panic("couldn't set thread ledger callback for cpu_time entry"); } thread_ledgers.cpu_time = idx; @@ -2683,9 +2727,9 @@ init_thread_ledgers(void) { int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns) { - int64_t abstime = 0; - uint64_t limittime = 0; - thread_t thread = current_thread(); + int64_t abstime = 0; + uint64_t limittime = 0; + thread_t thread = current_thread(); *percentage = 0; *interval_ns = 0; @@ -2696,7 +2740,7 @@ thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns) * This thread has no per-thread ledger, so it can't possibly * have a CPU limit applied. */ - return (KERN_SUCCESS); + return KERN_SUCCESS; } ledger_get_period(thread->t_threadledger, thread_ledgers.cpu_time, interval_ns); @@ -2707,7 +2751,7 @@ thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns) * This thread's CPU time ledger has no period or limit; so it * doesn't have a CPU limit applied. */ - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -2729,7 +2773,7 @@ thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns) *action = THREAD_CPULIMIT_DISABLE; } - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -2740,10 +2784,10 @@ thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns) int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns) { - thread_t thread = current_thread(); - ledger_t l; - uint64_t limittime = 0; - uint64_t abstime = 0; + thread_t thread = current_thread(); + ledger_t l; + uint64_t limittime = 0; + uint64_t abstime = 0; assert(percentage <= 100); @@ -2758,20 +2802,21 @@ thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns) thread->options &= ~(TH_OPT_PROC_CPULIMIT | TH_OPT_PRVT_CPULIMIT); } - return (0); + return 0; } if (interval_ns < MINIMUM_CPULIMIT_INTERVAL_MS * NSEC_PER_MSEC) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - l = thread->t_threadledger; + l = thread->t_threadledger; if (l == LEDGER_NULL) { /* * This thread doesn't yet have a per-thread ledger; so create one with the CPU time entry active. */ - if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL) - return (KERN_RESOURCE_SHORTAGE); + if ((l = ledger_instantiate(thread_ledger_template, LEDGER_CREATE_INACTIVE_ENTRIES)) == LEDGER_NULL) { + return KERN_RESOURCE_SHORTAGE; + } /* * We are the first to create this thread's ledger, so only activate our entry. @@ -2785,7 +2830,7 @@ thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns) * Calculate the amount of CPU time that the thread needs to consume in order to hit the limit. 
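	 * For example, a 50% limit over a 1 s interval works out to
	 * limittime = (1000000000 ns * 50) / 100 = 500000000 ns, i.e. the
	 * thread may consume 500 ms of CPU time per refill period before
	 * the ledger limit trips.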
*/ limittime = (interval_ns * percentage) / 100; - nanoseconds_to_absolutetime(limittime, &abstime); + nanoseconds_to_absolutetime(limittime, &abstime); ledger_set_limit(l, thread_ledgers.cpu_time, abstime, cpumon_ustackshots_trigger_pct); /* * Refill the thread's allotted CPU time every interval_ns nanoseconds. @@ -2813,7 +2858,7 @@ thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns) * We deliberately override any CPU limit imposed by a task-wide limit (eg * CPU usage monitor). */ - thread->options &= ~TH_OPT_PROC_CPULIMIT; + thread->options &= ~TH_OPT_PROC_CPULIMIT; thread->options |= TH_OPT_PRVT_CPULIMIT; /* The per-thread ledger template by default has a callback for CPU time */ @@ -2821,13 +2866,13 @@ thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns) ledger_set_action(l, thread_ledgers.cpu_time, LEDGER_ACTION_BLOCK); } - return (0); + return 0; } void thread_sched_call( - thread_t thread, - sched_call_t call) + thread_t thread, + sched_call_t call) { assert((thread->state & TH_WAIT_REPORT) == 0); thread->sched_call = call; @@ -2835,9 +2880,9 @@ thread_sched_call( uint64_t thread_tid( - thread_t thread) + thread_t thread) { - return (thread != THREAD_NULL? thread->thread_id: 0); + return thread != THREAD_NULL? thread->thread_id: 0; } uint16_t @@ -2860,42 +2905,47 @@ thread_last_run_time(thread_t th) uint64_t thread_dispatchqaddr( - thread_t thread) + thread_t thread) { - uint64_t dispatchqueue_addr; - uint64_t thread_handle; + uint64_t dispatchqueue_addr; + uint64_t thread_handle; - if (thread == THREAD_NULL) + if (thread == THREAD_NULL) { return 0; + } thread_handle = thread->machine.cthread_self; - if (thread_handle == 0) + if (thread_handle == 0) { return 0; - - if (thread->inspection == TRUE) + } + + if (thread->inspection == TRUE) { dispatchqueue_addr = thread_handle + get_task_dispatchqueue_offset(thread->task); - else if (thread->task->bsd_info) + } else if (thread->task->bsd_info) { dispatchqueue_addr = thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info); - else + } else { dispatchqueue_addr = 0; + } return dispatchqueue_addr; } uint64_t thread_rettokern_addr( - thread_t thread) + thread_t thread) { - uint64_t rettokern_addr; - uint64_t rettokern_offset; - uint64_t thread_handle; + uint64_t rettokern_addr; + uint64_t rettokern_offset; + uint64_t thread_handle; - if (thread == THREAD_NULL) + if (thread == THREAD_NULL) { return 0; + } thread_handle = thread->machine.cthread_self; - if (thread_handle == 0) + if (thread_handle == 0) { return 0; + } if (thread->task->bsd_info) { rettokern_offset = get_return_to_kernel_offset_from_proc(thread->task->bsd_info); @@ -2938,19 +2988,20 @@ thread_mtx_unlock(thread_t thread) void thread_reference(thread_t thread); void thread_reference( - thread_t thread) + thread_t thread) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { thread_reference_internal(thread); + } } #undef thread_should_halt boolean_t thread_should_halt( - thread_t th) + thread_t th) { - return (thread_should_halt_fast(th)); + return thread_should_halt_fast(th); } /* @@ -2972,16 +3023,18 @@ thread_set_voucher_name(mach_port_name_t voucher_name) ledger_t bankledger = NULL; struct thread_group *banktg = NULL; - if (MACH_PORT_DEAD == voucher_name) + if (MACH_PORT_DEAD == voucher_name) { return KERN_INVALID_RIGHT; + } /* * agressively convert to voucher reference */ if (MACH_PORT_VALID(voucher_name)) { new_voucher = convert_port_name_to_voucher(voucher_name); - if (IPC_VOUCHER_NULL == new_voucher) + 
if (IPC_VOUCHER_NULL == new_voucher) { return KERN_INVALID_ARGUMENT; + } } bank_get_bank_ledger_and_thread_group(new_voucher, &bankledger, &banktg); @@ -2994,19 +3047,20 @@ thread_set_voucher_name(mach_port_name_t voucher_name) bank_swap_thread_bank_ledger(thread, bankledger); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), - (uintptr_t)voucher_name, - VM_KERNEL_ADDRPERM((uintptr_t)new_voucher), - 1, 0); + MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), + (uintptr_t)voucher_name, + VM_KERNEL_ADDRPERM((uintptr_t)new_voucher), + 1, 0); - if (IPC_VOUCHER_NULL != voucher) + if (IPC_VOUCHER_NULL != voucher) { ipc_voucher_release(voucher); + } return KERN_SUCCESS; } -/* +/* * thread_get_mach_voucher - return a voucher reference for the specified thread voucher * * Conditions: nothing locked @@ -3015,20 +3069,21 @@ thread_set_voucher_name(mach_port_name_t voucher_name) * but nobody has done a lookup yet. In that case, we'll have to do the equivalent * lookup here. * - * NOTE: At the moment, there is no distinction between the current and effective + * NOTE: At the moment, there is no distinction between the current and effective * vouchers because we only set them at the thread level currently. */ -kern_return_t +kern_return_t thread_get_mach_voucher( - thread_act_t thread, + thread_act_t thread, mach_voucher_selector_t __unused which, - ipc_voucher_t *voucherp) + ipc_voucher_t *voucherp) { - ipc_voucher_t voucher; - mach_port_name_t voucher_name; + ipc_voucher_t voucher; + mach_port_name_t voucher_name; - if (THREAD_NULL == thread) + if (THREAD_NULL == thread) { return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); voucher = thread->ith_voucher; @@ -3049,7 +3104,7 @@ thread_get_mach_voucher( if (KERN_SUCCESS != ipc_object_copyin(thread->task->itk_space, voucher_name, - MACH_MSG_TYPE_COPY_SEND, (ipc_object_t *)&port)) { + MACH_MSG_TYPE_COPY_SEND, (ipc_object_t *)&port)) { thread->ith_voucher_name = MACH_PORT_NULL; thread_mtx_unlock(thread); *voucherp = IPC_VOUCHER_NULL; @@ -3063,22 +3118,23 @@ thread_get_mach_voucher( thread_mtx_unlock(thread); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), - (uintptr_t)port, - VM_KERNEL_ADDRPERM((uintptr_t)voucher), - 2, 0); + MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), + (uintptr_t)port, + VM_KERNEL_ADDRPERM((uintptr_t)voucher), + 2, 0); ipc_port_release_send(port); - } else + } else { thread_mtx_unlock(thread); + } *voucherp = voucher; return KERN_SUCCESS; } -/* +/* * thread_set_mach_voucher - set a voucher reference for the specified thread voucher * * Conditions: callers holds a reference on the voucher. @@ -3088,20 +3144,22 @@ thread_get_mach_voucher( * binding is erased. The old voucher reference associated with the thread is * discarded. 
*/ -kern_return_t +kern_return_t thread_set_mach_voucher( - thread_t thread, - ipc_voucher_t voucher) + thread_t thread, + ipc_voucher_t voucher) { ipc_voucher_t old_voucher; ledger_t bankledger = NULL; struct thread_group *banktg = NULL; - if (THREAD_NULL == thread) + if (THREAD_NULL == thread) { return KERN_INVALID_ARGUMENT; + } - if (thread != current_thread() && thread->started) + if (thread != current_thread() && thread->started) { return KERN_INVALID_ARGUMENT; + } ipc_voucher_reference(voucher); bank_get_bank_ledger_and_thread_group(voucher, &bankledger, &banktg); @@ -3115,18 +3173,18 @@ thread_set_mach_voucher( bank_swap_thread_bank_ledger(thread, bankledger); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - MACHDBG_CODE(DBG_MACH_IPC,MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, - (uintptr_t)thread_tid(thread), - (uintptr_t)MACH_PORT_NULL, - VM_KERNEL_ADDRPERM((uintptr_t)voucher), - 3, 0); + MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER) | DBG_FUNC_NONE, + (uintptr_t)thread_tid(thread), + (uintptr_t)MACH_PORT_NULL, + VM_KERNEL_ADDRPERM((uintptr_t)voucher), + 3, 0); ipc_voucher_release(old_voucher); return KERN_SUCCESS; } -/* +/* * thread_swap_mach_voucher - swap a voucher reference for the specified thread voucher * * Conditions: callers holds a reference on the new and presumed old voucher(s). @@ -3136,9 +3194,9 @@ thread_set_mach_voucher( */ kern_return_t thread_swap_mach_voucher( - __unused thread_t thread, - __unused ipc_voucher_t new_voucher, - ipc_voucher_t *in_out_old_voucher) + __unused thread_t thread, + __unused ipc_voucher_t new_voucher, + ipc_voucher_t *in_out_old_voucher) { /* * Currently this function is only called from a MIG generated @@ -3150,7 +3208,7 @@ thread_swap_mach_voucher( return KERN_NOT_SUPPORTED; } -/* +/* * thread_get_current_voucher_origin_pid - get the pid of the originator of the current voucher. */ kern_return_t @@ -3160,15 +3218,15 @@ thread_get_current_voucher_origin_pid( uint32_t buf_size; kern_return_t kr; thread_t thread = current_thread(); - + buf_size = sizeof(*pid); kr = mach_voucher_attr_command(thread->ith_voucher, - MACH_VOUCHER_ATTR_KEY_BANK, - BANK_ORIGINATOR_PID, - NULL, - 0, - (mach_voucher_attr_content_t)pid, - &buf_size); + MACH_VOUCHER_ATTR_KEY_BANK, + BANK_ORIGINATOR_PID, + NULL, + 0, + (mach_voucher_attr_content_t)pid, + &buf_size); return kr; } @@ -3211,25 +3269,30 @@ thread_clear_honor_qlimit(thread_t thread) /* * thread_enable_send_importance - set/clear the SEND_IMPORTANCE thread option bit. */ -void thread_enable_send_importance(thread_t thread, boolean_t enable) +void +thread_enable_send_importance(thread_t thread, boolean_t enable) { - if (enable == TRUE) + if (enable == TRUE) { thread->options |= TH_OPT_SEND_IMPORTANCE; - else + } else { thread->options &= ~TH_OPT_SEND_IMPORTANCE; + } } /* * thread_set_allocation_name - . 
*/ -kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t new_name) +kern_allocation_name_t +thread_set_allocation_name(kern_allocation_name_t new_name) { kern_allocation_name_t ret; thread_kernel_state_t kstate = thread_get_kernel_state(current_thread()); ret = kstate->allocation_name; // fifo - if (!new_name || !kstate->allocation_name) kstate->allocation_name = new_name; + if (!new_name || !kstate->allocation_name) { + kstate->allocation_name = new_name; + } return ret; } @@ -3240,23 +3303,28 @@ thread_get_last_wait_duration(thread_t thread) } #if CONFIG_DTRACE -uint32_t dtrace_get_thread_predcache(thread_t thread) +uint32_t +dtrace_get_thread_predcache(thread_t thread) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { return thread->t_dtrace_predcache; - else + } else { return 0; + } } -int64_t dtrace_get_thread_vtime(thread_t thread) +int64_t +dtrace_get_thread_vtime(thread_t thread) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { return thread->t_dtrace_vtime; - else + } else { return 0; + } } -int dtrace_get_thread_last_cpu_id(thread_t thread) +int +dtrace_get_thread_last_cpu_id(thread_t thread) { if ((thread != THREAD_NULL) && (thread->last_processor != PROCESSOR_NULL)) { return thread->last_processor->cpu_id; @@ -3265,28 +3333,34 @@ int dtrace_get_thread_last_cpu_id(thread_t thread) } } -int64_t dtrace_get_thread_tracing(thread_t thread) +int64_t +dtrace_get_thread_tracing(thread_t thread) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { return thread->t_dtrace_tracing; - else + } else { return 0; + } } -boolean_t dtrace_get_thread_reentering(thread_t thread) +boolean_t +dtrace_get_thread_reentering(thread_t thread) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { return (thread->options & TH_OPT_DTRACE) ? TRUE : FALSE; - else + } else { return 0; + } } -vm_offset_t dtrace_get_kernel_stack(thread_t thread) +vm_offset_t +dtrace_get_kernel_stack(thread_t thread) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { return thread->kernel_stack; - else + } else { return 0; + } } #if KASAN @@ -3297,50 +3371,61 @@ kasan_get_thread_data(thread_t thread) } #endif -int64_t dtrace_calc_thread_recent_vtime(thread_t thread) +int64_t +dtrace_calc_thread_recent_vtime(thread_t thread) { if (thread != THREAD_NULL) { processor_t processor = current_processor(); - uint64_t abstime = mach_absolute_time(); - timer_t timer; + uint64_t abstime = mach_absolute_time(); + timer_t timer; timer = PROCESSOR_DATA(processor, thread_timer); return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) + - (abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */ - } else + (abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? 
*/ + } else { return 0; + } } -void dtrace_set_thread_predcache(thread_t thread, uint32_t predcache) +void +dtrace_set_thread_predcache(thread_t thread, uint32_t predcache) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { thread->t_dtrace_predcache = predcache; + } } -void dtrace_set_thread_vtime(thread_t thread, int64_t vtime) +void +dtrace_set_thread_vtime(thread_t thread, int64_t vtime) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { thread->t_dtrace_vtime = vtime; + } } -void dtrace_set_thread_tracing(thread_t thread, int64_t accum) +void +dtrace_set_thread_tracing(thread_t thread, int64_t accum) { - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { thread->t_dtrace_tracing = accum; + } } -void dtrace_set_thread_reentering(thread_t thread, boolean_t vbool) +void +dtrace_set_thread_reentering(thread_t thread, boolean_t vbool) { if (thread != THREAD_NULL) { - if (vbool) + if (vbool) { thread->options |= TH_OPT_DTRACE; - else + } else { thread->options &= (~TH_OPT_DTRACE); + } } } -vm_offset_t dtrace_set_thread_recover(thread_t thread, vm_offset_t recover) +vm_offset_t +dtrace_set_thread_recover(thread_t thread, vm_offset_t recover) { vm_offset_t prev = 0; @@ -3351,7 +3436,14 @@ vm_offset_t dtrace_set_thread_recover(thread_t thread, vm_offset_t recover) return prev; } -void dtrace_thread_bootstrap(void) +vm_offset_t +dtrace_sign_and_set_thread_recover(thread_t thread, vm_offset_t recover) +{ + return dtrace_set_thread_recover(thread, recover); +} + +void +dtrace_thread_bootstrap(void) { task_t task = current_task(); @@ -3360,13 +3452,12 @@ void dtrace_thread_bootstrap(void) if (thread->t_dtrace_flags & TH_DTRACE_EXECSUCCESS) { thread->t_dtrace_flags &= ~TH_DTRACE_EXECSUCCESS; DTRACE_PROC(exec__success); - KDBG(BSDDBG_CODE(DBG_BSD_PROC,BSD_PROC_EXEC), - task_pid(task)); + KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXEC), + task_pid(task)); } DTRACE_PROC(start); } DTRACE_PROC(lwp__start); - } void diff --git a/osfmk/kern/thread.h b/osfmk/kern/thread.h index d0a5212a2..39b9c4fcf 100644 --- a/osfmk/kern/thread.h +++ b/osfmk/kern/thread.h @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2016 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -81,7 +81,7 @@ * */ -#ifndef _KERN_THREAD_H_ +#ifndef _KERN_THREAD_H_ #define _KERN_THREAD_H_ #include @@ -98,7 +98,7 @@ #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -151,7 +151,6 @@ typedef struct task_watcher task_watch_t; #endif /* CONFIG_EMBEDDED */ struct thread { - #if MACH_ASSERT #define THREAD_MAGIC 0x1234ABCDDCBA4321ULL /* Ensure nothing uses &thread as a queue entry */ @@ -171,9 +170,9 @@ struct thread { * anywhere in the thread structure. */ union { - queue_chain_t runq_links; /* run queue links */ - queue_chain_t wait_links; /* wait queue links */ - struct priority_queue_entry wait_prioq_links; /* priority ordered waitq links */ + queue_chain_t runq_links; /* run queue links */ + queue_chain_t wait_links; /* wait queue links */ + struct priority_queue_entry wait_prioq_links; /* priority ordered waitq links */ }; processor_t runq; /* run queue assignment */ @@ -186,93 +185,93 @@ struct thread { /* Data updated during assert_wait/thread_wakeup */ #if __SMP__ - decl_simple_lock_data(,sched_lock) /* scheduling lock (thread_lock()) */ - decl_simple_lock_data(,wake_lock) /* for thread stop / wait (wake_lock()) */ + decl_simple_lock_data(, sched_lock) /* scheduling lock (thread_lock()) */ + decl_simple_lock_data(, wake_lock) /* for thread stop / wait (wake_lock()) */ #endif - integer_t options; /* options set by thread itself */ -#define TH_OPT_INTMASK 0x0003 /* interrupt / abort level */ -#define TH_OPT_VMPRIV 0x0004 /* may allocate reserved memory */ -#define TH_OPT_DTRACE 0x0008 /* executing under dtrace_probe */ -#define TH_OPT_SYSTEM_CRITICAL 0x0010 /* Thread must always be allowed to run - even under heavy load */ -#define TH_OPT_PROC_CPULIMIT 0x0020 /* Thread has a task-wide CPU limit applied to it */ -#define TH_OPT_PRVT_CPULIMIT 0x0040 /* Thread has a thread-private CPU limit applied to it */ -#define TH_OPT_IDLE_THREAD 0x0080 /* Thread is a per-processor idle thread */ -#define TH_OPT_GLOBAL_FORCED_IDLE 0x0100 /* Thread performs forced idle for thermal control */ -#define TH_OPT_SCHED_VM_GROUP 0x0200 /* Thread belongs to special scheduler VM group */ -#define TH_OPT_HONOR_QLIMIT 0x0400 /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */ -#define TH_OPT_SEND_IMPORTANCE 0x0800 /* Thread will allow importance donation from kernel rpc */ -#define TH_OPT_ZONE_GC 0x1000 /* zone_gc() called on this thread */ - - boolean_t wake_active; /* wake event on stop */ - int at_safe_point; /* thread_abort_safely allowed */ - ast_t reason; /* why we blocked */ - uint32_t quantum_remaining; - wait_result_t 
wait_result; /* outcome of wait - - * may be examined by this thread - * WITHOUT locking */ - thread_continue_t continuation; /* continue here next dispatch */ - void *parameter; /* continuation parameter */ + integer_t options; /* options set by thread itself */ +#define TH_OPT_INTMASK 0x0003 /* interrupt / abort level */ +#define TH_OPT_VMPRIV 0x0004 /* may allocate reserved memory */ +#define TH_OPT_DTRACE 0x0008 /* executing under dtrace_probe */ +#define TH_OPT_SYSTEM_CRITICAL 0x0010 /* Thread must always be allowed to run - even under heavy load */ +#define TH_OPT_PROC_CPULIMIT 0x0020 /* Thread has a task-wide CPU limit applied to it */ +#define TH_OPT_PRVT_CPULIMIT 0x0040 /* Thread has a thread-private CPU limit applied to it */ +#define TH_OPT_IDLE_THREAD 0x0080 /* Thread is a per-processor idle thread */ +#define TH_OPT_GLOBAL_FORCED_IDLE 0x0100 /* Thread performs forced idle for thermal control */ +#define TH_OPT_SCHED_VM_GROUP 0x0200 /* Thread belongs to special scheduler VM group */ +#define TH_OPT_HONOR_QLIMIT 0x0400 /* Thread will honor qlimit while sending mach_msg, regardless of MACH_SEND_ALWAYS */ +#define TH_OPT_SEND_IMPORTANCE 0x0800 /* Thread will allow importance donation from kernel rpc */ +#define TH_OPT_ZONE_GC 0x1000 /* zone_gc() called on this thread */ + + boolean_t wake_active; /* wake event on stop */ + int at_safe_point; /* thread_abort_safely allowed */ + ast_t reason; /* why we blocked */ + uint32_t quantum_remaining; + wait_result_t wait_result; /* outcome of wait - + * may be examined by this thread + * WITHOUT locking */ + thread_continue_t continuation; /* continue here next dispatch */ + void *parameter; /* continuation parameter */ /* Data updated/used in thread_invoke */ - vm_offset_t kernel_stack; /* current kernel stack */ - vm_offset_t reserved_stack; /* reserved kernel stack */ + vm_offset_t kernel_stack; /* current kernel stack */ + vm_offset_t reserved_stack; /* reserved kernel stack */ #if KASAN struct kasan_thread_data kasan_data; #endif /* Thread state: */ - int state; + int state; /* * Thread states [bits or'ed] */ -#define TH_WAIT 0x01 /* queued for waiting */ -#define TH_SUSP 0x02 /* stopped or requested to stop */ -#define TH_RUN 0x04 /* running or on runq */ -#define TH_UNINT 0x08 /* waiting uninteruptibly */ -#define TH_TERMINATE 0x10 /* halted at termination */ -#define TH_TERMINATE2 0x20 /* added to termination queue */ -#define TH_WAIT_REPORT 0x40 /* the wait is using the sched_call, - only set if TH_WAIT is also set */ -#define TH_IDLE 0x80 /* idling processor */ +#define TH_WAIT 0x01 /* queued for waiting */ +#define TH_SUSP 0x02 /* stopped or requested to stop */ +#define TH_RUN 0x04 /* running or on runq */ +#define TH_UNINT 0x08 /* waiting uninteruptibly */ +#define TH_TERMINATE 0x10 /* halted at termination */ +#define TH_TERMINATE2 0x20 /* added to termination queue */ +#define TH_WAIT_REPORT 0x40 /* the wait is using the sched_call, + * only set if TH_WAIT is also set */ +#define TH_IDLE 0x80 /* idling processor */ /* Scheduling information */ - sched_mode_t sched_mode; /* scheduling mode */ - sched_mode_t saved_mode; /* saved mode during forced mode demotion */ + sched_mode_t sched_mode; /* scheduling mode */ + sched_mode_t saved_mode; /* saved mode during forced mode demotion */ /* This thread's contribution to global sched counters */ sched_bucket_t th_sched_bucket; - sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */ - sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by 
sfi_lock */ + sfi_class_id_t sfi_class; /* SFI class (XXX Updated on CSW/QE/AST) */ + sfi_class_id_t sfi_wait_class; /* Currently in SFI wait for this class, protected by sfi_lock */ - uint32_t sched_flags; /* current flag bits */ -/* TH_SFLAG_FAIRSHARE_TRIPPED (unused) 0x0001 */ -#define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */ -#define TH_SFLAG_THROTTLED 0x0004 /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */ -#define TH_SFLAG_DEMOTED_MASK (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE) /* saved_mode contains previous sched_mode */ + uint32_t sched_flags; /* current flag bits */ +#define TH_SFLAG_NO_SMT 0x0001 /* On an SMT CPU, this thread must be scheduled alone */ +#define TH_SFLAG_FAILSAFE 0x0002 /* fail-safe has tripped */ +#define TH_SFLAG_THROTTLED 0x0004 /* throttled thread forced to timeshare mode (may be applied in addition to failsafe) */ +#define TH_SFLAG_DEMOTED_MASK (TH_SFLAG_THROTTLED | TH_SFLAG_FAILSAFE) /* saved_mode contains previous sched_mode */ -#define TH_SFLAG_PROMOTED 0x0008 /* sched pri has been promoted by kernel mutex priority promotion */ -#define TH_SFLAG_ABORT 0x0010 /* abort interruptible waits */ -#define TH_SFLAG_ABORTSAFELY 0x0020 /* ... but only those at safe point */ -#define TH_SFLAG_ABORTED_MASK (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY) -#define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */ -#define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */ -#define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS) +#define TH_SFLAG_PROMOTED 0x0008 /* sched pri has been promoted by kernel mutex priority promotion */ +#define TH_SFLAG_ABORT 0x0010 /* abort interruptible waits */ +#define TH_SFLAG_ABORTSAFELY 0x0020 /* ... but only those at safe point */ +#define TH_SFLAG_ABORTED_MASK (TH_SFLAG_ABORT | TH_SFLAG_ABORTSAFELY) +#define TH_SFLAG_DEPRESS 0x0040 /* normal depress yield */ +#define TH_SFLAG_POLLDEPRESS 0x0080 /* polled depress yield */ +#define TH_SFLAG_DEPRESSED_MASK (TH_SFLAG_DEPRESS | TH_SFLAG_POLLDEPRESS) /* unused TH_SFLAG_PRI_UPDATE 0x0100 */ -#define TH_SFLAG_EAGERPREEMPT 0x0200 /* Any preemption of this thread should be treated as if AST_URGENT applied */ -#define TH_SFLAG_RW_PROMOTED 0x0400 /* promote reason: blocking with RW lock held */ +#define TH_SFLAG_EAGERPREEMPT 0x0200 /* Any preemption of this thread should be treated as if AST_URGENT applied */ +#define TH_SFLAG_RW_PROMOTED 0x0400 /* promote reason: blocking with RW lock held */ /* unused TH_SFLAG_THROTTLE_DEMOTED 0x0800 */ -#define TH_SFLAG_WAITQ_PROMOTED 0x1000 /* promote reason: waitq wakeup (generally for IPC receive) */ +#define TH_SFLAG_WAITQ_PROMOTED 0x1000 /* promote reason: waitq wakeup (generally for IPC receive) */ -#define TH_SFLAG_EXEC_PROMOTED 0x8000 /* promote reason: thread is in an exec */ +#define TH_SFLAG_EXEC_PROMOTED 0x8000 /* promote reason: thread is in an exec */ /* 'promote reasons' that request a priority floor only, not a custom priority */ #define TH_SFLAG_PROMOTE_REASON_MASK (TH_SFLAG_RW_PROMOTED | TH_SFLAG_WAITQ_PROMOTED | TH_SFLAG_EXEC_PROMOTED) -#define TH_SFLAG_RW_PROMOTED_BIT (10) /* 0x400 */ +#define TH_SFLAG_RW_PROMOTED_BIT (10) /* 0x400 */ int16_t sched_pri; /* scheduled (current) priority */ int16_t base_pri; /* base priority */ @@ -282,135 +281,135 @@ struct thread { #if defined(CONFIG_SCHED_GRRR) #if 0 - uint16_t grrr_deficit; /* fixed point (1/1000th quantum) fractional deficit */ + uint16_t grrr_deficit; /* fixed point (1/1000th quantum) fractional deficit */ #endif #endif - 
int16_t promotions; /* level of promotion */ - int iotier_override; /* atomic operations to set, cleared on ret to user */ - struct os_refcnt ref_count; /* number of references to me */ + int16_t promotions; /* level of promotion */ + int iotier_override; /* atomic operations to set, cleared on ret to user */ + struct os_refcnt ref_count; /* number of references to me */ lck_mtx_t* waiting_for_mutex; /* points to mutex we're waiting for until we acquire it */ - uint32_t rwlock_count; /* Number of lck_rw_t locks held by thread */ + uint32_t rwlock_count; /* Number of lck_rw_t locks held by thread */ - integer_t importance; /* task-relative importance */ + integer_t importance; /* task-relative importance */ uint32_t was_promoted_on_wakeup; /* thread promoted on wakeup to acquire mutex */ /* Priority depression expiration */ - integer_t depress_timer_active; - timer_call_data_t depress_timer; - /* real-time parameters */ - struct { /* see mach/thread_policy.h */ - uint32_t period; - uint32_t computation; - uint32_t constraint; - boolean_t preemptible; - uint64_t deadline; - } realtime; - - uint64_t last_run_time; /* time when thread was switched away from */ - uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */ - uint64_t last_basepri_change_time; /* time when thread was last changed in basepri while runnable */ - uint64_t same_pri_latency; + integer_t depress_timer_active; + timer_call_data_t depress_timer; + /* real-time parameters */ + struct { /* see mach/thread_policy.h */ + uint32_t period; + uint32_t computation; + uint32_t constraint; + boolean_t preemptible; + uint64_t deadline; + } realtime; + + uint64_t last_run_time; /* time when thread was switched away from */ + uint64_t last_made_runnable_time; /* time when thread was unblocked or preempted */ + uint64_t last_basepri_change_time; /* time when thread was last changed in basepri while runnable */ + uint64_t same_pri_latency; #define THREAD_NOT_RUNNABLE (~0ULL) #if defined(CONFIG_SCHED_MULTIQ) - sched_group_t sched_group; + sched_group_t sched_group; #endif /* defined(CONFIG_SCHED_MULTIQ) */ - /* Data used during setrun/dispatch */ - timer_data_t system_timer; /* system mode timer */ - processor_t bound_processor; /* bound to a processor? */ - processor_t last_processor; /* processor last dispatched on */ - processor_t chosen_processor; /* Where we want to run this thread */ + /* Data used during setrun/dispatch */ + timer_data_t system_timer; /* system mode timer */ + processor_t bound_processor; /* bound to a processor? 
*/ + processor_t last_processor; /* processor last dispatched on */ + processor_t chosen_processor; /* Where we want to run this thread */ /* Fail-safe computation since last unblock or qualifying yield */ - uint64_t computation_metered; - uint64_t computation_epoch; - uint64_t safe_release; /* when to release fail-safe */ + uint64_t computation_metered; + uint64_t computation_epoch; + uint64_t safe_release; /* when to release fail-safe */ /* Call out from scheduler */ - void (*sched_call)( - int type, - thread_t thread); + void (*sched_call)( + int type, + thread_t thread); #if defined(CONFIG_SCHED_PROTO) - uint32_t runqueue_generation; /* last time runqueue was drained */ + uint32_t runqueue_generation; /* last time runqueue was drained */ #endif /* Statistics and timesharing calculations */ #if defined(CONFIG_SCHED_TIMESHARE_CORE) - natural_t sched_stamp; /* last scheduler tick */ - natural_t sched_usage; /* timesharing cpu usage [sched] */ - natural_t pri_shift; /* usage -> priority from pset */ - natural_t cpu_usage; /* instrumented cpu usage [%cpu] */ - natural_t cpu_delta; /* accumulated cpu_usage delta */ + natural_t sched_stamp; /* last scheduler tick */ + natural_t sched_usage; /* timesharing cpu usage [sched] */ + natural_t pri_shift; /* usage -> priority from pset */ + natural_t cpu_usage; /* instrumented cpu usage [%cpu] */ + natural_t cpu_delta; /* accumulated cpu_usage delta */ #endif /* CONFIG_SCHED_TIMESHARE_CORE */ - uint32_t c_switch; /* total context switches */ - uint32_t p_switch; /* total processor switches */ - uint32_t ps_switch; /* total pset switches */ + uint32_t c_switch; /* total context switches */ + uint32_t p_switch; /* total processor switches */ + uint32_t ps_switch; /* total pset switches */ integer_t mutex_count; /* total count of locks held */ /* Timing data structures */ - int precise_user_kernel_time; /* precise user/kernel enabled for this thread */ - timer_data_t user_timer; /* user mode timer */ - uint64_t user_timer_save; /* saved user timer value */ - uint64_t system_timer_save; /* saved system timer value */ - uint64_t vtimer_user_save; /* saved values for vtimers */ - uint64_t vtimer_prof_save; - uint64_t vtimer_rlim_save; - uint64_t vtimer_qos_save; - - timer_data_t ptime; /* time executing in P mode */ - timer_data_t runnable_timer; /* time the thread is runnable (including running) */ + int precise_user_kernel_time; /* precise user/kernel enabled for this thread */ + timer_data_t user_timer; /* user mode timer */ + uint64_t user_timer_save; /* saved user timer value */ + uint64_t system_timer_save; /* saved system timer value */ + uint64_t vtimer_user_save; /* saved values for vtimers */ + uint64_t vtimer_prof_save; + uint64_t vtimer_rlim_save; + uint64_t vtimer_qos_save; + + timer_data_t ptime; /* time executing in P mode */ + timer_data_t runnable_timer; /* time the thread is runnable (including running) */ #if CONFIG_SCHED_SFI /* Timing for wait state */ - uint64_t wait_sfi_begin_time; /* start time for thread waiting in SFI */ + uint64_t wait_sfi_begin_time; /* start time for thread waiting in SFI */ #endif /* Timed wait expiration */ - timer_call_data_t wait_timer; - integer_t wait_timer_active; - boolean_t wait_timer_is_set; + timer_call_data_t wait_timer; + integer_t wait_timer_active; + boolean_t wait_timer_is_set; /* * Processor/cache affinity * - affinity_threads links task threads with the same affinity set */ - affinity_set_t affinity_set; - queue_chain_t affinity_threads; + affinity_set_t affinity_set; + queue_chain_t 
affinity_threads; /* Various bits of state to stash across a continuation, exclusive to the current thread block point */ union { struct { - mach_msg_return_t state; /* receive state */ - mach_port_seqno_t seqno; /* seqno of recvd message */ - ipc_object_t object; /* object received on */ - mach_vm_address_t msg_addr; /* receive buffer pointer */ - mach_msg_size_t rsize; /* max size for recvd msg */ - mach_msg_size_t msize; /* actual size for recvd msg */ - mach_msg_option_t option; /* options for receive */ - mach_port_name_t receiver_name; /* the receive port name */ - struct knote *knote; /* knote fired for rcv */ + mach_msg_return_t state; /* receive state */ + mach_port_seqno_t seqno; /* seqno of recvd message */ + ipc_object_t object; /* object received on */ + mach_vm_address_t msg_addr; /* receive buffer pointer */ + mach_msg_size_t rsize; /* max size for recvd msg */ + mach_msg_size_t msize; /* actual size for recvd msg */ + mach_msg_option_t option; /* options for receive */ + mach_port_name_t receiver_name; /* the receive port name */ + struct knote *knote; /* knote fired for rcv */ union { - struct ipc_kmsg *kmsg; /* received message */ - struct ipc_mqueue *peekq; /* mqueue to peek at */ + struct ipc_kmsg *kmsg; /* received message */ + struct ipc_mqueue *peekq; /* mqueue to peek at */ struct { - mach_msg_priority_t qos; /* received message qos */ - mach_msg_priority_t oqos; /* override qos for message */ + mach_msg_priority_t qos; /* received message qos */ + mach_msg_priority_t oqos; /* override qos for message */ } received_qos; }; - mach_msg_continue_t continuation; + mach_msg_continue_t continuation; } receive; struct { - struct semaphore *waitsemaphore; /* semaphore ref */ - struct semaphore *signalsemaphore; /* semaphore ref */ - int options; /* semaphore options */ - kern_return_t result; /* primary result */ + struct semaphore *waitsemaphore; /* semaphore ref */ + struct semaphore *signalsemaphore; /* semaphore ref */ + int options; /* semaphore options */ + kern_return_t result; /* primary result */ mach_msg_continue_t continuation; } sema; } saved; @@ -437,67 +436,67 @@ struct thread { /* IPC data structures */ #if IMPORTANCE_INHERITANCE - natural_t ith_assertions; /* assertions pending drop */ + natural_t ith_assertions; /* assertions pending drop */ #endif - struct ipc_kmsg_queue ith_messages; /* messages to reap */ - mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */ + struct ipc_kmsg_queue ith_messages; /* messages to reap */ + mach_port_t ith_rpc_reply; /* reply port for kernel RPCs */ /* Ast/Halt data structures */ - vm_offset_t recover; /* page fault recover(copyin/out) */ + vm_offset_t recover; /* page fault recover(copyin/out) */ - queue_chain_t threads; /* global list of all threads */ + queue_chain_t threads; /* global list of all threads */ /* Activation */ - queue_chain_t task_threads; + queue_chain_t task_threads; - /* Task membership */ - struct task *task; - vm_map_t map; + /* Task membership */ + struct task *task; + vm_map_t map; #if DEVELOPMENT || DEBUG boolean_t pmap_footprint_suspended; #endif /* DEVELOPMENT || DEBUG */ - decl_lck_mtx_data(,mutex) + decl_lck_mtx_data(, mutex) - /* Pending thread ast(s) */ - ast_t ast; + /* Pending thread ast(s) */ + ast_t ast; - /* Miscellaneous bits guarded by mutex */ - uint32_t - active:1, /* Thread is active and has not been terminated */ - started:1, /* Thread has been started after creation */ - static_param:1, /* Disallow policy parameter changes */ - inspection:1, /* TRUE when task is being 
inspected by crash reporter */ - policy_reset:1, /* Disallow policy parameter changes on terminating threads */ - suspend_parked:1, /* thread parked in thread_suspended */ - corpse_dup:1, /* TRUE when thread is an inactive duplicate in a corpse */ - :0; + /* Miscellaneous bits guarded by mutex */ + uint32_t + active:1, /* Thread is active and has not been terminated */ + started:1, /* Thread has been started after creation */ + static_param:1, /* Disallow policy parameter changes */ + inspection:1, /* TRUE when task is being inspected by crash reporter */ + policy_reset:1, /* Disallow policy parameter changes on terminating threads */ + suspend_parked:1, /* thread parked in thread_suspended */ + corpse_dup:1, /* TRUE when thread is an inactive duplicate in a corpse */ + :0; - /* Ports associated with this thread */ - struct ipc_port *ith_self; /* not a right, doesn't hold ref */ - struct ipc_port *ith_sself; /* a send right */ - struct ipc_port *ith_special_reply_port; /* ref to special reply port */ - struct exception_action *exc_actions; + /* Ports associated with this thread */ + struct ipc_port *ith_self; /* not a right, doesn't hold ref */ + struct ipc_port *ith_sself; /* a send right */ + struct ipc_port *ith_special_reply_port; /* ref to special reply port */ + struct exception_action *exc_actions; -#ifdef MACH_BSD - void *uthread; +#ifdef MACH_BSD + void *uthread; #endif #if CONFIG_DTRACE - uint32_t t_dtrace_flags; /* DTrace thread states */ -#define TH_DTRACE_EXECSUCCESS 0x01 - uint32_t t_dtrace_predcache;/* DTrace per thread predicate value hint */ - int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */ - int64_t t_dtrace_vtime; + uint32_t t_dtrace_flags; /* DTrace thread states */ +#define TH_DTRACE_EXECSUCCESS 0x01 + uint32_t t_dtrace_predcache; /* DTrace per thread predicate value hint */ + int64_t t_dtrace_tracing; /* Thread time under dtrace_probe() */ + int64_t t_dtrace_vtime; #endif - clock_sec_t t_page_creation_time; - uint32_t t_page_creation_count; - uint32_t t_page_creation_throttled; + clock_sec_t t_page_creation_time; + uint32_t t_page_creation_count; + uint32_t t_page_creation_throttled; #if (DEVELOPMENT || DEBUG) - uint64_t t_page_creation_throttled_hard; - uint64_t t_page_creation_throttled_soft; + uint64_t t_page_creation_throttled_hard; + uint64_t t_page_creation_throttled_soft; #endif /* DEVELOPMENT || DEBUG */ #ifdef KPERF @@ -529,16 +528,16 @@ struct thread { void *hv_thread_target; #endif /* HYPERVISOR */ - uint64_t thread_id; /*system wide unique thread-id*/ + uint64_t thread_id; /*system wide unique thread-id*/ /* Statistics accumulated per-thread and aggregated per-task */ - uint32_t syscalls_unix; - uint32_t syscalls_mach; - ledger_t t_ledger; - ledger_t t_threadledger; /* per thread ledger */ - ledger_t t_bankledger; /* ledger to charge someone */ - uint64_t t_deduct_bank_ledger_time; /* cpu time to be deducted from bank ledger */ - uint64_t t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */ + uint32_t syscalls_unix; + uint32_t syscalls_mach; + ledger_t t_ledger; + ledger_t t_threadledger; /* per thread ledger */ + ledger_t t_bankledger; /* ledger to charge someone */ + uint64_t t_deduct_bank_ledger_time; /* cpu time to be deducted from bank ledger */ + uint64_t t_deduct_bank_ledger_energy; /* energy to be deducted from bank ledger */ #if MONOTONIC struct mt_thread t_monotonic; @@ -553,11 +552,11 @@ struct thread { /* usynch override is protected by the task lock, eventually will be thread mutex */ struct 
thread_qos_override { - struct thread_qos_override *override_next; - uint32_t override_contended_resource_count; - int16_t override_qos; - int16_t override_resource_type; - user_addr_t override_resource; + struct thread_qos_override *override_next; + uint32_t override_contended_resource_count; + int16_t override_qos; + int16_t override_resource_type; + user_addr_t override_resource; } *overrides; uint32_t ipc_overrides; @@ -566,33 +565,33 @@ struct thread { uint16_t user_promotion_basepri; _Atomic uint16_t kevent_ast_bits; - io_stat_info_t thread_io_stats; /* per-thread I/O statistics */ + io_stat_info_t thread_io_stats; /* per-thread I/O statistics */ #if CONFIG_EMBEDDED - task_watch_t * taskwatch; /* task watch */ + task_watch_t * taskwatch; /* task watch */ #endif /* CONFIG_EMBEDDED */ - uint32_t thread_callout_interrupt_wakeups; - uint32_t thread_callout_platform_idle_wakeups; - uint32_t thread_timer_wakeups_bin_1; - uint32_t thread_timer_wakeups_bin_2; - uint16_t thread_tag; - uint16_t callout_woken_from_icontext:1, - callout_woken_from_platform_idle:1, - callout_woke_thread:1, - thread_bitfield_unused:13; - - mach_port_name_t ith_voucher_name; - ipc_voucher_t ith_voucher; + uint32_t thread_callout_interrupt_wakeups; + uint32_t thread_callout_platform_idle_wakeups; + uint32_t thread_timer_wakeups_bin_1; + uint32_t thread_timer_wakeups_bin_2; + uint16_t thread_tag; + uint16_t callout_woken_from_icontext:1, + callout_woken_from_platform_idle:1, + callout_woke_thread:1, + thread_bitfield_unused:13; + + mach_port_name_t ith_voucher_name; + ipc_voucher_t ith_voucher; #if CONFIG_IOSCHED - void *decmp_upl; + void *decmp_upl; #endif /* CONFIG_IOSCHED */ /* work interval (if any) associated with the thread. Uses thread mutex */ struct work_interval *th_work_interval; -#if SCHED_TRACE_THREAD_WAKEUPS - uintptr_t thread_wakeup_bt[64]; +#if SCHED_TRACE_THREAD_WAKEUPS + uintptr_t thread_wakeup_bt[64]; #endif turnstile_update_flags_t inheritor_flags; /* inheritor flags for inheritor field */ block_hint_t pending_block_hint; @@ -604,7 +603,7 @@ struct thread { #define ith_msg_addr saved.receive.msg_addr #define ith_rsize saved.receive.rsize #define ith_msize saved.receive.msize -#define ith_option saved.receive.option +#define ith_option saved.receive.option #define ith_receiver_name saved.receive.receiver_name #define ith_continuation saved.receive.continuation #define ith_kmsg saved.receive.kmsg @@ -628,230 +627,232 @@ struct thread { * being RECEIVE or SEND_ONCE is about. 
*/ #define ITH_KNOTE_VALID(kn, msgt_name) \ - (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \ - ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \ - (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE)) + (((kn) != ITH_KNOTE_NULL && (kn) != ITH_KNOTE_PSEUDO) && \ + ((msgt_name) == MACH_MSG_TYPE_PORT_RECEIVE || \ + (msgt_name) == MACH_MSG_TYPE_PORT_SEND_ONCE)) #if MACH_ASSERT #define assert_thread_magic(thread) assertf((thread)->thread_magic == THREAD_MAGIC, \ - "bad thread magic 0x%llx for thread %p, expected 0x%llx", \ - (thread)->thread_magic, (thread), THREAD_MAGIC) + "bad thread magic 0x%llx for thread %p, expected 0x%llx", \ + (thread)->thread_magic, (thread), THREAD_MAGIC) #else #define assert_thread_magic(thread) do { (void)(thread); } while (0) #endif -extern void thread_bootstrap(void); +extern void thread_bootstrap(void); -extern void thread_init(void); +extern void thread_init(void); -extern void thread_daemon_init(void); +extern void thread_daemon_init(void); -#define thread_reference_internal(thread) \ - os_ref_retain(&(thread)->ref_count); +#define thread_reference_internal(thread) \ + os_ref_retain(&(thread)->ref_count); -#define thread_reference(thread) \ -MACRO_BEGIN \ - if ((thread) != THREAD_NULL) \ - thread_reference_internal(thread); \ +#define thread_reference(thread) \ +MACRO_BEGIN \ + if ((thread) != THREAD_NULL) \ + thread_reference_internal(thread); \ MACRO_END -extern void thread_deallocate( - thread_t thread); +extern void thread_deallocate( + thread_t thread); + +extern void thread_deallocate_safe( + thread_t thread); -extern void thread_deallocate_safe( - thread_t thread); +extern void thread_inspect_deallocate( + thread_inspect_t thread); -extern void thread_inspect_deallocate( - thread_inspect_t thread); +extern void thread_terminate_self(void); -extern void thread_terminate_self(void); +extern kern_return_t thread_terminate_internal( + thread_t thread); -extern kern_return_t thread_terminate_internal( - thread_t thread); +extern void thread_start( + thread_t thread) __attribute__ ((noinline)); -extern void thread_start( - thread_t thread) __attribute__ ((noinline)); +extern void thread_start_in_assert_wait( + thread_t thread, + event_t event, + wait_interrupt_t interruptible) __attribute__ ((noinline)); -extern void thread_start_in_assert_wait( - thread_t thread, - event_t event, - wait_interrupt_t interruptible) __attribute__ ((noinline)); +extern void thread_terminate_enqueue( + thread_t thread); -extern void thread_terminate_enqueue( - thread_t thread); +extern void thread_exception_enqueue( + task_t task, + thread_t thread, + exception_type_t etype); -extern void thread_exception_enqueue( - task_t task, - thread_t thread, - exception_type_t etype); +extern void thread_copy_resource_info( + thread_t dst_thread, + thread_t src_thread); -extern void thread_copy_resource_info( - thread_t dst_thread, - thread_t src_thread); +extern void thread_terminate_crashed_threads(void); -extern void thread_terminate_crashed_threads(void); +extern void turnstile_deallocate_enqueue( + struct turnstile *turnstile); -extern void turnstile_deallocate_enqueue( - struct turnstile *turnstile); +extern void thread_stack_enqueue( + thread_t thread); -extern void thread_stack_enqueue( - thread_t thread); +extern void thread_hold( + thread_t thread); -extern void thread_hold( - thread_t thread); +extern void thread_release( + thread_t thread); -extern void thread_release( - thread_t thread); +extern void thread_corpse_continue(void); -extern void thread_corpse_continue(void); 
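/*
 * Illustrative sketch, not part of the patch: the reference-counting
 * convention behind the thread_reference()/thread_deallocate() pair
 * declared above. thread_reference() is NULL-safe (the macro checks
 * THREAD_NULL before calling os_ref_retain()), and every reference a
 * caller stashes must eventually be balanced by thread_deallocate().
 * The lookup helper below is hypothetical.
 */
static thread_t
stash_thread_example(void)
{
	thread_t t = lookup_interesting_thread();  /* hypothetical lookup */

	thread_reference(t);    /* take a ref before keeping the pointer */
	return t;               /* caller drops it with thread_deallocate(t) */
}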
+extern boolean_t thread_is_active(thread_t thread); -extern boolean_t thread_is_active(thread_t thread); +extern lck_grp_t thread_lck_grp; /* Locking for scheduler state, always acquired with interrupts disabled (splsched()) */ #if __SMP__ -#define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0) -#define thread_lock(th) simple_lock(&(th)->sched_lock) -#define thread_unlock(th) simple_unlock(&(th)->sched_lock) +#define thread_lock_init(th) simple_lock_init(&(th)->sched_lock, 0) +#define thread_lock(th) simple_lock(&(th)->sched_lock, &thread_lck_grp) +#define thread_unlock(th) simple_unlock(&(th)->sched_lock) -#define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0) -#define wake_lock(th) simple_lock(&(th)->wake_lock) -#define wake_unlock(th) simple_unlock(&(th)->wake_lock) +#define wake_lock_init(th) simple_lock_init(&(th)->wake_lock, 0) +#define wake_lock(th) simple_lock(&(th)->wake_lock, &thread_lck_grp) +#define wake_unlock(th) simple_unlock(&(th)->wake_lock) #else -#define thread_lock_init(th) do { (void)th; } while(0) -#define thread_lock(th) do { (void)th; } while(0) -#define thread_unlock(th) do { (void)th; } while(0) +#define thread_lock_init(th) do { (void)th; } while(0) +#define thread_lock(th) do { (void)th; } while(0) +#define thread_unlock(th) do { (void)th; } while(0) -#define wake_lock_init(th) do { (void)th; } while(0) -#define wake_lock(th) do { (void)th; } while(0) -#define wake_unlock(th) do { (void)th; } while(0) +#define wake_lock_init(th) do { (void)th; } while(0) +#define wake_lock(th) do { (void)th; } while(0) +#define wake_unlock(th) do { (void)th; } while(0) #endif -#define thread_should_halt_fast(thread) (!(thread)->active) +#define thread_should_halt_fast(thread) (!(thread)->active) -extern void stack_alloc( - thread_t thread); +extern void stack_alloc( + thread_t thread); -extern void stack_handoff( - thread_t from, - thread_t to); +extern void stack_handoff( + thread_t from, + thread_t to); -extern void stack_free( - thread_t thread); +extern void stack_free( + thread_t thread); -extern void stack_free_reserved( - thread_t thread); +extern void stack_free_reserved( + thread_t thread); -extern boolean_t stack_alloc_try( - thread_t thread); +extern boolean_t stack_alloc_try( + thread_t thread); -extern void stack_collect(void); +extern void stack_collect(void); -extern void stack_init(void); +extern void stack_init(void); -extern kern_return_t thread_info_internal( - thread_t thread, - thread_flavor_t flavor, - thread_info_t thread_info_out, - mach_msg_type_number_t *thread_info_count); +extern kern_return_t thread_info_internal( + thread_t thread, + thread_flavor_t flavor, + thread_info_t thread_info_out, + mach_msg_type_number_t *thread_info_count); -extern kern_return_t kernel_thread_create( - thread_continue_t continuation, - void *parameter, - integer_t priority, - thread_t *new_thread); +extern kern_return_t kernel_thread_create( + thread_continue_t continuation, + void *parameter, + integer_t priority, + thread_t *new_thread); -extern kern_return_t kernel_thread_start_priority( - thread_continue_t continuation, - void *parameter, - integer_t priority, - thread_t *new_thread); +extern kern_return_t kernel_thread_start_priority( + thread_continue_t continuation, + void *parameter, + integer_t priority, + thread_t *new_thread); -extern void machine_stack_attach( - thread_t thread, - vm_offset_t stack); +extern void machine_stack_attach( + thread_t thread, + vm_offset_t stack); -extern vm_offset_t machine_stack_detach( - thread_t thread); 
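/*
 * Illustrative sketch, not part of the patch: the discipline stated in
 * the comment above the thread_lock() macros. Scheduler state is only
 * touched with interrupts disabled via splsched(); note that this
 * revision also passes &thread_lck_grp to simple_lock(), so the
 * per-thread locks participate in lock-group accounting.
 */
static void
touch_sched_state_example(thread_t thread)
{
	spl_t s = splsched();    /* disable interrupts first */

	thread_lock(thread);     /* per-thread scheduler lock */
	/* ... read or update thread->sched_* fields here ... */
	thread_unlock(thread);

	splx(s);                 /* restore the previous interrupt level */
}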
+extern vm_offset_t machine_stack_detach( + thread_t thread); -extern void machine_stack_handoff( - thread_t old, - thread_t new); +extern void machine_stack_handoff( + thread_t old, + thread_t new); -extern thread_t machine_switch_context( - thread_t old_thread, - thread_continue_t continuation, - thread_t new_thread); +extern thread_t machine_switch_context( + thread_t old_thread, + thread_continue_t continuation, + thread_t new_thread); -extern void machine_load_context( - thread_t thread) __attribute__((noreturn)); +extern void machine_load_context( + thread_t thread) __attribute__((noreturn)); -extern kern_return_t machine_thread_state_initialize( - thread_t thread); +extern kern_return_t machine_thread_state_initialize( + thread_t thread); -extern kern_return_t machine_thread_set_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t state, - mach_msg_type_number_t count); +extern kern_return_t machine_thread_set_state( + thread_t thread, + thread_flavor_t flavor, + thread_state_t state, + mach_msg_type_number_t count); -extern kern_return_t machine_thread_get_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t state, - mach_msg_type_number_t *count); +extern kern_return_t machine_thread_get_state( + thread_t thread, + thread_flavor_t flavor, + thread_state_t state, + mach_msg_type_number_t *count); -extern kern_return_t machine_thread_state_convert_from_user( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t count); +extern kern_return_t machine_thread_state_convert_from_user( + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t count); -extern kern_return_t machine_thread_state_convert_to_user( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t *count); +extern kern_return_t machine_thread_state_convert_to_user( + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t *count); -extern kern_return_t machine_thread_dup( - thread_t self, - thread_t target, - boolean_t is_corpse); +extern kern_return_t machine_thread_dup( + thread_t self, + thread_t target, + boolean_t is_corpse); -extern void machine_thread_init(void); +extern void machine_thread_init(void); -extern kern_return_t machine_thread_create( - thread_t thread, - task_t task); -extern void machine_thread_switch_addrmode( - thread_t thread); +extern kern_return_t machine_thread_create( + thread_t thread, + task_t task); +extern void machine_thread_switch_addrmode( + thread_t thread); -extern void machine_thread_destroy( - thread_t thread); +extern void machine_thread_destroy( + thread_t thread); -extern void machine_set_current_thread( - thread_t thread); +extern void machine_set_current_thread( + thread_t thread); -extern kern_return_t machine_thread_get_kern_state( - thread_t thread, - thread_flavor_t flavor, - thread_state_t tstate, - mach_msg_type_number_t *count); +extern kern_return_t machine_thread_get_kern_state( + thread_t thread, + thread_flavor_t flavor, + thread_state_t tstate, + mach_msg_type_number_t *count); -extern kern_return_t machine_thread_inherit_taskwide( - thread_t thread, - task_t parent_task); +extern kern_return_t machine_thread_inherit_taskwide( + thread_t thread, + task_t parent_task); -extern kern_return_t machine_thread_set_tsd_base( - thread_t thread, - mach_vm_offset_t tsd_base); +extern kern_return_t machine_thread_set_tsd_base( + thread_t thread, + mach_vm_offset_t tsd_base); -#define 
thread_mtx_lock(thread) lck_mtx_lock(&(thread)->mutex) -#define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex) -#define thread_mtx_unlock(thread) lck_mtx_unlock(&(thread)->mutex) +#define thread_mtx_lock(thread) lck_mtx_lock(&(thread)->mutex) +#define thread_mtx_try(thread) lck_mtx_try_lock(&(thread)->mutex) +#define thread_mtx_unlock(thread) lck_mtx_unlock(&(thread)->mutex) extern void thread_apc_ast(thread_t thread); @@ -859,22 +860,28 @@ extern void thread_update_qos_cpu_time(thread_t thread); void act_machine_sv_free(thread_t, int); -vm_offset_t min_valid_stack_address(void); -vm_offset_t max_valid_stack_address(void); +vm_offset_t min_valid_stack_address(void); +vm_offset_t max_valid_stack_address(void); -static inline uint16_t thread_set_tag_internal(thread_t thread, uint16_t tag) { +static inline uint16_t +thread_set_tag_internal(thread_t thread, uint16_t tag) +{ return __sync_fetch_and_or(&thread->thread_tag, tag); } -static inline uint16_t thread_get_tag_internal(thread_t thread) { +static inline uint16_t +thread_get_tag_internal(thread_t thread) +{ return thread->thread_tag; } +extern bool thread_no_smt(thread_t thread); +extern bool processor_active_thread_no_smt(processor_t processor); extern void thread_set_options(uint32_t thopt); -#else /* MACH_KERNEL_PRIVATE */ +#else /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS @@ -882,46 +889,46 @@ extern void thread_mtx_lock(thread_t thread); extern void thread_mtx_unlock(thread_t thread); -extern thread_t current_thread(void); +extern thread_t current_thread(void); -extern void thread_reference( - thread_t thread); +extern void thread_reference( + thread_t thread); -extern void thread_deallocate( - thread_t thread); +extern void thread_deallocate( + thread_t thread); __END_DECLS -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE __BEGIN_DECLS -extern void thread_starts_owning_workloop( - thread_t thread); +extern void thread_starts_owning_workloop( + thread_t thread); -extern void thread_ends_owning_workloop( - thread_t thread); +extern void thread_ends_owning_workloop( + thread_t thread); -extern uint32_t thread_owned_workloops_count( - thread_t thread); +extern uint32_t thread_owned_workloops_count( + thread_t thread); -extern uint64_t thread_dispatchqaddr( - thread_t thread); +extern uint64_t thread_dispatchqaddr( + thread_t thread); -extern uint64_t thread_rettokern_addr( - thread_t thread); +extern uint64_t thread_rettokern_addr( + thread_t thread); __END_DECLS -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ #ifdef KERNEL __BEGIN_DECLS -extern uint64_t thread_tid(thread_t thread); +extern uint64_t thread_tid(thread_t thread); __END_DECLS @@ -929,79 +936,79 @@ __END_DECLS __BEGIN_DECLS -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE /* * Thread tags; for easy identification. 
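 *
 * Tags are OR-accumulated: thread_set_tag_internal() above applies
 * __sync_fetch_and_or(), so setting one tag never clears another.
 * Illustrative use (not from the patch):
 *   thread_set_tag(th, THREAD_TAG_IOWORKLOOP);
 *   if (thread_get_tag(th) & THREAD_TAG_IOWORKLOOP) { ... }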
*/ -#define THREAD_TAG_MAINTHREAD 0x1 -#define THREAD_TAG_CALLOUT 0x2 -#define THREAD_TAG_IOWORKLOOP 0x4 +#define THREAD_TAG_MAINTHREAD 0x1 +#define THREAD_TAG_CALLOUT 0x2 +#define THREAD_TAG_IOWORKLOOP 0x4 -#define THREAD_TAG_PTHREAD 0x10 -#define THREAD_TAG_WORKQUEUE 0x20 +#define THREAD_TAG_PTHREAD 0x10 +#define THREAD_TAG_WORKQUEUE 0x20 -uint16_t thread_set_tag(thread_t, uint16_t); -uint16_t thread_get_tag(thread_t); -uint64_t thread_last_run_time(thread_t); +uint16_t thread_set_tag(thread_t, uint16_t); +uint16_t thread_get_tag(thread_t); +uint64_t thread_last_run_time(thread_t); extern kern_return_t thread_state_initialize( - thread_t thread); - -extern kern_return_t thread_setstatus( - thread_t thread, - int flavor, - thread_state_t tstate, - mach_msg_type_number_t count); - -extern kern_return_t thread_setstatus_from_user( - thread_t thread, - int flavor, - thread_state_t tstate, - mach_msg_type_number_t count); - -extern kern_return_t thread_getstatus( - thread_t thread, - int flavor, - thread_state_t tstate, - mach_msg_type_number_t *count); - -extern kern_return_t thread_getstatus_to_user( - thread_t thread, - int flavor, - thread_state_t tstate, - mach_msg_type_number_t *count); - -extern kern_return_t thread_create_with_continuation( - task_t task, - thread_t *new_thread, - thread_continue_t continuation); + thread_t thread); + +extern kern_return_t thread_setstatus( + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t count); + +extern kern_return_t thread_setstatus_from_user( + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t count); + +extern kern_return_t thread_getstatus( + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t *count); + +extern kern_return_t thread_getstatus_to_user( + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t *count); + +extern kern_return_t thread_create_with_continuation( + task_t task, + thread_t *new_thread, + thread_continue_t continuation); extern kern_return_t thread_create_waiting(task_t task, - thread_continue_t continuation, - event_t event, - thread_t *new_thread); + thread_continue_t continuation, + event_t event, + thread_t *new_thread); -extern kern_return_t thread_create_workq_waiting( - task_t task, - thread_continue_t thread_return, - thread_t *new_thread); +extern kern_return_t thread_create_workq_waiting( + task_t task, + thread_continue_t thread_return, + thread_t *new_thread); -extern void thread_yield_internal( - mach_msg_timeout_t interval); +extern void thread_yield_internal( + mach_msg_timeout_t interval); -extern void thread_yield_to_preemption(void); +extern void thread_yield_to_preemption(void); /* * Thread-private CPU limits: apply a private CPU limit to this thread only. Available actions are: - * + * * 1) Block. Prevent CPU consumption of the thread from exceeding the limit. * 2) Exception. Generate a resource consumption exception when the limit is exceeded. * 3) Disable. Remove any existing CPU limit. 
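 *
 * Illustrative call (not from the patch): limit the current thread to
 * 50% of one CPU over a 10 ms interval, blocking when the limit is
 * exceeded (see the thread_set_cpulimit() prototype below):
 *   thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, 50, 10 * NSEC_PER_MSEC);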
*/ -#define THREAD_CPULIMIT_BLOCK 0x1 -#define THREAD_CPULIMIT_EXCEPTION 0x2 -#define THREAD_CPULIMIT_DISABLE 0x3 +#define THREAD_CPULIMIT_BLOCK 0x1 +#define THREAD_CPULIMIT_EXCEPTION 0x2 +#define THREAD_CPULIMIT_DISABLE 0x3 struct _thread_ledger_indices { int cpu_time; @@ -1012,116 +1019,119 @@ extern struct _thread_ledger_indices thread_ledgers; extern int thread_get_cpulimit(int *action, uint8_t *percentage, uint64_t *interval_ns); extern int thread_set_cpulimit(int action, uint8_t percentage, uint64_t interval_ns); -extern void thread_read_times( - thread_t thread, - time_value_t *user_time, - time_value_t *system_time, - time_value_t *runnable_time); +extern void thread_read_times( + thread_t thread, + time_value_t *user_time, + time_value_t *system_time, + time_value_t *runnable_time); -extern uint64_t thread_get_runtime_self(void); +extern uint64_t thread_get_runtime_self(void); -extern void thread_setuserstack( - thread_t thread, - mach_vm_offset_t user_stack); +extern void thread_setuserstack( + thread_t thread, + mach_vm_offset_t user_stack); -extern uint64_t thread_adjuserstack( - thread_t thread, - int adjust); +extern uint64_t thread_adjuserstack( + thread_t thread, + int adjust); -extern void thread_setentrypoint( - thread_t thread, - mach_vm_offset_t entry); +extern void thread_setentrypoint( + thread_t thread, + mach_vm_offset_t entry); -extern kern_return_t thread_set_tsd_base( - thread_t thread, - mach_vm_offset_t tsd_base); +extern kern_return_t thread_set_tsd_base( + thread_t thread, + mach_vm_offset_t tsd_base); -extern kern_return_t thread_setsinglestep( - thread_t thread, - int on); +extern kern_return_t thread_setsinglestep( + thread_t thread, + int on); -extern kern_return_t thread_userstack( - thread_t, - int, - thread_state_t, - unsigned int, - mach_vm_offset_t *, - int *, - boolean_t); +extern kern_return_t thread_userstack( + thread_t, + int, + thread_state_t, + unsigned int, + mach_vm_offset_t *, + int *, + boolean_t); -extern kern_return_t thread_entrypoint( - thread_t, - int, - thread_state_t, - unsigned int, - mach_vm_offset_t *); +extern kern_return_t thread_entrypoint( + thread_t, + int, + thread_state_t, + unsigned int, + mach_vm_offset_t *); -extern kern_return_t thread_userstackdefault( - mach_vm_offset_t *, - boolean_t); +extern kern_return_t thread_userstackdefault( + mach_vm_offset_t *, + boolean_t); -extern kern_return_t thread_wire_internal( - host_priv_t host_priv, - thread_t thread, - boolean_t wired, - boolean_t *prev_state); +extern kern_return_t thread_wire_internal( + host_priv_t host_priv, + thread_t thread, + boolean_t wired, + boolean_t *prev_state); -extern kern_return_t thread_dup(thread_t); +extern kern_return_t thread_dup(thread_t); extern kern_return_t thread_dup2(thread_t, thread_t); #if !defined(_SCHED_CALL_T_DEFINED) #define _SCHED_CALL_T_DEFINED -typedef void (*sched_call_t)( - int type, - thread_t thread); +typedef void (*sched_call_t)( + int type, + thread_t thread); #endif -#define SCHED_CALL_BLOCK 0x1 -#define SCHED_CALL_UNBLOCK 0x2 +#define SCHED_CALL_BLOCK 0x1 +#define SCHED_CALL_UNBLOCK 0x2 -extern void thread_sched_call( - thread_t thread, - sched_call_t call); +extern void thread_sched_call( + thread_t thread, + sched_call_t call); -extern boolean_t thread_is_static_param( - thread_t thread); +extern boolean_t thread_is_static_param( + thread_t thread); -extern task_t get_threadtask(thread_t); +extern task_t get_threadtask(thread_t); /* * Thread is running within a 64-bit address space. 
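 * (The macro below simply asks the thread's owning task, via
 * get_threadtask() and task_has_64Bit_addr().)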
*/ -#define thread_is_64bit_addr(thd) \ +#define thread_is_64bit_addr(thd) \ task_has_64Bit_addr(get_threadtask(thd)) /* * Thread is using 64-bit machine state. */ -#define thread_is_64bit_data(thd) \ +#define thread_is_64bit_data(thd) \ task_has_64Bit_data(get_threadtask(thd)) -extern void *get_bsdthread_info(thread_t); -extern void set_bsdthread_info(thread_t, void *); -extern void *uthread_alloc(task_t, thread_t, int); -extern event_t workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/ -extern void uthread_cleanup_name(void *uthread); -extern void uthread_cleanup(task_t, void *, void *); -extern void uthread_zone_free(void *); -extern void uthread_cred_free(void *); - -extern void uthread_reset_proc_refcount(void *); +#if defined(__x86_64__) +extern int thread_task_has_ldt(thread_t); +#endif +extern void *get_bsdthread_info(thread_t); +extern void set_bsdthread_info(thread_t, void *); +extern void *uthread_alloc(task_t, thread_t, int); +extern event_t workq_thread_init_and_wq_lock(task_t, thread_t); // bsd/pthread/ +extern void uthread_cleanup_name(void *uthread); +extern void uthread_cleanup(task_t, void *, void *); +extern void uthread_zone_free(void *); +extern void uthread_cred_free(void *); + +extern void uthread_reset_proc_refcount(void *); #if PROC_REF_DEBUG -extern int uthread_get_proc_refcount(void *); -extern int proc_ref_tracking_disabled; +extern int uthread_get_proc_refcount(void *); +extern int proc_ref_tracking_disabled; #endif -extern boolean_t thread_should_halt( - thread_t thread); +extern boolean_t thread_should_halt( + thread_t thread); -extern boolean_t thread_should_abort( - thread_t); +extern boolean_t thread_should_abort( + thread_t); extern int is_64signalregset(void); @@ -1141,59 +1151,60 @@ extern void dtrace_set_thread_vtime(thread_t, int64_t); extern void dtrace_set_thread_tracing(thread_t, int64_t); extern void dtrace_set_thread_reentering(thread_t, boolean_t); extern vm_offset_t dtrace_set_thread_recover(thread_t, vm_offset_t); +extern vm_offset_t dtrace_sign_and_set_thread_recover(thread_t, vm_offset_t); extern void dtrace_thread_bootstrap(void); extern void dtrace_thread_didexec(thread_t); extern int64_t dtrace_calc_thread_recent_vtime(thread_t); -extern kern_return_t thread_set_wq_state32( - thread_t thread, - thread_state_t tstate); +extern kern_return_t thread_set_wq_state32( + thread_t thread, + thread_state_t tstate); -extern kern_return_t thread_set_wq_state64( - thread_t thread, - thread_state_t tstate); +extern kern_return_t thread_set_wq_state64( + thread_t thread, + thread_state_t tstate); -extern vm_offset_t kernel_stack_mask; -extern vm_offset_t kernel_stack_size; -extern vm_offset_t kernel_stack_depth_max; +extern vm_offset_t kernel_stack_mask; +extern vm_offset_t kernel_stack_size; +extern vm_offset_t kernel_stack_depth_max; extern void guard_ast(thread_t); extern void fd_guard_ast(thread_t, - mach_exception_code_t, mach_exception_subcode_t); + mach_exception_code_t, mach_exception_subcode_t); #if CONFIG_VNGUARD extern void vn_guard_ast(thread_t, - mach_exception_code_t, mach_exception_subcode_t); + mach_exception_code_t, mach_exception_subcode_t); #endif extern void mach_port_guard_ast(thread_t, - mach_exception_code_t, mach_exception_subcode_t); + mach_exception_code_t, mach_exception_subcode_t); extern void virt_memory_guard_ast(thread_t, - mach_exception_code_t, mach_exception_subcode_t); + mach_exception_code_t, mach_exception_subcode_t); extern void thread_guard_violation(thread_t, - mach_exception_code_t, 
mach_exception_subcode_t); + mach_exception_code_t, mach_exception_subcode_t); extern void thread_update_io_stats(thread_t, int size, int io_flags); -extern kern_return_t thread_set_voucher_name(mach_port_name_t name); +extern kern_return_t thread_set_voucher_name(mach_port_name_t name); extern kern_return_t thread_get_current_voucher_origin_pid(int32_t *pid); extern void set_thread_rwlock_boost(void); extern void clear_thread_rwlock_boost(void); /*! @function thread_has_thread_name - @abstract Checks if a thread has a name. - @discussion This function takes one input, a thread, and returns a boolean value indicating if that thread already has a name associated with it. - @param th The thread to inspect. - @result TRUE if the thread has a name, FALSE otherwise. -*/ + * @abstract Checks if a thread has a name. + * @discussion This function takes one input, a thread, and returns a boolean value indicating if that thread already has a name associated with it. + * @param th The thread to inspect. + * @result TRUE if the thread has a name, FALSE otherwise. + */ extern boolean_t thread_has_thread_name(thread_t th); /*! @function thread_set_thread_name - @abstract Set a thread's name. - @discussion This function takes two input parameters: a thread to name, and the name to apply to the thread. The name will be attached to the thread in order to better identify the thread. - @param th The thread to be named. - @param name The name to apply to the thread. -*/ + * @abstract Set a thread's name. + * @discussion This function takes two input parameters: a thread to name, and the name to apply to the thread. The name will be attached to the thread in order to better identify the thread. + * @param th The thread to be named. + * @param name The name to apply to the thread. + */ extern void thread_set_thread_name(thread_t th, const char* name); extern void thread_enable_send_importance(thread_t thread, boolean_t enable); @@ -1202,18 +1213,26 @@ extern void thread_enable_send_importance(thread_t thread, boolean_t enable); * Translate signal context data pointer to userspace representation */ -extern kern_return_t machine_thread_siguctx_pointer_convert_to_user( - thread_t thread, - user_addr_t *uctxp); +extern kern_return_t machine_thread_siguctx_pointer_convert_to_user( + thread_t thread, + user_addr_t *uctxp); + +extern void machine_tecs(thread_t thr); + +typedef enum cpuvn { + CPUVN_CI = 1 +} cpuvn_e; + +extern int machine_csv(cpuvn_e cve); /* * Translate array of function pointer syscall arguments from userspace representation */ -extern kern_return_t machine_thread_function_pointers_convert_from_user( - thread_t thread, - user_addr_t *fptrs, - uint32_t count); +extern kern_return_t machine_thread_function_pointers_convert_from_user( + thread_t thread, + user_addr_t *fptrs, + uint32_t count); /* Get a backtrace for a threads kernel or user stack (user_p), with pc and optionally * frame pointer (getfp). Returns bytes added to buffer, and kThreadTruncatedBT in @@ -1221,45 +1240,48 @@ extern kern_return_t machine_thread_function_pointers_convert_from_user( * called. 
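 * machine_trace_thread64() below takes the same arguments plus an
 * extra uint64_t *sp parameter for the stack pointer.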
*/ -extern int machine_trace_thread( - thread_t thread, - char *tracepos, - char *tracebound, - int nframes, - boolean_t user_p, - boolean_t getfp, - uint32_t *thread_trace_flags); - -extern int machine_trace_thread64(thread_t thread, - char *tracepos, - char *tracebound, - int nframes, - boolean_t user_p, - boolean_t getfp, - uint32_t *thread_trace_flags, - uint64_t *sp); +extern int machine_trace_thread( + thread_t thread, + char *tracepos, + char *tracebound, + int nframes, + boolean_t user_p, + boolean_t getfp, + uint32_t *thread_trace_flags); + +extern int machine_trace_thread64(thread_t thread, + char *tracepos, + char *tracebound, + int nframes, + boolean_t user_p, + boolean_t getfp, + uint32_t *thread_trace_flags, + uint64_t *sp); /* * Get the duration of the given thread's last wait. */ uint64_t thread_get_last_wait_duration(thread_t thread); -#endif /* XNU_KERNEL_PRIVATE */ +extern void thread_set_no_smt(bool set); +extern bool thread_get_no_smt(void); + +#endif /* XNU_KERNEL_PRIVATE */ /*! @function kernel_thread_start - @abstract Create a kernel thread. - @discussion This function takes three input parameters, namely reference to the function that the thread should execute, caller specified data and a reference which is used to return the newly created kernel thread. The function returns KERN_SUCCESS on success or an appropriate kernel code type indicating the error. It may be noted that the caller is responsible for explicitly releasing the reference to the created thread when no longer needed. This should be done by calling thread_deallocate(new_thread). - @param continuation A C-function pointer where the thread will begin execution. - @param parameter Caller specified data to be passed to the new thread. - @param new_thread Reference to the new thread is returned in this parameter. - @result Returns KERN_SUCCESS on success or an appropriate kernel code type. -*/ - -extern kern_return_t kernel_thread_start( - thread_continue_t continuation, - void *parameter, - thread_t *new_thread); + * @abstract Create a kernel thread. + * @discussion This function takes three input parameters, namely reference to the function that the thread should execute, caller specified data and a reference which is used to return the newly created kernel thread. The function returns KERN_SUCCESS on success or an appropriate kernel code type indicating the error. It may be noted that the caller is responsible for explicitly releasing the reference to the created thread when no longer needed. This should be done by calling thread_deallocate(new_thread). + * @param continuation A C-function pointer where the thread will begin execution. + * @param parameter Caller specified data to be passed to the new thread. + * @param new_thread Reference to the new thread is returned in this parameter. + * @result Returns KERN_SUCCESS on success or an appropriate kernel code type. + */ + +extern kern_return_t kernel_thread_start( + thread_continue_t continuation, + void *parameter, + thread_t *new_thread); #ifdef KERNEL_PRIVATE void thread_set_eager_preempt(thread_t thread); @@ -1275,4 +1297,4 @@ extern kern_allocation_name_t thread_set_allocation_name(kern_allocation_name_t __END_DECLS -#endif /* _KERN_THREAD_H_ */ +#endif /* _KERN_THREAD_H_ */ diff --git a/osfmk/kern/thread_act.c b/osfmk/kern/thread_act.c index 4faa1e9b5..c93dda8e3 100644 --- a/osfmk/kern/thread_act.c +++ b/osfmk/kern/thread_act.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -91,7 +91,7 @@ static void thread_set_apc_ast_locked(thread_t thread); */ void thread_start( - thread_t thread) + thread_t thread) { clear_wait(thread, THREAD_AWAKENED); thread->started = TRUE; @@ -105,7 +105,7 @@ thread_start( * * Always called with the thread mutex locked. * - * Task and task_threads mutexes also held + * Task and task_threads mutexes also held * (so nobody can set the thread running before * this point) * @@ -114,7 +114,7 @@ thread_start( */ void thread_start_in_assert_wait( - thread_t thread, + thread_t thread, event_t event, wait_interrupt_t interruptible) { @@ -134,12 +134,12 @@ thread_start_in_assert_wait( /* assert wait interruptibly forever */ wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event), - interruptible, - TIMEOUT_URGENCY_SYS_NORMAL, - TIMEOUT_WAIT_FOREVER, - TIMEOUT_NO_LEEWAY, - thread); - assert (wait_result == THREAD_WAITING); + interruptible, + TIMEOUT_URGENCY_SYS_NORMAL, + TIMEOUT_WAIT_FOREVER, + TIMEOUT_NO_LEEWAY, + thread); + assert(wait_result == THREAD_WAITING); /* mark thread started while we still hold the waitq lock */ thread_lock(thread); @@ -156,9 +156,9 @@ thread_start_in_assert_wait( */ kern_return_t thread_terminate_internal( - thread_t thread) + thread_t thread) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; thread_mtx_lock(thread); @@ -167,24 +167,26 @@ thread_terminate_internal( act_abort(thread); - if (thread->started) + if (thread->started) { clear_wait(thread, THREAD_INTERRUPTED); - else { + } else { thread_start(thread); } - } - else + } else { result = KERN_TERMINATED; + } - if (thread->affinity_set != NULL) + if (thread->affinity_set != NULL) { thread_affinity_terminate(thread); + } thread_mtx_unlock(thread); - if (thread != current_thread() && result == KERN_SUCCESS) + if (thread != current_thread() && result == KERN_SUCCESS) { thread_wait(thread, FALSE); + } - return (result); + return result; } /* @@ -192,14 +194,16 @@ thread_terminate_internal( */ kern_return_t thread_terminate( - thread_t thread) + thread_t thread) { - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } /* Kernel threads can't be terminated without their own cooperation */ - if (thread->task == kernel_task && thread != current_thread()) - return (KERN_FAILURE); + if (thread->task == kernel_task && thread != current_thread()) { + return KERN_FAILURE; + } 
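	/*
	 * Per the guard above, a kernel thread can only reach this point
	 * when terminating itself; the hunk elided below finishes that
	 * case and does not return (note the NOTREACHED marker), so only
	 * termination of other threads falls through to the final return.
	 */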
kern_return_t result = thread_terminate_internal(thread); @@ -217,7 +221,7 @@ thread_terminate( /* NOTREACHED */ } - return (result); + return result; } /* @@ -251,8 +255,9 @@ thread_release(thread_t thread) assertf(thread->suspend_count > 0, "thread %p over-resumed", thread); /* fail-safe on non-assert builds */ - if (thread->suspend_count == 0) + if (thread->suspend_count == 0) { return; + } if (--thread->suspend_count == 0) { if (!thread->started) { @@ -269,24 +274,27 @@ thread_suspend(thread_t thread) { kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL || thread->task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL || thread->task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); if (thread->active) { - if (thread->user_stop_count++ == 0) + if (thread->user_stop_count++ == 0) { thread_hold(thread); + } } else { result = KERN_TERMINATED; } thread_mtx_unlock(thread); - if (thread != current_thread() && result == KERN_SUCCESS) + if (thread != current_thread() && result == KERN_SUCCESS) { thread_wait(thread, FALSE); + } - return (result); + return result; } kern_return_t @@ -294,15 +302,17 @@ thread_resume(thread_t thread) { kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL || thread->task == kernel_task) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL || thread->task == kernel_task) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); if (thread->active) { if (thread->user_stop_count > 0) { - if (--thread->user_stop_count == 0) + if (--thread->user_stop_count == 0) { thread_release(thread); + } } else { result = KERN_FAILURE; } @@ -312,7 +322,7 @@ thread_resume(thread_t thread) thread_mtx_unlock(thread); - return (result); + return result; } /* @@ -325,19 +335,21 @@ thread_depress_abort_from_user(thread_t thread) { kern_return_t result; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); - if (thread->active) + if (thread->active) { result = thread_depress_abort(thread); - else + } else { result = KERN_TERMINATED; + } thread_mtx_unlock(thread); - return (result); + return result; } @@ -349,9 +361,9 @@ thread_depress_abort_from_user(thread_t thread) */ static void act_abort( - thread_t thread) + thread_t thread) { - spl_t s = splsched(); + spl_t s = splsched(); thread_lock(thread); @@ -369,44 +381,46 @@ act_abort( kern_return_t thread_abort( - thread_t thread) + thread_t thread) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); if (thread->active) { act_abort(thread); clear_wait(thread, THREAD_INTERRUPTED); - } - else + } else { result = KERN_TERMINATED; + } thread_mtx_unlock(thread); - return (result); + return result; } kern_return_t thread_abort_safely( - thread_t thread) + thread_t thread) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); if (thread->active) { - spl_t s = splsched(); + spl_t s = splsched(); thread_lock(thread); if (!thread->at_safe_point || - clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) { + clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) { if 
(!(thread->sched_flags & TH_SFLAG_ABORT)) { thread->sched_flags |= TH_SFLAG_ABORTED_MASK; thread_set_apc_ast_locked(thread); @@ -421,7 +435,7 @@ thread_abort_safely( thread_mtx_unlock(thread); - return (result); + return result; } /*** backward compatibility hacks ***/ @@ -431,41 +445,44 @@ thread_abort_safely( kern_return_t thread_info( - thread_t thread, - thread_flavor_t flavor, - thread_info_t thread_info_out, - mach_msg_type_number_t *thread_info_count) + thread_t thread, + thread_flavor_t flavor, + thread_info_t thread_info_out, + mach_msg_type_number_t *thread_info_count) { - kern_return_t result; + kern_return_t result; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); - if (thread->active || thread->inspection) + if (thread->active || thread->inspection) { result = thread_info_internal( - thread, flavor, thread_info_out, thread_info_count); - else + thread, flavor, thread_info_out, thread_info_count); + } else { result = KERN_TERMINATED; + } thread_mtx_unlock(thread); - return (result); + return result; } static inline kern_return_t thread_get_state_internal( - thread_t thread, - int flavor, - thread_state_t state, /* pointer to OUT array */ - mach_msg_type_number_t *state_count, /*IN/OUT*/ - boolean_t to_user) + thread_t thread, + int flavor, + thread_state_t state, /* pointer to OUT array */ + mach_msg_type_number_t *state_count, /*IN/OUT*/ + boolean_t to_user) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); @@ -478,63 +495,60 @@ thread_get_state_internal( if (thread_stop(thread, FALSE)) { thread_mtx_lock(thread); result = machine_thread_get_state( - thread, flavor, state, state_count); + thread, flavor, state, state_count); thread_unstop(thread); - } - else { + } else { thread_mtx_lock(thread); result = KERN_ABORTED; } thread_release(thread); - } - else + } else { result = machine_thread_get_state( - thread, flavor, state, state_count); - } - else if (thread->inspection) - { + thread, flavor, state, state_count); + } + } else if (thread->inspection) { result = machine_thread_get_state( - thread, flavor, state, state_count); - } - else + thread, flavor, state, state_count); + } else { result = KERN_TERMINATED; + } if (to_user && result == KERN_SUCCESS) { result = machine_thread_state_convert_to_user(thread, flavor, state, - state_count); + state_count); } thread_mtx_unlock(thread); - return (result); + return result; } /* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */ kern_return_t thread_get_state( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t *state_count); + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t *state_count); kern_return_t thread_get_state( - thread_t thread, - int flavor, - thread_state_t state, /* pointer to OUT array */ - mach_msg_type_number_t *state_count) /*IN/OUT*/ + thread_t thread, + int flavor, + thread_state_t state, /* pointer to OUT array */ + mach_msg_type_number_t *state_count) /*IN/OUT*/ { return thread_get_state_internal(thread, flavor, state, state_count, FALSE); } kern_return_t thread_get_state_to_user( - thread_t thread, - int flavor, - thread_state_t state, /* pointer to OUT array */ - mach_msg_type_number_t *state_count) /*IN/OUT*/ + thread_t thread, + 
int flavor, + thread_state_t state, /* pointer to OUT array */ + mach_msg_type_number_t *state_count) /*IN/OUT*/ { return thread_get_state_internal(thread, flavor, state, state_count, TRUE); } @@ -545,23 +559,24 @@ thread_get_state_to_user( */ static inline kern_return_t thread_set_state_internal( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count, - boolean_t from_user) + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count, + boolean_t from_user) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); if (thread->active) { if (from_user) { result = machine_thread_state_convert_from_user(thread, flavor, - state, state_count); + state, state_count); if (result != KERN_SUCCESS) { goto out; } @@ -574,60 +589,60 @@ thread_set_state_internal( if (thread_stop(thread, TRUE)) { thread_mtx_lock(thread); result = machine_thread_set_state( - thread, flavor, state, state_count); + thread, flavor, state, state_count); thread_unstop(thread); - } - else { + } else { thread_mtx_lock(thread); result = KERN_ABORTED; } thread_release(thread); - } - else + } else { result = machine_thread_set_state( - thread, flavor, state, state_count); - } - else + thread, flavor, state, state_count); + } + } else { result = KERN_TERMINATED; + } - if ((result == KERN_SUCCESS) && from_user) + if ((result == KERN_SUCCESS) && from_user) { extmod_statistics_incr_thread_set_state(thread); + } out: thread_mtx_unlock(thread); - return (result); + return result; } -/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */ +/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */ kern_return_t thread_set_state( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count); + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count); kern_return_t thread_set_state( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count) + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count) { return thread_set_state_internal(thread, flavor, state, state_count, FALSE); } - + kern_return_t thread_set_state_from_user( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t state_count) + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count) { return thread_set_state_internal(thread, flavor, state, state_count, TRUE); } - + /* * Kernel-internal "thread" interfaces used outside this file: */ @@ -637,12 +652,13 @@ thread_set_state_from_user( */ kern_return_t thread_state_initialize( - thread_t thread) + thread_t thread) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); @@ -656,35 +672,35 @@ thread_state_initialize( thread_mtx_lock(thread); result = machine_thread_state_initialize( thread ); thread_unstop(thread); - } - else { + } else { thread_mtx_lock(thread); result = KERN_ABORTED; } thread_release(thread); - } - else + } else { result = machine_thread_state_initialize( thread ); - } - else + } + } else { result = KERN_TERMINATED; + } 
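	/*
	 * Pattern used throughout this file: the target thread is held and
	 * fully stopped (thread_stop) before its machine state is touched;
	 * a stop broken by an abort yields KERN_ABORTED, and the hold is
	 * dropped again via thread_release() on both paths.
	 */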
thread_mtx_unlock(thread); - return (result); + return result; } kern_return_t thread_dup( - thread_t target) + thread_t target) { - thread_t self = current_thread(); - kern_return_t result = KERN_SUCCESS; + thread_t self = current_thread(); + kern_return_t result = KERN_SUCCESS; - if (target == THREAD_NULL || target == self) - return (KERN_INVALID_ARGUMENT); + if (target == THREAD_NULL || target == self) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(target); @@ -697,36 +713,37 @@ thread_dup( thread_mtx_lock(target); result = machine_thread_dup(self, target, FALSE); - if (self->affinity_set != AFFINITY_SET_NULL) + if (self->affinity_set != AFFINITY_SET_NULL) { thread_affinity_dup(self, target); + } thread_unstop(target); - } - else { + } else { thread_mtx_lock(target); result = KERN_ABORTED; } thread_release(target); - } - else + } else { result = KERN_TERMINATED; + } thread_mtx_unlock(target); - return (result); + return result; } kern_return_t thread_dup2( - thread_t source, - thread_t target) + thread_t source, + thread_t target) { - kern_return_t result = KERN_SUCCESS; - uint32_t active = 0; + kern_return_t result = KERN_SUCCESS; + uint32_t active = 0; - if (source == THREAD_NULL || target == THREAD_NULL || target == source) - return (KERN_INVALID_ARGUMENT); + if (source == THREAD_NULL || target == THREAD_NULL || target == source) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(source); active = source->active; @@ -746,23 +763,23 @@ thread_dup2( if (thread_stop(target, TRUE)) { thread_mtx_lock(target); result = machine_thread_dup(source, target, TRUE); - if (source->affinity_set != AFFINITY_SET_NULL) + if (source->affinity_set != AFFINITY_SET_NULL) { thread_affinity_dup(source, target); + } thread_unstop(target); - } - else { + } else { thread_mtx_lock(target); result = KERN_ABORTED; } thread_release(target); - } - else + } else { result = KERN_TERMINATED; + } thread_mtx_unlock(target); - return (result); + return result; } /* @@ -773,24 +790,22 @@ thread_dup2( */ kern_return_t thread_setstatus( - thread_t thread, - int flavor, - thread_state_t tstate, - mach_msg_type_number_t count) + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t count) { - - return (thread_set_state(thread, flavor, tstate, count)); + return thread_set_state(thread, flavor, tstate, count); } kern_return_t thread_setstatus_from_user( - thread_t thread, - int flavor, - thread_state_t tstate, - mach_msg_type_number_t count) + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t count) { - - return (thread_set_state_from_user(thread, flavor, tstate, count)); + return thread_set_state_from_user(thread, flavor, tstate, count); } /* @@ -800,22 +815,22 @@ thread_setstatus_from_user( */ kern_return_t thread_getstatus( - thread_t thread, - int flavor, - thread_state_t tstate, - mach_msg_type_number_t *count) + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) { - return (thread_get_state(thread, flavor, tstate, count)); + return thread_get_state(thread, flavor, tstate, count); } kern_return_t thread_getstatus_to_user( - thread_t thread, - int flavor, - thread_state_t tstate, - mach_msg_type_number_t *count) + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) { - return (thread_get_state_to_user(thread, flavor, tstate, count)); + return thread_get_state_to_user(thread, flavor, tstate, count); } /* @@ -824,13 +839,14 @@ thread_getstatus_to_user( */ kern_return_t 
thread_set_tsd_base( - thread_t thread, - mach_vm_offset_t tsd_base) + thread_t thread, + mach_vm_offset_t tsd_base) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); @@ -844,23 +860,22 @@ thread_set_tsd_base( thread_mtx_lock(thread); result = machine_thread_set_tsd_base(thread, tsd_base); thread_unstop(thread); - } - else { + } else { thread_mtx_lock(thread); result = KERN_ABORTED; } thread_release(thread); - } - else + } else { result = machine_thread_set_tsd_base(thread, tsd_base); - } - else + } + } else { result = KERN_TERMINATED; + } thread_mtx_unlock(thread); - return (result); + return result; } /* @@ -928,13 +943,15 @@ thread_suspended(__unused void *parameter, wait_result_t result) thread_mtx_lock(thread); - if (result == THREAD_INTERRUPTED) + if (result == THREAD_INTERRUPTED) { thread->suspend_parked = FALSE; - else + } else { assert(thread->suspend_parked == FALSE); + } - if (thread->suspend_count > 0) + if (thread->suspend_count > 0) { thread_set_apc_ast(thread); + } thread_mtx_unlock(thread); @@ -975,7 +992,7 @@ thread_apc_ast(thread_t thread) if (thread->suspend_count > 0) { thread->suspend_parked = TRUE; assert_wait(&thread->suspend_count, - THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER); + THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER); thread_mtx_unlock(thread); thread_block(thread_suspended); @@ -988,77 +1005,79 @@ thread_apc_ast(thread_t thread) /* Prototype, see justification above */ kern_return_t act_set_state( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t count); + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t count); kern_return_t act_set_state( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t count) + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t count) { - if (thread == current_thread()) - return (KERN_INVALID_ARGUMENT); + if (thread == current_thread()) { + return KERN_INVALID_ARGUMENT; + } - return (thread_set_state(thread, flavor, state, count)); - + return thread_set_state(thread, flavor, state, count); } kern_return_t act_set_state_from_user( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t count) + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t count) { - if (thread == current_thread()) - return (KERN_INVALID_ARGUMENT); + if (thread == current_thread()) { + return KERN_INVALID_ARGUMENT; + } - return (thread_set_state_from_user(thread, flavor, state, count)); - + return thread_set_state_from_user(thread, flavor, state, count); } /* Prototype, see justification above */ kern_return_t act_get_state( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t *count); + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t *count); kern_return_t act_get_state( - thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t *count) + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t *count) { - if (thread == current_thread()) - return (KERN_INVALID_ARGUMENT); + if (thread == current_thread()) { + return KERN_INVALID_ARGUMENT; + } - return (thread_get_state(thread, flavor, state, count)); + return thread_get_state(thread, flavor, state, count); } kern_return_t act_get_state_to_user( - 
thread_t thread, - int flavor, - thread_state_t state, - mach_msg_type_number_t *count) + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t *count) { - if (thread == current_thread()) - return (KERN_INVALID_ARGUMENT); + if (thread == current_thread()) { + return KERN_INVALID_ARGUMENT; + } - return (thread_get_state_to_user(thread, flavor, state, count)); + return thread_get_state_to_user(thread, flavor, state, count); } static void act_set_ast( - thread_t thread, - ast_t ast) + thread_t thread, + ast_t ast) { spl_t s = splsched(); @@ -1071,10 +1090,11 @@ act_set_ast( thread_lock(thread); thread_ast_set(thread, ast); processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->active_thread == thread ) + if (processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread) { cause_ast_check(processor); + } thread_unlock(thread); } @@ -1091,7 +1111,7 @@ act_set_ast( */ static void act_set_ast_async(thread_t thread, - ast_t ast) + ast_t ast) { thread_ast_set(thread, ast); @@ -1104,7 +1124,7 @@ act_set_ast_async(thread_t thread, void act_set_astbsd( - thread_t thread) + thread_t thread) { act_set_ast( thread, AST_BSD ); } @@ -1120,12 +1140,14 @@ act_set_astkevent(thread_t thread, uint16_t bits) void act_set_kperf( - thread_t thread) + thread_t thread) { /* safety check */ - if (thread != current_thread()) - if( !ml_get_interrupts_enabled() ) + if (thread != current_thread()) { + if (!ml_get_interrupts_enabled()) { panic("unsafe act_set_kperf operation"); + } + } act_set_ast( thread, AST_KPERF ); } @@ -1133,7 +1155,7 @@ act_set_kperf( #if CONFIG_MACF void act_set_astmacf( - thread_t thread) + thread_t thread) { act_set_ast( thread, AST_MACF); } @@ -1164,4 +1186,3 @@ act_set_io_telemetry_ast(thread_t thread) { act_set_ast(thread, AST_TELEMETRY_IO); } - diff --git a/osfmk/kern/thread_call.c b/osfmk/kern/thread_call.c index ec92802f8..5f2676de7 100644 --- a/osfmk/kern/thread_call.c +++ b/osfmk/kern/thread_call.c @@ -2,7 +2,7 @@ * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ - + #include #include @@ -54,8 +54,8 @@ #endif #include -static zone_t thread_call_zone; -static struct waitq daemon_waitq; +static zone_t thread_call_zone; +static struct waitq daemon_waitq; typedef enum { TCF_ABSOLUTE = 0, @@ -72,23 +72,22 @@ typedef enum { static struct thread_call_group { const char * tcg_name; - queue_head_t pending_queue; - uint32_t pending_count; + queue_head_t pending_queue; + uint32_t pending_count; queue_head_t delayed_queues[TCF_COUNT]; timer_call_data_t delayed_timers[TCF_COUNT]; - timer_call_data_t dealloc_timer; + timer_call_data_t dealloc_timer; - struct waitq idle_waitq; - uint32_t idle_count, active_count, blocked_count; + struct waitq idle_waitq; + uint32_t idle_count, active_count, blocked_count; uint32_t tcg_thread_pri; - uint32_t target_thread_count; - uint64_t idle_timestamp; + uint32_t target_thread_count; + uint64_t idle_timestamp; thread_call_group_flags_t flags; - } thread_call_groups[THREAD_CALL_INDEX_MAX] = { [THREAD_CALL_INDEX_HIGH] = { .tcg_name = "high", @@ -140,40 +139,40 @@ static struct thread_call_group { }, }; -typedef struct thread_call_group *thread_call_group_t; +typedef struct thread_call_group *thread_call_group_t; -#define INTERNAL_CALL_COUNT 768 +#define INTERNAL_CALL_COUNT 768 #define THREAD_CALL_DEALLOC_INTERVAL_NS (5 * NSEC_PER_MSEC) /* 5 ms */ -#define THREAD_CALL_ADD_RATIO 4 -#define THREAD_CALL_MACH_FACTOR_CAP 3 -#define THREAD_CALL_GROUP_MAX_THREADS 500 - -static boolean_t thread_call_daemon_awake; -static thread_call_data_t internal_call_storage[INTERNAL_CALL_COUNT]; -static queue_head_t thread_call_internal_queue; -int thread_call_internal_queue_count = 0; -static uint64_t thread_call_dealloc_interval_abs; - -static __inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0); -static __inline__ void _internal_call_release(thread_call_t call); -static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group); +#define THREAD_CALL_ADD_RATIO 4 +#define THREAD_CALL_MACH_FACTOR_CAP 3 +#define THREAD_CALL_GROUP_MAX_THREADS 500 + +static boolean_t thread_call_daemon_awake; +static thread_call_data_t internal_call_storage[INTERNAL_CALL_COUNT]; +static queue_head_t thread_call_internal_queue; +int thread_call_internal_queue_count = 0; +static uint64_t thread_call_dealloc_interval_abs; + +static __inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0); +static __inline__ void _internal_call_release(thread_call_t call); +static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, thread_call_group_t group); static boolean_t _delayed_call_enqueue(thread_call_t call, thread_call_group_t group, - uint64_t deadline, thread_call_flavor_t flavor); -static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group); -static __inline__ void thread_call_wake(thread_call_group_t group); -static void thread_call_daemon(void *arg); -static void thread_call_thread(thread_call_group_t group, wait_result_t wres); -static void thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1); + uint64_t deadline, thread_call_flavor_t flavor); +static __inline__ boolean_t _call_dequeue(thread_call_t call, thread_call_group_t group); +static __inline__ void thread_call_wake(thread_call_group_t group); +static void thread_call_daemon(void *arg); +static void thread_call_thread(thread_call_group_t group, wait_result_t wres); +static void 
thread_call_dealloc_timer(timer_call_param_t p0, timer_call_param_t p1); static void thread_call_group_setup(thread_call_group_t group); -static void sched_call_thread(int type, thread_t thread); -static void thread_call_start_deallocate_timer(thread_call_group_t group); -static void thread_call_wait_locked(thread_call_t call, spl_t s); +static void sched_call_thread(int type, thread_t thread); +static void thread_call_start_deallocate_timer(thread_call_group_t group); +static void thread_call_wait_locked(thread_call_t call, spl_t s); static boolean_t thread_call_wait_once_locked(thread_call_t call, spl_t s); -static boolean_t thread_call_enter_delayed_internal(thread_call_t call, - thread_call_func_t alt_func, thread_call_param_t alt_param0, - thread_call_param_t param1, uint64_t deadline, - uint64_t leeway, unsigned int flags); +static boolean_t thread_call_enter_delayed_internal(thread_call_t call, + thread_call_func_t alt_func, thread_call_param_t alt_param0, + thread_call_param_t param1, uint64_t deadline, + uint64_t leeway, unsigned int flags); /* non-static so dtrace can find it rdar://problem/31156135&31379348 */ extern void thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1); @@ -181,15 +180,15 @@ extern void thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t lck_grp_t thread_call_lck_grp; lck_mtx_t thread_call_lock_data; -#define thread_call_lock_spin() \ +#define thread_call_lock_spin() \ lck_mtx_lock_spin_always(&thread_call_lock_data) -#define thread_call_unlock() \ +#define thread_call_unlock() \ lck_mtx_unlock_always(&thread_call_lock_data) #define tc_deadline tc_call.deadline -extern boolean_t mach_timer_coalescing_enabled; +extern boolean_t mach_timer_coalescing_enabled; static inline spl_t disable_ints_and_lock(void) @@ -210,7 +209,7 @@ enable_ints_and_unlock(spl_t s) static inline boolean_t group_isparallel(thread_call_group_t group) { - return ((group->flags & TCG_PARALLEL) != 0); + return (group->flags & TCG_PARALLEL) != 0; } static boolean_t @@ -218,8 +217,8 @@ thread_call_group_should_add_thread(thread_call_group_t group) { if ((group->active_count + group->blocked_count + group->idle_count) >= THREAD_CALL_GROUP_MAX_THREADS) { panic("thread_call group '%s' reached max thread cap (%d): active: %d, blocked: %d, idle: %d", - group->tcg_name, THREAD_CALL_GROUP_MAX_THREADS, - group->active_count, group->blocked_count, group->idle_count); + group->tcg_name, THREAD_CALL_GROUP_MAX_THREADS, + group->active_count, group->blocked_count, group->idle_count); } if (group_isparallel(group) == FALSE) { @@ -242,13 +241,13 @@ thread_call_group_should_add_thread(thread_call_group_t group) * the group has fewer than its target number of * threads, or the amount of work is large relative * to the number of threads. In the last case, pay attention - * to the total load on the system, and back off if + * to the total load on the system, and back off if * it's high. 
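 * As a worked example with the constants defined above: a parallel
 * group running two live threads (active + idle + blocked) asks for
 * another thread once more than THREAD_CALL_ADD_RATIO * 2 = 8 calls
 * are pending, and only while sched_mach_factor is below
 * THREAD_CALL_MACH_FACTOR_CAP (3); it also grows whenever it holds
 * fewer threads than its target_thread_count.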
*/ if ((thread_count == 0) || - (thread_count < group->target_thread_count) || - ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) && - (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) { + (thread_count < group->target_thread_count) || + ((group->pending_count > THREAD_CALL_ADD_RATIO * thread_count) && + (sched_mach_factor < THREAD_CALL_MACH_FACTOR_CAP))) { return TRUE; } } @@ -282,21 +281,21 @@ thread_call_group_setup(thread_call_group_t group) queue_init(&group->delayed_queues[TCF_CONTINUOUS]); /* TODO: Consolidate to one hard timer for each group */ - timer_call_setup(&group->delayed_timers[TCF_ABSOLUTE], thread_call_delayed_timer, group); + timer_call_setup(&group->delayed_timers[TCF_ABSOLUTE], thread_call_delayed_timer, group); timer_call_setup(&group->delayed_timers[TCF_CONTINUOUS], thread_call_delayed_timer, group); timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group); /* Reverse the wait order so we re-use the most recently parked thread from the pool */ - waitq_init(&group->idle_waitq, SYNC_POLICY_REVERSED|SYNC_POLICY_DISABLE_IRQ); + waitq_init(&group->idle_waitq, SYNC_POLICY_REVERSED | SYNC_POLICY_DISABLE_IRQ); } /* - * Simple wrapper for creating threads bound to + * Simple wrapper for creating threads bound to * thread call groups. */ static kern_return_t thread_call_thread_create( - thread_call_group_t group) + thread_call_group_t group) { thread_t thread; kern_return_t result; @@ -304,7 +303,7 @@ thread_call_thread_create( int thread_pri = group->tcg_thread_pri; result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, - group, thread_pri, &thread); + group, thread_pri, &thread); if (result != KERN_SUCCESS) { return result; } @@ -337,7 +336,7 @@ thread_call_thread_create( void thread_call_initialize(void) { - int tc_size = sizeof (thread_call_data_t); + int tc_size = sizeof(thread_call_data_t); thread_call_zone = zinit(tc_size, 4096 * tc_size, 16 * tc_size, "thread_call"); zone_change(thread_call_zone, Z_CALLERACCT, FALSE); zone_change(thread_call_zone, Z_NOENCRYPT, TRUE); @@ -348,17 +347,17 @@ thread_call_initialize(void) nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs); waitq_init(&daemon_waitq, SYNC_POLICY_DISABLE_IRQ | SYNC_POLICY_FIFO); - for (uint32_t i = 0; i < THREAD_CALL_INDEX_MAX; i++) + for (uint32_t i = 0; i < THREAD_CALL_INDEX_MAX; i++) { thread_call_group_setup(&thread_call_groups[i]); + } spl_t s = disable_ints_and_lock(); queue_init(&thread_call_internal_queue); for ( - thread_call_t call = internal_call_storage; - call < &internal_call_storage[INTERNAL_CALL_COUNT]; - call++) { - + thread_call_t call = internal_call_storage; + call < &internal_call_storage[INTERNAL_CALL_COUNT]; + call++) { enqueue_tail(&thread_call_internal_queue, &call->tc_call.q_link); thread_call_internal_queue_count++; } @@ -371,18 +370,19 @@ thread_call_initialize(void) kern_return_t result; result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, - NULL, BASEPRI_PREEMPT_HIGH + 1, &thread); - if (result != KERN_SUCCESS) + NULL, BASEPRI_PREEMPT_HIGH + 1, &thread); + if (result != KERN_SUCCESS) { panic("thread_call_initialize"); + } thread_deallocate(thread); } void thread_call_setup( - thread_call_t call, - thread_call_func_t func, - thread_call_param_t param0) + thread_call_t call, + thread_call_func_t func, + thread_call_param_t param0) { bzero(call, sizeof(*call)); call_entry_setup((call_entry_t)call, func, param0); @@ -403,20 +403,21 @@ thread_call_setup( static 
__inline__ thread_call_t _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0) { - thread_call_t call; - - if (queue_empty(&thread_call_internal_queue)) - panic("_internal_call_allocate"); + thread_call_t call; + + if (queue_empty(&thread_call_internal_queue)) { + panic("_internal_call_allocate"); + } call = qe_dequeue_head(&thread_call_internal_queue, struct thread_call, tc_call.q_link); - thread_call_internal_queue_count--; + thread_call_internal_queue_count--; - thread_call_setup(call, func, param0); - call->tc_refs = 0; - call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */ + thread_call_setup(call, func, param0); + call->tc_refs = 0; + call->tc_flags = 0; /* THREAD_CALL_ALLOC not set, do not free back to zone */ - return (call); + return call; } /* @@ -427,7 +428,7 @@ _internal_call_allocate(thread_call_func_t func, thread_call_param_t param0) * safe to call on a non-internal entry, in which * case nothing happens. * - * Called with thread_call_lock held. + * Called with thread_call_lock held. */ static __inline__ void _internal_call_release(thread_call_t call) @@ -453,19 +454,20 @@ _internal_call_release(thread_call_t call) */ static __inline__ boolean_t _pending_call_enqueue(thread_call_t call, - thread_call_group_t group) + thread_call_group_t group) { if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING) - == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) { + == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) { call->tc_deadline = 0; uint32_t flags = call->tc_flags; call->tc_flags |= THREAD_CALL_RESCHEDULE; - if ((flags & THREAD_CALL_RESCHEDULE) != 0) - return (TRUE); - else - return (FALSE); + if ((flags & THREAD_CALL_RESCHEDULE) != 0) { + return TRUE; + } else { + return FALSE; + } } queue_head_t *old_queue = call_entry_enqueue_tail(CE(call), &group->pending_queue); @@ -473,8 +475,8 @@ _pending_call_enqueue(thread_call_t call, if (old_queue == NULL) { call->tc_submit_count++; } else if (old_queue != &group->pending_queue && - old_queue != &group->delayed_queues[TCF_ABSOLUTE] && - old_queue != &group->delayed_queues[TCF_CONTINUOUS]) { + old_queue != &group->delayed_queues[TCF_ABSOLUTE] && + old_queue != &group->delayed_queues[TCF_CONTINUOUS]) { panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue); } @@ -482,7 +484,7 @@ _pending_call_enqueue(thread_call_t call, thread_call_wake(group); - return (old_queue != NULL); + return old_queue != NULL; } /* @@ -490,7 +492,7 @@ _pending_call_enqueue(thread_call_t call, * * Place an entry on the delayed queue, * after existing entries with an earlier - * (or identical) deadline. + * (or identical) deadline. * * Returns TRUE if the entry was already * on a queue. 
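Both enqueue helpers above share one return contract; the following minimal sketch illustrates it (sketch_call_enqueue and sketch_queue_insert are hypothetical names, the latter standing in for the call_entry_enqueue_* routines):

static boolean_t
sketch_call_enqueue(thread_call_t call, queue_head_t *target)
{
	/*
	 * A ONCE call that is still RUNNING is not re-queued; it is
	 * marked for re-arming by thread_call_finish() when the
	 * current invocation completes.
	 */
	if ((call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))
	    == (THREAD_CALL_ONCE | THREAD_CALL_RUNNING)) {
		uint32_t old_flags = call->tc_flags;
		call->tc_flags |= THREAD_CALL_RESCHEDULE;
		return (old_flags & THREAD_CALL_RESCHEDULE) != 0;
	}

	queue_head_t *old_queue = sketch_queue_insert(call, target);
	if (old_queue == NULL) {
		call->tc_submit_count++;	/* first submission since last finish */
	}

	/* TRUE means the caller re-armed a call that was already queued */
	return old_queue != NULL;
}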
@@ -505,35 +507,36 @@ _delayed_call_enqueue( thread_call_flavor_t flavor) { if ((THREAD_CALL_ONCE | THREAD_CALL_RUNNING) - == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) { + == (call->tc_flags & (THREAD_CALL_ONCE | THREAD_CALL_RUNNING))) { call->tc_deadline = deadline; uint32_t flags = call->tc_flags; call->tc_flags |= THREAD_CALL_RESCHEDULE; - if ((flags & THREAD_CALL_RESCHEDULE) != 0) - return (TRUE); - else - return (FALSE); + if ((flags & THREAD_CALL_RESCHEDULE) != 0) { + return TRUE; + } else { + return FALSE; + } } queue_head_t *old_queue = call_entry_enqueue_deadline(CE(call), - &group->delayed_queues[flavor], - deadline); + &group->delayed_queues[flavor], + deadline); if (old_queue == &group->pending_queue) { group->pending_count--; } else if (old_queue == NULL) { call->tc_submit_count++; } else if (old_queue == &group->delayed_queues[TCF_ABSOLUTE] || - old_queue == &group->delayed_queues[TCF_CONTINUOUS]) { + old_queue == &group->delayed_queues[TCF_CONTINUOUS]) { /* TODO: if it's in the other delayed queue, that might not be OK */ // we did nothing, and that's fine } else { panic("tried to move a thread call (%p) between groups (old_queue: %p)", call, old_queue); } - return (old_queue != NULL); + return old_queue != NULL; } /* @@ -547,24 +550,25 @@ _delayed_call_enqueue( */ static __inline__ boolean_t _call_dequeue( - thread_call_t call, - thread_call_group_t group) + thread_call_t call, + thread_call_group_t group) { - queue_head_t *old_queue; + queue_head_t *old_queue; old_queue = call_entry_dequeue(CE(call)); if (old_queue != NULL) { assert(old_queue == &group->pending_queue || - old_queue == &group->delayed_queues[TCF_ABSOLUTE] || - old_queue == &group->delayed_queues[TCF_CONTINUOUS]); + old_queue == &group->delayed_queues[TCF_ABSOLUTE] || + old_queue == &group->delayed_queues[TCF_CONTINUOUS]); call->tc_finish_count++; - if (old_queue == &group->pending_queue) + if (old_queue == &group->pending_queue) { group->pending_count--; + } } - return (old_queue != NULL); + return old_queue != NULL; } /* @@ -583,18 +587,20 @@ _call_dequeue( */ static bool _arm_delayed_call_timer(thread_call_t new_call, - thread_call_group_t group, - thread_call_flavor_t flavor) + thread_call_group_t group, + thread_call_flavor_t flavor) { /* No calls implies no timer needed */ - if (queue_empty(&group->delayed_queues[flavor])) + if (queue_empty(&group->delayed_queues[flavor])) { return false; + } thread_call_t call = qe_queue_first(&group->delayed_queues[flavor], struct thread_call, tc_call.q_link); /* We only need to change the hard timer if this new call is the first in the list */ - if (new_call != NULL && new_call != call) + if (new_call != NULL && new_call != call) { return false; + } assert((call->tc_soft_deadline != 0) && ((call->tc_soft_deadline <= call->tc_call.deadline))); @@ -617,7 +623,7 @@ _arm_delayed_call_timer(thread_call_t new_call, timer_call_enter_with_leeway(&group->delayed_timers[flavor], (timer_call_param_t)flavor, fire_at, leeway, - TIMER_CALL_SYS_CRITICAL|TIMER_CALL_LEEWAY, + TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LEEWAY, ((call->tc_flags & THREAD_CALL_RATELIMITED) == THREAD_CALL_RATELIMITED)); return true; @@ -636,16 +642,16 @@ _arm_delayed_call_timer(thread_call_t new_call, */ static boolean_t _cancel_func_from_queue(thread_call_func_t func, - thread_call_param_t param0, - thread_call_group_t group, - boolean_t remove_all, - queue_head_t *queue) + thread_call_param_t param0, + thread_call_group_t group, + boolean_t remove_all, + queue_head_t *queue) { boolean_t 
call_removed = FALSE; thread_call_t call; qe_foreach_element_safe(call, queue, tc_call.q_link) { - if (call->tc_call.func != func || + if (call->tc_call.func != func || call->tc_call.param0 != param0) { continue; } @@ -655,11 +661,12 @@ _cancel_func_from_queue(thread_call_func_t func, _internal_call_release(call); call_removed = TRUE; - if (!remove_all) + if (!remove_all) { break; + } } - return (call_removed); + return call_removed; } /* @@ -670,9 +677,9 @@ _cancel_func_from_queue(thread_call_func_t func, */ void thread_call_func_delayed( - thread_call_func_t func, - thread_call_param_t param, - uint64_t deadline) + thread_call_func_t func, + thread_call_param_t param, + uint64_t deadline) { (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, 0, 0); } @@ -686,11 +693,11 @@ thread_call_func_delayed( void thread_call_func_delayed_with_leeway( - thread_call_func_t func, - thread_call_param_t param, - uint64_t deadline, - uint64_t leeway, - uint32_t flags) + thread_call_func_t func, + thread_call_param_t param, + uint64_t deadline, + uint64_t leeway, + uint32_t flags) { (void)thread_call_enter_delayed_internal(NULL, func, param, 0, deadline, leeway, flags); } @@ -712,11 +719,11 @@ thread_call_func_delayed_with_leeway( */ boolean_t thread_call_func_cancel( - thread_call_func_t func, - thread_call_param_t param, - boolean_t cancel_all) + thread_call_func_t func, + thread_call_param_t param, + boolean_t cancel_all) { - boolean_t result; + boolean_t result; assert(func != NULL); @@ -728,18 +735,18 @@ thread_call_func_cancel( if (cancel_all) { /* exhaustively search every queue, and return true if any search found something */ result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) | - _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) | - _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]); + _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) | + _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]); } else { /* early-exit as soon as we find something, don't search other queues */ result = _cancel_func_from_queue(func, param, group, cancel_all, &group->pending_queue) || - _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) || - _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]); + _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_ABSOLUTE]) || + _cancel_func_from_queue(func, param, group, cancel_all, &group->delayed_queues[TCF_CONTINUOUS]); } enable_ints_and_unlock(s); - return (result); + return result; } /* @@ -750,48 +757,48 @@ thread_call_func_cancel( */ thread_call_t thread_call_allocate_with_priority( - thread_call_func_t func, - thread_call_param_t param0, - thread_call_priority_t pri) + thread_call_func_t func, + thread_call_param_t param0, + thread_call_priority_t pri) { return thread_call_allocate_with_options(func, param0, pri, 0); } thread_call_t thread_call_allocate_with_options( - thread_call_func_t func, - thread_call_param_t param0, - thread_call_priority_t pri, - thread_call_options_t options) + thread_call_func_t func, + thread_call_param_t param0, + thread_call_priority_t pri, + thread_call_options_t options) { thread_call_t call = thread_call_allocate(func, param0); switch (pri) { - case THREAD_CALL_PRIORITY_HIGH: - call->tc_index = 
THREAD_CALL_INDEX_HIGH; - break; - case THREAD_CALL_PRIORITY_KERNEL: - call->tc_index = THREAD_CALL_INDEX_KERNEL; - break; - case THREAD_CALL_PRIORITY_USER: - call->tc_index = THREAD_CALL_INDEX_USER; - break; - case THREAD_CALL_PRIORITY_LOW: - call->tc_index = THREAD_CALL_INDEX_LOW; - break; - case THREAD_CALL_PRIORITY_KERNEL_HIGH: - call->tc_index = THREAD_CALL_INDEX_KERNEL_HIGH; - break; - default: - panic("Invalid thread call pri value: %d", pri); - break; + case THREAD_CALL_PRIORITY_HIGH: + call->tc_index = THREAD_CALL_INDEX_HIGH; + break; + case THREAD_CALL_PRIORITY_KERNEL: + call->tc_index = THREAD_CALL_INDEX_KERNEL; + break; + case THREAD_CALL_PRIORITY_USER: + call->tc_index = THREAD_CALL_INDEX_USER; + break; + case THREAD_CALL_PRIORITY_LOW: + call->tc_index = THREAD_CALL_INDEX_LOW; + break; + case THREAD_CALL_PRIORITY_KERNEL_HIGH: + call->tc_index = THREAD_CALL_INDEX_KERNEL_HIGH; + break; + default: + panic("Invalid thread call pri value: %d", pri); + break; } if (options & THREAD_CALL_OPTIONS_ONCE) { - call->tc_flags |= THREAD_CALL_ONCE; + call->tc_flags |= THREAD_CALL_ONCE; } if (options & THREAD_CALL_OPTIONS_SIGNAL) { - call->tc_flags |= THREAD_CALL_SIGNAL | THREAD_CALL_ONCE; + call->tc_flags |= THREAD_CALL_SIGNAL | THREAD_CALL_ONCE; } return call; @@ -799,39 +806,40 @@ thread_call_allocate_with_options( thread_call_t thread_call_allocate_with_qos(thread_call_func_t func, - thread_call_param_t param0, - int qos_tier, - thread_call_options_t options) + thread_call_param_t param0, + int qos_tier, + thread_call_options_t options) { thread_call_t call = thread_call_allocate(func, param0); switch (qos_tier) { - case THREAD_QOS_UNSPECIFIED: - call->tc_index = THREAD_CALL_INDEX_HIGH; - break; - case THREAD_QOS_LEGACY: - call->tc_index = THREAD_CALL_INDEX_USER; - break; - case THREAD_QOS_MAINTENANCE: - case THREAD_QOS_BACKGROUND: - call->tc_index = THREAD_CALL_INDEX_LOW; - break; - case THREAD_QOS_UTILITY: - call->tc_index = THREAD_CALL_INDEX_QOS_UT; - break; - case THREAD_QOS_USER_INITIATED: - call->tc_index = THREAD_CALL_INDEX_QOS_IN; - break; - case THREAD_QOS_USER_INTERACTIVE: - call->tc_index = THREAD_CALL_INDEX_QOS_UI; - break; - default: - panic("Invalid thread call qos value: %d", qos_tier); - break; + case THREAD_QOS_UNSPECIFIED: + call->tc_index = THREAD_CALL_INDEX_HIGH; + break; + case THREAD_QOS_LEGACY: + call->tc_index = THREAD_CALL_INDEX_USER; + break; + case THREAD_QOS_MAINTENANCE: + case THREAD_QOS_BACKGROUND: + call->tc_index = THREAD_CALL_INDEX_LOW; + break; + case THREAD_QOS_UTILITY: + call->tc_index = THREAD_CALL_INDEX_QOS_UT; + break; + case THREAD_QOS_USER_INITIATED: + call->tc_index = THREAD_CALL_INDEX_QOS_IN; + break; + case THREAD_QOS_USER_INTERACTIVE: + call->tc_index = THREAD_CALL_INDEX_QOS_UI; + break; + default: + panic("Invalid thread call qos value: %d", qos_tier); + break; } - if (options & THREAD_CALL_OPTIONS_ONCE) + if (options & THREAD_CALL_OPTIONS_ONCE) { call->tc_flags |= THREAD_CALL_ONCE; + } /* does not support THREAD_CALL_OPTIONS_SIGNAL */ @@ -846,16 +854,16 @@ thread_call_allocate_with_qos(thread_call_func_t func, */ thread_call_t thread_call_allocate( - thread_call_func_t func, - thread_call_param_t param0) + thread_call_func_t func, + thread_call_param_t param0) { - thread_call_t call = zalloc(thread_call_zone); + thread_call_t call = zalloc(thread_call_zone); thread_call_setup(call, func, param0); call->tc_refs = 1; call->tc_flags = THREAD_CALL_ALLOC; - return (call); + return call; } /* @@ -871,16 +879,16 @@ thread_call_allocate( */ boolean_t 
thread_call_free( - thread_call_t call) + thread_call_t call) { spl_t s = disable_ints_and_lock(); if (call->tc_call.queue != NULL || - ((call->tc_flags & THREAD_CALL_RESCHEDULE) != 0)) { + ((call->tc_flags & THREAD_CALL_RESCHEDULE) != 0)) { thread_call_unlock(); splx(s); - return (FALSE); + return FALSE; } int32_t refs = --call->tc_refs; @@ -889,7 +897,7 @@ thread_call_free( } if ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING) - == ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING) & call->tc_flags)) { + == ((THREAD_CALL_SIGNAL | THREAD_CALL_RUNNING) & call->tc_flags)) { thread_call_wait_once_locked(call, s); /* thread call lock has been unlocked */ } else { @@ -901,7 +909,7 @@ thread_call_free( zfree(thread_call_zone, call); } - return (TRUE); + return TRUE; } /* @@ -914,18 +922,18 @@ thread_call_free( */ boolean_t thread_call_enter( - thread_call_t call) + thread_call_t call) { return thread_call_enter1(call, 0); } boolean_t thread_call_enter1( - thread_call_t call, - thread_call_param_t param1) + thread_call_t call, + thread_call_param_t param1) { - boolean_t result = TRUE; - thread_call_group_t group; + boolean_t result = TRUE; + thread_call_group_t group; assert(call->tc_call.func != NULL); @@ -943,7 +951,7 @@ thread_call_enter1( enable_ints_and_unlock(s); - return (result); + return result; } /* @@ -957,8 +965,8 @@ thread_call_enter1( */ boolean_t thread_call_enter_delayed( - thread_call_t call, - uint64_t deadline) + thread_call_t call, + uint64_t deadline) { assert(call != NULL); return thread_call_enter_delayed_internal(call, NULL, 0, 0, deadline, 0, 0); @@ -966,9 +974,9 @@ thread_call_enter_delayed( boolean_t thread_call_enter1_delayed( - thread_call_t call, - thread_call_param_t param1, - uint64_t deadline) + thread_call_t call, + thread_call_param_t param1, + uint64_t deadline) { assert(call != NULL); return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, 0, 0); @@ -976,11 +984,11 @@ thread_call_enter1_delayed( boolean_t thread_call_enter_delayed_with_leeway( - thread_call_t call, - thread_call_param_t param1, - uint64_t deadline, - uint64_t leeway, - unsigned int flags) + thread_call_t call, + thread_call_param_t param1, + uint64_t deadline, + uint64_t leeway, + unsigned int flags) { assert(call != NULL); return thread_call_enter_delayed_internal(call, NULL, 0, param1, deadline, leeway, flags); @@ -1000,22 +1008,22 @@ thread_call_enter_delayed_with_leeway( * flags - THREAD_CALL_DELAY_XXX : classification of caller's desires wrt timer coalescing. * THREAD_CALL_DELAY_LEEWAY : value in leeway is used for timer coalescing. * THREAD_CALL_CONTINUOUS: thread call will be called according to mach_continuous_time rather - * than mach_absolute_time + * than mach_absolute_time */ boolean_t thread_call_enter_delayed_internal( - thread_call_t call, - thread_call_func_t alt_func, - thread_call_param_t alt_param0, - thread_call_param_t param1, - uint64_t deadline, - uint64_t leeway, - unsigned int flags) + thread_call_t call, + thread_call_func_t alt_func, + thread_call_param_t alt_param0, + thread_call_param_t param1, + uint64_t deadline, + uint64_t leeway, + unsigned int flags) { - boolean_t result = TRUE; - thread_call_group_t group; - uint64_t now, sdeadline, slop; - uint32_t urgency; + boolean_t result = TRUE; + thread_call_group_t group; + uint64_t now, sdeadline, slop; + uint32_t urgency; thread_call_flavor_t flavor = (flags & THREAD_CALL_CONTINUOUS) ? 
TCF_CONTINUOUS : TCF_ABSOLUTE; @@ -1048,13 +1056,15 @@ thread_call_enter_delayed_internal( boolean_t ratelimited = FALSE; slop = timer_call_slop(deadline, now, urgency, current_thread(), &ratelimited); - if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop) + if ((flags & THREAD_CALL_DELAY_LEEWAY) != 0 && leeway > slop) { slop = leeway; + } - if (UINT64_MAX - deadline <= slop) + if (UINT64_MAX - deadline <= slop) { deadline = UINT64_MAX; - else + } else { deadline += slop; + } if (ratelimited) { call->tc_flags |= TIMER_CALL_RATELIMITED; @@ -1072,13 +1082,13 @@ thread_call_enter_delayed_internal( #if CONFIG_DTRACE DTRACE_TMR5(thread_callout__create, thread_call_func_t, call->tc_call.func, - uint64_t, (deadline - sdeadline), uint64_t, (call->tc_ttd >> 32), - (unsigned) (call->tc_ttd & 0xFFFFFFFF), call); + uint64_t, (deadline - sdeadline), uint64_t, (call->tc_ttd >> 32), + (unsigned) (call->tc_ttd & 0xFFFFFFFF), call); #endif enable_ints_and_unlock(s); - return (result); + return result; } /* @@ -1109,14 +1119,15 @@ thread_call_cancel_locked(thread_call_t call) canceled = _call_dequeue(call, group); if (do_cancel_callout) { - if (_arm_delayed_call_timer(NULL, group, flavor) == false) + if (_arm_delayed_call_timer(NULL, group, flavor) == false) { timer_call_cancel(&group->delayed_timers[flavor]); + } } } #if CONFIG_DTRACE DTRACE_TMR4(thread_callout__cancel, thread_call_func_t, call->tc_call.func, - 0, (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF)); + 0, (call->tc_ttd >> 32), (unsigned) (call->tc_ttd & 0xFFFFFFFF)); #endif return canceled; @@ -1153,15 +1164,18 @@ thread_call_cancel(thread_call_t call) boolean_t thread_call_cancel_wait(thread_call_t call) { - if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) + if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) { panic("thread_call_cancel_wait: can't wait on thread call whose storage I don't own"); + } - if (!ml_get_interrupts_enabled()) + if (!ml_get_interrupts_enabled()) { panic("unsafe thread_call_cancel_wait"); + } - if (current_thread()->thc_state.thc_call == call) + if (current_thread()->thc_state.thc_call == call) { panic("thread_call_cancel_wait: deadlock waiting on self from inside call: %p to function %p", - call, call->tc_call.func); + call, call->tc_call.func); + } spl_t s = disable_ints_and_lock(); @@ -1210,15 +1224,15 @@ thread_call_cancel_wait(thread_call_t call) */ static __inline__ void thread_call_wake( - thread_call_group_t group) + thread_call_group_t group) { - /* + /* * New behavior: use threads if you've got 'em. * Traditional behavior: wake only if no threads running. 
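 * Concretely: a serial group (TCG_PARALLEL clear) wakes an idle
 * thread only when active_count is zero, preserving one-at-a-time
 * execution; a parallel group wakes an idle thread on every enqueue
 * and, when none is idle, pokes the daemon to consider growing the
 * pool.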
*/ if (group_isparallel(group) || group->active_count == 0) { if (waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64, - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES) == KERN_SUCCESS) { + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES) == KERN_SUCCESS) { group->idle_count--; group->active_count++; if (group->idle_count == 0 && (group->flags & TCG_DEALLOC_ACTIVE) == TCG_DEALLOC_ACTIVE) { @@ -1230,7 +1244,7 @@ thread_call_wake( if (!thread_call_daemon_awake && thread_call_group_should_add_thread(group)) { thread_call_daemon_awake = TRUE; waitq_wakeup64_one(&daemon_waitq, NO_EVENT64, - THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); + THREAD_AWAKENED, WAITQ_ALL_PRIORITIES); } } } @@ -1243,10 +1257,10 @@ thread_call_wake( */ static void sched_call_thread( - int type, - thread_t thread) + int type, + thread_t thread) { - thread_call_group_t group; + thread_call_group_t group; group = thread->thc_state.thc_group; assert((group - &thread_call_groups[0]) < THREAD_CALL_INDEX_MAX); @@ -1254,27 +1268,27 @@ sched_call_thread( thread_call_lock_spin(); switch (type) { + case SCHED_CALL_BLOCK: + assert(group->active_count); + --group->active_count; + group->blocked_count++; + if (group->pending_count > 0) { + thread_call_wake(group); + } + break; - case SCHED_CALL_BLOCK: - assert(group->active_count); - --group->active_count; - group->blocked_count++; - if (group->pending_count > 0) - thread_call_wake(group); - break; - - case SCHED_CALL_UNBLOCK: - assert(group->blocked_count); - --group->blocked_count; - group->active_count++; - break; + case SCHED_CALL_UNBLOCK: + assert(group->blocked_count); + --group->blocked_count; + group->active_count++; + break; } thread_call_unlock(); } -/* - * Interrupts disabled, lock held; returns the same way. +/* + * Interrupts disabled, lock held; returns the same way. * Only called on thread calls whose storage we own. Wakes up * anyone who might be waiting on this work item and frees it * if the client has so requested. 
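A condensed sketch of what the body below must accomplish (the THREAD_CALL_RESCHEDULE re-arm path and the locking are omitted here; this is not the literal control flow):

static void
sketch_call_finish(thread_call_t call)
{
	uint32_t flags = call->tc_flags;

	call->tc_flags &= ~(THREAD_CALL_RESCHEDULE | THREAD_CALL_RUNNING | THREAD_CALL_WAIT);

	if (flags & THREAD_CALL_WAIT) {
		/* release thread_call_wait_*() sleepers parked on the call's address */
		thread_wakeup((event_t)call);
	}

	if ((flags & THREAD_CALL_SIGNAL) == 0 && --call->tc_refs == 0) {
		/* the invocation's reference was the last one: free client storage */
		zfree(thread_call_zone, call);
	}
}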
@@ -1291,12 +1305,13 @@ thread_call_finish(thread_call_t call, thread_call_group_t group, spl_t *s) flags = call->tc_flags; signal = ((THREAD_CALL_SIGNAL & flags) != 0); - if (!signal) { + if (!signal) { /* The thread call thread owns a ref until the call is finished */ - if (call->tc_refs <= 0) + if (call->tc_refs <= 0) { panic("thread_call_finish: detected over-released thread call: %p", call); + } call->tc_refs--; - } + } call->tc_flags &= ~(THREAD_CALL_RESCHEDULE | THREAD_CALL_RUNNING | THREAD_CALL_WAIT); @@ -1353,7 +1368,7 @@ thread_call_finish(thread_call_t call, thread_call_group_t group, spl_t *s) /* THREAD_CALL_SIGNAL call may have been freed */ } - return (repend); + return repend; } /* @@ -1370,28 +1385,28 @@ thread_call_invoke(thread_call_func_t func, thread_call_param_t param0, thread_c #if DEVELOPMENT || DEBUG KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_START, - VM_KERNEL_UNSLIDE(func), VM_KERNEL_ADDRHIDE(param0), VM_KERNEL_ADDRHIDE(param1), 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_START, + VM_KERNEL_UNSLIDE(func), VM_KERNEL_ADDRHIDE(param0), VM_KERNEL_ADDRHIDE(param1), 0, 0); #endif /* DEVELOPMENT || DEBUG */ #if CONFIG_DTRACE uint64_t tc_ttd = call->tc_ttd; boolean_t is_delayed = call->tc_flags & THREAD_CALL_DELAYED; DTRACE_TMR6(thread_callout__start, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32), - (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call); + (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call); #endif (*func)(param0, param1); #if CONFIG_DTRACE DTRACE_TMR6(thread_callout__end, thread_call_func_t, func, int, 0, int, (tc_ttd >> 32), - (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call); + (unsigned) (tc_ttd & 0xFFFFFFFF), is_delayed, call); #endif #if DEVELOPMENT || DEBUG KERNEL_DEBUG_CONSTANT( - MACHDBG_CODE(DBG_MACH_SCHED,MACH_CALLOUT) | DBG_FUNC_END, - VM_KERNEL_UNSLIDE(func), 0, 0, 0, 0); + MACHDBG_CODE(DBG_MACH_SCHED, MACH_CALLOUT) | DBG_FUNC_END, + VM_KERNEL_UNSLIDE(func), 0, 0, 0, 0); #endif /* DEVELOPMENT || DEBUG */ current_thread()->thc_state.thc_call = NULL; @@ -1402,17 +1417,18 @@ thread_call_invoke(thread_call_func_t func, thread_call_param_t param0, thread_c */ static void thread_call_thread( - thread_call_group_t group, - wait_result_t wres) + thread_call_group_t group, + wait_result_t wres) { - thread_t self = current_thread(); - boolean_t canwait; + thread_t self = current_thread(); + boolean_t canwait; - if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0) + if ((thread_get_tag_internal(self) & THREAD_TAG_CALLOUT) == 0) { (void)thread_set_tag_internal(self, THREAD_TAG_CALLOUT); + } /* - * A wakeup with THREAD_INTERRUPTED indicates that + * A wakeup with THREAD_INTERRUPTED indicates that * we should terminate. 
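 * (That interrupted wakeup is issued by thread_call_dealloc_timer()
 * below, when a surplus thread has stayed idle for a full dealloc
 * interval.)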
*/ if (wres == THREAD_INTERRUPTED) { @@ -1428,9 +1444,9 @@ thread_call_thread( thread_sched_call(self, sched_call_thread); while (group->pending_count > 0) { - thread_call_t call; - thread_call_func_t func; - thread_call_param_t param0, param1; + thread_call_t call; + thread_call_func_t func; + thread_call_param_t param0, param1; call = qe_dequeue_head(&group->pending_queue, struct thread_call, tc_call.q_link); assert(call != NULL); @@ -1451,9 +1467,10 @@ thread_call_thread( if ((call->tc_flags & THREAD_CALL_ALLOC) != 0) { canwait = TRUE; call->tc_flags |= THREAD_CALL_RUNNING; - call->tc_refs++; /* Delay free until we're done */ - } else + call->tc_refs++; /* Delay free until we're done */ + } else { canwait = FALSE; + } enable_ints_and_unlock(s); @@ -1462,7 +1479,7 @@ thread_call_thread( if (get_preemption_level() != 0) { int pl = get_preemption_level(); panic("thread_call_thread: preemption_level %d, last callout %p(%p, %p)", - pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1); + pl, (void *)VM_KERNEL_UNSLIDE(func), param0, param1); } s = disable_ints_and_lock(); @@ -1475,23 +1492,24 @@ thread_call_thread( thread_sched_call(self, NULL); group->active_count--; - + if (self->callout_woken_from_icontext && !self->callout_woke_thread) { ledger_credit(self->t_ledger, task_ledgers.interrupt_wakeups, 1); - if (self->callout_woken_from_platform_idle) - ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1); + if (self->callout_woken_from_platform_idle) { + ledger_credit(self->t_ledger, task_ledgers.platform_idle_wakeups, 1); + } } - + self->callout_woken_from_icontext = FALSE; self->callout_woken_from_platform_idle = FALSE; self->callout_woke_thread = FALSE; if (group_isparallel(group)) { /* - * For new style of thread group, thread always blocks. + * For new style of thread group, thread always blocks. * If we have more than the target number of threads, - * and this is the first to block, and it isn't active - * already, set a timer for deallocating a thread if we + * and this is the first to block, and it isn't active + * already, set a timer for deallocating a thread if we * continue to have a surplus. */ group->idle_count++; @@ -1535,8 +1553,8 @@ thread_call_thread( /* * thread_call_daemon: walk list of groups, allocating - * threads if appropriate (as determined by - * thread_call_group_should_add_thread()). + * threads if appropriate (as determined by + * thread_call_group_should_add_thread()). */ static void thread_call_daemon_continue(__unused void *arg) @@ -1554,7 +1572,7 @@ thread_call_daemon_continue(__unused void *arg) kern_return_t kr = thread_call_thread_create(group); if (kr != KERN_SUCCESS) { /* - * On failure, just pause for a moment and give up. + * On failure, just pause for a moment and give up. * We can try again later. */ delay(10000); /* 10 ms */ @@ -1578,12 +1596,12 @@ out: static void thread_call_daemon( - __unused void *arg) + __unused void *arg) { - thread_t self = current_thread(); + thread_t self = current_thread(); self->options |= TH_OPT_VMPRIV; - vm_page_free_reserve(2); /* XXX */ + vm_page_free_reserve(2); /* XXX */ thread_set_thread_name(self, "thread_call_daemon"); @@ -1592,7 +1610,7 @@ thread_call_daemon( } /* - * Schedule timer to deallocate a worker thread if we have a surplus + * Schedule timer to deallocate a worker thread if we have a surplus * of threads (in excess of the group's target) and at least one thread * is idle the whole time. 
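 * With THREAD_CALL_DEALLOC_INTERVAL_NS at 5 ms, a group above its
 * target_thread_count sheds at most one idle thread per timer
 * expiration, and only when a thread sat idle for the whole interval.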
*/ @@ -1627,20 +1645,22 @@ thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1) thread_call_lock_spin(); - if (flavor == TCF_CONTINUOUS) + if (flavor == TCF_CONTINUOUS) { now = mach_continuous_time(); - else if (flavor == TCF_ABSOLUTE) + } else if (flavor == TCF_ABSOLUTE) { now = mach_absolute_time(); - else + } else { panic("invalid timer flavor: %d", flavor); + } - do { + do { restart = FALSE; qe_foreach_element_safe(call, &group->delayed_queues[flavor], tc_call.q_link) { - if (flavor == TCF_CONTINUOUS) + if (flavor == TCF_CONTINUOUS) { assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == THREAD_CALL_CONTINUOUS); - else + } else { assert((call->tc_flags & THREAD_CALL_CONTINUOUS) == 0); + } /* * if we hit a call that isn't yet ready to expire, @@ -1649,8 +1669,9 @@ thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1) * and therefore be ready to expire. * Sort by deadline then by soft deadline to avoid this */ - if (call->tc_soft_deadline > now) + if (call->tc_soft_deadline > now) { break; + } /* * If we hit a rate-limited timer, don't eagerly wake it up. @@ -1670,7 +1691,7 @@ thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1) old_queue = call_entry_dequeue(&call->tc_call); assert(old_queue == &group->delayed_queues[flavor]); - do { + do { thread_call_func_t func = call->tc_call.func; thread_call_param_t param0 = call->tc_call.param0; thread_call_param_t param1 = call->tc_call.param1; @@ -1681,7 +1702,7 @@ thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1) thread_call_lock_spin(); repend = thread_call_finish(call, group, NULL); - } while (repend); + } while (repend); /* call may have been freed */ restart = TRUE; @@ -1699,7 +1720,7 @@ thread_call_delayed_timer(timer_call_param_t p0, timer_call_param_t p1) static void thread_call_delayed_timer_rescan(thread_call_group_t group, - thread_call_flavor_t flavor) + thread_call_flavor_t flavor) { thread_call_t call; uint64_t now; @@ -1719,7 +1740,7 @@ thread_call_delayed_timer_rescan(thread_call_group_t group, _pending_call_enqueue(call, group); } else { uint64_t skew = call->tc_call.deadline - call->tc_soft_deadline; - assert (call->tc_call.deadline >= call->tc_soft_deadline); + assert(call->tc_call.deadline >= call->tc_soft_deadline); /* * On a latency quality-of-service level change, * re-sort potentially rate-limited callout. 
The platform @@ -1738,7 +1759,8 @@ thread_call_delayed_timer_rescan(thread_call_group_t group, } void -thread_call_delayed_timer_rescan_all(void) { +thread_call_delayed_timer_rescan_all(void) +{ for (int i = 0; i < THREAD_CALL_INDEX_MAX; i++) { thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_ABSOLUTE); thread_call_delayed_timer_rescan(&thread_call_groups[i], TCF_CONTINUOUS); @@ -1752,8 +1774,8 @@ thread_call_delayed_timer_rescan_all(void) { */ static void thread_call_dealloc_timer( - timer_call_param_t p0, - __unused timer_call_param_t p1) + timer_call_param_t p0, + __unused timer_call_param_t p1) { thread_call_group_t group = (thread_call_group_t)p0; uint64_t now; @@ -1771,7 +1793,7 @@ thread_call_dealloc_timer( terminated = TRUE; group->idle_count--; res = waitq_wakeup64_one(&group->idle_waitq, NO_EVENT64, - THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES); + THREAD_INTERRUPTED, WAITQ_ALL_PRIORITIES); if (res != KERN_SUCCESS) { panic("Unable to wake up idle thread for termination?"); } @@ -1827,14 +1849,16 @@ thread_call_wait_once_locked(thread_call_t call, spl_t s) call->tc_flags |= THREAD_CALL_WAIT; wait_result_t res = assert_wait(call, THREAD_UNINT); - if (res != THREAD_WAITING) + if (res != THREAD_WAITING) { panic("Unable to assert wait: %d", res); + } enable_ints_and_unlock(s); res = thread_block(THREAD_CONTINUE_NULL); - if (res != THREAD_AWAKENED) + if (res != THREAD_AWAKENED) { panic("Awoken with %d?", res); + } /* returns unlocked */ return TRUE; @@ -1852,18 +1876,22 @@ thread_call_wait_once_locked(thread_call_t call, spl_t s) boolean_t thread_call_wait_once(thread_call_t call) { - if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) + if ((call->tc_flags & THREAD_CALL_ALLOC) == 0) { panic("thread_call_wait_once: can't wait on thread call whose storage I don't own"); + } - if ((call->tc_flags & THREAD_CALL_ONCE) == 0) + if ((call->tc_flags & THREAD_CALL_ONCE) == 0) { panic("thread_call_wait_once: can't wait_once on a non-once call"); + } - if (!ml_get_interrupts_enabled()) + if (!ml_get_interrupts_enabled()) { panic("unsafe thread_call_wait_once"); + } - if (current_thread()->thc_state.thc_call == call) + if (current_thread()->thc_state.thc_call == call) { panic("thread_call_wait_once: deadlock waiting on self from inside call: %p to function %p", - call, call->tc_call.func); + call, call->tc_call.func); + } spl_t s = disable_ints_and_lock(); @@ -1896,14 +1924,16 @@ thread_call_wait_locked(thread_call_t call, spl_t s) call->tc_flags |= THREAD_CALL_WAIT; res = assert_wait(call, THREAD_UNINT); - if (res != THREAD_WAITING) + if (res != THREAD_WAITING) { panic("Unable to assert wait: %d", res); + } enable_ints_and_unlock(s); res = thread_block(THREAD_CONTINUE_NULL); - if (res != THREAD_AWAKENED) + if (res != THREAD_AWAKENED) { panic("Awoken with %d?", res); + } s = disable_ints_and_lock(); } @@ -1916,7 +1946,7 @@ thread_call_wait_locked(thread_call_t call, spl_t s) * currently being executed. */ boolean_t -thread_call_isactive(thread_call_t call) +thread_call_isactive(thread_call_t call) { boolean_t active; @@ -1946,4 +1976,3 @@ adjust_cont_time_thread_calls(void) enable_ints_and_unlock(s); } - diff --git a/osfmk/kern/thread_call.h b/osfmk/kern/thread_call.h index 7b326053e..f9730a78e 100644 --- a/osfmk/kern/thread_call.h +++ b/osfmk/kern/thread_call.h @@ -2,7 +2,7 @@ * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*! - @header thread_call.h - @discussion Facilities for executing work asynchronously. + * @header thread_call.h + * @discussion Facilities for executing work asynchronously. */ #ifndef _KERN_THREAD_CALL_H_ @@ -45,25 +45,25 @@ typedef struct thread_call *thread_call_t; typedef void *thread_call_param_t; typedef void (*thread_call_func_t)( - thread_call_param_t param0, - thread_call_param_t param1); + thread_call_param_t param0, + thread_call_param_t param1); /*! - @enum thread_call_priority_t - @discussion Thread call priorities should not be assumed to have any specific - numerical value; they should be interpreted as importances or roles for work - items, priorities for which will be reasonably managed by the subsystem. - @constant THREAD_CALL_PRIORITY_HIGH Importance above everything but realtime. - Thread calls allocated with this priority execute at extremely high priority, - above everything but realtime threads. They are generally executed in serial. - Though they may execute concurrently under some circumstances, no fan-out is implied. - These work items should do very small amounts of work or risk disrupting system - responsiveness. - @constant THREAD_CALL_PRIORITY_KERNEL Importance similar to that of normal kernel - threads. - @constant THREAD_CALL_PRIORITY_USER Importance similar to that of normal user threads. - @constant THREAD_CALL_PRIORITY_LOW Very low importance. - @constant THREAD_CALL_PRIORITY_KERNEL_HIGH Importance higher than most kernel - threads. + * @enum thread_call_priority_t + * @discussion Thread call priorities should not be assumed to have any specific + * numerical value; they should be interpreted as importances or roles for work + * items, priorities for which will be reasonably managed by the subsystem. + * @constant THREAD_CALL_PRIORITY_HIGH Importance above everything but realtime. + * Thread calls allocated with this priority execute at extremely high priority, + * above everything but realtime threads. They are generally executed in serial. + * Though they may execute concurrently under some circumstances, no fan-out is implied. + * These work items should do very small amounts of work or risk disrupting system + * responsiveness. + * @constant THREAD_CALL_PRIORITY_KERNEL Importance similar to that of normal kernel + * threads. + * @constant THREAD_CALL_PRIORITY_USER Importance similar to that of normal user threads. + * @constant THREAD_CALL_PRIORITY_LOW Very low importance. 
+ * @constant THREAD_CALL_PRIORITY_KERNEL_HIGH Importance higher than most kernel + * threads. */ typedef enum { THREAD_CALL_PRIORITY_HIGH = 0, @@ -86,62 +86,62 @@ typedef uint32_t thread_call_options_t; __BEGIN_DECLS /*! - @function thread_call_enter - @abstract Submit a thread call work item for immediate execution. - @discussion If the work item is already scheduled for delayed execution, and it has - not yet begun to run, that delayed invocation will be cancelled. Note that if a - thread call is rescheduled from its own callback, then multiple invocations of the - callback may be in flight at the same time. - @result TRUE if the call was already pending for either delayed or immediate - execution, FALSE otherwise. - @param call The thread call to execute. + * @function thread_call_enter + * @abstract Submit a thread call work item for immediate execution. + * @discussion If the work item is already scheduled for delayed execution, and it has + * not yet begun to run, that delayed invocation will be cancelled. Note that if a + * thread call is rescheduled from its own callback, then multiple invocations of the + * callback may be in flight at the same time. + * @result TRUE if the call was already pending for either delayed or immediate + * execution, FALSE otherwise. + * @param call The thread call to execute. */ -extern boolean_t thread_call_enter( - thread_call_t call); +extern boolean_t thread_call_enter( + thread_call_t call); /*! - @function thread_call_enter1 - @abstract Submit a thread call work item for immediate execution, with an extra parameter. - @discussion This routine is identical to thread_call_enter(), except that - the second parameter to the callback is specified. - @result TRUE if the call was already pending for either delayed or immediate - execution, FALSE otherwise. - @param call The thread call to execute. - @param param1 Parameter to pass callback. + * @function thread_call_enter1 + * @abstract Submit a thread call work item for immediate execution, with an extra parameter. + * @discussion This routine is identical to thread_call_enter(), except that + * the second parameter to the callback is specified. + * @result TRUE if the call was already pending for either delayed or immediate + * execution, FALSE otherwise. + * @param call The thread call to execute. + * @param param1 Parameter to pass callback. */ -extern boolean_t thread_call_enter1( - thread_call_t call, - thread_call_param_t param1); - -/*! - @function thread_call_enter_delayed - @abstract Submit a thread call to be executed at some point in the future. - @discussion If the work item is already scheduled for delayed or immediate execution, - and it has not yet begun to run, that invocation will be cancelled in favor of execution - at the newly specified time. Note that if a thread call is rescheduled from its own callback, - then multiple invocations of the callback may be in flight at the same time. - @result TRUE if the call was already pending for either delayed or immediate - execution, FALSE otherwise. - @param call The thread call to execute. - @param deadline Time, in absolute time units, at which to execute callback. +extern boolean_t thread_call_enter1( + thread_call_t call, + thread_call_param_t param1); + +/*! + * @function thread_call_enter_delayed + * @abstract Submit a thread call to be executed at some point in the future. 
+ * @discussion If the work item is already scheduled for delayed or immediate execution, + * and it has not yet begun to run, that invocation will be cancelled in favor of execution + * at the newly specified time. Note that if a thread call is rescheduled from its own callback, + * then multiple invocations of the callback may be in flight at the same time. + * @result TRUE if the call was already pending for either delayed or immediate + * execution, FALSE otherwise. + * @param call The thread call to execute. + * @param deadline Time, in absolute time units, at which to execute callback. */ -extern boolean_t thread_call_enter_delayed( - thread_call_t call, - uint64_t deadline); -/*! - @function thread_call_enter1_delayed - @abstract Submit a thread call to be executed at some point in the future, with an extra parameter. - @discussion This routine is identical to thread_call_enter_delayed(), - except that a second parameter to the callback is specified. - @result TRUE if the call was already pending for either delayed or immediate - execution, FALSE otherwise. - @param call The thread call to execute. - @param param1 Second parameter to callback. - @param deadline Time, in absolute time units, at which to execute callback. +extern boolean_t thread_call_enter_delayed( + thread_call_t call, + uint64_t deadline); +/*! + * @function thread_call_enter1_delayed + * @abstract Submit a thread call to be executed at some point in the future, with an extra parameter. + * @discussion This routine is identical to thread_call_enter_delayed(), + * except that a second parameter to the callback is specified. + * @result TRUE if the call was already pending for either delayed or immediate + * execution, FALSE otherwise. + * @param call The thread call to execute. + * @param param1 Second parameter to callback. + * @param deadline Time, in absolute time units, at which to execute callback. */ -extern boolean_t thread_call_enter1_delayed( - thread_call_t call, - thread_call_param_t param1, - uint64_t deadline); +extern boolean_t thread_call_enter1_delayed( + thread_call_t call, + thread_call_param_t param1, + uint64_t deadline); #ifdef XNU_KERNEL_PRIVATE /* @@ -157,16 +157,16 @@ extern boolean_t thread_call_enter1_delayed( * attributes, in addition to the per-thread_call urgency specification, * are used to establish coalescing behavior. 
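 * A caller that additionally sets THREAD_CALL_DELAY_LEEWAY (below)
 * supplies an explicit leeway value, which can only widen, never
 * narrow, the window computed from the urgency class.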
*/ -#define THREAD_CALL_DELAY_SYS_NORMAL TIMEOUT_URGENCY_SYS_NORMAL -#define THREAD_CALL_DELAY_SYS_CRITICAL TIMEOUT_URGENCY_SYS_CRITICAL -#define THREAD_CALL_DELAY_SYS_BACKGROUND TIMEOUT_URGENCY_SYS_BACKGROUND +#define THREAD_CALL_DELAY_SYS_NORMAL TIMEOUT_URGENCY_SYS_NORMAL +#define THREAD_CALL_DELAY_SYS_CRITICAL TIMEOUT_URGENCY_SYS_CRITICAL +#define THREAD_CALL_DELAY_SYS_BACKGROUND TIMEOUT_URGENCY_SYS_BACKGROUND -#define THREAD_CALL_DELAY_USER_MASK TIMEOUT_URGENCY_USER_MASK -#define THREAD_CALL_DELAY_USER_NORMAL TIMEOUT_URGENCY_USER_NORMAL -#define THREAD_CALL_DELAY_USER_CRITICAL TIMEOUT_URGENCY_USER_CRITICAL -#define THREAD_CALL_DELAY_USER_BACKGROUND TIMEOUT_URGENCY_USER_BACKGROUND +#define THREAD_CALL_DELAY_USER_MASK TIMEOUT_URGENCY_USER_MASK +#define THREAD_CALL_DELAY_USER_NORMAL TIMEOUT_URGENCY_USER_NORMAL +#define THREAD_CALL_DELAY_USER_CRITICAL TIMEOUT_URGENCY_USER_CRITICAL +#define THREAD_CALL_DELAY_USER_BACKGROUND TIMEOUT_URGENCY_USER_BACKGROUND -#define THREAD_CALL_DELAY_URGENCY_MASK TIMEOUT_URGENCY_MASK +#define THREAD_CALL_DELAY_URGENCY_MASK TIMEOUT_URGENCY_MASK /* * Indicate that a specific leeway value is being provided (otherwise @@ -174,7 +174,7 @@ extern boolean_t thread_call_enter1_delayed( * only be used to extend the leeway calculated internally from the * urgency class provided. */ -#define THREAD_CALL_DELAY_LEEWAY TIMEOUT_URGENCY_LEEWAY +#define THREAD_CALL_DELAY_LEEWAY TIMEOUT_URGENCY_LEEWAY /* * Indicates that the time parameters should be interpreted as @@ -183,164 +183,164 @@ extern boolean_t thread_call_enter1_delayed( */ #define THREAD_CALL_CONTINUOUS 0x100 -/*! - @function thread_call_enter_delayed_with_leeway - @abstract Submit a thread call to be executed at some point in the future. - @discussion If the work item is already scheduled for delayed or immediate execution, - and it has not yet begun to run, that invocation will be cancelled in favor of execution - at the newly specified time. Note that if a thread call is rescheduled from its own callback, - then multiple invocations of the callback may be in flight at the same time. - @result TRUE if the call was already pending for either delayed or immediate - execution, FALSE otherwise. - @param call The thread call to execute. - @param param1 Second parameter to callback. - @param deadline Time, in absolute time units, at which to execute callback. - @param leeway Time delta, in absolute time units, which sets range of time allowing kernel - to decide appropriate time to run. - @param flags configuration for timers in kernel. +/*! + * @function thread_call_enter_delayed_with_leeway + * @abstract Submit a thread call to be executed at some point in the future. + * @discussion If the work item is already scheduled for delayed or immediate execution, + * and it has not yet begun to run, that invocation will be cancelled in favor of execution + * at the newly specified time. Note that if a thread call is rescheduled from its own callback, + * then multiple invocations of the callback may be in flight at the same time. + * @result TRUE if the call was already pending for either delayed or immediate + * execution, FALSE otherwise. + * @param call The thread call to execute. + * @param param1 Second parameter to callback. + * @param deadline Time, in absolute time units, at which to execute callback. + * @param leeway Time delta, in absolute time units, which sets range of time allowing kernel + * to decide appropriate time to run. + * @param flags configuration for timers in kernel. 
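+ * A minimal usage sketch (values illustrative; call was returned by a
+ * thread_call_allocate* variant, and the two conversion routines are
+ * assumed from kern/clock.h):
+ *
+ *	uint64_t deadline, leeway;
+ *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
+ *	nanoseconds_to_absolutetime(2 * NSEC_PER_MSEC, &leeway);
+ *	thread_call_enter_delayed_with_leeway(call, NULL, deadline, leeway,
+ *	    THREAD_CALL_DELAY_USER_NORMAL | THREAD_CALL_DELAY_LEEWAY);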
 */
-extern boolean_t thread_call_enter_delayed_with_leeway(
- thread_call_t call,
- thread_call_param_t param1,
- uint64_t deadline,
- uint64_t leeway,
- uint32_t flags);
+extern boolean_t thread_call_enter_delayed_with_leeway(
+ thread_call_t call,
+ thread_call_param_t param1,
+ uint64_t deadline,
+ uint64_t leeway,
+ uint32_t flags);

#endif /* XNU_KERNEL_PRIVATE */

/*!
- @function thread_call_cancel
- @abstract Attempt to cancel a pending invocation of a thread call.
- @discussion Attempt to cancel a thread call which has been scheduled
- for execution with a thread_call_enter* variant. If the call has not
- yet begun executing, the pending invocation will be cancelled and TRUE
- will be returned. If the work item has already begun executing,
- thread_call_cancel will return FALSE immediately; the callback may be
- about to run, currently running, or already done executing.
- @result TRUE if the call was successfully cancelled, FALSE otherwise.
+ * @function thread_call_cancel
+ * @abstract Attempt to cancel a pending invocation of a thread call.
+ * @discussion Attempt to cancel a thread call which has been scheduled
+ * for execution with a thread_call_enter* variant. If the call has not
+ * yet begun executing, the pending invocation will be cancelled and TRUE
+ * will be returned. If the work item has already begun executing,
+ * thread_call_cancel will return FALSE immediately; the callback may be
+ * about to run, currently running, or already done executing.
+ * @result TRUE if the call was successfully cancelled, FALSE otherwise.
+ */
+extern boolean_t thread_call_cancel(
+ thread_call_t call);
+/*!
+ * @function thread_call_cancel_wait
+ * @abstract Attempt to cancel a pending invocation of a thread call.
+ * If unable to cancel, wait for current invocation to finish.
+ * @discussion Attempt to cancel a thread call which has been scheduled
+ * for execution with a thread_call_enter* variant. If the call has not
+ * yet begun executing, the pending invocation will be cancelled and TRUE
+ * will be returned. If the work item has already begun executing,
+ * thread_call_cancel_wait waits for the most recent invocation to finish. When
+ * called on a work item which has already finished, it will return FALSE immediately.
+ * Note that this routine can only be used on thread calls set up with either
+ * thread_call_allocate or thread_call_allocate_with_priority, and that invocations
+ * of the thread call after the current invocation may be in flight when
+ * thread_call_cancel_wait returns.
+ * @result TRUE if the call was successfully cancelled, FALSE otherwise.
+ */
+extern boolean_t thread_call_cancel_wait(
+ thread_call_t call);
+
+/*!
+ * @function thread_call_allocate
+ * @abstract Allocate a thread call to execute with default (high) priority.
+ * @discussion Allocates a thread call that will run with properties of
+ * THREAD_CALL_PRIORITY_HIGH, binding the first parameter to the callback.
+ * @param func Callback to invoke when thread call is scheduled.
+ * @param param0 First argument to pass to callback.
+ * @result Thread call which can be passed to thread_call_enter variants.
+ */
+extern thread_call_t thread_call_allocate(
+ thread_call_func_t func,
+ thread_call_param_t param0);
+
+/*!
+ * @function thread_call_allocate_with_priority
+ * @abstract Allocate a thread call to execute with a specified priority.
+ * @discussion Identical to thread_call_allocate, except that priority
+ * is specified by caller.
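A hedged sketch of the XNU_KERNEL_PRIVATE leeway variant, reusing the hypothetical example_call from above: with THREAD_CALL_DELAY_LEEWAY set, the caller's leeway is honored, and the kernel may choose any firing time within the window the deadline and leeway describe.

static void
example_schedule_with_leeway(uint64_t deadline_abs, uint64_t leeway_abs)
{
	/*
	 * THREAD_CALL_DELAY_LEEWAY indicates the leeway argument is
	 * meaningful; the urgency class still feeds the internal
	 * coalescing calculation.
	 */
	(void)thread_call_enter_delayed_with_leeway(example_call,
	    NULL,			/* param1 */
	    deadline_abs,
	    leeway_abs,
	    THREAD_CALL_DELAY_USER_NORMAL | THREAD_CALL_DELAY_LEEWAY);
}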
+ * @param func Callback to invoke when thread call is scheduled. + * @param param0 First argument to pass to callback. + * @param pri Priority of item. + * @result Thread call which can be passed to thread_call_enter variants. */ -extern boolean_t thread_call_cancel( - thread_call_t call); +extern thread_call_t thread_call_allocate_with_priority( + thread_call_func_t func, + thread_call_param_t param0, + thread_call_priority_t pri); + /*! - @function thread_call_cancel_wait - @abstract Attempt to cancel a pending invocation of a thread call. - If unable to cancel, wait for current invocation to finish. - @discussion Attempt to cancel a thread call which has been scheduled - for execution with a thread_call_enter* variant. If the call has not - yet begun executing, the pending invocation will be cancelled and TRUE - will be returned. If the work item has already begun executing, - thread_call_cancel_wait waits for the most recent invocation to finish. When - called on a work item which has already finished, it will return FALSE immediately. - Note that this routine can only be used on thread calls set up with either - thread_call_allocate or thread_call_allocate_with_priority, and that invocations - of the thread call after the current invocation may be in flight when - thread_call_cancel_wait returns. - @result TRUE if the call was successfully cancelled, FALSE otherwise. + * @function thread_call_allocate_with_options + * @abstract Allocate a thread call to execute with a specified priority. + * @discussion Identical to thread_call_allocate, except that priority + * and options are specified by caller. + * @param func Callback to invoke when thread call is scheduled. + * @param param0 First argument to pass to callback. + * @param pri Priority of item. + * @param options Options for item. + * @result Thread call which can be passed to thread_call_enter variants. */ -extern boolean_t thread_call_cancel_wait( - thread_call_t call); - - /*! - @function thread_call_allocate - @abstract Allocate a thread call to execute with default (high) priority. - @discussion Allocates a thread call that will run with properties of - THREAD_CALL_PRIORITY_HIGH, binding the first parameter to the callback. - @param func Callback to invoke when thread call is scheduled. - @param param0 First argument ot pass to callback. - @result Thread call which can be passed to thread_call_enter variants. - */ -extern thread_call_t thread_call_allocate( - thread_call_func_t func, - thread_call_param_t param0); - - /*! - @function thread_call_allocate_with_priority - @abstract Allocate a thread call to execute with a specified priority. - @discussion Identical to thread_call_allocate, except that priority - is specified by caller. - @param func Callback to invoke when thread call is scheduled. - @param param0 First argument to pass to callback. - @param pri Priority of item. - @result Thread call which can be passed to thread_call_enter variants. - */ -extern thread_call_t thread_call_allocate_with_priority( - thread_call_func_t func, - thread_call_param_t param0, - thread_call_priority_t pri); - - /*! - @function thread_call_allocate_with_options - @abstract Allocate a thread call to execute with a specified priority. - @discussion Identical to thread_call_allocate, except that priority - and options are specified by caller. - @param func Callback to invoke when thread call is scheduled. - @param param0 First argument to pass to callback. - @param pri Priority of item. - @param options Options for item. 
- @result Thread call which can be passed to thread_call_enter variants. - */ -extern thread_call_t thread_call_allocate_with_options( - thread_call_func_t func, - thread_call_param_t param0, - thread_call_priority_t pri, - thread_call_options_t options); +extern thread_call_t thread_call_allocate_with_options( + thread_call_func_t func, + thread_call_param_t param0, + thread_call_priority_t pri, + thread_call_options_t options); #ifdef KERNEL_PRIVATE - /*! - @function thread_call_allocate_with_qos - @abstract Allocate a thread call to execute with a specified QoS. - @discussion Identical to thread_call_allocate_with_options, except it uses the QoS namespace. - Private interface for pthread kext. - @param func Callback to invoke when thread call is scheduled. - @param param0 First argument to pass to callback. - @param qos_tier QoS tier to execute callback at (as in THREAD_QOS_POLICY) - @param options flags from thread_call_options_t to influence the thread call behavior - @result Thread call which can be passed to thread_call_enter variants. - */ +/*! + * @function thread_call_allocate_with_qos + * @abstract Allocate a thread call to execute with a specified QoS. + * @discussion Identical to thread_call_allocate_with_options, except it uses the QoS namespace. + * Private interface for pthread kext. + * @param func Callback to invoke when thread call is scheduled. + * @param param0 First argument to pass to callback. + * @param qos_tier QoS tier to execute callback at (as in THREAD_QOS_POLICY) + * @param options flags from thread_call_options_t to influence the thread call behavior + * @result Thread call which can be passed to thread_call_enter variants. + */ extern thread_call_t thread_call_allocate_with_qos(thread_call_func_t func, - thread_call_param_t param0, - int qos_tier, - thread_call_options_t options); + thread_call_param_t param0, + int qos_tier, + thread_call_options_t options); /*! - @function thread_call_wait_once - @abstract Wait for a THREAD_CALL_OPTIONS_ONCE call to finish executing if it is executing - @discussion Only works on THREAD_CALL_OPTIONS_ONCE calls - @param call The thread call to wait for - @result True if it waited, false if it did not wait + * @function thread_call_wait_once + * @abstract Wait for a THREAD_CALL_OPTIONS_ONCE call to finish executing if it is executing + * @discussion Only works on THREAD_CALL_OPTIONS_ONCE calls + * @param call The thread call to wait for + * @result True if it waited, false if it did not wait */ extern boolean_t thread_call_wait_once(thread_call_t call); #endif /* KERNEL_PRIVATE */ /*! - @function thread_call_free - @abstract Release a thread call. - @discussion Should only be used on thread calls allocated with thread_call_allocate - or thread_call_allocate_with_priority. Once thread_call_free has been called, - no other operations may be performed on a thread call. If the thread call is - currently pending, thread_call_free will return FALSE and will have no effect. - Calling thread_call_free from a thread call's own callback is safe; the work - item is not considering "pending" at that point. - @result TRUE if the thread call has been successfully released, else FALSE. - @param call The thread call to release. + * @function thread_call_free + * @abstract Release a thread call. + * @discussion Should only be used on thread calls allocated with thread_call_allocate + * or thread_call_allocate_with_priority. Once thread_call_free has been called, + * no other operations may be performed on a thread call. 
If the thread call is
+ * currently pending, thread_call_free will return FALSE and will have no effect.
+ * Calling thread_call_free from a thread call's own callback is safe; the work
+ * item is not considered "pending" at that point.
+ * @result TRUE if the thread call has been successfully released, else FALSE.
+ * @param call The thread call to release.
 */
-extern boolean_t thread_call_free(
- thread_call_t call);
+extern boolean_t thread_call_free(
+ thread_call_t call);

/*!
- @function thread_call_isactive
- @abstract Determine whether a thread call is pending or currently executing.
- @param call Thread call to examine.
- @result TRUE if the thread call is either scheduled for execution (immediately
- or at some point in the future) or is currently executing.
+ * @function thread_call_isactive
+ * @abstract Determine whether a thread call is pending or currently executing.
+ * @param call Thread call to examine.
+ * @result TRUE if the thread call is either scheduled for execution (immediately
+ * or at some point in the future) or is currently executing.
 */
-boolean_t thread_call_isactive(
- thread_call_t call);
+boolean_t thread_call_isactive(
+ thread_call_t call);

__END_DECLS

-#ifdef MACH_KERNEL_PRIVATE
+#ifdef MACH_KERNEL_PRIVATE

#include <kern/call_entry.h>

@@ -357,14 +357,14 @@ typedef enum {
} thread_call_index_t;

struct thread_call {
- struct call_entry tc_call; /* Must be first for queue macros */
- uint64_t tc_submit_count;
- uint64_t tc_finish_count;
- uint64_t tc_ttd; /* Time to deadline at creation */
- uint64_t tc_soft_deadline;
- thread_call_index_t tc_index;
- uint32_t tc_flags;
- int32_t tc_refs;
+ struct call_entry tc_call; /* Must be first for queue macros */
+ uint64_t tc_submit_count;
+ uint64_t tc_finish_count;
+ uint64_t tc_ttd; /* Time to deadline at creation */
+ uint64_t tc_soft_deadline;
+ thread_call_index_t tc_index;
+ uint32_t tc_flags;
+ int32_t tc_refs;
};

#define THREAD_CALL_ALLOC 0x01 /* memory owned by thread_call.c */
@@ -379,17 +379,17 @@ struct thread_call {

typedef struct thread_call thread_call_data_t;

-extern void thread_call_initialize(void);
+extern void thread_call_initialize(void);

-extern void thread_call_setup(
- thread_call_t call,
- thread_call_func_t func,
- thread_call_param_t param0);
+extern void thread_call_setup(
+ thread_call_t call,
+ thread_call_func_t func,
+ thread_call_param_t param0);

-extern void thread_call_delayed_timer_rescan_all(void);
-#endif /* MACH_KERNEL_PRIVATE */
+extern void thread_call_delayed_timer_rescan_all(void);
+#endif /* MACH_KERNEL_PRIVATE */

-#ifdef XNU_KERNEL_PRIVATE
+#ifdef XNU_KERNEL_PRIVATE

__BEGIN_DECLS

@@ -400,17 +400,17 @@ __BEGIN_DECLS
 * is exhausted.
 */

-extern void thread_call_func_delayed(
- thread_call_func_t func,
- thread_call_param_t param,
- uint64_t deadline);
+extern void thread_call_func_delayed(
+ thread_call_func_t func,
+ thread_call_param_t param,
+ uint64_t deadline);

-extern void thread_call_func_delayed_with_leeway(
- thread_call_func_t func,
- thread_call_param_t param,
- uint64_t deadline,
- uint64_t leeway,
- uint32_t flags);
+extern void thread_call_func_delayed_with_leeway(
+ thread_call_func_t func,
+ thread_call_param_t param,
+ uint64_t deadline,
+ uint64_t leeway,
+ uint32_t flags);

/*
 * This iterates all of the pending or delayed thread calls in the group,
@@ -418,18 +418,18 @@ extern void thread_call_func_delayed_with_leeway(
 *
 * This is deprecated, switch to an allocated thread call instead.
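Since the func-based variants are deprecated in favor of allocated calls, a minimal MACH_KERNEL_PRIVATE sketch of the caller-owned alternative (hypothetical names, not part of this patch; example_callback is the invented callback from the earlier sketch):

static thread_call_data_t example_static_call;	/* caller-owned storage */

static void
example_init(void)
{
	/*
	 * thread_call_setup() initializes caller-owned storage rather than
	 * allocating; per the comments above, only thread_call_allocate*'d
	 * calls may be passed to thread_call_free() or cancel_wait().
	 */
	thread_call_setup(&example_static_call, example_callback, NULL);
	(void)thread_call_enter(&example_static_call);
}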
*/ -extern boolean_t thread_call_func_cancel( - thread_call_func_t func, - thread_call_param_t param, - boolean_t cancel_all); +extern boolean_t thread_call_func_cancel( + thread_call_func_t func, + thread_call_param_t param, + boolean_t cancel_all); -/* +/* * Called on the wake path to adjust the thread callouts running in mach_continuous_time */ -void adjust_cont_time_thread_calls(void); +void adjust_cont_time_thread_calls(void); __END_DECLS -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -#endif /* _KERN_THREAD_CALL_H_ */ +#endif /* _KERN_THREAD_CALL_H_ */ diff --git a/osfmk/kern/thread_group.c b/osfmk/kern/thread_group.c index 87d740bc4..f67111223 100644 --- a/osfmk/kern/thread_group.c +++ b/osfmk/kern/thread_group.c @@ -44,7 +44,12 @@ #if CONFIG_EMBEDDED void -sched_perfcontrol_thread_group_recommend(void *machine_data __unused, cluster_type_t new_recommendation __unused) +thread_group_join_io_storage(void) +{ +} + +void +sched_perfcontrol_thread_group_recommend(void *machine_data __unused, cluster_type_t new_recommendation __unused) { } #endif /* CONFIG_EMBEDDED */ diff --git a/osfmk/kern/thread_group.h b/osfmk/kern/thread_group.h index e19269ee2..3e5aaa61d 100644 --- a/osfmk/kern/thread_group.h +++ b/osfmk/kern/thread_group.h @@ -2,7 +2,7 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/kern/thread_kernel_state.h b/osfmk/kern/thread_kernel_state.h index 521835ddc..36e95fb3a 100644 --- a/osfmk/kern/thread_kernel_state.h +++ b/osfmk/kern/thread_kernel_state.h @@ -26,13 +26,13 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _KERN_THREAD_KERNEL_STATE_H_ +#ifndef _KERN_THREAD_KERNEL_STATE_H_ #include struct thread_kernel_state { - machine_thread_kernel_state machine; /* must be first */ - kern_allocation_name_t allocation_name; + machine_thread_kernel_state machine; /* must be first */ + kern_allocation_name_t allocation_name; } __attribute__((aligned(16))); typedef struct thread_kernel_state * thread_kernel_state_t; diff --git a/osfmk/kern/thread_policy.c b/osfmk/kern/thread_policy.c index 7b7e4f87d..75f81a456 100644 --- a/osfmk/kern/thread_policy.c +++ b/osfmk/kern/thread_policy.c @@ -143,7 +143,8 @@ static void thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token); void -thread_policy_init(void) { +thread_policy_init(void) +{ if (PE_parse_boot_argn("qos_override_mode", &qos_override_mode, sizeof(qos_override_mode))) { printf("QOS override mode: 0x%08x\n", qos_override_mode); } else { @@ -152,22 +153,22 @@ thread_policy_init(void) { } boolean_t -thread_has_qos_policy(thread_t thread) { +thread_has_qos_policy(thread_t thread) +{ return (proc_get_thread_policy(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS) != THREAD_QOS_UNSPECIFIED) ? TRUE : FALSE; } static void thread_remove_qos_policy_locked(thread_t thread, - task_pend_token_t pend_token) + task_pend_token_t pend_token) { - __unused int prev_qos = thread->requested_policy.thrp_qos; DTRACE_PROC2(qos__remove, thread_t, thread, int, prev_qos); proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, - THREAD_QOS_UNSPECIFIED, 0, pend_token); + THREAD_QOS_UNSPECIFIED, 0, pend_token); } kern_return_t @@ -212,28 +213,30 @@ thread_qos_scaled_relative_priority(int qos, int qos_relprio) int next_lower_qos; /* Fast path, since no validation or scaling is needed */ - if (qos_relprio == 0) return 0; + if (qos_relprio == 0) { + return 0; + } switch (qos) { - case THREAD_QOS_USER_INTERACTIVE: - next_lower_qos = THREAD_QOS_USER_INITIATED; - break; - case THREAD_QOS_USER_INITIATED: - next_lower_qos = THREAD_QOS_LEGACY; - break; - case THREAD_QOS_LEGACY: - next_lower_qos = THREAD_QOS_UTILITY; - break; - case THREAD_QOS_UTILITY: - next_lower_qos = THREAD_QOS_BACKGROUND; - break; - case THREAD_QOS_MAINTENANCE: - case THREAD_QOS_BACKGROUND: - next_lower_qos = 0; - break; - default: - panic("Unrecognized QoS %d", qos); - return 0; + case THREAD_QOS_USER_INTERACTIVE: + next_lower_qos = THREAD_QOS_USER_INITIATED; + break; + case THREAD_QOS_USER_INITIATED: + next_lower_qos = THREAD_QOS_LEGACY; + break; + case THREAD_QOS_LEGACY: + next_lower_qos = THREAD_QOS_UTILITY; + break; + case THREAD_QOS_UTILITY: + next_lower_qos = THREAD_QOS_BACKGROUND; + break; + case THREAD_QOS_MAINTENANCE: + case THREAD_QOS_BACKGROUND: + next_lower_qos = 0; + break; + default: + panic("Unrecognized QoS %d", qos); + return 0; } int prio_range_max = thread_qos_policy_params.qos_pri[qos]; @@ -259,25 +262,28 @@ boolean_t allow_qos_policy_set = FALSE; kern_return_t thread_policy_set( - thread_t thread, - thread_policy_flavor_t flavor, - thread_policy_t policy_info, - mach_msg_type_number_t count) + thread_t thread, + thread_policy_flavor_t flavor, + thread_policy_t policy_info, + mach_msg_type_number_t 
count) { thread_qos_policy_data_t req_qos; kern_return_t kr; - + req_qos.qos_tier = THREAD_QOS_UNSPECIFIED; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } if (allow_qos_policy_set == FALSE) { - if (thread_is_static_param(thread)) - return (KERN_POLICY_STATIC); + if (thread_is_static_param(thread)) { + return KERN_POLICY_STATIC; + } - if (flavor == THREAD_QOS_POLICY) - return (KERN_INVALID_ARGUMENT); + if (flavor == THREAD_QOS_POLICY) { + return KERN_INVALID_ARGUMENT; + } } /* Threads without static_param set reset their QoS when other policies are applied. */ @@ -307,10 +313,10 @@ thread_policy_set( kern_return_t thread_policy_set_internal( - thread_t thread, - thread_policy_flavor_t flavor, - thread_policy_t policy_info, - mach_msg_type_number_t count) + thread_t thread, + thread_policy_flavor_t flavor, + thread_policy_t policy_info, + mach_msg_type_number_t count) { kern_return_t result = KERN_SUCCESS; struct task_pend_token pend_token = {}; @@ -319,11 +325,10 @@ thread_policy_set_internal( if (!thread->active) { thread_mtx_unlock(thread); - return (KERN_TERMINATED); + return KERN_TERMINATED; } switch (flavor) { - case THREAD_EXTENDED_POLICY: { boolean_t timeshare = TRUE; @@ -360,9 +365,9 @@ thread_policy_set_internal( } info = (thread_time_constraint_policy_t)policy_info; - if (info->constraint < info->computation || - info->computation > max_rt_quantum || - info->computation < min_rt_quantum ) { + if (info->constraint < info->computation || + info->computation > max_rt_quantum || + info->computation < min_rt_quantum) { result = KERN_INVALID_ARGUMENT; break; } @@ -451,10 +456,11 @@ thread_policy_set_internal( int enable; - if (info->priority == THREAD_BACKGROUND_POLICY_DARWIN_BG) + if (info->priority == THREAD_BACKGROUND_POLICY_DARWIN_BG) { enable = TASK_POLICY_ENABLE; - else + } else { enable = TASK_POLICY_DISABLE; + } int category = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL; @@ -474,13 +480,14 @@ thread_policy_set_internal( break; } - if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) != KERN_SUCCESS) + if ((result = qos_throughput_policy_validate(info->thread_throughput_qos_tier)) != KERN_SUCCESS) { break; + } tqos = qos_extract(info->thread_throughput_qos_tier); proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_THROUGH_QOS, tqos, 0, &pend_token); + TASK_POLICY_THROUGH_QOS, tqos, 0, &pend_token); break; } @@ -495,13 +502,14 @@ thread_policy_set_internal( break; } - if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) != KERN_SUCCESS) + if ((result = qos_latency_policy_validate(info->thread_latency_qos_tier)) != KERN_SUCCESS) { break; + } lqos = qos_extract(info->thread_latency_qos_tier); proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_LATENCY_QOS, lqos, 0, &pend_token); + TASK_POLICY_LATENCY_QOS, lqos, 0, &pend_token); break; } @@ -531,7 +539,7 @@ thread_policy_set_internal( } proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_AND_RELPRIO, - info->qos_tier, -info->tier_importance, &pend_token); + info->qos_tier, -info->tier_importance, &pend_token); break; } @@ -545,7 +553,7 @@ thread_policy_set_internal( thread_policy_update_complete_unlocked(thread, &pend_token); - return (result); + return result; } /* @@ -553,16 +561,17 @@ thread_policy_set_internal( * Both result in FIXED mode scheduling. 
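The time-constraint validation above is what a Mach caller trips over. As a sketch, here is the user-space entry point for the same flavor (standard mach/thread_policy.h types; the helper name and the idea of passing caller-chosen values are hypothetical):

#include <mach/mach.h>
#include <mach/thread_policy.h>

kern_return_t
example_make_realtime(uint32_t period, uint32_t computation, uint32_t constraint)
{
	thread_time_constraint_policy_data_t policy = {
		.period = period,		/* all values in absolute time units */
		.computation = computation,	/* must fit [min_rt_quantum, max_rt_quantum] */
		.constraint = constraint,	/* must be >= computation */
		.preemptible = TRUE,
	};

	/* NB: mach_thread_self() returns a ref the caller should deallocate. */
	return thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
	    (thread_policy_t)&policy, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
}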
*/ static sched_mode_t -convert_policy_to_sched_mode(integer_t policy) { +convert_policy_to_sched_mode(integer_t policy) +{ switch (policy) { - case POLICY_TIMESHARE: - return TH_MODE_TIMESHARE; - case POLICY_RR: - case POLICY_FIFO: - return TH_MODE_FIXED; - default: - panic("unexpected sched policy: %d", policy); - return TH_MODE_NONE; + case POLICY_TIMESHARE: + return TH_MODE_TIMESHARE; + case POLICY_RR: + case POLICY_FIFO: + return TH_MODE_FIXED; + default: + panic("unexpected sched policy: %d", policy); + return TH_MODE_NONE; } } @@ -572,9 +581,9 @@ convert_policy_to_sched_mode(integer_t policy) { */ static kern_return_t thread_set_mode_and_absolute_pri_internal(thread_t thread, - sched_mode_t mode, - integer_t priority, - task_pend_token_t pend_token) + sched_mode_t mode, + integer_t priority, + task_pend_token_t pend_token) { kern_return_t kr = KERN_SUCCESS; @@ -602,28 +611,31 @@ thread_set_mode_and_absolute_pri_internal(thread_t thread, * TODO: Store the absolute priority value instead */ - if (priority >= thread->max_priority) + if (priority >= thread->max_priority) { priority = thread->max_priority - thread->task_priority; - else if (priority >= MINPRI_KERNEL) + } else if (priority >= MINPRI_KERNEL) { priority -= MINPRI_KERNEL; - else if (priority >= MINPRI_RESERVED) + } else if (priority >= MINPRI_RESERVED) { priority -= MINPRI_RESERVED; - else + } else { priority -= BASEPRI_DEFAULT; + } priority += thread->task_priority; - if (priority > thread->max_priority) + if (priority > thread->max_priority) { priority = thread->max_priority; - else if (priority < MINPRI) + } else if (priority < MINPRI) { priority = MINPRI; + } thread->importance = priority - thread->task_priority; thread_set_user_sched_mode_and_recompute_pri(thread, mode); - if (mode != old_mode) + if (mode != old_mode) { pend_token->tpt_update_thread_sfi = 1; + } unlock: thread_unlock(thread); @@ -675,10 +687,10 @@ thread_reset_workq_qos(thread_t thread, uint32_t qos) thread_lock(thread); proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_AND_RELPRIO, qos, 0, &pend_token); + TASK_POLICY_QOS_AND_RELPRIO, qos, 0, &pend_token); proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_WORKQ_OVERRIDE, THREAD_QOS_UNSPECIFIED, 0, - &pend_token); + TASK_POLICY_QOS_WORKQ_OVERRIDE, THREAD_QOS_UNSPECIFIED, 0, + &pend_token); assert(pend_token.tpt_update_sockets == 0); @@ -706,7 +718,7 @@ thread_set_workq_override(thread_t thread, uint32_t qos) thread_lock(thread); proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_WORKQ_OVERRIDE, qos, 0, &pend_token); + TASK_POLICY_QOS_WORKQ_OVERRIDE, qos, 0, &pend_token); assert(pend_token.tpt_update_sockets == 0); @@ -725,9 +737,9 @@ thread_set_workq_override(thread_t thread, uint32_t qos) */ void thread_set_workq_pri(thread_t thread, - thread_qos_t qos, - integer_t priority, - integer_t policy) + thread_qos_t qos, + integer_t priority, + integer_t policy) { struct task_pend_token pend_token = {}; sched_mode_t mode = convert_policy_to_sched_mode(policy); @@ -735,17 +747,18 @@ thread_set_workq_pri(thread_t thread, assert(qos < THREAD_QOS_LAST); assert(thread->static_param); - if (!thread->static_param || !thread->active) + if (!thread->static_param || !thread->active) { return; + } spl_t s = splsched(); thread_lock(thread); proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_AND_RELPRIO, qos, 0, &pend_token); + TASK_POLICY_QOS_AND_RELPRIO, qos, 0, &pend_token); 
proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_WORKQ_OVERRIDE, THREAD_QOS_UNSPECIFIED, - 0, &pend_token); + TASK_POLICY_QOS_WORKQ_OVERRIDE, THREAD_QOS_UNSPECIFIED, + 0, &pend_token); thread_unlock(thread); splx(s); @@ -754,11 +767,12 @@ thread_set_workq_pri(thread_t thread, __assert_only kern_return_t kr; kr = thread_set_mode_and_absolute_pri_internal(thread, mode, priority, - &pend_token); + &pend_token); assert(kr == KERN_SUCCESS); - if (pend_token.tpt_update_thread_sfi) + if (pend_token.tpt_update_thread_sfi) { sfi_reevaluate(thread); + } } /* @@ -771,8 +785,8 @@ thread_set_workq_pri(thread_t thread, */ kern_return_t thread_set_mode_and_absolute_pri(thread_t thread, - integer_t policy, - integer_t priority) + integer_t policy, + integer_t priority) { kern_return_t kr = KERN_SUCCESS; struct task_pend_token pend_token = {}; @@ -792,8 +806,9 @@ thread_set_mode_and_absolute_pri(thread_t thread, } /* Setting legacy policies on threads kills the current QoS */ - if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) + if (thread->requested_policy.thrp_qos != THREAD_QOS_UNSPECIFIED) { thread_remove_qos_policy_locked(thread, &pend_token); + } kr = thread_set_mode_and_absolute_pri_internal(thread, mode, priority, &pend_token); @@ -802,7 +817,7 @@ unlock: thread_policy_update_complete_unlocked(thread, &pend_token); - return (kr); + return kr; } /* @@ -816,8 +831,9 @@ unlock: static void thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode) { - if (thread->policy_reset) + if (thread->policy_reset) { return; + } boolean_t removed = thread_run_queue_remove(thread); @@ -826,15 +842,17 @@ thread_set_user_sched_mode_and_recompute_pri(thread_t thread, sched_mode_t mode) * That way there's zero confusion over which the user wants * and which the kernel wants. */ - if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) + if (thread->sched_flags & TH_SFLAG_DEMOTED_MASK) { thread->saved_mode = mode; - else + } else { sched_set_thread_mode(thread, mode); + } thread_recompute_priority(thread); - if (removed) + if (removed) { thread_run_queue_reinsert(thread, SCHED_TAILQ); + } } /* called at splsched with thread lock locked */ @@ -866,30 +884,30 @@ thread_update_qos_cpu_time_locked(thread_t thread) /* Update the task-level effective and requested qos stats atomically, because we don't have the task lock. 
*/ switch (thread->effective_policy.thep_qos) { - case THREAD_QOS_UNSPECIFIED: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_default; break; - case THREAD_QOS_MAINTENANCE: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_maintenance; break; - case THREAD_QOS_BACKGROUND: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_background; break; - case THREAD_QOS_UTILITY: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_utility; break; - case THREAD_QOS_LEGACY: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_legacy; break; - case THREAD_QOS_USER_INITIATED: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_user_initiated; break; - case THREAD_QOS_USER_INTERACTIVE: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_user_interactive; break; - default: - panic("unknown effective QoS: %d", thread->effective_policy.thep_qos); + case THREAD_QOS_UNSPECIFIED: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_default; break; + case THREAD_QOS_MAINTENANCE: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_maintenance; break; + case THREAD_QOS_BACKGROUND: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_background; break; + case THREAD_QOS_UTILITY: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_utility; break; + case THREAD_QOS_LEGACY: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_legacy; break; + case THREAD_QOS_USER_INITIATED: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_user_initiated; break; + case THREAD_QOS_USER_INTERACTIVE: task_counter = &task->cpu_time_eqos_stats.cpu_time_qos_user_interactive; break; + default: + panic("unknown effective QoS: %d", thread->effective_policy.thep_qos); } OSAddAtomic64(timer_delta, task_counter); /* Update the task-level qos stats atomically, because we don't have the task lock. 
*/ switch (thread->requested_policy.thrp_qos) { - case THREAD_QOS_UNSPECIFIED: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_default; break; - case THREAD_QOS_MAINTENANCE: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_maintenance; break; - case THREAD_QOS_BACKGROUND: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_background; break; - case THREAD_QOS_UTILITY: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_utility; break; - case THREAD_QOS_LEGACY: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_legacy; break; - case THREAD_QOS_USER_INITIATED: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_user_initiated; break; - case THREAD_QOS_USER_INTERACTIVE: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_user_interactive; break; - default: - panic("unknown requested QoS: %d", thread->requested_policy.thrp_qos); + case THREAD_QOS_UNSPECIFIED: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_default; break; + case THREAD_QOS_MAINTENANCE: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_maintenance; break; + case THREAD_QOS_BACKGROUND: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_background; break; + case THREAD_QOS_UTILITY: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_utility; break; + case THREAD_QOS_LEGACY: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_legacy; break; + case THREAD_QOS_USER_INITIATED: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_user_initiated; break; + case THREAD_QOS_USER_INTERACTIVE: task_counter = &task->cpu_time_rqos_stats.cpu_time_qos_user_interactive; break; + default: + panic("unknown requested QoS: %d", thread->requested_policy.thrp_qos); } OSAddAtomic64(timer_delta, task_counter); @@ -922,12 +940,13 @@ thread_update_qos_cpu_time(thread_t thread) */ void thread_recompute_priority( - thread_t thread) + thread_t thread) { - integer_t priority; + integer_t priority; - if (thread->policy_reset) + if (thread->policy_reset) { return; + } if (thread->sched_mode == TH_MODE_REALTIME) { sched_set_thread_base_priority(thread, BASEPRI_RTQUEUES); @@ -953,12 +972,13 @@ thread_recompute_priority( priority += qos_scaled_relprio; } else { - if (thread->importance > MAXPRI) + if (thread->importance > MAXPRI) { priority = MAXPRI; - else if (thread->importance < -MAXPRI) + } else if (thread->importance < -MAXPRI) { priority = -MAXPRI; - else + } else { priority = thread->importance; + } priority += thread->task_priority; } @@ -974,14 +994,16 @@ thread_recompute_priority( * Note that thread->importance is user-settable to any integer * via THREAD_PRECEDENCE_POLICY. */ - if (priority > thread->max_priority) + if (priority > thread->max_priority) { priority = thread->max_priority; - else if (priority < MINPRI) + } else if (priority < MINPRI) { priority = MINPRI; + } if (thread->saved_mode == TH_MODE_REALTIME && - thread->sched_flags & TH_SFLAG_FAILSAFE) + thread->sched_flags & TH_SFLAG_FAILSAFE) { priority = DEPRESSPRI; + } if (thread->effective_policy.thep_terminated == TRUE) { /* @@ -991,16 +1013,19 @@ thread_recompute_priority( * so that the thread is no longer clamped to background * during the final exit phase. 
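A worked example of the clamping arithmetic in thread_recompute_priority (the concrete values are hypothetical):

/*
 * Worked example (hypothetical values):
 *   task_priority = 31, importance = 10 (via THREAD_PRECEDENCE_POLICY)
 *   non-RT branch: priority = 10 + 31 = 41, then clamped into
 *                  [MINPRI, thread->max_priority]
 *   terminating thread: additionally floored at task_priority and
 *                  BASEPRI_DEFAULT, so it is no longer clamped to
 *                  background while exiting
 */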
*/ - if (priority < thread->task_priority) + if (priority < thread->task_priority) { priority = thread->task_priority; - if (priority < BASEPRI_DEFAULT) + } + if (priority < BASEPRI_DEFAULT) { priority = BASEPRI_DEFAULT; + } } #if CONFIG_EMBEDDED /* No one can have a base priority less than MAXPRI_THROTTLE */ - if (priority < MAXPRI_THROTTLE) + if (priority < MAXPRI_THROTTLE) { priority = MAXPRI_THROTTLE; + } #endif /* CONFIG_EMBEDDED */ sched_set_thread_base_priority(thread, priority); @@ -1009,10 +1034,10 @@ thread_recompute_priority( /* Called with the task lock held, but not the thread mutex or spinlock */ void thread_policy_update_tasklocked( - thread_t thread, - integer_t priority, - integer_t max_priority, - task_pend_token_t pend_token) + thread_t thread, + integer_t priority, + integer_t max_priority, + task_pend_token_t pend_token) { thread_mtx_lock(thread); @@ -1064,20 +1089,22 @@ thread_policy_update_tasklocked( */ void thread_policy_reset( - thread_t thread) + thread_t thread) { - spl_t s; + spl_t s; assert(thread == current_thread()); s = splsched(); thread_lock(thread); - if (thread->sched_flags & TH_SFLAG_FAILSAFE) + if (thread->sched_flags & TH_SFLAG_FAILSAFE) { sched_thread_mode_undemote(thread, TH_SFLAG_FAILSAFE); + } - if (thread->sched_flags & TH_SFLAG_THROTTLED) + if (thread->sched_flags & TH_SFLAG_THROTTLED) { sched_thread_mode_undemote(thread, TH_SFLAG_THROTTLED); + } /* At this point, the various demotions should be inactive */ assert(!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)); @@ -1102,50 +1129,51 @@ thread_policy_reset( kern_return_t thread_policy_get( - thread_t thread, - thread_policy_flavor_t flavor, - thread_policy_t policy_info, - mach_msg_type_number_t *count, - boolean_t *get_default) + thread_t thread, + thread_policy_flavor_t flavor, + thread_policy_t policy_info, + mach_msg_type_number_t *count, + boolean_t *get_default) { - kern_return_t result = KERN_SUCCESS; + kern_return_t result = KERN_SUCCESS; - if (thread == THREAD_NULL) - return (KERN_INVALID_ARGUMENT); + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } thread_mtx_lock(thread); if (!thread->active) { thread_mtx_unlock(thread); - return (KERN_TERMINATED); + return KERN_TERMINATED; } switch (flavor) { - case THREAD_EXTENDED_POLICY: { - boolean_t timeshare = TRUE; + boolean_t timeshare = TRUE; if (!(*get_default)) { spl_t s = splsched(); thread_lock(thread); - if ( (thread->sched_mode != TH_MODE_REALTIME) && - (thread->saved_mode != TH_MODE_REALTIME) ) { - if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) + if ((thread->sched_mode != TH_MODE_REALTIME) && + (thread->saved_mode != TH_MODE_REALTIME)) { + if (!(thread->sched_flags & TH_SFLAG_DEMOTED_MASK)) { timeshare = (thread->sched_mode == TH_MODE_TIMESHARE) != 0; - else + } else { timeshare = (thread->saved_mode == TH_MODE_TIMESHARE) != 0; - } - else + } + } else { *get_default = TRUE; + } thread_unlock(thread); splx(s); } if (*count >= THREAD_EXTENDED_POLICY_COUNT) { - thread_extended_policy_t info; + thread_extended_policy_t info; info = (thread_extended_policy_t)policy_info; info->timeshare = timeshare; @@ -1156,7 +1184,7 @@ thread_policy_get( case THREAD_TIME_CONSTRAINT_POLICY: { - thread_time_constraint_policy_t info; + thread_time_constraint_policy_t info; if (*count < THREAD_TIME_CONSTRAINT_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; @@ -1169,15 +1197,15 @@ thread_policy_get( spl_t s = splsched(); thread_lock(thread); - if ( (thread->sched_mode == TH_MODE_REALTIME) || - (thread->saved_mode == TH_MODE_REALTIME) ) { 
+ if ((thread->sched_mode == TH_MODE_REALTIME) || + (thread->saved_mode == TH_MODE_REALTIME)) { info->period = thread->realtime.period; info->computation = thread->realtime.computation; info->constraint = thread->realtime.constraint; info->preemptible = thread->realtime.preemptible; - } - else + } else { *get_default = TRUE; + } thread_unlock(thread); splx(s); @@ -1195,7 +1223,7 @@ thread_policy_get( case THREAD_PRECEDENCE_POLICY: { - thread_precedence_policy_t info; + thread_precedence_policy_t info; if (*count < THREAD_PRECEDENCE_POLICY_COUNT) { result = KERN_INVALID_ARGUMENT; @@ -1212,16 +1240,16 @@ thread_policy_get( thread_unlock(thread); splx(s); - } - else + } else { info->importance = 0; + } break; } case THREAD_AFFINITY_POLICY: { - thread_affinity_policy_t info; + thread_affinity_policy_t info; if (!thread_affinity_is_supported()) { result = KERN_NOT_SUPPORTED; @@ -1234,17 +1262,18 @@ thread_policy_get( info = (thread_affinity_policy_t)policy_info; - if (!(*get_default)) + if (!(*get_default)) { info->affinity_tag = thread_affinity_get(thread); - else + } else { info->affinity_tag = THREAD_AFFINITY_TAG_NULL; + } break; } case THREAD_POLICY_STATE: { - thread_policy_state_t info; + thread_policy_state_t info; if (*count < THREAD_POLICY_STATE_COUNT) { result = KERN_INVALID_ARGUMENT; @@ -1286,7 +1315,7 @@ thread_policy_get( break; } - + case THREAD_LATENCY_QOS_POLICY: { thread_latency_qos_policy_t info = (thread_latency_qos_policy_t) policy_info; @@ -1339,7 +1368,7 @@ thread_policy_get( if (!(*get_default)) { int relprio_value = 0; info->qos_tier = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_AND_RELPRIO, &relprio_value); + TASK_POLICY_QOS_AND_RELPRIO, &relprio_value); info->tier_importance = -relprio_value; } else { @@ -1357,16 +1386,16 @@ thread_policy_get( thread_mtx_unlock(thread); - return (result); + return result; } void thread_policy_create(thread_t thread) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_START, - thread_tid(thread), theffective_0(thread), - theffective_1(thread), thread->base_pri, 0); + (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_START, + thread_tid(thread), theffective_0(thread), + theffective_1(thread), thread->base_pri, 0); /* We pass a pend token but ignore it */ struct task_pend_token pend_token = {}; @@ -1374,25 +1403,25 @@ thread_policy_create(thread_t thread) thread_policy_update_internal_spinlocked(thread, TRUE, &pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_END, - thread_tid(thread), theffective_0(thread), - theffective_1(thread), thread->base_pri, 0); + (IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD))) | DBG_FUNC_END, + thread_tid(thread), theffective_0(thread), + theffective_1(thread), thread->base_pri, 0); } static void thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, task_pend_token_t pend_token) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD) | DBG_FUNC_START), - thread_tid(thread), theffective_0(thread), - theffective_1(thread), thread->base_pri, 0); + (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD) | DBG_FUNC_START), + thread_tid(thread), theffective_0(thread), + theffective_1(thread), thread->base_pri, 0); thread_policy_update_internal_spinlocked(thread, recompute_priority, pend_token); 
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD)) | DBG_FUNC_END, - thread_tid(thread), theffective_0(thread), - theffective_1(thread), thread->base_pri, 0); + (IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD)) | DBG_FUNC_END, + thread_tid(thread), theffective_0(thread), + theffective_1(thread), thread->base_pri, 0); } @@ -1409,7 +1438,7 @@ thread_policy_update_spinlocked(thread_t thread, boolean_t recompute_priority, t */ static void thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_priority, - task_pend_token_t pend_token) + task_pend_token_t pend_token) { /* * Step 1: @@ -1443,10 +1472,11 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr /* A task clamp will result in an effective QoS even when requested is UNSPECIFIED */ if (task_effective.tep_qos_clamp != THREAD_QOS_UNSPECIFIED) { - if (next.thep_qos != THREAD_QOS_UNSPECIFIED) + if (next.thep_qos != THREAD_QOS_UNSPECIFIED) { next.thep_qos = MIN(task_effective.tep_qos_clamp, next.thep_qos); - else + } else { next.thep_qos = task_effective.tep_qos_clamp; + } } /* @@ -1457,7 +1487,7 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr /* The ceiling only applies to threads that are in the QoS world */ if (task_effective.tep_qos_ceiling != THREAD_QOS_UNSPECIFIED && - next.thep_qos != THREAD_QOS_UNSPECIFIED) { + next.thep_qos != THREAD_QOS_UNSPECIFIED) { next.thep_qos = MIN(task_effective.tep_qos_ceiling, next.thep_qos); } @@ -1490,30 +1520,37 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr * but only some types of darwinbg change the sockets * after they're created */ - if (requested.thrp_int_darwinbg || requested.thrp_ext_darwinbg) + if (requested.thrp_int_darwinbg || requested.thrp_ext_darwinbg) { wants_all_sockets_bg = wants_darwinbg = TRUE; + } - if (requested.thrp_pidbind_bg) + if (requested.thrp_pidbind_bg) { wants_all_sockets_bg = wants_darwinbg = TRUE; + } - if (task_effective.tep_darwinbg) + if (task_effective.tep_darwinbg) { wants_darwinbg = TRUE; + } if (next.thep_qos == THREAD_QOS_BACKGROUND || - next.thep_qos == THREAD_QOS_MAINTENANCE) + next.thep_qos == THREAD_QOS_MAINTENANCE) { wants_darwinbg = TRUE; + } /* Calculate side effects of DARWIN_BG */ - if (wants_darwinbg) + if (wants_darwinbg) { next.thep_darwinbg = 1; + } - if (next.thep_darwinbg || task_effective.tep_new_sockets_bg) + if (next.thep_darwinbg || task_effective.tep_new_sockets_bg) { next.thep_new_sockets_bg = 1; + } /* Don't use task_effective.tep_all_sockets_bg here */ - if (wants_all_sockets_bg) + if (wants_all_sockets_bg) { next.thep_all_sockets_bg = 1; + } /* darwinbg implies background QOS (or lower) */ if (next.thep_darwinbg && @@ -1527,8 +1564,9 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr int iopol = THROTTLE_LEVEL_TIER0; /* Factor in the task's IO policy */ - if (next.thep_darwinbg) + if (next.thep_darwinbg) { iopol = MAX(iopol, task_effective.tep_bg_iotier); + } iopol = MAX(iopol, task_effective.tep_io_tier); @@ -1547,15 +1585,17 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr */ boolean_t qos_io_override_active = FALSE; if (thread_qos_policy_params.qos_iotier[next.thep_qos] < - thread_qos_policy_params.qos_iotier[requested.thrp_qos]) + thread_qos_policy_params.qos_iotier[requested.thrp_qos]) { qos_io_override_active = TRUE; + } /* Calculate Passive IO policy */ - if (requested.thrp_ext_iopassive || - 
requested.thrp_int_iopassive || - qos_io_override_active || - task_effective.tep_io_passive ) + if (requested.thrp_ext_iopassive || + requested.thrp_int_iopassive || + qos_io_override_active || + task_effective.tep_io_passive) { next.thep_io_passive = 1; + } /* Calculate timer QOS */ uint32_t latency_qos = requested.thrp_latency_qos; @@ -1600,20 +1640,23 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr * Pend updates that can't be done while holding the thread lock */ - if (prev.thep_all_sockets_bg != next.thep_all_sockets_bg) + if (prev.thep_all_sockets_bg != next.thep_all_sockets_bg) { pend_token->tpt_update_sockets = 1; + } /* TODO: Doesn't this only need to be done if the throttle went up? */ - if (prev.thep_io_tier != next.thep_io_tier) + if (prev.thep_io_tier != next.thep_io_tier) { pend_token->tpt_update_throttle = 1; + } /* * Check for the attributes that sfi_thread_classify() consults, * and trigger SFI re-evaluation. */ - if (prev.thep_qos != next.thep_qos || - prev.thep_darwinbg != next.thep_darwinbg ) + if (prev.thep_qos != next.thep_qos || + prev.thep_darwinbg != next.thep_darwinbg) { pend_token->tpt_update_thread_sfi = 1; + } /* * Step 5: @@ -1621,11 +1664,11 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr */ /* Check for the attributes that thread_recompute_priority() consults */ - if (prev.thep_qos != next.thep_qos || - prev.thep_qos_relprio != next.thep_qos_relprio || - prev.thep_qos_ui_is_urgent != next.thep_qos_ui_is_urgent || - prev.thep_terminated != next.thep_terminated || - pend_token->tpt_force_recompute_pri == 1 || + if (prev.thep_qos != next.thep_qos || + prev.thep_qos_relprio != next.thep_qos_relprio || + prev.thep_qos_ui_is_urgent != next.thep_qos_ui_is_urgent || + prev.thep_terminated != next.thep_terminated || + pend_token->tpt_force_recompute_pri == 1 || recompute_priority) { thread_recompute_priority(thread); } @@ -1641,16 +1684,17 @@ thread_policy_update_internal_spinlocked(thread_t thread, boolean_t recompute_pr */ void proc_set_thread_policy_with_tid(task_t task, - uint64_t tid, - int category, - int flavor, - int value) + uint64_t tid, + int category, + int flavor, + int value) { /* takes task lock, returns ref'ed thread or NULL */ thread_t thread = task_findtid(task, tid); - if (thread == THREAD_NULL) + if (thread == THREAD_NULL) { return; + } proc_set_thread_policy(thread, category, flavor, value); @@ -1664,9 +1708,9 @@ proc_set_thread_policy_with_tid(task_t task, */ void proc_set_thread_policy(thread_t thread, - int category, - int flavor, - int value) + int category, + int flavor, + int value) { struct task_pend_token pend_token = {}; @@ -1694,15 +1738,18 @@ void thread_policy_update_complete_unlocked(thread_t thread, task_pend_token_t pend_token) { #ifdef MACH_BSD - if (pend_token->tpt_update_sockets) + if (pend_token->tpt_update_sockets) { proc_apply_task_networkbg(thread->task->bsd_info, thread); + } #endif /* MACH_BSD */ - if (pend_token->tpt_update_throttle) + if (pend_token->tpt_update_throttle) { rethrottle_thread(thread->uthread); + } - if (pend_token->tpt_update_thread_sfi) + if (pend_token->tpt_update_thread_sfi) { sfi_reevaluate(thread); + } } /* @@ -1711,11 +1758,11 @@ thread_policy_update_complete_unlocked(thread_t thread, task_pend_token_t pend_t */ static void proc_set_thread_policy_locked(thread_t thread, - int category, - int flavor, - int value, - int value2, - task_pend_token_t pend_token) + int category, + int flavor, + int value, + int value2, + 
task_pend_token_t pend_token) { spl_t s = splsched(); thread_lock(thread); @@ -1732,25 +1779,25 @@ proc_set_thread_policy_locked(thread_t thread, */ static void proc_set_thread_policy_spinlocked(thread_t thread, - int category, - int flavor, - int value, - int value2, - task_pend_token_t pend_token) + int category, + int flavor, + int value, + int value2, + task_pend_token_t pend_token) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_START, - thread_tid(thread), threquested_0(thread), - threquested_1(thread), value, 0); + (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_START, + thread_tid(thread), threquested_0(thread), + threquested_1(thread), value, 0); thread_set_requested_policy_spinlocked(thread, category, flavor, value, value2); thread_policy_update_spinlocked(thread, FALSE, pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_END, - thread_tid(thread), threquested_0(thread), - threquested_1(thread), tpending(pend_token), 0); + (IMPORTANCE_CODE(flavor, (category | TASK_POLICY_THREAD))) | DBG_FUNC_END, + thread_tid(thread), threquested_0(thread), + threquested_1(thread), tpending(pend_token), 0); } /* @@ -1758,108 +1805,110 @@ proc_set_thread_policy_spinlocked(thread_t thread, */ static void thread_set_requested_policy_spinlocked(thread_t thread, - int category, - int flavor, - int value, - int value2) + int category, + int flavor, + int value, + int value2) { int tier, passive; struct thread_requested_policy requested = thread->requested_policy; switch (flavor) { - /* Category: EXTERNAL and INTERNAL, thread and task */ - case TASK_POLICY_DARWIN_BG: - if (category == TASK_POLICY_EXTERNAL) - requested.thrp_ext_darwinbg = value; - else - requested.thrp_int_darwinbg = value; - break; + case TASK_POLICY_DARWIN_BG: + if (category == TASK_POLICY_EXTERNAL) { + requested.thrp_ext_darwinbg = value; + } else { + requested.thrp_int_darwinbg = value; + } + break; - case TASK_POLICY_IOPOL: - proc_iopol_to_tier(value, &tier, &passive); - if (category == TASK_POLICY_EXTERNAL) { - requested.thrp_ext_iotier = tier; - requested.thrp_ext_iopassive = passive; - } else { - requested.thrp_int_iotier = tier; - requested.thrp_int_iopassive = passive; - } - break; + case TASK_POLICY_IOPOL: + proc_iopol_to_tier(value, &tier, &passive); + if (category == TASK_POLICY_EXTERNAL) { + requested.thrp_ext_iotier = tier; + requested.thrp_ext_iopassive = passive; + } else { + requested.thrp_int_iotier = tier; + requested.thrp_int_iopassive = passive; + } + break; - case TASK_POLICY_IO: - if (category == TASK_POLICY_EXTERNAL) - requested.thrp_ext_iotier = value; - else - requested.thrp_int_iotier = value; - break; + case TASK_POLICY_IO: + if (category == TASK_POLICY_EXTERNAL) { + requested.thrp_ext_iotier = value; + } else { + requested.thrp_int_iotier = value; + } + break; - case TASK_POLICY_PASSIVE_IO: - if (category == TASK_POLICY_EXTERNAL) - requested.thrp_ext_iopassive = value; - else - requested.thrp_int_iopassive = value; - break; + case TASK_POLICY_PASSIVE_IO: + if (category == TASK_POLICY_EXTERNAL) { + requested.thrp_ext_iopassive = value; + } else { + requested.thrp_int_iopassive = value; + } + break; /* Category: ATTRIBUTE, thread only */ - case TASK_POLICY_PIDBIND_BG: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_pidbind_bg = value; - break; + case TASK_POLICY_PIDBIND_BG: + assert(category == TASK_POLICY_ATTRIBUTE); + 
requested.thrp_pidbind_bg = value; + break; - case TASK_POLICY_LATENCY_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_latency_qos = value; - break; + case TASK_POLICY_LATENCY_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_latency_qos = value; + break; - case TASK_POLICY_THROUGH_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_through_qos = value; - break; + case TASK_POLICY_THROUGH_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_through_qos = value; + break; - case TASK_POLICY_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_qos = value; - break; + case TASK_POLICY_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_qos = value; + break; - case TASK_POLICY_QOS_OVERRIDE: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_qos_override = value; - break; + case TASK_POLICY_QOS_OVERRIDE: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_qos_override = value; + break; - case TASK_POLICY_QOS_AND_RELPRIO: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_qos = value; - requested.thrp_qos_relprio = value2; - DTRACE_BOOST3(qos_set, uint64_t, thread->thread_id, int, requested.thrp_qos, int, requested.thrp_qos_relprio); - break; + case TASK_POLICY_QOS_AND_RELPRIO: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_qos = value; + requested.thrp_qos_relprio = value2; + DTRACE_BOOST3(qos_set, uint64_t, thread->thread_id, int, requested.thrp_qos, int, requested.thrp_qos_relprio); + break; - case TASK_POLICY_QOS_WORKQ_OVERRIDE: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_qos_workq_override = value; - break; + case TASK_POLICY_QOS_WORKQ_OVERRIDE: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_qos_workq_override = value; + break; - case TASK_POLICY_QOS_PROMOTE: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_qos_promote = value; - break; + case TASK_POLICY_QOS_PROMOTE: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_qos_promote = value; + break; - case TASK_POLICY_QOS_IPC_OVERRIDE: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_qos_ipc_override = value; - break; + case TASK_POLICY_QOS_IPC_OVERRIDE: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_qos_ipc_override = value; + break; - case TASK_POLICY_TERMINATED: - assert(category == TASK_POLICY_ATTRIBUTE); - requested.thrp_terminated = value; - break; + case TASK_POLICY_TERMINATED: + assert(category == TASK_POLICY_ATTRIBUTE); + requested.thrp_terminated = value; + break; - default: - panic("unknown task policy: %d %d %d", category, flavor, value); - break; + default: + panic("unknown task policy: %d %d %d", category, flavor, value); + break; } thread->requested_policy = requested; @@ -1871,8 +1920,8 @@ thread_set_requested_policy_spinlocked(thread_t thread, */ int proc_get_thread_policy(thread_t thread, - int category, - int flavor) + int category, + int flavor) { int value = 0; thread_mtx_lock(thread); @@ -1883,9 +1932,9 @@ proc_get_thread_policy(thread_t thread, static int proc_get_thread_policy_locked(thread_t thread, - int category, - int flavor, - int* value2) + int category, + int flavor, + int* value2) { int value = 0; @@ -1905,83 +1954,87 @@ proc_get_thread_policy_locked(thread_t thread, */ static int thread_get_requested_policy_spinlocked(thread_t thread, - int category, - int flavor, - int* value2) + int category, + int flavor, + int* value2) { int value = 0; struct thread_requested_policy requested = 
thread->requested_policy; switch (flavor) { - case TASK_POLICY_DARWIN_BG: - if (category == TASK_POLICY_EXTERNAL) - value = requested.thrp_ext_darwinbg; - else - value = requested.thrp_int_darwinbg; - break; - case TASK_POLICY_IOPOL: - if (category == TASK_POLICY_EXTERNAL) - value = proc_tier_to_iopol(requested.thrp_ext_iotier, - requested.thrp_ext_iopassive); - else - value = proc_tier_to_iopol(requested.thrp_int_iotier, - requested.thrp_int_iopassive); - break; - case TASK_POLICY_IO: - if (category == TASK_POLICY_EXTERNAL) - value = requested.thrp_ext_iotier; - else - value = requested.thrp_int_iotier; - break; - case TASK_POLICY_PASSIVE_IO: - if (category == TASK_POLICY_EXTERNAL) - value = requested.thrp_ext_iopassive; - else - value = requested.thrp_int_iopassive; - break; - case TASK_POLICY_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.thrp_qos; - break; - case TASK_POLICY_QOS_OVERRIDE: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.thrp_qos_override; - break; - case TASK_POLICY_LATENCY_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.thrp_latency_qos; - break; - case TASK_POLICY_THROUGH_QOS: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.thrp_through_qos; - break; - case TASK_POLICY_QOS_WORKQ_OVERRIDE: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.thrp_qos_workq_override; - break; - case TASK_POLICY_QOS_AND_RELPRIO: - assert(category == TASK_POLICY_ATTRIBUTE); - assert(value2 != NULL); - value = requested.thrp_qos; - *value2 = requested.thrp_qos_relprio; - break; - case TASK_POLICY_QOS_PROMOTE: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.thrp_qos_promote; - break; - case TASK_POLICY_QOS_IPC_OVERRIDE: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.thrp_qos_ipc_override; - break; - case TASK_POLICY_TERMINATED: - assert(category == TASK_POLICY_ATTRIBUTE); - value = requested.thrp_terminated; - break; + case TASK_POLICY_DARWIN_BG: + if (category == TASK_POLICY_EXTERNAL) { + value = requested.thrp_ext_darwinbg; + } else { + value = requested.thrp_int_darwinbg; + } + break; + case TASK_POLICY_IOPOL: + if (category == TASK_POLICY_EXTERNAL) { + value = proc_tier_to_iopol(requested.thrp_ext_iotier, + requested.thrp_ext_iopassive); + } else { + value = proc_tier_to_iopol(requested.thrp_int_iotier, + requested.thrp_int_iopassive); + } + break; + case TASK_POLICY_IO: + if (category == TASK_POLICY_EXTERNAL) { + value = requested.thrp_ext_iotier; + } else { + value = requested.thrp_int_iotier; + } + break; + case TASK_POLICY_PASSIVE_IO: + if (category == TASK_POLICY_EXTERNAL) { + value = requested.thrp_ext_iopassive; + } else { + value = requested.thrp_int_iopassive; + } + break; + case TASK_POLICY_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.thrp_qos; + break; + case TASK_POLICY_QOS_OVERRIDE: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.thrp_qos_override; + break; + case TASK_POLICY_LATENCY_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.thrp_latency_qos; + break; + case TASK_POLICY_THROUGH_QOS: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.thrp_through_qos; + break; + case TASK_POLICY_QOS_WORKQ_OVERRIDE: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.thrp_qos_workq_override; + break; + case TASK_POLICY_QOS_AND_RELPRIO: + assert(category == TASK_POLICY_ATTRIBUTE); + assert(value2 != NULL); + value = requested.thrp_qos; + *value2 = requested.thrp_qos_relprio; 
+ break; + case TASK_POLICY_QOS_PROMOTE: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.thrp_qos_promote; + break; + case TASK_POLICY_QOS_IPC_OVERRIDE: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.thrp_qos_ipc_override; + break; + case TASK_POLICY_TERMINATED: + assert(category == TASK_POLICY_ATTRIBUTE); + value = requested.thrp_terminated; + break; - default: - panic("unknown policy_flavor %d", flavor); - break; + default: + panic("unknown policy_flavor %d", flavor); + break; } return value; @@ -2001,90 +2054,92 @@ thread_get_requested_policy_spinlocked(thread_t thread, */ int proc_get_effective_thread_policy(thread_t thread, - int flavor) + int flavor) { int value = 0; switch (flavor) { - case TASK_POLICY_DARWIN_BG: - /* - * This call is used within the timer layer, as well as - * prioritizing requests to the graphics system. - * It also informs SFI and originator-bg-state. - * Returns 1 for background mode, 0 for normal mode - */ - - value = thread->effective_policy.thep_darwinbg ? 1 : 0; - break; - case TASK_POLICY_IO: - /* - * The I/O system calls here to find out what throttling tier to apply to an operation. - * Returns THROTTLE_LEVEL_* values - */ - value = thread->effective_policy.thep_io_tier; - if (thread->iotier_override != THROTTLE_LEVEL_NONE) - value = MIN(value, thread->iotier_override); - break; - case TASK_POLICY_PASSIVE_IO: - /* - * The I/O system calls here to find out whether an operation should be passive. - * (i.e. not cause operations with lower throttle tiers to be throttled) - * Returns 1 for passive mode, 0 for normal mode - * - * If an override is causing IO to go into a lower tier, we also set - * the passive bit so that a thread doesn't end up stuck in its own throttle - * window when the override goes away. - */ - value = thread->effective_policy.thep_io_passive ? 1 : 0; - if (thread->iotier_override != THROTTLE_LEVEL_NONE && - thread->iotier_override < thread->effective_policy.thep_io_tier) - value = 1; - break; - case TASK_POLICY_ALL_SOCKETS_BG: - /* - * do_background_socket() calls this to determine whether - * it should change the thread's sockets - * Returns 1 for background mode, 0 for normal mode - * This consults both thread and task so un-DBGing a thread while the task is BG - * doesn't get you out of the network throttle. - */ - value = (thread->effective_policy.thep_all_sockets_bg || - thread->task->effective_policy.tep_all_sockets_bg) ? 1 : 0; - break; - case TASK_POLICY_NEW_SOCKETS_BG: - /* - * socreate() calls this to determine if it should mark a new socket as background - * Returns 1 for background mode, 0 for normal mode - */ - value = thread->effective_policy.thep_new_sockets_bg ? 1 : 0; - break; - case TASK_POLICY_LATENCY_QOS: - /* - * timer arming calls into here to find out the timer coalescing level - * Returns a latency QoS tier (0-6) - */ - value = thread->effective_policy.thep_latency_qos; - break; - case TASK_POLICY_THROUGH_QOS: - /* - * This value is passed into the urgency callout from the scheduler - * to the performance management subsystem. - * - * Returns a throughput QoS tier (0-6) - */ - value = thread->effective_policy.thep_through_qos; - break; - case TASK_POLICY_QOS: - /* - * This is communicated to the performance management layer and SFI. 
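The requested-policy getter above is purely a bucket selector: every tunable is stored twice, once per category. A minimal standalone sketch of that shape, with simplified field names and an invented tier-to-iopol encoding (the real proc_tier_to_iopol() mapping lives elsewhere in the tree and is not reproduced here):

#include <stdio.h>

/* Hypothetical mirror of the bookkeeping above: external (managed) and
 * internal (kernel-originated) values live side by side, and the getter
 * just selects the bucket named by 'category'. */
enum category { POLICY_EXTERNAL, POLICY_INTERNAL };

struct requested {
    int ext_iotier, ext_iopassive;
    int int_iotier, int_iopassive;
};

/* Stand-in for proc_tier_to_iopol(); this encoding is illustrative only. */
static int tier_to_iopol(int tier, int passive)
{
    return (passive << 8) | tier;
}

static int get_requested_iopol(const struct requested *r, enum category cat)
{
    return (cat == POLICY_EXTERNAL)
        ? tier_to_iopol(r->ext_iotier, r->ext_iopassive)
        : tier_to_iopol(r->int_iotier, r->int_iopassive);
}

int main(void)
{
    struct requested r = { .ext_iotier = 2, .ext_iopassive = 1,
                           .int_iotier = 0, .int_iopassive = 0 };
    printf("external iopol %#x, internal iopol %#x\n",
        get_requested_iopol(&r, POLICY_EXTERNAL),
        get_requested_iopol(&r, POLICY_INTERNAL));
    return 0;
}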
- * - * Returns a QoS policy tier - */ - value = thread->effective_policy.thep_qos; - break; - default: - panic("unknown thread policy flavor %d", flavor); - break; + case TASK_POLICY_DARWIN_BG: + /* + * This call is used within the timer layer, as well as + * prioritizing requests to the graphics system. + * It also informs SFI and originator-bg-state. + * Returns 1 for background mode, 0 for normal mode + */ + + value = thread->effective_policy.thep_darwinbg ? 1 : 0; + break; + case TASK_POLICY_IO: + /* + * The I/O system calls here to find out what throttling tier to apply to an operation. + * Returns THROTTLE_LEVEL_* values + */ + value = thread->effective_policy.thep_io_tier; + if (thread->iotier_override != THROTTLE_LEVEL_NONE) { + value = MIN(value, thread->iotier_override); + } + break; + case TASK_POLICY_PASSIVE_IO: + /* + * The I/O system calls here to find out whether an operation should be passive. + * (i.e. not cause operations with lower throttle tiers to be throttled) + * Returns 1 for passive mode, 0 for normal mode + * + * If an override is causing IO to go into a lower tier, we also set + * the passive bit so that a thread doesn't end up stuck in its own throttle + * window when the override goes away. + */ + value = thread->effective_policy.thep_io_passive ? 1 : 0; + if (thread->iotier_override != THROTTLE_LEVEL_NONE && + thread->iotier_override < thread->effective_policy.thep_io_tier) { + value = 1; + } + break; + case TASK_POLICY_ALL_SOCKETS_BG: + /* + * do_background_socket() calls this to determine whether + * it should change the thread's sockets + * Returns 1 for background mode, 0 for normal mode + * This consults both thread and task so un-DBGing a thread while the task is BG + * doesn't get you out of the network throttle. + */ + value = (thread->effective_policy.thep_all_sockets_bg || + thread->task->effective_policy.tep_all_sockets_bg) ? 1 : 0; + break; + case TASK_POLICY_NEW_SOCKETS_BG: + /* + * socreate() calls this to determine if it should mark a new socket as background + * Returns 1 for background mode, 0 for normal mode + */ + value = thread->effective_policy.thep_new_sockets_bg ? 1 : 0; + break; + case TASK_POLICY_LATENCY_QOS: + /* + * timer arming calls into here to find out the timer coalescing level + * Returns a latency QoS tier (0-6) + */ + value = thread->effective_policy.thep_latency_qos; + break; + case TASK_POLICY_THROUGH_QOS: + /* + * This value is passed into the urgency callout from the scheduler + * to the performance management subsystem. + * + * Returns a throughput QoS tier (0-6) + */ + value = thread->effective_policy.thep_through_qos; + break; + case TASK_POLICY_QOS: + /* + * This is communicated to the performance management layer and SFI. + * + * Returns a QoS policy tier + */ + value = thread->effective_policy.thep_qos; + break; + default: + panic("unknown thread policy flavor %d", flavor); + break; } return value; @@ -2207,7 +2262,8 @@ theffective_1(thread_t thread) * However a thread reference must be held on the thread. 
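The TASK_POLICY_IO and TASK_POLICY_PASSIVE_IO cases above interact: an iotier_override can only lower (improve) the effective tier, and when it does, the passive bit is forced so the thread cannot sit in its own throttle window after the override lifts. A flattened, userspace restatement of that pair of rules, with illustrative field names:

#include <stdio.h>

#define THROTTLE_LEVEL_NONE (-1)
#define MINV(a, b) ((a) < (b) ? (a) : (b))

struct thread_io {
    int effective_tier;     /* thep_io_tier analogue */
    int effective_passive;  /* thep_io_passive analogue */
    int iotier_override;    /* THROTTLE_LEVEL_NONE when unset */
};

static int effective_io_tier(const struct thread_io *t)
{
    int tier = t->effective_tier;
    if (t->iotier_override != THROTTLE_LEVEL_NONE)
        tier = MINV(tier, t->iotier_override);   /* override only improves */
    return tier;
}

static int effective_io_passive(const struct thread_io *t)
{
    /* Override lowered the tier: force passive, per the comment above. */
    if (t->iotier_override != THROTTLE_LEVEL_NONE &&
        t->iotier_override < t->effective_tier)
        return 1;
    return t->effective_passive ? 1 : 0;
}

int main(void)
{
    struct thread_io t = { .effective_tier = 3, .effective_passive = 0,
                           .iotier_override = 1 };
    printf("tier %d passive %d\n",
        effective_io_tier(&t), effective_io_passive(&t));
    return 0;
}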
*/ -void set_thread_iotier_override(thread_t thread, int policy) +void +set_thread_iotier_override(thread_t thread, int policy) { int current_override; @@ -2215,8 +2271,9 @@ void set_thread_iotier_override(thread_t thread, int policy) do { current_override = thread->iotier_override; - if (current_override != THROTTLE_LEVEL_NONE) + if (current_override != THROTTLE_LEVEL_NONE) { policy = MIN(current_override, policy); + } if (current_override == policy) { /* no effective change */ @@ -2248,7 +2305,9 @@ void set_thread_iotier_override(thread_t thread, int policy) * to be handled specially in the future, but for now it's fine to slam * *resource to USER_ADDR_NULL even if it was previously a wildcard. */ -static void canonicalize_resource_and_type(user_addr_t *resource, int *resource_type) { +static void +canonicalize_resource_and_type(user_addr_t *resource, int *resource_type) +{ if (qos_override_mode == QOS_OVERRIDE_MODE_OVERHANG_PEAK || qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) { /* Map all input resource/type to a single one */ *resource = USER_ADDR_NULL; @@ -2266,8 +2325,8 @@ static void canonicalize_resource_and_type(user_addr_t *resource, int *resource_ /* This helper routine finds an existing override if known. Locking should be done by caller */ static struct thread_qos_override * find_qos_override(thread_t thread, - user_addr_t resource, - int resource_type) + user_addr_t resource, + int resource_type) { struct thread_qos_override *override; @@ -2286,10 +2345,10 @@ find_qos_override(thread_t thread, static void find_and_decrement_qos_override(thread_t thread, - user_addr_t resource, - int resource_type, - boolean_t reset, - struct thread_qos_override **free_override_list) + user_addr_t resource, + int resource_type, + boolean_t reset, + struct thread_qos_override **free_override_list) { struct thread_qos_override *override, *override_prev; @@ -2298,9 +2357,8 @@ find_and_decrement_qos_override(thread_t thread, while (override) { struct thread_qos_override *override_next = override->override_next; - if ((THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD == resource || override->override_resource == resource) && + if ((THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD == resource || override->override_resource == resource) && (THREAD_QOS_OVERRIDE_TYPE_WILDCARD == resource_type || override->override_resource_type == resource_type)) { - if (reset) { override->override_contended_resource_count = 0; } else { @@ -2360,10 +2418,10 @@ calculate_requested_qos_override(thread_t thread) */ static int proc_thread_qos_add_override_internal(thread_t thread, - int override_qos, - boolean_t first_override_for_resource, - user_addr_t resource, - int resource_type) + int override_qos, + boolean_t first_override_for_resource, + user_addr_t resource, + int resource_type) { struct task_pend_token pend_token = {}; int rc = 0; @@ -2371,12 +2429,12 @@ proc_thread_qos_add_override_internal(thread_t thread, thread_mtx_lock(thread); KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_START, - thread_tid(thread), override_qos, first_override_for_resource ? 1 : 0, 0, 0); + thread_tid(thread), override_qos, first_override_for_resource ? 
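find_and_decrement_qos_override() above matches overrides with a two-key wildcard test: each key (resource address, resource type) either matches exactly or was passed as the wildcard. A self-contained sketch of just that predicate; the wildcard constants here are illustrative, not the values from the thread-policy headers:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t user_addr_t;

#define RESOURCE_WILDCARD ((user_addr_t)~0ULL)  /* illustrative */
#define TYPE_WILDCARD (-1)                      /* illustrative */

/* An override is selected when each key either matches exactly or the
 * caller supplied the wildcard for that key. */
static int override_matches(user_addr_t want_res, int want_type,
    user_addr_t have_res, int have_type)
{
    return (want_res == RESOURCE_WILDCARD || have_res == want_res) &&
           (want_type == TYPE_WILDCARD || have_type == want_type);
}

int main(void)
{
    printf("%d %d\n",
        override_matches(RESOURCE_WILDCARD, 2, 0x1000, 2),   /* 1 */
        override_matches(0x2000, TYPE_WILDCARD, 0x1000, 2)); /* 0 */
    return 0;
}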
1 : 0, 0, 0); DTRACE_BOOST5(qos_add_override_pre, uint64_t, thread_tid(thread), - uint64_t, thread->requested_policy.thrp_qos, - uint64_t, thread->effective_policy.thep_qos, - int, override_qos, boolean_t, first_override_for_resource); + uint64_t, thread->requested_policy.thrp_qos, + uint64_t, thread->effective_policy.thep_qos, + int, override_qos, boolean_t, first_override_for_resource); struct thread_qos_override *override; struct thread_qos_override *override_new = NULL; @@ -2411,10 +2469,11 @@ proc_thread_qos_add_override_internal(thread_t thread, } if (override) { - if (override->override_qos == THREAD_QOS_UNSPECIFIED) + if (override->override_qos == THREAD_QOS_UNSPECIFIED) { override->override_qos = override_qos; - else + } else { override->override_qos = MAX(override->override_qos, override_qos); + } } /* Determine how to combine the various overrides into a single current @@ -2423,12 +2482,12 @@ proc_thread_qos_add_override_internal(thread_t thread, new_qos_override = calculate_requested_qos_override(thread); prev_qos_override = proc_get_thread_policy_locked(thread, - TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, NULL); + TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, NULL); if (new_qos_override != prev_qos_override) { proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_OVERRIDE, - new_qos_override, 0, &pend_token); + TASK_POLICY_QOS_OVERRIDE, + new_qos_override, 0, &pend_token); } new_effective_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS); @@ -2442,22 +2501,22 @@ proc_thread_qos_add_override_internal(thread_t thread, } DTRACE_BOOST4(qos_add_override_post, int, prev_qos_override, - int, new_qos_override, int, new_effective_qos, int, rc); + int, new_qos_override, int, new_effective_qos, int, rc); KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_END, - new_qos_override, resource, resource_type, 0, 0); + new_qos_override, resource, resource_type, 0, 0); return rc; } int proc_thread_qos_add_override(task_t task, - thread_t thread, - uint64_t tid, - int override_qos, - boolean_t first_override_for_resource, - user_addr_t resource, - int resource_type) + thread_t thread, + uint64_t tid, + int override_qos, + boolean_t first_override_for_resource, + user_addr_t resource, + int resource_type) { boolean_t has_thread_reference = FALSE; int rc = 0; @@ -2468,7 +2527,7 @@ proc_thread_qos_add_override(task_t task, if (thread == THREAD_NULL) { KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_NONE, - tid, 0, 0xdead, 0, 0); + tid, 0, 0xdead, 0, 0); return ESRCH; } has_thread_reference = TRUE; @@ -2476,7 +2535,7 @@ proc_thread_qos_add_override(task_t task, assert(thread->task == task); } rc = proc_thread_qos_add_override_internal(thread, override_qos, - first_override_for_resource, resource, resource_type); + first_override_for_resource, resource, resource_type); if (has_thread_reference) { thread_deallocate(thread); } @@ -2486,9 +2545,9 @@ proc_thread_qos_add_override(task_t task, static void proc_thread_qos_remove_override_internal(thread_t thread, - user_addr_t resource, - int resource_type, - boolean_t reset) + user_addr_t resource, + int resource_type, + boolean_t reset) { struct task_pend_token pend_token = {}; @@ -2502,11 +2561,11 @@ proc_thread_qos_remove_override_internal(thread_t thread, find_and_decrement_qos_override(thread, resource, resource_type, reset, &deferred_free_override_list); 
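Two details of the add-override path above are worth isolating: a per-resource override QoS only ratchets upward (MAX, or direct assignment when unspecified), and the thread-wide requested override is rewritten only when the recomputed aggregate actually differs, so repeated adds of a lower QoS stay cheap. A condensed model with a single tracked resource (names are illustrative, not the kernel's):

#include <stdio.h>

#define QOS_UNSPECIFIED 0
#define MAXV(a, b) ((a) > (b) ? (a) : (b))

struct state {
    int resource_override_qos;  /* one resource, for brevity */
    int thread_override_qos;    /* TASK_POLICY_QOS_OVERRIDE analogue */
    int updates;                /* counts "expensive" policy rewrites */
};

static void add_override(struct state *s, int qos)
{
    if (s->resource_override_qos == QOS_UNSPECIFIED)
        s->resource_override_qos = qos;
    else
        s->resource_override_qos = MAXV(s->resource_override_qos, qos);

    /* stand-in for calculate_requested_qos_override() */
    int new_qos = s->resource_override_qos;
    if (new_qos != s->thread_override_qos) {
        s->thread_override_qos = new_qos;   /* the costly update path */
        s->updates++;
    }
}

int main(void)
{
    struct state s = {0};
    add_override(&s, 3);
    add_override(&s, 2);  /* lower request: no rewrite */
    printf("override %d after %d update(s)\n",
        s.thread_override_qos, s.updates);
    return 0;
}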
KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_START, - thread_tid(thread), resource, reset, 0, 0); + thread_tid(thread), resource, reset, 0, 0); DTRACE_BOOST3(qos_remove_override_pre, uint64_t, thread_tid(thread), - uint64_t, thread->requested_policy.thrp_qos, - uint64_t, thread->effective_policy.thep_qos); + uint64_t, thread->requested_policy.thrp_qos, + uint64_t, thread->effective_policy.thep_qos); /* Determine how to combine the various overrides into a single current requested override */ new_qos_override = calculate_requested_qos_override(thread); @@ -2521,8 +2580,9 @@ proc_thread_qos_remove_override_internal(thread_t thread, */ prev_qos_override = thread_get_requested_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, NULL); - if (new_qos_override != prev_qos_override) + if (new_qos_override != prev_qos_override) { proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS_OVERRIDE, new_qos_override, 0, &pend_token); + } new_effective_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS); @@ -2541,18 +2601,18 @@ proc_thread_qos_remove_override_internal(thread_t thread, } DTRACE_BOOST3(qos_remove_override_post, int, prev_qos_override, - int, new_qos_override, int, new_effective_qos); + int, new_qos_override, int, new_effective_qos); KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_END, - thread_tid(thread), 0, 0, 0, 0); + thread_tid(thread), 0, 0, 0, 0); } int proc_thread_qos_remove_override(task_t task, - thread_t thread, - uint64_t tid, - user_addr_t resource, - int resource_type) + thread_t thread, + uint64_t tid, + user_addr_t resource, + int resource_type) { boolean_t has_thread_reference = FALSE; @@ -2562,7 +2622,7 @@ proc_thread_qos_remove_override(task_t task, if (thread == THREAD_NULL) { KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_NONE, - tid, 0, 0xdead, 0, 0); + tid, 0, 0xdead, 0, 0); return ESRCH; } has_thread_reference = TRUE; @@ -2572,14 +2632,16 @@ proc_thread_qos_remove_override(task_t task, proc_thread_qos_remove_override_internal(thread, resource, resource_type, FALSE); - if (has_thread_reference) + if (has_thread_reference) { thread_deallocate(thread); + } return 0; } /* Deallocate before thread termination */ -void proc_thread_qos_deallocate(thread_t thread) +void +proc_thread_qos_deallocate(thread_t thread) { /* This thread must have no more IPC overrides. 
*/ assert(thread->ipc_overrides == 0); @@ -2611,7 +2673,8 @@ void proc_thread_qos_deallocate(thread_t thread) * Set up the primordial thread's QoS */ void -task_set_main_thread_qos(task_t task, thread_t thread) { +task_set_main_thread_qos(task_t task, thread_t thread) +{ struct task_pend_token pend_token = {}; assert(thread->task == task); @@ -2619,23 +2682,23 @@ task_set_main_thread_qos(task_t task, thread_t thread) { thread_mtx_lock(thread); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_START, - thread_tid(thread), threquested_0(thread), threquested_1(thread), - thread->requested_policy.thrp_qos, 0); + (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_START, + thread_tid(thread), threquested_0(thread), threquested_1(thread), + thread->requested_policy.thrp_qos, 0); int primordial_qos = task_compute_main_thread_qos(task); proc_set_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, TASK_POLICY_QOS, - primordial_qos, 0, &pend_token); + primordial_qos, 0, &pend_token); thread_mtx_unlock(thread); thread_policy_update_complete_unlocked(thread, &pend_token); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_END, - thread_tid(thread), threquested_0(thread), threquested_1(thread), - primordial_qos, 0); + (IMPORTANCE_CODE(IMP_MAIN_THREAD_QOS, 0)) | DBG_FUNC_END, + thread_tid(thread), threquested_0(thread), threquested_1(thread), + primordial_qos, 0); } /* @@ -2649,8 +2712,9 @@ task_get_default_manager_qos(task_t task) { int primordial_qos = task_compute_main_thread_qos(task); - if (primordial_qos == THREAD_QOS_LEGACY) + if (primordial_qos == THREAD_QOS_LEGACY) { primordial_qos = THREAD_QOS_USER_INITIATED; + } return primordial_qos; } @@ -2676,11 +2740,11 @@ thread_recompute_user_promotion_locked(thread_t thread) return needs_update; } else { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_PRIORITY_OPERATIONS, (THREAD_USER_PROMOTION_CHANGE))) | DBG_FUNC_NONE, - thread_tid(thread), - user_promotion_basepri, - thread->user_promotion_basepri, - 0, 0); + (TURNSTILE_CODE(TURNSTILE_PRIORITY_OPERATIONS, (THREAD_USER_PROMOTION_CHANGE))) | DBG_FUNC_NONE, + thread_tid(thread), + user_promotion_basepri, + thread->user_promotion_basepri, + 0, 0); } /* Update the user promotion base pri */ @@ -2694,7 +2758,7 @@ thread_recompute_user_promotion_locked(thread_t thread) } proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_PROMOTE, qos_promotion, 0, &pend_token); + TASK_POLICY_QOS_PROMOTE, qos_promotion, 0, &pend_token); if (thread_get_waiting_turnstile(thread) && thread->base_pri != old_base_pri) { @@ -2739,8 +2803,8 @@ thread_user_promotion_qos_for_pri(int priority) */ static void thread_ipc_override(thread_t thread, - uint32_t qos_override, - boolean_t is_new_override) + uint32_t qos_override, + boolean_t is_new_override) { struct task_pend_token pend_token = {}; boolean_t needs_update; @@ -2779,8 +2843,8 @@ thread_ipc_override(thread_t thread, if (needs_update) { proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_IPC_OVERRIDE, - qos_override, 0, &pend_token); + TASK_POLICY_QOS_IPC_OVERRIDE, + qos_override, 0, &pend_token); assert(pend_token.tpt_update_sockets == 0); } @@ -2792,14 +2856,14 @@ thread_ipc_override(thread_t thread, void thread_add_ipc_override(thread_t thread, - uint32_t qos_override) + uint32_t qos_override) { thread_ipc_override(thread, qos_override, TRUE); } void thread_update_ipc_override(thread_t thread, - uint32_t 
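task_get_default_manager_qos() above encodes one substitution: the workqueue manager thread is never handed LEGACY, which gets bumped to USER_INITIATED. A tiny model, with constants that follow the usual THREAD_QOS_* ordering but are illustrative here:

#include <stdio.h>

enum { QOS_LEGACY = 4, QOS_USER_INITIATED = 5 };  /* illustrative values */

static int default_manager_qos(int primordial_qos)
{
    if (primordial_qos == QOS_LEGACY)
        primordial_qos = QOS_USER_INITIATED;
    return primordial_qos;
}

int main(void)
{
    printf("%d\n", default_manager_qos(QOS_LEGACY));  /* prints 5 */
    return 0;
}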
qos_override) + uint32_t qos_override) { thread_ipc_override(thread, qos_override, FALSE); } @@ -2821,8 +2885,8 @@ thread_drop_ipc_override(thread_t thread) */ proc_set_thread_policy_spinlocked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_IPC_OVERRIDE, THREAD_QOS_UNSPECIFIED, - 0, &pend_token); + TASK_POLICY_QOS_IPC_OVERRIDE, THREAD_QOS_UNSPECIFIED, + 0, &pend_token); } thread_unlock(thread); @@ -2839,8 +2903,10 @@ thread_get_requested_qos(thread_t thread, int *relpri) thread_qos_t qos; qos = proc_get_thread_policy_locked(thread, TASK_POLICY_ATTRIBUTE, - TASK_POLICY_QOS_AND_RELPRIO, &relprio_value); - if (relpri) *relpri = -relprio_value; + TASK_POLICY_QOS_AND_RELPRIO, &relprio_value); + if (relpri) { + *relpri = -relprio_value; + } return qos; } @@ -2879,4 +2945,3 @@ thread_clear_exec_promotion(thread_t thread) thread_unlock(thread); splx(s); } - diff --git a/osfmk/kern/timer.c b/osfmk/kern/timer.c index 8ccba9e2c..4be4464cb 100644 --- a/osfmk/kern/timer.c +++ b/osfmk/kern/timer.c @@ -79,7 +79,7 @@ timer_delta(timer_t timer, uint64_t *prev_in_cur_out) { uint64_t old = *prev_in_cur_out; uint64_t new = *prev_in_cur_out = timer_grab(timer); - return (new - old); + return new - old; } static void @@ -89,11 +89,11 @@ timer_advance(timer_t timer, uint64_t delta) timer->all_bits += delta; #else /* defined(__LP64__) */ extern void timer_advance_internal_32(timer_t timer, uint32_t high, - uint32_t low); + uint32_t low); uint64_t low = delta + timer->low_bits; if (low >> 32) { timer_advance_internal_32(timer, - (uint32_t)(timer->high_bits + (low >> 32)), (uint32_t)low); + (uint32_t)(timer->high_bits + (low >> 32)), (uint32_t)low); } else { timer->low_bits = (uint32_t)low; } diff --git a/osfmk/kern/timer.h b/osfmk/kern/timer.h index 648a47c79..2ac35e414 100644 --- a/osfmk/kern/timer.h +++ b/osfmk/kern/timer.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. 
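The non-LP64 timer_advance() path in the timer.c hunk above keeps the 64-bit accumulator as high/low 32-bit halves and only takes the two-word update path when the addition carries out of the low word. A userspace rendering (the kernel's timer_advance_internal_32() makes the pair update safe against concurrent readers; a plain assignment stands in for that here):

#include <stdint.h>
#include <stdio.h>

struct timer32 {
    uint32_t low_bits;
    uint32_t high_bits;
};

static void advance_internal_32(struct timer32 *t, uint32_t high, uint32_t low)
{
    /* Simulated atomic two-word update. */
    t->high_bits = high;
    t->low_bits = low;
}

static void timer_advance32(struct timer32 *t, uint64_t delta)
{
    uint64_t low = delta + t->low_bits;
    if (low >> 32)   /* carry out of the low word */
        advance_internal_32(t,
            (uint32_t)(t->high_bits + (low >> 32)), (uint32_t)low);
    else
        t->low_bits = (uint32_t)low;
}

int main(void)
{
    struct timer32 t = { .low_bits = 0xFFFFFFF0u, .high_bits = 0 };
    timer_advance32(&t, 0x20);   /* carries into the high word */
    printf("high %u low %u\n", (unsigned)t.high_bits, (unsigned)t.low_bits);
    return 0;
}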
- * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ diff --git a/osfmk/kern/timer_call.c b/osfmk/kern/timer_call.c index 86bd1a8df..06918edf5 100644 --- a/osfmk/kern/timer_call.c +++ b/osfmk/kern/timer_call.c @@ -2,7 +2,7 @@ * Copyright (c) 1993-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -48,7 +48,7 @@ #if DEBUG -#define TIMER_ASSERT 1 +#define TIMER_ASSERT 1 #endif //#define TIMER_ASSERT 1 @@ -61,7 +61,7 @@ #endif #if TIMER_TRACE -#define TIMER_KDEBUG_TRACE KERNEL_DEBUG_CONSTANT_IST +#define TIMER_KDEBUG_TRACE KERNEL_DEBUG_CONSTANT_IST #else #define TIMER_KDEBUG_TRACE(x...) #endif @@ -77,20 +77,20 @@ lck_grp_attr_t timer_longterm_lck_grp_attr; /* Timer queue lock must be acquired with interrupts disabled (under splclock()) */ #if __SMP__ -#define timer_queue_lock_spin(queue) \ +#define timer_queue_lock_spin(queue) \ lck_mtx_lock_spin_always(&queue->lock_data) -#define timer_queue_unlock(queue) \ +#define timer_queue_unlock(queue) \ lck_mtx_unlock_always(&queue->lock_data) #else -#define timer_queue_lock_spin(queue) (void)1 -#define timer_queue_unlock(queue) (void)1 +#define timer_queue_lock_spin(queue) (void)1 +#define timer_queue_unlock(queue) (void)1 #endif -#define QUEUE(x) ((queue_t)(x)) -#define MPQUEUE(x) ((mpqueue_head_t *)(x)) -#define TIMER_CALL(x) ((timer_call_t)(x)) -#define TCE(x) (&(x->call_entry)) +#define QUEUE(x) ((queue_t)(x)) +#define MPQUEUE(x) ((mpqueue_head_t *)(x)) +#define TIMER_CALL(x) ((timer_call_t)(x)) +#define TCE(x) (&(x->call_entry)) /* * The longterm timer object is a global structure holding all timers * beyond the short-term, local timer queue threshold. 
The boot processor @@ -99,82 +99,82 @@ lck_grp_attr_t timer_longterm_lck_grp_attr; */ /* Sentinel for "no time set": */ -#define TIMER_LONGTERM_NONE EndOfAllTime -/* The default threadhold is the delta above which a timer is "long-term" */ +#define TIMER_LONGTERM_NONE EndOfAllTime +/* The default threadhold is the delta above which a timer is "long-term" */ #if defined(__x86_64__) -#define TIMER_LONGTERM_THRESHOLD (1ULL * NSEC_PER_SEC) /* 1 sec */ +#define TIMER_LONGTERM_THRESHOLD (1ULL * NSEC_PER_SEC) /* 1 sec */ #else -#define TIMER_LONGTERM_THRESHOLD TIMER_LONGTERM_NONE /* disabled */ +#define TIMER_LONGTERM_THRESHOLD TIMER_LONGTERM_NONE /* disabled */ #endif /* * The scan_limit throttles processing of the longterm queue. - * If the scan time exceeds this limit, we terminate, unlock + * If the scan time exceeds this limit, we terminate, unlock * and defer for scan_interval. This prevents unbounded holding of * timer queue locks with interrupts masked. */ -#define TIMER_LONGTERM_SCAN_LIMIT (100ULL * NSEC_PER_USEC) /* 100 us */ -#define TIMER_LONGTERM_SCAN_INTERVAL (100ULL * NSEC_PER_USEC) /* 100 us */ +#define TIMER_LONGTERM_SCAN_LIMIT (100ULL * NSEC_PER_USEC) /* 100 us */ +#define TIMER_LONGTERM_SCAN_INTERVAL (100ULL * NSEC_PER_USEC) /* 100 us */ /* Sentinel for "scan limit exceeded": */ -#define TIMER_LONGTERM_SCAN_AGAIN 0 +#define TIMER_LONGTERM_SCAN_AGAIN 0 typedef struct { - uint64_t interval; /* longterm timer interval */ - uint64_t margin; /* fudge factor (10% of interval */ - uint64_t deadline; /* first/soonest longterm deadline */ - uint64_t preempted; /* sooner timer has pre-empted */ - timer_call_t call; /* first/soonest longterm timer call */ - uint64_t deadline_set; /* next timer set */ - timer_call_data_t timer; /* timer used by threshold management */ - /* Stats: */ - uint64_t scans; /* num threshold timer scans */ - uint64_t preempts; /* num threshold reductions */ - uint64_t latency; /* average threshold latency */ - uint64_t latency_min; /* minimum threshold latency */ - uint64_t latency_max; /* maximum threshold latency */ + uint64_t interval; /* longterm timer interval */ + uint64_t margin; /* fudge factor (10% of interval */ + uint64_t deadline; /* first/soonest longterm deadline */ + uint64_t preempted; /* sooner timer has pre-empted */ + timer_call_t call; /* first/soonest longterm timer call */ + uint64_t deadline_set; /* next timer set */ + timer_call_data_t timer; /* timer used by threshold management */ + /* Stats: */ + uint64_t scans; /* num threshold timer scans */ + uint64_t preempts; /* num threshold reductions */ + uint64_t latency; /* average threshold latency */ + uint64_t latency_min; /* minimum threshold latency */ + uint64_t latency_max; /* maximum threshold latency */ } threshold_t; typedef struct { - mpqueue_head_t queue; /* longterm timer list */ - uint64_t enqueues; /* num timers queued */ - uint64_t dequeues; /* num timers dequeued */ - uint64_t escalates; /* num timers becoming shortterm */ - uint64_t scan_time; /* last time the list was scanned */ - threshold_t threshold; /* longterm timer threshold */ - uint64_t scan_limit; /* maximum scan time */ - uint64_t scan_interval; /* interval between LT "escalation" scans */ - uint64_t scan_pauses; /* num scans exceeding time limit */ + mpqueue_head_t queue; /* longterm timer list */ + uint64_t enqueues; /* num timers queued */ + uint64_t dequeues; /* num timers dequeued */ + uint64_t escalates; /* num timers becoming shortterm */ + uint64_t scan_time; /* last time the list was scanned */ + 
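Given the threshold definitions above (one second on x86_64, disabled via TIMER_LONGTERM_NONE elsewhere), the enqueue path later in this file classifies a timer as longterm only when longterm handling is enabled, the timer is not CPU-pinned, and its deadline lies beyond now + threshold. A minimal sketch of that decision, assuming an illustrative TIMER_CALL_LOCAL flag value:

#include <stdint.h>
#include <stdio.h>

#define LONGTERM_NONE UINT64_MAX   /* EndOfAllTime analogue */
#define TIMER_CALL_LOCAL 0x1       /* illustrative flag value */

static int is_longterm(uint64_t now, uint64_t deadline, uint32_t flags,
    uint64_t threshold_interval)
{
    if ((flags & TIMER_CALL_LOCAL) != 0)
        return 0;   /* pinned to a CPU: must stay on its local queue */
    if (threshold_interval == LONGTERM_NONE)
        return 0;   /* longterm queueing disabled on this platform */
    return deadline > now + threshold_interval;
}

int main(void)
{
    uint64_t one_sec = 1000000000ULL;   /* pretend 1 tick == 1 ns */
    printf("%d %d\n",
        is_longterm(0, 5 * one_sec, 0, one_sec),                 /* 1 */
        is_longterm(0, 5 * one_sec, TIMER_CALL_LOCAL, one_sec)); /* 0 */
    return 0;
}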
threshold_t threshold; /* longterm timer threshold */ + uint64_t scan_limit; /* maximum scan time */ + uint64_t scan_interval; /* interval between LT "escalation" scans */ + uint64_t scan_pauses; /* num scans exceeding time limit */ } timer_longterm_t; -timer_longterm_t timer_longterm = { - .scan_limit = TIMER_LONGTERM_SCAN_LIMIT, - .scan_interval = TIMER_LONGTERM_SCAN_INTERVAL, - }; - -static mpqueue_head_t *timer_longterm_queue = NULL; - -static void timer_longterm_init(void); -static void timer_longterm_callout( - timer_call_param_t p0, - timer_call_param_t p1); -extern void timer_longterm_scan( - timer_longterm_t *tlp, - uint64_t now); -static void timer_longterm_update( - timer_longterm_t *tlp); -static void timer_longterm_update_locked( - timer_longterm_t *tlp); -static mpqueue_head_t * timer_longterm_enqueue_unlocked( - timer_call_t call, - uint64_t now, - uint64_t deadline, - mpqueue_head_t ** old_queue, - uint64_t soft_deadline, - uint64_t ttd, - timer_call_param_t param1, - uint32_t callout_flags); -static void timer_longterm_dequeued_locked( - timer_call_t call); +timer_longterm_t timer_longterm = { + .scan_limit = TIMER_LONGTERM_SCAN_LIMIT, + .scan_interval = TIMER_LONGTERM_SCAN_INTERVAL, +}; + +static mpqueue_head_t *timer_longterm_queue = NULL; + +static void timer_longterm_init(void); +static void timer_longterm_callout( + timer_call_param_t p0, + timer_call_param_t p1); +extern void timer_longterm_scan( + timer_longterm_t *tlp, + uint64_t now); +static void timer_longterm_update( + timer_longterm_t *tlp); +static void timer_longterm_update_locked( + timer_longterm_t *tlp); +static mpqueue_head_t * timer_longterm_enqueue_unlocked( + timer_call_t call, + uint64_t now, + uint64_t deadline, + mpqueue_head_t ** old_queue, + uint64_t soft_deadline, + uint64_t ttd, + timer_call_param_t param1, + uint32_t callout_flags); +static void timer_longterm_dequeued_locked( + timer_call_t call); uint64_t past_deadline_timers; uint64_t past_deadline_deltas; @@ -185,19 +185,19 @@ enum {PAST_DEADLINE_TIMER_ADJUSTMENT_NS = 10 * 1000}; uint64_t past_deadline_timer_adjustment; static boolean_t timer_call_enter_internal(timer_call_t call, timer_call_param_t param1, uint64_t deadline, uint64_t leeway, uint32_t flags, boolean_t ratelimited); -boolean_t mach_timer_coalescing_enabled = TRUE; +boolean_t mach_timer_coalescing_enabled = TRUE; -mpqueue_head_t *timer_call_enqueue_deadline_unlocked( - timer_call_t call, - mpqueue_head_t *queue, - uint64_t deadline, - uint64_t soft_deadline, - uint64_t ttd, - timer_call_param_t param1, - uint32_t flags); +mpqueue_head_t *timer_call_enqueue_deadline_unlocked( + timer_call_t call, + mpqueue_head_t *queue, + uint64_t deadline, + uint64_t soft_deadline, + uint64_t ttd, + timer_call_param_t param1, + uint32_t flags); -mpqueue_head_t *timer_call_dequeue_unlocked( - timer_call_t call); +mpqueue_head_t *timer_call_dequeue_unlocked( + timer_call_t call); timer_coalescing_priority_params_t tcoal_prio_params; @@ -269,9 +269,9 @@ timer_call_queue_init(mpqueue_head_t *queue) void timer_call_setup( - timer_call_t call, - timer_call_func_t func, - timer_call_param_t param0) + timer_call_t call, + timer_call_func_t func, + timer_call_param_t param0) { DBG("timer_call_setup(%p,%p,%p)\n", call, func, param0); call_entry_setup(TCE(call), func, param0); @@ -281,46 +281,51 @@ timer_call_setup( #if TIMER_ASSERT static __inline__ mpqueue_head_t * timer_call_entry_dequeue( - timer_call_t entry) + timer_call_t entry) { - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + 
mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); - if (!hw_lock_held((hw_lock_t)&entry->lock)) + if (!hw_lock_held((hw_lock_t)&entry->lock)) { panic("_call_entry_dequeue() " - "entry %p is not locked\n", entry); + "entry %p is not locked\n", entry); + } /* * XXX The queue lock is actually a mutex in spin mode * but there's no way to test for it being held * so we pretend it's a spinlock! */ - if (!hw_lock_held((hw_lock_t)&old_queue->lock_data)) + if (!hw_lock_held((hw_lock_t)&old_queue->lock_data)) { panic("_call_entry_dequeue() " - "queue %p is not locked\n", old_queue); + "queue %p is not locked\n", old_queue); + } call_entry_dequeue(TCE(entry)); old_queue->count--; - return (old_queue); + return old_queue; } static __inline__ mpqueue_head_t * timer_call_entry_enqueue_deadline( - timer_call_t entry, - mpqueue_head_t *queue, - uint64_t deadline) + timer_call_t entry, + mpqueue_head_t *queue, + uint64_t deadline) { - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); - if (!hw_lock_held((hw_lock_t)&entry->lock)) + if (!hw_lock_held((hw_lock_t)&entry->lock)) { panic("_call_entry_enqueue_deadline() " - "entry %p is not locked\n", entry); + "entry %p is not locked\n", entry); + } /* XXX More lock pretense: */ - if (!hw_lock_held((hw_lock_t)&queue->lock_data)) + if (!hw_lock_held((hw_lock_t)&queue->lock_data)) { panic("_call_entry_enqueue_deadline() " - "queue %p is not locked\n", queue); - if (old_queue != NULL && old_queue != queue) + "queue %p is not locked\n", queue); + } + if (old_queue != NULL && old_queue != queue) { panic("_call_entry_enqueue_deadline() " - "old_queue %p != queue", old_queue); + "old_queue %p != queue", old_queue); + } call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline); @@ -328,23 +333,24 @@ timer_call_entry_enqueue_deadline( * fuzzy decisions can be made without lock acquisitions. */ timer_call_t thead = (timer_call_t)queue_first(&queue->head); - + queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline; - if (old_queue) + if (old_queue) { old_queue->count--; + } queue->count++; - return (old_queue); + return old_queue; } #else static __inline__ mpqueue_head_t * timer_call_entry_dequeue( - timer_call_t entry) + timer_call_t entry) { - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); call_entry_dequeue(TCE(entry)); old_queue->count--; @@ -354,11 +360,11 @@ timer_call_entry_dequeue( static __inline__ mpqueue_head_t * timer_call_entry_enqueue_deadline( - timer_call_t entry, - mpqueue_head_t *queue, - uint64_t deadline) + timer_call_t entry, + mpqueue_head_t *queue, + uint64_t deadline) { - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); call_entry_enqueue_deadline(TCE(entry), QUEUE(queue), deadline); @@ -369,8 +375,9 @@ timer_call_entry_enqueue_deadline( timer_call_t thead = (timer_call_t)queue_first(&queue->head); queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? 
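The earliest_soft_deadline caching above lets interrupt-time "fuzzy decisions" be made without taking the queue lock: the queue remembers when its head timer can first fire, and a rate-limited head advertises its hard deadline rather than its soft one, since coalescing may hold it back that far. Restated in isolation (flag value illustrative):

#include <stdint.h>
#include <stdio.h>

#define TIMER_CALL_RATELIMITED 0x2   /* illustrative flag value */

struct head_timer {
    uint64_t deadline;        /* hard deadline */
    uint64_t soft_deadline;   /* coalescing-adjusted deadline */
    uint32_t flags;
};

static uint64_t earliest_soft_deadline(const struct head_timer *head)
{
    return (head->flags & TIMER_CALL_RATELIMITED)
        ? head->deadline : head->soft_deadline;
}

int main(void)
{
    struct head_timer h = { .deadline = 200, .soft_deadline = 150,
                            .flags = TIMER_CALL_RATELIMITED };
    printf("%llu\n", (unsigned long long)earliest_soft_deadline(&h));
    return 0;
}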
TCE(thead)->deadline : thead->soft_deadline; - if (old_queue) + if (old_queue) { old_queue->count--; + } queue->count++; return old_queue; @@ -380,8 +387,8 @@ timer_call_entry_enqueue_deadline( static __inline__ void timer_call_entry_enqueue_tail( - timer_call_t entry, - mpqueue_head_t *queue) + timer_call_t entry, + mpqueue_head_t *queue) { call_entry_enqueue_tail(TCE(entry), QUEUE(queue)); queue->count++; @@ -394,9 +401,9 @@ timer_call_entry_enqueue_tail( */ static __inline__ void timer_call_entry_dequeue_async( - timer_call_t entry) + timer_call_t entry) { - mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); + mpqueue_head_t *old_queue = MPQUEUE(TCE(entry)->queue); if (old_queue) { old_queue->count--; (void) remque(qe(entry)); @@ -414,20 +421,20 @@ unsigned timer_call_enqueue_deadline_unlocked_async2; */ __inline__ mpqueue_head_t * timer_call_enqueue_deadline_unlocked( - timer_call_t call, - mpqueue_head_t *queue, - uint64_t deadline, - uint64_t soft_deadline, - uint64_t ttd, - timer_call_param_t param1, - uint32_t callout_flags) + timer_call_t call, + mpqueue_head_t *queue, + uint64_t deadline, + uint64_t soft_deadline, + uint64_t ttd, + timer_call_param_t param1, + uint32_t callout_flags) { - call_entry_t entry = TCE(call); - mpqueue_head_t *old_queue; + call_entry_t entry = TCE(call); + mpqueue_head_t *old_queue; DBG("timer_call_enqueue_deadline_unlocked(%p,%p,)\n", call, queue); - simple_lock(&call->lock); + simple_lock(&call->lock, LCK_GRP_NULL); old_queue = MPQUEUE(entry->queue); @@ -436,12 +443,12 @@ timer_call_enqueue_deadline_unlocked( if (call->async_dequeue) { /* collision (1c): timer already dequeued, clear flag */ #if TIMER_ASSERT - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - call->async_dequeue, - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), - 0x1c, 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(call), + call->async_dequeue, + VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + 0x1c, 0); timer_call_enqueue_deadline_unlocked_async1++; #endif call->async_dequeue = FALSE; @@ -452,8 +459,9 @@ timer_call_enqueue_deadline_unlocked( timer_call_enqueue_deadline_unlocked_async2++; #endif } - if (old_queue == timer_longterm_queue) + if (old_queue == timer_longterm_queue) { timer_longterm_dequeued_locked(call); + } if (old_queue != queue) { timer_queue_unlock(old_queue); timer_queue_lock_spin(queue); @@ -471,7 +479,7 @@ timer_call_enqueue_deadline_unlocked( timer_queue_unlock(queue); simple_unlock(&call->lock); - return (old_queue); + return old_queue; } #if TIMER_ASSERT @@ -480,34 +488,34 @@ unsigned timer_call_dequeue_unlocked_async2; #endif mpqueue_head_t * timer_call_dequeue_unlocked( - timer_call_t call) + timer_call_t call) { - call_entry_t entry = TCE(call); - mpqueue_head_t *old_queue; + call_entry_t entry = TCE(call); + mpqueue_head_t *old_queue; DBG("timer_call_dequeue_unlocked(%p)\n", call); - simple_lock(&call->lock); + simple_lock(&call->lock, LCK_GRP_NULL); old_queue = MPQUEUE(entry->queue); #if TIMER_ASSERT - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - call->async_dequeue, - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), - 0, 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(call), + call->async_dequeue, + VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + 0, 0); #endif if (old_queue != NULL) { timer_queue_lock_spin(old_queue); if 
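The async_dequeue handshake visible above (the 0x1c trace points) resolves a two-lock ordering problem: a remover that could not take both locks in order unlinks the entry without clearing its queue back-pointer and sets async_dequeue; the next path that does hold the entry lock observes the flag, clears it, and treats the entry as already dequeued rather than unlinking it twice. A loose, single-threaded approximation of that state machine:

#include <stdio.h>

struct entry {
    int on_queue;        /* stands in for TCE(call)->queue != NULL */
    int async_dequeue;   /* collision flag, cf. trace code 0x1c above */
};

static void locked_fixup(struct entry *e)
{
    if (e->async_dequeue) {
        e->async_dequeue = 0;   /* someone already unlinked us */
        e->on_queue = 0;        /* just reconcile the back-pointer */
    } else if (e->on_queue) {
        e->on_queue = 0;        /* normal dequeue */
    }
}

int main(void)
{
    struct entry e = { .on_queue = 1, .async_dequeue = 1 };
    locked_fixup(&e);
    printf("on_queue %d async %d\n", e.on_queue, e.async_dequeue);
    return 0;
}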
(call->async_dequeue) { /* collision (1c): timer already dequeued, clear flag */ #if TIMER_ASSERT - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), - call->async_dequeue, - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), - 0x1c, 0); + call->async_dequeue, + VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + 0x1c, 0); timer_call_dequeue_unlocked_async1++; #endif call->async_dequeue = FALSE; @@ -515,27 +523,30 @@ timer_call_dequeue_unlocked( } else { timer_call_entry_dequeue(call); } - if (old_queue == timer_longterm_queue) + if (old_queue == timer_longterm_queue) { timer_longterm_dequeued_locked(call); + } timer_queue_unlock(old_queue); } simple_unlock(&call->lock); - return (old_queue); + return old_queue; } static uint64_t past_deadline_timer_handle(uint64_t deadline, uint64_t ctime) { - uint64_t delta = (ctime - deadline); - - past_deadline_timers++; - past_deadline_deltas += delta; - if (delta > past_deadline_longest) - past_deadline_longest = deadline; - if (delta < past_deadline_shortest) - past_deadline_shortest = delta; - - return (ctime + past_deadline_timer_adjustment); + uint64_t delta = (ctime - deadline); + + past_deadline_timers++; + past_deadline_deltas += delta; + if (delta > past_deadline_longest) { + past_deadline_longest = deadline; + } + if (delta < past_deadline_shortest) { + past_deadline_shortest = delta; + } + + return ctime + past_deadline_timer_adjustment; } /* @@ -578,25 +589,25 @@ past_deadline_timer_handle(uint64_t deadline, uint64_t ctime) * methods to operate on timer_call structs as if they are call_entry structs. * These structures are identical except for their queue head pointer fields. * - * In the debug case, we assert that the timer call locking protocol + * In the debug case, we assert that the timer call locking protocol * is being obeyed. 
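past_deadline_timer_handle() above never runs a late timer inline: it records stats and pushes the deadline a small fixed amount into the future (PAST_DEADLINE_TIMER_ADJUSTMENT_NS, 10 us) so the timer takes the normal expiry path. A standalone copy of that logic; note the hunk above assigns `deadline` to the longest-delta statistic where `delta` appears to be the intent, and the sketch records the delta:

#include <stdint.h>
#include <stdio.h>

static uint64_t timers_past, deltas_past;
static uint64_t shortest = UINT64_MAX, longest;
static uint64_t adjustment = 10000;   /* 10 us, pretending 1 tick == 1 ns */

static uint64_t handle_past_deadline(uint64_t deadline, uint64_t now)
{
    uint64_t delta = now - deadline;

    timers_past++;
    deltas_past += delta;
    if (delta > longest)
        longest = delta;
    if (delta < shortest)
        shortest = delta;
    return now + adjustment;   /* reschedule slightly in the future */
}

int main(void)
{
    printf("%llu\n", (unsigned long long)handle_past_deadline(90, 100));
    return 0;
}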
*/ -static boolean_t +static boolean_t timer_call_enter_internal( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint64_t leeway, - uint32_t flags, - boolean_t ratelimited) + timer_call_t call, + timer_call_param_t param1, + uint64_t deadline, + uint64_t leeway, + uint32_t flags, + boolean_t ratelimited) { - mpqueue_head_t *queue = NULL; - mpqueue_head_t *old_queue; - spl_t s; - uint64_t slop; - uint32_t urgency; - uint64_t sdeadline, ttd; + mpqueue_head_t *queue = NULL; + mpqueue_head_t *old_queue; + spl_t s; + uint64_t slop; + uint32_t urgency; + uint64_t sdeadline, ttd; assert(call->call_entry.func != NULL); s = splclock(); @@ -605,7 +616,7 @@ timer_call_enter_internal( uint64_t ctime = mach_absolute_time(); TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ENTER | DBG_FUNC_START, + DECR_TIMER_ENTER | DBG_FUNC_START, VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_ADDRHIDE(param1), deadline, flags, 0); @@ -614,8 +625,9 @@ timer_call_enter_internal( boolean_t slop_ratelimited = FALSE; slop = timer_call_slop(deadline, ctime, urgency, current_thread(), &slop_ratelimited); - if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop) + if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop) { slop = leeway; + } if (UINT64_MAX - deadline <= slop) { deadline = UINT64_MAX; @@ -637,7 +649,7 @@ timer_call_enter_internal( ttd = sdeadline - ctime; #if CONFIG_DTRACE DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func, - timer_call_param_t, TCE(call)->param0, uint32_t, flags, + timer_call_param_t, TCE(call)->param0, uint32_t, flags, (deadline - sdeadline), (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call); #endif @@ -660,13 +672,13 @@ timer_call_enter_internal( #endif TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ENTER | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), - (old_queue != NULL), deadline, queue->count, 0); + DECR_TIMER_ENTER | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), + (old_queue != NULL), deadline, queue->count, 0); splx(s); - return (old_queue != NULL); + return old_queue != NULL; } /* @@ -675,41 +687,41 @@ timer_call_enter_internal( */ boolean_t timer_call_enter( - timer_call_t call, - uint64_t deadline, - uint32_t flags) + timer_call_t call, + uint64_t deadline, + uint32_t flags) { return timer_call_enter_internal(call, NULL, deadline, 0, flags, FALSE); } boolean_t timer_call_enter1( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint32_t flags) + timer_call_t call, + timer_call_param_t param1, + uint64_t deadline, + uint32_t flags) { return timer_call_enter_internal(call, param1, deadline, 0, flags, FALSE); } boolean_t timer_call_enter_with_leeway( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint64_t leeway, - uint32_t flags, - boolean_t ratelimited) + timer_call_t call, + timer_call_param_t param1, + uint64_t deadline, + uint64_t leeway, + uint32_t flags, + boolean_t ratelimited) { return timer_call_enter_internal(call, param1, deadline, leeway, flags, ratelimited); } -boolean_t +boolean_t timer_call_quantum_timer_enter( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint64_t ctime) + timer_call_t call, + timer_call_param_t param1, + uint64_t deadline, + uint64_t ctime) { assert(call->call_entry.func != NULL); assert(ml_get_interrupts_enabled() == FALSE); @@ -717,10 +729,10 @@ timer_call_quantum_timer_enter( uint32_t flags = TIMER_CALL_SYS_CRITICAL | TIMER_CALL_LOCAL; TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(call), - 
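Two visible guards in timer_call_enter_internal() above: a caller-supplied leeway may widen (never narrow) the computed coalescing slop, and deadlines near the top of the 64-bit range are clamped so deadline + slop cannot wrap past zero. A sketch of just those guards; the flag value and the zeroing of slop after the clamp are assumptions, since the hunk elides what follows:

#include <stdint.h>
#include <stdio.h>

#define TIMER_CALL_LEEWAY 0x4   /* illustrative flag value */

static uint64_t coalesced_slop(uint64_t deadline, uint64_t slop,
    uint64_t leeway, uint32_t flags, uint64_t *clamped_deadline)
{
    if ((flags & TIMER_CALL_LEEWAY) != 0 && leeway > slop)
        slop = leeway;   /* leeway can only widen the slop */

    if (UINT64_MAX - deadline <= slop) {
        deadline = UINT64_MAX;   /* deadline + slop would wrap */
        slop = 0;                /* assumed: nothing left to add */
    }
    *clamped_deadline = deadline;
    return slop;
}

int main(void)
{
    uint64_t d;
    uint64_t s = coalesced_slop(UINT64_MAX - 5, 10, 0, 0, &d);
    printf("deadline %llu slop %llu\n",
        (unsigned long long)d, (unsigned long long)s);
    return 0;
}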
VM_KERNEL_ADDRHIDE(param1), deadline, - flags, 0); - + VM_KERNEL_UNSLIDE_OR_PERM(call), + VM_KERNEL_ADDRHIDE(param1), deadline, + flags, 0); + if (__improbable(deadline < ctime)) { deadline = past_deadline_timer_handle(deadline, ctime); } @@ -728,10 +740,10 @@ timer_call_quantum_timer_enter( uint64_t ttd = deadline - ctime; #if CONFIG_DTRACE DTRACE_TMR7(callout__create, timer_call_func_t, TCE(call)->func, - timer_call_param_t, TCE(call)->param0, uint32_t, flags, 0, - (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call); + timer_call_param_t, TCE(call)->param0, uint32_t, flags, 0, + (ttd >> 32), (unsigned) (ttd & 0xFFFFFFFF), call); #endif - + quantum_timer_set_deadline(deadline); TCE(call)->deadline = deadline; TCE(call)->param1 = param1; @@ -743,9 +755,9 @@ timer_call_quantum_timer_enter( #endif TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_ENTER | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), - 1, deadline, 0, 0); - + VM_KERNEL_UNSLIDE_OR_PERM(call), + 1, deadline, 0, 0); + return true; } @@ -757,19 +769,19 @@ timer_call_quantum_timer_cancel( assert(ml_get_interrupts_enabled() == FALSE); TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CANCEL | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(call), TCE(call)->deadline, - 0, call->flags, 0); - + DECR_TIMER_CANCEL | DBG_FUNC_START, + VM_KERNEL_UNSLIDE_OR_PERM(call), TCE(call)->deadline, + 0, call->flags, 0); + TCE(call)->deadline = 0; quantum_timer_set_deadline(0); TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CANCEL | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), 0, - TCE(call)->deadline - mach_absolute_time(), - TCE(call)->deadline - TCE(call)->entry_time, 0); - + DECR_TIMER_CANCEL | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), 0, + TCE(call)->deadline - mach_absolute_time(), + TCE(call)->deadline - TCE(call)->entry_time, 0); + #if CONFIG_DTRACE DTRACE_TMR6(callout__cancel, timer_call_func_t, TCE(call)->func, timer_call_param_t, TCE(call)->param0, uint32_t, call->flags, 0, @@ -781,17 +793,17 @@ timer_call_quantum_timer_cancel( boolean_t timer_call_cancel( - timer_call_t call) + timer_call_t call) { - mpqueue_head_t *old_queue; - spl_t s; + mpqueue_head_t *old_queue; + spl_t s; s = splclock(); TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CANCEL | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(call), - TCE(call)->deadline, call->soft_deadline, call->flags, 0); + DECR_TIMER_CANCEL | DBG_FUNC_START, + VM_KERNEL_UNSLIDE_OR_PERM(call), + TCE(call)->deadline, call->soft_deadline, call->flags, 0); old_queue = timer_call_dequeue_unlocked(call); @@ -799,21 +811,20 @@ timer_call_cancel( timer_queue_lock_spin(old_queue); if (!queue_empty(&old_queue->head)) { timer_queue_cancel(old_queue, TCE(call)->deadline, CE(queue_first(&old_queue->head))->deadline); - timer_call_t thead = (timer_call_t)queue_first(&old_queue->head); - old_queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? TCE(thead)->deadline : thead->soft_deadline; - } - else { + timer_call_t thead = (timer_call_t)queue_first(&old_queue->head); + old_queue->earliest_soft_deadline = thead->flags & TIMER_CALL_RATELIMITED ? 
TCE(thead)->deadline : thead->soft_deadline; + } else { timer_queue_cancel(old_queue, TCE(call)->deadline, UINT64_MAX); old_queue->earliest_soft_deadline = UINT64_MAX; } timer_queue_unlock(old_queue); } TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CANCEL | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), - VM_KERNEL_UNSLIDE_OR_PERM(old_queue), - TCE(call)->deadline - mach_absolute_time(), - TCE(call)->deadline - TCE(call)->entry_time, 0); + DECR_TIMER_CANCEL | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), + VM_KERNEL_UNSLIDE_OR_PERM(old_queue), + TCE(call)->deadline - mach_absolute_time(), + TCE(call)->deadline - TCE(call)->entry_time, 0); splx(s); #if CONFIG_DTRACE @@ -822,19 +833,19 @@ timer_call_cancel( (call->ttd >> 32), (unsigned) (call->ttd & 0xFFFFFFFF)); #endif - return (old_queue != NULL); + return old_queue != NULL; } -static uint32_t timer_queue_shutdown_lock_skips; +static uint32_t timer_queue_shutdown_lock_skips; static uint32_t timer_queue_shutdown_discarded; void timer_queue_shutdown( - mpqueue_head_t *queue) + mpqueue_head_t *queue) { - timer_call_t call; - mpqueue_head_t *new_queue; - spl_t s; + timer_call_t call; + mpqueue_head_t *new_queue; + spl_t s; DBG("timer_queue_shutdown(%p)\n", queue); @@ -845,7 +856,7 @@ timer_queue_shutdown( while ((void)timer_queue_lock_spin(queue), !queue_empty(&queue->head)) { call = TIMER_CALL(queue_first(&queue->head)); - if (!simple_lock_try(&call->lock)) { + if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { /* * case (2b) lock order inversion, dequeue and skip * Don't change the call_entry queue back-pointer @@ -854,12 +865,12 @@ timer_queue_shutdown( timer_queue_shutdown_lock_skips++; timer_call_entry_dequeue_async(call); #if TIMER_ASSERT - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - call->async_dequeue, - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), - 0x2b, 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(call), + call->async_dequeue, + VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + 0x2b, 0); #endif timer_queue_unlock(queue); continue; @@ -893,56 +904,57 @@ timer_queue_shutdown( void quantum_timer_expire( - uint64_t deadline) + uint64_t deadline) { processor_t processor = current_processor(); timer_call_t call = TIMER_CALL(&(processor->quantum_timer)); - if (__improbable(TCE(call)->deadline > deadline)) + if (__improbable(TCE(call)->deadline > deadline)) { panic("CPU quantum timer deadlin out of sync with timer call deadline"); + } - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, DECR_TIMER_EXPIRE | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(call), TCE(call)->deadline, TCE(call)->deadline, TCE(call)->entry_time, 0); - + timer_call_func_t func = TCE(call)->func; - timer_call_param_t param0 = TCE(call)->param0; + timer_call_param_t param0 = TCE(call)->param0; timer_call_param_t param1 = TCE(call)->param1; - - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CALLOUT | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), - VM_KERNEL_ADDRHIDE(param0), - VM_KERNEL_ADDRHIDE(param1), - 0); + + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_CALLOUT | DBG_FUNC_START, + VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), + VM_KERNEL_ADDRHIDE(param0), + VM_KERNEL_ADDRHIDE(param1), + 0); #if CONFIG_DTRACE DTRACE_TMR7(callout__start, timer_call_func_t, func, - timer_call_param_t, param0, unsigned, call->flags, - 0, (call->ttd >> 32), - (unsigned) (call->ttd & 0xFFFFFFFF), call); + 
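The "case (2b) lock order inversion" handling in timer_queue_shutdown() above follows from the locking protocol: the enter path takes entry-then-queue, so a scanner holding the queue lock may only try the entry lock. On failure it unlinks asynchronously (back-pointer left intact, async_dequeue set) and restarts the scan instead of deadlocking. A skeleton of one scan step:

#include <stdio.h>

struct entry { int lock_busy; int async_dequeue; };

static int trylock(struct entry *e) { return !e->lock_busy; }

static int shutdown_step(struct entry *e)
{
    if (!trylock(e)) {
        e->async_dequeue = 1;   /* dequeue-and-skip; owner reconciles later */
        return 0;               /* drop the queue lock and rescan */
    }
    /* ... normal path: requeue the call on another CPU's queue ... */
    return 1;
}

int main(void)
{
    struct entry e = { .lock_busy = 1, .async_dequeue = 0 };
    printf("handled %d async %d\n", shutdown_step(&e), e.async_dequeue);
    return 0;
}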
timer_call_param_t, param0, unsigned, call->flags, + 0, (call->ttd >> 32), + (unsigned) (call->ttd & 0xFFFFFFFF), call); #endif (*func)(param0, param1); - - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CALLOUT | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), - VM_KERNEL_ADDRHIDE(param0), - VM_KERNEL_ADDRHIDE(param1), - 0); + + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_CALLOUT | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), + VM_KERNEL_ADDRHIDE(param0), + VM_KERNEL_ADDRHIDE(param1), + 0); } -static uint32_t timer_queue_expire_lock_skips; +static uint32_t timer_queue_expire_lock_skips; uint64_t timer_queue_expire_with_options( - mpqueue_head_t *queue, - uint64_t deadline, - boolean_t rescan) + mpqueue_head_t *queue, + uint64_t deadline, + boolean_t rescan) { - timer_call_t call = NULL; + timer_call_t call = NULL; uint32_t tc_iterations = 0; DBG("timer_queue_expire(%p,)\n", queue); @@ -953,31 +965,34 @@ timer_queue_expire_with_options( /* Upon processing one or more timer calls, refresh the * deadline to account for time elapsed in the callout */ - if (++tc_iterations > 1) + if (++tc_iterations > 1) { cur_deadline = mach_absolute_time(); + } - if (call == NULL) + if (call == NULL) { call = TIMER_CALL(queue_first(&queue->head)); + } if (call->soft_deadline <= cur_deadline) { - timer_call_func_t func; - timer_call_param_t param0, param1; + timer_call_func_t func; + timer_call_param_t param0, param1; TCOAL_DEBUG(0xDDDD0000, queue->earliest_soft_deadline, call->soft_deadline, 0, 0, 0); - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_EXPIRE | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - call->soft_deadline, - TCE(call)->deadline, - TCE(call)->entry_time, 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_EXPIRE | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(call), + call->soft_deadline, + TCE(call)->deadline, + TCE(call)->entry_time, 0); if ((call->flags & TIMER_CALL_RATELIMITED) && (TCE(call)->deadline > cur_deadline)) { - if (rescan == FALSE) + if (rescan == FALSE) { break; + } } - if (!simple_lock_try(&call->lock)) { + if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { /* case (2b) lock inversion, dequeue and skip */ timer_queue_expire_lock_skips++; timer_call_entry_dequeue_async(call); @@ -994,12 +1009,12 @@ timer_queue_expire_with_options( simple_unlock(&call->lock); timer_queue_unlock(queue); - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CALLOUT | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), - VM_KERNEL_ADDRHIDE(param0), - VM_KERNEL_ADDRHIDE(param1), - 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_CALLOUT | DBG_FUNC_START, + VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), + VM_KERNEL_ADDRHIDE(param0), + VM_KERNEL_ADDRHIDE(param1), + 0); #if CONFIG_DTRACE DTRACE_TMR7(callout__start, timer_call_func_t, func, @@ -1019,12 +1034,12 @@ timer_queue_expire_with_options( param0, param1, call); #endif - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_CALLOUT | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), - VM_KERNEL_ADDRHIDE(param0), - VM_KERNEL_ADDRHIDE(param1), - 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_CALLOUT | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(call), VM_KERNEL_UNSLIDE(func), + VM_KERNEL_ADDRHIDE(param0), + VM_KERNEL_ADDRHIDE(param1), + 0); call = NULL; timer_queue_lock_spin(queue); } else { @@ -1043,9 +1058,9 @@ timer_queue_expire_with_options( * annuls all timer adjustments, i.e. the "soft * deadline" is the sort key. 
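The expiry loop in timer_queue_expire_with_options() above re-reads the clock once it has run at least one callout, rather than trusting the deadline it was passed: time spent inside (*func)() can make further timers eligible. A minimal rendering of that guard with a fake clock:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_now = 100;
static uint64_t fake_clock(void) { return fake_now; }

static uint32_t expire_ready(uint64_t deadline, const uint64_t *soft, int n)
{
    uint64_t cur_deadline = deadline;
    uint32_t tc_iterations = 0, fired = 0;

    for (int i = 0; i < n; i++) {
        /* Refresh the deadline after the first callout, as above. */
        if (++tc_iterations > 1)
            cur_deadline = fake_clock();
        if (soft[i] > cur_deadline)
            break;
        fired++;
        fake_now += 10;   /* the callout itself takes time */
    }
    return fired;
}

int main(void)
{
    uint64_t soft[] = { 90, 105, 130 };
    /* 105 only fires because the refresh sees time advanced to 110. */
    printf("fired %u\n", expire_ready(100, soft, 3));
    return 0;
}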
*/ - + if (timer_resort_threshold(skew)) { - if (__probable(simple_lock_try(&call->lock))) { + if (__probable(simple_lock_try(&call->lock, LCK_GRP_NULL))) { timer_call_entry_dequeue(call); timer_call_entry_enqueue_deadline(call, queue, call->soft_deadline); simple_unlock(&call->lock); @@ -1054,8 +1069,9 @@ timer_queue_expire_with_options( } if (call) { call = TIMER_CALL(queue_next(qe(call))); - if (queue_end(&queue->head, qe(call))) + if (queue_end(&queue->head, qe(call))) { break; + } } } } @@ -1071,19 +1087,19 @@ timer_queue_expire_with_options( timer_queue_unlock(queue); - return (cur_deadline); + return cur_deadline; } uint64_t timer_queue_expire( - mpqueue_head_t *queue, - uint64_t deadline) + mpqueue_head_t *queue, + uint64_t deadline) { return timer_queue_expire_with_options(queue, deadline, FALSE); } extern int serverperfmode; -static uint32_t timer_queue_migrate_lock_skips; +static uint32_t timer_queue_migrate_lock_skips; /* * timer_queue_migrate() is called by timer_queue_migrate_cpu() * to move timer requests from the local processor (queue_from) @@ -1092,9 +1108,9 @@ static uint32_t timer_queue_migrate_lock_skips; int timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to) { - timer_call_t call; - timer_call_t head_to; - int timers_migrated = 0; + timer_call_t call; + timer_call_t head_to; + int timers_migrated = 0; DBG("timer_queue_migrate(%p,%p)\n", queue_from, queue_to); @@ -1123,7 +1139,7 @@ timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to) * so that we need not have the target resync. */ - timer_queue_lock_spin(queue_to); + timer_queue_lock_spin(queue_to); head_to = TIMER_CALL(queue_first(&queue_to->head)); if (queue_empty(&queue_to->head)) { @@ -1131,7 +1147,7 @@ timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to) goto abort1; } - timer_queue_lock_spin(queue_from); + timer_queue_lock_spin(queue_from); if (queue_empty(&queue_from->head)) { timers_migrated = -2; @@ -1156,15 +1172,15 @@ timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to) /* migration loop itself -- both queues are locked */ while (!queue_empty(&queue_from->head)) { call = TIMER_CALL(queue_first(&queue_from->head)); - if (!simple_lock_try(&call->lock)) { + if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { /* case (2b) lock order inversion, dequeue only */ #ifdef TIMER_ASSERT - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), - VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data), - 0x2b, 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(call), + VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data), + 0x2b, 0); #endif timer_queue_migrate_lock_skips++; timer_call_entry_dequeue_async(call); @@ -1178,9 +1194,9 @@ timer_queue_migrate(mpqueue_head_t *queue_from, mpqueue_head_t *queue_to) } queue_from->earliest_soft_deadline = UINT64_MAX; abort2: - timer_queue_unlock(queue_from); + timer_queue_unlock(queue_from); abort1: - timer_queue_unlock(queue_to); + timer_queue_unlock(queue_to); return timers_migrated; } @@ -1190,44 +1206,45 @@ timer_queue_trace_cpu(int ncpu) { timer_call_nosync_cpu( ncpu, - (void(*)(void *))timer_queue_trace, + (void (*)(void *))timer_queue_trace, (void*) timer_queue_cpu(ncpu)); } void timer_queue_trace( - mpqueue_head_t *queue) + mpqueue_head_t *queue) { - timer_call_t call; - spl_t s; + 
timer_call_t call; + spl_t s; - if (!kdebug_enable) + if (!kdebug_enable) { return; + } s = splclock(); timer_queue_lock_spin(queue); TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_QUEUE | DBG_FUNC_START, - queue->count, mach_absolute_time(), 0, 0, 0); + DECR_TIMER_QUEUE | DBG_FUNC_START, + queue->count, mach_absolute_time(), 0, 0, 0); if (!queue_empty(&queue->head)) { call = TIMER_CALL(queue_first(&queue->head)); do { TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_QUEUE | DBG_FUNC_NONE, - call->soft_deadline, - TCE(call)->deadline, - TCE(call)->entry_time, - VM_KERNEL_UNSLIDE(TCE(call)->func), - 0); + DECR_TIMER_QUEUE | DBG_FUNC_NONE, + call->soft_deadline, + TCE(call)->deadline, + TCE(call)->entry_time, + VM_KERNEL_UNSLIDE(TCE(call)->func), + 0); call = TIMER_CALL(queue_next(qe(call))); } while (!queue_end(&queue->head, qe(call))); } TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_QUEUE | DBG_FUNC_END, - queue->count, mach_absolute_time(), 0, 0, 0); + DECR_TIMER_QUEUE | DBG_FUNC_END, + queue->count, mach_absolute_time(), 0, 0, 0); timer_queue_unlock(queue); splx(s); @@ -1236,11 +1253,12 @@ timer_queue_trace( void timer_longterm_dequeued_locked(timer_call_t call) { - timer_longterm_t *tlp = &timer_longterm; + timer_longterm_t *tlp = &timer_longterm; tlp->dequeues++; - if (call == tlp->threshold.call) + if (call == tlp->threshold.call) { tlp->threshold.call = NULL; + } } /* @@ -1248,18 +1266,18 @@ timer_longterm_dequeued_locked(timer_call_t call) * and adjust the next timer callout deadline if the new timer is first. */ mpqueue_head_t * -timer_longterm_enqueue_unlocked(timer_call_t call, - uint64_t now, - uint64_t deadline, - mpqueue_head_t **old_queue, - uint64_t soft_deadline, - uint64_t ttd, - timer_call_param_t param1, - uint32_t callout_flags) +timer_longterm_enqueue_unlocked(timer_call_t call, + uint64_t now, + uint64_t deadline, + mpqueue_head_t **old_queue, + uint64_t soft_deadline, + uint64_t ttd, + timer_call_param_t param1, + uint32_t callout_flags) { - timer_longterm_t *tlp = &timer_longterm; - boolean_t update_required = FALSE; - uint64_t longterm_threshold; + timer_longterm_t *tlp = &timer_longterm; + boolean_t update_required = FALSE; + uint64_t longterm_threshold; longterm_threshold = now + tlp->threshold.interval; @@ -1271,11 +1289,12 @@ timer_longterm_enqueue_unlocked(timer_call_t call, */ if ((callout_flags & TIMER_CALL_LOCAL) != 0 || (tlp->threshold.interval == TIMER_LONGTERM_NONE) || - (deadline <= longterm_threshold)) + (deadline <= longterm_threshold)) { return NULL; + } /* - * Remove timer from its current queue, if any. + * Remove timer from its current queue, if any. */ *old_queue = timer_call_dequeue_unlocked(call); @@ -1284,7 +1303,7 @@ timer_longterm_enqueue_unlocked(timer_call_t call, * whether an update is necessary. */ assert(!ml_get_interrupts_enabled()); - simple_lock(&call->lock); + simple_lock(&call->lock, LCK_GRP_NULL); timer_queue_lock_spin(timer_longterm_queue); TCE(call)->deadline = deadline; TCE(call)->param1 = param1; @@ -1292,13 +1311,13 @@ timer_longterm_enqueue_unlocked(timer_call_t call, call->soft_deadline = soft_deadline; call->flags = callout_flags; timer_call_entry_enqueue_tail(call, timer_longterm_queue); - + tlp->enqueues++; /* * We'll need to update the currently set threshold timer * if the new deadline is sooner and no sooner update is in flight. 
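/*
 * Illustrative sketch (not part of the patch): the enqueue path above routes a
 * timer to the longterm queue only when it is migratable, the longterm
 * threshold is enabled, and its deadline lies beyond now + threshold. The
 * constants below are stand-ins for TIMER_LONGTERM_NONE and TIMER_CALL_LOCAL,
 * whose real values live elsewhere in the kernel:
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_LONGTERM_NONE	UINT64_MAX
#define SKETCH_CALL_LOCAL	0x100

static bool
sketch_is_longterm(uint64_t now, uint64_t deadline,
    uint64_t threshold_interval, uint32_t callout_flags)
{
	if (callout_flags & SKETCH_CALL_LOCAL) {
		return false;	/* non-migratable: keep on the local queue */
	}
	if (threshold_interval == SKETCH_LONGTERM_NONE) {
		return false;	/* longterm processing disabled */
	}
	return deadline > now + threshold_interval;
}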
- */ + */ if (deadline < tlp->threshold.deadline && deadline < tlp->threshold.preempted) { tlp->threshold.preempted = deadline; @@ -1307,7 +1326,7 @@ timer_longterm_enqueue_unlocked(timer_call_t call, } timer_queue_unlock(timer_longterm_queue); simple_unlock(&call->lock); - + if (update_required) { /* * Note: this call expects that calling the master cpu @@ -1315,7 +1334,7 @@ timer_longterm_enqueue_unlocked(timer_call_t call, */ timer_call_nosync_cpu( master_cpu, - (void (*)(void *)) timer_longterm_update, + (void (*)(void *))timer_longterm_update, (void *)tlp); } @@ -1330,7 +1349,7 @@ timer_longterm_enqueue_unlocked(timer_call_t call, * The scan is similar to the timer migrate sequence but is performed by * successively examining each timer on the longterm queue: * - if within the short-term threshold - * - enter on the local queue (unless being deleted), + * - enter on the local queue (unless being deleted), * - otherwise: * - if sooner, deadline becomes the next threshold deadline. * The total scan time is limited to TIMER_LONGTERM_SCAN_LIMIT. Should this be @@ -1338,27 +1357,29 @@ timer_longterm_enqueue_unlocked(timer_call_t call, * the timer queues. Longterm timers firing late is not critical. */ void -timer_longterm_scan(timer_longterm_t *tlp, - uint64_t time_start) +timer_longterm_scan(timer_longterm_t *tlp, + uint64_t time_start) { - queue_entry_t qe; - timer_call_t call; - uint64_t threshold; - uint64_t deadline; - uint64_t time_limit = time_start + tlp->scan_limit; - mpqueue_head_t *timer_master_queue; + queue_entry_t qe; + timer_call_t call; + uint64_t threshold; + uint64_t deadline; + uint64_t time_limit = time_start + tlp->scan_limit; + mpqueue_head_t *timer_master_queue; assert(!ml_get_interrupts_enabled()); assert(cpu_number() == master_cpu); - if (tlp->threshold.interval != TIMER_LONGTERM_NONE) + if (tlp->threshold.interval != TIMER_LONGTERM_NONE) { threshold = time_start + tlp->threshold.interval; + } tlp->threshold.deadline = TIMER_LONGTERM_NONE; tlp->threshold.call = NULL; - if (queue_empty(&timer_longterm_queue->head)) + if (queue_empty(&timer_longterm_queue->head)) { return; + } timer_master_queue = timer_queue_cpu(master_cpu); timer_queue_lock_spin(timer_master_queue); @@ -1368,15 +1389,15 @@ timer_longterm_scan(timer_longterm_t *tlp, call = TIMER_CALL(qe); deadline = call->soft_deadline; qe = queue_next(qe); - if (!simple_lock_try(&call->lock)) { + if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { /* case (2c) lock order inversion, dequeue only */ #ifdef TIMER_ASSERT TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), - VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data), - 0x2c, 0); + DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(call), + VM_KERNEL_UNSLIDE_OR_PERM(TCE(call)->queue), + VM_KERNEL_UNSLIDE_OR_PERM(call->lock.interlock.lock_data), + 0x2c, 0); #endif timer_call_entry_dequeue_async(call); continue; @@ -1387,22 +1408,23 @@ timer_longterm_scan(timer_longterm_t *tlp, * to the local (boot) processor's queue. 
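/*
 * Illustrative sketch (not part of the patch): the scan above escalates any
 * longterm entry that has drifted inside the short-term threshold, tracks the
 * soonest deadline left behind, and bails out once the scan itself exceeds its
 * time budget (the SCAN_AGAIN case). sketch_now() and sketch_escalate() are
 * hypothetical stand-ins:
 */
#include <stddef.h>
#include <stdint.h>

struct sketch_entry { struct sketch_entry *next; uint64_t deadline; };

extern uint64_t sketch_now(void);
extern void sketch_escalate(struct sketch_entry *e);	/* unlinks e, moves it short-term */

/* Returns the soonest deadline still on the longterm list, or UINT64_MAX. */
static uint64_t
sketch_scan(struct sketch_entry *head, uint64_t threshold, uint64_t time_limit)
{
	uint64_t soonest = UINT64_MAX;

	for (struct sketch_entry *e = head; e != NULL; ) {
		struct sketch_entry *next = e->next;	/* e may be unlinked below */
		if (e->deadline < threshold) {
			sketch_escalate(e);
		} else if (e->deadline < soonest) {
			soonest = e->deadline;		/* candidate threshold timer */
		}
		if (sketch_now() >= time_limit) {
			break;				/* budget exhausted: rescan later */
		}
		e = next;
	}
	return soonest;
}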
*/ #ifdef TIMER_ASSERT - if (deadline < time_start) + if (deadline < time_start) { TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_OVERDUE | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - deadline, - time_start, - threshold, - 0); + DECR_TIMER_OVERDUE | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(call), + deadline, + time_start, + threshold, + 0); + } #endif TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_ESCALATE | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(call), - TCE(call)->deadline, - TCE(call)->entry_time, - VM_KERNEL_UNSLIDE(TCE(call)->func), - 0); + DECR_TIMER_ESCALATE | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(call), + TCE(call)->deadline, + TCE(call)->entry_time, + VM_KERNEL_UNSLIDE(TCE(call)->func), + 0); tlp->escalates++; timer_call_entry_dequeue(call); timer_call_entry_enqueue_deadline( @@ -1425,7 +1447,7 @@ timer_longterm_scan(timer_longterm_t *tlp, tlp->threshold.deadline = TIMER_LONGTERM_SCAN_AGAIN; tlp->scan_pauses++; DBG("timer_longterm_scan() paused %llu, qlen: %llu\n", - time_limit, tlp->queue.count); + time_limit, tlp->queue.count); break; } } @@ -1436,7 +1458,7 @@ timer_longterm_scan(timer_longterm_t *tlp, void timer_longterm_callout(timer_call_param_t p0, __unused timer_call_param_t p1) { - timer_longterm_t *tlp = (timer_longterm_t *) p0; + timer_longterm_t *tlp = (timer_longterm_t *) p0; timer_longterm_update(tlp); } @@ -1444,14 +1466,14 @@ timer_longterm_callout(timer_call_param_t p0, __unused timer_call_param_t p1) void timer_longterm_update_locked(timer_longterm_t *tlp) { - uint64_t latency; + uint64_t latency; - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_UPDATE | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue), - tlp->threshold.deadline, - tlp->threshold.preempted, - tlp->queue.count, 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_UPDATE | DBG_FUNC_START, + VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue), + tlp->threshold.deadline, + tlp->threshold.preempted, + tlp->queue.count, 0); tlp->scan_time = mach_absolute_time(); if (tlp->threshold.preempted != TIMER_LONGTERM_NONE) { @@ -1470,20 +1492,21 @@ timer_longterm_update_locked(timer_longterm_t *tlp) * Maintain a moving average of our wakeup latency. * Clamp latency to 0 and ignore above threshold interval. 
*/ - if (tlp->scan_time > tlp->threshold.deadline_set) + if (tlp->scan_time > tlp->threshold.deadline_set) { latency = tlp->scan_time - tlp->threshold.deadline_set; - else + } else { latency = 0; + } if (latency < tlp->threshold.interval) { tlp->threshold.latency_min = - MIN(tlp->threshold.latency_min, latency); + MIN(tlp->threshold.latency_min, latency); tlp->threshold.latency_max = - MAX(tlp->threshold.latency_max, latency); + MAX(tlp->threshold.latency_max, latency); tlp->threshold.latency = - (tlp->threshold.latency*99 + latency) / 100; + (tlp->threshold.latency * 99 + latency) / 100; } - timer_longterm_scan(tlp, tlp->scan_time); + timer_longterm_scan(tlp, tlp->scan_time); } tlp->threshold.deadline_set = tlp->threshold.deadline; @@ -1493,38 +1516,41 @@ timer_longterm_update_locked(timer_longterm_t *tlp) tlp->threshold.deadline_set -= tlp->threshold.margin; tlp->threshold.deadline_set -= tlp->threshold.latency; } - + /* Throttle next scan time */ uint64_t scan_clamp = mach_absolute_time() + tlp->scan_interval; - if (tlp->threshold.deadline_set < scan_clamp) + if (tlp->threshold.deadline_set < scan_clamp) { tlp->threshold.deadline_set = scan_clamp; + } - TIMER_KDEBUG_TRACE(KDEBUG_TRACE, - DECR_TIMER_UPDATE | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue), - tlp->threshold.deadline, - tlp->threshold.scans, - tlp->queue.count, 0); + TIMER_KDEBUG_TRACE(KDEBUG_TRACE, + DECR_TIMER_UPDATE | DBG_FUNC_END, + VM_KERNEL_UNSLIDE_OR_PERM(&tlp->queue), + tlp->threshold.deadline, + tlp->threshold.scans, + tlp->queue.count, 0); } void timer_longterm_update(timer_longterm_t *tlp) { - spl_t s = splclock(); + spl_t s = splclock(); timer_queue_lock_spin(timer_longterm_queue); - if (cpu_number() != master_cpu) + if (cpu_number() != master_cpu) { panic("timer_longterm_update_master() on non-boot cpu"); + } timer_longterm_update_locked(tlp); - if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) + if (tlp->threshold.deadline != TIMER_LONGTERM_NONE) { timer_call_enter( &tlp->threshold.timer, tlp->threshold.deadline_set, TIMER_CALL_LOCAL | TIMER_CALL_SYS_CRITICAL); - + } + timer_queue_unlock(timer_longterm_queue); splx(s); } @@ -1532,30 +1558,30 @@ timer_longterm_update(timer_longterm_t *tlp) void timer_longterm_init(void) { - uint32_t longterm; - timer_longterm_t *tlp = &timer_longterm; + uint32_t longterm; + timer_longterm_t *tlp = &timer_longterm; DBG("timer_longterm_init() tlp: %p, queue: %p\n", tlp, &tlp->queue); /* * Set the longterm timer threshold. Defaults to TIMER_LONGTERM_THRESHOLD * or TIMER_LONGTERM_NONE (disabled) for server; - * overridden longterm boot-arg + * overridden longterm boot-arg */ tlp->threshold.interval = serverperfmode ? TIMER_LONGTERM_NONE - : TIMER_LONGTERM_THRESHOLD; - if (PE_parse_boot_argn("longterm", &longterm, sizeof (longterm))) { + : TIMER_LONGTERM_THRESHOLD; + if (PE_parse_boot_argn("longterm", &longterm, sizeof(longterm))) { tlp->threshold.interval = (longterm == 0) ? 
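/*
 * Illustrative sketch (not part of the patch): the latency filter above is a
 * ~1% exponentially weighted moving average, so one late wakeup nudges the
 * estimate rather than replacing it. Worked example with made-up numbers:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t latency = 1000;	/* running average, abstime units */
	uint64_t sample = 5000;		/* a single late wakeup */

	latency = (latency * 99 + sample) / 100;
	printf("%llu\n", (unsigned long long)latency);	/* prints 1040 */
	return 0;
}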
- TIMER_LONGTERM_NONE : - longterm * NSEC_PER_MSEC; + TIMER_LONGTERM_NONE : + longterm * NSEC_PER_MSEC; } if (tlp->threshold.interval != TIMER_LONGTERM_NONE) { printf("Longterm timer threshold: %llu ms\n", - tlp->threshold.interval / NSEC_PER_MSEC); + tlp->threshold.interval / NSEC_PER_MSEC); kprintf("Longterm timer threshold: %llu ms\n", - tlp->threshold.interval / NSEC_PER_MSEC); + tlp->threshold.interval / NSEC_PER_MSEC); nanoseconds_to_absolutetime(tlp->threshold.interval, - &tlp->threshold.interval); + &tlp->threshold.interval); tlp->threshold.margin = tlp->threshold.interval / 10; tlp->threshold.latency_min = EndOfAllTime; tlp->threshold.latency_max = 0; @@ -1567,12 +1593,12 @@ timer_longterm_init(void) lck_attr_setdefault(&timer_longterm_lck_attr); lck_grp_attr_setdefault(&timer_longterm_lck_grp_attr); lck_grp_init(&timer_longterm_lck_grp, - "timer_longterm", &timer_longterm_lck_grp_attr); + "timer_longterm", &timer_longterm_lck_grp_attr); mpqueue_init(&tlp->queue, - &timer_longterm_lck_grp, &timer_longterm_lck_attr); + &timer_longterm_lck_grp, &timer_longterm_lck_attr); timer_call_setup(&tlp->threshold.timer, - timer_longterm_callout, (timer_call_param_t) tlp); + timer_longterm_callout, (timer_call_param_t) tlp); timer_longterm_queue = &tlp->queue; } @@ -1585,12 +1611,12 @@ enum { uint64_t timer_sysctl_get(int oid) { - timer_longterm_t *tlp = &timer_longterm; + timer_longterm_t *tlp = &timer_longterm; switch (oid) { case THRESHOLD: return (tlp->threshold.interval == TIMER_LONGTERM_NONE) ? - 0 : tlp->threshold.interval / NSEC_PER_MSEC; + 0 : tlp->threshold.interval / NSEC_PER_MSEC; case QCOUNT: return tlp->queue.count; case ENQUEUES: @@ -1625,19 +1651,20 @@ timer_sysctl_get(int oid) * since it un-escalates timers to the longterm queue. */ static void -timer_master_scan(timer_longterm_t *tlp, - uint64_t now) +timer_master_scan(timer_longterm_t *tlp, + uint64_t now) { - queue_entry_t qe; - timer_call_t call; - uint64_t threshold; - uint64_t deadline; - mpqueue_head_t *timer_master_queue; + queue_entry_t qe; + timer_call_t call; + uint64_t threshold; + uint64_t deadline; + mpqueue_head_t *timer_master_queue; - if (tlp->threshold.interval != TIMER_LONGTERM_NONE) + if (tlp->threshold.interval != TIMER_LONGTERM_NONE) { threshold = now + tlp->threshold.interval; - else + } else { threshold = TIMER_LONGTERM_NONE; + } timer_master_queue = timer_queue_cpu(master_cpu); timer_queue_lock_spin(timer_master_queue); @@ -1647,9 +1674,10 @@ timer_master_scan(timer_longterm_t *tlp, call = TIMER_CALL(qe); deadline = TCE(call)->deadline; qe = queue_next(qe); - if ((call->flags & TIMER_CALL_LOCAL) != 0) + if ((call->flags & TIMER_CALL_LOCAL) != 0) { continue; - if (!simple_lock_try(&call->lock)) { + } + if (!simple_lock_try(&call->lock, LCK_GRP_NULL)) { /* case (2c) lock order inversion, dequeue only */ timer_call_entry_dequeue_async(call); continue; @@ -1671,9 +1699,9 @@ timer_master_scan(timer_longterm_t *tlp, static void timer_sysctl_set_threshold(uint64_t value) { - timer_longterm_t *tlp = &timer_longterm; - spl_t s = splclock(); - boolean_t threshold_increase; + timer_longterm_t *tlp = &timer_longterm; + spl_t s = splclock(); + boolean_t threshold_increase; timer_queue_lock_spin(timer_longterm_queue); @@ -1687,21 +1715,22 @@ timer_sysctl_set_threshold(uint64_t value) threshold_increase = TRUE; timer_call_cancel(&tlp->threshold.timer); } else { - uint64_t old_interval = tlp->threshold.interval; + uint64_t old_interval = tlp->threshold.interval; tlp->threshold.interval = value * NSEC_PER_MSEC; 
nanoseconds_to_absolutetime(tlp->threshold.interval, - &tlp->threshold.interval); + &tlp->threshold.interval); tlp->threshold.margin = tlp->threshold.interval / 10; - if (old_interval == TIMER_LONGTERM_NONE) + if (old_interval == TIMER_LONGTERM_NONE) { threshold_increase = FALSE; - else + } else { threshold_increase = (tlp->threshold.interval > old_interval); + } } if (threshold_increase /* or removal */) { /* Escalate timers from the longterm queue */ timer_longterm_scan(tlp, mach_absolute_time()); - } else /* decrease or addition */ { + } else { /* decrease or addition */ /* * We scan the local/master queue for timers now longterm. * To be strictly correct, we should scan all processor queues @@ -1744,7 +1773,7 @@ timer_sysctl_set(int oid, uint64_t value) case THRESHOLD: timer_call_cpu( master_cpu, - (void (*)(void *)) timer_sysctl_set_threshold, + (void (*)(void *))timer_sysctl_set_threshold, (void *) value); return KERN_SUCCESS; case SCAN_LIMIT: @@ -1760,7 +1789,9 @@ timer_sysctl_set(int oid, uint64_t value) /* Select timer coalescing window based on per-task quality-of-service hints */ -static boolean_t tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) { +static boolean_t +tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) +{ uint32_t latency_qos; boolean_t adjusted = FALSE; task_t ctask = t->task; @@ -1789,16 +1820,17 @@ static boolean_t tcoal_qos_adjust(thread_t t, int32_t *tshift, uint64_t *tmax_ab * processed than is technically possible when the HW deadline arrives. */ static void -timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) { +timer_compute_leeway(thread_t cthread, int32_t urgency, int32_t *tshift, uint64_t *tmax_abstime, boolean_t *pratelimited) +{ int16_t tpri = cthread->sched_pri; if ((urgency & TIMER_CALL_USER_MASK) != 0) { if (tpri >= BASEPRI_RTQUEUES || - urgency == TIMER_CALL_USER_CRITICAL) { + urgency == TIMER_CALL_USER_CRITICAL) { *tshift = tcoal_prio_params.timer_coalesce_rt_shift; *tmax_abstime = tcoal_prio_params.timer_coalesce_rt_abstime_max; TCOAL_PRIO_STAT(rt_tcl); } else if (proc_get_effective_thread_policy(cthread, TASK_POLICY_DARWIN_BG) || - (urgency == TIMER_CALL_USER_BACKGROUND)) { + (urgency == TIMER_CALL_USER_BACKGROUND)) { /* Determine if timer should be subjected to a lower QoS */ if (tcoal_qos_adjust(cthread, tshift, tmax_abstime, pratelimited)) { if (*tmax_abstime > tcoal_prio_params.timer_coalesce_bg_abstime_max) { @@ -1849,32 +1881,37 @@ timer_call_slop(uint64_t deadline, uint64_t now, uint32_t flags, thread_t cthrea uint64_t adjval; uint32_t urgency = (flags & TIMER_CALL_URGENCY_MASK); - if (mach_timer_coalescing_enabled && + if (mach_timer_coalescing_enabled && (deadline > now) && (urgency != TIMER_CALL_SYS_CRITICAL)) { timer_compute_leeway(cthread, urgency, &tcs_shift, &tcs_max_abstime, pratelimited); - - if (tcs_shift >= 0) + + if (tcs_shift >= 0) { adjval = MIN((deadline - now) >> tcs_shift, tcs_max_abstime); - else + } else { adjval = MIN((deadline - now) << (-tcs_shift), tcs_max_abstime); + } /* Apply adjustments derived from "user idle level" heuristic */ adjval += (adjval * timer_user_idle_level) >> 7; return adjval; - } else { + } else { return 0; } } int -timer_get_user_idle_level(void) { +timer_get_user_idle_level(void) +{ return timer_user_idle_level; } -kern_return_t timer_set_user_idle_level(int ilevel) { +kern_return_t +timer_set_user_idle_level(int ilevel) +{ boolean_t 
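/*
 * Illustrative sketch (not part of the patch): timer_call_slop() above derives
 * the coalescing window as a power-of-two fraction of the time to deadline,
 * clamps it to a per-urgency-class maximum, then widens it by up to 2x as the
 * user idle level approaches 128. A compact restatement with a worked example;
 * names are stand-ins:
 */
#include <stdint.h>

#define SKETCH_MIN(a, b)	((a) < (b) ? (a) : (b))

static uint64_t
sketch_slop(uint64_t deadline, uint64_t now, int32_t shift,
    uint64_t max_abstime, unsigned idle_level /* 0..128 */)
{
	if (deadline <= now) {
		return 0;
	}
	uint64_t adjval = (shift >= 0) ?
	    SKETCH_MIN((deadline - now) >> shift, max_abstime) :
	    SKETCH_MIN((deadline - now) << (-shift), max_abstime);

	/* idle_level 128 doubles the window; 0 leaves it unchanged */
	adjval += (adjval * idle_level) >> 7;
	return adjval;
}

/* e.g. deadline - now = 8000, shift = 3, max = 10000, idle_level = 64:
 * adjval = 1000, widened to 1000 + ((1000 * 64) >> 7) = 1500 */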
do_reeval = FALSE; - if ((ilevel < 0) || (ilevel > 128)) + if ((ilevel < 0) || (ilevel > 128)) { return KERN_INVALID_ARGUMENT; + } if (ilevel < timer_user_idle_level) { do_reeval = TRUE; @@ -1882,8 +1919,9 @@ kern_return_t timer_set_user_idle_level(int ilevel) { timer_user_idle_level = ilevel; - if (do_reeval) + if (do_reeval) { ml_timer_evaluate(); + } return KERN_SUCCESS; } diff --git a/osfmk/kern/timer_call.h b/osfmk/kern/timer_call.h index 6581d90c2..a817ce088 100644 --- a/osfmk/kern/timer_call.h +++ b/osfmk/kern/timer_call.h @@ -2,7 +2,7 @@ * Copyright (c) 1993-1995, 1999-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -42,6 +42,7 @@ #ifdef MACH_KERNEL_PRIVATE #include +#include extern boolean_t mach_timer_coalescing_enabled; extern void timer_call_queue_init(mpqueue_head_t *); @@ -52,21 +53,21 @@ extern void timer_call_queue_init(mpqueue_head_t *); * of this data structure, and the two had better match. */ typedef struct timer_call { - struct call_entry call_entry; - decl_simple_lock_data( ,lock); /* protects call_entry queue */ - uint64_t soft_deadline; - uint32_t flags; - boolean_t async_dequeue; /* this field is protected by - call_entry queue's lock */ - uint64_t ttd; /* Time to deadline at creation */ + struct call_entry call_entry; + decl_simple_lock_data(, lock); /* protects call_entry queue */ + uint64_t soft_deadline; + uint32_t flags; + boolean_t async_dequeue; /* this field is protected by + * call_entry queue's lock */ + uint64_t ttd; /* Time to deadline at creation */ } timer_call_data_t, *timer_call_t; -#define EndOfAllTime 0xFFFFFFFFFFFFFFFFULL +#define EndOfAllTime 0xFFFFFFFFFFFFFFFFULL -typedef void *timer_call_param_t; -typedef void (*timer_call_func_t)( - timer_call_param_t param0, - timer_call_param_t param1); +typedef void *timer_call_param_t; +typedef void (*timer_call_func_t)( + timer_call_param_t param0, + timer_call_param_t param1); /* * Flags to alter the default timer/timeout coalescing behavior @@ -81,16 +82,16 @@ typedef void (*timer_call_func_t)( * attributes, in addition to the per-timer_call urgency specification, * are used to establish coalescing behavior. 
*/ -#define TIMER_CALL_SYS_NORMAL TIMEOUT_URGENCY_SYS_NORMAL -#define TIMER_CALL_SYS_CRITICAL TIMEOUT_URGENCY_SYS_CRITICAL -#define TIMER_CALL_SYS_BACKGROUND TIMEOUT_URGENCY_SYS_BACKGROUND +#define TIMER_CALL_SYS_NORMAL TIMEOUT_URGENCY_SYS_NORMAL +#define TIMER_CALL_SYS_CRITICAL TIMEOUT_URGENCY_SYS_CRITICAL +#define TIMER_CALL_SYS_BACKGROUND TIMEOUT_URGENCY_SYS_BACKGROUND -#define TIMER_CALL_USER_MASK TIMEOUT_URGENCY_USER_MASK -#define TIMER_CALL_USER_NORMAL TIMEOUT_URGENCY_USER_NORMAL -#define TIMER_CALL_USER_CRITICAL TIMEOUT_URGENCY_USER_CRITICAL -#define TIMER_CALL_USER_BACKGROUND TIMEOUT_URGENCY_USER_BACKGROUND +#define TIMER_CALL_USER_MASK TIMEOUT_URGENCY_USER_MASK +#define TIMER_CALL_USER_NORMAL TIMEOUT_URGENCY_USER_NORMAL +#define TIMER_CALL_USER_CRITICAL TIMEOUT_URGENCY_USER_CRITICAL +#define TIMER_CALL_USER_BACKGROUND TIMEOUT_URGENCY_USER_BACKGROUND -#define TIMER_CALL_URGENCY_MASK TIMEOUT_URGENCY_MASK +#define TIMER_CALL_URGENCY_MASK TIMEOUT_URGENCY_MASK /* * Indicate that a specific leeway value is being provided (otherwise @@ -98,50 +99,50 @@ typedef void (*timer_call_func_t)( * only be used to extend the leeway calculated internally from the * urgency class provided. */ -#define TIMER_CALL_LEEWAY TIMEOUT_URGENCY_LEEWAY +#define TIMER_CALL_LEEWAY TIMEOUT_URGENCY_LEEWAY /* * Non-migratable timer_call */ -#define TIMER_CALL_LOCAL TIMEOUT_URGENCY_FIRST_AVAIL -#define TIMER_CALL_RATELIMITED TIMEOUT_URGENCY_RATELIMITED -extern boolean_t timer_call_enter( - timer_call_t call, - uint64_t deadline, - uint32_t flags); - -extern boolean_t timer_call_enter1( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint32_t flags); - -extern boolean_t timer_call_enter_with_leeway( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint64_t leeway, - uint32_t flags, - boolean_t ratelimited); - -extern boolean_t timer_call_quantum_timer_enter( - timer_call_t call, - timer_call_param_t param1, - uint64_t deadline, - uint64_t ctime); - -extern boolean_t timer_call_cancel( - timer_call_t call); - -extern boolean_t timer_call_quantum_timer_cancel( - timer_call_t call); - -extern void timer_call_init(void); - -extern void timer_call_setup( - timer_call_t call, - timer_call_func_t func, - timer_call_param_t param0); +#define TIMER_CALL_LOCAL TIMEOUT_URGENCY_FIRST_AVAIL +#define TIMER_CALL_RATELIMITED TIMEOUT_URGENCY_RATELIMITED +extern boolean_t timer_call_enter( + timer_call_t call, + uint64_t deadline, + uint32_t flags); + +extern boolean_t timer_call_enter1( + timer_call_t call, + timer_call_param_t param1, + uint64_t deadline, + uint32_t flags); + +extern boolean_t timer_call_enter_with_leeway( + timer_call_t call, + timer_call_param_t param1, + uint64_t deadline, + uint64_t leeway, + uint32_t flags, + boolean_t ratelimited); + +extern boolean_t timer_call_quantum_timer_enter( + timer_call_t call, + timer_call_param_t param1, + uint64_t deadline, + uint64_t ctime); + +extern boolean_t timer_call_cancel( + timer_call_t call); + +extern boolean_t timer_call_quantum_timer_cancel( + timer_call_t call); + +extern void timer_call_init(void); + +extern void timer_call_setup( + timer_call_t call, + timer_call_func_t func, + timer_call_param_t param0); extern int timer_get_user_idle_level(void); extern kern_return_t timer_set_user_idle_level(int ilevel); diff --git a/osfmk/kern/timer_queue.h b/osfmk/kern/timer_queue.h index 99284d42e..08c841925 100644 --- a/osfmk/kern/timer_queue.h +++ b/osfmk/kern/timer_queue.h @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Inc. 
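/*
 * Illustrative sketch (not part of the patch): typical use of the interface
 * declared above, in kernel-private code: set the call up once, arm it with a
 * deadline and an urgency class, cancel it before teardown. my_timer,
 * my_callback, and my_state are hypothetical:
 */
static timer_call_data_t my_timer;

static void
my_callback(timer_call_param_t p0, timer_call_param_t p1)
{
	/* invoked at (or, after coalescing, near) the requested deadline */
	(void)p0;
	(void)p1;
}

static void
my_init(void *my_state)
{
	timer_call_setup(&my_timer, my_callback, my_state);	/* once */
}

static void
my_arm(uint64_t deadline)
{
	(void)timer_call_enter(&my_timer, deadline, TIMER_CALL_SYS_NORMAL);
}

static void
my_disarm(void)
{
	(void)timer_call_cancel(&my_timer);
}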
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,66 +36,67 @@ #ifdef MACH_KERNEL_PRIVATE +#include #include /* Kernel trace events associated with timers and timer queues */ -#define DECR_TRAP_LATENCY MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) -#define DECR_SET_DEADLINE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) -#define DECR_TIMER_CALLOUT MACHDBG_CODE(DBG_MACH_EXCP_DECI, 2) -#define DECR_PM_DEADLINE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 3) -#define DECR_TIMER_MIGRATE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 4) +#define DECR_TRAP_LATENCY MACHDBG_CODE(DBG_MACH_EXCP_DECI, 0) +#define DECR_SET_DEADLINE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1) +#define DECR_TIMER_CALLOUT MACHDBG_CODE(DBG_MACH_EXCP_DECI, 2) +#define DECR_PM_DEADLINE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 3) +#define DECR_TIMER_MIGRATE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 4) #if defined(i386) || defined(x86_64) -#define DECR_RDHPET MACHDBG_CODE(DBG_MACH_EXCP_DECI, 5) -#define DECR_SET_TSC_DEADLINE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 6) -#define DECR_SET_APIC_DEADLINE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 16) +#define DECR_RDHPET MACHDBG_CODE(DBG_MACH_EXCP_DECI, 5) +#define DECR_SET_TSC_DEADLINE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 6) +#define DECR_SET_APIC_DEADLINE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 16) #endif -#define DECR_TIMER_ENTER MACHDBG_CODE(DBG_MACH_EXCP_DECI, 7) -#define DECR_TIMER_CANCEL MACHDBG_CODE(DBG_MACH_EXCP_DECI, 8) -#define DECR_TIMER_QUEUE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 9) -#define DECR_TIMER_EXPIRE MACHDBG_CODE(DBG_MACH_EXCP_DECI,10) -#define DECR_TIMER_ASYNC_DEQ MACHDBG_CODE(DBG_MACH_EXCP_DECI,11) -#define DECR_TIMER_UPDATE MACHDBG_CODE(DBG_MACH_EXCP_DECI,12) -#define DECR_TIMER_ESCALATE MACHDBG_CODE(DBG_MACH_EXCP_DECI,13) -#define DECR_TIMER_OVERDUE MACHDBG_CODE(DBG_MACH_EXCP_DECI,14) -#define DECR_TIMER_RESCAN MACHDBG_CODE(DBG_MACH_EXCP_DECI,15) +#define DECR_TIMER_ENTER MACHDBG_CODE(DBG_MACH_EXCP_DECI, 7) +#define DECR_TIMER_CANCEL MACHDBG_CODE(DBG_MACH_EXCP_DECI, 8) +#define DECR_TIMER_QUEUE MACHDBG_CODE(DBG_MACH_EXCP_DECI, 9) +#define DECR_TIMER_EXPIRE MACHDBG_CODE(DBG_MACH_EXCP_DECI,10) +#define DECR_TIMER_ASYNC_DEQ MACHDBG_CODE(DBG_MACH_EXCP_DECI,11) +#define DECR_TIMER_UPDATE MACHDBG_CODE(DBG_MACH_EXCP_DECI,12) +#define DECR_TIMER_ESCALATE MACHDBG_CODE(DBG_MACH_EXCP_DECI,13) +#define DECR_TIMER_OVERDUE MACHDBG_CODE(DBG_MACH_EXCP_DECI,14) +#define DECR_TIMER_RESCAN MACHDBG_CODE(DBG_MACH_EXCP_DECI,15) /* * Invoked by kernel, implemented by platform. 
*/ /* Request an expiration deadline, returns queue association */ -extern mpqueue_head_t * timer_queue_assign( - uint64_t deadline); +extern mpqueue_head_t * timer_queue_assign( + uint64_t deadline); -extern uint64_t timer_call_slop( - uint64_t deadline, - uint64_t armtime, - uint32_t urgency, - thread_t arming_thread, - boolean_t *rlimited); -extern boolean_t timer_resort_threshold(uint64_t); +extern uint64_t timer_call_slop( + uint64_t deadline, + uint64_t armtime, + uint32_t urgency, + thread_t arming_thread, + boolean_t *rlimited); +extern boolean_t timer_resort_threshold(uint64_t); /* Cancel an associated expiration deadline and specify new deadline */ -extern void timer_queue_cancel( - mpqueue_head_t *queue, - uint64_t deadline, - uint64_t new_deadline); +extern void timer_queue_cancel( + mpqueue_head_t *queue, + uint64_t deadline, + uint64_t new_deadline); /* Return a pointer to the local timer queue for a given cpu */ -extern mpqueue_head_t * timer_queue_cpu( - int cpu); +extern mpqueue_head_t * timer_queue_cpu( + int cpu); /* Call a function with argument on a cpu */ -extern void timer_call_cpu( - int cpu, - void (*fn)(void *), - void *arg); +extern void timer_call_cpu( + int cpu, + void (*fn)(void *), + void *arg); /* Queue a function to be called with argument on a cpu */ -extern void timer_call_nosync_cpu( - int cpu, - void (*fn)(void *), - void *arg); +extern void timer_call_nosync_cpu( + int cpu, + void (*fn)(void *), + void *arg); /* * Invoked by platform, implemented by kernel. @@ -132,63 +133,63 @@ typedef struct { extern timer_coalescing_priority_params_ns_t * timer_call_get_priority_params(void); -extern uint64_t timer_call_slop( - uint64_t deadline, - uint64_t armtime, - uint32_t urgency, - thread_t arming_thread, - boolean_t *rlimited); +extern uint64_t timer_call_slop( + uint64_t deadline, + uint64_t armtime, + uint32_t urgency, + thread_t arming_thread, + boolean_t *rlimited); /* Process deadline expiration for queue, returns new deadline */ -extern uint64_t timer_queue_expire( - mpqueue_head_t *queue, - uint64_t deadline); +extern uint64_t timer_queue_expire( + mpqueue_head_t *queue, + uint64_t deadline); -extern uint64_t timer_queue_expire_with_options( - mpqueue_head_t *, - uint64_t, - boolean_t); +extern uint64_t timer_queue_expire_with_options( + mpqueue_head_t *, + uint64_t, + boolean_t); -extern void quantum_timer_expire( - uint64_t deadline); +extern void quantum_timer_expire( + uint64_t deadline); /* Shutdown a timer queue and reassign existing activities */ -extern void timer_queue_shutdown( - mpqueue_head_t *queue); +extern void timer_queue_shutdown( + mpqueue_head_t *queue); /* Move timer requests from one queue to another */ -extern int timer_queue_migrate( - mpqueue_head_t *from, - mpqueue_head_t *to); +extern int timer_queue_migrate( + mpqueue_head_t *from, + mpqueue_head_t *to); /* * Invoked by platform, implemented by platform.
*/ -extern void timer_intr(int inuser, uint64_t iaddr); +extern void timer_intr(int inuser, uint64_t iaddr); #if defined(i386) || defined(x86_64) -extern uint64_t setPop(uint64_t time); +extern uint64_t setPop(uint64_t time); #else -extern int setPop(uint64_t time); +extern int setPop(uint64_t time); #endif -extern void timer_resync_deadlines(void); +extern void timer_resync_deadlines(void); -extern void timer_set_deadline(uint64_t deadline); +extern void timer_set_deadline(uint64_t deadline); -extern void quantum_timer_set_deadline(uint64_t deadline); +extern void quantum_timer_set_deadline(uint64_t deadline); /* Migrate the local timer queue of a given cpu to the master cpu */ -extern uint32_t timer_queue_migrate_cpu(int target_cpu); +extern uint32_t timer_queue_migrate_cpu(int target_cpu); -extern void timer_queue_trace( - mpqueue_head_t *queue); -extern void timer_queue_trace_cpu(int cpu); +extern void timer_queue_trace( + mpqueue_head_t *queue); +extern void timer_queue_trace_cpu(int cpu); -extern uint64_t timer_sysctl_get(int oid); -extern int timer_sysctl_set(int oid, uint64_t value); +extern uint64_t timer_sysctl_get(int oid); +extern int timer_sysctl_set(int oid, uint64_t value); -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -#endif /* _KERN_TIMER_QUEUE_H_ */ +#endif /* _KERN_TIMER_QUEUE_H_ */ diff --git a/osfmk/kern/tlock.c b/osfmk/kern/tlock.c new file mode 100644 index 000000000..22c57cc56 --- /dev/null +++ b/osfmk/kern/tlock.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2018 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#define ATOMIC_PRIVATE 1 +#define LOCK_PRIVATE 1 + +#include +#include +#include +#include +#include + +#if defined(__x86_64__) +#include +extern uint64_t LockTimeOutTSC; +#define TICKET_LOCK_PANIC_TIMEOUT LockTimeOutTSC +#endif + +#if defined(__arm__) || defined(__arm64__) +extern uint64_t TLockTimeOut; +#define TICKET_LOCK_PANIC_TIMEOUT TLockTimeOut +#endif +/* "Ticket": A FIFO spinlock with constant backoff + * cf. 
Algorithms for Scalable Synchronization on Shared-Memory Multiprocessors + * by Mellor-Crummey and Scott, 1991 + */ + +/* TODO: proportional back-off based on desired-current ticket distance + * This has the potential to considerably reduce snoop traffic + * but must be tuned carefully + * TODO: UP implementation. + * Currently only used within the scheduler, where it is acquired with + * interrupts masked, and consequently doesn't require a uniprocessor + * implementation. + * TODO: Evaluate a bias towards the performant clusters on + * asymmetric efficient/performant multi-cluster systems, while + * retaining the starvation-free property. A small intra-cluster bias may + * be profitable for overall throughput + */ + +void +lck_ticket_init(lck_ticket_t *tlock) +{ + memset(tlock, 0, sizeof(*tlock)); + /* Current ticket size limit--tickets can be trivially expanded + * to 16-bits if needed + */ + static_assert(MAX_CPUS < 256); + + __assert_only lck_ticket_internal *tlocki = &tlock->tu; + /* Verify alignment */ + __assert_only uintptr_t tcn = (uintptr_t) &tlocki->tcurnext; + __assert_only uintptr_t tc = (uintptr_t) &tlocki->cticket; + __assert_only uintptr_t tn = (uintptr_t) &tlocki->nticket; + + assert(((tcn & 3) == 0) && (tcn == tc) && (tn == (tc + 1))); +} + +static void +tlock_mark_owned(lck_ticket_t *tlock, thread_t cthread) +{ + assert(tlock->lck_owner == 0); + /* There is a small pre-emption disabled window (also interrupts masked + * for the pset lock) between the acquisition of the lock and the + * population of the advisory 'owner' thread field + * On architectures with a DCAS (ARM v8.1 or x86), conceivably we could + * populate the next ticket and the thread atomically, with + * possible overhead, potential loss of micro-architectural fwd progress + * properties of an unconditional fetch-add, and a 16 byte alignment requirement. + */ + __c11_atomic_store((_Atomic thread_t *)&tlock->lck_owner, cthread, __ATOMIC_RELAXED); +} + +/* On contention, poll for ownership + * Returns when the current ticket is observed equal to "mt" + */ +static void __attribute__((noinline)) +tlock_contended(uint8_t *tp, uint8_t mt, lck_ticket_t *tlock, thread_t cthread) +{ + uint8_t cticket; + uint64_t etime = 0, ctime = 0, stime = 0; + + assertf(tlock->lck_owner != (uintptr_t) cthread, "Recursive ticket lock, owner: %p, current thread: %p", (void *) tlock->lck_owner, (void *) cthread); + + for (;;) { + for (int i = 0; i < LOCK_SNOOP_SPINS; i++) { +#if (__ARM_ENABLE_WFE_) + if ((cticket = load_exclusive_acquire8(tp)) != mt) { + wait_for_event(); + } else { + /* Some micro-architectures may benefit + * from disarming the monitor.
+ * TODO: determine specific micro-architectures + * which benefit, modern CPUs may not + */ + clear_exclusive(); + tlock_mark_owned(tlock, cthread); + return; + } +#else /* !WFE */ +#if defined(__x86_64__) + __builtin_ia32_pause(); +#endif /* x64 */ + if ((cticket = __c11_atomic_load((_Atomic uint8_t *) tp, __ATOMIC_SEQ_CST)) == mt) { + tlock_mark_owned(tlock, cthread); + return; + } +#endif /* !WFE */ + } + + if (etime == 0) { + stime = ml_get_timebase(); + etime = stime + TICKET_LOCK_PANIC_TIMEOUT; + } else if ((ctime = ml_get_timebase()) >= etime) { + break; + } + } +#if defined (__x86_64__) + uintptr_t lowner = tlock->lck_owner; + uint32_t ocpu = spinlock_timeout_NMI(lowner); + panic("Ticket spinlock timeout; start: 0x%llx, end: 0x%llx, current: 0x%llx, lock: %p, *lock: 0x%x, waiting for 0x%x, pre-NMI owner: %p, current owner: %p, owner CPU: 0x%x", stime, etime, ctime, tp, *tp, mt, (void *) lowner, (void *) tlock->lck_owner, ocpu); +#else + panic("Ticket spinlock timeout; start: 0x%llx, end: 0x%llx, current: 0x%llx, lock: %p, *lock: 0x%x, waiting for 0x%x, owner: %p", stime, etime, ctime, tp, *tp, mt, (void *) tlock->lck_owner); +#endif +} + +void +lck_ticket_lock(lck_ticket_t *tlock) +{ + lck_ticket_internal *tlocki = &tlock->tu; + thread_t cthread = current_thread(); + lck_ticket_internal tlocka; + + disable_preemption_for_thread(cthread); + /* Atomically load both the current and next ticket, and increment the + * latter. Wrap of the ticket field is OK as long as the total + * number of contending CPUs is < maximum ticket + */ + tlocka.tcurnext = __c11_atomic_fetch_add((_Atomic uint16_t *)&tlocki->tcurnext, 1U << 8, __ATOMIC_ACQUIRE); + + /* Contention? branch to out of line contended block */ + if (__improbable(tlocka.cticket != tlocka.nticket)) { + return tlock_contended(&tlocki->cticket, tlocka.nticket, tlock, cthread); + } + + tlock_mark_owned(tlock, cthread); +} + +void +lck_ticket_unlock(lck_ticket_t *tlock) +{ + lck_ticket_internal *tlocki = &tlock->tu; + + assertf(tlock->lck_owner == (uintptr_t) current_thread(), "Ticket unlock non-owned, owner: %p", (void *) tlock->lck_owner); + + __c11_atomic_store((_Atomic uintptr_t *)&tlock->lck_owner, 0, __ATOMIC_RELAXED); + +#if defined(__x86_64__) + /* Communicate desired release semantics to the compiler */ + __c11_atomic_thread_fence(__ATOMIC_RELEASE); + /* '+ constraint indicates a read modify write */ + /* I have not yet located a c11 primitive which synthesizes an 'INC ', + * i.e. a specified-granule non-atomic memory read-modify-write. + */ + __asm__ volatile ("incb %0" : "+m"(tlocki->cticket) :: "cc"); +#else /* !x86_64 */ + uint8_t cticket = __c11_atomic_load((_Atomic uint8_t *) &tlocki->cticket, __ATOMIC_RELAXED); + cticket++; + __c11_atomic_store((_Atomic uint8_t *) &tlocki->cticket, cticket, __ATOMIC_RELEASE); +#if __arm__ + set_event(); +#endif // __arm__ +#endif /* !x86_64 */ + enable_preemption(); +} + +void +lck_ticket_assert_owned(__assert_only lck_ticket_t *tlock) +{ + assertf(__c11_atomic_load((_Atomic thread_t *)&tlock->lck_owner, __ATOMIC_RELAXED) == current_thread(), "lck_ticket_assert_owned: owner %p, current: %p", (void *) tlock->lck_owner, current_thread()); +} diff --git a/osfmk/kern/trustcache.h b/osfmk/kern/trustcache.h index 889e407d9..355039d84 100644 --- a/osfmk/kern/trustcache.h +++ b/osfmk/kern/trustcache.h @@ -39,10 +39,10 @@ * Used for loadable trust caches only, until phasing out support. 
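/*
 * Illustrative sketch (not part of the patch): the core of the ticket
 * discipline implemented above, reduced to portable C11 atomics. Each
 * acquirer takes the next ticket with a single fetch-add and spins until
 * "now serving" reaches it; unlock advances "now serving". The kernel
 * version additionally packs both counters into one 16-bit word (so the
 * acquiring fetch-add reads both atomically), records an advisory owner,
 * and panics on timeout:
 */
#include <stdatomic.h>

struct sketch_tlock {
	atomic_uchar cticket;	/* now serving */
	atomic_uchar nticket;	/* next ticket to hand out */
};

static void
sketch_lock(struct sketch_tlock *l)
{
	unsigned char mine =
	    atomic_fetch_add_explicit(&l->nticket, 1, memory_order_relaxed);
	while (atomic_load_explicit(&l->cticket, memory_order_acquire) != mine) {
		/* spin: FIFO order is guaranteed by ticket numbering */
	}
}

static void
sketch_unlock(struct sketch_tlock *l)
{
	atomic_fetch_add_explicit(&l->cticket, 1, memory_order_release);
}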
*/ typedef uint8_t trust_cache_hash0[CS_CDHASH_LEN]; struct trust_cache_module0 { - uint32_t version; - uuid_t uuid; - uint32_t num_hashes; - trust_cache_hash0 hashes[]; + uint32_t version; + uuid_t uuid; + uint32_t num_hashes; + trust_cache_hash0 hashes[]; } __attribute__((__packed__)); @@ -56,14 +56,14 @@ struct trust_cache_entry1 { } __attribute__((__packed__)); struct trust_cache_module1 { - uint32_t version; - uuid_t uuid; - uint32_t num_entries; - struct trust_cache_entry1 entries[]; + uint32_t version; + uuid_t uuid; + uint32_t num_entries; + struct trust_cache_entry1 entries[]; } __attribute__((__packed__)); // Trust Cache Entry Flags -#define CS_TRUST_CACHE_AMFID 0x1 // valid cdhash for amfid +#define CS_TRUST_CACHE_AMFID 0x1 // valid cdhash for amfid #define TC_LOOKUP_HASH_TYPE_SHIFT 16 #define TC_LOOKUP_HASH_TYPE_MASK 0xff0000L; @@ -81,8 +81,8 @@ struct trust_cache_module1 { /* This is how iBoot delivers them to us. */ struct serialized_trust_caches { - uint32_t num_caches; - uint32_t offsets[0]; + uint32_t num_caches; + uint32_t offsets[0]; } __attribute__((__packed__)); @@ -91,9 +91,9 @@ void trust_cache_init(void); uint32_t lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]); bool lookup_in_trust_cache_module(struct trust_cache_module1 const * const module, - uint8_t const cdhash[CS_CDHASH_LEN], - uint8_t * const hash_type, - uint8_t * const flags); + uint8_t const cdhash[CS_CDHASH_LEN], + uint8_t * const hash_type, + uint8_t * const flags); #endif diff --git a/osfmk/kern/turnstile.c b/osfmk/kern/turnstile.c index 7a113412e..ea58c5477 100644 --- a/osfmk/kern/turnstile.c +++ b/osfmk/kern/turnstile.c @@ -44,6 +44,7 @@ #include #include +#include #include static zone_t turnstiles_zone; @@ -69,19 +70,19 @@ os_refgrp_decl(static, turnstile_refgrp, "turnstile", NULL); static queue_head_t turnstiles_list; static lck_spin_t global_turnstile_lock; -lck_grp_t turnstiles_dev_lock_grp; -lck_attr_t turnstiles_dev_lock_attr; -lck_grp_attr_t turnstiles_dev_lock_grp_attr; +lck_grp_t turnstiles_dev_lock_grp; +lck_attr_t turnstiles_dev_lock_attr; +lck_grp_attr_t turnstiles_dev_lock_grp_attr; #define global_turnstiles_lock_init() \ lck_spin_init(&global_turnstile_lock, &turnstiles_dev_lock_grp, &turnstiles_dev_lock_attr) #define global_turnstiles_lock_destroy() \ lck_spin_destroy(&global_turnstile_lock, &turnstiles_dev_lock_grp) -#define global_turnstiles_lock() \ - lck_spin_lock(&global_turnstile_lock) -#define global_turnstiles_lock_try() \ - lck_spin_try_lock(&global_turnstile_lock) -#define global_turnstiles_unlock() \ +#define global_turnstiles_lock() \ + lck_spin_lock_grp(&global_turnstile_lock, &turnstiles_dev_lock_grp) +#define global_turnstiles_lock_try() \ + lck_spin_try_lock_grp(&global_turnstile_lock, &turnstiles_dev_lock_grp) +#define global_turnstiles_unlock() \ lck_spin_unlock(&global_turnstile_lock) /* Array to store stats for multi-hop boosting */ @@ -93,7 +94,7 @@ uint64_t thread_block_on_regular_waitq_count; #endif #ifndef max -#define max(a,b) (((a) > (b)) ? (a) : (b)) +#define max(a, b) (((a) > (b)) ? 
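/*
 * Illustrative sketch (not part of the patch): iBoot hands the kernel a blob
 * shaped like struct serialized_trust_caches above -- a count followed by byte
 * offsets, each locating one trust cache module. A minimal walk of that
 * layout, assuming the blob has already been bounds-checked; the sketch_
 * types are local stand-ins for the packed kernel structs:
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_module {
	uint32_t version;
	uint8_t  uuid[16];
	uint32_t num_entries;
} __attribute__((__packed__));

struct sketch_blob {
	uint32_t num_caches;
	uint32_t offsets[];
} __attribute__((__packed__));

static void
sketch_walk(const struct sketch_blob *blob)
{
	const uint8_t *base = (const uint8_t *)blob;

	for (uint32_t i = 0; i < blob->num_caches; i++) {
		const struct sketch_module *m =
		    (const struct sketch_module *)(base + blob->offsets[i]);
		printf("cache %u: version %u, %u entries\n",
		    i, m->version, m->num_entries);
	}
}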
(a) : (b)) #endif /* max */ /* Static function declarations */ @@ -109,53 +110,53 @@ static void turnstile_update_inheritor_workq_priority_chain(struct turnstile *in_turnstile, spl_t s); static void turnstile_update_inheritor_thread_priority_chain(struct turnstile **in_turnstile, - thread_t *out_thread, int total_hop, turnstile_stats_update_flags_t tsu_flags); + thread_t *out_thread, int total_hop, turnstile_stats_update_flags_t tsu_flags); static void turnstile_update_inheritor_turnstile_priority_chain(struct turnstile **in_out_turnstile, - int total_hop, turnstile_stats_update_flags_t tsu_flags); + int total_hop, turnstile_stats_update_flags_t tsu_flags); static void thread_update_waiting_turnstile_priority_chain(thread_t *in_thread, - struct turnstile **out_turnstile, int thread_hop, int total_hop, - turnstile_stats_update_flags_t tsu_flags); + struct turnstile **out_turnstile, int thread_hop, int total_hop, + turnstile_stats_update_flags_t tsu_flags); static boolean_t turnstile_update_turnstile_promotion_locked(struct turnstile *dst_turnstile, - struct turnstile *src_turnstile); + struct turnstile *src_turnstile); static boolean_t turnstile_update_turnstile_promotion(struct turnstile *dst_turnstile, - struct turnstile *src_turnstile); + struct turnstile *src_turnstile); static boolean_t turnstile_need_turnstile_promotion_update(struct turnstile *dst_turnstile, - struct turnstile *src_turnstile); + struct turnstile *src_turnstile); static boolean_t turnstile_add_turnstile_promotion(struct turnstile *dst_turnstile, - struct turnstile *src_turnstile); + struct turnstile *src_turnstile); static boolean_t turnstile_remove_turnstile_promotion(struct turnstile *dst_turnstile, - struct turnstile *src_turnstile); + struct turnstile *src_turnstile); static boolean_t turnstile_update_thread_promotion_locked(struct turnstile *dst_turnstile, - thread_t thread); + thread_t thread); static boolean_t turnstile_need_thread_promotion_update(struct turnstile *dst_turnstile, - thread_t thread); + thread_t thread); static boolean_t thread_add_turnstile_promotion( - thread_t thread, struct turnstile *turnstile); + thread_t thread, struct turnstile *turnstile); static boolean_t thread_remove_turnstile_promotion( - thread_t thread, struct turnstile *turnstile); + thread_t thread, struct turnstile *turnstile); static boolean_t thread_needs_turnstile_promotion_update(thread_t thread, - struct turnstile *turnstile); + struct turnstile *turnstile); static boolean_t thread_update_turnstile_promotion( - thread_t thread, struct turnstile *turnstile); + thread_t thread, struct turnstile *turnstile); static boolean_t thread_update_turnstile_promotion_locked( - thread_t thread, struct turnstile *turnstile); + thread_t thread, struct turnstile *turnstile); static boolean_t workq_add_turnstile_promotion( - struct workqueue *wq_inheritor, struct turnstile *turnstile); + struct workqueue *wq_inheritor, struct turnstile *turnstile); static turnstile_stats_update_flags_t thread_get_update_flags_for_turnstile_propagation_stoppage(thread_t thread); static turnstile_stats_update_flags_t @@ -180,7 +181,7 @@ union turnstile_type_gencount { uint32_t value; struct { uint32_t ts_type:(8 * sizeof(turnstile_type_t)), - ts_gencount: (8 *(sizeof(uint32_t) - sizeof(turnstile_type_t))); + ts_gencount: (8 * (sizeof(uint32_t) - sizeof(turnstile_type_t))); }; }; @@ -218,9 +219,9 @@ turnstile_set_type_and_increment_gencount(struct turnstile *turnstile, turnstile /* Turnstile hashtable Implementation */ /* - * Maximum number of buckets in the 
turnstile hashtable. This number affects the - * performance of the hashtable since it determines the hash collision - * rate. To experiment with the number of buckets in this hashtable use the + * Maximum number of buckets in the turnstile hashtable. This number affects the + * performance of the hashtable since it determines the hash collision + * rate. To experiment with the number of buckets in this hashtable use the * "ts_htable_buckets" boot-arg. */ #define TURNSTILE_HTABLE_BUCKETS_DEFAULT 32 @@ -229,8 +230,8 @@ turnstile_set_type_and_increment_gencount(struct turnstile *turnstile, turnstile SLIST_HEAD(turnstile_hashlist, turnstile); struct turnstile_htable_bucket { - lck_spin_t ts_ht_bucket_lock; - struct turnstile_hashlist ts_ht_bucket_list; + lck_spin_t ts_ht_bucket_lock; + struct turnstile_hashlist ts_ht_bucket_list; }; SECURITY_READ_ONLY_LATE(static uint32_t) ts_htable_buckets; @@ -243,11 +244,11 @@ lck_attr_t turnstiles_htable_lock_attr; lck_grp_attr_t turnstiles_htable_lock_grp_attr; #define turnstile_bucket_lock_init(bucket) \ - lck_spin_init(&bucket->ts_ht_bucket_lock, &turnstiles_htable_lock_grp, &turnstiles_htable_lock_attr) + lck_spin_init(&bucket->ts_ht_bucket_lock, &turnstiles_htable_lock_grp, &turnstiles_htable_lock_attr) #define turnstile_bucket_lock(bucket) \ - lck_spin_lock(&bucket->ts_ht_bucket_lock) + lck_spin_lock_grp(&bucket->ts_ht_bucket_lock, &turnstiles_htable_lock_grp) #define turnstile_bucket_unlock(bucket) \ - lck_spin_unlock(&bucket->ts_ht_bucket_lock) + lck_spin_unlock(&bucket->ts_ht_bucket_lock) /* * Name: turnstiles_hashtable_init @@ -264,15 +265,17 @@ static void turnstiles_hashtable_init(void) { /* Initialize number of buckets in the hashtable */ - if (PE_parse_boot_argn("ts_htable_buckets", &ts_htable_buckets, sizeof(ts_htable_buckets)) != TRUE) + if (PE_parse_boot_argn("ts_htable_buckets", &ts_htable_buckets, sizeof(ts_htable_buckets)) != TRUE) { ts_htable_buckets = TURNSTILE_HTABLE_BUCKETS_DEFAULT; - + } + assert(ts_htable_buckets <= TURNSTILE_HTABLE_BUCKETS_MAX); uint32_t ts_htable_size = ts_htable_buckets * sizeof(struct turnstile_htable_bucket); turnstile_htable = (struct turnstile_htable_bucket *)kalloc(ts_htable_size); - if (turnstile_htable == NULL) + if (turnstile_htable == NULL) { panic("Turnstiles hash table memory allocation failed!"); - + } + lck_grp_attr_setdefault(&turnstiles_htable_lock_grp_attr); lck_grp_init(&turnstiles_htable_lock_grp, "turnstiles_htable_locks", &turnstiles_htable_lock_grp_attr); lck_attr_setdefault(&turnstiles_htable_lock_attr); @@ -367,13 +370,11 @@ turnstile_freelist_remove( * Returns: * hash table bucket index for provided proprietor */ -static inline uint32_t +static inline uint32_t turnstile_hash(uintptr_t proprietor) { - char *key = (char *)&proprietor; - uint32_t hash = jenkins_hash(key, sizeof(key)); - hash &= (ts_htable_buckets - 1); - return hash; + uint32_t hash = os_hash_kernel_pointer((void *)proprietor); + return hash & (ts_htable_buckets - 1); } /* @@ -394,21 +395,21 @@ turnstile_hash(uintptr_t proprietor) */ static struct turnstile * turnstile_htable_lookup_add( - uintptr_t proprietor, + uintptr_t proprietor, struct turnstile *new_turnstile) { uint32_t index = turnstile_hash(proprietor); assert(index < ts_htable_buckets); struct turnstile_htable_bucket *ts_bucket = &(turnstile_htable[index]); spl_t s; - + s = splsched(); turnstile_bucket_lock(ts_bucket); struct turnstile *ts; SLIST_FOREACH(ts, &ts_bucket->ts_ht_bucket_list, ts_htable_link) { if (ts->ts_proprietor == proprietor) { - /* + /* * Found 
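/*
 * Illustrative sketch (not part of the patch): the rewritten turnstile_hash()
 * above masks a pointer hash down to a bucket index, which distributes evenly
 * only while the bucket count is a power of two (mask = buckets - 1; the
 * default is 32). Below, a Fibonacci-hash stand-in plays the role of
 * os_hash_kernel_pointer():
 */
#include <stdint.h>

static uint32_t
sketch_ptr_hash(uintptr_t p)
{
	/* multiply by 2^64 / phi and keep the high bits */
	return (uint32_t)(((uint64_t)p * 11400714819323198485ull) >> 32);
}

static uint32_t
sketch_bucket(uintptr_t proprietor, uint32_t nbuckets /* power of two */)
{
	return sketch_ptr_hash(proprietor) & (nbuckets - 1);
}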
an entry in the hashtable for this proprietor; add thread turnstile to freelist * and return this turnstile */ @@ -478,7 +479,7 @@ turnstable_htable_lookup_remove( *free_turnstile = ret_turnstile; return TURNSTILE_NULL; } else { - /* + /* * Turnstile has free turnstiles on its list; leave the hashtable unchanged * and return the first turnstile in the freelist as the free turnstile */ @@ -542,14 +543,14 @@ void turnstiles_init(void) { turnstiles_zone = zinit(sizeof(struct turnstile), - MAX_TURNSTILES * sizeof(struct turnstile), - TURNSTILES_CHUNK * sizeof(struct turnstile), - "turnstiles"); + MAX_TURNSTILES * sizeof(struct turnstile), + TURNSTILES_CHUNK * sizeof(struct turnstile), + "turnstiles"); if (!PE_parse_boot_argn("turnstile_max_hop", &turnstile_max_hop, sizeof(turnstile_max_hop))) { turnstile_max_hop = TURNSTILE_MAX_HOP_DEFAULT; } - + turnstiles_hashtable_init(); #if DEVELOPMENT || DEBUG @@ -591,7 +592,7 @@ turnstile_alloc(void) /* Add turnstile to global list */ global_turnstiles_lock(); queue_enter(&turnstiles_list, turnstile, - struct turnstile *, ts_global_elm); + struct turnstile *, ts_global_elm); global_turnstiles_unlock(); #endif return turnstile; @@ -614,7 +615,7 @@ turnstile_init(struct turnstile *turnstile) /* Initialize the waitq */ kret = waitq_init(&turnstile->ts_waitq, SYNC_POLICY_DISABLE_IRQ | SYNC_POLICY_REVERSED | - SYNC_POLICY_TURNSTILE); + SYNC_POLICY_TURNSTILE); assert(kret == KERN_SUCCESS); turnstile->ts_inheritor = TURNSTILE_INHERITOR_NULL; @@ -628,7 +629,7 @@ turnstile_init(struct turnstile *turnstile) turnstile->ts_inheritor_flags = TURNSTILE_UPDATE_FLAGS_NONE; turnstile->ts_port_ref = 0; priority_queue_init(&turnstile->ts_inheritor_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + PRIORITY_QUEUE_BUILTIN_MAX_HEAP); #if DEVELOPMENT || DEBUG turnstile->ts_thread = current_thread(); @@ -721,7 +722,7 @@ turnstile_destroy(struct turnstile *turnstile) /* Remove turnstile from global list */ global_turnstiles_lock(); queue_remove(&turnstiles_list, turnstile, - struct turnstile *, ts_global_elm); + struct turnstile *, ts_global_elm); global_turnstiles_unlock(); #endif zfree(turnstiles_zone, turnstile); @@ -776,11 +777,11 @@ turnstile_prepare( thread_turnstile->ts_prev_thread = thread_turnstile->ts_thread; thread_turnstile->ts_thread = NULL; #endif - + if (tstore != NULL) { - /* - * If the primitive stores the turnstile, - * If there is already a turnstile, put the thread_turnstile if the primitive currently does not have a + /* + * If the primitive stores the turnstile, + * If there is already a turnstile, put the thread_turnstile if the primitive currently does not have a * turnstile. * Else, add the thread turnstile to freelist of the primitive turnstile. */ @@ -789,29 +790,29 @@ turnstile_prepare( turnstile_state_add(thread_turnstile, TURNSTILE_STATE_PROPRIETOR); *tstore = thread_turnstile; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_FREELIST_OPERATIONS, (TURNSTILE_PREPARE))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(thread_turnstile), - VM_KERNEL_UNSLIDE_OR_PERM(proprietor), - turnstile_get_type(thread_turnstile), 0, 0); + (TURNSTILE_CODE(TURNSTILE_FREELIST_OPERATIONS, (TURNSTILE_PREPARE))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(thread_turnstile), + VM_KERNEL_UNSLIDE_OR_PERM(proprietor), + turnstile_get_type(thread_turnstile), 0, 0); } else { turnstile_freelist_insert(ret_turnstile, thread_turnstile); } ret_turnstile = *tstore; } else { - /* + /* * Lookup the primitive in the turnstile hash table and see if it already has an entry. 
*/ ret_turnstile = turnstile_htable_lookup_add(proprietor, thread_turnstile); if (ret_turnstile == NULL) { ret_turnstile = thread_turnstile; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_FREELIST_OPERATIONS, (TURNSTILE_PREPARE))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(thread_turnstile), - VM_KERNEL_UNSLIDE_OR_PERM(proprietor), - turnstile_get_type(thread_turnstile), 0, 0); + (TURNSTILE_CODE(TURNSTILE_FREELIST_OPERATIONS, (TURNSTILE_PREPARE))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(thread_turnstile), + VM_KERNEL_UNSLIDE_OR_PERM(proprietor), + turnstile_get_type(thread_turnstile), 0, 0); } } - + return ret_turnstile; } @@ -870,7 +871,7 @@ turnstile_complete( */ if (thread_turnstile->ts_inheritor != TURNSTILE_INHERITOR_NULL) { turnstile_update_inheritor(thread_turnstile, TURNSTILE_INHERITOR_NULL, - (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); + (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); /* * old inheritor is set in current thread and its priority propagation * will happen in turnstile cleanup call */ assert(thread_turnstile->ts_inheritor == TURNSTILE_INHERITOR_NULL); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_FREELIST_OPERATIONS, (TURNSTILE_COMPLETE))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(thread_turnstile), - VM_KERNEL_UNSLIDE_OR_PERM(proprietor), - turnstile_get_type(thread_turnstile), 0, 0); + (TURNSTILE_CODE(TURNSTILE_FREELIST_OPERATIONS, (TURNSTILE_COMPLETE))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(thread_turnstile), + VM_KERNEL_UNSLIDE_OR_PERM(proprietor), + turnstile_get_type(thread_turnstile), 0, 0); } else { /* If primitive's turnstile needs priority update, set it up for turnstile cleanup */ if (turnstile_recompute_priority(primitive_turnstile)) { turnstile_reference(primitive_turnstile); thread->inheritor = primitive_turnstile; thread->inheritor_flags = (TURNSTILE_INHERITOR_TURNSTILE | - TURNSTILE_INHERITOR_NEEDS_PRI_UPDATE); + TURNSTILE_INHERITOR_NEEDS_PRI_UPDATE); } } @@ -932,7 +933,7 @@ turnstile_update_inheritor_locked( boolean_t old_inheritor_needs_update = FALSE; boolean_t new_inheritor_needs_update = FALSE; turnstile_stats_update_flags_t tsu_flags = - turnstile_get_update_flags_for_above_UI_pri_change(turnstile); + turnstile_get_update_flags_for_above_UI_pri_change(turnstile); assert(waitq_held(&turnstile->ts_waitq)); @@ -961,7 +962,6 @@ turnstile_update_inheritor_locked( /* adjust turnstile position in the thread's inheritor list */ new_inheritor_needs_update = thread_update_turnstile_promotion( thread_inheritor, turnstile); - } else if (new_inheritor_flags & TURNSTILE_INHERITOR_TURNSTILE) { struct turnstile *inheritor_turnstile = new_inheritor; @@ -969,7 +969,6 @@ turnstile_update_inheritor_locked( new_inheritor_needs_update = turnstile_update_turnstile_promotion( inheritor_turnstile, turnstile); - } else if (new_inheritor_flags & TURNSTILE_INHERITOR_WORKQ) { /* * When we are still picking "WORKQ" then possible racing * and we don't need to update anything here.
*/ turnstile_stats_update(1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG, turnstile); + TSU_TURNSTILE_ARG | TSU_BOOST_ARG, turnstile); } else { panic("Inheritor flags lost along the way"); } @@ -985,7 +984,7 @@ turnstile_update_inheritor_locked( /* Update turnstile stats */ if (!new_inheritor_needs_update) { turnstile_stats_update(1, TSU_PRI_PROPAGATION | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG | tsu_flags, turnstile); + TSU_TURNSTILE_ARG | TSU_BOOST_ARG | tsu_flags, turnstile); } break; } @@ -996,27 +995,25 @@ turnstile_update_inheritor_locked( /* remove turnstile from thread's inheritor list */ old_inheritor_needs_update = thread_remove_turnstile_promotion(thread_inheritor, turnstile); - } else if (old_inheritor_flags & TURNSTILE_INHERITOR_TURNSTILE) { struct turnstile *old_turnstile = old_inheritor; old_inheritor_needs_update = turnstile_remove_turnstile_promotion( old_turnstile, turnstile); - } else if (old_inheritor_flags & TURNSTILE_INHERITOR_WORKQ) { /* * We don't need to do anything when the push was WORKQ * because nothing is pushed on in the first place. */ turnstile_stats_update(1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_TURNSTILE_ARG, turnstile); + TSU_TURNSTILE_ARG, turnstile); } else { panic("Inheritor flags lost along the way"); } /* Update turnstile stats */ if (!old_inheritor_needs_update) { turnstile_stats_update(1, TSU_PRI_PROPAGATION | TSU_TURNSTILE_ARG, - turnstile); + turnstile); } } @@ -1027,22 +1024,20 @@ turnstile_update_inheritor_locked( assert(new_inheritor_flags & TURNSTILE_INHERITOR_THREAD); /* add turnstile to thread's inheritor list */ new_inheritor_needs_update = thread_add_turnstile_promotion( - thread_inheritor, turnstile); - + thread_inheritor, turnstile); } else if (new_inheritor_flags & TURNSTILE_INHERITOR_TURNSTILE) { struct turnstile *new_turnstile = new_inheritor; new_inheritor_needs_update = turnstile_add_turnstile_promotion( new_turnstile, turnstile); - } else if (new_inheritor_flags & TURNSTILE_INHERITOR_WORKQ) { struct workqueue *wq_inheritor = new_inheritor; new_inheritor_needs_update = workq_add_turnstile_promotion( - wq_inheritor, turnstile); + wq_inheritor, turnstile); if (!new_inheritor_needs_update) { turnstile_stats_update(1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG, turnstile); + TSU_TURNSTILE_ARG | TSU_BOOST_ARG, turnstile); } } else { panic("Inheritor flags lost along the way"); @@ -1050,7 +1045,7 @@ turnstile_update_inheritor_locked( /* Update turnstile stats */ if (!new_inheritor_needs_update) { turnstile_stats_update(1, TSU_PRI_PROPAGATION | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG | tsu_flags,turnstile); + TSU_TURNSTILE_ARG | TSU_BOOST_ARG | tsu_flags, turnstile); } } @@ -1126,7 +1121,7 @@ turnstile_update_inheritor( workq_reference((struct workqueue *)new_inheritor); } else { panic("Missing type in flags (%x) for inheritor (%p)", flags, - new_inheritor); + new_inheritor); } /* Do not perform the update if delayed update is specified */ @@ -1169,7 +1164,7 @@ turnstile_need_thread_promotion_update( boolean_t needs_update = FALSE; thread_link_priority = priority_queue_entry_key(&(dst_turnstile->ts_waitq.waitq_prio_queue), - &(thread->wait_prioq_links)); + &(thread->wait_prioq_links)); needs_update = (thread_link_priority == thread->base_pri) ? 
FALSE : TRUE; return needs_update; @@ -1188,18 +1183,18 @@ turnstile_need_thread_promotion_update( */ static boolean_t turnstile_priority_queue_update_entry_key(struct priority_queue *q, - priority_queue_entry_t elt, priority_queue_key_t pri) + priority_queue_entry_t elt, priority_queue_key_t pri) { priority_queue_key_t old_key = priority_queue_max_key(q); if (priority_queue_entry_key(q, elt) < pri) { if (priority_queue_entry_increase(q, elt, pri, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { return old_key != priority_queue_max_key(q); } } else if (priority_queue_entry_key(q, elt) > pri) { if (priority_queue_entry_decrease(q, elt, pri, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { return old_key != priority_queue_max_key(q); } } @@ -1227,20 +1222,20 @@ turnstile_update_thread_promotion_locked( thread_t thread) { int thread_link_priority = priority_queue_entry_key(&(dst_turnstile->ts_waitq.waitq_prio_queue), - &(thread->wait_prioq_links)); + &(thread->wait_prioq_links)); if (thread->base_pri != thread_link_priority) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_MOVED_IN_TURNSTILE_WAITQ))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(dst_turnstile), - thread_tid(thread), - thread->base_pri, - thread_link_priority, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_MOVED_IN_TURNSTILE_WAITQ))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(dst_turnstile), + thread_tid(thread), + thread->base_pri, + thread_link_priority, 0); } if (!turnstile_priority_queue_update_entry_key( - &dst_turnstile->ts_waitq.waitq_prio_queue, - &thread->wait_prioq_links, thread->base_pri)) { + &dst_turnstile->ts_waitq.waitq_prio_queue, + &thread->wait_prioq_links, thread->base_pri)) { return FALSE; } @@ -1273,15 +1268,15 @@ thread_add_turnstile_promotion( thread_lock(thread); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_ADDED_TO_THREAD_HEAP))) | DBG_FUNC_NONE, - thread_tid(thread), - VM_KERNEL_UNSLIDE_OR_PERM(turnstile), - turnstile->ts_priority, 0, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_ADDED_TO_THREAD_HEAP))) | DBG_FUNC_NONE, + thread_tid(thread), + VM_KERNEL_UNSLIDE_OR_PERM(turnstile), + turnstile->ts_priority, 0, 0); priority_queue_entry_init(&(turnstile->ts_inheritor_links)); if (priority_queue_insert(&thread->inheritor_queue, - &turnstile->ts_inheritor_links, turnstile->ts_priority, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &turnstile->ts_inheritor_links, turnstile->ts_priority, + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { /* Update thread priority */ needs_update = thread_recompute_user_promotion_locked(thread); } @@ -1289,9 +1284,9 @@ thread_add_turnstile_promotion( /* Update turnstile stats */ if (!needs_update) { turnstile_stats_update(1, - thread_get_update_flags_for_turnstile_propagation_stoppage(thread) | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG, - turnstile); + thread_get_update_flags_for_turnstile_propagation_stoppage(thread) | + TSU_TURNSTILE_ARG | TSU_BOOST_ARG, + turnstile); } thread_unlock(thread); @@ -1323,14 +1318,14 @@ thread_remove_turnstile_promotion( thread_lock(thread); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_REMOVED_FROM_THREAD_HEAP))) | DBG_FUNC_NONE, - thread_tid(thread), - VM_KERNEL_UNSLIDE_OR_PERM(turnstile), - 0, 0, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_REMOVED_FROM_THREAD_HEAP))) | 
DBG_FUNC_NONE, + thread_tid(thread), + VM_KERNEL_UNSLIDE_OR_PERM(turnstile), + 0, 0, 0); if (priority_queue_remove(&thread->inheritor_queue, - &turnstile->ts_inheritor_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &turnstile->ts_inheritor_links, + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { /* Update thread priority */ needs_update = thread_recompute_user_promotion_locked(thread); } @@ -1338,8 +1333,8 @@ thread_remove_turnstile_promotion( /* Update turnstile stats */ if (!needs_update) { turnstile_stats_update(1, - thread_get_update_flags_for_turnstile_propagation_stoppage(thread) | TSU_TURNSTILE_ARG, - turnstile); + thread_get_update_flags_for_turnstile_propagation_stoppage(thread) | TSU_TURNSTILE_ARG, + turnstile); } thread_unlock(thread); @@ -1369,7 +1364,7 @@ thread_needs_turnstile_promotion_update( /* Update the pairing heap */ turnstile_link_priority = priority_queue_entry_key(&(thread->inheritor_queue), - &(turnstile->ts_inheritor_links)); + &(turnstile->ts_inheritor_links)); needs_update = (turnstile_link_priority == turnstile->ts_priority) ? FALSE : TRUE; return needs_update; @@ -1394,19 +1389,19 @@ thread_update_turnstile_promotion_locked( struct turnstile *turnstile) { int turnstile_link_priority = priority_queue_entry_key(&(thread->inheritor_queue), - &(turnstile->ts_inheritor_links)); + &(turnstile->ts_inheritor_links)); if (turnstile->ts_priority != turnstile_link_priority) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_MOVED_IN_THREAD_HEAP))) | DBG_FUNC_NONE, - thread_tid(thread), - VM_KERNEL_UNSLIDE_OR_PERM(turnstile), - turnstile->ts_priority, - turnstile_link_priority, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_MOVED_IN_THREAD_HEAP))) | DBG_FUNC_NONE, + thread_tid(thread), + VM_KERNEL_UNSLIDE_OR_PERM(turnstile), + turnstile->ts_priority, + turnstile_link_priority, 0); } if (!turnstile_priority_queue_update_entry_key(&thread->inheritor_queue, - &turnstile->ts_inheritor_links, turnstile->ts_priority)) { + &turnstile->ts_inheritor_links, turnstile->ts_priority)) { return FALSE; } @@ -1438,7 +1433,7 @@ thread_update_turnstile_promotion( if (!needs_update) { turnstile_stats_update(1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG, turnstile); + TSU_TURNSTILE_ARG | TSU_BOOST_ARG, turnstile); return needs_update; } @@ -1449,9 +1444,9 @@ thread_update_turnstile_promotion( /* Update turnstile stats */ if (!needs_update) { turnstile_stats_update(1, - thread_get_update_flags_for_turnstile_propagation_stoppage(thread) | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG, - turnstile); + thread_get_update_flags_for_turnstile_propagation_stoppage(thread) | + TSU_TURNSTILE_ARG | TSU_BOOST_ARG, + turnstile); } thread_unlock(thread); return needs_update; @@ -1475,11 +1470,11 @@ thread_get_inheritor_turnstile_priority(thread_t thread) struct turnstile *max_turnstile; max_turnstile = priority_queue_max(&thread->inheritor_queue, - struct turnstile, ts_inheritor_links); + struct turnstile, ts_inheritor_links); if (max_turnstile) { return priority_queue_entry_key(&thread->inheritor_queue, - &max_turnstile->ts_inheritor_links); + &max_turnstile->ts_inheritor_links); } return MAXPRI_THROTTLE; @@ -1599,7 +1594,6 @@ turnstile_get_update_flags_for_above_UI_pri_change(struct turnstile *turnstile) (thread_qos_policy_params.qos_pri[THREAD_QOS_USER_INTERACTIVE] + 1) && turnstile_get_type(turnstile) != TURNSTILE_ULOCK) { return TSU_ABOVE_UI_PRI_CHANGE; - } return TSU_FLAGS_NONE; @@ -1651,7 +1645,7 @@ 
turnstile_need_turnstile_promotion_update( boolean_t needs_update = FALSE; src_turnstile_link_priority = priority_queue_entry_key(&(dst_turnstile->ts_inheritor_queue), - &(src_turnstile->ts_inheritor_links)); + &(src_turnstile->ts_inheritor_links)); needs_update = (src_turnstile_link_priority == src_turnstile->ts_priority) ? FALSE : TRUE; return needs_update; @@ -1678,19 +1672,19 @@ turnstile_update_turnstile_promotion_locked( { int src_turnstile_link_priority; src_turnstile_link_priority = priority_queue_entry_key(&(dst_turnstile->ts_inheritor_queue), - &(src_turnstile->ts_inheritor_links)); + &(src_turnstile->ts_inheritor_links)); if (src_turnstile->ts_priority != src_turnstile_link_priority) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_MOVED_IN_TURNSTILE_HEAP))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(dst_turnstile), - VM_KERNEL_UNSLIDE_OR_PERM(src_turnstile), - src_turnstile->ts_priority, src_turnstile_link_priority, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_MOVED_IN_TURNSTILE_HEAP))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(dst_turnstile), + VM_KERNEL_UNSLIDE_OR_PERM(src_turnstile), + src_turnstile->ts_priority, src_turnstile_link_priority, 0); } if (!turnstile_priority_queue_update_entry_key( - &dst_turnstile->ts_inheritor_queue, &src_turnstile->ts_inheritor_links, - src_turnstile->ts_priority)) { + &dst_turnstile->ts_inheritor_queue, &src_turnstile->ts_inheritor_links, + src_turnstile->ts_priority)) { return FALSE; } @@ -1721,8 +1715,8 @@ turnstile_update_turnstile_promotion( boolean_t needs_update = turnstile_need_turnstile_promotion_update(dst_turnstile, src_turnstile); if (!needs_update) { turnstile_stats_update(1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG, - src_turnstile); + TSU_TURNSTILE_ARG | TSU_BOOST_ARG, + src_turnstile); return needs_update; } @@ -1733,8 +1727,8 @@ turnstile_update_turnstile_promotion( /* Update turnstile stats */ if (!needs_update) { turnstile_stats_update(1, - (dst_turnstile->ts_inheritor ? TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG, src_turnstile); + (dst_turnstile->ts_inheritor ? 
TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | + TSU_TURNSTILE_ARG | TSU_BOOST_ARG, src_turnstile); } waitq_unlock(&dst_turnstile->ts_waitq); return needs_update; @@ -1765,15 +1759,15 @@ turnstile_add_turnstile_promotion( waitq_lock(&dst_turnstile->ts_waitq); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_ADDED_TO_TURNSTILE_HEAP))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(dst_turnstile), - VM_KERNEL_UNSLIDE_OR_PERM(src_turnstile), - src_turnstile->ts_priority, 0, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_ADDED_TO_TURNSTILE_HEAP))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(dst_turnstile), + VM_KERNEL_UNSLIDE_OR_PERM(src_turnstile), + src_turnstile->ts_priority, 0, 0); priority_queue_entry_init(&(src_turnstile->ts_inheritor_links)); if (priority_queue_insert(&dst_turnstile->ts_inheritor_queue, - &src_turnstile->ts_inheritor_links, src_turnstile->ts_priority, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &src_turnstile->ts_inheritor_links, src_turnstile->ts_priority, + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { /* Update dst turnstile priority */ needs_update = turnstile_recompute_priority_locked(dst_turnstile); } @@ -1781,8 +1775,8 @@ turnstile_add_turnstile_promotion( /* Update turnstile stats */ if (!needs_update) { turnstile_stats_update(1, - (dst_turnstile->ts_inheritor ? TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | - TSU_TURNSTILE_ARG | TSU_BOOST_ARG, src_turnstile); + (dst_turnstile->ts_inheritor ? TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | + TSU_TURNSTILE_ARG | TSU_BOOST_ARG, src_turnstile); } waitq_unlock(&dst_turnstile->ts_waitq); @@ -1814,14 +1808,14 @@ turnstile_remove_turnstile_promotion( waitq_lock(&dst_turnstile->ts_waitq); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_REMOVED_FROM_TURNSTILE_HEAP))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(dst_turnstile), - VM_KERNEL_UNSLIDE_OR_PERM(src_turnstile), - 0, 0, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (TURNSTILE_REMOVED_FROM_TURNSTILE_HEAP))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(dst_turnstile), + VM_KERNEL_UNSLIDE_OR_PERM(src_turnstile), + 0, 0, 0); if (priority_queue_remove(&dst_turnstile->ts_inheritor_queue, - &src_turnstile->ts_inheritor_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { + &src_turnstile->ts_inheritor_links, + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE)) { /* Update dst turnstile priority */ needs_update = turnstile_recompute_priority_locked(dst_turnstile); } @@ -1829,8 +1823,8 @@ turnstile_remove_turnstile_promotion( /* Update turnstile stats */ if (!needs_update) { turnstile_stats_update(1, - (dst_turnstile->ts_inheritor ? TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | - TSU_TURNSTILE_ARG, src_turnstile); + (dst_turnstile->ts_inheritor ? 
TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | + TSU_TURNSTILE_ARG, src_turnstile); } waitq_unlock(&dst_turnstile->ts_waitq); @@ -1864,26 +1858,25 @@ turnstile_recompute_priority_locked( int turnstile_max_pri = MAXPRI_THROTTLE; switch (turnstile_promote_policy[turnstile_get_type(turnstile)]) { - case TURNSTILE_USER_PROMOTE: case TURNSTILE_USER_IPC_PROMOTE: old_priority = turnstile->ts_priority; max_thread = priority_queue_max(&turnstile->ts_waitq.waitq_prio_queue, - struct thread, wait_prioq_links); + struct thread, wait_prioq_links); if (max_thread) { thread_max_pri = priority_queue_entry_key(&turnstile->ts_waitq.waitq_prio_queue, - &max_thread->wait_prioq_links); + &max_thread->wait_prioq_links); } max_turnstile = priority_queue_max(&turnstile->ts_inheritor_queue, - struct turnstile, ts_inheritor_links); + struct turnstile, ts_inheritor_links); if (max_turnstile) { turnstile_max_pri = priority_queue_entry_key(&turnstile->ts_inheritor_queue, - &max_turnstile->ts_inheritor_links); + &max_turnstile->ts_inheritor_links); } new_priority = max(thread_max_pri, turnstile_max_pri); @@ -1891,31 +1884,29 @@ turnstile_recompute_priority_locked( if (old_priority != new_priority) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_PRIORITY_OPERATIONS, - (TURNSTILE_PRIORITY_CHANGE))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(turnstile), - new_priority, - old_priority, - 0, 0); + (TURNSTILE_CODE(TURNSTILE_PRIORITY_OPERATIONS, + (TURNSTILE_PRIORITY_CHANGE))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(turnstile), + new_priority, + old_priority, + 0, 0); } needs_priority_update = (!(old_priority == new_priority)) && - (turnstile->ts_inheritor != NULL); - break; + (turnstile->ts_inheritor != NULL); + break; case TURNSTILE_PROMOTE_NONE: case TURNSTILE_KERNEL_PROMOTE: - /* The turnstile was repurposed, do nothing */ - break; + /* The turnstile was repurposed, do nothing */ + break; default: - panic("Needs implementation for turnstile_recompute_priority"); - break; - + panic("Needs implementation for turnstile_recompute_priority"); + break; } return needs_priority_update; - } @@ -1945,7 +1936,6 @@ turnstile_recompute_priority( waitq_unlock(&turnstile->ts_waitq); splx(s); return needs_priority_update; - } @@ -1980,10 +1970,10 @@ turnstile_workq_proprietor_of_max_turnstile( waitq_lock(&turnstile->ts_waitq); max_turnstile = priority_queue_max(&turnstile->ts_inheritor_queue, - struct turnstile, ts_inheritor_links); + struct turnstile, ts_inheritor_links); if (max_turnstile) { max_priority = priority_queue_entry_key(&turnstile->ts_inheritor_queue, - &max_turnstile->ts_inheritor_links); + &max_turnstile->ts_inheritor_links); proprietor = max_turnstile->ts_proprietor; } @@ -1994,7 +1984,9 @@ turnstile_workq_proprietor_of_max_turnstile( max_priority = 0; proprietor = 0; } - if (proprietor_out) *proprietor_out = proprietor; + if (proprietor_out) { + *proprietor_out = proprietor; + } return max_priority; } @@ -2020,7 +2012,7 @@ turnstile_update_inheritor_priority_chain( int total_hop = 0, thread_hop = 0; spl_t s; turnstile_stats_update_flags_t tsu_flags = ((turnstile_flags & TURNSTILE_UPDATE_BOOST) ? 
- TSU_BOOST_ARG : TSU_FLAGS_NONE) | TSU_PRI_PROPAGATION; + TSU_BOOST_ARG : TSU_FLAGS_NONE) | TSU_PRI_PROPAGATION; if (inheritor == NULL) { return; @@ -2050,32 +2042,29 @@ turnstile_update_inheritor_priority_chain( if (turnstile != TURNSTILE_NULL) { if (turnstile->ts_inheritor == NULL) { turnstile_stats_update(total_hop + 1, TSU_NO_INHERITOR | - TSU_TURNSTILE_ARG | tsu_flags, - turnstile); + TSU_TURNSTILE_ARG | tsu_flags, + turnstile); waitq_unlock(&turnstile->ts_waitq); turnstile = TURNSTILE_NULL; break; } if (turnstile->ts_inheritor_flags & TURNSTILE_INHERITOR_THREAD) { turnstile_update_inheritor_thread_priority_chain(&turnstile, &thread, - total_hop, tsu_flags); - + total_hop, tsu_flags); } else if (turnstile->ts_inheritor_flags & TURNSTILE_INHERITOR_TURNSTILE) { turnstile_update_inheritor_turnstile_priority_chain(&turnstile, - total_hop, tsu_flags); - + total_hop, tsu_flags); } else if (turnstile->ts_inheritor_flags & TURNSTILE_INHERITOR_WORKQ) { turnstile_update_inheritor_workq_priority_chain(turnstile, s); turnstile_stats_update(total_hop + 1, TSU_NO_PRI_CHANGE_NEEDED | tsu_flags, - NULL); + NULL); return; - } else { panic("Inheritor flags not passed in turnstile_update_inheritor"); } } else if (thread != THREAD_NULL) { thread_update_waiting_turnstile_priority_chain(&thread, &turnstile, - thread_hop, total_hop, tsu_flags); + thread_hop, total_hop, tsu_flags); thread_hop++; } total_hop++; @@ -2114,7 +2103,7 @@ turnstile_update_inheritor_complete( /* Perform priority update for new inheritor */ if (inheritor_flags & TURNSTILE_NEEDS_PRI_UPDATE) { turnstile_update_inheritor_priority_chain(turnstile, - TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_UPDATE_BOOST); + TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_UPDATE_BOOST); } } @@ -2147,7 +2136,7 @@ turnstile_cleanup(void) /* Perform priority demotion for old inheritor */ if (inheritor_flags & TURNSTILE_INHERITOR_NEEDS_PRI_UPDATE) { turnstile_update_inheritor_priority_chain(old_inheritor, - inheritor_flags); + inheritor_flags); } /* Drop thread reference for old inheritor */ @@ -2186,13 +2175,17 @@ turnstile_update_inheritor_workq_priority_chain(struct turnstile *turnstile, spl return; } - if (!workq_lock_held) workq_reference(wq); + if (!workq_lock_held) { + workq_reference(wq); + } waitq_unlock(&turnstile->ts_waitq); splx(s); workq_schedule_creator_turnstile_redrive(wq, workq_lock_held); - if (!workq_lock_held) workq_deallocate_safe(wq); + if (!workq_lock_held) { + workq_deallocate_safe(wq); + } } /* @@ -2231,7 +2224,7 @@ turnstile_update_inheritor_thread_priority_chain( needs_update = thread_needs_turnstile_promotion_update(thread_inheritor, turnstile); if (!needs_update && !first_update) { turnstile_stats_update(total_hop + 1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_TURNSTILE_ARG | tsu_flags, turnstile); + TSU_TURNSTILE_ARG | tsu_flags, turnstile); waitq_unlock(&turnstile->ts_waitq); return; } @@ -2252,9 +2245,9 @@ turnstile_update_inheritor_thread_priority_chain( if (!needs_update && !first_update) { /* Update turnstile stats before returning */ turnstile_stats_update(total_hop + 1, - (thread_get_update_flags_for_turnstile_propagation_stoppage(thread_inheritor)) | - TSU_TURNSTILE_ARG | tsu_flags, - turnstile); + (thread_get_update_flags_for_turnstile_propagation_stoppage(thread_inheritor)) | + TSU_TURNSTILE_ARG | tsu_flags, + turnstile); thread_unlock(thread_inheritor); waitq_unlock(&turnstile->ts_waitq); return; @@ -2301,8 +2294,8 @@ turnstile_update_inheritor_turnstile_priority_chain( needs_update = 
turnstile_need_turnstile_promotion_update(inheritor_turnstile, turnstile); if (!needs_update && !first_update) { turnstile_stats_update(total_hop + 1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_TURNSTILE_ARG | tsu_flags, - turnstile); + TSU_TURNSTILE_ARG | tsu_flags, + turnstile); waitq_unlock(&turnstile->ts_waitq); return; } @@ -2322,9 +2315,9 @@ turnstile_update_inheritor_turnstile_priority_chain( if (!needs_update && !first_update) { /* Update turnstile stats before returning */ turnstile_stats_update(total_hop + 1, - (inheritor_turnstile->ts_inheritor ? TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | - TSU_TURNSTILE_ARG | tsu_flags, - turnstile); + (inheritor_turnstile->ts_inheritor ? TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | + TSU_TURNSTILE_ARG | tsu_flags, + turnstile); waitq_unlock(&inheritor_turnstile->ts_waitq); waitq_unlock(&turnstile->ts_waitq); return; @@ -2373,15 +2366,15 @@ thread_update_waiting_turnstile_priority_chain( if (waiting_turnstile == TURNSTILE_NULL || thread_hop > turnstile_max_hop) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, - (waiting_turnstile ? TURNSTILE_UPDATE_STOPPED_BY_LIMIT : THREAD_NOT_WAITING_ON_TURNSTILE) - )) | DBG_FUNC_NONE, - thread_tid(thread), - turnstile_max_hop, - thread_hop, - VM_KERNEL_UNSLIDE_OR_PERM(waiting_turnstile), 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, + (waiting_turnstile ? TURNSTILE_UPDATE_STOPPED_BY_LIMIT : THREAD_NOT_WAITING_ON_TURNSTILE) + )) | DBG_FUNC_NONE, + thread_tid(thread), + turnstile_max_hop, + thread_hop, + VM_KERNEL_UNSLIDE_OR_PERM(waiting_turnstile), 0); turnstile_stats_update(total_hop + 1, TSU_NO_TURNSTILE | - TSU_THREAD_ARG | tsu_flags, thread); + TSU_THREAD_ARG | tsu_flags, thread); thread_unlock(thread); return; } @@ -2390,7 +2383,7 @@ thread_update_waiting_turnstile_priority_chain( needs_update = turnstile_need_thread_promotion_update(waiting_turnstile, thread); if (!needs_update && !first_update) { turnstile_stats_update(total_hop + 1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_THREAD_ARG | tsu_flags, thread); + TSU_THREAD_ARG | tsu_flags, thread); thread_unlock(thread); return; } @@ -2409,7 +2402,7 @@ thread_update_waiting_turnstile_priority_chain( if (turnstile_gencount != turnstile_get_gencount(waiting_turnstile) || waiting_turnstile != thread_get_waiting_turnstile(thread)) { turnstile_stats_update(total_hop + 1, TSU_NO_PRI_CHANGE_NEEDED | - TSU_THREAD_ARG | tsu_flags, thread); + TSU_THREAD_ARG | tsu_flags, thread); /* No updates required, bail out */ thread_unlock(thread); waitq_unlock(&waiting_turnstile->ts_waitq); @@ -2438,8 +2431,8 @@ thread_update_waiting_turnstile_priority_chain( */ if (!needs_update && !first_update) { turnstile_stats_update(total_hop + 1, - (waiting_turnstile->ts_inheritor ? TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | - TSU_THREAD_ARG | tsu_flags, thread); + (waiting_turnstile->ts_inheritor ? 
TSU_NO_PRI_CHANGE_NEEDED : TSU_NO_INHERITOR) | + TSU_THREAD_ARG | tsu_flags, thread); thread_unlock(thread); waitq_unlock(&waiting_turnstile->ts_waitq); return; @@ -2544,7 +2537,7 @@ turnstile_stats_update( #if DEVELOPMENT || DEBUG -int sysctl_io_opaque(void *req,void *pValue, size_t valueSize, int *changed); +int sysctl_io_opaque(void *req, void *pValue, size_t valueSize, int *changed); /* * Name: turnstile_get_boost_stats_sysctl @@ -2559,7 +2552,7 @@ int turnstile_get_boost_stats_sysctl( void *req) { - return sysctl_io_opaque(req, turnstile_boost_stats, sizeof (struct turnstile_stats) * TURNSTILE_MAX_HOP_DEFAULT, NULL); + return sysctl_io_opaque(req, turnstile_boost_stats, sizeof(struct turnstile_stats) * TURNSTILE_MAX_HOP_DEFAULT, NULL); } /* @@ -2575,13 +2568,13 @@ int turnstile_get_unboost_stats_sysctl( void *req) { - return sysctl_io_opaque(req, turnstile_unboost_stats, sizeof (struct turnstile_stats) * TURNSTILE_MAX_HOP_DEFAULT, NULL); + return sysctl_io_opaque(req, turnstile_unboost_stats, sizeof(struct turnstile_stats) * TURNSTILE_MAX_HOP_DEFAULT, NULL); } /* Testing interface for Development kernels */ -#define tstile_test_prim_lock_interlock(test_prim) \ +#define tstile_test_prim_lock_interlock(test_prim) \ lck_spin_lock(&test_prim->ttprim_interlock) -#define tstile_test_prim_unlock_interlock(test_prim) \ +#define tstile_test_prim_unlock_interlock(test_prim) \ lck_spin_unlock(&test_prim->ttprim_interlock) static void @@ -2618,8 +2611,8 @@ lock_start: /* primitive locked, get a turnstile */ prim_turnstile = turnstile_prepare((uintptr_t)test_prim, - use_hashtable ? NULL : &test_prim->ttprim_turnstile, - TURNSTILE_NULL, TURNSTILE_ULOCK); + use_hashtable ? NULL : &test_prim->ttprim_turnstile, + TURNSTILE_NULL, TURNSTILE_ULOCK); assert(prim_turnstile != TURNSTILE_NULL); @@ -2630,13 +2623,13 @@ lock_start: /* Update the turnstile owner */ turnstile_update_inheritor(prim_turnstile, - current_thread(), - (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); + current_thread(), + (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); turnstile_update_inheritor_complete(prim_turnstile, TURNSTILE_INTERLOCK_HELD); turnstile_complete((uintptr_t)test_prim, - use_hashtable ? NULL : &test_prim->ttprim_turnstile, NULL); + use_hashtable ? NULL : &test_prim->ttprim_turnstile, NULL); tstile_test_prim_unlock_interlock(test_prim); @@ -2647,12 +2640,12 @@ lock_start: test_prim->tt_prim_waiters++; turnstile_update_inheritor(prim_turnstile, - test_prim->ttprim_owner, - (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD)); + test_prim->ttprim_owner, + (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD)); waitq_assert_wait64(&prim_turnstile->ts_waitq, - CAST_EVENT64_T(test_prim), THREAD_ABORTSAFE, - TIMEOUT_WAIT_FOREVER); + CAST_EVENT64_T(test_prim), THREAD_ABORTSAFE, + TIMEOUT_WAIT_FOREVER); /* drop the interlock */ tstile_test_prim_unlock_interlock(test_prim); @@ -2666,7 +2659,7 @@ lock_start: tstile_test_prim_lock_interlock(test_prim); test_prim->tt_prim_waiters--; turnstile_complete((uintptr_t)test_prim, - use_hashtable ? NULL : &test_prim->ttprim_turnstile, NULL); + use_hashtable ? NULL : &test_prim->ttprim_turnstile, NULL); tstile_test_prim_unlock_interlock(test_prim); @@ -2683,7 +2676,6 @@ lock_start: int tstile_test_prim_unlock(boolean_t use_hashtable) { - struct tstile_test_prim *test_prim = use_hashtable ? 
test_prim_global_htable : test_prim_ts_inline; /* take the interlock of the primitive */ tstile_test_prim_lock_interlock(test_prim); @@ -2711,24 +2703,24 @@ tstile_test_prim_unlock(boolean_t use_hashtable) /* primitive locked, get a turnstile */ prim_turnstile = turnstile_prepare((uintptr_t)test_prim, - use_hashtable ? NULL : &test_prim->ttprim_turnstile, - TURNSTILE_NULL, TURNSTILE_ULOCK); + use_hashtable ? NULL : &test_prim->ttprim_turnstile, + TURNSTILE_NULL, TURNSTILE_ULOCK); assert(prim_turnstile != TURNSTILE_NULL); /* Update the turnstile owner */ turnstile_update_inheritor(prim_turnstile, - NULL, - (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); + NULL, + (TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD)); waitq_wakeup64_one(&prim_turnstile->ts_waitq, - CAST_EVENT64_T(test_prim), - THREAD_AWAKENED, WAITQ_SELECT_MAX_PRI); + CAST_EVENT64_T(test_prim), + THREAD_AWAKENED, WAITQ_SELECT_MAX_PRI); turnstile_update_inheritor_complete(prim_turnstile, TURNSTILE_INTERLOCK_HELD); turnstile_complete((uintptr_t)test_prim, - use_hashtable ? NULL : &test_prim->ttprim_turnstile, NULL); + use_hashtable ? NULL : &test_prim->ttprim_turnstile, NULL); tstile_test_prim_unlock_interlock(test_prim); diff --git a/osfmk/kern/turnstile.h b/osfmk/kern/turnstile.h index f8f9ebe87..b67497581 100644 --- a/osfmk/kern/turnstile.h +++ b/osfmk/kern/turnstile.h @@ -134,46 +134,46 @@ typedef enum __attribute__((flag_enum)) turnstile_promote_policy { * The flag updates are done while holding the primitive interlock. * */ -#define TURNSTILE_STATE_THREAD 0x1 -#define TURNSTILE_STATE_FREELIST 0x2 -#define TURNSTILE_STATE_HASHTABLE 0x4 -#define TURNSTILE_STATE_PROPRIETOR 0x8 +#define TURNSTILE_STATE_THREAD 0x1 +#define TURNSTILE_STATE_FREELIST 0x2 +#define TURNSTILE_STATE_HASHTABLE 0x4 +#define TURNSTILE_STATE_PROPRIETOR 0x8 /* Helper macros to set/unset turnstile state flags */ #if DEVELOPMENT || DEBUG #define turnstile_state_init(ts, state) \ MACRO_BEGIN \ - ts->ts_state = state; \ + ts->ts_state = state; \ MACRO_END #define turnstile_state_add(ts, state) \ MACRO_BEGIN \ - assert((ts->ts_state & (state)) == 0); \ - ts->ts_state |= state; \ + assert((ts->ts_state & (state)) == 0); \ + ts->ts_state |= state; \ MACRO_END #define turnstile_state_remove(ts, state) \ MACRO_BEGIN \ - assert(ts->ts_state & (state)); \ - ts->ts_state &= ~(state); \ + assert(ts->ts_state & (state)); \ + ts->ts_state &= ~(state); \ MACRO_END #else /* DEVELOPMENT || DEBUG */ #define turnstile_state_init(ts, state) \ MACRO_BEGIN \ - (void)ts; \ + (void)ts; \ MACRO_END #define turnstile_state_add(ts, state) \ MACRO_BEGIN \ - (void)ts; \ + (void)ts; \ MACRO_END #define turnstile_state_remove(ts, state) \ MACRO_BEGIN \ - (void)ts; \ + (void)ts; \ MACRO_END #endif /* DEVELOPMENT || DEBUG */ @@ -306,7 +306,7 @@ struct turnstile { turnstile_inheritor_t ts_inheritor; /* thread/turnstile inheriting the priority (IL, WL) */ union { struct turnstile_list ts_free_turnstiles; /* turnstile free list (IL) */ - SLIST_ENTRY(turnstile) ts_free_elm; /* turnstile free list element (IL) */ + SLIST_ENTRY(turnstile) ts_free_elm; /* turnstile free list element (IL) */ }; struct priority_queue ts_inheritor_queue; /* Queue of turnstile with us as an inheritor (WL) */ union { @@ -676,10 +676,10 @@ extern void workq_deallocate_safe(struct workqueue *wq); extern void workq_destroy(struct workqueue *wq); extern bool workq_is_current_thread_updating_turnstile(struct workqueue *wq); extern void workq_schedule_creator_turnstile_redrive(struct workqueue *wq, - bool 
locked); + bool locked); /* thread.c */ -extern void workq_deallocate_enqueue(struct workqueue *wq); +extern void workq_deallocate_enqueue(struct workqueue *wq); #endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/kern/ux_handler.c b/osfmk/kern/ux_handler.c index 5f81a2ea9..a20237379 100644 --- a/osfmk/kern/ux_handler.c +++ b/osfmk/kern/ux_handler.c @@ -70,8 +70,9 @@ ux_handler_init(void) { ux_handler_port = ipc_port_alloc_kernel(); - if (ux_handler_port == IP_NULL) + if (ux_handler_port == IP_NULL) { panic("can't allocate unix exception port"); + } ipc_kobject_set(ux_handler_port, (ipc_kobject_t)&ux_handler_kobject, IKOT_UX_HANDLER); } @@ -86,8 +87,9 @@ ux_handler_setup(void) { ipc_port_t ux_handler_send_right = ipc_port_make_send(ux_handler_port); - if (!IP_VALID(ux_handler_send_right)) + if (!IP_VALID(ux_handler_send_right)) { panic("Couldn't allocate send right for ux_handler_port!\n"); + } kern_return_t kr = KERN_SUCCESS; @@ -97,13 +99,14 @@ ux_handler_setup(void) * Instruments uses the RPC_ALERT port, so don't register for that. */ kr = host_set_exception_ports(host_priv_self(), - EXC_MASK_ALL & ~(EXC_MASK_RPC_ALERT), - ux_handler_send_right, - EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, - 0); + EXC_MASK_ALL & ~(EXC_MASK_RPC_ALERT), + ux_handler_send_right, + EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES, + 0); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { panic("host_set_exception_ports failed to set ux_handler! %d", kr); + } } /* @@ -113,23 +116,25 @@ ux_handler_setup(void) boolean_t is_ux_handler_port(mach_port_t port) { - if (ux_handler_port == port) + if (ux_handler_port == port) { return TRUE; - else + } else { return FALSE; + } } kern_return_t catch_mach_exception_raise( - mach_port_t exception_port, - mach_port_t thread_port, - mach_port_t task_port, - exception_type_t exception, - mach_exception_data_t code, - __unused mach_msg_type_number_t codeCnt) + mach_port_t exception_port, + mach_port_t thread_port, + mach_port_t task_port, + exception_type_t exception, + mach_exception_data_t code, + __unused mach_msg_type_number_t codeCnt) { - if (exception_port != ux_handler_port) + if (exception_port != ux_handler_port) { return KERN_FAILURE; + } kern_return_t kr = KERN_SUCCESS; @@ -171,15 +176,16 @@ out: kern_return_t catch_exception_raise( - mach_port_t exception_port, - mach_port_t thread, - mach_port_t task, - exception_type_t exception, - exception_data_t code, - mach_msg_type_number_t codeCnt) + mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + exception_data_t code, + mach_msg_type_number_t codeCnt) { - if (exception_port != ux_handler_port) + if (exception_port != ux_handler_port) { return KERN_FAILURE; + } mach_exception_data_type_t big_code[EXCEPTION_CODE_MAX] = { [0] = code[0], @@ -187,74 +193,73 @@ catch_exception_raise( }; return catch_mach_exception_raise(exception_port, - thread, - task, - exception, - big_code, - codeCnt); + thread, + task, + exception, + big_code, + codeCnt); } kern_return_t catch_exception_raise_state( - __unused mach_port_t exception_port, - __unused exception_type_t exception, - __unused const exception_data_t code, - __unused mach_msg_type_number_t codeCnt, - __unused int *flavor, - __unused const thread_state_t old_state, - __unused mach_msg_type_number_t old_stateCnt, - __unused thread_state_t new_state, - __unused mach_msg_type_number_t *new_stateCnt) + __unused mach_port_t exception_port, + __unused exception_type_t exception, + __unused const exception_data_t code, + __unused 
mach_msg_type_number_t codeCnt, + __unused int *flavor, + __unused const thread_state_t old_state, + __unused mach_msg_type_number_t old_stateCnt, + __unused thread_state_t new_state, + __unused mach_msg_type_number_t *new_stateCnt) { - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } kern_return_t catch_mach_exception_raise_state( - __unused mach_port_t exception_port, - __unused exception_type_t exception, - __unused const mach_exception_data_t code, - __unused mach_msg_type_number_t codeCnt, - __unused int *flavor, - __unused const thread_state_t old_state, - __unused mach_msg_type_number_t old_stateCnt, - __unused thread_state_t new_state, - __unused mach_msg_type_number_t *new_stateCnt) + __unused mach_port_t exception_port, + __unused exception_type_t exception, + __unused const mach_exception_data_t code, + __unused mach_msg_type_number_t codeCnt, + __unused int *flavor, + __unused const thread_state_t old_state, + __unused mach_msg_type_number_t old_stateCnt, + __unused thread_state_t new_state, + __unused mach_msg_type_number_t *new_stateCnt) { - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } kern_return_t catch_exception_raise_state_identity( - __unused mach_port_t exception_port, - __unused mach_port_t thread, - __unused mach_port_t task, - __unused exception_type_t exception, - __unused exception_data_t code, - __unused mach_msg_type_number_t codeCnt, - __unused int *flavor, - __unused thread_state_t old_state, - __unused mach_msg_type_number_t old_stateCnt, - __unused thread_state_t new_state, - __unused mach_msg_type_number_t *new_stateCnt) + __unused mach_port_t exception_port, + __unused mach_port_t thread, + __unused mach_port_t task, + __unused exception_type_t exception, + __unused exception_data_t code, + __unused mach_msg_type_number_t codeCnt, + __unused int *flavor, + __unused thread_state_t old_state, + __unused mach_msg_type_number_t old_stateCnt, + __unused thread_state_t new_state, + __unused mach_msg_type_number_t *new_stateCnt) { - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } kern_return_t catch_mach_exception_raise_state_identity( - __unused mach_port_t exception_port, - __unused mach_port_t thread, - __unused mach_port_t task, - __unused exception_type_t exception, - __unused mach_exception_data_t code, - __unused mach_msg_type_number_t codeCnt, - __unused int *flavor, - __unused thread_state_t old_state, - __unused mach_msg_type_number_t old_stateCnt, - __unused thread_state_t new_state, - __unused mach_msg_type_number_t *new_stateCnt) + __unused mach_port_t exception_port, + __unused mach_port_t thread, + __unused mach_port_t task, + __unused exception_type_t exception, + __unused mach_exception_data_t code, + __unused mach_msg_type_number_t codeCnt, + __unused int *flavor, + __unused thread_state_t old_state, + __unused mach_msg_type_number_t old_stateCnt, + __unused thread_state_t new_state, + __unused mach_msg_type_number_t *new_stateCnt) { - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - diff --git a/osfmk/kern/ux_handler.h b/osfmk/kern/ux_handler.h index a3c473b84..6784c7125 100644 --- a/osfmk/kern/ux_handler.h +++ b/osfmk/kern/ux_handler.h @@ -36,4 +36,3 @@ extern void ux_handler_setup(void); extern boolean_t is_ux_handler_port(mach_port_t port); #endif /* !defined(_KERN_UX_HANDLER_H_) */ - diff --git a/osfmk/kern/waitq.c b/osfmk/kern/waitq.c index 98ee900ba..1a38d76fe 100644 --- a/osfmk/kern/waitq.c +++ b/osfmk/kern/waitq.c @@ -75,6 +75,7 @@ #include #include +#include #include 
#include #include @@ -91,52 +92,54 @@ #endif #if CONFIG_WAITQ_DEBUG -#define wqdbg(fmt,...) \ +#define wqdbg(fmt, ...) \ printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__) #else -#define wqdbg(fmt,...) do { } while (0) +#define wqdbg(fmt, ...) do { } while (0) #endif #ifdef WAITQ_VERBOSE_DEBUG -#define wqdbg_v(fmt,...) \ +#define wqdbg_v(fmt, ...) \ printf("WQ[v:%s]: " fmt "\n", __func__, ## __VA_ARGS__) #else -#define wqdbg_v(fmt,...) do { } while (0) +#define wqdbg_v(fmt, ...) do { } while (0) #endif -#define wqinfo(fmt,...) \ +#define wqinfo(fmt, ...) \ printf("WQ[%s]: " fmt "\n", __func__, ## __VA_ARGS__) -#define wqerr(fmt,...) \ +#define wqerr(fmt, ...) \ printf("WQ[%s] ERROR: " fmt "\n", __func__, ## __VA_ARGS__) /* * file-static functions / data */ static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event, - uint64_t *reserved_preposts, - int priority, spl_t *spl); + uint64_t *reserved_preposts, + int priority, spl_t *spl); static kern_return_t waitq_select_thread_locked(struct waitq *waitq, - event64_t event, - thread_t thread, spl_t *spl); + event64_t event, + thread_t thread, spl_t *spl); #define WAITQ_SET_MAX (task_max * 3) static zone_t waitq_set_zone; -#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align))) -#define ROUNDDOWN(x,y) (((x)/(y))*(y)) +#define P2ROUNDUP(x, align) (-(-((uint32_t)(x)) & -(align))) +#define ROUNDDOWN(x, y) (((x)/(y))*(y)) #if CONFIG_LTABLE_STATS || CONFIG_WAITQ_STATS static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip); #endif +lck_grp_t waitq_lck_grp; + #if __arm64__ -#define waitq_lock_to(wq,to) \ - (hw_lock_bit_to(&(wq)->waitq_interlock, LCK_ILOCK, to)) +#define waitq_lock_to(wq, to) \ + (hw_lock_bit_to(&(wq)->waitq_interlock, LCK_ILOCK, to, &waitq_lck_grp)) #define waitq_lock_unlock(wq) \ (hw_unlock_bit(&(wq)->waitq_interlock, LCK_ILOCK)) @@ -146,8 +149,8 @@ static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int s #else -#define waitq_lock_to(wq,to) \ - (hw_lock_to(&(wq)->waitq_interlock, to)) +#define waitq_lock_to(wq, to) \ + (hw_lock_to(&(wq)->waitq_interlock, to, &waitq_lck_grp)) #define waitq_lock_unlock(wq) \ (hw_lock_unlock(&(wq)->waitq_interlock)) @@ -155,7 +158,7 @@ static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int s #define waitq_lock_init(wq) \ (hw_lock_init(&(wq)->waitq_interlock)) -#endif /* __arm64__ */ +#endif /* __arm64__ */ /* * Prepost callback function for specially marked waitq sets @@ -212,7 +215,7 @@ struct waitq_link { }; #if !defined(KEEP_WAITQ_LINK_STATS) static_assert((sizeof(struct waitq_link) & (sizeof(struct waitq_link) - 1)) == 0, - "waitq_link struct must be a power of two!"); + "waitq_link struct must be a power of two!"); #endif #define wql_refcnt(link) \ @@ -223,8 +226,8 @@ static_assert((sizeof(struct waitq_link) & (sizeof(struct waitq_link) - 1)) == 0 #define wql_mkvalid(link) \ do { \ - lt_elem_mkvalid(&(link)->wqte); \ - wql_do_mkvalid_stats(&(link)->wqte); \ + lt_elem_mkvalid(&(link)->wqte); \ + wql_do_mkvalid_stats(&(link)->wqte); \ } while (0) #define wql_is_valid(link) \ @@ -235,7 +238,8 @@ static_assert((sizeof(struct waitq_link) & (sizeof(struct waitq_link) - 1)) == 0 #define WQL_WQS_POISON ((void *)(0xf00df00d)) #define WQL_LINK_POISON (0x0bad0badffffffffull) -static void wql_poison(struct link_table *table, struct lt_elem *elem) +static void +wql_poison(struct link_table *table, struct lt_elem *elem) { struct waitq_link *link = (struct waitq_link *)elem; (void)table; @@ -265,7 +269,8 @@ 
static void wql_poison(struct link_table *table, struct lt_elem *elem) } #ifdef KEEP_WAITQ_LINK_STATS -static __inline__ void wql_do_alloc_stats(struct lt_elem *elem) +static __inline__ void +wql_do_alloc_stats(struct lt_elem *elem) { if (elem) { struct waitq_link *link = (struct waitq_link *)elem; @@ -282,12 +287,14 @@ static __inline__ void wql_do_alloc_stats(struct lt_elem *elem) } } -static __inline__ void wql_do_invalidate_stats(struct lt_elem *elem) +static __inline__ void +wql_do_invalidate_stats(struct lt_elem *elem) { struct waitq_link *link = (struct waitq_link *)elem; - if (!elem) + if (!elem) { return; + } assert(link->sl_mkvalid_ts > 0); @@ -296,12 +303,14 @@ static __inline__ void wql_do_invalidate_stats(struct lt_elem *elem) waitq_grab_backtrace(link->sl_invalidate_bt, 0); } -static __inline__ void wql_do_mkvalid_stats(struct lt_elem *elem) +static __inline__ void +wql_do_mkvalid_stats(struct lt_elem *elem) { struct waitq_link *link = (struct waitq_link *)elem; - if (!elem) + if (!elem) { return; + } memset(link->sl_mkvalid_bt, 0, sizeof(link->sl_mkvalid_bt)); link->sl_mkvalid_ts = mach_absolute_time(); @@ -313,28 +322,32 @@ static __inline__ void wql_do_mkvalid_stats(struct lt_elem *elem) #define wql_do_mkvalid_stats(e) #endif /* KEEP_WAITQ_LINK_STATS */ -static void wql_init(void) +static void +wql_init(void) { uint32_t tablesz = 0, max_links = 0; - if (PE_parse_boot_argn("wql_tsize", &tablesz, sizeof(tablesz)) != TRUE) + if (PE_parse_boot_argn("wql_tsize", &tablesz, sizeof(tablesz)) != TRUE) { tablesz = (uint32_t)g_lt_max_tbl_size; + } tablesz = P2ROUNDUP(tablesz, PAGE_SIZE); max_links = tablesz / sizeof(struct waitq_link); assert(max_links > 0 && tablesz > 0); /* we have a restricted index range */ - if (max_links > (LT_IDX_MAX + 1)) + if (max_links > (LT_IDX_MAX + 1)) { max_links = LT_IDX_MAX + 1; + } wqinfo("init linktable with max:%d elements (%d bytes)", - max_links, tablesz); + max_links, tablesz); ltable_init(&g_wqlinktable, "wqslab.wql", max_links, - sizeof(struct waitq_link), wql_poison); + sizeof(struct waitq_link), wql_poison); } -static void wql_ensure_free_space(void) +static void +wql_ensure_free_space(void) { if (g_wqlinktable.nelem - g_wqlinktable.used_elem < g_min_free_table_elem) { /* @@ -342,14 +355,15 @@ static void wql_ensure_free_space(void) */ if (g_wqlinktable.used_elem <= g_wqlinktable.nelem) { wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d", - g_wqlinktable.nelem, g_wqlinktable.used_elem, - g_min_free_table_elem); + g_wqlinktable.nelem, g_wqlinktable.used_elem, + g_min_free_table_elem); ltable_grow(&g_wqlinktable, g_min_free_table_elem); } } } -static struct waitq_link *wql_alloc_link(int type) +static struct waitq_link * +wql_alloc_link(int type) { struct lt_elem *elem; @@ -358,7 +372,8 @@ static struct waitq_link *wql_alloc_link(int type) return (struct waitq_link *)elem; } -static void wql_realloc_link(struct waitq_link *link, int type) +static void +wql_realloc_link(struct waitq_link *link, int type) { ltable_realloc_elem(&g_wqlinktable, &link->wqte, type); #ifdef KEEP_WAITQ_LINK_STATS @@ -371,13 +386,15 @@ static void wql_realloc_link(struct waitq_link *link, int type) #endif } -static void wql_invalidate(struct waitq_link *link) +static void +wql_invalidate(struct waitq_link *link) { lt_elem_invalidate(&link->wqte); wql_do_invalidate_stats(&link->wqte); } -static struct waitq_link *wql_get_link(uint64_t setid) +static struct waitq_link * +wql_get_link(uint64_t setid) { struct lt_elem *elem; @@ -385,30 +402,34 @@ static struct 
waitq_link *wql_get_link(uint64_t setid) return (struct waitq_link *)elem; } -static void wql_put_link(struct waitq_link *link) +static void +wql_put_link(struct waitq_link *link) { - if (!link) + if (!link) { return; + } ltable_put_elem(&g_wqlinktable, (struct lt_elem *)link); } -static struct waitq_link *wql_get_reserved(uint64_t setid, int type) +static struct waitq_link * +wql_get_reserved(uint64_t setid, int type) { struct lt_elem *elem; elem = lt_elem_list_first(&g_wqlinktable, setid); - if (!elem) + if (!elem) { return NULL; + } ltable_realloc_elem(&g_wqlinktable, elem, type); return (struct waitq_link *)elem; } static inline int waitq_maybe_remove_link(struct waitq *waitq, - uint64_t setid, - struct waitq_link *parent, - struct waitq_link *left, - struct waitq_link *right); + uint64_t setid, + struct waitq_link *parent, + struct waitq_link *left, + struct waitq_link *right); enum { LINK_WALK_ONE_LEVEL = 0, @@ -417,7 +438,7 @@ enum { }; typedef int (*wql_callback_func)(struct waitq *waitq, void *ctx, - struct waitq_link *link); + struct waitq_link *link); /** * walk_waitq_links: walk all table elements (of type 'link_type') pointed to by 'setid' @@ -474,9 +495,10 @@ typedef int (*wql_callback_func)(struct waitq *waitq, void *ctx, * 'right_setid' pointer in the link object */ static __attribute__((noinline)) -int walk_waitq_links(int walk_type, struct waitq *waitq, - uint64_t setid, int link_type, - void *ctx, wql_callback_func cb) +int +walk_waitq_links(int walk_type, struct waitq *waitq, + uint64_t setid, int link_type, + void *ctx, wql_callback_func cb) { struct waitq_link *link; uint64_t nextid; @@ -485,8 +507,9 @@ int walk_waitq_links(int walk_type, struct waitq *waitq, link = wql_get_link(setid); /* invalid link */ - if (!link) + if (!link) { return WQ_ITERATE_CONTINUE; + } setid = nextid = 0; wqltype = wql_type(link); @@ -512,7 +535,7 @@ int walk_waitq_links(int walk_type, struct waitq *waitq, if (wqltype == WQL_WQS && (walk_type == LINK_WALK_FULL_DAG || - walk_type == LINK_WALK_FULL_DAG_UNLOCKED)) { + walk_type == LINK_WALK_FULL_DAG_UNLOCKED)) { /* * Recurse down any sets to which this wait queue set was * added. 
We do this just before we put our reference to @@ -544,9 +567,10 @@ int walk_waitq_links(int walk_type, struct waitq *waitq, wqset_setid = wqset->wqset_q.waitq_set_id; - if (wqset_setid > 0) + if (wqset_setid > 0) { ret = walk_waitq_links(walk_type, &wqset->wqset_q, - wqset_setid, link_type, ctx, cb); + wqset_setid, link_type, ctx, cb); + } if (should_unlock) { waitq_set_unlock(wqset); } @@ -561,13 +585,15 @@ int walk_waitq_links(int walk_type, struct waitq *waitq, /* recurse down left side of the tree */ if (setid) { int ret = walk_waitq_links(walk_type, waitq, setid, link_type, ctx, cb); - if (ret != WQ_ITERATE_CONTINUE) + if (ret != WQ_ITERATE_CONTINUE) { return ret; + } } /* recurse down right side of the tree */ - if (nextid) + if (nextid) { return walk_waitq_links(walk_type, waitq, nextid, link_type, ctx, cb); + } return WQ_ITERATE_CONTINUE; } @@ -607,7 +633,7 @@ struct wq_prepost { }; #if !defined(KEEP_WAITQ_PREPOST_STATS) static_assert((sizeof(struct wq_prepost) & (sizeof(struct wq_prepost) - 1)) == 0, - "wq_prepost struct must be a power of two!"); + "wq_prepost struct must be a power of two!"); #endif #define wqp_refcnt(wqp) \ @@ -627,7 +653,8 @@ static_assert((sizeof(struct wq_prepost) & (sizeof(struct wq_prepost) - 1)) == 0 #define WQP_WQ_POISON (0x0bad0badffffffffull) #define WQP_POST_POISON (0xf00df00df00df00d) -static void wqp_poison(struct link_table *table, struct lt_elem *elem) +static void +wqp_poison(struct link_table *table, struct lt_elem *elem) { struct wq_prepost *wqp = (struct wq_prepost *)elem; (void)table; @@ -645,10 +672,12 @@ static void wqp_poison(struct link_table *table, struct lt_elem *elem) } #ifdef KEEP_WAITQ_PREPOST_STATS -static __inline__ void wqp_do_alloc_stats(struct lt_elem *elem) +static __inline__ void +wqp_do_alloc_stats(struct lt_elem *elem) { - if (!elem) + if (!elem) { return; + } struct wq_prepost *wqp = (struct wq_prepost *)elem; uintptr_t alloc_bt[sizeof(wqp->wqp_alloc_bt)]; @@ -661,51 +690,58 @@ static __inline__ void wqp_do_alloc_stats(struct lt_elem *elem) wqp->wqp_alloc_th = current_thread(); wqp->wqp_alloc_task = current_task(); wqp = (struct wq_prepost *)lt_elem_list_next(&g_prepost_table, &wqp->wqte); - if (!wqp) + if (!wqp) { break; + } } } #else #define wqp_do_alloc_stats(e) #endif /* KEEP_WAITQ_LINK_STATS */ -static void wqp_init(void) +static void +wqp_init(void) { uint32_t tablesz = 0, max_wqp = 0; - if (PE_parse_boot_argn("wqp_tsize", &tablesz, sizeof(tablesz)) != TRUE) + if (PE_parse_boot_argn("wqp_tsize", &tablesz, sizeof(tablesz)) != TRUE) { tablesz = (uint32_t)g_lt_max_tbl_size; + } tablesz = P2ROUNDUP(tablesz, PAGE_SIZE); max_wqp = tablesz / sizeof(struct wq_prepost); assert(max_wqp > 0 && tablesz > 0); /* we have a restricted index range */ - if (max_wqp > (LT_IDX_MAX + 1)) + if (max_wqp > (LT_IDX_MAX + 1)) { max_wqp = LT_IDX_MAX + 1; + } wqinfo("init prepost table with max:%d elements (%d bytes)", - max_wqp, tablesz); + max_wqp, tablesz); ltable_init(&g_prepost_table, "wqslab.prepost", max_wqp, - sizeof(struct wq_prepost), wqp_poison); + sizeof(struct wq_prepost), wqp_poison); } /* * Refill the per-CPU cache. 
*/ -static void wq_prepost_refill_cpu_cache(uint32_t nalloc) +static void +wq_prepost_refill_cpu_cache(uint32_t nalloc) { struct lt_elem *new_head, *old_head; struct wqp_cache *cache; /* require preemption enabled to allocate elements */ - if (get_preemption_level() != 0) + if (get_preemption_level() != 0) { return; + } new_head = ltable_alloc_elem(&g_prepost_table, - LT_RESERVED, nalloc, 1); - if (new_head == NULL) + LT_RESERVED, nalloc, 1); + if (new_head == NULL) { return; + } disable_preemption(); cache = &PROCESSOR_DATA(current_processor(), wqp_cache); @@ -732,14 +768,16 @@ out: return; } -static void wq_prepost_ensure_free_space(void) +static void +wq_prepost_ensure_free_space(void) { uint32_t free_elem; uint32_t min_free; struct wqp_cache *cache; - if (g_min_free_cache == 0) + if (g_min_free_cache == 0) { g_min_free_cache = (WQP_CACHE_MAX * ml_get_max_cpus()); + } /* * Ensure that we always have a pool of per-CPU prepost elements @@ -749,8 +787,9 @@ static void wq_prepost_ensure_free_space(void) free_elem = cache->avail; enable_preemption(); - if (free_elem < (WQP_CACHE_MAX / 3)) + if (free_elem < (WQP_CACHE_MAX / 3)) { wq_prepost_refill_cpu_cache(WQP_CACHE_MAX - free_elem); + } /* * Now ensure that we have a sufficient amount of free table space @@ -763,23 +802,26 @@ static void wq_prepost_ensure_free_space(void) */ if (g_prepost_table.used_elem <= g_prepost_table.nelem) { wqdbg_v("Forcing table growth: nelem=%d, used=%d, min_free=%d+%d", - g_prepost_table.nelem, g_prepost_table.used_elem, - g_min_free_table_elem, g_min_free_cache); + g_prepost_table.nelem, g_prepost_table.used_elem, + g_min_free_table_elem, g_min_free_cache); ltable_grow(&g_prepost_table, min_free); } } } -static struct wq_prepost *wq_prepost_alloc(int type, int nelem) +static struct wq_prepost * +wq_prepost_alloc(int type, int nelem) { struct lt_elem *elem; struct wq_prepost *wqp; struct wqp_cache *cache; - if (type != LT_RESERVED) + if (type != LT_RESERVED) { goto do_alloc; - if (nelem == 0) + } + if (nelem == 0) { return NULL; + } /* * First try to grab the elements from the per-CPU cache if we are @@ -806,10 +848,11 @@ static struct wq_prepost *wq_prepost_alloc(int type, int nelem) } } assert(nalloc == 0); - if (!next) + if (!next) { cache->head = LT_IDX_MAX; - else + } else { cache->head = next->lt_id.id; + } /* assert that we don't have mis-matched book keeping */ assert(!(cache->head == LT_IDX_MAX && cache->avail > 0)); enable_preemption(); @@ -821,8 +864,9 @@ static struct wq_prepost *wq_prepost_alloc(int type, int nelem) do_alloc: /* fall-back to standard table allocation */ elem = ltable_alloc_elem(&g_prepost_table, type, nelem, 0); - if (!elem) + if (!elem) { return NULL; + } out: wqp = (struct wq_prepost *)elem; @@ -830,12 +874,14 @@ out: return wqp; } -static void wq_prepost_invalidate(struct wq_prepost *wqp) +static void +wq_prepost_invalidate(struct wq_prepost *wqp) { lt_elem_invalidate(&wqp->wqte); } -static struct wq_prepost *wq_prepost_get(uint64_t wqp_id) +static struct wq_prepost * +wq_prepost_get(uint64_t wqp_id) { struct lt_elem *elem; @@ -843,30 +889,35 @@ static struct wq_prepost *wq_prepost_get(uint64_t wqp_id) return (struct wq_prepost *)elem; } -static void wq_prepost_put(struct wq_prepost *wqp) +static void +wq_prepost_put(struct wq_prepost *wqp) { ltable_put_elem(&g_prepost_table, (struct lt_elem *)wqp); } -static int wq_prepost_rlink(struct wq_prepost *parent, struct wq_prepost *child) +static int +wq_prepost_rlink(struct wq_prepost *parent, struct wq_prepost *child) { return 
lt_elem_list_link(&g_prepost_table, &parent->wqte, &child->wqte); } -static struct wq_prepost *wq_prepost_get_rnext(struct wq_prepost *head) +static struct wq_prepost * +wq_prepost_get_rnext(struct wq_prepost *head) { struct lt_elem *elem; struct wq_prepost *wqp; uint64_t id; elem = lt_elem_list_next(&g_prepost_table, &head->wqte); - if (!elem) + if (!elem) { return NULL; + } id = elem->lt_id.id; elem = ltable_get_elem(&g_prepost_table, id); - if (!elem) + if (!elem) { return NULL; + } wqp = (struct wq_prepost *)elem; if (elem->lt_id.id != id || wqp_type(wqp) != WQP_POST || @@ -878,7 +929,8 @@ static struct wq_prepost *wq_prepost_get_rnext(struct wq_prepost *head) return wqp; } -static void wq_prepost_reset_rnext(struct wq_prepost *wqp) +static void +wq_prepost_reset_rnext(struct wq_prepost *wqp) { (void)lt_elem_list_break(&g_prepost_table, &wqp->wqte); } @@ -896,8 +948,9 @@ static void wq_prepost_reset_rnext(struct wq_prepost *wqp) * prepost ID, and the next element of the prepost list may be * consumed as well (if the list contained only 2 objects) */ -static int wq_prepost_remove(struct waitq_set *wqset, - struct wq_prepost *wqp) +static int +wq_prepost_remove(struct waitq_set *wqset, + struct wq_prepost *wqp) { int more_posts = 1; uint64_t next_id = wqp->wqp_post.wqp_next_id; @@ -946,8 +999,9 @@ static int wq_prepost_remove(struct waitq_set *wqset, wq_prepost_rlink(next_wqp, prev_wqp); /* If we remove the head of the list, update the wqset */ - if (wqp_id == wqset->wqset_prepost_id) + if (wqp_id == wqset->wqset_prepost_id) { wqset->wqset_prepost_id = next_id; + } wq_prepost_put(prev_wqp); wq_prepost_put(next_wqp); @@ -958,7 +1012,8 @@ out: return more_posts; } -static struct wq_prepost *wq_prepost_rfirst(uint64_t id) +static struct wq_prepost * +wq_prepost_rfirst(uint64_t id) { struct lt_elem *elem; elem = lt_elem_list_first(&g_prepost_table, id); @@ -966,7 +1021,8 @@ static struct wq_prepost *wq_prepost_rfirst(uint64_t id) return (struct wq_prepost *)(void *)elem; } -static struct wq_prepost *wq_prepost_rpop(uint64_t *id, int type) +static struct wq_prepost * +wq_prepost_rpop(uint64_t *id, int type) { struct lt_elem *elem; elem = lt_elem_list_pop(&g_prepost_table, id, type); @@ -974,14 +1030,16 @@ static struct wq_prepost *wq_prepost_rpop(uint64_t *id, int type) return (struct wq_prepost *)(void *)elem; } -static void wq_prepost_release_rlist(struct wq_prepost *wqp) +static void +wq_prepost_release_rlist(struct wq_prepost *wqp) { int nelem = 0; struct wqp_cache *cache; struct lt_elem *elem; - if (!wqp) + if (!wqp) { return; + } elem = &wqp->wqte; @@ -993,8 +1051,9 @@ static void wq_prepost_release_rlist(struct wq_prepost *wqp) cache = &PROCESSOR_DATA(current_processor(), wqp_cache); if (cache->avail < WQP_CACHE_MAX) { struct lt_elem *tmp = NULL; - if (cache->head != LT_IDX_MAX) + if (cache->head != LT_IDX_MAX) { tmp = lt_elem_list_first(&g_prepost_table, cache->head); + } nelem = lt_elem_list_link(&g_prepost_table, elem, tmp); cache->head = elem->lt_id.id; cache->avail += nelem; @@ -1013,9 +1072,9 @@ static void wq_prepost_release_rlist(struct wq_prepost *wqp) } typedef int (*wqp_callback_func)(struct waitq_set *wqset, - void *ctx, - struct wq_prepost *wqp, - struct waitq *waitq); + void *ctx, + struct wq_prepost *wqp, + struct waitq *waitq); /** * iterate over a chain of preposts associated with a waitq set. @@ -1028,16 +1087,18 @@ typedef int (*wqp_callback_func)(struct waitq_set *wqset, * may reset or adjust the waitq set's prepost ID pointer. 
If you don't * want this extra processing, you can use wq_prepost_iterate(). */ -static int wq_prepost_foreach_locked(struct waitq_set *wqset, - void *ctx, wqp_callback_func cb) +static int +wq_prepost_foreach_locked(struct waitq_set *wqset, + void *ctx, wqp_callback_func cb) { int ret = WQ_ITERATE_SUCCESS; struct wq_prepost *wqp, *tmp_wqp; assert(cb != NULL); - if (!wqset || !waitq_set_maybe_preposted(wqset)) + if (!wqset || !waitq_set_maybe_preposted(wqset)) { return WQ_ITERATE_SUCCESS; + } restart: wqp = wq_prepost_get(wqset->wqset_prepost_id); @@ -1060,14 +1121,14 @@ restart: /* the caller wants to remove the only prepost here */ assert(wqp_id == wqset->wqset_prepost_id); wqset->wqset_prepost_id = 0; - /* fall through */ + /* fall through */ case WQ_ITERATE_CONTINUE: wq_prepost_put(wqp); ret = WQ_ITERATE_SUCCESS; break; case WQ_ITERATE_RESTART: wq_prepost_put(wqp); - /* fall through */ + /* fall through */ case WQ_ITERATE_DROPPED: goto restart; default: @@ -1135,7 +1196,7 @@ restart: goto next_prepost; case WQ_ITERATE_RESTART: wq_prepost_put(wqp); - /* fall-through */ + /* fall-through */ case WQ_ITERATE_DROPPED: /* the callback dropped the ref to wqp: just restart */ goto restart; @@ -1157,8 +1218,9 @@ restart: next_prepost: /* this was the last object in the list */ - if (wqp_id == last_id) + if (wqp_id == last_id) { break; + } /* get the next object */ tmp_wqp = wq_prepost_get(next_id); @@ -1170,9 +1232,9 @@ next_prepost: * then something is wrong. */ panic("Invalid WQP_POST member 0x%llx in waitq set " - "0x%llx prepost list (first:%llx, " - "wqp:%p)", - next_id, wqset->wqset_id, first_id, wqp); + "0x%llx prepost list (first:%llx, " + "wqp:%p)", + next_id, wqset->wqset_id, first_id, wqp); } wq_prepost_put(wqp); wqp = tmp_wqp; @@ -1182,8 +1244,9 @@ next_prepost: finish_prepost_foreach: wq_prepost_put(wqp); - if (ret == WQ_ITERATE_CONTINUE) + if (ret == WQ_ITERATE_CONTINUE) { ret = WQ_ITERATE_SUCCESS; + } return ret; } @@ -1203,26 +1266,31 @@ finish_prepost_foreach: * want automatic prepost chain management (at a cost of extra CPU time), * you can use: wq_prepost_foreach_locked(). 
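 *
 * As a minimal sketch (the helper name and its counting behavior are
 * illustrative, not part of this file), a callback matching the
 * wqp_callback_func typedef above could count preposts like so:
 *
 *	static int
 *	count_prepost_cb(struct waitq_set *wqset, void *ctx,
 *	                 struct wq_prepost *wqp, struct waitq *waitq)
 *	{
 *		(void)wqset; (void)wqp; (void)waitq;
 *		*(int *)ctx += 1;
 *		return WQ_ITERATE_CONTINUE;
 *	}
 *
 * driven by:  int n = 0; wq_prepost_iterate(id, &n, count_prepost_cb);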
*/ -static int wq_prepost_iterate(uint64_t prepost_id, - void *ctx, wqp_callback_func cb) +static int +wq_prepost_iterate(uint64_t prepost_id, + void *ctx, wqp_callback_func cb) { int ret; struct wq_prepost *wqp; - if (!prepost_id) + if (!prepost_id) { return WQ_ITERATE_SUCCESS; + } wqp = wq_prepost_get(prepost_id); - if (!wqp) + if (!wqp) { return WQ_ITERATE_SUCCESS; + } if (wqp_type(wqp) == WQP_WQ) { ret = WQ_ITERATE_SUCCESS; - if (cb) + if (cb) { ret = cb(NULL, ctx, wqp, wqp->wqp_wq.wqp_wq_ptr); + } - if (ret != WQ_ITERATE_DROPPED) + if (ret != WQ_ITERATE_DROPPED) { wq_prepost_put(wqp); + } return ret; } @@ -1246,13 +1314,16 @@ static int wq_prepost_iterate(uint64_t prepost_id, wq = tmp_wqp->wqp_wq.wqp_wq_ptr; } - if (cb) + if (cb) { ret = cb(NULL, ctx, wqp, wq); - if (tmp_wqp) + } + if (tmp_wqp) { wq_prepost_put(tmp_wqp); + } - if (ret != WQ_ITERATE_CONTINUE) + if (ret != WQ_ITERATE_CONTINUE) { break; + } tmp_wqp = wq_prepost_get(next_id); if (!tmp_wqp) { @@ -1270,11 +1341,13 @@ static int wq_prepost_iterate(uint64_t prepost_id, assert(wqp_type(wqp) == WQP_POST); } while (next_id != prepost_id); - if (ret != WQ_ITERATE_DROPPED) + if (ret != WQ_ITERATE_DROPPED) { wq_prepost_put(wqp); + } - if (ret == WQ_ITERATE_CONTINUE) + if (ret == WQ_ITERATE_CONTINUE) { ret = WQ_ITERATE_SUCCESS; + } return ret; } @@ -1284,8 +1357,9 @@ struct _is_posted_ctx { int did_prepost; }; -static int wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx, - struct wq_prepost *wqp, struct waitq *waitq) +static int +wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx, + struct wq_prepost *wqp, struct waitq *waitq) { struct _is_posted_ctx *pctx = (struct _is_posted_ctx *)ctx; @@ -1296,8 +1370,9 @@ static int wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx, * Don't early-out, run through the _entire_ list: * This ensures that we retain a minimum number of invalid elements. */ - if (pctx->posting_wq == waitq) + if (pctx->posting_wq == waitq) { pctx->did_prepost = 1; + } return WQ_ITERATE_CONTINUE; } @@ -1315,7 +1390,8 @@ static int wq_is_preposted_on_set_cb(struct waitq_set *wqset, void *ctx, * * Returns non-zero if 'waitq' has already preposted to 'wqset' */ -static int wq_is_preposted_on_set(struct waitq *waitq, struct waitq_set *wqset) +static int +wq_is_preposted_on_set(struct waitq *waitq, struct waitq_set *wqset) { int ret; struct _is_posted_ctx pctx; @@ -1325,18 +1401,20 @@ static int wq_is_preposted_on_set(struct waitq *waitq, struct waitq_set *wqset) * then it obviously already preposted to the set. */ if (waitq->waitq_prepost_id != 0 && - wqset->wqset_prepost_id == waitq->waitq_prepost_id) + wqset->wqset_prepost_id == waitq->waitq_prepost_id) { return 1; + } /* use full prepost iteration: always trim the list */ pctx.posting_wq = waitq; pctx.did_prepost = 0; ret = wq_prepost_foreach_locked(wqset, (void *)&pctx, - wq_is_preposted_on_set_cb); + wq_is_preposted_on_set_cb); return pctx.did_prepost; } -static struct wq_prepost *wq_get_prepost_obj(uint64_t *reserved, int type) +static struct wq_prepost * +wq_get_prepost_obj(uint64_t *reserved, int type) { struct wq_prepost *wqp = NULL; /* @@ -1355,8 +1433,9 @@ static struct wq_prepost *wq_get_prepost_obj(uint64_t *reserved, int type) wqp = wq_prepost_alloc(type, 1); } - if (wqp == NULL) + if (wqp == NULL) { panic("Couldn't allocate prepost object!"); + } return wqp; } @@ -1376,9 +1455,10 @@ static struct wq_prepost *wq_get_prepost_obj(uint64_t *reserved, int type) * Notes: * If reserved is NULL, this may block on prepost table growth. 
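 *
 * An informal picture of the list shapes this function builds (derived
 * from the code below; object names as in this file):
 *
 *	wqset_prepost_id -> [WQP_WQ]                    (single prepost)
 *
 *	wqset_prepost_id -> [WQP_POST] <-> [WQP_POST]   (two or more:
 *	                        |              |         a circular,
 *	                    [WQP_WQ a]     [WQP_WQ b]    doubly-linked ring)
 *
 * Each WQP_POST's wqp_wq_id refers to the WQP_WQ object of one
 * preposting waitq.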
*/ -static void wq_prepost_do_post_locked(struct waitq_set *wqset, - struct waitq *waitq, - uint64_t *reserved) +static void +wq_prepost_do_post_locked(struct waitq_set *wqset, + struct waitq *waitq, + uint64_t *reserved) { struct wq_prepost *wqp_post, *wqp_head, *wqp_tail; @@ -1388,8 +1468,9 @@ static void wq_prepost_do_post_locked(struct waitq_set *wqset, * nothing to do if it's already preposted: * note that this also culls any invalid prepost objects */ - if (wq_is_preposted_on_set(waitq, wqset)) + if (wq_is_preposted_on_set(waitq, wqset)) { return; + } assert(waitqs_is_linked(wqset)); @@ -1412,8 +1493,8 @@ static void wq_prepost_do_post_locked(struct waitq_set *wqset, #endif wqdbg_v("preposting waitq %p (0x%llx) to set 0x%llx", - (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), - waitq->waitq_prepost_id, wqset->wqset_id); + (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), + waitq->waitq_prepost_id, wqset->wqset_id); if (wqset->wqset_prepost_id == 0) { /* the set has no previous preposts */ @@ -1440,7 +1521,7 @@ static void wq_prepost_do_post_locked(struct waitq_set *wqset, wqp_post->wqp_post.wqp_wq_id = waitq->waitq_prepost_id; wqdbg_v("POST 0x%llx :: WQ 0x%llx", wqp_post->wqp_prepostid.id, - waitq->waitq_prepost_id); + waitq->waitq_prepost_id); if (wqp_type(wqp_head) == WQP_WQ) { /* @@ -1449,8 +1530,8 @@ static void wq_prepost_do_post_locked(struct waitq_set *wqset, */ uint64_t wqp_id = wqp_head->wqp_prepostid.id; wqdbg_v("set 0x%llx previous had 1 WQ prepost (0x%llx): " - "replacing with two POST preposts", - wqset->wqset_id, wqp_id); + "replacing with two POST preposts", - wqset->wqset_id, wqp_id); + wqset->wqset_id, wqp_id); /* drop the old reference */ wq_prepost_put(wqp_head); @@ -1461,8 +1542,8 @@ static void wq_prepost_do_post_locked(struct waitq_set *wqset, /* point this one to the original WQP_WQ object */ wqp_head->wqp_post.wqp_wq_id = wqp_id; wqdbg_v("POST 0x%llx :: WQ 0x%llx", - wqp_head->wqp_prepostid.id, wqp_id); - + wqp_head->wqp_prepostid.id, wqp_id); + /* link it to the new wqp_post object allocated earlier */ wqp_head->wqp_post.wqp_next_id = wqp_post->wqp_prepostid.id; /* make the list doubly-linked and circular */ @@ -1486,10 +1567,10 @@ static void wq_prepost_do_post_locked(struct waitq_set *wqset, wq_prepost_put(wqp_post); wqdbg_v("set 0x%llx: 0x%llx/0x%llx -> 0x%llx/0x%llx -> 0x%llx", - wqset->wqset_id, wqset->wqset_prepost_id, - wqp_head->wqp_prepostid.id, wqp_head->wqp_post.wqp_next_id, - wqp_post->wqp_prepostid.id, - wqp_post->wqp_post.wqp_next_id); + wqset->wqset_id, wqset->wqset_prepost_id, + wqp_head->wqp_prepostid.id, wqp_head->wqp_post.wqp_next_id, + wqp_post->wqp_prepostid.id, + wqp_post->wqp_post.wqp_next_id); return; } @@ -1523,9 +1604,9 @@ static void wq_prepost_do_post_locked(struct waitq_set *wqset, wq_prepost_put(wqp_post); wqdbg_v("set 0x%llx (wqp:0x%llx) last_prepost:0x%llx, " - "new_prepost:0x%llx->0x%llx", wqset->wqset_id, - wqset->wqset_prepost_id, wqp_head->wqp_prepostid.id, - wqp_post->wqp_prepostid.id, wqp_post->wqp_post.wqp_next_id); + "new_prepost:0x%llx->0x%llx", wqset->wqset_id, + wqset->wqset_prepost_id, wqp_head->wqp_prepostid.id, + wqp_post->wqp_prepostid.id, wqp_post->wqp_post.wqp_next_id); return; } @@ -1537,7 +1618,8 @@ static void wq_prepost_do_post_locked(struct waitq_set *wqset, * * ---------------------------------------------------------------------- */ #if CONFIG_LTABLE_STATS && CONFIG_WAITQ_STATS -static void wq_table_stats(struct link_table *table, struct wq_table_stats *stats) +static void +wq_table_stats(struct link_table *table, struct wq_table_stats *stats)
{ stats->version = WAITQ_STATS_VERSION; stats->table_elements = table->nelem; @@ -1556,14 +1638,17 @@ static void wq_table_stats(struct link_table *table, struct wq_table_stats *stat stats->table_avg_reservations = table->avg_reservations; } -void waitq_link_stats(struct wq_table_stats *stats) +void +waitq_link_stats(struct wq_table_stats *stats) { - if (!stats) + if (!stats) { return; + } wq_table_stats(&g_wqlinktable, stats); } -void waitq_prepost_stats(struct wq_table_stats *stats) +void +waitq_prepost_stats(struct wq_table_stats *stats) { wq_table_stats(&g_prepost_table, stats); } @@ -1585,22 +1670,25 @@ static uint32_t g_num_waitqs = 1; */ #define _CAST_TO_EVENT_MASK(event) ((uintptr_t)(event) & ((1ul << _EVENT_MASK_BITS) - 1ul)) -static __inline__ uint32_t waitq_hash(char *key, size_t length) +static __inline__ uint32_t +waitq_hash(char *key, size_t length) { - uint32_t hash = jenkins_hash(key, length); + uint32_t hash = os_hash_jenkins(key, length); hash &= (g_num_waitqs - 1); return hash; } /* return a global waitq pointer corresponding to the given event */ -struct waitq *_global_eventq(char *event, size_t event_length) +struct waitq * +_global_eventq(char *event, size_t event_length) { return &global_waitqs[waitq_hash(event, event_length)]; } /* return an indexed global waitq pointer */ -struct waitq *global_waitq(int index) +struct waitq * +global_waitq(int index) { return &global_waitqs[index % g_num_waitqs]; } @@ -1610,11 +1698,13 @@ struct waitq *global_waitq(int index) /* this global is for lldb */ const uint32_t g_nwaitq_btframes = NWAITQ_BTFRAMES; -static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip) +static __inline__ void +waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int skip) { uintptr_t buf[NWAITQ_BTFRAMES + skip]; - if (skip < 0) + if (skip < 0) { skip = 0; + } memset(buf, 0, (NWAITQ_BTFRAMES + skip) * sizeof(uintptr_t)); backtrace(buf, g_nwaitq_btframes + skip); memcpy(&bt[0], &buf[skip], NWAITQ_BTFRAMES * sizeof(uintptr_t)); @@ -1628,12 +1718,15 @@ static __inline__ void waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], int s struct wq_stats g_boot_stats; struct wq_stats *g_waitq_stats = &g_boot_stats; -static __inline__ struct wq_stats *waitq_global_stats(struct waitq *waitq) { +static __inline__ struct wq_stats * +waitq_global_stats(struct waitq *waitq) +{ struct wq_stats *wqs; uint32_t idx; - if (!waitq_is_global(waitq)) + if (!waitq_is_global(waitq)) { return NULL; + } idx = (uint32_t)(((uintptr_t)waitq - (uintptr_t)global_waitqs) / sizeof(*waitq)); assert(idx < g_num_waitqs); @@ -1641,7 +1734,8 @@ static __inline__ struct wq_stats *waitq_global_stats(struct waitq *waitq) { return wqs; } -static __inline__ void waitq_stats_count_wait(struct waitq *waitq) +static __inline__ void +waitq_stats_count_wait(struct waitq *waitq) { struct wq_stats *wqs = waitq_global_stats(waitq); if (wqs != NULL) { @@ -1650,7 +1744,8 @@ static __inline__ void waitq_stats_count_wait(struct waitq *waitq) } } -static __inline__ void waitq_stats_count_wakeup(struct waitq *waitq) +static __inline__ void +waitq_stats_count_wakeup(struct waitq *waitq) { struct wq_stats *wqs = waitq_global_stats(waitq); if (wqs != NULL) { @@ -1659,7 +1754,8 @@ static __inline__ void waitq_stats_count_wakeup(struct waitq *waitq) } } -static __inline__ void waitq_stats_count_clear_wakeup(struct waitq *waitq) +static __inline__ void +waitq_stats_count_clear_wakeup(struct waitq *waitq) { struct wq_stats *wqs = waitq_global_stats(waitq); if (wqs != NULL) { @@ -1669,7 +1765,8 
@@ static __inline__ void waitq_stats_count_clear_wakeup(struct waitq *waitq) } } -static __inline__ void waitq_stats_count_fail(struct waitq *waitq) +static __inline__ void +waitq_stats_count_fail(struct waitq *waitq) { struct wq_stats *wqs = waitq_global_stats(waitq); if (wqs != NULL) { @@ -1684,30 +1781,36 @@ static __inline__ void waitq_stats_count_fail(struct waitq *waitq) #define waitq_stats_count_fail(q) do { } while (0) #endif -int waitq_is_valid(struct waitq *waitq) +int +waitq_is_valid(struct waitq *waitq) { return (waitq != NULL) && waitq->waitq_isvalid; } -int waitq_set_is_valid(struct waitq_set *wqset) +int +waitq_set_is_valid(struct waitq_set *wqset) { return (wqset != NULL) && wqset->wqset_q.waitq_isvalid && waitqs_is_set(wqset); } -int waitq_is_global(struct waitq *waitq) +int +waitq_is_global(struct waitq *waitq) { - if (waitq >= global_waitqs && waitq < global_waitqs + g_num_waitqs) + if (waitq >= global_waitqs && waitq < global_waitqs + g_num_waitqs) { return 1; + } return 0; } -int waitq_irq_safe(struct waitq *waitq) +int +waitq_irq_safe(struct waitq *waitq) { /* global wait queues have this bit set on initialization */ return waitq->waitq_irq; } -struct waitq * waitq_get_safeq(struct waitq *waitq) +struct waitq * +waitq_get_safeq(struct waitq *waitq) { struct waitq *safeq; @@ -1721,12 +1824,14 @@ struct waitq * waitq_get_safeq(struct waitq *waitq) return safeq; } -static uint32_t waitq_hash_size(void) +static uint32_t +waitq_hash_size(void) { uint32_t hsize, queues; - - if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize))) - return (hsize); + + if (PE_parse_boot_argn("wqsize", &hsize, sizeof(hsize))) { + return hsize; + } queues = thread_max / 5; hsize = P2ROUNDUP(queues * sizeof(struct waitq), PAGE_SIZE); @@ -1734,39 +1839,40 @@ static uint32_t waitq_hash_size(void) return hsize; } -/* - * Since the priority ordered waitq uses basepri as the +/* + * Since the priority ordered waitq uses basepri as the * ordering key, assert that this value fits in a uint8_t. */ static_assert(MAXPRI <= UINT8_MAX); -static inline void waitq_thread_insert(struct waitq *wq, - thread_t thread, boolean_t fifo) +static inline void +waitq_thread_insert(struct waitq *wq, + thread_t thread, boolean_t fifo) { if (waitq_is_turnstile_queue(wq)) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_ADDED_TO_TURNSTILE_WAITQ))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)), - thread_tid(thread), - thread->base_pri, 0, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_ADDED_TO_TURNSTILE_WAITQ))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)), + thread_tid(thread), + thread->base_pri, 0, 0); turnstile_stats_update(0, TSU_TURNSTILE_BLOCK_COUNT, NULL); /* - * For turnstile queues (which use priority queues), - * insert the thread in the heap based on its current - * base_pri. Note that the priority queue implementation - * is currently not stable, so does not maintain fifo for - * threads at the same base_pri. Also, if the base_pri - * of the thread changes while it's blocked in the waitq, - * the thread position should be updated in the priority - * queue by calling priority queue increase/decrease + * For turnstile queues (which use priority queues), + * insert the thread in the heap based on its current + * base_pri. Note that the priority queue implementation + * is currently not stable, so does not maintain fifo for + * threads at the same base_pri.
Also, if the base_pri + * of the thread changes while it's blocked in the waitq, + * the thread position should be updated in the priority + * queue by calling priority queue increase/decrease * operations. */ priority_queue_entry_init(&(thread->wait_prioq_links)); priority_queue_insert(&wq->waitq_prio_queue, - &thread->wait_prioq_links, thread->base_pri, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + &thread->wait_prioq_links, thread->base_pri, + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); } else { turnstile_stats_update(0, TSU_REGULAR_WAITQ_BLOCK_COUNT, NULL); if (fifo) { @@ -1777,32 +1883,37 @@ static inline void waitq_thread_insert(struct waitq *wq, } } -static inline void waitq_thread_remove(struct waitq *wq, - thread_t thread) +static inline void +waitq_thread_remove(struct waitq *wq, + thread_t thread) { if (waitq_is_turnstile_queue(wq)) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_REMOVED_FROM_TURNSTILE_WAITQ))) | DBG_FUNC_NONE, - VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)), - thread_tid(thread), - 0, 0, 0); + (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS, (THREAD_REMOVED_FROM_TURNSTILE_WAITQ))) | DBG_FUNC_NONE, + VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq)), + thread_tid(thread), + 0, 0, 0); priority_queue_remove(&wq->waitq_prio_queue, &thread->wait_prioq_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); } else { remqueue(&(thread->wait_links)); } } -void waitq_bootstrap(void) +void +waitq_bootstrap(void) { kern_return_t kret; uint32_t whsize, qsz, tmp32; g_min_free_table_elem = DEFAULT_MIN_FREE_TABLE_ELEM; - if (PE_parse_boot_argn("wqt_min_free", &tmp32, sizeof(tmp32)) == TRUE) + if (PE_parse_boot_argn("wqt_min_free", &tmp32, sizeof(tmp32)) == TRUE) { g_min_free_table_elem = tmp32; + } wqdbg("Minimum free table elements: %d", tmp32); + lck_grp_init(&waitq_lck_grp, "waitq", LCK_GRP_ATTR_NULL); + /* * Determine the amount of memory we're willing to reserve for * the waitqueue hash table @@ -1820,8 +1931,9 @@ void waitq_bootstrap(void) */ for (uint32_t i = 0; i < 31; i++) { uint32_t bit = (1 << i); - if ((g_num_waitqs & bit) == g_num_waitqs) + if ((g_num_waitqs & bit) == g_num_waitqs) { break; + } g_num_waitqs &= ~bit; } assert(g_num_waitqs > 0); @@ -1831,29 +1943,31 @@ void waitq_bootstrap(void) wqdbg("allocating %d global queues (%d bytes)", g_num_waitqs, whsize); kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&global_waitqs, - whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ); - if (kret != KERN_SUCCESS || global_waitqs == NULL) + whsize, 0, KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ); + if (kret != KERN_SUCCESS || global_waitqs == NULL) { panic("kernel_memory_allocate() failed to alloc global_waitqs" - ", error: %d, whsize: 0x%x", kret, whsize); + ", error: %d, whsize: 0x%x", kret, whsize); + } #if CONFIG_WAITQ_STATS whsize = P2ROUNDUP(g_num_waitqs * sizeof(struct wq_stats), PAGE_SIZE); kret = kernel_memory_allocate(kernel_map, (vm_offset_t *)&g_waitq_stats, - whsize, 0, KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ); - if (kret != KERN_SUCCESS || global_waitqs == NULL) + whsize, 0, KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_WAITQ); + if (kret != KERN_SUCCESS || global_waitqs == NULL) { panic("kernel_memory_allocate() failed to alloc g_waitq_stats" - ", error: %d, whsize: 0x%x", kret, whsize); + ", error: %d, whsize: 0x%x", kret, whsize); + } memset(g_waitq_stats, 0, whsize); #endif for (uint32_t i = 0; i < g_num_waitqs; i++) { -
waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ); + waitq_init(&global_waitqs[i], SYNC_POLICY_FIFO | SYNC_POLICY_DISABLE_IRQ); } waitq_set_zone = zinit(sizeof(struct waitq_set), - WAITQ_SET_MAX * sizeof(struct waitq_set), - sizeof(struct waitq_set), - "waitq sets"); + WAITQ_SET_MAX * sizeof(struct waitq_set), + sizeof(struct waitq_set), + "waitq sets"); zone_change(waitq_set_zone, Z_NOENCRYPT, TRUE); /* initialize the global waitq link table */ @@ -1878,27 +1992,30 @@ void waitq_bootstrap(void) */ /* For x86, the hardware timeout is in TSC units. */ #if defined(__i386__) || defined(__x86_64__) -#define hwLockTimeOut LockTimeOutTSC +#define hwLockTimeOut LockTimeOutTSC #else -#define hwLockTimeOut LockTimeOut +#define hwLockTimeOut LockTimeOut #endif -void waitq_lock(struct waitq *wq) +void +waitq_lock(struct waitq *wq) { if (__improbable(waitq_lock_to(wq, - hwLockTimeOut * 2) == 0)) { + hwLockTimeOut * 2) == 0)) { boolean_t wql_acquired = FALSE; while (machine_timeout_suspended()) { mp_enable_preemption(); wql_acquired = waitq_lock_to(wq, - hwLockTimeOut * 2); - if (wql_acquired) + hwLockTimeOut * 2); + if (wql_acquired) { break; + } } - if (wql_acquired == FALSE) + if (wql_acquired == FALSE) { panic("waitq deadlock - waitq=%p, cpu=%d\n", - wq, cpu_number()); + wq, cpu_number()); + } } #if defined(__x86_64__) pltrace(FALSE); @@ -1906,7 +2023,8 @@ void waitq_lock(struct waitq *wq) assert(waitq_held(wq)); } -void waitq_unlock(struct waitq *wq) +void +waitq_unlock(struct waitq *wq) { assert(waitq_held(wq)); #if defined(__x86_64__) @@ -1922,7 +2040,8 @@ void waitq_unlock(struct waitq *wq) * Conditions: * 'thread' is locked */ -static inline void thread_clear_waitq_state(thread_t thread) +static inline void +thread_clear_waitq_state(thread_t thread) { thread->waitq = NULL; thread->wait_event = NO_EVENT64; @@ -1931,7 +2050,7 @@ static inline void thread_clear_waitq_state(thread_t thread) typedef thread_t (*waitq_select_cb)(void *ctx, struct waitq *waitq, - int is_global, thread_t thread); + int is_global, thread_t thread); struct waitq_select_args { /* input parameters */ @@ -1966,8 +2085,9 @@ static void do_waitq_select_n_locked(struct waitq_select_args *args); * If no threads were selected, it preposts the input waitq * onto the waitq set pointed to by 'link'. */ -static int waitq_select_walk_cb(struct waitq *waitq, void *ctx, - struct waitq_link *link) +static int +waitq_select_walk_cb(struct waitq *waitq, void *ctx, + struct waitq_link *link) { int ret = WQ_ITERATE_CONTINUE; struct waitq_select_args args = *((struct waitq_select_args *)ctx); @@ -1987,8 +2107,9 @@ static int waitq_select_walk_cb(struct waitq *waitq, void *ctx, * verify that the link wasn't invalidated just before * we were able to take the lock. */ - if (wqset->wqset_id != link->wql_setid.id) + if (wqset->wqset_id != link->wql_setid.id) { goto out_unlock; + } assert(waitqs_is_linked(wqset)); @@ -2049,9 +2170,10 @@ out_unlock: * The previous interrupt state is returned in args->spl and should * be used in a call to splx() if threads are returned to the caller. 
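 *
 * A short, non-normative note on the eventmask filter these routines
 * rely on: each parked thread contributes a bit to its safeq's
 * waitq_eventmask, computed as _CAST_TO_EVENT_MASK(thread->waitq) when
 * it parks on a shared global safeq, or _CAST_TO_EVENT_MASK(wait_event)
 * when the waitq is its own safeq.  A select pass only walks a global
 * safeq when
 *
 *	(safeq->waitq_eventmask & eventmask) == eventmask
 *
 * holds (see do_waitq_select_n_locked below).  The mask is lossy: a set
 * bit means a matching waiter may be queued, a clear bit means there is
 * definitely none.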
*/ -static thread_t waitq_queue_iterate_locked(struct waitq *safeq, struct waitq *waitq, - spl_t spl, struct waitq_select_args *args, - uint32_t *remaining_eventmask) +static thread_t +waitq_queue_iterate_locked(struct waitq *safeq, struct waitq *waitq, + spl_t spl, struct waitq_select_args *args, + uint32_t *remaining_eventmask) { int max_threads = args->max_threads; int *nthreads = args->nthreads; @@ -2064,38 +2186,42 @@ static thread_t waitq_queue_iterate_locked(struct waitq *wa /* * For non-priority ordered waitqs, we allow multiple events to be - * mux'ed into the same waitq. Also safeqs may contain threads from + * mux'ed into the same waitq. Also safeqs may contain threads from * multiple waitqs. Only pick threads that match the - * requested wait event. + * requested wait event. */ if (thread->waitq == waitq && thread->wait_event == args->event) { t = thread; - if (first_thread == THREAD_NULL) + if (first_thread == THREAD_NULL) { first_thread = thread; + } /* allow the caller to further refine the selection */ - if (args->select_cb) + if (args->select_cb) { t = args->select_cb(args->select_ctx, waitq, - waitq_is_global(waitq), thread); + waitq_is_global(waitq), thread); + } if (t != THREAD_NULL) { *nthreads += 1; if (args->threadq) { /* if output queue, add locked thread to it */ - if (*nthreads == 1) + if (*nthreads == 1) { *(args->spl) = (safeq != waitq) ? spl : splsched(); + } thread_lock(t); thread_clear_waitq_state(t); re_queue_tail(args->threadq, &t->wait_links); } /* only enqueue up to 'max' threads */ - if (*nthreads >= max_threads && max_threads > 0) + if (*nthreads >= max_threads && max_threads > 0) { break; + } } } /* thread wasn't selected so track its event */ if (t == THREAD_NULL) { *remaining_eventmask |= (thread->waitq != safeq) ? - _CAST_TO_EVENT_MASK(thread->waitq) : _CAST_TO_EVENT_MASK(thread->wait_event); + _CAST_TO_EVENT_MASK(thread->waitq) : _CAST_TO_EVENT_MASK(thread->wait_event); } } @@ -2121,20 +2247,21 @@ static thread_t waitq_queue_iterate_locked(struct waitq *wa * events in the same queue. The way to implement that would be to keep removing * elements from the waitq and if the event does not match the requested one, * add it to a local list. This local list of elements needs to be re-inserted - * into the priority queue at the end and the select_cb return value & - * remaining_eventmask would need to be handled appropriately. The implementation - * is not very efficient but would work functionally. + * into the priority queue at the end and the select_cb return value & + * remaining_eventmask would need to be handled appropriately. The implementation + * is not very efficient but would work functionally. */ -static thread_t waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *waitq, - spl_t spl, struct waitq_select_args *args, - uint32_t *remaining_eventmask) +static thread_t +waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *waitq, + spl_t spl, struct waitq_select_args *args, + uint32_t *remaining_eventmask) { int max_threads = args->max_threads; int *nthreads = args->nthreads; thread_t first_thread = THREAD_NULL; thread_t thread = THREAD_NULL; - /* + /* * The waitq select routines need to handle two cases: * Case 1: Peek at maximum priority thread in the waitq (remove_op = 0) * Get the maximum priority thread from the waitq without removing it.
@@ -2142,16 +2269,15 @@ static thread_t waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *wa * Case 2: Remove 'n' highest priority threads from waitq (remove_op = 1) * Get max_threads (if available) while removing them from the waitq. * In that case args->threadq != NULL and max_threads is one of {-1, 1}. - * - * The only possible values for remaining_eventmask for the priority queue - * waitq are either 0 (for the remove all threads case) or the original + * + * The only possible values for remaining_eventmask for the priority queue + * waitq are either 0 (for the remove all threads case) or the original * safeq->waitq_eventmask (for the lookup/remove one thread cases). */ *remaining_eventmask = safeq->waitq_eventmask; boolean_t remove_op = !!(args->threadq); while ((max_threads <= 0) || (*nthreads < max_threads)) { - if (priority_queue_empty(&(safeq->waitq_prio_queue))) { *remaining_eventmask = 0; break; @@ -2159,54 +2285,57 @@ static thread_t waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *wa if (remove_op) { thread = priority_queue_remove_max(&safeq->waitq_prio_queue, - struct thread, wait_prioq_links, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + struct thread, wait_prioq_links, + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); } else { /* For the peek operation, the only valid value for max_threads is 1 */ assert(max_threads == 1); thread = priority_queue_max(&safeq->waitq_prio_queue, - struct thread, wait_prioq_links); + struct thread, wait_prioq_links); } - /* - * Ensure the wait event matches since priority ordered waitqs do not + /* + * Ensure the wait event matches since priority ordered waitqs do not * support multiple events in the same waitq. */ assert((thread->waitq == waitq) && (thread->wait_event == args->event)); - + if (args->select_cb) { /* - * Call the select_cb passed into the waitq_select args. The callback - * updates the select_ctx with information about the highest priority - * thread which is eventually used by the caller. + * Call the select_cb passed into the waitq_select args. The callback + * updates the select_ctx with information about the highest priority + * thread which is eventually used by the caller. */ - thread_t __assert_only ret_thread = args->select_cb(args->select_ctx, waitq, - waitq_is_global(waitq), thread); + thread_t __assert_only ret_thread = args->select_cb(args->select_ctx, waitq, + waitq_is_global(waitq), thread); if (!remove_op) { /* For the peek operation, the thread should not be selected for addition */ assert(ret_thread == THREAD_NULL); } else { - /* - * For the remove operation, the select routine should always return a valid - * thread for priority waitqs. Since all threads in a prioq are equally - * eligible, it should match the thread removed from the prioq. If this - * invariant changes, the implementation would need to handle the + /* + * For the remove operation, the select routine should always return a valid + * thread for priority waitqs. Since all threads in a prioq are equally + * eligible, it should match the thread removed from the prioq. If this + * invariant changes, the implementation would need to handle the * remaining_eventmask here correctly. 
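 *
 * For illustration, a pass-through callback matching the
 * waitq_select_cb typedef (select_any_cb is a hypothetical name;
 * waitq_find_max_pri_cb later in this file is the in-tree example):
 *
 *	static thread_t
 *	select_any_cb(void *ctx, struct waitq *waitq,
 *	              int is_global, thread_t thread)
 *	{
 *		(void)ctx; (void)waitq; (void)is_global;
 *		return thread;
 *	}
 *
 * Returning the candidate thread selects it; returning THREAD_NULL
 * skips it (which, for a priority waitq's remove path, the assert
 * just below forbids).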
*/ assert(ret_thread == thread); } } - - if (first_thread == THREAD_NULL) + + if (first_thread == THREAD_NULL) { first_thread = thread; + } /* For the peek operation, break out early */ - if (!remove_op) + if (!remove_op) { break; + } /* Add the thread to the result thread list */ *nthreads += 1; - if (*nthreads == 1) + if (*nthreads == 1) { *(args->spl) = (safeq != waitq) ? spl : splsched(); + } thread_lock(thread); thread_clear_waitq_state(thread); enqueue_tail(args->threadq, &(thread->wait_links)); @@ -2231,7 +2360,8 @@ static thread_t waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *wa * The previous interrupt state is returned in args->spl and should * be used in a call to splx() if threads are returned to the caller. */ -static void do_waitq_select_n_locked(struct waitq_select_args *args) +static void +do_waitq_select_n_locked(struct waitq_select_args *args) { struct waitq *waitq = args->waitq; int max_threads = args->max_threads; @@ -2248,8 +2378,9 @@ static void do_waitq_select_n_locked(struct waitq_select_args *args) /* JMM - add flag to waitq to avoid global lookup if no waiters */ eventmask = _CAST_TO_EVENT_MASK(waitq); safeq = waitq_get_safeq(waitq); - if (*nthreads == 0) + if (*nthreads == 0) { spl = splsched(); + } waitq_lock(safeq); } else { eventmask = _CAST_TO_EVENT_MASK(args->event); @@ -2263,15 +2394,14 @@ static void do_waitq_select_n_locked(struct waitq_select_args *args) */ if (!waitq_is_global(safeq) || (safeq->waitq_eventmask & eventmask) == eventmask) { - if (waitq_is_turnstile_queue(safeq)) { first_thread = waitq_prioq_iterate_locked(safeq, waitq, - spl, args, - &remaining_eventmask); + spl, args, + &remaining_eventmask); } else { first_thread = waitq_queue_iterate_locked(safeq, waitq, - spl, args, - &remaining_eventmask); + spl, args, + &remaining_eventmask); } /* @@ -2284,10 +2414,11 @@ static void do_waitq_select_n_locked(struct waitq_select_args *args) * computed is complete - so reset it. 
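 *
 * Restated informally from the branches just below: after a pass, a
 * global safeq's eventmask becomes
 *
 *	0                   if the safeq is now empty;
 *	remaining_eventmask if the whole queue was scanned (no
 *	                    max_threads limit, or still under it);
 *	unchanged           if we bailed out early at max_threads,
 *	                    because the recomputed mask is incomplete.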
*/ if (waitq_is_global(safeq)) { - if (waitq_empty(safeq)) + if (waitq_empty(safeq)) { safeq->waitq_eventmask = 0; - else if (max_threads < 0 || *nthreads < max_threads) + } else if (max_threads < 0 || *nthreads < max_threads) { safeq->waitq_eventmask = remaining_eventmask; + } } } @@ -2306,26 +2437,30 @@ static void do_waitq_select_n_locked(struct waitq_select_args *args) enqueue_tail(args->threadq, &(first_thread->wait_links)); /* update the eventmask on [now] empty global queues */ - if (waitq_is_global(safeq) && waitq_empty(safeq)) + if (waitq_is_global(safeq) && waitq_empty(safeq)) { safeq->waitq_eventmask = 0; + } } /* unlock the safe queue if we locked one above */ if (safeq != waitq) { waitq_unlock(safeq); - if (*nthreads == 0) + if (*nthreads == 0) { splx(spl); + } } - - if (max_threads > 0 && *nthreads >= max_threads) + + if (max_threads > 0 && *nthreads >= max_threads) { return; + } /* * wait queues that are not in any sets * are the bottom of the recursion */ - if (!waitq->waitq_set_id) + if (!waitq->waitq_set_id) { return; + } /* check to see if the set ID for this wait queue is valid */ struct waitq_link *link = wql_get_link(waitq->waitq_set_id); @@ -2346,7 +2481,7 @@ static void do_waitq_select_n_locked(struct waitq_select_args *args) * recurse down wait queue set's with non-zero wqset_q.waitq_set_id */ (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id, - WQL_WQS, (void *)args, waitq_select_walk_cb); + WQL_WQS, (void *)args, waitq_select_walk_cb); } /** @@ -2360,21 +2495,22 @@ static void do_waitq_select_n_locked(struct waitq_select_args *args) * been placed onto the input 'threadq' * * Notes: - * The 'select_cb' function is invoked for every thread found waiting on - * 'waitq' for 'event'. The thread is _not_ locked upon callback + * The 'select_cb' function is invoked for every thread found waiting on + * 'waitq' for 'event'. The thread is _not_ locked upon callback * invocation. This parameter may be NULL. * * If one or more threads are returned in 'threadq' then the caller is * responsible to call splx() using the returned 'spl' value. Each * returned thread is locked. */ -static __inline__ int waitq_select_n_locked(struct waitq *waitq, - event64_t event, - waitq_select_cb select_cb, - void *select_ctx, - uint64_t *reserved_preposts, - queue_t threadq, - int max_threads, spl_t *spl) +static __inline__ int +waitq_select_n_locked(struct waitq *waitq, + event64_t event, + waitq_select_cb select_cb, + void *select_ctx, + uint64_t *reserved_preposts, + queue_t threadq, + int max_threads, spl_t *spl) { int nthreads = 0; @@ -2406,9 +2542,10 @@ static __inline__ int waitq_select_n_locked(struct waitq *waitq, * yet been put on a run queue. Caller is responsible to call splx * with the '*spl' value. 
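 *
 * A compressed usage sketch with 'waitq' locked (error handling
 * elided; the WAITQ_ALL_PRIORITIES constant is assumed from the waitq
 * headers as the "no priority preference" value):
 *
 *	spl_t spl;
 *	thread_t t;
 *	t = waitq_select_one_locked(waitq, event, NULL,
 *	                            WAITQ_ALL_PRIORITIES, &spl);
 *	if (t != THREAD_NULL) {
 *		thread_go(t, THREAD_AWAKENED);
 *		thread_unlock(t);
 *		splx(spl);
 *	}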
*/ -static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event, - uint64_t *reserved_preposts, - int priority, spl_t *spl) +static thread_t +waitq_select_one_locked(struct waitq *waitq, event64_t event, + uint64_t *reserved_preposts, + int priority, spl_t *spl) { (void)priority; int nthreads; @@ -2417,7 +2554,7 @@ static thread_t waitq_select_one_locked(struct waitq *waitq, event64_t event, queue_init(&threadq); nthreads = waitq_select_n_locked(waitq, event, NULL, NULL, - reserved_preposts, &threadq, 1, spl); + reserved_preposts, &threadq, 1, spl); /* if we selected a thread, return it (still locked) */ if (!queue_empty(&threadq)) { @@ -2447,9 +2584,9 @@ struct find_max_pri_ctx { */ static thread_t waitq_find_max_pri_cb(void *ctx_in, - __unused struct waitq *waitq, - __unused int is_global, - thread_t thread) + __unused struct waitq *waitq, + __unused int is_global, + thread_t thread) { struct find_max_pri_ctx *ctx = (struct find_max_pri_ctx *)ctx_in; @@ -2486,8 +2623,8 @@ waitq_find_max_pri_cb(void *ctx_in, */ static thread_t waitq_select_max_locked(struct waitq *waitq, event64_t event, - uint64_t *reserved_preposts, - spl_t *spl) + uint64_t *reserved_preposts, + spl_t *spl) { __assert_only int nthreads; assert(!waitq->waitq_set_id); /* doesn't support recursive sets */ @@ -2503,8 +2640,8 @@ waitq_select_max_locked(struct waitq *waitq, event64_t event, * This doesn't remove any thread from the queue */ nthreads = waitq_select_n_locked(waitq, event, - waitq_find_max_pri_cb, - &ctx, reserved_preposts, NULL, 1, spl); + waitq_find_max_pri_cb, + &ctx, reserved_preposts, NULL, 1, spl); assert(nthreads == 0); @@ -2542,8 +2679,9 @@ struct select_thread_ctx { * caller is responsible to call splx() with the returned interrupt state * in ctx->spl. 
*/ -static int waitq_select_thread_cb(struct waitq *waitq, void *ctx, - struct waitq_link *link) +static int +waitq_select_thread_cb(struct waitq *waitq, void *ctx, + struct waitq_link *link) { struct select_thread_ctx *stctx = (struct select_thread_ctx *)ctx; struct waitq_set *wqset; @@ -2552,12 +2690,13 @@ static int waitq_select_thread_cb(struct waitq *waitq, void *ctx, spl_t s; (void)waitq; - + thread_t thread = stctx->thread; event64_t event = stctx->event; - if (wql_type(link) != WQL_WQS) + if (wql_type(link) != WQL_WQS) { return WQ_ITERATE_CONTINUE; + } wqset = link->wql_wqs.wql_set; wqsetq = &wqset->wqset_q; @@ -2607,9 +2746,10 @@ static int waitq_select_thread_cb(struct waitq *waitq, void *ctx, * 'waitq' is locked * 'thread' is unlocked */ -static kern_return_t waitq_select_thread_locked(struct waitq *waitq, - event64_t event, - thread_t thread, spl_t *spl) +static kern_return_t +waitq_select_thread_locked(struct waitq *waitq, + event64_t event, + thread_t thread, spl_t *spl) { struct waitq *safeq; struct waitq_link *link; @@ -2642,13 +2782,15 @@ static kern_return_t waitq_select_thread_locked(struct waitq *waitq, thread_unlock(thread); - if (safeq != waitq) + if (safeq != waitq) { waitq_unlock(safeq); + } splx(s); - if (!waitq->waitq_set_id) + if (!waitq->waitq_set_id) { return KERN_NOT_WAITING; + } /* check to see if the set ID for this wait queue is valid */ link = wql_get_link(waitq->waitq_set_id); @@ -2668,21 +2810,23 @@ static kern_return_t waitq_select_thread_locked(struct waitq *waitq, ctx.event = event; ctx.spl = spl; kr = walk_waitq_links(LINK_WALK_FULL_DAG, waitq, waitq->waitq_set_id, - WQL_WQS, (void *)&ctx, waitq_select_thread_cb); + WQL_WQS, (void *)&ctx, waitq_select_thread_cb); wql_put_link(link); /* we found a thread, return success */ - if (kr == WQ_ITERATE_FOUND) + if (kr == WQ_ITERATE_FOUND) { return KERN_SUCCESS; + } return KERN_NOT_WAITING; } -static int prepost_exists_cb(struct waitq_set __unused *wqset, - void __unused *ctx, - struct wq_prepost __unused *wqp, - struct waitq __unused *waitq) +static int +prepost_exists_cb(struct waitq_set __unused *wqset, + void __unused *ctx, + struct wq_prepost __unused *wqp, + struct waitq __unused *waitq) { /* if we get here, then we know that there is a valid prepost object! */ return WQ_ITERATE_FOUND; @@ -2694,13 +2838,14 @@ static int prepost_exists_cb(struct waitq_set __unused *wqset, * Conditions: * 'waitq' is locked */ -wait_result_t waitq_assert_wait64_locked(struct waitq *waitq, - event64_t wait_event, - wait_interrupt_t interruptible, - wait_timeout_urgency_t urgency, - uint64_t deadline, - uint64_t leeway, - thread_t thread) +wait_result_t +waitq_assert_wait64_locked(struct waitq *waitq, + event64_t wait_event, + wait_interrupt_t interruptible, + wait_timeout_urgency_t urgency, + uint64_t deadline, + uint64_t leeway, + thread_t thread) { wait_result_t wait_result; int realtime = 0; @@ -2715,8 +2860,9 @@ wait_result_t waitq_assert_wait64_locked(struct waitq *waitq, */ assert(!thread->started || thread == current_thread()); - if (thread->waitq != NULL) + if (thread->waitq != NULL) { panic("thread already waiting on %p", thread->waitq); + } if (waitq_is_set(waitq)) { struct waitq_set *wqset = (struct waitq_set *)waitq; @@ -2732,7 +2878,7 @@ wait_result_t waitq_assert_wait64_locked(struct waitq *waitq, * if we find just one prepost object. 
*/ ret = wq_prepost_foreach_locked(wqset, NULL, - prepost_exists_cb); + prepost_exists_cb); if (ret == WQ_ITERATE_FOUND) { s = splsched(); thread_lock(thread); @@ -2770,8 +2916,9 @@ wait_result_t waitq_assert_wait64_locked(struct waitq *waitq, * to run the realtime thread, but without causing the * lock contention of that scenario. */ - if (thread->sched_pri >= BASEPRI_REALTIME) + if (thread->sched_pri >= BASEPRI_REALTIME) { realtime = 1; + } /* * This is the extent to which we currently take scheduling attributes @@ -2782,12 +2929,12 @@ wait_result_t waitq_assert_wait64_locked(struct waitq *waitq, wait_result = thread_mark_wait_locked(thread, interruptible); /* thread->wait_result has been set */ if (wait_result == THREAD_WAITING) { - if (!safeq->waitq_fifo - || (thread->options & TH_OPT_VMPRIV) || realtime) + || (thread->options & TH_OPT_VMPRIV) || realtime) { waitq_thread_insert(safeq, thread, false); - else + } else { waitq_thread_insert(safeq, thread, true); + } /* mark the event and real waitq, even if enqueued on a global safeq */ thread->wait_event = wait_event; @@ -2797,16 +2944,18 @@ wait_result_t waitq_assert_wait64_locked(struct waitq *waitq, boolean_t act; act = timer_call_enter_with_leeway(&thread->wait_timer, - NULL, - deadline, leeway, - urgency, FALSE); - if (!act) + NULL, + deadline, leeway, + urgency, FALSE); + if (!act) { thread->wait_timer_active++; + } thread->wait_timer_is_set = TRUE; } - if (waitq_is_global(safeq)) + if (waitq_is_global(safeq)) { safeq->waitq_eventmask |= eventmask; + } waitq_stats_count_wait(waitq); } @@ -2841,7 +2990,8 @@ wait_result_t waitq_assert_wait64_locked(struct waitq *waitq, * sched_prim.c from the thread timer wakeup path * (i.e. the thread was waiting on 'waitq' with a timeout that expired) */ -int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread) +int +waitq_pull_thread_locked(struct waitq *waitq, thread_t thread) { struct waitq *safeq; @@ -2856,8 +3006,9 @@ int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread) } /* thread is already locked so have to try for the waitq lock */ - if (!waitq_lock_try(safeq)) + if (!waitq_lock_try(safeq)) { return 0; + } waitq_thread_remove(safeq, thread); thread_clear_waitq_state(thread); @@ -2876,11 +3027,11 @@ int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread) static __inline__ -void maybe_adjust_thread_pri(thread_t thread, - int priority, - __kdebug_only struct waitq *waitq) +void +maybe_adjust_thread_pri(thread_t thread, + int priority, + __kdebug_only struct waitq *waitq) { - /* * If the caller is requesting the waitq subsystem to promote the * priority of the awoken thread, then boost the thread's priority to @@ -2895,8 +3046,9 @@ void maybe_adjust_thread_pri(thread_t thread, */ if (priority == WAITQ_PROMOTE_PRIORITY) { uintptr_t trace_waitq = 0; - if (__improbable(kdebug_enable)) + if (__improbable(kdebug_enable)) { trace_waitq = VM_KERNEL_UNSLIDE_OR_PERM(waitq); + } sched_thread_promote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, trace_waitq); } else if (priority > 0) { @@ -2911,7 +3063,8 @@ void maybe_adjust_thread_pri(thread_t thread, * * This must be called on the thread which was woken up with TH_SFLAG_WAITQ_PROMOTED. 
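 *
 * Illustrative pairing (a sketch only; waitq_wakeup64_one is assumed
 * from the unlocked waitq API, and locking follows the conditions
 * stated above):
 *
 *	waker:
 *		waitq_wakeup64_one(wq, event, THREAD_AWAKENED,
 *		                   WAITQ_PROMOTE_PRIORITY);
 *	woken thread, once running:
 *		waitq_lock(wq);
 *		waitq_clear_promotion_locked(wq, current_thread());
 *		waitq_unlock(wq);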
*/ -void waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread) +void +waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread) { spl_t s; @@ -2920,18 +3073,21 @@ void waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread) assert(thread == current_thread()); /* This flag is only cleared by the thread itself, so safe to check outside lock */ - if ((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) != TH_SFLAG_WAITQ_PROMOTED) + if ((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) != TH_SFLAG_WAITQ_PROMOTED) { return; + } - if (!waitq_irq_safe(waitq)) + if (!waitq_irq_safe(waitq)) { s = splsched(); + } thread_lock(thread); sched_thread_unpromote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, 0); thread_unlock(thread); - if (!waitq_irq_safe(waitq)) + if (!waitq_irq_safe(waitq)) { splx(s); + } } /** @@ -2948,12 +3104,13 @@ void waitq_clear_promotion_locked(struct waitq *waitq, thread_t thread) * been unlocked before calling thread_go() on any returned threads, and * is guaranteed to be unlocked upon function return. */ -kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - uint64_t *reserved_preposts, - int priority, - waitq_lock_state_t lock_state) +kern_return_t +waitq_wakeup64_all_locked(struct waitq *waitq, + event64_t wake_event, + wait_result_t result, + uint64_t *reserved_preposts, + int priority, + waitq_lock_state_t lock_state) { kern_return_t ret; thread_t thread; @@ -2965,18 +3122,19 @@ kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq, queue_init(&wakeup_queue); nthreads = waitq_select_n_locked(waitq, wake_event, NULL, NULL, - reserved_preposts, - &wakeup_queue, -1, &th_spl); + reserved_preposts, + &wakeup_queue, -1, &th_spl); /* set each thread running */ ret = KERN_NOT_WAITING; #if CONFIG_WAITQ_STATS qe_foreach_element(thread, &wakeup_queue, wait_links) - waitq_stats_count_wakeup(waitq); + waitq_stats_count_wakeup(waitq); #endif - if (lock_state == WAITQ_UNLOCK) + if (lock_state == WAITQ_UNLOCK) { waitq_unlock(waitq); + } qe_foreach_element_safe(thread, &wakeup_queue, wait_links) { assert_thread_magic(thread); @@ -2986,10 +3144,11 @@ kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq, assert(ret == KERN_SUCCESS); thread_unlock(thread); } - if (nthreads > 0) + if (nthreads > 0) { splx(th_spl); - else + } else { waitq_stats_count_fail(waitq); + } return ret; } @@ -3003,12 +3162,13 @@ kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq, * Notes: * May temporarily disable and re-enable interrupts. 
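 *
 * An informal summary of the 'priority' argument, as dispatched here
 * and in maybe_adjust_thread_pri() above:
 *
 *	WAITQ_SELECT_MAX_PRI    select the highest-priority waiter
 *	                        (waitq_select_max_locked)
 *	WAITQ_PROMOTE_PRIORITY  promote the woken thread with
 *	                        TH_SFLAG_WAITQ_PROMOTED
 *	> 0                     treated as an explicit promotion value
 *	                        by maybe_adjust_thread_pri()
 *	otherwise               plain selection, no boost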
*/ -kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - uint64_t *reserved_preposts, - int priority, - waitq_lock_state_t lock_state) +kern_return_t +waitq_wakeup64_one_locked(struct waitq *waitq, + event64_t wake_event, + wait_result_t result, + uint64_t *reserved_preposts, + int priority, + waitq_lock_state_t lock_state) { thread_t thread; spl_t th_spl; @@ -3017,22 +3177,24 @@ kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq, if (priority == WAITQ_SELECT_MAX_PRI) { thread = waitq_select_max_locked(waitq, wake_event, - reserved_preposts, - &th_spl); + reserved_preposts, + &th_spl); } else { thread = waitq_select_one_locked(waitq, wake_event, - reserved_preposts, - priority, &th_spl); + reserved_preposts, + priority, &th_spl); } - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { waitq_stats_count_wakeup(waitq); - else + } else { waitq_stats_count_fail(waitq); + } - if (lock_state == WAITQ_UNLOCK) + if (lock_state == WAITQ_UNLOCK) { waitq_unlock(waitq); + } if (thread != THREAD_NULL) { maybe_adjust_thread_pri(thread, priority, waitq); @@ -3060,12 +3222,12 @@ kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq, */ thread_t waitq_wakeup64_identify_locked(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - spl_t *spl, - uint64_t *reserved_preposts, - int priority, - waitq_lock_state_t lock_state) + event64_t wake_event, + wait_result_t result, + spl_t *spl, + uint64_t *reserved_preposts, + int priority, + waitq_lock_state_t lock_state) { thread_t thread; @@ -3073,21 +3235,23 @@ waitq_wakeup64_identify_locked(struct waitq *waitq, if (priority == WAITQ_SELECT_MAX_PRI) { thread = waitq_select_max_locked(waitq, wake_event, - reserved_preposts, - spl); + reserved_preposts, + spl); } else { thread = waitq_select_one_locked(waitq, wake_event, - reserved_preposts, - priority, spl); + reserved_preposts, + priority, spl); } - if (thread != THREAD_NULL) + if (thread != THREAD_NULL) { waitq_stats_count_wakeup(waitq); - else + } else { waitq_stats_count_fail(waitq); + } - if (lock_state == WAITQ_UNLOCK) + if (lock_state == WAITQ_UNLOCK) { waitq_unlock(waitq); + } if (thread != THREAD_NULL) { kern_return_t __assert_only ret; @@ -3112,11 +3276,12 @@ waitq_wakeup64_identify_locked(struct waitq *waitq, * unlocked before calling thread_go() if 'thread' is to be awoken, and * is guaranteed to be unlocked upon function return. 
*/ -kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq, - event64_t wake_event, - thread_t thread, - wait_result_t result, - waitq_lock_state_t lock_state) +kern_return_t +waitq_wakeup64_thread_locked(struct waitq *waitq, + event64_t wake_event, + thread_t thread, + wait_result_t result, + waitq_lock_state_t lock_state) { kern_return_t ret; spl_t th_spl; @@ -3130,16 +3295,19 @@ kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq, */ ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl); - if (ret == KERN_SUCCESS) + if (ret == KERN_SUCCESS) { waitq_stats_count_wakeup(waitq); - else + } else { waitq_stats_count_fail(waitq); + } - if (lock_state == WAITQ_UNLOCK) + if (lock_state == WAITQ_UNLOCK) { waitq_unlock(waitq); + } - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return KERN_NOT_WAITING; + } ret = thread_go(thread, result); assert(ret == KERN_SUCCESS); @@ -3160,13 +3328,15 @@ kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq, /** * initialize a waitq object */ -kern_return_t waitq_init(struct waitq *waitq, int policy) +kern_return_t +waitq_init(struct waitq *waitq, int policy) { assert(waitq != NULL); /* only FIFO and LIFO for now */ - if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0) + if ((policy & SYNC_POLICY_FIXED_PRIORITY) != 0) { return KERN_INVALID_ARGUMENT; + } waitq->waitq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0); waitq->waitq_irq = !!(policy & SYNC_POLICY_DISABLE_IRQ); @@ -3182,7 +3352,7 @@ kern_return_t waitq_init(struct waitq *waitq, int policy) if (waitq_is_turnstile_queue(waitq)) { /* For turnstile, initialize it as a priority queue */ priority_queue_init(&waitq->waitq_prio_queue, - PRIORITY_QUEUE_BUILTIN_MAX_HEAP); + PRIORITY_QUEUE_BUILTIN_MAX_HEAP); assert(waitq->waitq_fifo == 0); } else { queue_init(&waitq->waitq_queue); @@ -3198,7 +3368,7 @@ struct wq_unlink_ctx { }; static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx, - struct wq_prepost *wqp, struct waitq *waitq); + struct wq_prepost *wqp, struct waitq *waitq); /** * walk_waitq_links callback to invalidate 'link' parameter @@ -3209,13 +3379,15 @@ static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx, * the 'waitq' parameter, specifically it does not have to be locked or * even valid. */ -static int waitq_unlink_all_cb(struct waitq *waitq, void *ctx, - struct waitq_link *link) +static int +waitq_unlink_all_cb(struct waitq *waitq, void *ctx, + struct waitq_link *link) { (void)waitq; (void)ctx; - if (wql_type(link) == WQL_LINK && wql_is_valid(link)) + if (wql_type(link) == WQL_LINK && wql_is_valid(link)) { wql_invalidate(link); + } if (wql_type(link) == WQL_WQS) { struct waitq_set *wqset; @@ -3227,8 +3399,9 @@ static int waitq_unlink_all_cb(struct waitq *waitq, void *ctx, * on the IPC send path which would otherwise have to iterate * over lots of dead port preposts. 
*/ - if (waitq->waitq_prepost_id == 0) + if (waitq->waitq_prepost_id == 0) { goto out; + } wqset = link->wql_wqs.wql_set; assert(wqset != NULL); @@ -3240,13 +3413,14 @@ static int waitq_unlink_all_cb(struct waitq *waitq, void *ctx, /* someone raced us to teardown */ goto out_unlock; } - if (!waitq_set_maybe_preposted(wqset)) + if (!waitq_set_maybe_preposted(wqset)) { goto out_unlock; + } ulctx.unlink_wq = waitq; ulctx.unlink_wqset = wqset; (void)wq_prepost_iterate(wqset->wqset_prepost_id, &ulctx, - waitq_unlink_prepost_cb); + waitq_unlink_prepost_cb); out_unlock: waitq_set_unlock(wqset); } @@ -3259,20 +3433,24 @@ out: /** * cleanup any link/prepost table resources associated with a waitq */ -void waitq_deinit(struct waitq *waitq) +void +waitq_deinit(struct waitq *waitq) { spl_t s; - if (!waitq || !waitq_is_queue(waitq)) + if (!waitq || !waitq_is_queue(waitq)) { return; + } - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { s = splsched(); + } waitq_lock(waitq); if (!waitq_valid(waitq)) { waitq_unlock(waitq); - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { splx(s); + } return; } @@ -3289,7 +3467,8 @@ void waitq_deinit(struct waitq *waitq) assert(waitq_empty(waitq)); } -void waitq_invalidate_locked(struct waitq *waitq) +void +waitq_invalidate_locked(struct waitq *waitq) { assert(waitq_held(waitq)); assert(waitq_is_valid(waitq)); @@ -3302,13 +3481,15 @@ void waitq_invalidate_locked(struct waitq *waitq) * Conditions: * Called from wq_prepost_iterate (_not_ from wq_prepost_foreach_locked!) */ -static int wqset_clear_prepost_chain_cb(struct waitq_set __unused *wqset, - void __unused *ctx, - struct wq_prepost *wqp, - struct waitq __unused *waitq) +static int +wqset_clear_prepost_chain_cb(struct waitq_set __unused *wqset, + void __unused *ctx, + struct wq_prepost *wqp, + struct waitq __unused *waitq) { - if (wqp_type(wqp) == WQP_POST) + if (wqp_type(wqp) == WQP_POST) { wq_prepost_invalidate(wqp); + } return WQ_ITERATE_CONTINUE; } @@ -3326,13 +3507,15 @@ static int wqset_clear_prepost_chain_cb(struct waitq_set __unused *wqset, * * NULL on failure */ -struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook) +struct waitq_set * +waitq_set_alloc(int policy, void *prepost_hook) { struct waitq_set *wqset; wqset = (struct waitq_set *)zalloc(waitq_set_zone); - if (!wqset) + if (!wqset) { panic("Can't allocate a new waitq set from zone %p", waitq_set_zone); + } kern_return_t ret; ret = waitq_set_init(wqset, policy, NULL, prepost_hook); @@ -3351,9 +3534,10 @@ struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook) * the waitq_link will be lazily allocated * on demand through waitq_set_lazy_init_link. 
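 *
 * Minimal lifecycle sketch (error handling elided; SYNC_POLICY_PREPOST
 * chosen purely for illustration):
 *
 *	struct waitq_set *wqset;
 *	wqset = waitq_set_alloc(SYNC_POLICY_PREPOST, NULL);
 *	...use the set...
 *	waitq_set_free(wqset);
 *
 * With no reserved link passed, the set's wqset_id (and its WQL_WQS
 * link object) is only allocated on first use, via
 * waitq_set_lazy_init_link() below.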
*/ -kern_return_t waitq_set_init(struct waitq_set *wqset, - int policy, uint64_t *reserved_link, - void *prepost_hook) +kern_return_t +waitq_set_init(struct waitq_set *wqset, + int policy, uint64_t *reserved_link, + void *prepost_hook) { struct waitq_link *link; kern_return_t ret; @@ -3361,8 +3545,9 @@ kern_return_t waitq_set_init(struct waitq_set *wqset, memset(wqset, 0, sizeof(*wqset)); ret = waitq_init(&wqset->wqset_q, policy); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { return ret; + } wqset->wqset_q.waitq_type = WQT_SET; if (policy & SYNC_POLICY_PREPOST) { @@ -3377,8 +3562,9 @@ kern_return_t waitq_set_init(struct waitq_set *wqset, if (reserved_link && *reserved_link != 0) { link = wql_get_reserved(*reserved_link, WQL_WQS); - if (!link) + if (!link) { panic("Can't allocate link object for waitq set: %p", wqset); + } /* always consume the caller's reference */ *reserved_link = 0; @@ -3388,7 +3574,6 @@ kern_return_t waitq_set_init(struct waitq_set *wqset, wqset->wqset_id = link->wql_setid.id; wql_put_link(link); - } else { /* * Lazy allocate the link only when an actual id is needed. @@ -3425,7 +3610,7 @@ waitq_set_lazy_init_link(struct waitq_set *wqset) assert(get_preemption_level() == 0 && waitq_wait_possible(current_thread())); waitq_set_lock(wqset); - if (!waitq_set_should_lazy_init_link(wqset)){ + if (!waitq_set_should_lazy_init_link(wqset)) { waitq_set_unlock(wqset); return; } @@ -3434,8 +3619,9 @@ waitq_set_lazy_init_link(struct waitq_set *wqset) waitq_set_unlock(wqset); link = wql_alloc_link(WQL_WQS); - if (!link) + if (!link) { panic("Can't allocate link object for waitq set: %p", wqset); + } link->wql_wqs.wql_set = wqset; @@ -3477,13 +3663,15 @@ waitq_set_should_lazy_init_link(struct waitq_set *wqset) * This will render the waitq set invalid, and it must * be re-initialized with waitq_set_init before it can be used again */ -void waitq_set_deinit(struct waitq_set *wqset) +void +waitq_set_deinit(struct waitq_set *wqset) { struct waitq_link *link = NULL; uint64_t set_id, prepost_id; - if (!waitqs_is_set(wqset)) + if (!waitqs_is_set(wqset)) { panic("trying to de-initialize an invalid wqset @%p", wqset); + } assert(!waitq_irq_safe(&wqset->wqset_q)); @@ -3492,7 +3680,6 @@ void waitq_set_deinit(struct waitq_set *wqset) set_id = wqset->wqset_id; if (waitqs_is_linked(wqset) || set_id == 0) { - /* grab the set's link object */ link = wql_get_link(set_id); if (link) { @@ -3547,16 +3734,18 @@ void waitq_set_deinit(struct waitq_set *wqset) * this function. That way we ensure that the waitq set memory will * remain valid even though it's been cleared out. */ - while (wql_refcnt(link) > 1) + while (wql_refcnt(link) > 1) { delay(1); + } wql_put_link(link); } /* drop / unlink all the prepost table objects */ /* JMM - can this happen before the delay? 
*/ - if (prepost_id) + if (prepost_id) { (void)wq_prepost_iterate(prepost_id, NULL, - wqset_clear_prepost_chain_cb); + wqset_clear_prepost_chain_cb); + } } /** @@ -3565,7 +3754,8 @@ void waitq_set_deinit(struct waitq_set *wqset) * Conditions: * may block */ -kern_return_t waitq_set_free(struct waitq_set *wqset) +kern_return_t +waitq_set_free(struct waitq_set *wqset) { waitq_set_deinit(wqset); @@ -3580,10 +3770,12 @@ kern_return_t waitq_set_free(struct waitq_set *wqset) /** * return the set ID of 'wqset' */ -uint64_t wqset_id(struct waitq_set *wqset) +uint64_t +wqset_id(struct waitq_set *wqset) { - if (!wqset) + if (!wqset) { return 0; + } assert(waitqs_is_set(wqset)); @@ -3597,10 +3789,12 @@ uint64_t wqset_id(struct waitq_set *wqset) /** * returns a pointer to the waitq object embedded in 'wqset' */ -struct waitq *wqset_waitq(struct waitq_set *wqset) +struct waitq * +wqset_waitq(struct waitq_set *wqset) { - if (!wqset) + if (!wqset) { return NULL; + } assert(waitqs_is_set(wqset)); @@ -3623,25 +3817,26 @@ struct waitq *wqset_waitq(struct waitq_set *wqset) * The return value of the function indicates whether or not this * happened: 1 == lock was dropped, 0 == lock held */ -int waitq_clear_prepost_locked(struct waitq *waitq) +int +waitq_clear_prepost_locked(struct waitq *waitq) { struct wq_prepost *wqp; int dropped_lock = 0; assert(!waitq_irq_safe(waitq)); - if (waitq->waitq_prepost_id == 0) + if (waitq->waitq_prepost_id == 0) { return 0; + } wqp = wq_prepost_get(waitq->waitq_prepost_id); waitq->waitq_prepost_id = 0; if (wqp) { uint64_t wqp_id = wqp->wqp_prepostid.id; wqdbg_v("invalidate prepost 0x%llx (refcnt:%d)", - wqp->wqp_prepostid.id, wqp_refcnt(wqp)); + wqp->wqp_prepostid.id, wqp_refcnt(wqp)); wq_prepost_invalidate(wqp); while (wqp_refcnt(wqp) > 1) { - /* * Some other thread must have raced us to grab a link * object reference before we invalidated it. 
This @@ -3670,8 +3865,9 @@ int waitq_clear_prepost_locked(struct waitq *waitq) enable_preemption(); } - if (wqp_refcnt(wqp) > 0 && wqp->wqp_prepostid.id == wqp_id) + if (wqp_refcnt(wqp) > 0 && wqp->wqp_prepostid.id == wqp_id) { wq_prepost_put(wqp); + } } return dropped_lock; @@ -3684,7 +3880,8 @@ int waitq_clear_prepost_locked(struct waitq *waitq) * 'waitq' is not locked * may disable and re-enable interrupts */ -void waitq_clear_prepost(struct waitq *waitq) +void +waitq_clear_prepost(struct waitq *waitq) { assert(waitq_valid(waitq)); assert(!waitq_irq_safe(waitq)); @@ -3701,20 +3898,23 @@ void waitq_clear_prepost(struct waitq *waitq) * Conditions: * 'waitq' is unlocked */ -uint64_t waitq_get_prepost_id(struct waitq *waitq) +uint64_t +waitq_get_prepost_id(struct waitq *waitq) { struct wq_prepost *wqp; uint64_t wqp_id = 0; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { return 0; - + } + assert(!waitq_irq_safe(waitq)); waitq_lock(waitq); - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { goto out_unlock; + } if (waitq->waitq_prepost_id) { wqp_id = waitq->waitq_prepost_id; @@ -3725,8 +3925,9 @@ uint64_t waitq_get_prepost_id(struct waitq *waitq) waitq_unlock(waitq); wqp = wq_prepost_alloc(WQP_WQ, 1); - if (!wqp) + if (!wqp) { return 0; + } /* re-acquire the waitq lock */ waitq_lock(waitq); @@ -3759,7 +3960,8 @@ out_unlock: } -static int waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *link) +static int +waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *link) { uint64_t setid = *(uint64_t *)ctx; int wqltype = wql_type(link); @@ -3775,8 +3977,9 @@ static int waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *lin */ wqdbg_v(" waitq already in set 0x%llx (WQL_LINK)", setid); if (link->wql_link.left_setid == setid || - link->wql_link.right_setid == setid) + link->wql_link.right_setid == setid) { return WQ_ITERATE_FOUND; + } } return WQ_ITERATE_CONTINUE; @@ -3789,22 +3992,26 @@ static int waitq_inset_cb(struct waitq *waitq, void *ctx, struct waitq_link *lin * neither 'waitq' nor 'wqset' is locked * may disable and re-enable interrupts while locking 'waitq' */ -boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset) +boolean_t +waitq_member(struct waitq *waitq, struct waitq_set *wqset) { kern_return_t kr = WQ_ITERATE_SUCCESS; uint64_t setid; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { panic("Invalid waitq: %p", waitq); + } assert(!waitq_irq_safe(waitq)); - if (!waitqs_is_set(wqset)) + if (!waitqs_is_set(wqset)) { return FALSE; + } waitq_lock(waitq); - if (!waitqs_is_linked(wqset)) - goto out_unlock; + if (!waitqs_is_linked(wqset)) { + goto out_unlock; + } setid = wqset->wqset_id; @@ -3816,28 +4023,31 @@ boolean_t waitq_member(struct waitq *waitq, struct waitq_set *wqset) /* walk the link table and look for the Set ID of wqset */ kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id, - WQL_ALL, (void *)&setid, waitq_inset_cb); + WQL_ALL, (void *)&setid, waitq_inset_cb); out_unlock: waitq_unlock(waitq); - return (kr == WQ_ITERATE_FOUND); + return kr == WQ_ITERATE_FOUND; } /** * Returns true if the given waitq is a member of at least one set */ -boolean_t waitq_in_set(struct waitq *waitq) +boolean_t +waitq_in_set(struct waitq *waitq) { struct waitq_link *link; boolean_t inset = FALSE; - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { return FALSE; + } waitq_lock(waitq); - if (!waitq->waitq_set_id) + if (!waitq->waitq_set_id) { goto out_unlock; + } link = wql_get_link(waitq->waitq_set_id);
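	/*
	 * Editorial note, not part of the original patch: waitq_member()
	 * and waitq_in_set() above are the two membership queries. A
	 * minimal, hypothetical caller (placeholder names 'my_wq' and
	 * 'my_wqset') might combine them with waitq_link() like so:
	 *
	 *     if (!waitq_member(&my_wq, &my_wqset)) {
	 *             kr = waitq_link(&my_wq, &my_wqset,
	 *                             WAITQ_SHOULD_LOCK, NULL);
	 *     }
	 *
	 * Both queries take the waitq lock internally, so the caller must
	 * not already hold it.
	 */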
if (link) { @@ -3862,7 +4072,8 @@ out_unlock: * 'waitq' is not locked * may (rarely) block if link table needs to grow */ -uint64_t waitq_link_reserve(struct waitq *waitq) +uint64_t +waitq_link_reserve(struct waitq *waitq) { struct waitq_link *link; uint64_t reserved_id = 0; @@ -3877,8 +4088,9 @@ uint64_t waitq_link_reserve(struct waitq *waitq) (void)waitq; link = wql_alloc_link(LT_RESERVED); - if (!link) + if (!link) { return 0; + } reserved_id = link->wql_setid.id; @@ -3888,16 +4100,19 @@ uint64_t waitq_link_reserve(struct waitq *waitq) /** * release a pre-allocated waitq link structure */ -void waitq_link_release(uint64_t id) +void +waitq_link_release(uint64_t id) { struct waitq_link *link; - if (id == 0) + if (id == 0) { return; + } link = wql_get_reserved(id, WQL_LINK); - if (!link) + if (!link) { return; + } /* * if we successfully got a link object, then we know @@ -3917,8 +4132,9 @@ void waitq_link_release(uint64_t id) * 'waitq' is locked * caller should have a reference to the 'link' object */ -static kern_return_t waitq_link_internal(struct waitq *waitq, - uint64_t setid, struct waitq_link *link) +static kern_return_t +waitq_link_internal(struct waitq *waitq, + uint64_t setid, struct waitq_link *link) { struct waitq_link *qlink; kern_return_t kr; @@ -3954,9 +4170,10 @@ static kern_return_t waitq_link_internal(struct waitq *waitq, * TODO: check for cycles! */ kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id, - WQL_ALL, (void *)&setid, waitq_inset_cb); - if (kr == WQ_ITERATE_FOUND) + WQL_ALL, (void *)&setid, waitq_inset_cb); + if (kr == WQ_ITERATE_FOUND) { return KERN_ALREADY_IN_SET; + } /* * This wait queue is a member of at least one set already, @@ -3991,22 +4208,25 @@ static kern_return_t waitq_link_internal(struct waitq *waitq, * Notes: * The caller can guarantee that this function will never block by * - pre-allocating a link table object and passing its ID in 'reserved_link' - * - and pre-allocating the waitq set link by calling waitq_set_lazy_init_link. - * It is not possible to provide a reserved_link without having also linked + * - and pre-allocating the waitq set link by calling waitq_set_lazy_init_link. + * It is not possible to provide a reserved_link without having also linked * the wqset.
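 *
 * Editorial sketch, not part of the original patch: the never-block
 * pattern described above, with hypothetical placeholders 'my_wq' and
 * 'my_wqset':
 *
 *     waitq_set_lazy_init_link(&my_wqset);
 *     uint64_t rlink = waitq_link_reserve(&my_wq);
 *     ... enter the context that must not block ...
 *     kr = waitq_link(&my_wq, &my_wqset, WAITQ_SHOULD_LOCK, &rlink);
 *     ... leave that context ...
 *     waitq_link_release(rlink);
 *
 * waitq_link() always consumes a provided reservation (zeroing rlink),
 * and waitq_link_release(0) is a no-op, so the final release is safe
 * whether or not the reservation was used.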
*/ -kern_return_t waitq_link(struct waitq *waitq, struct waitq_set *wqset, - waitq_lock_state_t lock_state, uint64_t *reserved_link) +kern_return_t +waitq_link(struct waitq *waitq, struct waitq_set *wqset, + waitq_lock_state_t lock_state, uint64_t *reserved_link) { kern_return_t kr; struct waitq_link *link; int should_lock = (lock_state == WAITQ_SHOULD_LOCK); - if (!waitq_valid(waitq) || waitq_irq_safe(waitq)) + if (!waitq_valid(waitq) || waitq_irq_safe(waitq)) { panic("Invalid waitq: %p", waitq); + } - if (!waitqs_is_set(wqset)) + if (!waitqs_is_set(wqset)) { return KERN_INVALID_ARGUMENT; + } if (!reserved_link || *reserved_link == 0) { if (!waitqs_is_linked(wqset)) { @@ -4015,7 +4235,7 @@ kern_return_t waitq_link(struct waitq *waitq, struct waitq_set *wqset, } wqdbg_v("Link waitq %p to wqset 0x%llx", - (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id); + (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id); /* * We _might_ need a new link object here, so we'll grab outside @@ -4033,8 +4253,9 @@ kern_return_t waitq_link(struct waitq *waitq, struct waitq_set *wqset, } else { link = wql_alloc_link(WQL_LINK); } - if (!link) + if (!link) { return KERN_NO_SPACE; + } if (should_lock) { waitq_lock(waitq); @@ -4064,11 +4285,12 @@ kern_return_t waitq_link(struct waitq *waitq, struct waitq_set *wqset, * unused or unnecessary links. See comments below for different * scenarios. */ -static inline int waitq_maybe_remove_link(struct waitq *waitq, - uint64_t setid, - struct waitq_link *parent, - struct waitq_link *left, - struct waitq_link *right) +static inline int +waitq_maybe_remove_link(struct waitq *waitq, + uint64_t setid, + struct waitq_link *parent, + struct waitq_link *left, + struct waitq_link *right) { uint64_t *wq_setid = &waitq->waitq_set_id; @@ -4239,17 +4461,19 @@ static inline int waitq_maybe_remove_link(struct waitq *waitq, * uses waitq_maybe_remove_link() to compress the linktable and * perform the actual unlinking */ -static int waitq_unlink_cb(struct waitq *waitq, void *ctx, - struct waitq_link *link) +static int +waitq_unlink_cb(struct waitq *waitq, void *ctx, + struct waitq_link *link) { uint64_t setid = *((uint64_t *)ctx); struct waitq_link *right, *left; int ret = 0; - if (wql_type(link) != WQL_LINK) + if (wql_type(link) != WQL_LINK) { return WQ_ITERATE_CONTINUE; + } - do { + do { left = wql_get_link(link->wql_link.left_setid); right = wql_get_link(link->wql_link.right_setid); @@ -4258,8 +4482,9 @@ static int waitq_unlink_cb(struct waitq *waitq, void *ctx, wql_put_link(left); wql_put_link(right); - if (!wql_is_valid(link)) + if (!wql_is_valid(link)) { return WQ_ITERATE_INVALID; + } /* A ret value of UNLINKED will break us out of table walk */ } while (ret == WQ_ITERATE_INVALID); @@ -4275,13 +4500,15 @@ static int waitq_unlink_cb(struct waitq *waitq, void *ctx, * 'wqset' may be NULL * (ctx)->unlink_wqset is locked */ -static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx, - struct wq_prepost *wqp, struct waitq *waitq) +static int +waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx, + struct wq_prepost *wqp, struct waitq *waitq) { struct wq_unlink_ctx *ulctx = (struct wq_unlink_ctx *)ctx; - if (waitq != ulctx->unlink_wq) + if (waitq != ulctx->unlink_wq) { return WQ_ITERATE_CONTINUE; + } if (wqp_type(wqp) == WQP_WQ && wqp->wqp_prepostid.id == ulctx->unlink_wqset->wqset_prepost_id) { @@ -4313,8 +4540,9 @@ static int waitq_unlink_prepost_cb(struct waitq_set __unused *wqset, void *ctx, * may (rarely) spin in prepost clear and 
drop/re-acquire 'waitq' lock * (see waitq_clear_prepost_locked) */ -static kern_return_t waitq_unlink_locked(struct waitq *waitq, - struct waitq_set *wqset) +static kern_return_t +waitq_unlink_locked(struct waitq *waitq, + struct waitq_set *wqset) { uint64_t setid; kern_return_t kr; @@ -4328,8 +4556,9 @@ static kern_return_t waitq_unlink_locked(struct waitq *waitq, * This is an artifact of not cleaning up after kqueues when * they prepost into select sets... */ - if (waitq->waitq_prepost_id != 0) + if (waitq->waitq_prepost_id != 0) { (void)waitq_clear_prepost_locked(waitq); + } return KERN_NOT_IN_SET; } @@ -4371,7 +4600,7 @@ static kern_return_t waitq_unlink_locked(struct waitq *waitq, * from set B. */ kr = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id, - WQL_LINK, (void *)&setid, waitq_unlink_cb); + WQL_LINK, (void *)&setid, waitq_unlink_cb); if (kr == WQ_ITERATE_UNLINKED) { struct wq_unlink_ctx ulctx; @@ -4379,8 +4608,9 @@ static kern_return_t waitq_unlink_locked(struct waitq *waitq, kr = KERN_SUCCESS; /* found it and dis-associated it */ /* don't look for preposts if it's not prepost-enabled */ - if (!wqset->wqset_q.waitq_prepost) + if (!wqset->wqset_q.waitq_prepost) { goto out; + } assert(!waitq_irq_safe(&wqset->wqset_q)); @@ -4393,7 +4623,7 @@ static kern_return_t waitq_unlink_locked(struct waitq *waitq, ulctx.unlink_wq = waitq; ulctx.unlink_wqset = wqset; (void)wq_prepost_iterate(wqset->wqset_prepost_id, (void *)&ulctx, - waitq_unlink_prepost_cb); + waitq_unlink_prepost_cb); waitq_set_unlock(wqset); } else { kr = KERN_NOT_IN_SET; /* waitq is _not_ associated with wqset */ @@ -4412,7 +4642,8 @@ out: * may (rarely) spin in prepost clear * (see waitq_clear_prepost_locked) */ -kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset) +kern_return_t +waitq_unlink(struct waitq *waitq, struct waitq_set *wqset) { kern_return_t kr = KERN_SUCCESS; @@ -4422,11 +4653,12 @@ kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset) * we allow the waitq to be invalid because the caller may be trying * to clear out old/dirty state */ - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { return KERN_INVALID_ARGUMENT; + } wqdbg_v("unlink waitq %p from set 0x%llx", - (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id); + (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), wqset->wqset_id); assert(!waitq_irq_safe(waitq)); @@ -4445,7 +4677,8 @@ kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset) * 'wqset' is unlocked * wqp_id may be valid or invalid */ -void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset) +void +waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset) { struct wq_prepost *wqp; @@ -4495,7 +4728,8 @@ void waitq_unlink_by_prepost_id(uint64_t wqp_id, struct waitq_set *wqset) * a locked waitq if wqp_id was valid * NULL on failure */ -struct waitq *waitq_lock_by_prepost_id(uint64_t wqp_id) +struct waitq * +waitq_lock_by_prepost_id(uint64_t wqp_id) { struct waitq *wq = NULL; struct wq_prepost *wqp; @@ -4532,11 +4766,12 @@ struct waitq *waitq_lock_by_prepost_id(uint64_t wqp_id) * Notes: * may (rarely) spin (see waitq_clear_prepost_locked) */ -kern_return_t waitq_unlink_all_unlock(struct waitq *waitq) +kern_return_t +waitq_unlink_all_unlock(struct waitq *waitq) { uint64_t old_set_id = 0; wqdbg_v("unlink waitq %p from all sets", - (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq)); + (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq)); assert(!waitq_irq_safe(waitq)); /* it's not a member of any sets */ @@ -4565,7 
+4800,7 @@ kern_return_t waitq_unlink_all_unlock(struct waitq *waitq) * because WQL_LINK objects are private to each wait queue */ (void)walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, old_set_id, - WQL_LINK, NULL, waitq_unlink_all_cb); + WQL_LINK, NULL, waitq_unlink_all_cb); } return KERN_SUCCESS; @@ -4580,12 +4815,14 @@ kern_return_t waitq_unlink_all_unlock(struct waitq *waitq) * may (rarely) spin * (see waitq_unlink_all_locked, waitq_clear_prepost_locked) */ -kern_return_t waitq_unlink_all(struct waitq *waitq) +kern_return_t +waitq_unlink_all(struct waitq *waitq) { kern_return_t kr = KERN_SUCCESS; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { panic("Invalid waitq: %p", waitq); + } assert(!waitq_irq_safe(waitq)); waitq_lock(waitq); @@ -4611,7 +4848,8 @@ kern_return_t waitq_unlink_all(struct waitq *waitq) * Note: * may (rarely) spin/block (see waitq_clear_prepost_locked) */ -kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset) +kern_return_t +waitq_set_unlink_all_unlock(struct waitq_set *wqset) { struct waitq_link *link; uint64_t prepost_id; @@ -4623,8 +4861,7 @@ kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset) * constituent wait queues. All we have to do is invalidate the SetID */ - if (waitqs_is_linked(wqset)){ - + if (waitqs_is_linked(wqset)) { /* invalidate and re-alloc the link object first */ link = wql_get_link(wqset->wqset_id); @@ -4647,8 +4884,9 @@ kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset) /* clear any preposts attached to this set */ prepost_id = 0; - if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id) + if (wqset->wqset_q.waitq_prepost && wqset->wqset_prepost_id) { prepost_id = wqset->wqset_prepost_id; + } /* else { TODO: notify kqueue subsystem? } */ wqset->wqset_prepost_id = 0; @@ -4664,9 +4902,10 @@ kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset) /* wqset->wqset_q unlocked */ /* drop / unlink all the prepost table objects */ - if (prepost_id) + if (prepost_id) { (void)wq_prepost_iterate(prepost_id, NULL, - wqset_clear_prepost_chain_cb); + wqset_clear_prepost_chain_cb); + } return KERN_SUCCESS; } @@ -4678,7 +4917,8 @@ kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset) * 'wqset' is not locked * may (rarely) spin/block (see waitq_clear_prepost_locked) */ -kern_return_t waitq_set_unlink_all(struct waitq_set *wqset) +kern_return_t +waitq_set_unlink_all(struct waitq_set *wqset) { assert(waitqs_is_set(wqset)); assert(!waitq_irq_safe(&wqset->wqset_q)); @@ -4688,8 +4928,9 @@ kern_return_t waitq_set_unlink_all(struct waitq_set *wqset) /* wqset unlocked and set links and preposts deallocated */ } -static int waitq_prepost_reserve_cb(struct waitq *waitq, void *ctx, - struct waitq_link *link) +static int +waitq_prepost_reserve_cb(struct waitq *waitq, void *ctx, + struct waitq_link *link) { uint32_t *num = (uint32_t *)ctx; (void)waitq; @@ -4704,14 +4945,16 @@ static int waitq_prepost_reserve_cb(struct waitq *waitq, void *ctx, * check to see if the associated waitq actually supports * preposting */ - if (waitq_set_can_prepost(link->wql_wqs.wql_set)) + if (waitq_set_can_prepost(link->wql_wqs.wql_set)) { *num += 2; + } } return WQ_ITERATE_CONTINUE; } -static int waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq, - int *did_unlock, struct wq_prepost **wqp) +static int +waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq, + int *did_unlock, struct wq_prepost **wqp) { struct wq_prepost *tmp; struct wqp_cache *cache; @@ -4727,8 +4970,9 @@ static int 
waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq, if (waitq) { disable_preemption(); cache = &PROCESSOR_DATA(current_processor(), wqp_cache); - if (nalloc <= (int)cache->avail) + if (nalloc <= (int)cache->avail) { goto do_alloc; + } enable_preemption(); /* unlock the waitq to perform the allocation */ @@ -4738,9 +4982,10 @@ static int waitq_alloc_prepost_reservation(int nalloc, struct waitq *waitq, do_alloc: tmp = wq_prepost_alloc(LT_RESERVED, nalloc); - if (!tmp) + if (!tmp) { panic("Couldn't reserve %d preposts for waitq @%p (wqp@%p)", - nalloc, waitq, *wqp); + nalloc, waitq, *wqp); + } if (*wqp) { /* link the two lists */ int __assert_only rc; @@ -4755,8 +5000,9 @@ do_alloc: * objects for callers such as selwakeup() that can be called with * spin locks held. */ - if (get_preemption_level() == 0) + if (get_preemption_level() == 0) { wq_prepost_ensure_free_space(); + } if (waitq) { if (*did_unlock == 0) { @@ -4771,7 +5017,8 @@ do_alloc: return nalloc; } -static int waitq_count_prepost_reservation(struct waitq *waitq, int extra, int keep_locked) +static int +waitq_count_prepost_reservation(struct waitq *waitq, int extra, int keep_locked) { int npreposts = 0; @@ -4785,8 +5032,9 @@ static int waitq_count_prepost_reservation(struct waitq *waitq, int extra, int k npreposts = 3; } else { /* this queue has never been preposted before */ - if (waitq->waitq_prepost_id == 0) + if (waitq->waitq_prepost_id == 0) { npreposts = 3; + } /* * Walk the set of table linkages associated with this waitq @@ -4802,13 +5050,14 @@ static int waitq_count_prepost_reservation(struct waitq *waitq, int extra, int k * contention on any sets to which this waitq belongs. */ (void)walk_waitq_links(LINK_WALK_FULL_DAG_UNLOCKED, - waitq, waitq->waitq_set_id, - WQL_WQS, (void *)&npreposts, - waitq_prepost_reserve_cb); + waitq, waitq->waitq_set_id, + WQL_WQS, (void *)&npreposts, + waitq_prepost_reserve_cb); } - if (extra > 0) + if (extra > 0) { npreposts += extra; + } if (npreposts == 0 && !keep_locked) { /* @@ -4849,8 +5098,9 @@ static int waitq_count_prepost_reservation(struct waitq *waitq, int extra, int k * is guaranteed to have enough pre-allocated prepost object to avoid * any (rare) blocking in the wakeup path. */ -uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra, - waitq_lock_state_t lock_state) +uint64_t +waitq_prepost_reserve(struct waitq *waitq, int extra, + waitq_lock_state_t lock_state) { uint64_t reserved = 0; uint64_t prev_setid = 0, prev_prepostid = 0; @@ -4860,7 +5110,7 @@ uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra, int unlocked = 0; wqdbg_v("Attempting to reserve prepost linkages for waitq %p (extra:%d)", - (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), extra); + (void *)VM_KERNEL_UNSLIDE_OR_PERM(waitq), extra); if (waitq == NULL && extra > 0) { /* @@ -4870,7 +5120,7 @@ uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra, * to the number of preposts requested by the caller */ nalloc = waitq_alloc_prepost_reservation(extra + 2, NULL, - &unlocked, &wqp); + &unlocked, &wqp); assert(nalloc == extra + 2); return wqp->wqp_prepostid.id; } @@ -4890,27 +5140,30 @@ uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra, * keep the set locked, then we don't have to reserve * anything! */ - if (prev_setid == 0 && keep_locked) + if (prev_setid == 0 && keep_locked) { goto out; + } npreposts = waitq_count_prepost_reservation(waitq, extra, keep_locked); /* nothing for us to do! 
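 *
 * Editorial sketch, not part of the original patch: the caller pattern
 * this reservation API exists for, mirroring waitq_wakeup64_one()
 * later in this patch ('wq', 'event' and 'prio' are placeholders):
 *
 *     uint64_t reserved = waitq_prepost_reserve(wq, 0, WAITQ_KEEP_LOCKED);
 *     ... 'wq' is now locked with enough preposts reserved ...
 *     waitq_wakeup64_one_locked(wq, event, THREAD_AWAKENED,
 *                               &reserved, prio, WAITQ_UNLOCK);
 *     waitq_prepost_release_reserve(reserved);
 *
 * The locked wakeup consumes reservations as it preposts, and the
 * final release returns any unused objects to the prepost table.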
*/ if (npreposts == 0) { - if (keep_locked) + if (keep_locked) { goto out; + } goto out_unlock; } try_alloc: /* this _may_ unlock and relock the waitq! */ nalloc = waitq_alloc_prepost_reservation(npreposts, waitq, - &unlocked, &wqp); + &unlocked, &wqp); if (!unlocked) { /* allocation held the waitq lock: we're done! */ - if (keep_locked) + if (keep_locked) { goto out; + } goto out_unlock; } @@ -4930,9 +5183,10 @@ try_alloc: */ if ((waitq->waitq_set_id == 0) || (waitq->waitq_set_id == prev_setid && - waitq->waitq_prepost_id == prev_prepostid)) { - if (keep_locked) + waitq->waitq_prepost_id == prev_prepostid)) { + if (keep_locked) { goto out; + } goto out_unlock; } @@ -4945,14 +5199,16 @@ try_alloc: goto try_alloc; } - if (keep_locked) + if (keep_locked) { goto out; + } out_unlock: waitq_unlock(waitq); out: - if (wqp) + if (wqp) { reserved = wqp->wqp_prepostid.id; + } return reserved; } @@ -4963,15 +5219,17 @@ out: * Conditions: * may (rarely) spin waiting for prepost table growth memcpy */ -void waitq_prepost_release_reserve(uint64_t id) +void +waitq_prepost_release_reserve(uint64_t id) { struct wq_prepost *wqp; wqdbg_v("releasing reserved preposts starting at: 0x%llx", id); wqp = wq_prepost_rfirst(id); - if (!wqp) + if (!wqp) { return; + } wq_prepost_release_rlist(wqp); } @@ -4983,32 +5241,37 @@ void waitq_prepost_release_reserve(uint64_t id) * Conditions: * 'wqset' is not locked */ -void waitq_set_clear_preposts(struct waitq_set *wqset) +void +waitq_set_clear_preposts(struct waitq_set *wqset) { uint64_t prepost_id; spl_t spl; assert(waitqs_is_set(wqset)); - if (!wqset->wqset_q.waitq_prepost || !wqset->wqset_prepost_id) + if (!wqset->wqset_q.waitq_prepost || !wqset->wqset_prepost_id) { return; + } wqdbg_v("Clearing all preposted queues on waitq_set: 0x%llx", - wqset->wqset_id); + wqset->wqset_id); - if (waitq_irq_safe(&wqset->wqset_q)) + if (waitq_irq_safe(&wqset->wqset_q)) { spl = splsched(); + } waitq_set_lock(wqset); prepost_id = wqset->wqset_prepost_id; wqset->wqset_prepost_id = 0; waitq_set_unlock(wqset); - if (waitq_irq_safe(&wqset->wqset_q)) + if (waitq_irq_safe(&wqset->wqset_q)) { splx(spl); + } /* drop / unlink all the prepost table objects */ - if (prepost_id) + if (prepost_id) { (void)wq_prepost_iterate(prepost_id, NULL, - wqset_clear_prepost_chain_cb); + wqset_clear_prepost_chain_cb); + } } @@ -5024,8 +5287,9 @@ struct wq_it_ctx { waitq_iterator_t it; }; -static int waitq_iterate_sets_cb(struct waitq *waitq, void *ctx, - struct waitq_link *link) +static int +waitq_iterate_sets_cb(struct waitq *waitq, void *ctx, + struct waitq_link *link) { struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx); struct waitq_set *wqset; @@ -5057,8 +5321,9 @@ static int waitq_iterate_sets_cb(struct waitq *waitq, void *ctx, * Called from wq_prepost_foreach_locked * (wqset locked, waitq _not_ locked) */ -static int wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx, - struct wq_prepost *wqp, struct waitq *waitq) +static int +wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx, + struct wq_prepost *wqp, struct waitq *waitq) { struct wq_it_ctx *wctx = (struct wq_it_ctx *)(ctx); uint64_t wqp_id; @@ -5078,11 +5343,13 @@ static int wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx, */ assert(!waitq_irq_safe(waitq)); - if (waitq_lock_try(waitq)) + if (waitq_lock_try(waitq)) { goto call_iterator; + } - if (!wqp_is_valid(wqp)) + if (!wqp_is_valid(wqp)) { return WQ_ITERATE_RESTART; + } /* We are passed a prepost object with a reference on it.
If neither * the waitq set nor the waitq require interrupts disabled, then we @@ -5094,13 +5361,14 @@ static int wqset_iterate_prepost_cb(struct waitq_set *wqset, void *ctx, wq_prepost_put(wqp); waitq_set_unlock(wqset); wqdbg_v("dropped set:%p lock waiting for wqp:%p (0x%llx -> wq:%p)", - wqset, wqp, wqp->wqp_prepostid.id, waitq); + wqset, wqp, wqp->wqp_prepostid.id, waitq); delay(1); waitq_set_lock(wqset); wqp = wq_prepost_get(wqp_id); - if (!wqp) + if (!wqp) { /* someone cleared preposts while we slept! */ return WQ_ITERATE_DROPPED; + } /* * TODO: @@ -5136,9 +5404,10 @@ out: * iterator over all sets to which the given waitq has been linked * * Conditions: - * 'waitq' is locked + * 'waitq' is locked */ -int waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it) +int +waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it) { int ret; struct wq_it_ctx wctx = { @@ -5146,13 +5415,15 @@ int waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it) .ctx = ctx, .it = it, }; - if (!it || !waitq) + if (!it || !waitq) { return KERN_INVALID_ARGUMENT; + } ret = walk_waitq_links(LINK_WALK_ONE_LEVEL, waitq, waitq->waitq_set_id, - WQL_WQS, (void *)&wctx, waitq_iterate_sets_cb); - if (ret == WQ_ITERATE_CONTINUE) + WQL_WQS, (void *)&wctx, waitq_iterate_sets_cb); + if (ret == WQ_ITERATE_CONTINUE) { ret = WQ_ITERATE_SUCCESS; + } return ret; } @@ -5160,23 +5431,25 @@ int waitq_iterate_sets(struct waitq *waitq, void *ctx, waitq_iterator_t it) * iterator over all preposts in the given wqset * * Conditions: - * 'wqset' is locked + * 'wqset' is locked */ -int waitq_set_iterate_preposts(struct waitq_set *wqset, - void *ctx, waitq_iterator_t it) +int +waitq_set_iterate_preposts(struct waitq_set *wqset, + void *ctx, waitq_iterator_t it) { struct wq_it_ctx wctx = { .input = (void *)wqset, .ctx = ctx, .it = it, }; - if (!it || !wqset) + if (!it || !wqset) { return WQ_ITERATE_INVALID; + } assert(waitq_held(&wqset->wqset_q)); return wq_prepost_foreach_locked(wqset, (void *)&wctx, - wqset_iterate_prepost_cb); + wqset_iterate_prepost_cb); } @@ -5193,29 +5466,33 @@ int waitq_set_iterate_preposts(struct waitq_set *wqset, * Conditions: * 'waitq' is not locked */ -wait_result_t waitq_assert_wait64(struct waitq *waitq, - event64_t wait_event, - wait_interrupt_t interruptible, - uint64_t deadline) +wait_result_t +waitq_assert_wait64(struct waitq *waitq, + event64_t wait_event, + wait_interrupt_t interruptible, + uint64_t deadline) { thread_t thread = current_thread(); wait_result_t ret; spl_t s; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { panic("Invalid waitq: %p", waitq); + } - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { s = splsched(); + } waitq_lock(waitq); ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible, - TIMEOUT_URGENCY_SYS_NORMAL, - deadline, TIMEOUT_NO_LEEWAY, thread); + TIMEOUT_URGENCY_SYS_NORMAL, + deadline, TIMEOUT_NO_LEEWAY, thread); waitq_unlock(waitq); - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { splx(s); + } return ret; } @@ -5227,30 +5504,34 @@ wait_result_t waitq_assert_wait64(struct waitq *waitq, * 'waitq' is not locked * will disable and re-enable interrupts while locking current_thread() */ -wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq, - event64_t wait_event, - wait_interrupt_t interruptible, - wait_timeout_urgency_t urgency, - uint64_t deadline, - uint64_t leeway) +wait_result_t +waitq_assert_wait64_leeway(struct waitq *waitq, + event64_t wait_event, + wait_interrupt_t interruptible, + 
wait_timeout_urgency_t urgency, + uint64_t deadline, + uint64_t leeway) { wait_result_t ret; thread_t thread = current_thread(); spl_t s; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { panic("Invalid waitq: %p", waitq); + } - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { s = splsched(); + } waitq_lock(waitq); ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible, - urgency, deadline, leeway, thread); + urgency, deadline, leeway, thread); waitq_unlock(waitq); - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { splx(s); + } return ret; } @@ -5266,15 +5547,17 @@ wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq, * Notes: * will _not_ block if waitq is global (or not a member of any set) */ -kern_return_t waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event, - wait_result_t result, int priority) +kern_return_t +waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event, + wait_result_t result, int priority) { kern_return_t kr; uint64_t reserved_preposts = 0; spl_t spl; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { panic("Invalid waitq: %p", waitq); + } if (!waitq_irq_safe(waitq)) { /* reserve preposts in addition to locking the waitq */ @@ -5286,10 +5569,11 @@ kern_return_t waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event, /* waitq is locked upon return */ kr = waitq_wakeup64_one_locked(waitq, wake_event, result, - &reserved_preposts, priority, WAITQ_UNLOCK); + &reserved_preposts, priority, WAITQ_UNLOCK); - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { splx(spl); + } /* release any left-over prepost object (won't block/lock anything) */ waitq_prepost_release_reserve(reserved_preposts); @@ -5308,38 +5592,40 @@ kern_return_t waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event, * Notes: * will _not_ block if waitq is global (or not a member of any set) */ -kern_return_t waitq_wakeup64_all(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - int priority) +kern_return_t +waitq_wakeup64_all(struct waitq *waitq, + event64_t wake_event, + wait_result_t result, + int priority) { kern_return_t ret; uint64_t reserved_preposts = 0; spl_t s; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { panic("Invalid waitq: %p", waitq); + } if (!waitq_irq_safe(waitq)) { /* reserve preposts in addition to locking waitq */ reserved_preposts = waitq_prepost_reserve(waitq, 0, - WAITQ_KEEP_LOCKED); + WAITQ_KEEP_LOCKED); } else { s = splsched(); waitq_lock(waitq); } ret = waitq_wakeup64_all_locked(waitq, wake_event, result, - &reserved_preposts, priority, - WAITQ_UNLOCK); + &reserved_preposts, priority, + WAITQ_UNLOCK); - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { splx(s); + } waitq_prepost_release_reserve(reserved_preposts); return ret; - } /** @@ -5351,19 +5637,22 @@ kern_return_t waitq_wakeup64_all(struct waitq *waitq, * Notes: * May temporarily disable and re-enable interrupts */ -kern_return_t waitq_wakeup64_thread(struct waitq *waitq, - event64_t wake_event, - thread_t thread, - wait_result_t result) +kern_return_t +waitq_wakeup64_thread(struct waitq *waitq, + event64_t wake_event, + thread_t thread, + wait_result_t result) { kern_return_t ret; spl_t s, th_spl; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { panic("Invalid waitq: %p", waitq); + } - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { s = splsched(); + } waitq_lock(waitq); ret = waitq_select_thread_locked(waitq, wake_event, thread, &th_spl); @@ -5382,8 +5671,9 @@ kern_return_t 
waitq_wakeup64_thread(struct waitq *waitq, waitq_stats_count_fail(waitq); } - if (waitq_irq_safe(waitq)) + if (waitq_irq_safe(waitq)) { splx(s); + } return ret; } @@ -5403,17 +5693,18 @@ kern_return_t waitq_wakeup64_thread(struct waitq *waitq, */ thread_t waitq_wakeup64_identify(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - int priority) + event64_t wake_event, + wait_result_t result, + int priority) { uint64_t reserved_preposts = 0; spl_t thread_spl = 0; thread_t thread; spl_t spl; - if (!waitq_valid(waitq)) + if (!waitq_valid(waitq)) { panic("Invalid waitq: %p", waitq); + } if (!waitq_irq_safe(waitq)) { /* reserve preposts in addition to locking waitq */ @@ -5424,8 +5715,8 @@ waitq_wakeup64_identify(struct waitq *waitq, } thread = waitq_wakeup64_identify_locked(waitq, wake_event, result, - &thread_spl, &reserved_preposts, - priority, WAITQ_UNLOCK); + &thread_spl, &reserved_preposts, + priority, WAITQ_UNLOCK); /* waitq is unlocked, thread is locked */ if (thread != THREAD_NULL) { @@ -5433,9 +5724,10 @@ waitq_wakeup64_identify(struct waitq *waitq, thread_unlock(thread); splx(thread_spl); } - - if (waitq_irq_safe(waitq)) - splx(spl); + + if (waitq_irq_safe(waitq)) { + splx(spl); + } /* release any left-over prepost object (won't block/lock anything) */ waitq_prepost_release_reserve(reserved_preposts); @@ -5443,4 +5735,3 @@ waitq_wakeup64_identify(struct waitq *waitq, /* returns +1 ref to running thread or THREAD_NULL */ return thread; } - diff --git a/osfmk/kern/waitq.h b/osfmk/kern/waitq.h index c3fee4a8c..9eb863a7b 100644 --- a/osfmk/kern/waitq.h +++ b/osfmk/kern/waitq.h @@ -27,13 +27,13 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #include #include -#include /* for kern_return_t */ +#include /* for kern_return_t */ -#include /* for wait_queue_t */ +#include /* for wait_queue_t */ #include #include @@ -59,30 +59,6 @@ typedef enum e_waitq_lock_state { WAITQ_DONT_LOCK = 0x10, } waitq_lock_state_t; -/* - * The Jenkins "one at a time" hash. - * TBD: There may be some value to unrolling here, - * depending on the architecture. - */ -static __inline__ uint32_t -jenkins_hash(char *key, size_t length) -{ - uint32_t hash = 0; - size_t i; - - for (i = 0; i < length; i++) { - hash += (uint32_t)key[i]; - hash += (hash << 10); - hash ^= (hash >> 6); - } - - hash += (hash << 3); - hash ^= (hash >> 11); - hash += (hash << 15); - - return hash; -} - /* Opaque sizes and alignment used for struct verification */ #if __arm__ || __arm64__ #define WQ_OPAQUE_ALIGN __BIGGEST_ALIGNMENT__ @@ -160,25 +136,25 @@ struct wq_stats { */ struct waitq { uint32_t /* flags */ - waitq_type:2, /* only public field */ - waitq_fifo:1, /* fifo wakeup policy? */ - waitq_prepost:1, /* waitq supports prepost? */ - waitq_irq:1, /* waitq requires interrupts disabled */ - waitq_isvalid:1, /* waitq structure is valid */ - waitq_turnstile_or_port:1, /* waitq is embedded in a turnstile (if irq safe), or port (if not irq safe) */ - waitq_eventmask:_EVENT_MASK_BITS; - /* the wait queue set (set-of-sets) to which this queue belongs */ + waitq_type:2, /* only public field */ + waitq_fifo:1, /* fifo wakeup policy? */ + waitq_prepost:1, /* waitq supports prepost? 
*/ + waitq_irq:1, /* waitq requires interrupts disabled */ + waitq_isvalid:1, /* waitq structure is valid */ + waitq_turnstile_or_port:1, /* waitq is embedded in a turnstile (if irq safe), or port (if not irq safe) */ + waitq_eventmask:_EVENT_MASK_BITS; + /* the wait queue set (set-of-sets) to which this queue belongs */ #if __arm64__ - hw_lock_bit_t waitq_interlock; /* interlock */ + hw_lock_bit_t waitq_interlock; /* interlock */ #else - hw_lock_data_t waitq_interlock; /* interlock */ + hw_lock_data_t waitq_interlock; /* interlock */ #endif /* __arm64__ */ uint64_t waitq_set_id; uint64_t waitq_prepost_id; union { - queue_head_t waitq_queue; /* queue of elements */ - struct priority_queue waitq_prio_queue; /* priority ordered queue of elements */ + queue_head_t waitq_queue; /* queue of elements */ + struct priority_queue waitq_prio_queue; /* priority ordered queue of elements */ }; }; @@ -228,12 +204,13 @@ extern void waitq_bootstrap(void); /* * Invalidate a waitq. The only valid waitq functions to call after this are: - * waitq_deinit() - * waitq_set_deinit() + * waitq_deinit() + * waitq_set_deinit() */ extern void waitq_invalidate_locked(struct waitq *wq); -static inline boolean_t waitq_empty(struct waitq *wq) +static inline boolean_t +waitq_empty(struct waitq *wq) { if (waitq_is_turnstile_queue(wq)) { return priority_queue_empty(&(wq->waitq_prio_queue)); @@ -242,13 +219,15 @@ static inline boolean_t waitq_empty(struct waitq *wq) } } +extern lck_grp_t waitq_lck_grp; + #if __arm64__ #define waitq_held(wq) \ (hw_lock_bit_held(&(wq)->waitq_interlock, LCK_ILOCK)) #define waitq_lock_try(wq) \ - (hw_lock_bit_try(&(wq)->waitq_interlock, LCK_ILOCK)) + (hw_lock_bit_try(&(wq)->waitq_interlock, LCK_ILOCK, &waitq_lck_grp)) #else @@ -256,7 +235,7 @@ static inline boolean_t waitq_empty(struct waitq *wq) (hw_lock_held(&(wq)->waitq_interlock)) #define waitq_lock_try(wq) \ - (hw_lock_try(&(wq)->waitq_interlock)) + (hw_lock_try(&(wq)->waitq_interlock, &waitq_lck_grp)) #endif /* __arm64__ */ @@ -265,61 +244,61 @@ static inline boolean_t waitq_empty(struct waitq *wq) extern void waitq_lock(struct waitq *wq); -#define waitq_set_lock(wqs) waitq_lock(&(wqs)->wqset_q) -#define waitq_set_unlock(wqs) waitq_unlock(&(wqs)->wqset_q) -#define waitq_set_lock_try(wqs) waitq_lock_try(&(wqs)->wqset_q) -#define waitq_set_can_prepost(wqs) (waitqs_is_set(wqs) && \ - (wqs)->wqset_q.waitq_prepost) -#define waitq_set_maybe_preposted(wqs) ((wqs)->wqset_q.waitq_prepost && \ - (wqs)->wqset_prepost_id > 0) -#define waitq_set_has_prepost_hook(wqs) (waitqs_is_set(wqs) && \ - !((wqs)->wqset_q.waitq_prepost) && \ - (wqs)->wqset_prepost_hook) +#define waitq_set_lock(wqs) waitq_lock(&(wqs)->wqset_q) +#define waitq_set_unlock(wqs) waitq_unlock(&(wqs)->wqset_q) +#define waitq_set_lock_try(wqs) waitq_lock_try(&(wqs)->wqset_q) +#define waitq_set_can_prepost(wqs) (waitqs_is_set(wqs) && \ + (wqs)->wqset_q.waitq_prepost) +#define waitq_set_maybe_preposted(wqs) ((wqs)->wqset_q.waitq_prepost && \ + (wqs)->wqset_prepost_id > 0) +#define waitq_set_has_prepost_hook(wqs) (waitqs_is_set(wqs) && \ + !((wqs)->wqset_q.waitq_prepost) && \ + (wqs)->wqset_prepost_hook) /* assert intent to wait on a locked wait queue */ extern wait_result_t waitq_assert_wait64_locked(struct waitq *waitq, - event64_t wait_event, - wait_interrupt_t interruptible, - wait_timeout_urgency_t urgency, - uint64_t deadline, - uint64_t leeway, - thread_t thread); + event64_t wait_event, + wait_interrupt_t interruptible, + wait_timeout_urgency_t urgency, + uint64_t deadline, + 
uint64_t leeway, + thread_t thread); /* pull a thread from its wait queue */ extern int waitq_pull_thread_locked(struct waitq *waitq, thread_t thread); /* wakeup all threads waiting for a particular event on locked queue */ extern kern_return_t waitq_wakeup64_all_locked(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - uint64_t *reserved_preposts, - int priority, - waitq_lock_state_t lock_state); + event64_t wake_event, + wait_result_t result, + uint64_t *reserved_preposts, + int priority, + waitq_lock_state_t lock_state); /* wakeup one thread waiting for a particular event on locked queue */ extern kern_return_t waitq_wakeup64_one_locked(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - uint64_t *reserved_preposts, - int priority, - waitq_lock_state_t lock_state); + event64_t wake_event, + wait_result_t result, + uint64_t *reserved_preposts, + int priority, + waitq_lock_state_t lock_state); /* return identity of a thread awakened for a particular event */ extern thread_t waitq_wakeup64_identify_locked(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - spl_t *spl, - uint64_t *reserved_preposts, - int priority, - waitq_lock_state_t lock_state); + event64_t wake_event, + wait_result_t result, + spl_t *spl, + uint64_t *reserved_preposts, + int priority, + waitq_lock_state_t lock_state); /* wakeup thread iff it's still waiting for a particular event on locked queue */ extern kern_return_t waitq_wakeup64_thread_locked(struct waitq *waitq, - event64_t wake_event, - thread_t thread, - wait_result_t result, - waitq_lock_state_t lock_state); + event64_t wake_event, + thread_t thread, + wait_result_t result, + waitq_lock_state_t lock_state); /* clear all preposts generated by the given waitq */ extern int waitq_clear_prepost_locked(struct waitq *waitq); @@ -340,7 +319,7 @@ extern kern_return_t waitq_set_unlink_all_unlock(struct waitq_set *wqset); * (given via WAITQ_PROMOTE_PRIORITY in the wakeup function) */ extern void waitq_clear_promotion_locked(struct waitq *waitq, - thread_t thread); + thread_t thread); /* * waitq iteration */ @@ -363,21 +342,21 @@ enum waitq_iteration_constant { /* callback invoked with both 'waitq' and 'wqset' locked */ typedef int (*waitq_iterator_t)(void *ctx, struct waitq *waitq, - struct waitq_set *wqset); + struct waitq_set *wqset); /* iterate over all sets to which waitq belongs */ extern int waitq_iterate_sets(struct waitq *waitq, void *ctx, - waitq_iterator_t it); + waitq_iterator_t it); /* iterator over all waitqs that have preposted to wqset */ extern int waitq_set_iterate_preposts(struct waitq_set *wqset, - void *ctx, waitq_iterator_t it); + void *ctx, waitq_iterator_t it); /* * prepost reservation */ extern uint64_t waitq_prepost_reserve(struct waitq *waitq, int extra, - waitq_lock_state_t lock_state); + waitq_lock_state_t lock_state); extern void waitq_prepost_release_reserve(uint64_t id); @@ -390,7 +369,7 @@ extern void waitq_prepost_release_reserve(uint64_t id); struct waitq { char opaque[WQ_OPAQUE_SIZE]; } __attribute__((aligned(WQ_OPAQUE_ALIGN))); struct waitq_set { char opaque[WQS_OPAQUE_SIZE]; } __attribute__((aligned(WQS_OPAQUE_ALIGN))); -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS @@ -415,8 +394,8 @@ extern struct waitq *global_waitq(int index); extern struct waitq_set *waitq_set_alloc(int policy, void *prepost_hook); extern kern_return_t waitq_set_init(struct waitq_set *wqset, - int policy, uint64_t *reserved_link, - void *prepost_hook); + int policy, uint64_t
*reserved_link, + void *prepost_hook); extern void waitq_set_deinit(struct waitq_set *wqset); @@ -449,9 +428,9 @@ extern boolean_t waitq_in_set(struct waitq *waitq); /* on success, consumes a reserved_link reference */ extern kern_return_t waitq_link(struct waitq *waitq, - struct waitq_set *wqset, - waitq_lock_state_t lock_state, - uint64_t *reserved_link); + struct waitq_set *wqset, + waitq_lock_state_t lock_state, + uint64_t *reserved_link); extern kern_return_t waitq_unlink(struct waitq *waitq, struct waitq_set *wqset); @@ -521,43 +500,43 @@ extern void waitq_prepost_stats(struct wq_table_stats *stats); /* assert intent to wait on <waitq,event64> pair */ extern wait_result_t waitq_assert_wait64(struct waitq *waitq, - event64_t wait_event, - wait_interrupt_t interruptible, - uint64_t deadline); + event64_t wait_event, + wait_interrupt_t interruptible, + uint64_t deadline); extern wait_result_t waitq_assert_wait64_leeway(struct waitq *waitq, - event64_t wait_event, - wait_interrupt_t interruptible, - wait_timeout_urgency_t urgency, - uint64_t deadline, - uint64_t leeway); + event64_t wait_event, + wait_interrupt_t interruptible, + wait_timeout_urgency_t urgency, + uint64_t deadline, + uint64_t leeway); /* wakeup the most appropriate thread waiting on <waitq,event64> pair */ extern kern_return_t waitq_wakeup64_one(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - int priority); + event64_t wake_event, + wait_result_t result, + int priority); /* wakeup all the threads waiting on <waitq,event64> pair */ extern kern_return_t waitq_wakeup64_all(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - int priority); + event64_t wake_event, + wait_result_t result, + int priority); -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE /* wakeup a specified thread iff it's waiting on <waitq,event64> pair */ extern kern_return_t waitq_wakeup64_thread(struct waitq *waitq, - event64_t wake_event, - thread_t thread, - wait_result_t result); + event64_t wake_event, + thread_t thread, + wait_result_t result); /* return a reference to the thread that was woken up */ extern thread_t waitq_wakeup64_identify(struct waitq *waitq, - event64_t wake_event, - wait_result_t result, - int priority); + event64_t wake_event, + wait_result_t result, + int priority); /* release the waitq lock */ extern void waitq_unlock(struct waitq *wq); @@ -566,5 +545,5 @@ __END_DECLS -#endif /* KERNEL_PRIVATE */ -#endif /* _WAITQ_H_ */ +#endif /* KERNEL_PRIVATE */ +#endif /* _WAITQ_H_ */ diff --git a/osfmk/kern/work_interval.c b/osfmk/kern/work_interval.c index 9e9e189e7..4c1d4cbda 100644 --- a/osfmk/kern/work_interval.c +++ b/osfmk/kern/work_interval.c @@ -88,7 +88,7 @@ wi_retain(struct work_interval *work_interval) { uint32_t old_count; old_count = atomic_fetch_add_explicit(&work_interval->wi_ref_count, - 1, memory_order_relaxed); + 1, memory_order_relaxed); assert(old_count > 0); } @@ -97,12 +97,11 @@ wi_release(struct work_interval *work_interval) { uint32_t old_count; old_count = atomic_fetch_sub_explicit(&work_interval->wi_ref_count, - 1, memory_order_relaxed); + 1, memory_order_relaxed); assert(old_count > 0); if (old_count == 1) { - kfree(work_interval, sizeof(struct work_interval)); } } @@ -124,14 +123,15 @@ work_interval_port_alloc(struct work_interval *work_interval) { ipc_port_t work_interval_port = ipc_port_alloc_kernel(); - if (work_interval_port == IP_NULL) + if (work_interval_port == IP_NULL) { panic("failed to allocate work interval port"); + } assert(work_interval->wi_port == IP_NULL);
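	/*
	 * Editorial sketch, not part of the original patch: the overall
	 * kern_work_interval_* flow these routines support, with
	 * placeholder argument structures ('create_args', 'kwi_args'):
	 *
	 *     kern_work_interval_create(current_thread(), &create_args);
	 *     ... another thread joins via a port name it was passed ...
	 *     kern_work_interval_join(current_thread(), port_name);
	 *     ... a thread in the creating task reports timing ...
	 *     kern_work_interval_notify(current_thread(), &kwi_args);
	 *     ... and the creator tears the interval down ...
	 *     kern_work_interval_destroy(current_thread(), work_interval_id);
	 *
	 * The kobject port allocated here is the object a joiner's port
	 * name resolves to (see port_name_to_work_interval() below).
	 */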
ip_lock(work_interval_port); ipc_kobject_set_atomically(work_interval_port, (ipc_kobject_t)work_interval, - IKOT_WORK_INTERVAL); + IKOT_WORK_INTERVAL); ipc_port_t notify_port = ipc_port_make_sonce_locked(work_interval_port); ipc_port_t old_notify_port = IP_NULL; @@ -160,14 +160,17 @@ work_interval_port_convert_locked(ipc_port_t port) { struct work_interval *work_interval = NULL; - if (!IP_VALID(port)) + if (!IP_VALID(port)) { return NULL; + } - if (!ip_active(port)) + if (!ip_active(port)) { return NULL; + } - if (IKOT_WORK_INTERVAL != ip_kotype(port)) + if (IKOT_WORK_INTERVAL != ip_kotype(port)) { return NULL; + } work_interval = (struct work_interval *)port->ip_kobject; @@ -188,17 +191,19 @@ work_interval_port_convert_locked(ipc_port_t port) */ static kern_return_t port_name_to_work_interval(mach_port_name_t name, - struct work_interval **work_interval) + struct work_interval **work_interval) { - if (!MACH_PORT_VALID(name)) + if (!MACH_PORT_VALID(name)) { return KERN_INVALID_NAME; + } ipc_port_t port = IPC_PORT_NULL; kern_return_t kr = KERN_SUCCESS; kr = ipc_port_translate_send(current_space(), name, &port); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* port is locked */ assert(IP_VALID(port)); @@ -208,16 +213,17 @@ port_name_to_work_interval(mach_port_name_t name, converted_work_interval = work_interval_port_convert_locked(port); /* the port is valid, but doesn't denote a work_interval */ - if (converted_work_interval == NULL) + if (converted_work_interval == NULL) { kr = KERN_INVALID_CAPABILITY; + } ip_unlock(port); - if (kr == KERN_SUCCESS) + if (kr == KERN_SUCCESS) { *work_interval = converted_work_interval; + } return kr; - } @@ -240,30 +246,36 @@ work_interval_port_notify(mach_msg_header_t *msg) ipc_port_t port = notification->not_header.msgh_remote_port; struct work_interval *work_interval = NULL; - if (!IP_VALID(port)) + if (!IP_VALID(port)) { panic("work_interval_port_notify(): invalid port"); + } ip_lock(port); - if (!ip_active(port)) + if (!ip_active(port)) { panic("work_interval_port_notify(): inactive port %p", port); + } - if (ip_kotype(port) != IKOT_WORK_INTERVAL) + if (ip_kotype(port) != IKOT_WORK_INTERVAL) { panic("work_interval_port_notify(): not the right kobject: %p, %d\n", - port, ip_kotype(port)); + port, ip_kotype(port)); + } - if (port->ip_mscount != notification->not_count) + if (port->ip_mscount != notification->not_count) { panic("work_interval_port_notify(): unexpected make-send count: %p, %d, %d", - port, port->ip_mscount, notification->not_count); + port, port->ip_mscount, notification->not_count); + } - if (port->ip_srights != 0) + if (port->ip_srights != 0) { panic("work_interval_port_notify(): unexpected send right count: %p, %d", - port, port->ip_srights); + port, port->ip_srights); + } work_interval = (struct work_interval *)port->ip_kobject; - if (work_interval == NULL) + if (work_interval == NULL) { panic("work_interval_port_notify(): missing kobject: %p", port); + } ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE); @@ -285,7 +297,7 @@ work_interval_port_notify(mach_msg_header_t *msg) */ static void thread_set_work_interval(thread_t thread, - struct work_interval *work_interval) + struct work_interval *work_interval) { assert(thread == current_thread()); @@ -295,15 +307,17 @@ thread_set_work_interval(thread_t thread, thread->th_work_interval = work_interval; - if (old_th_wi != NULL) + if (old_th_wi != NULL) { wi_release(old_th_wi); + } } void work_interval_thread_terminate(thread_t thread) { - if 
(thread->th_work_interval != NULL) + if (thread->th_work_interval != NULL) { thread_set_work_interval(thread, NULL); + } } @@ -319,15 +333,15 @@ kern_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_a if (work_interval == NULL || work_interval->wi_id != kwi_args->work_interval_id) { /* This thread must have adopted the work interval to be able to notify */ - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } task_t notifying_task = current_task(); - if (work_interval->wi_creator_uniqueid != get_task_uniqueid(notifying_task) || + if (work_interval->wi_creator_uniqueid != get_task_uniqueid(notifying_task) || work_interval->wi_creator_pidversion != get_task_version(notifying_task)) { /* Only the creating task can do a notify */ - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } spl_t s = splsched(); @@ -341,7 +355,7 @@ kern_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_a /* called without interrupts disabled */ machine_work_interval_notify(thread, kwi_args); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* Start at 1, 0 is not a valid work interval ID */ @@ -349,24 +363,25 @@ static _Atomic uint64_t unique_work_interval_id = 1; kern_return_t kern_work_interval_create(thread_t thread, - struct kern_work_interval_create_args *create_params) + struct kern_work_interval_create_args *create_params) { assert(thread == current_thread()); if (thread->th_work_interval != NULL) { /* already assigned a work interval */ - return (KERN_FAILURE); + return KERN_FAILURE; } struct work_interval *work_interval = kalloc(sizeof(*work_interval)); - if (work_interval == NULL) + if (work_interval == NULL) { panic("failed to allocate work_interval"); + } bzero(work_interval, sizeof(*work_interval)); uint64_t old_value = atomic_fetch_add_explicit(&unique_work_interval_id, 1, - memory_order_relaxed); + memory_order_relaxed); uint64_t work_interval_id = old_value + 1; @@ -380,12 +395,15 @@ kern_work_interval_create(thread_t thread, * There can only be one CA_CLIENT work interval (created by UIKit) * per each application task */ - if (create_flags & (WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_FLAG_GROUP)) - return (KERN_FAILURE); - if (!task_is_app(creating_task)) - return (KERN_NOT_SUPPORTED); - if (task_set_ca_client_wi(creating_task, true) == false) - return (KERN_FAILURE); + if (create_flags & (WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_FLAG_GROUP)) { + return KERN_FAILURE; + } + if (!task_is_app(creating_task)) { + return KERN_NOT_SUPPORTED; + } + if (task_set_ca_client_wi(creating_task, true) == false) { + return KERN_FAILURE; + } } *work_interval = (struct work_interval) { @@ -429,13 +447,14 @@ kern_work_interval_create(thread_t thread, kern_return_t kern_work_interval_destroy(thread_t thread, uint64_t work_interval_id) { - if (work_interval_id == 0) + if (work_interval_id == 0) { return KERN_INVALID_ARGUMENT; + } if (thread->th_work_interval == NULL || thread->th_work_interval->wi_id != work_interval_id) { /* work ID isn't valid or doesn't match joined work interval ID */ - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } thread_set_work_interval(thread, NULL); @@ -445,7 +464,7 @@ kern_work_interval_destroy(thread_t thread, uint64_t work_interval_id) kern_return_t kern_work_interval_join(thread_t thread, - mach_port_name_t port_name) + mach_port_name_t port_name) { struct work_interval *work_interval = NULL; kern_return_t kr; @@ -457,8 +476,9 @@ kern_work_interval_join(thread_t thread, } kr = 
port_name_to_work_interval(port_name, &work_interval); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } /* work_interval has a +1 ref */ assert(work_interval != NULL); diff --git a/osfmk/kern/xpr.c b/osfmk/kern/xpr.c index a08724be9..0c28eabab 100644 --- a/osfmk/kern/xpr.c +++ b/osfmk/kern/xpr.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -75,33 +75,34 @@ * Just set xprenable false so the buffer isn't overwritten. 
*/ -decl_simple_lock_data(,xprlock) -boolean_t xprenable = TRUE; /* Enable xpr tracing */ -int nxprbufs = 0; /* Number of contiguous xprbufs allocated */ -int xprflags = 0; /* Bit mask of xpr flags enabled */ -struct xprbuf *xprbase; /* Pointer to circular buffer nxprbufs*sizeof(xprbuf)*/ -struct xprbuf *xprptr; /* Currently allocated xprbuf */ -struct xprbuf *xprlast; /* Pointer to end of circular buffer */ +decl_simple_lock_data(, xprlock) +boolean_t xprenable = TRUE; /* Enable xpr tracing */ +int nxprbufs = 0; /* Number of contiguous xprbufs allocated */ +int xprflags = 0; /* Bit mask of xpr flags enabled */ +struct xprbuf *xprbase; /* Pointer to circular buffer nxprbufs*sizeof(xprbuf)*/ +struct xprbuf *xprptr; /* Currently allocated xprbuf */ +struct xprbuf *xprlast; /* Pointer to end of circular buffer */ void xpr( - const char *msg, - long arg1, - long arg2, - long arg3, - long arg4, - long arg5) + const char *msg, + long arg1, + long arg2, + long arg3, + long arg4, + long arg5) { spl_t s; struct xprbuf *x; /* If we aren't initialized, ignore trace request */ - if (!xprenable || (xprptr == 0)) + if (!xprenable || (xprptr == 0)) { return; + } /* Guard against all interrupts and allocate next buffer. */ s = splhigh(); - simple_lock(&xprlock); + simple_lock(&xprlock, LCK_GRP_NULL); x = xprptr++; if (xprptr >= xprlast) { /* wrap around */ @@ -122,4 +123,3 @@ xpr( x->cpuinfo = cpu_number(); mp_enable_preemption(); } - diff --git a/osfmk/kern/xpr.h b/osfmk/kern/xpr.h index 3a2a92d9b..e63d9e6d3 100644 --- a/osfmk/kern/xpr.h +++ b/osfmk/kern/xpr.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,45 +22,45 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ /* - * Include file for xpr circular buffer silent tracing. + * Include file for xpr circular buffer silent tracing. * */ /* - * If the kernel flag XPRDEBUG is set, the XPR macro is enabled. The + * If the kernel flag XPRDEBUG is set, the XPR macro is enabled. The * macro should be invoked something like the following: * XPR(XPR_SYSCALLS, "syscall: %d, 0x%x\n", syscallno, arg1, 0,0,0); * which will expand into the following code: @@ -73,29 +73,29 @@ * of the kernel can easily be reconstructed in a post-processor which * performs the printf processing. * - * If the XPRDEBUG compilation switch is not set, the XPR macro expands + * If the XPRDEBUG compilation switch is not set, the XPR macro expands * to nothing. */ -#ifndef _KERN_XPR_H_ +#ifndef _KERN_XPR_H_ #define _KERN_XPR_H_ -#ifdef MACH_KERNEL +#ifdef MACH_KERNEL #include -#else /* MACH_KERNEL */ +#else /* MACH_KERNEL */ #include -#endif /* MACH_KERNEL */ +#endif /* MACH_KERNEL */ #include -#if XPR_DEBUG +#if XPR_DEBUG -#define XPR(flags, msg, arg1, arg2, arg3, arg4, arg5) \ -MACRO_BEGIN \ - if (xprflags & (flags)) { \ - xpr((msg), (long)(arg1), (long)(arg2), \ - (long)(arg3), (long)(arg4), (long)(arg5)); \ - } \ +#define XPR(flags, msg, arg1, arg2, arg3, arg4, arg5) \ +MACRO_BEGIN \ + if (xprflags & (flags)) { \ + xpr((msg), (long)(arg1), (long)(arg2), \ + (long)(arg3), (long)(arg4), (long)(arg5)); \ + } \ MACRO_END extern int xprflags; @@ -103,33 +103,33 @@ extern int xprflags; /* * flags for message types. 
*/ -#define XPR_TRAPS (1 << 1) -#define XPR_SCHED (1 << 2) -#define XPR_LOCK (1 << 3) -#define XPR_SLOCK (1 << 4) -#define XPR_PMAP (1 << 6) -#define XPR_VM_MAP (1 << 7) -#define XPR_VM_OBJECT (1 << 8) -#define XPR_VM_OBJECT_CACHE (1 << 9) -#define XPR_VM_PAGE (1 << 10) -#define XPR_VM_PAGEOUT (1 << 11) -#define XPR_MEMORY_OBJECT (1 << 12) -#define XPR_VM_FAULT (1 << 13) -#define XPR_VM_OBJECT_REP (1 << 14) -#define XPR_DEFAULT_PAGER (1 << 15) -#define XPR_INODE_PAGER (1 << 16) -#define XPR_INODE_PAGER_DATA (1 << 17) -#define XPR_XMM (1 << 18) +#define XPR_TRAPS (1 << 1) +#define XPR_SCHED (1 << 2) +#define XPR_LOCK (1 << 3) +#define XPR_SLOCK (1 << 4) +#define XPR_PMAP (1 << 6) +#define XPR_VM_MAP (1 << 7) +#define XPR_VM_OBJECT (1 << 8) +#define XPR_VM_OBJECT_CACHE (1 << 9) +#define XPR_VM_PAGE (1 << 10) +#define XPR_VM_PAGEOUT (1 << 11) +#define XPR_MEMORY_OBJECT (1 << 12) +#define XPR_VM_FAULT (1 << 13) +#define XPR_VM_OBJECT_REP (1 << 14) +#define XPR_DEFAULT_PAGER (1 << 15) +#define XPR_INODE_PAGER (1 << 16) +#define XPR_INODE_PAGER_DATA (1 << 17) +#define XPR_XMM (1 << 18) -#else /* XPR_DEBUG */ +#else /* XPR_DEBUG */ #define XPR(flags, msg, arg1, arg2, arg3, arg4, arg5) -#endif /* XPR_DEBUG */ +#endif /* XPR_DEBUG */ struct xprbuf { - const char *msg; - long arg1,arg2,arg3,arg4,arg5; - int timestamp; - int cpuinfo; + const char *msg; + long arg1, arg2, arg3, arg4, arg5; + int timestamp; + int cpuinfo; }; /* Bootstrap XPR facility */ @@ -140,11 +140,11 @@ extern void xprinit(void); /* Log an XPR message */ extern void xpr( - const char *msg, - long arg1, - long arg2, - long arg3, - long arg4, - long arg5); + const char *msg, + long arg1, + long arg2, + long arg3, + long arg4, + long arg5); #endif /* _KERN_XPR_H_ */ diff --git a/osfmk/kern/zalloc.c b/osfmk/kern/zalloc.c index a9091abf4..19562002c 100644 --- a/osfmk/kern/zalloc.c +++ b/osfmk/kern/zalloc.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. 
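/*
 * When XPR_DEBUG is configured, the XPR() macro above evaluates its
 * arguments and calls xpr() only if one of the requested bits is set in the
 * global xprflags mask; otherwise the whole macro expands to nothing. A tiny
 * sketch of that two-level gate with made-up names (TRACE, TR_*,
 * trace_flags); do/while(0) plays the role of MACRO_BEGIN/MACRO_END.
 */
#include <stdio.h>

#define TR_SCHED (1 << 2)   /* mirrors XPR_SCHED */
#define TR_LOCK  (1 << 3)   /* mirrors XPR_LOCK */

#define TRACE_DEBUG 1       /* compile-time switch, like XPR_DEBUG */

static int trace_flags = TR_SCHED;   /* run-time mask, like xprflags */

#if TRACE_DEBUG
#define TRACE(flags, msg, a1, a2)                              \
    do {                                                       \
        if (trace_flags & (flags)) {                           \
            printf((msg), (long)(a1), (long)(a2));             \
        }                                                      \
    } while (0)
#else
#define TRACE(flags, msg, a1, a2)    /* compiles away entirely */
#endif

int
main(void)
{
    TRACE(TR_SCHED, "resched: %ld to %ld\n", 1, 2);      /* printed */
    TRACE(TR_LOCK, "contended %ld for %ld us\n", 3, 4);  /* masked off */
    return 0;
}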
- * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -106,13 +106,22 @@ #include +/* + * The zone_locks_grp allows for collecting lock statistics. + * All locks are associated to this group in zinit. + * Look at tools/lockstat for debugging lock contention. + */ + +lck_grp_t zone_locks_grp; +lck_grp_attr_t zone_locks_grp_attr; + /* * ZONE_ALIAS_ADDR (deprecated) */ #define from_zone_map(addr, size) \ - ((vm_offset_t)(addr) >= zone_map_min_address && \ - ((vm_offset_t)(addr) + size - 1) < zone_map_max_address ) + ((vm_offset_t)(addr) >= zone_map_min_address && \ + ((vm_offset_t)(addr) + size - 1) < zone_map_max_address ) /* * Zone Corruption Debugging @@ -179,7 +188,6 @@ sample_counter(volatile uint32_t * count_p, uint32_t factor) } else { rolled_over = FALSE; } - } while (!OSCompareAndSwap(old_count, new_count, count_p)); return rolled_over; @@ -266,10 +274,11 @@ zp_init(void) if (zp_factor != 0) { uint32_t rand_bits = early_random() & 0x3; - if (rand_bits == 0x1) + if (rand_bits == 0x1) { zp_factor += 1; - else if (rand_bits == 0x2) + } else if (rand_bits == 0x2) { zp_factor -= 1; + } /* if 0x0 or 0x3, leave it alone */ } @@ -299,9 +308,10 @@ zp_init(void) zp_nopoison_cookie = (uintptr_t) early_random(); #if MACH_ASSERT - if (zp_poisoned_cookie == zp_nopoison_cookie) + if (zp_poisoned_cookie == zp_nopoison_cookie) { panic("early_random() is broken: %p and %p are not random\n", - (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie); + (void *) zp_poisoned_cookie, (void *) zp_nopoison_cookie); + } #endif /* @@ -337,14 +347,14 @@ zp_init(void) * of pages being used by the zone currently. The * z->page_count is not protected by the zone lock. */ -#define ZONE_PAGE_COUNT_INCR(z, count) \ -{ \ - OSAddAtomic64(count, &(z->page_count)); \ +#define ZONE_PAGE_COUNT_INCR(z, count) \ +{ \ + OSAddAtomic64(count, &(z->page_count)); \ } -#define ZONE_PAGE_COUNT_DECR(z, count) \ -{ \ - OSAddAtomic64(-count, &(z->page_count)); \ +#define ZONE_PAGE_COUNT_DECR(z, count) \ +{ \ + OSAddAtomic64(-count, &(z->page_count)); \ } vm_map_t zone_map = VM_MAP_NULL; @@ -355,14 +365,14 @@ vm_offset_t zone_map_min_address = 0; /* initialized in zone_init */ vm_offset_t zone_map_max_address = 0; /* Globals for random boolean generator for elements in free list */ -#define MAX_ENTROPY_PER_ZCRAM 4 +#define MAX_ENTROPY_PER_ZCRAM 4 /* VM region for all metadata structures */ -vm_offset_t zone_metadata_region_min = 0; -vm_offset_t zone_metadata_region_max = 0; -decl_lck_mtx_data(static ,zone_metadata_region_lck) +vm_offset_t zone_metadata_region_min = 0; +vm_offset_t zone_metadata_region_max = 0; +decl_lck_mtx_data(static, zone_metadata_region_lck) lck_attr_t zone_metadata_lock_attr; -lck_mtx_ext_t zone_metadata_region_lck_ext; +lck_mtx_ext_t zone_metadata_region_lck_ext; /* Helpful for walking through a zone's free element list. */ struct zone_free_element { @@ -386,14 +396,15 @@ bool use_caching = FALSE; bool cache_all_zones = FALSE; /* - * Specifies a single zone to enable CPU caching for. 
- * Can be set using boot-args: zcc_enable_for_zone_name= + * Specifies a single zone to enable CPU caching for. + * Can be set using boot-args: zcc_enable_for_zone_name= */ static char cache_zone_name[MAX_ZONE_NAME]; -static inline bool zone_caching_enabled(zone_t z) +static inline bool +zone_caching_enabled(zone_t z) { - return (z->cpu_cache_enabled && !z->tags && !z->zleak_on); + return z->cpu_cache_enabled && !z->tags && !z->zleak_on; } #endif /* CONFIG_ZCACHE */ @@ -422,65 +433,65 @@ static boolean_t zone_test_running = FALSE; static zone_t test_zone_ptr = NULL; #endif /* DEBUG || DEVELOPMENT */ -#define PAGE_METADATA_GET_ZINDEX(page_meta) \ +#define PAGE_METADATA_GET_ZINDEX(page_meta) \ (page_meta->zindex) -#define PAGE_METADATA_GET_ZONE(page_meta) \ +#define PAGE_METADATA_GET_ZONE(page_meta) \ (&(zone_array[page_meta->zindex])) -#define PAGE_METADATA_SET_ZINDEX(page_meta, index) \ +#define PAGE_METADATA_SET_ZINDEX(page_meta, index) \ page_meta->zindex = (index); struct zone_page_metadata { - queue_chain_t pages; /* linkage pointer for metadata lists */ + queue_chain_t pages; /* linkage pointer for metadata lists */ /* Union for maintaining start of element free list and real metadata (for multipage allocations) */ union { - /* - * The start of the freelist can be maintained as a 32-bit offset instead of a pointer because - * the free elements would be at max ZONE_MAX_ALLOC_SIZE bytes away from the metadata. Offset + /* + * The start of the freelist can be maintained as a 32-bit offset instead of a pointer because + * the free elements would be at max ZONE_MAX_ALLOC_SIZE bytes away from the metadata. Offset * from start of the allocation chunk to free element list head. */ - uint32_t freelist_offset; - /* - * This field is used to lookup the real metadata for multipage allocations, where we mark the - * metadata for all pages except the first as "fake" metadata using MULTIPAGE_METADATA_MAGIC. + uint32_t freelist_offset; + /* + * This field is used to lookup the real metadata for multipage allocations, where we mark the + * metadata for all pages except the first as "fake" metadata using MULTIPAGE_METADATA_MAGIC. * Offset from this fake metadata to real metadata of allocation chunk (-ve offset). */ - uint32_t real_metadata_offset; + uint32_t real_metadata_offset; }; - /* - * For the first page in the allocation chunk, this represents the total number of free elements in - * the chunk. + /* + * For the first page in the allocation chunk, this represents the total number of free elements in + * the chunk. 
*/ - uint16_t free_count; - unsigned zindex : ZINDEX_BITS; /* Zone index within the zone_array */ - unsigned page_count : PAGECOUNT_BITS; /* Count of pages within the allocation chunk */ + uint16_t free_count; + unsigned zindex : ZINDEX_BITS; /* Zone index within the zone_array */ + unsigned page_count : PAGECOUNT_BITS; /* Count of pages within the allocation chunk */ }; /* Macro to get page index (within zone_map) of page containing element */ -#define PAGE_INDEX_FOR_ELEMENT(element) \ +#define PAGE_INDEX_FOR_ELEMENT(element) \ (((vm_offset_t)trunc_page(element) - zone_map_min_address) / PAGE_SIZE) /* Macro to get metadata structure given a page index in zone_map */ -#define PAGE_METADATA_FOR_PAGE_INDEX(index) \ +#define PAGE_METADATA_FOR_PAGE_INDEX(index) \ (zone_metadata_region_min + ((index) * sizeof(struct zone_page_metadata))) /* Macro to get index (within zone_map) for given metadata */ -#define PAGE_INDEX_FOR_METADATA(page_meta) \ +#define PAGE_INDEX_FOR_METADATA(page_meta) \ (((vm_offset_t)page_meta - zone_metadata_region_min) / sizeof(struct zone_page_metadata)) /* Macro to get page for given page index in zone_map */ -#define PAGE_FOR_PAGE_INDEX(index) \ - (zone_map_min_address + (PAGE_SIZE * (index))) +#define PAGE_FOR_PAGE_INDEX(index) \ + (zone_map_min_address + (PAGE_SIZE * (index))) /* Macro to get the actual metadata for a given address */ -#define PAGE_METADATA_FOR_ELEMENT(element) \ +#define PAGE_METADATA_FOR_ELEMENT(element) \ (struct zone_page_metadata *)(PAGE_METADATA_FOR_PAGE_INDEX(PAGE_INDEX_FOR_ELEMENT(element))) /* Magic value to indicate empty element free list */ -#define PAGE_METADATA_EMPTY_FREELIST ((uint32_t)(~0)) +#define PAGE_METADATA_EMPTY_FREELIST ((uint32_t)(~0)) vm_map_copy_t create_vm_map_copy(vm_offset_t start_addr, vm_size_t total_size, vm_size_t used_size); boolean_t get_zone_info(zone_t z, mach_zone_name_t *zn, mach_zone_info_t *zi); @@ -491,13 +502,14 @@ static inline void * page_metadata_get_freelist(struct zone_page_metadata *page_meta) { assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); - if (page_meta->freelist_offset == PAGE_METADATA_EMPTY_FREELIST) + if (page_meta->freelist_offset == PAGE_METADATA_EMPTY_FREELIST) { return NULL; - else { - if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) + } else { + if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) { return (void *)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta)) + page_meta->freelist_offset); - else + } else { return (void *)((vm_offset_t)page_meta + page_meta->freelist_offset); + } } } @@ -505,13 +517,14 @@ static inline void page_metadata_set_freelist(struct zone_page_metadata *page_meta, void *addr) { assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); - if (addr == NULL) + if (addr == NULL) { page_meta->freelist_offset = PAGE_METADATA_EMPTY_FREELIST; - else { - if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) + } else { + if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) { page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta))); - else + } else { page_meta->freelist_offset = (uint32_t)((vm_offset_t)(addr) - (vm_offset_t)page_meta); + } } } @@ -522,36 +535,36 @@ page_metadata_get_realmeta(struct zone_page_metadata *page_meta) return (struct zone_page_metadata *)((vm_offset_t)page_meta - page_meta->real_metadata_offset); } -static inline void +static inline void page_metadata_set_realmeta(struct zone_page_metadata 
*page_meta, struct zone_page_metadata *real_meta) { - assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC); - assert(PAGE_METADATA_GET_ZINDEX(real_meta) != MULTIPAGE_METADATA_MAGIC); - assert((vm_offset_t)page_meta > (vm_offset_t)real_meta); - vm_offset_t offset = (vm_offset_t)page_meta - (vm_offset_t)real_meta; - assert(offset <= UINT32_MAX); - page_meta->real_metadata_offset = (uint32_t)offset; + assert(PAGE_METADATA_GET_ZINDEX(page_meta) == MULTIPAGE_METADATA_MAGIC); + assert(PAGE_METADATA_GET_ZINDEX(real_meta) != MULTIPAGE_METADATA_MAGIC); + assert((vm_offset_t)page_meta > (vm_offset_t)real_meta); + vm_offset_t offset = (vm_offset_t)page_meta - (vm_offset_t)real_meta; + assert(offset <= UINT32_MAX); + page_meta->real_metadata_offset = (uint32_t)offset; } /* The backup pointer is stored in the last pointer-sized location in an element. */ static inline vm_offset_t * get_backup_ptr(vm_size_t elem_size, - vm_offset_t *element) + vm_offset_t *element) { return (vm_offset_t *) ((vm_offset_t)element + elem_size - sizeof(vm_offset_t)); } -/* +/* * Routine to populate a page backing metadata in the zone_metadata_region. - * Must be called without the zone lock held as it might potentially block. + * Must be called without the zone lock held as it might potentially block. */ static inline void -zone_populate_metadata_page(struct zone_page_metadata *page_meta) +zone_populate_metadata_page(struct zone_page_metadata *page_meta) { vm_offset_t page_metadata_begin = trunc_page(page_meta); vm_offset_t page_metadata_end = trunc_page((vm_offset_t)page_meta + sizeof(struct zone_page_metadata)); - for(;page_metadata_begin <= page_metadata_end; page_metadata_begin += PAGE_SIZE) { + for (; page_metadata_begin <= page_metadata_end; page_metadata_begin += PAGE_SIZE) { #if !KASAN /* * This can race with another thread doing a populate on the same metadata @@ -559,17 +572,18 @@ zone_populate_metadata_page(struct zone_page_metadata *page_meta) * fault in the shadow when we first access the metadata page. Avoid this * by always synchronizing on the zone_metadata_region lock with KASan. */ - if (pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) + if (pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) { continue; + } #endif /* All updates to the zone_metadata_region are done under the zone_metadata_region_lck */ lck_mtx_lock(&zone_metadata_region_lck); if (0 == pmap_find_phys(kernel_pmap, (vm_map_address_t)page_metadata_begin)) { kern_return_t __assert_only ret = kernel_memory_populate(zone_map, - page_metadata_begin, - PAGE_SIZE, - KMA_KOBJECT, - VM_KERN_MEMORY_OSFMK); + page_metadata_begin, + PAGE_SIZE, + KMA_KOBJECT, + VM_KERN_MEMORY_OSFMK); /* should not fail with the given arguments */ assert(ret == KERN_SUCCESS); @@ -582,13 +596,13 @@ zone_populate_metadata_page(struct zone_page_metadata *page_meta) static inline uint16_t get_metadata_alloc_count(struct zone_page_metadata *page_meta) { - assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); - struct zone *z = PAGE_METADATA_GET_ZONE(page_meta); - return ((page_meta->page_count * PAGE_SIZE) / z->elem_size); + assert(PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC); + struct zone *z = PAGE_METADATA_GET_ZONE(page_meta); + return (page_meta->page_count * PAGE_SIZE) / z->elem_size; } -/* - * Routine to lookup metadata for any given address. +/* + * Routine to lookup metadata for any given address. 
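/*
 * page_metadata_get_freelist()/page_metadata_set_freelist() above store the
 * freelist head as a 32-bit offset rather than a full pointer, since a free
 * element sits at most ZONE_MAX_ALLOC_SIZE bytes from the chunk base, and an
 * all-ones sentinel marks an empty list. A freestanding sketch of that
 * encoding under a fabricated chunk layout; only the offset arithmetic
 * matches the kernel logic.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define EMPTY_FREELIST ((uint32_t)~0)  /* like PAGE_METADATA_EMPTY_FREELIST */

struct meta {
    uint32_t freelist_offset;    /* head as offset from chunk base, or ~0 */
};

/* Encode: a 4-byte offset replaces an 8-byte pointer in the metadata. */
static void
set_freelist(struct meta *m, char *chunk_base, void *head)
{
    if (head == NULL) {
        m->freelist_offset = EMPTY_FREELIST;
        return;
    }
    uintptr_t off = (uintptr_t)head - (uintptr_t)chunk_base;
    assert(off < EMPTY_FREELIST);    /* must fit below the sentinel */
    m->freelist_offset = (uint32_t)off;
}

/* Decode: rebuild the full pointer from the chunk base. */
static void *
get_freelist(const struct meta *m, char *chunk_base)
{
    if (m->freelist_offset == EMPTY_FREELIST) {
        return NULL;
    }
    return chunk_base + m->freelist_offset;
}

int
main(void)
{
    char *chunk = malloc(4096);      /* stands in for an allocation chunk */
    struct meta m;

    set_freelist(&m, chunk, chunk + 256);
    printf("offset %u -> %p (expected %p)\n",
        m.freelist_offset, get_freelist(&m, chunk), (void *)(chunk + 256));

    set_freelist(&m, chunk, NULL);
    assert(get_freelist(&m, chunk) == NULL);
    free(chunk);
    return 0;
}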
* If init is marked as TRUE, this should be called without holding the zone lock * since the initialization might block. */ @@ -597,27 +611,29 @@ get_zone_page_metadata(struct zone_free_element *element, boolean_t init) { struct zone_page_metadata *page_meta = 0; - if (from_zone_map(element, sizeof(struct zone_free_element))) { + if (from_zone_map(element, sizeof(struct zone_free_element))) { page_meta = (struct zone_page_metadata *)(PAGE_METADATA_FOR_ELEMENT(element)); - if (init) + if (init) { zone_populate_metadata_page(page_meta); + } } else { page_meta = (struct zone_page_metadata *)(trunc_page((vm_offset_t)element)); } if (init) { bzero((char *)page_meta, sizeof(struct zone_page_metadata)); } - return ((PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC) ? page_meta : page_metadata_get_realmeta(page_meta)); + return (PAGE_METADATA_GET_ZINDEX(page_meta) != MULTIPAGE_METADATA_MAGIC) ? page_meta : page_metadata_get_realmeta(page_meta); } /* Routine to get the page for a given metadata */ static inline vm_offset_t get_zone_page(struct zone_page_metadata *page_meta) { - if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) + if (from_zone_map(page_meta, sizeof(struct zone_page_metadata))) { return (vm_offset_t)(PAGE_FOR_PAGE_INDEX(PAGE_INDEX_FOR_METADATA(page_meta))); - else + } else { return (vm_offset_t)(trunc_page(page_meta)); + } } /* @@ -638,14 +654,14 @@ get_zone_page(struct zone_page_metadata *page_meta) // pointer to the tag for an element #define ZTAG(zone, element) \ ({ \ - vm_tag_t * result; \ - if ((zone)->tags_inline) { \ - result = (vm_tag_t *) ZTAGBASE((zone), (element)); \ - if ((page_mask & element) >= (zone)->elem_size) result++; \ - } else { \ - result = &((vm_tag_t *)zone_tags_min)[ZTAGBASE((zone), (element))[0] + ((element) & page_mask) / (zone)->elem_size]; \ - } \ - result; \ + vm_tag_t * result; \ + if ((zone)->tags_inline) { \ + result = (vm_tag_t *) ZTAGBASE((zone), (element)); \ + if ((page_mask & element) >= (zone)->elem_size) result++; \ + } else { \ + result = &((vm_tag_t *)zone_tags_min)[ZTAGBASE((zone), (element))[0] + ((element) & page_mask) / (zone)->elem_size]; \ + } \ + result; \ }) @@ -661,21 +677,19 @@ static vm_map_t zone_tags_map; // simple heap allocator for allocating the tags for new memory -decl_lck_mtx_data(,ztLock) /* heap lock */ -enum -{ - ztFreeIndexCount = 8, - ztFreeIndexMax = (ztFreeIndexCount - 1), - ztTagsPerBlock = 4 +decl_lck_mtx_data(, ztLock) /* heap lock */ +enum{ + ztFreeIndexCount = 8, + ztFreeIndexMax = (ztFreeIndexCount - 1), + ztTagsPerBlock = 4 }; -struct ztBlock -{ +struct ztBlock { #if __LITTLE_ENDIAN__ - uint64_t free:1, - next:21, - prev:21, - size:21; + uint64_t free:1, + next:21, + prev:21, + size:21; #else // ztBlock needs free bit least significant #error !__LITTLE_ENDIAN__ @@ -690,54 +704,56 @@ static uint32_t ztBlocksFree; static uint32_t ztLog2up(uint32_t size) { - if (1 == size) size = 0; - else size = 32 - __builtin_clz(size - 1); - return (size); + if (1 == size) { + size = 0; + } else { + size = 32 - __builtin_clz(size - 1); + } + return size; } static uint32_t ztLog2down(uint32_t size) { - size = 31 - __builtin_clz(size); - return (size); + size = 31 - __builtin_clz(size); + return size; } static void ztFault(vm_map_t map, const void * address, size_t size, uint32_t flags) { - vm_map_offset_t addr = (vm_map_offset_t) address; - vm_map_offset_t page, end; - - page = trunc_page(addr); - end = round_page(addr + size); - - for (; page < end; page += page_size) - { - if 
(!pmap_find_phys(kernel_pmap, page)) - { - kern_return_t __unused - ret = kernel_memory_populate(map, page, PAGE_SIZE, - KMA_KOBJECT | flags, VM_KERN_MEMORY_DIAG); - assert(ret == KERN_SUCCESS); - } - } + vm_map_offset_t addr = (vm_map_offset_t) address; + vm_map_offset_t page, end; + + page = trunc_page(addr); + end = round_page(addr + size); + + for (; page < end; page += page_size) { + if (!pmap_find_phys(kernel_pmap, page)) { + kern_return_t __unused + ret = kernel_memory_populate(map, page, PAGE_SIZE, + KMA_KOBJECT | flags, VM_KERN_MEMORY_DIAG); + assert(ret == KERN_SUCCESS); + } + } } static boolean_t ztPresent(const void * address, size_t size) { - vm_map_offset_t addr = (vm_map_offset_t) address; - vm_map_offset_t page, end; - boolean_t result; - - page = trunc_page(addr); - end = round_page(addr + size); - for (result = TRUE; (page < end); page += page_size) - { - result = pmap_find_phys(kernel_pmap, page); - if (!result) break; - } - return (result); + vm_map_offset_t addr = (vm_map_offset_t) address; + vm_map_offset_t page, end; + boolean_t result; + + page = trunc_page(addr); + end = round_page(addr + size); + for (result = TRUE; (page < end); page += page_size) { + result = pmap_find_phys(kernel_pmap, page); + if (!result) { + break; + } + } + return result; } @@ -746,40 +762,43 @@ ztDump(boolean_t sanity); void __unused ztDump(boolean_t sanity) { - uint32_t q, cq, p; - - for (q = 0; q <= ztFreeIndexMax; q++) - { - p = q; - do - { - if (sanity) - { - cq = ztLog2down(ztBlocks[p].size); - if (cq > ztFreeIndexMax) cq = ztFreeIndexMax; - if (!ztBlocks[p].free - || ((p != q) && (q != cq)) - || (ztBlocks[ztBlocks[p].next].prev != p) - || (ztBlocks[ztBlocks[p].prev].next != p)) - { - kprintf("zterror at %d", p); - ztDump(FALSE); - kprintf("zterror at %d", p); - assert(FALSE); - } - continue; - } - kprintf("zt[%03d]%c %d, %d, %d\n", - p, ztBlocks[p].free ? 'F' : 'A', - ztBlocks[p].next, ztBlocks[p].prev, - ztBlocks[p].size); - p = ztBlocks[p].next; - if (p == q) break; - } - while (p != q); - if (!sanity) printf("\n"); - } - if (!sanity) printf("-----------------------\n"); + uint32_t q, cq, p; + + for (q = 0; q <= ztFreeIndexMax; q++) { + p = q; + do{ + if (sanity) { + cq = ztLog2down(ztBlocks[p].size); + if (cq > ztFreeIndexMax) { + cq = ztFreeIndexMax; + } + if (!ztBlocks[p].free + || ((p != q) && (q != cq)) + || (ztBlocks[ztBlocks[p].next].prev != p) + || (ztBlocks[ztBlocks[p].prev].next != p)) { + kprintf("zterror at %d", p); + ztDump(FALSE); + kprintf("zterror at %d", p); + assert(FALSE); + } + continue; + } + kprintf("zt[%03d]%c %d, %d, %d\n", + p, ztBlocks[p].free ? 
'F' : 'A', + ztBlocks[p].next, ztBlocks[p].prev, + ztBlocks[p].size); + p = ztBlocks[p].next; + if (p == q) { + break; + } + }while (p != q); + if (!sanity) { + printf("\n"); + } + } + if (!sanity) { + printf("-----------------------\n"); + } } @@ -791,254 +810,267 @@ ztDump(boolean_t sanity) static void ztFree(zone_t zone __unused, uint32_t index, uint32_t count) { - uint32_t q, w, p, size, merge; - - assert(count); - ztBlocksFree += count; - - // merge with preceding - merge = (index + count); - if ((merge < ztBlocksCount) - && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge])) - && ztBlocks[merge].free) - { - ZTBDEQ(merge); - count += ztBlocks[merge].size; - } - - // merge with following - merge = (index - 1); - if ((merge > ztFreeIndexMax) - && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge])) - && ztBlocks[merge].free) - { - size = ztBlocks[merge].size; - count += size; - index -= size; - ZTBDEQ(index); - } - - q = ztLog2down(count); - if (q > ztFreeIndexMax) q = ztFreeIndexMax; - w = q; - // queue in order of size - while (TRUE) - { - p = ztBlocks[w].next; - if (p == q) break; - if (ztBlocks[p].size >= count) break; - w = p; - } - ztBlocks[p].prev = index; - ztBlocks[w].next = index; - - // fault in first - ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0); - - // mark first & last with free flag and size - ztBlocks[index].free = TRUE; - ztBlocks[index].size = count; - ztBlocks[index].prev = w; - ztBlocks[index].next = p; - if (count > 1) - { - index += (count - 1); - // fault in last - ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0); - ztBlocks[index].free = TRUE; - ztBlocks[index].size = count; - } + uint32_t q, w, p, size, merge; + + assert(count); + ztBlocksFree += count; + + // merge with preceding + merge = (index + count); + if ((merge < ztBlocksCount) + && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge])) + && ztBlocks[merge].free) { + ZTBDEQ(merge); + count += ztBlocks[merge].size; + } + + // merge with following + merge = (index - 1); + if ((merge > ztFreeIndexMax) + && ztPresent(&ztBlocks[merge], sizeof(ztBlocks[merge])) + && ztBlocks[merge].free) { + size = ztBlocks[merge].size; + count += size; + index -= size; + ZTBDEQ(index); + } + + q = ztLog2down(count); + if (q > ztFreeIndexMax) { + q = ztFreeIndexMax; + } + w = q; + // queue in order of size + while (TRUE) { + p = ztBlocks[w].next; + if (p == q) { + break; + } + if (ztBlocks[p].size >= count) { + break; + } + w = p; + } + ztBlocks[p].prev = index; + ztBlocks[w].next = index; + + // fault in first + ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0); + + // mark first & last with free flag and size + ztBlocks[index].free = TRUE; + ztBlocks[index].size = count; + ztBlocks[index].prev = w; + ztBlocks[index].next = p; + if (count > 1) { + index += (count - 1); + // fault in last + ztFault(zone_tags_map, &ztBlocks[index], sizeof(ztBlocks[index]), 0); + ztBlocks[index].free = TRUE; + ztBlocks[index].size = count; + } } static uint32_t ztAlloc(zone_t zone, uint32_t count) { - uint32_t q, w, p, leftover; - - assert(count); - - q = ztLog2up(count); - if (q > ztFreeIndexMax) q = ztFreeIndexMax; - do - { - w = q; - while (TRUE) - { - p = ztBlocks[w].next; - if (p == q) break; - if (ztBlocks[p].size >= count) - { - // dequeue, mark both ends allocated - ztBlocks[w].next = ztBlocks[p].next; - ztBlocks[ztBlocks[p].next].prev = w; - ztBlocks[p].free = FALSE; - ztBlocksFree -= ztBlocks[p].size; - if (ztBlocks[p].size > 1) ztBlocks[p + ztBlocks[p].size - 1].free = 
FALSE; - - // fault all the allocation - ztFault(zone_tags_map, &ztBlocks[p], count * sizeof(ztBlocks[p]), 0); - // mark last as allocated - if (count > 1) ztBlocks[p + count - 1].free = FALSE; - // free remainder - leftover = ztBlocks[p].size - count; - if (leftover) ztFree(zone, p + ztBlocks[p].size - leftover, leftover); - - return (p); - } - w = p; - } - q++; - } - while (q <= ztFreeIndexMax); - - return (-1U); + uint32_t q, w, p, leftover; + + assert(count); + + q = ztLog2up(count); + if (q > ztFreeIndexMax) { + q = ztFreeIndexMax; + } + do{ + w = q; + while (TRUE) { + p = ztBlocks[w].next; + if (p == q) { + break; + } + if (ztBlocks[p].size >= count) { + // dequeue, mark both ends allocated + ztBlocks[w].next = ztBlocks[p].next; + ztBlocks[ztBlocks[p].next].prev = w; + ztBlocks[p].free = FALSE; + ztBlocksFree -= ztBlocks[p].size; + if (ztBlocks[p].size > 1) { + ztBlocks[p + ztBlocks[p].size - 1].free = FALSE; + } + + // fault all the allocation + ztFault(zone_tags_map, &ztBlocks[p], count * sizeof(ztBlocks[p]), 0); + // mark last as allocated + if (count > 1) { + ztBlocks[p + count - 1].free = FALSE; + } + // free remainder + leftover = ztBlocks[p].size - count; + if (leftover) { + ztFree(zone, p + ztBlocks[p].size - leftover, leftover); + } + + return p; + } + w = p; + } + q++; + }while (q <= ztFreeIndexMax); + + return -1U; } static void ztInit(vm_size_t max_zonemap_size, lck_grp_t * group) { - kern_return_t ret; - vm_map_kernel_flags_t vmk_flags; - uint32_t idx; - - lck_mtx_init(&ztLock, group, LCK_ATTR_NULL); - - // allocate submaps VM_KERN_MEMORY_DIAG - - zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t); - vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; - vmk_flags.vmkf_permanent = TRUE; - ret = kmem_suballoc(kernel_map, &zone_tagbase_min, zone_tagbase_map_size, - FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG, - &zone_tagbase_map); - - if (ret != KERN_SUCCESS) panic("zone_init: kmem_suballoc failed"); - zone_tagbase_max = zone_tagbase_min + round_page(zone_tagbase_map_size); - - zone_tags_map_size = 2048*1024 * sizeof(vm_tag_t); - vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; - vmk_flags.vmkf_permanent = TRUE; - ret = kmem_suballoc(kernel_map, &zone_tags_min, zone_tags_map_size, - FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG, - &zone_tags_map); - - if (ret != KERN_SUCCESS) panic("zone_init: kmem_suballoc failed"); - zone_tags_max = zone_tags_min + round_page(zone_tags_map_size); - - ztBlocks = (ztBlock *) zone_tags_min; - ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock)); - - // initialize the qheads - lck_mtx_lock(&ztLock); - - ztFault(zone_tags_map, &ztBlocks[0], sizeof(ztBlocks[0]), 0); - for (idx = 0; idx < ztFreeIndexCount; idx++) - { - ztBlocks[idx].free = TRUE; - ztBlocks[idx].next = idx; - ztBlocks[idx].prev = idx; - ztBlocks[idx].size = 0; - } - // free remaining space - ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount); - - lck_mtx_unlock(&ztLock); + kern_return_t ret; + vm_map_kernel_flags_t vmk_flags; + uint32_t idx; + + lck_mtx_init(&ztLock, group, LCK_ATTR_NULL); + + // allocate submaps VM_KERN_MEMORY_DIAG + + zone_tagbase_map_size = atop(max_zonemap_size) * sizeof(uint32_t); + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_permanent = TRUE; + ret = kmem_suballoc(kernel_map, &zone_tagbase_min, zone_tagbase_map_size, + FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG, + &zone_tagbase_map); + + if (ret != KERN_SUCCESS) { + panic("zone_init: kmem_suballoc failed"); + } + zone_tagbase_max = 
zone_tagbase_min + round_page(zone_tagbase_map_size); + + zone_tags_map_size = 2048 * 1024 * sizeof(vm_tag_t); + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; + vmk_flags.vmkf_permanent = TRUE; + ret = kmem_suballoc(kernel_map, &zone_tags_min, zone_tags_map_size, + FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_DIAG, + &zone_tags_map); + + if (ret != KERN_SUCCESS) { + panic("zone_init: kmem_suballoc failed"); + } + zone_tags_max = zone_tags_min + round_page(zone_tags_map_size); + + ztBlocks = (ztBlock *) zone_tags_min; + ztBlocksCount = (uint32_t)(zone_tags_map_size / sizeof(ztBlock)); + + // initialize the qheads + lck_mtx_lock(&ztLock); + + ztFault(zone_tags_map, &ztBlocks[0], sizeof(ztBlocks[0]), 0); + for (idx = 0; idx < ztFreeIndexCount; idx++) { + ztBlocks[idx].free = TRUE; + ztBlocks[idx].next = idx; + ztBlocks[idx].prev = idx; + ztBlocks[idx].size = 0; + } + // free remaining space + ztFree(NULL, ztFreeIndexCount, ztBlocksCount - ztFreeIndexCount); + + lck_mtx_unlock(&ztLock); } static void ztMemoryAdd(zone_t zone, vm_offset_t mem, vm_size_t size) { - uint32_t * tagbase; - uint32_t count, block, blocks, idx; - size_t pages; - - pages = atop(size); - tagbase = ZTAGBASE(zone, mem); - - lck_mtx_lock(&ztLock); - - // fault tagbase - ztFault(zone_tagbase_map, tagbase, pages * sizeof(uint32_t), 0); - - if (!zone->tags_inline) - { - // allocate tags - count = (uint32_t)(size / zone->elem_size); - blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock); - block = ztAlloc(zone, blocks); - if (-1U == block) ztDump(false); - assert(-1U != block); - } - - lck_mtx_unlock(&ztLock); - - if (!zone->tags_inline) - { - // set tag base for each page - block *= ztTagsPerBlock; - for (idx = 0; idx < pages; idx++) - { - tagbase[idx] = block + (uint32_t)((ptoa(idx) + (zone->elem_size - 1)) / zone->elem_size); - } - } + uint32_t * tagbase; + uint32_t count, block, blocks, idx; + size_t pages; + + pages = atop(size); + tagbase = ZTAGBASE(zone, mem); + + lck_mtx_lock(&ztLock); + + // fault tagbase + ztFault(zone_tagbase_map, tagbase, pages * sizeof(uint32_t), 0); + + if (!zone->tags_inline) { + // allocate tags + count = (uint32_t)(size / zone->elem_size); + blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock); + block = ztAlloc(zone, blocks); + if (-1U == block) { + ztDump(false); + } + assert(-1U != block); + } + + lck_mtx_unlock(&ztLock); + + if (!zone->tags_inline) { + // set tag base for each page + block *= ztTagsPerBlock; + for (idx = 0; idx < pages; idx++) { + tagbase[idx] = block + (uint32_t)((ptoa(idx) + (zone->elem_size - 1)) / zone->elem_size); + } + } } static void ztMemoryRemove(zone_t zone, vm_offset_t mem, vm_size_t size) { - uint32_t * tagbase; - uint32_t count, block, blocks, idx; - size_t pages; - - // set tag base for each page - pages = atop(size); - tagbase = ZTAGBASE(zone, mem); - block = tagbase[0]; - for (idx = 0; idx < pages; idx++) - { - tagbase[idx] = 0xFFFFFFFF; - } - - lck_mtx_lock(&ztLock); - if (!zone->tags_inline) - { - count = (uint32_t)(size / zone->elem_size); - blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock); - assert(block != 0xFFFFFFFF); - block /= ztTagsPerBlock; - ztFree(NULL /* zone is unlocked */, block, blocks); - } - - lck_mtx_unlock(&ztLock); + uint32_t * tagbase; + uint32_t count, block, blocks, idx; + size_t pages; + + // set tag base for each page + pages = atop(size); + tagbase = ZTAGBASE(zone, mem); + block = tagbase[0]; + for (idx = 0; idx < pages; idx++) { + tagbase[idx] = 0xFFFFFFFF; + } + + lck_mtx_lock(&ztLock); + if (!zone->tags_inline) { + 
count = (uint32_t)(size / zone->elem_size); + blocks = ((count + ztTagsPerBlock - 1) / ztTagsPerBlock); + assert(block != 0xFFFFFFFF); + block /= ztTagsPerBlock; + ztFree(NULL /* zone is unlocked */, block, blocks); + } + + lck_mtx_unlock(&ztLock); } uint32_t zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size) { - zone_t z; - uint32_t idx; + zone_t z; + uint32_t idx; - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); - for (idx = 0; idx < num_zones; idx++) - { + for (idx = 0; idx < num_zones; idx++) { z = &(zone_array[idx]); - if (!z->tags) continue; - if (tag_zone_index != z->tag_zone_index) continue; - *elem_size = z->elem_size; - break; - } + if (!z->tags) { + continue; + } + if (tag_zone_index != z->tag_zone_index) { + continue; + } + *elem_size = z->elem_size; + break; + } - simple_unlock(&all_zones_lock); + simple_unlock(&all_zones_lock); - if (idx == num_zones) idx = -1U; + if (idx == num_zones) { + idx = -1U; + } - return (idx); + return idx; } #endif /* VM_MAX_TAG_ZONES */ -/* Routine to get the size of a zone allocated address. If the address doesnt belong to the +/* Routine to get the size of a zone allocated address. If the address doesnt belong to the * zone_map, returns 0. */ vm_size_t @@ -1051,7 +1083,7 @@ zone_element_size(void *addr, zone_t *z) if (z) { *z = src_zone; } - return (src_zone->elem_size); + return src_zone->elem_size; } else { #if CONFIG_GZALLOC vm_size_t gzsize; @@ -1077,9 +1109,9 @@ zone_element_info(void *addr, vm_tag_t * ptag) struct zone_page_metadata *page_meta = get_zone_page_metadata((struct zone_free_element *)addr, FALSE); src_zone = PAGE_METADATA_GET_ZONE(page_meta); #if VM_MAX_TAG_ZONES - if (__improbable(src_zone->tags)) { + if (__improbable(src_zone->tags)) { tag = (ZTAG(src_zone, (vm_offset_t) addr)[0] >> 1); - } + } #endif /* VM_MAX_TAG_ZONES */ size = src_zone->elem_size; } else { @@ -1099,17 +1131,19 @@ zone_element_info(void *addr, vm_tag_t * ptag) * A pointer that doesn't satisfy these conditions indicates corruption */ static inline boolean_t -is_sane_zone_ptr(zone_t zone, - vm_offset_t addr, - size_t obj_size) +is_sane_zone_ptr(zone_t zone, + vm_offset_t addr, + size_t obj_size) { /* Must be aligned to pointer boundary */ - if (__improbable((addr & (sizeof(vm_offset_t) - 1)) != 0)) + if (__improbable((addr & (sizeof(vm_offset_t) - 1)) != 0)) { return FALSE; + } /* Must be a kernel address */ - if (__improbable(!pmap_kernel_va(addr))) + if (__improbable(!pmap_kernel_va(addr))) { return FALSE; + } /* Must be from zone map if the zone only uses memory from the zone_map */ /* @@ -1118,9 +1152,10 @@ is_sane_zone_ptr(zone_t zone, */ if (zone->collectable && !zone->allows_foreign) { /* check if addr is from zone map */ - if (addr >= zone_map_min_address && - (addr + obj_size - 1) < zone_map_max_address ) + if (addr >= zone_map_min_address && + (addr + obj_size - 1) < zone_map_max_address) { return TRUE; + } return FALSE; } @@ -1129,43 +1164,46 @@ is_sane_zone_ptr(zone_t zone, } static inline boolean_t -is_sane_zone_page_metadata(zone_t zone, - vm_offset_t page_meta) +is_sane_zone_page_metadata(zone_t zone, + vm_offset_t page_meta) { /* NULL page metadata structures are invalid */ - if (page_meta == 0) + if (page_meta == 0) { return FALSE; + } return is_sane_zone_ptr(zone, page_meta, sizeof(struct zone_page_metadata)); } static inline boolean_t is_sane_zone_element(zone_t zone, - vm_offset_t addr) + vm_offset_t addr) { /* NULL is OK because it indicates the tail of the list */ - if (addr == 0) + 
if (addr == 0) { return TRUE; + } return is_sane_zone_ptr(zone, addr, zone->elem_size); } - + /* Someone wrote to freed memory. */ -static inline void /* noreturn */ +static inline void +/* noreturn */ zone_element_was_modified_panic(zone_t zone, - vm_offset_t element, - vm_offset_t found, - vm_offset_t expected, - vm_offset_t offset) + vm_offset_t element, + vm_offset_t found, + vm_offset_t expected, + vm_offset_t offset) { panic("a freed zone element has been modified in zone %s: expected %p but found %p, bits changed %p, at offset %d of %d in element %p, cookies %p %p", - zone->zone_name, - (void *) expected, - (void *) found, - (void *) (expected ^ found), - (uint32_t) offset, - (uint32_t) zone->elem_size, - (void *) element, - (void *) zp_nopoison_cookie, - (void *) zp_poisoned_cookie); + zone->zone_name, + (void *) expected, + (void *) found, + (void *) (expected ^ found), + (uint32_t) offset, + (uint32_t) zone->elem_size, + (void *) element, + (void *) zp_nopoison_cookie, + (void *) zp_poisoned_cookie); } /* @@ -1174,11 +1212,12 @@ zone_element_was_modified_panic(zone_t zone, * probably should have been, and panic. * I would like to mark this as noreturn, but panic() isn't marked noreturn. */ -static void /* noreturn */ +static void +/* noreturn */ backup_ptr_mismatch_panic(zone_t zone, - vm_offset_t element, - vm_offset_t primary, - vm_offset_t backup) + vm_offset_t element, + vm_offset_t primary, + vm_offset_t backup) { vm_offset_t likely_backup; vm_offset_t likely_primary; @@ -1190,10 +1229,11 @@ backup_ptr_mismatch_panic(zone_t zone, #if defined(__LP64__) /* We can inspect the tag in the upper bits for additional confirmation */ - if ((backup & 0xFFFFFF0000000000) == 0xFACADE0000000000) + if ((backup & 0xFFFFFF0000000000) == 0xFACADE0000000000) { element_was_poisoned = TRUE; - else if ((backup & 0xFFFFFF0000000000) == 0xC0FFEE0000000000) + } else if ((backup & 0xFFFFFF0000000000) == 0xC0FFEE0000000000) { element_was_poisoned = FALSE; + } #endif if (element_was_poisoned) { @@ -1205,14 +1245,16 @@ backup_ptr_mismatch_panic(zone_t zone, } /* The primary is definitely the corrupted one */ - if (!sane_primary && sane_backup) + if (!sane_primary && sane_backup) { zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0); + } /* The backup is definitely the corrupted one */ - if (sane_primary && !sane_backup) + if (sane_primary && !sane_backup) { zone_element_was_modified_panic(zone, element, backup, - (likely_primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)), - zone->elem_size - sizeof(vm_offset_t)); + (likely_primary ^ (element_was_poisoned ? zp_poisoned_cookie : zp_nopoison_cookie)), + zone->elem_size - sizeof(vm_offset_t)); + } /* * Not sure which is the corrupted one. @@ -1220,8 +1262,9 @@ backup_ptr_mismatch_panic(zone_t zone, * ( (sane address) ^ (valid cookie) ), so we'll guess that the * primary pointer has been overwritten with a sane but incorrect address. */ - if (sane_primary && sane_backup) + if (sane_primary && sane_backup) { zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0); + } /* Neither are sane, so just guess. 
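/*
 * is_sane_zone_ptr() above rejects a candidate element unless it is
 * pointer-aligned, a kernel address, and, for zones that never accept
 * foreign memory, wholly inside the zone map. A user-space sketch of the
 * same checks; the [map_min, map_max) window and the test addresses are
 * invented, and the kernel-VA test is folded into the range check.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for zone_map_min_address / zone_map_max_address. */
static const uintptr_t map_min = 0x100000;
static const uintptr_t map_max = 0x200000;

static bool
is_sane_ptr(uintptr_t addr, size_t obj_size, bool must_be_in_map)
{
    /* Must be aligned to a pointer boundary. */
    if ((addr & (sizeof(void *) - 1)) != 0) {
        return false;
    }
    /*
     * Must lie wholly inside the map when foreign memory is disallowed;
     * checking the last byte too catches objects straddling the edge.
     */
    if (must_be_in_map &&
        !(addr >= map_min && addr + obj_size - 1 < map_max)) {
        return false;
    }
    return true;
}

int
main(void)
{
    printf("%d\n", is_sane_ptr(0x100008, 64, true));  /* 1: aligned, inside */
    printf("%d\n", is_sane_ptr(0x100009, 64, true));  /* 0: misaligned */
    printf("%d\n", is_sane_ptr(0x1ffff8, 64, true));  /* 0: tail past map */
    return 0;
}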
*/ zone_element_was_modified_panic(zone, element, primary, (likely_backup ^ zp_nopoison_cookie), 0); @@ -1233,8 +1276,8 @@ backup_ptr_mismatch_panic(zone_t zone, */ static inline void free_to_zone(zone_t zone, - vm_offset_t element, - boolean_t poison) + vm_offset_t element, + boolean_t poison) { vm_offset_t old_head; struct zone_page_metadata *page_meta; @@ -1246,17 +1289,20 @@ free_to_zone(zone_t zone, assert(PAGE_METADATA_GET_ZONE(page_meta) == zone); old_head = (vm_offset_t)page_metadata_get_freelist(page_meta); - if (__improbable(!is_sane_zone_element(zone, old_head))) + if (__improbable(!is_sane_zone_element(zone, old_head))) { panic("zfree: invalid head pointer %p for freelist of zone %s\n", - (void *) old_head, zone->zone_name); + (void *) old_head, zone->zone_name); + } - if (__improbable(!is_sane_zone_element(zone, element))) + if (__improbable(!is_sane_zone_element(zone, element))) { panic("zfree: freeing invalid pointer %p to zone %s\n", - (void *) element, zone->zone_name); + (void *) element, zone->zone_name); + } - if (__improbable(old_head == element)) + if (__improbable(old_head == element)) { panic("zfree: double free of %p to zone %s\n", - (void *) element, zone->zone_name); + (void *) element, zone->zone_name); + } /* * Always write a redundant next pointer * So that it is more difficult to forge, xor it with a random cookie @@ -1266,9 +1312,9 @@ free_to_zone(zone_t zone, *backup = old_head ^ (poison ? zp_poisoned_cookie : zp_nopoison_cookie); - /* - * Insert this element at the head of the free list. We also xor the - * primary pointer with the zp_nopoison_cookie to make sure a free + /* + * Insert this element at the head of the free list. We also xor the + * primary pointer with the zp_nopoison_cookie to make sure a free * element does not provide the location of the next free element directly. 
*/ *primary = old_head ^ zp_nopoison_cookie; @@ -1305,8 +1351,8 @@ free_to_zone(zone_t zone, */ static inline vm_offset_t try_alloc_from_zone(zone_t zone, - vm_tag_t tag __unused, - boolean_t* check_poison) + vm_tag_t tag __unused, + boolean_t* check_poison) { vm_offset_t element; struct zone_page_metadata *page_meta; @@ -1314,11 +1360,11 @@ try_alloc_from_zone(zone_t zone, *check_poison = FALSE; /* if zone is empty, bail */ - if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign)) + if (zone->allows_foreign && !queue_empty(&zone->pages.any_free_foreign)) { page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign); - else if (!queue_empty(&zone->pages.intermediate)) + } else if (!queue_empty(&zone->pages.intermediate)) { page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate); - else if (!queue_empty(&zone->pages.all_free)) { + } else if (!queue_empty(&zone->pages.all_free)) { page_meta = (struct zone_page_metadata *)queue_first(&zone->pages.all_free); assert(zone->count_all_free_pages >= page_meta->page_count); zone->count_all_free_pages -= page_meta->page_count; @@ -1326,20 +1372,22 @@ try_alloc_from_zone(zone_t zone, return 0; } /* Check if page_meta passes is_sane_zone_element */ - if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta))) + if (__improbable(!is_sane_zone_page_metadata(zone, (vm_offset_t)page_meta))) { panic("zalloc: invalid metadata structure %p for freelist of zone %s\n", - (void *) page_meta, zone->zone_name); + (void *) page_meta, zone->zone_name); + } assert(PAGE_METADATA_GET_ZONE(page_meta) == zone); element = (vm_offset_t)page_metadata_get_freelist(page_meta); - if (__improbable(!is_sane_zone_ptr(zone, element, zone->elem_size))) + if (__improbable(!is_sane_zone_ptr(zone, element, zone->elem_size))) { panic("zfree: invalid head pointer %p for freelist of zone %s\n", - (void *) element, zone->zone_name); + (void *) element, zone->zone_name); + } vm_offset_t *primary = (vm_offset_t *) element; vm_offset_t *backup = get_backup_ptr(zone->elem_size, primary); - /* + /* * Since the primary next pointer is xor'ed with zp_nopoison_cookie * for obfuscation, retrieve the original value back */ @@ -1351,16 +1399,17 @@ try_alloc_from_zone(zone_t zone, * backup_ptr_mismatch_panic will determine what next_element * should have been, and print it appropriately */ - if (__improbable(!is_sane_zone_element(zone, next_element))) + if (__improbable(!is_sane_zone_element(zone, next_element))) { backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup); + } /* Check the backup pointer for the regular cookie */ if (__improbable(next_element != (next_element_backup ^ zp_nopoison_cookie))) { - /* Check for the poisoned cookie instead */ - if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie))) + if (__improbable(next_element != (next_element_backup ^ zp_poisoned_cookie))) { /* Neither cookie is valid, corruption has occurred */ backup_ptr_mismatch_panic(zone, element, next_element_primary, next_element_backup); + } /* * Element was marked as poisoned, so check its integrity before using it. 
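/*
 * free_to_zone() and try_alloc_from_zone() above never keep a raw freelist
 * pointer in a freed element: the primary next pointer is XORed with
 * zp_nopoison_cookie, and a redundant backup copy in the element's last word
 * is XORed with the poisoned or non-poisoned cookie, so a stray write to
 * either word is caught on the next allocation. A compact push/pop sketch of
 * that scheme; the cookies and element size are fabricated, and the panic
 * paths collapse to asserts.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ELEM_SIZE 64    /* room for a primary and a backup pointer */

/* Fabricated cookies; the kernel derives these from early_random(). */
static const uintptr_t nopoison_cookie = 0xC0FFEE0011223344UL;
static const uintptr_t poisoned_cookie = 0xFACADE0055667788UL;

static uintptr_t freelist_head;   /* 0 means the list is empty */

/* The backup lives in the last pointer-sized slot, as in get_backup_ptr(). */
static uintptr_t *
backup_ptr(uintptr_t elem)
{
    return (uintptr_t *)(elem + ELEM_SIZE - sizeof(uintptr_t));
}

static void
push_free(uintptr_t elem, int poisoned)
{
    *(uintptr_t *)elem = freelist_head ^ nopoison_cookie;
    *backup_ptr(elem) = freelist_head ^
        (poisoned ? poisoned_cookie : nopoison_cookie);
    freelist_head = elem;
}

static uintptr_t
pop_free(void)
{
    uintptr_t elem = freelist_head;

    assert(elem != 0);
    uintptr_t next = *(uintptr_t *)elem ^ nopoison_cookie;
    uintptr_t backup = *backup_ptr(elem);

    /* The backup must agree with the primary under one of the cookies;
     * anything else means freed memory was modified. */
    assert(next == (backup ^ nopoison_cookie) ||
        next == (backup ^ poisoned_cookie));
    freelist_head = next;
    return elem;
}

int
main(void)
{
    uintptr_t a = (uintptr_t)malloc(ELEM_SIZE);
    uintptr_t b = (uintptr_t)malloc(ELEM_SIZE);

    push_free(a, 0);
    push_free(b, 1);              /* b poisoned: backup uses the other cookie */
    assert(pop_free() == b);
    assert(pop_free() == a);
    printf("freelist round-trips under both cookies\n");
    return 0;
}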
@@ -1369,15 +1418,17 @@ try_alloc_from_zone(zone_t zone, } /* Make sure the page_meta is at the correct offset from the start of page */ - if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element, FALSE))) + if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)element, FALSE))) { panic("zalloc: Incorrect metadata %p found in zone %s page queue. Expected metadata: %p\n", - page_meta, zone->zone_name, get_zone_page_metadata((struct zone_free_element *)element, FALSE)); + page_meta, zone->zone_name, get_zone_page_metadata((struct zone_free_element *)element, FALSE)); + } /* Make sure next_element belongs to the same page as page_meta */ if (next_element) { - if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element, FALSE))) + if (__improbable(page_meta != get_zone_page_metadata((struct zone_free_element *)next_element, FALSE))) { panic("zalloc: next element pointer %p for element %p points to invalid element for zone %s\n", - (void *)next_element, (void *)element, zone->zone_name); + (void *)next_element, (void *)element, zone->zone_name); + } } /* Remove this element from the free list */ @@ -1400,10 +1451,10 @@ try_alloc_from_zone(zone_t zone, zone->sum_count++; #if VM_MAX_TAG_ZONES - if (__improbable(zone->tags)) { + if (__improbable(zone->tags)) { // set the tag with b0 clear so the block remains inuse ZTAG(zone, element)[0] = (tag << 1); - } + } #endif /* VM_MAX_TAG_ZONES */ @@ -1421,26 +1472,26 @@ try_alloc_from_zone(zone_t zone, /* * Zone info options */ -#define ZINFO_SLOTS MAX_ZONES /* for now */ +#define ZINFO_SLOTS MAX_ZONES /* for now */ -zone_t zone_find_largest(void); +zone_t zone_find_largest(void); -/* - * Async allocation of zones - * This mechanism allows for bootstrapping an empty zone which is setup with +/* + * Async allocation of zones + * This mechanism allows for bootstrapping an empty zone which is setup with * non-blocking flags. The first call to zalloc_noblock() will kick off a thread_call - * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free. + * to zalloc_async. We perform a zalloc() (which may block) and then an immediate free. * This will prime the zone for the next use. * * Currently the thread_callout function (zalloc_async) will loop through all zones - * looking for any zone with async_pending set and do the work for it. - * + * looking for any zone with async_pending set and do the work for it. + * * NOTE: If the calling thread for zalloc_noblock is lower priority than thread_call, - * then zalloc_noblock to an empty zone may succeed. + * then zalloc_noblock to an empty zone may succeed. */ -void zalloc_async( - thread_call_param_t p0, - thread_call_param_t p1); +void zalloc_async( + thread_call_param_t p0, + thread_call_param_t p1); static thread_call_data_t call_async_alloc; @@ -1450,26 +1501,18 @@ static thread_call_data_t call_async_alloc; #define ZONE_ELEMENT_ALIGNMENT 32 #define zone_wakeup(zone) thread_wakeup((event_t)(zone)) -#define zone_sleep(zone) \ +#define zone_sleep(zone) \ (void) lck_mtx_sleep(&(zone)->lock, LCK_SLEEP_SPIN_ALWAYS, (event_t)(zone), THREAD_UNINT); -/* - * The zone_locks_grp allows for collecting lock statistics. - * All locks are associated to this group in zinit. - * Look at tools/lockstat for debugging lock contention. 
- */ - -lck_grp_t zone_locks_grp; -lck_grp_attr_t zone_locks_grp_attr; -#define lock_zone_init(zone) \ -MACRO_BEGIN \ - lck_attr_setdefault(&(zone)->lock_attr); \ - lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \ - &zone_locks_grp, &(zone)->lock_attr); \ +#define lock_zone_init(zone) \ +MACRO_BEGIN \ + lck_attr_setdefault(&(zone)->lock_attr); \ + lck_mtx_init_ext(&(zone)->lock, &(zone)->lock_ext, \ + &zone_locks_grp, &(zone)->lock_attr); \ MACRO_END -#define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock) +#define lock_try_zone(zone) lck_mtx_try_lock_spin(&zone->lock) /* * Exclude more than one concurrent garbage collection @@ -1487,8 +1530,8 @@ boolean_t panic_include_zprint = FALSE; mach_memory_info_t *panic_kext_memory_info = NULL; vm_size_t panic_kext_memory_size = 0; -#define ZALLOC_DEBUG_ZONEGC 0x00000001 -#define ZALLOC_DEBUG_ZCRAM 0x00000002 +#define ZALLOC_DEBUG_ZONEGC 0x00000001 +#define ZALLOC_DEBUG_ZCRAM 0x00000002 uint32_t zalloc_debug = 0; /* @@ -1500,7 +1543,7 @@ uint32_t zalloc_debug = 0; * off by default. * * Enable the logging via the boot-args. Add the parameter "zlog=" to boot-args where - * is the name of the zone you wish to log. + * is the name of the zone you wish to log. * * This code only tracks one zone, so you need to identify which one is leaking first. * Generally, you'll know you have a leak when you get a "zalloc retry failed 3" panic from the zone @@ -1522,14 +1565,14 @@ uint32_t zalloc_debug = 0; */ static boolean_t log_records_init = FALSE; -static int log_records; /* size of the log, expressed in number of records */ +static int log_records; /* size of the log, expressed in number of records */ -#define MAX_NUM_ZONES_ALLOWED_LOGGING 10 /* Maximum 10 zones can be logged at once */ +#define MAX_NUM_ZONES_ALLOWED_LOGGING 10 /* Maximum 10 zones can be logged at once */ static int max_num_zones_to_log = MAX_NUM_ZONES_ALLOWED_LOGGING; static int num_zones_logged = 0; -static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */ +static char zone_name_to_log[MAX_ZONE_NAME] = ""; /* the zone name we're logging, if any */ /* Log allocations and frees to help debug a zone element corruption */ boolean_t corruption_debug_flag = DEBUG; /* enabled by "-zc" boot-arg */ @@ -1541,17 +1584,17 @@ boolean_t leak_scan_debug_flag = FALSE; /* enabled by "-zl" boot-ar /* - * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to + * The number of records in the log is configurable via the zrecs parameter in boot-args. Set this to * the number of records you want in the log. For example, "zrecs=10" sets it to 10 records. Since this * is the number of stacks suspected of leaking, we don't need many records. 
*/ -#if defined(__LP64__) -#define ZRECORDS_MAX 2560 /* Max records allowed in the log */ +#if defined(__LP64__) +#define ZRECORDS_MAX 2560 /* Max records allowed in the log */ #else -#define ZRECORDS_MAX 1536 /* Max records allowed in the log */ +#define ZRECORDS_MAX 1536 /* Max records allowed in the log */ #endif -#define ZRECORDS_DEFAULT 1024 /* default records in log if zrecs is not specificed in boot-args */ +#define ZRECORDS_DEFAULT 1024 /* default records in log if zrecs is not specificed in boot-args */ /* * Each record in the log contains a pointer to the zone element it refers to, @@ -1581,22 +1624,23 @@ track_this_zone(const char *zonename, const char *logname) */ for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) { - /* * If the current characters don't match, check for a space in * in the zone name and a corresponding period in the log name. * If that's not there, then the strings don't match. */ - if (*zc != *lc && !(*zc == ' ' && *lc == '.')) + if (*zc != *lc && !(*zc == ' ' && *lc == '.')) { break; + } /* * The strings are equal so far. If we're at the end, then it's a match. */ - if (*zc == '\0') + if (*zc == '\0') { return TRUE; + } } return FALSE; @@ -1608,7 +1652,7 @@ track_this_zone(const char *zonename, const char *logname) * the buffer for the records has been allocated. */ -#define DO_LOGGING(z) (z->zone_logging == TRUE && z->zlog_btlog) +#define DO_LOGGING(z) (z->zone_logging == TRUE && z->zlog_btlog) extern boolean_t kmem_alloc_ready; @@ -1616,13 +1660,13 @@ extern boolean_t kmem_alloc_ready; #pragma mark - #pragma mark Zone Leak Detection -/* +/* * The zone leak detector, abbreviated 'zleak', keeps track of a subset of the currently outstanding * allocations made by the zone allocator. Every zleak_sample_factor allocations in each zone, we capture a - * backtrace. Every free, we examine the table and determine if the allocation was being tracked, + * backtrace. Every free, we examine the table and determine if the allocation was being tracked, * and stop tracking it if it was being tracked. * - * We track the allocations in the zallocations hash table, which stores the address that was returned from + * We track the allocations in the zallocations hash table, which stores the address that was returned from * the zone allocator. Each stored entry in the zallocations table points to an entry in the ztraces table, which * stores the backtrace associated with that allocation. This provides uniquing for the relatively large * backtraces - we don't store them more than once. @@ -1630,20 +1674,20 @@ extern boolean_t kmem_alloc_ready; * Data collection begins when the zone map is 50% full, and only occurs for zones that are taking up * a large amount of virtual space. */ -#define ZLEAK_STATE_ENABLED 0x01 /* Zone leak monitoring should be turned on if zone_map fills up. */ -#define ZLEAK_STATE_ACTIVE 0x02 /* We are actively collecting traces. */ -#define ZLEAK_STATE_ACTIVATING 0x04 /* Some thread is doing setup; others should move along. */ -#define ZLEAK_STATE_FAILED 0x08 /* Attempt to allocate tables failed. We will not try again. */ -uint32_t zleak_state = 0; /* State of collection, as above */ +#define ZLEAK_STATE_ENABLED 0x01 /* Zone leak monitoring should be turned on if zone_map fills up. */ +#define ZLEAK_STATE_ACTIVE 0x02 /* We are actively collecting traces. */ +#define ZLEAK_STATE_ACTIVATING 0x04 /* Some thread is doing setup; others should move along. */ +#define ZLEAK_STATE_FAILED 0x08 /* Attempt to allocate tables failed. We will not try again. 
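/*
 * track_this_zone() above compares the zlog boot-arg against zone names,
 * with the twist that a '.' in the boot-arg matches a ' ' in the zone name,
 * since boot-args cannot carry spaces. A standalone copy of that loop plus a
 * tiny harness; the zone names used here are only examples.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ZONE_NAME 80      /* stand-in for the kernel's constant */

static bool
names_match(const char *zonename, const char *logname)
{
    const char *zc = zonename;
    const char *lc = logname;

    for (unsigned len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
        if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
            break;            /* mismatch with no space/period excuse */
        }
        if (*zc == '\0') {
            return true;      /* both strings ended together: match */
        }
    }
    return false;
}

int
main(void)
{
    /* zlog=vm.objects would select a zone named "vm objects" */
    printf("%d\n", names_match("vm objects", "vm.objects"));   /* 1 */
    printf("%d\n", names_match("vm objects", "vm objects"));   /* 1 */
    printf("%d\n", names_match("vm pages", "vm.objects"));     /* 0 */
    return 0;
}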
*/ +uint32_t zleak_state = 0; /* State of collection, as above */ -boolean_t panic_include_ztrace = FALSE; /* Enable zleak logging on panic */ -vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */ -vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */ -unsigned int zleak_sample_factor = 1000; /* Allocations per sample attempt */ +boolean_t panic_include_ztrace = FALSE; /* Enable zleak logging on panic */ +vm_size_t zleak_global_tracking_threshold; /* Size of zone map at which to start collecting data */ +vm_size_t zleak_per_zone_tracking_threshold; /* Size a zone will have before we will collect data on it */ +unsigned int zleak_sample_factor = 1000; /* Allocations per sample attempt */ /* * Counters for allocation statistics. - */ + */ /* Times two active records want to occupy the same spot */ unsigned int z_alloc_collisions = 0; @@ -1654,11 +1698,11 @@ unsigned int z_alloc_overwrites = 0; unsigned int z_trace_overwrites = 0; /* Times a new alloc or trace is put into the hash table */ -unsigned int z_alloc_recorded = 0; -unsigned int z_trace_recorded = 0; +unsigned int z_alloc_recorded = 0; +unsigned int z_trace_recorded = 0; /* Times zleak_log returned false due to not being able to acquire the lock */ -unsigned int z_total_conflicts = 0; +unsigned int z_total_conflicts = 0; #pragma mark struct zallocation @@ -1667,11 +1711,11 @@ unsigned int z_total_conflicts = 0; * An allocation bucket is in use if its element is not NULL */ struct zallocation { - uintptr_t za_element; /* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */ - vm_size_t za_size; /* how much memory did this allocation take up? */ - uint32_t za_trace_index; /* index into ztraces for backtrace associated with allocation */ + uintptr_t za_element; /* the element that was zalloc'ed or zfree'ed, NULL if bucket unused */ + vm_size_t za_size; /* how much memory did this allocation take up? */ + uint32_t za_trace_index; /* index into ztraces for backtrace associated with allocation */ /* TODO: #if this out */ - uint32_t za_hit_count; /* for determining effectiveness of hash function */ + uint32_t za_hit_count; /* for determining effectiveness of hash function */ }; /* Size must be a power of two for the zhash to be able to just mask off bits instead of mod */ @@ -1681,29 +1725,29 @@ uint32_t zleak_trace_buckets = CONFIG_ZLEAK_TRACE_MAP_NUM; vm_size_t zleak_max_zonemap_size; /* Hashmaps of allocations and their corresponding traces */ -static struct zallocation* zallocations; -static struct ztrace* ztraces; +static struct zallocation* zallocations; +static struct ztrace* ztraces; /* not static so that panic can see this, see kern/debug.c */ -struct ztrace* top_ztrace; +struct ztrace* top_ztrace; /* Lock to protect zallocations, ztraces, and top_ztrace from concurrent modification. */ -static lck_spin_t zleak_lock; -static lck_attr_t zleak_lock_attr; -static lck_grp_t zleak_lock_grp; -static lck_grp_attr_t zleak_lock_grp_attr; +static lck_spin_t zleak_lock; +static lck_attr_t zleak_lock_attr; +static lck_grp_t zleak_lock_grp; +static lck_grp_attr_t zleak_lock_grp_attr; /* * Initializes the zone leak monitor. 
Called from zone_init() */ -static void -zleak_init(vm_size_t max_zonemap_size) +static void +zleak_init(vm_size_t max_zonemap_size) { - char scratch_buf[16]; - boolean_t zleak_enable_flag = FALSE; + char scratch_buf[16]; + boolean_t zleak_enable_flag = FALSE; zleak_max_zonemap_size = max_zonemap_size; - zleak_global_tracking_threshold = max_zonemap_size / 2; + zleak_global_tracking_threshold = max_zonemap_size / 2; zleak_per_zone_tracking_threshold = zleak_global_tracking_threshold / 8; #if CONFIG_EMBEDDED @@ -1724,7 +1768,7 @@ zleak_init(vm_size_t max_zonemap_size) printf("zone leak detection enabled\n"); } #endif /* CONFIG_EMBEDDED */ - + /* zfactor=XXXX (override how often to sample the zone allocator) */ if (PE_parse_boot_argn("zfactor", &zleak_sample_factor, sizeof(zleak_sample_factor))) { printf("Zone leak factor override: %u\n", zleak_sample_factor); @@ -1734,26 +1778,26 @@ zleak_init(vm_size_t max_zonemap_size) if (PE_parse_boot_argn("zleak-allocs", &zleak_alloc_buckets, sizeof(zleak_alloc_buckets))) { printf("Zone leak alloc buckets override: %u\n", zleak_alloc_buckets); /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */ - if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets-1))) { + if (zleak_alloc_buckets == 0 || (zleak_alloc_buckets & (zleak_alloc_buckets - 1))) { printf("Override isn't a power of two, bad things might happen!\n"); } } - + /* zleak-traces=XXXX (override number of buckets in ztraces) */ if (PE_parse_boot_argn("zleak-traces", &zleak_trace_buckets, sizeof(zleak_trace_buckets))) { printf("Zone leak trace buckets override: %u\n", zleak_trace_buckets); /* uses 'is power of 2' trick: (0x01000 & 0x00FFF == 0) */ - if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets-1))) { + if (zleak_trace_buckets == 0 || (zleak_trace_buckets & (zleak_trace_buckets - 1))) { printf("Override isn't a power of two, bad things might happen!\n"); } } - + /* allocate the zleak_lock */ lck_grp_attr_setdefault(&zleak_lock_grp_attr); lck_grp_init(&zleak_lock_grp, "zleak_lock", &zleak_lock_grp_attr); lck_attr_setdefault(&zleak_lock_attr); lck_spin_init(&zleak_lock, &zleak_lock_grp, &zleak_lock_attr); - + if (zleak_enable_flag) { zleak_state = ZLEAK_STATE_ENABLED; } @@ -1768,11 +1812,13 @@ zleak_init(vm_size_t max_zonemap_size) int get_zleak_state(void) { - if (zleak_state & ZLEAK_STATE_FAILED) - return (-1); - if (zleak_state & ZLEAK_STATE_ACTIVE) - return (1); - return (0); + if (zleak_state & ZLEAK_STATE_FAILED) { + return -1; + } + if (zleak_state & ZLEAK_STATE_ACTIVE) { + return 1; + } + return 0; } #endif @@ -1821,7 +1867,7 @@ zleak_activate(void) ztraces = traces_ptr; /* - * Initialize the top_ztrace to the first entry in ztraces, + * Initialize the top_ztrace to the first entry in ztraces, * so we don't have to check for null in zleak_log */ top_ztrace = &ztraces[0]; @@ -1835,10 +1881,10 @@ zleak_activate(void) zleak_state |= ZLEAK_STATE_ACTIVE; zleak_state &= ~ZLEAK_STATE_ACTIVATING; lck_spin_unlock(&zleak_lock); - + return 0; -fail: +fail: /* * If we fail to allocate memory, don't further tax * the system by trying again. @@ -1860,15 +1906,15 @@ fail: } /* - * TODO: What about allocations that never get deallocated, + * TODO: What about allocations that never get deallocated, * especially ones with unique backtraces? Should we wait to record - * until after boot has completed? + * until after boot has completed? * (How many persistent zallocs are there?) 
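[Editor's note] The "(x & (x - 1))" test that zleak_init() applies to the zleak-allocs and zleak-traces overrides above generalizes to any width. A small sketch with a few sample values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Nonzero, and no bit shared with (x - 1), means exactly one bit is set. */
static bool is_power_of_two(uint32_t x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
	uint32_t candidates[] = { 0, 1, 0x1000, 0x1800, 4096, 4097 };
	for (unsigned i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		printf("0x%05x -> %s\n", (unsigned)candidates[i],
		    is_power_of_two(candidates[i]) ? "ok" : "bad things might happen");
	}
	return 0;
}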
*/ /* - * This function records the allocation in the allocations table, - * and stores the associated backtrace in the traces table + * This function records the allocation in the allocations table, + * and stores the associated backtrace in the traces table * (or just increments the refcount if the trace is already recorded) * If the allocation slot is in use, the old allocation is replaced with the new allocation, and * the associated trace's refcount is decremented. @@ -1878,47 +1924,47 @@ fail: */ static boolean_t zleak_log(uintptr_t* bt, - uintptr_t addr, - uint32_t depth, - vm_size_t allocation_size) + uintptr_t addr, + uint32_t depth, + vm_size_t allocation_size) { /* Quit if there's someone else modifying the hash tables */ if (!lck_spin_try_lock(&zleak_lock)) { z_total_conflicts++; return FALSE; } - - struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)]; - + + struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)]; + uint32_t trace_index = hashbacktrace(bt, depth, zleak_trace_buckets); struct ztrace* trace = &ztraces[trace_index]; - + allocation->za_hit_count++; trace->zt_hit_count++; - - /* + + /* * If the allocation bucket we want to be in is occupied, and if the occupier - * has the same trace as us, just bail. + * has the same trace as us, just bail. */ if (allocation->za_element != (uintptr_t) 0 && trace_index == allocation->za_trace_index) { z_alloc_collisions++; - + lck_spin_unlock(&zleak_lock); return TRUE; } - + /* STEP 1: Store the backtrace in the traces array. */ /* A size of zero indicates that the trace bucket is free. */ - - if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0 ) { - /* + + if (trace->zt_size > 0 && bcmp(trace->zt_stack, bt, (depth * sizeof(uintptr_t))) != 0) { + /* * Different unique trace with same hash! * Just bail - if we're trying to record the leaker, hopefully the other trace will be deallocated * and get out of the way for later chances */ trace->zt_collisions++; z_trace_collisions++; - + lck_spin_unlock(&zleak_lock); return TRUE; } else if (trace->zt_size > 0) { @@ -1926,28 +1972,29 @@ zleak_log(uintptr_t* bt, trace->zt_size += allocation_size; } else { /* Found an unused trace bucket, record the trace here! */ - if (trace->zt_depth != 0) /* if this slot was previously used but not currently in use */ + if (trace->zt_depth != 0) { /* if this slot was previously used but not currently in use */ z_trace_overwrites++; - + } + z_trace_recorded++; - trace->zt_size = allocation_size; - memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t)) ); - - trace->zt_depth = depth; - trace->zt_collisions = 0; + trace->zt_size = allocation_size; + memcpy(trace->zt_stack, bt, (depth * sizeof(uintptr_t))); + + trace->zt_depth = depth; + trace->zt_collisions = 0; } - + /* STEP 2: Store the allocation record in the allocations array. */ - + if (allocation->za_element != (uintptr_t) 0) { - /* + /* * Straight up replace any allocation record that was there. We don't want to do the work - * to preserve the allocation entries that were there, because we only record a subset of the + * to preserve the allocation entries that were there, because we only record a subset of the * allocations anyways. 
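[Editor's note] zleak_log() above deliberately uses a try-lock and abandons the sample on contention rather than stalling a hot allocation path. A userspace analogue of that pattern, with a pthread mutex standing in for the zleak spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned total_conflicts;     /* counterpart of z_total_conflicts */

/* Opportunistic recording: if another thread holds the lock, drop this
 * sample; the caller re-arms its counter and samples a later allocation. */
static bool try_record_sample(void)
{
	if (pthread_mutex_trylock(&table_lock) != 0) {
		total_conflicts++;
		return false;
	}
	/* ... update the hash tables here ... */
	pthread_mutex_unlock(&table_lock);
	return true;
}

int main(void)
{
	printf("recorded: %s, conflicts so far: %u\n",
	    try_record_sample() ? "yes" : "no", total_conflicts);
	return 0;
}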
*/ - + z_alloc_collisions++; - + struct ztrace* associated_trace = &ztraces[allocation->za_trace_index]; /* Knock off old allocation's size, not the new allocation */ associated_trace->zt_size -= allocation->za_size; @@ -1956,15 +2003,16 @@ zleak_log(uintptr_t* bt, z_alloc_overwrites++; } - allocation->za_element = addr; - allocation->za_trace_index = trace_index; - allocation->za_size = allocation_size; - + allocation->za_element = addr; + allocation->za_trace_index = trace_index; + allocation->za_size = allocation_size; + z_alloc_recorded++; - - if (top_ztrace->zt_size < trace->zt_size) + + if (top_ztrace->zt_size < trace->zt_size) { top_ztrace = trace; - + } + lck_spin_unlock(&zleak_lock); return TRUE; } @@ -1975,37 +2023,38 @@ zleak_log(uintptr_t* bt, */ static void zleak_free(uintptr_t addr, - vm_size_t allocation_size) + vm_size_t allocation_size) { - if (addr == (uintptr_t) 0) + if (addr == (uintptr_t) 0) { return; - + } + struct zallocation* allocation = &zallocations[hashaddr(addr, zleak_alloc_buckets)]; - + /* Double-checked locking: check to find out if we're interested, lock, check to make * sure it hasn't changed, then modify it, and release the lock. */ - + if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) { /* if the allocation was the one, grab the lock, check again, then delete it */ lck_spin_lock(&zleak_lock); - + if (allocation->za_element == addr && allocation->za_trace_index < zleak_trace_buckets) { struct ztrace *trace; /* allocation_size had better match what was passed into zleak_log - otherwise someone is freeing into the wrong zone! */ if (allocation->za_size != allocation_size) { - panic("Freeing as size %lu memory that was allocated with size %lu\n", - (uintptr_t)allocation_size, (uintptr_t)allocation->za_size); + panic("Freeing as size %lu memory that was allocated with size %lu\n", + (uintptr_t)allocation_size, (uintptr_t)allocation->za_size); } - + trace = &ztraces[allocation->za_trace_index]; - + /* size of 0 indicates trace bucket is unused */ if (trace->zt_size > 0) { trace->zt_size -= allocation_size; } - + /* A NULL element means the allocation bucket is unused */ allocation->za_element = 0; } @@ -2026,16 +2075,16 @@ hash_mix(uintptr_t x) #ifndef __LP64__ x += ~(x << 15); x ^= (x >> 10); - x += (x << 3 ); - x ^= (x >> 6 ); + x += (x << 3); + x ^= (x >> 6); x += ~(x << 11); x ^= (x >> 16); #else x += ~(x << 32); x ^= (x >> 22); x += ~(x << 13); - x ^= (x >> 8 ); - x += (x << 3 ); + x ^= (x >> 8); + x += (x << 3); x ^= (x >> 15); x += ~(x << 27); x ^= (x >> 31); @@ -2046,7 +2095,6 @@ hash_mix(uintptr_t x) uint32_t hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size) { - uintptr_t hash = 0; uintptr_t mask = max_size - 1; @@ -2081,7 +2129,7 @@ hashaddr(uintptr_t pt, uint32_t max_size) /* End of all leak-detection code */ #pragma mark - -#define ZONE_MAX_ALLOC_SIZE (32 * 1024) +#define ZONE_MAX_ALLOC_SIZE (32 * 1024) #define ZONE_ALLOC_FRAG_PERCENT(alloc_size, ele_size) (((alloc_size % ele_size) * 100) / alloc_size) /* Used to manage copying in of new zone names */ @@ -2095,15 +2143,16 @@ compute_element_size(vm_size_t requested_size) /* Zone elements must fit both a next pointer and a backup pointer */ vm_size_t minimum_element_size = sizeof(vm_offset_t) * 2; - if (element_size < minimum_element_size) + if (element_size < minimum_element_size) { element_size = minimum_element_size; + } /* * Round element size to a multiple of sizeof(pointer) * This also enforces that allocations will be aligned on pointer 
boundaries */ - element_size = ((element_size-1) + sizeof(vm_offset_t)) - - ((element_size-1) % sizeof(vm_offset_t)); + element_size = ((element_size - 1) + sizeof(vm_offset_t)) - + ((element_size - 1) % sizeof(vm_offset_t)); return element_size; } @@ -2117,10 +2166,10 @@ compute_element_size(vm_size_t requested_size) */ static void kasan_update_element_size_for_redzone( - zone_t zone, /* the zone that needs to be updated */ - vm_size_t *size, /* requested zone element size */ - vm_size_t *max, /* maximum memory to use */ - const char *name) /* zone name */ + zone_t zone, /* the zone that needs to be updated */ + vm_size_t *size, /* requested zone element size */ + vm_size_t *max, /* maximum memory to use */ + const char *name) /* zone name */ { /* Expand the zone allocation size to include the redzones. For page-multiple * zones add a full guard page because they likely require alignment. kalloc @@ -2151,13 +2200,13 @@ kasan_update_element_size_for_redzone( */ static vm_offset_t kasan_fixup_allocated_element_address( - zone_t zone, /* the zone the element belongs to */ - vm_offset_t addr) /* address of the element, including the redzone */ + zone_t zone, /* the zone the element belongs to */ + vm_offset_t addr) /* address of the element, including the redzone */ { /* Fixup the return address to skip the redzone */ if (zone->kasan_redzone) { addr = kasan_alloc(addr, zone->elem_size, - zone->elem_size - 2 * zone->kasan_redzone, zone->kasan_redzone); + zone->elem_size - 2 * zone->kasan_redzone, zone->kasan_redzone); } return addr; } @@ -2171,8 +2220,8 @@ kasan_fixup_allocated_element_address( */ static bool kasan_quarantine_freed_element( - zone_t *zonep, /* the zone the element is being freed to */ - void **addrp) /* address of the element being freed */ + zone_t *zonep, /* the zone the element is being freed to */ + void **addrp) /* address of the element being freed */ { zone_t zone = *zonep; void *addr = *addrp; @@ -2211,24 +2260,24 @@ kasan_quarantine_freed_element( zone_t zinit( - vm_size_t size, /* the size of an element */ - vm_size_t max, /* maximum memory to use */ - vm_size_t alloc, /* allocation size */ - const char *name) /* a name for the zone */ + vm_size_t size, /* the size of an element */ + vm_size_t max, /* maximum memory to use */ + vm_size_t alloc, /* allocation size */ + const char *name) /* a name for the zone */ { - zone_t z; + zone_t z; size = compute_element_size(size); - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); assert(num_zones < MAX_ZONES); assert(num_zones_in_use <= num_zones); /* If possible, find a previously zdestroy'ed zone in the zone_array that we can reuse instead of initializing a new zone. */ for (int index = bitmap_first(zone_empty_bitmap, MAX_ZONES); - index >= 0 && index < (int)num_zones; - index = bitmap_next(zone_empty_bitmap, index)) { + index >= 0 && index < (int)num_zones; + index = bitmap_next(zone_empty_bitmap, index)) { z = &(zone_array[index]); /* @@ -2248,7 +2297,7 @@ zinit( /* All other state is already set up since the zone was previously in use. Return early. */ simple_unlock(&all_zones_lock); - return (z); + return z; } } } @@ -2293,8 +2342,9 @@ zinit( } alloc = best_alloc; - if (max && (max < alloc)) + if (max && (max < alloc)) { max = alloc; + } z->free_elements = NULL; queue_init(&z->pages.any_free_foreign); @@ -2345,12 +2395,12 @@ zinit( * kexts can be loaded (unloaded). So we should be fine with just a pointer in this case. 
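[Editor's note] The rounding done by compute_element_size() above is worth seeing with concrete numbers. A standalone sketch of the same arithmetic (the printed expectations assume LP64, i.e. 8-byte pointers):

#include <stdio.h>

/* Same arithmetic as compute_element_size(): at least two pointers
 * (freelist next plus backup), then round up to a pointer multiple. */
static size_t element_size_for(size_t requested)
{
	size_t size = requested;
	size_t minimum = 2 * sizeof(void *);

	if (size < minimum) {
		size = minimum;
	}
	return ((size - 1) + sizeof(void *)) - ((size - 1) % sizeof(void *));
}

int main(void)
{
	size_t samples[] = { 1, 16, 17, 24, 100 };
	for (unsigned i = 0; i < 5; i++) {
		printf("request %3zu -> element %3zu\n", samples[i], element_size_for(samples[i]));
	}
	/* On LP64: 1 -> 16, 17 -> 24, 100 -> 104. */
	return 0;
}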
*/ if (kmem_alloc_ready) { - size_t len = MIN(strlen(name)+1, MACH_ZONE_NAME_MAX_LEN); + size_t len = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN); if (zone_names_start == 0 || ((zone_names_next - zone_names_start) + len) > PAGE_SIZE) { printf("zalloc: allocating memory for zone names buffer\n"); kern_return_t retval = kmem_alloc_kobject(kernel_map, &zone_names_start, - PAGE_SIZE, VM_KERN_MEMORY_OSFMK); + PAGE_SIZE, VM_KERN_MEMORY_OSFMK); if (retval != KERN_SUCCESS) { panic("zalloc: zone_names memory allocation failed"); } @@ -2377,13 +2427,11 @@ zinit( */ if (num_zones_logged < max_num_zones_to_log) { - - int i = 1; /* zlog0 isn't allowed. */ - boolean_t zone_logging_enabled = FALSE; - char zlog_name[MAX_ZONE_NAME] = ""; /* Temp. buffer to create the strings zlog1, zlog2 etc... */ + int i = 1; /* zlog0 isn't allowed. */ + boolean_t zone_logging_enabled = FALSE; + char zlog_name[MAX_ZONE_NAME] = ""; /* Temp. buffer to create the strings zlog1, zlog2 etc... */ while (i <= max_num_zones_to_log) { - snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i); if (PE_parse_boot_argn(zlog_name, zone_name_to_log, sizeof(zone_name_to_log)) == TRUE) { @@ -2400,7 +2448,7 @@ zinit( } if (zone_logging_enabled == FALSE) { - /* + /* * Backwards compat. with the old boot-arg used to specify single zone logging i.e. zlog * Needs to happen after the newer zlogn checks because the prefix will match all the zlogn * boot-args. @@ -2417,7 +2465,7 @@ zinit( } if (log_records_init == FALSE && zone_logging_enabled == TRUE) { - if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) { + if (PE_parse_boot_argn("zrecs", &log_records, sizeof(log_records)) == TRUE) { /* * Don't allow more than ZRECORDS_MAX records even if the user asked for more. * This prevents accidentally hogging too much kernel memory and making the system @@ -2441,16 +2489,14 @@ zinit( * right now. */ if (kmem_alloc_ready) { - zone_t curr_zone = NULL; unsigned int max_zones = 0, zone_idx = 0; - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = num_zones; simple_unlock(&all_zones_lock); for (zone_idx = 0; zone_idx < max_zones; zone_idx++) { - curr_zone = &(zone_array[zone_idx]); if (!curr_zone->zone_valid) { @@ -2465,36 +2511,33 @@ zinit( * We don't expect these zones to be needed at this early a time in boot and so take this chance. */ if (curr_zone->zone_logging && curr_zone->zlog_btlog == NULL) { - curr_zone->zlog_btlog = btlog_create(log_records, MAX_ZTRACE_DEPTH, (corruption_debug_flag == FALSE) /* caller_will_remove_entries_for_element? 
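[Editor's note] zinit() above copies zone names into page-sized buffers with a simple bump pointer, allocating a fresh page whenever the current one cannot hold the next name. A userspace sketch of that scheme; PAGE, NAME_MAX_LEN, and malloc() are stand-ins for the kernel's page size, MACH_ZONE_NAME_MAX_LEN, and kmem_alloc_kobject():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE 4096
#define NAME_MAX_LEN 80

static char  *names_start;      /* current page, like zone_names_start */
static size_t names_used;       /* like zone_names_next - zone_names_start */

/* Copy a name into the shared buffer, grabbing a new page when the
 * remainder of the current one is too small. Names live forever, so
 * old pages are intentionally never freed, as in the kernel. */
static const char *intern_name(const char *name)
{
	size_t len = strlen(name) + 1;
	if (len > NAME_MAX_LEN) {
		len = NAME_MAX_LEN;
	}
	if (names_start == NULL || names_used + len > PAGE) {
		names_start = malloc(PAGE);
		if (names_start == NULL) {
			abort();
		}
		names_used = 0;
	}
	char *dst = names_start + names_used;
	snprintf(dst, len, "%s", name);  /* truncates and NUL-terminates, like strlcpy */
	names_used += len;
	return dst;
}

int main(void)
{
	printf("%s / %s\n", intern_name("kalloc.16"), intern_name("vm objects"));
	return 0;
}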
*/); if (curr_zone->zlog_btlog) { - printf("zone: logging started for zone %s\n", curr_zone->zone_name); } else { printf("zone: couldn't allocate memory for zrecords, turning off zleak logging\n"); curr_zone->zone_logging = FALSE; } } - } } } -#if CONFIG_GZALLOC +#if CONFIG_GZALLOC gzalloc_zone_init(z); #endif -#if CONFIG_ZCACHE +#if CONFIG_ZCACHE /* Check if boot-arg specified it should have a cache */ if (cache_all_zones || track_this_zone(name, cache_zone_name)) { zone_change(z, Z_CACHING_ENABLED, TRUE); } #endif - return(z); + return z; } -unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated, zone_replenish_throttle_count; +unsigned zone_replenish_loops, zone_replenish_wakeups, zone_replenish_wakeups_initiated, zone_replenish_throttle_count; static void zone_replenish_thread(zone_t); @@ -2519,18 +2562,20 @@ zone_replenish_thread(zone_t z) assert(z->async_prio_refill == TRUE); unlock_zone(z); - int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT; + int zflags = KMA_KOBJECT | KMA_NOPAGEWAIT; vm_offset_t space, alloc_size; kern_return_t kr; - - if (vm_pool_low()) + + if (vm_pool_low()) { alloc_size = round_page(z->elem_size); - else + } else { alloc_size = z->alloc_size; - - if (z->noencrypt) + } + + if (z->noencrypt) { zflags |= KMA_NOENCRYPT; - + } + /* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */ if (is_zone_map_nearing_exhaustion()) { thread_wakeup((event_t) &vm_pageout_garbage_collect); @@ -2571,7 +2616,8 @@ zone_replenish_thread(zone_t z) } void -zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) { +zone_prio_refill_configure(zone_t z, vm_size_t low_water_mark) +{ z->prio_refill_watermark = low_water_mark; z->async_prio_refill = TRUE; @@ -2621,7 +2667,7 @@ zdestroy(zone_t z) /* Dump all the free elements */ drop_free_elements(z); -#if CONFIG_GZALLOC +#if CONFIG_GZALLOC /* If the zone is gzalloc managed dump all the elements in the free cache */ gzalloc_empty_free_cache(z); #endif @@ -2646,7 +2692,7 @@ zdestroy(zone_t z) unlock_zone(z); - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); assert(!bitmap_test(zone_empty_bitmap, zindex)); /* Mark the zone as empty in the bitmap */ @@ -2680,16 +2726,16 @@ zcram_metadata_init(vm_offset_t newmem, vm_size_t size, struct zone_page_metadat static void random_free_to_zone( - zone_t zone, - vm_offset_t newmem, - vm_offset_t first_element_offset, - int element_count, - unsigned int *entropy_buffer) + zone_t zone, + vm_offset_t newmem, + vm_offset_t first_element_offset, + int element_count, + unsigned int *entropy_buffer) { - vm_offset_t last_element_offset; - vm_offset_t element_addr; + vm_offset_t last_element_offset; + vm_offset_t element_addr; vm_size_t elem_size; - int index; + int index; assert(element_count && element_count <= ZONE_CHUNK_MAXELEMENTS); elem_size = zone->elem_size; @@ -2698,9 +2744,9 @@ random_free_to_zone( assert(first_element_offset <= last_element_offset); if ( #if DEBUG || DEVELOPMENT - leak_scan_debug_flag || __improbable(zone->tags) || + leak_scan_debug_flag || __improbable(zone->tags) || #endif /* DEBUG || DEVELOPMENT */ - random_bool_gen_bits(&zone_bool_gen, entropy_buffer, MAX_ENTROPY_PER_ZCRAM, 1)) { + random_bool_gen_bits(&zone_bool_gen, entropy_buffer, MAX_ENTROPY_PER_ZCRAM, 1)) { element_addr = newmem + first_element_offset; first_element_offset += elem_size; } else { @@ -2720,11 +2766,11 @@ random_free_to_zone( */ void zcram( - zone_t zone, - vm_offset_t newmem, - vm_size_t size) + zone_t zone, + 
vm_offset_t newmem, + vm_size_t size) { - vm_size_t elem_size; + vm_size_t elem_size; boolean_t from_zm = FALSE; int element_count; unsigned int entropy_buffer[MAX_ENTROPY_PER_ZCRAM] = { 0 }; @@ -2732,14 +2778,15 @@ zcram( /* Basic sanity checks */ assert(zone != ZONE_NULL && newmem != (vm_offset_t)0); assert(!zone->collectable || zone->allows_foreign - || (from_zone_map(newmem, size))); + || (from_zone_map(newmem, size))); elem_size = zone->elem_size; KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_START, zone->index, size); - if (from_zone_map(newmem, size)) + if (from_zone_map(newmem, size)) { from_zm = TRUE; + } if (!from_zm) { /* We cannot support elements larger than page size for foreign memory because we @@ -2749,9 +2796,10 @@ zcram( assert((zone->allows_foreign == TRUE) && (zone->elem_size <= (PAGE_SIZE - sizeof(struct zone_page_metadata)))); } - if (zalloc_debug & ZALLOC_DEBUG_ZCRAM) + if (zalloc_debug & ZALLOC_DEBUG_ZCRAM) { kprintf("zcram(%p[%s], 0x%lx%s, 0x%lx)\n", zone, zone->zone_name, - (unsigned long)newmem, from_zm ? "" : "[F]", (unsigned long)size); + (unsigned long)newmem, from_zm ? "" : "[F]", (unsigned long)size); + } ZONE_PAGE_COUNT_INCR(zone, (size / PAGE_SIZE)); @@ -2778,10 +2826,10 @@ zcram( zcram_metadata_init(newmem, size, chunk_metadata); #if VM_MAX_TAG_ZONES - if (__improbable(zone->tags)) { - assert(from_zm); - ztMemoryAdd(zone, newmem, size); - } + if (__improbable(zone->tags)) { + assert(from_zm); + ztMemoryAdd(zone, newmem, size); + } #endif /* VM_MAX_TAG_ZONES */ lock_zone(zone); @@ -2789,29 +2837,28 @@ zcram( enqueue_tail(&zone->pages.all_used, &(chunk_metadata->pages)); if (!from_zm) { - /* We cannot support elements larger than page size for foreign memory because we - * put metadata on the page itself for each page of foreign memory. We need to do - * this in order to be able to reach the metadata when any element is freed + /* We cannot support elements larger than page size for foreign memory because we + * put metadata on the page itself for each page of foreign memory. 
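[Editor's note] random_free_to_zone() above consumes entropy bits so that the initial freelist order of a freshly crammed chunk is unpredictable. A loose userspace analogue: rand() stands in for random_bool_gen_bits(), and an array filled from both ends stands in for head/tail freelist insertion.

#include <stdio.h>
#include <stdlib.h>

static int random_bit(void)
{
	return rand() & 1;    /* stand-in for one bit from the entropy buffer */
}

int main(void)
{
	/* Place elements 0..9 into a "freelist", each landing at the front
	 * or the back at random, so consumers see a shuffled order. */
	int freelist[10];
	int head = 0, tail = 9;

	for (int elem = 0; elem < 10; elem++) {
		if (random_bit()) {
			freelist[head++] = elem;
		} else {
			freelist[tail--] = elem;
		}
	}
	for (int i = 0; i < 10; i++) {
		printf("%d ", freelist[i]);
	}
	printf("\n");
	return 0;
}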
We need to do + * this in order to be able to reach the metadata when any element is freed */ for (; size > 0; newmem += PAGE_SIZE, size -= PAGE_SIZE) { vm_offset_t first_element_offset = 0; - if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0){ + if (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT == 0) { first_element_offset = zone_page_metadata_size; } else { first_element_offset = zone_page_metadata_size + (ZONE_ELEMENT_ALIGNMENT - (zone_page_metadata_size % ZONE_ELEMENT_ALIGNMENT)); } element_count = (unsigned int)((PAGE_SIZE - first_element_offset) / elem_size); - random_free_to_zone(zone, newmem, first_element_offset, element_count, entropy_buffer); + random_free_to_zone(zone, newmem, first_element_offset, element_count, entropy_buffer); } } else { element_count = (unsigned int)(size / elem_size); - random_free_to_zone(zone, newmem, 0, element_count, entropy_buffer); + random_free_to_zone(zone, newmem, 0, element_count, entropy_buffer); } unlock_zone(zone); - - KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, zone->index); + KDBG(MACHDBG_CODE(DBG_MACH_ZALLOC, ZALLOC_ZCRAM) | DBG_FUNC_END, zone->index); } /* @@ -2822,11 +2869,11 @@ zcram( */ int zfill( - zone_t zone, - int nelem) + zone_t zone, + int nelem) { kern_return_t kr; - vm_offset_t memory; + vm_offset_t memory; vm_size_t alloc_size = zone->alloc_size; vm_size_t elem_per_alloc = alloc_size / zone->elem_size; @@ -2843,7 +2890,7 @@ zfill( kr = kernel_memory_allocate(zone_map, &memory, nalloc * alloc_size, 0, KMA_KOBJECT, VM_KERN_MEMORY_ZONE); if (kr != KERN_SUCCESS) { printf("%s: kernel_memory_allocate() of %lu bytes failed\n", - __func__, (unsigned long)(nalloc * alloc_size)); + __func__, (unsigned long)(nalloc * alloc_size)); return 0; } @@ -2864,8 +2911,9 @@ zone_bootstrap(void) { char temp_buf[16]; - if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug, sizeof(zalloc_debug))) + if (!PE_parse_boot_argn("zalloc_debug", &zalloc_debug, sizeof(zalloc_debug))) { zalloc_debug = 0; + } /* Set up zone element poisoning */ zp_init(); @@ -2912,20 +2960,20 @@ zone_bootstrap(void) lck_grp_attr_setdefault(&zone_locks_grp_attr); lck_grp_init(&zone_locks_grp, "zone_locks", &zone_locks_grp_attr); - lck_attr_setdefault(&zone_metadata_lock_attr); + lck_attr_setdefault(&zone_metadata_lock_attr); lck_mtx_init_ext(&zone_metadata_region_lck, &zone_metadata_region_lck_ext, &zone_locks_grp, &zone_metadata_lock_attr); -#if CONFIG_ZCACHE - /* zcc_enable_for_zone_name=: enable per-cpu zone caching for . */ +#if CONFIG_ZCACHE + /* zcc_enable_for_zone_name=: enable per-cpu zone caching for . */ if (PE_parse_boot_arg_str("zcc_enable_for_zone_name", cache_zone_name, sizeof(cache_zone_name))) { printf("zcache: caching enabled for zone %s\n", cache_zone_name); } - /* -zcache_all: enable per-cpu zone caching for all zones, overrides 'zcc_enable_for_zone_name'. */ - if (PE_parse_boot_argn("-zcache_all", temp_buf, sizeof(temp_buf))) { - cache_all_zones = TRUE; - printf("zcache: caching enabled for all zones\n"); - } + /* -zcache_all: enable per-cpu zone caching for all zones, overrides 'zcc_enable_for_zone_name'. 
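[Editor's note] The foreign-page layout computed in zcram() above puts the metadata at the start of the page, the first element at the metadata size rounded up to the element alignment, and splits the remainder into elements. The same arithmetic with hypothetical sizes (the real metadata size and ZONE_ELEMENT_ALIGNMENT are platform-defined):

#include <stdio.h>

#define PAGE_SIZE 4096
#define ZONE_ELEMENT_ALIGNMENT 16    /* illustrative value */

int main(void)
{
	size_t metadata_size = 40;       /* hypothetical sizeof(struct zone_page_metadata) */
	size_t elem_size = 88;

	/* Same computation as zcram() for foreign pages. */
	size_t first_element_offset;
	if (metadata_size % ZONE_ELEMENT_ALIGNMENT == 0) {
		first_element_offset = metadata_size;
	} else {
		first_element_offset = metadata_size +
		    (ZONE_ELEMENT_ALIGNMENT - (metadata_size % ZONE_ELEMENT_ALIGNMENT));
	}
	unsigned element_count = (unsigned)((PAGE_SIZE - first_element_offset) / elem_size);

	printf("metadata %zu -> first element at %zu, %u elements per page\n",
	    metadata_size, first_element_offset, element_count);   /* 40 -> 48, 46 elements */
	return 0;
}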
*/ + if (PE_parse_boot_argn("-zcache_all", temp_buf, sizeof(temp_buf))) { + cache_all_zones = TRUE; + printf("zcache: caching enabled for all zones\n"); + } #endif /* CONFIG_ZCACHE */ } @@ -2952,20 +3000,23 @@ extern pid_t find_largest_process_vm_map_entries(void); */ boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid); -void get_zone_map_size(uint64_t *current_size, uint64_t *capacity) +void +get_zone_map_size(uint64_t *current_size, uint64_t *capacity) { *current_size = zone_map->size; *capacity = vm_map_max(zone_map) - vm_map_min(zone_map); } -void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size) +void +get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size) { zone_t largest_zone = zone_find_largest(); strlcpy(zone_name, largest_zone->zone_name, zone_name_len); *zone_size = largest_zone->cur_size; } -boolean_t is_zone_map_nearing_exhaustion(void) +boolean_t +is_zone_map_nearing_exhaustion(void) { uint64_t size = zone_map->size; uint64_t capacity = vm_map_max(zone_map) - vm_map_min(zone_map); @@ -2984,13 +3035,14 @@ extern zone_t vm_object_zone; * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread * to walk through the jetsam priority bands and kill processes. */ -static void kill_process_in_largest_zone(void) +static void +kill_process_in_largest_zone(void) { pid_t pid = -1; zone_t largest_zone = zone_find_largest(); printf("zone_map_exhaustion: Zone map size %lld, capacity %lld [jetsam limit %d%%]\n", (uint64_t)zone_map->size, - (uint64_t)(vm_map_max(zone_map) - vm_map_min(zone_map)), zone_map_jetsam_limit); + (uint64_t)(vm_map_max(zone_map) - vm_map_min(zone_map)), zone_map_jetsam_limit); printf("zone_map_exhaustion: Largest zone %s, size %lu\n", largest_zone->zone_name, (uintptr_t)largest_zone->cur_size); /* @@ -3031,27 +3083,31 @@ void zone_init( vm_size_t max_zonemap_size) { - kern_return_t retval; - vm_offset_t zone_min; - vm_offset_t zone_max; - vm_offset_t zone_metadata_space; - unsigned int zone_pages; + kern_return_t retval; + vm_offset_t zone_min; + vm_offset_t zone_max; + vm_offset_t zone_metadata_space; + unsigned int zone_pages; vm_map_kernel_flags_t vmk_flags; #if VM_MAX_TAG_ZONES - if (zone_tagging_on) ztInit(max_zonemap_size, &zone_locks_grp); + if (zone_tagging_on) { + ztInit(max_zonemap_size, &zone_locks_grp); + } #endif vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; vmk_flags.vmkf_permanent = TRUE; retval = kmem_suballoc(kernel_map, &zone_min, max_zonemap_size, - FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE, - &zone_map); + FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE, + &zone_map); - if (retval != KERN_SUCCESS) + if (retval != KERN_SUCCESS) { panic("zone_init: kmem_suballoc failed"); + } zone_max = zone_min + round_page(max_zonemap_size); -#if CONFIG_GZALLOC + +#if CONFIG_GZALLOC gzalloc_init(max_zonemap_size); #endif @@ -3064,9 +3120,10 @@ zone_init( zone_pages = (unsigned int)atop_kernel(zone_max - zone_min); zone_metadata_space = round_page(zone_pages * sizeof(struct zone_page_metadata)); retval = kernel_memory_allocate(zone_map, &zone_metadata_region_min, zone_metadata_space, - 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_OSFMK); - if (retval != KERN_SUCCESS) + 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_OSFMK); + if (retval != KERN_SUCCESS) { panic("zone_init: zone_metadata_region initialization failed!"); + } zone_metadata_region_max = zone_metadata_region_min + zone_metadata_space; 
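[Editor's note] The metadata reservation sized in zone_init() just below is one struct zone_page_metadata per page of the zone map, rounded up to a page boundary. Worked through with a hypothetical 1 GiB zone map and a hypothetical 40-byte metadata entry:

#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	unsigned long long max_zonemap_size = 1ULL << 30;  /* hypothetical 1 GiB zone map */
	size_t metadata_entry = 40;                        /* hypothetical sizeof(struct zone_page_metadata) */

	unsigned long long zone_pages = max_zonemap_size / PAGE_SIZE;   /* atop_kernel() */
	unsigned long long metadata_space = zone_pages * metadata_entry;
	/* round_page(): round up to the next page boundary */
	metadata_space = (metadata_space + PAGE_SIZE - 1) & ~(unsigned long long)(PAGE_SIZE - 1);

	printf("%llu pages -> %llu bytes (%.1f MiB) of page metadata\n",
	    zone_pages, metadata_space, metadata_space / (1024.0 * 1024.0));  /* 262144 pages -> 10.0 MiB */
	return 0;
}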
#if defined(__LP64__) @@ -3075,18 +3132,20 @@ zone_init( * the vm_page zone can be packed properly (see vm_page.h * for the packing requirements */ - if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_metadata_region_max))) != (vm_page_t)zone_metadata_region_max) + if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_metadata_region_max))) != (vm_page_t)zone_metadata_region_max) { panic("VM_PAGE_PACK_PTR failed on zone_metadata_region_max - %p", (void *)zone_metadata_region_max); + } - if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address))) != (vm_page_t)zone_map_max_address) + if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(zone_map_max_address))) != (vm_page_t)zone_map_max_address) { panic("VM_PAGE_PACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address); + } #endif lck_grp_attr_setdefault(&zone_gc_lck_grp_attr); lck_grp_init(&zone_gc_lck_grp, "zone_gc", &zone_gc_lck_grp_attr); lck_attr_setdefault(&zone_gc_lck_attr); lck_mtx_init_ext(&zone_gc_lock, &zone_gc_lck_ext, &zone_gc_lck_grp, &zone_gc_lck_attr); - + #if CONFIG_ZLEAKS /* * Initialize the zone leak monitor @@ -3095,13 +3154,16 @@ zone_init( #endif /* CONFIG_ZLEAKS */ #if VM_MAX_TAG_ZONES - if (zone_tagging_on) vm_allocation_zones_init(); + if (zone_tagging_on) { + vm_allocation_zones_init(); + } #endif int jetsam_limit_temp = 0; - if (PE_parse_boot_argn("zone_map_jetsam_limit", &jetsam_limit_temp, sizeof (jetsam_limit_temp)) && - jetsam_limit_temp > 0 && jetsam_limit_temp <= 100) + if (PE_parse_boot_argn("zone_map_jetsam_limit", &jetsam_limit_temp, sizeof(jetsam_limit_temp)) && + jetsam_limit_temp > 0 && jetsam_limit_temp <= 100) { zone_map_jetsam_limit = jetsam_limit_temp; + } } #pragma mark - @@ -3117,13 +3179,15 @@ zalloc_poison_element(boolean_t check_poison, zone_t zone, vm_offset_t addr) vm_offset_t *element_cursor = ((vm_offset_t *) addr) + 1; vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *) addr); - for ( ; element_cursor < backup ; element_cursor++) - if (__improbable(*element_cursor != ZP_POISON)) + for (; element_cursor < backup; element_cursor++) { + if (__improbable(*element_cursor != ZP_POISON)) { zone_element_was_modified_panic(zone, - addr, - *element_cursor, - ZP_POISON, - ((vm_offset_t)element_cursor) - addr); + addr, + *element_cursor, + ZP_POISON, + ((vm_offset_t)element_cursor) - addr); + } + } } if (addr) { @@ -3145,27 +3209,27 @@ zalloc_poison_element(boolean_t check_poison, zone_t zone, vm_offset_t addr) */ static void * zalloc_internal( - zone_t zone, + zone_t zone, boolean_t canblock, boolean_t nopagewait, vm_size_t #if !VM_MAX_TAG_ZONES - __unused + __unused #endif - reqsize, + reqsize, vm_tag_t tag) { - vm_offset_t addr = 0; - kern_return_t retval; - uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */ - unsigned int numsaved = 0; - boolean_t zone_replenish_wakeup = FALSE, zone_alloc_throttle = FALSE; + vm_offset_t addr = 0; + kern_return_t retval; + uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* used in zone leak logging and zone leak detection */ + unsigned int numsaved = 0; + boolean_t zone_replenish_wakeup = FALSE, zone_alloc_throttle = FALSE; thread_t thr = current_thread(); boolean_t check_poison = FALSE; boolean_t set_doing_alloc_with_vm_priv = FALSE; #if CONFIG_ZLEAKS - uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */ + uint32_t zleak_tracedepth = 0; /* log this allocation if nonzero */ #endif /* CONFIG_ZLEAKS */ #if KASAN @@ -3186,14 +3250,15 @@ zalloc_internal( assert(zone != 
ZONE_NULL); assert(irq_safe || ml_get_interrupts_enabled() || ml_is_quiescing() || debug_mode_active() || !early_boot_complete); -#if CONFIG_GZALLOC +#if CONFIG_GZALLOC addr = gzalloc_alloc(zone, canblock); #endif /* * If zone logging is turned on and this is the zone we're tracking, grab a backtrace. */ - if (__improbable(DO_LOGGING(zone))) - numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH); + if (__improbable(DO_LOGGING(zone))) { + numsaved = OSBacktrace((void*) zbt, MAX_ZTRACE_DEPTH); + } #if CONFIG_ZLEAKS /* @@ -3202,15 +3267,18 @@ zalloc_internal( */ if (__improbable(zone->zleak_on && sample_counter(&zone->zleak_capture, zleak_sample_factor) == TRUE)) { /* Avoid backtracing twice if zone logging is on */ - if (numsaved == 0) + if (numsaved == 0) { zleak_tracedepth = backtrace(zbt, MAX_ZTRACE_DEPTH); - else + } else { zleak_tracedepth = numsaved; + } } #endif /* CONFIG_ZLEAKS */ #if VM_MAX_TAG_ZONES - if (__improbable(zone->tags)) vm_tag_will_update_zone(tag, zone->tag_zone_index); + if (__improbable(zone->tags)) { + vm_tag_will_update_zone(tag, zone->tag_zone_index); + } #endif /* VM_MAX_TAG_ZONES */ #if CONFIG_ZCACHE @@ -3222,7 +3290,7 @@ zalloc_internal( addr = kasan_fixup_allocated_element_address(zone, addr); #endif DTRACE_VM2(zalloc, zone_t, zone, void*, addr); - return((void *)addr); + return (void *)addr; } } } @@ -3232,64 +3300,66 @@ zalloc_internal( assert(zone->zone_valid); if (zone->async_prio_refill && zone->zone_replenish_thread) { - vm_size_t zfreec = (zone->cur_size - (zone->count * zone->elem_size)); - vm_size_t zrefillwm = zone->prio_refill_watermark * zone->elem_size; - zone_replenish_wakeup = (zfreec < zrefillwm); - zone_alloc_throttle = (((zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0)) || (zfreec == 0)); + vm_size_t zfreec = (zone->cur_size - (zone->count * zone->elem_size)); + vm_size_t zrefillwm = zone->prio_refill_watermark * zone->elem_size; + zone_replenish_wakeup = (zfreec < zrefillwm); + zone_alloc_throttle = (((zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0)) || (zfreec == 0)); + + do { + if (zone_replenish_wakeup) { + zone_replenish_wakeups_initiated++; + /* Signal the potentially waiting + * refill thread. + */ + thread_wakeup(&zone->zone_replenish_thread); - do { - if (zone_replenish_wakeup) { - zone_replenish_wakeups_initiated++; - /* Signal the potentially waiting - * refill thread. - */ - thread_wakeup(&zone->zone_replenish_thread); + /* We don't want to wait around for zone_replenish_thread to bump up the free count + * if we're in zone_gc(). This keeps us from deadlocking with zone_replenish_thread. + */ + if (thr->options & TH_OPT_ZONE_GC) { + break; + } - /* We don't want to wait around for zone_replenish_thread to bump up the free count - * if we're in zone_gc(). This keeps us from deadlocking with zone_replenish_thread. - */ - if (thr->options & TH_OPT_ZONE_GC) - break; + unlock_zone(zone); + /* Scheduling latencies etc. may prevent + * the refill thread from keeping up + * with demand. 
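[Editor's note] The zleak sampling above fires once every zleak_sample_factor allocations per zone via sample_counter(). Roughly, that is a counter that counts down, fires, and re-arms; the sketch below is single-threaded, whereas the kernel version is written to tolerate concurrent callers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fire once every 'factor' events, then re-arm. */
static bool sample_counter(uint32_t *counter, uint32_t factor)
{
	if (*counter == 0) {
		*counter = factor;     /* re-arm */
		return true;           /* capture a backtrace for this allocation */
	}
	(*counter)--;
	return false;
}

int main(void)
{
	uint32_t zleak_capture = 0;
	unsigned sampled = 0;

	for (int alloc = 0; alloc < 10000; alloc++) {
		if (sample_counter(&zleak_capture, 1000)) {  /* zleak_sample_factor */
			sampled++;
		}
	}
	printf("sampled %u of 10000 allocations\n", sampled);  /* roughly 1 in 1000 */
	return 0;
}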
Throttle consumers + * when we fall below half the + * watermark, unless VM privileged + */ + if (zone_alloc_throttle) { + zone_replenish_throttle_count++; + assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC); + thread_block(THREAD_CONTINUE_NULL); + } + lock_zone(zone); + assert(zone->zone_valid); + } + + zfreec = (zone->cur_size - (zone->count * zone->elem_size)); + zrefillwm = zone->prio_refill_watermark * zone->elem_size; + zone_replenish_wakeup = (zfreec < zrefillwm); + zone_alloc_throttle = (((zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0)) || (zfreec == 0)); + } while (zone_alloc_throttle == TRUE); + } - unlock_zone(zone); - /* Scheduling latencies etc. may prevent - * the refill thread from keeping up - * with demand. Throttle consumers - * when we fall below half the - * watermark, unless VM privileged - */ - if (zone_alloc_throttle) { - zone_replenish_throttle_count++; - assert_wait_timeout(zone, THREAD_UNINT, 1, NSEC_PER_MSEC); - thread_block(THREAD_CONTINUE_NULL); - } - lock_zone(zone); - assert(zone->zone_valid); - } - - zfreec = (zone->cur_size - (zone->count * zone->elem_size)); - zrefillwm = zone->prio_refill_watermark * zone->elem_size; - zone_replenish_wakeup = (zfreec < zrefillwm); - zone_alloc_throttle = (((zfreec < (zrefillwm / 2)) && ((thr->options & TH_OPT_VMPRIV) == 0)) || (zfreec == 0)); - - } while (zone_alloc_throttle == TRUE); - } - - if (__probable(addr == 0)) + if (__probable(addr == 0)) { addr = try_alloc_from_zone(zone, tag, &check_poison); + } /* If we're here because of zone_gc(), we didn't wait for zone_replenish_thread to finish. * So we need to ensure that we did successfully grab an element. And we only need to assert * this for zones that have a replenish thread configured (in this case, the Reserved VM map * entries zone). */ - if (thr->options & TH_OPT_ZONE_GC && zone->async_prio_refill) + if (thr->options & TH_OPT_ZONE_GC && zone->async_prio_refill) { assert(addr != 0); + } while ((addr == 0) && canblock) { /* - * zone is empty, try to expand it - * + * zone is empty, try to expand it + * * Note that we now allow up to 2 threads (1 vm_privliged and 1 non-vm_privliged) * to expand the zone concurrently... this is necessary to avoid stalling * vm_privileged threads running critical code necessary to continue compressing/swapping @@ -3317,8 +3387,9 @@ zalloc_internal( if ((zone->cur_size + zone->elem_size) > zone->max_size) { - if (zone->exhaustible) + if (zone->exhaustible) { break; + } if (zone->expandable) { /* * We're willing to overflow certain @@ -3328,7 +3399,7 @@ zalloc_internal( * with the collectable flag. What we * want is an assurance we can get the * memory back, assuming there's no - * leak. + * leak. */ zone->max_size += (zone->max_size >> 1); } else { @@ -3336,40 +3407,43 @@ zalloc_internal( panic_include_zprint = TRUE; #if CONFIG_ZLEAKS - if (zleak_state & ZLEAK_STATE_ACTIVE) + if (zleak_state & ZLEAK_STATE_ACTIVE) { panic_include_ztrace = TRUE; + } #endif /* CONFIG_ZLEAKS */ panic("zalloc: zone \"%s\" empty.", zone->zone_name); } } - /* + /* * It is possible that a BG thread is refilling/expanding the zone * and gets pre-empted during that operation. That blocks all other * threads from making progress leading to a watchdog timeout. 
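[Editor's note] The refill/throttle conditions in the loop above reduce to two comparisons of free bytes against the watermark: wake the replenish thread below the watermark, and throttle unprivileged allocators below half of it (the real code re-evaluates both under the zone lock until the throttle clears). The same tests extracted into a standalone sketch with made-up zone numbers:

#include <stdbool.h>
#include <stdio.h>

struct zone_state {
	size_t cur_size;              /* bytes backing the zone */
	size_t count;                 /* elements allocated */
	size_t elem_size;
	size_t prio_refill_watermark; /* in elements, like z->prio_refill_watermark */
};

static void refill_decision(const struct zone_state *z, bool vm_privileged)
{
	size_t zfreec = z->cur_size - z->count * z->elem_size;      /* free bytes */
	size_t zrefillwm = z->prio_refill_watermark * z->elem_size; /* watermark in bytes */

	bool wake_replenish = zfreec < zrefillwm;
	bool throttle = (zfreec < zrefillwm / 2 && !vm_privileged) || zfreec == 0;

	printf("free=%zu wm=%zu -> wake=%d throttle=%d\n",
	    zfreec, zrefillwm, wake_replenish, throttle);
}

int main(void)
{
	struct zone_state z = { .cur_size = 64 * 1024, .count = 500,
		                .elem_size = 120, .prio_refill_watermark = 100 };
	refill_decision(&z, false);   /* ordinary thread: throttled below half the watermark */
	refill_decision(&z, true);    /* a TH_OPT_VMPRIV thread is only throttled at zero free */
	return 0;
}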
To * avoid that, boost the thread priority using the rwlock boost */ set_thread_rwlock_boost(); - + if ((thr->options & TH_OPT_VMPRIV)) { - zone->doing_alloc_with_vm_priv = TRUE; + zone->doing_alloc_with_vm_priv = TRUE; set_doing_alloc_with_vm_priv = TRUE; } else { - zone->doing_alloc_without_vm_priv = TRUE; + zone->doing_alloc_without_vm_priv = TRUE; } unlock_zone(zone); for (;;) { - int zflags = KMA_KOBJECT|KMA_NOPAGEWAIT; + int zflags = KMA_KOBJECT | KMA_NOPAGEWAIT; - if (vm_pool_low() || retry >= 1) - alloc_size = - round_page(zone->elem_size); - else + if (vm_pool_low() || retry >= 1) { + alloc_size = + round_page(zone->elem_size); + } else { alloc_size = zone->alloc_size; - - if (zone->noencrypt) + } + + if (zone->noencrypt) { zflags |= KMA_NOENCRYPT; - + } + /* Trigger jetsams via the vm_pageout_garbage_collect thread if we're running out of zone memory */ if (is_zone_map_nearing_exhaustion()) { thread_wakeup((event_t) &vm_pageout_garbage_collect); @@ -3381,39 +3455,38 @@ zalloc_internal( if ((zleak_state & (ZLEAK_STATE_ENABLED | ZLEAK_STATE_ACTIVE)) == ZLEAK_STATE_ENABLED) { if (zone_map->size >= zleak_global_tracking_threshold) { kern_return_t kr; - + kr = zleak_activate(); if (kr != KERN_SUCCESS) { printf("Failed to activate live zone leak debugging (%d).\n", kr); } } } - + if ((zleak_state & ZLEAK_STATE_ACTIVE) && !(zone->zleak_on)) { if (zone->cur_size > zleak_per_zone_tracking_threshold) { zone->zleak_on = TRUE; - } + } } #endif /* CONFIG_ZLEAKS */ zcram(zone, space, alloc_size); - + break; } else if (retval != KERN_RESOURCE_SHORTAGE) { retry++; - + if (retry == 3) { panic_include_zprint = TRUE; #if CONFIG_ZLEAKS if ((zleak_state & ZLEAK_STATE_ACTIVE)) { panic_include_ztrace = TRUE; } -#endif /* CONFIG_ZLEAKS */ +#endif /* CONFIG_ZLEAKS */ if (retval == KERN_NO_SPACE) { zone_t zone_largest = zone_find_largest(); panic("zalloc: zone map exhausted while allocating from zone %s, likely due to memory leak in zone %s (%lu total bytes, %d elements allocated)", - zone->zone_name, zone_largest->zone_name, - (unsigned long)zone_largest->cur_size, zone_largest->count); - + zone->zone_name, zone_largest->zone_name, + (unsigned long)zone_largest->cur_size, zone_largest->count); } panic("zalloc: \"%s\" (%d elements) retry fail %d", zone->zone_name, zone->count, retval); } @@ -3424,13 +3497,14 @@ zalloc_internal( lock_zone(zone); assert(zone->zone_valid); - if (set_doing_alloc_with_vm_priv == TRUE) - zone->doing_alloc_with_vm_priv = FALSE; - else - zone->doing_alloc_without_vm_priv = FALSE; - + if (set_doing_alloc_with_vm_priv == TRUE) { + zone->doing_alloc_with_vm_priv = FALSE; + } else { + zone->doing_alloc_without_vm_priv = FALSE; + } + if (zone->waiting) { - zone->waiting = FALSE; + zone->waiting = FALSE; zone_wakeup(zone); } clear_thread_rwlock_boost(); @@ -3438,8 +3512,9 @@ zalloc_internal( addr = try_alloc_from_zone(zone, tag, &check_poison); if (addr == 0 && retval == KERN_RESOURCE_SHORTAGE) { - if (nopagewait == TRUE) - break; /* out of the main while loop */ + if (nopagewait == TRUE) { + break; /* out of the main while loop */ + } unlock_zone(zone); VM_PAGE_WAIT(); @@ -3447,24 +3522,25 @@ zalloc_internal( assert(zone->zone_valid); } } - if (addr == 0) + if (addr == 0) { addr = try_alloc_from_zone(zone, tag, &check_poison); + } } #if CONFIG_ZLEAKS /* Zone leak detection: - * If we're sampling this allocation, add it to the zleaks hash table. + * If we're sampling this allocation, add it to the zleaks hash table. 
*/ - if (addr && zleak_tracedepth > 0) { + if (addr && zleak_tracedepth > 0) { /* Sampling can fail if another sample is happening at the same time in a different zone. */ if (!zleak_log(zbt, addr, zleak_tracedepth, zone->elem_size)) { /* If it failed, roll back the counter so we sample the next allocation instead. */ zone->zleak_capture = zleak_sample_factor; } } -#endif /* CONFIG_ZLEAKS */ - - +#endif /* CONFIG_ZLEAKS */ + + if ((addr == 0) && (!canblock || nopagewait) && (zone->async_pending == FALSE) && (zone->no_callout == FALSE) && (zone->exhaustible == FALSE) && (!vm_pool_low())) { zone->async_pending = TRUE; unlock_zone(zone); @@ -3475,10 +3551,12 @@ zalloc_internal( } #if VM_MAX_TAG_ZONES - if (__improbable(zone->tags) && addr) { - if (reqsize) reqsize = zone->elem_size - reqsize; - vm_tag_update_zone_size(tag, zone->tag_zone_index, zone->elem_size, reqsize); - } + if (__improbable(zone->tags) && addr) { + if (reqsize) { + reqsize = zone->elem_size - reqsize; + } + vm_tag_update_zone_size(tag, zone->tag_zone_index, zone->elem_size, reqsize); + } #endif /* VM_MAX_TAG_ZONES */ unlock_zone(zone); @@ -3494,10 +3572,16 @@ zalloc_internal( if (__improbable(leak_scan_debug_flag && !(zone->elem_size & (sizeof(uintptr_t) - 1)))) { unsigned int count, idx; /* Fill element, from tail, with backtrace in reverse order */ - if (numsaved == 0) numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH); + if (numsaved == 0) { + numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH); + } count = (unsigned int)(zone->elem_size / sizeof(uintptr_t)); - if (count >= numsaved) count = numsaved - 1; - for (idx = 0; idx < count; idx++) ((uintptr_t *)addr)[count - 1 - idx] = zbt[idx + 1]; + if (count >= numsaved) { + count = numsaved - 1; + } + for (idx = 0; idx < count; idx++) { + ((uintptr_t *)addr)[count - 1 - idx] = zbt[idx + 1]; + } } #endif /* DEBUG || DEVELOPMENT */ } @@ -3511,37 +3595,37 @@ zalloc_internal( DTRACE_VM2(zalloc, zone_t, zone, void*, addr); - return((void *)addr); + return (void *)addr; } void * zalloc(zone_t zone) { - return (zalloc_internal(zone, TRUE, FALSE, 0, VM_KERN_MEMORY_NONE)); + return zalloc_internal(zone, TRUE, FALSE, 0, VM_KERN_MEMORY_NONE); } void * zalloc_noblock(zone_t zone) { - return (zalloc_internal(zone, FALSE, FALSE, 0, VM_KERN_MEMORY_NONE)); + return zalloc_internal(zone, FALSE, FALSE, 0, VM_KERN_MEMORY_NONE); } void * zalloc_nopagewait(zone_t zone) { - return (zalloc_internal(zone, TRUE, TRUE, 0, VM_KERN_MEMORY_NONE)); + return zalloc_internal(zone, TRUE, TRUE, 0, VM_KERN_MEMORY_NONE); } void * zalloc_canblock_tag(zone_t zone, boolean_t canblock, vm_size_t reqsize, vm_tag_t tag) { - return (zalloc_internal(zone, canblock, FALSE, reqsize, tag)); + return zalloc_internal(zone, canblock, FALSE, reqsize, tag); } void * zalloc_canblock(zone_t zone, boolean_t canblock) { - return (zalloc_internal(zone, canblock, FALSE, 0, VM_KERN_MEMORY_NONE)); + return zalloc_internal(zone, canblock, FALSE, 0, VM_KERN_MEMORY_NONE); } void * @@ -3556,7 +3640,7 @@ zalloc_attempt(zone_t zone) void zfree_direct(zone_t zone, vm_offset_t elem) { - boolean_t poison = zfree_poison_element(zone, elem); + boolean_t poison = zfree_poison_element(zone, elem); free_to_zone(zone, elem, poison); } @@ -3570,8 +3654,8 @@ zalloc_async( unsigned int max_zones, i; void *elt = NULL; boolean_t pending = FALSE; - - simple_lock(&all_zones_lock); + + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = num_zones; simple_unlock(&all_zones_lock); for (i = 0; i < max_zones; i++) { @@ -3603,50 +3687,54 @@ zalloc_async( */ void * 
zget( - zone_t zone) + zone_t zone) { - return zalloc_internal(zone, FALSE, TRUE, 0, VM_KERN_MEMORY_NONE); + return zalloc_internal(zone, FALSE, TRUE, 0, VM_KERN_MEMORY_NONE); } /* Keep this FALSE by default. Large memory machine run orders of magnitude - slower in debug mode when true. Use debugger to enable if needed */ + * slower in debug mode when true. Use debugger to enable if needed */ /* static */ boolean_t zone_check = FALSE; -static void zone_check_freelist(zone_t zone, vm_offset_t elem) +static void +zone_check_freelist(zone_t zone, vm_offset_t elem) { struct zone_free_element *this; struct zone_page_metadata *thispage; if (zone->allows_foreign) { for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.any_free_foreign); - !queue_end(&zone->pages.any_free_foreign, &(thispage->pages)); - thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { + !queue_end(&zone->pages.any_free_foreign, &(thispage->pages)); + thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { for (this = page_metadata_get_freelist(thispage); - this != NULL; - this = this->next) { - if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) + this != NULL; + this = this->next) { + if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) { panic("zone_check_freelist"); + } } } } for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.all_free); - !queue_end(&zone->pages.all_free, &(thispage->pages)); - thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { + !queue_end(&zone->pages.all_free, &(thispage->pages)); + thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { for (this = page_metadata_get_freelist(thispage); - this != NULL; - this = this->next) { - if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) + this != NULL; + this = this->next) { + if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) { panic("zone_check_freelist"); + } } } for (thispage = (struct zone_page_metadata *)queue_first(&zone->pages.intermediate); - !queue_end(&zone->pages.intermediate, &(thispage->pages)); - thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { + !queue_end(&zone->pages.intermediate, &(thispage->pages)); + thispage = (struct zone_page_metadata *)queue_next(&(thispage->pages))) { for (this = page_metadata_get_freelist(thispage); - this != NULL; - this = this->next) { - if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) + this != NULL; + this = this->next) { + if (!is_sane_zone_element(zone, (vm_address_t)this) || (vm_address_t)this == elem) { panic("zone_check_freelist"); + } } } } @@ -3668,36 +3756,37 @@ zfree_poison_element(zone_t zone, vm_offset_t elem) uint32_t sample_factor = zp_factor + (((uint32_t)inner_size) >> zp_scale); - if (inner_size <= zp_tiny_zone_limit) + if (inner_size <= zp_tiny_zone_limit) { poison = TRUE; - else if (zp_factor != 0 && sample_counter(&zone->zp_count, sample_factor) == TRUE) + } else if (zp_factor != 0 && sample_counter(&zone->zp_count, sample_factor) == TRUE) { poison = TRUE; + } if (__improbable(poison)) { - /* memset_pattern{4|8} could help make this faster: */ /* Poison everything but primary and backup */ vm_offset_t *element_cursor = ((vm_offset_t *) elem) + 1; vm_offset_t *backup = get_backup_ptr(inner_size, (vm_offset_t *)elem); - for ( ; element_cursor < backup; element_cursor++) + for (; element_cursor < backup; 
element_cursor++) { *element_cursor = ZP_POISON; + } } } return poison; } void -zfree( - zone_t zone, - void *addr) +(zfree)( + zone_t zone, + void *addr) { - vm_offset_t elem = (vm_offset_t) addr; - uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */ - unsigned int numsaved = 0; - boolean_t gzfreed = FALSE; + vm_offset_t elem = (vm_offset_t) addr; + uintptr_t zbt[MAX_ZTRACE_DEPTH]; /* only used if zone logging is enabled via boot-args */ + unsigned int numsaved = 0; + boolean_t gzfreed = FALSE; boolean_t poison = FALSE; #if VM_MAX_TAG_ZONES - vm_tag_t tag; + vm_tag_t tag; #endif /* VM_MAX_TAG_ZONES */ assert(zone != ZONE_NULL); @@ -3713,16 +3802,18 @@ zfree( * If zone logging is turned on and this is the zone we're tracking, grab a backtrace. */ - if (__improbable(DO_LOGGING(zone) && corruption_debug_flag)) + if (__improbable(DO_LOGGING(zone) && corruption_debug_flag)) { numsaved = OSBacktrace((void *)zbt, MAX_ZTRACE_DEPTH); + } #if MACH_ASSERT /* Basic sanity checks */ - if (zone == ZONE_NULL || elem == (vm_offset_t)0) + if (zone == ZONE_NULL || elem == (vm_offset_t)0) { panic("zfree: NULL"); + } #endif -#if CONFIG_GZALLOC +#if CONFIG_GZALLOC gzfreed = gzalloc_free(zone, addr); #endif @@ -3736,7 +3827,7 @@ zfree( TRACE_MACHLEAKS(ZFREE_CODE, ZFREE_CODE_2, zone->elem_size, (uintptr_t)addr); if (__improbable(!gzfreed && zone->collectable && !zone->allows_foreign && - !from_zone_map(elem, zone->elem_size))) { + !from_zone_map(elem, zone->elem_size))) { panic("zfree: non-allocated memory in collectable zone!"); } @@ -3768,11 +3859,11 @@ zfree( } #if CONFIG_ZCACHE - if (zone_caching_enabled(zone)) { - int __assert_only ret = zcache_free_to_cpu_cache(zone, addr); - assert(ret != FALSE); - return; - } + if (zone_caching_enabled(zone)) { + int __assert_only ret = zcache_free_to_cpu_cache(zone, addr); + assert(ret != FALSE); + return; + } #endif /* CONFIG_ZCACHE */ lock_zone(zone); @@ -3784,29 +3875,29 @@ zfree( if (__probable(!gzfreed)) { #if VM_MAX_TAG_ZONES - if (__improbable(zone->tags)) { + if (__improbable(zone->tags)) { tag = (ZTAG(zone, elem)[0] >> 1); // set the tag with b0 clear so the block remains inuse ZTAG(zone, elem)[0] = 0xFFFE; - } + } #endif /* VM_MAX_TAG_ZONES */ free_to_zone(zone, elem, poison); } if (__improbable(zone->count < 0)) { panic("zfree: zone count underflow in zone %s while freeing element %p, possible cause: double frees or freeing memory that did not come from this zone", - zone->zone_name, addr); + zone->zone_name, addr); } #if CONFIG_ZLEAKS /* - * Zone leak detection: un-track the allocation + * Zone leak detection: un-track the allocation */ if (zone->zleak_on) { zleak_free(elem, zone->elem_size); } #endif /* CONFIG_ZLEAKS */ - + #if VM_MAX_TAG_ZONES if (__improbable(zone->tags) && __probable(!gzfreed)) { vm_tag_update_zone_size(tag, zone->tag_zone_index, -((int64_t)zone->elem_size), 0); @@ -3821,83 +3912,82 @@ zfree( */ void zone_change( - zone_t zone, - unsigned int item, - boolean_t value) + zone_t zone, + unsigned int item, + boolean_t value) { assert( zone != ZONE_NULL ); assert( value == TRUE || value == FALSE ); - switch(item){ - case Z_NOENCRYPT: - zone->noencrypt = value; - break; - case Z_EXHAUST: - zone->exhaustible = value; - break; - case Z_COLLECT: - zone->collectable = value; - break; - case Z_EXPAND: - zone->expandable = value; - break; - case Z_FOREIGN: - zone->allows_foreign = value; - break; - case Z_CALLERACCT: - zone->caller_acct = value; - break; - case Z_NOCALLOUT: - zone->no_callout = value; - break; 
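[Editor's note] zfree_poison_element() above fills every word between the freelist pointer and the backup pointer with ZP_POISON, and zalloc_poison_element() verifies the fill on the next allocation. A userspace sketch of that round trip; the poison pattern here is illustrative, not the kernel's ZP_POISON value, and the first/last words stand in for the primary and backup pointers.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define POISON ((uintptr_t)0xdeadbeefdeadbeefULL)   /* illustrative pattern */

/* Poison every word except the first (freelist next) and last (backup). */
static void poison(uintptr_t *elem, size_t words)
{
	for (size_t i = 1; i < words - 1; i++) {
		elem[i] = POISON;
	}
}

/* On the next allocation, any modified word indicates a use-after-free. */
static int check(const uintptr_t *elem, size_t words)
{
	for (size_t i = 1; i < words - 1; i++) {
		if (elem[i] != POISON) {
			return (int)i;   /* index of the corrupted word */
		}
	}
	return -1;
}

int main(void)
{
	size_t words = 8;            /* a 64-byte element on LP64 */
	uintptr_t *elem = calloc(words, sizeof(*elem));
	if (elem == NULL) {
		abort();
	}
	poison(elem, words);
	elem[3] = 0x41;              /* simulate a write after free */
	printf("corruption at word %d\n", check(elem, words));
	free(elem);
	return 0;
}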
- case Z_TAGS_ENABLED: + switch (item) { + case Z_NOENCRYPT: + zone->noencrypt = value; + break; + case Z_EXHAUST: + zone->exhaustible = value; + break; + case Z_COLLECT: + zone->collectable = value; + break; + case Z_EXPAND: + zone->expandable = value; + break; + case Z_FOREIGN: + zone->allows_foreign = value; + break; + case Z_CALLERACCT: + zone->caller_acct = value; + break; + case Z_NOCALLOUT: + zone->no_callout = value; + break; + case Z_TAGS_ENABLED: #if VM_MAX_TAG_ZONES - { - static int tag_zone_index; - zone->tags = TRUE; - zone->tags_inline = (((page_size + zone->elem_size - 1) / zone->elem_size) <= (sizeof(uint32_t) / sizeof(uint16_t))); - zone->tag_zone_index = OSAddAtomic(1, &tag_zone_index); - } + { + static int tag_zone_index; + zone->tags = TRUE; + zone->tags_inline = (((page_size + zone->elem_size - 1) / zone->elem_size) <= (sizeof(uint32_t) / sizeof(uint16_t))); + zone->tag_zone_index = OSAddAtomic(1, &tag_zone_index); + } #endif /* VM_MAX_TAG_ZONES */ - break; - case Z_GZALLOC_EXEMPT: - zone->gzalloc_exempt = value; -#if CONFIG_GZALLOC - gzalloc_reconfigure(zone); + break; + case Z_GZALLOC_EXEMPT: + zone->gzalloc_exempt = value; +#if CONFIG_GZALLOC + gzalloc_reconfigure(zone); #endif - break; - case Z_ALIGNMENT_REQUIRED: - zone->alignment_required = value; + break; + case Z_ALIGNMENT_REQUIRED: + zone->alignment_required = value; #if KASAN_ZALLOC - if (zone->kasan_redzone == KASAN_GUARD_SIZE) { - /* Don't disturb alignment with the redzone for zones with - * specific alignment requirements. */ - zone->elem_size -= zone->kasan_redzone * 2; - zone->kasan_redzone = 0; - } + if (zone->kasan_redzone == KASAN_GUARD_SIZE) { + /* Don't disturb alignment with the redzone for zones with + * specific alignment requirements. */ + zone->elem_size -= zone->kasan_redzone * 2; + zone->kasan_redzone = 0; + } #endif -#if CONFIG_GZALLOC - gzalloc_reconfigure(zone); +#if CONFIG_GZALLOC + gzalloc_reconfigure(zone); #endif - break; - case Z_KASAN_QUARANTINE: - zone->kasan_quarantine = value; - break; - case Z_CACHING_ENABLED: -#if CONFIG_ZCACHE - if (value == TRUE && use_caching) { - if (zcache_ready()) { - zcache_init(zone); - } else { - zone->cpu_cache_enable_when_ready = TRUE; - } - + break; + case Z_KASAN_QUARANTINE: + zone->kasan_quarantine = value; + break; + case Z_CACHING_ENABLED: +#if CONFIG_ZCACHE + if (value == TRUE && use_caching) { + if (zcache_ready()) { + zcache_init(zone); + } else { + zone->cpu_cache_enable_when_ready = TRUE; } + } #endif - break; - default: - panic("Zone_change: Wrong Item Type!"); - /* break; */ + break; + default: + panic("Zone_change: Wrong Item Type!"); + /* break; */ } } @@ -3919,18 +4009,18 @@ zone_free_count(zone_t zone) assert(free_count >= 0); - return(free_count); + return free_count; } /* Drops the elements in the free queue of a zone. Called by zone_gc() on each zone, and when a zone is zdestroy'ed. 
*/ void drop_free_elements(zone_t z) { - vm_size_t elt_size, size_freed; - unsigned int total_freed_pages = 0; - uint64_t old_all_free_count; - struct zone_page_metadata *page_meta; - queue_head_t page_meta_head; + vm_size_t elt_size, size_freed; + unsigned int total_freed_pages = 0; + uint64_t old_all_free_count; + struct zone_page_metadata *page_meta; + queue_head_t page_meta_head; lock_zone(z); if (queue_empty(&z->pages.all_free)) { @@ -3958,7 +4048,7 @@ drop_free_elements(zone_t z) /* Update the zone size and free element count */ lock_zone(z); z->cur_size -= size_freed; - z->countfree -= size_freed/elt_size; + z->countfree -= size_freed / elt_size; unlock_zone(z); while ((page_meta = (struct zone_page_metadata *)dequeue_head(&page_meta_head)) != NULL) { @@ -3972,7 +4062,9 @@ drop_free_elements(zone_t z) kasan_poison_range(free_page_address, page_meta->page_count * PAGE_SIZE, ASAN_VALID); #endif #if VM_MAX_TAG_ZONES - if (z->tags) ztMemoryRemove(z, free_page_address, (page_meta->page_count * PAGE_SIZE)); + if (z->tags) { + ztMemoryRemove(z, free_page_address, (page_meta->page_count * PAGE_SIZE)); + } #endif /* VM_MAX_TAG_ZONES */ kmem_free(zone_map, free_page_address, (page_meta->page_count * PAGE_SIZE)); if (current_thread()->options & TH_OPT_ZONE_GC) { @@ -3983,8 +4075,9 @@ drop_free_elements(zone_t z) /* We freed all the pages from the all_free list for this zone */ assert(old_all_free_count == 0); - if (zalloc_debug & ZALLOC_DEBUG_ZONEGC) - kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed/elt_size, total_freed_pages); + if (zalloc_debug & ZALLOC_DEBUG_ZONEGC) { + kprintf("zone_gc() of zone %s freed %lu elements, %d pages\n", z->zone_name, (unsigned long)size_freed / elt_size, total_freed_pages); + } } /* Zone garbage collection @@ -3999,9 +4092,9 @@ drop_free_elements(zone_t z) void zone_gc(boolean_t consider_jetsams) { - unsigned int max_zones; - zone_t z; - unsigned int i; + unsigned int max_zones; + zone_t z; + unsigned int i; if (consider_jetsams) { kill_process_in_largest_zone(); @@ -4016,12 +4109,13 @@ zone_gc(boolean_t consider_jetsams) current_thread()->options |= TH_OPT_ZONE_GC; - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = num_zones; simple_unlock(&all_zones_lock); - if (zalloc_debug & ZALLOC_DEBUG_ZONEGC) + if (zalloc_debug & ZALLOC_DEBUG_ZONEGC) { kprintf("zone_gc() starting...\n"); + } for (i = 0; i < max_zones; i++) { z = &(zone_array[i]); @@ -4038,7 +4132,7 @@ zone_gc(boolean_t consider_jetsams) if (queue_empty(&z->pages.all_free)) { continue; } - + drop_free_elements(z); } @@ -4069,8 +4163,9 @@ consider_zone_gc(boolean_t consider_jetsams) kmapoff_kaddr = 0; } - if (zone_gc_allowed) + if (zone_gc_allowed) { zone_gc(consider_jetsams); + } } /* @@ -4081,14 +4176,14 @@ consider_zone_gc(boolean_t consider_jetsams) */ vm_map_copy_t create_vm_map_copy( - vm_offset_t start_addr, - vm_size_t total_size, - vm_size_t used_size) + vm_offset_t start_addr, + vm_size_t total_size, + vm_size_t used_size) { - kern_return_t kr; - vm_offset_t end_addr; - vm_size_t free_size; - vm_map_copy_t copy; + kern_return_t kr; + vm_offset_t end_addr; + vm_size_t free_size; + vm_map_copy_t copy; if (used_size != total_size) { end_addr = start_addr + used_size; @@ -4096,13 +4191,13 @@ create_vm_map_copy( if (free_size >= PAGE_SIZE) { kmem_free(ipc_kernel_map, - round_page(end_addr), free_size); + round_page(end_addr), free_size); } bzero((char *) end_addr, round_page(end_addr) - end_addr); } kr = 
vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr, - (vm_map_size_t)used_size, TRUE, &copy); + (vm_map_size_t)used_size, TRUE, &copy); assert(kr == KERN_SUCCESS); return copy; @@ -4110,9 +4205,9 @@ create_vm_map_copy( boolean_t get_zone_info( - zone_t z, - mach_zone_name_t *zn, - mach_zone_info_t *zi) + zone_t z, + mach_zone_name_t *zn, + mach_zone_info_t *zi) { struct zone zcopy; @@ -4128,7 +4223,7 @@ get_zone_info( if (zn != NULL) { /* assuming here the name data is static */ (void) __nosan_strlcpy(zn->mzn_name, zcopy.zone_name, - strlen(zcopy.zone_name)+1); + strlen(zcopy.zone_name) + 1); } if (zi != NULL) { @@ -4151,10 +4246,10 @@ get_zone_info( kern_return_t task_zone_info( - __unused task_t task, - __unused mach_zone_name_array_t *namesp, + __unused task_t task, + __unused mach_zone_name_array_t *namesp, __unused mach_msg_type_number_t *namesCntp, - __unused task_zone_info_array_t *infop, + __unused task_zone_info_array_t *infop, __unused mach_msg_type_number_t *infoCntp) { return KERN_FAILURE; @@ -4162,52 +4257,54 @@ task_zone_info( kern_return_t mach_zone_info( - host_priv_t host, - mach_zone_name_array_t *namesp, + host_priv_t host, + mach_zone_name_array_t *namesp, mach_msg_type_number_t *namesCntp, - mach_zone_info_array_t *infop, + mach_zone_info_array_t *infop, mach_msg_type_number_t *infoCntp) { - return (mach_memory_info(host, namesp, namesCntp, infop, infoCntp, NULL, NULL)); + return mach_memory_info(host, namesp, namesCntp, infop, infoCntp, NULL, NULL); } kern_return_t mach_memory_info( - host_priv_t host, - mach_zone_name_array_t *namesp, + host_priv_t host, + mach_zone_name_array_t *namesp, mach_msg_type_number_t *namesCntp, - mach_zone_info_array_t *infop, + mach_zone_info_array_t *infop, mach_msg_type_number_t *infoCntp, mach_memory_info_array_t *memoryInfop, mach_msg_type_number_t *memoryInfoCntp) { - mach_zone_name_t *names; - vm_offset_t names_addr; - vm_size_t names_size; - - mach_zone_info_t *info; - vm_offset_t info_addr; - vm_size_t info_size; - - mach_memory_info_t *memory_info; - vm_offset_t memory_info_addr; - vm_size_t memory_info_size; - vm_size_t memory_info_vmsize; - unsigned int num_info; - - unsigned int max_zones, used_zones, i; - mach_zone_name_t *zn; - mach_zone_info_t *zi; - kern_return_t kr; - - uint64_t zones_collectable_bytes = 0; - - if (host == HOST_NULL) + mach_zone_name_t *names; + vm_offset_t names_addr; + vm_size_t names_size; + + mach_zone_info_t *info; + vm_offset_t info_addr; + vm_size_t info_size; + + mach_memory_info_t *memory_info; + vm_offset_t memory_info_addr; + vm_size_t memory_info_size; + vm_size_t memory_info_vmsize; + unsigned int num_info; + + unsigned int max_zones, used_zones, i; + mach_zone_name_t *zn; + mach_zone_info_t *zi; + kern_return_t kr; + + uint64_t zones_collectable_bytes = 0; + + if (host == HOST_NULL) { return KERN_INVALID_HOST; + } #if CONFIG_DEBUGGER_FOR_ZONE_INFO - if (!PE_i_can_has_debugger(NULL)) + if (!PE_i_can_has_debugger(NULL)) { return KERN_INVALID_HOST; + } #endif /* @@ -4215,23 +4312,24 @@ mach_memory_info( * We won't pick up any zones that are allocated later. 
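(For context, a hedged sketch of the user-space side of this interface, roughly what a tool such as zprint does; the mach_zone_info_t field names follow mach_debug/zone_info.h and error handling is abbreviated.)

    #include <stdio.h>
    #include <mach/mach.h>
    #include <mach_debug/mach_debug.h>   /* mach_zone_name_t, mach_zone_info_t */

    int
    main(void)
    {
        mach_zone_name_t       *names = NULL;
        mach_zone_info_t       *info = NULL;
        mach_msg_type_number_t  nameCnt = 0, infoCnt = 0;

        /* Gated in-kernel by the HOST_NULL and CONFIG_DEBUGGER_FOR_ZONE_INFO checks above. */
        kern_return_t kr = mach_zone_info(mach_host_self(),
            &names, &nameCnt, &info, &infoCnt);
        if (kr != KERN_SUCCESS) {
            return 1;
        }
        for (unsigned int i = 0; i < nameCnt && i < infoCnt; i++) {
            printf("%-32s cur %llu max %llu elem %llu\n", names[i].mzn_name,
                info[i].mzi_cur_size, info[i].mzi_max_size, info[i].mzi_elem_size);
        }
        /* Both arrays arrive as out-of-line vm_map_copy_t data; give the pages back. */
        vm_deallocate(mach_task_self(), (vm_address_t)names, nameCnt * sizeof(*names));
        vm_deallocate(mach_task_self(), (vm_address_t)info, infoCnt * sizeof(*info));
        return 0;
    }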
*/ - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = (unsigned int)(num_zones); simple_unlock(&all_zones_lock); names_size = round_page(max_zones * sizeof *names); kr = kmem_alloc_pageable(ipc_kernel_map, - &names_addr, names_size, VM_KERN_MEMORY_IPC); - if (kr != KERN_SUCCESS) + &names_addr, names_size, VM_KERN_MEMORY_IPC); + if (kr != KERN_SUCCESS) { return kr; + } names = (mach_zone_name_t *) names_addr; info_size = round_page(max_zones * sizeof *info); kr = kmem_alloc_pageable(ipc_kernel_map, - &info_addr, info_size, VM_KERN_MEMORY_IPC); + &info_addr, info_size, VM_KERN_MEMORY_IPC); if (kr != KERN_SUCCESS) { kmem_free(ipc_kernel_map, - names_addr, names_size); + names_addr, names_size); return kr; } info = (mach_zone_info_t *) info_addr; @@ -4255,24 +4353,23 @@ mach_memory_info( *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info); *infoCntp = used_zones; - + num_info = 0; memory_info_addr = 0; - if (memoryInfop && memoryInfoCntp) - { - vm_map_copy_t copy; + if (memoryInfop && memoryInfoCntp) { + vm_map_copy_t copy; num_info = vm_page_diagnose_estimate(); memory_info_size = num_info * sizeof(*memory_info); memory_info_vmsize = round_page(memory_info_size); kr = kmem_alloc_pageable(ipc_kernel_map, - &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC); + &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC); if (kr != KERN_SUCCESS) { return kr; } kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, - VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); + VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); assert(kr == KERN_SUCCESS); memory_info = (mach_memory_info_t *) memory_info_addr; @@ -4280,9 +4377,9 @@ mach_memory_info( kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE); assert(kr == KERN_SUCCESS); - + kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr, - (vm_map_size_t)memory_info_size, TRUE, &copy); + (vm_map_size_t)memory_info_size, TRUE, &copy); assert(kr == KERN_SUCCESS); *memoryInfop = (mach_memory_info_t *) copy; @@ -4294,25 +4391,27 @@ mach_memory_info( kern_return_t mach_zone_info_for_zone( - host_priv_t host, - mach_zone_name_t name, - mach_zone_info_t *infop) + host_priv_t host, + mach_zone_name_t name, + mach_zone_info_t *infop) { unsigned int max_zones, i; zone_t zone_ptr; - if (host == HOST_NULL) + if (host == HOST_NULL) { return KERN_INVALID_HOST; + } #if CONFIG_DEBUGGER_FOR_ZONE_INFO - if (!PE_i_can_has_debugger(NULL)) + if (!PE_i_can_has_debugger(NULL)) { return KERN_INVALID_HOST; + } #endif if (infop == NULL) { return KERN_INVALID_ARGUMENT; } - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = (unsigned int)(num_zones); simple_unlock(&all_zones_lock); @@ -4341,15 +4440,17 @@ mach_zone_info_for_zone( kern_return_t mach_zone_info_for_largest_zone( - host_priv_t host, - mach_zone_name_t *namep, - mach_zone_info_t *infop) + host_priv_t host, + mach_zone_name_t *namep, + mach_zone_info_t *infop) { - if (host == HOST_NULL) + if (host == HOST_NULL) { return KERN_INVALID_HOST; + } #if CONFIG_DEBUGGER_FOR_ZONE_INFO - if (!PE_i_can_has_debugger(NULL)) + if (!PE_i_can_has_debugger(NULL)) { return KERN_INVALID_HOST; + } #endif if (namep == NULL || infop == NULL) { @@ -4369,7 +4470,7 @@ get_zones_collectable_bytes(void) uint64_t zones_collectable_bytes = 0; mach_zone_info_t zi; - simple_lock(&all_zones_lock); + 
simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = (unsigned int)(num_zones); simple_unlock(&all_zones_lock); @@ -4384,8 +4485,8 @@ get_zones_collectable_bytes(void) kern_return_t mach_zone_get_zlog_zones( - host_priv_t host, - mach_zone_name_array_t *namesp, + host_priv_t host, + mach_zone_name_array_t *namesp, mach_msg_type_number_t *namesCntp) { #if DEBUG || DEVELOPMENT @@ -4396,21 +4497,24 @@ mach_zone_get_zlog_zones( vm_offset_t names_addr; vm_size_t names_size; - if (host == HOST_NULL) + if (host == HOST_NULL) { return KERN_INVALID_HOST; + } - if (namesp == NULL || namesCntp == NULL) + if (namesp == NULL || namesCntp == NULL) { return KERN_INVALID_ARGUMENT; + } - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = (unsigned int)(num_zones); simple_unlock(&all_zones_lock); names_size = round_page(max_zones * sizeof *names); kr = kmem_alloc_pageable(ipc_kernel_map, - &names_addr, names_size, VM_KERN_MEMORY_IPC); - if (kr != KERN_SUCCESS) + &names_addr, names_size, VM_KERN_MEMORY_IPC); + if (kr != KERN_SUCCESS) { return kr; + } names = (mach_zone_name_t *) names_addr; zone_ptr = ZONE_NULL; @@ -4420,7 +4524,7 @@ mach_zone_get_zlog_zones( assert(z != ZONE_NULL); /* Copy out the zone name if zone logging is enabled */ - if(z->zlog_btlog) { + if (z->zlog_btlog) { get_zone_info(z, &names[logged_zones], NULL); logged_zones++; } @@ -4439,10 +4543,10 @@ mach_zone_get_zlog_zones( kern_return_t mach_zone_get_btlog_records( - host_priv_t host, - mach_zone_name_t name, - zone_btrecord_array_t *recsp, - mach_msg_type_number_t *recsCntp) + host_priv_t host, + mach_zone_name_t name, + zone_btrecord_array_t *recsp, + mach_msg_type_number_t *recsCntp) { #if DEBUG || DEVELOPMENT unsigned int max_zones, i, numrecs = 0; @@ -4452,13 +4556,15 @@ mach_zone_get_btlog_records( vm_offset_t recs_addr; vm_size_t recs_size; - if (host == HOST_NULL) + if (host == HOST_NULL) { return KERN_INVALID_HOST; + } - if (recsp == NULL || recsCntp == NULL) + if (recsp == NULL || recsCntp == NULL) { return KERN_INVALID_ARGUMENT; + } - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = (unsigned int)(num_zones); simple_unlock(&all_zones_lock); @@ -4498,7 +4604,7 @@ mach_zone_get_btlog_records( * (the btlog lock). So these pages need to be wired. 
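(A hedged sketch of the user-space caller, available on DEBUG/DEVELOPMENT kernels only; the target zone name is hypothetical, and the per-record layout is omitted here since it lives in mach_debug/zone_info.h.)

    mach_zone_name_t        target = { .mzn_name = "kalloc.256" };  /* hypothetical zone */
    zone_btrecord_array_t   recs = NULL;
    mach_msg_type_number_t  recsCnt = 0;

    kern_return_t kr = mach_zone_get_btlog_records(mach_host_self(), target,
        &recs, &recsCnt);
    if (kr == KERN_SUCCESS) {
        printf("%u backtrace records for %s\n", recsCnt, target.mzn_name);
        /* Records arrive as out-of-line data; release them when done. */
        vm_deallocate(mach_task_self(), (vm_address_t)recs, recsCnt * sizeof(*recs));
    }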
*/ kr = vm_map_wire_kernel(ipc_kernel_map, recs_addr, recs_addr + recs_size, - VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); + VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); assert(kr == KERN_SUCCESS); recs = (zone_btrecord_t *)recs_addr; @@ -4524,56 +4630,62 @@ mach_zone_get_btlog_records( kern_return_t mach_memory_info_check(void) { - mach_memory_info_t * memory_info; - mach_memory_info_t * info; - zone_t zone; - unsigned int idx, num_info, max_zones; - vm_offset_t memory_info_addr; + mach_memory_info_t * memory_info; + mach_memory_info_t * info; + zone_t zone; + unsigned int idx, num_info, max_zones; + vm_offset_t memory_info_addr; kern_return_t kr; - size_t memory_info_size, memory_info_vmsize; + size_t memory_info_size, memory_info_vmsize; uint64_t top_wired, zonestotal, total; num_info = vm_page_diagnose_estimate(); memory_info_size = num_info * sizeof(*memory_info); memory_info_vmsize = round_page(memory_info_size); kr = kmem_alloc(kernel_map, &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_DIAG); - assert (kr == KERN_SUCCESS); + assert(kr == KERN_SUCCESS); memory_info = (mach_memory_info_t *) memory_info_addr; vm_page_diagnose(memory_info, num_info, 0); - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = num_zones; simple_unlock(&all_zones_lock); - top_wired = total = zonestotal = 0; - for (idx = 0; idx < max_zones; idx++) - { + top_wired = total = zonestotal = 0; + for (idx = 0; idx < max_zones; idx++) { zone = &(zone_array[idx]); assert(zone != ZONE_NULL); lock_zone(zone); - zonestotal += ptoa_64(zone->page_count); + zonestotal += ptoa_64(zone->page_count); unlock_zone(zone); } - for (idx = 0; idx < num_info; idx++) - { + for (idx = 0; idx < num_info; idx++) { info = &memory_info[idx]; - if (!info->size) continue; - if (VM_KERN_COUNT_WIRED == info->site) top_wired = info->size; - if (VM_KERN_SITE_HIDE & info->flags) continue; - if (!(VM_KERN_SITE_WIRED & info->flags)) continue; + if (!info->size) { + continue; + } + if (VM_KERN_COUNT_WIRED == info->site) { + top_wired = info->size; + } + if (VM_KERN_SITE_HIDE & info->flags) { + continue; + } + if (!(VM_KERN_SITE_WIRED & info->flags)) { + continue; + } total += info->size; - } + } total += zonestotal; printf("vm_page_diagnose_check %qd of %qd, zones %qd, short 0x%qx\n", total, top_wired, zonestotal, top_wired - total); - kmem_free(kernel_map, memory_info_addr, memory_info_vmsize); + kmem_free(kernel_map, memory_info_addr, memory_info_vmsize); - return (kr); + return kr; } -extern boolean_t (* volatile consider_buffer_cache_collect)(int); +extern boolean_t(*volatile consider_buffer_cache_collect)(int); #endif /* DEBUG || DEVELOPMENT */ @@ -4581,8 +4693,9 @@ kern_return_t mach_zone_force_gc( host_t host) { - if (host == HOST_NULL) + if (host == HOST_NULL) { return KERN_INVALID_HOST; + } #if DEBUG || DEVELOPMENT /* Callout to buffer cache GC to drop elements in the apfs zones */ @@ -4591,7 +4704,7 @@ mach_zone_force_gc( } consider_zone_gc(FALSE); #endif /* DEBUG || DEVELOPMENT */ - return (KERN_SUCCESS); + return KERN_SUCCESS; } extern unsigned int stack_total; @@ -4607,13 +4720,13 @@ zone_find_largest(void) { unsigned int i; unsigned int max_zones; - zone_t the_zone; + zone_t the_zone; zone_t zone_largest; - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = num_zones; simple_unlock(&all_zones_lock); - + zone_largest = &(zone_array[0]); for (i = 0; i < max_zones; i++) { the_zone = &(zone_array[i]); @@ -4624,17 +4737,17 @@ 
zone_find_largest(void) return zone_largest; } -#if ZONE_DEBUG +#if ZONE_DEBUG /* should we care about locks here ? */ -#define zone_in_use(z) ( z->count || z->free_elements \ - || !queue_empty(&z->pages.all_free) \ - || !queue_empty(&z->pages.intermediate) \ - || (z->allows_foreign && !queue_empty(&z->pages.any_free_foreign))) +#define zone_in_use(z) ( z->count || z->free_elements \ + || !queue_empty(&z->pages.all_free) \ + || !queue_empty(&z->pages.intermediate) \ + || (z->allows_foreign && !queue_empty(&z->pages.any_free_foreign))) -#endif /* ZONE_DEBUG */ +#endif /* ZONE_DEBUG */ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -4644,146 +4757,159 @@ zone_find_largest(void) static uintptr_t * zone_copy_all_allocations_inqueue(zone_t z, queue_head_t * queue, uintptr_t * elems) { - struct zone_page_metadata *page_meta; - vm_offset_t free, elements; - vm_offset_t idx, numElements, freeCount, bytesAvail, metaSize; - - queue_iterate(queue, page_meta, struct zone_page_metadata *, pages) - { - elements = get_zone_page(page_meta); - bytesAvail = ptoa(page_meta->page_count); - freeCount = 0; - if (z->allows_foreign && !from_zone_map(elements, z->elem_size)) - { - metaSize = (sizeof(struct zone_page_metadata) + ZONE_ELEMENT_ALIGNMENT - 1) & ~(ZONE_ELEMENT_ALIGNMENT - 1); - bytesAvail -= metaSize; - elements += metaSize; - } - numElements = bytesAvail / z->elem_size; - // construct array of all possible elements - for (idx = 0; idx < numElements; idx++) - { - elems[idx] = INSTANCE_PUT(elements + idx * z->elem_size); - } - // remove from the array all free elements - free = (vm_offset_t)page_metadata_get_freelist(page_meta); - while (free) - { - // find idx of free element - for (idx = 0; (idx < numElements) && (elems[idx] != INSTANCE_PUT(free)); idx++) {} - assert(idx < numElements); - // remove it - bcopy(&elems[idx + 1], &elems[idx], (numElements - (idx + 1)) * sizeof(elems[0])); - numElements--; - freeCount++; - // next free element - vm_offset_t *primary = (vm_offset_t *) free; - free = *primary ^ zp_nopoison_cookie; - } - elems += numElements; - } - - return (elems); + struct zone_page_metadata *page_meta; + vm_offset_t free, elements; + vm_offset_t idx, numElements, freeCount, bytesAvail, metaSize; + + queue_iterate(queue, page_meta, struct zone_page_metadata *, pages) + { + elements = get_zone_page(page_meta); + bytesAvail = ptoa(page_meta->page_count); + freeCount = 0; + if (z->allows_foreign && !from_zone_map(elements, z->elem_size)) { + metaSize = (sizeof(struct zone_page_metadata) + ZONE_ELEMENT_ALIGNMENT - 1) & ~(ZONE_ELEMENT_ALIGNMENT - 1); + bytesAvail -= metaSize; + elements += metaSize; + } + numElements = bytesAvail / z->elem_size; + // construct array of all possible elements + for (idx = 0; idx < numElements; idx++) { + elems[idx] = INSTANCE_PUT(elements + idx * z->elem_size); + } + // remove from the array all free elements + free = (vm_offset_t)page_metadata_get_freelist(page_meta); + while (free) { + // find idx of free element + for (idx = 0; (idx < numElements) && (elems[idx] != INSTANCE_PUT(free)); idx++) { + } + assert(idx < numElements); + // remove it + bcopy(&elems[idx + 1], &elems[idx], (numElements - (idx + 1)) * sizeof(elems[0])); + numElements--; + freeCount++; + // next free element + vm_offset_t *primary = (vm_offset_t *) free; + free = *primary ^ zp_nopoison_cookie; + } + elems += numElements; + } + + return elems; } kern_return_t zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc, void * 
refCon) { - uintptr_t zbt[MAX_ZTRACE_DEPTH]; - zone_t zone; - uintptr_t * array; - uintptr_t * next; - uintptr_t element, bt; - uint32_t idx, count, found; - uint32_t btidx, btcount, nobtcount, btfound; - uint32_t elemSize; - uint64_t maxElems; + uintptr_t zbt[MAX_ZTRACE_DEPTH]; + zone_t zone; + uintptr_t * array; + uintptr_t * next; + uintptr_t element, bt; + uint32_t idx, count, found; + uint32_t btidx, btcount, nobtcount, btfound; + uint32_t elemSize; + uint64_t maxElems; unsigned int max_zones; kern_return_t kr; - simple_lock(&all_zones_lock); + simple_lock(&all_zones_lock, &zone_locks_grp); max_zones = num_zones; simple_unlock(&all_zones_lock); - for (idx = 0; idx < max_zones; idx++) - { - if (!strncmp(zoneName, zone_array[idx].zone_name, nameLen)) break; - } - if (idx >= max_zones) return (KERN_INVALID_NAME); - zone = &zone_array[idx]; - - elemSize = (uint32_t) zone->elem_size; - maxElems = ptoa(zone->page_count) / elemSize; - - if ((zone->alloc_size % elemSize) - && !leak_scan_debug_flag) return (KERN_INVALID_CAPABILITY); - - kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array, - maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG); - if (KERN_SUCCESS != kr) return (kr); - - lock_zone(zone); - - next = array; - next = zone_copy_all_allocations_inqueue(zone, &zone->pages.any_free_foreign, next); - next = zone_copy_all_allocations_inqueue(zone, &zone->pages.intermediate, next); - next = zone_copy_all_allocations_inqueue(zone, &zone->pages.all_used, next); - count = (uint32_t)(next - array); - - unlock_zone(zone); - - zone_leaks_scan(array, count, (uint32_t)zone->elem_size, &found); - assert(found <= count); - - for (idx = 0; idx < count; idx++) - { - element = array[idx]; - if (kInstanceFlagReferenced & element) continue; - element = INSTANCE_PUT(element) & ~kInstanceFlags; - } - - if (zone->zlog_btlog && !corruption_debug_flag) - { - // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found - btlog_copy_backtraces_for_elements(zone->zlog_btlog, array, &count, elemSize, proc, refCon); - } - - for (nobtcount = idx = 0; idx < count; idx++) - { - element = array[idx]; - if (!element) continue; - if (kInstanceFlagReferenced & element) continue; - element = INSTANCE_PUT(element) & ~kInstanceFlags; - - // see if we can find any backtrace left in the element - btcount = (typeof(btcount)) (zone->elem_size / sizeof(uintptr_t)); - if (btcount >= MAX_ZTRACE_DEPTH) btcount = MAX_ZTRACE_DEPTH - 1; - for (btfound = btidx = 0; btidx < btcount; btidx++) - { - bt = ((uintptr_t *)element)[btcount - 1 - btidx]; - if (!VM_KERNEL_IS_SLID(bt)) break; - zbt[btfound++] = bt; - } - if (btfound) (*proc)(refCon, 1, elemSize, &zbt[0], btfound); - else nobtcount++; - } - if (nobtcount) - { - // fake backtrace when we found nothing - zbt[0] = (uintptr_t) &zalloc; - (*proc)(refCon, nobtcount, elemSize, &zbt[0], 1); - } - - kmem_free(kernel_map, (vm_offset_t) array, maxElems * sizeof(uintptr_t)); - - return (KERN_SUCCESS); + for (idx = 0; idx < max_zones; idx++) { + if (!strncmp(zoneName, zone_array[idx].zone_name, nameLen)) { + break; + } + } + if (idx >= max_zones) { + return KERN_INVALID_NAME; + } + zone = &zone_array[idx]; + + elemSize = (uint32_t) zone->elem_size; + maxElems = ptoa(zone->page_count) / elemSize; + + if ((zone->alloc_size % elemSize) + && !leak_scan_debug_flag) { + return KERN_INVALID_CAPABILITY; + } + + kr = kmem_alloc_kobject(kernel_map, (vm_offset_t *) &array, + maxElems * sizeof(uintptr_t), VM_KERN_MEMORY_DIAG); + if (KERN_SUCCESS != kr) { + return kr; + 
} + + lock_zone(zone); + + next = array; + next = zone_copy_all_allocations_inqueue(zone, &zone->pages.any_free_foreign, next); + next = zone_copy_all_allocations_inqueue(zone, &zone->pages.intermediate, next); + next = zone_copy_all_allocations_inqueue(zone, &zone->pages.all_used, next); + count = (uint32_t)(next - array); + + unlock_zone(zone); + + zone_leaks_scan(array, count, (uint32_t)zone->elem_size, &found); + assert(found <= count); + + for (idx = 0; idx < count; idx++) { + element = array[idx]; + if (kInstanceFlagReferenced & element) { + continue; + } + element = INSTANCE_PUT(element) & ~kInstanceFlags; + } + + if (zone->zlog_btlog && !corruption_debug_flag) { + // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found + btlog_copy_backtraces_for_elements(zone->zlog_btlog, array, &count, elemSize, proc, refCon); + } + + for (nobtcount = idx = 0; idx < count; idx++) { + element = array[idx]; + if (!element) { + continue; + } + if (kInstanceFlagReferenced & element) { + continue; + } + element = INSTANCE_PUT(element) & ~kInstanceFlags; + + // see if we can find any backtrace left in the element + btcount = (typeof(btcount))(zone->elem_size / sizeof(uintptr_t)); + if (btcount >= MAX_ZTRACE_DEPTH) { + btcount = MAX_ZTRACE_DEPTH - 1; + } + for (btfound = btidx = 0; btidx < btcount; btidx++) { + bt = ((uintptr_t *)element)[btcount - 1 - btidx]; + if (!VM_KERNEL_IS_SLID(bt)) { + break; + } + zbt[btfound++] = bt; + } + if (btfound) { + (*proc)(refCon, 1, elemSize, &zbt[0], btfound); + } else { + nobtcount++; + } + } + if (nobtcount) { + // fake backtrace when we found nothing + zbt[0] = (uintptr_t) &zalloc; + (*proc)(refCon, nobtcount, elemSize, &zbt[0], 1); + } + + kmem_free(kernel_map, (vm_offset_t) array, maxElems * sizeof(uintptr_t)); + + return KERN_SUCCESS; } boolean_t kdp_is_in_zone(void *addr, const char *zone_name) { zone_t z; - return (zone_element_size(addr, &z) && !strcmp(z->zone_name, zone_name)); + return zone_element_size(addr, &z) && !strcmp(z->zone_name, zone_name); } boolean_t @@ -4793,7 +4919,7 @@ run_zone_test(void) void * test_ptr; zone_t test_zone; - simple_lock(&zone_test_lock); + simple_lock(&zone_test_lock, &zone_locks_grp); if (!zone_test_running) { zone_test_running = TRUE; } else { @@ -4846,7 +4972,7 @@ run_zone_test(void) printf("run_zone_test: Test passed\n"); - simple_lock(&zone_test_lock); + simple_lock(&zone_test_lock, &zone_locks_grp); zone_test_running = FALSE; simple_unlock(&zone_test_lock); diff --git a/osfmk/kern/zalloc.h b/osfmk/kern/zalloc.h index b45020f11..412390316 100644 --- a/osfmk/kern/zalloc.h +++ b/osfmk/kern/zalloc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2014 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -62,9 +62,9 @@ * */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -#ifndef _KERN_ZALLOC_H_ +#ifndef _KERN_ZALLOC_H_ #define _KERN_ZALLOC_H_ #include @@ -72,7 +72,7 @@ #include #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -89,10 +89,10 @@ #include #endif -#if CONFIG_GZALLOC +#if CONFIG_GZALLOC typedef struct gzalloc_data { - uint32_t gzfc_index; - vm_offset_t *gzfc; + uint32_t gzfc_index; + vm_offset_t *gzfc; } gzalloc_data_t; #endif @@ -108,45 +108,45 @@ struct zone_free_element; struct zone_page_metadata; struct zone { -#ifdef CONFIG_ZCACHE +#ifdef CONFIG_ZCACHE struct zone_cache *zcache; -#endif /* CONFIG_ZCACHE */ - struct zone_free_element *free_elements; /* free elements directly linked */ +#endif /* CONFIG_ZCACHE */ + struct zone_free_element *free_elements; /* free elements directly linked */ struct { - queue_head_t any_free_foreign; /* foreign pages crammed into zone */ - queue_head_t all_free; - queue_head_t intermediate; - queue_head_t all_used; - } pages; /* list of zone_page_metadata structs, which maintain per-page free element lists */ - int count; /* Number of elements used now */ - int countfree; /* Number of free elements */ - int count_all_free_pages; /* Number of pages collectable by GC */ - lck_attr_t lock_attr; /* zone lock attribute */ - decl_lck_mtx_data(,lock) /* zone lock */ - lck_mtx_ext_t lock_ext; /* placeholder for indirect mutex */ - vm_size_t cur_size; /* current memory utilization */ - vm_size_t max_size; /* how large can this zone grow */ - vm_size_t elem_size; /* size of an element */ - vm_size_t alloc_size; /* size used for more memory */ - uint64_t page_count __attribute__((aligned(8))); /* number of pages used by this zone */ - uint64_t sum_count; /* count of allocs (life of zone) */ + queue_head_t any_free_foreign; /* foreign pages crammed into zone */ + queue_head_t all_free; + queue_head_t intermediate; + queue_head_t all_used; + } pages; /* list of zone_page_metadata structs, which maintain 
per-page free element lists */ + int count; /* Number of elements used now */ + int countfree; /* Number of free elements */ + int count_all_free_pages; /* Number of pages collectable by GC */ + lck_attr_t lock_attr; /* zone lock attribute */ + decl_lck_mtx_data(, lock) /* zone lock */ + lck_mtx_ext_t lock_ext; /* placeholder for indirect mutex */ + vm_size_t cur_size; /* current memory utilization */ + vm_size_t max_size; /* how large can this zone grow */ + vm_size_t elem_size; /* size of an element */ + vm_size_t alloc_size; /* size used for more memory */ + uint64_t page_count __attribute__((aligned(8))); /* number of pages used by this zone */ + uint64_t sum_count; /* count of allocs (life of zone) */ uint32_t - /* boolean_t */ exhaustible :1, /* (F) merely return if empty? */ - /* boolean_t */ collectable :1, /* (F) garbage collect empty pages */ - /* boolean_t */ expandable :1, /* (T) expand zone (with message)? */ + /* boolean_t */ exhaustible :1, /* (F) merely return if empty? */ + /* boolean_t */ collectable :1, /* (F) garbage collect empty pages */ + /* boolean_t */ expandable :1, /* (T) expand zone (with message)? */ /* boolean_t */ allows_foreign :1, /* (F) allow non-zalloc space */ - /* boolean_t */ doing_alloc_without_vm_priv:1, /* is zone expanding now via a non-vm_privileged thread? */ + /* boolean_t */ doing_alloc_without_vm_priv:1, /* is zone expanding now via a non-vm_privileged thread? */ /* boolean_t */ doing_alloc_with_vm_priv:1, /* is zone expanding now via a vm_privileged thread? */ - /* boolean_t */ waiting :1, /* is thread waiting for expansion? */ - /* boolean_t */ async_pending :1, /* asynchronous allocation pending? */ - /* boolean_t */ zleak_on :1, /* Are we collecting allocation information? */ - /* boolean_t */ caller_acct :1, /* do we account allocation/free to the caller? */ + /* boolean_t */ waiting :1, /* is thread waiting for expansion? */ + /* boolean_t */ async_pending :1, /* asynchronous allocation pending? */ + /* boolean_t */ zleak_on :1, /* Are we collecting allocation information? */ + /* boolean_t */ caller_acct :1, /* do we account allocation/free to the caller? */ /* boolean_t */ noencrypt :1, - /* boolean_t */ no_callout :1, - /* boolean_t */ async_prio_refill :1, - /* boolean_t */ gzalloc_exempt :1, - /* boolean_t */ alignment_required :1, - /* boolean_t */ zone_logging :1, /* Enable zone logging for this zone. */ + /* boolean_t */ no_callout :1, + /* boolean_t */ async_prio_refill :1, + /* boolean_t */ gzalloc_exempt :1, + /* boolean_t */ alignment_required :1, + /* boolean_t */ zone_logging :1, /* Enable zone logging for this zone. 
*/ /* boolean_t */ zone_replenishing :1, /* boolean_t */ kasan_quarantine :1, /* boolean_t */ tags :1, @@ -157,24 +157,24 @@ struct zone { /* boolean_t */ cpu_cache_enabled :1, /* future */ _reserved :3; - int index; /* index into zone_info arrays for this zone */ - const char *zone_name; /* a name for the zone */ + int index; /* index into zone_info arrays for this zone */ + const char *zone_name; /* a name for the zone */ #if CONFIG_ZLEAKS - uint32_t zleak_capture; /* per-zone counter for capturing every N allocations */ + uint32_t zleak_capture; /* per-zone counter for capturing every N allocations */ #endif /* CONFIG_ZLEAKS */ uint32_t zp_count; /* counter for poisoning every N frees */ - vm_size_t prio_refill_watermark; - thread_t zone_replenish_thread; -#if CONFIG_GZALLOC - gzalloc_data_t gz; + vm_size_t prio_refill_watermark; + thread_t zone_replenish_thread; +#if CONFIG_GZALLOC + gzalloc_data_t gz; #endif /* CONFIG_GZALLOC */ #if KASAN_ZALLOC vm_size_t kasan_redzone; #endif - btlog_t *zlog_btlog; /* zone logging structure to hold stacks and element references to those stacks. */ + btlog_t *zlog_btlog; /* zone logging structure to hold stacks and element references to those stacks. */ }; /* @@ -183,8 +183,8 @@ struct zone { */ typedef struct zinfo_usage_store_t { /* These fields may be updated atomically, and so must be 8 byte aligned */ - uint64_t alloc __attribute__((aligned(8))); /* allocation counter */ - uint64_t free __attribute__((aligned(8))); /* free counter */ + uint64_t alloc __attribute__((aligned(8))); /* allocation counter */ + uint64_t free __attribute__((aligned(8))); /* free counter */ } zinfo_usage_store_t; /* @@ -199,46 +199,46 @@ extern uint64_t get_zones_collectable_bytes(void); * consider_jetsams is set to TRUE. To avoid deadlocks, we only pass a value of TRUE from within the * vm_pageout_garbage_collect thread. */ -extern void zone_gc(boolean_t consider_jetsams); -extern void consider_zone_gc(boolean_t consider_jetsams); -extern void drop_free_elements(zone_t z); +extern void zone_gc(boolean_t consider_jetsams); +extern void consider_zone_gc(boolean_t consider_jetsams); +extern void drop_free_elements(zone_t z); /* Debug logging for zone-map-exhaustion jetsams. 
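(A sketch of how the two helpers declared just below might be used together on that path; the 95% threshold is illustrative, not the kernel's actual policy.)

    uint64_t current_size, capacity, zone_size;
    char     zone_name[MAX_ZONE_NAME];

    get_zone_map_size(&current_size, &capacity);
    if (current_size >= (capacity / 100) * 95) {  /* hypothetical near-exhaustion check */
        get_largest_zone_info(zone_name, sizeof(zone_name), &zone_size);
        printf("zone map: %llu of %llu bytes; largest zone %s holds %llu bytes\n",
            current_size, capacity, zone_name, zone_size);
    }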
*/ -extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity); -extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size); +extern void get_zone_map_size(uint64_t *current_size, uint64_t *capacity); +extern void get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size); /* Bootstrap zone module (create zone zone) */ -extern void zone_bootstrap(void); +extern void zone_bootstrap(void); /* Init zone module */ -extern void zone_init( - vm_size_t map_size); +extern void zone_init( + vm_size_t map_size); /* Stack use statistics */ -extern void stack_fake_zone_init(int zone_index); -extern void stack_fake_zone_info( - int *count, - vm_size_t *cur_size, - vm_size_t *max_size, - vm_size_t *elem_size, - vm_size_t *alloc_size, - uint64_t *sum_size, - int *collectable, - int *exhaustable, - int *caller_acct); - -#if ZONE_DEBUG - -extern void zone_debug_enable( - zone_t z); - -extern void zone_debug_disable( - zone_t z); +extern void stack_fake_zone_init(int zone_index); +extern void stack_fake_zone_info( + int *count, + vm_size_t *cur_size, + vm_size_t *max_size, + vm_size_t *elem_size, + vm_size_t *alloc_size, + uint64_t *sum_size, + int *collectable, + int *exhaustable, + int *caller_acct); + +#if ZONE_DEBUG + +extern void zone_debug_enable( + zone_t z); + +extern void zone_debug_disable( + zone_t z); #define zone_debug_enabled(z) z->active_zones.next -#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y)) -#define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16) -#endif /* ZONE_DEBUG */ +#define ROUNDUP(x, y) ((((x)+(y)-1)/(y))*(y)) +#define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16) +#endif /* ZONE_DEBUG */ extern unsigned int num_zones; extern struct zone zone_array[]; @@ -258,97 +258,97 @@ extern struct zone zone_array[]; */ #define ZONE_CHUNK_MAXELEMENTS (UINT16_MAX) -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS /* Item definitions for zalloc/zinit/zone_change */ -#define Z_EXHAUST 1 /* Make zone exhaustible */ -#define Z_COLLECT 2 /* Make zone collectable */ -#define Z_EXPAND 3 /* Make zone expandable */ -#define Z_FOREIGN 4 /* Allow collectable zone to contain foreign elements */ -#define Z_CALLERACCT 5 /* Account alloc/free against the caller */ -#define Z_NOENCRYPT 6 /* Don't encrypt zone during hibernation */ -#define Z_NOCALLOUT 7 /* Don't asynchronously replenish the zone via callouts */ +#define Z_EXHAUST 1 /* Make zone exhaustible */ +#define Z_COLLECT 2 /* Make zone collectable */ +#define Z_EXPAND 3 /* Make zone expandable */ +#define Z_FOREIGN 4 /* Allow collectable zone to contain foreign elements */ +#define Z_CALLERACCT 5 /* Account alloc/free against the caller */ +#define Z_NOENCRYPT 6 /* Don't encrypt zone during hibernation */ +#define Z_NOCALLOUT 7 /* Don't asynchronously replenish the zone via callouts */ #define Z_ALIGNMENT_REQUIRED 8 -#define Z_GZALLOC_EXEMPT 9 /* Not tracked in guard allocation mode */ -#define Z_KASAN_QUARANTINE 10 /* Allow zone elements to be quarantined on free */ -#ifdef XNU_KERNEL_PRIVATE -#define Z_TAGS_ENABLED 11 /* Store tags */ +#define Z_GZALLOC_EXEMPT 9 /* Not tracked in guard allocation mode */ +#define Z_KASAN_QUARANTINE 10 /* Allow zone elements to be quarantined on free */ +#ifdef XNU_KERNEL_PRIVATE +#define Z_TAGS_ENABLED 11 /* Store tags */ #endif /* XNU_KERNEL_PRIVATE */ -#define Z_CACHING_ENABLED 12 /*enable and initialize per-cpu caches for the zone*/ +#define Z_CACHING_ENABLED 12 /*enable and initialize per-cpu caches 
for the zone*/ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE extern vm_offset_t zone_map_min_address; extern vm_offset_t zone_map_max_address; /* free an element with no regard for gzalloc, zleaks, or kasan*/ -extern void zfree_direct( zone_t zone, - vm_offset_t elem); +extern void zfree_direct( zone_t zone, + vm_offset_t elem); /* attempts to allocate an element with no regard for gzalloc, zleaks, or kasan*/ -extern void * zalloc_attempt( zone_t zone); +extern void * zalloc_attempt( zone_t zone); /* Non-waiting for memory version of zalloc */ -extern void * zalloc_nopagewait( - zone_t zone); +extern void * zalloc_nopagewait( + zone_t zone); /* selective version of zalloc */ -extern void * zalloc_canblock( - zone_t zone, - boolean_t canblock); +extern void * zalloc_canblock( + zone_t zone, + boolean_t canblock); /* selective version of zalloc */ -extern void * zalloc_canblock_tag( - zone_t zone, - boolean_t canblock, - vm_size_t reqsize, - vm_tag_t tag); +extern void * zalloc_canblock_tag( + zone_t zone, + boolean_t canblock, + vm_size_t reqsize, + vm_tag_t tag); /* Get from zone free list */ -extern void * zget( - zone_t zone); +extern void * zget( + zone_t zone); /* Fill zone with memory */ -extern void zcram( - zone_t zone, - vm_offset_t newmem, - vm_size_t size); +extern void zcram( + zone_t zone, + vm_offset_t newmem, + vm_size_t size); /* Initially fill zone with specified number of elements */ -extern int zfill( - zone_t zone, - int nelem); +extern int zfill( + zone_t zone, + int nelem); -extern void zone_prio_refill_configure(zone_t, vm_size_t); +extern void zone_prio_refill_configure(zone_t, vm_size_t); /* See above/top of file. Z_* definitions moved so they would be usable by kexts */ /* Preallocate space for zone from zone map */ -extern void zprealloc( - zone_t zone, - vm_size_t size); +extern void zprealloc( + zone_t zone, + vm_size_t size); -extern integer_t zone_free_count( - zone_t zone); +extern integer_t zone_free_count( + zone_t zone); -extern vm_size_t zone_element_size( - void *addr, - zone_t *z); +extern vm_size_t zone_element_size( + void *addr, + zone_t *z); -/* +/* * Structure for keeping track of a backtrace, used for leak detection. * This is in the .h file because it is used during panic, see kern/debug.c * A non-zero size indicates that the trace is in use. */ struct ztrace { - vm_size_t zt_size; /* How much memory are all the allocations referring to this trace taking up? */ - uint32_t zt_depth; /* depth of stack (0 to MAX_ZTRACE_DEPTH) */ - void* zt_stack[MAX_ZTRACE_DEPTH]; /* series of return addresses from OSBacktrace */ - uint32_t zt_collisions; /* How many times did a different stack land here while it was occupied? */ - uint32_t zt_hit_count; /* for determining effectiveness of hash function */ + vm_size_t zt_size; /* How much memory are all the allocations referring to this trace taking up? */ + uint32_t zt_depth; /* depth of stack (0 to MAX_ZTRACE_DEPTH) */ + void* zt_stack[MAX_ZTRACE_DEPTH]; /* series of return addresses from OSBacktrace */ + uint32_t zt_collisions; /* How many times did a different stack land here while it was occupied? 
*/ + uint32_t zt_hit_count; /* for determining effectiveness of hash function */ }; #if CONFIG_ZLEAKS @@ -362,7 +362,7 @@ extern vm_size_t zleak_per_zone_tracking_threshold; extern int get_zleak_state(void); -#endif /* CONFIG_ZLEAKS */ +#endif /* CONFIG_ZLEAKS */ #ifndef VM_MAX_TAG_ZONES #error MAX_TAG_ZONES @@ -380,17 +380,17 @@ extern uintptr_t hash_mix(uintptr_t); extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t); extern uint32_t hashaddr(uintptr_t, uint32_t); -#define lock_zone(zone) \ -MACRO_BEGIN \ - lck_mtx_lock_spin_always(&(zone)->lock); \ +#define lock_zone(zone) \ +MACRO_BEGIN \ + lck_mtx_lock_spin_always(&(zone)->lock); \ MACRO_END -#define unlock_zone(zone) \ -MACRO_BEGIN \ - lck_mtx_unlock(&(zone)->lock); \ +#define unlock_zone(zone) \ +MACRO_BEGIN \ + lck_mtx_unlock(&(zone)->lock); \ MACRO_END -#if CONFIG_GZALLOC +#if CONFIG_GZALLOC void gzalloc_init(vm_size_t); void gzalloc_zone_init(zone_t); void gzalloc_configure(void); @@ -408,7 +408,7 @@ void zlog_btlog_lock(__unused void *); void zlog_btlog_unlock(__unused void *); #ifdef MACH_KERNEL_PRIVATE -#define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */ +#define MAX_ZONE_NAME 32 /* max length of a zone name we can take from the boot-args */ int track_this_zone(const char *zonename, const char *logname); #endif @@ -417,40 +417,54 @@ extern boolean_t run_zone_test(void); extern vm_size_t zone_element_info(void *addr, vm_tag_t * ptag); #endif /* DEBUG || DEVELOPMENT */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ /* Allocate from zone */ -extern void * zalloc( - zone_t zone); +extern void * zalloc( + zone_t zone); /* Non-blocking version of zalloc */ -extern void * zalloc_noblock( - zone_t zone); +extern void * zalloc_noblock( + zone_t zone); /* Free zone element */ -extern void zfree( - zone_t zone, - void *elem); +extern void zfree( + zone_t zone, + void *elem); + +#ifdef XNU_KERNEL_PRIVATE +#define zfree(zone, elem) \ +_Pragma("clang diagnostic push") \ +_Pragma("clang diagnostic ignored \"-Wshadow\"") \ + do { \ + _Static_assert(sizeof (elem) == sizeof (void *) || sizeof (elem) == sizeof (mach_vm_address_t), "elem is not a pointer"); \ + void *__tmp_addr = (void *) elem; \ + zone_t __tmp_zone = zone; \ + elem = (__typeof__(elem)) NULL; \ + (zfree)(__tmp_zone, __tmp_addr); \ + } while (0) \ +_Pragma("clang diagnostic pop") +#endif /* XNU_KERNEL_PRIVATE */ /* Create zone */ -extern zone_t zinit( - vm_size_t size, /* the size of an element */ - vm_size_t maxmem, /* maximum memory to use */ - vm_size_t alloc, /* allocation size */ - const char *name); /* a name for the zone */ +extern zone_t zinit( + vm_size_t size, /* the size of an element */ + vm_size_t maxmem, /* maximum memory to use */ + vm_size_t alloc, /* allocation size */ + const char *name); /* a name for the zone */ /* Change zone parameters */ -extern void zone_change( - zone_t zone, - unsigned int item, - boolean_t value); +extern void zone_change( + zone_t zone, + unsigned int item, + boolean_t value); /* Destroy the zone */ -extern void zdestroy( - zone_t zone); +extern void zdestroy( + zone_t zone); __END_DECLS -#endif /* _KERN_ZALLOC_H_ */ +#endif /* _KERN_ZALLOC_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/kern/zcache.c b/osfmk/kern/zcache.c index dab30e61e..0ca209fe2 100644 --- a/osfmk/kern/zcache.c +++ b/osfmk/kern/zcache.c @@ -40,51 +40,51 @@ #include #endif -#define DEFAULT_MAGAZINE_SIZE 8 /* Default number of elements for all magazines allocated from 
the magazine_zone */ -#define DEFAULT_DEPOT_SIZE 8 /* Default number of elements for the array zcc_depot_list */ -#define ZCC_MAX_CPU_CACHE_LINE_SIZE 64 /* We should use a platform specific macro for this in the future, right now this is the max cache line size for all platforms*/ +#define DEFAULT_MAGAZINE_SIZE 8 /* Default number of elements for all magazines allocated from the magazine_zone */ +#define DEFAULT_DEPOT_SIZE 8 /* Default number of elements for the array zcc_depot_list */ +#define ZCC_MAX_CPU_CACHE_LINE_SIZE 64 /* We should use a platform specific macro for this in the future, right now this is the max cache line size for all platforms*/ -lck_grp_t zcache_locks_grp; /* lock group for depot_lock */ -zone_t magazine_zone; /* zone to allocate zcc_magazine structs from */ -uint16_t magazine_element_count = 0; /* Size of array in magazine determined by boot-arg or default */ -uint16_t depot_element_count = 0; /* Size of depot lists determined by boot-arg or default */ -bool zone_cache_ready = FALSE; /* Flag to check if zone caching has been set up by zcache_bootstrap */ -uintptr_t zcache_canary = 0; /* Canary used for the caching layer to prevent UaF attacks */ +lck_grp_t zcache_locks_grp; /* lock group for depot_lock */ +zone_t magazine_zone; /* zone to allocate zcc_magazine structs from */ +uint16_t magazine_element_count = 0; /* Size of array in magazine determined by boot-arg or default */ +uint16_t depot_element_count = 0; /* Size of depot lists determined by boot-arg or default */ +bool zone_cache_ready = FALSE; /* Flag to check if zone caching has been set up by zcache_bootstrap */ +uintptr_t zcache_canary = 0; /* Canary used for the caching layer to prevent UaF attacks */ /* The zcc_magazine is used as a stack to store cached zone elements. These * sets of elements can be moved around to perform bulk operations. -*/ + */ struct zcc_magazine { - uint32_t zcc_magazine_index; /* Used as a stack pointer to acess elements in the array */ - uint32_t zcc_magazine_capacity; /* Number of pointers able to be stored in the zcc_elements array */ - void *zcc_elements[0]; /* Array of pointers to objects */ + uint32_t zcc_magazine_index; /* Used as a stack pointer to acess elements in the array */ + uint32_t zcc_magazine_capacity; /* Number of pointers able to be stored in the zcc_elements array */ + void *zcc_elements[0]; /* Array of pointers to objects */ }; -/* Each CPU will use one of these to store its elements -*/ +/* Each CPU will use one of these to store its elements + */ struct zcc_per_cpu_cache { - struct zcc_magazine *current; /* Magazine from which we will always try to allocate from and free to first */ - struct zcc_magazine *previous; /* Dedicated magazine for a quick reload and to prevent thrashing wen we swap with the depot */ -} __attribute__(( aligned(ZCC_MAX_CPU_CACHE_LINE_SIZE) )); /* we want to align this to a cache line size so it does not thrash when multiple cpus want to access their caches in paralell */ + struct zcc_magazine *current; /* Magazine from which we will always try to allocate from and free to first */ + struct zcc_magazine *previous; /* Dedicated magazine for a quick reload and to prevent thrashing wen we swap with the depot */ +} __attribute__((aligned(ZCC_MAX_CPU_CACHE_LINE_SIZE))); /* we want to align this to a cache line size so it does not thrash when multiple cpus want to access their caches in paralell */ /* * The depot layer can be invalid while zone_gc() is draining it out. - * During that time, the CPU caches are active. 
For CPU magazine allocs and + * During that time, the CPU caches are active. For CPU magazine allocs and * frees, the caching layer reaches directly into the zone allocator. */ -#define ZCACHE_DEPOT_INVALID -1 -#define zcache_depot_available(zcache) (zcache->zcc_depot_index != ZCACHE_DEPOT_INVALID) +#define ZCACHE_DEPOT_INVALID -1 +#define zcache_depot_available(zcache) (zcache->zcc_depot_index != ZCACHE_DEPOT_INVALID) /* This is the basic struct to take care of cahing and is included within - * the zone. -*/ + * the zone. + */ struct zone_cache { - lck_mtx_t zcc_depot_lock; /* Lock for the depot layer of caching */ - struct zcc_per_cpu_cache zcc_per_cpu_caches[MAX_CPUS]; /* An array of caches, one for each CPU */ - int zcc_depot_index; /* marks the point in the array where empty magazines begin */ - struct zcc_magazine *zcc_depot_list[0]; /* Stores full and empty magazines in the depot layer */ + lck_mtx_t zcc_depot_lock; /* Lock for the depot layer of caching */ + struct zcc_per_cpu_cache zcc_per_cpu_caches[MAX_CPUS]; /* An array of caches, one for each CPU */ + int zcc_depot_index; /* marks the point in the array where empty magazines begin */ + struct zcc_magazine *zcc_depot_list[0]; /* Stores full and empty magazines in the depot layer */ }; @@ -109,7 +109,9 @@ void zcache_canary_validate(zone_t zone, void *addr); * Description: returns whether or not the zone caches are ready to use * */ -bool zcache_ready(void){ +bool +zcache_ready(void) +{ return zone_cache_ready; } @@ -122,10 +124,12 @@ bool zcache_ready(void){ * system is single threaded so we don't have to take the lock. * */ -void zcache_init_marked_zones(void){ +void +zcache_init_marked_zones(void) +{ unsigned int i; - for(i = 0; i < num_zones; i ++){ - if(zone_array[i].cpu_cache_enable_when_ready){ + for (i = 0; i < num_zones; i++) { + if (zone_array[i].cpu_cache_enable_when_ready) { zcache_init(&zone_array[i]); zone_array[i].cpu_cache_enable_when_ready = FALSE; } @@ -140,21 +144,24 @@ void zcache_init_marked_zones(void){ * boot-args or default values * */ -void zcache_bootstrap(void) +void +zcache_bootstrap(void) { /* use boot-arg for custom magazine size*/ - if (! PE_parse_boot_argn("zcc_magazine_element_count", &magazine_element_count, sizeof (uint16_t))) + if (!PE_parse_boot_argn("zcc_magazine_element_count", &magazine_element_count, sizeof(uint16_t))) { magazine_element_count = DEFAULT_MAGAZINE_SIZE; + } int magazine_size = sizeof(struct zcc_magazine) + magazine_element_count * sizeof(void *); - magazine_zone = zinit(magazine_size, 100000 * magazine_size , magazine_size, "zcc_magazine_zone"); + magazine_zone = zinit(magazine_size, 100000 * magazine_size, magazine_size, "zcc_magazine_zone"); assert(magazine_zone != NULL); /* use boot-arg for custom depot size*/ - if (! 
PE_parse_boot_argn("zcc_depot_element_count", &depot_element_count, sizeof (uint16_t))) + if (!PE_parse_boot_argn("zcc_depot_element_count", &depot_element_count, sizeof(uint16_t))) { depot_element_count = DEFAULT_DEPOT_SIZE; + } lck_grp_init(&zcache_locks_grp, "zcc_depot_lock", LCK_GRP_ATTR_NULL); @@ -175,31 +182,32 @@ void zcache_bootstrap(void) * Parameters: zone pointer to zone on which to iniitalize caching * */ - void zcache_init(zone_t zone) - { - int i; /* used as index in for loops */ - vm_size_t total_size; /* Used for allocating the zone_cache struct with the proper size of depot list */ - struct zone_cache *temp_cache; /* Temporary variable to initialize a zone_cache before assigning to the specified zone */ +void +zcache_init(zone_t zone) +{ + int i; /* used as index in for loops */ + vm_size_t total_size; /* Used for allocating the zone_cache struct with the proper size of depot list */ + struct zone_cache *temp_cache; /* Temporary variable to initialize a zone_cache before assigning to the specified zone */ /* Allocate chunk of memory for all structs */ total_size = sizeof(struct zone_cache) + (depot_element_count * sizeof(void *)); - + temp_cache = (struct zone_cache *) kalloc(total_size); - /* Initialize a cache for every CPU */ - for (i = 0; i < MAX_CPUS; i++) { - temp_cache->zcc_per_cpu_caches[i].current = (struct zcc_magazine *)zalloc(magazine_zone); - temp_cache->zcc_per_cpu_caches[i].previous = (struct zcc_magazine *)zalloc(magazine_zone); + /* Initialize a cache for every CPU */ + for (i = 0; i < MAX_CPUS; i++) { + temp_cache->zcc_per_cpu_caches[i].current = (struct zcc_magazine *)zalloc(magazine_zone); + temp_cache->zcc_per_cpu_caches[i].previous = (struct zcc_magazine *)zalloc(magazine_zone); - assert(temp_cache->zcc_per_cpu_caches[i].current != NULL && temp_cache->zcc_per_cpu_caches[i].previous != NULL); + assert(temp_cache->zcc_per_cpu_caches[i].current != NULL && temp_cache->zcc_per_cpu_caches[i].previous != NULL); - zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].current, magazine_element_count); - zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].previous, magazine_element_count); - } + zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].current, magazine_element_count); + zcache_mag_init(temp_cache->zcc_per_cpu_caches[i].previous, magazine_element_count); + } - /* Initialize the lock on the depot layer */ - lck_mtx_init(&(temp_cache->zcc_depot_lock), &zcache_locks_grp, LCK_ATTR_NULL); + /* Initialize the lock on the depot layer */ + lck_mtx_init(&(temp_cache->zcc_depot_lock), &zcache_locks_grp, LCK_ATTR_NULL); /* Initialize empty magazines in the depot list */ for (i = 0; i < depot_element_count; i++) { @@ -212,13 +220,13 @@ void zcache_bootstrap(void) temp_cache->zcc_depot_index = 0; - lock_zone(zone); + lock_zone(zone); zone->zcache = temp_cache; - /* Set flag to know caching is enabled */ - zone->cpu_cache_enabled = TRUE; - unlock_zone(zone); - return; - } + /* Set flag to know caching is enabled */ + zone->cpu_cache_enabled = TRUE; + unlock_zone(zone); + return; +} /* * zcache_drain_depot @@ -232,13 +240,14 @@ void zcache_bootstrap(void) * Returns: None * */ -void zcache_drain_depot(zone_t zone) +void +zcache_drain_depot(zone_t zone) { struct zone_cache *zcache = zone->zcache; int drain_depot_index = 0; /* - * Grab the current depot list from the zone cache. If it has full magazines, + * Grab the current depot list from the zone cache. If it has full magazines, * mark the depot as invalid and drain it. 
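(Caller side, for orientation: a sketch of how the garbage collector is expected to invoke this, guarded by the per-zone caching flag; `z` stands for a zone_t being collected.)

    if (z->cpu_cache_enabled) {
        zcache_drain_depot(z);  /* hand the depot's full magazines back to the zone */
    }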
*/ lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock)); @@ -253,8 +262,9 @@ void zcache_drain_depot(zone_t zone) lck_mtx_unlock(&(zcache->zcc_depot_lock)); /* Now drain the full magazines in the depot */ - for (int i = 0; i < drain_depot_index; i++) + for (int i = 0; i < drain_depot_index; i++) { zcache_mag_drain(zone, zcache->zcc_depot_list[i]); + } lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock)); /* Mark the depot as available again */ @@ -275,11 +285,12 @@ void zcache_drain_depot(zone_t zone) * * Precondition: check that caching is enabled for zone */ -bool zcache_free_to_cpu_cache(zone_t zone, void *addr) +bool +zcache_free_to_cpu_cache(zone_t zone, void *addr) { - int curcpu; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */ - struct zone_cache *zcache; /* local storage of the zone's cache */ - struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */ + int curcpu; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */ + struct zone_cache *zcache; /* local storage of the zone's cache */ + struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */ disable_preemption(); curcpu = current_processor()->cpu_id; @@ -293,7 +304,7 @@ bool zcache_free_to_cpu_cache(zone_t zone, void *addr) /* If able, swap current and previous magazine and retry */ zcache_swap_magazines(&per_cpu_cache->previous, &per_cpu_cache->current); goto free_to_current; - } else{ + } else { lck_mtx_lock_spin_always(&(zcache->zcc_depot_lock)); if (zcache_depot_available(zcache) && (zcache->zcc_depot_index < depot_element_count)) { /* If able, rotate in a new empty magazine from the depot and retry */ @@ -304,7 +315,7 @@ bool zcache_free_to_cpu_cache(zone_t zone, void *addr) lck_mtx_unlock(&(zcache->zcc_depot_lock)); /* Attempt to free an entire magazine of elements */ zcache_mag_drain(zone, per_cpu_cache->current); - if(zcache_mag_has_space(per_cpu_cache->current)){ + if (zcache_mag_has_space(per_cpu_cache->current)) { goto free_to_current; } } @@ -338,12 +349,13 @@ free_to_current: * * Precondition: check that caching is enabled for zone */ -vm_offset_t zcache_alloc_from_cpu_cache(zone_t zone) +vm_offset_t +zcache_alloc_from_cpu_cache(zone_t zone) { - int curcpu; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */ - void *ret = NULL; /* Points to the element which will be returned */ - struct zone_cache *zcache; /* local storage of the zone's cache */ - struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */ + int curcpu; /* Current cpu is used to index into array of zcc_per_cpu_cache structs */ + void *ret = NULL; /* Points to the element which will be returned */ + struct zone_cache *zcache; /* local storage of the zone's cache */ + struct zcc_per_cpu_cache *per_cpu_cache; /* locally store the current per_cpu_cache */ disable_preemption(); curcpu = current_processor()->cpu_id; @@ -367,7 +379,7 @@ vm_offset_t zcache_alloc_from_cpu_cache(zone_t zone) } lck_mtx_unlock(&(zcache->zcc_depot_lock)); /* Attempt to allocate an entire magazine of elements */ - if(zcache_mag_fill(zone, per_cpu_cache->current)){ + if (zcache_mag_fill(zone, per_cpu_cache->current)) { goto allocate_from_current; } } @@ -398,7 +410,8 @@ allocate_from_current: * Parameters: mag pointer to magazine to initialize * */ -void zcache_mag_init(struct zcc_magazine *mag, int count) +void +zcache_mag_init(struct zcc_magazine *mag, int count) { mag->zcc_magazine_index = 0; mag->zcc_magazine_capacity = 
count; @@ -409,22 +422,23 @@ void zcache_mag_init(struct zcc_magazine *mag, int count) * zcache_mag_fill * * Description: fills a magazine with as many elements as the zone can give - * without blocking to carve out more memory + * without blocking to carve out more memory * * Parameters: zone zone from which to allocate * mag pointer to magazine to fill * * Return: True if able to allocate elements, false is mag is still empty */ -bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag) +bool +zcache_mag_fill(zone_t zone, struct zcc_magazine *mag) { assert(mag->zcc_magazine_index == 0); void* elem = NULL; uint32_t i; lock_zone(zone); - for(i = mag->zcc_magazine_index; i < mag->zcc_magazine_capacity; i ++){ + for (i = mag->zcc_magazine_index; i < mag->zcc_magazine_capacity; i++) { elem = zalloc_attempt(zone); - if(elem) { + if (elem) { zcache_canary_add(zone, elem); zcache_mag_push(mag, elem); #if KASAN_ZALLOC @@ -435,7 +449,7 @@ bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag) } } unlock_zone(zone); - if (i == 0){ + if (i == 0) { return FALSE; } return TRUE; @@ -450,14 +464,15 @@ bool zcache_mag_fill(zone_t zone, struct zcc_magazine *mag) * mag pointer to magazine to empty * */ -void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag) +void +zcache_mag_drain(zone_t zone, struct zcc_magazine *mag) { assert(mag->zcc_magazine_index == mag->zcc_magazine_capacity); lock_zone(zone); - while(mag->zcc_magazine_index > 0){ + while (mag->zcc_magazine_index > 0) { uint32_t index = --mag->zcc_magazine_index; zcache_canary_validate(zone, mag->zcc_elements[index]); - zfree_direct(zone,(vm_offset_t)mag->zcc_elements[index]); + zfree_direct(zone, (vm_offset_t)mag->zcc_elements[index]); mag->zcc_elements[mag->zcc_magazine_index] = 0; } unlock_zone(zone); @@ -477,9 +492,10 @@ void zcache_mag_drain(zone_t zone, struct zcc_magazine *mag) * * Precondition: must check that magazine is not empty before calling */ -void *zcache_mag_pop(struct zcc_magazine *mag) +void * +zcache_mag_pop(struct zcc_magazine *mag) { - void *elem; + void *elem; assert(zcache_mag_has_elements(mag)); elem = mag->zcc_elements[--mag->zcc_magazine_index]; /* Ensure pointer to element cannot be accessed after we pop it */ @@ -502,10 +518,11 @@ void *zcache_mag_pop(struct zcc_magazine *mag) * * Precondition: must check that magazine is not full before calling */ -void zcache_mag_push(struct zcc_magazine *mag, void *elem) +void +zcache_mag_push(struct zcc_magazine *mag, void *elem) { assert(zcache_mag_has_space(mag)); - mag->zcc_elements[mag->zcc_magazine_index ++] = elem; + mag->zcc_elements[mag->zcc_magazine_index++] = elem; } @@ -519,9 +536,10 @@ void zcache_mag_push(struct zcc_magazine *mag, void *elem) * Returns: true if magazine is full * */ -bool zcache_mag_has_space(struct zcc_magazine *mag) +bool +zcache_mag_has_space(struct zcc_magazine *mag) { - return (mag->zcc_magazine_index < mag->zcc_magazine_capacity); + return mag->zcc_magazine_index < mag->zcc_magazine_capacity; } @@ -535,9 +553,10 @@ bool zcache_mag_has_space(struct zcc_magazine *mag) * Returns: true if magazine has no elements * */ -bool zcache_mag_has_elements(struct zcc_magazine *mag) +bool +zcache_mag_has_elements(struct zcc_magazine *mag) { - return (mag->zcc_magazine_index > 0); + return mag->zcc_magazine_index > 0; } @@ -549,7 +568,8 @@ bool zcache_mag_has_elements(struct zcc_magazine *mag) * Parameters: a pointer to first pointer * b pointer to second pointer */ -void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b) +void 
+zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b) { struct zcc_magazine *temp = *a; *a = *b; @@ -567,12 +587,13 @@ void zcache_swap_magazines(struct zcc_magazine **a, struct zcc_magazine **b) * * Precondition: Check that the depot list has full elements */ -void zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache) +void +zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache) { /* Loads a full magazine from which we can allocate */ assert(zcache_depot_available(zcache)); assert(zcache->zcc_depot_index > 0); - zcache->zcc_depot_index --; + zcache->zcc_depot_index--; zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]); } @@ -587,26 +608,28 @@ void zcache_mag_depot_swap_for_alloc(struct zone_cache *zcache, struct zcc_per_c * * Precondition: Check that the depot list has empty elements */ -void zcache_mag_depot_swap_for_free(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache) +void +zcache_mag_depot_swap_for_free(struct zone_cache *zcache, struct zcc_per_cpu_cache *cache) { /* Loads an empty magazine into which we can free */ assert(zcache_depot_available(zcache)); assert(zcache->zcc_depot_index < depot_element_count); zcache_swap_magazines(&cache->current, &zcache->zcc_depot_list[zcache->zcc_depot_index]); - zcache->zcc_depot_index ++; + zcache->zcc_depot_index++; } /* * zcache_canary_add * - * Description: Adds a canary to an element by putting zcache_canary at the first - * and last location of the element + * Description: Adds a canary to an element by putting zcache_canary at the first + * and last location of the element * * Parameters: zone zone for the element - * addr element address to add canary to + * addr element address to add canary to * */ -void zcache_canary_add(zone_t zone, void *element) +void +zcache_canary_add(zone_t zone, void *element) { vm_offset_t *primary = (vm_offset_t *)element; vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t)); @@ -616,14 +639,15 @@ void zcache_canary_add(zone_t zone, void *element) /* * zcache_canary_validate * - * Description: Validates an element of the zone cache to make sure it still contains the zone - * caching canary. + * Description: Validates an element of the zone cache to make sure it still contains the zone + * caching canary. * * Parameters: zone zone for the element - * addr element address to validate + * addr element address to validate * */ -void zcache_canary_validate(zone_t zone, void *element) +void +zcache_canary_validate(zone_t zone, void *element) { vm_offset_t *primary = (vm_offset_t *)element; vm_offset_t *backup = (vm_offset_t *)((vm_offset_t)primary + zone->elem_size - sizeof(vm_offset_t)); @@ -631,12 +655,12 @@ void zcache_canary_validate(zone_t zone, void *element) vm_offset_t primary_value = (*primary ^ (uintptr_t)element); if (primary_value != zcache_canary) { panic("Zone cache element was used after free! Element %p was corrupted at beginning; Expected %p but found %p; canary %p", - element, (void *)(zcache_canary ^ (uintptr_t)element) , (void *)(*primary), (void *)zcache_canary); + element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*primary), (void *)zcache_canary); } - + vm_offset_t backup_value = (*backup ^ (uintptr_t)element); if (backup_value != zcache_canary) { panic("Zone cache element was used after free! 
Element %p was corrupted at end; Expected %p but found %p; canary %p", - element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*backup), (void *)zcache_canary); + element, (void *)(zcache_canary ^ (uintptr_t)element), (void *)(*backup), (void *)zcache_canary); } } diff --git a/osfmk/kern/zcache.h b/osfmk/kern/zcache.h index 6919aa5b8..0c6fb19d5 100644 --- a/osfmk/kern/zcache.h +++ b/osfmk/kern/zcache.h @@ -26,13 +26,13 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Below is a diagram of the caching system. This design is based of the + * Below is a diagram of the caching system. This design is based of the * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams. It is divided into 3 * layers: the Per-cpu Layer, the Depot Layer, and the Zone Allocator. The * Per-CPU and Depot layers store elements using arrays we call magazines. * - * Magazines function like a stack (we push and pop elements) and can be + * Magazines function like a stack (we push and pop elements) and can be * moved around for bulk operations. * _________ _________ _________ * | CPU 1 | | CPU 2 | | CPU 3 | @@ -59,7 +59,7 @@ * requires no locking, so we can access multiple CPU's caches concurrently. * This is the main source of the speedup. * - * We have two magazines here to prevent thrashing when swapping magazines + * We have two magazines here to prevent thrashing when swapping magazines * with the depot layer. If a certain pattern of alloc and free are called we * can waste a lot of time swapping magazines to and from the depot layer. We * prevent this by dividing the per-cpu cache into two separate magazines. @@ -71,7 +71,7 @@ * point at the first empty magazine. Since this layer is per-zone, it allows us * to balance the cache between cpus, but does require taking a lock. * - * When neither the current nor previous magazine for a given CPU can + * When neither the current nor previous magazine for a given CPU can * satisfy the free or allocation, we look to the depot layer. If there are * magazines in the depot that can satisfy the free or allocation we swap * that magazine into the current position. In the example below, to allocate on @@ -97,7 +97,7 @@ * try to allocate an entire magazine of elements or free an entire magazine of * elements at once. * - * Caching must be enabled explicitly, by calling zone_change() with the + * Caching must be enabled explicitly, by calling zone_change() with the * Z_CACHING_ENABLED flag, for every zone you want to cache elements for. Zones * which are good candidates for this are ones with highly contended zone locks. 
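The lookup order this comment block describes (per-CPU current magazine, then previous, then the depot) is exactly what the zcache_alloc_from_cpu_cache hunks above implement. A minimal user-space sketch of that fast path follows; the types and names are invented stand-ins, and it omits the preemption disabling and zcc_depot_lock that the kernel version requires:

#include <stddef.h>

#define MAG_CAPACITY 16

struct magazine {
    int index;                      /* count of cached elements */
    void *elems[MAG_CAPACITY];
};

struct cpu_cache {
    struct magazine *current;
    struct magazine *previous;
};

static void
swap_mags(struct magazine **a, struct magazine **b)
{
    struct magazine *tmp = *a;
    *a = *b;
    *b = tmp;
}

/*
 * Allocation fast path: current magazine, then previous, then a full
 * magazine rotated in from the depot.  In the kernel, the first two
 * steps run with preemption disabled and the depot step takes a lock.
 */
static void *
cache_alloc(struct cpu_cache *cc, struct magazine **depot, int *depot_full)
{
    if (cc->current->index > 0) {
        return cc->current->elems[--cc->current->index];
    }
    if (cc->previous->index > 0) {
        swap_mags(&cc->current, &cc->previous);
        return cc->current->elems[--cc->current->index];
    }
    if (*depot_full > 0) {
        swap_mags(&cc->current, &depot[--*depot_full]);
        return cc->current->elems[--cc->current->index];
    }
    return NULL;    /* fall back to filling a magazine from the zone */
}

The free path is symmetric: it looks for space instead of elements, rotates empty magazines in from the depot, and as a last resort drains a full magazine back into the zone, as zcache_free_to_cpu_cache does above.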
* @@ -106,7 +106,7 @@ * * * Some factors can be tuned by boot-arg: - * zcc_enable_for_zone_name name of a single zone to enable caching for + * zcc_enable_for_zone_name name of a single zone to enable caching for * (replace space characters with '.') * * zcc_magazine_element_count integer value for magazine size used for all @@ -116,7 +116,7 @@ * magazines to store in the depot, if N specified * depot will have N full and N empty magazines * (default 16 used if not specified) -*/ + */ #include #include @@ -127,7 +127,7 @@ * Description: returns whether or not the zone caches are ready to use * */ -bool zcache_ready(void); +bool zcache_ready(void); /* @@ -136,7 +136,7 @@ bool zcache_ready(void); * Description: initializes zone to allocate magazines from * */ -void zcache_bootstrap(void); +void zcache_bootstrap(void); /* @@ -147,7 +147,7 @@ void zcache_bootstrap(void); * Parameters: zone pointer to zone on which to iniitalize caching * */ -void zcache_init(zone_t zone); +void zcache_init(zone_t zone); /* @@ -162,7 +162,7 @@ void zcache_init(zone_t zone); * * Precondition: check that caching is enabled for zone */ -bool zcache_free_to_cpu_cache(zone_t zone, void *addr); +bool zcache_free_to_cpu_cache(zone_t zone, void *addr); /* @@ -176,7 +176,7 @@ bool zcache_free_to_cpu_cache(zone_t zone, void *addr); * * Precondition: check that caching is enabled for zone */ -vm_offset_t zcache_alloc_from_cpu_cache(zone_t zone); +vm_offset_t zcache_alloc_from_cpu_cache(zone_t zone); /* * zcache_drain_depot @@ -189,4 +189,4 @@ vm_offset_t zcache_alloc_from_cpu_cache(zone_t zone); * Returns: None * */ -void zcache_drain_depot(zone_t zone); +void zcache_drain_depot(zone_t zone); diff --git a/osfmk/kperf/action.c b/osfmk/kperf/action.c index 94afda342..2ff723f93 100644 --- a/osfmk/kperf/action.c +++ b/osfmk/kperf/action.c @@ -94,7 +94,7 @@ kperf_action_has_task(unsigned int actionid) return false; } - return (actionv[actionid - 1].sample & SAMPLER_TASK_MASK); + return actionv[actionid - 1].sample & SAMPLER_TASK_MASK; } bool @@ -104,27 +104,27 @@ kperf_action_has_thread(unsigned int actionid) return false; } - return (actionv[actionid - 1].sample & SAMPLER_THREAD_MASK); + return actionv[actionid - 1].sample & SAMPLER_THREAD_MASK; } static void kperf_system_memory_log(void) { BUF_DATA(PERF_MI_SYS_DATA, (uintptr_t)vm_page_free_count, - (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count, - (uintptr_t)(vm_page_active_count + vm_page_inactive_count + - vm_page_speculative_count)); + (uintptr_t)vm_page_wire_count, (uintptr_t)vm_page_external_count, + (uintptr_t)(vm_page_active_count + vm_page_inactive_count + + vm_page_speculative_count)); BUF_DATA(PERF_MI_SYS_DATA_2, (uintptr_t)vm_page_anonymous_count, - (uintptr_t)vm_page_internal_count, - (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions, - (uintptr_t)VM_PAGE_COMPRESSOR_COUNT); + (uintptr_t)vm_page_internal_count, + (uintptr_t)vm_pageout_vminfo.vm_pageout_compressions, + (uintptr_t)VM_PAGE_COMPRESSOR_COUNT); } static kern_return_t kperf_sample_internal(struct kperf_sample *sbuf, - struct kperf_context *context, - unsigned sample_what, unsigned sample_flags, - unsigned actionid, uint32_t ucallstack_depth) + struct kperf_context *context, + unsigned sample_what, unsigned sample_flags, + unsigned actionid, uint32_t ucallstack_depth) { int pended_ucallstack = 0; int pended_th_dispatch = 0; @@ -149,7 +149,7 @@ kperf_sample_internal(struct kperf_sample *sbuf, } assert((sample_flags & (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY)) - != 
(SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY)); + != (SAMPLE_FLAG_THREAD_ONLY | SAMPLE_FLAG_TASK_ONLY)); if (sample_flags & SAMPLE_FLAG_THREAD_ONLY) { sample_what &= SAMPLER_THREAD_MASK; } @@ -204,7 +204,7 @@ kperf_sample_internal(struct kperf_sample *sbuf, if (sample_what & SAMPLER_KSTACK) { if (sample_flags & SAMPLE_FLAG_CONTINUATION) { kperf_continuation_sample(&(sbuf->kcallstack), context); - /* outside of interrupt context, backtrace the current thread */ + /* outside of interrupt context, backtrace the current thread */ } else if (sample_flags & SAMPLE_FLAG_NON_INTERRUPT) { kperf_backtrace_sample(&(sbuf->kcallstack), context); } else { @@ -254,8 +254,7 @@ log_sample: /* avoid logging if this sample only pended samples */ if (sample_flags & SAMPLE_FLAG_PEND_USER && - !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) - { + !(sample_what & ~(SAMPLER_USTACK | SAMPLER_TH_DISPATCH))) { return SAMPLE_CONTINUE; } @@ -265,7 +264,7 @@ log_sample: boolean_t enabled = ml_set_interrupts_enabled(FALSE); BUF_DATA(PERF_GEN_EVENT | DBG_FUNC_START, sample_what, - actionid, userdata, sample_flags); + actionid, userdata, sample_flags); if (sample_flags & SAMPLE_FLAG_SYSTEM) { if (sample_what & SAMPLER_SYS_MEM) { @@ -342,8 +341,8 @@ log_sample_end: /* Translate actionid into sample bits and take a sample */ kern_return_t kperf_sample(struct kperf_sample *sbuf, - struct kperf_context *context, - unsigned actionid, unsigned sample_flags) + struct kperf_context *context, + unsigned actionid, unsigned sample_flags) { /* work out what to sample, if anything */ if ((actionid > actionc) || (actionid == 0)) { @@ -363,8 +362,8 @@ kperf_sample(struct kperf_sample *sbuf, /* do the actual sample operation */ return kperf_sample_internal(sbuf, context, sample_what, - sample_flags, actionid, - actionv[actionid - 1].ucallstack_depth); + sample_flags, actionid, + actionv[actionid - 1].ucallstack_depth); } void diff --git a/osfmk/kperf/action.h b/osfmk/kperf/action.h index be150c401..420720b86 100644 --- a/osfmk/kperf/action.h +++ b/osfmk/kperf/action.h @@ -54,10 +54,10 @@ struct kperf_context; #define SAMPLER_TK_INFO (1U << 13) #define SAMPLER_TASK_MASK (SAMPLER_MEMINFO | SAMPLER_TK_SNAPSHOT | \ - SAMPLER_TK_INFO) + SAMPLER_TK_INFO) #define SAMPLER_THREAD_MASK (SAMPLER_TH_INFO | SAMPLER_TH_SNAPSHOT | \ - SAMPLER_KSTACK | SAMPLER_USTACK | SAMPLER_PMC_THREAD | \ - SAMPLER_TH_SCHEDULING | SAMPLER_TH_DISPATCH | SAMPLER_TH_INSCYC) + SAMPLER_KSTACK | SAMPLER_USTACK | SAMPLER_PMC_THREAD | \ + SAMPLER_TH_SCHEDULING | SAMPLER_TH_DISPATCH | SAMPLER_TH_INSCYC) /* flags for sample calls */ @@ -82,9 +82,9 @@ struct kperf_context; /* Take a sample into "sbuf" using current thread "cur_thread" */ kern_return_t kperf_sample(struct kperf_sample *sbuf, - struct kperf_context *ctx, - unsigned actionid, - unsigned sample_flags); + struct kperf_context *ctx, + unsigned actionid, + unsigned sample_flags); /* Whether the action provided samples non-system values. 
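kperf_action_has_task() and kperf_action_has_thread() in the action.c hunks above classify an action by intersecting its sampler bits with these per-domain masks; dropping the parentheses around the return expression is purely stylistic, since the nonzero int converts to bool implicitly. A toy illustration with made-up sampler values:

#include <assert.h>
#include <stdbool.h>

#define SAMPLER_TH_INFO  (1U << 0)
#define SAMPLER_KSTACK   (1U << 2)
#define SAMPLER_MEMINFO  (1U << 6)

#define THREAD_MASK (SAMPLER_TH_INFO | SAMPLER_KSTACK)
#define TASK_MASK   (SAMPLER_MEMINFO)

static bool
has_thread_samplers(unsigned sample)
{
    return sample & THREAD_MASK;    /* any set bit converts to true */
}

int
main(void)
{
    assert(has_thread_samplers(SAMPLER_KSTACK | SAMPLER_MEMINFO));
    assert(!has_thread_samplers(SAMPLER_MEMINFO));
    return 0;
}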
*/ bool kperf_action_has_non_system(unsigned actionid); diff --git a/osfmk/kperf/arm/kperf_mp.c b/osfmk/kperf/arm/kperf_mp.c index ee298723e..92bae56de 100644 --- a/osfmk/kperf/arm/kperf_mp.c +++ b/osfmk/kperf/arm/kperf_mp.c @@ -44,12 +44,12 @@ kperf_signal_handler(unsigned int cpu_number) uint64_t cpu_mask = UINT64_C(1) << cpu_number; /* find all the timers that caused a signal */ - for(int i = 0; i < (int)kperf_timerc; i++) { + for (int i = 0; i < (int)kperf_timerc; i++) { uint64_t pending_cpus; struct kperf_timer *timer = &kperf_timerv[i]; pending_cpus = atomic_fetch_and_explicit(&timer->pending_cpus, - ~cpu_mask, memory_order_relaxed); + ~cpu_mask, memory_order_relaxed); if (pending_cpus & cpu_mask) { kperf_ipi_handler(timer); } @@ -70,9 +70,8 @@ kperf_mp_broadcast_other_running(struct kperf_timer *timer) /* do not IPI processors that are not scheduling threads */ if (processor == PROCESSOR_NULL || - processor->state != PROCESSOR_RUNNING || - processor->active_thread == THREAD_NULL) - { + processor->state != PROCESSOR_RUNNING || + processor->active_thread == THREAD_NULL) { continue; } @@ -83,12 +82,11 @@ kperf_mp_broadcast_other_running(struct kperf_timer *timer) /* nor processors that have not responded to the last broadcast */ uint64_t already_pending = atomic_fetch_or_explicit( - &timer->pending_cpus, i_bit, memory_order_relaxed); - if (already_pending & i_bit) - { + &timer->pending_cpus, i_bit, memory_order_relaxed); + if (already_pending & i_bit) { #if DEVELOPMENT || DEBUG atomic_fetch_add_explicit(&kperf_pending_ipis, 1, - memory_order_relaxed); + memory_order_relaxed); #endif /* DEVELOPMENT || DEBUG */ continue; } diff --git a/osfmk/kperf/ast.h b/osfmk/kperf/ast.h index 65d5044bc..d43ce88b4 100644 --- a/osfmk/kperf/ast.h +++ b/osfmk/kperf/ast.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
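The kperf_mp.c hunks above use pending_cpus as a 64-bit atomic bitset to deduplicate IPIs: the broadcaster sets a CPU's bit with atomic_fetch_or_explicit and skips the IPI when the bit was already set, and each signal handler clears its own bit with atomic_fetch_and_explicit. A self-contained sketch of that handshake, keeping the patch's relaxed memory ordering:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t pending_cpus;

/* Broadcaster: returns true if an IPI should actually be sent. */
static bool
mark_pending(unsigned cpu)
{
    uint64_t bit = UINT64_C(1) << cpu;
    uint64_t old = atomic_fetch_or_explicit(&pending_cpus, bit,
        memory_order_relaxed);
    return (old & bit) == 0;    /* already pending: skip the IPI */
}

/* Handler: clears its bit and reports whether this CPU was signaled. */
static bool
ack_pending(unsigned cpu)
{
    uint64_t bit = UINT64_C(1) << cpu;
    uint64_t old = atomic_fetch_and_explicit(&pending_cpus, ~bit,
        memory_order_relaxed);
    return (old & bit) != 0;
}

The fetch-or on the send side is what makes the dedup race-free: only the caller that actually flipped the bit sends the IPI, so a slow responder never accumulates duplicate interrupts.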
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/kperf/buffer.h b/osfmk/kperf/buffer.h index fb24d4097..224240eb8 100644 --- a/osfmk/kperf/buffer.h +++ b/osfmk/kperf/buffer.h @@ -152,8 +152,7 @@ #define PERF_MI_SYS_DATA_2 PERF_MI_CODE(3) /* error sub-codes for trace data */ -enum -{ +enum{ ERR_TASK, ERR_THREAD, ERR_PID, @@ -217,4 +216,3 @@ extern int kperf_debug_level; #define BUF_VERB2(EVENTID, A1, A2, A3, A4) BUF_VERB_INT(EVENTID, A1, A2, 0, 0) #define BUF_VERB3(EVENTID, A1, A2, A3, A4) BUF_VERB_INT(EVENTID, A1, A2, A3, 0) #define BUF_VERB4(EVENTID, A1, A2, A3, A4) BUF_VERB_INT(EVENTID, A1, A2, A3, A4) - diff --git a/osfmk/kperf/callstack.c b/osfmk/kperf/callstack.c index 7c93e8137..228cd9fe0 100644 --- a/osfmk/kperf/callstack.c +++ b/osfmk/kperf/callstack.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -61,17 +61,17 @@ callstack_fixup_user(struct callstack *cs, thread_t thread) user_64 = is_saved_state64(state); if (user_64) { - sp_user = saved_state64(state)->isf.rsp; + sp_user = saved_state64(state)->isf.rsp; } else { sp_user = saved_state32(state)->uesp; } if (thread == current_thread()) { (void)copyin(sp_user, (char *)&fixup_val, - user_64 ? sizeof(uint64_t) : sizeof(uint32_t)); + user_64 ? sizeof(uint64_t) : sizeof(uint32_t)); } else { (void)vm_map_read_user(get_task_map(get_threadtask(thread)), sp_user, - &fixup_val, user_64 ? sizeof(uint64_t) : sizeof(uint32_t)); + &fixup_val, user_64 ? 
sizeof(uint64_t) : sizeof(uint32_t)); } #elif defined(__arm64__) || defined(__arm__) @@ -135,7 +135,7 @@ interrupted_kernel_sp_value(uintptr_t *sp_val) bottom = current_thread()->kernel_stack; top = bottom + kernel_stack_size; if (sp >= bottom && sp < top) { - return KERN_FAILURE; + return KERN_FAILURE; } *sp_val = *(uintptr_t *)sp; @@ -242,7 +242,7 @@ kperf_backtrace_sample(struct callstack *cs, struct kperf_context *context) BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_START, 1); cs->nframes = backtrace_frame((uintptr_t *)&(cs->frames), cs->nframes - 1, - context->starting_fp); + context->starting_fp); if (cs->nframes > 0) { cs->flags |= CALLSTACK_VALID; /* @@ -257,8 +257,8 @@ kperf_backtrace_sample(struct callstack *cs, struct kperf_context *context) } kern_return_t chudxnu_thread_get_callstack64_kperf(thread_t thread, - uint64_t *callStack, mach_msg_type_number_t *count, - boolean_t user_only); + uint64_t *callStack, mach_msg_type_number_t *count, + boolean_t user_only); void kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context) @@ -273,7 +273,7 @@ kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context) assert(thread != NULL); BUF_INFO(PERF_CS_KSAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread), - cs->nframes); + cs->nframes); cs->flags = CALLSTACK_KERNEL; @@ -285,7 +285,7 @@ kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context) assert(thread == current_thread()); cs->flags |= CALLSTACK_KERNEL_WORDS; cs->nframes = backtrace_interrupted((uintptr_t *)cs->frames, - cs->nframes - 1); + cs->nframes - 1); if (cs->nframes != 0) { callstack_fixup_interrupted(cs); } @@ -296,7 +296,7 @@ kperf_kcallstack_sample(struct callstack *cs, struct kperf_context *context) */ kern_return_t kr; kr = chudxnu_thread_get_callstack64_kperf(thread, cs->frames, - &cs->nframes, FALSE); + &cs->nframes, FALSE); if (kr == KERN_SUCCESS) { cs->flags |= CALLSTACK_VALID; } else if (kr == KERN_RESOURCE_SHORTAGE) { @@ -330,12 +330,12 @@ kperf_ucallstack_sample(struct callstack *cs, struct kperf_context *context) assert(thread != NULL); BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_START, (uintptr_t)thread_tid(thread), - cs->nframes); + cs->nframes); cs->flags = 0; err = backtrace_thread_user(thread, (uintptr_t *)cs->frames, - cs->nframes - 1, &cs->nframes, &user_64); + cs->nframes - 1, &cs->nframes, &user_64); cs->flags |= CALLSTACK_KERNEL_WORDS; if (user_64) { cs->flags |= CALLSTACK_64BIT; @@ -350,7 +350,7 @@ kperf_ucallstack_sample(struct callstack *cs, struct kperf_context *context) } BUF_INFO(PERF_CS_USAMPLE | DBG_FUNC_END, (uintptr_t)thread_tid(thread), - cs->flags, cs->nframes); + cs->flags, cs->nframes); } static inline uintptr_t @@ -400,20 +400,20 @@ callstack_log(struct callstack *cs, uint32_t hcode, uint32_t dcode) for (unsigned int i = 0; i < n; i++) { unsigned int j = i * 4; BUF_DATA(dcode, - scrub_word(frames, nframes, j + 0, kern), - scrub_word(frames, nframes, j + 1, kern), - scrub_word(frames, nframes, j + 2, kern), - scrub_word(frames, nframes, j + 3, kern)); + scrub_word(frames, nframes, j + 0, kern), + scrub_word(frames, nframes, j + 1, kern), + scrub_word(frames, nframes, j + 2, kern), + scrub_word(frames, nframes, j + 3, kern)); } } else { for (unsigned int i = 0; i < n; i++) { uint64_t *frames = cs->frames; unsigned int j = i * 4; BUF_DATA(dcode, - scrub_frame(frames, nframes, j + 0), - scrub_frame(frames, nframes, j + 1), - scrub_frame(frames, nframes, j + 2), - scrub_frame(frames, nframes, j + 3)); + scrub_frame(frames, nframes, j + 0), + 
scrub_frame(frames, nframes, j + 1), + scrub_frame(frames, nframes, j + 2), + scrub_frame(frames, nframes, j + 3)); } } @@ -444,16 +444,16 @@ kperf_ucallstack_pend(struct kperf_context * context, uint32_t depth) static kern_return_t chudxnu_kern_read(void *dstaddr, vm_offset_t srcaddr, vm_size_t size) { - return ((ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ? - KERN_SUCCESS : KERN_FAILURE); + return (ml_nofault_copy(srcaddr, (vm_offset_t)dstaddr, size) == size) ? + KERN_SUCCESS : KERN_FAILURE; } static kern_return_t chudxnu_task_read( - task_t task, - void *kernaddr, - uint64_t usraddr, - vm_size_t size) + task_t task, + void *kernaddr, + uint64_t usraddr, + vm_size_t size) { //ppc version ported to arm kern_return_t ret = KERN_SUCCESS; @@ -463,13 +463,9 @@ chudxnu_task_read( } if (current_task() == task) { - thread_t cur_thr = current_thread(); - vm_offset_t recover_handler = cur_thr->recover; - if (copyin(usraddr, kernaddr, size)) { ret = KERN_FAILURE; } - cur_thr->recover = recover_handler; } else { vm_map_t map = get_task_map(task); ret = vm_map_read_user(map, usraddr, kernaddr, size); @@ -481,8 +477,9 @@ chudxnu_task_read( static inline uint64_t chudxnu_vm_unslide( uint64_t ptr, int kaddr ) { - if (!kaddr) + if (!kaddr) { return ptr; + } return VM_KERNEL_UNSLIDE(ptr); } @@ -492,38 +489,39 @@ chudxnu_vm_unslide( uint64_t ptr, int kaddr ) #define CS_FLAG_EXTRASP 1 // capture extra sp register static kern_return_t chudxnu_thread_get_callstack64_internal( - thread_t thread, - uint64_t *callStack, - mach_msg_type_number_t *count, - boolean_t user_only, + thread_t thread, + uint64_t *callStack, + mach_msg_type_number_t *count, + boolean_t user_only, int flags) { kern_return_t kr; - task_t task; - uint64_t currPC=0ULL, currLR=0ULL, currSP=0ULL; - uint64_t prevPC = 0ULL; - uint32_t kernStackMin = thread->kernel_stack; - uint32_t kernStackMax = kernStackMin + kernel_stack_size; + task_t task; + uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL; + uint64_t prevPC = 0ULL; + uint32_t kernStackMin = thread->kernel_stack; + uint32_t kernStackMax = kernStackMin + kernel_stack_size; uint64_t *buffer = callStack; - uint32_t frame[2]; + uint32_t frame[2]; int bufferIndex = 0; int bufferMaxIndex = 0; boolean_t supervisor = FALSE; struct arm_saved_state *state = NULL; - uint32_t *fp=NULL, *nextFramePointer=NULL, *topfp=NULL; - uint64_t pc = 0ULL; + uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL; + uint64_t pc = 0ULL; task = get_threadtask(thread); bufferMaxIndex = *count; //get thread state - if (user_only) + if (user_only) { state = find_user_regs(thread); - else + } else { state = find_kern_regs(thread); + } if (!state) { - *count = 0; + *count = 0; return KERN_FAILURE; } @@ -531,13 +529,14 @@ chudxnu_thread_get_callstack64_internal( supervisor = ARM_SUPERVISOR_MODE(state->cpsr); /* can't take a kernel callstack if we've got a user frame */ - if( !user_only && !supervisor ) + if (!user_only && !supervisor) { return KERN_FAILURE; + } /* - * Reserve space for saving LR (and sometimes SP) at the end of the - * backtrace. - */ + * Reserve space for saving LR (and sometimes SP) at the end of the + * backtrace. 
+ */ if (flags & CS_FLAG_EXTRASP) { bufferMaxIndex -= 2; } else { @@ -550,9 +549,9 @@ chudxnu_thread_get_callstack64_internal( } currPC = (uint64_t)state->pc; /* r15 */ - if (state->cpsr & PSR_TF) - currPC |= 1ULL; /* encode thumb mode into low bit of PC */ - + if (state->cpsr & PSR_TF) { + currPC |= 1ULL; /* encode thumb mode into low bit of PC */ + } currLR = (uint64_t)state->lr; /* r14 */ currSP = (uint64_t)state->sp; /* r13 */ @@ -580,7 +579,7 @@ chudxnu_thread_get_callstack64_internal( * necessary. */ - if((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) { + if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) { /* frame pointer is invalid - stop backtracing */ pc = 0ULL; break; @@ -592,8 +591,8 @@ chudxnu_thread_get_callstack64_internal( kr = KERN_FAILURE; } else { kr = chudxnu_kern_read(&frame, - (vm_offset_t)fp, - (vm_size_t)sizeof(frame)); + (vm_offset_t)fp, + (vm_size_t)sizeof(frame)); if (kr == KERN_SUCCESS) { pc = (uint64_t)frame[1]; nextFramePointer = (uint32_t *) (frame[0]); @@ -605,9 +604,9 @@ chudxnu_thread_get_callstack64_internal( } } else { kr = chudxnu_task_read(task, - &frame, - (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL), - sizeof(frame)); + &frame, + (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL), + sizeof(frame)); if (kr == KERN_SUCCESS) { pc = (uint64_t) frame[1]; nextFramePointer = (uint32_t *) (frame[0]); @@ -628,10 +627,11 @@ chudxnu_thread_get_callstack64_internal( prevPC = pc; } - if (nextFramePointer < fp) + if (nextFramePointer < fp) { break; - else + } else { fp = nextFramePointer; + } } if (bufferIndex >= bufferMaxIndex) { @@ -643,21 +643,20 @@ chudxnu_thread_get_callstack64_internal( // Save link register and R13 (sp) at bottom of stack (used for later fixup). buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, supervisor); - if( flags & CS_FLAG_EXTRASP ) + if (flags & CS_FLAG_EXTRASP) { buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, supervisor); + } *count = bufferIndex; return kr; - - } kern_return_t chudxnu_thread_get_callstack64_kperf( - thread_t thread, - uint64_t *callStack, - mach_msg_type_number_t *count, - boolean_t user_only) + thread_t thread, + uint64_t *callStack, + mach_msg_type_number_t *count, + boolean_t user_only) { return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 ); } @@ -682,41 +681,42 @@ chudxnu_thread_get_callstack64_kperf( static kern_return_t chudxnu_thread_get_callstack64_internal( - thread_t thread, - uint64_t *callStack, - mach_msg_type_number_t *count, - boolean_t user_only, + thread_t thread, + uint64_t *callStack, + mach_msg_type_number_t *count, + boolean_t user_only, int flags) { kern_return_t kr = KERN_SUCCESS; - task_t task; - uint64_t currPC=0ULL, currLR=0ULL, currSP=0ULL; - uint64_t prevPC = 0ULL; - uint64_t kernStackMin = thread->kernel_stack; - uint64_t kernStackMax = kernStackMin + kernel_stack_size; + task_t task; + uint64_t currPC = 0ULL, currLR = 0ULL, currSP = 0ULL; + uint64_t prevPC = 0ULL; + uint64_t kernStackMin = thread->kernel_stack; + uint64_t kernStackMax = kernStackMin + kernel_stack_size; uint64_t *buffer = callStack; int bufferIndex = 0; int bufferMaxIndex = 0; boolean_t kernel = FALSE; struct arm_saved_state *sstate = NULL; - uint64_t pc = 0ULL; + uint64_t pc = 0ULL; task = get_threadtask(thread); bufferMaxIndex = *count; //get thread state - if (user_only) + if (user_only) { sstate = find_user_regs(thread); - else + } else { sstate = find_kern_regs(thread); + } if (!sstate) { - *count = 0; + *count = 0; return KERN_FAILURE; } if 
(is_saved_state64(sstate)) { struct arm_saved_state64 *state = NULL; - uint64_t *fp=NULL, *nextFramePointer=NULL, *topfp=NULL; + uint64_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL; uint64_t frame[2]; state = saved_state64(sstate); @@ -725,8 +725,9 @@ chudxnu_thread_get_callstack64_internal( kernel = PSR64_IS_KERNEL(state->cpsr); /* can't take a kernel callstack if we've got a user frame */ - if( !user_only && !kernel ) + if (!user_only && !kernel) { return KERN_FAILURE; + } /* * Reserve space for saving LR (and sometimes SP) at the end of the @@ -773,7 +774,7 @@ chudxnu_thread_get_callstack64_internal( * necessary. */ - if((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) { + if ((uint64_t)fp == 0 || ((uint64_t)fp & 0x3) != 0) { /* frame pointer is invalid - stop backtracing */ pc = 0ULL; break; @@ -785,8 +786,8 @@ chudxnu_thread_get_callstack64_internal( kr = KERN_FAILURE; } else { kr = chudxnu_kern_read(&frame, - (vm_offset_t)fp, - (vm_size_t)sizeof(frame)); + (vm_offset_t)fp, + (vm_size_t)sizeof(frame)); if (kr == KERN_SUCCESS) { pc = frame[1]; nextFramePointer = (uint64_t *)frame[0]; @@ -798,9 +799,9 @@ chudxnu_thread_get_callstack64_internal( } } else { kr = chudxnu_task_read(task, - &frame, - (vm_offset_t)fp, - (vm_size_t)sizeof(frame)); + &frame, + (vm_offset_t)fp, + (vm_size_t)sizeof(frame)); if (kr == KERN_SUCCESS) { pc = frame[1]; nextFramePointer = (uint64_t *)(frame[0]); @@ -821,10 +822,11 @@ chudxnu_thread_get_callstack64_internal( prevPC = pc; } - if (nextFramePointer < fp) + if (nextFramePointer < fp) { break; - else + } else { fp = nextFramePointer; + } } BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex); @@ -838,24 +840,26 @@ chudxnu_thread_get_callstack64_internal( // Save link register and SP at bottom of stack (used for later fixup). buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel); - if( flags & CS_FLAG_EXTRASP ) - buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel); + if (flags & CS_FLAG_EXTRASP) { + buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel); + } } else { struct arm_saved_state32 *state = NULL; - uint32_t *fp=NULL, *nextFramePointer=NULL, *topfp=NULL; + uint32_t *fp = NULL, *nextFramePointer = NULL, *topfp = NULL; /* 64-bit kernel stacks, 32-bit user stacks */ uint64_t frame[2]; uint32_t frame32[2]; - + state = saved_state32(sstate); /* make sure it is safe to dereference before you do it */ kernel = ARM_SUPERVISOR_MODE(state->cpsr); /* can't take a kernel callstack if we've got a user frame */ - if( !user_only && !kernel ) + if (!user_only && !kernel) { return KERN_FAILURE; + } /* * Reserve space for saving LR (and sometimes SP) at the end of the @@ -873,9 +877,9 @@ chudxnu_thread_get_callstack64_internal( } currPC = (uint64_t)state->pc; /* r15 */ - if (state->cpsr & PSR_TF) - currPC |= 1ULL; /* encode thumb mode into low bit of PC */ - + if (state->cpsr & PSR_TF) { + currPC |= 1ULL; /* encode thumb mode into low bit of PC */ + } currLR = (uint64_t)state->lr; /* r14 */ currSP = (uint64_t)state->sp; /* r13 */ @@ -905,7 +909,7 @@ chudxnu_thread_get_callstack64_internal( * necessary. 
*/ - if((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) { + if ((uint32_t)fp == 0 || ((uint32_t)fp & 0x3) != 0) { /* frame pointer is invalid - stop backtracing */ pc = 0ULL; break; @@ -917,8 +921,8 @@ chudxnu_thread_get_callstack64_internal( kr = KERN_FAILURE; } else { kr = chudxnu_kern_read(&frame, - (vm_offset_t)fp, - (vm_size_t)sizeof(frame)); + (vm_offset_t)fp, + (vm_size_t)sizeof(frame)); if (kr == KERN_SUCCESS) { pc = (uint64_t)frame[1]; nextFramePointer = (uint32_t *) (frame[0]); @@ -930,9 +934,9 @@ chudxnu_thread_get_callstack64_internal( } } else { kr = chudxnu_task_read(task, - &frame32, - (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL), - sizeof(frame32)); + &frame32, + (((uint64_t)(uint32_t)fp) & 0x00000000FFFFFFFFULL), + sizeof(frame32)); if (kr == KERN_SUCCESS) { pc = (uint64_t)frame32[1]; nextFramePointer = (uint32_t *)(uintptr_t)(frame32[0]); @@ -953,10 +957,11 @@ chudxnu_thread_get_callstack64_internal( prevPC = pc; } - if (nextFramePointer < fp) + if (nextFramePointer < fp) { break; - else + } else { fp = nextFramePointer; + } } BUF_VERB(PERF_CS_BACKTRACE | DBG_FUNC_END, bufferIndex); @@ -972,8 +977,9 @@ chudxnu_thread_get_callstack64_internal( // Save link register and R13 (sp) at bottom of stack (used for later fixup). buffer[bufferIndex++] = chudxnu_vm_unslide(currLR, kernel); - if( flags & CS_FLAG_EXTRASP ) + if (flags & CS_FLAG_EXTRASP) { buffer[bufferIndex++] = chudxnu_vm_unslide(currSP, kernel); + } } *count = bufferIndex; @@ -982,10 +988,10 @@ chudxnu_thread_get_callstack64_internal( kern_return_t chudxnu_thread_get_callstack64_kperf( - thread_t thread, - uint64_t *callStack, - mach_msg_type_number_t *count, - boolean_t user_only) + thread_t thread, + uint64_t *callStack, + mach_msg_type_number_t *count, + boolean_t user_only) { return chudxnu_thread_get_callstack64_internal( thread, callStack, count, user_only, 0 ); } @@ -998,30 +1004,31 @@ chudxnu_thread_get_callstack64_kperf( ((uint64_t)addr != 0ULL && ((uint64_t)addr <= 0x00007FFFFFFFFFFFULL || (uint64_t)addr >= 0xFFFF800000000000ULL))) typedef struct _cframe64_t { - uint64_t prevFP; // can't use a real pointer here until we're a 64 bit kernel - uint64_t caller; - uint64_t args[0]; + uint64_t prevFP; // can't use a real pointer here until we're a 64 bit kernel + uint64_t caller; + uint64_t args[0]; }cframe64_t; typedef struct _cframe_t { - uint32_t prev; // this is really a user32-space pointer to the previous frame - uint32_t caller; - uint32_t args[0]; + uint32_t prev; // this is really a user32-space pointer to the previous frame + uint32_t caller; + uint32_t args[0]; } cframe_t; extern void * find_user_regs(thread_t); extern x86_saved_state32_t *find_kern_regs(thread_t); -static kern_return_t do_kernel_backtrace( +static kern_return_t +do_kernel_backtrace( thread_t thread, - struct x86_kernel_state *regs, + struct x86_kernel_state *regs, uint64_t *frames, mach_msg_type_number_t *start_idx, mach_msg_type_number_t max_idx) { uint64_t kernStackMin = (uint64_t)thread->kernel_stack; - uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size; + uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size; mach_msg_type_number_t ct = *start_idx; kern_return_t kr = KERN_FAILURE; @@ -1030,10 +1037,10 @@ static kern_return_t do_kernel_backtrace( uint64_t currFP = 0ULL; uint64_t prevPC = 0ULL; uint64_t prevFP = 0ULL; - if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), sizeof(uint64_t))) { + if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_rip), 
sizeof(uint64_t))) { return KERN_FAILURE; } - if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) { + if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_rbp), sizeof(uint64_t))) { return KERN_FAILURE; } #else @@ -1041,18 +1048,18 @@ static kern_return_t do_kernel_backtrace( uint32_t currFP = 0U; uint32_t prevPC = 0U; uint32_t prevFP = 0U; - if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) { + if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(regs->k_eip), sizeof(uint32_t))) { return KERN_FAILURE; } - if(KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) { + if (KERN_SUCCESS != chudxnu_kern_read(&currFP, (vm_offset_t)&(regs->k_ebp), sizeof(uint32_t))) { return KERN_FAILURE; } #endif - if(*start_idx >= max_idx) - return KERN_RESOURCE_SHORTAGE; // no frames traced - - if(!currPC) { + if (*start_idx >= max_idx) { + return KERN_RESOURCE_SHORTAGE; // no frames traced + } + if (!currPC) { return KERN_FAILURE; } @@ -1060,52 +1067,52 @@ static kern_return_t do_kernel_backtrace( // build a backtrace of this kernel state #if __LP64__ - while(VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) { + while (VALID_STACK_ADDRESS64(TRUE, currFP, kernStackMin, kernStackMax)) { // this is the address where caller lives in the user thread uint64_t caller = currFP + sizeof(uint64_t); #else - while(VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) { + while (VALID_STACK_ADDRESS(TRUE, currFP, kernStackMin, kernStackMax)) { uint32_t caller = (uint32_t)currFP + sizeof(uint32_t); #endif - if(!currFP || !currPC) { - currPC = 0; - break; - } + if (!currFP || !currPC) { + currPC = 0; + break; + } - if(ct >= max_idx) { + if (ct >= max_idx) { *start_idx = ct; - return KERN_RESOURCE_SHORTAGE; - } + return KERN_RESOURCE_SHORTAGE; + } /* read our caller */ kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(currPC)); - if(kr != KERN_SUCCESS || !currPC) { + if (kr != KERN_SUCCESS || !currPC) { currPC = 0UL; break; } - /* - * retrive contents of the frame pointer and advance to the next stack - * frame if it's valid - */ - prevFP = 0; + /* + * retrive contents of the frame pointer and advance to the next stack + * frame if it's valid + */ + prevFP = 0; kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(currPC)); #if __LP64__ - if(VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) { + if (VALID_STACK_ADDRESS64(TRUE, prevFP, kernStackMin, kernStackMax)) { #else - if(VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) { + if (VALID_STACK_ADDRESS(TRUE, prevFP, kernStackMin, kernStackMax)) { #endif - frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1); - prevPC = currPC; - } - if(prevFP <= currFP) { - break; - } else { - currFP = prevFP; - } + frames[ct++] = chudxnu_vm_unslide((uint64_t)currPC, 1); + prevPC = currPC; + } + if (prevFP <= currFP) { + break; + } else { + currFP = prevFP; + } } *start_idx = ct; @@ -1114,10 +1121,11 @@ static kern_return_t do_kernel_backtrace( -static kern_return_t do_backtrace32( +static kern_return_t +do_backtrace32( task_t task, thread_t thread, - x86_saved_state32_t *regs, + x86_saved_state32_t *regs, uint64_t *frames, mach_msg_type_number_t *start_idx, mach_msg_type_number_t max_idx, @@ -1129,74 +1137,75 @@ static kern_return_t do_backtrace32( uint64_t prevPC = 0ULL; uint64_t prevFP = 0ULL; uint64_t kernStackMin = thread->kernel_stack; - uint64_t kernStackMax = 
kernStackMin + kernel_stack_size; + uint64_t kernStackMax = kernStackMin + kernel_stack_size; mach_msg_type_number_t ct = *start_idx; kern_return_t kr = KERN_FAILURE; - if(ct >= max_idx) - return KERN_RESOURCE_SHORTAGE; // no frames traced - + if (ct >= max_idx) { + return KERN_RESOURCE_SHORTAGE; // no frames traced + } frames[ct++] = chudxnu_vm_unslide(currPC, supervisor); // build a backtrace of this 32 bit state. - while(VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) { + while (VALID_STACK_ADDRESS(supervisor, currFP, kernStackMin, kernStackMax)) { cframe_t *fp = (cframe_t *) (uintptr_t) currFP; - if(!currFP) { - currPC = 0; - break; - } + if (!currFP) { + currPC = 0; + break; + } - if(ct >= max_idx) { + if (ct >= max_idx) { *start_idx = ct; - return KERN_RESOURCE_SHORTAGE; - } + return KERN_RESOURCE_SHORTAGE; + } /* read our caller */ - if(supervisor) { + if (supervisor) { kr = chudxnu_kern_read(&tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t)); } else { kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t) &fp->caller, sizeof(uint32_t)); } - if(kr != KERN_SUCCESS) { + if (kr != KERN_SUCCESS) { currPC = 0ULL; break; } currPC = (uint64_t) tmpWord; // promote 32 bit address - /* - * retrive contents of the frame pointer and advance to the next stack - * frame if it's valid - */ - prevFP = 0; - if(supervisor) { + /* + * retrive contents of the frame pointer and advance to the next stack + * frame if it's valid + */ + prevFP = 0; + if (supervisor) { kr = chudxnu_kern_read(&tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t)); } else { kr = chudxnu_task_read(task, &tmpWord, (vm_offset_t)&fp->prev, sizeof(uint32_t)); } prevFP = (uint64_t) tmpWord; // promote 32 bit address - if(prevFP) { - frames[ct++] = chudxnu_vm_unslide(currPC, supervisor); - prevPC = currPC; - } - if(prevFP < currFP) { - break; - } else { - currFP = prevFP; - } + if (prevFP) { + frames[ct++] = chudxnu_vm_unslide(currPC, supervisor); + prevPC = currPC; + } + if (prevFP < currFP) { + break; + } else { + currFP = prevFP; + } } *start_idx = ct; return KERN_SUCCESS; } -static kern_return_t do_backtrace64( +static kern_return_t +do_backtrace64( task_t task, thread_t thread, - x86_saved_state64_t *regs, + x86_saved_state64_t *regs, uint64_t *frames, mach_msg_type_number_t *start_idx, mach_msg_type_number_t max_idx, @@ -1207,62 +1216,62 @@ static kern_return_t do_backtrace64( uint64_t prevPC = 0ULL; uint64_t prevFP = 0ULL; uint64_t kernStackMin = (uint64_t)thread->kernel_stack; - uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size; + uint64_t kernStackMax = (uint64_t)kernStackMin + kernel_stack_size; mach_msg_type_number_t ct = *start_idx; kern_return_t kr = KERN_FAILURE; - if(*start_idx >= max_idx) - return KERN_RESOURCE_SHORTAGE; // no frames traced - + if (*start_idx >= max_idx) { + return KERN_RESOURCE_SHORTAGE; // no frames traced + } frames[ct++] = chudxnu_vm_unslide(currPC, supervisor); // build a backtrace of this 32 bit state. 
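(The "32 bit state" comment is carried over verbatim from do_backtrace32; the loop below walks the 64-bit chain.) Each iteration reads the return address at fp + 8 and the saved frame pointer at fp, records the PC, and stops once the chain leaves the valid stack range or fails to ascend. A condensed sketch of the same walk over a stack already copied into a local buffer, with a simplified bounds check standing in for VALID_STACK_ADDRESS64:

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch: frame[0] is the saved FP, frame[1] the return PC, mirroring
 * the two-word frame do_backtrace64 reads with chudxnu_kern_read.
 */
static size_t
walk_fp_chain64(const uint8_t *stack, uint64_t base, uint64_t size,
    uint64_t fp, uint64_t *pcs, size_t max)
{
    size_t n = 0;
    while (fp >= base && fp + 16 <= base + size && n < max) {
        const uint64_t *frame = (const uint64_t *)(stack + (fp - base));
        if (frame[1] == 0) {
            break;              /* no caller recorded: end of chain */
        }
        pcs[n++] = frame[1];
        /*
         * Frames must ascend toward the stack base.  The kernel loop
         * tolerates equality and relies on the count limit instead;
         * this sketch simply stops.
         */
        if (frame[0] <= fp) {
            break;
        }
        fp = frame[0];
    }
    return n;
}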
- while(VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) { + while (VALID_STACK_ADDRESS64(supervisor, currFP, kernStackMin, kernStackMax)) { // this is the address where caller lives in the user thread uint64_t caller = currFP + sizeof(uint64_t); - if(!currFP) { - currPC = 0; - break; - } + if (!currFP) { + currPC = 0; + break; + } - if(ct >= max_idx) { + if (ct >= max_idx) { *start_idx = ct; - return KERN_RESOURCE_SHORTAGE; - } + return KERN_RESOURCE_SHORTAGE; + } /* read our caller */ - if(supervisor) { + if (supervisor) { kr = chudxnu_kern_read(&currPC, (vm_offset_t)caller, sizeof(uint64_t)); } else { kr = chudxnu_task_read(task, &currPC, caller, sizeof(uint64_t)); } - if(kr != KERN_SUCCESS) { + if (kr != KERN_SUCCESS) { currPC = 0ULL; break; } - /* - * retrive contents of the frame pointer and advance to the next stack - * frame if it's valid - */ - prevFP = 0; - if(supervisor) { + /* + * retrive contents of the frame pointer and advance to the next stack + * frame if it's valid + */ + prevFP = 0; + if (supervisor) { kr = chudxnu_kern_read(&prevFP, (vm_offset_t)currFP, sizeof(uint64_t)); } else { kr = chudxnu_task_read(task, &prevFP, currFP, sizeof(uint64_t)); } - if(VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) { - frames[ct++] = chudxnu_vm_unslide(currPC, supervisor); - prevPC = currPC; - } - if(prevFP < currFP) { - break; - } else { - currFP = prevFP; - } + if (VALID_STACK_ADDRESS64(supervisor, prevFP, kernStackMin, kernStackMax)) { + frames[ct++] = chudxnu_vm_unslide(currPC, supervisor); + prevPC = currPC; + } + if (prevFP < currFP) { + break; + } else { + currFP = prevFP; + } } *start_idx = ct; @@ -1271,41 +1280,40 @@ static kern_return_t do_backtrace64( static kern_return_t chudxnu_thread_get_callstack64_internal( - thread_t thread, - uint64_t *callstack, - mach_msg_type_number_t *count, - boolean_t user_only, - boolean_t kern_only) + thread_t thread, + uint64_t *callstack, + mach_msg_type_number_t *count, + boolean_t user_only, + boolean_t kern_only) { kern_return_t kr = KERN_FAILURE; - task_t task = thread->task; - uint64_t currPC = 0ULL; + task_t task = thread->task; + uint64_t currPC = 0ULL; boolean_t supervisor = FALSE; - mach_msg_type_number_t bufferIndex = 0; - mach_msg_type_number_t bufferMaxIndex = *count; - x86_saved_state_t *tagged_regs = NULL; // kernel register state + mach_msg_type_number_t bufferIndex = 0; + mach_msg_type_number_t bufferMaxIndex = *count; + x86_saved_state_t *tagged_regs = NULL; // kernel register state x86_saved_state64_t *regs64 = NULL; x86_saved_state32_t *regs32 = NULL; x86_saved_state32_t *u_regs32 = NULL; x86_saved_state64_t *u_regs64 = NULL; struct x86_kernel_state *kregs = NULL; - if(ml_at_interrupt_context()) { - - if(user_only) { + if (ml_at_interrupt_context()) { + if (user_only) { /* can't backtrace user state on interrupt stack. */ return KERN_FAILURE; } /* backtracing at interrupt context? */ - if(thread == current_thread() && current_cpu_datap()->cpu_int_state) { - /* + if (thread == current_thread() && current_cpu_datap()->cpu_int_state) { + /* * Locate the registers for the interrupted thread, assuming it is - * current_thread(). + * current_thread(). 
*/ tagged_regs = current_cpu_datap()->cpu_int_state; - - if(is_saved_state64(tagged_regs)) { + + if (is_saved_state64(tagged_regs)) { /* 64 bit registers */ regs64 = saved_state64(tagged_regs); supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U); @@ -1314,12 +1322,11 @@ chudxnu_thread_get_callstack64_internal( regs32 = saved_state32(tagged_regs); supervisor = ((regs32->cs & SEL_PL) != SEL_PL_U); } - } + } } - if(!ml_at_interrupt_context() && kernel_task == task) { - - if(!thread->kernel_stack) { + if (!ml_at_interrupt_context() && kernel_task == task) { + if (!thread->kernel_stack) { return KERN_FAILURE; } @@ -1327,7 +1334,7 @@ chudxnu_thread_get_callstack64_internal( kregs = (struct x86_kernel_state *)NULL; // nofault read of the thread->kernel_stack pointer - if(KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) { + if (KERN_SUCCESS != chudxnu_kern_read(&kregs, (vm_offset_t)&(thread->kernel_stack), sizeof(void *))) { return KERN_FAILURE; } @@ -1335,16 +1342,16 @@ chudxnu_thread_get_callstack64_internal( kregs = STACK_IKS((vm_offset_t)(uintptr_t)kregs); supervisor = TRUE; - } else if(!tagged_regs) { - /* + } else if (!tagged_regs) { + /* * not at interrupt context, or tracing a different thread than - * current_thread() at interrupt context + * current_thread() at interrupt context */ tagged_regs = USER_STATE(thread); - if(is_saved_state64(tagged_regs)) { + if (is_saved_state64(tagged_regs)) { /* 64 bit registers */ regs64 = saved_state64(tagged_regs); - supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U); + supervisor = ((regs64->isf.cs & SEL_PL) != SEL_PL_U); } else { /* 32 bit registers */ regs32 = saved_state32(tagged_regs); @@ -1352,11 +1359,11 @@ chudxnu_thread_get_callstack64_internal( } } - *count = 0; + *count = 0; - if(supervisor) { + if (supervisor) { // the caller only wants a user callstack. - if(user_only) { + if (user_only) { // bail - we've only got kernel state return KERN_FAILURE; } @@ -1370,13 +1377,13 @@ chudxnu_thread_get_callstack64_internal( if (user_only) { /* we only want to backtrace the user mode */ - if(!(u_regs32 || u_regs64)) { + if (!(u_regs32 || u_regs64)) { /* no user state to look at */ return KERN_FAILURE; } } - /* + /* * Order of preference for top of stack: * 64 bit kernel state (not likely) * 32 bit kernel state @@ -1384,39 +1391,39 @@ chudxnu_thread_get_callstack64_internal( * 32 bit user land state */ - if(kregs) { + if (kregs) { /* * nofault read of the registers from the kernel stack (as they can * disappear on the fly). 
*/ - if(KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) { + if (KERN_SUCCESS != chudxnu_kern_read(&currPC, (vm_offset_t)&(kregs->k_rip), sizeof(uint64_t))) { return KERN_FAILURE; } - } else if(regs64) { + } else if (regs64) { currPC = regs64->isf.rip; - } else if(regs32) { + } else if (regs32) { currPC = (uint64_t) regs32->eip; - } else if(u_regs64) { + } else if (u_regs64) { currPC = u_regs64->isf.rip; - } else if(u_regs32) { + } else if (u_regs32) { currPC = (uint64_t) u_regs32->eip; } - - if(!currPC) { + + if (!currPC) { /* no top of the stack, bail out */ return KERN_FAILURE; } bufferIndex = 0; - - if(bufferMaxIndex < 1) { + + if (bufferMaxIndex < 1) { *count = 0; return KERN_RESOURCE_SHORTAGE; } /* backtrace kernel */ - if(kregs) { + if (kregs) { addr64_t address = 0ULL; size_t size = 0UL; @@ -1426,71 +1433,70 @@ chudxnu_thread_get_callstack64_internal( // and do a nofault read of (r|e)sp uint64_t rsp = 0ULL; size = sizeof(uint64_t); - - if(KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) { + + if (KERN_SUCCESS != chudxnu_kern_read(&address, (vm_offset_t)&(kregs->k_rsp), size)) { address = 0ULL; } - if(address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) { + if (address && KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t)address, size) && bufferIndex < bufferMaxIndex) { callstack[bufferIndex++] = (uint64_t)rsp; } - } else if(regs64) { + } else if (regs64) { uint64_t rsp = 0ULL; // backtrace the 64bit side. kr = do_backtrace64(task, thread, regs64, callstack, &bufferIndex, - bufferMaxIndex - 1, TRUE); + bufferMaxIndex - 1, TRUE); - if(KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) && - bufferIndex < bufferMaxIndex) { + if (KERN_SUCCESS == chudxnu_kern_read(&rsp, (vm_offset_t) regs64->isf.rsp, sizeof(uint64_t)) && + bufferIndex < bufferMaxIndex) { callstack[bufferIndex++] = rsp; } - - } else if(regs32) { + } else if (regs32) { uint32_t esp = 0UL; // backtrace the 32bit side. 
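Every register-state branch in this function ends with the same tail step, visible just below: run the frame-pointer walk, then read the word at the interrupted stack pointer and append it to the buffer, which the patch's own comments describe as "used for later fixup". A sketch of that step with a generic read callback; read_fn and the helper name are invented stand-ins for chudxnu_kern_read and chudxnu_task_read, and the 32-bit branches read a uint32_t here instead:

#include <stddef.h>
#include <stdint.h>

typedef int (*read_fn_t)(void *dst, uint64_t src, size_t size); /* 0 = success */

/* After the FP walk, capture the top-of-stack word for leaf-frame fixup. */
static void
append_tos_word(uint64_t *buf, unsigned *idx, unsigned max,
    uint64_t sp, read_fn_t read_fn)
{
    uint64_t tos = 0;
    if (read_fn(&tos, sp, sizeof(tos)) == 0 && *idx < max) {
        buf[(*idx)++] = tos;
    }
}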
kr = do_backtrace32(task, thread, regs32, callstack, &bufferIndex, - bufferMaxIndex - 1, TRUE); - - if(KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) && - bufferIndex < bufferMaxIndex) { + bufferMaxIndex - 1, TRUE); + + if (KERN_SUCCESS == chudxnu_kern_read(&esp, (vm_offset_t) regs32->uesp, sizeof(uint32_t)) && + bufferIndex < bufferMaxIndex) { callstack[bufferIndex++] = (uint64_t) esp; } - } else if(u_regs64 && !kern_only) { + } else if (u_regs64 && !kern_only) { /* backtrace user land */ uint64_t rsp = 0ULL; - + kr = do_backtrace64(task, thread, u_regs64, callstack, &bufferIndex, - bufferMaxIndex - 1, FALSE); + bufferMaxIndex - 1, FALSE); - if(KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) && - bufferIndex < bufferMaxIndex) { + if (KERN_SUCCESS == chudxnu_task_read(task, &rsp, (addr64_t) u_regs64->isf.rsp, sizeof(uint64_t)) && + bufferIndex < bufferMaxIndex) { callstack[bufferIndex++] = rsp; } - - } else if(u_regs32 && !kern_only) { + } else if (u_regs32 && !kern_only) { uint32_t esp = 0UL; - + kr = do_backtrace32(task, thread, u_regs32, callstack, &bufferIndex, - bufferMaxIndex - 1, FALSE); + bufferMaxIndex - 1, FALSE); - if(KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) && - bufferIndex < bufferMaxIndex) { + if (KERN_SUCCESS == chudxnu_task_read(task, &esp, (addr64_t) u_regs32->uesp, sizeof(uint32_t)) && + bufferIndex < bufferMaxIndex) { callstack[bufferIndex++] = (uint64_t) esp; } } - *count = bufferIndex; - return kr; + *count = bufferIndex; + return kr; } __private_extern__ -kern_return_t chudxnu_thread_get_callstack64_kperf( - thread_t thread, - uint64_t *callstack, - mach_msg_type_number_t *count, - boolean_t is_user) +kern_return_t +chudxnu_thread_get_callstack64_kperf( + thread_t thread, + uint64_t *callstack, + mach_msg_type_number_t *count, + boolean_t is_user) { return chudxnu_thread_get_callstack64_internal(thread, callstack, count, is_user, !is_user); } diff --git a/osfmk/kperf/kdebug_trigger.c b/osfmk/kperf/kdebug_trigger.c index b649888ac..885d8a606 100644 --- a/osfmk/kperf/kdebug_trigger.c +++ b/osfmk/kperf/kdebug_trigger.c @@ -98,7 +98,7 @@ int kperf_kdebug_init(void) { kperf_kdebug_filter = kalloc_tag(sizeof(*kperf_kdebug_filter), - VM_KERN_MEMORY_DIAG); + VM_KERN_MEMORY_DIAG); if (kperf_kdebug_filter == NULL) { return ENOMEM; } @@ -136,7 +136,7 @@ kperf_kdebug_should_trigger(uint32_t debugid) */ for (uint8_t i = 0; i < kperf_kdebug_filter->n_debugids; i++) { uint32_t check_debugid = - kperf_kdebug_filter->debugids[i]; + kperf_kdebug_filter->debugids[i]; uint32_t mask = debugid_masks[DECODE_TYPE(kperf_kdebug_filter->types, i)]; if ((debugid & mask) == check_debugid) { @@ -170,8 +170,7 @@ kperf_kdebug_set_filter(user_addr_t user_filter, uint32_t user_size) } if ((err = copyin(user_filter, (char *)kperf_kdebug_filter, - KPERF_KDEBUG_FILTER_SIZE(n_debugids_provided)))) - { + KPERF_KDEBUG_FILTER_SIZE(n_debugids_provided)))) { bzero(kperf_kdebug_filter, sizeof(*kperf_kdebug_filter)); goto out; } @@ -244,8 +243,7 @@ kperf_kdebug_update(void) } if (kperf_kdebug_action != 0 && - kperf_kdebug_filter->n_debugids != 0) - { + kperf_kdebug_filter->n_debugids != 0) { kperf_kdebug_active = TRUE; } else { kperf_kdebug_active = FALSE; diff --git a/osfmk/kperf/kperf.c b/osfmk/kperf/kperf.c index 7d33d9f1b..bd5c582f0 100644 --- a/osfmk/kperf/kperf.c +++ b/osfmk/kperf/kperf.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include /* port_name_to_task */ @@ -97,7 +97,7 @@ kperf_init(void) /* create buffers to remember which threads don't need to be sampled by PET */ kperf_tid_on_cpus = kalloc_tag(ncpus * sizeof(*kperf_tid_on_cpus), - VM_KERN_MEMORY_DIAG); + VM_KERN_MEMORY_DIAG); if (kperf_tid_on_cpus == NULL) { err = ENOMEM; goto error; @@ -107,7 +107,7 @@ kperf_init(void) /* create the interrupt buffers */ intr_samplec = ncpus; intr_samplev = kalloc_tag(ncpus * sizeof(*intr_samplev), - VM_KERN_MEMORY_DIAG); + VM_KERN_MEMORY_DIAG); if (intr_samplev == NULL) { err = ENOMEM; goto error; @@ -224,10 +224,10 @@ out: } void kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation, - uintptr_t *starting_fp); + uintptr_t *starting_fp); void kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation, - uintptr_t *starting_fp) + uintptr_t *starting_fp) { if (kperf_kdebug_cswitch) { /* trace the new thread's PID for Instruments */ @@ -246,8 +246,8 @@ void kperf_on_cpu_update(void) { kperf_on_cpu_active = kperf_kdebug_cswitch || - kperf_lightweight_pet_active || - kperf_lazy_wait_action != 0; + kperf_lightweight_pet_active || + kperf_lazy_wait_action != 0; } /* random misc-ish functions */ @@ -318,7 +318,7 @@ kperf_sampling_disable(void) boolean_t kperf_thread_get_dirty(thread_t thread) { - return (thread->c_switch != thread->kperf_c_switch); + return thread->c_switch != thread->kperf_c_switch; } void diff --git a/osfmk/kperf/kperf.h b/osfmk/kperf/kperf.h index 673a02cd3..63434af8f 100644 --- a/osfmk/kperf/kperf.h +++ b/osfmk/kperf/kperf.h @@ -88,11 +88,11 @@ void kperf_on_cpu_update(void); /* for scheduler switching threads on */ static inline void kperf_on_cpu(thread_t thread, thread_continue_t continuation, - uintptr_t *starting_fp) + uintptr_t *starting_fp) { extern boolean_t kperf_on_cpu_active; void kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation, - uintptr_t *starting_fp); + uintptr_t *starting_fp); if (__improbable(kperf_on_cpu_active)) { kperf_on_cpu_internal(thread, continuation, starting_fp); @@ -129,7 +129,7 @@ kperf_interrupt(void) { extern unsigned int kperf_lazy_cpu_action; extern void kperf_lazy_cpu_sample(thread_t thread, unsigned int flags, - bool interrupt); + bool interrupt); if (__improbable(kperf_lazy_cpu_action != 0)) { kperf_lazy_cpu_sample(current_thread(), 0, true); diff --git a/osfmk/kperf/kperf_kpc.c b/osfmk/kperf/kperf_kpc.c index 5090e8c8c..26b7b777e 100644 --- 
a/osfmk/kperf/kperf_kpc.c +++ b/osfmk/kperf/kperf_kpc.c @@ -55,10 +55,10 @@ kperf_kpc_thread_sample(struct kpcdata *kpcd, int sample_config) /* let kpc_get_curthread_counters set the correct count */ kpcd->counterc = KPC_MAX_COUNTERS; if (kpc_get_curthread_counters(&kpcd->counterc, - kpcd->counterv)) { + kpcd->counterv)) { /* if thread counters aren't ready, default to 0 */ memset(kpcd->counterv, 0, - sizeof(uint64_t) * kpcd->counterc); + sizeof(uint64_t) * kpcd->counterc); } /* help out Instruments by sampling KPC's config */ if (!sample_config) { @@ -78,8 +78,8 @@ kperf_kpc_cpu_sample(struct kpcdata *kpcd, int sample_config) kpcd->running = kpc_get_running(); kpcd->counterc = kpc_get_cpu_counters(0, kpcd->running, - &kpcd->curcpu, - kpcd->counterv); + &kpcd->curcpu, + kpcd->counterv); if (!sample_config) { kpcd->configc = 0; } else { @@ -94,10 +94,10 @@ static void kperf_kpc_config_log(const struct kpcdata *kpcd) { BUF_DATA(PERF_KPC_CONFIG, - kpcd->running, - kpcd->counterc, - kpc_get_counter_count(KPC_CLASS_FIXED_MASK), - kpcd->configc); + kpcd->running, + kpcd->counterc, + kpc_get_counter_count(KPC_CLASS_FIXED_MASK), + kpcd->configc); } static void @@ -110,38 +110,38 @@ kperf_kpc_log(uint32_t code, uint32_t code32, const struct kpcdata *kpcd) /* config registers */ for (i = 0; i < ((kpcd->configc + 3) / 4); i++) { BUF_DATA(PERF_KPC_CFG_REG, - kpcd->configv[0 + i * 4], - kpcd->configv[1 + i * 4], - kpcd->configv[2 + i * 4], - kpcd->configv[3 + i * 4]); + kpcd->configv[0 + i * 4], + kpcd->configv[1 + i * 4], + kpcd->configv[2 + i * 4], + kpcd->configv[3 + i * 4]); } /* and the actual counts with one 64-bit argument each */ for (i = 0; i < ((kpcd->counterc + 3) / 4); i++) { BUF_DATA(code, - kpcd->counterv[0 + i * 4], - kpcd->counterv[1 + i * 4], - kpcd->counterv[2 + i * 4], - kpcd->counterv[3 + i * 4]); + kpcd->counterv[0 + i * 4], + kpcd->counterv[1 + i * 4], + kpcd->counterv[2 + i * 4], + kpcd->counterv[3 + i * 4]); } #else (void)code; /* config registers */ for (i = 0; i < ((kpcd->configc + 1) / 2); i++) { BUF_DATA(PERF_KPC_CFG_REG32, - (kpcd->configv[0 + i * 2] >> 32ULL), - kpcd->configv[0 + i * 2] & 0xffffffffULL, - (kpcd->configv[1 + i * 2] >> 32ULL), - kpcd->configv[1 + i * 2] & 0xffffffffULL); + (kpcd->configv[0 + i * 2] >> 32ULL), + kpcd->configv[0 + i * 2] & 0xffffffffULL, + (kpcd->configv[1 + i * 2] >> 32ULL), + kpcd->configv[1 + i * 2] & 0xffffffffULL); } /* and the actual counts with two 32-bit trace arguments each */ for (i = 0; i < ((kpcd->counterc + 1) / 2); i++) { BUF_DATA(code32, - (kpcd->counterv[0 + i * 2] >> 32ULL), - kpcd->counterv[0 + i * 2] & 0xffffffffULL, - (kpcd->counterv[1 + i * 2] >> 32ULL), - kpcd->counterv[1 + i * 2] & 0xffffffffULL); + (kpcd->counterv[0 + i * 2] >> 32ULL), + kpcd->counterv[0 + i * 2] & 0xffffffffULL, + (kpcd->counterv[1 + i * 2] >> 32ULL), + kpcd->counterv[1 + i * 2] & 0xffffffffULL); } #endif } diff --git a/osfmk/kperf/kperf_kpc.h b/osfmk/kperf/kperf_kpc.h index 84ec687b9..9b5d58b71 100644 --- a/osfmk/kperf/kperf_kpc.h +++ b/osfmk/kperf/kperf_kpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -36,12 +36,11 @@ void kperf_kpc_thread_ast(thread_t thread); /* KPC sample data */ -struct kpcdata -{ +struct kpcdata { int curcpu; uint32_t running; uint32_t counterc; - uint64_t counterv[KPC_MAX_COUNTERS+1]; + uint64_t counterv[KPC_MAX_COUNTERS + 1]; uint32_t configc; uint64_t configv[KPC_MAX_COUNTERS]; }; diff --git a/osfmk/kperf/kperf_timer.c b/osfmk/kperf/kperf_timer.c index 49c16419c..7d1c478ba 100644 --- a/osfmk/kperf/kperf_timer.c +++ b/osfmk/kperf/kperf_timer.c @@ -56,6 +56,10 @@ unsigned int kperf_timerc = 0; static unsigned int pet_timer_id = 999; +#define KPERF_TMR_ACTION_MASK (0xff) +#define KPERF_TMR_ACTION(action_state) ((action_state) & KPERF_TMR_ACTION_MASK) +#define KPERF_TMR_ACTIVE (0x100) + /* maximum number of timers we can construct */ #define TIMER_MAX (16) @@ -103,7 +107,7 @@ kperf_timer_schedule(struct kperf_timer *timer, uint64_t now) static void kperf_sample_cpu(struct kperf_timer *timer, bool system_sample, - bool only_system) + bool only_system) { assert(timer != NULL); @@ -144,14 +148,14 @@ kperf_sample_cpu(struct kperf_timer *timer, bool system_sample, /* call the action -- kernel-only from interrupt, pend user */ int r = kperf_sample(intbuf, &ctx, timer->actionid, - SAMPLE_FLAG_PEND_USER | (system_sample ? SAMPLE_FLAG_SYSTEM : 0) | - (only_system ? SAMPLE_FLAG_ONLY_SYSTEM : 0)); + SAMPLE_FLAG_PEND_USER | (system_sample ? SAMPLE_FLAG_SYSTEM : 0) | + (only_system ? SAMPLE_FLAG_ONLY_SYSTEM : 0)); /* end tracepoint is informational */ BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r); (void)atomic_fetch_and_explicit(&timer->pending_cpus, - ~(UINT64_C(1) << ncpu), memory_order_relaxed); + ~(UINT64_C(1) << ncpu), memory_order_relaxed); } void @@ -168,11 +172,14 @@ kperf_timer_handler(void *param0, __unused void *param1) unsigned int ncpus = machine_info.logical_cpu_max; bool system_only_self = true; - if (timer->actionid == 0) { + uint32_t action_state = atomic_fetch_or(&timer->action_state, + KPERF_TMR_ACTIVE); + + uint32_t actionid = KPERF_TMR_ACTION(action_state); + if (actionid == 0) { return; } - timer->active = 1; #if DEVELOPMENT || DEBUG timer->fire_time = mach_absolute_time(); #endif /* DEVELOPMENT || DEBUG */ @@ -183,7 +190,7 @@ kperf_timer_handler(void *param0, __unused void *param1) } BUF_DATA(PERF_TM_FIRE, ntimer, ntimer == pet_timer_id, timer->period, - timer->actionid); + actionid); if (ntimer == pet_timer_id) { kperf_pet_fire_before(); @@ -195,7 +202,7 @@ kperf_timer_handler(void *param0, __unused void *param1) /* * IPI other cores only if the action has non-system samplers. 
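The new action_state field replaces the separate actionid/active pair: the action ID lives in the low byte (KPERF_TMR_ACTION_MASK) and bit 8 (KPERF_TMR_ACTIVE) marks a handler in flight, so the timer handler can publish "I am running" and read the action ID in a single atomic read-modify-write. A minimal user-space sketch of the same protocol with C11 atomics; the TMR_* names mirror the macros above and the values are illustrative:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define TMR_ACTION_MASK 0xffu   /* low byte: action ID */
#define TMR_ACTIVE      0x100u  /* bit 8: handler is running */

static _Atomic uint32_t action_state;

/* Timer fire: set ACTIVE and learn the action ID in one RMW,
 * as the patched kperf_timer_handler does. */
static uint32_t
handler_enter(void)
{
    uint32_t prev = atomic_fetch_or(&action_state, TMR_ACTIVE);
    return prev & TMR_ACTION_MASK;
}

static void
handler_exit(void)
{
    atomic_fetch_and(&action_state, ~TMR_ACTIVE);
}

int
main(void)
{
    atomic_store(&action_state, 7u & TMR_ACTION_MASK);

    uint32_t actionid = handler_enter();
    if (actionid != 0) {
        printf("sampling with action %u\n", actionid);
    }
    handler_exit();
    return 0;
}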
*/ - if (kperf_action_has_non_system(timer->actionid)) { + if (kperf_action_has_non_system(actionid)) { /* * If the core that's handling the timer is not scheduling * threads, only run system samplers. @@ -210,16 +217,16 @@ kperf_timer_handler(void *param0, __unused void *param1) kperf_pet_fire_after(); } else { /* - * FIXME: Get the current time from elsewhere. The next - * timer's period now includes the time taken to reach this - * point. This causes a bias towards longer sampling periods - * than requested. - */ + * FIXME: Get the current time from elsewhere. The next + * timer's period now includes the time taken to reach this + * point. This causes a bias towards longer sampling periods + * than requested. + */ kperf_timer_schedule(timer, mach_absolute_time()); } deactivate: - timer->active = 0; + atomic_fetch_and(&timer->action_state, ~KPERF_TMR_ACTIVE); } /* program the timer from the PET thread */ @@ -290,30 +297,54 @@ kperf_timer_go(void) uint64_t now = mach_absolute_time(); for (unsigned int i = 0; i < kperf_timerc; i++) { - if (kperf_timerv[i].period == 0) { + struct kperf_timer *timer = &kperf_timerv[i]; + if (timer->period == 0) { continue; } - kperf_timer_schedule(&(kperf_timerv[i]), now); + atomic_store(&timer->action_state, + timer->actionid & KPERF_TMR_ACTION_MASK); + kperf_timer_schedule(timer, now); } } void kperf_timer_stop(void) { + /* + * Determine which timers are running and store them in a bitset, while + * cancelling their timer call. + */ + uint64_t running_timers = 0; for (unsigned int i = 0; i < kperf_timerc; i++) { - if (kperf_timerv[i].period == 0) { + struct kperf_timer *timer = &kperf_timerv[i]; + if (timer->period == 0) { continue; } - /* wait for the timer to stop */ - while (kperf_timerv[i].active); + uint32_t action_state = atomic_fetch_and(&timer->action_state, + ~KPERF_TMR_ACTION_MASK); + if (action_state & KPERF_TMR_ACTIVE) { + bit_set(running_timers, i); + } + + timer_call_cancel(&timer->tcall); + } - timer_call_cancel(&kperf_timerv[i].tcall); + /* + * Wait for any running timers to finish their critical sections. 
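The reworked kperf_timer_stop below orders shutdown in two passes: first strip the action bits from every timer (so a fire that races in samples nothing), recording which handlers were caught mid-fire, then wait for only those to drop their ACTIVE bit. A hedged sketch of that ordering, with usleep() standing in for the kernel's delay() and a fixed-size timer array as an assumption:

#include <stdatomic.h>
#include <stdint.h>
#include <unistd.h>

#define TMR_ACTION_MASK 0xffu
#define TMR_ACTIVE      0x100u

static _Atomic uint32_t timer_state[16];
static unsigned int timer_count = 16;

static void
timers_stop(void)
{
    uint64_t running = 0;

    /* Pass 1: disarm every timer; note any handler still in flight. */
    for (unsigned int i = 0; i < timer_count; i++) {
        uint32_t prev = atomic_fetch_and(&timer_state[i],
            ~TMR_ACTION_MASK);
        if (prev & TMR_ACTIVE) {
            running |= UINT64_C(1) << i;
        }
    }

    /* Pass 2: wait for the in-flight handlers to drain. */
    for (unsigned int i = 0; i < timer_count; i++) {
        if ((running & (UINT64_C(1) << i)) == 0) {
            continue;
        }
        while (atomic_load(&timer_state[i]) != 0) {
            usleep(10); /* kernel uses delay(10) */
        }
    }
}

int
main(void)
{
    timers_stop();
    return 0;
}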
+ */ + for (unsigned int i = lsb_first(running_timers); i < kperf_timerc; + i = lsb_next(running_timers, i)) { + while (atomic_load(&kperf_timerv[i].action_state) != 0) { + delay(10); + } } - /* wait for PET to stop, too */ - kperf_pet_config(0); + if (pet_timer_id < kperf_timerc) { + /* wait for PET to stop, too */ + kperf_pet_config(0); + } } unsigned int @@ -417,7 +448,7 @@ kperf_timer_reset(void) for (unsigned int i = 0; i < kperf_timerc; i++) { kperf_timerv[i].period = 0; kperf_timerv[i].actionid = 0; - kperf_timerv[i].pending_cpus = 0; + atomic_store_explicit(&kperf_timerv[i].pending_cpus, 0, memory_order_relaxed); } } @@ -432,7 +463,7 @@ kperf_timer_set_count(unsigned int count) nanoseconds_to_absolutetime(KP_MIN_PERIOD_BG_NS, &min_period_bg_abstime); nanoseconds_to_absolutetime(KP_MIN_PERIOD_PET_NS, &min_period_pet_abstime); nanoseconds_to_absolutetime(KP_MIN_PERIOD_PET_BG_NS, - &min_period_pet_bg_abstime); + &min_period_pet_bg_abstime); assert(min_period_abstime > 0); } @@ -469,7 +500,7 @@ kperf_timer_set_count(unsigned int count) /* create a new array */ new_timerv = kalloc_tag(count * sizeof(struct kperf_timer), - VM_KERN_MEMORY_DIAG); + VM_KERN_MEMORY_DIAG); if (new_timerv == NULL) { return ENOMEM; } @@ -478,12 +509,12 @@ kperf_timer_set_count(unsigned int count) if (old_timerv != NULL) { bcopy(kperf_timerv, new_timerv, - kperf_timerc * sizeof(struct kperf_timer)); + kperf_timerc * sizeof(struct kperf_timer)); } /* zero the new entries */ bzero(&(new_timerv[kperf_timerc]), - (count - old_count) * sizeof(struct kperf_timer)); + (count - old_count) * sizeof(struct kperf_timer)); /* (re-)setup the timer call info for all entries */ for (unsigned int i = 0; i < count; i++) { diff --git a/osfmk/kperf/kperf_timer.h b/osfmk/kperf/kperf_timer.h index 3d5b91cad..b59e1f8a3 100644 --- a/osfmk/kperf/kperf_timer.h +++ b/osfmk/kperf/kperf_timer.h @@ -4,7 +4,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -13,10 +13,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -24,7 +24,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,8 +34,8 @@ struct kperf_timer { struct timer_call tcall; uint64_t period; - unsigned actionid; - volatile unsigned active; + _Atomic uint32_t action_state; + uint32_t actionid; /* * A bitmap of CPUs that have a pending timer to service. On Intel, it @@ -44,7 +44,7 @@ struct kperf_timer { * the signal handler to multiplex simultaneous fires of different * timers. 
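Making pending_cpus _Atomic lets the broadcaster test-and-set a CPU's bit in one step, the pattern the x86_64 kperf_mp.c hunk later in this patch uses: if the bit was already set, the previous IPI has not been serviced yet and the duplicate is skipped. A standalone sketch of that dedupe:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static _Atomic uint64_t pending_cpus;

/* Returns true if an IPI should be sent to cpu, false if one is
 * already outstanding for it. */
static bool
mark_ipi_pending(unsigned int cpu)
{
    uint64_t bit = UINT64_C(1) << cpu;
    uint64_t prev = atomic_fetch_or_explicit(&pending_cpus, bit,
        memory_order_relaxed);
    return (prev & bit) == 0;
}

/* Handler side: acknowledge after taking the sample. */
static void
clear_ipi_pending(unsigned int cpu)
{
    atomic_fetch_and_explicit(&pending_cpus,
        ~(UINT64_C(1) << cpu), memory_order_relaxed);
}

int
main(void)
{
    if (mark_ipi_pending(3)) {
        /* ... send the IPI; the target CPU samples ... */
        clear_ipi_pending(3);
    }
    return 0;
}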
*/ - bitmap_t pending_cpus; + _Atomic bitmap_t pending_cpus; #if DEVELOPMENT || DEBUG uint64_t fire_time; diff --git a/osfmk/kperf/kperfbsd.c b/osfmk/kperf/kperfbsd.c index 6fe1b5c29..c50ca6b5a 100644 --- a/osfmk/kperf/kperfbsd.c +++ b/osfmk/kperf/kperfbsd.c @@ -107,7 +107,7 @@ _Atomic long long kperf_pending_ipis = 0; static int kperf_sysctl_get_set_uint32(struct sysctl_req *req, - uint32_t (*get)(void), int (*set)(uint32_t)) + uint32_t (*get)(void), int (*set)(uint32_t)) { assert(req != NULL); assert(get != NULL); @@ -129,7 +129,7 @@ kperf_sysctl_get_set_uint32(struct sysctl_req *req, static int kperf_sysctl_get_set_int(struct sysctl_req *req, - int (*get)(void), int (*set)(int)) + int (*get)(void), int (*set)(int)) { assert(req != NULL); assert(get != NULL); @@ -151,7 +151,7 @@ kperf_sysctl_get_set_int(struct sysctl_req *req, static int kperf_sysctl_get_set_uint64(struct sysctl_req *req, - uint64_t (*get)(void), int (*set)(uint64_t)) + uint64_t (*get)(void), int (*set)(uint64_t)) { assert(req != NULL); assert(get != NULL); @@ -173,7 +173,7 @@ kperf_sysctl_get_set_uint64(struct sysctl_req *req, static int kperf_sysctl_get_set_unsigned_uint32(struct sysctl_req *req, - int (*get)(unsigned int, uint32_t *), int (*set)(unsigned int, uint32_t)) + int (*get)(unsigned int, uint32_t *), int (*set)(unsigned int, uint32_t)) { assert(req != NULL); assert(get != NULL); @@ -274,7 +274,7 @@ sysctl_action_filter(struct sysctl_req *req, bool is_task_t) return copyout(inputs, req->oldptr, sizeof(inputs)); } else { int pid = is_task_t ? kperf_port_to_pid((mach_port_name_t)new_filter) - : new_filter; + : new_filter; return kperf_action_set_filter(actionid, pid); } @@ -299,35 +299,35 @@ static int sysctl_action_samplers(struct sysctl_req *req) { return kperf_sysctl_get_set_unsigned_uint32(req, - kperf_action_get_samplers, kperf_action_set_samplers); + kperf_action_get_samplers, kperf_action_set_samplers); } static int sysctl_action_userdata(struct sysctl_req *req) { return kperf_sysctl_get_set_unsigned_uint32(req, - kperf_action_get_userdata, kperf_action_set_userdata); + kperf_action_get_userdata, kperf_action_set_userdata); } static int sysctl_action_ucallstack_depth(struct sysctl_req *req) { return kperf_sysctl_get_set_unsigned_uint32(req, - kperf_action_get_ucallstack_depth, kperf_action_set_ucallstack_depth); + kperf_action_get_ucallstack_depth, kperf_action_set_ucallstack_depth); } static int sysctl_action_kcallstack_depth(struct sysctl_req *req) { return kperf_sysctl_get_set_unsigned_uint32(req, - kperf_action_get_kcallstack_depth, kperf_action_set_kcallstack_depth); + kperf_action_get_kcallstack_depth, kperf_action_set_kcallstack_depth); } static int sysctl_kdebug_action(struct sysctl_req *req) { return kperf_sysctl_get_set_int(req, kperf_kdebug_get_action, - kperf_kdebug_set_action); + kperf_kdebug_set_action); } static int @@ -366,43 +366,43 @@ static int sysctl_sampling(struct sysctl_req *req) { return kperf_sysctl_get_set_uint32(req, kperf_sampling_status, - kperf_sampling_set); + kperf_sampling_set); } static int sysctl_action_count(struct sysctl_req *req) { return kperf_sysctl_get_set_uint32(req, kperf_action_get_count, - kperf_action_set_count); + kperf_action_set_count); } static int sysctl_timer_count(struct sysctl_req *req) { return kperf_sysctl_get_set_uint32(req, kperf_timer_get_count, - kperf_timer_set_count); + kperf_timer_set_count); } static int sysctl_timer_action(struct sysctl_req *req) { return kperf_sysctl_get_set_unsigned_uint32(req, kperf_timer_get_action, - 
kperf_timer_set_action); + kperf_timer_set_action); } static int sysctl_timer_pet(struct sysctl_req *req) { return kperf_sysctl_get_set_uint32(req, kperf_timer_get_petid, - kperf_timer_set_petid); + kperf_timer_set_petid); } static int sysctl_bless_preempt(struct sysctl_req *req) { return sysctl_io_number(req, ktrace_root_set_owner_allowed, - sizeof(ktrace_root_set_owner_allowed), - &ktrace_root_set_owner_allowed, NULL); + sizeof(ktrace_root_set_owner_allowed), + &ktrace_root_set_owner_allowed, NULL); } static int @@ -411,7 +411,7 @@ sysctl_kperf_reset(struct sysctl_req *req) int should_reset = 0; int error = sysctl_io_number(req, should_reset, sizeof(should_reset), - &should_reset, NULL); + &should_reset, NULL); if (error) { return error; } @@ -426,49 +426,49 @@ static int sysctl_pet_idle_rate(struct sysctl_req *req) { return kperf_sysctl_get_set_int(req, kperf_get_pet_idle_rate, - kperf_set_pet_idle_rate); + kperf_set_pet_idle_rate); } static int sysctl_lightweight_pet(struct sysctl_req *req) { return kperf_sysctl_get_set_int(req, kperf_get_lightweight_pet, - kperf_set_lightweight_pet); + kperf_set_lightweight_pet); } static int sysctl_kdbg_cswitch(struct sysctl_req *req) { return kperf_sysctl_get_set_int(req, kperf_kdbg_cswitch_get, - kperf_kdbg_cswitch_set); + kperf_kdbg_cswitch_set); } static int sysctl_lazy_wait_time_threshold(struct sysctl_req *req) { return kperf_sysctl_get_set_uint64(req, kperf_lazy_get_wait_time_threshold, - kperf_lazy_set_wait_time_threshold); + kperf_lazy_set_wait_time_threshold); } static int sysctl_lazy_wait_action(struct sysctl_req *req) { return kperf_sysctl_get_set_int(req, kperf_lazy_get_wait_action, - kperf_lazy_set_wait_action); + kperf_lazy_set_wait_action); } static int sysctl_lazy_cpu_time_threshold(struct sysctl_req *req) { return kperf_sysctl_get_set_uint64(req, kperf_lazy_get_cpu_time_threshold, - kperf_lazy_set_cpu_time_threshold); + kperf_lazy_set_cpu_time_threshold); } static int sysctl_lazy_cpu_action(struct sysctl_req *req) { return kperf_sysctl_get_set_int(req, kperf_lazy_get_cpu_action, - kperf_lazy_set_cpu_action); + kperf_lazy_set_cpu_action); } static int @@ -550,19 +550,19 @@ kperf_sysctl SYSCTL_HANDLER_ARGS break; case REQ_LIGHTWEIGHT_PET: ret = sysctl_lightweight_pet(req); - break; + break; case REQ_LAZY_WAIT_TIME_THRESHOLD: ret = sysctl_lazy_wait_time_threshold(req); - break; + break; case REQ_LAZY_WAIT_ACTION: ret = sysctl_lazy_wait_action(req); - break; + break; case REQ_LAZY_CPU_TIME_THRESHOLD: ret = sysctl_lazy_cpu_time_threshold(req); - break; + break; case REQ_LAZY_CPU_ACTION: ret = sysctl_lazy_cpu_action(req); - break; + break; default: ret = ENOENT; break; @@ -590,9 +590,8 @@ kperf_sysctl_bless_handler SYSCTL_HANDLER_ARGS * ownership to unset it). 
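All of the kperf_sysctl_get_set_* helpers reindented above share one shape: always report the current value through the getter, and only if the request carries a new value, push it through the setter, which validates and stores. A rough user-space approximation of that shape; struct req here is an invented stand-in for struct sysctl_req and sysctl_io_number, not the kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct sysctl_req: an optional new value to set. */
struct req {
    bool has_new;
    uint32_t new_value;
    uint32_t out_value;
};

static int
get_set_uint32(struct req *req, uint32_t (*get)(void),
    int (*set)(uint32_t))
{
    req->out_value = get();     /* always report the current value */
    if (!req->has_new) {
        return 0;
    }
    return set(req->new_value); /* setter validates and stores */
}

static uint32_t sampling = 0;
static uint32_t get_sampling(void) { return sampling; }
static int set_sampling(uint32_t v) { sampling = v ? 1 : 0; return 0; }

int
main(void)
{
    struct req r = { .has_new = true, .new_value = 1 };
    get_set_uint32(&r, get_sampling, set_sampling);
    printf("was %u, now %u\n", r.out_value, sampling);
    return 0;
}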
*/ if (!((ktrace_root_set_owner_allowed || - ktrace_keep_ownership_on_reset) && - kauth_cred_issuser(kauth_cred_get()))) - { + ktrace_keep_ownership_on_reset) && + kauth_cred_issuser(kauth_cred_get()))) { if ((ret = ktrace_configure(KTRACE_KPERF))) { ktrace_unlock(); return ret; @@ -620,179 +619,179 @@ kperf_sysctl_bless_handler SYSCTL_HANDLER_ARGS /* root kperf node */ SYSCTL_NODE(, OID_AUTO, kperf, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "kperf"); + "kperf"); /* actions */ SYSCTL_NODE(_kperf, OID_AUTO, action, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "action"); + "action"); SYSCTL_PROC(_kperf_action, OID_AUTO, count, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | - CTLFLAG_MASKED, - (void *)REQ_ACTION_COUNT, - sizeof(int), kperf_sysctl, "I", "Number of actions"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | + CTLFLAG_MASKED, + (void *)REQ_ACTION_COUNT, + sizeof(int), kperf_sysctl, "I", "Number of actions"); SYSCTL_PROC(_kperf_action, OID_AUTO, samplers, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_ACTION_SAMPLERS, - 3 * sizeof(uint64_t), kperf_sysctl, "UQ", - "What to sample when a trigger fires an action"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_ACTION_SAMPLERS, + 3 * sizeof(uint64_t), kperf_sysctl, "UQ", + "What to sample when a trigger fires an action"); SYSCTL_PROC(_kperf_action, OID_AUTO, userdata, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_ACTION_USERDATA, - 3 * sizeof(uint64_t), kperf_sysctl, "UQ", - "User data to attribute to action"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_ACTION_USERDATA, + 3 * sizeof(uint64_t), kperf_sysctl, "UQ", + "User data to attribute to action"); SYSCTL_PROC(_kperf_action, OID_AUTO, filter_by_task, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_ACTION_FILTER_BY_TASK, - 3 * sizeof(uint64_t), kperf_sysctl, "UQ", - "Apply a task filter to the action"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_ACTION_FILTER_BY_TASK, + 3 * sizeof(uint64_t), kperf_sysctl, "UQ", + "Apply a task filter to the action"); SYSCTL_PROC(_kperf_action, OID_AUTO, filter_by_pid, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_ACTION_FILTER_BY_PID, - 3 * sizeof(uint64_t), kperf_sysctl, "UQ", - "Apply a pid filter to the action"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_ACTION_FILTER_BY_PID, + 3 * sizeof(uint64_t), kperf_sysctl, "UQ", + "Apply a pid filter to the action"); SYSCTL_PROC(_kperf_action, OID_AUTO, ucallstack_depth, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_ACTION_UCALLSTACK_DEPTH, - sizeof(int), kperf_sysctl, "I", - "Maximum number of frames to include in user callstacks"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_ACTION_UCALLSTACK_DEPTH, + sizeof(int), kperf_sysctl, "I", + "Maximum number of frames to include in user callstacks"); SYSCTL_PROC(_kperf_action, OID_AUTO, kcallstack_depth, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_ACTION_KCALLSTACK_DEPTH, - sizeof(int), kperf_sysctl, "I", - "Maximum number of frames to include in kernel callstacks"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_ACTION_KCALLSTACK_DEPTH, + sizeof(int), kperf_sysctl, "I", + "Maximum number of frames to include in kernel callstacks"); /* 
timers */ SYSCTL_NODE(_kperf, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "timer"); + "timer"); SYSCTL_PROC(_kperf_timer, OID_AUTO, count, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED - | CTLFLAG_MASKED, - (void *)REQ_TIMER_COUNT, - sizeof(int), kperf_sysctl, "I", "Number of time triggers"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED + | CTLFLAG_MASKED, + (void *)REQ_TIMER_COUNT, + sizeof(int), kperf_sysctl, "I", "Number of time triggers"); SYSCTL_PROC(_kperf_timer, OID_AUTO, period, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_TIMER_PERIOD, - 2 * sizeof(uint64_t), kperf_sysctl, "UQ", - "Timer number and period"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_TIMER_PERIOD, + 2 * sizeof(uint64_t), kperf_sysctl, "UQ", + "Timer number and period"); SYSCTL_PROC(_kperf_timer, OID_AUTO, action, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_TIMER_ACTION, - 2 * sizeof(uint64_t), kperf_sysctl, "UQ", - "Timer number and actionid"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_TIMER_ACTION, + 2 * sizeof(uint64_t), kperf_sysctl, "UQ", + "Timer number and actionid"); SYSCTL_PROC(_kperf_timer, OID_AUTO, pet_timer, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED - | CTLFLAG_MASKED, - (void *)REQ_TIMER_PET, - sizeof(int), kperf_sysctl, "I", "Which timer ID does PET"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED + | CTLFLAG_MASKED, + (void *)REQ_TIMER_PET, + sizeof(int), kperf_sysctl, "I", "Which timer ID does PET"); /* kdebug trigger */ SYSCTL_NODE(_kperf, OID_AUTO, kdebug, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "kdebug"); + "kdebug"); SYSCTL_PROC(_kperf_kdebug, OID_AUTO, action, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED - | CTLFLAG_MASKED, - (void*)REQ_KDEBUG_ACTION, - sizeof(int), kperf_sysctl, "I", "ID of action to trigger on kdebug events"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED + | CTLFLAG_MASKED, + (void*)REQ_KDEBUG_ACTION, + sizeof(int), kperf_sysctl, "I", "ID of action to trigger on kdebug events"); SYSCTL_PROC(_kperf_kdebug, OID_AUTO, filter, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void*)REQ_KDEBUG_FILTER, - sizeof(int), kperf_sysctl, "P", "The filter that determines which kdebug events trigger a sample"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void*)REQ_KDEBUG_FILTER, + sizeof(int), kperf_sysctl, "P", "The filter that determines which kdebug events trigger a sample"); /* lazy sampling */ SYSCTL_NODE(_kperf, OID_AUTO, lazy, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "lazy"); + "lazy"); SYSCTL_PROC(_kperf_lazy, OID_AUTO, wait_time_threshold, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_LAZY_WAIT_TIME_THRESHOLD, - sizeof(uint64_t), kperf_sysctl, "UQ", - "How many ticks a thread must wait to take a sample"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_LAZY_WAIT_TIME_THRESHOLD, + sizeof(uint64_t), kperf_sysctl, "UQ", + "How many ticks a thread must wait to take a sample"); SYSCTL_PROC(_kperf_lazy, OID_AUTO, wait_action, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_LAZY_WAIT_ACTION, - sizeof(uint64_t), kperf_sysctl, "UQ", - "Which action to fire when a thread waits longer than threshold"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void 
*)REQ_LAZY_WAIT_ACTION, + sizeof(uint64_t), kperf_sysctl, "UQ", + "Which action to fire when a thread waits longer than threshold"); SYSCTL_PROC(_kperf_lazy, OID_AUTO, cpu_time_threshold, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_LAZY_CPU_TIME_THRESHOLD, - sizeof(uint64_t), kperf_sysctl, "UQ", - "Minimum number of ticks a CPU must run between samples"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_LAZY_CPU_TIME_THRESHOLD, + sizeof(uint64_t), kperf_sysctl, "UQ", + "Minimum number of ticks a CPU must run between samples"); SYSCTL_PROC(_kperf_lazy, OID_AUTO, cpu_action, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_LAZY_CPU_ACTION, - sizeof(uint64_t), kperf_sysctl, "UQ", - "Which action to fire for lazy CPU samples"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_LAZY_CPU_ACTION, + sizeof(uint64_t), kperf_sysctl, "UQ", + "Which action to fire for lazy CPU samples"); /* misc */ SYSCTL_PROC(_kperf, OID_AUTO, sampling, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED - | CTLFLAG_MASKED, - (void *)REQ_SAMPLING, - sizeof(int), kperf_sysctl, "I", "Sampling running"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED + | CTLFLAG_MASKED, + (void *)REQ_SAMPLING, + sizeof(int), kperf_sysctl, "I", "Sampling running"); SYSCTL_PROC(_kperf, OID_AUTO, reset, - CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, - (void *)REQ_RESET, - 0, kperf_sysctl, "-", "Reset kperf"); + CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_LOCKED, + (void *)REQ_RESET, + 0, kperf_sysctl, "-", "Reset kperf"); SYSCTL_PROC(_kperf, OID_AUTO, blessed_pid, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED /* must be root */ - | CTLFLAG_MASKED, - (void *)REQ_BLESS, - sizeof(int), kperf_sysctl_bless_handler, "I", "Blessed pid"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED /* must be root */ + | CTLFLAG_MASKED, + (void *)REQ_BLESS, + sizeof(int), kperf_sysctl_bless_handler, "I", "Blessed pid"); SYSCTL_PROC(_kperf, OID_AUTO, blessed_preempt, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | - CTLFLAG_MASKED, - (void *)REQ_BLESS_PREEMPT, - sizeof(int), kperf_sysctl, "I", "Blessed preemption"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | + CTLFLAG_MASKED, + (void *)REQ_BLESS_PREEMPT, + sizeof(int), kperf_sysctl, "I", "Blessed preemption"); SYSCTL_PROC(_kperf, OID_AUTO, kdbg_cswitch, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED - | CTLFLAG_MASKED, - (void *)REQ_KDBG_CSWITCH, - sizeof(int), kperf_sysctl, "I", "Generate context switch info"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED + | CTLFLAG_MASKED, + (void *)REQ_KDBG_CSWITCH, + sizeof(int), kperf_sysctl, "I", "Generate context switch info"); SYSCTL_PROC(_kperf, OID_AUTO, pet_idle_rate, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED - | CTLFLAG_MASKED, - (void *)REQ_PET_IDLE_RATE, - sizeof(int), kperf_sysctl, "I", - "Rate at which unscheduled threads are forced to be sampled in " - "PET mode"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED + | CTLFLAG_MASKED, + (void *)REQ_PET_IDLE_RATE, + sizeof(int), kperf_sysctl, "I", + "Rate at which unscheduled threads are forced to be sampled in " + "PET mode"); SYSCTL_PROC(_kperf, OID_AUTO, lightweight_pet, - CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED - | CTLFLAG_MASKED, - (void *)REQ_LIGHTWEIGHT_PET, - sizeof(int), kperf_sysctl, "I", - "Status of 
lightweight PET mode"); + CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED + | CTLFLAG_MASKED, + (void *)REQ_LIGHTWEIGHT_PET, + sizeof(int), kperf_sysctl, "I", + "Status of lightweight PET mode"); /* limits */ SYSCTL_NODE(_kperf, OID_AUTO, limits, CTLFLAG_RW | CTLFLAG_LOCKED, 0, - "limits"); + "limits"); enum kperf_limit_request { REQ_LIM_PERIOD_NS, @@ -833,28 +832,28 @@ kperf_sysctl_limits SYSCTL_HANDLER_ARGS } SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_period_ns, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - (void *)REQ_LIM_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits, - "Q", "Minimum timer period in nanoseconds"); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, + (void *)REQ_LIM_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits, + "Q", "Minimum timer period in nanoseconds"); SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_bg_period_ns, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - (void *)REQ_LIM_BG_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits, - "Q", "Minimum background timer period in nanoseconds"); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, + (void *)REQ_LIM_BG_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits, + "Q", "Minimum background timer period in nanoseconds"); SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_pet_period_ns, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - (void *)REQ_LIM_PET_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits, - "Q", "Minimum PET timer period in nanoseconds"); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, + (void *)REQ_LIM_PET_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits, + "Q", "Minimum PET timer period in nanoseconds"); SYSCTL_PROC(_kperf_limits, OID_AUTO, timer_min_bg_pet_period_ns, - CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, - (void *)REQ_LIM_BG_PET_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits, - "Q", "Minimum background PET timer period in nanoseconds"); + CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_ANYBODY | CTLFLAG_LOCKED, + (void *)REQ_LIM_BG_PET_PERIOD_NS, sizeof(uint64_t), kperf_sysctl_limits, + "Q", "Minimum background PET timer period in nanoseconds"); /* debug */ SYSCTL_INT(_kperf, OID_AUTO, debug_level, CTLFLAG_RW | CTLFLAG_LOCKED, - &kperf_debug_level, 0, "debug level"); + &kperf_debug_level, 0, "debug level"); #if DEVELOPMENT || DEBUG SYSCTL_QUAD(_kperf, OID_AUTO, already_pending_ipis, - CTLFLAG_RD | CTLFLAG_LOCKED, - &kperf_pending_ipis, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, + &kperf_pending_ipis, ""); #endif /* DEVELOPMENT || DEBUG */ diff --git a/osfmk/kperf/kperfbsd.h b/osfmk/kperf/kperfbsd.h index 2e71d403c..cba74c077 100644 --- a/osfmk/kperf/kperfbsd.h +++ b/osfmk/kperf/kperfbsd.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
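Every SYSCTL_PROC above routes to the single kperf_sysctl handler, smuggling a REQ_* tag through the OID's argument pointer; the handler switches on the tag to pick the right get/set pair. A small sketch of that dispatch idea (the enum values and the ENOENT stand-in are illustrative):

#include <stdint.h>
#include <stdio.h>

enum req { REQ_SAMPLING = 1, REQ_RESET = 2 };

/* One handler serves many OIDs; registration stashes the tag in an
 * opaque pointer, recovered here as kperf_sysctl's switch does. */
static int
kperf_handler(void *arg1)
{
    switch ((enum req)(uintptr_t)arg1) {
    case REQ_SAMPLING:
        printf("get/set sampling\n");
        return 0;
    case REQ_RESET:
        printf("reset kperf\n");
        return 0;
    default:
        return 2; /* ENOENT */
    }
}

int
main(void)
{
    kperf_handler((void *)(uintptr_t)REQ_SAMPLING);
    kperf_handler((void *)(uintptr_t)REQ_RESET);
    return 0;
}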
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/kperf/lazy.c b/osfmk/kperf/lazy.c index 78e01b206..339a16644 100644 --- a/osfmk/kperf/lazy.c +++ b/osfmk/kperf/lazy.c @@ -57,7 +57,7 @@ kperf_lazy_off_cpu(thread_t thread) /* try to lazily sample the CPU if the thread was pre-empted */ if ((thread->reason & AST_SCHEDULING) != 0) { kperf_lazy_cpu_sample(thread, 0, 0); - } + } } void @@ -72,13 +72,13 @@ kperf_lazy_make_runnable(thread_t thread, bool in_interrupt) uint64_t wait_time = thread_get_last_wait_duration(thread); if (wait_time > kperf_lazy_wait_time_threshold) { BUF_DATA(PERF_LZ_MKRUNNABLE, (uintptr_t)thread_tid(thread), - thread->sched_pri, in_interrupt ? 1 : 0); + thread->sched_pri, in_interrupt ? 1 : 0); } } void kperf_lazy_wait_sample(thread_t thread, thread_continue_t continuation, - uintptr_t *starting_fp) + uintptr_t *starting_fp) { /* ignore idle threads */ if (thread->last_made_runnable_time == THREAD_NOT_RUNNABLE) { @@ -98,7 +98,7 @@ kperf_lazy_wait_sample(thread_t thread, thread_continue_t continuation, uint64_t runnable_time = timer_grab(&thread->runnable_timer); uint64_t running_time = timer_grab(&thread->user_timer) + - timer_grab(&thread->system_timer); + timer_grab(&thread->system_timer); BUF_DATA(PERF_LZ_WAITSAMPLE, wait_time, runnable_time, running_time); @@ -140,10 +140,10 @@ kperf_lazy_cpu_sample(thread_t thread, unsigned int flags, bool interrupt) uint64_t runnable_time = timer_grab(&thread->runnable_timer); uint64_t running_time = timer_grab(&thread->user_timer) + - timer_grab(&thread->system_timer); + timer_grab(&thread->system_timer); BUF_DATA(PERF_LZ_CPUSAMPLE, running_time, runnable_time, - thread->sched_pri, interrupt ? 1 : 0); + thread->sched_pri, interrupt ? 1 : 0); task_t task = get_threadtask(thread); struct kperf_context ctx = { @@ -160,7 +160,7 @@ kperf_lazy_cpu_sample(thread_t thread, unsigned int flags, bool interrupt) } kperf_sample(sample, &ctx, kperf_lazy_cpu_action, - SAMPLE_FLAG_PEND_USER | flags); + SAMPLE_FLAG_PEND_USER | flags); } } @@ -168,7 +168,11 @@ kperf_lazy_cpu_sample(thread_t thread, unsigned int flags, bool interrupt) * Accessors for configuration. 
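The lazy sampler in lazy.c above gates its work on a wait-time threshold: kperf_lazy_make_runnable only emits when the thread's last wait exceeded kperf_lazy_wait_time_threshold. A thin sketch of that gate, with an illustrative threshold value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t lazy_wait_time_threshold = 1000000; /* ticks; illustrative */

/* Sample only threads that waited longer than the configured
 * threshold, mirroring the check in kperf_lazy_make_runnable. */
static bool
should_sample_wait(uint64_t wait_time)
{
    return lazy_wait_time_threshold != 0 &&
        wait_time > lazy_wait_time_threshold;
}

int
main(void)
{
    printf("short wait: %d, long wait: %d\n",
        should_sample_wait(10), should_sample_wait(2000000));
    return 0;
}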
*/ -int kperf_lazy_get_wait_action(void) { return kperf_lazy_wait_action; } +int +kperf_lazy_get_wait_action(void) +{ + return kperf_lazy_wait_action; +} int kperf_lazy_set_wait_action(int action_id) @@ -195,7 +199,11 @@ kperf_lazy_set_wait_time_threshold(uint64_t threshold) return 0; } -int kperf_lazy_get_cpu_action(void) { return kperf_lazy_cpu_action; } +int +kperf_lazy_get_cpu_action(void) +{ + return kperf_lazy_cpu_action; +} int kperf_lazy_set_cpu_action(int action_id) @@ -220,4 +228,3 @@ kperf_lazy_set_cpu_time_threshold(uint64_t threshold) kperf_lazy_cpu_time_threshold = threshold; return 0; } - diff --git a/osfmk/kperf/lazy.h b/osfmk/kperf/lazy.h index c09fabe7c..17054b91e 100644 --- a/osfmk/kperf/lazy.h +++ b/osfmk/kperf/lazy.h @@ -40,7 +40,7 @@ void kperf_lazy_reset(void); void kperf_lazy_off_cpu(thread_t thread); void kperf_lazy_make_runnable(thread_t thread, bool in_interrupt); void kperf_lazy_wait_sample(thread_t thread, - thread_continue_t continuation, uintptr_t *starting_fp); + thread_continue_t continuation, uintptr_t *starting_fp); void kperf_lazy_cpu_sample(thread_t thread, unsigned int flags, bool interrupt); /* accessors for configuration */ diff --git a/osfmk/kperf/meminfo.c b/osfmk/kperf/meminfo.c index 03616d085..4a9209c17 100644 --- a/osfmk/kperf/meminfo.c +++ b/osfmk/kperf/meminfo.c @@ -50,7 +50,7 @@ kperf_meminfo_sample(task_t task, struct meminfo *mi) mi->phys_footprint = get_task_phys_footprint(task); kr = ledger_get_entries(task->ledger, task_ledgers.purgeable_volatile, - &credit, &debit); + &credit, &debit); if (kr == KERN_SUCCESS) { mi->purgeable_volatile = credit - debit; } else { @@ -58,8 +58,8 @@ kperf_meminfo_sample(task_t task, struct meminfo *mi) } kr = ledger_get_entries(task->ledger, - task_ledgers.purgeable_volatile_compressed, - &credit, &debit); + task_ledgers.purgeable_volatile_compressed, + &credit, &debit); if (kr == KERN_SUCCESS) { mi->purgeable_volatile_compressed = credit - debit; } else { @@ -74,5 +74,5 @@ void kperf_meminfo_log(struct meminfo *mi) { BUF_DATA(PERF_MI_DATA, mi->phys_footprint, mi->purgeable_volatile, - mi->purgeable_volatile_compressed); + mi->purgeable_volatile_compressed); } diff --git a/osfmk/kperf/meminfo.h b/osfmk/kperf/meminfo.h index a51c1794f..5917b7a26 100644 --- a/osfmk/kperf/meminfo.h +++ b/osfmk/kperf/meminfo.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/kperf/pet.c b/osfmk/kperf/pet.c index 0bfb626ce..0db17185b 100644 --- a/osfmk/kperf/pet.c +++ b/osfmk/kperf/pet.c @@ -121,7 +121,7 @@ static kern_return_t pet_threads_prepare(task_t task); static void pet_sample_all_tasks(uint32_t idle_rate); static void pet_sample_task(task_t task, uint32_t idle_rate); static void pet_sample_thread(int pid, task_t task, thread_t thread, - uint32_t idle_rate); + uint32_t idle_rate); /* functions called by other areas of kperf */ @@ -154,11 +154,16 @@ kperf_pet_fire_after(void) void kperf_pet_on_cpu(thread_t thread, thread_continue_t continuation, - uintptr_t *starting_fp) + uintptr_t *starting_fp) { assert(thread != NULL); assert(ml_get_interrupts_enabled() == FALSE); + uint32_t actionid = pet_action_id; + if (actionid == 0) { + return; + } + if (thread->kperf_pet_gen != kperf_pet_gen) { BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_START, kperf_pet_gen, thread->kperf_pet_gen); @@ -183,7 +188,7 @@ kperf_pet_on_cpu(thread_t thread, thread_continue_t continuation, if (continuation != NULL) { flags |= SAMPLE_FLAG_CONTINUATION; } - kperf_sample(sample, &ctx, pet_action_id, flags); + kperf_sample(sample, &ctx, actionid, flags); BUF_VERB(PERF_PET_SAMPLE_THREAD | DBG_FUNC_END); } else { @@ -194,6 +199,10 @@ kperf_pet_on_cpu(thread_t thread, thread_continue_t continuation, void kperf_pet_config(unsigned int action_id) { + if (action_id == 0 && !pet_initted) { + return; + } + kern_return_t kr = pet_init(); if (kr != KERN_SUCCESS) { return; @@ -281,7 +290,7 @@ pet_init(void) /* make the sync point */ pet_lock = lck_mtx_alloc_init(&kperf_lck_grp, NULL); - assert(pet_lock); + assert(pet_lock != NULL); /* create the thread */ @@ -315,8 +324,10 @@ pet_thread_idle(void) { lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); - (void)lck_mtx_sleep(pet_lock, LCK_SLEEP_DEFAULT, &pet_action_id, - THREAD_UNINT); + do { + (void)lck_mtx_sleep(pet_lock, LCK_SLEEP_DEFAULT, &pet_action_id, + THREAD_UNINT); + } while (pet_action_id == 0); } __attribute__((noreturn)) @@ -582,6 +593,7 @@ static void pet_sample_all_tasks(uint32_t idle_rate) { lck_mtx_assert(pet_lock, LCK_MTX_ASSERT_OWNED); + assert(pet_action_id > 0); BUF_INFO(PERF_PET_SAMPLE | DBG_FUNC_START); @@ -598,7 +610,7 @@ pet_sample_all_tasks(uint32_t idle_rate) pet_sample_task(task, idle_rate); } - for(unsigned int i = 0; i < pet_tasks_count; i++) { + for (unsigned int i = 0; i < pet_tasks_count; i++) { task_deallocate(pet_tasks[i]); } diff --git a/osfmk/kperf/pet.h b/osfmk/kperf/pet.h index f8c6c1720..8f9d54279 100644 --- a/osfmk/kperf/pet.h +++ b/osfmk/kperf/pet.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
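The pet.c hunks above make pet_thread_idle re-check pet_action_id after every wakeup, the standard guard against spurious wakeups, and have kperf_pet_on_cpu snapshot the action ID once before using it. A pthreads sketch of the sleep loop, with pthread_cond_wait standing in for lck_mtx_sleep; the caller-holds-lock convention mirrors the kernel's LCK_MTX_ASSERT_OWNED:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pet_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pet_cv = PTHREAD_COND_INITIALIZER;
static unsigned int pet_action_id = 0;

/* Called with pet_lock held: sleep until configuration installs a
 * nonzero action ID, re-checking after every wakeup. */
static void
pet_thread_idle(void)
{
    do {
        pthread_cond_wait(&pet_cv, &pet_lock);
    } while (pet_action_id == 0);
}

static void *
configure(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&pet_lock);
    pet_action_id = 1; /* kperf_pet_config(1) analogue */
    pthread_cond_signal(&pet_cv);
    pthread_mutex_unlock(&pet_lock);
    return NULL;
}

int
main(void)
{
    pthread_t t;
    /* Hold the lock before spawning config so the signal can't race
     * ahead of the wait. */
    pthread_mutex_lock(&pet_lock);
    pthread_create(&t, NULL, configure, NULL);
    pet_thread_idle();
    printf("woke with action %u\n", pet_action_id);
    pthread_mutex_unlock(&pet_lock);
    pthread_join(t, NULL);
    return 0;
}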
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -42,7 +42,7 @@ void kperf_pet_fire_after(void); /* notify PET of new threads switching on */ void kperf_pet_on_cpu(thread_t thread, thread_continue_t continuation, - uintptr_t *starting_frame); + uintptr_t *starting_frame); /* get/set rate at which idle threads are sampled by PET */ int kperf_get_pet_idle_rate(void); diff --git a/osfmk/kperf/task_samplers.c b/osfmk/kperf/task_samplers.c index 3d521b782..5c2308f76 100644 --- a/osfmk/kperf/task_samplers.c +++ b/osfmk/kperf/task_samplers.c @@ -34,7 +34,7 @@ #include extern void memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, - boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit); + boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit); void kperf_task_snapshot_sample(task_t task, struct kperf_task_snapshot *tksn) @@ -82,19 +82,19 @@ kperf_task_snapshot_log(struct kperf_task_snapshot *tksn) #if defined(__LP64__) BUF_DATA(PERF_TK_SNAP_DATA, tksn->kptksn_flags, - ENCODE_UPPER_64(tksn->kptksn_suspend_count) | - ENCODE_LOWER_64(tksn->kptksn_pageins), - tksn->kptksn_user_time_in_terminated_threads, - tksn->kptksn_system_time_in_terminated_threads); + ENCODE_UPPER_64(tksn->kptksn_suspend_count) | + ENCODE_LOWER_64(tksn->kptksn_pageins), + tksn->kptksn_user_time_in_terminated_threads, + tksn->kptksn_system_time_in_terminated_threads); #else BUF_DATA(PERF_TK_SNAP_DATA1_32, UPPER_32(tksn->kptksn_flags), - LOWER_32(tksn->kptksn_flags), - tksn->kptksn_suspend_count, - tksn->kptksn_pageins); + LOWER_32(tksn->kptksn_flags), + tksn->kptksn_suspend_count, + tksn->kptksn_pageins); BUF_DATA(PERF_TK_SNAP_DATA2_32, UPPER_32(tksn->kptksn_user_time_in_terminated_threads), - LOWER_32(tksn->kptksn_user_time_in_terminated_threads), - UPPER_32(tksn->kptksn_system_time_in_terminated_threads), - LOWER_32(tksn->kptksn_system_time_in_terminated_threads)); + LOWER_32(tksn->kptksn_user_time_in_terminated_threads), + UPPER_32(tksn->kptksn_system_time_in_terminated_threads), + LOWER_32(tksn->kptksn_system_time_in_terminated_threads)); #endif /* defined(__LP64__) */ } diff --git a/osfmk/kperf/thread_samplers.c b/osfmk/kperf/thread_samplers.c index 36e2196fe..91ebb5026 100644 --- a/osfmk/kperf/thread_samplers.c +++ b/osfmk/kperf/thread_samplers.c @@ -94,12 +94,12 @@ kperf_thread_info_runmode_legacy(thread_t thread) #if !CONFIG_EMBEDDED /* on desktop, if state is blank, leave not idle set */ if (kperf_state == 0) { - return (TH_IDLE << 16); + return TH_IDLE << 16; } #endif /* !CONFIG_EMBEDDED */ /* high two bytes are inverted mask, low two bytes are normal */ - return (((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff)); + return ((~kperf_state & 0xffff) << 16) | (kperf_state & 0xffff); } void @@ -121,7 +121,7 @@ void kperf_thread_info_log(struct kperf_thread_info *ti) { BUF_DATA(PERF_TI_DATA, ti->kpthi_pid, ti->kpthi_tid /* K64-only */, - ti->kpthi_dq_addr, ti->kpthi_runmode); + ti->kpthi_dq_addr, ti->kpthi_runmode); } /* @@ -131,7 +131,7 @@ kperf_thread_info_log(struct kperf_thread_info *ti) void kperf_thread_scheduling_sample(struct kperf_thread_scheduling *thsc, - struct kperf_context 
*context) + struct kperf_context *context) { assert(thsc != NULL); assert(context != NULL); @@ -157,7 +157,7 @@ kperf_thread_scheduling_sample(struct kperf_thread_scheduling *thsc, thsc->kpthsc_effective_qos = thread->effective_policy.thep_qos; thsc->kpthsc_requested_qos = thread->requested_policy.thrp_qos; thsc->kpthsc_requested_qos_override = MAX(thread->requested_policy.thrp_qos_override, - thread->requested_policy.thrp_qos_workq_override); + thread->requested_policy.thrp_qos_workq_override); thsc->kpthsc_requested_qos_promote = thread->requested_policy.thrp_qos_promote; thsc->kpthsc_requested_qos_ipc_override = thread->requested_policy.thrp_qos_ipc_override; thsc->kpthsc_requested_qos_sync_ipc_override = thread->requested_policy.thrp_qos_sync_ipc_override; @@ -173,38 +173,38 @@ kperf_thread_scheduling_log(struct kperf_thread_scheduling *thsc) assert(thsc != NULL); #if defined(__LP64__) BUF_DATA(PERF_TI_SCHEDDATA_2, thsc->kpthsc_user_time, - thsc->kpthsc_system_time, - (((uint64_t)thsc->kpthsc_base_priority) << 48) - | ((uint64_t)thsc->kpthsc_sched_priority << 32) - | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24) - | (thsc->kpthsc_effective_qos << 6) - | (thsc->kpthsc_requested_qos << 3) - | thsc->kpthsc_requested_qos_override, - ((uint64_t)thsc->kpthsc_effective_latency_qos << 61) - | ((uint64_t)thsc->kpthsc_requested_qos_promote << 58) - | ((uint64_t)thsc->kpthsc_requested_qos_ipc_override << 55) - | ((uint64_t)thsc->kpthsc_requested_qos_sync_ipc_override << 52) - ); + thsc->kpthsc_system_time, + (((uint64_t)thsc->kpthsc_base_priority) << 48) + | ((uint64_t)thsc->kpthsc_sched_priority << 32) + | ((uint64_t)(thsc->kpthsc_state & 0xff) << 24) + | (thsc->kpthsc_effective_qos << 6) + | (thsc->kpthsc_requested_qos << 3) + | thsc->kpthsc_requested_qos_override, + ((uint64_t)thsc->kpthsc_effective_latency_qos << 61) + | ((uint64_t)thsc->kpthsc_requested_qos_promote << 58) + | ((uint64_t)thsc->kpthsc_requested_qos_ipc_override << 55) + | ((uint64_t)thsc->kpthsc_requested_qos_sync_ipc_override << 52) + ); BUF_DATA(PERF_TI_SCHEDDATA_3, thsc->kpthsc_runnable_time); #else BUF_DATA(PERF_TI_SCHEDDATA1_32, UPPER_32(thsc->kpthsc_user_time), - LOWER_32(thsc->kpthsc_user_time), - UPPER_32(thsc->kpthsc_system_time), - LOWER_32(thsc->kpthsc_system_time) - ); + LOWER_32(thsc->kpthsc_user_time), + UPPER_32(thsc->kpthsc_system_time), + LOWER_32(thsc->kpthsc_system_time) + ); BUF_DATA(PERF_TI_SCHEDDATA2_32_2, (((uint32_t)thsc->kpthsc_base_priority) << 16) - | thsc->kpthsc_sched_priority, - ((thsc->kpthsc_state & 0xff) << 24) - | (thsc->kpthsc_effective_qos << 6) - | (thsc->kpthsc_requested_qos << 3) - | thsc->kpthsc_requested_qos_override, - ((uint32_t)thsc->kpthsc_effective_latency_qos << 29) - | ((uint32_t)thsc->kpthsc_requested_qos_promote << 26) - | ((uint32_t)thsc->kpthsc_requested_qos_ipc_override << 23) - | ((uint32_t)thsc->kpthsc_requested_qos_sync_ipc_override << 20) - ); + | thsc->kpthsc_sched_priority, + ((thsc->kpthsc_state & 0xff) << 24) + | (thsc->kpthsc_effective_qos << 6) + | (thsc->kpthsc_requested_qos << 3) + | thsc->kpthsc_requested_qos_override, + ((uint32_t)thsc->kpthsc_effective_latency_qos << 29) + | ((uint32_t)thsc->kpthsc_requested_qos_promote << 26) + | ((uint32_t)thsc->kpthsc_requested_qos_ipc_override << 23) + | ((uint32_t)thsc->kpthsc_requested_qos_sync_ipc_override << 20) + ); BUF_DATA(PERF_TI_SCHEDDATA3_32, UPPER_32(thsc->kpthsc_runnable_time), - LOWER_32(thsc->kpthsc_runnable_time)); + LOWER_32(thsc->kpthsc_runnable_time)); #endif /* defined(__LP64__) */ } @@ -221,7 +221,7 @@ 
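kperf_thread_scheduling_log above packs priorities, run state, and several 3-bit QoS fields into single 64-bit trace words with shifts and masks. A standalone sketch of that packing and one unpack, using the same bit layout as the LP64 path (field values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Pack priorities, state, and QoS levels into one 64-bit trace
 * argument, then recover one field. */
int
main(void)
{
    uint64_t base_priority = 31, sched_priority = 47;
    uint64_t state = 0x09, eff_qos = 3, req_qos = 4, qos_override = 5;

    uint64_t word = (base_priority << 48)
        | (sched_priority << 32)
        | ((state & 0xff) << 24)
        | (eff_qos << 6)
        | (req_qos << 3)
        | qos_override;

    uint64_t unpacked_sched = (word >> 32) & 0xffff;
    printf("packed=%016llx sched_priority=%llu\n",
        (unsigned long long)word, (unsigned long long)unpacked_sched);
    return 0;
}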
kperf_thread_scheduling_log(struct kperf_thread_scheduling *thsc) void kperf_thread_snapshot_sample(struct kperf_thread_snapshot *thsn, - struct kperf_context *context) + struct kperf_context *context) { assert(thsn != NULL); assert(context != NULL); @@ -258,13 +258,13 @@ kperf_thread_snapshot_log(struct kperf_thread_snapshot *thsn) assert(thsn != NULL); #if defined(__LP64__) BUF_DATA(PERF_TI_SNAPDATA, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8) - | (thsn->kpthsn_io_tier << 24), - thsn->kpthsn_last_made_runnable_time); + | (thsn->kpthsn_io_tier << 24), + thsn->kpthsn_last_made_runnable_time); #else BUF_DATA(PERF_TI_SNAPDATA_32, thsn->kpthsn_flags | ((uint32_t)(thsn->kpthsn_suspend_count) << 8) - | (thsn->kpthsn_io_tier << 24), - UPPER_32(thsn->kpthsn_last_made_runnable_time), - LOWER_32(thsn->kpthsn_last_made_runnable_time)); + | (thsn->kpthsn_io_tier << 24), + UPPER_32(thsn->kpthsn_last_made_runnable_time), + LOWER_32(thsn->kpthsn_last_made_runnable_time)); #endif /* defined(__LP64__) */ } @@ -277,7 +277,7 @@ kperf_thread_snapshot_log(struct kperf_thread_snapshot *thsn) void kperf_thread_dispatch_sample(struct kperf_thread_dispatch *thdi, - struct kperf_context *context) + struct kperf_context *context) { assert(thdi != NULL); assert(context != NULL); @@ -298,20 +298,18 @@ kperf_thread_dispatch_sample(struct kperf_thread_dispatch *thdi, uint64_t user_dq_addr; if ((copyin((user_addr_t)user_dq_key_addr, - (char *)&user_dq_addr, - user_addr_size) != 0) || - (user_dq_addr == 0)) - { + (char *)&user_dq_addr, + user_addr_size) != 0) || + (user_dq_addr == 0)) { goto error; } uint64_t user_dq_serialno_addr = - user_dq_addr + get_task_dispatchqueue_serialno_offset(task); + user_dq_addr + get_task_dispatchqueue_serialno_offset(task); if (copyin((user_addr_t)user_dq_serialno_addr, - (char *)&(thdi->kpthdi_dq_serialno), - user_addr_size) == 0) - { + (char *)&(thdi->kpthdi_dq_serialno), + user_addr_size) == 0) { goto out; } @@ -336,7 +334,7 @@ kperf_thread_dispatch_log(struct kperf_thread_dispatch *thdi) BUF_DATA(PERF_TI_DISPDATA, thdi->kpthdi_dq_serialno); #else BUF_DATA(PERF_TI_DISPDATA_32, UPPER_32(thdi->kpthdi_dq_serialno), - LOWER_32(thdi->kpthdi_dq_serialno)); + LOWER_32(thdi->kpthdi_dq_serialno)); #endif /* defined(__LP64__) */ } @@ -367,11 +365,10 @@ kperf_thread_inscyc_log(struct kperf_context *context) #else /* defined(__LP64__) */ /* 32-bit platforms don't count instructions */ BUF_DATA(PERF_TI_INSCYCDATA_32, 0, 0, UPPER_32(counts[MT_CORE_CYCLES]), - LOWER_32(counts[MT_CORE_CYCLES])); + LOWER_32(counts[MT_CORE_CYCLES])); #endif /* !defined(__LP64__) */ #else #pragma unused(context) #endif /* MONOTONIC */ - } diff --git a/osfmk/kperf/thread_samplers.h b/osfmk/kperf/thread_samplers.h index f443be7dd..9696dfec5 100644 --- a/osfmk/kperf/thread_samplers.h +++ b/osfmk/kperf/thread_samplers.h @@ -40,7 +40,7 @@ struct kperf_thread_info { }; void kperf_thread_info_sample(struct kperf_thread_info *, - struct kperf_context *); + struct kperf_context *); void kperf_thread_info_log(struct kperf_thread_info *); /* scheduling information */ @@ -52,16 +52,16 @@ struct kperf_thread_scheduling { uint16_t kpthsc_base_priority; uint16_t kpthsc_sched_priority; unsigned int kpthsc_effective_qos :3, - kpthsc_requested_qos :3, - kpthsc_requested_qos_override :3, - kpthsc_requested_qos_promote :3, - kpthsc_requested_qos_ipc_override :3, - kpthsc_requested_qos_sync_ipc_override :3, - kpthsc_effective_latency_qos :3; + kpthsc_requested_qos :3, + kpthsc_requested_qos_override :3, + 
kpthsc_requested_qos_promote :3, + kpthsc_requested_qos_ipc_override :3, + kpthsc_requested_qos_sync_ipc_override :3, + kpthsc_effective_latency_qos :3; }; void kperf_thread_scheduling_sample(struct kperf_thread_scheduling *, - struct kperf_context *); + struct kperf_context *); void kperf_thread_scheduling_log(struct kperf_thread_scheduling *); /* thread snapshot information */ @@ -73,7 +73,7 @@ struct kperf_thread_snapshot { }; void kperf_thread_snapshot_sample(struct kperf_thread_snapshot *, - struct kperf_context *); + struct kperf_context *); void kperf_thread_snapshot_log(struct kperf_thread_snapshot *); /* libdispatch information */ @@ -82,7 +82,7 @@ struct kperf_thread_dispatch { }; void kperf_thread_dispatch_sample(struct kperf_thread_dispatch *, - struct kperf_context *); + struct kperf_context *); int kperf_thread_dispatch_pend(struct kperf_context *); void kperf_thread_dispatch_log(struct kperf_thread_dispatch *); diff --git a/osfmk/kperf/x86_64/kperf_mp.c b/osfmk/kperf/x86_64/kperf_mp.c index 25c510ed5..8cbb676e9 100644 --- a/osfmk/kperf/x86_64/kperf_mp.c +++ b/osfmk/kperf/x86_64/kperf_mp.c @@ -49,13 +49,12 @@ kperf_mp_broadcast_other_running(struct kperf_timer *trigger) /* do not IPI processors that are not scheduling threads */ if (processor == PROCESSOR_NULL || - processor->state != PROCESSOR_RUNNING || - processor->active_thread == THREAD_NULL) - { + processor->state != PROCESSOR_RUNNING || + processor->active_thread == THREAD_NULL) { #if DEVELOPMENT || DEBUG BUF_VERB(PERF_TM_SKIPPED, i, - processor != PROCESSOR_NULL ? processor->state : 0, - processor != PROCESSOR_NULL ? processor->active_thread : 0); + processor != PROCESSOR_NULL ? processor->state : 0, + processor != PROCESSOR_NULL ? processor->active_thread : 0); #endif /* DEVELOPMENT || DEBUG */ continue; } @@ -68,13 +67,13 @@ kperf_mp_broadcast_other_running(struct kperf_timer *trigger) /* nor processors that have not responded to the last IPI */ uint64_t already_pending = atomic_fetch_or_explicit( - &trigger->pending_cpus, i_bit, - memory_order_relaxed); + &trigger->pending_cpus, i_bit, + memory_order_relaxed); if (already_pending & i_bit) { #if DEVELOPMENT || DEBUG BUF_VERB(PERF_TM_PENDING, i_bit, already_pending); atomic_fetch_add_explicit(&kperf_pending_ipis, 1, - memory_order_relaxed); + memory_order_relaxed); #endif /* DEVELOPMENT || DEBUG */ continue; } diff --git a/osfmk/libsa/arm/types.h b/osfmk/libsa/arm/types.h index 9a6836ca3..532b4ceea 100644 --- a/osfmk/libsa/arm/types.h +++ b/osfmk/libsa/arm/types.h @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.2 1998/09/30 21:21:00 wsanchez * Merged in IntelMerge1 (mburg: Intel support) * @@ -41,35 +41,35 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.2.1 1996/09/17 16:56:28 bruel - * created from standalone mach servers - * [1996/09/17 16:18:08 bruel] + * created from standalone mach servers + * [1996/09/17 16:18:08 bruel] * * Revision 1.1.7.1 1996/04/11 13:46:28 barbou - * Self-Contained Mach Distribution: - * created. - * [95/12/28 barbou] - * [96/03/28 barbou] - * + * Self-Contained Mach Distribution: + * created. 
+ * [95/12/28 barbou] + * [96/03/28 barbou] + * * $EndLog$ */ -#ifndef _MACH_MACHINE_TYPES_H_ +#ifndef _MACH_MACHINE_TYPES_H_ #define _MACH_MACHINE_TYPES_H_ 1 -typedef long dev_t; /* device number (major+minor) */ +typedef long dev_t; /* device number (major+minor) */ -typedef signed char bit8_t; /* signed 8-bit quantity */ -typedef unsigned char u_bit8_t; /* unsigned 8-bit quantity */ +typedef signed char bit8_t; /* signed 8-bit quantity */ +typedef unsigned char u_bit8_t; /* unsigned 8-bit quantity */ -typedef short bit16_t; /* signed 16-bit quantity */ -typedef unsigned short u_bit16_t; /* unsigned 16-bit quantity */ +typedef short bit16_t; /* signed 16-bit quantity */ +typedef unsigned short u_bit16_t; /* unsigned 16-bit quantity */ -typedef int bit32_t; /* signed 32-bit quantity */ -typedef unsigned int u_bit32_t; /* unsigned 32-bit quantity */ +typedef int bit32_t; /* signed 32-bit quantity */ +typedef unsigned int u_bit32_t; /* unsigned 32-bit quantity */ /* Only 32 bits of the "bit64_t" are significant on this 32-bit machine */ -typedef struct { int __val[2]; } bit64_t; /* signed 64-bit quantity */ +typedef struct { int __val[2]; } bit64_t; /* signed 64-bit quantity */ typedef struct { unsigned int __val[2]; } u_bit64_t;/* unsigned 64-bit quantity */ -#define _SIG64_BITS __val[0] /* bits of interest (32) */ +#define _SIG64_BITS __val[0] /* bits of interest (32) */ #endif /* _MACH_MACHINE_TYPES_H_ */ diff --git a/osfmk/libsa/i386/types.h b/osfmk/libsa/i386/types.h index 3cbf43855..b8459230c 100644 --- a/osfmk/libsa/i386/types.h +++ b/osfmk/libsa/i386/types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.2 1998/09/30 21:21:00 wsanchez * Merged in IntelMerge1 (mburg: Intel support) * @@ -41,36 +41,35 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.2.1 1996/09/17 16:56:28 bruel - * created from standalone mach servers - * [1996/09/17 16:18:08 bruel] + * created from standalone mach servers + * [1996/09/17 16:18:08 bruel] * * Revision 1.1.7.1 1996/04/11 13:46:28 barbou - * Self-Contained Mach Distribution: - * created. - * [95/12/28 barbou] - * [96/03/28 barbou] - * + * Self-Contained Mach Distribution: + * created. 
+ * [95/12/28 barbou] + * [96/03/28 barbou] + * * $EndLog$ */ -#ifndef _MACH_MACHINE_TYPES_H_ +#ifndef _MACH_MACHINE_TYPES_H_ #define _MACH_MACHINE_TYPES_H_ 1 -typedef int dev_t; /* device number (major+minor) */ +typedef int dev_t; /* device number (major+minor) */ -typedef signed char bit8_t; /* signed 8-bit quantity */ -typedef unsigned char u_bit8_t; /* unsigned 8-bit quantity */ +typedef signed char bit8_t; /* signed 8-bit quantity */ +typedef unsigned char u_bit8_t; /* unsigned 8-bit quantity */ -typedef short bit16_t; /* signed 16-bit quantity */ -typedef unsigned short u_bit16_t; /* unsigned 16-bit quantity */ +typedef short bit16_t; /* signed 16-bit quantity */ +typedef unsigned short u_bit16_t; /* unsigned 16-bit quantity */ -typedef int bit32_t; /* signed 32-bit quantity */ -typedef unsigned int u_bit32_t; /* unsigned 32-bit quantity */ +typedef int bit32_t; /* signed 32-bit quantity */ +typedef unsigned int u_bit32_t; /* unsigned 32-bit quantity */ /* Only 32 bits of the "bit64_t" are significant on this 32-bit machine */ -typedef struct { int __val[2]; } bit64_t; /* signed 64-bit quantity */ +typedef struct { int __val[2]; } bit64_t; /* signed 64-bit quantity */ typedef struct { unsigned int __val[2]; } u_bit64_t;/* unsigned 64-bit quantity */ -#define _SIG64_BITS __val[0] /* bits of interest (32) */ +#define _SIG64_BITS __val[0] /* bits of interest (32) */ #endif /* _MACH_MACHINE_TYPES_H_ */ - diff --git a/osfmk/libsa/machine/types.h b/osfmk/libsa/machine/types.h index 08753f955..c860bd949 100644 --- a/osfmk/libsa/machine/types.h +++ b/osfmk/libsa/machine/types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACH_MACHINE_TYPES_H @@ -30,7 +30,7 @@ #if defined (__i386__) || defined (__x86_64__) #include "libsa/i386/types.h" -#elif defined (__arm__)|| defined (__arm64__) +#elif defined (__arm__) || defined (__arm64__) #include "libsa/arm/types.h" #else #error architecture not supported diff --git a/osfmk/libsa/stdlib.h b/osfmk/libsa/stdlib.h index 35ccc599d..ad2788148 100644 --- a/osfmk/libsa/stdlib.h +++ b/osfmk/libsa/stdlib.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -39,21 +39,21 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.4.1 1997/02/21 15:43:19 barbou - * Removed "size_t" definition, include "types.h" instead. - * [1997/02/21 15:36:24 barbou] + * Removed "size_t" definition, include "types.h" instead. + * [1997/02/21 15:36:24 barbou] * * Revision 1.1.2.3 1996/09/30 10:14:34 bruel - * Added strtol and strtoul prototypes. - * [96/09/30 bruel] - * + * Added strtol and strtoul prototypes. + * [96/09/30 bruel] + * * Revision 1.1.2.2 1996/09/23 15:06:22 bruel - * removed bzero and bcopy definitions. - * [96/09/23 bruel] - * + * removed bzero and bcopy definitions. + * [96/09/23 bruel] + * * Revision 1.1.2.1 1996/09/17 16:56:24 bruel - * created from standalone mach servers. - * [96/09/17 bruel] - * + * created from standalone mach servers. + * [96/09/17 bruel] + * * $EndLog$ */ @@ -62,23 +62,23 @@ #include -#ifndef NULL -#define NULL (void *)0 +#ifndef NULL +#define NULL (void *)0 #endif extern int atoi(const char *); extern int atoi_term(char *, char **); extern char *itoa(int, char *); -extern void free(void *); -extern void *malloc(size_t); -extern void *realloc(void *, size_t); +extern void free(void *); +extern void *malloc(size_t); +extern void *realloc(void *, size_t); -extern char *getenv(const char *); +extern char *getenv(const char *); -extern void exit(int); +extern void exit(int); -extern long int strtol (const char *, char **, int); -extern unsigned long int strtoul (const char *, char **, int); +extern long int strtol(const char *, char **, int); +extern unsigned long int strtoul(const char *, char **, int); #endif /* _MACH_STDLIB_H_ */ diff --git a/osfmk/libsa/string.h b/osfmk/libsa/string.h index 894027e28..2a1b67746 100644 --- a/osfmk/libsa/string.h +++ b/osfmk/libsa/string.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
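[Editor's note: the libsa stdlib.h above declares a freestanding subset of the C library (atoi, malloc/free/realloc, strtol/strtoul) for standalone Mach servers. The strtol prototype shown behaves like the ordinary userland one; a small usage sketch with end-pointer error checking:]

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const char *s = "4903xnu";
	char *end = NULL;

	long v = strtol(s, &end, 10);   /* same prototype as in libsa/stdlib.h */
	if (end == s) {
		fprintf(stderr, "no digits parsed\n");
		return 1;
	}
	printf("value=%ld, rest=\"%s\"\n", v, end); /* value=4903, rest="xnu" */
	return 0;
}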
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -32,11 +32,11 @@ * Version 2.0. */ /* - * HISTORY + * HISTORY * @OSF_COPYRIGHT@ */ -#ifndef _STRING_H_ -#define _STRING_H_ 1 +#ifndef _STRING_H_ +#define _STRING_H_ 1 #ifdef MACH_KERNEL_PRIVATE #include @@ -48,7 +48,7 @@ extern "C" { #endif -#ifndef NULL +#ifndef NULL #if defined (__cplusplus) #define NULL 0 #else @@ -56,43 +56,43 @@ extern "C" { #endif #endif -extern void *memcpy(void *, const void *, size_t); -extern int memcmp(const void *, const void *, size_t); -extern void *memmove(void *, const void *, size_t); -extern void *memset(void *, int, size_t); -extern int memset_s(void *, size_t, int, size_t); +extern void *memcpy(void *, const void *, size_t); +extern int memcmp(const void *, const void *, size_t); +extern void *memmove(void *, const void *, size_t); +extern void *memset(void *, int, size_t); +extern int memset_s(void *, size_t, int, size_t); -extern size_t strlen(const char *); -extern size_t strnlen(const char *, size_t); +extern size_t strlen(const char *); +extern size_t strnlen(const char *, size_t); /* strcpy() is being deprecated. Please use strlcpy() instead. */ -extern char *strcpy(char *, const char *) __deprecated; -extern char *strncpy(char *, const char *, size_t); +extern char *strcpy(char *, const char *) __deprecated; +extern char *strncpy(char *, const char *, size_t); -extern size_t strlcat(char *, const char *, size_t); -extern size_t strlcpy(char *, const char *, size_t); +extern size_t strlcat(char *, const char *, size_t); +extern size_t strlcpy(char *, const char *, size_t); /* strcat() is being deprecated. Please use strlcat() instead. */ -extern char *strcat(char *, const char *) __deprecated; -extern char *strncat(char *, const char *, size_t); +extern char *strcat(char *, const char *) __deprecated; +extern char *strncat(char *, const char *, size_t); /* strcmp() is being deprecated. Please use strncmp() instead. 
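[Editor's note: the deprecation comments above push callers from strcpy/strcat to the bounded strlcpy/strlcat, whose return value is the length the full string would have needed, so truncation is detectable. A userland sketch of that idiom; strlcpy is native on Darwin/BSD, while other platforms may need a local implementation:]

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char buf[8];

	/* strlcpy returns strlen(src); a result >= sizeof(buf) means truncation. */
	size_t need = strlcpy(buf, "thread_status", sizeof(buf));
	if (need >= sizeof(buf)) {
		fprintf(stderr, "truncated: needed %zu bytes\n", need + 1);
	}
	printf("buf=\"%s\"\n", buf); /* "thread_" */
	return 0;
}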
*/ -extern int strcmp(const char *, const char *); -extern int strncmp(const char *,const char *, size_t); +extern int strcmp(const char *, const char *); +extern int strncmp(const char *, const char *, size_t); -extern int strcasecmp(const char *s1, const char *s2); -extern int strncasecmp(const char *s1, const char *s2, size_t n); -extern char *strnstr(char *s, const char *find, size_t slen); -extern char *strchr(const char *s, int c); +extern int strcasecmp(const char *s1, const char *s2); +extern int strncasecmp(const char *s1, const char *s2, size_t n); +extern char *strnstr(char *s, const char *find, size_t slen); +extern char *strchr(const char *s, int c); #ifdef XNU_KERNEL_PRIVATE -extern char *strrchr(const char *s, int c); +extern char *strrchr(const char *s, int c); #endif -extern char *STRDUP(const char *, int); -extern int strprefix(const char *s1, const char *s2); +extern char *STRDUP(const char *, int); +extern int strprefix(const char *s1, const char *s2); -extern int bcmp(const void *, const void *, size_t); -extern void bcopy(const void *, void *, size_t); -extern void bzero(void *, size_t); +extern int bcmp(const void *, const void *, size_t); +extern void bcopy(const void *, void *, size_t); +extern void bzero(void *, size_t); #ifdef PRIVATE #include @@ -144,4 +144,4 @@ extern void bzero(void *, size_t); } #endif -#endif /* _STRING_H_ */ +#endif /* _STRING_H_ */ diff --git a/osfmk/libsa/sys/timers.h b/osfmk/libsa/sys/timers.h index e03d7737c..53a7b3d36 100644 --- a/osfmk/libsa/sys/timers.h +++ b/osfmk/libsa/sys/timers.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,17 +38,17 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.4.1 1997/01/31 15:46:34 emcmanus - * Merged with nmk22b1_shared. - * [1997/01/30 08:47:46 emcmanus] + * Merged with nmk22b1_shared. + * [1997/01/30 08:47:46 emcmanus] * * Revision 1.1.2.2 1996/11/29 13:04:58 emcmanus - * Added TIMEOFDAY and getclock() prototype. - * [1996/11/29 09:59:33 emcmanus] - * + * Added TIMEOFDAY and getclock() prototype. + * [1996/11/29 09:59:33 emcmanus] + * * Revision 1.1.2.1 1996/10/14 13:31:49 emcmanus - * Created. - * [1996/10/14 13:30:09 emcmanus] - * + * Created. + * [1996/10/14 13:30:09 emcmanus] + * * $EndLog$ */ @@ -56,7 +56,7 @@ #define _SYS_TIMERS_H_ /* POSIX . 
For now, we define just enough to be able to build - the pthread library, with its pthread_cond_timedwait() interface. */ + * the pthread library, with its pthread_cond_timedwait() interface. */ struct timespec { unsigned long tv_sec; long tv_nsec; @@ -66,4 +66,4 @@ struct timespec { extern int getclock(int, struct timespec *); -#endif /* _SYS_TIMERS_H_ */ +#endif /* _SYS_TIMERS_H_ */ diff --git a/osfmk/libsa/types.h b/osfmk/libsa/types.h index 341e42b0e..737206913 100644 --- a/osfmk/libsa/types.h +++ b/osfmk/libsa/types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:51 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,8 +38,8 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.2.1 1996/09/17 16:56:21 bruel - * created from standalone mach servers. - * [96/09/17 bruel] + * created from standalone mach servers. + * [96/09/17 bruel] * * $EndLog$ */ @@ -49,51 +49,51 @@ #include "libsa/machine/types.h" -#ifndef _SIZE_T +#ifndef _SIZE_T #define _SIZE_T -typedef unsigned long size_t; -#endif /* _SIZE_T */ +typedef unsigned long size_t; +#endif /* _SIZE_T */ /* * Common type definitions that lots of old files seem to want. */ -typedef unsigned char u_char; /* unsigned char */ -typedef unsigned short u_short; /* unsigned short */ -typedef unsigned int u_int; /* unsigned int */ -typedef unsigned long u_long; /* unsigned long */ +typedef unsigned char u_char; /* unsigned char */ +typedef unsigned short u_short; /* unsigned short */ +typedef unsigned int u_int; /* unsigned int */ +typedef unsigned long u_long; /* unsigned long */ typedef struct _quad_ { - unsigned int val[2]; /* 2 32-bit values make... */ -} quad; /* an 8-byte item */ + unsigned int val[2]; /* 2 32-bit values make... */ +} quad; /* an 8-byte item */ -typedef char * caddr_t; /* address of a (signed) char */ +typedef char * caddr_t; /* address of a (signed) char */ -typedef unsigned int daddr_t; /* an unsigned 32 */ +typedef unsigned int daddr_t; /* an unsigned 32 */ #if 0 /* off_t should be 64-bit ! 
*/ -typedef unsigned int off_t; /* another unsigned 32 */ +typedef unsigned int off_t; /* another unsigned 32 */ #endif -#define major(i) (((i) >> 8) & 0xFF) -#define minor(i) ((i) & 0xFF) -#define makedev(i,j) ((((i) & 0xFF) << 8) | ((j) & 0xFF)) +#define major(i) (((i) >> 8) & 0xFF) +#define minor(i) ((i) & 0xFF) +#define makedev(i, j) ((((i) & 0xFF) << 8) | ((j) & 0xFF)) -#ifndef NULL -#define NULL ((void *) 0) /* the null pointer */ +#ifndef NULL +#define NULL ((void *) 0) /* the null pointer */ #endif /* * Shorthand type definitions for unsigned storage classes */ -typedef unsigned char uchar_t; -typedef unsigned short ushort_t; -typedef unsigned int uint_t; -typedef unsigned long ulong_t; -typedef volatile unsigned char vuchar_t; -typedef volatile unsigned short vushort_t; -typedef volatile unsigned int vuint_t; -typedef volatile unsigned long vulong_t; +typedef unsigned char uchar_t; +typedef unsigned short ushort_t; +typedef unsigned int uint_t; +typedef unsigned long ulong_t; +typedef volatile unsigned char vuchar_t; +typedef volatile unsigned short vushort_t; +typedef volatile unsigned int vuint_t; +typedef volatile unsigned long vulong_t; /* * Deprecation macro @@ -104,4 +104,4 @@ typedef volatile unsigned long vulong_t; #define __deprecated /* nothing */ #endif -#endif /* _MACH_TYPES_H_ */ +#endif /* _MACH_TYPES_H_ */ diff --git a/osfmk/lockd/lockd_mach_types.h b/osfmk/lockd/lockd_mach_types.h index 1e59569e9..4196b720b 100644 --- a/osfmk/lockd/lockd_mach_types.h +++ b/osfmk/lockd/lockd_mach_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,13 +31,13 @@ #define _LOCKD_MACH_TYPES_H_ /* - * XXX NFSV3_MAX_FH_SIZE is defined in sys/mount.h, but we can't include + * XXX NFSV3_MAX_FH_SIZE is defined in sys/mount.h, but we can't include * that here. Osfmk includes libsa/types.h which causes massive conflicts * with sys/types.h that get indirectly included with sys/mount.h. In user * land below will work on a build that does not yet have the new macro * definition. 
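[Editor's note: the libsa types.h macros above pack an 8-bit major and 8-bit minor number into a dev_t, and the reformatted makedev(i, j) is the inverse of major()/minor(). A standalone round-trip sketch; the macros are renamed here only to avoid colliding with the host's own major()/minor():]

#include <assert.h>
#include <stdio.h>

/* Mirrors the macros in osfmk/libsa/types.h. */
#define dev_major(i)      (((i) >> 8) & 0xFF)
#define dev_minor(i)      ((i) & 0xFF)
#define dev_makedev(i, j) ((((i) & 0xFF) << 8) | ((j) & 0xFF))

int
main(void)
{
	int dev = dev_makedev(14, 3);        /* 0x0E03 */
	assert(dev_major(dev) == 14);
	assert(dev_minor(dev) == 3);
	printf("dev=0x%04X\n", dev);
	return 0;
}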
*/ - + #ifndef NFSV3_MAX_FH_SIZE #define NFSV3_MAX_FH_SIZE 64 #endif diff --git a/osfmk/mach/arm/_structs.h b/osfmk/mach/arm/_structs.h index 239230760..cc815f80b 100644 --- a/osfmk/mach/arm/_structs.h +++ b/osfmk/mach/arm/_structs.h @@ -28,92 +28,92 @@ /* * @OSF_COPYRIGHT@ */ -#ifndef _MACH_ARM__STRUCTS_H_ -#define _MACH_ARM__STRUCTS_H_ +#ifndef _MACH_ARM__STRUCTS_H_ +#define _MACH_ARM__STRUCTS_H_ #include /* __DARWIN_UNIX03 */ #include /* __uint32_t */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_EXCEPTION_STATE struct __darwin_arm_exception_state +#define _STRUCT_ARM_EXCEPTION_STATE struct __darwin_arm_exception_state _STRUCT_ARM_EXCEPTION_STATE { - __uint32_t __exception; /* number of arm exception taken */ - __uint32_t __fsr; /* Fault status */ - __uint32_t __far; /* Virtual Fault Address */ + __uint32_t __exception; /* number of arm exception taken */ + __uint32_t __fsr; /* Fault status */ + __uint32_t __far; /* Virtual Fault Address */ }; #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_ARM_EXCEPTION_STATE struct arm_exception_state +#define _STRUCT_ARM_EXCEPTION_STATE struct arm_exception_state _STRUCT_ARM_EXCEPTION_STATE { - __uint32_t exception; /* number of arm exception taken */ - __uint32_t fsr; /* Fault status */ - __uint32_t far; /* Virtual Fault Address */ + __uint32_t exception; /* number of arm exception taken */ + __uint32_t fsr; /* Fault status */ + __uint32_t far; /* Virtual Fault Address */ }; #endif /* __DARWIN_UNIX03 */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_EXCEPTION_STATE64 struct __darwin_arm_exception_state64 +#define _STRUCT_ARM_EXCEPTION_STATE64 struct __darwin_arm_exception_state64 _STRUCT_ARM_EXCEPTION_STATE64 { - __uint64_t __far; /* Virtual Fault Address */ - __uint32_t __esr; /* Exception syndrome */ - __uint32_t __exception; /* number of arm exception taken */ + __uint64_t __far; /* Virtual Fault Address */ + __uint32_t __esr; /* Exception syndrome */ + __uint32_t __exception; /* number of arm exception taken */ }; #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_ARM_EXCEPTION_STATE64 struct arm_exception_state64 +#define _STRUCT_ARM_EXCEPTION_STATE64 struct arm_exception_state64 _STRUCT_ARM_EXCEPTION_STATE64 { - __uint64_t far; /* Virtual Fault Address */ - __uint32_t esr; /* Exception syndrome */ - __uint32_t exception; /* number of arm exception taken */ + __uint64_t far; /* Virtual Fault Address */ + __uint32_t esr; /* Exception syndrome */ + __uint32_t exception; /* number of arm exception taken */ }; #endif /* __DARWIN_UNIX03 */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_THREAD_STATE struct __darwin_arm_thread_state +#define _STRUCT_ARM_THREAD_STATE struct __darwin_arm_thread_state _STRUCT_ARM_THREAD_STATE { - __uint32_t __r[13]; /* General purpose register r0-r12 */ - __uint32_t __sp; /* Stack pointer r13 */ - __uint32_t __lr; /* Link register r14 */ - __uint32_t __pc; /* Program counter r15 */ - __uint32_t __cpsr; /* Current program status register */ + __uint32_t __r[13]; /* General purpose register r0-r12 */ + __uint32_t __sp; /* Stack pointer r13 */ + __uint32_t __lr; /* Link register r14 */ + __uint32_t __pc; /* Program counter r15 */ + __uint32_t __cpsr; /* Current program status register */ }; #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_ARM_THREAD_STATE struct arm_thread_state +#define _STRUCT_ARM_THREAD_STATE struct arm_thread_state _STRUCT_ARM_THREAD_STATE { - __uint32_t r[13]; /* General purpose register r0-r12 */ - __uint32_t sp; /* Stack pointer r13 */ - __uint32_t lr; /* Link register r14 */ - __uint32_t pc; /* Program counter r15 */ - 
__uint32_t cpsr; /* Current program status register */ + __uint32_t r[13]; /* General purpose register r0-r12 */ + __uint32_t sp; /* Stack pointer r13 */ + __uint32_t lr; /* Link register r14 */ + __uint32_t pc; /* Program counter r15 */ + __uint32_t cpsr; /* Current program status register */ }; #endif /* __DARWIN_UNIX03 */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_THREAD_STATE64 struct __darwin_arm_thread_state64 +#define _STRUCT_ARM_THREAD_STATE64 struct __darwin_arm_thread_state64 _STRUCT_ARM_THREAD_STATE64 { - __uint64_t __x[29]; /* General purpose registers x0-x28 */ - __uint64_t __fp; /* Frame pointer x29 */ - __uint64_t __lr; /* Link register x30 */ - __uint64_t __sp; /* Stack pointer x31 */ - __uint64_t __pc; /* Program counter */ - __uint32_t __cpsr; /* Current program status register */ + __uint64_t __x[29]; /* General purpose registers x0-x28 */ + __uint64_t __fp; /* Frame pointer x29 */ + __uint64_t __lr; /* Link register x30 */ + __uint64_t __sp; /* Stack pointer x31 */ + __uint64_t __pc; /* Program counter */ + __uint32_t __cpsr; /* Current program status register */ __uint32_t __pad; /* Same size for 32-bit or 64-bit clients */ }; #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_ARM_THREAD_STATE64 struct arm_thread_state64 +#define _STRUCT_ARM_THREAD_STATE64 struct arm_thread_state64 _STRUCT_ARM_THREAD_STATE64 { - __uint64_t x[29]; /* General purpose registers x0-x28 */ - __uint64_t fp; /* Frame pointer x29 */ - __uint64_t lr; /* Link register x30 */ - __uint64_t sp; /* Stack pointer x31 */ - __uint64_t pc; /* Program counter */ - __uint32_t cpsr; /* Current program status register */ + __uint64_t x[29]; /* General purpose registers x0-x28 */ + __uint64_t fp; /* Frame pointer x29 */ + __uint64_t lr; /* Link register x30 */ + __uint64_t sp; /* Stack pointer x31 */ + __uint64_t pc; /* Program counter */ + __uint32_t cpsr; /* Current program status register */ __uint32_t __pad; /* Same size for 32-bit or 64-bit clients */ }; #endif /* __DARWIN_UNIX03 */ @@ -121,60 +121,59 @@ _STRUCT_ARM_THREAD_STATE64 #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) #if __DARWIN_UNIX03 #define __darwin_arm_thread_state64_get_pc(ts) \ - ((ts).__pc) + ((ts).__pc) #define __darwin_arm_thread_state64_get_pc_fptr(ts) \ - ((void*)(uintptr_t)((ts).__pc)) + ((void*)(uintptr_t)((ts).__pc)) #define __darwin_arm_thread_state64_set_pc_fptr(ts, fptr) \ - ((ts).__pc = (uintptr_t)(fptr)) + ((ts).__pc = (uintptr_t)(fptr)) #define __darwin_arm_thread_state64_get_lr(ts) \ - ((ts).__lr) + ((ts).__lr) #define __darwin_arm_thread_state64_get_lr_fptr(ts) \ - ((void*)(uintptr_t)((ts).__lr)) + ((void*)(uintptr_t)((ts).__lr)) #define __darwin_arm_thread_state64_set_lr_fptr(ts, fptr) \ - ((ts).__lr = (uintptr_t)(fptr)) + ((ts).__lr = (uintptr_t)(fptr)) #define __darwin_arm_thread_state64_get_sp(ts) \ - ((ts).__sp) + ((ts).__sp) #define __darwin_arm_thread_state64_set_sp(ts, ptr) \ - ((ts).__sp = (uintptr_t)(ptr)) + ((ts).__sp = (uintptr_t)(ptr)) #define __darwin_arm_thread_state64_get_fp(ts) \ - ((ts).__fp) + ((ts).__fp) #define __darwin_arm_thread_state64_set_fp(ts, ptr) \ - ((ts).__fp = (uintptr_t)(ptr)) + ((ts).__fp = (uintptr_t)(ptr)) #else /* !__DARWIN_UNIX03 */ #define __darwin_arm_thread_state64_get_pc(ts) \ - ((ts).pc) + ((ts).pc) #define __darwin_arm_thread_state64_get_pc_fptr(ts) \ - ((void*)(uintptr_t)((ts).pc)) + ((void*)(uintptr_t)((ts).pc)) #define __darwin_arm_thread_state64_set_pc_fptr(ts, fptr) \ - ((ts).pc = (uintptr_t)(fptr)) + ((ts).pc = (uintptr_t)(fptr)) #define 
__darwin_arm_thread_state64_get_lr(ts) \ - ((ts).lr) + ((ts).lr) #define __darwin_arm_thread_state64_get_lr_fptr(ts) \ - ((void*)(uintptr_t)((ts).lr)) + ((void*)(uintptr_t)((ts).lr)) #define __darwin_arm_thread_state64_set_lr_fptr(ts, fptr) \ - ((ts).lr = (uintptr_t)(fptr)) + ((ts).lr = (uintptr_t)(fptr)) #define __darwin_arm_thread_state64_get_sp(ts) \ - ((ts).sp) + ((ts).sp) #define __darwin_arm_thread_state64_set_sp(ts, ptr) \ - ((ts).sp = (uintptr_t)(ptr)) + ((ts).sp = (uintptr_t)(ptr)) #define __darwin_arm_thread_state64_get_fp(ts) \ - ((ts).fp) + ((ts).fp) #define __darwin_arm_thread_state64_set_fp(ts, ptr) \ - ((ts).fp = (uintptr_t)(ptr)) + ((ts).fp = (uintptr_t)(ptr)) #endif /* __DARWIN_UNIX03 */ #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */ #endif /* !defined(KERNEL) */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_VFP_STATE struct __darwin_arm_vfp_state +#define _STRUCT_ARM_VFP_STATE struct __darwin_arm_vfp_state _STRUCT_ARM_VFP_STATE { __uint32_t __r[64]; __uint32_t __fpscr; - }; #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_ARM_VFP_STATE struct arm_vfp_state +#define _STRUCT_ARM_VFP_STATE struct arm_vfp_state _STRUCT_ARM_VFP_STATE { __uint32_t r[64]; @@ -183,8 +182,8 @@ _STRUCT_ARM_VFP_STATE #endif /* __DARWIN_UNIX03 */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_NEON_STATE64 struct __darwin_arm_neon_state64 -#define _STRUCT_ARM_NEON_STATE struct __darwin_arm_neon_state +#define _STRUCT_ARM_NEON_STATE64 struct __darwin_arm_neon_state64 +#define _STRUCT_ARM_NEON_STATE struct __darwin_arm_neon_state #if defined(__arm64__) _STRUCT_ARM_NEON_STATE64 @@ -226,17 +225,15 @@ _STRUCT_ARM_NEON_STATE #if defined(__arm64__) _STRUCT_ARM_NEON_STATE64 { - __uint128_t q[32]; - uint32_t fpsr; - uint32_t fpcr; - + __uint128_t q[32]; + uint32_t fpsr; + uint32_t fpcr; }; _STRUCT_ARM_NEON_STATE { - __uint128_t q[16]; - uint32_t fpsr; - uint32_t fpcr; - + __uint128_t q[16]; + uint32_t fpsr; + uint32_t fpcr; }; #elif defined(__arm__) /* @@ -265,7 +262,7 @@ _STRUCT_ARM_NEON_STATE /* Old-fashioned debug state is only for ARM */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_DEBUG_STATE struct __darwin_arm_debug_state +#define _STRUCT_ARM_DEBUG_STATE struct __darwin_arm_debug_state _STRUCT_ARM_DEBUG_STATE { __uint32_t __bvr[16]; @@ -274,7 +271,7 @@ _STRUCT_ARM_DEBUG_STATE __uint32_t __wcr[16]; }; #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_ARM_DEBUG_STATE struct arm_debug_state +#define _STRUCT_ARM_DEBUG_STATE struct arm_debug_state _STRUCT_ARM_DEBUG_STATE { __uint32_t bvr[16]; @@ -289,7 +286,7 @@ _STRUCT_ARM_DEBUG_STATE /* ARM's arm_debug_state is ARM64's arm_legacy_debug_state */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_LEGACY_DEBUG_STATE struct arm_legacy_debug_state +#define _STRUCT_ARM_LEGACY_DEBUG_STATE struct arm_legacy_debug_state _STRUCT_ARM_LEGACY_DEBUG_STATE { __uint32_t __bvr[16]; @@ -298,7 +295,7 @@ _STRUCT_ARM_LEGACY_DEBUG_STATE __uint32_t __wcr[16]; }; #else /* __DARWIN_UNIX03 */ -#define _STRUCT_ARM_LEGACY_DEBUG_STATE struct arm_legacy_debug_state +#define _STRUCT_ARM_LEGACY_DEBUG_STATE struct arm_legacy_debug_state _STRUCT_ARM_LEGACY_DEBUG_STATE { __uint32_t bvr[16]; @@ -312,55 +309,55 @@ _STRUCT_ARM_LEGACY_DEBUG_STATE #endif #if __DARWIN_UNIX03 -#define _STRUCT_ARM_DEBUG_STATE32 struct __darwin_arm_debug_state32 +#define _STRUCT_ARM_DEBUG_STATE32 struct __darwin_arm_debug_state32 _STRUCT_ARM_DEBUG_STATE32 { __uint32_t __bvr[16]; __uint32_t __bcr[16]; __uint32_t __wvr[16]; __uint32_t __wcr[16]; - __uint64_t __mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ + __uint64_t 
__mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ }; -#define _STRUCT_ARM_DEBUG_STATE64 struct __darwin_arm_debug_state64 +#define _STRUCT_ARM_DEBUG_STATE64 struct __darwin_arm_debug_state64 _STRUCT_ARM_DEBUG_STATE64 { __uint64_t __bvr[16]; __uint64_t __bcr[16]; __uint64_t __wvr[16]; __uint64_t __wcr[16]; - __uint64_t __mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ + __uint64_t __mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ }; #else /* !__DARWIN_UNIX03 */ -#define _STRUCT_ARM_DEBUG_STATE32 struct arm_debug_state32 +#define _STRUCT_ARM_DEBUG_STATE32 struct arm_debug_state32 _STRUCT_ARM_DEBUG_STATE32 { __uint32_t bvr[16]; __uint32_t bcr[16]; __uint32_t wvr[16]; __uint32_t wcr[16]; - __uint64_t mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ + __uint64_t mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ }; -#define _STRUCT_ARM_DEBUG_STATE64 struct arm_debug_state64 +#define _STRUCT_ARM_DEBUG_STATE64 struct arm_debug_state64 _STRUCT_ARM_DEBUG_STATE64 { __uint64_t bvr[16]; __uint64_t bcr[16]; __uint64_t wvr[16]; __uint64_t wcr[16]; - __uint64_t mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ + __uint64_t mdscr_el1; /* Bit 0 is SS (Hardware Single Step) */ }; #endif /* __DARWIN_UNIX03 */ #if __DARWIN_UNIX03 -#define _STRUCT_ARM_CPMU_STATE64 struct __darwin_arm_cpmu_state64 +#define _STRUCT_ARM_CPMU_STATE64 struct __darwin_arm_cpmu_state64 _STRUCT_ARM_CPMU_STATE64 { __uint64_t __ctrs[16]; }; #else /* __DARWIN_UNIX03 */ -#define _STRUCT_ARM_CPMU_STATE64 struct arm_cpmu_state64 +#define _STRUCT_ARM_CPMU_STATE64 struct arm_cpmu_state64 _STRUCT_ARM_CPMU_STATE64 { __uint64_t ctrs[16]; diff --git a/osfmk/mach/arm/boolean.h b/osfmk/mach/arm/boolean.h index dd0afc6e3..4ff6580cf 100644 --- a/osfmk/mach/arm/boolean.h +++ b/osfmk/mach/arm/boolean.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
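[Editor's note: in the debug-state structures above, the new mdscr_el1 field's bit 0 is the AArch64 hardware single-step enable, per the "Bit 0 is SS" comments. A standalone sketch of toggling that bit, with the layout copied from _STRUCT_ARM_DEBUG_STATE64; the helper itself is illustrative, not kernel API:]

#include <stdint.h>
#include <stdio.h>

/* Layout as in _STRUCT_ARM_DEBUG_STATE64 (non-__DARWIN_UNIX03 spelling). */
struct darwin_arm_debug_state64 {
	uint64_t bvr[16];
	uint64_t bcr[16];
	uint64_t wvr[16];
	uint64_t wcr[16];
	uint64_t mdscr_el1;  /* Bit 0 is SS (Hardware Single Step) */
};

#define MDSCR_SS (1ULL << 0)

static void
set_single_step(struct darwin_arm_debug_state64 *ds, int on)
{
	if (on) {
		ds->mdscr_el1 |= MDSCR_SS;
	} else {
		ds->mdscr_el1 &= ~MDSCR_SS;
	}
}

int
main(void)
{
	struct darwin_arm_debug_state64 ds = { { 0 }, { 0 }, { 0 }, { 0 }, 0 };
	set_single_step(&ds, 1);
	printf("mdscr_el1=0x%llx\n", (unsigned long long)ds.mdscr_el1); /* 0x1 */
	return 0;
}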
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -62,9 +62,9 @@ * Boolean type, for ARM. */ -#ifndef _MACH_ARM_BOOLEAN_H_ +#ifndef _MACH_ARM_BOOLEAN_H_ #define _MACH_ARM_BOOLEAN_H_ -typedef int boolean_t; +typedef int boolean_t; -#endif /* _MACH_ARM_BOOLEAN_H_ */ +#endif /* _MACH_ARM_BOOLEAN_H_ */ diff --git a/osfmk/mach/arm/exception.h b/osfmk/mach/arm/exception.h index 857c1fa0a..14478d091 100644 --- a/osfmk/mach/arm/exception.h +++ b/osfmk/mach/arm/exception.h @@ -26,7 +26,7 @@ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _MACH_ARM_EXCEPTION_H_ +#ifndef _MACH_ARM_EXCEPTION_H_ #define _MACH_ARM_EXCEPTION_H_ #define EXC_TYPES_COUNT 14 /* incl. illegal exception 0 */ @@ -43,7 +43,7 @@ * EXC_BAD_INSTRUCTION */ -#define EXC_ARM_UNDEFINED 1 /* Undefined */ +#define EXC_ARM_UNDEFINED 1 /* Undefined */ /* @@ -51,16 +51,16 @@ * Note: do not conflict with kern_return_t values returned by vm_fault */ -#define EXC_ARM_DA_ALIGN 0x101 /* Alignment Fault */ -#define EXC_ARM_DA_DEBUG 0x102 /* Debug (watch/break) Fault */ -#define EXC_ARM_SP_ALIGN 0x103 /* SP Alignment Fault */ -#define EXC_ARM_SWP 0x104 /* SWP instruction */ +#define EXC_ARM_DA_ALIGN 0x101 /* Alignment Fault */ +#define EXC_ARM_DA_DEBUG 0x102 /* Debug (watch/break) Fault */ +#define EXC_ARM_SP_ALIGN 0x103 /* SP Alignment Fault */ +#define EXC_ARM_SWP 0x104 /* SWP instruction */ /* * EXC_BREAKPOINT */ -#define EXC_ARM_BREAKPOINT 1 /* breakpoint trap */ +#define EXC_ARM_BREAKPOINT 1 /* breakpoint trap */ -#endif /* _MACH_ARM_EXCEPTION_H_ */ +#endif /* _MACH_ARM_EXCEPTION_H_ */ diff --git a/osfmk/mach/arm/kern_return.h b/osfmk/mach/arm/kern_return.h index eb36fc2b2..ece0c4173 100644 --- a/osfmk/mach/arm/kern_return.h +++ b/osfmk/mach/arm/kern_return.h @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,11 +64,11 @@ * Machine-dependent kernel return definitions. 
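[Editor's note: exception.h above enumerates the ARM-specific subcodes delivered alongside EXC_BAD_ACCESS (EXC_ARM_DA_ALIGN through EXC_ARM_SWP), chosen not to collide with kern_return_t values from vm_fault. A hypothetical decoder for those values, with the constants copied from the header; the helper is illustrative, not kernel API:]

#include <stdio.h>

/* Subcode values as in osfmk/mach/arm/exception.h */
#define EXC_ARM_DA_ALIGN 0x101 /* Alignment Fault */
#define EXC_ARM_DA_DEBUG 0x102 /* Debug (watch/break) Fault */
#define EXC_ARM_SP_ALIGN 0x103 /* SP Alignment Fault */
#define EXC_ARM_SWP      0x104 /* SWP instruction */

static const char *
arm_bad_access_subcode(long code)
{
	switch (code) {
	case EXC_ARM_DA_ALIGN: return "alignment fault";
	case EXC_ARM_DA_DEBUG: return "debug (watch/break) fault";
	case EXC_ARM_SP_ALIGN: return "SP alignment fault";
	case EXC_ARM_SWP:      return "SWP instruction";
	default:               return "other (e.g. kern_return_t from vm_fault)";
	}
}

int
main(void)
{
	printf("0x101 -> %s\n", arm_bad_access_subcode(EXC_ARM_DA_ALIGN));
	return 0;
}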
*/ -#ifndef _MACH_ARM_KERN_RETURN_H_ +#ifndef _MACH_ARM_KERN_RETURN_H_ #define _MACH_ARM_KERN_RETURN_H_ -#ifndef ASSEMBLER -typedef int kern_return_t; -#endif /* ASSEMBLER */ +#ifndef ASSEMBLER +typedef int kern_return_t; +#endif /* ASSEMBLER */ -#endif /* _MACH_ARM_KERN_RETURN_H_ */ +#endif /* _MACH_ARM_KERN_RETURN_H_ */ diff --git a/osfmk/mach/arm/ndr_def.h b/osfmk/mach/arm/ndr_def.h index 49ea78052..3cd9046e2 100644 --- a/osfmk/mach/arm/ndr_def.h +++ b/osfmk/mach/arm/ndr_def.h @@ -34,10 +34,10 @@ #include NDR_record_t NDR_record = { - 0, /* mig_reserved */ - 0, /* mig_reserved */ - 0, /* mig_reserved */ - NDR_PROTOCOL_2_0, + 0, /* mig_reserved */ + 0, /* mig_reserved */ + 0, /* mig_reserved */ + NDR_PROTOCOL_2_0, NDR_INT_LITTLE_ENDIAN, NDR_CHAR_ASCII, NDR_FLOAT_IEEE, diff --git a/osfmk/mach/arm/processor_info.h b/osfmk/mach/arm/processor_info.h index 67f9df9b8..8f311b328 100644 --- a/osfmk/mach/arm/processor_info.h +++ b/osfmk/mach/arm/processor_info.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Apple Inc. All rights reserved. + * Copyright (c) 2007-2018 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -25,33 +25,48 @@ * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* - * FILE_ID: processor_info.h - */ #ifndef _MACH_ARM_PROCESSOR_INFO_H_ #define _MACH_ARM_PROCESSOR_INFO_H_ -#define PROCESSOR_CPU_STAT 0x10000003 /* Low level cpu statistics */ +#define PROCESSOR_CPU_STAT 0x10000003 /* Low-level CPU statistics */ +#define PROCESSOR_CPU_STAT64 0x10000004 /* Low-level CPU statistics, in full 64-bit */ -#include /* uint32_t */ +#include /* uint32_t, uint64_t */ struct processor_cpu_stat { - uint32_t irq_ex_cnt; - uint32_t ipi_cnt; - uint32_t timer_cnt; - uint32_t undef_ex_cnt; - uint32_t unaligned_cnt; - uint32_t vfp_cnt; - uint32_t vfp_shortv_cnt; - uint32_t data_ex_cnt; - uint32_t instr_ex_cnt; + uint32_t irq_ex_cnt; + uint32_t ipi_cnt; + uint32_t timer_cnt; + uint32_t undef_ex_cnt; + uint32_t unaligned_cnt; + uint32_t vfp_cnt; + uint32_t vfp_shortv_cnt; + uint32_t data_ex_cnt; + uint32_t instr_ex_cnt; }; -typedef struct processor_cpu_stat processor_cpu_stat_data_t; -typedef struct processor_cpu_stat *processor_cpu_stat_t; -#define PROCESSOR_CPU_STAT_COUNT ((mach_msg_type_number_t) \ - (sizeof(processor_cpu_stat_data_t)/sizeof(natural_t))) +typedef struct processor_cpu_stat processor_cpu_stat_data_t; +typedef struct processor_cpu_stat *processor_cpu_stat_t; +#define PROCESSOR_CPU_STAT_COUNT ((mach_msg_type_number_t) \ + (sizeof(processor_cpu_stat_data_t) / sizeof(natural_t))) + +struct processor_cpu_stat64 { + uint64_t irq_ex_cnt; + uint64_t ipi_cnt; + uint64_t timer_cnt; + uint64_t undef_ex_cnt; + uint64_t unaligned_cnt; + uint64_t vfp_cnt; + uint64_t vfp_shortv_cnt; + uint64_t data_ex_cnt; + uint64_t instr_ex_cnt; + uint64_t pmi_cnt; +} __attribute__((packed, aligned(4))); +typedef struct processor_cpu_stat64 processor_cpu_stat64_data_t; +typedef struct processor_cpu_stat64 *processor_cpu_stat64_t; +#define PROCESSOR_CPU_STAT64_COUNT ((mach_msg_type_number_t) \ + (sizeof(processor_cpu_stat64_data_t) / sizeof(integer_t))) #endif /* _MACH_ARM_PROCESSOR_INFO_H_ */ diff --git a/osfmk/mach/arm/rpc.h b/osfmk/mach/arm/rpc.h index 9aebaead5..915e96063 100644 --- a/osfmk/mach/arm/rpc.h +++ b/osfmk/mach/arm/rpc.h @@ -29,7 +29,7 @@ * @OSF_COPYRIGHT@ */ -#ifndef _MACH_ARM_RPC_H_ -#define _MACH_ARM_RPC_H_ +#ifndef _MACH_ARM_RPC_H_ +#define _MACH_ARM_RPC_H_ #endif /* _MACH_ARM_RPC_H_ */ diff --git a/osfmk/mach/arm/thread_state.h b/osfmk/mach/arm/thread_state.h index 
02d787317..3a6369f8f 100644 --- a/osfmk/mach/arm/thread_state.h +++ b/osfmk/mach/arm/thread_state.h @@ -33,12 +33,12 @@ #define _MACH_ARM_THREAD_STATE_H_ /* Size of maximum exported thread state in words */ -#define ARM_THREAD_STATE_MAX (144) /* Size of biggest state possible */ +#define ARM_THREAD_STATE_MAX (144) /* Size of biggest state possible */ #if defined (__arm__) || defined(__arm64__) -#define THREAD_STATE_MAX ARM_THREAD_STATE_MAX +#define THREAD_STATE_MAX ARM_THREAD_STATE_MAX #else #error Unsupported arch #endif -#endif /* _MACH_ARM_THREAD_STATE_H_ */ +#endif /* _MACH_ARM_THREAD_STATE_H_ */ diff --git a/osfmk/mach/arm/thread_status.h b/osfmk/mach/arm/thread_status.h index 8bdbe8a9e..27a5441d8 100644 --- a/osfmk/mach/arm/thread_status.h +++ b/osfmk/mach/arm/thread_status.h @@ -46,78 +46,78 @@ * Flavors */ -#define ARM_THREAD_STATE 1 +#define ARM_THREAD_STATE 1 #define ARM_UNIFIED_THREAD_STATE ARM_THREAD_STATE -#define ARM_VFP_STATE 2 -#define ARM_EXCEPTION_STATE 3 -#define ARM_DEBUG_STATE 4 /* pre-armv8 */ -#define THREAD_STATE_NONE 5 -#define ARM_THREAD_STATE64 6 -#define ARM_EXCEPTION_STATE64 7 +#define ARM_VFP_STATE 2 +#define ARM_EXCEPTION_STATE 3 +#define ARM_DEBUG_STATE 4 /* pre-armv8 */ +#define THREAD_STATE_NONE 5 +#define ARM_THREAD_STATE64 6 +#define ARM_EXCEPTION_STATE64 7 // ARM_THREAD_STATE_LAST (legacy) 8 -#define ARM_THREAD_STATE32 9 +#define ARM_THREAD_STATE32 9 /* API */ -#define ARM_DEBUG_STATE32 14 -#define ARM_DEBUG_STATE64 15 -#define ARM_NEON_STATE 16 -#define ARM_NEON_STATE64 17 -#define ARM_CPMU_STATE64 18 +#define ARM_DEBUG_STATE32 14 +#define ARM_DEBUG_STATE64 15 +#define ARM_NEON_STATE 16 +#define ARM_NEON_STATE64 17 +#define ARM_CPMU_STATE64 18 #ifdef XNU_KERNEL_PRIVATE /* For kernel use */ -#define ARM_SAVED_STATE32 20 -#define ARM_SAVED_STATE64 21 -#define ARM_NEON_SAVED_STATE32 22 -#define ARM_NEON_SAVED_STATE64 23 +#define ARM_SAVED_STATE32 20 +#define ARM_SAVED_STATE64 21 +#define ARM_NEON_SAVED_STATE32 22 +#define ARM_NEON_SAVED_STATE64 23 #endif /* XNU_KERNEL_PRIVATE */ -#define VALID_THREAD_STATE_FLAVOR(x)\ -((x == ARM_THREAD_STATE) || \ - (x == ARM_VFP_STATE) || \ - (x == ARM_EXCEPTION_STATE) || \ - (x == ARM_DEBUG_STATE) || \ +#define VALID_THREAD_STATE_FLAVOR(x) \ +((x == ARM_THREAD_STATE) || \ + (x == ARM_VFP_STATE) || \ + (x == ARM_EXCEPTION_STATE) || \ + (x == ARM_DEBUG_STATE) || \ (x == THREAD_STATE_NONE) || \ - (x == ARM_THREAD_STATE32) || \ - (x == ARM_THREAD_STATE64) || \ - (x == ARM_EXCEPTION_STATE64) || \ - (x == ARM_NEON_STATE) || \ - (x == ARM_NEON_STATE64) || \ - (x == ARM_DEBUG_STATE32) || \ + (x == ARM_THREAD_STATE32) || \ + (x == ARM_THREAD_STATE64) || \ + (x == ARM_EXCEPTION_STATE64) || \ + (x == ARM_NEON_STATE) || \ + (x == ARM_NEON_STATE64) || \ + (x == ARM_DEBUG_STATE32) || \ (x == ARM_DEBUG_STATE64)) struct arm_state_hdr { - uint32_t flavor; - uint32_t count; + uint32_t flavor; + uint32_t count; }; typedef struct arm_state_hdr arm_state_hdr_t; -typedef _STRUCT_ARM_THREAD_STATE arm_thread_state_t; -typedef _STRUCT_ARM_THREAD_STATE arm_thread_state32_t; -typedef _STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t; +typedef _STRUCT_ARM_THREAD_STATE arm_thread_state_t; +typedef _STRUCT_ARM_THREAD_STATE arm_thread_state32_t; +typedef _STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t; #if !defined(KERNEL) #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) #define arm_thread_state64_get_pc(ts) \ - __darwin_arm_thread_state64_get_pc(ts) + __darwin_arm_thread_state64_get_pc(ts) #define arm_thread_state64_get_pc_fptr(ts) \ - 
__darwin_arm_thread_state64_get_pc_fptr(ts) + __darwin_arm_thread_state64_get_pc_fptr(ts) #define arm_thread_state64_set_pc_fptr(ts, fptr) \ - __darwin_arm_thread_state64_set_pc_fptr(ts, fptr) + __darwin_arm_thread_state64_set_pc_fptr(ts, fptr) #define arm_thread_state64_get_lr(ts) \ - __darwin_arm_thread_state64_get_lr(ts) + __darwin_arm_thread_state64_get_lr(ts) #define arm_thread_state64_get_lr_fptr(ts) \ - __darwin_arm_thread_state64_get_lr_fptr(ts) + __darwin_arm_thread_state64_get_lr_fptr(ts) #define arm_thread_state64_set_lr_fptr(ts, fptr) \ - __darwin_arm_thread_state64_set_lr_fptr(ts, fptr) + __darwin_arm_thread_state64_set_lr_fptr(ts, fptr) #define arm_thread_state64_get_sp(ts) \ - __darwin_arm_thread_state64_get_sp(ts) + __darwin_arm_thread_state64_get_sp(ts) #define arm_thread_state64_set_sp(ts, ptr) \ - __darwin_arm_thread_state64_set_sp(ts, ptr) + __darwin_arm_thread_state64_set_sp(ts, ptr) #define arm_thread_state64_get_fp(ts) \ - __darwin_arm_thread_state64_get_fp(ts) + __darwin_arm_thread_state64_get_fp(ts) #define arm_thread_state64_set_fp(ts, ptr) \ - __darwin_arm_thread_state64_set_fp(ts, ptr) + __darwin_arm_thread_state64_set_fp(ts, ptr) #endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */ #endif /* !defined(KERNEL) */ @@ -128,8 +128,8 @@ struct arm_unified_thread_state { arm_thread_state64_t ts_64; } uts; }; -#define ts_32 uts.ts_32 -#define ts_64 uts.ts_64 +#define ts_32 uts.ts_32 +#define ts_64 uts.ts_64 typedef struct arm_unified_thread_state arm_unified_thread_state_t; #define ARM_THREAD_STATE_COUNT ((mach_msg_type_number_t) \ @@ -142,17 +142,17 @@ typedef struct arm_unified_thread_state arm_unified_thread_state_t; (sizeof (arm_unified_thread_state_t)/sizeof(uint32_t))) -typedef _STRUCT_ARM_VFP_STATE arm_vfp_state_t; -typedef _STRUCT_ARM_NEON_STATE arm_neon_state_t; -typedef _STRUCT_ARM_NEON_STATE arm_neon_state32_t; -typedef _STRUCT_ARM_NEON_STATE64 arm_neon_state64_t; +typedef _STRUCT_ARM_VFP_STATE arm_vfp_state_t; +typedef _STRUCT_ARM_NEON_STATE arm_neon_state_t; +typedef _STRUCT_ARM_NEON_STATE arm_neon_state32_t; +typedef _STRUCT_ARM_NEON_STATE64 arm_neon_state64_t; -typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state_t; -typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state32_t; -typedef _STRUCT_ARM_EXCEPTION_STATE64 arm_exception_state64_t; +typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state_t; +typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state32_t; +typedef _STRUCT_ARM_EXCEPTION_STATE64 arm_exception_state64_t; -typedef _STRUCT_ARM_DEBUG_STATE32 arm_debug_state32_t; -typedef _STRUCT_ARM_DEBUG_STATE64 arm_debug_state64_t; +typedef _STRUCT_ARM_DEBUG_STATE32 arm_debug_state32_t; +typedef _STRUCT_ARM_DEBUG_STATE64 arm_debug_state64_t; #if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) /* See below for ARM64 kernel structure definition for arm_debug_state. */ @@ -162,9 +162,9 @@ typedef _STRUCT_ARM_DEBUG_STATE64 arm_debug_state64_t; * arm_debug_state for binary compatability of userland consumers of this file. 
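[Editor's note: arm_unified_thread_state above carries either a 32- or 64-bit register file behind one flavor header, which is what the is_thread_state32()/is_thread_state64() predicates test. A standalone sketch of the same dispatch pattern; flavor values and field names follow the header, but the payload structs here are reduced local stand-ins, not the kernel's:]

#include <stdint.h>
#include <stdio.h>

/* Reduced stand-ins mirroring osfmk/mach/arm/thread_status.h */
#define ARM_THREAD_STATE32 9
#define ARM_THREAD_STATE64 6

struct arm_state_hdr {
	uint32_t flavor;
	uint32_t count;
};

struct unified_state {
	struct arm_state_hdr ash;
	union {
		struct { uint32_t pc; } ts_32;  /* stand-in for arm_thread_state32_t */
		struct { uint64_t pc; } ts_64;  /* stand-in for arm_thread_state64_t */
	} uts;
};

static uint64_t
unified_get_pc(const struct unified_state *s)
{
	/* Same shape as the is_thread_state32()/is_thread_state64() dispatch. */
	return s->ash.flavor == ARM_THREAD_STATE32 ? s->uts.ts_32.pc
	                                           : s->uts.ts_64.pc;
}

int
main(void)
{
	struct unified_state s = { { ARM_THREAD_STATE64, 0 }, { .ts_64 = { 0x1000 } } };
	printf("pc=0x%llx\n", (unsigned long long)unified_get_pc(&s));
	return 0;
}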
*/ #if defined(__arm__) -typedef _STRUCT_ARM_DEBUG_STATE arm_debug_state_t; +typedef _STRUCT_ARM_DEBUG_STATE arm_debug_state_t; #elif defined(__arm64__) -typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t; +typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t; #else #error Undefined architecture #endif @@ -194,26 +194,26 @@ typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t; #define ARM_NEON_STATE64_COUNT ((mach_msg_type_number_t) \ (sizeof (arm_neon_state64_t)/sizeof(uint32_t))) -#define MACHINE_THREAD_STATE ARM_THREAD_STATE -#define MACHINE_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT +#define MACHINE_THREAD_STATE ARM_THREAD_STATE +#define MACHINE_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT /* * Largest state on this machine: */ -#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX +#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX #ifdef XNU_KERNEL_PRIVATE static inline boolean_t is_thread_state32(const arm_unified_thread_state_t *its) { - return (its->ash.flavor == ARM_THREAD_STATE32); + return its->ash.flavor == ARM_THREAD_STATE32; } static inline boolean_t is_thread_state64(const arm_unified_thread_state_t *its) { - return (its->ash.flavor == ARM_THREAD_STATE64); + return its->ash.flavor == ARM_THREAD_STATE64; } static inline arm_thread_state32_t* @@ -243,17 +243,17 @@ const_thread_state64(const arm_unified_thread_state_t *its) #if defined(__arm__) #include -#define ARM_SAVED_STATE THREAD_STATE_NONE + 1 +#define ARM_SAVED_STATE THREAD_STATE_NONE + 1 struct arm_saved_state { - uint32_t r[13]; /* General purpose register r0-r12 */ - uint32_t sp; /* Stack pointer r13 */ - uint32_t lr; /* Link register r14 */ - uint32_t pc; /* Program counter r15 */ - uint32_t cpsr; /* Current program status register */ - uint32_t fsr; /* Fault status */ - uint32_t far; /* Virtual Fault Address */ - uint32_t exception; /* exception number */ + uint32_t r[13]; /* General purpose register r0-r12 */ + uint32_t sp; /* Stack pointer r13 */ + uint32_t lr; /* Link register r14 */ + uint32_t pc; /* Program counter r15 */ + uint32_t cpsr; /* Current program status register */ + uint32_t fsr; /* Fault status */ + uint32_t far; /* Virtual Fault Address */ + uint32_t exception;/* exception number */ }; typedef struct arm_saved_state arm_saved_state_t; @@ -265,24 +265,24 @@ typedef struct arm_saved_state arm_saved_state32_t; static inline arm_saved_state32_t* saved_state32(arm_saved_state_t *iss) { - return iss; + return iss; } static inline boolean_t is_saved_state32(const arm_saved_state_t *iss __unused) { - return TRUE; + return TRUE; } struct arm_saved_state_tagged { - uint32_t tag; - struct arm_saved_state state; + uint32_t tag; + struct arm_saved_state state; }; typedef struct arm_saved_state_tagged arm_saved_state_tagged_t; #define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \ - (sizeof (arm_saved_state_t)/sizeof(unsigned int))) + (sizeof (arm_saved_state_t)/sizeof(unsigned int))) static inline register_t @@ -368,46 +368,46 @@ set_saved_state_reg(arm_saved_state_t *iss, unsigned regno, register_t val) */ struct arm_saved_state32 { - uint32_t r[13]; /* General purpose register r0-r12 */ - uint32_t sp; /* Stack pointer r13 */ - uint32_t lr; /* Link register r14 */ - uint32_t pc; /* Program counter r15 */ - uint32_t cpsr; /* Current program status register */ - uint32_t far; /* Virtual fault address */ - uint32_t esr; /* Exception syndrome register */ - uint32_t exception; /* Exception number */ + uint32_t r[13]; /* General purpose register r0-r12 */ + uint32_t sp; /* Stack pointer 
r13 */ + uint32_t lr; /* Link register r14 */ + uint32_t pc; /* Program counter r15 */ + uint32_t cpsr; /* Current program status register */ + uint32_t far; /* Virtual fault address */ + uint32_t esr; /* Exception syndrome register */ + uint32_t exception; /* Exception number */ }; typedef struct arm_saved_state32 arm_saved_state32_t; struct arm_saved_state32_tagged { - uint32_t tag; - struct arm_saved_state32 state; + uint32_t tag; + struct arm_saved_state32 state; }; typedef struct arm_saved_state32_tagged arm_saved_state32_tagged_t; #define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \ - (sizeof (arm_saved_state32_t)/sizeof(unsigned int))) + (sizeof (arm_saved_state32_t)/sizeof(unsigned int))) struct arm_saved_state64 { - uint64_t x[29]; /* General purpose registers x0-x28 */ - uint64_t fp; /* Frame pointer x29 */ - uint64_t lr; /* Link register x30 */ - uint64_t sp; /* Stack pointer x31 */ - uint64_t pc; /* Program counter */ - uint32_t cpsr; /* Current program status register */ - uint32_t reserved; /* Reserved padding */ - uint64_t far; /* Virtual fault address */ - uint32_t esr; /* Exception syndrome register */ - uint32_t exception; /* Exception number */ + uint64_t x[29]; /* General purpose registers x0-x28 */ + uint64_t fp; /* Frame pointer x29 */ + uint64_t lr; /* Link register x30 */ + uint64_t sp; /* Stack pointer x31 */ + uint64_t pc; /* Program counter */ + uint32_t cpsr; /* Current program status register */ + uint32_t reserved; /* Reserved padding */ + uint64_t far; /* Virtual fault address */ + uint32_t esr; /* Exception syndrome register */ + uint32_t exception; /* Exception number */ }; typedef struct arm_saved_state64 arm_saved_state64_t; #define ARM_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \ - (sizeof (arm_saved_state64_t)/sizeof(unsigned int))) + (sizeof (arm_saved_state64_t)/sizeof(unsigned int))) struct arm_saved_state64_tagged { - uint32_t tag; - struct arm_saved_state64 state; + uint32_t tag; + struct arm_saved_state64 state; }; typedef struct arm_saved_state64_tagged arm_saved_state64_tagged_t; @@ -418,8 +418,8 @@ struct arm_saved_state { struct arm_saved_state64 ss_64; } uss; } __attribute__((aligned(16))); -#define ss_32 uss.ss_32 -#define ss_64 uss.ss_64 +#define ss_32 uss.ss_32 +#define ss_64 uss.ss_64 typedef struct arm_saved_state arm_saved_state_t; @@ -427,13 +427,13 @@ typedef struct arm_saved_state arm_saved_state_t; static inline boolean_t is_saved_state32(const arm_saved_state_t *iss) { - return (iss->ash.flavor == ARM_SAVED_STATE32); + return iss->ash.flavor == ARM_SAVED_STATE32; } static inline boolean_t is_saved_state64(const arm_saved_state_t *iss) { - return (iss->ash.flavor == ARM_SAVED_STATE64); + return iss->ash.flavor == ARM_SAVED_STATE64; } static inline arm_saved_state32_t* @@ -463,7 +463,7 @@ const_saved_state64(const arm_saved_state_t *iss) static inline register_t get_saved_state_pc(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc); + return is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc; } static inline void @@ -479,7 +479,7 @@ set_saved_state_pc(arm_saved_state_t *iss, register_t pc) static inline register_t get_saved_state_sp(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? const_saved_state32(iss)->sp : const_saved_state64(iss)->sp); + return is_saved_state32(iss) ? 
const_saved_state32(iss)->sp : const_saved_state64(iss)->sp; } static inline void @@ -495,7 +495,7 @@ set_saved_state_sp(arm_saved_state_t *iss, register_t sp) static inline register_t get_saved_state_lr(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr); + return is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr; } static inline void @@ -511,7 +511,7 @@ set_saved_state_lr(arm_saved_state_t *iss, register_t lr) static inline register_t get_saved_state_fp(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp); + return is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp; } static inline void @@ -525,23 +525,27 @@ set_saved_state_fp(arm_saved_state_t *iss, register_t fp) } static inline int -check_saved_state_reglimit(const arm_saved_state_t *iss, unsigned reg) +check_saved_state_reglimit(const arm_saved_state_t *iss, unsigned reg) { - return (is_saved_state32(iss) ? (reg < ARM_SAVED_STATE32_COUNT) : (reg < ARM_SAVED_STATE64_COUNT)); + return is_saved_state32(iss) ? (reg < ARM_SAVED_STATE32_COUNT) : (reg < ARM_SAVED_STATE64_COUNT); } static inline register_t get_saved_state_reg(const arm_saved_state_t *iss, unsigned reg) { - if (!check_saved_state_reglimit(iss, reg)) return 0; + if (!check_saved_state_reglimit(iss, reg)) { + return 0; + } - return (is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg])); + return is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg]); } static inline void set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value) { - if (!check_saved_state_reglimit(iss, reg)) return; + if (!check_saved_state_reglimit(iss, reg)) { + return; + } if (is_saved_state32(iss)) { saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value); @@ -553,7 +557,7 @@ set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value) static inline uint32_t get_saved_state_cpsr(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? const_saved_state32(iss)->cpsr : const_saved_state64(iss)->cpsr); + return is_saved_state32(iss) ? const_saved_state32(iss)->cpsr : const_saved_state64(iss)->cpsr; } static inline void @@ -569,7 +573,7 @@ set_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr) static inline register_t get_saved_state_far(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far); + return is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far; } static inline void @@ -585,7 +589,7 @@ set_saved_state_far(arm_saved_state_t *iss, register_t far) static inline uint32_t get_saved_state_esr(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? const_saved_state32(iss)->esr : const_saved_state64(iss)->esr); + return is_saved_state32(iss) ? const_saved_state32(iss)->esr : const_saved_state64(iss)->esr; } static inline void @@ -601,7 +605,7 @@ set_saved_state_esr(arm_saved_state_t *iss, uint32_t esr) static inline uint32_t get_saved_state_exc(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? const_saved_state32(iss)->exception : const_saved_state64(iss)->exception); + return is_saved_state32(iss) ? 
const_saved_state32(iss)->exception : const_saved_state64(iss)->exception; } static inline void @@ -617,19 +621,19 @@ set_saved_state_exc(arm_saved_state_t *iss, uint32_t exc) extern void panic_unimplemented(void); static inline int -get_saved_state_svc_number(const arm_saved_state_t *iss) +get_saved_state_svc_number(const arm_saved_state_t *iss) { - return (is_saved_state32(iss) ? (int)const_saved_state32(iss)->r[12] : (int)const_saved_state64(iss)->x[ARM64_SYSCALL_CODE_REG_NUM]); /* Only first word counts here */ + return is_saved_state32(iss) ? (int)const_saved_state32(iss)->r[12] : (int)const_saved_state64(iss)->x[ARM64_SYSCALL_CODE_REG_NUM]; /* Only first word counts here */ } -typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_legacy_debug_state_t; +typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_legacy_debug_state_t; struct arm_debug_aggregate_state { - arm_state_hdr_t dsh; - union { - arm_debug_state32_t ds32; - arm_debug_state64_t ds64; - } uds; + arm_state_hdr_t dsh; + union { + arm_debug_state32_t ds32; + arm_debug_state64_t ds64; + } uds; } __attribute__((aligned(16))); typedef struct arm_debug_aggregate_state arm_debug_state_t; @@ -646,31 +650,31 @@ typedef uint32_t uint32x4_t __attribute__((ext_vector_type(4))); struct arm_neon_saved_state32 { union { - uint128_t q[16]; - uint64_t d[32]; - uint32_t s[32]; + uint128_t q[16]; + uint64_t d[32]; + uint32_t s[32]; } v; - uint32_t fpsr; - uint32_t fpcr; + uint32_t fpsr; + uint32_t fpcr; }; typedef struct arm_neon_saved_state32 arm_neon_saved_state32_t; #define ARM_NEON_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \ - (sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int))) + (sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int))) struct arm_neon_saved_state64 { union { - uint128_t q[32]; - uint64x2_t d[32]; - uint32x4_t s[32]; + uint128_t q[32]; + uint64x2_t d[32]; + uint32x4_t s[32]; } v; - uint32_t fpsr; - uint32_t fpcr; + uint32_t fpsr; + uint32_t fpcr; }; typedef struct arm_neon_saved_state64 arm_neon_saved_state64_t; #define ARM_NEON_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \ - (sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int))) + (sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int))) struct arm_neon_saved_state { arm_state_hdr_t nsh; @@ -680,19 +684,19 @@ struct arm_neon_saved_state { } uns; }; typedef struct arm_neon_saved_state arm_neon_saved_state_t; -#define ns_32 uns.ns_32 -#define ns_64 uns.ns_64 +#define ns_32 uns.ns_32 +#define ns_64 uns.ns_64 static inline boolean_t is_neon_saved_state32(const arm_neon_saved_state_t *state) { - return (state->nsh.flavor == ARM_NEON_SAVED_STATE32); + return state->nsh.flavor == ARM_NEON_SAVED_STATE32; } static inline boolean_t is_neon_saved_state64(const arm_neon_saved_state_t *state) { - return (state->nsh.flavor == ARM_NEON_SAVED_STATE64); + return state->nsh.flavor == ARM_NEON_SAVED_STATE64; } static inline arm_neon_saved_state32_t * diff --git a/osfmk/mach/arm/vm_param.h b/osfmk/mach/arm/vm_param.h index c0ed53a8e..8f43cebb4 100644 --- a/osfmk/mach/arm/vm_param.h +++ b/osfmk/mach/arm/vm_param.h @@ -33,7 +33,7 @@ * ARM machine dependent virtual memory parameters. 
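[Editor's note: get_saved_state_svc_number() above reads the syscall selector from r12 on a 32-bit saved state and from x[ARM64_SYSCALL_CODE_REG_NUM] on a 64-bit one, truncating to int ("only first word counts"). A reduced standalone sketch of that accessor pattern; the structs are local stand-ins, and the assumption that ARM64_SYSCALL_CODE_REG_NUM names x16 reflects the Darwin arm64 convention rather than anything shown in this hunk:]

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the saved-state layouts in thread_status.h */
struct ss32 { uint32_t r[13]; };   /* r12 carries the syscall selector */
struct ss64 { uint64_t x[29]; };   /* assumed x16 on Darwin arm64
                                    * (ARM64_SYSCALL_CODE_REG_NUM) */

/* Same shape as get_saved_state_svc_number(): only the low word counts. */
static int svc_number32(const struct ss32 *s) { return (int)s->r[12]; }
static int svc_number64(const struct ss64 *s) { return (int)s->x[16]; }

int
main(void)
{
	struct ss64 s = { { 0 } };
	s.x[16] = 1;                   /* hypothetical trap selector */
	printf("svc=%d\n", svc_number64(&s));
	(void)svc_number32;
	return 0;
}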
*/ -#ifndef _MACH_ARM_VM_PARAM_H_ +#ifndef _MACH_ARM_VM_PARAM_H_ #define _MACH_ARM_VM_PARAM_H_ #if defined(KERNEL_PRIVATE) && __ARM_16K_PG__ @@ -44,69 +44,69 @@ #include #endif -#define BYTE_SIZE 8 /* byte size in bits */ +#define BYTE_SIZE 8 /* byte size in bits */ #if defined (KERNEL) #ifndef __ASSEMBLER__ -#ifdef __arm__ -#define PAGE_SHIFT_CONST 12 +#ifdef __arm__ +#define PAGE_SHIFT_CONST 12 #elif defined(__arm64__) -extern unsigned PAGE_SHIFT_CONST; +extern unsigned PAGE_SHIFT_CONST; #else #error Unsupported arch #endif #if defined(KERNEL_PRIVATE) && __ARM_16K_PG__ -#define PAGE_SHIFT ARM_PGSHIFT +#define PAGE_SHIFT ARM_PGSHIFT #else -#define PAGE_SHIFT PAGE_SHIFT_CONST +#define PAGE_SHIFT PAGE_SHIFT_CONST #endif -#define PAGE_SIZE (1 << PAGE_SHIFT) -#define PAGE_MASK (PAGE_SIZE-1) +#define PAGE_SIZE (1 << PAGE_SHIFT) +#define PAGE_MASK (PAGE_SIZE-1) -#define VM_PAGE_SIZE PAGE_SIZE +#define VM_PAGE_SIZE PAGE_SIZE -#define machine_ptob(x) ((x) << PAGE_SHIFT) +#define machine_ptob(x) ((x) << PAGE_SHIFT) /* * Defined for the purpose of testing the pmap advertised page * size; this does not necessarily match the hardware page size. */ -#define TEST_PAGE_SIZE_16K ((PAGE_SHIFT_CONST == 14)) -#define TEST_PAGE_SIZE_4K ((PAGE_SHIFT_CONST == 12)) +#define TEST_PAGE_SIZE_16K ((PAGE_SHIFT_CONST == 14)) +#define TEST_PAGE_SIZE_4K ((PAGE_SHIFT_CONST == 12)) -#endif /* !__ASSEMBLER__ */ +#endif /* !__ASSEMBLER__ */ #else -#define PAGE_SHIFT vm_page_shift -#define PAGE_SIZE vm_page_size -#define PAGE_MASK vm_page_mask +#define PAGE_SHIFT vm_page_shift +#define PAGE_SIZE vm_page_size +#define PAGE_MASK vm_page_mask -#define VM_PAGE_SIZE vm_page_size +#define VM_PAGE_SIZE vm_page_size -#define machine_ptob(x) ((x) << PAGE_SHIFT) +#define machine_ptob(x) ((x) << PAGE_SHIFT) #endif -#define PAGE_MAX_SHIFT 14 -#define PAGE_MAX_SIZE (1 << PAGE_MAX_SHIFT) -#define PAGE_MAX_MASK (PAGE_MAX_SIZE-1) +#define PAGE_MAX_SHIFT 14 +#define PAGE_MAX_SIZE (1 << PAGE_MAX_SHIFT) +#define PAGE_MAX_MASK (PAGE_MAX_SIZE-1) -#define PAGE_MIN_SHIFT 12 -#define PAGE_MIN_SIZE (1 << PAGE_MIN_SHIFT) -#define PAGE_MIN_MASK (PAGE_MIN_SIZE-1) +#define PAGE_MIN_SHIFT 12 +#define PAGE_MIN_SIZE (1 << PAGE_MIN_SHIFT) +#define PAGE_MIN_MASK (PAGE_MIN_SIZE-1) #ifndef __ASSEMBLER__ #ifdef MACH_KERNEL_PRIVATE -#define VM32_SUPPORT 1 -#define VM32_MIN_ADDRESS ((vm32_offset_t) 0) -#define VM32_MAX_ADDRESS ((vm32_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF)) -#define VM_MAX_PAGE_ADDRESS VM_MAX_ADDRESS /* ARM64_TODO: ?? */ +#define VM32_SUPPORT 1 +#define VM32_MIN_ADDRESS ((vm32_offset_t) 0) +#define VM32_MAX_ADDRESS ((vm32_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF)) +#define VM_MAX_PAGE_ADDRESS VM_MAX_ADDRESS /* ARM64_TODO: ?? 
*/ /* * kalloc() parameters: @@ -124,13 +124,13 @@ extern unsigned PAGE_SHIFT_CONST; #if defined (__arm__) -#define KALLOC_MINSIZE 8 /* minimum allocation size */ -#define KALLOC_LOG2_MINALIGN 3 /* log2 minimum alignment */ +#define KALLOC_MINSIZE 8 /* minimum allocation size */ +#define KALLOC_LOG2_MINALIGN 3 /* log2 minimum alignment */ #elif defined(__arm64__) -#define KALLOC_MINSIZE 16 /* minimum allocation size */ -#define KALLOC_LOG2_MINALIGN 4 /* log2 minimum alignment */ +#define KALLOC_MINSIZE 16 /* minimum allocation size */ +#define KALLOC_LOG2_MINALIGN 4 /* log2 minimum alignment */ #else #error Unsupported arch @@ -140,21 +140,21 @@ extern unsigned PAGE_SHIFT_CONST; #if defined (__arm__) -#define VM_MIN_ADDRESS ((vm_address_t) 0x00000000) -#define VM_MAX_ADDRESS ((vm_address_t) 0x80000000) +#define VM_MIN_ADDRESS ((vm_address_t) 0x00000000) +#define VM_MAX_ADDRESS ((vm_address_t) 0x80000000) /* system-wide values */ -#define MACH_VM_MIN_ADDRESS ((mach_vm_offset_t) 0) -#define MACH_VM_MAX_ADDRESS ((mach_vm_offset_t) VM_MAX_ADDRESS) +#define MACH_VM_MIN_ADDRESS ((mach_vm_offset_t) 0) +#define MACH_VM_MAX_ADDRESS ((mach_vm_offset_t) VM_MAX_ADDRESS) #elif defined (__arm64__) -#define VM_MIN_ADDRESS ((vm_address_t) 0x0000000000000000ULL) -#define VM_MAX_ADDRESS ((vm_address_t) 0x0000000080000000ULL) +#define VM_MIN_ADDRESS ((vm_address_t) 0x0000000000000000ULL) +#define VM_MAX_ADDRESS ((vm_address_t) 0x0000000080000000ULL) /* system-wide values */ -#define MACH_VM_MIN_ADDRESS ((mach_vm_offset_t) 0x0ULL) -#define MACH_VM_MAX_ADDRESS ((mach_vm_offset_t) 0x0000000FC0000000ULL) +#define MACH_VM_MIN_ADDRESS ((mach_vm_offset_t) 0x0ULL) +#define MACH_VM_MAX_ADDRESS ((mach_vm_offset_t) 0x0000000FC0000000ULL) #else #error architecture not supported @@ -163,31 +163,31 @@ extern unsigned PAGE_SHIFT_CONST; #define VM_MAP_MIN_ADDRESS VM_MIN_ADDRESS #define VM_MAP_MAX_ADDRESS VM_MAX_ADDRESS -#ifdef KERNEL +#ifdef KERNEL #if defined (__arm__) #define VM_KERNEL_POINTER_SIGNIFICANT_BITS 32 -#define VM_MIN_KERNEL_ADDRESS ((vm_address_t) 0x80000000) -#define VM_MAX_KERNEL_ADDRESS ((vm_address_t) 0xFFFEFFFF) -#define VM_HIGH_KERNEL_WINDOW ((vm_address_t) 0xFFFE0000) +#define VM_MIN_KERNEL_ADDRESS ((vm_address_t) 0x80000000) +#define VM_MAX_KERNEL_ADDRESS ((vm_address_t) 0xFFFEFFFF) +#define VM_HIGH_KERNEL_WINDOW ((vm_address_t) 0xFFFE0000) #elif defined (__arm64__) /* * The minimum and maximum kernel address; some configurations may * constrain the address space further. */ #define VM_KERNEL_POINTER_SIGNIFICANT_BITS 37 -#define VM_MIN_KERNEL_ADDRESS ((vm_address_t) 0xffffffe000000000ULL) -#define VM_MAX_KERNEL_ADDRESS ((vm_address_t) 0xfffffff3ffffffffULL) +#define VM_MIN_KERNEL_ADDRESS ((vm_address_t) 0xffffffe000000000ULL) +#define VM_MAX_KERNEL_ADDRESS ((vm_address_t) 0xfffffff3ffffffffULL) #else #error architecture not supported #endif -#define VM_MIN_KERNEL_AND_KEXT_ADDRESS \ - VM_MIN_KERNEL_ADDRESS +#define VM_MIN_KERNEL_AND_KEXT_ADDRESS \ + VM_MIN_KERNEL_ADDRESS #define VM_KERNEL_STRIP_PTR(_v) (_v) -#define VM_KERNEL_ADDRESS(_va) \ +#define VM_KERNEL_ADDRESS(_va) \ ((((vm_address_t)VM_KERNEL_STRIP_PTR(_va)) >= VM_MIN_KERNEL_ADDRESS) && \ (((vm_address_t)VM_KERNEL_STRIP_PTR(_va)) <= VM_MAX_KERNEL_ADDRESS)) @@ -195,28 +195,28 @@ extern unsigned PAGE_SHIFT_CONST; /* * Physical memory is mapped linearly at an offset virtual memory. 
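/*
 * Illustrative sketch (not part of the patch; the ex_ helpers are
 * hypothetical). "Mapped linearly at an offset" above means the physmap is a
 * single constant rebase: a physical address and its kernel virtual alias
 * differ by (gVirtBase - gPhysBase). isphysmem(), defined just below, is the
 * matching range check, folded into one unsigned comparison.
 */
extern unsigned long gVirtBase, gPhysBase, gPhysSize;

/* physical -> kernel-virtual alias within the linear window */
static inline unsigned long
ex_phystokv(unsigned long pa)
{
	return pa - gPhysBase + gVirtBase;
}

/*
 * Same test isphysmem() performs: the subtraction wraps for pa < gPhysBase,
 * so one compare rejects both out-of-range directions.
 */
static inline int
ex_isphysmem(unsigned long pa)
{
	return (pa - gPhysBase) < gPhysSize;
}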
*/ -extern unsigned long gVirtBase, gPhysBase, gPhysSize; +extern unsigned long gVirtBase, gPhysBase, gPhysSize; -#define isphysmem(a) (((vm_address_t)(a) - gPhysBase) < gPhysSize) +#define isphysmem(a) (((vm_address_t)(a) - gPhysBase) < gPhysSize) #if KASAN /* Increase the stack sizes to account for the redzones that get added to every * stack object. */ -# define KERNEL_STACK_SIZE (4*4*4096) +# define KERNEL_STACK_SIZE (4*4*4096) #else -# define KERNEL_STACK_SIZE (4*4096) +# define KERNEL_STACK_SIZE (4*4096) #endif -#define INTSTACK_SIZE (4*4096) +#define INTSTACK_SIZE (4*4096) #ifdef __arm64__ -#define EXCEPSTACK_SIZE (4*4096) +#define EXCEPSTACK_SIZE (4*4096) #else -#define FIQSTACK_SIZE (4096) +#define FIQSTACK_SIZE (4096) #endif #if defined (__arm__) -#define HIGH_EXC_VECTORS ((vm_address_t) 0xFFFF0000) +#define HIGH_EXC_VECTORS ((vm_address_t) 0xFFFF0000) #endif /* @@ -233,11 +233,11 @@ extern unsigned long gVirtBase, gPhysBase, gPhysSize; #error architecture not supported #endif -#endif /* MACH_KERNEL_PRIVATE */ -#endif /* KERNEL */ +#endif /* MACH_KERNEL_PRIVATE */ +#endif /* KERNEL */ -#endif /* !__ASSEMBLER__ */ +#endif /* !__ASSEMBLER__ */ -#define SWI_SYSCALL 0x80 +#define SWI_SYSCALL 0x80 -#endif /* _MACH_ARM_VM_PARAM_H_ */ +#endif /* _MACH_ARM_VM_PARAM_H_ */ diff --git a/osfmk/mach/arm/vm_types.h b/osfmk/mach/arm/vm_types.h index d15ce8c1b..be471a238 100644 --- a/osfmk/mach/arm/vm_types.h +++ b/osfmk/mach/arm/vm_types.h @@ -28,28 +28,28 @@ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,10 +64,10 @@ * Header file for VM data types. ARM version. */ -#ifndef _MACH_ARM_VM_TYPES_H_ +#ifndef _MACH_ARM_VM_TYPES_H_ #define _MACH_ARM_VM_TYPES_H_ -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include #include @@ -90,32 +90,32 @@ * * New use of these types is discouraged. */ -typedef __darwin_natural_t natural_t; -typedef int integer_t; +typedef __darwin_natural_t natural_t; +typedef int integer_t; /* * A vm_offset_t is a type-neutral pointer, * e.g. an offset into a virtual memory space. 
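/*
 * Illustrative sketch (not part of the patch). Because PAGE_MASK in
 * <mach/arm/vm_param.h> above is PAGE_SIZE - 1 with PAGE_SIZE a power of two,
 * page-aligning a vm_offset_t is two bit operations. The 16K constants below
 * (PAGE_SHIFT == 14) are spelled out only to keep the sketch self-contained.
 */
#include <stdint.h>

#define EX_PAGE_SHIFT 14
#define EX_PAGE_SIZE  ((uintptr_t)1 << EX_PAGE_SHIFT)  /* 0x4000 */
#define EX_PAGE_MASK  (EX_PAGE_SIZE - 1)               /* 0x3fff */

static inline uintptr_t
ex_trunc_page(uintptr_t a)
{
	return a & ~EX_PAGE_MASK;             /* ex_trunc_page(0x4001) == 0x4000 */
}

static inline uintptr_t
ex_round_page(uintptr_t a)
{
	return (a + EX_PAGE_MASK) & ~EX_PAGE_MASK;  /* ex_round_page(0x4001) == 0x8000 */
}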
*/ #ifdef __LP64__ -typedef uintptr_t vm_offset_t; -typedef uintptr_t vm_size_t; +typedef uintptr_t vm_offset_t; +typedef uintptr_t vm_size_t; -typedef uint64_t mach_vm_address_t; -typedef uint64_t mach_vm_offset_t; -typedef uint64_t mach_vm_size_t; +typedef uint64_t mach_vm_address_t; +typedef uint64_t mach_vm_offset_t; +typedef uint64_t mach_vm_size_t; -typedef uint64_t vm_map_offset_t; -typedef uint64_t vm_map_address_t; -typedef uint64_t vm_map_size_t; +typedef uint64_t vm_map_offset_t; +typedef uint64_t vm_map_address_t; +typedef uint64_t vm_map_size_t; #else -typedef natural_t vm_offset_t; +typedef natural_t vm_offset_t; /* * A vm_size_t is the proper type for e.g. * expressing the difference between two * vm_offset_t entities. */ -typedef natural_t vm_size_t; +typedef natural_t vm_size_t; /* * This new type is independent of a particular vm map's @@ -125,37 +125,37 @@ typedef natural_t vm_size_t; * want to have to distinguish. */ #if defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && (__IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_5_0) -typedef uint32_t mach_vm_address_t; -typedef uint32_t mach_vm_offset_t; -typedef uint32_t mach_vm_size_t; +typedef uint32_t mach_vm_address_t; +typedef uint32_t mach_vm_offset_t; +typedef uint32_t mach_vm_size_t; #else -typedef uint64_t mach_vm_address_t; -typedef uint64_t mach_vm_offset_t; -typedef uint64_t mach_vm_size_t; +typedef uint64_t mach_vm_address_t; +typedef uint64_t mach_vm_offset_t; +typedef uint64_t mach_vm_size_t; #endif -typedef uint32_t vm_map_offset_t; -typedef uint32_t vm_map_address_t; -typedef uint32_t vm_map_size_t; +typedef uint32_t vm_map_offset_t; +typedef uint32_t vm_map_address_t; +typedef uint32_t vm_map_size_t; #endif /* __LP64__ */ -typedef uint32_t vm32_offset_t; -typedef uint32_t vm32_address_t; -typedef uint32_t vm32_size_t; +typedef uint32_t vm32_offset_t; +typedef uint32_t vm32_address_t; +typedef uint32_t vm32_size_t; -typedef vm_offset_t mach_port_context_t; +typedef vm_offset_t mach_port_context_t; #ifdef MACH_KERNEL_PRIVATE -typedef vm32_offset_t mach_port_context32_t; -typedef mach_vm_offset_t mach_port_context64_t; +typedef vm32_offset_t mach_port_context32_t; +typedef mach_vm_offset_t mach_port_context64_t; #endif -#endif /* ASSEMBLER */ +#endif /* ASSEMBLER */ /* * If composing messages by hand (please do not) */ -#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32 +#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32 -#endif /* _MACH_ARM_VM_TYPES_H_ */ +#endif /* _MACH_ARM_VM_TYPES_H_ */ diff --git a/osfmk/mach/boolean.h b/osfmk/mach/boolean.h index 641c3962d..6ef6d4bcd 100644 --- a/osfmk/mach/boolean.h +++ b/osfmk/mach/boolean.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -62,27 +62,27 @@ * */ -#ifndef _MACH_BOOLEAN_H_ -#define _MACH_BOOLEAN_H_ +#ifndef _MACH_BOOLEAN_H_ +#define _MACH_BOOLEAN_H_ /* * Pick up "boolean_t" type definition */ -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include -#endif /* ASSEMBLER */ +#endif /* ASSEMBLER */ /* * Define TRUE and FALSE if not defined. */ -#ifndef TRUE -#define TRUE 1 -#endif /* TRUE */ +#ifndef TRUE +#define TRUE 1 +#endif /* TRUE */ -#ifndef FALSE -#define FALSE 0 -#endif /* FALSE */ +#ifndef FALSE +#define FALSE 0 +#endif /* FALSE */ -#endif /* _MACH_BOOLEAN_H_ */ +#endif /* _MACH_BOOLEAN_H_ */ diff --git a/osfmk/mach/bootstrap.h b/osfmk/mach/bootstrap.h index 67667c308..e8300859c 100644 --- a/osfmk/mach/bootstrap.h +++ b/osfmk/mach/bootstrap.h @@ -2,7 +2,7 @@ * Copyright (c) 2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,13 +22,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Mach bootstrap interfaces (obsolete: header included only for compatibility) */ -#ifndef _MACH_BOOTSTRAP_H_ +#ifndef _MACH_BOOTSTRAP_H_ #define _MACH_BOOTSTRAP_H_ #endif /* _MACH_BOOTSTRAP_H_ */ diff --git a/osfmk/mach/clock_types.h b/osfmk/mach/clock_types.h index eb274f070..9b3d49a94 100644 --- a/osfmk/mach/clock_types.h +++ b/osfmk/mach/clock_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,8 +39,8 @@ * All interfaces defined here are obsolete. */ -#ifndef _MACH_CLOCK_TYPES_H_ -#define _MACH_CLOCK_TYPES_H_ +#ifndef _MACH_CLOCK_TYPES_H_ +#define _MACH_CLOCK_TYPES_H_ #include #include @@ -48,80 +48,80 @@ /* * Type definitions. */ -typedef int alarm_type_t; /* alarm time type */ -typedef int sleep_type_t; /* sleep time type */ -typedef int clock_id_t; /* clock identification type */ -typedef int clock_flavor_t; /* clock flavor type */ -typedef int *clock_attr_t; /* clock attribute type */ -typedef int clock_res_t; /* clock resolution type */ +typedef int alarm_type_t; /* alarm time type */ +typedef int sleep_type_t; /* sleep time type */ +typedef int clock_id_t; /* clock identification type */ +typedef int clock_flavor_t; /* clock flavor type */ +typedef int *clock_attr_t; /* clock attribute type */ +typedef int clock_res_t; /* clock resolution type */ /* * Normal time specification used by the kernel clock facility. */ struct mach_timespec { - unsigned int tv_sec; /* seconds */ - clock_res_t tv_nsec; /* nanoseconds */ + unsigned int tv_sec; /* seconds */ + clock_res_t tv_nsec; /* nanoseconds */ }; -typedef struct mach_timespec mach_timespec_t; +typedef struct mach_timespec mach_timespec_t; /* * Reserved clock id values for default clocks. */ -#define SYSTEM_CLOCK 0 -#define CALENDAR_CLOCK 1 +#define SYSTEM_CLOCK 0 +#define CALENDAR_CLOCK 1 -#define REALTIME_CLOCK 0 +#define REALTIME_CLOCK 0 /* * Attribute names. 
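/*
 * Illustrative usage (not part of the patch). mach_timespec and the reserved
 * clock ids above are what this clock facility — obsolete, per the header
 * comment — traffics in. The classic user-space read of the system clock:
 */
#include <mach/clock.h>
#include <mach/mach.h>

static void
ex_read_system_clock(void)
{
	clock_serv_t clk;
	mach_timespec_t now;

	host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &clk);
	clock_get_time(clk, &now);                   /* tv_sec/tv_nsec since boot */
	mach_port_deallocate(mach_task_self(), clk);
}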
*/ -#define CLOCK_GET_TIME_RES 1 /* get_time call resolution */ +#define CLOCK_GET_TIME_RES 1 /* get_time call resolution */ /* 2 * was map_time call resolution */ -#define CLOCK_ALARM_CURRES 3 /* current alarm resolution */ -#define CLOCK_ALARM_MINRES 4 /* minimum alarm resolution */ -#define CLOCK_ALARM_MAXRES 5 /* maximum alarm resolution */ +#define CLOCK_ALARM_CURRES 3 /* current alarm resolution */ +#define CLOCK_ALARM_MINRES 4 /* minimum alarm resolution */ +#define CLOCK_ALARM_MAXRES 5 /* maximum alarm resolution */ -#define NSEC_PER_USEC 1000ull /* nanoseconds per microsecond */ -#define USEC_PER_SEC 1000000ull /* microseconds per second */ -#define NSEC_PER_SEC 1000000000ull /* nanoseconds per second */ -#define NSEC_PER_MSEC 1000000ull /* nanoseconds per millisecond */ +#define NSEC_PER_USEC 1000ull /* nanoseconds per microsecond */ +#define USEC_PER_SEC 1000000ull /* microseconds per second */ +#define NSEC_PER_SEC 1000000000ull /* nanoseconds per second */ +#define NSEC_PER_MSEC 1000000ull /* nanoseconds per millisecond */ -#define BAD_MACH_TIMESPEC(t) \ +#define BAD_MACH_TIMESPEC(t) \ ((t)->tv_nsec < 0 || (t)->tv_nsec >= (long)NSEC_PER_SEC) /* t1 <=> t2, also (t1 - t2) in nsec with max of +- 1 sec */ -#define CMP_MACH_TIMESPEC(t1, t2) \ - ((t1)->tv_sec > (t2)->tv_sec ? (long) +NSEC_PER_SEC : \ - ((t1)->tv_sec < (t2)->tv_sec ? (long) -NSEC_PER_SEC : \ - (t1)->tv_nsec - (t2)->tv_nsec)) +#define CMP_MACH_TIMESPEC(t1, t2) \ + ((t1)->tv_sec > (t2)->tv_sec ? (long) +NSEC_PER_SEC : \ + ((t1)->tv_sec < (t2)->tv_sec ? (long) -NSEC_PER_SEC : \ + (t1)->tv_nsec - (t2)->tv_nsec)) /* t1 += t2 */ -#define ADD_MACH_TIMESPEC(t1, t2) \ - do { \ - if (((t1)->tv_nsec += (t2)->tv_nsec) >= (long) NSEC_PER_SEC) { \ - (t1)->tv_nsec -= (long) NSEC_PER_SEC; \ - (t1)->tv_sec += 1; \ - } \ - (t1)->tv_sec += (t2)->tv_sec; \ +#define ADD_MACH_TIMESPEC(t1, t2) \ + do { \ + if (((t1)->tv_nsec += (t2)->tv_nsec) >= (long) NSEC_PER_SEC) { \ + (t1)->tv_nsec -= (long) NSEC_PER_SEC; \ + (t1)->tv_sec += 1; \ + } \ + (t1)->tv_sec += (t2)->tv_sec; \ } while (0) /* t1 -= t2 */ -#define SUB_MACH_TIMESPEC(t1, t2) \ - do { \ - if (((t1)->tv_nsec -= (t2)->tv_nsec) < 0) { \ - (t1)->tv_nsec += (long) NSEC_PER_SEC; \ - (t1)->tv_sec -= 1; \ - } \ - (t1)->tv_sec -= (t2)->tv_sec; \ +#define SUB_MACH_TIMESPEC(t1, t2) \ + do { \ + if (((t1)->tv_nsec -= (t2)->tv_nsec) < 0) { \ + (t1)->tv_nsec += (long) NSEC_PER_SEC; \ + (t1)->tv_sec -= 1; \ + } \ + (t1)->tv_sec -= (t2)->tv_sec; \ } while (0) /* * Alarm parameter defines. 
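/*
 * Worked example (not part of the patch). The timespec macros above keep
 * tv_nsec normalized to [0, NSEC_PER_SEC) by carrying into or borrowing from
 * tv_sec; CMP_MACH_TIMESPEC collapses the comparison to a signed nanosecond
 * delta, saturated at +/- one second once tv_sec differs.
 */
static void
ex_timespec_demo(void)
{
	struct mach_timespec t1 = { 1, 900000000 };
	struct mach_timespec t2 = { 0, 200000000 };

	ADD_MACH_TIMESPEC(&t1, &t2);        /* carry: t1 == { 2, 100000000 } */
	SUB_MACH_TIMESPEC(&t1, &t2);        /* borrow: t1 == { 1, 900000000 } again */
	(void)CMP_MACH_TIMESPEC(&t1, &t2);  /* > 0: t1->tv_sec exceeds t2->tv_sec */
}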
*/ -#define ALRMTYPE 0xff /* type (8-bit field) */ -#define TIME_ABSOLUTE 0x00 /* absolute time */ -#define TIME_RELATIVE 0x01 /* relative time */ +#define ALRMTYPE 0xff /* type (8-bit field) */ +#define TIME_ABSOLUTE 0x00 /* absolute time */ +#define TIME_RELATIVE 0x01 /* relative time */ -#define BAD_ALRMTYPE(t) (((t) &~ TIME_RELATIVE) != 0) +#define BAD_ALRMTYPE(t) (((t) &~ TIME_RELATIVE) != 0) #endif /* _MACH_CLOCK_TYPES_H_ */ diff --git a/osfmk/mach/coalition.h b/osfmk/mach/coalition.h index f7a24ef19..2974440ff 100644 --- a/osfmk/mach/coalition.h +++ b/osfmk/mach/coalition.h @@ -42,9 +42,9 @@ #define COALITION_CREATE_FLAGS_SET_TYPE(flags, type) \ do { \ - flags &= ~COALITION_CREATE_FLAGS_TYPE_MASK; \ - flags |= (((type) << COALITION_CREATE_FLAGS_TYPE_SHIFT) \ - & COALITION_CREATE_FLAGS_TYPE_MASK); \ + flags &= ~COALITION_CREATE_FLAGS_TYPE_MASK; \ + flags |= (((type) << COALITION_CREATE_FLAGS_TYPE_SHIFT) \ + & COALITION_CREATE_FLAGS_TYPE_MASK); \ } while (0) #define COALITION_CREATE_FLAGS_ROLE_MASK ((uint32_t)0xF00) @@ -55,9 +55,9 @@ #define COALITION_CREATE_FLAGS_SET_ROLE(flags, role) \ do { \ - flags &= ~COALITION_CREATE_FLAGS_ROLE_MASK; \ - flags |= (((role) << COALITION_CREATE_FLAGS_ROLE_SHIFT) \ - & COALITION_CREATE_FLAGS_ROLE_MASK); \ + flags &= ~COALITION_CREATE_FLAGS_ROLE_MASK; \ + flags |= (((role) << COALITION_CREATE_FLAGS_ROLE_SHIFT) \ + & COALITION_CREATE_FLAGS_ROLE_MASK); \ } while (0) /* @@ -115,10 +115,10 @@ * { "Efficient" : COALITION_FLAGS_EFFICIENT, } */ static const char *coalition_efficiency_names[] = { - "Efficient", + "Efficient", }; static const uint64_t coalition_efficiency_flags[] = { - COALITION_FLAGS_EFFICIENT, + COALITION_FLAGS_EFFICIENT, }; struct coalition_resource_usage { @@ -141,7 +141,7 @@ struct coalition_resource_usage { uint64_t energy_billed_to_me; uint64_t energy_billed_to_others; uint64_t cpu_ptime; - uint64_t cpu_time_eqos_len; /* Stores the number of thread QoS types */ + uint64_t cpu_time_eqos_len; /* Stores the number of thread QoS types */ uint64_t cpu_time_eqos[COALITION_NUM_THREAD_QOS_TYPES]; }; diff --git a/osfmk/mach/error.h b/osfmk/mach/error.h index c0ceeebea..50c77b9cd 100644 --- a/osfmk/mach/error.h +++ b/osfmk/mach/error.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. 
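/*
 * Illustrative sketch (not part of the patch; ex_ names are hypothetical).
 * The COALITION_CREATE_FLAGS_SET_* macros a hunk above are the generic
 * "deposit into a masked bit field" idiom: clear the field, then OR in the
 * shifted value clipped by the mask. In the general form:
 */
#include <stdint.h>

static inline uint32_t
ex_field_set(uint32_t flags, uint32_t mask, unsigned shift, uint32_t v)
{
	flags &= ~mask;               /* clear the field */
	flags |= (v << shift) & mask; /* deposit, clipped to the field */
	return flags;
}

static inline uint32_t
ex_field_get(uint32_t flags, uint32_t mask, unsigned shift)
{
	return (flags & mask) >> shift;
}

/*
 * With the role field shown above (mask 0xF00, i.e. shift 8):
 *   ex_field_get(ex_field_set(0, 0xF00, 8, 3), 0xF00, 8) == 3
 */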
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -62,53 +62,53 @@ * */ -#ifndef _MACH_ERROR_H_ -#define _MACH_ERROR_H_ +#ifndef _MACH_ERROR_H_ +#define _MACH_ERROR_H_ #include /* * error number layout as follows: * - * hi lo + * hi lo * | system(6) | subsystem(12) | code(14) | */ -#define err_none (mach_error_t)0 -#define ERR_SUCCESS (mach_error_t)0 -#define ERR_ROUTINE_NIL (mach_error_fn_t)0 +#define err_none (mach_error_t)0 +#define ERR_SUCCESS (mach_error_t)0 +#define ERR_ROUTINE_NIL (mach_error_fn_t)0 -#define err_system(x) ((signed)((((unsigned)(x))&0x3f)<<26)) -#define err_sub(x) (((x)&0xfff)<<14) +#define err_system(x) ((signed)((((unsigned)(x))&0x3f)<<26)) +#define err_sub(x) (((x)&0xfff)<<14) -#define err_get_system(err) (((err)>>26)&0x3f) -#define err_get_sub(err) (((err)>>14)&0xfff) -#define err_get_code(err) ((err)&0x3fff) +#define err_get_system(err) (((err)>>26)&0x3f) +#define err_get_sub(err) (((err)>>14)&0xfff) +#define err_get_code(err) ((err)&0x3fff) -#define system_emask (err_system(0x3f)) -#define sub_emask (err_sub(0xfff)) -#define code_emask (0x3fff) +#define system_emask (err_system(0x3f)) +#define sub_emask (err_sub(0xfff)) +#define code_emask (0x3fff) /* major error systems */ -#define err_kern err_system(0x0) /* kernel */ -#define err_us err_system(0x1) /* user space library */ -#define err_server err_system(0x2) /* user space servers */ -#define err_ipc err_system(0x3) /* old ipc errors */ -#define err_mach_ipc err_system(0x4) /* mach-ipc errors */ -#define err_dipc err_system(0x7) /* distributed ipc */ -#define err_local err_system(0x3e) /* user defined errors */ -#define err_ipc_compat err_system(0x3f) /* (compatibility) mach-ipc errors */ +#define err_kern err_system(0x0) /* kernel */ +#define err_us err_system(0x1) /* user space library */ +#define err_server err_system(0x2) /* user space servers */ +#define err_ipc err_system(0x3) /* old ipc errors */ +#define err_mach_ipc err_system(0x4) /* mach-ipc errors */ +#define err_dipc err_system(0x7) /* distributed ipc */ +#define err_local err_system(0x3e) /* user defined errors */ +#define err_ipc_compat err_system(0x3f) /* (compatibility) mach-ipc errors */ -#define err_max_system 0x3f +#define err_max_system 0x3f /* unix errors get lumped into one subsystem */ -#define unix_err(errno) (err_kern|err_sub(3)|errno) +#define unix_err(errno) (err_kern|err_sub(3)|errno) -typedef kern_return_t mach_error_t; -typedef mach_error_t (* mach_error_fn_t)( void ); +typedef kern_return_t mach_error_t; +typedef mach_error_t (* mach_error_fn_t)( void ); -#endif /* _MACH_ERROR_H_ */ +#endif /* _MACH_ERROR_H_ */ diff --git a/osfmk/mach/events_info.h 
b/osfmk/mach/events_info.h index c48772069..cf0dea162 100644 --- a/osfmk/mach/events_info.h +++ b/osfmk/mach/events_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,23 +39,23 @@ * thread execution, and to summarize this information for tasks. */ -#ifndef _MACH_EVENTS_INFO_H_ +#ifndef _MACH_EVENTS_INFO_H_ #define _MACH_EVENTS_INFO_H_ #include struct events_info { - integer_t faults; /* number of page faults */ - integer_t zero_fills; /* number of zero fill pages */ - integer_t reactivations; /* number of reactivated pages */ - integer_t pageins; /* number of actual pageins */ - integer_t cow_faults; /* number of copy-on-write faults */ - integer_t messages_sent; /* number of messages sent */ - integer_t messages_received; /* number of messages received */ + integer_t faults; /* number of page faults */ + integer_t zero_fills; /* number of zero fill pages */ + integer_t reactivations; /* number of reactivated pages */ + integer_t pageins; /* number of actual pageins */ + integer_t cow_faults; /* number of copy-on-write faults */ + integer_t messages_sent; /* number of messages sent */ + integer_t messages_received; /* number of messages received */ }; -typedef struct events_info events_info_data_t; -typedef struct events_info *events_info_t; -#define EVENTS_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(events_info_data_t) / sizeof(integer_t))) +typedef struct events_info events_info_data_t; +typedef struct events_info *events_info_t; +#define EVENTS_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(events_info_data_t) / sizeof(integer_t))) -#endif /*_MACH_EVENTS_INFO_H_*/ +#endif /*_MACH_EVENTS_INFO_H_*/ diff --git a/osfmk/mach/exception.h b/osfmk/mach/exception.h index e7d5d236c..7baea7034 100644 --- a/osfmk/mach/exception.h +++ b/osfmk/mach/exception.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
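/*
 * Worked example (not part of the patch; the EX_ macros mirror the
 * <mach/error.h> definitions a few hunks above). The 6/12/14-bit layout means
 * any mach_error_t splits with three shift-and-mask operations; for instance,
 * unix_err(ENOENT) is err_kern | err_sub(3) | 2:
 */
#include <assert.h>

#define EX_ERR_SYSTEM(x) ((signed)((((unsigned)(x)) & 0x3f) << 26))
#define EX_ERR_SUB(x)    (((x) & 0xfff) << 14)

static void
ex_error_demo(void)
{
	int e = EX_ERR_SYSTEM(0x0) | EX_ERR_SUB(3) | 2;  /* unix_err(2) */

	assert(((e >> 26) & 0x3f) == 0x0);  /* err_get_system: err_kern */
	assert(((e >> 14) & 0xfff) == 3);   /* err_get_sub: the unix subsystem */
	assert((e & 0x3fff) == 2);          /* err_get_code: ENOENT */
}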
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifndef _MACH_EXCEPTION_H_ -#define _MACH_EXCEPTION_H_ +#ifndef _MACH_EXCEPTION_H_ +#define _MACH_EXCEPTION_H_ #include -#endif /* _MACH_EXCEPTION_H_ */ +#endif /* _MACH_EXCEPTION_H_ */ diff --git a/osfmk/mach/exception_types.h b/osfmk/mach/exception_types.h index 6fc534c5b..83c8c90e7 100644 --- a/osfmk/mach/exception_types.h +++ b/osfmk/mach/exception_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _MACH_EXCEPTION_TYPES_H_ -#define _MACH_EXCEPTION_TYPES_H_ +#ifndef _MACH_EXCEPTION_TYPES_H_ +#define _MACH_EXCEPTION_TYPES_H_ #include @@ -65,65 +65,65 @@ * Machine-independent exception definitions. */ -#define EXC_BAD_ACCESS 1 /* Could not access memory */ - /* Code contains kern_return_t describing error. */ - /* Subcode contains bad memory address. */ +#define EXC_BAD_ACCESS 1 /* Could not access memory */ +/* Code contains kern_return_t describing error. */ +/* Subcode contains bad memory address. */ -#define EXC_BAD_INSTRUCTION 2 /* Instruction failed */ - /* Illegal or undefined instruction or operand */ +#define EXC_BAD_INSTRUCTION 2 /* Instruction failed */ +/* Illegal or undefined instruction or operand */ -#define EXC_ARITHMETIC 3 /* Arithmetic exception */ - /* Exact nature of exception is in code field */ +#define EXC_ARITHMETIC 3 /* Arithmetic exception */ +/* Exact nature of exception is in code field */ -#define EXC_EMULATION 4 /* Emulation instruction */ - /* Emulation support instruction encountered */ - /* Details in code and subcode fields */ +#define EXC_EMULATION 4 /* Emulation instruction */ +/* Emulation support instruction encountered */ +/* Details in code and subcode fields */ -#define EXC_SOFTWARE 5 /* Software generated exception */ - /* Exact exception is in code field. */ - /* Codes 0 - 0xFFFF reserved to hardware */ - /* Codes 0x10000 - 0x1FFFF reserved for OS emulation (Unix) */ +#define EXC_SOFTWARE 5 /* Software generated exception */ +/* Exact exception is in code field. */ +/* Codes 0 - 0xFFFF reserved to hardware */ +/* Codes 0x10000 - 0x1FFFF reserved for OS emulation (Unix) */ -#define EXC_BREAKPOINT 6 /* Trace, breakpoint, etc. */ - /* Details in code field. */ +#define EXC_BREAKPOINT 6 /* Trace, breakpoint, etc. */ +/* Details in code field. */ -#define EXC_SYSCALL 7 /* System calls. */ +#define EXC_SYSCALL 7 /* System calls. */ -#define EXC_MACH_SYSCALL 8 /* Mach system calls. */ +#define EXC_MACH_SYSCALL 8 /* Mach system calls. */ -#define EXC_RPC_ALERT 9 /* RPC alert */ +#define EXC_RPC_ALERT 9 /* RPC alert */ -#define EXC_CRASH 10 /* Abnormal process exit */ +#define EXC_CRASH 10 /* Abnormal process exit */ -#define EXC_RESOURCE 11 /* Hit resource consumption limit */ - /* Exact resource is in code field. */ +#define EXC_RESOURCE 11 /* Hit resource consumption limit */ +/* Exact resource is in code field. 
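/*
 * Illustrative sketch (not part of the patch). Each EXC_* code above gets a
 * companion EXC_MASK_* bit below, defined as (1 << EXC_*), so a handler's
 * registered exception mask is built and tested with plain bit operations:
 */
static inline int
ex_mask_wants(unsigned int mask, int exc_type)
{
	return (mask & (1u << exc_type)) != 0;
}

/*
 * e.g. a mask of (1 << 1) | (1 << 6) — EXC_BAD_ACCESS and EXC_BREAKPOINT —
 * answers 1 for exc_type 6 and 0 for exc_type 3 (EXC_ARITHMETIC).
 */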
*/ -#define EXC_GUARD 12 /* Violated guarded resource protections */ +#define EXC_GUARD 12 /* Violated guarded resource protections */ -#define EXC_CORPSE_NOTIFY 13 /* Abnormal process exited to corpse state */ +#define EXC_CORPSE_NOTIFY 13 /* Abnormal process exited to corpse state */ -#define EXC_CORPSE_VARIANT_BIT 0x100 /* bit set for EXC_*_CORPSE variants of EXC_* */ +#define EXC_CORPSE_VARIANT_BIT 0x100 /* bit set for EXC_*_CORPSE variants of EXC_* */ /* * Machine-independent exception behaviors */ -# define EXCEPTION_DEFAULT 1 +# define EXCEPTION_DEFAULT 1 /* Send a catch_exception_raise message including the identity. */ -# define EXCEPTION_STATE 2 +# define EXCEPTION_STATE 2 /* Send a catch_exception_raise_state message including the * thread state. */ -# define EXCEPTION_STATE_IDENTITY 3 +# define EXCEPTION_STATE_IDENTITY 3 /* Send a catch_exception_raise_state_identity message including * the thread identity and state. */ -#define MACH_EXCEPTION_CODES 0x80000000 +#define MACH_EXCEPTION_CODES 0x80000000 /* Send 64-bit code and subcode in the exception header */ /* @@ -131,51 +131,51 @@ * bit zero is unused, therefore 1 word = 31 exception types */ -#define EXC_MASK_BAD_ACCESS (1 << EXC_BAD_ACCESS) -#define EXC_MASK_BAD_INSTRUCTION (1 << EXC_BAD_INSTRUCTION) -#define EXC_MASK_ARITHMETIC (1 << EXC_ARITHMETIC) -#define EXC_MASK_EMULATION (1 << EXC_EMULATION) -#define EXC_MASK_SOFTWARE (1 << EXC_SOFTWARE) -#define EXC_MASK_BREAKPOINT (1 << EXC_BREAKPOINT) -#define EXC_MASK_SYSCALL (1 << EXC_SYSCALL) -#define EXC_MASK_MACH_SYSCALL (1 << EXC_MACH_SYSCALL) -#define EXC_MASK_RPC_ALERT (1 << EXC_RPC_ALERT) -#define EXC_MASK_CRASH (1 << EXC_CRASH) -#define EXC_MASK_RESOURCE (1 << EXC_RESOURCE) -#define EXC_MASK_GUARD (1 << EXC_GUARD) -#define EXC_MASK_CORPSE_NOTIFY (1 << EXC_CORPSE_NOTIFY) - -#define EXC_MASK_ALL (EXC_MASK_BAD_ACCESS | \ - EXC_MASK_BAD_INSTRUCTION | \ - EXC_MASK_ARITHMETIC | \ - EXC_MASK_EMULATION | \ - EXC_MASK_SOFTWARE | \ - EXC_MASK_BREAKPOINT | \ - EXC_MASK_SYSCALL | \ - EXC_MASK_MACH_SYSCALL | \ - EXC_MASK_RPC_ALERT | \ - EXC_MASK_RESOURCE | \ - EXC_MASK_GUARD | \ - EXC_MASK_MACHINE) - -#ifdef KERNEL_PRIVATE -#define EXC_MASK_VALID (EXC_MASK_ALL | EXC_MASK_CRASH | EXC_MASK_CORPSE_NOTIFY) +#define EXC_MASK_BAD_ACCESS (1 << EXC_BAD_ACCESS) +#define EXC_MASK_BAD_INSTRUCTION (1 << EXC_BAD_INSTRUCTION) +#define EXC_MASK_ARITHMETIC (1 << EXC_ARITHMETIC) +#define EXC_MASK_EMULATION (1 << EXC_EMULATION) +#define EXC_MASK_SOFTWARE (1 << EXC_SOFTWARE) +#define EXC_MASK_BREAKPOINT (1 << EXC_BREAKPOINT) +#define EXC_MASK_SYSCALL (1 << EXC_SYSCALL) +#define EXC_MASK_MACH_SYSCALL (1 << EXC_MACH_SYSCALL) +#define EXC_MASK_RPC_ALERT (1 << EXC_RPC_ALERT) +#define EXC_MASK_CRASH (1 << EXC_CRASH) +#define EXC_MASK_RESOURCE (1 << EXC_RESOURCE) +#define EXC_MASK_GUARD (1 << EXC_GUARD) +#define EXC_MASK_CORPSE_NOTIFY (1 << EXC_CORPSE_NOTIFY) + +#define EXC_MASK_ALL (EXC_MASK_BAD_ACCESS | \ + EXC_MASK_BAD_INSTRUCTION | \ + EXC_MASK_ARITHMETIC | \ + EXC_MASK_EMULATION | \ + EXC_MASK_SOFTWARE | \ + EXC_MASK_BREAKPOINT | \ + EXC_MASK_SYSCALL | \ + EXC_MASK_MACH_SYSCALL | \ + EXC_MASK_RPC_ALERT | \ + EXC_MASK_RESOURCE | \ + EXC_MASK_GUARD | \ + EXC_MASK_MACHINE) + +#ifdef KERNEL_PRIVATE +#define EXC_MASK_VALID (EXC_MASK_ALL | EXC_MASK_CRASH | EXC_MASK_CORPSE_NOTIFY) #endif /* KERNEL_PRIVATE */ -#define FIRST_EXCEPTION 1 /* ZERO is illegal */ +#define FIRST_EXCEPTION 1 /* ZERO is illegal */ /* * Machine independent codes for EXC_SOFTWARE - * Codes 0x10000 - 0x1FFFF reserved for OS emulation 
(Unix) + * Codes 0x10000 - 0x1FFFF reserved for OS emulation (Unix) * 0x10000 - 0x10002 in use for unix signals * 0x20000 - 0x2FFFF reserved for MACF */ -#define EXC_SOFT_SIGNAL 0x10003 /* Unix signal exceptions */ +#define EXC_SOFT_SIGNAL 0x10003 /* Unix signal exceptions */ -#define EXC_MACF_MIN 0x20000 /* MACF exceptions */ -#define EXC_MACF_MAX 0x2FFFF +#define EXC_MACF_MIN 0x20000 /* MACF exceptions */ +#define EXC_MACF_MAX 0x2FFFF -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include #include @@ -184,20 +184,20 @@ * Exported types */ -typedef int exception_type_t; -typedef integer_t exception_data_type_t; -typedef int64_t mach_exception_data_type_t; -typedef int exception_behavior_t; -typedef exception_data_type_t *exception_data_t; -typedef mach_exception_data_type_t *mach_exception_data_t; -typedef unsigned int exception_mask_t; -typedef exception_mask_t *exception_mask_array_t; -typedef exception_behavior_t *exception_behavior_array_t; -typedef thread_state_flavor_t *exception_flavor_array_t; -typedef mach_port_t *exception_port_array_t; -typedef mach_exception_data_type_t mach_exception_code_t; -typedef mach_exception_data_type_t mach_exception_subcode_t; - -#endif /* ASSEMBLER */ - -#endif /* _MACH_EXCEPTION_TYPES_H_ */ +typedef int exception_type_t; +typedef integer_t exception_data_type_t; +typedef int64_t mach_exception_data_type_t; +typedef int exception_behavior_t; +typedef exception_data_type_t *exception_data_t; +typedef mach_exception_data_type_t *mach_exception_data_t; +typedef unsigned int exception_mask_t; +typedef exception_mask_t *exception_mask_array_t; +typedef exception_behavior_t *exception_behavior_array_t; +typedef thread_state_flavor_t *exception_flavor_array_t; +typedef mach_port_t *exception_port_array_t; +typedef mach_exception_data_type_t mach_exception_code_t; +typedef mach_exception_data_type_t mach_exception_subcode_t; + +#endif /* ASSEMBLER */ + +#endif /* _MACH_EXCEPTION_TYPES_H_ */ diff --git a/osfmk/mach/host_info.h b/osfmk/mach/host_info.h index 716c17960..2bca65e0d 100644 --- a/osfmk/mach/host_info.h +++ b/osfmk/mach/host_info.h @@ -60,8 +60,8 @@ * Definitions for host_info call. */ -#ifndef _MACH_HOST_INFO_H_ -#define _MACH_HOST_INFO_H_ +#ifndef _MACH_HOST_INFO_H_ +#define _MACH_HOST_INFO_H_ #include #include @@ -74,158 +74,158 @@ /* * Generic information structure to allow for expansion. */ -typedef integer_t *host_info_t; /* varying array of int. */ -typedef integer_t *host_info64_t; /* varying array of int. */ +typedef integer_t *host_info_t; /* varying array of int. */ +typedef integer_t *host_info64_t; /* varying array of int. */ -#define HOST_INFO_MAX (1024) /* max array size */ -typedef integer_t host_info_data_t[HOST_INFO_MAX]; +#define HOST_INFO_MAX (1024) /* max array size */ +typedef integer_t host_info_data_t[HOST_INFO_MAX]; #define KERNEL_VERSION_MAX (512) -typedef char kernel_version_t[KERNEL_VERSION_MAX]; +typedef char kernel_version_t[KERNEL_VERSION_MAX]; #define KERNEL_BOOT_INFO_MAX (4096) -typedef char kernel_boot_info_t[KERNEL_BOOT_INFO_MAX]; +typedef char kernel_boot_info_t[KERNEL_BOOT_INFO_MAX]; /* * Currently defined information. 
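/*
 * Illustrative usage (not part of the patch). Every *_COUNT macro below
 * follows one idiom: the info structure's size expressed in integer_t units,
 * which is exactly the in/out count argument host_info() expects. A typical
 * user-space call:
 */
#include <mach/mach.h>
#include <stdio.h>

static void
ex_print_cpu_counts(void)
{
	host_basic_info_data_t info;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (host_info(mach_host_self(), HOST_BASIC_INFO,
	    (host_info_t)&info, &count) == KERN_SUCCESS) {
		printf("%d of %d CPUs available\n", info.avail_cpus, info.max_cpus);
	}
}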
*/ /* host_info() */ -typedef integer_t host_flavor_t; -#define HOST_BASIC_INFO 1 /* basic info */ -#define HOST_SCHED_INFO 3 /* scheduling info */ -#define HOST_RESOURCE_SIZES 4 /* kernel struct sizes */ -#define HOST_PRIORITY_INFO 5 /* priority information */ -#define HOST_SEMAPHORE_TRAPS 7 /* Has semaphore traps */ -#define HOST_MACH_MSG_TRAP 8 /* Has mach_msg_trap */ -#define HOST_VM_PURGABLE 9 /* purg'e'able memory info */ -#define HOST_DEBUG_INFO_INTERNAL 10 /* Used for kernel internal development tests only */ -#define HOST_CAN_HAS_DEBUGGER 11 -#define HOST_PREFERRED_USER_ARCH 12 /* Get the preferred user-space architecture */ +typedef integer_t host_flavor_t; +#define HOST_BASIC_INFO 1 /* basic info */ +#define HOST_SCHED_INFO 3 /* scheduling info */ +#define HOST_RESOURCE_SIZES 4 /* kernel struct sizes */ +#define HOST_PRIORITY_INFO 5 /* priority information */ +#define HOST_SEMAPHORE_TRAPS 7 /* Has semaphore traps */ +#define HOST_MACH_MSG_TRAP 8 /* Has mach_msg_trap */ +#define HOST_VM_PURGABLE 9 /* purg'e'able memory info */ +#define HOST_DEBUG_INFO_INTERNAL 10 /* Used for kernel internal development tests only */ +#define HOST_CAN_HAS_DEBUGGER 11 +#define HOST_PREFERRED_USER_ARCH 12 /* Get the preferred user-space architecture */ #ifdef MACH_KERNEL_PRIVATE struct host_basic_info_old { - integer_t max_cpus; /* max number of cpus possible */ - uint32_t avail_cpus; /* number of cpus now available */ - natural_t memory_size; /* size of memory in bytes */ - cpu_type_t cpu_type; /* cpu type */ - cpu_subtype_t cpu_subtype; /* cpu subtype */ + integer_t max_cpus; /* max number of cpus possible */ + uint32_t avail_cpus; /* number of cpus now available */ + natural_t memory_size; /* size of memory in bytes */ + cpu_type_t cpu_type; /* cpu type */ + cpu_subtype_t cpu_subtype; /* cpu subtype */ }; -typedef struct host_basic_info_old host_basic_info_data_old_t; -typedef struct host_basic_info_old *host_basic_info_old_t; +typedef struct host_basic_info_old host_basic_info_data_old_t; +typedef struct host_basic_info_old *host_basic_info_old_t; #define HOST_BASIC_INFO_OLD_COUNT ((mach_msg_type_number_t) \ - (sizeof(host_basic_info_data_old_t)/sizeof(integer_t))) + (sizeof(host_basic_info_data_old_t)/sizeof(integer_t))) #endif /* MACH_KERNEL_PRIVATE */ struct host_can_has_debugger_info { - boolean_t can_has_debugger; + boolean_t can_has_debugger; }; -typedef struct host_can_has_debugger_info host_can_has_debugger_info_data_t; -typedef struct host_can_has_debugger_info *host_can_has_debugger_info_t; +typedef struct host_can_has_debugger_info host_can_has_debugger_info_data_t; +typedef struct host_can_has_debugger_info *host_can_has_debugger_info_t; #define HOST_CAN_HAS_DEBUGGER_COUNT ((mach_msg_type_number_t) \ - (sizeof(host_can_has_debugger_info_data_t)/sizeof(integer_t))) + (sizeof(host_can_has_debugger_info_data_t)/sizeof(integer_t))) #pragma pack(4) struct host_basic_info { - integer_t max_cpus; /* max number of CPUs possible */ - integer_t avail_cpus; /* number of CPUs now available */ - natural_t memory_size; /* size of memory in bytes, capped at 2 GB */ - cpu_type_t cpu_type; /* cpu type */ - cpu_subtype_t cpu_subtype; /* cpu subtype */ - cpu_threadtype_t cpu_threadtype; /* cpu threadtype */ - integer_t physical_cpu; /* number of physical CPUs now available */ - integer_t physical_cpu_max; /* max number of physical CPUs possible */ - integer_t logical_cpu; /* number of logical cpu now available */ - integer_t logical_cpu_max; /* max number of physical CPUs possible */ - uint64_t 
max_mem; /* actual size of physical memory */ + integer_t max_cpus; /* max number of CPUs possible */ + integer_t avail_cpus; /* number of CPUs now available */ + natural_t memory_size; /* size of memory in bytes, capped at 2 GB */ + cpu_type_t cpu_type; /* cpu type */ + cpu_subtype_t cpu_subtype; /* cpu subtype */ + cpu_threadtype_t cpu_threadtype; /* cpu threadtype */ + integer_t physical_cpu; /* number of physical CPUs now available */ + integer_t physical_cpu_max; /* max number of physical CPUs possible */ + integer_t logical_cpu; /* number of logical cpu now available */ + integer_t logical_cpu_max; /* max number of physical CPUs possible */ + uint64_t max_mem; /* actual size of physical memory */ }; #pragma pack() -typedef struct host_basic_info host_basic_info_data_t; -typedef struct host_basic_info *host_basic_info_t; +typedef struct host_basic_info host_basic_info_data_t; +typedef struct host_basic_info *host_basic_info_t; #define HOST_BASIC_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(host_basic_info_data_t)/sizeof(integer_t))) + (sizeof(host_basic_info_data_t)/sizeof(integer_t))) struct host_sched_info { - integer_t min_timeout; /* minimum timeout in milliseconds */ - integer_t min_quantum; /* minimum quantum in milliseconds */ + integer_t min_timeout; /* minimum timeout in milliseconds */ + integer_t min_quantum; /* minimum quantum in milliseconds */ }; -typedef struct host_sched_info host_sched_info_data_t; -typedef struct host_sched_info *host_sched_info_t; +typedef struct host_sched_info host_sched_info_data_t; +typedef struct host_sched_info *host_sched_info_t; #define HOST_SCHED_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(host_sched_info_data_t)/sizeof(integer_t))) + (sizeof(host_sched_info_data_t)/sizeof(integer_t))) struct kernel_resource_sizes { - natural_t task; - natural_t thread; - natural_t port; - natural_t memory_region; - natural_t memory_object; + natural_t task; + natural_t thread; + natural_t port; + natural_t memory_region; + natural_t memory_object; }; -typedef struct kernel_resource_sizes kernel_resource_sizes_data_t; -typedef struct kernel_resource_sizes *kernel_resource_sizes_t; +typedef struct kernel_resource_sizes kernel_resource_sizes_data_t; +typedef struct kernel_resource_sizes *kernel_resource_sizes_t; #define HOST_RESOURCE_SIZES_COUNT ((mach_msg_type_number_t) \ - (sizeof(kernel_resource_sizes_data_t)/sizeof(integer_t))) + (sizeof(kernel_resource_sizes_data_t)/sizeof(integer_t))) struct host_priority_info { - integer_t kernel_priority; - integer_t system_priority; - integer_t server_priority; - integer_t user_priority; - integer_t depress_priority; - integer_t idle_priority; - integer_t minimum_priority; - integer_t maximum_priority; + integer_t kernel_priority; + integer_t system_priority; + integer_t server_priority; + integer_t user_priority; + integer_t depress_priority; + integer_t idle_priority; + integer_t minimum_priority; + integer_t maximum_priority; }; -typedef struct host_priority_info host_priority_info_data_t; -typedef struct host_priority_info *host_priority_info_t; +typedef struct host_priority_info host_priority_info_data_t; +typedef struct host_priority_info *host_priority_info_t; #define HOST_PRIORITY_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(host_priority_info_data_t)/sizeof(integer_t))) + (sizeof(host_priority_info_data_t)/sizeof(integer_t))) /* host_statistics() */ -#define HOST_LOAD_INFO 1 /* System loading stats */ -#define HOST_VM_INFO 2 /* Virtual memory stats */ -#define HOST_CPU_LOAD_INFO 3 /* CPU load 
stats */ +#define HOST_LOAD_INFO 1 /* System loading stats */ +#define HOST_VM_INFO 2 /* Virtual memory stats */ +#define HOST_CPU_LOAD_INFO 3 /* CPU load stats */ /* host_statistics64() */ -#define HOST_VM_INFO64 4 /* 64-bit virtual memory stats */ -#define HOST_EXTMOD_INFO64 5 /* External modification stats */ -#define HOST_EXPIRED_TASK_INFO 6 /* Statistics for expired tasks */ +#define HOST_VM_INFO64 4 /* 64-bit virtual memory stats */ +#define HOST_EXTMOD_INFO64 5 /* External modification stats */ +#define HOST_EXPIRED_TASK_INFO 6 /* Statistics for expired tasks */ #ifdef XNU_KERNEL_PRIVATE void host_statistics_init(void); #endif struct host_load_info { - integer_t avenrun[3]; /* scaled by LOAD_SCALE */ - integer_t mach_factor[3]; /* scaled by LOAD_SCALE */ + integer_t avenrun[3]; /* scaled by LOAD_SCALE */ + integer_t mach_factor[3]; /* scaled by LOAD_SCALE */ }; -typedef struct host_load_info host_load_info_data_t; -typedef struct host_load_info *host_load_info_t; -#define HOST_LOAD_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(host_load_info_data_t)/sizeof(integer_t))) +typedef struct host_load_info host_load_info_data_t; +typedef struct host_load_info *host_load_info_t; +#define HOST_LOAD_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(host_load_info_data_t)/sizeof(integer_t))) -typedef struct vm_purgeable_info host_purgable_info_data_t; -typedef struct vm_purgeable_info *host_purgable_info_t; +typedef struct vm_purgeable_info host_purgable_info_data_t; +typedef struct vm_purgeable_info *host_purgable_info_t; #define HOST_VM_PURGABLE_COUNT ((mach_msg_type_number_t) \ - (sizeof(host_purgable_info_data_t)/sizeof(integer_t))) + (sizeof(host_purgable_info_data_t)/sizeof(integer_t))) /* in */ /* vm_statistics64 */ -#define HOST_VM_INFO64_COUNT ((mach_msg_type_number_t) \ - (sizeof(vm_statistics64_data_t)/sizeof(integer_t))) +#define HOST_VM_INFO64_COUNT ((mach_msg_type_number_t) \ + (sizeof(vm_statistics64_data_t)/sizeof(integer_t))) /* size of the latest version of the structure */ #define HOST_VM_INFO64_LATEST_COUNT HOST_VM_INFO64_COUNT -#define HOST_VM_INFO64_REV1_COUNT HOST_VM_INFO64_LATEST_COUNT +#define HOST_VM_INFO64_REV1_COUNT HOST_VM_INFO64_LATEST_COUNT /* previous versions: adjust the size according to what was added each time */ -#define HOST_VM_INFO64_REV0_COUNT /* added compression and swapper info (14 ints) */\ +#define HOST_VM_INFO64_REV0_COUNT /* added compression and swapper info (14 ints) */ \ ((mach_msg_type_number_t) \ (HOST_VM_INFO64_REV1_COUNT - 14)) @@ -238,69 +238,69 @@ typedef struct vm_purgeable_info *host_purgable_info_t; #define HOST_EXTMOD_INFO64_LATEST_COUNT HOST_EXTMOD_INFO64_COUNT /* vm_statistics */ -#define HOST_VM_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(vm_statistics_data_t)/sizeof(integer_t))) +#define HOST_VM_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(vm_statistics_data_t)/sizeof(integer_t))) /* size of the latest version of the structure */ #define HOST_VM_INFO_LATEST_COUNT HOST_VM_INFO_COUNT -#define HOST_VM_INFO_REV2_COUNT HOST_VM_INFO_LATEST_COUNT +#define HOST_VM_INFO_REV2_COUNT HOST_VM_INFO_LATEST_COUNT /* previous versions: adjust the size according to what was added each time */ -#define HOST_VM_INFO_REV1_COUNT /* added "speculative_count" (1 int) */ \ +#define HOST_VM_INFO_REV1_COUNT /* added "speculative_count" (1 int) */ \ ((mach_msg_type_number_t) \ (HOST_VM_INFO_REV2_COUNT - 1)) -#define HOST_VM_INFO_REV0_COUNT /* added "purgable" info (2 ints) */ \ +#define HOST_VM_INFO_REV0_COUNT /* added "purgable" info (2 ints) */ 
\ ((mach_msg_type_number_t) \ (HOST_VM_INFO_REV1_COUNT - 2)) -struct host_cpu_load_info { /* number of ticks while running... */ - natural_t cpu_ticks[CPU_STATE_MAX]; /* ... in the given mode */ +struct host_cpu_load_info { /* number of ticks while running... */ + natural_t cpu_ticks[CPU_STATE_MAX]; /* ... in the given mode */ }; -typedef struct host_cpu_load_info host_cpu_load_info_data_t; -typedef struct host_cpu_load_info *host_cpu_load_info_t; +typedef struct host_cpu_load_info host_cpu_load_info_data_t; +typedef struct host_cpu_load_info *host_cpu_load_info_t; #define HOST_CPU_LOAD_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof (host_cpu_load_info_data_t) / sizeof (integer_t))) + (sizeof (host_cpu_load_info_data_t) / sizeof (integer_t))) struct host_preferred_user_arch { - cpu_type_t cpu_type; /* Preferred user-space cpu type */ - cpu_subtype_t cpu_subtype; /* Preferred user-space cpu subtype */ + cpu_type_t cpu_type; /* Preferred user-space cpu type */ + cpu_subtype_t cpu_subtype; /* Preferred user-space cpu subtype */ }; -typedef struct host_preferred_user_arch host_preferred_user_arch_data_t; -typedef struct host_preferred_user_arch *host_preferred_user_arch_t; +typedef struct host_preferred_user_arch host_preferred_user_arch_data_t; +typedef struct host_preferred_user_arch *host_preferred_user_arch_t; #define HOST_PREFERRED_USER_ARCH_COUNT ((mach_msg_type_number_t) \ - (sizeof(host_preferred_user_arch_data_t)/sizeof(integer_t))) + (sizeof(host_preferred_user_arch_data_t)/sizeof(integer_t))) #ifdef PRIVATE /* * CPU Statistics information */ struct _processor_statistics_np { - int32_t ps_cpuid; + int32_t ps_cpuid; - uint32_t ps_csw_count; - uint32_t ps_preempt_count; - uint32_t ps_preempted_rt_count; - uint32_t ps_preempted_by_rt_count; + uint32_t ps_csw_count; + uint32_t ps_preempt_count; + uint32_t ps_preempted_rt_count; + uint32_t ps_preempted_by_rt_count; - uint32_t ps_rt_sched_count; + uint32_t ps_rt_sched_count; - uint32_t ps_interrupt_count; - uint32_t ps_ipi_count; - uint32_t ps_timer_pop_count; + uint32_t ps_interrupt_count; + uint32_t ps_ipi_count; + uint32_t ps_timer_pop_count; - uint64_t ps_runq_count_sum __attribute((aligned(8))); + uint64_t ps_runq_count_sum __attribute((aligned(8))); - uint32_t ps_idle_transitions; - uint32_t ps_quantum_timer_expirations; + uint32_t ps_idle_transitions; + uint32_t ps_quantum_timer_expirations; }; struct host_debug_info_internal { uint64_t config_bank:1, /* built configurations */ - config_atm:1, - config_csr:1, - config_coalitions:1, - config_unused:60; + config_atm:1, + config_csr:1, + config_coalitions:1, + config_unused:60; }; typedef struct host_debug_info_internal *host_debug_info_internal_t; @@ -312,13 +312,13 @@ typedef struct host_debug_info_internal host_debug_info_internal_data_t; #ifdef KERNEL_PRIVATE -extern kern_return_t set_sched_stats_active( - boolean_t active); +extern kern_return_t set_sched_stats_active( + boolean_t active); -extern kern_return_t get_sched_statistics( - struct _processor_statistics_np *out, - uint32_t *count); +extern kern_return_t get_sched_statistics( + struct _processor_statistics_np *out, + uint32_t *count); #endif /* KERNEL_PRIVATE */ -#endif /* _MACH_HOST_INFO_H_ */ +#endif /* _MACH_HOST_INFO_H_ */ diff --git a/osfmk/mach/host_notify.h b/osfmk/mach/host_notify.h index 0a15991d2..cda654bf4 100644 --- a/osfmk/mach/host_notify.h +++ b/osfmk/mach/host_notify.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,18 +22,18 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _MACH_HOST_NOTIFY_H_ -#define _MACH_HOST_NOTIFY_H_ +#ifndef _MACH_HOST_NOTIFY_H_ +#define _MACH_HOST_NOTIFY_H_ -#define HOST_NOTIFY_CALENDAR_CHANGE 0 -#define HOST_NOTIFY_CALENDAR_SET 1 -#define HOST_NOTIFY_TYPE_MAX 1 +#define HOST_NOTIFY_CALENDAR_CHANGE 0 +#define HOST_NOTIFY_CALENDAR_SET 1 +#define HOST_NOTIFY_TYPE_MAX 1 -#define HOST_CALENDAR_CHANGED_REPLYID 950 -#define HOST_CALENDAR_SET_REPLYID 951 +#define HOST_CALENDAR_CHANGED_REPLYID 950 +#define HOST_CALENDAR_SET_REPLYID 951 #endif /* _MACH_HOST_NOTIFY_H_ */ diff --git a/osfmk/mach/host_reboot.h b/osfmk/mach/host_reboot.h index 65609dd7f..02d8d089a 100644 --- a/osfmk/mach/host_reboot.h +++ b/osfmk/mach/host_reboot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,6 +34,6 @@ #define HOST_REBOOT_HALT 0x0008 #define HOST_REBOOT_UPSDELAY 0x0100 -#define HOST_REBOOT_DEBUGGER 0x1000 +#define HOST_REBOOT_DEBUGGER 0x1000 -#endif /* _MACH_HOST_REBOOT_ */ +#endif /* _MACH_HOST_REBOOT_ */ diff --git a/osfmk/mach/host_special_ports.h b/osfmk/mach/host_special_ports.h index 8c97b882e..f4632ed13 100644 --- a/osfmk/mach/host_special_ports.h +++ b/osfmk/mach/host_special_ports.h @@ -2,7 +2,7 @@ * Copyright (c) 2003 Apple Computer, Inc. All rights reserved. 
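/*
 * Illustrative usage (not part of the patch; assumes the standard mach_host
 * host_request_notification() routine). The HOST_NOTIFY_* ids above are
 * registered per-port; the kernel then sends a message carrying the matching
 * *_REPLYID msgh_id whenever the calendar changes or is set:
 */
#include <mach/mach.h>
#include <mach/host_notify.h>

static kern_return_t
ex_watch_calendar(mach_port_t notify_port)
{
	return host_request_notification(mach_host_self(),
	           HOST_NOTIFY_CALENDAR_CHANGE, notify_port);
}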
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -93,22 +93,22 @@ #define HOST_SEATBELT_PORT (7 + HOST_MAX_SPECIAL_KERNEL_PORT) #define HOST_KEXTD_PORT (8 + HOST_MAX_SPECIAL_KERNEL_PORT) #define HOST_LAUNCHCTL_PORT (9 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_UNFREED_PORT (10 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_AMFID_PORT (11 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_GSSD_PORT (12 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_TELEMETRY_PORT (13 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_ATM_NOTIFICATION_PORT (14 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_COALITION_PORT (15 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_UNFREED_PORT (10 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_AMFID_PORT (11 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_GSSD_PORT (12 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_TELEMETRY_PORT (13 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_ATM_NOTIFICATION_PORT (14 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_COALITION_PORT (15 + HOST_MAX_SPECIAL_KERNEL_PORT) #define HOST_SYSDIAGNOSE_PORT (16 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_XPC_EXCEPTION_PORT (17 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_CONTAINERD_PORT (18 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_NODE_PORT (19 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_RESOURCE_NOTIFY_PORT (20 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_CLOSURED_PORT (21 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_SYSPOLICYD_PORT (22 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_XPC_EXCEPTION_PORT (17 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_CONTAINERD_PORT (18 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_NODE_PORT (19 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_RESOURCE_NOTIFY_PORT (20 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_CLOSURED_PORT (21 + HOST_MAX_SPECIAL_KERNEL_PORT) +#define HOST_SYSPOLICYD_PORT (22 + HOST_MAX_SPECIAL_KERNEL_PORT) -#define HOST_MAX_SPECIAL_PORT HOST_SYSPOLICYD_PORT - /* MAX = last since rdar://35861175 */ +#define HOST_MAX_SPECIAL_PORT HOST_SYSPOLICYD_PORT +/* MAX = last since rdar://35861175 */ /* obsolete name */ #define HOST_CHUD_PORT HOST_LAUNCHCTL_PORT @@ -116,8 +116,8 @@ /* * Special node identifier to always represent the local node. */ -#define HOST_LOCAL_NODE -1 - +#define HOST_LOCAL_NODE -1 + /* * Definitions for ease of use. * @@ -125,64 +125,64 @@ * be the local node host port. In the set call, the host must the per-node * host port for the node being affected. */ -#define host_get_host_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_host_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_PORT, (port))) #define host_set_host_port(host, port) (KERN_INVALID_ARGUMENT) -#define host_get_host_priv_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_host_priv_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_PRIV_PORT, (port))) #define host_set_host_priv_port(host, port) (KERN_INVALID_ARGUMENT) -#define host_get_io_master_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_io_master_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_IO_MASTER_PORT, (port))) #define host_set_io_master_port(host, port) (KERN_INVALID_ARGUMENT) /* * User-settable special ports. 
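*
* (Illustrative sketch, not part of the original interface notes: these
* wrappers all expand to host_get_special_port()/host_set_special_port().
* A minimal user-space fetch might look like the following; `hp' is a
* hypothetical local standing in for a host port the caller obtained,
* and several of these ports require the privileged host port rather
* than mach_host_self():)
*
*	#include <mach/mach.h>
*
*	mach_port_t hp = mach_host_self();
*	mach_port_t notify = MACH_PORT_NULL;
*	kern_return_t kr = host_get_user_notification_port(hp, &notify);
*	if (kr == KERN_SUCCESS && MACH_PORT_VALID(notify)) {
*		mach_port_deallocate(mach_task_self(), notify);
*	}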
*/ -#define host_get_dynamic_pager_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_dynamic_pager_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_DYNAMIC_PAGER_PORT, (port))) -#define host_set_dynamic_pager_port(host, port) \ +#define host_set_dynamic_pager_port(host, port) \ (host_set_special_port((host), HOST_DYNAMIC_PAGER_PORT, (port))) -#define host_get_audit_control_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_audit_control_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_AUDIT_CONTROL_PORT, (port))) -#define host_set_audit_control_port(host, port) \ +#define host_set_audit_control_port(host, port) \ (host_set_special_port((host), HOST_AUDIT_CONTROL_PORT, (port))) -#define host_get_user_notification_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_user_notification_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_USER_NOTIFICATION_PORT, (port))) -#define host_set_user_notification_port(host, port) \ +#define host_set_user_notification_port(host, port) \ (host_set_special_port((host), HOST_USER_NOTIFICATION_PORT, (port))) -#define host_get_automountd_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_automountd_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_AUTOMOUNTD_PORT, (port))) -#define host_set_automountd_port(host, port) \ +#define host_set_automountd_port(host, port) \ (host_set_special_port((host), HOST_AUTOMOUNTD_PORT, (port))) -#define host_get_lockd_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_lockd_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_LOCKD_PORT, (port))) -#define host_set_lockd_port(host, port) \ +#define host_set_lockd_port(host, port) \ (host_set_special_port((host), HOST_LOCKD_PORT, (port))) -#define host_get_ktrace_background_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_ktrace_background_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_KTRACE_BACKGROUND_PORT, (port))) -#define host_set_ktrace_background_port(host, port) \ +#define host_set_ktrace_background_port(host, port) \ (host_set_special_port((host), HOST_KTRACE_BACKGROUND_PORT, (port))) -#define host_get_kextd_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_kextd_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_KEXTD_PORT, (port))) -#define host_set_kextd_port(host, port) \ +#define host_set_kextd_port(host, port) \ (host_set_special_port((host), HOST_KEXTD_PORT, (port))) #define host_get_launchctl_port(host, port) \ @@ -194,74 +194,74 @@ #define host_get_chud_port(host, port) host_get_launchctl_port(host, port) #define host_set_chud_port(host, port) host_set_launchctl_port(host, port) -#define host_get_unfreed_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_unfreed_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_UNFREED_PORT, (port))) -#define host_set_unfreed_port(host, port) \ +#define host_set_unfreed_port(host, port) \ (host_set_special_port((host), HOST_UNFREED_PORT, (port))) -#define host_get_amfid_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_amfid_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_AMFID_PORT, (port))) -#define host_set_amfid_port(host, port) \ +#define host_set_amfid_port(host, port) \ (host_set_special_port((host), 
HOST_AMFID_PORT, (port))) -#define host_get_gssd_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_gssd_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_GSSD_PORT, (port))) -#define host_set_gssd_port(host, port) \ +#define host_set_gssd_port(host, port) \ (host_set_special_port((host), HOST_GSSD_PORT, (port))) -#define host_get_telemetry_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_telemetry_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_TELEMETRY_PORT, (port))) -#define host_set_telemetry_port(host, port) \ +#define host_set_telemetry_port(host, port) \ (host_set_special_port((host), HOST_TELEMETRY_PORT, (port))) -#define host_get_atm_notification_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_atm_notification_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_ATM_NOTIFICATION_PORT, (port))) -#define host_set_atm_notification_port(host, port) \ +#define host_set_atm_notification_port(host, port) \ (host_set_special_port((host), HOST_ATM_NOTIFICATION_PORT, (port))) -#define host_get_coalition_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_coalition_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_COALITION_PORT, (port))) -#define host_set_coalition_port(host, port) \ +#define host_set_coalition_port(host, port) \ (host_set_special_port((host), HOST_COALITION_PORT, (port))) -#define host_get_sysdiagnose_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_sysdiagnose_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_SYSDIAGNOSE_PORT, (port))) -#define host_set_sysdiagnose_port(host, port) \ +#define host_set_sysdiagnose_port(host, port) \ (host_set_special_port((host), HOST_SYSDIAGNOSE_PORT, (port))) -#define host_get_container_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_container_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_CONTAINERD_PORT, (port))) -#define host_set_container_port(host, port) \ +#define host_set_container_port(host, port) \ (host_set_special_port((host), HOST_CONTAINERD_PORT, (port))) -#define host_get_node_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_node_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_NODE_PORT, (port))) -#define host_set_node_port(host, port) \ +#define host_set_node_port(host, port) \ (host_set_special_port((host), HOST_NODE_PORT, (port))) -#define host_get_closured_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_closured_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_CLOSURED_PORT, (port))) -#define host_set_closured_port(host, port) \ +#define host_set_closured_port(host, port) \ (host_set_special_port((host), HOST_CLOSURED_PORT, (port))) -#define host_get_syspolicyd_port(host, port) \ - (host_get_special_port((host), \ +#define host_get_syspolicyd_port(host, port) \ + (host_get_special_port((host), \ HOST_LOCAL_NODE, HOST_SYSPOLICYD_PORT, (port))) -#define host_set_syspolicyd_port(host, port) \ +#define host_set_syspolicyd_port(host, port) \ (host_set_special_port((host), HOST_SYSPOLICYD_PORT, (port))) /* HOST_RESOURCE_NOTIFY_PORT doesn't #defines these conveniences. 
- All lookups go through send_resource_violation() + * All lookups go through send_resource_violation() */ -#endif /* _MACH_HOST_SPECIAL_PORTS_H_ */ +#endif /* _MACH_HOST_SPECIAL_PORTS_H_ */ diff --git a/osfmk/mach/i386/_structs.h b/osfmk/mach/i386/_structs.h index 0f687777c..3f63a0058 100644 --- a/osfmk/mach/i386/_structs.h +++ b/osfmk/mach/i386/_structs.h @@ -682,6 +682,32 @@ _STRUCT_X86_THREAD_STATE64 }; #endif /* !__DARWIN_UNIX03 */ +/* + * 64 bit versions of the above (complete) + */ + +#if __DARWIN_UNIX03 +#define _STRUCT_X86_THREAD_FULL_STATE64 struct __darwin_x86_thread_full_state64 +_STRUCT_X86_THREAD_FULL_STATE64 +{ + _STRUCT_X86_THREAD_STATE64 ss64; + __uint64_t __ds; + __uint64_t __es; + __uint64_t __ss; + __uint64_t __gsbase; +}; +#else /* !__DARWIN_UNIX03 */ +#define _STRUCT_X86_THREAD_FULL_STATE64 struct x86_thread_full_state64 +_STRUCT_X86_THREAD_FULL_STATE64 +{ + _STRUCT_X86_THREAD_STATE64 ss64; + __uint64_t ds; + __uint64_t es; + __uint64_t ss; + __uint64_t gsbase; +}; +#endif /* !__DARWIN_UNIX03 */ + #if __DARWIN_UNIX03 #define _STRUCT_X86_FLOAT_STATE64 struct __darwin_x86_float_state64 diff --git a/osfmk/mach/i386/boolean.h b/osfmk/mach/i386/boolean.h index 100f7e7b5..702375938 100644 --- a/osfmk/mach/i386/boolean.h +++ b/osfmk/mach/i386/boolean.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -62,13 +62,13 @@ * Boolean type, for I386. */ -#ifndef _MACH_I386_BOOLEAN_H_ +#ifndef _MACH_I386_BOOLEAN_H_ #define _MACH_I386_BOOLEAN_H_ #if defined(__x86_64__) && !defined(KERNEL) -typedef unsigned int boolean_t; +typedef unsigned int boolean_t; #else -typedef int boolean_t; +typedef int boolean_t; #endif -#endif /* _MACH_I386_BOOLEAN_H_ */ +#endif /* _MACH_I386_BOOLEAN_H_ */ diff --git a/osfmk/mach/i386/exception.h b/osfmk/mach/i386/exception.h index 3bdf1fcdd..44b5272d3 100644 --- a/osfmk/mach/i386/exception.h +++ b/osfmk/mach/i386/exception.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,72 +22,72 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _MACH_I386_EXCEPTION_H_ +#ifndef _MACH_I386_EXCEPTION_H_ #define _MACH_I386_EXCEPTION_H_ /* * No machine dependent types for the 80386 */ -#define EXC_TYPES_COUNT 14 /* incl. illegal exception 0 */ +#define EXC_TYPES_COUNT 14 /* incl. illegal exception 0 */ /* * Codes and subcodes for 80386 exceptions. 
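*
* (A hedged sketch of how these codes surface in practice: a Mach
* exception handler receives the machine-dependent detail in its code
* array, so an integer divide fault on x86 can be recognized roughly as
* below. The handler name follows the mach_exc.defs convention; the
* surrounding message-server plumbing is omitted:)
*
*	kern_return_t
*	catch_mach_exception_raise(mach_port_t exception_port,
*	    mach_port_t thread, mach_port_t task,
*	    exception_type_t exception, mach_exception_data_t code,
*	    mach_msg_type_number_t code_count)
*	{
*		if (exception == EXC_ARITHMETIC && code_count > 0 &&
*		    code[0] == EXC_I386_DIV)
*			return KERN_SUCCESS;
*		return KERN_FAILURE;
*	}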
*/ -#define EXCEPTION_CODE_MAX 2 /* currently code and subcode */ +#define EXCEPTION_CODE_MAX 2 /* currently code and subcode */ /* * EXC_BAD_INSTRUCTION */ -#define EXC_I386_INVOP 1 +#define EXC_I386_INVOP 1 /* * EXC_ARITHMETIC */ -#define EXC_I386_DIV 1 -#define EXC_I386_INTO 2 -#define EXC_I386_NOEXT 3 -#define EXC_I386_EXTOVR 4 -#define EXC_I386_EXTERR 5 -#define EXC_I386_EMERR 6 -#define EXC_I386_BOUND 7 +#define EXC_I386_DIV 1 +#define EXC_I386_INTO 2 +#define EXC_I386_NOEXT 3 +#define EXC_I386_EXTOVR 4 +#define EXC_I386_EXTERR 5 +#define EXC_I386_EMERR 6 +#define EXC_I386_BOUND 7 #define EXC_I386_SSEEXTERR 8 /* @@ -103,33 +103,33 @@ * EXC_BREAKPOINT */ -#define EXC_I386_SGL 1 -#define EXC_I386_BPT 2 - -#define EXC_I386_DIVERR 0 /* divide by 0 eprror */ -#define EXC_I386_SGLSTP 1 /* single step */ -#define EXC_I386_NMIFLT 2 /* NMI */ -#define EXC_I386_BPTFLT 3 /* breakpoint fault */ -#define EXC_I386_INTOFLT 4 /* INTO overflow fault */ -#define EXC_I386_BOUNDFLT 5 /* BOUND instruction fault */ -#define EXC_I386_INVOPFLT 6 /* invalid opcode fault */ -#define EXC_I386_NOEXTFLT 7 /* extension not available fault*/ -#define EXC_I386_DBLFLT 8 /* double fault */ -#define EXC_I386_EXTOVRFLT 9 /* extension overrun fault */ -#define EXC_I386_INVTSSFLT 10 /* invalid TSS fault */ -#define EXC_I386_SEGNPFLT 11 /* segment not present fault */ -#define EXC_I386_STKFLT 12 /* stack fault */ -#define EXC_I386_GPFLT 13 /* general protection fault */ -#define EXC_I386_PGFLT 14 /* page fault */ -#define EXC_I386_EXTERRFLT 16 /* extension error fault */ -#define EXC_I386_ALIGNFLT 17 /* Alignment fault */ -#define EXC_I386_ENDPERR 33 /* emulated extension error flt */ -#define EXC_I386_ENOEXTFLT 32 /* emulated ext not present */ +#define EXC_I386_SGL 1 +#define EXC_I386_BPT 2 + +#define EXC_I386_DIVERR 0 /* divide by 0 error */ +#define EXC_I386_SGLSTP 1 /* single step */ +#define EXC_I386_NMIFLT 2 /* NMI */ +#define EXC_I386_BPTFLT 3 /* breakpoint fault */ +#define EXC_I386_INTOFLT 4 /* INTO overflow fault */ +#define EXC_I386_BOUNDFLT 5 /* BOUND instruction fault */ +#define EXC_I386_INVOPFLT 6 /* invalid opcode fault */ +#define EXC_I386_NOEXTFLT 7 /* extension not available fault */ +#define EXC_I386_DBLFLT 8 /* double fault */ +#define EXC_I386_EXTOVRFLT 9 /* extension overrun fault */ +#define EXC_I386_INVTSSFLT 10 /* invalid TSS fault */ +#define EXC_I386_SEGNPFLT 11 /* segment not present fault */ +#define EXC_I386_STKFLT 12 /* stack fault */ +#define EXC_I386_GPFLT 13 /* general protection fault */ +#define EXC_I386_PGFLT 14 /* page fault */ +#define EXC_I386_EXTERRFLT 16 /* extension error fault */ +#define EXC_I386_ALIGNFLT 17 /* Alignment fault */ +#define EXC_I386_ENDPERR 33 /* emulated extension error flt */ +#define EXC_I386_ENOEXTFLT 32 /* emulated ext not present */ /* * machine dependent exception masks */ -#define EXC_MASK_MACHINE 0 +#define EXC_MASK_MACHINE 0 -#endif /* _MACH_I386_EXCEPTION_H_ */ +#endif /* _MACH_I386_EXCEPTION_H_ */ diff --git a/osfmk/mach/i386/fp_reg.h b/osfmk/mach/i386/fp_reg.h index 718298b59..3d2c69302 100644 --- a/osfmk/mach/i386/fp_reg.h +++ b/osfmk/mach/i386/fp_reg.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License').
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,178 +22,178 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1992-1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ /* */ -#ifndef _I386_FP_SAVE_H_ -#define _I386_FP_SAVE_H_ +#ifndef _I386_FP_SAVE_H_ +#define _I386_FP_SAVE_H_ #ifdef MACH_KERNEL_PRIVATE -struct x86_fx_thread_state { +struct x86_fx_thread_state { unsigned short fx_control; /* control */ unsigned short fx_status; /* status */ - unsigned char fx_tag; /* register tags */ - unsigned char fx_bbz1; /* better be zero when calling fxrtstor */ + unsigned char fx_tag; /* register tags */ + unsigned char fx_bbz1; /* better be zero when calling fxrtstor */ unsigned short fx_opcode; union { - struct { /* 32-bit layout: */ - unsigned int fx_eip; /* eip instruction */ - unsigned short fx_cs; /* cs instruction */ - unsigned short fx_bbz2; /* better be zero when calling fxrtstor */ - unsigned int fx_dp; /* data address */ - unsigned short fx_ds; /* data segment */ - unsigned short fx_bbz3; /* better be zero when calling fxrtstor */ - }; - struct { /* 64-bit layout: */ - uint64_t fx_rip; /* instruction pointer */ - uint64_t fx_rdp; /* data pointer */ - }; + struct { /* 32-bit layout: */ + unsigned int fx_eip; /* eip instruction */ + unsigned short fx_cs; /* cs instruction */ + unsigned short fx_bbz2;/* better be zero when calling fxrtstor */ + unsigned int fx_dp; /* data address */ + unsigned short fx_ds; /* data segment */ + unsigned short fx_bbz3;/* better be zero when calling fxrtstor */ + }; + struct { /* 64-bit layout: */ + uint64_t fx_rip; /* instruction pointer */ + uint64_t fx_rdp; /* data pointer */ + }; }; - unsigned int fx_MXCSR; - unsigned int fx_MXCSR_MASK; + unsigned int fx_MXCSR; + unsigned int fx_MXCSR_MASK; unsigned short fx_reg_word[8][8]; /* STx/MMx registers */ - unsigned short fx_XMM_reg[8][16]; /* XMM0-XMM15 on 64 bit processors */ - /* XMM0-XMM7 on 32 bit processors... unused storage reserved */ - - unsigned char fx_reserved[16*5]; /* reserved by intel for future - * expansion */ - unsigned int fp_valid; - unsigned int fp_save_layout; - unsigned char fx_pad[8]; + unsigned short fx_XMM_reg[8][16]; /* XMM0-XMM15 on 64 bit processors */ + /* XMM0-XMM7 on 32 bit processors... 
unused storage reserved */ + + unsigned char fx_reserved[16 * 5]; /* reserved by intel for future + * expansion */ + unsigned int fp_valid; + unsigned int fp_save_layout; + unsigned char fx_pad[8]; }__attribute__ ((packed)); -struct xsave_header { - uint64_t xstate_bv; - uint64_t xcomp_bv; - uint8_t xhrsvd[48]; +struct xsave_header { + uint64_t xstate_bv; + uint64_t xcomp_bv; + uint8_t xhrsvd[48]; }; -typedef struct { uint64_t lo64, hi64; }__attribute__ ((packed)) reg128_t; +typedef struct { uint64_t lo64, hi64; }__attribute__ ((packed)) reg128_t; typedef struct { reg128_t lo128, hi128; }__attribute__ ((packed)) reg256_t; typedef struct { reg256_t lo256, hi256; }__attribute__ ((packed)) reg512_t; struct x86_avx_thread_state { - struct x86_fx_thread_state fp; - struct xsave_header _xh; /* Offset 512, xsave header */ - reg128_t x_YMM_Hi128[16]; /* Offset 576, high YMMs `*/ - /* Offset 832, end */ + struct x86_fx_thread_state fp; + struct xsave_header _xh; /* Offset 512, xsave header */ + reg128_t x_YMM_Hi128[16]; /* Offset 576, high YMMs `*/ + /* Offset 832, end */ }__attribute__ ((packed)); struct x86_avx512_thread_state { - struct x86_fx_thread_state fp; - struct xsave_header _xh; /* Offset 512, xsave header */ - reg128_t x_YMM_Hi128[16]; /* Offset 576, high YMMs */ - - uint64_t x_pad[16]; /* Offset 832, unused AMD LWP */ - uint64_t x_BNDREGS[8]; /* Offset 960, unused MPX */ - uint64_t x_BNDCTL[8]; /* Offset 1024, unused MPX */ - - uint64_t x_Opmask[8]; /* Offset 1088, K0-K7 */ - reg256_t x_ZMM_Hi256[16]; /* Offset 1152, ZMM0..15[511:256] */ - reg512_t x_Hi16_ZMM[16]; /* Offset 1664, ZMM16..31[511:0] */ - /* Offset 2688, end */ + struct x86_fx_thread_state fp; + struct xsave_header _xh; /* Offset 512, xsave header */ + reg128_t x_YMM_Hi128[16]; /* Offset 576, high YMMs */ + + uint64_t x_pad[16]; /* Offset 832, unused AMD LWP */ + uint64_t x_BNDREGS[8]; /* Offset 960, unused MPX */ + uint64_t x_BNDCTL[8]; /* Offset 1024, unused MPX */ + + uint64_t x_Opmask[8]; /* Offset 1088, K0-K7 */ + reg256_t x_ZMM_Hi256[16]; /* Offset 1152, ZMM0..15[511:256] */ + reg512_t x_Hi16_ZMM[16]; /* Offset 1664, ZMM16..31[511:0] */ + /* Offset 2688, end */ }__attribute__ ((packed)); typedef union { - struct x86_fx_thread_state fx; - struct x86_avx_thread_state avx; + struct x86_fx_thread_state fx; + struct x86_avx_thread_state avx; #if !defined(RC_HIDE_XNU_J137) - struct x86_avx512_thread_state avx512; + struct x86_avx512_thread_state avx512; #endif } x86_ext_thread_state_t; -#define EVEX_PREFIX 0x62 /* AVX512's EVEX vector operation prefix */ -#define VEX2_PREFIX 0xC5 /* VEX 2-byte prefix for Opmask instructions */ -#define VEX3_PREFIX 0xC4 /* VEX 3-byte prefix for Opmask instructions */ +#define EVEX_PREFIX 0x62 /* AVX512's EVEX vector operation prefix */ +#define VEX2_PREFIX 0xC5 /* VEX 2-byte prefix for Opmask instructions */ +#define VEX3_PREFIX 0xC4 /* VEX 3-byte prefix for Opmask instructions */ #endif /* MACH_KERNEL_PRIVATE */ /* * Control register */ -#define FPC_IE 0x0001 /* enable invalid operation - exception */ -#define FPC_IM FPC_IE -#define FPC_DE 0x0002 /* enable denormalized operation - exception */ -#define FPC_DM FPC_DE -#define FPC_ZE 0x0004 /* enable zero-divide exception */ -#define FPC_ZM FPC_ZE -#define FPC_OE 0x0008 /* enable overflow exception */ -#define FPC_OM FPC_OE -#define FPC_UE 0x0010 /* enable underflow exception */ -#define FPC_PE 0x0020 /* enable precision exception */ -#define FPC_PC 0x0300 /* precision control: */ -#define FPC_PC_24 0x0000 /* 24 bits */ -#define FPC_PC_53 
0x0200 /* 53 bits */ -#define FPC_PC_64 0x0300 /* 64 bits */ -#define FPC_RC 0x0c00 /* rounding control: */ -#define FPC_RC_RN 0x0000 /* round to nearest or even */ -#define FPC_RC_RD 0x0400 /* round down */ -#define FPC_RC_RU 0x0800 /* round up */ -#define FPC_RC_CHOP 0x0c00 /* chop */ -#define FPC_IC 0x1000 /* infinity control (obsolete) */ -#define FPC_IC_PROJ 0x0000 /* projective infinity */ -#define FPC_IC_AFF 0x1000 /* affine infinity (std) */ +#define FPC_IE 0x0001 /* enable invalid operation + * exception */ +#define FPC_IM FPC_IE +#define FPC_DE 0x0002 /* enable denormalized operation + * exception */ +#define FPC_DM FPC_DE +#define FPC_ZE 0x0004 /* enable zero-divide exception */ +#define FPC_ZM FPC_ZE +#define FPC_OE 0x0008 /* enable overflow exception */ +#define FPC_OM FPC_OE +#define FPC_UE 0x0010 /* enable underflow exception */ +#define FPC_PE 0x0020 /* enable precision exception */ +#define FPC_PC 0x0300 /* precision control: */ +#define FPC_PC_24 0x0000 /* 24 bits */ +#define FPC_PC_53 0x0200 /* 53 bits */ +#define FPC_PC_64 0x0300 /* 64 bits */ +#define FPC_RC 0x0c00 /* rounding control: */ +#define FPC_RC_RN 0x0000 /* round to nearest or even */ +#define FPC_RC_RD 0x0400 /* round down */ +#define FPC_RC_RU 0x0800 /* round up */ +#define FPC_RC_CHOP 0x0c00 /* chop */ +#define FPC_IC 0x1000 /* infinity control (obsolete) */ +#define FPC_IC_PROJ 0x0000 /* projective infinity */ +#define FPC_IC_AFF 0x1000 /* affine infinity (std) */ /* * Status register */ -#define FPS_IE 0x0001 /* invalid operation */ -#define FPS_DE 0x0002 /* denormalized operand */ -#define FPS_ZE 0x0004 /* divide by zero */ -#define FPS_OE 0x0008 /* overflow */ -#define FPS_UE 0x0010 /* underflow */ -#define FPS_PE 0x0020 /* precision */ -#define FPS_SF 0x0040 /* stack flag */ -#define FPS_ES 0x0080 /* error summary */ -#define FPS_C0 0x0100 /* condition code bit 0 */ -#define FPS_C1 0x0200 /* condition code bit 1 */ -#define FPS_C2 0x0400 /* condition code bit 2 */ -#define FPS_TOS 0x3800 /* top-of-stack pointer */ -#define FPS_TOS_SHIFT 11 -#define FPS_C3 0x4000 /* condition code bit 3 */ -#define FPS_BUSY 0x8000 /* FPU busy */ +#define FPS_IE 0x0001 /* invalid operation */ +#define FPS_DE 0x0002 /* denormalized operand */ +#define FPS_ZE 0x0004 /* divide by zero */ +#define FPS_OE 0x0008 /* overflow */ +#define FPS_UE 0x0010 /* underflow */ +#define FPS_PE 0x0020 /* precision */ +#define FPS_SF 0x0040 /* stack flag */ +#define FPS_ES 0x0080 /* error summary */ +#define FPS_C0 0x0100 /* condition code bit 0 */ +#define FPS_C1 0x0200 /* condition code bit 1 */ +#define FPS_C2 0x0400 /* condition code bit 2 */ +#define FPS_TOS 0x3800 /* top-of-stack pointer */ +#define FPS_TOS_SHIFT 11 +#define FPS_C3 0x4000 /* condition code bit 3 */ +#define FPS_BUSY 0x8000 /* FPU busy */ /* * Kind of floating-point support provided by kernel. 
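*
* (Worked example, illustrative only: composing a control word from the
* masks above and pulling the top-of-stack field out of a status word;
* `control' and `status' are hypothetical locals:)
*
*	unsigned short control = FPC_PC_64 | FPC_RC_RN | FPC_IC_AFF;
*	unsigned short top = (status & FPS_TOS) >> FPS_TOS_SHIFT;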
*/ -#define FP_NO 0 /* no floating point */ -#define FP_SOFT 1 /* software FP emulator */ -#define FP_287 2 /* 80287 */ -#define FP_387 3 /* 80387 or 80486 */ -#define FP_FXSR 4 /* Fast save/restore SIMD Extension */ +#define FP_NO 0 /* no floating point */ +#define FP_SOFT 1 /* software FP emulator */ +#define FP_287 2 /* 80287 */ +#define FP_387 3 /* 80387 or 80486 */ +#define FP_FXSR 4 /* Fast save/restore SIMD Extension */ -#endif /* _I386_FP_SAVE_H_ */ +#endif /* _I386_FP_SAVE_H_ */ diff --git a/osfmk/mach/i386/kern_return.h b/osfmk/mach/i386/kern_return.h index a9df708de..5caefe8a6 100644 --- a/osfmk/mach/i386/kern_return.h +++ b/osfmk/mach/i386/kern_return.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,11 +64,11 @@ * Machine-dependent kernel return definitions. */ -#ifndef _MACH_I386_KERN_RETURN_H_ +#ifndef _MACH_I386_KERN_RETURN_H_ #define _MACH_I386_KERN_RETURN_H_ -#ifndef ASSEMBLER -typedef int kern_return_t; -#endif /* ASSEMBLER */ +#ifndef ASSEMBLER +typedef int kern_return_t; +#endif /* ASSEMBLER */ -#endif /* _MACH_I386_KERN_RETURN_H_ */ +#endif /* _MACH_I386_KERN_RETURN_H_ */ diff --git a/osfmk/mach/i386/ndr_def.h b/osfmk/mach/i386/ndr_def.h index 5163f639c..0e36b2ff7 100644 --- a/osfmk/mach/i386/ndr_def.h +++ b/osfmk/mach/i386/ndr_def.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,10 +34,10 @@ #include NDR_record_t NDR_record = { - 0, /* mig_reserved */ - 0, /* mig_reserved */ - 0, /* mig_reserved */ - NDR_PROTOCOL_2_0, + 0, /* mig_reserved */ + 0, /* mig_reserved */ + 0, /* mig_reserved */ + NDR_PROTOCOL_2_0, NDR_INT_LITTLE_ENDIAN, NDR_CHAR_ASCII, NDR_FLOAT_IEEE, diff --git a/osfmk/mach/i386/processor_info.h b/osfmk/mach/i386/processor_info.h index 8272c6d82..a1930895e 100644 --- a/osfmk/mach/i386/processor_info.h +++ b/osfmk/mach/i386/processor_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -31,7 +31,7 @@ * Data structure definitions for i386 specific processor control */ -#ifndef _MACH_I386_PROCESSOR_INFO_H_ +#ifndef _MACH_I386_PROCESSOR_INFO_H_ #define _MACH_I386_PROCESSOR_INFO_H_ -#endif /* _MACH_I386_PROCESSOR_INFO_H_ */ +#endif /* _MACH_I386_PROCESSOR_INFO_H_ */ diff --git a/osfmk/mach/i386/rpc.h b/osfmk/mach/i386/rpc.h index 55561f9eb..396bdea32 100644 --- a/osfmk/mach/i386/rpc.h +++ b/osfmk/mach/i386/rpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifndef _MACH_I386_RPC_H_ -#define _MACH_I386_RPC_H_ - -#endif /* _MACH_I386_RPC_H_ */ +#ifndef _MACH_I386_RPC_H_ +#define _MACH_I386_RPC_H_ +#endif /* _MACH_I386_RPC_H_ */ diff --git a/osfmk/mach/i386/thread_state.h b/osfmk/mach/i386/thread_state.h index 9ed704007..69d1a03d9 100644 --- a/osfmk/mach/i386/thread_state.h +++ b/osfmk/mach/i386/thread_state.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,13 +34,13 @@ /* Size of maximum exported thread state in words */ #if !defined(RC_HIDE_XNU_J137) -#define I386_THREAD_STATE_MAX (614) /* Size of biggest state possible */ +#define I386_THREAD_STATE_MAX (614) /* Size of biggest state possible */ #else -#define I386_THREAD_STATE_MAX (224) /* Size of biggest state possible */ +#define I386_THREAD_STATE_MAX (224) /* Size of biggest state possible */ #endif /* !defined(RC_HIDE_XNU_J137) */ #if defined (__i386__) || defined(__x86_64__) -#define THREAD_STATE_MAX I386_THREAD_STATE_MAX +#define THREAD_STATE_MAX I386_THREAD_STATE_MAX #endif -#endif /* _MACH_I386_THREAD_STATE_H_ */ +#endif /* _MACH_I386_THREAD_STATE_H_ */ diff --git a/osfmk/mach/i386/thread_status.h b/osfmk/mach/i386/thread_status.h index fea611b4d..324ac645b 100644 --- a/osfmk/mach/i386/thread_status.h +++ b/osfmk/mach/i386/thread_status.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,7 +64,7 @@ * state as applied to I386 processors. 
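*
* (The flavors declared below are consumed through the generic thread
* state calls; a minimal sketch, assuming `thread' is a thread port the
* caller already holds:)
*
*	x86_thread_state64_t state;
*	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
*	kern_return_t kr = thread_get_state(thread, x86_THREAD_STATE64,
*	    (thread_state_t)&state, &count);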
*/ -#ifndef _MACH_I386_THREAD_STATUS_H_ +#ifndef _MACH_I386_THREAD_STATUS_H_ #define _MACH_I386_THREAD_STATUS_H_ #include @@ -93,44 +93,44 @@ * they are externally known which is the only reason we don't just get * rid of them */ -#define i386_THREAD_STATE 1 -#define i386_FLOAT_STATE 2 -#define i386_EXCEPTION_STATE 3 +#define i386_THREAD_STATE 1 +#define i386_FLOAT_STATE 2 +#define i386_EXCEPTION_STATE 3 /* * THREAD_STATE_FLAVOR_LIST 0 - * these are the supported flavors + * these are the supported flavors */ -#define x86_THREAD_STATE32 1 -#define x86_FLOAT_STATE32 2 -#define x86_EXCEPTION_STATE32 3 -#define x86_THREAD_STATE64 4 -#define x86_FLOAT_STATE64 5 -#define x86_EXCEPTION_STATE64 6 -#define x86_THREAD_STATE 7 -#define x86_FLOAT_STATE 8 -#define x86_EXCEPTION_STATE 9 -#define x86_DEBUG_STATE32 10 -#define x86_DEBUG_STATE64 11 -#define x86_DEBUG_STATE 12 -#define THREAD_STATE_NONE 13 +#define x86_THREAD_STATE32 1 +#define x86_FLOAT_STATE32 2 +#define x86_EXCEPTION_STATE32 3 +#define x86_THREAD_STATE64 4 +#define x86_FLOAT_STATE64 5 +#define x86_EXCEPTION_STATE64 6 +#define x86_THREAD_STATE 7 +#define x86_FLOAT_STATE 8 +#define x86_EXCEPTION_STATE 9 +#define x86_DEBUG_STATE32 10 +#define x86_DEBUG_STATE64 11 +#define x86_DEBUG_STATE 12 +#define THREAD_STATE_NONE 13 /* 14 and 15 are used for the internal x86_SAVED_STATE flavours */ /* Arrange for flavors to take sequential values, 32-bit, 64-bit, non-specific */ -#define x86_AVX_STATE32 16 -#define x86_AVX_STATE64 (x86_AVX_STATE32 + 1) -#define x86_AVX_STATE (x86_AVX_STATE32 + 2) +#define x86_AVX_STATE32 16 +#define x86_AVX_STATE64 (x86_AVX_STATE32 + 1) +#define x86_AVX_STATE (x86_AVX_STATE32 + 2) #if !defined(RC_HIDE_XNU_J137) -#define x86_AVX512_STATE32 19 -#define x86_AVX512_STATE64 (x86_AVX512_STATE32 + 1) -#define x86_AVX512_STATE (x86_AVX512_STATE32 + 2) +#define x86_AVX512_STATE32 19 +#define x86_AVX512_STATE64 (x86_AVX512_STATE32 + 1) +#define x86_AVX512_STATE (x86_AVX512_STATE32 + 2) #endif /* not RC_HIDE_XNU_J137 */ - +#define x86_THREAD_FULL_STATE64 23 /* * Largest state on this machine: * (be sure mach/machine/thread_state.h matches!) 
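*
* (Sketch for the x86_THREAD_FULL_STATE64 flavor added above: it carries
* the ordinary 64-bit register file in ss64 plus the segment state from
* _STRUCT_X86_THREAD_FULL_STATE64. Whether a given kernel honors the
* flavor for a particular thread is version-dependent, so treat this as
* an assumption-laden example:)
*
*	x86_thread_full_state64_t full;
*	mach_msg_type_number_t count = x86_THREAD_FULL_STATE64_COUNT;
*	kern_return_t kr = thread_get_state(thread, x86_THREAD_FULL_STATE64,
*	    (thread_state_t)&full, &count);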
*/ -#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX +#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX /* * VALID_THREAD_STATE_FLAVOR is a platform specific macro that when passed @@ -140,24 +140,25 @@ */ #if !defined(RC_HIDE_XNU_J137) #define VALID_THREAD_STATE_FLAVOR(x) \ - ((x == x86_THREAD_STATE32) || \ - (x == x86_FLOAT_STATE32) || \ - (x == x86_EXCEPTION_STATE32) || \ - (x == x86_DEBUG_STATE32) || \ - (x == x86_THREAD_STATE64) || \ - (x == x86_FLOAT_STATE64) || \ - (x == x86_EXCEPTION_STATE64) || \ - (x == x86_DEBUG_STATE64) || \ - (x == x86_THREAD_STATE) || \ - (x == x86_FLOAT_STATE) || \ - (x == x86_EXCEPTION_STATE) || \ - (x == x86_DEBUG_STATE) || \ - (x == x86_AVX_STATE32) || \ - (x == x86_AVX_STATE64) || \ - (x == x86_AVX_STATE) || \ - (x == x86_AVX512_STATE32) || \ - (x == x86_AVX512_STATE64) || \ - (x == x86_AVX512_STATE) || \ + ((x == x86_THREAD_STATE32) || \ + (x == x86_FLOAT_STATE32) || \ + (x == x86_EXCEPTION_STATE32) || \ + (x == x86_DEBUG_STATE32) || \ + (x == x86_THREAD_STATE64) || \ + (x == x86_THREAD_FULL_STATE64) || \ + (x == x86_FLOAT_STATE64) || \ + (x == x86_EXCEPTION_STATE64) || \ + (x == x86_DEBUG_STATE64) || \ + (x == x86_THREAD_STATE) || \ + (x == x86_FLOAT_STATE) || \ + (x == x86_EXCEPTION_STATE) || \ + (x == x86_DEBUG_STATE) || \ + (x == x86_AVX_STATE32) || \ + (x == x86_AVX_STATE64) || \ + (x == x86_AVX_STATE) || \ + (x == x86_AVX512_STATE32) || \ + (x == x86_AVX512_STATE64) || \ + (x == x86_AVX512_STATE) || \ (x == THREAD_STATE_NONE)) #else #define VALID_THREAD_STATE_FLAVOR(x) \ @@ -180,29 +181,29 @@ #endif /* not RC_HIDE_XNU_J137 */ struct x86_state_hdr { - uint32_t flavor; - uint32_t count; + uint32_t flavor; + uint32_t count; }; typedef struct x86_state_hdr x86_state_hdr_t; /* * Default segment register values. 
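*
* (Decode note for the selectors defined just below, using the standard
* x86 selector layout: low two bits RPL, bit 2 table indicator,
* remaining bits descriptor index. The SEL_* helper names are invented
* for this sketch:)
*
*	#define SEL_RPL(s)	((s) & 0x3)
*	#define SEL_TI(s)	(((s) >> 2) & 0x1)
*	#define SEL_INDEX(s)	((s) >> 3)
*
* so USER_CODE_SELECTOR (0x0017) decodes to index 2, TI 1, RPL 3, while
* KERN_CODE_SELECTOR (0x0008) decodes to index 1, TI 0, RPL 0.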
*/ - -#define USER_CODE_SELECTOR 0x0017 -#define USER_DATA_SELECTOR 0x001f -#define KERN_CODE_SELECTOR 0x0008 -#define KERN_DATA_SELECTOR 0x0010 + +#define USER_CODE_SELECTOR 0x0017 +#define USER_DATA_SELECTOR 0x001f +#define KERN_CODE_SELECTOR 0x0008 +#define KERN_DATA_SELECTOR 0x0010 /* * to be deprecated in the future */ typedef _STRUCT_X86_THREAD_STATE32 i386_thread_state_t; -#define i386_THREAD_STATE_COUNT ((mach_msg_type_number_t) \ +#define i386_THREAD_STATE_COUNT ((mach_msg_type_number_t) \ ( sizeof (i386_thread_state_t) / sizeof (int) )) typedef _STRUCT_X86_THREAD_STATE32 x86_thread_state32_t; -#define x86_THREAD_STATE32_COUNT ((mach_msg_type_number_t) \ +#define x86_THREAD_STATE32_COUNT ((mach_msg_type_number_t) \ ( sizeof (x86_thread_state32_t) / sizeof (int) )) /* @@ -210,31 +211,31 @@ typedef _STRUCT_X86_THREAD_STATE32 x86_thread_state32_t; */ typedef _STRUCT_X86_FLOAT_STATE32 i386_float_state_t; #define i386_FLOAT_STATE_COUNT ((mach_msg_type_number_t) \ - (sizeof(i386_float_state_t)/sizeof(unsigned int))) + (sizeof(i386_float_state_t)/sizeof(unsigned int))) typedef _STRUCT_X86_FLOAT_STATE32 x86_float_state32_t; #define x86_FLOAT_STATE32_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_float_state32_t)/sizeof(unsigned int))) + (sizeof(x86_float_state32_t)/sizeof(unsigned int))) typedef _STRUCT_X86_AVX_STATE32 x86_avx_state32_t; #define x86_AVX_STATE32_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_avx_state32_t)/sizeof(unsigned int))) + (sizeof(x86_avx_state32_t)/sizeof(unsigned int))) #if !defined(RC_HIDE_XNU_J137) typedef _STRUCT_X86_AVX512_STATE32 x86_avx512_state32_t; #define x86_AVX512_STATE32_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_avx512_state32_t)/sizeof(unsigned int))) + (sizeof(x86_avx512_state32_t)/sizeof(unsigned int))) #endif /* not RC_HIDE_XNU_J137 */ /* * to be deprecated in the future */ typedef _STRUCT_X86_EXCEPTION_STATE32 i386_exception_state_t; -#define i386_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \ +#define i386_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \ ( sizeof (i386_exception_state_t) / sizeof (int) )) typedef _STRUCT_X86_EXCEPTION_STATE32 x86_exception_state32_t; -#define x86_EXCEPTION_STATE32_COUNT ((mach_msg_type_number_t) \ +#define x86_EXCEPTION_STATE32_COUNT ((mach_msg_type_number_t) \ ( sizeof (x86_exception_state32_t) / sizeof (int) )) #define I386_EXCEPTION_STATE_COUNT i386_EXCEPTION_STATE_COUNT @@ -246,31 +247,35 @@ typedef _STRUCT_X86_DEBUG_STATE32 x86_debug_state32_t; #define X86_DEBUG_STATE32_COUNT x86_DEBUG_STATE32_COUNT typedef _STRUCT_X86_THREAD_STATE64 x86_thread_state64_t; -#define x86_THREAD_STATE64_COUNT ((mach_msg_type_number_t) \ +#define x86_THREAD_STATE64_COUNT ((mach_msg_type_number_t) \ ( sizeof (x86_thread_state64_t) / sizeof (int) )) +typedef _STRUCT_X86_THREAD_FULL_STATE64 x86_thread_full_state64_t; +#define x86_THREAD_FULL_STATE64_COUNT ((mach_msg_type_number_t) \ + ( sizeof (x86_thread_full_state64_t) / sizeof (int) )) + typedef _STRUCT_X86_FLOAT_STATE64 x86_float_state64_t; #define x86_FLOAT_STATE64_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_float_state64_t)/sizeof(unsigned int))) + (sizeof(x86_float_state64_t)/sizeof(unsigned int))) typedef _STRUCT_X86_AVX_STATE64 x86_avx_state64_t; #define x86_AVX_STATE64_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_avx_state64_t)/sizeof(unsigned int))) + (sizeof(x86_avx_state64_t)/sizeof(unsigned int))) #if !defined(RC_HIDE_XNU_J137) typedef _STRUCT_X86_AVX512_STATE64 x86_avx512_state64_t; #define x86_AVX512_STATE64_COUNT ((mach_msg_type_number_t) \ - 
(sizeof(x86_avx512_state64_t)/sizeof(unsigned int))) + (sizeof(x86_avx512_state64_t)/sizeof(unsigned int))) #endif /* not RC_HIDE_XNU_J137 */ typedef _STRUCT_X86_EXCEPTION_STATE64 x86_exception_state64_t; -#define x86_EXCEPTION_STATE64_COUNT ((mach_msg_type_number_t) \ +#define x86_EXCEPTION_STATE64_COUNT ((mach_msg_type_number_t) \ ( sizeof (x86_exception_state64_t) / sizeof (int) )) #define X86_EXCEPTION_STATE64_COUNT x86_EXCEPTION_STATE64_COUNT typedef _STRUCT_X86_DEBUG_STATE64 x86_debug_state64_t; -#define x86_DEBUG_STATE64_COUNT ((mach_msg_type_number_t) \ +#define x86_DEBUG_STATE64_COUNT ((mach_msg_type_number_t) \ ( sizeof (x86_debug_state64_t) / sizeof (int) )) #define X86_DEBUG_STATE64_COUNT x86_DEBUG_STATE64_COUNT @@ -279,92 +284,92 @@ typedef _STRUCT_X86_DEBUG_STATE64 x86_debug_state64_t; * Combined thread, float and exception states */ struct x86_thread_state { - x86_state_hdr_t tsh; + x86_state_hdr_t tsh; union { - x86_thread_state32_t ts32; - x86_thread_state64_t ts64; + x86_thread_state32_t ts32; + x86_thread_state64_t ts64; } uts; }; struct x86_float_state { - x86_state_hdr_t fsh; + x86_state_hdr_t fsh; union { - x86_float_state32_t fs32; - x86_float_state64_t fs64; + x86_float_state32_t fs32; + x86_float_state64_t fs64; } ufs; }; struct x86_exception_state { - x86_state_hdr_t esh; + x86_state_hdr_t esh; union { - x86_exception_state32_t es32; - x86_exception_state64_t es64; + x86_exception_state32_t es32; + x86_exception_state64_t es64; } ues; }; struct x86_debug_state { - x86_state_hdr_t dsh; + x86_state_hdr_t dsh; union { - x86_debug_state32_t ds32; - x86_debug_state64_t ds64; + x86_debug_state32_t ds32; + x86_debug_state64_t ds64; } uds; }; struct x86_avx_state { - x86_state_hdr_t ash; + x86_state_hdr_t ash; union { - x86_avx_state32_t as32; - x86_avx_state64_t as64; + x86_avx_state32_t as32; + x86_avx_state64_t as64; } ufs; }; #if !defined(RC_HIDE_XNU_J137) struct x86_avx512_state { - x86_state_hdr_t ash; + x86_state_hdr_t ash; union { - x86_avx512_state32_t as32; - x86_avx512_state64_t as64; + x86_avx512_state32_t as32; + x86_avx512_state64_t as64; } ufs; }; #endif /* not RC_HIDE_XNU_J137 */ typedef struct x86_thread_state x86_thread_state_t; -#define x86_THREAD_STATE_COUNT ((mach_msg_type_number_t) \ - ( sizeof (x86_thread_state_t) / sizeof (int) )) +#define x86_THREAD_STATE_COUNT ((mach_msg_type_number_t) \ + ( sizeof (x86_thread_state_t) / sizeof (int) )) typedef struct x86_float_state x86_float_state_t; #define x86_FLOAT_STATE_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_float_state_t)/sizeof(unsigned int))) + (sizeof(x86_float_state_t)/sizeof(unsigned int))) typedef struct x86_exception_state x86_exception_state_t; #define x86_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_exception_state_t)/sizeof(unsigned int))) + (sizeof(x86_exception_state_t)/sizeof(unsigned int))) typedef struct x86_debug_state x86_debug_state_t; #define x86_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_debug_state_t)/sizeof(unsigned int))) + (sizeof(x86_debug_state_t)/sizeof(unsigned int))) typedef struct x86_avx_state x86_avx_state_t; #define x86_AVX_STATE_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_avx_state_t)/sizeof(unsigned int))) + (sizeof(x86_avx_state_t)/sizeof(unsigned int))) #if !defined(RC_HIDE_XNU_J137) typedef struct x86_avx512_state x86_avx512_state_t; #define x86_AVX512_STATE_COUNT ((mach_msg_type_number_t) \ - (sizeof(x86_avx512_state_t)/sizeof(unsigned int))) + (sizeof(x86_avx512_state_t)/sizeof(unsigned int))) #endif /* not 
RC_HIDE_XNU_J137 */ /* * Machine-independent way for servers and Mach's exception mechanism to * choose the most efficient state flavor for exception RPC's: */ -#define MACHINE_THREAD_STATE x86_THREAD_STATE -#define MACHINE_THREAD_STATE_COUNT x86_THREAD_STATE_COUNT +#define MACHINE_THREAD_STATE x86_THREAD_STATE +#define MACHINE_THREAD_STATE_COUNT x86_THREAD_STATE_COUNT #ifdef XNU_KERNEL_PRIVATE -#define x86_SAVED_STATE32 THREAD_STATE_NONE + 1 -#define x86_SAVED_STATE64 THREAD_STATE_NONE + 2 +#define x86_SAVED_STATE32 THREAD_STATE_NONE + 1 +#define x86_SAVED_STATE64 THREAD_STATE_NONE + 2 /* * The format in which thread state is saved by Mach on this machine. This @@ -372,30 +377,30 @@ typedef struct x86_avx512_state x86_avx512_state_t; * servers, because copying can be avoided: */ struct x86_saved_state32 { - uint32_t gs; - uint32_t fs; - uint32_t es; - uint32_t ds; - uint32_t edi; - uint32_t esi; - uint32_t ebp; - uint32_t cr2; /* kernel esp stored by pusha - we save cr2 here later */ - uint32_t ebx; - uint32_t edx; - uint32_t ecx; - uint32_t eax; - uint16_t trapno; - uint16_t cpu; - uint32_t err; - uint32_t eip; - uint32_t cs; - uint32_t efl; - uint32_t uesp; - uint32_t ss; + uint32_t gs; + uint32_t fs; + uint32_t es; + uint32_t ds; + uint32_t edi; + uint32_t esi; + uint32_t ebp; + uint32_t cr2; /* kernel esp stored by pusha - we save cr2 here later */ + uint32_t ebx; + uint32_t edx; + uint32_t ecx; + uint32_t eax; + uint16_t trapno; + uint16_t cpu; + uint32_t err; + uint32_t eip; + uint32_t cs; + uint32_t efl; + uint32_t uesp; + uint32_t ss; }; typedef struct x86_saved_state32 x86_saved_state32_t; -#define x86_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \ +#define x86_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \ (sizeof (x86_saved_state32_t)/sizeof(unsigned int))) #pragma pack(4) @@ -405,20 +410,20 @@ typedef struct x86_saved_state32 x86_saved_state32_t; * on any exception/trap/interrupt. 
*/ struct x86_64_intr_stack_frame { - uint16_t trapno; - uint16_t cpu; - uint32_t _pad; - uint64_t trapfn; - uint64_t err; - uint64_t rip; - uint64_t cs; - uint64_t rflags; - uint64_t rsp; - uint64_t ss; + uint16_t trapno; + uint16_t cpu; + uint32_t _pad; + uint64_t trapfn; + uint64_t err; + uint64_t rip; + uint64_t cs; + uint64_t rflags; + uint64_t rsp; + uint64_t ss; }; typedef struct x86_64_intr_stack_frame x86_64_intr_stack_frame_t; _Static_assert((sizeof(x86_64_intr_stack_frame_t) % 16) == 0, - "interrupt stack frame size must be a multiple of 16 bytes"); + "interrupt stack frame size must be a multiple of 16 bytes"); /* * thread state format for task running in 64bit long mode @@ -427,33 +432,34 @@ _Static_assert((sizeof(x86_64_intr_stack_frame_t) % 16) == 0, * is no need for an x86_saved_state64_from_kernel variant */ struct x86_saved_state64 { - uint64_t rdi; /* arg0 for system call */ - uint64_t rsi; - uint64_t rdx; - uint64_t r10; /* R10 := RCX prior to syscall trap */ - uint64_t r8; - uint64_t r9; /* arg5 for system call */ - - uint64_t cr2; - uint64_t r15; - uint64_t r14; - uint64_t r13; - uint64_t r12; - uint64_t r11; - uint64_t rbp; - uint64_t rbx; - uint64_t rcx; - uint64_t rax; - - uint32_t gs; - uint32_t fs; - - uint64_t _pad; - - struct x86_64_intr_stack_frame isf; + uint64_t rdi; /* arg0 for system call */ + uint64_t rsi; + uint64_t rdx; + uint64_t r10; /* R10 := RCX prior to syscall trap */ + uint64_t r8; + uint64_t r9; /* arg5 for system call */ + + uint64_t cr2; + uint64_t r15; + uint64_t r14; + uint64_t r13; + uint64_t r12; + uint64_t r11; + uint64_t rbp; + uint64_t rbx; + uint64_t rcx; + uint64_t rax; + + uint32_t gs; + uint32_t fs; + + uint32_t ds; + uint32_t es; + + struct x86_64_intr_stack_frame isf; }; typedef struct x86_saved_state64 x86_saved_state64_t; -#define x86_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \ +#define x86_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \ (sizeof (struct x86_saved_state64)/sizeof(unsigned int))) extern uint32_t get_eflags_exportmask(void); @@ -462,41 +468,41 @@ extern uint32_t get_eflags_exportmask(void); * Unified, tagged saved state: */ typedef struct { - uint32_t flavor; - uint32_t _pad_for_16byte_alignment[3]; + uint32_t flavor; + uint32_t _pad_for_16byte_alignment[3]; union { - x86_saved_state32_t ss_32; - x86_saved_state64_t ss_64; + x86_saved_state32_t ss_32; + x86_saved_state64_t ss_64; } uss; } x86_saved_state_t; -#define ss_32 uss.ss_32 -#define ss_64 uss.ss_64 +#define ss_32 uss.ss_32 +#define ss_64 uss.ss_64 #pragma pack() static inline boolean_t is_saved_state64(x86_saved_state_t *iss) { - return (iss->flavor == x86_SAVED_STATE64); + return iss->flavor == x86_SAVED_STATE64; } static inline boolean_t is_saved_state32(x86_saved_state_t *iss) { - return (iss->flavor == x86_SAVED_STATE32); + return iss->flavor == x86_SAVED_STATE32; } static inline x86_saved_state32_t * saved_state32(x86_saved_state_t *iss) { - return &iss->ss_32; + return &iss->ss_32; } static inline x86_saved_state64_t * saved_state64(x86_saved_state_t *iss) { - return &iss->ss_64; + return &iss->ss_64; } #endif /* XNU_KERNEL_PRIVATE */ -#endif /* _MACH_I386_THREAD_STATUS_H_ */ +#endif /* _MACH_I386_THREAD_STATUS_H_ */ diff --git a/osfmk/mach/i386/vm_param.h b/osfmk/mach/i386/vm_param.h index 18a8fcab8..f140978d7 100644 --- a/osfmk/mach/i386/vm_param.h +++ b/osfmk/mach/i386/vm_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. 
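[editor's note] The flavor-tagged wrapper above is how kernel code avoids guessing the width of a saved frame: test the tag, then go through the matching accessor. A sketch of the idiom built only from the definitions in this hunk (kernel-internal, so it compiles only under XNU_KERNEL_PRIVATE):

/* Fetch the interrupted instruction pointer for either task width.
 * `regs` is the state captured on kernel entry. */
static uint64_t
saved_instruction_pointer(x86_saved_state_t *regs)
{
    if (is_saved_state64(regs)) {
        /* 64-bit frame: rip is in the hardware-pushed isf. */
        return saved_state64(regs)->isf.rip;
    }
    /* 32-bit frame: eip sits in the flat pusha-style layout. */
    return saved_state32(regs)->eip;
}
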
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,39 +22,39 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -/* +/* * Copyright (c) 1994 The University of Utah and * the Computer Systems Laboratory at the University of Utah (CSL). * All rights reserved. @@ -87,17 +87,17 @@ * them. */ -#ifndef _MACH_I386_VM_PARAM_H_ +#ifndef _MACH_I386_VM_PARAM_H_ #define _MACH_I386_VM_PARAM_H_ -#define BYTE_SIZE 8 /* byte size in bits */ +#define BYTE_SIZE 8 /* byte size in bits */ -#define I386_PGBYTES 4096 /* bytes per 80386 page */ -#define I386_PGSHIFT 12 /* bitshift for pages */ +#define I386_PGBYTES 4096 /* bytes per 80386 page */ +#define I386_PGSHIFT 12 /* bitshift for pages */ -#define PAGE_SIZE I386_PGBYTES -#define PAGE_SHIFT I386_PGSHIFT -#define PAGE_MASK (PAGE_SIZE - 1) +#define PAGE_SIZE I386_PGBYTES +#define PAGE_SHIFT I386_PGSHIFT +#define PAGE_MASK (PAGE_SIZE - 1) #define PAGE_MAX_SHIFT PAGE_SHIFT #define PAGE_MAX_SIZE PAGE_SIZE @@ -107,19 +107,19 @@ #define PAGE_MIN_SIZE PAGE_SIZE #define PAGE_MIN_MASK PAGE_MASK -#define I386_LPGBYTES 2*1024*1024 /* bytes per large page */ -#define I386_LPGSHIFT 21 /* bitshift for large pages */ -#define I386_LPGMASK (I386_LPGBYTES-1) +#define I386_LPGBYTES 2*1024*1024 /* bytes per large page */ +#define I386_LPGSHIFT 21 /* bitshift for large pages */ +#define I386_LPGMASK (I386_LPGBYTES-1) /* * Convert bytes to pages and convert pages to bytes. * No rounding is used. 
*/ -#define i386_btop(x) ((ppnum_t)((x) >> I386_PGSHIFT)) -#define machine_btop(x) i386_btop(x) -#define i386_ptob(x) (((pmap_paddr_t)(x)) << I386_PGSHIFT) -#define machine_ptob(x) i386_ptob(x) +#define i386_btop(x) ((ppnum_t)((x) >> I386_PGSHIFT)) +#define machine_btop(x) i386_btop(x) +#define i386_ptob(x) (((pmap_paddr_t)(x)) << I386_PGSHIFT) +#define machine_ptob(x) i386_ptob(x) /* * Round off or truncate to the nearest page. These will work @@ -127,29 +127,29 @@ * bytes. */ -#define i386_round_page(x) ((((pmap_paddr_t)(x)) + I386_PGBYTES - 1) & \ - ~(I386_PGBYTES-1)) -#define i386_trunc_page(x) (((pmap_paddr_t)(x)) & ~(I386_PGBYTES-1)) +#define i386_round_page(x) ((((pmap_paddr_t)(x)) + I386_PGBYTES - 1) & \ + ~(I386_PGBYTES-1)) +#define i386_trunc_page(x) (((pmap_paddr_t)(x)) & ~(I386_PGBYTES-1)) -#define VM_MIN_ADDRESS64 ((user_addr_t) 0x0000000000000000ULL) +#define VM_MIN_ADDRESS64 ((user_addr_t) 0x0000000000000000ULL) /* * default top of user stack... it grows down from here */ -#define VM_USRSTACK64 ((user_addr_t) 0x00007FFEEFC00000ULL) +#define VM_USRSTACK64 ((user_addr_t) 0x00007FFEEFC00000ULL) /* * XXX TODO: Obsolete? */ -#define VM_DYLD64 ((user_addr_t) 0x00007FFF5FC00000ULL) -#define VM_LIB64_SHR_DATA ((user_addr_t) 0x00007FFF60000000ULL) -#define VM_LIB64_SHR_TEXT ((user_addr_t) 0x00007FFF80000000ULL) +#define VM_DYLD64 ((user_addr_t) 0x00007FFF5FC00000ULL) +#define VM_LIB64_SHR_DATA ((user_addr_t) 0x00007FFF60000000ULL) +#define VM_LIB64_SHR_TEXT ((user_addr_t) 0x00007FFF80000000ULL) /* * the end of the usable user address space , for now about 47 bits. * the 64 bit commpage is past the end of this */ -#define VM_MAX_PAGE_ADDRESS ((user_addr_t) 0x00007FFFFFE00000ULL) +#define VM_MAX_PAGE_ADDRESS ((user_addr_t) 0x00007FFFFFE00000ULL) /* * canonical end of user address space for limits checking */ @@ -157,32 +157,31 @@ /* system-wide values */ -#define MACH_VM_MIN_ADDRESS ((mach_vm_offset_t) 0) -#define MACH_VM_MAX_ADDRESS ((mach_vm_offset_t) VM_MAX_PAGE_ADDRESS) +#define MACH_VM_MIN_ADDRESS ((mach_vm_offset_t) 0) +#define MACH_VM_MAX_ADDRESS ((mach_vm_offset_t) VM_MAX_PAGE_ADDRESS) /* process-relative values (all 32-bit legacy only for now) */ -#define VM_MIN_ADDRESS ((vm_offset_t) 0) -#define VM_USRSTACK32 ((vm_offset_t) 0xC0000000) /* ASLR slides stack down by up to 1 MB */ -#define VM_MAX_ADDRESS ((vm_offset_t) 0xFFE00000) +#define VM_MIN_ADDRESS ((vm_offset_t) 0) +#define VM_USRSTACK32 ((vm_offset_t) 0xC0000000) /* ASLR slides stack down by up to 1 MB */ +#define VM_MAX_ADDRESS ((vm_offset_t) 0xFFE00000) -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #define TEST_PAGE_SIZE_16K FALSE #define TEST_PAGE_SIZE_4K TRUE /* Kernel-wide values */ - -#define KB (1024ULL) -#define MB (1024*KB) -#define GB (1024*MB) +#define KB (1024ULL) +#define MB (1024*KB) +#define GB (1024*MB) /* * Maximum physical memory supported. 
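[editor's note] The conversions above are pure bit arithmetic on the 4 KiB page size. A self-contained sketch with stand-in macros (hypothetical names; builds in user space) showing what i386_btop(), i386_round_page(), and i386_trunc_page() compute:

#include <assert.h>
#include <stdint.h>

#define PGBYTES 4096u                    /* stands in for I386_PGBYTES */
#define btop(x)       ((x) >> 12)        /* stands in for i386_btop()  */
#define round_page(x) (((x) + PGBYTES - 1) & ~(uintptr_t)(PGBYTES - 1))
#define trunc_page(x) ((x) & ~(uintptr_t)(PGBYTES - 1))

int main(void)
{
    assert(btop(0x5000) == 5);              /* byte 0x5000 is page 5 */
    assert(trunc_page(0x5123) == 0x5000);   /* round down            */
    assert(round_page(0x5123) == 0x6000);   /* round up              */
    assert(round_page(0x5000) == 0x5000);   /* aligned input: no-op  */
    return 0;
}
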
*/ -#define K32_MAXMEM (32*GB) -#define K64_MAXMEM (252*GB) -#define KERNEL_MAXMEM K64_MAXMEM +#define K32_MAXMEM (32*GB) +#define K64_MAXMEM (1536*GB) +#define KERNEL_MAXMEM K64_MAXMEM /* * XXX @@ -195,22 +194,22 @@ #define KERNEL_IMAGE_TO_PHYS(x) (x) #define VM_KERNEL_POINTER_SIGNIFICANT_BITS 39 -#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0xFFFFFF8000000000UL) -#define VM_MIN_KERNEL_PAGE ((ppnum_t)0) -#define VM_MIN_KERNEL_AND_KEXT_ADDRESS (VM_MIN_KERNEL_ADDRESS - 0x80000000ULL) -#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0xFFFFFFFFFFFFEFFFUL) -#define VM_MAX_KERNEL_ADDRESS_EFI32 ((vm_offset_t) 0xFFFFFF80FFFFEFFFUL) +#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0xFFFFFF8000000000UL) +#define VM_MIN_KERNEL_PAGE ((ppnum_t)0) +#define VM_MIN_KERNEL_AND_KEXT_ADDRESS (VM_MIN_KERNEL_ADDRESS - 0x80000000ULL) +#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0xFFFFFFFFFFFFEFFFUL) +#define VM_MAX_KERNEL_ADDRESS_EFI32 ((vm_offset_t) 0xFFFFFF80FFFFEFFFUL) #define KEXT_ALLOC_MAX_OFFSET (2 * 1024 * 1024 * 1024UL) #define KEXT_ALLOC_BASE(x) ((x) - KEXT_ALLOC_MAX_OFFSET) #define KEXT_ALLOC_SIZE(x) (KEXT_ALLOC_MAX_OFFSET - (x)) #define VM_KERNEL_STRIP_PTR(_v) (_v) -#define VM_KERNEL_ADDRESS(va) ((((vm_address_t)(va))>=VM_MIN_KERNEL_AND_KEXT_ADDRESS) && \ - (((vm_address_t)(va))<=VM_MAX_KERNEL_ADDRESS)) +#define VM_KERNEL_ADDRESS(va) ((((vm_address_t)(va))>=VM_MIN_KERNEL_AND_KEXT_ADDRESS) && \ + (((vm_address_t)(va))<=VM_MAX_KERNEL_ADDRESS)) -#define VM_MAP_MIN_ADDRESS MACH_VM_MIN_ADDRESS -#define VM_MAP_MAX_ADDRESS MACH_VM_MAX_ADDRESS +#define VM_MAP_MIN_ADDRESS MACH_VM_MIN_ADDRESS +#define VM_MAP_MAX_ADDRESS MACH_VM_MAX_ADDRESS /* FIXME - always leave like this? */ #if KASAN @@ -226,12 +225,12 @@ # define KERNEL_STACK_SIZE (I386_PGBYTES*4) #endif -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE /* For implementing legacy 32-bit interfaces */ -#define VM32_SUPPORT 1 -#define VM32_MIN_ADDRESS ((vm32_offset_t) 0) -#define VM32_MAX_ADDRESS ((vm32_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF)) +#define VM32_SUPPORT 1 +#define VM32_MIN_ADDRESS ((vm32_offset_t) 0) +#define VM32_MAX_ADDRESS ((vm32_offset_t) (VM_MAX_PAGE_ADDRESS & 0xFFFFFFFF)) /* * kalloc() parameters: @@ -247,13 +246,13 @@ */ -#define KALLOC_MINSIZE 16 /* minimum allocation size */ -#define KALLOC_LOG2_MINALIGN 4 /* log2 minimum alignment */ +#define KALLOC_MINSIZE 16 /* minimum allocation size */ +#define KALLOC_LOG2_MINALIGN 4 /* log2 minimum alignment */ -#define LINEAR_KERNEL_ADDRESS ((vm_offset_t) 0x00000000) +#define LINEAR_KERNEL_ADDRESS ((vm_offset_t) 0x00000000) -#define VM_MIN_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0xFFFFFF8000000000UL) -#define VM_MAX_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0xFFFFFF801FFFFFFFUL) +#define VM_MIN_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0xFFFFFF8000000000UL) +#define VM_MAX_KERNEL_LOADED_ADDRESS ((vm_offset_t) 0xFFFFFF801FFFFFFFUL) #define NCOPY_WINDOWS 0 @@ -263,30 +262,30 @@ * Conversion between 80386 pages and VM pages */ -#define trunc_i386_to_vm(p) (atop(trunc_page(i386_ptob(p)))) -#define round_i386_to_vm(p) (atop(round_page(i386_ptob(p)))) -#define vm_to_i386(p) (i386_btop(ptoa(p))) +#define trunc_i386_to_vm(p) (atop(trunc_page(i386_ptob(p)))) +#define round_i386_to_vm(p) (atop(round_page(i386_ptob(p)))) +#define vm_to_i386(p) (i386_btop(ptoa(p))) -#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \ - MACRO_BEGIN \ - pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), (cache_attr)); \ - (object)->set_cache_attr = TRUE; \ - (void) batch_pmap_op; \ - MACRO_END +#define 
PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \ + MACRO_BEGIN \ + pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), (cache_attr)); \ + (object)->set_cache_attr = TRUE; \ + (void) batch_pmap_op; \ + MACRO_END -#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op)\ - MACRO_BEGIN \ - (void) user_page_list; \ - (void) num_pages; \ - (void) batch_pmap_op; \ +#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op) \ + MACRO_BEGIN \ + (void) user_page_list; \ + (void) num_pages; \ + (void) batch_pmap_op; \ MACRO_END -#define IS_USERADDR64_CANONICAL(addr) \ +#define IS_USERADDR64_CANONICAL(addr) \ ((addr) < (VM_MAX_USER_PAGE_ADDRESS)) -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -#endif /* _MACH_I386_VM_PARAM_H_ */ +#endif /* _MACH_I386_VM_PARAM_H_ */ diff --git a/osfmk/mach/i386/vm_types.h b/osfmk/mach/i386/vm_types.h index 7e590842a..e4442e30d 100644 --- a/osfmk/mach/i386/vm_types.h +++ b/osfmk/mach/i386/vm_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,10 +64,10 @@ * Header file for VM data types. I386 version. 
*/ -#ifndef _MACH_I386_VM_TYPES_H_ +#ifndef _MACH_I386_VM_TYPES_H_ #define _MACH_I386_VM_TYPES_H_ -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include #include @@ -90,18 +90,18 @@ * * New use of these types is discouraged. */ -typedef __darwin_natural_t natural_t; -typedef int integer_t; +typedef __darwin_natural_t natural_t; +typedef int integer_t; /* * A vm_offset_t is a type-neutral pointer, * e.g. an offset into a virtual memory space. */ #ifdef __LP64__ -typedef uintptr_t vm_offset_t; -#else /* __LP64__ */ -typedef natural_t vm_offset_t; -#endif /* __LP64__ */ +typedef uintptr_t vm_offset_t; +#else /* __LP64__ */ +typedef natural_t vm_offset_t; +#endif /* __LP64__ */ /* * A vm_size_t is the proper type for e.g. @@ -109,10 +109,10 @@ typedef natural_t vm_offset_t; * vm_offset_t entities. */ #ifdef __LP64__ -typedef uintptr_t vm_size_t; -#else /* __LP64__ */ -typedef natural_t vm_size_t; -#endif /* __LP64__ */ +typedef uintptr_t vm_size_t; +#else /* __LP64__ */ +typedef natural_t vm_size_t; +#endif /* __LP64__ */ /* * This new type is independent of a particular vm map's @@ -121,17 +121,17 @@ typedef natural_t vm_size_t; * where the size of the map is not known - or we don't * want to have to distinguish. */ -typedef uint64_t mach_vm_address_t; -typedef uint64_t mach_vm_offset_t; -typedef uint64_t mach_vm_size_t; +typedef uint64_t mach_vm_address_t; +typedef uint64_t mach_vm_offset_t; +typedef uint64_t mach_vm_size_t; -typedef uint64_t vm_map_offset_t; -typedef uint64_t vm_map_address_t; -typedef uint64_t vm_map_size_t; +typedef uint64_t vm_map_offset_t; +typedef uint64_t vm_map_address_t; +typedef uint64_t vm_map_size_t; -typedef mach_vm_address_t mach_port_context_t; +typedef mach_vm_address_t mach_port_context_t; -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #ifdef VM32_SUPPORT @@ -139,19 +139,19 @@ typedef mach_vm_address_t mach_port_context_t; * These are types used internal to Mach to implement the * legacy 32-bit VM APIs published by the kernel. */ -typedef uint32_t vm32_address_t; -typedef uint32_t vm32_offset_t; -typedef uint32_t vm32_size_t; +typedef uint32_t vm32_address_t; +typedef uint32_t vm32_offset_t; +typedef uint32_t vm32_size_t; -#endif /* VM32_SUPPORT */ +#endif /* VM32_SUPPORT */ -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -#endif /* ASSEMBLER */ +#endif /* ASSEMBLER */ /* * If composing messages by hand (please do not) */ -#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32 +#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_32 -#endif /* _MACH_I386_VM_TYPES_H_ */ +#endif /* _MACH_I386_VM_TYPES_H_ */ diff --git a/osfmk/mach/kern_return.h b/osfmk/mach/kern_return.h index f0fa37d27..cbc29d937 100644 --- a/osfmk/mach/kern_return.h +++ b/osfmk/mach/kern_return.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
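[editor's note] The distinction this hunk draws is easy to miss: vm_offset_t and vm_size_t follow the pointer width of the build, while the mach_vm_*/vm_map_* families are unconditionally 64-bit so they can describe any task's address space from any caller. A compile-time sketch (user space):

#include <mach/vm_types.h>
#include <stdint.h>

_Static_assert(sizeof(mach_vm_address_t) == 8,
    "mach_vm_address_t is 64-bit on every build");
#ifdef __LP64__
_Static_assert(sizeof(vm_offset_t) == sizeof(uintptr_t),
    "vm_offset_t tracks the native pointer width");
#endif
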
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,267 +64,267 @@ * */ -#ifndef _MACH_KERN_RETURN_H_ +#ifndef _MACH_KERN_RETURN_H_ #define _MACH_KERN_RETURN_H_ #include -#define KERN_SUCCESS 0 - -#define KERN_INVALID_ADDRESS 1 - /* Specified address is not currently valid. - */ - -#define KERN_PROTECTION_FAILURE 2 - /* Specified memory is valid, but does not permit the - * required forms of access. - */ - -#define KERN_NO_SPACE 3 - /* The address range specified is already in use, or - * no address range of the size specified could be - * found. - */ - -#define KERN_INVALID_ARGUMENT 4 - /* The function requested was not applicable to this - * type of argument, or an argument is invalid - */ - -#define KERN_FAILURE 5 - /* The function could not be performed. A catch-all. - */ - -#define KERN_RESOURCE_SHORTAGE 6 - /* A system resource could not be allocated to fulfill - * this request. This failure may not be permanent. - */ - -#define KERN_NOT_RECEIVER 7 - /* The task in question does not hold receive rights - * for the port argument. - */ - -#define KERN_NO_ACCESS 8 - /* Bogus access restriction. - */ - -#define KERN_MEMORY_FAILURE 9 - /* During a page fault, the target address refers to a - * memory object that has been destroyed. This - * failure is permanent. - */ - -#define KERN_MEMORY_ERROR 10 - /* During a page fault, the memory object indicated - * that the data could not be returned. This failure - * may be temporary; future attempts to access this - * same data may succeed, as defined by the memory - * object. - */ - -#define KERN_ALREADY_IN_SET 11 - /* The receive right is already a member of the portset. - */ - -#define KERN_NOT_IN_SET 12 - /* The receive right is not a member of a port set. - */ - -#define KERN_NAME_EXISTS 13 - /* The name already denotes a right in the task. - */ - -#define KERN_ABORTED 14 - /* The operation was aborted. Ipc code will - * catch this and reflect it as a message error. - */ - -#define KERN_INVALID_NAME 15 - /* The name doesn't denote a right in the task. 
- */ - -#define KERN_INVALID_TASK 16 - /* Target task isn't an active task. - */ - -#define KERN_INVALID_RIGHT 17 - /* The name denotes a right, but not an appropriate right. - */ - -#define KERN_INVALID_VALUE 18 - /* A blatant range error. - */ - -#define KERN_UREFS_OVERFLOW 19 - /* Operation would overflow limit on user-references. - */ - -#define KERN_INVALID_CAPABILITY 20 - /* The supplied (port) capability is improper. - */ - -#define KERN_RIGHT_EXISTS 21 - /* The task already has send or receive rights - * for the port under another name. - */ - -#define KERN_INVALID_HOST 22 - /* Target host isn't actually a host. - */ - -#define KERN_MEMORY_PRESENT 23 - /* An attempt was made to supply "precious" data - * for memory that is already present in a - * memory object. - */ - -#define KERN_MEMORY_DATA_MOVED 24 - /* A page was requested of a memory manager via - * memory_object_data_request for an object using - * a MEMORY_OBJECT_COPY_CALL strategy, with the - * VM_PROT_WANTS_COPY flag being used to specify - * that the page desired is for a copy of the - * object, and the memory manager has detected - * the page was pushed into a copy of the object - * while the kernel was walking the shadow chain - * from the copy to the object. This error code - * is delivered via memory_object_data_error - * and is handled by the kernel (it forces the - * kernel to restart the fault). It will not be - * seen by users. - */ - -#define KERN_MEMORY_RESTART_COPY 25 - /* A strategic copy was attempted of an object - * upon which a quicker copy is now possible. - * The caller should retry the copy using - * vm_object_copy_quickly. This error code - * is seen only by the kernel. - */ - -#define KERN_INVALID_PROCESSOR_SET 26 - /* An argument applied to assert processor set privilege - * was not a processor set control port. - */ - -#define KERN_POLICY_LIMIT 27 - /* The specified scheduling attributes exceed the thread's - * limits. - */ - -#define KERN_INVALID_POLICY 28 - /* The specified scheduling policy is not currently - * enabled for the processor set. - */ - -#define KERN_INVALID_OBJECT 29 - /* The external memory manager failed to initialize the - * memory object. - */ - -#define KERN_ALREADY_WAITING 30 - /* A thread is attempting to wait for an event for which - * there is already a waiting thread. - */ - -#define KERN_DEFAULT_SET 31 - /* An attempt was made to destroy the default processor - * set. - */ - -#define KERN_EXCEPTION_PROTECTED 32 - /* An attempt was made to fetch an exception port that is - * protected, or to abort a thread while processing a - * protected exception. - */ - -#define KERN_INVALID_LEDGER 33 - /* A ledger was required but not supplied. - */ - -#define KERN_INVALID_MEMORY_CONTROL 34 - /* The port was not a memory cache control port. - */ - -#define KERN_INVALID_SECURITY 35 - /* An argument supplied to assert security privilege - * was not a host security port. - */ - -#define KERN_NOT_DEPRESSED 36 - /* thread_depress_abort was called on a thread which - * was not currently depressed. - */ - -#define KERN_TERMINATED 37 - /* Object has been terminated and is no longer available - */ - -#define KERN_LOCK_SET_DESTROYED 38 - /* Lock set has been destroyed and is no longer available. 
- */ - -#define KERN_LOCK_UNSTABLE 39 - /* The thread holding the lock terminated before releasing - * the lock - */ - -#define KERN_LOCK_OWNED 40 - /* The lock is already owned by another thread - */ - -#define KERN_LOCK_OWNED_SELF 41 - /* The lock is already owned by the calling thread - */ - -#define KERN_SEMAPHORE_DESTROYED 42 - /* Semaphore has been destroyed and is no longer available. - */ - -#define KERN_RPC_SERVER_TERMINATED 43 - /* Return from RPC indicating the target server was - * terminated before it successfully replied - */ - -#define KERN_RPC_TERMINATE_ORPHAN 44 - /* Terminate an orphaned activation. - */ - -#define KERN_RPC_CONTINUE_ORPHAN 45 - /* Allow an orphaned activation to continue executing. - */ - -#define KERN_NOT_SUPPORTED 46 - /* Empty thread activation (No thread linked to it) - */ - -#define KERN_NODE_DOWN 47 - /* Remote node down or inaccessible. - */ - -#define KERN_NOT_WAITING 48 - /* A signalled thread was not actually waiting. */ - -#define KERN_OPERATION_TIMED_OUT 49 - /* Some thread-oriented operation (semaphore_wait) timed out - */ - -#define KERN_CODESIGN_ERROR 50 - /* During a page fault, indicates that the page was rejected - * as a result of a signature check. - */ - -#define KERN_POLICY_STATIC 51 - /* The requested property cannot be changed at this time. - */ - -#define KERN_INSUFFICIENT_BUFFER_SIZE 52 - /* The provided buffer is of insufficient size for the requested data. - */ - -#define KERN_RETURN_MAX 0x100 - /* Maximum return value allowable - */ - -#endif /* _MACH_KERN_RETURN_H_ */ +#define KERN_SUCCESS 0 + +#define KERN_INVALID_ADDRESS 1 +/* Specified address is not currently valid. + */ + +#define KERN_PROTECTION_FAILURE 2 +/* Specified memory is valid, but does not permit the + * required forms of access. + */ + +#define KERN_NO_SPACE 3 +/* The address range specified is already in use, or + * no address range of the size specified could be + * found. + */ + +#define KERN_INVALID_ARGUMENT 4 +/* The function requested was not applicable to this + * type of argument, or an argument is invalid + */ + +#define KERN_FAILURE 5 +/* The function could not be performed. A catch-all. + */ + +#define KERN_RESOURCE_SHORTAGE 6 +/* A system resource could not be allocated to fulfill + * this request. This failure may not be permanent. + */ + +#define KERN_NOT_RECEIVER 7 +/* The task in question does not hold receive rights + * for the port argument. + */ + +#define KERN_NO_ACCESS 8 +/* Bogus access restriction. + */ + +#define KERN_MEMORY_FAILURE 9 +/* During a page fault, the target address refers to a + * memory object that has been destroyed. This + * failure is permanent. + */ + +#define KERN_MEMORY_ERROR 10 +/* During a page fault, the memory object indicated + * that the data could not be returned. This failure + * may be temporary; future attempts to access this + * same data may succeed, as defined by the memory + * object. + */ + +#define KERN_ALREADY_IN_SET 11 +/* The receive right is already a member of the portset. + */ + +#define KERN_NOT_IN_SET 12 +/* The receive right is not a member of a port set. + */ + +#define KERN_NAME_EXISTS 13 +/* The name already denotes a right in the task. + */ + +#define KERN_ABORTED 14 +/* The operation was aborted. Ipc code will + * catch this and reflect it as a message error. + */ + +#define KERN_INVALID_NAME 15 +/* The name doesn't denote a right in the task. + */ + +#define KERN_INVALID_TASK 16 +/* Target task isn't an active task. 
+ */ + +#define KERN_INVALID_RIGHT 17 +/* The name denotes a right, but not an appropriate right. + */ + +#define KERN_INVALID_VALUE 18 +/* A blatant range error. + */ + +#define KERN_UREFS_OVERFLOW 19 +/* Operation would overflow limit on user-references. + */ + +#define KERN_INVALID_CAPABILITY 20 +/* The supplied (port) capability is improper. + */ + +#define KERN_RIGHT_EXISTS 21 +/* The task already has send or receive rights + * for the port under another name. + */ + +#define KERN_INVALID_HOST 22 +/* Target host isn't actually a host. + */ + +#define KERN_MEMORY_PRESENT 23 +/* An attempt was made to supply "precious" data + * for memory that is already present in a + * memory object. + */ + +#define KERN_MEMORY_DATA_MOVED 24 +/* A page was requested of a memory manager via + * memory_object_data_request for an object using + * a MEMORY_OBJECT_COPY_CALL strategy, with the + * VM_PROT_WANTS_COPY flag being used to specify + * that the page desired is for a copy of the + * object, and the memory manager has detected + * the page was pushed into a copy of the object + * while the kernel was walking the shadow chain + * from the copy to the object. This error code + * is delivered via memory_object_data_error + * and is handled by the kernel (it forces the + * kernel to restart the fault). It will not be + * seen by users. + */ + +#define KERN_MEMORY_RESTART_COPY 25 +/* A strategic copy was attempted of an object + * upon which a quicker copy is now possible. + * The caller should retry the copy using + * vm_object_copy_quickly. This error code + * is seen only by the kernel. + */ + +#define KERN_INVALID_PROCESSOR_SET 26 +/* An argument applied to assert processor set privilege + * was not a processor set control port. + */ + +#define KERN_POLICY_LIMIT 27 +/* The specified scheduling attributes exceed the thread's + * limits. + */ + +#define KERN_INVALID_POLICY 28 +/* The specified scheduling policy is not currently + * enabled for the processor set. + */ + +#define KERN_INVALID_OBJECT 29 +/* The external memory manager failed to initialize the + * memory object. + */ + +#define KERN_ALREADY_WAITING 30 +/* A thread is attempting to wait for an event for which + * there is already a waiting thread. + */ + +#define KERN_DEFAULT_SET 31 +/* An attempt was made to destroy the default processor + * set. + */ + +#define KERN_EXCEPTION_PROTECTED 32 +/* An attempt was made to fetch an exception port that is + * protected, or to abort a thread while processing a + * protected exception. + */ + +#define KERN_INVALID_LEDGER 33 +/* A ledger was required but not supplied. + */ + +#define KERN_INVALID_MEMORY_CONTROL 34 +/* The port was not a memory cache control port. + */ + +#define KERN_INVALID_SECURITY 35 +/* An argument supplied to assert security privilege + * was not a host security port. + */ + +#define KERN_NOT_DEPRESSED 36 +/* thread_depress_abort was called on a thread which + * was not currently depressed. + */ + +#define KERN_TERMINATED 37 +/* Object has been terminated and is no longer available + */ + +#define KERN_LOCK_SET_DESTROYED 38 +/* Lock set has been destroyed and is no longer available. + */ + +#define KERN_LOCK_UNSTABLE 39 +/* The thread holding the lock terminated before releasing + * the lock + */ + +#define KERN_LOCK_OWNED 40 +/* The lock is already owned by another thread + */ + +#define KERN_LOCK_OWNED_SELF 41 +/* The lock is already owned by the calling thread + */ + +#define KERN_SEMAPHORE_DESTROYED 42 +/* Semaphore has been destroyed and is no longer available. 
+ */ + +#define KERN_RPC_SERVER_TERMINATED 43 +/* Return from RPC indicating the target server was + * terminated before it successfully replied + */ + +#define KERN_RPC_TERMINATE_ORPHAN 44 +/* Terminate an orphaned activation. + */ + +#define KERN_RPC_CONTINUE_ORPHAN 45 +/* Allow an orphaned activation to continue executing. + */ + +#define KERN_NOT_SUPPORTED 46 +/* Empty thread activation (No thread linked to it) + */ + +#define KERN_NODE_DOWN 47 +/* Remote node down or inaccessible. + */ + +#define KERN_NOT_WAITING 48 +/* A signalled thread was not actually waiting. */ + +#define KERN_OPERATION_TIMED_OUT 49 +/* Some thread-oriented operation (semaphore_wait) timed out + */ + +#define KERN_CODESIGN_ERROR 50 +/* During a page fault, indicates that the page was rejected + * as a result of a signature check. + */ + +#define KERN_POLICY_STATIC 51 +/* The requested property cannot be changed at this time. + */ + +#define KERN_INSUFFICIENT_BUFFER_SIZE 52 +/* The provided buffer is of insufficient size for the requested data. + */ + +#define KERN_RETURN_MAX 0x100 +/* Maximum return value allowable + */ + +#endif /* _MACH_KERN_RETURN_H_ */ diff --git a/osfmk/mach/kmod.h b/osfmk/mach/kmod.h index 412246592..361cfdfd2 100644 --- a/osfmk/mach/kmod.h +++ b/osfmk/mach/kmod.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -71,8 +71,8 @@ typedef kern_return_t kmod_stop_func_t(struct kmod_info * ki, void * data); /* Run-time struct only; never saved to a file */ typedef struct kmod_reference { - struct kmod_reference * next; - struct kmod_info * info; + struct kmod_reference * next; + struct kmod_info * info; } kmod_reference_t; /*********************************************************************** @@ -85,52 +85,52 @@ typedef struct kmod_reference { * the kernel, please use the compatibility definitions below. 
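[editor's note] Every code in this table reaches callers as a kern_return_t, and the convention throughout is to compare against KERN_SUCCESS and, in user space, render failures with mach_error_string() from <mach/mach_error.h>. A minimal sketch:

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int main(void)
{
    mach_port_t port = MACH_PORT_NULL;
    kern_return_t kr = mach_port_allocate(mach_task_self(),
        MACH_PORT_RIGHT_RECEIVE, &port);
    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "mach_port_allocate: %s (%d)\n",
            mach_error_string(kr), kr);
        return 1;
    }
    /* Drop the receive right we just created. */
    mach_port_mod_refs(mach_task_self(), port,
        MACH_PORT_RIGHT_RECEIVE, -1);
    return 0;
}
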
*/ typedef struct kmod_info { - struct kmod_info * next; - int32_t info_version; // version of this structure - uint32_t id; - char name[KMOD_MAX_NAME]; - char version[KMOD_MAX_NAME]; - int32_t reference_count; // # linkage refs to this - kmod_reference_t * reference_list; // who this refs (links on) - vm_address_t address; // starting address - vm_size_t size; // total size - vm_size_t hdr_size; // unwired hdr size - kmod_start_func_t * start; - kmod_stop_func_t * stop; + struct kmod_info * next; + int32_t info_version; // version of this structure + uint32_t id; + char name[KMOD_MAX_NAME]; + char version[KMOD_MAX_NAME]; + int32_t reference_count; // # linkage refs to this + kmod_reference_t * reference_list; // who this refs (links on) + vm_address_t address; // starting address + vm_size_t size; // total size + vm_size_t hdr_size; // unwired hdr size + kmod_start_func_t * start; + kmod_stop_func_t * stop; } kmod_info_t; /* A compatibility definition of kmod_info_t for 32-bit kexts. */ typedef struct kmod_info_32_v1 { - uint32_t next_addr; - int32_t info_version; - uint32_t id; - uint8_t name[KMOD_MAX_NAME]; - uint8_t version[KMOD_MAX_NAME]; - int32_t reference_count; - uint32_t reference_list_addr; - uint32_t address; - uint32_t size; - uint32_t hdr_size; - uint32_t start_addr; - uint32_t stop_addr; + uint32_t next_addr; + int32_t info_version; + uint32_t id; + uint8_t name[KMOD_MAX_NAME]; + uint8_t version[KMOD_MAX_NAME]; + int32_t reference_count; + uint32_t reference_list_addr; + uint32_t address; + uint32_t size; + uint32_t hdr_size; + uint32_t start_addr; + uint32_t stop_addr; } kmod_info_32_v1_t; /* A compatibility definition of kmod_info_t for 64-bit kexts. */ typedef struct kmod_info_64_v1 { - uint64_t next_addr; - int32_t info_version; - uint32_t id; - uint8_t name[KMOD_MAX_NAME]; - uint8_t version[KMOD_MAX_NAME]; - int32_t reference_count; - uint64_t reference_list_addr; - uint64_t address; - uint64_t size; - uint64_t hdr_size; - uint64_t start_addr; - uint64_t stop_addr; + uint64_t next_addr; + int32_t info_version; + uint32_t id; + uint8_t name[KMOD_MAX_NAME]; + uint8_t version[KMOD_MAX_NAME]; + int32_t reference_count; + uint64_t reference_list_addr; + uint64_t address; + uint64_t size; + uint64_t hdr_size; + uint64_t start_addr; + uint64_t stop_addr; } kmod_info_64_v1_t; #pragma pack() @@ -148,14 +148,14 @@ typedef struct kmod_info_64_v1 { static kmod_start_func_t name ## _module_start; \ static kmod_stop_func_t name ## _module_stop; \ kmod_info_t KMOD_INFO_NAME = { 0, KMOD_INFO_VERSION, -1U, \ - { #name }, { version }, -1, 0, 0, 0, 0, \ - name ## _module_start, \ - name ## _module_stop }; + { #name }, { version }, -1, 0, 0, 0, 0, \ + name ## _module_start, \ + name ## _module_stop }; #define KMOD_EXPLICIT_DECL(name, version, start, stop) \ kmod_info_t KMOD_INFO_NAME = { 0, KMOD_INFO_VERSION, -1U, \ - { #name }, { version }, -1, 0, 0, 0, 0, \ - start, stop }; + { #name }, { version }, -1, 0, 0, 0, 0, \ + start, stop }; #if PRAGMA_MARK #pragma mark Kernel private declarations @@ -174,8 +174,8 @@ extern void kmod_panic_dump(vm_offset_t * addr, unsigned int dump_cnt); * probes immediately based on kernel symbols. This per kext * flag overrides system mode in dtrace_modload(). 
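[editor's note] For kexts that bypass the KMOD_DECL convenience macro, KMOD_EXPLICIT_DECL above is the stanza that publishes the kmod_info_t. A hedged sketch with hypothetical com_example_* names (kext build environment assumed; the loader fills in the remaining fields, and build tooling normally generates this for you):

#include <mach/kmod.h>

static kern_return_t
com_example_start(kmod_info_t *ki, void *data)
{
    (void)ki; (void)data;
    return KERN_SUCCESS;
}

static kern_return_t
com_example_stop(kmod_info_t *ki, void *data)
{
    (void)ki; (void)data;
    return KERN_SUCCESS;
}

/* Expands to a kmod_info_t definition named per KMOD_INFO_NAME;
 * the macro supplies its own trailing semicolon. */
KMOD_EXPLICIT_DECL(com.example.driver, "1.0.0",
    com_example_start, com_example_stop)
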
*/ -#define KMOD_DTRACE_FORCE_INIT 0x01 -#define KMOD_DTRACE_STATIC_KEXT 0x02 +#define KMOD_DTRACE_FORCE_INIT 0x01 +#define KMOD_DTRACE_STATIC_KEXT 0x02 #endif /* CONFIG_DTRACE */ #endif /* KERNEL_PRIVATE */ diff --git a/osfmk/mach/mach.h b/osfmk/mach/mach.h index df61ab89a..de28553ae 100644 --- a/osfmk/mach/mach.h +++ b/osfmk/mach/mach.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,26 +22,26 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1989 Carnegie-Mellon University * All rights reserved. The CMU software License Agreement specifies * the terms and conditions for use and redistribution. */ -/* +/* * Includes all the types that a normal user * of Mach programs should need */ -#ifndef _MACH_H_ -#define _MACH_H_ +#ifndef _MACH_H_ +#define _MACH_H_ #include #include #include #include -#endif /* _MACH_H_ */ +#endif /* _MACH_H_ */ diff --git a/osfmk/mach/mach_interface.h b/osfmk/mach/mach_interface.h index 6f8cf8e03..660eba04c 100644 --- a/osfmk/mach/mach_interface.h +++ b/osfmk/mach/mach_interface.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _MACH_INTERFACE_H_ +#ifndef _MACH_INTERFACE_H_ #define _MACH_INTERFACE_H_ #include diff --git a/osfmk/mach/mach_param.h b/osfmk/mach/mach_param.h index e4ead673e..18e2cb68b 100644 --- a/osfmk/mach/mach_param.h +++ b/osfmk/mach/mach_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,11 +63,11 @@ * Mach system sizing parameters */ -#ifndef _MACH_MACH_PARAM_H_ +#ifndef _MACH_MACH_PARAM_H_ #define _MACH_MACH_PARAM_H_ /* Number of "registered" ports */ -#define TASK_PORT_REGISTER_MAX 3 +#define TASK_PORT_REGISTER_MAX 3 -#endif /* _MACH_MACH_PARAM_H_ */ +#endif /* _MACH_MACH_PARAM_H_ */ diff --git a/osfmk/mach/mach_syscalls.h b/osfmk/mach/mach_syscalls.h index 0e04e4f31..29399ce82 100644 --- a/osfmk/mach/mach_syscalls.h +++ b/osfmk/mach/mach_syscalls.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,8 +30,8 @@ */ #ifndef _MACH_MACH_SYSCALLS_H_ -#define _MACH_MACH_SYSCALLS_H_ +#define _MACH_MACH_SYSCALLS_H_ #include -#endif /* _MACH_MACH_SYSCALLS_H_ */ +#endif /* _MACH_MACH_SYSCALLS_H_ */ diff --git a/osfmk/mach/mach_time.h b/osfmk/mach/mach_time.h index e24219e75..b41206e1f 100644 --- a/osfmk/mach/mach_time.h +++ b/osfmk/mach/mach_time.h @@ -2,7 +2,7 @@ * Copyright (c) 2001-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,64 +22,64 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _MACH_MACH_TIME_H_ -#define _MACH_MACH_TIME_H_ +#ifndef _MACH_MACH_TIME_H_ +#define _MACH_MACH_TIME_H_ #include #include #include struct mach_timebase_info { - uint32_t numer; - uint32_t denom; + uint32_t numer; + uint32_t denom; }; -typedef struct mach_timebase_info *mach_timebase_info_t; -typedef struct mach_timebase_info mach_timebase_info_data_t; +typedef struct mach_timebase_info *mach_timebase_info_t; +typedef struct mach_timebase_info mach_timebase_info_data_t; __BEGIN_DECLS -#ifndef KERNEL +#ifndef KERNEL -kern_return_t mach_timebase_info( - mach_timebase_info_t info); +kern_return_t mach_timebase_info( + mach_timebase_info_t info); -kern_return_t mach_wait_until( - uint64_t deadline); +kern_return_t mach_wait_until( + uint64_t deadline); -#endif /* KERNEL */ +#endif /* KERNEL */ -uint64_t mach_absolute_time(void); +uint64_t mach_absolute_time(void); __OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_8_0) -uint64_t mach_approximate_time(void); +uint64_t mach_approximate_time(void); /* * like mach_absolute_time, but advances during sleep */ __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) -uint64_t mach_continuous_time(void); +uint64_t mach_continuous_time(void); /* * like mach_approximate_time, but advances during sleep */ __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) -uint64_t mach_continuous_approximate_time(void); +uint64_t mach_continuous_approximate_time(void); #if !defined(KERNEL) && defined(PRIVATE) // Forward definition because this is a BSD value struct timespec; __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) -kern_return_t mach_get_times(uint64_t* absolute_time, - uint64_t* continuous_time, - struct timespec *tp); +kern_return_t mach_get_times(uint64_t* absolute_time, + uint64_t* continuous_time, + struct timespec *tp); __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) -uint64_t mach_boottime_usec(void); +uint64_t mach_boottime_usec(void); #endif /* KERNEL */ diff --git a/osfmk/mach/mach_traps.h b/osfmk/mach/mach_traps.h index 9e712b9b1..2639dbfb1 100644 --- a/osfmk/mach/mach_traps.h +++ b/osfmk/mach/mach_traps.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
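[editor's note] mach_timebase_info() exists because mach_absolute_time() counts in hardware-dependent ticks; the numer/denom pair scales ticks to nanoseconds. The canonical user-space pattern:

#include <mach/mach_time.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    mach_timebase_info_data_t tb;
    mach_timebase_info(&tb);

    uint64_t t0 = mach_absolute_time();
    /* ... work being timed ... */
    uint64_t t1 = mach_absolute_time();

    /* Beware overflow for very long intervals; fine for benchmarks. */
    uint64_t ns = (t1 - t0) * tb.numer / tb.denom;
    printf("elapsed: %llu ns\n", (unsigned long long)ns);
    return 0;
}
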
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * Kernel RPC functions are defined in . */ -#ifndef _MACH_MACH_TRAPS_H_ +#ifndef _MACH_MACH_TRAPS_H_ #define _MACH_MACH_TRAPS_H_ #include @@ -81,9 +81,9 @@ __BEGIN_DECLS -#ifndef KERNEL +#ifndef KERNEL -#ifdef PRIVATE +#ifdef PRIVATE extern mach_port_name_t mach_reply_port(void); @@ -94,253 +94,253 @@ extern mach_port_name_t thread_self_trap(void); extern mach_port_name_t host_self_trap(void); extern mach_msg_return_t mach_msg_trap( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size, - mach_port_name_t rcv_name, - mach_msg_timeout_t timeout, - mach_port_name_t notify); + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_port_name_t notify); extern mach_msg_return_t mach_msg_overwrite_trap( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size, - mach_port_name_t rcv_name, - mach_msg_timeout_t timeout, - mach_msg_priority_t override, - mach_msg_header_t *rcv_msg, - mach_msg_size_t rcv_limit); + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_msg_priority_t override, + mach_msg_header_t *rcv_msg, + mach_msg_size_t rcv_limit); extern kern_return_t semaphore_signal_trap( - mach_port_name_t signal_name); - + mach_port_name_t signal_name); + extern kern_return_t semaphore_signal_all_trap( - mach_port_name_t signal_name); + mach_port_name_t signal_name); extern kern_return_t semaphore_signal_thread_trap( - mach_port_name_t signal_name, - mach_port_name_t thread_name); + mach_port_name_t signal_name, + mach_port_name_t thread_name); extern kern_return_t semaphore_wait_trap( - mach_port_name_t wait_name); + mach_port_name_t wait_name); extern kern_return_t semaphore_wait_signal_trap( - mach_port_name_t wait_name, - mach_port_name_t signal_name); + mach_port_name_t wait_name, + mach_port_name_t signal_name); extern kern_return_t semaphore_timedwait_trap( - mach_port_name_t wait_name, - unsigned int sec, - clock_res_t nsec); + mach_port_name_t wait_name, + unsigned int sec, + clock_res_t nsec); extern kern_return_t semaphore_timedwait_signal_trap( - mach_port_name_t wait_name, - mach_port_name_t signal_name, - unsigned int 
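[editor's note] User programs normally reach the semaphore_*_trap entry points above through the wrappers in <mach/semaphore.h>; semaphore_signal() and semaphore_wait() are thin stubs over these traps. A minimal round trip (signal first so the wait cannot block):

#include <mach/mach.h>
#include <mach/semaphore.h>

int main(void)
{
    semaphore_t sem;
    if (semaphore_create(mach_task_self(), &sem,
        SYNC_POLICY_FIFO, 0) != KERN_SUCCESS) {
        return 1;
    }

    semaphore_signal(sem);  /* funnels into semaphore_signal_trap */
    semaphore_wait(sem);    /* funnels into semaphore_wait_trap   */

    semaphore_destroy(mach_task_self(), sem);
    return 0;
}
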
sec, - clock_res_t nsec); + mach_port_name_t wait_name, + mach_port_name_t signal_name, + unsigned int sec, + clock_res_t nsec); -#endif /* PRIVATE */ +#endif /* PRIVATE */ extern kern_return_t clock_sleep_trap( - mach_port_name_t clock_name, - sleep_type_t sleep_type, - int sleep_sec, - int sleep_nsec, - mach_timespec_t *wakeup_time); + mach_port_name_t clock_name, + sleep_type_t sleep_type, + int sleep_sec, + int sleep_nsec, + mach_timespec_t *wakeup_time); extern kern_return_t _kernelrpc_mach_vm_allocate_trap( - mach_port_name_t target, - mach_vm_offset_t *addr, - mach_vm_size_t size, - int flags); + mach_port_name_t target, + mach_vm_offset_t *addr, + mach_vm_size_t size, + int flags); extern kern_return_t _kernelrpc_mach_vm_deallocate_trap( - mach_port_name_t target, - mach_vm_address_t address, - mach_vm_size_t size -); + mach_port_name_t target, + mach_vm_address_t address, + mach_vm_size_t size + ); extern kern_return_t _kernelrpc_mach_vm_protect_trap( - mach_port_name_t target, - mach_vm_address_t address, - mach_vm_size_t size, - boolean_t set_maximum, - vm_prot_t new_protection -); + mach_port_name_t target, + mach_vm_address_t address, + mach_vm_size_t size, + boolean_t set_maximum, + vm_prot_t new_protection + ); extern kern_return_t _kernelrpc_mach_vm_map_trap( - mach_port_name_t target, - mach_vm_offset_t *address, - mach_vm_size_t size, - mach_vm_offset_t mask, - int flags, - vm_prot_t cur_protection -); + mach_port_name_t target, + mach_vm_offset_t *address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int flags, + vm_prot_t cur_protection + ); extern kern_return_t _kernelrpc_mach_vm_purgable_control_trap( - mach_port_name_t target, - mach_vm_offset_t address, - vm_purgable_t control, - int *state); + mach_port_name_t target, + mach_vm_offset_t address, + vm_purgable_t control, + int *state); extern kern_return_t _kernelrpc_mach_port_allocate_trap( - mach_port_name_t target, - mach_port_right_t right, - mach_port_name_t *name -); + mach_port_name_t target, + mach_port_right_t right, + mach_port_name_t *name + ); extern kern_return_t _kernelrpc_mach_port_destroy_trap( - mach_port_name_t target, - mach_port_name_t name -); + mach_port_name_t target, + mach_port_name_t name + ); extern kern_return_t _kernelrpc_mach_port_deallocate_trap( - mach_port_name_t target, - mach_port_name_t name -); + mach_port_name_t target, + mach_port_name_t name + ); extern kern_return_t _kernelrpc_mach_port_mod_refs_trap( - mach_port_name_t target, - mach_port_name_t name, - mach_port_right_t right, - mach_port_delta_t delta -); + mach_port_name_t target, + mach_port_name_t name, + mach_port_right_t right, + mach_port_delta_t delta + ); extern kern_return_t _kernelrpc_mach_port_move_member_trap( - mach_port_name_t target, - mach_port_name_t member, - mach_port_name_t after -); + mach_port_name_t target, + mach_port_name_t member, + mach_port_name_t after + ); extern kern_return_t _kernelrpc_mach_port_insert_right_trap( - mach_port_name_t target, - mach_port_name_t name, - mach_port_name_t poly, - mach_msg_type_name_t polyPoly -); + mach_port_name_t target, + mach_port_name_t name, + mach_port_name_t poly, + mach_msg_type_name_t polyPoly + ); extern kern_return_t _kernelrpc_mach_port_get_attributes_trap( - mach_port_name_t target, - mach_port_name_t name, - mach_port_flavor_t flavor, - mach_port_info_t port_info_out, - mach_msg_type_number_t *port_info_outCnt -); + mach_port_name_t target, + mach_port_name_t name, + mach_port_flavor_t flavor, + mach_port_info_t port_info_out, + 
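[editor's note] Likewise, the _kernelrpc_mach_vm_*_trap fast paths above back the public mach_vm_allocate()/mach_vm_deallocate() wrappers from <mach/mach_vm.h>. Sketch of a round trip (page size hard-coded to 4096 for brevity):

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdint.h>

int main(void)
{
    mach_vm_address_t addr = 0;
    kern_return_t kr = mach_vm_allocate(mach_task_self(), &addr,
        4096, VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return 1;
    }
    *(int *)(uintptr_t)addr = 42;   /* new pages arrive zero-filled */
    (void)mach_vm_deallocate(mach_task_self(), addr, 4096);
    return 0;
}
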
mach_msg_type_number_t *port_info_outCnt + ); extern kern_return_t _kernelrpc_mach_port_insert_member_trap( - mach_port_name_t target, - mach_port_name_t name, - mach_port_name_t pset -); + mach_port_name_t target, + mach_port_name_t name, + mach_port_name_t pset + ); extern kern_return_t _kernelrpc_mach_port_extract_member_trap( - mach_port_name_t target, - mach_port_name_t name, - mach_port_name_t pset -); + mach_port_name_t target, + mach_port_name_t name, + mach_port_name_t pset + ); extern kern_return_t _kernelrpc_mach_port_construct_trap( - mach_port_name_t target, - mach_port_options_t *options, - uint64_t context, - mach_port_name_t *name -); + mach_port_name_t target, + mach_port_options_t *options, + uint64_t context, + mach_port_name_t *name + ); extern kern_return_t _kernelrpc_mach_port_destruct_trap( - mach_port_name_t target, - mach_port_name_t name, - mach_port_delta_t srdelta, - uint64_t guard -); + mach_port_name_t target, + mach_port_name_t name, + mach_port_delta_t srdelta, + uint64_t guard + ); extern kern_return_t _kernelrpc_mach_port_guard_trap( - mach_port_name_t target, - mach_port_name_t name, - uint64_t guard, - boolean_t strict -); + mach_port_name_t target, + mach_port_name_t name, + uint64_t guard, + boolean_t strict + ); extern kern_return_t _kernelrpc_mach_port_unguard_trap( - mach_port_name_t target, - mach_port_name_t name, - uint64_t guard -); + mach_port_name_t target, + mach_port_name_t name, + uint64_t guard + ); extern kern_return_t mach_generate_activity_id( - mach_port_name_t target, - int count, - uint64_t *activity_id -); + mach_port_name_t target, + int count, + uint64_t *activity_id + ); extern kern_return_t macx_swapon( - uint64_t filename, - int flags, - int size, - int priority); + uint64_t filename, + int flags, + int size, + int priority); extern kern_return_t macx_swapoff( - uint64_t filename, - int flags); + uint64_t filename, + int flags); extern kern_return_t macx_triggers( - int hi_water, - int low_water, - int flags, - mach_port_t alert_port); + int hi_water, + int low_water, + int flags, + mach_port_t alert_port); extern kern_return_t macx_backing_store_suspend( - boolean_t suspend); + boolean_t suspend); extern kern_return_t macx_backing_store_recovery( - int pid); + int pid); extern boolean_t swtch_pri(int pri); extern boolean_t swtch(void); extern kern_return_t thread_switch( - mach_port_name_t thread_name, - int option, - mach_msg_timeout_t option_time); + mach_port_name_t thread_name, + int option, + mach_msg_timeout_t option_time); extern mach_port_name_t task_self_trap(void); extern kern_return_t host_create_mach_voucher_trap( - mach_port_name_t host, - mach_voucher_attr_raw_recipe_array_t recipes, - int recipes_size, - mach_port_name_t *voucher); + mach_port_name_t host, + mach_voucher_attr_raw_recipe_array_t recipes, + int recipes_size, + mach_port_name_t *voucher); extern kern_return_t mach_voucher_extract_attr_recipe_trap( - mach_port_name_t voucher_name, - mach_voucher_attr_key_t key, - mach_voucher_attr_raw_recipe_t recipe, - mach_msg_type_number_t *recipe_size); + mach_port_name_t voucher_name, + mach_voucher_attr_key_t key, + mach_voucher_attr_raw_recipe_t recipe, + mach_msg_type_number_t *recipe_size); /* * Obsolete interfaces. 
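/*
 * Illustrative sketch: _kernelrpc_mach_port_construct_trap() and the
 * guard/unguard traps above back the guarded-port API.  Creating a receive
 * right whose teardown requires the guard value, via the public
 * mach_port_construct()/mach_port_destruct() wrappers:
 */
#include <mach/mach.h>

static kern_return_t
guarded_port_demo(void)
{
	mach_port_options_t opts = {
		.flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT,
	};
	mach_port_context_t guard = (mach_port_context_t)0x5a5a5a5aULL;
	mach_port_t name = MACH_PORT_NULL;
	kern_return_t kr;

	kr = mach_port_construct(mach_task_self(), &opts, guard, &name);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* srdelta == 0: no send-right adjustment, just verify and drop the guard. */
	return mach_port_destruct(mach_task_self(), name, 0, guard);
}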
*/ extern kern_return_t task_for_pid( - mach_port_name_t target_tport, - int pid, - mach_port_name_t *t); + mach_port_name_t target_tport, + int pid, + mach_port_name_t *t); extern kern_return_t task_name_for_pid( - mach_port_name_t target_tport, - int pid, - mach_port_name_t *tn); + mach_port_name_t target_tport, + int pid, + mach_port_name_t *tn); extern kern_return_t pid_for_task( - mach_port_name_t t, - int *x); + mach_port_name_t t, + int *x); -#else /* KERNEL */ +#else /* KERNEL */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE /* Syscall data translations routines * @@ -348,12 +348,12 @@ extern kern_return_t pid_for_task( * argument structures with elements large enough for any of them. */ #if CONFIG_REQUIRES_U32_MUNGING -#define PAD_(t) (sizeof(uint64_t) <= sizeof(t) \ - ? 0 : sizeof(uint64_t) - sizeof(t)) +#define PAD_(t) (sizeof(uint64_t) <= sizeof(t) \ + ? 0 : sizeof(uint64_t) - sizeof(t)) #define PAD_ARG_8 #else -#define PAD_(t) (sizeof(uint32_t) <= sizeof(t) \ - ? 0 : sizeof(uint32_t) - sizeof(t)) +#define PAD_(t) (sizeof(uint32_t) <= sizeof(t) \ + ? 0 : sizeof(uint32_t) - sizeof(t)) #if __arm__ && (__BIGGEST_ALIGNMENT__ > 4) #define PAD_ARG_8 #else @@ -362,11 +362,11 @@ extern kern_return_t pid_for_task( #endif #if BYTE_ORDER == LITTLE_ENDIAN -#define PADL_(t) 0 -#define PADR_(t) PAD_(t) +#define PADL_(t) 0 +#define PADR_(t) PAD_(t) #else -#define PADL_(t) PAD_(t) -#define PADR_(t) 0 +#define PADL_(t) PAD_(t) +#define PADR_(t) 0 #endif #define PAD_ARG_(arg_type, arg_name) \ @@ -383,37 +383,37 @@ struct kern_invalid_args { int32_t dummy; }; extern kern_return_t kern_invalid( - struct kern_invalid_args *args); + struct kern_invalid_args *args); struct mach_reply_port_args { int32_t dummy; }; extern mach_port_name_t mach_reply_port( - struct mach_reply_port_args *args); + struct mach_reply_port_args *args); struct thread_get_special_reply_port_args { int32_t dummy; }; extern mach_port_name_t thread_get_special_reply_port( - struct thread_get_special_reply_port_args *args); + struct thread_get_special_reply_port_args *args); struct thread_self_trap_args { int32_t dummy; }; extern mach_port_name_t thread_self_trap( - struct thread_self_trap_args *args); + struct thread_self_trap_args *args); struct task_self_trap_args { int32_t dummy; }; extern mach_port_name_t task_self_trap( - struct task_self_trap_args *args); + struct task_self_trap_args *args); struct host_self_trap_args { int32_t dummy; }; extern mach_port_name_t host_self_trap( - struct host_self_trap_args *args); + struct host_self_trap_args *args); struct mach_msg_overwrite_trap_args { PAD_ARG_(user_addr_t, msg); @@ -424,44 +424,44 @@ struct mach_msg_overwrite_trap_args { PAD_ARG_(mach_msg_timeout_t, timeout); PAD_ARG_(mach_msg_priority_t, override); PAD_ARG_8 - PAD_ARG_(user_addr_t, rcv_msg); /* Unused on mach_msg_trap */ + PAD_ARG_(user_addr_t, rcv_msg); /* Unused on mach_msg_trap */ }; extern mach_msg_return_t mach_msg_trap( - struct mach_msg_overwrite_trap_args *args); + struct mach_msg_overwrite_trap_args *args); extern mach_msg_return_t mach_msg_overwrite_trap( - struct mach_msg_overwrite_trap_args *args); + struct mach_msg_overwrite_trap_args *args); struct semaphore_signal_trap_args { PAD_ARG_(mach_port_name_t, signal_name); }; extern kern_return_t semaphore_signal_trap( - struct semaphore_signal_trap_args *args); - + struct semaphore_signal_trap_args *args); + struct semaphore_signal_all_trap_args { PAD_ARG_(mach_port_name_t, signal_name); }; extern kern_return_t semaphore_signal_all_trap( - struct 
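/*
 * Worked example of the padding arithmetic above.  The PAD_ARG_ body sits in
 * the unchanged portion of this header; it is reconstructed here from the
 * PAD_, PADL_, and PADR_ definitions, so treat the exact spelling as
 * illustrative:
 *
 *   #define PAD_ARG_(arg_type, arg_name) \
 *       char arg_name##_l_[PADL_(arg_type)]; \
 *       arg_type arg_name; \
 *       char arg_name##_r_[PADR_(arg_type)];
 *
 * With CONFIG_REQUIRES_U32_MUNGING, PAD_(mach_port_name_t) is
 * sizeof(uint64_t) - sizeof(uint32_t) == 4; little-endian gives PADL_ == 0
 * and PADR_ == 4, so PAD_ARG_(mach_port_name_t, target) occupies one 8-byte
 * slot with the 32-bit value in the low word, while a 64-bit mach_vm_size_t
 * gets PAD_(t) == 0 and fills its slot exactly.  That is what lets one arg
 * structure serve both the munged 32-bit ABI and the register-passed 64-bit
 * ABI.
 */
#include <stdint.h>

struct pad_slot_demo {                  /* shape of one padded 32-bit argument */
	uint32_t value;
	char     value_r_[4];
};
_Static_assert(sizeof(struct pad_slot_demo) == 8,
    "each padded argument occupies one 64-bit slot");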
semaphore_signal_all_trap_args *args); + struct semaphore_signal_all_trap_args *args); struct semaphore_signal_thread_trap_args { PAD_ARG_(mach_port_name_t, signal_name); PAD_ARG_(mach_port_name_t, thread_name); }; extern kern_return_t semaphore_signal_thread_trap( - struct semaphore_signal_thread_trap_args *args); + struct semaphore_signal_thread_trap_args *args); struct semaphore_wait_trap_args { PAD_ARG_(mach_port_name_t, wait_name); }; extern kern_return_t semaphore_wait_trap( - struct semaphore_wait_trap_args *args); + struct semaphore_wait_trap_args *args); struct semaphore_wait_signal_trap_args { PAD_ARG_(mach_port_name_t, wait_name); PAD_ARG_(mach_port_name_t, signal_name); }; extern kern_return_t semaphore_wait_signal_trap( - struct semaphore_wait_signal_trap_args *args); + struct semaphore_wait_signal_trap_args *args); struct semaphore_timedwait_trap_args { PAD_ARG_(mach_port_name_t, wait_name); @@ -469,7 +469,7 @@ struct semaphore_timedwait_trap_args { PAD_ARG_(clock_res_t, nsec); }; extern kern_return_t semaphore_timedwait_trap( - struct semaphore_timedwait_trap_args *args); + struct semaphore_timedwait_trap_args *args); struct semaphore_timedwait_signal_trap_args { PAD_ARG_(mach_port_name_t, wait_name); @@ -478,7 +478,7 @@ struct semaphore_timedwait_signal_trap_args { PAD_ARG_(clock_res_t, nsec); }; extern kern_return_t semaphore_timedwait_signal_trap( - struct semaphore_timedwait_signal_trap_args *args); + struct semaphore_timedwait_signal_trap_args *args); struct task_for_pid_args { PAD_ARG_(mach_port_name_t, target_tport); @@ -486,7 +486,7 @@ struct task_for_pid_args { PAD_ARG_(user_addr_t, t); }; extern kern_return_t task_for_pid( - struct task_for_pid_args *args); + struct task_for_pid_args *args); struct task_name_for_pid_args { PAD_ARG_(mach_port_name_t, target_tport); @@ -494,14 +494,14 @@ struct task_name_for_pid_args { PAD_ARG_(user_addr_t, t); }; extern kern_return_t task_name_for_pid( - struct task_name_for_pid_args *args); + struct task_name_for_pid_args *args); struct pid_for_task_args { PAD_ARG_(mach_port_name_t, t); PAD_ARG_(user_addr_t, pid); }; extern kern_return_t pid_for_task( - struct pid_for_task_args *args); + struct pid_for_task_args *args); struct macx_swapon_args { PAD_ARG_(uint64_t, filename); @@ -510,14 +510,14 @@ struct macx_swapon_args { PAD_ARG_(int, priority); }; extern kern_return_t macx_swapon( - struct macx_swapon_args *args); + struct macx_swapon_args *args); struct macx_swapoff_args { - PAD_ARG_(uint64_t, filename); - PAD_ARG_(int, flags); + PAD_ARG_(uint64_t, filename); + PAD_ARG_(int, flags); }; extern kern_return_t macx_swapoff( - struct macx_swapoff_args *args); + struct macx_swapoff_args *args); struct macx_triggers_args { PAD_ARG_(int, hi_water); @@ -526,39 +526,39 @@ struct macx_triggers_args { PAD_ARG_(mach_port_t, alert_port); }; extern kern_return_t macx_triggers( - struct macx_triggers_args *args); + struct macx_triggers_args *args); struct macx_backing_store_suspend_args { PAD_ARG_(boolean_t, suspend); }; extern kern_return_t macx_backing_store_suspend( - struct macx_backing_store_suspend_args *args); + struct macx_backing_store_suspend_args *args); struct macx_backing_store_recovery_args { PAD_ARG_(int, pid); }; extern kern_return_t macx_backing_store_recovery( - struct macx_backing_store_recovery_args *args); + struct macx_backing_store_recovery_args *args); struct swtch_pri_args { PAD_ARG_(int, pri); }; extern boolean_t swtch_pri( - struct swtch_pri_args *args); + struct swtch_pri_args *args); struct pfz_exit_args { int32_t 
dummy; }; extern kern_return_t pfz_exit( - struct pfz_exit_args *args); + struct pfz_exit_args *args); struct swtch_args { - int32_t dummy; + int32_t dummy; }; extern boolean_t swtch( - struct swtch_args *args); + struct swtch_args *args); -struct clock_sleep_trap_args{ +struct clock_sleep_trap_args { PAD_ARG_(mach_port_name_t, clock_name); PAD_ARG_(sleep_type_t, sleep_type); PAD_ARG_(int, sleep_sec); @@ -566,7 +566,7 @@ struct clock_sleep_trap_args{ PAD_ARG_(user_addr_t, wakeup_time); }; extern kern_return_t clock_sleep_trap( - struct clock_sleep_trap_args *args); + struct clock_sleep_trap_args *args); struct thread_switch_args { PAD_ARG_(mach_port_name_t, thread_name); @@ -574,38 +574,38 @@ struct thread_switch_args { PAD_ARG_(mach_msg_timeout_t, option_time); }; extern kern_return_t thread_switch( - struct thread_switch_args *args); + struct thread_switch_args *args); struct mach_timebase_info_trap_args { PAD_ARG_(user_addr_t, info); }; extern kern_return_t mach_timebase_info_trap( - struct mach_timebase_info_trap_args *args); + struct mach_timebase_info_trap_args *args); struct mach_wait_until_trap_args { PAD_ARG_(uint64_t, deadline); }; extern kern_return_t mach_wait_until_trap( - struct mach_wait_until_trap_args *args); + struct mach_wait_until_trap_args *args); struct mk_timer_create_trap_args { - int32_t dummy; + int32_t dummy; }; extern mach_port_name_t mk_timer_create_trap( - struct mk_timer_create_trap_args *args); + struct mk_timer_create_trap_args *args); struct mk_timer_destroy_trap_args { PAD_ARG_(mach_port_name_t, name); }; extern kern_return_t mk_timer_destroy_trap( - struct mk_timer_destroy_trap_args *args); + struct mk_timer_destroy_trap_args *args); struct mk_timer_arm_trap_args { PAD_ARG_(mach_port_name_t, name); PAD_ARG_(uint64_t, expire_time); }; extern kern_return_t mk_timer_arm_trap( - struct mk_timer_arm_trap_args *args); + struct mk_timer_arm_trap_args *args); struct mk_timer_arm_leeway_trap_args { PAD_ARG_(mach_port_name_t, name); @@ -614,42 +614,42 @@ struct mk_timer_arm_leeway_trap_args { PAD_ARG_(uint64_t, mk_leeway); }; extern kern_return_t mk_timer_arm_leeway_trap( - struct mk_timer_arm_leeway_trap_args *args); + struct mk_timer_arm_leeway_trap_args *args); struct mk_timer_cancel_trap_args { - PAD_ARG_(mach_port_name_t, name); - PAD_ARG_(user_addr_t, result_time); + PAD_ARG_(mach_port_name_t, name); + PAD_ARG_(user_addr_t, result_time); }; extern kern_return_t mk_timer_cancel_trap( - struct mk_timer_cancel_trap_args *args); + struct mk_timer_cancel_trap_args *args); struct _kernelrpc_mach_vm_allocate_trap_args { - PAD_ARG_(mach_port_name_t, target); /* 1 word */ - PAD_ARG_(user_addr_t, addr); /* 1 word */ - PAD_ARG_(mach_vm_size_t, size); /* 2 words */ - PAD_ARG_(int, flags); /* 1 word */ -}; /* Total: 5 */ + PAD_ARG_(mach_port_name_t, target); /* 1 word */ + PAD_ARG_(user_addr_t, addr); /* 1 word */ + PAD_ARG_(mach_vm_size_t, size); /* 2 words */ + PAD_ARG_(int, flags); /* 1 word */ +}; /* Total: 5 */ extern kern_return_t _kernelrpc_mach_vm_allocate_trap( - struct _kernelrpc_mach_vm_allocate_trap_args *args); + struct _kernelrpc_mach_vm_allocate_trap_args *args); struct _kernelrpc_mach_vm_deallocate_args { - PAD_ARG_(mach_port_name_t, target); /* 1 word */ - PAD_ARG_(mach_vm_address_t, address); /* 2 words */ - PAD_ARG_(mach_vm_size_t, size); /* 2 words */ -}; /* Total: 5 */ + PAD_ARG_(mach_port_name_t, target); /* 1 word */ + PAD_ARG_(mach_vm_address_t, address); /* 2 words */ + PAD_ARG_(mach_vm_size_t, size); /* 2 words */ +}; /* Total: 5 */ extern 
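/*
 * Reading the word tallies above: counts are in 32-bit user words, which is
 * what the munger walks when a 32-bit process traps into a 64-bit kernel.
 * For _kernelrpc_mach_vm_allocate_trap:
 *
 *   target  mach_port_name_t  1 word   (w)
 *   addr    user_addr_t       1 word   (w)   32-bit user pointer
 *   size    mach_vm_size_t    2 words  (l)
 *   flags   int               1 word   (w)
 *                             --------
 *                             5 words, matching a munge routine shaped
 *                             like "munge_wwlw"
 *
 * The trap-to-munger pairing lives in the Mach trap table
 * (osfmk/kern/syscall_sw.c); the routine name above is inferred from the
 * tally rather than quoted from that file.
 */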
kern_return_t _kernelrpc_mach_vm_deallocate_trap( - struct _kernelrpc_mach_vm_deallocate_args *args); + struct _kernelrpc_mach_vm_deallocate_args *args); struct _kernelrpc_mach_vm_protect_args { - PAD_ARG_(mach_port_name_t, target); /* 1 word */ - PAD_ARG_(mach_vm_address_t, address); /* 2 words */ - PAD_ARG_(mach_vm_size_t, size); /* 2 words */ - PAD_ARG_(boolean_t, set_maximum); /* 1 word */ - PAD_ARG_(vm_prot_t, new_protection); /* 1 word */ -}; /* Total: 7 */ + PAD_ARG_(mach_port_name_t, target); /* 1 word */ + PAD_ARG_(mach_vm_address_t, address); /* 2 words */ + PAD_ARG_(mach_vm_size_t, size); /* 2 words */ + PAD_ARG_(boolean_t, set_maximum); /* 1 word */ + PAD_ARG_(vm_prot_t, new_protection); /* 1 word */ +}; /* Total: 7 */ extern kern_return_t _kernelrpc_mach_vm_protect_trap( - struct _kernelrpc_mach_vm_protect_args *args); + struct _kernelrpc_mach_vm_protect_args *args); struct _kernelrpc_mach_vm_map_trap_args { PAD_ARG_(mach_port_name_t, target); @@ -658,17 +658,17 @@ struct _kernelrpc_mach_vm_map_trap_args { PAD_ARG_(mach_vm_offset_t, mask); PAD_ARG_(int, flags); PAD_ARG_8 - PAD_ARG_(vm_prot_t, cur_protection); + PAD_ARG_(vm_prot_t, cur_protection); }; extern kern_return_t _kernelrpc_mach_vm_map_trap( - struct _kernelrpc_mach_vm_map_trap_args *args); + struct _kernelrpc_mach_vm_map_trap_args *args); struct _kernelrpc_mach_vm_purgable_control_trap_args { - PAD_ARG_(mach_port_name_t, target); /* 1 word */ - PAD_ARG_(mach_vm_offset_t, address); /* 2 words */ - PAD_ARG_(vm_purgable_t, control); /* 1 word */ - PAD_ARG_(user_addr_t, state); /* 1 word */ -}; /* Total: 5 */ + PAD_ARG_(mach_port_name_t, target); /* 1 word */ + PAD_ARG_(mach_vm_offset_t, address); /* 2 words */ + PAD_ARG_(vm_purgable_t, control); /* 1 word */ + PAD_ARG_(user_addr_t, state); /* 1 word */ +}; /* Total: 5 */ extern kern_return_t _kernelrpc_mach_vm_purgable_control_trap( struct _kernelrpc_mach_vm_purgable_control_trap_args *args); @@ -679,7 +679,7 @@ struct _kernelrpc_mach_port_allocate_args { PAD_ARG_(user_addr_t, name); }; extern kern_return_t _kernelrpc_mach_port_allocate_trap( - struct _kernelrpc_mach_port_allocate_args *args); + struct _kernelrpc_mach_port_allocate_args *args); struct _kernelrpc_mach_port_destroy_args { @@ -687,14 +687,14 @@ struct _kernelrpc_mach_port_destroy_args { PAD_ARG_(mach_port_name_t, name); }; extern kern_return_t _kernelrpc_mach_port_destroy_trap( - struct _kernelrpc_mach_port_destroy_args *args); + struct _kernelrpc_mach_port_destroy_args *args); struct _kernelrpc_mach_port_deallocate_args { PAD_ARG_(mach_port_name_t, target); PAD_ARG_(mach_port_name_t, name); }; extern kern_return_t _kernelrpc_mach_port_deallocate_trap( - struct _kernelrpc_mach_port_deallocate_args *args); + struct _kernelrpc_mach_port_deallocate_args *args); struct _kernelrpc_mach_port_mod_refs_args { PAD_ARG_(mach_port_name_t, target); @@ -703,7 +703,7 @@ struct _kernelrpc_mach_port_mod_refs_args { PAD_ARG_(mach_port_delta_t, delta); }; extern kern_return_t _kernelrpc_mach_port_mod_refs_trap( - struct _kernelrpc_mach_port_mod_refs_args *args); + struct _kernelrpc_mach_port_mod_refs_args *args); struct _kernelrpc_mach_port_move_member_args { PAD_ARG_(mach_port_name_t, target); @@ -711,7 +711,7 @@ struct _kernelrpc_mach_port_move_member_args { PAD_ARG_(mach_port_name_t, after); }; extern kern_return_t _kernelrpc_mach_port_move_member_trap( - struct _kernelrpc_mach_port_move_member_args *args); + struct _kernelrpc_mach_port_move_member_args *args); struct _kernelrpc_mach_port_insert_right_args { 
PAD_ARG_(mach_port_name_t, target); @@ -720,7 +720,7 @@ struct _kernelrpc_mach_port_insert_right_args { PAD_ARG_(mach_msg_type_name_t, polyPoly); }; extern kern_return_t _kernelrpc_mach_port_insert_right_trap( - struct _kernelrpc_mach_port_insert_right_args *args); + struct _kernelrpc_mach_port_insert_right_args *args); struct _kernelrpc_mach_port_get_attributes_args { PAD_ARG_(mach_port_name_t, target); @@ -730,7 +730,7 @@ struct _kernelrpc_mach_port_get_attributes_args { PAD_ARG_(user_addr_t, count); }; extern kern_return_t _kernelrpc_mach_port_get_attributes_trap( - struct _kernelrpc_mach_port_get_attributes_args *args); + struct _kernelrpc_mach_port_get_attributes_args *args); struct _kernelrpc_mach_port_insert_member_args { PAD_ARG_(mach_port_name_t, target); @@ -738,7 +738,7 @@ struct _kernelrpc_mach_port_insert_member_args { PAD_ARG_(mach_port_name_t, pset); }; extern kern_return_t _kernelrpc_mach_port_insert_member_trap( - struct _kernelrpc_mach_port_insert_member_args *args); + struct _kernelrpc_mach_port_insert_member_args *args); struct _kernelrpc_mach_port_extract_member_args { PAD_ARG_(mach_port_name_t, target); @@ -746,7 +746,7 @@ struct _kernelrpc_mach_port_extract_member_args { PAD_ARG_(mach_port_name_t, pset); }; extern kern_return_t _kernelrpc_mach_port_extract_member_trap( - struct _kernelrpc_mach_port_extract_member_args *args); + struct _kernelrpc_mach_port_extract_member_args *args); struct _kernelrpc_mach_port_construct_args { PAD_ARG_(mach_port_name_t, target); @@ -755,7 +755,7 @@ struct _kernelrpc_mach_port_construct_args { PAD_ARG_(user_addr_t, name); }; extern kern_return_t _kernelrpc_mach_port_construct_trap( - struct _kernelrpc_mach_port_construct_args *args); + struct _kernelrpc_mach_port_construct_args *args); struct _kernelrpc_mach_port_destruct_args { PAD_ARG_(mach_port_name_t, target); @@ -764,7 +764,7 @@ struct _kernelrpc_mach_port_destruct_args { PAD_ARG_(uint64_t, guard); }; extern kern_return_t _kernelrpc_mach_port_destruct_trap( - struct _kernelrpc_mach_port_destruct_args *args); + struct _kernelrpc_mach_port_destruct_args *args); struct _kernelrpc_mach_port_guard_args { PAD_ARG_(mach_port_name_t, target); @@ -773,7 +773,7 @@ struct _kernelrpc_mach_port_guard_args { PAD_ARG_(boolean_t, strict); }; extern kern_return_t _kernelrpc_mach_port_guard_trap( - struct _kernelrpc_mach_port_guard_args *args); + struct _kernelrpc_mach_port_guard_args *args); struct _kernelrpc_mach_port_unguard_args { PAD_ARG_(mach_port_name_t, target); @@ -781,7 +781,7 @@ struct _kernelrpc_mach_port_unguard_args { PAD_ARG_(uint64_t, guard); }; extern kern_return_t _kernelrpc_mach_port_unguard_trap( - struct _kernelrpc_mach_port_unguard_args *args); + struct _kernelrpc_mach_port_unguard_args *args); struct mach_generate_activity_id_args { PAD_ARG_(mach_port_name_t, target); @@ -789,7 +789,7 @@ struct mach_generate_activity_id_args { PAD_ARG_(user_addr_t, activity_id); }; extern kern_return_t mach_generate_activity_id( - struct mach_generate_activity_id_args *args); + struct mach_generate_activity_id_args *args); /* * Voucher trap interfaces @@ -802,7 +802,7 @@ struct host_create_mach_voucher_args { PAD_ARG_(user_addr_t, voucher); }; extern kern_return_t host_create_mach_voucher_trap( - struct host_create_mach_voucher_args *args); + struct host_create_mach_voucher_args *args); struct mach_voucher_extract_attr_recipe_args { PAD_ARG_(mach_port_name_t, voucher_name); @@ -812,7 +812,7 @@ struct mach_voucher_extract_attr_recipe_args { }; extern kern_return_t 
mach_voucher_extract_attr_recipe_trap( - struct mach_voucher_extract_attr_recipe_args *args); + struct mach_voucher_extract_attr_recipe_args *args); /* not published to LP64 clients yet */ @@ -824,11 +824,11 @@ struct iokit_user_client_trap_args { PAD_ARG_(void *, p3); PAD_ARG_(void *, p4); PAD_ARG_(void *, p5); - PAD_ARG_8 - PAD_ARG_(void *, p6); + PAD_ARG_8 + PAD_ARG_(void *, p6); }; kern_return_t iokit_user_client_trap( - struct iokit_user_client_trap_args *args); + struct iokit_user_client_trap_args *args); #undef PAD_ #undef PADL_ @@ -836,10 +836,10 @@ kern_return_t iokit_user_client_trap( #undef PAD_ARG_ #undef PAD_ARG_8 -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -#endif /* KERNEL */ +#endif /* KERNEL */ __END_DECLS -#endif /* _MACH_MACH_TRAPS_H_ */ +#endif /* _MACH_MACH_TRAPS_H_ */ diff --git a/osfmk/mach/mach_types.defs b/osfmk/mach/mach_types.defs index dc6b2e47d..27dbd26a6 100644 --- a/osfmk/mach/mach_types.defs +++ b/osfmk/mach/mach_types.defs @@ -372,18 +372,27 @@ type processor_t = mach_port_t #endif /* KERNEL_SERVER */ ; -type processor_array_t = ^array[] of processor_t; +type processor_array_t = ^array[] of processor_t; - /* processor_info_t: variable-sized inline array that can + /* + * processor_info_t: variable-sized inline array that can * contain: - * processor_basic_info_t: (5 ints) - * processor_cpu_load_info_t:(4 ints) - * processor_machine_info_t :(12 ints) + * + * - processor_basic_info_t: (5 ints) + * - processor_cpu_load_info_t: (4 ints) + * - processor_machine_info_t: (12 ints) + * - processor_cpu_stat_t: (10 ints) + * - processor_cpu_stat64_t: (20 ints) + * * If other processor_info flavors are added, this definition - * may need to be changed. (See mach/processor_info.h) */ -type processor_flavor_t = int; -type processor_info_t = array[*:12] of integer_t; -type processor_info_array_t = ^array[] of integer_t; + * may need to be changed. + * + * See mach/processor_info.h and mach/arm/processor_info.h. + */ + +type processor_flavor_t = int; +type processor_info_t = array[*:20] of integer_t; +type processor_info_array_t = ^array[] of integer_t; type processor_set_t = mach_port_t #if KERNEL_SERVER diff --git a/osfmk/mach/mach_types.h b/osfmk/mach/mach_types.h index eab35b14c..480c60768 100644 --- a/osfmk/mach/mach_types.h +++ b/osfmk/mach/mach_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
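/*
 * The substantive change in the mach_types.defs hunk above: the MIG inline
 * bound for processor_info_t rises from 12 to 20 integers so the largest
 * listed flavor (processor_cpu_stat64_t, 20 ints) fits inline.  Callers of
 * the out-of-line processor_info_array_t form are unaffected.  A typical
 * consumer, sketched with the public host_processor_info() routine:
 */
#include <mach/mach.h>
#include <stdio.h>

static void
print_cpu_loads(void)
{
	processor_info_array_t info = NULL;
	mach_msg_type_number_t info_count = 0;
	natural_t ncpu = 0;
	kern_return_t kr;

	kr = host_processor_info(mach_host_self(), PROCESSOR_CPU_LOAD_INFO,
	    &ncpu, &info, &info_count);
	if (kr != KERN_SUCCESS) {
		return;
	}

	processor_cpu_load_info_t loads = (processor_cpu_load_info_t)info;
	for (natural_t i = 0; i < ncpu; i++) {
		printf("cpu%u: user=%u system=%u idle=%u nice=%u\n", i,
		    loads[i].cpu_ticks[CPU_STATE_USER],
		    loads[i].cpu_ticks[CPU_STATE_SYSTEM],
		    loads[i].cpu_ticks[CPU_STATE_IDLE],
		    loads[i].cpu_ticks[CPU_STATE_NICE]);
	}

	/* The reply array is out-of-line memory owned by the caller. */
	vm_deallocate(mach_task_self(), (vm_address_t)info,
	    info_count * sizeof(integer_t));
}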
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -70,7 +70,7 @@ * */ -#ifndef _MACH_MACH_TYPES_H_ +#ifndef _MACH_MACH_TYPES_H_ #define _MACH_MACH_TYPES_H_ #include @@ -110,7 +110,7 @@ #include #include -#ifdef KERNEL +#ifdef KERNEL #include @@ -118,96 +118,96 @@ * If we are in the kernel, then pick up the kernel definitions for * the basic mach types. */ -typedef struct task *task_t, *task_name_t, *task_inspect_t, *task_suspension_token_t; -typedef struct thread *thread_t, *thread_act_t, *thread_inspect_t; -typedef struct ipc_space *ipc_space_t, *ipc_space_inspect_t; -typedef struct coalition *coalition_t; -typedef struct host *host_t; -typedef struct host *host_priv_t; -typedef struct host *host_security_t; -typedef struct processor *processor_t; -typedef struct processor_set *processor_set_t; -typedef struct processor_set *processor_set_control_t; -typedef struct semaphore *semaphore_t; -typedef struct ledger *ledger_t; -typedef struct alarm *alarm_t; -typedef struct clock *clock_serv_t; -typedef struct clock *clock_ctrl_t; +typedef struct task *task_t, *task_name_t, *task_inspect_t, *task_suspension_token_t; +typedef struct thread *thread_t, *thread_act_t, *thread_inspect_t; +typedef struct ipc_space *ipc_space_t, *ipc_space_inspect_t; +typedef struct coalition *coalition_t; +typedef struct host *host_t; +typedef struct host *host_priv_t; +typedef struct host *host_security_t; +typedef struct processor *processor_t; +typedef struct processor_set *processor_set_t; +typedef struct processor_set *processor_set_control_t; +typedef struct semaphore *semaphore_t; +typedef struct ledger *ledger_t; +typedef struct alarm *alarm_t; +typedef struct clock *clock_serv_t; +typedef struct clock *clock_ctrl_t; /* * OBSOLETE: lock_set interfaces are obsolete. 
*/ -typedef struct lock_set *lock_set_t; -struct lock_set ; +typedef struct lock_set *lock_set_t; +struct lock_set; -#ifndef MACH_KERNEL_PRIVATE +#ifndef MACH_KERNEL_PRIVATE __BEGIN_DECLS -struct task ; -struct thread ; -struct host ; -struct processor ; -struct processor_set ; -struct semaphore ; -struct ledger ; -struct alarm ; -struct clock ; +struct task; +struct thread; +struct host; +struct processor; +struct processor_set; +struct semaphore; +struct ledger; +struct alarm; +struct clock; __END_DECLS -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -#else /* KERNEL */ +#else /* KERNEL */ /* * If we are not in the kernel, then these will all be represented by * ports at user-space. */ -typedef mach_port_t task_t; -typedef mach_port_t task_name_t; -typedef mach_port_t task_inspect_t; -typedef mach_port_t task_suspension_token_t; -typedef mach_port_t thread_t; -typedef mach_port_t thread_act_t; -typedef mach_port_t thread_inspect_t; -typedef mach_port_t ipc_space_t; -typedef mach_port_t ipc_space_inspect_t; -typedef mach_port_t coalition_t; -typedef mach_port_t host_t; -typedef mach_port_t host_priv_t; -typedef mach_port_t host_security_t; -typedef mach_port_t processor_t; -typedef mach_port_t processor_set_t; -typedef mach_port_t processor_set_control_t; -typedef mach_port_t semaphore_t; -typedef mach_port_t lock_set_t; -typedef mach_port_t ledger_t; -typedef mach_port_t alarm_t; -typedef mach_port_t clock_serv_t; -typedef mach_port_t clock_ctrl_t; +typedef mach_port_t task_t; +typedef mach_port_t task_name_t; +typedef mach_port_t task_inspect_t; +typedef mach_port_t task_suspension_token_t; +typedef mach_port_t thread_t; +typedef mach_port_t thread_act_t; +typedef mach_port_t thread_inspect_t; +typedef mach_port_t ipc_space_t; +typedef mach_port_t ipc_space_inspect_t; +typedef mach_port_t coalition_t; +typedef mach_port_t host_t; +typedef mach_port_t host_priv_t; +typedef mach_port_t host_security_t; +typedef mach_port_t processor_t; +typedef mach_port_t processor_set_t; +typedef mach_port_t processor_set_control_t; +typedef mach_port_t semaphore_t; +typedef mach_port_t lock_set_t; +typedef mach_port_t ledger_t; +typedef mach_port_t alarm_t; +typedef mach_port_t clock_serv_t; +typedef mach_port_t clock_ctrl_t; -#endif /* KERNEL */ +#endif /* KERNEL */ /* * These aren't really unique types. They are just called * out as unique types at one point in history. So we list * them here for compatibility. */ -typedef processor_set_t processor_set_name_t; +typedef processor_set_t processor_set_name_t; /* * These types are just hard-coded as ports */ -typedef mach_port_t clock_reply_t; -typedef mach_port_t bootstrap_t; -typedef mach_port_t mem_entry_name_port_t; -typedef mach_port_t exception_handler_t; -typedef exception_handler_t *exception_handler_array_t; -typedef mach_port_t vm_task_entry_t; -typedef mach_port_t io_master_t; -typedef mach_port_t UNDServerRef; +typedef mach_port_t clock_reply_t; +typedef mach_port_t bootstrap_t; +typedef mach_port_t mem_entry_name_port_t; +typedef mach_port_t exception_handler_t; +typedef exception_handler_t *exception_handler_array_t; +typedef mach_port_t vm_task_entry_t; +typedef mach_port_t io_master_t; +typedef mach_port_t UNDServerRef; /* * Mig doesn't translate the components of an array. @@ -216,13 +216,13 @@ typedef mach_port_t UNDServerRef; * are not completely accurate at the moment for other kernel * components. 
*/ -typedef task_t *task_array_t; -typedef thread_t *thread_array_t; -typedef processor_set_t *processor_set_array_t; -typedef processor_set_t *processor_set_name_array_t; -typedef processor_t *processor_array_t; -typedef thread_act_t *thread_act_array_t; -typedef ledger_t *ledger_array_t; +typedef task_t *task_array_t; +typedef thread_t *thread_array_t; +typedef processor_set_t *processor_set_array_t; +typedef processor_set_t *processor_set_name_array_t; +typedef processor_t *processor_array_t; +typedef thread_act_t *thread_act_array_t; +typedef ledger_t *ledger_array_t; /* * However the real mach_types got declared, we also have to declare @@ -230,69 +230,69 @@ typedef ledger_t *ledger_array_t; * had declared the user interfaces at one point. Someday these should * go away. */ -typedef task_t task_port_t; -typedef task_array_t task_port_array_t; -typedef thread_t thread_port_t; -typedef thread_array_t thread_port_array_t; -typedef ipc_space_t ipc_space_port_t; -typedef host_t host_name_t; -typedef host_t host_name_port_t; -typedef processor_set_t processor_set_port_t; -typedef processor_set_t processor_set_name_port_t; -typedef processor_set_array_t processor_set_name_port_array_t; -typedef processor_set_t processor_set_control_port_t; -typedef processor_t processor_port_t; -typedef processor_array_t processor_port_array_t; -typedef thread_act_t thread_act_port_t; -typedef thread_act_array_t thread_act_port_array_t; -typedef semaphore_t semaphore_port_t; -typedef lock_set_t lock_set_port_t; -typedef ledger_t ledger_port_t; -typedef ledger_array_t ledger_port_array_t; -typedef alarm_t alarm_port_t; -typedef clock_serv_t clock_serv_port_t; -typedef clock_ctrl_t clock_ctrl_port_t; -typedef exception_handler_t exception_port_t; +typedef task_t task_port_t; +typedef task_array_t task_port_array_t; +typedef thread_t thread_port_t; +typedef thread_array_t thread_port_array_t; +typedef ipc_space_t ipc_space_port_t; +typedef host_t host_name_t; +typedef host_t host_name_port_t; +typedef processor_set_t processor_set_port_t; +typedef processor_set_t processor_set_name_port_t; +typedef processor_set_array_t processor_set_name_port_array_t; +typedef processor_set_t processor_set_control_port_t; +typedef processor_t processor_port_t; +typedef processor_array_t processor_port_array_t; +typedef thread_act_t thread_act_port_t; +typedef thread_act_array_t thread_act_port_array_t; +typedef semaphore_t semaphore_port_t; +typedef lock_set_t lock_set_port_t; +typedef ledger_t ledger_port_t; +typedef ledger_array_t ledger_port_array_t; +typedef alarm_t alarm_port_t; +typedef clock_serv_t clock_serv_port_t; +typedef clock_ctrl_t clock_ctrl_port_t; +typedef exception_handler_t exception_port_t; typedef exception_handler_array_t exception_port_arrary_t; -#define TASK_NULL ((task_t) 0) -#define TASK_NAME_NULL ((task_name_t) 0) -#define TASK_INSPECT_NULL ((task_inspect_t) 0) -#define THREAD_NULL ((thread_t) 0) -#define THREAD_INSPECT_NULL ((thread_inspect_t)0) -#define TID_NULL ((uint64_t) 0) -#define THR_ACT_NULL ((thread_act_t) 0) -#define IPC_SPACE_NULL ((ipc_space_t) 0) -#define IPC_SPACE_INSPECT_NULL ((ipc_space_inspect_t) 0) -#define COALITION_NULL ((coalition_t) 0) -#define HOST_NULL ((host_t) 0) -#define HOST_PRIV_NULL ((host_priv_t)0) -#define HOST_SECURITY_NULL ((host_security_t)0) -#define PROCESSOR_SET_NULL ((processor_set_t) 0) -#define PROCESSOR_NULL ((processor_t) 0) -#define SEMAPHORE_NULL ((semaphore_t) 0) -#define LOCK_SET_NULL ((lock_set_t) 0) -#define LEDGER_NULL ((ledger_t) 0) -#define 
ALARM_NULL ((alarm_t) 0) -#define CLOCK_NULL ((clock_t) 0) -#define UND_SERVER_NULL ((UNDServerRef) 0) +#define TASK_NULL ((task_t) 0) +#define TASK_NAME_NULL ((task_name_t) 0) +#define TASK_INSPECT_NULL ((task_inspect_t) 0) +#define THREAD_NULL ((thread_t) 0) +#define THREAD_INSPECT_NULL ((thread_inspect_t)0) +#define TID_NULL ((uint64_t) 0) +#define THR_ACT_NULL ((thread_act_t) 0) +#define IPC_SPACE_NULL ((ipc_space_t) 0) +#define IPC_SPACE_INSPECT_NULL ((ipc_space_inspect_t) 0) +#define COALITION_NULL ((coalition_t) 0) +#define HOST_NULL ((host_t) 0) +#define HOST_PRIV_NULL ((host_priv_t)0) +#define HOST_SECURITY_NULL ((host_security_t)0) +#define PROCESSOR_SET_NULL ((processor_set_t) 0) +#define PROCESSOR_NULL ((processor_t) 0) +#define SEMAPHORE_NULL ((semaphore_t) 0) +#define LOCK_SET_NULL ((lock_set_t) 0) +#define LEDGER_NULL ((ledger_t) 0) +#define ALARM_NULL ((alarm_t) 0) +#define CLOCK_NULL ((clock_t) 0) +#define UND_SERVER_NULL ((UNDServerRef) 0) /* DEPRECATED */ -typedef natural_t ledger_item_t; -#define LEDGER_ITEM_INFINITY ((ledger_item_t) (~0)) +typedef natural_t ledger_item_t; +#define LEDGER_ITEM_INFINITY ((ledger_item_t) (~0)) -typedef int64_t ledger_amount_t; +typedef int64_t ledger_amount_t; #define LEDGER_LIMIT_INFINITY ((ledger_amount_t)((1ULL << 63) - 1)) -typedef mach_vm_offset_t *emulation_vector_t; -typedef char *user_subsystem_t; +typedef mach_vm_offset_t *emulation_vector_t; +typedef char *user_subsystem_t; -typedef char *labelstr_t; +typedef char *labelstr_t; /* * Backwards compatibility, for those programs written * before mach/{std,mach}_types.{defs,h} were set up. */ #include -#endif /* _MACH_MACH_TYPES_H_ */ +#endif /* _MACH_MACH_TYPES_H_ */ diff --git a/osfmk/mach/mach_voucher_types.h b/osfmk/mach/mach_voucher_types.h index 3eb982d5f..6181a64e4 100644 --- a/osfmk/mach/mach_voucher_types.h +++ b/osfmk/mach/mach_voucher_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _MACH_VOUCHER_TYPES_H_ -#define _MACH_VOUCHER_TYPES_H_ +#ifndef _MACH_VOUCHER_TYPES_H_ +#define _MACH_VOUCHER_TYPES_H_ #include #include @@ -50,13 +50,13 @@ * attribute value handle itself doesn't change, the value the handle refers * to is free to change at will). 
*/ -typedef mach_port_t mach_voucher_t; -#define MACH_VOUCHER_NULL ((mach_voucher_t) 0) +typedef mach_port_t mach_voucher_t; +#define MACH_VOUCHER_NULL ((mach_voucher_t) 0) -typedef mach_port_name_t mach_voucher_name_t; -#define MACH_VOUCHER_NAME_NULL ((mach_voucher_name_t) 0) +typedef mach_port_name_t mach_voucher_name_t; +#define MACH_VOUCHER_NAME_NULL ((mach_voucher_name_t) 0) -typedef mach_voucher_name_t *mach_voucher_name_array_t; +typedef mach_voucher_name_t *mach_voucher_name_array_t; #define MACH_VOUCHER_NAME_ARRAY_NULL ((mach_voucher_name_array_t) 0) /* @@ -64,14 +64,14 @@ typedef mach_voucher_name_t *mach_voucher_name_array_t; * a port at user-space and a reference to an ipc_voucher structure in-kernel. */ #if !defined(KERNEL) -typedef mach_voucher_t ipc_voucher_t; +typedef mach_voucher_t ipc_voucher_t; #else #if !defined(MACH_KERNEL_PRIVATE) -struct ipc_voucher ; +struct ipc_voucher; #endif -typedef struct ipc_voucher *ipc_voucher_t; +typedef struct ipc_voucher *ipc_voucher_t; #endif -#define IPC_VOUCHER_NULL ((ipc_voucher_t) 0) +#define IPC_VOUCHER_NULL ((ipc_voucher_t) 0) /* * mach_voucher_selector_t - A means of specifying which thread/task value to extract - @@ -79,32 +79,32 @@ typedef struct ipc_voucher *ipc_voucher_t; * the full [layered] effective value for the task/thread. */ typedef uint32_t mach_voucher_selector_t; -#define MACH_VOUCHER_SELECTOR_CURRENT ((mach_voucher_selector_t)0) -#define MACH_VOUCHER_SELECTOR_EFFECTIVE ((mach_voucher_selector_t)1) +#define MACH_VOUCHER_SELECTOR_CURRENT ((mach_voucher_selector_t)0) +#define MACH_VOUCHER_SELECTOR_EFFECTIVE ((mach_voucher_selector_t)1) /* * mach_voucher_attr_key_t - The key used to identify a particular managed resource or * to select the specific resource manager’s data associated * with a given voucher. 
- */ + */ typedef uint32_t mach_voucher_attr_key_t; typedef mach_voucher_attr_key_t *mach_voucher_attr_key_array_t; -#define MACH_VOUCHER_ATTR_KEY_ALL ((mach_voucher_attr_key_t)~0) -#define MACH_VOUCHER_ATTR_KEY_NONE ((mach_voucher_attr_key_t)0) +#define MACH_VOUCHER_ATTR_KEY_ALL ((mach_voucher_attr_key_t)~0) +#define MACH_VOUCHER_ATTR_KEY_NONE ((mach_voucher_attr_key_t)0) /* other well-known-keys will be added here */ -#define MACH_VOUCHER_ATTR_KEY_ATM ((mach_voucher_attr_key_t)1) -#define MACH_VOUCHER_ATTR_KEY_IMPORTANCE ((mach_voucher_attr_key_t)2) -#define MACH_VOUCHER_ATTR_KEY_BANK ((mach_voucher_attr_key_t)3) -#define MACH_VOUCHER_ATTR_KEY_PTHPRIORITY ((mach_voucher_attr_key_t)4) +#define MACH_VOUCHER_ATTR_KEY_ATM ((mach_voucher_attr_key_t)1) +#define MACH_VOUCHER_ATTR_KEY_IMPORTANCE ((mach_voucher_attr_key_t)2) +#define MACH_VOUCHER_ATTR_KEY_BANK ((mach_voucher_attr_key_t)3) +#define MACH_VOUCHER_ATTR_KEY_PTHPRIORITY ((mach_voucher_attr_key_t)4) -#define MACH_VOUCHER_ATTR_KEY_USER_DATA ((mach_voucher_attr_key_t)7) -#define MACH_VOUCHER_ATTR_KEY_BITS MACH_VOUCHER_ATTR_KEY_USER_DATA /* deprecated */ -#define MACH_VOUCHER_ATTR_KEY_TEST ((mach_voucher_attr_key_t)8) +#define MACH_VOUCHER_ATTR_KEY_USER_DATA ((mach_voucher_attr_key_t)7) +#define MACH_VOUCHER_ATTR_KEY_BITS MACH_VOUCHER_ATTR_KEY_USER_DATA /* deprecated */ +#define MACH_VOUCHER_ATTR_KEY_TEST ((mach_voucher_attr_key_t)8) -#define MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN MACH_VOUCHER_ATTR_KEY_TEST +#define MACH_VOUCHER_ATTR_KEY_NUM_WELL_KNOWN MACH_VOUCHER_ATTR_KEY_TEST /* * mach_voucher_attr_content_t @@ -123,8 +123,8 @@ typedef uint32_t mach_voucher_attr_content_size_t; typedef uint32_t mach_voucher_attr_command_t; /* - * mach_voucher_attr_recipe_command_t - * + * mach_voucher_attr_recipe_command_t + * * The verbs used to create/morph a voucher attribute value. * We define some system-wide commands here - related to creation, and transport of * vouchers and attributes. Additional commands can be defined by, and supported by, @@ -133,25 +133,25 @@ typedef uint32_t mach_voucher_attr_command_t; typedef uint32_t mach_voucher_attr_recipe_command_t; typedef mach_voucher_attr_recipe_command_t *mach_voucher_attr_recipe_command_array_t; -#define MACH_VOUCHER_ATTR_NOOP ((mach_voucher_attr_recipe_command_t)0) -#define MACH_VOUCHER_ATTR_COPY ((mach_voucher_attr_recipe_command_t)1) -#define MACH_VOUCHER_ATTR_REMOVE ((mach_voucher_attr_recipe_command_t)2) -#define MACH_VOUCHER_ATTR_SET_VALUE_HANDLE ((mach_voucher_attr_recipe_command_t)3) -#define MACH_VOUCHER_ATTR_AUTO_REDEEM ((mach_voucher_attr_recipe_command_t)4) -#define MACH_VOUCHER_ATTR_SEND_PREPROCESS ((mach_voucher_attr_recipe_command_t)5) +#define MACH_VOUCHER_ATTR_NOOP ((mach_voucher_attr_recipe_command_t)0) +#define MACH_VOUCHER_ATTR_COPY ((mach_voucher_attr_recipe_command_t)1) +#define MACH_VOUCHER_ATTR_REMOVE ((mach_voucher_attr_recipe_command_t)2) +#define MACH_VOUCHER_ATTR_SET_VALUE_HANDLE ((mach_voucher_attr_recipe_command_t)3) +#define MACH_VOUCHER_ATTR_AUTO_REDEEM ((mach_voucher_attr_recipe_command_t)4) +#define MACH_VOUCHER_ATTR_SEND_PREPROCESS ((mach_voucher_attr_recipe_command_t)5) /* redeem is on its way out? 
*/ -#define MACH_VOUCHER_ATTR_REDEEM ((mach_voucher_attr_recipe_command_t)10) +#define MACH_VOUCHER_ATTR_REDEEM ((mach_voucher_attr_recipe_command_t)10) /* recipe command(s) for importance attribute manager */ -#define MACH_VOUCHER_ATTR_IMPORTANCE_SELF ((mach_voucher_attr_recipe_command_t)200) +#define MACH_VOUCHER_ATTR_IMPORTANCE_SELF ((mach_voucher_attr_recipe_command_t)200) /* recipe command(s) for bit-store attribute manager */ -#define MACH_VOUCHER_ATTR_USER_DATA_STORE ((mach_voucher_attr_recipe_command_t)211) -#define MACH_VOUCHER_ATTR_BITS_STORE MACH_VOUCHER_ATTR_USER_DATA_STORE /* deprecated */ +#define MACH_VOUCHER_ATTR_USER_DATA_STORE ((mach_voucher_attr_recipe_command_t)211) +#define MACH_VOUCHER_ATTR_BITS_STORE MACH_VOUCHER_ATTR_USER_DATA_STORE /* deprecated */ /* recipe command(s) for test attribute manager */ -#define MACH_VOUCHER_ATTR_TEST_STORE MACH_VOUCHER_ATTR_USER_DATA_STORE +#define MACH_VOUCHER_ATTR_TEST_STORE MACH_VOUCHER_ATTR_USER_DATA_STORE /* * mach_voucher_attr_recipe_t @@ -161,11 +161,11 @@ typedef mach_voucher_attr_recipe_command_t *mach_voucher_attr_recipe_command_arr #pragma pack(1) typedef struct mach_voucher_attr_recipe_data { - mach_voucher_attr_key_t key; + mach_voucher_attr_key_t key; mach_voucher_attr_recipe_command_t command; mach_voucher_name_t previous_voucher; - mach_voucher_attr_content_size_t content_size; - uint8_t content[]; + mach_voucher_attr_content_size_t content_size; + uint8_t content[]; } mach_voucher_attr_recipe_data_t; typedef mach_voucher_attr_recipe_data_t *mach_voucher_attr_recipe_t; typedef mach_msg_type_number_t mach_voucher_attr_recipe_size_t; @@ -184,14 +184,14 @@ typedef mach_msg_type_number_t mach_voucher_attr_raw_recipe_array_size_t; /* * VOUCHER ATTRIBUTE MANAGER Writer types */ - + /* * mach_voucher_attr_manager_t * * A handle through which the mach voucher mechanism communicates with the voucher * attribute manager for a given attribute key. */ -typedef mach_port_t mach_voucher_attr_manager_t; +typedef mach_port_t mach_voucher_attr_manager_t; #define MACH_VOUCHER_ATTR_MANAGER_NULL ((mach_voucher_attr_manager_t) 0) /* @@ -200,7 +200,7 @@ typedef mach_port_t mach_voucher_attr_manager_t; * A handle provided to the voucher attribute manager for a given attribute key * through which it makes inquiries or control operations of the mach voucher mechanism. */ -typedef mach_port_t mach_voucher_attr_control_t; +typedef mach_port_t mach_voucher_attr_control_t; #define MACH_VOUCHER_ATTR_CONTROL_NULL ((mach_voucher_attr_control_t) 0) /* @@ -209,12 +209,12 @@ typedef mach_port_t mach_voucher_attr_control_t; * types in the Mach portion of the kernel. */ #if !defined(KERNEL) -typedef mach_port_t ipc_voucher_attr_manager_t; -typedef mach_port_t ipc_voucher_attr_control_t; +typedef mach_port_t ipc_voucher_attr_manager_t; +typedef mach_port_t ipc_voucher_attr_control_t; #else #if !defined(MACH_KERNEL_PRIVATE) -struct ipc_voucher_attr_manager ; -struct ipc_voucher_attr_control ; +struct ipc_voucher_attr_manager; +struct ipc_voucher_attr_control; #endif typedef struct ipc_voucher_attr_manager *ipc_voucher_attr_manager_t; typedef struct ipc_voucher_attr_control *ipc_voucher_attr_control_t; @@ -224,15 +224,15 @@ typedef struct ipc_voucher_attr_control *ipc_voucher_attr_control_t; /* * mach_voucher_attr_value_handle_t - * + * * The private handle that the voucher attribute manager provides to * the mach voucher mechanism to represent a given attr content/value. 
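/*
 * Illustrative sketch: a raw recipe is the packed header defined above
 * followed by content_size bytes of inline content.  Building a single
 * USER_DATA recipe and handing it to host_create_mach_voucher(), the wrapper
 * over host_create_mach_voucher_trap().  Whether the user-data attribute
 * manager is enabled on a given system is a separate question; the point is
 * the shape of the buffer.
 */
#include <mach/mach.h>
#include <stdint.h>
#include <string.h>

static kern_return_t
make_user_data_voucher(ipc_voucher_t *voucher_out)
{
	static const char payload[] = "example-tag";
	uint8_t buf[sizeof(mach_voucher_attr_recipe_data_t) + sizeof(payload)];
	mach_voucher_attr_recipe_t recipe = (mach_voucher_attr_recipe_t)buf;

	recipe->key              = MACH_VOUCHER_ATTR_KEY_USER_DATA;
	recipe->command          = MACH_VOUCHER_ATTR_USER_DATA_STORE;
	recipe->previous_voucher = MACH_VOUCHER_NAME_NULL;
	recipe->content_size     = sizeof(payload);
	memcpy(recipe->content, payload, sizeof(payload));

	/* recipesCnt is the byte length of the raw recipe array. */
	return host_create_mach_voucher(mach_host_self(),
	    (mach_voucher_attr_raw_recipe_array_t)buf, sizeof(buf), voucher_out);
}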
- */ + */ typedef uint64_t mach_voucher_attr_value_handle_t; typedef mach_voucher_attr_value_handle_t *mach_voucher_attr_value_handle_array_t; typedef mach_msg_type_number_t mach_voucher_attr_value_handle_array_size_t; -#define MACH_VOUCHER_ATTR_VALUE_MAX_NESTED ((mach_voucher_attr_value_handle_array_size_t)4) +#define MACH_VOUCHER_ATTR_VALUE_MAX_NESTED ((mach_voucher_attr_value_handle_array_size_t)4) typedef uint32_t mach_voucher_attr_value_reference_t; typedef uint32_t mach_voucher_attr_value_flags_t; @@ -241,12 +241,12 @@ typedef uint32_t mach_voucher_attr_value_flags_t; /* USE - TBD */ typedef uint32_t mach_voucher_attr_control_flags_t; -#define MACH_VOUCHER_ATTR_CONTROL_FLAGS_NONE ((mach_voucher_attr_control_flags_t)0) +#define MACH_VOUCHER_ATTR_CONTROL_FLAGS_NONE ((mach_voucher_attr_control_flags_t)0) /* * Commands and types for the IPC Importance Attribute Manager * - * These are the valid mach_voucher_attr_command() options with the + * These are the valid mach_voucher_attr_command() options with the * MACH_VOUCHER_ATTR_KEY_IMPORTANCE key. */ #define MACH_VOUCHER_IMPORTANCE_ATTR_ADD_EXTERNAL 1 /* Add some number of external refs (not supported) */ @@ -258,4 +258,4 @@ typedef uint32_t mach_voucher_attr_importance_refs; */ #define MACH_ACTIVITY_ID_COUNT_MAX 16 -#endif /* _MACH_VOUCHER_TYPES_H_ */ +#endif /* _MACH_VOUCHER_TYPES_H_ */ diff --git a/osfmk/mach/machine.h b/osfmk/mach/machine.h index b7a5d36b6..672bd17cb 100644 --- a/osfmk/mach/machine.h +++ b/osfmk/mach/machine.h @@ -3,7 +3,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +12,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,31 +23,31 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -58,7 +58,7 @@ * Machine independent machine abstraction. */ -#ifndef _MACH_MACHINE_H_ +#ifndef _MACH_MACHINE_H_ #define _MACH_MACHINE_H_ #ifndef __ASSEMBLER__ @@ -67,98 +67,98 @@ #include #include -typedef integer_t cpu_type_t; -typedef integer_t cpu_subtype_t; -typedef integer_t cpu_threadtype_t; +typedef integer_t cpu_type_t; +typedef integer_t cpu_subtype_t; +typedef integer_t cpu_threadtype_t; -#define CPU_STATE_MAX 4 +#define CPU_STATE_MAX 4 -#define CPU_STATE_USER 0 -#define CPU_STATE_SYSTEM 1 -#define CPU_STATE_IDLE 2 -#define CPU_STATE_NICE 3 +#define CPU_STATE_USER 0 +#define CPU_STATE_SYSTEM 1 +#define CPU_STATE_IDLE 2 +#define CPU_STATE_NICE 3 -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #include __BEGIN_DECLS -cpu_type_t cpu_type(void); +cpu_type_t cpu_type(void); -cpu_subtype_t cpu_subtype(void); +cpu_subtype_t cpu_subtype(void); -cpu_threadtype_t cpu_threadtype(void); +cpu_threadtype_t cpu_threadtype(void); __END_DECLS -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE struct machine_info { - integer_t major_version; /* kernel major version id */ - integer_t minor_version; /* kernel minor version id */ - integer_t max_cpus; /* max number of CPUs possible */ - uint32_t memory_size; /* size of memory in bytes, capped at 2 GB */ - uint64_t max_mem; /* actual size of physical memory */ - uint32_t physical_cpu; /* number of physical CPUs now available */ - integer_t physical_cpu_max; /* max number of physical CPUs possible */ - uint32_t logical_cpu; /* number of logical cpu now available */ - integer_t logical_cpu_max; /* max number of physical CPUs possible */ + integer_t major_version; /* kernel major version id */ + integer_t minor_version; /* kernel minor version id */ + integer_t max_cpus; /* max number of CPUs possible */ + uint32_t memory_size; /* size of memory in bytes, capped at 2 GB */ + uint64_t max_mem; /* actual size of physical memory */ + uint32_t physical_cpu; /* number of physical CPUs now available */ + integer_t physical_cpu_max; /* max number of physical CPUs possible */ + uint32_t logical_cpu; /* number of logical cpu now available */ + integer_t logical_cpu_max; /* max number of physical CPUs possible */ }; -typedef struct machine_info *machine_info_t; -typedef struct machine_info machine_info_data_t; +typedef struct machine_info *machine_info_t; +typedef struct machine_info machine_info_data_t; -extern struct machine_info machine_info; +extern struct machine_info machine_info; __BEGIN_DECLS -cpu_type_t slot_type( - int slot_num); +cpu_type_t slot_type( + int slot_num); -cpu_subtype_t slot_subtype( - int slot_num); +cpu_subtype_t slot_subtype( + int slot_num); -cpu_threadtype_t slot_threadtype( - int slot_num); +cpu_threadtype_t slot_threadtype( + int slot_num); __END_DECLS -#endif /* MACH_KERNEL_PRIVATE */ -#endif /* KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ /* * Capability bits used in the definition of cpu_type. 
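/*
 * Illustrative sketch: struct machine_info above is MACH_KERNEL_PRIVATE; the
 * user-space reflection of the same data is host_info(HOST_BASIC_INFO),
 * whose host_basic_info fields (max_cpus, memory_size capped at 2 GB,
 * max_mem, cpu_type/cpu_subtype, physical/logical CPU counts) mirror it.
 */
#include <mach/mach.h>
#include <stdio.h>

static void
print_host_basics(void)
{
	host_basic_info_data_t basic;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	if (host_info(mach_host_self(), HOST_BASIC_INFO,
	    (host_info_t)&basic, &count) == KERN_SUCCESS) {
		printf("cpu_type=%d cpu_subtype=%d max_cpus=%d max_mem=%llu\n",
		    basic.cpu_type, basic.cpu_subtype, basic.max_cpus,
		    (unsigned long long)basic.max_mem);
	}
}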
*/ -#define CPU_ARCH_MASK 0xff000000 /* mask for architecture bits */ -#define CPU_ARCH_ABI64 0x01000000 /* 64 bit ABI */ +#define CPU_ARCH_MASK 0xff000000 /* mask for architecture bits */ +#define CPU_ARCH_ABI64 0x01000000 /* 64 bit ABI */ /* * Machine types known by all. */ - -#define CPU_TYPE_ANY ((cpu_type_t) -1) -#define CPU_TYPE_VAX ((cpu_type_t) 1) +#define CPU_TYPE_ANY ((cpu_type_t) -1) + +#define CPU_TYPE_VAX ((cpu_type_t) 1) /* skip ((cpu_type_t) 2) */ /* skip ((cpu_type_t) 3) */ /* skip ((cpu_type_t) 4) */ /* skip ((cpu_type_t) 5) */ -#define CPU_TYPE_MC680x0 ((cpu_type_t) 6) -#define CPU_TYPE_X86 ((cpu_type_t) 7) -#define CPU_TYPE_I386 CPU_TYPE_X86 /* compatibility */ -#define CPU_TYPE_X86_64 (CPU_TYPE_X86 | CPU_ARCH_ABI64) +#define CPU_TYPE_MC680x0 ((cpu_type_t) 6) +#define CPU_TYPE_X86 ((cpu_type_t) 7) +#define CPU_TYPE_I386 CPU_TYPE_X86 /* compatibility */ +#define CPU_TYPE_X86_64 (CPU_TYPE_X86 | CPU_ARCH_ABI64) /* skip CPU_TYPE_MIPS ((cpu_type_t) 8) */ -/* skip ((cpu_type_t) 9) */ -#define CPU_TYPE_MC98000 ((cpu_type_t) 10) +/* skip ((cpu_type_t) 9) */ +#define CPU_TYPE_MC98000 ((cpu_type_t) 10) #define CPU_TYPE_HPPA ((cpu_type_t) 11) -#define CPU_TYPE_ARM ((cpu_type_t) 12) -#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64) -#define CPU_TYPE_MC88000 ((cpu_type_t) 13) -#define CPU_TYPE_SPARC ((cpu_type_t) 14) -#define CPU_TYPE_I860 ((cpu_type_t) 15) +#define CPU_TYPE_ARM ((cpu_type_t) 12) +#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64) +#define CPU_TYPE_MC88000 ((cpu_type_t) 13) +#define CPU_TYPE_SPARC ((cpu_type_t) 14) +#define CPU_TYPE_I860 ((cpu_type_t) 15) /* skip CPU_TYPE_ALPHA ((cpu_type_t) 16) */ /* skip ((cpu_type_t) 17) */ -#define CPU_TYPE_POWERPC ((cpu_type_t) 18) -#define CPU_TYPE_POWERPC64 (CPU_TYPE_POWERPC | CPU_ARCH_ABI64) +#define CPU_TYPE_POWERPC ((cpu_type_t) 18) +#define CPU_TYPE_POWERPC64 (CPU_TYPE_POWERPC | CPU_ARCH_ABI64) /* * Machine subtypes (these are defined here, instead of in a machine @@ -169,8 +169,8 @@ __END_DECLS /* * Capability bits used in the definition of cpu_subtype. */ -#define CPU_SUBTYPE_MASK 0xff000000 /* mask for feature flags */ -#define CPU_SUBTYPE_LIB64 0x80000000 /* 64 bit libraries */ +#define CPU_SUBTYPE_MASK 0xff000000 /* mask for feature flags */ +#define CPU_SUBTYPE_LIB64 0x80000000 /* 64 bit libraries */ /* @@ -184,42 +184,42 @@ __END_DECLS * It is the responsibility of the implementor to make sure the * software handles unsupported implementations elegantly. */ -#define CPU_SUBTYPE_MULTIPLE ((cpu_subtype_t) -1) -#define CPU_SUBTYPE_LITTLE_ENDIAN ((cpu_subtype_t) 0) -#define CPU_SUBTYPE_BIG_ENDIAN ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_MULTIPLE ((cpu_subtype_t) -1) +#define CPU_SUBTYPE_LITTLE_ENDIAN ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_BIG_ENDIAN ((cpu_subtype_t) 1) /* * Machine threadtypes. * This is none - not defined - for most machine types/subtypes. */ -#define CPU_THREADTYPE_NONE ((cpu_threadtype_t) 0) +#define CPU_THREADTYPE_NONE ((cpu_threadtype_t) 0) /* * VAX subtypes (these do *not* necessary conform to the actual cpu * ID assigned by DEC available via the SID register). 
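/*
 * Worked values for the capability-bit composition above:
 *
 *   CPU_TYPE_X86_64 = CPU_TYPE_X86 | CPU_ARCH_ABI64
 *                   = 0x00000007  | 0x01000000 = 0x01000007
 *   CPU_TYPE_ARM64  = CPU_TYPE_ARM | CPU_ARCH_ABI64
 *                   = 0x0000000C  | 0x01000000 = 0x0100000C
 *
 * so (cputype & ~CPU_ARCH_MASK) recovers the base architecture and
 * (cputype & CPU_ARCH_ABI64) tests for the 64-bit ABI.
 */
_Static_assert((12 | 0x01000000) == 0x0100000C, "CPU_TYPE_ARM64 composition");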
*/ -#define CPU_SUBTYPE_VAX_ALL ((cpu_subtype_t) 0) -#define CPU_SUBTYPE_VAX780 ((cpu_subtype_t) 1) -#define CPU_SUBTYPE_VAX785 ((cpu_subtype_t) 2) -#define CPU_SUBTYPE_VAX750 ((cpu_subtype_t) 3) -#define CPU_SUBTYPE_VAX730 ((cpu_subtype_t) 4) -#define CPU_SUBTYPE_UVAXI ((cpu_subtype_t) 5) -#define CPU_SUBTYPE_UVAXII ((cpu_subtype_t) 6) -#define CPU_SUBTYPE_VAX8200 ((cpu_subtype_t) 7) -#define CPU_SUBTYPE_VAX8500 ((cpu_subtype_t) 8) -#define CPU_SUBTYPE_VAX8600 ((cpu_subtype_t) 9) -#define CPU_SUBTYPE_VAX8650 ((cpu_subtype_t) 10) -#define CPU_SUBTYPE_VAX8800 ((cpu_subtype_t) 11) -#define CPU_SUBTYPE_UVAXIII ((cpu_subtype_t) 12) +#define CPU_SUBTYPE_VAX_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_VAX780 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_VAX785 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_VAX750 ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_VAX730 ((cpu_subtype_t) 4) +#define CPU_SUBTYPE_UVAXI ((cpu_subtype_t) 5) +#define CPU_SUBTYPE_UVAXII ((cpu_subtype_t) 6) +#define CPU_SUBTYPE_VAX8200 ((cpu_subtype_t) 7) +#define CPU_SUBTYPE_VAX8500 ((cpu_subtype_t) 8) +#define CPU_SUBTYPE_VAX8600 ((cpu_subtype_t) 9) +#define CPU_SUBTYPE_VAX8650 ((cpu_subtype_t) 10) +#define CPU_SUBTYPE_VAX8800 ((cpu_subtype_t) 11) +#define CPU_SUBTYPE_UVAXIII ((cpu_subtype_t) 12) /* - * 680x0 subtypes + * 680x0 subtypes * * The subtype definitions here are unusual for historical reasons. * NeXT used to consider 68030 code as generic 68000 code. For * backwards compatability: - * + * * CPU_SUBTYPE_MC68030 symbol has been preserved for source code * compatability. * @@ -230,119 +230,119 @@ __END_DECLS * files to be tagged as containing 68030-specific instructions. */ -#define CPU_SUBTYPE_MC680x0_ALL ((cpu_subtype_t) 1) -#define CPU_SUBTYPE_MC68030 ((cpu_subtype_t) 1) /* compat */ -#define CPU_SUBTYPE_MC68040 ((cpu_subtype_t) 2) -#define CPU_SUBTYPE_MC68030_ONLY ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_MC680x0_ALL ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_MC68030 ((cpu_subtype_t) 1) /* compat */ +#define CPU_SUBTYPE_MC68040 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_MC68030_ONLY ((cpu_subtype_t) 3) /* * I386 subtypes */ -#define CPU_SUBTYPE_INTEL(f, m) ((cpu_subtype_t) (f) + ((m) << 4)) - -#define CPU_SUBTYPE_I386_ALL CPU_SUBTYPE_INTEL(3, 0) -#define CPU_SUBTYPE_386 CPU_SUBTYPE_INTEL(3, 0) -#define CPU_SUBTYPE_486 CPU_SUBTYPE_INTEL(4, 0) -#define CPU_SUBTYPE_486SX CPU_SUBTYPE_INTEL(4, 8) // 8 << 4 = 128 -#define CPU_SUBTYPE_586 CPU_SUBTYPE_INTEL(5, 0) -#define CPU_SUBTYPE_PENT CPU_SUBTYPE_INTEL(5, 0) -#define CPU_SUBTYPE_PENTPRO CPU_SUBTYPE_INTEL(6, 1) -#define CPU_SUBTYPE_PENTII_M3 CPU_SUBTYPE_INTEL(6, 3) -#define CPU_SUBTYPE_PENTII_M5 CPU_SUBTYPE_INTEL(6, 5) -#define CPU_SUBTYPE_CELERON CPU_SUBTYPE_INTEL(7, 6) -#define CPU_SUBTYPE_CELERON_MOBILE CPU_SUBTYPE_INTEL(7, 7) -#define CPU_SUBTYPE_PENTIUM_3 CPU_SUBTYPE_INTEL(8, 0) -#define CPU_SUBTYPE_PENTIUM_3_M CPU_SUBTYPE_INTEL(8, 1) -#define CPU_SUBTYPE_PENTIUM_3_XEON CPU_SUBTYPE_INTEL(8, 2) -#define CPU_SUBTYPE_PENTIUM_M CPU_SUBTYPE_INTEL(9, 0) -#define CPU_SUBTYPE_PENTIUM_4 CPU_SUBTYPE_INTEL(10, 0) -#define CPU_SUBTYPE_PENTIUM_4_M CPU_SUBTYPE_INTEL(10, 1) -#define CPU_SUBTYPE_ITANIUM CPU_SUBTYPE_INTEL(11, 0) -#define CPU_SUBTYPE_ITANIUM_2 CPU_SUBTYPE_INTEL(11, 1) -#define CPU_SUBTYPE_XEON CPU_SUBTYPE_INTEL(12, 0) -#define CPU_SUBTYPE_XEON_MP CPU_SUBTYPE_INTEL(12, 1) - -#define CPU_SUBTYPE_INTEL_FAMILY(x) ((x) & 15) -#define CPU_SUBTYPE_INTEL_FAMILY_MAX 15 - -#define CPU_SUBTYPE_INTEL_MODEL(x) ((x) >> 4) -#define CPU_SUBTYPE_INTEL_MODEL_ALL 0 +#define CPU_SUBTYPE_INTEL(f, m) 
((cpu_subtype_t) (f) + ((m) << 4)) + +#define CPU_SUBTYPE_I386_ALL CPU_SUBTYPE_INTEL(3, 0) +#define CPU_SUBTYPE_386 CPU_SUBTYPE_INTEL(3, 0) +#define CPU_SUBTYPE_486 CPU_SUBTYPE_INTEL(4, 0) +#define CPU_SUBTYPE_486SX CPU_SUBTYPE_INTEL(4, 8) // 8 << 4 = 128 +#define CPU_SUBTYPE_586 CPU_SUBTYPE_INTEL(5, 0) +#define CPU_SUBTYPE_PENT CPU_SUBTYPE_INTEL(5, 0) +#define CPU_SUBTYPE_PENTPRO CPU_SUBTYPE_INTEL(6, 1) +#define CPU_SUBTYPE_PENTII_M3 CPU_SUBTYPE_INTEL(6, 3) +#define CPU_SUBTYPE_PENTII_M5 CPU_SUBTYPE_INTEL(6, 5) +#define CPU_SUBTYPE_CELERON CPU_SUBTYPE_INTEL(7, 6) +#define CPU_SUBTYPE_CELERON_MOBILE CPU_SUBTYPE_INTEL(7, 7) +#define CPU_SUBTYPE_PENTIUM_3 CPU_SUBTYPE_INTEL(8, 0) +#define CPU_SUBTYPE_PENTIUM_3_M CPU_SUBTYPE_INTEL(8, 1) +#define CPU_SUBTYPE_PENTIUM_3_XEON CPU_SUBTYPE_INTEL(8, 2) +#define CPU_SUBTYPE_PENTIUM_M CPU_SUBTYPE_INTEL(9, 0) +#define CPU_SUBTYPE_PENTIUM_4 CPU_SUBTYPE_INTEL(10, 0) +#define CPU_SUBTYPE_PENTIUM_4_M CPU_SUBTYPE_INTEL(10, 1) +#define CPU_SUBTYPE_ITANIUM CPU_SUBTYPE_INTEL(11, 0) +#define CPU_SUBTYPE_ITANIUM_2 CPU_SUBTYPE_INTEL(11, 1) +#define CPU_SUBTYPE_XEON CPU_SUBTYPE_INTEL(12, 0) +#define CPU_SUBTYPE_XEON_MP CPU_SUBTYPE_INTEL(12, 1) + +#define CPU_SUBTYPE_INTEL_FAMILY(x) ((x) & 15) +#define CPU_SUBTYPE_INTEL_FAMILY_MAX 15 + +#define CPU_SUBTYPE_INTEL_MODEL(x) ((x) >> 4) +#define CPU_SUBTYPE_INTEL_MODEL_ALL 0 /* * X86 subtypes. */ -#define CPU_SUBTYPE_X86_ALL ((cpu_subtype_t)3) -#define CPU_SUBTYPE_X86_64_ALL ((cpu_subtype_t)3) -#define CPU_SUBTYPE_X86_ARCH1 ((cpu_subtype_t)4) -#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell feature subset */ +#define CPU_SUBTYPE_X86_ALL ((cpu_subtype_t)3) +#define CPU_SUBTYPE_X86_64_ALL ((cpu_subtype_t)3) +#define CPU_SUBTYPE_X86_ARCH1 ((cpu_subtype_t)4) +#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell feature subset */ -#define CPU_THREADTYPE_INTEL_HTT ((cpu_threadtype_t) 1) +#define CPU_THREADTYPE_INTEL_HTT ((cpu_threadtype_t) 1) /* * Mips subtypes. */ -#define CPU_SUBTYPE_MIPS_ALL ((cpu_subtype_t) 0) -#define CPU_SUBTYPE_MIPS_R2300 ((cpu_subtype_t) 1) -#define CPU_SUBTYPE_MIPS_R2600 ((cpu_subtype_t) 2) -#define CPU_SUBTYPE_MIPS_R2800 ((cpu_subtype_t) 3) -#define CPU_SUBTYPE_MIPS_R2000a ((cpu_subtype_t) 4) /* pmax */ -#define CPU_SUBTYPE_MIPS_R2000 ((cpu_subtype_t) 5) -#define CPU_SUBTYPE_MIPS_R3000a ((cpu_subtype_t) 6) /* 3max */ -#define CPU_SUBTYPE_MIPS_R3000 ((cpu_subtype_t) 7) +#define CPU_SUBTYPE_MIPS_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_MIPS_R2300 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_MIPS_R2600 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_MIPS_R2800 ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_MIPS_R2000a ((cpu_subtype_t) 4) /* pmax */ +#define CPU_SUBTYPE_MIPS_R2000 ((cpu_subtype_t) 5) +#define CPU_SUBTYPE_MIPS_R3000a ((cpu_subtype_t) 6) /* 3max */ +#define CPU_SUBTYPE_MIPS_R3000 ((cpu_subtype_t) 7) /* * MC98000 (PowerPC) subtypes */ -#define CPU_SUBTYPE_MC98000_ALL ((cpu_subtype_t) 0) -#define CPU_SUBTYPE_MC98601 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_MC98000_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_MC98601 ((cpu_subtype_t) 1) /* * HPPA subtypes for Hewlett-Packard HP-PA family of - * risc processors. Port by NeXT to 700 series. + * risc processors. Port by NeXT to 700 series. 
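/*
 * [Illustrative sketch, not part of the patch] CPU_SUBTYPE_INTEL(f, m) packs
 * the family into the low 4 bits and the model into the bits above, which the
 * CPU_SUBTYPE_INTEL_FAMILY/_MODEL accessors invert. A self-contained check
 * (local SK_* mirrors of the header macros, so this compiles standalone):
 */
#include <assert.h>

#define SK_INTEL(f, m)  ((f) + ((m) << 4))  /* mirrors CPU_SUBTYPE_INTEL    */
#define SK_FAMILY(x)    ((x) & 15)          /* mirrors ..._INTEL_FAMILY(x)  */
#define SK_MODEL(x)     ((x) >> 4)          /* mirrors ..._INTEL_MODEL(x)   */

int
main(void)
{
	int sx = SK_INTEL(4, 8);        /* CPU_SUBTYPE_486SX                    */
	assert(sx == 132);              /* 4 + (8 << 4); the "8 << 4 = 128" note */
	assert(SK_FAMILY(sx) == 4);     /* 486 family                           */
	assert(SK_MODEL(sx) == 8);      /* SX model                             */
	return 0;
}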
*/ -#define CPU_SUBTYPE_HPPA_ALL ((cpu_subtype_t) 0) -#define CPU_SUBTYPE_HPPA_7100 ((cpu_subtype_t) 0) /* compat */ -#define CPU_SUBTYPE_HPPA_7100LC ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_HPPA_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_HPPA_7100 ((cpu_subtype_t) 0) /* compat */ +#define CPU_SUBTYPE_HPPA_7100LC ((cpu_subtype_t) 1) /* * MC88000 subtypes. */ -#define CPU_SUBTYPE_MC88000_ALL ((cpu_subtype_t) 0) -#define CPU_SUBTYPE_MC88100 ((cpu_subtype_t) 1) -#define CPU_SUBTYPE_MC88110 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_MC88000_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_MC88100 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_MC88110 ((cpu_subtype_t) 2) /* * SPARC subtypes */ -#define CPU_SUBTYPE_SPARC_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_SPARC_ALL ((cpu_subtype_t) 0) /* * I860 subtypes */ -#define CPU_SUBTYPE_I860_ALL ((cpu_subtype_t) 0) -#define CPU_SUBTYPE_I860_860 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_I860_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_I860_860 ((cpu_subtype_t) 1) /* * PowerPC subtypes */ -#define CPU_SUBTYPE_POWERPC_ALL ((cpu_subtype_t) 0) -#define CPU_SUBTYPE_POWERPC_601 ((cpu_subtype_t) 1) -#define CPU_SUBTYPE_POWERPC_602 ((cpu_subtype_t) 2) -#define CPU_SUBTYPE_POWERPC_603 ((cpu_subtype_t) 3) -#define CPU_SUBTYPE_POWERPC_603e ((cpu_subtype_t) 4) -#define CPU_SUBTYPE_POWERPC_603ev ((cpu_subtype_t) 5) -#define CPU_SUBTYPE_POWERPC_604 ((cpu_subtype_t) 6) -#define CPU_SUBTYPE_POWERPC_604e ((cpu_subtype_t) 7) -#define CPU_SUBTYPE_POWERPC_620 ((cpu_subtype_t) 8) -#define CPU_SUBTYPE_POWERPC_750 ((cpu_subtype_t) 9) -#define CPU_SUBTYPE_POWERPC_7400 ((cpu_subtype_t) 10) -#define CPU_SUBTYPE_POWERPC_7450 ((cpu_subtype_t) 11) -#define CPU_SUBTYPE_POWERPC_970 ((cpu_subtype_t) 100) +#define CPU_SUBTYPE_POWERPC_ALL ((cpu_subtype_t) 0) +#define CPU_SUBTYPE_POWERPC_601 ((cpu_subtype_t) 1) +#define CPU_SUBTYPE_POWERPC_602 ((cpu_subtype_t) 2) +#define CPU_SUBTYPE_POWERPC_603 ((cpu_subtype_t) 3) +#define CPU_SUBTYPE_POWERPC_603e ((cpu_subtype_t) 4) +#define CPU_SUBTYPE_POWERPC_603ev ((cpu_subtype_t) 5) +#define CPU_SUBTYPE_POWERPC_604 ((cpu_subtype_t) 6) +#define CPU_SUBTYPE_POWERPC_604e ((cpu_subtype_t) 7) +#define CPU_SUBTYPE_POWERPC_620 ((cpu_subtype_t) 8) +#define CPU_SUBTYPE_POWERPC_750 ((cpu_subtype_t) 9) +#define CPU_SUBTYPE_POWERPC_7400 ((cpu_subtype_t) 10) +#define CPU_SUBTYPE_POWERPC_7450 ((cpu_subtype_t) 11) +#define CPU_SUBTYPE_POWERPC_970 ((cpu_subtype_t) 100) /* * ARM subtypes @@ -351,16 +351,16 @@ __END_DECLS #define CPU_SUBTYPE_ARM_V4T ((cpu_subtype_t) 5) #define CPU_SUBTYPE_ARM_V6 ((cpu_subtype_t) 6) #define CPU_SUBTYPE_ARM_V5TEJ ((cpu_subtype_t) 7) -#define CPU_SUBTYPE_ARM_XSCALE ((cpu_subtype_t) 8) -#define CPU_SUBTYPE_ARM_V7 ((cpu_subtype_t) 9) -#define CPU_SUBTYPE_ARM_V7F ((cpu_subtype_t) 10) /* Cortex A9 */ -#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t) 11) /* Swift */ -#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t) 12) -#define CPU_SUBTYPE_ARM_V6M ((cpu_subtype_t) 14) /* Not meant to be run under xnu */ -#define CPU_SUBTYPE_ARM_V7M ((cpu_subtype_t) 15) /* Not meant to be run under xnu */ -#define CPU_SUBTYPE_ARM_V7EM ((cpu_subtype_t) 16) /* Not meant to be run under xnu */ +#define CPU_SUBTYPE_ARM_XSCALE ((cpu_subtype_t) 8) +#define CPU_SUBTYPE_ARM_V7 ((cpu_subtype_t) 9) +#define CPU_SUBTYPE_ARM_V7F ((cpu_subtype_t) 10) /* Cortex A9 */ +#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t) 11) /* Swift */ +#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t) 12) +#define CPU_SUBTYPE_ARM_V6M ((cpu_subtype_t) 14) /* Not meant to be run under xnu */ +#define 
CPU_SUBTYPE_ARM_V7M ((cpu_subtype_t) 15) /* Not meant to be run under xnu */ +#define CPU_SUBTYPE_ARM_V7EM ((cpu_subtype_t) 16) /* Not meant to be run under xnu */ -#define CPU_SUBTYPE_ARM_V8 ((cpu_subtype_t) 13) +#define CPU_SUBTYPE_ARM_V8 ((cpu_subtype_t) 13) /* * ARM64 subtypes @@ -382,37 +382,37 @@ __END_DECLS * Use feature flags (eg, hw.optional.altivec) to test for optional * functionality. */ -#define CPUFAMILY_UNKNOWN 0 -#define CPUFAMILY_POWERPC_G3 0xcee41549 -#define CPUFAMILY_POWERPC_G4 0x77c184ae -#define CPUFAMILY_POWERPC_G5 0xed76d8aa -#define CPUFAMILY_INTEL_6_13 0xaa33392b -#define CPUFAMILY_INTEL_PENRYN 0x78ea4fbc -#define CPUFAMILY_INTEL_NEHALEM 0x6b5a4cd2 -#define CPUFAMILY_INTEL_WESTMERE 0x573b5eec -#define CPUFAMILY_INTEL_SANDYBRIDGE 0x5490b78c -#define CPUFAMILY_INTEL_IVYBRIDGE 0x1f65e835 -#define CPUFAMILY_INTEL_HASWELL 0x10b282dc -#define CPUFAMILY_INTEL_BROADWELL 0x582ed09c -#define CPUFAMILY_INTEL_SKYLAKE 0x37fc219f -#define CPUFAMILY_INTEL_KABYLAKE 0x0f817246 -#define CPUFAMILY_ARM_9 0xe73283ae -#define CPUFAMILY_ARM_11 0x8ff620d8 -#define CPUFAMILY_ARM_XSCALE 0x53b005f5 +#define CPUFAMILY_UNKNOWN 0 +#define CPUFAMILY_POWERPC_G3 0xcee41549 +#define CPUFAMILY_POWERPC_G4 0x77c184ae +#define CPUFAMILY_POWERPC_G5 0xed76d8aa +#define CPUFAMILY_INTEL_6_13 0xaa33392b +#define CPUFAMILY_INTEL_PENRYN 0x78ea4fbc +#define CPUFAMILY_INTEL_NEHALEM 0x6b5a4cd2 +#define CPUFAMILY_INTEL_WESTMERE 0x573b5eec +#define CPUFAMILY_INTEL_SANDYBRIDGE 0x5490b78c +#define CPUFAMILY_INTEL_IVYBRIDGE 0x1f65e835 +#define CPUFAMILY_INTEL_HASWELL 0x10b282dc +#define CPUFAMILY_INTEL_BROADWELL 0x582ed09c +#define CPUFAMILY_INTEL_SKYLAKE 0x37fc219f +#define CPUFAMILY_INTEL_KABYLAKE 0x0f817246 +#define CPUFAMILY_ARM_9 0xe73283ae +#define CPUFAMILY_ARM_11 0x8ff620d8 +#define CPUFAMILY_ARM_XSCALE 0x53b005f5 #define CPUFAMILY_ARM_12 0xbd1b0ae9 -#define CPUFAMILY_ARM_13 0x0cc90e64 -#define CPUFAMILY_ARM_14 0x96077ef1 -#define CPUFAMILY_ARM_15 0xa8511bca -#define CPUFAMILY_ARM_SWIFT 0x1e2d6381 -#define CPUFAMILY_ARM_CYCLONE 0x37a09642 -#define CPUFAMILY_ARM_TYPHOON 0x2c91a47e -#define CPUFAMILY_ARM_TWISTER 0x92fb37c8 -#define CPUFAMILY_ARM_HURRICANE 0x67ceee93 -#define CPUFAMILY_ARM_MONSOON_MISTRAL 0xe81e7ef6 +#define CPUFAMILY_ARM_13 0x0cc90e64 +#define CPUFAMILY_ARM_14 0x96077ef1 +#define CPUFAMILY_ARM_15 0xa8511bca +#define CPUFAMILY_ARM_SWIFT 0x1e2d6381 +#define CPUFAMILY_ARM_CYCLONE 0x37a09642 +#define CPUFAMILY_ARM_TYPHOON 0x2c91a47e +#define CPUFAMILY_ARM_TWISTER 0x92fb37c8 +#define CPUFAMILY_ARM_HURRICANE 0x67ceee93 +#define CPUFAMILY_ARM_MONSOON_MISTRAL 0xe81e7ef6 /* The following synonyms are deprecated: */ -#define CPUFAMILY_INTEL_6_23 CPUFAMILY_INTEL_PENRYN -#define CPUFAMILY_INTEL_6_26 CPUFAMILY_INTEL_NEHALEM +#define CPUFAMILY_INTEL_6_23 CPUFAMILY_INTEL_PENRYN +#define CPUFAMILY_INTEL_6_26 CPUFAMILY_INTEL_NEHALEM -#endif /* _MACH_MACHINE_H_ */ +#endif /* _MACH_MACHINE_H_ */ diff --git a/osfmk/mach/machine/_structs.h b/osfmk/mach/machine/_structs.h index 89aa41f91..858368fc5 100644 --- a/osfmk/mach/machine/_structs.h +++ b/osfmk/mach/machine/_structs.h @@ -2,7 +2,7 @@ * Copyright (c) 2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
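/*
 * [Illustrative sketch, not part of the patch] The arch bits compose the
 * 64-bit types in this header: CPU_TYPE_ARM64 is just CPU_TYPE_ARM with the
 * ABI64 flag set, so consumers (for example, fat Mach-O slice matching) test
 * the flag and strip CPU_ARCH_MASK to recover the base type. SK_* mirror the
 * header so the sketch stands alone:
 */
#include <stdio.h>

typedef int cpu_type_t;

#define SK_CPU_ARCH_MASK  0xff000000
#define SK_CPU_ARCH_ABI64 0x01000000
#define SK_CPU_TYPE_ARM   ((cpu_type_t) 12)
#define SK_CPU_TYPE_ARM64 (SK_CPU_TYPE_ARM | SK_CPU_ARCH_ABI64)

int
main(void)
{
	cpu_type_t t = SK_CPU_TYPE_ARM64;
	int is64 = (t & SK_CPU_ARCH_ABI64) != 0;  /* 64-bit ABI bit set?     */
	cpu_type_t base = t & ~SK_CPU_ARCH_MASK;  /* strip the arch bits     */
	printf("base=%d is64=%d\n", base, is64);  /* prints "base=12 is64=1" */
	return 0;
}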
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/boolean.h b/osfmk/mach/machine/boolean.h index 7df6d4f68..dcc5d133f 100644 --- a/osfmk/mach/machine/boolean.h +++ b/osfmk/mach/machine/boolean.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/exception.h b/osfmk/mach/machine/exception.h index c0e76acfe..b98d4b0ef 100644 --- a/osfmk/mach/machine/exception.h +++ b/osfmk/mach/machine/exception.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/kern_return.h b/osfmk/mach/machine/kern_return.h index a948cf4b4..adae6e8bc 100644 --- a/osfmk/mach/machine/kern_return.h +++ b/osfmk/mach/machine/kern_return.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/ndr_def.h b/osfmk/mach/machine/ndr_def.h index 15c17d31a..7d38c0b1c 100644 --- a/osfmk/mach/machine/ndr_def.h +++ b/osfmk/mach/machine/ndr_def.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/processor_info.h b/osfmk/mach/machine/processor_info.h index 237fad686..8150d716a 100644 --- a/osfmk/mach/machine/processor_info.h +++ b/osfmk/mach/machine/processor_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/rpc.h b/osfmk/mach/machine/rpc.h index fc4ccbad9..37f902472 100644 --- a/osfmk/mach/machine/rpc.h +++ b/osfmk/mach/machine/rpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,4 +37,4 @@ #error architecture not supported #endif -#endif /* _MACH_MACHINE_RPC_H_ */ +#endif /* _MACH_MACHINE_RPC_H_ */ diff --git a/osfmk/mach/machine/sdt.h b/osfmk/mach/machine/sdt.h index 599d6b944..cceb7b419 100644 --- a/osfmk/mach/machine/sdt.h +++ b/osfmk/mach/machine/sdt.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007 Apple Inc. All rights reserved. + * Copyright (c) 2007-2019 Apple Inc. All rights reserved. */ /* * CDDL HEADER START @@ -28,7 +28,7 @@ */ #ifndef _MACH_MACHINE_SYS_SDT_H -#define _MACH_MACHINE_SYS_SDT_H +#define _MACH_MACHINE_SYS_SDT_H #include @@ -42,113 +42,113 @@ * types is undefined. 
*/ -#define DTRACE_PROBE(provider, name) { \ - DTRACE_CALL0ARGS(provider, name) \ +#define DTRACE_PROBE(provider, name) { \ + DTRACE_CALL0ARGS(provider, name) \ } -#define DTRACE_PROBE1(provider, name, arg0) { \ - uintptr_t __dtrace_args[ARG1_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - DTRACE_CALL1ARG(provider, name) \ +#define DTRACE_PROBE1(provider, name, arg0) { \ + uintptr_t __dtrace_args[ARG1_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + DTRACE_CALL1ARG(provider, name) \ } -#define DTRACE_PROBE2(provider, name, arg0, arg1) { \ - uintptr_t __dtrace_args[ARGS2_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - DTRACE_CALL2ARGS(provider, name) \ +#define DTRACE_PROBE2(provider, name, arg0, arg1) { \ + uintptr_t __dtrace_args[ARGS2_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + DTRACE_CALL2ARGS(provider, name) \ } -#define DTRACE_PROBE3(provider, name, arg0, arg1, arg2) { \ - uintptr_t __dtrace_args[ARGS3_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - __dtrace_args[2] = (uintptr_t)arg2; \ - DTRACE_CALL3ARGS(provider, name) \ +#define DTRACE_PROBE3(provider, name, arg0, arg1, arg2) { \ + uintptr_t __dtrace_args[ARGS3_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + __dtrace_args[2] = (uintptr_t)arg2; \ + DTRACE_CALL3ARGS(provider, name) \ } -#define DTRACE_PROBE4(provider, name, arg0, arg1, arg2, arg3) { \ - uintptr_t __dtrace_args[ARGS4_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - __dtrace_args[2] = (uintptr_t)arg2; \ - __dtrace_args[3] = (uintptr_t)arg3; \ - DTRACE_CALL4ARGS(provider, name) \ +#define DTRACE_PROBE4(provider, name, arg0, arg1, arg2, arg3) { \ + uintptr_t __dtrace_args[ARGS4_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + __dtrace_args[2] = (uintptr_t)arg2; \ + __dtrace_args[3] = (uintptr_t)arg3; \ + DTRACE_CALL4ARGS(provider, name) \ } -#define DTRACE_PROBE5(provider, name, arg0, arg1, arg2, arg3, arg4) { \ - uintptr_t __dtrace_args[ARGS5_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - __dtrace_args[2] = (uintptr_t)arg2; \ - __dtrace_args[3] = (uintptr_t)arg3; \ - __dtrace_args[4] = (uintptr_t)arg4; \ - DTRACE_CALL5ARGS(provider, name) \ +#define DTRACE_PROBE5(provider, name, arg0, arg1, arg2, arg3, arg4) { \ + uintptr_t __dtrace_args[ARGS5_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + __dtrace_args[2] = (uintptr_t)arg2; \ + __dtrace_args[3] = (uintptr_t)arg3; \ + __dtrace_args[4] = (uintptr_t)arg4; \ + DTRACE_CALL5ARGS(provider, name) \ } -#define DTRACE_PROBE6(provider, name, arg0, arg1, arg2, arg3, arg4, arg5) { \ - uintptr_t __dtrace_args[ARGS6_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - __dtrace_args[2] = (uintptr_t)arg2; \ - __dtrace_args[3] = (uintptr_t)arg3; \ - __dtrace_args[4] = (uintptr_t)arg4; \ - __dtrace_args[5] = (uintptr_t)arg5; \ - DTRACE_CALL6ARGS(provider, name) \ +#define DTRACE_PROBE6(provider, 
name, arg0, arg1, arg2, arg3, arg4, arg5) { \ + uintptr_t __dtrace_args[ARGS6_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + __dtrace_args[2] = (uintptr_t)arg2; \ + __dtrace_args[3] = (uintptr_t)arg3; \ + __dtrace_args[4] = (uintptr_t)arg4; \ + __dtrace_args[5] = (uintptr_t)arg5; \ + DTRACE_CALL6ARGS(provider, name) \ } -#define DTRACE_PROBE7(provider, name, arg0, arg1, arg2, arg3, arg4, arg5, arg6) { \ - uintptr_t __dtrace_args[ARGS7_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - __dtrace_args[2] = (uintptr_t)arg2; \ - __dtrace_args[3] = (uintptr_t)arg3; \ - __dtrace_args[4] = (uintptr_t)arg4; \ - __dtrace_args[5] = (uintptr_t)arg5; \ - __dtrace_args[6] = (uintptr_t)arg6; \ - DTRACE_CALL7ARGS(provider, name) \ +#define DTRACE_PROBE7(provider, name, arg0, arg1, arg2, arg3, arg4, arg5, arg6) { \ + uintptr_t __dtrace_args[ARGS7_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + __dtrace_args[2] = (uintptr_t)arg2; \ + __dtrace_args[3] = (uintptr_t)arg3; \ + __dtrace_args[4] = (uintptr_t)arg4; \ + __dtrace_args[5] = (uintptr_t)arg5; \ + __dtrace_args[6] = (uintptr_t)arg6; \ + DTRACE_CALL7ARGS(provider, name) \ } -#define DTRACE_PROBE8(provider, name, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) { \ - uintptr_t __dtrace_args[ARGS8_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - __dtrace_args[2] = (uintptr_t)arg2; \ - __dtrace_args[3] = (uintptr_t)arg3; \ - __dtrace_args[4] = (uintptr_t)arg4; \ - __dtrace_args[5] = (uintptr_t)arg5; \ - __dtrace_args[6] = (uintptr_t)arg6; \ - __dtrace_args[7] = (uintptr_t)arg7; \ - DTRACE_CALL8ARGS(provider, name) \ +#define DTRACE_PROBE8(provider, name, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) { \ + uintptr_t __dtrace_args[ARGS8_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + __dtrace_args[2] = (uintptr_t)arg2; \ + __dtrace_args[3] = (uintptr_t)arg3; \ + __dtrace_args[4] = (uintptr_t)arg4; \ + __dtrace_args[5] = (uintptr_t)arg5; \ + __dtrace_args[6] = (uintptr_t)arg6; \ + __dtrace_args[7] = (uintptr_t)arg7; \ + DTRACE_CALL8ARGS(provider, name) \ } -#define DTRACE_PROBE9(provider, name, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) { \ - uintptr_t __dtrace_args[ARGS9_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - __dtrace_args[2] = (uintptr_t)arg2; \ - __dtrace_args[3] = (uintptr_t)arg3; \ - __dtrace_args[4] = (uintptr_t)arg4; \ - __dtrace_args[5] = (uintptr_t)arg5; \ - __dtrace_args[6] = (uintptr_t)arg6; \ - __dtrace_args[7] = (uintptr_t)arg7; \ - __dtrace_args[8] = (uintptr_t)arg8; \ - DTRACE_CALL9ARGS(provider, name) \ +#define DTRACE_PROBE9(provider, name, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) { \ + uintptr_t __dtrace_args[ARGS9_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + __dtrace_args[2] = (uintptr_t)arg2; \ + __dtrace_args[3] = (uintptr_t)arg3; \ + __dtrace_args[4] = (uintptr_t)arg4; \ + __dtrace_args[5] = (uintptr_t)arg5; \ + __dtrace_args[6] = (uintptr_t)arg6; \ + __dtrace_args[7] = (uintptr_t)arg7; \ + __dtrace_args[8] = (uintptr_t)arg8; \ + DTRACE_CALL9ARGS(provider, name) \ } -#define 
DTRACE_PROBE10(provider, name, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) { \ - uintptr_t __dtrace_args[ARGS10_EXTENT] __attribute__ ((aligned (16))); \ - __dtrace_args[0] = (uintptr_t)arg0; \ - __dtrace_args[1] = (uintptr_t)arg1; \ - __dtrace_args[2] = (uintptr_t)arg2; \ - __dtrace_args[3] = (uintptr_t)arg3; \ - __dtrace_args[4] = (uintptr_t)arg4; \ - __dtrace_args[5] = (uintptr_t)arg5; \ - __dtrace_args[6] = (uintptr_t)arg6; \ - __dtrace_args[7] = (uintptr_t)arg7; \ - __dtrace_args[8] = (uintptr_t)arg8; \ - __dtrace_args[9] = (uintptr_t)arg9; \ - DTRACE_CALL10ARGS(provider, name) \ +#define DTRACE_PROBE10(provider, name, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) { \ + uintptr_t __dtrace_args[ARGS10_EXTENT] __attribute__ ((aligned (16))); \ + __dtrace_args[0] = (uintptr_t)arg0; \ + __dtrace_args[1] = (uintptr_t)arg1; \ + __dtrace_args[2] = (uintptr_t)arg2; \ + __dtrace_args[3] = (uintptr_t)arg3; \ + __dtrace_args[4] = (uintptr_t)arg4; \ + __dtrace_args[5] = (uintptr_t)arg5; \ + __dtrace_args[6] = (uintptr_t)arg6; \ + __dtrace_args[7] = (uintptr_t)arg7; \ + __dtrace_args[8] = (uintptr_t)arg8; \ + __dtrace_args[9] = (uintptr_t)arg9; \ + DTRACE_CALL10ARGS(provider, name) \ } #else @@ -166,216 +166,222 @@ #endif /* CONFIG_DTRACE */ -#define DTRACE_SCHED(name) \ +#define DTRACE_SCHED(name) \ DTRACE_PROBE(__sched_, name); -#define DTRACE_SCHED1(name, type1, arg1) \ +#define DTRACE_SCHED1(name, type1, arg1) \ DTRACE_PROBE1(__sched_, name, arg1); -#define DTRACE_SCHED2(name, type1, arg1, type2, arg2) \ +#define DTRACE_SCHED2(name, type1, arg1, type2, arg2) \ DTRACE_PROBE2(__sched_, name, arg1, arg2); -#define DTRACE_SCHED3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_SCHED3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__sched_, name, arg1, arg2, arg3); -#define DTRACE_SCHED4(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4) \ +#define DTRACE_SCHED4(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4) \ DTRACE_PROBE4(__sched_, name, arg1, arg2, arg3, arg4); -#define DTRACE_PROC(name) \ +#define DTRACE_PROC(name) \ DTRACE_PROBE(__proc_, name); -#define DTRACE_PROC1(name, type1, arg1) \ +#define DTRACE_PROC1(name, type1, arg1) \ DTRACE_PROBE1(__proc_, name, arg1); -#define DTRACE_PROC2(name, type1, arg1, type2, arg2) \ +#define DTRACE_PROC2(name, type1, arg1, type2, arg2) \ DTRACE_PROBE2(__proc_, name, arg1, arg2); -#define DTRACE_PROC3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_PROC3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__proc_, name, arg1, arg2, arg3); -#define DTRACE_PROC4(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4) \ +#define DTRACE_PROC4(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4) \ DTRACE_PROBE4(__proc_, name, arg1, arg2, arg3, arg4); -#define DTRACE_IO(name) \ +#define DTRACE_IO(name) \ DTRACE_PROBE(__io_, name); -#define DTRACE_IO1(name, type1, arg1) \ +#define DTRACE_IO1(name, type1, arg1) \ DTRACE_PROBE1(__io_, name, arg1); -#define DTRACE_IO2(name, type1, arg1, type2, arg2) \ +#define DTRACE_IO2(name, type1, arg1, type2, arg2) \ DTRACE_PROBE2(__io_, name, arg1, arg2); -#define DTRACE_IO3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_IO3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__io_, name, arg1, arg2, arg3); -#define DTRACE_IO4(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4) \ +#define DTRACE_IO4(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4) \ 
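/*
 * [Illustrative usage, not part of the patch] In the provider wrappers above
 * (DTRACE_SCHED*, DTRACE_PROC*, DTRACE_IO*), each typeN argument exists only
 * for the DTrace translator; the C expansion discards it and forwards argN to
 * DTRACE_PROBEn, which casts every argument to uintptr_t. A hypothetical
 * kernel call site (names invented for the sketch):
 */
struct thread;

static void
sketch_wakeup(struct thread *t, int cpu)
{
	/* expands to DTRACE_PROBE2(__sched_, wakeup, t, cpu) */
	DTRACE_SCHED2(wakeup, struct thread *, t, int, cpu);
}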
DTRACE_PROBE4(__io_, name, arg1, arg2, arg3, arg4); -#define DTRACE_INT5(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4, type5, arg5) \ +#define DTRACE_INT5(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4, type5, arg5) \ DTRACE_PROBE5(__sdt_, name, arg1, arg2, arg3, arg4, arg5); -#define DTRACE_MEMORYSTATUS2(name, type1, arg1, type2, arg2) \ +#define DTRACE_MEMORYSTATUS2(name, type1, arg1, type2, arg2) \ DTRACE_PROBE2(__sdt_, name, arg1, arg2); -#define DTRACE_MEMORYSTATUS3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_MEMORYSTATUS3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__sdt_, name, arg1, arg2, arg3); -#define DTRACE_MEMORYSTATUS6(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4, type5, arg5, type6, arg6) \ +#define DTRACE_MEMORYSTATUS6(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4, type5, arg5, type6, arg6) \ DTRACE_PROBE6(__vminfo_, name, arg1, arg2, arg3, arg4, arg5, arg6) -#define DTRACE_TMR3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_TMR3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__sdt_, name, arg1, arg2, arg3); -#define DTRACE_TMR4(name, type1, arg1, arg2, arg3, arg4) \ +#define DTRACE_TMR4(name, type1, arg1, arg2, arg3, arg4) \ DTRACE_PROBE4(__sdt_, name, arg1, arg2, arg3, arg4); -#define DTRACE_TMR5(name, type1, arg1, type2, arg2, type3, arg3, arg4, arg5) \ +#define DTRACE_TMR5(name, type1, arg1, type2, arg2, type3, arg3, arg4, arg5) \ DTRACE_PROBE5(__sdt_, name, arg1, arg2, arg3, arg4, arg5); -#define DTRACE_TMR6(name, type1, arg1, type2, arg2, type3, arg3, arg4, arg5, arg6) \ +#define DTRACE_TMR6(name, type1, arg1, type2, arg2, type3, arg3, arg4, arg5, arg6) \ DTRACE_PROBE6(__sdt_, name, arg1, arg2, arg3, arg4, arg5, arg6); -#define DTRACE_TMR7(name, type1, arg1, type2, arg2, type3, arg3, arg4, arg5, arg6, arg7) \ +#define DTRACE_TMR7(name, type1, arg1, type2, arg2, type3, arg3, arg4, arg5, arg6, arg7) \ DTRACE_PROBE7(__sdt_, name, arg1, arg2, arg3, arg4, arg5, arg6, arg7); -#define DTRACE_PHYSLAT3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_PHYSLAT3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__sdt_, name, arg1, arg2, arg3); -#define DTRACE_VM(name) \ +#define DTRACE_PHYSLAT4(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \ + DTRACE_PROBE4(__sdt_, name, arg1, arg2, arg3, arg4); + +#define DTRACE_PHYSLAT5(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4, type5, arg5) \ + DTRACE_PROBE5(__sdt_, name, arg1, arg2, arg3, arg4, arg5); + +#define DTRACE_VM(name) \ DTRACE_PROBE(__vminfo_, name) -#define DTRACE_VM1(name, type1, arg1) \ +#define DTRACE_VM1(name, type1, arg1) \ DTRACE_PROBE1(__vminfo_, name, arg1) -#define DTRACE_VM2(name, type1, arg1, type2, arg2) \ +#define DTRACE_VM2(name, type1, arg1, type2, arg2) \ DTRACE_PROBE2(__vminfo_, name, arg1, arg2) -#define DTRACE_VM3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_VM3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__vminfo_, name, arg1, arg2, arg3) -#define DTRACE_VM4(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4) \ +#define DTRACE_VM4(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4) \ DTRACE_PROBE4(__vminfo_, name, arg1, arg2, arg3, arg4) -#define DTRACE_VM5(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4, type5, arg5) \ +#define DTRACE_VM5(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4, type5, arg5) \ DTRACE_PROBE5(__vminfo_, name, arg1, arg2, arg3, arg4, 
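/*
 * [Illustrative, not part of the patch] Unlike the surrounding
 * whitespace-only reflow, DTRACE_PHYSLAT4 and DTRACE_PHYSLAT5 above are
 * among the few functional additions in this hunk, extending the __sdt_
 * physical-latency probes from three to four and five argument pairs. A
 * hypothetical call site (all names invented):
 */
DTRACE_PHYSLAT5(iodone, struct buf *, bp, uint64_t, lat_ns,
    int, qdepth, int, cpu, int, err);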
arg5) -#define DTRACE_VM6(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4, type5, arg5, type6, arg6) \ +#define DTRACE_VM6(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4, type5, arg5, type6, arg6) \ DTRACE_PROBE6(__vminfo_, name, arg1, arg2, arg3, arg4, arg5, arg6) -#define DTRACE_IP(name) \ +#define DTRACE_IP(name) \ DTRACE_PROBE(__ip_, name) -#define DTRACE_IP1(name, type1, arg1) \ +#define DTRACE_IP1(name, type1, arg1) \ DTRACE_PROBE1(__ip_, name, arg1) -#define DTRACE_IP2(name, type1, arg1, type2, arg2) \ +#define DTRACE_IP2(name, type1, arg1, type2, arg2) \ DTRACE_PROBE2(__ip_, name, arg1, arg2) -#define DTRACE_IP3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_IP3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__ip_, name, arg1, arg2, arg3) -#define DTRACE_IP4(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4) \ +#define DTRACE_IP4(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4) \ DTRACE_PROBE4(__ip_, name, arg1, arg2, arg3, arg4) -#define DTRACE_IP5(name, typ1, arg1, type2, arg2, type3, arg3, \ - type4, arg4, type5, arg5) \ +#define DTRACE_IP5(name, typ1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5) \ DTRACE_PROBE5(__ip_, name, arg1, arg2, arg3, arg4, arg5) -#define DTRACE_IP6(name, type1, arg1, type2, arg2, type3, arg3, \ - type4,arg4, type5, arg5, type6, arg6) \ +#define DTRACE_IP6(name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5, type6, arg6) \ DTRACE_PROBE6(__ip_, name, arg1, arg2, arg3, arg4, arg5, arg6) -#define DTRACE_IP7(name, type1, arg1, type2, arg2, type3, arg3, \ - type4, arg4, type5, arg5, type6, arg6, type7, arg7) \ +#define DTRACE_IP7(name, type1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5, type6, arg6, type7, arg7) \ DTRACE_PROBE7(__ip_, name, arg1, arg2, arg3, arg4, arg5, arg6, arg7) #define DTRACE_TCP(name) \ - DTRACE_PROBE(__tcp_, name) + DTRACE_PROBE(__tcp_, name) #define DTRACE_TCP1(name, type1, arg1) \ - DTRACE_PROBE1(__tcp_, name, arg1) + DTRACE_PROBE1(__tcp_, name, arg1) #define DTRACE_TCP2(name, type1, arg1, type2, arg2) \ - DTRACE_PROBE2(__tcp_, name, arg1, arg2) + DTRACE_PROBE2(__tcp_, name, arg1, arg2) #define DTRACE_TCP3(name, type1, arg1, type2, arg2, type3, arg3) \ - DTRACE_PROBE3(__tcp_, name, arg1, arg2, arg3) + DTRACE_PROBE3(__tcp_, name, arg1, arg2, arg3) #define DTRACE_TCP4(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4) \ - DTRACE_PROBE4(__tcp_, name, arg1, arg2, arg3, arg4) + type3, arg3, type4, arg4) \ + DTRACE_PROBE4(__tcp_, name, arg1, arg2, arg3, arg4) #define DTRACE_TCP5(name, typ1, arg1, type2, arg2, type3, arg3, \ - type4, arg4, type5, arg5) \ - DTRACE_PROBE5(__tcp_, name, arg1, arg2, arg3, arg4, arg5) + type4, arg4, type5, arg5) \ + DTRACE_PROBE5(__tcp_, name, arg1, arg2, arg3, arg4, arg5) -#define DTRACE_MPTCP(name) \ +#define DTRACE_MPTCP(name) \ DTRACE_PROBE(__mptcp_, name) -#define DTRACE_MPTCP1(name, type1, arg1) \ +#define DTRACE_MPTCP1(name, type1, arg1) \ DTRACE_PROBE1(__mptcp_, name, arg1) -#define DTRACE_MPTCP2(name, type1, arg1, type2, arg2) \ +#define DTRACE_MPTCP2(name, type1, arg1, type2, arg2) \ DTRACE_PROBE2(__mptcp_, name, arg1, arg2) -#define DTRACE_MPTCP3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_MPTCP3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__mptcp_, name, arg1, arg2, arg3) -#define DTRACE_MPTCP4(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4) \ +#define DTRACE_MPTCP4(name, type1, arg1, type2, arg2, \ + 
type3, arg3, type4, arg4) \ DTRACE_PROBE4(__mptcp_, name, arg1, arg2, arg3, arg4) -#define DTRACE_MPTCP5(name, typ1, arg1, type2, arg2, type3, arg3, \ - type4, arg4, type5, arg5) \ +#define DTRACE_MPTCP5(name, typ1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5) \ DTRACE_PROBE5(__mptcp_, name, arg1, arg2, arg3, arg4, arg5) -#define DTRACE_MPTCP6(name, typ1, arg1, type2, arg2, type3, arg3, \ - type4, arg4, type5, arg5, type6, arg6) \ +#define DTRACE_MPTCP6(name, typ1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5, type6, arg6) \ DTRACE_PROBE6(__mptcp_, name, arg1, arg2, arg3, arg4, arg5, arg6) -#define DTRACE_MPTCP7(name, typ1, arg1, type2, arg2, type3, arg3, \ - type4, arg4, type5, arg5, type6, arg6, \ - type7, arg7) \ - DTRACE_PROBE7(__mptcp_, name, arg1, arg2, arg3, arg4, arg5, \ - arg6, arg7) +#define DTRACE_MPTCP7(name, typ1, arg1, type2, arg2, type3, arg3, \ + type4, arg4, type5, arg5, type6, arg6, \ + type7, arg7) \ + DTRACE_PROBE7(__mptcp_, name, arg1, arg2, arg3, arg4, arg5, \ + arg6, arg7) -#define DTRACE_FSINFO(name, type, vp) \ +#define DTRACE_FSINFO(name, type, vp) \ DTRACE_PROBE1(__fsinfo_, name, vp) -#define DTRACE_FSINFO_IO(name, type1, vp, type2, size) \ +#define DTRACE_FSINFO_IO(name, type1, vp, type2, size) \ DTRACE_PROBE2(__fsinfo_, name, vp, size) -#define DTRACE_BOOST(name) \ +#define DTRACE_BOOST(name) \ DTRACE_PROBE(__boost_, name); -#define DTRACE_BOOST1(name, type1, arg1) \ +#define DTRACE_BOOST1(name, type1, arg1) \ DTRACE_PROBE1(__boost_, name, arg1); -#define DTRACE_BOOST2(name, type1, arg1, type2, arg2) \ +#define DTRACE_BOOST2(name, type1, arg1, type2, arg2) \ DTRACE_PROBE2(__boost_, name, arg1, arg2); -#define DTRACE_BOOST3(name, type1, arg1, type2, arg2, type3, arg3) \ +#define DTRACE_BOOST3(name, type1, arg1, type2, arg2, type3, arg3) \ DTRACE_PROBE3(__boost_, name, arg1, arg2, arg3); -#define DTRACE_BOOST4(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4) \ +#define DTRACE_BOOST4(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4) \ DTRACE_PROBE4(__boost_, name, arg1, arg2, arg3, arg4); -#define DTRACE_BOOST5(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4, type5, arg5) \ +#define DTRACE_BOOST5(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4, type5, arg5) \ DTRACE_PROBE5(__boost_, name, arg1, arg2, arg3, arg4, arg5); -#define DTRACE_BOOST6(name, type1, arg1, type2, arg2, \ - type3, arg3, type4, arg4, type5, arg5, type6, arg6) \ +#define DTRACE_BOOST6(name, type1, arg1, type2, arg2, \ + type3, arg3, type4, arg4, type5, arg5, type6, arg6) \ DTRACE_PROBE6(__boost_, name, arg1, arg2, arg3, arg4, arg5, arg6); #if PRIVATE @@ -383,4 +389,4 @@ #endif /* KERNEL */ -#endif /* _MACH_MACHINE_SYS_SDT_H */ +#endif /* _MACH_MACHINE_SYS_SDT_H */ diff --git a/osfmk/mach/machine/thread_state.h b/osfmk/mach/machine/thread_state.h index aca71548f..06f38104c 100644 --- a/osfmk/mach/machine/thread_state.h +++ b/osfmk/mach/machine/thread_state.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/thread_status.h b/osfmk/mach/machine/thread_status.h index 9ce08e045..3e319e8ce 100644 --- a/osfmk/mach/machine/thread_status.h +++ b/osfmk/mach/machine/thread_status.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/vm_param.h b/osfmk/mach/machine/vm_param.h index 30f94df85..dcc0ec539 100644 --- a/osfmk/mach/machine/vm_param.h +++ b/osfmk/mach/machine/vm_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/machine/vm_types.h b/osfmk/mach/machine/vm_types.h index 68be87969..7f1605a27 100644 --- a/osfmk/mach/machine/vm_types.h +++ b/osfmk/mach/machine/vm_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/mach/memory_object.h b/osfmk/mach/memory_object.h index 68223b8fa..04f4d5c1d 100644 --- a/osfmk/mach/memory_object.h +++ b/osfmk/mach/memory_object.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -62,7 +62,7 @@ * External memory management interface definition. */ -#ifndef _MACH_MEMORY_OBJECT_H_ +#ifndef _MACH_MEMORY_OBJECT_H_ #define _MACH_MEMORY_OBJECT_H_ /* @@ -74,122 +74,122 @@ #include #include -typedef mach_port_t memory_object_t; - /* A memory object ... */ - /* Used by the kernel to retrieve */ - /* or store data */ +typedef mach_port_t memory_object_t; +/* A memory object ... */ +/* Used by the kernel to retrieve */ +/* or store data */ -typedef mach_port_t memory_object_control_t; - /* Provided to a memory manager; ... */ - /* used to control a memory object */ +typedef mach_port_t memory_object_control_t; +/* Provided to a memory manager; ... */ +/* used to control a memory object */ -typedef mach_port_t memory_object_name_t; - /* Used to describe the memory ... */ - /* object in vm_regions() calls */ +typedef mach_port_t memory_object_name_t; +/* Used to describe the memory ... */ +/* object in vm_regions() calls */ typedef mach_port_t memory_object_rep_t; - /* Per-client handle for mem object */ - /* Used by user programs to specify */ - /* the object to map */ - -typedef int memory_object_copy_strategy_t; - /* How memory manager handles copy: */ -#define MEMORY_OBJECT_COPY_NONE 0 - /* ... No special support */ -#define MEMORY_OBJECT_COPY_CALL 1 - /* ... Make call on memory manager */ -#define MEMORY_OBJECT_COPY_DELAY 2 - /* ... Memory manager doesn't - * change data externally. - */ -#define MEMORY_OBJECT_COPY_TEMPORARY 3 - /* ... Memory manager doesn't - * change data externally, and - * doesn't need to see changes. - */ -#define MEMORY_OBJECT_COPY_SYMMETRIC 4 - /* ... Memory manager doesn't - * change data externally, - * doesn't need to see changes, - * and object will not be - * multiply mapped. - * - * XXX - * Not yet safe for non-kernel use. - */ - -#define MEMORY_OBJECT_COPY_INVALID 5 - /* ... An invalid copy strategy, - * for external objects which - * have not been initialized. - * Allows copy_strategy to be - * examined without also - * examining pager_ready and - * internal. - */ - -typedef int memory_object_return_t; - /* Which pages to return to manager - this time (lock_request) */ -#define MEMORY_OBJECT_RETURN_NONE 0 - /* ... don't return any. */ -#define MEMORY_OBJECT_RETURN_DIRTY 1 - /* ... only dirty pages. */ -#define MEMORY_OBJECT_RETURN_ALL 2 - /* ... dirty and precious pages. */ -#define MEMORY_OBJECT_RETURN_ANYTHING 3 - /* ... any resident page. */ - -#define MEMORY_OBJECT_NULL MACH_PORT_NULL +/* Per-client handle for mem object */ +/* Used by user programs to specify */ +/* the object to map */ + +typedef int memory_object_copy_strategy_t; +/* How memory manager handles copy: */ +#define MEMORY_OBJECT_COPY_NONE 0 +/* ... No special support */ +#define MEMORY_OBJECT_COPY_CALL 1 +/* ... Make call on memory manager */ +#define MEMORY_OBJECT_COPY_DELAY 2 +/* ... Memory manager doesn't + * change data externally. + */ +#define MEMORY_OBJECT_COPY_TEMPORARY 3 +/* ... Memory manager doesn't + * change data externally, and + * doesn't need to see changes. + */ +#define MEMORY_OBJECT_COPY_SYMMETRIC 4 +/* ... Memory manager doesn't + * change data externally, + * doesn't need to see changes, + * and object will not be + * multiply mapped. + * + * XXX + * Not yet safe for non-kernel use. + */ + +#define MEMORY_OBJECT_COPY_INVALID 5 +/* ... An invalid copy strategy, + * for external objects which + * have not been initialized. + * Allows copy_strategy to be + * examined without also + * examining pager_ready and + * internal. 
+ */ + +typedef int memory_object_return_t; +/* Which pages to return to manager + * this time (lock_request) */ +#define MEMORY_OBJECT_RETURN_NONE 0 +/* ... don't return any. */ +#define MEMORY_OBJECT_RETURN_DIRTY 1 +/* ... only dirty pages. */ +#define MEMORY_OBJECT_RETURN_ALL 2 +/* ... dirty and precious pages. */ +#define MEMORY_OBJECT_RETURN_ANYTHING 3 +/* ... any resident page. */ + +#define MEMORY_OBJECT_NULL MACH_PORT_NULL /* * Types for the memory object flavor interfaces */ -#define MEMORY_OBJECT_INFO_MAX (1024) -typedef int *memory_object_info_t; -typedef int memory_object_flavor_t; +#define MEMORY_OBJECT_INFO_MAX (1024) +typedef int *memory_object_info_t; +typedef int memory_object_flavor_t; typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX]; -#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 -#define MEMORY_OBJECT_PERFORMANCE_INFO 11 -#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 -#define MEMORY_OBJECT_ATTRIBUTE_INFO 14 -#define MEMORY_OBJECT_BEHAVIOR_INFO 15 +#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 +#define MEMORY_OBJECT_PERFORMANCE_INFO 11 +#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 +#define MEMORY_OBJECT_ATTRIBUTE_INFO 14 +#define MEMORY_OBJECT_BEHAVIOR_INFO 15 struct old_memory_object_behave_info { - memory_object_copy_strategy_t copy_strategy; - boolean_t temporary; - boolean_t invalidate; + memory_object_copy_strategy_t copy_strategy; + boolean_t temporary; + boolean_t invalidate; }; struct memory_object_perf_info { - memory_object_cluster_size_t cluster_size; - boolean_t may_cache; + memory_object_cluster_size_t cluster_size; + boolean_t may_cache; }; -struct old_memory_object_attr_info { /* old attr list */ - boolean_t object_ready; - boolean_t may_cache; - memory_object_copy_strategy_t copy_strategy; +struct old_memory_object_attr_info { /* old attr list */ + boolean_t object_ready; + boolean_t may_cache; + memory_object_copy_strategy_t copy_strategy; }; struct memory_object_attr_info { - memory_object_copy_strategy_t copy_strategy; - memory_object_cluster_size_t cluster_size; - boolean_t may_cache_object; - boolean_t temporary; + memory_object_copy_strategy_t copy_strategy; + memory_object_cluster_size_t cluster_size; + boolean_t may_cache_object; + boolean_t temporary; }; struct memory_object_behave_info { - memory_object_copy_strategy_t copy_strategy; - boolean_t temporary; - boolean_t invalidate; - boolean_t silent_overwrite; - boolean_t advisory_pageout; + memory_object_copy_strategy_t copy_strategy; + boolean_t temporary; + boolean_t invalidate; + boolean_t silent_overwrite; + boolean_t advisory_pageout; }; typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t; @@ -198,31 +198,31 @@ typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_ typedef struct memory_object_behave_info *memory_object_behave_info_t; typedef struct memory_object_behave_info memory_object_behave_info_data_t; -typedef struct memory_object_perf_info *memory_object_perf_info_t; -typedef struct memory_object_perf_info memory_object_perf_info_data_t; +typedef struct memory_object_perf_info *memory_object_perf_info_t; +typedef struct memory_object_perf_info memory_object_perf_info_data_t; typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t; typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t; -typedef struct memory_object_attr_info *memory_object_attr_info_t; -typedef struct memory_object_attr_info memory_object_attr_info_data_t; - -#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT 
((mach_msg_type_number_t) \ - (sizeof(old_memory_object_behave_info_data_t)/sizeof(int))) -#define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(memory_object_behave_info_data_t)/sizeof(int))) -#define MEMORY_OBJECT_PERF_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(memory_object_perf_info_data_t)/sizeof(int))) -#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(old_memory_object_attr_info_data_t)/sizeof(int))) -#define MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(memory_object_attr_info_data_t)/sizeof(int))) - -#define invalid_memory_object_flavor(f) \ - (f != MEMORY_OBJECT_ATTRIBUTE_INFO && \ - f != MEMORY_OBJECT_PERFORMANCE_INFO && \ - f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ - f != MEMORY_OBJECT_BEHAVIOR_INFO && \ +typedef struct memory_object_attr_info *memory_object_attr_info_t; +typedef struct memory_object_attr_info memory_object_attr_info_data_t; + +#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(old_memory_object_behave_info_data_t)/sizeof(int))) +#define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(memory_object_behave_info_data_t)/sizeof(int))) +#define MEMORY_OBJECT_PERF_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(memory_object_perf_info_data_t)/sizeof(int))) +#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(old_memory_object_attr_info_data_t)/sizeof(int))) +#define MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(memory_object_attr_info_data_t)/sizeof(int))) + +#define invalid_memory_object_flavor(f) \ + (f != MEMORY_OBJECT_ATTRIBUTE_INFO && \ + f != MEMORY_OBJECT_PERFORMANCE_INFO && \ + f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ + f != MEMORY_OBJECT_BEHAVIOR_INFO && \ f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO) -#endif /* _MACH_MEMORY_OBJECT_H_ */ +#endif /* _MACH_MEMORY_OBJECT_H_ */ diff --git a/osfmk/mach/memory_object_types.h b/osfmk/mach/memory_object_types.h index 2a5def180..fec1df84a 100644 --- a/osfmk/mach/memory_object_types.h +++ b/osfmk/mach/memory_object_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. 
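/*
 * [Illustrative sketch, not part of the patch] The *_COUNT macros above
 * express each flavor struct's size in ints to match the
 * memory_object_info_t (int *) convention; a caller sizes a
 * MEMORY_OBJECT_INFO_MAX buffer and passes the count matching the flavor,
 * roughly as below (the get_attributes call is indicative, not exact):
 */
#include <string.h>

static kern_return_t
sketch_fetch_perf_info(memory_object_control_t control,
    memory_object_perf_info_data_t *out)
{
	memory_object_info_data_t buf;
	mach_msg_type_number_t count = MEMORY_OBJECT_PERF_INFO_COUNT;

	/* a flavor query fills 'count' ints for MEMORY_OBJECT_PERFORMANCE_INFO */
	kern_return_t kr = memory_object_get_attributes(control,
	    MEMORY_OBJECT_PERFORMANCE_INFO, (memory_object_info_t) buf, &count);
	if (kr == KERN_SUCCESS) {
		memcpy(out, buf, count * sizeof(int));
	}
	return kr;
}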
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -62,7 +62,7 @@ * External memory management interface definition. */ -#ifndef _MACH_MEMORY_OBJECT_TYPES_H_ +#ifndef _MACH_MEMORY_OBJECT_TYPES_H_ #define _MACH_MEMORY_OBJECT_TYPES_H_ /* @@ -81,24 +81,24 @@ #define VM_64_BIT_DATA_OBJECTS -typedef unsigned long long memory_object_offset_t; -typedef unsigned long long memory_object_size_t; -typedef natural_t memory_object_cluster_size_t; -typedef natural_t * memory_object_fault_info_t; +typedef unsigned long long memory_object_offset_t; +typedef unsigned long long memory_object_size_t; +typedef natural_t memory_object_cluster_size_t; +typedef natural_t * memory_object_fault_info_t; -typedef unsigned long long vm_object_id_t; +typedef unsigned long long vm_object_id_t; /* * Temporary until real EMMI version gets re-implemented */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE /* IMPORTANT: this type must match "ipc_object_bits_t" from ipc/ipc_port.h */ typedef natural_t mo_ipc_object_bits_t; -struct memory_object_pager_ops; /* forward declaration */ +struct memory_object_pager_ops; /* forward declaration */ /* * "memory_object" and "memory_object_control" types used to be Mach ports @@ -107,15 +107,15 @@ struct memory_object_pager_ops; /* forward declaration */ * "struct ipc_object" to identify them as a "IKOT_MEMORY_OBJECT" and * "IKOT_MEM_OBJ_CONTROL" respectively. */ -typedef struct memory_object { - mo_ipc_object_bits_t mo_ikot; /* DO NOT CHANGE */ - const struct memory_object_pager_ops *mo_pager_ops; - struct memory_object_control *mo_control; +typedef struct memory_object { + mo_ipc_object_bits_t mo_ikot; /* DO NOT CHANGE */ + const struct memory_object_pager_ops *mo_pager_ops; + struct memory_object_control *mo_control; } *memory_object_t; -typedef struct memory_object_control { - mo_ipc_object_bits_t moc_ikot; /* DO NOT CHANGE */ - struct vm_object *moc_object; +typedef struct memory_object_control { + mo_ipc_object_bits_t moc_ikot; /* DO NOT CHANGE */ + struct vm_object *moc_object; } *memory_object_control_t; typedef const struct memory_object_pager_ops { @@ -169,121 +169,121 @@ typedef const struct memory_object_pager_ops { const char *memory_object_pager_name; } * memory_object_pager_ops_t; -#else /* KERNEL_PRIVATE */ +#else /* KERNEL_PRIVATE */ -typedef mach_port_t memory_object_t; -typedef mach_port_t memory_object_control_t; +typedef mach_port_t memory_object_t; +typedef mach_port_t memory_object_control_t; -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ typedef memory_object_t *memory_object_array_t; - /* A memory object ... 
*/ - /* Used by the kernel to retrieve */ - /* or store data */ - -typedef mach_port_t memory_object_name_t; - /* Used to describe the memory ... */ - /* object in vm_regions() calls */ - -typedef mach_port_t memory_object_default_t; - /* Registered with the host ... */ - /* for creating new internal objects */ - -#define MEMORY_OBJECT_NULL ((memory_object_t) 0) -#define MEMORY_OBJECT_CONTROL_NULL ((memory_object_control_t) 0) -#define MEMORY_OBJECT_NAME_NULL ((memory_object_name_t) 0) -#define MEMORY_OBJECT_DEFAULT_NULL ((memory_object_default_t) 0) - - -typedef int memory_object_copy_strategy_t; - /* How memory manager handles copy: */ -#define MEMORY_OBJECT_COPY_NONE 0 - /* ... No special support */ -#define MEMORY_OBJECT_COPY_CALL 1 - /* ... Make call on memory manager */ -#define MEMORY_OBJECT_COPY_DELAY 2 - /* ... Memory manager doesn't - * change data externally. - */ -#define MEMORY_OBJECT_COPY_TEMPORARY 3 - /* ... Memory manager doesn't - * change data externally, and - * doesn't need to see changes. - */ -#define MEMORY_OBJECT_COPY_SYMMETRIC 4 - /* ... Memory manager doesn't - * change data externally, - * doesn't need to see changes, - * and object will not be - * multiply mapped. - * - * XXX - * Not yet safe for non-kernel use. - */ - -#define MEMORY_OBJECT_COPY_INVALID 5 - /* ... An invalid copy strategy, - * for external objects which - * have not been initialized. - * Allows copy_strategy to be - * examined without also - * examining pager_ready and - * internal. - */ - -typedef int memory_object_return_t; - /* Which pages to return to manager - this time (lock_request) */ -#define MEMORY_OBJECT_RETURN_NONE 0 - /* ... don't return any. */ -#define MEMORY_OBJECT_RETURN_DIRTY 1 - /* ... only dirty pages. */ -#define MEMORY_OBJECT_RETURN_ALL 2 - /* ... dirty and precious pages. */ -#define MEMORY_OBJECT_RETURN_ANYTHING 3 - /* ... any resident page. */ - -/* +/* A memory object ... */ +/* Used by the kernel to retrieve */ +/* or store data */ + +typedef mach_port_t memory_object_name_t; +/* Used to describe the memory ... */ +/* object in vm_regions() calls */ + +typedef mach_port_t memory_object_default_t; +/* Registered with the host ... */ +/* for creating new internal objects */ + +#define MEMORY_OBJECT_NULL ((memory_object_t) 0) +#define MEMORY_OBJECT_CONTROL_NULL ((memory_object_control_t) 0) +#define MEMORY_OBJECT_NAME_NULL ((memory_object_name_t) 0) +#define MEMORY_OBJECT_DEFAULT_NULL ((memory_object_default_t) 0) + + +typedef int memory_object_copy_strategy_t; +/* How memory manager handles copy: */ +#define MEMORY_OBJECT_COPY_NONE 0 +/* ... No special support */ +#define MEMORY_OBJECT_COPY_CALL 1 +/* ... Make call on memory manager */ +#define MEMORY_OBJECT_COPY_DELAY 2 +/* ... Memory manager doesn't + * change data externally. + */ +#define MEMORY_OBJECT_COPY_TEMPORARY 3 +/* ... Memory manager doesn't + * change data externally, and + * doesn't need to see changes. + */ +#define MEMORY_OBJECT_COPY_SYMMETRIC 4 +/* ... Memory manager doesn't + * change data externally, + * doesn't need to see changes, + * and object will not be + * multiply mapped. + * + * XXX + * Not yet safe for non-kernel use. + */ + +#define MEMORY_OBJECT_COPY_INVALID 5 +/* ... An invalid copy strategy, + * for external objects which + * have not been initialized. + * Allows copy_strategy to be + * examined without also + * examining pager_ready and + * internal. 
+ */ + +typedef int memory_object_return_t; +/* Which pages to return to manager + * this time (lock_request) */ +#define MEMORY_OBJECT_RETURN_NONE 0 +/* ... don't return any. */ +#define MEMORY_OBJECT_RETURN_DIRTY 1 +/* ... only dirty pages. */ +#define MEMORY_OBJECT_RETURN_ALL 2 +/* ... dirty and precious pages. */ +#define MEMORY_OBJECT_RETURN_ANYTHING 3 +/* ... any resident page. */ + +/* * Data lock request flags */ -#define MEMORY_OBJECT_DATA_FLUSH 0x1 -#define MEMORY_OBJECT_DATA_NO_CHANGE 0x2 -#define MEMORY_OBJECT_DATA_PURGE 0x4 -#define MEMORY_OBJECT_COPY_SYNC 0x8 -#define MEMORY_OBJECT_DATA_SYNC 0x10 +#define MEMORY_OBJECT_DATA_FLUSH 0x1 +#define MEMORY_OBJECT_DATA_NO_CHANGE 0x2 +#define MEMORY_OBJECT_DATA_PURGE 0x4 +#define MEMORY_OBJECT_COPY_SYNC 0x8 +#define MEMORY_OBJECT_DATA_SYNC 0x10 #define MEMORY_OBJECT_IO_SYNC 0x20 -#define MEMORY_OBJECT_DATA_FLUSH_ALL 0x40 +#define MEMORY_OBJECT_DATA_FLUSH_ALL 0x40 /* * Types for the memory object flavor interfaces */ -#define MEMORY_OBJECT_INFO_MAX (1024) -typedef int *memory_object_info_t; -typedef int memory_object_flavor_t; +#define MEMORY_OBJECT_INFO_MAX (1024) +typedef int *memory_object_info_t; +typedef int memory_object_flavor_t; typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX]; -#define MEMORY_OBJECT_PERFORMANCE_INFO 11 -#define MEMORY_OBJECT_ATTRIBUTE_INFO 14 -#define MEMORY_OBJECT_BEHAVIOR_INFO 15 +#define MEMORY_OBJECT_PERFORMANCE_INFO 11 +#define MEMORY_OBJECT_ATTRIBUTE_INFO 14 +#define MEMORY_OBJECT_BEHAVIOR_INFO 15 -#ifdef PRIVATE +#ifdef PRIVATE -#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 -#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 +#define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 +#define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 struct old_memory_object_behave_info { - memory_object_copy_strategy_t copy_strategy; - boolean_t temporary; - boolean_t invalidate; + memory_object_copy_strategy_t copy_strategy; + boolean_t temporary; + boolean_t invalidate; }; -struct old_memory_object_attr_info { /* old attr list */ - boolean_t object_ready; - boolean_t may_cache; - memory_object_copy_strategy_t copy_strategy; +struct old_memory_object_attr_info { /* old attr list */ + boolean_t object_ready; + boolean_t may_cache; + memory_object_copy_strategy_t copy_strategy; }; typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t; @@ -291,10 +291,10 @@ typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_ typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t; typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t; -#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(old_memory_object_behave_info_data_t)/sizeof(int))) -#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(old_memory_object_attr_info_data_t)/sizeof(int))) +#define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(old_memory_object_behave_info_data_t)/sizeof(int))) +#define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(old_memory_object_attr_info_data_t)/sizeof(int))) #ifdef KERNEL @@ -312,106 +312,106 @@ __END_DECLS #endif /* KERNEL */ -#endif /* PRIVATE */ +#endif /* PRIVATE */ struct memory_object_perf_info { - memory_object_cluster_size_t cluster_size; - boolean_t may_cache; + memory_object_cluster_size_t cluster_size; + boolean_t may_cache; }; struct memory_object_attr_info { - memory_object_copy_strategy_t copy_strategy; - memory_object_cluster_size_t cluster_size; - 
boolean_t may_cache_object; - boolean_t temporary; + memory_object_copy_strategy_t copy_strategy; + memory_object_cluster_size_t cluster_size; + boolean_t may_cache_object; + boolean_t temporary; }; struct memory_object_behave_info { - memory_object_copy_strategy_t copy_strategy; - boolean_t temporary; - boolean_t invalidate; - boolean_t silent_overwrite; - boolean_t advisory_pageout; + memory_object_copy_strategy_t copy_strategy; + boolean_t temporary; + boolean_t invalidate; + boolean_t silent_overwrite; + boolean_t advisory_pageout; }; typedef struct memory_object_behave_info *memory_object_behave_info_t; typedef struct memory_object_behave_info memory_object_behave_info_data_t; -typedef struct memory_object_perf_info *memory_object_perf_info_t; -typedef struct memory_object_perf_info memory_object_perf_info_data_t; +typedef struct memory_object_perf_info *memory_object_perf_info_t; +typedef struct memory_object_perf_info memory_object_perf_info_data_t; -typedef struct memory_object_attr_info *memory_object_attr_info_t; -typedef struct memory_object_attr_info memory_object_attr_info_data_t; +typedef struct memory_object_attr_info *memory_object_attr_info_t; +typedef struct memory_object_attr_info memory_object_attr_info_data_t; -#define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(memory_object_behave_info_data_t)/sizeof(int))) -#define MEMORY_OBJECT_PERF_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(memory_object_perf_info_data_t)/sizeof(int))) -#define MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(memory_object_attr_info_data_t)/sizeof(int))) +#define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(memory_object_behave_info_data_t)/sizeof(int))) +#define MEMORY_OBJECT_PERF_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(memory_object_perf_info_data_t)/sizeof(int))) +#define MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(memory_object_attr_info_data_t)/sizeof(int))) -#define invalid_memory_object_flavor(f) \ - (f != MEMORY_OBJECT_ATTRIBUTE_INFO && \ - f != MEMORY_OBJECT_PERFORMANCE_INFO && \ - f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ - f != MEMORY_OBJECT_BEHAVIOR_INFO && \ +#define invalid_memory_object_flavor(f) \ + (f != MEMORY_OBJECT_ATTRIBUTE_INFO && \ + f != MEMORY_OBJECT_PERFORMANCE_INFO && \ + f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ + f != MEMORY_OBJECT_BEHAVIOR_INFO && \ f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO) /* * Used to support options on memory_object_release_name call */ -#define MEMORY_OBJECT_TERMINATE_IDLE 0x1 -#define MEMORY_OBJECT_RESPECT_CACHE 0x2 -#define MEMORY_OBJECT_RELEASE_NO_OP 0x4 +#define MEMORY_OBJECT_TERMINATE_IDLE 0x1 +#define MEMORY_OBJECT_RESPECT_CACHE 0x2 +#define MEMORY_OBJECT_RELEASE_NO_OP 0x4 /* named entry processor mapping options */ /* enumerated */ -#define MAP_MEM_NOOP 0 -#define MAP_MEM_COPYBACK 1 -#define MAP_MEM_IO 2 -#define MAP_MEM_WTHRU 3 -#define MAP_MEM_WCOMB 4 /* Write combining mode */ - /* aka store gather */ -#define MAP_MEM_INNERWBACK 5 -#define MAP_MEM_POSTED 6 - -#define GET_MAP_MEM(flags) \ +#define MAP_MEM_NOOP 0 +#define MAP_MEM_COPYBACK 1 +#define MAP_MEM_IO 2 +#define MAP_MEM_WTHRU 3 +#define MAP_MEM_WCOMB 4 /* Write combining mode */ + /* aka store gather */ +#define MAP_MEM_INNERWBACK 5 +#define MAP_MEM_POSTED 6 + +#define GET_MAP_MEM(flags) \ ((((unsigned int)(flags)) >> 24) & 0xFF) -#define SET_MAP_MEM(caching, flags) \ +#define SET_MAP_MEM(caching, flags) \ ((flags) = ((((unsigned int)(caching)) << 24) \ - & 0xFF000000) | 
((flags) & 0xFFFFFF)); + & 0xFF000000) | ((flags) & 0xFFFFFF)); /* leave room for vm_prot bits (0xFF ?) */ #define MAP_MEM_LEDGER_TAG_NETWORK 0x002000 /* charge to "network" ledger */ #define MAP_MEM_PURGABLE_KERNEL_ONLY 0x004000 /* volatility controlled by kernel */ -#define MAP_MEM_GRAB_SECLUDED 0x008000 /* can grab secluded pages */ -#define MAP_MEM_ONLY 0x010000 /* change processor caching */ -#define MAP_MEM_NAMED_CREATE 0x020000 /* create extant object */ -#define MAP_MEM_PURGABLE 0x040000 /* create a purgable VM object */ -#define MAP_MEM_NAMED_REUSE 0x080000 /* reuse provided entry if identical */ -#define MAP_MEM_USE_DATA_ADDR 0x100000 /* preserve address of data, rather than base of page */ -#define MAP_MEM_VM_COPY 0x200000 /* make a copy of a VM range */ -#define MAP_MEM_VM_SHARE 0x400000 /* extract a VM range for remap */ -#define MAP_MEM_4K_DATA_ADDR 0x800000 /* preserve 4K aligned address of data */ +#define MAP_MEM_GRAB_SECLUDED 0x008000 /* can grab secluded pages */ +#define MAP_MEM_ONLY 0x010000 /* change processor caching */ +#define MAP_MEM_NAMED_CREATE 0x020000 /* create extant object */ +#define MAP_MEM_PURGABLE 0x040000 /* create a purgable VM object */ +#define MAP_MEM_NAMED_REUSE 0x080000 /* reuse provided entry if identical */ +#define MAP_MEM_USE_DATA_ADDR 0x100000 /* preserve address of data, rather than base of page */ +#define MAP_MEM_VM_COPY 0x200000 /* make a copy of a VM range */ +#define MAP_MEM_VM_SHARE 0x400000 /* extract a VM range for remap */ +#define MAP_MEM_4K_DATA_ADDR 0x800000 /* preserve 4K aligned address of data */ #define MAP_MEM_FLAGS_MASK 0x00FFFF00 -#define MAP_MEM_FLAGS_USER ( \ - MAP_MEM_PURGABLE_KERNEL_ONLY | \ - MAP_MEM_GRAB_SECLUDED | \ - MAP_MEM_ONLY | \ - MAP_MEM_NAMED_CREATE | \ - MAP_MEM_PURGABLE | \ - MAP_MEM_NAMED_REUSE | \ - MAP_MEM_USE_DATA_ADDR | \ - MAP_MEM_VM_COPY | \ - MAP_MEM_VM_SHARE | \ +#define MAP_MEM_FLAGS_USER ( \ + MAP_MEM_PURGABLE_KERNEL_ONLY | \ + MAP_MEM_GRAB_SECLUDED | \ + MAP_MEM_ONLY | \ + MAP_MEM_NAMED_CREATE | \ + MAP_MEM_PURGABLE | \ + MAP_MEM_NAMED_REUSE | \ + MAP_MEM_USE_DATA_ADDR | \ + MAP_MEM_VM_COPY | \ + MAP_MEM_VM_SHARE | \ MAP_MEM_4K_DATA_ADDR) -#define MAP_MEM_FLAGS_ALL ( \ - MAP_MEM_LEDGER_TAG_NETWORK | \ +#define MAP_MEM_FLAGS_ALL ( \ + MAP_MEM_LEDGER_TAG_NETWORK | \ MAP_MEM_FLAGS_USER) #ifdef KERNEL @@ -425,106 +425,106 @@ typedef struct memory_object_attr_info memory_object_attr_info_data_t; * each of those pages. 
*/ #ifdef PRIVATE -#define MAX_UPL_TRANSFER_BYTES (1024 * 1024) -#define MAX_UPL_SIZE_BYTES (1024 * 1024 * 64) +#define MAX_UPL_TRANSFER_BYTES (1024 * 1024) +#define MAX_UPL_SIZE_BYTES (1024 * 1024 * 64) #ifndef CONFIG_EMBEDDED -#define MAX_UPL_SIZE (MAX_UPL_SIZE_BYTES / PAGE_SIZE) -#define MAX_UPL_TRANSFER (MAX_UPL_TRANSFER_BYTES / PAGE_SIZE) +#define MAX_UPL_SIZE (MAX_UPL_SIZE_BYTES / PAGE_SIZE) +#define MAX_UPL_TRANSFER (MAX_UPL_TRANSFER_BYTES / PAGE_SIZE) #endif struct upl_page_info { - ppnum_t phys_addr; /* physical page index number */ + ppnum_t phys_addr; /* physical page index number */ unsigned int #ifdef XNU_KERNEL_PRIVATE - free_when_done:1,/* page is to be freed on commit */ - absent:1, /* No valid data in this page */ - dirty:1, /* Page must be cleaned (O) */ - precious:1, /* must be cleaned, we have only copy */ - device:1, /* no page data, mapped dev memory */ - speculative:1, /* page is valid, but not yet accessed */ - cs_validated:1, /* CODE SIGNING: page was validated */ - cs_tainted:1, /* CODE SIGNING: page is tainted */ - cs_nx:1, /* CODE SIGNING: page is NX */ - needed:1, /* page should be left in cache on abort */ - mark:1, /* a mark flag for the creator to use as they wish */ - :0; /* force to long boundary */ + free_when_done:1, /* page is to be freed on commit */ + absent:1, /* No valid data in this page */ + dirty:1, /* Page must be cleaned (O) */ + precious:1, /* must be cleaned, we have only copy */ + device:1, /* no page data, mapped dev memory */ + speculative:1, /* page is valid, but not yet accessed */ + cs_validated:1, /* CODE SIGNING: page was validated */ + cs_tainted:1, /* CODE SIGNING: page is tainted */ + cs_nx:1, /* CODE SIGNING: page is NX */ + needed:1, /* page should be left in cache on abort */ + mark:1, /* a mark flag for the creator to use as they wish */ + :0; /* force to long boundary */ #else - opaque; /* use upl_page_xxx() accessor funcs */ + opaque; /* use upl_page_xxx() accessor funcs */ #endif /* XNU_KERNEL_PRIVATE */ }; #else struct upl_page_info { - unsigned int opaque[2]; /* use upl_page_xxx() accessor funcs */ + unsigned int opaque[2]; /* use upl_page_xxx() accessor funcs */ }; #endif /* PRIVATE */ -typedef struct upl_page_info upl_page_info_t; -typedef upl_page_info_t *upl_page_info_array_t; -typedef upl_page_info_array_t upl_page_list_ptr_t; +typedef struct upl_page_info upl_page_info_t; +typedef upl_page_info_t *upl_page_info_array_t; +typedef upl_page_info_array_t upl_page_list_ptr_t; -typedef uint32_t upl_offset_t; /* page-aligned byte offset */ -typedef uint32_t upl_size_t; /* page-aligned byte size */ +typedef uint32_t upl_offset_t; /* page-aligned byte offset */ +typedef uint32_t upl_size_t; /* page-aligned byte size */ /* upl invocation flags */ /* top nibble is used by super upl */ typedef uint64_t upl_control_flags_t; -#define UPL_FLAGS_NONE 0x00000000ULL -#define UPL_COPYOUT_FROM 0x00000001ULL -#define UPL_PRECIOUS 0x00000002ULL -#define UPL_NO_SYNC 0x00000004ULL -#define UPL_CLEAN_IN_PLACE 0x00000008ULL -#define UPL_NOBLOCK 0x00000010ULL -#define UPL_RET_ONLY_DIRTY 0x00000020ULL -#define UPL_SET_INTERNAL 0x00000040ULL -#define UPL_QUERY_OBJECT_TYPE 0x00000080ULL -#define UPL_RET_ONLY_ABSENT 0x00000100ULL /* used only for COPY_FROM = FALSE */ +#define UPL_FLAGS_NONE 0x00000000ULL +#define UPL_COPYOUT_FROM 0x00000001ULL +#define UPL_PRECIOUS 0x00000002ULL +#define UPL_NO_SYNC 0x00000004ULL +#define UPL_CLEAN_IN_PLACE 0x00000008ULL +#define UPL_NOBLOCK 0x00000010ULL +#define UPL_RET_ONLY_DIRTY 0x00000020ULL +#define 
UPL_SET_INTERNAL 0x00000040ULL +#define UPL_QUERY_OBJECT_TYPE 0x00000080ULL +#define UPL_RET_ONLY_ABSENT 0x00000100ULL /* used only for COPY_FROM = FALSE */ #define UPL_FILE_IO 0x00000200ULL -#define UPL_SET_LITE 0x00000400ULL -#define UPL_SET_INTERRUPTIBLE 0x00000800ULL -#define UPL_SET_IO_WIRE 0x00001000ULL -#define UPL_FOR_PAGEOUT 0x00002000ULL +#define UPL_SET_LITE 0x00000400ULL +#define UPL_SET_INTERRUPTIBLE 0x00000800ULL +#define UPL_SET_IO_WIRE 0x00001000ULL +#define UPL_FOR_PAGEOUT 0x00002000ULL #define UPL_WILL_BE_DUMPED 0x00004000ULL -#define UPL_FORCE_DATA_SYNC 0x00008000ULL +#define UPL_FORCE_DATA_SYNC 0x00008000ULL /* continued after the ticket bits... */ -#define UPL_PAGE_TICKET_MASK 0x000F0000ULL +#define UPL_PAGE_TICKET_MASK 0x000F0000ULL #define UPL_PAGE_TICKET_SHIFT 16 /* ... flags resume here */ -#define UPL_BLOCK_ACCESS 0x00100000ULL -#define UPL_ENCRYPT 0x00200000ULL -#define UPL_NOZEROFILL 0x00400000ULL -#define UPL_WILL_MODIFY 0x00800000ULL /* caller will modify the pages */ - -#define UPL_NEED_32BIT_ADDR 0x01000000ULL -#define UPL_UBC_MSYNC 0x02000000ULL -#define UPL_UBC_PAGEOUT 0x04000000ULL -#define UPL_UBC_PAGEIN 0x08000000ULL -#define UPL_REQUEST_SET_DIRTY 0x10000000ULL -#define UPL_REQUEST_NO_FAULT 0x20000000ULL /* fail if pages not all resident */ -#define UPL_NOZEROFILLIO 0x40000000ULL /* allow non zerofill pages present */ -#define UPL_REQUEST_FORCE_COHERENCY 0x80000000ULL +#define UPL_BLOCK_ACCESS 0x00100000ULL +#define UPL_ENCRYPT 0x00200000ULL +#define UPL_NOZEROFILL 0x00400000ULL +#define UPL_WILL_MODIFY 0x00800000ULL /* caller will modify the pages */ + +#define UPL_NEED_32BIT_ADDR 0x01000000ULL +#define UPL_UBC_MSYNC 0x02000000ULL +#define UPL_UBC_PAGEOUT 0x04000000ULL +#define UPL_UBC_PAGEIN 0x08000000ULL +#define UPL_REQUEST_SET_DIRTY 0x10000000ULL +#define UPL_REQUEST_NO_FAULT 0x20000000ULL /* fail if pages not all resident */ +#define UPL_NOZEROFILLIO 0x40000000ULL /* allow non zerofill pages present */ +#define UPL_REQUEST_FORCE_COHERENCY 0x80000000ULL /* UPL flags known by this kernel */ -#define UPL_VALID_FLAGS 0xFFFFFFFFFFULL +#define UPL_VALID_FLAGS 0xFFFFFFFFFFULL /* upl abort error flags */ -#define UPL_ABORT_RESTART 0x1 -#define UPL_ABORT_UNAVAILABLE 0x2 -#define UPL_ABORT_ERROR 0x4 -#define UPL_ABORT_FREE_ON_EMPTY 0x8 /* only implemented in wrappers */ -#define UPL_ABORT_DUMP_PAGES 0x10 -#define UPL_ABORT_NOTIFY_EMPTY 0x20 +#define UPL_ABORT_RESTART 0x1 +#define UPL_ABORT_UNAVAILABLE 0x2 +#define UPL_ABORT_ERROR 0x4 +#define UPL_ABORT_FREE_ON_EMPTY 0x8 /* only implemented in wrappers */ +#define UPL_ABORT_DUMP_PAGES 0x10 +#define UPL_ABORT_NOTIFY_EMPTY 0x20 /* deprecated: #define UPL_ABORT_ALLOW_ACCESS 0x40 */ -#define UPL_ABORT_REFERENCE 0x80 +#define UPL_ABORT_REFERENCE 0x80 /* upl pages check flags */ #define UPL_CHECK_DIRTY 0x1 @@ -533,40 +533,40 @@ typedef uint64_t upl_control_flags_t; /* * upl pagein/pageout flags * - * + * * when I/O is issued from this UPL it should be done synchronously */ -#define UPL_IOSYNC 0x1 +#define UPL_IOSYNC 0x1 /* - * the passed in UPL should not have either a commit or abort + * the passed in UPL should not have either a commit or abort * applied to it by the underlying layers... the site that * created the UPL is responsible for cleaning it up. */ -#define UPL_NOCOMMIT 0x2 +#define UPL_NOCOMMIT 0x2 /* * turn off any speculative read-ahead applied at the I/O layer */ -#define UPL_NORDAHEAD 0x4 +#define UPL_NORDAHEAD 0x4 /* * pageout request is targeting a real file * as opposed to a swap file. 
*/ -#define UPL_VNODE_PAGER 0x8 +#define UPL_VNODE_PAGER 0x8 /* * this pageout is being originated as part of an explicit * memory synchronization operation... no speculative clustering * should be applied, only the range specified should be pushed. */ -#define UPL_MSYNC 0x10 +#define UPL_MSYNC 0x10 /* * */ -#define UPL_PAGING_ENCRYPTED 0x20 +#define UPL_PAGING_ENCRYPTED 0x20 /* * this pageout is being originated as part of an explicit @@ -574,7 +574,7 @@ typedef uint64_t upl_control_flags_t; * errors and taking its own action... if an error occurs, * just abort the pages back into the cache unchanged */ -#define UPL_KEEPCACHED 0x40 +#define UPL_KEEPCACHED 0x40 /* * this pageout originated from within cluster_io to deal @@ -584,7 +584,7 @@ typedef uint64_t upl_control_flags_t; * pageout will reenter the FS for the same file currently * being handled in this context. */ -#define UPL_NESTED_PAGEOUT 0x80 +#define UPL_NESTED_PAGEOUT 0x80 /* * we've detected a sequential access pattern and @@ -592,7 +592,7 @@ typedef uint64_t upl_control_flags_t; * pages in... do not count these as real PAGEINs * w/r to our hard throttle maintenance */ -#define UPL_IOSTREAMING 0x100 +#define UPL_IOSTREAMING 0x100 /* * Currently, it's only used for the swap pagein path. @@ -602,70 +602,70 @@ typedef uint64_t upl_control_flags_t; * I/O (correctly) for valid pages. So, this flag is used * to override that logic in the vnode I/O path. */ -#define UPL_IGNORE_VALID_PAGE_CHECK 0x200 +#define UPL_IGNORE_VALID_PAGE_CHECK 0x200 /* upl commit flags */ -#define UPL_COMMIT_FREE_ON_EMPTY 0x1 /* only implemented in wrappers */ -#define UPL_COMMIT_CLEAR_DIRTY 0x2 -#define UPL_COMMIT_SET_DIRTY 0x4 -#define UPL_COMMIT_INACTIVATE 0x8 -#define UPL_COMMIT_NOTIFY_EMPTY 0x10 +#define UPL_COMMIT_FREE_ON_EMPTY 0x1 /* only implemented in wrappers */ +#define UPL_COMMIT_CLEAR_DIRTY 0x2 +#define UPL_COMMIT_SET_DIRTY 0x4 +#define UPL_COMMIT_INACTIVATE 0x8 +#define UPL_COMMIT_NOTIFY_EMPTY 0x10 /* deprecated: #define UPL_COMMIT_ALLOW_ACCESS 0x20 */ -#define UPL_COMMIT_CS_VALIDATED 0x40 -#define UPL_COMMIT_CLEAR_PRECIOUS 0x80 -#define UPL_COMMIT_SPECULATE 0x100 -#define UPL_COMMIT_FREE_ABSENT 0x200 -#define UPL_COMMIT_WRITTEN_BY_KERNEL 0x400 +#define UPL_COMMIT_CS_VALIDATED 0x40 +#define UPL_COMMIT_CLEAR_PRECIOUS 0x80 +#define UPL_COMMIT_SPECULATE 0x100 +#define UPL_COMMIT_FREE_ABSENT 0x200 +#define UPL_COMMIT_WRITTEN_BY_KERNEL 0x400 -#define UPL_COMMIT_KERNEL_ONLY_FLAGS (UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT) +#define UPL_COMMIT_KERNEL_ONLY_FLAGS (UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT) /* flags for return of state from vm_map_get_upl, vm_upl address space */ /* based call */ -#define UPL_DEV_MEMORY 0x1 -#define UPL_PHYS_CONTIG 0x2 +#define UPL_DEV_MEMORY 0x1 +#define UPL_PHYS_CONTIG 0x2 -/* +/* * Flags for the UPL page ops routine. This routine is not exported * out of the kernel at the moment and so the defs live here. */ -#define UPL_POP_DIRTY 0x1 -#define UPL_POP_PAGEOUT 0x2 -#define UPL_POP_PRECIOUS 0x4 -#define UPL_POP_ABSENT 0x8 -#define UPL_POP_BUSY 0x10 - -#define UPL_POP_PHYSICAL 0x10000000 -#define UPL_POP_DUMP 0x20000000 -#define UPL_POP_SET 0x40000000 -#define UPL_POP_CLR 0x80000000 - -/* - * Flags for the UPL range op routine. 
This routine is not exported +#define UPL_POP_DIRTY 0x1 +#define UPL_POP_PAGEOUT 0x2 +#define UPL_POP_PRECIOUS 0x4 +#define UPL_POP_ABSENT 0x8 +#define UPL_POP_BUSY 0x10 + +#define UPL_POP_PHYSICAL 0x10000000 +#define UPL_POP_DUMP 0x20000000 +#define UPL_POP_SET 0x40000000 +#define UPL_POP_CLR 0x80000000 + +/* + * Flags for the UPL range op routine. This routine is not exported * out of the kernel at the moment and so the defs live here. */ /* * UPL_ROP_ABSENT: Returns the extent of the range presented which - * is absent, starting with the start address presented + * is absent, starting with the start address presented */ -#define UPL_ROP_ABSENT 0x01 +#define UPL_ROP_ABSENT 0x01 /* * UPL_ROP_PRESENT: Returns the extent of the range presented which * is present (i.e. resident), starting with the start address presented */ -#define UPL_ROP_PRESENT 0x02 +#define UPL_ROP_PRESENT 0x02 /* * UPL_ROP_DUMP: Dump the pages which are found in the target object * for the target range. */ -#define UPL_ROP_DUMP 0x04 +#define UPL_ROP_DUMP 0x04 -#ifdef PRIVATE +#ifdef PRIVATE -#define UPL_REPRIO_INFO_MASK (0xFFFFFFFF) -#define UPL_REPRIO_INFO_SHIFT 32 +#define UPL_REPRIO_INFO_MASK (0xFFFFFFFF) +#define UPL_REPRIO_INFO_SHIFT 32 /* access macros for upl_t */ @@ -694,7 +694,7 @@ typedef uint64_t upl_control_flags_t; (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].free_when_done) : FALSE) #define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \ - (((upl)[(index)].phys_addr != 0) ? \ + (((upl)[(index)].phys_addr != 0) ? \ ((upl)[(index)].free_when_done = TRUE) : FALSE) #define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \ @@ -702,7 +702,7 @@ typedef uint64_t upl_control_flags_t; ((upl)[(index)].free_when_done = FALSE) : FALSE) #define UPL_REPRIO_INFO_BLKNO(upl, index) \ - (((upl)->upl_reprio_info[(index)]) & UPL_REPRIO_INFO_MASK) + (((upl)->upl_reprio_info[(index)]) & UPL_REPRIO_INFO_MASK) #define UPL_REPRIO_INFO_LEN(upl, index) \ ((((upl)->upl_reprio_info[(index)]) >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK) @@ -724,10 +724,10 @@ typedef uint64_t upl_control_flags_t; /* The call prototyped below is used strictly by UPL_GET_INTERNAL_PAGE_LIST */ -extern vm_size_t upl_offset_to_pagelist; -extern vm_size_t upl_get_internal_pagelist_offset(void); -extern void* upl_get_internal_vectorupl(upl_t); -extern upl_page_info_t* upl_get_internal_vectorupl_pagelist(upl_t); +extern vm_size_t upl_offset_to_pagelist; +extern vm_size_t upl_get_internal_pagelist_offset(void); +extern void* upl_get_internal_vectorupl(upl_t); +extern upl_page_info_t* upl_get_internal_vectorupl_pagelist(upl_t); /*Use this variant to get the UPL's page list iff:*/ /*- the upl being passed in is already part of a vector UPL*/ @@ -750,15 +750,15 @@ extern upl_page_info_t* upl_get_internal_vectorupl_pagelist(upl_t); __BEGIN_DECLS -extern ppnum_t upl_phys_page(upl_page_info_t *upl, int index); -extern boolean_t upl_device_page(upl_page_info_t *upl); -extern boolean_t upl_speculative_page(upl_page_info_t *upl, int index); -extern void upl_clear_dirty(upl_t upl, boolean_t value); -extern void upl_set_referenced(upl_t upl, boolean_t value); -extern void upl_range_needed(upl_t upl, int index, int count); +extern ppnum_t upl_phys_page(upl_page_info_t *upl, int index); +extern boolean_t upl_device_page(upl_page_info_t *upl); +extern boolean_t upl_speculative_page(upl_page_info_t *upl, int index); +extern void upl_clear_dirty(upl_t upl, boolean_t value); +extern void upl_set_referenced(upl_t upl, boolean_t value); +extern void upl_range_needed(upl_t upl, int
index, int count); #if CONFIG_IOSCHED extern int64_t upl_blkno(upl_page_info_t *upl, int index); -extern void upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int size, int64_t blkno); +extern void upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int size, int64_t blkno); #endif __END_DECLS @@ -767,12 +767,12 @@ __END_DECLS __BEGIN_DECLS -extern boolean_t upl_page_present(upl_page_info_t *upl, int index); -extern boolean_t upl_dirty_page(upl_page_info_t *upl, int index); -extern boolean_t upl_valid_page(upl_page_info_t *upl, int index); -extern void upl_deallocate(upl_t upl); -extern void upl_mark_decmp(upl_t upl); -extern void upl_unmark_decmp(upl_t upl); +extern boolean_t upl_page_present(upl_page_info_t *upl, int index); +extern boolean_t upl_dirty_page(upl_page_info_t *upl, int index); +extern boolean_t upl_valid_page(upl_page_info_t *upl, int index); +extern void upl_deallocate(upl_t upl); +extern void upl_mark_decmp(upl_t upl); +extern void upl_unmark_decmp(upl_t upl); #ifdef KERNEL_PRIVATE @@ -785,4 +785,4 @@ __END_DECLS #endif /* KERNEL */ -#endif /* _MACH_MEMORY_OBJECT_TYPES_H_ */ +#endif /* _MACH_MEMORY_OBJECT_TYPES_H_ */ diff --git a/osfmk/mach/message.h b/osfmk/mach/message.h index edc862503..ceb069a6a 100644 --- a/osfmk/mach/message.h +++ b/osfmk/mach/message.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -68,7 +68,7 @@ * Mach IPC message and primitive function definitions. */ -#ifndef _MACH_MESSAGE_H_ +#ifndef _MACH_MESSAGE_H_ #define _MACH_MESSAGE_H_ #include @@ -95,7 +95,7 @@ typedef natural_t mach_msg_timeout_t; * (No MACH_SEND_TIMEOUT/MACH_RCV_TIMEOUT option.) */ -#define MACH_MSG_TIMEOUT_NONE ((mach_msg_timeout_t) 0) +#define MACH_MSG_TIMEOUT_NONE ((mach_msg_timeout_t) 0) /* * The kernel uses MACH_MSGH_BITS_COMPLEX as a hint. If it isn't on, it @@ -129,76 +129,76 @@ typedef natural_t mach_msg_timeout_t; * or for future interface expansion. */ -#define MACH_MSGH_BITS_ZERO 0x00000000 +#define MACH_MSGH_BITS_ZERO 0x00000000 -#define MACH_MSGH_BITS_REMOTE_MASK 0x0000001f -#define MACH_MSGH_BITS_LOCAL_MASK 0x00001f00 -#define MACH_MSGH_BITS_VOUCHER_MASK 0x001f0000 +#define MACH_MSGH_BITS_REMOTE_MASK 0x0000001f +#define MACH_MSGH_BITS_LOCAL_MASK 0x00001f00 +#define MACH_MSGH_BITS_VOUCHER_MASK 0x001f0000 -#define MACH_MSGH_BITS_PORTS_MASK \ - (MACH_MSGH_BITS_REMOTE_MASK | \ - MACH_MSGH_BITS_LOCAL_MASK | \ - MACH_MSGH_BITS_VOUCHER_MASK) +#define MACH_MSGH_BITS_PORTS_MASK \ + (MACH_MSGH_BITS_REMOTE_MASK | \ + MACH_MSGH_BITS_LOCAL_MASK | \ + MACH_MSGH_BITS_VOUCHER_MASK) -#define MACH_MSGH_BITS_COMPLEX 0x80000000U /* message is complex */ +#define MACH_MSGH_BITS_COMPLEX 0x80000000U /* message is complex */ -#define MACH_MSGH_BITS_USER 0x801f1f1fU /* allowed bits user->kernel */ +#define MACH_MSGH_BITS_USER 0x801f1f1fU /* allowed bits user->kernel */ -#define MACH_MSGH_BITS_RAISEIMP 0x20000000U /* importance raised due to msg */ -#define MACH_MSGH_BITS_DENAP MACH_MSGH_BITS_RAISEIMP +#define MACH_MSGH_BITS_RAISEIMP 0x20000000U /* importance raised due to msg */ +#define MACH_MSGH_BITS_DENAP MACH_MSGH_BITS_RAISEIMP -#define MACH_MSGH_BITS_IMPHOLDASRT 0x10000000U /* assertion help, userland private */ -#define MACH_MSGH_BITS_DENAPHOLDASRT MACH_MSGH_BITS_IMPHOLDASRT +#define MACH_MSGH_BITS_IMPHOLDASRT 0x10000000U /* assertion help, userland private */ +#define MACH_MSGH_BITS_DENAPHOLDASRT MACH_MSGH_BITS_IMPHOLDASRT -#define MACH_MSGH_BITS_CIRCULAR 0x10000000U /* message circular, kernel private */ +#define MACH_MSGH_BITS_CIRCULAR 0x10000000U /* message circular, kernel private */ -#define MACH_MSGH_BITS_USED 0xb01f1f1fU +#define MACH_MSGH_BITS_USED 0xb01f1f1fU /* setter macros for the bits */ -#define MACH_MSGH_BITS(remote, local) /* legacy */ \ - ((remote) | ((local) << 8)) -#define MACH_MSGH_BITS_SET_PORTS(remote, local, voucher) \ - (((remote) & MACH_MSGH_BITS_REMOTE_MASK) | \ - (((local) << 8) & MACH_MSGH_BITS_LOCAL_MASK) | \ +#define MACH_MSGH_BITS(remote, local) /* legacy */ \ + ((remote) | ((local) << 8)) +#define MACH_MSGH_BITS_SET_PORTS(remote, local, voucher) \ + (((remote) & MACH_MSGH_BITS_REMOTE_MASK) | \ + (((local) << 8) & MACH_MSGH_BITS_LOCAL_MASK) | \ (((voucher) << 16) & MACH_MSGH_BITS_VOUCHER_MASK)) -#define MACH_MSGH_BITS_SET(remote, local, voucher, other) \ +#define MACH_MSGH_BITS_SET(remote, local, voucher, other) \ (MACH_MSGH_BITS_SET_PORTS((remote), (local), (voucher)) \ | ((other) &~ MACH_MSGH_BITS_PORTS_MASK)) /* getter macros for pulling values out of the bits field */ -#define MACH_MSGH_BITS_REMOTE(bits) \ - ((bits) & MACH_MSGH_BITS_REMOTE_MASK) -#define MACH_MSGH_BITS_LOCAL(bits) \ - (((bits) & MACH_MSGH_BITS_LOCAL_MASK) >> 8) -#define MACH_MSGH_BITS_VOUCHER(bits) \ - (((bits) & MACH_MSGH_BITS_VOUCHER_MASK) >> 16) -#define MACH_MSGH_BITS_PORTS(bits) \ +#define MACH_MSGH_BITS_REMOTE(bits) \ + ((bits) & MACH_MSGH_BITS_REMOTE_MASK) +#define MACH_MSGH_BITS_LOCAL(bits) \ 
+ (((bits) & MACH_MSGH_BITS_LOCAL_MASK) >> 8) +#define MACH_MSGH_BITS_VOUCHER(bits) \ + (((bits) & MACH_MSGH_BITS_VOUCHER_MASK) >> 16) +#define MACH_MSGH_BITS_PORTS(bits) \ ((bits) & MACH_MSGH_BITS_PORTS_MASK) -#define MACH_MSGH_BITS_OTHER(bits) \ - ((bits) &~ MACH_MSGH_BITS_PORTS_MASK) +#define MACH_MSGH_BITS_OTHER(bits) \ + ((bits) &~ MACH_MSGH_BITS_PORTS_MASK) /* checking macros */ -#define MACH_MSGH_BITS_HAS_REMOTE(bits) \ +#define MACH_MSGH_BITS_HAS_REMOTE(bits) \ (MACH_MSGH_BITS_REMOTE(bits) != MACH_MSGH_BITS_ZERO) -#define MACH_MSGH_BITS_HAS_LOCAL(bits) \ +#define MACH_MSGH_BITS_HAS_LOCAL(bits) \ (MACH_MSGH_BITS_LOCAL(bits) != MACH_MSGH_BITS_ZERO) -#define MACH_MSGH_BITS_HAS_VOUCHER(bits) \ +#define MACH_MSGH_BITS_HAS_VOUCHER(bits) \ (MACH_MSGH_BITS_VOUCHER(bits) != MACH_MSGH_BITS_ZERO) -#define MACH_MSGH_BITS_IS_COMPLEX(bits) \ +#define MACH_MSGH_BITS_IS_COMPLEX(bits) \ (((bits) & MACH_MSGH_BITS_COMPLEX) != MACH_MSGH_BITS_ZERO) /* importance checking macros */ -#define MACH_MSGH_BITS_RAISED_IMPORTANCE(bits) \ +#define MACH_MSGH_BITS_RAISED_IMPORTANCE(bits) \ (((bits) & MACH_MSGH_BITS_RAISEIMP) != MACH_MSGH_BITS_ZERO) -#define MACH_MSGH_BITS_HOLDS_IMPORTANCE_ASSERTION(bits) \ +#define MACH_MSGH_BITS_HOLDS_IMPORTANCE_ASSERTION(bits) \ (((bits) & MACH_MSGH_BITS_IMPHOLDASRT) != MACH_MSGH_BITS_ZERO) /* * Every message starts with a message header. * Following the message header, if the message is complex, are a count - * of type descriptors and the type descriptors themselves - * (mach_msg_descriptor_t). The size of the message must be specified in - * bytes, and includes the message header, descriptor count, descriptors, + * of type descriptors and the type descriptors themselves + * (mach_msg_descriptor_t). The size of the message must be specified in + * bytes, and includes the message header, descriptor count, descriptors, * and inline data. * * The msgh_remote_port field specifies the destination of the message. 
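
As a concrete illustration of how the msgh_bits macros and header fields above fit together, here is a minimal user-space sketch (not part of the patch itself) that builds and sends a simple, non-complex message. It assumes the caller already holds a send right for dest and a receive right for reply; the msgh_id value 0x1000 and the one-word payload are hypothetical placeholders, not values defined by this header.

#include <mach/mach.h>
#include <stdint.h>

static mach_msg_return_t
send_hello(mach_port_t dest, mach_port_t reply)
{
	struct {
		mach_msg_header_t header;
		uint32_t          payload;   /* inline data follows the header */
	} msg;

	/* No descriptors follow, so MACH_MSGH_BITS_COMPLEX stays clear; the
	 * remote and local bit fields carry the two port dispositions. */
	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
	    MACH_MSG_TYPE_MAKE_SEND_ONCE);
	msg.header.msgh_size = sizeof(msg);            /* header + inline data */
	msg.header.msgh_remote_port = dest;            /* destination of the message */
	msg.header.msgh_local_port = reply;            /* reply port */
	msg.header.msgh_voucher_port = MACH_PORT_NULL; /* no voucher right */
	msg.header.msgh_id = 0x1000;                   /* hypothetical protocol id */
	msg.payload = 42;

	return mach_msg(&msg.header, MACH_SEND_MSG, msg.header.msgh_size,
	    0, MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
}

On the receive side, MACH_MSGH_BITS_REMOTE() and MACH_MSGH_BITS_LOCAL() recover the transferred dispositions, and MACH_MSGH_BITS_IS_COMPLEX() tells the receiver whether a descriptor count follows the header.
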
@@ -219,7 +219,7 @@ typedef natural_t mach_msg_timeout_t; */ typedef unsigned int mach_msg_bits_t; -typedef natural_t mach_msg_size_t; +typedef natural_t mach_msg_size_t; typedef integer_t mach_msg_id_t; #define MACH_MSG_SIZE_NULL (mach_msg_size_t *) 0 @@ -230,144 +230,136 @@ typedef unsigned int mach_msg_priority_t; typedef unsigned int mach_msg_type_name_t; -#define MACH_MSG_TYPE_MOVE_RECEIVE 16 /* Must hold receive right */ -#define MACH_MSG_TYPE_MOVE_SEND 17 /* Must hold send right(s) */ -#define MACH_MSG_TYPE_MOVE_SEND_ONCE 18 /* Must hold sendonce right */ -#define MACH_MSG_TYPE_COPY_SEND 19 /* Must hold send right(s) */ -#define MACH_MSG_TYPE_MAKE_SEND 20 /* Must hold receive right */ -#define MACH_MSG_TYPE_MAKE_SEND_ONCE 21 /* Must hold receive right */ -#define MACH_MSG_TYPE_COPY_RECEIVE 22 /* NOT VALID */ -#define MACH_MSG_TYPE_DISPOSE_RECEIVE 24 /* must hold receive right */ -#define MACH_MSG_TYPE_DISPOSE_SEND 25 /* must hold send right(s) */ -#define MACH_MSG_TYPE_DISPOSE_SEND_ONCE 26 /* must hold sendonce right */ +#define MACH_MSG_TYPE_MOVE_RECEIVE 16 /* Must hold receive right */ +#define MACH_MSG_TYPE_MOVE_SEND 17 /* Must hold send right(s) */ +#define MACH_MSG_TYPE_MOVE_SEND_ONCE 18 /* Must hold sendonce right */ +#define MACH_MSG_TYPE_COPY_SEND 19 /* Must hold send right(s) */ +#define MACH_MSG_TYPE_MAKE_SEND 20 /* Must hold receive right */ +#define MACH_MSG_TYPE_MAKE_SEND_ONCE 21 /* Must hold receive right */ +#define MACH_MSG_TYPE_COPY_RECEIVE 22 /* NOT VALID */ +#define MACH_MSG_TYPE_DISPOSE_RECEIVE 24 /* must hold receive right */ +#define MACH_MSG_TYPE_DISPOSE_SEND 25 /* must hold send right(s) */ +#define MACH_MSG_TYPE_DISPOSE_SEND_ONCE 26 /* must hold sendonce right */ typedef unsigned int mach_msg_copy_options_t; -#define MACH_MSG_PHYSICAL_COPY 0 -#define MACH_MSG_VIRTUAL_COPY 1 -#define MACH_MSG_ALLOCATE 2 -#define MACH_MSG_OVERWRITE 3 /* deprecated */ +#define MACH_MSG_PHYSICAL_COPY 0 +#define MACH_MSG_VIRTUAL_COPY 1 +#define MACH_MSG_ALLOCATE 2 +#define MACH_MSG_OVERWRITE 3 /* deprecated */ #ifdef MACH_KERNEL -#define MACH_MSG_KALLOC_COPY_T 4 +#define MACH_MSG_KALLOC_COPY_T 4 #endif /* MACH_KERNEL */ /* - * In a complex mach message, the mach_msg_header_t is followed by - * a descriptor count, then an array of that number of descriptors + * In a complex mach message, the mach_msg_header_t is followed by + * a descriptor count, then an array of that number of descriptors * (mach_msg_*_descriptor_t). The type field of mach_msg_type_descriptor_t * (which any descriptor can be cast to) indicates the flavor of the * descriptor. * * Note that in LP64, the various types of descriptors are no longer all - * the same size as mach_msg_descriptor_t, so the array cannot be indexed + * the same size as mach_msg_descriptor_t, so the array cannot be indexed * as expected. 
*/ typedef unsigned int mach_msg_descriptor_type_t; -#define MACH_MSG_PORT_DESCRIPTOR 0 -#define MACH_MSG_OOL_DESCRIPTOR 1 -#define MACH_MSG_OOL_PORTS_DESCRIPTOR 2 -#define MACH_MSG_OOL_VOLATILE_DESCRIPTOR 3 +#define MACH_MSG_PORT_DESCRIPTOR 0 +#define MACH_MSG_OOL_DESCRIPTOR 1 +#define MACH_MSG_OOL_PORTS_DESCRIPTOR 2 +#define MACH_MSG_OOL_VOLATILE_DESCRIPTOR 3 #pragma pack(4) -typedef struct -{ - natural_t pad1; - mach_msg_size_t pad2; - unsigned int pad3 : 24; - mach_msg_descriptor_type_t type : 8; +typedef struct{ + natural_t pad1; + mach_msg_size_t pad2; + unsigned int pad3 : 24; + mach_msg_descriptor_type_t type : 8; } mach_msg_type_descriptor_t; -typedef struct -{ - mach_port_t name; +typedef struct{ + mach_port_t name; #if !(defined(KERNEL) && defined(__LP64__)) // Pad to 8 bytes everywhere except the K64 kernel where mach_port_t is 8 bytes - mach_msg_size_t pad1; + mach_msg_size_t pad1; #endif - unsigned int pad2 : 16; - mach_msg_type_name_t disposition : 8; - mach_msg_descriptor_type_t type : 8; + unsigned int pad2 : 16; + mach_msg_type_name_t disposition : 8; + mach_msg_descriptor_type_t type : 8; #if defined(KERNEL) - uint32_t pad_end; + uint32_t pad_end; #endif } mach_msg_port_descriptor_t; -typedef struct -{ - uint32_t address; - mach_msg_size_t size; - boolean_t deallocate: 8; - mach_msg_copy_options_t copy: 8; - unsigned int pad1: 8; - mach_msg_descriptor_type_t type: 8; +typedef struct{ + uint32_t address; + mach_msg_size_t size; + boolean_t deallocate: 8; + mach_msg_copy_options_t copy: 8; + unsigned int pad1: 8; + mach_msg_descriptor_type_t type: 8; } mach_msg_ool_descriptor32_t; -typedef struct -{ - uint64_t address; - boolean_t deallocate: 8; - mach_msg_copy_options_t copy: 8; - unsigned int pad1: 8; - mach_msg_descriptor_type_t type: 8; - mach_msg_size_t size; +typedef struct{ + uint64_t address; + boolean_t deallocate: 8; + mach_msg_copy_options_t copy: 8; + unsigned int pad1: 8; + mach_msg_descriptor_type_t type: 8; + mach_msg_size_t size; } mach_msg_ool_descriptor64_t; -typedef struct -{ - void* address; +typedef struct{ + void* address; #if !defined(__LP64__) - mach_msg_size_t size; + mach_msg_size_t size; #endif - boolean_t deallocate: 8; - mach_msg_copy_options_t copy: 8; - unsigned int pad1: 8; - mach_msg_descriptor_type_t type: 8; + boolean_t deallocate: 8; + mach_msg_copy_options_t copy: 8; + unsigned int pad1: 8; + mach_msg_descriptor_type_t type: 8; #if defined(__LP64__) - mach_msg_size_t size; + mach_msg_size_t size; #endif #if defined(KERNEL) && !defined(__LP64__) - uint32_t pad_end; + uint32_t pad_end; #endif } mach_msg_ool_descriptor_t; -typedef struct -{ - uint32_t address; - mach_msg_size_t count; - boolean_t deallocate: 8; - mach_msg_copy_options_t copy: 8; - mach_msg_type_name_t disposition : 8; - mach_msg_descriptor_type_t type : 8; +typedef struct{ + uint32_t address; + mach_msg_size_t count; + boolean_t deallocate: 8; + mach_msg_copy_options_t copy: 8; + mach_msg_type_name_t disposition : 8; + mach_msg_descriptor_type_t type : 8; } mach_msg_ool_ports_descriptor32_t; -typedef struct -{ - uint64_t address; - boolean_t deallocate: 8; - mach_msg_copy_options_t copy: 8; - mach_msg_type_name_t disposition : 8; - mach_msg_descriptor_type_t type : 8; - mach_msg_size_t count; +typedef struct{ + uint64_t address; + boolean_t deallocate: 8; + mach_msg_copy_options_t copy: 8; + mach_msg_type_name_t disposition : 8; + mach_msg_descriptor_type_t type : 8; + mach_msg_size_t count; } mach_msg_ool_ports_descriptor64_t; -typedef struct -{ - void* address; +typedef 
struct{ + void* address; #if !defined(__LP64__) - mach_msg_size_t count; + mach_msg_size_t count; #endif - boolean_t deallocate: 8; - mach_msg_copy_options_t copy: 8; - mach_msg_type_name_t disposition : 8; - mach_msg_descriptor_type_t type : 8; + boolean_t deallocate: 8; + mach_msg_copy_options_t copy: 8; + mach_msg_type_name_t disposition : 8; + mach_msg_descriptor_type_t type : 8; #if defined(__LP64__) - mach_msg_size_t count; + mach_msg_size_t count; #endif #if defined(KERNEL) && !defined(__LP64__) - uint32_t pad_end; + uint32_t pad_end; #endif } mach_msg_ool_ports_descriptor_t; @@ -377,61 +369,55 @@ typedef struct * are of the same size in that environment. */ #if defined(__LP64__) && defined(KERNEL) -typedef union -{ - mach_msg_port_descriptor_t port; - mach_msg_ool_descriptor32_t out_of_line; - mach_msg_ool_ports_descriptor32_t ool_ports; - mach_msg_type_descriptor_t type; +typedef union{ + mach_msg_port_descriptor_t port; + mach_msg_ool_descriptor32_t out_of_line; + mach_msg_ool_ports_descriptor32_t ool_ports; + mach_msg_type_descriptor_t type; } mach_msg_descriptor_t; #else -typedef union -{ - mach_msg_port_descriptor_t port; - mach_msg_ool_descriptor_t out_of_line; - mach_msg_ool_ports_descriptor_t ool_ports; - mach_msg_type_descriptor_t type; +typedef union{ + mach_msg_port_descriptor_t port; + mach_msg_ool_descriptor_t out_of_line; + mach_msg_ool_ports_descriptor_t ool_ports; + mach_msg_type_descriptor_t type; } mach_msg_descriptor_t; #endif -typedef struct -{ - mach_msg_size_t msgh_descriptor_count; +typedef struct{ + mach_msg_size_t msgh_descriptor_count; } mach_msg_body_t; #define MACH_MSG_BODY_NULL (mach_msg_body_t *) 0 #define MACH_MSG_DESCRIPTOR_NULL (mach_msg_descriptor_t *) 0 -typedef struct -{ - mach_msg_bits_t msgh_bits; - mach_msg_size_t msgh_size; - mach_port_t msgh_remote_port; - mach_port_t msgh_local_port; - mach_port_name_t msgh_voucher_port; - mach_msg_id_t msgh_id; +typedef struct{ + mach_msg_bits_t msgh_bits; + mach_msg_size_t msgh_size; + mach_port_t msgh_remote_port; + mach_port_t msgh_local_port; + mach_port_name_t msgh_voucher_port; + mach_msg_id_t msgh_id; } mach_msg_header_t; -#define msgh_reserved msgh_voucher_port -#define MACH_MSG_NULL (mach_msg_header_t *) 0 +#define msgh_reserved msgh_voucher_port +#define MACH_MSG_NULL (mach_msg_header_t *) 0 -typedef struct -{ - mach_msg_header_t header; - mach_msg_body_t body; +typedef struct{ + mach_msg_header_t header; + mach_msg_body_t body; } mach_msg_base_t; -typedef unsigned int mach_msg_trailer_type_t; +typedef unsigned int mach_msg_trailer_type_t; -#define MACH_MSG_TRAILER_FORMAT_0 0 +#define MACH_MSG_TRAILER_FORMAT_0 0 -typedef unsigned int mach_msg_trailer_size_t; +typedef unsigned int mach_msg_trailer_size_t; typedef char *mach_msg_trailer_info_t; -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; } mach_msg_trailer_t; /* @@ -443,24 +429,21 @@ typedef struct * multiple threads receive and/or process received * messages. 
*/ -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; } mach_msg_seqno_trailer_t; -typedef struct -{ - unsigned int val[2]; +typedef struct{ + unsigned int val[2]; } security_token_t; -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; - security_token_t msgh_sender; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; } mach_msg_security_trailer_t; /* @@ -472,98 +455,89 @@ typedef struct * of the subject identity within the token may change * over time. */ -typedef struct -{ - unsigned int val[8]; +typedef struct{ + unsigned int val[8]; } audit_token_t; -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; - security_token_t msgh_sender; - audit_token_t msgh_audit; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; + audit_token_t msgh_audit; } mach_msg_audit_trailer_t; -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; - security_token_t msgh_sender; - audit_token_t msgh_audit; - mach_port_context_t msgh_context; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; + audit_token_t msgh_audit; + mach_port_context_t msgh_context; } mach_msg_context_trailer_t; #if defined(MACH_KERNEL_PRIVATE) && defined(__arm64__) -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; - security_token_t msgh_sender; - audit_token_t msgh_audit; - mach_port_context32_t msgh_context; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; + audit_token_t msgh_audit; + mach_port_context32_t msgh_context; } mach_msg_context_trailer32_t; -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; - security_token_t msgh_sender; - audit_token_t msgh_audit; - mach_port_context64_t msgh_context; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; + audit_token_t msgh_audit; + mach_port_context64_t msgh_context; } mach_msg_context_trailer64_t; #endif -typedef struct -{ - mach_port_name_t sender; +typedef struct{ + mach_port_name_t sender; } msg_labels_t; -/* - Trailer type to pass MAC policy label info as a mach message trailer. - -*/ - -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; - security_token_t msgh_sender; - audit_token_t msgh_audit; - mach_port_context_t msgh_context; - int msgh_ad; - msg_labels_t msgh_labels; +/* + * Trailer type to pass MAC policy label info as a mach message trailer. 
+ * + */ + +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; + audit_token_t msgh_audit; + mach_port_context_t msgh_context; + int msgh_ad; + msg_labels_t msgh_labels; } mach_msg_mac_trailer_t; #if defined(MACH_KERNEL_PRIVATE) && defined(__arm64__) -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; - security_token_t msgh_sender; - audit_token_t msgh_audit; - mach_port_context32_t msgh_context; - int msgh_ad; - msg_labels_t msgh_labels; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; + audit_token_t msgh_audit; + mach_port_context32_t msgh_context; + int msgh_ad; + msg_labels_t msgh_labels; } mach_msg_mac_trailer32_t; -typedef struct -{ - mach_msg_trailer_type_t msgh_trailer_type; - mach_msg_trailer_size_t msgh_trailer_size; - mach_port_seqno_t msgh_seqno; - security_token_t msgh_sender; - audit_token_t msgh_audit; - mach_port_context64_t msgh_context; - int msgh_ad; - msg_labels_t msgh_labels; +typedef struct{ + mach_msg_trailer_type_t msgh_trailer_type; + mach_msg_trailer_size_t msgh_trailer_size; + mach_port_seqno_t msgh_seqno; + security_token_t msgh_sender; + audit_token_t msgh_audit; + mach_port_context64_t msgh_context; + int msgh_ad; + msg_labels_t msgh_labels; } mach_msg_mac_trailer64_t; #endif @@ -597,7 +571,7 @@ typedef mach_msg_mac_trailer_t mach_msg_max_trailer_t; typedef mach_msg_security_trailer_t mach_msg_format_0_trailer_t; /*typedef mach_msg_mac_trailer_t mach_msg_format_0_trailer_t; -*/ + */ #define MACH_MSG_TRAILER_FORMAT_0_SIZE sizeof(mach_msg_format_0_trailer_t) @@ -607,35 +581,32 @@ extern security_token_t KERNEL_SECURITY_TOKEN; #define KERNEL_AUDIT_TOKEN_VALUE { {0, 0, 0, 0, 0, 0, 0, 0} } extern audit_token_t KERNEL_AUDIT_TOKEN; -typedef integer_t mach_msg_options_t; +typedef integer_t mach_msg_options_t; -typedef struct -{ - mach_msg_header_t header; +typedef struct{ + mach_msg_header_t header; } mach_msg_empty_send_t; -typedef struct -{ - mach_msg_header_t header; - mach_msg_trailer_t trailer; +typedef struct{ + mach_msg_header_t header; + mach_msg_trailer_t trailer; } mach_msg_empty_rcv_t; -typedef union -{ - mach_msg_empty_send_t send; - mach_msg_empty_rcv_t rcv; +typedef union{ + mach_msg_empty_send_t send; + mach_msg_empty_rcv_t rcv; } mach_msg_empty_t; #pragma pack() /* utility to round the message size - will become machine dependent */ -#define round_msg(x) (((mach_msg_size_t)(x) + sizeof (natural_t) - 1) & \ - ~(sizeof (natural_t) - 1)) +#define round_msg(x) (((mach_msg_size_t)(x) + sizeof (natural_t) - 1) & \ + ~(sizeof (natural_t) - 1)) /* * There is no fixed upper bound to the size of Mach messages. */ -#define MACH_MSG_SIZE_MAX ((mach_msg_size_t) ~0) +#define MACH_MSG_SIZE_MAX ((mach_msg_size_t) ~0) #if defined(__APPLE_API_PRIVATE) /* @@ -646,16 +617,16 @@ typedef union * In either case, waiting for memory is [currently] outside * the scope of send timeout values provided to IPC. */ -#define MACH_MSG_SIZE_RELIABLE ((mach_msg_size_t) 256 * 1024) +#define MACH_MSG_SIZE_RELIABLE ((mach_msg_size_t) 256 * 1024) #endif /* * Compatibility definitions, for code written * when there was a msgh_kind instead of msgh_seqno. 
*/ -#define MACH_MSGH_KIND_NORMAL 0x00000000 -#define MACH_MSGH_KIND_NOTIFICATION 0x00000001 -#define msgh_kind msgh_seqno -#define mach_msg_kind_t mach_port_seqno_t +#define MACH_MSGH_KIND_NORMAL 0x00000000 +#define MACH_MSGH_KIND_NOTIFICATION 0x00000001 +#define msgh_kind msgh_seqno +#define mach_msg_kind_t mach_port_seqno_t typedef natural_t mach_msg_type_size_t; typedef natural_t mach_msg_type_number_t; @@ -669,72 +640,72 @@ typedef natural_t mach_msg_type_number_t; * are not transferred, just the port name.) */ -#define MACH_MSG_TYPE_PORT_NONE 0 +#define MACH_MSG_TYPE_PORT_NONE 0 -#define MACH_MSG_TYPE_PORT_NAME 15 -#define MACH_MSG_TYPE_PORT_RECEIVE MACH_MSG_TYPE_MOVE_RECEIVE -#define MACH_MSG_TYPE_PORT_SEND MACH_MSG_TYPE_MOVE_SEND -#define MACH_MSG_TYPE_PORT_SEND_ONCE MACH_MSG_TYPE_MOVE_SEND_ONCE +#define MACH_MSG_TYPE_PORT_NAME 15 +#define MACH_MSG_TYPE_PORT_RECEIVE MACH_MSG_TYPE_MOVE_RECEIVE +#define MACH_MSG_TYPE_PORT_SEND MACH_MSG_TYPE_MOVE_SEND +#define MACH_MSG_TYPE_PORT_SEND_ONCE MACH_MSG_TYPE_MOVE_SEND_ONCE -#define MACH_MSG_TYPE_LAST 22 /* Last assigned */ +#define MACH_MSG_TYPE_LAST 22 /* Last assigned */ /* * A dummy value. Mostly used to indicate that the actual value * will be filled in later, dynamically. */ -#define MACH_MSG_TYPE_POLYMORPHIC ((mach_msg_type_name_t) -1) +#define MACH_MSG_TYPE_POLYMORPHIC ((mach_msg_type_name_t) -1) /* * Is a given item a port type? */ -#define MACH_MSG_TYPE_PORT_ANY(x) \ - (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \ +#define MACH_MSG_TYPE_PORT_ANY(x) \ + (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \ ((x) <= MACH_MSG_TYPE_MAKE_SEND_ONCE)) -#define MACH_MSG_TYPE_PORT_ANY_SEND(x) \ - (((x) >= MACH_MSG_TYPE_MOVE_SEND) && \ +#define MACH_MSG_TYPE_PORT_ANY_SEND(x) \ + (((x) >= MACH_MSG_TYPE_MOVE_SEND) && \ ((x) <= MACH_MSG_TYPE_MAKE_SEND_ONCE)) -#define MACH_MSG_TYPE_PORT_ANY_RIGHT(x) \ - (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \ +#define MACH_MSG_TYPE_PORT_ANY_RIGHT(x) \ + (((x) >= MACH_MSG_TYPE_MOVE_RECEIVE) && \ ((x) <= MACH_MSG_TYPE_MOVE_SEND_ONCE)) typedef integer_t mach_msg_option_t; -#define MACH_MSG_OPTION_NONE 0x00000000 +#define MACH_MSG_OPTION_NONE 0x00000000 -#define MACH_SEND_MSG 0x00000001 -#define MACH_RCV_MSG 0x00000002 +#define MACH_SEND_MSG 0x00000001 +#define MACH_RCV_MSG 0x00000002 -#define MACH_RCV_LARGE 0x00000004 /* report large message sizes */ -#define MACH_RCV_LARGE_IDENTITY 0x00000008 /* identify source of large messages */ +#define MACH_RCV_LARGE 0x00000004 /* report large message sizes */ +#define MACH_RCV_LARGE_IDENTITY 0x00000008 /* identify source of large messages */ -#define MACH_SEND_TIMEOUT 0x00000010 /* timeout value applies to send */ +#define MACH_SEND_TIMEOUT 0x00000010 /* timeout value applies to send */ #define MACH_SEND_OVERRIDE 0x00000020 /* priority override for send */ -#define MACH_SEND_INTERRUPT 0x00000040 /* don't restart interrupted sends */ -#define MACH_SEND_NOTIFY 0x00000080 /* arm send-possible notify */ -#define MACH_SEND_ALWAYS 0x00010000 /* ignore qlimits - kernel only */ -#define MACH_SEND_TRAILER 0x00020000 /* sender-provided trailer */ +#define MACH_SEND_INTERRUPT 0x00000040 /* don't restart interrupted sends */ +#define MACH_SEND_NOTIFY 0x00000080 /* arm send-possible notify */ +#define MACH_SEND_ALWAYS 0x00010000 /* ignore qlimits - kernel only */ +#define MACH_SEND_TRAILER 0x00020000 /* sender-provided trailer */ #define MACH_SEND_NOIMPORTANCE 0x00040000 /* msg won't carry importance */ -#define MACH_SEND_NODENAP MACH_SEND_NOIMPORTANCE -#define MACH_SEND_IMPORTANCE 0x00080000 /* msg 
carries importance - kernel only */ -#define MACH_SEND_SYNC_OVERRIDE 0x00100000 /* msg should do sync ipc override */ -#define MACH_SEND_PROPAGATE_QOS 0x00200000 /* IPC should propagate the caller's QoS */ -#define MACH_SEND_SYNC_USE_THRPRI MACH_SEND_PROPAGATE_QOS /* obsolete name */ +#define MACH_SEND_NODENAP MACH_SEND_NOIMPORTANCE +#define MACH_SEND_IMPORTANCE 0x00080000 /* msg carries importance - kernel only */ +#define MACH_SEND_SYNC_OVERRIDE 0x00100000 /* msg should do sync ipc override */ +#define MACH_SEND_PROPAGATE_QOS 0x00200000 /* IPC should propagate the caller's QoS */ +#define MACH_SEND_SYNC_USE_THRPRI MACH_SEND_PROPAGATE_QOS /* obsolete name */ #define MACH_SEND_KERNEL 0x00400000 /* full send from kernel space - kernel only */ -#define MACH_RCV_TIMEOUT 0x00000100 /* timeout value applies to receive */ -#define MACH_RCV_NOTIFY 0x00000200 /* reserved - legacy */ -#define MACH_RCV_INTERRUPT 0x00000400 /* don't restart interrupted receive */ -#define MACH_RCV_VOUCHER 0x00000800 /* willing to receive voucher port */ -#define MACH_RCV_OVERWRITE 0x00001000 /* scatter receive (deprecated) */ -#define MACH_RCV_SYNC_WAIT 0x00004000 /* sync waiter waiting for rcv */ +#define MACH_RCV_TIMEOUT 0x00000100 /* timeout value applies to receive */ +#define MACH_RCV_NOTIFY 0x00000200 /* reserved - legacy */ +#define MACH_RCV_INTERRUPT 0x00000400 /* don't restart interrupted receive */ +#define MACH_RCV_VOUCHER 0x00000800 /* willing to receive voucher port */ +#define MACH_RCV_OVERWRITE 0x00001000 /* scatter receive (deprecated) */ +#define MACH_RCV_SYNC_WAIT 0x00004000 /* sync waiter waiting for rcv */ #ifdef XNU_KERNEL_PRIVATE -#define MACH_RCV_STACK 0x00002000 /* receive into highest addr of buffer */ +#define MACH_RCV_STACK 0x00002000 /* receive into highest addr of buffer */ /* * NOTE: @@ -742,17 +713,17 @@ typedef integer_t mach_msg_option_t; * If more than one thread attempts to MACH_PEEK_MSG on a port or set, one of * the threads may miss messages (in fact, it may never wake up). */ -#define MACH_PEEK_MSG 0x80000000 /* receive, but leave msgs queued */ +#define MACH_PEEK_MSG 0x80000000 /* receive, but leave msgs queued */ #endif -/* +/* * NOTE: a 0x00------ RCV mask implies to ask for - * a MACH_MSG_TRAILER_FORMAT_0 with 0 Elements, + * a MACH_MSG_TRAILER_FORMAT_0 with 0 Elements, * which is equivalent to a mach_msg_trailer_t. * * XXXMAC: unlike the rest of the MACH_RCV_* flags, MACH_RCV_TRAILER_LABELS - * needs its own private bit since we only calculate its fields when absolutely + * needs its own private bit since we only calculate its fields when absolutely * required. 
*/ #define MACH_RCV_TRAILER_NULL 0 @@ -763,28 +734,28 @@ typedef integer_t mach_msg_option_t; #define MACH_RCV_TRAILER_AV 7 #define MACH_RCV_TRAILER_LABELS 8 -#define MACH_RCV_TRAILER_TYPE(x) (((x) & 0xf) << 28) -#define MACH_RCV_TRAILER_ELEMENTS(x) (((x) & 0xf) << 24) -#define MACH_RCV_TRAILER_MASK ((0xf << 24)) +#define MACH_RCV_TRAILER_TYPE(x) (((x) & 0xf) << 28) +#define MACH_RCV_TRAILER_ELEMENTS(x) (((x) & 0xf) << 24) +#define MACH_RCV_TRAILER_MASK ((0xf << 24)) #define GET_RCV_ELEMENTS(y) (((y) >> 24) & 0xf) #ifdef MACH_KERNEL_PRIVATE /* The options that the kernel honors when passed from user space */ #define MACH_SEND_USER (MACH_SEND_MSG | MACH_SEND_TIMEOUT | \ - MACH_SEND_NOTIFY | MACH_SEND_OVERRIDE | \ - MACH_SEND_TRAILER | MACH_SEND_NOIMPORTANCE | \ - MACH_SEND_SYNC_OVERRIDE | MACH_SEND_PROPAGATE_QOS) + MACH_SEND_NOTIFY | MACH_SEND_OVERRIDE | \ + MACH_SEND_TRAILER | MACH_SEND_NOIMPORTANCE | \ + MACH_SEND_SYNC_OVERRIDE | MACH_SEND_PROPAGATE_QOS) #define MACH_RCV_USER (MACH_RCV_MSG | MACH_RCV_TIMEOUT | \ - MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ - MACH_RCV_VOUCHER | MACH_RCV_TRAILER_MASK | \ - MACH_RCV_SYNC_WAIT) + MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | \ + MACH_RCV_VOUCHER | MACH_RCV_TRAILER_MASK | \ + MACH_RCV_SYNC_WAIT) -#define MACH_MSG_OPTION_USER (MACH_SEND_USER | MACH_RCV_USER) +#define MACH_MSG_OPTION_USER (MACH_SEND_USER | MACH_RCV_USER) /* The options implemented by the library interface to mach_msg et. al. */ -#define MACH_MSG_OPTION_LIB (MACH_SEND_INTERRUPT | MACH_RCV_INTERRUPT) +#define MACH_MSG_OPTION_LIB (MACH_SEND_INTERRUPT | MACH_RCV_INTERRUPT) /* * Default options to use when sending from the kernel. @@ -795,52 +766,52 @@ typedef integer_t mach_msg_option_t; * (11938665 & 23925818) */ #define MACH_SEND_KERNEL_DEFAULT (MACH_SEND_MSG | \ - MACH_SEND_ALWAYS | MACH_SEND_NOIMPORTANCE) + MACH_SEND_ALWAYS | MACH_SEND_NOIMPORTANCE) #endif /* MACH_KERNEL_PRIVATE */ -/* - * XXXMAC: note that in the case of MACH_RCV_TRAILER_LABELS, +/* + * XXXMAC: note that in the case of MACH_RCV_TRAILER_LABELS, * we just fall through to mach_msg_max_trailer_t. * This is correct behavior since mach_msg_max_trailer_t is defined as * mac_msg_mac_trailer_t which is used for the LABELS trailer. - * It also makes things work properly if MACH_RCV_TRAILER_LABELS is ORed + * It also makes things work properly if MACH_RCV_TRAILER_LABELS is ORed * with one of the other options. */ -#define REQUESTED_TRAILER_SIZE_NATIVE(y) \ - ((mach_msg_trailer_size_t) \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_NULL) ? \ - sizeof(mach_msg_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SEQNO) ? \ - sizeof(mach_msg_seqno_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SENDER) ? \ - sizeof(mach_msg_security_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_AUDIT) ? \ - sizeof(mach_msg_audit_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_CTX) ? \ - sizeof(mach_msg_context_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_AV) ? \ - sizeof(mach_msg_mac_trailer_t) : \ +#define REQUESTED_TRAILER_SIZE_NATIVE(y) \ + ((mach_msg_trailer_size_t) \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_NULL) ? \ + sizeof(mach_msg_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SEQNO) ? \ + sizeof(mach_msg_seqno_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SENDER) ? \ + sizeof(mach_msg_security_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_AUDIT) ? \ + sizeof(mach_msg_audit_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_CTX) ? 
\ + sizeof(mach_msg_context_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_AV) ? \ + sizeof(mach_msg_mac_trailer_t) : \ sizeof(mach_msg_max_trailer_t)))))))) #ifdef XNU_KERNEL_PRIVATE #if defined(__arm64__) -#define REQUESTED_TRAILER_SIZE(is64, y) \ - ((mach_msg_trailer_size_t) \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_NULL) ? \ - sizeof(mach_msg_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SEQNO) ? \ - sizeof(mach_msg_seqno_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SENDER) ? \ - sizeof(mach_msg_security_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_AUDIT) ? \ - sizeof(mach_msg_audit_trailer_t) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_CTX) ? \ +#define REQUESTED_TRAILER_SIZE(is64, y) \ + ((mach_msg_trailer_size_t) \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_NULL) ? \ + sizeof(mach_msg_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SEQNO) ? \ + sizeof(mach_msg_seqno_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_SENDER) ? \ + sizeof(mach_msg_security_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_AUDIT) ? \ + sizeof(mach_msg_audit_trailer_t) : \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_CTX) ? \ ((is64) ? sizeof(mach_msg_context_trailer64_t) : sizeof(mach_msg_context_trailer32_t)) : \ - ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_AV) ? \ + ((GET_RCV_ELEMENTS(y) == MACH_RCV_TRAILER_AV) ? \ ((is64) ? sizeof(mach_msg_mac_trailer64_t) : sizeof(mach_msg_mac_trailer32_t)) : \ sizeof(mach_msg_max_trailer_t)))))))) #else @@ -864,93 +835,93 @@ typedef integer_t mach_msg_option_t; typedef kern_return_t mach_msg_return_t; -#define MACH_MSG_SUCCESS 0x00000000 - - -#define MACH_MSG_MASK 0x00003e00 - /* All special error code bits defined below. */ -#define MACH_MSG_IPC_SPACE 0x00002000 - /* No room in IPC name space for another capability name. */ -#define MACH_MSG_VM_SPACE 0x00001000 - /* No room in VM address space for out-of-line memory. */ -#define MACH_MSG_IPC_KERNEL 0x00000800 - /* Kernel resource shortage handling an IPC capability. */ -#define MACH_MSG_VM_KERNEL 0x00000400 - /* Kernel resource shortage handling out-of-line memory. */ - -#define MACH_SEND_IN_PROGRESS 0x10000001 - /* Thread is waiting to send. (Internal use only.) */ -#define MACH_SEND_INVALID_DATA 0x10000002 - /* Bogus in-line data. */ -#define MACH_SEND_INVALID_DEST 0x10000003 - /* Bogus destination port. */ -#define MACH_SEND_TIMED_OUT 0x10000004 - /* Message not sent before timeout expired. */ -#define MACH_SEND_INVALID_VOUCHER 0x10000005 - /* Bogus voucher port. */ -#define MACH_SEND_INTERRUPTED 0x10000007 - /* Software interrupt. */ -#define MACH_SEND_MSG_TOO_SMALL 0x10000008 - /* Data doesn't contain a complete message. */ -#define MACH_SEND_INVALID_REPLY 0x10000009 - /* Bogus reply port. */ -#define MACH_SEND_INVALID_RIGHT 0x1000000a - /* Bogus port rights in the message body. */ -#define MACH_SEND_INVALID_NOTIFY 0x1000000b - /* Bogus notify port argument. */ -#define MACH_SEND_INVALID_MEMORY 0x1000000c - /* Invalid out-of-line memory pointer. */ -#define MACH_SEND_NO_BUFFER 0x1000000d - /* No message buffer is available. */ -#define MACH_SEND_TOO_LARGE 0x1000000e - /* Send is too large for port */ -#define MACH_SEND_INVALID_TYPE 0x1000000f - /* Invalid msg-type specification. */ -#define MACH_SEND_INVALID_HEADER 0x10000010 - /* A field in the header had a bad value. */ -#define MACH_SEND_INVALID_TRAILER 0x10000011 - /* The trailer to be sent does not match kernel format. 
*/ -#define MACH_SEND_INVALID_RT_OOL_SIZE 0x10000015 - /* compatibility: no longer a returned error */ - -#define MACH_RCV_IN_PROGRESS 0x10004001 - /* Thread is waiting for receive. (Internal use only.) */ -#define MACH_RCV_INVALID_NAME 0x10004002 - /* Bogus name for receive port/port-set. */ -#define MACH_RCV_TIMED_OUT 0x10004003 - /* Didn't get a message within the timeout value. */ -#define MACH_RCV_TOO_LARGE 0x10004004 - /* Message buffer is not large enough for inline data. */ -#define MACH_RCV_INTERRUPTED 0x10004005 - /* Software interrupt. */ -#define MACH_RCV_PORT_CHANGED 0x10004006 - /* compatibility: no longer a returned error */ -#define MACH_RCV_INVALID_NOTIFY 0x10004007 - /* Bogus notify port argument. */ -#define MACH_RCV_INVALID_DATA 0x10004008 - /* Bogus message buffer for inline data. */ -#define MACH_RCV_PORT_DIED 0x10004009 - /* Port/set was sent away/died during receive. */ -#define MACH_RCV_IN_SET 0x1000400a - /* compatibility: no longer a returned error */ -#define MACH_RCV_HEADER_ERROR 0x1000400b - /* Error receiving message header. See special bits. */ -#define MACH_RCV_BODY_ERROR 0x1000400c - /* Error receiving message body. See special bits. */ -#define MACH_RCV_INVALID_TYPE 0x1000400d - /* Invalid msg-type specification in scatter list. */ -#define MACH_RCV_SCATTER_SMALL 0x1000400e - /* Out-of-line overwrite region is not large enough */ -#define MACH_RCV_INVALID_TRAILER 0x1000400f - /* trailer type or number of trailer elements not supported */ +#define MACH_MSG_SUCCESS 0x00000000 + + +#define MACH_MSG_MASK 0x00003e00 +/* All special error code bits defined below. */ +#define MACH_MSG_IPC_SPACE 0x00002000 +/* No room in IPC name space for another capability name. */ +#define MACH_MSG_VM_SPACE 0x00001000 +/* No room in VM address space for out-of-line memory. */ +#define MACH_MSG_IPC_KERNEL 0x00000800 +/* Kernel resource shortage handling an IPC capability. */ +#define MACH_MSG_VM_KERNEL 0x00000400 +/* Kernel resource shortage handling out-of-line memory. */ + +#define MACH_SEND_IN_PROGRESS 0x10000001 +/* Thread is waiting to send. (Internal use only.) */ +#define MACH_SEND_INVALID_DATA 0x10000002 +/* Bogus in-line data. */ +#define MACH_SEND_INVALID_DEST 0x10000003 +/* Bogus destination port. */ +#define MACH_SEND_TIMED_OUT 0x10000004 +/* Message not sent before timeout expired. */ +#define MACH_SEND_INVALID_VOUCHER 0x10000005 +/* Bogus voucher port. */ +#define MACH_SEND_INTERRUPTED 0x10000007 +/* Software interrupt. */ +#define MACH_SEND_MSG_TOO_SMALL 0x10000008 +/* Data doesn't contain a complete message. */ +#define MACH_SEND_INVALID_REPLY 0x10000009 +/* Bogus reply port. */ +#define MACH_SEND_INVALID_RIGHT 0x1000000a +/* Bogus port rights in the message body. */ +#define MACH_SEND_INVALID_NOTIFY 0x1000000b +/* Bogus notify port argument. */ +#define MACH_SEND_INVALID_MEMORY 0x1000000c +/* Invalid out-of-line memory pointer. */ +#define MACH_SEND_NO_BUFFER 0x1000000d +/* No message buffer is available. */ +#define MACH_SEND_TOO_LARGE 0x1000000e +/* Send is too large for port */ +#define MACH_SEND_INVALID_TYPE 0x1000000f +/* Invalid msg-type specification. */ +#define MACH_SEND_INVALID_HEADER 0x10000010 +/* A field in the header had a bad value. */ +#define MACH_SEND_INVALID_TRAILER 0x10000011 +/* The trailer to be sent does not match kernel format. */ +#define MACH_SEND_INVALID_RT_OOL_SIZE 0x10000015 +/* compatibility: no longer a returned error */ + +#define MACH_RCV_IN_PROGRESS 0x10004001 +/* Thread is waiting for receive. (Internal use only.) 
*/ +#define MACH_RCV_INVALID_NAME 0x10004002 +/* Bogus name for receive port/port-set. */ +#define MACH_RCV_TIMED_OUT 0x10004003 +/* Didn't get a message within the timeout value. */ +#define MACH_RCV_TOO_LARGE 0x10004004 +/* Message buffer is not large enough for inline data. */ +#define MACH_RCV_INTERRUPTED 0x10004005 +/* Software interrupt. */ +#define MACH_RCV_PORT_CHANGED 0x10004006 +/* compatibility: no longer a returned error */ +#define MACH_RCV_INVALID_NOTIFY 0x10004007 +/* Bogus notify port argument. */ +#define MACH_RCV_INVALID_DATA 0x10004008 +/* Bogus message buffer for inline data. */ +#define MACH_RCV_PORT_DIED 0x10004009 +/* Port/set was sent away/died during receive. */ +#define MACH_RCV_IN_SET 0x1000400a +/* compatibility: no longer a returned error */ +#define MACH_RCV_HEADER_ERROR 0x1000400b +/* Error receiving message header. See special bits. */ +#define MACH_RCV_BODY_ERROR 0x1000400c +/* Error receiving message body. See special bits. */ +#define MACH_RCV_INVALID_TYPE 0x1000400d +/* Invalid msg-type specification in scatter list. */ +#define MACH_RCV_SCATTER_SMALL 0x1000400e +/* Out-of-line overwrite region is not large enough */ +#define MACH_RCV_INVALID_TRAILER 0x1000400f +/* trailer type or number of trailer elements not supported */ #define MACH_RCV_IN_PROGRESS_TIMED 0x10004011 - /* Waiting for receive with timeout. (Internal use only.) */ +/* Waiting for receive with timeout. (Internal use only.) */ #ifdef XNU_KERNEL_PRIVATE -#define MACH_PEEK_IN_PROGRESS 0x10008001 - /* Waiting for a peek. (Internal use only.) */ -#define MACH_PEEK_READY 0x10008002 - /* Waiting for a peek. (Internal use only.) */ +#define MACH_PEEK_IN_PROGRESS 0x10008001 +/* Waiting for a peek. (Internal use only.) */ +#define MACH_PEEK_READY 0x10008002 +/* Waiting for a peek. (Internal use only.) */ #endif @@ -973,18 +944,18 @@ __BEGIN_DECLS * receiving of the message. */ __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern mach_msg_return_t mach_msg_overwrite( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size, - mach_port_name_t rcv_name, - mach_msg_timeout_t timeout, - mach_port_name_t notify, - mach_msg_header_t *rcv_msg, - mach_msg_size_t rcv_limit); - -#ifndef KERNEL +extern mach_msg_return_t mach_msg_overwrite( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_port_name_t notify, + mach_msg_header_t *rcv_msg, + mach_msg_size_t rcv_limit); + +#ifndef KERNEL /* * Routine: mach_msg @@ -995,14 +966,14 @@ extern mach_msg_return_t mach_msg_overwrite( * operation silently (trap version does not restart). */ __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern mach_msg_return_t mach_msg( - mach_msg_header_t *msg, - mach_msg_option_t option, - mach_msg_size_t send_size, - mach_msg_size_t rcv_size, - mach_port_name_t rcv_name, - mach_msg_timeout_t timeout, - mach_port_name_t notify); +extern mach_msg_return_t mach_msg( + mach_msg_header_t *msg, + mach_msg_option_t option, + mach_msg_size_t send_size, + mach_msg_size_t rcv_size, + mach_port_name_t rcv_name, + mach_msg_timeout_t timeout, + mach_port_name_t notify); /* * Routine: mach_voucher_deallocate @@ -1011,16 +982,15 @@ extern mach_msg_return_t mach_msg( * one (send right) reference to the voucher. 
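 *
 *	A minimal usage sketch, assuming voucher_name (a hypothetical
 *	variable) holds a voucher send right previously received with
 *	MACH_RCV_VOUCHER:
 *
 *		kern_return_t kr = mach_voucher_deallocate(voucher_name);
 *		if (kr != KERN_SUCCESS)
 *			mach_error("mach_voucher_deallocate:", kr);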
*/ __WATCHOS_PROHIBITED __TVOS_PROHIBITED -extern kern_return_t mach_voucher_deallocate( - mach_port_name_t voucher); +extern kern_return_t mach_voucher_deallocate( + mach_port_name_t voucher); #elif defined(MACH_KERNEL_PRIVATE) -extern mach_msg_return_t mach_msg_receive_results(mach_msg_size_t *size); +extern mach_msg_return_t mach_msg_receive_results(mach_msg_size_t *size); -#endif /* KERNEL */ +#endif /* KERNEL */ __END_DECLS -#endif /* _MACH_MESSAGE_H_ */ - +#endif /* _MACH_MESSAGE_H_ */ diff --git a/osfmk/mach/mig.h b/osfmk/mach/mig.h index 44a208dd6..ee94955bc 100644 --- a/osfmk/mach/mig.h +++ b/osfmk/mach/mig.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,7 +33,7 @@ * Mach MIG Subsystem Interfaces */ -#ifndef _MACH_MIG_H_ +#ifndef _MACH_MIG_H_ #define _MACH_MIG_H_ #include @@ -77,8 +77,8 @@ * unpack the request message, call the server procedure, and pack the * reply message. 
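 *
 *	A schematic stub body under assumed names (__Request__foo_t,
 *	__Reply__foo_t, and foo_server() are hypothetical; real stubs are
 *	emitted by MIG with names such as _Xfoo):
 *
 *		void _Xfoo(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP)
 *		{
 *			__Request__foo_t *In0P = (__Request__foo_t *) InHeadP;
 *			__Reply__foo_t *OutP = (__Reply__foo_t *) OutHeadP;
 *
 *			OutP->RetCode = foo_server(In0P->Head.msgh_request_port,
 *			    In0P->arg);
 *		}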
*/ -typedef void (*mig_stub_routine_t) (mach_msg_header_t *InHeadP, - mach_msg_header_t *OutHeadP); +typedef void (*mig_stub_routine_t) (mach_msg_header_t *InHeadP, + mach_msg_header_t *OutHeadP); typedef mig_stub_routine_t mig_routine_t; @@ -103,13 +103,13 @@ typedef mach_msg_type_descriptor_t *mig_routine_arg_descriptor_t; #define MIG_ROUTINE_ARG_DESCRIPTOR_NULL ((mig_routine_arg_descriptor_t)0) struct routine_descriptor { - mig_impl_routine_t impl_routine; /* Server work func pointer */ - mig_stub_routine_t stub_routine; /* Unmarshalling func pointer */ - unsigned int argc; /* Number of argument words */ - unsigned int descr_count; /* Number complex descriptors */ + mig_impl_routine_t impl_routine; /* Server work func pointer */ + mig_stub_routine_t stub_routine; /* Unmarshalling func pointer */ + unsigned int argc; /* Number of argument words */ + unsigned int descr_count; /* Number complex descriptors */ routine_arg_descriptor_t - arg_descr; /* pointer to descriptor array*/ - unsigned int max_reply_msg; /* Max size for reply msg */ + arg_descr; /* pointer to descriptor array*/ + unsigned int max_reply_msg; /* Max size for reply msg */ }; typedef struct routine_descriptor *routine_descriptor_t; @@ -119,28 +119,28 @@ typedef mig_routine_descriptor *mig_routine_descriptor_t; #define MIG_ROUTINE_DESCRIPTOR_NULL ((mig_routine_descriptor_t)0) typedef struct mig_subsystem { - mig_server_routine_t server; /* pointer to demux routine */ - mach_msg_id_t start; /* Min routine number */ - mach_msg_id_t end; /* Max routine number + 1 */ - mach_msg_size_t maxsize; /* Max reply message size */ - vm_address_t reserved; /* reserved for MIG use */ + mig_server_routine_t server; /* pointer to demux routine */ + mach_msg_id_t start; /* Min routine number */ + mach_msg_id_t end; /* Max routine number + 1 */ + mach_msg_size_t maxsize; /* Max reply message size */ + vm_address_t reserved; /* reserved for MIG use */ mig_routine_descriptor - routine[1]; /* Routine descriptor array */ + routine[1]; /* Routine descriptor array */ } *mig_subsystem_t; -#define MIG_SUBSYSTEM_NULL ((mig_subsystem_t)0) +#define MIG_SUBSYSTEM_NULL ((mig_subsystem_t)0) typedef struct mig_symtab { - char *ms_routine_name; - int ms_routine_number; - void (*ms_routine)(void); /* Since the functions in the - * symbol table have unknown - * signatures, this is the best - * we can do... - */ + char *ms_routine_name; + int ms_routine_number; + void (*ms_routine)(void); /* Since the functions in the + * symbol table have unknown + * signatures, this is the best + * we can do... 
+ */ } mig_symtab_t; -#ifdef PRIVATE +#ifdef PRIVATE /* MIG object runtime - not ready for public consumption */ @@ -164,51 +164,51 @@ typedef struct mig_symtab { * Coming soon: * - User-level support */ -typedef unsigned int mig_notify_type_t; +typedef unsigned int mig_notify_type_t; typedef struct MIGIID { - unsigned long data1; - unsigned short data2; - unsigned short data3; - unsigned char data4[8]; + unsigned long data1; + unsigned short data2; + unsigned short data3; + unsigned char data4[8]; } MIGIID; -typedef struct IMIGObjectVtbl IMIGObjectVtbl; -typedef struct IMIGNotifyObjectVtbl IMIGNotifyObjectVtbl; +typedef struct IMIGObjectVtbl IMIGObjectVtbl; +typedef struct IMIGNotifyObjectVtbl IMIGNotifyObjectVtbl; typedef struct IMIGObject { - const IMIGObjectVtbl *pVtbl; + const IMIGObjectVtbl *pVtbl; } IMIGObject; typedef struct IMIGNotifyObject { - const IMIGNotifyObjectVtbl *pVtbl; + const IMIGNotifyObjectVtbl *pVtbl; } IMIGNotifyObject; struct IMIGObjectVtbl { kern_return_t (*QueryInterface)( - IMIGObject *object, - const MIGIID *iid, - void **ppv); + IMIGObject *object, + const MIGIID *iid, + void **ppv); unsigned long (*AddRef)( - IMIGObject *object); + IMIGObject *object); - unsigned long (*Release)( - IMIGObject *object); + unsigned long (*Release)( + IMIGObject *object); unsigned long (*GetServer)( - IMIGObject *object, - mig_server_routine_t *server); - + IMIGObject *object, + mig_server_routine_t *server); + boolean_t (*RaiseNotification)( - IMIGObject *object, - mig_notify_type_t notify_type); + IMIGObject *object, + mig_notify_type_t notify_type); boolean_t (*RequestNotification)( - IMIGObject *object, - IMIGNotifyObject *notify, - mig_notify_type_t notify_type); -}; + IMIGObject *object, + IMIGNotifyObject *notify, + mig_notify_type_t notify_type); +}; /* * IMIGNotifyObject @@ -228,36 +228,36 @@ struct IMIGObjectVtbl { */ struct IMIGNotifyObjectVtbl { kern_return_t (*QueryInterface)( - IMIGNotifyObject *notify, - const MIGIID *iid, - void **ppv); + IMIGNotifyObject *notify, + const MIGIID *iid, + void **ppv); - unsigned long (*AddRef)( - IMIGNotifyObject *notify); + unsigned long (*AddRef)( + IMIGNotifyObject *notify); - unsigned long (*Release)( - IMIGNotifyObject *notify); + unsigned long (*Release)( + IMIGNotifyObject *notify); unsigned long (*GetServer)( - IMIGNotifyObject *notify, - mig_server_routine_t *server); + IMIGNotifyObject *notify, + mig_server_routine_t *server); boolean_t (*RaiseNotification)( - IMIGNotifyObject *notify, - mig_notify_type_t notify_type); + IMIGNotifyObject *notify, + mig_notify_type_t notify_type); boolean_t (*RequestNotification)( - IMIGNotifyObject *notify, - IMIGNotifyObject *notify_notify, - mig_notify_type_t notify_type); + IMIGNotifyObject *notify, + IMIGNotifyObject *notify_notify, + mig_notify_type_t notify_type); void (*HandleNotification)( - IMIGNotifyObject *notify, - IMIGObject *object, - mig_notify_type_t notify_type); + IMIGNotifyObject *notify, + IMIGObject *object, + mig_notify_type_t notify_type); }; -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ #endif /* PRIVATE */ __BEGIN_DECLS @@ -272,8 +272,8 @@ extern void mig_dealloc_reply_port(mach_port_t reply_port); extern void mig_put_reply_port(mach_port_t reply_port); /* Bounded string copy */ -extern int mig_strncpy(char *dest, const char *src, int len); -extern int mig_strncpy_zerofill(char *dest, const char *src, int len); +extern int mig_strncpy(char *dest, const char *src, int len); +extern int mig_strncpy_zerofill(char *dest, const char *src, int len); #ifdef 
KERNEL_PRIVATE @@ -295,4 +295,4 @@ extern void mig_deallocate(vm_address_t, vm_size_t); __END_DECLS -#endif /* _MACH_MIG_H_ */ +#endif /* _MACH_MIG_H_ */ diff --git a/osfmk/mach/mig_errors.h b/osfmk/mach/mig_errors.h index f1234bc7b..418a05da3 100644 --- a/osfmk/mach/mig_errors.h +++ b/osfmk/mach/mig_errors.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,7 +60,7 @@ * */ -#ifndef _MACH_MIG_ERRORS_H_ +#ifndef _MACH_MIG_ERRORS_H_ #define _MACH_MIG_ERRORS_H_ #include @@ -79,15 +79,15 @@ * the knowledge to convert the codes in this situation. 
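 *
 *	A sketch of how two of the codes defined below typically arise
 *	(request_id, reply, and the subsystem bounds are hypothetical):
 *	a client stub returns MIG_REPLY_MISMATCH when the reply id is not
 *	the request id plus 100 (the MIG convention), and a server demux
 *	reports MIG_BAD_ID for a request outside its routine range:
 *
 *		if (reply->Head.msgh_id != request_id + 100)
 *			return MIG_REPLY_MISMATCH;
 *
 *		if (msgh_id < subsys->start || msgh_id >= subsys->end)
 *			OutP->RetCode = MIG_BAD_ID;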
*/ -#define MIG_TYPE_ERROR -300 /* client type check failure */ -#define MIG_REPLY_MISMATCH -301 /* wrong reply message ID */ -#define MIG_REMOTE_ERROR -302 /* server detected error */ -#define MIG_BAD_ID -303 /* bad request message ID */ -#define MIG_BAD_ARGUMENTS -304 /* server type check failure */ -#define MIG_NO_REPLY -305 /* no reply should be send */ -#define MIG_EXCEPTION -306 /* server raised exception */ -#define MIG_ARRAY_TOO_LARGE -307 /* array not large enough */ -#define MIG_SERVER_DIED -308 /* server died */ +#define MIG_TYPE_ERROR -300 /* client type check failure */ +#define MIG_REPLY_MISMATCH -301 /* wrong reply message ID */ +#define MIG_REMOTE_ERROR -302 /* server detected error */ +#define MIG_BAD_ID -303 /* bad request message ID */ +#define MIG_BAD_ARGUMENTS -304 /* server type check failure */ +#define MIG_NO_REPLY -305 /* no reply should be send */ +#define MIG_EXCEPTION -306 /* server raised exception */ +#define MIG_ARRAY_TOO_LARGE -307 /* array not large enough */ +#define MIG_SERVER_DIED -308 /* server died */ #define MIG_TRAILER_ERROR -309 /* trailer has an unknown format */ /* @@ -97,9 +97,9 @@ */ #pragma pack(4) typedef struct { - mach_msg_header_t Head; - NDR_record_t NDR; - kern_return_t RetCode; + mach_msg_header_t Head; + NDR_record_t NDR; + kern_return_t RetCode; } mig_reply_error_t; #pragma pack() @@ -113,12 +113,13 @@ static __inline__ void __NDR_convert__mig_reply_error_t(__unused mig_reply_error_t *x) { #if defined(__NDR_convert__int_rep__kern_return_t__defined) - if (x->NDR.int_rep != NDR_record.int_rep) + if (x->NDR.int_rep != NDR_record.int_rep) { __NDR_convert__int_rep__kern_return_t(&x->RetCode, x->NDR.int_rep); + } #endif /* __NDR_convert__int_rep__kern_return_t__defined */ } #endif /* !defined(__NDR_convert__mig_reply_error_t__defined) */ __END_DECLS -#endif /* _MACH_MIG_ERRORS_H_ */ +#endif /* _MACH_MIG_ERRORS_H_ */ diff --git a/osfmk/mach/mig_log.h b/osfmk/mach/mig_log.h index 44ce00002..ae5e6c7a2 100644 --- a/osfmk/mach/mig_log.h +++ b/osfmk/mach/mig_log.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -56,7 +56,7 @@ typedef enum { extern void MigEventTracer #if defined(__STDC__) ( - mig_who_t who, + mig_who_t who, mig_which_event_t what, mach_msg_id_t msgh_id, unsigned int size, @@ -68,20 +68,20 @@ extern void MigEventTracer char *file, unsigned int line ); -#else /* !defined(__STDC__) */ +#else /* !defined(__STDC__) */ (); #endif /* !defined(__STDC__) */ extern void MigEventErrors #if defined(__STDC__) ( - mig_who_t who, + mig_who_t who, mig_which_error_t what, void *par, char *file, unsigned int line ); -#else /* !defined(__STDC__) */ +#else /* !defined(__STDC__) */ (); #endif /* !defined(__STDC__) */ @@ -94,4 +94,3 @@ extern int mig_tracing; #endif /* __APPLE_API_OBSOLETE */ #endif /* _mach_log_ */ - diff --git a/osfmk/mach/mk_timer.h b/osfmk/mach/mk_timer.h index 17dddfcca..56b554bcd 100644 --- a/osfmk/mach/mk_timer.h +++ b/osfmk/mach/mk_timer.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,43 +34,43 @@ * Created. 
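 *
 *	A usage sketch of the interfaces declared below (the deadline
 *	arithmetic is illustrative only): create a timer port, arm it for
 *	an absolute time, then receive the expiration message on that port:
 *
 *		mach_port_name_t timer = mk_timer_create();
 *		mk_timer_arm(timer, mach_absolute_time() + 1000000);
 *		struct mk_timer_expire_msg expire;
 *		mach_msg(&expire.header, MACH_RCV_MSG, 0, sizeof(expire),
 *		    timer, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
 *		mk_timer_destroy(timer);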
*/ -#ifndef _MACH_MK_TIMER_H_ -#define _MACH_MK_TIMER_H_ +#ifndef _MACH_MK_TIMER_H_ +#define _MACH_MK_TIMER_H_ #include #include __BEGIN_DECLS -mach_port_name_t mk_timer_create(void); +mach_port_name_t mk_timer_create(void); -kern_return_t mk_timer_destroy( - mach_port_name_t name); +kern_return_t mk_timer_destroy( + mach_port_name_t name); -kern_return_t mk_timer_arm( - mach_port_name_t name, - uint64_t expire_time); +kern_return_t mk_timer_arm( + mach_port_name_t name, + uint64_t expire_time); -kern_return_t mk_timer_cancel( - mach_port_name_t name, - uint64_t *result_time); +kern_return_t mk_timer_cancel( + mach_port_name_t name, + uint64_t *result_time); #define MK_TIMER_NORMAL (0) #define MK_TIMER_CRITICAL (1) -kern_return_t mk_timer_arm_leeway( - mach_port_name_t name, - uint64_t mk_timer_flags, - uint64_t mk_timer_expire_time, - uint64_t mk_timer_leeway); +kern_return_t mk_timer_arm_leeway( + mach_port_name_t name, + uint64_t mk_timer_flags, + uint64_t mk_timer_expire_time, + uint64_t mk_timer_leeway); __END_DECLS #pragma pack(4) struct mk_timer_expire_msg { - mach_msg_header_t header; - uint64_t unused[3]; + mach_msg_header_t header; + uint64_t unused[3]; }; #pragma pack() -typedef struct mk_timer_expire_msg mk_timer_expire_msg_t; +typedef struct mk_timer_expire_msg mk_timer_expire_msg_t; #endif /* _MACH_MK_TIMER_H_ */ diff --git a/osfmk/mach/mk_traps.h b/osfmk/mach/mk_traps.h index 68e71faa9..2bf572483 100644 --- a/osfmk/mach/mk_traps.h +++ b/osfmk/mach/mk_traps.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -34,7 +34,7 @@ * Created. */ -#ifndef _MACH_MK_TRAPS_H_ -#define _MACH_MK_TRAPS_H_ +#ifndef _MACH_MK_TRAPS_H_ +#define _MACH_MK_TRAPS_H_ #endif /* _MACH_MK_TRAPS_H_ */ diff --git a/osfmk/mach/msg_type.h b/osfmk/mach/msg_type.h index fb280331b..78215d399 100644 --- a/osfmk/mach/msg_type.h +++ b/osfmk/mach/msg_type.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,37 +38,37 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.2.6.1 1994/09/23 02:40:41 ezf - * change marker to not FREE - * [1994/09/22 21:41:56 ezf] + * change marker to not FREE + * [1994/09/22 21:41:56 ezf] * * Revision 1.2.2.2 1993/06/09 02:42:32 gm - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 21:17:31 jeffc] - * + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:17:31 jeffc] + * * Revision 1.2 1993/04/19 16:37:56 devrcs - * ansi C conformance changes - * [1993/02/02 18:53:54 david] - * + * ansi C conformance changes + * [1993/02/02 18:53:54 david] + * * Revision 1.1 1992/09/30 02:31:51 robert - * Initial revision - * + * Initial revision + * * $EndLog$ */ /* CMU_HIST */ /* * Revision 2.3 91/05/14 16:58:02 mrt - * Correcting copyright - * + * Correcting copyright + * * Revision 2.2 91/02/05 17:35:10 mrt - * Changed to new Mach copyright - * [91/02/01 17:17:32 mrt] - * + * Changed to new Mach copyright + * [91/02/01 17:17:32 mrt] + * * Revision 2.1 89/08/03 16:03:38 rwd * Created. - * + * * Revision 2.3 89/02/25 18:39:26 gm0w - * Changes for cleanup. - * + * Changes for cleanup. + * * 4-Mar-87 Michael Young (mwyoung) at Carnegie-Mellon University * Added MSG_TYPE_RPC. * @@ -77,28 +77,28 @@ * */ /* CMU_ENDHIST */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -107,17 +107,17 @@ /* * This file defines user msg types that may be ored into * the msg_type field in a msg header. Values 0-5 are reserved - * for use by the kernel and are defined in message.h. + * for use by the kernel and are defined in message.h. 
* */ -#ifndef MSG_TYPE_H_ -#define MSG_TYPE_H_ +#ifndef MSG_TYPE_H_ +#define MSG_TYPE_H_ -#define MSG_TYPE_CAMELOT (1 << 6) -#define MSG_TYPE_ENCRYPTED (1 << 7) -#define MSG_TYPE_RPC (1 << 8) /* Reply expected */ +#define MSG_TYPE_CAMELOT (1 << 6) +#define MSG_TYPE_ENCRYPTED (1 << 7) +#define MSG_TYPE_RPC (1 << 8) /* Reply expected */ #include -#endif /* MSG_TYPE_H_ */ +#endif /* MSG_TYPE_H_ */ diff --git a/osfmk/mach/ndr.h b/osfmk/mach/ndr.h index 9baa731fe..61c00ff1d 100644 --- a/osfmk/mach/ndr.h +++ b/osfmk/mach/ndr.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -38,14 +38,14 @@ typedef struct { - unsigned char mig_vers; - unsigned char if_vers; - unsigned char reserved1; - unsigned char mig_encoding; - unsigned char int_rep; - unsigned char char_rep; - unsigned char float_rep; - unsigned char reserved2; + unsigned char mig_vers; + unsigned char if_vers; + unsigned char reserved1; + unsigned char mig_encoding; + unsigned char int_rep; + unsigned char char_rep; + unsigned char float_rep; + unsigned char reserved2; } NDR_record_t; /* @@ -87,9 +87,9 @@ extern NDR_record_t NDR_record; #if __NDR_convert__ -#define __NDR_convert__NOOP do ; while (0) -#define __NDR_convert__UNKNOWN(s) __NDR_convert__NOOP -#define __NDR_convert__SINGLE(a, f, r) do { r((a), (f)); } while (0) +#define __NDR_convert__NOOP do ; while (0) +#define __NDR_convert__UNKNOWN(s) __NDR_convert__NOOP +#define __NDR_convert__SINGLE(a, f, r) do { r((a), (f)); } while (0) #define __NDR_convert__ARRAY(a, f, c, r) \ do { int __i__, __C__ = (c); \ for (__i__ = 0; __i__ < __C__; __i__++) \ @@ -101,18 +101,20 @@ extern NDR_record_t NDR_record; #if __NDR_convert__int_rep__ -#define __NDR_READSWAP_assign(a, rs) do { *(a) = rs(a); } while (0) +#define __NDR_READSWAP_assign(a, rs) do { *(a) = rs(a); } while (0) -#define __NDR_READSWAP__uint16_t(a) OSReadSwapInt16((void *)a, 0) -#define __NDR_READSWAP__int16_t(a) (int16_t)OSReadSwapInt16((void *)a, 0) -#define __NDR_READSWAP__uint32_t(a) OSReadSwapInt32((void *)a, 0) -#define __NDR_READSWAP__int32_t(a) (int32_t)OSReadSwapInt32((void *)a, 0) -#define __NDR_READSWAP__uint64_t(a) OSReadSwapInt64((void *)a, 0) -#define __NDR_READSWAP__int64_t(a) (int64_t)OSReadSwapInt64((void *)a, 0) +#define __NDR_READSWAP__uint16_t(a) OSReadSwapInt16((void *)a, 0) +#define __NDR_READSWAP__int16_t(a) (int16_t)OSReadSwapInt16((void *)a, 0) +#define __NDR_READSWAP__uint32_t(a) OSReadSwapInt32((void 
*)a, 0) +#define __NDR_READSWAP__int32_t(a) (int32_t)OSReadSwapInt32((void *)a, 0) +#define __NDR_READSWAP__uint64_t(a) OSReadSwapInt64((void *)a, 0) +#define __NDR_READSWAP__int64_t(a) (int64_t)OSReadSwapInt64((void *)a, 0) __BEGIN_DECLS -static __inline__ float __NDR_READSWAP__float(float *argp) { +static __inline__ float +__NDR_READSWAP__float(float *argp) +{ union { float sv; uint32_t ull; @@ -121,7 +123,9 @@ static __inline__ float __NDR_READSWAP__float(float *argp) { return result.sv; } -static __inline__ double __NDR_READSWAP__double(double *argp) { +static __inline__ double +__NDR_READSWAP__double(double *argp) +{ union { double sv; uint64_t ull; @@ -133,51 +137,51 @@ static __inline__ double __NDR_READSWAP__double(double *argp) { __END_DECLS #define __NDR_convert__int_rep__int16_t__defined -#define __NDR_convert__int_rep__int16_t(v,f) \ +#define __NDR_convert__int_rep__int16_t(v, f) \ __NDR_READSWAP_assign(v, __NDR_READSWAP__int16_t) #define __NDR_convert__int_rep__uint16_t__defined -#define __NDR_convert__int_rep__uint16_t(v,f) \ +#define __NDR_convert__int_rep__uint16_t(v, f) \ __NDR_READSWAP_assign(v, __NDR_READSWAP__uint16_t) #define __NDR_convert__int_rep__int32_t__defined -#define __NDR_convert__int_rep__int32_t(v,f) \ +#define __NDR_convert__int_rep__int32_t(v, f) \ __NDR_READSWAP_assign(v, __NDR_READSWAP__int32_t) #define __NDR_convert__int_rep__uint32_t__defined -#define __NDR_convert__int_rep__uint32_t(v,f) \ +#define __NDR_convert__int_rep__uint32_t(v, f) \ __NDR_READSWAP_assign(v, __NDR_READSWAP__uint32_t) #define __NDR_convert__int_rep__int64_t__defined -#define __NDR_convert__int_rep__int64_t(v,f) \ +#define __NDR_convert__int_rep__int64_t(v, f) \ __NDR_READSWAP_assign(v, __NDR_READSWAP__int64_t) #define __NDR_convert__int_rep__uint64_t__defined -#define __NDR_convert__int_rep__uint64_t(v,f) \ +#define __NDR_convert__int_rep__uint64_t(v, f) \ __NDR_READSWAP_assign(v, __NDR_READSWAP__uint64_t) #define __NDR_convert__int_rep__float__defined -#define __NDR_convert__int_rep__float(v,f) \ +#define __NDR_convert__int_rep__float(v, f) \ __NDR_READSWAP_assign(v, __NDR_READSWAP__float) #define __NDR_convert__int_rep__double__defined -#define __NDR_convert__int_rep__double(v,f) \ +#define __NDR_convert__int_rep__double(v, f) \ __NDR_READSWAP_assign(v, __NDR_READSWAP__double) #define __NDR_convert__int_rep__boolean_t__defined -#define __NDR_convert__int_rep__boolean_t(v, f) \ +#define __NDR_convert__int_rep__boolean_t(v, f) \ __NDR_convert__int_rep__int32_t(v,f) #define __NDR_convert__int_rep__kern_return_t__defined -#define __NDR_convert__int_rep__kern_return_t(v,f) \ +#define __NDR_convert__int_rep__kern_return_t(v, f) \ __NDR_convert__int_rep__int32_t(v,f) #define __NDR_convert__int_rep__mach_port_name_t__defined -#define __NDR_convert__int_rep__mach_port_name_t(v,f) \ +#define __NDR_convert__int_rep__mach_port_name_t(v, f) \ __NDR_convert__int_rep__uint32_t(v,f) #define __NDR_convert__int_rep__mach_msg_type_number_t__defined -#define __NDR_convert__int_rep__mach_msg_type_number_t(v,f) \ +#define __NDR_convert__int_rep__mach_msg_type_number_t(v, f) \ __NDR_convert__int_rep__uint32_t(v,f) #endif /* __NDR_convert__int_rep__ */ @@ -185,16 +189,16 @@ __END_DECLS #if __NDR_convert__char_rep__ #warning NDR character representation conversions not implemented yet! 
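/*
 * A minimal sketch of how the int_rep converters defined above are meant
 * to be used (mirroring __NDR_convert__mig_reply_error_t in mig_errors.h):
 * compare the sender's NDR record with the local NDR_record and byte-swap
 * only on mismatch.
 *
 *	if (reply->NDR.int_rep != NDR_record.int_rep)
 *		__NDR_convert__int_rep__kern_return_t(&reply->RetCode,
 *		    reply->NDR.int_rep);
 */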
-#define __NDR_convert__char_rep__char(v,f) __NDR_convert__NOOP -#define __NDR_convert__char_rep__string(v,f,l) __NDR_convert__NOOP +#define __NDR_convert__char_rep__char(v, f) __NDR_convert__NOOP +#define __NDR_convert__char_rep__string(v, f, l) __NDR_convert__NOOP #endif /* __NDR_convert__char_rep__ */ #if __NDR_convert__float_rep__ #warning NDR floating point representation conversions not implemented yet! -#define __NDR_convert__float_rep__float(v,f) __NDR_convert__NOOP -#define __NDR_convert__float_rep__double(v,f) __NDR_convert__NOOP +#define __NDR_convert__float_rep__float(v, f) __NDR_convert__NOOP +#define __NDR_convert__float_rep__double(v, f) __NDR_convert__NOOP #endif /* __NDR_convert__float_rep__ */ diff --git a/osfmk/mach/notify.h b/osfmk/mach/notify.h index 845646c5c..5737dbc9d 100644 --- a/osfmk/mach/notify.h +++ b/osfmk/mach/notify.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,7 +61,7 @@ * Kernel notification message definitions. */ -#ifndef _MACH_NOTIFY_H_ +#ifndef _MACH_NOTIFY_H_ #define _MACH_NOTIFY_H_ #include @@ -73,69 +73,69 @@ * may be found in mach/notify.defs. */ -#define MACH_NOTIFY_FIRST 0100 -#define MACH_NOTIFY_PORT_DELETED (MACH_NOTIFY_FIRST + 001) - /* A send or send-once right was deleted. 
*/ -#define MACH_NOTIFY_SEND_POSSIBLE (MACH_NOTIFY_FIRST + 002) - /* Now possible to send using specified right */ -#define MACH_NOTIFY_PORT_DESTROYED (MACH_NOTIFY_FIRST + 005) - /* A receive right was (would have been) deallocated */ -#define MACH_NOTIFY_NO_SENDERS (MACH_NOTIFY_FIRST + 006) - /* Receive right has no extant send rights */ -#define MACH_NOTIFY_SEND_ONCE (MACH_NOTIFY_FIRST + 007) - /* An extant send-once right died */ -#define MACH_NOTIFY_DEAD_NAME (MACH_NOTIFY_FIRST + 010) - /* Send or send-once right died, leaving a dead-name */ -#define MACH_NOTIFY_LAST (MACH_NOTIFY_FIRST + 015) +#define MACH_NOTIFY_FIRST 0100 +#define MACH_NOTIFY_PORT_DELETED (MACH_NOTIFY_FIRST + 001) +/* A send or send-once right was deleted. */ +#define MACH_NOTIFY_SEND_POSSIBLE (MACH_NOTIFY_FIRST + 002) +/* Now possible to send using specified right */ +#define MACH_NOTIFY_PORT_DESTROYED (MACH_NOTIFY_FIRST + 005) +/* A receive right was (would have been) deallocated */ +#define MACH_NOTIFY_NO_SENDERS (MACH_NOTIFY_FIRST + 006) +/* Receive right has no extant send rights */ +#define MACH_NOTIFY_SEND_ONCE (MACH_NOTIFY_FIRST + 007) +/* An extant send-once right died */ +#define MACH_NOTIFY_DEAD_NAME (MACH_NOTIFY_FIRST + 010) +/* Send or send-once right died, leaving a dead-name */ +#define MACH_NOTIFY_LAST (MACH_NOTIFY_FIRST + 015) typedef mach_port_t notify_port_t; /* * Hard-coded message structures for receiving Mach port notification - * messages. However, they are not actual large enough to receive + * messages. However, they are not actual large enough to receive * the largest trailers current exported by Mach IPC (so they cannot * be used for space allocations in situations using these new larger * trailers). Instead, the MIG-generated server routines (and * related prototypes should be used). 
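 *
 * A receive-side sketch, assuming a dead-name notification was armed
 * earlier via mach_port_request_notification() with MACH_NOTIFY_DEAD_NAME,
 * and notify_port (hypothetical) is the registered notification port:
 *
 *	mach_dead_name_notification_t note;
 *	kern_return_t kr = mach_msg(&note.not_header, MACH_RCV_MSG, 0,
 *	    sizeof(note), notify_port, MACH_MSG_TIMEOUT_NONE,
 *	    MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS &&
 *	    note.not_header.msgh_id == MACH_NOTIFY_DEAD_NAME)
 *		handle_dead_name(note.not_port);	// hypothetical handler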
*/ typedef struct { - mach_msg_header_t not_header; - NDR_record_t NDR; - mach_port_name_t not_port;/* MACH_MSG_TYPE_PORT_NAME */ - mach_msg_format_0_trailer_t trailer; + mach_msg_header_t not_header; + NDR_record_t NDR; + mach_port_name_t not_port;/* MACH_MSG_TYPE_PORT_NAME */ + mach_msg_format_0_trailer_t trailer; } mach_port_deleted_notification_t; typedef struct { - mach_msg_header_t not_header; - NDR_record_t NDR; - mach_port_name_t not_port;/* MACH_MSG_TYPE_PORT_NAME */ - mach_msg_format_0_trailer_t trailer; + mach_msg_header_t not_header; + NDR_record_t NDR; + mach_port_name_t not_port;/* MACH_MSG_TYPE_PORT_NAME */ + mach_msg_format_0_trailer_t trailer; } mach_send_possible_notification_t; typedef struct { - mach_msg_header_t not_header; - mach_msg_body_t not_body; - mach_msg_port_descriptor_t not_port;/* MACH_MSG_TYPE_PORT_RECEIVE */ - mach_msg_format_0_trailer_t trailer; + mach_msg_header_t not_header; + mach_msg_body_t not_body; + mach_msg_port_descriptor_t not_port;/* MACH_MSG_TYPE_PORT_RECEIVE */ + mach_msg_format_0_trailer_t trailer; } mach_port_destroyed_notification_t; typedef struct { - mach_msg_header_t not_header; - NDR_record_t NDR; - mach_msg_type_number_t not_count; - mach_msg_format_0_trailer_t trailer; + mach_msg_header_t not_header; + NDR_record_t NDR; + mach_msg_type_number_t not_count; + mach_msg_format_0_trailer_t trailer; } mach_no_senders_notification_t; typedef struct { - mach_msg_header_t not_header; - mach_msg_format_0_trailer_t trailer; + mach_msg_header_t not_header; + mach_msg_format_0_trailer_t trailer; } mach_send_once_notification_t; typedef struct { - mach_msg_header_t not_header; - NDR_record_t NDR; - mach_port_name_t not_port;/* MACH_MSG_TYPE_PORT_NAME */ - mach_msg_format_0_trailer_t trailer; + mach_msg_header_t not_header; + NDR_record_t NDR; + mach_port_name_t not_port;/* MACH_MSG_TYPE_PORT_NAME */ + mach_msg_format_0_trailer_t trailer; } mach_dead_name_notification_t; -#endif /* _MACH_NOTIFY_H_ */ +#endif /* _MACH_NOTIFY_H_ */ diff --git a/osfmk/mach/policy.h b/osfmk/mach/policy.h index b6ec795a2..836b95f74 100644 --- a/osfmk/mach/policy.h +++ b/osfmk/mach/policy.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _MACH_POLICY_H_ +#ifndef _MACH_POLICY_H_ #define _MACH_POLICY_H_ /* @@ -76,160 +76,160 @@ /* * Old scheduling control interface */ -typedef int policy_t; -typedef integer_t *policy_info_t; -typedef integer_t *policy_base_t; -typedef integer_t *policy_limit_t; +typedef int policy_t; +typedef integer_t *policy_info_t; +typedef integer_t *policy_base_t; +typedef integer_t *policy_limit_t; /* * Policy definitions. Policies should be powers of 2, * but cannot be or'd together other than to test for a * policy 'class'. */ -#define POLICY_NULL 0 /* none */ -#define POLICY_TIMESHARE 1 /* timesharing */ -#define POLICY_RR 2 /* fixed round robin */ -#define POLICY_FIFO 4 /* fixed fifo */ +#define POLICY_NULL 0 /* none */ +#define POLICY_TIMESHARE 1 /* timesharing */ +#define POLICY_RR 2 /* fixed round robin */ +#define POLICY_FIFO 4 /* fixed fifo */ #define __NEW_SCHEDULING_FRAMEWORK__ /* * Check if policy is of 'class' fixed-priority. */ -#define POLICYCLASS_FIXEDPRI (POLICY_RR | POLICY_FIFO) +#define POLICYCLASS_FIXEDPRI (POLICY_RR | POLICY_FIFO) /* * Check if policy is valid. 
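 *
 * For example, invalid_policy(POLICY_TIMESHARE) and invalid_policy(POLICY_RR)
 * evaluate to FALSE, while invalid_policy(POLICY_NULL) and
 * invalid_policy(POLICY_RR | POLICY_FIFO) evaluate to TRUE, since an
 * or'd value does not name a single valid policy.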
*/ -#define invalid_policy(policy) \ - ((policy) != POLICY_TIMESHARE && \ - (policy) != POLICY_RR && \ +#define invalid_policy(policy) \ + ((policy) != POLICY_TIMESHARE && \ + (policy) != POLICY_RR && \ (policy) != POLICY_FIFO) /* - * Types for TIMESHARE policy + * Types for TIMESHARE policy */ -struct policy_timeshare_base { - integer_t base_priority; +struct policy_timeshare_base { + integer_t base_priority; }; struct policy_timeshare_limit { - integer_t max_priority; + integer_t max_priority; }; struct policy_timeshare_info { - integer_t max_priority; - integer_t base_priority; - integer_t cur_priority; - boolean_t depressed; - integer_t depress_priority; + integer_t max_priority; + integer_t base_priority; + integer_t cur_priority; + boolean_t depressed; + integer_t depress_priority; }; -typedef struct policy_timeshare_base *policy_timeshare_base_t; -typedef struct policy_timeshare_limit *policy_timeshare_limit_t; -typedef struct policy_timeshare_info *policy_timeshare_info_t; +typedef struct policy_timeshare_base *policy_timeshare_base_t; +typedef struct policy_timeshare_limit *policy_timeshare_limit_t; +typedef struct policy_timeshare_info *policy_timeshare_info_t; -typedef struct policy_timeshare_base policy_timeshare_base_data_t; -typedef struct policy_timeshare_limit policy_timeshare_limit_data_t; -typedef struct policy_timeshare_info policy_timeshare_info_data_t; +typedef struct policy_timeshare_base policy_timeshare_base_data_t; +typedef struct policy_timeshare_limit policy_timeshare_limit_data_t; +typedef struct policy_timeshare_info policy_timeshare_info_data_t; -#define POLICY_TIMESHARE_BASE_COUNT ((mach_msg_type_number_t) \ +#define POLICY_TIMESHARE_BASE_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_timeshare_base)/sizeof(integer_t))) -#define POLICY_TIMESHARE_LIMIT_COUNT ((mach_msg_type_number_t) \ +#define POLICY_TIMESHARE_LIMIT_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_timeshare_limit)/sizeof(integer_t))) -#define POLICY_TIMESHARE_INFO_COUNT ((mach_msg_type_number_t) \ +#define POLICY_TIMESHARE_INFO_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_timeshare_info)/sizeof(integer_t))) /* * Types for the ROUND ROBIN (RR) policy */ -struct policy_rr_base { - integer_t base_priority; - integer_t quantum; +struct policy_rr_base { + integer_t base_priority; + integer_t quantum; }; struct policy_rr_limit { - integer_t max_priority; + integer_t max_priority; }; struct policy_rr_info { - integer_t max_priority; - integer_t base_priority; - integer_t quantum; - boolean_t depressed; - integer_t depress_priority; + integer_t max_priority; + integer_t base_priority; + integer_t quantum; + boolean_t depressed; + integer_t depress_priority; }; -typedef struct policy_rr_base *policy_rr_base_t; -typedef struct policy_rr_limit *policy_rr_limit_t; -typedef struct policy_rr_info *policy_rr_info_t; +typedef struct policy_rr_base *policy_rr_base_t; +typedef struct policy_rr_limit *policy_rr_limit_t; +typedef struct policy_rr_info *policy_rr_info_t; -typedef struct policy_rr_base policy_rr_base_data_t; -typedef struct policy_rr_limit policy_rr_limit_data_t; -typedef struct policy_rr_info policy_rr_info_data_t; +typedef struct policy_rr_base policy_rr_base_data_t; +typedef struct policy_rr_limit policy_rr_limit_data_t; +typedef struct policy_rr_info policy_rr_info_data_t; -#define POLICY_RR_BASE_COUNT ((mach_msg_type_number_t) \ +#define POLICY_RR_BASE_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_rr_base)/sizeof(integer_t))) -#define POLICY_RR_LIMIT_COUNT 
((mach_msg_type_number_t) \ +#define POLICY_RR_LIMIT_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_rr_limit)/sizeof(integer_t))) -#define POLICY_RR_INFO_COUNT ((mach_msg_type_number_t) \ +#define POLICY_RR_INFO_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_rr_info)/sizeof(integer_t))) /* - * Types for the FIRST-IN-FIRST-OUT (FIFO) policy + * Types for the FIRST-IN-FIRST-OUT (FIFO) policy */ -struct policy_fifo_base { - integer_t base_priority; +struct policy_fifo_base { + integer_t base_priority; }; struct policy_fifo_limit { - integer_t max_priority; + integer_t max_priority; }; struct policy_fifo_info { - integer_t max_priority; - integer_t base_priority; - boolean_t depressed; - integer_t depress_priority; + integer_t max_priority; + integer_t base_priority; + boolean_t depressed; + integer_t depress_priority; }; -typedef struct policy_fifo_base *policy_fifo_base_t; -typedef struct policy_fifo_limit *policy_fifo_limit_t; -typedef struct policy_fifo_info *policy_fifo_info_t; +typedef struct policy_fifo_base *policy_fifo_base_t; +typedef struct policy_fifo_limit *policy_fifo_limit_t; +typedef struct policy_fifo_info *policy_fifo_info_t; -typedef struct policy_fifo_base policy_fifo_base_data_t; -typedef struct policy_fifo_limit policy_fifo_limit_data_t; -typedef struct policy_fifo_info policy_fifo_info_data_t; +typedef struct policy_fifo_base policy_fifo_base_data_t; +typedef struct policy_fifo_limit policy_fifo_limit_data_t; +typedef struct policy_fifo_info policy_fifo_info_data_t; -#define POLICY_FIFO_BASE_COUNT ((mach_msg_type_number_t) \ +#define POLICY_FIFO_BASE_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_fifo_base)/sizeof(integer_t))) -#define POLICY_FIFO_LIMIT_COUNT ((mach_msg_type_number_t) \ +#define POLICY_FIFO_LIMIT_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_fifo_limit)/sizeof(integer_t))) -#define POLICY_FIFO_INFO_COUNT ((mach_msg_type_number_t) \ +#define POLICY_FIFO_INFO_COUNT ((mach_msg_type_number_t) \ (sizeof(struct policy_fifo_info)/sizeof(integer_t))) /* - * Aggregate policy types + * Aggregate policy types */ struct policy_bases { - policy_timeshare_base_data_t ts; - policy_rr_base_data_t rr; - policy_fifo_base_data_t fifo; + policy_timeshare_base_data_t ts; + policy_rr_base_data_t rr; + policy_fifo_base_data_t fifo; }; struct policy_limits { - policy_timeshare_limit_data_t ts; - policy_rr_limit_data_t rr; - policy_fifo_limit_data_t fifo; + policy_timeshare_limit_data_t ts; + policy_rr_limit_data_t rr; + policy_fifo_limit_data_t fifo; }; struct policy_infos { - policy_timeshare_info_data_t ts; - policy_rr_info_data_t rr; - policy_fifo_info_data_t fifo; + policy_timeshare_info_data_t ts; + policy_rr_info_data_t rr; + policy_fifo_info_data_t fifo; }; -typedef struct policy_bases policy_base_data_t; -typedef struct policy_limits policy_limit_data_t; -typedef struct policy_infos policy_info_data_t; +typedef struct policy_bases policy_base_data_t; +typedef struct policy_limits policy_limit_data_t; +typedef struct policy_infos policy_info_data_t; -#endif /* _MACH_POLICY_H_ */ +#endif /* _MACH_POLICY_H_ */ diff --git a/osfmk/mach/port.h b/osfmk/mach/port.h index 763e6f94f..db15ea83f 100644 --- a/osfmk/mach/port.h +++ b/osfmk/mach/port.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
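
[Note on the *_COUNT macros above] Each count is the corresponding info struct's size expressed in integer_t words, the unit MIG uses when marshalling these variable-length arrays. A compile-time restatement (C11 _Static_assert; like the count math itself, this assumes boolean_t and integer_t share a width):

    #include <mach/policy.h>

    /* Sanity sketch: the counts are just field tallies in integer_t units. */
    _Static_assert(POLICY_TIMESHARE_INFO_COUNT == 5,
        "policy_timeshare_info has five integer_t-sized fields");
    _Static_assert(POLICY_RR_BASE_COUNT == 2,
        "policy_rr_base carries base_priority and quantum");
    _Static_assert(POLICY_FIFO_INFO_COUNT == 4,
        "policy_fifo_info has four integer_t-sized fields");
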
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -71,7 +71,7 @@ * also exist). * * Unique collections of these endpoints are maintained for each - * Mach task. Each Mach port in the task's collection is given a + * Mach task. Each Mach port in the task's collection is given a * [task-local] name to identify it - and the the various "rights" * held by the task for that specific endpoint. * @@ -83,7 +83,7 @@ * */ -#ifndef _MACH_PORT_H_ +#ifndef _MACH_PORT_H_ #define _MACH_PORT_H_ #include @@ -102,13 +102,13 @@ * See mach_port_t for a type that implies a "named right." * */ - + typedef natural_t mach_port_name_t; typedef mach_port_name_t *mach_port_name_array_t; -#ifdef KERNEL +#ifdef KERNEL -/* +/* * mach_port_t - a named port right * * In the kernel, "rights" are represented [named] by pointers to @@ -119,7 +119,7 @@ typedef mach_port_name_t *mach_port_name_array_t; * code - including, but not limited to, Mach IPC code - lives in the * limbo between the current user-level task and the "next" task. Very * little of the kernel code runs in full kernel task context. So very - * little of it gets to use the kernel task's port name space. + * little of it gets to use the kernel task's port name space. * * Because of this implementation approach, all in-kernel rights for * a given port coalesce [have the same name/pointer]. 
The actual @@ -129,23 +129,23 @@ typedef mach_port_name_t *mach_port_name_array_t; * */ -#ifndef MACH_KERNEL_PRIVATE +#ifndef MACH_KERNEL_PRIVATE /* * For kernel code that resides outside of Mach proper, we opaque the * port structure definition. */ -struct ipc_port ; +struct ipc_port; -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -typedef struct ipc_port *ipc_port_t; +typedef struct ipc_port *ipc_port_t; -#define IPC_PORT_NULL ((ipc_port_t) 0UL) -#define IPC_PORT_DEAD ((ipc_port_t)~0UL) +#define IPC_PORT_NULL ((ipc_port_t) 0UL) +#define IPC_PORT_DEAD ((ipc_port_t)~0UL) #define IPC_PORT_VALID(port) \ ((port) != IPC_PORT_NULL && (port) != IPC_PORT_DEAD) -typedef ipc_port_t mach_port_t; +typedef ipc_port_t mach_port_t; /* * Since the 32-bit and 64-bit representations of ~0 are different, @@ -155,30 +155,30 @@ typedef ipc_port_t mach_port_t; #define CAST_MACH_PORT_TO_NAME(x) ((mach_port_name_t)(uintptr_t)(x)) #define CAST_MACH_NAME_TO_PORT(x) ((x) == MACH_PORT_DEAD ? (mach_port_t)IPC_PORT_DEAD : (mach_port_t)(uintptr_t)(x)) -#else /* KERNEL */ +#else /* KERNEL */ -/* +/* * mach_port_t - a named port right * * In user-space, "rights" are represented by the name of the * right in the Mach port namespace. Even so, this type is * presented as a unique one to more clearly denote the presence - * of a right coming along with the name. + * of a right coming along with the name. * * Often, various rights for a port held in a single name space * will coalesce and are, therefore, be identified by a single name * [this is the case for send and receive rights]. But not * always [send-once rights currently get a unique name for - * each right]. + * each right]. * */ #include #include -#endif /* KERNEL */ +#endif /* KERNEL */ -typedef mach_port_t *mach_port_array_t; +typedef mach_port_t *mach_port_array_t; /* * MACH_PORT_NULL is a legal value that can be carried in messages. @@ -189,11 +189,11 @@ typedef mach_port_t *mach_port_array_t; * that a port right was present, but it died. 
*/ -#define MACH_PORT_NULL 0 /* intentional loose typing */ -#define MACH_PORT_DEAD ((mach_port_name_t) ~0) -#define MACH_PORT_VALID(name) \ - (((name) != MACH_PORT_NULL) && \ - ((name) != MACH_PORT_DEAD)) +#define MACH_PORT_NULL 0 /* intentional loose typing */ +#define MACH_PORT_DEAD ((mach_port_name_t) ~0) +#define MACH_PORT_VALID(name) \ + (((name) != MACH_PORT_NULL) && \ + ((name) != MACH_PORT_DEAD)) /* @@ -210,20 +210,20 @@ typedef mach_port_t *mach_port_array_t; * */ -#ifndef NO_PORT_GEN +#ifndef NO_PORT_GEN -#define MACH_PORT_INDEX(name) ((name) >> 8) -#define MACH_PORT_GEN(name) (((name) & 0xff) << 24) -#define MACH_PORT_MAKE(index, gen) \ - (((index) << 8) | (gen) >> 24) +#define MACH_PORT_INDEX(name) ((name) >> 8) +#define MACH_PORT_GEN(name) (((name) & 0xff) << 24) +#define MACH_PORT_MAKE(index, gen) \ + (((index) << 8) | (gen) >> 24) -#else /* NO_PORT_GEN */ +#else /* NO_PORT_GEN */ -#define MACH_PORT_INDEX(name) (name) -#define MACH_PORT_GEN(name) (0) -#define MACH_PORT_MAKE(index, gen) (index) +#define MACH_PORT_INDEX(name) (name) +#define MACH_PORT_GEN(name) (0) +#define MACH_PORT_MAKE(index, gen) (index) -#endif /* NO_PORT_GEN */ +#endif /* NO_PORT_GEN */ /* @@ -238,135 +238,135 @@ typedef mach_port_t *mach_port_array_t; typedef natural_t mach_port_right_t; -#define MACH_PORT_RIGHT_SEND ((mach_port_right_t) 0) -#define MACH_PORT_RIGHT_RECEIVE ((mach_port_right_t) 1) -#define MACH_PORT_RIGHT_SEND_ONCE ((mach_port_right_t) 2) -#define MACH_PORT_RIGHT_PORT_SET ((mach_port_right_t) 3) -#define MACH_PORT_RIGHT_DEAD_NAME ((mach_port_right_t) 4) -#define MACH_PORT_RIGHT_LABELH ((mach_port_right_t) 5) -#define MACH_PORT_RIGHT_NUMBER ((mach_port_right_t) 6) +#define MACH_PORT_RIGHT_SEND ((mach_port_right_t) 0) +#define MACH_PORT_RIGHT_RECEIVE ((mach_port_right_t) 1) +#define MACH_PORT_RIGHT_SEND_ONCE ((mach_port_right_t) 2) +#define MACH_PORT_RIGHT_PORT_SET ((mach_port_right_t) 3) +#define MACH_PORT_RIGHT_DEAD_NAME ((mach_port_right_t) 4) +#define MACH_PORT_RIGHT_LABELH ((mach_port_right_t) 5) +#define MACH_PORT_RIGHT_NUMBER ((mach_port_right_t) 6) typedef natural_t mach_port_type_t; typedef mach_port_type_t *mach_port_type_array_t; -#define MACH_PORT_TYPE(right) \ - ((mach_port_type_t)(((mach_port_type_t) 1) \ - << ((right) + ((mach_port_right_t) 16)))) -#define MACH_PORT_TYPE_NONE ((mach_port_type_t) 0L) -#define MACH_PORT_TYPE_SEND MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND) -#define MACH_PORT_TYPE_RECEIVE MACH_PORT_TYPE(MACH_PORT_RIGHT_RECEIVE) +#define MACH_PORT_TYPE(right) \ + ((mach_port_type_t)(((mach_port_type_t) 1) \ + << ((right) + ((mach_port_right_t) 16)))) +#define MACH_PORT_TYPE_NONE ((mach_port_type_t) 0L) +#define MACH_PORT_TYPE_SEND MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND) +#define MACH_PORT_TYPE_RECEIVE MACH_PORT_TYPE(MACH_PORT_RIGHT_RECEIVE) #define MACH_PORT_TYPE_SEND_ONCE MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND_ONCE) -#define MACH_PORT_TYPE_PORT_SET MACH_PORT_TYPE(MACH_PORT_RIGHT_PORT_SET) +#define MACH_PORT_TYPE_PORT_SET MACH_PORT_TYPE(MACH_PORT_RIGHT_PORT_SET) #define MACH_PORT_TYPE_DEAD_NAME MACH_PORT_TYPE(MACH_PORT_RIGHT_DEAD_NAME) #define MACH_PORT_TYPE_LABELH MACH_PORT_TYPE(MACH_PORT_RIGHT_LABELH) /* Convenient combinations. 
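
[Note on the name layout above] Under the default !NO_PORT_GEN scheme, a port name packs an entry-table index into its upper 24 bits and a generation number into the low 8; MACH_PORT_GEN() hands the generation back pre-shifted into the top byte precisely so MACH_PORT_MAKE() can fold it in again. A sketch restating that round trip (split_name is an illustrative helper):

    #include <assert.h>
    #include <mach/port.h>

    /* Sketch: pull a name apart and reassemble it.  With the generation
     * scheme above the decomposition is lossless. */
    static void
    split_name(mach_port_name_t name)
    {
        natural_t index = MACH_PORT_INDEX(name);  /* name >> 8 */
        natural_t gen   = MACH_PORT_GEN(name);    /* low byte, shifted to the top */

        assert(MACH_PORT_MAKE(index, gen) == name);

        /* The type bits follow a similarly fixed layout: right r maps to
         * bit (r + 16), so a send right, for example, is bit 16. */
        assert(MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND) ==
               ((mach_port_type_t)1 << 16));
    }
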
*/ -#define MACH_PORT_TYPE_SEND_RECEIVE \ - (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_RECEIVE) -#define MACH_PORT_TYPE_SEND_RIGHTS \ - (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE) -#define MACH_PORT_TYPE_PORT_RIGHTS \ - (MACH_PORT_TYPE_SEND_RIGHTS|MACH_PORT_TYPE_RECEIVE) -#define MACH_PORT_TYPE_PORT_OR_DEAD \ - (MACH_PORT_TYPE_PORT_RIGHTS|MACH_PORT_TYPE_DEAD_NAME) -#define MACH_PORT_TYPE_ALL_RIGHTS \ - (MACH_PORT_TYPE_PORT_OR_DEAD|MACH_PORT_TYPE_PORT_SET) +#define MACH_PORT_TYPE_SEND_RECEIVE \ + (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_RECEIVE) +#define MACH_PORT_TYPE_SEND_RIGHTS \ + (MACH_PORT_TYPE_SEND|MACH_PORT_TYPE_SEND_ONCE) +#define MACH_PORT_TYPE_PORT_RIGHTS \ + (MACH_PORT_TYPE_SEND_RIGHTS|MACH_PORT_TYPE_RECEIVE) +#define MACH_PORT_TYPE_PORT_OR_DEAD \ + (MACH_PORT_TYPE_PORT_RIGHTS|MACH_PORT_TYPE_DEAD_NAME) +#define MACH_PORT_TYPE_ALL_RIGHTS \ + (MACH_PORT_TYPE_PORT_OR_DEAD|MACH_PORT_TYPE_PORT_SET) /* Dummy type bits that mach_port_type/mach_port_names can return. */ -#define MACH_PORT_TYPE_DNREQUEST 0x80000000 -#define MACH_PORT_TYPE_SPREQUEST 0x40000000 -#define MACH_PORT_TYPE_SPREQUEST_DELAYED 0x20000000 +#define MACH_PORT_TYPE_DNREQUEST 0x80000000 +#define MACH_PORT_TYPE_SPREQUEST 0x40000000 +#define MACH_PORT_TYPE_SPREQUEST_DELAYED 0x20000000 /* User-references for capabilities. */ typedef natural_t mach_port_urefs_t; -typedef integer_t mach_port_delta_t; /* change in urefs */ +typedef integer_t mach_port_delta_t; /* change in urefs */ /* Attributes of ports. (See mach_port_get_receive_status.) */ -typedef natural_t mach_port_seqno_t; /* sequence number */ -typedef natural_t mach_port_mscount_t; /* make-send count */ -typedef natural_t mach_port_msgcount_t; /* number of msgs */ -typedef natural_t mach_port_rights_t; /* number of rights */ +typedef natural_t mach_port_seqno_t; /* sequence number */ +typedef natural_t mach_port_mscount_t; /* make-send count */ +typedef natural_t mach_port_msgcount_t; /* number of msgs */ +typedef natural_t mach_port_rights_t; /* number of rights */ /* * Are there outstanding send rights for a given port? */ -#define MACH_PORT_SRIGHTS_NONE 0 /* no srights */ -#define MACH_PORT_SRIGHTS_PRESENT 1 /* srights */ -typedef unsigned int mach_port_srights_t; /* status of send rights */ +#define MACH_PORT_SRIGHTS_NONE 0 /* no srights */ +#define MACH_PORT_SRIGHTS_PRESENT 1 /* srights */ +typedef unsigned int mach_port_srights_t; /* status of send rights */ typedef struct mach_port_status { - mach_port_rights_t mps_pset; /* count of containing port sets */ - mach_port_seqno_t mps_seqno; /* sequence number */ - mach_port_mscount_t mps_mscount; /* make-send count */ - mach_port_msgcount_t mps_qlimit; /* queue limit */ - mach_port_msgcount_t mps_msgcount; /* number in the queue */ - mach_port_rights_t mps_sorights; /* how many send-once rights */ - boolean_t mps_srights; /* do send rights exist? */ - boolean_t mps_pdrequest; /* port-deleted requested? */ - boolean_t mps_nsrequest; /* no-senders requested? */ - natural_t mps_flags; /* port flags */ + mach_port_rights_t mps_pset; /* count of containing port sets */ + mach_port_seqno_t mps_seqno; /* sequence number */ + mach_port_mscount_t mps_mscount; /* make-send count */ + mach_port_msgcount_t mps_qlimit; /* queue limit */ + mach_port_msgcount_t mps_msgcount; /* number in the queue */ + mach_port_rights_t mps_sorights; /* how many send-once rights */ + boolean_t mps_srights; /* do send rights exist? */ + boolean_t mps_pdrequest; /* port-deleted requested? */ + boolean_t mps_nsrequest; /* no-senders requested? 
*/ + natural_t mps_flags; /* port flags */ } mach_port_status_t; /* System-wide values for setting queue limits on a port */ -#define MACH_PORT_QLIMIT_ZERO (0) -#define MACH_PORT_QLIMIT_BASIC (5) -#define MACH_PORT_QLIMIT_SMALL (16) -#define MACH_PORT_QLIMIT_LARGE (1024) -#define MACH_PORT_QLIMIT_KERNEL (65534) -#define MACH_PORT_QLIMIT_MIN MACH_PORT_QLIMIT_ZERO -#define MACH_PORT_QLIMIT_DEFAULT MACH_PORT_QLIMIT_BASIC -#define MACH_PORT_QLIMIT_MAX MACH_PORT_QLIMIT_LARGE +#define MACH_PORT_QLIMIT_ZERO (0) +#define MACH_PORT_QLIMIT_BASIC (5) +#define MACH_PORT_QLIMIT_SMALL (16) +#define MACH_PORT_QLIMIT_LARGE (1024) +#define MACH_PORT_QLIMIT_KERNEL (65534) +#define MACH_PORT_QLIMIT_MIN MACH_PORT_QLIMIT_ZERO +#define MACH_PORT_QLIMIT_DEFAULT MACH_PORT_QLIMIT_BASIC +#define MACH_PORT_QLIMIT_MAX MACH_PORT_QLIMIT_LARGE typedef struct mach_port_limits { - mach_port_msgcount_t mpl_qlimit; /* number of msgs */ + mach_port_msgcount_t mpl_qlimit; /* number of msgs */ } mach_port_limits_t; /* Possible values for mps_flags (part of mach_port_status_t) */ -#define MACH_PORT_STATUS_FLAG_TEMPOWNER 0x01 -#define MACH_PORT_STATUS_FLAG_GUARDED 0x02 -#define MACH_PORT_STATUS_FLAG_STRICT_GUARD 0x04 -#define MACH_PORT_STATUS_FLAG_IMP_DONATION 0x08 -#define MACH_PORT_STATUS_FLAG_REVIVE 0x10 -#define MACH_PORT_STATUS_FLAG_TASKPTR 0x20 +#define MACH_PORT_STATUS_FLAG_TEMPOWNER 0x01 +#define MACH_PORT_STATUS_FLAG_GUARDED 0x02 +#define MACH_PORT_STATUS_FLAG_STRICT_GUARD 0x04 +#define MACH_PORT_STATUS_FLAG_IMP_DONATION 0x08 +#define MACH_PORT_STATUS_FLAG_REVIVE 0x10 +#define MACH_PORT_STATUS_FLAG_TASKPTR 0x20 typedef struct mach_port_info_ext { - mach_port_status_t mpie_status; - mach_port_msgcount_t mpie_boost_cnt; - uint32_t reserved[6]; + mach_port_status_t mpie_status; + mach_port_msgcount_t mpie_boost_cnt; + uint32_t reserved[6]; } mach_port_info_ext_t; -typedef integer_t *mach_port_info_t; /* varying array of natural_t */ +typedef integer_t *mach_port_info_t; /* varying array of natural_t */ /* Flavors for mach_port_get/set_attributes() */ -typedef int mach_port_flavor_t; -#define MACH_PORT_LIMITS_INFO 1 /* uses mach_port_limits_t */ -#define MACH_PORT_RECEIVE_STATUS 2 /* uses mach_port_status_t */ -#define MACH_PORT_DNREQUESTS_SIZE 3 /* info is int */ -#define MACH_PORT_TEMPOWNER 4 /* indicates receive right will be reassigned to another task */ -#define MACH_PORT_IMPORTANCE_RECEIVER 5 /* indicates recieve right accepts priority donation */ -#define MACH_PORT_DENAP_RECEIVER 6 /* indicates receive right accepts de-nap donation */ -#define MACH_PORT_INFO_EXT 7 /* uses mach_port_info_ext_t */ - -#define MACH_PORT_LIMITS_INFO_COUNT ((natural_t) \ +typedef int mach_port_flavor_t; +#define MACH_PORT_LIMITS_INFO 1 /* uses mach_port_limits_t */ +#define MACH_PORT_RECEIVE_STATUS 2 /* uses mach_port_status_t */ +#define MACH_PORT_DNREQUESTS_SIZE 3 /* info is int */ +#define MACH_PORT_TEMPOWNER 4 /* indicates receive right will be reassigned to another task */ +#define MACH_PORT_IMPORTANCE_RECEIVER 5 /* indicates recieve right accepts priority donation */ +#define MACH_PORT_DENAP_RECEIVER 6 /* indicates receive right accepts de-nap donation */ +#define MACH_PORT_INFO_EXT 7 /* uses mach_port_info_ext_t */ + +#define MACH_PORT_LIMITS_INFO_COUNT ((natural_t) \ (sizeof(mach_port_limits_t)/sizeof(natural_t))) -#define MACH_PORT_RECEIVE_STATUS_COUNT ((natural_t) \ +#define MACH_PORT_RECEIVE_STATUS_COUNT ((natural_t) \ (sizeof(mach_port_status_t)/sizeof(natural_t))) #define MACH_PORT_DNREQUESTS_SIZE_COUNT 1 -#define 
MACH_PORT_INFO_EXT_COUNT ((natural_t) \ +#define MACH_PORT_INFO_EXT_COUNT ((natural_t) \ (sizeof(mach_port_info_ext_t)/sizeof(natural_t))) /* * Structure used to pass information about port allocation requests. * Must be padded to 64-bits total length. */ typedef struct mach_port_qos { - unsigned int name:1; /* name given */ - unsigned int prealloc:1; /* prealloced message */ - boolean_t pad1:30; - natural_t len; + unsigned int name:1; /* name given */ + unsigned int prealloc:1; /* prealloced message */ + boolean_t pad1:30; + natural_t len; } mach_port_qos_t; /* Mach Port Guarding definitions */ @@ -377,21 +377,21 @@ typedef struct mach_port_qos { * Indicates attributes to be set for the newly * allocated port. */ -#define MPO_CONTEXT_AS_GUARD 0x01 /* Add guard to the port */ -#define MPO_QLIMIT 0x02 /* Set qlimit for the port msg queue */ -#define MPO_TEMPOWNER 0x04 /* Set the tempowner bit of the port */ -#define MPO_IMPORTANCE_RECEIVER 0x08 /* Mark the port as importance receiver */ -#define MPO_INSERT_SEND_RIGHT 0x10 /* Insert a send right for the port */ -#define MPO_STRICT 0x20 /* Apply strict guarding for port */ -#define MPO_DENAP_RECEIVER 0x40 /* Mark the port as App de-nap receiver */ +#define MPO_CONTEXT_AS_GUARD 0x01 /* Add guard to the port */ +#define MPO_QLIMIT 0x02 /* Set qlimit for the port msg queue */ +#define MPO_TEMPOWNER 0x04 /* Set the tempowner bit of the port */ +#define MPO_IMPORTANCE_RECEIVER 0x08 /* Mark the port as importance receiver */ +#define MPO_INSERT_SEND_RIGHT 0x10 /* Insert a send right for the port */ +#define MPO_STRICT 0x20 /* Apply strict guarding for port */ +#define MPO_DENAP_RECEIVER 0x40 /* Mark the port as App de-nap receiver */ /* * Structure to define optional attributes for a newly * constructed port. */ typedef struct mach_port_options { - uint32_t flags; /* Flags defining attributes for port */ - mach_port_limits_t mpl; /* Message queue limit for port */ - uint64_t reserved[2]; /* Reserved */ + uint32_t flags; /* Flags defining attributes for port */ + mach_port_limits_t mpl; /* Message queue limit for port */ + uint64_t reserved[2]; /* Reserved */ }mach_port_options_t; typedef mach_port_options_t *mach_port_options_ptr_t; @@ -401,16 +401,16 @@ typedef mach_port_options_t *mach_port_options_ptr_t; * mach ports and file descriptors. GUARD_TYPE_ is used * to differentiate among them. */ -#define GUARD_TYPE_MACH_PORT 0x1 +#define GUARD_TYPE_MACH_PORT 0x1 /* Reasons for exception for a guarded mach port */ enum mach_port_guard_exception_codes { - kGUARD_EXC_DESTROY = 1u << 0, - kGUARD_EXC_MOD_REFS = 1u << 1, - kGUARD_EXC_SET_CONTEXT = 1u << 2, - kGUARD_EXC_UNGUARDED = 1u << 3, - kGUARD_EXC_INCORRECT_GUARD = 1u << 4, - /* start of non-fatal guards */ + kGUARD_EXC_DESTROY = 1u << 0, + kGUARD_EXC_MOD_REFS = 1u << 1, + kGUARD_EXC_SET_CONTEXT = 1u << 2, + kGUARD_EXC_UNGUARDED = 1u << 3, + kGUARD_EXC_INCORRECT_GUARD = 1u << 4, + /* start of non-fatal guards */ kGUARD_EXC_INVALID_RIGHT = 1u << 8, kGUARD_EXC_INVALID_NAME = 1u << 9, kGUARD_EXC_INVALID_VALUE = 1u << 10, @@ -426,21 +426,21 @@ enum mach_port_guard_exception_codes { kGUARD_EXC_RCV_INVALID_NOTIFY = 1u << 19 }; -#if !__DARWIN_UNIX03 && !defined(_NO_PORT_T_FROM_MACH) +#if !__DARWIN_UNIX03 && !defined(_NO_PORT_T_FROM_MACH) /* * Mach 3.0 renamed everything to have mach_ in front of it. * These types and macros are provided for backward compatibility * but are deprecated. 
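
[Note on the MPO_* flags above] mach_port_options_t and its flags are consumed by mach_port_construct(), which allocates a receive right and applies the requested attributes in a single call; misuse of a strictly guarded port then surfaces as the fatal kGUARD_EXC_* codes listed earlier. A user-space sketch — GUARD_VALUE is an arbitrary illustrative cookie:

    #include <mach/mach.h>

    #define GUARD_VALUE ((mach_port_context_t)0x4a11)  /* illustrative */

    /* Sketch: one-shot construction of a strictly guarded receive right
     * with a queue limit and a pre-inserted send right. */
    static kern_return_t
    make_guarded_port(mach_port_t *out)
    {
        mach_port_options_t opts = {
            .flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT |
                     MPO_QLIMIT | MPO_INSERT_SEND_RIGHT,
            .mpl   = { .mpl_qlimit = MACH_PORT_QLIMIT_DEFAULT },
        };
        kern_return_t kr = mach_port_construct(mach_task_self(), &opts,
            GUARD_VALUE, out);
        if (kr != KERN_SUCCESS) {
            return kr;
        }

        /* The receive-side view is the mach_port_status_t shown earlier. */
        mach_port_status_t status;
        mach_msg_type_number_t count = MACH_PORT_RECEIVE_STATUS_COUNT;
        return mach_port_get_attributes(mach_task_self(), *out,
            MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status, &count);
    }

Tearing such a port down later requires presenting the same guard value to mach_port_destruct().
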
*/ -typedef mach_port_t port_t; -typedef mach_port_name_t port_name_t; -typedef mach_port_name_t *port_name_array_t; +typedef mach_port_t port_t; +typedef mach_port_name_t port_name_t; +typedef mach_port_name_t *port_name_array_t; -#define PORT_NULL ((port_t) 0) -#define PORT_DEAD ((port_t) ~0) +#define PORT_NULL ((port_t) 0) +#define PORT_DEAD ((port_t) ~0) #define PORT_VALID(name) \ - ((port_t)(name) != PORT_NULL && (port_t)(name) != PORT_DEAD) + ((port_t)(name) != PORT_NULL && (port_t)(name) != PORT_DEAD) -#endif /* !__DARWIN_UNIX03 && !_NO_PORT_T_FROM_MACH */ +#endif /* !__DARWIN_UNIX03 && !_NO_PORT_T_FROM_MACH */ -#endif /* _MACH_PORT_H_ */ +#endif /* _MACH_PORT_H_ */ diff --git a/osfmk/mach/processor.defs b/osfmk/mach/processor.defs index 99ea969a7..1503c313c 100644 --- a/osfmk/mach/processor.defs +++ b/osfmk/mach/processor.defs @@ -78,15 +78,26 @@ subsystem /* * Start processor. */ + +#ifdef KERNEL_SERVER +routine processor_start_from_user( + processor : processor_t); +#else routine processor_start( processor : processor_t); +#endif /* * Exit processor -- may not be restartable. */ +#ifdef KERNEL_SERVER +routine processor_exit_from_user( + processor : processor_t); +#else routine processor_exit( processor : processor_t); +#endif /* * Return information about this processor. diff --git a/osfmk/mach/processor_info.h b/osfmk/mach/processor_info.h index bd1f74056..739a68e06 100644 --- a/osfmk/mach/processor_info.h +++ b/osfmk/mach/processor_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
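
[Note on the processor.defs hunk above] The change renames only the kernel-server side of the MIG routines (processor_start_from_user / processor_exit_from_user under KERNEL_SERVER); user code keeps calling the stubs by their old names. A sketch of an unchanged caller, assuming host_priv is the privileged host port (these calls fail without it) and restart_cpu/which are illustrative:

    #include <mach/mach.h>

    /* Sketch: user side is untouched by the rename above. */
    static kern_return_t
    restart_cpu(host_priv_t host_priv, unsigned which)
    {
        processor_array_t procs;
        mach_msg_type_number_t count;
        kern_return_t kr = host_processors(host_priv, &procs, &count);
        if (kr != KERN_SUCCESS) {
            return kr;
        }
        if (which >= count) {
            return KERN_INVALID_ARGUMENT;
        }
        return processor_start(procs[which]);
    }
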
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,7 +64,7 @@ * Data structure definitions for processor_info, processor_set_info */ -#ifndef _MACH_PROCESSOR_INFO_H_ +#ifndef _MACH_PROCESSOR_INFO_H_ #define _MACH_PROCESSOR_INFO_H_ #include @@ -74,86 +74,86 @@ /* * Generic information structure to allow for expansion. */ -typedef integer_t *processor_info_t; /* varying array of int. */ -typedef integer_t *processor_info_array_t; /* varying array of int */ +typedef integer_t *processor_info_t; /* varying array of int. */ +typedef integer_t *processor_info_array_t; /* varying array of int */ -#define PROCESSOR_INFO_MAX (1024) /* max array size */ -typedef integer_t processor_info_data_t[PROCESSOR_INFO_MAX]; +#define PROCESSOR_INFO_MAX (1024) /* max array size */ +typedef integer_t processor_info_data_t[PROCESSOR_INFO_MAX]; -typedef integer_t *processor_set_info_t; /* varying array of int. */ +typedef integer_t *processor_set_info_t; /* varying array of int. */ -#define PROCESSOR_SET_INFO_MAX (1024) /* max array size */ -typedef integer_t processor_set_info_data_t[PROCESSOR_SET_INFO_MAX]; +#define PROCESSOR_SET_INFO_MAX (1024) /* max array size */ +typedef integer_t processor_set_info_data_t[PROCESSOR_SET_INFO_MAX]; /* * Currently defined information. */ -typedef int processor_flavor_t; -#define PROCESSOR_BASIC_INFO 1 /* basic information */ -#define PROCESSOR_CPU_LOAD_INFO 2 /* cpu load information */ -#define PROCESSOR_PM_REGS_INFO 0x10000001 /* performance monitor register info */ -#define PROCESSOR_TEMPERATURE 0x10000002 /* Processor core temperature */ +typedef int processor_flavor_t; +#define PROCESSOR_BASIC_INFO 1 /* basic information */ +#define PROCESSOR_CPU_LOAD_INFO 2 /* cpu load information */ +#define PROCESSOR_PM_REGS_INFO 0x10000001 /* performance monitor register info */ +#define PROCESSOR_TEMPERATURE 0x10000002 /* Processor core temperature */ struct processor_basic_info { - cpu_type_t cpu_type; /* type of cpu */ - cpu_subtype_t cpu_subtype; /* subtype of cpu */ - boolean_t running; /* is processor running */ - int slot_num; /* slot number */ - boolean_t is_master; /* is this the master processor */ + cpu_type_t cpu_type; /* type of cpu */ + cpu_subtype_t cpu_subtype; /* subtype of cpu */ + boolean_t running; /* is processor running */ + int slot_num; /* slot number */ + boolean_t is_master; /* is this the master processor */ }; -typedef struct processor_basic_info processor_basic_info_data_t; -typedef struct processor_basic_info *processor_basic_info_t; -#define PROCESSOR_BASIC_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(processor_basic_info_data_t)/sizeof(natural_t))) +typedef struct processor_basic_info processor_basic_info_data_t; +typedef struct processor_basic_info *processor_basic_info_t; +#define PROCESSOR_BASIC_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(processor_basic_info_data_t)/sizeof(natural_t))) struct processor_cpu_load_info { /* number of ticks while running... */ - unsigned int cpu_ticks[CPU_STATE_MAX]; /* ... in the given mode */ -}; + unsigned int cpu_ticks[CPU_STATE_MAX]; /* ... 
in the given mode */ +}; -typedef struct processor_cpu_load_info processor_cpu_load_info_data_t; -typedef struct processor_cpu_load_info *processor_cpu_load_info_t; -#define PROCESSOR_CPU_LOAD_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(processor_cpu_load_info_data_t)/sizeof(natural_t))) +typedef struct processor_cpu_load_info processor_cpu_load_info_data_t; +typedef struct processor_cpu_load_info *processor_cpu_load_info_t; +#define PROCESSOR_CPU_LOAD_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(processor_cpu_load_info_data_t)/sizeof(natural_t))) /* * Scaling factor for load_average, mach_factor. */ -#define LOAD_SCALE 1000 +#define LOAD_SCALE 1000 -typedef int processor_set_flavor_t; -#define PROCESSOR_SET_BASIC_INFO 5 /* basic information */ +typedef int processor_set_flavor_t; +#define PROCESSOR_SET_BASIC_INFO 5 /* basic information */ struct processor_set_basic_info { - int processor_count; /* How many processors */ - int default_policy; /* When others not enabled */ + int processor_count; /* How many processors */ + int default_policy; /* When others not enabled */ }; -typedef struct processor_set_basic_info processor_set_basic_info_data_t; -typedef struct processor_set_basic_info *processor_set_basic_info_t; -#define PROCESSOR_SET_BASIC_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(processor_set_basic_info_data_t)/sizeof(natural_t))) +typedef struct processor_set_basic_info processor_set_basic_info_data_t; +typedef struct processor_set_basic_info *processor_set_basic_info_t; +#define PROCESSOR_SET_BASIC_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(processor_set_basic_info_data_t)/sizeof(natural_t))) -#define PROCESSOR_SET_LOAD_INFO 4 /* scheduling statistics */ +#define PROCESSOR_SET_LOAD_INFO 4 /* scheduling statistics */ struct processor_set_load_info { - int task_count; /* How many tasks */ - int thread_count; /* How many threads */ - integer_t load_average; /* Scaled */ - integer_t mach_factor; /* Scaled */ + int task_count; /* How many tasks */ + int thread_count; /* How many threads */ + integer_t load_average; /* Scaled */ + integer_t mach_factor; /* Scaled */ }; typedef struct processor_set_load_info processor_set_load_info_data_t; typedef struct processor_set_load_info *processor_set_load_info_t; -#define PROCESSOR_SET_LOAD_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(processor_set_load_info_data_t)/sizeof(natural_t))) +#define PROCESSOR_SET_LOAD_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(processor_set_load_info_data_t)/sizeof(natural_t))) -#ifdef PRIVATE +#ifdef PRIVATE #define PROCESSOR_SET_ENABLED_POLICIES 3 #define PROCESSOR_SET_ENABLED_POLICIES_COUNT ((mach_msg_type_number_t) \ - (sizeof(policy_t)/sizeof(natural_t))) + (sizeof(policy_t)/sizeof(natural_t))) #define PROCESSOR_SET_TIMESHARE_DEFAULT 10 #define PROCESSOR_SET_TIMESHARE_LIMITS 11 @@ -164,6 +164,6 @@ typedef struct processor_set_load_info *processor_set_load_info_t; #define PROCESSOR_SET_FIFO_DEFAULT 30 #define PROCESSOR_SET_FIFO_LIMITS 31 -#endif /* PRIVATE */ +#endif /* PRIVATE */ -#endif /* _MACH_PROCESSOR_INFO_H_ */ +#endif /* _MACH_PROCESSOR_INFO_H_ */ diff --git a/osfmk/mach/prof_types.h b/osfmk/mach/prof_types.h index a41037212..f15e6117a 100644 --- a/osfmk/mach/prof_types.h +++ b/osfmk/mach/prof_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
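
[Note on the PROCESSOR_CPU_LOAD_INFO flavor above] host_processor_info() returns one processor_cpu_load_info per CPU, with ticks accumulating per CPU_STATE_* bucket. A user-space sketch (sample_cpu_ticks is illustrative; a real monitor would diff two samples taken over an interval):

    #include <mach/mach.h>

    /* Sketch: read per-CPU tick counters via the flavor defined above. */
    static kern_return_t
    sample_cpu_ticks(void)
    {
        natural_t ncpu = 0;
        processor_info_array_t info = NULL;
        mach_msg_type_number_t info_count = 0;

        kern_return_t kr = host_processor_info(mach_host_self(),
            PROCESSOR_CPU_LOAD_INFO, &ncpu, &info, &info_count);
        if (kr != KERN_SUCCESS) {
            return kr;
        }

        processor_cpu_load_info_t loads = (processor_cpu_load_info_t)info;
        for (natural_t i = 0; i < ncpu; i++) {
            unsigned busy = loads[i].cpu_ticks[CPU_STATE_USER] +
                            loads[i].cpu_ticks[CPU_STATE_SYSTEM];
            (void)busy;  /* e.g. compare against CPU_STATE_IDLE over time */
        }

        /* MIG hands the array back out-of-line; release it. */
        (void)vm_deallocate(mach_task_self(), (vm_address_t)info,
            info_count * sizeof(*info));
        return KERN_SUCCESS;
    }
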
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:30 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,54 +38,54 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.2.7.2 1995/01/26 22:15:46 ezf - * corrected CR - * [1995/01/26 21:16:02 ezf] + * corrected CR + * [1995/01/26 21:16:02 ezf] * * Revision 1.2.3.2 1993/06/09 02:43:16 gm - * Added to OSF/1 R1.3 from NMK15.0. - * [1993/06/02 21:18:04 jeffc] - * + * Added to OSF/1 R1.3 from NMK15.0. + * [1993/06/02 21:18:04 jeffc] + * * Revision 1.2 1993/04/19 16:39:03 devrcs - * ansi C conformance changes - * [1993/02/02 18:54:26 david] - * + * ansi C conformance changes + * [1993/02/02 18:54:26 david] + * * Revision 1.1 1992/09/30 02:32:04 robert - * Initial revision - * + * Initial revision + * * $EndLog$ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ /* */ -#ifndef _MACH_PROF_TYPES_H -#define _MACH_PROF_TYPES_H +#ifndef _MACH_PROF_TYPES_H +#define _MACH_PROF_TYPES_H -#define SAMPLE_MAX 256 /* Max array size */ -typedef unsigned sample_array_t[SAMPLE_MAX]; +#define SAMPLE_MAX 256 /* Max array size */ +typedef unsigned sample_array_t[SAMPLE_MAX]; -#endif /* _MACH_PROF_TYPES_H */ +#endif /* _MACH_PROF_TYPES_H */ diff --git a/osfmk/mach/resource_monitors.h b/osfmk/mach/resource_monitors.h index a6bad0b6c..f18900bce 100644 --- a/osfmk/mach/resource_monitors.h +++ b/osfmk/mach/resource_monitors.h @@ -47,7 +47,7 @@ typedef uint64_t resource_notify_flags_t; #define kRNFatalLimitFlag (1ULL << 32) /* For the disk writes I/O monitor. - The default is logical writes. */ + * The default is logical writes. */ #define kRNPhysicalWritesFlag (1ULL < 1) /* TEMPORARY compatibility, to be removed */ @@ -65,8 +65,8 @@ typedef uint64_t resource_notify_flags_t; */ #define MAXCOMLEN 16 -typedef char command_t[MAXCOMLEN+1]; -typedef char proc_name_t[2*MAXCOMLEN+1]; +typedef char command_t[MAXCOMLEN + 1]; +typedef char proc_name_t[2*MAXCOMLEN + 1]; typedef char posix_path_t[PATH_MAX]; __END_DECLS diff --git a/osfmk/mach/rpc.h b/osfmk/mach/rpc.h index 565d7ac6d..f3361d769 100644 --- a/osfmk/mach/rpc.h +++ b/osfmk/mach/rpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -33,7 +33,7 @@ * Mach RPC Subsystem Interfaces */ -#ifndef _MACH_RPC_H_ +#ifndef _MACH_RPC_H_ #define _MACH_RPC_H_ #include @@ -57,39 +57,39 @@ * Basic mach rpc types. */ typedef unsigned int routine_arg_type; -typedef unsigned int routine_arg_offset; -typedef unsigned int routine_arg_size; +typedef unsigned int routine_arg_offset; +typedef unsigned int routine_arg_size; /* * Definitions for a signature's argument and routine descriptor's. */ struct rpc_routine_arg_descriptor { - routine_arg_type type; /* Port, Array, etc. */ - routine_arg_size size; /* element size in bytes */ - routine_arg_size count; /* number of elements */ - routine_arg_offset offset; /* Offset in list of routine args */ + routine_arg_type type; /* Port, Array, etc. 
*/ + routine_arg_size size; /* element size in bytes */ + routine_arg_size count; /* number of elements */ + routine_arg_offset offset; /* Offset in list of routine args */ }; typedef struct rpc_routine_arg_descriptor *rpc_routine_arg_descriptor_t; struct rpc_routine_descriptor { - mig_impl_routine_t impl_routine; /* Server work func pointer */ - mig_stub_routine_t stub_routine; /* Unmarshalling func pointer */ - unsigned int argc; /* Number of argument words */ - unsigned int descr_count; /* Number of complex argument */ - /* descriptors */ + mig_impl_routine_t impl_routine; /* Server work func pointer */ + mig_stub_routine_t stub_routine; /* Unmarshalling func pointer */ + unsigned int argc; /* Number of argument words */ + unsigned int descr_count; /* Number of complex argument */ + /* descriptors */ rpc_routine_arg_descriptor_t - arg_descr; /* Pointer to beginning of */ - /* the arg_descr array */ - unsigned int max_reply_msg; /* Max size for reply msg */ + arg_descr; /* Pointer to beginning of */ + /* the arg_descr array */ + unsigned int max_reply_msg; /* Max size for reply msg */ }; typedef struct rpc_routine_descriptor *rpc_routine_descriptor_t; #define RPC_DESCR_SIZE(x) ((x)->descr_count * \ - sizeof(struct rpc_routine_arg_descriptor)) + sizeof(struct rpc_routine_arg_descriptor)) struct rpc_signature { - struct rpc_routine_descriptor rd; - struct rpc_routine_arg_descriptor rad[1]; + struct rpc_routine_descriptor rd; + struct rpc_routine_arg_descriptor rad[1]; }; #define RPC_SIGBUF_SIZE 8 @@ -113,23 +113,23 @@ struct rpc_signature { * contiguous. */ struct rpc_subsystem { - void *reserved; /* Reserved for system use */ + void *reserved; /* Reserved for system use */ - mach_msg_id_t start; /* Min routine number */ - mach_msg_id_t end; /* Max routine number + 1 */ - unsigned int maxsize; /* Max mach_msg size */ - vm_address_t base_addr; /* Address of this struct in user */ + mach_msg_id_t start; /* Min routine number */ + mach_msg_id_t end; /* Max routine number + 1 */ + unsigned int maxsize; /* Max mach_msg size */ + vm_address_t base_addr; /* Address of this struct in user */ - struct rpc_routine_descriptor /* Array of routine descriptors */ - routine[1 /* Actually, (start-end+1) */ - ]; + struct rpc_routine_descriptor /* Array of routine descriptors */ + routine[1 /* Actually, (start-end+1) */ + ]; struct rpc_routine_arg_descriptor - arg_descriptor[1 /* Actually, the sum of the descr_ */ - ]; /* count fields for all routines */ + arg_descriptor[1 /* Actually, the sum of the descr_ */ + ]; /* count fields for all routines */ }; typedef struct rpc_subsystem *rpc_subsystem_t; -#define RPC_SUBSYSTEM_NULL ((rpc_subsystem_t) 0) +#define RPC_SUBSYSTEM_NULL ((rpc_subsystem_t) 0) -#endif /* _MACH_RPC_H_ */ +#endif /* _MACH_RPC_H_ */ diff --git a/osfmk/mach/sdt.h b/osfmk/mach/sdt.h index 3268551b2..dd604aeac 100644 --- a/osfmk/mach/sdt.h +++ b/osfmk/mach/sdt.h @@ -25,8 +25,8 @@ */ #ifndef _MACH_SDT_H -#define _MACH_SDT_H +#define _MACH_SDT_H #include -#endif /* _MACH_SDT_H */ +#endif /* _MACH_SDT_H */ diff --git a/osfmk/mach/semaphore.h b/osfmk/mach/semaphore.h index 36ba3d00f..6c31f21ea 100644 --- a/osfmk/mach/semaphore.h +++ b/osfmk/mach/semaphore.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -52,88 +52,88 @@ #include __BEGIN_DECLS -extern kern_return_t semaphore_signal (semaphore_t semaphore); -extern kern_return_t semaphore_signal_all (semaphore_t semaphore); +extern kern_return_t semaphore_signal(semaphore_t semaphore); +extern kern_return_t semaphore_signal_all(semaphore_t semaphore); -extern kern_return_t semaphore_wait (semaphore_t semaphore); +extern kern_return_t semaphore_wait(semaphore_t semaphore); -#ifdef KERNEL +#ifdef KERNEL -#ifdef __LP64__ +#ifdef __LP64__ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -extern kern_return_t semaphore_timedwait (semaphore_t semaphore, - mach_timespec_t wait_time); +extern kern_return_t semaphore_timedwait(semaphore_t semaphore, + mach_timespec_t wait_time); -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -#else /* __LP64__ */ +#else /* __LP64__ */ -extern kern_return_t semaphore_timedwait (semaphore_t semaphore, - mach_timespec_t wait_time); +extern kern_return_t semaphore_timedwait(semaphore_t semaphore, + mach_timespec_t wait_time); -#endif /* __LP64__ */ +#endif /* __LP64__ */ -extern kern_return_t semaphore_wait_deadline (semaphore_t semaphore, - uint64_t deadline); -extern kern_return_t semaphore_wait_noblock (semaphore_t semaphore); +extern kern_return_t semaphore_wait_deadline(semaphore_t semaphore, + uint64_t deadline); +extern kern_return_t semaphore_wait_noblock(semaphore_t semaphore); -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE -extern kern_return_t semaphore_wait_signal (semaphore_t wait_semaphore, - semaphore_t signal_semaphore); +extern kern_return_t semaphore_wait_signal(semaphore_t wait_semaphore, + semaphore_t signal_semaphore); -extern kern_return_t semaphore_timedwait_signal(semaphore_t wait_semaphore, - semaphore_t signal_semaphore, - mach_timespec_t wait_time); +extern kern_return_t semaphore_timedwait_signal(semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + mach_timespec_t wait_time); -extern kern_return_t semaphore_signal_thread (semaphore_t semaphore, - thread_t thread); +extern kern_return_t semaphore_signal_thread(semaphore_t semaphore, + thread_t thread); -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -#else /* KERNEL */ +#else /* KERNEL */ -extern kern_return_t semaphore_timedwait (semaphore_t semaphore, - mach_timespec_t wait_time); +extern kern_return_t semaphore_timedwait(semaphore_t semaphore, + mach_timespec_t wait_time); -extern kern_return_t semaphore_timedwait_signal(semaphore_t wait_semaphore, - semaphore_t signal_semaphore, - mach_timespec_t wait_time); +extern kern_return_t semaphore_timedwait_signal(semaphore_t wait_semaphore, + semaphore_t signal_semaphore, + mach_timespec_t wait_time); -extern 
kern_return_t semaphore_wait_signal (semaphore_t wait_semaphore, - semaphore_t signal_semaphore); +extern kern_return_t semaphore_wait_signal(semaphore_t wait_semaphore, + semaphore_t signal_semaphore); -extern kern_return_t semaphore_signal_thread (semaphore_t semaphore, - thread_t thread); +extern kern_return_t semaphore_signal_thread(semaphore_t semaphore, + thread_t thread); -#endif /* KERNEL */ +#endif /* KERNEL */ __END_DECLS -#ifdef PRIVATE +#ifdef PRIVATE -#define SEMAPHORE_OPTION_NONE 0x00000000 +#define SEMAPHORE_OPTION_NONE 0x00000000 -#define SEMAPHORE_SIGNAL 0x00000001 -#define SEMAPHORE_WAIT 0x00000002 -#define SEMAPHORE_WAIT_ON_SIGNAL 0x00000008 +#define SEMAPHORE_SIGNAL 0x00000001 +#define SEMAPHORE_WAIT 0x00000002 +#define SEMAPHORE_WAIT_ON_SIGNAL 0x00000008 -#define SEMAPHORE_SIGNAL_TIMEOUT 0x00000010 -#define SEMAPHORE_SIGNAL_ALL 0x00000020 -#define SEMAPHORE_SIGNAL_INTERRUPT 0x00000040 /* libmach implements */ -#define SEMAPHORE_SIGNAL_PREPOST 0x00000080 +#define SEMAPHORE_SIGNAL_TIMEOUT 0x00000010 +#define SEMAPHORE_SIGNAL_ALL 0x00000020 +#define SEMAPHORE_SIGNAL_INTERRUPT 0x00000040 /* libmach implements */ +#define SEMAPHORE_SIGNAL_PREPOST 0x00000080 -#define SEMAPHORE_WAIT_TIMEOUT 0x00000100 -#define SEMAPHORE_WAIT_INTERRUPT 0x00000400 /* libmach implements */ +#define SEMAPHORE_WAIT_TIMEOUT 0x00000100 +#define SEMAPHORE_WAIT_INTERRUPT 0x00000400 /* libmach implements */ -#define SEMAPHORE_TIMEOUT_NOBLOCK 0x00100000 -#define SEMAPHORE_TIMEOUT_RELATIVE 0x00200000 +#define SEMAPHORE_TIMEOUT_NOBLOCK 0x00100000 +#define SEMAPHORE_TIMEOUT_RELATIVE 0x00200000 -#define SEMAPHORE_USE_SAVED_RESULT 0x01000000 /* internal use only */ -#define SEMAPHORE_SIGNAL_RELEASE 0x02000000 /* internal use only */ +#define SEMAPHORE_USE_SAVED_RESULT 0x01000000 /* internal use only */ +#define SEMAPHORE_SIGNAL_RELEASE 0x02000000 /* internal use only */ -#endif /* PRIVATE */ +#endif /* PRIVATE */ -#endif /* _MACH_SEMAPHORE_H_ */ +#endif /* _MACH_SEMAPHORE_H_ */ diff --git a/osfmk/mach/sfi_class.h b/osfmk/mach/sfi_class.h index 8f856435b..3841f37dd 100644 --- a/osfmk/mach/sfi_class.h +++ b/osfmk/mach/sfi_class.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,12 +22,12 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifndef _MACH_SFI_CLASS_H_ -#define _MACH_SFI_CLASS_H_ +#ifndef _MACH_SFI_CLASS_H_ +#define _MACH_SFI_CLASS_H_ #include #include @@ -52,12 +52,12 @@ typedef uint32_t sfi_class_id_t; * Total number of classes supported including SFI_CLASS_UNSPECIFIED. 
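
[Note on the semaphore declarations above] The user-space half of mach/semaphore.h covers only the wait/signal family; creation and destruction go through the task interface. A minimal round-trip sketch (semaphore_demo is illustrative):

    #include <mach/mach.h>
    #include <mach/semaphore.h>
    #include <mach/sync_policy.h>

    /* Sketch: create a counting semaphore, post it, consume the post
     * with a bounded wait, then destroy it. */
    static kern_return_t
    semaphore_demo(void)
    {
        semaphore_t sem;
        kern_return_t kr = semaphore_create(mach_task_self(), &sem,
            SYNC_POLICY_FIFO, 0 /* initial value */);
        if (kr != KERN_SUCCESS) {
            return kr;
        }

        (void)semaphore_signal(sem);                  /* value 0 -> 1 */

        mach_timespec_t wait_time = { .tv_sec = 1, .tv_nsec = 0 };
        kr = semaphore_timedwait(sem, wait_time);     /* consumes the post */

        (void)semaphore_destroy(mach_task_self(), sem);
        return kr;
    }
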
* If new class is defined increase this number. */ -#define MAX_SFI_CLASS_ID 0x00000011 +#define MAX_SFI_CLASS_ID 0x00000011 /* * Threads may initially start out unspecified */ -#define SFI_CLASS_UNSPECIFIED 0x00000000 +#define SFI_CLASS_UNSPECIFIED 0x00000000 #endif /* XNU_KERNEL_PRIVATE */ @@ -66,13 +66,13 @@ typedef uint32_t sfi_class_id_t; * processes in a background state using APIs such as setpriority(2), * specifying PRIO_DARWIN_THREAD or PRIO_DARWIN_PROCESS. */ -#define SFI_CLASS_DARWIN_BG 0x00000001 +#define SFI_CLASS_DARWIN_BG 0x00000001 /* * Threads are placed in this class as a result of an application * entering "Nap mode". */ -#define SFI_CLASS_APP_NAP 0x00000002 +#define SFI_CLASS_APP_NAP 0x00000002 /* * Threads are placed in this class by making per coalition (by @@ -81,11 +81,11 @@ typedef uint32_t sfi_class_id_t; * SFI_CLASS_MANAGED)). FOCAL/NONFOCAL is a function of a task's * role. */ -#define SFI_CLASS_MANAGED_FOCAL 0x00000003 +#define SFI_CLASS_MANAGED_FOCAL 0x00000003 -#define SFI_CLASS_MANAGED_NONFOCAL 0x00000004 +#define SFI_CLASS_MANAGED_NONFOCAL 0x00000004 -#define SFI_CLASS_MANAGED SFI_CLASS_MANAGED_FOCAL +#define SFI_CLASS_MANAGED SFI_CLASS_MANAGED_FOCAL /* * Coalitions/processes that have not been explicitly tagged @@ -93,18 +93,18 @@ typedef uint32_t sfi_class_id_t; * fall into the default categories. FOCAL/NONFOCAL is a function * of a task's role. */ -#define SFI_CLASS_DEFAULT_FOCAL 0x00000005 +#define SFI_CLASS_DEFAULT_FOCAL 0x00000005 -#define SFI_CLASS_DEFAULT_NONFOCAL 0x00000006 +#define SFI_CLASS_DEFAULT_NONFOCAL 0x00000006 -#define SFI_CLASS_DEFAULT SFI_CLASS_DEFAULT_FOCAL +#define SFI_CLASS_DEFAULT SFI_CLASS_DEFAULT_FOCAL /* * Threads that are part of the kernel task should be duty-cycled * only as an extreme last resort, since they must be preempted * while locks may be held in kernel mode. */ -#define SFI_CLASS_KERNEL 0x00000007 +#define SFI_CLASS_KERNEL 0x00000007 /* * Threads that must not be part of "Selective Forced Idle" are @@ -112,15 +112,15 @@ typedef uint32_t sfi_class_id_t; * processes such as WindowServer that are critical to good user * experience, should be placed in this class. */ -#define SFI_CLASS_OPTED_OUT 0x00000008 +#define SFI_CLASS_OPTED_OUT 0x00000008 /* * Threads running in various QOS classes */ -#define SFI_CLASS_UTILITY 0x00000009 -#define SFI_CLASS_LEGACY_FOCAL 0x0000000A -#define SFI_CLASS_LEGACY_NONFOCAL 0x0000000B -#define SFI_CLASS_USER_INITIATED_FOCAL 0x0000000C +#define SFI_CLASS_UTILITY 0x00000009 +#define SFI_CLASS_LEGACY_FOCAL 0x0000000A +#define SFI_CLASS_LEGACY_NONFOCAL 0x0000000B +#define SFI_CLASS_USER_INITIATED_FOCAL 0x0000000C #define SFI_CLASS_USER_INITIATED_NONFOCAL 0x0000000D #define SFI_CLASS_USER_INTERACTIVE_FOCAL 0x0000000E #define SFI_CLASS_USER_INTERACTIVE_NONFOCAL 0x0000000F @@ -130,6 +130,6 @@ typedef uint32_t sfi_class_id_t; * Windows that are specified smaller than MIN_SFI_WINDOW_USEC * will be automatically rounded up. */ -#define MIN_SFI_WINDOW_USEC 500 +#define MIN_SFI_WINDOW_USEC 500 -#endif /* _MACH_SFI_CLASS_H_ */ +#endif /* _MACH_SFI_CLASS_H_ */ diff --git a/osfmk/mach/shared_memory_server.h b/osfmk/mach/shared_memory_server.h index bd792319c..c4eeb4726 100644 --- a/osfmk/mach/shared_memory_server.h +++ b/osfmk/mach/shared_memory_server.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. 
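
[Note on MIN_SFI_WINDOW_USEC above] The minimum window is a floor, not a validation limit — shorter windows are silently rounded up. A one-line restatement of that rule (clamp_sfi_window is an illustrative helper; the header is kernel-facing, so the constant is mirrored locally rather than included):

    #include <stdint.h>

    #define MIN_SFI_WINDOW_USEC 500  /* mirrors the definition above */

    /* Sketch: the rounding rule the comment above describes. */
    static uint64_t
    clamp_sfi_window(uint64_t usec)
    {
        return usec < MIN_SFI_WINDOW_USEC ? MIN_SFI_WINDOW_USEC : usec;
    }
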
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,20 +22,20 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * * File: mach/shared_memory_server.h * - * protos and struct definitions for shared library + * protos and struct definitions for shared library * server and interface */ /* * XXX - * + * * NOTE: this file is deprecated and will be removed in the near future. * Any project that includes this file should be changed to: * 1. use instead of this file, @@ -55,40 +55,40 @@ #define VM_PROT_COW 0x8 /* must not interfere with normal prot assignments */ #define VM_PROT_ZF 0x10 /* must not interfere with normal prot assignments */ -#ifdef __arm__ -#define GLOBAL_SHARED_TEXT_SEGMENT 0x30000000U -#define GLOBAL_SHARED_DATA_SEGMENT 0x38000000U -#define GLOBAL_SHARED_SEGMENT_MASK 0xF8000000U +#ifdef __arm__ +#define GLOBAL_SHARED_TEXT_SEGMENT 0x30000000U +#define GLOBAL_SHARED_DATA_SEGMENT 0x38000000U +#define GLOBAL_SHARED_SEGMENT_MASK 0xF8000000U -#define SHARED_TEXT_REGION_SIZE 0x08000000 -#define SHARED_DATA_REGION_SIZE 0x08000000 +#define SHARED_TEXT_REGION_SIZE 0x08000000 +#define SHARED_DATA_REGION_SIZE 0x08000000 #else -#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U -#define GLOBAL_SHARED_DATA_SEGMENT 0xA0000000U -#define GLOBAL_SHARED_SEGMENT_MASK 0xF0000000U +#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U +#define GLOBAL_SHARED_DATA_SEGMENT 0xA0000000U +#define GLOBAL_SHARED_SEGMENT_MASK 0xF0000000U -#define SHARED_TEXT_REGION_SIZE 0x10000000 -#define SHARED_DATA_REGION_SIZE 0x10000000 +#define SHARED_TEXT_REGION_SIZE 0x10000000 +#define SHARED_DATA_REGION_SIZE 0x10000000 #endif #if !defined(__LP64__) -#define SHARED_LIBRARY_SERVER_SUPPORTED +#define SHARED_LIBRARY_SERVER_SUPPORTED -#define SHARED_ALTERNATE_LOAD_BASE 0x09000000 +#define SHARED_ALTERNATE_LOAD_BASE 0x09000000 -/* - * Note: the two masks below are useful because the assumption is - * made that these shared regions will always be mapped on natural boundaries - * i.e. if the size is 0x10000000 the object can be mapped at +/* + * Note: the two masks below are useful because the assumption is + * made that these shared regions will always be mapped on natural boundaries + * i.e. 
if the size is 0x10000000 the object can be mapped at * 0x20000000, or 0x30000000, but not 0x1000000 */ -#ifdef __arm__ -#define SHARED_TEXT_REGION_MASK 0x07FFFFFF -#define SHARED_DATA_REGION_MASK 0x07FFFFFF +#ifdef __arm__ +#define SHARED_TEXT_REGION_MASK 0x07FFFFFF +#define SHARED_DATA_REGION_MASK 0x07FFFFFF #else -#define SHARED_TEXT_REGION_MASK 0x0FFFFFFF -#define SHARED_DATA_REGION_MASK 0x0FFFFFFF +#define SHARED_TEXT_REGION_MASK 0x0FFFFFFF +#define SHARED_DATA_REGION_MASK 0x0FFFFFFF #endif @@ -97,7 +97,7 @@ /* IN */ #define ALTERNATE_LOAD_SITE 0x1 #define NEW_LOCAL_SHARED_REGIONS 0x2 -#define QUERY_IS_SYSTEM_REGION 0x4 +#define QUERY_IS_SYSTEM_REGION 0x4 /* OUT */ #define SF_PREV_LOADED 0x1 @@ -105,43 +105,43 @@ struct sf_mapping { - vm_offset_t mapping_offset; - vm_size_t size; - vm_offset_t file_offset; - vm_prot_t protection; /* read/write/execute/COW/ZF */ - vm_offset_t cksum; + vm_offset_t mapping_offset; + vm_size_t size; + vm_offset_t file_offset; + vm_prot_t protection; /* read/write/execute/COW/ZF */ + vm_offset_t cksum; }; typedef struct sf_mapping sf_mapping_t; #endif /* !defined(__LP64__) */ -/* +/* * All shared_region_* declarations are a private interface * between dyld and the kernel. * */ struct shared_file_mapping_np { - mach_vm_address_t sfm_address; - mach_vm_size_t sfm_size; - mach_vm_offset_t sfm_file_offset; - vm_prot_t sfm_max_prot; - vm_prot_t sfm_init_prot; + mach_vm_address_t sfm_address; + mach_vm_size_t sfm_size; + mach_vm_offset_t sfm_file_offset; + vm_prot_t sfm_max_prot; + vm_prot_t sfm_init_prot; }; struct shared_region_range_np { - mach_vm_address_t srr_address; - mach_vm_size_t srr_size; + mach_vm_address_t srr_address; + mach_vm_size_t srr_size; }; #ifndef KERNEL __BEGIN_DECLS -int shared_region_map_file_np(int fd, - uint32_t mappingCount, - const struct shared_file_mapping_np *mappings, - int64_t *slide_p); -int shared_region_make_private_np(uint32_t rangeCount, - const struct shared_region_range_np *ranges); +int shared_region_map_file_np(int fd, + uint32_t mappingCount, + const struct shared_file_mapping_np *mappings, + int64_t *slide_p); +int shared_region_make_private_np(uint32_t rangeCount, + const struct shared_region_range_np *ranges); __END_DECLS #endif /* !KERNEL */ diff --git a/osfmk/mach/shared_region.h b/osfmk/mach/shared_region.h index d5799dc2e..be70167dc 100644 --- a/osfmk/mach/shared_region.h +++ b/osfmk/mach/shared_region.h @@ -2,14 +2,14 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,14 +17,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_LICENSE_HEADER_END@ */ /* * * File: mach/shared_region.h * - * protos and struct definitions for shared region + * protos and struct definitions for shared region */ #ifndef _MACH_SHARED_REGION_H_ @@ -35,80 +35,80 @@ #include #include -#define SHARED_REGION_BASE_I386 0x90000000ULL -#define SHARED_REGION_SIZE_I386 0x20000000ULL -#define SHARED_REGION_NESTING_BASE_I386 0x90000000ULL -#define SHARED_REGION_NESTING_SIZE_I386 0x20000000ULL -#define SHARED_REGION_NESTING_MIN_I386 0x00200000ULL -#define SHARED_REGION_NESTING_MAX_I386 0xFFE00000ULL - -#define SHARED_REGION_BASE_X86_64 0x00007FFF00000000ULL -#define SHARED_REGION_SIZE_X86_64 0x00000000FFE00000ULL -#define SHARED_REGION_NESTING_BASE_X86_64 0x00007FFF00000000ULL -#define SHARED_REGION_NESTING_SIZE_X86_64 0x00000000FFE00000ULL -#define SHARED_REGION_NESTING_MIN_X86_64 0x0000000000200000ULL -#define SHARED_REGION_NESTING_MAX_X86_64 0xFFFFFFFFFFE00000ULL - -#define SHARED_REGION_BASE_PPC 0x90000000ULL -#define SHARED_REGION_SIZE_PPC 0x20000000ULL -#define SHARED_REGION_NESTING_BASE_PPC 0x90000000ULL -#define SHARED_REGION_NESTING_SIZE_PPC 0x10000000ULL -#define SHARED_REGION_NESTING_MIN_PPC 0x10000000ULL -#define SHARED_REGION_NESTING_MAX_PPC 0x10000000ULL - -#define SHARED_REGION_BASE_PPC64 0x00007FFF60000000ULL -#define SHARED_REGION_SIZE_PPC64 0x00000000A0000000ULL -#define SHARED_REGION_NESTING_BASE_PPC64 0x00007FFF60000000ULL -#define SHARED_REGION_NESTING_SIZE_PPC64 0x00000000A0000000ULL -#define SHARED_REGION_NESTING_MIN_PPC64 0x0000000010000000ULL -#define SHARED_REGION_NESTING_MAX_PPC64 0x0000000010000000ULL - -#define SHARED_REGION_BASE_ARM 0x1A000000ULL -#define SHARED_REGION_SIZE_ARM 0x26000000ULL -#define SHARED_REGION_NESTING_BASE_ARM 0x1A000000ULL -#define SHARED_REGION_NESTING_SIZE_ARM 0x26000000ULL -#define SHARED_REGION_NESTING_MIN_ARM ? -#define SHARED_REGION_NESTING_MAX_ARM ? +#define SHARED_REGION_BASE_I386 0x90000000ULL +#define SHARED_REGION_SIZE_I386 0x20000000ULL +#define SHARED_REGION_NESTING_BASE_I386 0x90000000ULL +#define SHARED_REGION_NESTING_SIZE_I386 0x20000000ULL +#define SHARED_REGION_NESTING_MIN_I386 0x00200000ULL +#define SHARED_REGION_NESTING_MAX_I386 0xFFE00000ULL + +#define SHARED_REGION_BASE_X86_64 0x00007FFF00000000ULL +#define SHARED_REGION_SIZE_X86_64 0x00000000FFE00000ULL +#define SHARED_REGION_NESTING_BASE_X86_64 0x00007FFF00000000ULL +#define SHARED_REGION_NESTING_SIZE_X86_64 0x00000000FFE00000ULL +#define SHARED_REGION_NESTING_MIN_X86_64 0x0000000000200000ULL +#define SHARED_REGION_NESTING_MAX_X86_64 0xFFFFFFFFFFE00000ULL + +#define SHARED_REGION_BASE_PPC 0x90000000ULL +#define SHARED_REGION_SIZE_PPC 0x20000000ULL +#define SHARED_REGION_NESTING_BASE_PPC 0x90000000ULL +#define SHARED_REGION_NESTING_SIZE_PPC 0x10000000ULL +#define SHARED_REGION_NESTING_MIN_PPC 0x10000000ULL +#define SHARED_REGION_NESTING_MAX_PPC 0x10000000ULL + +#define SHARED_REGION_BASE_PPC64 0x00007FFF60000000ULL +#define SHARED_REGION_SIZE_PPC64 0x00000000A0000000ULL +#define SHARED_REGION_NESTING_BASE_PPC64 0x00007FFF60000000ULL +#define SHARED_REGION_NESTING_SIZE_PPC64 0x00000000A0000000ULL +#define SHARED_REGION_NESTING_MIN_PPC64 0x0000000010000000ULL +#define SHARED_REGION_NESTING_MAX_PPC64 0x0000000010000000ULL + +#define SHARED_REGION_BASE_ARM 0x1A000000ULL +#define SHARED_REGION_SIZE_ARM 0x26000000ULL +#define SHARED_REGION_NESTING_BASE_ARM 0x1A000000ULL +#define SHARED_REGION_NESTING_SIZE_ARM 0x26000000ULL +#define SHARED_REGION_NESTING_MIN_ARM ? +#define SHARED_REGION_NESTING_MAX_ARM ? 
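Because SHARED_REGION_BASE and SHARED_REGION_SIZE collapse at compile time to the per-architecture constants above, testing whether an address falls inside the dyld shared region is a plain range check. A small sketch, assuming the installed <mach/shared_region.h> header; in_shared_region is a hypothetical helper name, not part of this interface:

#include <stdbool.h>
#include <stdint.h>
#include <mach/shared_region.h>

/* True when addr lies inside this architecture's shared region.
 * SHARED_REGION_BASE/SIZE expand to the _I386/_X86_64/_ARM/_ARM64
 * constants above, depending on the compilation target. */
static bool
in_shared_region(uint64_t addr)
{
        return addr >= SHARED_REGION_BASE &&
            addr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
}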
#ifdef XNU_KERNEL_PRIVATE /* ARM64_TODO: move to higher memory */ #endif -#define SHARED_REGION_BASE_ARM64 0x180000000ULL -#define SHARED_REGION_SIZE_ARM64 0x100000000ULL -#define SHARED_REGION_NESTING_BASE_ARM64 0x180000000ULL -#define SHARED_REGION_NESTING_SIZE_ARM64 0x100000000ULL -#define SHARED_REGION_NESTING_MIN_ARM64 ? -#define SHARED_REGION_NESTING_MAX_ARM64 ? +#define SHARED_REGION_BASE_ARM64 0x180000000ULL +#define SHARED_REGION_SIZE_ARM64 0x100000000ULL +#define SHARED_REGION_NESTING_BASE_ARM64 0x180000000ULL +#define SHARED_REGION_NESTING_SIZE_ARM64 0x100000000ULL +#define SHARED_REGION_NESTING_MIN_ARM64 ? +#define SHARED_REGION_NESTING_MAX_ARM64 ? #if defined(__i386__) -#define SHARED_REGION_BASE SHARED_REGION_BASE_I386 -#define SHARED_REGION_SIZE SHARED_REGION_SIZE_I386 -#define SHARED_REGION_NESTING_BASE SHARED_REGION_NESTING_BASE_I386 -#define SHARED_REGION_NESTING_SIZE SHARED_REGION_NESTING_SIZE_I386 -#define SHARED_REGION_NESTING_MIN SHARED_REGION_NESTING_MIN_I386 -#define SHARED_REGION_NESTING_MAX SHARED_REGION_NESTING_MAX_I386 +#define SHARED_REGION_BASE SHARED_REGION_BASE_I386 +#define SHARED_REGION_SIZE SHARED_REGION_SIZE_I386 +#define SHARED_REGION_NESTING_BASE SHARED_REGION_NESTING_BASE_I386 +#define SHARED_REGION_NESTING_SIZE SHARED_REGION_NESTING_SIZE_I386 +#define SHARED_REGION_NESTING_MIN SHARED_REGION_NESTING_MIN_I386 +#define SHARED_REGION_NESTING_MAX SHARED_REGION_NESTING_MAX_I386 #elif defined(__x86_64__) -#define SHARED_REGION_BASE SHARED_REGION_BASE_X86_64 -#define SHARED_REGION_SIZE SHARED_REGION_SIZE_X86_64 -#define SHARED_REGION_NESTING_BASE SHARED_REGION_NESTING_BASE_X86_64 -#define SHARED_REGION_NESTING_SIZE SHARED_REGION_NESTING_SIZE_X86_64 -#define SHARED_REGION_NESTING_MIN SHARED_REGION_NESTING_MIN_X86_64 -#define SHARED_REGION_NESTING_MAX SHARED_REGION_NESTING_MAX_X86_64 +#define SHARED_REGION_BASE SHARED_REGION_BASE_X86_64 +#define SHARED_REGION_SIZE SHARED_REGION_SIZE_X86_64 +#define SHARED_REGION_NESTING_BASE SHARED_REGION_NESTING_BASE_X86_64 +#define SHARED_REGION_NESTING_SIZE SHARED_REGION_NESTING_SIZE_X86_64 +#define SHARED_REGION_NESTING_MIN SHARED_REGION_NESTING_MIN_X86_64 +#define SHARED_REGION_NESTING_MAX SHARED_REGION_NESTING_MAX_X86_64 #elif defined(__arm__) -#define SHARED_REGION_BASE SHARED_REGION_BASE_ARM -#define SHARED_REGION_SIZE SHARED_REGION_SIZE_ARM -#define SHARED_REGION_NESTING_BASE SHARED_REGION_NESTING_BASE_ARM -#define SHARED_REGION_NESTING_SIZE SHARED_REGION_NESTING_SIZE_ARM -#define SHARED_REGION_NESTING_MIN SHARED_REGION_NESTING_MIN_ARM -#define SHARED_REGION_NESTING_MAX SHARED_REGION_NESTING_MAX_ARM +#define SHARED_REGION_BASE SHARED_REGION_BASE_ARM +#define SHARED_REGION_SIZE SHARED_REGION_SIZE_ARM +#define SHARED_REGION_NESTING_BASE SHARED_REGION_NESTING_BASE_ARM +#define SHARED_REGION_NESTING_SIZE SHARED_REGION_NESTING_SIZE_ARM +#define SHARED_REGION_NESTING_MIN SHARED_REGION_NESTING_MIN_ARM +#define SHARED_REGION_NESTING_MAX SHARED_REGION_NESTING_MAX_ARM #elif defined(__arm64__) && defined(__LP64__) -#define SHARED_REGION_BASE SHARED_REGION_BASE_ARM64 -#define SHARED_REGION_SIZE SHARED_REGION_SIZE_ARM64 -#define SHARED_REGION_NESTING_BASE SHARED_REGION_NESTING_BASE_ARM64 -#define SHARED_REGION_NESTING_SIZE SHARED_REGION_NESTING_SIZE_ARM64 -#define SHARED_REGION_NESTING_MIN SHARED_REGION_NESTING_MIN_ARM64 -#define SHARED_REGION_NESTING_MAX SHARED_REGION_NESTING_MAX_ARM64 +#define SHARED_REGION_BASE SHARED_REGION_BASE_ARM64 +#define SHARED_REGION_SIZE SHARED_REGION_SIZE_ARM64 +#define SHARED_REGION_NESTING_BASE 
SHARED_REGION_NESTING_BASE_ARM64 +#define SHARED_REGION_NESTING_SIZE SHARED_REGION_NESTING_SIZE_ARM64 +#define SHARED_REGION_NESTING_MIN SHARED_REGION_NESTING_MIN_ARM64 +#define SHARED_REGION_NESTING_MAX SHARED_REGION_NESTING_MAX_ARM64 #endif #ifdef KERNEL_PRIVATE @@ -123,17 +123,17 @@ void post_sys_powersource(int); #endif /* KERNEL_PRIVATE */ -/* +/* * All shared_region_* declarations are a private interface * between dyld and the kernel. * */ struct shared_file_mapping_np { - mach_vm_address_t sfm_address; - mach_vm_size_t sfm_size; - mach_vm_offset_t sfm_file_offset; - vm_prot_t sfm_max_prot; - vm_prot_t sfm_init_prot; + mach_vm_address_t sfm_address; + mach_vm_size_t sfm_size; + mach_vm_offset_t sfm_file_offset; + vm_prot_t sfm_max_prot; + vm_prot_t sfm_init_prot; }; #define VM_PROT_COW 0x8 /* must not interfere with normal prot assignments */ #define VM_PROT_ZF 0x10 /* must not interfere with normal prot assignments */ @@ -142,11 +142,11 @@ struct shared_file_mapping_np { #ifndef KERNEL __BEGIN_DECLS -int shared_region_check_np(uint64_t *startaddress); -int shared_region_map_np(int fd, - uint32_t mappingCount, - const struct shared_file_mapping_np *mappings); -int shared_region_slide_np(void); +int shared_region_check_np(uint64_t *startaddress); +int shared_region_map_np(int fd, + uint32_t mappingCount, + const struct shared_file_mapping_np *mappings); +int shared_region_slide_np(void); __END_DECLS #endif /* !KERNEL */ diff --git a/osfmk/mach/std_types.h b/osfmk/mach/std_types.h index 2ad966c71..5815302d4 100644 --- a/osfmk/mach/std_types.h +++ b/osfmk/mach/std_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
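The shared_region_check_np() declaration above is the dyld half of this private interface: dyld calls it at startup to learn whether, and where, the shared region is already mapped in its address space. A sketch of that call, with the caveat that this is a dyld/kernel private interface, so the assumption that the SDK exports a userspace stub for it may not hold:

#include <stdint.h>
#include <stdio.h>
#include <mach/shared_region.h>  /* userspace prototypes (!KERNEL) */

static void
print_shared_region_start(void)
{
        uint64_t start = 0;

        /* Returns 0 on success and writes the region's start address;
         * illustrative only, since the interface is private to dyld. */
        if (shared_region_check_np(&start) == 0) {
                printf("shared region mapped at 0x%llx\n",
                    (unsigned long long)start);
        }
}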
- * + * Carnegie Mellon requests users of this software to return to - * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -60,8 +60,8 @@ * */ -#ifndef _MACH_STD_TYPES_H_ -#define _MACH_STD_TYPES_H_ +#ifndef _MACH_STD_TYPES_H_ +#define _MACH_STD_TYPES_H_ #include #include @@ -72,4 +72,4 @@ #include #include -#endif /* _MACH_STD_TYPES_H_ */ +#endif /* _MACH_STD_TYPES_H_ */ diff --git a/osfmk/mach/sync_policy.h b/osfmk/mach/sync_policy.h index 239d11baf..605388fcc 100644 --- a/osfmk/mach/sync_policy.h +++ b/osfmk/mach/sync_policy.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,14 +22,14 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -#ifndef _MACH_SYNC_POLICY_H_ +#ifndef _MACH_SYNC_POLICY_H_ #define _MACH_SYNC_POLICY_H_ typedef int sync_policy_t; @@ -37,13 +37,13 @@ typedef int sync_policy_t; /* * These options define the wait ordering of the synchronizers */ -#define SYNC_POLICY_FIFO 0x0 -#define SYNC_POLICY_FIXED_PRIORITY 0x1 -#define SYNC_POLICY_REVERSED 0x2 -#define SYNC_POLICY_ORDER_MASK 0x3 -#define SYNC_POLICY_LIFO (SYNC_POLICY_FIFO|SYNC_POLICY_REVERSED) +#define SYNC_POLICY_FIFO 0x0 +#define SYNC_POLICY_FIXED_PRIORITY 0x1 +#define SYNC_POLICY_REVERSED 0x2 +#define SYNC_POLICY_ORDER_MASK 0x3 +#define SYNC_POLICY_LIFO (SYNC_POLICY_FIFO|SYNC_POLICY_REVERSED) -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE /* * These options provide additional (kernel-private) behaviors @@ -59,8 +59,8 @@ typedef int sync_policy_t; #define SYNC_POLICY_TURNSTILE 0x10 #define SYNC_POLICY_PORT 0x10 -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -#define SYNC_POLICY_MAX 0x7 +#define SYNC_POLICY_MAX 0x7 -#endif /* _MACH_SYNC_POLICY_H_ */ +#endif /* _MACH_SYNC_POLICY_H_ */ diff --git a/osfmk/mach/task_info.h b/osfmk/mach/task_info.h index 62824b760..1248749f1 100644 --- a/osfmk/mach/task_info.h +++ b/osfmk/mach/task_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007, 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License').
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -62,8 +62,8 @@ * */ -#ifndef _MACH_TASK_INFO_H_ -#define _MACH_TASK_INFO_H_ +#ifndef _MACH_TASK_INFO_H_ +#define _MACH_TASK_INFO_H_ #include #include @@ -77,12 +77,12 @@ /* * Generic information structure to allow for expansion. */ -typedef natural_t task_flavor_t; -typedef integer_t *task_info_t; /* varying array of int */ +typedef natural_t task_flavor_t; +typedef integer_t *task_info_t; /* varying array of int */ /* Deprecated, use per structure _data_t's instead */ -#define TASK_INFO_MAX (1024) /* maximum array size */ -typedef integer_t task_info_data_t[TASK_INFO_MAX]; +#define TASK_INFO_MAX (1024) /* maximum array size */ +typedef integer_t task_info_data_t[TASK_INFO_MAX]; /* * Currently defined information structures. 
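Every flavor defined below is read through the same task_info() convention: the flavor constant, a buffer of the matching *_data_t type, and the corresponding *_COUNT passed as an in/out element count. A sketch using MACH_TASK_BASIC_INFO (defined further down in this header), assuming <mach/mach.h>; print_basic_info is a hypothetical helper:

#include <stdio.h>
#include <mach/mach.h>

static void
print_basic_info(void)
{
        mach_task_basic_info_data_t info;
        mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
        kern_return_t kr;

        kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
            (task_info_t)&info, &count);
        if (kr != KERN_SUCCESS) {
                fprintf(stderr, "task_info: %s\n", mach_error_string(kr));
                return;
        }
        printf("resident %llu bytes, suspend count %d\n",
            (unsigned long long)info.resident_size, info.suspend_count);
}

The same pattern reads TASK_VM_INFO below; when the returned count is at least TASK_VM_INFO_REV1_COUNT, that flavor also carries phys_footprint.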
@@ -95,97 +95,97 @@ typedef integer_t task_info_data_t[TASK_INFO_MAX]; #define TASK_BASIC2_INFO_32 6 struct task_basic_info_32 { - integer_t suspend_count; /* suspend count for task */ - natural_t virtual_size; /* virtual memory size (bytes) */ - natural_t resident_size; /* resident memory size (bytes) */ - time_value_t user_time; /* total user run time for - terminated threads */ - time_value_t system_time; /* total system run time for - terminated threads */ - policy_t policy; /* default policy for new threads */ + integer_t suspend_count; /* suspend count for task */ + natural_t virtual_size; /* virtual memory size (bytes) */ + natural_t resident_size; /* resident memory size (bytes) */ + time_value_t user_time; /* total user run time for + * terminated threads */ + time_value_t system_time; /* total system run time for + * terminated threads */ + policy_t policy; /* default policy for new threads */ }; typedef struct task_basic_info_32 task_basic_info_32_data_t; typedef struct task_basic_info_32 *task_basic_info_32_t; #define TASK_BASIC_INFO_32_COUNT \ - (sizeof(task_basic_info_32_data_t) / sizeof(natural_t)) + (sizeof(task_basic_info_32_data_t) / sizeof(natural_t)) /* Don't use this, use MACH_TASK_BASIC_INFO instead */ struct task_basic_info_64 { - integer_t suspend_count; /* suspend count for task */ + integer_t suspend_count; /* suspend count for task */ #if defined(__arm__) || defined(__arm64__) -#if defined(KERNEL) +#if defined(KERNEL) /* Compatibility with old 32-bit mach_vm_size_t */ - natural_t virtual_size; /* virtual memory size (bytes) */ - natural_t resident_size; /* resident memory size (bytes) */ -#else - mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ - mach_vm_size_t resident_size; /* resident memory size (bytes) */ -#endif + natural_t virtual_size; /* virtual memory size (bytes) */ + natural_t resident_size; /* resident memory size (bytes) */ +#else + mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ + mach_vm_size_t resident_size; /* resident memory size (bytes) */ +#endif #else /* defined(__arm__) || defined(__arm64__) */ - mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ - mach_vm_size_t resident_size; /* resident memory size (bytes) */ + mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ + mach_vm_size_t resident_size; /* resident memory size (bytes) */ #endif /* defined(__arm__) || defined(__arm64__) */ - time_value_t user_time; /* total user run time for - terminated threads */ - time_value_t system_time; /* total system run time for - terminated threads */ - policy_t policy; /* default policy for new threads */ + time_value_t user_time; /* total user run time for + * terminated threads */ + time_value_t system_time; /* total system run time for + * terminated threads */ + policy_t policy; /* default policy for new threads */ }; typedef struct task_basic_info_64 task_basic_info_64_data_t; typedef struct task_basic_info_64 *task_basic_info_64_t; #if defined(__arm__) || defined(__arm64__) - #if defined(KERNEL) - /* - * Backwards-compatibility for old mach_vm*_t types. - * The kernel knows about old and new, and if you are compiled - * to run on an earlier iOS version, you interact with the old - * (narrow) version. If you are compiled for a newer OS - * version, however, you are mapped to the wide version. - */ - - #define TASK_BASIC_INFO_64 5 + #if defined(KERNEL) +/* + * Backwards-compatibility for old mach_vm*_t types. 
+ * The kernel knows about old and new, and if you are compiled + * to run on an earlier iOS version, you interact with the old + * (narrow) version. If you are compiled for a newer OS + * version, however, you are mapped to the wide version. + */ + + #define TASK_BASIC_INFO_64 5 #define TASK_BASIC_INFO_64_COUNT \ - (sizeof(task_basic_info_64_data_t) / sizeof(natural_t)) + (sizeof(task_basic_info_64_data_t) / sizeof(natural_t)) #elif defined(__arm__) && defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && (__IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_5_0) - /* - * Note: arm64 can't use the old flavor. If you somehow manage to, - * you can cope with the nonsense data yourself. - */ - #define TASK_BASIC_INFO_64 5 +/* + * Note: arm64 can't use the old flavor. If you somehow manage to, + * you can cope with the nonsense data yourself. + */ + #define TASK_BASIC_INFO_64 5 #define TASK_BASIC_INFO_64_COUNT \ - (sizeof(task_basic_info_64_data_t) / sizeof(natural_t)) - - #else - - #define TASK_BASIC_INFO_64 TASK_BASIC_INFO_64_2 - #define TASK_BASIC_INFO_64_COUNT TASK_BASIC_INFO_64_2_COUNT - #endif + (sizeof(task_basic_info_64_data_t) / sizeof(natural_t)) + + #else + + #define TASK_BASIC_INFO_64 TASK_BASIC_INFO_64_2 + #define TASK_BASIC_INFO_64_COUNT TASK_BASIC_INFO_64_2_COUNT + #endif #else /* defined(__arm__) || defined(__arm64__) */ #define TASK_BASIC_INFO_64 5 /* 64-bit capable basic info */ #define TASK_BASIC_INFO_64_COUNT \ - (sizeof(task_basic_info_64_data_t) / sizeof(natural_t)) + (sizeof(task_basic_info_64_data_t) / sizeof(natural_t)) #endif /* localized structure - cannot be safely passed between tasks of differing sizes */ /* Don't use this, use MACH_TASK_BASIC_INFO instead */ struct task_basic_info { - integer_t suspend_count; /* suspend count for task */ - vm_size_t virtual_size; /* virtual memory size (bytes) */ - vm_size_t resident_size; /* resident memory size (bytes) */ - time_value_t user_time; /* total user run time for - terminated threads */ - time_value_t system_time; /* total system run time for - terminated threads */ - policy_t policy; /* default policy for new threads */ + integer_t suspend_count; /* suspend count for task */ + vm_size_t virtual_size; /* virtual memory size (bytes) */ + vm_size_t resident_size; /* resident memory size (bytes) */ + time_value_t user_time; /* total user run time for + * terminated threads */ + time_value_t system_time; /* total system run time for + * terminated threads */ + policy_t policy; /* default policy for new threads */ }; typedef struct task_basic_info task_basic_info_data_t; typedef struct task_basic_info *task_basic_info_t; #define TASK_BASIC_INFO_COUNT \ - (sizeof(task_basic_info_data_t) / sizeof(natural_t)) + (sizeof(task_basic_info_data_t) / sizeof(natural_t)) #if !defined(__LP64__) #define TASK_BASIC_INFO TASK_BASIC_INFO_32 #else @@ -194,101 +194,101 @@ typedef struct task_basic_info *task_basic_info_t; -#define TASK_EVENTS_INFO 2 /* various event counts */ +#define TASK_EVENTS_INFO 2 /* various event counts */ struct task_events_info { - integer_t faults; /* number of page faults */ - integer_t pageins; /* number of actual pageins */ - integer_t cow_faults; /* number of copy-on-write faults */ - integer_t messages_sent; /* number of messages sent */ - integer_t messages_received; /* number of messages received */ - integer_t syscalls_mach; /* number of mach system calls */ - integer_t syscalls_unix; /* number of unix system calls */ - integer_t csw; /* number of context switches */ + integer_t faults; /* number of page faults */ + 
integer_t pageins; /* number of actual pageins */ + integer_t cow_faults; /* number of copy-on-write faults */ + integer_t messages_sent; /* number of messages sent */ + integer_t messages_received; /* number of messages received */ + integer_t syscalls_mach; /* number of mach system calls */ + integer_t syscalls_unix; /* number of unix system calls */ + integer_t csw; /* number of context switches */ }; -typedef struct task_events_info task_events_info_data_t; -typedef struct task_events_info *task_events_info_t; -#define TASK_EVENTS_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(task_events_info_data_t) / sizeof(natural_t))) +typedef struct task_events_info task_events_info_data_t; +typedef struct task_events_info *task_events_info_t; +#define TASK_EVENTS_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(task_events_info_data_t) / sizeof(natural_t))) -#define TASK_THREAD_TIMES_INFO 3 /* total times for live threads - - only accurate if suspended */ +#define TASK_THREAD_TIMES_INFO 3 /* total times for live threads - + * only accurate if suspended */ struct task_thread_times_info { - time_value_t user_time; /* total user run time for - live threads */ - time_value_t system_time; /* total system run time for - live threads */ + time_value_t user_time; /* total user run time for + * live threads */ + time_value_t system_time; /* total system run time for + * live threads */ }; -typedef struct task_thread_times_info task_thread_times_info_data_t; -typedef struct task_thread_times_info *task_thread_times_info_t; -#define TASK_THREAD_TIMES_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(task_thread_times_info_data_t) / sizeof(natural_t))) +typedef struct task_thread_times_info task_thread_times_info_data_t; +typedef struct task_thread_times_info *task_thread_times_info_t; +#define TASK_THREAD_TIMES_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof(task_thread_times_info_data_t) / sizeof(natural_t))) -#define TASK_ABSOLUTETIME_INFO 1 +#define TASK_ABSOLUTETIME_INFO 1 struct task_absolutetime_info { - uint64_t total_user; - uint64_t total_system; - uint64_t threads_user; /* existing threads only */ - uint64_t threads_system; + uint64_t total_user; + uint64_t total_system; + uint64_t threads_user; /* existing threads only */ + uint64_t threads_system; }; -typedef struct task_absolutetime_info task_absolutetime_info_data_t; -typedef struct task_absolutetime_info *task_absolutetime_info_t; -#define TASK_ABSOLUTETIME_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof (task_absolutetime_info_data_t) / sizeof (natural_t))) +typedef struct task_absolutetime_info task_absolutetime_info_data_t; +typedef struct task_absolutetime_info *task_absolutetime_info_t; +#define TASK_ABSOLUTETIME_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof (task_absolutetime_info_data_t) / sizeof (natural_t))) -#define TASK_KERNELMEMORY_INFO 7 +#define TASK_KERNELMEMORY_INFO 7 struct task_kernelmemory_info { - uint64_t total_palloc; /* private kernel mem alloc'ed */ - uint64_t total_pfree; /* private kernel mem freed */ - uint64_t total_salloc; /* shared kernel mem alloc'ed */ - uint64_t total_sfree; /* shared kernel mem freed */ + uint64_t total_palloc; /* private kernel mem alloc'ed */ + uint64_t total_pfree; /* private kernel mem freed */ + uint64_t total_salloc; /* shared kernel mem alloc'ed */ + uint64_t total_sfree; /* shared kernel mem freed */ }; -typedef struct task_kernelmemory_info task_kernelmemory_info_data_t; -typedef struct task_kernelmemory_info *task_kernelmemory_info_t; -#define TASK_KERNELMEMORY_INFO_COUNT 
((mach_msg_type_number_t) \ - (sizeof (task_kernelmemory_info_data_t) / sizeof (natural_t))) +typedef struct task_kernelmemory_info task_kernelmemory_info_data_t; +typedef struct task_kernelmemory_info *task_kernelmemory_info_t; +#define TASK_KERNELMEMORY_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof (task_kernelmemory_info_data_t) / sizeof (natural_t))) -#define TASK_SECURITY_TOKEN 13 -#define TASK_SECURITY_TOKEN_COUNT ((mach_msg_type_number_t) \ - (sizeof(security_token_t) / sizeof(natural_t))) +#define TASK_SECURITY_TOKEN 13 +#define TASK_SECURITY_TOKEN_COUNT ((mach_msg_type_number_t) \ + (sizeof(security_token_t) / sizeof(natural_t))) -#define TASK_AUDIT_TOKEN 15 -#define TASK_AUDIT_TOKEN_COUNT \ - (sizeof(audit_token_t) / sizeof(natural_t)) +#define TASK_AUDIT_TOKEN 15 +#define TASK_AUDIT_TOKEN_COUNT \ + (sizeof(audit_token_t) / sizeof(natural_t)) -#define TASK_AFFINITY_TAG_INFO 16 /* This is experimental. */ +#define TASK_AFFINITY_TAG_INFO 16 /* This is experimental. */ struct task_affinity_tag_info { - integer_t set_count; - integer_t min; - integer_t max; - integer_t task_count; + integer_t set_count; + integer_t min; + integer_t max; + integer_t task_count; }; -typedef struct task_affinity_tag_info task_affinity_tag_info_data_t; -typedef struct task_affinity_tag_info *task_affinity_tag_info_t; -#define TASK_AFFINITY_TAG_INFO_COUNT \ - (sizeof(task_affinity_tag_info_data_t) / sizeof(natural_t)) +typedef struct task_affinity_tag_info task_affinity_tag_info_data_t; +typedef struct task_affinity_tag_info *task_affinity_tag_info_t; +#define TASK_AFFINITY_TAG_INFO_COUNT \ + (sizeof(task_affinity_tag_info_data_t) / sizeof(natural_t)) -#define TASK_DYLD_INFO 17 +#define TASK_DYLD_INFO 17 struct task_dyld_info { - mach_vm_address_t all_image_info_addr; - mach_vm_size_t all_image_info_size; - integer_t all_image_info_format; + mach_vm_address_t all_image_info_addr; + mach_vm_size_t all_image_info_size; + integer_t all_image_info_format; }; -typedef struct task_dyld_info task_dyld_info_data_t; -typedef struct task_dyld_info *task_dyld_info_t; -#define TASK_DYLD_INFO_COUNT \ - (sizeof(task_dyld_info_data_t) / sizeof(natural_t)) -#define TASK_DYLD_ALL_IMAGE_INFO_32 0 /* format value */ -#define TASK_DYLD_ALL_IMAGE_INFO_64 1 /* format value */ +typedef struct task_dyld_info task_dyld_info_data_t; +typedef struct task_dyld_info *task_dyld_info_t; +#define TASK_DYLD_INFO_COUNT \ + (sizeof(task_dyld_info_data_t) / sizeof(natural_t)) +#define TASK_DYLD_ALL_IMAGE_INFO_32 0 /* format value */ +#define TASK_DYLD_ALL_IMAGE_INFO_64 1 /* format value */ #if defined(__arm__) || defined(__arm64__) @@ -297,161 +297,161 @@ typedef struct task_dyld_info *task_dyld_info_t; #define TASK_BASIC_INFO_64_2 18 /* 64-bit capable basic info */ struct task_basic_info_64_2 { - integer_t suspend_count; /* suspend count for task */ - mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ - mach_vm_size_t resident_size; /* resident memory size (bytes) */ - time_value_t user_time; /* total user run time for - terminated threads */ - time_value_t system_time; /* total system run time for - terminated threads */ - policy_t policy; /* default policy for new threads */ + integer_t suspend_count; /* suspend count for task */ + mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ + mach_vm_size_t resident_size; /* resident memory size (bytes) */ + time_value_t user_time; /* total user run time for + * terminated threads */ + time_value_t system_time; /* total system run time for + * terminated threads */ + 
policy_t policy; /* default policy for new threads */ }; typedef struct task_basic_info_64_2 task_basic_info_64_2_data_t; typedef struct task_basic_info_64_2 *task_basic_info_64_2_t; #define TASK_BASIC_INFO_64_2_COUNT \ - (sizeof(task_basic_info_64_2_data_t) / sizeof(natural_t)) + (sizeof(task_basic_info_64_2_data_t) / sizeof(natural_t)) #endif -#define TASK_EXTMOD_INFO 19 +#define TASK_EXTMOD_INFO 19 struct task_extmod_info { - unsigned char task_uuid[16]; - vm_extmod_statistics_data_t extmod_statistics; + unsigned char task_uuid[16]; + vm_extmod_statistics_data_t extmod_statistics; }; -typedef struct task_extmod_info task_extmod_info_data_t; -typedef struct task_extmod_info *task_extmod_info_t; -#define TASK_EXTMOD_INFO_COUNT \ - (sizeof(task_extmod_info_data_t) / sizeof(natural_t)) +typedef struct task_extmod_info task_extmod_info_data_t; +typedef struct task_extmod_info *task_extmod_info_t; +#define TASK_EXTMOD_INFO_COUNT \ + (sizeof(task_extmod_info_data_t) / sizeof(natural_t)) #define MACH_TASK_BASIC_INFO 20 /* always 64-bit basic info */ struct mach_task_basic_info { - mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ - mach_vm_size_t resident_size; /* resident memory size (bytes) */ - mach_vm_size_t resident_size_max; /* maximum resident memory size (bytes) */ - time_value_t user_time; /* total user run time for - terminated threads */ - time_value_t system_time; /* total system run time for - terminated threads */ - policy_t policy; /* default policy for new threads */ - integer_t suspend_count; /* suspend count for task */ + mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ + mach_vm_size_t resident_size; /* resident memory size (bytes) */ + mach_vm_size_t resident_size_max; /* maximum resident memory size (bytes) */ + time_value_t user_time; /* total user run time for + * terminated threads */ + time_value_t system_time; /* total system run time for + * terminated threads */ + policy_t policy; /* default policy for new threads */ + integer_t suspend_count; /* suspend count for task */ }; typedef struct mach_task_basic_info mach_task_basic_info_data_t; typedef struct mach_task_basic_info *mach_task_basic_info_t; #define MACH_TASK_BASIC_INFO_COUNT \ - (sizeof(mach_task_basic_info_data_t) / sizeof(natural_t)) + (sizeof(mach_task_basic_info_data_t) / sizeof(natural_t)) -#define TASK_POWER_INFO 21 +#define TASK_POWER_INFO 21 struct task_power_info { - uint64_t total_user; - uint64_t total_system; - uint64_t task_interrupt_wakeups; - uint64_t task_platform_idle_wakeups; - uint64_t task_timer_wakeups_bin_1; - uint64_t task_timer_wakeups_bin_2; + uint64_t total_user; + uint64_t total_system; + uint64_t task_interrupt_wakeups; + uint64_t task_platform_idle_wakeups; + uint64_t task_timer_wakeups_bin_1; + uint64_t task_timer_wakeups_bin_2; }; -typedef struct task_power_info task_power_info_data_t; -typedef struct task_power_info *task_power_info_t; -#define TASK_POWER_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof (task_power_info_data_t) / sizeof (natural_t))) +typedef struct task_power_info task_power_info_data_t; +typedef struct task_power_info *task_power_info_t; +#define TASK_POWER_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof (task_power_info_data_t) / sizeof (natural_t))) -#define TASK_VM_INFO 22 -#define TASK_VM_INFO_PURGEABLE 23 +#define TASK_VM_INFO 22 +#define TASK_VM_INFO_PURGEABLE 23 struct task_vm_info { - mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ - integer_t region_count; /* number of memory regions */ - integer_t 
page_size; - mach_vm_size_t resident_size; /* resident memory size (bytes) */ - mach_vm_size_t resident_size_peak; /* peak resident size (bytes) */ - - mach_vm_size_t device; - mach_vm_size_t device_peak; - mach_vm_size_t internal; - mach_vm_size_t internal_peak; - mach_vm_size_t external; - mach_vm_size_t external_peak; - mach_vm_size_t reusable; - mach_vm_size_t reusable_peak; - mach_vm_size_t purgeable_volatile_pmap; - mach_vm_size_t purgeable_volatile_resident; - mach_vm_size_t purgeable_volatile_virtual; - mach_vm_size_t compressed; - mach_vm_size_t compressed_peak; - mach_vm_size_t compressed_lifetime; + mach_vm_size_t virtual_size; /* virtual memory size (bytes) */ + integer_t region_count; /* number of memory regions */ + integer_t page_size; + mach_vm_size_t resident_size; /* resident memory size (bytes) */ + mach_vm_size_t resident_size_peak; /* peak resident size (bytes) */ + + mach_vm_size_t device; + mach_vm_size_t device_peak; + mach_vm_size_t internal; + mach_vm_size_t internal_peak; + mach_vm_size_t external; + mach_vm_size_t external_peak; + mach_vm_size_t reusable; + mach_vm_size_t reusable_peak; + mach_vm_size_t purgeable_volatile_pmap; + mach_vm_size_t purgeable_volatile_resident; + mach_vm_size_t purgeable_volatile_virtual; + mach_vm_size_t compressed; + mach_vm_size_t compressed_peak; + mach_vm_size_t compressed_lifetime; /* added for rev1 */ - mach_vm_size_t phys_footprint; + mach_vm_size_t phys_footprint; /* added for rev2 */ - mach_vm_address_t min_address; - mach_vm_address_t max_address; + mach_vm_address_t min_address; + mach_vm_address_t max_address; }; -typedef struct task_vm_info task_vm_info_data_t; -typedef struct task_vm_info *task_vm_info_t; -#define TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof (task_vm_info_data_t) / sizeof (natural_t))) +typedef struct task_vm_info task_vm_info_data_t; +typedef struct task_vm_info *task_vm_info_t; +#define TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \ + (sizeof (task_vm_info_data_t) / sizeof (natural_t))) #define TASK_VM_INFO_REV2_COUNT TASK_VM_INFO_COUNT #define TASK_VM_INFO_REV1_COUNT /* doesn't include min and max address */ \ ((mach_msg_type_number_t) (TASK_VM_INFO_REV2_COUNT - 4)) #define TASK_VM_INFO_REV0_COUNT /* doesn't include phys_footprint */ \ ((mach_msg_type_number_t) (TASK_VM_INFO_REV1_COUNT - 2)) -typedef struct vm_purgeable_info task_purgable_info_t; +typedef struct vm_purgeable_info task_purgable_info_t; #define TASK_TRACE_MEMORY_INFO 24 struct task_trace_memory_info { - uint64_t user_memory_address; /* address of start of trace memory buffer */ - uint64_t buffer_size; /* size of buffer in bytes */ - uint64_t mailbox_array_size; /* size of mailbox area in bytes */ + uint64_t user_memory_address; /* address of start of trace memory buffer */ + uint64_t buffer_size; /* size of buffer in bytes */ + uint64_t mailbox_array_size; /* size of mailbox area in bytes */ }; typedef struct task_trace_memory_info task_trace_memory_info_data_t; typedef struct task_trace_memory_info * task_trace_memory_info_t; #define TASK_TRACE_MEMORY_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(task_trace_memory_info_data_t) / sizeof(natural_t))) + (sizeof(task_trace_memory_info_data_t) / sizeof(natural_t))) #define TASK_WAIT_STATE_INFO 25 /* deprecated. 
*/ struct task_wait_state_info { - uint64_t total_wait_state_time; /* Time that all threads past and present have been in a wait state */ - uint64_t total_wait_sfi_state_time; /* Time that threads have been in SFI wait (should be a subset of total wait state time */ + uint64_t total_wait_state_time; /* Time that all threads past and present have been in a wait state */ + uint64_t total_wait_sfi_state_time; /* Time that threads have been in SFI wait (should be a subset of total wait state time) */ uint32_t _reserved[4]; }; typedef struct task_wait_state_info task_wait_state_info_data_t; typedef struct task_wait_state_info * task_wait_state_info_t; #define TASK_WAIT_STATE_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(task_wait_state_info_data_t) / sizeof(natural_t))) + (sizeof(task_wait_state_info_data_t) / sizeof(natural_t))) -#define TASK_POWER_INFO_V2 26 +#define TASK_POWER_INFO_V2 26 typedef struct { - uint64_t task_gpu_utilisation; - uint64_t task_gpu_stat_reserved0; - uint64_t task_gpu_stat_reserved1; - uint64_t task_gpu_stat_reserved2; + uint64_t task_gpu_utilisation; + uint64_t task_gpu_stat_reserved0; + uint64_t task_gpu_stat_reserved1; + uint64_t task_gpu_stat_reserved2; } gpu_energy_data; typedef gpu_energy_data *gpu_energy_data_t; struct task_power_info_v2 { - task_power_info_data_t cpu_energy; + task_power_info_data_t cpu_energy; gpu_energy_data gpu_energy; #if defined(__arm__) || defined(__arm64__) - uint64_t task_energy; + uint64_t task_energy; #endif - uint64_t task_ptime; - uint64_t task_pset_switches; + uint64_t task_ptime; + uint64_t task_pset_switches; }; -typedef struct task_power_info_v2 task_power_info_v2_data_t; -typedef struct task_power_info_v2 *task_power_info_v2_t; -#define TASK_POWER_INFO_V2_COUNT_OLD \ - ((mach_msg_type_number_t) (sizeof (task_power_info_v2_data_t) - sizeof(uint64_t)*2) / sizeof (natural_t)) -#define TASK_POWER_INFO_V2_COUNT \ - ((mach_msg_type_number_t) (sizeof (task_power_info_v2_data_t) / sizeof (natural_t))) +typedef struct task_power_info_v2 task_power_info_v2_data_t; +typedef struct task_power_info_v2 *task_power_info_v2_t; +#define TASK_POWER_INFO_V2_COUNT_OLD \ + ((mach_msg_type_number_t) (sizeof (task_power_info_v2_data_t) - sizeof(uint64_t)*2) / sizeof (natural_t)) +#define TASK_POWER_INFO_V2_COUNT \ + ((mach_msg_type_number_t) (sizeof (task_power_info_v2_data_t) / sizeof (natural_t))) #define TASK_VM_INFO_PURGEABLE_ACCOUNT 27 /* Used for xnu purgeable vm unit tests */ @@ -467,32 +467,32 @@ typedef struct pvm_account_info *pvm_account_info_t; typedef struct pvm_account_info pvm_account_info_data_t; #define PVM_ACCOUNT_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof (pvm_account_info_data_t) / sizeof (natural_t))) + (sizeof (pvm_account_info_data_t) / sizeof (natural_t))) #endif /* PRIVATE */ -#define TASK_FLAGS_INFO 28 /* return t_flags field */ +#define TASK_FLAGS_INFO 28 /* return t_flags field */ struct task_flags_info { - uint32_t flags; /* task flags */ + uint32_t flags; /* task flags */ }; typedef struct task_flags_info task_flags_info_data_t; typedef struct task_flags_info * task_flags_info_t; #define TASK_FLAGS_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(task_flags_info_data_t) / sizeof (natural_t))) + (sizeof(task_flags_info_data_t) / sizeof (natural_t))) -#define TF_LP64 0x00000001 /* task has 64-bit addressing */ -#define TF_64B_DATA 0x00000002 /* task has 64-bit data registers */ +#define TF_LP64 0x00000001 /* task has 64-bit addressing */ +#define TF_64B_DATA 0x00000002 /* task has 64-bit data registers */ #define
TASK_DEBUG_INFO_INTERNAL 29 /* Used for kernel internal development tests. */ #ifdef PRIVATE struct task_debug_info_internal { - integer_t suspend_count; + integer_t suspend_count; uint64_t ipc_space_size; }; typedef struct task_debug_info_internal *task_debug_info_internal_t; typedef struct task_debug_info_internal task_debug_info_internal_data_t; #define TASK_DEBUG_INFO_INTERNAL_COUNT ((mach_msg_type_number_t) \ - (sizeof (task_debug_info_internal_data_t) / sizeof(natural_t))) + (sizeof (task_debug_info_internal_data_t) / sizeof(natural_t))) #endif /* PRIVATE */ @@ -500,12 +500,12 @@ typedef struct task_debug_info_internal task_debug_info_internal_data_t; * Obsolete interfaces. */ -#define TASK_SCHED_TIMESHARE_INFO 10 -#define TASK_SCHED_RR_INFO 11 -#define TASK_SCHED_FIFO_INFO 12 +#define TASK_SCHED_TIMESHARE_INFO 10 +#define TASK_SCHED_RR_INFO 11 +#define TASK_SCHED_FIFO_INFO 12 -#define TASK_SCHED_INFO 14 +#define TASK_SCHED_INFO 14 #pragma pack() -#endif /* _MACH_TASK_INFO_H_ */ +#endif /* _MACH_TASK_INFO_H_ */ diff --git a/osfmk/mach/task_ledger.h b/osfmk/mach/task_ledger.h index 5d4bc2585..10dd3e611 100644 --- a/osfmk/mach/task_ledger.h +++ b/osfmk/mach/task_ledger.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -39,13 +39,13 @@ /* * Definitions for task ledger line items */ -#define ITEM_THREADS 0 /* number of threads */ -#define ITEM_TASKS 1 /* number of tasks */ +#define ITEM_THREADS 0 /* number of threads */ +#define ITEM_TASKS 1 /* number of tasks */ -#define ITEM_VM 2 /* virtual space (bytes)*/ +#define ITEM_VM 2 /* virtual space (bytes)*/ -#define LEDGER_N_ITEMS 3 /* Total line items */ +#define LEDGER_N_ITEMS 3 /* Total line items */ -#define LEDGER_UNLIMITED 0 /* ignored item.maximum */ +#define LEDGER_UNLIMITED 0 /* ignored item.maximum */ #endif /* _MACH_TASK_LEDGER_H_ */ diff --git a/osfmk/mach/task_policy.h b/osfmk/mach/task_policy.h index 1c58d6067..f1b7cc0c5 100644 --- a/osfmk/mach/task_policy.h +++ b/osfmk/mach/task_policy.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -48,23 +48,23 @@ * and the default ones are being returned instead. */ -typedef natural_t task_policy_flavor_t; -typedef integer_t *task_policy_t; +typedef natural_t task_policy_flavor_t; +typedef integer_t *task_policy_t; /* -kern_return_t task_policy_set( - task_t task, - task_policy_flavor_t flavor, - task_policy_t policy_info, - mach_msg_type_number_t count); - -kern_return_t task_policy_get( - task_t task, - task_policy_flavor_t flavor, - task_policy_t policy_info, - mach_msg_type_number_t *count, - boolean_t *get_default); -*/ + * kern_return_t task_policy_set( + * task_t task, + * task_policy_flavor_t flavor, + * task_policy_t policy_info, + * mach_msg_type_number_t count); + * + * kern_return_t task_policy_get( + * task_t task, + * task_policy_flavor_t flavor, + * task_policy_t policy_info, + * mach_msg_type_number_t *count, + * boolean_t *get_default); + */ /* * Defined flavors. @@ -101,14 +101,14 @@ kern_return_t task_policy_get( * a time with this designation, which is assigned FCFS. 
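The flavor constants and the task_role enum that follow are applied with task_policy_set(), whose prototype is quoted (commented out) above. A minimal sketch for the TASK_CATEGORY_POLICY case, assuming <mach/mach.h>; become_background_app is a hypothetical helper:

#include <mach/mach.h>

static kern_return_t
become_background_app(void)
{
        /* TASK_BACKGROUND_APPLICATION is one of the task_role values
         * from the enum below; the kernel derives scheduling behavior,
         * including the FOCAL/NONFOCAL split, from the role. */
        task_category_policy_data_t policy = {
                .role = TASK_BACKGROUND_APPLICATION
        };

        return task_policy_set(mach_task_self(), TASK_CATEGORY_POLICY,
            (task_policy_t)&policy, TASK_CATEGORY_POLICY_COUNT);
}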
*/ -#define TASK_CATEGORY_POLICY 1 +#define TASK_CATEGORY_POLICY 1 -#define TASK_SUPPRESSION_POLICY 3 -#define TASK_POLICY_STATE 4 -#define TASK_BASE_QOS_POLICY 8 -#define TASK_OVERRIDE_QOS_POLICY 9 -#define TASK_BASE_LATENCY_QOS_POLICY 10 -#define TASK_BASE_THROUGHPUT_QOS_POLICY 11 +#define TASK_SUPPRESSION_POLICY 3 +#define TASK_POLICY_STATE 4 +#define TASK_BASE_QOS_POLICY 8 +#define TASK_OVERRIDE_QOS_POLICY 9 +#define TASK_BASE_LATENCY_QOS_POLICY 10 +#define TASK_BASE_THROUGHPUT_QOS_POLICY 11 enum task_role { @@ -124,44 +124,43 @@ enum task_role { TASK_DARWINBG_APPLICATION = 8, }; -typedef integer_t task_role_t; +typedef integer_t task_role_t; struct task_category_policy { - task_role_t role; + task_role_t role; }; -typedef struct task_category_policy task_category_policy_data_t; -typedef struct task_category_policy *task_category_policy_t; +typedef struct task_category_policy task_category_policy_data_t; +typedef struct task_category_policy *task_category_policy_t; -#define TASK_CATEGORY_POLICY_COUNT ((mach_msg_type_number_t) \ +#define TASK_CATEGORY_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (task_category_policy_data_t) / sizeof (integer_t))) enum task_latency_qos { LATENCY_QOS_TIER_UNSPECIFIED = 0x0, - LATENCY_QOS_TIER_0 = ((0xFF<<16) | 1), - LATENCY_QOS_TIER_1 = ((0xFF<<16) | 2), - LATENCY_QOS_TIER_2 = ((0xFF<<16) | 3), - LATENCY_QOS_TIER_3 = ((0xFF<<16) | 4), - LATENCY_QOS_TIER_4 = ((0xFF<<16) | 5), - LATENCY_QOS_TIER_5 = ((0xFF<<16) | 6) - + LATENCY_QOS_TIER_0 = ((0xFF << 16) | 1), + LATENCY_QOS_TIER_1 = ((0xFF << 16) | 2), + LATENCY_QOS_TIER_2 = ((0xFF << 16) | 3), + LATENCY_QOS_TIER_3 = ((0xFF << 16) | 4), + LATENCY_QOS_TIER_4 = ((0xFF << 16) | 5), + LATENCY_QOS_TIER_5 = ((0xFF << 16) | 6) }; -typedef integer_t task_latency_qos_t; +typedef integer_t task_latency_qos_t; enum task_throughput_qos { THROUGHPUT_QOS_TIER_UNSPECIFIED = 0x0, - THROUGHPUT_QOS_TIER_0 = ((0xFE<<16) | 1), - THROUGHPUT_QOS_TIER_1 = ((0xFE<<16) | 2), - THROUGHPUT_QOS_TIER_2 = ((0xFE<<16) | 3), - THROUGHPUT_QOS_TIER_3 = ((0xFE<<16) | 4), - THROUGHPUT_QOS_TIER_4 = ((0xFE<<16) | 5), - THROUGHPUT_QOS_TIER_5 = ((0xFE<<16) | 6), + THROUGHPUT_QOS_TIER_0 = ((0xFE << 16) | 1), + THROUGHPUT_QOS_TIER_1 = ((0xFE << 16) | 2), + THROUGHPUT_QOS_TIER_2 = ((0xFE << 16) | 3), + THROUGHPUT_QOS_TIER_3 = ((0xFE << 16) | 4), + THROUGHPUT_QOS_TIER_4 = ((0xFE << 16) | 5), + THROUGHPUT_QOS_TIER_5 = ((0xFE << 16) | 6), }; #define LATENCY_QOS_LAUNCH_DEFAULT_TIER LATENCY_QOS_TIER_3 #define THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER THROUGHPUT_QOS_TIER_3 -typedef integer_t task_throughput_qos_t; +typedef integer_t task_throughput_qos_t; struct task_qos_policy { task_latency_qos_t task_latency_qos_tier; @@ -169,12 +168,12 @@ struct task_qos_policy { }; typedef struct task_qos_policy *task_qos_policy_t; -#define TASK_QOS_POLICY_COUNT ((mach_msg_type_number_t) \ +#define TASK_QOS_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (struct task_qos_policy) / sizeof (integer_t))) /* These should be removed - they belong in proc_info.h */ #define PROC_FLAG_DARWINBG 0x8000 /* process in darwin background */ -#define PROC_FLAG_EXT_DARWINBG 0x10000 /* process in darwin background - external enforcement */ +#define PROC_FLAG_EXT_DARWINBG 0x10000 /* process in darwin background - external enforcement */ #define PROC_FLAG_IOS_APPLEDAEMON 0x20000 /* process is apple ios daemon */ #define PROC_FLAG_IOS_IMPPROMOTION 0x80000 /* process is apple ios daemon */ #define PROC_FLAG_ADAPTIVE 0x100000 /* Process is adaptive */ @@ -198,61 +197,61 @@ typedef struct 
task_qos_policy *task_qos_policy_t; struct task_requested_policy { uint64_t trp_int_darwinbg :1, /* marked as darwinbg via setpriority */ - trp_ext_darwinbg :1, - trp_int_iotier :2, /* IO throttle tier */ - trp_ext_iotier :2, - trp_int_iopassive :1, /* should IOs cause lower tiers to be throttled */ - trp_ext_iopassive :1, - trp_bg_iotier :2, /* what IO throttle tier should apply to me when I'm darwinbg? (pushed to threads) */ - trp_terminated :1, /* all throttles should be removed for quick exit or SIGTERM handling */ - trp_base_latency_qos :3, /* Timer latency QoS */ - trp_base_through_qos :3, /* Computation throughput QoS */ - - trp_apptype :3, /* What apptype did launchd tell us this was (inherited) */ - trp_boosted :1, /* Has a non-zero importance assertion count */ - trp_role :4, /* task's system role */ - trp_tal_enabled :1, /* TAL mode is enabled */ - trp_over_latency_qos :3, /* Timer latency QoS override */ - trp_over_through_qos :3, /* Computation throughput QoS override */ - trp_sfi_managed :1, /* SFI Managed task */ - trp_qos_clamp :3, /* task qos clamp */ + trp_ext_darwinbg :1, + trp_int_iotier :2, /* IO throttle tier */ + trp_ext_iotier :2, + trp_int_iopassive :1, /* should IOs cause lower tiers to be throttled */ + trp_ext_iopassive :1, + trp_bg_iotier :2, /* what IO throttle tier should apply to me when I'm darwinbg? (pushed to threads) */ + trp_terminated :1, /* all throttles should be removed for quick exit or SIGTERM handling */ + trp_base_latency_qos :3, /* Timer latency QoS */ + trp_base_through_qos :3, /* Computation throughput QoS */ + + trp_apptype :3, /* What apptype did launchd tell us this was (inherited) */ + trp_boosted :1, /* Has a non-zero importance assertion count */ + trp_role :4, /* task's system role */ + trp_tal_enabled :1, /* TAL mode is enabled */ + trp_over_latency_qos :3, /* Timer latency QoS override */ + trp_over_through_qos :3, /* Computation throughput QoS override */ + trp_sfi_managed :1, /* SFI Managed task */ + trp_qos_clamp :3, /* task qos clamp */ /* suppression policies (non-embedded only) */ - trp_sup_active :1, /* Suppression is on */ - trp_sup_lowpri_cpu :1, /* Wants low priority CPU (MAXPRI_THROTTLE) */ - trp_sup_timer :3, /* Wanted timer throttling QoS tier */ - trp_sup_disk :1, /* Wants disk throttling */ - trp_sup_throughput :3, /* Wants throughput QoS tier */ - trp_sup_cpu :1, /* Wants suppressed CPU priority (MAXPRI_SUPPRESSED) */ - trp_sup_bg_sockets :1, /* Wants background sockets */ - - trp_reserved :17; + trp_sup_active :1, /* Suppression is on */ + trp_sup_lowpri_cpu :1, /* Wants low priority CPU (MAXPRI_THROTTLE) */ + trp_sup_timer :3, /* Wanted timer throttling QoS tier */ + trp_sup_disk :1, /* Wants disk throttling */ + trp_sup_throughput :3, /* Wants throughput QoS tier */ + trp_sup_cpu :1, /* Wants suppressed CPU priority (MAXPRI_SUPPRESSED) */ + trp_sup_bg_sockets :1, /* Wants background sockets */ + + trp_reserved :17; }; struct task_effective_policy { uint64_t tep_darwinbg :1, /* marked as 'background', and sockets are marked bg when created */ - tep_lowpri_cpu :1, /* cpu priority == MAXPRI_THROTTLE */ - tep_io_tier :2, /* effective throttle tier */ - tep_io_passive :1, /* should IOs cause lower tiers to be throttled */ - tep_all_sockets_bg :1, /* All existing sockets in process are marked as bg (thread: all created by thread) */ - tep_new_sockets_bg :1, /* Newly created sockets should be marked as bg */ - tep_bg_iotier :2, /* What throttle tier should I be in when darwinbg is set? 
*/ - tep_terminated :1, /* all throttles have been removed for quick exit or SIGTERM handling */ - tep_qos_ui_is_urgent :1, /* bump UI-Interactive QoS up to the urgent preemption band */ - tep_latency_qos :3, /* Timer latency QoS level */ - tep_through_qos :3, /* Computation throughput QoS level */ - - tep_tal_engaged :1, /* TAL mode is in effect */ - tep_watchers_bg :1, /* watchers are BG-ed */ - tep_sup_active :1, /* suppression behaviors are in effect */ - tep_role :4, /* task's system role */ - tep_suppressed_cpu :1, /* cpu priority == MAXPRI_SUPPRESSED (trumped by lowpri_cpu) */ - tep_sfi_managed :1, /* SFI Managed task */ - tep_live_donor :1, /* task is a live importance boost donor */ - tep_qos_clamp :3, /* task qos clamp (applies to qos-disabled threads too) */ - tep_qos_ceiling :3, /* task qos ceiling (applies to only qos-participating threads) */ - - tep_reserved :31; + tep_lowpri_cpu :1, /* cpu priority == MAXPRI_THROTTLE */ + tep_io_tier :2, /* effective throttle tier */ + tep_io_passive :1, /* should IOs cause lower tiers to be throttled */ + tep_all_sockets_bg :1, /* All existing sockets in process are marked as bg (thread: all created by thread) */ + tep_new_sockets_bg :1, /* Newly created sockets should be marked as bg */ + tep_bg_iotier :2, /* What throttle tier should I be in when darwinbg is set? */ + tep_terminated :1, /* all throttles have been removed for quick exit or SIGTERM handling */ + tep_qos_ui_is_urgent :1, /* bump UI-Interactive QoS up to the urgent preemption band */ + tep_latency_qos :3, /* Timer latency QoS level */ + tep_through_qos :3, /* Computation throughput QoS level */ + + tep_tal_engaged :1, /* TAL mode is in effect */ + tep_watchers_bg :1, /* watchers are BG-ed */ + tep_sup_active :1, /* suppression behaviors are in effect */ + tep_role :4, /* task's system role */ + tep_suppressed_cpu :1, /* cpu priority == MAXPRI_SUPPRESSED (trumped by lowpri_cpu) */ + tep_sfi_managed :1, /* SFI Managed task */ + tep_live_donor :1, /* task is a live importance boost donor */ + tep_qos_clamp :3, /* task qos clamp (applies to qos-disabled threads too) */ + tep_qos_ceiling :3, /* task qos ceiling (applies to only qos-participating threads) */ + + tep_reserved :31; }; #endif /* PRIVATE */ @@ -263,18 +262,18 @@ extern const struct task_requested_policy default_task_requested_policy; extern const struct task_effective_policy default_task_effective_policy; extern kern_return_t -qos_latency_policy_validate(task_latency_qos_t); + qos_latency_policy_validate(task_latency_qos_t); extern kern_return_t -qos_throughput_policy_validate(task_throughput_qos_t); + qos_throughput_policy_validate(task_throughput_qos_t); extern uint32_t -qos_extract(uint32_t); + qos_extract(uint32_t); extern uint32_t -qos_latency_policy_package(uint32_t); + qos_latency_policy_package(uint32_t); extern uint32_t -qos_throughput_policy_package(uint32_t); + qos_throughput_policy_package(uint32_t); #endif /* MACH_KERNEL_PRIVATE */ @@ -298,7 +297,7 @@ struct task_suppression_policy { typedef struct task_suppression_policy *task_suppression_policy_t; -#define TASK_SUPPRESSION_POLICY_COUNT ((mach_msg_type_number_t) \ +#define TASK_SUPPRESSION_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (struct task_suppression_policy) / sizeof (integer_t))) struct task_policy_state { @@ -315,7 +314,7 @@ struct task_policy_state { typedef struct task_policy_state *task_policy_state_t; -#define TASK_POLICY_STATE_COUNT ((mach_msg_type_number_t) \ +#define TASK_POLICY_STATE_COUNT ((mach_msg_type_number_t) \ (sizeof (struct 
task_policy_state) / sizeof (integer_t))) @@ -342,8 +341,8 @@ typedef struct task_policy_state *task_policy_state_t; /* task policy state flags */ #define TASK_IMP_RECEIVER 0x00000001 #define TASK_IMP_DONOR 0x00000002 -#define TASK_IMP_LIVE_DONOR 0x00000004 -#define TASK_DENAP_RECEIVER 0x00000008 +#define TASK_IMP_LIVE_DONOR 0x00000004 +#define TASK_DENAP_RECEIVER 0x00000008 /* requested_policy */ #define POLICY_REQ_INT_DARWIN_BG 0x00000001 @@ -442,4 +441,4 @@ typedef struct task_policy_state *task_policy_state_t; #endif /* PRIVATE */ -#endif /* _MACH_TASK_POLICY_H_ */ +#endif /* _MACH_TASK_POLICY_H_ */ diff --git a/osfmk/mach/task_special_ports.h b/osfmk/mach/task_special_ports.h index 9080a451e..779071686 100644 --- a/osfmk/mach/task_special_ports.h +++ b/osfmk/mach/task_special_ports.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,36 +61,36 @@ * Defines codes for special_purpose task ports. These are NOT * port identifiers - they are only used for the task_get_special_port * and task_set_special_port routines. 
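Taken together, the task_policy.h flavors above are driven through the task_policy_set()/task_policy_get() MIG routines. The following user-space sketch is illustrative only, not part of this patch; it assumes <mach/mach.h> plus the constants from this header, and the policy engine may clamp or reject such requests depending on the caller's privileges.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/task_policy.h>
#include <stdio.h>

int main(void)
{
	/* Tag the current task with an explicit role from enum task_role. */
	task_category_policy_data_t category = { .role = TASK_DARWINBG_APPLICATION };
	kern_return_t kr = task_policy_set(mach_task_self(), TASK_CATEGORY_POLICY,
	    (task_policy_t)&category, TASK_CATEGORY_POLICY_COUNT);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "TASK_CATEGORY_POLICY: %s\n", mach_error_string(kr));
	}

	/* Request the launch-default QoS tiers.  Note the encoding the tier
	 * enums use: a magic byte in the upper half (0xFF for latency, 0xFE
	 * for throughput) and the tier number in the lower half. */
	struct task_qos_policy qos = {
		.task_latency_qos_tier    = LATENCY_QOS_LAUNCH_DEFAULT_TIER,
		.task_throughput_qos_tier = THROUGHPUT_QOS_LAUNCH_DEFAULT_TIER,
	};
	kr = task_policy_set(mach_task_self(), TASK_BASE_QOS_POLICY,
	    (task_policy_t)&qos, TASK_QOS_POLICY_COUNT);
	return kr == KERN_SUCCESS ? 0 : 1;
}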
- * + * */ -#ifndef _MACH_TASK_SPECIAL_PORTS_H_ +#ifndef _MACH_TASK_SPECIAL_PORTS_H_ #define _MACH_TASK_SPECIAL_PORTS_H_ -typedef int task_special_port_t; +typedef int task_special_port_t; -#define TASK_KERNEL_PORT 1 /* Represents task to the outside - world.*/ +#define TASK_KERNEL_PORT 1 /* Represents task to the outside + * world.*/ -#define TASK_HOST_PORT 2 /* The host (priv) port for task. */ +#define TASK_HOST_PORT 2 /* The host (priv) port for task. */ -#define TASK_NAME_PORT 3 /* the name (unpriv) port for task */ +#define TASK_NAME_PORT 3 /* the name (unpriv) port for task */ -#define TASK_BOOTSTRAP_PORT 4 /* Bootstrap environment for task. */ +#define TASK_BOOTSTRAP_PORT 4 /* Bootstrap environment for task. */ /* * Evolving and likely to change. */ -#define TASK_SEATBELT_PORT 7 /* Seatbelt compiler/DEM port for task. */ +#define TASK_SEATBELT_PORT 7 /* Seatbelt compiler/DEM port for task. */ /* PORT 8 was the GSSD TASK PORT which transformed to a host port */ -#define TASK_ACCESS_PORT 9 /* Permission check for task_for_pid. */ +#define TASK_ACCESS_PORT 9 /* Permission check for task_for_pid. */ -#define TASK_DEBUG_CONTROL_PORT 10 /* debug control port */ +#define TASK_DEBUG_CONTROL_PORT 10 /* debug control port */ -#define TASK_RESOURCE_NOTIFY_PORT 11 /* overrides host special RN port */ +#define TASK_RESOURCE_NOTIFY_PORT 11 /* overrides host special RN port */ #define TASK_MAX_SPECIAL_PORT TASK_RESOURCE_NOTIFY_PORT @@ -98,34 +98,34 @@ typedef int task_special_port_t; * Definitions for ease of use */ -#define task_get_kernel_port(task, port) \ - (task_get_special_port((task), TASK_KERNEL_PORT, (port))) +#define task_get_kernel_port(task, port) \ + (task_get_special_port((task), TASK_KERNEL_PORT, (port))) -#define task_set_kernel_port(task, port) \ - (task_set_special_port((task), TASK_KERNEL_PORT, (port))) +#define task_set_kernel_port(task, port) \ + (task_set_special_port((task), TASK_KERNEL_PORT, (port))) -#define task_get_host_port(task, port) \ - (task_get_special_port((task), TASK_HOST_PORT, (port))) +#define task_get_host_port(task, port) \ + (task_get_special_port((task), TASK_HOST_PORT, (port))) -#define task_set_host_port(task, port) \ - (task_set_special_port((task), TASK_HOST_PORT, (port))) +#define task_set_host_port(task, port) \ + (task_set_special_port((task), TASK_HOST_PORT, (port))) -#define task_get_bootstrap_port(task, port) \ - (task_get_special_port((task), TASK_BOOTSTRAP_PORT, (port))) +#define task_get_bootstrap_port(task, port) \ + (task_get_special_port((task), TASK_BOOTSTRAP_PORT, (port))) #define task_get_debug_control_port(task, port) \ - (task_get_special_port((task), TASK_DEBUG_CONTROL_PORT, (port))) + (task_get_special_port((task), TASK_DEBUG_CONTROL_PORT, (port))) -#define task_set_bootstrap_port(task, port) \ - (task_set_special_port((task), TASK_BOOTSTRAP_PORT, (port))) +#define task_set_bootstrap_port(task, port) \ + (task_set_special_port((task), TASK_BOOTSTRAP_PORT, (port))) -#define task_get_task_access_port(task, port) \ - (task_get_special_port((task), TASK_ACCESS_PORT, (port))) +#define task_get_task_access_port(task, port) \ + (task_get_special_port((task), TASK_ACCESS_PORT, (port))) -#define task_set_task_access_port(task, port) \ - (task_set_special_port((task), TASK_ACCESS_PORT, (port))) +#define task_set_task_access_port(task, port) \ + (task_set_special_port((task), TASK_ACCESS_PORT, (port))) #define task_set_task_debug_control_port(task, port) \ - (task_set_special_port((task), TASK_DEBUG_CONTROL_PORT, (port))) + 
(task_set_special_port((task), TASK_DEBUG_CONTROL_PORT, (port))) -#endif /* _MACH_TASK_SPECIAL_PORTS_H_ */ +#endif /* _MACH_TASK_SPECIAL_PORTS_H_ */ diff --git a/osfmk/mach/thread_info.h b/osfmk/mach/thread_info.h index e39523ffc..dfe51dd7d 100644 --- a/osfmk/mach/thread_info.h +++ b/osfmk/mach/thread_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005, 2015 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -66,7 +66,7 @@ * */ -#ifndef _MACH_THREAD_INFO_H_ +#ifndef _MACH_THREAD_INFO_H_ #define _MACH_THREAD_INFO_H_ #include @@ -78,127 +78,127 @@ /* * Generic information structure to allow for expansion. */ -typedef natural_t thread_flavor_t; -typedef integer_t *thread_info_t; /* varying array of int */ +typedef natural_t thread_flavor_t; +typedef integer_t *thread_info_t; /* varying array of int */ -#define THREAD_INFO_MAX (32) /* maximum array size */ -typedef integer_t thread_info_data_t[THREAD_INFO_MAX]; +#define THREAD_INFO_MAX (32) /* maximum array size */ +typedef integer_t thread_info_data_t[THREAD_INFO_MAX]; /* * Currently defined information. 
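The special-port accessor macros above simply wrap task_get_special_port()/task_set_special_port() with the right constant. As a minimal sketch (not part of this patch, assuming only <mach/mach.h>), fetching the bootstrap port looks like this:

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int main(void)
{
	mach_port_t bootstrap = MACH_PORT_NULL;

	/* Expands to task_get_special_port(task, TASK_BOOTSTRAP_PORT, &port). */
	kern_return_t kr = task_get_bootstrap_port(mach_task_self(), &bootstrap);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_get_bootstrap_port: %s\n", mach_error_string(kr));
		return 1;
	}
	printf("bootstrap port name: 0x%x\n", bootstrap);

	/* The returned name is a send right; release it when done. */
	mach_port_deallocate(mach_task_self(), bootstrap);
	return 0;
}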
*/ -#define THREAD_BASIC_INFO 3 /* basic information */ +#define THREAD_BASIC_INFO 3 /* basic information */ struct thread_basic_info { - time_value_t user_time; /* user run time */ - time_value_t system_time; /* system run time */ - integer_t cpu_usage; /* scaled cpu usage percentage */ - policy_t policy; /* scheduling policy in effect */ - integer_t run_state; /* run state (see below) */ - integer_t flags; /* various flags (see below) */ - integer_t suspend_count; /* suspend count for thread */ - integer_t sleep_time; /* number of seconds that thread - has been sleeping */ + time_value_t user_time; /* user run time */ + time_value_t system_time; /* system run time */ + integer_t cpu_usage; /* scaled cpu usage percentage */ + policy_t policy; /* scheduling policy in effect */ + integer_t run_state; /* run state (see below) */ + integer_t flags; /* various flags (see below) */ + integer_t suspend_count; /* suspend count for thread */ + integer_t sleep_time; /* number of seconds that thread + * has been sleeping */ }; typedef struct thread_basic_info thread_basic_info_data_t; typedef struct thread_basic_info *thread_basic_info_t; #define THREAD_BASIC_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(thread_basic_info_data_t) / sizeof(natural_t))) + (sizeof(thread_basic_info_data_t) / sizeof(natural_t))) -#define THREAD_IDENTIFIER_INFO 4 /* thread id and other information */ +#define THREAD_IDENTIFIER_INFO 4 /* thread id and other information */ struct thread_identifier_info { - uint64_t thread_id; /* system-wide unique 64-bit thread id */ - uint64_t thread_handle; /* handle to be used by libproc */ - uint64_t dispatch_qaddr; /* libdispatch queue address */ + uint64_t thread_id; /* system-wide unique 64-bit thread id */ + uint64_t thread_handle; /* handle to be used by libproc */ + uint64_t dispatch_qaddr; /* libdispatch queue address */ }; typedef struct thread_identifier_info thread_identifier_info_data_t; typedef struct thread_identifier_info *thread_identifier_info_t; #define THREAD_IDENTIFIER_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(thread_identifier_info_data_t) / sizeof(natural_t))) + (sizeof(thread_identifier_info_data_t) / sizeof(natural_t))) /* * Scale factor for usage field. */ -#define TH_USAGE_SCALE 1000 +#define TH_USAGE_SCALE 1000 /* * Thread run states (state field). */ -#define TH_STATE_RUNNING 1 /* thread is running normally */ -#define TH_STATE_STOPPED 2 /* thread is stopped */ -#define TH_STATE_WAITING 3 /* thread is waiting normally */ -#define TH_STATE_UNINTERRUPTIBLE 4 /* thread is in an uninterruptible - wait */ -#define TH_STATE_HALTED 5 /* thread is halted at a - clean point */ +#define TH_STATE_RUNNING 1 /* thread is running normally */ +#define TH_STATE_STOPPED 2 /* thread is stopped */ +#define TH_STATE_WAITING 3 /* thread is waiting normally */ +#define TH_STATE_UNINTERRUPTIBLE 4 /* thread is in an uninterruptible + * wait */ +#define TH_STATE_HALTED 5 /* thread is halted at a + * clean point */ /* * Thread flags (flags field). */ -#define TH_FLAGS_SWAPPED 0x1 /* thread is swapped out */ -#define TH_FLAGS_IDLE 0x2 /* thread is an idle thread */ -#define TH_FLAGS_GLOBAL_FORCED_IDLE 0x4 /* thread performs global forced idle */ +#define TH_FLAGS_SWAPPED 0x1 /* thread is swapped out */ +#define TH_FLAGS_IDLE 0x2 /* thread is an idle thread */ +#define TH_FLAGS_GLOBAL_FORCED_IDLE 0x4 /* thread performs global forced idle */ /* * Thread extended info (returns same info as proc_pidinfo(...,PROC_PIDTHREADINFO,...) 
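The THREAD_BASIC_INFO flavor above is queried through the thread_info() MIG routine. A minimal sketch, illustrative only and assuming <mach/mach.h>:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	thread_act_t self = mach_thread_self();
	thread_basic_info_data_t info;
	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;

	kern_return_t kr = thread_info(self, THREAD_BASIC_INFO,
	    (thread_info_t)&info, &count);
	if (kr == KERN_SUCCESS) {
		/* cpu_usage is reported in units of TH_USAGE_SCALE:
		 * full use of one CPU == TH_USAGE_SCALE (1000). */
		printf("user %ds system %ds cpu %.1f%% state %d\n",
		    info.user_time.seconds, info.system_time.seconds,
		    100.0 * info.cpu_usage / TH_USAGE_SCALE,
		    info.run_state);
	}
	mach_port_deallocate(mach_task_self(), self);
	return kr == KERN_SUCCESS ? 0 : 1;
}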
*/ #define THREAD_EXTENDED_INFO 5 #define MAXTHREADNAMESIZE 64 -struct thread_extended_info { // same as proc_threadinfo (from proc_info.h) & proc_threadinfo_internal (from bsd_taskinfo.h) - uint64_t pth_user_time; /* user run time */ - uint64_t pth_system_time; /* system run time */ - int32_t pth_cpu_usage; /* scaled cpu usage percentage */ - int32_t pth_policy; /* scheduling policy in effect */ - int32_t pth_run_state; /* run state (see below) */ - int32_t pth_flags; /* various flags (see below) */ - int32_t pth_sleep_time; /* number of seconds that thread */ - int32_t pth_curpri; /* cur priority*/ - int32_t pth_priority; /* priority*/ - int32_t pth_maxpriority; /* max priority*/ - char pth_name[MAXTHREADNAMESIZE]; /* thread name, if any */ +struct thread_extended_info { // same as proc_threadinfo (from proc_info.h) & proc_threadinfo_internal (from bsd_taskinfo.h) + uint64_t pth_user_time; /* user run time */ + uint64_t pth_system_time; /* system run time */ + int32_t pth_cpu_usage; /* scaled cpu usage percentage */ + int32_t pth_policy; /* scheduling policy in effect */ + int32_t pth_run_state; /* run state (see below) */ + int32_t pth_flags; /* various flags (see below) */ + int32_t pth_sleep_time; /* number of seconds that thread */ + int32_t pth_curpri; /* cur priority*/ + int32_t pth_priority; /* priority*/ + int32_t pth_maxpriority; /* max priority*/ + char pth_name[MAXTHREADNAMESIZE]; /* thread name, if any */ }; typedef struct thread_extended_info thread_extended_info_data_t; typedef struct thread_extended_info * thread_extended_info_t; #define THREAD_EXTENDED_INFO_COUNT ((mach_msg_type_number_t) \ - (sizeof(thread_extended_info_data_t) / sizeof (natural_t))) + (sizeof(thread_extended_info_data_t) / sizeof (natural_t))) #define THREAD_DEBUG_INFO_INTERNAL 6 /* for kernel development internal info */ #if PRIVATE -struct thread_debug_info_internal{ +struct thread_debug_info_internal { uint64_t page_creation_count; }; typedef struct thread_debug_info_internal *thread_debug_info_internal_t; typedef struct thread_debug_info_internal thread_debug_info_internal_data_t; -#define THREAD_DEBUG_INFO_INTERNAL_COUNT ((mach_msg_type_number_t) \ - (sizeof (thread_debug_info_internal_data_t) / sizeof (natural_t))) +#define THREAD_DEBUG_INFO_INTERNAL_COUNT ((mach_msg_type_number_t) \ + (sizeof (thread_debug_info_internal_data_t) / sizeof (natural_t))) #endif /* PRIVATE */ -#define IO_NUM_PRIORITIES 4 +#define IO_NUM_PRIORITIES 4 -#define UPDATE_IO_STATS(info, size) \ -{ \ - info.count++; \ - info.size += size; \ +#define UPDATE_IO_STATS(info, size) \ +{ \ + info.count++; \ + info.size += size; \ } -#define UPDATE_IO_STATS_ATOMIC(info, io_size) \ -{ \ - OSIncrementAtomic64((SInt64 *)&(info.count)); \ - OSAddAtomic64(io_size, (SInt64 *)&(info.size)); \ +#define UPDATE_IO_STATS_ATOMIC(info, io_size) \ +{ \ + OSIncrementAtomic64((SInt64 *)&(info.count)); \ + OSAddAtomic64(io_size, (SInt64 *)&(info.size)); \ } struct io_stat_entry { - uint64_t count; - uint64_t size; + uint64_t count; + uint64_t size; }; struct io_stat_info { @@ -211,7 +211,13 @@ struct io_stat_info { typedef struct io_stat_info *io_stat_info_t; -/* +#if KERNEL_PRIVATE +__BEGIN_DECLS +void thread_group_join_io_storage(void); +__END_DECLS +#endif + +/* * Obsolete interfaces. 
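The UPDATE_IO_STATS macros above keep per-tier I/O counters (the _ATOMIC variant uses the kernel-only OSIncrementAtomic64/OSAddAtomic64 primitives from libkern). A standalone sketch of the non-atomic variant, with the struct and macro restated locally and parentheses added around the macro arguments for safety:

#include <stdint.h>
#include <stdio.h>

/* Mirrors struct io_stat_entry and UPDATE_IO_STATS() from the header. */
struct io_stat_entry {
	uint64_t count;
	uint64_t size;
};

#define UPDATE_IO_STATS(info, size_arg) \
{ \
	(info).count++; \
	(info).size += (size_arg); \
}

int main(void)
{
	struct io_stat_entry reads = { 0, 0 };

	UPDATE_IO_STATS(reads, 4096);   /* one 4 KiB read */
	UPDATE_IO_STATS(reads, 8192);   /* one 8 KiB read */

	printf("%llu IOs, %llu bytes\n",
	    (unsigned long long)reads.count,
	    (unsigned long long)reads.size);
	return 0;
}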
*/ @@ -219,4 +225,4 @@ typedef struct io_stat_info *io_stat_info_t; #define THREAD_SCHED_RR_INFO 11 #define THREAD_SCHED_FIFO_INFO 12 -#endif /* _MACH_THREAD_INFO_H_ */ +#endif /* _MACH_THREAD_INFO_H_ */ diff --git a/osfmk/mach/thread_policy.h b/osfmk/mach/thread_policy.h index 626e0d3f1..7f6ac49ff 100644 --- a/osfmk/mach/thread_policy.h +++ b/osfmk/mach/thread_policy.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -48,23 +48,23 @@ * and the default ones are being returned instead. */ -typedef natural_t thread_policy_flavor_t; -typedef integer_t *thread_policy_t; +typedef natural_t thread_policy_flavor_t; +typedef integer_t *thread_policy_t; /* -kern_return_t thread_policy_set( - thread_t thread, - thread_policy_flavor_t flavor, - thread_policy_t policy_info, - mach_msg_type_number_t count); - -kern_return_t thread_policy_get( - thread_t thread, - thread_policy_flavor_t flavor, - thread_policy_t policy_info, - mach_msg_type_number_t *count, - boolean_t *get_default); -*/ + * kern_return_t thread_policy_set( + * thread_t thread, + * thread_policy_flavor_t flavor, + * thread_policy_t policy_info, + * mach_msg_type_number_t count); + * + * kern_return_t thread_policy_get( + * thread_t thread, + * thread_policy_flavor_t flavor, + * thread_policy_t policy_info, + * mach_msg_type_number_t *count, + * boolean_t *get_default); + */ /* * Defined flavors. @@ -81,16 +81,16 @@ kern_return_t thread_policy_get( * [none] */ -#define THREAD_STANDARD_POLICY 1 +#define THREAD_STANDARD_POLICY 1 struct thread_standard_policy { - natural_t no_data; + natural_t no_data; }; -typedef struct thread_standard_policy thread_standard_policy_data_t; -typedef struct thread_standard_policy *thread_standard_policy_t; +typedef struct thread_standard_policy thread_standard_policy_data_t; +typedef struct thread_standard_policy *thread_standard_policy_t; -#define THREAD_STANDARD_POLICY_COUNT 0 +#define THREAD_STANDARD_POLICY_COUNT 0 /* * THREAD_EXTENDED_POLICY: @@ -104,16 +104,16 @@ typedef struct thread_standard_policy *thread_standard_policy_t; * behavior as THREAD_STANDARD_POLICY. 
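The get_default convention described above (the kernel returns TRUE and the default values when no policy was set explicitly) can be exercised with any flavor; a sketch using the THREAD_EXTENDED_POLICY flavor defined just below, illustrative only and assuming <mach/mach.h>:

#include <mach/mach.h>
#include <mach/thread_policy.h>
#include <stdio.h>

int main(void)
{
	thread_act_t self = mach_thread_self();
	thread_extended_policy_data_t ext;
	mach_msg_type_number_t count = THREAD_EXTENDED_POLICY_COUNT;
	boolean_t get_default = FALSE;   /* FALSE: report current-or-default */

	kern_return_t kr = thread_policy_get(self, THREAD_EXTENDED_POLICY,
	    (thread_policy_t)&ext, &count, &get_default);
	if (kr == KERN_SUCCESS) {
		printf("timeshare=%d (defaulted=%d)\n", ext.timeshare, get_default);
	}
	mach_port_deallocate(mach_task_self(), self);
	return kr == KERN_SUCCESS ? 0 : 1;
}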
*/ -#define THREAD_EXTENDED_POLICY 1 +#define THREAD_EXTENDED_POLICY 1 struct thread_extended_policy { - boolean_t timeshare; + boolean_t timeshare; }; -typedef struct thread_extended_policy thread_extended_policy_data_t; -typedef struct thread_extended_policy *thread_extended_policy_t; +typedef struct thread_extended_policy thread_extended_policy_data_t; +typedef struct thread_extended_policy *thread_extended_policy_t; -#define THREAD_EXTENDED_POLICY_COUNT ((mach_msg_type_number_t) \ +#define THREAD_EXTENDED_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (thread_extended_policy_data_t) / sizeof (integer_t))) /* @@ -143,21 +143,21 @@ typedef struct thread_extended_policy *thread_extended_policy_t; * interrupted, subject to the constraint specified above. */ -#define THREAD_TIME_CONSTRAINT_POLICY 2 +#define THREAD_TIME_CONSTRAINT_POLICY 2 struct thread_time_constraint_policy { - uint32_t period; - uint32_t computation; - uint32_t constraint; - boolean_t preemptible; + uint32_t period; + uint32_t computation; + uint32_t constraint; + boolean_t preemptible; }; -typedef struct thread_time_constraint_policy \ - thread_time_constraint_policy_data_t; -typedef struct thread_time_constraint_policy \ - *thread_time_constraint_policy_t; +typedef struct thread_time_constraint_policy \ + thread_time_constraint_policy_data_t; +typedef struct thread_time_constraint_policy \ + *thread_time_constraint_policy_t; -#define THREAD_TIME_CONSTRAINT_POLICY_COUNT ((mach_msg_type_number_t) \ +#define THREAD_TIME_CONSTRAINT_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (thread_time_constraint_policy_data_t) / sizeof (integer_t))) /* @@ -171,26 +171,26 @@ typedef struct thread_time_constraint_policy \ * importance: The importance is specified as a signed value. */ -#define THREAD_PRECEDENCE_POLICY 3 +#define THREAD_PRECEDENCE_POLICY 3 struct thread_precedence_policy { - integer_t importance; + integer_t importance; }; -typedef struct thread_precedence_policy thread_precedence_policy_data_t; -typedef struct thread_precedence_policy *thread_precedence_policy_t; +typedef struct thread_precedence_policy thread_precedence_policy_data_t; +typedef struct thread_precedence_policy *thread_precedence_policy_t; -#define THREAD_PRECEDENCE_POLICY_COUNT ((mach_msg_type_number_t) \ +#define THREAD_PRECEDENCE_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (thread_precedence_policy_data_t) / sizeof (integer_t))) /* * THREAD_AFFINITY_POLICY: * * This policy is experimental. - * This may be used to express affinity relationships + * This may be used to express affinity relationships * between threads in the task. Threads with the same affinity tag will * be scheduled to share an L2 cache if possible. That is, affinity tags - * are a hint to the scheduler for thread placement. + * are a hint to the scheduler for thread placement. * * The namespace of affinity tags is generally local to one task. However, * a child task created after the assignment of affinity tags by its parent @@ -201,63 +201,63 @@ typedef struct thread_precedence_policy *thread_precedence_policy_t; * tag: The affinity set identifier. 
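Before the affinity definitions, a usage sketch for the time-constraint flavor described above. The period/computation/constraint fields are in mach absolute time units, hence the timebase conversion; the numbers are illustrative only, and this is not part of the patch.

#include <mach/mach.h>
#include <mach/mach_time.h>
#include <mach/thread_policy.h>

/* Ask for ~5 ms of computation every 10 ms, completed within 7 ms. */
static kern_return_t
make_self_realtime(void)
{
	mach_timebase_info_data_t tb;
	mach_timebase_info(&tb);

	/* abs = ns * denom / numer; one millisecond in absolute time units. */
	uint64_t ms = (1000000ULL * tb.denom) / tb.numer;

	thread_time_constraint_policy_data_t policy = {
		.period      = (uint32_t)(10 * ms),
		.computation = (uint32_t)(5 * ms),
		.constraint  = (uint32_t)(7 * ms),
		.preemptible = TRUE,
	};

	thread_act_t self = mach_thread_self();
	kern_return_t kr = thread_policy_set(self,
	    THREAD_TIME_CONSTRAINT_POLICY,
	    (thread_policy_t)&policy,
	    THREAD_TIME_CONSTRAINT_POLICY_COUNT);
	mach_port_deallocate(mach_task_self(), self);
	return kr;
}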
*/ -#define THREAD_AFFINITY_POLICY 4 +#define THREAD_AFFINITY_POLICY 4 struct thread_affinity_policy { - integer_t affinity_tag; + integer_t affinity_tag; }; -#define THREAD_AFFINITY_TAG_NULL 0 +#define THREAD_AFFINITY_TAG_NULL 0 -typedef struct thread_affinity_policy thread_affinity_policy_data_t; -typedef struct thread_affinity_policy *thread_affinity_policy_t; +typedef struct thread_affinity_policy thread_affinity_policy_data_t; +typedef struct thread_affinity_policy *thread_affinity_policy_t; -#define THREAD_AFFINITY_POLICY_COUNT ((mach_msg_type_number_t) \ +#define THREAD_AFFINITY_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (thread_affinity_policy_data_t) / sizeof (integer_t))) /* * THREAD_BACKGROUND_POLICY: */ -#define THREAD_BACKGROUND_POLICY 5 +#define THREAD_BACKGROUND_POLICY 5 struct thread_background_policy { - integer_t priority; + integer_t priority; }; #define THREAD_BACKGROUND_POLICY_DARWIN_BG 0x1000 -typedef struct thread_background_policy thread_background_policy_data_t; -typedef struct thread_background_policy *thread_background_policy_t; +typedef struct thread_background_policy thread_background_policy_data_t; +typedef struct thread_background_policy *thread_background_policy_t; -#define THREAD_BACKGROUND_POLICY_COUNT ((mach_msg_type_number_t) \ +#define THREAD_BACKGROUND_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (thread_background_policy_data_t) / sizeof (integer_t))) -#define THREAD_LATENCY_QOS_POLICY 7 -typedef integer_t thread_latency_qos_t; +#define THREAD_LATENCY_QOS_POLICY 7 +typedef integer_t thread_latency_qos_t; struct thread_latency_qos_policy { thread_latency_qos_t thread_latency_qos_tier; }; -typedef struct thread_latency_qos_policy thread_latency_qos_policy_data_t; -typedef struct thread_latency_qos_policy *thread_latency_qos_policy_t; +typedef struct thread_latency_qos_policy thread_latency_qos_policy_data_t; +typedef struct thread_latency_qos_policy *thread_latency_qos_policy_t; -#define THREAD_LATENCY_QOS_POLICY_COUNT ((mach_msg_type_number_t) \ +#define THREAD_LATENCY_QOS_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (thread_latency_qos_policy_data_t) / sizeof (integer_t))) -#define THREAD_THROUGHPUT_QOS_POLICY 8 -typedef integer_t thread_throughput_qos_t; +#define THREAD_THROUGHPUT_QOS_POLICY 8 +typedef integer_t thread_throughput_qos_t; struct thread_throughput_qos_policy { thread_throughput_qos_t thread_throughput_qos_tier; }; -typedef struct thread_throughput_qos_policy thread_throughput_qos_policy_data_t; -typedef struct thread_throughput_qos_policy *thread_throughput_qos_policy_t; +typedef struct thread_throughput_qos_policy thread_throughput_qos_policy_data_t; +typedef struct thread_throughput_qos_policy *thread_throughput_qos_policy_t; -#define THREAD_THROUGHPUT_QOS_POLICY_COUNT ((mach_msg_type_number_t) \ +#define THREAD_THROUGHPUT_QOS_POLICY_COUNT ((mach_msg_type_number_t) \ (sizeof (thread_throughput_qos_policy_data_t) / sizeof (integer_t))) #ifdef PRIVATE @@ -265,7 +265,7 @@ typedef struct thread_throughput_qos_policy *thread_throughput_qos_policy_t; /* * THREAD_POLICY_STATE: */ -#define THREAD_POLICY_STATE 6 +#define THREAD_POLICY_STATE 6 #define THREAD_POLICY_STATE_FLAG_STATIC_PARAM 0x1 @@ -283,10 +283,10 @@ struct thread_policy_state { uint64_t reserved[2]; }; -typedef struct thread_policy_state thread_policy_state_data_t; -typedef struct thread_policy_state *thread_policy_state_t; +typedef struct thread_policy_state thread_policy_state_data_t; +typedef struct thread_policy_state *thread_policy_state_t; -#define 
THREAD_POLICY_STATE_COUNT ((mach_msg_type_number_t) \ +#define THREAD_POLICY_STATE_COUNT ((mach_msg_type_number_t) \ (sizeof (thread_policy_state_data_t) / sizeof (integer_t))) /* @@ -307,7 +307,7 @@ typedef uint8_t thread_qos_t; #define THREAD_QOS_LAST 7 -#define THREAD_QOS_MIN_TIER_IMPORTANCE (-15) +#define THREAD_QOS_MIN_TIER_IMPORTANCE (-15) /* * Overrides are inputs to the task/thread policy engine that @@ -360,7 +360,7 @@ typedef struct thread_qos_policy thread_qos_policy_data_t; typedef struct thread_qos_policy *thread_qos_policy_t; #define THREAD_QOS_POLICY_COUNT ((mach_msg_type_number_t) \ - (sizeof (thread_qos_policy_data_t) / sizeof (integer_t))) + (sizeof (thread_qos_policy_data_t) / sizeof (integer_t))) #endif /* PRIVATE */ @@ -377,46 +377,46 @@ typedef struct thread_qos_policy *thread_qos_policy_t; struct thread_requested_policy { uint64_t thrp_int_darwinbg :1, /* marked as darwinbg via setpriority */ - thrp_ext_darwinbg :1, - thrp_int_iotier :2, /* IO throttle tier */ - thrp_ext_iotier :2, - thrp_int_iopassive :1, /* should IOs cause lower tiers to be throttled */ - thrp_ext_iopassive :1, - thrp_latency_qos :3, /* Timer latency QoS */ - thrp_through_qos :3, /* Computation throughput QoS */ - - thrp_pidbind_bg :1, /* task i'm bound to is marked 'watchbg' */ - thrp_qos :3, /* thread qos class */ - thrp_qos_relprio :4, /* thread qos relative priority (store as inverse, -10 -> 0xA) */ - thrp_qos_override :3, /* thread qos class override */ - thrp_qos_promote :3, /* thread qos class from promotion */ - thrp_qos_ipc_override :3, /* thread qos class from ipc override */ - thrp_terminated :1, /* heading for termination */ - thrp_qos_sync_ipc_override:3, /* now unused */ - thrp_qos_workq_override :3, /* thread qos class override (workq) */ - - thrp_reserved :26; + thrp_ext_darwinbg :1, + thrp_int_iotier :2, /* IO throttle tier */ + thrp_ext_iotier :2, + thrp_int_iopassive :1, /* should IOs cause lower tiers to be throttled */ + thrp_ext_iopassive :1, + thrp_latency_qos :3, /* Timer latency QoS */ + thrp_through_qos :3, /* Computation throughput QoS */ + + thrp_pidbind_bg :1, /* task i'm bound to is marked 'watchbg' */ + thrp_qos :3, /* thread qos class */ + thrp_qos_relprio :4, /* thread qos relative priority (store as inverse, -10 -> 0xA) */ + thrp_qos_override :3, /* thread qos class override */ + thrp_qos_promote :3, /* thread qos class from promotion */ + thrp_qos_ipc_override :3, /* thread qos class from ipc override */ + thrp_terminated :1, /* heading for termination */ + thrp_qos_sync_ipc_override:3, /* now unused */ + thrp_qos_workq_override :3, /* thread qos class override (workq) */ + + thrp_reserved :26; }; struct thread_effective_policy { uint64_t thep_darwinbg :1, /* marked as 'background', and sockets are marked bg when created */ - thep_io_tier :2, /* effective throttle tier */ - thep_io_passive :1, /* should IOs cause lower tiers to be throttled */ - thep_all_sockets_bg :1, /* All existing sockets in process are marked as bg (thread: all created by thread) */ - thep_new_sockets_bg :1, /* Newly created sockets should be marked as bg */ - thep_terminated :1, /* all throttles have been removed for quick exit or SIGTERM handling */ - thep_qos_ui_is_urgent :1, /* bump UI-Interactive QoS up to the urgent preemption band */ - thep_latency_qos :3, /* Timer latency QoS level */ - thep_through_qos :3, /* Computation throughput QoS level */ - - thep_qos :3, /* thread qos class */ - thep_qos_relprio :4, /* thread qos relative priority (store as inverse, -10 -> 0xA) */ - 
thep_qos_promote :3, /* thread qos class used for promotion */ - - thep_reserved :40; + thep_io_tier :2, /* effective throttle tier */ + thep_io_passive :1, /* should IOs cause lower tiers to be throttled */ + thep_all_sockets_bg :1, /* All existing sockets in process are marked as bg (thread: all created by thread) */ + thep_new_sockets_bg :1, /* Newly created sockets should be marked as bg */ + thep_terminated :1, /* all throttles have been removed for quick exit or SIGTERM handling */ + thep_qos_ui_is_urgent :1, /* bump UI-Interactive QoS up to the urgent preemption band */ + thep_latency_qos :3, /* Timer latency QoS level */ + thep_through_qos :3, /* Computation throughput QoS level */ + + thep_qos :3, /* thread qos class */ + thep_qos_relprio :4, /* thread qos relative priority (store as inverse, -10 -> 0xA) */ + thep_qos_promote :3, /* thread qos class used for promotion */ + + thep_reserved :40; }; #endif /* PRIVATE */ -#endif /* _MACH_THREAD_POLICY_H_ */ +#endif /* _MACH_THREAD_POLICY_H_ */ diff --git a/osfmk/mach/thread_special_ports.h b/osfmk/mach/thread_special_ports.h index 1d44c02ed..02199835a 100644 --- a/osfmk/mach/thread_special_ports.h +++ b/osfmk/mach/thread_special_ports.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,23 +61,23 @@ * Defines codes for special_purpose thread ports. 
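The requested/effective thread policy words above each pack into a single uint64_t, and thrp_qos_relprio uses the "store as inverse" convention called out in its comment (-10 is stored as 0xA). A small standalone sketch of that encoding; the helper names are hypothetical, not kernel functions:

#include <stdint.h>
#include <assert.h>

static inline uint32_t
qos_relprio_to_bits(int relprio)   /* relprio in [-15, 0] */
{
	return (uint32_t)(-relprio) & 0xF;   /* 4-bit field */
}

static inline int
qos_relprio_from_bits(uint32_t bits)
{
	return -(int)bits;
}

int main(void)
{
	assert(qos_relprio_to_bits(-10) == 0xA);
	assert(qos_relprio_from_bits(0xA) == -10);
	return 0;
}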
These are NOT * port identifiers - they are only used for the thread_get_special_port * and thread_set_special_port routines. - * + * */ -#ifndef _MACH_THREAD_SPECIAL_PORTS_H_ +#ifndef _MACH_THREAD_SPECIAL_PORTS_H_ #define _MACH_THREAD_SPECIAL_PORTS_H_ -#define THREAD_KERNEL_PORT 1 /* Represents the thread to the outside - world.*/ +#define THREAD_KERNEL_PORT 1 /* Represents the thread to the outside + * world.*/ /* * Definitions for ease of use */ -#define thread_get_kernel_port(thread, port) \ - (thread_get_special_port((thread), THREAD_KERNEL_PORT, (port))) +#define thread_get_kernel_port(thread, port) \ + (thread_get_special_port((thread), THREAD_KERNEL_PORT, (port))) -#define thread_set_kernel_port(thread, port) \ - (thread_set_special_port((thread), THREAD_KERNEL_PORT, (port))) +#define thread_set_kernel_port(thread, port) \ + (thread_set_special_port((thread), THREAD_KERNEL_PORT, (port))) -#endif /* _MACH_THREAD_SPECIAL_PORTS_H_ */ +#endif /* _MACH_THREAD_SPECIAL_PORTS_H_ */ diff --git a/osfmk/mach/thread_status.h b/osfmk/mach/thread_status.h index dd799ec0e..90ff1e0cb 100644 --- a/osfmk/mach/thread_status.h +++ b/osfmk/mach/thread_status.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -32,24 +32,24 @@ * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -65,8 +65,8 @@ * */ -#ifndef _MACH_THREAD_STATUS_H_ -#define _MACH_THREAD_STATUS_H_ +#ifndef _MACH_THREAD_STATUS_H_ +#define _MACH_THREAD_STATUS_H_ /* * The actual structure that comprises the thread state is defined @@ -80,17 +80,17 @@ * Generic definition for machine-dependent thread status. */ -typedef natural_t *thread_state_t; /* Variable-length array */ +typedef natural_t *thread_state_t; /* Variable-length array */ /* THREAD_STATE_MAX is now defined in */ -typedef natural_t thread_state_data_t[THREAD_STATE_MAX]; +typedef natural_t thread_state_data_t[THREAD_STATE_MAX]; -#define THREAD_STATE_FLAVOR_LIST 0 /* List of valid flavors */ -#define THREAD_STATE_FLAVOR_LIST_NEW 128 -#define THREAD_STATE_FLAVOR_LIST_10_9 129 -#define THREAD_STATE_FLAVOR_LIST_10_13 130 +#define THREAD_STATE_FLAVOR_LIST 0 /* List of valid flavors */ +#define THREAD_STATE_FLAVOR_LIST_NEW 128 +#define THREAD_STATE_FLAVOR_LIST_10_9 129 +#define THREAD_STATE_FLAVOR_LIST_10_13 130 -typedef int thread_state_flavor_t; -typedef thread_state_flavor_t *thread_state_flavor_array_t; +typedef int thread_state_flavor_t; +typedef thread_state_flavor_t *thread_state_flavor_array_t; -#endif /* _MACH_THREAD_STATUS_H_ */ +#endif /* _MACH_THREAD_STATUS_H_ */ diff --git a/osfmk/mach/thread_switch.h b/osfmk/mach/thread_switch.h index abce09f6e..17cccd1b2 100644 --- a/osfmk/mach/thread_switch.h +++ b/osfmk/mach/thread_switch.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
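The THREAD_STATE_FLAVOR_LIST flavor above can be used with thread_get_state() to enumerate the machine-dependent flavors a kernel supports. A sketch, on the assumption (per the comment) that flavor 0 returns the list of valid flavors:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	thread_act_t self = mach_thread_self();
	thread_state_flavor_t flavors[THREAD_STATE_MAX];
	mach_msg_type_number_t count = THREAD_STATE_MAX;

	kern_return_t kr = thread_get_state(self, THREAD_STATE_FLAVOR_LIST,
	    (thread_state_t)flavors, &count);
	if (kr == KERN_SUCCESS) {
		for (mach_msg_type_number_t i = 0; i < count; i++) {
			printf("supported flavor: %d\n", flavors[i]);
		}
	}
	mach_port_deallocate(mach_task_self(), self);
	return kr == KERN_SUCCESS ? 0 : 1;
}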
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _MACH_THREAD_SWITCH_H_ -#define _MACH_THREAD_SWITCH_H_ +#ifndef _MACH_THREAD_SWITCH_H_ +#define _MACH_THREAD_SWITCH_H_ #include #include @@ -68,17 +68,17 @@ * Constant definitions for thread_switch trap. */ -#define SWITCH_OPTION_NONE 0 -#define SWITCH_OPTION_DEPRESS 1 -#define SWITCH_OPTION_WAIT 2 +#define SWITCH_OPTION_NONE 0 +#define SWITCH_OPTION_DEPRESS 1 +#define SWITCH_OPTION_WAIT 2 #ifdef PRIVATE /* Workqueue should not consider thread blocked, and option_time is in us */ -#define SWITCH_OPTION_DISPATCH_CONTENTION 3 +#define SWITCH_OPTION_DISPATCH_CONTENTION 3 /* Handoff to lock owner and temporarily grant matching IO throttling policy */ -#define SWITCH_OPTION_OSLOCK_DEPRESS 4 -#define SWITCH_OPTION_OSLOCK_WAIT 5 +#define SWITCH_OPTION_OSLOCK_DEPRESS 4 +#define SWITCH_OPTION_OSLOCK_WAIT 5 #endif /* PRIVATE */ -#define valid_switch_option(opt) (0 <= (opt) && (opt) <= 5) +#define valid_switch_option(opt) (0 <= (opt) && (opt) <= 5) -#endif /* _MACH_THREAD_SWITCH_H_ */ +#endif /* _MACH_THREAD_SWITCH_H_ */ diff --git a/osfmk/mach/time_value.h b/osfmk/mach/time_value.h index e4f912d9e..8cfd37d74 100644 --- a/osfmk/mach/time_value.h +++ b/osfmk/mach/time_value.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,40 +22,40 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
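The thread_switch constants above parameterize the thread_switch() trap. A sketch of a priority-depressing yield; the prototype is restated defensively in case the SDK header in use does not declare it, and the millisecond interpretation of option_time applies to the DEPRESS/WAIT options (the private DISPATCH_CONTENTION option uses microseconds, per its comment):

#include <mach/mach.h>
#include <mach/thread_switch.h>

/* Exported by the system library; restated here for self-containment. */
extern kern_return_t thread_switch(mach_port_name_t thread_name,
    int option, mach_msg_timeout_t option_time);

int main(void)
{
	/* Yield and depress this thread's priority for ~2 ms; MACH_PORT_NULL
	 * lets the scheduler pick the next thread to run. */
	if (valid_switch_option(SWITCH_OPTION_DEPRESS)) {
		(void)thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 2);
	}
	return 0;
}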
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ -#ifndef _MACH_TIME_VALUE_H_ -#define _MACH_TIME_VALUE_H_ +#ifndef _MACH_TIME_VALUE_H_ +#define _MACH_TIME_VALUE_H_ #include @@ -68,29 +68,29 @@ struct time_value { integer_t microseconds; }; -typedef struct time_value time_value_t; +typedef struct time_value time_value_t; /* * Macros to manipulate time values. Assume that time values * are normalized (microseconds <= 999999). */ -#define TIME_MICROS_MAX (1000000) +#define TIME_MICROS_MAX (1000000) -#define time_value_add_usec(val, micros) { \ - if (((val)->microseconds += (micros)) \ - >= TIME_MICROS_MAX) { \ - (val)->microseconds -= TIME_MICROS_MAX; \ - (val)->seconds++; \ - } \ +#define time_value_add_usec(val, micros) { \ + if (((val)->microseconds += (micros)) \ + >= TIME_MICROS_MAX) { \ + (val)->microseconds -= TIME_MICROS_MAX; \ + (val)->seconds++; \ + } \ } -#define time_value_add(result, addend) { \ - (result)->microseconds += (addend)->microseconds; \ - (result)->seconds += (addend)->seconds; \ - if ((result)->microseconds >= TIME_MICROS_MAX) { \ - (result)->microseconds -= TIME_MICROS_MAX; \ - (result)->seconds++; \ - } \ +#define time_value_add(result, addend) { \ + (result)->microseconds += (addend)->microseconds; \ + (result)->seconds += (addend)->seconds; \ + if ((result)->microseconds >= TIME_MICROS_MAX) { \ + (result)->microseconds -= TIME_MICROS_MAX; \ + (result)->seconds++; \ + } \ } -#endif /* _MACH_TIME_VALUE_H_ */ +#endif /* _MACH_TIME_VALUE_H_ */ diff --git a/osfmk/mach/vm_attributes.h b/osfmk/mach/vm_attributes.h index 2be32337b..bac0993cb 100644 --- a/osfmk/mach/vm_attributes.h +++ b/osfmk/mach/vm_attributes.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. 
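A worked example of the time_value macros above, with the struct and macro restated locally so the carry logic can be checked in isolation. Note why the header insists inputs be normalized (microseconds <= 999999): the macro performs at most one carry.

#include <stdio.h>

typedef struct { int seconds; int microseconds; } time_value_t;

#define TIME_MICROS_MAX (1000000)

#define time_value_add(result, addend) { \
	(result)->microseconds += (addend)->microseconds; \
	(result)->seconds += (addend)->seconds; \
	if ((result)->microseconds >= TIME_MICROS_MAX) { \
		(result)->microseconds -= TIME_MICROS_MAX; \
		(result)->seconds++; \
	} \
}

int main(void)
{
	time_value_t a = { 1, 700000 };
	time_value_t b = { 2, 600000 };

	time_value_add(&a, &b);
	/* 1.700000 + 2.600000 = 4.300000: one carry suffices only because
	 * both inputs were normalized. */
	printf("%d.%06d\n", a.seconds, a.microseconds);
	return 0;
}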
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -67,33 +67,33 @@ * */ -#ifndef _MACH_VM_ATTRIBUTES_H_ -#define _MACH_VM_ATTRIBUTES_H_ +#ifndef _MACH_VM_ATTRIBUTES_H_ +#define _MACH_VM_ATTRIBUTES_H_ /* * Types of machine-dependent attributes */ -typedef unsigned int vm_machine_attribute_t; +typedef unsigned int vm_machine_attribute_t; -#define MATTR_CACHE 1 /* cachability */ -#define MATTR_MIGRATE 2 /* migrability */ -#define MATTR_REPLICATE 4 /* replicability */ +#define MATTR_CACHE 1 /* cachability */ +#define MATTR_MIGRATE 2 /* migrability */ +#define MATTR_REPLICATE 4 /* replicability */ /* * Values for the above, e.g. operations on attribute */ -typedef int vm_machine_attribute_val_t; +typedef int vm_machine_attribute_val_t; -#define MATTR_VAL_OFF 0 /* (generic) turn attribute off */ -#define MATTR_VAL_ON 1 /* (generic) turn attribute on */ -#define MATTR_VAL_GET 2 /* (generic) return current value */ +#define MATTR_VAL_OFF 0 /* (generic) turn attribute off */ +#define MATTR_VAL_ON 1 /* (generic) turn attribute on */ +#define MATTR_VAL_GET 2 /* (generic) return current value */ -#define MATTR_VAL_CACHE_FLUSH 6 /* flush from all caches */ -#define MATTR_VAL_DCACHE_FLUSH 7 /* flush from data caches */ -#define MATTR_VAL_ICACHE_FLUSH 8 /* flush from instruction caches */ -#define MATTR_VAL_CACHE_SYNC 9 /* sync I+D caches */ -#define MATTR_VAL_CACHE_SYNC 9 /* sync I+D caches */ +#define MATTR_VAL_CACHE_FLUSH 6 /* flush from all caches */ +#define MATTR_VAL_DCACHE_FLUSH 7 /* flush from data caches */ +#define MATTR_VAL_ICACHE_FLUSH 8 /* flush from instruction caches */ +#define MATTR_VAL_CACHE_SYNC 9 /* sync I+D caches */ +#define MATTR_VAL_CACHE_SYNC 9 /* sync I+D caches */ -#define MATTR_VAL_GET_INFO 10 /* get page info (stats) */ +#define MATTR_VAL_GET_INFO 10 /* get page info (stats) */ -#endif /* _MACH_VM_ATTRIBUTES_H_ */ +#endif /* _MACH_VM_ATTRIBUTES_H_ */ diff --git a/osfmk/mach/vm_behavior.h b/osfmk/mach/vm_behavior.h index 13a0b301f..727980d51 100644 --- a/osfmk/mach/vm_behavior.h +++ b/osfmk/mach/vm_behavior.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
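The MATTR_* values above are applied through the vm_machine_attribute() MIG routine. A sketch of the classic use, syncing I and D caches after writing instructions into a buffer; this is illustrative only (user code would typically reach for sys_icache_invalidate() instead, and some platforms may not implement every attribute):

#include <mach/mach.h>

static kern_return_t
sync_code_caches(void *addr, vm_size_t len)
{
	/* value is in/out: pass the operation, receive the result. */
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_SYNC;

	return vm_machine_attribute(mach_task_self(),
	    (vm_address_t)addr, len, MATTR_CACHE, &value);
}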
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -35,7 +35,7 @@ * */ -#ifndef _MACH_VM_BEHAVIOR_H_ +#ifndef _MACH_VM_BEHAVIOR_H_ #define _MACH_VM_BEHAVIOR_H_ /* @@ -44,12 +44,12 @@ * vm_behavior_t behavior codes. */ -typedef int vm_behavior_t; +typedef int vm_behavior_t; /* * Enumeration of valid values for vm_behavior_t. - * These describe expected page reference behavior for - * for a given range of virtual memory. For implementation + * These describe expected page reference behavior for + * for a given range of virtual memory. For implementation * details see vm/vm_fault.c */ @@ -58,22 +58,22 @@ typedef int vm_behavior_t; * The following behaviors affect the memory region's future behavior * and are stored in the VM map entry data structure. */ -#define VM_BEHAVIOR_DEFAULT ((vm_behavior_t) 0) /* default */ -#define VM_BEHAVIOR_RANDOM ((vm_behavior_t) 1) /* random */ -#define VM_BEHAVIOR_SEQUENTIAL ((vm_behavior_t) 2) /* forward sequential */ -#define VM_BEHAVIOR_RSEQNTL ((vm_behavior_t) 3) /* reverse sequential */ +#define VM_BEHAVIOR_DEFAULT ((vm_behavior_t) 0) /* default */ +#define VM_BEHAVIOR_RANDOM ((vm_behavior_t) 1) /* random */ +#define VM_BEHAVIOR_SEQUENTIAL ((vm_behavior_t) 2) /* forward sequential */ +#define VM_BEHAVIOR_RSEQNTL ((vm_behavior_t) 3) /* reverse sequential */ /* * The following "behaviors" affect the memory region only at the time of the * call and are not stored in the VM map entry. */ -#define VM_BEHAVIOR_WILLNEED ((vm_behavior_t) 4) /* will need in near future */ -#define VM_BEHAVIOR_DONTNEED ((vm_behavior_t) 5) /* dont need in near future */ -#define VM_BEHAVIOR_FREE ((vm_behavior_t) 6) /* free memory without write-back */ -#define VM_BEHAVIOR_ZERO_WIRED_PAGES ((vm_behavior_t) 7) /* zero out the wired pages of an entry if it is being deleted without unwiring them first */ -#define VM_BEHAVIOR_REUSABLE ((vm_behavior_t) 8) -#define VM_BEHAVIOR_REUSE ((vm_behavior_t) 9) -#define VM_BEHAVIOR_CAN_REUSE ((vm_behavior_t) 10) -#define VM_BEHAVIOR_PAGEOUT ((vm_behavior_t) 11) +#define VM_BEHAVIOR_WILLNEED ((vm_behavior_t) 4) /* will need in near future */ +#define VM_BEHAVIOR_DONTNEED ((vm_behavior_t) 5) /* dont need in near future */ +#define VM_BEHAVIOR_FREE ((vm_behavior_t) 6) /* free memory without write-back */ +#define VM_BEHAVIOR_ZERO_WIRED_PAGES ((vm_behavior_t) 7) /* zero out the wired pages of an entry if it is being deleted without unwiring them first */ +#define VM_BEHAVIOR_REUSABLE ((vm_behavior_t) 8) +#define VM_BEHAVIOR_REUSE ((vm_behavior_t) 9) +#define VM_BEHAVIOR_CAN_REUSE ((vm_behavior_t) 10) +#define VM_BEHAVIOR_PAGEOUT ((vm_behavior_t) 11) -#endif /*_MACH_VM_BEHAVIOR_H_*/ +#endif /*_MACH_VM_BEHAVIOR_H_*/ diff --git a/osfmk/mach/vm_inherit.h b/osfmk/mach/vm_inherit.h index d6440e441..528d69179 100644 --- a/osfmk/mach/vm_inherit.h +++ b/osfmk/mach/vm_inherit.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. 
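The stored behaviors above are set through vm_behavior_set(); madvise(2) is implemented in terms of these values. A minimal sketch, assuming only <mach/mach.h>:

#include <mach/mach.h>

/* Tell the VM system a mapping will be read front to back, so the
 * fault handler can cluster read-ahead aggressively. */
static kern_return_t
advise_sequential(void *addr, vm_size_t len)
{
	return vm_behavior_set(mach_task_self(),
	    (vm_address_t)addr, len, VM_BEHAVIOR_SEQUENTIAL);
}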
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,8 +63,8 @@ * */ -#ifndef _MACH_VM_INHERIT_H_ -#define _MACH_VM_INHERIT_H_ +#ifndef _MACH_VM_INHERIT_H_ +#define _MACH_VM_INHERIT_H_ /* * Types defined: @@ -72,18 +72,18 @@ * vm_inherit_t inheritance codes. */ -typedef unsigned int vm_inherit_t; /* might want to change this */ +typedef unsigned int vm_inherit_t; /* might want to change this */ /* * Enumeration of valid values for vm_inherit_t. */ -#define VM_INHERIT_SHARE ((vm_inherit_t) 0) /* share with child */ -#define VM_INHERIT_COPY ((vm_inherit_t) 1) /* copy into child */ -#define VM_INHERIT_NONE ((vm_inherit_t) 2) /* absent from child */ -#define VM_INHERIT_DONATE_COPY ((vm_inherit_t) 3) /* copy and delete */ +#define VM_INHERIT_SHARE ((vm_inherit_t) 0) /* share with child */ +#define VM_INHERIT_COPY ((vm_inherit_t) 1) /* copy into child */ +#define VM_INHERIT_NONE ((vm_inherit_t) 2) /* absent from child */ +#define VM_INHERIT_DONATE_COPY ((vm_inherit_t) 3) /* copy and delete */ -#define VM_INHERIT_DEFAULT VM_INHERIT_COPY +#define VM_INHERIT_DEFAULT VM_INHERIT_COPY #define VM_INHERIT_LAST_VALID VM_INHERIT_NONE -#endif /* _MACH_VM_INHERIT_H_ */ +#endif /* _MACH_VM_INHERIT_H_ */ diff --git a/osfmk/mach/vm_param.h b/osfmk/mach/vm_param.h index f6709419a..2bb038e21 100644 --- a/osfmk/mach/vm_param.h +++ b/osfmk/mach/vm_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. 
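A sketch of the inheritance codes above from user space; alloc_shared_page is a hypothetical helper, vm_allocate() and vm_inherit() are the Mach routines involved:

#include <mach/mach.h>

/*
 * Allocate one page and mark it VM_INHERIT_SHARE, so a child created
 * by a later fork() sees the same physical pages rather than the
 * VM_INHERIT_DEFAULT (i.e. VM_INHERIT_COPY) copy-on-write snapshot.
 */
static vm_address_t
alloc_shared_page(void)
{
    vm_address_t addr = 0;

    if (vm_allocate(mach_task_self(), &addr, vm_page_size,
        VM_FLAGS_ANYWHERE) != KERN_SUCCESS) {
        return 0;
    }
    (void)vm_inherit(mach_task_self(), addr, vm_page_size,
        VM_INHERIT_SHARE);
    return addr; /* writes here stay visible to children forked later */
}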
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,16 +64,16 @@ * */ -#ifndef _MACH_VM_PARAM_H_ +#ifndef _MACH_VM_PARAM_H_ #define _MACH_VM_PARAM_H_ #include -#ifdef KERNEL +#ifdef KERNEL -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include -#endif /* ASSEMBLER */ +#endif /* ASSEMBLER */ #include #include @@ -83,10 +83,10 @@ * is some number of hardware pages, depending on the target machine. */ -#ifndef ASSEMBLER +#ifndef ASSEMBLER -#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE /* pagesize in addr units */ -#define PAGE_MASK_64 (unsigned long long)PAGE_MASK /* mask for off in page */ +#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE /* pagesize in addr units */ +#define PAGE_MASK_64 (unsigned long long)PAGE_MASK /* mask for off in page */ /* * Convert addresses to pages and vice versa. No rounding is used. 
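The conversion macros that follow reduce to shift-and-mask arithmetic. An illustration only, written out under an assumed 16 KB page (PAGE_SHIFT == 14); the EX_* names are invented for the sketch:

#include <stdint.h>

#define EX_PAGE_SHIFT 14
#define EX_PAGE_SIZE  (1ULL << EX_PAGE_SHIFT)
#define EX_PAGE_MASK  (EX_PAGE_SIZE - 1)

/* address -> page number, and back */
static uint64_t ex_atop(uint64_t a)  { return a >> EX_PAGE_SHIFT; }
static uint64_t ex_ptoa(uint64_t p)  { return p << EX_PAGE_SHIFT; }
/* round down to the enclosing page boundary */
static uint64_t ex_trunc(uint64_t a) { return a & ~EX_PAGE_MASK; }
/* round up; note this can wrap to 0 near the top of the address
 * space, which is exactly the case round_page_overflow() below is
 * there to catch with os_add_overflow(). */
static uint64_t ex_round(uint64_t a) { return (a + EX_PAGE_MASK) & ~EX_PAGE_MASK; }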
@@ -109,8 +109,8 @@ */ #if 1 -#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT) -#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT) +#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT) +#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT) #else #define atop(x) (0UL = 0) #define ptoa(x) (0UL = 0) @@ -123,9 +123,9 @@ #define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK)) #define round_page_overflow(in, out) __os_warn_unused(({ \ - bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \ - *out &= ~((__typeof__(*out))PAGE_MASK); \ - __ovr; \ + bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \ + *out &= ~((__typeof__(*out))PAGE_MASK); \ + __ovr; \ })) static inline int OS_WARN_RESULT @@ -188,23 +188,23 @@ mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out) #define atop_32(x) \ (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \ - (*(long *)0), \ - (0UL)) = 0) + (*(long *)0), \ + (0UL)) = 0) #define ptoa_32(x) \ (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \ - (*(long *)0), \ - (0UL)) = 0) + (*(long *)0), \ + (0UL)) = 0) #define round_page_32(x) \ (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \ - (*(long *)0), \ - (0UL)) = 0) + (*(long *)0), \ + (0UL)) = 0) #define trunc_page_32(x) \ (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \ - (*(long *)0), \ - (0UL)) = 0) + (*(long *)0), \ + (0UL)) = 0) #else #define atop_32(x) (0) @@ -226,10 +226,10 @@ mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out) * an exact page multiple. */ -#define page_aligned(x) (((x) & PAGE_MASK) == 0) +#define page_aligned(x) (((x) & PAGE_MASK) == 0) -extern vm_size_t mem_size; /* 32-bit size of memory - limited by maxmem - deprecated */ -extern uint64_t max_mem; /* 64-bit size of memory - limited by maxmem */ +extern vm_size_t mem_size; /* 32-bit size of memory - limited by maxmem - deprecated */ +extern uint64_t max_mem; /* 64-bit size of memory - limited by maxmem */ /* * The default pager does not handle 64-bit offsets inside its objects, @@ -237,42 +237,51 @@ extern uint64_t max_mem; /* 64-bit size of memory - limited by maxmem */ * When we need to allocate a chunk of anonymous memory over that size, * we have to allocate more than one chunk. */ -#define ANON_MAX_SIZE 0xFFFFF000ULL +#define ANON_MAX_SIZE 0xFFFFF000ULL /* * Work-around for * Break large anonymous memory areas into 128MB chunks to alleviate * the cost of copying when copy-on-write is not possible because a small * portion of it being wired. */ -#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */ +#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */ + +/* + * The 'medium' malloc allocator would like its regions + * to be chunked up into MALLOC_MEDIUM_CHUNK_SIZE chunks + * and backed by different objects. This avoids contention + * on a single large object and showed solid improvements on high + * core machines with workloads involving video and graphics processing. 
+ */ +#define MALLOC_MEDIUM_CHUNK_SIZE (8ULL * 1024 * 1024) /* 8 MB */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE #include -extern uint64_t mem_actual; /* 64-bit size of memory - not limited by maxmem */ -extern uint64_t sane_size; /* Memory size to use for defaults calculations */ -extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */ +extern uint64_t mem_actual; /* 64-bit size of memory - not limited by maxmem */ +extern uint64_t sane_size; /* Memory size to use for defaults calculations */ +extern addr64_t vm_last_addr; /* Highest kernel virtual address known to the VM system */ -extern const vm_offset_t vm_min_kernel_address; -extern const vm_offset_t vm_max_kernel_address; +extern const vm_offset_t vm_min_kernel_address; +extern const vm_offset_t vm_max_kernel_address; extern vm_offset_t vm_kernel_stext; extern vm_offset_t vm_kernel_etext; -extern vm_offset_t vm_kernel_slid_base; -extern vm_offset_t vm_kernel_slid_top; -extern vm_offset_t vm_kernel_slide; -extern vm_offset_t vm_kernel_addrperm; -extern vm_offset_t vm_kext_base; -extern vm_offset_t vm_kext_top; -extern vm_offset_t vm_kernel_base; -extern vm_offset_t vm_kernel_top; -extern vm_offset_t vm_hib_base; - -extern vm_offset_t vm_kernel_builtinkmod_text; -extern vm_offset_t vm_kernel_builtinkmod_text_end; - -#define VM_KERNEL_IS_SLID(_o) \ +extern vm_offset_t vm_kernel_slid_base; +extern vm_offset_t vm_kernel_slid_top; +extern vm_offset_t vm_kernel_slide; +extern vm_offset_t vm_kernel_addrperm; +extern vm_offset_t vm_kext_base; +extern vm_offset_t vm_kext_top; +extern vm_offset_t vm_kernel_base; +extern vm_offset_t vm_kernel_top; +extern vm_offset_t vm_hib_base; + +extern vm_offset_t vm_kernel_builtinkmod_text; +extern vm_offset_t vm_kernel_builtinkmod_text_end; + +#define VM_KERNEL_IS_SLID(_o) \ (((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) >= vm_kernel_slid_base) && \ ((vm_offset_t)VM_KERNEL_STRIP_PTR(_o) < vm_kernel_slid_top)) @@ -332,13 +341,13 @@ __END_DECLS #define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v)) #define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \ - VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \ - VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) + vm_kernel_addrperm) : \ - (vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \ + VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \ + VM_KERNEL_ADDRESS(_v) ? ((vm_offset_t)VM_KERNEL_STRIP_PTR(_v) + vm_kernel_addrperm) : \ + (vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \ }) #define VM_KERNEL_UNSLIDE(_v) ({ \ - VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \ + VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \ }) #define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v) @@ -384,11 +393,11 @@ round_page_32(uint32_t x) return x; } -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -extern vm_size_t page_size; -extern vm_size_t page_mask; -extern int page_shift; +extern vm_size_t page_size; +extern vm_size_t page_mask; +extern int page_shift; /* We need a way to get rid of compiler warnings when we cast from */ /* a 64 bit value to an address (which may be 32 bits or 64-bits). */ @@ -402,12 +411,12 @@ extern int page_shift; #define CAST_DOWN( type, addr ) \ ( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 
0 : 1))) ) -#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) ) +#define CAST_DOWN_EXPLICIT( type, addr ) ( ((type)((uintptr_t) (addr))) ) #endif /* __CAST_DOWN_CHECK */ -#endif /* ASSEMBLER */ +#endif /* ASSEMBLER */ -#endif /* KERNEL */ +#endif /* KERNEL */ -#endif /* _MACH_VM_PARAM_H_ */ +#endif /* _MACH_VM_PARAM_H_ */ diff --git a/osfmk/mach/vm_prot.h b/osfmk/mach/vm_prot.h index 6998a31f0..fa1eb0344 100644 --- a/osfmk/mach/vm_prot.h +++ b/osfmk/mach/vm_prot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,8 +63,8 @@ * */ -#ifndef _MACH_VM_PROT_H_ -#define _MACH_VM_PROT_H_ +#ifndef _MACH_VM_PROT_H_ +#define _MACH_VM_PROT_H_ /* * Types defined: @@ -72,29 +72,29 @@ * vm_prot_t VM protection values. 
*/ -typedef int vm_prot_t; +typedef int vm_prot_t; /* * Protection values, defined as bits within the vm_prot_t type */ -#define VM_PROT_NONE ((vm_prot_t) 0x00) +#define VM_PROT_NONE ((vm_prot_t) 0x00) -#define VM_PROT_READ ((vm_prot_t) 0x01) /* read permission */ -#define VM_PROT_WRITE ((vm_prot_t) 0x02) /* write permission */ -#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) /* execute permission */ +#define VM_PROT_READ ((vm_prot_t) 0x01) /* read permission */ +#define VM_PROT_WRITE ((vm_prot_t) 0x02) /* write permission */ +#define VM_PROT_EXECUTE ((vm_prot_t) 0x04) /* execute permission */ /* * The default protection for newly-created virtual memory */ -#define VM_PROT_DEFAULT (VM_PROT_READ|VM_PROT_WRITE) +#define VM_PROT_DEFAULT (VM_PROT_READ|VM_PROT_WRITE) /* * The maximum privileges possible, for parameter checking. */ -#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) +#define VM_PROT_ALL (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE) /* * An invalid protection value. @@ -103,15 +103,15 @@ typedef int vm_prot_t; * looks like VM_PROT_ALL and then some. */ -#define VM_PROT_NO_CHANGE ((vm_prot_t) 0x08) +#define VM_PROT_NO_CHANGE ((vm_prot_t) 0x08) -/* +/* * When a caller finds that he cannot obtain write permission on a * mapped entry, the following flag can be used. The entry will * be made "needs copy" effectively copying the object (using COW), * and write permission will be added to the maximum protections - * for the associated entry. - */ + * for the associated entry. + */ #define VM_PROT_COPY ((vm_prot_t) 0x10) @@ -127,7 +127,7 @@ typedef int vm_prot_t; * walking down the shadow chain. */ -#define VM_PROT_WANTS_COPY ((vm_prot_t) 0x10) +#define VM_PROT_WANTS_COPY ((vm_prot_t) 0x10) #ifdef PRIVATE /* @@ -135,15 +135,15 @@ typedef int vm_prot_t; * code signature. */ -#define VM_PROT_TRUSTED ((vm_prot_t) 0x20) +#define VM_PROT_TRUSTED ((vm_prot_t) 0x20) #endif /* PRIVATE */ /* - * Another invalid protection value. + * Another invalid protection value. * Indicates that the other protection bits are to be applied as a mask * against the actual protection bits of the map entry. */ -#define VM_PROT_IS_MASK ((vm_prot_t) 0x40) +#define VM_PROT_IS_MASK ((vm_prot_t) 0x40) /* * Another invalid protection value to support execute-only protection. @@ -154,7 +154,7 @@ typedef int vm_prot_t; * the memory should be executable and explicitly not readable. It will * be ignored on platforms that do not support this type of protection. */ -#define VM_PROT_STRIP_READ ((vm_prot_t) 0x80) -#define VM_PROT_EXECUTE_ONLY (VM_PROT_EXECUTE|VM_PROT_STRIP_READ) +#define VM_PROT_STRIP_READ ((vm_prot_t) 0x80) +#define VM_PROT_EXECUTE_ONLY (VM_PROT_EXECUTE|VM_PROT_STRIP_READ) -#endif /* _MACH_VM_PROT_H_ */ +#endif /* _MACH_VM_PROT_H_ */ diff --git a/osfmk/mach/vm_purgable.h b/osfmk/mach/vm_purgable.h index 4ae0892b8..80ea756d9 100644 --- a/osfmk/mach/vm_purgable.h +++ b/osfmk/mach/vm_purgable.h @@ -2,7 +2,7 @@ * Copyright (c) 2003-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,27 +22,27 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * Virtual memory map purgeable object definitions. - * Objects that will be needed in the future (forward cached objects) should be queued LIFO. + * Objects that will be needed in the future (forward cached objects) should be queued LIFO. * Objects that have been used and are cached for reuse (backward cached) should be queued FIFO. * Every user of purgeable memory is entitled to using the highest volatile group (7). * Only if a client wants some of its objects to definitely be purged earlier, it can put those in - * another group. This could be used to make all FIFO objects (in the lower group) go away before + * another group. This could be used to make all FIFO objects (in the lower group) go away before * any LIFO objects (in the higher group) go away. * Objects that should not get any chance to stay around can be marked as "obsolete". They will * be emptied before any other objects or pages are reclaimed. Obsolete objects are not emptied * in any particular order. - * 'purgeable' is recognized as the correct spelling. For historical reasons, definitions + * 'purgeable' is recognized as the correct spelling. For historical reasons, definitions * in this file are spelled 'purgable'. */ -#ifndef _MACH_VM_PURGABLE_H_ -#define _MACH_VM_PURGABLE_H_ +#ifndef _MACH_VM_PURGABLE_H_ +#define _MACH_VM_PURGABLE_H_ /* * Types defined: @@ -50,14 +50,14 @@ * vm_purgable_t purgeable object control codes. */ -typedef int vm_purgable_t; +typedef int vm_purgable_t; /* * Enumeration of valid values for vm_purgable_t. */ -#define VM_PURGABLE_SET_STATE ((vm_purgable_t) 0) /* set state of purgeable object */ -#define VM_PURGABLE_GET_STATE ((vm_purgable_t) 1) /* get state of purgeable object */ -#define VM_PURGABLE_PURGE_ALL ((vm_purgable_t) 2) /* purge all volatile objects now */ +#define VM_PURGABLE_SET_STATE ((vm_purgable_t) 0) /* set state of purgeable object */ +#define VM_PURGABLE_GET_STATE ((vm_purgable_t) 1) /* get state of purgeable object */ +#define VM_PURGABLE_PURGE_ALL ((vm_purgable_t) 2) /* purge all volatile objects now */ #define VM_PURGABLE_SET_STATE_FROM_KERNEL ((vm_purgable_t) 3) /* set state from kernel */ /* @@ -69,45 +69,45 @@ typedef int vm_purgable_t; * +-----+--+-----+--+----+-+-+---+---+---+ * " ": unused (i.e. 
reserved) * STA: purgeable state - * see: VM_PURGABLE_NONVOLATILE=0 to VM_PURGABLE_DENY=3 + * see: VM_PURGABLE_NONVOLATILE=0 to VM_PURGABLE_DENY=3 * ORD: order - * see:VM_VOLATILE_ORDER_* + * see:VM_VOLATILE_ORDER_* * B: behavior - * see: VM_PURGABLE_BEHAVIOR_* + * see: VM_PURGABLE_BEHAVIOR_* * GRP: group - * see: VM_VOLATILE_GROUP_* + * see: VM_VOLATILE_GROUP_* * DEBUG: debug - * see: VM_PURGABLE_DEBUG_* + * see: VM_PURGABLE_DEBUG_* * NA: no aging - * see: VM_PURGABLE_NO_AGING* + * see: VM_PURGABLE_NO_AGING* */ -#define VM_PURGABLE_NO_AGING_SHIFT 16 -#define VM_PURGABLE_NO_AGING_MASK (0x1 << VM_PURGABLE_NO_AGING_SHIFT) -#define VM_PURGABLE_NO_AGING (0x1 << VM_PURGABLE_NO_AGING_SHIFT) +#define VM_PURGABLE_NO_AGING_SHIFT 16 +#define VM_PURGABLE_NO_AGING_MASK (0x1 << VM_PURGABLE_NO_AGING_SHIFT) +#define VM_PURGABLE_NO_AGING (0x1 << VM_PURGABLE_NO_AGING_SHIFT) -#define VM_PURGABLE_DEBUG_SHIFT 12 -#define VM_PURGABLE_DEBUG_MASK (0x3 << VM_PURGABLE_DEBUG_SHIFT) -#define VM_PURGABLE_DEBUG_EMPTY (0x1 << VM_PURGABLE_DEBUG_SHIFT) -#define VM_PURGABLE_DEBUG_FAULT (0x2 << VM_PURGABLE_DEBUG_SHIFT) +#define VM_PURGABLE_DEBUG_SHIFT 12 +#define VM_PURGABLE_DEBUG_MASK (0x3 << VM_PURGABLE_DEBUG_SHIFT) +#define VM_PURGABLE_DEBUG_EMPTY (0x1 << VM_PURGABLE_DEBUG_SHIFT) +#define VM_PURGABLE_DEBUG_FAULT (0x2 << VM_PURGABLE_DEBUG_SHIFT) /* * Volatile memory ordering groups (group zero objects are purged before group 1, etc... * It is implementation dependent as to whether these groups are global or per-address space. * (for the moment, they are global). */ -#define VM_VOLATILE_GROUP_SHIFT 8 -#define VM_VOLATILE_GROUP_MASK (7 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_SHIFT 8 +#define VM_VOLATILE_GROUP_MASK (7 << VM_VOLATILE_GROUP_SHIFT) #define VM_VOLATILE_GROUP_DEFAULT VM_VOLATILE_GROUP_0 -#define VM_VOLATILE_GROUP_0 (0 << VM_VOLATILE_GROUP_SHIFT) -#define VM_VOLATILE_GROUP_1 (1 << VM_VOLATILE_GROUP_SHIFT) -#define VM_VOLATILE_GROUP_2 (2 << VM_VOLATILE_GROUP_SHIFT) -#define VM_VOLATILE_GROUP_3 (3 << VM_VOLATILE_GROUP_SHIFT) -#define VM_VOLATILE_GROUP_4 (4 << VM_VOLATILE_GROUP_SHIFT) -#define VM_VOLATILE_GROUP_5 (5 << VM_VOLATILE_GROUP_SHIFT) -#define VM_VOLATILE_GROUP_6 (6 << VM_VOLATILE_GROUP_SHIFT) -#define VM_VOLATILE_GROUP_7 (7 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_0 (0 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_1 (1 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_2 (2 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_3 (3 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_4 (4 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_5 (5 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_6 (6 << VM_VOLATILE_GROUP_SHIFT) +#define VM_VOLATILE_GROUP_7 (7 << VM_VOLATILE_GROUP_SHIFT) /* * Purgeable behavior @@ -126,37 +126,37 @@ typedef int vm_purgable_t; * to be purged. * - Input only, not returned on state queries. 
*/ -#define VM_PURGABLE_ORDERING_SHIFT 5 -#define VM_PURGABLE_ORDERING_MASK (1 << VM_PURGABLE_ORDERING_SHIFT) -#define VM_PURGABLE_ORDERING_OBSOLETE (1 << VM_PURGABLE_ORDERING_SHIFT) -#define VM_PURGABLE_ORDERING_NORMAL (0 << VM_PURGABLE_ORDERING_SHIFT) +#define VM_PURGABLE_ORDERING_SHIFT 5 +#define VM_PURGABLE_ORDERING_MASK (1 << VM_PURGABLE_ORDERING_SHIFT) +#define VM_PURGABLE_ORDERING_OBSOLETE (1 << VM_PURGABLE_ORDERING_SHIFT) +#define VM_PURGABLE_ORDERING_NORMAL (0 << VM_PURGABLE_ORDERING_SHIFT) /* * Obsolete parameter - do not use */ -#define VM_VOLATILE_ORDER_SHIFT 4 -#define VM_VOLATILE_ORDER_MASK (1 << VM_VOLATILE_ORDER_SHIFT) -#define VM_VOLATILE_MAKE_FIRST_IN_GROUP (1 << VM_VOLATILE_ORDER_SHIFT) -#define VM_VOLATILE_MAKE_LAST_IN_GROUP (0 << VM_VOLATILE_ORDER_SHIFT) +#define VM_VOLATILE_ORDER_SHIFT 4 +#define VM_VOLATILE_ORDER_MASK (1 << VM_VOLATILE_ORDER_SHIFT) +#define VM_VOLATILE_MAKE_FIRST_IN_GROUP (1 << VM_VOLATILE_ORDER_SHIFT) +#define VM_VOLATILE_MAKE_LAST_IN_GROUP (0 << VM_VOLATILE_ORDER_SHIFT) /* * Valid states of a purgeable object. */ -#define VM_PURGABLE_STATE_MIN 0 /* minimum purgeable object state value */ -#define VM_PURGABLE_STATE_MAX 3 /* maximum purgeable object state value */ -#define VM_PURGABLE_STATE_MASK 3 /* mask to separate state from group */ - -#define VM_PURGABLE_NONVOLATILE 0 /* purgeable object is non-volatile */ -#define VM_PURGABLE_VOLATILE 1 /* purgeable object is volatile */ -#define VM_PURGABLE_EMPTY 2 /* purgeable object is volatile and empty */ -#define VM_PURGABLE_DENY 3 /* (mark) object not purgeable */ - -#define VM_PURGABLE_ALL_MASKS (VM_PURGABLE_STATE_MASK | \ - VM_VOLATILE_ORDER_MASK | \ - VM_PURGABLE_ORDERING_MASK | \ - VM_PURGABLE_BEHAVIOR_MASK | \ - VM_VOLATILE_GROUP_MASK | \ - VM_PURGABLE_DEBUG_MASK | \ - VM_PURGABLE_NO_AGING_MASK) -#endif /* _MACH_VM_PURGABLE_H_ */ +#define VM_PURGABLE_STATE_MIN 0 /* minimum purgeable object state value */ +#define VM_PURGABLE_STATE_MAX 3 /* maximum purgeable object state value */ +#define VM_PURGABLE_STATE_MASK 3 /* mask to separate state from group */ + +#define VM_PURGABLE_NONVOLATILE 0 /* purgeable object is non-volatile */ +#define VM_PURGABLE_VOLATILE 1 /* purgeable object is volatile */ +#define VM_PURGABLE_EMPTY 2 /* purgeable object is volatile and empty */ +#define VM_PURGABLE_DENY 3 /* (mark) object not purgeable */ + +#define VM_PURGABLE_ALL_MASKS (VM_PURGABLE_STATE_MASK | \ + VM_VOLATILE_ORDER_MASK | \ + VM_PURGABLE_ORDERING_MASK | \ + VM_PURGABLE_BEHAVIOR_MASK | \ + VM_VOLATILE_GROUP_MASK | \ + VM_PURGABLE_DEBUG_MASK | \ + VM_PURGABLE_NO_AGING_MASK) +#endif /* _MACH_VM_PURGABLE_H_ */ diff --git a/osfmk/mach/vm_region.h b/osfmk/mach/vm_region.h index 22744725a..416699cb8 100644 --- a/osfmk/mach/vm_region.h +++ b/osfmk/mach/vm_region.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
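A sketch of the state and group bits above in use, assuming 'addr' was allocated with VM_FLAGS_PURGABLE (defined later in vm_statistics.h); cache_release/cache_reclaim are hypothetical names, vm_purgable_control() is the Mach routine that takes these words:

#include <mach/mach.h>

/*
 * Park a cache buffer in the highest volatile group (7, the group the
 * comment above says every client is entitled to) so the kernel may
 * purge it under memory pressure.
 */
static kern_return_t
cache_release(vm_address_t addr)
{
    int state = VM_PURGABLE_VOLATILE | VM_VOLATILE_GROUP_7;

    return vm_purgable_control(mach_task_self(), addr,
        VM_PURGABLE_SET_STATE, &state);
}

/*
 * Take the buffer back; vm_purgable_control() returns the old state
 * in the same word, so VM_PURGABLE_EMPTY there means the contents
 * were purged and must be regenerated.
 */
static boolean_t
cache_reclaim(vm_address_t addr)
{
    int state = VM_PURGABLE_NONVOLATILE;

    if (vm_purgable_control(mach_task_self(), addr,
        VM_PURGABLE_SET_STATE, &state) != KERN_SUCCESS) {
        return FALSE;
    }
    return (state & VM_PURGABLE_STATE_MASK) != VM_PURGABLE_EMPTY;
}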
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -35,7 +35,7 @@ * */ -#ifndef _MACH_VM_REGION_H_ +#ifndef _MACH_VM_REGION_H_ #define _MACH_VM_REGION_H_ #include @@ -64,28 +64,28 @@ typedef uint32_t vm32_object_id_t; */ #define VM_REGION_INFO_MAX (1024) -typedef int *vm_region_info_t; -typedef int *vm_region_info_64_t; -typedef int *vm_region_recurse_info_t; -typedef int *vm_region_recurse_info_64_t; -typedef int vm_region_flavor_t; -typedef int vm_region_info_data_t[VM_REGION_INFO_MAX]; - -#define VM_REGION_BASIC_INFO_64 9 +typedef int *vm_region_info_t; +typedef int *vm_region_info_64_t; +typedef int *vm_region_recurse_info_t; +typedef int *vm_region_recurse_info_64_t; +typedef int vm_region_flavor_t; +typedef int vm_region_info_data_t[VM_REGION_INFO_MAX]; + +#define VM_REGION_BASIC_INFO_64 9 struct vm_region_basic_info_64 { - vm_prot_t protection; - vm_prot_t max_protection; - vm_inherit_t inheritance; - boolean_t shared; - boolean_t reserved; - memory_object_offset_t offset; - vm_behavior_t behavior; - unsigned short user_wired_count; + vm_prot_t protection; + vm_prot_t max_protection; + vm_inherit_t inheritance; + boolean_t shared; + boolean_t reserved; + memory_object_offset_t offset; + vm_behavior_t behavior; + unsigned short user_wired_count; }; -typedef struct vm_region_basic_info_64 *vm_region_basic_info_64_t; -typedef struct vm_region_basic_info_64 vm_region_basic_info_data_64_t; +typedef struct vm_region_basic_info_64 *vm_region_basic_info_64_t; +typedef struct vm_region_basic_info_64 vm_region_basic_info_data_64_t; -#define VM_REGION_BASIC_INFO_COUNT_64 ((mach_msg_type_number_t) \ +#define VM_REGION_BASIC_INFO_COUNT_64 ((mach_msg_type_number_t) \ (sizeof(vm_region_basic_info_data_64_t)/sizeof(int))) /* @@ -93,7 +93,7 @@ typedef struct vm_region_basic_info_64 vm_region_basic_info_data_64_t; * automatically converts it to a VM_REGION_BASIC_INFO_64. * Please use that explicitly instead. */ -#define VM_REGION_BASIC_INFO 10 +#define VM_REGION_BASIC_INFO 10 /* * This is the legacy basic info structure. It is @@ -101,18 +101,18 @@ typedef struct vm_region_basic_info_64 vm_region_basic_info_data_64_t; * offset back - too small for many larger objects (e.g. files). 
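A sketch of the 64-bit basic-info flavor above being queried from user space; show_region is a hypothetical name, mach_vm_region() the routine that fills the structure:

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

/*
 * Find the region containing (or next after) 'where' and report its
 * current and maximum protections.  The object_name port is a
 * vestigial out-parameter but must still be supplied.
 */
static void
show_region(mach_vm_address_t where)
{
    mach_vm_address_t addr = where;
    mach_vm_size_t size = 0;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t object_name = MACH_PORT_NULL;

    if (mach_vm_region(mach_task_self(), &addr, &size,
        VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
        &count, &object_name) == KERN_SUCCESS) {
        printf("0x%llx-0x%llx prot %d/%d\n",
            (unsigned long long)addr,
            (unsigned long long)(addr + size),
            info.protection, info.max_protection);
    }
}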
*/ struct vm_region_basic_info { - vm_prot_t protection; - vm_prot_t max_protection; - vm_inherit_t inheritance; - boolean_t shared; - boolean_t reserved; - uint32_t offset; /* too small for a real offset */ - vm_behavior_t behavior; - unsigned short user_wired_count; + vm_prot_t protection; + vm_prot_t max_protection; + vm_inherit_t inheritance; + boolean_t shared; + boolean_t reserved; + uint32_t offset; /* too small for a real offset */ + vm_behavior_t behavior; + unsigned short user_wired_count; }; -typedef struct vm_region_basic_info *vm_region_basic_info_t; -typedef struct vm_region_basic_info vm_region_basic_info_data_t; +typedef struct vm_region_basic_info *vm_region_basic_info_t; +typedef struct vm_region_basic_info vm_region_basic_info_data_t; #define VM_REGION_BASIC_INFO_COUNT ((mach_msg_type_number_t) \ (sizeof(vm_region_basic_info_data_t)/sizeof(int))) @@ -126,7 +126,7 @@ typedef struct vm_region_basic_info vm_region_basic_info_data_t; #define SM_SHARED_ALIASED 7 #define SM_LARGE_PAGE 8 -/* +/* * For submap info, the SM flags above are overlayed when a submap * is encountered. The field denotes whether or not machine level mapping * information is being shared. PTE's etc. When such sharing is taking @@ -135,18 +135,18 @@ typedef struct vm_region_basic_info vm_region_basic_info_data_t; */ #ifdef MACH_KERNEL_PRIVATE -#define VM_REGION_EXTENDED_INFO__legacy 11 +#define VM_REGION_EXTENDED_INFO__legacy 11 struct vm_region_extended_info__legacy { - vm_prot_t protection; - unsigned int user_tag; - unsigned int pages_resident; - unsigned int pages_shared_now_private; - unsigned int pages_swapped_out; - unsigned int pages_dirtied; - unsigned int ref_count; - unsigned short shadow_depth; - unsigned char external_pager; - unsigned char share_mode; + vm_prot_t protection; + unsigned int user_tag; + unsigned int pages_resident; + unsigned int pages_shared_now_private; + unsigned int pages_swapped_out; + unsigned int pages_dirtied; + unsigned int ref_count; + unsigned short shadow_depth; + unsigned char external_pager; + unsigned char share_mode; /* * XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX * DO NOT EXTEND THIS DATA STRUCTURE. 
@@ -154,57 +154,57 @@ struct vm_region_extended_info__legacy { * XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX XXX */ }; -#define VM_REGION_EXTENDED_INFO_COUNT__legacy \ - ((mach_msg_type_number_t) \ +#define VM_REGION_EXTENDED_INFO_COUNT__legacy \ + ((mach_msg_type_number_t) \ (sizeof (struct vm_region_extended_info__legacy) / \ sizeof (natural_t))) #endif /* MACH_KERNEL_PRIVATE */ -#define VM_REGION_EXTENDED_INFO 13 +#define VM_REGION_EXTENDED_INFO 13 struct vm_region_extended_info { - vm_prot_t protection; - unsigned int user_tag; - unsigned int pages_resident; - unsigned int pages_shared_now_private; - unsigned int pages_swapped_out; - unsigned int pages_dirtied; - unsigned int ref_count; - unsigned short shadow_depth; - unsigned char external_pager; - unsigned char share_mode; - unsigned int pages_reusable; + vm_prot_t protection; + unsigned int user_tag; + unsigned int pages_resident; + unsigned int pages_shared_now_private; + unsigned int pages_swapped_out; + unsigned int pages_dirtied; + unsigned int ref_count; + unsigned short shadow_depth; + unsigned char external_pager; + unsigned char share_mode; + unsigned int pages_reusable; }; -typedef struct vm_region_extended_info *vm_region_extended_info_t; -typedef struct vm_region_extended_info vm_region_extended_info_data_t; -#define VM_REGION_EXTENDED_INFO_COUNT \ - ((mach_msg_type_number_t) \ +typedef struct vm_region_extended_info *vm_region_extended_info_t; +typedef struct vm_region_extended_info vm_region_extended_info_data_t; +#define VM_REGION_EXTENDED_INFO_COUNT \ + ((mach_msg_type_number_t) \ (sizeof (vm_region_extended_info_data_t) / sizeof (natural_t))) -#define VM_REGION_TOP_INFO 12 +#define VM_REGION_TOP_INFO 12 struct vm_region_top_info { - unsigned int obj_id; - unsigned int ref_count; - unsigned int private_pages_resident; - unsigned int shared_pages_resident; - unsigned char share_mode; + unsigned int obj_id; + unsigned int ref_count; + unsigned int private_pages_resident; + unsigned int shared_pages_resident; + unsigned char share_mode; }; -typedef struct vm_region_top_info *vm_region_top_info_t; -typedef struct vm_region_top_info vm_region_top_info_data_t; +typedef struct vm_region_top_info *vm_region_top_info_t; +typedef struct vm_region_top_info vm_region_top_info_data_t; -#define VM_REGION_TOP_INFO_COUNT \ - ((mach_msg_type_number_t) \ +#define VM_REGION_TOP_INFO_COUNT \ + ((mach_msg_type_number_t) \ (sizeof(vm_region_top_info_data_t) / sizeof(natural_t))) -/* +/* * vm_region_submap_info will return information on a submap or object. * The user supplies a nesting level on the call. When a walk of the * user's map is done and a submap is encountered, the nesting count is @@ -221,65 +221,65 @@ typedef struct vm_region_top_info vm_region_top_info_data_t; * * Object only fields are filled in through a walking of the object shadow * chain (where one is present), and a walking of the resident page queue. 
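For the VM_REGION_TOP_INFO flavor just defined, a hedged sketch of the query side; region_top_info is an invented wrapper around the same mach_vm_region() call, and the private/shared split it returns is the per-region accounting that memory-inspection tools typically surface:

#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
region_top_info(mach_vm_address_t *addr, vm_region_top_info_data_t *out)
{
    mach_vm_size_t size = 0;
    mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
    mach_port_t object_name = MACH_PORT_NULL;

    /* On success, out->private_pages_resident and
     * out->shared_pages_resident describe the top-level VM object. */
    return mach_vm_region(mach_task_self(), addr, &size,
        VM_REGION_TOP_INFO, (vm_region_info_t)out,
        &count, &object_name);
}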
- * + * */ struct vm_region_submap_info { - vm_prot_t protection; /* present access protection */ - vm_prot_t max_protection; /* max avail through vm_prot */ - vm_inherit_t inheritance;/* behavior of map/obj on fork */ - uint32_t offset; /* offset into object/map */ - unsigned int user_tag; /* user tag on map entry */ - unsigned int pages_resident; /* only valid for objects */ - unsigned int pages_shared_now_private; /* only for objects */ - unsigned int pages_swapped_out; /* only for objects */ - unsigned int pages_dirtied; /* only for objects */ - unsigned int ref_count; /* obj/map mappers, etc */ - unsigned short shadow_depth; /* only for obj */ - unsigned char external_pager; /* only for obj */ - unsigned char share_mode; /* see enumeration */ - boolean_t is_submap; /* submap vs obj */ - vm_behavior_t behavior; /* access behavior hint */ - vm32_object_id_t object_id; /* obj/map name, not a handle */ - unsigned short user_wired_count; + vm_prot_t protection; /* present access protection */ + vm_prot_t max_protection; /* max avail through vm_prot */ + vm_inherit_t inheritance;/* behavior of map/obj on fork */ + uint32_t offset; /* offset into object/map */ + unsigned int user_tag; /* user tag on map entry */ + unsigned int pages_resident; /* only valid for objects */ + unsigned int pages_shared_now_private; /* only for objects */ + unsigned int pages_swapped_out; /* only for objects */ + unsigned int pages_dirtied; /* only for objects */ + unsigned int ref_count; /* obj/map mappers, etc */ + unsigned short shadow_depth; /* only for obj */ + unsigned char external_pager; /* only for obj */ + unsigned char share_mode; /* see enumeration */ + boolean_t is_submap; /* submap vs obj */ + vm_behavior_t behavior; /* access behavior hint */ + vm32_object_id_t object_id; /* obj/map name, not a handle */ + unsigned short user_wired_count; }; -typedef struct vm_region_submap_info *vm_region_submap_info_t; -typedef struct vm_region_submap_info vm_region_submap_info_data_t; +typedef struct vm_region_submap_info *vm_region_submap_info_t; +typedef struct vm_region_submap_info vm_region_submap_info_data_t; -#define VM_REGION_SUBMAP_INFO_COUNT \ - ((mach_msg_type_number_t) \ +#define VM_REGION_SUBMAP_INFO_COUNT \ + ((mach_msg_type_number_t) \ (sizeof(vm_region_submap_info_data_t) / sizeof(natural_t))) struct vm_region_submap_info_64 { - vm_prot_t protection; /* present access protection */ - vm_prot_t max_protection; /* max avail through vm_prot */ - vm_inherit_t inheritance;/* behavior of map/obj on fork */ - memory_object_offset_t offset; /* offset into object/map */ - unsigned int user_tag; /* user tag on map entry */ - unsigned int pages_resident; /* only valid for objects */ - unsigned int pages_shared_now_private; /* only for objects */ - unsigned int pages_swapped_out; /* only for objects */ - unsigned int pages_dirtied; /* only for objects */ - unsigned int ref_count; /* obj/map mappers, etc */ - unsigned short shadow_depth; /* only for obj */ - unsigned char external_pager; /* only for obj */ - unsigned char share_mode; /* see enumeration */ - boolean_t is_submap; /* submap vs obj */ - vm_behavior_t behavior; /* access behavior hint */ - vm32_object_id_t object_id; /* obj/map name, not a handle */ - unsigned short user_wired_count; - unsigned int pages_reusable; + vm_prot_t protection; /* present access protection */ + vm_prot_t max_protection; /* max avail through vm_prot */ + vm_inherit_t inheritance;/* behavior of map/obj on fork */ + memory_object_offset_t offset; /* offset into object/map */ 
+ unsigned int user_tag; /* user tag on map entry */ + unsigned int pages_resident; /* only valid for objects */ + unsigned int pages_shared_now_private; /* only for objects */ + unsigned int pages_swapped_out; /* only for objects */ + unsigned int pages_dirtied; /* only for objects */ + unsigned int ref_count; /* obj/map mappers, etc */ + unsigned short shadow_depth; /* only for obj */ + unsigned char external_pager; /* only for obj */ + unsigned char share_mode; /* see enumeration */ + boolean_t is_submap; /* submap vs obj */ + vm_behavior_t behavior; /* access behavior hint */ + vm32_object_id_t object_id; /* obj/map name, not a handle */ + unsigned short user_wired_count; + unsigned int pages_reusable; }; -typedef struct vm_region_submap_info_64 *vm_region_submap_info_64_t; -typedef struct vm_region_submap_info_64 vm_region_submap_info_data_64_t; +typedef struct vm_region_submap_info_64 *vm_region_submap_info_64_t; +typedef struct vm_region_submap_info_64 vm_region_submap_info_data_64_t; -#define VM_REGION_SUBMAP_INFO_V1_SIZE \ +#define VM_REGION_SUBMAP_INFO_V1_SIZE \ (sizeof (vm_region_submap_info_data_64_t)) -#define VM_REGION_SUBMAP_INFO_V0_SIZE \ +#define VM_REGION_SUBMAP_INFO_V0_SIZE \ (VM_REGION_SUBMAP_INFO_V1_SIZE - \ - sizeof (unsigned int) /* pages_reusable */) + sizeof (unsigned int) /* pages_reusable */ ) #define VM_REGION_SUBMAP_INFO_V1_COUNT_64 \ ((mach_msg_type_number_t) \ @@ -289,29 +289,29 @@ typedef struct vm_region_submap_info_64 vm_region_submap_info_data_64_t; (VM_REGION_SUBMAP_INFO_V0_SIZE / sizeof (natural_t))) /* set this to the latest version */ -#define VM_REGION_SUBMAP_INFO_COUNT_64 VM_REGION_SUBMAP_INFO_V1_COUNT_64 +#define VM_REGION_SUBMAP_INFO_COUNT_64 VM_REGION_SUBMAP_INFO_V1_COUNT_64 struct vm_region_submap_short_info_64 { - vm_prot_t protection; /* present access protection */ - vm_prot_t max_protection; /* max avail through vm_prot */ - vm_inherit_t inheritance;/* behavior of map/obj on fork */ - memory_object_offset_t offset; /* offset into object/map */ - unsigned int user_tag; /* user tag on map entry */ - unsigned int ref_count; /* obj/map mappers, etc */ - unsigned short shadow_depth; /* only for obj */ - unsigned char external_pager; /* only for obj */ - unsigned char share_mode; /* see enumeration */ - boolean_t is_submap; /* submap vs obj */ - vm_behavior_t behavior; /* access behavior hint */ - vm32_object_id_t object_id; /* obj/map name, not a handle */ - unsigned short user_wired_count; + vm_prot_t protection; /* present access protection */ + vm_prot_t max_protection; /* max avail through vm_prot */ + vm_inherit_t inheritance;/* behavior of map/obj on fork */ + memory_object_offset_t offset; /* offset into object/map */ + unsigned int user_tag; /* user tag on map entry */ + unsigned int ref_count; /* obj/map mappers, etc */ + unsigned short shadow_depth; /* only for obj */ + unsigned char external_pager; /* only for obj */ + unsigned char share_mode; /* see enumeration */ + boolean_t is_submap; /* submap vs obj */ + vm_behavior_t behavior; /* access behavior hint */ + vm32_object_id_t object_id; /* obj/map name, not a handle */ + unsigned short user_wired_count; }; -typedef struct vm_region_submap_short_info_64 *vm_region_submap_short_info_64_t; -typedef struct vm_region_submap_short_info_64 vm_region_submap_short_info_data_64_t; +typedef struct vm_region_submap_short_info_64 *vm_region_submap_short_info_64_t; +typedef struct vm_region_submap_short_info_64 vm_region_submap_short_info_data_64_t; -#define VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 \ 
- ((mach_msg_type_number_t) \ +#define VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 \ + ((mach_msg_type_number_t) \ (sizeof (vm_region_submap_short_info_data_64_t) / sizeof (natural_t))) @@ -322,48 +322,48 @@ struct mach_vm_read_entry { }; struct vm_read_entry { - vm_address_t address; - vm_size_t size; + vm_address_t address; + vm_size_t size; }; #ifdef VM32_SUPPORT struct vm32_read_entry { - vm32_address_t address; - vm32_size_t size; + vm32_address_t address; + vm32_size_t size; }; #endif #define VM_MAP_ENTRY_MAX (256) -typedef struct mach_vm_read_entry mach_vm_read_entry_t[VM_MAP_ENTRY_MAX]; -typedef struct vm_read_entry vm_read_entry_t[VM_MAP_ENTRY_MAX]; +typedef struct mach_vm_read_entry mach_vm_read_entry_t[VM_MAP_ENTRY_MAX]; +typedef struct vm_read_entry vm_read_entry_t[VM_MAP_ENTRY_MAX]; #ifdef VM32_SUPPORT -typedef struct vm32_read_entry vm32_read_entry_t[VM_MAP_ENTRY_MAX]; +typedef struct vm32_read_entry vm32_read_entry_t[VM_MAP_ENTRY_MAX]; #endif #pragma pack() -#define VM_PAGE_INFO_MAX +#define VM_PAGE_INFO_MAX typedef int *vm_page_info_t; typedef int vm_page_info_data_t[VM_PAGE_INFO_MAX]; typedef int vm_page_info_flavor_t; -#define VM_PAGE_INFO_BASIC 1 +#define VM_PAGE_INFO_BASIC 1 struct vm_page_info_basic { - int disposition; - int ref_count; - vm_object_id_t object_id; - memory_object_offset_t offset; - int depth; - int __pad; /* pad to 64-bit boundary */ + int disposition; + int ref_count; + vm_object_id_t object_id; + memory_object_offset_t offset; + int depth; + int __pad; /* pad to 64-bit boundary */ }; -typedef struct vm_page_info_basic *vm_page_info_basic_t; -typedef struct vm_page_info_basic vm_page_info_basic_data_t; +typedef struct vm_page_info_basic *vm_page_info_basic_t; +typedef struct vm_page_info_basic vm_page_info_basic_data_t; -#define VM_PAGE_INFO_BASIC_COUNT ((mach_msg_type_number_t) \ +#define VM_PAGE_INFO_BASIC_COUNT ((mach_msg_type_number_t) \ (sizeof(vm_page_info_basic_data_t)/sizeof(int))) -#endif /*_MACH_VM_REGION_H_*/ +#endif /*_MACH_VM_REGION_H_*/ diff --git a/osfmk/mach/vm_statistics.h b/osfmk/mach/vm_statistics.h index e4552b60f..9e72c81d2 100644 --- a/osfmk/mach/vm_statistics.h +++ b/osfmk/mach/vm_statistics.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. 
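A sketch of the submap recursion protocol described in the comments above, using the short flavor; region_at_depth is a hypothetical wrapper, mach_vm_region_recurse() the underlying routine. The caller owns the nesting depth: when the returned info says is_submap, it bumps *depth and re-queries the same address to descend; otherwise it advances to *addr + *size:

#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
region_at_depth(mach_vm_address_t *addr, mach_vm_size_t *size,
    natural_t *depth, vm_region_submap_short_info_data_64_t *out)
{
    mach_msg_type_number_t count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;

    /* size comes back so the caller can step past the region */
    return mach_vm_region_recurse(mach_task_self(), addr, size, depth,
        (vm_region_recurse_info_t)out, &count);
}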
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,8 +63,8 @@ * */ -#ifndef _MACH_VM_STATISTICS_H_ -#define _MACH_VM_STATISTICS_H_ +#ifndef _MACH_VM_STATISTICS_H_ +#define _MACH_VM_STATISTICS_H_ #include @@ -73,31 +73,31 @@ * vm_statistics * * History: - * rev0 - original structure. - * rev1 - added purgable info (purgable_count and purges). - * rev2 - added speculative_count. + * rev0 - original structure. + * rev1 - added purgable info (purgable_count and purges). + * rev2 - added speculative_count. * * Note: you cannot add any new fields to this structure. Add them below in - * vm_statistics64. + * vm_statistics64. */ struct vm_statistics { - natural_t free_count; /* # of pages free */ - natural_t active_count; /* # of pages active */ - natural_t inactive_count; /* # of pages inactive */ - natural_t wire_count; /* # of pages wired down */ - natural_t zero_fill_count; /* # of zero fill pages */ - natural_t reactivations; /* # of pages reactivated */ - natural_t pageins; /* # of pageins */ - natural_t pageouts; /* # of pageouts */ - natural_t faults; /* # of faults */ - natural_t cow_faults; /* # of copy-on-writes */ - natural_t lookups; /* object cache lookups */ - natural_t hits; /* object cache hits */ + natural_t free_count; /* # of pages free */ + natural_t active_count; /* # of pages active */ + natural_t inactive_count; /* # of pages inactive */ + natural_t wire_count; /* # of pages wired down */ + natural_t zero_fill_count; /* # of zero fill pages */ + natural_t reactivations; /* # of pages reactivated */ + natural_t pageins; /* # of pageins */ + natural_t pageouts; /* # of pageouts */ + natural_t faults; /* # of faults */ + natural_t cow_faults; /* # of copy-on-writes */ + natural_t lookups; /* object cache lookups */ + natural_t hits; /* object cache hits */ /* added for rev1 */ - natural_t purgeable_count; /* # of pages purgeable */ - natural_t purges; /* # of pages purged */ + natural_t purgeable_count; /* # of pages purgeable */ + natural_t purges; /* # of pages purged */ /* added for rev2 */ /* @@ -106,23 +106,23 @@ struct vm_statistics { * used to hold data that was read speculatively from disk but * haven't actually been used by anyone so far. */ - natural_t speculative_count; /* # of pages speculative */ + natural_t speculative_count; /* # of pages speculative */ }; /* Used by all architectures */ -typedef struct vm_statistics *vm_statistics_t; -typedef struct vm_statistics vm_statistics_data_t; +typedef struct vm_statistics *vm_statistics_t; +typedef struct vm_statistics vm_statistics_data_t; -/* +/* * vm_statistics64 * * History: - * rev0 - original structure. 
- * rev1 - added purgable info (purgable_count and purges). - * rev2 - added speculative_count. + * rev0 - original structure. + * rev1 - added purgable info (purgable_count and purges). + * rev2 - added speculative_count. * ---- - * rev3 - changed name to vm_statistics64. - * changed some fields in structure to 64-bit on + * rev3 - changed name to vm_statistics64. + * changed some fields in structure to 64-bit on * arm, i386 and x86_64 architectures. * rev4 - require 64-bit alignment for efficient access * in the kernel. No change to reported data. @@ -130,44 +130,44 @@ typedef struct vm_statistics vm_statistics_data_t; */ struct vm_statistics64 { - natural_t free_count; /* # of pages free */ - natural_t active_count; /* # of pages active */ - natural_t inactive_count; /* # of pages inactive */ - natural_t wire_count; /* # of pages wired down */ - uint64_t zero_fill_count; /* # of zero fill pages */ - uint64_t reactivations; /* # of pages reactivated */ - uint64_t pageins; /* # of pageins */ - uint64_t pageouts; /* # of pageouts */ - uint64_t faults; /* # of faults */ - uint64_t cow_faults; /* # of copy-on-writes */ - uint64_t lookups; /* object cache lookups */ - uint64_t hits; /* object cache hits */ - uint64_t purges; /* # of pages purged */ - natural_t purgeable_count; /* # of pages purgeable */ + natural_t free_count; /* # of pages free */ + natural_t active_count; /* # of pages active */ + natural_t inactive_count; /* # of pages inactive */ + natural_t wire_count; /* # of pages wired down */ + uint64_t zero_fill_count; /* # of zero fill pages */ + uint64_t reactivations; /* # of pages reactivated */ + uint64_t pageins; /* # of pageins */ + uint64_t pageouts; /* # of pageouts */ + uint64_t faults; /* # of faults */ + uint64_t cow_faults; /* # of copy-on-writes */ + uint64_t lookups; /* object cache lookups */ + uint64_t hits; /* object cache hits */ + uint64_t purges; /* # of pages purged */ + natural_t purgeable_count; /* # of pages purgeable */ /* * NB: speculative pages are already accounted for in "free_count", * so "speculative_count" is the number of "free" pages that are * used to hold data that was read speculatively from disk but * haven't actually been used by anyone so far. */ - natural_t speculative_count; /* # of pages speculative */ + natural_t speculative_count; /* # of pages speculative */ /* added for rev1 */ - uint64_t decompressions; /* # of pages decompressed */ - uint64_t compressions; /* # of pages compressed */ - uint64_t swapins; /* # of pages swapped in (via compression segments) */ - uint64_t swapouts; /* # of pages swapped out (via compression segments) */ - natural_t compressor_page_count; /* # of pages used by the compressed pager to hold all the compressed data */ - natural_t throttled_count; /* # of pages throttled */ - natural_t external_page_count; /* # of pages that are file-backed (non-swap) */ - natural_t internal_page_count; /* # of pages that are anonymous */ - uint64_t total_uncompressed_pages_in_compressor; /* # of pages (uncompressed) held within the compressor. 
*/ + uint64_t decompressions; /* # of pages decompressed */ + uint64_t compressions; /* # of pages compressed */ + uint64_t swapins; /* # of pages swapped in (via compression segments) */ + uint64_t swapouts; /* # of pages swapped out (via compression segments) */ + natural_t compressor_page_count; /* # of pages used by the compressed pager to hold all the compressed data */ + natural_t throttled_count; /* # of pages throttled */ + natural_t external_page_count; /* # of pages that are file-backed (non-swap) */ + natural_t internal_page_count; /* # of pages that are anonymous */ + uint64_t total_uncompressed_pages_in_compressor; /* # of pages (uncompressed) held within the compressor. */ } __attribute__((aligned(8))); -typedef struct vm_statistics64 *vm_statistics64_t; -typedef struct vm_statistics64 vm_statistics64_data_t; +typedef struct vm_statistics64 *vm_statistics64_t; +typedef struct vm_statistics64 vm_statistics64_data_t; -/* +/* * VM_STATISTICS_TRUNCATE_TO_32_BIT * * This is used by host_statistics() to truncate and peg the 64-bit in-kernel values from @@ -175,31 +175,31 @@ typedef struct vm_statistics64 vm_statistics64_data_t; */ #define VM_STATISTICS_TRUNCATE_TO_32_BIT(value) ((uint32_t)(((value) > UINT32_MAX ) ? UINT32_MAX : (value))) -/* +/* * vm_extmod_statistics * * Structure to record modifications to a task by an * external agent. * * History: - * rev0 - original structure. + * rev0 - original structure. */ struct vm_extmod_statistics { - int64_t task_for_pid_count; /* # of times task port was looked up */ - int64_t task_for_pid_caller_count; /* # of times this task called task_for_pid */ - int64_t thread_creation_count; /* # of threads created in task */ - int64_t thread_creation_caller_count; /* # of threads created by task */ - int64_t thread_set_state_count; /* # of register state sets in task */ - int64_t thread_set_state_caller_count; /* # of register state sets by task */ + int64_t task_for_pid_count; /* # of times task port was looked up */ + int64_t task_for_pid_caller_count; /* # of times this task called task_for_pid */ + int64_t thread_creation_count; /* # of threads created in task */ + int64_t thread_creation_caller_count; /* # of threads created by task */ + int64_t thread_set_state_count; /* # of register state sets in task */ + int64_t thread_set_state_caller_count; /* # of register state sets by task */ } __attribute__((aligned(8))); typedef struct vm_extmod_statistics *vm_extmod_statistics_t; typedef struct vm_extmod_statistics vm_extmod_statistics_data_t; typedef struct vm_purgeable_stat { - uint64_t count; - uint64_t size; + uint64_t count; + uint64_t size; }vm_purgeable_stat_t; struct vm_purgeable_info { @@ -208,7 +208,7 @@ struct vm_purgeable_info { vm_purgeable_stat_t lifo_data[8]; }; -typedef struct vm_purgeable_info *vm_purgeable_info_t; +typedef struct vm_purgeable_info *vm_purgeable_info_t; /* included for the vm_map_page_query call */ @@ -218,13 +218,13 @@ typedef struct vm_purgeable_info *vm_purgeable_info_t; #define VM_PAGE_QUERY_PAGE_DIRTY 0x8 #define VM_PAGE_QUERY_PAGE_PAGED_OUT 0x10 #define VM_PAGE_QUERY_PAGE_COPIED 0x20 -#define VM_PAGE_QUERY_PAGE_SPECULATIVE 0x40 -#define VM_PAGE_QUERY_PAGE_EXTERNAL 0x80 -#define VM_PAGE_QUERY_PAGE_CS_VALIDATED 0x100 -#define VM_PAGE_QUERY_PAGE_CS_TAINTED 0x200 -#define VM_PAGE_QUERY_PAGE_CS_NX 0x400 +#define VM_PAGE_QUERY_PAGE_SPECULATIVE 0x40 +#define VM_PAGE_QUERY_PAGE_EXTERNAL 0x80 +#define VM_PAGE_QUERY_PAGE_CS_VALIDATED 0x100 +#define VM_PAGE_QUERY_PAGE_CS_TAINTED 0x200 +#define 
VM_PAGE_QUERY_PAGE_CS_NX 0x400 -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE /* * Each machine dependent implementation is expected to @@ -234,41 +234,41 @@ typedef struct vm_purgeable_info *vm_purgeable_info_t; */ struct pmap_statistics { - integer_t resident_count; /* # of pages mapped (total)*/ - integer_t resident_max; /* # of pages mapped (peak) */ - integer_t wired_count; /* # of pages wired */ - - integer_t device; - integer_t device_peak; - integer_t internal; - integer_t internal_peak; - integer_t external; - integer_t external_peak; - integer_t reusable; - integer_t reusable_peak; - uint64_t compressed __attribute__((aligned(8))); - uint64_t compressed_peak __attribute__((aligned(8))); - uint64_t compressed_lifetime __attribute__((aligned(8))); + integer_t resident_count; /* # of pages mapped (total)*/ + integer_t resident_max; /* # of pages mapped (peak) */ + integer_t wired_count; /* # of pages wired */ + + integer_t device; + integer_t device_peak; + integer_t internal; + integer_t internal_peak; + integer_t external; + integer_t external_peak; + integer_t reusable; + integer_t reusable_peak; + uint64_t compressed __attribute__((aligned(8))); + uint64_t compressed_peak __attribute__((aligned(8))); + uint64_t compressed_lifetime __attribute__((aligned(8))); }; -typedef struct pmap_statistics *pmap_statistics_t; +typedef struct pmap_statistics *pmap_statistics_t; -#define PMAP_STATS_PEAK(field) \ - MACRO_BEGIN \ - if (field > field##_peak) { \ - field##_peak = field; \ - } \ +#define PMAP_STATS_PEAK(field) \ + MACRO_BEGIN \ + if (field > field##_peak) { \ + field##_peak = field; \ + } \ MACRO_END -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ /* * VM allocation flags: - * + * * VM_FLAGS_FIXED - * (really the absence of VM_FLAGS_ANYWHERE) + * (really the absence of VM_FLAGS_ANYWHERE) * Allocate new VM region at the specified virtual address, if possible. - * + * * VM_FLAGS_ANYWHERE * Allocate new VM region anywhere it would fit in the address space. * @@ -294,111 +294,111 @@ typedef struct pmap_statistics *pmap_statistics_t; * cached so that they will be stolen first if memory runs low. */ -#define VM_FLAGS_FIXED 0x0000 -#define VM_FLAGS_ANYWHERE 0x0001 -#define VM_FLAGS_PURGABLE 0x0002 -#define VM_FLAGS_4GB_CHUNK 0x0004 -#define VM_FLAGS_RANDOM_ADDR 0x0008 -#define VM_FLAGS_NO_CACHE 0x0010 -#define VM_FLAGS_RESILIENT_CODESIGN 0x0020 -#define VM_FLAGS_RESILIENT_MEDIA 0x0040 -#define VM_FLAGS_OVERWRITE 0x4000 /* delete any existing mappings first */ +#define VM_FLAGS_FIXED 0x0000 +#define VM_FLAGS_ANYWHERE 0x0001 +#define VM_FLAGS_PURGABLE 0x0002 +#define VM_FLAGS_4GB_CHUNK 0x0004 +#define VM_FLAGS_RANDOM_ADDR 0x0008 +#define VM_FLAGS_NO_CACHE 0x0010 +#define VM_FLAGS_RESILIENT_CODESIGN 0x0020 +#define VM_FLAGS_RESILIENT_MEDIA 0x0040 +#define VM_FLAGS_OVERWRITE 0x4000 /* delete any existing mappings first */ /* * VM_FLAGS_SUPERPAGE_MASK * 3 bits that specify whether large pages should be used instead of * base pages (!=0), as well as the requested page size. 
 */
-#define VM_FLAGS_SUPERPAGE_MASK 0x70000 /* bits 0x10000, 0x20000, 0x40000 */
-#define VM_FLAGS_RETURN_DATA_ADDR 0x100000 /* Return address of target data, rather than base of page */
-#define VM_FLAGS_RETURN_4K_DATA_ADDR 0x800000 /* Return 4K aligned address of target data */
-#define VM_FLAGS_ALIAS_MASK 0xFF000000
-#define VM_GET_FLAGS_ALIAS(flags, alias) \
- (alias) = ((flags) & VM_FLAGS_ALIAS_MASK) >> 24
+#define VM_FLAGS_SUPERPAGE_MASK 0x70000 /* bits 0x10000, 0x20000, 0x40000 */
+#define VM_FLAGS_RETURN_DATA_ADDR 0x100000 /* Return address of target data, rather than base of page */
+#define VM_FLAGS_RETURN_4K_DATA_ADDR 0x800000 /* Return 4K aligned address of target data */
+#define VM_FLAGS_ALIAS_MASK 0xFF000000
+#define VM_GET_FLAGS_ALIAS(flags, alias) \
+ (alias) = ((flags) & VM_FLAGS_ALIAS_MASK) >> 24
 #if !XNU_KERNEL_PRIVATE
-#define VM_SET_FLAGS_ALIAS(flags, alias) \
- (flags) = (((flags) & ~VM_FLAGS_ALIAS_MASK) | \
- (((alias) & ~VM_FLAGS_ALIAS_MASK) << 24))
+#define VM_SET_FLAGS_ALIAS(flags, alias) \
+ (flags) = (((flags) & ~VM_FLAGS_ALIAS_MASK) | \
+ (((alias) & ~VM_FLAGS_ALIAS_MASK) << 24))
 #endif /* !XNU_KERNEL_PRIVATE */

 /* These are the flags that we accept from user-space */
-#define VM_FLAGS_USER_ALLOCATE (VM_FLAGS_FIXED | \
- VM_FLAGS_ANYWHERE | \
- VM_FLAGS_PURGABLE | \
- VM_FLAGS_4GB_CHUNK | \
- VM_FLAGS_RANDOM_ADDR | \
- VM_FLAGS_NO_CACHE | \
- VM_FLAGS_OVERWRITE | \
- VM_FLAGS_SUPERPAGE_MASK | \
- VM_FLAGS_ALIAS_MASK)
-#define VM_FLAGS_USER_MAP (VM_FLAGS_USER_ALLOCATE | \
- VM_FLAGS_RETURN_4K_DATA_ADDR | \
- VM_FLAGS_RETURN_DATA_ADDR)
-#define VM_FLAGS_USER_REMAP (VM_FLAGS_FIXED | \
- VM_FLAGS_ANYWHERE | \
- VM_FLAGS_RANDOM_ADDR | \
- VM_FLAGS_OVERWRITE| \
- VM_FLAGS_RETURN_DATA_ADDR |\
- VM_FLAGS_RESILIENT_CODESIGN)
+#define VM_FLAGS_USER_ALLOCATE (VM_FLAGS_FIXED | \
+ VM_FLAGS_ANYWHERE | \
+ VM_FLAGS_PURGABLE | \
+ VM_FLAGS_4GB_CHUNK | \
+ VM_FLAGS_RANDOM_ADDR | \
+ VM_FLAGS_NO_CACHE | \
+ VM_FLAGS_OVERWRITE | \
+ VM_FLAGS_SUPERPAGE_MASK | \
+ VM_FLAGS_ALIAS_MASK)
+#define VM_FLAGS_USER_MAP (VM_FLAGS_USER_ALLOCATE | \
+ VM_FLAGS_RETURN_4K_DATA_ADDR | \
+ VM_FLAGS_RETURN_DATA_ADDR)
+#define VM_FLAGS_USER_REMAP (VM_FLAGS_FIXED | \
+ VM_FLAGS_ANYWHERE | \
+ VM_FLAGS_RANDOM_ADDR | \
+ VM_FLAGS_OVERWRITE| \
+ VM_FLAGS_RETURN_DATA_ADDR |\
+ VM_FLAGS_RESILIENT_CODESIGN)

 #define VM_FLAGS_SUPERPAGE_SHIFT 16
-#define SUPERPAGE_NONE 0 /* no superpages, if all bits are 0 */
-#define SUPERPAGE_SIZE_ANY 1
+#define SUPERPAGE_NONE 0 /* no superpages, if all bits are 0 */
+#define SUPERPAGE_SIZE_ANY 1
 #define VM_FLAGS_SUPERPAGE_NONE (SUPERPAGE_NONE << VM_FLAGS_SUPERPAGE_SHIFT)
 #define VM_FLAGS_SUPERPAGE_SIZE_ANY (SUPERPAGE_SIZE_ANY << VM_FLAGS_SUPERPAGE_SHIFT)
 #if defined(__x86_64__) || !defined(KERNEL)
-#define SUPERPAGE_SIZE_2MB 2
+#define SUPERPAGE_SIZE_2MB 2
 #define VM_FLAGS_SUPERPAGE_SIZE_2MB (SUPERPAGE_SIZE_2MB << VM_FLAGS_SUPERPAGE_SHIFT)
 #endif

diff --git a/osfmk/mach/vm_types.h b/osfmk/mach/vm_types.h
--- a/osfmk/mach/vm_types.h
+++ b/osfmk/mach/vm_types.h
@@ -37,8 +37,8 @@
 #include

-typedef vm_offset_t pointer_t;
-typedef vm_offset_t vm_address_t;
+typedef vm_offset_t pointer_t;
+typedef vm_offset_t vm_address_t;

 /*
  * We use addr64_t for 64-bit addresses that are used on both
@@ -46,7 +46,7 @@ typedef vm_offset_t vm_address_t;
  * two adjacent 32-bit GPRs. We use addr64_t in places where
  * common code must be useable both on 32 and 64-bit machines.
*/ -typedef uint64_t addr64_t; /* Basic effective address */ +typedef uint64_t addr64_t; /* Basic effective address */ /* * We use reg64_t for addresses that are 32 bits on a 32-bit @@ -58,22 +58,22 @@ typedef uint64_t addr64_t; /* Basic effective address */ * type in prototypes of functions that are written in and called * from assembly language. This type is basically a comment. */ -typedef uint32_t reg64_t; +typedef uint32_t reg64_t; /* * To minimize the use of 64-bit fields, we keep some physical - * addresses (that are page aligned) as 32-bit page numbers. + * addresses (that are page aligned) as 32-bit page numbers. * This limits the physical address space to 16TB of RAM. */ -typedef uint32_t ppnum_t; /* Physical page number */ +typedef uint32_t ppnum_t; /* Physical page number */ #define PPNUM_MAX UINT32_MAX -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #include -#ifndef MACH_KERNEL_PRIVATE +#ifndef MACH_KERNEL_PRIVATE /* * Use specifically typed null structures for these in * other parts of the kernel to enable compiler warnings @@ -82,36 +82,36 @@ typedef uint32_t ppnum_t; /* Physical page number */ */ __BEGIN_DECLS -struct pmap ; -struct _vm_map ; -struct vm_object ; +struct pmap; +struct _vm_map; +struct vm_object; __END_DECLS -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -typedef struct pmap *pmap_t; -typedef struct _vm_map *vm_map_t; -typedef struct vm_object *vm_object_t; -typedef struct vm_object_fault_info *vm_object_fault_info_t; +typedef struct pmap *pmap_t; +typedef struct _vm_map *vm_map_t; +typedef struct vm_object *vm_object_t; +typedef struct vm_object_fault_info *vm_object_fault_info_t; -#define PMAP_NULL ((pmap_t) 0) -#define VM_OBJECT_NULL ((vm_object_t) 0) +#define PMAP_NULL ((pmap_t) 0) +#define VM_OBJECT_NULL ((vm_object_t) 0) #else /* KERNEL_PRIVATE */ -typedef mach_port_t vm_map_t; +typedef mach_port_t vm_map_t; #endif /* KERNEL_PRIVATE */ -#define VM_MAP_NULL ((vm_map_t) 0) +#define VM_MAP_NULL ((vm_map_t) 0) /* * Evolving definitions, likely to change. 
*/ -typedef uint64_t vm_object_offset_t; -typedef uint64_t vm_object_size_t; +typedef uint64_t vm_object_offset_t; +typedef uint64_t vm_object_size_t; #ifdef XNU_KERNEL_PRIVATE @@ -120,60 +120,57 @@ typedef uint64_t vm_object_size_t; typedef uint16_t vm_tag_t; -#define VM_TAG_NAME_LEN_MAX 0x7F -#define VM_TAG_NAME_LEN_SHIFT 0 -#define VM_TAG_BT 0x0080 -#define VM_TAG_UNLOAD 0x0100 -#define VM_TAG_KMOD 0x0200 +#define VM_TAG_NAME_LEN_MAX 0x7F +#define VM_TAG_NAME_LEN_SHIFT 0 +#define VM_TAG_BT 0x0080 +#define VM_TAG_UNLOAD 0x0100 +#define VM_TAG_KMOD 0x0200 #if DEBUG || DEVELOPMENT -#define VM_MAX_TAG_ZONES 28 +#define VM_MAX_TAG_ZONES 28 #else -#define VM_MAX_TAG_ZONES 0 +#define VM_MAX_TAG_ZONES 0 #endif #if VM_MAX_TAG_ZONES // must be multiple of 64 -#define VM_MAX_TAG_VALUE 1536 +#define VM_MAX_TAG_VALUE 1536 #else -#define VM_MAX_TAG_VALUE 256 +#define VM_MAX_TAG_VALUE 256 #endif -#define ARRAY_COUNT(a) (sizeof((a)) / sizeof((a)[0])) +#define ARRAY_COUNT(a) (sizeof((a)) / sizeof((a)[0])) -struct vm_allocation_total -{ - vm_tag_t tag; - uint64_t total; +struct vm_allocation_total { + vm_tag_t tag; + uint64_t total; }; -struct vm_allocation_zone_total -{ - uint64_t total; - uint64_t peak; - uint32_t waste; - uint32_t wastediv; +struct vm_allocation_zone_total { + uint64_t total; + uint64_t peak; + uint32_t waste; + uint32_t wastediv; }; typedef struct vm_allocation_zone_total vm_allocation_zone_total_t; -struct vm_allocation_site -{ - uint64_t total; +struct vm_allocation_site { + uint64_t total; #if DEBUG || DEVELOPMENT - uint64_t peak; + uint64_t peak; #endif /* DEBUG || DEVELOPMENT */ - uint64_t mapped; - int16_t refcount; - vm_tag_t tag; - uint16_t flags; - uint16_t subtotalscount; - struct vm_allocation_total subtotals[0]; - char name[0]; + uint64_t mapped; + int16_t refcount; + vm_tag_t tag; + uint16_t flags; + uint16_t subtotalscount; + struct vm_allocation_total subtotals[0]; + char name[0]; }; typedef struct vm_allocation_site vm_allocation_site_t; -#define VM_ALLOC_SITE_STATIC(iflags, itag) \ +#define VM_ALLOC_SITE_STATIC(iflags, itag) \ static vm_allocation_site_t site __attribute__((section("__DATA, __data"))) \ = { .refcount = 2, .tag = (itag), .flags = (iflags) }; @@ -184,33 +181,33 @@ extern unsigned int vmrtfaultinfo_bufsz(void); #ifdef KERNEL_PRIVATE -#ifndef MACH_KERNEL_PRIVATE +#ifndef MACH_KERNEL_PRIVATE __BEGIN_DECLS -struct upl ; -struct vm_map_copy ; -struct vm_named_entry ; +struct upl; +struct vm_map_copy; +struct vm_named_entry; __END_DECLS -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -typedef struct upl *upl_t; -typedef struct vm_map_copy *vm_map_copy_t; -typedef struct vm_named_entry *vm_named_entry_t; +typedef struct upl *upl_t; +typedef struct vm_map_copy *vm_map_copy_t; +typedef struct vm_named_entry *vm_named_entry_t; -#define VM_MAP_COPY_NULL ((vm_map_copy_t) 0) +#define VM_MAP_COPY_NULL ((vm_map_copy_t) 0) -#else /* KERNEL_PRIVATE */ +#else /* KERNEL_PRIVATE */ -typedef mach_port_t upl_t; -typedef mach_port_t vm_named_entry_t; +typedef mach_port_t upl_t; +typedef mach_port_t vm_named_entry_t; -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -#define UPL_NULL ((upl_t) 0) -#define VM_NAMED_ENTRY_NULL ((vm_named_entry_t) 0) +#define UPL_NULL ((upl_t) 0) +#define VM_NAMED_ENTRY_NULL ((vm_named_entry_t) 0) #ifdef PRIVATE typedef struct { uint64_t rtfabstime; // mach_continuous_time at start of fault @@ -222,4 +219,4 @@ typedef struct { uint64_t rtftype; // fault type } vm_rtfault_record_t; #endif -#endif /* _MACH_VM_TYPES_H_ */ 
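For context, the vm_statistics64 structure revised earlier in this patch is read from user space with host_statistics64(); the older 32-bit host_statistics() path instead pegs each 64-bit counter through VM_STATISTICS_TRUNCATE_TO_32_BIT(). A minimal user-space sketch, not part of the patch itself (the fields printed are an arbitrary selection):

#include <stdio.h>
#include <mach/mach.h>

int
main(void)
{
	vm_statistics64_data_t vmstat;
	mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
	kern_return_t kr;

	/* HOST_VM_INFO64 fills the vm_statistics64 layout shown above. */
	kr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
	    (host_info64_t)&vmstat, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "host_statistics64: %d\n", kr);
		return 1;
	}
	printf("free %u active %u wired %u compressor %u\n",
	    vmstat.free_count, vmstat.active_count,
	    vmstat.wire_count, vmstat.compressor_page_count);
	return 0;
}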
+#endif /* _MACH_VM_TYPES_H_ */ diff --git a/osfmk/mach_debug/hash_info.h b/osfmk/mach_debug/hash_info.h index 1ceb1361e..ba4bd39e8 100644 --- a/osfmk/mach_debug/hash_info.h +++ b/osfmk/mach_debug/hash_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _MACH_DEBUG_HASH_INFO_H_ +#ifndef _MACH_DEBUG_HASH_INFO_H_ #define _MACH_DEBUG_HASH_INFO_H_ #include /* natural_t */ @@ -67,9 +67,9 @@ */ typedef struct hash_info_bucket { - natural_t hib_count; /* number of records in bucket */ + natural_t hib_count; /* number of records in bucket */ } hash_info_bucket_t; typedef hash_info_bucket_t *hash_info_bucket_array_t; -#endif /* _MACH_DEBUG_HASH_INFO_H_ */ +#endif /* _MACH_DEBUG_HASH_INFO_H_ */ diff --git a/osfmk/mach_debug/ipc_info.h b/osfmk/mach_debug/ipc_info.h index c9a8ab61f..520830894 100644 --- a/osfmk/mach_debug/ipc_info.h +++ b/osfmk/mach_debug/ipc_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * Definitions for the IPC debugging interface. 
*/ -#ifndef _MACH_DEBUG_IPC_INFO_H_ +#ifndef _MACH_DEBUG_IPC_INFO_H_ #define _MACH_DEBUG_IPC_INFO_H_ #include @@ -76,30 +76,30 @@ */ typedef struct ipc_info_space { - natural_t iis_genno_mask; /* generation number mask */ - natural_t iis_table_size; /* size of table */ - natural_t iis_table_next; /* next possible size of table */ - natural_t iis_tree_size; /* size of tree (UNUSED) */ - natural_t iis_tree_small; /* # of small entries in tree (UNUSED) */ - natural_t iis_tree_hash; /* # of hashed entries in tree (UNUSED) */ + natural_t iis_genno_mask; /* generation number mask */ + natural_t iis_table_size; /* size of table */ + natural_t iis_table_next; /* next possible size of table */ + natural_t iis_tree_size; /* size of tree (UNUSED) */ + natural_t iis_tree_small; /* # of small entries in tree (UNUSED) */ + natural_t iis_tree_hash; /* # of hashed entries in tree (UNUSED) */ } ipc_info_space_t; typedef struct ipc_info_space_basic { - natural_t iisb_genno_mask; /* generation number mask */ - natural_t iisb_table_size; /* size of table */ - natural_t iisb_table_next; /* next possible size of table */ - natural_t iisb_table_inuse; /* number of entries in use */ - natural_t iisb_reserved[2]; /* future expansion */ + natural_t iisb_genno_mask; /* generation number mask */ + natural_t iisb_table_size; /* size of table */ + natural_t iisb_table_next; /* next possible size of table */ + natural_t iisb_table_inuse; /* number of entries in use */ + natural_t iisb_reserved[2]; /* future expansion */ } ipc_info_space_basic_t; typedef struct ipc_info_name { - mach_port_name_t iin_name; /* port name, including gen number */ -/*boolean_t*/integer_t iin_collision; /* collision at this entry? */ - mach_port_type_t iin_type; /* straight port type */ - mach_port_urefs_t iin_urefs; /* user-references */ - natural_t iin_object; /* object pointer/identifier */ - natural_t iin_next; /* marequest/next in free list */ - natural_t iin_hash; /* hash index */ + mach_port_name_t iin_name; /* port name, including gen number */ +/*boolean_t*/ integer_t iin_collision; /* collision at this entry? */ + mach_port_type_t iin_type; /* straight port type */ + mach_port_urefs_t iin_urefs; /* user-references */ + natural_t iin_object; /* object pointer/identifier */ + natural_t iin_next; /* marequest/next in free list */ + natural_t iin_hash; /* hash index */ } ipc_info_name_t; typedef ipc_info_name_t *ipc_info_name_array_t; @@ -107,10 +107,10 @@ typedef ipc_info_name_t *ipc_info_name_array_t; /* UNUSED */ typedef struct ipc_info_tree_name { ipc_info_name_t iitn_name; - mach_port_name_t iitn_lchild; /* name of left child */ - mach_port_name_t iitn_rchild; /* name of right child */ + mach_port_name_t iitn_lchild; /* name of left child */ + mach_port_name_t iitn_rchild; /* name of right child */ } ipc_info_tree_name_t; typedef ipc_info_tree_name_t *ipc_info_tree_name_array_t; -#endif /* _MACH_DEBUG_IPC_INFO_H_ */ +#endif /* _MACH_DEBUG_IPC_INFO_H_ */ diff --git a/osfmk/mach_debug/lockgroup_info.h b/osfmk/mach_debug/lockgroup_info.h index 99c908875..ee744bb57 100644 --- a/osfmk/mach_debug/lockgroup_info.h +++ b/osfmk/mach_debug/lockgroup_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -31,45 +31,44 @@ * Definitions for host_lockgroup_info call. */ -#ifndef _MACH_DEBUG_LOCKGROUP_INFO_H_ -#define _MACH_DEBUG_LOCKGROUP_INFO_H_ +#ifndef _MACH_DEBUG_LOCKGROUP_INFO_H_ +#define _MACH_DEBUG_LOCKGROUP_INFO_H_ #include -#define LOCKGROUP_MAX_NAME 64 +#define LOCKGROUP_MAX_NAME 64 -#define LOCKGROUP_ATTR_STAT 0x01ULL +#define LOCKGROUP_ATTR_STAT 0x01ULL typedef struct lockgroup_info { - char lockgroup_name[LOCKGROUP_MAX_NAME]; - uint64_t lockgroup_attr; - uint64_t lock_spin_cnt; - uint64_t lock_spin_util_cnt; - uint64_t lock_spin_held_cnt; - uint64_t lock_spin_miss_cnt; - uint64_t lock_spin_held_max; - uint64_t lock_spin_held_cum; - uint64_t lock_mtx_cnt; - uint64_t lock_mtx_util_cnt; - uint64_t lock_mtx_held_cnt; - uint64_t lock_mtx_miss_cnt; - uint64_t lock_mtx_wait_cnt; - uint64_t lock_mtx_held_max; - uint64_t lock_mtx_held_cum; - uint64_t lock_mtx_wait_max; - uint64_t lock_mtx_wait_cum; - uint64_t lock_rw_cnt; - uint64_t lock_rw_util_cnt; - uint64_t lock_rw_held_cnt; - uint64_t lock_rw_miss_cnt; - uint64_t lock_rw_wait_cnt; - uint64_t lock_rw_held_max; - uint64_t lock_rw_held_cum; - uint64_t lock_rw_wait_max; - uint64_t lock_rw_wait_cum; + char lockgroup_name[LOCKGROUP_MAX_NAME]; + uint64_t lockgroup_attr; + uint64_t lock_spin_cnt; + uint64_t lock_spin_util_cnt; + uint64_t lock_spin_held_cnt; + uint64_t lock_spin_miss_cnt; + uint64_t lock_spin_held_max; + uint64_t lock_spin_held_cum; + uint64_t lock_mtx_cnt; + uint64_t lock_mtx_util_cnt; + uint64_t lock_mtx_held_cnt; + uint64_t lock_mtx_miss_cnt; + uint64_t lock_mtx_wait_cnt; + uint64_t lock_mtx_held_max; + uint64_t lock_mtx_held_cum; + uint64_t lock_mtx_wait_max; + uint64_t lock_mtx_wait_cum; + uint64_t lock_rw_cnt; + uint64_t lock_rw_util_cnt; + uint64_t lock_rw_held_cnt; + uint64_t lock_rw_miss_cnt; + uint64_t lock_rw_wait_cnt; + uint64_t lock_rw_held_max; + uint64_t lock_rw_held_cum; + uint64_t lock_rw_wait_max; + uint64_t lock_rw_wait_cum; } lockgroup_info_t; typedef lockgroup_info_t *lockgroup_info_array_t; -#endif /* _MACH_DEBUG_LOCKGROUP_INFO_H_ */ - +#endif /* _MACH_DEBUG_LOCKGROUP_INFO_H_ */ diff --git a/osfmk/mach_debug/mach_debug.h b/osfmk/mach_debug/mach_debug.h index ca02a8d8d..db22d6616 100644 --- a/osfmk/mach_debug/mach_debug.h +++ b/osfmk/mach_debug/mach_debug.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -36,7 +36,7 @@ * wraps up all the new interface headers generated from * each of the new .defs resulting from that decomposition. */ -#ifndef _MACH_DEBUG_MACH_DEBUG_H_ +#ifndef _MACH_DEBUG_MACH_DEBUG_H_ #define _MACH_DEBUG_MACH_DEBUG_H_ #include @@ -46,5 +46,3 @@ #include #endif /* _MACH_DEBUG_MACH_DEBUG_H_ */ - - diff --git a/osfmk/mach_debug/mach_debug_types.h b/osfmk/mach_debug/mach_debug_types.h index 54e81d7e7..4ba2440df 100644 --- a/osfmk/mach_debug/mach_debug_types.h +++ b/osfmk/mach_debug/mach_debug_types.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
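The lockgroup_info_t layout defined above is returned as out-of-line data by the host_lockgroup_info() MIG routine. A hedged sketch, assuming that routine is exposed by your SDK's <mach/mach_host.h> and permitted on your configuration:

#include <stdio.h>
#include <mach/mach.h>
#include <mach_debug/lockgroup_info.h>

int
main(void)
{
	lockgroup_info_array_t groups;
	mach_msg_type_number_t count = 0;
	kern_return_t kr;

	kr = host_lockgroup_info(mach_host_self(), &groups, &count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "host_lockgroup_info: %d\n", kr);
		return 1;
	}
	for (mach_msg_type_number_t i = 0; i < count; i++) {
		printf("%-48s mtx=%llu rw=%llu\n", groups[i].lockgroup_name,
		    (unsigned long long)groups[i].lock_mtx_cnt,
		    (unsigned long long)groups[i].lock_rw_cnt);
	}
	/* The reply buffer arrives as vm_allocate()d out-of-line memory. */
	vm_deallocate(mach_task_self(), (vm_address_t)groups,
	    count * sizeof(lockgroup_info_t));
	return 0;
}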
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -59,7 +59,7 @@ * Mach kernel debugging interface type declarations */ -#ifndef _MACH_DEBUG_MACH_DEBUG_TYPES_H_ +#ifndef _MACH_DEBUG_MACH_DEBUG_TYPES_H_ #define _MACH_DEBUG_MACH_DEBUG_TYPES_H_ #include @@ -69,27 +69,25 @@ #include #include -#define MACH_CORE_FILEHEADER_SIGNATURE 0x0063614d20646152ULL +#define MACH_CORE_FILEHEADER_SIGNATURE 0x0063614d20646152ULL #define MACH_CORE_FILEHEADER_MAXFILES 16 #define MACH_CORE_FILEHEADER_NAMELEN 16 -typedef char symtab_name_t[32]; +typedef char symtab_name_t[32]; -struct mach_core_details -{ - uint64_t gzip_offset; - uint64_t gzip_length; - char core_name[MACH_CORE_FILEHEADER_NAMELEN]; +struct mach_core_details { + uint64_t gzip_offset; + uint64_t gzip_length; + char core_name[MACH_CORE_FILEHEADER_NAMELEN]; }; -struct mach_core_fileheader -{ - uint64_t signature; - uint64_t log_offset; - uint64_t log_length; - uint64_t num_files; - struct mach_core_details files[MACH_CORE_FILEHEADER_MAXFILES]; +struct mach_core_fileheader { + uint64_t signature; + uint64_t log_offset; + uint64_t log_length; + uint64_t num_files; + struct mach_core_details files[MACH_CORE_FILEHEADER_MAXFILES]; }; -#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_H_ */ +#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_H_ */ diff --git a/osfmk/mach_debug/page_info.h b/osfmk/mach_debug/page_info.h index 296c613b6..b0b5db387 100644 --- a/osfmk/mach_debug/page_info.h +++ b/osfmk/mach_debug/page_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,43 +22,43 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef MACH_DEBUG_PAGE_INFO_H -#define MACH_DEBUG_PAGE_INFO_H +#ifndef MACH_DEBUG_PAGE_INFO_H +#define MACH_DEBUG_PAGE_INFO_H #include -typedef vm_offset_t *page_address_array_t; -#endif /* MACH_DEBUG_PAGE_INFO_H */ +typedef vm_offset_t *page_address_array_t; +#endif /* MACH_DEBUG_PAGE_INFO_H */ diff --git a/osfmk/mach_debug/vm_info.h b/osfmk/mach_debug/vm_info.h index 91786c8ba..8e2eb5f05 100644 --- a/osfmk/mach_debug/vm_info.h +++ b/osfmk/mach_debug/vm_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,7 +61,7 @@ * Definitions for the VM debugging interface. */ -#ifndef _MACH_DEBUG_VM_INFO_H_ +#ifndef _MACH_DEBUG_VM_INFO_H_ #define _MACH_DEBUG_VM_INFO_H_ #include @@ -77,58 +77,58 @@ * in mach_debug_types.defs when adding/removing fields. 
*/ typedef struct mach_vm_info_region { - mach_vm_offset_t vir_start; /* start of region */ - mach_vm_offset_t vir_end; /* end of region */ - mach_vm_offset_t vir_object; /* the mapped object(kernal addr) */ - memory_object_offset_t vir_offset; /* offset into object */ - boolean_t vir_needs_copy; /* does object need to be copied? */ - vm_prot_t vir_protection; /* protection code */ - vm_prot_t vir_max_protection; /* maximum protection */ - vm_inherit_t vir_inheritance; /* inheritance */ - natural_t vir_wired_count; /* number of times wired */ + mach_vm_offset_t vir_start; /* start of region */ + mach_vm_offset_t vir_end; /* end of region */ + mach_vm_offset_t vir_object; /* the mapped object(kernal addr) */ + memory_object_offset_t vir_offset; /* offset into object */ + boolean_t vir_needs_copy; /* does object need to be copied? */ + vm_prot_t vir_protection; /* protection code */ + vm_prot_t vir_max_protection; /* maximum protection */ + vm_inherit_t vir_inheritance; /* inheritance */ + natural_t vir_wired_count; /* number of times wired */ natural_t vir_user_wired_count; /* number of times user has wired */ } mach_vm_info_region_t; typedef struct vm_info_region_64 { - natural_t vir_start; /* start of region */ - natural_t vir_end; /* end of region */ - natural_t vir_object; /* the mapped object */ - memory_object_offset_t vir_offset; /* offset into object */ - boolean_t vir_needs_copy; /* does object need to be copied? */ - vm_prot_t vir_protection; /* protection code */ - vm_prot_t vir_max_protection; /* maximum protection */ - vm_inherit_t vir_inheritance; /* inheritance */ - natural_t vir_wired_count; /* number of times wired */ + natural_t vir_start; /* start of region */ + natural_t vir_end; /* end of region */ + natural_t vir_object; /* the mapped object */ + memory_object_offset_t vir_offset; /* offset into object */ + boolean_t vir_needs_copy; /* does object need to be copied? */ + vm_prot_t vir_protection; /* protection code */ + vm_prot_t vir_max_protection; /* maximum protection */ + vm_inherit_t vir_inheritance; /* inheritance */ + natural_t vir_wired_count; /* number of times wired */ natural_t vir_user_wired_count; /* number of times user has wired */ } vm_info_region_64_t; typedef struct vm_info_region { - natural_t vir_start; /* start of region */ - natural_t vir_end; /* end of region */ - natural_t vir_object; /* the mapped object */ - natural_t vir_offset; /* offset into object */ - boolean_t vir_needs_copy; /* does object need to be copied? */ - vm_prot_t vir_protection; /* protection code */ - vm_prot_t vir_max_protection; /* maximum protection */ - vm_inherit_t vir_inheritance; /* inheritance */ - natural_t vir_wired_count; /* number of times wired */ + natural_t vir_start; /* start of region */ + natural_t vir_end; /* end of region */ + natural_t vir_object; /* the mapped object */ + natural_t vir_offset; /* offset into object */ + boolean_t vir_needs_copy; /* does object need to be copied? 
*/ + vm_prot_t vir_protection; /* protection code */ + vm_prot_t vir_max_protection; /* maximum protection */ + vm_inherit_t vir_inheritance; /* inheritance */ + natural_t vir_wired_count; /* number of times wired */ natural_t vir_user_wired_count; /* number of times user has wired */ } vm_info_region_t; typedef struct vm_info_object { - natural_t vio_object; /* this object */ - natural_t vio_size; /* object size (valid if internal - but too small) */ - unsigned int vio_ref_count; /* number of references */ + natural_t vio_object; /* this object */ + natural_t vio_size; /* object size (valid if internal - but too small) */ + unsigned int vio_ref_count; /* number of references */ unsigned int vio_resident_page_count; /* number of resident pages */ - unsigned int vio_absent_count; /* number requested but not filled */ - natural_t vio_copy; /* copy object */ - natural_t vio_shadow; /* shadow object */ - natural_t vio_shadow_offset; /* offset into shadow object */ - natural_t vio_paging_offset; /* offset into memory object */ + unsigned int vio_absent_count; /* number requested but not filled */ + natural_t vio_copy; /* copy object */ + natural_t vio_shadow; /* shadow object */ + natural_t vio_shadow_offset; /* offset into shadow object */ + natural_t vio_paging_offset; /* offset into memory object */ memory_object_copy_strategy_t vio_copy_strategy; - /* how to handle data copy */ - vm_offset_t vio_last_alloc; /* offset of last allocation */ + /* how to handle data copy */ + vm_offset_t vio_last_alloc; /* offset of last allocation */ /* many random attributes */ unsigned int vio_paging_in_progress; boolean_t vio_pager_created; @@ -146,4 +146,4 @@ typedef vm_info_object_t *vm_info_object_array_t; #pragma pack() -#endif /* _MACH_DEBUG_VM_INFO_H_ */ +#endif /* _MACH_DEBUG_VM_INFO_H_ */ diff --git a/osfmk/mach_debug/zone_info.h b/osfmk/mach_debug/zone_info.h index 9544b454f..1022eca68 100644 --- a/osfmk/mach_debug/zone_info.h +++ b/osfmk/mach_debug/zone_info.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. 
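The vir_start/vir_end/vir_protection fields of the region structures above mirror what the supported mach_vm_region() call reports (the mach_debug variants are typically only served by debug kernels). For comparison, a minimal user-space sketch against the public interface:

#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>

int
main(void)
{
	mach_vm_address_t addr = 0;	/* start scanning at the lowest region */
	mach_vm_size_t size = 0;
	vm_region_basic_info_data_64_t info;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t object_name = MACH_PORT_NULL;
	kern_return_t kr;

	kr = mach_vm_region(mach_task_self(), &addr, &size,
	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
	    &count, &object_name);
	if (kr != KERN_SUCCESS) {
		return 1;
	}
	/* Analogous to vir_start, vir_end and the two protection fields. */
	printf("region [0x%llx, 0x%llx) prot %d/%d\n",
	    (unsigned long long)addr, (unsigned long long)(addr + size),
	    info.protection, info.max_protection);
	return 0;
}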
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef _MACH_DEBUG_ZONE_INFO_H_ +#ifndef _MACH_DEBUG_ZONE_INFO_H_ #define _MACH_DEBUG_ZONE_INFO_H_ #include @@ -68,25 +68,25 @@ * mach_zone_info() inteface and types below. */ -#define ZONE_NAME_MAX_LEN 80 +#define ZONE_NAME_MAX_LEN 80 typedef struct zone_name { - char zn_name[ZONE_NAME_MAX_LEN]; + char zn_name[ZONE_NAME_MAX_LEN]; } zone_name_t; typedef zone_name_t *zone_name_array_t; typedef struct zone_info { - integer_t zi_count; /* Number of elements used now */ - vm_size_t zi_cur_size; /* current memory utilization */ - vm_size_t zi_max_size; /* how large can this zone grow */ - vm_size_t zi_elem_size; /* size of an element */ - vm_size_t zi_alloc_size; /* size used for more memory */ - integer_t zi_pageable; /* zone pageable? */ - integer_t zi_sleepable; /* sleep if empty? */ - integer_t zi_exhaustible; /* merely return if empty? */ - integer_t zi_collectable; /* garbage collect elements? */ + integer_t zi_count; /* Number of elements used now */ + vm_size_t zi_cur_size; /* current memory utilization */ + vm_size_t zi_max_size; /* how large can this zone grow */ + vm_size_t zi_elem_size; /* size of an element */ + vm_size_t zi_alloc_size; /* size used for more memory */ + integer_t zi_pageable; /* zone pageable? */ + integer_t zi_sleepable; /* sleep if empty? */ + integer_t zi_exhaustible; /* merely return if empty? */ + integer_t zi_collectable; /* garbage collect elements? */ } zone_info_t; typedef zone_info_t *zone_info_array_t; @@ -97,23 +97,23 @@ typedef zone_info_t *zone_info_array_t; * in mach_debug_types.defs when adding/removing fields. */ -#define MACH_ZONE_NAME_MAX_LEN 80 +#define MACH_ZONE_NAME_MAX_LEN 80 typedef struct mach_zone_name { - char mzn_name[ZONE_NAME_MAX_LEN]; + char mzn_name[ZONE_NAME_MAX_LEN]; } mach_zone_name_t; typedef mach_zone_name_t *mach_zone_name_array_t; typedef struct mach_zone_info_data { - uint64_t mzi_count; /* count of elements in use */ - uint64_t mzi_cur_size; /* current memory utilization */ - uint64_t mzi_max_size; /* how large can this zone grow */ - uint64_t mzi_elem_size; /* size of an element */ - uint64_t mzi_alloc_size; /* size used for more memory */ - uint64_t mzi_sum_size; /* sum of all allocs (life of zone) */ - uint64_t mzi_exhaustible; /* merely return if empty? */ - uint64_t mzi_collectable; /* garbage collect elements? and how much? 
*/ + uint64_t mzi_count; /* count of elements in use */ + uint64_t mzi_cur_size; /* current memory utilization */ + uint64_t mzi_max_size; /* how large can this zone grow */ + uint64_t mzi_elem_size; /* size of an element */ + uint64_t mzi_alloc_size; /* size used for more memory */ + uint64_t mzi_sum_size; /* sum of all allocs (life of zone) */ + uint64_t mzi_exhaustible; /* merely return if empty? */ + uint64_t mzi_collectable; /* garbage collect elements? and how much? */ } mach_zone_info_t; typedef mach_zone_info_t *mach_zone_info_array_t; @@ -123,31 +123,31 @@ typedef mach_zone_info_t *mach_zone_info_array_t; * is collectable by zone_gc(). The higher bits contain the size in bytes * that can be collected. */ -#define GET_MZI_COLLECTABLE_BYTES(val) ((val) >> 1) -#define GET_MZI_COLLECTABLE_FLAG(val) ((val) & 1) +#define GET_MZI_COLLECTABLE_BYTES(val) ((val) >> 1) +#define GET_MZI_COLLECTABLE_FLAG(val) ((val) & 1) -#define SET_MZI_COLLECTABLE_BYTES(val, size) \ +#define SET_MZI_COLLECTABLE_BYTES(val, size) \ (val) = ((val) & 1) | ((size) << 1) -#define SET_MZI_COLLECTABLE_FLAG(val, flag) \ +#define SET_MZI_COLLECTABLE_FLAG(val, flag) \ (val) = (flag) ? ((val) | 1) : (val) typedef struct task_zone_info_data { - uint64_t tzi_count; /* count of elements in use */ - uint64_t tzi_cur_size; /* current memory utilization */ - uint64_t tzi_max_size; /* how large can this zone grow */ - uint64_t tzi_elem_size; /* size of an element */ - uint64_t tzi_alloc_size; /* size used for more memory */ - uint64_t tzi_sum_size; /* sum of all allocs (life of zone) */ - uint64_t tzi_exhaustible; /* merely return if empty? */ - uint64_t tzi_collectable; /* garbage collect elements? */ - uint64_t tzi_caller_acct; /* charged to caller (or kernel) */ - uint64_t tzi_task_alloc; /* sum of all allocs by this task */ - uint64_t tzi_task_free; /* sum of all frees by this task */ + uint64_t tzi_count; /* count of elements in use */ + uint64_t tzi_cur_size; /* current memory utilization */ + uint64_t tzi_max_size; /* how large can this zone grow */ + uint64_t tzi_elem_size; /* size of an element */ + uint64_t tzi_alloc_size; /* size used for more memory */ + uint64_t tzi_sum_size; /* sum of all allocs (life of zone) */ + uint64_t tzi_exhaustible; /* merely return if empty? */ + uint64_t tzi_collectable; /* garbage collect elements? */ + uint64_t tzi_caller_acct; /* charged to caller (or kernel) */ + uint64_t tzi_task_alloc; /* sum of all allocs by this task */ + uint64_t tzi_task_free; /* sum of all frees by this task */ } task_zone_info_t; typedef task_zone_info_t *task_zone_info_array_t; -#define MACH_MEMORY_INFO_NAME_MAX_LEN 80 +#define MACH_MEMORY_INFO_NAME_MAX_LEN 80 typedef struct mach_memory_info { uint64_t flags; @@ -177,25 +177,25 @@ typedef mach_memory_info_t *mach_memory_info_array_t; * mach_debug_types.defs if this changes. */ -#define MAX_ZTRACE_DEPTH 15 +#define MAX_ZTRACE_DEPTH 15 /* * Opcodes for the btlog operation field: */ -#define ZOP_ALLOC 1 -#define ZOP_FREE 0 +#define ZOP_ALLOC 1 +#define ZOP_FREE 0 /* * Structure used to copy out btlog records to userspace, via the MIG call * mach_zone_get_btlog_records(). */ typedef struct zone_btrecord { - uint32_t ref_count; /* no. of active references on the record */ - uint32_t operation_type; /* operation type (alloc/free) */ - uint64_t bt[MAX_ZTRACE_DEPTH]; /* backtrace */ + uint32_t ref_count; /* no. 
of active references on the record */ + uint32_t operation_type; /* operation type (alloc/free) */ + uint64_t bt[MAX_ZTRACE_DEPTH]; /* backtrace */ } zone_btrecord_t; typedef zone_btrecord_t *zone_btrecord_array_t; -#endif /* _MACH_DEBUG_ZONE_INFO_H_ */ +#endif /* _MACH_DEBUG_ZONE_INFO_H_ */ diff --git a/osfmk/machine/Makefile b/osfmk/machine/Makefile index abe9992d9..8542493e9 100644 --- a/osfmk/machine/Makefile +++ b/osfmk/machine/Makefile @@ -17,6 +17,7 @@ KERNELFILES = \ lock.h \ locks.h \ machine_cpuid.h \ + machine_remote_time.h \ machine_routines.h \ machine_kpc.h \ monotonic.h \ @@ -24,8 +25,8 @@ KERNELFILES = \ pal_hibernate.h \ simple_lock.h -EXPORT_FILES = - +EXPORT_FILES = \ + machine_remote_time.h INSTALL_MI_LCL_LIST = ${PRIVATE_DATAFILES} diff --git a/osfmk/machine/atomic.h b/osfmk/machine/atomic.h index c9aeda0ea..3c3676248 100644 --- a/osfmk/machine/atomic.h +++ b/osfmk/machine/atomic.h @@ -32,22 +32,22 @@ #include #define _os_atomic_c11_atomic(p) \ - ((typeof(*(p)) _Atomic *)(p)) + ((typeof(*(p)) _Atomic *)(p)) #define _os_atomic_basetypeof(p) \ - typeof(atomic_load(((typeof(*(p)) _Atomic *)(p)))) + typeof(atomic_load(((typeof(*(p)) _Atomic *)(p)))) #define _os_atomic_c11_op_orig(p, v, m, o) \ - atomic_##o##_explicit(_os_atomic_c11_atomic(p), v, \ - memory_order_##m) + atomic_##o##_explicit(_os_atomic_c11_atomic(p), v, \ + memory_order_##m) #define _os_atomic_c11_op(p, v, m, o, op) \ - ({ typeof(v) _v = (v); _os_atomic_c11_op_orig(p, v, m, o) op _v; }) + ({ typeof(v) _v = (v); _os_atomic_c11_op_orig(p, v, m, o) op _v; }) #define os_atomic_thread_fence(m) atomic_thread_fence(memory_order_##m) #define os_atomic_load(p, m) \ - atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m) + atomic_load_explicit(_os_atomic_c11_atomic(p), memory_order_##m) #define os_atomic_store(p, v, m) _os_atomic_c11_op_orig(p, v, m, store) #define os_atomic_add_orig(p, v, m) _os_atomic_c11_op_orig(p, v, m, fetch_add) @@ -74,37 +74,37 @@ #define os_atomic_xchg(p, v, m) _os_atomic_c11_op_orig(p, v, m, exchange) #define os_atomic_cmpxchg(p, e, v, m) \ - ({ _os_atomic_basetypeof(p) _r = (e); \ - atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ - &_r, v, memory_order_##m, memory_order_relaxed); }) + ({ _os_atomic_basetypeof(p) _r = (e); \ + atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ + &_r, v, memory_order_##m, memory_order_relaxed); }) #define os_atomic_cmpxchgv(p, e, v, g, m) \ - ({ _os_atomic_basetypeof(p) _r = (e); int _b = \ - atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ - &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; }) + ({ _os_atomic_basetypeof(p) _r = (e); int _b = \ + atomic_compare_exchange_strong_explicit(_os_atomic_c11_atomic(p), \ + &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; }) #define os_atomic_cmpxchgvw(p, e, v, g, m) \ - ({ _os_atomic_basetypeof(p) _r = (e); int _b = \ - atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \ - &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; }) + ({ _os_atomic_basetypeof(p) _r = (e); int _b = \ + atomic_compare_exchange_weak_explicit(_os_atomic_c11_atomic(p), \ + &_r, v, memory_order_##m, memory_order_relaxed); *(g) = _r; _b; }) #define os_atomic_rmw_loop(p, ov, nv, m, ...) 
({ \ - bool _result = false; \ - typeof(p) _p = (p); \ - ov = os_atomic_load(_p, relaxed); \ - do { \ - __VA_ARGS__; \ - _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ - } while (!_result); \ - _result; \ + bool _result = false; \ + typeof(p) _p = (p); \ + ov = os_atomic_load(_p, relaxed); \ + do { \ + __VA_ARGS__; \ + _result = os_atomic_cmpxchgvw(_p, ov, nv, &ov, m); \ + } while (!_result); \ + _result; \ }) #define os_atomic_rmw_loop_give_up_with_fence(m, expr) \ - ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) + ({ os_atomic_thread_fence(m); expr; __builtin_unreachable(); }) #define os_atomic_rmw_loop_give_up(expr) \ - os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) + os_atomic_rmw_loop_give_up_with_fence(relaxed, expr) #define os_atomic_force_dependency_on(p, e) (p) #define os_atomic_load_with_dependency_on(p, e) \ - os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) + os_atomic_load(os_atomic_force_dependency_on(p, e), relaxed) #if defined (__x86_64__) #include "i386/atomic.h" diff --git a/osfmk/machine/commpage.h b/osfmk/machine/commpage.h index 93eb341d5..8b6aa878d 100644 --- a/osfmk/machine/commpage.h +++ b/osfmk/machine/commpage.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,11 +37,11 @@ #error architecture not supported #endif -#ifndef __ASSEMBLER__ +#ifndef __ASSEMBLER__ #include -extern void commpage_populate( void ); /* called once during startup */ -extern void commpage_text_populate( void ); -#endif /* __ASSEMBLER__ */ +extern void commpage_populate( void ); /* called once during startup */ +extern void commpage_text_populate( void ); +#endif /* __ASSEMBLER__ */ -#endif /* _MACHINE_COMMPAGE_H */ +#endif /* _MACHINE_COMMPAGE_H */ diff --git a/osfmk/machine/config.h b/osfmk/machine/config.h index 8bf35954b..05a332273 100644 --- a/osfmk/machine/config.h +++ b/osfmk/machine/config.h @@ -2,7 +2,7 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
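In C11 terms, the os_atomic_rmw_loop() macro above is a relaxed load followed by a weak compare-and-swap retry loop that applies the requested ordering on success, with os_atomic_rmw_loop_give_up() as the early exit. A stand-alone rendering of the same pattern (the function name and flag use are illustrative, not from the header):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Set `bit` exactly once; mirrors an os_atomic_rmw_loop(..., acquire, ...)
 * body that gives up when the bit is already taken. */
static bool
set_flag_once(_Atomic uint32_t *p, uint32_t bit)
{
	uint32_t ov = atomic_load_explicit(p, memory_order_relaxed);
	uint32_t nv;

	do {
		if (ov & bit) {
			return false;	/* equivalent of ...give_up() */
		}
		nv = ov | bit;
	} while (!atomic_compare_exchange_weak_explicit(p, &ov, nv,
	    memory_order_acquire, memory_order_relaxed));
	return true;
}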
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_CONFIG_H diff --git a/osfmk/machine/cpu_affinity.h b/osfmk/machine/cpu_affinity.h index b8915c4e4..b62ea782f 100644 --- a/osfmk/machine/cpu_affinity.h +++ b/osfmk/machine/cpu_affinity.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _MACHINE_CPU_AFFINITY_H #define _MACHINE_CPU_AFFINITY_H @@ -40,4 +40,4 @@ #endif /* _MACHINE_CPU_AFFINITY_H */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/machine/cpu_capabilities.h b/osfmk/machine/cpu_capabilities.h index 2a9615cef..1fca46d61 100644 --- a/osfmk/machine/cpu_capabilities.h +++ b/osfmk/machine/cpu_capabilities.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
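The affinity sets that cpu_affinity.h describes surface to user space through THREAD_AFFINITY_POLICY: threads sharing a tag are scheduled to share cache where the platform supports it. A minimal sketch (the tag value is arbitrary, and some platforms return KERN_NOT_SUPPORTED):

#include <mach/mach.h>
#include <mach/thread_policy.h>
#include <pthread.h>

/* Place the calling thread in affinity set `tag`. */
static kern_return_t
join_affinity_set(integer_t tag)
{
	thread_affinity_policy_data_t policy = { .affinity_tag = tag };

	return thread_policy_set(pthread_mach_thread_np(pthread_self()),
	    THREAD_AFFINITY_POLICY, (thread_policy_t)&policy,
	    THREAD_AFFINITY_POLICY_COUNT);
}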
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef PRIVATE +#ifdef PRIVATE #ifndef _MACHINE_CPU_CAPABILITIES_H #define _MACHINE_CPU_CAPABILITIES_H diff --git a/osfmk/machine/cpu_data.h b/osfmk/machine/cpu_data.h index 8e44b2e71..47bdb2089 100644 --- a/osfmk/machine/cpu_data.h +++ b/osfmk/machine/cpu_data.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_CPU_DATA_H diff --git a/osfmk/machine/cpu_number.h b/osfmk/machine/cpu_number.h index a26e933ba..947804363 100644 --- a/osfmk/machine/cpu_number.h +++ b/osfmk/machine/cpu_number.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _MACHINE_CPU_NUMBER_H #define _MACHINE_CPU_NUMBER_H @@ -40,4 +40,4 @@ #endif /* _MACHINE_CPU_NUMBER_H */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/machine/endian.h b/osfmk/machine/endian.h index d7ee76198..fbd818005 100644 --- a/osfmk/machine/endian.h +++ b/osfmk/machine/endian.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_ENDIAN_H diff --git a/osfmk/machine/io_map_entries.h b/osfmk/machine/io_map_entries.h index 2de96f22b..9abb5a5a3 100644 --- a/osfmk/machine/io_map_entries.h +++ b/osfmk/machine/io_map_entries.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _MACHINE_IO_MAP_ENTRIES_H_ #define _MACHINE_IO_MAP_ENTRIES_H_ @@ -40,4 +40,4 @@ #endif /* _MACHINE_IO_MAP_ENTRIES_H_ */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/machine/lock.h b/osfmk/machine/lock.h index 6c4181b0a..f89ca11d8 100644 --- a/osfmk/machine/lock.h +++ b/osfmk/machine/lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _MACHINE_LOCK_H_ #define _MACHINE_LOCK_H_ diff --git a/osfmk/machine/locks.h b/osfmk/machine/locks.h index 5f198abc0..65a863248 100644 --- a/osfmk/machine/locks.h +++ b/osfmk/machine/locks.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_LOCKS_H_ diff --git a/osfmk/machine/lowglobals.h b/osfmk/machine/lowglobals.h index 08ebee88d..50d0e5e6e 100644 --- a/osfmk/machine/lowglobals.h +++ b/osfmk/machine/lowglobals.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_LOWGLOBALS_H @@ -30,7 +30,7 @@ #if defined (__x86_64__) #include "x86_64/lowglobals.h" -#elif defined (__arm__) +#elif defined (__arm__) #include "arm/lowglobals.h" #elif defined (__arm64__) #include "arm64/lowglobals.h" diff --git a/osfmk/machine/machine_cpu.h b/osfmk/machine/machine_cpu.h index b383bc710..11423fad7 100644 --- a/osfmk/machine/machine_cpu.h +++ b/osfmk/machine/machine_cpu.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_MACHINE_CPU_H diff --git a/osfmk/machine/machine_cpuid.h b/osfmk/machine/machine_cpuid.h index dd441dcbe..c0b40ecf3 100644 --- a/osfmk/machine/machine_cpuid.h +++ b/osfmk/machine/machine_cpuid.h @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _MACHINE_CPUID_H #define _MACHINE_CPUID_H @@ -40,4 +40,4 @@ #endif /* _MACHINE_CPUID_H */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ diff --git a/osfmk/machine/machine_kpc.h b/osfmk/machine/machine_kpc.h index 3aaece0c0..ac7d54071 100644 --- a/osfmk/machine/machine_kpc.h +++ b/osfmk/machine/machine_kpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_MACHINE_KPC_H diff --git a/osfmk/machine/machine_remote_time.h b/osfmk/machine/machine_remote_time.h new file mode 100644 index 000000000..fe2484572 --- /dev/null +++ b/osfmk/machine/machine_remote_time.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#ifndef MACHINE_REMOTE_TIME_H +#define MACHINE_REMOTE_TIME_H + +#if defined (__x86_64__) +#include "x86_64/machine_remote_time.h" +#elif defined (__arm64__) +#include "arm64/machine_remote_time.h" +#endif + +#define BT_SLEEP_SENTINEL_TS (~1ULL) +#define BT_WAKE_SENTINEL_TS (~2ULL) +#define BT_RESET_SENTINEL_TS (~3ULL) + +#endif /* MACHINE_REMOTE_TIME_H */ diff --git a/osfmk/machine/machine_routines.h b/osfmk/machine/machine_routines.h index 3fd9a0a79..c12c8ee2b 100644 --- a/osfmk/machine/machine_routines.h +++ b/osfmk/machine/machine_routines.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
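machine_remote_time.h, new in this release, parks three sentinel values at the very top of the unsigned 64-bit timestamp range (~1ULL, ~2ULL, ~3ULL), which suggests sleep, wake, and reset events can be signalled through the same channel that carries real bridge timestamps. A hedged sketch of how a consumer might separate the two — the helper below is illustrative, not part of xnu:

#include <stdint.h>
#include <stdbool.h>

#define BT_SLEEP_SENTINEL_TS (~1ULL)  /* 0xFFFFFFFFFFFFFFFE */
#define BT_WAKE_SENTINEL_TS  (~2ULL)  /* 0xFFFFFFFFFFFFFFFD */
#define BT_RESET_SENTINEL_TS (~3ULL)  /* 0xFFFFFFFFFFFFFFFC, lowest sentinel */

/* Treat anything at or above the lowest sentinel as an event marker
 * rather than a real timestamp (assumed semantics, inferred from the
 * macro names only). */
static bool
bt_ts_is_sentinel(uint64_t ts)
{
	return ts >= BT_RESET_SENTINEL_TS;
}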
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_MACHINE_ROUTINES_H diff --git a/osfmk/machine/machine_rpc.h b/osfmk/machine/machine_rpc.h index 1b306f3d3..03fff52f3 100644 --- a/osfmk/machine/machine_rpc.h +++ b/osfmk/machine/machine_rpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_MACHINE_RPC_H diff --git a/osfmk/machine/machlimits.h b/osfmk/machine/machlimits.h index 70bb0e797..57083ee78 100644 --- a/osfmk/machine/machlimits.h +++ b/osfmk/machine/machlimits.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_MACHLIMITS_H diff --git a/osfmk/machine/machparam.h b/osfmk/machine/machparam.h index a62b1965a..eedc93f6d 100644 --- a/osfmk/machine/machparam.h +++ b/osfmk/machine/machparam.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_MACHPARAM_H diff --git a/osfmk/machine/monotonic.h b/osfmk/machine/monotonic.h index 9de057044..4b3d9df31 100644 --- a/osfmk/machine/monotonic.h +++ b/osfmk/machine/monotonic.h @@ -61,7 +61,7 @@ struct mt_task { struct mt_cpu *mt_cur_cpu(void); void mt_mtc_update_fixed_counts(struct mt_cpu *mtc, uint64_t *counts, - uint64_t *counts_since); + uint64_t *counts_since); uint64_t mt_mtc_update_count(struct mt_cpu *mtc, unsigned int ctr); uint64_t mt_core_snap(unsigned int ctr); void mt_core_set_snap(unsigned int ctr, uint64_t snap); diff --git a/osfmk/machine/pal_hibernate.h b/osfmk/machine/pal_hibernate.h index 3c8d6a1c0..4277f9939 100644 --- a/osfmk/machine/pal_hibernate.h +++ b/osfmk/machine/pal_hibernate.h @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_PAL_HIBERNATE_H diff --git a/osfmk/machine/pal_routines.h b/osfmk/machine/pal_routines.h index 9cc6a139d..6935a94ca 100644 --- a/osfmk/machine/pal_routines.h +++ b/osfmk/machine/pal_routines.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_PAL_ROUTINES_H diff --git a/osfmk/machine/pmap.h b/osfmk/machine/pmap.h index 475700c55..05059864f 100644 --- a/osfmk/machine/pmap.h +++ b/osfmk/machine/pmap.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_PMAP_H diff --git a/osfmk/machine/sched_param.h b/osfmk/machine/sched_param.h index 3ce7907ec..025244b68 100644 --- a/osfmk/machine/sched_param.h +++ b/osfmk/machine/sched_param.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_SCHED_PARAM_H diff --git a/osfmk/machine/setjmp.h b/osfmk/machine/setjmp.h index 51a877635..19d885dcf 100644 --- a/osfmk/machine/setjmp.h +++ b/osfmk/machine/setjmp.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_SETJMP_H diff --git a/osfmk/machine/simple_lock.h b/osfmk/machine/simple_lock.h index 0613ecf65..6678fcef9 100644 --- a/osfmk/machine/simple_lock.h +++ b/osfmk/machine/simple_lock.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,10 +22,10 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifndef _MACHINE_SIMPLE_LOCK_H_ #define _MACHINE_SIMPLE_LOCK_H_ diff --git a/osfmk/machine/smp.h b/osfmk/machine/smp.h index 108c153ad..2714ba5a2 100644 --- a/osfmk/machine/smp.h +++ b/osfmk/machine/smp.h @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/machine/task.h b/osfmk/machine/task.h index 4c0a3688a..9f8dad22f 100644 --- a/osfmk/machine/task.h +++ b/osfmk/machine/task.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_TASK_H diff --git a/osfmk/machine/thread.h b/osfmk/machine/thread.h index 0b91ffe04..79234038a 100644 --- a/osfmk/machine/thread.h +++ b/osfmk/machine/thread.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_THREAD_H diff --git a/osfmk/machine/trap.h b/osfmk/machine/trap.h index b08f4c64f..f5b9e94eb 100644 --- a/osfmk/machine/trap.h +++ b/osfmk/machine/trap.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_TRAP_H diff --git a/osfmk/machine/vm_tuning.h b/osfmk/machine/vm_tuning.h index 84bb4efe5..4563ded35 100644 --- a/osfmk/machine/vm_tuning.h +++ b/osfmk/machine/vm_tuning.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_VM_TUNING_H diff --git a/osfmk/machine/xpr.h b/osfmk/machine/xpr.h index 9ed04ff5f..ee3be2d26 100644 --- a/osfmk/machine/xpr.h +++ b/osfmk/machine/xpr.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_XPR_H diff --git a/osfmk/prng/prng_random.c b/osfmk/prng/prng_random.c index ec90cc789..44c865a17 100644 --- a/osfmk/prng/prng_random.c +++ b/osfmk/prng/prng_random.c @@ -71,7 +71,7 @@ rdseed_step(uint64_t * seed) { uint8_t ok; - asm volatile("rdseed %0; setc %1" : "=r"(*seed), "=qm"(ok)); + asm volatile ("rdseed %0; setc %1" : "=r"(*seed), "=qm"(ok)); return (int)ok; } @@ -85,7 +85,7 @@ rdseed_retry(uint64_t * seed, size_t nretries) if (rdseed_step(seed)) { return 1; } else { - asm volatile("pause"); + asm volatile ("pause"); } } @@ -117,7 +117,7 @@ rdrand_step(uint64_t * rand) { uint8_t ok; - asm volatile("rdrand %0; setc %1" : "=r"(*rand), "=qm"(ok)); + asm volatile ("rdrand %0; setc %1" : "=r"(*rand), "=qm"(ok)); return (int)ok; } @@ -198,9 +198,9 @@ void entropy_buffer_read(void * buffer, size_t * count); typedef void (*entropysource)(void * buf, size_t * nbytes); static const entropysource entropysources[] = { - entropy_buffer_read, + entropy_buffer_read, #if defined(__x86_64__) - intel_entropysource, + intel_entropysource, #endif }; @@ -290,9 +290,9 @@ static struct { struct ccdrbg_info drbg_info; const struct ccdrbg_nisthmac_custom drbg_custom; } erandom = {.drbg_custom = { - .di = &ccsha1_eay_di, - .strictFIPS = 0, - }}; + .di = &ccsha1_eay_di, + .strictFIPS = 0, + }}; static void read_erandom(void * buf, uint32_t nbytes); @@ -310,26 +310,28 @@ entropy_buffer_read(void * buffer, size_t * count) *count = ENTROPY_BUFFER_BYTE_SIZE; } - current_state = ml_set_interrupts_enabled(FALSE); + current_state = ml_early_set_interrupts_enabled(FALSE); memcpy(buffer, EntropyData.buffer, *count); /* Consider removing this mixing step rdar://problem/31668239 */ - for (i = 0, j = (ENTROPY_BUFFER_SIZE - 1); i < ENTROPY_BUFFER_SIZE; j = i, i++) + for (i = 0, j = (ENTROPY_BUFFER_SIZE - 1); i < ENTROPY_BUFFER_SIZE; j = i, i++) { EntropyData.buffer[i] = EntropyData.buffer[i] ^ EntropyData.buffer[j]; + } - (void)ml_set_interrupts_enabled(current_state); + (void) ml_early_set_interrupts_enabled(current_state); #if DEVELOPMENT || DEBUG uint32_t * word = buffer; /* Good for both 32-bit and 64-bit kernels. */ - for (i = 0; i < ENTROPY_BUFFER_SIZE; i += 4) + for (i = 0; i < ENTROPY_BUFFER_SIZE; i += 4) { /* * We use "EARLY" here so that we can grab early entropy on * ARM, where tracing is not started until after PRNG is * initialized. 
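The rdseed_step/rdseed_retry pair above shows the standard way to drive x86 RDSEED: the instruction sets the carry flag only when it actually returned entropy, SETC captures that flag into a byte, and failed attempts back off with PAUSE before retrying. Restated as a compact standalone unit (x86_64 only, same shape as the source; names are condensed):

#include <stdint.h>
#include <stddef.h>

#if defined(__x86_64__)
static int
rdseed_once(uint64_t *seed)
{
	uint8_t ok;
	/* CF=1 means *seed holds fresh entropy; SETC copies CF into ok. */
	asm volatile ("rdseed %0; setc %1" : "=r" (*seed), "=qm" (ok));
	return (int)ok;
}

static int
rdseed_with_retries(uint64_t *seed, size_t nretries)
{
	for (size_t i = 0; i < nretries; i++) {
		if (rdseed_once(seed)) {
			return 1;           /* success */
		}
		asm volatile ("pause"); /* brief backoff; the DRNG may refill */
	}
	return 0;                       /* caller falls back to other sources */
}
#endif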
*/ KERNEL_DEBUG_EARLY(ENTROPY_READ(i / 4), word[i + 0], word[i + 1], word[i + 2], word[i + 3]); + } #endif } @@ -406,17 +408,35 @@ early_random(void) ps = 0; /* boot cpu */ rc = ccdrbg_init(&erandom.drbg_info, state, sizeof(erandom.seed), erandom.seed, sizeof(nonce), &nonce, sizeof(ps), &ps); cc_clear(sizeof(nonce), &nonce); - if (rc != CCDRBG_STATUS_OK) + if (rc != CCDRBG_STATUS_OK) { panic("ccdrbg_init() returned %d", rc); + } /* Generate output */ rc = ccdrbg_generate(&erandom.drbg_info, state, sizeof(result), &result, 0, NULL); - if (rc != CCDRBG_STATUS_OK) + if (rc != CCDRBG_STATUS_OK) { panic("ccdrbg_generate() returned %d", rc); + } return result; - }; + } + ; +#if defined(__x86_64__) + /* + * Calling read_erandom() before gsbase is initialized is potentially + * catastrophic, so assert that it's not set to the magic value set + * in i386_init.c before proceeding with the call. We cannot use + * assert here because it ultimately calls panic, which executes + * operations that involve accessing %gs-relative data (and additionally + * causes a debug trap which will not work properly this early in boot.) + */ + if (rdmsr64(MSR_IA32_GS_BASE) == EARLY_GSBASE_MAGIC) { + kprintf("[early_random] Cannot proceed: GSBASE is not initialized\n"); + hlt(); + /*NOTREACHED*/ + } +#endif read_erandom(&result, sizeof(result)); return result; @@ -437,16 +457,18 @@ read_erandom(void * buffer, u_int numBytes) for (;;) { /* Generate output */ rc = ccdrbg_generate(&erandom.drbg_info, state, numBytes, buffer, 0, NULL); - if (rc == CCDRBG_STATUS_OK) + if (rc == CCDRBG_STATUS_OK) { break; + } if (rc == CCDRBG_STATUS_NEED_RESEED) { /* It's time to reseed. Get more entropy */ nbytes = entropy_readall(erandom.seed, EARLY_RANDOM_SEED_SIZE); assert(nbytes >= EARLY_RANDOM_SEED_SIZE); rc = ccdrbg_reseed(&erandom.drbg_info, state, sizeof(erandom.seed), erandom.seed, 0, NULL); cc_clear(sizeof(erandom.seed), erandom.seed); - if (rc == CCDRBG_STATUS_OK) + if (rc == CCDRBG_STATUS_OK) { continue; + } panic("read_erandom reseed error %d\n", rc); } panic("read_erandom ccdrbg error %d\n", rc); @@ -500,8 +522,9 @@ early_random_cpu_init(int cpu) nonce = ml_get_timebase(); rc = ccdrbg_init(&erandom.drbg_info, state, sizeof(erandom.seed), erandom.seed, sizeof(nonce), &nonce, sizeof(cpu), &cpu); cc_clear(sizeof(nonce), &nonce); - if (rc != CCDRBG_STATUS_OK) + if (rc != CCDRBG_STATUS_OK) { panic("ccdrbg_init() returned %d", rc); + } } void @@ -552,8 +575,9 @@ read_random(void * buffer, u_int numbytes) */ for (;;) { PRNG_CCKPRNG(err = prng_generate(&prng.ctx, numbytes, buffer)); - if (err == CCKPRNG_OK) + if (err == CCKPRNG_OK) { break; + } if (err == CCKPRNG_NEED_ENTROPY) { Reseed(); continue; @@ -607,7 +631,7 @@ random_bool_init(struct bool_gen * bg) void random_bool_gen_entropy(struct bool_gen * bg, unsigned int * buffer, int count) { - simple_lock(&bg->lock); + simple_lock(&bg->lock, LCK_GRP_NULL); int i, t; for (i = 0; i < count; i++) { bg->seed[1] ^= (bg->seed[1] << 5); @@ -634,8 +658,9 @@ random_bool_gen_bits(struct bool_gen * bg, unsigned int * buffer, unsigned int c * Find a portion of the buffer that hasn't been emptied. * We might have emptied our last index in the previous iteration. */ - while (index < count && buffer[index] == 0) + while (index < count && buffer[index] == 0) { index++; + } /* If we've exhausted the pool, refill it. 
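Both read_erandom and read_random above follow the same control shape: ask the DRBG/PRNG for output and, if it reports that its entropy budget is exhausted (CCDRBG_STATUS_NEED_RESEED / CCKPRNG_NEED_ENTROPY), reseed and retry; any other failure is fatal and panics. A self-contained sketch of that loop with stand-in hooks — the names below are illustrative, the real code goes through corecrypto:

#include <stddef.h>
#include <string.h>

enum prng_status { PRNG_OK, PRNG_NEED_ENTROPY };

static int prng_seeded;         /* stand-in for the DRBG's reseed state */

static enum prng_status
prng_generate(void *buf, size_t n)
{
	if (!prng_seeded) {
		return PRNG_NEED_ENTROPY;
	}
	memset(buf, 0xA5, n);   /* placeholder bytes, not real randomness */
	return PRNG_OK;
}

static void
prng_reseed(void)
{
	prng_seeded = 1;        /* stand-in for entropy_readall + ccdrbg_reseed */
}

static void
read_random_sketch(void *buf, size_t n)
{
	for (;;) {
		if (prng_generate(buf, n) == PRNG_OK) {
			break;          /* got n bytes */
		}
		prng_reseed();          /* refill entropy, then retry */
	}
}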
*/ if (index == count) { diff --git a/osfmk/profiling/i386/profile-md.h b/osfmk/profiling/i386/profile-md.h index 86ed5b023..942a5438e 100644 --- a/osfmk/profiling/i386/profile-md.h +++ b/osfmk/profiling/i386/profile-md.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,108 +38,108 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.5.2 1996/07/31 09:57:36 paire - * Added some more constraints to __asm__ functions for compilation - * under gcc2.7.1 for PROF_CNT_[L]{ADD|SUB} macros - * [96/06/14 paire] + * Added some more constraints to __asm__ functions for compilation + * under gcc2.7.1 for PROF_CNT_[L]{ADD|SUB} macros + * [96/06/14 paire] * * Revision 1.1.5.1 1995/01/06 19:53:52 devrcs - * mk6 CR668 - 1.3b26 merge - * new file for mk6 - * [1994/10/12 22:25:27 dwm] - * + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:25:27 dwm] + * * Revision 1.1.2.2 1994/05/16 19:19:26 meissner - * Add {,L}PROF_CNT_{SUB,LSUB,OVERFLOW} macros for gprof command. - * [1994/05/10 10:36:06 meissner] - * - * Correct 64-bit integer asms to specify result values as inputs, and use =g instead of =m. - * Cast the integer argument to PROF_CNT_ADD to unsigned long, so a short register is widened. - * Add more support for writing the gprof command. - * PROF_CNT_{EQ,NE} should not use ^=, it just uses ^. - * Round PROF_CNT_DIGITS up to 24 bytes so it is word aligned. - * _profile_cnt_to_decimal now takes the low/high values as separate arguments. - * Delete _profile_cnt_to_hex. - * [1994/04/28 21:45:07 meissner] - * - * Add more 64 bit arithmetic macros to support writing gprof. - * [1994/04/20 15:47:05 meissner] - * + * Add {,L}PROF_CNT_{SUB,LSUB,OVERFLOW} macros for gprof command. + * [1994/05/10 10:36:06 meissner] + * + * Correct 64-bit integer asms to specify result values as inputs, and use =g instead of =m. + * Cast the integer argument to PROF_CNT_ADD to unsigned long, so a short register is widened. + * Add more support for writing the gprof command. + * PROF_CNT_{EQ,NE} should not use ^=, it just uses ^. + * Round PROF_CNT_DIGITS up to 24 bytes so it is word aligned. + * _profile_cnt_to_decimal now takes the low/high values as separate arguments. + * Delete _profile_cnt_to_hex. 
+ * [1994/04/28 21:45:07 meissner] + * + * Add more 64 bit arithmetic macros to support writing gprof. + * [1994/04/20 15:47:05 meissner] + * * Revision 1.1.2.1 1994/04/08 17:51:56 meissner - * Correct spelling on LPROF_CNT_TO_LDOUBLE macro. - * [1994/04/08 16:18:06 meissner] - * - * Make LHISTCOUNTER be 64 bits. - * Define LPROF_CNT_INC to increment LHISTCOUNTER. - * [1994/04/08 12:40:32 meissner] - * - * Make most stats 64 bits, except for things like memory allocation. - * [1994/04/02 14:58:34 meissner] - * - * Add overflow support for {gprof,prof,old,dummy}_mcount counters. - * [1994/03/17 20:13:37 meissner] - * - * Add gprof/prof overflow support - * [1994/03/17 14:56:56 meissner] - * - * Define LHISTCOUNTER. - * [1994/02/28 12:05:16 meissner] - * - * Set HISTFRACTION to 4, so new lprofil call takes the same space. - * [1994/02/24 16:15:34 meissner] - * - * Add too_low/too_high to profile_stats. - * [1994/02/16 22:38:23 meissner] - * - * Make prof_cnt_t unsigned long. - * [1994/02/11 16:52:09 meissner] - * - * Remember function unique ptr in gfuncs structure to reset profiling. - * Add support for range checking gprof arc {from,self}pc addresses. - * Add counter for # times acontext was locked. - * Expand copyright. - * [1994/02/07 12:41:08 meissner] - * - * Keep track of the number of times the kernel overflows the HISTCOUNTER counter. - * [1994/02/03 20:13:31 meissner] - * - * Add stats for {user,kernel,idle} mode in the kernel. - * [1994/02/03 15:17:36 meissner] - * - * No change. - * [1994/02/03 00:58:59 meissner] - * - * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. - * [1994/02/01 12:04:04 meissner] - * - * Split # records to # gprof and # prof records. - * Add my_cpu/max_cpu fields. - * [1994/01/28 23:33:30 meissner] - * - * Eliminate hash_{size,mask} from gfuncs structure. - * [1994/01/26 20:23:41 meissner] - * - * Add structure size fields to _profile_{vars,stats,md}. - * Add major/minor version number to _profile_md. - * Move allocation context block pointer to main structure. - * Delete shift count for allocation contexts. - * [1994/01/25 01:46:08 meissner] - * - * Add HASHFRACTION - * [1994/01/22 01:14:02 meissner] - * - * Split profile-md.h into profile-internal.h and profile-md. - * [1994/01/20 20:57:18 meissner] - * - * Fixup copyright. - * [1994/01/18 23:08:14 meissner] - * - * Make flags byte-sized. - * Add have_bb flag. - * Add init_format flag. - * [1994/01/18 21:57:18 meissner] - * - * CR 10198 - Initial version. - * [1994/01/18 19:44:59 meissner] - * + * Correct spelling on LPROF_CNT_TO_LDOUBLE macro. + * [1994/04/08 16:18:06 meissner] + * + * Make LHISTCOUNTER be 64 bits. + * Define LPROF_CNT_INC to increment LHISTCOUNTER. + * [1994/04/08 12:40:32 meissner] + * + * Make most stats 64 bits, except for things like memory allocation. + * [1994/04/02 14:58:34 meissner] + * + * Add overflow support for {gprof,prof,old,dummy}_mcount counters. + * [1994/03/17 20:13:37 meissner] + * + * Add gprof/prof overflow support + * [1994/03/17 14:56:56 meissner] + * + * Define LHISTCOUNTER. + * [1994/02/28 12:05:16 meissner] + * + * Set HISTFRACTION to 4, so new lprofil call takes the same space. + * [1994/02/24 16:15:34 meissner] + * + * Add too_low/too_high to profile_stats. + * [1994/02/16 22:38:23 meissner] + * + * Make prof_cnt_t unsigned long. + * [1994/02/11 16:52:09 meissner] + * + * Remember function unique ptr in gfuncs structure to reset profiling. + * Add support for range checking gprof arc {from,self}pc addresses. 
+ * Add counter for # times acontext was locked. + * Expand copyright. + * [1994/02/07 12:41:08 meissner] + * + * Keep track of the number of times the kernel overflows the HISTCOUNTER counter. + * [1994/02/03 20:13:31 meissner] + * + * Add stats for {user,kernel,idle} mode in the kernel. + * [1994/02/03 15:17:36 meissner] + * + * No change. + * [1994/02/03 00:58:59 meissner] + * + * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. + * [1994/02/01 12:04:04 meissner] + * + * Split # records to # gprof and # prof records. + * Add my_cpu/max_cpu fields. + * [1994/01/28 23:33:30 meissner] + * + * Eliminate hash_{size,mask} from gfuncs structure. + * [1994/01/26 20:23:41 meissner] + * + * Add structure size fields to _profile_{vars,stats,md}. + * Add major/minor version number to _profile_md. + * Move allocation context block pointer to main structure. + * Delete shift count for allocation contexts. + * [1994/01/25 01:46:08 meissner] + * + * Add HASHFRACTION + * [1994/01/22 01:14:02 meissner] + * + * Split profile-md.h into profile-internal.h and profile-md. + * [1994/01/20 20:57:18 meissner] + * + * Fixup copyright. + * [1994/01/18 23:08:14 meissner] + * + * Make flags byte-sized. + * Add have_bb flag. + * Add init_format flag. + * [1994/01/18 21:57:18 meissner] + * + * CR 10198 - Initial version. + * [1994/01/18 19:44:59 meissner] + * * $EndLog$ */ @@ -161,11 +161,11 @@ * These hold either a pointer or a signed/unsigned int. * They are 32 bit on i386 and 64 bit on x86_64. */ -typedef long prof_ptrint_t; -typedef unsigned long prof_uptrint_t; +typedef long prof_ptrint_t; +typedef unsigned long prof_uptrint_t; -typedef long prof_lock_t; /* lock word type */ -typedef unsigned char prof_flag_t; /* type for boolean flags */ +typedef long prof_lock_t; /* lock word type */ +typedef unsigned char prof_flag_t; /* type for boolean flags */ /* * Double precision counter. @@ -175,51 +175,51 @@ typedef unsigned char prof_flag_t; /* type for boolean flags */ typedef unsigned long prof_cnt_t; /* x86_64 */ -#define PROF_CNT_INC(cnt) (cnt++) -#define PROF_CNT_ADD(cnt,val) (cnt+=val) -#define PROF_CNT_LADD(cnt,val) (cnt+=val) -#define PROF_CNT_SUB(cnt,val) (cnt-=val) -#define PROF_CNT_LSUB(cnt,val) (cnt-=val) - -#define PROF_ULONG_TO_CNT(cnt,val) (((cnt).high = 0), ((cnt).low = val)) -#define PROF_CNT_OVERFLOW(cnt,high,low) (((high) = (cnt).high), ((low) = (cnt).low)) -#define PROF_CNT_TO_ULONG(cnt) (((cnt).high == 0) ? 
(cnt).low : 0xffffffffu) -#define PROF_CNT_TO_LDOUBLE(cnt) ((((long double)(cnt).high) * 4294967296.0L) + (long double)(cnt).low) -#define PROF_CNT_TO_DECIMAL(buf,cnt) _profile_cnt_to_decimal(buf, (cnt).low, (cnt).high) -#define PROF_CNT_EQ_0(cnt) (((cnt).high | (cnt).low) == 0) -#define PROF_CNT_NE_0(cnt) (((cnt).high | (cnt).low) != 0) -#define PROF_CNT_EQ(cnt1,cnt2) ((((cnt1).high ^ (cnt2).high) | ((cnt1).low ^ (cnt2).low)) == 0) -#define PROF_CNT_NE(cnt1,cnt2) ((((cnt1).high ^ (cnt2).high) | ((cnt1).low ^ (cnt2).low)) != 0) -#define PROF_CNT_GT(cnt1,cnt2) (((cnt1).high > (cnt2).high) || ((cnt1).low > (cnt2).low)) -#define PROF_CNT_LT(cnt1,cnt2) (((cnt1).high < (cnt2).high) || ((cnt1).low < (cnt2).low)) +#define PROF_CNT_INC(cnt) (cnt++) +#define PROF_CNT_ADD(cnt, val) (cnt+=val) +#define PROF_CNT_LADD(cnt, val) (cnt+=val) +#define PROF_CNT_SUB(cnt, val) (cnt-=val) +#define PROF_CNT_LSUB(cnt, val) (cnt-=val) + +#define PROF_ULONG_TO_CNT(cnt, val) (((cnt).high = 0), ((cnt).low = val)) +#define PROF_CNT_OVERFLOW(cnt, high, low) (((high) = (cnt).high), ((low) = (cnt).low)) +#define PROF_CNT_TO_ULONG(cnt) (((cnt).high == 0) ? (cnt).low : 0xffffffffu) +#define PROF_CNT_TO_LDOUBLE(cnt) ((((long double)(cnt).high) * 4294967296.0L) + (long double)(cnt).low) +#define PROF_CNT_TO_DECIMAL(buf, cnt) _profile_cnt_to_decimal(buf, (cnt).low, (cnt).high) +#define PROF_CNT_EQ_0(cnt) (((cnt).high | (cnt).low) == 0) +#define PROF_CNT_NE_0(cnt) (((cnt).high | (cnt).low) != 0) +#define PROF_CNT_EQ(cnt1, cnt2) ((((cnt1).high ^ (cnt2).high) | ((cnt1).low ^ (cnt2).low)) == 0) +#define PROF_CNT_NE(cnt1, cnt2) ((((cnt1).high ^ (cnt2).high) | ((cnt1).low ^ (cnt2).low)) != 0) +#define PROF_CNT_GT(cnt1, cnt2) (((cnt1).high > (cnt2).high) || ((cnt1).low > (cnt2).low)) +#define PROF_CNT_LT(cnt1, cnt2) (((cnt1).high < (cnt2).high) || ((cnt1).low < (cnt2).low)) /* max # digits + null to hold prof_cnt_t values (round up to multiple of 4) */ -#define PROF_CNT_DIGITS 24 +#define PROF_CNT_DIGITS 24 /* * Types of the profil counter. 
*/ -typedef unsigned short HISTCOUNTER; /* profil */ -typedef prof_cnt_t LHISTCOUNTER; /* lprofil */ - -#define LPROF_ULONG_TO_CNT(cnt,val) PROF_ULONG_TO_CNT(cnt,val) -#define LPROF_CNT_INC(lp) PROF_CNT_INC(lp) -#define LPROF_CNT_ADD(lp,val) PROF_CNT_ADD(lp,val) -#define LPROF_CNT_LADD(lp,val) PROF_CNT_LADD(lp,val) -#define LPROF_CNT_SUB(lp,val) PROF_CNT_SUB(lp,val) -#define LPROF_CNT_LSUB(lp,val) PROF_CNT_LSUB(lp,val) -#define LPROF_CNT_OVERFLOW(lp,high,low) PROF_CNT_OVERFLOW(lp,high,low) -#define LPROF_CNT_TO_ULONG(lp) PROF_CNT_TO_ULONG(lp) -#define LPROF_CNT_TO_LDOUBLE(lp) PROF_CNT_TO_LDOUBLE(lp) -#define LPROF_CNT_TO_DECIMAL(buf,cnt) PROF_CNT_TO_DECIMAL(buf,cnt) -#define LPROF_CNT_EQ_0(cnt) PROF_CNT_EQ_0(cnt) -#define LPROF_CNT_NE_0(cnt) PROF_CNT_NE_0(cnt) -#define LPROF_CNT_EQ(cnt1,cnt2) PROF_CNT_EQ(cnt1,cnt2) -#define LPROF_CNT_NE(cnt1,cnt2) PROF_CNT_NE(cnt1,cnt2) -#define LPROF_CNT_GT(cnt1,cnt2) PROF_CNT_GT(cnt1,cnt2) -#define LPROF_CNT_LT(cnt1,cnt2) PROF_CNT_LT(cnt1,cnt2) -#define LPROF_CNT_DIGITS PROF_CNT_DIGITS +typedef unsigned short HISTCOUNTER; /* profil */ +typedef prof_cnt_t LHISTCOUNTER; /* lprofil */ + +#define LPROF_ULONG_TO_CNT(cnt, val) PROF_ULONG_TO_CNT(cnt,val) +#define LPROF_CNT_INC(lp) PROF_CNT_INC(lp) +#define LPROF_CNT_ADD(lp, val) PROF_CNT_ADD(lp,val) +#define LPROF_CNT_LADD(lp, val) PROF_CNT_LADD(lp,val) +#define LPROF_CNT_SUB(lp, val) PROF_CNT_SUB(lp,val) +#define LPROF_CNT_LSUB(lp, val) PROF_CNT_LSUB(lp,val) +#define LPROF_CNT_OVERFLOW(lp, high, low) PROF_CNT_OVERFLOW(lp,high,low) +#define LPROF_CNT_TO_ULONG(lp) PROF_CNT_TO_ULONG(lp) +#define LPROF_CNT_TO_LDOUBLE(lp) PROF_CNT_TO_LDOUBLE(lp) +#define LPROF_CNT_TO_DECIMAL(buf, cnt) PROF_CNT_TO_DECIMAL(buf,cnt) +#define LPROF_CNT_EQ_0(cnt) PROF_CNT_EQ_0(cnt) +#define LPROF_CNT_NE_0(cnt) PROF_CNT_NE_0(cnt) +#define LPROF_CNT_EQ(cnt1, cnt2) PROF_CNT_EQ(cnt1,cnt2) +#define LPROF_CNT_NE(cnt1, cnt2) PROF_CNT_NE(cnt1,cnt2) +#define LPROF_CNT_GT(cnt1, cnt2) PROF_CNT_GT(cnt1,cnt2) +#define LPROF_CNT_LT(cnt1, cnt2) PROF_CNT_LT(cnt1,cnt2) +#define LPROF_CNT_DIGITS PROF_CNT_DIGITS /* * fraction of text space to allocate for histogram counters @@ -231,15 +231,15 @@ typedef prof_cnt_t LHISTCOUNTER; /* lprofil */ * Fraction of text space to allocate for from hash buckets. */ -#define HASHFRACTION HISTFRACTION +#define HASHFRACTION HISTFRACTION /* * Prof call count, external format. 
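The PROF_CNT_* family above manipulates a double-precision counter held as a {low, high} word pair on 32-bit targets. One detail worth seeing spelled out: a strict lexicographic compare must let the high words decide and fall back to the low words only on a tie, whereas PROF_CNT_GT as written ORs the two comparisons, so a pair like {high 0, low 5} compares greater than {high 1, low 0}. A hedged standalone version of the tie-aware compare (struct and helper names are illustrative):

#include <stdint.h>
#include <stdbool.h>

struct split_cnt {
	uint32_t low;           /* least significant word */
	uint32_t high;          /* most significant word */
};

/* High words decide; low words only break a tie. */
static bool
split_cnt_gt(struct split_cnt a, struct split_cnt b)
{
	return (a.high > b.high) || (a.high == b.high && a.low > b.low);
}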
*/ struct prof_ext { - prof_uptrint_t cvalue; /* caller address */ - prof_uptrint_t cncall; /* # of calls */ + prof_uptrint_t cvalue; /* caller address */ + prof_uptrint_t cncall; /* # of calls */ }; /* @@ -247,8 +247,8 @@ struct prof_ext { */ struct prof_int { - struct prof_ext prof; /* external prof struct */ - prof_uptrint_t overflow; /* # times prof counter overflowed */ + struct prof_ext prof; /* external prof struct */ + prof_uptrint_t overflow; /* # times prof counter overflowed */ }; /* @@ -256,9 +256,9 @@ struct prof_int { */ struct gprof_arc { - prof_uptrint_t frompc; /* caller's caller */ - prof_uptrint_t selfpc; /* caller's address */ - prof_uptrint_t count; /* # times arc traversed */ + prof_uptrint_t frompc; /* caller's caller */ + prof_uptrint_t selfpc; /* caller's address */ + prof_uptrint_t count; /* # times arc traversed */ }; /* @@ -266,68 +266,68 @@ struct gprof_arc { */ struct hasharc { - struct hasharc *next; /* next gprof record */ - struct gprof_arc arc; /* gprof record */ - prof_uptrint_t overflow; /* # times counter overflowed */ + struct hasharc *next; /* next gprof record */ + struct gprof_arc arc; /* gprof record */ + prof_uptrint_t overflow; /* # times counter overflowed */ }; /* * Linked list of all function profile blocks. */ -#define MAX_CACHE 3 /* # cache table entries */ +#define MAX_CACHE 3 /* # cache table entries */ struct gfuncs { - struct hasharc **hash_ptr; /* gprof hash table */ - struct hasharc **unique_ptr; /* function unique pointer */ - struct prof_int prof; /* -p stats for elf */ - struct hasharc *cache_ptr[MAX_CACHE]; /* cache element pointers */ + struct hasharc **hash_ptr; /* gprof hash table */ + struct hasharc **unique_ptr; /* function unique pointer */ + struct prof_int prof; /* -p stats for elf */ + struct hasharc *cache_ptr[MAX_CACHE]; /* cache element pointers */ }; /* * Profile information which might be written out in ELF {,g}mon.out files. 
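
[Note on the hunk above] struct gprof_arc records one call-graph edge as (frompc, selfpc, count), and struct hasharc chains those records into hash buckets, with an overflow word for wrapped counters. A sketch of how an mcount-style hook might bump an arc through such a chained table; the hash function, table size, and allocation policy here are assumptions, not the kernel's:

    #include <stdlib.h>
    #include <stdint.h>

    struct gprof_arc { uintptr_t frompc, selfpc, count; };
    struct hasharc { struct hasharc *next; struct gprof_arc arc; uintptr_t overflow; };

    #define NBUCKETS 1024   /* hypothetical table size */
    static struct hasharc *table[NBUCKETS];

    /* Toy hash of the (caller, callee) pair; the real kernel hash differs. */
    static unsigned arc_hash(uintptr_t frompc, uintptr_t selfpc)
    {
        return (unsigned)((frompc ^ (selfpc >> 2)) % NBUCKETS);
    }

    /* Find or create the arc record and bump its traversal count. */
    static void record_arc(uintptr_t frompc, uintptr_t selfpc)
    {
        struct hasharc **head = &table[arc_hash(frompc, selfpc)];
        for (struct hasharc *p = *head; p != NULL; p = p->next) {
            if (p->arc.frompc == frompc && p->arc.selfpc == selfpc) {
                if (++p->arc.count == 0) {   /* counter wrapped */
                    p->overflow++;
                }
                return;
            }
        }
        struct hasharc *n = calloc(1, sizeof(*n));
        if (n == NULL) {
            return;           /* drop the sample on allocation failure */
        }
        n->arc.frompc = frompc;
        n->arc.selfpc = selfpc;
        n->arc.count = 1;
        n->next = *head;
        *head = n;
    }
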
*/ -#define MAX_BUCKETS 9 /* max bucket chain to print out */ - -struct profile_stats { /* Debugging counters */ - prof_uptrint_t major_version; /* major version number */ - prof_uptrint_t minor_version; /* minor version number */ - prof_uptrint_t stats_size; /* size of profile_vars structure */ - prof_uptrint_t profil_buckets; /* # profil buckets */ - prof_uptrint_t my_cpu; /* identify current cpu/thread */ - prof_uptrint_t max_cpu; /* identify max cpu/thread */ - prof_uptrint_t prof_records; /* # of functions profiled */ - prof_uptrint_t gprof_records; /* # of gprof arcs */ - prof_uptrint_t hash_buckets; /* # gprof hash buckets */ - prof_uptrint_t bogus_count; /* # of bogus functions found in gprof */ - - prof_cnt_t cnt; /* # of calls to _{,g}prof_mcount */ - prof_cnt_t dummy; /* # of calls to _dummy_mcount */ - prof_cnt_t old_mcount; /* # of calls to old mcount */ - prof_cnt_t hash_search; /* # hash buckets searched */ - prof_cnt_t hash_num; /* # times hash table searched */ - prof_cnt_t user_ticks; /* # ticks in user space */ - prof_cnt_t kernel_ticks; /* # ticks in kernel space */ - prof_cnt_t idle_ticks; /* # ticks in idle mode */ - prof_cnt_t overflow_ticks; /* # ticks where HISTCOUNTER overflowed */ - prof_cnt_t acontext_locked; /* # times an acontext was locked */ - prof_cnt_t too_low; /* # times a histogram tick was too low */ - prof_cnt_t too_high; /* # times a histogram tick was too high */ - prof_cnt_t prof_overflow; /* # times a prof count field overflowed */ - prof_cnt_t gprof_overflow; /* # times a gprof count field overflowed */ - - /* allocation statistics */ - prof_uptrint_t num_alloc [(int)ACONTEXT_MAX]; /* # allocations */ - prof_uptrint_t bytes_alloc[(int)ACONTEXT_MAX]; /* bytes allocated */ - prof_uptrint_t num_context[(int)ACONTEXT_MAX]; /* # contexts */ - prof_uptrint_t wasted [(int)ACONTEXT_MAX]; /* wasted bytes */ - prof_uptrint_t overhead [(int)ACONTEXT_MAX]; /* overhead bytes */ - - prof_uptrint_t buckets[MAX_BUCKETS+1]; /* # hash indexes that have n buckets */ +#define MAX_BUCKETS 9 /* max bucket chain to print out */ + +struct profile_stats { /* Debugging counters */ + prof_uptrint_t major_version; /* major version number */ + prof_uptrint_t minor_version; /* minor version number */ + prof_uptrint_t stats_size; /* size of profile_vars structure */ + prof_uptrint_t profil_buckets; /* # profil buckets */ + prof_uptrint_t my_cpu; /* identify current cpu/thread */ + prof_uptrint_t max_cpu; /* identify max cpu/thread */ + prof_uptrint_t prof_records; /* # of functions profiled */ + prof_uptrint_t gprof_records; /* # of gprof arcs */ + prof_uptrint_t hash_buckets; /* # gprof hash buckets */ + prof_uptrint_t bogus_count; /* # of bogus functions found in gprof */ + + prof_cnt_t cnt; /* # of calls to _{,g}prof_mcount */ + prof_cnt_t dummy; /* # of calls to _dummy_mcount */ + prof_cnt_t old_mcount; /* # of calls to old mcount */ + prof_cnt_t hash_search; /* # hash buckets searched */ + prof_cnt_t hash_num; /* # times hash table searched */ + prof_cnt_t user_ticks; /* # ticks in user space */ + prof_cnt_t kernel_ticks; /* # ticks in kernel space */ + prof_cnt_t idle_ticks; /* # ticks in idle mode */ + prof_cnt_t overflow_ticks; /* # ticks where HISTCOUNTER overflowed */ + prof_cnt_t acontext_locked; /* # times an acontext was locked */ + prof_cnt_t too_low; /* # times a histogram tick was too low */ + prof_cnt_t too_high; /* # times a histogram tick was too high */ + prof_cnt_t prof_overflow; /* # times a prof count field overflowed */ + prof_cnt_t gprof_overflow; /* # 
times a gprof count field overflowed */ + + /* allocation statistics */ + prof_uptrint_t num_alloc[(int)ACONTEXT_MAX]; /* # allocations */ + prof_uptrint_t bytes_alloc[(int)ACONTEXT_MAX]; /* bytes allocated */ + prof_uptrint_t num_context[(int)ACONTEXT_MAX]; /* # contexts */ + prof_uptrint_t wasted[(int)ACONTEXT_MAX]; /* wasted bytes */ + prof_uptrint_t overhead[(int)ACONTEXT_MAX]; /* overhead bytes */ + + prof_uptrint_t buckets[MAX_BUCKETS + 1]; /* # hash indexes that have n buckets */ prof_cnt_t cache_hits[MAX_CACHE]; /* # times nth cache entry matched */ - prof_cnt_t stats_unused[64]; /* reserved for future use */ + prof_cnt_t stats_unused[64]; /* reserved for future use */ }; #define PROFILE_MAJOR_VERSION 1 @@ -338,18 +338,18 @@ struct profile_stats { /* Debugging counters */ */ struct profile_md { - int major_version; /* major version number */ - int minor_version; /* minor version number */ - size_t md_size; /* size of profile_md structure */ - struct hasharc **hash_ptr; /* gprof hash table */ - size_t hash_size; /* size of hash table */ - prof_uptrint_t num_cache; /* # of cache entries */ - void (*save_mcount_ptr)(void); /* save for _mcount_ptr */ - void (**mcount_ptr_ptr)(void); /* pointer to _mcount_ptr */ - struct hasharc *dummy_ptr; /* pointer to dummy gprof record */ - void *(*alloc_pages)(size_t); /* pointer to _profile_alloc_pages */ + int major_version; /* major version number */ + int minor_version; /* minor version number */ + size_t md_size; /* size of profile_md structure */ + struct hasharc **hash_ptr; /* gprof hash table */ + size_t hash_size; /* size of hash table */ + prof_uptrint_t num_cache; /* # of cache entries */ + void (*save_mcount_ptr)(void); /* save for _mcount_ptr */ + void(**mcount_ptr_ptr)(void); /* pointer to _mcount_ptr */ + struct hasharc *dummy_ptr; /* pointer to dummy gprof record */ + void *(*alloc_pages)(size_t); /* pointer to _profile_alloc_pages */ char num_buffer[PROF_CNT_DIGITS]; /* convert 64 bit ints to string */ - long md_unused[58]; /* add unused fields */ + long md_unused[58]; /* add unused fields */ }; /* diff --git a/osfmk/profiling/machine/profile-md.h b/osfmk/profiling/machine/profile-md.h index 0488b28ff..11a08f978 100644 --- a/osfmk/profiling/machine/profile-md.h +++ b/osfmk/profiling/machine/profile-md.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
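
[Note on the hunk above] struct profile_stats carries paired counters from which the usual health metrics fall out, e.g. hash_search over hash_num gives the average chain length walked per lookup, and the cache_hits array against cnt gives the per-slot cache hit rate. A userland sketch with a trimmed, hypothetical stand-in struct; in the kernel these fields are prof_cnt_t and would go through PROF_CNT_TO_LDOUBLE first:

    #include <stdio.h>

    /* Trimmed stand-in for the profile_stats fields used below. */
    struct stats {
        long double hash_search;   /* # hash buckets searched */
        long double hash_num;      /* # times hash table searched */
        long double cnt;           /* # of calls to _{,g}prof_mcount */
        long double cache_hits[3]; /* # times nth cache entry matched */
    };

    static void report(const struct stats *s)
    {
        if (s->hash_num > 0) {
            printf("avg chain searched: %.2Lf buckets\n",
                s->hash_search / s->hash_num);
        }
        long double hits = s->cache_hits[0] + s->cache_hits[1] + s->cache_hits[2];
        if (s->cnt > 0) {
            printf("cache hit rate: %.1Lf%%\n", 100.0L * hits / s->cnt);
        }
    }

    int main(void)
    {
        struct stats s = { 5200, 4000, 10000, { 7000, 1500, 500 } };
        report(&s);
        return 0;
    }
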
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACH_MACHINE_PROFILE_MD_H_ diff --git a/osfmk/profiling/profile-internal.h b/osfmk/profiling/profile-internal.h index 2667419e0..8f6cdfeef 100644 --- a/osfmk/profiling/profile-internal.h +++ b/osfmk/profiling/profile-internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -49,7 +49,7 @@ * Scaling factor for the profil system call. */ -#define SCALE_1_TO_1 0x10000L +#define SCALE_1_TO_1 0x10000L /* @@ -89,52 +89,52 @@ typedef enum profile_alloc_mem { */ typedef enum acontext_type { - ACONTEXT_PROF, /* 0: prof records */ - ACONTEXT_GPROF, /* 1: gprof arcs */ - ACONTEXT_GFUNC, /* 2: gprof function headers */ - ACONTEXT_MISC, /* 3: misc. allocations */ - ACONTEXT_PROFIL, /* 4: profil based allocations */ - ACONTEXT_DCI, /* 5: dci based allocations */ - ACONTEXT_BASIC_BLOCK, /* 6: basic block allocations */ - ACONTEXT_CALLBACK, /* 7: callback structures */ - ACONTEXT_MAX = 32 /* # allocation contexts */ + ACONTEXT_PROF, /* 0: prof records */ + ACONTEXT_GPROF, /* 1: gprof arcs */ + ACONTEXT_GFUNC, /* 2: gprof function headers */ + ACONTEXT_MISC, /* 3: misc. 
allocations */ + ACONTEXT_PROFIL, /* 4: profil based allocations */ + ACONTEXT_DCI, /* 5: dci based allocations */ + ACONTEXT_BASIC_BLOCK, /* 6: basic block allocations */ + ACONTEXT_CALLBACK, /* 7: callback structures */ + ACONTEXT_MAX = 32 /* # allocation contexts */ } acontext_type_t; #define ACONTEXT_FIRST ACONTEXT_PROF -#define ACONTEXT_NAMES { \ - "prof", \ - "gprof", \ - "gfunc", \ - "misc", \ - "profil", \ - "dci", \ - "bb", \ - "callback", \ - "#8", \ - "#9", \ - "#10", \ - "#11", \ - "#12", \ - "#13", \ - "#14", \ - "#15", \ - "#16", \ - "#17", \ - "#18", \ - "#19", \ - "#20", \ - "#21", \ - "#22", \ - "#23", \ - "#24", \ - "#25", \ - "#26", \ - "#27", \ - "#28", \ - "#29", \ - "#30", \ - "#31", \ +#define ACONTEXT_NAMES { \ + "prof", \ + "gprof", \ + "gfunc", \ + "misc", \ + "profil", \ + "dci", \ + "bb", \ + "callback", \ + "#8", \ + "#9", \ + "#10", \ + "#11", \ + "#12", \ + "#13", \ + "#14", \ + "#15", \ + "#16", \ + "#17", \ + "#18", \ + "#19", \ + "#20", \ + "#21", \ + "#22", \ + "#23", \ + "#24", \ + "#25", \ + "#26", \ + "#27", \ + "#28", \ + "#29", \ + "#30", \ + "#31", \ } /* @@ -142,34 +142,34 @@ typedef enum acontext_type { */ typedef enum kgmon_control { - KGMON_UNUSED, /* insure no 0 is ever used */ - KGMON_GET_STATUS, /* return whether or not profiling is active */ - KGMON_GET_PROFILE_VARS, /* return the _profile_vars structure */ - KGMON_GET_PROFILE_STATS, /* return the _profile_stats structure */ - KGMON_GET_DEBUG, /* return whether or not debugging is on */ - - KGMON_SET_PROFILE_ON = 50, /* turn on profiling */ - KGMON_SET_PROFILE_OFF, /* turn off profiling */ - KGMON_SET_PROFILE_RESET, /* reset profiling tables */ - KGMON_SET_DEBUG_ON, /* turn on debugging */ - KGMON_SET_DEBUG_OFF /* turn off debugging */ + KGMON_UNUSED, /* insure no 0 is ever used */ + KGMON_GET_STATUS, /* return whether or not profiling is active */ + KGMON_GET_PROFILE_VARS, /* return the _profile_vars structure */ + KGMON_GET_PROFILE_STATS, /* return the _profile_stats structure */ + KGMON_GET_DEBUG, /* return whether or not debugging is on */ + + KGMON_SET_PROFILE_ON = 50, /* turn on profiling */ + KGMON_SET_PROFILE_OFF, /* turn off profiling */ + KGMON_SET_PROFILE_RESET, /* reset profiling tables */ + KGMON_SET_DEBUG_ON, /* turn on debugging */ + KGMON_SET_DEBUG_OFF /* turn off debugging */ } kgmon_control_t; -#define KGMON_GET_MIN KGMON_GET_STATUS -#define KGMON_GET_MAX KGMON_GET_DEBUG -#define KGMON_SET_MIN KGMON_SET_PROFILE_ON -#define KGMON_SET_MAX KGMON_SET_DEBUG_OFF +#define KGMON_GET_MIN KGMON_GET_STATUS +#define KGMON_GET_MAX KGMON_GET_DEBUG +#define KGMON_SET_MIN KGMON_SET_PROFILE_ON +#define KGMON_SET_MAX KGMON_SET_DEBUG_OFF -#define ENCODE_KGMON(num, control, cpu_thread) \ +#define ENCODE_KGMON(num, control, cpu_thread) \ ((num) = ((cpu_thread) << 8) | (control)) -#define DECODE_KGMON(num, control, cpu_thread) \ -do { \ - control = (num) & 0xff; \ - cpu_thread = (num) >> 8; \ +#define DECODE_KGMON(num, control, cpu_thread) \ +do { \ + control = (num) & 0xff; \ + cpu_thread = (num) >> 8; \ } while (0) -#define LEGAL_KGMON(num) (((unsigned long)(num)) <= 0xffff) +#define LEGAL_KGMON(num) (((unsigned long)(num)) <= 0xffff) /* * Pull in all of the machine dependent types now after defining the enums. @@ -181,20 +181,20 @@ do { \ * general rounding functions. 
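
[Note on the hunk above] ENCODE_KGMON packs the kgmon control code into the low byte and the cpu/thread number into the bits above it; DECODE_KGMON reverses that, and LEGAL_KGMON caps the encoded value at 0xffff, i.e. at most 256 controls and 256 cpu/thread slots. A self-contained round-trip check using the macros verbatim from the header:

    #include <assert.h>
    #include <stdio.h>

    #define ENCODE_KGMON(num, control, cpu_thread) \
            ((num) = ((cpu_thread) << 8) | (control))

    #define DECODE_KGMON(num, control, cpu_thread) \
    do {                                           \
            control = (num) & 0xff;                \
            cpu_thread = (num) >> 8;               \
    } while (0)

    #define LEGAL_KGMON(num) (((unsigned long)(num)) <= 0xffff)

    int main(void)
    {
        long num;
        int control, cpu;

        ENCODE_KGMON(num, 50 /* KGMON_SET_PROFILE_ON */, 3 /* cpu */);
        assert(LEGAL_KGMON(num));

        DECODE_KGMON(num, control, cpu);
        printf("num=0x%lx control=%d cpu=%d\n", num, control, cpu);
        assert(control == 50 && cpu == 3);
        return 0;
    }
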
*/ -#define ROUNDDOWN(x,y) (((x)/(y))*(y)) -#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y)) +#define ROUNDDOWN(x, y) (((x)/(y))*(y)) +#define ROUNDUP(x, y) ((((x)+(y)-1)/(y))*(y)) /* * Linked list of pages allocated for a particular allocation context block. */ struct page_list { - void *first; /* pointer to first byte available */ - void *ptr; /* pointer to next available byte */ - struct page_list *next; /* next page allocated */ - size_t bytes_free; /* # bytes available */ - size_t bytes_allocated; /* # bytes allocates so far */ - size_t num_allocations; /* # of allocations */ + void *first; /* pointer to first byte available */ + void *ptr; /* pointer to next available byte */ + struct page_list *next; /* next page allocated */ + size_t bytes_free; /* # bytes available */ + size_t bytes_allocated; /* # bytes allocates so far */ + size_t num_allocations; /* # of allocations */ }; /* @@ -202,9 +202,9 @@ struct page_list { */ struct alloc_context { - struct alloc_context *next; /* next allocation context block */ - struct page_list *plist; /* head of page list */ - prof_lock_t lock; /* lock field available to asm */ + struct alloc_context *next; /* next allocation context block */ + struct page_list *plist; /* head of page list */ + prof_lock_t lock; /* lock field available to asm */ }; @@ -216,14 +216,14 @@ struct alloc_context { #define STR_MAX 32 struct callback { - void *sec_ptr; /* callback user data */ - /* callback function */ + void *sec_ptr; /* callback user data */ + /* callback function */ size_t (*callback)(struct profile_vars *, struct callback *); - long sec_val1; /* section specific value */ - long sec_val2; /* section specific value */ - size_t sec_recsize; /* record size */ - size_t sec_length; /* total length */ - char sec_name[STR_MAX]; /* section name */ + long sec_val1; /* section specific value */ + long sec_val2; /* section specific value */ + size_t sec_recsize; /* record size */ + size_t sec_length; /* total length */ + char sec_name[STR_MAX]; /* section name */ }; /* @@ -231,13 +231,13 @@ struct callback { */ struct profile_profil { - prof_uptrint_t lowpc; /* lowest address */ - prof_uptrint_t highpc; /* highest address */ - size_t text_len; /* highpc-lowpc */ - size_t profil_len; /* length of the profil buffer */ - size_t counter_size; /* size of indivual counters (HISTCOUNTER) */ - unsigned long scale; /* scaling factor (65536 / scale) */ - unsigned long profil_unused[8]; /* currently unused */ + prof_uptrint_t lowpc; /* lowest address */ + prof_uptrint_t highpc; /* highest address */ + size_t text_len; /* highpc-lowpc */ + size_t profil_len; /* length of the profil buffer */ + size_t counter_size; /* size of indivual counters (HISTCOUNTER) */ + unsigned long scale; /* scaling factor (65536 / scale) */ + unsigned long profil_unused[8]; /* currently unused */ }; /* @@ -245,71 +245,71 @@ struct profile_profil { */ struct profile_vars { - int major_version; /* major version number */ - int minor_version; /* minor version number */ - size_t vars_size; /* size of profile_vars structure */ - size_t plist_size; /* size of page_list structure */ - size_t acontext_size; /* size of allocation context struct */ - size_t callback_size; /* size of callback structure */ - profile_type_t type; /* profile type */ - const char *error_msg; /* error message for perror */ - const char *filename; /* filename to write to */ - char *str_ptr; /* string table */ + int major_version; /* major version number */ + int minor_version; /* minor version number */ + size_t vars_size; /* size of 
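
[Note on the hunk above] struct page_list is a per-context bump allocator: ptr marks the next free byte, bytes_free the room left in the page, and the totals track usage. A sketch of carving allocations from such a list; the page size and the malloc-backed page source are assumptions for illustration (the kernel gets pages from _profile_alloc_pages):

    #include <stdlib.h>

    struct page_list {
        void *first;            /* first byte of the page's payload */
        void *ptr;              /* next available byte */
        struct page_list *next; /* next page allocated */
        size_t bytes_free;      /* bytes still available */
        size_t bytes_allocated; /* bytes handed out so far */
        size_t num_allocations; /* number of allocations served */
    };

    /* Carve 'size' bytes from the first page with room, else add a page. */
    static void *plist_alloc(struct page_list **plist, size_t size)
    {
        for (struct page_list *p = *plist; p != NULL; p = p->next) {
            if (p->bytes_free >= size) {
                void *ret = p->ptr;
                p->ptr = (char *)p->ptr + size;
                p->bytes_free -= size;
                p->bytes_allocated += size;
                p->num_allocations++;
                return ret;
            }
        }
        size_t payload = 4096;          /* assumed page size */
        if (size > payload) {
            return NULL;
        }
        struct page_list *p = malloc(sizeof(*p) + payload);
        if (p == NULL) {
            return NULL;
        }
        p->first = p + 1;
        p->ptr = (char *)p->first + size;
        p->bytes_free = payload - size;
        p->bytes_allocated = size;
        p->num_allocations = 1;
        p->next = *plist;
        *plist = p;
        return p->first;
    }
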
profile_vars structure */ + size_t plist_size; /* size of page_list structure */ + size_t acontext_size; /* size of allocation context struct */ + size_t callback_size; /* size of callback structure */ + profile_type_t type; /* profile type */ + const char *error_msg; /* error message for perror */ + const char *filename; /* filename to write to */ + char *str_ptr; /* string table */ #if !defined(MACH_KERNEL) && !defined(_KERNEL) - FILE *stream; /* stdio stream to write to */ - FILE *diag_stream; /* stdio stream to write diagnostics to */ - /* function to write out some bytes */ + FILE *stream; /* stdio stream to write to */ + FILE *diag_stream; /* stdio stream to write diagnostics to */ + /* function to write out some bytes */ size_t (*fwrite_func)(const void *, size_t, size_t, FILE *); #else - void *stream; /* pointer passed to fwrite_func */ - void *diag_stream; /* stdio stream to write diagnostics to */ - /* function to write out some bytes */ + void *stream; /* pointer passed to fwrite_func */ + void *diag_stream; /* stdio stream to write diagnostics to */ + /* function to write out some bytes */ size_t (*fwrite_func)(const void *, size_t, size_t, void *); #endif - size_t page_size; /* machine pagesize */ - size_t str_bytes; /* # bytes in string table */ - size_t str_total; /* # bytes allocated total for string table */ - long clock_ticks; /* # clock ticks per second */ + size_t page_size; /* machine pagesize */ + size_t str_bytes; /* # bytes in string table */ + size_t str_total; /* # bytes allocated total for string table */ + long clock_ticks; /* # clock ticks per second */ - /* profil related variables */ + /* profil related variables */ struct profile_profil profil_info; /* profil information */ - HISTCOUNTER *profil_buf; /* profil buffer */ + HISTCOUNTER *profil_buf; /* profil buffer */ - /* Profiling output selection */ - void (*output_init)(struct profile_vars *); /* output init function */ - void (*output)(struct profile_vars *); /* output function */ - void *output_ptr; /* output specific info */ + /* Profiling output selection */ + void (*output_init)(struct profile_vars *); /* output init function */ + void (*output)(struct profile_vars *); /* output function */ + void *output_ptr; /* output specific info */ - /* allocation contexts */ + /* allocation contexts */ struct alloc_context *acontext[(int)ACONTEXT_MAX]; - void (*bogus_func)(void); /* Function to use if address out of bounds */ - prof_uptrint_t vars_unused[63]; /* future growth */ + void (*bogus_func)(void); /* Function to use if address out of bounds */ + prof_uptrint_t vars_unused[63]; /* future growth */ - /* Various flags */ - prof_flag_t init; /* != 0 if initialized */ - prof_flag_t active; /* != 0 if profiling is active */ - prof_flag_t do_profile; /* != 0 if profiling is being done */ - prof_flag_t use_dci; /* != 0 if using DCI */ + /* Various flags */ + prof_flag_t init; /* != 0 if initialized */ + prof_flag_t active; /* != 0 if profiling is active */ + prof_flag_t do_profile; /* != 0 if profiling is being done */ + prof_flag_t use_dci; /* != 0 if using DCI */ - prof_flag_t use_profil; /* != 0 if using profil */ - prof_flag_t recursive_alloc; /* != 0 if alloc taking place */ - prof_flag_t output_uarea; /* != 0 if output the uarea */ - prof_flag_t output_stats; /* != 0 if output the stats */ + prof_flag_t use_profil; /* != 0 if using profil */ + prof_flag_t recursive_alloc; /* != 0 if alloc taking place */ + prof_flag_t output_uarea; /* != 0 if output the uarea */ + prof_flag_t output_stats; /* != 0 if 
output the stats */ - prof_flag_t output_clock; /* != 0 if output the clock ticks */ - prof_flag_t multiple_sections; /* != 0 if output allows multiple sections */ - prof_flag_t have_bb; /* != 0 if we have basic block data */ - prof_flag_t init_format; /* != 0 if output format has been chosen */ + prof_flag_t output_clock; /* != 0 if output the clock ticks */ + prof_flag_t multiple_sections; /* != 0 if output allows multiple sections */ + prof_flag_t have_bb; /* != 0 if we have basic block data */ + prof_flag_t init_format; /* != 0 if output format has been chosen */ - prof_flag_t debug; /* != 0 if debugging */ - prof_flag_t check_funcs; /* != 0 if check gprof arcs for being in range */ - prof_flag_t flag_unused[62]; /* space for more flags */ + prof_flag_t debug; /* != 0 if debugging */ + prof_flag_t check_funcs; /* != 0 if check gprof arcs for being in range */ + prof_flag_t flag_unused[62]; /* space for more flags */ - struct profile_stats stats; /* profiling statistics */ - struct profile_md md; /* machine dependent info */ + struct profile_stats stats; /* profiling statistics */ + struct profile_md md; /* machine dependent info */ }; /* @@ -330,11 +330,11 @@ extern struct profile_vars _profile_vars; #if defined(_KERNEL) || defined(MACH_KERNEL) #define _profile_printf printf #else -extern int _profile_printf(const char *, ...) __attribute__((format(printf,1,2))); +extern int _profile_printf(const char *, ...) __attribute__((format(printf, 1, 2))); #endif -extern void *_profile_alloc_pages (size_t); -extern void _profile_free_pages (void *, size_t); +extern void *_profile_alloc_pages(size_t); +extern void _profile_free_pages(void *, size_t); extern void _profile_error(struct profile_vars *); /* @@ -360,11 +360,11 @@ extern void _profile_merge_stats(struct profile_stats *, const struct profile_st */ extern long _profile_kgmon(int, - size_t, - long, - int, - void **, - void (*)(kgmon_control_t)); + size_t, + long, + int, + void **, + void (*)(kgmon_control_t)); #ifdef _KERNEL extern void kgmon_server_control(kgmon_control_t); diff --git a/osfmk/profiling/profile-kgmon.c b/osfmk/profiling/profile-kgmon.c index c00ff59f6..c29f61037 100644 --- a/osfmk/profiling/profile-kgmon.c +++ b/osfmk/profiling/profile-kgmon.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
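
[Note on the declaration above] _profile_kgmon takes a read/write flag, a byte count, the encoded (control, cpu) index, a cpu limit, an out-pointer, and an optional callback; on a "get" it validates that count matches the target field exactly, sets *p_ptr to the data in place rather than copying, and returns count (or -1 on error). A hedged sketch of a status read, written as a fragment that assumes the declarations from profile-internal.h are in scope (PROFILE_VARS, ENCODE_KGMON, KGMON_GET_STATUS):

    /* Read whether profiling is active on cpu 0. Returns the byte count
     * on success (with *sys_ptr_out aimed at pv->active) or -1. */
    static long read_profiling_active(void **sys_ptr_out)
    {
        long indx;
        ENCODE_KGMON(indx, KGMON_GET_STATUS, 0);   /* status lives on cpu 0 */

        /* write == 0 selects the "get" switch; count must equal the
         * size of pv->active exactly or _profile_kgmon returns -1. */
        return _profile_kgmon(0,
            sizeof(PROFILE_VARS(0)->active),
            indx,
            1,                                 /* max_cpus */
            sys_ptr_out,                       /* set to &pv->active on success */
            (void (*)(kgmon_control_t))0);     /* no server control callback */
    }
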
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ /* * HISTORY - * + * * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez * Import of Mac OS X kernel (~semeria) * @@ -38,35 +38,35 @@ * Import of OSF Mach kernel (~mburg) * * Revision 1.1.5.1 1995/01/06 19:54:04 devrcs - * mk6 CR668 - 1.3b26 merge - * new file for mk6 - * [1994/10/12 22:25:34 dwm] + * mk6 CR668 - 1.3b26 merge + * new file for mk6 + * [1994/10/12 22:25:34 dwm] * * Revision 1.1.2.1 1994/04/08 17:52:05 meissner - * Add callback function to _profile_kgmon. - * [1994/02/16 22:38:31 meissner] - * - * _profile_kgmon now returns pointer to area, doesn't do move itself. - * [1994/02/11 16:52:17 meissner] - * - * Move all printfs into if (pv->debug) { ... } blocks. - * Add debug printfs protected by if (pv->debug) for all error conditions. - * Add code to reset profiling information. - * Add code to get/set debug flag. - * Expand copyright. - * [1994/02/07 12:41:14 meissner] - * - * Add support to copy arbitrary regions. - * Delete several of the KGMON_GET commands, now that arb. regions are supported. - * Explicitly call _profile_update_stats before dumping vars or stats. - * [1994/02/03 00:59:05 meissner] - * - * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. - * [1994/02/01 12:04:09 meissner] - * - * CR 10198 - Initial version. - * [1994/01/28 23:33:37 meissner] - * + * Add callback function to _profile_kgmon. + * [1994/02/16 22:38:31 meissner] + * + * _profile_kgmon now returns pointer to area, doesn't do move itself. + * [1994/02/11 16:52:17 meissner] + * + * Move all printfs into if (pv->debug) { ... } blocks. + * Add debug printfs protected by if (pv->debug) for all error conditions. + * Add code to reset profiling information. + * Add code to get/set debug flag. + * Expand copyright. + * [1994/02/07 12:41:14 meissner] + * + * Add support to copy arbitrary regions. + * Delete several of the KGMON_GET commands, now that arb. regions are supported. + * Explicitly call _profile_update_stats before dumping vars or stats. + * [1994/02/03 00:59:05 meissner] + * + * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars. + * [1994/02/01 12:04:09 meissner] + * + * CR 10198 - Initial version. 
+ * [1994/01/28 23:33:37 meissner] + * * $EndLog$ */ @@ -87,11 +87,11 @@ long _profile_kgmon(int write, - size_t count, - long indx, - int max_cpus, - void **p_ptr, - void (*control_func)(kgmon_control_t)) + size_t count, + long indx, + int max_cpus, + void **p_ptr, + void (*control_func)(kgmon_control_t)) { kgmon_control_t kgmon; int cpu; @@ -111,17 +111,16 @@ _profile_kgmon(int write, if (!write) { if (PROFILE_VARS(0)->debug) { printf("_profile_kgmon: copy %5ld bytes, from 0x%lx\n", - (long)count, - (long)indx); + (long)count, + (long)indx); } - } else { if (PROFILE_VARS(0)->debug) { printf("_profile_kgmon: copy %5ld bytes, to 0x%lx\n", - (long)count, - (long)indx); + (long)count, + (long)indx); } - } + } return count; } @@ -134,7 +133,7 @@ _profile_kgmon(int write, if (PROFILE_VARS(0)->debug) { printf("_profile_kgmon: start: kgmon control = %2d, cpu = %d, count = %ld\n", - kgmon, cpu, (long)count); + kgmon, cpu, (long)count); } /* Validate the CPU number */ @@ -144,7 +143,6 @@ _profile_kgmon(int write, } return -1; - } else { pv = PROFILE_VARS(cpu); @@ -158,7 +156,7 @@ _profile_kgmon(int write, error = -1; break; - case KGMON_GET_STATUS: /* return whether or not profiling is active */ + case KGMON_GET_STATUS: /* return whether or not profiling is active */ if (cpu != 0) { if (PROFILE_VARS(0)->debug) { printf("KGMON_GET_STATUS: cpu = %d\n", cpu); @@ -171,8 +169,8 @@ _profile_kgmon(int write, if (count != sizeof(pv->active)) { if (PROFILE_VARS(0)->debug) { printf("KGMON_GET_STATUS: count = %ld, should be %ld\n", - (long)count, - (long)sizeof(pv->active)); + (long)count, + (long)sizeof(pv->active)); } error = -1; @@ -182,7 +180,7 @@ _profile_kgmon(int write, *p_ptr = (void *)&pv->active; break; - case KGMON_GET_DEBUG: /* return whether or not debugging is active */ + case KGMON_GET_DEBUG: /* return whether or not debugging is active */ if (cpu != 0) { if (PROFILE_VARS(0)->debug) { printf("KGMON_GET_DEBUG: cpu = %d\n", cpu); @@ -195,8 +193,8 @@ _profile_kgmon(int write, if (count != sizeof(pv->debug)) { if (PROFILE_VARS(0)->debug) { printf("KGMON_GET_DEBUG: count = %ld, should be %ld\n", - (long)count, - (long)sizeof(pv->active)); + (long)count, + (long)sizeof(pv->active)); } error = -1; @@ -206,12 +204,12 @@ _profile_kgmon(int write, *p_ptr = (void *)&pv->debug; break; - case KGMON_GET_PROFILE_VARS: /* return the _profile_vars structure */ + case KGMON_GET_PROFILE_VARS: /* return the _profile_vars structure */ if (count != sizeof(struct profile_vars)) { if (PROFILE_VARS(0)->debug) { printf("KGMON_GET_PROFILE_VARS: count = %ld, should be %ld\n", - (long)count, - (long)sizeof(struct profile_vars)); + (long)count, + (long)sizeof(struct profile_vars)); } error = -1; @@ -222,12 +220,12 @@ _profile_kgmon(int write, *p_ptr = (void *)pv; break; - case KGMON_GET_PROFILE_STATS: /* return the _profile_stats structure */ + case KGMON_GET_PROFILE_STATS: /* return the _profile_stats structure */ if (count != sizeof(struct profile_stats)) { if (PROFILE_VARS(0)->debug) { printf("KGMON_GET_PROFILE_STATS: count = %ld, should be = %ld\n", - (long)count, - (long)sizeof(struct profile_stats)); + (long)count, + (long)sizeof(struct profile_stats)); } error = -1; @@ -238,7 +236,6 @@ _profile_kgmon(int write, *p_ptr = (void *)&pv->stats; break; } - } else { switch (kgmon) { default: @@ -249,7 +246,7 @@ _profile_kgmon(int write, error = -1; break; - case KGMON_SET_PROFILE_ON: /* turn on profiling */ + case KGMON_SET_PROFILE_ON: /* turn on profiling */ if (cpu != 0) { if (PROFILE_VARS(0)->debug) { 
printf("KGMON_SET_PROFILE_ON, cpu = %d\n", cpu); @@ -274,7 +271,7 @@ _profile_kgmon(int write, count = 0; break; - case KGMON_SET_PROFILE_OFF: /* turn off profiling */ + case KGMON_SET_PROFILE_OFF: /* turn off profiling */ if (cpu != 0) { if (PROFILE_VARS(0)->debug) { printf("KGMON_SET_PROFILE_OFF, cpu = %d\n", cpu); @@ -299,7 +296,7 @@ _profile_kgmon(int write, count = 0; break; - case KGMON_SET_PROFILE_RESET: /* reset profiling */ + case KGMON_SET_PROFILE_RESET: /* reset profiling */ if (cpu != 0) { if (PROFILE_VARS(0)->debug) { printf("KGMON_SET_PROFILE_RESET, cpu = %d\n", cpu); @@ -320,7 +317,7 @@ _profile_kgmon(int write, count = 0; break; - case KGMON_SET_DEBUG_ON: /* turn on profiling */ + case KGMON_SET_DEBUG_ON: /* turn on profiling */ if (cpu != 0) { if (PROFILE_VARS(0)->debug) { printf("KGMON_SET_DEBUG_ON, cpu = %d\n", cpu); @@ -343,7 +340,7 @@ _profile_kgmon(int write, count = 0; break; - case KGMON_SET_DEBUG_OFF: /* turn off profiling */ + case KGMON_SET_DEBUG_OFF: /* turn off profiling */ if (cpu != 0) { if (PROFILE_VARS(0)->debug) { printf("KGMON_SET_DEBUG_OFF, cpu = %d\n", cpu); @@ -372,7 +369,7 @@ _profile_kgmon(int write, if (error) { if (PROFILE_VARS(0)->debug) { printf("_profile_kgmon: done: kgmon control = %2d, cpu = %d, error = %d\n", - kgmon, cpu, error); + kgmon, cpu, error); } return -1; @@ -380,7 +377,7 @@ _profile_kgmon(int write, if (PROFILE_VARS(0)->debug) { printf("_profile_kgmon: done: kgmon control = %2d, cpu = %d, count = %ld\n", - kgmon, cpu, (long)count); + kgmon, cpu, (long)count); } return count; diff --git a/osfmk/profiling/profile-mk.c b/osfmk/profiling/profile-mk.c index 43e1376ea..4111735ab 100644 --- a/osfmk/profiling/profile-mk.c +++ b/osfmk/profiling/profile-mk.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -48,7 +48,7 @@ extern char etext[], pstart[]; void * -_profile_alloc_pages (size_t size) +_profile_alloc_pages(size_t size) { vm_offset_t addr; @@ -71,7 +71,7 @@ _profile_alloc_pages (size_t size) printf("Allocated %d bytes for profiling, address 0x%x\n", (int)size, (int)addr); } - return((caddr_t)addr); + return (caddr_t)addr; } void @@ -85,7 +85,8 @@ _profile_free_pages(void *addr, size_t size) return; } -void _profile_error(struct profile_vars *pv) +void +_profile_error(struct profile_vars *pv) { panic("Fatal error in profiling"); } @@ -104,8 +105,8 @@ kmstartup(void) * so the rest of the scaling (here and in gprof) stays in ints. 
*/ - lowpc = ROUNDDOWN((prof_uptrint_t)&pstart[0], HISTFRACTION*sizeof(LHISTCOUNTER)); - highpc = ROUNDUP((prof_uptrint_t)&etext[0], HISTFRACTION*sizeof(LHISTCOUNTER)); + lowpc = ROUNDDOWN((prof_uptrint_t)&pstart[0], HISTFRACTION * sizeof(LHISTCOUNTER)); + highpc = ROUNDUP((prof_uptrint_t)&etext[0], HISTFRACTION * sizeof(LHISTCOUNTER)); textsize = highpc - lowpc; monsize = (textsize / HISTFRACTION) * sizeof(LHISTCOUNTER); @@ -118,7 +119,7 @@ kmstartup(void) _profile_md_init(pv, PROFILE_GPROF, PROFILE_ALLOC_MEM_YES); /* Profil related variables */ - pv->profil_buf = _profile_alloc (pv, monsize, ACONTEXT_PROFIL); + pv->profil_buf = _profile_alloc(pv, monsize, ACONTEXT_PROFIL); pv->profil_info.highpc = highpc; pv->profil_info.lowpc = lowpc; pv->profil_info.text_len = textsize; @@ -134,15 +135,15 @@ kmstartup(void) pv->active = 1; pv->use_dci = 0; pv->use_profil = 1; - pv->check_funcs = 1; /* for now */ + pv->check_funcs = 1; /* for now */ if (pv->debug) { printf("Profiling kernel, s_textsize=%ld, monsize=%ld [0x%lx..0x%lx], cpu = %d\n", - (long)textsize, - (long)monsize, - (long)lowpc, - (long)highpc, - 0); + (long)textsize, + (long)monsize, + (long)lowpc, + (long)highpc, + 0); } _profile_md_start(); @@ -153,7 +154,7 @@ kmstartup(void) int gprofprobe(caddr_t port, void *ctlr) { - return(1); + return 1; } void @@ -166,17 +167,18 @@ gprofattach(void) /* struct bus_device *gprofinfo[NGPROF]; */ struct bus_device *gprofinfo[1]; -struct bus_driver gprof_driver = { - gprofprobe, 0, gprofattach, 0, 0, "gprof", gprofinfo, "gprofc", 0, 0}; +struct bus_driver gprof_driver = { + gprofprobe, 0, gprofattach, 0, 0, "gprof", gprofinfo, "gprofc", 0, 0 +}; io_return_t gprofopen(dev_t dev, - int flags, - io_req_t ior) + int flags, + io_req_t ior) { ior->io_error = D_SUCCESS; - return(0); + return 0; } void @@ -191,15 +193,14 @@ gprofstrategy(io_req_t ior) void *sys_ptr = (void *)0; long count = _profile_kgmon(!(ior->io_op & IO_READ), - ior->io_count, - ior->io_recnum, - 1, - &sys_ptr, - (void (*)(kgmon_control_t))0); + ior->io_count, + ior->io_recnum, + 1, + &sys_ptr, + (void (*)(kgmon_control_t))0); if (count < 0) { ior->io_error = D_INVALID_RECNUM; - } else { if (count > 0 && sys_ptr != (void *)0) { if (ior->io_op & IO_READ) { @@ -218,14 +219,14 @@ gprofstrategy(io_req_t ior) io_return_t gprofread(dev_t dev, - io_req_t ior) + io_req_t ior) { - return(block_io(gprofstrategy, minphys, ior)); + return block_io(gprofstrategy, minphys, ior); } io_return_t gprofwrite(dev_t dev, - io_req_t ior) + io_req_t ior) { - return (block_io(gprofstrategy, minphys, ior)); + return block_io(gprofstrategy, minphys, ior); } diff --git a/osfmk/profiling/profile-mk.h b/osfmk/profiling/profile-mk.h index f2da965f8..8e3690085 100644 --- a/osfmk/profiling/profile-mk.h +++ b/osfmk/profiling/profile-mk.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
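
[Note on the hunk above] kmstartup derives the profiled range by rounding the text segment outward to a multiple of HISTFRACTION * sizeof(LHISTCOUNTER), so textsize divides evenly and monsize holds exactly one LHISTCOUNTER per HISTFRACTION bytes of text. A standalone sketch of that arithmetic using the ROUNDDOWN/ROUNDUP macros from the header; the addresses, HISTFRACTION, and counter size below are illustrative, since both are configuration-dependent in the kernel build:

    #include <stdio.h>
    #include <stdint.h>

    #define ROUNDDOWN(x, y) (((x)/(y))*(y))
    #define ROUNDUP(x, y)   ((((x)+(y)-1)/(y))*(y))

    int main(void)
    {
        const uintptr_t text_start = 0xffffff8000102345u;  /* stand-in for pstart */
        const uintptr_t text_end   = 0xffffff80007ffdefu;  /* stand-in for etext */
        const size_t histfraction  = 8;    /* assumed HISTFRACTION */
        const size_t counter_size  = 8;    /* assumed sizeof(LHISTCOUNTER) */
        const size_t unit = histfraction * counter_size;

        uintptr_t lowpc  = ROUNDDOWN(text_start, unit);
        uintptr_t highpc = ROUNDUP(text_end, unit);
        size_t textsize  = highpc - lowpc;
        size_t monsize   = (textsize / histfraction) * counter_size;

        printf("lowpc=0x%lx highpc=0x%lx textsize=%zu monsize=%zu\n",
            (unsigned long)lowpc, (unsigned long)highpc, textsize, monsize);
        return 0;
    }

Because both ends are rounded to the same unit, the division by HISTFRACTION is exact and the histogram buffer covers the text with no fractional bucket.
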
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -56,5 +56,3 @@ extern int gprofwrite(dev_t, io_req_t); */ #define PROFILE_VARS(cpu) (&_profile_vars) - - diff --git a/osfmk/tests/bitmap_test.c b/osfmk/tests/bitmap_test.c index 121d92ea1..8eb5c35cc 100644 --- a/osfmk/tests/bitmap_test.c +++ b/osfmk/tests/bitmap_test.c @@ -2,7 +2,7 @@ * Copyright (c) 2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -60,7 +60,7 @@ dump_bitmap_lsb(bitmap_t *map, uint nbits) #ifdef assert #undef assert #endif -#define assert(x) T_ASSERT(x, NULL) +#define assert(x) T_ASSERT(x, NULL) #endif void diff --git a/osfmk/tests/kernel_tests.c b/osfmk/tests/kernel_tests.c index 9bac2abff..01669bf9c 100644 --- a/osfmk/tests/kernel_tests.c +++ b/osfmk/tests/kernel_tests.c @@ -89,28 +89,27 @@ extern kern_return_t test_thread_call(void); struct xnupost_panic_widget xt_panic_widgets = {NULL, NULL, NULL, NULL}; struct xnupost_test kernel_post_tests[] = {XNUPOST_TEST_CONFIG_BASIC(zalloc_test), - XNUPOST_TEST_CONFIG_BASIC(RandomULong_test), - XNUPOST_TEST_CONFIG_BASIC(test_os_log), - XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel), + XNUPOST_TEST_CONFIG_BASIC(RandomULong_test), + XNUPOST_TEST_CONFIG_BASIC(test_os_log), + XNUPOST_TEST_CONFIG_BASIC(test_os_log_parallel), #ifdef __arm64__ - XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test), - XNUPOST_TEST_CONFIG_BASIC(ex_cb_test), + XNUPOST_TEST_CONFIG_BASIC(arm64_munger_test), + XNUPOST_TEST_CONFIG_BASIC(ex_cb_test), #if __ARM_PAN_AVAILABLE__ - XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test), + XNUPOST_TEST_CONFIG_BASIC(arm64_pan_test), #endif #endif /* __arm64__ */ - XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test), - XNUPOST_TEST_CONFIG_BASIC(console_serial_test), - XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests), - XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests), + XNUPOST_TEST_CONFIG_BASIC(kcdata_api_test), + XNUPOST_TEST_CONFIG_BASIC(console_serial_test), + XNUPOST_TEST_CONFIG_BASIC(console_serial_alloc_rel_tests), + XNUPOST_TEST_CONFIG_BASIC(console_serial_parallel_log_tests), #if defined(__arm__) || defined(__arm64__) - XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test), + XNUPOST_TEST_CONFIG_BASIC(pmap_coredump_test), #endif - XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test), - //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests) - XNUPOST_TEST_CONFIG_BASIC(test_thread_call), - XNUPOST_TEST_CONFIG_BASIC(priority_queue_test), -}; + XNUPOST_TEST_CONFIG_BASIC(bitmap_post_test), + //XNUPOST_TEST_CONFIG_TEST_PANIC(kcdata_api_assert_tests) + XNUPOST_TEST_CONFIG_BASIC(test_thread_call), + XNUPOST_TEST_CONFIG_BASIC(priority_queue_test), }; uint32_t kernel_post_tests_count = sizeof(kernel_post_tests) / sizeof(xnupost_test_data_t); @@ -127,8 +126,9 @@ boolean_t xnupost_should_run_test(uint32_t test_num); kern_return_t xnupost_parse_config() { - if (parse_config_retval != KERN_INVALID_CAPABILITY) + if (parse_config_retval != KERN_INVALID_CAPABILITY) { return parse_config_retval; + } PE_parse_boot_argn("kernPOST", &kernel_post_args, sizeof(kernel_post_args)); if (PE_parse_boot_argn("kernPOST_config", &kernel_post_test_configs[0], sizeof(kernel_post_test_configs)) == TRUE) { @@ -158,8 +158,9 @@ xnupost_should_run_test(uint32_t test_num) /* skip to the next "," */ while (*b != ',') { - if (*b == '\0') + if (*b == '\0') { return FALSE; + } b++; } /* skip past the ',' */ @@ -173,8 +174,9 @@ xnupost_should_run_test(uint32_t test_num) kern_return_t xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count) { - if (KERN_SUCCESS != xnupost_parse_config()) + if (KERN_SUCCESS != xnupost_parse_config()) { return KERN_FAILURE; + } xnupost_test_t testp; for (uint32_t i = 0; i < test_count; i++) { @@ -192,7 +194,7 @@ xnupost_list_tests(xnupost_test_t test_list, uint32_t test_count) } } printf("\n[TEST] TOC#%u name: %s expected: %d config: %x\n", testp->xt_test_num, testp->xt_name, testp->xt_expected_retval, - testp->xt_config); + 
testp->xt_config); } return KERN_SUCCESS; @@ -224,8 +226,8 @@ xnupost_run_tests(xnupost_test_t test_list, uint32_t test_count) */ if ((testp->xt_config & XT_CONFIG_EXPECT_PANIC) && !(kernel_post_args & POSTARGS_CONTROLLER_AVAILABLE)) { T_SKIP( - "Test expects panic but " - "no controller is present"); + "Test expects panic but " + "no controller is present"); testp->xt_test_actions = XT_ACTION_SKIPPED; continue; } @@ -265,8 +267,9 @@ kernel_do_post() kern_return_t xnupost_register_panic_widget(xt_panic_widget_func funcp, const char * funcname, void * context, void ** outval) { - if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) + if (xt_panic_widgets.xtp_context_p != NULL || xt_panic_widgets.xtp_func != NULL) { return KERN_RESOURCE_SHORTAGE; + } xt_panic_widgets.xtp_context_p = context; xt_panic_widgets.xtp_func = funcp; @@ -339,8 +342,9 @@ _xt_generic_assert_check(const char * s, void * str_to_match, void ** outval) ret = XT_RET_W_SUCCESS; } - if (outval) + if (outval) { *outval = (void *)(uintptr_t)ret; + } return ret; } @@ -419,16 +423,16 @@ compare_numbers_descending(const void * a, const void * b) /* Node structure for the priority queue tests */ struct priority_queue_test_node { - struct priority_queue_entry link; - priority_queue_key_t node_key; + struct priority_queue_entry link; + priority_queue_key_t node_key; }; static void priority_queue_test_queue(struct priority_queue *pq, int type, - priority_queue_compare_fn_t cmp_fn) + priority_queue_compare_fn_t cmp_fn) { /* Configuration for the test */ -#define PRIORITY_QUEUE_NODES 7 +#define PRIORITY_QUEUE_NODES 7 static uint32_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8}; uint32_t increase_pri = 100; uint32_t decrease_pri = 90; @@ -479,14 +483,14 @@ priority_queue_test_queue(struct priority_queue *pq, int type, /* Test the maximum operation by comparing max node with local list */ result = priority_queue_max(pq, struct priority_queue_test_node, link); - T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup", - (uint32_t)result->node_key, priority_list[0]); + T_ASSERT((result->node_key == priority_list[0]), "(heap (%u) == qsort (%u)) priority queue max node lookup", + (uint32_t)result->node_key, priority_list[0]); /* Remove all remaining elements and verify they match local list */ for (int i = 0; i < PRIORITY_QUEUE_NODES; i++) { result = priority_queue_remove_max(pq, struct priority_queue_test_node, link, cmp_fn); - T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal", - (uint32_t)result->node_key, priority_list[i]); + T_ASSERT((result->node_key == priority_list[i]), "(heap (%u) == qsort (%u)) priority queue max node removal", + (uint32_t)result->node_key, priority_list[i]); } priority_queue_destroy(pq, struct priority_queue_test_node, link, ^(void *n) { @@ -513,12 +517,12 @@ priority_queue_test(void) T_SETUPEND; priority_queue_test_queue(&pq, PRIORITY_QUEUE_BUILTIN_KEY, - PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); + PRIORITY_QUEUE_SCHED_PRI_MAX_HEAP_COMPARE); priority_queue_test_queue(&pq_nodes, PRIORITY_QUEUE_GENERIC_KEY, - priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, { - return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 0 : -1); - })); + priority_heap_make_comparator(a, b, struct priority_queue_test_node, link, { + return (a->node_key > b->node_key) ? 1 : ((a->node_key == b->node_key) ? 
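
[Note on the hunk above] The priority queue test cross-checks the heap against an oracle: it qsorts a local copy of the same keys in descending order, then asserts that repeated priority_queue_remove_max produces that exact sequence. A userland sketch of the oracle side; the comparator body is an assumption, since compare_numbers_descending's definition is not shown in this hunk:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Descending comparator in the style the test uses to sort its
     * local copy of the priorities before checking the heap's output. */
    static int
    compare_numbers_descending(const void *a, const void *b)
    {
        uint32_t x = *(const uint32_t *)a, y = *(const uint32_t *)b;
        return (x > y) ? -1 : (x < y) ? 1 : 0;
    }

    int main(void)
    {
        uint32_t priority_list[] = { 20, 3, 7, 6, 50, 2, 8 };
        size_t n = sizeof(priority_list) / sizeof(priority_list[0]);

        qsort(priority_list, n, sizeof(priority_list[0]),
            compare_numbers_descending);

        /* A max-heap's repeated remove-max must produce this order. */
        for (size_t i = 0; i < n; i++) {
            printf("%u ", priority_list[i]);
        }
        printf("\n");
        return 0;
    }
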
0 : -1); + })); return KERN_SUCCESS; } @@ -588,14 +592,16 @@ RandomULong_test() */ for (i = 1; i < CONF_ITERATIONS; i++) { bit_entropy = count_bits(numbers[i - 1] ^ numbers[i]); - if (bit_entropy < min_bit_entropy) + if (bit_entropy < min_bit_entropy) { min_bit_entropy = bit_entropy; - if (bit_entropy > max_bit_entropy) + } + if (bit_entropy > max_bit_entropy) { max_bit_entropy = bit_entropy; + } if (bit_entropy < CONF_MIN_ENTROPY) { T_EXPECT_GE_UINT(bit_entropy, CONF_MIN_ENTROPY, - "Number of differing bits in consecutive numbers does not satisfy the min criteria."); + "Number of differing bits in consecutive numbers does not satisfy the min criteria."); } aggregate_bit_entropy += bit_entropy; @@ -606,7 +612,7 @@ RandomULong_test() T_EXPECT_GE_UINT(mean_bit_entropy, CONF_MEAN_ENTROPY, "Test criteria for mean number of differing bits."); T_PASS("Mean bit entropy criteria satisfied (Required %d, Actual: %d).", CONF_MEAN_ENTROPY, mean_bit_entropy); T_LOG("{PERFORMANCE} iterations: %d, min_bit_entropy: %d, mean_bit_entropy: %d, max_bit_entropy: %d", CONF_ITERATIONS, - min_bit_entropy, mean_bit_entropy, max_bit_entropy); + min_bit_entropy, mean_bit_entropy, max_bit_entropy); T_PERF("min_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), min_bit_entropy, "bits", "minimum bit entropy in RNG. High is better"); T_PERF("mean_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), mean_bit_entropy, "bits", "mean bit entropy in RNG. High is better"); T_PERF("max_bit_entropy_" T_TOSTRING(CONF_ITERATIONS), max_bit_entropy, "bits", "max bit entropy in RNG. High is better"); @@ -623,15 +629,17 @@ RandomULong_test() * Set the window */ window_end = window_start + CONF_WINDOW_SIZE - 1; - if (window_end >= CONF_ITERATIONS) + if (window_end >= CONF_ITERATIONS) { window_end = CONF_ITERATIONS - 1; + } trend = 0; for (i = window_start; i < window_end; i++) { - if (numbers[i] < numbers[i + 1]) + if (numbers[i] < numbers[i + 1]) { trend++; - else if (numbers[i] > numbers[i + 1]) + } else if (numbers[i] > numbers[i + 1]) { trend--; + } } /* * Check that there is no increasing or decreasing trend @@ -648,7 +656,6 @@ RandomULong_test() * Move to the next window */ window_start++; - } while (window_start < (CONF_ITERATIONS - 1)); T_PASS("Did not find increasing/decreasing trends in a window of %d numbers.", CONF_WINDOW_SIZE); @@ -678,10 +685,10 @@ struct sample_disk_io_stats { } __attribute__((packed)); struct kcdata_subtype_descriptor test_disk_io_stats_def[] = { - {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 0 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_count"}, - {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 1 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_size"}, - {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, 2 * sizeof(uint64_t), KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)), "io_priority_count"}, - {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, (2 + 4) * sizeof(uint64_t), sizeof(uint64_t), "io_priority_size"}, + {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 0 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_count"}, + {KCS_SUBTYPE_FLAGS_NONE, KC_ST_UINT64, 1 * sizeof(uint64_t), sizeof(uint64_t), "disk_reads_size"}, + {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, 2 * sizeof(uint64_t), KCS_SUBTYPE_PACK_SIZE(4, sizeof(uint64_t)), "io_priority_count"}, + {KCS_SUBTYPE_FLAGS_ARRAY, KC_ST_UINT64, (2 + 4) * sizeof(uint64_t), sizeof(uint64_t), "io_priority_size"}, }; kern_return_t @@ -696,12 +703,12 @@ kcdata_api_test() /* another negative test with buffer size < 32 bytes */ char data[30] = "sample_disk_io_stats"; retval = kcdata_memory_static_init(&test_kc_data, 
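
[Note on the hunk above] RandomULong_test measures bit entropy as the popcount of the XOR of consecutive outputs, tracking the minimum, mean, and maximum number of differing bits across the run. A standalone sketch of that measurement; rand() here is only a stand-in for RandomULong, and the mixing of two rand() calls is illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    /* Popcount of the XOR = number of bits that changed between
     * consecutive outputs; the test tracks min/mean/max of this. */
    static int count_bits(unsigned long n)
    {
        int c = 0;
        for (; n != 0; n &= n - 1) {   /* clear lowest set bit */
            c++;
        }
        return c;
    }

    int main(void)
    {
        enum { ITER = 1000 };
        unsigned long prev = (unsigned long)rand();
        int min = 8 * (int)sizeof(unsigned long), max = 0;
        long sum = 0;

        for (int i = 1; i < ITER; i++) {
            /* toy wide-ish mix of two rand() draws; stand-in RNG */
            unsigned long cur =
                ((unsigned long)rand() << 16) ^ (unsigned long)rand();
            int e = count_bits(prev ^ cur);
            if (e < min) { min = e; }
            if (e > max) { max = e; }
            sum += e;
            prev = cur;
        }
        printf("min=%d mean=%ld max=%d differing bits\n",
            min, sum / (ITER - 1), max);
        return 0;
    }
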
(mach_vm_address_t)&data, KCDATA_BUFFER_BEGIN_CRASHINFO, sizeof(data), - KCFLAG_USE_MEMCOPY); + KCFLAG_USE_MEMCOPY); T_ASSERT(retval == KERN_RESOURCE_SHORTAGE, "init with 30 bytes failed as expected with KERN_RESOURCE_SHORTAGE"); /* test with COPYOUT for 0x0 address. Should return KERN_NO_ACCESS */ retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)0, KCDATA_BUFFER_BEGIN_CRASHINFO, PAGE_SIZE, - KCFLAG_USE_COPYOUT); + KCFLAG_USE_COPYOUT); T_ASSERT(retval == KERN_NO_ACCESS, "writing to 0x0 returned KERN_NO_ACCESS"); /* test with successful kcdata_memory_static_init */ @@ -710,7 +717,7 @@ kcdata_api_test() T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data."); retval = kcdata_memory_static_init(&test_kc_data, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE, - KCFLAG_USE_MEMCOPY); + KCFLAG_USE_MEMCOPY); T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call"); @@ -786,38 +793,38 @@ kcdata_api_test() /* test adding of custom type */ retval = kcdata_add_type_definition(&test_kc_data, 0x999, data, &test_disk_io_stats_def[0], - sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor)); + sizeof(test_disk_io_stats_def) / sizeof(struct kcdata_subtype_descriptor)); T_ASSERT(retval == KERN_SUCCESS, "adding custom type succeeded."); return KERN_SUCCESS; } /* -kern_return_t -kcdata_api_assert_tests() -{ - kern_return_t retval = 0; - void * assert_check_retval = NULL; - test_kc_data2.kcd_length = 0xdeadbeef; - mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE); - T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data."); - - retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE, - KCFLAG_USE_MEMCOPY); - - T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call"); - - retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval); - T_ASSERT(retval == KERN_SUCCESS, "registered assert widget"); - - // this will assert - retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata"); - T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly"); - T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit"); - - return KERN_SUCCESS; -} -*/ + * kern_return_t + * kcdata_api_assert_tests() + * { + * kern_return_t retval = 0; + * void * assert_check_retval = NULL; + * test_kc_data2.kcd_length = 0xdeadbeef; + * mach_vm_address_t address = (mach_vm_address_t)kalloc(PAGE_SIZE); + * T_EXPECT_NOTNULL(address, "kalloc of PAGE_SIZE data."); + * + * retval = kcdata_memory_static_init(&test_kc_data2, (mach_vm_address_t)address, KCDATA_BUFFER_BEGIN_STACKSHOT, PAGE_SIZE, + * KCFLAG_USE_MEMCOPY); + * + * T_ASSERT(retval == KERN_SUCCESS, "successful kcdata_memory_static_init call"); + * + * retval = T_REGISTER_ASSERT_CHECK("KCDATA_DESC_MAXLEN", &assert_check_retval); + * T_ASSERT(retval == KERN_SUCCESS, "registered assert widget"); + * + * // this will assert + * retval = kcdata_add_uint32_with_description(&test_kc_data2, 0xc0ffee, "really long description string for kcdata"); + * T_ASSERT(retval == KERN_INVALID_ARGUMENT, "API param check returned KERN_INVALID_ARGUMENT correctly"); + * T_ASSERT(assert_check_retval == (void *)XT_RET_W_SUCCESS, "assertion handler verified that it was hit"); + * + * return KERN_SUCCESS; + * } + */ #if defined(__arm__) || defined(__arm64__) @@ -838,12 +845,13 @@ extern 
unsigned long gPhysBase, gPhysSize, first_avail; static inline uintptr_t astris_vm_page_unpack_ptr(uintptr_t p) { - if (!p) - return ((uintptr_t)0); + if (!p) { + return (uintptr_t)0; + } return (p & lowGlo.lgPmapMemFromArrayMask) - ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize - : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift); + ? lowGlo.lgPmapMemStartAddr + (p & ~(lowGlo.lgPmapMemFromArrayMask)) * lowGlo.lgPmapMemPagesize + : lowGlo.lgPmapMemPackedBaseAddr + (p << lowGlo.lgPmapMemPackedShift); } // assume next pointer is the first element @@ -872,8 +880,8 @@ static inline ppnum_t astris_vm_page_get_phys_page(uintptr_t m) { return (m >= lowGlo.lgPmapMemStartAddr && m < lowGlo.lgPmapMemEndAddr) - ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum) - : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset)); + ? (ppnum_t)((m - lowGlo.lgPmapMemStartAddr) / lowGlo.lgPmapMemPagesize + lowGlo.lgPmapMemFirstppnum) + : *((ppnum_t *)(m + lowGlo.lgPmapMemPageOffset)); } kern_return_t @@ -891,7 +899,7 @@ pmap_coredump_test(void) T_ASSERT_EQ_ULONG(lowGlo.lgLayoutMagic, LOWGLO_LAYOUT_MAGIC, NULL); // check the constant values in lowGlo - T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) & (pmap_object_store.memq)), NULL); + T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemQ, ((uint64_t) &(pmap_object_store.memq)), NULL); T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPageOffset, offsetof(struct vm_page_with_ppnum, vmp_phys_page), NULL); T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemChainOffset, offsetof(struct vm_page, vmp_listq), NULL); T_ASSERT_EQ_ULONG(lowGlo.lgPmapMemPagesize, sizeof(struct vm_page), NULL); diff --git a/osfmk/tests/ktest.c b/osfmk/tests/ktest.c index 14dcb69d5..356eeb245 100644 --- a/osfmk/tests/ktest.c +++ b/osfmk/tests/ktest.c @@ -32,31 +32,36 @@ #include void -ktest_start(void) { +ktest_start(void) +{ ktest_emit_start(); } void -ktest_finish(void) { +ktest_finish(void) +{ ktest_emit_finish(); } void -ktest_testbegin(const char * test_name) { +ktest_testbegin(const char * test_name) +{ ktest_current_time = mach_absolute_time(); ktest_test_name = test_name; ktest_emit_testbegin(test_name); } void -ktest_testend() { +ktest_testend() +{ ktest_current_time = mach_absolute_time(); ktest_emit_testend(); ktest_test_index++; } void -ktest_testskip(const char * msg, ...) { +ktest_testskip(const char * msg, ...) +{ va_list args; ktest_current_time = mach_absolute_time(); @@ -64,11 +69,11 @@ ktest_testskip(const char * msg, ...) { va_start(args, msg); ktest_emit_testskip(msg, args); va_end(args); - } void -ktest_log(const char * msg, ...) { +ktest_log(const char * msg, ...) 
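
[Note on astris_vm_page_unpack_ptr above] The packed vm_page pointer scheme stores either a tagged array index (resolved against the vm_page array base) or a shifted virtual-address offset (resolved against a packed base). A sketch of the same tag-mask-or-shift decode; every constant below is illustrative, since in xnu they come from the lowGlo block (lgPmapMemFromArrayMask, lgPmapMemPackedShift, and friends):

    #include <stdio.h>
    #include <stdint.h>

    #define FROM_ARRAY_MASK  0x80000000u  /* assumed tag bit: index vs packed VA */
    #define PACKED_SHIFT     6            /* assumed alignment of packed pointers */

    static const uintptr_t mem_start   = 0xffffff8000300000u; /* vm_page array */
    static const uintptr_t packed_base = 0xffffff8000000000u;
    static const size_t    page_struct = 64;  /* assumed sizeof(struct vm_page) */

    /* Mirror of the unpack logic: tagged values index the vm_page array,
     * untagged values are a VA offset stored shifted right. */
    static uintptr_t unpack_ptr(uintptr_t p)
    {
        if (p == 0) {
            return 0;
        }
        return (p & FROM_ARRAY_MASK)
            ? mem_start + (p & ~(uintptr_t)FROM_ARRAY_MASK) * page_struct
            : packed_base + (p << PACKED_SHIFT);
    }

    int main(void)
    {
        printf("array idx 5 -> 0x%lx\n",
            (unsigned long)unpack_ptr(FROM_ARRAY_MASK | 5));
        printf("packed VA  -> 0x%lx\n",
            (unsigned long)unpack_ptr(0x1234));
        return 0;
    }
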
+{ va_list args; ktest_current_time = mach_absolute_time(); @@ -90,28 +95,28 @@ ktest_testcase(int success) { ktest_current_time = mach_absolute_time(); - if(success && !ktest_expectfail) { + if (success && !ktest_expectfail) { /* PASS */ ktest_passcount++; ktest_testcase_result = T_RESULT_PASS; - } else if(!success && !ktest_expectfail) { + } else if (!success && !ktest_expectfail) { /* FAIL */ ktest_failcount++; ktest_testcase_result = T_RESULT_FAIL; - } else if(success && ktest_expectfail) { + } else if (success && ktest_expectfail) { /* UXPASS */ ktest_xpasscount++; ktest_testcase_result = T_RESULT_UXPASS; - } else if(!success && ktest_expectfail) { + } else if (!success && ktest_expectfail) { /* XFAIL */ ktest_xfailcount++; ktest_testcase_result = T_RESULT_XFAIL; } ktest_update_test_result_state(); - if(ktest_quiet == 0 || - ktest_testcase_result == T_RESULT_FAIL || - ktest_testcase_result == T_RESULT_UXPASS) { + if (ktest_quiet == 0 || + ktest_testcase_result == T_RESULT_FAIL || + ktest_testcase_result == T_RESULT_UXPASS) { ktest_emit_testcase(); } ktest_expression_index++; @@ -121,7 +126,7 @@ ktest_testcase(int success) ktest_output_buf[0] = '\0'; ktest_current_msg[0] = '\0'; ktest_current_expr[0] = '\0'; - for(int i = 0; i < KTEST_MAXVARS; i++) { + for (int i = 0; i < KTEST_MAXVARS; i++) { ktest_current_var_names[i][0] = '\0'; ktest_current_var_values[i][0] = '\0'; } @@ -129,14 +134,16 @@ ktest_testcase(int success) } void -ktest_update_test_result_state(void) { +ktest_update_test_result_state(void) +{ ktest_test_result = ktest_test_result_statetab[ktest_test_result] - [ktest_testcase_result] - [ktest_testcase_mode]; + [ktest_testcase_result] + [ktest_testcase_mode]; } void -ktest_assertion_check(void) { +ktest_assertion_check(void) +{ if (ktest_testcase_result == T_RESULT_FAIL || ktest_testcase_result == T_RESULT_UXPASS) { ktest_testend(); panic("XNUPOST: Assertion failed : %s : at %s:%d", ktest_test_name, ktest_current_file, ktest_current_line); diff --git a/osfmk/tests/ktest.h b/osfmk/tests/ktest.h index 4cc95c3c8..09451ae6b 100644 --- a/osfmk/tests/ktest.h +++ b/osfmk/tests/ktest.h @@ -58,28 +58,28 @@ void T_SYM(set_current_expr)(const char * expr_fmt, ...); void T_SYM(set_current_var)(const char * name, const char * value_fmt, ...); typedef union { - char _char; - unsigned char _uchar; + char _char; + unsigned char _uchar; - short _short; - unsigned short _ushort; + short _short; + unsigned short _ushort; - int _int; - unsigned int _uint; + int _int; + unsigned int _uint; - long _long; - unsigned long _ulong; + long _long; + unsigned long _ulong; - long long _llong; - unsigned long long _ullong; + long long _llong; + unsigned long long _ullong; - float _float; + float _float; - double _double; + double _double; - long double _ldouble; + long double _ldouble; - void* _ptr; + void* _ptr; } T_SYM(temp); extern T_SYM(temp) T_SYM(temp1), T_SYM(temp2), T_SYM(temp3); @@ -117,7 +117,7 @@ extern T_SYM(temp) T_SYM(temp1), T_SYM(temp2), T_SYM(temp3); } while(0) #define T_SET_AUX_VARS do {\ - /* Only used in userspace lib for now */\ + /* Only used in userspace lib for now */ \ } while(0) #define T_ASSERTION_CHECK do {\ @@ -129,8 +129,8 @@ extern T_SYM(temp) T_SYM(temp1), T_SYM(temp2), T_SYM(temp3); T_SYM(temp1).type = (lhs);\ T_SYM(temp2).type = (rhs);\ T_SYM(set_current_expr)(T_TOSTRING(lhs) " "\ - T_TOSTRING(cmp) " "\ - T_TOSTRING(rhs));\ + T_TOSTRING(cmp) " "\ + T_TOSTRING(rhs));\ T_SYM(set_current_var)(T_TOSTRING(lhs), fmt, T_SYM(temp1).type);\ T_SYM(set_current_var)(T_TOSTRING(rhs), 
fmt, T_SYM(temp2).type);\ T_SET_AUX_VARS;\ @@ -259,8 +259,8 @@ extern T_SYM(temp) T_SYM(temp1), T_SYM(temp2), T_SYM(temp3); T_SYM(temp1)._int = (int)(!!(expr));\ T_SYM(set_current_expr)(T_TOSTRING(expr) " != NULL");\ T_SYM(set_current_var)(T_TOSTRING(expr),\ - "%s",\ - T_SYM(temp1)._int ? "" : "NULL");\ + "%s",\ + T_SYM(temp1)._int ? "" : "NULL");\ T_SET_AUX_VARS;\ T_SYM(set_current_msg)(msg, ## __VA_ARGS__);\ T_SYM(testcase)(T_SYM(temp1)._int);\ @@ -271,8 +271,8 @@ extern T_SYM(temp) T_SYM(temp1), T_SYM(temp2), T_SYM(temp3); T_SYM(temp1)._int = (int)(!(expr));\ T_SYM(set_current_expr)(T_TOSTRING(expr) " == NULL");\ T_SYM(set_current_var)(T_TOSTRING(expr),\ - "%s",\ - T_SYM(temp1)._int ? "NULL" : "");\ + "%s",\ + T_SYM(temp1)._int ? "NULL" : "");\ T_SET_AUX_VARS;\ T_SYM(set_current_msg)(msg, ## __VA_ARGS__);\ T_SYM(testcase)(T_SYM(temp1)._int);\ @@ -327,310 +327,310 @@ extern T_SYM(temp) T_SYM(temp1), T_SYM(temp2), T_SYM(temp3); /* char */ -#define T_EXPECT_EQ_CHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_CHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_char, "%c", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_CHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_CHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_char, "%c", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_CHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_CHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_char, "%c", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_CHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_CHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_char, "%c", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_CHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_CHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_char, "%c", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_CHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_CHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_char, "%c", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_CHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_CHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_char, "%c", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_CHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_CHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_char, "%c", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_CHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_CHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_char, "%c", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_CHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_CHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_char, "%c", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_CHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_CHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_char, "%c", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_CHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_CHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_char, "%c", >=, lhs, rhs, msg, ## __VA_ARGS__) /* unsigned char */ -#define T_EXPECT_EQ_UCHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_UCHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uchar, "%c", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_UCHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_UCHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uchar, "%c", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_UCHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_UCHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uchar, "%c", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_UCHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_UCHAR(lhs, rhs, msg, ...) 
\ T_EXPECT_BLOCK2(_uchar, "%c", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_UCHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_UCHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uchar, "%c", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_UCHAR(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_UCHAR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uchar, "%c", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_UCHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_UCHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uchar, "%c", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_UCHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_UCHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uchar, "%c", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_UCHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_UCHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uchar, "%c", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_UCHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_UCHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uchar, "%c", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_UCHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_UCHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uchar, "%c", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_UCHAR(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_UCHAR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uchar, "%c", >=, lhs, rhs, msg, ## __VA_ARGS__) /* short */ -#define T_EXPECT_EQ_SHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_SHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_short, "%hi", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_SHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_SHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_short, "%hi", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_SHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_SHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_short, "%hi", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_SHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_SHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_short, "%hi", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_SHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_SHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_short, "%hi", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_SHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_SHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_short, "%hi", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_SHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_SHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_short, "%hi", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_SHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_SHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_short, "%hi", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_SHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_SHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_short, "%hi", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_SHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_SHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_short, "%hi", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_SHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_SHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_short, "%hi", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_SHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_SHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_short, "%hi", >=, lhs, rhs, msg, ## __VA_ARGS__) /* unsigned short */ -#define T_EXPECT_EQ_USHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_USHORT(lhs, rhs, msg, ...) 
\ T_EXPECT_BLOCK2(_ushort, "%hu", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_USHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_USHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ushort, "%hu", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_USHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_USHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ushort, "%hu", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_USHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_USHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ushort, "%hu", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_USHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_USHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ushort, "%hu", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_USHORT(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_USHORT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ushort, "%hu", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_USHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_USHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ushort, "%hu", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_USHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_USHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ushort, "%hu", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_USHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_USHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ushort, "%hu", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_USHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_USHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ushort, "%hu", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_USHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_USHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ushort, "%hu", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_USHORT(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_USHORT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ushort, "%hu", >=, lhs, rhs, msg, ## __VA_ARGS__) /* int */ -#define T_EXPECT_EQ_INT(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_INT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_int, "%d", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_INT(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_INT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_int, "%d", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_INT(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_INT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_int, "%d", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_INT(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_INT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_int, "%d", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_INT(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_INT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_int, "%d", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_INT(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_INT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_int, "%d", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_INT(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_INT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_int, "%d", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_INT(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_INT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_int, "%d", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_INT(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_INT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_int, "%d", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_INT(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_INT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_int, "%d", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_INT(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_INT(lhs, rhs, msg, ...) 
\ T_ASSERT_BLOCK2(_int, "%d", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_INT(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_INT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_int, "%d", >=, lhs, rhs, msg, ## __VA_ARGS__) /* unsigned int */ -#define T_EXPECT_EQ_UINT(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_UINT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uint, "%u", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_UINT(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_UINT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uint, "%u", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_UINT(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_UINT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uint, "%u", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_UINT(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_UINT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uint, "%u", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_UINT(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_UINT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uint, "%u", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_UINT(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_UINT(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_uint, "%u", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_UINT(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_UINT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uint, "%u", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_UINT(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_UINT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uint, "%u", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_UINT(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_UINT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uint, "%u", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_UINT(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_UINT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uint, "%u", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_UINT(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_UINT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uint, "%u", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_UINT(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_UINT(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_uint, "%u", >=, lhs, rhs, msg, ## __VA_ARGS__) /* long */ -#define T_EXPECT_EQ_LONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_LONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_long, "%li", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_LONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_LONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_long, "%li", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_LONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_LONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_long, "%li", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_LONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_LONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_long, "%li", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_LONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_LONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_long, "%li", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_LONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_LONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_long, "%li", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_LONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_LONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_long, "%li", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_LONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_LONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_long, "%li", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_LONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_LONG(lhs, rhs, msg, ...) 
\ T_ASSERT_BLOCK2(_long, "%li", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_LONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_LONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_long, "%li", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_LONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_LONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_long, "%li", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_LONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_LONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_long, "%li", >=, lhs, rhs, msg, ## __VA_ARGS__) /* unsigned long */ -#define T_EXPECT_EQ_ULONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_ULONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ulong, "%lu", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_ULONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_ULONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ulong, "%lu", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_ULONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_ULONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ulong, "%lu", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_ULONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_ULONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ulong, "%lu", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_ULONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_ULONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ulong, "%lu", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_ULONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_ULONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ulong, "%lu", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_ULONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_ULONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ulong, "%lu", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_ULONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_ULONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ulong, "%lu", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_ULONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_ULONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ulong, "%lu", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_ULONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_ULONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ulong, "%lu", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_ULONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_ULONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ulong, "%lu", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_ULONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_ULONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ulong, "%lu", >=, lhs, rhs, msg, ## __VA_ARGS__) /* long long */ -#define T_EXPECT_EQ_LLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_LLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_llong, "%lli", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_LLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_LLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_llong, "%lli", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_LLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_LLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_llong, "%lli", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_LLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_LLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_llong, "%lli", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_LLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_LLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_llong, "%lli", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_LLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_LLONG(lhs, rhs, msg, ...) 
\ T_EXPECT_BLOCK2(_llong, "%lli", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_LLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_LLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_llong, "%lli", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_LLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_LLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_llong, "%lli", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_LLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_LLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_llong, "%lli", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_LLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_LLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_llong, "%lli", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_LLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_LLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_llong, "%lli", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_LLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_LLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_llong, "%lli", >=, lhs, rhs, msg, ## __VA_ARGS__) /* unsigned long long */ -#define T_EXPECT_EQ_ULLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_ULLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ullong, "%llu", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_ULLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_ULLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ullong, "%llu", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_ULLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_ULLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ullong, "%llu", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_ULLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_ULLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ullong, "%llu", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_ULLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_ULLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ullong, "%llu", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_ULLONG(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_ULLONG(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ullong, "%llu", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_ULLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_ULLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ullong, "%llu", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_ULLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_ULLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ullong, "%llu", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_ULLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_ULLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ullong, "%llu", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_ULLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_ULLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ullong, "%llu", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_ULLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_ULLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ullong, "%llu", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_ULLONG(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_ULLONG(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ullong, "%llu", >=, lhs, rhs, msg, ## __VA_ARGS__) /* pointer */ -#define T_EXPECT_EQ_PTR(lhs, rhs, msg, ...)\ +#define T_EXPECT_EQ_PTR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ptr, "%p", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_NE_PTR(lhs, rhs, msg, ...)\ +#define T_EXPECT_NE_PTR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ptr, "%p", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LT_PTR(lhs, rhs, msg, ...)\ +#define T_EXPECT_LT_PTR(lhs, rhs, msg, ...) 
\ T_EXPECT_BLOCK2(_ptr, "%p", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GT_PTR(lhs, rhs, msg, ...)\ +#define T_EXPECT_GT_PTR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ptr, "%p", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_LE_PTR(lhs, rhs, msg, ...)\ +#define T_EXPECT_LE_PTR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ptr, "%p", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_EXPECT_GE_PTR(lhs, rhs, msg, ...)\ +#define T_EXPECT_GE_PTR(lhs, rhs, msg, ...) \ T_EXPECT_BLOCK2(_ptr, "%p", >=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_EQ_PTR(lhs, rhs, msg, ...)\ +#define T_ASSERT_EQ_PTR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ptr, "%p", ==, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_NE_PTR(lhs, rhs, msg, ...)\ +#define T_ASSERT_NE_PTR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ptr, "%p", !=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LT_PTR(lhs, rhs, msg, ...)\ +#define T_ASSERT_LT_PTR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ptr, "%p", <, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GT_PTR(lhs, rhs, msg, ...)\ +#define T_ASSERT_GT_PTR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ptr, "%p", >, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_LE_PTR(lhs, rhs, msg, ...)\ +#define T_ASSERT_LE_PTR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ptr, "%p", <=, lhs, rhs, msg, ## __VA_ARGS__) -#define T_ASSERT_GE_PTR(lhs, rhs, msg, ...)\ +#define T_ASSERT_GE_PTR(lhs, rhs, msg, ...) \ T_ASSERT_BLOCK2(_ptr, "%p", >=, lhs, rhs, msg, ## __VA_ARGS__) /* @@ -639,8 +639,8 @@ extern T_SYM(temp) T_SYM(temp1), T_SYM(temp2), T_SYM(temp3); */ #define T_PERF(metric, value, unit, desc) \ do { \ - T_SAVEINFO; \ - T_SYM(perf)(metric, unit, value, desc); \ + T_SAVEINFO; \ + T_SYM(perf)(metric, unit, value, desc); \ } while (0) #endif /* _TESTS_KTEST_H */ diff --git a/osfmk/tests/ktest_accessor.c b/osfmk/tests/ktest_accessor.c index ab660d57c..047c9616d 100644 --- a/osfmk/tests/ktest_accessor.c +++ b/osfmk/tests/ktest_accessor.c @@ -33,7 +33,8 @@ int vsnprintf(char *, size_t, const char *, va_list); void -ktest_set_current_expr(const char * expr_fmt, ...) { +ktest_set_current_expr(const char * expr_fmt, ...) +{ int ret; va_list args; @@ -43,37 +44,40 @@ ktest_set_current_expr(const char * expr_fmt, ...) { } void -ktest_set_current_var(const char * name, const char * value_fmt, ...) { +ktest_set_current_var(const char * name, const char * value_fmt, ...) +{ int ret; va_list args; - if(ktest_current_var_index >= KTEST_MAXVARS) { + if (ktest_current_var_index >= KTEST_MAXVARS) { panic("Internal ktest error in " __func__); } strlcpy(ktest_current_var_names[ktest_current_var_index], - name, - KTEST_MAXLEN); + name, + KTEST_MAXLEN); va_start(args, value_fmt); ret = vsnprintf(ktest_current_var_values[ktest_current_var_index], - KTEST_MAXLEN, - value_fmt, - args); + KTEST_MAXLEN, + value_fmt, + args); va_end(args); ktest_current_var_index++; } void -ktest_set_current_msg(const char * msg, ...) { +ktest_set_current_msg(const char * msg, ...) 
+{ int ret; va_list args; - if(msg == NULL) return; + if (msg == NULL) { + return; + } va_start(args, msg); ret = vsnprintf(ktest_current_msg, KTEST_MAXLEN, msg, args); va_end(args); } - diff --git a/osfmk/tests/ktest_emit.c b/osfmk/tests/ktest_emit.c index 088dd386d..48bfb5b4b 100644 --- a/osfmk/tests/ktest_emit.c +++ b/osfmk/tests/ktest_emit.c @@ -30,14 +30,14 @@ #include #include -#define EMIT(buf,size) do { \ +#define EMIT(buf, size) do {\ console_write(buf, size); \ } while(0) /* TODO: intelligently truncate messages if possible */ #define BOUNDS_CHECK_AND_UPDATE(ret, size) do {\ if(ret < 0 || ret >= size) {\ - panic("Internal ktest error in %s", __func__);\ + panic("Internal ktest error in %s", __func__);\ }\ size -= ret;\ msg += ret;\ @@ -46,19 +46,22 @@ int vsnprintf(char *, size_t, const char *, va_list); void -ktest_emit_start(void) { +ktest_emit_start(void) +{ char str[] = "\n[KTEST]\tSTART\t" KTEST_VERSION_STR "\n"; - EMIT((char *)&str[0], sizeof(str)-1); + EMIT((char *)&str[0], sizeof(str) - 1); } void -ktest_emit_finish(void) { +ktest_emit_finish(void) +{ char str[] = "\n[KTEST]\tFINISH\n"; - EMIT((char *)&str[0], sizeof(str)-1); + EMIT((char *)&str[0], sizeof(str) - 1); } void -ktest_emit_testbegin(const char * test_name) { +ktest_emit_testbegin(const char * test_name) +{ char * msg = ktest_output_buf; int size = sizeof(ktest_output_buf); int ret; @@ -67,26 +70,27 @@ ktest_emit_testbegin(const char * test_name) { char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, - size, - "\n[KTEST]\t" /* header */ - "TESTBEGIN\t" /* type */ - "%lld\t" /* time */ - "%d\t" /* index */ - "%s\t" /* file */ - "%d\t" /* line */ - "%s\n", /* name */ - ktest_current_time, - ktest_test_index, - fname, - ktest_current_line, - test_name); + size, + "\n[KTEST]\t" /* header */ + "TESTBEGIN\t" /* type */ + "%lld\t" /* time */ + "%d\t" /* index */ + "%s\t" /* file */ + "%d\t" /* line */ + "%s\n", /* name */ + ktest_current_time, + ktest_test_index, + fname, + ktest_current_line, + test_name); BOUNDS_CHECK_AND_UPDATE(ret, size); EMIT(ktest_output_buf, (int)(msg - ktest_output_buf)); } void -ktest_emit_testskip(const char * skip_msg, va_list args) { +ktest_emit_testskip(const char * skip_msg, va_list args) +{ char * msg = ktest_output_buf; int size = sizeof(ktest_output_buf); int ret; @@ -94,15 +98,15 @@ ktest_emit_testskip(const char * skip_msg, va_list args) { char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, - size, - "\n[KTEST]\t" /* header */ - "TESTSKIP\t" /* type */ - "%lld\t" /* time */ - "%s\t" /* file */ - "%d\t", /* line */ - ktest_current_time, - fname, - ktest_current_line); + size, + "\n[KTEST]\t" /* header */ + "TESTSKIP\t" /* type */ + "%lld\t" /* time */ + "%s\t" /* file */ + "%d\t", /* line */ + ktest_current_time, + fname, + ktest_current_line); BOUNDS_CHECK_AND_UPDATE(ret, size); ret = vsnprintf(msg, size, skip_msg, args); @@ -112,11 +116,11 @@ ktest_emit_testskip(const char * skip_msg, va_list args) { BOUNDS_CHECK_AND_UPDATE(ret, size); EMIT(ktest_output_buf, (int)(msg - ktest_output_buf)); - } void -ktest_emit_testend() { +ktest_emit_testend() +{ char * msg = ktest_output_buf; int size = sizeof(ktest_output_buf); int ret; @@ -124,27 +128,27 @@ ktest_emit_testend() { char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, - size, - "\n[KTEST]\t" /* header */ - "TESTEND\t" /* type */ - "%lld\t" /* time */ - "%d\t" /* index */ - "%s\t" /* file */ - "%d\t" /* line 
*/ - "%s\n", /* name */ - ktest_current_time, - ktest_test_index, - fname, - ktest_current_line, - ktest_test_name); + size, + "\n[KTEST]\t" /* header */ + "TESTEND\t" /* type */ + "%lld\t" /* time */ + "%d\t" /* index */ + "%s\t" /* file */ + "%d\t" /* line */ + "%s\n", /* name */ + ktest_current_time, + ktest_test_index, + fname, + ktest_current_line, + ktest_test_name); BOUNDS_CHECK_AND_UPDATE(ret, size); EMIT(ktest_output_buf, (int)(msg - ktest_output_buf)); - } void -ktest_emit_log(const char * log_msg, va_list args) { +ktest_emit_log(const char * log_msg, va_list args) +{ char * msg = ktest_output_buf; int size = sizeof(ktest_output_buf); int ret; @@ -152,15 +156,15 @@ ktest_emit_log(const char * log_msg, va_list args) { char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, - size, - "\n[KTEST]\t" /* header */ - "LOG\t" /* type */ - "%lld\t" /* time */ - "%s\t" /* file */ - "%d\t", /* line */ - ktest_current_time, - fname, - ktest_current_line); + size, + "\n[KTEST]\t" /* header */ + "LOG\t" /* type */ + "%lld\t" /* time */ + "%s\t" /* file */ + "%d\t", /* line */ + ktest_current_time, + fname, + ktest_current_line); BOUNDS_CHECK_AND_UPDATE(ret, size); ret = vsnprintf(msg, size, log_msg, args); @@ -170,7 +174,6 @@ ktest_emit_log(const char * log_msg, va_list args) { BOUNDS_CHECK_AND_UPDATE(ret, size); EMIT(ktest_output_buf, (int)(msg - ktest_output_buf)); - } void @@ -185,14 +188,14 @@ ktest_emit_perfdata(const char * metric, const char * unit, double value, const char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, size, - "\n[KTEST]\t" /* header */ - "PERF\t" /* type */ - "%lld\t" /* time */ - "%s\t" /* file */ - "%d\t", /* line */ - ktest_current_time, - fname, - ktest_current_line); + "\n[KTEST]\t" /* header */ + "PERF\t" /* type */ + "%lld\t" /* time */ + "%s\t" /* file */ + "%d\t", /* line */ + ktest_current_time, + fname, + ktest_current_line); BOUNDS_CHECK_AND_UPDATE(ret, size); ret = snprintf(msg, size, perfstr, metric, print_value, unit, desc); @@ -202,11 +205,11 @@ ktest_emit_perfdata(const char * metric, const char * unit, double value, const BOUNDS_CHECK_AND_UPDATE(ret, size); EMIT(ktest_output_buf, (int)(msg - ktest_output_buf)); - } void -ktest_emit_testcase(void) { +ktest_emit_testcase(void) +{ char * msg = ktest_output_buf; int size = sizeof(ktest_output_buf); int ret; @@ -214,31 +217,31 @@ ktest_emit_testcase(void) { char *fname = strnstr((char *)(uintptr_t)ktest_current_file, "xnu", 100); ret = snprintf(msg, - size, - "\n[KTEST]\t" /* header */ - "%s\t" /* type */ - "%lld\t" /* time */ - "%d\t" /* index */ - "%s\t" /* file */ - "%d\t" /* line */ - "%s\t" /* message */ - "%s", /* current_expr */ - ktest_testcase_result_tokens[ktest_testcase_mode] - [ktest_testcase_result], - ktest_current_time, - ktest_expression_index, - fname, - ktest_current_line, - ktest_current_msg, - ktest_current_expr); + size, + "\n[KTEST]\t" /* header */ + "%s\t" /* type */ + "%lld\t" /* time */ + "%d\t" /* index */ + "%s\t" /* file */ + "%d\t" /* line */ + "%s\t" /* message */ + "%s", /* current_expr */ + ktest_testcase_result_tokens[ktest_testcase_mode] + [ktest_testcase_result], + ktest_current_time, + ktest_expression_index, + fname, + ktest_current_line, + ktest_current_msg, + ktest_current_expr); BOUNDS_CHECK_AND_UPDATE(ret, size); - for(int i = 0; ktest_current_var_names[i][0]; i++) { + for (int i = 0; ktest_current_var_names[i][0]; i++) { ret = snprintf(msg, - size, - "\t%s\t%s", - 
ktest_current_var_names[i], - ktest_current_var_values[i]); + size, + "\t%s\t%s", + ktest_current_var_names[i], + ktest_current_var_values[i]); BOUNDS_CHECK_AND_UPDATE(ret, size); } diff --git a/osfmk/tests/ktest_global.c b/osfmk/tests/ktest_global.c index eee7e0af4..11ab4b26f 100644 --- a/osfmk/tests/ktest_global.c +++ b/osfmk/tests/ktest_global.c @@ -61,9 +61,9 @@ ktest_temp ktest_temp1, ktest_temp2, ktest_temp3; char ktest_output_buf[KTEST_MAXLEN] = ""; int -ktest_test_result_statetab[KTEST_NUM_TEST_STATES] - [KTEST_NUM_TESTCASE_STATES] - [KTEST_NUM_TESTCASE_MODES] = { + ktest_test_result_statetab[KTEST_NUM_TEST_STATES] +[KTEST_NUM_TESTCASE_STATES] +[KTEST_NUM_TESTCASE_MODES] = { [T_STATE_UNRESOLVED][T_RESULT_PASS][T_MAIN] = T_STATE_PASS, [T_STATE_UNRESOLVED][T_RESULT_FAIL][T_MAIN] = T_STATE_FAIL, [T_STATE_UNRESOLVED][T_RESULT_UXPASS][T_MAIN] = T_STATE_FAIL, @@ -106,7 +106,7 @@ ktest_test_result_statetab[KTEST_NUM_TEST_STATES] }; const char * ktest_testcase_result_tokens[KTEST_NUM_TESTCASE_MODES] - [KTEST_NUM_TESTCASE_STATES] = { +[KTEST_NUM_TESTCASE_STATES] = { [T_MAIN][T_RESULT_PASS] = "PASS", [T_MAIN][T_RESULT_FAIL] = "FAIL", [T_MAIN][T_RESULT_UXPASS] = "UXPASS", @@ -116,4 +116,3 @@ const char * ktest_testcase_result_tokens[KTEST_NUM_TESTCASE_MODES] [T_SETUP][T_RESULT_UXPASS] = "SETUP_UXPASS", [T_SETUP][T_RESULT_XFAIL] = "SETUP_XFAIL", }; - diff --git a/osfmk/tests/ktest_internal.h b/osfmk/tests/ktest_internal.h index bf82d45d0..fbe4147e7 100644 --- a/osfmk/tests/ktest_internal.h +++ b/osfmk/tests/ktest_internal.h @@ -72,11 +72,11 @@ extern ktest_temp ktest_temp1, ktest_temp2, ktest_temp3; extern char ktest_output_buf[KTEST_MAXLEN]; extern int ktest_test_result_statetab[KTEST_NUM_TEST_STATES] - [KTEST_NUM_TESTCASE_STATES] - [KTEST_NUM_TESTCASE_MODES]; +[KTEST_NUM_TESTCASE_STATES] +[KTEST_NUM_TESTCASE_MODES]; extern const char * ktest_testcase_result_tokens[KTEST_NUM_TESTCASE_MODES] - [KTEST_NUM_TESTCASE_STATES]; +[KTEST_NUM_TESTCASE_STATES]; void ktest_emit_start(void); @@ -89,4 +89,3 @@ void ktest_emit_perfdata(const char * metric, const char * unit, double value, c void ktest_emit_testcase(void); #endif /* _TESTS_KTEST_INTERNAL_H */ - diff --git a/osfmk/tests/pmap_tests.c b/osfmk/tests/pmap_tests.c index d0a116463..ee73016ad 100644 --- a/osfmk/tests/pmap_tests.c +++ b/osfmk/tests/pmap_tests.c @@ -2,7 +2,7 @@ * Copyright (c) 2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -43,22 +43,25 @@ typedef struct { pmap_t pmap; volatile boolean_t stop; ppnum_t pn; -} pmap_test_thread_args; +} pmap_test_thread_args; static pmap_t -pmap_create_wrapper() { +pmap_create_wrapper() +{ pmap_t new_pmap = NULL; ledger_t ledger; assert(task_ledger_template != NULL); - if ((ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) + if ((ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) { return NULL; - new_pmap = pmap_create(ledger, 0, FALSE); + } + new_pmap = pmap_create(ledger, 0, FALSE); ledger_dereference(ledger); return new_pmap; } static void -pmap_disconnect_thread(void *arg, wait_result_t __unused wres) { +pmap_disconnect_thread(void *arg, wait_result_t __unused wres) +{ pmap_test_thread_args *args = arg; do { pmap_disconnect(args->pn); @@ -72,8 +75,9 @@ test_pmap_enter_disconnect(unsigned int num_loops) kern_return_t kr = KERN_SUCCESS; thread_t disconnect_thread; pmap_t new_pmap = pmap_create_wrapper(); - if (new_pmap == NULL) + if (new_pmap == NULL) { return KERN_FAILURE; + } vm_page_t m = vm_page_grab(); if (m == VM_PAGE_NULL) { pmap_destroy(new_pmap); @@ -93,7 +97,7 @@ test_pmap_enter_disconnect(unsigned int num_loops) while (num_loops-- != 0) { kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, - VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE); + VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE); assert(kr == KERN_SUCCESS); } @@ -114,4 +118,3 @@ test_pmap_iommu_disconnect(void) { return KERN_SUCCESS; } - diff --git a/osfmk/tests/test_thread_call.c b/osfmk/tests/test_thread_call.c index ad3702312..5f860b203 100644 --- a/osfmk/tests/test_thread_call.c +++ b/osfmk/tests/test_thread_call.c @@ -52,7 +52,7 @@ int once_callback_counter = 0; static void test_once_callback(thread_call_param_t param0, - thread_call_param_t param1) + thread_call_param_t param1) { T_ASSERT_EQ_INT((test_param)param0, TEST_ARG1, "param0 is correct"); T_ASSERT_EQ_INT((test_param)param1, TEST_ARG2, "param1 is correct"); @@ -87,9 +87,9 @@ test_once_thread_call(void) thread_call_t call; call = thread_call_allocate_with_options(&test_once_callback, - (thread_call_param_t)TEST_ARG1, - THREAD_CALL_PRIORITY_HIGH, - THREAD_CALL_OPTIONS_ONCE); + (thread_call_param_t)TEST_ARG1, + THREAD_CALL_PRIORITY_HIGH, + THREAD_CALL_OPTIONS_ONCE); thread_call_param_t arg2_param = (thread_call_param_t)TEST_ARG2; @@ -150,7 +150,7 @@ int signal_callback_counter = 0; static void test_signal_callback(__unused thread_call_param_t param0, - __unused thread_call_param_t param1) + __unused thread_call_param_t param1) { /* * ktest sometimes panics if you assert from interrupt context, @@ -166,9 +166,9 @@ test_signal_thread_call(void) { thread_call_t call; call = thread_call_allocate_with_options(&test_signal_callback, - (thread_call_param_t)TEST_ARG1, - THREAD_CALL_PRIORITY_HIGH, - THREAD_CALL_OPTIONS_ONCE|THREAD_CALL_OPTIONS_SIGNAL); + (thread_call_param_t)TEST_ARG1, + THREAD_CALL_PRIORITY_HIGH, + THREAD_CALL_OPTIONS_ONCE | THREAD_CALL_OPTIONS_SIGNAL); thread_call_param_t arg2_param = (thread_call_param_t)TEST_ARG2; diff --git a/osfmk/tests/xnupost.h b/osfmk/tests/xnupost.h index 326858612..cee9312b9 100644 --- a/osfmk/tests/xnupost.h +++ b/osfmk/tests/xnupost.h @@ -91,12 +91,12 @@ extern uint32_t total_post_tests_count; #define XNUPOST_TEST_CONFIG_BASIC(func) \ { \ - XT_CONFIG_RUN, 0, -1, T_STATE_PASS, 0, 0, 0, (func), "xnu."#func \ + XT_CONFIG_RUN, 0, -1, 
T_STATE_PASS, 0, 0, 0, (func), "xnu."#func \ } #define XNUPOST_TEST_CONFIG_TEST_PANIC(func) \ { \ - XT_CONFIG_EXPECT_PANIC, 0, -1, T_STATE_PASS, 0, 0, 0, (func), "xnu."#func \ + XT_CONFIG_EXPECT_PANIC, 0, -1, T_STATE_PASS, 0, 0, 0, (func), "xnu."#func \ } void xnupost_init(void); diff --git a/osfmk/vm/WKdm_new.h b/osfmk/vm/WKdm_new.h index ee9e884ee..286db72a2 100644 --- a/osfmk/vm/WKdm_new.h +++ b/osfmk/vm/WKdm_new.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,7 +46,7 @@ * iv. an integer WORD offset into the page saying where the * low-bits area ends * - * 2. a 64-word TAGS AREA holding one two-bit tag for each word in + * 2. a 64-word TAGS AREA holding one two-bit tag for each word in * the original (1024-word) page, packed 16 per word * * 3. a variable-sized FULL WORDS AREA (always word aligned and an @@ -59,7 +59,7 @@ * * 5. a variable-sized LOW BITS AREA (always word aligned and an * integral number of words) holding ten-bit low-bit patterns - * (from partial matches), packed three per word. + * (from partial matches), packed three per word. 
*/ #ifdef __cplusplus @@ -69,45 +69,45 @@ extern "C" { #include -#define WKdm_SCRATCH_BUF_SIZE_INTERNAL PAGE_SIZE +#define WKdm_SCRATCH_BUF_SIZE_INTERNAL PAGE_SIZE typedef unsigned int WK_word; #if defined(__arm64__) void -WKdm_decompress_4k (const WK_word* src_buf, - WK_word* dest_buf, - WK_word* scratch, - unsigned int bytes); +WKdm_decompress_4k(const WK_word* src_buf, + WK_word* dest_buf, + WK_word* scratch, + unsigned int bytes); int -WKdm_compress_4k (const WK_word* src_buf, - WK_word* dest_buf, - WK_word* scratch, - unsigned int limit); +WKdm_compress_4k(const WK_word* src_buf, + WK_word* dest_buf, + WK_word* scratch, + unsigned int limit); void -WKdm_decompress_16k (WK_word* src_buf, - WK_word* dest_buf, - WK_word* scratch, - unsigned int bytes); +WKdm_decompress_16k(WK_word* src_buf, + WK_word* dest_buf, + WK_word* scratch, + unsigned int bytes); int -WKdm_compress_16k (WK_word* src_buf, - WK_word* dest_buf, - WK_word* scratch, - unsigned int limit); +WKdm_compress_16k(WK_word* src_buf, + WK_word* dest_buf, + WK_word* scratch, + unsigned int limit); #else void -WKdm_decompress_new (WK_word* src_buf, - WK_word* dest_buf, - WK_word* scratch, - unsigned int bytes); +WKdm_decompress_new(WK_word* src_buf, + WK_word* dest_buf, + WK_word* scratch, + unsigned int bytes); int -WKdm_compress_new (const WK_word* src_buf, - WK_word* dest_buf, - WK_word* scratch, - unsigned int limit); +WKdm_compress_new(const WK_word* src_buf, + WK_word* dest_buf, + WK_word* scratch, + unsigned int limit); #endif #ifdef __cplusplus diff --git a/osfmk/vm/bsd_vm.c b/osfmk/vm/bsd_vm.c index 0999ea757..707f58aa9 100644 --- a/osfmk/vm/bsd_vm.c +++ b/osfmk/vm/bsd_vm.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -44,6 +44,7 @@ #include #include #include +#include #include #include @@ -70,29 +71,29 @@ get_map_end( vm_map_t); /* - * + * */ int get_map_nentries( vm_map_t map) { - return(map->hdr.nentries); + return map->hdr.nentries; } mach_vm_offset_t mach_get_vm_start(vm_map_t map) { - return( vm_map_first_entry(map)->vme_start); + return vm_map_first_entry(map)->vme_start; } mach_vm_offset_t mach_get_vm_end(vm_map_t map) { - return( vm_map_last_entry(map)->vme_end); + return vm_map_last_entry(map)->vme_end; } -/* - * BSD VNODE PAGER +/* + * BSD VNODE PAGER */ const struct memory_object_pager_ops vnode_pager_ops = { @@ -116,21 +117,21 @@ typedef struct vnode_pager { struct memory_object vn_pgr_hdr; /* pager-specific */ - unsigned int ref_count; /* reference count */ - struct vnode *vnode_handle; /* vnode handle */ + struct os_refcnt ref_count; + struct vnode *vnode_handle; /* vnode handle */ } *vnode_pager_t; kern_return_t -vnode_pager_cluster_read( /* forward */ - vnode_pager_t, +vnode_pager_cluster_read( /* forward */ + vnode_pager_t, vm_object_offset_t, vm_object_offset_t, uint32_t, vm_size_t); void -vnode_pager_cluster_write( /* forward */ +vnode_pager_cluster_write( /* forward */ vnode_pager_t, vm_object_offset_t, vm_size_t, @@ -140,35 +141,35 @@ vnode_pager_cluster_write( /* forward */ vnode_pager_t -vnode_object_create( /* forward */ +vnode_object_create( /* forward */ struct vnode *); vnode_pager_t -vnode_pager_lookup( /* forward */ +vnode_pager_lookup( /* forward */ memory_object_t); struct vnode * -vnode_pager_lookup_vnode( /* forward */ +vnode_pager_lookup_vnode( /* forward */ memory_object_t); -zone_t vnode_pager_zone; +zone_t vnode_pager_zone; -#define VNODE_PAGER_NULL ((vnode_pager_t) 0) +#define VNODE_PAGER_NULL ((vnode_pager_t) 0) /* TODO: Should be set dynamically by vnode_pager_init() */ -#define CLUSTER_SHIFT 1 +#define CLUSTER_SHIFT 1 /* TODO: Should be set dynamically by vnode_pager_bootstrap() */ -#define MAX_VNODE 10000 +#define MAX_VNODE 10000 #if DEBUG -int pagerdebug=0; +int pagerdebug = 0; -#define PAGER_ALL 0xffffffff -#define PAGER_INIT 0x00000001 -#define PAGER_PAGEIN 0x00000002 +#define PAGER_ALL 0xffffffff +#define PAGER_INIT 0x00000001 +#define PAGER_PAGEIN 0x00000002 #define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}} #else @@ -178,33 +179,33 @@ int pagerdebug=0; extern int proc_resetpcontrol(int); -extern int uiomove64(addr64_t, int, void *); -#define MAX_RUN 32 +extern int uiomove64(addr64_t, int, void *); +#define MAX_RUN 32 int memory_object_control_uiomove( - memory_object_control_t control, - memory_object_offset_t offset, - void * uio, - int start_offset, - int io_requested, - int mark_dirty, - int take_reference) + memory_object_control_t control, + memory_object_offset_t offset, + void * uio, + int start_offset, + int io_requested, + int mark_dirty, + int take_reference) { - vm_object_t object; - vm_page_t dst_page; - int xsize; - int retval = 0; - int cur_run; - int cur_needed; - int i; - int orig_offset; - vm_page_t page_run[MAX_RUN]; - int dirty_count; /* keeps track of number of pages dirtied as part of this uiomove */ + vm_object_t object; + vm_page_t dst_page; + int xsize; + int retval = 0; + int cur_run; + int cur_needed; + int i; + int orig_offset; + vm_page_t page_run[MAX_RUN]; + int dirty_count; /* keeps track of number of pages dirtied as part of this uiomove */ object = memory_object_control_to_vm_object(control); if (object == VM_OBJECT_NULL) { - return (0); + 
return 0; } assert(!object->internal); @@ -222,18 +223,18 @@ memory_object_control_uiomove( } orig_offset = start_offset; - dirty_count = 0; + dirty_count = 0; while (io_requested && retval == 0) { - cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE; - if (cur_needed > MAX_RUN) - cur_needed = MAX_RUN; - - for (cur_run = 0; cur_run < cur_needed; ) { + if (cur_needed > MAX_RUN) { + cur_needed = MAX_RUN; + } - if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) - break; + for (cur_run = 0; cur_run < cur_needed;) { + if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) { + break; + } if (dst_page->vmp_busy || dst_page->vmp_cleaning) { @@ -244,19 +245,22 @@ memory_object_control_uiomove( * page while holding other pages in the BUSY state * otherwise we will wait */ - if (cur_run) + if (cur_run) { break; + } PAGE_SLEEP(object, dst_page, THREAD_UNINT); continue; } - if (dst_page->vmp_laundry) + if (dst_page->vmp_laundry) { vm_pageout_steal_laundry(dst_page, FALSE); + } - if (mark_dirty) { - if (dst_page->vmp_dirty == FALSE) + if (mark_dirty) { + if (dst_page->vmp_dirty == FALSE) { dirty_count++; + } SET_PAGE_DIRTY(dst_page, FALSE); - if (dst_page->vmp_cs_validated && + if (dst_page->vmp_cs_validated && !dst_page->vmp_cs_tainted) { /* * CODE SIGNING: @@ -276,25 +280,27 @@ memory_object_control_uiomove( offset += PAGE_SIZE_64; } - if (cur_run == 0) - /* + if (cur_run == 0) { + /* * we hit a 'hole' in the cache or * a page we don't want to try to handle, * so bail at this point * we'll unlock the object below */ - break; + break; + } vm_object_unlock(object); for (i = 0; i < cur_run; i++) { - - dst_page = page_run[i]; + dst_page = page_run[i]; - if ((xsize = PAGE_SIZE - start_offset) > io_requested) - xsize = io_requested; + if ((xsize = PAGE_SIZE - start_offset) > io_requested) { + xsize = io_requested; + } - if ( (retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio)) ) - break; + if ((retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio))) { + break; + } io_requested -= xsize; start_offset = 0; @@ -309,38 +315,40 @@ memory_object_control_uiomove( * the pages to the tail of the inactive queue * to implement an LRU for read/write accesses * - * the check for orig_offset == 0 is there to + * the check for orig_offset == 0 is there to * mitigate the cost of small (< page_size) requests * to the same page (this way we only move it once) */ if (take_reference && (cur_run > 1 || orig_offset == 0)) { - vm_page_lockspin_queues(); - for (i = 0; i < cur_run; i++) + for (i = 0; i < cur_run; i++) { vm_page_lru(page_run[i]); + } vm_page_unlock_queues(); } for (i = 0; i < cur_run; i++) { - dst_page = page_run[i]; + dst_page = page_run[i]; /* * someone is explicitly referencing this page... 
* update clustered and speculative state - * + * */ - if (dst_page->vmp_clustered) + if (dst_page->vmp_clustered) { VM_PAGE_CONSUME_CLUSTERED(dst_page); + } PAGE_WAKEUP_DONE(dst_page); } orig_offset = 0; } - if (object->pager) + if (object->pager) { task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager)); + } vm_object_unlock(object); - return (retval); + return retval; } @@ -353,15 +361,15 @@ vnode_pager_bootstrap(void) vm_size_t size; size = (vm_size_t) sizeof(struct vnode_pager); - vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE*size, - PAGE_SIZE, "vnode pager structures"); + vnode_pager_zone = zinit(size, (vm_size_t) MAX_VNODE * size, + PAGE_SIZE, "vnode pager structures"); zone_change(vnode_pager_zone, Z_CALLERACCT, FALSE); zone_change(vnode_pager_zone, Z_NOENCRYPT, TRUE); #if CONFIG_CODE_DECRYPTION apple_protect_pager_bootstrap(); -#endif /* CONFIG_CODE_DECRYPTION */ +#endif /* CONFIG_CODE_DECRYPTION */ swapfile_pager_bootstrap(); #if __arm64__ fourk_pager_bootstrap(); @@ -376,27 +384,28 @@ vnode_pager_bootstrap(void) */ memory_object_t vnode_pager_setup( - struct vnode *vp, - __unused memory_object_t pager) + struct vnode *vp, + __unused memory_object_t pager) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; vnode_object = vnode_object_create(vp); - if (vnode_object == VNODE_PAGER_NULL) + if (vnode_object == VNODE_PAGER_NULL) { panic("vnode_pager_setup: vnode_object_create() failed"); - return((memory_object_t)vnode_object); + } + return (memory_object_t)vnode_object; } /* * */ kern_return_t -vnode_pager_init(memory_object_t mem_obj, - memory_object_control_t control, +vnode_pager_init(memory_object_t mem_obj, + memory_object_control_t control, #if !DEBUG - __unused + __unused #endif - memory_object_cluster_size_t pg_size) + memory_object_cluster_size_t pg_size) { vnode_pager_t vnode_object; kern_return_t kr; @@ -405,8 +414,9 @@ vnode_pager_init(memory_object_t mem_obj, PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size)); - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } vnode_object = vnode_pager_lookup(mem_obj); @@ -421,14 +431,15 @@ vnode_pager_init(memory_object_t mem_obj, attributes.temporary = TRUE; kr = memory_object_change_attributes( - control, - MEMORY_OBJECT_ATTRIBUTE_INFO, - (memory_object_info_t) &attributes, - MEMORY_OBJECT_ATTR_INFO_COUNT); - if (kr != KERN_SUCCESS) + control, + MEMORY_OBJECT_ATTRIBUTE_INFO, + (memory_object_info_t) &attributes, + MEMORY_OBJECT_ATTR_INFO_COUNT); + if (kr != KERN_SUCCESS) { panic("vnode_pager_init: memory_object_change_attributes() failed"); + } - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -436,16 +447,16 @@ vnode_pager_init(memory_object_t mem_obj, */ kern_return_t vnode_pager_data_return( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt, - memory_object_offset_t *resid_offset, - int *io_error, - __unused boolean_t dirty, - __unused boolean_t kernel_copy, - int upl_flags) + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt, + memory_object_offset_t *resid_offset, + int *io_error, + __unused boolean_t dirty, + __unused boolean_t kernel_copy, + int upl_flags) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; vnode_object = vnode_pager_lookup(mem_obj); @@ -456,9 +467,9 @@ vnode_pager_data_return( 
kern_return_t vnode_pager_data_initialize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt) { panic("vnode_pager_data_initialize"); return KERN_FAILURE; @@ -466,24 +477,23 @@ vnode_pager_data_initialize( kern_return_t vnode_pager_data_unlock( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t size, - __unused vm_prot_t desired_access) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t size, + __unused vm_prot_t desired_access) { return KERN_FAILURE; } void vnode_pager_dirtied( - memory_object_t mem_obj, - vm_object_offset_t s_offset, - vm_object_offset_t e_offset) + memory_object_t mem_obj, + vm_object_offset_t s_offset, + vm_object_offset_t e_offset) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; if (mem_obj && mem_obj->mo_pager_ops == &vnode_pager_ops) { - vnode_object = vnode_pager_lookup(mem_obj); vnode_pager_was_dirtied(vnode_object->vnode_handle, s_offset, e_offset); } @@ -491,10 +501,10 @@ vnode_pager_dirtied( kern_return_t vnode_pager_get_isinuse( - memory_object_t mem_obj, - uint32_t *isinuse) + memory_object_t mem_obj, + uint32_t *isinuse) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; if (mem_obj->mo_pager_ops != &vnode_pager_ops) { *isinuse = 1; @@ -509,13 +519,14 @@ vnode_pager_get_isinuse( kern_return_t vnode_pager_get_throttle_io_limit( - memory_object_t mem_obj, - uint32_t *limit) + memory_object_t mem_obj, + uint32_t *limit) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; - if (mem_obj->mo_pager_ops != &vnode_pager_ops) + if (mem_obj->mo_pager_ops != &vnode_pager_ops) { return KERN_INVALID_ARGUMENT; + } vnode_object = vnode_pager_lookup(mem_obj); @@ -525,13 +536,14 @@ vnode_pager_get_throttle_io_limit( kern_return_t vnode_pager_get_isSSD( - memory_object_t mem_obj, - boolean_t *isSSD) + memory_object_t mem_obj, + boolean_t *isSSD) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; - if (mem_obj->mo_pager_ops != &vnode_pager_ops) + if (mem_obj->mo_pager_ops != &vnode_pager_ops) { return KERN_INVALID_ARGUMENT; + } vnode_object = vnode_pager_lookup(mem_obj); @@ -541,10 +553,10 @@ vnode_pager_get_isSSD( kern_return_t vnode_pager_get_object_size( - memory_object_t mem_obj, - memory_object_offset_t *length) + memory_object_t mem_obj, + memory_object_offset_t *length) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; if (mem_obj->mo_pager_ops != &vnode_pager_ops) { *length = 0; @@ -559,14 +571,14 @@ vnode_pager_get_object_size( kern_return_t vnode_pager_get_object_name( - memory_object_t mem_obj, - char *pathname, - vm_size_t pathname_len, - char *filename, - vm_size_t filename_len, - boolean_t *truncated_path_p) + memory_object_t mem_obj, + char *pathname, + vm_size_t pathname_len, + char *filename, + vm_size_t filename_len, + boolean_t *truncated_path_p) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; if (mem_obj->mo_pager_ops != &vnode_pager_ops) { return KERN_INVALID_ARGUMENT; @@ -575,20 +587,20 @@ vnode_pager_get_object_name( vnode_object = vnode_pager_lookup(mem_obj); return vnode_pager_get_name(vnode_object->vnode_handle, - pathname, - pathname_len, - filename, - filename_len, - truncated_path_p); + pathname, + pathname_len, + filename, + filename_len, + truncated_path_p); } 
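The hunks that follow migrate the vnode pager's reference count from a bare unsigned int updated with hw_atomic_add()/hw_atomic_sub() to the checked os_refcnt API (os_ref_retain()/os_ref_release() from libkern's <os/refcnt.h>), which traps on over-release and on retains of an already-dead object. A minimal userspace analogue of the pattern, for illustration only — the toy_* names are hypothetical, not kernel API, and this sketch assumes C11 <stdatomic.h>:

/*
 * Toy checked refcount: retain traps if the object is already dead,
 * release returns the new count so the caller can free at zero.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_ref {
    atomic_uint count;
};

static void
toy_ref_init(struct toy_ref *r)
{
    atomic_init(&r->count, 1);          /* object is born holding one reference */
}

static void
toy_ref_retain(struct toy_ref *r)
{
    unsigned old = atomic_fetch_add(&r->count, 1);
    assert(old >= 1);                   /* retaining a freed object is a bug */
}

static unsigned
toy_ref_release(struct toy_ref *r)
{
    unsigned old = atomic_fetch_sub(&r->count, 1);
    assert(old >= 1);                   /* over-release is a bug */
    return old - 1;                     /* 0 tells the caller to free */
}

int
main(void)
{
    struct toy_ref *obj = malloc(sizeof(*obj));
    if (obj == NULL) {
        return 1;
    }
    toy_ref_init(obj);
    toy_ref_retain(obj);                /* count: 2 */
    (void)toy_ref_release(obj);         /* count: 1 */
    if (toy_ref_release(obj) == 0) {    /* count: 0 -> destroy */
        free(obj);
        printf("freed at refcount 0\n");
    }
    return 0;
}

The point of the conversion is exactly these checks: the old hw_atomic code could only assert after the fact (assert(new_ref_count > 1)), whereas a dedicated refcount type catches underflow and resurrection at the call site.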
 kern_return_t
 vnode_pager_get_object_mtime(
-    memory_object_t  mem_obj,
-    struct timespec  *mtime,
-    struct timespec  *cs_mtime)
+    memory_object_t mem_obj,
+    struct timespec *mtime,
+    struct timespec *cs_mtime)
 {
-    vnode_pager_t  vnode_object;
+    vnode_pager_t vnode_object;

     if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
         return KERN_INVALID_ARGUMENT;
@@ -597,18 +609,18 @@ vnode_pager_get_object_mtime(
     vnode_object = vnode_pager_lookup(mem_obj);

     return vnode_pager_get_mtime(vnode_object->vnode_handle,
-                                 mtime,
-                                 cs_mtime);
+        mtime,
+        cs_mtime);
 }

 #if CHECK_CS_VALIDATION_BITMAP
 kern_return_t
-vnode_pager_cs_check_validation_bitmap(
-    memory_object_t  mem_obj,
-    memory_object_offset_t  offset,
-    int  optype )
+vnode_pager_cs_check_validation_bitmap(
+    memory_object_t mem_obj,
+    memory_object_offset_t offset,
+    int optype )
 {
-    vnode_pager_t  vnode_object;
+    vnode_pager_t vnode_object;

     if (mem_obj == MEMORY_OBJECT_NULL ||
         mem_obj->mo_pager_ops != &vnode_pager_ops) {
@@ -623,18 +635,18 @@ vnode_pager_cs_check_validation_bitmap(
 /*
  *
  */
-kern_return_t
+kern_return_t
 vnode_pager_data_request(
-    memory_object_t  mem_obj,
-    memory_object_offset_t  offset,
-    __unused memory_object_cluster_size_t  length,
-    __unused vm_prot_t  desired_access,
-    memory_object_fault_info_t  fault_info)
+    memory_object_t mem_obj,
+    memory_object_offset_t offset,
+    __unused memory_object_cluster_size_t length,
+    __unused vm_prot_t desired_access,
+    memory_object_fault_info_t fault_info)
 {
-    vnode_pager_t  vnode_object;
-    memory_object_offset_t  base_offset;
-    vm_size_t  size;
-    uint32_t  io_streaming = 0;
+    vnode_pager_t vnode_object;
+    memory_object_offset_t base_offset;
+    vm_size_t size;
+    uint32_t io_streaming = 0;

     vnode_object = vnode_pager_lookup(mem_obj);
@@ -642,12 +654,13 @@ vnode_pager_data_request(
     base_offset = offset;
     if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
-                                   &base_offset, &size, &io_streaming,
-                                   fault_info) != KERN_SUCCESS)
-        size = PAGE_SIZE;
+        &base_offset, &size, &io_streaming,
+        fault_info) != KERN_SUCCESS) {
+        size = PAGE_SIZE;
+    }

     assert(offset >= base_offset &&
-           offset < base_offset + size);
+        offset < base_offset + size);

     return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
 }

@@ -657,14 +670,12 @@
 */
 void
 vnode_pager_reference(
-    memory_object_t  mem_obj)
-{
-    vnode_pager_t  vnode_object;
-    unsigned int  new_ref_count;
+    memory_object_t mem_obj)
+{
+    vnode_pager_t vnode_object;

     vnode_object = vnode_pager_lookup(mem_obj);
-    new_ref_count = hw_atomic_add(&vnode_object->ref_count, 1);
-    assert(new_ref_count > 1);
+    os_ref_retain(&vnode_object->ref_count);
 }

 /*
@@ -672,21 +683,20 @@
 */
 void
 vnode_pager_deallocate(
-    memory_object_t  mem_obj)
+    memory_object_t mem_obj)
 {
-    vnode_pager_t  vnode_object;
+    vnode_pager_t vnode_object;

     PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

     vnode_object = vnode_pager_lookup(mem_obj);

-    if (hw_atomic_sub(&vnode_object->ref_count, 1) == 0) {
+    if (os_ref_release(&vnode_object->ref_count) == 0) {
         if (vnode_object->vnode_handle != NULL) {
             vnode_pager_vrele(vnode_object->vnode_handle);
         }
         zfree(vnode_pager_zone, vnode_object);
     }
-    return;
 }

 /*
@@ -697,11 +707,11 @@ vnode_pager_terminate(
 #if !DEBUG
     __unused
 #endif
-    memory_object_t  mem_obj)
+    memory_object_t mem_obj)
 {
     PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

-    return(KERN_SUCCESS);
+    return KERN_SUCCESS;
 }

 /*
@@ -709,13 +719,13 @@
 */
 kern_return_t
 vnode_pager_synchronize(
-    __unused memory_object_t  mem_obj,
-    __unused memory_object_offset_t  offset,
-    __unused memory_object_size_t  length,
-    __unused vm_sync_t  sync_flags)
+    __unused memory_object_t mem_obj,
+    __unused memory_object_offset_t offset,
+    __unused memory_object_size_t length,
+    __unused vm_sync_t sync_flags)
 {
     panic("vnode_pager_synchronize: memory_object_synchronize no longer supported\n");
-    return (KERN_FAILURE);
+    return KERN_FAILURE;
 }

 /*
@@ -723,12 +733,12 @@
 */
 kern_return_t
 vnode_pager_map(
-    memory_object_t  mem_obj,
-    vm_prot_t  prot)
+    memory_object_t mem_obj,
+    vm_prot_t prot)
 {
-    vnode_pager_t  vnode_object;
-    int  ret;
-    kern_return_t  kr;
+    vnode_pager_t vnode_object;
+    int ret;
+    kern_return_t kr;

     PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));
@@ -747,9 +757,9 @@ vnode_pager_map(
 kern_return_t
 vnode_pager_last_unmap(
-    memory_object_t  mem_obj)
+    memory_object_t mem_obj)
 {
-    vnode_pager_t  vnode_object;
+    vnode_pager_t vnode_object;

     PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));
@@ -766,76 +776,78 @@
 */
 void
 vnode_pager_cluster_write(
-    vnode_pager_t  vnode_object,
-    vm_object_offset_t  offset,
-    vm_size_t  cnt,
-    vm_object_offset_t  * resid_offset,
-    int  * io_error,
-    int  upl_flags)
+    vnode_pager_t vnode_object,
+    vm_object_offset_t offset,
+    vm_size_t cnt,
+    vm_object_offset_t * resid_offset,
+    int * io_error,
+    int upl_flags)
 {
-    vm_size_t  size;
-    int  errno;
+    vm_size_t size;
+    int errno;

     if (upl_flags & UPL_MSYNC) {
+        upl_flags |= UPL_VNODE_PAGER;

-        upl_flags |= UPL_VNODE_PAGER;
-
-        if ( (upl_flags & UPL_IOSYNC) && io_error)
-            upl_flags |= UPL_KEEPCACHED;
+        if ((upl_flags & UPL_IOSYNC) && io_error) {
+            upl_flags |= UPL_KEEPCACHED;
+        }

-        while (cnt) {
+        while (cnt) {
             size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

             assert((upl_size_t) size == size);
-            vnode_pageout(vnode_object->vnode_handle,
-                          NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);
+            vnode_pageout(vnode_object->vnode_handle,
+                NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

-            if ( (upl_flags & UPL_KEEPCACHED) ) {
-                if ( (*io_error = errno) )
-                    break;
+            if ((upl_flags & UPL_KEEPCACHED)) {
+                if ((*io_error = errno)) {
+                    break;
+                }
             }
             cnt -= size;
             offset += size;
         }
-        if (resid_offset)
+        if (resid_offset) {
             *resid_offset = offset;
-
+        }
     } else {
-        vm_object_offset_t  vnode_size;
-        vm_object_offset_t  base_offset;
+        vm_object_offset_t vnode_size;
+        vm_object_offset_t base_offset;

-        /*
+        /*
          * this is the pageout path
          */
         vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

         if (vnode_size > (offset + PAGE_SIZE)) {
-            /*
+            /*
              * preset the maximum size of the cluster
              * and put us on a nice cluster boundary...
              * and then clip the size to insure we
              * don't request past the end of the underlying file
              */
-            size = MAX_UPL_TRANSFER_BYTES;
-            base_offset = offset & ~((signed)(size - 1));
+            size = MAX_UPL_TRANSFER_BYTES;
+            base_offset = offset & ~((signed)(size - 1));

-            if ((base_offset + size) > vnode_size)
-                size = round_page(((vm_size_t)(vnode_size - base_offset)));
+            if ((base_offset + size) > vnode_size) {
+                size = round_page(((vm_size_t)(vnode_size - base_offset)));
+            }
         } else {
-            /*
+            /*
              * we've been requested to page out a page beyond the current
              * end of the 'file'... don't try to cluster in this case...
              * we still need to send this page through because it might
              * be marked precious and the underlying filesystem may need
              * to do something with it (besides page it out)...
              */
-            base_offset = offset;
+            base_offset = offset;
             size = PAGE_SIZE;
         }
         assert((upl_size_t) size == size);
-        vnode_pageout(vnode_object->vnode_handle,
-                      NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
-                      (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
+        vnode_pageout(vnode_object->vnode_handle,
+            NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
+            (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
     }
 }

@@ -845,49 +857,50 @@ vnode_pager_cluster_write(
 */
 kern_return_t
 vnode_pager_cluster_read(
-    vnode_pager_t  vnode_object,
-    vm_object_offset_t  base_offset,
-    vm_object_offset_t  offset,
-    uint32_t  io_streaming,
-    vm_size_t  cnt)
+    vnode_pager_t vnode_object,
+    vm_object_offset_t base_offset,
+    vm_object_offset_t offset,
+    uint32_t io_streaming,
+    vm_size_t cnt)
 {
-    int  local_error = 0;
-    int  kret;
-    int  flags = 0;
+    int local_error = 0;
+    int kret;
+    int flags = 0;

-    assert(! (cnt & PAGE_MASK));
+    assert(!(cnt & PAGE_MASK));

-    if (io_streaming)
+    if (io_streaming) {
         flags |= UPL_IOSTREAMING;
+    }

     assert((upl_size_t) cnt == cnt);
     kret = vnode_pagein(vnode_object->vnode_handle,
-                        (upl_t) NULL,
-                        (upl_offset_t) (offset - base_offset),
-                        base_offset,
-                        (upl_size_t) cnt,
-                        flags,
-                        &local_error);
+        (upl_t) NULL,
+        (upl_offset_t) (offset - base_offset),
+        base_offset,
+        (upl_size_t) cnt,
+        flags,
+        &local_error);
 /*
-    if(kret == PAGER_ABSENT) {
-    Need to work out the defs here, 1 corresponds to PAGER_ABSENT
-    defined in bsd/vm/vm_pager.h However, we should not be including
-    that file here it is a layering violation.
-*/
+ * if(kret == PAGER_ABSENT) {
+ * Need to work out the defs here, 1 corresponds to PAGER_ABSENT
+ * defined in bsd/vm/vm_pager.h However, we should not be including
+ * that file here it is a layering violation.
+ */ if (kret == 1) { - int uplflags; - upl_t upl = NULL; - unsigned int count = 0; - kern_return_t kr; + int uplflags; + upl_t upl = NULL; + unsigned int count = 0; + kern_return_t kr; uplflags = (UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | - UPL_SET_INTERNAL); + UPL_CLEAN_IN_PLACE | + UPL_SET_INTERNAL); count = 0; assert((upl_size_t) cnt == cnt); kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control, - base_offset, (upl_size_t) cnt, - &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE); + base_offset, (upl_size_t) cnt, + &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE); if (kr == KERN_SUCCESS) { upl_abort(upl, 0); upl_deallocate(upl); @@ -905,7 +918,6 @@ vnode_pager_cluster_read( } return KERN_SUCCESS; - } /* @@ -913,13 +925,14 @@ vnode_pager_cluster_read( */ vnode_pager_t vnode_object_create( - struct vnode *vp) + struct vnode *vp) { vnode_pager_t vnode_object; vnode_object = (struct vnode_pager *) zalloc(vnode_pager_zone); - if (vnode_object == VNODE_PAGER_NULL) - return(VNODE_PAGER_NULL); + if (vnode_object == VNODE_PAGER_NULL) { + return VNODE_PAGER_NULL; + } /* * The vm_map call takes both named entry ports and raw memory @@ -932,10 +945,10 @@ vnode_object_create( vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops; vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; - vnode_object->ref_count = 1; + os_ref_init(&vnode_object->ref_count, NULL); vnode_object->vnode_handle = vp; - return(vnode_object); + return vnode_object; } /* @@ -943,13 +956,13 @@ vnode_object_create( */ vnode_pager_t vnode_pager_lookup( - memory_object_t name) + memory_object_t name) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; vnode_object = (vnode_pager_t)name; assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops); - return (vnode_object); + return vnode_object; } @@ -959,10 +972,11 @@ vnode_pager_lookup_vnode( { vnode_pager_t vnode_object; vnode_object = (vnode_pager_t)name; - if(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) - return (vnode_object->vnode_handle); - else + if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) { + return vnode_object->vnode_handle; + } else { return NULL; + } } /*********************** proc_info implementation *************/ @@ -975,33 +989,31 @@ static int fill_vnodeinfoforaddr( vm_map_entry_t entry, uintptr_t * vnodeaddr, u int fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid) { - vm_map_t map; - vm_map_offset_t address = (vm_map_offset_t )arg; - vm_map_entry_t tmp_entry; - vm_map_entry_t entry; - vm_map_offset_t start; + vm_map_offset_t address = (vm_map_offset_t)arg; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; + vm_map_offset_t start; vm_region_extended_info_data_t extended; vm_region_top_info_data_t top; boolean_t do_region_footprint; - task_lock(task); - map = task->map; - if (map == VM_MAP_NULL) - { - task_unlock(task); - return(0); - } - vm_map_reference(map); - task_unlock(task); + task_lock(task); + map = task->map; + if (map == VM_MAP_NULL) { + task_unlock(task); + return 0; + } + vm_map_reference(map); + task_unlock(task); - do_region_footprint = task_self_region_footprint(); + do_region_footprint = task_self_region_footprint(); - vm_map_lock_read(map); + vm_map_lock_read(map); - start = address; + start = address; - if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { if (do_region_footprint && 
address == tmp_entry->vme_end) { @@ -1038,22 +1050,22 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal * pinfo->pri_user_wired_count = 0; pinfo->pri_user_tag = -1; pinfo->pri_pages_resident = - (uint32_t) (nonvol / PAGE_SIZE); + (uint32_t) (nonvol / PAGE_SIZE); pinfo->pri_pages_shared_now_private = 0; pinfo->pri_pages_swapped_out = - (uint32_t) (nonvol_compressed / PAGE_SIZE); + (uint32_t) (nonvol_compressed / PAGE_SIZE); pinfo->pri_pages_dirtied = - (uint32_t) (nonvol / PAGE_SIZE); + (uint32_t) (nonvol / PAGE_SIZE); pinfo->pri_ref_count = 1; pinfo->pri_shadow_depth = 0; pinfo->pri_share_mode = SM_PRIVATE; pinfo->pri_private_pages_resident = - (uint32_t) (nonvol / PAGE_SIZE); + (uint32_t) (nonvol / PAGE_SIZE); pinfo->pri_shared_pages_resident = 0; pinfo->pri_obj_id = INFO_MAKE_FAKE_OBJECT_ID(map, task_ledgers.purgeable_nonvolatile); pinfo->pri_address = address; pinfo->pri_size = - (uint64_t) (nonvol + nonvol_compressed); + (uint64_t) (nonvol + nonvol_compressed); pinfo->pri_depth = 0; vm_map_unlock_read(map); @@ -1064,104 +1076,104 @@ fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal * vm_map_deallocate(map); return 0; } - } else { + } else { entry = tmp_entry; - } + } - start = entry->vme_start; + start = entry->vme_start; - pinfo->pri_offset = VME_OFFSET(entry); - pinfo->pri_protection = entry->protection; - pinfo->pri_max_protection = entry->max_protection; - pinfo->pri_inheritance = entry->inheritance; - pinfo->pri_behavior = entry->behavior; - pinfo->pri_user_wired_count = entry->user_wired_count; - pinfo->pri_user_tag = VME_ALIAS(entry); + pinfo->pri_offset = VME_OFFSET(entry); + pinfo->pri_protection = entry->protection; + pinfo->pri_max_protection = entry->max_protection; + pinfo->pri_inheritance = entry->inheritance; + pinfo->pri_behavior = entry->behavior; + pinfo->pri_user_wired_count = entry->user_wired_count; + pinfo->pri_user_tag = VME_ALIAS(entry); - if (entry->is_sub_map) { + if (entry->is_sub_map) { pinfo->pri_flags |= PROC_REGION_SUBMAP; - } else { - if (entry->is_shared) + } else { + if (entry->is_shared) { pinfo->pri_flags |= PROC_REGION_SHARED; - } - - - extended.protection = entry->protection; - extended.user_tag = VME_ALIAS(entry); - extended.pages_resident = 0; - extended.pages_swapped_out = 0; - extended.pages_shared_now_private = 0; - extended.pages_dirtied = 0; - extended.external_pager = 0; - extended.shadow_depth = 0; - - vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT); - - if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) - extended.share_mode = SM_PRIVATE; - - top.private_pages_resident = 0; - top.shared_pages_resident = 0; - vm_map_region_top_walk(entry, &top); - - - pinfo->pri_pages_resident = extended.pages_resident; - pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private; - pinfo->pri_pages_swapped_out = extended.pages_swapped_out; - pinfo->pri_pages_dirtied = extended.pages_dirtied; - pinfo->pri_ref_count = extended.ref_count; - pinfo->pri_shadow_depth = extended.shadow_depth; - pinfo->pri_share_mode = extended.share_mode; - - pinfo->pri_private_pages_resident = top.private_pages_resident; - pinfo->pri_shared_pages_resident = top.shared_pages_resident; - pinfo->pri_obj_id = top.obj_id; - - pinfo->pri_address = (uint64_t)start; - pinfo->pri_size = (uint64_t)(entry->vme_end - start); - pinfo->pri_depth = 0; - - if ((vnodeaddr != 0) && (entry->is_sub_map == 
0)) { + } + } + + + extended.protection = entry->protection; + extended.user_tag = VME_ALIAS(entry); + extended.pages_resident = 0; + extended.pages_swapped_out = 0; + extended.pages_shared_now_private = 0; + extended.pages_dirtied = 0; + extended.external_pager = 0; + extended.shadow_depth = 0; + + vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT); + + if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) { + extended.share_mode = SM_PRIVATE; + } + + top.private_pages_resident = 0; + top.shared_pages_resident = 0; + vm_map_region_top_walk(entry, &top); + + + pinfo->pri_pages_resident = extended.pages_resident; + pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private; + pinfo->pri_pages_swapped_out = extended.pages_swapped_out; + pinfo->pri_pages_dirtied = extended.pages_dirtied; + pinfo->pri_ref_count = extended.ref_count; + pinfo->pri_shadow_depth = extended.shadow_depth; + pinfo->pri_share_mode = extended.share_mode; + + pinfo->pri_private_pages_resident = top.private_pages_resident; + pinfo->pri_shared_pages_resident = top.shared_pages_resident; + pinfo->pri_obj_id = top.obj_id; + + pinfo->pri_address = (uint64_t)start; + pinfo->pri_size = (uint64_t)(entry->vme_end - start); + pinfo->pri_depth = 0; + + if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) { *vnodeaddr = (uintptr_t)0; - if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) ==0) { + if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) { vm_map_unlock_read(map); - vm_map_deallocate(map); - return(1); + vm_map_deallocate(map); + return 1; } - } + } - vm_map_unlock_read(map); - vm_map_deallocate(map); - return(1); + vm_map_unlock_read(map); + vm_map_deallocate(map); + return 1; } int fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid) { - vm_map_t map; - vm_map_offset_t address = (vm_map_offset_t )arg; - vm_map_entry_t tmp_entry; - vm_map_entry_t entry; + vm_map_offset_t address = (vm_map_offset_t)arg; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; task_lock(task); map = task->map; - if (map == VM_MAP_NULL) - { + if (map == VM_MAP_NULL) { task_unlock(task); - return(0); + return 0; } - vm_map_reference(map); + vm_map_reference(map); task_unlock(task); - + vm_map_lock_read(map); if (!vm_map_lookup_entry(map, address, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { vm_map_unlock_read(map); - vm_map_deallocate(map); - return(0); + vm_map_deallocate(map); + return 0; } } else { entry = tmp_entry; @@ -1173,7 +1185,6 @@ fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regi if (entry->is_sub_map == 0) { if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) { - pinfo->pri_offset = VME_OFFSET(entry); pinfo->pri_protection = entry->protection; pinfo->pri_max_protection = entry->max_protection; @@ -1181,10 +1192,11 @@ fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regi pinfo->pri_behavior = entry->behavior; pinfo->pri_user_wired_count = entry->user_wired_count; pinfo->pri_user_tag = VME_ALIAS(entry); - - if (entry->is_shared) + + if (entry->is_shared) { pinfo->pri_flags |= PROC_REGION_SHARED; - + } + pinfo->pri_pages_resident = 0; pinfo->pri_pages_shared_now_private = 0; pinfo->pri_pages_swapped_out = 0; @@ -1192,18 +1204,18 @@ fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regi pinfo->pri_ref_count = 0; 
pinfo->pri_shadow_depth = 0; pinfo->pri_share_mode = 0; - + pinfo->pri_private_pages_resident = 0; pinfo->pri_shared_pages_resident = 0; pinfo->pri_obj_id = 0; - + pinfo->pri_address = (uint64_t)entry->vme_start; pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start); pinfo->pri_depth = 0; - + vm_map_unlock_read(map); - vm_map_deallocate(map); - return(1); + vm_map_deallocate(map); + return 1; } } @@ -1212,25 +1224,25 @@ fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regi } vm_map_unlock_read(map); - vm_map_deallocate(map); - return(0); + vm_map_deallocate(map); + return 0; } static int fill_vnodeinfoforaddr( - vm_map_entry_t entry, + vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid) { - vm_object_t top_object, object; + vm_object_t top_object, object; memory_object_t memory_object; memory_object_pager_ops_t pager_ops; - kern_return_t kr; - int shadow_depth; + kern_return_t kr; + int shadow_depth; if (entry->is_sub_map) { - return(0); + return 0; } else { /* * The last object in the shadow chain has the @@ -1243,8 +1255,8 @@ fill_vnodeinfoforaddr( } else { vm_object_lock(top_object); for (object = top_object, shadow_depth = 0; - object->shadow != VM_OBJECT_NULL; - object = object->shadow, shadow_depth++) { + object->shadow != VM_OBJECT_NULL; + object = object->shadow, shadow_depth++) { vm_object_lock(object->shadow); vm_object_unlock(object); } @@ -1252,15 +1264,15 @@ fill_vnodeinfoforaddr( } if (object == VM_OBJECT_NULL) { - return(0); + return 0; } else if (object->internal) { vm_object_unlock(object); - return(0); - } else if (! object->pager_ready || - object->terminating || - ! object->alive) { + return 0; + } else if (!object->pager_ready || + object->terminating || + !object->alive) { vm_object_unlock(object); - return(0); + return 0; } else { memory_object = object->pager; pager_ops = memory_object->mo_pager_ops; @@ -1270,51 +1282,53 @@ fill_vnodeinfoforaddr( vnodeaddr, vid); if (kr != KERN_SUCCESS) { vm_object_unlock(object); - return(0); + return 0; } } else { vm_object_unlock(object); - return(0); + return 0; } } vm_object_unlock(object); - return(1); + return 1; } -kern_return_t -vnode_pager_get_object_vnode ( - memory_object_t mem_obj, +kern_return_t +vnode_pager_get_object_vnode( + memory_object_t mem_obj, uintptr_t * vnodeaddr, uint32_t * vid) { - vnode_pager_t vnode_object; + vnode_pager_t vnode_object; vnode_object = vnode_pager_lookup(mem_obj); - if (vnode_object->vnode_handle) { + if (vnode_object->vnode_handle) { *vnodeaddr = (uintptr_t)vnode_object->vnode_handle; - *vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle); + *vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle); - return(KERN_SUCCESS); + return KERN_SUCCESS; } - - return(KERN_FAILURE); + + return KERN_FAILURE; } #if CONFIG_IOSCHED kern_return_t vnode_pager_get_object_devvp( - memory_object_t mem_obj, - uintptr_t *devvp) + memory_object_t mem_obj, + uintptr_t *devvp) { - struct vnode *vp; - uint32_t vid; + struct vnode *vp; + uint32_t vid; - if(vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) - return (KERN_FAILURE); + if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) { + return KERN_FAILURE; + } *devvp = (uintptr_t)vnode_mountdevvp(vp); - if (*devvp) - return (KERN_SUCCESS); - return (KERN_FAILURE); + if (*devvp) { + return KERN_SUCCESS; + } + return KERN_FAILURE; } #endif @@ -1325,15 +1339,14 @@ vnode_pager_get_object_devvp( vm_object_t 
find_vnode_object( - vm_map_entry_t entry -) + vm_map_entry_t entry + ) { - vm_object_t top_object, object; - memory_object_t memory_object; - memory_object_pager_ops_t pager_ops; + vm_object_t top_object, object; + memory_object_t memory_object; + memory_object_pager_ops_t pager_ops; if (!entry->is_sub_map) { - /* * The last object in the shadow chain has the * relevant pager information. @@ -1360,14 +1373,14 @@ find_vnode_object( * vnode and so we fall through to the bottom and return NULL. */ - if (pager_ops == &vnode_pager_ops) - return object; /* we return with the object locked */ + if (pager_ops == &vnode_pager_ops) { + return object; /* we return with the object locked */ + } } vm_object_unlock(object); } - } - return(VM_OBJECT_NULL); + return VM_OBJECT_NULL; } diff --git a/osfmk/vm/cpm.h b/osfmk/vm/cpm.h index 4d4b968d2..cb011a8bd 100644 --- a/osfmk/vm/cpm.h +++ b/osfmk/vm/cpm.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,16 +22,16 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ - * + * */ -#ifndef _VM_CPM_H_ -#define _VM_CPM_H_ +#ifndef _VM_CPM_H_ +#define _VM_CPM_H_ /* * File: vm/cpm.h @@ -55,4 +55,4 @@ extern kern_return_t cpm_allocate(vm_size_t size, vm_page_t *list, ppnum_t max_pnum, ppnum_t pnum_mask, boolean_t wire, int flags); -#endif /* _VM_CPM_H_ */ +#endif /* _VM_CPM_H_ */ diff --git a/osfmk/vm/device_vm.c b/osfmk/vm/device_vm.c index c4f953e1c..6b478027a 100644 --- a/osfmk/vm/device_vm.c +++ b/osfmk/vm/device_vm.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -49,13 +49,14 @@ #include #include #include +#include /* Device VM COMPONENT INTERFACES */ -/* - * Device PAGER +/* + * Device PAGER */ @@ -90,41 +91,41 @@ typedef struct device_pager { struct memory_object dev_pgr_hdr; /* pager-specific data */ - lck_mtx_t lock; - unsigned int ref_count; /* reference count */ + lck_mtx_t lock; + struct os_refcnt ref_count; /* reference count */ device_port_t device_handle; /* device_handle */ - vm_size_t size; - int flags; - boolean_t is_mapped; + vm_size_t size; + int flags; + boolean_t is_mapped; } *device_pager_t; -lck_grp_t device_pager_lck_grp; -lck_grp_attr_t device_pager_lck_grp_attr; -lck_attr_t device_pager_lck_attr; +lck_grp_t device_pager_lck_grp; +lck_grp_attr_t device_pager_lck_grp_attr; +lck_attr_t device_pager_lck_attr; -#define device_pager_lock_init(pager) \ - lck_mtx_init(&(pager)->lock, \ - &device_pager_lck_grp, \ - &device_pager_lck_attr) -#define device_pager_lock_destroy(pager) \ +#define device_pager_lock_init(pager) \ + lck_mtx_init(&(pager)->lock, \ + &device_pager_lck_grp, \ + &device_pager_lck_attr) +#define device_pager_lock_destroy(pager) \ lck_mtx_destroy(&(pager)->lock, &device_pager_lck_grp) #define device_pager_lock(pager) lck_mtx_lock(&(pager)->lock) #define device_pager_unlock(pager) lck_mtx_unlock(&(pager)->lock) device_pager_t -device_pager_lookup( /* forward */ +device_pager_lookup( /* forward */ memory_object_t); device_pager_t -device_object_create(void); /* forward */ +device_object_create(void); /* forward */ -zone_t device_pager_zone; +zone_t device_pager_zone; -#define DEVICE_PAGER_NULL ((device_pager_t) 0) +#define DEVICE_PAGER_NULL ((device_pager_t) 0) -#define MAX_DNODE 10000 +#define MAX_DNODE 10000 @@ -139,8 +140,8 @@ device_pager_bootstrap(void) vm_size_t size; size = (vm_size_t) sizeof(struct device_pager); - device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE*size, - PAGE_SIZE, "device node pager structures"); + device_pager_zone = zinit(size, (vm_size_t) MAX_DNODE * size, + PAGE_SIZE, "device node pager structures"); zone_change(device_pager_zone, Z_CALLERACCT, FALSE); lck_grp_attr_setdefault(&device_pager_lck_grp_attr); @@ -156,25 +157,26 @@ device_pager_bootstrap(void) memory_object_t device_pager_setup( __unused memory_object_t device, - uintptr_t device_handle, - vm_size_t size, - int flags) + uintptr_t device_handle, + vm_size_t size, + int flags) { - device_pager_t device_object; + device_pager_t device_object; memory_object_control_t control; - vm_object_t object; + vm_object_t object; device_object = device_object_create(); - if (device_object == DEVICE_PAGER_NULL) + if (device_object == DEVICE_PAGER_NULL) { panic("device_pager_setup: device_object_create() failed"); + } device_object->device_handle = device_handle; device_object->size = size; device_object->flags = flags; memory_object_create_named((memory_object_t) device_object, - size, - &control); + size, + &control); object = memory_object_control_to_vm_object(control); assert(object != VM_OBJECT_NULL); @@ -193,41 +195,45 @@ device_pager_setup( */ kern_return_t device_pager_populate_object( - memory_object_t device, - memory_object_offset_t offset, - ppnum_t page_num, - vm_size_t size) + memory_object_t device, + memory_object_offset_t offset, + ppnum_t page_num, + vm_size_t size) { - device_pager_t device_object; - vm_object_t vm_object; - kern_return_t kr; - upl_t upl; + 
device_pager_t device_object; + vm_object_t vm_object; + kern_return_t kr; + upl_t upl; device_object = device_pager_lookup(device); - if(device_object == DEVICE_PAGER_NULL) + if (device_object == DEVICE_PAGER_NULL) { return KERN_FAILURE; + } vm_object = (vm_object_t)memory_object_control_to_vm_object( - device_object->dev_pgr_hdr.mo_control); - if(vm_object == NULL) + device_object->dev_pgr_hdr.mo_control); + if (vm_object == NULL) { return KERN_FAILURE; + } kr = vm_object_populate_with_private( - vm_object, offset, page_num, size); - if(kr != KERN_SUCCESS) + vm_object, offset, page_num, size); + if (kr != KERN_SUCCESS) { return kr; + } - if(!vm_object->phys_contiguous) { + if (!vm_object->phys_contiguous) { unsigned int null_size = 0; assert((upl_size_t) size == size); - kr = vm_object_upl_request(vm_object, - (vm_object_offset_t)offset, - (upl_size_t) size, &upl, NULL, - &null_size, - (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE), - VM_KERN_MEMORY_NONE); - if(kr != KERN_SUCCESS) + kr = vm_object_upl_request(vm_object, + (vm_object_offset_t)offset, + (upl_size_t) size, &upl, NULL, + &null_size, + (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE), + VM_KERN_MEMORY_NONE); + if (kr != KERN_SUCCESS) { panic("device_pager_populate_object: list_req failed"); + } upl_commit(upl, NULL, 0); upl_deallocate(upl); @@ -242,13 +248,13 @@ device_pager_populate_object( */ device_pager_t device_pager_lookup( - memory_object_t mem_obj) + memory_object_t mem_obj) { - device_pager_t device_object; + device_pager_t device_object; assert(mem_obj->mo_pager_ops == &device_pager_ops); device_object = (device_pager_t)mem_obj; - assert(device_object->ref_count > 0); + assert(os_ref_get_count(&device_object->ref_count) > 0); return device_object; } @@ -257,19 +263,20 @@ device_pager_lookup( */ kern_return_t device_pager_init( - memory_object_t mem_obj, - memory_object_control_t control, + memory_object_t mem_obj, + memory_object_control_t control, __unused memory_object_cluster_size_t pg_size) { device_pager_t device_object; kern_return_t kr; memory_object_attr_info_data_t attributes; - vm_object_t vm_object; + vm_object_t vm_object; - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } device_object = device_pager_lookup(mem_obj); @@ -283,10 +290,12 @@ device_pager_init( vm_object = (vm_object_t)memory_object_control_to_vm_object(control); vm_object_lock(vm_object); vm_object->private = TRUE; - if(device_object->flags & DEVICE_PAGER_CONTIGUOUS) + if (device_object->flags & DEVICE_PAGER_CONTIGUOUS) { vm_object->phys_contiguous = TRUE; - if(device_object->flags & DEVICE_PAGER_NOPHYSCACHE) + } + if (device_object->flags & DEVICE_PAGER_NOPHYSCACHE) { vm_object->nophyscache = TRUE; + } vm_object->wimg_bits = device_object->flags & VM_WIMG_MASK; vm_object_unlock(vm_object); @@ -299,14 +308,15 @@ device_pager_init( attributes.temporary = TRUE; kr = memory_object_change_attributes( - control, - MEMORY_OBJECT_ATTRIBUTE_INFO, - (memory_object_info_t) &attributes, - MEMORY_OBJECT_ATTR_INFO_COUNT); - if (kr != KERN_SUCCESS) + control, + MEMORY_OBJECT_ATTRIBUTE_INFO, + (memory_object_info_t) &attributes, + MEMORY_OBJECT_ATTR_INFO_COUNT); + if (kr != KERN_SUCCESS) { panic("device_pager_init: memory_object_change_attributes() failed"); + } - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -315,48 +325,50 @@ device_pager_init( /*ARGSUSED6*/ kern_return_t device_pager_data_return( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt, 
- __unused memory_object_offset_t *resid_offset, - __unused int *io_error, - __unused boolean_t dirty, - __unused boolean_t kernel_copy, - __unused int upl_flags) + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt, + __unused memory_object_offset_t *resid_offset, + __unused int *io_error, + __unused boolean_t dirty, + __unused boolean_t kernel_copy, + __unused int upl_flags) { - device_pager_t device_object; + device_pager_t device_object; device_object = device_pager_lookup(mem_obj); - if (device_object == DEVICE_PAGER_NULL) + if (device_object == DEVICE_PAGER_NULL) { panic("device_pager_data_return: lookup failed"); + } __IGNORE_WCASTALIGN(return device_data_action(device_object->device_handle, - (ipc_port_t) device_object, - VM_PROT_READ | VM_PROT_WRITE, - offset, data_cnt)); + (ipc_port_t) device_object, + VM_PROT_READ | VM_PROT_WRITE, + offset, data_cnt)); } /* * */ -kern_return_t +kern_return_t device_pager_data_request( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, - __unused vm_prot_t protection_required, - __unused memory_object_fault_info_t fault_info) + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t length, + __unused vm_prot_t protection_required, + __unused memory_object_fault_info_t fault_info) { - device_pager_t device_object; + device_pager_t device_object; device_object = device_pager_lookup(mem_obj); - if (device_object == DEVICE_PAGER_NULL) + if (device_object == DEVICE_PAGER_NULL) { panic("device_pager_data_request: lookup failed"); + } __IGNORE_WCASTALIGN(device_data_action(device_object->device_handle, - (ipc_port_t) device_object, - VM_PROT_READ, offset, length)); + (ipc_port_t) device_object, + VM_PROT_READ, offset, length)); return KERN_SUCCESS; } @@ -365,18 +377,16 @@ device_pager_data_request( */ void device_pager_reference( - memory_object_t mem_obj) -{ - device_pager_t device_object; - unsigned int new_ref_count; + memory_object_t mem_obj) +{ + device_pager_t device_object; device_object = device_pager_lookup(mem_obj); - new_ref_count = hw_atomic_add(&device_object->ref_count, 1); - assert(new_ref_count > 1); - DTRACE_VM2(device_pager_reference, - device_pager_t, device_object, - unsigned int, device_object->ref_count); + os_ref_retain(&device_object->ref_count); + DTRACE_VM2(device_pager_reference, + device_pager_t, device_object, + unsigned int, os_ref_get_count(&device_object->ref_count)); } /* @@ -384,20 +394,18 @@ device_pager_reference( */ void device_pager_deallocate( - memory_object_t mem_obj) + memory_object_t mem_obj) { - device_pager_t device_object; - memory_object_control_t device_control; - unsigned int ref_count; + device_pager_t device_object; + memory_object_control_t device_control; device_object = device_pager_lookup(mem_obj); - assert(device_object->ref_count > 0); DTRACE_VM2(device_pager_deallocate, - device_pager_t, device_object, - unsigned int, device_object->ref_count); + device_pager_t, device_object, + unsigned int, os_ref_get_count(&device_object->ref_count)); - ref_count = hw_atomic_sub(&device_object->ref_count, 1); + os_ref_count_t ref_count = os_ref_release(&device_object->ref_count); if (ref_count == 1) { /* @@ -406,8 +414,8 @@ device_pager_deallocate( */ DTRACE_VM2(device_pager_destroy, - device_pager_t, device_object, - unsigned int, device_object->ref_count); + device_pager_t, device_object, + unsigned int, os_ref_get_count(&device_object->ref_count)); assert(device_object->is_mapped 
== FALSE); if (device_object->device_handle != (device_port_t) NULL) { @@ -421,8 +429,8 @@ device_pager_deallocate( * No more references: free the pager. */ DTRACE_VM2(device_pager_free, - device_pager_t, device_object, - unsigned int, device_object->ref_count); + device_pager_t, device_object, + unsigned int, os_ref_get_count(&device_object->ref_count)); device_pager_lock_destroy(device_object); @@ -433,9 +441,9 @@ device_pager_deallocate( kern_return_t device_pager_data_initialize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt) { panic("device_pager_data_initialize"); return KERN_FAILURE; @@ -443,17 +451,17 @@ device_pager_data_initialize( kern_return_t device_pager_data_unlock( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t size, - __unused vm_prot_t desired_access) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t size, + __unused vm_prot_t desired_access) { return KERN_FAILURE; } kern_return_t device_pager_terminate( - __unused memory_object_t mem_obj) + __unused memory_object_t mem_obj) { return KERN_SUCCESS; } @@ -465,10 +473,10 @@ device_pager_terminate( */ kern_return_t device_pager_synchronize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t length, - __unused vm_sync_t sync_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t length, + __unused vm_sync_t sync_flags) { panic("device_pager_synchronize: memory_object_synchronize no longer supported\n"); return KERN_FAILURE; @@ -479,15 +487,15 @@ device_pager_synchronize( */ kern_return_t device_pager_map( - memory_object_t mem_obj, - __unused vm_prot_t prot) + memory_object_t mem_obj, + __unused vm_prot_t prot) { - device_pager_t device_object; + device_pager_t device_object; device_object = device_pager_lookup(mem_obj); device_pager_lock(device_object); - assert(device_object->ref_count > 0); + assert(os_ref_get_count(&device_object->ref_count) > 0); if (device_object->is_mapped == FALSE) { /* * First mapping of this pager: take an extra reference @@ -504,15 +512,15 @@ device_pager_map( kern_return_t device_pager_last_unmap( - memory_object_t mem_obj) + memory_object_t mem_obj) { - device_pager_t device_object; - boolean_t drop_ref; + device_pager_t device_object; + boolean_t drop_ref; device_object = device_pager_lookup(mem_obj); device_pager_lock(device_object); - assert(device_object->ref_count > 0); + assert(os_ref_get_count(&device_object->ref_count) > 0); if (device_object->is_mapped) { device_object->is_mapped = FALSE; drop_ref = TRUE; @@ -539,24 +547,25 @@ device_object_create(void) device_pager_t device_object; device_object = (struct device_pager *) zalloc(device_pager_zone); - if (device_object == DEVICE_PAGER_NULL) - return(DEVICE_PAGER_NULL); + if (device_object == DEVICE_PAGER_NULL) { + return DEVICE_PAGER_NULL; + } - bzero(device_object, sizeof (*device_object)); + bzero(device_object, sizeof(*device_object)); device_object->dev_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT; device_object->dev_pgr_hdr.mo_pager_ops = &device_pager_ops; device_object->dev_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; device_pager_lock_init(device_object); - device_object->ref_count = 1; + 
os_ref_init(&device_object->ref_count, NULL); device_object->is_mapped = FALSE; DTRACE_VM2(device_pager_create, - device_pager_t, device_object, - unsigned int, device_object->ref_count); + device_pager_t, device_object, + unsigned int, os_ref_get_count(&device_object->ref_count)); - return(device_object); + return device_object; } boolean_t diff --git a/osfmk/vm/lz4.c b/osfmk/vm/lz4.c index caec717a6..20d11549c 100644 --- a/osfmk/vm/lz4.c +++ b/osfmk/vm/lz4.c @@ -2,7 +2,7 @@ * Copyright (c) 2016-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,28 +34,29 @@ #include "lz4.h" #define memcpy __builtin_memcpy -size_t lz4raw_decode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size, - const uint8_t * __restrict src_buffer, size_t src_size, - void * __restrict work __attribute__((unused))) +size_t +lz4raw_decode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size, + const uint8_t * __restrict src_buffer, size_t src_size, + void * __restrict work __attribute__((unused))) { - const uint8_t * src = src_buffer; - uint8_t * dst = dst_buffer; - - // Go fast if we can, keeping away from the end of buffers + const uint8_t * src = src_buffer; + uint8_t * dst = dst_buffer; + + // Go fast if we can, keeping away from the end of buffers #if LZ4_ENABLE_ASSEMBLY_DECODE - if (dst_size > LZ4_GOFAST_SAFETY_MARGIN && src_size > LZ4_GOFAST_SAFETY_MARGIN) - { - if (lz4_decode_asm(&dst, dst_buffer, dst_buffer + dst_size - LZ4_GOFAST_SAFETY_MARGIN, &src, src_buffer + src_size - LZ4_GOFAST_SAFETY_MARGIN)) - return 0; // FAIL - } + if (dst_size > LZ4_GOFAST_SAFETY_MARGIN && src_size > LZ4_GOFAST_SAFETY_MARGIN) { + if (lz4_decode_asm(&dst, dst_buffer, dst_buffer + dst_size - LZ4_GOFAST_SAFETY_MARGIN, &src, src_buffer + src_size - LZ4_GOFAST_SAFETY_MARGIN)) { + return 0; // FAIL + } + } #endif //DRKTODO: Can the 'C' "safety" decode be eliminated for 4/16K fixed-sized buffers? 
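// (Annotation on the control flow above and below: lz4raw_decode_buffer is a
//  two-phase decoder. The assembly fast path runs only while both cursors stay
//  LZ4_GOFAST_SAFETY_MARGIN bytes clear of their buffer ends, so it can load
//  and store in wide unaligned blocks without per-byte bounds checks; the
//  plain-C lz4_decode() then finishes the tail, and is the sole path whenever
//  either buffer is smaller than the margin. A nonzero return from either
//  phase is treated as corrupt input and the whole decode reports 0 bytes.)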
- - // Finish safe - if (lz4_decode(&dst, dst_buffer, dst_buffer + dst_size, &src, src_buffer + src_size)) - return 0; // FAIL - return (size_t)(dst - dst_buffer); // bytes produced + // Finish safe + if (lz4_decode(&dst, dst_buffer, dst_buffer + dst_size, &src, src_buffer + src_size)) { + return 0; // FAIL + } + return (size_t)(dst - dst_buffer); // bytes produced } // Debug flags #if LZ4DEBUG @@ -77,120 +78,148 @@ size_t lz4raw_decode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size, #endif // Return hash for 4-byte sequence X -static inline uint32_t lz4_hash(uint32_t x) { return (x * 2654435761U) >> (32 - LZ4_COMPRESS_HASH_BITS); } +static inline uint32_t +lz4_hash(uint32_t x) +{ + return (x * 2654435761U) >> (32 - LZ4_COMPRESS_HASH_BITS); +} // Store 0xfff..fff at *PTR -static inline void lz4_fill16(uint8_t * ptr) +static inline void +lz4_fill16(uint8_t * ptr) { - store8(ptr,-1); - store8(ptr+8,-1); + store8(ptr, -1); + store8(ptr + 8, -1); } // Return number of matching bytes 0..4 at positions A and B. -static inline size_t lz4_nmatch4(const uint8_t * a,const uint8_t * b) +static inline size_t +lz4_nmatch4(const uint8_t * a, const uint8_t * b) { - uint32_t x = load4(a) ^ load4(b); - return (x == 0)?4:(__builtin_ctzl(x) >> 3); + uint32_t x = load4(a) ^ load4(b); + return (x == 0)?4:(__builtin_ctzl(x) >> 3); } // Return number of matching bytes 0..8 at positions A and B. -static inline size_t lz4_nmatch8(const uint8_t * a,const uint8_t * b) +static inline size_t +lz4_nmatch8(const uint8_t * a, const uint8_t * b) { - uint64_t x = load8(a) ^ load8(b); - return (x == 0)?8:(__builtin_ctzll(x) >> 3); + uint64_t x = load8(a) ^ load8(b); + return (x == 0)?8:(__builtin_ctzll(x) >> 3); } // Return number of matching bytes 0..16 at positions A and B. -static inline size_t lz4_nmatch16(const uint8_t * a,const uint8_t * b) +static inline size_t +lz4_nmatch16(const uint8_t * a, const uint8_t * b) { - size_t n = lz4_nmatch8(a,b); - return (n == 8)?(8 + lz4_nmatch8(a+8,b+8)):n; + size_t n = lz4_nmatch8(a, b); + return (n == 8)?(8 + lz4_nmatch8(a + 8, b + 8)):n; } // Return number of matching bytes 0..32 at positions A and B. -static inline size_t lz4_nmatch32(const uint8_t * a,const uint8_t * b) +static inline size_t +lz4_nmatch32(const uint8_t * a, const uint8_t * b) { - size_t n = lz4_nmatch16(a,b); - return (n == 16)?(16 + lz4_nmatch16(a+16,b+16)):n; + size_t n = lz4_nmatch16(a, b); + return (n == 16)?(16 + lz4_nmatch16(a + 16, b + 16)):n; } // Return number of matching bytes 0..64 at positions A and B. -static inline size_t lz4_nmatch64(const uint8_t * a,const uint8_t * b) +static inline size_t +lz4_nmatch64(const uint8_t * a, const uint8_t * b) { - size_t n = lz4_nmatch32(a,b); - return (n == 32)?(32 + lz4_nmatch32(a+32,b+32)):n; + size_t n = lz4_nmatch32(a, b); + return (n == 32)?(32 + lz4_nmatch32(a + 32, b + 32)):n; } // Compile-time selection, return number of matching bytes 0..N at positions A and B. 
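// (Worked example for the lz4_nmatch4/8/16/32/64 ladder above: each level
//  XORs two loads and converts the lowest set bit into a byte index with a
//  count-trailing-zeros intrinsic. For a = "ABCDEFGH" and b = "ABCDxFGH",
//  load8(a) ^ load8(b) is zero in bytes 0-3 and nonzero in byte 4, so
//  __builtin_ctzll() returns a bit position in [32,39] and the >> 3 yields
//  4 matching bytes. Mapping bit position to byte index this way relies on
//  little-endian loads, true of Apple's i386/x86_64/arm targets. The wider
//  helpers double up recursively: a full 8-byte match falls through into
//  lz4_nmatch8() on the next 8 bytes, and so on.)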
-static inline size_t lz4_nmatch(int N, const uint8_t * a, const uint8_t * b) +static inline size_t +lz4_nmatch(int N, const uint8_t * a, const uint8_t * b) { - switch (N) { - case 4: return lz4_nmatch4(a,b); - case 8: return lz4_nmatch8(a,b); - case 16: return lz4_nmatch16(a,b); - case 32: return lz4_nmatch32(a,b); - case 64: return lz4_nmatch64(a,b); - } - __builtin_trap(); // FAIL + switch (N) { + case 4: return lz4_nmatch4(a, b); + case 8: return lz4_nmatch8(a, b); + case 16: return lz4_nmatch16(a, b); + case 32: return lz4_nmatch32(a, b); + case 64: return lz4_nmatch64(a, b); + } + __builtin_trap(); // FAIL } // Store LENGTH in DST using the literal_length/match_length extension scheme: X is the sum of all bytes until we reach a byte < 0xff. // We are allowed to access a constant number of bytes above DST_END. // Return incremented DST pointer on success, and 0 on failure -static inline uint8_t *lz4_store_length(uint8_t * dst, const uint8_t * const end, uint32_t L) { +static inline uint8_t * +lz4_store_length(uint8_t * dst, const uint8_t * const end, uint32_t L) +{ (void)end; - while (L >= 17*255) { - lz4_fill16(dst); - dst += 16; - L -= 16*255; - } - lz4_fill16(dst); - //DRKTODO verify these modulos/divisions are optimally handled by clang - dst += L/255; - *dst++ = L%255; - return dst; + while (L >= 17 * 255) { + lz4_fill16(dst); + dst += 16; + L -= 16 * 255; + } + lz4_fill16(dst); + //DRKTODO verify these modulos/divisions are optimally handled by clang + dst += L / 255; + *dst++ = L % 255; + return dst; } -static inline uint32_t clamp(uint32_t x, uint32_t max) __attribute__((overloadable)) { return x > max ? max : x; } +static inline uint32_t +clamp(uint32_t x, uint32_t max) +__attribute__((overloadable)) +{ + return x > max ? max : x; +} -static inline uint8_t *copy_literal(uint8_t *dst, const uint8_t * restrict src, uint32_t L) { - uint8_t *end = dst + L; - { copy16(dst, src); dst += 16; src += 16; } - while (dst < end) { copy32(dst, src); dst += 32; src += 32; } - return end; +static inline uint8_t * +copy_literal(uint8_t *dst, const uint8_t * restrict src, uint32_t L) +{ + uint8_t *end = dst + L; + { copy16(dst, src); dst += 16; src += 16; } + while (dst < end) { + copy32(dst, src); dst += 32; src += 32; + } + return end; } -static uint8_t *lz4_emit_match(uint32_t L, uint32_t M, uint32_t D, - uint8_t * restrict dst, - const uint8_t * const end, - const uint8_t * restrict src) { - // The LZ4 encoding scheme requires that M is at least 4, because - // the actual value stored by the encoding is M - 4. Check this - // requirement for debug builds. - assert(M >= 4 && "LZ4 encoding requires that M is at least 4"); - // Having checked that M >= 4, translate M by four. - M -= 4; - // Similarly, we must have D < 2**16, because we use only two bytes - // to represent the value of D in the encoding. - assert(D <= USHRT_MAX && "LZ4 encoding requries that D can be stored in two bytes."); - // Construct the command byte by clamping both L and M to 0 ... 15 - // and packing them into a single byte, and store it. - *dst++ = clamp(L, 15) << 4 | clamp(M, 15); - // If L is 15 or greater, we need to encode extra literal length bytes. - if (L >= 15) { - dst = lz4_store_length(dst, end, L - 15); - if (dst == 0 || dst + L >= end) return NULL; - } - // Copy the literal itself from src to dst. - dst = copy_literal(dst, src, L); - // Store match distance. - store2(dst, D); dst += 2; - // If M is 15 or greater, we need to encode extra match length bytes. 
- if (M >= 15) { - dst = lz4_store_length(dst, end, M - 15); - if (dst == 0) return NULL; - } - return dst; +static uint8_t * +lz4_emit_match(uint32_t L, uint32_t M, uint32_t D, + uint8_t * restrict dst, + const uint8_t * const end, + const uint8_t * restrict src) +{ + // The LZ4 encoding scheme requires that M is at least 4, because + // the actual value stored by the encoding is M - 4. Check this + // requirement for debug builds. + assert(M >= 4 && "LZ4 encoding requires that M is at least 4"); + // Having checked that M >= 4, translate M by four. + M -= 4; + // Similarly, we must have D < 2**16, because we use only two bytes + // to represent the value of D in the encoding. + assert(D <= USHRT_MAX && "LZ4 encoding requries that D can be stored in two bytes."); + // Construct the command byte by clamping both L and M to 0 ... 15 + // and packing them into a single byte, and store it. + *dst++ = clamp(L, 15) << 4 | clamp(M, 15); + // If L is 15 or greater, we need to encode extra literal length bytes. + if (L >= 15) { + dst = lz4_store_length(dst, end, L - 15); + if (dst == 0 || dst + L >= end) { + return NULL; + } + } + // Copy the literal itself from src to dst. + dst = copy_literal(dst, src, L); + // Store match distance. + store2(dst, D); dst += 2; + // If M is 15 or greater, we need to encode extra match length bytes. + if (M >= 15) { + dst = lz4_store_length(dst, end, M - 15); + if (dst == 0) { + return NULL; + } + } + return dst; } /* #ifndef LZ4_EARLY_ABORT */ @@ -204,337 +233,376 @@ int lz4_early_aborts = 0; #define LZ4_EARLY_ABORT_MIN_COMPRESSION_FACTOR (20) #endif /* LZ4_EARLY_ABORT */ -void lz4_encode_2gb(uint8_t ** dst_ptr, - size_t dst_size, - const uint8_t ** src_ptr, - const uint8_t * src_begin, - size_t src_size, - lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES], - int skip_final_literals) +void +lz4_encode_2gb(uint8_t ** dst_ptr, + size_t dst_size, + const uint8_t ** src_ptr, + const uint8_t * src_begin, + size_t src_size, + lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES], + int skip_final_literals) { - uint8_t *dst = *dst_ptr; // current output stream position - uint8_t *end = dst + dst_size - LZ4_GOFAST_SAFETY_MARGIN; - const uint8_t *src = *src_ptr; // current input stream literal to encode - const uint8_t *src_end = src + src_size - LZ4_GOFAST_SAFETY_MARGIN; - const uint8_t *match_begin = 0; // first byte of matched sequence - const uint8_t *match_end = 0; // first byte after matched sequence + uint8_t *dst = *dst_ptr; // current output stream position + uint8_t *end = dst + dst_size - LZ4_GOFAST_SAFETY_MARGIN; + const uint8_t *src = *src_ptr; // current input stream literal to encode + const uint8_t *src_end = src + src_size - LZ4_GOFAST_SAFETY_MARGIN; + const uint8_t *match_begin = 0; // first byte of matched sequence + const uint8_t *match_end = 0; // first byte after matched sequence #if LZ4_EARLY_ABORT - uint8_t * const dst_begin = dst; - uint32_t lz4_do_abort_eval = lz4_do_early_abort; + uint8_t * const dst_begin = dst; + uint32_t lz4_do_abort_eval = lz4_do_early_abort; #endif - - while (dst < end) - { - ptrdiff_t match_distance = 0; - for (match_begin = src; match_begin < src_end; match_begin += 1) { - const uint32_t pos = (uint32_t)(match_begin - src_begin); - const uint32_t w0 = load4(match_begin); - const uint32_t w1 = load4(match_begin + 1); - const uint32_t w2 = load4(match_begin + 2); - const uint32_t w3 = load4(match_begin + 3); - const int i0 = lz4_hash(w0); - const int i1 = lz4_hash(w1); - const int i2 = lz4_hash(w2); - const int i3 = 
lz4_hash(w3); - const uint8_t *c0 = src_begin + hash_table[i0].offset; - const uint8_t *c1 = src_begin + hash_table[i1].offset; - const uint8_t *c2 = src_begin + hash_table[i2].offset; - const uint8_t *c3 = src_begin + hash_table[i3].offset; - const uint32_t m0 = hash_table[i0].word; - const uint32_t m1 = hash_table[i1].word; - const uint32_t m2 = hash_table[i2].word; - const uint32_t m3 = hash_table[i3].word; - hash_table[i0].offset = pos; - hash_table[i0].word = w0; - hash_table[i1].offset = pos + 1; - hash_table[i1].word = w1; - - hash_table[i2].offset = pos + 2; - hash_table[i2].word = w2; - hash_table[i3].offset = pos + 3; - hash_table[i3].word = w3; - - match_distance = (match_begin - c0); - if (w0 == m0 && match_distance < 0x10000 && match_distance > 0) { - match_end = match_begin + 4; - goto EXPAND_FORWARD; - } - - match_begin++; - match_distance = (match_begin - c1); - if (w1 == m1 && match_distance < 0x10000 && match_distance > 0) { - match_end = match_begin + 4; - goto EXPAND_FORWARD; - } - - match_begin++; - match_distance = (match_begin - c2); - if (w2 == m2 && match_distance < 0x10000 && match_distance > 0) { - match_end = match_begin + 4; - goto EXPAND_FORWARD; - } - - match_begin++; - match_distance = (match_begin - c3); - if (w3 == m3 && match_distance < 0x10000 && match_distance > 0) { - match_end = match_begin + 4; - goto EXPAND_FORWARD; - } + + while (dst < end) { + ptrdiff_t match_distance = 0; + for (match_begin = src; match_begin < src_end; match_begin += 1) { + const uint32_t pos = (uint32_t)(match_begin - src_begin); + const uint32_t w0 = load4(match_begin); + const uint32_t w1 = load4(match_begin + 1); + const uint32_t w2 = load4(match_begin + 2); + const uint32_t w3 = load4(match_begin + 3); + const int i0 = lz4_hash(w0); + const int i1 = lz4_hash(w1); + const int i2 = lz4_hash(w2); + const int i3 = lz4_hash(w3); + const uint8_t *c0 = src_begin + hash_table[i0].offset; + const uint8_t *c1 = src_begin + hash_table[i1].offset; + const uint8_t *c2 = src_begin + hash_table[i2].offset; + const uint8_t *c3 = src_begin + hash_table[i3].offset; + const uint32_t m0 = hash_table[i0].word; + const uint32_t m1 = hash_table[i1].word; + const uint32_t m2 = hash_table[i2].word; + const uint32_t m3 = hash_table[i3].word; + hash_table[i0].offset = pos; + hash_table[i0].word = w0; + hash_table[i1].offset = pos + 1; + hash_table[i1].word = w1; + + hash_table[i2].offset = pos + 2; + hash_table[i2].word = w2; + hash_table[i3].offset = pos + 3; + hash_table[i3].word = w3; + + match_distance = (match_begin - c0); + if (w0 == m0 && match_distance < 0x10000 && match_distance > 0) { + match_end = match_begin + 4; + goto EXPAND_FORWARD; + } + + match_begin++; + match_distance = (match_begin - c1); + if (w1 == m1 && match_distance < 0x10000 && match_distance > 0) { + match_end = match_begin + 4; + goto EXPAND_FORWARD; + } + + match_begin++; + match_distance = (match_begin - c2); + if (w2 == m2 && match_distance < 0x10000 && match_distance > 0) { + match_end = match_begin + 4; + goto EXPAND_FORWARD; + } + + match_begin++; + match_distance = (match_begin - c3); + if (w3 == m3 && match_distance < 0x10000 && match_distance > 0) { + match_end = match_begin + 4; + goto EXPAND_FORWARD; + } #if LZ4_EARLY_ABORT - //DRKTODO: Evaluate unrolling further. 
2xunrolling had some modest benefits - if (lz4_do_abort_eval && ((pos) >= LZ4_EARLY_ABORT_EVAL)) { - ptrdiff_t dstd = dst - dst_begin; - - if (dstd == 0) { - lz4_early_aborts++; - return; - } - -/* if (dstd >= pos) { */ -/* return; */ -/* } */ -/* ptrdiff_t cbytes = pos - dstd; */ -/* if ((cbytes * LZ4_EARLY_ABORT_MIN_COMPRESSION_FACTOR) > pos) { */ -/* return; */ -/* } */ - lz4_do_abort_eval = 0; - } + //DRKTODO: Evaluate unrolling further. 2xunrolling had some modest benefits + if (lz4_do_abort_eval && ((pos) >= LZ4_EARLY_ABORT_EVAL)) { + ptrdiff_t dstd = dst - dst_begin; + + if (dstd == 0) { + lz4_early_aborts++; + return; + } + +/* if (dstd >= pos) { */ +/* return; */ +/* } */ +/* ptrdiff_t cbytes = pos - dstd; */ +/* if ((cbytes * LZ4_EARLY_ABORT_MIN_COMPRESSION_FACTOR) > pos) { */ +/* return; */ +/* } */ + lz4_do_abort_eval = 0; + } #endif - } - - if (skip_final_literals) { *src_ptr = src; *dst_ptr = dst; return; } // do not emit the final literal sequence - - // Emit a trailing literal that covers the remainder of the source buffer, - // if we can do so without exceeding the bounds of the destination buffer. - size_t src_remaining = src_end + LZ4_GOFAST_SAFETY_MARGIN - src; - if (src_remaining < 15) { - *dst++ = (uint8_t)(src_remaining << 4); - memcpy(dst, src, 16); dst += src_remaining; - } else { - *dst++ = 0xf0; - dst = lz4_store_length(dst, end, (uint32_t)(src_remaining - 15)); - if (dst == 0 || dst + src_remaining >= end) return; - memcpy(dst, src, src_remaining); dst += src_remaining; - } - *dst_ptr = dst; - *src_ptr = src + src_remaining; - return; - - EXPAND_FORWARD: - - // Expand match forward - { - const uint8_t * ref_end = match_end - match_distance; - while (match_end < src_end) - { - size_t n = lz4_nmatch(LZ4_MATCH_SEARCH_LOOP_SIZE, ref_end, match_end); - if (n < LZ4_MATCH_SEARCH_LOOP_SIZE) { match_end += n; break; } - match_end += LZ4_MATCH_SEARCH_LOOP_SIZE; - ref_end += LZ4_MATCH_SEARCH_LOOP_SIZE; - } - } - - // Expand match backward - { - // match_begin_min = max(src_begin + match_distance,literal) - const uint8_t * match_begin_min = src_begin + match_distance; - match_begin_min = (match_begin_min < src)?src:match_begin_min; - const uint8_t * ref_begin = match_begin - match_distance; - - while (match_begin > match_begin_min && ref_begin[-1] == match_begin[-1] ) { match_begin -= 1; ref_begin -= 1; } - } - - // Emit match - dst = lz4_emit_match((uint32_t)(match_begin - src), (uint32_t)(match_end - match_begin), (uint32_t)match_distance, dst, end, src); - if (!dst) return; - - // Update state - src = match_end; - - // Update return values to include the last fully encoded match - *dst_ptr = dst; - *src_ptr = src; - } + } + + if (skip_final_literals) { + *src_ptr = src; *dst_ptr = dst; return; + } // do not emit the final literal sequence + + // Emit a trailing literal that covers the remainder of the source buffer, + // if we can do so without exceeding the bounds of the destination buffer. 
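// (Annotation on the trailing-literal encoding below: the 4-bit literal
//  field of the command byte saturates at 15, and the remainder is emitted
//  as extension bytes that the decoder sums until it sees a byte < 255.
//  lz4_store_length() above implements that by pre-filling 0xff and then
//  advancing L/255 bytes and storing L%255. Worked example: a 270-byte
//  trailing literal is emitted as command byte 0xf0 plus extension bytes
//  0xff, 0x00, since 15 + 255 + 0 = 270. The fixed memcpy(dst, src, 16) in
//  the short case can copy past the literal's end; that appears to rely on
//  the LZ4_GOFAST_SAFETY_MARGIN slack both buffer cursors keep in this
//  fast path.)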
+ size_t src_remaining = src_end + LZ4_GOFAST_SAFETY_MARGIN - src; + if (src_remaining < 15) { + *dst++ = (uint8_t)(src_remaining << 4); + memcpy(dst, src, 16); dst += src_remaining; + } else { + *dst++ = 0xf0; + dst = lz4_store_length(dst, end, (uint32_t)(src_remaining - 15)); + if (dst == 0 || dst + src_remaining >= end) { + return; + } + memcpy(dst, src, src_remaining); dst += src_remaining; + } + *dst_ptr = dst; + *src_ptr = src + src_remaining; + return; + +EXPAND_FORWARD: + + // Expand match forward + { + const uint8_t * ref_end = match_end - match_distance; + while (match_end < src_end) { + size_t n = lz4_nmatch(LZ4_MATCH_SEARCH_LOOP_SIZE, ref_end, match_end); + if (n < LZ4_MATCH_SEARCH_LOOP_SIZE) { + match_end += n; break; + } + match_end += LZ4_MATCH_SEARCH_LOOP_SIZE; + ref_end += LZ4_MATCH_SEARCH_LOOP_SIZE; + } + } + + // Expand match backward + { + // match_begin_min = max(src_begin + match_distance,literal) + const uint8_t * match_begin_min = src_begin + match_distance; + match_begin_min = (match_begin_min < src)?src:match_begin_min; + const uint8_t * ref_begin = match_begin - match_distance; + + while (match_begin > match_begin_min && ref_begin[-1] == match_begin[-1]) { + match_begin -= 1; ref_begin -= 1; + } + } + + // Emit match + dst = lz4_emit_match((uint32_t)(match_begin - src), (uint32_t)(match_end - match_begin), (uint32_t)match_distance, dst, end, src); + if (!dst) { + return; + } + + // Update state + src = match_end; + + // Update return values to include the last fully encoded match + *dst_ptr = dst; + *src_ptr = src; + } } #endif -size_t lz4raw_encode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size, - const uint8_t * __restrict src_buffer, size_t src_size, - lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES]) +size_t +lz4raw_encode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size, + const uint8_t * __restrict src_buffer, size_t src_size, + lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES]) { - // Initialize hash table - const lz4_hash_entry_t HASH_FILL = { .offset = 0x80000000, .word = 0x0 }; - - const uint8_t * src = src_buffer; - uint8_t * dst = dst_buffer; - - // We need several blocks because our base function is limited to 2GB input - const size_t BLOCK_SIZE = 0x7ffff000; - while (src_size > 0) - { - //DRKTODO either implement pattern4 or figure out optimal unroll - //DRKTODO: bizarrely, with plain O3 the compiler generates a single - //DRKTODO: scalar STP per loop iteration with the stock loop - //DRKTODO If hand unrolled, it switches to NEON store pairs - // Reset hash table for each block + // Initialize hash table + const lz4_hash_entry_t HASH_FILL = { .offset = 0x80000000, .word = 0x0 }; + + const uint8_t * src = src_buffer; + uint8_t * dst = dst_buffer; + + // We need several blocks because our base function is limited to 2GB input + const size_t BLOCK_SIZE = 0x7ffff000; + while (src_size > 0) { + //DRKTODO either implement pattern4 or figure out optimal unroll + //DRKTODO: bizarrely, with plain O3 the compiler generates a single + //DRKTODO: scalar STP per loop iteration with the stock loop + //DRKTODO If hand unrolled, it switches to NEON store pairs + // Reset hash table for each block /* #if __STDC_HOSTED__ */ /* memset_pattern8(hash_table, &HASH_FILL, lz4_encode_scratch_size); */ /* #else */ /* for (int i=0;i BLOCK_SIZE ? BLOCK_SIZE : src_size; - - // Run the encoder, only the last block emits final literals. Allows concatenation of encoded payloads. 
- // Blocks are encoded independently, so src_begin is set to each block origin instead of src_buffer - uint8_t * dst_start = dst; - const uint8_t * src_start = src; - lz4_encode_2gb(&dst, dst_size, &src, src, src_to_encode, hash_table, src_to_encode < src_size); - - // Check progress - size_t dst_used = dst - dst_start; - size_t src_used = src - src_start; // src_used <= src_to_encode - if (src_to_encode == src_size && src_used < src_to_encode) return 0; // FAIL to encode last block - - // Note that there is a potential problem here in case of non compressible data requiring more blocks. - // We may end up here with src_used very small, or even 0, and will not be able to make progress during - // compression. We FAIL unless the length of literals remaining at the end is small enough. - if (src_to_encode < src_size && src_to_encode - src_used >= (1<<16)) return 0; // FAIL too many literals - - // Update counters (SRC and DST already have been updated) - src_size -= src_used; - dst_size -= dst_used; - } - - return (size_t)(dst - dst_buffer); // bytes produced + for (int i = 0; i < LZ4_COMPRESS_HASH_ENTRIES;) { + hash_table[i++] = HASH_FILL; + hash_table[i++] = HASH_FILL; + hash_table[i++] = HASH_FILL; + hash_table[i++] = HASH_FILL; + } + + // Bytes to encode in this block + const size_t src_to_encode = src_size > BLOCK_SIZE ? BLOCK_SIZE : src_size; + + // Run the encoder, only the last block emits final literals. Allows concatenation of encoded payloads. + // Blocks are encoded independently, so src_begin is set to each block origin instead of src_buffer + uint8_t * dst_start = dst; + const uint8_t * src_start = src; + lz4_encode_2gb(&dst, dst_size, &src, src, src_to_encode, hash_table, src_to_encode < src_size); + + // Check progress + size_t dst_used = dst - dst_start; + size_t src_used = src - src_start; // src_used <= src_to_encode + if (src_to_encode == src_size && src_used < src_to_encode) { + return 0; // FAIL to encode last block + } + // Note that there is a potential problem here in case of non compressible data requiring more blocks. + // We may end up here with src_used very small, or even 0, and will not be able to make progress during + // compression. We FAIL unless the length of literals remaining at the end is small enough. + if (src_to_encode < src_size && src_to_encode - src_used >= (1 << 16)) { + return 0; // FAIL too many literals + } + // Update counters (SRC and DST already have been updated) + src_size -= src_used; + dst_size -= dst_used; + } + + return (size_t)(dst - dst_buffer); // bytes produced } typedef uint32_t lz4_uint128 __attribute__((ext_vector_type(4))) __attribute__((__aligned__(1))); -int lz4_decode(uint8_t ** dst_ptr, - uint8_t * dst_begin, - uint8_t * dst_end, - const uint8_t ** src_ptr, - const uint8_t * src_end) +int +lz4_decode(uint8_t ** dst_ptr, + uint8_t * dst_begin, + uint8_t * dst_end, + const uint8_t ** src_ptr, + const uint8_t * src_end) { - uint8_t * dst = *dst_ptr; - const uint8_t * src = *src_ptr; - - // Require dst_end > dst. - if (dst_end <= dst) goto OUT_FULL; - - while (src < src_end) - { - // Keep last good position - *src_ptr = src; - *dst_ptr = dst; - - uint8_t cmd = *src++; // 1 byte encoding literal+(match-4) length: LLLLMMMM - uint32_t literalLength = (cmd >> 4) & 15; // 0..15 - uint32_t matchLength = 4 + (cmd & 15); // 4..19 - - // extra bytes for literalLength - if (__improbable(literalLength == 15)) - { - uint8_t s; - do { + uint8_t * dst = *dst_ptr; + const uint8_t * src = *src_ptr; + + // Require dst_end > dst. 
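As a reading aid for the decode loop that follows: each sequence starts with one LLLLMMMM command byte, and either length saturating its nibble pulls in 255-terminated extension bytes, the mirror image of the encoder's literal framing earlier. A self-contained check of the split, with hypothetical values rather than patch code:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint8_t cmd = 0xf7;                        // LLLL = 15, MMMM = 7
	uint32_t literalLength = (cmd >> 4) & 15;  // 15 => extension bytes follow
	uint32_t matchLength = 4 + (cmd & 15);     // 4..19; matches are >= 4 bytes
	assert(literalLength == 15);
	assert(matchLength == 11);
	return 0;
}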
+ if (dst_end <= dst) { + goto OUT_FULL; + } + + while (src < src_end) { + // Keep last good position + *src_ptr = src; + *dst_ptr = dst; + + uint8_t cmd = *src++; // 1 byte encoding literal+(match-4) length: LLLLMMMM + uint32_t literalLength = (cmd >> 4) & 15; // 0..15 + uint32_t matchLength = 4 + (cmd & 15); // 4..19 + + // extra bytes for literalLength + if (__improbable(literalLength == 15)) { + uint8_t s; + do { #if DEBUG_LZ4_DECODE_ERRORS - if (__improbable(src >= src_end)) printf("Truncated SRC literal length\n"); + if (__improbable(src >= src_end)) { + printf("Truncated SRC literal length\n"); + } #endif - if (__improbable(src >= src_end)) goto IN_FAIL; // unexpected end of input (1 byte needed) - s = *src++; - literalLength += s; - } while (__improbable(s == 255)); - } - - // copy literal + if (__improbable(src >= src_end)) { + goto IN_FAIL; // unexpected end of input (1 byte needed) + } + s = *src++; + literalLength += s; + } while (__improbable(s == 255)); + } + + // copy literal #if DEBUG_LZ4_DECODE_ERRORS - if (__improbable(literalLength > (size_t)(src_end - src))) printf("Truncated SRC literal\n"); + if (__improbable(literalLength > (size_t)(src_end - src))) { + printf("Truncated SRC literal\n"); + } #endif - if (__improbable(literalLength > (size_t)(src_end - src))) goto IN_FAIL; - if (__improbable(literalLength > (size_t)(dst_end - dst))) { - // literal will take us past the end of the destination buffer, - // so we can only copy part of it. - literalLength = (uint32_t)(dst_end - dst); - memcpy(dst, src, literalLength); - dst += literalLength; - goto OUT_FULL; - } - memcpy(dst,src,literalLength); - src += literalLength; - dst += literalLength; - - if (__improbable(src >= src_end)) goto OUT_FULL; // valid end of stream + if (__improbable(literalLength > (size_t)(src_end - src))) { + goto IN_FAIL; + } + if (__improbable(literalLength > (size_t)(dst_end - dst))) { + // literal will take us past the end of the destination buffer, + // so we can only copy part of it. + literalLength = (uint32_t)(dst_end - dst); + memcpy(dst, src, literalLength); + dst += literalLength; + goto OUT_FULL; + } + memcpy(dst, src, literalLength); + src += literalLength; + dst += literalLength; + + if (__improbable(src >= src_end)) { + goto OUT_FULL; // valid end of stream + } #if DEBUG_LZ4_DECODE_ERRORS - if (__improbable(2 > (size_t)(src_end - src))) printf("Truncated SRC distance\n"); + if (__improbable(2 > (size_t)(src_end - src))) { + printf("Truncated SRC distance\n"); + } #endif - if (__improbable(2 > (size_t)(src_end - src))) goto IN_FAIL; // unexpected end of input (2 bytes needed) - - //DRKTODO: this causes an alignment increase warning (legitimate?) - //DRKTODO: cast of char * to uint16_t* + if (__improbable(2 > (size_t)(src_end - src))) { + goto IN_FAIL; // unexpected end of input (2 bytes needed) + } + //DRKTODO: this causes an alignment increase warning (legitimate?) 
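On the alignment DRKTODO above: the warning is arguably legitimate only as a portability note, since src has byte alignment by construction. One possible warning-free formulation, offered as an assumption rather than a proposed change (lz4_load_match_distance is a hypothetical helper), is a 2-byte memcpy, which clang lowers to the same single unaligned halfword load on arm64 and x86_64:

#include <stdint.h>
#include <string.h>

// Hypothetical alternative to the uint16_t* cast guarded by the pragma
// below: memcpy makes no alignment claim, so -Wcast-align stays quiet,
// and it reads the same little-endian layout the cast assumes.
static inline uint16_t
lz4_load_match_distance(const uint8_t *src)
{
	uint16_t d;
	memcpy(&d, src, sizeof d);
	return d;
}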
+ //DRKTODO: cast of char * to uint16_t* #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-align" - - // match distance - uint64_t matchDistance = *(const uint16_t *)src; // 0x0000 <= matchDistance <= 0xffff + + // match distance + uint64_t matchDistance = *(const uint16_t *)src; // 0x0000 <= matchDistance <= 0xffff #pragma clang diagnostic pop - src += 2; + src += 2; #if DEBUG_LZ4_DECODE_ERRORS - if (matchDistance == 0) printf("Invalid match distance D = 0\n"); + if (matchDistance == 0) { + printf("Invalid match distance D = 0\n"); + } #endif - if (__improbable(matchDistance == 0)) goto IN_FAIL; // 0x0000 invalid - uint8_t * ref = dst - matchDistance; + if (__improbable(matchDistance == 0)) { + goto IN_FAIL; // 0x0000 invalid + } + uint8_t * ref = dst - matchDistance; #if DEBUG_LZ4_DECODE_ERRORS - if (__improbable(ref < dst_begin)) printf("Invalid reference D=0x%llx dst_begin=%p dst=%p dst_end=%p\n",matchDistance,dst_begin,dst,dst_end); + if (__improbable(ref < dst_begin)) { + printf("Invalid reference D=0x%llx dst_begin=%p dst=%p dst_end=%p\n", matchDistance, dst_begin, dst, dst_end); + } #endif - if (__improbable(ref < dst_begin)) goto OUT_FAIL; // out of range - - // extra bytes for matchLength - if (__improbable(matchLength == 19)) - { - uint8_t s; - do { + if (__improbable(ref < dst_begin)) { + goto OUT_FAIL; // out of range + } + // extra bytes for matchLength + if (__improbable(matchLength == 19)) { + uint8_t s; + do { #if DEBUG_LZ4_DECODE_ERRORS - if (__improbable(src >= src_end)) printf("Truncated SRC match length\n"); + if (__improbable(src >= src_end)) { + printf("Truncated SRC match length\n"); + } #endif - if (__improbable(src >= src_end)) goto IN_FAIL; // unexpected end of input (1 byte needed) - s = *src++; - matchLength += s; - } while (__improbable(s == 255)); - } - - // copy match (may overlap) - if (__improbable(matchLength > (size_t)(dst_end - dst))) { - // match will take us past the end of the destination buffer, - // so we can only copy part of it. - matchLength = (uint32_t)(dst_end - dst); - for (uint32_t i=0; i= src_end)) { + goto IN_FAIL; // unexpected end of input (1 byte needed) + } + s = *src++; + matchLength += s; + } while (__improbable(s == 255)); + } + + // copy match (may overlap) + if (__improbable(matchLength > (size_t)(dst_end - dst))) { + // match will take us past the end of the destination buffer, + // so we can only copy part of it. + matchLength = (uint32_t)(dst_end - dst); + for (uint32_t i = 0; i < matchLength; ++i) { + dst[i] = ref[i]; + } + dst += matchLength; + goto OUT_FULL; + } + for (uint64_t i = 0; i < matchLength; i++) { + dst[i] = ref[i]; + } + dst += matchLength; + } + + // We reached the end of the input buffer after a full instruction OUT_FULL: - // Or we reached the end of the output buffer - *dst_ptr = dst; - *src_ptr = src; - return 0; - - // Error conditions + // Or we reached the end of the output buffer + *dst_ptr = dst; + *src_ptr = src; + return 0; + + // Error conditions OUT_FAIL: IN_FAIL: - return 1; // FAIL + return 1; // FAIL } diff --git a/osfmk/vm/lz4.h b/osfmk/vm/lz4.h index c6af5edfd..190f4a26f 100644 --- a/osfmk/vm/lz4.h +++ b/osfmk/vm/lz4.h @@ -2,7 +2,7 @@ * Copyright (c) 2016-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,7 +40,7 @@ // Represents a position in the input stream typedef struct { uint32_t offset; uint32_t word; } lz4_hash_entry_t; -static const size_t lz4_hash_table_size = LZ4_COMPRESS_HASH_ENTRIES*sizeof(lz4_hash_entry_t); +static const size_t lz4_hash_table_size = LZ4_COMPRESS_HASH_ENTRIES * sizeof(lz4_hash_entry_t); // Worker function for lz4 encode. Underlies both the buffer and stream encode operations. // Performs lz4 encoding of up to 2gb of data, updates dst_ptr and src_ptr to point to the @@ -52,15 +52,15 @@ static const size_t lz4_hash_table_size = LZ4_COMPRESS_HASH_ENTRIES*sizeof(lz4_h // If skip_final_literals is not 0, this final literal sequence is not emitted, and the src buffer is // partially encoded (the length of this literal sequence varies). extern void lz4_encode_2gb(uint8_t **dst_ptr, size_t dst_size, - const uint8_t **src_ptr, const uint8_t *src_begin, size_t src_size, - lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES],int skip_final_literals); + const uint8_t **src_ptr, const uint8_t *src_begin, size_t src_size, + lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES], int skip_final_literals); extern int lz4_decode(uint8_t **dst_ptr, uint8_t *dst_begin, uint8_t *dst_end, - const uint8_t **src_ptr, const uint8_t *src_end); + const uint8_t **src_ptr, const uint8_t *src_end); #if LZ4_ENABLE_ASSEMBLY_DECODE extern int lz4_decode_asm(uint8_t **dst_ptr, uint8_t *dst_begin, uint8_t *dst_end, - const uint8_t **src_ptr, const uint8_t *src_end); + const uint8_t **src_ptr, const uint8_t *src_end); #endif #pragma mark - Buffer interfaces @@ -71,23 +71,23 @@ static const size_t lz4_decode_scratch_size = 0; #pragma mark - Buffer interfaces (LZ4 RAW) size_t lz4raw_encode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size, - const uint8_t * __restrict src_buffer, size_t src_size, - lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES]); + const uint8_t * __restrict src_buffer, size_t src_size, + lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES]); size_t lz4raw_decode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size, - const uint8_t * __restrict src_buffer, size_t src_size, - void * __restrict work __attribute__((unused))); + const uint8_t * __restrict src_buffer, size_t src_size, + void * __restrict work __attribute__((unused))); -typedef __attribute__((__ext_vector_type__(8))) uint8_t vector_uchar8; +typedef __attribute__((__ext_vector_type__(8))) uint8_t vector_uchar8; typedef __attribute__((__ext_vector_type__(16))) uint8_t vector_uchar16; typedef __attribute__((__ext_vector_type__(32))) uint8_t vector_uchar32; typedef __attribute__((__ext_vector_type__(64))) uint8_t vector_uchar64; -typedef 
__attribute__((__ext_vector_type__(16),__aligned__(1))) uint8_t packed_uchar16; -typedef __attribute__((__ext_vector_type__(32),__aligned__(1))) uint8_t packed_uchar32; -typedef __attribute__((__ext_vector_type__(64),__aligned__(1))) uint8_t packed_uchar64; +typedef __attribute__((__ext_vector_type__(16), __aligned__(1))) uint8_t packed_uchar16; +typedef __attribute__((__ext_vector_type__(32), __aligned__(1))) uint8_t packed_uchar32; +typedef __attribute__((__ext_vector_type__(64), __aligned__(1))) uint8_t packed_uchar64; typedef __attribute__((__ext_vector_type__(4))) uint16_t vector_ushort4; -typedef __attribute__((__ext_vector_type__(4),__aligned__(2))) uint16_t packed_ushort4; +typedef __attribute__((__ext_vector_type__(4), __aligned__(2))) uint16_t packed_ushort4; typedef __attribute__((__ext_vector_type__(2))) int32_t vector_int2; typedef __attribute__((__ext_vector_type__(4))) int32_t vector_int4; @@ -98,24 +98,84 @@ typedef __attribute__((__ext_vector_type__(4))) uint32_t vector_uint4; #define UTIL_FUNCTION static inline __attribute__((__always_inline__)) __attribute__((__overloadable__)) // Load N bytes from unaligned location PTR -UTIL_FUNCTION uint16_t load2(const void * ptr) { uint16_t data; memcpy(&data,ptr,sizeof data); return data; } -UTIL_FUNCTION uint32_t load4(const void * ptr) { uint32_t data; memcpy(&data,ptr,sizeof data); return data; } -UTIL_FUNCTION uint64_t load8(const void * ptr) { uint64_t data; memcpy(&data,ptr,sizeof data); return data; } -UTIL_FUNCTION vector_uchar16 load16(const void * ptr) { return (const vector_uchar16)*(const packed_uchar16 *)ptr; } -UTIL_FUNCTION vector_uchar32 load32(const void * ptr) { return (const vector_uchar32)*(const packed_uchar32 *)ptr; } -UTIL_FUNCTION vector_uchar64 load64(const void * ptr) { return (const vector_uchar64)*(const packed_uchar64 *)ptr; } +UTIL_FUNCTION uint16_t +load2(const void * ptr) +{ + uint16_t data; memcpy(&data, ptr, sizeof data); return data; +} +UTIL_FUNCTION uint32_t +load4(const void * ptr) +{ + uint32_t data; memcpy(&data, ptr, sizeof data); return data; +} +UTIL_FUNCTION uint64_t +load8(const void * ptr) +{ + uint64_t data; memcpy(&data, ptr, sizeof data); return data; +} +UTIL_FUNCTION vector_uchar16 +load16(const void * ptr) +{ + return (const vector_uchar16)*(const packed_uchar16 *)ptr; +} +UTIL_FUNCTION vector_uchar32 +load32(const void * ptr) +{ + return (const vector_uchar32)*(const packed_uchar32 *)ptr; +} +UTIL_FUNCTION vector_uchar64 +load64(const void * ptr) +{ + return (const vector_uchar64)*(const packed_uchar64 *)ptr; +} // Store N bytes to unaligned location PTR -UTIL_FUNCTION void store2(void * ptr,uint16_t data) { memcpy(ptr,&data,sizeof data); } -UTIL_FUNCTION void store4(void * ptr,uint32_t data) { memcpy(ptr,&data,sizeof data); } -UTIL_FUNCTION void store8(void * ptr,uint64_t data) { memcpy(ptr,&data,sizeof data); } -UTIL_FUNCTION void store16(void * ptr,vector_uchar16 data) { *(packed_uchar16 *)ptr = (packed_uchar16)data; } -UTIL_FUNCTION void store32(void * ptr,vector_uchar32 data) { *(packed_uchar32 *)ptr = (packed_uchar32)data; } -UTIL_FUNCTION void store64(void * ptr,vector_uchar64 data) { *(packed_uchar64 *)ptr = (packed_uchar64)data; } +UTIL_FUNCTION void +store2(void * ptr, uint16_t data) +{ + memcpy(ptr, &data, sizeof data); +} +UTIL_FUNCTION void +store4(void * ptr, uint32_t data) +{ + memcpy(ptr, &data, sizeof data); +} +UTIL_FUNCTION void +store8(void * ptr, uint64_t data) +{ + memcpy(ptr, &data, sizeof data); +} +UTIL_FUNCTION void +store16(void * ptr, vector_uchar16 
data) +{ + *(packed_uchar16 *)ptr = (packed_uchar16)data; +} +UTIL_FUNCTION void +store32(void * ptr, vector_uchar32 data) +{ + *(packed_uchar32 *)ptr = (packed_uchar32)data; +} +UTIL_FUNCTION void +store64(void * ptr, vector_uchar64 data) +{ + *(packed_uchar64 *)ptr = (packed_uchar64)data; +} // Load+Store N bytes from unaligned locations SRC to DST. No overlap allowed. -UTIL_FUNCTION void copy8(void * dst,const void * src) { store8(dst,load8(src)); } -UTIL_FUNCTION void copy16(void * dst,const void * src) { *(packed_uchar16 *)dst = *(const packed_uchar16 *)src; } -UTIL_FUNCTION void copy32(void * dst,const void * src) { *(packed_uchar32 *)dst = *(const packed_uchar32 *)src; } +UTIL_FUNCTION void +copy8(void * dst, const void * src) +{ + store8(dst, load8(src)); +} +UTIL_FUNCTION void +copy16(void * dst, const void * src) +{ + *(packed_uchar16 *)dst = *(const packed_uchar16 *)src; +} +UTIL_FUNCTION void +copy32(void * dst, const void * src) +{ + *(packed_uchar32 *)dst = *(const packed_uchar32 *)src; +} #undef memcpy diff --git a/osfmk/vm/lz4_assembly_select.h b/osfmk/vm/lz4_assembly_select.h index c320cad03..d92a87833 100644 --- a/osfmk/vm/lz4_assembly_select.h +++ b/osfmk/vm/lz4_assembly_select.h @@ -2,7 +2,7 @@ * Copyright (c) 2016-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/vm/lz4_constants.h b/osfmk/vm/lz4_constants.h index a3d9a5256..d054465ab 100644 --- a/osfmk/vm/lz4_constants.h +++ b/osfmk/vm/lz4_constants.h @@ -2,7 +2,7 @@ * Copyright (c) 2016-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
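Back in the lz4.h helpers just above: the packed_* typedefs exist purely to drop the vector types' natural alignment to 1, so load16()/store16() and copy16() can legally touch unaligned addresses and still compile to single 16-byte moves. A minimal standalone rendering of the same idiom, for illustration (clang's ext_vector_type extension assumed, as in the header itself):

#include <stdint.h>

typedef __attribute__((__ext_vector_type__(16), __aligned__(1))) uint8_t packed16;

// One unaligned 16-byte load plus one unaligned 16-byte store, exactly the
// copy16() pattern; dst and src may be misaligned but must not overlap.
static inline void
copy16_demo(void *dst, const void *src)
{
	*(packed16 *)dst = *(const packed16 *)src;
}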
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/vm/memory_object.c b/osfmk/vm/memory_object.c index 864969eda..9a35734bc 100644 --- a/osfmk/vm/memory_object.c +++ b/osfmk/vm/memory_object.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2008 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -66,7 +66,7 @@ * Interface dependencies: */ -#include <mach/std_types.h> /* For pointer_t */ +#include <mach/std_types.h> /* For pointer_t */ #include #include @@ -82,11 +82,11 @@ /* * Implementation dependencies: */ -#include <string.h> /* For memcpy() */ +#include <string.h> /* For memcpy() */ -#include +#include #include -#include <kern/thread.h> /* For current_thread() */ +#include <kern/thread.h> /* For current_thread() */ #include #include @@ -95,18 +95,18 @@ #include #include #include -#include <vm/pmap.h> /* For pmap_clear_modify */ -#include <vm/vm_kern.h> /* For kernel_map, vm_move */ -#include <vm/vm_map.h> /* For vm_map_pageable */ -#include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */ +#include <vm/pmap.h> /* For pmap_clear_modify */ +#include <vm/vm_kern.h> /* For kernel_map, vm_move */ +#include <vm/vm_map.h> /* For vm_map_pageable */ +#include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */ #include #include #include -memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL; -decl_lck_mtx_data(, memory_manager_default_lock) +memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL; +decl_lck_mtx_data(, memory_manager_default_lock) /* @@ -127,24 +127,24 @@ decl_lck_mtx_data(, memory_manager_default_lock) * MEMORY_OBJECT_RETURN_NONE. */ -#define memory_object_should_return_page(m, should_return) \ +#define memory_object_should_return_page(m, should_return) \ (should_return != MEMORY_OBJECT_RETURN_NONE && \ (((m)->vmp_dirty || ((m)->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)))) || \ ((m)->vmp_precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \ (should_return) == MEMORY_OBJECT_RETURN_ANYTHING)) -typedef int memory_object_lock_result_t; +typedef int memory_object_lock_result_t; -#define MEMORY_OBJECT_LOCK_RESULT_DONE 0 -#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1 -#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 2 -#define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE 3 +#define MEMORY_OBJECT_LOCK_RESULT_DONE 0 +#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1 +#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 2 +#define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE 3 memory_object_lock_result_t memory_object_lock_page( - vm_page_t m, - memory_object_return_t should_return, - boolean_t should_flush, - vm_prot_t prot); + vm_page_t m, + memory_object_return_t should_return, + boolean_t should_flush, + vm_prot_t prot); /* * Routine: memory_object_lock_page @@ -161,21 +161,23 @@ memory_object_lock_result_t memory_object_lock_page( */ memory_object_lock_result_t memory_object_lock_page( - vm_page_t m, - memory_object_return_t should_return, - boolean_t should_flush, - vm_prot_t prot) + vm_page_t m, + memory_object_return_t should_return, + boolean_t should_flush, + vm_prot_t prot) { - XPR(XPR_MEMORY_OBJECT, - "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n", - m, should_return, should_flush, prot, 0); + XPR(XPR_MEMORY_OBJECT, + "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n", + m, should_return, should_flush, prot, 0); - if (m->vmp_busy || m->vmp_cleaning) - return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK); + if (m->vmp_busy || m->vmp_cleaning) { + return MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK; + } - if (m->vmp_laundry) + if (m->vmp_laundry) { vm_pageout_steal_laundry(m, FALSE); + } /* * Don't worry about pages for which the kernel @@ -188,9 +190,9 @@ memory_object_lock_page( * clean it up and there is no * relevant data to return */ - return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE); + return MEMORY_OBJECT_LOCK_RESULT_MUST_FREE; } - return (MEMORY_OBJECT_LOCK_RESULT_DONE); + return MEMORY_OBJECT_LOCK_RESULT_DONE; } assert(!m->vmp_fictitious); @@ -199,15 +201,16 @@ memory_object_lock_page( * The page is wired... 
just clean or return the page if needed. * Wired pages don't get flushed or disconnected from the pmap. */ - if (memory_object_should_return_page(m, should_return)) - return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN); + if (memory_object_should_return_page(m, should_return)) { + return MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN; + } - return (MEMORY_OBJECT_LOCK_RESULT_DONE); - } + return MEMORY_OBJECT_LOCK_RESULT_DONE; + } if (should_flush) { /* - * must do the pmap_disconnect before determining the + * must do the pmap_disconnect before determining the * need to return the page... otherwise it's possible * for the page to go from the clean to the dirty state * after we've made our decision @@ -221,8 +224,9 @@ memory_object_lock_page( * let the fault handler take care of increases * (pmap_page_protect may not increase protection). */ - if (prot != VM_PROT_NO_CHANGE) + if (prot != VM_PROT_NO_CHANGE) { pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot); + } } /* * Handle returning dirty or precious pages @@ -240,14 +244,15 @@ memory_object_lock_page( * if (!should_flush) * pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); */ - return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN); + return MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN; } /* * Handle flushing clean pages */ - if (should_flush) - return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE); + if (should_flush) { + return MEMORY_OBJECT_LOCK_RESULT_MUST_FREE; + } /* * we use to deactivate clean pages at this point, @@ -265,7 +270,7 @@ memory_object_lock_page( * return (MEMORY_OBJECT_LOCK_RESULT_MUST_DEACTIVATE); */ - return (MEMORY_OBJECT_LOCK_RESULT_DONE); + return MEMORY_OBJECT_LOCK_RESULT_DONE; } @@ -281,7 +286,7 @@ memory_object_lock_page( * forms specified by "prot"); * 2) return data to the manager (if "should_return" * is RETURN_DIRTY and the page is dirty, or - * "should_return" is RETURN_ALL and the page + * "should_return" is RETURN_ALL and the page * is either dirty or precious); and, * 3) flush the cached copy (if "should_flush" * is asserted). @@ -297,26 +302,28 @@ memory_object_lock_page( kern_return_t memory_object_lock_request( - memory_object_control_t control, - memory_object_offset_t offset, - memory_object_size_t size, - memory_object_offset_t * resid_offset, - int * io_errno, - memory_object_return_t should_return, - int flags, - vm_prot_t prot) + memory_object_control_t control, + memory_object_offset_t offset, + memory_object_size_t size, + memory_object_offset_t * resid_offset, + int * io_errno, + memory_object_return_t should_return, + int flags, + vm_prot_t prot) { - vm_object_t object; + vm_object_t object; - /* + /* * Check for bogus arguments. 
*/ object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } - if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) - return (KERN_INVALID_ARGUMENT); + if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) { + return KERN_INVALID_ARGUMENT; + } size = round_page_64(size); @@ -335,16 +342,17 @@ memory_object_lock_request( } offset -= object->paging_offset; - if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) + if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) { vm_object_reap_pages(object, REAP_DATA_FLUSH); - else + } else { (void)vm_object_update(object, offset, size, resid_offset, - io_errno, should_return, flags, prot); + io_errno, should_return, flags, prot); + } vm_object_paging_end(object); vm_object_unlock(object); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -359,21 +367,22 @@ memory_object_lock_request( * being the name. * If the decision is made to proceed the name field flag is set to * false and the reference count is decremented. If the RESPECT_CACHE - * flag is set and the reference count has gone to zero, the + * flag is set and the reference count has gone to zero, the * memory_object is checked to see if it is cacheable otherwise when * the reference count is zero, it is simply terminated. */ kern_return_t memory_object_release_name( - memory_object_control_t control, - int flags) + memory_object_control_t control, + int flags) { - vm_object_t object; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } return vm_object_release_name(object, flags); } @@ -389,16 +398,17 @@ memory_object_release_name( */ kern_return_t memory_object_destroy( - memory_object_control_t control, - kern_return_t reason) + memory_object_control_t control, + kern_return_t reason) { - vm_object_t object; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } - return (vm_object_destroy(object, reason)); + return vm_object_destroy(object, reason); } /* @@ -427,19 +437,19 @@ memory_object_destroy( boolean_t vm_object_sync( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - boolean_t should_flush, - boolean_t should_return, - boolean_t should_iosync) + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + boolean_t should_flush, + boolean_t should_return, + boolean_t should_iosync) { - boolean_t rv; + boolean_t rv; int flags; - XPR(XPR_VM_OBJECT, - "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n", - object, offset, size, should_flush, should_return); + XPR(XPR_VM_OBJECT, + "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n", + object, offset, size, should_flush, should_return); /* * Lock the object, and acquire a paging reference to @@ -450,7 +460,7 @@ vm_object_sync( vm_object_paging_begin(object); if (should_flush) { - flags = MEMORY_OBJECT_DATA_FLUSH; + flags = MEMORY_OBJECT_DATA_FLUSH; /* * This flush is from an msync(), not a truncate(), so the * contents of the file are not affected. @@ -459,18 +469,20 @@ vm_object_sync( * push the old contents to a copy object. 
*/ flags |= MEMORY_OBJECT_DATA_NO_CHANGE; - } else - flags = 0; + } else { + flags = 0; + } - if (should_iosync) - flags |= MEMORY_OBJECT_IO_SYNC; + if (should_iosync) { + flags |= MEMORY_OBJECT_IO_SYNC; + } rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL, - (should_return) ? - MEMORY_OBJECT_RETURN_ALL : - MEMORY_OBJECT_RETURN_NONE, - flags, - VM_PROT_NO_CHANGE); + (should_return) ? + MEMORY_OBJECT_RETURN_ALL : + MEMORY_OBJECT_RETURN_NONE, + flags, + VM_PROT_NO_CHANGE); vm_object_paging_end(object); @@ -481,32 +493,32 @@ vm_object_sync( #define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync) \ -MACRO_BEGIN \ - \ - int upl_flags; \ - memory_object_t pager; \ - \ - if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) { \ - vm_object_paging_begin(object); \ - vm_object_unlock(object); \ - \ - if (iosync) \ - upl_flags = UPL_MSYNC | UPL_IOSYNC; \ - else \ - upl_flags = UPL_MSYNC; \ - \ - (void) memory_object_data_return(pager, \ - po, \ - (memory_object_cluster_size_t)data_cnt, \ +MACRO_BEGIN \ + \ + int upl_flags; \ + memory_object_t pager; \ + \ + if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) { \ + vm_object_paging_begin(object); \ + vm_object_unlock(object); \ + \ + if (iosync) \ + upl_flags = UPL_MSYNC | UPL_IOSYNC; \ + else \ + upl_flags = UPL_MSYNC; \ + \ + (void) memory_object_data_return(pager, \ + po, \ + (memory_object_cluster_size_t)data_cnt, \ ro, \ ioerr, \ - FALSE, \ - FALSE, \ - upl_flags); \ - \ - vm_object_lock(object); \ - vm_object_paging_end(object); \ - } \ + FALSE, \ + FALSE, \ + upl_flags); \ + \ + vm_object_lock(object); \ + vm_object_paging_end(object); \ + } \ MACRO_END extern struct vnode * @@ -514,58 +526,55 @@ vnode_pager_lookup_vnode(memory_object_t); static int vm_object_update_extent( - vm_object_t object, - vm_object_offset_t offset, - vm_object_offset_t offset_end, - vm_object_offset_t *offset_resid, - int *io_errno, - boolean_t should_flush, - memory_object_return_t should_return, - boolean_t should_iosync, - vm_prot_t prot) + vm_object_t object, + vm_object_offset_t offset, + vm_object_offset_t offset_end, + vm_object_offset_t *offset_resid, + int *io_errno, + boolean_t should_flush, + memory_object_return_t should_return, + boolean_t should_iosync, + vm_prot_t prot) { - vm_page_t m; - int retval = 0; - vm_object_offset_t paging_offset = 0; - vm_object_offset_t next_offset = offset; - memory_object_lock_result_t page_lock_result; - memory_object_cluster_size_t data_cnt = 0; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; - int dw_count; - int dw_limit; - int dirty_count; - - dwp = &dw_array[0]; - dw_count = 0; + vm_page_t m; + int retval = 0; + vm_object_offset_t paging_offset = 0; + vm_object_offset_t next_offset = offset; + memory_object_lock_result_t page_lock_result; + memory_object_cluster_size_t data_cnt = 0; + struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; + struct vm_page_delayed_work *dwp; + int dw_count; + int dw_limit; + int dirty_count; + + dwp = &dw_array[0]; + dw_count = 0; dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); dirty_count = 0; for (; - offset < offset_end && object->resident_page_count; - offset += PAGE_SIZE_64) { - - /* + offset < offset_end && object->resident_page_count; + offset += PAGE_SIZE_64) { + /* * Limit the number of pages to be cleaned at once to a contiguous * run, or at most MAX_UPL_TRANSFER_BYTES */ if (data_cnt) { if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) { - 
if (dw_count) { vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); dwp = &dw_array[0]; dw_count = 0; } LIST_REQ_PAGEOUT_PAGES(object, data_cnt, - paging_offset, offset_resid, io_errno, should_iosync); + paging_offset, offset_resid, io_errno, should_iosync); data_cnt = 0; } } while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { - dwp->dw_mask = 0; - + page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot); if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) { @@ -578,7 +587,7 @@ vm_object_update_extent( dw_count = 0; } LIST_REQ_PAGEOUT_PAGES(object, data_cnt, - paging_offset, offset_resid, io_errno, should_iosync); + paging_offset, offset_resid, io_errno, should_iosync); /* * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will * allow the state of page 'm' to change... we need to re-lookup @@ -589,13 +598,13 @@ vm_object_update_extent( } switch (page_lock_result) { - case MEMORY_OBJECT_LOCK_RESULT_DONE: break; case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE: - if (m->vmp_dirty == TRUE) + if (m->vmp_dirty == TRUE) { dirty_count++; + } dwp->dw_mask |= DW_vm_page_free; break; @@ -604,8 +613,9 @@ vm_object_update_extent( continue; case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN: - if (data_cnt == 0) + if (data_cnt == 0) { paging_offset = offset; + } data_cnt += PAGE_SIZE; next_offset = offset + PAGE_SIZE_64; @@ -616,7 +626,6 @@ vm_object_update_extent( * no need to remove them */ if (!VM_PAGE_WIRED(m)) { - if (should_flush) { /* * add additional state for the flush @@ -647,21 +656,23 @@ vm_object_update_extent( break; } } - - if (object->pager) + + if (object->pager) { task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager)); + } /* * We have completed the scan for applicable pages. * Clean any pages that have been saved. */ - if (dw_count) + if (dw_count) { vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + } if (data_cnt) { - LIST_REQ_PAGEOUT_PAGES(object, data_cnt, - paging_offset, offset_resid, io_errno, should_iosync); + LIST_REQ_PAGEOUT_PAGES(object, data_cnt, + paging_offset, offset_resid, io_errno, should_iosync); } - return (retval); + return retval; } @@ -675,30 +686,30 @@ vm_object_update_extent( */ kern_return_t vm_object_update( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - vm_object_offset_t *resid_offset, - int *io_errno, - memory_object_return_t should_return, - int flags, - vm_prot_t protection) + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + vm_object_offset_t *resid_offset, + int *io_errno, + memory_object_return_t should_return, + int flags, + vm_prot_t protection) { - vm_object_t copy_object = VM_OBJECT_NULL; - boolean_t data_returned = FALSE; - boolean_t update_cow; - boolean_t should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE; - boolean_t should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE; - vm_fault_return_t result; - int num_of_extents; - int n; -#define MAX_EXTENTS 8 -#define EXTENT_SIZE (1024 * 1024 * 256) -#define RESIDENT_LIMIT (1024 * 32) + vm_object_t copy_object = VM_OBJECT_NULL; + boolean_t data_returned = FALSE; + boolean_t update_cow; + boolean_t should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE; + boolean_t should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? 
TRUE : FALSE; + vm_fault_return_t result; + int num_of_extents; + int n; +#define MAX_EXTENTS 8 +#define EXTENT_SIZE (1024 * 1024 * 256) +#define RESIDENT_LIMIT (1024 * 32) struct extent { - vm_object_offset_t e_base; - vm_object_offset_t e_min; - vm_object_offset_t e_max; + vm_object_offset_t e_base; + vm_object_offset_t e_min; + vm_object_offset_t e_max; } extents[MAX_EXTENTS]; /* @@ -718,34 +729,34 @@ vm_object_update( * XXX coalescing implications before doing so. */ - update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH) - && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) && - !(flags & MEMORY_OBJECT_DATA_PURGE))) - || (flags & MEMORY_OBJECT_COPY_SYNC); - + update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH) + && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) && + !(flags & MEMORY_OBJECT_DATA_PURGE))) + || (flags & MEMORY_OBJECT_COPY_SYNC); + if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) { - int collisions = 0; + int collisions = 0; - while ((copy_object = object->copy) != VM_OBJECT_NULL) { - /* + while ((copy_object = object->copy) != VM_OBJECT_NULL) { + /* * need to do a try here since we're swimming upstream * against the normal lock ordering... however, we need * to hold the object stable until we gain control of the * copy object so we have to be careful how we approach this */ - if (vm_object_lock_try(copy_object)) { - /* - * we 'won' the lock on the copy object... - * no need to hold the object lock any longer... - * take a real reference on the copy object because - * we're going to call vm_fault_page on it which may - * under certain conditions drop the lock and the paging - * reference we're about to take... the reference - * will keep the copy object from going away if that happens - */ - vm_object_unlock(object); - vm_object_reference_locked(copy_object); - break; + if (vm_object_lock_try(copy_object)) { + /* + * we 'won' the lock on the copy object... + * no need to hold the object lock any longer... + * take a real reference on the copy object because + * we're going to call vm_fault_page on it which may + * under certain conditions drop the lock and the paging + * reference we're about to take... the reference + * will keep the copy object from going away if that happens + */ + vm_object_unlock(object); + vm_object_reference_locked(copy_object); + break; } vm_object_unlock(object); @@ -756,46 +767,46 @@ vm_object_update( } } if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) { - vm_map_size_t i; - vm_map_size_t copy_size; - vm_map_offset_t copy_offset; - vm_prot_t prot; - vm_page_t page; - vm_page_t top_page; - kern_return_t error = 0; + vm_map_size_t i; + vm_map_size_t copy_size; + vm_map_offset_t copy_offset; + vm_prot_t prot; + vm_page_t page; + vm_page_t top_page; + kern_return_t error = 0; struct vm_object_fault_info fault_info = {}; if (copy_object != VM_OBJECT_NULL) { - /* + /* * translate offset with respect to shadow's offset */ - copy_offset = (offset >= copy_object->vo_shadow_offset) ? - (vm_map_offset_t)(offset - copy_object->vo_shadow_offset) : - (vm_map_offset_t) 0; + copy_offset = (offset >= copy_object->vo_shadow_offset) ? 
+ (vm_map_offset_t)(offset - copy_object->vo_shadow_offset) : + (vm_map_offset_t) 0; - if (copy_offset > copy_object->vo_size) - copy_offset = copy_object->vo_size; + if (copy_offset > copy_object->vo_size) { + copy_offset = copy_object->vo_size; + } /* * clip size with respect to shadow offset */ if (offset >= copy_object->vo_shadow_offset) { - copy_size = size; + copy_size = size; } else if (size >= copy_object->vo_shadow_offset - offset) { - copy_size = size - (copy_object->vo_shadow_offset - offset); + copy_size = size - (copy_object->vo_shadow_offset - offset); } else { - copy_size = 0; + copy_size = 0; } - + if (copy_offset + copy_size > copy_object->vo_size) { - if (copy_object->vo_size >= copy_offset) { - copy_size = copy_object->vo_size - copy_offset; + if (copy_object->vo_size >= copy_offset) { + copy_size = copy_object->vo_size - copy_offset; } else { - copy_size = 0; + copy_size = 0; } } - copy_size+=copy_offset; - + copy_size += copy_offset; } else { copy_object = object; @@ -813,23 +824,23 @@ vm_object_update( vm_object_paging_begin(copy_object); for (i = copy_offset; i < copy_size; i += PAGE_SIZE) { - RETRY_COW_OF_LOCK_REQUEST: +RETRY_COW_OF_LOCK_REQUEST: fault_info.cluster_size = (vm_size_t) (copy_size - i); assert(fault_info.cluster_size == copy_size - i); - prot = VM_PROT_WRITE|VM_PROT_READ; + prot = VM_PROT_WRITE | VM_PROT_READ; page = VM_PAGE_NULL; - result = vm_fault_page(copy_object, i, - VM_PROT_WRITE|VM_PROT_READ, - FALSE, - FALSE, /* page not looked up */ - &prot, - &page, - &top_page, - (int *)0, - &error, - FALSE, - FALSE, &fault_info); + result = vm_fault_page(copy_object, i, + VM_PROT_WRITE | VM_PROT_READ, + FALSE, + FALSE, /* page not looked up */ + &prot, + &page, + &top_page, + (int *)0, + &error, + FALSE, + FALSE, &fault_info); switch (result) { case VM_FAULT_SUCCESS: @@ -839,11 +850,10 @@ vm_object_update( vm_object_lock(copy_object); vm_object_paging_begin(copy_object); } - if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) { - + if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) { vm_page_lockspin_queues(); - - if (( !VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) { + + if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) { vm_page_deactivate(page); } vm_page_unlock_queues(); @@ -851,18 +861,18 @@ vm_object_update( PAGE_WAKEUP_DONE(page); break; case VM_FAULT_RETRY: - prot = VM_PROT_WRITE|VM_PROT_READ; + prot = VM_PROT_WRITE | VM_PROT_READ; vm_object_lock(copy_object); vm_object_paging_begin(copy_object); goto RETRY_COW_OF_LOCK_REQUEST; case VM_FAULT_INTERRUPTED: - prot = VM_PROT_WRITE|VM_PROT_READ; + prot = VM_PROT_WRITE | VM_PROT_READ; vm_object_lock(copy_object); vm_object_paging_begin(copy_object); goto RETRY_COW_OF_LOCK_REQUEST; case VM_FAULT_MEMORY_SHORTAGE: VM_PAGE_WAIT(); - prot = VM_PROT_WRITE|VM_PROT_READ; + prot = VM_PROT_WRITE | VM_PROT_READ; vm_object_lock(copy_object); vm_object_paging_begin(copy_object); goto RETRY_COW_OF_LOCK_REQUEST; @@ -870,32 +880,32 @@ vm_object_update( /* success but no VM page: fail */ vm_object_paging_end(copy_object); vm_object_unlock(copy_object); - /*FALLTHROUGH*/ + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: - if (object != copy_object) - vm_object_deallocate(copy_object); + if (object != copy_object) { + vm_object_deallocate(copy_object); + } vm_object_lock(object); goto BYPASS_COW_COPYIN; default: panic("vm_object_update: unexpected error 0x%x" - " from vm_fault_page()\n", result); + " from vm_fault_page()\n", result); } - } vm_object_paging_end(copy_object); } if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) 
{ - if (copy_object != VM_OBJECT_NULL && copy_object != object) { + if (copy_object != VM_OBJECT_NULL && copy_object != object) { vm_object_unlock(copy_object); - vm_object_deallocate(copy_object); + vm_object_deallocate(copy_object); vm_object_lock(object); } return KERN_SUCCESS; } if (copy_object != VM_OBJECT_NULL && copy_object != object) { - if ((flags & MEMORY_OBJECT_DATA_PURGE)) { + if ((flags & MEMORY_OBJECT_DATA_PURGE)) { vm_object_lock_assert_exclusive(copy_object); - copy_object->shadow_severed = TRUE; + copy_object->shadow_severed = TRUE; copy_object->shadowed = FALSE; copy_object->shadow = NULL; /* @@ -904,7 +914,7 @@ vm_object_update( vm_object_deallocate(object); } vm_object_unlock(copy_object); - vm_object_deallocate(copy_object); + vm_object_deallocate(copy_object); vm_object_lock(object); } BYPASS_COW_COPYIN: @@ -917,9 +927,9 @@ BYPASS_COW_COPYIN: * the page which means the resident queue can change which * means we can't walk the queue as we process the pages * we also want to do the processing in offset order to allow - * 'runs' of pages to be collected if we're being told to + * 'runs' of pages to be collected if we're being told to * flush to disk... the resident page queue is NOT ordered. - * + * * a temporary solution (until we figure out how to deal with * large address spaces more generically) is to pre-flight * the resident page queue (if it's small enough) and develop @@ -933,12 +943,12 @@ BYPASS_COW_COPYIN: * is not a theoretical problem */ - if ((object->resident_page_count < RESIDENT_LIMIT) && - (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) { - vm_page_t next; - vm_object_offset_t start; - vm_object_offset_t end; - vm_object_size_t e_mask; + if ((object->resident_page_count < RESIDENT_LIMIT) && + (atop_64(size) > (unsigned)(object->resident_page_count / (8 * MAX_EXTENTS)))) { + vm_page_t next; + vm_object_offset_t start; + vm_object_offset_t end; + vm_object_size_t e_mask; vm_page_t m; start = offset; @@ -952,35 +962,36 @@ BYPASS_COW_COPYIN: next = (vm_page_t) vm_page_queue_next(&m->vmp_listq); if ((m->vmp_offset >= start) && (m->vmp_offset < end)) { - /* + /* * this is a page we're interested in * try to fit it into a current extent */ - for (n = 0; n < num_of_extents; n++) { - if ((m->vmp_offset & e_mask) == extents[n].e_base) { - /* + for (n = 0; n < num_of_extents; n++) { + if ((m->vmp_offset & e_mask) == extents[n].e_base) { + /* * use (PAGE_SIZE - 1) to determine the * max offset so that we don't wrap if * we're at the last page of the space */ - if (m->vmp_offset < extents[n].e_min) - extents[n].e_min = m->vmp_offset; - else if ((m->vmp_offset + (PAGE_SIZE - 1)) > extents[n].e_max) - extents[n].e_max = m->vmp_offset + (PAGE_SIZE - 1); - break; + if (m->vmp_offset < extents[n].e_min) { + extents[n].e_min = m->vmp_offset; + } else if ((m->vmp_offset + (PAGE_SIZE - 1)) > extents[n].e_max) { + extents[n].e_max = m->vmp_offset + (PAGE_SIZE - 1); + } + break; } } if (n == num_of_extents) { - /* + /* * didn't find a current extent that can encompass * this page */ - if (n < MAX_EXTENTS) { - /* - * if we still have room, + if (n < MAX_EXTENTS) { + /* + * if we still have room, * create a new extent */ - extents[n].e_base = m->vmp_offset & e_mask; + extents[n].e_base = m->vmp_offset & e_mask; extents[n].e_min = m->vmp_offset; extents[n].e_max = m->vmp_offset + (PAGE_SIZE - 1); @@ -989,17 +1000,19 @@ BYPASS_COW_COPYIN: /* * no room to create a new extent... 
* fall back to a single extent based - * on the min and max page offsets + * on the min and max page offsets * we find in the range we're interested in... * first, look through the extent list and * develop the overall min and max for the * pages we've looked at up to this point - */ - for (n = 1; n < num_of_extents; n++) { - if (extents[n].e_min < extents[0].e_min) - extents[0].e_min = extents[n].e_min; - if (extents[n].e_max > extents[0].e_max) - extents[0].e_max = extents[n].e_max; + */ + for (n = 1; n < num_of_extents; n++) { + if (extents[n].e_min < extents[0].e_min) { + extents[0].e_min = extents[n].e_min; + } + if (extents[n].e_max > extents[0].e_max) { + extents[0].e_max = extents[n].e_max; + } } /* * now setup to run through the remaining pages @@ -1022,49 +1035,52 @@ BYPASS_COW_COPYIN: m = next; } } else { - extents[0].e_min = offset; + extents[0].e_min = offset; extents[0].e_max = offset + (size - 1); num_of_extents = 1; } for (n = 0; n < num_of_extents; n++) { - if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno, - should_flush, should_return, should_iosync, protection)) - data_returned = TRUE; + if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno, + should_flush, should_return, should_iosync, protection)) { + data_returned = TRUE; + } } - return (data_returned); + return data_returned; } static kern_return_t vm_object_set_attributes_common( - vm_object_t object, - boolean_t may_cache, + vm_object_t object, + boolean_t may_cache, memory_object_copy_strategy_t copy_strategy) { - boolean_t object_became_ready; + boolean_t object_became_ready; - XPR(XPR_MEMORY_OBJECT, + XPR(XPR_MEMORY_OBJECT, "m_o_set_attr_com, object 0x%X flg %x strat %d\n", - object, (may_cache&1), copy_strategy, 0, 0); + object, (may_cache & 1), copy_strategy, 0, 0); - if (object == VM_OBJECT_NULL) - return(KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } /* * Verify the attributes of importance */ - switch(copy_strategy) { - case MEMORY_OBJECT_COPY_NONE: - case MEMORY_OBJECT_COPY_DELAY: - break; - default: - return(KERN_INVALID_ARGUMENT); + switch (copy_strategy) { + case MEMORY_OBJECT_COPY_NONE: + case MEMORY_OBJECT_COPY_DELAY: + break; + default: + return KERN_INVALID_ARGUMENT; } - if (may_cache) + if (may_cache) { may_cache = TRUE; + } vm_object_lock(object); @@ -1088,45 +1104,46 @@ vm_object_set_attributes_common( vm_object_unlock(object); - return(KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t memory_object_synchronize_completed( - __unused memory_object_control_t control, - __unused memory_object_offset_t offset, - __unused memory_object_size_t length) + __unused memory_object_control_t control, + __unused memory_object_offset_t offset, + __unused memory_object_size_t length) { - panic("memory_object_synchronize_completed no longer supported\n"); - return(KERN_FAILURE); + panic("memory_object_synchronize_completed no longer supported\n"); + return KERN_FAILURE; } /* * Set the memory object attribute as provided. * - * XXX This routine cannot be completed until the vm_msync, clean + * XXX This routine cannot be completed until the vm_msync, clean * in place, and cluster work is completed. See ifdef notyet * below and note that vm_object_set_attributes_common() * may have to be expanded. 
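To make the extent pre-flight in vm_object_update() above concrete: with EXTENT_SIZE of 256MB, e_mask (presumably the complement of EXTENT_SIZE - 1, matching how e_base is derived) folds every page offset to a 256MB-aligned base, and a page joins extent n only when its folded base equals extents[n].e_base. A standalone check of that arithmetic, with illustrative offsets:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint64_t extent_size = 1024ULL * 1024 * 256;  // EXTENT_SIZE above
	const uint64_t e_mask = ~(extent_size - 1);

	uint64_t a = 0x10003000;   // both inside the [256MB, 512MB) window
	uint64_t b = 0x1ffff000;
	uint64_t c = 0x20000000;   // first page of the next window

	assert((a & e_mask) == (b & e_mask));  // same extent: e_base 0x10000000
	assert((a & e_mask) != (c & e_mask));  // c would open a new extent
	return 0;
}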
*/ kern_return_t memory_object_change_attributes( - memory_object_control_t control, - memory_object_flavor_t flavor, - memory_object_info_t attributes, - mach_msg_type_number_t count) + memory_object_control_t control, + memory_object_flavor_t flavor, + memory_object_info_t attributes, + mach_msg_type_number_t count) { - vm_object_t object; - kern_return_t result = KERN_SUCCESS; - boolean_t may_cache; - boolean_t invalidate; - memory_object_copy_strategy_t copy_strategy; + vm_object_t object; + kern_return_t result = KERN_SUCCESS; + boolean_t may_cache; + boolean_t invalidate; + memory_object_copy_strategy_t copy_strategy; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } vm_object_lock(object); @@ -1135,99 +1152,100 @@ memory_object_change_attributes( #if notyet invalidate = object->invalidate; #endif - vm_object_unlock(object); + vm_object_unlock(object); switch (flavor) { - case OLD_MEMORY_OBJECT_BEHAVIOR_INFO: - { - old_memory_object_behave_info_t behave; + case OLD_MEMORY_OBJECT_BEHAVIOR_INFO: + { + old_memory_object_behave_info_t behave; - if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) { - result = KERN_INVALID_ARGUMENT; - break; - } + if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } - behave = (old_memory_object_behave_info_t) attributes; + behave = (old_memory_object_behave_info_t) attributes; invalidate = behave->invalidate; copy_strategy = behave->copy_strategy; break; - } + } - case MEMORY_OBJECT_BEHAVIOR_INFO: - { - memory_object_behave_info_t behave; + case MEMORY_OBJECT_BEHAVIOR_INFO: + { + memory_object_behave_info_t behave; - if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) { - result = KERN_INVALID_ARGUMENT; - break; - } + if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } - behave = (memory_object_behave_info_t) attributes; + behave = (memory_object_behave_info_t) attributes; invalidate = behave->invalidate; copy_strategy = behave->copy_strategy; break; - } + } - case MEMORY_OBJECT_PERFORMANCE_INFO: - { - memory_object_perf_info_t perf; + case MEMORY_OBJECT_PERFORMANCE_INFO: + { + memory_object_perf_info_t perf; - if (count != MEMORY_OBJECT_PERF_INFO_COUNT) { - result = KERN_INVALID_ARGUMENT; - break; - } + if (count != MEMORY_OBJECT_PERF_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } - perf = (memory_object_perf_info_t) attributes; + perf = (memory_object_perf_info_t) attributes; may_cache = perf->may_cache; break; - } + } - case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO: - { - old_memory_object_attr_info_t attr; + case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO: + { + old_memory_object_attr_info_t attr; - if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) { - result = KERN_INVALID_ARGUMENT; - break; - } + if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } attr = (old_memory_object_attr_info_t) attributes; - may_cache = attr->may_cache; - copy_strategy = attr->copy_strategy; + may_cache = attr->may_cache; + copy_strategy = attr->copy_strategy; break; - } + } - case MEMORY_OBJECT_ATTRIBUTE_INFO: - { - memory_object_attr_info_t attr; + case MEMORY_OBJECT_ATTRIBUTE_INFO: + { + memory_object_attr_info_t attr; - if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) { - result = KERN_INVALID_ARGUMENT; - break; - } + if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) { + result = KERN_INVALID_ARGUMENT; + break; + } attr = 
(memory_object_attr_info_t) attributes; copy_strategy = attr->copy_strategy; - may_cache = attr->may_cache_object; + may_cache = attr->may_cache_object; break; - } + } - default: + default: result = KERN_INVALID_ARGUMENT; break; } - if (result != KERN_SUCCESS) - return(result); + if (result != KERN_SUCCESS) { + return result; + } if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) { copy_strategy = MEMORY_OBJECT_COPY_DELAY; @@ -1237,31 +1255,32 @@ memory_object_change_attributes( * XXX may_cache may become a tri-valued variable to handle * XXX uncache if not in use. */ - return (vm_object_set_attributes_common(object, - may_cache, - copy_strategy)); + return vm_object_set_attributes_common(object, + may_cache, + copy_strategy); } kern_return_t memory_object_get_attributes( - memory_object_control_t control, - memory_object_flavor_t flavor, - memory_object_info_t attributes, /* pointer to OUT array */ - mach_msg_type_number_t *count) /* IN/OUT */ + memory_object_control_t control, + memory_object_flavor_t flavor, + memory_object_info_t attributes, /* pointer to OUT array */ + mach_msg_type_number_t *count) /* IN/OUT */ { - kern_return_t ret = KERN_SUCCESS; - vm_object_t object; + kern_return_t ret = KERN_SUCCESS; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } - vm_object_lock(object); + vm_object_lock(object); switch (flavor) { - case OLD_MEMORY_OBJECT_BEHAVIOR_INFO: - { - old_memory_object_behave_info_t behave; + case OLD_MEMORY_OBJECT_BEHAVIOR_INFO: + { + old_memory_object_behave_info_t behave; if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) { ret = KERN_INVALID_ARGUMENT; @@ -1271,42 +1290,42 @@ memory_object_get_attributes( behave = (old_memory_object_behave_info_t) attributes; behave->copy_strategy = object->copy_strategy; behave->temporary = FALSE; -#if notyet /* remove when vm_msync complies and clean in place fini */ - behave->invalidate = object->invalidate; +#if notyet /* remove when vm_msync complies and clean in place fini */ + behave->invalidate = object->invalidate; #else behave->invalidate = FALSE; #endif *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT; break; - } + } - case MEMORY_OBJECT_BEHAVIOR_INFO: - { - memory_object_behave_info_t behave; + case MEMORY_OBJECT_BEHAVIOR_INFO: + { + memory_object_behave_info_t behave; if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) { - ret = KERN_INVALID_ARGUMENT; - break; - } + ret = KERN_INVALID_ARGUMENT; + break; + } - behave = (memory_object_behave_info_t) attributes; - behave->copy_strategy = object->copy_strategy; + behave = (memory_object_behave_info_t) attributes; + behave->copy_strategy = object->copy_strategy; behave->temporary = FALSE; -#if notyet /* remove when vm_msync complies and clean in place fini */ - behave->invalidate = object->invalidate; +#if notyet /* remove when vm_msync complies and clean in place fini */ + behave->invalidate = object->invalidate; #else behave->invalidate = FALSE; #endif behave->advisory_pageout = FALSE; behave->silent_overwrite = FALSE; - *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT; + *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT; break; - } + } - case MEMORY_OBJECT_PERFORMANCE_INFO: - { - memory_object_perf_info_t perf; + case MEMORY_OBJECT_PERFORMANCE_INFO: + { + memory_object_perf_info_t perf; if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) { ret = KERN_INVALID_ARGUMENT; @@ -1319,69 +1338,69 @@ memory_object_get_attributes( *count = 
MEMORY_OBJECT_PERF_INFO_COUNT; break; - } + } - case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO: - { - old_memory_object_attr_info_t attr; + case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO: + { + old_memory_object_attr_info_t attr; - if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) { - ret = KERN_INVALID_ARGUMENT; - break; - } + if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) { + ret = KERN_INVALID_ARGUMENT; + break; + } - attr = (old_memory_object_attr_info_t) attributes; - attr->may_cache = object->can_persist; - attr->copy_strategy = object->copy_strategy; + attr = (old_memory_object_attr_info_t) attributes; + attr->may_cache = object->can_persist; + attr->copy_strategy = object->copy_strategy; - *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT; - break; - } + *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT; + break; + } - case MEMORY_OBJECT_ATTRIBUTE_INFO: - { - memory_object_attr_info_t attr; + case MEMORY_OBJECT_ATTRIBUTE_INFO: + { + memory_object_attr_info_t attr; - if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) { - ret = KERN_INVALID_ARGUMENT; - break; - } + if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) { + ret = KERN_INVALID_ARGUMENT; + break; + } - attr = (memory_object_attr_info_t) attributes; - attr->copy_strategy = object->copy_strategy; + attr = (memory_object_attr_info_t) attributes; + attr->copy_strategy = object->copy_strategy; attr->cluster_size = PAGE_SIZE; - attr->may_cache_object = object->can_persist; + attr->may_cache_object = object->can_persist; attr->temporary = FALSE; - *count = MEMORY_OBJECT_ATTR_INFO_COUNT; - break; - } + *count = MEMORY_OBJECT_ATTR_INFO_COUNT; + break; + } - default: + default: ret = KERN_INVALID_ARGUMENT; break; } - vm_object_unlock(object); + vm_object_unlock(object); - return(ret); + return ret; } kern_return_t memory_object_iopl_request( - ipc_port_t port, - memory_object_offset_t offset, - upl_size_t *upl_size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - upl_control_flags_t *flags, - vm_tag_t tag) + ipc_port_t port, + memory_object_offset_t offset, + upl_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + upl_control_flags_t *flags, + vm_tag_t tag) { - vm_object_t object; - kern_return_t ret; - upl_control_flags_t caller_flags; + vm_object_t object; + kern_return_t ret; + upl_control_flags_t caller_flags; caller_flags = *flags; @@ -1394,59 +1413,66 @@ memory_object_iopl_request( } if (ip_kotype(port) == IKOT_NAMED_ENTRY) { - vm_named_entry_t named_entry; + vm_named_entry_t named_entry; named_entry = (vm_named_entry_t)port->ip_kobject; /* a few checks to make sure user is obeying rules */ - if(*upl_size == 0) { - if(offset >= named_entry->size) - return(KERN_INVALID_RIGHT); + if (*upl_size == 0) { + if (offset >= named_entry->size) { + return KERN_INVALID_RIGHT; + } *upl_size = (upl_size_t)(named_entry->size - offset); - if (*upl_size != named_entry->size - offset) + if (*upl_size != named_entry->size - offset) { return KERN_INVALID_ARGUMENT; + } } - if(caller_flags & UPL_COPYOUT_FROM) { - if((named_entry->protection & VM_PROT_READ) - != VM_PROT_READ) { - return(KERN_INVALID_RIGHT); + if (caller_flags & UPL_COPYOUT_FROM) { + if ((named_entry->protection & VM_PROT_READ) + != VM_PROT_READ) { + return KERN_INVALID_RIGHT; } } else { - if((named_entry->protection & - (VM_PROT_READ | VM_PROT_WRITE)) - != (VM_PROT_READ | VM_PROT_WRITE)) { - return(KERN_INVALID_RIGHT); + if ((named_entry->protection & + (VM_PROT_READ | VM_PROT_WRITE)) + != (VM_PROT_READ | VM_PROT_WRITE)) { + 
return KERN_INVALID_RIGHT; } } - if(named_entry->size < (offset + *upl_size)) - return(KERN_INVALID_ARGUMENT); + if (named_entry->size < (offset + *upl_size)) { + return KERN_INVALID_ARGUMENT; + } /* the callers parameter offset is defined to be the */ /* offset from beginning of named entry offset in object */ offset = offset + named_entry->offset; if (named_entry->is_sub_map || - named_entry->is_copy) + named_entry->is_copy) { return KERN_INVALID_ARGUMENT; - + } + named_entry_lock(named_entry); object = named_entry->backing.object; vm_object_reference(object); named_entry_unlock(named_entry); } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) { - memory_object_control_t control; + memory_object_control_t control; control = (memory_object_control_t) port; - if (control == NULL) - return (KERN_INVALID_ARGUMENT); + if (control == NULL) { + return KERN_INVALID_ARGUMENT; + } object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } vm_object_reference(object); } else { return KERN_INVALID_ARGUMENT; } - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } if (!object->private) { if (object->phys_contiguous) { @@ -1459,18 +1485,18 @@ memory_object_iopl_request( } ret = vm_object_iopl_request(object, - offset, - *upl_size, - upl_ptr, - user_page_list, - page_list_count, - caller_flags, - tag); + offset, + *upl_size, + upl_ptr, + user_page_list, + page_list_count, + caller_flags, + tag); vm_object_deallocate(object); return ret; } -/* +/* * Routine: memory_object_upl_request [interface] * Purpose: * Cause the population of a portion of a vm_object. 
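/*
 * Illustrative sketch, not part of this patch: a minimal kernel-side
 * caller of memory_object_upl_request() as declared below.  The helper
 * name, the UPL_RET_ONLY_ABSENT flag choice and the VM_KERN_MEMORY_NONE
 * tag are assumptions for illustration, not taken from this change.
 */
static kern_return_t
grab_one_page_upl(memory_object_control_t control,
    memory_object_offset_t offset)
{
	upl_t           upl = UPL_NULL;
	kern_return_t   kr;

	/* Populate a single-page UPL covering [offset, offset + PAGE_SIZE). */
	kr = memory_object_upl_request(control, offset, PAGE_SIZE,
	    &upl, NULL, NULL, UPL_RET_ONLY_ABSENT, VM_KERN_MEMORY_NONE);
	if (kr != KERN_SUCCESS) {
		return kr;      /* e.g. KERN_TERMINATED if the object is gone */
	}

	/* ... operate on the pages the UPL describes ... */

	upl_commit(upl, NULL, 0);
	upl_deallocate(upl);
	return KERN_SUCCESS;
}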
@@ -1481,32 +1507,33 @@ memory_object_iopl_request( kern_return_t memory_object_upl_request( - memory_object_control_t control, - memory_object_offset_t offset, - upl_size_t size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - int cntrl_flags, - int tag) + memory_object_control_t control, + memory_object_offset_t offset, + upl_size_t size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int cntrl_flags, + int tag) { - vm_object_t object; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_TERMINATED); + if (object == VM_OBJECT_NULL) { + return KERN_TERMINATED; + } return vm_object_upl_request(object, - offset, - size, - upl_ptr, - user_page_list, - page_list_count, - (upl_control_flags_t)(unsigned int) cntrl_flags, - tag); + offset, + size, + upl_ptr, + user_page_list, + page_list_count, + (upl_control_flags_t)(unsigned int) cntrl_flags, + tag); } -/* +/* * Routine: memory_object_super_upl_request [interface] * Purpose: * Cause the population of a portion of a vm_object @@ -1520,56 +1547,58 @@ memory_object_upl_request( kern_return_t memory_object_super_upl_request( memory_object_control_t control, - memory_object_offset_t offset, - upl_size_t size, - upl_size_t super_cluster, - upl_t *upl, - upl_page_info_t *user_page_list, - unsigned int *page_list_count, - int cntrl_flags, - int tag) + memory_object_offset_t offset, + upl_size_t size, + upl_size_t super_cluster, + upl_t *upl, + upl_page_info_t *user_page_list, + unsigned int *page_list_count, + int cntrl_flags, + int tag) { - vm_object_t object; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } return vm_object_super_upl_request(object, - offset, - size, - super_cluster, - upl, - user_page_list, - page_list_count, - (upl_control_flags_t)(unsigned int) cntrl_flags, - tag); + offset, + size, + super_cluster, + upl, + user_page_list, + page_list_count, + (upl_control_flags_t)(unsigned int) cntrl_flags, + tag); } kern_return_t memory_object_cluster_size( - memory_object_control_t control, - memory_object_offset_t *start, - vm_size_t *length, - uint32_t *io_streaming, + memory_object_control_t control, + memory_object_offset_t *start, + vm_size_t *length, + uint32_t *io_streaming, memory_object_fault_info_t mo_fault_info) { - vm_object_t object; - vm_object_fault_info_t fault_info; + vm_object_t object; + vm_object_fault_info_t fault_info; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL || object->paging_offset > *start) + if (object == VM_OBJECT_NULL || object->paging_offset > *start) { return KERN_INVALID_ARGUMENT; + } *start -= object->paging_offset; fault_info = (vm_object_fault_info_t)(uintptr_t) mo_fault_info; vm_object_cluster_size(object, - (vm_object_offset_t *)start, - length, - fault_info, - io_streaming); + (vm_object_offset_t *)start, + length, + fault_info, + io_streaming); *start += object->paging_offset; @@ -1587,8 +1616,8 @@ memory_object_cluster_size( */ kern_return_t host_default_memory_manager( - host_priv_t host_priv, - memory_object_default_t *default_manager, + host_priv_t host_priv, + memory_object_default_t *default_manager, __unused memory_object_cluster_size_t cluster_size) { memory_object_default_t current_manager; @@ -1596,8 +1625,9 @@ 
host_default_memory_manager( memory_object_default_t returned_manager; kern_return_t result = KERN_SUCCESS; - if (host_priv == HOST_PRIV_NULL) - return(KERN_INVALID_HOST); + if (host_priv == HOST_PRIV_NULL) { + return KERN_INVALID_HOST; + } assert(host_priv == &realhost); @@ -1628,8 +1658,9 @@ host_default_memory_manager( */ if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) { result = vm_pageout_internal_start(); - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { goto out; + } } /* @@ -1655,16 +1686,15 @@ host_default_memory_manager( * reactivate all the throttled pages (i.e. dirty pages with * no pager). */ - if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) - { + if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) { vm_page_reactivate_all_throttled(); } } - out: +out: lck_mtx_unlock(&memory_manager_default_lock); *default_manager = returned_manager; - return(result); + return result; } /* @@ -1686,9 +1716,9 @@ memory_manager_default_reference(void) wait_result_t res; res = lck_mtx_sleep(&memory_manager_default_lock, - LCK_SLEEP_DEFAULT, - (event_t) &memory_manager_default, - THREAD_UNINT); + LCK_SLEEP_DEFAULT, + (event_t) &memory_manager_default, + THREAD_UNINT); assert(res == THREAD_AWAKENED); current_manager = memory_manager_default; } @@ -1718,16 +1748,17 @@ memory_manager_default_check(void) lck_mtx_lock(&memory_manager_default_lock); current = memory_manager_default; if (current == MEMORY_OBJECT_DEFAULT_NULL) { - static boolean_t logged; /* initialized to 0 */ - boolean_t complain = !logged; + static boolean_t logged; /* initialized to 0 */ + boolean_t complain = !logged; logged = TRUE; lck_mtx_unlock(&memory_manager_default_lock); - if (complain) + if (complain) { printf("Warning: No default memory manager\n"); - return(KERN_FAILURE); + } + return KERN_FAILURE; } else { lck_mtx_unlock(&memory_manager_default_lock); - return(KERN_SUCCESS); + return KERN_SUCCESS; } } @@ -1745,83 +1776,89 @@ memory_manager_default_init(void) kern_return_t memory_object_page_op( - memory_object_control_t control, - memory_object_offset_t offset, - int ops, - ppnum_t *phys_entry, - int *flags) + memory_object_control_t control, + memory_object_offset_t offset, + int ops, + ppnum_t *phys_entry, + int *flags) { - vm_object_t object; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } return vm_object_page_op(object, offset, ops, phys_entry, flags); } /* - * memory_object_range_op offers performance enhancement over - * memory_object_page_op for page_op functions which do not require page - * level state to be returned from the call. Page_op was created to provide - * a low-cost alternative to page manipulation via UPLs when only a single - * page was involved. The range_op call establishes the ability in the _op + * memory_object_range_op offers performance enhancement over + * memory_object_page_op for page_op functions which do not require page + * level state to be returned from the call. Page_op was created to provide + * a low-cost alternative to page manipulation via UPLs when only a single + * page was involved. The range_op call establishes the ability in the _op * family of functions to work on multiple pages where the lack of page level * state handling allows the caller to avoid the overhead of the upl structures. 
*/ kern_return_t memory_object_range_op( - memory_object_control_t control, - memory_object_offset_t offset_beg, - memory_object_offset_t offset_end, + memory_object_control_t control, + memory_object_offset_t offset_beg, + memory_object_offset_t offset_end, int ops, int *range) { - vm_object_t object; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } return vm_object_range_op(object, - offset_beg, - offset_end, - ops, - (uint32_t *) range); + offset_beg, + offset_end, + ops, + (uint32_t *) range); } void memory_object_mark_used( - memory_object_control_t control) + memory_object_control_t control) { - vm_object_t object; + vm_object_t object; - if (control == NULL) + if (control == NULL) { return; + } object = memory_object_control_to_vm_object(control); - if (object != VM_OBJECT_NULL) + if (object != VM_OBJECT_NULL) { vm_object_cache_remove(object); + } } void memory_object_mark_unused( - memory_object_control_t control, - __unused boolean_t rage) + memory_object_control_t control, + __unused boolean_t rage) { - vm_object_t object; + vm_object_t object; - if (control == NULL) + if (control == NULL) { return; + } object = memory_object_control_to_vm_object(control); - if (object != VM_OBJECT_NULL) + if (object != VM_OBJECT_NULL) { vm_object_cache_add(object); + } } void @@ -1830,8 +1867,9 @@ memory_object_mark_io_tracking( { vm_object_t object; - if (control == NULL) + if (control == NULL) { return; + } object = memory_object_control_to_vm_object(control); if (object != VM_OBJECT_NULL) { @@ -1845,12 +1883,13 @@ memory_object_mark_io_tracking( void memory_object_mark_eligible_for_secluded( memory_object_control_t control, - boolean_t eligible_for_secluded) + boolean_t eligible_for_secluded) { vm_object_t object; - if (control == NULL) + if (control == NULL) { return; + } object = memory_object_control_to_vm_object(control); if (object == VM_OBJECT_NULL) { @@ -1864,7 +1903,7 @@ memory_object_mark_eligible_for_secluded( object->eligible_for_secluded = TRUE; vm_page_secluded.eligible_for_secluded += object->resident_page_count; } else if (!eligible_for_secluded && - object->eligible_for_secluded) { + object->eligible_for_secluded) { object->eligible_for_secluded = FALSE; vm_page_secluded.eligible_for_secluded -= object->resident_page_count; if (object->resident_page_count) { @@ -1878,33 +1917,36 @@ memory_object_mark_eligible_for_secluded( kern_return_t memory_object_pages_resident( - memory_object_control_t control, - boolean_t * has_pages_resident) + memory_object_control_t control, + boolean_t * has_pages_resident) { - vm_object_t object; + vm_object_t object; *has_pages_resident = FALSE; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (object->resident_page_count) + if (object->resident_page_count) { *has_pages_resident = TRUE; - - return (KERN_SUCCESS); + } + + return KERN_SUCCESS; } kern_return_t memory_object_signed( - memory_object_control_t control, - boolean_t is_signed) + memory_object_control_t control, + boolean_t is_signed) { - vm_object_t object; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return KERN_INVALID_ARGUMENT; + } vm_object_lock(object); object->code_signed = is_signed; 
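/*
 * Illustrative sketch, not part of this patch: the block comment above
 * contrasts memory_object_page_op() with memory_object_range_op().
 * The helper name is hypothetical; UPL_POP_PHYSICAL and UPL_ROP_PRESENT
 * come from osfmk/mach/memory_object_types.h, and `control` is assumed
 * to reference a live VM object.
 */
static void
probe_pages(memory_object_control_t control)
{
	ppnum_t         phys = 0;
	int             flags = 0;
	int             range = 0;

	/* Single page: returns per-page state (physical page number, flags). */
	(void) memory_object_page_op(control, 0, UPL_POP_PHYSICAL,
	    &phys, &flags);

	/*
	 * Range form: scans many pages without returning per-page state,
	 * avoiding UPL setup; `range` reports how far the scan advanced.
	 */
	(void) memory_object_range_op(control, 0, 16 * PAGE_SIZE_64,
	    UPL_ROP_PRESENT, &range);
}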
@@ -1915,14 +1957,15 @@ memory_object_signed( boolean_t memory_object_is_signed( - memory_object_control_t control) + memory_object_control_t control) { - boolean_t is_signed; - vm_object_t object; + boolean_t is_signed; + vm_object_t object; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return FALSE; + } vm_object_lock_shared(object); is_signed = object->code_signed; @@ -1933,13 +1976,14 @@ memory_object_is_signed( boolean_t memory_object_is_shared_cache( - memory_object_control_t control) + memory_object_control_t control) { - vm_object_t object = VM_OBJECT_NULL; + vm_object_t object = VM_OBJECT_NULL; object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return FALSE; + } return object->object_is_shared_cache; } @@ -1949,10 +1993,10 @@ static zone_t mem_obj_control_zone; __private_extern__ void memory_object_control_bootstrap(void) { - int i; + int i; - i = (vm_size_t) sizeof (struct memory_object_control); - mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control"); + i = (vm_size_t) sizeof(struct memory_object_control); + mem_obj_control_zone = zinit(i, 8192 * i, 4096, "mem_obj_control"); zone_change(mem_obj_control_zone, Z_CALLERACCT, FALSE); zone_change(mem_obj_control_zone, Z_NOENCRYPT, TRUE); return; @@ -1960,8 +2004,8 @@ memory_object_control_bootstrap(void) __private_extern__ memory_object_control_t memory_object_control_allocate( - vm_object_t object) -{ + vm_object_t object) +{ memory_object_control_t control; control = (memory_object_control_t)zalloc(mem_obj_control_zone); @@ -1969,28 +2013,29 @@ memory_object_control_allocate( control->moc_object = object; control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */ } - return (control); + return control; } __private_extern__ void memory_object_control_collapse( - memory_object_control_t control, - vm_object_t object) -{ + memory_object_control_t control, + vm_object_t object) +{ assert((control->moc_object != VM_OBJECT_NULL) && - (control->moc_object != object)); + (control->moc_object != object)); control->moc_object = object; } __private_extern__ vm_object_t memory_object_control_to_vm_object( - memory_object_control_t control) + memory_object_control_t control) { if (control == MEMORY_OBJECT_CONTROL_NULL || - control->moc_ikot != IKOT_MEM_OBJ_CONTROL) + control->moc_ikot != IKOT_MEM_OBJ_CONTROL) { return VM_OBJECT_NULL; + } - return (control->moc_object); + return control->moc_object; } __private_extern__ vm_object_t @@ -2011,7 +2056,7 @@ memory_object_to_vm_object( memory_object_control_t convert_port_to_mo_control( - __unused mach_port_t port) + __unused mach_port_t port) { return MEMORY_OBJECT_CONTROL_NULL; } @@ -2019,14 +2064,14 @@ convert_port_to_mo_control( mach_port_t convert_mo_control_to_port( - __unused memory_object_control_t control) + __unused memory_object_control_t control) { return MACH_PORT_NULL; } void memory_object_control_reference( - __unused memory_object_control_t control) + __unused memory_object_control_t control) { return; } @@ -2038,14 +2083,14 @@ memory_object_control_reference( */ void memory_object_control_deallocate( - memory_object_control_t control) + memory_object_control_t control) { zfree(mem_obj_control_zone, control); } void memory_object_control_disable( - memory_object_control_t control) + memory_object_control_t control) { assert(control->moc_object != VM_OBJECT_NULL); control->moc_object = VM_OBJECT_NULL; @@ -2067,22 +2112,23 @@ 
memory_object_default_deallocate( memory_object_t convert_port_to_memory_object( - __unused mach_port_t port) + __unused mach_port_t port) { - return (MEMORY_OBJECT_NULL); + return MEMORY_OBJECT_NULL; } mach_port_t convert_memory_object_to_port( - __unused memory_object_t object) + __unused memory_object_t object) { - return (MACH_PORT_NULL); + return MACH_PORT_NULL; } /* Routine memory_object_reference */ -void memory_object_reference( +void +memory_object_reference( memory_object_t memory_object) { (memory_object->mo_pager_ops->memory_object_reference)( @@ -2090,16 +2136,18 @@ void memory_object_reference( } /* Routine memory_object_deallocate */ -void memory_object_deallocate( +void +memory_object_deallocate( memory_object_t memory_object) { (memory_object->mo_pager_ops->memory_object_deallocate)( - memory_object); + memory_object); } /* Routine memory_object_init */ -kern_return_t memory_object_init +kern_return_t +memory_object_init ( memory_object_t memory_object, memory_object_control_t memory_control, @@ -2113,7 +2161,8 @@ kern_return_t memory_object_init } /* Routine memory_object_terminate */ -kern_return_t memory_object_terminate +kern_return_t +memory_object_terminate ( memory_object_t memory_object ) @@ -2123,7 +2172,8 @@ kern_return_t memory_object_terminate } /* Routine memory_object_data_request */ -kern_return_t memory_object_data_request +kern_return_t +memory_object_data_request ( memory_object_t memory_object, memory_object_offset_t offset, @@ -2134,23 +2184,24 @@ kern_return_t memory_object_data_request { return (memory_object->mo_pager_ops->memory_object_data_request)( memory_object, - offset, + offset, length, desired_access, fault_info); } /* Routine memory_object_data_return */ -kern_return_t memory_object_data_return +kern_return_t +memory_object_data_return ( memory_object_t memory_object, memory_object_offset_t offset, memory_object_cluster_size_t size, memory_object_offset_t *resid_offset, - int *io_error, + int *io_error, boolean_t dirty, boolean_t kernel_copy, - int upl_flags + int upl_flags ) { return (memory_object->mo_pager_ops->memory_object_data_return)( @@ -2165,7 +2216,8 @@ kern_return_t memory_object_data_return } /* Routine memory_object_data_initialize */ -kern_return_t memory_object_data_initialize +kern_return_t +memory_object_data_initialize ( memory_object_t memory_object, memory_object_offset_t offset, @@ -2179,7 +2231,8 @@ kern_return_t memory_object_data_initialize } /* Routine memory_object_data_unlock */ -kern_return_t memory_object_data_unlock +kern_return_t +memory_object_data_unlock ( memory_object_t memory_object, memory_object_offset_t offset, @@ -2195,7 +2248,8 @@ kern_return_t memory_object_data_unlock } /* Routine memory_object_synchronize */ -kern_return_t memory_object_synchronize +kern_return_t +memory_object_synchronize ( memory_object_t memory_object, memory_object_offset_t offset, @@ -2203,7 +2257,7 @@ kern_return_t memory_object_synchronize vm_sync_t sync_flags ) { - panic("memory_object_syncrhonize no longer supported\n"); + panic("memory_object_synchronize no longer supported\n"); return (memory_object->mo_pager_ops->memory_object_synchronize)( memory_object, @@ -2217,7 +2271,7 @@ kern_return_t memory_object_synchronize * memory_object_map() is called by VM (in vm_map_enter() and its variants) * each time a "named" VM object gets mapped directly or indirectly * (copy-on-write mapping).
A "named" VM object has an extra reference held - * by the pager to keep it alive until the pager decides that the + * by the pager to keep it alive until the pager decides that the * memory object (and its VM object) can be reclaimed. * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all * the mappings of that memory object have been removed. @@ -2232,7 +2286,8 @@ kern_return_t memory_object_synchronize */ /* Routine memory_object_map */ -kern_return_t memory_object_map +kern_return_t +memory_object_map ( memory_object_t memory_object, vm_prot_t prot @@ -2244,7 +2299,8 @@ kern_return_t memory_object_map } /* Routine memory_object_last_unmap */ -kern_return_t memory_object_last_unmap +kern_return_t +memory_object_last_unmap ( memory_object_t memory_object ) @@ -2254,14 +2310,16 @@ kern_return_t memory_object_last_unmap } /* Routine memory_object_data_reclaim */ -kern_return_t memory_object_data_reclaim +kern_return_t +memory_object_data_reclaim ( memory_object_t memory_object, - boolean_t reclaim_backing_store + boolean_t reclaim_backing_store ) { - if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL) + if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL) { return KERN_NOT_SUPPORTED; + } return (memory_object->mo_pager_ops->memory_object_data_reclaim)( memory_object, reclaim_backing_store); @@ -2269,34 +2327,34 @@ kern_return_t memory_object_data_reclaim upl_t convert_port_to_upl( - ipc_port_t port) + ipc_port_t port) { upl_t upl; ip_lock(port); if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) { - ip_unlock(port); - return (upl_t)NULL; + ip_unlock(port); + return (upl_t)NULL; } upl = (upl_t) port->ip_kobject; ip_unlock(port); upl_lock(upl); - upl->ref_count+=1; + upl->ref_count += 1; upl_unlock(upl); return upl; } mach_port_t convert_upl_to_port( - __unused upl_t upl) + __unused upl_t upl) { return MACH_PORT_NULL; } __private_extern__ void upl_no_senders( - __unused ipc_port_t port, - __unused mach_port_mscount_t mscount) + __unused ipc_port_t port, + __unused mach_port_mscount_t mscount) { return; } diff --git a/osfmk/vm/memory_object.h b/osfmk/vm/memory_object.h index 6023627ae..e70c96b02 100644 --- a/osfmk/vm/memory_object.h +++ b/osfmk/vm/memory_object.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * - * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie the * rights to redistribute these changes. */ /* */ -#ifndef _VM_MEMORY_OBJECT_H_ -#define _VM_MEMORY_OBJECT_H_ +#ifndef _VM_MEMORY_OBJECT_H_ +#define _VM_MEMORY_OBJECT_H_ #include @@ -69,86 +69,86 @@ extern memory_object_default_t memory_manager_default; __private_extern__ -memory_object_default_t memory_manager_default_reference(void); +memory_object_default_t memory_manager_default_reference(void); __private_extern__ -kern_return_t memory_manager_default_check(void); +kern_return_t memory_manager_default_check(void); __private_extern__ -void memory_manager_default_init(void); +void memory_manager_default_init(void); __private_extern__ -void memory_object_control_bootstrap(void); +void memory_object_control_bootstrap(void); __private_extern__ memory_object_control_t memory_object_control_allocate( - vm_object_t object); + vm_object_t object); __private_extern__ -void memory_object_control_collapse( - memory_object_control_t control, - vm_object_t object); +void memory_object_control_collapse( + memory_object_control_t control, + vm_object_t object); __private_extern__ -vm_object_t memory_object_control_to_vm_object( - memory_object_control_t control); +vm_object_t memory_object_control_to_vm_object( + memory_object_control_t control); __private_extern__ -vm_object_t memory_object_to_vm_object( - memory_object_t mem_obj); +vm_object_t memory_object_to_vm_object( + memory_object_t mem_obj); extern -mach_port_t convert_mo_control_to_port( - memory_object_control_t control); +mach_port_t convert_mo_control_to_port( + memory_object_control_t control); extern void memory_object_control_disable( - memory_object_control_t control); + memory_object_control_t control); extern memory_object_control_t convert_port_to_mo_control( - mach_port_t port); + mach_port_t port); extern -mach_port_t convert_memory_object_to_port( - memory_object_t object); +mach_port_t convert_memory_object_to_port( + memory_object_t object); extern -memory_object_t convert_port_to_memory_object( - mach_port_t port); +memory_object_t convert_port_to_memory_object( + mach_port_t port); extern upl_t convert_port_to_upl( - ipc_port_t port); + ipc_port_t port); extern ipc_port_t convert_upl_to_port( upl_t ); __private_extern__ void upl_no_senders(ipc_port_t, mach_port_mscount_t); -extern kern_return_t memory_object_pages_resident( - memory_object_control_t control, - boolean_t * has_pages_resident); +extern kern_return_t memory_object_pages_resident( + memory_object_control_t control, + boolean_t * has_pages_resident); -extern kern_return_t memory_object_signed( - memory_object_control_t control, - boolean_t is_signed); 
+extern kern_return_t memory_object_signed( + memory_object_control_t control, + boolean_t is_signed); -extern boolean_t memory_object_is_signed( - memory_object_control_t control); +extern boolean_t memory_object_is_signed( + memory_object_control_t control); -extern boolean_t memory_object_is_shared_cache( - memory_object_control_t control); +extern boolean_t memory_object_is_shared_cache( + memory_object_control_t control); -extern void memory_object_mark_used( - memory_object_control_t control); +extern void memory_object_mark_used( + memory_object_control_t control); -extern void memory_object_mark_unused( - memory_object_control_t control, - boolean_t rage); +extern void memory_object_mark_unused( + memory_object_control_t control, + boolean_t rage); -extern void memory_object_mark_io_tracking( +extern void memory_object_mark_io_tracking( memory_object_control_t control); #if CONFIG_SECLUDED_MEMORY -extern void memory_object_mark_eligible_for_secluded( +extern void memory_object_mark_eligible_for_secluded( memory_object_control_t control, - boolean_t eligible_for_secluded); + boolean_t eligible_for_secluded); #endif /* CONFIG_SECLUDED_MEMORY */ -#endif /* _VM_MEMORY_OBJECT_H_ */ +#endif /* _VM_MEMORY_OBJECT_H_ */ diff --git a/osfmk/vm/pmap.h b/osfmk/vm/pmap.h index e70231c43..af1838296 100644 --- a/osfmk/vm/pmap.h +++ b/osfmk/vm/pmap.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -64,7 +64,7 @@ * section. [For machine-dependent section, see "machine/pmap.h".] */ -#ifndef _VM_PMAP_H_ +#ifndef _VM_PMAP_H_ #define _VM_PMAP_H_ #include @@ -76,7 +76,7 @@ #include -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE /* * The following is a description of the interface to the @@ -92,11 +92,11 @@ /* Copy between a physical page and a virtual address */ /* LP64todo - switch to vm_map_offset_t when it grows */ -extern kern_return_t copypv( - addr64_t source, - addr64_t sink, - unsigned int size, - int which); +extern kern_return_t copypv( + addr64_t source, + addr64_t sink, + unsigned int size, + int which); #define cppvPsnk 1 #define cppvPsnkb 31 #define cppvPsrc 2 @@ -109,12 +109,12 @@ extern kern_return_t copypv( #define cppvNoModSnkb 27 #define cppvNoRefSrc 32 #define cppvNoRefSrcb 26 -#define cppvKmap 64 /* Use the kernel's vm_map */ +#define cppvKmap 64 /* Use the kernel's vm_map */ #define cppvKmapb 25 extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last); -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include @@ -132,32 +132,21 @@ extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last); * vm_offset_t, etc... types. */ -extern void *pmap_steal_memory(vm_size_t size); - /* During VM initialization, - * steal a chunk of memory. - */ -extern unsigned int pmap_free_pages(void); /* During VM initialization, - * report remaining unused - * physical pages. - */ -extern void pmap_startup( - vm_offset_t *startp, - vm_offset_t *endp); - /* During VM initialization, - * use remaining physical pages - * to allocate page frames. - */ -extern void pmap_init(void); - /* Initialization, - * after kernel runs - * in virtual memory. - */ - -extern void mapping_adjust(void); /* Adjust free mapping count */ - -extern void mapping_free_prime(void); /* Primes the mapping block release list */ - -#ifndef MACHINE_PAGES +extern void *pmap_steal_memory(vm_size_t size); /* Early memory allocation */ + +extern uint_t pmap_free_pages(void); /* report remaining unused physical pages */ + +extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp); /* allocate vm_page structs */ + +extern void pmap_init(void); /* Initialization, once we have kernel virtual memory. */ + +extern void pmap_pv_fixup(vm_offset_t start, vm_size_t size); + +extern void mapping_adjust(void); /* Adjust free mapping count */ + +extern void mapping_free_prime(void); /* Primes the mapping block release list */ + +#ifndef MACHINE_PAGES /* * If machine/pmap.h defines MACHINE_PAGES, it must implement * the above functions. The pmap module has complete control. @@ -174,156 +163,164 @@ extern void mapping_free_prime(void); /* Primes the mapping block release list * However, for best performance pmap_free_pages should be accurate. */ -extern boolean_t pmap_next_page(ppnum_t *pnum); -extern boolean_t pmap_next_page_hi(ppnum_t *pnum); - /* During VM initialization, - * return the next unused - * physical page. - */ -extern void pmap_virtual_space( - vm_offset_t *virtual_start, - vm_offset_t *virtual_end); - /* During VM initialization, - * report virtual space - * available for the kernel. 
- */ -#endif /* MACHINE_PAGES */ +extern boolean_t pmap_next_page(ppnum_t *pnum); +extern boolean_t pmap_next_page_hi(ppnum_t *pnum); +/* During VM initialization, + * return the next unused + * physical page. + */ +extern void pmap_virtual_space( + vm_offset_t *virtual_start, + vm_offset_t *virtual_end); +/* During VM initialization, + * report virtual space + * available for the kernel. + */ +#endif /* MACHINE_PAGES */ /* * Routines to manage the physical map data structure. */ -extern pmap_t pmap_create( /* Create a pmap_t. */ - ledger_t ledger, - vm_map_size_t size, - boolean_t is_64bit); +extern pmap_t pmap_create( /* Create a pmap_t. */ + ledger_t ledger, + vm_map_size_t size, + boolean_t is_64bit); #if __x86_64__ -extern pmap_t pmap_create_options( - ledger_t ledger, - vm_map_size_t size, - int flags); +extern pmap_t pmap_create_options( + ledger_t ledger, + vm_map_size_t size, + int flags); #endif -extern pmap_t (pmap_kernel)(void); /* Return the kernel's pmap */ -extern void pmap_reference(pmap_t pmap); /* Gain a reference. */ -extern void pmap_destroy(pmap_t pmap); /* Release a reference. */ -extern void pmap_switch(pmap_t); +extern pmap_t(pmap_kernel)(void); /* Return the kernel's pmap */ +extern void pmap_reference(pmap_t pmap); /* Gain a reference. */ +extern void pmap_destroy(pmap_t pmap); /* Release a reference. */ +extern void pmap_switch(pmap_t); #if MACH_ASSERT -extern void pmap_set_process(pmap_t pmap, - int pid, - char *procname); +extern void pmap_set_process(pmap_t pmap, + int pid, + char *procname); #endif /* MACH_ASSERT */ -extern kern_return_t pmap_enter( /* Enter a mapping */ - pmap_t pmap, - vm_map_offset_t v, - ppnum_t pn, - vm_prot_t prot, - vm_prot_t fault_type, - unsigned int flags, - boolean_t wired); - -extern kern_return_t pmap_enter_options( - pmap_t pmap, - vm_map_offset_t v, - ppnum_t pn, - vm_prot_t prot, - vm_prot_t fault_type, - unsigned int flags, - boolean_t wired, - unsigned int options, - void *arg); - -extern void pmap_remove_some_phys( - pmap_t pmap, - ppnum_t pn); - -extern void pmap_lock_phys_page( - ppnum_t pn); - -extern void pmap_unlock_phys_page( - ppnum_t pn); +extern kern_return_t pmap_enter( /* Enter a mapping */ + pmap_t pmap, + vm_map_offset_t v, + ppnum_t pn, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired); + +extern kern_return_t pmap_enter_options( + pmap_t pmap, + vm_map_offset_t v, + ppnum_t pn, + vm_prot_t prot, + vm_prot_t fault_type, + unsigned int flags, + boolean_t wired, + unsigned int options, + void *arg); + +extern void pmap_remove_some_phys( + pmap_t pmap, + ppnum_t pn); + +extern void pmap_lock_phys_page( + ppnum_t pn); + +extern void pmap_unlock_phys_page( + ppnum_t pn); /* * Routines that operate on physical addresses. */ -extern void pmap_page_protect( /* Restrict access to page. */ - ppnum_t phys, - vm_prot_t prot); - -extern void pmap_page_protect_options( /* Restrict access to page. 
*/ - ppnum_t phys, - vm_prot_t prot, - unsigned int options, - void *arg); - -extern void (pmap_zero_page)( - ppnum_t pn); - -extern void (pmap_zero_part_page)( - ppnum_t pn, - vm_offset_t offset, - vm_size_t len); - -extern void (pmap_copy_page)( - ppnum_t src, - ppnum_t dest); - -extern void (pmap_copy_part_page)( - ppnum_t src, - vm_offset_t src_offset, - ppnum_t dst, - vm_offset_t dst_offset, - vm_size_t len); - -extern void (pmap_copy_part_lpage)( - vm_offset_t src, - ppnum_t dst, - vm_offset_t dst_offset, - vm_size_t len); - -extern void (pmap_copy_part_rpage)( - ppnum_t src, - vm_offset_t src_offset, - vm_offset_t dst, - vm_size_t len); - -extern unsigned int (pmap_disconnect)( /* disconnect mappings and return reference and change */ - ppnum_t phys); - -extern unsigned int (pmap_disconnect_options)( /* disconnect mappings and return reference and change */ - ppnum_t phys, - unsigned int options, - void *arg); - -extern kern_return_t (pmap_attribute_cache_sync)( /* Flush appropriate - * cache based on - * page number sent */ - ppnum_t pn, - vm_size_t size, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value); - -extern unsigned int (pmap_cache_attributes)( - ppnum_t pn); +extern void pmap_page_protect( /* Restrict access to page. */ + ppnum_t phys, + vm_prot_t prot); + +extern void pmap_page_protect_options( /* Restrict access to page. */ + ppnum_t phys, + vm_prot_t prot, + unsigned int options, + void *arg); + +extern void(pmap_zero_page)( + ppnum_t pn); + +extern void(pmap_zero_part_page)( + ppnum_t pn, + vm_offset_t offset, + vm_size_t len); + +extern void(pmap_copy_page)( + ppnum_t src, + ppnum_t dest); + +extern void(pmap_copy_part_page)( + ppnum_t src, + vm_offset_t src_offset, + ppnum_t dst, + vm_offset_t dst_offset, + vm_size_t len); + +extern void(pmap_copy_part_lpage)( + vm_offset_t src, + ppnum_t dst, + vm_offset_t dst_offset, + vm_size_t len); + +extern void(pmap_copy_part_rpage)( + ppnum_t src, + vm_offset_t src_offset, + vm_offset_t dst, + vm_size_t len); + +extern unsigned int(pmap_disconnect)( /* disconnect mappings and return reference and change */ + ppnum_t phys); + +extern unsigned int(pmap_disconnect_options)( /* disconnect mappings and return reference and change */ + ppnum_t phys, + unsigned int options, + void *arg); + +extern kern_return_t(pmap_attribute_cache_sync)( /* Flush appropriate + * cache based on + * page number sent */ + ppnum_t pn, + vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value); + +extern unsigned int(pmap_cache_attributes)( + ppnum_t pn); /* * Set (override) cache attributes for the specified physical page */ -extern void pmap_set_cache_attributes( - ppnum_t, - unsigned int); +extern void pmap_set_cache_attributes( + ppnum_t, + unsigned int); + +extern void *pmap_map_compressor_page( + ppnum_t); + +extern void pmap_unmap_compressor_page( + ppnum_t, + void*); + #if defined(__arm__) || defined(__arm64__) /* ARM64_TODO */ -extern boolean_t pmap_batch_set_cache_attributes( - ppnum_t, - unsigned int, - unsigned int, - unsigned int, - boolean_t, - unsigned int*); +extern boolean_t pmap_batch_set_cache_attributes( + ppnum_t, + unsigned int, + unsigned int, + unsigned int, + boolean_t, + unsigned int*); #endif extern void pmap_sync_page_data_phys(ppnum_t pa); extern void pmap_sync_page_attributes_phys(ppnum_t pa); @@ -331,55 +328,59 @@ extern void pmap_sync_page_attributes_phys(ppnum_t pa); /* * debug/assertions. 
pmap_verify_free returns true iff * the given physical page is mapped into no pmap. + * pmap_assert_free() will panic() if pn is not free. */ -extern boolean_t pmap_verify_free(ppnum_t pn); +extern boolean_t pmap_verify_free(ppnum_t pn); +#if MACH_ASSERT +extern void pmap_assert_free(ppnum_t pn); +#endif /* * Statistics routines */ -extern int (pmap_compressed)(pmap_t pmap); -extern int (pmap_resident_count)(pmap_t pmap); -extern int (pmap_resident_max)(pmap_t pmap); +extern int(pmap_compressed)(pmap_t pmap); +extern int(pmap_resident_count)(pmap_t pmap); +extern int(pmap_resident_max)(pmap_t pmap); /* * Sundry required (internal) routines */ #ifdef CURRENTLY_UNUSED_AND_UNTESTED -extern void pmap_collect(pmap_t pmap);/* Perform garbage - * collection, if any */ +extern void pmap_collect(pmap_t pmap);/* Perform garbage + * collection, if any */ #endif /* * Optional routines */ -extern void (pmap_copy)( /* Copy range of mappings, - * if desired. */ - pmap_t dest, - pmap_t source, - vm_map_offset_t dest_va, - vm_map_size_t size, - vm_map_offset_t source_va); - -extern kern_return_t (pmap_attribute)( /* Get/Set special memory - * attributes */ - pmap_t pmap, - vm_map_offset_t va, - vm_map_size_t size, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value); +extern void(pmap_copy)( /* Copy range of mappings, + * if desired. */ + pmap_t dest, + pmap_t source, + vm_map_offset_t dest_va, + vm_map_size_t size, + vm_map_offset_t source_va); + +extern kern_return_t(pmap_attribute)( /* Get/Set special memory + * attributes */ + pmap_t pmap, + vm_map_offset_t va, + vm_map_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value); /* * Routines defined as macros. */ #ifndef PMAP_ACTIVATE_USER -#ifndef PMAP_ACTIVATE +#ifndef PMAP_ACTIVATE #define PMAP_ACTIVATE_USER(thr, cpu) -#else /* PMAP_ACTIVATE */ -#define PMAP_ACTIVATE_USER(thr, cpu) { \ - pmap_t pmap; \ - \ - pmap = (thr)->map->pmap; \ - if (pmap != pmap_kernel()) \ - PMAP_ACTIVATE(pmap, (thr), (cpu)); \ +#else /* PMAP_ACTIVATE */ +#define PMAP_ACTIVATE_USER(thr, cpu) { \ + pmap_t pmap; \ + \ + pmap = (thr)->map->pmap; \ + if (pmap != pmap_kernel()) \ + PMAP_ACTIVATE(pmap, (thr), (cpu)); \ } #endif /* PMAP_ACTIVATE */ #endif /* PMAP_ACTIVATE_USER */ @@ -387,172 +388,172 @@ extern kern_return_t (pmap_attribute)( /* Get/Set special memory #ifndef PMAP_DEACTIVATE_USER #ifndef PMAP_DEACTIVATE #define PMAP_DEACTIVATE_USER(thr, cpu) -#else /* PMAP_DEACTIVATE */ -#define PMAP_DEACTIVATE_USER(thr, cpu) { \ - pmap_t pmap; \ - \ - pmap = (thr)->map->pmap; \ - if ((pmap) != pmap_kernel()) \ - PMAP_DEACTIVATE(pmap, (thr), (cpu)); \ +#else /* PMAP_DEACTIVATE */ +#define PMAP_DEACTIVATE_USER(thr, cpu) { \ + pmap_t pmap; \ + \ + pmap = (thr)->map->pmap; \ + if ((pmap) != pmap_kernel()) \ + PMAP_DEACTIVATE(pmap, (thr), (cpu)); \ } -#endif /* PMAP_DEACTIVATE */ +#endif /* PMAP_DEACTIVATE */ #endif /* PMAP_DEACTIVATE_USER */ -#ifndef PMAP_ACTIVATE_KERNEL +#ifndef PMAP_ACTIVATE_KERNEL #ifndef PMAP_ACTIVATE -#define PMAP_ACTIVATE_KERNEL(cpu) -#else /* PMAP_ACTIVATE */ -#define PMAP_ACTIVATE_KERNEL(cpu) \ - PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu) -#endif /* PMAP_ACTIVATE */ -#endif /* PMAP_ACTIVATE_KERNEL */ - -#ifndef PMAP_DEACTIVATE_KERNEL +#define PMAP_ACTIVATE_KERNEL(cpu) +#else /* PMAP_ACTIVATE */ +#define PMAP_ACTIVATE_KERNEL(cpu) \ + PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu) +#endif /* PMAP_ACTIVATE */ +#endif /* PMAP_ACTIVATE_KERNEL */ + +#ifndef PMAP_DEACTIVATE_KERNEL #ifndef PMAP_DEACTIVATE -#define 
PMAP_DEACTIVATE_KERNEL(cpu) -#else /* PMAP_DEACTIVATE */ -#define PMAP_DEACTIVATE_KERNEL(cpu) \ - PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu) -#endif /* PMAP_DEACTIVATE */ -#endif /* PMAP_DEACTIVATE_KERNEL */ - -#ifndef PMAP_ENTER +#define PMAP_DEACTIVATE_KERNEL(cpu) +#else /* PMAP_DEACTIVATE */ +#define PMAP_DEACTIVATE_KERNEL(cpu) \ + PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu) +#endif /* PMAP_DEACTIVATE */ +#endif /* PMAP_DEACTIVATE_KERNEL */ + +#ifndef PMAP_ENTER /* * Macro to be used in place of pmap_enter() */ -#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \ - flags, wired, result) \ - MACRO_BEGIN \ - pmap_t __pmap = (pmap); \ - vm_page_t __page = (page); \ - int __options = 0; \ - vm_object_t __obj; \ - \ - PMAP_ENTER_CHECK(__pmap, __page) \ - __obj = VM_PAGE_OBJECT(__page); \ - if (__obj->internal) { \ - __options |= PMAP_OPTIONS_INTERNAL; \ - } \ - if (__page->vmp_reusable || __obj->all_reusable) { \ - __options |= PMAP_OPTIONS_REUSABLE; \ - } \ - result = pmap_enter_options(__pmap, \ - (virtual_address), \ - VM_PAGE_GET_PHYS_PAGE(__page), \ - (protection), \ - (fault_type), \ - (flags), \ - (wired), \ - __options, \ - NULL); \ +#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \ + flags, wired, result) \ + MACRO_BEGIN \ + pmap_t __pmap = (pmap); \ + vm_page_t __page = (page); \ + int __options = 0; \ + vm_object_t __obj; \ + \ + PMAP_ENTER_CHECK(__pmap, __page) \ + __obj = VM_PAGE_OBJECT(__page); \ + if (__obj->internal) { \ + __options |= PMAP_OPTIONS_INTERNAL; \ + } \ + if (__page->vmp_reusable || __obj->all_reusable) { \ + __options |= PMAP_OPTIONS_REUSABLE; \ + } \ + result = pmap_enter_options(__pmap, \ + (virtual_address), \ + VM_PAGE_GET_PHYS_PAGE(__page), \ + (protection), \ + (fault_type), \ + (flags), \ + (wired), \ + __options, \ + NULL); \ MACRO_END -#endif /* !PMAP_ENTER */ - -#ifndef PMAP_ENTER_OPTIONS -#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \ - fault_type, flags, wired, options, result) \ - MACRO_BEGIN \ - pmap_t __pmap = (pmap); \ - vm_page_t __page = (page); \ - int __extra_options = 0; \ - vm_object_t __obj; \ - \ - PMAP_ENTER_CHECK(__pmap, __page) \ - __obj = VM_PAGE_OBJECT(__page); \ - if (__obj->internal) { \ - __extra_options |= PMAP_OPTIONS_INTERNAL; \ - } \ - if (__page->vmp_reusable || __obj->all_reusable) { \ - __extra_options |= PMAP_OPTIONS_REUSABLE; \ - } \ - result = pmap_enter_options(__pmap, \ - (virtual_address), \ - VM_PAGE_GET_PHYS_PAGE(__page), \ - (protection), \ - (fault_type), \ - (flags), \ - (wired), \ - (options) | __extra_options, \ - NULL); \ +#endif /* !PMAP_ENTER */ + +#ifndef PMAP_ENTER_OPTIONS +#define PMAP_ENTER_OPTIONS(pmap, virtual_address, page, protection, \ + fault_type, flags, wired, options, result) \ + MACRO_BEGIN \ + pmap_t __pmap = (pmap); \ + vm_page_t __page = (page); \ + int __extra_options = 0; \ + vm_object_t __obj; \ + \ + PMAP_ENTER_CHECK(__pmap, __page) \ + __obj = VM_PAGE_OBJECT(__page); \ + if (__obj->internal) { \ + __extra_options |= PMAP_OPTIONS_INTERNAL; \ + } \ + if (__page->vmp_reusable || __obj->all_reusable) { \ + __extra_options |= PMAP_OPTIONS_REUSABLE; \ + } \ + result = pmap_enter_options(__pmap, \ + (virtual_address), \ + VM_PAGE_GET_PHYS_PAGE(__page), \ + (protection), \ + (fault_type), \ + (flags), \ + (wired), \ + (options) | __extra_options, \ + NULL); \ MACRO_END -#endif /* !PMAP_ENTER_OPTIONS */ +#endif /* !PMAP_ENTER_OPTIONS */ #ifndef PMAP_SET_CACHE_ATTR -#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, 
batch_pmap_op) \ - MACRO_BEGIN \ - if (!batch_pmap_op) { \ - pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \ - object->set_cache_attr = TRUE; \ - } \ - MACRO_END -#endif /* PMAP_SET_CACHE_ATTR */ +#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \ + MACRO_BEGIN \ + if (!batch_pmap_op) { \ + pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \ + object->set_cache_attr = TRUE; \ + } \ + MACRO_END +#endif /* PMAP_SET_CACHE_ATTR */ #ifndef PMAP_BATCH_SET_CACHE_ATTR -#if defined(__arm__) || defined(__arm64__) -#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \ - cache_attr, num_pages, batch_pmap_op) \ - MACRO_BEGIN \ - if ((batch_pmap_op)) { \ - unsigned int __page_idx=0; \ - unsigned int res=0; \ - boolean_t batch=TRUE; \ - while (__page_idx < (num_pages)) { \ - if (!pmap_batch_set_cache_attributes( \ - user_page_list[__page_idx].phys_addr, \ - (cache_attr), \ - (num_pages), \ - (__page_idx), \ - FALSE, \ - (&res))) { \ - batch = FALSE; \ - break; \ - } \ - __page_idx++; \ - } \ - __page_idx=0; \ - res=0; \ - while (__page_idx < (num_pages)) { \ - if (batch) \ - (void)pmap_batch_set_cache_attributes( \ - user_page_list[__page_idx].phys_addr, \ - (cache_attr), \ - (num_pages), \ - (__page_idx), \ - TRUE, \ - (&res)); \ - else \ - pmap_set_cache_attributes( \ - user_page_list[__page_idx].phys_addr, \ - (cache_attr)); \ - __page_idx++; \ - } \ - (object)->set_cache_attr = TRUE; \ - } \ +#if defined(__arm__) || defined(__arm64__) +#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \ + cache_attr, num_pages, batch_pmap_op) \ + MACRO_BEGIN \ + if ((batch_pmap_op)) { \ + unsigned int __page_idx=0; \ + unsigned int res=0; \ + boolean_t batch=TRUE; \ + while (__page_idx < (num_pages)) { \ + if (!pmap_batch_set_cache_attributes( \ + user_page_list[__page_idx].phys_addr, \ + (cache_attr), \ + (num_pages), \ + (__page_idx), \ + FALSE, \ + (&res))) { \ + batch = FALSE; \ + break; \ + } \ + __page_idx++; \ + } \ + __page_idx=0; \ + res=0; \ + while (__page_idx < (num_pages)) { \ + if (batch) \ + (void)pmap_batch_set_cache_attributes( \ + user_page_list[__page_idx].phys_addr, \ + (cache_attr), \ + (num_pages), \ + (__page_idx), \ + TRUE, \ + (&res)); \ + else \ + pmap_set_cache_attributes( \ + user_page_list[__page_idx].phys_addr, \ + (cache_attr)); \ + __page_idx++; \ + } \ + (object)->set_cache_attr = TRUE; \ + } \ MACRO_END #else -#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \ - cache_attr, num_pages, batch_pmap_op) \ - MACRO_BEGIN \ - if ((batch_pmap_op)) { \ - unsigned int __page_idx=0; \ - while (__page_idx < (num_pages)) { \ - pmap_set_cache_attributes( \ - user_page_list[__page_idx].phys_addr, \ - (cache_attr)); \ - __page_idx++; \ - } \ - (object)->set_cache_attr = TRUE; \ - } \ +#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \ + cache_attr, num_pages, batch_pmap_op) \ + MACRO_BEGIN \ + if ((batch_pmap_op)) { \ + unsigned int __page_idx=0; \ + while (__page_idx < (num_pages)) { \ + pmap_set_cache_attributes( \ + user_page_list[__page_idx].phys_addr, \ + (cache_attr)); \ + __page_idx++; \ + } \ + (object)->set_cache_attr = TRUE; \ + } \ MACRO_END #endif -#endif /* PMAP_BATCH_SET_CACHE_ATTR */ - -#define PMAP_ENTER_CHECK(pmap, page) \ -{ \ - if ((page)->vmp_error) { \ - panic("VM page %p should not have an error\n", \ - (page)); \ - } \ +#endif /* PMAP_BATCH_SET_CACHE_ATTR */ + +#define PMAP_ENTER_CHECK(pmap, page) \ +{ \ + if ((page)->vmp_error) { \ + panic("VM page %p should not have an error\n", \ + 
(page)); \ + } \ } /* @@ -561,29 +562,29 @@ extern kern_return_t (pmap_attribute)( /* Get/Set special memory * by the hardware. */ struct pfc { - long pfc_cpus; - long pfc_invalid_global; + long pfc_cpus; + long pfc_invalid_global; }; -typedef struct pfc pmap_flush_context; - - /* Clear reference bit */ -extern void pmap_clear_reference(ppnum_t pn); - /* Return reference bit */ -extern boolean_t (pmap_is_referenced)(ppnum_t pn); - /* Set modify bit */ -extern void pmap_set_modify(ppnum_t pn); - /* Clear modify bit */ -extern void pmap_clear_modify(ppnum_t pn); - /* Return modify bit */ -extern boolean_t pmap_is_modified(ppnum_t pn); - /* Return modified and referenced bits */ +typedef struct pfc pmap_flush_context; + +/* Clear reference bit */ +extern void pmap_clear_reference(ppnum_t pn); +/* Return reference bit */ +extern boolean_t(pmap_is_referenced)(ppnum_t pn); +/* Set modify bit */ +extern void pmap_set_modify(ppnum_t pn); +/* Clear modify bit */ +extern void pmap_clear_modify(ppnum_t pn); +/* Return modify bit */ +extern boolean_t pmap_is_modified(ppnum_t pn); +/* Return modified and referenced bits */ extern unsigned int pmap_get_refmod(ppnum_t pn); - /* Clear modified and referenced bits */ -extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask); -#define VM_MEM_MODIFIED 0x01 /* Modified bit */ -#define VM_MEM_REFERENCED 0x02 /* Referenced bit */ -extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *); +/* Clear modified and referenced bits */ +extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask); +#define VM_MEM_MODIFIED 0x01 /* Modified bit */ +#define VM_MEM_REFERENCED 0x02 /* Referenced bit */ +extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *); extern void pmap_flush_context_init(pmap_flush_context *); @@ -592,52 +593,52 @@ extern void pmap_flush(pmap_flush_context *); /* * Routines that operate on ranges of virtual addresses. */ -extern void pmap_protect( /* Change protections. */ - pmap_t map, - vm_map_offset_t s, - vm_map_offset_t e, - vm_prot_t prot); - -extern void pmap_protect_options( /* Change protections. */ - pmap_t map, - vm_map_offset_t s, - vm_map_offset_t e, - vm_prot_t prot, - unsigned int options, - void *arg); - -extern void (pmap_pageable)( - pmap_t pmap, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t pageable); +extern void pmap_protect( /* Change protections. */ + pmap_t map, + vm_map_offset_t s, + vm_map_offset_t e, + vm_prot_t prot); + +extern void pmap_protect_options( /* Change protections. 
*/ + pmap_t map, + vm_map_offset_t s, + vm_map_offset_t e, + vm_prot_t prot, + unsigned int options, + void *arg); + +extern void(pmap_pageable)( + pmap_t pmap, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t pageable); extern uint64_t pmap_nesting_size_min; extern uint64_t pmap_nesting_size_max; extern kern_return_t pmap_nest(pmap_t, - pmap_t, - addr64_t, - addr64_t, - uint64_t); + pmap_t, + addr64_t, + addr64_t, + uint64_t); extern kern_return_t pmap_unnest(pmap_t, - addr64_t, - uint64_t); + addr64_t, + uint64_t); -#define PMAP_UNNEST_CLEAN 1 +#define PMAP_UNNEST_CLEAN 1 extern kern_return_t pmap_unnest_options(pmap_t, - addr64_t, - uint64_t, - unsigned int); + addr64_t, + uint64_t, + unsigned int); extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *); -extern void pmap_advise_pagezero_range(pmap_t, uint64_t); -#endif /* MACH_KERNEL_PRIVATE */ +extern void pmap_advise_pagezero_range(pmap_t, uint64_t); +#endif /* MACH_KERNEL_PRIVATE */ -extern boolean_t pmap_is_noencrypt(ppnum_t); -extern void pmap_set_noencrypt(ppnum_t pn); -extern void pmap_clear_noencrypt(ppnum_t pn); +extern boolean_t pmap_is_noencrypt(ppnum_t); +extern void pmap_set_noencrypt(ppnum_t pn); +extern void pmap_clear_noencrypt(ppnum_t pn); /* * JMM - This portion is exported to other kernel components right now, @@ -645,85 +646,85 @@ extern void pmap_clear_noencrypt(ppnum_t pn); * is provided in a cleaner manner. */ -extern pmap_t kernel_pmap; /* The kernel's map */ -#define pmap_kernel() (kernel_pmap) +extern pmap_t kernel_pmap; /* The kernel's map */ +#define pmap_kernel() (kernel_pmap) /* machine independent WIMG bits */ -#define VM_MEM_GUARDED 0x1 /* (G) Guarded Storage */ -#define VM_MEM_COHERENT 0x2 /* (M) Memory Coherency */ -#define VM_MEM_NOT_CACHEABLE 0x4 /* (I) Cache Inhibit */ -#define VM_MEM_WRITE_THROUGH 0x8 /* (W) Write-Through */ +#define VM_MEM_GUARDED 0x1 /* (G) Guarded Storage */ +#define VM_MEM_COHERENT 0x2 /* (M) Memory Coherency */ +#define VM_MEM_NOT_CACHEABLE 0x4 /* (I) Cache Inhibit */ +#define VM_MEM_WRITE_THROUGH 0x8 /* (W) Write-Through */ -#define VM_WIMG_USE_DEFAULT 0x80 -#define VM_WIMG_MASK 0xFF +#define VM_WIMG_USE_DEFAULT 0x80 +#define VM_WIMG_MASK 0xFF -#define VM_MEM_SUPERPAGE 0x100 /* map a superpage instead of a base page */ -#define VM_MEM_STACK 0x200 +#define VM_MEM_SUPERPAGE 0x100 /* map a superpage instead of a base page */ +#define VM_MEM_STACK 0x200 #if __x86_64__ /* N.B. 
These use the same numerical space as the PMAP_EXPAND_OPTIONS * definitions in i386/pmap_internal.h */ -#define PMAP_CREATE_64BIT 0x1 -#define PMAP_CREATE_EPT 0x2 +#define PMAP_CREATE_64BIT 0x1 +#define PMAP_CREATE_EPT 0x2 #define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT) #endif -#define PMAP_OPTIONS_NOWAIT 0x1 /* don't block, return - * KERN_RESOURCE_SHORTAGE - * instead */ -#define PMAP_OPTIONS_NOENTER 0x2 /* expand pmap if needed - * but don't enter mapping - */ -#define PMAP_OPTIONS_COMPRESSOR 0x4 /* credit the compressor for - * this operation */ -#define PMAP_OPTIONS_INTERNAL 0x8 /* page from internal object */ -#define PMAP_OPTIONS_REUSABLE 0x10 /* page is "reusable" */ -#define PMAP_OPTIONS_NOFLUSH 0x20 /* delay flushing of pmap */ -#define PMAP_OPTIONS_NOREFMOD 0x40 /* don't need ref/mod on disconnect */ -#define PMAP_OPTIONS_ALT_ACCT 0x80 /* use alternate accounting scheme for page */ -#define PMAP_OPTIONS_REMOVE 0x100 /* removing a mapping */ -#define PMAP_OPTIONS_SET_REUSABLE 0x200 /* page is now "reusable" */ -#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400 /* page no longer "reusable" */ +#define PMAP_OPTIONS_NOWAIT 0x1 /* don't block, return + * KERN_RESOURCE_SHORTAGE + * instead */ +#define PMAP_OPTIONS_NOENTER 0x2 /* expand pmap if needed + * but don't enter mapping + */ +#define PMAP_OPTIONS_COMPRESSOR 0x4 /* credit the compressor for + * this operation */ +#define PMAP_OPTIONS_INTERNAL 0x8 /* page from internal object */ +#define PMAP_OPTIONS_REUSABLE 0x10 /* page is "reusable" */ +#define PMAP_OPTIONS_NOFLUSH 0x20 /* delay flushing of pmap */ +#define PMAP_OPTIONS_NOREFMOD 0x40 /* don't need ref/mod on disconnect */ +#define PMAP_OPTIONS_ALT_ACCT 0x80 /* use alternate accounting scheme for page */ +#define PMAP_OPTIONS_REMOVE 0x100 /* removing a mapping */ +#define PMAP_OPTIONS_SET_REUSABLE 0x200 /* page is now "reusable" */ +#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400 /* page no longer "reusable" */ #define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor - * iff page was modified */ -#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000 /* allow protections to be - * be upgraded */ + * iff page was modified */ +#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000 /* allow protections to be + * be upgraded */ #define PMAP_OPTIONS_CLEAR_WRITE 0x2000 -#if !defined(__LP64__) -extern vm_offset_t pmap_extract(pmap_t pmap, - vm_map_offset_t va); +#if !defined(__LP64__) +extern vm_offset_t pmap_extract(pmap_t pmap, + vm_map_offset_t va); #endif -extern void pmap_change_wiring( /* Specify pageability */ - pmap_t pmap, - vm_map_offset_t va, - boolean_t wired); +extern void pmap_change_wiring( /* Specify pageability */ + pmap_t pmap, + vm_map_offset_t va, + boolean_t wired); /* LP64todo - switch to vm_map_offset_t when it grows */ -extern void pmap_remove( /* Remove mappings. */ - pmap_t map, - vm_map_offset_t s, - vm_map_offset_t e); +extern void pmap_remove( /* Remove mappings. */ + pmap_t map, + vm_map_offset_t s, + vm_map_offset_t e); -extern void pmap_remove_options( /* Remove mappings. */ - pmap_t map, - vm_map_offset_t s, - vm_map_offset_t e, - int options); +extern void pmap_remove_options( /* Remove mappings. 
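The PMAP_OPTIONS_* values reindented above form a bit set that callers OR together into the options argument of routines such as pmap_remove_options(). A small sketch of composing and testing such a set; the flag values and their meanings are copied from the hunk, while the describe() caller is purely illustrative (the real consumers are kernel-private).

#include <stdio.h>

#define PMAP_OPTIONS_NOWAIT  0x1    /* don't block */
#define PMAP_OPTIONS_NOFLUSH 0x20   /* delay flushing of pmap */
#define PMAP_OPTIONS_REMOVE  0x100  /* removing a mapping */

static void describe(unsigned int options)
{
    if (options & PMAP_OPTIONS_NOWAIT)
        printf("won't block; may return KERN_RESOURCE_SHORTAGE instead\n");
    if (options & PMAP_OPTIONS_NOFLUSH)
        printf("TLB flush deferred (see pmap_flush_context above)\n");
    if (options & PMAP_OPTIONS_REMOVE)
        printf("a mapping is being removed\n");
}

int main(void)
{
    describe(PMAP_OPTIONS_NOWAIT | PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_REMOVE);
    return 0;
}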
*/ + pmap_t map, + vm_map_offset_t s, + vm_map_offset_t e, + int options); -extern void fillPage(ppnum_t pa, unsigned int fill); +extern void fillPage(ppnum_t pa, unsigned int fill); #if defined(__LP64__) void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr); #endif mach_vm_size_t pmap_query_resident(pmap_t pmap, - vm_map_offset_t s, - vm_map_offset_t e, - mach_vm_size_t *compressed_bytes_p); + vm_map_offset_t s, + vm_map_offset_t e, + mach_vm_size_t *compressed_bytes_p); /* Inform the pmap layer that there is a JIT entry in this map. */ extern void pmap_set_jit_entitled(pmap_t pmap); @@ -755,16 +756,16 @@ bool pmap_has_prot_policy(vm_prot_t prot); */ uint64_t pmap_release_pages_fast(void); -#define PMAP_QUERY_PAGE_PRESENT 0x01 -#define PMAP_QUERY_PAGE_REUSABLE 0x02 -#define PMAP_QUERY_PAGE_INTERNAL 0x04 -#define PMAP_QUERY_PAGE_ALTACCT 0x08 -#define PMAP_QUERY_PAGE_COMPRESSED 0x10 -#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT 0x20 +#define PMAP_QUERY_PAGE_PRESENT 0x01 +#define PMAP_QUERY_PAGE_REUSABLE 0x02 +#define PMAP_QUERY_PAGE_INTERNAL 0x04 +#define PMAP_QUERY_PAGE_ALTACCT 0x08 +#define PMAP_QUERY_PAGE_COMPRESSED 0x10 +#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT 0x20 extern kern_return_t pmap_query_page_info( - pmap_t pmap, - vm_map_offset_t va, - int *disp); + pmap_t pmap, + vm_map_offset_t va, + int *disp); #if CONFIG_PGTRACE int pmap_pgtrace_add_page(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end); @@ -773,10 +774,51 @@ kern_return_t pmap_pgtrace_fault(pmap_t pmap, vm_map_offset_t va, arm_saved_stat #endif +struct pmap_legacy_trust_cache { + struct pmap_legacy_trust_cache *next; + uuid_t uuid; + uint32_t num_hashes; + uint8_t hashes[][CS_CDHASH_LEN]; +}; + +extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache, + const vm_size_t trust_cache_len); + +struct pmap_image4_trust_cache { + // Filled by pmap layer. + struct pmap_image4_trust_cache const *next; // linked list linkage + struct trust_cache_module1 const *module; // pointer into module (within data below) + + // Filled by caller. + // data is either an image4, + // or just the trust cache payload itself if the image4 manifest is external. + size_t data_len; + uint8_t const data[]; +}; + +typedef enum { + PMAP_TC_SUCCESS = 0, + PMAP_TC_UNKNOWN_FORMAT = -1, + PMAP_TC_TOO_SMALL_FOR_HEADER = -2, + PMAP_TC_TOO_SMALL_FOR_ENTRIES = -3, + PMAP_TC_UNKNOWN_VERSION = -4, + PMAP_TC_ALREADY_LOADED = -5, + PMAP_TC_TOO_BIG = -6, + PMAP_TC_RESOURCE_SHORTAGE = -7, + PMAP_TC_MANIFEST_TOO_BIG = -8, +} pmap_tc_ret_t; + +extern pmap_tc_ret_t pmap_load_image4_trust_cache( + struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len, + uint8_t const *img4_manifest, + vm_size_t img4_manifest_buffer_len, + vm_size_t img4_manifest_actual_len, + bool dry_run); + extern void pmap_ledger_alloc_init(size_t); extern ledger_t pmap_ledger_alloc(void); extern void pmap_ledger_free(ledger_t); #endif /* KERNEL_PRIVATE */ -#endif /* _VM_PMAP_H_ */ +#endif /* _VM_PMAP_H_ */ diff --git a/osfmk/vm/vm32_user.c b/osfmk/vm/vm32_user.c index 9918cf984..fa529ec93 100644 --- a/osfmk/vm/vm32_user.c +++ b/osfmk/vm/vm32_user.c @@ -2,7 +2,7 @@ * Copyright (c) 2008-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
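The newly added struct pmap_legacy_trust_cache ends in a flexible array member of CDHashes, so its allocation size must be computed from num_hashes. A minimal sketch of sizing such an object, assuming CS_CDHASH_LEN is 20 bytes (the SHA-1 CDHash length, defined elsewhere in xnu rather than in this hunk) and using calloc in place of kernel allocation.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CS_CDHASH_LEN 20   /* assumption: SHA-1 CDHash length; defined outside this hunk */

struct legacy_tc {                          /* shape mirrors pmap_legacy_trust_cache */
    struct legacy_tc *next;
    unsigned char uuid[16];
    unsigned int num_hashes;
    unsigned char hashes[][CS_CDHASH_LEN];  /* flexible array member */
};

int main(void)
{
    unsigned int n = 3;
    size_t len = sizeof(struct legacy_tc) + (size_t)n * CS_CDHASH_LEN;
    struct legacy_tc *tc = calloc(1, len);
    if (tc == NULL)
        return 1;
    tc->num_hashes = n;
    memset(tc->hashes[1], 0xAB, CS_CDHASH_LEN);   /* second CDHash slot */
    printf("allocation size: %zu bytes\n", len);
    free(tc);
    return 0;
}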
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -58,7 +58,7 @@ /* * File: vm/vm32_user.c * Author: Avadis Tevanian, Jr., Michael Wayne Young - * + * * User-exported virtual memory functions. 
*/ @@ -66,9 +66,9 @@ #include #include -#include /* to get vm_address_t */ +#include /* to get vm_address_t */ #include -#include /* to get pointer_t */ +#include /* to get pointer_t */ #include #include #include @@ -105,103 +105,107 @@ kern_return_t vm32_allocate( - vm_map_t map, - vm32_offset_t *addr, - vm32_size_t size, - int flags) + vm_map_t map, + vm32_offset_t *addr, + vm32_size_t size, + int flags) { - mach_vm_offset_t maddr; - kern_return_t result; + mach_vm_offset_t maddr; + kern_return_t result; maddr = *addr; result = mach_vm_allocate_external(map, &maddr, size, flags); *addr = CAST_DOWN_EXPLICIT(vm32_offset_t, maddr); - + return result; } kern_return_t vm32_deallocate( - vm_map_t map, - vm32_offset_t start, - vm32_size_t size) + vm_map_t map, + vm32_offset_t start, + vm32_size_t size) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } return mach_vm_deallocate(map, start, size); } kern_return_t vm32_inherit( - vm_map_t map, - vm32_offset_t start, - vm32_size_t size, - vm_inherit_t new_inheritance) + vm_map_t map, + vm32_offset_t start, + vm32_size_t size, + vm_inherit_t new_inheritance) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } return mach_vm_inherit(map, start, size, new_inheritance); } kern_return_t vm32_protect( - vm_map_t map, - vm32_offset_t start, - vm32_size_t size, - boolean_t set_maximum, - vm_prot_t new_protection) + vm_map_t map, + vm32_offset_t start, + vm32_size_t size, + boolean_t set_maximum, + vm_prot_t new_protection) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } return mach_vm_protect(map, start, size, set_maximum, new_protection); } kern_return_t vm32_machine_attribute( - vm_map_t map, - vm32_address_t addr, - vm32_size_t size, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value) /* IN/OUT */ + vm_map_t map, + vm32_address_t addr, + vm32_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ { - if ((map == VM_MAP_NULL) || (addr + size < addr)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (addr + size < addr)) { + return KERN_INVALID_ARGUMENT; + } return mach_vm_machine_attribute(map, addr, size, attribute, value); } kern_return_t vm32_read( - vm_map_t map, - vm32_address_t addr, - vm32_size_t size, - pointer_t *data, - mach_msg_type_number_t *data_size) + vm_map_t map, + vm32_address_t addr, + vm32_size_t size, + pointer_t *data, + mach_msg_type_number_t *data_size) { return mach_vm_read(map, addr, size, data, data_size); } kern_return_t vm32_read_list( - vm_map_t map, - vm32_read_entry_t data_list, - natural_t count) + vm_map_t map, + vm32_read_entry_t data_list, + natural_t count) { - mach_vm_read_entry_t mdata_list; - mach_msg_type_number_t i; - kern_return_t result; + mach_vm_read_entry_t mdata_list; + mach_msg_type_number_t i; + kern_return_t result; - for (i=0; i < VM_MAP_ENTRY_MAX; i++) { + for (i = 0; i < VM_MAP_ENTRY_MAX; i++) { mdata_list[i].address = data_list[i].address; mdata_list[i].size = data_list[i].size; } - + result = mach_vm_read_list(map, mdata_list, count); - for (i=0; i < VM_MAP_ENTRY_MAX; i++) { + for (i = 0; i < VM_MAP_ENTRY_MAX; i++) { 
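The vm32 wrappers being rebraced here all share the guard (start + size < start), which rejects ranges whose 32-bit sum wraps around. A stand-alone demonstration of why the comparison works, with uint32_t playing the role of vm32_offset_t:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the (start + size < start) guard: unsigned addition wraps modulo
 * 2^32, so a wrapped sum lands strictly below the original start. */
static int range_ok(uint32_t start, uint32_t size)
{
    return start + size >= start;
}

int main(void)
{
    printf("%d\n", range_ok(0xFFFFF000u, 0x2000u)); /* 0: wraps past 2^32 */
    printf("%d\n", range_ok(0x1000u, 0x2000u));     /* 1: fits */
    return 0;
}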
data_list[i].address = CAST_DOWN_EXPLICIT(vm32_address_t, mdata_list[i].address); data_list[i].size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_list[i].size); } @@ -211,293 +215,300 @@ vm32_read_list( kern_return_t vm32_read_overwrite( - vm_map_t map, - vm32_address_t address, - vm32_size_t size, - vm32_address_t data, - vm32_size_t *data_size) + vm_map_t map, + vm32_address_t address, + vm32_size_t size, + vm32_address_t data, + vm32_size_t *data_size) { - kern_return_t result; - mach_vm_size_t mdata_size; + kern_return_t result; + mach_vm_size_t mdata_size; mdata_size = *data_size; - result = mach_vm_read_overwrite(map, address, size, data, &mdata_size); + result = mach_vm_read_overwrite(map, address, size, data, &mdata_size); *data_size = CAST_DOWN_EXPLICIT(vm32_size_t, mdata_size); - + return result; } kern_return_t vm32_write( - vm_map_t map, - vm32_address_t address, - pointer_t data, - mach_msg_type_number_t size) + vm_map_t map, + vm32_address_t address, + pointer_t data, + mach_msg_type_number_t size) { return mach_vm_write(map, address, data, size); } kern_return_t vm32_copy( - vm_map_t map, - vm32_address_t source_address, - vm32_size_t size, - vm32_address_t dest_address) + vm_map_t map, + vm32_address_t source_address, + vm32_size_t size, + vm32_address_t dest_address) { return mach_vm_copy(map, source_address, size, dest_address); } kern_return_t vm32_map_64( - vm_map_t target_map, - vm32_offset_t *address, - vm32_size_t size, - vm32_offset_t mask, - int flags, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm32_offset_t *address, + vm32_size_t size, + vm32_offset_t mask, + int flags, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { - mach_vm_offset_t maddress; - kern_return_t result; + mach_vm_offset_t maddress; + kern_return_t result; maddress = *address; result = mach_vm_map_external(target_map, &maddress, size, mask, - flags, port, offset, copy, - cur_protection, max_protection, inheritance); + flags, port, offset, copy, + cur_protection, max_protection, inheritance); *address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress); - + return result; } kern_return_t vm32_map( - vm_map_t target_map, - vm32_offset_t *address, - vm32_size_t size, - vm32_offset_t mask, - int flags, - ipc_port_t port, - vm32_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm32_offset_t *address, + vm32_size_t size, + vm32_offset_t mask, + int flags, + ipc_port_t port, + vm32_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { return vm32_map_64(target_map, address, size, mask, - flags, port, offset, copy, - cur_protection, max_protection, inheritance); + flags, port, offset, copy, + cur_protection, max_protection, inheritance); } kern_return_t vm32_remap( - vm_map_t target_map, - vm32_offset_t *address, - vm32_size_t size, - vm32_offset_t mask, - boolean_t anywhere, - vm_map_t src_map, - vm32_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm32_offset_t *address, + vm32_size_t size, + vm32_offset_t mask, + boolean_t anywhere, + vm_map_t src_map, + vm32_offset_t memory_address, + boolean_t copy, + vm_prot_t 
*cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { - mach_vm_offset_t maddress; - kern_return_t result; - + mach_vm_offset_t maddress; + kern_return_t result; + maddress = *address; result = mach_vm_remap_external(target_map, &maddress, size, mask, - anywhere, src_map, memory_address, copy, - cur_protection, max_protection, inheritance); + anywhere, src_map, memory_address, copy, + cur_protection, max_protection, inheritance); *address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress); - + return result; } kern_return_t vm32_msync( - vm_map_t map, - vm32_address_t address, - vm32_size_t size, - vm_sync_t sync_flags) + vm_map_t map, + vm32_address_t address, + vm32_size_t size, + vm_sync_t sync_flags) { return mach_vm_msync(map, address, size, sync_flags); } -kern_return_t +kern_return_t vm32_behavior_set( - vm_map_t map, - vm32_offset_t start, - vm32_size_t size, - vm_behavior_t new_behavior) + vm_map_t map, + vm32_offset_t start, + vm32_size_t size, + vm_behavior_t new_behavior) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } return mach_vm_behavior_set(map, start, size, new_behavior); } kern_return_t vm32_region_64( - vm_map_t map, - vm32_offset_t *address, /* IN/OUT */ - vm32_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + vm32_offset_t *address, /* IN/OUT */ + vm32_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - mach_vm_offset_t maddress; - mach_vm_size_t msize; - kern_return_t result; + mach_vm_offset_t maddress; + mach_vm_size_t msize; + kern_return_t result; maddress = *address; msize = *size; result = mach_vm_region(map, &maddress, &msize, flavor, info, count, object_name); *size = CAST_DOWN_EXPLICIT(vm32_size_t, msize); *address = CAST_DOWN_EXPLICIT(vm32_offset_t, maddress); - + return result; } kern_return_t vm32_region( - vm_map_t map, - vm32_address_t *address, /* IN/OUT */ - vm32_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + vm32_address_t *address, /* IN/OUT */ + vm32_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; kr = vm_map_region(map, - &map_addr, &map_size, - flavor, info, count, - object_name); + &map_addr, &map_size, + flavor, info, count, + object_name); *address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr); *size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size); - if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } kern_return_t vm32_region_recurse_64( - vm_map_t map, - vm32_address_t *address, - vm32_size_t *size, - 
uint32_t *depth, - vm_region_recurse_info_64_t info, - mach_msg_type_number_t *infoCnt) + vm_map_t map, + vm32_address_t *address, + vm32_size_t *size, + uint32_t *depth, + vm_region_recurse_info_64_t info, + mach_msg_type_number_t *infoCnt) { - mach_vm_address_t maddress; - mach_vm_size_t msize; - kern_return_t result; + mach_vm_address_t maddress; + mach_vm_size_t msize; + kern_return_t result; maddress = *address; msize = *size; result = mach_vm_region_recurse(map, &maddress, &msize, depth, info, infoCnt); *address = CAST_DOWN_EXPLICIT(vm32_address_t, maddress); *size = CAST_DOWN_EXPLICIT(vm32_size_t, msize); - + return result; } kern_return_t vm32_region_recurse( - vm_map_t map, - vm32_offset_t *address, /* IN/OUT */ - vm32_size_t *size, /* OUT */ - natural_t *depth, /* IN/OUT */ - vm_region_recurse_info_t info32, /* IN/OUT */ - mach_msg_type_number_t *infoCnt) /* IN/OUT */ + vm_map_t map, + vm32_offset_t *address, /* IN/OUT */ + vm32_size_t *size, /* OUT */ + natural_t *depth, /* IN/OUT */ + vm_region_recurse_info_t info32, /* IN/OUT */ + mach_msg_type_number_t *infoCnt) /* IN/OUT */ { vm_region_submap_info_data_64_t info64; vm_region_submap_info_t info; - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) + if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) { return KERN_INVALID_ARGUMENT; + } + - map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; info = (vm_region_submap_info_t)info32; *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64; - kr = vm_map_region_recurse_64(map, &map_addr,&map_size, - depth, &info64, infoCnt); + kr = vm_map_region_recurse_64(map, &map_addr, &map_size, + depth, &info64, infoCnt); info->protection = info64.protection; info->max_protection = info64.max_protection; info->inheritance = info64.inheritance; info->offset = (uint32_t)info64.offset; /* trouble-maker */ - info->user_tag = info64.user_tag; - info->pages_resident = info64.pages_resident; - info->pages_shared_now_private = info64.pages_shared_now_private; - info->pages_swapped_out = info64.pages_swapped_out; - info->pages_dirtied = info64.pages_dirtied; - info->ref_count = info64.ref_count; - info->shadow_depth = info64.shadow_depth; - info->external_pager = info64.external_pager; - info->share_mode = info64.share_mode; + info->user_tag = info64.user_tag; + info->pages_resident = info64.pages_resident; + info->pages_shared_now_private = info64.pages_shared_now_private; + info->pages_swapped_out = info64.pages_swapped_out; + info->pages_dirtied = info64.pages_dirtied; + info->ref_count = info64.ref_count; + info->shadow_depth = info64.shadow_depth; + info->external_pager = info64.external_pager; + info->share_mode = info64.share_mode; info->is_submap = info64.is_submap; info->behavior = info64.behavior; info->object_id = info64.object_id; - info->user_wired_count = info64.user_wired_count; + info->user_wired_count = info64.user_wired_count; *address = CAST_DOWN_EXPLICIT(vm32_address_t, map_addr); *size = CAST_DOWN_EXPLICIT(vm32_size_t, map_size); *infoCnt = VM_REGION_SUBMAP_INFO_COUNT; - if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM32_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } kern_return_t vm32_purgable_control( - vm_map_t map, - vm32_offset_t address, - vm_purgable_t control, - int *state) + vm_map_t map, + vm32_offset_t 
address, + vm_purgable_t control, + int *state) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } return vm_map_purgable_control(map, - vm_map_trunc_page(address, PAGE_MASK), - control, - state); + vm_map_trunc_page(address, PAGE_MASK), + control, + state); } - + kern_return_t vm32_map_page_query( - vm_map_t map, - vm32_offset_t offset, - int *disposition, - int *ref_count) + vm_map_t map, + vm32_offset_t offset, + int *disposition, + int *ref_count) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } return vm_map_page_query_internal( map, @@ -508,12 +519,12 @@ vm32_map_page_query( kern_return_t vm32_make_memory_entry_64( - vm_map_t target_map, - memory_object_size_t *size, + vm_map_t target_map, + memory_object_size_t *size, memory_object_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_handle) + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_handle) { // use the existing entrypoint return _mach_make_memory_entry(target_map, size, offset, permission, object_handle, parent_handle); @@ -521,51 +532,53 @@ vm32_make_memory_entry_64( kern_return_t vm32_make_memory_entry( - vm_map_t target_map, - vm32_size_t *size, - vm32_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_entry) -{ - memory_object_size_t mo_size; - kern_return_t kr; - + vm_map_t target_map, + vm32_size_t *size, + vm32_offset_t offset, + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_entry) +{ + memory_object_size_t mo_size; + kern_return_t kr; + mo_size = (memory_object_size_t)*size; - kr = _mach_make_memory_entry(target_map, &mo_size, - (memory_object_offset_t)offset, permission, object_handle, - parent_entry); + kr = _mach_make_memory_entry(target_map, &mo_size, + (memory_object_offset_t)offset, permission, object_handle, + parent_entry); *size = CAST_DOWN_EXPLICIT(vm32_size_t, mo_size); return kr; } kern_return_t vm32__task_wire( - vm_map_t map, - boolean_t must_wire) + vm_map_t map, + boolean_t must_wire) { - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } vm_map_lock(map); map->wiring_required = (must_wire == TRUE); vm_map_unlock(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } kern_return_t vm32__map_exec_lockdown( - vm_map_t map) + vm_map_t map) { - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } vm_map_lock(map); map->map_disallow_new_exec = TRUE; vm_map_unlock(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } diff --git a/osfmk/vm/vm_apple_protect.c b/osfmk/vm/vm_apple_protect.c index 508211e68..416e90fa2 100644 --- a/osfmk/vm/vm_apple_protect.c +++ b/osfmk/vm/vm_apple_protect.c @@ -2,7 +2,7 @@ * Copyright (c) 2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
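The vm32 shims above all follow one shape: widen the 32-bit argument, call the mach_vm 64-bit entry point, then narrow the result with CAST_DOWN_EXPLICIT. A hedged user-space rendering of that narrowing step; the macro below is an illustrative stand-in for CAST_DOWN_EXPLICIT, not its kernel definition.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for CAST_DOWN_EXPLICIT(vm32_offset_t, x):
 * an explicit, intentional truncation to the 32-bit interface type. */
#define CAST_DOWN_EXPLICIT_U32(x) ((uint32_t)(x))

int main(void)
{
    uint64_t maddr = 0x00000001f0004000ull;  /* hypothetical 64-bit result */
    uint32_t addr  = CAST_DOWN_EXPLICIT_U32(maddr);
    /* The shims rely on results fitting in the 32-bit space; vm32_region()
     * above even returns KERN_INVALID_ADDRESS when the mapped region ends
     * beyond VM32_MAX_ADDRESS. */
    printf("64-bit 0x%llx -> 32-bit 0x%x\n",
           (unsigned long long)maddr, addr);
    return 0;
}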
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -59,12 +60,12 @@ #include -/* - * APPLE PROTECT MEMORY PAGER +/* + * APPLE PROTECT MEMORY PAGER * * This external memory manager (EMM) handles memory from the encrypted * sections of some executables protected by the DSMOS kernel extension. - * + * * It mostly handles page-in requests (from memory_object_data_request()) by * getting the encrypted data from its backing VM object, itself backed by * the encrypted file, decrypting it and providing it to VM. @@ -82,35 +83,35 @@ void apple_protect_pager_reference(memory_object_t mem_obj); void apple_protect_pager_deallocate(memory_object_t mem_obj); kern_return_t apple_protect_pager_init(memory_object_t mem_obj, - memory_object_control_t control, - memory_object_cluster_size_t pg_size); + memory_object_control_t control, + memory_object_cluster_size_t pg_size); kern_return_t apple_protect_pager_terminate(memory_object_t mem_obj); kern_return_t apple_protect_pager_data_request(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, - vm_prot_t protection_required, - memory_object_fault_info_t fault_info); + memory_object_offset_t offset, + memory_object_cluster_size_t length, + vm_prot_t protection_required, + memory_object_fault_info_t fault_info); kern_return_t apple_protect_pager_data_return(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt, - memory_object_offset_t *resid_offset, - int *io_error, - boolean_t dirty, - boolean_t kernel_copy, - int upl_flags); + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt, + memory_object_offset_t *resid_offset, + int *io_error, + boolean_t dirty, + boolean_t kernel_copy, + int upl_flags); kern_return_t apple_protect_pager_data_initialize(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt); + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt); kern_return_t apple_protect_pager_data_unlock(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t size, - vm_prot_t desired_access); + memory_object_offset_t offset, + memory_object_size_t size, + vm_prot_t desired_access); kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t length, - vm_sync_t sync_flags); + memory_object_offset_t offset, + memory_object_size_t length, + vm_sync_t sync_flags); kern_return_t apple_protect_pager_map(memory_object_t mem_obj, - vm_prot_t prot); + vm_prot_t prot); kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj); #define CRYPT_INFO_DEBUG 0 @@ -146,27 +147,27 @@ typedef struct apple_protect_pager { struct memory_object ap_pgr_hdr; /* pager-specific data */ - queue_chain_t pager_queue; /* next & prev pagers */ - unsigned int ref_count; /* reference count */ - boolean_t is_ready; /* is this pager ready ? */ - boolean_t is_mapped; /* is this mem_obj mapped ? 
*/ - vm_object_t backing_object; /* VM obj w/ encrypted data */ - vm_object_offset_t backing_offset; - vm_object_offset_t crypto_backing_offset; /* for key... */ - vm_object_offset_t crypto_start; - vm_object_offset_t crypto_end; + queue_chain_t pager_queue; /* next & prev pagers */ + struct os_refcnt ref_count; /* reference count */ + boolean_t is_ready; /* is this pager ready ? */ + boolean_t is_mapped; /* is this mem_obj mapped ? */ + vm_object_t backing_object; /* VM obj w/ encrypted data */ + vm_object_offset_t backing_offset; + vm_object_offset_t crypto_backing_offset; /* for key... */ + vm_object_offset_t crypto_start; + vm_object_offset_t crypto_end; struct pager_crypt_info *crypt_info; } *apple_protect_pager_t; -#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL) +#define APPLE_PROTECT_PAGER_NULL ((apple_protect_pager_t) NULL) /* * List of memory objects managed by this EMM. * The list is protected by the "apple_protect_pager_lock" lock. */ -int apple_protect_pager_count = 0; /* number of pagers */ -int apple_protect_pager_count_mapped = 0; /* number of unmapped pagers */ +int apple_protect_pager_count = 0; /* number of pagers */ +int apple_protect_pager_count_mapped = 0; /* number of unmapped pagers */ queue_head_t apple_protect_pager_queue; -decl_lck_mtx_data(,apple_protect_pager_lock) +decl_lck_mtx_data(, apple_protect_pager_lock) /* * Maximum number of unmapped pagers we're willing to keep around. @@ -182,9 +183,9 @@ int apple_protect_pager_num_trim_max = 0; int apple_protect_pager_num_trim_total = 0; -lck_grp_t apple_protect_pager_lck_grp; -lck_grp_attr_t apple_protect_pager_lck_grp_attr; -lck_attr_t apple_protect_pager_lck_attr; +lck_grp_t apple_protect_pager_lck_grp; +lck_grp_attr_t apple_protect_pager_lck_grp_attr; +lck_attr_t apple_protect_pager_lck_attr; /* internal prototypes */ @@ -198,22 +199,22 @@ apple_protect_pager_t apple_protect_pager_create( apple_protect_pager_t apple_protect_pager_lookup(memory_object_t mem_obj); void apple_protect_pager_dequeue(apple_protect_pager_t pager); void apple_protect_pager_deallocate_internal(apple_protect_pager_t pager, - boolean_t locked); + boolean_t locked); void apple_protect_pager_terminate_internal(apple_protect_pager_t pager); void apple_protect_pager_trim(void); #if DEBUG int apple_protect_pagerdebug = 0; -#define PAGER_ALL 0xffffffff -#define PAGER_INIT 0x00000001 -#define PAGER_PAGEIN 0x00000002 - -#define PAGER_DEBUG(LEVEL, A) \ - MACRO_BEGIN \ - if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \ - printf A; \ - } \ +#define PAGER_ALL 0xffffffff +#define PAGER_INIT 0x00000001 +#define PAGER_PAGEIN 0x00000002 + +#define PAGER_DEBUG(LEVEL, A) \ + MACRO_BEGIN \ + if ((apple_protect_pagerdebug & LEVEL)==LEVEL) { \ + printf A; \ + } \ MACRO_END #else #define PAGER_DEBUG(LEVEL, A) @@ -237,23 +238,24 @@ apple_protect_pager_bootstrap(void) */ kern_return_t apple_protect_pager_init( - memory_object_t mem_obj, - memory_object_control_t control, + memory_object_t mem_obj, + memory_object_control_t control, #if !DEBUG __unused #endif memory_object_cluster_size_t pg_size) { - apple_protect_pager_t pager; - kern_return_t kr; + apple_protect_pager_t pager; + kern_return_t kr; memory_object_attr_info_data_t attributes; PAGER_DEBUG(PAGER_ALL, - ("apple_protect_pager_init: %p, %p, %x\n", - mem_obj, control, pg_size)); + ("apple_protect_pager_init: %p, %p, %x\n", + mem_obj, control, pg_size)); - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } pager = 
apple_protect_pager_lookup(mem_obj); @@ -268,13 +270,14 @@ apple_protect_pager_init( attributes.temporary = TRUE; kr = memory_object_change_attributes( - control, - MEMORY_OBJECT_ATTRIBUTE_INFO, - (memory_object_info_t) &attributes, - MEMORY_OBJECT_ATTR_INFO_COUNT); - if (kr != KERN_SUCCESS) + control, + MEMORY_OBJECT_ATTRIBUTE_INFO, + (memory_object_info_t) &attributes, + MEMORY_OBJECT_ATTR_INFO_COUNT); + if (kr != KERN_SUCCESS) { panic("apple_protect_pager_init: " - "memory_object_change_attributes() failed"); + "memory_object_change_attributes() failed"); + } #if CONFIG_SECLUDED_MEMORY if (secluded_for_filecache) { @@ -295,14 +298,14 @@ apple_protect_pager_init( */ kern_return_t apple_protect_pager_data_return( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt, - __unused memory_object_offset_t *resid_offset, - __unused int *io_error, - __unused boolean_t dirty, - __unused boolean_t kernel_copy, - __unused int upl_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt, + __unused memory_object_offset_t *resid_offset, + __unused int *io_error, + __unused boolean_t dirty, + __unused boolean_t kernel_copy, + __unused int upl_flags) { panic("apple_protect_pager_data_return: should never get called"); return KERN_FAILURE; @@ -310,9 +313,9 @@ apple_protect_pager_data_return( kern_return_t apple_protect_pager_data_initialize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt) { panic("apple_protect_pager_data_initialize: should never get called"); return KERN_FAILURE; @@ -320,10 +323,10 @@ apple_protect_pager_data_initialize( kern_return_t apple_protect_pager_data_unlock( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t size, - __unused vm_prot_t desired_access) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t size, + __unused vm_prot_t desired_access) { return KERN_FAILURE; } @@ -334,35 +337,35 @@ apple_protect_pager_data_unlock( * Handles page-in requests from VM. 
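The PAGER_DEBUG macro reindented earlier gates printf-style output on a level mask, and its MACRO_BEGIN/MACRO_END wrapper keeps the expansion statement-safe. A user-space equivalent, substituting a plain do/while(0) for the kernel's MACRO_BEGIN/MACRO_END:

#include <stdio.h>

#define PAGER_ALL    0xffffffff
#define PAGER_INIT   0x00000001
#define PAGER_PAGEIN 0x00000002

static int pager_debug = PAGER_PAGEIN;

/* A must carry its own parentheses, e.g. ("msg %d\n", x), so the
 * expansion `printf A` forms a complete call. */
#define PAGER_DEBUG(LEVEL, A)                        \
    do {                                             \
        if ((pager_debug & (LEVEL)) == (LEVEL)) {    \
            printf A;                                \
        }                                            \
    } while (0)

int main(void)
{
    PAGER_DEBUG(PAGER_PAGEIN, ("page-in of offset 0x%x\n", 0x4000));
    PAGER_DEBUG(PAGER_INIT, ("not printed: INIT bit is clear\n"));
    return 0;
}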
*/ int apple_protect_pager_data_request_debug = 0; -kern_return_t +kern_return_t apple_protect_pager_data_request( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t length, #if !DEBUG __unused #endif - vm_prot_t protection_required, + vm_prot_t protection_required, memory_object_fault_info_t mo_fault_info) { - apple_protect_pager_t pager; - memory_object_control_t mo_control; - upl_t upl; - int upl_flags; - upl_size_t upl_size; - upl_page_info_t *upl_pl; - unsigned int pl_count; - vm_object_t src_top_object, src_page_object, dst_object; - kern_return_t kr, retval; - vm_offset_t src_vaddr, dst_vaddr; - vm_offset_t cur_offset; - vm_offset_t offset_in_page; - kern_return_t error_code; - vm_prot_t prot; - vm_page_t src_page, top_page; - int interruptible; - struct vm_object_fault_info fault_info; - int ret; + apple_protect_pager_t pager; + memory_object_control_t mo_control; + upl_t upl; + int upl_flags; + upl_size_t upl_size; + upl_page_info_t *upl_pl; + unsigned int pl_count; + vm_object_t src_top_object, src_page_object, dst_object; + kern_return_t kr, retval; + vm_offset_t src_vaddr, dst_vaddr; + vm_offset_t cur_offset; + vm_offset_t offset_in_page; + kern_return_t error_code; + vm_prot_t prot; + vm_page_t src_page, top_page; + int interruptible; + struct vm_object_fault_info fault_info; + int ret; PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required)); @@ -380,7 +383,7 @@ apple_protect_pager_data_request( pager = apple_protect_pager_lookup(mem_obj); assert(pager->is_ready); - assert(pager->ref_count > 1); /* pager is alive and mapped */ + assert(os_ref_get_count(&pager->ref_count) > 1); /* pager is alive and mapped */ PAGER_DEBUG(PAGER_PAGEIN, ("apple_protect_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager)); @@ -394,15 +397,15 @@ apple_protect_pager_data_request( upl_size = length; upl_flags = - UPL_RET_ONLY_ABSENT | - UPL_SET_LITE | - UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ - UPL_SET_INTERNAL; + UPL_RET_ONLY_ABSENT | + UPL_SET_LITE | + UPL_NO_SYNC | + UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ + UPL_SET_INTERNAL; pl_count = 0; kr = memory_object_upl_request(mo_control, - offset, upl_size, - &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY); + offset, upl_size, + &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY); if (kr != KERN_SUCCESS) { retval = kr; goto done; @@ -411,7 +414,7 @@ apple_protect_pager_data_request( assert(dst_object != VM_OBJECT_NULL); /* - * We'll map the encrypted data in the kernel address space from the + * We'll map the encrypted data in the kernel address space from the * backing VM object (itself backed by the encrypted file via * the vnode pager). */ @@ -425,8 +428,8 @@ apple_protect_pager_data_request( upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl); pl_count = length / PAGE_SIZE; for (cur_offset = 0; - retval == KERN_SUCCESS && cur_offset < length; - cur_offset += PAGE_SIZE) { + retval == KERN_SUCCESS && cur_offset < length; + cur_offset += PAGE_SIZE) { ppnum_t dst_pnum; if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) { @@ -439,25 +442,25 @@ apple_protect_pager_data_request( * virtual address space. * We already hold a reference on the src_top_object. 
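The page-in path that follows retries vm_fault_page() when it reports VM_FAULT_RETRY, or after waiting out a memory shortage, before giving up on harder errors. A compact sketch of that retry shape under stated assumptions: fault_once() and wait_for_page() are hypothetical stand-ins that simulate transient failures, not kernel calls.

#include <stdio.h>

enum fault_result { FAULT_SUCCESS, FAULT_RETRY, FAULT_SHORTAGE, FAULT_ERROR };

/* Hypothetical stand-in: pretend the first two attempts hit transient
 * conditions, as vm_fault_page() can. */
static enum fault_result fault_once(int attempt)
{
    if (attempt == 0) return FAULT_RETRY;
    if (attempt == 1) return FAULT_SHORTAGE;
    return FAULT_SUCCESS;
}

static int wait_for_page(void) { return 1; /* interruptible wait succeeded */ }

int main(void)
{
    int attempt = 0;
    for (;;) {
        enum fault_result r = fault_once(attempt++);
        if (r == FAULT_SUCCESS)
            break;
        if (r == FAULT_RETRY)
            continue;                       /* lock was dropped: just retry */
        if (r == FAULT_SHORTAGE && wait_for_page())
            continue;                       /* page freed up: retry */
        fprintf(stderr, "fault failed\n");
        return 1;
    }
    printf("page obtained after %d attempt(s)\n", attempt);
    return 0;
}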
*/ - retry_src_fault: +retry_src_fault: vm_object_lock(src_top_object); vm_object_paging_begin(src_top_object); error_code = 0; prot = VM_PROT_READ; src_page = VM_PAGE_NULL; kr = vm_fault_page(src_top_object, - pager->backing_offset + offset + cur_offset, - VM_PROT_READ, - FALSE, - FALSE, /* src_page not looked up */ - &prot, - &src_page, - &top_page, - NULL, - &error_code, - FALSE, - FALSE, - &fault_info); + pager->backing_offset + offset + cur_offset, + VM_PROT_READ, + FALSE, + FALSE, /* src_page not looked up */ + &prot, + &src_page, + &top_page, + NULL, + &error_code, + FALSE, + FALSE, + &fault_info); switch (kr) { case VM_FAULT_SUCCESS: break; @@ -467,7 +470,7 @@ apple_protect_pager_data_request( if (vm_page_wait(interruptible)) { goto retry_src_fault; } - /* fall thru */ + /* fall thru */ case VM_FAULT_INTERRUPTED: retval = MACH_SEND_INTERRUPTED; goto done; @@ -475,7 +478,7 @@ apple_protect_pager_data_request( /* success but no VM page: fail */ vm_object_paging_end(src_top_object); vm_object_unlock(src_top_object); - /*FALLTHROUGH*/ + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: /* the page is not there ! */ if (error_code) { @@ -486,18 +489,17 @@ apple_protect_pager_data_request( goto done; default: panic("apple_protect_pager_data_request: " - "vm_fault_page() unexpected error 0x%x\n", - kr); + "vm_fault_page() unexpected error 0x%x\n", + kr); } assert(src_page != VM_PAGE_NULL); assert(src_page->vmp_busy); if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { - vm_page_lockspin_queues(); if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { - vm_page_speculate(src_page, FALSE); + vm_page_speculate(src_page, FALSE); } vm_page_unlock_queues(); } @@ -507,21 +509,21 @@ apple_protect_pager_data_request( * and destination physical pages. */ dst_pnum = (ppnum_t) - upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); - assert(dst_pnum != 0); + upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); + assert(dst_pnum != 0); #if __x86_64__ src_vaddr = (vm_map_offset_t) - PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) - << PAGE_SHIFT); + PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) + << PAGE_SHIFT); dst_vaddr = (vm_map_offset_t) - PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); + PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); #elif __arm__ || __arm64__ src_vaddr = (vm_map_offset_t) - phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) - << PAGE_SHIFT); + phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) + << PAGE_SHIFT); dst_vaddr = (vm_map_offset_t) - phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT); + phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT); #else #error "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..." src_vaddr = 0; @@ -541,11 +543,11 @@ apple_protect_pager_data_request( * ... and transfer the results to the destination page. */ UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, - src_page->vmp_cs_validated); + src_page->vmp_cs_validated); UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, - src_page->vmp_cs_tainted); + src_page->vmp_cs_tainted); UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, - src_page->vmp_cs_nx); + src_page->vmp_cs_nx); /* * page_decrypt() might access a mapped file, so let's release @@ -563,42 +565,42 @@ apple_protect_pager_data_request( * into the destination page. 
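The loop that follows walks each 4 KB subpage and either copies it verbatim or decrypts it, depending on whether its absolute offset falls inside [crypto_start, crypto_end). A minimal model of just that range test, with the offsets as plain integers and the example bounds chosen arbitrarily:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the test in the loop below: a subpage is encrypted iff
 * crypto_start <= off < crypto_end. */
static int in_crypto_range(uint64_t off, uint64_t crypto_start, uint64_t crypto_end)
{
    return off >= crypto_start && off < crypto_end;
}

int main(void)
{
    uint64_t crypto_start = 0x4000, crypto_end = 0x10000;
    for (uint64_t off = 0; off < 0x14000; off += 4096) {
        printf("subpage 0x%05llx: %s\n", (unsigned long long)off,
               in_crypto_range(off, crypto_start, crypto_end)
               ? "DECRYPT" : "COPY");
    }
    return 0;
}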
*/ for (offset_in_page = 0; - offset_in_page < PAGE_SIZE; - offset_in_page += 4096) { + offset_in_page < PAGE_SIZE; + offset_in_page += 4096) { if (offset + cur_offset + offset_in_page < pager->crypto_start || offset + cur_offset + offset_in_page >= pager->crypto_end) { /* not encrypted: just copy */ bcopy((const char *)(src_vaddr + - offset_in_page), - (char *)(dst_vaddr + offset_in_page), - 4096); + offset_in_page), + (char *)(dst_vaddr + offset_in_page), + 4096); if (apple_protect_pager_data_request_debug) { printf("apple_protect_data_request" - "(%p,0x%llx+0x%llx+0x%04llx): " - "out of crypto range " - "[0x%llx:0x%llx]: " - "COPY [0x%016llx 0x%016llx] " - "code_signed=%d " - "cs_validated=%d " - "cs_tainted=%d " - "cs_nx=%d\n", - pager, - offset, - (uint64_t) cur_offset, - (uint64_t) offset_in_page, - pager->crypto_start, - pager->crypto_end, - *(uint64_t *)(dst_vaddr+ - offset_in_page), - *(uint64_t *)(dst_vaddr+ - offset_in_page+8), - src_page_object->code_signed, - src_page->vmp_cs_validated, - src_page->vmp_cs_tainted, - src_page->vmp_cs_nx); + "(%p,0x%llx+0x%llx+0x%04llx): " + "out of crypto range " + "[0x%llx:0x%llx]: " + "COPY [0x%016llx 0x%016llx] " + "code_signed=%d " + "cs_validated=%d " + "cs_tainted=%d " + "cs_nx=%d\n", + pager, + offset, + (uint64_t) cur_offset, + (uint64_t) offset_in_page, + pager->crypto_start, + pager->crypto_end, + *(uint64_t *)(dst_vaddr + + offset_in_page), + *(uint64_t *)(dst_vaddr + + offset_in_page + 8), + src_page_object->code_signed, + src_page->vmp_cs_validated, + src_page->vmp_cs_tainted, + src_page->vmp_cs_nx); } ret = 0; continue; @@ -607,46 +609,46 @@ apple_protect_pager_data_request( (const void *)(src_vaddr + offset_in_page), (void *)(dst_vaddr + offset_in_page), ((pager->crypto_backing_offset - - pager->crypto_start) + /* XXX ? */ - offset + - cur_offset + - offset_in_page), + pager->crypto_start) + /* XXX ? 
*/ + offset + + cur_offset + + offset_in_page), pager->crypt_info->crypt_ops); if (apple_protect_pager_data_request_debug) { printf("apple_protect_data_request" - "(%p,0x%llx+0x%llx+0x%04llx): " - "in crypto range [0x%llx:0x%llx]: " - "DECRYPT offset 0x%llx=" - "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)" - "[0x%016llx 0x%016llx] " - "code_signed=%d " - "cs_validated=%d " - "cs_tainted=%d " - "cs_nx=%d " - "ret=0x%x\n", - pager, - offset, - (uint64_t) cur_offset, - (uint64_t) offset_in_page, - pager->crypto_start, pager->crypto_end, - ((pager->crypto_backing_offset - - pager->crypto_start) + - offset + - cur_offset + - offset_in_page), - pager->crypto_backing_offset, - pager->crypto_start, - offset, - (uint64_t) cur_offset, - (uint64_t) offset_in_page, - *(uint64_t *)(dst_vaddr+offset_in_page), - *(uint64_t *)(dst_vaddr+offset_in_page+8), - src_page_object->code_signed, - src_page->vmp_cs_validated, - src_page->vmp_cs_tainted, - src_page->vmp_cs_nx, - ret); + "(%p,0x%llx+0x%llx+0x%04llx): " + "in crypto range [0x%llx:0x%llx]: " + "DECRYPT offset 0x%llx=" + "(0x%llx-0x%llx+0x%llx+0x%llx+0x%04llx)" + "[0x%016llx 0x%016llx] " + "code_signed=%d " + "cs_validated=%d " + "cs_tainted=%d " + "cs_nx=%d " + "ret=0x%x\n", + pager, + offset, + (uint64_t) cur_offset, + (uint64_t) offset_in_page, + pager->crypto_start, pager->crypto_end, + ((pager->crypto_backing_offset - + pager->crypto_start) + + offset + + cur_offset + + offset_in_page), + pager->crypto_backing_offset, + pager->crypto_start, + offset, + (uint64_t) cur_offset, + (uint64_t) offset_in_page, + *(uint64_t *)(dst_vaddr + offset_in_page), + *(uint64_t *)(dst_vaddr + offset_in_page + 8), + src_page_object->code_signed, + src_page->vmp_cs_validated, + src_page->vmp_cs_tainted, + src_page->vmp_cs_nx, + ret); } if (ret) { break; @@ -698,7 +700,7 @@ done: if (retval != KERN_SUCCESS) { upl_abort(upl, 0); if (retval == KERN_ABORTED) { - wait_result_t wait_result; + wait_result_t wait_result; /* * We aborted the fault and did not provide @@ -718,7 +720,7 @@ done: wait_result = assert_wait_timeout( (event_t) apple_protect_pager_data_request, THREAD_UNINT, - 10000, /* 10ms */ + 10000, /* 10ms */ NSEC_PER_USEC); assert(wait_result == THREAD_WAITING); wait_result = thread_block(THREAD_CONTINUE_NULL); @@ -726,9 +728,9 @@ done: } } else { boolean_t empty; - upl_commit_range(upl, 0, upl->size, - UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, - upl_pl, pl_count, &empty); + upl_commit_range(upl, 0, upl->size, + UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, + upl_pl, pl_count, &empty); } /* and deallocate the UPL */ @@ -750,15 +752,14 @@ done: */ void apple_protect_pager_reference( - memory_object_t mem_obj) -{ - apple_protect_pager_t pager; + memory_object_t mem_obj) +{ + apple_protect_pager_t pager; pager = apple_protect_pager_lookup(mem_obj); lck_mtx_lock(&apple_protect_pager_lock); - assert(pager->ref_count > 0); - pager->ref_count++; + os_ref_retain_locked(&pager->ref_count); lck_mtx_unlock(&apple_protect_pager_lock); } @@ -777,12 +778,12 @@ apple_protect_pager_dequeue( assert(!pager->is_mapped); queue_remove(&apple_protect_pager_queue, - pager, - apple_protect_pager_t, - pager_queue); + pager, + apple_protect_pager_t, + pager_queue); pager->pager_queue.next = NULL; pager->pager_queue.prev = NULL; - + apple_protect_pager_count--; } @@ -814,9 +815,9 @@ apple_protect_pager_terminate_internal( /* one less pager using this "pager_crypt_info" */ #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: deallocate %p ref %d\n", - __FUNCTION__, - 
pager->crypt_info, - pager->crypt_info->crypt_refcnt); + __FUNCTION__, + pager->crypt_info, + pager->crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ crypt_info_deallocate(pager->crypt_info); pager->crypt_info = NULL; @@ -835,18 +836,18 @@ apple_protect_pager_terminate_internal( */ void apple_protect_pager_deallocate_internal( - apple_protect_pager_t pager, - boolean_t locked) + apple_protect_pager_t pager, + boolean_t locked) { - boolean_t needs_trimming; - int count_unmapped; + boolean_t needs_trimming; + int count_unmapped; - if (! locked) { + if (!locked) { lck_mtx_lock(&apple_protect_pager_lock); } - count_unmapped = (apple_protect_pager_count - - apple_protect_pager_count_mapped); + count_unmapped = (apple_protect_pager_count - + apple_protect_pager_count_mapped); if (count_unmapped > apple_protect_pager_cache_limit) { /* we have too many unmapped pagers: trim some */ needs_trimming = TRUE; @@ -855,9 +856,9 @@ apple_protect_pager_deallocate_internal( } /* drop a reference on this pager */ - pager->ref_count--; + os_ref_count_t ref_count = os_ref_release_locked(&pager->ref_count); - if (pager->ref_count == 1) { + if (ref_count == 1) { /* * Only the "named" reference is left, which means that * no one is really holding on to this pager anymore. @@ -867,7 +868,7 @@ apple_protect_pager_deallocate_internal( /* the pager is all ours: no need for the lock now */ lck_mtx_unlock(&apple_protect_pager_lock); apple_protect_pager_terminate_internal(pager); - } else if (pager->ref_count == 0) { + } else if (ref_count == 0) { /* * Dropped the existence reference; the memory object has * been terminated. Do some final cleanup and release the @@ -878,7 +879,7 @@ apple_protect_pager_deallocate_internal( memory_object_control_deallocate(pager->ap_pgr_hdr.mo_control); pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; } - kfree(pager, sizeof (*pager)); + kfree(pager, sizeof(*pager)); pager = APPLE_PROTECT_PAGER_NULL; } else { /* there are still plenty of references: keep going... 
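Amid the reindentation, the substantive change in this file is the conversion of the pager's bare ref_count int to struct os_refcnt: os_ref_retain_locked() asserts the object is still live, and os_ref_release_locked() returns the post-release count that deallocate_internal() inspects (1 means only the "named" existence reference remains; 0 means the pager can be freed). A user-space approximation with C11 atomics; the semantics are modeled, not the kernel implementation.

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

struct refcnt { atomic_uint count; };

static void ref_retain(struct refcnt *r)
{
    unsigned old = atomic_fetch_add(&r->count, 1);
    assert(old > 0);                 /* retaining a dead object is a bug */
}

/* Returns the count after the release, like os_ref_release_locked(). */
static unsigned ref_release(struct refcnt *r)
{
    unsigned old = atomic_fetch_sub(&r->count, 1);
    assert(old > 0);
    return old - 1;
}

int main(void)
{
    struct refcnt rc = { 2 };   /* existence ref + caller ref, as in _create() */
    (void)ref_retain;           /* retain path unused in this tiny demo */
    if (ref_release(&rc) == 1)
        printf("only the named reference left: terminate the pager\n");
    if (ref_release(&rc) == 0)
        printf("existence reference dropped: free the pager\n");
    return 0;
}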
*/ @@ -899,9 +900,9 @@ apple_protect_pager_deallocate_internal( */ void apple_protect_pager_deallocate( - memory_object_t mem_obj) + memory_object_t mem_obj) { - apple_protect_pager_t pager; + apple_protect_pager_t pager; PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_deallocate: %p\n", mem_obj)); pager = apple_protect_pager_lookup(mem_obj); @@ -916,7 +917,7 @@ apple_protect_pager_terminate( #if !DEBUG __unused #endif - memory_object_t mem_obj) + memory_object_t mem_obj) { PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_terminate: %p\n", mem_obj)); @@ -928,10 +929,10 @@ apple_protect_pager_terminate( */ kern_return_t apple_protect_pager_synchronize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t length, - __unused vm_sync_t sync_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t length, + __unused vm_sync_t sync_flags) { panic("apple_protect_pager_synchronize: memory_object_synchronize no longer supported\n"); return KERN_FAILURE; @@ -947,10 +948,10 @@ apple_protect_pager_synchronize( */ kern_return_t apple_protect_pager_map( - memory_object_t mem_obj, - __unused vm_prot_t prot) + memory_object_t mem_obj, + __unused vm_prot_t prot) { - apple_protect_pager_t pager; + apple_protect_pager_t pager; PAGER_DEBUG(PAGER_ALL, ("apple_protect_pager_map: %p\n", mem_obj)); @@ -958,7 +959,7 @@ apple_protect_pager_map( lck_mtx_lock(&apple_protect_pager_lock); assert(pager->is_ready); - assert(pager->ref_count > 0); /* pager is alive */ + assert(os_ref_get_count(&pager->ref_count) > 0); /* pager is alive */ if (pager->is_mapped == FALSE) { /* * First mapping of this pager: take an extra reference @@ -966,7 +967,7 @@ apple_protect_pager_map( * are removed. 
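apple_protect_pager_map() above takes one extra reference on the first mapping and flips is_mapped; last_unmap drops it and updates the unmapped-pager count that drives cache trimming. A small model of that accounting, with the cache limit as an illustrative constant standing in for apple_protect_pager_cache_limit:

#include <stdio.h>

#define CACHE_LIMIT 10   /* illustrative stand-in for the real cache limit */

static int pager_count = 12;          /* total pagers on the queue */
static int pager_count_mapped = 12;   /* of those, currently mapped */

/* Called when a pager's last mapping goes away. */
static void on_last_unmap(void)
{
    pager_count_mapped--;
    int count_unmapped = pager_count - pager_count_mapped;
    if (count_unmapped > CACHE_LIMIT)
        printf("too many unmapped pagers (%d): trim some\n", count_unmapped);
    else
        printf("%d unmapped pager(s) cached\n", count_unmapped);
}

int main(void)
{
    for (int i = 0; i < 12; i++)
        on_last_unmap();
    return 0;
}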
*/ pager->is_mapped = TRUE; - pager->ref_count++; + os_ref_retain_locked(&pager->ref_count); apple_protect_pager_count_mapped++; } lck_mtx_unlock(&apple_protect_pager_lock); @@ -981,13 +982,13 @@ apple_protect_pager_map( */ kern_return_t apple_protect_pager_last_unmap( - memory_object_t mem_obj) + memory_object_t mem_obj) { - apple_protect_pager_t pager; - int count_unmapped; + apple_protect_pager_t pager; + int count_unmapped; PAGER_DEBUG(PAGER_ALL, - ("apple_protect_pager_last_unmap: %p\n", mem_obj)); + ("apple_protect_pager_last_unmap: %p\n", mem_obj)); pager = apple_protect_pager_lookup(mem_obj); @@ -999,7 +1000,7 @@ apple_protect_pager_last_unmap( */ apple_protect_pager_count_mapped--; count_unmapped = (apple_protect_pager_count - - apple_protect_pager_count_mapped); + apple_protect_pager_count_mapped); if (count_unmapped > apple_protect_pager_count_unmapped_max) { apple_protect_pager_count_unmapped_max = count_unmapped; } @@ -1009,7 +1010,7 @@ apple_protect_pager_last_unmap( } else { lck_mtx_unlock(&apple_protect_pager_lock); } - + return KERN_SUCCESS; } @@ -1019,31 +1020,31 @@ apple_protect_pager_last_unmap( */ apple_protect_pager_t apple_protect_pager_lookup( - memory_object_t mem_obj) + memory_object_t mem_obj) { - apple_protect_pager_t pager; + apple_protect_pager_t pager; assert(mem_obj->mo_pager_ops == &apple_protect_pager_ops); pager = (apple_protect_pager_t)(uintptr_t) mem_obj; - assert(pager->ref_count > 0); + assert(os_ref_get_count(&pager->ref_count) > 0); return pager; } apple_protect_pager_t apple_protect_pager_create( - vm_object_t backing_object, - vm_object_offset_t backing_offset, - vm_object_offset_t crypto_backing_offset, + vm_object_t backing_object, + vm_object_offset_t backing_offset, + vm_object_offset_t crypto_backing_offset, struct pager_crypt_info *crypt_info, - vm_object_offset_t crypto_start, - vm_object_offset_t crypto_end) + vm_object_offset_t crypto_start, + vm_object_offset_t crypto_end) { - apple_protect_pager_t pager, pager2; - memory_object_control_t control; - kern_return_t kr; - struct pager_crypt_info *old_crypt_info; + apple_protect_pager_t pager, pager2; + memory_object_control_t control; + kern_return_t kr; + struct pager_crypt_info *old_crypt_info; - pager = (apple_protect_pager_t) kalloc(sizeof (*pager)); + pager = (apple_protect_pager_t) kalloc(sizeof(*pager)); if (pager == APPLE_PROTECT_PAGER_NULL) { return APPLE_PROTECT_PAGER_NULL; } @@ -1060,8 +1061,7 @@ apple_protect_pager_create( pager->ap_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; pager->is_ready = FALSE;/* not ready until it has a "name" */ - pager->ref_count = 1; /* existence reference (for the cache) */ - pager->ref_count++; /* for the caller */ + os_ref_init_count(&pager->ref_count, NULL, 2); /* existence reference (for the cache) and another for the caller */ pager->is_mapped = FALSE; pager->backing_object = backing_object; pager->backing_offset = backing_offset; @@ -1072,14 +1072,14 @@ apple_protect_pager_create( #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: crypt_info %p [%p,%p,%p,%d]\n", - __FUNCTION__, - crypt_info, - crypt_info->page_decrypt, - crypt_info->crypt_end, - crypt_info->crypt_ops, - crypt_info->crypt_refcnt); + __FUNCTION__, + crypt_info, + crypt_info->page_decrypt, + crypt_info->crypt_end, + crypt_info->crypt_ops, + crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ - + vm_object_reference(backing_object); old_crypt_info = NULL; @@ -1087,15 +1087,15 @@ apple_protect_pager_create( lck_mtx_lock(&apple_protect_pager_lock); /* see if anyone raced us to 
create a pager for the same object */ queue_iterate(&apple_protect_pager_queue, - pager2, - apple_protect_pager_t, - pager_queue) { + pager2, + apple_protect_pager_t, + pager_queue) { if ((pager2->crypt_info->page_decrypt != - crypt_info->page_decrypt) || + crypt_info->page_decrypt) || (pager2->crypt_info->crypt_end != - crypt_info->crypt_end) || + crypt_info->crypt_end) || (pager2->crypt_info->crypt_ops != - crypt_info->crypt_ops)) { + crypt_info->crypt_ops)) { /* crypt_info contents do not match: next pager */ continue; } @@ -1108,16 +1108,16 @@ apple_protect_pager_create( /* ... switch to that pager's crypt_info */ #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: reference %p ref %d " - "(create match)\n", - __FUNCTION__, - pager2->crypt_info, - pager2->crypt_info->crypt_refcnt); + "(create match)\n", + __FUNCTION__, + pager2->crypt_info, + pager2->crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ old_crypt_info = pager2->crypt_info; crypt_info_reference(old_crypt_info); pager->crypt_info = old_crypt_info; } - + if (pager2->backing_object == backing_object && pager2->backing_offset == backing_offset && pager2->crypto_backing_offset == crypto_backing_offset && @@ -1127,21 +1127,21 @@ apple_protect_pager_create( break; } } - if (! queue_end(&apple_protect_pager_queue, - (queue_entry_t) pager2)) { + if (!queue_end(&apple_protect_pager_queue, + (queue_entry_t) pager2)) { /* we lost the race, down with the loser... */ lck_mtx_unlock(&apple_protect_pager_lock); vm_object_deallocate(pager->backing_object); pager->backing_object = VM_OBJECT_NULL; #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: %p ref %d (create pager match)\n", - __FUNCTION__, - pager->crypt_info, - pager->crypt_info->crypt_refcnt); + __FUNCTION__, + pager->crypt_info, + pager->crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ crypt_info_deallocate(pager->crypt_info); pager->crypt_info = NULL; - kfree(pager, sizeof (*pager)); + kfree(pager, sizeof(*pager)); /* ... 
and go with the winner */ pager = pager2; /* let the winner make sure the pager gets ready */ @@ -1150,9 +1150,9 @@ apple_protect_pager_create( /* enter new pager at the head of our list of pagers */ queue_enter_first(&apple_protect_pager_queue, - pager, - apple_protect_pager_t, - pager_queue); + pager, + apple_protect_pager_t, + pager_queue); apple_protect_pager_count++; if (apple_protect_pager_count > apple_protect_pager_count_max) { apple_protect_pager_count_max = apple_protect_pager_count; @@ -1160,8 +1160,8 @@ apple_protect_pager_create( lck_mtx_unlock(&apple_protect_pager_lock); kr = memory_object_create_named((memory_object_t) pager, - 0, - &control); + 0, + &control); assert(kr == KERN_SUCCESS); lck_mtx_lock(&apple_protect_pager_lock); @@ -1177,10 +1177,10 @@ apple_protect_pager_create( /* we re-used an old crypt_info instead of using our new one */ #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: deallocate %p ref %d " - "(create used old)\n", - __FUNCTION__, - crypt_info, - crypt_info->crypt_refcnt); + "(create used old)\n", + __FUNCTION__, + crypt_info, + crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ crypt_info_deallocate(crypt_info); crypt_info = NULL; @@ -1198,24 +1198,24 @@ apple_protect_pager_create( */ memory_object_t apple_protect_pager_setup( - vm_object_t backing_object, - vm_object_offset_t backing_offset, - vm_object_offset_t crypto_backing_offset, + vm_object_t backing_object, + vm_object_offset_t backing_offset, + vm_object_offset_t crypto_backing_offset, struct pager_crypt_info *crypt_info, - vm_object_offset_t crypto_start, - vm_object_offset_t crypto_end) + vm_object_offset_t crypto_start, + vm_object_offset_t crypto_end) { - apple_protect_pager_t pager; - struct pager_crypt_info *old_crypt_info, *new_crypt_info; + apple_protect_pager_t pager; + struct pager_crypt_info *old_crypt_info, *new_crypt_info; #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: crypt_info=%p [%p,%p,%p,%d]\n", - __FUNCTION__, - crypt_info, - crypt_info->page_decrypt, - crypt_info->crypt_end, - crypt_info->crypt_ops, - crypt_info->crypt_refcnt); + __FUNCTION__, + crypt_info, + crypt_info->page_decrypt, + crypt_info->crypt_end, + crypt_info->crypt_ops, + crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ old_crypt_info = NULL; @@ -1223,15 +1223,15 @@ apple_protect_pager_setup( lck_mtx_lock(&apple_protect_pager_lock); queue_iterate(&apple_protect_pager_queue, - pager, - apple_protect_pager_t, - pager_queue) { + pager, + apple_protect_pager_t, + pager_queue) { if ((pager->crypt_info->page_decrypt != - crypt_info->page_decrypt) || + crypt_info->page_decrypt) || (pager->crypt_info->crypt_end != - crypt_info->crypt_end) || + crypt_info->crypt_end) || (pager->crypt_info->crypt_ops != - crypt_info->crypt_ops)) { + crypt_info->crypt_ops)) { /* no match for "crypt_info": next pager */ continue; } @@ -1244,28 +1244,28 @@ apple_protect_pager_setup( old_crypt_info = pager->crypt_info; #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: " - "switching crypt_info from %p [%p,%p,%p,%d] " - "to %p [%p,%p,%p,%d] from pager %p\n", - __FUNCTION__, - crypt_info, - crypt_info->page_decrypt, - crypt_info->crypt_end, - crypt_info->crypt_ops, - crypt_info->crypt_refcnt, - old_crypt_info, - old_crypt_info->page_decrypt, - old_crypt_info->crypt_end, - old_crypt_info->crypt_ops, - old_crypt_info->crypt_refcnt, - pager); + "switching crypt_info from %p [%p,%p,%p,%d] " + "to %p [%p,%p,%p,%d] from pager %p\n", + __FUNCTION__, + crypt_info, + crypt_info->page_decrypt, + crypt_info->crypt_end, + crypt_info->crypt_ops, + 
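/*
 * Note on this hunk: apple_protect_pager_create() and
 * apple_protect_pager_setup() both walk apple_protect_pager_queue and
 * treat two crypt_infos as interchangeable iff the triple
 * (page_decrypt, crypt_end, crypt_ops) matches; on a match the caller
 * adopts the incumbent crypt_info via crypt_info_reference() instead of
 * keeping a duplicate copy, which is the switch this debug printf is
 * reporting.
 */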
crypt_info->crypt_refcnt, + old_crypt_info, + old_crypt_info->page_decrypt, + old_crypt_info->crypt_end, + old_crypt_info->crypt_ops, + old_crypt_info->crypt_refcnt, + pager); printf("CRYPT_INFO %s: %p ref %d (setup match)\n", - __FUNCTION__, - pager->crypt_info, - pager->crypt_info->crypt_refcnt); + __FUNCTION__, + pager->crypt_info, + pager->crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ crypt_info_reference(pager->crypt_info); } - + if (pager->backing_object == backing_object && pager->backing_offset == backing_offset && pager->crypto_backing_offset == crypto_backing_offset && @@ -1276,27 +1276,26 @@ apple_protect_pager_setup( assert(old_crypt_info->crypt_refcnt > 1); #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: " - "pager match with %p crypt_info %p\n", - __FUNCTION__, - pager, - pager->crypt_info); + "pager match with %p crypt_info %p\n", + __FUNCTION__, + pager, + pager->crypt_info); printf("CRYPT_INFO %s: deallocate %p ref %d " - "(pager match)\n", - __FUNCTION__, - old_crypt_info, - old_crypt_info->crypt_refcnt); + "(pager match)\n", + __FUNCTION__, + old_crypt_info, + old_crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ /* release the extra ref on crypt_info we got above */ crypt_info_deallocate(old_crypt_info); assert(old_crypt_info->crypt_refcnt > 0); /* give extra reference on pager to the caller */ - assert(pager->ref_count > 0); - pager->ref_count++; + os_ref_retain_locked(&pager->ref_count); break; } } if (queue_end(&apple_protect_pager_queue, - (queue_entry_t) pager)) { + (queue_entry_t) pager)) { lck_mtx_unlock(&apple_protect_pager_lock); /* no existing pager for this backing object */ pager = APPLE_PROTECT_PAGER_NULL; @@ -1305,20 +1304,20 @@ apple_protect_pager_setup( new_crypt_info = old_crypt_info; #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: " - "will use old_crypt_info %p for new pager\n", - __FUNCTION__, - old_crypt_info); + "will use old_crypt_info %p for new pager\n", + __FUNCTION__, + old_crypt_info); #endif /* CRYPT_INFO_DEBUG */ } else { /* allocate a new crypt_info for new pager */ - new_crypt_info = kalloc(sizeof (*new_crypt_info)); + new_crypt_info = kalloc(sizeof(*new_crypt_info)); *new_crypt_info = *crypt_info; new_crypt_info->crypt_refcnt = 1; #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: " - "will use new_crypt_info %p for new pager\n", - __FUNCTION__, - new_crypt_info); + "will use new_crypt_info %p for new pager\n", + __FUNCTION__, + new_crypt_info); #endif /* CRYPT_INFO_DEBUG */ } if (new_crypt_info == NULL) { @@ -1339,10 +1338,10 @@ apple_protect_pager_setup( /* release extra reference on old_crypt_info */ #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: deallocate %p ref %d " - "(create fail old_crypt_info)\n", - __FUNCTION__, - old_crypt_info, - old_crypt_info->crypt_refcnt); + "(create fail old_crypt_info)\n", + __FUNCTION__, + old_crypt_info, + old_crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ crypt_info_deallocate(old_crypt_info); old_crypt_info = NULL; @@ -1351,10 +1350,10 @@ apple_protect_pager_setup( assert(new_crypt_info->crypt_refcnt == 1); #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: deallocate %p ref %d " - "(create fail new_crypt_info)\n", - __FUNCTION__, - new_crypt_info, - new_crypt_info->crypt_refcnt); + "(create fail new_crypt_info)\n", + __FUNCTION__, + new_crypt_info, + new_crypt_info->crypt_refcnt); #endif /* CRYPT_INFO_DEBUG */ crypt_info_deallocate(new_crypt_info); new_crypt_info = NULL; @@ -1368,22 +1367,22 @@ apple_protect_pager_setup( while (!pager->is_ready) { lck_mtx_sleep(&apple_protect_pager_lock, - 
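/*
 * This sleep is the consumer half of the is_ready handshake: a caller
 * that found an existing-but-unfinished pager blocks on
 * &pager->is_ready (uninterruptibly, THREAD_UNINT) until the thread
 * that won the creation race has finished memory_object_create_named()
 * and issues the corresponding wakeup.
 */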
LCK_SLEEP_DEFAULT, - &pager->is_ready, - THREAD_UNINT); + LCK_SLEEP_DEFAULT, + &pager->is_ready, + THREAD_UNINT); } lck_mtx_unlock(&apple_protect_pager_lock); return (memory_object_t) pager; -} +} void apple_protect_pager_trim(void) { - apple_protect_pager_t pager, prev_pager; - queue_head_t trim_queue; - int num_trim; - int count_unmapped; + apple_protect_pager_t pager, prev_pager; + queue_head_t trim_queue; + int num_trim; + int count_unmapped; lck_mtx_lock(&apple_protect_pager_lock); @@ -1395,15 +1394,15 @@ apple_protect_pager_trim(void) num_trim = 0; for (pager = (apple_protect_pager_t) - queue_last(&apple_protect_pager_queue); - !queue_end(&apple_protect_pager_queue, - (queue_entry_t) pager); - pager = prev_pager) { + queue_last(&apple_protect_pager_queue); + !queue_end(&apple_protect_pager_queue, + (queue_entry_t) pager); + pager = prev_pager) { /* get prev elt before we dequeue */ prev_pager = (apple_protect_pager_t) - queue_prev(&pager->pager_queue); + queue_prev(&pager->pager_queue); - if (pager->ref_count == 2 && + if (os_ref_get_count(&pager->ref_count) == 2 && pager->is_ready && !pager->is_mapped) { /* this pager can be trimmed */ @@ -1412,12 +1411,12 @@ apple_protect_pager_trim(void) apple_protect_pager_dequeue(pager); /* ... and add it to our trim queue */ queue_enter_first(&trim_queue, - pager, - apple_protect_pager_t, - pager_queue); + pager, + apple_protect_pager_t, + pager_queue); count_unmapped = (apple_protect_pager_count - - apple_protect_pager_count_mapped); + apple_protect_pager_count_mapped); if (count_unmapped <= apple_protect_pager_cache_limit) { /* we have enough pagers to trim */ break; @@ -1434,18 +1433,18 @@ apple_protect_pager_trim(void) /* terminate the trimmed pagers */ while (!queue_empty(&trim_queue)) { queue_remove_first(&trim_queue, - pager, - apple_protect_pager_t, - pager_queue); + pager, + apple_protect_pager_t, + pager_queue); pager->pager_queue.next = NULL; pager->pager_queue.prev = NULL; - assert(pager->ref_count == 2); /* * We can't call deallocate_internal() because the pager * has already been dequeued, but we still need to remove * a reference. 
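* With the switch to the os_refcnt API in this patch, that raw
* ref_count-- becomes os_ref_release_locked(), which returns the
* post-release count; the old standalone assert(pager->ref_count == 2)
* above can therefore collapse into an assert on the returned value.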
*/ - pager->ref_count--; + os_ref_count_t __assert_only count = os_ref_release_locked(&pager->ref_count); + assert(count == 1); apple_protect_pager_terminate_internal(pager); } } @@ -1458,24 +1457,24 @@ crypt_info_reference( assert(crypt_info->crypt_refcnt != 0); #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: %p ref %d -> %d\n", - __FUNCTION__, - crypt_info, - crypt_info->crypt_refcnt, - crypt_info->crypt_refcnt + 1); + __FUNCTION__, + crypt_info, + crypt_info->crypt_refcnt, + crypt_info->crypt_refcnt + 1); #endif /* CRYPT_INFO_DEBUG */ OSAddAtomic(+1, &crypt_info->crypt_refcnt); } void crypt_info_deallocate( - struct pager_crypt_info *crypt_info) + struct pager_crypt_info *crypt_info) { #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: %p ref %d -> %d\n", - __FUNCTION__, - crypt_info, - crypt_info->crypt_refcnt, - crypt_info->crypt_refcnt - 1); + __FUNCTION__, + crypt_info, + crypt_info->crypt_refcnt, + crypt_info->crypt_refcnt - 1); #endif /* CRYPT_INFO_DEBUG */ OSAddAtomic(-1, &crypt_info->crypt_refcnt); if (crypt_info->crypt_refcnt == 0) { @@ -1486,10 +1485,10 @@ crypt_info_deallocate( } #if CRYPT_INFO_DEBUG printf("CRYPT_INFO %s: freeing %p\n", - __FUNCTION__, - crypt_info); + __FUNCTION__, + crypt_info); #endif /* CRYPT_INFO_DEBUG */ - kfree(crypt_info, sizeof (*crypt_info)); + kfree(crypt_info, sizeof(*crypt_info)); crypt_info = NULL; } } diff --git a/osfmk/vm/vm_compressor.c b/osfmk/vm/vm_compressor.c index 0f3a37fe1..ee77679da 100644 --- a/osfmk/vm/vm_compressor.c +++ b/osfmk/vm/vm_compressor.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -38,7 +38,7 @@ #include #include #include -#include <mach/mach_host.h> /* for host_info() */ +#include <mach/mach_host.h> /* for host_info() */ #include #include #include @@ -55,7 +55,9 @@ extern boolean_t vm_darkwake_mode; #if POPCOUNT_THE_COMPRESSED_DATA boolean_t popcount_c_segs = TRUE; -static inline uint32_t vmc_pop(uintptr_t ins, int sz) { +static inline uint32_t +vmc_pop(uintptr_t ins, int sz) +{ uint32_t rv = 0; if (__probable(popcount_c_segs == FALSE)) { @@ -103,37 +105,39 @@ boolean_t validate_c_segs = TRUE; #if CONFIG_EMBEDDED #if CONFIG_FREEZE -int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT; +int vm_compressor_mode = VM_PAGER_FREEZER_DEFAULT; -void *freezer_chead; /* The chead used to track c_segs allocated for the exclusive use of holding just one task's compressed memory.*/ -char *freezer_compressor_scratch_buf = NULL; +void *freezer_chead; /* The chead used to track c_segs allocated for the exclusive use of holding just one task's compressed memory.*/ +char *freezer_compressor_scratch_buf = NULL; -extern int c_freezer_swapout_page_count; /* This count keeps track of the # of compressed pages holding just one task's compressed memory on the swapout queue. This count is used during each freeze i.e. on a per-task basis.*/ +extern int c_freezer_swapout_page_count; /* This count keeps track of the # of compressed pages holding just one task's compressed memory on the swapout queue. This count is used during each freeze i.e. on a per-task basis.*/ #else /* CONFIG_FREEZE */ -int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED; +int vm_compressor_mode = VM_PAGER_NOT_CONFIGURED; #endif /* CONFIG_FREEZE */ -int vm_scale = 1; +int vm_scale = 1; #else /* CONFIG_EMBEDDED */ -int vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP; -int vm_scale = 16; +int vm_compressor_mode = VM_PAGER_COMPRESSOR_WITH_SWAP; +int vm_scale = 16; #endif /* CONFIG_EMBEDDED */ -int vm_compressor_is_active = 0; -int vm_compression_limit = 0; -int vm_compressor_available = 0; +int vm_compressor_is_active = 0; +int vm_compression_limit = 0; +int vm_compressor_available = 0; -extern void vm_pageout_io_throttle(void); +extern void vm_pageout_io_throttle(void); #if CHECKSUM_THE_DATA || CHECKSUM_THE_SWAP || CHECKSUM_THE_COMPRESSED_DATA extern unsigned int hash_string(char *cp, int len); static unsigned int vmc_hash(char *, int); boolean_t checksum_c_segs = TRUE; -unsigned int vmc_hash(char *cp, int len) { +unsigned int +vmc_hash(char *cp, int len) +{ if (__probable(checksum_c_segs == FALSE)) { return 0xDEAD7A37; } @@ -141,180 +145,179 @@ unsigned int vmc_hash(char *cp, int len) { } #endif -#define UNPACK_C_SIZE(cs) ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size) -#define PACK_C_SIZE(cs, size) (cs->c_size = ((size == PAGE_SIZE) ? PAGE_SIZE - 1 : size)) +#define UNPACK_C_SIZE(cs) ((cs->c_size == (PAGE_SIZE-1)) ? PAGE_SIZE : cs->c_size) +#define PACK_C_SIZE(cs, size) (cs->c_size = ((size == PAGE_SIZE) ?
PAGE_SIZE - 1 : size)) struct c_sv_hash_entry { union { - struct { - uint32_t c_sv_he_ref; - uint32_t c_sv_he_data; + struct { + uint32_t c_sv_he_ref; + uint32_t c_sv_he_data; } c_sv_he; - uint64_t c_sv_he_record; - - } c_sv_he_un; + uint64_t c_sv_he_record; + } c_sv_he_un; }; -#define he_ref c_sv_he_un.c_sv_he.c_sv_he_ref -#define he_data c_sv_he_un.c_sv_he.c_sv_he_data -#define he_record c_sv_he_un.c_sv_he_record +#define he_ref c_sv_he_un.c_sv_he.c_sv_he_ref +#define he_data c_sv_he_un.c_sv_he.c_sv_he_data +#define he_record c_sv_he_un.c_sv_he_record -#define C_SV_HASH_MAX_MISS 32 -#define C_SV_HASH_SIZE ((1 << 10)) -#define C_SV_HASH_MASK ((1 << 10) - 1) -#define C_SV_CSEG_ID ((1 << 22) - 1) +#define C_SV_HASH_MAX_MISS 32 +#define C_SV_HASH_SIZE ((1 << 10)) +#define C_SV_HASH_MASK ((1 << 10) - 1) +#define C_SV_CSEG_ID ((1 << 22) - 1) union c_segu { - c_segment_t c_seg; - uintptr_t c_segno; + c_segment_t c_seg; + uintptr_t c_segno; }; -#define C_SLOT_PACK_PTR(ptr) (((uintptr_t)ptr - (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START) >> 2) -#define C_SLOT_UNPACK_PTR(cslot) ((uintptr_t)(cslot->c_packed_ptr << 2) + (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START) +#define C_SLOT_PACK_PTR(ptr) (((uintptr_t)ptr - (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START) >> 2) +#define C_SLOT_UNPACK_PTR(cslot) ((uintptr_t)(cslot->c_packed_ptr << 2) + (uintptr_t) KERNEL_PMAP_HEAP_RANGE_START) -uint32_t c_segment_count = 0; -uint32_t c_segment_count_max = 0; +uint32_t c_segment_count = 0; +uint32_t c_segment_count_max = 0; -uint64_t c_generation_id = 0; -uint64_t c_generation_id_flush_barrier; +uint64_t c_generation_id = 0; +uint64_t c_generation_id_flush_barrier; -#define HIBERNATE_FLUSHING_SECS_TO_COMPLETE 120 +#define HIBERNATE_FLUSHING_SECS_TO_COMPLETE 120 -boolean_t hibernate_no_swapspace = FALSE; -clock_sec_t hibernate_flushing_deadline = 0; +boolean_t hibernate_no_swapspace = FALSE; +clock_sec_t hibernate_flushing_deadline = 0; #if RECORD_THE_COMPRESSED_DATA -char *c_compressed_record_sbuf; -char *c_compressed_record_ebuf; -char *c_compressed_record_cptr; +char *c_compressed_record_sbuf; +char *c_compressed_record_ebuf; +char *c_compressed_record_cptr; #endif -queue_head_t c_age_list_head; -queue_head_t c_swappedin_list_head; -queue_head_t c_swapout_list_head; -queue_head_t c_swapio_list_head; -queue_head_t c_swappedout_list_head; -queue_head_t c_swappedout_sparse_list_head; -queue_head_t c_major_list_head; -queue_head_t c_filling_list_head; -queue_head_t c_bad_list_head; - -uint32_t c_age_count = 0; -uint32_t c_swappedin_count = 0; -uint32_t c_swapout_count = 0; -uint32_t c_swapio_count = 0; -uint32_t c_swappedout_count = 0; -uint32_t c_swappedout_sparse_count = 0; -uint32_t c_major_count = 0; -uint32_t c_filling_count = 0; -uint32_t c_empty_count = 0; -uint32_t c_bad_count = 0; - - -queue_head_t c_minor_list_head; -uint32_t c_minor_count = 0; - -int c_overage_swapped_count = 0; -int c_overage_swapped_limit = 0; - -int c_seg_fixed_array_len; -union c_segu *c_segments; -vm_offset_t c_buffers; +queue_head_t c_age_list_head; +queue_head_t c_swappedin_list_head; +queue_head_t c_swapout_list_head; +queue_head_t c_swapio_list_head; +queue_head_t c_swappedout_list_head; +queue_head_t c_swappedout_sparse_list_head; +queue_head_t c_major_list_head; +queue_head_t c_filling_list_head; +queue_head_t c_bad_list_head; + +uint32_t c_age_count = 0; +uint32_t c_swappedin_count = 0; +uint32_t c_swapout_count = 0; +uint32_t c_swapio_count = 0; +uint32_t c_swappedout_count = 0; +uint32_t c_swappedout_sparse_count = 0; +uint32_t 
c_major_count = 0; +uint32_t c_filling_count = 0; +uint32_t c_empty_count = 0; +uint32_t c_bad_count = 0; + + +queue_head_t c_minor_list_head; +uint32_t c_minor_count = 0; + +int c_overage_swapped_count = 0; +int c_overage_swapped_limit = 0; + +int c_seg_fixed_array_len; +union c_segu *c_segments; +vm_offset_t c_buffers; vm_size_t c_buffers_size; -caddr_t c_segments_next_page; -boolean_t c_segments_busy; -uint32_t c_segments_available; -uint32_t c_segments_limit; -uint32_t c_segments_nearing_limit; +caddr_t c_segments_next_page; +boolean_t c_segments_busy; +uint32_t c_segments_available; +uint32_t c_segments_limit; +uint32_t c_segments_nearing_limit; -uint32_t c_segment_svp_in_hash; -uint32_t c_segment_svp_hash_succeeded; -uint32_t c_segment_svp_hash_failed; -uint32_t c_segment_svp_zero_compressions; -uint32_t c_segment_svp_nonzero_compressions; -uint32_t c_segment_svp_zero_decompressions; -uint32_t c_segment_svp_nonzero_decompressions; +uint32_t c_segment_svp_in_hash; +uint32_t c_segment_svp_hash_succeeded; +uint32_t c_segment_svp_hash_failed; +uint32_t c_segment_svp_zero_compressions; +uint32_t c_segment_svp_nonzero_compressions; +uint32_t c_segment_svp_zero_decompressions; +uint32_t c_segment_svp_nonzero_decompressions; -uint32_t c_segment_noncompressible_pages; +uint32_t c_segment_noncompressible_pages; -uint32_t c_segment_pages_compressed; -uint32_t c_segment_pages_compressed_limit; -uint32_t c_segment_pages_compressed_nearing_limit; -uint32_t c_free_segno_head = (uint32_t)-1; +uint32_t c_segment_pages_compressed; +uint32_t c_segment_pages_compressed_limit; +uint32_t c_segment_pages_compressed_nearing_limit; +uint32_t c_free_segno_head = (uint32_t)-1; -uint32_t vm_compressor_minorcompact_threshold_divisor = 10; -uint32_t vm_compressor_majorcompact_threshold_divisor = 10; -uint32_t vm_compressor_unthrottle_threshold_divisor = 10; -uint32_t vm_compressor_catchup_threshold_divisor = 10; +uint32_t vm_compressor_minorcompact_threshold_divisor = 10; +uint32_t vm_compressor_majorcompact_threshold_divisor = 10; +uint32_t vm_compressor_unthrottle_threshold_divisor = 10; +uint32_t vm_compressor_catchup_threshold_divisor = 10; -uint32_t vm_compressor_minorcompact_threshold_divisor_overridden = 0; -uint32_t vm_compressor_majorcompact_threshold_divisor_overridden = 0; -uint32_t vm_compressor_unthrottle_threshold_divisor_overridden = 0; -uint32_t vm_compressor_catchup_threshold_divisor_overridden = 0; +uint32_t vm_compressor_minorcompact_threshold_divisor_overridden = 0; +uint32_t vm_compressor_majorcompact_threshold_divisor_overridden = 0; +uint32_t vm_compressor_unthrottle_threshold_divisor_overridden = 0; +uint32_t vm_compressor_catchup_threshold_divisor_overridden = 0; -#define C_SEGMENTS_PER_PAGE (PAGE_SIZE / sizeof(union c_segu)) +#define C_SEGMENTS_PER_PAGE (PAGE_SIZE / sizeof(union c_segu)) -lck_grp_attr_t vm_compressor_lck_grp_attr; -lck_attr_t vm_compressor_lck_attr; -lck_grp_t vm_compressor_lck_grp; -lck_mtx_t *c_list_lock; -lck_rw_t c_master_lock; -boolean_t decompressions_blocked = FALSE; +lck_grp_attr_t vm_compressor_lck_grp_attr; +lck_attr_t vm_compressor_lck_attr; +lck_grp_t vm_compressor_lck_grp; +lck_mtx_t *c_list_lock; +lck_rw_t c_master_lock; +boolean_t decompressions_blocked = FALSE; -zone_t compressor_segment_zone; -int c_compressor_swap_trigger = 0; +zone_t compressor_segment_zone; +int c_compressor_swap_trigger = 0; -uint32_t compressor_cpus; -char *compressor_scratch_bufs; -char *kdp_compressor_scratch_buf; -char *kdp_compressor_decompressed_page; -addr64_t 
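/*
 * The long run of paired -/+ lines through this region is
 * whitespace-only: file-scope declarations re-aligned to the new xnu
 * style, with no initializer, type, or macro value changed. The
 * function-body rewrites that follow (braces added to single-statement
 * ifs, return expressions unparenthesized) are likewise mechanical.
 */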
kdp_compressor_decompressed_page_paddr; -ppnum_t kdp_compressor_decompressed_page_ppnum; +uint32_t compressor_cpus; +char *compressor_scratch_bufs; +char *kdp_compressor_scratch_buf; +char *kdp_compressor_decompressed_page; +addr64_t kdp_compressor_decompressed_page_paddr; +ppnum_t kdp_compressor_decompressed_page_ppnum; -clock_sec_t start_of_sample_period_sec = 0; -clock_nsec_t start_of_sample_period_nsec = 0; -clock_sec_t start_of_eval_period_sec = 0; -clock_nsec_t start_of_eval_period_nsec = 0; -uint32_t sample_period_decompression_count = 0; -uint32_t sample_period_compression_count = 0; -uint32_t last_eval_decompression_count = 0; -uint32_t last_eval_compression_count = 0; +clock_sec_t start_of_sample_period_sec = 0; +clock_nsec_t start_of_sample_period_nsec = 0; +clock_sec_t start_of_eval_period_sec = 0; +clock_nsec_t start_of_eval_period_nsec = 0; +uint32_t sample_period_decompression_count = 0; +uint32_t sample_period_compression_count = 0; +uint32_t last_eval_decompression_count = 0; +uint32_t last_eval_compression_count = 0; -#define DECOMPRESSION_SAMPLE_MAX_AGE (60 * 30) +#define DECOMPRESSION_SAMPLE_MAX_AGE (60 * 30) -boolean_t vm_swapout_ripe_segments = FALSE; -uint32_t vm_ripe_target_age = (60 * 60 * 48); +boolean_t vm_swapout_ripe_segments = FALSE; +uint32_t vm_ripe_target_age = (60 * 60 * 48); -uint32_t swapout_target_age = 0; -uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE]; -uint32_t overage_decompressions_during_sample_period = 0; +uint32_t swapout_target_age = 0; +uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE]; +uint32_t overage_decompressions_during_sample_period = 0; -void do_fastwake_warmup(queue_head_t *, boolean_t); -boolean_t fastwake_warmup = FALSE; -boolean_t fastwake_recording_in_progress = FALSE; -clock_sec_t dont_trim_until_ts = 0; +void do_fastwake_warmup(queue_head_t *, boolean_t); +boolean_t fastwake_warmup = FALSE; +boolean_t fastwake_recording_in_progress = FALSE; +clock_sec_t dont_trim_until_ts = 0; -uint64_t c_segment_warmup_count; -uint64_t first_c_segment_to_warm_generation_id = 0; -uint64_t last_c_segment_to_warm_generation_id = 0; -boolean_t hibernate_flushing = FALSE; +uint64_t c_segment_warmup_count; +uint64_t first_c_segment_to_warm_generation_id = 0; +uint64_t last_c_segment_to_warm_generation_id = 0; +boolean_t hibernate_flushing = FALSE; -int64_t c_segment_input_bytes __attribute__((aligned(8))) = 0; -int64_t c_segment_compressed_bytes __attribute__((aligned(8))) = 0; -int64_t compressor_bytes_used __attribute__((aligned(8))) = 0; +int64_t c_segment_input_bytes __attribute__((aligned(8))) = 0; +int64_t c_segment_compressed_bytes __attribute__((aligned(8))) = 0; +int64_t compressor_bytes_used __attribute__((aligned(8))) = 0; -struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned (8))); +struct c_sv_hash_entry c_segment_sv_hash_table[C_SV_HASH_SIZE] __attribute__ ((aligned(8))); static boolean_t compressor_needs_to_swap(void); static void vm_compressor_swap_trigger_thread(void); @@ -348,31 +351,33 @@ uint64_t vm_compressor_pages_compressed(void); * driven swapping, this will also cause swapouts to * be initiated. 
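* Concretely, the test below fires once at least one eighth of the
* segment budget (c_segments_nearing_limit / 8) has been allocated and
* the in-core segments are carrying more than an eighth of their total
* page capacity as unused slack (in-core capacity minus
* VM_PAGE_COMPRESSOR_COUNT).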
*/ -static inline boolean_t vm_compressor_needs_to_major_compact() +static inline boolean_t +vm_compressor_needs_to_major_compact() { - uint32_t incore_seg_count; + uint32_t incore_seg_count; incore_seg_count = c_segment_count - c_swappedout_count - c_swappedout_sparse_count; if ((c_segment_count >= (c_segments_nearing_limit / 8)) && ((incore_seg_count * C_SEG_MAX_PAGES) - VM_PAGE_COMPRESSOR_COUNT) > - ((incore_seg_count / 8) * C_SEG_MAX_PAGES)) - return (1); - return (0); + ((incore_seg_count / 8) * C_SEG_MAX_PAGES)) { + return 1; + } + return 0; } uint64_t vm_available_memory(void) { - return (((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64); + return ((uint64_t)AVAILABLE_NON_COMPRESSED_MEMORY) * PAGE_SIZE_64; } uint64_t vm_compressor_pages_compressed(void) { - return (c_segment_pages_compressed * PAGE_SIZE_64); + return c_segment_pages_compressed * PAGE_SIZE_64; } @@ -380,10 +385,11 @@ boolean_t vm_compressor_low_on_space(void) { if ((c_segment_pages_compressed > c_segment_pages_compressed_nearing_limit) || - (c_segment_count > c_segments_nearing_limit)) - return (TRUE); + (c_segment_count > c_segments_nearing_limit)) { + return TRUE; + } - return (FALSE); + return FALSE; } @@ -391,45 +397,46 @@ boolean_t vm_compressor_out_of_space(void) { if ((c_segment_pages_compressed >= c_segment_pages_compressed_limit) || - (c_segment_count >= c_segments_limit)) - return (TRUE); + (c_segment_count >= c_segments_limit)) { + return TRUE; + } - return (FALSE); + return FALSE; } - + int vm_wants_task_throttled(task_t task) { - if (task == kernel_task) - return (0); + if (task == kernel_task) { + return 0; + } if (VM_CONFIG_SWAP_IS_ACTIVE) { if ((vm_compressor_low_on_space() || HARD_THROTTLE_LIMIT_REACHED()) && - (unsigned int)pmap_compressed(task->map->pmap) > (c_segment_pages_compressed / 4)) - return (1); + (unsigned int)pmap_compressed(task->map->pmap) > (c_segment_pages_compressed / 4)) { + return 1; + } } - return (0); + return 0; } #if DEVELOPMENT || DEBUG boolean_t kill_on_no_paging_space = FALSE; /* On compressor/swap exhaustion, kill the largest process regardless of - * its chosen process policy. Controlled by a boot-arg of the same name. */ + * its chosen process policy. Controlled by a boot-arg of the same name. 
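* (On a DEVELOPMENT/DEBUG kernel this would typically be enabled with
* something like nvram boot-args="-kill_on_no_paging_space", a
* hypothetical invocation shown for illustration; the leading dash is
* part of the boot-arg name that the PE_parse_boot_argn() check in
* vm_compressor_init() matches.)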
*/ #endif /* DEVELOPMENT || DEBUG */ #if !CONFIG_EMBEDDED -static uint32_t no_paging_space_action_in_progress = 0; +static uint32_t no_paging_space_action_in_progress = 0; extern void memorystatus_send_low_swap_note(void); static void vm_compressor_take_paging_space_action(void) { if (no_paging_space_action_in_progress == 0) { - if (OSCompareAndSwap(0, 1, (UInt32 *)&no_paging_space_action_in_progress)) { - if (no_paging_space_action()) { #if DEVELOPMENT || DEBUG if (kill_on_no_paging_space == TRUE) { @@ -469,7 +476,7 @@ vm_decompressor_lock(void) PAGE_REPLACEMENT_ALLOWED(TRUE); decompressions_blocked = TRUE; - + PAGE_REPLACEMENT_ALLOWED(FALSE); } @@ -485,20 +492,22 @@ vm_decompressor_unlock(void) thread_wakeup((event_t)&decompressions_blocked); } -static inline void cslot_copy(c_slot_t cdst, c_slot_t csrc) { +static inline void +cslot_copy(c_slot_t cdst, c_slot_t csrc) +{ #if CHECKSUM_THE_DATA - cdst->c_hash_data = csrc->c_hash_data; + cdst->c_hash_data = csrc->c_hash_data; #endif #if CHECKSUM_THE_COMPRESSED_DATA - cdst->c_hash_compressed_data = csrc->c_hash_compressed_data; + cdst->c_hash_compressed_data = csrc->c_hash_compressed_data; #endif #if POPCOUNT_THE_COMPRESSED_DATA - cdst->c_pop_cdata = csrc->c_pop_cdata; + cdst->c_pop_cdata = csrc->c_pop_cdata; #endif - cdst->c_size = csrc->c_size; - cdst->c_packed_ptr = csrc->c_packed_ptr; + cdst->c_size = csrc->c_size; + cdst->c_packed_ptr = csrc->c_packed_ptr; #if defined(__arm__) || defined(__arm64__) - cdst->c_codec = csrc->c_codec; + cdst->c_codec = csrc->c_codec; #endif } @@ -522,26 +531,26 @@ uint32_t vm_ktrace_enabled; void vm_compressor_init(void) { - thread_t thread; - struct c_slot cs_dummy; + thread_t thread; + struct c_slot cs_dummy; c_slot_t cs = &cs_dummy; - int c_segment_min_size; - int c_segment_padded_size; - int attempts = 1; - kern_return_t retval = KERN_SUCCESS; - vm_offset_t start_addr = 0; + int c_segment_min_size; + int c_segment_padded_size; + int attempts = 1; + kern_return_t retval = KERN_SUCCESS; + vm_offset_t start_addr = 0; vm_size_t c_segments_arr_size = 0, compressor_submap_size = 0; vm_map_kernel_flags_t vmk_flags; #if RECORD_THE_COMPRESSED_DATA - vm_size_t c_compressed_record_sbuf_size = 0; + vm_size_t c_compressed_record_sbuf_size = 0; #endif /* RECORD_THE_COMPRESSED_DATA */ #if DEVELOPMENT || DEBUG char bootarg_name[32]; - if (PE_parse_boot_argn("-kill_on_no_paging_space", bootarg_name, sizeof (bootarg_name))) { + if (PE_parse_boot_argn("-kill_on_no_paging_space", bootarg_name, sizeof(bootarg_name))) { kill_on_no_paging_space = TRUE; } - if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof (bootarg_name))) { + if (PE_parse_boot_argn("-disable_cseg_write_protection", bootarg_name, sizeof(bootarg_name))) { write_protect_c_segs = FALSE; } int vmcval = 1; @@ -570,18 +579,20 @@ vm_compressor_init(void) */ cs->c_packed_ptr = C_SLOT_PACK_PTR(zone_map_min_address); - if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_min_address) + if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_min_address) { panic("C_SLOT_UNPACK_PTR failed on zone_map_min_address - %p", (void *)zone_map_min_address); + } cs->c_packed_ptr = C_SLOT_PACK_PTR(zone_map_max_address); - if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_max_address) + if (C_SLOT_UNPACK_PTR(cs) != (uintptr_t)zone_map_max_address) { panic("C_SLOT_UNPACK_PTR failed on zone_map_max_address - %p", (void *)zone_map_max_address); + } assert((C_SEGMENTS_PER_PAGE * sizeof(union c_segu)) == PAGE_SIZE); - PE_parse_boot_argn("vm_compression_limit", 
&vm_compression_limit, sizeof (vm_compression_limit)); + PE_parse_boot_argn("vm_compression_limit", &vm_compression_limit, sizeof(vm_compression_limit)); #ifdef CONFIG_EMBEDDED vm_compressor_minorcompact_threshold_divisor = 20; @@ -623,8 +634,9 @@ vm_compressor_init(void) c_free_segno_head = -1; c_segments_available = 0; - if (vm_compression_limit) + if (vm_compression_limit) { compressor_pool_size = (uint64_t)vm_compression_limit * PAGE_SIZE_64; + } compressor_pool_max_size = C_SEG_MAX_LIMIT; compressor_pool_max_size *= C_SEG_BUFSIZE; @@ -632,44 +644,50 @@ vm_compressor_init(void) #if defined(__x86_64__) if (vm_compression_limit == 0) { - - if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) - compressor_pool_size = 16ULL * max_mem; - else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) - compressor_pool_size = 8ULL * max_mem; - else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) - compressor_pool_size = 4ULL * max_mem; - else - compressor_pool_size = 2ULL * max_mem; - } - if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) - compressor_pool_multiplier = 1; - else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) + if (max_mem <= (4ULL * 1024ULL * 1024ULL * 1024ULL)) { + compressor_pool_size = 16ULL * max_mem; + } else if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) { + compressor_pool_size = 8ULL * max_mem; + } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) { + compressor_pool_size = 4ULL * max_mem; + } else { + compressor_pool_size = 2ULL * max_mem; + } + } + if (max_mem <= (8ULL * 1024ULL * 1024ULL * 1024ULL)) { + compressor_pool_multiplier = 1; + } else if (max_mem <= (32ULL * 1024ULL * 1024ULL * 1024ULL)) { compressor_pool_multiplier = 2; - else + } else { compressor_pool_multiplier = 4; + } #elif defined(__arm__) -#define VM_RESERVE_SIZE (1024 * 1024 * 256) -#define MAX_COMPRESSOR_POOL_SIZE (1024 * 1024 * 450) +#define VM_RESERVE_SIZE (1024 * 1024 * 256) +#define MAX_COMPRESSOR_POOL_SIZE (1024 * 1024 * 450) - if (compressor_pool_max_size > MAX_COMPRESSOR_POOL_SIZE) + if (compressor_pool_max_size > MAX_COMPRESSOR_POOL_SIZE) { compressor_pool_max_size = MAX_COMPRESSOR_POOL_SIZE; - - if (vm_compression_limit == 0) + } + + if (vm_compression_limit == 0) { compressor_pool_size = ((kernel_map->max_offset - kernel_map->min_offset) - kernel_map->size) - VM_RESERVE_SIZE; + } compressor_pool_multiplier = 1; #else - if (compressor_pool_max_size > max_mem) + if (compressor_pool_max_size > max_mem) { compressor_pool_max_size = max_mem; + } - if (vm_compression_limit == 0) + if (vm_compression_limit == 0) { compressor_pool_size = max_mem; + } compressor_pool_multiplier = 1; #endif - if (compressor_pool_size > compressor_pool_max_size) - compressor_pool_size = compressor_pool_max_size; + if (compressor_pool_size > compressor_pool_max_size) { + compressor_pool_size = compressor_pool_max_size; + } try_again: c_segments_limit = (uint32_t)(compressor_pool_size / (vm_size_t)(C_SEG_ALLOCSIZE)); @@ -677,8 +695,9 @@ try_again: c_segment_pages_compressed_limit = (c_segments_limit * (C_SEG_BUFSIZE / PAGE_SIZE) * compressor_pool_multiplier); - if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) + if (c_segment_pages_compressed_limit < (uint32_t)(max_mem / PAGE_SIZE)) { c_segment_pages_compressed_limit = (uint32_t)(max_mem / PAGE_SIZE); + } c_segment_pages_compressed_nearing_limit = (uint32_t)(((uint64_t)c_segment_pages_compressed_limit * 98ULL) / 100ULL); @@ -694,41 +713,46 @@ try_again: compressor_submap_size = c_segments_arr_size + c_buffers_size + 
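/*
 * The x86_64 defaults chosen above collapse to a simple tiering on
 * max_mem; restated as a standalone function (illustrative only, the
 * name is not part of the patch):
 *
 *	static uint64_t
 *	default_compressor_pool_size(uint64_t max_mem)
 *	{
 *		if (max_mem <= (4ULL << 30)) {
 *			return 16ULL * max_mem;	// up to 4GB of RAM
 *		} else if (max_mem <= (8ULL << 30)) {
 *			return 8ULL * max_mem;	// up to 8GB of RAM
 *		} else if (max_mem <= (32ULL << 30)) {
 *			return 4ULL * max_mem;	// up to 32GB of RAM
 *		}
 *		return 2ULL * max_mem;		// larger machines
 *	}
 *
 * The result is then capped by compressor_pool_max_size and halved on
 * each failed kmem_suballoc() attempt (see the try_again retry below,
 * which panics after three failures).
 */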
C_SEG_BUFSIZE; #if RECORD_THE_COMPRESSED_DATA - c_compressed_record_sbuf_size = (vm_size_t)C_SEG_ALLOCSIZE + (PAGE_SIZE * 2); + c_compressed_record_sbuf_size = (vm_size_t)C_SEG_ALLOCSIZE + (PAGE_SIZE * 2); compressor_submap_size += c_compressed_record_sbuf_size; #endif /* RECORD_THE_COMPRESSED_DATA */ vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; vmk_flags.vmkf_permanent = TRUE; retval = kmem_suballoc(kernel_map, &start_addr, compressor_submap_size, - FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_COMPRESSOR, - &compressor_map); + FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_COMPRESSOR, + &compressor_map); if (retval != KERN_SUCCESS) { - if (++attempts > 3) - panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size); + if (++attempts > 3) { + panic("vm_compressor_init: kmem_suballoc failed - 0x%llx", (uint64_t)compressor_submap_size); + } compressor_pool_size = compressor_pool_size / 2; kprintf("retrying creation of the compressor submap at 0x%llx bytes\n", compressor_pool_size); goto try_again; } - if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments), (sizeof(union c_segu) * c_segments_limit), 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) + if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&c_segments), (sizeof(union c_segu) * c_segments_limit), 0, KMA_KOBJECT | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { panic("vm_compressor_init: kernel_memory_allocate failed - c_segments\n"); - if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0, KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) + } + if (kernel_memory_allocate(compressor_map, &c_buffers, c_buffers_size, 0, KMA_COMPRESSOR | KMA_VAONLY | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { panic("vm_compressor_init: kernel_memory_allocate failed - c_buffers\n"); + } c_segment_min_size = sizeof(struct c_segment) + (C_SEG_SLOT_VAR_ARRAY_MIN_LEN * sizeof(struct c_slot)); - - for (c_segment_padded_size = 128; c_segment_padded_size < c_segment_min_size; c_segment_padded_size = c_segment_padded_size << 1); + + for (c_segment_padded_size = 128; c_segment_padded_size < c_segment_min_size; c_segment_padded_size = c_segment_padded_size << 1) { + ; + } compressor_segment_zone = zinit(c_segment_padded_size, c_segments_limit * c_segment_padded_size, PAGE_SIZE, "compressor_segment"); zone_change(compressor_segment_zone, Z_CALLERACCT, FALSE); zone_change(compressor_segment_zone, Z_NOENCRYPT, TRUE); c_seg_fixed_array_len = (c_segment_padded_size - sizeof(struct c_segment)) / sizeof(struct c_slot); - + c_segments_busy = FALSE; c_segments_next_page = (caddr_t)c_segments; @@ -765,15 +789,16 @@ try_again: #endif #if RECORD_THE_COMPRESSED_DATA - if (kernel_memory_allocate(compressor_map, (vm_offset_t *)&c_compressed_record_sbuf, c_compressed_record_sbuf_size, 0, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) + if (kernel_memory_allocate(compressor_map, (vm_offset_t *)&c_compressed_record_sbuf, c_compressed_record_sbuf_size, 0, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { panic("vm_compressor_init: kernel_memory_allocate failed - c_compressed_record_sbuf\n"); + } c_compressed_record_cptr = c_compressed_record_sbuf; c_compressed_record_ebuf = c_compressed_record_sbuf + c_compressed_record_sbuf_size; #endif if (kernel_thread_start_priority((thread_continue_t)vm_compressor_swap_trigger_thread, NULL, - BASEPRI_VM, &thread) != 
KERN_SUCCESS) { + BASEPRI_VM, &thread) != KERN_SUCCESS) { panic("vm_compressor_swap_trigger_thread: create failed"); } thread_deallocate(thread); @@ -781,11 +806,13 @@ try_again: if (vm_pageout_internal_start() != KERN_SUCCESS) { panic("vm_compressor_init: Failed to start the internal pageout thread.\n"); } - if (VM_CONFIG_SWAP_IS_PRESENT) + if (VM_CONFIG_SWAP_IS_PRESENT) { vm_compressor_swap_init(); + } - if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) + if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) { vm_compressor_is_active = 1; + } #if CONFIG_FREEZE memorystatus_freeze_enabled = TRUE; @@ -802,11 +829,11 @@ try_again: static void c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact) { - int c_indx; - int32_t bytes_used; - uint32_t c_rounded_size; - uint32_t c_size; - c_slot_t cs; + int c_indx; + int32_t bytes_used; + uint32_t c_rounded_size; + uint32_t c_size; + c_slot_t cs; if (__probable(validate_c_segs == FALSE)) { return; @@ -815,16 +842,17 @@ c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact) c_indx = c_seg->c_firstemptyslot; cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx); - if (cs == NULL) + if (cs == NULL) { panic("c_seg_validate: no slot backing c_firstemptyslot"); - - if (cs->c_size) + } + + if (cs->c_size) { panic("c_seg_validate: c_firstemptyslot has non-zero size (%d)\n", cs->c_size); + } } bytes_used = 0; for (c_indx = 0; c_indx < c_seg->c_nextslot; c_indx++) { - cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx); c_size = UNPACK_C_SIZE(cs); @@ -849,20 +877,22 @@ c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact) } } #endif - } - if (bytes_used != c_seg->c_bytes_used) + if (bytes_used != c_seg->c_bytes_used) { panic("c_seg_validate: bytes_used mismatch - found %d, segment has %d\n", bytes_used, c_seg->c_bytes_used); + } - if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) + if (c_seg->c_bytes_used > C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) { panic("c_seg_validate: c_bytes_used > c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n", - (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used); + (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used); + } if (must_be_compact) { - if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) + if (c_seg->c_bytes_used != C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset)) { panic("c_seg_validate: c_bytes_used doesn't match c_nextoffset - c_nextoffset = %d, c_bytes_used = %d\n", - (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used); + (int32_t)C_SEG_OFFSET_TO_BYTES((int32_t)c_seg->c_nextoffset), c_seg->c_bytes_used); + } } } @@ -872,12 +902,12 @@ c_seg_validate(c_segment_t c_seg, boolean_t must_be_compact) void c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held) { - boolean_t clear_busy = FALSE; + boolean_t clear_busy = FALSE; if (c_list_lock_held == FALSE) { - if ( !lck_mtx_try_lock_spin_always(c_list_lock)) { + if (!lck_mtx_try_lock_spin_always(c_list_lock)) { C_SEG_BUSY(c_seg); - + lck_mtx_unlock_always(&c_seg->c_lock); lck_mtx_lock_spin_always(c_list_lock); lck_mtx_lock_spin_always(&c_seg->c_lock); @@ -892,11 +922,13 @@ c_seg_need_delayed_compaction(c_segment_t c_seg, boolean_t c_list_lock_held) c_seg->c_on_minorcompact_q = 1; c_minor_count++; } - if (c_list_lock_held == FALSE) + if (c_list_lock_held == FALSE) { lck_mtx_unlock_always(c_list_lock); - - if (clear_busy == TRUE) + } + + if (clear_busy == TRUE) { C_SEG_WAKEUP_DONE(c_seg); + } } @@ -905,15 +937,15 @@ 
unsigned int c_seg_moved_to_sparse_list = 0; void c_seg_move_to_sparse_list(c_segment_t c_seg) { - boolean_t clear_busy = FALSE; + boolean_t clear_busy = FALSE; - if ( !lck_mtx_try_lock_spin_always(c_list_lock)) { + if (!lck_mtx_try_lock_spin_always(c_list_lock)) { C_SEG_BUSY(c_seg); lck_mtx_unlock_always(&c_seg->c_lock); lck_mtx_lock_spin_always(c_list_lock); lck_mtx_lock_spin_always(&c_seg->c_lock); - + clear_busy = TRUE; } c_seg_switch_state(c_seg, C_ON_SWAPPEDOUTSPARSE_Q, FALSE); @@ -922,8 +954,9 @@ c_seg_move_to_sparse_list(c_segment_t c_seg) lck_mtx_unlock_always(c_list_lock); - if (clear_busy == TRUE) + if (clear_busy == TRUE) { C_SEG_WAKEUP_DONE(c_seg); + } } @@ -938,13 +971,12 @@ c_seg_insert_into_q(queue_head_t *qhead, c_segment_t c_seg) c_seg_next = (c_segment_t)queue_first(qhead); while (TRUE) { - if (c_seg->c_generation_id < c_seg_next->c_generation_id) { queue_insert_before(qhead, c_seg, c_seg_next, c_segment_t, c_age_list); break; } c_seg_next = (c_segment_t) queue_next(&c_seg_next->c_age_list); - + if (queue_end(qhead, (queue_entry_t) c_seg_next)) { queue_enter(qhead, c_seg, c_segment_t, c_age_list); break; @@ -960,7 +992,6 @@ int try_minor_compaction_succeeded = 0; void c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg) { - assert(c_seg->c_on_minorcompact_q); /* * c_seg is currently on the delayed minor compaction @@ -969,7 +1000,7 @@ c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg) * because the lock order is c_list_lock then c_seg's lock) * we'll pull it from the delayed list and free it directly */ - if ( !lck_mtx_try_lock_spin_always(c_list_lock)) { + if (!lck_mtx_try_lock_spin_always(c_list_lock)) { /* * c_list_lock is held, we need to bail */ @@ -988,7 +1019,7 @@ c_seg_try_minor_compaction_and_unlock(c_segment_t c_seg) int c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, boolean_t need_list_lock, boolean_t disallow_page_replacement) { - int c_seg_freed; + int c_seg_freed; assert(c_seg->c_busy); assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)); @@ -1003,21 +1034,21 @@ c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, bo * we will eventually run into the c_segments_limit */ if (c_seg->c_state == C_ON_MAJORCOMPACT_Q && C_SEG_SHOULD_MAJORCOMPACT_NOW(c_seg)) { - c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE); } if (!c_seg->c_on_minorcompact_q) { - if (clear_busy == TRUE) + if (clear_busy == TRUE) { C_SEG_WAKEUP_DONE(c_seg); + } lck_mtx_unlock_always(&c_seg->c_lock); - return (0); + return 0; } queue_remove(&c_minor_list_head, c_seg, c_segment_t, c_list); c_seg->c_on_minorcompact_q = 0; c_minor_count--; - + lck_mtx_unlock_always(c_list_lock); if (disallow_page_replacement == TRUE) { @@ -1029,13 +1060,15 @@ c_seg_do_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy, bo } c_seg_freed = c_seg_minor_compaction_and_unlock(c_seg, clear_busy); - if (disallow_page_replacement == TRUE) + if (disallow_page_replacement == TRUE) { PAGE_REPLACEMENT_DISALLOWED(FALSE); + } - if (need_list_lock == TRUE) + if (need_list_lock == TRUE) { lck_mtx_lock_spin_always(c_list_lock); + } - return (c_seg_freed); + return c_seg_freed; } @@ -1053,204 +1086,211 @@ c_seg_wait_on_busy(c_segment_t c_seg) void c_seg_switch_state(c_segment_t c_seg, int new_state, boolean_t insert_head) { - int old_state = c_seg->c_state; + int old_state = c_seg->c_state; #if __i386__ || __x86_64__ - if (new_state != C_IS_FILLING) + if (new_state != C_IS_FILLING) { LCK_MTX_ASSERT(&c_seg->c_lock, LCK_MTX_ASSERT_OWNED); + } LCK_MTX_ASSERT(c_list_lock, 
LCK_MTX_ASSERT_OWNED); #endif switch (old_state) { + case C_IS_EMPTY: + assert(new_state == C_IS_FILLING || new_state == C_IS_FREE); - case C_IS_EMPTY: - assert(new_state == C_IS_FILLING || new_state == C_IS_FREE); + c_empty_count--; + break; - c_empty_count--; - break; + case C_IS_FILLING: + assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q); - case C_IS_FILLING: - assert(new_state == C_ON_AGE_Q || new_state == C_ON_SWAPOUT_Q); + queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list); + c_filling_count--; + break; - queue_remove(&c_filling_list_head, c_seg, c_segment_t, c_age_list); - c_filling_count--; - break; + case C_ON_AGE_Q: + assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q || + new_state == C_IS_FREE); - case C_ON_AGE_Q: - assert(new_state == C_ON_SWAPOUT_Q || new_state == C_ON_MAJORCOMPACT_Q || - new_state == C_IS_FREE); + queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list); + c_age_count--; + break; - queue_remove(&c_age_list_head, c_seg, c_segment_t, c_age_list); - c_age_count--; - break; - - case C_ON_SWAPPEDIN_Q: - assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE); + case C_ON_SWAPPEDIN_Q: + assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE); - queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list); - c_swappedin_count--; - break; + queue_remove(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list); + c_swappedin_count--; + break; - case C_ON_SWAPOUT_Q: - assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q); + case C_ON_SWAPOUT_Q: + assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE || new_state == C_IS_EMPTY || new_state == C_ON_SWAPIO_Q); - queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list); - thread_wakeup((event_t)&compaction_swapper_running); - c_swapout_count--; - break; + queue_remove(&c_swapout_list_head, c_seg, c_segment_t, c_age_list); + thread_wakeup((event_t)&compaction_swapper_running); + c_swapout_count--; + break; - case C_ON_SWAPIO_Q: - assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q); + case C_ON_SWAPIO_Q: + assert(new_state == C_ON_SWAPPEDOUT_Q || new_state == C_ON_SWAPPEDOUTSPARSE_Q || new_state == C_ON_AGE_Q); - queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list); - c_swapio_count--; - break; + queue_remove(&c_swapio_list_head, c_seg, c_segment_t, c_age_list); + c_swapio_count--; + break; - case C_ON_SWAPPEDOUT_Q: - assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q || - new_state == C_ON_SWAPPEDOUTSPARSE_Q || - new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE); + case C_ON_SWAPPEDOUT_Q: + assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q || + new_state == C_ON_SWAPPEDOUTSPARSE_Q || + new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE); - queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list); - c_swappedout_count--; - break; + queue_remove(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list); + c_swappedout_count--; + break; - case C_ON_SWAPPEDOUTSPARSE_Q: - assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q || - new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE); + case C_ON_SWAPPEDOUTSPARSE_Q: + assert(new_state == C_ON_SWAPPEDIN_Q || new_state == C_ON_AGE_Q || + new_state == C_ON_BAD_Q || new_state == C_IS_EMPTY || new_state == C_IS_FREE); - 
queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list); - c_swappedout_sparse_count--; - break; + queue_remove(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list); + c_swappedout_sparse_count--; + break; - case C_ON_MAJORCOMPACT_Q: - assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE); + case C_ON_MAJORCOMPACT_Q: + assert(new_state == C_ON_AGE_Q || new_state == C_IS_FREE); - queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list); - c_major_count--; - break; + queue_remove(&c_major_list_head, c_seg, c_segment_t, c_age_list); + c_major_count--; + break; - case C_ON_BAD_Q: - assert(new_state == C_IS_FREE); + case C_ON_BAD_Q: + assert(new_state == C_IS_FREE); - queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list); - c_bad_count--; - break; + queue_remove(&c_bad_list_head, c_seg, c_segment_t, c_age_list); + c_bad_count--; + break; - default: - panic("c_seg %p has bad c_state = %d\n", c_seg, old_state); + default: + panic("c_seg %p has bad c_state = %d\n", c_seg, old_state); } - switch(new_state) { - case C_IS_FREE: - assert(old_state != C_IS_FILLING); + switch (new_state) { + case C_IS_FREE: + assert(old_state != C_IS_FILLING); - break; + break; - case C_IS_EMPTY: - assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q); + case C_IS_EMPTY: + assert(old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q); - c_empty_count++; - break; + c_empty_count++; + break; - case C_IS_FILLING: - assert(old_state == C_IS_EMPTY); + case C_IS_FILLING: + assert(old_state == C_IS_EMPTY); - queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list); - c_filling_count++; - break; + queue_enter(&c_filling_list_head, c_seg, c_segment_t, c_age_list); + c_filling_count++; + break; - case C_ON_AGE_Q: - assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q || - old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q || - old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q); - - if (old_state == C_IS_FILLING) - queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list); - else { - if (!queue_empty(&c_age_list_head)) { - c_segment_t c_first; - - c_first = (c_segment_t)queue_first(&c_age_list_head); - c_seg->c_creation_ts = c_first->c_creation_ts; - } - queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list); + case C_ON_AGE_Q: + assert(old_state == C_IS_FILLING || old_state == C_ON_SWAPPEDIN_Q || + old_state == C_ON_SWAPOUT_Q || old_state == C_ON_SWAPIO_Q || + old_state == C_ON_MAJORCOMPACT_Q || old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q); + + if (old_state == C_IS_FILLING) { + queue_enter(&c_age_list_head, c_seg, c_segment_t, c_age_list); + } else { + if (!queue_empty(&c_age_list_head)) { + c_segment_t c_first; + + c_first = (c_segment_t)queue_first(&c_age_list_head); + c_seg->c_creation_ts = c_first->c_creation_ts; } - c_age_count++; - break; - - case C_ON_SWAPPEDIN_Q: - assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q); - - if (insert_head == TRUE) - queue_enter_first(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list); - else - queue_enter(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list); - c_swappedin_count++; - break; + queue_enter_first(&c_age_list_head, c_seg, c_segment_t, c_age_list); + } + c_age_count++; + break; - case C_ON_SWAPOUT_Q: - assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING); 
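/*
 * Both halves of c_seg_switch_state() are being re-indented in this
 * hunk (the new style puts case labels at switch depth), but the shape
 * is unchanged: the first switch dequeues the segment from old_state's
 * list and decrements that state's count, the second enqueues it on
 * new_state's list (head or tail per insert_head) and increments the
 * count, with the asserts spelling out the legal transition graph. A
 * minimal sketch of the pattern, using xnu's queue macros but
 * hypothetical seg_q_t/seg_q/seg_count names:
 *
 *	typedef enum { Q_AGE, Q_SWAPOUT, Q_MAX } seg_q_t;
 *
 *	static queue_head_t seg_q[Q_MAX];
 *	static uint32_t     seg_count[Q_MAX];
 *
 *	static void
 *	seg_switch_state(c_segment_t seg, seg_q_t old_q, seg_q_t new_q,
 *	    boolean_t insert_head)
 *	{
 *		queue_remove(&seg_q[old_q], seg, c_segment_t, c_age_list);
 *		seg_count[old_q]--;
 *		if (insert_head) {
 *			queue_enter_first(&seg_q[new_q], seg, c_segment_t, c_age_list);
 *		} else {
 *			queue_enter(&seg_q[new_q], seg, c_segment_t, c_age_list);
 *		}
 *		seg_count[new_q]++;
 *	}
 *
 * The real function cannot be table-driven this way because several
 * transitions have side effects (leaving C_ON_SWAPOUT_Q, for instance,
 * wakes compaction_swapper_running), hence the explicit switch.
 */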
+ case C_ON_SWAPPEDIN_Q: + assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q); - if (insert_head == TRUE) - queue_enter_first(&c_swapout_list_head, c_seg, c_segment_t, c_age_list); - else - queue_enter(&c_swapout_list_head, c_seg, c_segment_t, c_age_list); - c_swapout_count++; - break; + if (insert_head == TRUE) { + queue_enter_first(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list); + } else { + queue_enter(&c_swappedin_list_head, c_seg, c_segment_t, c_age_list); + } + c_swappedin_count++; + break; - case C_ON_SWAPIO_Q: - assert(old_state == C_ON_SWAPOUT_Q); + case C_ON_SWAPOUT_Q: + assert(old_state == C_ON_AGE_Q || old_state == C_IS_FILLING); - if (insert_head == TRUE) - queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list); - else - queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list); - c_swapio_count++; - break; + if (insert_head == TRUE) { + queue_enter_first(&c_swapout_list_head, c_seg, c_segment_t, c_age_list); + } else { + queue_enter(&c_swapout_list_head, c_seg, c_segment_t, c_age_list); + } + c_swapout_count++; + break; - case C_ON_SWAPPEDOUT_Q: - assert(old_state == C_ON_SWAPIO_Q); + case C_ON_SWAPIO_Q: + assert(old_state == C_ON_SWAPOUT_Q); - if (insert_head == TRUE) - queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list); - else - queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list); - c_swappedout_count++; - break; + if (insert_head == TRUE) { + queue_enter_first(&c_swapio_list_head, c_seg, c_segment_t, c_age_list); + } else { + queue_enter(&c_swapio_list_head, c_seg, c_segment_t, c_age_list); + } + c_swapio_count++; + break; - case C_ON_SWAPPEDOUTSPARSE_Q: - assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q); - - if (insert_head == TRUE) - queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list); - else - queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list); + case C_ON_SWAPPEDOUT_Q: + assert(old_state == C_ON_SWAPIO_Q); - c_swappedout_sparse_count++; - break; + if (insert_head == TRUE) { + queue_enter_first(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list); + } else { + queue_enter(&c_swappedout_list_head, c_seg, c_segment_t, c_age_list); + } + c_swappedout_count++; + break; - case C_ON_MAJORCOMPACT_Q: - assert(old_state == C_ON_AGE_Q); + case C_ON_SWAPPEDOUTSPARSE_Q: + assert(old_state == C_ON_SWAPIO_Q || old_state == C_ON_SWAPPEDOUT_Q); - if (insert_head == TRUE) - queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list); - else - queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list); - c_major_count++; - break; + if (insert_head == TRUE) { + queue_enter_first(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list); + } else { + queue_enter(&c_swappedout_sparse_list_head, c_seg, c_segment_t, c_age_list); + } - case C_ON_BAD_Q: - assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q); + c_swappedout_sparse_count++; + break; - if (insert_head == TRUE) - queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list); - else - queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list); - c_bad_count++; - break; + case C_ON_MAJORCOMPACT_Q: + assert(old_state == C_ON_AGE_Q); + + if (insert_head == TRUE) { + queue_enter_first(&c_major_list_head, c_seg, c_segment_t, c_age_list); + } else { + queue_enter(&c_major_list_head, c_seg, c_segment_t, c_age_list); + } + c_major_count++; + break; - default: - panic("c_seg %p requesting bad c_state = %d\n", 
c_seg, new_state); + case C_ON_BAD_Q: + assert(old_state == C_ON_SWAPPEDOUT_Q || old_state == C_ON_SWAPPEDOUTSPARSE_Q); + + if (insert_head == TRUE) { + queue_enter_first(&c_bad_list_head, c_seg, c_segment_t, c_age_list); + } else { + queue_enter(&c_bad_list_head, c_seg, c_segment_t, c_age_list); + } + c_bad_count++; + break; + + default: + panic("c_seg %p requesting bad c_state = %d\n", c_seg, new_state); } c_seg->c_state = new_state; } @@ -1273,10 +1313,10 @@ c_seg_free(c_segment_t c_seg) void c_seg_free_locked(c_segment_t c_seg) { - int segno; - int pages_populated = 0; - int32_t *c_buffer = NULL; - uint64_t c_swap_handle = 0; + int segno; + int pages_populated = 0; + int32_t *c_buffer = NULL; + uint64_t c_swap_handle = 0; assert(c_seg->c_busy); assert(c_seg->c_slots_used == 0); @@ -1286,11 +1326,12 @@ c_seg_free_locked(c_segment_t c_seg) if (c_seg->c_overage_swap == TRUE) { c_overage_swapped_count--; c_seg->c_overage_swap = FALSE; - } - if ( !(C_SEG_IS_ONDISK(c_seg))) + } + if (!(C_SEG_IS_ONDISK(c_seg))) { c_buffer = c_seg->c_store.c_buffer; - else + } else { c_swap_handle = c_seg->c_store.c_swap_handle; + } c_seg_switch_state(c_seg, C_IS_FREE, FALSE); @@ -1299,18 +1340,19 @@ c_seg_free_locked(c_segment_t c_seg) if (c_buffer) { pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE; c_seg->c_store.c_buffer = NULL; - } else + } else { c_seg->c_store.c_swap_handle = (uint64_t)-1; + } lck_mtx_unlock_always(&c_seg->c_lock); if (c_buffer) { - if (pages_populated) + if (pages_populated) { kernel_memory_depopulate(compressor_map, (vm_offset_t) c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR); - + } } else if (c_swap_handle) { - /* - * Free swap space on disk. + /* + * Free swap space on disk. */ vm_swap_free(c_swap_handle); } @@ -1328,8 +1370,8 @@ c_seg_free_locked(c_segment_t c_seg) /* * because the c_buffer is now associated with the segno, * we can't put the segno back on the free list until - * after we have depopulated the c_buffer range, or - * we run the risk of depopulating a range that is + * after we have depopulated the c_buffer range, or + * we run the risk of depopulating a range that is * now being used in one of the compressor heads */ c_segments[segno].c_segno = c_free_segno_head; @@ -1340,8 +1382,9 @@ c_seg_free_locked(c_segment_t c_seg) lck_mtx_destroy(&c_seg->c_lock, &vm_compressor_lck_grp); - if (c_seg->c_slot_var_array_len) + if (c_seg->c_slot_var_array_len) { kfree(c_seg->c_slot_var_array, sizeof(struct c_slot) * c_seg->c_slot_var_array_len); + } zfree(compressor_segment_zone, c_seg); } @@ -1353,20 +1396,20 @@ int c_seg_trim_page_count = 0; void c_seg_trim_tail(c_segment_t c_seg) { - c_slot_t cs; - uint32_t c_size; - uint32_t c_offset; - uint32_t c_rounded_size; - uint16_t current_nextslot; - uint32_t current_populated_offset; - - if (c_seg->c_bytes_used == 0) + c_slot_t cs; + uint32_t c_size; + uint32_t c_offset; + uint32_t c_rounded_size; + uint16_t current_nextslot; + uint32_t current_populated_offset; + + if (c_seg->c_bytes_used == 0) { return; + } current_nextslot = c_seg->c_nextslot; current_populated_offset = c_seg->c_populated_offset; - - while (c_seg->c_nextslot) { + while (c_seg->c_nextslot) { cs = C_SEG_SLOT_FROM_INDEX(c_seg, (c_seg->c_nextslot - 1)); c_size = UNPACK_C_SIZE(cs); @@ -1378,18 +1421,19 @@ c_seg_trim_tail(c_segment_t c_seg) c_seg->c_nextoffset = c_offset; c_seg->c_populated_offset = (c_offset + (C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1)) & - ~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1); + 
~(C_SEG_BYTES_TO_OFFSET(PAGE_SIZE) - 1); - if (c_seg->c_firstemptyslot > c_seg->c_nextslot) + if (c_seg->c_firstemptyslot > c_seg->c_nextslot) { c_seg->c_firstemptyslot = c_seg->c_nextslot; + } #if DEVELOPMENT || DEBUG c_seg_trim_page_count += ((round_page_32(C_SEG_OFFSET_TO_BYTES(current_populated_offset)) - - round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / - PAGE_SIZE); + round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / + PAGE_SIZE); #endif } break; - } + } c_seg->c_nextslot--; } assert(c_seg->c_nextslot); @@ -1400,14 +1444,14 @@ int c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy) { c_slot_mapping_t slot_ptr; - uint32_t c_offset = 0; - uint32_t old_populated_offset; - uint32_t c_rounded_size; - uint32_t c_size; - int c_indx = 0; - int i; - c_slot_t c_dst; - c_slot_t c_src; + uint32_t c_offset = 0; + uint32_t old_populated_offset; + uint32_t c_rounded_size; + uint32_t c_size; + int c_indx = 0; + int i; + c_slot_t c_dst; + c_slot_t c_src; assert(c_seg->c_busy); @@ -1416,12 +1460,13 @@ c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy) #endif if (c_seg->c_bytes_used == 0) { c_seg_free(c_seg); - return (1); + return 1; } lck_mtx_unlock_always(&c_seg->c_lock); - if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) + if (c_seg->c_firstemptyslot >= c_seg->c_nextslot || C_SEG_UNUSED_BYTES(c_seg) < PAGE_SIZE) { goto done; + } /* TODO: assert first emptyslot's c_size is actually 0 */ @@ -1434,18 +1479,18 @@ c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy) #endif c_indx = c_seg->c_firstemptyslot; c_dst = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx); - + old_populated_offset = c_seg->c_populated_offset; c_offset = c_dst->c_offset; for (i = c_indx + 1; i < c_seg->c_nextslot && c_offset < c_seg->c_nextoffset; i++) { - c_src = C_SEG_SLOT_FROM_INDEX(c_seg, i); c_size = UNPACK_C_SIZE(c_src); - if (c_size == 0) + if (c_size == 0) { continue; + } c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK; /* N.B.: This memcpy may be an overlapping copy */ @@ -1473,8 +1518,8 @@ c_seg_minor_compaction_and_unlock(c_segment_t c_seg, boolean_t clear_busy) c_seg_validate(c_seg, TRUE); #endif if (old_populated_offset > c_seg->c_populated_offset) { - uint32_t gc_size; - int32_t *gc_ptr; + uint32_t gc_size; + int32_t *gc_ptr; gc_size = C_SEG_OFFSET_TO_BYTES(old_populated_offset - c_seg->c_populated_offset); gc_ptr = &c_seg->c_store.c_buffer[c_seg->c_populated_offset]; @@ -1492,45 +1537,48 @@ done: C_SEG_WAKEUP_DONE(c_seg); lck_mtx_unlock_always(&c_seg->c_lock); } - return (0); + return 0; } static void c_seg_alloc_nextslot(c_segment_t c_seg) { - struct c_slot *old_slot_array = NULL; - struct c_slot *new_slot_array = NULL; - int newlen; - int oldlen; + struct c_slot *old_slot_array = NULL; + struct c_slot *new_slot_array = NULL; + int newlen; + int oldlen; - if (c_seg->c_nextslot < c_seg_fixed_array_len) + if (c_seg->c_nextslot < c_seg_fixed_array_len) { return; + } if ((c_seg->c_nextslot - c_seg_fixed_array_len) >= c_seg->c_slot_var_array_len) { - oldlen = c_seg->c_slot_var_array_len; old_slot_array = c_seg->c_slot_var_array; - if (oldlen == 0) + if (oldlen == 0) { newlen = C_SEG_SLOT_VAR_ARRAY_MIN_LEN; - else + } else { newlen = oldlen * 2; + } new_slot_array = (struct c_slot *)kalloc(sizeof(struct c_slot) * newlen); lck_mtx_lock_spin_always(&c_seg->c_lock); - if (old_slot_array) + if (old_slot_array) { memcpy((char *)new_slot_array, (char 
*)old_slot_array, sizeof(struct c_slot) * oldlen); + } c_seg->c_slot_var_array_len = newlen; c_seg->c_slot_var_array = new_slot_array; lck_mtx_unlock_always(&c_seg->c_lock); - - if (old_slot_array) + + if (old_slot_array) { kfree(old_slot_array, sizeof(struct c_slot) * oldlen); + } } } @@ -1547,7 +1595,7 @@ struct { } c_seg_major_compact_stats; -#define C_MAJOR_COMPACTION_SIZE_APPROPRIATE ((C_SEG_BUFSIZE * 90) / 100) +#define C_MAJOR_COMPACTION_SIZE_APPROPRIATE ((C_SEG_BUFSIZE * 90) / 100) boolean_t @@ -1555,21 +1603,21 @@ c_seg_major_compact_ok( c_segment_t c_seg_dst, c_segment_t c_seg_src) { - c_seg_major_compact_stats.asked_permission++; if (c_seg_src->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE && - c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) - return (FALSE); + c_seg_dst->c_bytes_used >= C_MAJOR_COMPACTION_SIZE_APPROPRIATE) { + return FALSE; + } if (c_seg_dst->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg_dst->c_nextslot >= C_SLOT_MAX_INDEX) { /* * destination segment is full... can't compact */ - return (FALSE); + return FALSE; } - return (TRUE); + return TRUE; } @@ -1579,14 +1627,14 @@ c_seg_major_compact( c_segment_t c_seg_src) { c_slot_mapping_t slot_ptr; - uint32_t c_rounded_size; - uint32_t c_size; - uint16_t dst_slot; - int i; - c_slot_t c_dst; - c_slot_t c_src; - boolean_t keep_compacting = TRUE; - + uint32_t c_rounded_size; + uint32_t c_size; + uint16_t dst_slot; + int i; + c_slot_t c_dst; + c_slot_t c_src; + boolean_t keep_compacting = TRUE; + /* * segments are not locked but they are both marked c_busy * which keeps c_decompress from working on them... @@ -1607,7 +1655,6 @@ c_seg_major_compact( dst_slot = c_seg_dst->c_nextslot; for (i = 0; i < c_seg_src->c_nextslot; i++) { - c_src = C_SEG_SLOT_FROM_INDEX(c_seg_src, i); c_size = UNPACK_C_SIZE(c_src); @@ -1618,24 +1665,25 @@ c_seg_major_compact( } if (C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset - c_seg_dst->c_nextoffset) < (unsigned) c_size) { - int size_to_populate; + int size_to_populate; /* doesn't fit */ size_to_populate = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset); - if (size_to_populate == 0) { + if (size_to_populate == 0) { /* can't fit */ keep_compacting = FALSE; break; } - if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) + if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) { size_to_populate = C_SEG_MAX_POPULATE_SIZE; + } kernel_memory_populate(compressor_map, - (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset], - size_to_populate, - KMA_COMPRESSOR, - VM_KERN_MEMORY_COMPRESSOR); + (vm_offset_t) &c_seg_dst->c_store.c_buffer[c_seg_dst->c_populated_offset], + size_to_populate, + KMA_COMPRESSOR, + VM_KERN_MEMORY_COMPRESSOR); c_seg_dst->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate); assert(C_SEG_OFFSET_TO_BYTES(c_seg_dst->c_populated_offset) <= C_SEG_BUFSIZE); @@ -1654,8 +1702,9 @@ c_seg_major_compact( cslot_copy(c_dst, c_src); c_dst->c_offset = c_seg_dst->c_nextoffset; - if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) + if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) { c_seg_dst->c_firstemptyslot++; + } c_seg_dst->c_slots_used++; c_seg_dst->c_nextslot++; c_seg_dst->c_bytes_used += c_rounded_size; @@ -1680,19 +1729,17 @@ c_seg_major_compact( C_SEG_WRITE_PROTECT(c_seg_dst); #endif if (dst_slot < c_seg_dst->c_nextslot) { - PAGE_REPLACEMENT_ALLOWED(TRUE); /* - * we've now locked out c_decompress from + * we've now locked out c_decompress from * converting the slot passed into it into - * a c_segment_t which allows 
us to use + * a c_segment_t which allows us to use * the backptr to change which c_segment and * index the slot points to */ while (dst_slot < c_seg_dst->c_nextslot) { - c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot); - + slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst); /* would mean "empty slot", so use csegno+1 */ slot_ptr->s_cseg = c_seg_dst->c_mysegno + 1; @@ -1700,20 +1747,20 @@ c_seg_major_compact( } PAGE_REPLACEMENT_ALLOWED(FALSE); } - return (keep_compacting); + return keep_compacting; } uint64_t vm_compressor_compute_elapsed_msecs(clock_sec_t end_sec, clock_nsec_t end_nsec, clock_sec_t start_sec, clock_nsec_t start_nsec) { - uint64_t end_msecs; - uint64_t start_msecs; - + uint64_t end_msecs; + uint64_t start_msecs; + end_msecs = (end_sec * 1000) + end_nsec / 1000000; start_msecs = (start_sec * 1000) + start_nsec / 1000000; - return (end_msecs - start_msecs); + return end_msecs - start_msecs; } @@ -1725,18 +1772,18 @@ uint32_t compressor_thrashing_threshold_per_10msecs = 50; uint32_t compressor_thrashing_min_per_10msecs = 20; /* When true, reset sample data next chance we get. */ -static boolean_t compressor_need_sample_reset = FALSE; +static boolean_t compressor_need_sample_reset = FALSE; void compute_swapout_target_age(void) { - clock_sec_t cur_ts_sec; - clock_nsec_t cur_ts_nsec; - uint32_t min_operations_needed_in_this_sample; - uint64_t elapsed_msecs_in_eval; - uint64_t elapsed_msecs_in_sample; - boolean_t need_eval_reset = FALSE; + clock_sec_t cur_ts_sec; + clock_nsec_t cur_ts_nsec; + uint32_t min_operations_needed_in_this_sample; + uint64_t elapsed_msecs_in_eval; + uint64_t elapsed_msecs_in_sample; + boolean_t need_eval_reset = FALSE; clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec); @@ -1749,9 +1796,10 @@ compute_swapout_target_age(void) goto done; } elapsed_msecs_in_eval = vm_compressor_compute_elapsed_msecs(cur_ts_sec, cur_ts_nsec, start_of_eval_period_sec, start_of_eval_period_nsec); - - if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) + + if (elapsed_msecs_in_eval < compressor_eval_period_in_msecs) { goto done; + } need_eval_reset = TRUE; KERNEL_DEBUG(0xe0400020 | DBG_FUNC_START, elapsed_msecs_in_eval, sample_period_compression_count, sample_period_decompression_count, 0, 0); @@ -1760,9 +1808,8 @@ compute_swapout_target_age(void) if ((sample_period_compression_count - last_eval_compression_count) < min_operations_needed_in_this_sample || (sample_period_decompression_count - last_eval_decompression_count) < min_operations_needed_in_this_sample) { - KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_compression_count - last_eval_compression_count, - sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0); + sample_period_decompression_count - last_eval_decompression_count, 0, 1, 0); swapout_target_age = 0; @@ -1774,46 +1821,43 @@ compute_swapout_target_age(void) last_eval_decompression_count = sample_period_decompression_count; if (elapsed_msecs_in_sample < compressor_sample_min_in_msecs) { - KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, swapout_target_age, 0, 0, 5, 0); goto done; } if (sample_period_decompression_count > ((compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10)) { - - uint64_t running_total; - uint64_t working_target; - uint64_t aging_target; - uint32_t oldest_age_of_csegs_sampled = 0; - uint64_t working_set_approximation = 0; + uint64_t running_total; + uint64_t working_target; + uint64_t aging_target; + uint32_t oldest_age_of_csegs_sampled = 0; + uint64_t working_set_approximation = 0; 
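/*
 * Worked example for the histogram walk that follows (the counts are
 * assumed for illustration, not taken from this patch): with
 * sample_period_decompression_count = 10,000, working_target is
 * (10000 / 100) * 95 = 9,500 and aging_target is (10000 / 100) * 1 = 100.
 * The first loop below advances oldest_age_of_csegs_sampled until the
 * accumulated per-age decompression counts cover 95% of the sample,
 * which approximates the oldest age still inside the hot working set;
 * the second, descending loop uses the 1% aging_target to pick the age
 * beyond which segments are cold enough to become swapout candidates.
 */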
swapout_target_age = 0; - working_target = (sample_period_decompression_count / 100) * 95; /* 95 percent */ - aging_target = (sample_period_decompression_count / 100) * 1; /* 1 percent */ + working_target = (sample_period_decompression_count / 100) * 95; /* 95 percent */ + aging_target = (sample_period_decompression_count / 100) * 1; /* 1 percent */ running_total = 0; for (oldest_age_of_csegs_sampled = 0; oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE; oldest_age_of_csegs_sampled++) { - running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled]; working_set_approximation += oldest_age_of_csegs_sampled * age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled]; - if (running_total >= working_target) + if (running_total >= working_target) { break; + } } if (oldest_age_of_csegs_sampled < DECOMPRESSION_SAMPLE_MAX_AGE) { - working_set_approximation = (working_set_approximation * 1000) / elapsed_msecs_in_sample; if (working_set_approximation < VM_PAGE_COMPRESSOR_COUNT) { - running_total = overage_decompressions_during_sample_period; for (oldest_age_of_csegs_sampled = DECOMPRESSION_SAMPLE_MAX_AGE - 1; oldest_age_of_csegs_sampled; oldest_age_of_csegs_sampled--) { running_total += age_of_decompressions_during_sample_period[oldest_age_of_csegs_sampled]; - if (running_total >= aging_target) + if (running_total >= aging_target) { break; + } } swapout_target_age = (uint32_t)cur_ts_sec - oldest_age_of_csegs_sampled; @@ -1821,13 +1865,15 @@ compute_swapout_target_age(void) } else { KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_set_approximation, VM_PAGE_COMPRESSOR_COUNT, 0, 3, 0); } - } else + } else { KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, working_target, running_total, 0, 4, 0); + } compressor_need_sample_reset = TRUE; need_eval_reset = TRUE; - } else + } else { KERNEL_DEBUG(0xe0400020 | DBG_FUNC_END, sample_period_decompression_count, (compressor_thrashing_threshold_per_10msecs * elapsed_msecs_in_sample) / 10, 0, 6, 0); + } done: if (compressor_need_sample_reset == TRUE) { bzero(age_of_decompressions_during_sample_period, sizeof(age_of_decompressions_during_sample_period)); @@ -1848,86 +1894,89 @@ done: } -int compaction_swapper_init_now = 0; -int compaction_swapper_running = 0; -int compaction_swapper_awakened = 0; -int compaction_swapper_abort = 0; +int compaction_swapper_init_now = 0; +int compaction_swapper_running = 0; +int compaction_swapper_awakened = 0; +int compaction_swapper_abort = 0; #if CONFIG_JETSAM -boolean_t memorystatus_kill_on_VM_compressor_thrashing(boolean_t); -boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t); -boolean_t memorystatus_kill_on_FC_thrashing(boolean_t); -int compressor_thrashing_induced_jetsam = 0; -int filecache_thrashing_induced_jetsam = 0; -static boolean_t vm_compressor_thrashing_detected = FALSE; +boolean_t memorystatus_kill_on_VM_compressor_thrashing(boolean_t); +boolean_t memorystatus_kill_on_VM_compressor_space_shortage(boolean_t); +boolean_t memorystatus_kill_on_FC_thrashing(boolean_t); +int compressor_thrashing_induced_jetsam = 0; +int filecache_thrashing_induced_jetsam = 0; +static boolean_t vm_compressor_thrashing_detected = FALSE; #endif /* CONFIG_JETSAM */ static boolean_t compressor_needs_to_swap(void) { - boolean_t should_swap = FALSE; + boolean_t should_swap = FALSE; if (vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit) { - c_segment_t c_seg; - clock_sec_t now; - clock_sec_t age; - clock_nsec_t nsec; - - clock_get_system_nanotime(&now, 
&nsec); + c_segment_t c_seg; + clock_sec_t now; + clock_sec_t age; + clock_nsec_t nsec; + + clock_get_system_nanotime(&now, &nsec); age = 0; lck_mtx_lock_spin_always(c_list_lock); - if ( !queue_empty(&c_age_list_head)) { + if (!queue_empty(&c_age_list_head)) { c_seg = (c_segment_t) queue_first(&c_age_list_head); age = now - c_seg->c_creation_ts; } lck_mtx_unlock_always(c_list_lock); - if (age >= vm_ripe_target_age) - return (TRUE); + if (age >= vm_ripe_target_age) { + return TRUE; + } } if (VM_CONFIG_SWAP_IS_ACTIVE) { if (COMPRESSOR_NEEDS_TO_SWAP()) { - return (TRUE); + return TRUE; } if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_external) && vm_page_anonymous_count < (vm_page_inactive_count / 20)) { - return (TRUE); + return TRUE; + } + if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) { + return TRUE; } - if (vm_page_free_count < (vm_page_free_reserved - (COMPRESSOR_FREE_RESERVED_LIMIT * 2))) - return (TRUE); } compute_swapout_target_age(); - + if (swapout_target_age) { - c_segment_t c_seg; + c_segment_t c_seg; lck_mtx_lock_spin_always(c_list_lock); if (!queue_empty(&c_age_list_head)) { - c_seg = (c_segment_t) queue_first(&c_age_list_head); - if (c_seg->c_creation_ts > swapout_target_age) + if (c_seg->c_creation_ts > swapout_target_age) { swapout_target_age = 0; + } } lck_mtx_unlock_always(c_list_lock); } #if CONFIG_PHANTOM_CACHE - if (vm_phantom_cache_check_pressure()) + if (vm_phantom_cache_check_pressure()) { should_swap = TRUE; + } #endif - if (swapout_target_age) + if (swapout_target_age) { should_swap = TRUE; + } #if CONFIG_JETSAM if (should_swap || vm_compressor_low_on_space() == TRUE) { - if (vm_compressor_thrashing_detected == FALSE) { vm_compressor_thrashing_detected = TRUE; - + if (swapout_target_age || vm_compressor_low_on_space() == TRUE) { if (swapout_target_age) { /* The compressor is thrashing. 
*/ @@ -1975,7 +2024,7 @@ compressor_needs_to_swap(void) * segments that have been major compacted * will be moved to the majorcompact queue */ - return (should_swap); + return should_swap; } #if CONFIG_JETSAM @@ -2014,8 +2063,9 @@ uint32_t vm_run_compactor_waited = 0; void vm_run_compactor(void) { - if (c_segment_count == 0) + if (c_segment_count == 0) { return; + } lck_mtx_lock_spin_always(c_list_lock); @@ -2026,7 +2076,6 @@ vm_run_compactor(void) return; } if (compaction_swapper_running) { - if (vm_pageout_state.vm_restricted_to_single_processor == FALSE) { vm_run_compactor_already_running++; @@ -2038,7 +2087,7 @@ vm_run_compactor(void) assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT); lck_mtx_unlock_always(c_list_lock); - + thread_block(THREAD_CONTINUE_NULL); return; @@ -2061,17 +2110,16 @@ vm_run_compactor(void) void vm_wake_compactor_swapper(void) { - if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) + if (compaction_swapper_running || compaction_swapper_awakened || c_segment_count == 0) { return; + } if (c_minor_count || vm_compressor_needs_to_major_compact()) { - lck_mtx_lock_spin_always(c_list_lock); fastwake_warmup = FALSE; if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) { - vm_wake_compactor_swapper_calls++; compaction_swapper_awakened = 1; @@ -2085,9 +2133,9 @@ vm_wake_compactor_swapper(void) void vm_consider_swapping() { - c_segment_t c_seg, c_seg_next; - clock_sec_t now; - clock_nsec_t nsec; + c_segment_t c_seg, c_seg_next; + clock_sec_t now; + clock_nsec_t nsec; assert(VM_CONFIG_SWAP_IS_PRESENT); @@ -2099,7 +2147,7 @@ vm_consider_swapping() assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT); lck_mtx_unlock_always(c_list_lock); - + thread_block(THREAD_CONTINUE_NULL); lck_mtx_lock_spin_always(c_list_lock); @@ -2110,22 +2158,20 @@ vm_consider_swapping() vm_swapout_ripe_segments = TRUE; if (!queue_empty(&c_major_list_head)) { - clock_get_system_nanotime(&now, &nsec); - + c_seg = (c_segment_t)queue_first(&c_major_list_head); while (!queue_end(&c_major_list_head, (queue_entry_t)c_seg)) { - - if (c_overage_swapped_count >= c_overage_swapped_limit) + if (c_overage_swapped_count >= c_overage_swapped_limit) { break; + } c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list); if ((now - c_seg->c_creation_ts) >= vm_ripe_target_age) { - lck_mtx_lock_spin_always(&c_seg->c_lock); - + c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE); lck_mtx_unlock_always(&c_seg->c_lock); @@ -2138,7 +2184,7 @@ vm_consider_swapping() compaction_swapper_running = 0; vm_swapout_ripe_segments = FALSE; - + lck_mtx_unlock_always(c_list_lock); thread_wakeup((event_t)&compaction_swapper_running); @@ -2148,13 +2194,15 @@ vm_consider_swapping() void vm_consider_waking_compactor_swapper(void) { - boolean_t need_wakeup = FALSE; + boolean_t need_wakeup = FALSE; - if (c_segment_count == 0) + if (c_segment_count == 0) { return; + } - if (compaction_swapper_running || compaction_swapper_awakened) + if (compaction_swapper_running || compaction_swapper_awakened) { return; + } if (!compaction_swapper_inited && !compaction_swapper_init_now) { compaction_swapper_init_now = 1; @@ -2162,23 +2210,19 @@ vm_consider_waking_compactor_swapper(void) } if (c_minor_count && (COMPRESSOR_NEEDS_TO_MINOR_COMPACT())) { - need_wakeup = TRUE; - } else if (compressor_needs_to_swap()) { - need_wakeup = TRUE; - } else if (c_minor_count) { - uint64_t total_bytes; + uint64_t total_bytes; total_bytes = compressor_object->resident_page_count * PAGE_SIZE_64; - if 
((total_bytes - compressor_bytes_used) > total_bytes / 10) + if ((total_bytes - compressor_bytes_used) > total_bytes / 10) { need_wakeup = TRUE; + } } if (need_wakeup == TRUE) { - lck_mtx_lock_spin_always(c_list_lock); fastwake_warmup = FALSE; @@ -2194,15 +2238,15 @@ vm_consider_waking_compactor_swapper(void) } -#define C_SWAPOUT_LIMIT 4 -#define DELAYED_COMPACTIONS_PER_PASS 30 +#define C_SWAPOUT_LIMIT 4 +#define DELAYED_COMPACTIONS_PER_PASS 30 void vm_compressor_do_delayed_compactions(boolean_t flush_all) { - c_segment_t c_seg; - int number_compacted = 0; - boolean_t needs_to_swap = FALSE; + c_segment_t c_seg; + int number_compacted = 0; + boolean_t needs_to_swap = FALSE; #if !CONFIG_EMBEDDED @@ -2210,13 +2254,11 @@ vm_compressor_do_delayed_compactions(boolean_t flush_all) #endif /* !CONFIG_EMBEDDED */ while (!queue_empty(&c_minor_list_head) && needs_to_swap == FALSE) { - c_seg = (c_segment_t)queue_first(&c_minor_list_head); - + lck_mtx_lock_spin_always(&c_seg->c_lock); if (c_seg->c_busy) { - lck_mtx_unlock_always(c_list_lock); c_seg_wait_on_busy(c_seg); lck_mtx_lock_spin_always(c_list_lock); @@ -2228,9 +2270,9 @@ vm_compressor_do_delayed_compactions(boolean_t flush_all) c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE); if (VM_CONFIG_SWAP_IS_ACTIVE && (number_compacted++ > DELAYED_COMPACTIONS_PER_PASS)) { - - if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT) + if ((flush_all == TRUE || compressor_needs_to_swap() == TRUE) && c_swapout_count < C_SWAPOUT_LIMIT) { needs_to_swap = TRUE; + } number_compacted = 0; } @@ -2239,24 +2281,24 @@ vm_compressor_do_delayed_compactions(boolean_t flush_all) } -#define C_SEGMENT_SWAPPEDIN_AGE_LIMIT 10 +#define C_SEGMENT_SWAPPEDIN_AGE_LIMIT 10 static void vm_compressor_age_swapped_in_segments(boolean_t flush_all) { - c_segment_t c_seg; - clock_sec_t now; - clock_nsec_t nsec; + c_segment_t c_seg; + clock_sec_t now; + clock_nsec_t nsec; - clock_get_system_nanotime(&now, &nsec); - - while (!queue_empty(&c_swappedin_list_head)) { + clock_get_system_nanotime(&now, &nsec); + while (!queue_empty(&c_swappedin_list_head)) { c_seg = (c_segment_t)queue_first(&c_swappedin_list_head); - if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) + if (flush_all == FALSE && (now - c_seg->c_swappedin_ts) < C_SEGMENT_SWAPPEDIN_AGE_LIMIT) { break; - + } + lck_mtx_lock_spin_always(&c_seg->c_lock); c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE); @@ -2266,23 +2308,23 @@ vm_compressor_age_swapped_in_segments(boolean_t flush_all) } -extern int vm_num_swap_files; -extern int vm_num_pinned_swap_files; -extern int vm_swappin_enabled; +extern int vm_num_swap_files; +extern int vm_num_pinned_swap_files; +extern int vm_swappin_enabled; -extern unsigned int vm_swapfile_total_segs_used; -extern unsigned int vm_swapfile_total_segs_alloced; +extern unsigned int vm_swapfile_total_segs_used; +extern unsigned int vm_swapfile_total_segs_alloced; void vm_compressor_flush(void) { - uint64_t vm_swap_put_failures_at_start; - wait_result_t wait_result = 0; - AbsoluteTime startTime, endTime; - clock_sec_t now_sec; - clock_nsec_t now_nsec; - uint64_t nsec; + uint64_t vm_swap_put_failures_at_start; + wait_result_t wait_result = 0; + AbsoluteTime startTime, endTime; + clock_sec_t now_sec; + clock_nsec_t now_nsec; + uint64_t nsec; HIBLOG("vm_compressor_flush - starting\n"); @@ -2297,7 +2339,7 @@ vm_compressor_flush(void) assert_wait((event_t)&compaction_swapper_running, THREAD_UNINT); 
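/*
 * A note on the wait idiom used here (annotation; assumes the standard
 * Mach semantics of these primitives): assert_wait() registers the
 * thread on the event before c_list_lock is dropped, so a
 * thread_wakeup() posted by the swapper between the unlock and the
 * thread_block() that follows is not lost; thread_block() simply
 * returns immediately instead of parking the thread.
 */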
lck_mtx_unlock_always(c_list_lock); - + thread_block(THREAD_CONTINUE_NULL); lck_mtx_lock_spin_always(c_list_lock); @@ -2317,39 +2359,40 @@ vm_compressor_flush(void) vm_compressor_compact_and_swap(TRUE); while (!queue_empty(&c_swapout_list_head)) { - - assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC); + assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC); lck_mtx_unlock_always(c_list_lock); - + wait_result = thread_block(THREAD_CONTINUE_NULL); lck_mtx_lock_spin_always(c_list_lock); - if (wait_result == THREAD_TIMED_OUT) + if (wait_result == THREAD_TIMED_OUT) { break; + } } hibernate_flushing = FALSE; compaction_swapper_running = 0; - if (vm_swap_put_failures > vm_swap_put_failures_at_start) + if (vm_swap_put_failures > vm_swap_put_failures_at_start) { HIBLOG("vm_compressor_flush failed to clean %llu segments - vm_page_compressor_count(%d)\n", - vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT); - + vm_swap_put_failures - vm_swap_put_failures_at_start, VM_PAGE_COMPRESSOR_COUNT); + } + lck_mtx_unlock_always(c_list_lock); thread_wakeup((event_t)&compaction_swapper_running); - clock_get_uptime(&endTime); - SUB_ABSOLUTETIME(&endTime, &startTime); - absolutetime_to_nanoseconds(endTime, &nsec); + clock_get_uptime(&endTime); + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nsec); HIBLOG("vm_compressor_flush completed - took %qd msecs - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d, vm_swappin_enabled = %d\n", - nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled); + nsec / 1000000ULL, vm_num_swap_files, vm_num_pinned_swap_files, vm_swappin_enabled); } -int compaction_swap_trigger_thread_awakened = 0; +int compaction_swap_trigger_thread_awakened = 0; static void vm_compressor_swap_trigger_thread(void) @@ -2358,19 +2401,20 @@ vm_compressor_swap_trigger_thread(void) /* * compaction_swapper_init_now is set when the first call to - * vm_consider_waking_compactor_swapper is made from - * vm_pageout_scan... since this function is called upon + * vm_consider_waking_compactor_swapper is made from + * vm_pageout_scan... 
since this function is called upon * thread creation, we want to make sure to delay adjusting * the tuneables until we are awakened via vm_pageout_scan * so that we are at a point where the vm_swapfile_open will * be operating on the correct directory (in case the default - * of /var/vm/ is overridden by the dymanic_pager + * of /var/vm/ is overridden by the dymanic_pager */ if (compaction_swapper_init_now) { vm_compaction_swapper_do_init(); - if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) + if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { thread_vm_bind_group_add(); + } thread_set_thread_name(current_thread(), "VM_cswap_trigger"); compaction_swapper_init_now = 0; } @@ -2380,7 +2424,6 @@ vm_compressor_swap_trigger_thread(void) compaction_swapper_awakened = 0; if (compaction_swapper_running == 0) { - compaction_swapper_running = 1; vm_compressor_compact_and_swap(FALSE); @@ -2389,13 +2432,14 @@ } assert_wait((event_t)&c_compressor_swap_trigger, THREAD_UNINT); - if (compaction_swapper_running == 0) + if (compaction_swapper_running == 0) { thread_wakeup((event_t)&compaction_swapper_running); + } lck_mtx_unlock_always(c_list_lock); - + thread_block((thread_continue_t)vm_compressor_swap_trigger_thread); - + /* NOTREACHED */ } @@ -2403,18 +2447,18 @@ void vm_compressor_record_warmup_start(void) { - c_segment_t c_seg; + c_segment_t c_seg; lck_mtx_lock_spin_always(c_list_lock); if (first_c_segment_to_warm_generation_id == 0) { if (!queue_empty(&c_age_list_head)) { - c_seg = (c_segment_t)queue_last(&c_age_list_head); first_c_segment_to_warm_generation_id = c_seg->c_generation_id; - } else + } else { first_c_segment_to_warm_generation_id = 0; + } fastwake_recording_in_progress = TRUE; } @@ -2422,22 +2466,21 @@ } -void +void vm_compressor_record_warmup_end(void) { - c_segment_t c_seg; + c_segment_t c_seg; lck_mtx_lock_spin_always(c_list_lock); if (fastwake_recording_in_progress == TRUE) { if (!queue_empty(&c_age_list_head)) { - c_seg = (c_segment_t)queue_last(&c_age_list_head); last_c_segment_to_warm_generation_id = c_seg->c_generation_id; - } else + } else { last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id; + } fastwake_recording_in_progress = FALSE; @@ -2447,13 +2490,13 @@ } -#define DELAY_TRIM_ON_WAKE_SECS 25 +#define DELAY_TRIM_ON_WAKE_SECS 25 void vm_compressor_delay_trim(void) { - clock_sec_t sec; - clock_nsec_t nsec; + clock_sec_t sec; + clock_nsec_t nsec; clock_get_system_nanotime(&sec, &nsec); dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS; @@ -2464,7 +2507,7 @@ void vm_compressor_do_warmup(void) { lck_mtx_lock_spin_always(c_list_lock); - + if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) { first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0; @@ -2473,7 +2516,6 @@ vm_compressor_do_warmup(void) if (compaction_swapper_running == 0 && compaction_swapper_awakened == 0) { - fastwake_warmup = TRUE; compaction_swapper_awakened = 1; @@ -2485,11 +2527,9 @@ void do_fastwake_warmup_all(void) { - lck_mtx_lock_spin_always(c_list_lock); if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) { - lck_mtx_unlock_always(c_list_lock); return; } @@ -2503,15 +2543,14 @@ do_fastwake_warmup_all(void) fastwake_warmup = FALSE; lck_mtx_unlock_always(c_list_lock); - }
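The fastwake bookkeeping above reduces to a generation-id window: vm_compressor_record_warmup_start() samples the newest segment's c_generation_id when recording begins, vm_compressor_record_warmup_end() samples it again when recording stops, and only segments created between the two samples are worth prefetching on wake. A minimal sketch of that membership test, reusing the globals named in the patch (the helper function itself is hypothetical, written only to make the bounds check explicit):

static boolean_t
c_seg_in_warmup_window(c_segment_t c_seg)
{
        /* ids sampled at warmup start/end bound the window; hypothetical helper, not in xnu */
        return (c_seg->c_generation_id >= first_c_segment_to_warm_generation_id &&
            c_seg->c_generation_id <= last_c_segment_to_warm_generation_id);
}

do_fastwake_warmup() below performs the same comparison inline when asked to honor the window (consider_all_cseg == FALSE), breaking out of its queue walk as soon as a segment falls outside it or vm_page_free_count drops under a quarter of AVAILABLE_MEMORY.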
void do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg) { - c_segment_t c_seg = NULL; - AbsoluteTime startTime, endTime; - uint64_t nsec; + c_segment_t c_seg = NULL; + AbsoluteTime startTime, endTime; + uint64_t nsec; HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id); @@ -2521,35 +2560,37 @@ do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg) lck_mtx_unlock_always(c_list_lock); proc_set_thread_policy(current_thread(), - TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2); PAGE_REPLACEMENT_DISALLOWED(TRUE); lck_mtx_lock_spin_always(c_list_lock); while (!queue_empty(c_queue) && fastwake_warmup == TRUE) { - c_seg = (c_segment_t) queue_first(c_queue); if (consider_all_cseg == FALSE) { if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id || - c_seg->c_generation_id > last_c_segment_to_warm_generation_id) + c_seg->c_generation_id > last_c_segment_to_warm_generation_id) { break; + } - if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) + if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) { break; + } } lck_mtx_lock_spin_always(&c_seg->c_lock); lck_mtx_unlock_always(c_list_lock); - + if (c_seg->c_busy) { PAGE_REPLACEMENT_DISALLOWED(FALSE); c_seg_wait_on_busy(c_seg); PAGE_REPLACEMENT_DISALLOWED(TRUE); } else { - if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) + if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) { lck_mtx_unlock_always(&c_seg->c_lock); + } c_segment_warmup_count++; PAGE_REPLACEMENT_DISALLOWED(FALSE); @@ -2563,11 +2604,11 @@ do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg) PAGE_REPLACEMENT_DISALLOWED(FALSE); proc_set_thread_policy(current_thread(), - TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0); - clock_get_uptime(&endTime); - SUB_ABSOLUTETIME(&endTime, &startTime); - absolutetime_to_nanoseconds(endTime, &nsec); + clock_get_uptime(&endTime); + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nsec); HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL); @@ -2582,19 +2623,19 @@ do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg) void vm_compressor_compact_and_swap(boolean_t flush_all) { - c_segment_t c_seg, c_seg_next; - boolean_t keep_compacting; - clock_sec_t now; - clock_nsec_t nsec; + c_segment_t c_seg, c_seg_next; + boolean_t keep_compacting; + clock_sec_t now; + clock_nsec_t nsec; if (fastwake_warmup == TRUE) { - uint64_t starting_warmup_count; + uint64_t starting_warmup_count; starting_warmup_count = c_segment_warmup_count; KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count, - first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0); + first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0); do_fastwake_warmup(&c_swappedout_list_head, FALSE); KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0); @@ -2614,16 +2655,15 @@ vm_compressor_compact_and_swap(boolean_t flush_all) /* * we only need to grab the timestamp once per - * invocation of this function since the + * invocation of this function since the * timescale we're interested in is measured * in days */ - 
clock_get_system_nanotime(&now, &nsec); + clock_get_system_nanotime(&now, &nsec); while (!queue_empty(&c_age_list_head) && compaction_swapper_abort == 0) { - if (hibernate_flushing == TRUE) { - clock_sec_t sec; + clock_sec_t sec; if (hibernate_should_abort()) { HIBLOG("vm_compressor_flush - hibernate_should_abort returned TRUE\n"); @@ -2643,15 +2683,14 @@ vm_compressor_compact_and_swap(boolean_t flush_all) break; } clock_get_system_nanotime(&sec, &nsec); - + if (sec > hibernate_flushing_deadline) { HIBLOG("vm_compressor_flush - failed to finish before deadline\n"); break; } } if (c_swapout_count >= C_SWAPOUT_LIMIT) { - - assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000*NSEC_PER_USEC); + assert_wait_timeout((event_t) &compaction_swapper_running, THREAD_INTERRUPTIBLE, 100, 1000 * NSEC_PER_USEC); lck_mtx_unlock_always(c_list_lock); @@ -2681,35 +2720,38 @@ vm_compressor_compact_and_swap(boolean_t flush_all) * Swap out segments? */ if (flush_all == FALSE) { - boolean_t needs_to_swap; + boolean_t needs_to_swap; lck_mtx_unlock_always(c_list_lock); needs_to_swap = compressor_needs_to_swap(); #if !CONFIG_EMBEDDED - if (needs_to_swap == TRUE && vm_swap_low_on_space()) + if (needs_to_swap == TRUE && vm_swap_low_on_space()) { vm_compressor_take_paging_space_action(); + } #endif /* !CONFIG_EMBEDDED */ lck_mtx_lock_spin_always(c_list_lock); - - if (needs_to_swap == FALSE) + + if (needs_to_swap == FALSE) { break; + } } - if (queue_empty(&c_age_list_head)) + if (queue_empty(&c_age_list_head)) { break; + } c_seg = (c_segment_t) queue_first(&c_age_list_head); assert(c_seg->c_state == C_ON_AGE_Q); - if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) + if (flush_all == TRUE && c_seg->c_generation_id > c_generation_id_flush_barrier) { break; - + } + lck_mtx_lock_spin_always(&c_seg->c_lock); if (c_seg->c_busy) { - lck_mtx_unlock_always(c_list_lock); c_seg_wait_on_busy(c_seg); lck_mtx_lock_spin_always(c_list_lock); @@ -2732,25 +2774,25 @@ vm_compressor_compact_and_swap(boolean_t flush_all) keep_compacting = TRUE; while (keep_compacting == TRUE) { - assert(c_seg->c_busy); /* look for another segment to consolidate */ c_seg_next = (c_segment_t) queue_next(&c_seg->c_age_list); - - if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next)) + + if (queue_end(&c_age_list_head, (queue_entry_t)c_seg_next)) { break; + } assert(c_seg_next->c_state == C_ON_AGE_Q); - if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) + if (c_seg_major_compact_ok(c_seg, c_seg_next) == FALSE) { break; + } lck_mtx_lock_spin_always(&c_seg_next->c_lock); if (c_seg_next->c_busy) { - lck_mtx_unlock_always(c_list_lock); c_seg_wait_on_busy(c_seg_next); lck_mtx_lock_spin_always(c_list_lock); @@ -2781,7 +2823,7 @@ vm_compressor_compact_and_swap(boolean_t flush_all) lck_mtx_lock_spin_always(&c_seg_next->c_lock); /* * run a minor compaction on the donor segment - * since we pulled at least some of it's + * since we pulled at least some of it's * data into our target... 
if we've emptied * it, now is a good time to free it which * c_seg_minor_compaction_and_unlock also takes care of @@ -2789,14 +2831,14 @@ vm_compressor_compact_and_swap(boolean_t flush_all) * by passing TRUE, we ask for c_busy to be cleared * and c_wanted to be taken care of */ - if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) + if (c_seg_minor_compaction_and_unlock(c_seg_next, TRUE)) { c_seg_major_compact_stats.count_of_freed_segs++; + } PAGE_REPLACEMENT_DISALLOWED(FALSE); /* relock the list */ lck_mtx_lock_spin_always(c_list_lock); - } /* major compaction */ lck_mtx_lock_spin_always(&c_seg->c_lock); @@ -2812,7 +2854,6 @@ vm_compressor_compact_and_swap(boolean_t flush_all) c_seg_switch_state(c_seg, C_ON_SWAPOUT_Q, FALSE); } else { if ((vm_swapout_ripe_segments == TRUE && c_overage_swapped_count < c_overage_swapped_limit)) { - assert(VM_CONFIG_SWAP_IS_PRESENT); /* * we are running compressor sweeps with swap-behind @@ -2832,7 +2873,7 @@ vm_compressor_compact_and_swap(boolean_t flush_all) * so we need to move it out of the way... * we just did a major compaction on it so put it * on that queue - */ + */ c_seg_switch_state(c_seg, C_ON_MAJORCOMPACT_Q, FALSE); } else { c_seg_major_compact_stats.wasted_space_in_swapouts += C_SEG_BUFSIZE - c_seg->c_bytes_used; @@ -2846,7 +2887,7 @@ vm_compressor_compact_and_swap(boolean_t flush_all) lck_mtx_unlock_always(c_list_lock); thread_wakeup((event_t)&c_swapout_list_head); - + lck_mtx_lock_spin_always(c_list_lock); } } @@ -2856,23 +2897,24 @@ vm_compressor_compact_and_swap(boolean_t flush_all) static c_segment_t c_seg_allocate(c_segment_t *current_chead) { - c_segment_t c_seg; - int min_needed; - int size_to_populate; + c_segment_t c_seg; + int min_needed; + int size_to_populate; #if !CONFIG_EMBEDDED - if (vm_compressor_low_on_space()) + if (vm_compressor_low_on_space()) { vm_compressor_take_paging_space_action(); + } #endif /* !CONFIG_EMBEDDED */ - if ( (c_seg = *current_chead) == NULL ) { - uint32_t c_segno; + if ((c_seg = *current_chead) == NULL) { + uint32_t c_segno; lck_mtx_lock_spin_always(c_list_lock); while (c_segments_busy == TRUE) { assert_wait((event_t) (&c_segments_busy), THREAD_UNINT); - + lck_mtx_unlock_always(c_list_lock); thread_block(THREAD_CONTINUE_NULL); @@ -2880,27 +2922,29 @@ c_seg_allocate(c_segment_t *current_chead) lck_mtx_lock_spin_always(c_list_lock); } if (c_free_segno_head == (uint32_t)-1) { - uint32_t c_segments_available_new; + uint32_t c_segments_available_new; if (c_segments_available >= c_segments_limit || c_segment_pages_compressed >= c_segment_pages_compressed_limit) { lck_mtx_unlock_always(c_list_lock); - return (NULL); + return NULL; } c_segments_busy = TRUE; lck_mtx_unlock_always(c_list_lock); - kernel_memory_populate(compressor_map, (vm_offset_t)c_segments_next_page, - PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR); + kernel_memory_populate(compressor_map, (vm_offset_t)c_segments_next_page, + PAGE_SIZE, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR); c_segments_next_page += PAGE_SIZE; c_segments_available_new = c_segments_available + C_SEGMENTS_PER_PAGE; - if (c_segments_available_new > c_segments_limit) + if (c_segments_available_new > c_segments_limit) { c_segments_available_new = c_segments_limit; + } - for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) + for (c_segno = c_segments_available + 1; c_segno < c_segments_available_new; c_segno++) { c_segments[c_segno - 1].c_segno = c_segno; + } lck_mtx_lock_spin_always(c_list_lock); @@ -2922,8 +2966,9 @@ 
c_seg_allocate(c_segment_t *current_chead) * so that we can install it once we have the c_seg allocated */ c_segment_count++; - if (c_segment_count > c_segment_count_max) + if (c_segment_count > c_segment_count_max) { c_segment_count_max = c_segment_count; + } lck_mtx_unlock_always(c_list_lock); @@ -2933,7 +2978,7 @@ c_seg_allocate(c_segment_t *current_chead) c_seg->c_store.c_buffer = (int32_t *)C_SEG_BUFFER_ADDRESS(c_segno); lck_mtx_init(&c_seg->c_lock, &vm_compressor_lck_grp, &vm_compressor_lck_attr); - + c_seg->c_state = C_IS_EMPTY; c_seg->c_firstemptyslot = C_SLOT_MAX_INDEX; c_seg->c_mysegno = c_segno; @@ -2950,51 +2995,56 @@ c_seg_allocate(c_segment_t *current_chead) #if DEVELOPMENT || DEBUG C_SEG_MAKE_WRITEABLE(c_seg); #endif - } c_seg_alloc_nextslot(c_seg); size_to_populate = C_SEG_ALLOCSIZE - C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset); - - if (size_to_populate) { + if (size_to_populate) { min_needed = PAGE_SIZE + (C_SEG_ALLOCSIZE - C_SEG_BUFSIZE); if (C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset) < (unsigned) min_needed) { - - if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) + if (size_to_populate > C_SEG_MAX_POPULATE_SIZE) { size_to_populate = C_SEG_MAX_POPULATE_SIZE; + } - OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed); + OSAddAtomic64(size_to_populate / PAGE_SIZE, &vm_pageout_vminfo.vm_compressor_pages_grabbed); kernel_memory_populate(compressor_map, - (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset], - size_to_populate, - KMA_COMPRESSOR, - VM_KERN_MEMORY_COMPRESSOR); - } else + (vm_offset_t) &c_seg->c_store.c_buffer[c_seg->c_populated_offset], + size_to_populate, + KMA_COMPRESSOR, + VM_KERN_MEMORY_COMPRESSOR); + } else { size_to_populate = 0; + } } PAGE_REPLACEMENT_DISALLOWED(TRUE); lck_mtx_lock_spin_always(&c_seg->c_lock); - if (size_to_populate) + if (size_to_populate) { c_seg->c_populated_offset += C_SEG_BYTES_TO_OFFSET(size_to_populate); + } - return (c_seg); + return c_seg; } +#if DEVELOPMENT || DEBUG +#if CONFIG_FREEZE +extern boolean_t memorystatus_freeze_to_memory; +#endif /* CONFIG_FREEZE */ +#endif /* DEVELOPMENT || DEBUG */ static void c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead) { - uint32_t unused_bytes; - uint32_t offset_to_depopulate; - int new_state = C_ON_AGE_Q; - clock_sec_t sec; - clock_nsec_t nsec; - boolean_t head_insert = FALSE; + uint32_t unused_bytes; + uint32_t offset_to_depopulate; + int new_state = C_ON_AGE_Q; + clock_sec_t sec; + clock_nsec_t nsec; + boolean_t head_insert = FALSE; unused_bytes = trunc_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset - c_seg->c_nextoffset)); @@ -3030,28 +3080,34 @@ c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead) #if DEVELOPMENT || DEBUG { - boolean_t c_seg_was_busy = FALSE; + boolean_t c_seg_was_busy = FALSE; - if ( !c_seg->c_busy) - C_SEG_BUSY(c_seg); - else - c_seg_was_busy = TRUE; + if (!c_seg->c_busy) { + C_SEG_BUSY(c_seg); + } else { + c_seg_was_busy = TRUE; + } - lck_mtx_unlock_always(&c_seg->c_lock); + lck_mtx_unlock_always(&c_seg->c_lock); - C_SEG_WRITE_PROTECT(c_seg); + C_SEG_WRITE_PROTECT(c_seg); - lck_mtx_lock_spin_always(&c_seg->c_lock); + lck_mtx_lock_spin_always(&c_seg->c_lock); - if (c_seg_was_busy == FALSE) - C_SEG_WAKEUP_DONE(c_seg); + if (c_seg_was_busy == FALSE) { + C_SEG_WAKEUP_DONE(c_seg); + } } #endif #if CONFIG_FREEZE if (current_chead == (c_segment_t*)&freezer_chead && VM_CONFIG_SWAP_IS_PRESENT && - VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { + 
VM_CONFIG_FREEZER_SWAP_IS_ACTIVE +#if DEVELOPMENT || DEBUG + && !memorystatus_freeze_to_memory +#endif /* DEVELOPMENT || DEBUG */ + ) { new_state = C_ON_SWAPOUT_Q; } #endif /* CONFIG_FREEZE */ @@ -3080,13 +3136,15 @@ c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead) } #endif /* CONFIG_FREEZE */ - if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) + if (c_seg->c_state == C_ON_AGE_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) { c_seg_need_delayed_compaction(c_seg, TRUE); + } lck_mtx_unlock_always(c_list_lock); - if (c_seg->c_state == C_ON_SWAPOUT_Q) + if (c_seg->c_state == C_ON_SWAPOUT_Q) { thread_wakeup((event_t)&c_swapout_list_head); + } *current_chead = NULL; } @@ -3098,8 +3156,8 @@ c_current_seg_filled(c_segment_t c_seg, c_segment_t *current_chead) void c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_compact_ok, boolean_t age_on_swapin_q) { - clock_sec_t sec; - clock_nsec_t nsec; + clock_sec_t sec; + clock_nsec_t nsec; clock_get_system_nanotime(&sec, &nsec); @@ -3114,15 +3172,17 @@ c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_comp if (c_seg->c_overage_swap == TRUE) { c_overage_swapped_count--; c_seg->c_overage_swap = FALSE; - } + } if (has_data == TRUE) { - if (age_on_swapin_q == TRUE) + if (age_on_swapin_q == TRUE) { c_seg_switch_state(c_seg, C_ON_SWAPPEDIN_Q, FALSE); - else + } else { c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE); + } - if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) + if (minor_compact_ok == TRUE && !c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) { c_seg_need_delayed_compaction(c_seg, TRUE); + } } else { c_seg->c_store.c_buffer = (int32_t*) NULL; c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0); @@ -3145,12 +3205,12 @@ c_seg_swapin_requeue(c_segment_t c_seg, boolean_t has_data, boolean_t minor_comp int c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_on_swapin_q) { - vm_offset_t addr = 0; - uint32_t io_size = 0; - uint64_t f_offset; + vm_offset_t addr = 0; + uint32_t io_size = 0; + uint64_t f_offset; assert(C_SEG_IS_ONDISK(c_seg)); - + #if !CHECKSUM_THE_SWAP c_seg_trim_tail(c_seg); #endif @@ -3189,8 +3249,9 @@ c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_ #endif /* ENCRYPTED_SWAP */ #if CHECKSUM_THE_SWAP - if (c_seg->cseg_swap_size != io_size) + if (c_seg->cseg_swap_size != io_size) { panic("swapin size doesn't match swapout size"); + } if (c_seg->cseg_hash != vmc_hash((char*) c_seg->c_store.c_buffer, (int)io_size)) { panic("c_seg_swapin - Swap hash mismatch\n"); @@ -3213,7 +3274,7 @@ c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_ * is returned back to where it is supposed to be. 
*/ clear_thread_rwlock_boost(); - return (1); + return 1; } lck_mtx_lock_spin_always(&c_seg->c_lock); @@ -3227,7 +3288,7 @@ c_seg_swapin(c_segment_t c_seg, boolean_t force_minor_compaction, boolean_t age_ */ clear_thread_rwlock_boost(); - return (0); + return 0; } @@ -3237,15 +3298,15 @@ c_segment_sv_hash_drop_ref(int hash_indx) struct c_sv_hash_entry o_sv_he, n_sv_he; while (1) { - o_sv_he.he_record = c_segment_sv_hash_table[hash_indx].he_record; n_sv_he.he_ref = o_sv_he.he_ref - 1; n_sv_he.he_data = o_sv_he.he_data; if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_indx].he_record) == TRUE) { - if (n_sv_he.he_ref == 0) + if (n_sv_he.he_ref == 0) { OSAddAtomic(-1, &c_segment_svp_in_hash); + } break; } } @@ -3255,20 +3316,20 @@ c_segment_sv_hash_drop_ref(int hash_indx) static int c_segment_sv_hash_insert(uint32_t data) { - int hash_sindx; - int misses; + int hash_sindx; + int misses; struct c_sv_hash_entry o_sv_he, n_sv_he; - boolean_t got_ref = FALSE; + boolean_t got_ref = FALSE; - if (data == 0) + if (data == 0) { OSAddAtomic(1, &c_segment_svp_zero_compressions); - else + } else { OSAddAtomic(1, &c_segment_svp_nonzero_compressions); + } hash_sindx = data & C_SV_HASH_MASK; - - for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) - { + + for (misses = 0; misses < C_SV_HASH_MAX_MISS; misses++) { o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record; while (o_sv_he.he_data == data || o_sv_he.he_ref == 0) { @@ -3276,24 +3337,28 @@ c_segment_sv_hash_insert(uint32_t data) n_sv_he.he_data = data; if (OSCompareAndSwap64((UInt64)o_sv_he.he_record, (UInt64)n_sv_he.he_record, (UInt64 *) &c_segment_sv_hash_table[hash_sindx].he_record) == TRUE) { - if (n_sv_he.he_ref == 1) + if (n_sv_he.he_ref == 1) { OSAddAtomic(1, &c_segment_svp_in_hash); + } got_ref = TRUE; break; } o_sv_he.he_record = c_segment_sv_hash_table[hash_sindx].he_record; } - if (got_ref == TRUE) + if (got_ref == TRUE) { break; + } hash_sindx++; - if (hash_sindx == C_SV_HASH_SIZE) + if (hash_sindx == C_SV_HASH_SIZE) { hash_sindx = 0; + } + } + if (got_ref == FALSE) { + return -1; } - if (got_ref == FALSE) - return(-1); - return (hash_sindx); + return hash_sindx; } @@ -3302,8 +3367,9 @@ c_segment_sv_hash_insert(uint32_t data) static void c_compressed_record_data(char *src, int c_size) { - if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) + if ((c_compressed_record_cptr + c_size + 4) >= c_compressed_record_ebuf) { panic("c_compressed_record_cptr >= c_compressed_record_ebuf"); + } *(int *)((void *)c_compressed_record_cptr) = c_size; @@ -3318,16 +3384,16 @@ c_compressed_record_data(char *src, int c_size) static int c_compress_page(char *src, c_slot_mapping_t slot_ptr, c_segment_t *current_chead, char *scratch_buf) { - int c_size; - int c_rounded_size = 0; - int max_csize; - c_slot_t cs; - c_segment_t c_seg; + int c_size; + int c_rounded_size = 0; + int max_csize; + c_slot_t cs; + c_segment_t c_seg; KERNEL_DEBUG(0xe0400000 | DBG_FUNC_START, *current_chead, 0, 0, 0, 0); retry: if ((c_seg = c_seg_allocate(current_chead)) == NULL) { - return (1); + return 1; } /* * returns with c_seg lock held @@ -3346,8 +3412,9 @@ retry: max_csize = C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES((int32_t)cs->c_offset); - if (max_csize > PAGE_SIZE) + if (max_csize > PAGE_SIZE) { max_csize = PAGE_SIZE; + } #if CHECKSUM_THE_DATA cs->c_hash_data = vmc_hash(src, PAGE_SIZE); @@ -3377,21 +3444,21 @@ retry: #endif } else { #if defined(__arm__) || defined(__arm64__) - 
cs->c_codec = CCWK; + cs->c_codec = CCWK; #endif #if defined(__arm64__) - __unreachable_ok_push - if (PAGE_SIZE == 4096) - c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], - (WK_word *)(uintptr_t)scratch_buf, max_csize_adj); - else { - c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], - (WK_word *)(uintptr_t)scratch_buf, max_csize_adj); - } - __unreachable_ok_pop + __unreachable_ok_push + if (PAGE_SIZE == 4096) { + c_size = WKdm_compress_4k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], + (WK_word *)(uintptr_t)scratch_buf, max_csize_adj); + } else { + c_size = WKdm_compress_16k((WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], + (WK_word *)(uintptr_t)scratch_buf, max_csize_adj); + } + __unreachable_ok_pop #else - c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], - (WK_word *)(uintptr_t)scratch_buf, max_csize_adj); + c_size = WKdm_compress_new((const WK_word *)(uintptr_t)src, (WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], + (WK_word *)(uintptr_t)scratch_buf, max_csize_adj); #endif } assertf(((c_size <= max_csize_adj) && (c_size >= -1)), @@ -3417,9 +3484,8 @@ retry: } OSAddAtomic(1, &c_segment_noncompressible_pages); - } else if (c_size == 0) { - int hash_index; + int hash_index; /* * special case - this is a page completely full of a single 32 bit value @@ -3437,7 +3503,7 @@ retry: goto sv_compression; } c_size = 4; - + memcpy(&c_seg->c_store.c_buffer[cs->c_offset], src, c_size); OSAddAtomic(1, &c_segment_svp_hash_failed); @@ -3461,7 +3527,7 @@ retry: slot_ptr->s_cindx = c_seg->c_nextslot++; /* would mean "empty slot", so use csegno+1 */ - slot_ptr->s_cseg = c_seg->c_mysegno + 1; + slot_ptr->s_cseg = c_seg->c_mysegno + 1; sv_compression: if (c_seg->c_nextoffset >= C_SEG_OFF_LIMIT || c_seg->c_nextslot >= C_SLOT_MAX_INDEX) { @@ -3489,58 +3555,60 @@ sv_compression: KERNEL_DEBUG(0xe0400000 | DBG_FUNC_END, *current_chead, c_size, c_segment_input_bytes, c_segment_compressed_bytes, 0); - return (0); + return 0; } -static inline void sv_decompress(int32_t *ddst, int32_t pattern) { +static inline void +sv_decompress(int32_t *ddst, int32_t pattern) +{ #if __x86_64__ - memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t)); + memset_word(ddst, pattern, PAGE_SIZE / sizeof(int32_t)); #else - size_t i; - - /* Unroll the pattern fill loop 4x to encourage the - * compiler to emit NEON stores, cf. - * Loop autovectorization - * anomalies. - * We use separate loops for each PAGE_SIZE - * to allow the autovectorizer to engage, as PAGE_SIZE - * is currently not a constant. - */ + size_t i; + + /* Unroll the pattern fill loop 4x to encourage the + * compiler to emit NEON stores, cf. + * Loop autovectorization + * anomalies. + * We use separate loops for each PAGE_SIZE + * to allow the autovectorizer to engage, as PAGE_SIZE + * is currently not a constant. 
+ */ - __unreachable_ok_push - if (PAGE_SIZE == 4096) { - for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) { - *ddst++ = pattern; - *ddst++ = pattern; - *ddst++ = pattern; - *ddst++ = pattern; - } - } else { - assert(PAGE_SIZE == 16384); - for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) { - *ddst++ = pattern; - *ddst++ = pattern; - *ddst++ = pattern; - *ddst++ = pattern; - } - } - __unreachable_ok_pop + __unreachable_ok_push + if (PAGE_SIZE == 4096) { + for (i = 0; i < (4096U / sizeof(int32_t)); i += 4) { + *ddst++ = pattern; + *ddst++ = pattern; + *ddst++ = pattern; + *ddst++ = pattern; + } + } else { + assert(PAGE_SIZE == 16384); + for (i = 0; i < (int)(16384U / sizeof(int32_t)); i += 4) { + *ddst++ = pattern; + *ddst++ = pattern; + *ddst++ = pattern; + *ddst++ = pattern; + } + } + __unreachable_ok_pop #endif } static int c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int *zeroslot) { - c_slot_t cs; - c_segment_t c_seg; - uint32_t c_segno; - int c_indx; - int c_rounded_size; - uint32_t c_size; - int retval = 0; - boolean_t need_unlock = TRUE; - boolean_t consider_defragmenting = FALSE; - boolean_t kdp_mode = FALSE; + c_slot_t cs; + c_segment_t c_seg; + uint32_t c_segno; + int c_indx; + int c_rounded_size; + uint32_t c_size; + int retval = 0; + boolean_t need_unlock = TRUE; + boolean_t consider_defragmenting = FALSE; + boolean_t kdp_mode = FALSE; if (__improbable(flags & C_KDP)) { if (not_in_kdp) { @@ -3551,7 +3619,7 @@ c_decompress_page(char *dst, volatile c_slot_mapping_t slot_ptr, int flags, int assert((flags & C_DONT_BLOCK) == C_DONT_BLOCK); if ((flags & (C_DONT_BLOCK | C_KEEP)) != (C_DONT_BLOCK | C_KEEP)) { - return (-2); + return -2; } kdp_mode = TRUE; @@ -3563,7 +3631,7 @@ ReTry: PAGE_REPLACEMENT_DISALLOWED(TRUE); } else { if (kdp_lck_rw_lock_is_acquired_exclusive(&c_master_lock)) { - return (-2); + return -2; } } @@ -3579,13 +3647,12 @@ ReTry: */ if (__improbable(dst && decompressions_blocked == TRUE)) { if (flags & C_DONT_BLOCK) { - if (__probable(!kdp_mode)) { PAGE_REPLACEMENT_DISALLOWED(FALSE); } *zeroslot = 0; - return (-2); + return -2; } /* * it's safe to atomically assert and block behind the @@ -3605,13 +3672,15 @@ ReTry: /* s_cseg is actually "segno+1" */ c_segno = slot_ptr->s_cseg - 1; - if (__improbable(c_segno >= c_segments_available)) + if (__improbable(c_segno >= c_segments_available)) { panic("c_decompress_page: c_segno %d >= c_segments_available %d, slot_ptr(%p), slot_data(%x)", - c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr)); + c_segno, c_segments_available, slot_ptr, *(int *)((void *)slot_ptr)); + } - if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) + if (__improbable(c_segments[c_segno].c_segno < c_segments_available)) { panic("c_decompress_page: c_segno %d is free, slot_ptr(%p), slot_data(%x)", - c_segno, slot_ptr, *(int *)((void *)slot_ptr)); + c_segno, slot_ptr, *(int *)((void *)slot_ptr)); + } c_seg = c_segments[c_segno].c_seg; @@ -3619,7 +3688,7 @@ ReTry: lck_mtx_lock_spin_always(&c_seg->c_lock); } else { if (kdp_lck_mtx_lock_spin_is_acquired(&c_seg->c_lock)) { - return (-2); + return -2; } } @@ -3639,7 +3708,6 @@ ReTry: } } if (c_seg->c_busy) { - PAGE_REPLACEMENT_DISALLOWED(FALSE); c_seg_wait_on_busy(c_seg); @@ -3650,24 +3718,26 @@ bypass_busy_check: c_indx = slot_ptr->s_cindx; - if (__improbable(c_indx >= c_seg->c_nextslot)) + if (__improbable(c_indx >= c_seg->c_nextslot)) { panic("c_decompress_page: c_indx %d >= c_nextslot %d, c_seg(%p), slot_ptr(%p), slot_data(%x)", - 
c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr)); + c_indx, c_seg->c_nextslot, c_seg, slot_ptr, *(int *)((void *)slot_ptr)); + } cs = C_SEG_SLOT_FROM_INDEX(c_seg, c_indx); c_size = UNPACK_C_SIZE(cs); - if (__improbable(c_size == 0)) + if (__improbable(c_size == 0)) { panic("c_decompress_page: c_size == 0, c_seg(%p), slot_ptr(%p), slot_data(%x)", - c_seg, slot_ptr, *(int *)((void *)slot_ptr)); + c_seg, slot_ptr, *(int *)((void *)slot_ptr)); + } c_rounded_size = (c_size + C_SEG_OFFSET_ALIGNMENT_MASK) & ~C_SEG_OFFSET_ALIGNMENT_MASK; if (dst) { - uint32_t age_of_cseg; - clock_sec_t cur_ts_sec; - clock_nsec_t cur_ts_nsec; + uint32_t age_of_cseg; + clock_sec_t cur_ts_sec; + clock_nsec_t cur_ts_nsec; if (C_SEG_IS_ONDISK(c_seg)) { assert(kdp_mode == FALSE); @@ -3675,7 +3745,7 @@ bypass_busy_check: assert(retval == 0); retval = 1; - } + } if (c_seg->c_state == C_ON_BAD_Q) { assert(c_seg->c_store.c_buffer == NULL); *zeroslot = 0; @@ -3704,8 +3774,8 @@ bypass_busy_check: */ memcpy(dst, &c_seg->c_store.c_buffer[cs->c_offset], PAGE_SIZE); } else if (c_size == 4) { - int32_t data; - int32_t *dptr; + int32_t data; + int32_t *dptr; /* * page was populated with a single value @@ -3717,8 +3787,8 @@ bypass_busy_check: data = *(int32_t *)(&c_seg->c_store.c_buffer[cs->c_offset]); sv_decompress(dptr, data); } else { - uint32_t my_cpu_no; - char *scratch_buf; + uint32_t my_cpu_no; + char *scratch_buf; if (__probable(!kdp_mode)) { /* @@ -3743,41 +3813,41 @@ bypass_busy_check: #endif } else { #if defined(__arm64__) - __unreachable_ok_push - if (PAGE_SIZE == 4096) - WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], - (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size); - else { - WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], - (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size); - } - __unreachable_ok_pop -#else - WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], + __unreachable_ok_push + if (PAGE_SIZE == 4096) { + WKdm_decompress_4k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], + (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size); + } else { + WKdm_decompress_16k((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size); + } + __unreachable_ok_pop +#else + WKdm_decompress_new((WK_word *)(uintptr_t)&c_seg->c_store.c_buffer[cs->c_offset], + (WK_word *)(uintptr_t)dst, (WK_word *)(uintptr_t)scratch_buf, c_size); #endif } } #if CHECKSUM_THE_DATA if (cs->c_hash_data != vmc_hash(dst, PAGE_SIZE)) { -#if defined(__arm__) || defined(__arm64__) +#if defined(__arm__) || defined(__arm64__) int32_t *dinput = &c_seg->c_store.c_buffer[cs->c_offset]; panic("decompressed data doesn't match original cs: %p, hash: 0x%x, offset: %d, c_size: %d, c_rounded_size: %d, codec: %d, header: 0x%x 0x%x 0x%x", cs, cs->c_hash_data, cs->c_offset, c_size, c_rounded_size, cs->c_codec, *dinput, *(dinput + 1), *(dinput + 2)); #else - panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size); + panic("decompressed data doesn't match original cs: %p, hash: %d, offset: 0x%x, c_size: %d", cs, cs->c_hash_data, cs->c_offset, c_size); #endif } #endif if (c_seg->c_swappedin_ts == 0 && !kdp_mode) { - clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec); age_of_cseg = (uint32_t)cur_ts_sec - c_seg->c_creation_ts; - if 
(age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) + if (age_of_cseg < DECOMPRESSION_SAMPLE_MAX_AGE) { OSAddAtomic(1, &age_of_decompressions_during_sample_period[age_of_cseg]); - else + } else { OSAddAtomic(1, &overage_decompressions_during_sample_period); + } OSAddAtomic(1, &sample_period_decompression_count); } @@ -3796,8 +3866,9 @@ bypass_busy_check: PACK_C_SIZE(cs, 0); - if (c_indx < c_seg->c_firstemptyslot) + if (c_indx < c_seg->c_firstemptyslot) { c_seg->c_firstemptyslot = c_indx; + } OSAddAtomic(-1, &c_segment_pages_compressed); @@ -3821,14 +3892,13 @@ bypass_busy_check: if (c_seg->c_state != C_IS_FILLING) { if (c_seg->c_bytes_used == 0) { - if ( !(C_SEG_IS_ONDISK(c_seg))) { - int pages_populated; + if (!(C_SEG_IS_ONDISK(c_seg))) { + int pages_populated; pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE; c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0); if (pages_populated) { - assert(c_seg->c_state != C_ON_BAD_Q); assert(c_seg->c_store.c_buffer != NULL); @@ -3840,17 +3910,16 @@ bypass_busy_check: lck_mtx_lock_spin_always(&c_seg->c_lock); C_SEG_WAKEUP_DONE(c_seg); } - if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q) + if (!c_seg->c_on_minorcompact_q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q) { c_seg_need_delayed_compaction(c_seg, FALSE); + } } else { if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q) { - c_seg_move_to_sparse_list(c_seg); consider_defragmenting = TRUE; } } } else if (c_seg->c_on_minorcompact_q) { - assert(c_seg->c_state != C_ON_BAD_Q); assert(!C_SEG_IS_ON_DISK_OR_SOQ(c_seg)); @@ -3858,14 +3927,12 @@ bypass_busy_check: c_seg_try_minor_compaction_and_unlock(c_seg); need_unlock = FALSE; } - } else if ( !(C_SEG_IS_ONDISK(c_seg))) { - + } else if (!(C_SEG_IS_ONDISK(c_seg))) { if (c_seg->c_state != C_ON_BAD_Q && c_seg->c_state != C_ON_SWAPOUT_Q && c_seg->c_state != C_ON_SWAPIO_Q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) { c_seg_need_delayed_compaction(c_seg, FALSE); } } else if (c_seg->c_state != C_ON_SWAPPEDOUTSPARSE_Q && C_SEG_ONDISK_IS_SPARSE(c_seg)) { - c_seg_move_to_sparse_list(c_seg); consider_defragmenting = TRUE; } @@ -3875,20 +3942,23 @@ done: return retval; } - if (need_unlock == TRUE) + if (need_unlock == TRUE) { lck_mtx_unlock_always(&c_seg->c_lock); + } PAGE_REPLACEMENT_DISALLOWED(FALSE); - if (consider_defragmenting == TRUE) + if (consider_defragmenting == TRUE) { vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE); + } #if CONFIG_EMBEDDED - if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact()) + if ((c_minor_count && COMPRESSOR_NEEDS_TO_MINOR_COMPACT()) || vm_compressor_needs_to_major_compact()) { vm_wake_compactor_swapper(); + } #endif - return (retval); + return retval; } @@ -3896,22 +3966,18 @@ int vm_compressor_get(ppnum_t pn, int *slot, int flags) { c_slot_mapping_t slot_ptr; - char *dst; - int zeroslot = 1; - int retval; + char *dst; + int zeroslot = 1; + int retval; -#if __x86_64__ - dst = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT); -#elif __arm__ || __arm64__ - dst = (char *) phystokv((pmap_paddr_t)pn << PAGE_SHIFT); -#else -#error "unsupported architecture" -#endif + dst = pmap_map_compressor_page(pn); slot_ptr = (c_slot_mapping_t)slot; + assert(dst != NULL); + if (slot_ptr->s_cseg == C_SV_CSEG_ID) { - int32_t data; - int32_t *dptr; + int32_t data; + int32_t *dptr; /* * page was populated with a single value @@ -3925,24 +3991,27 @@ vm_compressor_get(ppnum_t pn, int *slot, 
int flags) memset_word(dptr, data, PAGE_SIZE / sizeof(int32_t)); #else { - int i; + int i; - for (i = 0; i < (int)(PAGE_SIZE / sizeof(int32_t)); i++) - *dptr++ = data; + for (i = 0; i < (int)(PAGE_SIZE / sizeof(int32_t)); i++) { + *dptr++ = data; + } } #endif - if ( !(flags & C_KEEP)) { + if (!(flags & C_KEEP)) { c_segment_sv_hash_drop_ref(slot_ptr->s_cindx); OSAddAtomic(-1, &c_segment_pages_compressed); *slot = 0; } - if (data) + if (data) { OSAddAtomic(1, &c_segment_svp_nonzero_decompressions); - else + } else { OSAddAtomic(1, &c_segment_svp_zero_decompressions); + } - return (0); + pmap_unmap_compressor_page(pn, dst); + return 0; } retval = c_decompress_page(dst, slot_ptr, flags, &zeroslot); @@ -3954,13 +4023,16 @@ vm_compressor_get(ppnum_t pn, int *slot, int flags) if (zeroslot) { *slot = 0; } + + pmap_unmap_compressor_page(pn, dst); + /* * returns 0 if we successfully decompressed a page from a segment already in memory * returns 1 if we had to first swap in the segment, before successfully decompressing the page * returns -1 if we encountered an error swapping in the segment - decompression failed * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' or 'C_SEG_IS_ONDISK' to be true */ - return (retval); + return retval; } @@ -3968,20 +4040,19 @@ int vm_compressor_free(int *slot, int flags) { c_slot_mapping_t slot_ptr; - int zeroslot = 1; - int retval; + int zeroslot = 1; + int retval; assert(flags == 0 || flags == C_DONT_BLOCK); slot_ptr = (c_slot_mapping_t)slot; if (slot_ptr->s_cseg == C_SV_CSEG_ID) { - c_segment_sv_hash_drop_ref(slot_ptr->s_cindx); OSAddAtomic(-1, &c_segment_pages_compressed); *slot = 0; - return (0); + return 0; } retval = c_decompress_page(NULL, slot_ptr, flags, &zeroslot); /* @@ -3989,43 +4060,40 @@ vm_compressor_free(int *slot, int flags) * returns -2 if (flags & C_DONT_BLOCK) and we found 'c_busy' set */ - if (retval == 0) + if (retval == 0) { *slot = 0; - else + } else { assert(retval == -2); + } - return (retval); + return retval; } int vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf) { - char *src; - int retval; + char *src; + int retval; -#if __x86_64__ - src = PHYSMAP_PTOV((uint64_t)pn << (uint64_t)PAGE_SHIFT); -#elif __arm__ || __arm64__ - src = (char *) phystokv((pmap_paddr_t)pn << PAGE_SHIFT); -#else -#error "unsupported architecture" -#endif + src = pmap_map_compressor_page(pn); + assert(src != NULL); retval = c_compress_page(src, (c_slot_mapping_t)slot, (c_segment_t *)current_chead, scratch_buf); + pmap_unmap_compressor_page(pn, src); - return (retval); + return retval; } void vm_compressor_transfer( - int *dst_slot_p, - int *src_slot_p) + int *dst_slot_p, + int *src_slot_p) { - c_slot_mapping_t dst_slot, src_slot; - c_segment_t c_seg; - int c_indx; - c_slot_t cs; + c_slot_mapping_t dst_slot, src_slot; + c_segment_t c_seg; + int c_indx; + c_slot_t cs; src_slot = (c_slot_mapping_t) src_slot_p; @@ -4038,7 +4106,7 @@ vm_compressor_transfer( Retry: PAGE_REPLACEMENT_DISALLOWED(TRUE); /* get segment for src_slot */ - c_seg = c_segments[src_slot->s_cseg -1].c_seg; + c_seg = c_segments[src_slot->s_cseg - 1].c_seg; /* lock segment */ lck_mtx_lock_spin_always(&c_seg->c_lock); /* wait if it's busy */ @@ -4061,19 +4129,20 @@ Retry: #if CONFIG_FREEZE -int freezer_finished_filling = 0; +int freezer_finished_filling = 0; void vm_compressor_finished_filling( - void **current_chead) + void **current_chead) { - c_segment_t c_seg; + c_segment_t c_seg; - if ((c_seg = *(c_segment_t *)current_chead) == NULL) + if ((c_seg = 
*(c_segment_t *)current_chead) == NULL) { return; + } assert(c_seg->c_state == C_IS_FILLING); - + lck_mtx_lock_spin_always(&c_seg->c_lock); c_current_seg_filled(c_seg, (c_segment_t *)current_chead); @@ -4091,27 +4160,27 @@ vm_compressor_finished_filling( * * Currently, this routine is only used by the "freezer backed by * compressor with swap" mode to create a series of c_segs that - * only contain compressed data belonging to one task. So, we + * only contain compressed data belonging to one task. So, we * move a task's previously compressed data into a set of new * c_segs which will also hold the task's yet to be compressed data. */ kern_return_t vm_compressor_relocate( - void **current_chead, - int *slot_p) + void **current_chead, + int *slot_p) { - c_slot_mapping_t slot_ptr; - c_slot_mapping_t src_slot; - uint32_t c_rounded_size; - uint32_t c_size; - uint16_t dst_slot; - c_slot_t c_dst; - c_slot_t c_src; - int c_indx; - c_segment_t c_seg_dst = NULL; - c_segment_t c_seg_src = NULL; - kern_return_t kr = KERN_SUCCESS; + c_slot_mapping_t slot_ptr; + c_slot_mapping_t src_slot; + uint32_t c_rounded_size; + uint32_t c_size; + uint16_t dst_slot; + c_slot_t c_dst; + c_slot_t c_src; + int c_indx; + c_segment_t c_seg_dst = NULL; + c_segment_t c_seg_src = NULL; + kern_return_t kr = KERN_SUCCESS; src_slot = (c_slot_mapping_t) slot_p; @@ -4122,7 +4191,7 @@ vm_compressor_relocate( * value which is hashed to a single entry not contained * in a c_segment_t */ - return (kr); + return kr; } Relookup_dst: @@ -4146,7 +4215,7 @@ Relookup_dst: C_SEG_BUSY(c_seg_dst); dst_slot = c_seg_dst->c_nextslot; - + lck_mtx_unlock_always(&c_seg_dst->c_lock); Relookup_src: @@ -4157,7 +4226,6 @@ Relookup_src: lck_mtx_lock_spin_always(&c_seg_src->c_lock); if (C_SEG_IS_ONDISK(c_seg_src)) { - /* * A "thaw" can mark a process as eligible for * another freeze cycle without bringing any of @@ -4169,7 +4237,7 @@ Relookup_src: * with an app's data that is already packed and * swapped out. Don't do anything. 
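
/*
 * Editorial aside (not part of the patch): a minimal sketch of what
 * vm_compressor_relocate() ultimately does to the caller-visible slot
 * once the compressed bits have been copied from c_seg_src to
 * c_seg_dst -- the packed 32-bit mapping is simply rewritten to point
 * at the destination.  The field layout matches struct c_slot_mapping
 * in vm_compressor.h later in this patch; the helper name is ours, and
 * the bias by one mirrors the "s_cseg is actually segno+1" comments,
 * which reserve an all-zero mapping to mean "empty slot".
 */
static void
slot_mapping_retarget(c_slot_mapping_t slot_ptr, uint32_t dst_segno, uint32_t dst_cindx)
{
	slot_ptr->s_cseg  = dst_segno + 1;	/* 0 is reserved for "empty slot" */
	slot_ptr->s_cindx = dst_cindx;
}
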
*/ - + PAGE_REPLACEMENT_DISALLOWED(FALSE); lck_mtx_unlock_always(&c_seg_src->c_lock); @@ -4180,10 +4248,9 @@ Relookup_src: } if (c_seg_src->c_busy) { - PAGE_REPLACEMENT_DISALLOWED(FALSE); c_seg_wait_on_busy(c_seg_src); - + c_seg_src = NULL; PAGE_REPLACEMENT_DISALLOWED(TRUE); @@ -4194,7 +4261,7 @@ Relookup_src: C_SEG_BUSY(c_seg_src); lck_mtx_unlock_always(&c_seg_src->c_lock); - + PAGE_REPLACEMENT_DISALLOWED(FALSE); /* find the c_slot */ @@ -4212,7 +4279,7 @@ Relookup_src: */ PAGE_REPLACEMENT_DISALLOWED(TRUE); - + lck_mtx_lock_spin_always(&c_seg_src->c_lock); C_SEG_WAKEUP_DONE(c_seg_src); lck_mtx_unlock_always(&c_seg_src->c_lock); @@ -4227,9 +4294,9 @@ Relookup_src: c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead); assert(*current_chead == NULL); - + C_SEG_WAKEUP_DONE(c_seg_dst); - + lck_mtx_unlock_always(&c_seg_dst->c_lock); c_seg_dst = NULL; @@ -4248,14 +4315,15 @@ Relookup_src: cslot_copy(c_dst, c_src); c_dst->c_offset = c_seg_dst->c_nextoffset; - if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) + if (c_seg_dst->c_firstemptyslot == c_seg_dst->c_nextslot) { c_seg_dst->c_firstemptyslot++; + } c_seg_dst->c_slots_used++; c_seg_dst->c_nextslot++; c_seg_dst->c_bytes_used += c_rounded_size; c_seg_dst->c_nextoffset += C_SEG_BYTES_TO_OFFSET(c_rounded_size); - + PACK_C_SIZE(c_src, 0); @@ -4264,13 +4332,13 @@ Relookup_src: assert(c_seg_src->c_slots_used); c_seg_src->c_slots_used--; - + if (c_indx < c_seg_src->c_firstemptyslot) { c_seg_src->c_firstemptyslot = c_indx; } c_dst = C_SEG_SLOT_FROM_INDEX(c_seg_dst, dst_slot); - + PAGE_REPLACEMENT_ALLOWED(TRUE); slot_ptr = (c_slot_mapping_t)C_SLOT_UNPACK_PTR(c_dst); /* would mean "empty slot", so use csegno+1 */ @@ -4281,21 +4349,20 @@ Relookup_src: out: if (c_seg_src) { - lck_mtx_lock_spin_always(&c_seg_src->c_lock); C_SEG_WAKEUP_DONE(c_seg_src); if (c_seg_src->c_bytes_used == 0 && c_seg_src->c_state != C_IS_FILLING) { - if (!c_seg_src->c_on_minorcompact_q) + if (!c_seg_src->c_on_minorcompact_q) { c_seg_need_delayed_compaction(c_seg_src, FALSE); + } } lck_mtx_unlock_always(&c_seg_src->c_lock); } - - if (c_seg_dst) { + if (c_seg_dst) { PAGE_REPLACEMENT_DISALLOWED(TRUE); lck_mtx_lock_spin_always(&c_seg_dst->c_lock); @@ -4310,8 +4377,8 @@ out: c_current_seg_filled(c_seg_dst, (c_segment_t *)current_chead); assert(*current_chead == NULL); - } - + } + C_SEG_WAKEUP_DONE(c_seg_dst); lck_mtx_unlock_always(&c_seg_dst->c_lock); diff --git a/osfmk/vm/vm_compressor.h b/osfmk/vm/vm_compressor.h index 0dea16c53..c95cc518a 100644 --- a/osfmk/vm/vm_compressor.h +++ b/osfmk/vm/vm_compressor.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -43,18 +43,18 @@ #include #endif -#define C_SEG_OFFSET_BITS 16 -#define C_SEG_BUFSIZE (1024 * 256) -#define C_SEG_MAX_PAGES (C_SEG_BUFSIZE / PAGE_SIZE) +#define C_SEG_OFFSET_BITS 16 +#define C_SEG_BUFSIZE (1024 * 256) +#define C_SEG_MAX_PAGES (C_SEG_BUFSIZE / PAGE_SIZE) #if CONFIG_EMBEDDED -#define C_SEG_OFF_LIMIT (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 512))) -#define C_SEG_ALLOCSIZE (C_SEG_BUFSIZE + PAGE_SIZE) +#define C_SEG_OFF_LIMIT (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 512))) +#define C_SEG_ALLOCSIZE (C_SEG_BUFSIZE + PAGE_SIZE) #else -#define C_SEG_OFF_LIMIT (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 128))) -#define C_SEG_ALLOCSIZE (C_SEG_BUFSIZE) +#define C_SEG_OFF_LIMIT (C_SEG_BYTES_TO_OFFSET((C_SEG_BUFSIZE - 128))) +#define C_SEG_ALLOCSIZE (C_SEG_BUFSIZE) #endif -#define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE) +#define C_SEG_MAX_POPULATE_SIZE (4 * PAGE_SIZE) #if defined(__arm64__) @@ -77,141 +77,141 @@ #define ENABLE_COMPRESSOR_CHECKS 0 #endif -#define CHECKSUM_THE_SWAP ENABLE_SWAP_CHECKS /* Debug swap data */ -#define CHECKSUM_THE_DATA ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor data */ -#define CHECKSUM_THE_COMPRESSED_DATA ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor compressed data */ +#define CHECKSUM_THE_SWAP ENABLE_SWAP_CHECKS /* Debug swap data */ +#define CHECKSUM_THE_DATA ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor data */ +#define CHECKSUM_THE_COMPRESSED_DATA ENABLE_COMPRESSOR_CHECKS /* Debug compressor/decompressor compressed data */ #ifndef VALIDATE_C_SEGMENTS -#define VALIDATE_C_SEGMENTS ENABLE_COMPRESSOR_CHECKS /* Debug compaction */ +#define VALIDATE_C_SEGMENTS ENABLE_COMPRESSOR_CHECKS /* Debug compaction */ #endif -#define RECORD_THE_COMPRESSED_DATA 0 +#define RECORD_THE_COMPRESSED_DATA 0 struct c_slot { - uint64_t c_offset:C_SEG_OFFSET_BITS, + uint64_t c_offset:C_SEG_OFFSET_BITS, #if defined(__arm64__) - c_size:14, - c_codec:1, - c_packed_ptr:33; + c_size:14, + c_codec:1, + c_packed_ptr:33; #elif defined(__arm__) - c_size:12, - c_codec:1, - c_packed_ptr:35; + c_size:12, + c_codec:1, + c_packed_ptr:35; #else - c_size:12, - c_packed_ptr:36; + c_size:12, + c_packed_ptr:36; #endif #if CHECKSUM_THE_DATA - unsigned int c_hash_data; + unsigned int c_hash_data; #endif #if CHECKSUM_THE_COMPRESSED_DATA - unsigned int c_hash_compressed_data; + unsigned int c_hash_compressed_data; #endif #if POPCOUNT_THE_COMPRESSED_DATA - unsigned int c_pop_cdata; + unsigned int c_pop_cdata; #endif }; -#define C_IS_EMPTY 0 -#define C_IS_FREE 1 -#define C_IS_FILLING 2 -#define C_ON_AGE_Q 3 -#define C_ON_SWAPOUT_Q 4 -#define C_ON_SWAPPEDOUT_Q 5 -#define C_ON_SWAPPEDOUTSPARSE_Q 6 -#define C_ON_SWAPPEDIN_Q 7 -#define C_ON_MAJORCOMPACT_Q 8 -#define C_ON_BAD_Q 9 +#define C_IS_EMPTY 0 +#define C_IS_FREE 1 +#define C_IS_FILLING 2 +#define C_ON_AGE_Q 3 +#define C_ON_SWAPOUT_Q 4 +#define C_ON_SWAPPEDOUT_Q 5 +#define C_ON_SWAPPEDOUTSPARSE_Q 6 +#define C_ON_SWAPPEDIN_Q 7 +#define C_ON_MAJORCOMPACT_Q 8 +#define C_ON_BAD_Q 9 #define C_ON_SWAPIO_Q 10 struct c_segment { - lck_mtx_t c_lock; - queue_chain_t 
c_age_list; - queue_chain_t c_list; + lck_mtx_t c_lock; + queue_chain_t c_age_list; + queue_chain_t c_list; -#define C_SEG_MAX_LIMIT (1 << 20) /* this needs to track the size of c_mysegno */ - uint32_t c_mysegno:20, - c_busy:1, - c_busy_swapping:1, - c_wanted:1, - c_on_minorcompact_q:1, /* can also be on the age_q, the majorcompact_q or the swappedin_q */ +#define C_SEG_MAX_LIMIT (1 << 20) /* this needs to track the size of c_mysegno */ + uint32_t c_mysegno:20, + c_busy:1, + c_busy_swapping:1, + c_wanted:1, + c_on_minorcompact_q:1, /* can also be on the age_q, the majorcompact_q or the swappedin_q */ - c_state:4, /* what state is the segment in which dictates which q to find it on */ - c_overage_swap:1, - c_reserved:3; + c_state:4, /* what state is the segment in which dictates which q to find it on */ + c_overage_swap:1, + c_reserved:3; - uint32_t c_creation_ts; - uint64_t c_generation_id; + uint32_t c_creation_ts; + uint64_t c_generation_id; - int32_t c_bytes_used; - int32_t c_bytes_unused; - uint32_t c_slots_used; + int32_t c_bytes_used; + int32_t c_bytes_unused; + uint32_t c_slots_used; - uint16_t c_firstemptyslot; - uint16_t c_nextslot; - uint32_t c_nextoffset; - uint32_t c_populated_offset; + uint16_t c_firstemptyslot; + uint16_t c_nextslot; + uint32_t c_nextoffset; + uint32_t c_populated_offset; - uint32_t c_swappedin_ts; + uint32_t c_swappedin_ts; union { int32_t *c_buffer; uint64_t c_swap_handle; } c_store; -#if VALIDATE_C_SEGMENTS - uint32_t c_was_minor_compacted; - uint32_t c_was_major_compacted; - uint32_t c_was_major_donor; +#if VALIDATE_C_SEGMENTS + uint32_t c_was_minor_compacted; + uint32_t c_was_major_compacted; + uint32_t c_was_major_donor; #endif -#if CHECKSUM_THE_SWAP - unsigned int cseg_hash; - unsigned int cseg_swap_size; +#if CHECKSUM_THE_SWAP + unsigned int cseg_hash; + unsigned int cseg_swap_size; #endif /* CHECKSUM_THE_SWAP */ #if MACH_ASSERT - thread_t c_busy_for_thread; + thread_t c_busy_for_thread; #endif /* MACH_ASSERT */ - int c_slot_var_array_len; - struct c_slot *c_slot_var_array; - struct c_slot c_slot_fixed_array[0]; + int c_slot_var_array_len; + struct c_slot *c_slot_var_array; + struct c_slot c_slot_fixed_array[0]; }; struct c_slot_mapping { - uint32_t s_cseg:22, /* segment number + 1 */ - s_cindx:10; /* index in the segment */ + uint32_t s_cseg:22, /* segment number + 1 */ + s_cindx:10; /* index in the segment */ }; -#define C_SLOT_MAX_INDEX (1 << 10) +#define C_SLOT_MAX_INDEX (1 << 10) typedef struct c_slot_mapping *c_slot_mapping_t; -#define C_SEG_SLOT_VAR_ARRAY_MIN_LEN C_SEG_MAX_PAGES +#define C_SEG_SLOT_VAR_ARRAY_MIN_LEN C_SEG_MAX_PAGES -extern int c_seg_fixed_array_len; -extern vm_offset_t c_buffers; -#define C_SEG_BUFFER_ADDRESS(c_segno) ((c_buffers + ((uint64_t)c_segno * (uint64_t)C_SEG_ALLOCSIZE))) +extern int c_seg_fixed_array_len; +extern vm_offset_t c_buffers; +#define C_SEG_BUFFER_ADDRESS(c_segno) ((c_buffers + ((uint64_t)c_segno * (uint64_t)C_SEG_ALLOCSIZE))) -#define C_SEG_SLOT_FROM_INDEX(cseg, index) (index < c_seg_fixed_array_len ? &(cseg->c_slot_fixed_array[index]) : &(cseg->c_slot_var_array[index - c_seg_fixed_array_len])) +#define C_SEG_SLOT_FROM_INDEX(cseg, index) (index < c_seg_fixed_array_len ? 
&(cseg->c_slot_fixed_array[index]) : &(cseg->c_slot_var_array[index - c_seg_fixed_array_len])) -#define C_SEG_OFFSET_TO_BYTES(off) ((off) * (int) sizeof(int32_t)) -#define C_SEG_BYTES_TO_OFFSET(bytes) ((bytes) / (int) sizeof(int32_t)) +#define C_SEG_OFFSET_TO_BYTES(off) ((off) * (int) sizeof(int32_t)) +#define C_SEG_BYTES_TO_OFFSET(bytes) ((bytes) / (int) sizeof(int32_t)) -#define C_SEG_UNUSED_BYTES(cseg) (cseg->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES(cseg->c_populated_offset - cseg->c_nextoffset))) +#define C_SEG_UNUSED_BYTES(cseg) (cseg->c_bytes_unused + (C_SEG_OFFSET_TO_BYTES(cseg->c_populated_offset - cseg->c_nextoffset))) //todo opensource #ifndef __PLATFORM_WKDM_ALIGNMENT_MASK__ -#define C_SEG_OFFSET_ALIGNMENT_MASK 0x3ULL -#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY 0x4 +#define C_SEG_OFFSET_ALIGNMENT_MASK 0x3ULL +#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY 0x4 #else -#define C_SEG_OFFSET_ALIGNMENT_MASK __PLATFORM_WKDM_ALIGNMENT_MASK__ -#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__ +#define C_SEG_OFFSET_ALIGNMENT_MASK __PLATFORM_WKDM_ALIGNMENT_MASK__ +#define C_SEG_OFFSET_ALIGNMENT_BOUNDARY __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__ #endif -#define C_SEG_SHOULD_MINORCOMPACT_NOW(cseg) ((C_SEG_UNUSED_BYTES(cseg) >= (C_SEG_BUFSIZE / 4)) ? 1 : 0) +#define C_SEG_SHOULD_MINORCOMPACT_NOW(cseg) ((C_SEG_UNUSED_BYTES(cseg) >= (C_SEG_BUFSIZE / 4)) ? 1 : 0) /* * the decsion to force a c_seg to be major compacted is based on 2 criteria @@ -219,39 +219,39 @@ extern vm_offset_t c_buffers; * 2) are there at least a minimum number of slots unoccupied so that we have a chance * of combining this c_seg with another one. */ -#define C_SEG_SHOULD_MAJORCOMPACT_NOW(cseg) \ - ((((cseg->c_bytes_unused + (C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset))) >= (C_SEG_BUFSIZE / 8)) && \ +#define C_SEG_SHOULD_MAJORCOMPACT_NOW(cseg) \ + ((((cseg->c_bytes_unused + (C_SEG_BUFSIZE - C_SEG_OFFSET_TO_BYTES(c_seg->c_nextoffset))) >= (C_SEG_BUFSIZE / 8)) && \ ((C_SLOT_MAX_INDEX - cseg->c_slots_used) > (C_SEG_BUFSIZE / PAGE_SIZE))) \ ? 1 : 0) -#define C_SEG_ONDISK_IS_SPARSE(cseg) ((cseg->c_bytes_used < cseg->c_bytes_unused) ? 1 : 0) -#define C_SEG_IS_ONDISK(cseg) ((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q)) -#define C_SEG_IS_ON_DISK_OR_SOQ(cseg) ((cseg->c_state == C_ON_SWAPPEDOUT_Q || \ - cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q || \ - cseg->c_state == C_ON_SWAPOUT_Q || \ - cseg->c_state == C_ON_SWAPIO_Q)) - - -#define C_SEG_WAKEUP_DONE(cseg) \ - MACRO_BEGIN \ - assert((cseg)->c_busy); \ - (cseg)->c_busy = 0; \ - assert((cseg)->c_busy_for_thread != NULL); \ - assert((((cseg)->c_busy_for_thread = NULL), TRUE)); \ - if ((cseg)->c_wanted) { \ - (cseg)->c_wanted = 0; \ - thread_wakeup((event_t) (cseg)); \ - } \ +#define C_SEG_ONDISK_IS_SPARSE(cseg) ((cseg->c_bytes_used < cseg->c_bytes_unused) ? 
1 : 0) +#define C_SEG_IS_ONDISK(cseg) ((cseg->c_state == C_ON_SWAPPEDOUT_Q || cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q)) +#define C_SEG_IS_ON_DISK_OR_SOQ(cseg) ((cseg->c_state == C_ON_SWAPPEDOUT_Q || \ + cseg->c_state == C_ON_SWAPPEDOUTSPARSE_Q || \ + cseg->c_state == C_ON_SWAPOUT_Q || \ + cseg->c_state == C_ON_SWAPIO_Q)) + + +#define C_SEG_WAKEUP_DONE(cseg) \ + MACRO_BEGIN \ + assert((cseg)->c_busy); \ + (cseg)->c_busy = 0; \ + assert((cseg)->c_busy_for_thread != NULL); \ + assert((((cseg)->c_busy_for_thread = NULL), TRUE)); \ + if ((cseg)->c_wanted) { \ + (cseg)->c_wanted = 0; \ + thread_wakeup((event_t) (cseg)); \ + } \ MACRO_END -#define C_SEG_BUSY(cseg) \ - MACRO_BEGIN \ - assert((cseg)->c_busy == 0); \ - (cseg)->c_busy = 1; \ - assert((cseg)->c_busy_for_thread == NULL); \ - assert((((cseg)->c_busy_for_thread = current_thread()), TRUE)); \ +#define C_SEG_BUSY(cseg) \ + MACRO_BEGIN \ + assert((cseg)->c_busy == 0); \ + (cseg)->c_busy = 1; \ + assert((cseg)->c_busy_for_thread == NULL); \ + assert((((cseg)->c_busy_for_thread = current_thread()), TRUE)); \ MACRO_END - + extern vm_map_t compressor_map; @@ -259,36 +259,36 @@ extern vm_map_t compressor_map; extern boolean_t write_protect_c_segs; extern int vm_compressor_test_seg_wp; -#define C_SEG_MAKE_WRITEABLE(cseg) \ - MACRO_BEGIN \ - if (write_protect_c_segs) { \ - vm_map_protect(compressor_map, \ - (vm_map_offset_t)cseg->c_store.c_buffer, \ - (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(C_SEG_ALLOCSIZE)],\ - VM_PROT_READ | VM_PROT_WRITE, \ - 0); \ - } \ +#define C_SEG_MAKE_WRITEABLE(cseg) \ + MACRO_BEGIN \ + if (write_protect_c_segs) { \ + vm_map_protect(compressor_map, \ + (vm_map_offset_t)cseg->c_store.c_buffer, \ + (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(C_SEG_ALLOCSIZE)],\ + VM_PROT_READ | VM_PROT_WRITE, \ + 0); \ + } \ MACRO_END -#define C_SEG_WRITE_PROTECT(cseg) \ - MACRO_BEGIN \ - if (write_protect_c_segs) { \ - vm_map_protect(compressor_map, \ - (vm_map_offset_t)cseg->c_store.c_buffer, \ - (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(C_SEG_ALLOCSIZE)],\ - VM_PROT_READ, \ - 0); \ - } \ - if (vm_compressor_test_seg_wp) { \ - volatile uint32_t vmtstmp = *(volatile uint32_t *)cseg->c_store.c_buffer; \ - *(volatile uint32_t *)cseg->c_store.c_buffer = 0xDEADABCD; \ - (void) vmtstmp; \ - } \ +#define C_SEG_WRITE_PROTECT(cseg) \ + MACRO_BEGIN \ + if (write_protect_c_segs) { \ + vm_map_protect(compressor_map, \ + (vm_map_offset_t)cseg->c_store.c_buffer, \ + (vm_map_offset_t)&cseg->c_store.c_buffer[C_SEG_BYTES_TO_OFFSET(C_SEG_ALLOCSIZE)],\ + VM_PROT_READ, \ + 0); \ + } \ + if (vm_compressor_test_seg_wp) { \ + volatile uint32_t vmtstmp = *(volatile uint32_t *)cseg->c_store.c_buffer; \ + *(volatile uint32_t *)cseg->c_store.c_buffer = 0xDEADABCD; \ + (void) vmtstmp; \ + } \ MACRO_END #endif -typedef struct c_segment *c_segment_t; -typedef struct c_slot *c_slot_t; +typedef struct c_segment *c_segment_t; +typedef struct c_slot *c_slot_t; uint64_t vm_compressor_total_compressions(void); void vm_wake_compactor_swapper(void); @@ -310,75 +310,75 @@ void vm_compressor_do_warmup(void); void vm_compressor_record_warmup_start(void); void vm_compressor_record_warmup_end(void); -int vm_wants_task_throttled(task_t); +int vm_wants_task_throttled(task_t); -extern void vm_compaction_swapper_do_init(void); -extern void vm_compressor_swap_init(void); -extern void vm_compressor_init_locks(void); -extern lck_rw_t c_master_lock; +extern void vm_compaction_swapper_do_init(void); +extern void 
vm_compressor_swap_init(void); +extern void vm_compressor_init_locks(void); +extern lck_rw_t c_master_lock; #if ENCRYPTED_SWAP -extern void vm_swap_decrypt(c_segment_t); +extern void vm_swap_decrypt(c_segment_t); #endif /* ENCRYPTED_SWAP */ -extern int vm_swap_low_on_space(void); -extern kern_return_t vm_swap_get(c_segment_t, uint64_t, uint64_t); -extern void vm_swap_free(uint64_t); -extern void vm_swap_consider_defragmenting(int); - -extern void c_seg_swapin_requeue(c_segment_t, boolean_t, boolean_t, boolean_t); -extern int c_seg_swapin(c_segment_t, boolean_t, boolean_t); -extern void c_seg_wait_on_busy(c_segment_t); -extern void c_seg_trim_tail(c_segment_t); -extern void c_seg_switch_state(c_segment_t, int, boolean_t); - -extern boolean_t fastwake_recording_in_progress; -extern int compaction_swapper_inited; -extern int compaction_swapper_running; -extern uint64_t vm_swap_put_failures; - -extern int c_overage_swapped_count; -extern int c_overage_swapped_limit; - -extern queue_head_t c_minor_list_head; -extern queue_head_t c_age_list_head; -extern queue_head_t c_swapout_list_head; -extern queue_head_t c_swappedout_list_head; -extern queue_head_t c_swappedout_sparse_list_head; - -extern uint32_t c_age_count; -extern uint32_t c_swapout_count; -extern uint32_t c_swappedout_count; -extern uint32_t c_swappedout_sparse_count; - -extern int64_t compressor_bytes_used; -extern uint64_t first_c_segment_to_warm_generation_id; -extern uint64_t last_c_segment_to_warm_generation_id; -extern boolean_t hibernate_flushing; -extern boolean_t hibernate_no_swapspace; -extern boolean_t hibernate_in_progress_with_pinned_swap; -extern uint32_t swapout_target_age; +extern int vm_swap_low_on_space(void); +extern kern_return_t vm_swap_get(c_segment_t, uint64_t, uint64_t); +extern void vm_swap_free(uint64_t); +extern void vm_swap_consider_defragmenting(int); + +extern void c_seg_swapin_requeue(c_segment_t, boolean_t, boolean_t, boolean_t); +extern int c_seg_swapin(c_segment_t, boolean_t, boolean_t); +extern void c_seg_wait_on_busy(c_segment_t); +extern void c_seg_trim_tail(c_segment_t); +extern void c_seg_switch_state(c_segment_t, int, boolean_t); + +extern boolean_t fastwake_recording_in_progress; +extern int compaction_swapper_inited; +extern int compaction_swapper_running; +extern uint64_t vm_swap_put_failures; + +extern int c_overage_swapped_count; +extern int c_overage_swapped_limit; + +extern queue_head_t c_minor_list_head; +extern queue_head_t c_age_list_head; +extern queue_head_t c_swapout_list_head; +extern queue_head_t c_swappedout_list_head; +extern queue_head_t c_swappedout_sparse_list_head; + +extern uint32_t c_age_count; +extern uint32_t c_swapout_count; +extern uint32_t c_swappedout_count; +extern uint32_t c_swappedout_sparse_count; + +extern int64_t compressor_bytes_used; +extern uint64_t first_c_segment_to_warm_generation_id; +extern uint64_t last_c_segment_to_warm_generation_id; +extern boolean_t hibernate_flushing; +extern boolean_t hibernate_no_swapspace; +extern boolean_t hibernate_in_progress_with_pinned_swap; +extern uint32_t swapout_target_age; extern void c_seg_insert_into_q(queue_head_t *, c_segment_t); -extern uint32_t vm_compressor_minorcompact_threshold_divisor; -extern uint32_t vm_compressor_majorcompact_threshold_divisor; -extern uint32_t vm_compressor_unthrottle_threshold_divisor; -extern uint32_t vm_compressor_catchup_threshold_divisor; +extern uint32_t vm_compressor_minorcompact_threshold_divisor; +extern uint32_t vm_compressor_majorcompact_threshold_divisor; +extern uint32_t 
vm_compressor_unthrottle_threshold_divisor; +extern uint32_t vm_compressor_catchup_threshold_divisor; -extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden; -extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden; -extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden; -extern uint32_t vm_compressor_catchup_threshold_divisor_overridden; +extern uint32_t vm_compressor_minorcompact_threshold_divisor_overridden; +extern uint32_t vm_compressor_majorcompact_threshold_divisor_overridden; +extern uint32_t vm_compressor_unthrottle_threshold_divisor_overridden; +extern uint32_t vm_compressor_catchup_threshold_divisor_overridden; extern uint64_t vm_compressor_compute_elapsed_msecs(clock_sec_t, clock_nsec_t, clock_sec_t, clock_nsec_t); -#define PAGE_REPLACEMENT_DISALLOWED(enable) (enable == TRUE ? lck_rw_lock_shared(&c_master_lock) : lck_rw_done(&c_master_lock)) -#define PAGE_REPLACEMENT_ALLOWED(enable) (enable == TRUE ? lck_rw_lock_exclusive(&c_master_lock) : lck_rw_done(&c_master_lock)) +#define PAGE_REPLACEMENT_DISALLOWED(enable) (enable == TRUE ? lck_rw_lock_shared(&c_master_lock) : lck_rw_done(&c_master_lock)) +#define PAGE_REPLACEMENT_ALLOWED(enable) (enable == TRUE ? lck_rw_lock_exclusive(&c_master_lock) : lck_rw_done(&c_master_lock)) -#define AVAILABLE_NON_COMPRESSED_MEMORY (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count) -#define AVAILABLE_MEMORY (AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT) +#define AVAILABLE_NON_COMPRESSED_MEMORY (vm_page_active_count + vm_page_inactive_count + vm_page_free_count + vm_page_speculative_count) +#define AVAILABLE_MEMORY (AVAILABLE_NON_COMPRESSED_MEMORY + VM_PAGE_COMPRESSOR_COUNT) /* * TODO, there may be a minor optimisation opportunity to replace these divisions @@ -389,36 +389,36 @@ extern uint64_t vm_compressor_compute_elapsed_msecs(clock_sec_t, clock_nsec_t, c * By multiplying by 11, you get a number ~10% bigger which allows us to generate a reset limit derived from the same base which is useful for hysteresis */ -#define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD (((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 10)) -#define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD (((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 10)) +#define VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD (((AVAILABLE_MEMORY) * 10) / (vm_compressor_minorcompact_threshold_divisor ? vm_compressor_minorcompact_threshold_divisor : 10)) +#define VM_PAGE_COMPRESSOR_SWAP_THRESHOLD (((AVAILABLE_MEMORY) * 10) / (vm_compressor_majorcompact_threshold_divisor ? vm_compressor_majorcompact_threshold_divisor : 10)) -#define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD (((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 10)) -#define VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD (((AVAILABLE_MEMORY) * 11) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 11)) +#define VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD (((AVAILABLE_MEMORY) * 10) / (vm_compressor_unthrottle_threshold_divisor ? vm_compressor_unthrottle_threshold_divisor : 10)) +#define VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD (((AVAILABLE_MEMORY) * 11) / (vm_compressor_unthrottle_threshold_divisor ? 
vm_compressor_unthrottle_threshold_divisor : 11)) -#define VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD (((AVAILABLE_MEMORY) * 11) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 11)) -#define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD (((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 10)) +#define VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD (((AVAILABLE_MEMORY) * 11) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 11)) +#define VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD (((AVAILABLE_MEMORY) * 10) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 10)) #define VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD (((AVAILABLE_MEMORY) * 9) / (vm_compressor_catchup_threshold_divisor ? vm_compressor_catchup_threshold_divisor : 9)) -#ifdef CONFIG_EMBEDDED -#define AVAILABLE_NON_COMPRESSED_MIN 20000 -#define COMPRESSOR_NEEDS_TO_SWAP() (((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) || \ - (AVAILABLE_NON_COMPRESSED_MEMORY < AVAILABLE_NON_COMPRESSED_MIN)) ? 1 : 0) +#ifdef CONFIG_EMBEDDED +#define AVAILABLE_NON_COMPRESSED_MIN 20000 +#define COMPRESSOR_NEEDS_TO_SWAP() (((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) || \ + (AVAILABLE_NON_COMPRESSED_MEMORY < AVAILABLE_NON_COMPRESSED_MIN)) ? 1 : 0) #else -#define COMPRESSOR_NEEDS_TO_SWAP() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0) +#define COMPRESSOR_NEEDS_TO_SWAP() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_THRESHOLD) ? 1 : 0) #endif -#define HARD_THROTTLE_LIMIT_REACHED() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD) ? 1 : 0) -#define SWAPPER_NEEDS_TO_UNTHROTTLE() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0) -#define SWAPPER_NEEDS_TO_RETHROTTLE() ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD) ? 1 : 0) -#define SWAPPER_NEEDS_TO_CATCHUP() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0) -#define SWAPPER_HAS_CAUGHTUP() ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD) ? 1 : 0) -#define COMPRESSOR_NEEDS_TO_MINOR_COMPACT() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0) +#define HARD_THROTTLE_LIMIT_REACHED() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_HARD_THROTTLE_THRESHOLD) ? 1 : 0) +#define SWAPPER_NEEDS_TO_UNTHROTTLE() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) ? 1 : 0) +#define SWAPPER_NEEDS_TO_RETHROTTLE() ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_RETHROTTLE_THRESHOLD) ? 1 : 0) +#define SWAPPER_NEEDS_TO_CATCHUP() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_SWAP_CATCHUP_THRESHOLD) ? 1 : 0) +#define SWAPPER_HAS_CAUGHTUP() ((AVAILABLE_NON_COMPRESSED_MEMORY > VM_PAGE_COMPRESSOR_SWAP_HAS_CAUGHTUP_THRESHOLD) ? 1 : 0) +#define COMPRESSOR_NEEDS_TO_MINOR_COMPACT() ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 
1 : 0) -#ifdef CONFIG_EMBEDDED -#define COMPRESSOR_FREE_RESERVED_LIMIT 28 +#ifdef CONFIG_EMBEDDED +#define COMPRESSOR_FREE_RESERVED_LIMIT 28 #else -#define COMPRESSOR_FREE_RESERVED_LIMIT 128 +#define COMPRESSOR_FREE_RESERVED_LIMIT 128 #endif uint32_t vm_compressor_get_encode_scratch_size(void); @@ -427,19 +427,19 @@ uint32_t vm_compressor_get_decode_scratch_size(void); #define COMPRESSOR_SCRATCH_BUF_SIZE vm_compressor_get_encode_scratch_size() #if RECORD_THE_COMPRESSED_DATA -extern void c_compressed_record_init(void); -extern void c_compressed_record_write(char *, int); +extern void c_compressed_record_init(void); +extern void c_compressed_record_write(char *, int); #endif -extern lck_mtx_t *c_list_lock; +extern lck_mtx_t *c_list_lock; #if DEVELOPMENT || DEBUG extern uint32_t vm_ktrace_enabled; -#define VMKDBG(x, ...) \ -MACRO_BEGIN \ -if (vm_ktrace_enabled) { \ +#define VMKDBG(x, ...) \ +MACRO_BEGIN \ +if (vm_ktrace_enabled) { \ KDBG(x, ## __VA_ARGS__);\ -} \ +} \ MACRO_END #endif diff --git a/osfmk/vm/vm_compressor_algorithms.c b/osfmk/vm/vm_compressor_algorithms.c index 9411412c6..4344f3ebe 100644 --- a/osfmk/vm/vm_compressor_algorithms.c +++ b/osfmk/vm/vm_compressor_algorithms.c @@ -2,7 +2,7 @@ * Copyright (c) 2010-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* This module implements a hybrid/adaptive compression scheme, using WKdm where @@ -110,37 +110,39 @@ boolean_t verbose = FALSE; #define VMDBGSTAT (DEBUG) #if VMDBGSTATS -#define VM_COMPRESSOR_STAT_DBG(x...) \ - do { \ - (x); \ +#define VM_COMPRESSOR_STAT_DBG(x...) \ + do { \ + (x); \ } while(0) #else -#define VM_COMPRESSOR_STAT_DBG(x...) \ - do { \ +#define VM_COMPRESSOR_STAT_DBG(x...) \ + do { \ } while (0) #endif #define VMCSTATS (DEVELOPMENT || DEBUG) #if VMCSTATS -#define VM_COMPRESSOR_STAT(x...) \ - do { \ - (x); \ +#define VM_COMPRESSOR_STAT(x...) \ + do { \ + (x); \ } while(0) //TODO make atomic where needed, decompression paths -#define VM_DECOMPRESSOR_STAT(x...) \ - do { \ - (x); \ +#define VM_DECOMPRESSOR_STAT(x...) \ + do { \ + (x); \ } while(0) #else -#define VM_COMPRESSOR_STAT(x...) \ - do { \ +#define VM_COMPRESSOR_STAT(x...) \ + do { \ }while (0) -#define VM_DECOMPRESSOR_STAT(x...) \ - do { \ +#define VM_DECOMPRESSOR_STAT(x...) 
\ + do { \ }while (0) #endif -static inline enum compressor_preselect_t compressor_preselect(void) { +static inline enum compressor_preselect_t +compressor_preselect(void) +{ if (vmcstate.lz4_failure_skips >= vmctune.lz4_max_failure_skips) { vmcstate.lz4_failure_skips = 0; vmcstate.lz4_failure_run_length = 0; @@ -165,11 +167,13 @@ static inline enum compressor_preselect_t compressor_preselect(void) { return CPRESELWK; } -static inline void compressor_selector_update(int lz4sz, int didwk, int wksz) { +static inline void +compressor_selector_update(int lz4sz, int didwk, int wksz) +{ VM_COMPRESSOR_STAT(compressor_stats.lz4_compressions++); if (lz4sz == 0) { - VM_COMPRESSOR_STAT(compressor_stats.lz4_compressed_bytes+=PAGE_SIZE); + VM_COMPRESSOR_STAT(compressor_stats.lz4_compressed_bytes += PAGE_SIZE); VM_COMPRESSOR_STAT(compressor_stats.lz4_compression_failures++); vmcstate.lz4_failure_run_length++; VM_COMPRESSOR_STAT(vmcstate.lz4_total_failures++); @@ -177,7 +181,7 @@ static inline void compressor_selector_update(int lz4sz, int didwk, int wksz) { } else { vmcstate.lz4_failure_run_length = 0; - VM_COMPRESSOR_STAT(compressor_stats.lz4_compressed_bytes+=lz4sz); + VM_COMPRESSOR_STAT(compressor_stats.lz4_compressed_bytes += lz4sz); if (lz4sz <= vmctune.wkdm_reeval_threshold) { vmcstate.lz4_run_length = 0; @@ -190,7 +194,7 @@ static inline void compressor_selector_update(int lz4sz, int didwk, int wksz) { if (didwk) { if (__probable(wksz > lz4sz)) { uint32_t lz4delta = wksz - lz4sz; - VM_COMPRESSOR_STAT(compressor_stats.lz4_wk_compression_delta+=lz4delta); + VM_COMPRESSOR_STAT(compressor_stats.lz4_wk_compression_delta += lz4delta); if (lz4delta >= vmctune.lz4_run_continue_bytes) { vmcstate.lz4_run_length++; } else if (lz4delta <= vmctune.lz4_profitable_bytes) { @@ -201,7 +205,7 @@ static inline void compressor_selector_update(int lz4sz, int didwk, int wksz) { vmcstate.lz4_run_length = 0; } } else { - VM_COMPRESSOR_STAT(compressor_stats.lz4_wk_compression_negative_delta+=(lz4sz-wksz)); + VM_COMPRESSOR_STAT(compressor_stats.lz4_wk_compression_negative_delta += (lz4sz - wksz)); vmcstate.lz4_failure_run_length++; VM_COMPRESSOR_STAT(vmcstate.lz4_total_negatives++); vmcstate.lz4_run_length = 0; @@ -211,12 +215,14 @@ static inline void compressor_selector_update(int lz4sz, int didwk, int wksz) { } -static inline void WKdm_hv(uint32_t *wkbuf) { +static inline void +WKdm_hv(uint32_t *wkbuf) +{ #if DEVELOPMENT || DEBUG uint32_t *inw = (uint32_t *) wkbuf; if (*inw != MZV_MAGIC) { if ((*inw | *(inw + 1) | *(inw + 2)) & 0xFFFF0000) { - panic("WKdm(%p): invalid header 0x%x 0x%x 0x%x\n", wkbuf, *inw, *(inw +1), *(inw+2)); + panic("WKdm(%p): invalid header 0x%x 0x%x 0x%x\n", wkbuf, *inw, *(inw + 1), *(inw + 2)); } } #else /* DEVELOPMENT || DEBUG */ @@ -231,7 +237,9 @@ static inline void WKdm_hv(uint32_t *wkbuf) { #if defined(__arm64__) #endif -static inline void WKdmD(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, unsigned int bytes) { +static inline void +WKdmD(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, unsigned int bytes) +{ #if defined(__arm64__) #endif WKdm_hv(src_buf); @@ -255,7 +263,9 @@ static inline void WKdmD(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, int precompy, wkswhw; #endif -static inline int WKdmC(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, boolean_t *incomp_copy, unsigned int limit) { +static inline int +WKdmC(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, boolean_t *incomp_copy, unsigned int limit) +{ (void)incomp_copy; int wkcval; #if 
defined(__arm64__) @@ -279,7 +289,9 @@ static inline int WKdmC(WK_word* src_buf, WK_word* dest_buf, WK_word* scratch, b } -int metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t *codec, void *cscratchin, boolean_t *incomp_copy) { +int +metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t *codec, void *cscratchin, boolean_t *incomp_copy) +{ int sz = -1; int dowk = FALSE, dolz4 = FALSE, skiplz4 = FALSE; int insize = PAGE_SIZE; @@ -309,7 +321,7 @@ int metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t sz = WKdmC(in, cdst, &cscratch->wkscratch[0], incomp_copy, outbufsz); if (sz == -1) { - VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total+=PAGE_SIZE); + VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total += PAGE_SIZE); VM_COMPRESSOR_STAT(compressor_stats.wk_compression_failures++); if (vm_compressor_current_codec == CMODE_HYB) { @@ -318,9 +330,9 @@ int metacompressor(const uint8_t *in, uint8_t *cdst, int32_t outbufsz, uint16_t goto cexit; } else if (sz == 0) { VM_COMPRESSOR_STAT(compressor_stats.wk_sv_compressions++); - VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total+=4); + VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total += 4); } else { - VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total+=sz); + VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_total += sz); } } lz4eval: @@ -332,7 +344,7 @@ lz4eval: int wkc = (sz == -1) ? PAGE_SIZE : sz; #endif VM_COMPRESSOR_STAT(compressor_stats.wk_compressions_exclusive++); - VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_exclusive+=wkc); + VM_COMPRESSOR_STAT(compressor_stats.wk_compressed_bytes_exclusive += wkc); goto cexit; } } @@ -358,32 +370,36 @@ cexit: return sz; } -void metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize, uint16_t ccodec, void *compressor_dscratchin) { +void +metadecompressor(const uint8_t *source, uint8_t *dest, uint32_t csize, uint16_t ccodec, void *compressor_dscratchin) +{ int dolz4 = (ccodec == CCLZ4); int rval; compressor_decode_scratch_t *compressor_dscratch = compressor_dscratchin; - + if (dolz4) { rval = (int)lz4raw_decode_buffer(dest, PAGE_SIZE, source, csize, &compressor_dscratch->lz4decodestate[0]); - VM_DECOMPRESSOR_STAT(compressor_stats.lz4_decompressions+=1); - VM_DECOMPRESSOR_STAT(compressor_stats.lz4_decompressed_bytes+=csize); + VM_DECOMPRESSOR_STAT(compressor_stats.lz4_decompressions += 1); + VM_DECOMPRESSOR_STAT(compressor_stats.lz4_decompressed_bytes += csize); #if DEVELOPMENT || DEBUG uint32_t *d32 = dest; #endif assertf(rval == PAGE_SIZE, "LZ4 decode: size != pgsize %d, header: 0x%x, 0x%x, 0x%x", - rval, *d32, *(d32+1), *(d32+2)); + rval, *d32, *(d32 + 1), *(d32 + 2)); } else { assert(ccodec == CCWK); WKdmD(source, dest, &compressor_dscratch->wkdecompscratch[0], csize); - VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressions+=1); - VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressed_bytes+=csize); + VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressions += 1); + VM_DECOMPRESSOR_STAT(compressor_stats.wk_decompressed_bytes += csize); } } #pragma clang diagnostic pop -uint32_t vm_compressor_get_encode_scratch_size(void) { +uint32_t +vm_compressor_get_encode_scratch_size(void) +{ if (vm_compressor_current_codec != VM_COMPRESSOR_DEFAULT_CODEC) { return MAX(sizeof(compressor_encode_scratch_t), WKdm_SCRATCH_BUF_SIZE_INTERNAL); } else { @@ -391,7 +407,9 @@ uint32_t vm_compressor_get_encode_scratch_size(void) { } } -uint32_t 
vm_compressor_get_decode_scratch_size(void) { +uint32_t +vm_compressor_get_decode_scratch_size(void) +{ if (vm_compressor_current_codec != VM_COMPRESSOR_DEFAULT_CODEC) { return MAX(sizeof(compressor_decode_scratch_t), WKdm_SCRATCH_BUF_SIZE_INTERNAL); } else { @@ -400,11 +418,15 @@ uint32_t vm_compressor_get_decode_scratch_size(void) { } -int vm_compressor_algorithm(void) { +int +vm_compressor_algorithm(void) +{ return vm_compressor_current_codec; } -void vm_compressor_algorithm_init(void) { +void +vm_compressor_algorithm_init(void) +{ vm_compressor_mode_t new_codec = VM_COMPRESSOR_DEFAULT_CODEC; #if defined(__arm64__) @@ -417,10 +439,10 @@ void vm_compressor_algorithm_init(void) { PE_parse_boot_argn("vm_compressor_codec", &new_codec, sizeof(new_codec)); assertf(((new_codec == VM_COMPRESSOR_DEFAULT_CODEC) || (new_codec == CMODE_WK) || - (new_codec == CMODE_LZ4) || (new_codec == CMODE_HYB)), + (new_codec == CMODE_LZ4) || (new_codec == CMODE_HYB)), "Invalid VM compression codec: %u", new_codec); -#if defined(__arm__)||defined(__arm64__) +#if defined(__arm__) || defined(__arm64__) uint32_t tmpc; if (PE_parse_boot_argn("-vm_compressor_wk", &tmpc, sizeof(tmpc))) { new_codec = VM_COMPRESSOR_DEFAULT_CODEC; diff --git a/osfmk/vm/vm_compressor_algorithms.h b/osfmk/vm/vm_compressor_algorithms.h index 46d022f5e..163d393be 100644 --- a/osfmk/vm/vm_compressor_algorithms.h +++ b/osfmk/vm/vm_compressor_algorithms.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -39,7 +39,7 @@ typedef struct { uint64_t lz4_wk_compression_delta; uint64_t lz4_wk_compression_negative_delta; uint64_t lz4_post_wk_compressions; - + uint64_t wk_compressions; uint64_t wk_cabstime; uint64_t wk_sv_compressions; diff --git a/osfmk/vm/vm_compressor_backing_store.c b/osfmk/vm/vm_compressor_backing_store.c index e8c1342a1..8f6971fb4 100644 --- a/osfmk/vm/vm_compressor_backing_store.c +++ b/osfmk/vm/vm_compressor_backing_store.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -34,71 +34,72 @@ #include -boolean_t compressor_store_stop_compaction = FALSE; -boolean_t vm_swapfile_create_needed = FALSE; -boolean_t vm_swapfile_gc_needed = FALSE; - -int vm_swapper_throttle = -1; -uint64_t vm_swapout_thread_id; - -uint64_t vm_swap_put_failures = 0; -uint64_t vm_swap_get_failures = 0; -int vm_num_swap_files_config = 0; -int vm_num_swap_files = 0; -int vm_num_pinned_swap_files = 0; -int vm_swapout_thread_processed_segments = 0; -int vm_swapout_thread_awakened = 0; -int vm_swapfile_create_thread_awakened = 0; -int vm_swapfile_create_thread_running = 0; -int vm_swapfile_gc_thread_awakened = 0; -int vm_swapfile_gc_thread_running = 0; - -int64_t vm_swappin_avail = 0; -boolean_t vm_swappin_enabled = FALSE; -unsigned int vm_swapfile_total_segs_alloced = 0; -unsigned int vm_swapfile_total_segs_used = 0; - -char swapfilename[MAX_SWAPFILENAME_LEN + 1] = SWAP_FILE_NAME; +boolean_t compressor_store_stop_compaction = FALSE; +boolean_t vm_swapfile_create_needed = FALSE; +boolean_t vm_swapfile_gc_needed = FALSE; + +int vm_swapper_throttle = -1; +uint64_t vm_swapout_thread_id; + +uint64_t vm_swap_put_failures = 0; /* Likely failed I/O. Data is still in memory. */ +uint64_t vm_swap_get_failures = 0; /* Fatal */ +uint64_t vm_swap_put_failures_no_swap_file = 0; /* Possibly not fatal because we might just need a new swapfile. */ +int vm_num_swap_files_config = 0; +int vm_num_swap_files = 0; +int vm_num_pinned_swap_files = 0; +int vm_swapout_thread_processed_segments = 0; +int vm_swapout_thread_awakened = 0; +int vm_swapfile_create_thread_awakened = 0; +int vm_swapfile_create_thread_running = 0; +int vm_swapfile_gc_thread_awakened = 0; +int vm_swapfile_gc_thread_running = 0; + +int64_t vm_swappin_avail = 0; +boolean_t vm_swappin_enabled = FALSE; +unsigned int vm_swapfile_total_segs_alloced = 0; +unsigned int vm_swapfile_total_segs_used = 0; + +char swapfilename[MAX_SWAPFILENAME_LEN + 1] = SWAP_FILE_NAME; extern vm_map_t compressor_map; -#define SWAP_READY 0x1 /* Swap file is ready to be used */ -#define SWAP_RECLAIM 0x2 /* Swap file is marked to be reclaimed */ -#define SWAP_WANTED 0x4 /* Swap file has waiters */ -#define SWAP_REUSE 0x8 /* Swap file is on the Q and has a name. 
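
/*
 * Editorial aside (not part of the patch): the swp_flags bits defined
 * in this hunk are tested inline throughout vm_compressor_backing_store.c;
 * these hypothetical helpers just illustrate how the bits compose.
 */
static int
swapfile_usable(const struct swapfile *swf)
{
	/* ready for use and not being torn down for reclaim */
	return (swf->swp_flags & SWAP_READY) && !(swf->swp_flags & SWAP_RECLAIM);
}

static int
swapfile_has_waiters(const struct swapfile *swf)
{
	return (swf->swp_flags & SWAP_WANTED) != 0;
}
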
Reuse after init-ing.*/ -#define SWAP_PINNED 0x10 /* Swap file is pinned (FusionDrive) */ - - -struct swapfile{ - queue_head_t swp_queue; /* list of swap files */ - char *swp_path; /* saved pathname of swap file */ - struct vnode *swp_vp; /* backing vnode */ - uint64_t swp_size; /* size of this swap file */ - uint8_t *swp_bitmap; /* bitmap showing the alloced/freed slots in the swap file */ - unsigned int swp_pathlen; /* length of pathname */ - unsigned int swp_nsegs; /* #segments we can use */ - unsigned int swp_nseginuse; /* #segments in use */ - unsigned int swp_index; /* index of this swap file */ - unsigned int swp_flags; /* state of swap file */ - unsigned int swp_free_hint; /* offset of 1st free chunk */ - unsigned int swp_io_count; /* count of outstanding I/Os */ - c_segment_t *swp_csegs; /* back pointers to the c_segments. Used during swap reclaim. */ - - struct trim_list *swp_delayed_trim_list_head; - unsigned int swp_delayed_trim_count; +#define SWAP_READY 0x1 /* Swap file is ready to be used */ +#define SWAP_RECLAIM 0x2 /* Swap file is marked to be reclaimed */ +#define SWAP_WANTED 0x4 /* Swap file has waiters */ +#define SWAP_REUSE 0x8 /* Swap file is on the Q and has a name. Reuse after init-ing.*/ +#define SWAP_PINNED 0x10 /* Swap file is pinned (FusionDrive) */ + + +struct swapfile { + queue_head_t swp_queue; /* list of swap files */ + char *swp_path; /* saved pathname of swap file */ + struct vnode *swp_vp; /* backing vnode */ + uint64_t swp_size; /* size of this swap file */ + uint8_t *swp_bitmap; /* bitmap showing the alloced/freed slots in the swap file */ + unsigned int swp_pathlen; /* length of pathname */ + unsigned int swp_nsegs; /* #segments we can use */ + unsigned int swp_nseginuse; /* #segments in use */ + unsigned int swp_index; /* index of this swap file */ + unsigned int swp_flags; /* state of swap file */ + unsigned int swp_free_hint; /* offset of 1st free chunk */ + unsigned int swp_io_count; /* count of outstanding I/Os */ + c_segment_t *swp_csegs; /* back pointers to the c_segments. Used during swap reclaim. 
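
/*
 * Editorial aside (not part of the patch): a simplified first-fit scan
 * over swp_bitmap starting at swp_free_hint, consistent with the field
 * comments in struct swapfile above.  The real slot allocator lives in
 * the vm_swap_put() path, which is not part of this excerpt, and the
 * bit sense (set == segment in use) is an assumption made here purely
 * for illustration.
 */
static int
swapfile_find_free_segment(const struct swapfile *swf)
{
	unsigned int i;

	for (i = swf->swp_free_hint; i < swf->swp_nsegs; i++) {
		if (!(swf->swp_bitmap[i >> 3] & (1 << (i & 7)))) {
			return (int)i;	/* bit clear => segment assumed free */
		}
	}
	return -1;	/* no free segment in this swap file */
}
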
*/ + + struct trim_list *swp_delayed_trim_list_head; + unsigned int swp_delayed_trim_count; }; -queue_head_t swf_global_queue; -boolean_t swp_trim_supported = FALSE; +queue_head_t swf_global_queue; +boolean_t swp_trim_supported = FALSE; -extern clock_sec_t dont_trim_until_ts; -clock_sec_t vm_swapfile_last_failed_to_create_ts = 0; -clock_sec_t vm_swapfile_last_successful_create_ts = 0; -int vm_swapfile_can_be_created = FALSE; -boolean_t delayed_trim_handling_in_progress = FALSE; +extern clock_sec_t dont_trim_until_ts; +clock_sec_t vm_swapfile_last_failed_to_create_ts = 0; +clock_sec_t vm_swapfile_last_successful_create_ts = 0; +int vm_swapfile_can_be_created = FALSE; +boolean_t delayed_trim_handling_in_progress = FALSE; -boolean_t hibernate_in_progress_with_pinned_swap = FALSE; +boolean_t hibernate_in_progress_with_pinned_swap = FALSE; static void vm_swapout_thread_throttle_adjust(void); static void vm_swap_free_now(struct swapfile *swf, uint64_t f_offset); @@ -116,37 +117,37 @@ boolean_t vm_swap_force_defrag = FALSE, vm_swap_force_reclaim = FALSE; #if CONFIG_EMBEDDED #if DEVELOPMENT || DEBUG -#define VM_MAX_SWAP_FILE_NUM 100 +#define VM_MAX_SWAP_FILE_NUM 100 #else /* DEVELOPMENT || DEBUG */ -#define VM_MAX_SWAP_FILE_NUM 5 +#define VM_MAX_SWAP_FILE_NUM 5 #endif /* DEVELOPMENT || DEBUG */ -#define VM_SWAPFILE_DELAYED_TRIM_MAX 4 +#define VM_SWAPFILE_DELAYED_TRIM_MAX 4 -#define VM_SWAP_SHOULD_DEFRAGMENT() (((vm_swap_force_defrag == TRUE) || (c_swappedout_sparse_count > (vm_swapfile_total_segs_used / 16))) ? 1 : 0) -#define VM_SWAP_SHOULD_PIN(_size) FALSE -#define VM_SWAP_SHOULD_CREATE(cur_ts) ((vm_num_swap_files < vm_num_swap_files_config) && ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < (unsigned int)VM_SWAPFILE_HIWATER_SEGS) && \ - ((cur_ts - vm_swapfile_last_failed_to_create_ts) > VM_SWAPFILE_DELAYED_CREATE) ? 1 : 0) -#define VM_SWAP_SHOULD_TRIM(swf) ((swf->swp_delayed_trim_count >= VM_SWAPFILE_DELAYED_TRIM_MAX) ? 1 : 0) +#define VM_SWAP_SHOULD_DEFRAGMENT() (((vm_swap_force_defrag == TRUE) || (c_swappedout_sparse_count > (vm_swapfile_total_segs_used / 16))) ? 1 : 0) +#define VM_SWAP_SHOULD_PIN(_size) FALSE +#define VM_SWAP_SHOULD_CREATE(cur_ts) ((vm_num_swap_files < vm_num_swap_files_config) && ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < (unsigned int)VM_SWAPFILE_HIWATER_SEGS) && \ + ((cur_ts - vm_swapfile_last_failed_to_create_ts) > VM_SWAPFILE_DELAYED_CREATE) ? 1 : 0) +#define VM_SWAP_SHOULD_TRIM(swf) ((swf->swp_delayed_trim_count >= VM_SWAPFILE_DELAYED_TRIM_MAX) ? 1 : 0) #else /* CONFIG_EMBEDDED */ -#define VM_MAX_SWAP_FILE_NUM 100 -#define VM_SWAPFILE_DELAYED_TRIM_MAX 128 +#define VM_MAX_SWAP_FILE_NUM 100 +#define VM_SWAPFILE_DELAYED_TRIM_MAX 128 -#define VM_SWAP_SHOULD_DEFRAGMENT() (((vm_swap_force_defrag == TRUE) || (c_swappedout_sparse_count > (vm_swapfile_total_segs_used / 4))) ? 1 : 0) -#define VM_SWAP_SHOULD_PIN(_size) (vm_swappin_avail > 0 && vm_swappin_avail >= (int64_t)(_size)) -#define VM_SWAP_SHOULD_CREATE(cur_ts) ((vm_num_swap_files < vm_num_swap_files_config) && ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < (unsigned int)VM_SWAPFILE_HIWATER_SEGS) && \ - ((cur_ts - vm_swapfile_last_failed_to_create_ts) > VM_SWAPFILE_DELAYED_CREATE) ? 1 : 0) -#define VM_SWAP_SHOULD_TRIM(swf) ((swf->swp_delayed_trim_count >= VM_SWAPFILE_DELAYED_TRIM_MAX) ? 1 : 0) +#define VM_SWAP_SHOULD_DEFRAGMENT() (((vm_swap_force_defrag == TRUE) || (c_swappedout_sparse_count > (vm_swapfile_total_segs_used / 4))) ? 
1 : 0) +#define VM_SWAP_SHOULD_PIN(_size) (vm_swappin_avail > 0 && vm_swappin_avail >= (int64_t)(_size)) +#define VM_SWAP_SHOULD_CREATE(cur_ts) ((vm_num_swap_files < vm_num_swap_files_config) && ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < (unsigned int)VM_SWAPFILE_HIWATER_SEGS) && \ + ((cur_ts - vm_swapfile_last_failed_to_create_ts) > VM_SWAPFILE_DELAYED_CREATE) ? 1 : 0) +#define VM_SWAP_SHOULD_TRIM(swf) ((swf->swp_delayed_trim_count >= VM_SWAPFILE_DELAYED_TRIM_MAX) ? 1 : 0) #endif /* CONFIG_EMBEDDED */ -#define VM_SWAP_SHOULD_RECLAIM() (((vm_swap_force_reclaim == TRUE) || ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) >= SWAPFILE_RECLAIM_THRESHOLD_SEGS)) ? 1 : 0) -#define VM_SWAP_SHOULD_ABORT_RECLAIM() (((vm_swap_force_reclaim == FALSE) && ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) <= SWAPFILE_RECLAIM_MINIMUM_SEGS)) ? 1 : 0) -#define VM_SWAPFILE_DELAYED_CREATE 15 +#define VM_SWAP_SHOULD_RECLAIM() (((vm_swap_force_reclaim == TRUE) || ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) >= SWAPFILE_RECLAIM_THRESHOLD_SEGS)) ? 1 : 0) +#define VM_SWAP_SHOULD_ABORT_RECLAIM() (((vm_swap_force_reclaim == FALSE) && ((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) <= SWAPFILE_RECLAIM_MINIMUM_SEGS)) ? 1 : 0) +#define VM_SWAPFILE_DELAYED_CREATE 15 -#define VM_SWAP_BUSY() ((c_swapout_count && (vm_swapper_throttle == THROTTLE_LEVEL_COMPRESSOR_TIER0)) ? 1 : 0) +#define VM_SWAP_BUSY() ((c_swapout_count && (vm_swapper_throttle == THROTTLE_LEVEL_COMPRESSOR_TIER0)) ? 1 : 0) #if CHECKSUM_THE_SWAP @@ -154,37 +155,35 @@ extern unsigned int hash_string(char *cp, int len); #endif #if RECORD_THE_COMPRESSED_DATA -boolean_t c_compressed_record_init_done = FALSE; -int c_compressed_record_write_error = 0; -struct vnode *c_compressed_record_vp = NULL; -uint64_t c_compressed_record_file_offset = 0; -void c_compressed_record_init(void); -void c_compressed_record_write(char *, int); +boolean_t c_compressed_record_init_done = FALSE; +int c_compressed_record_write_error = 0; +struct vnode *c_compressed_record_vp = NULL; +uint64_t c_compressed_record_file_offset = 0; +void c_compressed_record_init(void); +void c_compressed_record_write(char *, int); #endif -extern void vm_pageout_io_throttle(void); +extern void vm_pageout_io_throttle(void); static struct swapfile *vm_swapfile_for_handle(uint64_t); /* * Called with the vm_swap_data_lock held. 
- */ + */ static struct swapfile * -vm_swapfile_for_handle(uint64_t f_offset) +vm_swapfile_for_handle(uint64_t f_offset) { - - uint64_t file_offset = 0; - unsigned int swapfile_index = 0; - struct swapfile* swf = NULL; + uint64_t file_offset = 0; + unsigned int swapfile_index = 0; + struct swapfile* swf = NULL; - file_offset = (f_offset & SWAP_SLOT_MASK); + file_offset = (f_offset & SWAP_SLOT_MASK); swapfile_index = (f_offset >> SWAP_DEVICE_SHIFT); swf = (struct swapfile*) queue_first(&swf_global_queue); - while(queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) { - + while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) { if (swapfile_index == swf->swp_index) { break; } @@ -205,28 +204,28 @@ vm_swapfile_for_handle(uint64_t f_offset) extern int cc_rand_generate(void *, size_t); /* from libkern/crypto/rand.h */ -boolean_t swap_crypt_initialized; -void swap_crypt_initialize(void); +boolean_t swap_crypt_initialized; +void swap_crypt_initialize(void); symmetric_xts xts_modectx; uint32_t swap_crypt_key1[8]; /* big enough for a 256 bit random key */ uint32_t swap_crypt_key2[8]; /* big enough for a 256 bit random key */ #if DEVELOPMENT || DEBUG -boolean_t swap_crypt_xts_tested = FALSE; +boolean_t swap_crypt_xts_tested = FALSE; unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096))); unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096))); unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096))); #endif /* DEVELOPMENT || DEBUG */ -unsigned long vm_page_encrypt_counter; -unsigned long vm_page_decrypt_counter; +unsigned long vm_page_encrypt_counter; +unsigned long vm_page_decrypt_counter; void swap_crypt_initialize(void) { - uint8_t *enckey1, *enckey2; + uint8_t *enckey1, *enckey2; int keylen1, keylen2; int error; @@ -249,9 +248,9 @@ swap_crypt_initialize(void) swap_crypt_initialized = TRUE; #if DEVELOPMENT || DEBUG - uint8_t *encptr; - uint8_t *decptr; - uint8_t *refptr; + uint8_t *encptr; + uint8_t *decptr; + uint8_t *refptr; uint8_t *iv; uint64_t ivnum[2]; int size = 0; @@ -266,12 +265,12 @@ swap_crypt_initialize(void) * First initialize the test data.
*/ for (i = 0; i < 4096; i++) { - swap_crypt_test_page_ref[i] = (char) i; + swap_crypt_test_page_ref[i] = (char) i; } ivnum[0] = (uint64_t)0xaa; ivnum[1] = 0; iv = (uint8_t *)ivnum; - + refptr = (uint8_t *)swap_crypt_test_page_ref; encptr = (uint8_t *)swap_crypt_test_page_encrypt; decptr = (uint8_t *)swap_crypt_test_page_decrypt; @@ -282,10 +281,10 @@ swap_crypt_initialize(void) assert(!rc); /* compare result with original - should NOT match */ - for (i = 0; i < 4096; i ++) { - if (swap_crypt_test_page_encrypt[i] != + for (i = 0; i < 4096; i++) { + if (swap_crypt_test_page_encrypt[i] != swap_crypt_test_page_ref[i]) { - break; + break; } } assert(i != 4096); @@ -295,10 +294,10 @@ swap_crypt_initialize(void) assert(!rc); /* compare result with original */ - for (i = 0; i < 4096; i ++) { - if (swap_crypt_test_page_decrypt[i] != + for (i = 0; i < 4096; i++) { + if (swap_crypt_test_page_decrypt[i] != swap_crypt_test_page_ref[i]) { - panic("encryption test failed"); + panic("encryption test failed"); } } /* encrypt in place */ @@ -309,10 +308,10 @@ swap_crypt_initialize(void) rc = xts_decrypt(decptr, size, decptr, iv, &xts_modectx); assert(!rc); - for (i = 0; i < 4096; i ++) { - if (swap_crypt_test_page_decrypt[i] != + for (i = 0; i < 4096; i++) { + if (swap_crypt_test_page_decrypt[i] != swap_crypt_test_page_ref[i]) { - panic("in place encryption test failed"); + panic("in place encryption test failed"); } } swap_crypt_xts_tested = TRUE; @@ -323,14 +322,15 @@ swap_crypt_initialize(void) void vm_swap_encrypt(c_segment_t c_seg) { - uint8_t *ptr; + uint8_t *ptr; uint8_t *iv; uint64_t ivnum[2]; int size = 0; int rc = 0; - if (swap_crypt_initialized == FALSE) + if (swap_crypt_initialized == FALSE) { swap_crypt_initialize(); + } #if DEVELOPMENT || DEBUG C_SEG_MAKE_WRITEABLE(c_seg); @@ -345,7 +345,7 @@ vm_swap_encrypt(c_segment_t c_seg) rc = xts_encrypt(ptr, size, ptr, iv, &xts_modectx); assert(!rc); - vm_page_encrypt_counter += (size/PAGE_SIZE_64); + vm_page_encrypt_counter += (size / PAGE_SIZE_64); #if DEVELOPMENT || DEBUG C_SEG_WRITE_PROTECT(c_seg); @@ -355,7 +355,7 @@ vm_swap_encrypt(c_segment_t c_seg) void vm_swap_decrypt(c_segment_t c_seg) { - uint8_t *ptr; + uint8_t *ptr; uint8_t *iv; uint64_t ivnum[2]; int size = 0; @@ -376,7 +376,7 @@ vm_swap_decrypt(c_segment_t c_seg) rc = xts_decrypt(ptr, size, ptr, iv, &xts_modectx); assert(!rc); - vm_page_decrypt_counter += (size/PAGE_SIZE_64); + vm_page_decrypt_counter += (size / PAGE_SIZE_64); #if DEVELOPMENT || DEBUG C_SEG_WRITE_PROTECT(c_seg); @@ -388,23 +388,23 @@ vm_swap_decrypt(c_segment_t c_seg) void vm_compressor_swap_init() { - thread_t thread = NULL; + thread_t thread = NULL; lck_grp_attr_setdefault(&vm_swap_data_lock_grp_attr); lck_grp_init(&vm_swap_data_lock_grp, - "vm_swap_data", - &vm_swap_data_lock_grp_attr); + "vm_swap_data", + &vm_swap_data_lock_grp_attr); lck_attr_setdefault(&vm_swap_data_lock_attr); lck_mtx_init_ext(&vm_swap_data_lock, - &vm_swap_data_lock_ext, - &vm_swap_data_lock_grp, - &vm_swap_data_lock_attr); + &vm_swap_data_lock_ext, + &vm_swap_data_lock_grp, + &vm_swap_data_lock_attr); queue_init(&swf_global_queue); - + if (kernel_thread_start_priority((thread_continue_t)vm_swapout_thread, NULL, - BASEPRI_VM, &thread) != KERN_SUCCESS) { + BASEPRI_VM, &thread) != KERN_SUCCESS) { panic("vm_swapout_thread: create failed"); } thread_set_thread_name(thread, "VM_swapout"); @@ -413,7 +413,7 @@ vm_compressor_swap_init() thread_deallocate(thread); if (kernel_thread_start_priority((thread_continue_t)vm_swapfile_create_thread, NULL, - 
BASEPRI_VM, &thread) != KERN_SUCCESS) { + BASEPRI_VM, &thread) != KERN_SUCCESS) { panic("vm_swapfile_create_thread: create failed"); } @@ -421,21 +421,21 @@ vm_compressor_swap_init() thread_deallocate(thread); if (kernel_thread_start_priority((thread_continue_t)vm_swapfile_gc_thread, NULL, - BASEPRI_VM, &thread) != KERN_SUCCESS) { + BASEPRI_VM, &thread) != KERN_SUCCESS) { panic("vm_swapfile_gc_thread: create failed"); } thread_set_thread_name(thread, "VM_swapfile_gc"); thread_deallocate(thread); proc_set_thread_policy_with_tid(kernel_task, thread->thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER2); proc_set_thread_policy_with_tid(kernel_task, thread->thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); #if CONFIG_EMBEDDED /* - * dummy value until the swap file gets created - * when we drive the first c_segment_t to the + * dummy value until the swap file gets created + * when we drive the first c_segment_t to the * swapout queue... at that time we will * know the true size we have to work with */ @@ -470,17 +470,18 @@ c_compressed_record_write(char *buf, int size) #endif -int compaction_swapper_inited = 0; +int compaction_swapper_inited = 0; void vm_compaction_swapper_do_init(void) { - struct vnode *vp; - char *pathname; - int namelen; + struct vnode *vp; + char *pathname; + int namelen; - if (compaction_swapper_inited) + if (compaction_swapper_inited) { return; + } if (vm_compressor_mode != VM_PAGER_COMPRESSOR_WITH_SWAP) { compaction_swapper_inited = 1; @@ -488,8 +489,7 @@ vm_compaction_swapper_do_init(void) } lck_mtx_lock(&vm_swap_data_lock); - if ( !compaction_swapper_inited) { - + if (!compaction_swapper_inited) { namelen = (int)strlen(swapfilename) + SWAPFILENAME_INDEX_LEN + 1; pathname = (char*)kalloc(namelen); memset(pathname, 0, namelen); @@ -498,32 +498,36 @@ vm_compaction_swapper_do_init(void) vm_swapfile_open(pathname, &vp); if (vp) { - if (vnode_pager_isSSD(vp) == FALSE) { - /* + /* * swap files live on an HDD, so let's make sure to start swapping - * much earlier since we're not worried about SSD write-wear and + * much earlier since we're not worried about SSD write-wear and * we have so little write bandwidth to work with * these values were derived experimentally by running the performance - * team's stock test for evaluating HDD performance against various + * team's stock test for evaluating HDD performance against various * combinations and looking at and comparing overall results.
* Note that the > relationship between these 4 values must be maintained */ - if (vm_compressor_minorcompact_threshold_divisor_overridden == 0) - vm_compressor_minorcompact_threshold_divisor = 15; - if (vm_compressor_majorcompact_threshold_divisor_overridden == 0) - vm_compressor_majorcompact_threshold_divisor = 18; - if (vm_compressor_unthrottle_threshold_divisor_overridden == 0) - vm_compressor_unthrottle_threshold_divisor = 24; - if (vm_compressor_catchup_threshold_divisor_overridden == 0) - vm_compressor_catchup_threshold_divisor = 30; + if (vm_compressor_minorcompact_threshold_divisor_overridden == 0) { + vm_compressor_minorcompact_threshold_divisor = 15; + } + if (vm_compressor_majorcompact_threshold_divisor_overridden == 0) { + vm_compressor_majorcompact_threshold_divisor = 18; + } + if (vm_compressor_unthrottle_threshold_divisor_overridden == 0) { + vm_compressor_unthrottle_threshold_divisor = 24; + } + if (vm_compressor_catchup_threshold_divisor_overridden == 0) { + vm_compressor_catchup_threshold_divisor = 30; + } } #if !CONFIG_EMBEDDED vnode_setswapmount(vp); vm_swappin_avail = vnode_getswappin_avail(vp); - if (vm_swappin_avail) + if (vm_swappin_avail) { vm_swappin_enabled = TRUE; + } #endif vm_swapfile_close((uint64_t)pathname, vp); } @@ -543,7 +547,6 @@ vm_swap_consider_defragmenting(int flags) if (compressor_store_stop_compaction == FALSE && !VM_SWAP_BUSY() && (force_defrag || force_reclaim || VM_SWAP_SHOULD_DEFRAGMENT() || VM_SWAP_SHOULD_RECLAIM())) { - if (!vm_swapfile_gc_thread_running || force_defrag || force_reclaim) { lck_mtx_lock(&vm_swap_data_lock); @@ -555,8 +558,9 @@ vm_swap_consider_defragmenting(int flags) vm_swap_force_reclaim = TRUE; } - if (!vm_swapfile_gc_thread_running) + if (!vm_swapfile_gc_thread_running) { thread_wakeup((event_t) &vm_swapfile_gc_needed); + } lck_mtx_unlock(&vm_swap_data_lock); } @@ -573,7 +577,7 @@ int vm_swap_defragment_busy = 0; static void vm_swap_defragment() { - c_segment_t c_seg; + c_segment_t c_seg; /* * have to grab the master lock w/o holding @@ -582,9 +586,8 @@ vm_swap_defragment() PAGE_REPLACEMENT_DISALLOWED(TRUE); lck_mtx_lock_spin_always(c_list_lock); - + while (!queue_empty(&c_swappedout_sparse_list_head)) { - if (compressor_store_stop_compaction == TRUE || VM_SWAP_BUSY()) { vm_swap_defragment_yielded++; break; @@ -623,18 +626,19 @@ vm_swap_defragment() } else { lck_mtx_unlock_always(c_list_lock); - if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) + if (c_seg_swapin(c_seg, TRUE, FALSE) == 0) { lck_mtx_unlock_always(&c_seg->c_lock); + } vm_swap_defragment_swapin++; } PAGE_REPLACEMENT_DISALLOWED(FALSE); - + vm_pageout_io_throttle(); /* * because write waiters have privilege over readers, - * dropping and immediately retaking the master lock will + * dropping and immediately retaking the master lock will * still allow any thread waiting to acquire the * master lock exclusively an opportunity to take it */ @@ -652,8 +656,8 @@ vm_swap_defragment() static void vm_swapfile_create_thread(void) { - clock_sec_t sec; - clock_nsec_t nsec; + clock_sec_t sec; + clock_nsec_t nsec; current_thread()->options |= TH_OPT_VMPRIV; @@ -671,34 +675,37 @@ vm_swapfile_create_thread(void) lck_mtx_lock(&vm_swap_data_lock); - if (hibernate_in_progress_with_pinned_swap == TRUE) + if (hibernate_in_progress_with_pinned_swap == TRUE) { break; + } clock_get_system_nanotime(&sec, &nsec); - if (VM_SWAP_SHOULD_CREATE(sec) == 0) + if (VM_SWAP_SHOULD_CREATE(sec) == 0) { break; + } lck_mtx_unlock(&vm_swap_data_lock); if (vm_swap_create_file() == FALSE) { 
vm_swapfile_last_failed_to_create_ts = sec; HIBLOG("vm_swap_create_file failed @ %lu secs\n", (unsigned long)sec); - - } else + } else { vm_swapfile_last_successful_create_ts = sec; + } } vm_swapfile_create_thread_running = 0; - if (hibernate_in_progress_with_pinned_swap == TRUE) + if (hibernate_in_progress_with_pinned_swap == TRUE) { thread_wakeup((event_t)&hibernate_in_progress_with_pinned_swap); + } assert_wait((event_t)&vm_swapfile_create_needed, THREAD_UNINT); lck_mtx_unlock(&vm_swap_data_lock); thread_block((thread_continue_t)vm_swapfile_create_thread); - + /* NOTREACHED */ } @@ -711,22 +718,21 @@ hibernate_pin_swap(boolean_t start) vm_compaction_swapper_do_init(); if (start == FALSE) { - lck_mtx_lock(&vm_swap_data_lock); hibernate_in_progress_with_pinned_swap = FALSE; lck_mtx_unlock(&vm_swap_data_lock); - return (KERN_SUCCESS); + return KERN_SUCCESS; + } + if (vm_swappin_enabled == FALSE) { + return KERN_SUCCESS; } - if (vm_swappin_enabled == FALSE) - return (KERN_SUCCESS); lck_mtx_lock(&vm_swap_data_lock); hibernate_in_progress_with_pinned_swap = TRUE; - - while (vm_swapfile_create_thread_running || vm_swapfile_gc_thread_running) { + while (vm_swapfile_create_thread_running || vm_swapfile_gc_thread_running) { assert_wait((event_t)&hibernate_in_progress_with_pinned_swap, THREAD_UNINT); lck_mtx_unlock(&vm_swap_data_lock); @@ -740,73 +746,79 @@ hibernate_pin_swap(boolean_t start) lck_mtx_unlock(&vm_swap_data_lock); HIBLOG("hibernate_pin_swap failed - vm_num_swap_files = %d, vm_num_pinned_swap_files = %d\n", - vm_num_swap_files, vm_num_pinned_swap_files); - return (KERN_FAILURE); + vm_num_swap_files, vm_num_pinned_swap_files); + return KERN_FAILURE; } lck_mtx_unlock(&vm_swap_data_lock); while (VM_SWAP_SHOULD_PIN(MAX_SWAP_FILE_SIZE)) { - if (vm_swap_create_file() == FALSE) + if (vm_swap_create_file() == FALSE) { break; + } } - return (KERN_SUCCESS); + return KERN_SUCCESS; } #endif static void vm_swapfile_gc_thread(void) - { - boolean_t need_defragment; - boolean_t need_reclaim; + boolean_t need_defragment; + boolean_t need_reclaim; vm_swapfile_gc_thread_awakened++; vm_swapfile_gc_thread_running = 1; while (TRUE) { - lck_mtx_lock(&vm_swap_data_lock); - - if (hibernate_in_progress_with_pinned_swap == TRUE) + + if (hibernate_in_progress_with_pinned_swap == TRUE) { break; + } - if (VM_SWAP_BUSY() || compressor_store_stop_compaction == TRUE) + if (VM_SWAP_BUSY() || compressor_store_stop_compaction == TRUE) { break; + } need_defragment = FALSE; need_reclaim = FALSE; - if (VM_SWAP_SHOULD_DEFRAGMENT()) + if (VM_SWAP_SHOULD_DEFRAGMENT()) { need_defragment = TRUE; + } if (VM_SWAP_SHOULD_RECLAIM()) { need_defragment = TRUE; need_reclaim = TRUE; } - if (need_defragment == FALSE && need_reclaim == FALSE) + if (need_defragment == FALSE && need_reclaim == FALSE) { break; + } vm_swap_force_defrag = FALSE; vm_swap_force_reclaim = FALSE; lck_mtx_unlock(&vm_swap_data_lock); - if (need_defragment == TRUE) + if (need_defragment == TRUE) { vm_swap_defragment(); - if (need_reclaim == TRUE) + } + if (need_reclaim == TRUE) { vm_swap_reclaim(); + } } vm_swapfile_gc_thread_running = 0; - if (hibernate_in_progress_with_pinned_swap == TRUE) + if (hibernate_in_progress_with_pinned_swap == TRUE) { thread_wakeup((event_t)&hibernate_in_progress_with_pinned_swap); + } assert_wait((event_t)&vm_swapfile_gc_needed, THREAD_UNINT); lck_mtx_unlock(&vm_swap_data_lock); thread_block((thread_continue_t)vm_swapfile_gc_thread); - + /* NOTREACHED */ } @@ -836,18 +848,16 @@ int vm_swapper_entered_T2P = 0; static void 
vm_swapout_thread_throttle_adjust(void) { - - switch(vm_swapout_state) { - + switch (vm_swapout_state) { case VM_SWAPOUT_START: - + vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER2; vm_swapper_entered_T2P++; proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); vm_swapout_limit = VM_SWAPOUT_LIMIT_T2P; vm_swapout_state = VM_SWAPOUT_T2_PASSIVE; @@ -860,9 +870,9 @@ vm_swapout_thread_throttle_adjust(void) vm_swapper_entered_T0P++; proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); vm_swapout_limit = VM_SWAPOUT_LIMIT_T0P; vm_swapout_state = VM_SWAPOUT_T0_PASSIVE; @@ -873,9 +883,9 @@ vm_swapout_thread_throttle_adjust(void) vm_swapper_entered_T1P++; proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); vm_swapout_limit = VM_SWAPOUT_LIMIT_T1P; vm_swapout_state = VM_SWAPOUT_T1_PASSIVE; } @@ -888,48 +898,47 @@ vm_swapout_thread_throttle_adjust(void) vm_swapper_entered_T0P++; proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); vm_swapout_limit = VM_SWAPOUT_LIMIT_T0P; vm_swapout_state = VM_SWAPOUT_T0_PASSIVE; break; } if (swapout_target_age == 0 && hibernate_flushing == FALSE) { - - vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER2; + vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER2; vm_swapper_entered_T2P++; proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); vm_swapout_limit = VM_SWAPOUT_LIMIT_T2P; vm_swapout_state = VM_SWAPOUT_T2_PASSIVE; } - break; + break; case VM_SWAPOUT_T0_PASSIVE: - if (SWAPPER_NEEDS_TO_RETHROTTLE()) { + if (SWAPPER_NEEDS_TO_RETHROTTLE()) { vm_swapper_throttle = THROTTLE_LEVEL_COMPRESSOR_TIER2; vm_swapper_entered_T2P++; proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); + TASK_POLICY_INTERNAL, TASK_POLICY_IO, vm_swapper_throttle); proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - 
TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); vm_swapout_limit = VM_SWAPOUT_LIMIT_T2P; vm_swapout_state = VM_SWAPOUT_T2_PASSIVE; break; } if (SWAPPER_NEEDS_TO_CATCHUP()) { - vm_swapper_entered_T0++; + vm_swapper_entered_T0++; proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_DISABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_DISABLE); vm_swapout_limit = VM_SWAPOUT_LIMIT_T0; vm_swapout_state = VM_SWAPOUT_T0; } @@ -938,10 +947,10 @@ vm_swapout_thread_throttle_adjust(void) case VM_SWAPOUT_T0: if (SWAPPER_HAS_CAUGHTUP()) { - vm_swapper_entered_T0P++; + vm_swapper_entered_T0P++; proc_set_thread_policy_with_tid(kernel_task, vm_swapout_thread_id, - TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); + TASK_POLICY_INTERNAL, TASK_POLICY_PASSIVE_IO, TASK_POLICY_ENABLE); vm_swapout_limit = VM_SWAPOUT_LIMIT_T0P; vm_swapout_state = VM_SWAPOUT_T0_PASSIVE; } @@ -959,11 +968,13 @@ int vm_swapout_soc_done = 0; static struct swapout_io_completion * vm_swapout_find_free_soc(void) -{ int i; +{ + int i; - for (i = 0; i < VM_SWAPOUT_LIMIT_MAX; i++) { - if (vm_swapout_ctx[i].swp_io_busy == 0) - return (&vm_swapout_ctx[i]); + for (i = 0; i < VM_SWAPOUT_LIMIT_MAX; i++) { + if (vm_swapout_ctx[i].swp_io_busy == 0) { + return &vm_swapout_ctx[i]; + } } assert(vm_swapout_soc_busy == VM_SWAPOUT_LIMIT_MAX); @@ -972,12 +983,14 @@ vm_swapout_find_free_soc(void) static struct swapout_io_completion * vm_swapout_find_done_soc(void) -{ int i; +{ + int i; - if (vm_swapout_soc_done) { - for (i = 0; i < VM_SWAPOUT_LIMIT_MAX; i++) { - if (vm_swapout_ctx[i].swp_io_done) - return (&vm_swapout_ctx[i]); + if (vm_swapout_soc_done) { + for (i = 0; i < VM_SWAPOUT_LIMIT_MAX; i++) { + if (vm_swapout_ctx[i].swp_io_done) { + return &vm_swapout_ctx[i]; + } } } return NULL; @@ -986,12 +999,13 @@ vm_swapout_find_done_soc(void) static void vm_swapout_complete_soc(struct swapout_io_completion *soc) { - kern_return_t kr; + kern_return_t kr; - if (soc->swp_io_error) - kr = KERN_FAILURE; - else - kr = KERN_SUCCESS; + if (soc->swp_io_error) { + kr = KERN_FAILURE; + } else { + kr = KERN_SUCCESS; + } lck_mtx_unlock_always(c_list_lock); @@ -1011,9 +1025,9 @@ vm_swapout_complete_soc(struct swapout_io_completion *soc) static void vm_swapout_thread(void) { - uint32_t size = 0; - c_segment_t c_seg = NULL; - kern_return_t kr = KERN_SUCCESS; + uint32_t size = 0; + c_segment_t c_seg = NULL; + kern_return_t kr = KERN_SUCCESS; struct swapout_io_completion *soc; current_thread()->options |= TH_OPT_VMPRIV; @@ -1023,7 +1037,6 @@ vm_swapout_thread(void) lck_mtx_lock_spin_always(c_list_lock); again: while (!queue_empty(&c_swapout_list_head) && vm_swapout_soc_busy < vm_swapout_limit) { - c_seg = (c_segment_t)queue_first(&c_swapout_list_head); lck_mtx_lock_spin_always(&c_seg->c_lock); @@ -1042,12 +1055,13 @@ again: vm_swapout_thread_processed_segments++; size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset)); - + if (size == 0) { assert(c_seg->c_bytes_used == 0); - if (!c_seg->c_on_minorcompact_q) + if (!c_seg->c_on_minorcompact_q) { c_seg_need_delayed_compaction(c_seg, TRUE); + } c_seg_switch_state(c_seg, C_IS_EMPTY, FALSE); lck_mtx_unlock_always(&c_seg->c_lock); @@ -1064,7 +1078,7 @@ again: lck_mtx_unlock_always(c_list_lock); lck_mtx_unlock_always(&c_seg->c_lock); -#if CHECKSUM_THE_SWAP +#if CHECKSUM_THE_SWAP c_seg->cseg_hash = hash_string((char 
*)c_seg->c_store.c_buffer, (int)size); c_seg->cseg_swap_size = size; #endif /* CHECKSUM_THE_SWAP */ @@ -1083,33 +1097,35 @@ again: kr = vm_swap_put((vm_offset_t)c_seg->c_store.c_buffer, &soc->swp_f_offset, size, c_seg, soc); if (kr != KERN_SUCCESS) { - if (soc->swp_io_done) { - lck_mtx_lock_spin_always(c_list_lock); + if (soc->swp_io_done) { + lck_mtx_lock_spin_always(c_list_lock); - soc->swp_io_done = 0; + soc->swp_io_done = 0; vm_swapout_soc_done--; lck_mtx_unlock_always(c_list_lock); } - vm_swapout_finish(c_seg, soc->swp_f_offset, size, kr); + vm_swapout_finish(c_seg, soc->swp_f_offset, size, kr); } else { - soc->swp_io_busy = 1; + soc->swp_io_busy = 1; vm_swapout_soc_busy++; } vm_swapout_thread_throttle_adjust(); vm_pageout_io_throttle(); c_seg_is_empty: - if (c_swapout_count == 0) - vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE); + if (c_swapout_count == 0) { + vm_swap_consider_defragmenting(VM_SWAP_FLAGS_NONE); + } lck_mtx_lock_spin_always(c_list_lock); - if ((soc = vm_swapout_find_done_soc())) - vm_swapout_complete_soc(soc); + if ((soc = vm_swapout_find_done_soc())) { + vm_swapout_complete_soc(soc); + } } if ((soc = vm_swapout_find_done_soc())) { - vm_swapout_complete_soc(soc); + vm_swapout_complete_soc(soc); goto again; } assert_wait((event_t)&c_swapout_list_head, THREAD_UNINT); @@ -1117,7 +1133,7 @@ c_seg_is_empty: lck_mtx_unlock_always(c_list_lock); thread_block((thread_continue_t)vm_swapout_thread); - + /* NOTREACHED */ } @@ -1125,7 +1141,7 @@ c_seg_is_empty: void vm_swapout_iodone(void *io_context, int error) { - struct swapout_io_completion *soc; + struct swapout_io_completion *soc; soc = (struct swapout_io_completion *)io_context; @@ -1134,58 +1150,61 @@ vm_swapout_iodone(void *io_context, int error) soc->swp_io_done = 1; soc->swp_io_error = error; vm_swapout_soc_done++; - + thread_wakeup((event_t)&c_swapout_list_head); - + lck_mtx_unlock_always(c_list_lock); } static void -vm_swapout_finish(c_segment_t c_seg, uint64_t f_offset, uint32_t size, kern_return_t kr) +vm_swapout_finish(c_segment_t c_seg, uint64_t f_offset, uint32_t size, kern_return_t kr) { - PAGE_REPLACEMENT_DISALLOWED(TRUE); if (kr == KERN_SUCCESS) { - kernel_memory_depopulate(compressor_map, (vm_offset_t)c_seg->c_store.c_buffer, size, KMA_COMPRESSOR); + kernel_memory_depopulate(compressor_map, (vm_offset_t)c_seg->c_store.c_buffer, size, KMA_COMPRESSOR); } #if ENCRYPTED_SWAP else { - vm_swap_decrypt(c_seg); + vm_swap_decrypt(c_seg); } #endif /* ENCRYPTED_SWAP */ lck_mtx_lock_spin_always(c_list_lock); lck_mtx_lock_spin_always(&c_seg->c_lock); if (kr == KERN_SUCCESS) { - int new_state = C_ON_SWAPPEDOUT_Q; - boolean_t insert_head = FALSE; + int new_state = C_ON_SWAPPEDOUT_Q; + boolean_t insert_head = FALSE; if (hibernate_flushing == TRUE) { - if (c_seg->c_generation_id >= first_c_segment_to_warm_generation_id && - c_seg->c_generation_id <= last_c_segment_to_warm_generation_id) - insert_head = TRUE; - } else if (C_SEG_ONDISK_IS_SPARSE(c_seg)) - new_state = C_ON_SWAPPEDOUTSPARSE_Q; + if (c_seg->c_generation_id >= first_c_segment_to_warm_generation_id && + c_seg->c_generation_id <= last_c_segment_to_warm_generation_id) { + insert_head = TRUE; + } + } else if (C_SEG_ONDISK_IS_SPARSE(c_seg)) { + new_state = C_ON_SWAPPEDOUTSPARSE_Q; + } c_seg_switch_state(c_seg, new_state, insert_head); c_seg->c_store.c_swap_handle = f_offset; VM_STAT_INCR_BY(swapouts, size >> PAGE_SHIFT); - - if (c_seg->c_bytes_used) - OSAddAtomic64(-c_seg->c_bytes_used, &compressor_bytes_used); + + if (c_seg->c_bytes_used) { + 
OSAddAtomic64(-c_seg->c_bytes_used, &compressor_bytes_used); + } } else { - if (c_seg->c_overage_swap == TRUE) { - c_seg->c_overage_swap = FALSE; + if (c_seg->c_overage_swap == TRUE) { + c_seg->c_overage_swap = FALSE; c_overage_swapped_count--; } c_seg_switch_state(c_seg, C_ON_AGE_Q, FALSE); - if (!c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) - c_seg_need_delayed_compaction(c_seg, TRUE); + if (!c_seg->c_on_minorcompact_q && C_SEG_UNUSED_BYTES(c_seg) >= PAGE_SIZE) { + c_seg_need_delayed_compaction(c_seg, TRUE); + } } assert(c_seg->c_busy_swapping); assert(c_seg->c_busy); @@ -1203,11 +1222,11 @@ vm_swapout_finish(c_segment_t c_seg, uint64_t f_offset, uint32_t size, kern_ret boolean_t vm_swap_create_file() { - uint64_t size = 0; - int namelen = 0; - boolean_t swap_file_created = FALSE; - boolean_t swap_file_reuse = FALSE; - boolean_t swap_file_pin = FALSE; + uint64_t size = 0; + int namelen = 0; + boolean_t swap_file_created = FALSE; + boolean_t swap_file_reuse = FALSE; + boolean_t swap_file_pin = FALSE; struct swapfile *swf = NULL; /* @@ -1220,9 +1239,9 @@ vm_swap_create_file() vm_compaction_swapper_do_init(); /* - * Any swapfile structure ready for re-use? - */ - + * Any swapfile structure ready for re-use? + */ + lck_mtx_lock(&vm_swap_data_lock); swf = (struct swapfile*) queue_first(&swf_global_queue); @@ -1231,16 +1250,15 @@ vm_swap_create_file() if (swf->swp_flags == SWAP_REUSE) { swap_file_reuse = TRUE; break; - } + } swf = (struct swapfile*) queue_next(&swf->swp_queue); } lck_mtx_unlock(&vm_swap_data_lock); if (swap_file_reuse == FALSE) { - namelen = (int)strlen(swapfilename) + SWAPFILENAME_INDEX_LEN + 1; - + swf = (struct swapfile*) kalloc(sizeof *swf); memset(swf, 0, sizeof(*swf)); @@ -1257,7 +1275,7 @@ vm_swap_create_file() if (swf->swp_vp == NULL) { if (swap_file_reuse == FALSE) { - kfree(swf->swp_path, swf->swp_pathlen); + kfree(swf->swp_path, swf->swp_pathlen); kfree(swf, sizeof *swf); } return FALSE; @@ -1267,11 +1285,9 @@ vm_swap_create_file() size = MAX_SWAP_FILE_SIZE; while (size >= MIN_SWAP_FILE_SIZE) { - swap_file_pin = VM_SWAP_SHOULD_PIN(size); if (vm_swapfile_preallocate(swf->swp_vp, &size, &swap_file_pin) == 0) { - int num_bytes_for_bitmap = 0; swap_file_created = TRUE; @@ -1281,7 +1297,7 @@ vm_swap_create_file() swf->swp_nseginuse = 0; swf->swp_free_hint = 0; - num_bytes_for_bitmap = MAX((swf->swp_nsegs >> 3) , 1); + num_bytes_for_bitmap = MAX((swf->swp_nsegs >> 3), 1); /* * Allocate a bitmap that describes the * number of segments held by this swapfile. 
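/*
 * A minimal illustrative sketch (not part of the patch) of the per-swapfile
 * segment bitmap allocated above. vm_swap_create_file() divides a swapfile
 * into swp_nsegs segments of COMPRESSED_SWAP_CHUNK_SIZE bytes and tracks each
 * with one bit; vm_swap_put() and vm_swap_free_now() later locate a segment's
 * bit with the same shift/modulo arithmetic. The helper names here are
 * hypothetical, and chunk_size merely stands in for COMPRESSED_SWAP_CHUNK_SIZE
 * (C_SEG_BUFSIZE), whose value is build-dependent; only the bit math is taken
 * from the surrounding code.
 */
#include <stdint.h>

static unsigned int
bitmap_bytes_for(uint64_t swapfile_size, uint64_t chunk_size)
{
	unsigned int nsegs = (unsigned int)(swapfile_size / chunk_size);

	/* mirrors MAX((swp_nsegs >> 3), 1): one bit per segment, at least one byte */
	return (nsegs >> 3) ? (nsegs >> 3) : 1;
}

static int
segment_test_and_set(uint8_t *bitmap, unsigned int segidx)
{
	unsigned int byte_for_segidx = segidx >> 3;    /* which byte holds the bit */
	unsigned int offset_within_byte = segidx % 8;  /* which bit within that byte */

	if (bitmap[byte_for_segidx] & (1 << offset_within_byte)) {
		return 1;    /* segment already in use; caller keeps scanning */
	}
	bitmap[byte_for_segidx] |= (1 << offset_within_byte);
	return 0;            /* segment claimed */
}

static void
segment_clear(uint8_t *bitmap, unsigned int segidx)
{
	bitmap[segidx >> 3] &= ~(1 << (segidx % 8));   /* as in vm_swap_free_now() */
}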
@@ -1297,8 +1313,9 @@ vm_swap_create_file() * will return ENOTSUP if trim isn't supported * and 0 if it is */ - if (vnode_trim_list(swf->swp_vp, NULL, FALSE) == 0) + if (vnode_trim_list(swf->swp_vp, NULL, FALSE) == 0) { swp_trim_supported = TRUE; + } lck_mtx_lock(&vm_swap_data_lock); @@ -1307,7 +1324,7 @@ vm_swap_create_file() if (swap_file_reuse == FALSE) { queue_enter(&swf_global_queue, swf, struct swapfile*, swp_queue); } - + vm_num_swap_files++; vm_swapfile_total_segs_alloced += swf->swp_nsegs; @@ -1323,27 +1340,25 @@ vm_swap_create_file() thread_wakeup((event_t) &vm_num_swap_files); #if CONFIG_EMBEDDED if (vm_num_swap_files == 1) { - c_overage_swapped_limit = (uint32_t)size / C_SEG_BUFSIZE; - if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { c_overage_swapped_limit /= 2; + } } #endif break; } else { - size = size / 2; } } if (swap_file_created == FALSE) { - vm_swapfile_close((uint64_t)(swf->swp_path), swf->swp_vp); swf->swp_vp = NULL; if (swap_file_reuse == FALSE) { - kfree(swf->swp_path, swf->swp_pathlen); + kfree(swf->swp_path, swf->swp_pathlen); kfree(swf, sizeof *swf); } } @@ -1355,8 +1370,8 @@ kern_return_t vm_swap_get(c_segment_t c_seg, uint64_t f_offset, uint64_t size) { struct swapfile *swf = NULL; - uint64_t file_offset = 0; - int retval = 0; + uint64_t file_offset = 0; + int retval = 0; assert(c_seg->c_store.c_buffer); @@ -1364,7 +1379,8 @@ vm_swap_get(c_segment_t c_seg, uint64_t f_offset, uint64_t size) swf = vm_swapfile_for_handle(f_offset); - if (swf == NULL || ( !(swf->swp_flags & SWAP_READY) && !(swf->swp_flags & SWAP_RECLAIM))) { + if (swf == NULL || (!(swf->swp_flags & SWAP_READY) && !(swf->swp_flags & SWAP_RECLAIM))) { + vm_swap_get_failures++; retval = 1; goto done; } @@ -1381,10 +1397,11 @@ vm_swap_get(c_segment_t c_seg, uint64_t f_offset, uint64_t size) #if DEVELOPMENT || DEBUG C_SEG_WRITE_PROTECT(c_seg); #endif - if (retval == 0) + if (retval == 0) { VM_STAT_INCR_BY(swapins, size >> PAGE_SHIFT); - else + } else { vm_swap_get_failures++; + } /* * Free this slot in the swap structure. 
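/*
 * A minimal illustrative sketch (not part of the patch) of the 64-bit swap
 * handle that vm_swap_put() produces and vm_swap_get() consumes. Per
 * vm_swapfile_for_handle() and the SWAP_SLOT_MASK / SWAP_DEVICE_SHIFT
 * definitions in vm_compressor_backing_store.h, bits 0-32 carry the byte
 * offset within the swapfile and the bits above carry the swapfile's index.
 * The pack helper is an assumed inverse of the decode shown earlier; both
 * helper names are hypothetical.
 */
#include <stdint.h>

#define SKETCH_SLOT_MASK    0x1FFFFFFFFULL  /* SWAP_SLOT_MASK */
#define SKETCH_DEVICE_SHIFT 33              /* SWAP_DEVICE_SHIFT */

static uint64_t
swap_handle_pack(unsigned int swapfile_index, uint64_t file_offset)
{
	/* file_offset must fit in the low 33 bits */
	return ((uint64_t)swapfile_index << SKETCH_DEVICE_SHIFT) |
	       (file_offset & SKETCH_SLOT_MASK);
}

static void
swap_handle_unpack(uint64_t f_offset, unsigned int *swapfile_index,
    uint64_t *file_offset)
{
	*file_offset = f_offset & SKETCH_SLOT_MASK;                        /* offset into the swapfile */
	*swapfile_index = (unsigned int)(f_offset >> SKETCH_DEVICE_SHIFT); /* which swapfile */
}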
@@ -1395,34 +1412,34 @@ vm_swap_get(c_segment_t c_seg, uint64_t f_offset, uint64_t size) swf->swp_io_count--; if ((swf->swp_flags & SWAP_WANTED) && swf->swp_io_count == 0) { - swf->swp_flags &= ~SWAP_WANTED; thread_wakeup((event_t) &swf->swp_flags); } done: lck_mtx_unlock(&vm_swap_data_lock); - if (retval == 0) + if (retval == 0) { return KERN_SUCCESS; - else + } else { return KERN_FAILURE; + } } kern_return_t vm_swap_put(vm_offset_t addr, uint64_t *f_offset, uint32_t size, c_segment_t c_seg, struct swapout_io_completion *soc) { - unsigned int segidx = 0; + unsigned int segidx = 0; struct swapfile *swf = NULL; - uint64_t file_offset = 0; - uint64_t swapfile_index = 0; - unsigned int byte_for_segidx = 0; - unsigned int offset_within_byte = 0; - boolean_t swf_eligible = FALSE; - boolean_t waiting = FALSE; - boolean_t retried = FALSE; - int error = 0; - clock_sec_t sec; - clock_nsec_t nsec; + uint64_t file_offset = 0; + uint64_t swapfile_index = 0; + unsigned int byte_for_segidx = 0; + unsigned int offset_within_byte = 0; + boolean_t swf_eligible = FALSE; + boolean_t waiting = FALSE; + boolean_t retried = FALSE; + int error = 0; + clock_sec_t sec; + clock_nsec_t nsec; void *upl_ctx = NULL; if (addr == 0 || f_offset == NULL) { @@ -1433,24 +1450,21 @@ retry: swf = (struct swapfile*) queue_first(&swf_global_queue); - while(queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) { - + while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) { segidx = swf->swp_free_hint; - swf_eligible = (swf->swp_flags & SWAP_READY) && (swf->swp_nseginuse < swf->swp_nsegs); + swf_eligible = (swf->swp_flags & SWAP_READY) && (swf->swp_nseginuse < swf->swp_nsegs); if (swf_eligible) { - - while(segidx < swf->swp_nsegs) { - + while (segidx < swf->swp_nsegs) { byte_for_segidx = segidx >> 3; offset_within_byte = segidx % 8; - + if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) { segidx++; continue; } - + (swf->swp_bitmap)[byte_for_segidx] |= (1 << offset_within_byte); file_offset = segidx * COMPRESSED_SWAP_CHUNK_SIZE; @@ -1463,18 +1477,19 @@ retry: clock_get_system_nanotime(&sec, &nsec); - if (VM_SWAP_SHOULD_CREATE(sec) && !vm_swapfile_create_thread_running) + if (VM_SWAP_SHOULD_CREATE(sec) && !vm_swapfile_create_thread_running) { thread_wakeup((event_t) &vm_swapfile_create_needed); + } lck_mtx_unlock(&vm_swap_data_lock); - + goto issue_io; } } swf = (struct swapfile*) queue_next(&swf->swp_queue); } assert(queue_end(&swf_global_queue, (queue_entry_t) swf)); - + /* * we've run out of swap segments, but may not * be in a position to immediately create a new swap @@ -1492,14 +1507,16 @@ retry: */ clock_get_system_nanotime(&sec, &nsec); - if (VM_SWAP_SHOULD_CREATE(sec) && !vm_swapfile_create_thread_running) + if (VM_SWAP_SHOULD_CREATE(sec) && !vm_swapfile_create_thread_running) { thread_wakeup((event_t) &vm_swapfile_create_needed); + } if (hibernate_flushing == FALSE || VM_SWAP_SHOULD_CREATE(sec)) { waiting = TRUE; - assert_wait_timeout((event_t) &vm_num_swap_files, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC); - } else + assert_wait_timeout((event_t) &vm_num_swap_files, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC); + } else { hibernate_no_swapspace = TRUE; + } lck_mtx_unlock(&vm_swap_data_lock); @@ -1511,11 +1528,11 @@ retry: goto retry; } } - vm_swap_put_failures++; + vm_swap_put_failures_no_swap_file++; return KERN_FAILURE; -issue_io: +issue_io: assert(c_seg->c_busy_swapping); assert(c_seg->c_busy); assert(!c_seg->c_on_minorcompact_q); @@ -1526,7 +1543,7 @@ issue_io: soc->swp_c_seg 
= c_seg; soc->swp_c_size = size; - soc->swp_swf = swf; + soc->swp_swf = swf; soc->swp_io_error = 0; soc->swp_io_done = 0; @@ -1535,8 +1552,9 @@ issue_io: } error = vm_swapfile_io(swf->swp_vp, file_offset, addr, (int) (size / PAGE_SIZE_64), SWAP_WRITE, upl_ctx); - if (error || upl_ctx == NULL) - return (vm_swap_put_finish(swf, f_offset, error)); + if (error || upl_ctx == NULL) { + return vm_swap_put_finish(swf, f_offset, error); + } return KERN_SUCCESS; } @@ -1549,7 +1567,6 @@ vm_swap_put_finish(struct swapfile *swf, uint64_t *f_offset, int error) swf->swp_io_count--; if ((swf->swp_flags & SWAP_WANTED) && swf->swp_io_count == 0) { - swf->swp_flags &= ~SWAP_WANTED; thread_wakeup((event_t) &swf->swp_flags); } @@ -1568,23 +1585,21 @@ vm_swap_put_finish(struct swapfile *swf, uint64_t *f_offset, int error) static void vm_swap_free_now(struct swapfile *swf, uint64_t f_offset) { - uint64_t file_offset = 0; - unsigned int segidx = 0; + uint64_t file_offset = 0; + unsigned int segidx = 0; if ((swf->swp_flags & SWAP_READY) || (swf->swp_flags & SWAP_RECLAIM)) { - unsigned int byte_for_segidx = 0; unsigned int offset_within_byte = 0; file_offset = (f_offset & SWAP_SLOT_MASK); segidx = (unsigned int) (file_offset / COMPRESSED_SWAP_CHUNK_SIZE); - + byte_for_segidx = segidx >> 3; offset_within_byte = segidx % 8; if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) { - (swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte); swf->swp_csegs[segidx] = NULL; @@ -1596,8 +1611,9 @@ vm_swap_free_now(struct swapfile *swf, uint64_t f_offset) swf->swp_free_hint = segidx; } } - if (VM_SWAP_SHOULD_RECLAIM() && !vm_swapfile_gc_thread_running) + if (VM_SWAP_SHOULD_RECLAIM() && !vm_swapfile_gc_thread_running) { thread_wakeup((event_t) &vm_swapfile_gc_needed); + } } } @@ -1611,18 +1627,18 @@ vm_swap_free(uint64_t f_offset) { struct swapfile *swf = NULL; struct trim_list *tl = NULL; - clock_sec_t sec; - clock_nsec_t nsec; + clock_sec_t sec; + clock_nsec_t nsec; - if (swp_trim_supported == TRUE) + if (swp_trim_supported == TRUE) { tl = kalloc(sizeof(struct trim_list)); + } lck_mtx_lock(&vm_swap_data_lock); swf = vm_swapfile_for_handle(f_offset); if (swf && (swf->swp_flags & (SWAP_READY | SWAP_RECLAIM))) { - if (swp_trim_supported == FALSE || (swf->swp_flags & SWAP_RECLAIM)) { /* * don't delay the free if the underlying disk doesn't support @@ -1646,29 +1662,30 @@ vm_swap_free(uint64_t f_offset) if (VM_SWAP_SHOULD_TRIM(swf) && !vm_swapfile_create_thread_running) { clock_get_system_nanotime(&sec, &nsec); - if (sec > dont_trim_until_ts) + if (sec > dont_trim_until_ts) { thread_wakeup((event_t) &vm_swapfile_create_needed); + } } vm_swap_free_delayed_count++; } done: lck_mtx_unlock(&vm_swap_data_lock); - if (tl != NULL) + if (tl != NULL) { kfree(tl, sizeof(struct trim_list)); -} + } +} static void vm_swap_wait_on_trim_handling_in_progress() { while (delayed_trim_handling_in_progress == TRUE) { - assert_wait((event_t) &delayed_trim_handling_in_progress, THREAD_UNINT); lck_mtx_unlock(&vm_swap_data_lock); - + thread_block(THREAD_CONTINUE_NULL); - + lck_mtx_lock(&vm_swap_data_lock); } } @@ -1702,9 +1719,7 @@ vm_swap_handle_delayed_trims(boolean_t force_now) swf = (struct swapfile*) queue_first(&swf_global_queue); while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) { - if ((swf->swp_flags & SWAP_READY) && (force_now == TRUE || VM_SWAP_SHOULD_TRIM(swf))) { - assert(!(swf->swp_flags & SWAP_RECLAIM)); vm_swap_do_delayed_trim(swf); } @@ -1715,11 +1730,11 @@ vm_swap_handle_delayed_trims(boolean_t 
force_now) delayed_trim_handling_in_progress = FALSE; thread_wakeup((event_t) &delayed_trim_handling_in_progress); - if (VM_SWAP_SHOULD_RECLAIM() && !vm_swapfile_gc_thread_running) + if (VM_SWAP_SHOULD_RECLAIM() && !vm_swapfile_gc_thread_running) { thread_wakeup((event_t) &vm_swapfile_gc_needed); + } lck_mtx_unlock(&vm_swap_data_lock); - } static void @@ -1736,23 +1751,22 @@ vm_swap_do_delayed_trim(struct swapfile *swf) lck_mtx_unlock(&vm_swap_data_lock); vnode_trim_list(swf->swp_vp, tl_head, TRUE); - + while ((tl = tl_head) != NULL) { - unsigned int segidx = 0; - unsigned int byte_for_segidx = 0; - unsigned int offset_within_byte = 0; + unsigned int segidx = 0; + unsigned int byte_for_segidx = 0; + unsigned int offset_within_byte = 0; lck_mtx_lock(&vm_swap_data_lock); segidx = (unsigned int) (tl->tl_offset / COMPRESSED_SWAP_CHUNK_SIZE); - + byte_for_segidx = segidx >> 3; offset_within_byte = segidx % 8; if ((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) { - (swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte); - + swf->swp_csegs[segidx] = NULL; swf->swp_nseginuse--; @@ -1767,7 +1781,7 @@ vm_swap_do_delayed_trim(struct swapfile *swf) tl_head = tl->tl_next; kfree(tl, sizeof(struct trim_list)); - } + } } @@ -1777,23 +1791,23 @@ vm_swap_flush() return; } -int vm_swap_reclaim_yielded = 0; +int vm_swap_reclaim_yielded = 0; void vm_swap_reclaim(void) { - vm_offset_t addr = 0; - unsigned int segidx = 0; - uint64_t f_offset = 0; + vm_offset_t addr = 0; + unsigned int segidx = 0; + uint64_t f_offset = 0; struct swapfile *swf = NULL; struct swapfile *smallest_swf = NULL; - unsigned int min_nsegs = 0; - unsigned int byte_for_segidx = 0; - unsigned int offset_within_byte = 0; - uint32_t c_size = 0; + unsigned int min_nsegs = 0; + unsigned int byte_for_segidx = 0; + unsigned int offset_within_byte = 0; + uint32_t c_size = 0; + + c_segment_t c_seg = NULL; - c_segment_t c_seg = NULL; - if (kernel_memory_allocate(compressor_map, (vm_offset_t *)(&addr), C_SEG_BUFSIZE, 0, KMA_KOBJECT, VM_KERN_MEMORY_COMPRESSOR) != KERN_SUCCESS) { panic("vm_swap_reclaim: kernel_memory_allocate failed\n"); } @@ -1819,17 +1833,16 @@ vm_swap_reclaim(void) smallest_swf = NULL; while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) { - if ((swf->swp_flags & SWAP_READY) && (swf->swp_nseginuse <= min_nsegs)) { - smallest_swf = swf; min_nsegs = swf->swp_nseginuse; - } + } swf = (struct swapfile*) queue_next(&swf->swp_queue); } - - if (smallest_swf == NULL) + + if (smallest_swf == NULL) { goto done; + } swf = smallest_swf; @@ -1838,7 +1851,6 @@ vm_swap_reclaim(void) swf->swp_flags |= SWAP_RECLAIM; if (swf->swp_delayed_trim_count) { - lck_mtx_unlock(&vm_swap_data_lock); vm_swap_do_delayed_trim(swf); @@ -1848,23 +1860,21 @@ vm_swap_reclaim(void) segidx = 0; while (segidx < swf->swp_nsegs) { - -ReTry_for_cseg: +ReTry_for_cseg: /* * Wait for outgoing I/Os. 
*/ while (swf->swp_io_count) { - swf->swp_flags |= SWAP_WANTED; assert_wait((event_t) &swf->swp_flags, THREAD_UNINT); lck_mtx_unlock(&vm_swap_data_lock); - + thread_block(THREAD_CONTINUE_NULL); - + lck_mtx_lock(&vm_swap_data_lock); } - if (compressor_store_stop_compaction == TRUE || VM_SWAP_SHOULD_ABORT_RECLAIM() || VM_SWAP_BUSY()) { + if (compressor_store_stop_compaction == TRUE || VM_SWAP_SHOULD_ABORT_RECLAIM() || VM_SWAP_BUSY()) { vm_swap_reclaim_yielded++; break; } @@ -1873,7 +1883,6 @@ ReTry_for_cseg: offset_within_byte = segidx % 8; if (((swf->swp_bitmap)[byte_for_segidx] & (1 << offset_within_byte)) == 0) { - segidx++; continue; } @@ -1894,16 +1903,16 @@ ReTry_for_cseg: * this c_segment no longer exists. */ c_seg->c_wanted = 1; - + assert_wait((event_t) (c_seg), THREAD_UNINT); lck_mtx_unlock_always(&c_seg->c_lock); - + lck_mtx_unlock(&vm_swap_data_lock); - + thread_block(THREAD_CONTINUE_NULL); lck_mtx_lock(&vm_swap_data_lock); - + goto ReTry_for_cseg; } (swf->swp_bitmap)[byte_for_segidx] &= ~(1 << offset_within_byte); @@ -1915,10 +1924,10 @@ ReTry_for_cseg: swf->swp_nseginuse--; vm_swapfile_total_segs_used--; - + lck_mtx_unlock(&vm_swap_data_lock); - assert(C_SEG_IS_ONDISK(c_seg)); + assert(C_SEG_IS_ONDISK(c_seg)); C_SEG_BUSY(c_seg); c_seg->c_busy_swapping = 1; @@ -1926,13 +1935,12 @@ ReTry_for_cseg: c_seg_trim_tail(c_seg); #endif c_size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset)); - + assert(c_size <= C_SEG_BUFSIZE && c_size); lck_mtx_unlock_always(&c_seg->c_lock); if (vm_swapfile_io(swf->swp_vp, f_offset, addr, (int)(c_size / PAGE_SIZE_64), SWAP_READ, NULL)) { - /* * reading the data back in failed, so convert c_seg * to a swapped in c_segment that contains no data @@ -1948,7 +1956,7 @@ ReTry_for_cseg: VM_STAT_INCR_BY(swapins, c_size >> PAGE_SHIFT); if (vm_swap_put(addr, &f_offset, c_size, c_seg, NULL)) { - vm_offset_t c_buffer; + vm_offset_t c_buffer; /* * the put failed, so convert c_seg to a fully swapped in c_segment @@ -1975,7 +1983,7 @@ ReTry_for_cseg: VM_STAT_INCR_BY(swapouts, c_size >> PAGE_SHIFT); lck_mtx_lock_spin_always(&c_seg->c_lock); - + assert(C_SEG_IS_ONDISK(c_seg)); /* * The c_seg will now know about the new location on disk. @@ -1987,28 +1995,25 @@ ReTry_for_cseg: swap_io_failed: assert(c_seg->c_busy); C_SEG_WAKEUP_DONE(c_seg); - + lck_mtx_unlock_always(&c_seg->c_lock); lck_mtx_lock(&vm_swap_data_lock); } if (swf->swp_nseginuse) { - swf->swp_flags &= ~SWAP_RECLAIM; swf->swp_flags |= SWAP_READY; goto done; } /* - * We don't remove this inactive swf from the queue. + * We don't remove this inactive swf from the queue. * That way, we can re-use it when needed again and * preserve the namespace. The delayed_trim processing * is also dependent on us not removing swfs from the queue. 
- */ + */ //queue_remove(&swf_global_queue, swf, struct swapfile*, swp_queue); - vm_num_swap_files--; - vm_swapfile_total_segs_alloced -= swf->swp_nsegs; lck_mtx_unlock(&vm_swap_data_lock); @@ -2017,7 +2022,7 @@ swap_io_failed: kfree(swf->swp_csegs, swf->swp_nsegs * sizeof(c_segment_t)); kfree(swf->swp_bitmap, MAX((swf->swp_nsegs >> 3), 1)); - + lck_mtx_lock(&vm_swap_data_lock); if (swf->swp_flags & SWAP_PINNED) { @@ -2025,12 +2030,14 @@ swap_io_failed: vm_swappin_avail += swf->swp_size; } - swf->swp_vp = NULL; + swf->swp_vp = NULL; swf->swp_size = 0; swf->swp_free_hint = 0; swf->swp_nsegs = 0; swf->swp_flags = SWAP_REUSE; + vm_num_swap_files--; + done: thread_wakeup((event_t) &swf->swp_flags); lck_mtx_unlock(&vm_swap_data_lock); @@ -2062,46 +2069,48 @@ vm_swap_get_used_space(void) uint64_t vm_swap_get_free_space(void) { - return (vm_swap_get_total_space() - vm_swap_get_used_space()); + return vm_swap_get_total_space() - vm_swap_get_used_space(); } int vm_swap_low_on_space(void) { - - if (vm_num_swap_files == 0 && vm_swapfile_can_be_created == FALSE) - return (0); + if (vm_num_swap_files == 0 && vm_swapfile_can_be_created == FALSE) { + return 0; + } if (((vm_swapfile_total_segs_alloced - vm_swapfile_total_segs_used) < ((unsigned int)VM_SWAPFILE_HIWATER_SEGS) / 8)) { + if (vm_num_swap_files == 0 && !SWAPPER_NEEDS_TO_UNTHROTTLE()) { + return 0; + } - if (vm_num_swap_files == 0 && !SWAPPER_NEEDS_TO_UNTHROTTLE()) - return (0); - - if (vm_swapfile_last_failed_to_create_ts >= vm_swapfile_last_successful_create_ts) - return (1); + if (vm_swapfile_last_failed_to_create_ts >= vm_swapfile_last_successful_create_ts) { + return 1; + } } - return (0); + return 0; } boolean_t vm_swap_files_pinned(void) { - boolean_t result; + boolean_t result; - if (vm_swappin_enabled == FALSE) - return (TRUE); + if (vm_swappin_enabled == FALSE) { + return TRUE; + } - result = (vm_num_pinned_swap_files == vm_num_swap_files); + result = (vm_num_pinned_swap_files == vm_num_swap_files); - return (result); + return result; } #if CONFIG_FREEZE boolean_t vm_swap_max_budget(uint64_t *freeze_daily_budget) { - boolean_t use_device_value = FALSE; + boolean_t use_device_value = FALSE; struct swapfile *swf = NULL; if (vm_num_swap_files) { @@ -2110,10 +2119,8 @@ vm_swap_max_budget(uint64_t *freeze_daily_budget) swf = (struct swapfile*) queue_first(&swf_global_queue); if (swf) { - while(queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) { - + while (queue_end(&swf_global_queue, (queue_entry_t)swf) == FALSE) { if (swf->swp_flags == SWAP_READY) { - assert(swf->swp_vp); if (vm_swap_vol_get_budget(swf->swp_vp, freeze_daily_budget) == 0) { @@ -2126,9 +2133,7 @@ vm_swap_max_budget(uint64_t *freeze_daily_budget) } lck_mtx_unlock(&vm_swap_data_lock); - } else { - /* * This block is used for the initial budget value before any swap files * are created. We create a temp swap file to get the budget. @@ -2139,7 +2144,6 @@ vm_swap_max_budget(uint64_t *freeze_daily_budget) vm_swapfile_open(swapfilename, &temp_vp); if (temp_vp) { - if (vm_swap_vol_get_budget(temp_vp, freeze_daily_budget) == 0) { use_device_value = TRUE; } diff --git a/osfmk/vm/vm_compressor_backing_store.h b/osfmk/vm/vm_compressor_backing_store.h index 2bad2d6f1..abc0c3be2 100644 --- a/osfmk/vm/vm_compressor_backing_store.h +++ b/osfmk/vm/vm_compressor_backing_store.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -40,59 +40,58 @@ #if CONFIG_EMBEDDED -#define MIN_SWAP_FILE_SIZE (64 * 1024 * 1024) +#define MIN_SWAP_FILE_SIZE (64 * 1024 * 1024) -#define MAX_SWAP_FILE_SIZE (128 * 1024 * 1024) +#define MAX_SWAP_FILE_SIZE (128 * 1024 * 1024) #else /* CONFIG_EMBEDDED */ -#define MIN_SWAP_FILE_SIZE (256 * 1024 * 1024) +#define MIN_SWAP_FILE_SIZE (256 * 1024 * 1024) -#define MAX_SWAP_FILE_SIZE (1 * 1024 * 1024 * 1024) +#define MAX_SWAP_FILE_SIZE (1 * 1024 * 1024 * 1024) #endif /* CONFIG_EMBEDDED */ -#define COMPRESSED_SWAP_CHUNK_SIZE (C_SEG_BUFSIZE) +#define COMPRESSED_SWAP_CHUNK_SIZE (C_SEG_BUFSIZE) -#define VM_SWAPFILE_HIWATER_SEGS (MIN_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE) +#define VM_SWAPFILE_HIWATER_SEGS (MIN_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE) -#define SWAPFILE_RECLAIM_THRESHOLD_SEGS ((17 * (MAX_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE)) / 10) -#define SWAPFILE_RECLAIM_MINIMUM_SEGS ((13 * (MAX_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE)) / 10) +#define SWAPFILE_RECLAIM_THRESHOLD_SEGS ((17 * (MAX_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE)) / 10) +#define SWAPFILE_RECLAIM_MINIMUM_SEGS ((13 * (MAX_SWAP_FILE_SIZE / COMPRESSED_SWAP_CHUNK_SIZE)) / 10) -#define SWAP_FILE_NAME "/private/var/vm/swapfile" -#define SWAPFILENAME_LEN (int)(strlen(SWAP_FILE_NAME)) +#define SWAP_FILE_NAME "/private/var/vm/swapfile" +#define SWAPFILENAME_LEN (int)(strlen(SWAP_FILE_NAME)) -#define SWAP_SLOT_MASK 0x1FFFFFFFF -#define SWAP_DEVICE_SHIFT 33 +#define SWAP_SLOT_MASK 0x1FFFFFFFF +#define SWAP_DEVICE_SHIFT 33 -extern int vm_num_swap_files; +extern int vm_num_swap_files; struct swapfile; -lck_grp_attr_t vm_swap_data_lock_grp_attr; -lck_grp_t vm_swap_data_lock_grp; -lck_attr_t vm_swap_data_lock_attr; -lck_mtx_ext_t vm_swap_data_lock_ext; -lck_mtx_t vm_swap_data_lock; +lck_grp_attr_t vm_swap_data_lock_grp_attr; +lck_grp_t vm_swap_data_lock_grp; +lck_attr_t vm_swap_data_lock_attr; +lck_mtx_ext_t vm_swap_data_lock_ext; +lck_mtx_t vm_swap_data_lock; void vm_swap_init(void); boolean_t vm_swap_create_file(void); struct swapout_io_completion { - - int swp_io_busy; - int swp_io_done; - int swp_io_error; + int swp_io_busy; + int swp_io_done; + int swp_io_error; - uint32_t swp_c_size; - c_segment_t swp_c_seg; + uint32_t swp_c_size; + c_segment_t swp_c_seg; - struct swapfile *swp_swf; - uint64_t swp_f_offset; + struct swapfile *swp_swf; + uint64_t swp_f_offset; - struct upl_io_completion swp_upl_ctx; + struct upl_io_completion swp_upl_ctx; }; void 
vm_swapout_iodone(void *, int); @@ -124,4 +123,3 @@ int vm_swap_vol_get_budget(struct vnode* vp, uint64_t *freeze_daily_budget); #if RECORD_THE_COMPRESSED_DATA extern int vm_record_file_write(struct vnode *vp, uint64_t offset, char *buf, int size); #endif - diff --git a/osfmk/vm/vm_compressor_pager.c b/osfmk/vm/vm_compressor_pager.c index aa9d4662e..6eda97684 100644 --- a/osfmk/vm/vm_compressor_pager.c +++ b/osfmk/vm/vm_compressor_pager.c @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -77,46 +77,46 @@ void compressor_memory_object_reference(memory_object_t mem_obj); void compressor_memory_object_deallocate(memory_object_t mem_obj); kern_return_t compressor_memory_object_init( - memory_object_t mem_obj, - memory_object_control_t control, + memory_object_t mem_obj, + memory_object_control_t control, memory_object_cluster_size_t pager_page_size); kern_return_t compressor_memory_object_terminate(memory_object_t mem_obj); kern_return_t compressor_memory_object_data_request( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, - __unused vm_prot_t protection_required, - memory_object_fault_info_t fault_info); + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t length, + __unused vm_prot_t protection_required, + memory_object_fault_info_t fault_info); kern_return_t compressor_memory_object_data_return( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t size, - __unused memory_object_offset_t *resid_offset, - __unused int *io_error, - __unused boolean_t dirty, - __unused boolean_t kernel_copy, - __unused int upl_flags); + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t size, + __unused memory_object_offset_t *resid_offset, + __unused int *io_error, + __unused boolean_t dirty, + __unused boolean_t kernel_copy, + __unused int upl_flags); kern_return_t compressor_memory_object_data_initialize( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t size); + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t size); kern_return_t compressor_memory_object_data_unlock( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t size, - __unused vm_prot_t desired_access); + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t size, + __unused vm_prot_t desired_access); kern_return_t compressor_memory_object_synchronize( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t length, - __unused vm_sync_t flags); + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_size_t length, + __unused vm_sync_t flags); kern_return_t compressor_memory_object_map( - __unused memory_object_t mem_obj, - __unused vm_prot_t prot); + __unused memory_object_t mem_obj, + __unused vm_prot_t prot); kern_return_t compressor_memory_object_last_unmap(memory_object_t mem_obj); kern_return_t compressor_memory_object_data_reclaim( - __unused memory_object_t mem_obj, - __unused boolean_t reclaim_backing_store); + __unused memory_object_t mem_obj, + __unused boolean_t reclaim_backing_store); const struct memory_object_pager_ops compressor_pager_ops = { compressor_memory_object_reference, @@ -137,13 +137,13 @@ const struct memory_object_pager_ops compressor_pager_ops = { /* internal data structures */ struct { - uint64_t data_returns; - uint64_t data_requests; - uint64_t put; - uint64_t get; - uint64_t state_clr; - uint64_t state_get; - uint64_t transfer; + uint64_t data_returns; + uint64_t data_requests; + uint64_t put; + uint64_t get; + uint64_t state_clr; + uint64_t state_get; + uint64_t transfer; } compressor_pager_stats; typedef int compressor_slot_t; @@ -153,32 +153,32 @@ typedef struct compressor_pager { struct memory_object cpgr_hdr; /* pager-specific data */ - lck_mtx_t cpgr_lock; - unsigned int cpgr_references; - 
unsigned int cpgr_num_slots; - unsigned int cpgr_num_slots_occupied; + lck_mtx_t cpgr_lock; + unsigned int cpgr_references; + unsigned int cpgr_num_slots; + unsigned int cpgr_num_slots_occupied; union { - compressor_slot_t cpgr_eslots[2]; /* embedded slots */ - compressor_slot_t *cpgr_dslots; /* direct slots */ - compressor_slot_t **cpgr_islots; /* indirect slots */ + compressor_slot_t cpgr_eslots[2]; /* embedded slots */ + compressor_slot_t *cpgr_dslots; /* direct slots */ + compressor_slot_t **cpgr_islots; /* indirect slots */ } cpgr_slots; } *compressor_pager_t; -#define compressor_pager_lookup(_mem_obj_, _cpgr_) \ - MACRO_BEGIN \ - if (_mem_obj_ == NULL || \ - _mem_obj_->mo_pager_ops != &compressor_pager_ops) { \ - _cpgr_ = NULL; \ - } else { \ - _cpgr_ = (compressor_pager_t) _mem_obj_; \ - } \ +#define compressor_pager_lookup(_mem_obj_, _cpgr_) \ + MACRO_BEGIN \ + if (_mem_obj_ == NULL || \ + _mem_obj_->mo_pager_ops != &compressor_pager_ops) { \ + _cpgr_ = NULL; \ + } else { \ + _cpgr_ = (compressor_pager_t) _mem_obj_; \ + } \ MACRO_END zone_t compressor_pager_zone; -lck_grp_t compressor_pager_lck_grp; -lck_grp_attr_t compressor_pager_lck_grp_attr; -lck_attr_t compressor_pager_lck_attr; +lck_grp_t compressor_pager_lck_grp; +lck_grp_attr_t compressor_pager_lck_grp_attr; +lck_attr_t compressor_pager_lck_attr; #define compressor_pager_lock(_cpgr_) \ lck_mtx_lock(&(_cpgr_)->cpgr_lock) @@ -189,27 +189,27 @@ lck_attr_t compressor_pager_lck_attr; #define compressor_pager_lock_destroy(_cpgr_) \ lck_mtx_destroy(&(_cpgr_)->cpgr_lock, &compressor_pager_lck_grp) -#define COMPRESSOR_SLOTS_CHUNK_SIZE (512) -#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t)) +#define COMPRESSOR_SLOTS_CHUNK_SIZE (512) +#define COMPRESSOR_SLOTS_PER_CHUNK (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof (compressor_slot_t)) /* forward declarations */ unsigned int compressor_pager_slots_chunk_free(compressor_slot_t *chunk, - int num_slots, - int flags, - int *failures); + int num_slots, + int flags, + int *failures); void compressor_pager_slot_lookup( - compressor_pager_t pager, - boolean_t do_alloc, - memory_object_offset_t offset, - compressor_slot_t **slot_pp); + compressor_pager_t pager, + boolean_t do_alloc, + memory_object_offset_t offset, + compressor_slot_t **slot_pp); kern_return_t compressor_memory_object_init( - memory_object_t mem_obj, - memory_object_control_t control, + memory_object_t mem_obj, + memory_object_control_t control, __unused memory_object_cluster_size_t pager_page_size) { - compressor_pager_t pager; + compressor_pager_t pager; assert(pager_page_size == PAGE_SIZE); @@ -218,8 +218,9 @@ compressor_memory_object_init( compressor_pager_lookup(mem_obj, pager); compressor_pager_lock(pager); - if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) + if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) { panic("compressor_memory_object_init: bad request"); + } pager->cpgr_hdr.mo_control = control; compressor_pager_unlock(pager); @@ -230,9 +231,9 @@ compressor_memory_object_init( kern_return_t compressor_memory_object_synchronize( __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t length, - __unused vm_sync_t flags) + __unused memory_object_offset_t offset, + __unused memory_object_size_t length, + __unused vm_sync_t flags) { panic("compressor_memory_object_synchronize: memory_object_synchronize no longer supported\n"); return KERN_FAILURE; @@ -240,8 +241,8 @@ compressor_memory_object_synchronize( 
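A compressor pager tracks one compressor_slot_t per page and sizes its slot storage in three tiers: one- or two-page objects keep slots inline in cpgr_eslots, objects up to one chunk (512 bytes, i.e. 128 slots where int is 4 bytes) use a single direct array, and larger objects use an array of chunk pointers. A minimal user-space model of that layout, with calloc standing in for kalloc+bzero and all locking omitted:

#include <stdio.h>
#include <stdlib.h>

typedef int compressor_slot_t;

#define COMPRESSOR_SLOTS_CHUNK_SIZE (512)
#define COMPRESSOR_SLOTS_PER_CHUNK  (COMPRESSOR_SLOTS_CHUNK_SIZE / sizeof(compressor_slot_t))

struct model_pager {
	unsigned int num_slots;
	union {
		compressor_slot_t eslots[2];   /* embedded: 1- or 2-page objects */
		compressor_slot_t *dslots;     /* direct: up to one chunk of slots */
		compressor_slot_t **islots;    /* indirect: array of chunk pointers */
	} slots;
};

static void
model_pager_init(struct model_pager *p, unsigned int num_slots)
{
	/* Ceiling division: how many chunks cover num_slots slots. */
	size_t num_chunks =
	    (num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK;

	p->num_slots = num_slots;
	if (num_chunks > 1) {
		p->slots.islots = calloc(num_chunks, sizeof(p->slots.islots[0]));
	} else if (num_slots > 2) {
		p->slots.dslots = calloc(num_slots, sizeof(p->slots.dslots[0]));
	} else {
		p->slots.eslots[0] = p->slots.eslots[1] = 0;
	}
}

int main(void)
{
	struct model_pager p;
	model_pager_init(&p, 1000);    /* ceil(1000 / 128) = 8 chunks -> indirect */
	printf("%zu slots per chunk\n", COMPRESSOR_SLOTS_PER_CHUNK);
	free(p.slots.islots);
	return 0;
}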
kern_return_t compressor_memory_object_map( - __unused memory_object_t mem_obj, - __unused vm_prot_t prot) + __unused memory_object_t mem_obj, + __unused vm_prot_t prot) { panic("compressor_memory_object_map"); return KERN_FAILURE; @@ -249,7 +250,7 @@ compressor_memory_object_map( kern_return_t compressor_memory_object_last_unmap( - __unused memory_object_t mem_obj) + __unused memory_object_t mem_obj) { panic("compressor_memory_object_last_unmap"); return KERN_FAILURE; @@ -257,8 +258,8 @@ compressor_memory_object_last_unmap( kern_return_t compressor_memory_object_data_reclaim( - __unused memory_object_t mem_obj, - __unused boolean_t reclaim_backing_store) + __unused memory_object_t mem_obj, + __unused boolean_t reclaim_backing_store) { panic("compressor_memory_object_data_reclaim"); return KERN_FAILURE; @@ -266,12 +267,12 @@ compressor_memory_object_data_reclaim( kern_return_t compressor_memory_object_terminate( - memory_object_t mem_obj) + memory_object_t mem_obj) { - memory_object_control_t control; - compressor_pager_t pager; + memory_object_control_t control; + compressor_pager_t pager; - /* + /* * control port is a receive right, not a send right. */ @@ -299,13 +300,14 @@ compressor_memory_object_terminate( void compressor_memory_object_reference( - memory_object_t mem_obj) + memory_object_t mem_obj) { - compressor_pager_t pager; + compressor_pager_t pager; compressor_pager_lookup(mem_obj, pager); - if (pager == NULL) + if (pager == NULL) { return; + } compressor_pager_lock(pager); assert(pager->cpgr_references > 0); @@ -315,10 +317,10 @@ compressor_memory_object_reference( void compressor_memory_object_deallocate( - memory_object_t mem_obj) + memory_object_t mem_obj) { - compressor_pager_t pager; - unsigned int num_slots_freed; + compressor_pager_t pager; + unsigned int num_slots_freed; /* * Because we don't give out multiple first references @@ -328,8 +330,9 @@ compressor_memory_object_deallocate( */ compressor_pager_lookup(mem_obj, pager); - if (pager == NULL) + if (pager == NULL) { return; + } compressor_pager_lock(pager); if (--pager->cpgr_references > 0) { @@ -341,8 +344,9 @@ compressor_memory_object_deallocate( * We shouldn't get a deallocation call * when the kernel has the object cached. 
*/ - if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) + if (pager->cpgr_hdr.mo_control != MEMORY_OBJECT_CONTROL_NULL) { panic("compressor_memory_object_deallocate(): bad request"); + } /* * Unlock the pager (though there should be no one @@ -355,45 +359,45 @@ compressor_memory_object_deallocate( int i; compressor_slot_t *chunk; - num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK -1) / COMPRESSOR_SLOTS_PER_CHUNK; + num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK; if (num_chunks > 1) { /* we have an array of chunks */ for (i = 0; i < num_chunks; i++) { chunk = pager->cpgr_slots.cpgr_islots[i]; if (chunk != NULL) { num_slots_freed = - compressor_pager_slots_chunk_free( - chunk, - COMPRESSOR_SLOTS_PER_CHUNK, - 0, - NULL); + compressor_pager_slots_chunk_free( + chunk, + COMPRESSOR_SLOTS_PER_CHUNK, + 0, + NULL); pager->cpgr_slots.cpgr_islots[i] = NULL; kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); } } kfree(pager->cpgr_slots.cpgr_islots, - num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0])); + num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0])); pager->cpgr_slots.cpgr_islots = NULL; } else if (pager->cpgr_num_slots > 2) { chunk = pager->cpgr_slots.cpgr_dslots; num_slots_freed = - compressor_pager_slots_chunk_free( - chunk, - pager->cpgr_num_slots, - 0, - NULL); + compressor_pager_slots_chunk_free( + chunk, + pager->cpgr_num_slots, + 0, + NULL); pager->cpgr_slots.cpgr_dslots = NULL; kfree(chunk, - (pager->cpgr_num_slots * - sizeof (pager->cpgr_slots.cpgr_dslots[0]))); + (pager->cpgr_num_slots * + sizeof(pager->cpgr_slots.cpgr_dslots[0]))); } else { chunk = &pager->cpgr_slots.cpgr_eslots[0]; num_slots_freed = - compressor_pager_slots_chunk_free( - chunk, - pager->cpgr_num_slots, - 0, - NULL); + compressor_pager_slots_chunk_free( + chunk, + pager->cpgr_num_slots, + 0, + NULL); } compressor_pager_lock_destroy(pager); @@ -402,27 +406,28 @@ compressor_memory_object_deallocate( kern_return_t compressor_memory_object_data_request( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, - __unused vm_prot_t protection_required, - __unused memory_object_fault_info_t fault_info) + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t length, + __unused vm_prot_t protection_required, + __unused memory_object_fault_info_t fault_info) { - compressor_pager_t pager; - kern_return_t kr; - compressor_slot_t *slot_p; - + compressor_pager_t pager; + kern_return_t kr; + compressor_slot_t *slot_p; + compressor_pager_stats.data_requests++; /* * Request must be on a page boundary and a multiple of pages. 
*/ - if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) + if ((offset & PAGE_MASK) != 0 || (length & PAGE_MASK) != 0) { panic("compressor_memory_object_data_request(): bad alignment"); + } - if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { + if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) { panic("%s: offset 0x%llx overflow\n", - __FUNCTION__, (uint64_t) offset); + __FUNCTION__, (uint64_t) offset); return KERN_FAILURE; } @@ -464,19 +469,19 @@ compressor_memory_object_data_request( kern_return_t compressor_memory_object_data_initialize( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t size) + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t size) { - compressor_pager_t pager; - memory_object_offset_t cur_offset; + compressor_pager_t pager; + memory_object_offset_t cur_offset; compressor_pager_lookup(mem_obj, pager); compressor_pager_lock(pager); for (cur_offset = offset; - cur_offset < offset + size; - cur_offset += PAGE_SIZE) { + cur_offset < offset + size; + cur_offset += PAGE_SIZE) { panic("do a data_return() if slot for this page is empty"); } @@ -487,10 +492,10 @@ compressor_memory_object_data_initialize( kern_return_t compressor_memory_object_data_unlock( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t size, - __unused vm_prot_t desired_access) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t size, + __unused vm_prot_t desired_access) { panic("compressor_memory_object_data_unlock()"); return KERN_FAILURE; @@ -500,14 +505,14 @@ compressor_memory_object_data_unlock( /*ARGSUSED*/ kern_return_t compressor_memory_object_data_return( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t size, - __unused memory_object_offset_t *resid_offset, - __unused int *io_error, - __unused boolean_t dirty, - __unused boolean_t kernel_copy, - __unused int upl_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t size, + __unused memory_object_offset_t *resid_offset, + __unused int *io_error, + __unused boolean_t dirty, + __unused boolean_t kernel_copy, + __unused int upl_flags) { panic("compressor: data_return"); return KERN_FAILURE; @@ -516,25 +521,25 @@ compressor_memory_object_data_return( /* * Routine: default_pager_memory_object_create * Purpose: - * Handle requests for memory objects from the - * kernel. + * Handle requests for memory objects from the + * kernel. * Notes: - * Because we only give out the default memory - * manager port to the kernel, we don't have to - * be so paranoid about the contents. + * Because we only give out the default memory + * manager port to the kernel, we don't have to + * be so paranoid about the contents. 
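The (uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE) guard that recurs through these hunks rejects any offset whose page number does not fit the pager's 32-bit slot indexing (cpgr_num_slots is an unsigned int). A standalone version, assuming a 4 KiB page for the demonstration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL    /* assumed for the demonstration */

/* True when offset/PAGE_SIZE loses bits in a uint32_t: the cast
 * truncates, and comparing with the full 64-bit quotient exposes it. */
static int
page_num_overflows(uint64_t offset)
{
	return (uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE);
}

int main(void)
{
	/* 2^32 pages of 4 KiB = 16 TiB is the first offset that overflows. */
	printf("%d\n", page_num_overflows((1ULL << 44) - PAGE_SIZE));  /* 0 */
	printf("%d\n", page_num_overflows(1ULL << 44));                /* 1 */
	return 0;
}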
*/ kern_return_t compressor_memory_object_create( - memory_object_size_t new_size, - memory_object_t *new_mem_obj) + memory_object_size_t new_size, + memory_object_t *new_mem_obj) { - compressor_pager_t pager; - int num_chunks; + compressor_pager_t pager; + int num_chunks; - if ((uint32_t)(new_size/PAGE_SIZE) != (new_size/PAGE_SIZE)) { + if ((uint32_t)(new_size / PAGE_SIZE) != (new_size / PAGE_SIZE)) { /* 32-bit overflow for number of pages */ panic("%s: size 0x%llx overflow\n", - __FUNCTION__, (uint64_t) new_size); + __FUNCTION__, (uint64_t) new_size); return KERN_INVALID_ARGUMENT; } @@ -545,16 +550,16 @@ compressor_memory_object_create( compressor_pager_lock_init(pager); pager->cpgr_references = 1; - pager->cpgr_num_slots = (uint32_t)(new_size/PAGE_SIZE); + pager->cpgr_num_slots = (uint32_t)(new_size / PAGE_SIZE); pager->cpgr_num_slots_occupied = 0; num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK; if (num_chunks > 1) { - pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0])); - bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof (pager->cpgr_slots.cpgr_islots[0])); + pager->cpgr_slots.cpgr_islots = kalloc(num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0])); + bzero(pager->cpgr_slots.cpgr_islots, num_chunks * sizeof(pager->cpgr_slots.cpgr_islots[0])); } else if (pager->cpgr_num_slots > 2) { - pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0])); - bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof (pager->cpgr_slots.cpgr_dslots[0])); + pager->cpgr_slots.cpgr_dslots = kalloc(pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0])); + bzero(pager->cpgr_slots.cpgr_dslots, pager->cpgr_num_slots * sizeof(pager->cpgr_slots.cpgr_dslots[0])); } else { pager->cpgr_slots.cpgr_eslots[0] = 0; pager->cpgr_slots.cpgr_eslots[1] = 0; @@ -575,30 +580,33 @@ compressor_memory_object_create( unsigned int compressor_pager_slots_chunk_free( - compressor_slot_t *chunk, - int num_slots, - int flags, - int *failures) + compressor_slot_t *chunk, + int num_slots, + int flags, + int *failures) { int i; int retval; unsigned int num_slots_freed; - if (failures) + if (failures) { *failures = 0; + } num_slots_freed = 0; for (i = 0; i < num_slots; i++) { if (chunk[i] != 0) { retval = vm_compressor_free(&chunk[i], flags); - if (retval == 0) + if (retval == 0) { num_slots_freed++; - else { - if (retval == -2) + } else { + if (retval == -2) { assert(flags & C_DONT_BLOCK); + } - if (failures) + if (failures) { *failures += 1; + } } } } @@ -607,23 +615,23 @@ compressor_pager_slots_chunk_free( void compressor_pager_slot_lookup( - compressor_pager_t pager, - boolean_t do_alloc, - memory_object_offset_t offset, - compressor_slot_t **slot_pp) + compressor_pager_t pager, + boolean_t do_alloc, + memory_object_offset_t offset, + compressor_slot_t **slot_pp) { - int num_chunks; - uint32_t page_num; - int chunk_idx; - int slot_idx; - compressor_slot_t *chunk; - compressor_slot_t *t_chunk; - - page_num = (uint32_t)(offset/PAGE_SIZE); - if (page_num != (offset/PAGE_SIZE)) { + int num_chunks; + uint32_t page_num; + int chunk_idx; + int slot_idx; + compressor_slot_t *chunk; + compressor_slot_t *t_chunk; + + page_num = (uint32_t)(offset / PAGE_SIZE); + if (page_num != (offset / PAGE_SIZE)) { /* overflow */ panic("%s: offset 0x%llx overflow\n", - __FUNCTION__, (uint64_t) offset); + __FUNCTION__, (uint64_t) offset); *slot_pp = NULL; return; } @@ -645,7 +653,6 @@ 
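compressor_pager_slots_chunk_free() frees every occupied slot it can and counts those it cannot, so that vm_compressor_pager_reap_pages() knows whether a chunk emptied completely and its storage may be released. A user-space model of that accounting; fake_compressor_free() is a hypothetical stand-in for vm_compressor_free(), contrived to fail on odd slot values so the C_DONT_BLOCK path is exercised:

#include <assert.h>
#include <stdio.h>

#define C_DONT_BLOCK 0x01

/* Hypothetical stand-in: frees a slot, or returns -2 when it would
 * have to block and C_DONT_BLOCK is set (here: odd slot values). */
static int
fake_compressor_free(int *slot, int flags)
{
	if ((flags & C_DONT_BLOCK) && (*slot & 0x1)) {
		return -2;    /* would block: slot left intact */
	}
	*slot = 0;
	return 0;
}

/* Mirrors compressor_pager_slots_chunk_free(): returns slots freed
 * and reports failures so the caller knows if the chunk is empty. */
static unsigned int
chunk_free(int *chunk, int n, int flags, int *failures)
{
	unsigned int freed = 0;
	if (failures) {
		*failures = 0;
	}
	for (int i = 0; i < n; i++) {
		if (chunk[i] == 0) {
			continue;    /* empty slot: nothing to do */
		}
		int r = fake_compressor_free(&chunk[i], flags);
		if (r == 0) {
			freed++;
		} else {
			if (r == -2) {
				assert(flags & C_DONT_BLOCK);
			}
			if (failures) {
				*failures += 1;
			}
		}
	}
	return freed;
}

int main(void)
{
	int chunk[4] = { 2, 3, 0, 4 };
	int failures;
	unsigned int freed = chunk_free(chunk, 4, C_DONT_BLOCK, &failures);
	printf("freed=%u failures=%d\n", freed, failures);  /* freed=2 failures=1 */
	return 0;
}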
compressor_pager_slot_lookup( compressor_pager_lock(pager); if ((chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]) == NULL) { - /* * On some platforms, the memory stores from * the bzero(t_chunk) above might not have been @@ -661,9 +668,10 @@ compressor_pager_slot_lookup( t_chunk = NULL; } compressor_pager_unlock(pager); - - if (t_chunk) + + if (t_chunk) { kfree(t_chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); + } } if (chunk == NULL) { *slot_pp = NULL; @@ -687,9 +695,9 @@ vm_compressor_pager_init(void) lck_grp_init(&compressor_pager_lck_grp, "compressor_pager", &compressor_pager_lck_grp_attr); lck_attr_setdefault(&compressor_pager_lck_attr); - compressor_pager_zone = zinit(sizeof (struct compressor_pager), - 10000 * sizeof (struct compressor_pager), - 8192, "compressor_pager"); + compressor_pager_zone = zinit(sizeof(struct compressor_pager), + 10000 * sizeof(struct compressor_pager), + 8192, "compressor_pager"); zone_change(compressor_pager_zone, Z_CALLERACCT, FALSE); zone_change(compressor_pager_zone, Z_NOENCRYPT, TRUE); @@ -698,19 +706,15 @@ vm_compressor_pager_init(void) kern_return_t vm_compressor_pager_put( - memory_object_t mem_obj, - memory_object_offset_t offset, - ppnum_t ppnum, - void **current_chead, - char *scratch_buf, - int *compressed_count_delta_p) + memory_object_t mem_obj, + memory_object_offset_t offset, + ppnum_t ppnum, + void **current_chead, + char *scratch_buf, + int *compressed_count_delta_p) { - compressor_pager_t pager; - compressor_slot_t *slot_p; -#if __arm__ || __arm64__ - unsigned int prev_wimg = VM_WIMG_DEFAULT; - boolean_t set_cache_attr = FALSE; -#endif + compressor_pager_t pager; + compressor_slot_t *slot_p; compressor_pager_stats.put++; @@ -726,10 +730,10 @@ vm_compressor_pager_put( compressor_pager_lookup(mem_obj, pager); - if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { + if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) { /* overflow */ panic("%s: offset 0x%llx overflow\n", - __FUNCTION__, (uint64_t) offset); + __FUNCTION__, (uint64_t) offset); return KERN_RESOURCE_SHORTAGE; } @@ -751,19 +755,6 @@ vm_compressor_pager_put( *compressed_count_delta_p -= 1; } -#if __arm__ || __arm64__ - /* - * cacheability should be set to the system default (usually writeback) - * during compressor operations, both for performance and correctness, - * e.g. to avoid compressor codec faults generated by an unexpected - * memory type. 
- */ - prev_wimg = pmap_cache_attributes(ppnum) & VM_WIMG_MASK; - - if ((prev_wimg != VM_WIMG_DEFAULT) && (prev_wimg != VM_WIMG_USE_DEFAULT)) { - set_cache_attr = TRUE; - pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT); - } /* * If the compressor operation succeeds, we presumably don't need to * undo any previous WIMG update, as all live mappings should be @@ -771,15 +762,8 @@ vm_compressor_pager_put( */ if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) { - if (set_cache_attr) - pmap_set_cache_attributes(ppnum, prev_wimg); - return KERN_RESOURCE_SHORTAGE; - } -#else - if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) { - return KERN_RESOURCE_SHORTAGE; + return KERN_RESOURCE_SHORTAGE; } -#endif *compressed_count_delta_p += 1; return KERN_SUCCESS; @@ -788,24 +772,24 @@ vm_compressor_pager_put( kern_return_t vm_compressor_pager_get( - memory_object_t mem_obj, - memory_object_offset_t offset, - ppnum_t ppnum, - int *my_fault_type, - int flags, - int *compressed_count_delta_p) + memory_object_t mem_obj, + memory_object_offset_t offset, + ppnum_t ppnum, + int *my_fault_type, + int flags, + int *compressed_count_delta_p) { - compressor_pager_t pager; - kern_return_t kr; - compressor_slot_t *slot_p; - + compressor_pager_t pager; + kern_return_t kr; + compressor_slot_t *slot_p; + compressor_pager_stats.get++; *compressed_count_delta_p = 0; - if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { + if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) { panic("%s: offset 0x%llx overflow\n", - __FUNCTION__, (uint64_t) offset); + __FUNCTION__, (uint64_t) offset); return KERN_MEMORY_ERROR; } @@ -825,40 +809,20 @@ vm_compressor_pager_get( kr = KERN_SUCCESS; } *my_fault_type = DBG_COMPRESSOR_FAULT; - - if (kr == KERN_SUCCESS) { - int retval; -#if __arm__ || __arm64__ - unsigned int prev_wimg = VM_WIMG_DEFAULT; - boolean_t set_cache_attr = FALSE; - /* - * cacheability should be set to the system default (usually writeback) - * during compressor operations, both for performance and correctness, - * e.g. to avoid compressor codec faults generated by an unexpected - * memory type. 
- */ - prev_wimg = pmap_cache_attributes(ppnum) & VM_WIMG_MASK; + if (kr == KERN_SUCCESS) { + int retval; - if ((prev_wimg != VM_WIMG_DEFAULT) && (prev_wimg != VM_WIMG_USE_DEFAULT)) { - set_cache_attr = TRUE; - pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT); - } -#endif /* get the page from the compressor */ retval = vm_compressor_get(ppnum, slot_p, flags); - if (retval == -1) + if (retval == -1) { kr = KERN_MEMORY_FAILURE; - else if (retval == 1) + } else if (retval == 1) { *my_fault_type = DBG_COMPRESSOR_SWAPIN_FAULT; - else if (retval == -2) { + } else if (retval == -2) { assert((flags & C_DONT_BLOCK)); kr = KERN_FAILURE; } -#if __arm__ || __arm64__ - if (set_cache_attr) - pmap_set_cache_attributes(ppnum, prev_wimg); -#endif } if (kr == KERN_SUCCESS) { @@ -879,21 +843,21 @@ vm_compressor_pager_get( unsigned int vm_compressor_pager_state_clr( - memory_object_t mem_obj, - memory_object_offset_t offset) + memory_object_t mem_obj, + memory_object_offset_t offset) { - compressor_pager_t pager; - compressor_slot_t *slot_p; - unsigned int num_slots_freed; - + compressor_pager_t pager; + compressor_slot_t *slot_p; + unsigned int num_slots_freed; + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); compressor_pager_stats.state_clr++; - if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { + if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) { /* overflow */ panic("%s: offset 0x%llx overflow\n", - __FUNCTION__, (uint64_t) offset); + __FUNCTION__, (uint64_t) offset); return 0; } @@ -914,20 +878,20 @@ vm_compressor_pager_state_clr( vm_external_state_t vm_compressor_pager_state_get( - memory_object_t mem_obj, - memory_object_offset_t offset) + memory_object_t mem_obj, + memory_object_offset_t offset) { - compressor_pager_t pager; - compressor_slot_t *slot_p; + compressor_pager_t pager; + compressor_slot_t *slot_p; assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); - + compressor_pager_stats.state_get++; - if ((uint32_t)(offset/PAGE_SIZE) != (offset/PAGE_SIZE)) { + if ((uint32_t)(offset / PAGE_SIZE) != (offset / PAGE_SIZE)) { /* overflow */ panic("%s: offset 0x%llx overflow\n", - __FUNCTION__, (uint64_t) offset); + __FUNCTION__, (uint64_t) offset); return VM_EXTERNAL_STATE_ABSENT; } @@ -950,37 +914,38 @@ vm_compressor_pager_state_get( unsigned int vm_compressor_pager_reap_pages( - memory_object_t mem_obj, - int flags) + memory_object_t mem_obj, + int flags) { - compressor_pager_t pager; - int num_chunks; - int failures; - int i; - compressor_slot_t *chunk; - unsigned int num_slots_freed; + compressor_pager_t pager; + int num_chunks; + int failures; + int i; + compressor_slot_t *chunk; + unsigned int num_slots_freed; compressor_pager_lookup(mem_obj, pager); - if (pager == NULL) + if (pager == NULL) { return 0; + } compressor_pager_lock(pager); /* reap the compressor slots */ num_slots_freed = 0; - num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK -1) / COMPRESSOR_SLOTS_PER_CHUNK; + num_chunks = (pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / COMPRESSOR_SLOTS_PER_CHUNK; if (num_chunks > 1) { /* we have an array of chunks */ for (i = 0; i < num_chunks; i++) { chunk = pager->cpgr_slots.cpgr_islots[i]; if (chunk != NULL) { num_slots_freed += - compressor_pager_slots_chunk_free( - chunk, - COMPRESSOR_SLOTS_PER_CHUNK, - flags, - &failures); + compressor_pager_slots_chunk_free( + chunk, + COMPRESSOR_SLOTS_PER_CHUNK, + flags, + &failures); if (failures == 0) { pager->cpgr_slots.cpgr_islots[i] = NULL; kfree(chunk, COMPRESSOR_SLOTS_CHUNK_SIZE); @@ -990,19 +955,19 @@ 
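vm_compressor_pager_get() folds vm_compressor_get()'s int result into a kern_return_t plus a fault type: -1 becomes KERN_MEMORY_FAILURE, 1 retags the fault as a compressor swap-in, and -2 (only legal under C_DONT_BLOCK) becomes KERN_FAILURE. A sketch of that mapping; the enum values are illustrative stand-ins for the real kern_return.h and kdebug constants:

#include <stdio.h>

/* Stand-in constants; the kernel's actual values differ. */
enum { KERN_SUCCESS, KERN_FAILURE, KERN_MEMORY_FAILURE };
enum { DBG_COMPRESSOR_FAULT = 1, DBG_COMPRESSOR_SWAPIN_FAULT = 2 };

/* Maps vm_compressor_get()'s result the way the hunk above does:
 * -1 = decompression failed, 1 = segment had to be swapped in first,
 * -2 = would block under C_DONT_BLOCK. */
static int
map_get_result(int retval, int *fault_type)
{
	*fault_type = DBG_COMPRESSOR_FAULT;
	if (retval == -1) {
		return KERN_MEMORY_FAILURE;
	}
	if (retval == 1) {
		*fault_type = DBG_COMPRESSOR_SWAPIN_FAULT;
		return KERN_SUCCESS;
	}
	if (retval == -2) {
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

int main(void)
{
	int ft;
	printf("%d\n", map_get_result(1, &ft));   /* success, swap-in fault type */
	printf("%d\n", map_get_result(-1, &ft));  /* memory failure */
	return 0;
}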
vm_compressor_pager_reap_pages( } else if (pager->cpgr_num_slots > 2) { chunk = pager->cpgr_slots.cpgr_dslots; num_slots_freed += - compressor_pager_slots_chunk_free( - chunk, - pager->cpgr_num_slots, - flags, - NULL); + compressor_pager_slots_chunk_free( + chunk, + pager->cpgr_num_slots, + flags, + NULL); } else { chunk = &pager->cpgr_slots.cpgr_eslots[0]; num_slots_freed += - compressor_pager_slots_chunk_free( - chunk, - pager->cpgr_num_slots, - flags, - NULL); + compressor_pager_slots_chunk_free( + chunk, + pager->cpgr_num_slots, + flags, + NULL); } compressor_pager_unlock(pager); @@ -1012,14 +977,14 @@ vm_compressor_pager_reap_pages( void vm_compressor_pager_transfer( - memory_object_t dst_mem_obj, - memory_object_offset_t dst_offset, - memory_object_t src_mem_obj, - memory_object_offset_t src_offset) + memory_object_t dst_mem_obj, + memory_object_offset_t dst_offset, + memory_object_t src_mem_obj, + memory_object_offset_t src_offset) { - compressor_pager_t src_pager, dst_pager; - compressor_slot_t *src_slot_p, *dst_slot_p; - + compressor_pager_t src_pager, dst_pager; + compressor_slot_t *src_slot_p, *dst_slot_p; + compressor_pager_stats.transfer++; /* find the compressor slot for the destination */ @@ -1027,7 +992,7 @@ vm_compressor_pager_transfer( compressor_pager_lookup(dst_mem_obj, dst_pager); assert(dst_offset / PAGE_SIZE < dst_pager->cpgr_num_slots); compressor_pager_slot_lookup(dst_pager, TRUE, (uint32_t) dst_offset, - &dst_slot_p); + &dst_slot_p); assert(dst_slot_p != NULL); assert(*dst_slot_p == 0); @@ -1036,7 +1001,7 @@ vm_compressor_pager_transfer( compressor_pager_lookup(src_mem_obj, src_pager); assert(src_offset / PAGE_SIZE < src_pager->cpgr_num_slots); compressor_pager_slot_lookup(src_pager, FALSE, (uint32_t) src_offset, - &src_slot_p); + &src_slot_p); assert(src_slot_p != NULL); assert(*src_slot_p != 0); @@ -1048,20 +1013,20 @@ vm_compressor_pager_transfer( memory_object_offset_t vm_compressor_pager_next_compressed( - memory_object_t mem_obj, - memory_object_offset_t offset) + memory_object_t mem_obj, + memory_object_offset_t offset) { - compressor_pager_t pager; - uint32_t num_chunks; - uint32_t page_num; - uint32_t chunk_idx; - uint32_t slot_idx; - compressor_slot_t *chunk; + compressor_pager_t pager; + uint32_t num_chunks; + uint32_t page_num; + uint32_t chunk_idx; + uint32_t slot_idx; + compressor_slot_t *chunk; compressor_pager_lookup(mem_obj, pager); page_num = (uint32_t)(offset / PAGE_SIZE); - if (page_num != (offset/PAGE_SIZE)) { + if (page_num != (offset / PAGE_SIZE)) { /* overflow */ return (memory_object_offset_t) -1; } @@ -1071,7 +1036,7 @@ vm_compressor_pager_next_compressed( } num_chunks = ((pager->cpgr_num_slots + COMPRESSOR_SLOTS_PER_CHUNK - 1) / - COMPRESSOR_SLOTS_PER_CHUNK); + COMPRESSOR_SLOTS_PER_CHUNK); if (num_chunks == 1) { if (pager->cpgr_num_slots > 2) { @@ -1080,12 +1045,12 @@ vm_compressor_pager_next_compressed( chunk = &pager->cpgr_slots.cpgr_eslots[0]; } for (slot_idx = page_num; - slot_idx < pager->cpgr_num_slots; - slot_idx++) { + slot_idx < pager->cpgr_num_slots; + slot_idx++) { if (chunk[slot_idx] != 0) { /* found a non-NULL slot in this chunk */ return (memory_object_offset_t) (slot_idx * - PAGE_SIZE); + PAGE_SIZE); } } return (memory_object_offset_t) -1; @@ -1094,10 +1059,10 @@ vm_compressor_pager_next_compressed( /* we have an array of chunks; find the next non-NULL chunk */ chunk = NULL; for (chunk_idx = page_num / COMPRESSOR_SLOTS_PER_CHUNK, - slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK; - chunk_idx < num_chunks; - chunk_idx++, - 
slot_idx = 0) { + slot_idx = page_num % COMPRESSOR_SLOTS_PER_CHUNK; + chunk_idx < num_chunks; + chunk_idx++, + slot_idx = 0) { chunk = pager->cpgr_slots.cpgr_islots[chunk_idx]; if (chunk == NULL) { /* no chunk here: try the next one */ @@ -1105,21 +1070,21 @@ vm_compressor_pager_next_compressed( } /* search for an occupied slot in this chunk */ for (; - slot_idx < COMPRESSOR_SLOTS_PER_CHUNK; - slot_idx++) { + slot_idx < COMPRESSOR_SLOTS_PER_CHUNK; + slot_idx++) { if (chunk[slot_idx] != 0) { /* found an occupied slot in this chunk */ uint32_t next_slot; next_slot = ((chunk_idx * - COMPRESSOR_SLOTS_PER_CHUNK) + - slot_idx); + COMPRESSOR_SLOTS_PER_CHUNK) + + slot_idx); if (next_slot >= pager->cpgr_num_slots) { /* went beyond end of object */ return (memory_object_offset_t) -1; } return (memory_object_offset_t) (next_slot * - PAGE_SIZE); + PAGE_SIZE); } } } @@ -1130,11 +1095,12 @@ unsigned int vm_compressor_pager_get_count( memory_object_t mem_obj) { - compressor_pager_t pager; + compressor_pager_t pager; compressor_pager_lookup(mem_obj, pager); - if (pager == NULL) + if (pager == NULL) { return 0; + } /* * The caller should have the VM object locked and one @@ -1148,24 +1114,25 @@ vm_compressor_pager_get_count( void vm_compressor_pager_count( - memory_object_t mem_obj, - int compressed_count_delta, - boolean_t shared_lock, - vm_object_t object __unused) + memory_object_t mem_obj, + int compressed_count_delta, + boolean_t shared_lock, + vm_object_t object __unused) { - compressor_pager_t pager; + compressor_pager_t pager; if (compressed_count_delta == 0) { return; } compressor_pager_lookup(mem_obj, pager); - if (pager == NULL) + if (pager == NULL) { return; + } if (compressed_count_delta < 0) { assert(pager->cpgr_num_slots_occupied >= - (unsigned int) -compressed_count_delta); + (unsigned int) -compressed_count_delta); } /* @@ -1175,7 +1142,7 @@ vm_compressor_pager_count( if (shared_lock) { vm_object_lock_assert_shared(object); OSAddAtomic(compressed_count_delta, - &pager->cpgr_num_slots_occupied); + &pager->cpgr_num_slots_occupied); } else { vm_object_lock_assert_exclusive(object); pager->cpgr_num_slots_occupied += compressed_count_delta; @@ -1185,9 +1152,9 @@ vm_compressor_pager_count( #if CONFIG_FREEZE kern_return_t vm_compressor_pager_relocate( - memory_object_t mem_obj, - memory_object_offset_t offset, - void **current_chead) + memory_object_t mem_obj, + memory_object_offset_t offset, + void **current_chead) { /* * Has the page at this offset been compressed? @@ -1197,13 +1164,13 @@ vm_compressor_pager_relocate( compressor_pager_t dst_pager; assert(mem_obj); - + compressor_pager_lookup(mem_obj, dst_pager); - if (dst_pager == NULL) + if (dst_pager == NULL) { return KERN_FAILURE; + } compressor_pager_slot_lookup(dst_pager, FALSE, offset, &slot_p); - return (vm_compressor_relocate(current_chead, slot_p)); + return vm_compressor_relocate(current_chead, slot_p); } #endif /* CONFIG_FREEZE */ - diff --git a/osfmk/vm/vm_compressor_pager.h b/osfmk/vm/vm_compressor_pager.h index a42d1b9ee..26ace8f15 100644 --- a/osfmk/vm/vm_compressor_pager.h +++ b/osfmk/vm/vm_compressor_pager.h @@ -2,7 +2,7 @@ * Copyright (c) 2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
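vm_compressor_pager_next_compressed() advances to the first occupied slot at or after a given page, skipping never-allocated (NULL) chunks wholesale. A simplified model of the indirect-layout scan, using the 128-slot chunks derived above and UINT32_MAX where the kernel returns (memory_object_offset_t)-1:

#include <stdint.h>
#include <stdio.h>

#define SLOTS_PER_CHUNK 128   /* 512 bytes / 4-byte slots, as above */

/* Starting at start_slot, return the first occupied slot index, or
 * UINT32_MAX if none remains.  A NULL chunk is skipped in one step;
 * the slot cursor resets to 0 on every chunk after the first. */
static uint32_t
next_compressed(int **chunks, uint32_t num_chunks,
    uint32_t num_slots, uint32_t start_slot)
{
	uint32_t slot = start_slot % SLOTS_PER_CHUNK;
	for (uint32_t c = start_slot / SLOTS_PER_CHUNK; c < num_chunks;
	    c++, slot = 0) {
		if (chunks[c] == NULL) {
			continue;    /* wholly empty chunk */
		}
		for (; slot < SLOTS_PER_CHUNK; slot++) {
			if (chunks[c][slot] != 0) {
				uint32_t found = c * SLOTS_PER_CHUNK + slot;
				return found < num_slots ? found : UINT32_MAX;
			}
		}
	}
	return UINT32_MAX;
}

int main(void)
{
	int chunk1[SLOTS_PER_CHUNK] = { 0 };
	chunk1[5] = 42;                       /* one occupied slot in chunk 1 */
	int *chunks[2] = { NULL, chunk1 };
	printf("%u\n", next_compressed(chunks, 2, 256, 0));  /* 133 = 128 + 5 */
	return 0;
}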
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE #ifndef _VM_VM_COMPRESSOR_PAGER_H_ #define _VM_VM_COMPRESSOR_PAGER_H_ @@ -36,77 +36,77 @@ #include extern kern_return_t vm_compressor_pager_put( - memory_object_t mem_obj, - memory_object_offset_t offset, - ppnum_t ppnum, - void **current_chead, - char *scratch_buf, - int *compressed_count_delta_p); + memory_object_t mem_obj, + memory_object_offset_t offset, + ppnum_t ppnum, + void **current_chead, + char *scratch_buf, + int *compressed_count_delta_p); extern kern_return_t vm_compressor_pager_get( - memory_object_t mem_obj, - memory_object_offset_t offset, - ppnum_t ppnum, - int *my_fault_type, - int flags, - int *compressed_count_delta_p); + memory_object_t mem_obj, + memory_object_offset_t offset, + ppnum_t ppnum, + int *my_fault_type, + int flags, + int *compressed_count_delta_p); -#define C_DONT_BLOCK 0x01 -#define C_KEEP 0x02 -#define C_KDP 0x04 +#define C_DONT_BLOCK 0x01 +#define C_KEEP 0x02 +#define C_KDP 0x04 extern unsigned int vm_compressor_pager_state_clr( - memory_object_t mem_obj, - memory_object_offset_t offset); + memory_object_t mem_obj, + memory_object_offset_t offset); extern vm_external_state_t vm_compressor_pager_state_get( - memory_object_t mem_obj, - memory_object_offset_t offset); + memory_object_t mem_obj, + memory_object_offset_t offset); -#define VM_COMPRESSOR_PAGER_STATE_GET(object, offset) \ - (((object)->internal && \ - (object)->pager != NULL && \ - !(object)->terminating && \ - (object)->alive) \ - ? vm_compressor_pager_state_get((object)->pager, \ - (offset) + (object)->paging_offset) \ +#define VM_COMPRESSOR_PAGER_STATE_GET(object, offset) \ + (((object)->internal && \ + (object)->pager != NULL && \ + !(object)->terminating && \ + (object)->alive) \ + ? 
vm_compressor_pager_state_get((object)->pager, \ + (offset) + (object)->paging_offset) \ : VM_EXTERNAL_STATE_UNKNOWN) -#define VM_COMPRESSOR_PAGER_STATE_CLR(object, offset) \ - MACRO_BEGIN \ - if ((object)->internal && \ - (object)->pager != NULL && \ - !(object)->terminating && \ - (object)->alive) { \ - int _num_pages_cleared; \ - _num_pages_cleared = \ - vm_compressor_pager_state_clr( \ - (object)->pager, \ - (offset) + (object)->paging_offset); \ - if (_num_pages_cleared) { \ - vm_compressor_pager_count((object)->pager, \ - -_num_pages_cleared, \ - FALSE, /* shared */ \ - (object)); \ - } \ - if (_num_pages_cleared && \ - ((object)->purgable != VM_PURGABLE_DENY || \ - (object)->vo_ledger_tag)) { \ - /* less compressed purgeable/tagged pages */ \ - assert(_num_pages_cleared == 1); \ - vm_object_owner_compressed_update( \ - (object), \ - -_num_pages_cleared); \ - } \ - } \ +#define VM_COMPRESSOR_PAGER_STATE_CLR(object, offset) \ + MACRO_BEGIN \ + if ((object)->internal && \ + (object)->pager != NULL && \ + !(object)->terminating && \ + (object)->alive) { \ + int _num_pages_cleared; \ + _num_pages_cleared = \ + vm_compressor_pager_state_clr( \ + (object)->pager, \ + (offset) + (object)->paging_offset); \ + if (_num_pages_cleared) { \ + vm_compressor_pager_count((object)->pager, \ + -_num_pages_cleared, \ + FALSE, /* shared */ \ + (object)); \ + } \ + if (_num_pages_cleared && \ + ((object)->purgable != VM_PURGABLE_DENY || \ + (object)->vo_ledger_tag)) { \ + /* less compressed purgeable/tagged pages */ \ + assert(_num_pages_cleared == 1); \ + vm_object_owner_compressed_update( \ + (object), \ + -_num_pages_cleared); \ + } \ + } \ MACRO_END extern void vm_compressor_pager_transfer( - memory_object_t dst_mem_obj, - memory_object_offset_t dst_offset, - memory_object_t src_mem_obj, - memory_object_offset_t src_offset); + memory_object_t dst_mem_obj, + memory_object_offset_t dst_offset, + memory_object_t src_mem_obj, + memory_object_offset_t src_offset); extern memory_object_offset_t vm_compressor_pager_next_compressed( - memory_object_t mem_obj, - memory_object_offset_t offset); + memory_object_t mem_obj, + memory_object_offset_t offset); extern void vm_compressor_init(void); extern int vm_compressor_put(ppnum_t pn, int *slot, void **current_chead, char *scratch_buf); @@ -115,11 +115,11 @@ extern int vm_compressor_free(int *slot, int flags); extern unsigned int vm_compressor_pager_reap_pages(memory_object_t mem_obj, int flags); extern unsigned int vm_compressor_pager_get_count(memory_object_t mem_obj); extern void vm_compressor_pager_count(memory_object_t mem_obj, - int compressed_count_delta, - boolean_t shared_lock, - vm_object_t object); + int compressed_count_delta, + boolean_t shared_lock, + vm_object_t object); -extern void vm_compressor_transfer(int *dst_slot_p, int *src_slot_p); +extern void vm_compressor_transfer(int *dst_slot_p, int *src_slot_p); #if CONFIG_FREEZE extern kern_return_t vm_compressor_pager_relocate(memory_object_t mem_obj, memory_object_offset_t mem_offset, void **current_chead); @@ -127,6 +127,6 @@ extern kern_return_t vm_compressor_relocate(void **current_chead, int *src_slot_ extern void vm_compressor_finished_filling(void **current_chead); #endif /* CONFIG_FREEZE */ -#endif /* _VM_VM_COMPRESSOR_PAGER_H_ */ +#endif /* _VM_VM_COMPRESSOR_PAGER_H_ */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/vm/vm_debug.c b/osfmk/vm/vm_debug.c index 12826e385..f0d1765a5 100644 --- a/osfmk/vm/vm_debug.c +++ b/osfmk/vm/vm_debug.c @@ -2,7 +2,7 @@ 
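VM_COMPRESSOR_PAGER_STATE_GET() consults the pager only for internal, live, non-terminating objects that actually have one, answering VM_EXTERNAL_STATE_UNKNOWN otherwise; VM_COMPRESSOR_PAGER_STATE_CLR() additionally funnels each cleared page through vm_compressor_pager_count() and the purgeable-ledger update. A function form of the GET macro over a pared-down object, with the actual pager call elided:

#include <stdbool.h>
#include <stddef.h>

enum {
	VM_EXTERNAL_STATE_EXISTS = 1,
	VM_EXTERNAL_STATE_UNKNOWN = 2,
	VM_EXTERNAL_STATE_ABSENT = 3,
};

/* Pared-down vm_object: just the fields the macro tests. */
struct object_model {
	bool internal, terminating, alive;
	void *pager;
	unsigned long long paging_offset;
};

static int
compressor_state_get(struct object_model *o, unsigned long long offset)
{
	if (o->internal && o->pager != NULL && !o->terminating && o->alive) {
		/* the macro calls vm_compressor_pager_state_get(o->pager,
		 * offset + o->paging_offset) at this point */
		return VM_EXTERNAL_STATE_ABSENT;  /* placeholder answer */
	}
	return VM_EXTERNAL_STATE_UNKNOWN;
}

int main(void)
{
	/* any non-NULL pointer works as the model's pager */
	struct object_model o = { .internal = true, .alive = true, .pager = &o };
	return compressor_state_get(&o, 0) == VM_EXTERNAL_STATE_ABSENT ? 0 : 1;
}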
* Copyright (c) 2000-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -112,31 +112,32 @@ kern_return_t vm32_region_info( - __DEBUG_ONLY vm_map_t map, - __DEBUG_ONLY vm32_offset_t address, - __DEBUG_ONLY vm_info_region_t *regionp, - __DEBUG_ONLY vm_info_object_array_t *objectsp, - __DEBUG_ONLY mach_msg_type_number_t *objectsCntp) + __DEBUG_ONLY vm_map_t map, + __DEBUG_ONLY vm32_offset_t address, + __DEBUG_ONLY vm_info_region_t *regionp, + __DEBUG_ONLY vm_info_object_array_t *objectsp, + __DEBUG_ONLY mach_msg_type_number_t *objectsCntp) { #if !MACH_VM_DEBUG - return KERN_FAILURE; + return KERN_FAILURE; #else vm_map_copy_t copy; - vm_offset_t addr = 0; /* memory for OOL data */ - vm_size_t size; /* size of the memory */ - unsigned int room; /* room for this many objects */ - unsigned int used; /* actually this many objects */ + vm_offset_t addr = 0; /* memory for OOL data */ + vm_size_t size; /* size of the memory */ + unsigned int room; /* room for this many objects */ + unsigned int used; /* actually this many objects */ vm_info_region_t region; kern_return_t kr; - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_TASK; + } - size = 0; /* no memory allocated yet */ + size = 0; /* no memory allocated yet */ for (;;) { - vm_map_t cmap; /* current map in traversal */ - vm_map_t nmap; /* next map to look at */ + vm_map_t cmap; /* current map in traversal */ + vm_map_t nmap; /* next map to look at */ vm_map_entry_t entry; vm_object_t object, cobject, nobject; @@ -146,23 +147,24 @@ vm32_region_info( for (cmap = map;; cmap = nmap) { /* cmap is read-locked */ - if (!vm_map_lookup_entry(cmap, - (vm_map_address_t)address, &entry)) { - + if (!vm_map_lookup_entry(cmap, + (vm_map_address_t)address, &entry)) { entry = entry->vme_next; if (entry == vm_map_to_entry(cmap)) { vm_map_unlock_read(cmap); - if (size != 0) + if (size != 0) { kmem_free(ipc_kernel_map, - addr, size); + addr, size); + } return KERN_NO_SPACE; } } - if (entry->is_sub_map) + if (entry->is_sub_map) { nmap = VME_SUBMAP(entry); - else + } else { break; + } /* move down to the lower map */ @@ -201,50 +203,50 @@ vm32_region_info( if (used < room) { vm_info_object_t *vio = - &((vm_info_object_t *) addr)[used]; + &((vm_info_object_t *) addr)[used]; vio->vio_object = - (natural_t)(uintptr_t) cobject; + (natural_t)(uintptr_t) cobject; vio->vio_size = - (natural_t) cobject->vo_size; + (natural_t) cobject->vo_size; vio->vio_ref_count = - cobject->ref_count; + cobject->ref_count; vio->vio_resident_page_count = - cobject->resident_page_count; + cobject->resident_page_count; vio->vio_copy = - (natural_t)(uintptr_t) cobject->copy; + (natural_t)(uintptr_t) cobject->copy; vio->vio_shadow = - (natural_t)(uintptr_t) cobject->shadow; + (natural_t)(uintptr_t) cobject->shadow; vio->vio_shadow_offset = - (natural_t) cobject->vo_shadow_offset; + (natural_t) cobject->vo_shadow_offset; vio->vio_paging_offset = - (natural_t) cobject->paging_offset; + (natural_t) cobject->paging_offset; vio->vio_copy_strategy = - cobject->copy_strategy; + cobject->copy_strategy; vio->vio_last_alloc = - (vm_offset_t) cobject->last_alloc; + (vm_offset_t) cobject->last_alloc; vio->vio_paging_in_progress = - cobject->paging_in_progress + - cobject->activity_in_progress; + cobject->paging_in_progress + + cobject->activity_in_progress; vio->vio_pager_created = - cobject->pager_created; + cobject->pager_created; vio->vio_pager_initialized = - cobject->pager_initialized; + cobject->pager_initialized; vio->vio_pager_ready = - cobject->pager_ready; + cobject->pager_ready; vio->vio_can_persist = - cobject->can_persist; + 
cobject->can_persist; vio->vio_internal = - cobject->internal; + cobject->internal; vio->vio_temporary = - FALSE; + FALSE; vio->vio_alive = - cobject->alive; + cobject->alive; vio->vio_purgable = - (cobject->purgable != VM_PURGABLE_DENY); + (cobject->purgable != VM_PURGABLE_DENY); vio->vio_purgable_volatile = - (cobject->purgable == VM_PURGABLE_VOLATILE || - cobject->purgable == VM_PURGABLE_EMPTY); + (cobject->purgable == VM_PURGABLE_VOLATILE || + cobject->purgable == VM_PURGABLE_EMPTY); } used++; @@ -260,27 +262,30 @@ vm32_region_info( /* nothing locked */ - if (used <= room) + if (used <= room) { break; + } /* must allocate more memory */ - if (size != 0) + if (size != 0) { kmem_free(ipc_kernel_map, addr, size); + } size = vm_map_round_page(2 * used * sizeof(vm_info_object_t), - VM_MAP_PAGE_MASK(ipc_kernel_map)); + VM_MAP_PAGE_MASK(ipc_kernel_map)); kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return KERN_RESOURCE_SHORTAGE; + } kr = vm_map_wire_kernel( ipc_kernel_map, vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr + size, - VM_MAP_PAGE_MASK(ipc_kernel_map)), - VM_PROT_READ|VM_PROT_WRITE, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); assert(kr == KERN_SUCCESS); @@ -291,29 +296,31 @@ vm32_region_info( if (used == 0) { copy = VM_MAP_COPY_NULL; - if (size != 0) + if (size != 0) { kmem_free(ipc_kernel_map, addr, size); + } } else { vm_size_t size_used = (used * sizeof(vm_info_object_t)); vm_size_t vmsize_used = vm_map_round_page(size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)); + VM_MAP_PAGE_MASK(ipc_kernel_map)); kr = vm_map_unwire( ipc_kernel_map, vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr + size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, - (vm_map_size_t)size_used, TRUE, ©); + (vm_map_size_t)size_used, TRUE, ©); assert(kr == KERN_SUCCESS); - if (size != vmsize_used) + if (size != vmsize_used) { kmem_free(ipc_kernel_map, - addr + vmsize_used, size - vmsize_used); + addr + vmsize_used, size - vmsize_used); + } } *regionp = region; @@ -329,31 +336,32 @@ vm32_region_info( kern_return_t vm32_region_info_64( - __DEBUG_ONLY vm_map_t map, - __DEBUG_ONLY vm32_offset_t address, - __DEBUG_ONLY vm_info_region_64_t *regionp, - __DEBUG_ONLY vm_info_object_array_t *objectsp, - __DEBUG_ONLY mach_msg_type_number_t *objectsCntp) + __DEBUG_ONLY vm_map_t map, + __DEBUG_ONLY vm32_offset_t address, + __DEBUG_ONLY vm_info_region_64_t *regionp, + __DEBUG_ONLY vm_info_object_array_t *objectsp, + __DEBUG_ONLY mach_msg_type_number_t *objectsCntp) { #if !MACH_VM_DEBUG - return KERN_FAILURE; + return KERN_FAILURE; #else vm_map_copy_t copy; - vm_offset_t addr = 0; /* memory for OOL data */ - vm_size_t size; /* size of the memory */ - unsigned int room; /* room for this many objects */ - unsigned int used; /* actually this many objects */ + vm_offset_t addr = 0; /* memory for OOL data */ + vm_size_t size; /* size of the memory */ + unsigned int room; /* room for this many objects */ + unsigned int used; /* actually this many objects */ vm_info_region_64_t region; kern_return_t kr; - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_TASK; + } - size = 0; /* no memory 
allocated yet */ + size = 0; /* no memory allocated yet */ for (;;) { - vm_map_t cmap; /* current map in traversal */ - vm_map_t nmap; /* next map to look at */ + vm_map_t cmap; /* current map in traversal */ + vm_map_t nmap; /* next map to look at */ vm_map_entry_t entry; vm_object_t object, cobject, nobject; @@ -367,17 +375,19 @@ vm32_region_info_64( entry = entry->vme_next; if (entry == vm_map_to_entry(cmap)) { vm_map_unlock_read(cmap); - if (size != 0) + if (size != 0) { kmem_free(ipc_kernel_map, - addr, size); + addr, size); + } return KERN_NO_SPACE; } } - if (entry->is_sub_map) + if (entry->is_sub_map) { nmap = VME_SUBMAP(entry); - else + } else { break; + } /* move down to the lower map */ @@ -416,50 +426,50 @@ vm32_region_info_64( if (used < room) { vm_info_object_t *vio = - &((vm_info_object_t *) addr)[used]; + &((vm_info_object_t *) addr)[used]; vio->vio_object = - (natural_t)(uintptr_t) cobject; + (natural_t)(uintptr_t) cobject; vio->vio_size = - (natural_t) cobject->vo_size; + (natural_t) cobject->vo_size; vio->vio_ref_count = - cobject->ref_count; + cobject->ref_count; vio->vio_resident_page_count = - cobject->resident_page_count; + cobject->resident_page_count; vio->vio_copy = - (natural_t)(uintptr_t) cobject->copy; + (natural_t)(uintptr_t) cobject->copy; vio->vio_shadow = - (natural_t)(uintptr_t) cobject->shadow; + (natural_t)(uintptr_t) cobject->shadow; vio->vio_shadow_offset = - (natural_t) cobject->vo_shadow_offset; + (natural_t) cobject->vo_shadow_offset; vio->vio_paging_offset = - (natural_t) cobject->paging_offset; + (natural_t) cobject->paging_offset; vio->vio_copy_strategy = - cobject->copy_strategy; + cobject->copy_strategy; vio->vio_last_alloc = - (vm_offset_t) cobject->last_alloc; + (vm_offset_t) cobject->last_alloc; vio->vio_paging_in_progress = - cobject->paging_in_progress + - cobject->activity_in_progress; + cobject->paging_in_progress + + cobject->activity_in_progress; vio->vio_pager_created = - cobject->pager_created; + cobject->pager_created; vio->vio_pager_initialized = - cobject->pager_initialized; + cobject->pager_initialized; vio->vio_pager_ready = - cobject->pager_ready; + cobject->pager_ready; vio->vio_can_persist = - cobject->can_persist; + cobject->can_persist; vio->vio_internal = - cobject->internal; + cobject->internal; vio->vio_temporary = - FALSE; + FALSE; vio->vio_alive = - cobject->alive; + cobject->alive; vio->vio_purgable = - (cobject->purgable != VM_PURGABLE_DENY); + (cobject->purgable != VM_PURGABLE_DENY); vio->vio_purgable_volatile = - (cobject->purgable == VM_PURGABLE_VOLATILE || - cobject->purgable == VM_PURGABLE_EMPTY); + (cobject->purgable == VM_PURGABLE_VOLATILE || + cobject->purgable == VM_PURGABLE_EMPTY); } used++; @@ -475,27 +485,30 @@ vm32_region_info_64( /* nothing locked */ - if (used <= room) + if (used <= room) { break; + } /* must allocate more memory */ - if (size != 0) + if (size != 0) { kmem_free(ipc_kernel_map, addr, size); + } size = vm_map_round_page(2 * used * sizeof(vm_info_object_t), - VM_MAP_PAGE_MASK(ipc_kernel_map)); + VM_MAP_PAGE_MASK(ipc_kernel_map)); kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return KERN_RESOURCE_SHORTAGE; + } kr = vm_map_wire_kernel( ipc_kernel_map, vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr + size, - VM_MAP_PAGE_MASK(ipc_kernel_map)), - VM_PROT_READ|VM_PROT_WRITE, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + 
VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE); assert(kr == KERN_SUCCESS); @@ -506,29 +519,31 @@ vm32_region_info_64( if (used == 0) { copy = VM_MAP_COPY_NULL; - if (size != 0) + if (size != 0) { kmem_free(ipc_kernel_map, addr, size); + } } else { vm_size_t size_used = (used * sizeof(vm_info_object_t)); vm_size_t vmsize_used = vm_map_round_page(size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)); + VM_MAP_PAGE_MASK(ipc_kernel_map)); kr = vm_map_unwire( ipc_kernel_map, vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), vm_map_round_page(addr + size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE); assert(kr == KERN_SUCCESS); kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, - (vm_map_size_t)size_used, TRUE, ©); + (vm_map_size_t)size_used, TRUE, ©); assert(kr == KERN_SUCCESS); - if (size != vmsize_used) + if (size != vmsize_used) { kmem_free(ipc_kernel_map, - addr + vmsize_used, size - vmsize_used); + addr + vmsize_used, size - vmsize_used); + } } *regionp = region; @@ -542,90 +557,91 @@ vm32_region_info_64( */ kern_return_t vm32_mapped_pages_info( - __DEBUG_ONLY vm_map_t map, - __DEBUG_ONLY page_address_array_t *pages, - __DEBUG_ONLY mach_msg_type_number_t *pages_count) + __DEBUG_ONLY vm_map_t map, + __DEBUG_ONLY page_address_array_t *pages, + __DEBUG_ONLY mach_msg_type_number_t *pages_count) { #if !MACH_VM_DEBUG - return KERN_FAILURE; + return KERN_FAILURE; #else - pmap_t pmap; - vm_size_t size, size_used; - unsigned int actual, space; + pmap_t pmap; + vm_size_t size, size_used; + unsigned int actual, space; page_address_array_t list; - vm_offset_t addr = 0; + vm_offset_t addr = 0; - if (map == VM_MAP_NULL) - return (KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } pmap = map->pmap; size = pmap_resident_count(pmap) * sizeof(vm_offset_t); size = vm_map_round_page(size, - VM_MAP_PAGE_MASK(ipc_kernel_map)); + VM_MAP_PAGE_MASK(ipc_kernel_map)); for (;;) { - (void) vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); - (void) vm_map_unwire( - ipc_kernel_map, - vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(ipc_kernel_map)), - vm_map_round_page(addr + size, - VM_MAP_PAGE_MASK(ipc_kernel_map)), - FALSE); - - list = (page_address_array_t) addr; - space = (unsigned int) (size / sizeof(vm_offset_t)); - - actual = pmap_list_resident_pages(pmap, - list, - space); - if (actual <= space) - break; - - /* - * Free memory if not enough - */ - (void) kmem_free(ipc_kernel_map, addr, size); - - /* - * Try again, doubling the size - */ - size = vm_map_round_page(actual * sizeof(vm_offset_t), - VM_MAP_PAGE_MASK(ipc_kernel_map)); + (void) vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); + (void) vm_map_unwire( + ipc_kernel_map, + vm_map_trunc_page(addr, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + vm_map_round_page(addr + size, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + FALSE); + + list = (page_address_array_t) addr; + space = (unsigned int) (size / sizeof(vm_offset_t)); + + actual = pmap_list_resident_pages(pmap, + list, + space); + if (actual <= space) { + break; + } + + /* + * Free memory if not enough + */ + (void) kmem_free(ipc_kernel_map, addr, size); + + /* + * Try again, doubling the size + */ + size = vm_map_round_page(actual * sizeof(vm_offset_t), + VM_MAP_PAGE_MASK(ipc_kernel_map)); } if (actual == 0) { - *pages = 0; - *pages_count = 0; - (void) kmem_free(ipc_kernel_map, addr, size); - } - else { - 
vm_size_t vmsize_used; - *pages_count = actual; - size_used = (actual * sizeof(vm_offset_t)); - vmsize_used = vm_map_round_page(size_used, - VM_MAP_PAGE_MASK(ipc_kernel_map)); - (void) vm_map_wire_kernel( - ipc_kernel_map, - vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(ipc_kernel_map)), - vm_map_round_page(addr + size, - VM_MAP_PAGE_MASK(ipc_kernel_map)), - VM_PROT_READ|VM_PROT_WRITE, - VM_KERN_MEMORY_IPC, - FALSE); - (void) vm_map_copyin(ipc_kernel_map, - (vm_map_address_t)addr, - (vm_map_size_t)size_used, - TRUE, - (vm_map_copy_t *)pages); - if (vmsize_used != size) { - (void) kmem_free(ipc_kernel_map, - addr + vmsize_used, - size - vmsize_used); - } + *pages = 0; + *pages_count = 0; + (void) kmem_free(ipc_kernel_map, addr, size); + } else { + vm_size_t vmsize_used; + *pages_count = actual; + size_used = (actual * sizeof(vm_offset_t)); + vmsize_used = vm_map_round_page(size_used, + VM_MAP_PAGE_MASK(ipc_kernel_map)); + (void) vm_map_wire_kernel( + ipc_kernel_map, + vm_map_trunc_page(addr, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + vm_map_round_page(addr + size, + VM_MAP_PAGE_MASK(ipc_kernel_map)), + VM_PROT_READ | VM_PROT_WRITE, + VM_KERN_MEMORY_IPC, + FALSE); + (void) vm_map_copyin(ipc_kernel_map, + (vm_map_address_t)addr, + (vm_map_size_t)size_used, + TRUE, + (vm_map_copy_t *)pages); + if (vmsize_used != size) { + (void) kmem_free(ipc_kernel_map, + addr + vmsize_used, + size - vmsize_used); + } } - return (KERN_SUCCESS); + return KERN_SUCCESS; #endif /* MACH_VM_DEBUG */ } @@ -645,12 +661,12 @@ vm32_mapped_pages_info( kern_return_t host_virtual_physical_table_info( - __DEBUG_ONLY host_t host, - __DEBUG_ONLY hash_info_bucket_array_t *infop, - __DEBUG_ONLY mach_msg_type_number_t *countp) + __DEBUG_ONLY host_t host, + __DEBUG_ONLY hash_info_bucket_array_t *infop, + __DEBUG_ONLY mach_msg_type_number_t *countp) { #if !MACH_VM_DEBUG - return KERN_FAILURE; + return KERN_FAILURE; #else vm_offset_t addr = 0; vm_size_t size = 0; @@ -658,8 +674,9 @@ host_virtual_physical_table_info( unsigned int potential, actual; kern_return_t kr; - if (host == HOST_NULL) + if (host == HOST_NULL) { return KERN_INVALID_HOST; + } /* start with in-line data */ @@ -668,23 +685,26 @@ host_virtual_physical_table_info( for (;;) { actual = vm_page_info(info, potential); - if (actual <= potential) + if (actual <= potential) { break; + } /* allocate more memory */ - if (info != *infop) + if (info != *infop) { kmem_free(ipc_kernel_map, addr, size); + } size = vm_map_round_page(actual * sizeof *info, - VM_MAP_PAGE_MASK(ipc_kernel_map)); + VM_MAP_PAGE_MASK(ipc_kernel_map)); kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, - VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); - if (kr != KERN_SUCCESS) + VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC); + if (kr != KERN_SUCCESS) { return KERN_RESOURCE_SHORTAGE; + } info = (hash_info_bucket_t *) addr; - potential = (unsigned int) (size/sizeof (*info)); + potential = (unsigned int) (size / sizeof(*info)); } if (info == *infop) { @@ -702,11 +722,12 @@ host_virtual_physical_table_info( used = (actual * sizeof(*info)); vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map)); - if (vmused != size) + if (vmused != size) { kmem_free(ipc_kernel_map, addr + vmused, size - vmused); + } kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, - (vm_map_size_t)used, TRUE, ©); + (vm_map_size_t)used, TRUE, ©); assert(kr == KERN_SUCCESS); *infop = (hash_info_bucket_t *) copy; diff --git a/osfmk/vm/vm_debug.h b/osfmk/vm/vm_debug.h index b2dbd6fec..11b666740 100644 --- a/osfmk/vm/vm_debug.h +++ 
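The three vm_debug routines share one allocation shape: guess a buffer size, let the producer fill it, and when more entries exist than fit, free the buffer and retry larger (vm32_region_info grows to 2 * used entries; vm32_mapped_pages_info and host_virtual_physical_table_info resize to the reported count, page-rounded). A condensed user-space rendition, with a hypothetical fill() as the producer and error handling elided:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical producer: writes up to 'space' items, returns how
 * many items exist in total (possibly more than fit). */
static unsigned
fill(unsigned *buf, unsigned space)
{
	unsigned total = 300;    /* pretend 300 entries exist */
	for (unsigned i = 0; i < total && i < space; i++) {
		buf[i] = i;
	}
	return total;
}

int main(void)
{
	unsigned space = 64, actual;
	unsigned *buf = malloc(space * sizeof *buf);

	/* Same shape as vm32_mapped_pages_info(): try, and if the report
	 * did not fit, free and retry sized to 'actual' (the kernel also
	 * rounds to a page multiple, and the region-info variants double). */
	for (;;) {
		actual = fill(buf, space);
		if (actual <= space) {
			break;
		}
		free(buf);
		space = actual;    /* kernel: vm_map_round_page(actual * size) */
		buf = malloc(space * sizeof *buf);
	}
	printf("captured %u entries\n", actual);
	free(buf);
	return 0;
}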
b/osfmk/vm/vm_debug.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ #ifndef VM_VM_DEBUG_H -#define VM_VM_DEBUG_H +#define VM_VM_DEBUG_H #include #include @@ -38,8 +38,8 @@ #include #include -extern unsigned int vm_page_info( - hash_info_bucket_t *info, - unsigned int count); +extern unsigned int vm_page_info( + hash_info_bucket_t *info, + unsigned int count); -#endif /* VM_VM_DEBUG_H */ +#endif /* VM_VM_DEBUG_H */ diff --git a/osfmk/vm/vm_external.h b/osfmk/vm/vm_external.h index eb7d71692..477822a7e 100644 --- a/osfmk/vm/vm_external.h +++ b/osfmk/vm/vm_external.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,41 +22,41 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ /* */ -#ifndef VM_VM_EXTERNAL_H_ +#ifndef VM_VM_EXTERNAL_H_ #define VM_VM_EXTERNAL_H_ #include @@ -67,9 +67,9 @@ * The states that may be recorded for a page of external storage. */ -typedef int vm_external_state_t; -#define VM_EXTERNAL_STATE_EXISTS 1 -#define VM_EXTERNAL_STATE_UNKNOWN 2 -#define VM_EXTERNAL_STATE_ABSENT 3 +typedef int vm_external_state_t; +#define VM_EXTERNAL_STATE_EXISTS 1 +#define VM_EXTERNAL_STATE_UNKNOWN 2 +#define VM_EXTERNAL_STATE_ABSENT 3 -#endif /* VM_VM_EXTERNAL_H_ */ +#endif /* VM_VM_EXTERNAL_H_ */ diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c index abbe202ff..331777917 100644 --- a/osfmk/vm/vm_fault.c +++ b/osfmk/vm/vm_fault.c @@ -68,11 +68,11 @@ #include #include -#include <mach/message.h> /* for error codes */ +#include <mach/message.h> /* for error codes */ #include #include #include - /* For memory_object_data_{request,unlock} */ +/* For memory_object_data_{request,unlock} */ #include #include @@ -101,7 +101,7 @@ #include #include #include -#include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */ +#include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */ #include #include @@ -110,11 +110,11 @@ #include -#define VM_FAULT_CLASSIFY 0 +#define VM_FAULT_CLASSIFY 0 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */ -unsigned int vm_object_pagein_throttle = 16; +unsigned int vm_object_pagein_throttle = 16; /* * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control which @@ -138,54 +138,54 @@ uint64_t vm_hard_throttle_threshold; -#define NEED_TO_HARD_THROTTLE_THIS_TASK() (vm_wants_task_throttled(current_task()) || \ - ((vm_page_free_count < vm_page_throttle_limit || \ - HARD_THROTTLE_LIMIT_REACHED()) && \ - proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED)) +#define NEED_TO_HARD_THROTTLE_THIS_TASK() (vm_wants_task_throttled(current_task()) || \ + ((vm_page_free_count < vm_page_throttle_limit || \ + HARD_THROTTLE_LIMIT_REACHED()) && \ + proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED)) -#define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */ -#define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */ +#define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */ +#define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */ -#define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6 -#define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000 +#define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6 +#define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000 boolean_t current_thread_aborted(void); /* Forward declarations of internal routines. 
*/ static kern_return_t vm_fault_wire_fast( - vm_map_t map, - vm_map_offset_t va, - vm_prot_t prot, - vm_tag_t wire_tag, - vm_map_entry_t entry, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p); + vm_map_t map, + vm_map_offset_t va, + vm_prot_t prot, + vm_tag_t wire_tag, + vm_map_entry_t entry, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); static kern_return_t vm_fault_internal( - vm_map_t map, - vm_map_offset_t vaddr, - vm_prot_t caller_prot, - boolean_t change_wiring, - vm_tag_t wire_tag, - int interruptible, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p); + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t caller_prot, + boolean_t change_wiring, + vm_tag_t wire_tag, + int interruptible, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); static void vm_fault_copy_cleanup( - vm_page_t page, - vm_page_t top_page); + vm_page_t page, + vm_page_t top_page); static void vm_fault_copy_dst_cleanup( - vm_page_t page); + vm_page_t page); -#if VM_FAULT_CLASSIFY -extern void vm_fault_classify(vm_object_t object, - vm_object_offset_t offset, - vm_prot_t fault_type); +#if VM_FAULT_CLASSIFY +extern void vm_fault_classify(vm_object_t object, + vm_object_offset_t offset, + vm_prot_t fault_type); extern void vm_fault_classify_init(void); #endif @@ -206,8 +206,8 @@ uint64_t vm_cs_defer_to_pmap_cs_not = 0; void vm_pre_fault(vm_map_offset_t); extern char *kdp_compressor_decompressed_page; -extern addr64_t kdp_compressor_decompressed_page_paddr; -extern ppnum_t kdp_compressor_decompressed_page_ppnum; +extern addr64_t kdp_compressor_decompressed_page_paddr; +extern ppnum_t kdp_compressor_decompressed_page_ppnum; struct vmrtfr { int vmrtfr_maxi; @@ -246,23 +246,24 @@ vm_fault_init(void) * The formula here simply uses the number of gigabytes of ram to adjust the percentage. */ - vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024*1024*1024)), 25)) / 100; + vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100; /* * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry. */ - if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof (vm_compressor_temp))) { - for ( i = 0; i < VM_PAGER_MAX_MODES; i++) { + if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) { + for (i = 0; i < VM_PAGER_MAX_MODES; i++) { if (vm_compressor_temp > 0 && - ((vm_compressor_temp & ( 1 << i)) == vm_compressor_temp)) { + ((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) { need_default_val = FALSE; vm_compressor_mode = vm_compressor_temp; break; } } - if (need_default_val) + if (need_default_val) { printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp); + } } if (need_default_val) { /* If no boot arg or incorrect boot arg, try device tree. 
*/ @@ -271,7 +272,9 @@ vm_fault_init(void) printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode); } -void vm_rtfault_record_init(void) { +void +vm_rtfault_record_init(void) +{ PE_parse_boot_argn("vm_rtfault_records", &vmrtf_num_records, sizeof(vmrtf_num_records)); assert(vmrtf_num_records >= 1); @@ -298,14 +301,14 @@ void vm_rtfault_record_init(void) { */ void vm_fault_cleanup( - vm_object_t object, - vm_page_t top_page) + vm_object_t object, + vm_page_t top_page) { vm_object_paging_end(object); - vm_object_unlock(object); + vm_object_unlock(object); if (top_page != VM_PAGE_NULL) { - object = VM_PAGE_OBJECT(top_page); + object = VM_PAGE_OBJECT(top_page); vm_object_lock(object); VM_PAGE_FREE(top_page); @@ -317,17 +320,17 @@ vm_fault_cleanup( #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0) -boolean_t vm_page_deactivate_behind = TRUE; +boolean_t vm_page_deactivate_behind = TRUE; /* * default sizes given VM_BEHAVIOR_DEFAULT reference behavior */ -#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128 -#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */ +#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128 +#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */ /* we use it to size an array on the stack */ int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW; -#define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024) +#define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024) /* * vm_page_is_sequential @@ -341,90 +344,95 @@ int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW; static void vm_fault_is_sequential( - vm_object_t object, - vm_object_offset_t offset, - vm_behavior_t behavior) + vm_object_t object, + vm_object_offset_t offset, + vm_behavior_t behavior) { - vm_object_offset_t last_alloc; - int sequential; - int orig_sequential; + vm_object_offset_t last_alloc; + int sequential; + int orig_sequential; - last_alloc = object->last_alloc; + last_alloc = object->last_alloc; sequential = object->sequential; orig_sequential = sequential; switch (behavior) { case VM_BEHAVIOR_RANDOM: - /* + /* * reset indicator of sequential behavior */ - sequential = 0; - break; + sequential = 0; + break; case VM_BEHAVIOR_SEQUENTIAL: - if (offset && last_alloc == offset - PAGE_SIZE_64) { - /* + if (offset && last_alloc == offset - PAGE_SIZE_64) { + /* * advance indicator of sequential behavior */ - if (sequential < MAX_SEQUENTIAL_RUN) - sequential += PAGE_SIZE; + if (sequential < MAX_SEQUENTIAL_RUN) { + sequential += PAGE_SIZE; + } } else { - /* + /* * reset indicator of sequential behavior */ - sequential = 0; + sequential = 0; } - break; + break; case VM_BEHAVIOR_RSEQNTL: - if (last_alloc && last_alloc == offset + PAGE_SIZE_64) { - /* + if (last_alloc && last_alloc == offset + PAGE_SIZE_64) { + /* * advance indicator of sequential behavior */ - if (sequential > -MAX_SEQUENTIAL_RUN) - sequential -= PAGE_SIZE; + if (sequential > -MAX_SEQUENTIAL_RUN) { + sequential -= PAGE_SIZE; + } } else { - /* + /* * reset indicator of sequential behavior */ - sequential = 0; + sequential = 0; } - break; + break; case VM_BEHAVIOR_DEFAULT: default: - if (offset && last_alloc == (offset - PAGE_SIZE_64)) { - /* + if (offset && last_alloc == (offset - PAGE_SIZE_64)) { + /* * advance indicator of sequential behavior */ - if (sequential < 0) - sequential = 0; - if (sequential < MAX_SEQUENTIAL_RUN) - sequential += PAGE_SIZE; - + if (sequential < 0) { + sequential = 0; + } + if (sequential < MAX_SEQUENTIAL_RUN) { + sequential += PAGE_SIZE; + } } else if (last_alloc && last_alloc == 
(offset + PAGE_SIZE_64)) { - /* + /* * advance indicator of sequential behavior */ - if (sequential > 0) - sequential = 0; - if (sequential > -MAX_SEQUENTIAL_RUN) - sequential -= PAGE_SIZE; + if (sequential > 0) { + sequential = 0; + } + if (sequential > -MAX_SEQUENTIAL_RUN) { + sequential -= PAGE_SIZE; + } } else { - /* + /* * reset indicator of sequential behavior */ - sequential = 0; + sequential = 0; } - break; + break; } if (sequential != orig_sequential) { - if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) { - /* + if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) { + /* * if someone else has already updated object->sequential * don't bother trying to update it or object->last_alloc */ - return; + return; } } /* @@ -459,23 +467,23 @@ int vm_page_deactivate_behind_count = 0; static boolean_t vm_fault_deactivate_behind( - vm_object_t object, - vm_object_offset_t offset, - vm_behavior_t behavior) + vm_object_t object, + vm_object_offset_t offset, + vm_behavior_t behavior) { - int n; - int pages_in_run = 0; - int max_pages_in_run = 0; - int sequential_run; - int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; - vm_object_offset_t run_offset = 0; - vm_object_offset_t pg_offset = 0; - vm_page_t m; - vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER]; + int n; + int pages_in_run = 0; + int max_pages_in_run = 0; + int sequential_run; + int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; + vm_object_offset_t run_offset = 0; + vm_object_offset_t pg_offset = 0; + vm_page_t m; + vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER]; pages_in_run = 0; #if TRACEFAULTPAGE - dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */ #endif if (object == kernel_object || vm_page_deactivate_behind == FALSE) { @@ -487,33 +495,33 @@ vm_fault_deactivate_behind( return FALSE; } if ((sequential_run = object->sequential)) { - if (sequential_run < 0) { - sequential_behavior = VM_BEHAVIOR_RSEQNTL; - sequential_run = 0 - sequential_run; - } else { - sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; - } + if (sequential_run < 0) { + sequential_behavior = VM_BEHAVIOR_RSEQNTL; + sequential_run = 0 - sequential_run; + } else { + sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; + } } switch (behavior) { case VM_BEHAVIOR_RANDOM: break; case VM_BEHAVIOR_SEQUENTIAL: - if (sequential_run >= (int)PAGE_SIZE) { + if (sequential_run >= (int)PAGE_SIZE) { run_offset = 0 - PAGE_SIZE_64; max_pages_in_run = 1; } break; case VM_BEHAVIOR_RSEQNTL: - if (sequential_run >= (int)PAGE_SIZE) { + if (sequential_run >= (int)PAGE_SIZE) { run_offset = PAGE_SIZE_64; max_pages_in_run = 1; } break; case VM_BEHAVIOR_DEFAULT: default: - { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; + { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64; - /* + /* * determine if the run of sequential access has been * long enough on an object with default access behavior * to consider it for deactivation @@ -524,24 +532,23 @@ vm_fault_deactivate_behind( * in this kind of odd fashion in order to prevent wrap around * at the end points */ - if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { - if (offset >= behind) { + if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { + if (offset >= behind) { run_offset = 0 - behind; pg_offset = PAGE_SIZE_64; max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER; } } else { - 
if (offset < -behind) { + if (offset < -behind) { run_offset = behind; pg_offset = 0 - PAGE_SIZE_64; max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER; } } } - break; - } + break;} } - for (n = 0; n < max_pages_in_run; n++) { + for (n = 0; n < max_pages_in_run; n++) { m = vm_page_lookup(object, offset + run_offset + (n * pg_offset)); if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache && (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->vmp_fictitious && !m->vmp_absent) { @@ -565,14 +572,13 @@ vm_fault_deactivate_behind( vm_page_lockspin_queues(); for (n = 0; n < pages_in_run; n++) { - m = page_run[n]; vm_page_deactivate_internal(m, FALSE); vm_page_deactivate_behind_count++; #if TRACEFAULTPAGE - dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ #endif } vm_page_unlock_queues(); @@ -584,40 +590,41 @@ vm_fault_deactivate_behind( #if (DEVELOPMENT || DEBUG) -uint32_t vm_page_creation_throttled_hard = 0; -uint32_t vm_page_creation_throttled_soft = 0; -uint64_t vm_page_creation_throttle_avoided = 0; +uint32_t vm_page_creation_throttled_hard = 0; +uint32_t vm_page_creation_throttled_soft = 0; +uint64_t vm_page_creation_throttle_avoided = 0; #endif /* DEVELOPMENT || DEBUG */ static int vm_page_throttled(boolean_t page_kept) { - clock_sec_t elapsed_sec; - clock_sec_t tv_sec; - clock_usec_t tv_usec; + clock_sec_t elapsed_sec; + clock_sec_t tv_sec; + clock_usec_t tv_usec; thread_t thread = current_thread(); - if (thread->options & TH_OPT_VMPRIV) - return (0); + if (thread->options & TH_OPT_VMPRIV) { + return 0; + } if (thread->t_page_creation_throttled) { thread->t_page_creation_throttled = 0; - if (page_kept == FALSE) + if (page_kept == FALSE) { goto no_throttle; + } } if (NEED_TO_HARD_THROTTLE_THIS_TASK()) { #if (DEVELOPMENT || DEBUG) thread->t_page_creation_throttled_hard++; OSAddAtomic(1, &vm_page_creation_throttled_hard); #endif /* DEVELOPMENT || DEBUG */ - return (HARD_THROTTLE_DELAY); + return HARD_THROTTLE_DELAY; } if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) && thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) { - if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) { #if (DEVELOPMENT || DEBUG) OSAddAtomic64(1, &vm_page_creation_throttle_avoided); @@ -630,7 +637,6 @@ vm_page_throttled(boolean_t page_kept) if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS || (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) { - if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) { /* * we'll reset our stats to give a well behaved app @@ -653,13 +659,13 @@ vm_page_throttled(boolean_t page_kept) thread->t_page_creation_throttled_hard++; OSAddAtomic(1, &vm_page_creation_throttled_hard); #endif /* DEVELOPMENT || DEBUG */ - return (HARD_THROTTLE_DELAY); + return HARD_THROTTLE_DELAY; } else { #if (DEVELOPMENT || DEBUG) thread->t_page_creation_throttled_soft++; OSAddAtomic(1, &vm_page_creation_throttled_soft); #endif /* DEVELOPMENT || DEBUG */ - return (SOFT_THROTTLE_DELAY); + return SOFT_THROTTLE_DELAY; } } thread->t_page_creation_time = tv_sec; @@ -668,7 +674,7 @@ vm_page_throttled(boolean_t page_kept) no_throttle: thread->t_page_creation_count++; - return (0); + return 0; } @@ -685,22 +691,23 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrup 
{ int throttle_delay; - if (object->shadow_severed || + if (object->shadow_severed || VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) { - /* + /* * Either: * 1. the shadow chain was severed, * 2. the purgeable object is volatile or empty and is marked * to fault on access while volatile. * Just have to return an error at this point */ - if (m != VM_PAGE_NULL) - VM_PAGE_FREE(m); + if (m != VM_PAGE_NULL) { + VM_PAGE_FREE(m); + } vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_ERROR); + return VM_FAULT_MEMORY_ERROR; } if (page_throttle == TRUE) { if ((throttle_delay = vm_page_throttled(FALSE))) { @@ -708,8 +715,9 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrup * we're throttling zero-fills... * treat this as if we couldn't grab a page */ - if (m != VM_PAGE_NULL) + if (m != VM_PAGE_NULL) { VM_PAGE_FREE(m); + } vm_fault_cleanup(object, first_m); VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); @@ -722,10 +730,10 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrup } thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } } - return (VM_FAULT_SUCCESS); + return VM_FAULT_SUCCESS; } @@ -739,8 +747,8 @@ vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrup static int vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) { - int my_fault = DBG_ZERO_FILL_FAULT; - vm_object_t object; + int my_fault = DBG_ZERO_FILL_FAULT; + vm_object_t object; object = VM_PAGE_OBJECT(m); @@ -768,8 +776,9 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) if (no_zero_fill == TRUE) { my_fault = DBG_NZF_PAGE_FAULT; - if (m->vmp_absent && m->vmp_busy) - return (my_fault); + if (m->vmp_absent && m->vmp_busy) { + return my_fault; + } } else { vm_page_zero_fill(m); @@ -781,10 +790,9 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0); if (!VM_DYNAMIC_PAGING_ENABLED() && - (object->purgable == VM_PURGABLE_DENY || - object->purgable == VM_PURGABLE_NONVOLATILE || - object->purgable == VM_PURGABLE_VOLATILE )) { - + (object->purgable == VM_PURGABLE_DENY || + object->purgable == VM_PURGABLE_NONVOLATILE || + object->purgable == VM_PURGABLE_VOLATILE)) { vm_page_lockspin_queues(); if (!VM_DYNAMIC_PAGING_ENABLED()) { @@ -796,13 +804,13 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) */ vm_page_queues_remove(m, TRUE); vm_page_check_pageable_safe(m); - vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, vmp_pageq); + vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq); m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; vm_page_throttled_count++; } vm_page_unlock_queues(); } - return (my_fault); + return my_fault; } @@ -850,7 +858,7 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill) * Special Case: * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the * fault succeeded but there's no VM page (i.e. the VM object - * does not actually hold VM pages, but device memory or + * does not actually hold VM pages, but device memory or * large pages). The object is still locked and we still hold a * paging_in_progress reference. 
*/ @@ -860,50 +868,50 @@ unsigned int vm_fault_page_forced_retry = 0; vm_fault_return_t vm_fault_page( /* Arguments: */ - vm_object_t first_object, /* Object to begin search */ - vm_object_offset_t first_offset, /* Offset into object */ - vm_prot_t fault_type, /* What access is requested */ - boolean_t must_be_resident,/* Must page be resident? */ - boolean_t caller_lookup, /* caller looked up page */ + vm_object_t first_object, /* Object to begin search */ + vm_object_offset_t first_offset, /* Offset into object */ + vm_prot_t fault_type, /* What access is requested */ + boolean_t must_be_resident,/* Must page be resident? */ + boolean_t caller_lookup, /* caller looked up page */ /* Modifies in place: */ - vm_prot_t *protection, /* Protection for mapping */ - vm_page_t *result_page, /* Page found, if successful */ + vm_prot_t *protection, /* Protection for mapping */ + vm_page_t *result_page, /* Page found, if successful */ /* Returns: */ - vm_page_t *top_page, /* Page in top object, if - * not result_page. */ + vm_page_t *top_page, /* Page in top object, if + * not result_page. */ int *type_of_fault, /* if non-null, fill in with type of fault - * COW, zero-fill, etc... returned in trace point */ + * COW, zero-fill, etc... returned in trace point */ /* More arguments: */ - kern_return_t *error_code, /* code if page is in error */ - boolean_t no_zero_fill, /* don't zero fill absent pages */ - boolean_t data_supply, /* treat as data_supply if - * it is a write fault and a full - * page is provided */ + kern_return_t *error_code, /* code if page is in error */ + boolean_t no_zero_fill, /* don't zero fill absent pages */ + boolean_t data_supply, /* treat as data_supply if + * it is a write fault and a full + * page is provided */ vm_object_fault_info_t fault_info) { - vm_page_t m; - vm_object_t object; - vm_object_offset_t offset; - vm_page_t first_m; - vm_object_t next_object; - vm_object_t copy_object; - boolean_t look_for_page; - boolean_t force_fault_retry = FALSE; - vm_prot_t access_required = fault_type; - vm_prot_t wants_copy_flag; - kern_return_t wait_result; - wait_interrupt_t interruptible_state; - boolean_t data_already_requested = FALSE; - vm_behavior_t orig_behavior; - vm_size_t orig_cluster_size; - vm_fault_return_t error; - int my_fault; - uint32_t try_failed_count; - int interruptible; /* how may fault be interrupted? */ - int external_state = VM_EXTERNAL_STATE_UNKNOWN; - memory_object_t pager; - vm_fault_return_t retval; - int grab_options; + vm_page_t m; + vm_object_t object; + vm_object_offset_t offset; + vm_page_t first_m; + vm_object_t next_object; + vm_object_t copy_object; + boolean_t look_for_page; + boolean_t force_fault_retry = FALSE; + vm_prot_t access_required = fault_type; + vm_prot_t wants_copy_flag; + kern_return_t wait_result; + wait_interrupt_t interruptible_state; + boolean_t data_already_requested = FALSE; + vm_behavior_t orig_behavior; + vm_size_t orig_cluster_size; + vm_fault_return_t error; + int my_fault; + uint32_t try_failed_count; + int interruptible; /* how may fault be interrupted? */ + int external_state = VM_EXTERNAL_STATE_UNKNOWN; + memory_object_t pager; + vm_fault_return_t retval; + int grab_options; /* * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is @@ -917,7 +925,7 @@ vm_fault_page( * PAGED_OUT() is used to determine if a page has already been pushed * into a copy object in order to avoid a redundant page out operation. 
*/ -#define MUST_ASK_PAGER(o, f, s) \ +#define MUST_ASK_PAGER(o, f, s) \ ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT) #define PAGED_OUT(o, f) \ @@ -926,23 +934,23 @@ vm_fault_page( /* * Recovery actions */ -#define RELEASE_PAGE(m) \ - MACRO_BEGIN \ - PAGE_WAKEUP_DONE(m); \ - if ( !VM_PAGE_PAGEABLE(m)) { \ - vm_page_lockspin_queues(); \ - if ( !VM_PAGE_PAGEABLE(m)) { \ - if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) \ - vm_page_deactivate(m); \ - else \ - vm_page_activate(m); \ - } \ - vm_page_unlock_queues(); \ - } \ +#define RELEASE_PAGE(m) \ + MACRO_BEGIN \ + PAGE_WAKEUP_DONE(m); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + vm_page_lockspin_queues(); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) \ + vm_page_deactivate(m); \ + else \ + vm_page_activate(m); \ + } \ + vm_page_unlock_queues(); \ + } \ MACRO_END #if TRACEFAULTPAGE - dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */ #endif interruptible = fault_info->interruptible; @@ -981,8 +989,8 @@ vm_fault_page( XPR(XPR_VM_FAULT, - "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n", - object, offset, fault_type, *protection, 0); + "vm_f_page: obj 0x%X, offset 0x%X, type %d, prot %d\n", + object, offset, fault_type, *protection, 0); /* * default type of fault @@ -991,7 +999,7 @@ vm_fault_page( while (TRUE) { #if TRACEFAULTPAGE - dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ #endif grab_options = 0; @@ -1002,14 +1010,14 @@ vm_fault_page( #endif /* CONFIG_SECLUDED_MEMORY */ if (!object->alive) { - /* + /* * object is no longer valid * clean up and return error */ vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_ERROR); + return VM_FAULT_MEMORY_ERROR; } if (!object->pager_created && object->phys_contiguous) { @@ -1035,8 +1043,8 @@ vm_fault_page( vm_object_paging_end(object); while (object->blocked_access) { vm_object_sleep(object, - VM_OBJECT_EVENT_UNBLOCKED, - THREAD_UNINT); + VM_OBJECT_EVENT_UNBLOCKED, + THREAD_UNINT); } vm_fault_page_blocked_access++; vm_object_paging_begin(object); @@ -1060,17 +1068,16 @@ vm_fault_page( m = vm_page_lookup(object, offset); } #if TRACEFAULTPAGE - dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (m != VM_PAGE_NULL) { - if (m->vmp_busy) { - /* + /* * The page is being brought in, * wait for it and then retry. 
*/ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif wait_result = PAGE_SLEEP(object, m, interruptible); @@ -1084,18 +1091,20 @@ vm_fault_page( vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - if (wait_result == THREAD_RESTART) - return (VM_FAULT_RETRY); - else - return (VM_FAULT_INTERRUPTED); + if (wait_result == THREAD_RESTART) { + return VM_FAULT_RETRY; + } else { + return VM_FAULT_INTERRUPTED; + } } continue; } if (m->vmp_laundry) { m->vmp_free_when_done = FALSE; - if (!m->vmp_cleaning) + if (!m->vmp_cleaning) { vm_pageout_steal_laundry(m, FALSE); + } } if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { /* @@ -1112,8 +1121,9 @@ vm_fault_page( *result_page = m; assert(first_m == VM_PAGE_NULL); *top_page = first_m; - if (type_of_fault) + if (type_of_fault) { *type_of_fault = DBG_GUARD_FAULT; + } thread_interrupt_level(interruptible_state); return VM_FAULT_SUCCESS; } else { @@ -1128,40 +1138,41 @@ vm_fault_page( } if (m->vmp_error) { - /* + /* * The page is in error, give up now. */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */ #endif - if (error_code) - *error_code = KERN_MEMORY_ERROR; + if (error_code) { + *error_code = KERN_MEMORY_ERROR; + } VM_PAGE_FREE(m); vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_ERROR); + return VM_FAULT_MEMORY_ERROR; } if (m->vmp_restart) { - /* + /* * The pager wants us to restart * at the top of the chain, * typically because it has moved the * page to another pager, then do so. */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif VM_PAGE_FREE(m); vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } if (m->vmp_absent) { - /* + /* * The page isn't busy, but is absent, * therefore it's deemed "unavailable". * @@ -1170,7 +1181,7 @@ vm_fault_page( * next object (if there is one). */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */ #endif next_object = object->shadow; @@ -1192,17 +1203,18 @@ vm_fault_page( */ error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? 
TRUE : FALSE); - if (error != VM_FAULT_SUCCESS) - return (error); + if (error != VM_FAULT_SUCCESS) { + return error; + } XPR(XPR_VM_FAULT, "vm_f_page: zero obj 0x%X, off 0x%X, page 0x%X, first_obj 0x%X\n", - object, offset, - m, - first_object, 0); + object, offset, + m, + first_object, 0); if (object != first_object) { - /* + /* * free the absent page we just found */ VM_PAGE_FREE(m); @@ -1226,15 +1238,16 @@ vm_fault_page( vm_object_lock(object); } else { - /* + /* * we're going to use the absent page we just found * so convert it to a 'busy' page */ - m->vmp_absent = FALSE; + m->vmp_absent = FALSE; m->vmp_busy = TRUE; } - if (fault_info->mark_zf_absent && no_zero_fill == TRUE) + if (fault_info->mark_zf_absent && no_zero_fill == TRUE) { m->vmp_absent = TRUE; + } /* * zero-fill the page and put it on * the correct paging queue @@ -1243,9 +1256,9 @@ vm_fault_page( break; } else { - if (must_be_resident) + if (must_be_resident) { vm_object_paging_end(object); - else if (object != first_object) { + } else if (object != first_object) { vm_object_paging_end(object); VM_PAGE_FREE(m); } else { @@ -1259,9 +1272,9 @@ vm_fault_page( } XPR(XPR_VM_FAULT, "vm_f_page: unavail obj 0x%X, off 0x%X, next_obj 0x%X, newoff 0x%X\n", - object, offset, - next_object, - offset+object->vo_shadow_offset,0); + object, offset, + next_object, + offset + object->vo_shadow_offset, 0); offset += object->vo_shadow_offset; fault_info->lo_offset += object->vo_shadow_offset; @@ -1295,12 +1308,12 @@ vm_fault_page( * wired mapping. */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */ #endif XPR(XPR_VM_FAULT, "vm_f_page: cleaning obj 0x%X, offset 0x%X, page 0x%X\n", - object, offset, - m, 0, 0); + object, offset, + m, 0, 0); /* * take an extra ref so that object won't die */ @@ -1328,12 +1341,12 @@ vm_fault_page( vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } } if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) && !(fault_info != NULL && fault_info->stealth)) { - /* + /* * If we were passed a non-NULL pointer for * "type_of_fault", than we came from * vm_fault... we'll let it deal with @@ -1348,10 +1361,11 @@ vm_fault_page( * it wants a "stealth" fault, we also leave * the page in the speculative queue. 
*/ - vm_page_lockspin_queues(); - if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) + vm_page_lockspin_queues(); + if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) { vm_page_queues_remove(m, FALSE); - vm_page_unlock_queues(); + } + vm_page_unlock_queues(); } assert(object == VM_PAGE_OBJECT(m)); @@ -1375,11 +1389,11 @@ vm_fault_page( * remove the page from the queue, but not the object */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif XPR(XPR_VM_FAULT, "vm_f_page: found page obj 0x%X, offset 0x%X, page 0x%X\n", - object, offset, m, 0, 0); + object, offset, m, 0, 0); assert(!m->vmp_busy); assert(!m->vmp_absent); @@ -1420,10 +1434,10 @@ vm_fault_page( data_supply = FALSE; - look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply); + look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply); #if TRACEFAULTPAGE - dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (!look_for_page && object == first_object && !object->phys_contiguous) { /* @@ -1431,14 +1445,13 @@ vm_fault_page( */ m = vm_page_grab_options(grab_options); #if TRACEFAULTPAGE - dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (m == VM_PAGE_NULL) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } if (fault_info && fault_info->batch_pmap_op == TRUE) { @@ -1448,8 +1461,8 @@ vm_fault_page( } } if (look_for_page) { - kern_return_t rc; - int my_fault_type; + kern_return_t rc; + int my_fault_type; /* * If the memory manager is not ready, we @@ -1457,14 +1470,15 @@ vm_fault_page( */ if (!object->pager_ready) { #if TRACEFAULTPAGE - dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */ #endif - if (m != VM_PAGE_NULL) - VM_PAGE_FREE(m); + if (m != VM_PAGE_NULL) { + VM_PAGE_FREE(m); + } XPR(XPR_VM_FAULT, - "vm_f_page: ready wait obj 0x%X, offset 0x%X\n", - object, offset, 0, 0, 0); + "vm_f_page: ready wait obj 0x%X, offset 0x%X\n", + object, offset, 0, 0, 0); /* * take an extra ref so object won't die @@ -1480,8 +1494,9 @@ vm_fault_page( wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible); vm_object_unlock(object); - if (wait_result == THREAD_WAITING) + if (wait_result == THREAD_WAITING) { wait_result = thread_block(THREAD_CONTINUE_NULL); + } vm_object_deallocate(object); goto backoff; @@ -1490,7 +1505,7 @@ vm_fault_page( vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } } if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) { @@ -1500,10 +1515,11 @@ vm_fault_page( * wait for them to be resolved now. 
*/ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */ #endif - if (m != VM_PAGE_NULL) + if (m != VM_PAGE_NULL) { VM_PAGE_FREE(m); + } /* * take an extra ref so object won't die */ @@ -1517,7 +1533,7 @@ vm_fault_page( assert(object->ref_count > 0); if (object->paging_in_progress >= vm_object_pagein_throttle) { - vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible); + vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible); vm_object_unlock(object); wait_result = thread_block(THREAD_CONTINUE_NULL); @@ -1529,7 +1545,7 @@ vm_fault_page( vm_object_deallocate(object); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } } if (object->internal) { @@ -1543,14 +1559,13 @@ vm_fault_page( */ m = vm_page_grab_options(grab_options); #if TRACEFAULTPAGE - dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */ #endif if (m == VM_PAGE_NULL) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } m->vmp_absent = TRUE; @@ -1577,7 +1592,7 @@ vm_fault_page( &compressed_count_delta); if (type_of_fault == NULL) { - int throttle_delay; + int throttle_delay; /* * we weren't called from vm_fault, so we @@ -1605,7 +1620,7 @@ vm_fault_page( m->vmp_absent = FALSE; m->vmp_dirty = TRUE; if ((object->wimg_bits & - VM_WIMG_MASK) != + VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) { /* * If the page is not cacheable, @@ -1628,10 +1643,10 @@ vm_fault_page( * update that now. */ if (((object->purgable != - VM_PURGABLE_DENY) || - object->vo_ledger_tag) && + VM_PURGABLE_DENY) || + object->vo_ledger_tag) && (object->vo_owner != - NULL)) { + NULL)) { /* * One less compressed * purgeable/tagged page. @@ -1652,9 +1667,9 @@ vm_fault_page( break; default: panic("vm_fault_page(): unexpected " - "error %d from " - "vm_compressor_pager_get()\n", - rc); + "error %d from " + "vm_compressor_pager_get()\n", + rc); } PAGE_WAKEUP_DONE(m); @@ -1669,7 +1684,7 @@ vm_fault_page( } #if TRACEFAULTPAGE - dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */ #endif /* @@ -1708,15 +1723,16 @@ vm_fault_page( * and its pushing pages up into a copy of * the object that it manages. 
*/ - if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) + if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) { wants_copy_flag = VM_PROT_WANTS_COPY; - else + } else { wants_copy_flag = VM_PROT_NONE; + } XPR(XPR_VM_FAULT, "vm_f_page: data_req obj 0x%X, offset 0x%X, page 0x%X, acc %d\n", - object, offset, m, - access_required | wants_copy_flag, 0); + object, offset, m, + access_required | wants_copy_flag, 0); if (object->copy == first_object) { /* @@ -1770,12 +1786,13 @@ vm_fault_page( if (data_already_requested == TRUE) { fault_info->behavior = orig_behavior; fault_info->cluster_size = orig_cluster_size; - } else + } else { data_already_requested = TRUE; + } DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL); #if TRACEFAULTPAGE - dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */ #endif vm_object_lock(object); @@ -1783,15 +1800,14 @@ vm_fault_page( clear_thread_rwlock_boost(); } - data_requested: +data_requested: if (rc != KERN_SUCCESS) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return ((rc == MACH_SEND_INTERRUPTED) ? - VM_FAULT_INTERRUPTED : - VM_FAULT_MEMORY_ERROR); + return (rc == MACH_SEND_INTERRUPTED) ? + VM_FAULT_INTERRUPTED : + VM_FAULT_MEMORY_ERROR; } else { clock_sec_t tv_sec; clock_usec_t tv_usec; @@ -1803,18 +1819,16 @@ vm_fault_page( } } if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_INTERRUPTED); + return VM_FAULT_INTERRUPTED; } if (force_fault_retry == TRUE) { - vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } if (m == VM_PAGE_NULL && object->phys_contiguous) { /* @@ -1830,7 +1844,7 @@ vm_fault_page( * page fault against the object's new backing * store (different memory object). */ - phys_contig_object: +phys_contig_object: goto done; } /* @@ -1856,17 +1870,18 @@ dont_look_for_page: * the ZF case when the pager can't provide the page */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ #endif - if (object == first_object) + if (object == first_object) { first_m = m; - else + } else { assert(m == VM_PAGE_NULL); + } XPR(XPR_VM_FAULT, "vm_f_page: no pager obj 0x%X, offset 0x%X, page 0x%X, next_obj 0x%X\n", - object, offset, m, - object->shadow, 0); + object, offset, m, + object->shadow, 0); next_object = object->shadow; @@ -1898,8 +1913,9 @@ dont_look_for_page: */ error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? 
TRUE : FALSE); - if (error != VM_FAULT_SUCCESS) - return (error); + if (error != VM_FAULT_SUCCESS) { + return error; + } if (m == VM_PAGE_NULL) { m = vm_page_grab_options(grab_options); @@ -1908,24 +1924,25 @@ dont_look_for_page: vm_fault_cleanup(object, VM_PAGE_NULL); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } vm_page_insert(m, object, offset); } - if (fault_info->mark_zf_absent && no_zero_fill == TRUE) + if (fault_info->mark_zf_absent && no_zero_fill == TRUE) { m->vmp_absent = TRUE; + } my_fault = vm_fault_zero_page(m, no_zero_fill); break; - } else { - /* + /* * Move on to the next object. Lock the next * object before unlocking the current one. */ - if ((object != first_object) || must_be_resident) + if ((object != first_object) || must_be_resident) { vm_object_paging_end(object); + } offset += object->vo_shadow_offset; fault_info->lo_offset += object->vo_shadow_offset; @@ -1959,19 +1976,19 @@ dont_look_for_page: */ #if TRACEFAULTPAGE - dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */ #endif -#if EXTRA_ASSERTIONS +#if EXTRA_ASSERTIONS assert(m->vmp_busy && !m->vmp_absent); assert((first_m == VM_PAGE_NULL) || - (first_m->vmp_busy && !first_m->vmp_absent && - !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded)); -#endif /* EXTRA_ASSERTIONS */ + (first_m->vmp_busy && !first_m->vmp_absent && + !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded)); +#endif /* EXTRA_ASSERTIONS */ XPR(XPR_VM_FAULT, "vm_f_page: FOUND obj 0x%X, off 0x%X, page 0x%X, 1_obj 0x%X, 1_m 0x%X\n", - object, offset, m, - first_object, first_m); + object, offset, m, + first_object, first_m); /* * If the page is being written, but isn't @@ -1980,11 +1997,10 @@ dont_look_for_page: * by the top-level object. */ if (object != first_object) { - #if TRACEFAULTPAGE - dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */ #endif - if (fault_type & VM_PROT_WRITE) { + if (fault_type & VM_PROT_WRITE) { vm_page_t copy_m; /* @@ -2020,12 +2036,12 @@ dont_look_for_page: vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } XPR(XPR_VM_FAULT, "vm_f_page: page_copy obj 0x%X, offset 0x%X, m 0x%X, copy_m 0x%X\n", - object, offset, - m, copy_m, 0); + object, offset, + m, copy_m, 0); vm_page_copy(m, copy_m); @@ -2041,8 +2057,9 @@ dont_look_for_page: * access to this page, then we could * avoid the pmap_disconnect() call. */ - if (m->vmp_pmapped) - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + if (m->vmp_pmapped) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + } if (m->vmp_clustered) { VM_PAGE_COUNT_AS_PAGEIN(m); @@ -2061,7 +2078,7 @@ dont_look_for_page: * the first_object i.e. bring page in from disk, push to object above but * don't update the file object's sequential pattern. 
*/ - if (object->internal == FALSE) { + if (object->internal == FALSE) { vm_fault_is_sequential(object, offset, fault_info->behavior); } @@ -2102,9 +2119,9 @@ dont_look_for_page: vm_object_paging_end(object); vm_object_collapse(object, offset, TRUE); vm_object_paging_begin(object); - - } else - *protection &= (~VM_PROT_WRITE); + } else { + *protection &= (~VM_PROT_WRITE); + } } /* * Now check whether the page needs to be pushed into the @@ -2116,11 +2133,11 @@ dont_look_for_page: try_failed_count = 0; while ((copy_object = first_object->copy) != VM_OBJECT_NULL) { - vm_object_offset_t copy_offset; - vm_page_t copy_m; + vm_object_offset_t copy_offset; + vm_page_t copy_m; #if TRACEFAULTPAGE - dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */ #endif /* * If the page is being written, but hasn't been @@ -2135,18 +2152,18 @@ dont_look_for_page: * If the page was guaranteed to be resident, * we must have already performed the copy. */ - if (must_be_resident) + if (must_be_resident) { break; + } /* * Try to get the lock on the copy_object. */ if (!vm_object_lock_try(copy_object)) { - vm_object_unlock(object); try_failed_count++; - mutex_pause(try_failed_count); /* wait a bit */ + mutex_pause(try_failed_count); /* wait a bit */ vm_object_lock(object); continue; @@ -2165,12 +2182,12 @@ dont_look_for_page: */ copy_offset = first_offset - copy_object->vo_shadow_offset; - if (copy_object->vo_size <= copy_offset) + if (copy_object->vo_size <= copy_offset) { /* * Copy object doesn't cover this page -- do nothing. */ ; - else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) { + } else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) { /* * Page currently exists in the copy object */ @@ -2210,11 +2227,10 @@ dont_look_for_page: vm_object_deallocate(copy_object); thread_interrupt_level(interruptible_state); - return (VM_FAULT_RETRY); + return VM_FAULT_RETRY; } } - } - else if (!PAGED_OUT(copy_object, copy_offset)) { + } else if (!PAGED_OUT(copy_object, copy_offset)) { /* * If PAGED_OUT is TRUE, then the page used to exist * in the copy-object, and has already been paged out. @@ -2240,7 +2256,7 @@ dont_look_for_page: vm_fault_cleanup(object, first_m); thread_interrupt_level(interruptible_state); - return (VM_FAULT_MEMORY_SHORTAGE); + return VM_FAULT_MEMORY_SHORTAGE; } /* * Must copy page into copy-object. @@ -2253,8 +2269,9 @@ dont_look_for_page: * from all pmaps. (We can't know which * pmaps use it.) */ - if (m->vmp_pmapped) - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + if (m->vmp_pmapped) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + } if (m->vmp_clustered) { VM_PAGE_COUNT_AS_PAGEIN(m); @@ -2265,10 +2282,9 @@ dont_look_for_page: * page out this page, using the "initialize" * option. Else, we use the copy. 
*/ - if ((!copy_object->pager_ready) + if ((!copy_object->pager_ready) || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT - ) { - + ) { vm_page_lockspin_queues(); assert(!m->vmp_cleaning); vm_page_activate(copy_m); @@ -2276,9 +2292,7 @@ dont_look_for_page: SET_PAGE_DIRTY(copy_m, TRUE); PAGE_WAKEUP_DONE(copy_m); - } else { - assert(copy_m->vmp_busy == TRUE); assert(!m->vmp_cleaning); @@ -2360,8 +2374,8 @@ done: *top_page = first_m; XPR(XPR_VM_FAULT, - "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n", - object, offset, m, first_m, 0); + "vm_f_page: DONE obj 0x%X, offset 0x%X, m 0x%X, first_m 0x%X\n", + object, offset, m, first_m, 0); if (m != VM_PAGE_NULL) { assert(VM_PAGE_OBJECT(m) == object); @@ -2369,40 +2383,38 @@ done: retval = VM_FAULT_SUCCESS; if (my_fault == DBG_PAGEIN_FAULT) { - VM_PAGE_COUNT_AS_PAGEIN(m); - if (object->internal) + if (object->internal) { my_fault = DBG_PAGEIND_FAULT; - else + } else { my_fault = DBG_PAGEINV_FAULT; + } - /* + /* * evaluate access pattern and update state * vm_fault_deactivate_behind depends on the * state being up to date */ - vm_fault_is_sequential(object, offset, fault_info->behavior); + vm_fault_is_sequential(object, offset, fault_info->behavior); vm_fault_deactivate_behind(object, offset, fault_info->behavior); - } else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) { - /* + /* * we weren't called from vm_fault, so handle the * accounting here for hits in the cache */ - if (m->vmp_clustered) { - VM_PAGE_COUNT_AS_PAGEIN(m); + if (m->vmp_clustered) { + VM_PAGE_COUNT_AS_PAGEIN(m); VM_PAGE_CONSUME_CLUSTERED(m); } - vm_fault_is_sequential(object, offset, fault_info->behavior); + vm_fault_is_sequential(object, offset, fault_info->behavior); vm_fault_deactivate_behind(object, offset, fault_info->behavior); - } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) { - VM_STAT_INCR(decompressions); } - if (type_of_fault) - *type_of_fault = my_fault; + if (type_of_fault) { + *type_of_fault = my_fault; + } } else { retval = VM_FAULT_SUCCESS_NO_VM_PAGE; assert(first_m == VM_PAGE_NULL); @@ -2412,18 +2424,19 @@ done: thread_interrupt_level(interruptible_state); #if TRACEFAULTPAGE - dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ + dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */ #endif return retval; backoff: thread_interrupt_level(interruptible_state); - if (wait_result == THREAD_INTERRUPTED) - return (VM_FAULT_INTERRUPTED); - return (VM_FAULT_RETRY); + if (wait_result == THREAD_INTERRUPTED) { + return VM_FAULT_INTERRUPTED; + } + return VM_FAULT_RETRY; -#undef RELEASE_PAGE +#undef RELEASE_PAGE } @@ -2436,11 +2449,11 @@ backoff: * 3. the page belongs to a code-signed object * 4. the page has not been validated yet or has been mapped for write. 
*/ -#define VM_FAULT_NEED_CS_VALIDATION(pmap, page, page_obj) \ - ((pmap) != kernel_pmap /*1*/ && \ - !(page)->vmp_cs_tainted /*2*/ && \ - (page_obj)->code_signed /*3*/ && \ - (!(page)->vmp_cs_validated || (page)->vmp_wpmapped /*4*/)) +#define VM_FAULT_NEED_CS_VALIDATION(pmap, page, page_obj) \ + ((pmap) != kernel_pmap /*1*/ && \ + !(page)->vmp_cs_tainted /*2*/ && \ + (page_obj)->code_signed /*3*/ && \ + (!(page)->vmp_cs_validated || (page)->vmp_wpmapped /*4*/ )) /* @@ -2459,28 +2472,28 @@ unsigned long cs_enter_tainted_rejected = 0; unsigned long cs_enter_tainted_accepted = 0; kern_return_t vm_fault_enter(vm_page_t m, - pmap_t pmap, - vm_map_offset_t vaddr, - vm_prot_t prot, - vm_prot_t caller_prot, - boolean_t wired, - boolean_t change_wiring, - vm_tag_t wire_tag, - vm_object_fault_info_t fault_info, - boolean_t *need_retry, - int *type_of_fault) + pmap_t pmap, + vm_map_offset_t vaddr, + vm_prot_t prot, + vm_prot_t caller_prot, + boolean_t wired, + boolean_t change_wiring, + vm_tag_t wire_tag, + vm_object_fault_info_t fault_info, + boolean_t *need_retry, + int *type_of_fault) { - kern_return_t kr, pe_result; - boolean_t previously_pmapped = m->vmp_pmapped; - boolean_t must_disconnect = 0; - boolean_t map_is_switched, map_is_switch_protected; - boolean_t cs_violation; - int cs_enforcement_enabled; + kern_return_t kr, pe_result; + boolean_t previously_pmapped = m->vmp_pmapped; + boolean_t must_disconnect = 0; + boolean_t map_is_switched, map_is_switch_protected; + boolean_t cs_violation; + int cs_enforcement_enabled; vm_prot_t fault_type; - vm_object_t object; - boolean_t no_cache = fault_info->no_cache; - boolean_t cs_bypass = fault_info->cs_bypass; - int pmap_options = fault_info->pmap_options; + vm_object_t object; + boolean_t no_cache = fault_info->no_cache; + boolean_t cs_bypass = fault_info->cs_bypass; + int pmap_options = fault_info->pmap_options; fault_type = change_wiring ? VM_PROT_NONE : caller_prot; object = VM_PAGE_OBJECT(m); @@ -2501,15 +2514,13 @@ vm_fault_enter(vm_page_t m, } if (*type_of_fault == DBG_ZERO_FILL_FAULT) { - vm_object_lock_assert_exclusive(object); - } else if ((fault_type & VM_PROT_WRITE) == 0 && - (!m->vmp_wpmapped + (!m->vmp_wpmapped #if VM_OBJECT_ACCESS_TRACKING - || object->access_tracking + || object->access_tracking #endif /* VM_OBJECT_ACCESS_TRACKING */ - )) { + )) { /* * This is not a "write" fault, so we * might not have taken the object lock @@ -2530,7 +2541,6 @@ vm_fault_enter(vm_page_t m, } } if (m->vmp_pmapped == FALSE) { - if (m->vmp_clustered) { if (*type_of_fault == DBG_CACHE_HIT_FAULT) { /* @@ -2539,10 +2549,11 @@ vm_fault_enter(vm_page_t m, * so it must have come in as part of * a cluster... 
account 1 pagein against it */ - if (object->internal) + if (object->internal) { *type_of_fault = DBG_PAGEIND_FAULT; - else + } else { *type_of_fault = DBG_PAGEINV_FAULT; + } VM_PAGE_COUNT_AS_PAGEIN(m); } @@ -2594,11 +2605,11 @@ vm_fault_enter(vm_page_t m, #endif /* PMAP_CS */ } -#define page_immutable(m,prot) ((m)->vmp_cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/) +#define page_immutable(m, prot) ((m)->vmp_cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/ ) #define page_nx(m) ((m)->vmp_cs_nx) map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) && - (pmap == vm_map_pmap(current_thread()->map))); + (pmap == vm_map_pmap(current_thread()->map))); map_is_switch_protected = current_thread()->map->switch_protect; /* If the map is switched, and is switch-protected, we must protect @@ -2613,16 +2624,16 @@ vm_fault_enter(vm_page_t m, */ cs_enforcement_enabled = cs_process_enforcement(NULL); - if(cs_enforcement_enabled && map_is_switched && - map_is_switch_protected && page_immutable(m, prot) && - (prot & VM_PROT_WRITE)) - { + if (cs_enforcement_enabled && map_is_switched && + map_is_switch_protected && page_immutable(m, prot) && + (prot & VM_PROT_WRITE)) { return KERN_CODESIGN_ERROR; } if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) { - if (cs_debug) + if (cs_debug) { printf("page marked to be NX, not letting it be mapped EXEC\n"); + } return KERN_CODESIGN_ERROR; } @@ -2646,8 +2657,8 @@ vm_fault_enter(vm_page_t m, /* no further code-signing enforcement */ cs_violation = FALSE; } else if (page_immutable(m, prot) && - ((prot & VM_PROT_WRITE) || - m->vmp_wpmapped)) { + ((prot & VM_PROT_WRITE) || + m->vmp_wpmapped)) { /* * The page should be immutable, but is in danger of being * modified. @@ -2665,17 +2676,17 @@ vm_fault_enter(vm_page_t m, */ cs_violation = TRUE; } else if (!m->vmp_cs_validated && - (prot & VM_PROT_EXECUTE) + (prot & VM_PROT_EXECUTE) #if PMAP_CS - /* - * Executable pages will be validated by pmap_cs; - * in pmap_cs we trust... - * If pmap_cs is turned off, this is a code-signing - * violation. - */ - && ! (pmap_cs_enforced(pmap)) + /* + * Executable pages will be validated by pmap_cs; + * in pmap_cs we trust... + * If pmap_cs is turned off, this is a code-signing + * violation. + */ + && !(pmap_cs_enforced(pmap)) #endif /* PMAP_CS */ - ) { + ) { cs_violation = TRUE; } else { cs_violation = FALSE; @@ -2690,35 +2701,36 @@ vm_fault_enter(vm_page_t m, * it will not be executing from the map. So we don't call * cs_invalid_page() in that case. */ boolean_t reject_page, cs_killed; - if(map_is_switched) { - assert(pmap==vm_map_pmap(current_thread()->map)); + if (map_is_switched) { + assert(pmap == vm_map_pmap(current_thread()->map)); assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE)); reject_page = FALSE; } else { - if (cs_debug > 5) + if (cs_debug > 5) { printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n", - object->code_signed ? "yes" : "no", - m->vmp_cs_validated ? "yes" : "no", - m->vmp_cs_tainted ? "yes" : "no", - m->vmp_wpmapped ? "yes" : "no", - (int)prot); + object->code_signed ? "yes" : "no", + m->vmp_cs_validated ? "yes" : "no", + m->vmp_cs_tainted ? "yes" : "no", + m->vmp_wpmapped ? 
"yes" : "no", + (int)prot); + } reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed); } if (reject_page) { /* reject the invalid page: abort the page fault */ - int pid; - const char *procname; - task_t task; - vm_object_t file_object, shadow; - vm_object_offset_t file_offset; - char *pathname, *filename; - vm_size_t pathname_len, filename_len; - boolean_t truncated_path; + int pid; + const char *procname; + task_t task; + vm_object_t file_object, shadow; + vm_object_offset_t file_offset; + char *pathname, *filename; + vm_size_t pathname_len, filename_len; + boolean_t truncated_path; #define __PATH_MAX 1024 - struct timespec mtime, cs_mtime; - int shadow_depth; - os_reason_t codesigning_exit_reason = OS_REASON_NULL; + struct timespec mtime, cs_mtime; + int shadow_depth; + os_reason_t codesigning_exit_reason = OS_REASON_NULL; kr = KERN_CODESIGN_ERROR; cs_enter_tainted_rejected++; @@ -2727,17 +2739,18 @@ vm_fault_enter(vm_page_t m, procname = "?"; task = current_task(); pid = proc_selfpid(); - if (task->bsd_info != NULL) + if (task->bsd_info != NULL) { procname = proc_name_address(task->bsd_info); + } /* get file's VM object */ file_object = object; file_offset = m->vmp_offset; for (shadow = file_object->shadow, - shadow_depth = 0; - shadow != VM_OBJECT_NULL; - shadow = file_object->shadow, - shadow_depth++) { + shadow_depth = 0; + shadow != VM_OBJECT_NULL; + shadow = file_object->shadow, + shadow_depth++) { vm_object_lock_shared(shadow); if (file_object != object) { vm_object_unlock(file_object); @@ -2767,44 +2780,44 @@ vm_fault_enter(vm_page_t m, filename_len = __PATH_MAX; } vnode_pager_get_object_name(file_object->pager, - pathname, - pathname_len, - filename, - filename_len, - &truncated_path); + pathname, + pathname_len, + filename, + filename_len, + &truncated_path); if (pathname) { /* safety first... */ - pathname[__PATH_MAX-1] = '\0'; - filename[__PATH_MAX-1] = '\0'; + pathname[__PATH_MAX - 1] = '\0'; + filename[__PATH_MAX - 1] = '\0'; } vnode_pager_get_object_mtime(file_object->pager, - &mtime, - &cs_mtime); + &mtime, + &cs_mtime); } printf("CODE SIGNING: process %d[%s]: " - "rejecting invalid page at address 0x%llx " - "from offset 0x%llx in file \"%s%s%s\" " - "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " - "(signed:%d validated:%d tainted:%d nx:%d " - "wpmapped:%d dirty:%d depth:%d)\n", - pid, procname, (addr64_t) vaddr, - file_offset, - (pathname ? pathname : ""), - (truncated_path ? "/.../" : ""), - (truncated_path ? filename : ""), - cs_mtime.tv_sec, cs_mtime.tv_nsec, - ((cs_mtime.tv_sec == mtime.tv_sec && - cs_mtime.tv_nsec == mtime.tv_nsec) - ? "==" - : "!="), - mtime.tv_sec, mtime.tv_nsec, - object->code_signed, - m->vmp_cs_validated, - m->vmp_cs_tainted, - m->vmp_cs_nx, - m->vmp_wpmapped, - m->vmp_dirty, - shadow_depth); + "rejecting invalid page at address 0x%llx " + "from offset 0x%llx in file \"%s%s%s\" " + "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " + "(signed:%d validated:%d tainted:%d nx:%d " + "wpmapped:%d dirty:%d depth:%d)\n", + pid, procname, (addr64_t) vaddr, + file_offset, + (pathname ? pathname : ""), + (truncated_path ? "/.../" : ""), + (truncated_path ? filename : ""), + cs_mtime.tv_sec, cs_mtime.tv_nsec, + ((cs_mtime.tv_sec == mtime.tv_sec && + cs_mtime.tv_nsec == mtime.tv_nsec) + ? "==" + : "!="), + mtime.tv_sec, mtime.tv_nsec, + object->code_signed, + m->vmp_cs_validated, + m->vmp_cs_tainted, + m->vmp_cs_nx, + m->vmp_wpmapped, + m->vmp_dirty, + shadow_depth); /* * We currently only generate an exit reason if cs_invalid_page directly killed a process. 
If cs_invalid_page @@ -2814,7 +2827,7 @@ vm_fault_enter(vm_page_t m, */ if (cs_killed) { KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE, - pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0); + pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0); codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE); if (codesigning_exit_reason == NULL) { @@ -2828,20 +2841,22 @@ vm_fault_enter(vm_page_t m, printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n"); } else { if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor, - EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) { + EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) { ceri = (struct codesigning_exit_reason_info *)data_addr; static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname)); ceri->ceri_virt_addr = vaddr; ceri->ceri_file_offset = file_offset; - if (pathname) + if (pathname) { strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname)); - else + } else { ceri->ceri_pathname[0] = '\0'; - if (filename) + } + if (filename) { strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename)); - else + } else { ceri->ceri_filename[0] = '\0'; + } ceri->ceri_path_truncated = (truncated_path); ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec; ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec; @@ -2872,29 +2887,29 @@ vm_fault_enter(vm_page_t m, if (panic_on_cs_killed && object->object_is_shared_cache) { panic("CODE SIGNING: process %d[%s]: " - "rejecting invalid page at address 0x%llx " - "from offset 0x%llx in file \"%s%s%s\" " - "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " - "(signed:%d validated:%d tainted:%d nx:%d" - "wpmapped:%d dirty:%d depth:%d)\n", - pid, procname, (addr64_t) vaddr, - file_offset, - (pathname ? pathname : ""), - (truncated_path ? "/.../" : ""), - (truncated_path ? filename : ""), - cs_mtime.tv_sec, cs_mtime.tv_nsec, - ((cs_mtime.tv_sec == mtime.tv_sec && - cs_mtime.tv_nsec == mtime.tv_nsec) - ? "==" - : "!="), - mtime.tv_sec, mtime.tv_nsec, - object->code_signed, - m->vmp_cs_validated, - m->vmp_cs_tainted, - m->vmp_cs_nx, - m->vmp_wpmapped, - m->vmp_dirty, - shadow_depth); + "rejecting invalid page at address 0x%llx " + "from offset 0x%llx in file \"%s%s%s\" " + "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) " + "(signed:%d validated:%d tainted:%d nx:%d" + "wpmapped:%d dirty:%d depth:%d)\n", + pid, procname, (addr64_t) vaddr, + file_offset, + (pathname ? pathname : ""), + (truncated_path ? "/.../" : ""), + (truncated_path ? filename : ""), + cs_mtime.tv_sec, cs_mtime.tv_nsec, + ((cs_mtime.tv_sec == mtime.tv_sec && + cs_mtime.tv_nsec == mtime.tv_nsec) + ? "==" + : "!="), + mtime.tv_sec, mtime.tv_nsec, + object->code_signed, + m->vmp_cs_validated, + m->vmp_cs_tainted, + m->vmp_cs_nx, + m->vmp_wpmapped, + m->vmp_dirty, + shadow_depth); } if (file_object != object) { @@ -2945,8 +2960,8 @@ vm_fault_enter(vm_page_t m, if (kr != KERN_SUCCESS) { if (cs_debug) { printf("CODESIGNING: vm_fault_enter(0x%llx): " - "*** INVALID PAGE ***\n", - (long long)vaddr); + "*** INVALID PAGE ***\n", + (long long)vaddr); } #if !SECURE_KERNEL if (cs_enforcement_panic) { @@ -2954,26 +2969,25 @@ vm_fault_enter(vm_page_t m, } #endif } - } else { /* proceed with the valid page */ kr = KERN_SUCCESS; } - boolean_t page_queues_locked = FALSE; -#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \ -MACRO_BEGIN \ - if (! 
page_queues_locked) { \ - page_queues_locked = TRUE; \ - vm_page_lockspin_queues(); \ - } \ + boolean_t page_queues_locked = FALSE; +#define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \ +MACRO_BEGIN \ + if (! page_queues_locked) { \ + page_queues_locked = TRUE; \ + vm_page_lockspin_queues(); \ + } \ MACRO_END -#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \ -MACRO_BEGIN \ - if (page_queues_locked) { \ - page_queues_locked = FALSE; \ - vm_page_unlock_queues(); \ - } \ +#define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \ +MACRO_BEGIN \ + if (page_queues_locked) { \ + page_queues_locked = FALSE; \ + vm_page_unlock_queues(); \ + } \ MACRO_END /* @@ -2993,17 +3007,16 @@ MACRO_END */ assert(object == compressor_object); } else if (change_wiring) { - __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); + __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); if (wired) { if (kr == KERN_SUCCESS) { vm_page_wire(m, wire_tag, TRUE); } } else { - vm_page_unwire(m, TRUE); + vm_page_unwire(m, TRUE); } /* we keep the page queues lock, if we need it later */ - } else { if (object->internal == TRUE) { /* @@ -3012,21 +3025,20 @@ MACRO_END */ no_cache = FALSE; } - if (kr != KERN_SUCCESS) { - __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); - vm_page_deactivate(m); + if (kr != KERN_SUCCESS) { + __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); + vm_page_deactivate(m); /* we keep the page queues lock, if we need it later */ } else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || - (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) || - (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) || - ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) && - !VM_PAGE_WIRED(m)) { - + (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) || + (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) || + ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) && + !VM_PAGE_WIRED(m)) { if (vm_page_local_q && (*type_of_fault == DBG_COW_FAULT || - *type_of_fault == DBG_ZERO_FILL_FAULT) ) { - struct vpl *lq; - uint32_t lid; + *type_of_fault == DBG_ZERO_FILL_FAULT)) { + struct vpl *lq; + uint32_t lid; assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q); @@ -3054,21 +3066,20 @@ MACRO_END VPL_LOCK(&lq->vpl_lock); vm_page_check_pageable_safe(m); - vm_page_queue_enter(&lq->vpl_queue, m, - vm_page_t, vmp_pageq); + vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq); m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q; m->vmp_local_id = lid; lq->vpl_count++; - if (object->internal) + if (object->internal) { lq->vpl_internal_count++; - else + } else { lq->vpl_external_count++; + } VPL_UNLOCK(&lq->vpl_lock); - if (lq->vpl_count > vm_page_local_q_soft_limit) - { + if (lq->vpl_count > vm_page_local_q_soft_limit) { /* * we're beyond the soft limit * for the local queue @@ -3088,7 +3099,6 @@ MACRO_END vm_page_reactivate_local(lid, FALSE, FALSE); } } else { - __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(); /* @@ -3103,8 +3113,8 @@ MACRO_END VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1); } - if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m) || - no_cache) { + if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) || + no_cache) { /* * If this is a no_cache mapping * and the page has never been @@ -3120,13 +3130,13 @@ MACRO_END if (no_cache && (!previously_pmapped || - m->vmp_no_cache)) { + m->vmp_no_cache)) { m->vmp_no_cache = TRUE; - if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) + if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { vm_page_speculate(m, FALSE); - - } else if ( !VM_PAGE_ACTIVE_OR_INACTIVE(m)) { + } + } else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) { vm_page_activate(m); } } @@ -3146,14 +3156,14 @@ MACRO_END * now so those processes can take note. 
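/*
 * A minimal user-space sketch of the take-at-most-once locking idiom that
 * the __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED / __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED
 * macros above implement: several conditional paths may each need the page
 * queues lock, so the first path to need it takes it, later paths see it is
 * already held, and a single epilogue releases it.  pthreads stands in for
 * the kernel spinlock; every name below is illustrative, not kernel API.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t queues_lock = PTHREAD_MUTEX_INITIALIZER;

#define LOCK_QUEUES_IF_NEEDED(held)                       \
	do {                                              \
		if (!(held)) {                            \
			(held) = true;                    \
			pthread_mutex_lock(&queues_lock); \
		}                                         \
	} while (0)

#define UNLOCK_QUEUES_IF_NEEDED(held)                       \
	do {                                                \
		if (held) {                                 \
			(held) = false;                     \
			pthread_mutex_unlock(&queues_lock); \
		}                                           \
	} while (0)

static void
touch_queues(bool first_path, bool second_path)
{
	bool held = false;

	if (first_path) {
		LOCK_QUEUES_IF_NEEDED(held);	/* first taker locks */
		/* ... queue manipulation ... */
	}
	if (second_path) {
		LOCK_QUEUES_IF_NEEDED(held);	/* no-op if already held */
		/* ... more work under the same hold ... */
	}
	UNLOCK_QUEUES_IF_NEEDED(held);		/* release only if taken */
}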
*/ if (kr == KERN_SUCCESS) { - /* + /* * NOTE: we may only hold the vm_object lock SHARED * at this point, so we need the phys_page lock to * properly serialize updating the pmapped and * xpmapped bits */ if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) { - ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); pmap_lock_phys_page(phys_page); /* @@ -3165,13 +3175,13 @@ MACRO_END m->vmp_pmapped = TRUE; if (!m->vmp_xpmapped) { - m->vmp_xpmapped = TRUE; pmap_unlock_phys_page(phys_page); - if (!object->internal) + if (!object->internal) { OSAddAtomic(1, &vm_page_xpmapped_external_count); + } #if defined(__arm__) || defined(__arm64__) pmap_sync_page_data_phys(phys_page); @@ -3193,11 +3203,12 @@ MACRO_END pmap_sync_page_data_phys(phys_page); } #endif - } else + } else { pmap_unlock_phys_page(phys_page); + } } else { if (m->vmp_pmapped == FALSE) { - ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); + ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m); pmap_lock_phys_page(phys_page); m->vmp_pmapped = TRUE; @@ -3206,7 +3217,6 @@ MACRO_END } if (fault_type & VM_PROT_WRITE) { - if (m->vmp_wpmapped == FALSE) { vm_object_lock_assert_exclusive(object); if (!object->internal && object->pager) { @@ -3230,7 +3240,7 @@ MACRO_END * gets tainted, so we won't get stuck here * to make an already writeable page executable. */ - if (!cs_bypass){ + if (!cs_bypass) { assert(!pmap_has_prot_policy(prot)); prot &= ~VM_PROT_EXECUTE; } @@ -3269,9 +3279,9 @@ MACRO_END * holding the object lock if we need to wait for a page in * pmap_enter() - */ PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, 0, - wired, - pmap_options | PMAP_OPTIONS_NOWAIT, - pe_result); + wired, + pmap_options | PMAP_OPTIONS_NOWAIT, + pe_result); #if __x86_64__ if (pe_result == KERN_INVALID_ARGUMENT && pmap == PMAP_NULL && @@ -3286,8 +3296,7 @@ MACRO_END } #endif /* __x86_64__ */ - if(pe_result == KERN_RESOURCE_SHORTAGE) { - + if (pe_result == KERN_RESOURCE_SHORTAGE) { if (need_retry) { /* * this will be non-null in the case where we hold the lock @@ -3317,8 +3326,8 @@ MACRO_END vm_object_unlock(object); PMAP_ENTER_OPTIONS(pmap, vaddr, m, prot, fault_type, - 0, wired, - pmap_options, pe_result); + 0, wired, + pmap_options, pe_result); assert(VM_PAGE_OBJECT(m) == object); @@ -3328,7 +3337,7 @@ MACRO_END /* If the page was busy, someone else will wake it up. * Otherwise, we have to do it now. 
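/*
 * A sketch of the vmp_pmapped/vmp_xpmapped bookkeeping above: the object
 * lock may only be held shared at that point, so a per-physical-page lock
 * (pmap_lock_phys_page in the hunk) serializes the bit updates, and only
 * the first executable mapping pays for the cache synchronization that the
 * arm/arm64 branch performs via pmap_sync_page_data_phys.  Types and names
 * below are stand-ins; the caller is assumed to initialize the mutex.
 */
#include <pthread.h>
#include <stdbool.h>

struct phys_page {
	pthread_mutex_t lock;	/* per-physical-page lock stand-in */
	bool pmapped;		/* ever entered into any pmap */
	bool xpmapped;		/* ever entered with execute permission */
};

static void sync_icache(struct phys_page *p) { (void)p; /* arm-only work */ }

static void
note_pmap_enter(struct phys_page *p, bool executable)
{
	pthread_mutex_lock(&p->lock);
	p->pmapped = true;
	if (executable && !p->xpmapped) {
		p->xpmapped = true;
		pthread_mutex_unlock(&p->lock);
		sync_icache(p);		/* first executable mapping only */
		return;
	}
	pthread_mutex_unlock(&p->lock);
}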
*/ assert(m->vmp_busy); - if(!was_busy) { + if (!was_busy) { PAGE_WAKEUP_DONE(m); } vm_pmap_enter_blocked++; @@ -3345,15 +3354,14 @@ void vm_pre_fault(vm_map_offset_t vaddr) { if (pmap_find_phys(current_map()->pmap, vaddr) == 0) { - vm_fault(current_map(), /* map */ - vaddr, /* vaddr */ - VM_PROT_READ, /* fault_type */ - FALSE, /* change_wiring */ - VM_KERN_MEMORY_NONE, /* tag - not wiring */ - THREAD_UNINT, /* interruptible */ - NULL, /* caller_pmap */ - 0 /* caller_pmap_addr */); + vaddr, /* vaddr */ + VM_PROT_READ, /* fault_type */ + FALSE, /* change_wiring */ + VM_KERN_MEMORY_NONE, /* tag - not wiring */ + THREAD_UNINT, /* interruptible */ + NULL, /* caller_pmap */ + 0 /* caller_pmap_addr */); } } @@ -3373,7 +3381,7 @@ vm_pre_fault(vm_map_offset_t vaddr) */ extern int _map_enter_debug; -extern uint64_t get_current_unique_pid(void); +extern uint64_t get_current_unique_pid(void); unsigned long vm_fault_collapse_total = 0; unsigned long vm_fault_collapse_skipped = 0; @@ -3381,89 +3389,89 @@ unsigned long vm_fault_collapse_skipped = 0; kern_return_t vm_fault_external( - vm_map_t map, - vm_map_offset_t vaddr, - vm_prot_t fault_type, - boolean_t change_wiring, - int interruptible, - pmap_t caller_pmap, - vm_map_offset_t caller_pmap_addr) + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t fault_type, + boolean_t change_wiring, + int interruptible, + pmap_t caller_pmap, + vm_map_offset_t caller_pmap_addr) { return vm_fault_internal(map, vaddr, fault_type, change_wiring, vm_tag_bt(), - interruptible, caller_pmap, caller_pmap_addr, - NULL); + interruptible, caller_pmap, caller_pmap_addr, + NULL); } kern_return_t vm_fault( - vm_map_t map, - vm_map_offset_t vaddr, - vm_prot_t fault_type, - boolean_t change_wiring, - vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ - int interruptible, - pmap_t caller_pmap, - vm_map_offset_t caller_pmap_addr) + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t fault_type, + boolean_t change_wiring, + vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ + int interruptible, + pmap_t caller_pmap, + vm_map_offset_t caller_pmap_addr) { return vm_fault_internal(map, vaddr, fault_type, change_wiring, wire_tag, - interruptible, caller_pmap, caller_pmap_addr, - NULL); + interruptible, caller_pmap, caller_pmap_addr, + NULL); } kern_return_t vm_fault_internal( - vm_map_t map, - vm_map_offset_t vaddr, - vm_prot_t caller_prot, - boolean_t change_wiring, - vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ - int interruptible, - pmap_t caller_pmap, - vm_map_offset_t caller_pmap_addr, - ppnum_t *physpage_p) + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t caller_prot, + boolean_t change_wiring, + vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ + int interruptible, + pmap_t caller_pmap, + vm_map_offset_t caller_pmap_addr, + ppnum_t *physpage_p) { - vm_map_version_t version; /* Map version for verificiation */ - boolean_t wired; /* Should mapping be wired down? 
*/ - vm_object_t object; /* Top-level object */ - vm_object_offset_t offset; /* Top-level offset */ - vm_prot_t prot; /* Protection for mapping */ - vm_object_t old_copy_object; /* Saved copy object */ - vm_page_t result_page; /* Result of vm_fault_page */ - vm_page_t top_page; /* Placeholder page */ - kern_return_t kr; - - vm_page_t m; /* Fast access to result_page */ - kern_return_t error_code; - vm_object_t cur_object; - vm_object_t m_object = NULL; - vm_object_offset_t cur_offset; - vm_page_t cur_m; - vm_object_t new_object; + vm_map_version_t version; /* Map version for verificiation */ + boolean_t wired; /* Should mapping be wired down? */ + vm_object_t object; /* Top-level object */ + vm_object_offset_t offset; /* Top-level offset */ + vm_prot_t prot; /* Protection for mapping */ + vm_object_t old_copy_object; /* Saved copy object */ + vm_page_t result_page; /* Result of vm_fault_page */ + vm_page_t top_page; /* Placeholder page */ + kern_return_t kr; + + vm_page_t m; /* Fast access to result_page */ + kern_return_t error_code; + vm_object_t cur_object; + vm_object_t m_object = NULL; + vm_object_offset_t cur_offset; + vm_page_t cur_m; + vm_object_t new_object; int type_of_fault; - pmap_t pmap; - wait_interrupt_t interruptible_state; - vm_map_t real_map = map; - vm_map_t original_map = map; - boolean_t object_locks_dropped = FALSE; - vm_prot_t fault_type; - vm_prot_t original_fault_type; + pmap_t pmap; + wait_interrupt_t interruptible_state; + vm_map_t real_map = map; + vm_map_t original_map = map; + boolean_t object_locks_dropped = FALSE; + vm_prot_t fault_type; + vm_prot_t original_fault_type; struct vm_object_fault_info fault_info = {}; - boolean_t need_collapse = FALSE; - boolean_t need_retry = FALSE; - boolean_t *need_retry_ptr = NULL; - int object_lock_type = 0; - int cur_object_lock_type; - vm_object_t top_object = VM_OBJECT_NULL; - vm_object_t written_on_object = VM_OBJECT_NULL; - memory_object_t written_on_pager = NULL; - vm_object_offset_t written_on_offset = 0; - int throttle_delay; - int compressed_count_delta; - int grab_options; - vm_map_offset_t trace_vaddr; - vm_map_offset_t trace_real_vaddr; + boolean_t need_collapse = FALSE; + boolean_t need_retry = FALSE; + boolean_t *need_retry_ptr = NULL; + int object_lock_type = 0; + int cur_object_lock_type; + vm_object_t top_object = VM_OBJECT_NULL; + vm_object_t written_on_object = VM_OBJECT_NULL; + memory_object_t written_on_pager = NULL; + vm_object_offset_t written_on_offset = 0; + int throttle_delay; + int compressed_count_delta; + int grab_options; + vm_map_offset_t trace_vaddr; + vm_map_offset_t trace_real_vaddr; #if DEVELOPMENT || DEBUG - vm_map_offset_t real_vaddr; + vm_map_offset_t real_vaddr; real_vaddr = vaddr; #endif /* DEVELOPMENT || DEBUG */ @@ -3478,23 +3486,23 @@ vm_fault_internal( } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START, - ((uint64_t)trace_vaddr >> 32), - trace_vaddr, - (map == kernel_map), - 0, - 0); + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, + (map == kernel_map), + 0, + 0); if (get_preemption_level() != 0) { - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, - ((uint64_t)trace_vaddr >> 32), - trace_vaddr, - KERN_FAILURE, - 0, - 0); - - return (KERN_FAILURE); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, + KERN_FAILURE, + 0, + 0); + + return KERN_FAILURE; } thread_t 
cthread = current_thread(); @@ -3513,10 +3521,11 @@ vm_fault_internal( current_task()->faults++; original_fault_type = fault_type; - if (fault_type & VM_PROT_WRITE) - object_lock_type = OBJECT_LOCK_EXCLUSIVE; - else - object_lock_type = OBJECT_LOCK_SHARED; + if (fault_type & VM_PROT_WRITE) { + object_lock_type = OBJECT_LOCK_EXCLUSIVE; + } else { + object_lock_type = OBJECT_LOCK_SHARED; + } cur_object_lock_type = OBJECT_LOCK_SHARED; @@ -3524,7 +3533,6 @@ vm_fault_internal( if (compressor_map) { if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) { panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map)); - } } } @@ -3547,10 +3555,10 @@ RetryFault: vm_map_lock_read(map); kr = vm_map_lookup_locked(&map, vaddr, fault_type, - object_lock_type, &version, - &object, &offset, &prot, &wired, - &fault_info, - &real_map); + object_lock_type, &version, + &object, &offset, &prot, &wired, + &fault_info, + &real_map); if (kr != KERN_SUCCESS) { vm_map_unlock_read(map); @@ -3574,20 +3582,19 @@ RetryFault: * we must hold the top object lock exclusively */ if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade, so explictly * take the lock exclusively */ - vm_object_lock(object); + vm_object_lock(object); } } } -#if VM_FAULT_CLASSIFY +#if VM_FAULT_CLASSIFY /* * Temporary data gathering code */ @@ -3607,7 +3614,7 @@ RetryFault: * and use the original fault path (which doesn't hold * the map lock, and relies on busy pages). * The give up cases include: - * - Have to talk to pager. + * - Have to talk to pager. * - Page is busy, absent or in error. * - Pager has locked out desired access. * - Fault needs to be restarted. @@ -3615,7 +3622,7 @@ RetryFault: * * The code is an infinite loop that moves one level down * the shadow chain each time. cur_object and cur_offset - * refer to the current object being examined. object and offset + * refer to the current object being examined. object and offset * are the original object from the map. The loop is at the * top level if and only if object and cur_object are the same. * @@ -3631,16 +3638,16 @@ RetryFault: * pmap that enforces execute-only protection. */ if (fault_type == VM_PROT_READ && - (prot & VM_PROT_EXECUTE) && - !(prot & VM_PROT_READ) && - pmap_enforces_execute_only(pmap)) { - vm_object_unlock(object); - vm_map_unlock_read(map); - if (real_map != map) { - vm_map_unlock(real_map); - } - kr = KERN_PROTECTION_FAILURE; - goto done; + (prot & VM_PROT_EXECUTE) && + !(prot & VM_PROT_READ) && + pmap_enforces_execute_only(pmap)) { + vm_object_unlock(object); + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } + kr = KERN_PROTECTION_FAILURE; + goto done; } #endif @@ -3650,8 +3657,9 @@ RetryFault: * copy delay strategy is implemented in the slow fault page. 
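/*
 * The execute-only check above, restated as a standalone predicate.  A read
 * fault on a mapping that grants execute but not read must fail with
 * KERN_PROTECTION_FAILURE when the pmap can actually enforce --x mappings;
 * otherwise the fault handler would quietly grant readability.  The
 * VM_PROT_* values match <mach/vm_prot.h>; the predicate itself is a
 * sketch, not kernel code.
 */
#include <stdbool.h>

#define VM_PROT_READ    0x01
#define VM_PROT_WRITE   0x02
#define VM_PROT_EXECUTE 0x04

static bool
read_fault_on_execute_only(int fault_type, int prot, bool pmap_enforces_xo)
{
	return fault_type == VM_PROT_READ &&
	    (prot & VM_PROT_EXECUTE) &&
	    !(prot & VM_PROT_READ) &&
	    pmap_enforces_xo;
}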
*/ if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY && - object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) - goto handle_copy_delay; + object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) { + goto handle_copy_delay; + } cur_object = object; cur_offset = offset; @@ -3665,8 +3673,9 @@ RetryFault: while (TRUE) { if (!cur_object->pager_created && - cur_object->phys_contiguous) /* superpage */ + cur_object->phys_contiguous) { /* superpage */ break; + } if (cur_object->blocked_access) { /* @@ -3683,20 +3692,18 @@ RetryFault: m_object = cur_object; if (m->vmp_busy) { - wait_result_t result; + wait_result_t result; /* * in order to do the PAGE_ASSERT_WAIT, we must * have object that 'm' belongs to locked exclusively */ if (object != cur_object) { - if (cur_object_lock_type == OBJECT_LOCK_SHARED) { - - cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(cur_object) == FALSE) { - /* + /* * couldn't upgrade so go do a full retry * immediately since we can no longer be * certain about cur_object (since we @@ -3705,19 +3712,19 @@ RetryFault: */ vm_object_unlock(object); - vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } goto RetryFault; } } } else if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade, so explictly take the lock * exclusively and go relookup the page since we * will have dropped the object lock and @@ -3726,7 +3733,7 @@ RetryFault: * no need for a full retry since we're * at the top level of the object chain */ - vm_object_lock(object); + vm_object_lock(object); continue; } @@ -3754,24 +3761,27 @@ RetryFault: } vm_page_unlock_queues(); } - if (object != cur_object) + if (object != cur_object) { vm_object_unlock(object); + } vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); + if (real_map != map) { + vm_map_unlock(real_map); + } result = PAGE_ASSERT_WAIT(m, interruptible); vm_object_unlock(cur_object); if (result == THREAD_WAITING) { - result = thread_block(THREAD_CONTINUE_NULL); + result = thread_block(THREAD_CONTINUE_NULL); counter(c_vm_fault_page_block_busy_kernel++); } - if (result == THREAD_AWAKENED || result == THREAD_RESTART) - goto RetryFault; + if (result == THREAD_AWAKENED || result == THREAD_RESTART) { + goto RetryFault; + } kr = KERN_ABORTED; goto done; @@ -3786,14 +3796,13 @@ reclaimed_from_pageout: vm_object_unlock(cur_object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } goto RetryFault; } - } else if (object_lock_type == OBJECT_LOCK_SHARED) { - object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { @@ -3821,17 +3830,19 @@ reclaimed_from_pageout: break; } if (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_private || m->vmp_absent)) { - /* + /* * Unusual case... 
let the slow path deal with it */ break; } if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) { - if (object != cur_object) + if (object != cur_object) { vm_object_unlock(object); + } vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); + if (real_map != map) { + vm_map_unlock(real_map); + } vm_object_unlock(cur_object); kr = KERN_MEMORY_ERROR; goto done; @@ -3846,26 +3857,25 @@ upgrade_for_validation: * against its code signature, so we * want to hold the VM object exclusively. */ - if (object != cur_object) { + if (object != cur_object) { if (cur_object_lock_type == OBJECT_LOCK_SHARED) { vm_object_unlock(object); vm_object_unlock(cur_object); - cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; + cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } goto RetryFault; } - } else if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade, so explictly take the lock * exclusively and go relookup the page since we * will have dropped the object lock and @@ -3874,7 +3884,7 @@ upgrade_for_validation: * no need for a full retry since we're * at the top level of the object chain */ - vm_object_lock(object); + vm_object_lock(object); continue; } @@ -3888,7 +3898,6 @@ upgrade_for_validation: */ if (object == cur_object && object->copy == VM_OBJECT_NULL) { - goto FastPmapEnter; } @@ -3908,8 +3917,8 @@ upgrade_for_validation: assert(fault_info.cs_bypass); } - if (object != cur_object) { - /* + if (object != cur_object) { + /* * We still need to hold the top object * lock here to prevent a race between * a read fault (taking only "shared" @@ -3946,50 +3955,52 @@ FastPmapEnter: * cur_object == NULL or it's been unlocked * no paging references on either object or cur_object */ - if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) + if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) { need_retry_ptr = &need_retry; - else + } else { need_retry_ptr = NULL; + } if (caller_pmap) { - kr = vm_fault_enter(m, - caller_pmap, - caller_pmap_addr, - prot, - caller_prot, - wired, - change_wiring, - wire_tag, - &fault_info, - need_retry_ptr, - &type_of_fault); + kr = vm_fault_enter(m, + caller_pmap, + caller_pmap_addr, + prot, + caller_prot, + wired, + change_wiring, + wire_tag, + &fault_info, + need_retry_ptr, + &type_of_fault); } else { - kr = vm_fault_enter(m, - pmap, - vaddr, - prot, - caller_prot, - wired, - change_wiring, - wire_tag, - &fault_info, - need_retry_ptr, - &type_of_fault); + kr = vm_fault_enter(m, + pmap, + vaddr, + prot, + caller_prot, + wired, + change_wiring, + wire_tag, + &fault_info, + need_retry_ptr, + &type_of_fault); } #if DEVELOPMENT || DEBUG { - int event_code = 0; + int event_code = 0; - if (m_object->internal) - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); - else if (m_object->object_is_shared_cache) - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); - else - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); + if (m_object->internal) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); + } else if (m_object->object_is_shared_cache) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); + } else { + event_code = 
(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); + } - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0); - DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); + DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); } #endif if (kr == KERN_SUCCESS && @@ -4016,28 +4027,29 @@ FastPmapEnter: top_object = VM_OBJECT_NULL; } - if (need_collapse == TRUE) - vm_object_collapse(object, offset, TRUE); + if (need_collapse == TRUE) { + vm_object_collapse(object, offset, TRUE); + } if (need_retry == FALSE && (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) { - /* + /* * evaluate access pattern and update state * vm_fault_deactivate_behind depends on the * state being up to date */ - vm_fault_is_sequential(m_object, cur_offset, fault_info.behavior); + vm_fault_is_sequential(m_object, cur_offset, fault_info.behavior); vm_fault_deactivate_behind(m_object, cur_offset, fault_info.behavior); } /* * That's it, clean up and return. */ - if (m->vmp_busy) - PAGE_WAKEUP_DONE(m); + if (m->vmp_busy) { + PAGE_WAKEUP_DONE(m); + } if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) { - vm_object_paging_begin(m_object); assert(written_on_object == VM_OBJECT_NULL); @@ -4048,8 +4060,9 @@ FastPmapEnter: vm_object_unlock(object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } if (need_retry == TRUE) { /* @@ -4074,7 +4087,7 @@ FastPmapEnter: */ assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE); - /* + /* * If objects match, then * object->copy must not be NULL (else control * would be in previous code block), and we @@ -4082,7 +4095,7 @@ FastPmapEnter: * with which we can't cope with here. */ if (cur_object == object) { - /* + /* * must take the slow path to * deal with the copy push */ @@ -4118,7 +4131,7 @@ FastPmapEnter: m_object = NULL; if (m == VM_PAGE_NULL) { - /* + /* * no free page currently available... * must take the slow path */ @@ -4138,8 +4151,9 @@ FastPmapEnter: /* * Now cope with the source page and object */ - if (object->ref_count > 1 && cur_m->vmp_pmapped) - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m)); + if (object->ref_count > 1 && cur_m->vmp_pmapped) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m)); + } if (cur_m->vmp_clustered) { VM_PAGE_COUNT_AS_PAGEIN(cur_m); @@ -4150,7 +4164,7 @@ FastPmapEnter: if (!cur_object->internal && cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) { - /* + /* * The object from which we've just * copied a page is most probably backed * by a vnode. We don't want to waste too @@ -4158,25 +4172,26 @@ FastPmapEnter: * and create a bottleneck when several tasks * map the same file. */ - if (cur_object->copy == object) { - /* + if (cur_object->copy == object) { + /* * Shared mapping or no COW yet. * We can never collapse a copy * object into its backing object. 
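/*
 * A toy model of the bookkeeping behind the vm_fault_is_sequential() /
 * vm_fault_deactivate_behind() calls above: remember the last faulted
 * offset per object and a signed run length, and once the run is long
 * enough, pages behind the run become eviction candidates.  The fields,
 * threshold, and return convention here are invented for illustration;
 * the real heuristic also weighs fault_info.behavior.
 */
#include <stdbool.h>
#include <stdint.h>

#define TOY_PAGE_SIZE 4096

struct seq_state {
	uint64_t last_fault;	/* offset of the previous fault */
	int64_t  run;		/* signed sequential run, in bytes */
};

static bool
note_fault(struct seq_state *s, uint64_t offset)
{
	if (offset == s->last_fault + TOY_PAGE_SIZE)
		s->run += TOY_PAGE_SIZE;	/* forward run continues */
	else if (offset + TOY_PAGE_SIZE == s->last_fault)
		s->run -= TOY_PAGE_SIZE;	/* backward run continues */
	else
		s->run = 0;			/* pattern broken */
	s->last_fault = offset;
	/* deactivate-behind once the run clears a threshold */
	return s->run >= 8 * TOY_PAGE_SIZE || s->run <= -8 * TOY_PAGE_SIZE;
}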
*/ - need_collapse = FALSE; + need_collapse = FALSE; } else if (cur_object->copy == object->shadow && - object->shadow->resident_page_count == 0) { - /* + object->shadow->resident_page_count == 0) { + /* * Shared mapping after a COW occurred. */ - need_collapse = FALSE; + need_collapse = FALSE; } } vm_object_unlock(cur_object); - if (need_collapse == FALSE) - vm_fault_collapse_skipped++; + if (need_collapse == FALSE) { + vm_fault_collapse_skipped++; + } vm_fault_collapse_total++; type_of_fault = DBG_COW_FAULT; @@ -4185,20 +4200,19 @@ FastPmapEnter: current_task()->cow_faults++; goto FastPmapEnter; - } else { /* * No page at cur_object, cur_offset... m == NULL */ if (cur_object->pager_created) { - int compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN; + int compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN; - if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) { - int my_fault_type; - int c_flags = C_DONT_BLOCK; - boolean_t insert_cur_object = FALSE; + if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) { + int my_fault_type; + int c_flags = C_DONT_BLOCK; + boolean_t insert_cur_object = FALSE; - /* + /* * May have to talk to a pager... * if so, take the slow path by * doing a 'break' from the while (TRUE) loop @@ -4206,8 +4220,9 @@ FastPmapEnter: * external_state will only be set to VM_EXTERNAL_STATE_EXISTS * if the compressor is active and the page exists there */ - if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) + if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) { break; + } if (map == kernel_map || real_map == kernel_map) { /* @@ -4218,15 +4233,14 @@ FastPmapEnter: break; } if (object != cur_object) { - if (fault_type & VM_PROT_WRITE) + if (fault_type & VM_PROT_WRITE) { c_flags |= C_KEEP; - else + } else { insert_cur_object = TRUE; + } } if (insert_cur_object == TRUE) { - if (cur_object_lock_type == OBJECT_LOCK_SHARED) { - cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(cur_object) == FALSE) { @@ -4240,14 +4254,14 @@ FastPmapEnter: vm_object_unlock(object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } goto RetryFault; } } } else if (object_lock_type == OBJECT_LOCK_SHARED) { - object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (object != cur_object) { @@ -4265,8 +4279,9 @@ FastPmapEnter: vm_object_unlock(cur_object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } goto RetryFault; } @@ -4303,9 +4318,9 @@ FastPmapEnter: */ boolean_t shared_lock; if ((object == cur_object && - object_lock_type == OBJECT_LOCK_EXCLUSIVE) || + object_lock_type == OBJECT_LOCK_EXCLUSIVE) || (object != cur_object && - cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) { + cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) { shared_lock = FALSE; } else { shared_lock = TRUE; @@ -4314,7 +4329,7 @@ FastPmapEnter: kr = vm_compressor_pager_get( cur_object->pager, (cur_offset + - cur_object->paging_offset), + cur_object->paging_offset), VM_PAGE_GET_PHYS_PAGE(m), &my_fault_type, c_flags, @@ -4360,10 +4375,10 @@ FastPmapEnter: * case. 
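/*
 * The compressor-pager flag/placement choice visible above, condensed.
 * When the compressed page lives below the faulting object and the fault
 * is a write, the decompressed page will immediately be copied up, so
 * C_KEEP preserves the compressed copy for other mappings; a read fault
 * instead inserts the page into the lower object so all mappings share it.
 * Flag values here are illustrative; the decision logic mirrors the hunk.
 */
#include <stdbool.h>

#define C_DONT_BLOCK 0x1
#define C_KEEP       0x2

struct decomp_plan {
	int  c_flags;
	bool insert_into_cur_object;	/* vs. the top (faulting) object */
};

static struct decomp_plan
plan_decompress(bool top_is_cur, bool write_fault)
{
	struct decomp_plan p = { C_DONT_BLOCK, false };

	if (!top_is_cur) {
		if (write_fault)
			p.c_flags |= C_KEEP;	/* keep compressed copy */
		else
			p.insert_into_cur_object = true;
	}
	return p;
}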
*/ } else if (((cur_object->purgable == - VM_PURGABLE_DENY) && - (!cur_object->vo_ledger_tag)) || - (cur_object->vo_owner == - NULL)) { + VM_PURGABLE_DENY) && + (!cur_object->vo_ledger_tag)) || + (cur_object->vo_owner == + NULL)) { /* * "cur_object" is not purgeable * and is not ledger-taged, or @@ -4391,7 +4406,7 @@ FastPmapEnter: } if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) { - /* + /* * If the page is not cacheable, * we can't let its contents * linger in the data cache @@ -4434,36 +4449,38 @@ FastPmapEnter: cur_object == compressor_object || cur_object == kernel_object || cur_object == vm_submap_object) { - if (object != cur_object) - vm_object_unlock(cur_object); + if (object != cur_object) { + vm_object_unlock(cur_object); + } vm_object_unlock(object); vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } kr = KERN_MEMORY_ERROR; goto done; } - if (cur_object != object) { + if (cur_object != object) { vm_object_unlock(cur_object); cur_object = object; } if (object_lock_type == OBJECT_LOCK_SHARED) { - - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade so do a full retry on the fault * since we dropped the object lock which * could allow another thread to insert * a page at this offset */ - vm_map_unlock_read(map); - if (real_map != map) - vm_map_unlock(real_map); + vm_map_unlock_read(map); + if (real_map != map) { + vm_map_unlock(real_map); + } goto RetryFault; } @@ -4472,7 +4489,7 @@ FastPmapEnter: m_object = NULL; if (m == VM_PAGE_NULL) { - /* + /* * no free page currently available... * must take the slow path */ @@ -4492,7 +4509,7 @@ FastPmapEnter: type_of_fault = vm_fault_zero_page(m, map->no_zero_fill); goto FastPmapEnter; - } + } /* * On to the next level in the shadow chain */ @@ -4502,13 +4519,15 @@ FastPmapEnter: /* * take the new_object's lock with the indicated state */ - if (cur_object_lock_type == OBJECT_LOCK_SHARED) - vm_object_lock_shared(new_object); - else - vm_object_lock(new_object); + if (cur_object_lock_type == OBJECT_LOCK_SHARED) { + vm_object_lock_shared(new_object); + } else { + vm_object_lock(new_object); + } - if (cur_object != object) + if (cur_object != object) { vm_object_unlock(cur_object); + } cur_object = new_object; @@ -4519,35 +4538,37 @@ FastPmapEnter: * Cleanup from fast fault failure. Drop any object * lock other than original and drop map lock. */ - if (object != cur_object) + if (object != cur_object) { vm_object_unlock(cur_object); + } /* * must own the object lock exclusively at this point */ if (object_lock_type == OBJECT_LOCK_SHARED) { - object_lock_type = OBJECT_LOCK_EXCLUSIVE; + object_lock_type = OBJECT_LOCK_EXCLUSIVE; if (vm_object_lock_upgrade(object) == FALSE) { - /* + /* * couldn't upgrade, so explictly * take the lock exclusively * no need to retry the fault at this * point since "vm_fault_page" will * completely re-evaluate the state */ - vm_object_lock(object); + vm_object_lock(object); } } handle_copy_delay: vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } if (__improbable(object == compressor_object || - object == kernel_object || - object == vm_submap_object)) { + object == kernel_object || + object == vm_submap_object)) { /* * These objects are explicitly managed and populated by the * kernel. 
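/*
 * Skeleton of the shadow-chain descent the fast path performs around the
 * "next level in the shadow chain" step above: look the page up at each
 * level, re-base the offset when descending, and treat falling off the end
 * of the chain as zero-fill at the top object.  This toy gives each object
 * at most one resident page, and the sign/placement of the offset
 * adjustment is an assumption of the sketch.
 */
#include <stdbool.h>
#include <stddef.h>

struct vobj {
	struct vobj *shadow;		/* next (backing) object */
	long long shadow_offset;	/* vo_shadow_offset stand-in */
	long long resident_off;		/* toy: one resident page at most */
	bool has_page;
};

static struct vobj *
chain_lookup(struct vobj *o, long long off, long long *hit_off)
{
	while (o != NULL) {
		if (o->has_page && o->resident_off == off) {
			*hit_off = off;
			return o;	/* page found at this level */
		}
		off += o->shadow_offset;	/* re-base for next level */
		o = o->shadow;
	}
	return NULL;	/* end of chain: zero-fill */
}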
The virtual ranges backed by these objects should @@ -4567,7 +4588,7 @@ handle_copy_delay: assert(object != kernel_object); assert(object != vm_submap_object); - /* + /* * Make a reference to this object to * prevent its disposal while we are messing with * it. Once we have the reference, the map is free @@ -4577,18 +4598,18 @@ handle_copy_delay: vm_object_reference_locked(object); vm_object_paging_begin(object); - XPR(XPR_VM_FAULT,"vm_fault -> vm_fault_page\n",0,0,0,0,0); + XPR(XPR_VM_FAULT, "vm_fault -> vm_fault_page\n", 0, 0, 0, 0, 0); error_code = 0; result_page = VM_PAGE_NULL; kr = vm_fault_page(object, offset, fault_type, - (change_wiring && !wired), - FALSE, /* page not looked up */ - &prot, &result_page, &top_page, - &type_of_fault, - &error_code, map->no_zero_fill, - FALSE, &fault_info); + (change_wiring && !wired), + FALSE, /* page not looked up */ + &prot, &result_page, &top_page, + &type_of_fault, + &error_code, map->no_zero_fill, + FALSE, &fault_info); /* * if kr != VM_FAULT_SUCCESS, then the paging reference @@ -4606,7 +4627,7 @@ handle_copy_delay: */ if (kr != VM_FAULT_SUCCESS && kr != VM_FAULT_SUCCESS_NO_VM_PAGE) { - /* + /* * we didn't succeed, lose the object reference immediately. */ vm_object_deallocate(object); @@ -4617,26 +4638,28 @@ handle_copy_delay: switch (kr) { case VM_FAULT_MEMORY_SHORTAGE: if (vm_page_wait((change_wiring) ? - THREAD_UNINT : - THREAD_ABORTSAFE)) + THREAD_UNINT : + THREAD_ABORTSAFE)) { goto RetryFault; - /* - * fall thru - */ + } + /* + * fall thru + */ case VM_FAULT_INTERRUPTED: kr = KERN_ABORTED; goto done; case VM_FAULT_RETRY: goto RetryFault; case VM_FAULT_MEMORY_ERROR: - if (error_code) + if (error_code) { kr = error_code; - else + } else { kr = KERN_MEMORY_ERROR; + } goto done; default: panic("vm_fault: unexpected error 0x%x from " - "vm_fault_page()\n", kr); + "vm_fault_page()\n", kr); } } m = result_page; @@ -4645,23 +4668,23 @@ handle_copy_delay: if (m != VM_PAGE_NULL) { m_object = VM_PAGE_OBJECT(m); assert((change_wiring && !wired) ? - (top_page == VM_PAGE_NULL) : - ((top_page == VM_PAGE_NULL) == (m_object == object))); + (top_page == VM_PAGE_NULL) : + ((top_page == VM_PAGE_NULL) == (m_object == object))); } /* * What to do with the resulting page from vm_fault_page * if it doesn't get entered into the physical map: */ -#define RELEASE_PAGE(m) \ - MACRO_BEGIN \ - PAGE_WAKEUP_DONE(m); \ - if ( !VM_PAGE_PAGEABLE(m)) { \ - vm_page_lockspin_queues(); \ - if ( !VM_PAGE_PAGEABLE(m)) \ - vm_page_activate(m); \ - vm_page_unlock_queues(); \ - } \ +#define RELEASE_PAGE(m) \ + MACRO_BEGIN \ + PAGE_WAKEUP_DONE(m); \ + if ( !VM_PAGE_PAGEABLE(m)) { \ + vm_page_lockspin_queues(); \ + if ( !VM_PAGE_PAGEABLE(m)) \ + vm_page_activate(m); \ + vm_page_unlock_queues(); \ + } \ MACRO_END @@ -4674,7 +4697,6 @@ handle_copy_delay: * drop the object locks and go in for the map lock again. 
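/*
 * The error dispatch after the slow-path vm_fault_page() call above, in
 * miniature: a memory shortage waits for a free page and retries, an
 * interrupted wait aborts, and a memory error surfaces the pager's error
 * code when one was supplied.  Enum values and the page_wait() stub are
 * stand-ins for the VM_FAULT_* / KERN_* constants and vm_page_wait().
 */
enum {
	VMF_SUCCESS, VMF_RETRY, VMF_MEMORY_SHORTAGE,
	VMF_INTERRUPTED, VMF_MEMORY_ERROR
};
enum { KR_SUCCESS, KR_ABORTED, KR_MEMORY_ERROR };

static int page_wait(void) { return 1; }	/* 0 = wait interrupted */

static int
fault_result_to_kr(int result, int error_code, int *retry)
{
	*retry = 0;
	switch (result) {
	case VMF_MEMORY_SHORTAGE:
		if (page_wait()) {	/* got a page: redo the fault */
			*retry = 1;
			return KR_SUCCESS;
		}
		/* fall thru, exactly as the hunk does */
	case VMF_INTERRUPTED:
		return KR_ABORTED;
	case VMF_RETRY:
		*retry = 1;
		return KR_SUCCESS;
	case VMF_MEMORY_ERROR:
		return error_code ? error_code : KR_MEMORY_ERROR;
	default:
		return KR_SUCCESS;
	}
}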
*/ if (!vm_map_try_lock_read(original_map)) { - if (m != VM_PAGE_NULL) { old_copy_object = m_object->copy; vm_object_unlock(m_object); @@ -4689,7 +4711,6 @@ handle_copy_delay: } if ((map != original_map) || !vm_map_verify(map, &version)) { - if (object_locks_dropped == FALSE) { if (m != VM_PAGE_NULL) { old_copy_object = m_object->copy; @@ -4698,16 +4719,16 @@ handle_copy_delay: old_copy_object = VM_OBJECT_NULL; vm_object_unlock(object); } - + object_locks_dropped = TRUE; } /* * no object locks are held at this point */ - vm_object_t retry_object; - vm_object_offset_t retry_offset; - vm_prot_t retry_prot; + vm_object_t retry_object; + vm_object_offset_t retry_offset; + vm_prot_t retry_prot; /* * To avoid trying to write_lock the map while another @@ -4721,12 +4742,12 @@ handle_copy_delay: map = original_map; kr = vm_map_lookup_locked(&map, vaddr, - fault_type & ~VM_PROT_WRITE, - OBJECT_LOCK_EXCLUSIVE, &version, - &retry_object, &retry_offset, &retry_prot, - &wired, - &fault_info, - &real_map); + fault_type & ~VM_PROT_WRITE, + OBJECT_LOCK_EXCLUSIVE, &version, + &retry_object, &retry_offset, &retry_prot, + &wired, + &fault_info, + &real_map); pmap = real_map->pmap; if (kr != KERN_SUCCESS) { @@ -4735,7 +4756,7 @@ handle_copy_delay: if (m != VM_PAGE_NULL) { assert(VM_PAGE_OBJECT(m) == m_object); - /* + /* * retake the lock so that * we can drop the paging reference * in vm_fault_cleanup and do the @@ -4747,14 +4768,14 @@ handle_copy_delay: vm_fault_cleanup(m_object, top_page); } else { - /* + /* * retake the lock so that * we can drop the paging reference * in vm_fault_cleanup */ - vm_object_lock(object); + vm_object_lock(object); - vm_fault_cleanup(object, top_page); + vm_fault_cleanup(object, top_page); } vm_object_deallocate(object); @@ -4763,34 +4784,34 @@ handle_copy_delay: vm_object_unlock(retry_object); if ((retry_object != object) || (retry_offset != offset)) { - vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } if (m != VM_PAGE_NULL) { assert(VM_PAGE_OBJECT(m) == m_object); - /* + /* * retake the lock so that * we can drop the paging reference * in vm_fault_cleanup and do the * PAGE_WAKEUP_DONE in RELEASE_PAGE */ - vm_object_lock(m_object); + vm_object_lock(m_object); RELEASE_PAGE(m); vm_fault_cleanup(m_object, top_page); } else { - /* + /* * retake the lock so that * we can drop the paging reference * in vm_fault_cleanup */ - vm_object_lock(object); + vm_object_lock(object); - vm_fault_cleanup(object, top_page); + vm_fault_cleanup(object, top_page); } vm_object_deallocate(object); @@ -4820,8 +4841,9 @@ handle_copy_delay: assert(!pmap_has_prot_policy(prot)); prot &= ~VM_PROT_WRITE; } - } else + } else { vm_object_lock(object); + } object_locks_dropped = FALSE; } @@ -4831,10 +4853,10 @@ handle_copy_delay: * adequate permissions, we must start all over. 
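/*
 * The vm_map_version_t dance above in miniature: the lookup runs under a
 * read lock and records a snapshot; after the object locks were dropped,
 * vm_map_verify() checks the snapshot, and a stale map forces the whole
 * translation to be redone via the re-lookup path.  A bare counter stands
 * in for the real version structure; writers are assumed to bump it under
 * the map write lock.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct tinymap {
	atomic_uint timestamp;	/* bumped on every map mutation */
};

static unsigned
map_snapshot(struct tinymap *m)
{
	return atomic_load(&m->timestamp);
}

static bool
map_verify(struct tinymap *m, unsigned seen)
{
	return atomic_load(&m->timestamp) == seen;	/* still current? */
}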
*/ if (wired && (fault_type != (prot | VM_PROT_WRITE))) { - vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } if (m != VM_PAGE_NULL) { assert(VM_PAGE_OBJECT(m) == m_object); @@ -4842,8 +4864,9 @@ handle_copy_delay: RELEASE_PAGE(m); vm_fault_cleanup(m_object, top_page); - } else - vm_fault_cleanup(object, top_page); + } else { + vm_fault_cleanup(object, top_page); + } vm_object_deallocate(object); @@ -4859,52 +4882,54 @@ handle_copy_delay: */ if (caller_pmap) { kr = vm_fault_enter(m, - caller_pmap, - caller_pmap_addr, - prot, - caller_prot, - wired, - change_wiring, - wire_tag, - &fault_info, - NULL, - &type_of_fault); + caller_pmap, + caller_pmap_addr, + prot, + caller_prot, + wired, + change_wiring, + wire_tag, + &fault_info, + NULL, + &type_of_fault); } else { kr = vm_fault_enter(m, - pmap, - vaddr, - prot, - caller_prot, - wired, - change_wiring, - wire_tag, - &fault_info, - NULL, - &type_of_fault); + pmap, + vaddr, + prot, + caller_prot, + wired, + change_wiring, + wire_tag, + &fault_info, + NULL, + &type_of_fault); } assert(VM_PAGE_OBJECT(m) == m_object); #if DEVELOPMENT || DEBUG - { - int event_code = 0; + { + int event_code = 0; - if (m_object->internal) - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); - else if (m_object->object_is_shared_cache) - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); - else - event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); + if (m_object->internal) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL)); + } else if (m_object->object_is_shared_cache) { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE)); + } else { + event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); + } - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0); - DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); - } + DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); + } #endif if (kr != KERN_SUCCESS) { /* abort this page fault */ vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } PAGE_WAKEUP_DONE(m); vm_fault_cleanup(m_object, top_page); vm_object_deallocate(object); @@ -4919,18 +4944,18 @@ handle_copy_delay: } } } else { - - vm_map_entry_t entry; - vm_map_offset_t laddr; - vm_map_offset_t ldelta, hdelta; + vm_map_entry_t entry; + vm_map_offset_t laddr; + vm_map_offset_t ldelta, hdelta; /* * do a pmap block mapping from the physical address * in the object */ - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } if (original_map != map) { vm_map_unlock_read(map); @@ -4944,24 +4969,25 @@ handle_copy_delay: ldelta = 0xFFFFF000; while (vm_map_lookup_entry(map, laddr, &entry)) { - if (ldelta > (laddr - entry->vme_start)) + if (ldelta > (laddr - entry->vme_start)) { ldelta = laddr - entry->vme_start; - if (hdelta > (entry->vme_end - laddr)) + } + if (hdelta > 
(entry->vme_end - laddr)) { hdelta = entry->vme_end - laddr; + } if (entry->is_sub_map) { - laddr = ((laddr - entry->vme_start) - + VME_OFFSET(entry)); + + VME_OFFSET(entry)); vm_map_lock_read(VME_SUBMAP(entry)); - if (map != real_map) + if (map != real_map) { vm_map_unlock_read(map); + } if (entry->use_pmap) { vm_map_unlock_read(real_map); real_map = VME_SUBMAP(entry); } map = VME_SUBMAP(entry); - } else { break; } @@ -4976,7 +5002,7 @@ handle_copy_delay: object->phys_contiguous && VME_OFFSET(entry) == 0 && (entry->vme_end - entry->vme_start == object->vo_size) && - VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size-1))) { + VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) { superpage = VM_MEM_SUPERPAGE; } else { superpage = 0; @@ -4985,11 +5011,11 @@ handle_copy_delay: if (superpage && physpage_p) { /* for vm_map_wire_and_extract() */ *physpage_p = (ppnum_t) - ((((vm_map_offset_t) - object->vo_shadow_offset) - + VME_OFFSET(entry) - + (laddr - entry->vme_start)) - >> PAGE_SHIFT); + ((((vm_map_offset_t) + object->vo_shadow_offset) + + VME_OFFSET(entry) + + (laddr - entry->vme_start)) + >> PAGE_SHIFT); } if (caller_pmap) { @@ -4998,11 +5024,11 @@ handle_copy_delay: */ assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT)); kr = pmap_map_block(caller_pmap, - (addr64_t)(caller_pmap_addr - ldelta), - (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) + - VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), - (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, - (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); + (addr64_t)(caller_pmap_addr - ldelta), + (ppnum_t)((((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) + + VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), + (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, + (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); if (kr != KERN_SUCCESS) { goto cleanup; @@ -5013,11 +5039,11 @@ handle_copy_delay: */ assert((uint32_t)((ldelta + hdelta) >> PAGE_SHIFT) == ((ldelta + hdelta) >> PAGE_SHIFT)); kr = pmap_map_block(real_map->pmap, - (addr64_t)(vaddr - ldelta), - (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) + - VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), - (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, - (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); + (addr64_t)(vaddr - ldelta), + (ppnum_t)((((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) + + VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta) >> PAGE_SHIFT), + (uint32_t)((ldelta + hdelta) >> PAGE_SHIFT), prot, + (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0); if (kr != KERN_SUCCESS) { goto cleanup; @@ -5039,14 +5065,14 @@ cleanup: * Unlock everything, and return */ vm_map_unlock_read(map); - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } if (m != VM_PAGE_NULL) { assert(VM_PAGE_OBJECT(m) == m_object); if (!m_object->internal && (fault_type & VM_PROT_WRITE)) { - vm_object_paging_begin(m_object); assert(written_on_object == VM_OBJECT_NULL); @@ -5057,12 +5083,13 @@ cleanup: PAGE_WAKEUP_DONE(m); vm_fault_cleanup(m_object, top_page); - } else - vm_fault_cleanup(object, top_page); + } else { + vm_fault_cleanup(object, top_page); + } vm_object_deallocate(object); -#undef RELEASE_PAGE +#undef RELEASE_PAGE done: thread_interrupt_level(interruptible_state); @@ -5074,16 +5101,15 @@ done: throttle_lowpri_io(1); } else { if (kr == KERN_SUCCESS && type_of_fault != 
DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) { - if ((throttle_delay = vm_page_throttled(TRUE))) { - if (vm_debug_events) { - if (type_of_fault == DBG_COMPRESSOR_FAULT) + if (type_of_fault == DBG_COMPRESSOR_FAULT) { VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); - else if (type_of_fault == DBG_COW_FAULT) + } else if (type_of_fault == DBG_COW_FAULT) { VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); - else + } else { VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0); + } } delay(throttle_delay); } @@ -5091,7 +5117,6 @@ done: } if (written_on_object) { - vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64); vm_object_lock(written_on_object); @@ -5106,14 +5131,14 @@ done: } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, - (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, - ((uint64_t)trace_vaddr >> 32), - trace_vaddr, - kr, - type_of_fault, - 0); - - return (kr); + (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, + kr, + type_of_fault, + 0); + + return kr; } /* @@ -5123,17 +5148,17 @@ done: */ kern_return_t vm_fault_wire( - vm_map_t map, - vm_map_entry_t entry, + vm_map_t map, + vm_map_entry_t entry, vm_prot_t prot, vm_tag_t wire_tag, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p) + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p) { - vm_map_offset_t va; - vm_map_offset_t end_addr = entry->vme_end; - kern_return_t rc; + vm_map_offset_t va; + vm_map_offset_t end_addr = entry->vme_end; + kern_return_t rc; assert(entry->in_transition); @@ -5150,7 +5175,7 @@ vm_fault_wire( */ pmap_pageable(pmap, pmap_addr, - pmap_addr + (end_addr - entry->vme_start), FALSE); + pmap_addr + (end_addr - entry->vme_start), FALSE); /* * We simulate a fault to get the page and enter it @@ -5159,27 +5184,27 @@ vm_fault_wire( for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap, - pmap_addr + (va - entry->vme_start), - physpage_p); + pmap_addr + (va - entry->vme_start), + physpage_p); if (rc != KERN_SUCCESS) { rc = vm_fault_internal(map, va, prot, TRUE, wire_tag, - ((pmap == kernel_pmap) - ? THREAD_UNINT - : THREAD_ABORTSAFE), - pmap, - (pmap_addr + - (va - entry->vme_start)), - physpage_p); + ((pmap == kernel_pmap) + ? THREAD_UNINT + : THREAD_ABORTSAFE), + pmap, + (pmap_addr + + (va - entry->vme_start)), + physpage_p); DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL); } if (rc != KERN_SUCCESS) { - struct vm_map_entry tmp_entry = *entry; + struct vm_map_entry tmp_entry = *entry; /* unwire wired pages */ tmp_entry.vme_end = va; vm_fault_unwire(map, - &tmp_entry, FALSE, pmap, pmap_addr); + &tmp_entry, FALSE, pmap, pmap_addr); return rc; } @@ -5194,15 +5219,15 @@ vm_fault_wire( */ void vm_fault_unwire( - vm_map_t map, - vm_map_entry_t entry, - boolean_t deallocate, - pmap_t pmap, - vm_map_offset_t pmap_addr) + vm_map_t map, + vm_map_entry_t entry, + boolean_t deallocate, + pmap_t pmap, + vm_map_offset_t pmap_addr) { - vm_map_offset_t va; - vm_map_offset_t end_addr = entry->vme_end; - vm_object_t object; + vm_map_offset_t va; + vm_map_offset_t end_addr = entry->vme_end; + vm_object_t object; struct vm_object_fault_info fault_info = {}; unsigned int unwired_pages; @@ -5214,8 +5239,9 @@ vm_fault_unwire( * anything to undo here. 
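/*
 * The shape of the vm_fault_wire() loop above: fault and wire one page at
 * a time, and on failure unwire exactly the prefix that succeeded (the
 * hunk does this by cloning the entry and capping tmp_entry.vme_end at
 * the failing address) before propagating the error.  The two helpers are
 * stand-ins for vm_fault_wire_fast()/vm_fault_internal() and
 * vm_fault_unwire().
 */
#define WIRE_PAGE_SIZE 4096ULL

static int wire_one(unsigned long long va) { (void)va; return 0; }
static void unwire_range(unsigned long long start, unsigned long long end)
{ (void)start; (void)end; }

static int
wire_range(unsigned long long start, unsigned long long end)
{
	unsigned long long va;

	for (va = start; va < end; va += WIRE_PAGE_SIZE) {
		int rc = wire_one(va);
		if (rc != 0) {
			unwire_range(start, va);	/* roll back prefix */
			return rc;
		}
	}
	return 0;
}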
*/ - if (object != VM_OBJECT_NULL && object->phys_contiguous) + if (object != VM_OBJECT_NULL && object->phys_contiguous) { return; + } fault_info.interruptible = THREAD_UNINT; fault_info.behavior = entry->behavior; @@ -5237,19 +5263,18 @@ vm_fault_unwire( */ for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) { - if (object == VM_OBJECT_NULL) { if (pmap) { pmap_change_wiring(pmap, - pmap_addr + (va - entry->vme_start), FALSE); + pmap_addr + (va - entry->vme_start), FALSE); } (void) vm_fault(map, va, VM_PROT_NONE, - TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr); + TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr); } else { - vm_prot_t prot; - vm_page_t result_page; - vm_page_t top_page; - vm_object_t result_object; + vm_prot_t prot; + vm_page_t result_page; + vm_page_t top_page; + vm_object_t result_object; vm_fault_return_t result; /* cap cluster size at maximum UPL size */ @@ -5265,13 +5290,13 @@ vm_fault_unwire( vm_object_lock(object); vm_object_paging_begin(object); XPR(XPR_VM_FAULT, - "vm_fault_unwire -> vm_fault_page\n", - 0,0,0,0,0); + "vm_fault_unwire -> vm_fault_page\n", + 0, 0, 0, 0, 0); result_page = VM_PAGE_NULL; - result = vm_fault_page( + result = vm_fault_page( object, (VME_OFFSET(entry) + - (va - entry->vme_start)), + (va - entry->vme_start)), VM_PROT_NONE, TRUE, FALSE, /* page not looked up */ &prot, &result_page, &top_page, @@ -5290,8 +5315,9 @@ vm_fault_unwire( * eject, so we don't want to panic in that situation. */ - if (result == VM_FAULT_MEMORY_ERROR && !object->alive) + if (result == VM_FAULT_MEMORY_ERROR && !object->alive) { continue; + } if (result == VM_FAULT_MEMORY_ERROR && object == kernel_object) { @@ -5305,23 +5331,25 @@ vm_fault_unwire( continue; } - if (result != VM_FAULT_SUCCESS) + if (result != VM_FAULT_SUCCESS) { panic("vm_fault_unwire: failure"); + } result_object = VM_PAGE_OBJECT(result_page); if (deallocate) { assert(VM_PAGE_GET_PHYS_PAGE(result_page) != - vm_page_fictitious_addr); + vm_page_fictitious_addr); pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page)); if (VM_PAGE_WIRED(result_page)) { unwired_pages++; } VM_PAGE_FREE(result_page); } else { - if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) + if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) { pmap_change_wiring(pmap, pmap_addr + (va - entry->vme_start), FALSE); + } if (VM_PAGE_WIRED(result_page)) { @@ -5330,7 +5358,7 @@ vm_fault_unwire( vm_page_unlock_queues(); unwired_pages++; } - if(entry->zero_wired_pages) { + if (entry->zero_wired_pages) { pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page)); entry->zero_wired_pages = FALSE; } @@ -5348,10 +5376,10 @@ vm_fault_unwire( */ pmap_pageable(pmap, pmap_addr, - pmap_addr + (end_addr - entry->vme_start), TRUE); + pmap_addr + (end_addr - entry->vme_start), TRUE); if (kernel_object == object) { - vm_tag_update_size(fault_info.user_tag, -ptoa_64(unwired_pages)); + vm_tag_update_size(fault_info.user_tag, -ptoa_64(unwired_pages)); } } @@ -5377,60 +5405,61 @@ vm_fault_unwire( */ static kern_return_t vm_fault_wire_fast( - __unused vm_map_t map, - vm_map_offset_t va, + __unused vm_map_t map, + vm_map_offset_t va, __unused vm_prot_t caller_prot, - vm_tag_t wire_tag, - vm_map_entry_t entry, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p) + vm_tag_t wire_tag, + vm_map_entry_t entry, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p) { - vm_object_t object; - vm_object_offset_t offset; - vm_page_t m; - vm_prot_t prot; - thread_t thread = 
current_thread(); - int type_of_fault; - kern_return_t kr; + vm_object_t object; + vm_object_offset_t offset; + vm_page_t m; + vm_prot_t prot; + thread_t thread = current_thread(); + int type_of_fault; + kern_return_t kr; struct vm_object_fault_info fault_info = {}; VM_STAT_INCR(faults); - if (thread != THREAD_NULL && thread->task != TASK_NULL) - thread->task->faults++; + if (thread != THREAD_NULL && thread->task != TASK_NULL) { + thread->task->faults++; + } /* * Recovery actions */ -#undef RELEASE_PAGE -#define RELEASE_PAGE(m) { \ - PAGE_WAKEUP_DONE(m); \ - vm_page_lockspin_queues(); \ - vm_page_unwire(m, TRUE); \ - vm_page_unlock_queues(); \ +#undef RELEASE_PAGE +#define RELEASE_PAGE(m) { \ + PAGE_WAKEUP_DONE(m); \ + vm_page_lockspin_queues(); \ + vm_page_unwire(m, TRUE); \ + vm_page_unlock_queues(); \ } -#undef UNLOCK_THINGS -#define UNLOCK_THINGS { \ - vm_object_paging_end(object); \ - vm_object_unlock(object); \ +#undef UNLOCK_THINGS +#define UNLOCK_THINGS { \ + vm_object_paging_end(object); \ + vm_object_unlock(object); \ } -#undef UNLOCK_AND_DEALLOCATE -#define UNLOCK_AND_DEALLOCATE { \ - UNLOCK_THINGS; \ - vm_object_deallocate(object); \ +#undef UNLOCK_AND_DEALLOCATE +#define UNLOCK_AND_DEALLOCATE { \ + UNLOCK_THINGS; \ + vm_object_deallocate(object); \ } /* * Give up and have caller do things the hard way. */ -#define GIVE_UP { \ - UNLOCK_AND_DEALLOCATE; \ - return(KERN_FAILURE); \ +#define GIVE_UP { \ + UNLOCK_AND_DEALLOCATE; \ + return(KERN_FAILURE); \ } @@ -5439,7 +5468,7 @@ vm_fault_wire_fast( */ if (entry->is_sub_map) { assert(physpage_p == NULL); - return(KERN_FAILURE); + return KERN_FAILURE; } /* @@ -5450,7 +5479,7 @@ vm_fault_wire_fast( offset = (va - entry->vme_start) + VME_OFFSET(entry); prot = entry->protection; - /* + /* * Make a reference to this object to prevent its * disposal while we are messing with it. 
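/*
 * The RELEASE_PAGE / UNLOCK_THINGS / UNLOCK_AND_DEALLOCATE / GIVE_UP tower
 * above is the classic layered-cleanup pattern: each macro undoes one more
 * level of state, so every early exit releases exactly what was acquired.
 * In portable C the same shape is usually written with cascading goto
 * labels, sketched here with a pthread mutex in place of the object lock.
 */
#include <pthread.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static int paging_refs;

static int
fast_wire_attempt(int must_give_up)
{
	int kr = 0;

	pthread_mutex_lock(&obj_lock);	/* vm_object_lock() */
	paging_refs++;			/* vm_object_paging_begin() */

	if (must_give_up) {
		kr = -1;		/* KERN_FAILURE, as in GIVE_UP */
		goto unlock_things;
	}
	/* ... fast-path work under the lock ... */

unlock_things:				/* UNLOCK_THINGS */
	paging_refs--;			/* vm_object_paging_end() */
	pthread_mutex_unlock(&obj_lock);
	/* UNLOCK_AND_DEALLOCATE would additionally drop the object ref */
	return kr;
}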
*/ @@ -5479,8 +5508,7 @@ vm_fault_wire_fast( */ m = vm_page_lookup(object, offset); if ((m == VM_PAGE_NULL) || (m->vmp_busy) || - (m->vmp_unusual && ( m->vmp_error || m->vmp_restart || m->vmp_absent))) { - + (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) { GIVE_UP; } if (m->vmp_fictitious && @@ -5529,16 +5557,16 @@ vm_fault_wire_fast( */ type_of_fault = DBG_CACHE_HIT_FAULT; kr = vm_fault_enter(m, - pmap, - pmap_addr, - prot, - prot, - TRUE, /* wired */ - FALSE, /* change_wiring */ - wire_tag, - &fault_info, - NULL, - &type_of_fault); + pmap, + pmap_addr, + prot, + prot, + TRUE, /* wired */ + FALSE, /* change_wiring */ + wire_tag, + &fault_info, + NULL, + &type_of_fault); if (kr != KERN_SUCCESS) { RELEASE_PAGE(m); GIVE_UP; @@ -5567,7 +5595,6 @@ done: UNLOCK_AND_DEALLOCATE; return kr; - } /* @@ -5578,16 +5605,16 @@ done: static void vm_fault_copy_cleanup( - vm_page_t page, - vm_page_t top_page) + vm_page_t page, + vm_page_t top_page) { - vm_object_t object = VM_PAGE_OBJECT(page); + vm_object_t object = VM_PAGE_OBJECT(page); vm_object_lock(object); PAGE_WAKEUP_DONE(page); - if ( !VM_PAGE_PAGEABLE(page)) { + if (!VM_PAGE_PAGEABLE(page)) { vm_page_lockspin_queues(); - if ( !VM_PAGE_PAGEABLE(page)) { + if (!VM_PAGE_PAGEABLE(page)) { vm_page_activate(page); } vm_page_unlock_queues(); @@ -5597,9 +5624,9 @@ vm_fault_copy_cleanup( static void vm_fault_copy_dst_cleanup( - vm_page_t page) + vm_page_t page) { - vm_object_t object; + vm_object_t object; if (page != VM_PAGE_NULL) { object = VM_PAGE_OBJECT(page); @@ -5641,32 +5668,32 @@ vm_fault_copy_dst_cleanup( */ kern_return_t vm_fault_copy( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_map_size_t *copy_size, /* INOUT */ - vm_object_t dst_object, - vm_object_offset_t dst_offset, - vm_map_t dst_map, - vm_map_version_t *dst_version, - int interruptible) + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_map_size_t *copy_size, /* INOUT */ + vm_object_t dst_object, + vm_object_offset_t dst_offset, + vm_map_t dst_map, + vm_map_version_t *dst_version, + int interruptible) { - vm_page_t result_page; + vm_page_t result_page; - vm_page_t src_page; - vm_page_t src_top_page; - vm_prot_t src_prot; + vm_page_t src_page; + vm_page_t src_top_page; + vm_prot_t src_prot; - vm_page_t dst_page; - vm_page_t dst_top_page; - vm_prot_t dst_prot; + vm_page_t dst_page; + vm_page_t dst_top_page; + vm_prot_t dst_prot; - vm_map_size_t amount_left; - vm_object_t old_copy_object; - vm_object_t result_page_object = NULL; - kern_return_t error = 0; - vm_fault_return_t result; + vm_map_size_t amount_left; + vm_object_t old_copy_object; + vm_object_t result_page_object = NULL; + kern_return_t error = 0; + vm_fault_return_t result; - vm_map_size_t part_size; + vm_map_size_t part_size; struct vm_object_fault_info fault_info_src = {}; struct vm_object_fault_info fault_info_dst = {}; @@ -5675,10 +5702,10 @@ vm_fault_copy( * the different offsets on a page boundary. */ -#define RETURN(x) \ - MACRO_BEGIN \ - *copy_size -= amount_left; \ - MACRO_RETURN(x); \ +#define RETURN(x) \ + MACRO_BEGIN \ + *copy_size -= amount_left; \ + MACRO_RETURN(x); \ MACRO_END amount_left = *copy_size; @@ -5703,9 +5730,9 @@ vm_fault_copy( * COW semantics if any. 
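 *
 * In outline (a sketch only, not the verbatim code that follows):
 *
 *	RetryDestinationFault:
 *		dst_prot = VM_PROT_WRITE | VM_PROT_READ;
 *		result = vm_fault_page(dst_object, ..., &dst_prot,
 *		    &dst_page, &dst_top_page, ...);
 *		if (result == VM_FAULT_RETRY)
 *			goto RetryDestinationFault;
 *
 * Faulting for write up front resolves any copy-on-write
 * shadowing on the destination before data is copied in.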
*/ - RetryDestinationFault: ; +RetryDestinationFault:; - dst_prot = VM_PROT_WRITE|VM_PROT_READ; + dst_prot = VM_PROT_WRITE | VM_PROT_READ; vm_object_lock(dst_object); vm_object_paging_begin(dst_object); @@ -5717,44 +5744,46 @@ vm_fault_copy( } fault_info_dst.cluster_size = cluster_size; - XPR(XPR_VM_FAULT,"vm_fault_copy -> vm_fault_page\n",0,0,0,0,0); + XPR(XPR_VM_FAULT, "vm_fault_copy -> vm_fault_page\n", 0, 0, 0, 0, 0); dst_page = VM_PAGE_NULL; result = vm_fault_page(dst_object, - vm_object_trunc_page(dst_offset), - VM_PROT_WRITE|VM_PROT_READ, - FALSE, - FALSE, /* page not looked up */ - &dst_prot, &dst_page, &dst_top_page, - (int *)0, - &error, - dst_map->no_zero_fill, - FALSE, &fault_info_dst); + vm_object_trunc_page(dst_offset), + VM_PROT_WRITE | VM_PROT_READ, + FALSE, + FALSE, /* page not looked up */ + &dst_prot, &dst_page, &dst_top_page, + (int *)0, + &error, + dst_map->no_zero_fill, + FALSE, &fault_info_dst); switch (result) { case VM_FAULT_SUCCESS: break; case VM_FAULT_RETRY: goto RetryDestinationFault; case VM_FAULT_MEMORY_SHORTAGE: - if (vm_page_wait(interruptible)) + if (vm_page_wait(interruptible)) { goto RetryDestinationFault; - /* fall thru */ + } + /* fall thru */ case VM_FAULT_INTERRUPTED: RETURN(MACH_SEND_INTERRUPTED); case VM_FAULT_SUCCESS_NO_VM_PAGE: /* success but no VM page: fail the copy */ vm_object_paging_end(dst_object); vm_object_unlock(dst_object); - /*FALLTHROUGH*/ + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: - if (error) - return (error); - else - return(KERN_MEMORY_ERROR); + if (error) { + return error; + } else { + return KERN_MEMORY_ERROR; + } default: panic("vm_fault_copy: unexpected error 0x%x from " - "vm_fault_page()\n", result); + "vm_fault_page()\n", result); } - assert ((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE); + assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE); assert(dst_object == VM_PAGE_OBJECT(dst_page)); old_copy_object = dst_object->copy; @@ -5784,7 +5813,7 @@ vm_fault_copy( vm_object_unlock(dst_object); } - RetrySourceFault: ; +RetrySourceFault:; if (src_object == VM_OBJECT_NULL) { /* @@ -5796,7 +5825,7 @@ vm_fault_copy( } else { vm_object_lock(src_object); src_page = vm_page_lookup(src_object, - vm_object_trunc_page(src_offset)); + vm_object_trunc_page(src_offset)); if (src_page == dst_page) { src_prot = dst_prot; result_page = VM_PAGE_NULL; @@ -5811,8 +5840,8 @@ vm_fault_copy( fault_info_src.cluster_size = cluster_size; XPR(XPR_VM_FAULT, - "vm_fault_copy(2) -> vm_fault_page\n", - 0,0,0,0,0); + "vm_fault_copy(2) -> vm_fault_page\n", + 0, 0, 0, 0, 0); result_page = VM_PAGE_NULL; result = vm_fault_page( src_object, @@ -5830,9 +5859,10 @@ vm_fault_copy( case VM_FAULT_RETRY: goto RetrySourceFault; case VM_FAULT_MEMORY_SHORTAGE: - if (vm_page_wait(interruptible)) + if (vm_page_wait(interruptible)) { goto RetrySourceFault; - /* fall thru */ + } + /* fall thru */ case VM_FAULT_INTERRUPTED: vm_fault_copy_dst_cleanup(dst_page); RETURN(MACH_SEND_INTERRUPTED); @@ -5840,24 +5870,25 @@ vm_fault_copy( /* success but no VM page: fail */ vm_object_paging_end(src_object); vm_object_unlock(src_object); - /*FALLTHROUGH*/ + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: vm_fault_copy_dst_cleanup(dst_page); - if (error) - return (error); - else - return(KERN_MEMORY_ERROR); + if (error) { + return error; + } else { + return KERN_MEMORY_ERROR; + } default: panic("vm_fault_copy(2): unexpected " - "error 0x%x from " - "vm_fault_page()\n", result); + "error 0x%x from " + "vm_fault_page()\n", result); } result_page_object = VM_PAGE_OBJECT(result_page); 
assert((src_top_page == VM_PAGE_NULL) == - (result_page_object == src_object)); + (result_page_object == src_object)); } - assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE); + assert((src_prot & VM_PROT_READ) != VM_PROT_NONE); vm_object_unlock(result_page_object); } @@ -5865,8 +5896,9 @@ vm_fault_copy( if (!vm_map_verify(dst_map, dst_version)) { vm_map_unlock_read(dst_map); - if (result_page != VM_PAGE_NULL && src_page != dst_page) + if (result_page != VM_PAGE_NULL && src_page != dst_page) { vm_fault_copy_cleanup(result_page, src_top_page); + } vm_fault_copy_dst_cleanup(dst_page); break; } @@ -5877,8 +5909,9 @@ vm_fault_copy( if (dst_object->copy != old_copy_object) { vm_object_unlock(dst_object); vm_map_unlock_read(dst_map); - if (result_page != VM_PAGE_NULL && src_page != dst_page) + if (result_page != VM_PAGE_NULL && src_page != dst_page) { vm_fault_copy_cleanup(result_page, src_top_page); + } vm_fault_copy_dst_cleanup(dst_page); break; } @@ -5890,11 +5923,10 @@ vm_fault_copy( */ if (!page_aligned(src_offset) || - !page_aligned(dst_offset) || - !page_aligned(amount_left)) { - - vm_object_offset_t src_po, - dst_po; + !page_aligned(dst_offset) || + !page_aligned(amount_left)) { + vm_object_offset_t src_po, + dst_po; src_po = src_offset - vm_object_trunc_page(src_offset); dst_po = dst_offset - vm_object_trunc_page(dst_offset); @@ -5904,7 +5936,7 @@ vm_fault_copy( } else { part_size = PAGE_SIZE - src_po; } - if (part_size > (amount_left)){ + if (part_size > (amount_left)) { part_size = amount_left; } @@ -5912,41 +5944,39 @@ vm_fault_copy( assert((vm_offset_t) dst_po == dst_po); assert((vm_size_t) part_size == part_size); vm_page_part_zero_fill(dst_page, - (vm_offset_t) dst_po, - (vm_size_t) part_size); + (vm_offset_t) dst_po, + (vm_size_t) part_size); } else { assert((vm_offset_t) src_po == src_po); assert((vm_offset_t) dst_po == dst_po); assert((vm_size_t) part_size == part_size); vm_page_part_copy(result_page, - (vm_offset_t) src_po, - dst_page, - (vm_offset_t) dst_po, - (vm_size_t)part_size); - if(!dst_page->vmp_dirty){ + (vm_offset_t) src_po, + dst_page, + (vm_offset_t) dst_po, + (vm_size_t)part_size); + if (!dst_page->vmp_dirty) { vm_object_lock(dst_object); SET_PAGE_DIRTY(dst_page, TRUE); vm_object_unlock(dst_object); } - } } else { part_size = PAGE_SIZE; - if (result_page == VM_PAGE_NULL) + if (result_page == VM_PAGE_NULL) { vm_page_zero_fill(dst_page); - else{ + } else { vm_object_lock(result_page_object); vm_page_copy(result_page, dst_page); vm_object_unlock(result_page_object); - if(!dst_page->vmp_dirty){ + if (!dst_page->vmp_dirty) { vm_object_lock(dst_object); SET_PAGE_DIRTY(dst_page, TRUE); vm_object_unlock(dst_object); } } - } /* @@ -5955,8 +5985,9 @@ vm_fault_copy( vm_map_unlock_read(dst_map); - if (result_page != VM_PAGE_NULL && src_page != dst_page) + if (result_page != VM_PAGE_NULL && src_page != dst_page) { vm_fault_copy_cleanup(result_page, src_top_page); + } vm_fault_copy_dst_cleanup(dst_page); amount_left -= part_size; @@ -5965,12 +5996,12 @@ vm_fault_copy( } while (amount_left > 0); RETURN(KERN_SUCCESS); -#undef RETURN +#undef RETURN /*NOTREACHED*/ } -#if VM_FAULT_CLASSIFY +#if VM_FAULT_CLASSIFY /* * Temporary statistics gathering support. 
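 *
 * In outline: vm_fault_classify() below walks the shadow chain
 * for a given (object, offset), bumping "level" once per shadow
 * hop, and records the outcome as, roughly,
 *
 *	vm_fault_stats[type][MIN(level, VM_FAULT_LEVEL_MAX)] += 1;
 *
 * where "type" is one of zero-fill, map-in, pager, copy or other.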
*/ @@ -5978,30 +6009,30 @@ vm_fault_copy( /* * Statistics arrays: */ -#define VM_FAULT_TYPES_MAX 5 -#define VM_FAULT_LEVEL_MAX 8 +#define VM_FAULT_TYPES_MAX 5 +#define VM_FAULT_LEVEL_MAX 8 -int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX]; +int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX]; -#define VM_FAULT_TYPE_ZERO_FILL 0 -#define VM_FAULT_TYPE_MAP_IN 1 -#define VM_FAULT_TYPE_PAGER 2 -#define VM_FAULT_TYPE_COPY 3 -#define VM_FAULT_TYPE_OTHER 4 +#define VM_FAULT_TYPE_ZERO_FILL 0 +#define VM_FAULT_TYPE_MAP_IN 1 +#define VM_FAULT_TYPE_PAGER 2 +#define VM_FAULT_TYPE_COPY 3 +#define VM_FAULT_TYPE_OTHER 4 void -vm_fault_classify(vm_object_t object, - vm_object_offset_t offset, - vm_prot_t fault_type) +vm_fault_classify(vm_object_t object, + vm_object_offset_t offset, + vm_prot_t fault_type) { - int type, level = 0; - vm_page_t m; + int type, level = 0; + vm_page_t m; while (TRUE) { m = vm_page_lookup(object, offset); if (m != VM_PAGE_NULL) { - if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) { + if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) { type = VM_FAULT_TYPE_OTHER; break; } @@ -6012,8 +6043,7 @@ vm_fault_classify(vm_object_t object, } type = VM_FAULT_TYPE_COPY; break; - } - else { + } else { if (object->pager_created) { type = VM_FAULT_TYPE_PAGER; break; @@ -6021,7 +6051,7 @@ vm_fault_classify(vm_object_t object, if (object->shadow == VM_OBJECT_NULL) { type = VM_FAULT_TYPE_ZERO_FILL; break; - } + } offset += object->vo_shadow_offset; object = object->shadow; @@ -6030,8 +6060,9 @@ vm_fault_classify(vm_object_t object, } } - if (level > VM_FAULT_LEVEL_MAX) + if (level > VM_FAULT_LEVEL_MAX) { level = VM_FAULT_LEVEL_MAX; + } vm_fault_stats[type][level] += 1; @@ -6053,19 +6084,19 @@ vm_fault_classify_init(void) return; } -#endif /* VM_FAULT_CLASSIFY */ +#endif /* VM_FAULT_CLASSIFY */ vm_offset_t kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) { - vm_map_entry_t entry; - vm_object_t object; - vm_offset_t object_offset; - vm_page_t m; - int compressor_external_state, compressed_count_delta; - int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP); - int my_fault_type = VM_PROT_READ; - kern_return_t kr; + vm_map_entry_t entry; + vm_object_t object; + vm_offset_t object_offset; + vm_page_t m; + int compressor_external_state, compressed_count_delta; + int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP); + int my_fault_type = VM_PROT_READ; + kern_return_t kr; if (not_in_kdp) { panic("kdp_lightweight_fault called from outside of debugger context"); @@ -6103,20 +6134,19 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) } if (object->pager_created && (object->paging_in_progress || - object->activity_in_progress)) { + object->activity_in_progress)) { return 0; } m = kdp_vm_page_lookup(object, object_offset); if (m != VM_PAGE_NULL) { - if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) { return 0; } if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done || m->vmp_absent || m->vmp_error || m->vmp_cleaning || - m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) { + m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) { return 0; } @@ -6143,8 +6173,8 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) { if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) { kr = vm_compressor_pager_get(object->pager, (object_offset + object->paging_offset), - 
kdp_compressor_decompressed_page_ppnum, &my_fault_type, - compressor_flags, &compressed_count_delta); + kdp_compressor_decompressed_page_ppnum, &my_fault_type, + compressor_flags, &compressed_count_delta); if (kr == KERN_SUCCESS) { return kdp_compressor_decompressed_page_paddr; } else { @@ -6160,7 +6190,6 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) object_offset += object->vo_shadow_offset; object = object->shadow; } - } /* @@ -6175,9 +6204,9 @@ kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr) */ static boolean_t vm_page_validate_cs_fast( - vm_page_t page) + vm_page_t page) { - vm_object_t object; + vm_object_t object; object = VM_PAGE_OBJECT(page); vm_object_lock_assert_held(object); @@ -6195,10 +6224,10 @@ vm_page_validate_cs_fast( page->vmp_cs_tainted = TRUE; if (cs_debug) { printf("CODESIGNING: %s: " - "page %p obj %p off 0x%llx " - "was modified\n", - __FUNCTION__, - page, object, page->vmp_offset); + "page %p obj %p off 0x%llx " + "was modified\n", + __FUNCTION__, + page, object, page->vmp_offset); } vm_cs_validated_dirtied++; } @@ -6238,15 +6267,15 @@ vm_page_validate_cs_fast( void vm_page_validate_cs_mapped_slow( - vm_page_t page, - const void *kaddr) + vm_page_t page, + const void *kaddr) { - vm_object_t object; - memory_object_offset_t mo_offset; - memory_object_t pager; - struct vnode *vnode; - boolean_t validated; - unsigned tainted; + vm_object_t object; + memory_object_offset_t mo_offset; + memory_object_t pager; + struct vnode *vnode; + boolean_t validated; + unsigned tainted; assert(page->vmp_busy); object = VM_PAGE_OBJECT(page); @@ -6272,11 +6301,11 @@ vm_page_validate_cs_mapped_slow( /* verify the SHA1 hash for this page */ tainted = 0; validated = cs_validate_range(vnode, - pager, - mo_offset, - (const void *)((const char *)kaddr), - PAGE_SIZE_64, - &tainted); + pager, + mo_offset, + (const void *)((const char *)kaddr), + PAGE_SIZE_64, + &tainted); if (tainted & CS_VALIDATE_TAINTED) { page->vmp_cs_tainted = TRUE; @@ -6291,16 +6320,16 @@ vm_page_validate_cs_mapped_slow( #if CHECK_CS_VALIDATION_BITMAP if (page->vmp_cs_validated && !page->vmp_cs_tainted) { vnode_pager_cs_check_validation_bitmap(object->pager, - mo_offset, - CS_BITMAP_SET); + mo_offset, + CS_BITMAP_SET); } #endif /* CHECK_CS_VALIDATION_BITMAP */ } void vm_page_validate_cs_mapped( - vm_page_t page, - const void *kaddr) + vm_page_t page, + const void *kaddr) { if (!vm_page_validate_cs_fast(page)) { vm_page_validate_cs_mapped_slow(page, kaddr); @@ -6309,16 +6338,16 @@ vm_page_validate_cs_mapped( void vm_page_validate_cs( - vm_page_t page) + vm_page_t page) { - vm_object_t object; - vm_object_offset_t offset; - vm_map_offset_t koffset; - vm_map_size_t ksize; - vm_offset_t kaddr; - kern_return_t kr; - boolean_t busy_page; - boolean_t need_unmap; + vm_object_t object; + vm_object_offset_t offset; + vm_map_offset_t koffset; + vm_map_size_t ksize; + vm_offset_t kaddr; + kern_return_t kr; + boolean_t busy_page; + boolean_t need_unmap; object = VM_PAGE_OBJECT(page); vm_object_lock_assert_held(object); @@ -6349,13 +6378,13 @@ vm_page_validate_cs( koffset = 0; need_unmap = FALSE; kr = vm_paging_map_object(page, - object, - offset, - VM_PROT_READ, - FALSE, /* can't unlock object ! */ - &ksize, - &koffset, - &need_unmap); + object, + offset, + VM_PROT_READ, + FALSE, /* can't unlock object ! 
*/ + &ksize, + &koffset, + &need_unmap); if (kr != KERN_SUCCESS) { panic("%s: could not map page: 0x%x\n", __FUNCTION__, kr); } @@ -6383,19 +6412,19 @@ vm_page_validate_cs( void vm_page_validate_cs_mapped_chunk( - vm_page_t page, - const void *kaddr, - vm_offset_t chunk_offset, - vm_size_t chunk_size, - boolean_t *validated_p, - unsigned *tainted_p) + vm_page_t page, + const void *kaddr, + vm_offset_t chunk_offset, + vm_size_t chunk_size, + boolean_t *validated_p, + unsigned *tainted_p) { - vm_object_t object; - vm_object_offset_t offset, offset_in_page; - memory_object_t pager; - struct vnode *vnode; - boolean_t validated; - unsigned tainted; + vm_object_t object; + vm_object_offset_t offset, offset_in_page; + memory_object_t pager; + struct vnode *vnode; + boolean_t validated; + unsigned tainted; *validated_p = FALSE; *tainted_p = 0; @@ -6433,14 +6462,14 @@ vm_page_validate_cs_mapped_chunk( tainted = 0; validated = cs_validate_range(vnode, - pager, - (object->paging_offset + - offset + - offset_in_page), - (const void *)((const char *)kaddr - + offset_in_page), - chunk_size, - &tainted); + pager, + (object->paging_offset + + offset + + offset_in_page), + (const void *)((const char *)kaddr + + offset_in_page), + chunk_size, + &tainted); if (validated) { *validated_p = TRUE; } @@ -6449,21 +6478,29 @@ vm_page_validate_cs_mapped_chunk( } } -static void vm_rtfrecord_lock(void) { +static void +vm_rtfrecord_lock(void) +{ lck_spin_lock(&vm_rtfr_slock); } -static void vm_rtfrecord_unlock(void) { +static void +vm_rtfrecord_unlock(void) +{ lck_spin_unlock(&vm_rtfr_slock); } -unsigned int vmrtfaultinfo_bufsz(void) { - return (vmrtf_num_records * sizeof(vm_rtfault_record_t)); +unsigned int +vmrtfaultinfo_bufsz(void) +{ + return vmrtf_num_records * sizeof(vm_rtfault_record_t); } #include -static void vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault) { +static void +vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault) +{ uint64_t fend = mach_continuous_time(); uint64_t cfpc = 0; @@ -6507,7 +6544,9 @@ static void vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t vm_rtfrecord_unlock(); } -int vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, int vrecordsz, void *vrecords, int *vmrtfrv) { +int +vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, int vrecordsz, void *vrecords, int *vmrtfrv) +{ vm_rtfault_record_t *cvmrd = vrecords; size_t residue = vrecordsz; int numextracted = 0; @@ -6516,14 +6555,13 @@ int vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, int vrecordsz, void vm_rtfrecord_lock(); for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) { - if (residue < sizeof(vm_rtfault_record_t)) { early_exit = TRUE; break; } if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) { -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG if (isroot == FALSE) { continue; } @@ -6541,5 +6579,5 @@ int vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, int vrecordsz, void vm_rtfrecord_unlock(); *vmrtfrv = numextracted; - return (early_exit); + return early_exit; } diff --git a/osfmk/vm/vm_fault.h b/osfmk/vm/vm_fault.h index 1dc0839e7..973851680 100644 --- a/osfmk/vm/vm_fault.h +++ b/osfmk/vm/vm_fault.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -61,7 +61,7 @@ * Page fault handling module declarations. 
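 *
 * A minimal usage sketch of the object-based interface declared
 * below (the exact locking and cleanup protocol depends on the
 * caller; see the callers in vm_fault.c):
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	result = vm_fault_page(object, offset, VM_PROT_READ,
 *	    FALSE, FALSE, &prot, &page, &top_page,
 *	    NULL, &error, FALSE, FALSE, &fault_info);
 *
 * On VM_FAULT_SUCCESS the returned page is left "busy" and must
 * be released (PAGE_WAKEUP_DONE()), and any "top_page" disposed
 * of, e.g. via vm_fault_cleanup().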
*/ -#ifndef _VM_VM_FAULT_H_ +#ifndef _VM_VM_FAULT_H_ #define _VM_VM_FAULT_H_ #include @@ -71,36 +71,36 @@ #include #include -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE -typedef kern_return_t vm_fault_return_t; +typedef kern_return_t vm_fault_return_t; -#define VM_FAULT_SUCCESS 0 -#define VM_FAULT_RETRY 1 -#define VM_FAULT_INTERRUPTED 2 -#define VM_FAULT_MEMORY_SHORTAGE 3 -#define VM_FAULT_MEMORY_ERROR 5 -#define VM_FAULT_SUCCESS_NO_VM_PAGE 6 /* success but no VM page */ +#define VM_FAULT_SUCCESS 0 +#define VM_FAULT_RETRY 1 +#define VM_FAULT_INTERRUPTED 2 +#define VM_FAULT_MEMORY_SHORTAGE 3 +#define VM_FAULT_MEMORY_ERROR 5 +#define VM_FAULT_SUCCESS_NO_VM_PAGE 6 /* success but no VM page */ /* * Page fault handling based on vm_map (or entries therein) */ extern kern_return_t vm_fault( - vm_map_t map, - vm_map_offset_t vaddr, - vm_prot_t fault_type, - boolean_t change_wiring, + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t fault_type, + boolean_t change_wiring, #if XNU_KERNEL_PRIVATE - vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ + vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ #endif - int interruptible, - pmap_t pmap, - vm_map_offset_t pmap_addr); + int interruptible, + pmap_t pmap, + vm_map_offset_t pmap_addr); extern void vm_pre_fault(vm_map_offset_t); -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -110,68 +110,68 @@ extern void vm_fault_init(void); /* exported kext version */ extern kern_return_t vm_fault_external( - vm_map_t map, - vm_map_offset_t vaddr, - vm_prot_t fault_type, - boolean_t change_wiring, - int interruptible, - pmap_t caller_pmap, - vm_map_offset_t caller_pmap_addr); + vm_map_t map, + vm_map_offset_t vaddr, + vm_prot_t fault_type, + boolean_t change_wiring, + int interruptible, + pmap_t caller_pmap, + vm_map_offset_t caller_pmap_addr); /* * Page fault handling based on vm_object only. */ extern vm_fault_return_t vm_fault_page( - /* Arguments: */ - vm_object_t first_object, /* Object to begin search */ - vm_object_offset_t first_offset,/* Offset into object */ - vm_prot_t fault_type, /* What access is requested */ - boolean_t must_be_resident,/* Must page be resident? */ - boolean_t caller_lookup, /* caller looked up page */ - /* Modifies in place: */ - vm_prot_t *protection, /* Protection for mapping */ - vm_page_t *result_page, /* Page found, if successful */ - /* Returns: */ - vm_page_t *top_page, /* Page in top object, if - * not result_page. */ - int *type_of_fault, /* if non-zero, return COW, zero-filled, etc... - * used by kernel trace point in vm_fault */ - /* More arguments: */ - kern_return_t *error_code, /* code if page is in error */ - boolean_t no_zero_fill, /* don't fill absent pages */ - boolean_t data_supply, /* treat as data_supply */ - vm_object_fault_info_t fault_info); + /* Arguments: */ + vm_object_t first_object, /* Object to begin search */ + vm_object_offset_t first_offset, /* Offset into object */ + vm_prot_t fault_type, /* What access is requested */ + boolean_t must_be_resident, /* Must page be resident? */ + boolean_t caller_lookup, /* caller looked up page */ + /* Modifies in place: */ + vm_prot_t *protection, /* Protection for mapping */ + vm_page_t *result_page, /* Page found, if successful */ + /* Returns: */ + vm_page_t *top_page, /* Page in top object, if + * not result_page. */ + int *type_of_fault, /* if non-zero, return COW, zero-filled, etc... 
+ * used by kernel trace point in vm_fault */ + /* More arguments: */ + kern_return_t *error_code, /* code if page is in error */ + boolean_t no_zero_fill, /* don't fill absent pages */ + boolean_t data_supply, /* treat as data_supply */ + vm_object_fault_info_t fault_info); extern void vm_fault_cleanup( - vm_object_t object, - vm_page_t top_page); + vm_object_t object, + vm_page_t top_page); extern kern_return_t vm_fault_wire( - vm_map_t map, - vm_map_entry_t entry, - vm_prot_t prot, - vm_tag_t wire_tag, - pmap_t pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p); + vm_map_t map, + vm_map_entry_t entry, + vm_prot_t prot, + vm_tag_t wire_tag, + pmap_t pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); extern void vm_fault_unwire( - vm_map_t map, - vm_map_entry_t entry, - boolean_t deallocate, - pmap_t pmap, - vm_map_offset_t pmap_addr); - -extern kern_return_t vm_fault_copy( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_map_size_t *copy_size, /* INOUT */ - vm_object_t dst_object, - vm_object_offset_t dst_offset, - vm_map_t dst_map, - vm_map_version_t *dst_version, - int interruptible); + vm_map_t map, + vm_map_entry_t entry, + boolean_t deallocate, + pmap_t pmap, + vm_map_offset_t pmap_addr); + +extern kern_return_t vm_fault_copy( + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_map_size_t *copy_size, /* INOUT */ + vm_object_t dst_object, + vm_object_offset_t dst_offset, + vm_map_t dst_map, + vm_map_version_t *dst_version, + int interruptible); extern kern_return_t vm_fault_enter( vm_page_t m, @@ -181,19 +181,19 @@ extern kern_return_t vm_fault_enter( vm_prot_t fault_type, boolean_t wired, boolean_t change_wiring, - vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ + vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */ vm_object_fault_info_t fault_info, boolean_t *need_retry, int *type_of_fault); extern vm_offset_t kdp_lightweight_fault( - vm_map_t map, - vm_offset_t cur_target_addr); + vm_map_t map, + vm_offset_t cur_target_addr); extern void vm_rtfault_record_init(void); -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ -#endif /* _VM_VM_FAULT_H_ */ +#endif /* _VM_VM_FAULT_H_ */ diff --git a/osfmk/vm/vm_fourk_pager.c b/osfmk/vm/vm_fourk_pager.c index d8de27077..cdc379909 100644 --- a/osfmk/vm/vm_fourk_pager.c +++ b/osfmk/vm/vm_fourk_pager.c @@ -2,7 +2,7 @@ * Copyright (c) 2014 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -59,12 +59,12 @@ #include -/* - * 4K MEMORY PAGER +/* + * 4K MEMORY PAGER * * This external memory manager (EMM) handles memory mappings that are * 4K-aligned but not page-aligned and can therefore not be mapped directly. - * + * * It mostly handles page-in requests (from memory_object_data_request()) by * getting the data needed to fill in each 4K-chunk. That can require * getting data from one or two pages from its backing VM object @@ -82,35 +82,35 @@ void fourk_pager_reference(memory_object_t mem_obj); void fourk_pager_deallocate(memory_object_t mem_obj); kern_return_t fourk_pager_init(memory_object_t mem_obj, - memory_object_control_t control, - memory_object_cluster_size_t pg_size); + memory_object_control_t control, + memory_object_cluster_size_t pg_size); kern_return_t fourk_pager_terminate(memory_object_t mem_obj); kern_return_t fourk_pager_data_request(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, - vm_prot_t protection_required, - memory_object_fault_info_t fault_info); + memory_object_offset_t offset, + memory_object_cluster_size_t length, + vm_prot_t protection_required, + memory_object_fault_info_t fault_info); kern_return_t fourk_pager_data_return(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt, - memory_object_offset_t *resid_offset, - int *io_error, - boolean_t dirty, - boolean_t kernel_copy, - int upl_flags); + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt, + memory_object_offset_t *resid_offset, + int *io_error, + boolean_t dirty, + boolean_t kernel_copy, + int upl_flags); kern_return_t fourk_pager_data_initialize(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt); + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt); kern_return_t fourk_pager_data_unlock(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t size, - vm_prot_t desired_access); + memory_object_offset_t offset, + memory_object_size_t size, + vm_prot_t desired_access); kern_return_t fourk_pager_synchronize(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t length, - vm_sync_t sync_flags); + memory_object_offset_t offset, + memory_object_size_t length, + vm_sync_t sync_flags); kern_return_t fourk_pager_map(memory_object_t mem_obj, - vm_prot_t prot); + vm_prot_t prot); kern_return_t fourk_pager_last_unmap(memory_object_t mem_obj); /* @@ -137,33 +137,33 @@ const struct memory_object_pager_ops fourk_pager_ops = { * The "fourk_pager" describes a memory object backed by * the "4K" EMM. */ -#define FOURK_PAGER_SLOTS 4 /* 16K / 4K */ +#define FOURK_PAGER_SLOTS 4 /* 16K / 4K */ typedef struct fourk_pager_backing { - vm_object_t backing_object; - vm_object_offset_t backing_offset; + vm_object_t backing_object; + vm_object_offset_t backing_offset; } *fourk_pager_backing_t; typedef struct fourk_pager { /* mandatory generic header */ struct memory_object fourk_pgr_hdr; /* pager-specific data */ - queue_chain_t pager_queue; /* next & prev pagers */ - unsigned int ref_count; /* reference count */ - int is_ready; /* is this pager ready ? */ - int is_mapped; /* is this mem_obj mapped ? 
*/ + queue_chain_t pager_queue; /* next & prev pagers */ + unsigned int ref_count; /* reference count */ + int is_ready; /* is this pager ready ? */ + int is_mapped; /* is this mem_obj mapped ? */ struct fourk_pager_backing slots[FOURK_PAGER_SLOTS]; /* backing for each - 4K-chunk */ + * 4K-chunk */ } *fourk_pager_t; -#define FOURK_PAGER_NULL ((fourk_pager_t) NULL) +#define FOURK_PAGER_NULL ((fourk_pager_t) NULL) /* * List of memory objects managed by this EMM. * The list is protected by the "fourk_pager_lock" lock. */ -int fourk_pager_count = 0; /* number of pagers */ -int fourk_pager_count_mapped = 0; /* number of unmapped pagers */ +int fourk_pager_count = 0; /* number of pagers */ +int fourk_pager_count_mapped = 0; /* number of unmapped pagers */ queue_head_t fourk_pager_queue; -decl_lck_mtx_data(,fourk_pager_lock) +decl_lck_mtx_data(, fourk_pager_lock) /* * Maximum number of unmapped pagers we're willing to keep around. @@ -179,31 +179,31 @@ int fourk_pager_num_trim_max = 0; int fourk_pager_num_trim_total = 0; -lck_grp_t fourk_pager_lck_grp; -lck_grp_attr_t fourk_pager_lck_grp_attr; -lck_attr_t fourk_pager_lck_attr; +lck_grp_t fourk_pager_lck_grp; +lck_grp_attr_t fourk_pager_lck_grp_attr; +lck_attr_t fourk_pager_lck_attr; /* internal prototypes */ fourk_pager_t fourk_pager_lookup(memory_object_t mem_obj); void fourk_pager_dequeue(fourk_pager_t pager); void fourk_pager_deallocate_internal(fourk_pager_t pager, - boolean_t locked); + boolean_t locked); void fourk_pager_terminate_internal(fourk_pager_t pager); void fourk_pager_trim(void); #if DEBUG int fourk_pagerdebug = 0; -#define PAGER_ALL 0xffffffff -#define PAGER_INIT 0x00000001 -#define PAGER_PAGEIN 0x00000002 - -#define PAGER_DEBUG(LEVEL, A) \ - MACRO_BEGIN \ - if ((fourk_pagerdebug & LEVEL)==LEVEL) { \ - printf A; \ - } \ +#define PAGER_ALL 0xffffffff +#define PAGER_INIT 0x00000001 +#define PAGER_PAGEIN 0x00000002 + +#define PAGER_DEBUG(LEVEL, A) \ + MACRO_BEGIN \ + if ((fourk_pagerdebug & LEVEL)==LEVEL) { \ + printf A; \ + } \ MACRO_END #else #define PAGER_DEBUG(LEVEL, A) @@ -227,23 +227,24 @@ fourk_pager_bootstrap(void) */ kern_return_t fourk_pager_init( - memory_object_t mem_obj, - memory_object_control_t control, + memory_object_t mem_obj, + memory_object_control_t control, #if !DEBUG __unused #endif memory_object_cluster_size_t pg_size) { - fourk_pager_t pager; - kern_return_t kr; + fourk_pager_t pager; + kern_return_t kr; memory_object_attr_info_data_t attributes; PAGER_DEBUG(PAGER_ALL, - ("fourk_pager_init: %p, %p, %x\n", - mem_obj, control, pg_size)); + ("fourk_pager_init: %p, %p, %x\n", + mem_obj, control, pg_size)); - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } pager = fourk_pager_lookup(mem_obj); @@ -258,13 +259,14 @@ fourk_pager_init( attributes.temporary = TRUE; kr = memory_object_change_attributes( - control, - MEMORY_OBJECT_ATTRIBUTE_INFO, - (memory_object_info_t) &attributes, - MEMORY_OBJECT_ATTR_INFO_COUNT); - if (kr != KERN_SUCCESS) + control, + MEMORY_OBJECT_ATTRIBUTE_INFO, + (memory_object_info_t) &attributes, + MEMORY_OBJECT_ATTR_INFO_COUNT); + if (kr != KERN_SUCCESS) { panic("fourk_pager_init: " - "memory_object_change_attributes() failed"); + "memory_object_change_attributes() failed"); + } #if CONFIG_SECLUDED_MEMORY if (secluded_for_filecache) { @@ -285,14 +287,14 @@ fourk_pager_init( */ kern_return_t fourk_pager_data_return( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused 
memory_object_cluster_size_t data_cnt, - __unused memory_object_offset_t *resid_offset, - __unused int *io_error, - __unused boolean_t dirty, - __unused boolean_t kernel_copy, - __unused int upl_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt, + __unused memory_object_offset_t *resid_offset, + __unused int *io_error, + __unused boolean_t dirty, + __unused boolean_t kernel_copy, + __unused int upl_flags) { panic("fourk_pager_data_return: should never get called"); return KERN_FAILURE; @@ -300,9 +302,9 @@ fourk_pager_data_return( kern_return_t fourk_pager_data_initialize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt) { panic("fourk_pager_data_initialize: should never get called"); return KERN_FAILURE; @@ -310,10 +312,10 @@ fourk_pager_data_initialize( kern_return_t fourk_pager_data_unlock( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t size, - __unused vm_prot_t desired_access) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t size, + __unused vm_prot_t desired_access) { return KERN_FAILURE; } @@ -327,9 +329,9 @@ fourk_pager_data_unlock( */ void fourk_pager_reference( - memory_object_t mem_obj) -{ - fourk_pager_t pager; + memory_object_t mem_obj) +{ + fourk_pager_t pager; pager = fourk_pager_lookup(mem_obj); @@ -354,12 +356,12 @@ fourk_pager_dequeue( assert(!pager->is_mapped); queue_remove(&fourk_pager_queue, - pager, - fourk_pager_t, - pager_queue); + pager, + fourk_pager_t, + pager_queue); pager->pager_queue.next = NULL; pager->pager_queue.prev = NULL; - + fourk_pager_count--; } @@ -393,7 +395,7 @@ fourk_pager_terminate_internal( pager->slots[i].backing_offset = (vm_object_offset_t) -1; } } - + /* trigger the destruction of the memory object */ memory_object_destroy(pager->fourk_pgr_hdr.mo_control, 0); } @@ -408,18 +410,18 @@ fourk_pager_terminate_internal( */ void fourk_pager_deallocate_internal( - fourk_pager_t pager, - boolean_t locked) + fourk_pager_t pager, + boolean_t locked) { - boolean_t needs_trimming; - int count_unmapped; + boolean_t needs_trimming; + int count_unmapped; - if (! locked) { + if (!locked) { lck_mtx_lock(&fourk_pager_lock); } - count_unmapped = (fourk_pager_count - - fourk_pager_count_mapped); + count_unmapped = (fourk_pager_count - + fourk_pager_count_mapped); if (count_unmapped > fourk_pager_cache_limit) { /* we have too many unmapped pagers: trim some */ needs_trimming = TRUE; @@ -451,7 +453,7 @@ fourk_pager_deallocate_internal( memory_object_control_deallocate(pager->fourk_pgr_hdr.mo_control); pager->fourk_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; } - kfree(pager, sizeof (*pager)); + kfree(pager, sizeof(*pager)); pager = FOURK_PAGER_NULL; } else { /* there are still plenty of references: keep going... 
*/ @@ -472,9 +474,9 @@ fourk_pager_deallocate_internal( */ void fourk_pager_deallocate( - memory_object_t mem_obj) + memory_object_t mem_obj) { - fourk_pager_t pager; + fourk_pager_t pager; PAGER_DEBUG(PAGER_ALL, ("fourk_pager_deallocate: %p\n", mem_obj)); pager = fourk_pager_lookup(mem_obj); @@ -489,7 +491,7 @@ fourk_pager_terminate( #if !DEBUG __unused #endif - memory_object_t mem_obj) + memory_object_t mem_obj) { PAGER_DEBUG(PAGER_ALL, ("fourk_pager_terminate: %p\n", mem_obj)); @@ -501,13 +503,13 @@ fourk_pager_terminate( */ kern_return_t fourk_pager_synchronize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t length, - __unused vm_sync_t sync_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t length, + __unused vm_sync_t sync_flags) { panic("fourk_pager_synchronize: memory_object_synchronize no longer supported\n"); - return (KERN_FAILURE); + return KERN_FAILURE; } /* @@ -520,10 +522,10 @@ fourk_pager_synchronize( */ kern_return_t fourk_pager_map( - memory_object_t mem_obj, - __unused vm_prot_t prot) + memory_object_t mem_obj, + __unused vm_prot_t prot) { - fourk_pager_t pager; + fourk_pager_t pager; PAGER_DEBUG(PAGER_ALL, ("fourk_pager_map: %p\n", mem_obj)); @@ -554,13 +556,13 @@ fourk_pager_map( */ kern_return_t fourk_pager_last_unmap( - memory_object_t mem_obj) + memory_object_t mem_obj) { - fourk_pager_t pager; - int count_unmapped; + fourk_pager_t pager; + int count_unmapped; PAGER_DEBUG(PAGER_ALL, - ("fourk_pager_last_unmap: %p\n", mem_obj)); + ("fourk_pager_last_unmap: %p\n", mem_obj)); pager = fourk_pager_lookup(mem_obj); @@ -572,7 +574,7 @@ fourk_pager_last_unmap( */ fourk_pager_count_mapped--; count_unmapped = (fourk_pager_count - - fourk_pager_count_mapped); + fourk_pager_count_mapped); if (count_unmapped > fourk_pager_count_unmapped_max) { fourk_pager_count_unmapped_max = count_unmapped; } @@ -582,7 +584,7 @@ fourk_pager_last_unmap( } else { lck_mtx_unlock(&fourk_pager_lock); } - + return KERN_SUCCESS; } @@ -592,9 +594,9 @@ fourk_pager_last_unmap( */ fourk_pager_t fourk_pager_lookup( - memory_object_t mem_obj) + memory_object_t mem_obj) { - fourk_pager_t pager; + fourk_pager_t pager; assert(mem_obj->mo_pager_ops == &fourk_pager_ops); pager = (fourk_pager_t) mem_obj; @@ -605,10 +607,10 @@ fourk_pager_lookup( void fourk_pager_trim(void) { - fourk_pager_t pager, prev_pager; - queue_head_t trim_queue; - int num_trim; - int count_unmapped; + fourk_pager_t pager, prev_pager; + queue_head_t trim_queue; + int num_trim; + int count_unmapped; lck_mtx_lock(&fourk_pager_lock); @@ -620,13 +622,13 @@ fourk_pager_trim(void) num_trim = 0; for (pager = (fourk_pager_t) - queue_last(&fourk_pager_queue); - !queue_end(&fourk_pager_queue, - (queue_entry_t) pager); - pager = prev_pager) { + queue_last(&fourk_pager_queue); + !queue_end(&fourk_pager_queue, + (queue_entry_t) pager); + pager = prev_pager) { /* get prev elt before we dequeue */ prev_pager = (fourk_pager_t) - queue_prev(&pager->pager_queue); + queue_prev(&pager->pager_queue); if (pager->ref_count == 2 && pager->is_ready && @@ -637,12 +639,12 @@ fourk_pager_trim(void) fourk_pager_dequeue(pager); /* ... 
and add it to our trim queue */ queue_enter_first(&trim_queue, - pager, - fourk_pager_t, - pager_queue); + pager, + fourk_pager_t, + pager_queue); count_unmapped = (fourk_pager_count - - fourk_pager_count_mapped); + fourk_pager_count_mapped); if (count_unmapped <= fourk_pager_cache_limit) { /* we have enough pagers to trim */ break; @@ -659,9 +661,9 @@ fourk_pager_trim(void) /* terminate the trimmed pagers */ while (!queue_empty(&trim_queue)) { queue_remove_first(&trim_queue, - pager, - fourk_pager_t, - pager_queue); + pager, + fourk_pager_t, + pager_queue); pager->pager_queue.next = NULL; pager->pager_queue.prev = NULL; assert(pager->ref_count == 2); @@ -682,10 +684,10 @@ fourk_pager_trim(void) vm_object_t fourk_pager_to_vm_object( - memory_object_t mem_obj) + memory_object_t mem_obj) { - fourk_pager_t pager; - vm_object_t object; + fourk_pager_t pager; + vm_object_t object; pager = fourk_pager_lookup(mem_obj); if (pager == NULL) { @@ -702,10 +704,10 @@ fourk_pager_to_vm_object( memory_object_t fourk_pager_create(void) { - fourk_pager_t pager; - memory_object_control_t control; - kern_return_t kr; - int i; + fourk_pager_t pager; + memory_object_control_t control; + kern_return_t kr; + int i; #if 00 if (PAGE_SIZE_64 == FOURK_PAGE_SIZE) { @@ -713,11 +715,11 @@ fourk_pager_create(void) } #endif - pager = (fourk_pager_t) kalloc(sizeof (*pager)); + pager = (fourk_pager_t) kalloc(sizeof(*pager)); if (pager == FOURK_PAGER_NULL) { return MEMORY_OBJECT_NULL; } - bzero(pager, sizeof (*pager)); + bzero(pager, sizeof(*pager)); /* * The vm_map call takes both named entry ports and raw memory @@ -730,7 +732,7 @@ fourk_pager_create(void) pager->fourk_pgr_hdr.mo_pager_ops = &fourk_pager_ops; pager->fourk_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; - pager->ref_count = 2; /* existence + setup reference */ + pager->ref_count = 2; /* existence + setup reference */ pager->is_ready = FALSE;/* not ready until it has a "name" */ pager->is_mapped = FALSE; @@ -738,14 +740,14 @@ fourk_pager_create(void) pager->slots[i].backing_object = (vm_object_t) -1; pager->slots[i].backing_offset = (vm_object_offset_t) -1; } - + lck_mtx_lock(&fourk_pager_lock); /* enter new pager at the head of our list of pagers */ queue_enter_first(&fourk_pager_queue, - pager, - fourk_pager_t, - pager_queue); + pager, + fourk_pager_t, + pager_queue); fourk_pager_count++; if (fourk_pager_count > fourk_pager_count_max) { fourk_pager_count_max = fourk_pager_count; @@ -753,8 +755,8 @@ fourk_pager_create(void) lck_mtx_unlock(&fourk_pager_lock); kr = memory_object_create_named((memory_object_t) pager, - 0, - &control); + 0, + &control); assert(kr == KERN_SUCCESS); lck_mtx_lock(&fourk_pager_lock); @@ -774,31 +776,31 @@ fourk_pager_create(void) * Handles page-in requests from VM. 
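 *
 * A worked example of the sub-page selection below, assuming 4K
 * kernel pages under a 16K user page size: a request at offset
 * 0x2000 into the first 16K page computes
 *
 *	sub_page_idx = (0x2000 & SIXTEENK_PAGE_MASK) / PAGE_SIZE == 2
 *
 * and fills in just that one 4K chunk from slots[2]; with 16K
 * kernel pages, all FOURK_PAGER_SLOTS (4) chunks are provided.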
*/ int fourk_pager_data_request_debug = 0; -kern_return_t +kern_return_t fourk_pager_data_request( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t length, #if !DEBUG __unused #endif - vm_prot_t protection_required, + vm_prot_t protection_required, memory_object_fault_info_t mo_fault_info) { - fourk_pager_t pager; - memory_object_control_t mo_control; - upl_t upl; - int upl_flags; - upl_size_t upl_size; - upl_page_info_t *upl_pl; - unsigned int pl_count; - vm_object_t dst_object; - kern_return_t kr, retval; - vm_map_offset_t kernel_mapping; - vm_offset_t src_vaddr, dst_vaddr; - vm_offset_t cur_offset; - int sub_page; - int sub_page_idx, sub_page_cnt; + fourk_pager_t pager; + memory_object_control_t mo_control; + upl_t upl; + int upl_flags; + upl_size_t upl_size; + upl_page_info_t *upl_pl; + unsigned int pl_count; + vm_object_t dst_object; + kern_return_t kr, retval; + vm_map_offset_t kernel_mapping; + vm_offset_t src_vaddr, dst_vaddr; + vm_offset_t cur_offset; + int sub_page; + int sub_page_idx, sub_page_cnt; pager = fourk_pager_lookup(mem_obj); assert(pager->is_ready); @@ -818,15 +820,15 @@ fourk_pager_data_request( upl_size = length; upl_flags = - UPL_RET_ONLY_ABSENT | - UPL_SET_LITE | - UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ - UPL_SET_INTERNAL; + UPL_RET_ONLY_ABSENT | + UPL_SET_LITE | + UPL_NO_SYNC | + UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ + UPL_SET_INTERNAL; pl_count = 0; kr = memory_object_upl_request(mo_control, - offset, upl_size, - &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_NONE); + offset, upl_size, + &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_NONE); if (kr != KERN_SUCCESS) { retval = kr; goto done; @@ -842,16 +844,16 @@ fourk_pager_data_request( * source and destination physical pages when it's their turn to * be processed. */ - vm_map_entry_t map_entry; + vm_map_entry_t map_entry; - vm_object_reference(kernel_object); /* ref. for mapping */ + vm_object_reference(kernel_object); /* ref. for mapping */ kr = vm_map_find_space(kernel_map, - &kernel_mapping, - 2 * PAGE_SIZE_64, - 0, - 0, - VM_MAP_KERNEL_FLAGS_NONE, - &map_entry); + &kernel_mapping, + 2 * PAGE_SIZE_64, + 0, + 0, + VM_MAP_KERNEL_FLAGS_NONE, + &map_entry); if (kr != KERN_SUCCESS) { vm_object_deallocate(kernel_object); retval = kr; @@ -870,8 +872,8 @@ fourk_pager_data_request( upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl); pl_count = length / PAGE_SIZE; for (cur_offset = 0; - retval == KERN_SUCCESS && cur_offset < length; - cur_offset += PAGE_SIZE) { + retval == KERN_SUCCESS && cur_offset < length; + cur_offset += PAGE_SIZE) { ppnum_t dst_pnum; int num_subpg_signed, num_subpg_validated; int num_subpg_tainted, num_subpg_nx; @@ -888,22 +890,22 @@ fourk_pager_data_request( * is "busy". 
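 *
 * On x86_64 and arm/arm64 the physical aperture already provides
 * a kernel virtual address for dst_pnum (PHYSMAP_PTOV() or
 * phystokv()); other configurations fall back to a pmap_enter()
 * into the two-page scratch mapping reserved earlier.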
*/ dst_pnum = (ppnum_t) - upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); + upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); assert(dst_pnum != 0); #if __x86_64__ dst_vaddr = (vm_map_offset_t) - PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); + PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); #elif __arm__ || __arm64__ dst_vaddr = (vm_map_offset_t) - phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT); + phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT); #else kr = pmap_enter(kernel_pmap, - dst_vaddr, - dst_pnum, - VM_PROT_READ | VM_PROT_WRITE, - VM_PROT_NONE, - 0, - TRUE); + dst_vaddr, + dst_pnum, + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_NONE, + 0, + TRUE); assert(kr == KERN_SUCCESS); #endif @@ -918,7 +920,7 @@ fourk_pager_data_request( assert(PAGE_SHIFT == FOURK_PAGE_SHIFT); assert(page_shift_user32 == SIXTEENK_PAGE_SHIFT); sub_page_idx = ((offset & SIXTEENK_PAGE_MASK) / - PAGE_SIZE); + PAGE_SIZE); /* * ... and provide only that one 4KB page. */ @@ -940,32 +942,32 @@ fourk_pager_data_request( /* retrieve appropriate data for each 4K-page in this page */ for (sub_page = sub_page_idx; - sub_page < sub_page_idx + sub_page_cnt; - sub_page++) { - vm_object_t src_object; - memory_object_offset_t src_offset; - vm_offset_t offset_in_src_page; - kern_return_t error_code; - vm_object_t src_page_object; - vm_page_t src_page; - vm_page_t top_page; - vm_prot_t prot; - int interruptible; - struct vm_object_fault_info fault_info; - boolean_t subpg_validated; - unsigned subpg_tainted; + sub_page < sub_page_idx + sub_page_cnt; + sub_page++) { + vm_object_t src_object; + memory_object_offset_t src_offset; + vm_offset_t offset_in_src_page; + kern_return_t error_code; + vm_object_t src_page_object; + vm_page_t src_page; + vm_page_t top_page; + vm_prot_t prot; + int interruptible; + struct vm_object_fault_info fault_info; + boolean_t subpg_validated; + unsigned subpg_tainted; if (offset < SIXTEENK_PAGE_SIZE) { /* * The 1st 16K-page can cover multiple - * sub-mappings, as described in the + * sub-mappings, as described in the * pager->slots[] array. */ src_object = - pager->slots[sub_page].backing_object; + pager->slots[sub_page].backing_object; src_offset = - pager->slots[sub_page].backing_offset; + pager->slots[sub_page].backing_offset; } else { fourk_pager_backing_t slot; @@ -974,45 +976,45 @@ fourk_pager_data_request( * an extension of the last "sub page" in * the pager->slots[] array. 
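 *
 * Worked example of the arithmetic below: for offset = 0x8000
 * and sub_page = 1, the source offset works out to
 *
 *	src_offset = slots[3].backing_offset
 *	    + 0x1000            (one 4K chunk past the last slot)
 *	    + (0x8000 - 0x4000) (distance beyond the first 16K page)
 *	    + 1 * 0x1000;       (this sub-page's 4K chunk)
 *
 * i.e. the mapping continues linearly past the end of slots[3].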
*/ - slot = &pager->slots[FOURK_PAGER_SLOTS-1]; + slot = &pager->slots[FOURK_PAGER_SLOTS - 1]; src_object = slot->backing_object; src_offset = slot->backing_offset; src_offset += FOURK_PAGE_SIZE; src_offset += - (vm_map_trunc_page(offset, - SIXTEENK_PAGE_MASK) - - SIXTEENK_PAGE_SIZE); + (vm_map_trunc_page(offset, + SIXTEENK_PAGE_MASK) + - SIXTEENK_PAGE_SIZE); src_offset += sub_page * FOURK_PAGE_SIZE; } offset_in_src_page = src_offset & PAGE_MASK_64; src_offset = vm_object_trunc_page(src_offset); - + if (src_object == VM_OBJECT_NULL || src_object == (vm_object_t) -1) { /* zero-fill */ bzero((char *)(dst_vaddr + - ((sub_page-sub_page_idx) - * FOURK_PAGE_SIZE)), - FOURK_PAGE_SIZE); + ((sub_page - sub_page_idx) + * FOURK_PAGE_SIZE)), + FOURK_PAGE_SIZE); if (fourk_pager_data_request_debug) { printf("fourk_pager_data_request" - "(%p,0x%llx+0x%lx+0x%04x): " - "ZERO\n", - pager, - offset, - cur_offset, - ((sub_page - sub_page_idx) - * FOURK_PAGE_SIZE)); + "(%p,0x%llx+0x%lx+0x%04x): " + "ZERO\n", + pager, + offset, + cur_offset, + ((sub_page - sub_page_idx) + * FOURK_PAGE_SIZE)); } continue; } /* fault in the source page from src_object */ - retry_src_fault: +retry_src_fault: src_page = VM_PAGE_NULL; top_page = VM_PAGE_NULL; fault_info = *((struct vm_object_fault_info *) - (uintptr_t)mo_fault_info); + (uintptr_t)mo_fault_info); fault_info.stealth = TRUE; fault_info.io_sync = FALSE; fault_info.mark_zf_absent = FALSE; @@ -1024,18 +1026,18 @@ fourk_pager_data_request( vm_object_lock(src_object); vm_object_paging_begin(src_object); kr = vm_fault_page(src_object, - src_offset, - VM_PROT_READ, - FALSE, - FALSE, /* src_page not looked up */ - &prot, - &src_page, - &top_page, - NULL, - &error_code, - FALSE, - FALSE, - &fault_info); + src_offset, + VM_PROT_READ, + FALSE, + FALSE, /* src_page not looked up */ + &prot, + &src_page, + &top_page, + NULL, + &error_code, + FALSE, + FALSE, + &fault_info); switch (kr) { case VM_FAULT_SUCCESS: break; @@ -1045,7 +1047,7 @@ fourk_pager_data_request( if (vm_page_wait(interruptible)) { goto retry_src_fault; } - /* fall thru */ + /* fall thru */ case VM_FAULT_INTERRUPTED: retval = MACH_SEND_INTERRUPTED; goto src_fault_done; @@ -1053,7 +1055,7 @@ fourk_pager_data_request( /* success but no VM page: fail */ vm_object_paging_end(src_object); vm_object_unlock(src_object); - /*FALLTHROUGH*/ + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: /* the page is not there! 
*/ if (error_code) { @@ -1064,18 +1066,18 @@ fourk_pager_data_request( goto src_fault_done; default: panic("fourk_pager_data_request: " - "vm_fault_page() unexpected error 0x%x\n", - kr); + "vm_fault_page() unexpected error 0x%x\n", + kr); } assert(src_page != VM_PAGE_NULL); assert(src_page->vmp_busy); src_page_object = VM_PAGE_OBJECT(src_page); - if (( !VM_PAGE_PAGEABLE(src_page)) && + if ((!VM_PAGE_PAGEABLE(src_page)) && !VM_PAGE_WIRED(src_page)) { vm_page_lockspin_queues(); - if (( !VM_PAGE_PAGEABLE(src_page)) && + if ((!VM_PAGE_PAGEABLE(src_page)) && !VM_PAGE_WIRED(src_page)) { vm_page_deactivate(src_page); } @@ -1084,24 +1086,24 @@ fourk_pager_data_request( #if __x86_64__ src_vaddr = (vm_map_offset_t) - PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) - << PAGE_SHIFT); + PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) + << PAGE_SHIFT); #elif __arm__ || __arm64__ src_vaddr = (vm_map_offset_t) - phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) - << PAGE_SHIFT); + phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) + << PAGE_SHIFT); #else /* * Establish an explicit mapping of the source * physical page. */ kr = pmap_enter(kernel_pmap, - src_vaddr, - VM_PAGE_GET_PHYS_PAGE(src_page), - VM_PROT_READ, - VM_PROT_NONE, - 0, - TRUE); + src_vaddr, + VM_PAGE_GET_PHYS_PAGE(src_page), + VM_PROT_READ, + VM_PROT_NONE, + 0, + TRUE); assert(kr == KERN_SUCCESS); #endif @@ -1149,33 +1151,33 @@ fourk_pager_data_request( * into the appropriate part of the destination page. */ bcopy((const char *)(src_vaddr + offset_in_src_page), - (char *)(dst_vaddr + - ((sub_page - sub_page_idx) * - FOURK_PAGE_SIZE)), - FOURK_PAGE_SIZE); + (char *)(dst_vaddr + + ((sub_page - sub_page_idx) * + FOURK_PAGE_SIZE)), + FOURK_PAGE_SIZE); if (fourk_pager_data_request_debug) { printf("fourk_data_request" - "(%p,0x%llx+0x%lx+0x%04x): " - "backed by [%p:0x%llx]: " - "[0x%016llx 0x%016llx] " - "code_signed=%d " - "cs_valid=%d cs_tainted=%d cs_nx=%d\n", - pager, - offset, cur_offset, - (sub_page-sub_page_idx)*FOURK_PAGE_SIZE, - src_page_object, - src_page->vmp_offset + offset_in_src_page, - *(uint64_t *)(dst_vaddr + - ((sub_page-sub_page_idx) * - FOURK_PAGE_SIZE)), - *(uint64_t *)(dst_vaddr + - ((sub_page-sub_page_idx) * - FOURK_PAGE_SIZE) + - 8), - src_page_object->code_signed, - subpg_validated, - !!(subpg_tainted & CS_VALIDATE_TAINTED), - !!(subpg_tainted & CS_VALIDATE_NX)); + "(%p,0x%llx+0x%lx+0x%04x): " + "backed by [%p:0x%llx]: " + "[0x%016llx 0x%016llx] " + "code_signed=%d " + "cs_valid=%d cs_tainted=%d cs_nx=%d\n", + pager, + offset, cur_offset, + (sub_page - sub_page_idx) * FOURK_PAGE_SIZE, + src_page_object, + src_page->vmp_offset + offset_in_src_page, + *(uint64_t *)(dst_vaddr + + ((sub_page - sub_page_idx) * + FOURK_PAGE_SIZE)), + *(uint64_t *)(dst_vaddr + + ((sub_page - sub_page_idx) * + FOURK_PAGE_SIZE) + + 8), + src_page_object->code_signed, + subpg_validated, + !!(subpg_tainted & CS_VALIDATE_TAINTED), + !!(subpg_tainted & CS_VALIDATE_NX)); } #if __x86_64__ || __arm__ || __arm64__ @@ -1183,15 +1185,15 @@ fourk_pager_data_request( src_vaddr = 0; #else /* __x86_64__ || __arm__ || __arm64__ */ /* - * Remove the pmap mapping of the source page + * Remove the pmap mapping of the source page * in the kernel. */ pmap_remove(kernel_pmap, - (addr64_t) src_vaddr, - (addr64_t) src_vaddr + PAGE_SIZE_64); + (addr64_t) src_vaddr, + (addr64_t) src_vaddr + PAGE_SIZE_64); #endif /* __x86_64__ || __arm__ || __arm64__ */ - src_fault_done: +src_fault_done: /* * Cleanup the result of vm_fault_page(). 
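 *
 * In outline: wake the "busy" source page, drop the paging
 * reference taken before vm_fault_page(), and free any top_page
 * left over from a shadow-chain traversal, much as
 * vm_fault_cleanup() does.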
*/ @@ -1203,7 +1205,7 @@ fourk_pager_data_request vm_object_paging_end(src_page_object); vm_object_unlock(src_page_object); if (top_page) { - vm_object_t top_object; + vm_object_t top_object; top_object = VM_PAGE_OBJECT(top_page); vm_object_lock(top_object); @@ -1219,12 +1221,12 @@ fourk_pager_data_request if (num_subpg_tainted > 0) { /* a tainted subpage taints entire 16K page */ UPL_SET_CS_TAINTED(upl_pl, - cur_offset / PAGE_SIZE, - TRUE); + cur_offset / PAGE_SIZE, + TRUE); /* also mark as "validated" for consistency */ UPL_SET_CS_VALIDATED(upl_pl, - cur_offset / PAGE_SIZE, - TRUE); + cur_offset / PAGE_SIZE, + TRUE); } else if (num_subpg_validated == num_subpg_signed) { /* * All the code-signed 4K subpages of this @@ -1232,13 +1234,13 @@ fourk_pager_data_request * page are validated: the whole page can be * considered validated. */ UPL_SET_CS_VALIDATED(upl_pl, - cur_offset / PAGE_SIZE, - TRUE); + cur_offset / PAGE_SIZE, + TRUE); } if (num_subpg_nx > 0) { UPL_SET_CS_NX(upl_pl, - cur_offset / PAGE_SIZE, - TRUE); + cur_offset / PAGE_SIZE, + TRUE); } } } @@ -1260,7 +1262,7 @@ done: if (retval != KERN_SUCCESS) { upl_abort(upl, 0); if (retval == KERN_ABORTED) { - wait_result_t wait_result; + wait_result_t wait_result; /* * We aborted the fault and did not provide @@ -1280,7 +1282,7 @@ done: wait_result = assert_wait_timeout( (event_t) fourk_pager_data_request, THREAD_UNINT, - 10000, /* 10ms */ + 10000, /* 10ms */ NSEC_PER_USEC); assert(wait_result == THREAD_WAITING); wait_result = thread_block(THREAD_CONTINUE_NULL); @@ -1288,9 +1290,9 @@ done: } } else { boolean_t empty; - upl_commit_range(upl, 0, upl->size, - UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, - upl_pl, pl_count, &empty); + upl_commit_range(upl, 0, upl->size, + UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, + upl_pl, pl_count, &empty); } /* and deallocate the UPL */ @@ -1300,9 +1302,9 @@ done: if (kernel_mapping != 0) { /* clean up the mapping of the source and destination pages */ kr = vm_map_remove(kernel_map, - kernel_mapping, - kernel_mapping + (2 * PAGE_SIZE_64), - VM_MAP_REMOVE_NO_FLAGS); + kernel_mapping, + kernel_mapping + (2 * PAGE_SIZE_64), + VM_MAP_REMOVE_NO_FLAGS); assert(kr == KERN_SUCCESS); kernel_mapping = 0; src_vaddr = 0; @@ -1316,15 +1318,15 @@ done: kern_return_t fourk_pager_populate( - memory_object_t mem_obj, - boolean_t overwrite, - int index, - vm_object_t new_backing_object, - vm_object_offset_t new_backing_offset, - vm_object_t *old_backing_object, - vm_object_offset_t *old_backing_offset) + memory_object_t mem_obj, + boolean_t overwrite, + int index, + vm_object_t new_backing_object, + vm_object_offset_t new_backing_offset, + vm_object_t *old_backing_object, + vm_object_offset_t *old_backing_offset) { - fourk_pager_t pager; + fourk_pager_t pager; pager = fourk_pager_lookup(mem_obj); if (pager == NULL) { @@ -1340,7 +1342,7 @@ fourk_pager_populate( if (!overwrite && (pager->slots[index].backing_object != (vm_object_t) -1 || - pager->slots[index].backing_offset != (vm_object_offset_t) -1)) { + pager->slots[index].backing_offset != (vm_object_offset_t) -1)) { return KERN_INVALID_ADDRESS; } @@ -1352,4 +1354,3 @@ fourk_pager_populate( return KERN_SUCCESS; } - diff --git a/osfmk/vm/vm_init.c b/osfmk/vm/vm_init.c index 82f7ce30c..2c2454821 100644 --- a/osfmk/vm/vm_init.c +++ b/osfmk/vm/vm_init.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -84,7 +84,7 @@ #define ZONE_MAP_MIN CONFIG_ZONE_MAP_MIN /* Maximum zone size is 1.5G */ -#define ZONE_MAP_MAX (1024 * 1024 * 1536) +#define ZONE_MAP_MAX (1024 * 1024 * 1536) const vm_offset_t vm_min_kernel_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS; const vm_offset_t vm_max_kernel_address = VM_MAX_KERNEL_ADDRESS; @@ -116,7 +116,7 @@ vm_mem_bootstrap_log(const char *message) void vm_mem_bootstrap(void) { - vm_offset_t start, end; + vm_offset_t start, end; vm_size_t zsizearg; mach_vm_size_t zsize; @@ -152,42 +152,62 @@ vm_mem_bootstrap(void) * eats at most 2M of VA from the map.) 
*/ if (!PE_parse_boot_argn("kmapoff", &kmapoff_pgcnt, - sizeof (kmapoff_pgcnt))) - kmapoff_pgcnt = early_random() & 0x1ff; /* 9 bits */ - + sizeof(kmapoff_pgcnt))) { + kmapoff_pgcnt = early_random() & 0x1ff; /* 9 bits */ + } if (kmapoff_pgcnt > 0 && vm_allocate_kernel(kernel_map, &kmapoff_kaddr, - kmapoff_pgcnt * PAGE_SIZE_64, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS) + kmapoff_pgcnt * PAGE_SIZE_64, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK) != KERN_SUCCESS) { panic("cannot vm_allocate %u kernel_map pages", kmapoff_pgcnt); + } #if CONFIG_EMBEDDED PE_parse_boot_argn("log_executable_mem_entry", - &log_executable_mem_entry, - sizeof (log_executable_mem_entry)); + &log_executable_mem_entry, + sizeof(log_executable_mem_entry)); #endif /* CONFIG_EMBEDDED */ vm_mem_bootstrap_log("pmap_init"); pmap_init(); - - kmem_alloc_ready = TRUE; - - if (PE_parse_boot_argn("zsize", &zsizearg, sizeof (zsizearg))) - zsize = zsizearg * 1024ULL * 1024ULL; - else { - zsize = sane_size >> 2; /* Get target zone size as 1/4 of physical memory */ - } - if (zsize < ZONE_MAP_MIN) - zsize = ZONE_MAP_MIN; /* Clamp to min */ + kmem_alloc_ready = TRUE; + if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) { + zsize = zsizearg * (1024ULL * 1024); + } else { + zsize = sane_size >> 2; /* Set target zone size as 1/4 of physical memory */ #if defined(__LP64__) - zsize += zsize >> 1; + zsize += zsize >> 1; #endif /* __LP64__ */ - if (zsize > sane_size >> 1) - zsize = sane_size >> 1; /* Clamp to half of RAM max */ + +#if defined(__x86_64__) + /* + * The max_zonemap_size was based on physical memory and might make the + * end of the zone go beyond what vm_page_[un]pack_ptr() can handle. + * To fix that we'll limit the size of the zone map to be what a 256Gig + * machine would have, but we'll retain the boot-args-specified size if + * it was provided. + */ + vm_size_t orig_zsize = zsize; + + if (zsize > 256 * (1024ULL * 1024 * 1024) / 4) { + zsize = 256 * (1024ULL * 1024 * 1024) / 4; + printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n", + (uintptr_t)orig_zsize, (uintptr_t)zsize); + } +#endif + } + + if (zsize < ZONE_MAP_MIN) { + zsize = ZONE_MAP_MIN; /* Clamp to min */ + } + if (zsize > sane_size >> 1) { + zsize = sane_size >> 1; /* Clamp to half of RAM max */ + } #if !__LP64__ - if (zsize > ZONE_MAP_MAX) - zsize = ZONE_MAP_MAX; /* Clamp to 1.5GB max for K32 */ + if (zsize > ZONE_MAP_MAX) { + zsize = ZONE_MAP_MAX; /* Clamp to 1.5GB max for K32 */ + } #endif /* !__LP64__ */ vm_mem_bootstrap_log("kext_alloc_init"); @@ -195,7 +215,7 @@ vm_mem_bootstrap(void) vm_mem_bootstrap_log("zone_init"); assert((vm_size_t) zsize == zsize); - zone_init((vm_size_t) zsize); /* Allocate address space for zones */ + zone_init((vm_size_t) zsize); /* Allocate address space for zones */ /* The vm_page_zone must be created prior to kalloc_init; that * routine can trigger zalloc()s (for e.g. mutex statistic structure @@ -225,7 +245,7 @@ vm_mem_bootstrap(void) vm_mem_bootstrap_log("vm_mem_bootstrap done"); -#ifdef CONFIG_ZCACHE +#ifdef CONFIG_ZCACHE zcache_bootstrap(); #endif vm_rtfault_record_init(); diff --git a/osfmk/vm/vm_init.h b/osfmk/vm/vm_init.h index 8e23b580b..a0ba80a9b 100644 --- a/osfmk/vm/vm_init.h +++ b/osfmk/vm/vm_init.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* diff --git a/osfmk/vm/vm_kern.c b/osfmk/vm/vm_kern.c index 8e53cbd13..29f44cd6d 100644 --- a/osfmk/vm/vm_kern.c +++ b/osfmk/vm/vm_kern.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
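The vm_kern.c hunks that follow re-indent kmem_alloc_contig() and kernel_memory_allocate() without changing behavior. For orientation while reading the reflowed argument lists, a hedged sketch of a typical call into the contiguous allocator; the tag choice is illustrative, and passing 0 for max_pnum and pnum_mask is assumed here to mean no physical-placement constraint:

	vm_offset_t	addr;
	vm_size_t	size = 4 * PAGE_SIZE;
	kern_return_t	kr;

	/* Four physically contiguous, wired pages out of kernel_map. */
	kr = kmem_alloc_contig(kernel_map, &addr, size,
	    /* mask */ 0, /* max_pnum */ 0, /* pnum_mask */ 0,
	    /* flags */ 0, VM_KERN_MEMORY_OSFMK);
	if (kr == KERN_SUCCESS) {
		/* ... use the buffer ... */
		kmem_free(kernel_map, addr, size);
	}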
*/ @@ -75,6 +75,7 @@ #include #include #include +#include #include @@ -88,8 +89,8 @@ * Variables exported by this module. */ -vm_map_t kernel_map; -vm_map_t kernel_pageable_map; +vm_map_t kernel_map; +vm_map_t kernel_pageable_map; extern boolean_t vm_kernel_ready; @@ -97,39 +98,40 @@ extern boolean_t vm_kernel_ready; * Forward declarations for internal functions. */ extern kern_return_t kmem_alloc_pages( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size); + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size); kern_return_t kmem_alloc_contig( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_offset_t mask, - ppnum_t max_pnum, - ppnum_t pnum_mask, - int flags, + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_offset_t mask, + ppnum_t max_pnum, + ppnum_t pnum_mask, + int flags, vm_tag_t tag) { - vm_object_t object; - vm_object_offset_t offset; - vm_map_offset_t map_addr; - vm_map_offset_t map_mask; - vm_map_size_t map_size, i; - vm_map_entry_t entry; - vm_page_t m, pages; - kern_return_t kr; - - assert(VM_KERN_MEMORY_NONE != tag); - - if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) + vm_object_t object; + vm_object_offset_t offset; + vm_map_offset_t map_addr; + vm_map_offset_t map_mask; + vm_map_size_t map_size, i; + vm_map_entry_t entry; + vm_page_t m, pages; + kern_return_t kr; + + assert(VM_KERN_MEMORY_NONE != tag); + + if (map == VM_MAP_NULL || (flags & ~(KMA_KOBJECT | KMA_LOMEM | KMA_NOPAGEWAIT))) { return KERN_INVALID_ARGUMENT; + } map_size = vm_map_round_page(size, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); map_mask = (vm_map_offset_t)mask; - + /* Check for zero allocation size (either directly or via overflow) */ if (map_size == 0) { *addrp = 0; @@ -149,7 +151,7 @@ kmem_alloc_contig( } kr = vm_map_find_space(map, &map_addr, map_size, map_mask, 0, - VM_MAP_KERNEL_FLAGS_NONE, tag, &entry); + VM_MAP_KERNEL_FLAGS_NONE, tag, &entry); if (KERN_SUCCESS != kr) { vm_object_deallocate(object); return kr; @@ -171,11 +173,11 @@ kmem_alloc_contig( if (kr != KERN_SUCCESS) { vm_map_remove(map, - vm_map_trunc_page(map_addr, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(map_addr + map_size, - VM_MAP_PAGE_MASK(map)), - VM_MAP_REMOVE_NO_FLAGS); + vm_map_trunc_page(map_addr, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(map_addr + map_size, + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_NO_FLAGS); vm_object_deallocate(object); *addrp = 0; return kr; @@ -192,12 +194,12 @@ kmem_alloc_contig( vm_object_unlock(object); kr = vm_map_wire_kernel(map, - vm_map_trunc_page(map_addr, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(map_addr + map_size, - VM_MAP_PAGE_MASK(map)), - VM_PROT_DEFAULT, tag, - FALSE); + vm_map_trunc_page(map_addr, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(map_addr + map_size, + VM_MAP_PAGE_MASK(map)), + VM_PROT_DEFAULT, tag, + FALSE); if (kr != KERN_SUCCESS) { if (object == kernel_object) { @@ -206,11 +208,11 @@ kmem_alloc_contig( vm_object_unlock(object); } vm_map_remove(map, - vm_map_trunc_page(map_addr, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(map_addr + map_size, - VM_MAP_PAGE_MASK(map)), - VM_MAP_REMOVE_NO_FLAGS); + vm_map_trunc_page(map_addr, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(map_addr + map_size, + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_NO_FLAGS); vm_object_deallocate(object); return kr; } @@ -218,8 +220,8 @@ kmem_alloc_contig( if (object == kernel_object) { vm_map_simplify(map, map_addr); - vm_tag_update_size(tag, map_size); - } + vm_tag_update_size(tag, 
map_size); + } *addrp = (vm_offset_t) map_addr; assert((vm_map_offset_t) *addrp == map_addr); @@ -245,38 +247,41 @@ kmem_alloc_contig( kern_return_t kernel_memory_allocate( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_offset_t mask, - int flags, + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_offset_t mask, + int flags, vm_tag_t tag) { - vm_object_t object; - vm_object_offset_t offset; - vm_object_offset_t pg_offset; - vm_map_entry_t entry = NULL; - vm_map_offset_t map_addr, fill_start; - vm_map_offset_t map_mask; - vm_map_size_t map_size, fill_size; - kern_return_t kr, pe_result; - vm_page_t mem; - vm_page_t guard_page_list = NULL; - vm_page_t wired_page_list = NULL; - int guard_page_count = 0; - int wired_page_count = 0; - int page_grab_count = 0; - int i; - int vm_alloc_flags; - vm_map_kernel_flags_t vmk_flags; - vm_prot_t kma_prot; - - if (! vm_kernel_ready) { + vm_object_t object; + vm_object_offset_t offset; + vm_object_offset_t pg_offset; + vm_map_entry_t entry = NULL; + vm_map_offset_t map_addr, fill_start; + vm_map_offset_t map_mask; + vm_map_size_t map_size, fill_size; + kern_return_t kr, pe_result; + vm_page_t mem; + vm_page_t guard_page_list = NULL; + vm_page_t wired_page_list = NULL; + int guard_page_count = 0; + int wired_page_count = 0; + int page_grab_count = 0; + int i; + int vm_alloc_flags; + vm_map_kernel_flags_t vmk_flags; + vm_prot_t kma_prot; +#if DEVELOPMENT || DEBUG + task_t task = current_task(); +#endif /* DEVELOPMENT || DEBUG */ + + if (!vm_kernel_ready) { panic("kernel_memory_allocate: VM is not ready"); } map_size = vm_map_round_page(size, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); map_mask = (vm_map_offset_t) mask; vm_alloc_flags = 0; //VM_MAKE_TAG(tag); @@ -295,10 +300,10 @@ kernel_memory_allocate( * limit raised to 2GB with 128GB max physical limit, * but scaled by installed memory above this */ - if (!(flags & (KMA_VAONLY | KMA_PAGEABLE)) && - map_size > MAX(1ULL<<31, sane_size/64)) { - return KERN_RESOURCE_SHORTAGE; - } + if (!(flags & (KMA_VAONLY | KMA_PAGEABLE)) && + map_size > MAX(1ULL << 31, sane_size / 64)) { + return KERN_RESOURCE_SHORTAGE; + } /* * Guard pages: @@ -350,8 +355,9 @@ kernel_memory_allocate( for (;;) { mem = vm_page_grab_guard(); - if (mem != VM_PAGE_NULL) + if (mem != VM_PAGE_NULL) { break; + } if (flags & KMA_NOPAGEWAIT) { kr = KERN_RESOURCE_SHORTAGE; goto out; @@ -363,39 +369,43 @@ kernel_memory_allocate( } if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) { - for (i = 0; i < wired_page_count; i++) { - uint64_t unavailable; - - for (;;) { - if (flags & KMA_LOMEM) - mem = vm_page_grablo(); - else - mem = vm_page_grab(); + for (i = 0; i < wired_page_count; i++) { + uint64_t unavailable; - if (mem != VM_PAGE_NULL) - break; + for (;;) { + if (flags & KMA_LOMEM) { + mem = vm_page_grablo(); + } else { + mem = vm_page_grab(); + } - if (flags & KMA_NOPAGEWAIT) { - kr = KERN_RESOURCE_SHORTAGE; - goto out; - } - if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) { - kr = KERN_RESOURCE_SHORTAGE; - goto out; + if (mem != VM_PAGE_NULL) { + break; + } + + if (flags & KMA_NOPAGEWAIT) { + kr = KERN_RESOURCE_SHORTAGE; + goto out; + } + if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) { + kr = KERN_RESOURCE_SHORTAGE; + goto out; + } + unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE; + + if (unavailable > max_mem || map_size > (max_mem - unavailable)) { + kr = KERN_RESOURCE_SHORTAGE; + goto out; + } + VM_PAGE_WAIT(); } - unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE; 
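The page-grab loop being restructured above refuses to sleep on an unsatisfiable request: once the wired-page count plus the free-page target would leave no room for the allocation, it fails with KERN_RESOURCE_SHORTAGE instead of calling VM_PAGE_WAIT(). The test, lifted into a standalone predicate (a sketch; the kernel reads these counts from globals):

#include <stdbool.h>
#include <stdint.h>

static bool
would_exhaust_memory(uint64_t wire_count, uint64_t free_target,
    uint64_t page_size, uint64_t map_size, uint64_t max_mem)
{
	/* Pages already wired plus the free-page target are off limits. */
	uint64_t unavailable = (wire_count + free_target) * page_size;

	return unavailable > max_mem || map_size > (max_mem - unavailable);
}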
- - if (unavailable > max_mem || map_size > (max_mem - unavailable)) { - kr = KERN_RESOURCE_SHORTAGE; - goto out; + page_grab_count++; + if (KMA_ZERO & flags) { + vm_page_zero_fill(mem); } - VM_PAGE_WAIT(); + mem->vmp_snext = wired_page_list; + wired_page_list = mem; } - page_grab_count++; - if (KMA_ZERO & flags) vm_page_zero_fill(mem); - mem->vmp_snext = wired_page_list; - wired_page_list = mem; - } } /* @@ -412,12 +422,13 @@ kernel_memory_allocate( object = vm_object_allocate(map_size); } - if (flags & KMA_ATOMIC) + if (flags & KMA_ATOMIC) { vmk_flags.vmkf_atomic_entry = TRUE; + } kr = vm_map_find_space(map, &map_addr, - fill_size, map_mask, - vm_alloc_flags, vmk_flags, tag, &entry); + fill_size, map_mask, + vm_alloc_flags, vmk_flags, tag, &entry); if (KERN_SUCCESS != kr) { vm_object_deallocate(object); goto out; @@ -430,15 +441,18 @@ kernel_memory_allocate( } VME_OBJECT_SET(entry, object); VME_OFFSET_SET(entry, offset); - - if (!(flags & (KMA_COMPRESSOR | KMA_PAGEABLE))) + + if (!(flags & (KMA_COMPRESSOR | KMA_PAGEABLE))) { entry->wired_count++; + } - if (flags & KMA_PERMANENT) + if (flags & KMA_PERMANENT) { entry->permanent = TRUE; + } - if (object != kernel_object && object != compressor_object) + if (object != kernel_object && object != compressor_object) { vm_object_reference(object); + } vm_object_lock(object); vm_map_unlock(map); @@ -446,8 +460,9 @@ kernel_memory_allocate( pg_offset = 0; if (fill_start) { - if (guard_page_list == NULL) + if (guard_page_list == NULL) { panic("kernel_memory_allocate: guard_page_list == NULL"); + } mem = guard_page_list; guard_page_list = mem->vmp_snext; @@ -471,57 +486,61 @@ kernel_memory_allocate( if (flags & (KMA_VAONLY | KMA_PAGEABLE)) { pg_offset = fill_start + fill_size; } else { - for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) { - if (wired_page_list == NULL) - panic("kernel_memory_allocate: wired_page_list == NULL"); + for (pg_offset = fill_start; pg_offset < fill_start + fill_size; pg_offset += PAGE_SIZE_64) { + if (wired_page_list == NULL) { + panic("kernel_memory_allocate: wired_page_list == NULL"); + } - mem = wired_page_list; - wired_page_list = mem->vmp_snext; - mem->vmp_snext = NULL; + mem = wired_page_list; + wired_page_list = mem->vmp_snext; + mem->vmp_snext = NULL; - assert(mem->vmp_wire_count == 0); - assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q); + assert(mem->vmp_wire_count == 0); + assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q); - mem->vmp_q_state = VM_PAGE_IS_WIRED; - mem->vmp_wire_count++; - if (__improbable(mem->vmp_wire_count == 0)) { - panic("kernel_memory_allocate(%p): wire_count overflow", - mem); - } + mem->vmp_q_state = VM_PAGE_IS_WIRED; + mem->vmp_wire_count++; + if (__improbable(mem->vmp_wire_count == 0)) { + panic("kernel_memory_allocate(%p): wire_count overflow", + mem); + } - vm_page_insert_wired(mem, object, offset + pg_offset, tag); + vm_page_insert_wired(mem, object, offset + pg_offset, tag); - mem->vmp_busy = FALSE; - mem->vmp_pmapped = TRUE; - mem->vmp_wpmapped = TRUE; + mem->vmp_busy = FALSE; + mem->vmp_pmapped = TRUE; + mem->vmp_wpmapped = TRUE; - PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem, - kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, - PMAP_OPTIONS_NOWAIT, pe_result); + PMAP_ENTER_OPTIONS(kernel_pmap, map_addr + pg_offset, mem, + kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? 
VM_MEM_STACK : 0), TRUE, + PMAP_OPTIONS_NOWAIT, pe_result); - if (pe_result == KERN_RESOURCE_SHORTAGE) { - vm_object_unlock(object); + if (pe_result == KERN_RESOURCE_SHORTAGE) { + vm_object_unlock(object); - PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem, - kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, - pe_result); + PMAP_ENTER(kernel_pmap, map_addr + pg_offset, mem, + kma_prot, VM_PROT_NONE, ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, + pe_result); - vm_object_lock(object); - } + vm_object_lock(object); + } - assert(pe_result == KERN_SUCCESS); + assert(pe_result == KERN_SUCCESS); - if (flags & KMA_NOENCRYPT) { - bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE); + if (flags & KMA_NOENCRYPT) { + bzero(CAST_DOWN(void *, (map_addr + pg_offset)), PAGE_SIZE); - pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)); + pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)); + } + } + if (kernel_object == object) { + vm_tag_update_size(tag, fill_size); } - } - if (kernel_object == object) vm_tag_update_size(tag, fill_size); } if ((fill_start + fill_size) < map_size) { - if (guard_page_list == NULL) + if (guard_page_list == NULL) { panic("kernel_memory_allocate: guard_page_list == NULL"); + } mem = guard_page_list; guard_page_list = mem->vmp_snext; @@ -531,13 +550,14 @@ kernel_memory_allocate( mem->vmp_busy = FALSE; } - if (guard_page_list || wired_page_list) + if (guard_page_list || wired_page_list) { panic("kernel_memory_allocate: non empty list\n"); + } if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) { - vm_page_lockspin_queues(); - vm_page_wire_count += wired_page_count; - vm_page_unlock_queues(); + vm_page_lockspin_queues(); + vm_page_wire_count += wired_page_count; + vm_page_unlock_queues(); } vm_object_unlock(object); @@ -545,13 +565,17 @@ kernel_memory_allocate( /* * now that the pages are wired, we no longer have to fear coalesce */ - if (object == kernel_object || object == compressor_object) + if (object == kernel_object || object == compressor_object) { vm_map_simplify(map, map_addr); - else + } else { vm_object_deallocate(object); + } #if DEBUG || DEVELOPMENT VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0); + if (task != NULL) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count); + } #endif /* @@ -561,14 +585,19 @@ kernel_memory_allocate( return KERN_SUCCESS; out: - if (guard_page_list) + if (guard_page_list) { vm_page_free_list(guard_page_list, FALSE); + } - if (wired_page_list) + if (wired_page_list) { vm_page_free_list(wired_page_list, FALSE); + } #if DEBUG || DEVELOPMENT VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0); + if (task != NULL && kr == KERN_SUCCESS) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count); + } #endif return kr; @@ -576,55 +605,57 @@ out: kern_return_t kernel_memory_populate( - vm_map_t map, - vm_offset_t addr, - vm_size_t size, - int flags, + vm_map_t map, + vm_offset_t addr, + vm_size_t size, + int flags, vm_tag_t tag) { - vm_object_t object; - vm_object_offset_t offset, pg_offset; - kern_return_t kr, pe_result; - vm_page_t mem; - vm_page_t page_list = NULL; - int page_count = 0; - int page_grab_count = 0; - int i; + vm_object_t object; + vm_object_offset_t offset, pg_offset; + kern_return_t kr, pe_result; + vm_page_t mem; + vm_page_t page_list = NULL; + int page_count = 0; + int page_grab_count = 0; + int i; #if DEBUG || DEVELOPMENT + task_t task = current_task(); 
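Besides the reflow, these hunks add per-task accounting of kernel page grabs on DEVELOPMENT and DEBUG builds: the calling task is sampled at entry and its pages_grabbed_kern ledger is credited on the way out. The shape of the pattern the patch repeats, reduced to its essentials:

#if DEBUG || DEVELOPMENT
	task_t task = current_task();
#endif /* DEBUG || DEVELOPMENT */

	/* ... grab pages, incrementing page_grab_count, as in the loops above ... */

#if DEBUG || DEVELOPMENT
	if (task != NULL) {
		/* Charge every grabbed page to the calling task's ledger. */
		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern,
		    page_grab_count);
	}
#endif /* DEBUG || DEVELOPMENT */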
VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START, size, 0, 0, 0); #endif page_count = (int) (size / PAGE_SIZE_64); - assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT)); + assert((flags & (KMA_COMPRESSOR | KMA_KOBJECT)) != (KMA_COMPRESSOR | KMA_KOBJECT)); if (flags & KMA_COMPRESSOR) { - pg_offset = page_count * PAGE_SIZE_64; do { for (;;) { mem = vm_page_grab(); - if (mem != VM_PAGE_NULL) + if (mem != VM_PAGE_NULL) { break; - + } + VM_PAGE_WAIT(); } page_grab_count++; - if (KMA_ZERO & flags) vm_page_zero_fill(mem); + if (KMA_ZERO & flags) { + vm_page_zero_fill(mem); + } mem->vmp_snext = page_list; page_list = mem; pg_offset -= PAGE_SIZE_64; kr = pmap_enter_options(kernel_pmap, - addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem), - VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, - PMAP_OPTIONS_INTERNAL, NULL); + addr + pg_offset, VM_PAGE_GET_PHYS_PAGE(mem), + VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, + PMAP_OPTIONS_INTERNAL, NULL); assert(kr == KERN_SUCCESS); - } while (pg_offset); offset = addr; @@ -633,9 +664,8 @@ kernel_memory_populate( vm_object_lock(object); for (pg_offset = 0; - pg_offset < size; - pg_offset += PAGE_SIZE_64) { - + pg_offset < size; + pg_offset += PAGE_SIZE_64) { mem = page_list; page_list = mem->vmp_snext; mem->vmp_snext = NULL; @@ -660,19 +690,24 @@ kernel_memory_populate( #if DEBUG || DEVELOPMENT VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0); + if (task != NULL) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count); + } #endif return KERN_SUCCESS; } for (i = 0; i < page_count; i++) { for (;;) { - if (flags & KMA_LOMEM) + if (flags & KMA_LOMEM) { mem = vm_page_grablo(); - else + } else { mem = vm_page_grab(); - - if (mem != VM_PAGE_NULL) + } + + if (mem != VM_PAGE_NULL) { break; + } if (flags & KMA_NOPAGEWAIT) { kr = KERN_RESOURCE_SHORTAGE; @@ -686,7 +721,9 @@ kernel_memory_populate( VM_PAGE_WAIT(); } page_grab_count++; - if (KMA_ZERO & flags) vm_page_zero_fill(mem); + if (KMA_ZERO & flags) { + vm_page_zero_fill(mem); + } mem->vmp_snext = page_list; page_list = mem; } @@ -705,16 +742,16 @@ kernel_memory_populate( * unlock map; */ panic("kernel_memory_populate(%p,0x%llx,0x%llx,0x%x): " - "!KMA_KOBJECT", - map, (uint64_t) addr, (uint64_t) size, flags); + "!KMA_KOBJECT", + map, (uint64_t) addr, (uint64_t) size, flags); } for (pg_offset = 0; - pg_offset < size; - pg_offset += PAGE_SIZE_64) { - - if (page_list == NULL) + pg_offset < size; + pg_offset += PAGE_SIZE_64) { + if (page_list == NULL) { panic("kernel_memory_populate: page_list == NULL"); + } mem = page_list; page_list = mem->vmp_snext; @@ -734,18 +771,17 @@ kernel_memory_populate( mem->vmp_wpmapped = TRUE; PMAP_ENTER_OPTIONS(kernel_pmap, addr + pg_offset, mem, - VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, - ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, - PMAP_OPTIONS_NOWAIT, pe_result); + VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, + ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, + PMAP_OPTIONS_NOWAIT, pe_result); if (pe_result == KERN_RESOURCE_SHORTAGE) { - vm_object_unlock(object); PMAP_ENTER(kernel_pmap, addr + pg_offset, mem, - VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, - ((flags & KMA_KSTACK) ? VM_MEM_STACK : 0), TRUE, - pe_result); + VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, + ((flags & KMA_KSTACK) ? 
VM_MEM_STACK : 0), TRUE, + pe_result); vm_object_lock(object); } @@ -763,9 +799,14 @@ kernel_memory_populate( #if DEBUG || DEVELOPMENT VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0); + if (task != NULL) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count); + } #endif - if (kernel_object == object) vm_tag_update_size(tag, size); + if (kernel_object == object) { + vm_tag_update_size(tag, size); + } vm_object_unlock(object); @@ -779,11 +820,15 @@ kernel_memory_populate( return KERN_SUCCESS; out: - if (page_list) + if (page_list) { vm_page_free_list(page_list, FALSE); + } #if DEBUG || DEVELOPMENT VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0); + if (task != NULL && kr == KERN_SUCCESS) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count); + } #endif return kr; @@ -792,17 +837,17 @@ out: void kernel_memory_depopulate( - vm_map_t map, - vm_offset_t addr, - vm_size_t size, - int flags) + vm_map_t map, + vm_offset_t addr, + vm_size_t size, + int flags) { - vm_object_t object; - vm_object_offset_t offset, pg_offset; - vm_page_t mem; - vm_page_t local_freeq = NULL; + vm_object_t object; + vm_object_offset_t offset, pg_offset; + vm_page_t mem; + vm_page_t local_freeq = NULL; - assert((flags & (KMA_COMPRESSOR|KMA_KOBJECT)) != (KMA_COMPRESSOR|KMA_KOBJECT)); + assert((flags & (KMA_COMPRESSOR | KMA_KOBJECT)) != (KMA_COMPRESSOR | KMA_KOBJECT)); if (flags & KMA_COMPRESSOR) { offset = addr; @@ -816,29 +861,29 @@ kernel_memory_depopulate( } else { offset = 0; object = NULL; - /* - * If it's not the kernel object, we need to: - * lock map; - * lookup entry; - * lock object; - * unlock map; - */ + /* + * If it's not the kernel object, we need to: + * lock map; + * lookup entry; + * lock object; + * unlock map; + */ panic("kernel_memory_depopulate(%p,0x%llx,0x%llx,0x%x): " - "!KMA_KOBJECT", - map, (uint64_t) addr, (uint64_t) size, flags); + "!KMA_KOBJECT", + map, (uint64_t) addr, (uint64_t) size, flags); } pmap_protect(kernel_map->pmap, offset, offset + size, VM_PROT_NONE); for (pg_offset = 0; - pg_offset < size; - pg_offset += PAGE_SIZE_64) { - + pg_offset < size; + pg_offset += PAGE_SIZE_64) { mem = vm_page_lookup(object, offset + pg_offset); assert(mem); - - if (mem->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) + + if (mem->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) { pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem)); + } mem->vmp_busy = TRUE; @@ -848,7 +893,7 @@ kernel_memory_depopulate( assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0); assert((mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || - (mem->vmp_q_state == VM_PAGE_NOT_ON_Q)); + (mem->vmp_q_state == VM_PAGE_NOT_ON_Q)); mem->vmp_q_state = VM_PAGE_NOT_ON_Q; mem->vmp_snext = local_freeq; @@ -856,8 +901,9 @@ kernel_memory_depopulate( } vm_object_unlock(object); - if (local_freeq) + if (local_freeq) { vm_page_free_list(local_freeq, TRUE); + } } /* @@ -869,31 +915,31 @@ kernel_memory_depopulate( kern_return_t kmem_alloc_external( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size) + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size) { - return (kmem_alloc(map, addrp, size, vm_tag_bt())); + return kmem_alloc(map, addrp, size, vm_tag_bt()); } kern_return_t kmem_alloc( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_tag_t tag) + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_tag_t tag) { return kmem_alloc_flags(map, addrp, size, tag, 0); } kern_return_t 
kmem_alloc_flags( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_tag_t tag, - int flags) + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_tag_t tag, + int flags) { kern_return_t kr = kernel_memory_allocate(map, addrp, size, 0, flags, tag); TRACE_MACHLEAKS(KMEM_ALLOC_CODE, KMEM_ALLOC_CODE_2, size, *addrp); @@ -912,32 +958,32 @@ kmem_alloc_flags( */ kern_return_t kmem_realloc( - vm_map_t map, - vm_offset_t oldaddr, - vm_size_t oldsize, - vm_offset_t *newaddrp, - vm_size_t newsize, - vm_tag_t tag) + vm_map_t map, + vm_offset_t oldaddr, + vm_size_t oldsize, + vm_offset_t *newaddrp, + vm_size_t newsize, + vm_tag_t tag) { - vm_object_t object; - vm_object_offset_t offset; - vm_map_offset_t oldmapmin; - vm_map_offset_t oldmapmax; - vm_map_offset_t newmapaddr; - vm_map_size_t oldmapsize; - vm_map_size_t newmapsize; - vm_map_entry_t oldentry; - vm_map_entry_t newentry; - vm_page_t mem; - kern_return_t kr; + vm_object_t object; + vm_object_offset_t offset; + vm_map_offset_t oldmapmin; + vm_map_offset_t oldmapmax; + vm_map_offset_t newmapaddr; + vm_map_size_t oldmapsize; + vm_map_size_t newmapsize; + vm_map_entry_t oldentry; + vm_map_entry_t newentry; + vm_page_t mem; + kern_return_t kr; oldmapmin = vm_map_trunc_page(oldaddr, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); oldmapmax = vm_map_round_page(oldaddr + oldsize, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); oldmapsize = oldmapmax - oldmapmin; newmapsize = vm_map_round_page(newsize, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); if (newmapsize < newsize) { /* overflow */ *newaddrp = 0; @@ -950,8 +996,9 @@ kmem_realloc( vm_map_lock(map); - if (!vm_map_lookup_entry(map, oldmapmin, &oldentry)) + if (!vm_map_lookup_entry(map, oldmapmin, &oldentry)) { panic("kmem_realloc"); + } object = VME_OBJECT(oldentry); /* @@ -965,30 +1012,31 @@ kmem_realloc( /* attempt is made to realloc a kmem_alloc'd area */ vm_object_lock(object); vm_map_unlock(map); - if (object->vo_size != oldmapsize) + if (object->vo_size != oldmapsize) { panic("kmem_realloc"); + } object->vo_size = newmapsize; vm_object_unlock(object); /* allocate the new pages while expanded portion of the */ /* object is still not mapped */ kmem_alloc_pages(object, vm_object_round_page(oldmapsize), - vm_object_round_page(newmapsize-oldmapsize)); + vm_object_round_page(newmapsize - oldmapsize)); /* * Find space for the new region. 
*/ kr = vm_map_find_space(map, &newmapaddr, newmapsize, - (vm_map_offset_t) 0, 0, - VM_MAP_KERNEL_FLAGS_NONE, - tag, - &newentry); + (vm_map_offset_t) 0, 0, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + &newentry); if (kr != KERN_SUCCESS) { vm_object_lock(object); - for(offset = oldmapsize; + for (offset = oldmapsize; offset < newmapsize; offset += PAGE_SIZE) { - if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { + if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { VM_PAGE_FREE(mem); } } @@ -1001,30 +1049,32 @@ kmem_realloc( VME_OFFSET_SET(newentry, 0); assert(newentry->wired_count == 0); - + /* add an extra reference in case we have someone doing an */ /* unexpected deallocate */ vm_object_reference(object); vm_map_unlock(map); kr = vm_map_wire_kernel(map, newmapaddr, newmapaddr + newmapsize, - VM_PROT_DEFAULT, tag, FALSE); + VM_PROT_DEFAULT, tag, FALSE); if (KERN_SUCCESS != kr) { vm_map_remove(map, newmapaddr, newmapaddr + newmapsize, VM_MAP_REMOVE_NO_FLAGS); vm_object_lock(object); - for(offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) { - if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { + for (offset = oldsize; offset < newmapsize; offset += PAGE_SIZE) { + if ((mem = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { VM_PAGE_FREE(mem); } } object->vo_size = oldmapsize; vm_object_unlock(object); vm_object_deallocate(object); - return (kr); + return kr; } vm_object_deallocate(object); - if (kernel_object == object) vm_tag_update_size(tag, newmapsize); + if (kernel_object == object) { + vm_tag_update_size(tag, newmapsize); + } *newaddrp = CAST_DOWN(vm_offset_t, newmapaddr); return KERN_SUCCESS; @@ -1043,18 +1093,18 @@ kmem_realloc( kern_return_t kmem_alloc_kobject_external( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size) + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size) { - return (kmem_alloc_kobject(map, addrp, size, vm_tag_bt())); + return kmem_alloc_kobject(map, addrp, size, vm_tag_bt()); } kern_return_t kmem_alloc_kobject( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, vm_tag_t tag) { return kernel_memory_allocate(map, addrp, size, 0, KMA_KOBJECT, tag); @@ -1069,13 +1119,14 @@ kmem_alloc_kobject( kern_return_t kmem_alloc_aligned( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, vm_tag_t tag) { - if ((size & (size - 1)) != 0) + if ((size & (size - 1)) != 0) { panic("kmem_alloc_aligned: size not aligned"); + } return kernel_memory_allocate(map, addrp, size, size - 1, KMA_KOBJECT, tag); } @@ -1087,22 +1138,22 @@ kmem_alloc_aligned( kern_return_t kmem_alloc_pageable_external( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size) + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size) { - return (kmem_alloc_pageable(map, addrp, size, vm_tag_bt())); + return kmem_alloc_pageable(map, addrp, size, vm_tag_bt()); } kern_return_t kmem_alloc_pageable( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, vm_tag_t tag) { vm_map_offset_t map_addr; - vm_map_size_t map_size; + vm_map_size_t map_size; kern_return_t kr; #ifndef normal @@ -1111,7 +1162,7 @@ kmem_alloc_pageable( map_addr = vm_map_min(map); #endif map_size = vm_map_round_page(size, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); if (map_size < size) { /* overflow */ *addrp = 0; @@ -1119,15 +1170,16 @@ kmem_alloc_pageable( } kr = vm_map_enter(map, &map_addr, map_size, - (vm_map_offset_t) 0, - 
VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - tag, - VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); - - if (kr != KERN_SUCCESS) + (vm_map_offset_t) 0, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); + + if (kr != KERN_SUCCESS) { return kr; + } #if KASAN kasan_notify_address(map_addr, map_size); @@ -1146,9 +1198,9 @@ kmem_alloc_pageable( void kmem_free( - vm_map_t map, - vm_offset_t addr, - vm_size_t size) + vm_map_t map, + vm_offset_t addr, + vm_size_t size) { kern_return_t kr; @@ -1156,21 +1208,22 @@ kmem_free( TRACE_MACHLEAKS(KMEM_FREE_CODE, KMEM_FREE_CODE_2, size, addr); - if(size == 0) { + if (size == 0) { #if MACH_ASSERT - printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n",map,(uint64_t)addr); + printf("kmem_free called with size==0 for map: %p with addr: 0x%llx\n", map, (uint64_t)addr); #endif return; } kr = vm_map_remove(map, - vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(addr + size, - VM_MAP_PAGE_MASK(map)), - VM_MAP_REMOVE_KUNWIRE); - if (kr != KERN_SUCCESS) + vm_map_trunc_page(addr, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(addr + size, + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_KUNWIRE); + if (kr != KERN_SUCCESS) { panic("kmem_free"); + } } /* @@ -1179,31 +1232,31 @@ kmem_free( kern_return_t kmem_alloc_pages( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size) + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size) { - vm_object_size_t alloc_size; + vm_object_size_t alloc_size; alloc_size = vm_object_round_page(size); - vm_object_lock(object); + vm_object_lock(object); while (alloc_size) { - vm_page_t mem; + vm_page_t mem; - /* - * Allocate a page - */ - while (VM_PAGE_NULL == - (mem = vm_page_alloc(object, offset))) { - vm_object_unlock(object); - VM_PAGE_WAIT(); - vm_object_lock(object); - } - mem->vmp_busy = FALSE; + /* + * Allocate a page + */ + while (VM_PAGE_NULL == + (mem = vm_page_alloc(object, offset))) { + vm_object_unlock(object); + VM_PAGE_WAIT(); + vm_object_lock(object); + } + mem->vmp_busy = FALSE; - alloc_size -= PAGE_SIZE; - offset += PAGE_SIZE; + alloc_size -= PAGE_SIZE; + offset += PAGE_SIZE; } vm_object_unlock(object); return KERN_SUCCESS; @@ -1226,22 +1279,22 @@ kmem_alloc_pages( */ kern_return_t kmem_suballoc( - vm_map_t parent, - vm_offset_t *addr, - vm_size_t size, - boolean_t pageable, - int flags, + vm_map_t parent, + vm_offset_t *addr, + vm_size_t size, + boolean_t pageable, + int flags, vm_map_kernel_flags_t vmk_flags, vm_tag_t tag, - vm_map_t *new_map) + vm_map_t *new_map) { - vm_map_t map; - vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_t map; + vm_map_offset_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; map_size = vm_map_round_page(size, - VM_MAP_PAGE_MASK(parent)); + VM_MAP_PAGE_MASK(parent)); if (map_size < size) { /* overflow */ *addr = 0; @@ -1256,23 +1309,24 @@ kmem_suballoc( vm_object_reference(vm_submap_object); map_addr = ((flags & VM_FLAGS_ANYWHERE) - ? vm_map_min(parent) - : vm_map_trunc_page(*addr, - VM_MAP_PAGE_MASK(parent))); + ? 
vm_map_min(parent) + : vm_map_trunc_page(*addr, + VM_MAP_PAGE_MASK(parent))); kr = vm_map_enter(parent, &map_addr, map_size, - (vm_map_offset_t) 0, flags, vmk_flags, tag, - vm_submap_object, (vm_object_offset_t) 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); + (vm_map_offset_t) 0, flags, vmk_flags, tag, + vm_submap_object, (vm_object_offset_t) 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); if (kr != KERN_SUCCESS) { vm_object_deallocate(vm_submap_object); - return (kr); + return kr; } pmap_reference(vm_map_pmap(parent)); map = vm_map_create(vm_map_pmap(parent), map_addr, map_addr + map_size, pageable); - if (map == VM_MAP_NULL) - panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */ + if (map == VM_MAP_NULL) { + panic("kmem_suballoc: vm_map_create failed"); /* "can't happen" */ + } /* inherit the parent map's page size */ vm_map_set_page_shift(map, VM_MAP_PAGE_SHIFT(parent)); @@ -1282,14 +1336,14 @@ kmem_suballoc( * See comment preceding vm_map_submap(). */ vm_map_remove(parent, map_addr, map_addr + map_size, - VM_MAP_REMOVE_NO_FLAGS); - vm_map_deallocate(map); /* also removes ref to pmap */ + VM_MAP_REMOVE_NO_FLAGS); + vm_map_deallocate(map); /* also removes ref to pmap */ vm_object_deallocate(vm_submap_object); - return (kr); + return kr; } *addr = CAST_DOWN(vm_offset_t, map_addr); *new_map = map; - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -1300,8 +1354,8 @@ kmem_suballoc( */ void kmem_init( - vm_offset_t start, - vm_offset_t end) + vm_offset_t start, + vm_offset_t end) { vm_map_offset_t map_start; vm_map_offset_t map_end; @@ -1312,79 +1366,78 @@ kmem_init( vmk_flags.vmkf_no_pmap_check = TRUE; map_start = vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(kernel_map)); + VM_MAP_PAGE_MASK(kernel_map)); map_end = vm_map_round_page(end, - VM_MAP_PAGE_MASK(kernel_map)); + VM_MAP_PAGE_MASK(kernel_map)); -#if defined(__arm__) || defined(__arm64__) - kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS, - VM_MAX_KERNEL_ADDRESS, FALSE); +#if defined(__arm__) || defined(__arm64__) + kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS, + VM_MAX_KERNEL_ADDRESS, FALSE); /* * Reserve virtual memory allocated up to this time. 
 */ { - unsigned int region_select = 0; - vm_map_offset_t region_start; - vm_map_size_t region_size; + unsigned int region_select = 0; + vm_map_offset_t region_start; + vm_map_size_t region_size; vm_map_offset_t map_addr; kern_return_t kr; while (pmap_virtual_region(region_select, &region_start, &region_size)) { - map_addr = region_start; kr = vm_map_enter(kernel_map, &map_addr, - vm_map_round_page(region_size, - VM_MAP_PAGE_MASK(kernel_map)), - (vm_map_offset_t) 0, - VM_FLAGS_FIXED, - vmk_flags, - VM_KERN_MEMORY_NONE, - VM_OBJECT_NULL, - (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE, - VM_INHERIT_DEFAULT); + vm_map_round_page(region_size, + VM_MAP_PAGE_MASK(kernel_map)), + (vm_map_offset_t) 0, + VM_FLAGS_FIXED, + vmk_flags, + VM_KERN_MEMORY_NONE, + VM_OBJECT_NULL, + (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE, + VM_INHERIT_DEFAULT); if (kr != KERN_SUCCESS) { panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n", - (uint64_t) start, (uint64_t) end, (uint64_t) region_start, - (uint64_t) region_size, kr); - } + (uint64_t) start, (uint64_t) end, (uint64_t) region_start, + (uint64_t) region_size, kr); + } region_select++; - } + } } #else - kernel_map = vm_map_create(pmap_kernel(),VM_MIN_KERNEL_AND_KEXT_ADDRESS, - map_end, FALSE); + kernel_map = vm_map_create(pmap_kernel(), VM_MIN_KERNEL_AND_KEXT_ADDRESS, + map_end, FALSE); /* * Reserve virtual memory allocated up to this time. */ if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) { vm_map_offset_t map_addr; kern_return_t kr; - + vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; vmk_flags.vmkf_no_pmap_check = TRUE; map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS; kr = vm_map_enter(kernel_map, - &map_addr, - (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS), - (vm_map_offset_t) 0, - VM_FLAGS_FIXED, - vmk_flags, - VM_KERN_MEMORY_NONE, - VM_OBJECT_NULL, - (vm_object_offset_t) 0, FALSE, - VM_PROT_NONE, VM_PROT_NONE, - VM_INHERIT_DEFAULT); - + &map_addr, + (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS), + (vm_map_offset_t) 0, + VM_FLAGS_FIXED, + vmk_flags, + VM_KERN_MEMORY_NONE, + VM_OBJECT_NULL, + (vm_object_offset_t) 0, FALSE, + VM_PROT_NONE, VM_PROT_NONE, + VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS) { panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x\n", - (uint64_t) start, (uint64_t) end, - (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS, - (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS), - kr); - } + (uint64_t) start, (uint64_t) end, + (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS, + (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS), + kr); + } } #endif @@ -1394,10 +1447,10 @@ kmem_init( * amount of memory that is potentially usable by a user app (max_mem) * minus a certain amount. This can be overridden via a sysctl.
*/ - vm_global_no_user_wire_amount = MIN(max_mem*20/100, - VM_NOT_USER_WIREABLE); + vm_global_no_user_wire_amount = MIN(max_mem * 20 / 100, + VM_NOT_USER_WIREABLE); vm_global_user_wire_limit = max_mem - vm_global_no_user_wire_amount; - + /* the default per user limit is the same as the global limit */ vm_user_wire_limit = vm_global_user_wire_limit; } @@ -1413,30 +1466,27 @@ kmem_init( */ kern_return_t copyinmap( - vm_map_t map, - vm_map_offset_t fromaddr, - void *todata, - vm_size_t length) + vm_map_t map, + vm_map_offset_t fromaddr, + void *todata, + vm_size_t length) { - kern_return_t kr = KERN_SUCCESS; + kern_return_t kr = KERN_SUCCESS; vm_map_t oldmap; - if (vm_map_pmap(map) == pmap_kernel()) - { + if (vm_map_pmap(map) == pmap_kernel()) { /* assume a correct copy */ memcpy(todata, CAST_DOWN(void *, fromaddr), length); - } - else if (current_map() == map) - { - if (copyin(fromaddr, todata, length) != 0) + } else if (current_map() == map) { + if (copyin(fromaddr, todata, length) != 0) { kr = KERN_INVALID_ADDRESS; - } - else - { + } + } else { vm_map_reference(map); oldmap = vm_map_switch(map); - if (copyin(fromaddr, todata, length) != 0) + if (copyin(fromaddr, todata, length) != 0) { kr = KERN_INVALID_ADDRESS; + } vm_map_switch(oldmap); vm_map_deallocate(map); } @@ -1453,10 +1503,10 @@ copyinmap( */ kern_return_t copyoutmap( - vm_map_t map, - void *fromdata, - vm_map_address_t toaddr, - vm_size_t length) + vm_map_t map, + void *fromdata, + vm_map_address_t toaddr, + vm_size_t length) { if (vm_map_pmap(map) == pmap_kernel()) { /* assume a correct copy */ @@ -1464,11 +1514,13 @@ copyoutmap( return KERN_SUCCESS; } - if (current_map() != map) + if (current_map() != map) { return KERN_NOT_SUPPORTED; + } - if (copyout(fromdata, toaddr, length) != 0) + if (copyout(fromdata, toaddr, length) != 0) { return KERN_INVALID_ADDRESS; + } return KERN_SUCCESS; } @@ -1502,7 +1554,7 @@ vm_kernel_addrhash_internal( return; } - vm_offset_t sha_digest[SHA256_DIGEST_LENGTH/sizeof(vm_offset_t)]; + vm_offset_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(vm_offset_t)]; SHA256_CTX sha_ctx; SHA256_Init(&sha_ctx); diff --git a/osfmk/vm/vm_kern.h b/osfmk/vm/vm_kern.h index d63523e08..7d78e27cd 100644 --- a/osfmk/vm/vm_kern.h +++ b/osfmk/vm/vm_kern.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
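copyinmap() and copyoutmap(), reformatted above, are deliberately asymmetric: copyinmap() falls back to vm_map_switch() for a foreign map, while copyoutmap() simply returns KERN_NOT_SUPPORTED. A small sketch exercising both directions (the function name and test value are hypothetical):

static kern_return_t
poke_then_peek(vm_map_t map, vm_map_offset_t uaddr)
{
	uint32_t word = 0xfeedface;	/* arbitrary test value */
	uint32_t check = 0;
	kern_return_t kr;

	/*
	 * Fails with KERN_NOT_SUPPORTED if 'map' is neither backed by
	 * the kernel pmap nor the current map.
	 */
	kr = copyoutmap(map, &word, uaddr, sizeof(word));
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* copyinmap() also handles foreign maps via vm_map_switch(). */
	return copyinmap(map, uaddr, &check, sizeof(check));
}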
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * Kernel memory management definitions. */ -#ifndef _VM_VM_KERN_H_ +#ifndef _VM_VM_KERN_H_ #define _VM_VM_KERN_H_ #ifdef __cplusplus @@ -74,126 +74,126 @@ extern "C" { #include #include -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #ifdef XNU_KERNEL_PRIVATE #include -extern kern_return_t kernel_memory_allocate( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_offset_t mask, - int flags, - vm_tag_t tag); +extern kern_return_t kernel_memory_allocate( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_tag_t tag); /* flags for kernel_memory_allocate */ -#define KMA_HERE 0x01 -#define KMA_NOPAGEWAIT 0x02 -#define KMA_KOBJECT 0x04 -#define KMA_LOMEM 0x08 -#define KMA_GUARD_FIRST 0x10 -#define KMA_GUARD_LAST 0x20 -#define KMA_PERMANENT 0x40 -#define KMA_NOENCRYPT 0x80 -#define KMA_KSTACK 0x100 -#define KMA_VAONLY 0x200 -#define KMA_COMPRESSOR 0x400 /* Pages belonging to the compressor are not on the paging queues, nor are they counted as wired. */ -#define KMA_ATOMIC 0x800 -#define KMA_ZERO 0x1000 -#define KMA_PAGEABLE 0x2000 +#define KMA_HERE 0x01 +#define KMA_NOPAGEWAIT 0x02 +#define KMA_KOBJECT 0x04 +#define KMA_LOMEM 0x08 +#define KMA_GUARD_FIRST 0x10 +#define KMA_GUARD_LAST 0x20 +#define KMA_PERMANENT 0x40 +#define KMA_NOENCRYPT 0x80 +#define KMA_KSTACK 0x100 +#define KMA_VAONLY 0x200 +#define KMA_COMPRESSOR 0x400 /* Pages belonging to the compressor are not on the paging queues, nor are they counted as wired. 
*/ +#define KMA_ATOMIC 0x800 +#define KMA_ZERO 0x1000 +#define KMA_PAGEABLE 0x2000 extern kern_return_t kmem_alloc( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_tag_t tag); + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_tag_t tag); extern kern_return_t kmem_alloc_contig( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_offset_t mask, - ppnum_t max_pnum, - ppnum_t pnum_mask, - int flags, - vm_tag_t tag); - -extern kern_return_t kmem_alloc_flags( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_tag_t tag, - int flags); - -extern kern_return_t kmem_alloc_pageable( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_tag_t tag); - -extern kern_return_t kmem_alloc_aligned( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_tag_t tag); - -extern kern_return_t kmem_realloc( - vm_map_t map, - vm_offset_t oldaddr, - vm_size_t oldsize, - vm_offset_t *newaddrp, - vm_size_t newsize, - vm_tag_t tag); - -extern void kmem_free( - vm_map_t map, - vm_offset_t addr, - vm_size_t size); - -extern kern_return_t kmem_suballoc( - vm_map_t parent, - vm_offset_t *addr, - vm_size_t size, - boolean_t pageable, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - vm_map_t *new_map); - -extern kern_return_t kmem_alloc_kobject( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size, - vm_tag_t tag); + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_offset_t mask, + ppnum_t max_pnum, + ppnum_t pnum_mask, + int flags, + vm_tag_t tag); + +extern kern_return_t kmem_alloc_flags( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_tag_t tag, + int flags); + +extern kern_return_t kmem_alloc_pageable( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_tag_t tag); + +extern kern_return_t kmem_alloc_aligned( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_tag_t tag); + +extern kern_return_t kmem_realloc( + vm_map_t map, + vm_offset_t oldaddr, + vm_size_t oldsize, + vm_offset_t *newaddrp, + vm_size_t newsize, + vm_tag_t tag); + +extern void kmem_free( + vm_map_t map, + vm_offset_t addr, + vm_size_t size); + +extern kern_return_t kmem_suballoc( + vm_map_t parent, + vm_offset_t *addr, + vm_size_t size, + boolean_t pageable, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_t *new_map); + +extern kern_return_t kmem_alloc_kobject( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size, + vm_tag_t tag); extern kern_return_t kernel_memory_populate( - vm_map_t map, - vm_offset_t addr, - vm_size_t size, - int flags, + vm_map_t map, + vm_offset_t addr, + vm_size_t size, + int flags, vm_tag_t tag); extern void kernel_memory_depopulate( - vm_map_t map, - vm_offset_t addr, - vm_size_t size, - int flags); - -extern kern_return_t memory_object_iopl_request( - ipc_port_t port, - memory_object_offset_t offset, - upl_size_t *upl_size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - upl_control_flags_t *flags, - vm_tag_t tag); + vm_map_t map, + vm_offset_t addr, + vm_size_t size, + int flags); + +extern kern_return_t memory_object_iopl_request( + ipc_port_t port, + memory_object_offset_t offset, + upl_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + upl_control_flags_t *flags, + vm_tag_t tag); struct mach_memory_info; -extern kern_return_t vm_page_diagnose(struct mach_memory_info * info, - unsigned int num_info, uint64_t zones_collectable_bytes); +extern kern_return_t vm_page_diagnose(struct 
mach_memory_info * info, + unsigned int num_info, uint64_t zones_collectable_bytes); extern uint32_t vm_page_diagnose_estimate(void); @@ -205,52 +205,52 @@ extern kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size #endif /* DEBUG || DEVELOPMENT */ -extern vm_tag_t vm_tag_bt(void); +extern vm_tag_t vm_tag_bt(void); -extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site); +extern vm_tag_t vm_tag_alloc(vm_allocation_site_t * site); -extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP); +extern void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP); -extern void vm_tag_update_size(vm_tag_t tag, int64_t size); +extern void vm_tag_update_size(vm_tag_t tag, int64_t size); #if VM_MAX_TAG_ZONES -extern void vm_allocation_zones_init(void); +extern void vm_allocation_zones_init(void); extern void vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx); -extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste); +extern void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste); extern vm_allocation_zone_total_t ** vm_allocation_zone_totals; #endif /* VM_MAX_TAG_ZONES */ -extern vm_tag_t vm_tag_bt_debug(void); +extern vm_tag_t vm_tag_bt_debug(void); extern uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen); -extern boolean_t vm_kernel_map_is_kernel(vm_map_t map); +extern boolean_t vm_kernel_map_is_kernel(vm_map_t map); -extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr); +extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr); #else /* XNU_KERNEL_PRIVATE */ -extern kern_return_t kmem_alloc( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size); +extern kern_return_t kmem_alloc( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); -extern kern_return_t kmem_alloc_pageable( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size); +extern kern_return_t kmem_alloc_pageable( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); -extern kern_return_t kmem_alloc_kobject( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size); +extern kern_return_t kmem_alloc_kobject( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); -extern void kmem_free( - vm_map_t map, - vm_offset_t addr, - vm_size_t size); +extern void kmem_free( + vm_map_t map, + vm_offset_t addr, + vm_size_t size); #endif /* !XNU_KERNEL_PRIVATE */ @@ -263,173 +263,173 @@ struct kern_allocation_name; typedef struct kern_allocation_name * kern_allocation_name_t; #endif /* !XNU_KERNEL_PRIVATE */ -extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint32_t suballocs); -extern void kern_allocation_name_release(kern_allocation_name_t allocation); +extern kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint32_t suballocs); +extern void kern_allocation_name_release(kern_allocation_name_t allocation); extern const char * kern_allocation_get_name(kern_allocation_name_t allocation); #ifdef XNU_KERNEL_PRIVATE -extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta); -extern void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta); -extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation); +extern void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta); +extern void 
kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta); +extern vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation); #endif /* XNU_KERNEL_PRIVATE */ -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE -extern void kmem_init( - vm_offset_t start, - vm_offset_t end); +extern void kmem_init( + vm_offset_t start, + vm_offset_t end); -extern kern_return_t copyinmap( - vm_map_t map, - vm_map_offset_t fromaddr, - void *todata, - vm_size_t length); +extern kern_return_t copyinmap( + vm_map_t map, + vm_map_offset_t fromaddr, + void *todata, + vm_size_t length); -extern kern_return_t copyoutmap( - vm_map_t map, - void *fromdata, - vm_map_offset_t toaddr, - vm_size_t length); +extern kern_return_t copyoutmap( + vm_map_t map, + void *fromdata, + vm_map_offset_t toaddr, + vm_size_t length); -extern kern_return_t kmem_alloc_external( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size); +extern kern_return_t kmem_alloc_external( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); -extern kern_return_t kmem_alloc_kobject_external( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size); +extern kern_return_t kmem_alloc_kobject_external( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); -extern kern_return_t kmem_alloc_pageable_external( - vm_map_t map, - vm_offset_t *addrp, - vm_size_t size); +extern kern_return_t kmem_alloc_pageable_external( + vm_map_t map, + vm_offset_t *addrp, + vm_size_t size); -#endif /* MACH_KERNEL_PRIVATE */ +#endif /* MACH_KERNEL_PRIVATE */ #ifdef XNU_KERNEL_PRIVATE extern kern_return_t mach_vm_allocate_kernel( - vm_map_t map, - mach_vm_offset_t *addr, - mach_vm_size_t size, - int flags, + vm_map_t map, + mach_vm_offset_t *addr, + mach_vm_size_t size, + int flags, vm_tag_t tag); extern kern_return_t vm_allocate_kernel( - vm_map_t map, - vm_offset_t *addr, - vm_size_t size, + vm_map_t map, + vm_offset_t *addr, + vm_size_t size, int flags, vm_tag_t tag); extern kern_return_t mach_vm_map_kernel( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t initial_size, - mach_vm_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t initial_size, + mach_vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); extern kern_return_t vm_map_kernel( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); extern kern_return_t mach_vm_remap_kernel( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t size, - mach_vm_offset_t mask, - int flags, + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int 
flags, vm_tag_t tag, - vm_map_t src_map, - mach_vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance); + vm_map_t src_map, + mach_vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance); extern kern_return_t vm_remap_kernel( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, vm_tag_t tag, - vm_map_t src_map, - vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance); + vm_map_t src_map, + vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance); extern kern_return_t vm_map_64_kernel( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); extern kern_return_t mach_vm_wire_kernel( - host_priv_t host_priv, - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - vm_prot_t access, - vm_tag_t tag); + host_priv_t host_priv, + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_prot_t access, + vm_tag_t tag); extern kern_return_t vm_map_wire_kernel( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t caller_prot, - vm_tag_t tag, - boolean_t user_wire); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire); extern kern_return_t vm_map_wire_and_extract_kernel( - vm_map_t map, - vm_map_offset_t start, - vm_prot_t caller_prot, - vm_tag_t tag, - boolean_t user_wire, - ppnum_t *physpage_p); + vm_map_t map, + vm_map_offset_t start, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire, + ppnum_t *physpage_p); -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ -extern vm_map_t kernel_map; -extern vm_map_t kernel_pageable_map; +extern vm_map_t kernel_map; +extern vm_map_t kernel_pageable_map; extern vm_map_t ipc_kernel_map; -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ #ifdef KERNEL @@ -443,21 +443,21 @@ extern void vm_kernel_addrhide( extern vm_offset_t vm_kernel_addrperm_ext; -extern void vm_kernel_addrperm_external( - vm_offset_t addr, - vm_offset_t *perm_addr); +extern void vm_kernel_addrperm_external( + vm_offset_t addr, + vm_offset_t *perm_addr); -extern void vm_kernel_unslide_or_perm_external( - vm_offset_t addr, - vm_offset_t *up_addr); +extern void vm_kernel_unslide_or_perm_external( + vm_offset_t addr, + vm_offset_t *up_addr); #if MACH_KERNEL_PRIVATE extern uint64_t vm_kernel_addrhash_salt; extern uint64_t vm_kernel_addrhash_salt_ext; extern void vm_kernel_addrhash_external( - vm_offset_t addr, - vm_offset_t *perm_addr); + vm_offset_t addr, + vm_offset_t *perm_addr); #endif /* MACH_KERNEL_PRIVATE */ extern void vm_init_before_launchd(void); @@ -468,4 +468,4 @@ 
extern void vm_init_before_launchd(void); } #endif -#endif /* _VM_VM_KERN_H_ */ +#endif /* _VM_VM_KERN_H_ */ diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c index 04fbe0ccc..bc5c093d2 100644 --- a/osfmk/vm/vm_map.c +++ b/osfmk/vm/vm_map.c @@ -141,201 +141,201 @@ int vm_map_debug_fourk = 0; SECURITY_READ_ONLY_LATE(int) vm_map_executable_immutable = 1; int vm_map_executable_immutable_verbose = 0; -extern u_int32_t random(void); /* from */ +extern u_int32_t random(void); /* from */ /* Internal prototypes */ static void vm_map_simplify_range( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); /* forward */ - -static boolean_t vm_map_range_check( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_entry_t *entry); - -static vm_map_entry_t _vm_map_entry_create( - struct vm_map_header *map_header, boolean_t map_locked); - -static void _vm_map_entry_dispose( - struct vm_map_header *map_header, - vm_map_entry_t entry); - -static void vm_map_pmap_enter( - vm_map_t map, - vm_map_offset_t addr, - vm_map_offset_t end_addr, - vm_object_t object, - vm_object_offset_t offset, - vm_prot_t protection); - -static void _vm_map_clip_end( - struct vm_map_header *map_header, - vm_map_entry_t entry, - vm_map_offset_t end); - -static void _vm_map_clip_start( - struct vm_map_header *map_header, - vm_map_entry_t entry, - vm_map_offset_t start); - -static void vm_map_entry_delete( - vm_map_t map, - vm_map_entry_t entry); - -static kern_return_t vm_map_delete( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - int flags, - vm_map_t zap_map); - -static void vm_map_copy_insert( - vm_map_t map, - vm_map_entry_t after_where, - vm_map_copy_t copy); - -static kern_return_t vm_map_copy_overwrite_unaligned( - vm_map_t dst_map, - vm_map_entry_t entry, - vm_map_copy_t copy, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); /* forward */ + +static boolean_t vm_map_range_check( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_entry_t *entry); + +static vm_map_entry_t _vm_map_entry_create( + struct vm_map_header *map_header, boolean_t map_locked); + +static void _vm_map_entry_dispose( + struct vm_map_header *map_header, + vm_map_entry_t entry); + +static void vm_map_pmap_enter( + vm_map_t map, + vm_map_offset_t addr, + vm_map_offset_t end_addr, + vm_object_t object, + vm_object_offset_t offset, + vm_prot_t protection); + +static void _vm_map_clip_end( + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_map_offset_t end); + +static void _vm_map_clip_start( + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_map_offset_t start); + +static void vm_map_entry_delete( + vm_map_t map, + vm_map_entry_t entry); + +static kern_return_t vm_map_delete( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + int flags, + vm_map_t zap_map); + +static void vm_map_copy_insert( + vm_map_t map, + vm_map_entry_t after_where, + vm_map_copy_t copy); + +static kern_return_t vm_map_copy_overwrite_unaligned( + vm_map_t dst_map, + vm_map_entry_t entry, + vm_map_copy_t copy, vm_map_address_t start, - boolean_t discard_on_success); + boolean_t discard_on_success); -static kern_return_t vm_map_copy_overwrite_aligned( - vm_map_t dst_map, - vm_map_entry_t tmp_entry, - vm_map_copy_t copy, +static kern_return_t vm_map_copy_overwrite_aligned( + vm_map_t dst_map, + vm_map_entry_t tmp_entry, + vm_map_copy_t copy, vm_map_offset_t start, - pmap_t pmap); + pmap_t pmap); -static kern_return_t vm_map_copyin_kernel_buffer( - vm_map_t src_map, 
+static kern_return_t vm_map_copyin_kernel_buffer( + vm_map_t src_map, vm_map_address_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - vm_map_copy_t *copy_result); /* OUT */ - -static kern_return_t vm_map_copyout_kernel_buffer( - vm_map_t map, - vm_map_address_t *addr, /* IN/OUT */ - vm_map_copy_t copy, + vm_map_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result); /* OUT */ + +static kern_return_t vm_map_copyout_kernel_buffer( + vm_map_t map, + vm_map_address_t *addr, /* IN/OUT */ + vm_map_copy_t copy, vm_map_size_t copy_size, - boolean_t overwrite, - boolean_t consume_on_success); - -static void vm_map_fork_share( - vm_map_t old_map, - vm_map_entry_t old_entry, - vm_map_t new_map); - -static boolean_t vm_map_fork_copy( - vm_map_t old_map, - vm_map_entry_t *old_entry_p, - vm_map_t new_map, - int vm_map_copyin_flags); - -static kern_return_t vm_map_wire_nested( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t caller_prot, - vm_tag_t tag, - boolean_t user_wire, - pmap_t map_pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p); - -static kern_return_t vm_map_unwire_nested( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t user_wire, - pmap_t map_pmap, - vm_map_offset_t pmap_addr); - -static kern_return_t vm_map_overwrite_submap_recurse( - vm_map_t dst_map, - vm_map_offset_t dst_addr, - vm_map_size_t dst_size); - -static kern_return_t vm_map_copy_overwrite_nested( - vm_map_t dst_map, - vm_map_offset_t dst_addr, - vm_map_copy_t copy, - boolean_t interruptible, - pmap_t pmap, - boolean_t discard_on_success); - -static kern_return_t vm_map_remap_extract( - vm_map_t map, - vm_map_offset_t addr, - vm_map_size_t size, - boolean_t copy, - struct vm_map_header *map_header, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance, - boolean_t pageable, - boolean_t same_map, - vm_map_kernel_flags_t vmk_flags); - -static kern_return_t vm_map_remap_range_allocate( - vm_map_t map, - vm_map_address_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - vm_map_entry_t *map_entry); - -static void vm_map_region_look_for_page( - vm_map_t map, + boolean_t overwrite, + boolean_t consume_on_success); + +static void vm_map_fork_share( + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map); + +static boolean_t vm_map_fork_copy( + vm_map_t old_map, + vm_map_entry_t *old_entry_p, + vm_map_t new_map, + int vm_map_copyin_flags); + +static kern_return_t vm_map_wire_nested( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire, + pmap_t map_pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p); + +static kern_return_t vm_map_unwire_nested( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t user_wire, + pmap_t map_pmap, + vm_map_offset_t pmap_addr); + +static kern_return_t vm_map_overwrite_submap_recurse( + vm_map_t dst_map, + vm_map_offset_t dst_addr, + vm_map_size_t dst_size); + +static kern_return_t vm_map_copy_overwrite_nested( + vm_map_t dst_map, + vm_map_offset_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible, + pmap_t pmap, + boolean_t discard_on_success); + +static kern_return_t vm_map_remap_extract( + vm_map_t map, + vm_map_offset_t addr, + vm_map_size_t size, + boolean_t copy, + struct vm_map_header *map_header, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance, + boolean_t 
pageable, + boolean_t same_map, + vm_map_kernel_flags_t vmk_flags); + +static kern_return_t vm_map_remap_range_allocate( + vm_map_t map, + vm_map_address_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_entry_t *map_entry); + +static void vm_map_region_look_for_page( + vm_map_t map, vm_map_offset_t va, - vm_object_t object, - vm_object_offset_t offset, + vm_object_t object, + vm_object_offset_t offset, int max_refcnt, int depth, vm_region_extended_info_t extended, mach_msg_type_number_t count); -static int vm_map_region_count_obj_refs( - vm_map_entry_t entry, - vm_object_t object); +static int vm_map_region_count_obj_refs( + vm_map_entry_t entry, + vm_object_t object); -static kern_return_t vm_map_willneed( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +static kern_return_t vm_map_willneed( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); -static kern_return_t vm_map_reuse_pages( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +static kern_return_t vm_map_reuse_pages( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); -static kern_return_t vm_map_reusable_pages( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +static kern_return_t vm_map_reusable_pages( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); -static kern_return_t vm_map_can_reuse( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +static kern_return_t vm_map_can_reuse( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); #if MACH_ASSERT -static kern_return_t vm_map_pageout( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end); +static kern_return_t vm_map_pageout( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); #endif /* MACH_ASSERT */ -static void vm_map_corpse_footprint_destroy( - vm_map_t map); +static void vm_map_corpse_footprint_destroy( + vm_map_t map); pid_t find_largest_process_vm_map_entries(void); @@ -357,10 +357,10 @@ pid_t find_largest_process_vm_map_entries(void); * new mapping to be "used for jit", so we always reset the flag here. * Same for "pmap_cs_associated". */ -#define VM_MAP_ENTRY_COPY_CODE_SIGNING(NEW,OLD) \ -MACRO_BEGIN \ - (NEW)->used_for_jit = FALSE; \ - (NEW)->pmap_cs_associated = FALSE; \ +#define VM_MAP_ENTRY_COPY_CODE_SIGNING(NEW, OLD) \ +MACRO_BEGIN \ + (NEW)->used_for_jit = FALSE; \ + (NEW)->pmap_cs_associated = FALSE; \ MACRO_END #else /* CONFIG_EMBEDDED */ @@ -369,41 +369,41 @@ MACRO_END * The "used_for_jit" flag was copied from OLD to NEW in vm_map_entry_copy(). * On macOS, the new mapping can be "used for jit". 
*/ -#define VM_MAP_ENTRY_COPY_CODE_SIGNING(NEW,OLD) \ -MACRO_BEGIN \ - assert((NEW)->used_for_jit == (OLD)->used_for_jit); \ - assert((NEW)->pmap_cs_associated == FALSE); \ +#define VM_MAP_ENTRY_COPY_CODE_SIGNING(NEW, OLD) \ +MACRO_BEGIN \ + assert((NEW)->used_for_jit == (OLD)->used_for_jit); \ + assert((NEW)->pmap_cs_associated == FALSE); \ MACRO_END #endif /* CONFIG_EMBEDDED */ -#define vm_map_entry_copy(NEW,OLD) \ -MACRO_BEGIN \ -boolean_t _vmec_reserved = (NEW)->from_reserved_zone; \ +#define vm_map_entry_copy(NEW, OLD) \ +MACRO_BEGIN \ +boolean_t _vmec_reserved = (NEW)->from_reserved_zone; \ *(NEW) = *(OLD); \ - (NEW)->is_shared = FALSE; \ + (NEW)->is_shared = FALSE; \ (NEW)->needs_wakeup = FALSE; \ (NEW)->in_transition = FALSE; \ (NEW)->wired_count = 0; \ (NEW)->user_wired_count = 0; \ - (NEW)->permanent = FALSE; \ - VM_MAP_ENTRY_COPY_CODE_SIGNING((NEW),(OLD)); \ - (NEW)->from_reserved_zone = _vmec_reserved; \ - if ((NEW)->iokit_acct) { \ + (NEW)->permanent = FALSE; \ + VM_MAP_ENTRY_COPY_CODE_SIGNING((NEW),(OLD)); \ + (NEW)->from_reserved_zone = _vmec_reserved; \ + if ((NEW)->iokit_acct) { \ assertf(!(NEW)->use_pmap, "old %p new %p\n", (OLD), (NEW)); \ - (NEW)->iokit_acct = FALSE; \ - (NEW)->use_pmap = TRUE; \ - } \ + (NEW)->iokit_acct = FALSE; \ + (NEW)->use_pmap = TRUE; \ + } \ (NEW)->vme_resilient_codesign = FALSE; \ - (NEW)->vme_resilient_media = FALSE; \ - (NEW)->vme_atomic = FALSE; \ + (NEW)->vme_resilient_media = FALSE; \ + (NEW)->vme_atomic = FALSE; \ MACRO_END -#define vm_map_entry_copy_full(NEW,OLD) \ -MACRO_BEGIN \ -boolean_t _vmecf_reserved = (NEW)->from_reserved_zone; \ -(*(NEW) = *(OLD)); \ -(NEW)->from_reserved_zone = _vmecf_reserved; \ +#define vm_map_entry_copy_full(NEW, OLD) \ +MACRO_BEGIN \ +boolean_t _vmecf_reserved = (NEW)->from_reserved_zone; \ +(*(NEW) = *(OLD)); \ +(NEW)->from_reserved_zone = _vmecf_reserved; \ MACRO_END /* @@ -425,7 +425,7 @@ MACRO_END * An application on any architecture may override these defaults by explicitly * adding PROT_EXEC permission to the page in question with the mprotect(2) * system call. This code here just determines what happens when an app tries to - * execute from a page that lacks execute permission. + * execute from a page that lacks execute permission. * * Note that allow_data_exec or allow_stack_exec may also be modified by sysctl to change the * default behavior for both 32 and 64 bit apps on a system-wide basis. Furthermore, @@ -443,24 +443,28 @@ override_nx(vm_map_t map, uint32_t user_tag) /* map unused on arm */ { int current_abi; - if (map->pmap == kernel_pmap) return FALSE; + if (map->pmap == kernel_pmap) { + return FALSE; + } /* * Determine if the app is running in 32 or 64 bit mode. */ - if (vm_map_is_64bit(map)) + if (vm_map_is_64bit(map)) { current_abi = VM_ABI_64; - else + } else { current_abi = VM_ABI_32; + } /* * Determine if we should allow the execution based on whether it's a * stack or data area and the current architecture. */ - if (user_tag == VM_MEMORY_STACK) + if (user_tag == VM_MEMORY_STACK) { return allow_stack_exec & current_abi; + } return (allow_data_exec & current_abi) && (map->map_disallow_data_exec == FALSE); } @@ -509,11 +513,11 @@ override_nx(vm_map_t map, uint32_t user_tag) /* map unused on arm */ * vm_object_copy_strategically() in vm_object.c. 
*/ -static zone_t vm_map_zone; /* zone for vm_map structures */ -zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */ -static zone_t vm_map_entry_reserved_zone; /* zone with reserve for non-blocking allocations */ -static zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ -zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */ +static zone_t vm_map_zone; /* zone for vm_map structures */ +zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */ +static zone_t vm_map_entry_reserved_zone; /* zone with reserve for non-blocking allocations */ +static zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ +zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */ /* @@ -522,17 +526,17 @@ zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures * vm_map_submap creates the submap. */ -vm_object_t vm_submap_object; +vm_object_t vm_submap_object; -static void *map_data; -static vm_size_t map_data_size; -static void *kentry_data; -static vm_size_t kentry_data_size; -static void *map_holes_data; -static vm_size_t map_holes_data_size; +static void *map_data; +static vm_size_t map_data_size; +static void *kentry_data; +static vm_size_t kentry_data_size; +static void *map_holes_data; +static vm_size_t map_holes_data_size; #if CONFIG_EMBEDDED -#define NO_COALESCE_LIMIT 0 +#define NO_COALESCE_LIMIT 0 #else #define NO_COALESCE_LIMIT ((1024 * 128) - 1) #endif @@ -544,12 +548,12 @@ unsigned int vm_map_set_cache_attr_count = 0; kern_return_t vm_map_set_cache_attr( - vm_map_t map, - vm_map_offset_t va) + vm_map_t map, + vm_map_offset_t va) { - vm_map_entry_t map_entry; - vm_object_t object; - kern_return_t kr = KERN_SUCCESS; + vm_map_entry_t map_entry; + vm_object_t object; + kern_return_t kr = KERN_SUCCESS; vm_map_lock_read(map); @@ -593,22 +597,22 @@ done: */ kern_return_t vm_map_apple_protected( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_object_offset_t crypto_backing_offset, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_object_offset_t crypto_backing_offset, struct pager_crypt_info *crypt_info) { - boolean_t map_locked; - kern_return_t kr; - vm_map_entry_t map_entry; + boolean_t map_locked; + kern_return_t kr; + vm_map_entry_t map_entry; struct vm_map_entry tmp_entry; - memory_object_t unprotected_mem_obj; - vm_object_t protected_object; - vm_map_offset_t map_addr; - vm_map_offset_t start_aligned, end_aligned; - vm_object_offset_t crypto_start, crypto_end; - int vm_flags; + memory_object_t unprotected_mem_obj; + vm_object_t protected_object; + vm_map_offset_t map_addr; + vm_map_offset_t start_aligned, end_aligned; + vm_object_offset_t crypto_start, crypto_end; + int vm_flags; vm_map_kernel_flags_t vmk_flags; vm_flags = 0; @@ -643,15 +647,15 @@ vm_map_apple_protected( map_addr = start_aligned; for (map_addr = start_aligned; - map_addr < end; - map_addr = tmp_entry.vme_end) { + map_addr < end; + map_addr = tmp_entry.vme_end) { vm_map_lock(map); map_locked = TRUE; /* lookup the protected VM object */ if (!vm_map_lookup_entry(map, - map_addr, - &map_entry) || + map_addr, + &map_entry) || map_entry->is_sub_map || VME_OBJECT(map_entry) == VM_OBJECT_NULL || !(map_entry->protection & VM_PROT_EXECUTE)) { @@ -736,12 +740,12 @@ vm_map_apple_protected( #if __arm64__ if (tmp_entry.used_for_jit && (VM_MAP_PAGE_SHIFT(map) != FOURK_PAGE_SHIFT || - PAGE_SHIFT != FOURK_PAGE_SHIFT) && + PAGE_SHIFT != FOURK_PAGE_SHIFT) && fourk_binary_compatibility_unsafe && 
fourk_binary_compatibility_allow_wx) { printf("** FOURK_COMPAT [%d]: " - "allowing write+execute at 0x%llx\n", - proc_selfpid(), tmp_entry.vme_start); + "allowing write+execute at 0x%llx\n", + proc_selfpid(), tmp_entry.vme_start); vmk_flags.vmkf_map_jit = TRUE; } #endif /* __arm64__ */ @@ -749,43 +753,43 @@ vm_map_apple_protected( /* map this memory object in place of the current one */ map_addr = tmp_entry.vme_start; kr = vm_map_enter_mem_object(map, - &map_addr, - (tmp_entry.vme_end - - tmp_entry.vme_start), - (mach_vm_offset_t) 0, - vm_flags, - vmk_flags, - VM_KERN_MEMORY_NONE, - (ipc_port_t)(uintptr_t) unprotected_mem_obj, - 0, - TRUE, - tmp_entry.protection, - tmp_entry.max_protection, - tmp_entry.inheritance); + &map_addr, + (tmp_entry.vme_end - + tmp_entry.vme_start), + (mach_vm_offset_t) 0, + vm_flags, + vmk_flags, + VM_KERN_MEMORY_NONE, + (ipc_port_t)(uintptr_t) unprotected_mem_obj, + 0, + TRUE, + tmp_entry.protection, + tmp_entry.max_protection, + tmp_entry.inheritance); assertf(kr == KERN_SUCCESS, - "kr = 0x%x\n", kr); + "kr = 0x%x\n", kr); assertf(map_addr == tmp_entry.vme_start, - "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n", - (uint64_t)map_addr, - (uint64_t) tmp_entry.vme_start, - &tmp_entry); + "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n", + (uint64_t)map_addr, + (uint64_t) tmp_entry.vme_start, + &tmp_entry); #if VM_MAP_DEBUG_APPLE_PROTECT if (vm_map_debug_apple_protect) { printf("APPLE_PROTECT: map %p [0x%llx:0x%llx] pager %p:" - " backing:[object:%p,offset:0x%llx," - "crypto_backing_offset:0x%llx," - "crypto_start:0x%llx,crypto_end:0x%llx]\n", - map, - (uint64_t) map_addr, - (uint64_t) (map_addr + (tmp_entry.vme_end - - tmp_entry.vme_start)), - unprotected_mem_obj, - protected_object, - VME_OFFSET(&tmp_entry), - crypto_backing_offset, - crypto_start, - crypto_end); + " backing:[object:%p,offset:0x%llx," + "crypto_backing_offset:0x%llx," + "crypto_start:0x%llx,crypto_end:0x%llx]\n", + map, + (uint64_t) map_addr, + (uint64_t) (map_addr + (tmp_entry.vme_end - + tmp_entry.vme_start)), + unprotected_mem_obj, + protected_object, + VME_OFFSET(&tmp_entry), + crypto_backing_offset, + crypto_start, + crypto_end); } #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ @@ -800,7 +804,7 @@ vm_map_apple_protected( /* continue with next map entry */ crypto_backing_offset += (tmp_entry.vme_end - - tmp_entry.vme_start); + tmp_entry.vme_start); crypto_backing_offset -= crypto_start; } kr = KERN_SUCCESS; @@ -811,13 +815,13 @@ done: } return kr; } -#endif /* CONFIG_CODE_DECRYPTION */ +#endif /* CONFIG_CODE_DECRYPTION */ -lck_grp_t vm_map_lck_grp; -lck_grp_attr_t vm_map_lck_grp_attr; -lck_attr_t vm_map_lck_attr; -lck_attr_t vm_map_lck_rw_attr; +lck_grp_t vm_map_lck_grp; +lck_grp_attr_t vm_map_lck_grp_attr; +lck_attr_t vm_map_lck_attr; +lck_attr_t vm_map_lck_rw_attr; #if CONFIG_EMBEDDED int malloc_no_cow = 1; @@ -856,34 +860,34 @@ vm_map_init( vm_size_t entry_zone_alloc_size; const char *mez_name = "VM map entries"; - vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40*1024, - PAGE_SIZE, "maps"); + vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40 * 1024, + PAGE_SIZE, "maps"); zone_change(vm_map_zone, Z_NOENCRYPT, TRUE); -#if defined(__LP64__) +#if defined(__LP64__) entry_zone_alloc_size = PAGE_SIZE * 5; #else entry_zone_alloc_size = PAGE_SIZE * 6; #endif vm_map_entry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), - 1024*1024, entry_zone_alloc_size, - mez_name); + 1024 * 1024, entry_zone_alloc_size, + mez_name); zone_change(vm_map_entry_zone, Z_NOENCRYPT, TRUE); 
zone_change(vm_map_entry_zone, Z_NOCALLOUT, TRUE); zone_change(vm_map_entry_zone, Z_GZALLOC_EXEMPT, TRUE); vm_map_entry_reserved_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), - kentry_data_size * 64, kentry_data_size, - "Reserved VM map entries"); + kentry_data_size * 64, kentry_data_size, + "Reserved VM map entries"); zone_change(vm_map_entry_reserved_zone, Z_NOENCRYPT, TRUE); /* Don't quarantine because we always need elements available */ zone_change(vm_map_entry_reserved_zone, Z_KASAN_QUARANTINE, FALSE); vm_map_copy_zone = zinit((vm_map_size_t) sizeof(struct vm_map_copy), - 16*1024, PAGE_SIZE, "VM map copies"); + 16 * 1024, PAGE_SIZE, "VM map copies"); zone_change(vm_map_copy_zone, Z_NOENCRYPT, TRUE); vm_map_holes_zone = zinit((vm_map_size_t) sizeof(struct vm_map_links), - 16*1024, PAGE_SIZE, "VM map holes"); + 16 * 1024, PAGE_SIZE, "VM map holes"); zone_change(vm_map_holes_zone, Z_NOENCRYPT, TRUE); /* @@ -892,7 +896,7 @@ vm_map_init( */ zone_change(vm_map_zone, Z_COLLECT, FALSE); zone_change(vm_map_zone, Z_FOREIGN, TRUE); - zone_change(vm_map_zone, Z_GZALLOC_EXEMPT, TRUE); + zone_change(vm_map_zone, Z_GZALLOC_EXEMPT, TRUE); zone_change(vm_map_entry_reserved_zone, Z_COLLECT, FALSE); zone_change(vm_map_entry_reserved_zone, Z_EXPAND, FALSE); @@ -918,15 +922,18 @@ vm_map_init( const vm_size_t stride = ZONE_CHUNK_MAXPAGES * PAGE_SIZE; for (vm_offset_t off = 0; off < kentry_data_size; off += stride) { zcram(vm_map_entry_reserved_zone, - (vm_offset_t)kentry_data + off, - MIN(kentry_data_size - off, stride)); + (vm_offset_t)kentry_data + off, + MIN(kentry_data_size - off, stride)); } for (vm_offset_t off = 0; off < map_holes_data_size; off += stride) { zcram(vm_map_holes_zone, - (vm_offset_t)map_holes_data + off, - MIN(map_holes_data_size - off, stride)); + (vm_offset_t)map_holes_data + off, + MIN(map_holes_data_size - off, stride)); } + /* + * Since these are covered by zones, remove them from stolen page accounting. 
+ */ VM_PAGE_MOVE_STOLEN(atop_64(map_data_size) + atop_64(kentry_data_size) + atop_64(map_holes_data_size)); lck_grp_attr_setdefault(&vm_map_lck_grp_attr); @@ -938,28 +945,29 @@ vm_map_init( #if VM_MAP_DEBUG_APPLE_PROTECT PE_parse_boot_argn("vm_map_debug_apple_protect", - &vm_map_debug_apple_protect, - sizeof(vm_map_debug_apple_protect)); + &vm_map_debug_apple_protect, + sizeof(vm_map_debug_apple_protect)); #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ #if VM_MAP_DEBUG_APPLE_FOURK PE_parse_boot_argn("vm_map_debug_fourk", - &vm_map_debug_fourk, - sizeof(vm_map_debug_fourk)); + &vm_map_debug_fourk, + sizeof(vm_map_debug_fourk)); #endif /* VM_MAP_DEBUG_FOURK */ PE_parse_boot_argn("vm_map_executable_immutable", - &vm_map_executable_immutable, - sizeof(vm_map_executable_immutable)); + &vm_map_executable_immutable, + sizeof(vm_map_executable_immutable)); PE_parse_boot_argn("vm_map_executable_immutable_verbose", - &vm_map_executable_immutable_verbose, - sizeof(vm_map_executable_immutable_verbose)); + &vm_map_executable_immutable_verbose, + sizeof(vm_map_executable_immutable_verbose)); PE_parse_boot_argn("malloc_no_cow", - &malloc_no_cow, - sizeof(malloc_no_cow)); + &malloc_no_cow, + sizeof(malloc_no_cow)); if (malloc_no_cow) { vm_memory_malloc_no_cow_mask = 0ULL; vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC; vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_SMALL; + vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_MEDIUM; vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_LARGE; // vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_HUGE; // vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_REALLOC; @@ -969,8 +977,8 @@ vm_map_init( vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_MALLOC_NANO; // vm_memory_malloc_no_cow_mask |= 1ULL << VM_MEMORY_TCMALLOC; PE_parse_boot_argn("vm_memory_malloc_no_cow_mask", - &vm_memory_malloc_no_cow_mask, - sizeof(vm_memory_malloc_no_cow_mask)); + &vm_memory_malloc_no_cow_mask, + sizeof(vm_memory_malloc_no_cow_mask)); } } @@ -989,7 +997,7 @@ vm_map_steal_memory( * scheme is activated and/or entries are available from the general * map entry pool. */ -#if defined(__LP64__) +#if defined(__LP64__) kentry_initial_pages = 10; #else kentry_initial_pages = 6; @@ -998,9 +1006,10 @@ vm_map_steal_memory( #if CONFIG_GZALLOC /* If using the guard allocator, reserve more memory for the kernel * reserved map entry pool. - */ - if (gzalloc_enabled()) + */ + if (gzalloc_enabled()) { kentry_initial_pages *= 1024; + } #endif kentry_data_size = kentry_initial_pages * PAGE_SIZE; @@ -1013,27 +1022,26 @@ vm_map_steal_memory( boolean_t vm_map_supports_hole_optimization = FALSE; void -vm_kernel_reserved_entry_init(void) { - zone_prio_refill_configure(vm_map_entry_reserved_zone, (6*PAGE_SIZE)/sizeof(struct vm_map_entry)); +vm_kernel_reserved_entry_init(void) +{ + zone_prio_refill_configure(vm_map_entry_reserved_zone, (6 * PAGE_SIZE) / sizeof(struct vm_map_entry)); /* * Once we have our replenish thread set up, we can start using the vm_map_holes zone. 
*/ - zone_prio_refill_configure(vm_map_holes_zone, (6*PAGE_SIZE)/sizeof(struct vm_map_links)); + zone_prio_refill_configure(vm_map_holes_zone, (6 * PAGE_SIZE) / sizeof(struct vm_map_links)); vm_map_supports_hole_optimization = TRUE; } void vm_map_disable_hole_optimization(vm_map_t map) { - vm_map_entry_t head_entry, hole_entry, next_hole_entry; + vm_map_entry_t head_entry, hole_entry, next_hole_entry; if (map->holelistenabled) { - head_entry = hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); while (hole_entry != NULL) { - next_hole_entry = hole_entry->vme_next; hole_entry->vme_next = NULL; @@ -1056,8 +1064,9 @@ vm_map_disable_hole_optimization(vm_map_t map) } boolean_t -vm_kernel_map_is_kernel(vm_map_t map) { - return (map->pmap == kernel_pmap); +vm_kernel_map_is_kernel(vm_map_t map) +{ + return map->pmap == kernel_pmap; } /* @@ -1070,10 +1079,10 @@ vm_kernel_map_is_kernel(vm_map_t map) { vm_map_t vm_map_create( - pmap_t pmap, - vm_map_offset_t min, - vm_map_offset_t max, - boolean_t pageable) + pmap_t pmap, + vm_map_offset_t min, + vm_map_offset_t max, + boolean_t pageable) { int options; @@ -1086,13 +1095,13 @@ vm_map_create( vm_map_t vm_map_create_options( - pmap_t pmap, - vm_map_offset_t min, + pmap_t pmap, + vm_map_offset_t min, vm_map_offset_t max, - int options) + int options) { - vm_map_t result; - struct vm_map_links *hole_entry = NULL; + vm_map_t result; + struct vm_map_links *hole_entry = NULL; if (options & ~(VM_MAP_CREATE_ALL_OPTIONS)) { /* unknown option */ @@ -1100,8 +1109,9 @@ vm_map_create_options( } result = (vm_map_t) zalloc(vm_map_zone); - if (result == VM_MAP_NULL) + if (result == VM_MAP_NULL) { panic("vm_map_create"); + } vm_map_first_entry(result) = vm_map_to_entry(result); vm_map_last_entry(result) = vm_map_to_entry(result); @@ -1112,21 +1122,21 @@ vm_map_create_options( result->hdr.entries_pageable = FALSE; } - vm_map_store_init( &(result->hdr) ); + vm_map_store_init( &(result->hdr)); result->hdr.page_shift = PAGE_SHIFT; result->size = 0; - result->user_wire_limit = MACH_VM_MAX_ADDRESS; /* default limit is unlimited */ + result->user_wire_limit = MACH_VM_MAX_ADDRESS; /* default limit is unlimited */ result->user_wire_size = 0; #if __x86_64__ result->vmmap_high_start = 0; #endif /* __x86_64__ */ result->map_refcnt = 1; -#if TASK_SWAPPER +#if TASK_SWAPPER result->res_count = 1; result->sw_state = MAP_SW_IN; -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ result->pmap = pmap; result->min_offset = min; result->max_offset = max; @@ -1142,7 +1152,7 @@ vm_map_create_options( result->highest_entry_end = 0; result->first_free = vm_map_to_entry(result); result->hint = vm_map_to_entry(result); - result->jit_entry_exists = FALSE; + result->jit_entry_exists = FALSE; /* "has_corpse_footprint" and "holelistenabled" are mutually exclusive */ if (options & VM_MAP_CREATE_CORPSE_FOOTPRINT) { @@ -1171,7 +1181,7 @@ vm_map_create_options( vm_map_lock_init(result); lck_mtx_init_ext(&result->s_lock, &result->s_lock_ext, &vm_map_lck_grp, &vm_map_lck_attr); - return(result); + return result; } /* @@ -1180,18 +1190,18 @@ vm_map_create_options( * Allocates a VM map entry for insertion in the * given map (or map copy). No fields are filled. 
*/ -#define vm_map_entry_create(map, map_locked) _vm_map_entry_create(&(map)->hdr, map_locked) +#define vm_map_entry_create(map, map_locked) _vm_map_entry_create(&(map)->hdr, map_locked) -#define vm_map_copy_entry_create(copy, map_locked) \ +#define vm_map_copy_entry_create(copy, map_locked) \ _vm_map_entry_create(&(copy)->cpy_hdr, map_locked) unsigned reserved_zalloc_count, nonreserved_zalloc_count; static vm_map_entry_t _vm_map_entry_create( - struct vm_map_header *map_header, boolean_t __unused map_locked) + struct vm_map_header *map_header, boolean_t __unused map_locked) { - zone_t zone; - vm_map_entry_t entry; + zone_t zone; + vm_map_entry_t entry; zone = vm_map_entry_zone; @@ -1199,29 +1209,30 @@ _vm_map_entry_create( if (map_header->entries_pageable) { entry = (vm_map_entry_t) zalloc(zone); - } - else { + } else { entry = (vm_map_entry_t) zalloc_canblock(zone, FALSE); if (entry == VM_MAP_ENTRY_NULL) { zone = vm_map_entry_reserved_zone; entry = (vm_map_entry_t) zalloc(zone); OSAddAtomic(1, &reserved_zalloc_count); - } else + } else { OSAddAtomic(1, &nonreserved_zalloc_count); + } } - if (entry == VM_MAP_ENTRY_NULL) + if (entry == VM_MAP_ENTRY_NULL) { panic("vm_map_entry_create"); + } entry->from_reserved_zone = (zone == vm_map_entry_reserved_zone); - vm_map_store_update( (vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE); -#if MAP_ENTRY_CREATION_DEBUG + vm_map_store_update((vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE); +#if MAP_ENTRY_CREATION_DEBUG entry->vme_creation_maphdr = map_header; backtrace(&entry->vme_creation_bt[0], - (sizeof(entry->vme_creation_bt)/sizeof(uintptr_t))); + (sizeof(entry->vme_creation_bt) / sizeof(uintptr_t))); #endif - return(entry); + return entry; } /* @@ -1229,33 +1240,35 @@ _vm_map_entry_create( * * Inverse of vm_map_entry_create. 
* - * write map lock held so no need to + * write map lock held so no need to * do anything special to insure correctness - * of the stores + * of the stores */ -#define vm_map_entry_dispose(map, entry) \ +#define vm_map_entry_dispose(map, entry) \ _vm_map_entry_dispose(&(map)->hdr, (entry)) -#define vm_map_copy_entry_dispose(map, entry) \ +#define vm_map_copy_entry_dispose(map, entry) \ _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry)) static void _vm_map_entry_dispose( - struct vm_map_header *map_header, - vm_map_entry_t entry) + struct vm_map_header *map_header, + vm_map_entry_t entry) { - zone_t zone; + zone_t zone; - if (map_header->entries_pageable || !(entry->from_reserved_zone)) + if (map_header->entries_pageable || !(entry->from_reserved_zone)) { zone = vm_map_entry_zone; - else + } else { zone = vm_map_entry_reserved_zone; + } if (!map_header->entries_pageable) { - if (zone == vm_map_entry_zone) + if (zone == vm_map_entry_zone) { OSAddAtomic(-1, &nonreserved_zalloc_count); - else + } else { OSAddAtomic(-1, &reserved_zalloc_count); + } } zfree(zone, entry); @@ -1265,23 +1278,24 @@ _vm_map_entry_dispose( static boolean_t first_free_check = FALSE; boolean_t first_free_is_valid( - vm_map_t map) + vm_map_t map) { - if (!first_free_check) + if (!first_free_check) { return TRUE; + } - return( first_free_is_valid_store( map )); + return first_free_is_valid_store( map ); } #endif /* MACH_ASSERT */ -#define vm_map_copy_entry_link(copy, after_where, entry) \ +#define vm_map_copy_entry_link(copy, after_where, entry) \ _vm_map_store_entry_link(&(copy)->cpy_hdr, after_where, (entry)) -#define vm_map_copy_entry_unlink(copy, entry) \ +#define vm_map_copy_entry_unlink(copy, entry) \ _vm_map_store_entry_unlink(&(copy)->cpy_hdr, (entry)) -#if MACH_ASSERT && TASK_SWAPPER +#if MACH_ASSERT && TASK_SWAPPER /* * vm_map_res_reference: * @@ -1291,7 +1305,8 @@ first_free_is_valid( * vm_map_swapin. * */ -void vm_map_res_reference(vm_map_t map) +void +vm_map_res_reference(vm_map_t map) { /* assert map is locked */ assert(map->res_count >= 0); @@ -1303,8 +1318,9 @@ void vm_map_res_reference(vm_map_t map) lck_mtx_lock(&map->s_lock); ++map->res_count; vm_map_unlock(map); - } else + } else { ++map->res_count; + } } /* @@ -1315,7 +1331,8 @@ void vm_map_res_reference(vm_map_t map) * The map may not be in memory (i.e. zero residence count). * */ -void vm_map_reference_swap(vm_map_t map) +void +vm_map_reference_swap(vm_map_t map) { assert(map != VM_MAP_NULL); lck_mtx_lock(&map->s_lock); @@ -1336,7 +1353,8 @@ void vm_map_reference_swap(vm_map_t map) * The map is locked, so this function is callable from vm_map_deallocate. * */ -void vm_map_res_deallocate(vm_map_t map) +void +vm_map_res_deallocate(vm_map_t map) { assert(map->res_count > 0); if (--map->res_count == 0) { @@ -1348,7 +1366,7 @@ void vm_map_res_deallocate(vm_map_t map) } assert(map->map_refcnt >= map->res_count); } -#endif /* MACH_ASSERT && TASK_SWAPPER */ +#endif /* MACH_ASSERT && TASK_SWAPPER */ /* * vm_map_destroy: @@ -1357,8 +1375,8 @@ void vm_map_res_deallocate(vm_map_t map) */ void vm_map_destroy( - vm_map_t map, - int flags) + vm_map_t map, + int flags) { vm_map_lock(map); @@ -1371,11 +1389,11 @@ vm_map_destroy( /* clean up regular map entries */ (void) vm_map_delete(map, map->min_offset, map->max_offset, - flags, VM_MAP_NULL); + flags, VM_MAP_NULL); /* clean up leftover special mappings (commpage, etc...) 
*/ -#if !defined(__arm__) && !defined(__arm64__) +#if !defined(__arm__) && !defined(__arm64__) (void) vm_map_delete(map, 0x0, 0xFFFFFFFFFFFFF000ULL, - flags, VM_MAP_NULL); + flags, VM_MAP_NULL); #endif /* !__arm__ && !__arm64__ */ vm_map_disable_hole_optimization(map); @@ -1385,8 +1403,9 @@ vm_map_destroy( assert(map->hdr.nentries == 0); - if(map->pmap) + if (map->pmap) { pmap_destroy(map->pmap); + } if (vm_map_lck_attr.lck_attr_val & LCK_ATTR_DEBUG) { /* @@ -1427,8 +1446,9 @@ find_largest_process_vm_map_entries(void) lck_mtx_lock(&tasks_threads_lock); queue_iterate(task_list, task, task_t, tasks) { - if (task == kernel_task || !task->active) + if (task == kernel_task || !task->active) { continue; + } vm_map_t task_map = task->map; if (task_map != VM_MAP_NULL) { @@ -1445,7 +1465,7 @@ find_largest_process_vm_map_entries(void) return victim_pid; } -#if TASK_SWAPPER +#if TASK_SWAPPER /* * vm_map_swapin/vm_map_swapout * @@ -1495,23 +1515,26 @@ find_largest_process_vm_map_entries(void) int vm_map_swap_enable = 1; -void vm_map_swapin (vm_map_t map) +void +vm_map_swapin(vm_map_t map) { vm_map_entry_t entry; - if (!vm_map_swap_enable) /* debug */ + if (!vm_map_swap_enable) { /* debug */ return; + } /* * Map is locked * First deal with various races. */ - if (map->sw_state == MAP_SW_IN) + if (map->sw_state == MAP_SW_IN) { /* * we raced with swapout and won. Returning will incr. * the res_count, turning the swapout into a nop. */ return; + } /* * The residence count must be zero. If we raced with another @@ -1563,7 +1586,8 @@ void vm_map_swapin (vm_map_t map) map->sw_state = MAP_SW_IN; } -void vm_map_swapout(vm_map_t map) +void +vm_map_swapout(vm_map_t map) { vm_map_entry_t entry; @@ -1586,8 +1610,9 @@ void vm_map_swapout(vm_map_t map) */ assert(map->sw_state == MAP_SW_IN); - if (!vm_map_swap_enable) + if (!vm_map_swap_enable) { return; + } /* * We now operate upon each map entry. 
If the entry is a sub- @@ -1626,7 +1651,7 @@ void vm_map_swapout(vm_map_t map) map->sw_state = MAP_SW_OUT; } -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ /* * vm_map_lookup_entry: [ internal use only ] @@ -1640,11 +1665,11 @@ void vm_map_swapout(vm_map_t map) */ boolean_t vm_map_lookup_entry( - vm_map_t map, - vm_map_offset_t address, - vm_map_entry_t *entry) /* OUT */ + vm_map_t map, + vm_map_offset_t address, + vm_map_entry_t *entry) /* OUT */ { - return ( vm_map_store_lookup_entry( map, address, entry )); + return vm_map_store_lookup_entry( map, address, entry ); } /* @@ -1662,19 +1687,19 @@ vm_map_lookup_entry( */ kern_return_t vm_map_find_space( - vm_map_t map, - vm_map_offset_t *address, /* OUT */ - vm_map_size_t size, - vm_map_offset_t mask, - int flags __unused, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - vm_map_entry_t *o_entry) /* OUT */ + vm_map_t map, + vm_map_offset_t *address, /* OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags __unused, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_entry_t *o_entry) /* OUT */ { - vm_map_entry_t entry, new_entry; - vm_map_offset_t start; - vm_map_offset_t end; - vm_map_entry_t hole_entry; + vm_map_entry_t entry, new_entry; + vm_map_offset_t start; + vm_map_offset_t end; + vm_map_entry_t hole_entry; if (size == 0) { *address = 0; @@ -1695,7 +1720,7 @@ vm_map_find_space( vm_map_lock(map); - if( map->disable_vmentry_reuse == TRUE) { + if (map->disable_vmentry_reuse == TRUE) { VM_MAP_HIGHEST_ENTRY(map, entry, start); } else { if (map->holelistenabled) { @@ -1707,17 +1732,18 @@ vm_map_find_space( */ vm_map_entry_dispose(map, new_entry); vm_map_unlock(map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } entry = hole_entry; start = entry->vme_start; } else { assert(first_free_is_valid(map)); - if ((entry = map->first_free) == vm_map_to_entry(map)) + if ((entry = map->first_free) == vm_map_to_entry(map)) { start = map->min_offset; - else + } else { start = entry->vme_end; + } } } @@ -1727,7 +1753,7 @@ vm_map_find_space( */ while (TRUE) { - vm_map_entry_t next; + vm_map_entry_t next; /* * Find the end of the proposed new region. @@ -1744,7 +1770,7 @@ vm_map_find_space( if (end < start) { vm_map_entry_dispose(map, new_entry); vm_map_unlock(map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } start = end; assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); @@ -1754,14 +1780,15 @@ vm_map_find_space( if ((end > map->max_offset) || (end < start)) { vm_map_entry_dispose(map, new_entry); vm_map_unlock(map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } next = entry->vme_next; if (map->holelistenabled) { - if (entry->vme_end >= end) + if (entry->vme_end >= end) { break; + } } else { /* * If there are no more entries, we must win. @@ -1772,11 +1799,13 @@ vm_map_find_space( * after the end of the potential new region. 
*/ - if (next == vm_map_to_entry(map)) + if (next == vm_map_to_entry(map)) { break; + } - if (next->vme_start >= end) + if (next->vme_start >= end) { break; + } } /* @@ -1792,7 +1821,7 @@ vm_map_find_space( */ vm_map_entry_dispose(map, new_entry); vm_map_unlock(map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } start = entry->vme_start; } else { @@ -1828,9 +1857,9 @@ vm_map_find_space( assert(page_aligned(new_entry->vme_start)); assert(page_aligned(new_entry->vme_end)); assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, - VM_MAP_PAGE_MASK(map))); + VM_MAP_PAGE_MASK(map))); assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, - VM_MAP_PAGE_MASK(map))); + VM_MAP_PAGE_MASK(map))); new_entry->is_shared = FALSE; new_entry->is_sub_map = FALSE; @@ -1864,10 +1893,11 @@ vm_map_find_space( new_entry->iokit_acct = FALSE; new_entry->vme_resilient_codesign = FALSE; new_entry->vme_resilient_media = FALSE; - if (vmk_flags.vmkf_atomic_entry) + if (vmk_flags.vmkf_atomic_entry) { new_entry->vme_atomic = TRUE; - else + } else { new_entry->vme_atomic = FALSE; + } VME_ALIAS_SET(new_entry, tag); @@ -1885,7 +1915,7 @@ vm_map_find_space( SAVE_HINT_MAP_WRITE(map, new_entry); *o_entry = new_entry; - return(KERN_SUCCESS); + return KERN_SUCCESS; } int vm_map_pmap_enter_print = FALSE; @@ -1907,26 +1937,27 @@ int vm_map_pmap_enter_enable = FALSE; */ __unused static void vm_map_pmap_enter( - vm_map_t map, - vm_map_offset_t addr, - vm_map_offset_t end_addr, - vm_object_t object, - vm_object_offset_t offset, - vm_prot_t protection) + vm_map_t map, + vm_map_offset_t addr, + vm_map_offset_t end_addr, + vm_object_t object, + vm_object_offset_t offset, + vm_prot_t protection) { - int type_of_fault; - kern_return_t kr; + int type_of_fault; + kern_return_t kr; struct vm_object_fault_info fault_info = {}; - if(map->pmap == 0) + if (map->pmap == 0) { return; + } while (addr < end_addr) { - vm_page_t m; + vm_page_t m; /* - * TODO: + * TODO: * From vm_map_enter(), we come into this function without the map * lock held or the object lock held. * We haven't taken a reference on the object either. 
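[Editorial note, illustrative only and not part of the upstream patch: the vm_kern.h hunks earlier in this diff re-indent the XNU_KERNEL_PRIVATE kmem_alloc()/kmem_free() prototypes, which carry a vm_tag_t so wired kernel allocations can be attributed to a subsystem. A minimal sketch of that round trip follows; the VM_KERN_MEMORY_NONE tag, the explicit bzero(), and the error-code comments are assumptions for the example, and kmem_alloc() itself does not zero-fill (KMA_ZERO, defined in the hunks above, is a flag for the lower-level allocator).]

/*
 * Illustrative sketch only. Assumes an XNU_KERNEL_PRIVATE build so the
 * tagged kmem_alloc() prototype from the vm_kern.h hunks above is in scope.
 */
#include <vm/vm_kern.h>

static kern_return_t
example_alloc_wired_buffer(vm_size_t size, vm_offset_t *out_addr)
{
	kern_return_t kr;

	/* kernel_map and VM_KERN_MEMORY_NONE both appear elsewhere in this patch. */
	kr = kmem_alloc(kernel_map, out_addr, size, VM_KERN_MEMORY_NONE);
	if (kr != KERN_SUCCESS) {
		return kr; /* e.g. KERN_NO_SPACE or KERN_RESOURCE_SHORTAGE */
	}

	/*
	 * kmem_alloc() does not zero-fill; callers that need zeroed memory
	 * must clear it themselves (or use a path that passes KMA_ZERO).
	 */
	bzero((void *)*out_addr, size);
	return KERN_SUCCESS;
}

static void
example_free_wired_buffer(vm_offset_t addr, vm_size_t size)
{
	/* kmem_free() takes the size explicitly; the caller must remember it. */
	kmem_free(kernel_map, addr, size);
}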
@@ -1940,7 +1971,7 @@ vm_map_pmap_enter( m = vm_page_lookup(object, offset); if (m == VM_PAGE_NULL || m->vmp_busy || m->vmp_fictitious || - (m->vmp_unusual && ( m->vmp_error || m->vmp_restart || m->vmp_absent))) { + (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) { vm_object_unlock(object); return; } @@ -1948,17 +1979,17 @@ vm_map_pmap_enter( if (vm_map_pmap_enter_print) { printf("vm_map_pmap_enter:"); printf("map: %p, addr: %llx, object: %p, offset: %llx\n", - map, (unsigned long long)addr, object, (unsigned long long)offset); + map, (unsigned long long)addr, object, (unsigned long long)offset); } type_of_fault = DBG_CACHE_HIT_FAULT; kr = vm_fault_enter(m, map->pmap, - addr, protection, protection, - VM_PAGE_WIRED(m), - FALSE, /* change_wiring */ - VM_KERN_MEMORY_NONE, /* tag - not wiring */ - &fault_info, - NULL, /* need_retry */ - &type_of_fault); + addr, protection, protection, + VM_PAGE_WIRED(m), + FALSE, /* change_wiring */ + VM_KERN_MEMORY_NONE, /* tag - not wiring */ + &fault_info, + NULL, /* need_retry */ + &type_of_fault); vm_object_unlock(object); @@ -1968,56 +1999,57 @@ vm_map_pmap_enter( } boolean_t vm_map_pmap_is_empty( - vm_map_t map, - vm_map_offset_t start, + vm_map_t map, + vm_map_offset_t start, vm_map_offset_t end); -boolean_t vm_map_pmap_is_empty( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) +boolean_t +vm_map_pmap_is_empty( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { #ifdef MACHINE_PMAP_IS_EMPTY return pmap_is_empty(map->pmap, start, end); -#else /* MACHINE_PMAP_IS_EMPTY */ - vm_map_offset_t offset; - ppnum_t phys_page; +#else /* MACHINE_PMAP_IS_EMPTY */ + vm_map_offset_t offset; + ppnum_t phys_page; if (map->pmap == NULL) { return TRUE; } for (offset = start; - offset < end; - offset += PAGE_SIZE) { + offset < end; + offset += PAGE_SIZE) { phys_page = pmap_find_phys(map->pmap, offset); if (phys_page) { kprintf("vm_map_pmap_is_empty(%p,0x%llx,0x%llx): " - "page %d at 0x%llx\n", - map, (long long)start, (long long)end, - phys_page, (long long)offset); + "page %d at 0x%llx\n", + map, (long long)start, (long long)end, + phys_page, (long long)offset); return FALSE; } } return TRUE; -#endif /* MACHINE_PMAP_IS_EMPTY */ +#endif /* MACHINE_PMAP_IS_EMPTY */ } -#define MAX_TRIES_TO_GET_RANDOM_ADDRESS 1000 +#define MAX_TRIES_TO_GET_RANDOM_ADDRESS 1000 kern_return_t vm_map_random_address_for_size( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t size) + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size) { - kern_return_t kr = KERN_SUCCESS; - int tries = 0; - vm_map_offset_t random_addr = 0; + kern_return_t kr = KERN_SUCCESS; + int tries = 0; + vm_map_offset_t random_addr = 0; vm_map_offset_t hole_end; - vm_map_entry_t next_entry = VM_MAP_ENTRY_NULL; - vm_map_entry_t prev_entry = VM_MAP_ENTRY_NULL; - vm_map_size_t vm_hole_size = 0; - vm_map_size_t addr_space_size; + vm_map_entry_t next_entry = VM_MAP_ENTRY_NULL; + vm_map_entry_t prev_entry = VM_MAP_ENTRY_NULL; + vm_map_size_t vm_hole_size = 0; + vm_map_size_t addr_space_size; addr_space_size = vm_map_max(map) - vm_map_min(map); @@ -2026,7 +2058,7 @@ vm_map_random_address_for_size( while (tries < MAX_TRIES_TO_GET_RANDOM_ADDRESS) { random_addr = ((vm_map_offset_t)random()) << PAGE_SHIFT; random_addr = vm_map_trunc_page( - vm_map_min(map) +(random_addr % addr_space_size), + vm_map_min(map) + (random_addr % addr_space_size), VM_MAP_PAGE_MASK(map)); if (vm_map_lookup_entry(map, random_addr, &prev_entry) == FALSE) { @@ -2083,51 +2115,51 @@ static 
unsigned int vm_map_enter_restore_successes = 0;
 static unsigned int vm_map_enter_restore_failures = 0;

 kern_return_t
 vm_map_enter(
- vm_map_t map,
- vm_map_offset_t *address, /* IN/OUT */
- vm_map_size_t size,
- vm_map_offset_t mask,
- int flags,
- vm_map_kernel_flags_t vmk_flags,
- vm_tag_t alias,
- vm_object_t object,
- vm_object_offset_t offset,
- boolean_t needs_copy,
- vm_prot_t cur_protection,
- vm_prot_t max_protection,
- vm_inherit_t inheritance)
+ vm_map_t map,
+ vm_map_offset_t *address, /* IN/OUT */
+ vm_map_size_t size,
+ vm_map_offset_t mask,
+ int flags,
+ vm_map_kernel_flags_t vmk_flags,
+ vm_tag_t alias,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ boolean_t needs_copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
 {
- vm_map_entry_t entry, new_entry;
- vm_map_offset_t start, tmp_start, tmp_offset;
- vm_map_offset_t end, tmp_end;
- vm_map_offset_t tmp2_start, tmp2_end;
- vm_map_offset_t desired_empty_end;
- vm_map_offset_t step;
- kern_return_t result = KERN_SUCCESS;
- vm_map_t zap_old_map = VM_MAP_NULL;
- vm_map_t zap_new_map = VM_MAP_NULL;
- boolean_t map_locked = FALSE;
- boolean_t pmap_empty = TRUE;
- boolean_t new_mapping_established = FALSE;
- boolean_t keep_map_locked = vmk_flags.vmkf_keep_map_locked;
- boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0);
- boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0);
- boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0);
- boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0);
- boolean_t is_submap = vmk_flags.vmkf_submap;
- boolean_t permanent = vmk_flags.vmkf_permanent;
- boolean_t entry_for_jit = vmk_flags.vmkf_map_jit;
- boolean_t iokit_acct = vmk_flags.vmkf_iokit_acct;
- boolean_t resilient_codesign = ((flags & VM_FLAGS_RESILIENT_CODESIGN) != 0);
- boolean_t resilient_media = ((flags & VM_FLAGS_RESILIENT_MEDIA) != 0);
- boolean_t random_address = ((flags & VM_FLAGS_RANDOM_ADDR) != 0);
- unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT);
- vm_tag_t user_alias;
- vm_map_offset_t effective_min_offset, effective_max_offset;
- kern_return_t kr;
- boolean_t clear_map_aligned = FALSE;
- vm_map_entry_t hole_entry;
- vm_map_size_t chunk_size = 0;
+ vm_map_entry_t entry, new_entry;
+ vm_map_offset_t start, tmp_start, tmp_offset;
+ vm_map_offset_t end, tmp_end;
+ vm_map_offset_t tmp2_start, tmp2_end;
+ vm_map_offset_t desired_empty_end;
+ vm_map_offset_t step;
+ kern_return_t result = KERN_SUCCESS;
+ vm_map_t zap_old_map = VM_MAP_NULL;
+ vm_map_t zap_new_map = VM_MAP_NULL;
+ boolean_t map_locked = FALSE;
+ boolean_t pmap_empty = TRUE;
+ boolean_t new_mapping_established = FALSE;
+ boolean_t keep_map_locked = vmk_flags.vmkf_keep_map_locked;
+ boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0);
+ boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0);
+ boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0);
+ boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0);
+ boolean_t is_submap = vmk_flags.vmkf_submap;
+ boolean_t permanent = vmk_flags.vmkf_permanent;
+ boolean_t entry_for_jit = vmk_flags.vmkf_map_jit;
+ boolean_t iokit_acct = vmk_flags.vmkf_iokit_acct;
+ boolean_t resilient_codesign = ((flags & VM_FLAGS_RESILIENT_CODESIGN) != 0);
+ boolean_t resilient_media = ((flags & VM_FLAGS_RESILIENT_MEDIA) != 0);
+ boolean_t random_address = ((flags & VM_FLAGS_RANDOM_ADDR) != 0);
+ unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT);
+ vm_tag_t user_alias;
+ vm_map_offset_t
effective_min_offset, effective_max_offset; + kern_return_t kr; + boolean_t clear_map_aligned = FALSE; + vm_map_entry_t hole_entry; + vm_map_size_t chunk_size = 0; assertf(vmk_flags.__vmkf_unused == 0, "vmk_flags unused=0x%x\n", vmk_flags.__vmkf_unused); @@ -2151,19 +2183,20 @@ vm_map_enter( * with a lookup of the size depending on superpage_size. */ #ifdef __x86_64__ - case SUPERPAGE_SIZE_ANY: - /* handle it like 2 MB and round up to page size */ - size = (size + 2*1024*1024 - 1) & ~(2*1024*1024 - 1); - case SUPERPAGE_SIZE_2MB: - break; + case SUPERPAGE_SIZE_ANY: + /* handle it like 2 MB and round up to page size */ + size = (size + 2 * 1024 * 1024 - 1) & ~(2 * 1024 * 1024 - 1); + case SUPERPAGE_SIZE_2MB: + break; #endif - default: - return KERN_INVALID_ARGUMENT; + default: + return KERN_INVALID_ARGUMENT; } - mask = SUPERPAGE_SIZE-1; - if (size & (SUPERPAGE_SIZE-1)) + mask = SUPERPAGE_SIZE - 1; + if (size & (SUPERPAGE_SIZE - 1)) { return KERN_INVALID_ARGUMENT; - inheritance = VM_INHERIT_NONE; /* fork() children won't inherit superpages */ + } + inheritance = VM_INHERIT_NONE; /* fork() children won't inherit superpages */ } @@ -2172,26 +2205,26 @@ vm_map_enter( #if !CONFIG_EMBEDDED map != kernel_map && (cs_process_global_enforcement() || - (vmk_flags.vmkf_cs_enforcement_override - ? vmk_flags.vmkf_cs_enforcement - : cs_process_enforcement(NULL))) && + (vmk_flags.vmkf_cs_enforcement_override + ? vmk_flags.vmkf_cs_enforcement + : cs_process_enforcement(NULL))) && #endif /* !CONFIG_EMBEDDED */ !entry_for_jit) { DTRACE_VM3(cs_wx, - uint64_t, 0, - uint64_t, 0, - vm_prot_t, cur_protection); + uint64_t, 0, + uint64_t, 0, + vm_prot_t, cur_protection); printf("CODE SIGNING: %d[%s] %s: curprot cannot be write+execute. " #if VM_PROTECT_WX_FAIL - "failing\n", + "failing\n", #else /* VM_PROTECT_WX_FAIL */ - "turning off execute\n", + "turning off execute\n", #endif /* VM_PROTECT_WX_FAIL */ - proc_selfpid(), - (current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - __FUNCTION__); + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); cur_protection &= ~VM_PROT_EXECUTE; #if VM_PROTECT_WX_FAIL return KERN_PROTECTION_FAILURE; @@ -2245,12 +2278,12 @@ vm_map_enter( /* * Allow an insertion beyond the map's max offset. 
*/ -#if !defined(__arm__) && !defined(__arm64__) - if (vm_map_is_64bit(map)) +#if !defined(__arm__) && !defined(__arm64__) + if (vm_map_is_64bit(map)) { effective_max_offset = 0xFFFFFFFFFFFFF000ULL; - else -#endif /* __arm__ */ - effective_max_offset = 0x00000000FFFFF000ULL; + } else +#endif /* __arm__ */ + effective_max_offset = 0x00000000FFFFF000ULL; } else { effective_max_offset = map->max_offset; } @@ -2267,7 +2300,11 @@ vm_map_enter( user_alias = alias; } -#define RETURN(value) { result = value; goto BailOut; } + if (user_alias == VM_MEMORY_MALLOC_MEDIUM) { + chunk_size = MALLOC_MEDIUM_CHUNK_SIZE; + } + +#define RETURN(value) { result = value; goto BailOut; } assert(page_aligned(*address)); assert(page_aligned(size)); @@ -2302,11 +2339,12 @@ vm_map_enter( */ if (purgable && (offset != 0 || - (object != VM_OBJECT_NULL && - (object->vo_size != size || - object->purgable == VM_PURGABLE_DENY)) - || size > ANON_MAX_SIZE)) /* LP64todo: remove when dp capable */ + (object != VM_OBJECT_NULL && + (object->vo_size != size || + object->purgable == VM_PURGABLE_DENY)) + || size > ANON_MAX_SIZE)) { /* LP64todo: remove when dp capable */ return KERN_INVALID_ARGUMENT; + } if (!anywhere && overwrite) { /* @@ -2319,14 +2357,14 @@ vm_map_enter( * new mapping fails. */ zap_old_map = vm_map_create(PMAP_NULL, - *address, - *address + size, - map->hdr.entries_pageable); + *address, + *address + size, + map->hdr.entries_pageable); vm_map_set_page_shift(zap_old_map, VM_MAP_PAGE_SHIFT(map)); vm_map_disable_hole_optimization(zap_old_map); } -StartAgain: ; +StartAgain:; start = *address; @@ -2356,8 +2394,8 @@ StartAgain: ; } #if __x86_64__ else if ((start == 0 || start == vm_map_min(map)) && - !map->disable_vmentry_reuse && - map->vmmap_high_start != 0) { + !map->disable_vmentry_reuse && + map->vmmap_high_start != 0) { start = map->vmmap_high_start; } #endif /* __x86_64__ */ @@ -2367,10 +2405,12 @@ StartAgain: ; * Calculate the first possible address. */ - if (start < effective_min_offset) + if (start < effective_min_offset) { start = effective_min_offset; - if (start > effective_max_offset) + } + if (start > effective_max_offset) { RETURN(KERN_NO_SPACE); + } /* * Look for the first possible address; @@ -2378,10 +2418,9 @@ StartAgain: ; * address, we have to start after it. */ - if( map->disable_vmentry_reuse == TRUE) { + if (map->disable_vmentry_reuse == TRUE) { VM_MAP_HIGHEST_ENTRY(map, entry, start); } else { - if (map->holelistenabled) { hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); @@ -2392,7 +2431,6 @@ StartAgain: ; result = KERN_NO_SPACE; goto BailOut; } else { - boolean_t found_hole = FALSE; do { @@ -2407,7 +2445,6 @@ StartAgain: ; break; } hole_entry = hole_entry->vme_next; - } while (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)); if (found_hole == FALSE) { @@ -2417,8 +2454,9 @@ StartAgain: ; entry = hole_entry; - if (start == 0) + if (start == 0) { start += PAGE_SIZE_64; + } } } else { assert(first_free_is_valid(map)); @@ -2428,32 +2466,32 @@ StartAgain: ; if (entry == vm_map_to_entry(map)) { entry = NULL; } else { - if (entry->vme_next == vm_map_to_entry(map)){ - /* - * Hole at the end of the map. - */ + if (entry->vme_next == vm_map_to_entry(map)) { + /* + * Hole at the end of the map. + */ entry = NULL; - } else { - if (start < (entry->vme_next)->vme_start ) { + } else { + if (start < (entry->vme_next)->vme_start) { start = entry->vme_end; start = vm_map_round_page(start, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); } else { /* * Need to do a lookup. 
*/ entry = NULL; } - } + } } if (entry == NULL) { - vm_map_entry_t tmp_entry; + vm_map_entry_t tmp_entry; if (vm_map_lookup_entry(map, start, &tmp_entry)) { assert(!entry_for_jit); start = tmp_entry->vme_end; start = vm_map_round_page(start, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); } entry = tmp_entry; } @@ -2467,7 +2505,7 @@ StartAgain: ; */ while (TRUE) { - vm_map_entry_t next; + vm_map_entry_t next; /* * Find the end of the proposed new region. @@ -2477,12 +2515,13 @@ StartAgain: ; end = ((start + mask) & ~mask); end = vm_map_round_page(end, - VM_MAP_PAGE_MASK(map)); - if (end < start) + VM_MAP_PAGE_MASK(map)); + if (end < start) { RETURN(KERN_NO_SPACE); + } start = end; assert(VM_MAP_PAGE_ALIGNED(start, - VM_MAP_PAGE_MASK(map))); + VM_MAP_PAGE_MASK(map))); end += size; /* We want an entire page of empty space, but don't increase the allocation size. */ @@ -2492,9 +2531,9 @@ StartAgain: ; if (map->wait_for_space) { assert(!keep_map_locked); if (size <= (effective_max_offset - - effective_min_offset)) { + effective_min_offset)) { assert_wait((event_t)map, - THREAD_ABORTSAFE); + THREAD_ABORTSAFE); vm_map_unlock(map); map_locked = FALSE; thread_block(THREAD_CONTINUE_NULL); @@ -2507,8 +2546,9 @@ StartAgain: ; next = entry->vme_next; if (map->holelistenabled) { - if (entry->vme_end >= desired_empty_end) + if (entry->vme_end >= desired_empty_end) { break; + } } else { /* * If there are no more entries, we must win. @@ -2519,11 +2559,13 @@ StartAgain: ; * after the end of the potential new region. */ - if (next == vm_map_to_entry(map)) + if (next == vm_map_to_entry(map)) { break; + } - if (next->vme_start >= desired_empty_end) + if (next->vme_start >= desired_empty_end) { break; + } } /* @@ -2546,7 +2588,7 @@ StartAgain: ; } start = vm_map_round_page(start, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); } if (map->holelistenabled) { @@ -2557,7 +2599,7 @@ StartAgain: ; *address = start; assert(VM_MAP_PAGE_ALIGNED(*address, - VM_MAP_PAGE_MASK(map))); + VM_MAP_PAGE_MASK(map))); } else { /* * Verify that: @@ -2567,8 +2609,9 @@ StartAgain: ; vm_map_lock(map); map_locked = TRUE; - if ((start & mask) != 0) + if ((start & mask) != 0) { RETURN(KERN_NO_SPACE); + } /* * ... the address is within bounds @@ -2596,8 +2639,8 @@ StartAgain: ; remove_flags |= VM_MAP_REMOVE_IMMUTABLE; } (void) vm_map_delete(map, start, end, - remove_flags, - zap_old_map); + remove_flags, + zap_old_map); } /* @@ -2605,7 +2648,7 @@ StartAgain: ; */ if (vm_map_lookup_entry(map, start, &entry)) { - if (! (vmk_flags.vmkf_already)) { + if (!(vmk_flags.vmkf_already)) { RETURN(KERN_NO_SPACE); } /* @@ -2616,10 +2659,9 @@ StartAgain: ; if (entry->vme_start < start) { tmp_start -= start - entry->vme_start; tmp_offset -= start - entry->vme_start; - } for (; entry->vme_start < end; - entry = entry->vme_next) { + entry = entry->vme_next) { /* * Check if the mapping's attributes * match the existing map entry. @@ -2653,9 +2695,9 @@ StartAgain: ; obj2 = VME_OBJECT(entry); if ((obj2 == VM_OBJECT_NULL || - obj2->internal) && + obj2->internal) && (object == VM_OBJECT_NULL || - object->internal)) { + object->internal)) { /* * ... 
but both are * anonymous memory, @@ -2684,8 +2726,9 @@ StartAgain: ; */ if ((entry->vme_next != vm_map_to_entry(map)) && - (entry->vme_next->vme_start < end)) + (entry->vme_next->vme_start < end)) { RETURN(KERN_NO_SPACE); + } } /* @@ -2711,7 +2754,6 @@ StartAgain: ; entry_for_jit || vm_memory_malloc_no_cow(user_alias)) { if (object == VM_OBJECT_NULL) { - object = vm_object_allocate(size); object->copy_strategy = MEMORY_OBJECT_COPY_NONE; object->true_share = FALSE; @@ -2742,51 +2784,50 @@ StartAgain: ; offset = (vm_object_offset_t)0; } } else if ((is_submap == FALSE) && - (object == VM_OBJECT_NULL) && - (entry != vm_map_to_entry(map)) && - (entry->vme_end == start) && - (!entry->is_shared) && - (!entry->is_sub_map) && - (!entry->in_transition) && - (!entry->needs_wakeup) && - (entry->behavior == VM_BEHAVIOR_DEFAULT) && - (entry->protection == cur_protection) && - (entry->max_protection == max_protection) && - (entry->inheritance == inheritance) && - ((user_alias == VM_MEMORY_REALLOC) || - (VME_ALIAS(entry) == alias)) && - (entry->no_cache == no_cache) && - (entry->permanent == permanent) && - /* no coalescing for immutable executable mappings */ - !((entry->protection & VM_PROT_EXECUTE) && - entry->permanent) && - (!entry->superpage_size && !superpage_size) && - /* - * No coalescing if not map-aligned, to avoid propagating - * that condition any further than needed: - */ - (!entry->map_aligned || !clear_map_aligned) && - (!entry->zero_wired_pages) && - (!entry->used_for_jit && !entry_for_jit) && - (!entry->pmap_cs_associated) && - (entry->iokit_acct == iokit_acct) && - (!entry->vme_resilient_codesign) && - (!entry->vme_resilient_media) && - (!entry->vme_atomic) && - - ((entry->vme_end - entry->vme_start) + size <= - (user_alias == VM_MEMORY_REALLOC ? - ANON_CHUNK_SIZE : - NO_COALESCE_LIMIT)) && - - (entry->wired_count == 0)) { /* implies user_wired_count == 0 */ + (object == VM_OBJECT_NULL) && + (entry != vm_map_to_entry(map)) && + (entry->vme_end == start) && + (!entry->is_shared) && + (!entry->is_sub_map) && + (!entry->in_transition) && + (!entry->needs_wakeup) && + (entry->behavior == VM_BEHAVIOR_DEFAULT) && + (entry->protection == cur_protection) && + (entry->max_protection == max_protection) && + (entry->inheritance == inheritance) && + ((user_alias == VM_MEMORY_REALLOC) || + (VME_ALIAS(entry) == alias)) && + (entry->no_cache == no_cache) && + (entry->permanent == permanent) && + /* no coalescing for immutable executable mappings */ + !((entry->protection & VM_PROT_EXECUTE) && + entry->permanent) && + (!entry->superpage_size && !superpage_size) && + /* + * No coalescing if not map-aligned, to avoid propagating + * that condition any further than needed: + */ + (!entry->map_aligned || !clear_map_aligned) && + (!entry->zero_wired_pages) && + (!entry->used_for_jit && !entry_for_jit) && + (!entry->pmap_cs_associated) && + (entry->iokit_acct == iokit_acct) && + (!entry->vme_resilient_codesign) && + (!entry->vme_resilient_media) && + (!entry->vme_atomic) && + + ((entry->vme_end - entry->vme_start) + size <= + (user_alias == VM_MEMORY_REALLOC ? 
+ ANON_CHUNK_SIZE : + NO_COALESCE_LIMIT)) && + + (entry->wired_count == 0)) { /* implies user_wired_count == 0 */ if (vm_object_coalesce(VME_OBJECT(entry), - VM_OBJECT_NULL, - VME_OFFSET(entry), - (vm_object_offset_t) 0, - (vm_map_size_t)(entry->vme_end - entry->vme_start), - (vm_map_size_t)(end - entry->vme_end))) { - + VM_OBJECT_NULL, + VME_OFFSET(entry), + (vm_object_offset_t) 0, + (vm_map_size_t)(entry->vme_end - entry->vme_start), + (vm_map_size_t)(end - entry->vme_end))) { /* * Coalesced the two objects - can extend * the previous map entry to include the @@ -2795,9 +2836,10 @@ StartAgain: ; map->size += (end - entry->vme_end); assert(entry->vme_start < end); assert(VM_MAP_PAGE_ALIGNED(end, - VM_MAP_PAGE_MASK(map))); - if (__improbable(vm_debug_events)) + VM_MAP_PAGE_MASK(map))); + if (__improbable(vm_debug_events)) { DTRACE_VM5(map_entry_extend, vm_map_t, map, vm_map_entry_t, entry, vm_address_t, entry->vme_start, vm_address_t, entry->vme_end, vm_address_t, end); + } entry->vme_end = end; if (map->holelistenabled) { vm_map_store_update_first_free(map, entry, TRUE); @@ -2812,7 +2854,7 @@ StartAgain: ; step = superpage_size ? SUPERPAGE_SIZE : (end - start); new_entry = NULL; - for (tmp2_start = start; tmp2_start<end; tmp2_start += step) { + for (tmp2_start = start; tmp2_start < end; tmp2_start += step) { tmp2_end = tmp2_start + step; tmp_start = tmp2_start; if (object == VM_OBJECT_NULL && size > chunk_size && max_protection != VM_PROT_NONE && - superpage_size == 0) + superpage_size == 0) { tmp_end = tmp_start + chunk_size; - else + } else { tmp_end = tmp2_end; + } do { new_entry = vm_map_entry_insert( map, entry, tmp_start, tmp_end, - object, offset, needs_copy, + object, offset, needs_copy, FALSE, FALSE, cur_protection, max_protection, VM_BEHAVIOR_DEFAULT, @@ -2854,14 +2897,14 @@ StartAgain: ; assert((object != kernel_object) || (VM_KERN_MEMORY_NONE != alias)); if (resilient_codesign && - ! ((cur_protection | max_protection) & - (VM_PROT_WRITE | VM_PROT_EXECUTE))) { + !((cur_protection | max_protection) & + (VM_PROT_WRITE | VM_PROT_EXECUTE))) { new_entry->vme_resilient_codesign = TRUE; } if (resilient_media && - ! ((cur_protection | max_protection) & - (VM_PROT_WRITE | VM_PROT_EXECUTE))) { + !((cur_protection | max_protection) & + (VM_PROT_WRITE | VM_PROT_EXECUTE))) { new_entry->vme_resilient_media = TRUE; } @@ -2869,7 +2912,7 @@ StartAgain: ; if (!is_submap && object != VM_OBJECT_NULL && (object->purgable != VM_PURGABLE_DENY || - object->vo_ledger_tag)) { + object->vo_ledger_tag)) { assert(new_entry->use_pmap); assert(!new_entry->iokit_acct); /* @@ -2879,9 +2922,9 @@ StartAgain: ; */ new_entry->use_pmap = FALSE; } else if (!is_submap && - iokit_acct && - object != VM_OBJECT_NULL && - object->internal) { + iokit_acct && + object != VM_OBJECT_NULL && + object->internal) { /* alternate accounting */ assert(!new_entry->iokit_acct); assert(new_entry->use_pmap); @@ -2896,16 +2939,16 @@ StartAgain: ; vm_map_iokit_mapped_region( map, (new_entry->vme_end - - new_entry->vme_start)); + new_entry->vme_start)); } else if (!is_submap) { assert(!new_entry->iokit_acct); assert(new_entry->use_pmap); } if (is_submap) { - vm_map_t submap; - boolean_t submap_is_64bit; - boolean_t use_pmap; + vm_map_t submap; + boolean_t submap_is_64bit; + boolean_t use_pmap; assert(new_entry->is_sub_map); assert(!new_entry->use_pmap); @@ -2922,7 +2965,7 @@ StartAgain: ; if (submap->pmap == NULL) { /* let's proceed without nesting...
*/ } -#if defined(__arm__) || defined(__arm64__) +#if defined(__arm__) || defined(__arm64__) else { pmap_set_nested(submap->pmap); } @@ -2930,17 +2973,17 @@ StartAgain: ; } if (use_pmap && submap->pmap != NULL) { kr = pmap_nest(map->pmap, - submap->pmap, - tmp_start, - tmp_start, - tmp_end - tmp_start); + submap->pmap, + tmp_start, + tmp_start, + tmp_end - tmp_start); if (kr != KERN_SUCCESS) { printf("vm_map_enter: " - "pmap_nest(0x%llx,0x%llx) " - "error 0x%x\n", - (long long)tmp_start, - (long long)tmp_end, - kr); + "pmap_nest(0x%llx,0x%llx) " + "error 0x%x\n", + (long long)tmp_start, + (long long)tmp_end, + kr); } else { /* we're now nested ! */ new_entry->use_pmap = TRUE; @@ -2959,7 +3002,7 @@ StartAgain: ; VME_OFFSET_SET(entry, 0); /* allocate one superpage */ - kr = cpm_allocate(SUPERPAGE_SIZE, &pages, 0, SUPERPAGE_NBASEPAGES-1, TRUE, 0); + kr = cpm_allocate(SUPERPAGE_SIZE, &pages, 0, SUPERPAGE_NBASEPAGES - 1, TRUE, 0); if (kr != KERN_SUCCESS) { /* deallocate whole range... */ new_mapping_established = TRUE; @@ -2971,15 +3014,15 @@ StartAgain: ; /* create one vm_object per superpage */ sp_object = vm_object_allocate((vm_map_size_t)(entry->vme_end - entry->vme_start)); sp_object->phys_contiguous = TRUE; - sp_object->vo_shadow_offset = (vm_object_offset_t)VM_PAGE_GET_PHYS_PAGE(pages)*PAGE_SIZE; + sp_object->vo_shadow_offset = (vm_object_offset_t)VM_PAGE_GET_PHYS_PAGE(pages) * PAGE_SIZE; VME_OBJECT_SET(entry, sp_object); assert(entry->use_pmap); /* enter the base pages into the object */ vm_object_lock(sp_object); for (sp_offset = 0; - sp_offset < SUPERPAGE_SIZE; - sp_offset += PAGE_SIZE) { + sp_offset < SUPERPAGE_SIZE; + sp_offset += PAGE_SIZE) { m = pages; pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m)); pages = NEXT_PAGE(m); @@ -2989,9 +3032,9 @@ StartAgain: ; vm_object_unlock(sp_object); } } while (tmp_end != tmp2_end && - (tmp_start = tmp_end) && - (tmp_end = (tmp2_end - tmp_end > chunk_size) ? - tmp_end + chunk_size : tmp2_end)); + (tmp_start = tmp_end) && + (tmp_end = (tmp2_end - tmp_end > chunk_size) ? + tmp_end + chunk_size : tmp2_end)); } new_mapping_established = TRUE; @@ -3007,8 +3050,8 @@ BailOut: if (pmap_empty && !(vmk_flags.vmkf_no_pmap_check)) { assert(vm_map_pmap_is_empty(map, - *address, - *address+size)); + *address, + *address + size)); } #endif /* DEBUG */ @@ -3064,16 +3107,15 @@ BailOut: */ if (result == KERN_SUCCESS) { - /* Wire down the new entry if the user * requested all new map entries be wired. */ - if ((map->wiring_required)||(superpage_size)) { + if ((map->wiring_required) || (superpage_size)) { assert(!keep_map_locked); pmap_empty = FALSE; /* pmap won't be empty */ kr = vm_map_wire_kernel(map, start, end, - new_entry->protection, VM_KERN_MEMORY_MLOCK, - TRUE); + new_entry->protection, VM_KERN_MEMORY_MLOCK, + TRUE); result = kr; } @@ -3088,25 +3130,25 @@ BailOut: * that someone else create new mappings that range. 
*/ zap_new_map = vm_map_create(PMAP_NULL, - *address, - *address + size, - map->hdr.entries_pageable); + *address, + *address + size, + map->hdr.entries_pageable); vm_map_set_page_shift(zap_new_map, - VM_MAP_PAGE_SHIFT(map)); + VM_MAP_PAGE_SHIFT(map)); vm_map_disable_hole_optimization(zap_new_map); if (!map_locked) { vm_map_lock(map); map_locked = TRUE; } - (void) vm_map_delete(map, *address, *address+size, - (VM_MAP_REMOVE_SAVE_ENTRIES | - VM_MAP_REMOVE_NO_MAP_ALIGN), - zap_new_map); + (void) vm_map_delete(map, *address, *address + size, + (VM_MAP_REMOVE_SAVE_ENTRIES | + VM_MAP_REMOVE_NO_MAP_ALIGN), + zap_new_map); } if (zap_old_map != VM_MAP_NULL && zap_old_map->hdr.nentries != 0) { - vm_map_entry_t entry1, entry2; + vm_map_entry_t entry1, entry2; /* * The new mapping failed. Attempt to restore @@ -3136,17 +3178,17 @@ BailOut: * inserting them all after "entry1". */ for (entry2 = vm_map_first_entry(zap_old_map); - entry2 != vm_map_to_entry(zap_old_map); - entry2 = vm_map_first_entry(zap_old_map)) { + entry2 != vm_map_to_entry(zap_old_map); + entry2 = vm_map_first_entry(zap_old_map)) { vm_map_size_t entry_size; entry_size = (entry2->vme_end - - entry2->vme_start); + entry2->vme_start); vm_map_store_entry_unlink(zap_old_map, - entry2); + entry2); zap_old_map->size -= entry_size; vm_map_store_entry_link(map, entry1, entry2, - VM_MAP_KERNEL_FLAGS_NONE); + VM_MAP_KERNEL_FLAGS_NONE); map->size += entry_size; entry1 = entry2; } @@ -3184,58 +3226,58 @@ BailOut: return result; -#undef RETURN +#undef RETURN } #if __arm64__ extern const struct memory_object_pager_ops fourk_pager_ops; kern_return_t vm_map_enter_fourk( - vm_map_t map, - vm_map_offset_t *address, /* IN/OUT */ - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t alias, - vm_object_t object, - vm_object_offset_t offset, - boolean_t needs_copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t map, + vm_map_offset_t *address, /* IN/OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t alias, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { - vm_map_entry_t entry, new_entry; - vm_map_offset_t start, fourk_start; - vm_map_offset_t end, fourk_end; - vm_map_size_t fourk_size; - kern_return_t result = KERN_SUCCESS; - vm_map_t zap_old_map = VM_MAP_NULL; - vm_map_t zap_new_map = VM_MAP_NULL; - boolean_t map_locked = FALSE; - boolean_t pmap_empty = TRUE; - boolean_t new_mapping_established = FALSE; - boolean_t keep_map_locked = vmk_flags.vmkf_keep_map_locked; - boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0); - boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0); - boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0); - boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0); - boolean_t is_submap = vmk_flags.vmkf_submap; - boolean_t permanent = vmk_flags.vmkf_permanent; - boolean_t entry_for_jit = vmk_flags.vmkf_map_jit; + vm_map_entry_t entry, new_entry; + vm_map_offset_t start, fourk_start; + vm_map_offset_t end, fourk_end; + vm_map_size_t fourk_size; + kern_return_t result = KERN_SUCCESS; + vm_map_t zap_old_map = VM_MAP_NULL; + vm_map_t zap_new_map = VM_MAP_NULL; + boolean_t map_locked = FALSE; + boolean_t pmap_empty = TRUE; + boolean_t new_mapping_established = FALSE; + boolean_t keep_map_locked = vmk_flags.vmkf_keep_map_locked; + boolean_t anywhere = ((flags & 
VM_FLAGS_ANYWHERE) != 0); + boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0); + boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0); + boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0); + boolean_t is_submap = vmk_flags.vmkf_submap; + boolean_t permanent = vmk_flags.vmkf_permanent; + boolean_t entry_for_jit = vmk_flags.vmkf_map_jit; // boolean_t iokit_acct = vmk_flags.vmkf_iokit_acct; - unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT); - vm_map_offset_t effective_min_offset, effective_max_offset; - kern_return_t kr; - boolean_t clear_map_aligned = FALSE; - memory_object_t fourk_mem_obj; - vm_object_t fourk_object; - vm_map_offset_t fourk_pager_offset; - int fourk_pager_index_start, fourk_pager_index_num; - int cur_idx; - boolean_t fourk_copy; - vm_object_t copy_object; - vm_object_offset_t copy_offset; + unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT); + vm_map_offset_t effective_min_offset, effective_max_offset; + kern_return_t kr; + boolean_t clear_map_aligned = FALSE; + memory_object_t fourk_mem_obj; + vm_object_t fourk_object; + vm_map_offset_t fourk_pager_offset; + int fourk_pager_index_start, fourk_pager_index_num; + int cur_idx; + boolean_t fourk_copy; + vm_object_t copy_object; + vm_object_offset_t copy_offset; fourk_mem_obj = MEMORY_OBJECT_NULL; fourk_object = VM_OBJECT_NULL; @@ -3252,16 +3294,16 @@ vm_map_enter_fourk( #endif /* !CONFIG_EMBEDDED */ !entry_for_jit) { DTRACE_VM3(cs_wx, - uint64_t, 0, - uint64_t, 0, - vm_prot_t, cur_protection); + uint64_t, 0, + uint64_t, 0, + vm_prot_t, cur_protection); printf("CODE SIGNING: %d[%s] %s: curprot cannot be write+execute. " - "turning off execute\n", - proc_selfpid(), - (current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - __FUNCTION__); + "turning off execute\n", + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); cur_protection &= ~VM_PROT_EXECUTE; } @@ -3299,7 +3341,7 @@ vm_map_enter_fourk( return KERN_INVALID_ARGUMENT; } -#define RETURN(value) { result = value; goto BailOut; } +#define RETURN(value) { result = value; goto BailOut; } assert(VM_MAP_PAGE_ALIGNED(*address, FOURK_PAGE_MASK)); assert(VM_MAP_PAGE_ALIGNED(size, FOURK_PAGE_MASK)); @@ -3318,9 +3360,9 @@ vm_map_enter_fourk( * new mapping fails. */ zap_old_map = vm_map_create(PMAP_NULL, - *address, - *address + size, - map->hdr.entries_pageable); + *address, + *address + size, + map->hdr.entries_pageable); vm_map_set_page_shift(zap_old_map, VM_MAP_PAGE_SHIFT(map)); vm_map_disable_hole_optimization(zap_old_map); } @@ -3367,9 +3409,9 @@ vm_map_enter_fourk( * address range, saving them in our "zap_old_map". 
*/ (void) vm_map_delete(map, start, end, - (VM_MAP_REMOVE_SAVE_ENTRIES | - VM_MAP_REMOVE_NO_MAP_ALIGN), - zap_old_map); + (VM_MAP_REMOVE_SAVE_ENTRIES | + VM_MAP_REMOVE_NO_MAP_ALIGN), + zap_old_map); } /* @@ -3425,7 +3467,7 @@ vm_map_enter_fourk( entry->protection |= cur_protection; entry->max_protection |= max_protection; if ((entry->protection & (VM_PROT_WRITE | - VM_PROT_EXECUTE)) == + VM_PROT_EXECUTE)) == (VM_PROT_WRITE | VM_PROT_EXECUTE) && fourk_binary_compatibility_unsafe && fourk_binary_compatibility_allow_wx) { @@ -3468,11 +3510,11 @@ vm_map_enter_fourk( /* create a "copy" object, to map the "4K" object copy-on-write */ fourk_copy = TRUE; result = vm_object_copy_strategically(fourk_object, - 0, - end - start, - &copy_object, - &copy_offset, - &fourk_copy); + 0, + end - start, + &copy_object, + &copy_offset, + &fourk_copy); assert(result == KERN_SUCCESS); assert(copy_object != VM_OBJECT_NULL); assert(copy_offset == 0); /* map the "4K" pager's copy object */ new_entry = - vm_map_entry_insert(map, entry, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(end, - VM_MAP_PAGE_MASK(map)), - copy_object, - 0, /* offset */ - FALSE, /* needs_copy */ - FALSE, FALSE, - cur_protection, max_protection, - VM_BEHAVIOR_DEFAULT, - ((entry_for_jit) - ? VM_INHERIT_NONE - : inheritance), - 0, - no_cache, - permanent, - superpage_size, - clear_map_aligned, - is_submap, - FALSE, /* jit */ - alias); + vm_map_entry_insert(map, entry, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(end, + VM_MAP_PAGE_MASK(map)), + copy_object, + 0, /* offset */ + FALSE, /* needs_copy */ + FALSE, FALSE, + cur_protection, max_protection, + VM_BEHAVIOR_DEFAULT, + ((entry_for_jit) + ? VM_INHERIT_NONE + : inheritance), + 0, + no_cache, + permanent, + superpage_size, + clear_map_aligned, + is_submap, + FALSE, /* jit */ + alias); entry = new_entry; #if VM_MAP_DEBUG_FOURK if (vm_map_debug_fourk) { printf("FOURK_PAGER: map %p [0x%llx:0x%llx] new pager %p\n", - map, - (uint64_t) entry->vme_start, - (uint64_t) entry->vme_end, - fourk_mem_obj); + map, + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end, + fourk_mem_obj); } #endif /* VM_MAP_DEBUG_FOURK */ @@ -3531,56 +3573,56 @@ map_in_fourk_pager: fourk_pager_index_num = 4 - fourk_pager_index_start; } for (cur_idx = 0; - cur_idx < fourk_pager_index_num; - cur_idx++) { - vm_object_t old_object; - vm_object_offset_t old_offset; + cur_idx < fourk_pager_index_num; + cur_idx++) { + vm_object_t old_object; + vm_object_offset_t old_offset; kr = fourk_pager_populate(fourk_mem_obj, - TRUE, /* overwrite */ - fourk_pager_index_start + cur_idx, - object, - (object - ? (offset + - (cur_idx * FOURK_PAGE_SIZE)) - : 0), - &old_object, - &old_offset); + TRUE, /* overwrite */ + fourk_pager_index_start + cur_idx, + object, + (object + ? (offset + + (cur_idx * FOURK_PAGE_SIZE)) + : 0), + &old_object, + &old_offset); #if VM_MAP_DEBUG_FOURK if (vm_map_debug_fourk) { if (old_object == (vm_object_t) -1 && old_offset == (vm_object_offset_t) -1) { printf("FOURK_PAGER: map %p [0x%llx:0x%llx] " - "pager [%p:0x%llx] " - "populate[%d] " - "[object:%p,offset:0x%llx]\n", - map, - (uint64_t) entry->vme_start, - (uint64_t) entry->vme_end, - fourk_mem_obj, - VME_OFFSET(entry), - fourk_pager_index_start + cur_idx, - object, - (object - ?
(offset + (cur_idx * FOURK_PAGE_SIZE)) - : 0)); + "pager [%p:0x%llx] " + "populate[%d] " + "[object:%p,offset:0x%llx]\n", + map, + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end, + fourk_mem_obj, + VME_OFFSET(entry), + fourk_pager_index_start + cur_idx, + object, + (object + ? (offset + (cur_idx * FOURK_PAGE_SIZE)) + : 0)); } else { printf("FOURK_PAGER: map %p [0x%llx:0x%llx] " - "pager [%p:0x%llx] " - "populate[%d] [object:%p,offset:0x%llx] " - "old [%p:0x%llx]\n", - map, - (uint64_t) entry->vme_start, - (uint64_t) entry->vme_end, - fourk_mem_obj, - VME_OFFSET(entry), - fourk_pager_index_start + cur_idx, - object, - (object - ? (offset + (cur_idx * FOURK_PAGE_SIZE)) - : 0), - old_object, - old_offset); + "pager [%p:0x%llx] " + "populate[%d] [object:%p,offset:0x%llx] " + "old [%p:0x%llx]\n", + map, + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end, + fourk_mem_obj, + VME_OFFSET(entry), + fourk_pager_index_start + cur_idx, + object, + (object + ? (offset + (cur_idx * FOURK_PAGE_SIZE)) + : 0), + old_object, + old_offset); } } #endif /* VM_MAP_DEBUG_FOURK */ @@ -3615,8 +3657,8 @@ BailOut: if (pmap_empty && !(vmk_flags.vmkf_no_pmap_check)) { assert(vm_map_pmap_is_empty(map, - *address, - *address+size)); + *address, + *address + size)); } #endif /* DEBUG */ @@ -3668,7 +3710,7 @@ BailOut: pager != MEMORY_OBJECT_NULL) { assert(fourk_object->pager_ready); vm_object_mapping_wait(fourk_object, - THREAD_UNINT); + THREAD_UNINT); vm_object_mapping_begin(fourk_object); vm_object_unlock(fourk_object); @@ -3694,16 +3736,15 @@ BailOut: */ if (result == KERN_SUCCESS) { - /* Wire down the new entry if the user * requested all new map entries be wired. */ - if ((map->wiring_required)||(superpage_size)) { + if ((map->wiring_required) || (superpage_size)) { assert(!keep_map_locked); pmap_empty = FALSE; /* pmap won't be empty */ kr = vm_map_wire_kernel(map, start, end, - new_entry->protection, VM_KERN_MEMORY_MLOCK, - TRUE); + new_entry->protection, VM_KERN_MEMORY_MLOCK, + TRUE); result = kr; } @@ -3718,25 +3759,25 @@ BailOut: * that someone else create new mappings that range. */ zap_new_map = vm_map_create(PMAP_NULL, - *address, - *address + size, - map->hdr.entries_pageable); + *address, + *address + size, + map->hdr.entries_pageable); vm_map_set_page_shift(zap_new_map, - VM_MAP_PAGE_SHIFT(map)); + VM_MAP_PAGE_SHIFT(map)); vm_map_disable_hole_optimization(zap_new_map); if (!map_locked) { vm_map_lock(map); map_locked = TRUE; } - (void) vm_map_delete(map, *address, *address+size, - (VM_MAP_REMOVE_SAVE_ENTRIES | - VM_MAP_REMOVE_NO_MAP_ALIGN), - zap_new_map); + (void) vm_map_delete(map, *address, *address + size, + (VM_MAP_REMOVE_SAVE_ENTRIES | + VM_MAP_REMOVE_NO_MAP_ALIGN), + zap_new_map); } if (zap_old_map != VM_MAP_NULL && zap_old_map->hdr.nentries != 0) { - vm_map_entry_t entry1, entry2; + vm_map_entry_t entry1, entry2; /* * The new mapping failed. Attempt to restore @@ -3766,17 +3807,17 @@ BailOut: * inserting them all after "entry1". 
*/ for (entry2 = vm_map_first_entry(zap_old_map); - entry2 != vm_map_to_entry(zap_old_map); - entry2 = vm_map_first_entry(zap_old_map)) { + entry2 != vm_map_to_entry(zap_old_map); + entry2 = vm_map_first_entry(zap_old_map)) { vm_map_size_t entry_size; entry_size = (entry2->vme_end - - entry2->vme_start); + entry2->vme_start); vm_map_store_entry_unlink(zap_old_map, - entry2); + entry2); zap_old_map->size -= entry_size; vm_map_store_entry_link(map, entry1, entry2, - VM_MAP_KERNEL_FLAGS_NONE); + VM_MAP_KERNEL_FLAGS_NONE); map->size += entry_size; entry1 = entry2; } @@ -3814,7 +3855,7 @@ BailOut: return result; -#undef RETURN +#undef RETURN } #endif /* __arm64__ */ @@ -3826,32 +3867,32 @@ int64_t vm_prefault_nb_bailout = 0; static kern_return_t vm_map_enter_mem_object_helper( - vm_map_t target_map, - vm_map_offset_t *address, - vm_map_size_t initial_size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance, - upl_page_list_ptr_t page_list, - unsigned int page_list_count) + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t initial_size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance, + upl_page_list_ptr_t page_list, + unsigned int page_list_count) { - vm_map_address_t map_addr; - vm_map_size_t map_size; - vm_object_t object; - vm_object_size_t size; - kern_return_t result; - boolean_t mask_cur_protection, mask_max_protection; - boolean_t kernel_prefault, try_prefault = (page_list_count != 0); - vm_map_offset_t offset_in_mapping = 0; + vm_map_address_t map_addr; + vm_map_size_t map_size; + vm_object_t object; + vm_object_size_t size; + kern_return_t result; + boolean_t mask_cur_protection, mask_max_protection; + boolean_t kernel_prefault, try_prefault = (page_list_count != 0); + vm_map_offset_t offset_in_mapping = 0; #if __arm64__ - boolean_t fourk = vmk_flags.vmkf_fourk; + boolean_t fourk = vmk_flags.vmkf_fourk; #endif /* __arm64__ */ assertf(vmk_flags.__vmkf_unused == 0, "vmk_flags unused=0x%x\n", vmk_flags.__vmkf_unused); @@ -3881,9 +3922,9 @@ vm_map_enter_mem_object_helper( #endif /* __arm64__ */ { map_addr = vm_map_trunc_page(*address, - VM_MAP_PAGE_MASK(target_map)); + VM_MAP_PAGE_MASK(target_map)); map_size = vm_map_round_page(initial_size, - VM_MAP_PAGE_MASK(target_map)); + VM_MAP_PAGE_MASK(target_map)); } size = vm_object_round_page(initial_size); @@ -3895,19 +3936,20 @@ vm_map_enter_mem_object_helper( offset = 0; copy = FALSE; } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) { - vm_named_entry_t named_entry; + vm_named_entry_t named_entry; named_entry = (vm_named_entry_t) port->ip_kobject; if (flags & (VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_RETURN_4K_DATA_ADDR)) { + VM_FLAGS_RETURN_4K_DATA_ADDR)) { offset += named_entry->data_offset; } /* a few checks to make sure user is obeying rules */ if (size == 0) { - if (offset >= named_entry->size) + if (offset >= named_entry->size) { return KERN_INVALID_RIGHT; + } size = named_entry->size - offset; } if (mask_max_protection) { @@ -3917,11 +3959,13 @@ vm_map_enter_mem_object_helper( cur_protection &= named_entry->protection; } if ((named_entry->protection & max_protection) != - max_protection) + max_protection) { return KERN_INVALID_RIGHT; + } if 
((named_entry->protection & cur_protection) != - cur_protection) + cur_protection) { return KERN_INVALID_RIGHT; + } if (offset + size < offset) { /* overflow */ return KERN_INVALID_ARGUMENT; @@ -3934,8 +3978,8 @@ vm_map_enter_mem_object_helper( /* for a vm_map_copy, we can only map it whole */ if ((size != named_entry->size) && (vm_map_round_page(size, - VM_MAP_PAGE_MASK(target_map)) == - named_entry->size)) { + VM_MAP_PAGE_MASK(target_map)) == + named_entry->size)) { /* XXX FBDP use the rounded size... */ size = vm_map_round_page( size, @@ -3944,7 +3988,7 @@ vm_map_enter_mem_object_helper( if (!(flags & VM_FLAGS_ANYWHERE) && (offset != 0 || - size != named_entry->size)) { + size != named_entry->size)) { /* * XXX for a mapping at a "fixed" address, * we can't trim after mapping the whole @@ -3959,8 +4003,8 @@ vm_map_enter_mem_object_helper( /* offset from beginning of named entry offset in object */ offset = offset + named_entry->offset; - if (! VM_MAP_PAGE_ALIGNED(size, - VM_MAP_PAGE_MASK(target_map))) { + if (!VM_MAP_PAGE_ALIGNED(size, + VM_MAP_PAGE_MASK(target_map))) { /* * Let's not map more than requested; * vm_map_enter() will handle this "not map-aligned" @@ -3971,10 +4015,10 @@ vm_map_enter_mem_object_helper( named_entry_lock(named_entry); if (named_entry->is_sub_map) { - vm_map_t submap; + vm_map_t submap; if (flags & (VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_RETURN_4K_DATA_ADDR)) { + VM_FLAGS_RETURN_4K_DATA_ADDR)) { panic("VM_FLAGS_RETURN_DATA_ADDR not expected for submap."); } @@ -3987,18 +4031,18 @@ vm_map_enter_mem_object_helper( vmk_flags.vmkf_submap = TRUE; result = vm_map_enter(target_map, - &map_addr, - map_size, - mask, - flags, - vmk_flags, - tag, - (vm_object_t)(uintptr_t) submap, - offset, - copy, - cur_protection, - max_protection, - inheritance); + &map_addr, + map_size, + mask, + flags, + vmk_flags, + tag, + (vm_object_t)(uintptr_t) submap, + offset, + copy, + cur_protection, + max_protection, + inheritance); if (result != KERN_SUCCESS) { vm_map_deallocate(submap); } else { @@ -4027,28 +4071,28 @@ vm_map_enter_mem_object_helper( *address = map_addr; } return result; - } else if (named_entry->is_copy) { - kern_return_t kr; - vm_map_copy_t copy_map; - vm_map_entry_t copy_entry; - vm_map_offset_t copy_addr; + kern_return_t kr; + vm_map_copy_t copy_map; + vm_map_entry_t copy_entry; + vm_map_offset_t copy_addr; if (flags & ~(VM_FLAGS_FIXED | - VM_FLAGS_ANYWHERE | - VM_FLAGS_OVERWRITE | - VM_FLAGS_RETURN_4K_DATA_ADDR | - VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_ALIAS_MASK)) { + VM_FLAGS_ANYWHERE | + VM_FLAGS_OVERWRITE | + VM_FLAGS_RETURN_4K_DATA_ADDR | + VM_FLAGS_RETURN_DATA_ADDR | + VM_FLAGS_ALIAS_MASK)) { named_entry_unlock(named_entry); return KERN_INVALID_ARGUMENT; } if (flags & (VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_RETURN_4K_DATA_ADDR)) { + VM_FLAGS_RETURN_4K_DATA_ADDR)) { offset_in_mapping = offset - vm_object_trunc_page(offset); - if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) + if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) { offset_in_mapping &= ~((signed)(0xFFF)); + } offset = vm_object_trunc_page(offset); map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset; } @@ -4058,31 +4102,31 @@ vm_map_enter_mem_object_helper( if (copy_map->type != VM_MAP_COPY_ENTRY_LIST) { /* unsupported type; should not happen */ printf("vm_map_enter_mem_object: " - "memory_entry->backing.copy " - "unsupported type 0x%x\n", - copy_map->type); + "memory_entry->backing.copy " + "unsupported type 0x%x\n", + copy_map->type); named_entry_unlock(named_entry); return 
KERN_INVALID_ARGUMENT; } /* reserve a contiguous range */ kr = vm_map_enter(target_map, - &map_addr, - /* map whole mem entry, trim later: */ - named_entry->size, - mask, - flags & (VM_FLAGS_ANYWHERE | - VM_FLAGS_OVERWRITE | - VM_FLAGS_RETURN_4K_DATA_ADDR | - VM_FLAGS_RETURN_DATA_ADDR), - vmk_flags, - tag, - VM_OBJECT_NULL, - 0, - FALSE, /* copy */ - cur_protection, - max_protection, - inheritance); + &map_addr, + /* map whole mem entry, trim later: */ + named_entry->size, + mask, + flags & (VM_FLAGS_ANYWHERE | + VM_FLAGS_OVERWRITE | + VM_FLAGS_RETURN_4K_DATA_ADDR | + VM_FLAGS_RETURN_DATA_ADDR), + vmk_flags, + tag, + VM_OBJECT_NULL, + 0, + FALSE, /* copy */ + cur_protection, + max_protection, + inheritance); if (kr != KERN_SUCCESS) { named_entry_unlock(named_entry); return kr; @@ -4091,15 +4135,15 @@ vm_map_enter_mem_object_helper( copy_addr = map_addr; for (copy_entry = vm_map_copy_first_entry(copy_map); - copy_entry != vm_map_copy_to_entry(copy_map); - copy_entry = copy_entry->vme_next) { - int remap_flags; - vm_map_kernel_flags_t vmk_remap_flags; - vm_map_t copy_submap; - vm_object_t copy_object; - vm_map_size_t copy_size; - vm_object_offset_t copy_offset; - int copy_vm_alias; + copy_entry != vm_map_copy_to_entry(copy_map); + copy_entry = copy_entry->vme_next) { + int remap_flags; + vm_map_kernel_flags_t vmk_remap_flags; + vm_map_t copy_submap; + vm_object_t copy_object; + vm_map_size_t copy_size; + vm_object_offset_t copy_offset; + int copy_vm_alias; remap_flags = 0; vmk_remap_flags = VM_MAP_KERNEL_FLAGS_NONE; @@ -4107,7 +4151,7 @@ vm_map_enter_mem_object_helper( copy_object = VME_OBJECT(copy_entry); copy_offset = VME_OFFSET(copy_entry); copy_size = (copy_entry->vme_end - - copy_entry->vme_start); + copy_entry->vme_start); VM_GET_FLAGS_ALIAS(flags, copy_vm_alias); if (copy_vm_alias == 0) { /* @@ -4121,7 +4165,7 @@ vm_map_enter_mem_object_helper( /* sanity check */ if ((copy_addr + copy_size) > (map_addr + - named_entry->size /* XXX full size */ )) { + named_entry->size /* XXX full size */)) { /* over-mapping too much !? */ kr = KERN_INVALID_ARGUMENT; /* abort */ @@ -4137,12 +4181,12 @@ vm_map_enter_mem_object_helper( vm_map_unlock(copy_submap); copy_object = (vm_object_t)(uintptr_t) copy_submap; } else if (!copy && - copy_object != VM_OBJECT_NULL && - (copy_entry->needs_copy || - copy_object->shadowed || - (!copy_object->true_share && - !copy_entry->is_shared && - copy_object->vo_size > copy_size))) { + copy_object != VM_OBJECT_NULL && + (copy_entry->needs_copy || + copy_object->shadowed || + (!copy_object->true_share && + !copy_entry->is_shared && + copy_object->vo_size > copy_size))) { /* * We need to resolve our side of this * "symmetric" copy-on-write now; we @@ -4163,11 +4207,11 @@ vm_map_enter_mem_object_helper( prot = copy_entry->protection & ~VM_PROT_WRITE; vm_object_pmap_protect(copy_object, - copy_offset, - copy_size, - PMAP_NULL, - 0, - prot); + copy_offset, + copy_size, + PMAP_NULL, + 0, + prot); } copy_entry->needs_copy = FALSE; @@ -4210,18 +4254,18 @@ vm_map_enter_mem_object_helper( } #endif /* !CONFIG_EMBEDDED */ kr = vm_map_enter(target_map, - &copy_addr, - copy_size, - (vm_map_offset_t) 0, - remap_flags, - vmk_remap_flags, - copy_vm_alias, - copy_object, - copy_offset, - copy, - cur_protection, - max_protection, - inheritance); + &copy_addr, + copy_size, + (vm_map_offset_t) 0, + remap_flags, + vmk_remap_flags, + copy_vm_alias, + copy_object, + copy_offset, + ((copy_object == NULL) ?
FALSE : copy), + cur_protection, + max_protection, + inheritance); if (kr != KERN_SUCCESS) { if (copy_entry->is_sub_map) { vm_map_deallocate(copy_submap); @@ -4238,7 +4282,7 @@ vm_map_enter_mem_object_helper( if (kr == KERN_SUCCESS) { if (flags & (VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_RETURN_4K_DATA_ADDR)) { + VM_FLAGS_RETURN_4K_DATA_ADDR)) { *address = map_addr + offset_in_mapping; } else { *address = map_addr; @@ -4249,9 +4293,9 @@ vm_map_enter_mem_object_helper( * Trim in front, from 0 to "offset". */ vm_map_remove(target_map, - map_addr, - map_addr + offset, - VM_MAP_REMOVE_NO_FLAGS); + map_addr, + map_addr + offset, + VM_MAP_REMOVE_NO_FLAGS); *address += offset; } if (offset + map_size < named_entry->size) { @@ -4261,30 +4305,29 @@ vm_map_enter_mem_object_helper( * "named_entry->size". */ vm_map_remove(target_map, - (map_addr + - offset + map_size), - (map_addr + - named_entry->size), - VM_MAP_REMOVE_NO_FLAGS); + (map_addr + + offset + map_size), + (map_addr + + named_entry->size), + VM_MAP_REMOVE_NO_FLAGS); } } named_entry_unlock(named_entry); if (kr != KERN_SUCCESS) { - if (! (flags & VM_FLAGS_OVERWRITE)) { + if (!(flags & VM_FLAGS_OVERWRITE)) { /* deallocate the contiguous range */ (void) vm_deallocate(target_map, - map_addr, - map_size); + map_addr, + map_size); } } return kr; - } else { - unsigned int access; - vm_prot_t protections; - unsigned int wimg_mode; + unsigned int access; + vm_prot_t protections; + unsigned int wimg_mode; /* we are mapping a VM object */ @@ -4292,10 +4335,11 @@ vm_map_enter_mem_object_helper( access = GET_MAP_MEM(named_entry->protection); if (flags & (VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_RETURN_4K_DATA_ADDR)) { + VM_FLAGS_RETURN_4K_DATA_ADDR)) { offset_in_mapping = offset - vm_object_trunc_page(offset); - if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) + if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) { offset_in_mapping &= ~((signed)(0xFFF)); + } offset = vm_object_trunc_page(offset); map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset; } @@ -4308,9 +4352,10 @@ vm_map_enter_mem_object_helper( vm_object_reference_locked(object); wimg_mode = object->wimg_bits; - vm_prot_to_wimg(access, &wimg_mode); - if (object->wimg_bits != wimg_mode) + vm_prot_to_wimg(access, &wimg_mode); + if (object->wimg_bits != wimg_mode) { vm_object_change_wimg_mode(object, wimg_mode); + } vm_object_unlock(object); } @@ -4324,20 +4369,21 @@ vm_map_enter_mem_object_helper( * instead is just a raw memory object. 
*/ if (flags & (VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_RETURN_4K_DATA_ADDR)) { + VM_FLAGS_RETURN_4K_DATA_ADDR)) { panic("VM_FLAGS_RETURN_DATA_ADDR not expected for raw memory object."); } object = memory_object_to_vm_object((memory_object_t)port); - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return KERN_INVALID_OBJECT; + } vm_object_reference(object); /* wait for object (if any) to be ready */ if (object != VM_OBJECT_NULL) { if (object == kernel_object) { printf("Warning: Attempt to map kernel object" - " by a non-private kernel entity\n"); + " by a non-private kernel entity\n"); return KERN_INVALID_OBJECT; } if (!object->pager_ready) { vm_object_lock(object); while (!object->pager_ready) { vm_object_wait(object, - VM_OBJECT_EVENT_PAGER_READY, - THREAD_UNINT); + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); vm_object_lock(object); } vm_object_unlock(object); } } @@ -4361,8 +4407,8 @@ vm_map_enter_mem_object_helper( object->pager != MEMORY_OBJECT_NULL && object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { memory_object_t pager; - vm_prot_t pager_prot; - kern_return_t kr; + vm_prot_t pager_prot; + kern_return_t kr; /* * For "named" VM objects, let the pager know that the @@ -4405,13 +4451,13 @@ vm_map_enter_mem_object_helper( */ if (copy) { - vm_object_t new_object; - vm_object_offset_t new_offset; + vm_object_t new_object; + vm_object_offset_t new_offset; result = vm_object_copy_strategically(object, offset, - map_size, - &new_object, &new_offset, - &copy); + map_size, + &new_object, &new_offset, + &copy); if (result == KERN_MEMORY_RESTART_COPY) { @@ -4431,10 +4477,10 @@ vm_map_enter_mem_object_helper( new_object = object; new_offset = offset; success = vm_object_copy_quickly(&new_object, - new_offset, - map_size, - &src_needs_copy, - &copy); + new_offset, + map_size, + &src_needs_copy, + &copy); assert(success); result = KERN_SUCCESS; } @@ -4464,34 +4510,35 @@ vm_map_enter_mem_object_helper( if (fourk) { /* map this object in a "4K" pager */ result = vm_map_enter_fourk(target_map, - &map_addr, - map_size, - (vm_map_offset_t) mask, - flags, - vmk_flags, - tag, - object, - offset, - copy, - cur_protection, - max_protection, - inheritance); + &map_addr, + map_size, + (vm_map_offset_t) mask, + flags, + vmk_flags, + tag, + object, + offset, + copy, + cur_protection, + max_protection, + inheritance); } else #endif /* __arm64__ */ { result = vm_map_enter(target_map, - &map_addr, map_size, - (vm_map_offset_t)mask, - flags, - vmk_flags, - tag, - object, offset, - copy, - cur_protection, max_protection, - inheritance); - } - if (result != KERN_SUCCESS) + &map_addr, map_size, + (vm_map_offset_t)mask, + flags, + vmk_flags, + tag, + object, offset, + copy, + cur_protection, max_protection, + inheritance); + } + if (result != KERN_SUCCESS) { vm_object_deallocate(object); + } /* * Try to prefault, and do not forget to release the vm map lock. @@ -4525,9 +4572,9 @@ vm_map_enter_mem_object_helper( * something critical.
*/ kr = pmap_enter_options(target_map->pmap, - va, UPL_PHYS_PAGE(page_list, i), - cur_protection, VM_PROT_NONE, - 0, TRUE, pmap_options, NULL); + va, UPL_PHYS_PAGE(page_list, i), + cur_protection, VM_PROT_NONE, + 0, TRUE, pmap_options, NULL); if (kr != KERN_SUCCESS) { OSIncrementAtomic64(&vm_prefault_nb_bailout); if (kernel_prefault) { @@ -4547,7 +4594,7 @@ vm_map_enter_mem_object_helper( } if (flags & (VM_FLAGS_RETURN_DATA_ADDR | - VM_FLAGS_RETURN_4K_DATA_ADDR)) { + VM_FLAGS_RETURN_4K_DATA_ADDR)) { *address = map_addr + offset_in_mapping; } else { *address = map_addr; @@ -4557,37 +4604,37 @@ vm_map_enter_mem_object_helper( kern_return_t vm_map_enter_mem_object( - vm_map_t target_map, - vm_map_offset_t *address, - vm_map_size_t initial_size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t initial_size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { kern_return_t ret; ret = vm_map_enter_mem_object_helper(target_map, - address, - initial_size, - mask, - flags, - vmk_flags, - tag, - port, - offset, - copy, - cur_protection, - max_protection, - inheritance, - NULL, - 0); + address, + initial_size, + mask, + flags, + vmk_flags, + tag, + port, + offset, + copy, + cur_protection, + max_protection, + inheritance, + NULL, + 0); #if KASAN if (ret == KERN_SUCCESS && address && target_map->pmap == kernel_pmap) { @@ -4600,37 +4647,37 @@ vm_map_enter_mem_object( kern_return_t vm_map_enter_mem_object_prefault( - vm_map_t target_map, - vm_map_offset_t *address, - vm_map_size_t initial_size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - vm_prot_t cur_protection, - vm_prot_t max_protection, - upl_page_list_ptr_t page_list, - unsigned int page_list_count) + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t initial_size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + vm_prot_t cur_protection, + vm_prot_t max_protection, + upl_page_list_ptr_t page_list, + unsigned int page_list_count) { kern_return_t ret; ret = vm_map_enter_mem_object_helper(target_map, - address, - initial_size, - mask, - flags, - vmk_flags, - tag, - port, - offset, - FALSE, - cur_protection, - max_protection, - VM_INHERIT_DEFAULT, - page_list, - page_list_count); + address, + initial_size, + mask, + flags, + vmk_flags, + tag, + port, + offset, + FALSE, + cur_protection, + max_protection, + VM_INHERIT_DEFAULT, + page_list, + page_list_count); #if KASAN if (ret == KERN_SUCCESS && address && target_map->pmap == kernel_pmap) { @@ -4644,30 +4691,30 @@ vm_map_enter_mem_object_prefault( kern_return_t vm_map_enter_mem_object_control( - vm_map_t target_map, - vm_map_offset_t *address, - vm_map_size_t initial_size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - memory_object_control_t control, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_map_offset_t *address, + 
vm_map_size_t initial_size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + memory_object_control_t control, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { - vm_map_address_t map_addr; - vm_map_size_t map_size; - vm_object_t object; - vm_object_size_t size; - kern_return_t result; - memory_object_t pager; - vm_prot_t pager_prot; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + vm_object_t object; + vm_object_size_t size; + kern_return_t result; + memory_object_t pager; + vm_prot_t pager_prot; + kern_return_t kr; #if __arm64__ - boolean_t fourk = vmk_flags.vmkf_fourk; + boolean_t fourk = vmk_flags.vmkf_fourk; #endif /* __arm64__ */ /* @@ -4684,27 +4731,28 @@ vm_map_enter_mem_object_control( #if __arm64__ if (fourk) { map_addr = vm_map_trunc_page(*address, - FOURK_PAGE_MASK); + FOURK_PAGE_MASK); map_size = vm_map_round_page(initial_size, - FOURK_PAGE_MASK); + FOURK_PAGE_MASK); } else #endif /* __arm64__ */ { map_addr = vm_map_trunc_page(*address, - VM_MAP_PAGE_MASK(target_map)); + VM_MAP_PAGE_MASK(target_map)); map_size = vm_map_round_page(initial_size, - VM_MAP_PAGE_MASK(target_map)); + VM_MAP_PAGE_MASK(target_map)); } size = vm_object_round_page(initial_size); object = memory_object_control_to_vm_object(control); - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return KERN_INVALID_OBJECT; + } if (object == kernel_object) { printf("Warning: Attempt to map kernel object" - " by a non-private kernel entity\n"); + " by a non-private kernel entity\n"); return KERN_INVALID_OBJECT; } @@ -4747,12 +4795,12 @@ vm_map_enter_mem_object_control( */ if (copy) { - vm_object_t new_object; - vm_object_offset_t new_offset; + vm_object_t new_object; + vm_object_offset_t new_offset; result = vm_object_copy_strategically(object, offset, size, - &new_object, &new_offset, - &copy); + &new_object, &new_offset, + &copy); if (result == KERN_MEMORY_RESTART_COPY) { @@ -4772,9 +4820,9 @@ vm_map_enter_mem_object_control( new_object = object; new_offset = offset; success = vm_object_copy_quickly(&new_object, - new_offset, size, - &src_needs_copy, - &copy); + new_offset, size, + &src_needs_copy, + &copy); assert(success); result = KERN_SUCCESS; } @@ -4796,42 +4844,43 @@ vm_map_enter_mem_object_control( #if __arm64__ if (fourk) { result = vm_map_enter_fourk(target_map, - &map_addr, - map_size, - (vm_map_offset_t)mask, - flags, - vmk_flags, - tag, - object, offset, - copy, - cur_protection, max_protection, - inheritance); + &map_addr, + map_size, + (vm_map_offset_t)mask, + flags, + vmk_flags, + tag, + object, offset, + copy, + cur_protection, max_protection, + inheritance); } else #endif /* __arm64__ */ { result = vm_map_enter(target_map, - &map_addr, map_size, - (vm_map_offset_t)mask, - flags, - vmk_flags, - tag, - object, offset, - copy, - cur_protection, max_protection, - inheritance); - } - if (result != KERN_SUCCESS) + &map_addr, map_size, + (vm_map_offset_t)mask, + flags, + vmk_flags, + tag, + object, offset, + copy, + cur_protection, max_protection, + inheritance); + } + if (result != KERN_SUCCESS) { vm_object_deallocate(object); + } *address = map_addr; return result; } -#if VM_CPM +#if VM_CPM #ifdef MACH_ASSERT -extern pmap_paddr_t avail_start, avail_end; +extern pmap_paddr_t avail_start, avail_end; #endif /* @@ -4846,21 +4895,21 @@ extern pmap_paddr_t avail_start, avail_end; */ kern_return_t vm_map_enter_cpm( - vm_map_t map, - vm_map_offset_t *addr, -
vm_map_size_t size, - int flags) + vm_map_t map, + vm_map_offset_t *addr, + vm_map_size_t size, + int flags) { - vm_object_t cpm_obj; - pmap_t pmap; - vm_page_t m, pages; - kern_return_t kr; - vm_map_offset_t va, start, end, offset; -#if MACH_ASSERT - vm_map_offset_t prev_addr = 0; -#endif /* MACH_ASSERT */ - - boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); + vm_object_t cpm_obj; + pmap_t pmap; + vm_page_t m, pages; + kern_return_t kr; + vm_map_offset_t va, start, end, offset; +#if MACH_ASSERT + vm_map_offset_t prev_addr = 0; +#endif /* MACH_ASSERT */ + + boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); vm_tag_t tag; VM_GET_FLAGS_ALIAS(flags, tag); @@ -4869,24 +4918,27 @@ vm_map_enter_cpm( *addr = 0; return KERN_SUCCESS; } - if (anywhere) + if (anywhere) { *addr = vm_map_min(map); - else + } else { *addr = vm_map_trunc_page(*addr, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); + } size = vm_map_round_page(size, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); /* * LP64todo - cpm_allocate should probably allow * allocations of >4GB, but not with the current * algorithm, so just cast down the size for now. */ - if (size > VM_MAX_ADDRESS) + if (size > VM_MAX_ADDRESS) { return KERN_RESOURCE_SHORTAGE; + } if ((kr = cpm_allocate(CAST_DOWN(vm_size_t, size), - &pages, 0, 0, TRUE, flags)) != KERN_SUCCESS) + &pages, 0, 0, TRUE, flags)) != KERN_SUCCESS) { return kr; + } cpm_obj = vm_object_allocate((vm_object_size_t)size); assert(cpm_obj != VM_OBJECT_NULL); @@ -4913,7 +4965,7 @@ vm_map_enter_cpm( assert(!m->vmp_tabled); assert(VM_PAGE_WIRED(m)); assert(m->vmp_busy); - assert(VM_PAGE_GET_PHYS_PAGE(m)>=(avail_start>>PAGE_SHIFT) && VM_PAGE_GET_PHYS_PAGE(m)<=(avail_end>>PAGE_SHIFT)); + assert(VM_PAGE_GET_PHYS_PAGE(m) >= (avail_start >> PAGE_SHIFT) && VM_PAGE_GET_PHYS_PAGE(m) <= (avail_end >> PAGE_SHIFT)); m->vmp_busy = FALSE; vm_page_insert(m, cpm_obj, offset); @@ -4983,8 +5035,8 @@ vm_map_enter_cpm( * again. */ for (offset = 0, va = start; offset < size; - va += PAGE_SIZE, offset += PAGE_SIZE) { - int type_of_fault; + va += PAGE_SIZE, offset += PAGE_SIZE) { + int type_of_fault; vm_object_lock(cpm_obj); m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); @@ -4995,20 +5047,20 @@ vm_map_enter_cpm( type_of_fault = DBG_ZERO_FILL_FAULT; vm_fault_enter(m, pmap, va, VM_PROT_ALL, VM_PROT_WRITE, - VM_PAGE_WIRED(m), - FALSE, /* change_wiring */ - VM_KERN_MEMORY_NONE, /* tag - not wiring */ - FALSE, /* no_cache */ - FALSE, /* cs_bypass */ - 0, /* user_tag */ - 0, /* pmap_options */ - NULL, /* need_retry */ - &type_of_fault); + VM_PAGE_WIRED(m), + FALSE, /* change_wiring */ + VM_KERN_MEMORY_NONE, /* tag - not wiring */ + FALSE, /* no_cache */ + FALSE, /* cs_bypass */ + 0, /* user_tag */ + 0, /* pmap_options */ + NULL, /* need_retry */ + &type_of_fault); vm_object_unlock(cpm_obj); } -#if MACH_ASSERT +#if MACH_ASSERT /* * Verify ordering in address space. 
*/ @@ -5016,9 +5068,10 @@ vm_map_enter_cpm( vm_object_lock(cpm_obj); m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); vm_object_unlock(cpm_obj); - if (m == VM_PAGE_NULL) + if (m == VM_PAGE_NULL) { panic("vm_allocate_cpm: obj %p off 0x%llx no page", - cpm_obj, (uint64_t)offset); + cpm_obj, (uint64_t)offset); + } assert(m->vmp_tabled); assert(!m->vmp_busy); assert(!m->vmp_wanted); @@ -5033,7 +5086,7 @@ vm_map_enter_cpm( if (offset != 0) { if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) { printf("start 0x%llx end 0x%llx va 0x%llx\n", - (uint64_t)start, (uint64_t)end, (uint64_t)va); + (uint64_t)start, (uint64_t)end, (uint64_t)va); printf("obj %p off 0x%llx\n", cpm_obj, (uint64_t)offset); printf("m %p prev_address 0x%llx\n", m, (uint64_t)prev_addr); panic("vm_allocate_cpm: pages not contig!"); @@ -5041,7 +5094,7 @@ vm_map_enter_cpm( } prev_addr = VM_PAGE_GET_PHYS_PAGE(m); } -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ vm_object_deallocate(cpm_obj); /* kill extra ref */ @@ -5049,7 +5102,7 @@ vm_map_enter_cpm( } -#else /* VM_CPM */ +#else /* VM_CPM */ /* * Interface is defined in all cases, but unless the kernel @@ -5059,10 +5112,10 @@ vm_map_enter_cpm( kern_return_t vm_map_enter_cpm( - __unused vm_map_t map, - __unused vm_map_offset_t *addr, - __unused vm_map_size_t size, - __unused int flags) + __unused vm_map_t map, + __unused vm_map_offset_t *addr, + __unused vm_map_size_t size, + __unused int flags) { return KERN_FAILURE; } @@ -5077,10 +5130,10 @@ vm_map_enter_cpm( static void vm_map_clip_unnest( - vm_map_t map, - vm_map_entry_t entry, - vm_map_offset_t start_unnest, - vm_map_offset_t end_unnest) + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t start_unnest, + vm_map_offset_t end_unnest) { vm_map_offset_t old_start_unnest = start_unnest; vm_map_offset_t old_end_unnest = end_unnest; @@ -5101,26 +5154,26 @@ vm_map_clip_unnest( assert(VME_SUBMAP(entry)->is_nested_map); assert(!VME_SUBMAP(entry)->disable_vmentry_reuse); log_unnest_badness(map, - old_start_unnest, - old_end_unnest, - VME_SUBMAP(entry)->is_nested_map, - (entry->vme_start + - VME_SUBMAP(entry)->lowest_unnestable_start - - VME_OFFSET(entry))); + old_start_unnest, + old_end_unnest, + VME_SUBMAP(entry)->is_nested_map, + (entry->vme_start + + VME_SUBMAP(entry)->lowest_unnestable_start - + VME_OFFSET(entry))); } if (entry->vme_start > start_unnest || entry->vme_end < end_unnest) { panic("vm_map_clip_unnest(0x%llx,0x%llx): " - "bad nested entry: start=0x%llx end=0x%llx\n", - (long long)start_unnest, (long long)end_unnest, - (long long)entry->vme_start, (long long)entry->vme_end); + "bad nested entry: start=0x%llx end=0x%llx\n", + (long long)start_unnest, (long long)end_unnest, + (long long)entry->vme_start, (long long)entry->vme_end); } if (start_unnest > entry->vme_start) { _vm_map_clip_start(&map->hdr, - entry, - start_unnest); + entry, + start_unnest); if (map->holelistenabled) { vm_map_store_update_first_free(map, NULL, FALSE); } else { @@ -5129,8 +5182,8 @@ vm_map_clip_unnest( } if (entry->vme_end > end_unnest) { _vm_map_clip_end(&map->hdr, - entry, - end_unnest); + entry, + end_unnest); if (map->holelistenabled) { vm_map_store_update_first_free(map, NULL, FALSE); } else { @@ -5139,8 +5192,8 @@ vm_map_clip_unnest( } pmap_unnest(map->pmap, - entry->vme_start, - entry->vme_end - entry->vme_start); + entry->vme_start, + entry->vme_end - entry->vme_start); if ((map->mapped_in_other_pmaps) && (map->map_refcnt)) { /* clean up parent map/maps */ vm_map_submap_pmap_clean( @@ -5155,7 +5208,7 @@ vm_map_clip_unnest( 
VME_ALIAS_SET(entry, VM_MEMORY_UNSHARED_PMAP); } } -#endif /* NO_NESTED_PMAP */ +#endif /* NO_NESTED_PMAP */ /* * vm_map_clip_start: [ internal use only ] @@ -5166,15 +5219,15 @@ vm_map_clip_unnest( */ void vm_map_clip_start( - vm_map_t map, - vm_map_entry_t entry, - vm_map_offset_t startaddr) + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t startaddr) { #ifndef NO_NESTED_PMAP if (entry->is_sub_map && entry->use_pmap && startaddr >= entry->vme_start) { - vm_map_offset_t start_unnest, end_unnest; + vm_map_offset_t start_unnest, end_unnest; /* * Make sure "startaddr" is no longer in a nested range @@ -5193,8 +5246,8 @@ vm_map_clip_start( !entry->is_sub_map && VME_OBJECT(entry)->phys_contiguous) { pmap_remove(map->pmap, - (addr64_t)(entry->vme_start), - (addr64_t)(entry->vme_end)); + (addr64_t)(entry->vme_start), + (addr64_t)(entry->vme_end)); } if (entry->vme_atomic) { panic("Attempting to clip an atomic VM entry! (map: %p, entry: %p)\n", map, entry); @@ -5221,7 +5274,7 @@ vm_map_clip_start( #define vm_map_copy_clip_start(copy, entry, startaddr) \ MACRO_BEGIN \ if ((startaddr) > (entry)->vme_start) \ - _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \ + _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \ MACRO_END /* @@ -5230,11 +5283,11 @@ vm_map_clip_start( */ static void _vm_map_clip_start( - struct vm_map_header *map_header, - vm_map_entry_t entry, - vm_map_offset_t start) + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_map_offset_t start) { - vm_map_entry_t new_entry; + vm_map_entry_t new_entry; /* * Split off the front portion -- @@ -5246,7 +5299,7 @@ _vm_map_clip_start( if (entry->map_aligned) { assert(VM_MAP_PAGE_ALIGNED(start, - VM_MAP_HDR_PAGE_MASK(map_header))); + VM_MAP_HDR_PAGE_MASK(map_header))); } new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); @@ -5260,10 +5313,11 @@ _vm_map_clip_start( _vm_map_store_entry_link(map_header, entry->vme_prev, new_entry); - if (entry->is_sub_map) + if (entry->is_sub_map) { vm_map_reference(VME_SUBMAP(new_entry)); - else + } else { vm_object_reference(VME_OBJECT(new_entry)); + } } @@ -5276,9 +5330,9 @@ _vm_map_clip_start( */ void vm_map_clip_end( - vm_map_t map, - vm_map_entry_t entry, - vm_map_offset_t endaddr) + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t endaddr) { if (endaddr > entry->vme_end) { /* @@ -5289,7 +5343,7 @@ vm_map_clip_end( } #ifndef NO_NESTED_PMAP if (entry->is_sub_map && entry->use_pmap) { - vm_map_offset_t start_unnest, end_unnest; + vm_map_offset_t start_unnest, end_unnest; /* * Make sure the range between the start of this entry and @@ -5300,8 +5354,8 @@ vm_map_clip_end( */ start_unnest = entry->vme_start; end_unnest = - (endaddr + pmap_nesting_size_min - 1) & - ~(pmap_nesting_size_min - 1); + (endaddr + pmap_nesting_size_min - 1) & + ~(pmap_nesting_size_min - 1); vm_map_clip_unnest(map, entry, start_unnest, end_unnest); } #endif /* NO_NESTED_PMAP */ @@ -5310,8 +5364,8 @@ vm_map_clip_end( !entry->is_sub_map && VME_OBJECT(entry)->phys_contiguous) { pmap_remove(map->pmap, - (addr64_t)(entry->vme_start), - (addr64_t)(entry->vme_end)); + (addr64_t)(entry->vme_start), + (addr64_t)(entry->vme_end)); } if (entry->vme_atomic) { panic("Attempting to clip an atomic VM entry! 
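The clip helpers re-indented here split one map entry into two at a boundary; for a start clip, the surviving (rear) entry's object offset must advance by the clipped amount so it still maps the same backing bytes. The interval arithmetic in isolation, assuming a simplified entry with just start/end/offset fields (the kernel entry carries far more state):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry { uint64_t start, end, offset; };

    /* Split e at addr (start < addr < end): the front half keeps the
     * original offset, and e's offset advances by the clipped span,
     * as in a _vm_map_clip_start-style split. */
    static struct entry clip_start(struct entry *e, uint64_t addr)
    {
        assert(e->start < addr && addr < e->end);
        struct entry front = { e->start, addr, e->offset };
        e->offset += addr - e->start;
        e->start = addr;
        return front;
    }

    int main(void)
    {
        struct entry e = { 0x1000, 0x5000, 0 };
        struct entry f = clip_start(&e, 0x2000);
        printf("front [0x%llx,0x%llx) off 0x%llx; rest [0x%llx,0x%llx) off 0x%llx\n",
            (unsigned long long)f.start, (unsigned long long)f.end,
            (unsigned long long)f.offset, (unsigned long long)e.start,
            (unsigned long long)e.end, (unsigned long long)e.offset);
        return 0;
    }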
(map: %p, entry: %p)\n", map, entry); @@ -5337,7 +5391,7 @@ vm_map_clip_end( #define vm_map_copy_clip_end(copy, entry, endaddr) \ MACRO_BEGIN \ if ((endaddr) < (entry)->vme_end) \ - _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \ + _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \ MACRO_END /* @@ -5346,11 +5400,11 @@ vm_map_clip_end( */ static void _vm_map_clip_end( - struct vm_map_header *map_header, - vm_map_entry_t entry, - vm_map_offset_t end) + struct vm_map_header *map_header, + vm_map_entry_t entry, + vm_map_offset_t end) { - vm_map_entry_t new_entry; + vm_map_entry_t new_entry; /* * Create a new entry and insert it @@ -5359,7 +5413,7 @@ _vm_map_clip_end( if (entry->map_aligned) { assert(VM_MAP_PAGE_ALIGNED(end, - VM_MAP_HDR_PAGE_MASK(map_header))); + VM_MAP_HDR_PAGE_MASK(map_header))); } new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); @@ -5368,15 +5422,16 @@ _vm_map_clip_end( assert(entry->vme_start < end); new_entry->vme_start = entry->vme_end = end; VME_OFFSET_SET(new_entry, - VME_OFFSET(new_entry) + (end - entry->vme_start)); + VME_OFFSET(new_entry) + (end - entry->vme_start)); assert(new_entry->vme_start < new_entry->vme_end); _vm_map_store_entry_link(map_header, entry, new_entry); - if (entry->is_sub_map) + if (entry->is_sub_map) { vm_map_reference(VME_SUBMAP(new_entry)); - else + } else { vm_object_reference(VME_OBJECT(new_entry)); + } } @@ -5386,14 +5441,14 @@ _vm_map_clip_end( * Asserts that the starting and ending region * addresses fall within the valid range of the map. */ -#define VM_MAP_RANGE_CHECK(map, start, end) \ - MACRO_BEGIN \ - if (start < vm_map_min(map)) \ - start = vm_map_min(map); \ - if (end > vm_map_max(map)) \ - end = vm_map_max(map); \ - if (start > end) \ - start = end; \ +#define VM_MAP_RANGE_CHECK(map, start, end) \ + MACRO_BEGIN \ + if (start < vm_map_min(map)) \ + start = vm_map_min(map); \ + if (end > vm_map_max(map)) \ + end = vm_map_max(map); \ + if (start > end) \ + start = end; \ MACRO_END /* @@ -5412,49 +5467,54 @@ _vm_map_clip_end( */ static boolean_t vm_map_range_check( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_entry_t *entry) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_entry_t *entry) { - vm_map_entry_t cur; - vm_map_offset_t prev; + vm_map_entry_t cur; + vm_map_offset_t prev; /* - * Basic sanity checks first + * Basic sanity checks first */ - if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) - return (FALSE); + if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) { + return FALSE; + } /* - * Check first if the region starts within a valid + * Check first if the region starts within a valid * mapping for the map. */ - if (!vm_map_lookup_entry(map, start, &cur)) - return (FALSE); + if (!vm_map_lookup_entry(map, start, &cur)) { + return FALSE; + } /* * Optimize for the case that the region is contained * in a single map entry. */ - if (entry != (vm_map_entry_t *) NULL) + if (entry != (vm_map_entry_t *) NULL) { *entry = cur; - if (end <= cur->vme_end) - return (TRUE); + } + if (end <= cur->vme_end) { + return TRUE; + } /* - * If the region is not wholly contained within a - * single entry, walk the entries looking for holes. + * If the region is not wholly contained within a + * single entry, walk the entries looking for holes. 
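VM_MAP_RANGE_CHECK, re-tabbed above, silently clamps the request into the map's valid range rather than failing: start is raised to the map minimum, end lowered to the maximum, and an inverted range collapses to empty. The macro's effect as a plain function (a sketch, with the map bounds passed explicitly):

    #include <stdint.h>
    #include <stdio.h>

    /* Clamp [*start, *end) into [min, max), collapsing inverted
     * ranges, exactly as VM_MAP_RANGE_CHECK rewrites its arguments. */
    static void range_check(uint64_t *start, uint64_t *end,
        uint64_t min, uint64_t max)
    {
        if (*start < min) {
            *start = min;
        }
        if (*end > max) {
            *end = max;
        }
        if (*start > *end) {
            *start = *end;      /* empty range, not an error */
        }
    }

    int main(void)
    {
        uint64_t s = 0x0, e = 0xffff0000ULL;
        range_check(&s, &e, 0x1000, 0x80000000ULL);
        printf("[0x%llx, 0x%llx)\n",
            (unsigned long long)s, (unsigned long long)e);
        return 0;
    }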
*/ prev = cur->vme_end; cur = cur->vme_next; while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) { - if (end <= cur->vme_end) - return (TRUE); + if (end <= cur->vme_end) { + return TRUE; + } prev = cur->vme_end; cur = cur->vme_next; } - return (FALSE); + return FALSE; } /* @@ -5477,23 +5537,23 @@ vm_map_range_check( */ kern_return_t vm_map_submap( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_t submap, - vm_map_offset_t offset, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_t submap, + vm_map_offset_t offset, #ifdef NO_NESTED_PMAP __unused -#endif /* NO_NESTED_PMAP */ - boolean_t use_pmap) +#endif /* NO_NESTED_PMAP */ + boolean_t use_pmap) { - vm_map_entry_t entry; - kern_return_t result = KERN_INVALID_ARGUMENT; - vm_object_t object; + vm_map_entry_t entry; + kern_return_t result = KERN_INVALID_ARGUMENT; + vm_object_t object; vm_map_lock(map); - if (! vm_map_lookup_entry(map, start, &entry)) { + if (!vm_map_lookup_entry(map, start, &entry)) { entry = entry->vme_next; } @@ -5537,35 +5597,36 @@ vm_map_submap( #ifndef NO_NESTED_PMAP if (use_pmap) { /* nest if platform code will allow */ - if(submap->pmap == NULL) { + if (submap->pmap == NULL) { ledger_t ledger = map->pmap->ledger; submap->pmap = pmap_create(ledger, - (vm_map_size_t) 0, FALSE); - if(submap->pmap == PMAP_NULL) { + (vm_map_size_t) 0, FALSE); + if (submap->pmap == PMAP_NULL) { vm_map_unlock(map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } -#if defined(__arm__) || defined(__arm64__) +#if defined(__arm__) || defined(__arm64__) pmap_set_nested(submap->pmap); #endif } result = pmap_nest(map->pmap, - (VME_SUBMAP(entry))->pmap, - (addr64_t)start, - (addr64_t)start, - (uint64_t)(end - start)); - if(result) + (VME_SUBMAP(entry))->pmap, + (addr64_t)start, + (addr64_t)start, + (uint64_t)(end - start)); + if (result) { panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result); + } entry->use_pmap = TRUE; } -#else /* NO_NESTED_PMAP */ +#else /* NO_NESTED_PMAP */ pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end); -#endif /* NO_NESTED_PMAP */ +#endif /* NO_NESTED_PMAP */ result = KERN_SUCCESS; } vm_map_unlock(map); - return(result); + return result; } /* @@ -5578,27 +5639,27 @@ vm_map_submap( */ kern_return_t vm_map_protect( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t new_prot, - boolean_t set_max) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t new_prot, + boolean_t set_max) { - vm_map_entry_t current; - vm_map_offset_t prev; - vm_map_entry_t entry; - vm_prot_t new_max; - int pmap_options = 0; - kern_return_t kr; + vm_map_entry_t current; + vm_map_offset_t prev; + vm_map_entry_t entry; + vm_prot_t new_max; + int pmap_options = 0; + kern_return_t kr; XPR(XPR_VM_MAP, "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d", map, start, end, new_prot, set_max); if (new_prot & VM_PROT_COPY) { - vm_map_offset_t new_start; - vm_prot_t cur_prot, max_prot; - vm_map_kernel_flags_t kflags; + vm_map_offset_t new_start; + vm_prot_t cur_prot, max_prot; + vm_map_kernel_flags_t kflags; /* LP64todo - see below */ if (start >= map->max_offset) { @@ -5610,15 +5671,15 @@ vm_map_protect( map != kernel_map && cs_process_enforcement(NULL)) { DTRACE_VM3(cs_wx, - uint64_t, (uint64_t) start, - uint64_t, (uint64_t) end, - vm_prot_t, new_prot); + uint64_t, (uint64_t) start, + uint64_t, (uint64_t) end, + vm_prot_t, new_prot); printf("CODE SIGNING: %d[%s] %s can't have both write and exec at the same time\n", - proc_selfpid(), - 
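vm_map_range_check's walk just above accepts a range only if consecutive entries tile it with no hole: it tracks prev = cur->vme_end and stops as soon as the next entry does not begin exactly there. The same walk over a sorted array of [start, end) spans (a sketch; the kernel walks a linked entry list and also returns the first entry):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct span { uint64_t start, end; };

    /* True when [start, end) is fully covered by back-to-back spans,
     * mirroring the prev/cur hole check in vm_map_range_check. */
    static bool range_covered(const struct span *s, size_t n,
        uint64_t start, uint64_t end)
    {
        size_t i = 0;
        while (i < n && !(s[i].start <= start && start < s[i].end)) {
            i++;
        }
        if (i == n) {
            return false;               /* start not mapped at all */
        }
        uint64_t prev = s[i].end;
        while (end > prev) {
            i++;
            if (i == n || s[i].start != prev) {
                return false;           /* found a hole */
            }
            prev = s[i].end;
        }
        return true;
    }

    int main(void)
    {
        struct span m[] = { {0x1000, 0x3000}, {0x3000, 0x6000}, {0x7000, 0x8000} };
        printf("%d %d\n", range_covered(m, 3, 0x1800, 0x5000),  /* 1 */
            range_covered(m, 3, 0x2000, 0x7800));               /* 0 */
        return 0;
    }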
(current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - __FUNCTION__); + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); return KERN_PROTECTION_FAILURE; } #endif /* VM_PROTECT_WX_FAIL */ @@ -5640,18 +5701,18 @@ vm_map_protect( kflags.vmkf_overwrite_immutable = TRUE; new_start = start; kr = vm_map_remap(map, - &new_start, - end - start, - 0, /* mask */ - VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, - kflags, - 0, - map, - start, - TRUE, /* copy-on-write remapping! */ - &cur_prot, - &max_prot, - VM_INHERIT_DEFAULT); + &new_start, + end - start, + 0, /* mask */ + VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, + kflags, + 0, + map, + start, + TRUE, /* copy-on-write remapping! */ + &cur_prot, + &max_prot, + VM_INHERIT_DEFAULT); if (kr != KERN_SUCCESS) { return kr; } @@ -5666,27 +5727,28 @@ vm_map_protect( */ if (start >= map->max_offset) { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - while(1) { + while (1) { /* - * Lookup the entry. If it doesn't start in a valid + * Lookup the entry. If it doesn't start in a valid * entry, return an error. */ - if (! vm_map_lookup_entry(map, start, &entry)) { + if (!vm_map_lookup_entry(map, start, &entry)) { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - if (entry->superpage_size && (start & (SUPERPAGE_SIZE-1))) { /* extend request to whole entry */ + if (entry->superpage_size && (start & (SUPERPAGE_SIZE - 1))) { /* extend request to whole entry */ start = SUPERPAGE_ROUND_DOWN(start); continue; } break; - } - if (entry->superpage_size) - end = SUPERPAGE_ROUND_UP(end); + } + if (entry->superpage_size) { + end = SUPERPAGE_ROUND_UP(end); + } /* * Make a first pass to check for protection and address @@ -5696,20 +5758,19 @@ vm_map_protect( current = entry; prev = current->vme_start; while ((current != vm_map_to_entry(map)) && - (current->vme_start < end)) { - + (current->vme_start < end)) { /* * If there is a hole, return an error. */ if (current->vme_start != prev) { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } new_max = current->max_protection; if ((new_prot & new_max) != new_prot) { vm_map_unlock(map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } if ((new_prot & VM_PROT_WRITE) && @@ -5720,15 +5781,15 @@ vm_map_protect( #endif /* !CONFIG_EMBEDDED */ !(current->used_for_jit)) { DTRACE_VM3(cs_wx, - uint64_t, (uint64_t) current->vme_start, - uint64_t, (uint64_t) current->vme_end, - vm_prot_t, new_prot); + uint64_t, (uint64_t) current->vme_start, + uint64_t, (uint64_t) current->vme_end, + vm_prot_t, new_prot); printf("CODE SIGNING: %d[%s] %s can't have both write and exec at the same time\n", - proc_selfpid(), - (current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - __FUNCTION__); + proc_selfpid(), + (current_task()->bsd_info + ? 
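The CODE SIGNING diagnostic reindented above fires when a caller asks for a protection that is simultaneously writable and executable; depending on VM_PROTECT_WX_FAIL the request either fails outright or has execute stripped. The bit test in isolation (the flag values match the usual Mach vm_prot_t layout, redefined here so the sketch stands alone):

    #include <stdio.h>

    #define VM_PROT_READ    0x01
    #define VM_PROT_WRITE   0x02
    #define VM_PROT_EXECUTE 0x04

    /* Enforce W^X: write+execute together either fails or loses
     * execute, as in vm_map_protect's code-signing path. */
    static int enforce_wx(int *prot, int fail_hard)
    {
        if ((*prot & VM_PROT_WRITE) && (*prot & VM_PROT_EXECUTE)) {
            if (fail_hard) {
                return -1;              /* KERN_PROTECTION_FAILURE analogue */
            }
            *prot &= ~VM_PROT_EXECUTE;  /* soft policy: drop execute */
        }
        return 0;
    }

    int main(void)
    {
        int p = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
        enforce_wx(&p, 0);
        printf("prot after: 0x%x\n", p);    /* 0x3: rw-, execute gone */
        return 0;
    }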
proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); new_prot &= ~VM_PROT_EXECUTE; #if VM_PROTECT_WX_FAIL vm_map_unlock(map); @@ -5746,7 +5807,7 @@ vm_map_protect( if ((new_prot & VM_PROT_EXECUTE) || ((current->protection & VM_PROT_EXECUTE) && (new_prot & VM_PROT_WRITE))) { vm_map_unlock(map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } } @@ -5763,8 +5824,8 @@ vm_map_protect( if (prev_entry != vm_map_to_entry(map) && !prev_entry->map_aligned && (vm_map_round_page(prev_entry->vme_end, - VM_MAP_PAGE_MASK(map)) - == end)) { + VM_MAP_PAGE_MASK(map)) + == end)) { /* * The last entry in our range is not "map-aligned" * but it would have reached all the way to "end" @@ -5778,7 +5839,7 @@ vm_map_protect( if (end > prev) { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } /* @@ -5794,9 +5855,8 @@ vm_map_protect( } while ((current != vm_map_to_entry(map)) && - (current->vme_start < end)) { - - vm_prot_t old_prot; + (current->vme_start < end)) { + vm_prot_t old_prot; vm_map_clip_end(map, current, end); @@ -5832,14 +5892,15 @@ vm_map_protect( prot = current->protection; if (current->is_sub_map || (VME_OBJECT(current) == NULL) || (VME_OBJECT(current) != compressor_object)) { - prot &= ~VM_PROT_WRITE; - } else { - assert(!VME_OBJECT(current)->code_signed); - assert(VME_OBJECT(current)->copy_strategy == MEMORY_OBJECT_COPY_NONE); + prot &= ~VM_PROT_WRITE; + } else { + assert(!VME_OBJECT(current)->code_signed); + assert(VME_OBJECT(current)->copy_strategy == MEMORY_OBJECT_COPY_NONE); } - if (override_nx(map, VME_ALIAS(current)) && prot) - prot |= VM_PROT_EXECUTE; + if (override_nx(map, VME_ALIAS(current)) && prot) { + prot |= VM_PROT_EXECUTE; + } #if CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) if (!(old_prot & VM_PROT_EXECUTE) && @@ -5853,7 +5914,7 @@ vm_map_protect( if (pmap_has_prot_policy(prot)) { if (current->wired_count) { panic("vm_map_protect(%p,0x%llx,0x%llx) new=0x%x wired=%x\n", - map, (uint64_t)current->vme_start, (uint64_t)current->vme_end, prot, current->wired_count); + map, (uint64_t)current->vme_start, (uint64_t)current->vme_end, prot, current->wired_count); } /* If the pmap layer cares about this @@ -5874,9 +5935,9 @@ vm_map_protect( if (current->is_sub_map && current->use_pmap) { pmap_protect(VME_SUBMAP(current)->pmap, - current->vme_start, - current->vme_end, - prot); + current->vme_start, + current->vme_end, + prot); } else { if (prot & VM_PROT_WRITE) { if (VME_OBJECT(current) == compressor_object) { @@ -5893,11 +5954,11 @@ vm_map_protect( } pmap_protect_options(map->pmap, - current->vme_start, - current->vme_end, - prot, - pmap_options, - NULL); + current->vme_start, + current->vme_end, + prot, + pmap_options, + NULL); } } current = current->vme_next; @@ -5905,13 +5966,13 @@ vm_map_protect( current = entry; while ((current != vm_map_to_entry(map)) && - (current->vme_start <= end)) { + (current->vme_start <= end)) { vm_map_simplify_entry(map, current); current = current->vme_next; } vm_map_unlock(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -5924,13 +5985,13 @@ vm_map_protect( */ kern_return_t vm_map_inherit( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_inherit_t new_inheritance) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_inherit_t new_inheritance) { - vm_map_entry_t entry; - vm_map_entry_t temp_entry; + vm_map_entry_t entry; + vm_map_entry_t temp_entry; vm_map_lock(map); @@ -5938,8 +5999,7 @@ vm_map_inherit( if (vm_map_lookup_entry(map, start, 
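In the pmap-update pass above, the protection actually pushed to the pmap drops the write bit for everything except the compressor object, so the first write faults and copy-on-write resolves it lazily; override_nx can then re-add execute when any access remains. A compressed sketch of those two fixups (the compressor asserts and pmap_options plumbing are deliberately left out):

    #include <stdio.h>

    #define VM_PROT_READ    0x01
    #define VM_PROT_WRITE   0x02
    #define VM_PROT_EXECUTE 0x04

    /* Compute the pmap-visible protection for an entry, mirroring the
     * write-strip and override_nx steps in vm_map_protect's loop. */
    static int pmap_visible_prot(int prot, int is_compressor, int override_nx)
    {
        if (!is_compressor) {
            prot &= ~VM_PROT_WRITE; /* first write faults; COW fixes it */
        }
        if (override_nx && prot) {
            prot |= VM_PROT_EXECUTE;
        }
        return prot;
    }

    int main(void)
    {
        printf("0x%x\n",
            pmap_visible_prot(VM_PROT_READ | VM_PROT_WRITE, 0, 0)); /* 0x1 */
        return 0;
    }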
&temp_entry)) { entry = temp_entry; - } - else { + } else { temp_entry = temp_entry->vme_next; entry = temp_entry; } @@ -5947,10 +6007,10 @@ vm_map_inherit( /* first check entire range for submaps which can't support the */ /* given inheritance. */ while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { - if(entry->is_sub_map) { - if(new_inheritance == VM_INHERIT_COPY) { + if (entry->is_sub_map) { + if (new_inheritance == VM_INHERIT_COPY) { vm_map_unlock(map); - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } } @@ -5976,7 +6036,7 @@ vm_map_inherit( } vm_map_unlock(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -5986,11 +6046,11 @@ vm_map_inherit( static kern_return_t add_wire_counts( - vm_map_t map, - vm_map_entry_t entry, - boolean_t user_wire) + vm_map_t map, + vm_map_entry_t entry, + boolean_t user_wire) { - vm_map_size_t size; + vm_map_size_t size; if (user_wire) { unsigned int total_wire_count = vm_page_wire_count + vm_lopage_free_count; @@ -6011,36 +6071,38 @@ add_wire_counts( * limit, then we fail. */ - if(size + map->user_wire_size > MIN(map->user_wire_limit, vm_user_wire_limit) || - size + ptoa_64(total_wire_count) > vm_global_user_wire_limit || - size + ptoa_64(total_wire_count) > max_mem - vm_global_no_user_wire_amount) + if (size + map->user_wire_size > MIN(map->user_wire_limit, vm_user_wire_limit) || + size + ptoa_64(total_wire_count) > vm_global_user_wire_limit || + size + ptoa_64(total_wire_count) > max_mem - vm_global_no_user_wire_amount) { return KERN_RESOURCE_SHORTAGE; + } /* * The first time the user wires an entry, we also increment the wired_count and add this to * the total that has been wired in the map. */ - if (entry->wired_count >= MAX_WIRE_COUNT) + if (entry->wired_count >= MAX_WIRE_COUNT) { return KERN_FAILURE; + } entry->wired_count++; map->user_wire_size += size; } - if (entry->user_wired_count >= MAX_WIRE_COUNT) + if (entry->user_wired_count >= MAX_WIRE_COUNT) { return KERN_FAILURE; + } entry->user_wired_count++; - } else { - /* * The kernel's wiring the memory. Just bump the count and continue. */ - if (entry->wired_count >= MAX_WIRE_COUNT) + if (entry->wired_count >= MAX_WIRE_COUNT) { panic("vm_map_wire: too many wirings"); + } entry->wired_count++; } @@ -6054,19 +6116,16 @@ add_wire_counts( static void subtract_wire_counts( - vm_map_t map, - vm_map_entry_t entry, - boolean_t user_wire) + vm_map_t map, + vm_map_entry_t entry, + boolean_t user_wire) { - if (user_wire) { - /* * We're unwiring memory at the request of the user. See if we're removing the last user wire reference. */ if (entry->user_wired_count == 1) { - /* * We're removing the last user wire reference. Decrement the wired_count and the total * user wired memory for this map. @@ -6079,9 +6138,7 @@ subtract_wire_counts( assert(entry->user_wired_count >= 1); entry->user_wired_count--; - } else { - /* * The kernel is unwiring the memory. Just update the count. 
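add_wire_counts, reformatted above, refuses a user wire when the new size would push the map past MIN(map->user_wire_limit, vm_user_wire_limit) or push the system past its global ceilings, and only then bumps the per-entry counts. The admission check as a standalone function (the limit values here are arbitrary stand-ins for the kernel's sysctl-backed limits):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in limits; the kernel consults per-map and global values. */
    #define MAP_WIRE_LIMIT    (64ULL << 20)
    #define GLOBAL_WIRE_LIMIT (512ULL << 20)

    /* May this map wire `size` more bytes for a user request?
     * Mirrors the shape of add_wire_counts' shortage test. */
    static bool may_user_wire(uint64_t size, uint64_t map_wired,
        uint64_t global_wired)
    {
        if (size + map_wired > MAP_WIRE_LIMIT) {
            return false;   /* KERN_RESOURCE_SHORTAGE analogue */
        }
        if (size + global_wired > GLOBAL_WIRE_LIMIT) {
            return false;
        }
        return true;
    }

    int main(void)
    {
        printf("%d\n", may_user_wire(1ULL << 20, 10ULL << 20, 100ULL << 20));
        return 0;
    }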
*/ @@ -6108,29 +6165,29 @@ int cs_executable_wire = 0; */ static kern_return_t vm_map_wire_nested( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t caller_prot, - vm_tag_t tag, - boolean_t user_wire, - pmap_t map_pmap, - vm_map_offset_t pmap_addr, - ppnum_t *physpage_p) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire, + pmap_t map_pmap, + vm_map_offset_t pmap_addr, + ppnum_t *physpage_p) { - vm_map_entry_t entry; - vm_prot_t access_type; - struct vm_map_entry *first_entry, tmp_entry; - vm_map_t real_map; - vm_map_offset_t s,e; - kern_return_t rc; - boolean_t need_wakeup; - boolean_t main_map = FALSE; - wait_interrupt_t interruptible_state; - thread_t cur_thread; - unsigned int last_timestamp; - vm_map_size_t size; - boolean_t wire_and_extract; + vm_map_entry_t entry; + vm_prot_t access_type; + struct vm_map_entry *first_entry, tmp_entry; + vm_map_t real_map; + vm_map_offset_t s, e; + kern_return_t rc; + boolean_t need_wakeup; + boolean_t main_map = FALSE; + wait_interrupt_t interruptible_state; + thread_t cur_thread; + unsigned int last_timestamp; + vm_map_size_t size; + boolean_t wire_and_extract; access_type = (caller_prot & VM_PROT_ALL); @@ -6149,8 +6206,9 @@ vm_map_wire_nested( } vm_map_lock(map); - if(map_pmap == NULL) + if (map_pmap == NULL) { main_map = TRUE; + } last_timestamp = map->timestamp; VM_MAP_RANGE_CHECK(map, start, end); @@ -6193,8 +6251,9 @@ vm_map_wire_nested( /* "e" is how far we want to wire in this entry */ e = entry->vme_end; - if (e > end) + if (e > end) { e = end; + } /* * If another thread is wiring/unwiring this entry then @@ -6226,9 +6285,9 @@ vm_map_wire_nested( * User wiring is interruptible */ wait_result = vm_map_entry_wait(map, - (user_wire) ? THREAD_ABORTSAFE : - THREAD_UNINT); - if (user_wire && wait_result == THREAD_INTERRUPTED) { + (user_wire) ? 
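subtract_wire_counts, which closes just above, is the inverse of add_wire_counts: dropping the last user reference also drops the entry's wired_count and the map's user_wire_size. A paired sketch of the two counters (the fields are assumed analogues of the kernel's, reduced to what the accounting needs):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct wire_entry { unsigned wired_count, user_wired_count; };

    static void user_unwire(struct wire_entry *e, uint64_t *map_user_size,
        uint64_t entry_size)
    {
        assert(e->user_wired_count >= 1);
        if (e->user_wired_count == 1) {
            /* Last user reference: stop counting this entry against
             * the map's user-wired total and drop one wired_count. */
            assert(e->wired_count >= 1);
            e->wired_count--;
            *map_user_size -= entry_size;
        }
        e->user_wired_count--;
    }

    int main(void)
    {
        struct wire_entry e = { 2, 1 };
        uint64_t total = 0x4000;
        user_unwire(&e, &total, 0x4000);
        printf("wired %u user %u total 0x%llx\n", e.wired_count,
            e.user_wired_count, (unsigned long long)total);
        return 0;
    }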
THREAD_ABORTSAFE : + THREAD_UNINT); + if (user_wire && wait_result == THREAD_INTERRUPTED) { /* * undo the wirings we have done so far * We do not clear the needs_wakeup flag, @@ -6262,11 +6321,11 @@ vm_map_wire_nested( } if (entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_start; - vm_map_offset_t local_end; - pmap_t pmap; + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_start; + vm_map_offset_t local_end; + pmap_t pmap; if (wire_and_extract) { /* @@ -6287,16 +6346,16 @@ vm_map_wire_nested( sub_end += VME_OFFSET(entry) - entry->vme_start; local_end = entry->vme_end; - if(map_pmap == NULL) { - vm_object_t object; - vm_object_offset_t offset; - vm_prot_t prot; - boolean_t wired; - vm_map_entry_t local_entry; - vm_map_version_t version; - vm_map_t lookup_map; - - if(entry->use_pmap) { + if (map_pmap == NULL) { + vm_object_t object; + vm_object_offset_t offset; + vm_prot_t prot; + boolean_t wired; + vm_map_entry_t local_entry; + vm_map_version_t version; + vm_map_t lookup_map; + + if (entry->use_pmap) { pmap = VME_SUBMAP(entry)->pmap; /* ppc implementation requires that */ /* submaps pmap address ranges line */ @@ -6311,8 +6370,9 @@ vm_map_wire_nested( } if (entry->wired_count) { - if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) + if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) { goto done; + } /* * The map was not unlocked: @@ -6322,7 +6382,6 @@ vm_map_wire_nested( entry = entry->vme_next; s = entry->vme_start; continue; - } /* call vm_map_lookup_locked to */ @@ -6331,31 +6390,31 @@ vm_map_wire_nested( local_start = entry->vme_start; lookup_map = map; vm_map_lock_write_to_read(map); - if(vm_map_lookup_locked( - &lookup_map, local_start, - access_type | VM_PROT_COPY, - OBJECT_LOCK_EXCLUSIVE, - &version, &object, - &offset, &prot, &wired, - NULL, - &real_map)) { - + if (vm_map_lookup_locked( + &lookup_map, local_start, + access_type | VM_PROT_COPY, + OBJECT_LOCK_EXCLUSIVE, + &version, &object, + &offset, &prot, &wired, + NULL, + &real_map)) { vm_map_unlock_read(lookup_map); assert(map_pmap == NULL); vm_map_unwire(map, start, - s, user_wire); - return(KERN_FAILURE); + s, user_wire); + return KERN_FAILURE; } vm_object_unlock(object); - if(real_map != lookup_map) + if (real_map != lookup_map) { vm_map_unlock(real_map); + } vm_map_unlock_read(lookup_map); vm_map_lock(map); /* we unlocked, so must re-lookup */ if (!vm_map_lookup_entry(map, - local_start, - &local_entry)) { + local_start, + &local_entry)) { rc = KERN_FAILURE; goto done; } @@ -6370,8 +6429,9 @@ vm_map_wire_nested( vm_map_clip_end(map, entry, end); /* re-compute "e" */ e = entry->vme_end; - if (e > end) + if (e > end) { e = end; + } /* did we have a change of type? */ if (!entry->is_sub_map) { @@ -6383,17 +6443,18 @@ vm_map_wire_nested( pmap = map_pmap; } - if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) + if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) { goto done; + } entry->in_transition = TRUE; vm_map_unlock(map); rc = vm_map_wire_nested(VME_SUBMAP(entry), - sub_start, sub_end, - caller_prot, tag, - user_wire, pmap, pmap_addr, - NULL); + sub_start, sub_end, + caller_prot, tag, + user_wire, pmap, pmap_addr, + NULL); vm_map_lock(map); /* @@ -6401,19 +6462,21 @@ vm_map_wire_nested( * after we unlocked the map. 
*/ if (!vm_map_lookup_entry(map, local_start, - &first_entry)) + &first_entry)) { panic("vm_map_wire: re-lookup failed"); + } entry = first_entry; assert(local_start == s); /* re-compute "e" */ e = entry->vme_end; - if (e > end) + if (e > end) { e = end; + } last_timestamp = map->timestamp; while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < e)) { + (entry->vme_start < e)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { @@ -6425,7 +6488,7 @@ vm_map_wire_nested( } entry = entry->vme_next; } - if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ goto done; } @@ -6439,7 +6502,6 @@ vm_map_wire_nested( * the appropriate wire reference count. */ if (entry->wired_count) { - if ((entry->protection & access_type) != access_type) { /* found a protection problem */ @@ -6464,13 +6526,14 @@ vm_map_wire_nested( vm_map_clip_start(map, entry, s); vm_map_clip_end(map, entry, end); - if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) + if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) { goto done; + } if (wire_and_extract) { - vm_object_t object; - vm_object_offset_t offset; - vm_page_t m; + vm_object_t object; + vm_object_offset_t offset; + vm_page_t m; /* * We don't have to "wire" the page again @@ -6479,12 +6542,12 @@ vm_map_wire_nested( * checks. */ assert((entry->vme_end - entry->vme_start) - == PAGE_SIZE); + == PAGE_SIZE); assert(!entry->needs_copy); assert(!entry->is_sub_map); assert(VME_OBJECT(entry)); if (((entry->vme_end - entry->vme_start) - != PAGE_SIZE) || + != PAGE_SIZE) || entry->needs_copy || entry->is_sub_map || VME_OBJECT(entry) == VM_OBJECT_NULL) { @@ -6540,21 +6603,21 @@ vm_map_wire_nested( map != kernel_map && cs_process_enforcement(NULL) #endif /* !CONFIG_EMBEDDED */ - ) { + ) { #if MACH_ASSERT printf("pid %d[%s] wiring executable range from " - "0x%llx to 0x%llx: rejected to preserve " - "code-signing\n", - proc_selfpid(), - (current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - (uint64_t) entry->vme_start, - (uint64_t) entry->vme_end); + "0x%llx to 0x%llx: rejected to preserve " + "code-signing\n", + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end); #endif /* MACH_ASSERT */ DTRACE_VM2(cs_executable_wire, - uint64_t, (uint64_t)entry->vme_start, - uint64_t, (uint64_t)entry->vme_end); + uint64_t, (uint64_t)entry->vme_start, + uint64_t, (uint64_t)entry->vme_end); cs_executable_wire++; rc = KERN_PROTECTION_FAILURE; goto done; @@ -6603,8 +6666,9 @@ vm_map_wire_nested( /* re-compute "e" */ e = entry->vme_end; - if (e > end) + if (e > end) { e = end; + } /* * Check for holes and protection mismatch. 
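The wiring paths here keep repeating one idiom: record last_timestamp, drop the map lock for blocking work, retake it, and re-run vm_map_lookup_entry only when last_timestamp + 1 != map->timestamp, on the reading that our own unlock accounts for exactly one bump and any other bump means another writer got in. A sketch of that version-counter pattern (the locking itself is elided):

    #include <stdbool.h>
    #include <stdio.h>

    struct versioned_map { unsigned timestamp; };

    /* Every write cycle under the map lock bumps the version. */
    static void map_mutate(struct versioned_map *m) { m->timestamp++; }

    /* After relocking: if only our own unlock happened, cached lookup
     * state is still valid; otherwise the caller must look up again. */
    static bool must_relookup(const struct versioned_map *m, unsigned last)
    {
        return last + 1 != m->timestamp;
    }

    int main(void)
    {
        struct versioned_map m = { 7 };
        unsigned last = m.timestamp;
        map_mutate(&m);     /* our own unlock/relock: one bump */
        printf("%d\n", must_relookup(&m, last));    /* 0: still good */
        map_mutate(&m);     /* someone else changed the map */
        printf("%d\n", must_relookup(&m, last));    /* 1: re-lookup */
        return 0;
    }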
@@ -6615,7 +6679,7 @@ vm_map_wire_nested( */ if ((entry->vme_end < end) && ((entry->vme_next == vm_map_to_entry(map)) || - (entry->vme_next->vme_start > entry->vme_end))) { + (entry->vme_next->vme_start > entry->vme_end))) { /* found a hole */ rc = KERN_INVALID_ADDRESS; goto done; @@ -6628,8 +6692,9 @@ vm_map_wire_nested( assert(entry->wired_count == 0 && entry->user_wired_count == 0); - if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) + if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) { goto done; + } entry->in_transition = TRUE; @@ -6651,34 +6716,38 @@ vm_map_wire_nested( */ vm_map_unlock(map); - if (!user_wire && cur_thread != THREAD_NULL) + if (!user_wire && cur_thread != THREAD_NULL) { interruptible_state = thread_interrupt_level(THREAD_UNINT); - else + } else { interruptible_state = THREAD_UNINT; + } - if(map_pmap) + if (map_pmap) { rc = vm_fault_wire(map, - &tmp_entry, caller_prot, tag, map_pmap, pmap_addr, - physpage_p); - else + &tmp_entry, caller_prot, tag, map_pmap, pmap_addr, + physpage_p); + } else { rc = vm_fault_wire(map, - &tmp_entry, caller_prot, tag, map->pmap, - tmp_entry.vme_start, - physpage_p); + &tmp_entry, caller_prot, tag, map->pmap, + tmp_entry.vme_start, + physpage_p); + } - if (!user_wire && cur_thread != THREAD_NULL) + if (!user_wire && cur_thread != THREAD_NULL) { thread_interrupt_level(interruptible_state); + } vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* * Find the entry again. It could have been clipped * after we unlocked the map. */ if (!vm_map_lookup_entry(map, tmp_entry.vme_start, - &first_entry)) + &first_entry)) { panic("vm_map_wire: re-lookup failed"); + } entry = first_entry; } @@ -6686,20 +6755,20 @@ vm_map_wire_nested( last_timestamp = map->timestamp; while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < tmp_entry.vme_end)) { + (entry->vme_start < tmp_entry.vme_end)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { entry->needs_wakeup = FALSE; need_wakeup = TRUE; } - if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ subtract_wire_counts(map, entry, user_wire); } entry = entry->vme_next; } - if (rc != KERN_SUCCESS) { /* from vm_*_wire */ + if (rc != KERN_SUCCESS) { /* from vm_*_wire */ goto done; } @@ -6713,7 +6782,6 @@ vm_map_wire_nested( } s = entry->vme_start; - } /* end while loop through map entries */ done: @@ -6727,72 +6795,72 @@ done: /* * wake up anybody waiting on entries we wired. 
*/ - if (need_wakeup) + if (need_wakeup) { vm_map_entry_wakeup(map); + } if (rc != KERN_SUCCESS) { /* undo what has been wired so far */ vm_map_unwire_nested(map, start, s, user_wire, - map_pmap, pmap_addr); + map_pmap, pmap_addr); if (physpage_p) { *physpage_p = 0; } } return rc; - } kern_return_t vm_map_wire_external( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t caller_prot, - boolean_t user_wire) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + boolean_t user_wire) { - kern_return_t kret; + kern_return_t kret; kret = vm_map_wire_nested(map, start, end, caller_prot, vm_tag_bt(), - user_wire, (pmap_t)NULL, 0, NULL); + user_wire, (pmap_t)NULL, 0, NULL); return kret; } kern_return_t vm_map_wire_kernel( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t caller_prot, - vm_tag_t tag, - boolean_t user_wire) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire) { - kern_return_t kret; + kern_return_t kret; kret = vm_map_wire_nested(map, start, end, caller_prot, tag, - user_wire, (pmap_t)NULL, 0, NULL); + user_wire, (pmap_t)NULL, 0, NULL); return kret; } kern_return_t vm_map_wire_and_extract_external( - vm_map_t map, - vm_map_offset_t start, - vm_prot_t caller_prot, - boolean_t user_wire, - ppnum_t *physpage_p) + vm_map_t map, + vm_map_offset_t start, + vm_prot_t caller_prot, + boolean_t user_wire, + ppnum_t *physpage_p) { - kern_return_t kret; + kern_return_t kret; kret = vm_map_wire_nested(map, - start, - start+VM_MAP_PAGE_SIZE(map), - caller_prot, - vm_tag_bt(), - user_wire, - (pmap_t)NULL, - 0, - physpage_p); + start, + start + VM_MAP_PAGE_SIZE(map), + caller_prot, + vm_tag_bt(), + user_wire, + (pmap_t)NULL, + 0, + physpage_p); if (kret != KERN_SUCCESS && physpage_p != NULL) { *physpage_p = 0; @@ -6802,24 +6870,24 @@ vm_map_wire_and_extract_external( kern_return_t vm_map_wire_and_extract_kernel( - vm_map_t map, - vm_map_offset_t start, - vm_prot_t caller_prot, - vm_tag_t tag, - boolean_t user_wire, - ppnum_t *physpage_p) + vm_map_t map, + vm_map_offset_t start, + vm_prot_t caller_prot, + vm_tag_t tag, + boolean_t user_wire, + ppnum_t *physpage_p) { - kern_return_t kret; + kern_return_t kret; kret = vm_map_wire_nested(map, - start, - start+VM_MAP_PAGE_SIZE(map), - caller_prot, - tag, - user_wire, - (pmap_t)NULL, - 0, - physpage_p); + start, + start + VM_MAP_PAGE_SIZE(map), + caller_prot, + tag, + user_wire, + (pmap_t)NULL, + 0, + physpage_p); if (kret != KERN_SUCCESS && physpage_p != NULL) { *physpage_p = 0; @@ -6842,22 +6910,23 @@ vm_map_wire_and_extract_kernel( */ static kern_return_t vm_map_unwire_nested( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t user_wire, - pmap_t map_pmap, - vm_map_offset_t pmap_addr) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t user_wire, + pmap_t map_pmap, + vm_map_offset_t pmap_addr) { - vm_map_entry_t entry; - struct vm_map_entry *first_entry, tmp_entry; - boolean_t need_wakeup; - boolean_t main_map = FALSE; - unsigned int last_timestamp; + vm_map_entry_t entry; + struct vm_map_entry *first_entry, tmp_entry; + boolean_t need_wakeup; + boolean_t main_map = FALSE; + unsigned int last_timestamp; vm_map_lock(map); - if(map_pmap == NULL) + if (map_pmap == NULL) { main_map = TRUE; + } last_timestamp = map->timestamp; VM_MAP_RANGE_CHECK(map, start, end); @@ -6878,14 +6947,13 @@ vm_map_unwire_nested( * vm_map_clip_start will be done later. 
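The wire_and_extract wrappers above always cover exactly one map page, start through start + VM_MAP_PAGE_SIZE(map), and guarantee the caller a zeroed physpage on any failure. That contract, sketched with a dummy backend standing in for vm_map_wire_nested (the page size and return codes are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_ILL 0x4000ULL    /* illustrative 16K map page */

    /* Dummy backend in place of vm_map_wire_nested(). */
    static int wire_nested(uint64_t start, uint64_t end, uint32_t *ppn)
    {
        (void)start; (void)end;
        *ppn = 0x1234;
        return 0;                      /* KERN_SUCCESS analogue */
    }

    /* One-page wrapper mirroring vm_map_wire_and_extract_*'s shape,
     * including clearing *ppn when the backend fails. */
    static int wire_and_extract(uint64_t start, uint32_t *ppn)
    {
        int kr = wire_nested(start, start + PAGE_SIZE_ILL, ppn);
        if (kr != 0 && ppn != NULL) {
            *ppn = 0;                  /* never hand back a stale page */
        }
        return kr;
    }

    int main(void)
    {
        uint32_t ppn = 0;
        printf("%d ppn 0x%x\n", wire_and_extract(0x10000, &ppn), ppn);
        return 0;
    }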
* We don't want to unnest any nested sub maps here ! */ - } - else { + } else { if (!user_wire) { panic("vm_map_unwire: start not found"); } /* Start address is not in map. */ vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } if (entry->superpage_size) { @@ -6940,10 +7008,10 @@ vm_map_unwire_nested( } if (entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_end; - pmap_t pmap; + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_end; + pmap_t pmap; vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); @@ -6952,8 +7020,8 @@ vm_map_unwire_nested( sub_end = entry->vme_end - entry->vme_start; sub_end += VME_OFFSET(entry); local_end = entry->vme_end; - if(map_pmap == NULL) { - if(entry->use_pmap) { + if (map_pmap == NULL) { + if (entry->use_pmap) { pmap = VME_SUBMAP(entry)->pmap; pmap_addr = sub_start; } else { @@ -6962,8 +7030,9 @@ vm_map_unwire_nested( } if (entry->wired_count == 0 || (user_wire && entry->user_wired_count == 0)) { - if (!user_wire) + if (!user_wire) { panic("vm_map_unwire: entry is unwired"); + } entry = entry->vme_next; continue; } @@ -6974,15 +7043,16 @@ vm_map_unwire_nested( * this is the end of the region. */ if (((entry->vme_end < end) && - ((entry->vme_next == vm_map_to_entry(map)) || - (entry->vme_next->vme_start - > entry->vme_end)))) { - if (!user_wire) + ((entry->vme_next == vm_map_to_entry(map)) || + (entry->vme_next->vme_start + > entry->vme_end)))) { + if (!user_wire) { panic("vm_map_unwire: non-contiguous region"); + } /* - entry = entry->vme_next; - continue; -*/ + * entry = entry->vme_next; + * continue; + */ } subtract_wire_counts(map, entry, user_wire); @@ -7001,22 +7071,24 @@ vm_map_unwire_nested( */ vm_map_unlock(map); vm_map_unwire_nested(VME_SUBMAP(entry), - sub_start, sub_end, user_wire, pmap, pmap_addr); + sub_start, sub_end, user_wire, pmap, pmap_addr); vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* * Find the entry again. It could have been * clipped or deleted after we unlocked the map. */ if (!vm_map_lookup_entry(map, - tmp_entry.vme_start, - &first_entry)) { - if (!user_wire) + tmp_entry.vme_start, + &first_entry)) { + if (!user_wire) { panic("vm_map_unwire: re-lookup failed"); + } entry = first_entry->vme_next; - } else + } else { entry = first_entry; + } } last_timestamp = map->timestamp; @@ -7026,7 +7098,7 @@ vm_map_unwire_nested( * tmp_entry). Also check for waiters. */ while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < tmp_entry.vme_end)) { + (entry->vme_start < tmp_entry.vme_end)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { @@ -7039,23 +7111,25 @@ vm_map_unwire_nested( } else { vm_map_unlock(map); vm_map_unwire_nested(VME_SUBMAP(entry), - sub_start, sub_end, user_wire, map_pmap, - pmap_addr); + sub_start, sub_end, user_wire, map_pmap, + pmap_addr); vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* * Find the entry again. It could have been * clipped or deleted after we unlocked the map. 
*/ if (!vm_map_lookup_entry(map, - tmp_entry.vme_start, - &first_entry)) { - if (!user_wire) + tmp_entry.vme_start, + &first_entry)) { + if (!user_wire) { panic("vm_map_unwire: re-lookup failed"); + } entry = first_entry->vme_next; - } else + } else { entry = first_entry; + } } last_timestamp = map->timestamp; } @@ -7064,15 +7138,16 @@ vm_map_unwire_nested( if ((entry->wired_count == 0) || (user_wire && entry->user_wired_count == 0)) { - if (!user_wire) + if (!user_wire) { panic("vm_map_unwire: entry is unwired"); + } entry = entry->vme_next; continue; } assert(entry->wired_count > 0 && - (!user_wire || entry->user_wired_count > 0)); + (!user_wire || entry->user_wired_count > 0)); vm_map_clip_start(map, entry, start); vm_map_clip_end(map, entry, end); @@ -7083,11 +7158,11 @@ vm_map_unwire_nested( * this is the end of the region. */ if (((entry->vme_end < end) && - ((entry->vme_next == vm_map_to_entry(map)) || - (entry->vme_next->vme_start > entry->vme_end)))) { - - if (!user_wire) + ((entry->vme_next == vm_map_to_entry(map)) || + (entry->vme_next->vme_start > entry->vme_end)))) { + if (!user_wire) { panic("vm_map_unwire: non-contiguous region"); + } entry = entry->vme_next; continue; } @@ -7099,40 +7174,42 @@ vm_map_unwire_nested( continue; } - if(entry->zero_wired_pages) { + if (entry->zero_wired_pages) { entry->zero_wired_pages = FALSE; } entry->in_transition = TRUE; - tmp_entry = *entry; /* see comment in vm_map_wire() */ + tmp_entry = *entry; /* see comment in vm_map_wire() */ /* * We can unlock the map now. The in_transition state * guarantees existance of the entry. */ vm_map_unlock(map); - if(map_pmap) { + if (map_pmap) { vm_fault_unwire(map, - &tmp_entry, FALSE, map_pmap, pmap_addr); + &tmp_entry, FALSE, map_pmap, pmap_addr); } else { vm_fault_unwire(map, - &tmp_entry, FALSE, map->pmap, - tmp_entry.vme_start); + &tmp_entry, FALSE, map->pmap, + tmp_entry.vme_start); } vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* * Find the entry again. It could have been clipped * or deleted after we unlocked the map. */ if (!vm_map_lookup_entry(map, tmp_entry.vme_start, - &first_entry)) { - if (!user_wire) + &first_entry)) { + if (!user_wire) { panic("vm_map_unwire: re-lookup failed"); + } entry = first_entry->vme_next; - } else + } else { entry = first_entry; + } } last_timestamp = map->timestamp; @@ -7142,7 +7219,7 @@ vm_map_unwire_nested( * check for waiters. */ while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < tmp_entry.vme_end)) { + (entry->vme_start < tmp_entry.vme_end)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { @@ -7167,21 +7244,21 @@ vm_map_unwire_nested( /* * wake up anybody waiting on entries that we have unwired. 
*/ - if (need_wakeup) + if (need_wakeup) { vm_map_entry_wakeup(map); - return(KERN_SUCCESS); - + } + return KERN_SUCCESS; } kern_return_t vm_map_unwire( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t user_wire) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t user_wire) { return vm_map_unwire_nested(map, start, end, - user_wire, (pmap_t)NULL, 0); + user_wire, (pmap_t)NULL, 0); } @@ -7192,12 +7269,12 @@ vm_map_unwire( */ static void vm_map_entry_delete( - vm_map_t map, - vm_map_entry_t entry) + vm_map_t map, + vm_map_entry_t entry) { - vm_map_offset_t s, e; - vm_object_t object; - vm_map_t submap; + vm_map_offset_t s, e; + vm_object_t object; + vm_map_t submap; s = entry->vme_start; e = entry->vme_end; @@ -7229,42 +7306,42 @@ vm_map_entry_delete( * Deallocate the object only after removing all * pmap entries pointing to its pages. */ - if (submap) + if (submap) { vm_map_deallocate(submap); - else + } else { vm_object_deallocate(object); - + } } void vm_map_submap_pmap_clean( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_t sub_map, - vm_map_offset_t offset) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_t sub_map, + vm_map_offset_t offset) { - vm_map_offset_t submap_start; - vm_map_offset_t submap_end; - vm_map_size_t remove_size; - vm_map_entry_t entry; + vm_map_offset_t submap_start; + vm_map_offset_t submap_end; + vm_map_size_t remove_size; + vm_map_entry_t entry; submap_end = offset + (end - start); submap_start = offset; vm_map_lock_read(sub_map); - if(vm_map_lookup_entry(sub_map, offset, &entry)) { - + if (vm_map_lookup_entry(sub_map, offset, &entry)) { remove_size = (entry->vme_end - entry->vme_start); - if(offset > entry->vme_start) + if (offset > entry->vme_start) { remove_size -= offset - entry->vme_start; + } - if(submap_end < entry->vme_end) { + if (submap_end < entry->vme_end) { remove_size -= - entry->vme_end - submap_end; + entry->vme_end - submap_end; } - if(entry->is_sub_map) { + if (entry->is_sub_map) { vm_map_submap_pmap_clean( sub_map, start, @@ -7272,14 +7349,13 @@ vm_map_submap_pmap_clean( VME_SUBMAP(entry), VME_OFFSET(entry)); } else { - - if((map->mapped_in_other_pmaps) && (map->map_refcnt) - && (VME_OBJECT(entry) != NULL)) { + if ((map->mapped_in_other_pmaps) && (map->map_refcnt) + && (VME_OBJECT(entry) != NULL)) { vm_object_pmap_protect_options( VME_OBJECT(entry), (VME_OFFSET(entry) + - offset - - entry->vme_start), + offset - + entry->vme_start), remove_size, PMAP_NULL, entry->vme_start, @@ -7287,21 +7363,21 @@ vm_map_submap_pmap_clean( PMAP_OPTIONS_REMOVE); } else { pmap_remove(map->pmap, - (addr64_t)start, - (addr64_t)(start + remove_size)); + (addr64_t)start, + (addr64_t)(start + remove_size)); } } } entry = entry->vme_next; - while((entry != vm_map_to_entry(sub_map)) - && (entry->vme_start < submap_end)) { + while ((entry != vm_map_to_entry(sub_map)) + && (entry->vme_start < submap_end)) { remove_size = (entry->vme_end - entry->vme_start); - if(submap_end < entry->vme_end) { + if (submap_end < entry->vme_end) { remove_size -= entry->vme_end - submap_end; } - if(entry->is_sub_map) { + if (entry->is_sub_map) { vm_map_submap_pmap_clean( sub_map, (start + entry->vme_start) - offset, @@ -7309,8 +7385,8 @@ vm_map_submap_pmap_clean( VME_SUBMAP(entry), VME_OFFSET(entry)); } else { - if((map->mapped_in_other_pmaps) && (map->map_refcnt) - && (VME_OBJECT(entry) != NULL)) { + if ((map->mapped_in_other_pmaps) && (map->map_refcnt) + && (VME_OBJECT(entry) != NULL)) { 
vm_object_pmap_protect_options( VME_OBJECT(entry), VME_OFFSET(entry), @@ -7321,10 +7397,10 @@ vm_map_submap_pmap_clean( PMAP_OPTIONS_REMOVE); } else { pmap_remove(map->pmap, - (addr64_t)((start + entry->vme_start) - - offset), - (addr64_t)(((start + entry->vme_start) - - offset) + remove_size)); + (addr64_t)((start + entry->vme_start) + - offset), + (addr64_t)(((start + entry->vme_start) + - offset) + remove_size)); } } entry = entry->vme_next; @@ -7405,8 +7481,9 @@ vm_map_guard_exception( mach_exception_data_type_t subcode = (uint64_t)gap_start; /* Can't deliver exceptions to kernel task */ - if (current_task() == kernel_task) + if (current_task() == kernel_task) { return; + } EXC_GUARD_ENCODE_TYPE(code, guard_type); EXC_GUARD_ENCODE_FLAVOR(code, reason); @@ -7427,32 +7504,33 @@ vm_map_guard_exception( */ static kern_return_t vm_map_delete( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - int flags, - vm_map_t zap_map) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + int flags, + vm_map_t zap_map) { - vm_map_entry_t entry, next; - struct vm_map_entry *first_entry, tmp_entry; - vm_map_offset_t s; - vm_object_t object; - boolean_t need_wakeup; - unsigned int last_timestamp = ~0; /* unlikely value */ - int interruptible; - vm_map_offset_t gap_start; - vm_map_offset_t save_start = start; - vm_map_offset_t save_end = end; - const vm_map_offset_t FIND_GAP = 1; /* a not page aligned value */ - const vm_map_offset_t GAPS_OK = 2; /* a different not page aligned value */ - - if (map != kernel_map && !(flags & VM_MAP_REMOVE_GAPS_OK)) + vm_map_entry_t entry, next; + struct vm_map_entry *first_entry, tmp_entry; + vm_map_offset_t s; + vm_object_t object; + boolean_t need_wakeup; + unsigned int last_timestamp = ~0; /* unlikely value */ + int interruptible; + vm_map_offset_t gap_start; + vm_map_offset_t save_start = start; + vm_map_offset_t save_end = end; + const vm_map_offset_t FIND_GAP = 1; /* a not page aligned value */ + const vm_map_offset_t GAPS_OK = 2; /* a different not page aligned value */ + + if (map != kernel_map && !(flags & VM_MAP_REMOVE_GAPS_OK)) { gap_start = FIND_GAP; - else + } else { gap_start = GAPS_OK; + } interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ? - THREAD_ABORTSAFE : THREAD_UNINT; + THREAD_ABORTSAFE : THREAD_UNINT; /* * All our DMA I/O operations in IOKit are currently done by @@ -7467,7 +7545,7 @@ vm_map_delete( */ flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE; - while(1) { + while (1) { /* * Find the start of the region, and clip it */ @@ -7475,15 +7553,15 @@ vm_map_delete( entry = first_entry; if (map == kalloc_map && (entry->vme_start != start || - entry->vme_end != end)) { + entry->vme_end != end)) { panic("vm_map_delete(%p,0x%llx,0x%llx): " - "mismatched entry %p [0x%llx:0x%llx]\n", - map, - (uint64_t)start, - (uint64_t)end, - entry, - (uint64_t)entry->vme_start, - (uint64_t)entry->vme_end); + "mismatched entry %p [0x%llx:0x%llx]\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)entry->vme_start, + (uint64_t)entry->vme_end); } /* @@ -7514,12 +7592,12 @@ vm_map_delete( } if (map == kalloc_map) { panic("vm_map_delete(%p,0x%llx,0x%llx):" - " clipping %p at 0x%llx\n", - map, - (uint64_t)start, - (uint64_t)end, - entry, - (uint64_t)start); + " clipping %p at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)start); } vm_map_clip_start(map, entry, start); } @@ -7529,26 +7607,26 @@ vm_map_delete( * time through the loop. 
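vm_map_delete's prologue below seeds gap_start with FIND_GAP or GAPS_OK, two deliberately non-page-aligned sentinel offsets (1 and 2) that no real, page-aligned gap address can collide with; the first true hole later overwrites FIND_GAP with its address so the probe at the end can report it. The sentinel trick in miniature:

    #include <stdint.h>
    #include <stdio.h>

    /* Non-page-aligned sentinels, as in vm_map_delete: a genuine gap
     * start is page aligned, so these can never be mistaken for one. */
    static const uint64_t FIND_GAP = 1;
    static const uint64_t GAPS_OK  = 2;

    int main(void)
    {
        uint64_t gap_start = FIND_GAP;  /* caller wants gaps reported */

        /* ... deletion loop: the first missing entry records where ... */
        uint64_t s = 0x7000;            /* illustrative unmapped address */
        if (gap_start == FIND_GAP) {
            gap_start = s;
        }

        if (gap_start != FIND_GAP && gap_start != GAPS_OK) {
            printf("gap at 0x%llx\n", (unsigned long long)gap_start);
        }
        return 0;
    }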
*/ SAVE_HINT_MAP_WRITE(map, entry->vme_prev); - } else { - if (map->pmap == kernel_pmap && map->map_refcnt != 0) { panic("vm_map_delete(%p,0x%llx,0x%llx): " - "no map entry at 0x%llx\n", - map, - (uint64_t)start, - (uint64_t)end, - (uint64_t)start); + "no map entry at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + (uint64_t)start); } entry = first_entry->vme_next; - if (gap_start == FIND_GAP) + if (gap_start == FIND_GAP) { gap_start = start; + } } break; } - if (entry->superpage_size) + if (entry->superpage_size) { end = SUPERPAGE_ROUND_UP(end); + } need_wakeup = FALSE; /* @@ -7582,7 +7660,7 @@ vm_map_delete( if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) && entry->map_aligned && !VM_MAP_PAGE_ALIGNED(s, - VM_MAP_PAGE_MASK(map))) { + VM_MAP_PAGE_MASK(map))) { /* * The entry will no longer be map-aligned * after clipping and the caller said it's OK. @@ -7591,12 +7669,12 @@ vm_map_delete( } if (map == kalloc_map) { panic("vm_map_delete(%p,0x%llx,0x%llx): " - "clipping %p at 0x%llx\n", - map, - (uint64_t)start, - (uint64_t)end, - entry, - (uint64_t)s); + "clipping %p at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)s); } vm_map_clip_start(map, entry, s); } @@ -7609,7 +7687,7 @@ vm_map_delete( if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) && entry->map_aligned && !VM_MAP_PAGE_ALIGNED(end, - VM_MAP_PAGE_MASK(map))) { + VM_MAP_PAGE_MASK(map))) { /* * The entry will no longer be map-aligned * after clipping and the caller said it's OK. @@ -7618,12 +7696,12 @@ vm_map_delete( } if (map == kalloc_map) { panic("vm_map_delete(%p,0x%llx,0x%llx): " - "clipping %p at 0x%llx\n", - map, - (uint64_t)start, - (uint64_t)end, - entry, - (uint64_t)end); + "clipping %p at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)end); } vm_map_clip_end(map, entry, end); } @@ -7631,16 +7709,16 @@ vm_map_delete( if (entry->permanent) { if (map->pmap == kernel_pmap) { panic("%s(%p,0x%llx,0x%llx): " - "attempt to remove permanent " - "VM map entry " - "%p [0x%llx:0x%llx]\n", - __FUNCTION__, - map, - (uint64_t) start, - (uint64_t) end, - entry, - (uint64_t) entry->vme_start, - (uint64_t) entry->vme_end); + "attempt to remove permanent " + "VM map entry " + "%p [0x%llx:0x%llx]\n", + __FUNCTION__, + map, + (uint64_t) start, + (uint64_t) end, + entry, + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end); } else if (flags & VM_MAP_REMOVE_IMMUTABLE) { // printf("FBDP %d[%s] removing permanent entry %p [0x%llx:0x%llx] prot 0x%x/0x%x\n", proc_selfpid(), (current_task()->bsd_info ? proc_name_address(current_task()->bsd_info) : "?"), entry, (uint64_t)entry->vme_start, (uint64_t)entry->vme_end, entry->protection, entry->max_protection); entry->permanent = FALSE; @@ -7649,46 +7727,46 @@ vm_map_delete( entry->permanent = FALSE; printf("%d[%s] %s(0x%llx,0x%llx): " - "pmap_cs disabled, allowing for permanent executable entry [0x%llx:0x%llx] " - "prot 0x%x/0x%x\n", - proc_selfpid(), - (current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - __FUNCTION__, - (uint64_t) start, - (uint64_t) end, - (uint64_t)entry->vme_start, - (uint64_t)entry->vme_end, - entry->protection, - entry->max_protection); + "pmap_cs disabled, allowing for permanent executable entry [0x%llx:0x%llx] " + "prot 0x%x/0x%x\n", + proc_selfpid(), + (current_task()->bsd_info + ? 
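The permanent-entry block above encodes a three-way policy: removing a permanent entry from the kernel pmap panics, a userland removal succeeds only when VM_MAP_REMOVE_IMMUTABLE is passed, and otherwise the entry survives with its protections stripped to VM_PROT_NONE further down. A compressed sketch of that decision (the PMAP_CS carve-out visible in the hunk is left out):

    #include <stdio.h>

    #define REMOVE_IMMUTABLE 0x1   /* stand-in for VM_MAP_REMOVE_IMMUTABLE */

    enum action { ACT_PANIC, ACT_REMOVE, ACT_NEUTER };

    /* What does vm_map_delete do with a permanent entry? */
    static enum action permanent_entry_action(int is_kernel_map, int flags)
    {
        if (is_kernel_map) {
            return ACT_PANIC;   /* permanent kernel mappings are fatal to remove */
        }
        if (flags & REMOVE_IMMUTABLE) {
            return ACT_REMOVE;  /* caller explicitly overrode permanence */
        }
        return ACT_NEUTER;      /* keep the entry, drop to VM_PROT_NONE */
    }

    int main(void)
    {
        printf("%d %d %d\n",
            permanent_entry_action(1, 0),
            permanent_entry_action(0, REMOVE_IMMUTABLE),
            permanent_entry_action(0, 0));
        return 0;
    }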
proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__, + (uint64_t) start, + (uint64_t) end, + (uint64_t)entry->vme_start, + (uint64_t)entry->vme_end, + entry->protection, + entry->max_protection); #endif } else { if (vm_map_executable_immutable_verbose) { printf("%d[%s] %s(0x%llx,0x%llx): " - "permanent entry [0x%llx:0x%llx] " - "prot 0x%x/0x%x\n", - proc_selfpid(), - (current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - __FUNCTION__, - (uint64_t) start, - (uint64_t) end, - (uint64_t)entry->vme_start, - (uint64_t)entry->vme_end, - entry->protection, - entry->max_protection); + "permanent entry [0x%llx:0x%llx] " + "prot 0x%x/0x%x\n", + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__, + (uint64_t) start, + (uint64_t) end, + (uint64_t)entry->vme_start, + (uint64_t)entry->vme_end, + entry->protection, + entry->max_protection); } /* * dtrace -n 'vm_map_delete_permanent { print("start=0x%llx end=0x%llx prot=0x%x/0x%x\n", arg0, arg1, arg2, arg3); stack(); ustack(); }' */ DTRACE_VM5(vm_map_delete_permanent, - vm_map_offset_t, entry->vme_start, - vm_map_offset_t, entry->vme_end, - vm_prot_t, entry->protection, - vm_prot_t, entry->max_protection, - int, VME_ALIAS(entry)); + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + vm_prot_t, entry->protection, + vm_prot_t, entry->max_protection, + int, VME_ALIAS(entry)); } } @@ -7731,8 +7809,9 @@ vm_map_delete( /* * User: use the next entry */ - if (gap_start == FIND_GAP) + if (gap_start == FIND_GAP) { gap_start = s; + } entry = first_entry->vme_next; s = entry->vme_start; } else { @@ -7744,12 +7823,12 @@ vm_map_delete( } /* end in_transition */ if (entry->wired_count) { - boolean_t user_wire; + boolean_t user_wire; user_wire = entry->user_wired_count > 0; /* - * Remove a kernel wiring if requested + * Remove a kernel wiring if requested */ if (flags & VM_MAP_REMOVE_KUNWIRE) { entry->wired_count--; @@ -7759,8 +7838,9 @@ vm_map_delete( * Remove all user wirings for proper accounting */ if (entry->user_wired_count > 0) { - while (entry->user_wired_count) + while (entry->user_wired_count) { subtract_wire_counts(map, entry, user_wire); + } } if (entry->wired_count != 0) { @@ -7778,7 +7858,7 @@ vm_map_delete( assert(s == entry->vme_start); entry->needs_wakeup = TRUE; wait_result = vm_map_entry_wait(map, - interruptible); + interruptible); if (interruptible && wait_result == THREAD_INTERRUPTED) { @@ -7797,13 +7877,14 @@ vm_map_delete( * up again. 
*/ if (!vm_map_lookup_entry(map, s, - &first_entry)) { + &first_entry)) { assert(map != kernel_map); /* * User: use the next entry */ - if (gap_start == FIND_GAP) + if (gap_start == FIND_GAP) { gap_start = s; + } entry = first_entry->vme_next; s = entry->vme_start; } else { @@ -7812,8 +7893,7 @@ vm_map_delete( } last_timestamp = map->timestamp; continue; - } - else { + } else { return KERN_FAILURE; } } @@ -7841,7 +7921,7 @@ vm_map_delete( sub_map = VME_SUBMAP(&tmp_entry); sub_start = VME_OFFSET(&tmp_entry); sub_end = sub_start + (tmp_entry.vme_end - - tmp_entry.vme_start); + tmp_entry.vme_start); if (tmp_entry.use_pmap) { pmap = sub_map->pmap; pmap_addr = tmp_entry.vme_start; @@ -7850,11 +7930,10 @@ vm_map_delete( pmap_addr = tmp_entry.vme_start; } (void) vm_map_unwire_nested(sub_map, - sub_start, sub_end, - user_wire, - pmap, pmap_addr); + sub_start, sub_end, + user_wire, + pmap, pmap_addr); } else { - if (VME_OBJECT(&tmp_entry) == kernel_object) { pmap_protect_options( map->pmap, @@ -7865,22 +7944,23 @@ vm_map_delete( NULL); } vm_fault_unwire(map, &tmp_entry, - VME_OBJECT(&tmp_entry) == kernel_object, - map->pmap, tmp_entry.vme_start); + VME_OBJECT(&tmp_entry) == kernel_object, + map->pmap, tmp_entry.vme_start); } vm_map_lock(map); - if (last_timestamp+1 != map->timestamp) { + if (last_timestamp + 1 != map->timestamp) { /* * Find the entry again. It could have * been clipped after we unlocked the map. */ - if (!vm_map_lookup_entry(map, s, &first_entry)){ + if (!vm_map_lookup_entry(map, s, &first_entry)) { assert((map != kernel_map) && - (!entry->is_sub_map)); - if (gap_start == FIND_GAP) + (!entry->is_sub_map)); + if (gap_start == FIND_GAP) { gap_start = s; + } first_entry = first_entry->vme_next; s = first_entry->vme_start; } else { @@ -7895,7 +7975,7 @@ vm_map_delete( entry = first_entry; while ((entry != vm_map_to_entry(map)) && - (entry->vme_start < tmp_entry.vme_end)) { + (entry->vme_start < tmp_entry.vme_end)) { assert(entry->in_transition); entry->in_transition = FALSE; if (entry->needs_wakeup) { @@ -7960,7 +8040,7 @@ vm_map_delete( (addr64_t)entry->vme_start, entry->vme_end - entry->vme_start, pmap_flags); -#endif /* NO_NESTED_PMAP */ +#endif /* NO_NESTED_PMAP */ if ((map->mapped_in_other_pmaps) && (map->map_refcnt)) { /* clean up parent map/maps */ vm_map_submap_pmap_clean( @@ -7976,7 +8056,7 @@ vm_map_delete( VME_OFFSET(entry)); } } else if (VME_OBJECT(entry) != kernel_object && - VME_OBJECT(entry) != compressor_object) { + VME_OBJECT(entry) != compressor_object) { object = VME_OBJECT(entry); if ((map->mapped_in_other_pmaps) && (map->map_refcnt)) { vm_object_pmap_protect_options( @@ -7987,7 +8067,7 @@ vm_map_delete( VM_PROT_NONE, PMAP_OPTIONS_REMOVE); } else if ((VME_OBJECT(entry) != VM_OBJECT_NULL) || - (map->pmap == kernel_pmap)) { + (map->pmap == kernel_pmap)) { /* Remove translations associated * with this range unless the entry * does not have an object, or @@ -8000,22 +8080,22 @@ vm_map_delete( * translations. 
*/ pmap_remove_options(map->pmap, - (addr64_t)entry->vme_start, - (addr64_t)entry->vme_end, - PMAP_OPTIONS_REMOVE); + (addr64_t)entry->vme_start, + (addr64_t)entry->vme_end, + PMAP_OPTIONS_REMOVE); } } if (entry->iokit_acct) { /* alternate accounting */ DTRACE_VM4(vm_map_iokit_unmapped_region, - vm_map_t, map, - vm_map_offset_t, entry->vme_start, - vm_map_offset_t, entry->vme_end, - int, VME_ALIAS(entry)); + vm_map_t, map, + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + int, VME_ALIAS(entry)); vm_map_iokit_unmapped_region(map, - (entry->vme_end - - entry->vme_start)); + (entry->vme_end - + entry->vme_start)); entry->iokit_acct = FALSE; entry->use_pmap = FALSE; } @@ -8026,8 +8106,8 @@ vm_map_delete( */ #if DEBUG assert(vm_map_pmap_is_empty(map, - entry->vme_start, - entry->vme_end)); + entry->vme_start, + entry->vme_end)); #endif /* DEBUG */ next = entry->vme_next; @@ -8036,14 +8116,14 @@ vm_map_delete( map->map_refcnt != 0 && entry->vme_end < end && (next == vm_map_to_entry(map) || - next->vme_start != entry->vme_end)) { + next->vme_start != entry->vme_end)) { panic("vm_map_delete(%p,0x%llx,0x%llx): " - "hole after %p at 0x%llx\n", - map, - (uint64_t)start, - (uint64_t)end, - entry, - (uint64_t)entry->vme_end); + "hole after %p at 0x%llx\n", + map, + (uint64_t)start, + (uint64_t)end, + entry, + (uint64_t)entry->vme_end); } /* @@ -8070,7 +8150,7 @@ vm_map_delete( entry->protection = VM_PROT_NONE; entry->max_protection = VM_PROT_NONE; } else if ((flags & VM_MAP_REMOVE_SAVE_ENTRIES) && - zap_map != VM_MAP_NULL) { + zap_map != VM_MAP_NULL) { vm_map_size_t entry_size; /* * The caller wants to save the affected VM map entries @@ -8081,9 +8161,9 @@ vm_map_delete( vm_map_store_entry_unlink(map, entry); /* ... and add it to the end of the "zap_map" */ vm_map_store_entry_link(zap_map, - vm_map_last_entry(zap_map), - entry, - VM_MAP_KERNEL_FLAGS_NONE); + vm_map_last_entry(zap_map), + entry, + VM_MAP_KERNEL_FLAGS_NONE); entry_size = entry->vme_end - entry->vme_start; map->size -= entry_size; zap_map->size += entry_size; @@ -8097,7 +8177,7 @@ vm_map_delete( entry = next; - if(entry == vm_map_to_entry(map)) { + if (entry == vm_map_to_entry(map)) { break; } if (last_timestamp + 1 != map->timestamp) { @@ -8109,18 +8189,19 @@ vm_map_delete( * we have to assume that the task has been fully * disabled before we get here */ - if (!vm_map_lookup_entry(map, s, &entry)){ - entry = entry->vme_next; + if (!vm_map_lookup_entry(map, s, &entry)) { + entry = entry->vme_next; /* * Nothing found for s. If we weren't already done, then there is a gap. */ - if (gap_start == FIND_GAP && s < end) + if (gap_start == FIND_GAP && s < end) { gap_start = s; + } s = entry->vme_start; - } else { + } else { SAVE_HINT_MAP_WRITE(map, entry->vme_prev); - } + } /* * others can not only allocate behind us, we can * also see coalesce while we don't have the map lock @@ -8132,13 +8213,15 @@ vm_map_delete( last_timestamp = map->timestamp; } - if (map->wait_for_space) + if (map->wait_for_space) { thread_wakeup((event_t) map); + } /* * wake up anybody waiting on entries that we have already deleted. 
*/ - if (need_wakeup) + if (need_wakeup) { vm_map_entry_wakeup(map); + } if (gap_start != FIND_GAP && gap_start != GAPS_OK) { DTRACE_VM3(kern_vm_deallocate_gap, @@ -8173,12 +8256,12 @@ vm_map_delete( */ kern_return_t vm_map_remove( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t flags) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t flags) { - kern_return_t result; + kern_return_t result; vm_map_lock(map); VM_MAP_RANGE_CHECK(map, start, end); @@ -8189,12 +8272,13 @@ vm_map_remove( * free to the zone_map into a no-op, there is a problem and we should * panic. */ - if ((map == zone_map) && (start == end)) + if ((map == zone_map) && (start == end)) { panic("Nothing being freed to the zone_map. start = end = %p\n", (void *)start); + } result = vm_map_delete(map, start, end, flags, VM_MAP_NULL); vm_map_unlock(map); - return(result); + return result; } /* @@ -8205,16 +8289,16 @@ vm_map_remove( */ kern_return_t vm_map_remove_locked( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t flags) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t flags) { - kern_return_t result; + kern_return_t result; VM_MAP_RANGE_CHECK(map, start, end); result = vm_map_delete(map, start, end, flags, VM_MAP_NULL); - return(result); + return result; } @@ -8230,7 +8314,7 @@ vm_map_copy_allocate(void) vm_map_copy_t new_copy; new_copy = zalloc(vm_map_copy_zone); - bzero(new_copy, sizeof (*new_copy)); + bzero(new_copy, sizeof(*new_copy)); new_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE; vm_map_copy_first_entry(new_copy) = vm_map_copy_to_entry(new_copy); vm_map_copy_last_entry(new_copy) = vm_map_copy_to_entry(new_copy); @@ -8246,16 +8330,17 @@ vm_map_copy_allocate(void) */ void vm_map_copy_discard( - vm_map_copy_t copy) + vm_map_copy_t copy) { - if (copy == VM_MAP_COPY_NULL) + if (copy == VM_MAP_COPY_NULL) { return; + } switch (copy->type) { case VM_MAP_COPY_ENTRY_LIST: while (vm_map_copy_first_entry(copy) != - vm_map_copy_to_entry(copy)) { - vm_map_entry_t entry = vm_map_copy_first_entry(copy); + vm_map_copy_to_entry(copy)) { + vm_map_entry_t entry = vm_map_copy_first_entry(copy); vm_map_copy_entry_unlink(copy, entry); if (entry->is_sub_map) { @@ -8266,7 +8351,7 @@ vm_map_copy_discard( vm_map_copy_entry_dispose(copy, entry); } break; - case VM_MAP_COPY_OBJECT: + case VM_MAP_COPY_OBJECT: vm_object_deallocate(copy->cpy_object); break; case VM_MAP_COPY_KERNEL_BUFFER: @@ -8276,9 +8361,10 @@ vm_map_copy_discard( * allocated by a single call to kalloc(), i.e. the * vm_map_copy_t was not allocated out of the zone. */ - if (copy->size > msg_ool_size_small || copy->offset) + if (copy->size > msg_ool_size_small || copy->offset) { panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld", - (long long)copy->size, (long long)copy->offset); + (long long)copy->size, (long long)copy->offset); + } kfree(copy, copy->size + cpy_kdata_hdr_sz); return; } @@ -8304,12 +8390,13 @@ vm_map_copy_discard( */ vm_map_copy_t vm_map_copy_copy( - vm_map_copy_t copy) + vm_map_copy_t copy) { - vm_map_copy_t new_copy; + vm_map_copy_t new_copy; - if (copy == VM_MAP_COPY_NULL) + if (copy == VM_MAP_COPY_NULL) { return VM_MAP_COPY_NULL; + } /* * Allocate a new copy object, and copy the information @@ -8325,9 +8412,9 @@ vm_map_copy_copy( * changed to point to the new copy object. 
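
vm_map_copy_discard() above dispatches on the three vm_map_copy_t variants: an entry list whose entries are unlinked and disposed one by one, a single object whose reference is dropped, and a small kernel buffer whose header and payload share one allocation. A schematic tagged-union version of that dispatch; the toy_* names are illustrative, and malloc/free stand in for the kernel's zone and reference-counting primitives:

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative counterparts of VM_MAP_COPY_ENTRY_LIST,
     * VM_MAP_COPY_OBJECT and VM_MAP_COPY_KERNEL_BUFFER. */
    enum toy_copy_type { TOY_ENTRY_LIST, TOY_OBJECT, TOY_KERNEL_BUFFER };

    struct toy_entry {
        struct toy_entry *next;
    };

    struct toy_copy {
        enum toy_copy_type type;
        struct toy_entry  *entries;  /* TOY_ENTRY_LIST */
        void              *object;   /* TOY_OBJECT */
        size_t             size;     /* TOY_KERNEL_BUFFER: payload follows header */
    };

    static void toy_copy_discard(struct toy_copy *copy)
    {
        if (copy == NULL) {
            return;
        }
        switch (copy->type) {
        case TOY_ENTRY_LIST:
            /* unlink and dispose entries one at a time */
            while (copy->entries != NULL) {
                struct toy_entry *e = copy->entries;
                copy->entries = e->next;
                free(e);
            }
            break;
        case TOY_OBJECT:
            free(copy->object);   /* kernel: vm_object_deallocate() */
            break;
        case TOY_KERNEL_BUFFER:
            /* header and payload were one allocation, so one free and done,
             * matching the early kfree()/return above */
            free(copy);
            return;
        }
        free(copy);               /* kernel: zfree(vm_map_copy_zone, copy) */
    }

    int main(void)
    {
        struct toy_copy *c = calloc(1, sizeof(*c) + 64);
        if (c == NULL) {
            return 1;
        }
        c->type = TOY_KERNEL_BUFFER;
        c->size = 64;
        toy_copy_discard(c);
        puts("discarded");
        return 0;
    }
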
*/ vm_map_copy_first_entry(copy)->vme_prev - = vm_map_copy_to_entry(new_copy); + = vm_map_copy_to_entry(new_copy); vm_map_copy_last_entry(copy)->vme_next - = vm_map_copy_to_entry(new_copy); + = vm_map_copy_to_entry(new_copy); } /* @@ -8345,15 +8432,15 @@ vm_map_copy_copy( static kern_return_t vm_map_overwrite_submap_recurse( - vm_map_t dst_map, - vm_map_offset_t dst_addr, - vm_map_size_t dst_size) + vm_map_t dst_map, + vm_map_offset_t dst_addr, + vm_map_size_t dst_size) { - vm_map_offset_t dst_end; - vm_map_entry_t tmp_entry; - vm_map_entry_t entry; - kern_return_t result; - boolean_t encountered_sub_map = FALSE; + vm_map_offset_t dst_end; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; + kern_return_t result; + boolean_t encountered_sub_map = FALSE; @@ -8365,39 +8452,39 @@ vm_map_overwrite_submap_recurse( */ dst_end = vm_map_round_page(dst_addr + dst_size, - VM_MAP_PAGE_MASK(dst_map)); + VM_MAP_PAGE_MASK(dst_map)); vm_map_lock(dst_map); start_pass_1: if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } vm_map_clip_start(dst_map, - tmp_entry, - vm_map_trunc_page(dst_addr, - VM_MAP_PAGE_MASK(dst_map))); + tmp_entry, + vm_map_trunc_page(dst_addr, + VM_MAP_PAGE_MASK(dst_map))); if (tmp_entry->is_sub_map) { /* clipping did unnest if needed */ assert(!tmp_entry->use_pmap); } for (entry = tmp_entry;;) { - vm_map_entry_t next; + vm_map_entry_t next; next = entry->vme_next; - while(entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_end; + while (entry->is_sub_map) { + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_end; if (entry->in_transition) { /* * Say that we are waiting, and wait for entry. */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); goto start_pass_1; } @@ -8405,10 +8492,11 @@ start_pass_1: encountered_sub_map = TRUE; sub_start = VME_OFFSET(entry); - if(entry->vme_end < dst_end) + if (entry->vme_end < dst_end) { sub_end = entry->vme_end; - else + } else { sub_end = dst_end; + } sub_end -= entry->vme_start; sub_end += VME_OFFSET(entry); local_end = entry->vme_end; @@ -8419,23 +8507,25 @@ start_pass_1: sub_start, sub_end - sub_start); - if(result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { return result; - if (dst_end <= entry->vme_end) + } + if (dst_end <= entry->vme_end) { return KERN_SUCCESS; + } vm_map_lock(dst_map); - if(!vm_map_lookup_entry(dst_map, local_end, - &tmp_entry)) { + if (!vm_map_lookup_entry(dst_map, local_end, + &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } entry = tmp_entry; next = entry->vme_next; } - if ( ! (entry->protection & VM_PROT_WRITE)) { + if (!(entry->protection & VM_PROT_WRITE)) { vm_map_unlock(dst_map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } /* @@ -8443,13 +8533,12 @@ start_pass_1: * for it to exit that state. Anything could happen * when we unlock the map, so start over. */ - if (entry->in_transition) { - - /* - * Say that we are waiting, and wait for entry. - */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + if (entry->in_transition) { + /* + * Say that we are waiting, and wait for entry. 
+ */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); goto start_pass_1; } @@ -8467,7 +8556,7 @@ start_pass_1: if ((next == vm_map_to_entry(dst_map)) || (next->vme_start != entry->vme_end)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } /* @@ -8475,10 +8564,10 @@ start_pass_1: */ if ((VME_OBJECT(entry) != VM_OBJECT_NULL) && ((!VME_OBJECT(entry)->internal) || - (VME_OBJECT(entry)->true_share))) { - if(encountered_sub_map) { + (VME_OBJECT(entry)->true_share))) { + if (encountered_sub_map) { vm_map_unlock(dst_map); - return(KERN_FAILURE); + return KERN_FAILURE; } } @@ -8486,7 +8575,7 @@ start_pass_1: entry = next; }/* for */ vm_map_unlock(dst_map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -8542,31 +8631,32 @@ start_pass_1: static kern_return_t vm_map_copy_overwrite_nested( - vm_map_t dst_map, - vm_map_address_t dst_addr, - vm_map_copy_t copy, - boolean_t interruptible, - pmap_t pmap, - boolean_t discard_on_success) + vm_map_t dst_map, + vm_map_address_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible, + pmap_t pmap, + boolean_t discard_on_success) { - vm_map_offset_t dst_end; - vm_map_entry_t tmp_entry; - vm_map_entry_t entry; - kern_return_t kr; - boolean_t aligned = TRUE; - boolean_t contains_permanent_objects = FALSE; - boolean_t encountered_sub_map = FALSE; - vm_map_offset_t base_addr; - vm_map_size_t copy_size; - vm_map_size_t total_size; + vm_map_offset_t dst_end; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; + kern_return_t kr; + boolean_t aligned = TRUE; + boolean_t contains_permanent_objects = FALSE; + boolean_t encountered_sub_map = FALSE; + vm_map_offset_t base_addr; + vm_map_size_t copy_size; + vm_map_size_t total_size; /* * Check for null copy object. 
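
Both vm_map_overwrite_submap_recurse() and the overwrite path above give up with KERN_INVALID_ADDRESS unless every destination entry abuts the next one (next->vme_start == entry->vme_end), and with KERN_PROTECTION_FAILURE unless each entry is writable. The same walk in isolation, over a sorted array of hypothetical entries rather than the real entry list:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_entry {
        uint64_t start, end;   /* [start, end) */
        bool     writable;
    };

    /* True when [addr, addr + size) is fully covered by writable, mutually
     * abutting entries, mirroring the contiguity and VM_PROT_WRITE checks
     * in the hunks above. */
    static bool range_ok(const struct toy_entry *e, int n,
        uint64_t addr, uint64_t size)
    {
        uint64_t end = addr + size;
        int i = 0;

        while (i < n && e[i].end <= addr) {
            i++;                          /* find the first covering entry */
        }
        if (i == n || e[i].start > addr) {
            return false;                 /* KERN_INVALID_ADDRESS */
        }
        for (; i < n; i++) {
            if (!e[i].writable) {
                return false;             /* KERN_PROTECTION_FAILURE */
            }
            if (end <= e[i].end) {
                return true;              /* range contained, done */
            }
            if (i + 1 == n || e[i + 1].start != e[i].end) {
                return false;             /* hole: not contiguous */
            }
        }
        return false;
    }

    int main(void)
    {
        struct toy_entry map[] = {
            { 0x1000, 0x3000, true },
            { 0x3000, 0x5000, true },
            { 0x6000, 0x8000, true },   /* hole between 0x5000 and 0x6000 */
        };
        printf("%d\n", range_ok(map, 3, 0x2000, 0x2000));  /* 1: spans two abutting entries */
        printf("%d\n", range_ok(map, 3, 0x4000, 0x3000));  /* 0: falls into the hole */
        return 0;
    }

The hole between 0x5000 and 0x6000 is exactly the condition the contiguity test above rejects.
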
*/ - if (copy == VM_MAP_COPY_NULL) - return(KERN_SUCCESS); + if (copy == VM_MAP_COPY_NULL) { + return KERN_SUCCESS; + } /* * Check for special kernel buffer allocated @@ -8574,9 +8664,9 @@ vm_map_copy_overwrite_nested( */ if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { - return(vm_map_copyout_kernel_buffer( - dst_map, &dst_addr, - copy, copy->size, TRUE, discard_on_success)); + return vm_map_copyout_kernel_buffer( + dst_map, &dst_addr, + copy, copy->size, TRUE, discard_on_success); } /* @@ -8587,9 +8677,10 @@ vm_map_copy_overwrite_nested( assert(copy->type == VM_MAP_COPY_ENTRY_LIST); if (copy->size == 0) { - if (discard_on_success) + if (discard_on_success) { vm_map_copy_discard(copy); - return(KERN_SUCCESS); + } + return KERN_SUCCESS; } /* @@ -8600,15 +8691,14 @@ vm_map_copy_overwrite_nested( */ if (!VM_MAP_PAGE_ALIGNED(copy->size, - VM_MAP_PAGE_MASK(dst_map)) || + VM_MAP_PAGE_MASK(dst_map)) || !VM_MAP_PAGE_ALIGNED(copy->offset, - VM_MAP_PAGE_MASK(dst_map)) || + VM_MAP_PAGE_MASK(dst_map)) || !VM_MAP_PAGE_ALIGNED(dst_addr, - VM_MAP_PAGE_MASK(dst_map))) - { + VM_MAP_PAGE_MASK(dst_map))) { aligned = FALSE; dst_end = vm_map_round_page(dst_addr + copy->size, - VM_MAP_PAGE_MASK(dst_map)); + VM_MAP_PAGE_MASK(dst_map)); } else { dst_end = dst_addr + copy->size; } @@ -8621,39 +8711,38 @@ vm_map_copy_overwrite_nested( */ if (dst_addr >= dst_map->max_offset) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } start_pass_1: if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } vm_map_clip_start(dst_map, - tmp_entry, - vm_map_trunc_page(dst_addr, - VM_MAP_PAGE_MASK(dst_map))); + tmp_entry, + vm_map_trunc_page(dst_addr, + VM_MAP_PAGE_MASK(dst_map))); for (entry = tmp_entry;;) { - vm_map_entry_t next = entry->vme_next; - - while(entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_end; + vm_map_entry_t next = entry->vme_next; - if (entry->in_transition) { + while (entry->is_sub_map) { + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_end; + if (entry->in_transition) { /* * Say that we are waiting, and wait for entry. */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); goto start_pass_1; } local_end = entry->vme_end; - if (!(entry->needs_copy)) { + if (!(entry->needs_copy)) { /* if needs_copy we are a COW submap */ /* in such a case we just replace so */ /* there is no need for the follow- */ @@ -8661,10 +8750,11 @@ start_pass_1: encountered_sub_map = TRUE; sub_start = VME_OFFSET(entry); - if(entry->vme_end < dst_end) + if (entry->vme_end < dst_end) { sub_end = entry->vme_end; - else + } else { sub_end = dst_end; + } sub_end -= entry->vme_start; sub_end += VME_OFFSET(entry); vm_map_unlock(dst_map); @@ -8673,24 +8763,26 @@ start_pass_1: VME_SUBMAP(entry), sub_start, sub_end - sub_start); - if(kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } vm_map_lock(dst_map); } - if (dst_end <= entry->vme_end) + if (dst_end <= entry->vme_end) { goto start_overwrite; - if(!vm_map_lookup_entry(dst_map, local_end, - &entry)) { + } + if (!vm_map_lookup_entry(dst_map, local_end, + &entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } next = entry->vme_next; } - if ( ! 
(entry->protection & VM_PROT_WRITE)) { + if (!(entry->protection & VM_PROT_WRITE)) { vm_map_unlock(dst_map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } /* @@ -8698,13 +8790,12 @@ start_pass_1: * for it to exit that state. Anything could happen * when we unlock the map, so start over. */ - if (entry->in_transition) { - - /* - * Say that we are waiting, and wait for entry. - */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + if (entry->in_transition) { + /* + * Say that we are waiting, and wait for entry. + */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); goto start_pass_1; } @@ -8712,15 +8803,16 @@ start_pass_1: /* * our range is contained completely within this map entry */ - if (dst_end <= entry->vme_end) + if (dst_end <= entry->vme_end) { break; + } /* * check that range specified is contiguous region */ if ((next == vm_map_to_entry(dst_map)) || (next->vme_start != entry->vme_end)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } @@ -8729,7 +8821,7 @@ start_pass_1: */ if ((VME_OBJECT(entry) != VM_OBJECT_NULL) && ((!VME_OBJECT(entry)->internal) || - (VME_OBJECT(entry)->true_share))) { + (VME_OBJECT(entry)->true_share))) { contains_permanent_objects = TRUE; } @@ -8744,11 +8836,11 @@ start_overwrite: if (interruptible && contains_permanent_objects) { vm_map_unlock(dst_map); - return(KERN_FAILURE); /* XXX */ + return KERN_FAILURE; /* XXX */ } /* - * + * * Make a second pass, overwriting the data * At the beginning of each loop iteration, * the next entry to be overwritten is "tmp_entry" @@ -8758,31 +8850,31 @@ start_overwrite: */ total_size = copy->size; - if(encountered_sub_map) { + if (encountered_sub_map) { copy_size = 0; /* re-calculate tmp_entry since we've had the map */ /* unlocked */ if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } } else { copy_size = copy->size; } base_addr = dst_addr; - while(TRUE) { + while (TRUE) { /* deconstruct the copy object and do in parts */ /* only in sub_map, interruptable case */ - vm_map_entry_t copy_entry; - vm_map_entry_t previous_prev = VM_MAP_ENTRY_NULL; - vm_map_entry_t next_copy = VM_MAP_ENTRY_NULL; - int nentries; - int remaining_entries = 0; - vm_map_offset_t new_offset = 0; + vm_map_entry_t copy_entry; + vm_map_entry_t previous_prev = VM_MAP_ENTRY_NULL; + vm_map_entry_t next_copy = VM_MAP_ENTRY_NULL; + int nentries; + int remaining_entries = 0; + vm_map_offset_t new_offset = 0; for (entry = tmp_entry; copy_size == 0;) { - vm_map_entry_t next; + vm_map_entry_t next; next = entry->vme_next; @@ -8795,38 +8887,40 @@ start_overwrite: /* encounter of a submap as dictated by base_addr */ /* we will zero copy_size accordingly. */ if (entry->in_transition) { - /* - * Say that we are waiting, and wait for entry. - */ - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); - - if(!vm_map_lookup_entry(dst_map, base_addr, - &tmp_entry)) { + /* + * Say that we are waiting, and wait for entry. 
+ */ + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); + + if (!vm_map_lookup_entry(dst_map, base_addr, + &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } copy_size = 0; entry = tmp_entry; continue; } if (entry->is_sub_map) { - vm_map_offset_t sub_start; - vm_map_offset_t sub_end; - vm_map_offset_t local_end; + vm_map_offset_t sub_start; + vm_map_offset_t sub_end; + vm_map_offset_t local_end; - if (entry->needs_copy) { + if (entry->needs_copy) { /* if this is a COW submap */ /* just back the range with a */ /* anonymous entry */ - if(entry->vme_end < dst_end) + if (entry->vme_end < dst_end) { sub_end = entry->vme_end; - else + } else { sub_end = dst_end; - if(entry->vme_start < base_addr) + } + if (entry->vme_start < base_addr) { sub_start = base_addr; - else + } else { sub_start = entry->vme_start; + } vm_map_clip_end( dst_map, entry, sub_end); vm_map_clip_start( @@ -8845,25 +8939,27 @@ start_overwrite: entry->max_protection = VM_PROT_ALL; entry->wired_count = 0; entry->user_wired_count = 0; - if(entry->inheritance - == VM_INHERIT_SHARE) + if (entry->inheritance + == VM_INHERIT_SHARE) { entry->inheritance = VM_INHERIT_COPY; + } continue; } /* first take care of any non-sub_map */ /* entries to send */ - if(base_addr < entry->vme_start) { + if (base_addr < entry->vme_start) { /* stuff to send */ copy_size = - entry->vme_start - base_addr; + entry->vme_start - base_addr; break; } sub_start = VME_OFFSET(entry); - if(entry->vme_end < dst_end) + if (entry->vme_end < dst_end) { sub_end = entry->vme_end; - else + } else { sub_end = dst_end; + } sub_end -= entry->vme_start; sub_end += VME_OFFSET(entry); local_end = entry->vme_end; @@ -8872,38 +8968,38 @@ start_overwrite: /* adjust the copy object */ if (total_size > copy_size) { - vm_map_size_t local_size = 0; - vm_map_size_t entry_size; + vm_map_size_t local_size = 0; + vm_map_size_t entry_size; nentries = 1; new_offset = copy->offset; copy_entry = vm_map_copy_first_entry(copy); - while(copy_entry != - vm_map_copy_to_entry(copy)){ + while (copy_entry != + vm_map_copy_to_entry(copy)) { entry_size = copy_entry->vme_end - - copy_entry->vme_start; - if((local_size < copy_size) && - ((local_size + entry_size) + copy_entry->vme_start; + if ((local_size < copy_size) && + ((local_size + entry_size) >= copy_size)) { vm_map_copy_clip_end(copy, - copy_entry, - copy_entry->vme_start + - (copy_size - local_size)); + copy_entry, + copy_entry->vme_start + + (copy_size - local_size)); entry_size = copy_entry->vme_end - - copy_entry->vme_start; + copy_entry->vme_start; local_size += entry_size; new_offset += entry_size; } - if(local_size >= copy_size) { + if (local_size >= copy_size) { next_copy = copy_entry->vme_next; copy_entry->vme_next = - vm_map_copy_to_entry(copy); + vm_map_copy_to_entry(copy); previous_prev = - copy->cpy_hdr.links.prev; + copy->cpy_hdr.links.prev; copy->cpy_hdr.links.prev = copy_entry; copy->size = copy_size; remaining_entries = - copy->cpy_hdr.nentries; + copy->cpy_hdr.nentries; remaining_entries -= nentries; copy->cpy_hdr.nentries = nentries; break; @@ -8916,7 +9012,7 @@ start_overwrite: } } - if((entry->use_pmap) && (pmap == NULL)) { + if ((entry->use_pmap) && (pmap == NULL)) { kr = vm_map_copy_overwrite_nested( VME_SUBMAP(entry), sub_start, @@ -8940,20 +9036,20 @@ start_overwrite: dst_map->pmap, TRUE); } - if(kr != KERN_SUCCESS) { - if(next_copy != NULL) { + if (kr != KERN_SUCCESS) { + if (next_copy != NULL) { copy->cpy_hdr.nentries += - remaining_entries; 
+ remaining_entries; copy->cpy_hdr.links.prev->vme_next = - next_copy; + next_copy; copy->cpy_hdr.links.prev - = previous_prev; + = previous_prev; copy->size = total_size; } return kr; } if (dst_end <= local_end) { - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* otherwise copy no longer exists, it was */ /* destroyed after successful copy_overwrite */ @@ -8970,21 +9066,21 @@ start_overwrite: total_size -= copy_size; copy_size = 0; /* put back remainder of copy in container */ - if(next_copy != NULL) { + if (next_copy != NULL) { copy->cpy_hdr.nentries = remaining_entries; copy->cpy_hdr.links.next = next_copy; copy->cpy_hdr.links.prev = previous_prev; copy->size = total_size; next_copy->vme_prev = - vm_map_copy_to_entry(copy); + vm_map_copy_to_entry(copy); next_copy = NULL; } base_addr = local_end; vm_map_lock(dst_map); - if(!vm_map_lookup_entry(dst_map, - local_end, &tmp_entry)) { + if (!vm_map_lookup_entry(dst_map, + local_end, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } entry = tmp_entry; continue; @@ -8997,7 +9093,7 @@ start_overwrite: if ((next == vm_map_to_entry(dst_map)) || (next->vme_start != entry->vme_end)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } entry = next; @@ -9008,35 +9104,35 @@ start_overwrite: /* adjust the copy object */ if (total_size > copy_size) { - vm_map_size_t local_size = 0; - vm_map_size_t entry_size; + vm_map_size_t local_size = 0; + vm_map_size_t entry_size; new_offset = copy->offset; copy_entry = vm_map_copy_first_entry(copy); - while(copy_entry != vm_map_copy_to_entry(copy)) { + while (copy_entry != vm_map_copy_to_entry(copy)) { entry_size = copy_entry->vme_end - - copy_entry->vme_start; - if((local_size < copy_size) && - ((local_size + entry_size) + copy_entry->vme_start; + if ((local_size < copy_size) && + ((local_size + entry_size) >= copy_size)) { vm_map_copy_clip_end(copy, copy_entry, - copy_entry->vme_start + - (copy_size - local_size)); + copy_entry->vme_start + + (copy_size - local_size)); entry_size = copy_entry->vme_end - - copy_entry->vme_start; + copy_entry->vme_start; local_size += entry_size; new_offset += entry_size; } - if(local_size >= copy_size) { + if (local_size >= copy_size) { next_copy = copy_entry->vme_next; copy_entry->vme_next = - vm_map_copy_to_entry(copy); + vm_map_copy_to_entry(copy); previous_prev = - copy->cpy_hdr.links.prev; + copy->cpy_hdr.links.prev; copy->cpy_hdr.links.prev = copy_entry; copy->size = copy_size; remaining_entries = - copy->cpy_hdr.nentries; + copy->cpy_hdr.nentries; remaining_entries -= nentries; copy->cpy_hdr.nentries = nentries; break; @@ -9050,23 +9146,24 @@ start_overwrite: } if (aligned) { - pmap_t local_pmap; + pmap_t local_pmap; - if(pmap) + if (pmap) { local_pmap = pmap; - else + } else { local_pmap = dst_map->pmap; + } if ((kr = vm_map_copy_overwrite_aligned( - dst_map, tmp_entry, copy, - base_addr, local_pmap)) != KERN_SUCCESS) { - if(next_copy != NULL) { + dst_map, tmp_entry, copy, + base_addr, local_pmap)) != KERN_SUCCESS) { + if (next_copy != NULL) { copy->cpy_hdr.nentries += - remaining_entries; - copy->cpy_hdr.links.prev->vme_next = - next_copy; - copy->cpy_hdr.links.prev = - previous_prev; + remaining_entries; + copy->cpy_hdr.links.prev->vme_next = + next_copy; + copy->cpy_hdr.links.prev = + previous_prev; copy->size += copy_size; } return kr; @@ -9091,25 +9188,26 @@ start_overwrite: base_addr, discard_on_success); if (kr != KERN_SUCCESS) { - if(next_copy != NULL) { + if (next_copy != 
NULL) { copy->cpy_hdr.nentries += - remaining_entries; - copy->cpy_hdr.links.prev->vme_next = - next_copy; - copy->cpy_hdr.links.prev = - previous_prev; + remaining_entries; + copy->cpy_hdr.links.prev->vme_next = + next_copy; + copy->cpy_hdr.links.prev = + previous_prev; copy->size += copy_size; } return kr; } } total_size -= copy_size; - if(total_size == 0) + if (total_size == 0) { break; + } base_addr += copy_size; copy_size = 0; copy->offset = new_offset; - if(next_copy != NULL) { + if (next_copy != NULL) { copy->cpy_hdr.nentries = remaining_entries; copy->cpy_hdr.links.next = next_copy; copy->cpy_hdr.links.prev = previous_prev; @@ -9117,23 +9215,23 @@ start_overwrite: copy->size = total_size; } vm_map_lock(dst_map); - while(TRUE) { + while (TRUE) { if (!vm_map_lookup_entry(dst_map, - base_addr, &tmp_entry)) { + base_addr, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - if (tmp_entry->in_transition) { - entry->needs_wakeup = TRUE; - vm_map_entry_wait(dst_map, THREAD_UNINT); + if (tmp_entry->in_transition) { + entry->needs_wakeup = TRUE; + vm_map_entry_wait(dst_map, THREAD_UNINT); } else { break; } } vm_map_clip_start(dst_map, - tmp_entry, - vm_map_trunc_page(base_addr, - VM_MAP_PAGE_MASK(dst_map))); + tmp_entry, + vm_map_trunc_page(base_addr, + VM_MAP_PAGE_MASK(dst_map))); entry = tmp_entry; } /* while */ @@ -9141,25 +9239,26 @@ start_overwrite: /* * Throw away the vm_map_copy object */ - if (discard_on_success) + if (discard_on_success) { vm_map_copy_discard(copy); + } - return(KERN_SUCCESS); + return KERN_SUCCESS; }/* vm_map_copy_overwrite */ kern_return_t vm_map_copy_overwrite( - vm_map_t dst_map, - vm_map_offset_t dst_addr, - vm_map_copy_t copy, - boolean_t interruptible) + vm_map_t dst_map, + vm_map_offset_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible) { - vm_map_size_t head_size, tail_size; - vm_map_copy_t head_copy, tail_copy; - vm_map_offset_t head_addr, tail_addr; - vm_map_entry_t entry; - kern_return_t kr; - vm_map_offset_t effective_page_mask, effective_page_size; + vm_map_size_t head_size, tail_size; + vm_map_copy_t head_copy, tail_copy; + vm_map_offset_t head_addr, tail_addr; + vm_map_entry_t entry; + kern_return_t kr; + vm_map_offset_t effective_page_mask, effective_page_size; head_size = 0; tail_size = 0; @@ -9175,18 +9274,18 @@ vm_map_copy_overwrite( * We can't split the "copy" map if we're interruptible * or if we don't have a "copy" map... */ - blunt_copy: +blunt_copy: return vm_map_copy_overwrite_nested(dst_map, - dst_addr, - copy, - interruptible, - (pmap_t) NULL, - TRUE); + dst_addr, + copy, + interruptible, + (pmap_t) NULL, + TRUE); } effective_page_mask = MAX(VM_MAP_PAGE_MASK(dst_map), PAGE_MASK); effective_page_mask = MAX(VM_MAP_COPY_PAGE_MASK(copy), - effective_page_mask); + effective_page_mask); effective_page_size = effective_page_mask + 1; if (copy->size < 3 * effective_page_size) { @@ -9212,18 +9311,18 @@ vm_map_copy_overwrite( if (!vm_map_page_aligned(dst_addr, effective_page_mask)) { head_addr = dst_addr; head_size = (effective_page_size - - (copy->offset & effective_page_mask)); + (copy->offset & effective_page_mask)); head_size = MIN(head_size, copy->size); } if (!vm_map_page_aligned(copy->offset + copy->size, - effective_page_mask)) { + effective_page_mask)) { /* * Mis-alignment at the end. * Do an aligned copy up to the last page and * then an unaligned copy for the remaining bytes. 
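
The head/tail split above is only attempted for copies of at least three effective pages; head_size covers the bytes from the misaligned start up to the first page boundary and, as the next hunk shows, tail_size covers the bytes past the last boundary. The same mask arithmetic standalone, with a hypothetical 16 KB effective page (the real mask is the MAX of the map, copy and hardware page masks, and the head is keyed off dst_addr's alignment but sized from copy->offset, the two being equally misaligned on this path):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

    int main(void)
    {
        /* hypothetical values; effective_page_mask would really come from
         * MAX(VM_MAP_PAGE_MASK(dst_map), VM_MAP_COPY_PAGE_MASK(copy), PAGE_MASK) */
        uint64_t effective_page_mask = 0x3fff;               /* 16 KB - 1 */
        uint64_t effective_page_size = effective_page_mask + 1;
        uint64_t copy_offset = 0x100000123ULL;               /* misaligned start */
        uint64_t copy_size   = 5 * effective_page_size;

        uint64_t head_size = 0, tail_size = 0;

        if (copy_offset & effective_page_mask) {
            /* bytes from the misaligned start up to the next page boundary */
            head_size = effective_page_size - (copy_offset & effective_page_mask);
            head_size = min64(head_size, copy_size);
        }
        if ((copy_offset + copy_size) & effective_page_mask) {
            /* bytes past the last page boundary at the end */
            tail_size = (copy_offset + copy_size) & effective_page_mask;
            tail_size = min64(tail_size, copy_size);
        }

        printf("head = 0x%llx, tail = 0x%llx, aligned middle = 0x%llx\n",
            (unsigned long long)head_size,
            (unsigned long long)tail_size,
            (unsigned long long)(copy_size - head_size - tail_size));
        return 0;
    }

With these values head_size is 0x3edd and tail_size 0x123, leaving an exactly page-aligned 0x10000-byte middle for the aligned copy path.
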
*/ tail_size = ((copy->offset + copy->size) & - effective_page_mask); + effective_page_mask); tail_size = MIN(tail_size, copy->size); tail_addr = dst_addr + copy->size - tail_size; assert(tail_addr >= head_addr + head_size); @@ -9244,14 +9343,14 @@ vm_map_copy_overwrite( * in that case. */ vm_map_lock_read(dst_map); - if (! vm_map_lookup_entry(dst_map, dst_addr, &entry)) { + if (!vm_map_lookup_entry(dst_map, dst_addr, &entry)) { vm_map_unlock_read(dst_map); goto blunt_copy; } for (; - (entry != vm_map_copy_to_entry(copy) && - entry->vme_start < dst_addr + copy->size); - entry = entry->vme_next) { + (entry != vm_map_copy_to_entry(copy) && + entry->vme_start < dst_addr + copy->size); + entry = entry->vme_next) { if (entry->is_sub_map) { vm_map_unlock_read(dst_map); goto blunt_copy; @@ -9271,7 +9370,7 @@ vm_map_copy_overwrite( head_copy = vm_map_copy_allocate(); head_copy->type = VM_MAP_COPY_ENTRY_LIST; head_copy->cpy_hdr.entries_pageable = - copy->cpy_hdr.entries_pageable; + copy->cpy_hdr.entries_pageable; vm_map_store_init(&head_copy->cpy_hdr); entry = vm_map_copy_first_entry(copy); @@ -9287,20 +9386,21 @@ vm_map_copy_overwrite( vm_map_copy_clip_end(copy, entry, copy->offset); vm_map_copy_entry_unlink(copy, entry); vm_map_copy_entry_link(head_copy, - vm_map_copy_to_entry(head_copy), - entry); + vm_map_copy_to_entry(head_copy), + entry); /* * Do the unaligned copy. */ kr = vm_map_copy_overwrite_nested(dst_map, - head_addr, - head_copy, - interruptible, - (pmap_t) NULL, - FALSE); - if (kr != KERN_SUCCESS) + head_addr, + head_copy, + interruptible, + (pmap_t) NULL, + FALSE); + if (kr != KERN_SUCCESS) { goto done; + } } if (tail_size) { @@ -9310,7 +9410,7 @@ vm_map_copy_overwrite( tail_copy = vm_map_copy_allocate(); tail_copy->type = VM_MAP_COPY_ENTRY_LIST; tail_copy->cpy_hdr.entries_pageable = - copy->cpy_hdr.entries_pageable; + copy->cpy_hdr.entries_pageable; vm_map_store_init(&tail_copy->cpy_hdr); tail_copy->offset = copy->offset + copy->size - tail_size; @@ -9323,30 +9423,30 @@ vm_map_copy_overwrite( entry = vm_map_copy_last_entry(copy); vm_map_copy_entry_unlink(copy, entry); vm_map_copy_entry_link(tail_copy, - vm_map_copy_last_entry(tail_copy), - entry); + vm_map_copy_last_entry(tail_copy), + entry); } /* * Copy most (or possibly all) of the data. 
*/ kr = vm_map_copy_overwrite_nested(dst_map, - dst_addr + head_size, - copy, - interruptible, - (pmap_t) NULL, - FALSE); + dst_addr + head_size, + copy, + interruptible, + (pmap_t) NULL, + FALSE); if (kr != KERN_SUCCESS) { goto done; } if (tail_size) { kr = vm_map_copy_overwrite_nested(dst_map, - tail_addr, - tail_copy, - interruptible, - (pmap_t) NULL, - FALSE); + tail_addr, + tail_copy, + interruptible, + (pmap_t) NULL, + FALSE); } done: @@ -9372,8 +9472,8 @@ done: entry = vm_map_copy_first_entry(head_copy); vm_map_copy_entry_unlink(head_copy, entry); vm_map_copy_entry_link(copy, - vm_map_copy_to_entry(copy), - entry); + vm_map_copy_to_entry(copy), + entry); copy->offset -= head_size; copy->size += head_size; vm_map_copy_discard(head_copy); @@ -9383,8 +9483,8 @@ done: entry = vm_map_copy_last_entry(tail_copy); vm_map_copy_entry_unlink(tail_copy, entry); vm_map_copy_entry_link(copy, - vm_map_copy_last_entry(copy), - entry); + vm_map_copy_last_entry(copy), + entry); copy->size += tail_size; vm_map_copy_discard(tail_copy); tail_copy = NULL; @@ -9419,25 +9519,25 @@ done: static kern_return_t vm_map_copy_overwrite_unaligned( - vm_map_t dst_map, - vm_map_entry_t entry, - vm_map_copy_t copy, - vm_map_offset_t start, - boolean_t discard_on_success) + vm_map_t dst_map, + vm_map_entry_t entry, + vm_map_copy_t copy, + vm_map_offset_t start, + boolean_t discard_on_success) { - vm_map_entry_t copy_entry; - vm_map_entry_t copy_entry_next; - vm_map_version_t version; - vm_object_t dst_object; - vm_object_offset_t dst_offset; - vm_object_offset_t src_offset; - vm_object_offset_t entry_offset; - vm_map_offset_t entry_end; - vm_map_size_t src_size, - dst_size, - copy_size, - amount_left; - kern_return_t kr = KERN_SUCCESS; + vm_map_entry_t copy_entry; + vm_map_entry_t copy_entry_next; + vm_map_version_t version; + vm_object_t dst_object; + vm_object_offset_t dst_offset; + vm_object_offset_t src_offset; + vm_object_offset_t entry_offset; + vm_map_offset_t entry_end; + vm_map_size_t src_size, + dst_size, + copy_size, + amount_left; + kern_return_t kr = KERN_SUCCESS; copy_entry = vm_map_copy_first_entry(copy); @@ -9451,21 +9551,20 @@ vm_map_copy_overwrite_unaligned( * the vm_object not just the data. */ while (amount_left > 0) { - if (entry == vm_map_to_entry(dst_map)) { vm_map_unlock_read(dst_map); return KERN_INVALID_ADDRESS; } /* "start" must be within the current map entry */ - assert ((start>=entry->vme_start) && (startvme_end)); + assert((start >= entry->vme_start) && (start < entry->vme_end)); dst_offset = start - entry->vme_start; dst_size = entry->vme_end - start; src_size = copy_entry->vme_end - - (copy_entry->vme_start + src_offset); + (copy_entry->vme_start + src_offset); if (dst_size < src_size) { /* @@ -9489,15 +9588,14 @@ vm_map_copy_overwrite_unaligned( * Copy on write region. */ if (entry->needs_copy && - ((entry->protection & VM_PROT_WRITE) != 0)) - { + ((entry->protection & VM_PROT_WRITE) != 0)) { if (vm_map_lock_read_to_write(dst_map)) { vm_map_lock_read(dst_map); goto RetryLookup; } VME_OBJECT_SHADOW(entry, - (vm_map_size_t)(entry->vme_end - - entry->vme_start)); + (vm_map_size_t)(entry->vme_end + - entry->vme_start)); entry->needs_copy = FALSE; vm_map_lock_write_to_read(dst_map); } @@ -9506,13 +9604,13 @@ vm_map_copy_overwrite_unaligned( * unlike with the virtual (aligned) copy we're going * to fault on it therefore we need a target object. 
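
vm_map_copy_overwrite_unaligned() above walks source copy entries and destination entries whose boundaries need not coincide, copying min(dst_size, src_size) per iteration and advancing whichever side ran out. The chunking logic in isolation, with plain buffers standing in for map entries:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* source split 3/5, destination split 4/4: boundaries don't line up */
        const char src_a[3] = "abc", src_b[5] = "defgh";
        const char *src[] = { src_a, src_b };
        size_t src_len[] = { 3, 5 };

        char dst_a[4], dst_b[4];
        char *dst[] = { dst_a, dst_b };
        size_t dst_len[] = { 4, 4 };

        size_t amount_left = 8;
        size_t si = 0, di = 0, src_off = 0, dst_off = 0;

        while (amount_left > 0) {
            /* copy_size = min(dst_size, src_size), as in the hunk above */
            size_t src_size = src_len[si] - src_off;
            size_t dst_size = dst_len[di] - dst_off;
            size_t copy_size = src_size < dst_size ? src_size : dst_size;

            memcpy(dst[di] + dst_off, src[si] + src_off, copy_size);
            amount_left -= copy_size;
            src_off += copy_size;
            dst_off += copy_size;

            if (src_off == src_len[si]) { si++; src_off = 0; }  /* next copy entry */
            if (dst_off == dst_len[di]) { di++; dst_off = 0; }  /* next map entry */
        }

        printf("%.4s%.4s\n", dst_a, dst_b);   /* abcdefgh */
        return 0;
    }

Source pieces of 3 and 5 bytes land in destination pieces of 4 and 4; three iterations move all 8 bytes.
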
*/ - if (dst_object == VM_OBJECT_NULL) { + if (dst_object == VM_OBJECT_NULL) { if (vm_map_lock_read_to_write(dst_map)) { vm_map_lock_read(dst_map); goto RetryLookup; } dst_object = vm_object_allocate((vm_map_size_t) - entry->vme_end - entry->vme_start); + entry->vme_end - entry->vme_start); VME_OBJECT(entry) = dst_object; VME_OFFSET_SET(entry, 0); assert(entry->use_pmap); @@ -9550,12 +9648,12 @@ vm_map_copy_overwrite_unaligned( /* * If a hard error occurred, return it now */ - if (kr != KERN_SUCCESS) + if (kr != KERN_SUCCESS) { return kr; + } if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end - || amount_left == 0) - { + || amount_left == 0) { /* * all done with this copy entry, dispose. */ @@ -9581,8 +9679,9 @@ vm_map_copy_overwrite_unaligned( src_offset = 0; } - if (amount_left == 0) + if (amount_left == 0) { return KERN_SUCCESS; + } vm_map_lock_read(dst_map); if (version.main_timestamp == dst_map->timestamp) { @@ -9599,7 +9698,7 @@ vm_map_copy_overwrite_unaligned( */ if (start != entry->vme_start) { vm_map_unlock_read(dst_map); - return KERN_INVALID_ADDRESS ; + return KERN_INVALID_ADDRESS; } } } else { @@ -9608,11 +9707,10 @@ vm_map_copy_overwrite_unaligned( * we must lookup the entry because somebody * might have changed the map behind our backs. */ - RetryLookup: - if (!vm_map_lookup_entry(dst_map, start, &entry)) - { +RetryLookup: + if (!vm_map_lookup_entry(dst_map, start, &entry)) { vm_map_unlock_read(dst_map); - return KERN_INVALID_ADDRESS ; + return KERN_INVALID_ADDRESS; } } }/* while */ @@ -9645,21 +9743,20 @@ int vm_map_copy_overwrite_aligned_src_large = 0; static kern_return_t vm_map_copy_overwrite_aligned( - vm_map_t dst_map, - vm_map_entry_t tmp_entry, - vm_map_copy_t copy, - vm_map_offset_t start, - __unused pmap_t pmap) + vm_map_t dst_map, + vm_map_entry_t tmp_entry, + vm_map_copy_t copy, + vm_map_offset_t start, + __unused pmap_t pmap) { - vm_object_t object; - vm_map_entry_t copy_entry; - vm_map_size_t copy_size; - vm_map_size_t size; - vm_map_entry_t entry; + vm_object_t object; + vm_map_entry_t copy_entry; + vm_map_size_t copy_size; + vm_map_size_t size; + vm_map_entry_t entry; while ((copy_entry = vm_map_copy_first_entry(copy)) - != vm_map_copy_to_entry(copy)) - { + != vm_map_copy_to_entry(copy)) { copy_size = (copy_entry->vme_end - copy_entry->vme_start); entry = tmp_entry; @@ -9680,9 +9777,9 @@ vm_map_copy_overwrite_aligned( */ if ((entry->vme_start != start) || ((entry->is_sub_map) - && !entry->needs_copy)) { + && !entry->needs_copy)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } assert(entry != vm_map_to_entry(dst_map)); @@ -9690,9 +9787,9 @@ vm_map_copy_overwrite_aligned( * Check protection again */ - if ( ! 
(entry->protection & VM_PROT_WRITE)) { + if (!(entry->protection & VM_PROT_WRITE)) { vm_map_unlock(dst_map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } /* @@ -9702,7 +9799,7 @@ vm_map_copy_overwrite_aligned( if (copy_size < size) { if (entry->map_aligned && !VM_MAP_PAGE_ALIGNED(entry->vme_start + copy_size, - VM_MAP_PAGE_MASK(dst_map))) { + VM_MAP_PAGE_MASK(dst_map))) { /* no longer map-aligned */ entry->map_aligned = FALSE; } @@ -9716,7 +9813,7 @@ vm_map_copy_overwrite_aligned( if (size < copy_size) { vm_map_copy_clip_end(copy, copy_entry, - copy_entry->vme_start + size); + copy_entry->vme_start + size); copy_size = size; } @@ -9732,12 +9829,12 @@ vm_map_copy_overwrite_aligned( object = VME_OBJECT(entry); if ((!entry->is_shared && - ((object == VM_OBJECT_NULL) || - (object->internal && !object->true_share))) || + ((object == VM_OBJECT_NULL) || + (object->internal && !object->true_share))) || entry->needs_copy) { - vm_object_t old_object = VME_OBJECT(entry); - vm_object_offset_t old_offset = VME_OFFSET(entry); - vm_object_offset_t offset; + vm_object_t old_object = VME_OBJECT(entry); + vm_object_offset_t old_offset = VME_OFFSET(entry); + vm_object_offset_t offset; /* * Ensure that the source and destination aren't @@ -9748,8 +9845,9 @@ vm_map_copy_overwrite_aligned( vm_map_copy_entry_unlink(copy, copy_entry); vm_map_copy_entry_dispose(copy, copy_entry); - if (old_object != VM_OBJECT_NULL) + if (old_object != VM_OBJECT_NULL) { vm_object_deallocate(old_object); + } start = tmp_entry->vme_end; tmp_entry = tmp_entry->vme_next; @@ -9757,8 +9855,8 @@ vm_map_copy_overwrite_aligned( } #if !CONFIG_EMBEDDED -#define __TRADEOFF1_OBJ_SIZE (64 * 1024 * 1024) /* 64 MB */ -#define __TRADEOFF1_COPY_SIZE (128 * 1024) /* 128 KB */ +#define __TRADEOFF1_OBJ_SIZE (64 * 1024 * 1024) /* 64 MB */ +#define __TRADEOFF1_COPY_SIZE (128 * 1024) /* 128 KB */ if (VME_OBJECT(copy_entry) != VM_OBJECT_NULL && VME_OBJECT(copy_entry)->vo_size >= __TRADEOFF1_OBJ_SIZE && copy_size <= __TRADEOFF1_COPY_SIZE) { @@ -9778,7 +9876,7 @@ vm_map_copy_overwrite_aligned( if ((dst_map->pmap != kernel_pmap) && (VME_ALIAS(entry) >= VM_MEMORY_MALLOC) && - (VME_ALIAS(entry) <= VM_MEMORY_MALLOC_LARGE_REUSED)) { + (VME_ALIAS(entry) <= VM_MEMORY_MALLOC_MEDIUM)) { vm_object_t new_object, new_shadow; /* @@ -9791,10 +9889,10 @@ vm_map_copy_overwrite_aligned( } while (new_object != VM_OBJECT_NULL && #if !CONFIG_EMBEDDED - !new_object->true_share && - new_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && + !new_object->true_share && + new_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && #endif /* !CONFIG_EMBEDDED */ - new_object->internal) { + new_object->internal) { new_shadow = new_object->shadow; if (new_shadow == VM_OBJECT_NULL) { break; @@ -9843,14 +9941,14 @@ vm_map_copy_overwrite_aligned( } if (old_object != VM_OBJECT_NULL) { - if(entry->is_sub_map) { - if(entry->use_pmap) { + if (entry->is_sub_map) { + if (entry->use_pmap) { #ifndef NO_NESTED_PMAP pmap_unnest(dst_map->pmap, - (addr64_t)entry->vme_start, - entry->vme_end - entry->vme_start); -#endif /* NO_NESTED_PMAP */ - if(dst_map->mapped_in_other_pmaps) { + (addr64_t)entry->vme_start, + entry->vme_end - entry->vme_start); +#endif /* NO_NESTED_PMAP */ + if (dst_map->mapped_in_other_pmaps) { /* clean up parent */ /* map/maps */ vm_map_submap_pmap_clean( @@ -9866,9 +9964,9 @@ vm_map_copy_overwrite_aligned( VME_SUBMAP(entry), VME_OFFSET(entry)); } - vm_map_deallocate(VME_SUBMAP(entry)); - } else { - if(dst_map->mapped_in_other_pmaps) { + 
vm_map_deallocate(VME_SUBMAP(entry)); + } else { + if (dst_map->mapped_in_other_pmaps) { vm_object_pmap_protect_options( VME_OBJECT(entry), VME_OFFSET(entry), @@ -9886,7 +9984,7 @@ vm_map_copy_overwrite_aligned( PMAP_OPTIONS_REMOVE); } vm_object_deallocate(old_object); - } + } } if (entry->iokit_acct) { @@ -9928,16 +10026,16 @@ vm_map_copy_overwrite_aligned( start = tmp_entry->vme_end; tmp_entry = tmp_entry->vme_next; } else { - vm_map_version_t version; - vm_object_t dst_object; - vm_object_offset_t dst_offset; - kern_return_t r; + vm_map_version_t version; + vm_object_t dst_object; + vm_object_offset_t dst_offset; + kern_return_t r; - slow_copy: +slow_copy: if (entry->needs_copy) { VME_OBJECT_SHADOW(entry, - (entry->vme_end - - entry->vme_start)); + (entry->vme_end - + entry->vme_start)); entry->needs_copy = FALSE; } @@ -9969,7 +10067,6 @@ vm_map_copy_overwrite_aligned( VME_OBJECT_SET(entry, dst_object); VME_OFFSET_SET(entry, dst_offset); assert(entry->use_pmap); - } vm_object_reference(dst_object); @@ -10004,8 +10101,9 @@ vm_map_copy_overwrite_aligned( * If a hard error occurred, return it now */ - if (r != KERN_SUCCESS) - return(r); + if (r != KERN_SUCCESS) { + return r; + } if (copy_size != 0) { /* @@ -10013,7 +10111,7 @@ vm_map_copy_overwrite_aligned( */ vm_map_copy_clip_end(copy, copy_entry, - copy_entry->vme_start + copy_size); + copy_entry->vme_start + copy_size); vm_map_copy_entry_unlink(copy, copy_entry); vm_object_deallocate(VME_OBJECT(copy_entry)); vm_map_copy_entry_dispose(copy, copy_entry); @@ -10046,7 +10144,7 @@ vm_map_copy_overwrite_aligned( if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) { vm_map_unlock(dst_map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } if (tmp_entry->map_aligned && !VM_MAP_PAGE_ALIGNED( @@ -10060,7 +10158,7 @@ vm_map_copy_overwrite_aligned( } }/* while */ - return(KERN_SUCCESS); + return KERN_SUCCESS; }/* vm_map_copy_overwrite_aligned */ /* @@ -10075,24 +10173,26 @@ vm_map_copy_overwrite_aligned( */ static kern_return_t vm_map_copyin_kernel_buffer( - vm_map_t src_map, - vm_map_offset_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - vm_map_copy_t *copy_result) + vm_map_t src_map, + vm_map_offset_t src_addr, + vm_map_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result) { kern_return_t kr; vm_map_copy_t copy; vm_size_t kalloc_size; - if (len > msg_ool_size_small) + if (len > msg_ool_size_small) { return KERN_INVALID_ARGUMENT; + } kalloc_size = (vm_size_t)(cpy_kdata_hdr_sz + len); copy = (vm_map_copy_t)kalloc(kalloc_size); - if (copy == VM_MAP_COPY_NULL) + if (copy == VM_MAP_COPY_NULL) { return KERN_RESOURCE_SHORTAGE; + } copy->type = VM_MAP_COPY_KERNEL_BUFFER; copy->size = len; copy->offset = 0; @@ -10106,12 +10206,12 @@ vm_map_copyin_kernel_buffer( (void) vm_map_remove( src_map, vm_map_trunc_page(src_addr, - VM_MAP_PAGE_MASK(src_map)), + VM_MAP_PAGE_MASK(src_map)), vm_map_round_page(src_addr + len, - VM_MAP_PAGE_MASK(src_map)), + VM_MAP_PAGE_MASK(src_map)), (VM_MAP_REMOVE_INTERRUPTIBLE | - VM_MAP_REMOVE_WAIT_FOR_KWIRE | - ((src_map == kernel_map) ? VM_MAP_REMOVE_KUNWIRE : VM_MAP_REMOVE_NO_FLAGS))); + VM_MAP_REMOVE_WAIT_FOR_KWIRE | + ((src_map == kernel_map) ? 
VM_MAP_REMOVE_KUNWIRE : VM_MAP_REMOVE_NO_FLAGS))); } *copy_result = copy; return KERN_SUCCESS; @@ -10131,12 +10231,12 @@ vm_map_copyin_kernel_buffer( static int vm_map_copyout_kernel_buffer_failures = 0; static kern_return_t vm_map_copyout_kernel_buffer( - vm_map_t map, - vm_map_address_t *addr, /* IN/OUT */ - vm_map_copy_t copy, - vm_map_size_t copy_size, - boolean_t overwrite, - boolean_t consume_on_success) + vm_map_t map, + vm_map_address_t *addr, /* IN/OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size, + boolean_t overwrite, + boolean_t consume_on_success) { kern_return_t kr = KERN_SUCCESS; thread_t thread = current_thread(); @@ -10146,32 +10246,33 @@ vm_map_copyout_kernel_buffer( /* * check for corrupted vm_map_copy structure */ - if (copy_size > msg_ool_size_small || copy->offset) + if (copy_size > msg_ool_size_small || copy->offset) { panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld", - (long long)copy->size, (long long)copy->offset); + (long long)copy->size, (long long)copy->offset); + } if (!overwrite) { - /* * Allocate space in the target map for the data */ *addr = 0; kr = vm_map_enter(map, - addr, - vm_map_round_page(copy_size, - VM_MAP_PAGE_MASK(map)), - (vm_map_offset_t) 0, - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - VM_OBJECT_NULL, - (vm_object_offset_t) 0, - FALSE, - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); - if (kr != KERN_SUCCESS) + addr, + vm_map_round_page(copy_size, + VM_MAP_PAGE_MASK(map)), + (vm_map_offset_t) 0, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + VM_OBJECT_NULL, + (vm_object_offset_t) 0, + FALSE, + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS) { return kr; + } #if KASAN if (map->pmap == kernel_pmap) { kasan_notify_address(*addr, copy->size); @@ -10183,7 +10284,6 @@ vm_map_copyout_kernel_buffer( * Copyout the data from the kernel buffer to the target map. */ if (thread->map == map) { - /* * If the target map is the current map, just do * the copy. 
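
Both directions of the kernel-buffer path hinge on msg_ool_size_small: vm_map_copyin_kernel_buffer() above rejects anything larger and allocates header plus payload with a single kalloc(), while vm_map_copyout_kernel_buffer() panics if a copy object claims a larger size or a nonzero offset. A sketch of that sizing decision; the threshold and header size are hypothetical placeholders, not the kernel's values:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins; the kernel derives msg_ool_size_small from its
     * kalloc configuration and cpy_kdata_hdr_sz from the copy header layout. */
    #define TOY_OOL_SIZE_SMALL  4096u
    #define TOY_HDR_SZ          64u

    struct toy_copy {
        uint64_t size;
        uint64_t offset;
        /* payload of 'size' bytes follows the header in the same allocation */
    };

    static struct toy_copy *toy_copyin_kernel_buffer(uint64_t len)
    {
        if (len > TOY_OOL_SIZE_SMALL) {
            return NULL;          /* KERN_INVALID_ARGUMENT: take the page path */
        }
        /* one allocation for header + inline data, as in the hunk above */
        struct toy_copy *copy = malloc(TOY_HDR_SZ + len);
        if (copy != NULL) {
            copy->size = len;
            copy->offset = 0;
        }
        return copy;
    }

    int main(void)
    {
        struct toy_copy *c = toy_copyin_kernel_buffer(1024);
        if (c == NULL) {
            return 1;
        }
        /* integrity check on the consume side, mirroring the panic guard */
        if (c->size > TOY_OOL_SIZE_SMALL || c->offset != 0) {
            fprintf(stderr, "corrupted copy object\n");
            abort();
        }
        printf("inline copy of %llu bytes\n", (unsigned long long)c->size);
        free(c);
        return 0;
    }
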
@@ -10192,8 +10292,7 @@ vm_map_copyout_kernel_buffer( if (copyout(copy->cpy_kdata, *addr, (vm_size_t)copy_size)) { kr = KERN_INVALID_ADDRESS; } - } - else { + } else { vm_map_t oldmap; /* @@ -10223,11 +10322,11 @@ vm_map_copyout_kernel_buffer( (void) vm_map_remove( map, vm_map_trunc_page(*addr, - VM_MAP_PAGE_MASK(map)), + VM_MAP_PAGE_MASK(map)), vm_map_round_page((*addr + - vm_map_round_page(copy_size, - VM_MAP_PAGE_MASK(map))), - VM_MAP_PAGE_MASK(map)), + vm_map_round_page(copy_size, + VM_MAP_PAGE_MASK(map))), + VM_MAP_PAGE_MASK(map)), VM_MAP_REMOVE_NO_FLAGS); *addr = 0; } @@ -10242,7 +10341,7 @@ vm_map_copyout_kernel_buffer( } /* - * Routine: vm_map_copy_insert [internal use only] + * Routine: vm_map_copy_insert [internal use only] * * Description: * Link a copy chain ("copy") into a map at the @@ -10252,17 +10351,17 @@ vm_map_copyout_kernel_buffer( */ static void vm_map_copy_insert( - vm_map_t map, - vm_map_entry_t after_where, - vm_map_copy_t copy) + vm_map_t map, + vm_map_entry_t after_where, + vm_map_copy_t copy) { - vm_map_entry_t entry; + vm_map_entry_t entry; while (vm_map_copy_first_entry(copy) != vm_map_copy_to_entry(copy)) { entry = vm_map_copy_first_entry(copy); vm_map_copy_entry_unlink(copy, entry); vm_map_store_entry_link(map, after_where, entry, - VM_MAP_KERNEL_FLAGS_NONE); + VM_MAP_KERNEL_FLAGS_NONE); after_where = entry; } zfree(vm_map_copy_zone, copy); @@ -10270,22 +10369,22 @@ vm_map_copy_insert( void vm_map_copy_remap( - vm_map_t map, - vm_map_entry_t where, - vm_map_copy_t copy, - vm_map_offset_t adjustment, - vm_prot_t cur_prot, - vm_prot_t max_prot, - vm_inherit_t inheritance) + vm_map_t map, + vm_map_entry_t where, + vm_map_copy_t copy, + vm_map_offset_t adjustment, + vm_prot_t cur_prot, + vm_prot_t max_prot, + vm_inherit_t inheritance) { - vm_map_entry_t copy_entry, new_entry; + vm_map_entry_t copy_entry, new_entry; for (copy_entry = vm_map_copy_first_entry(copy); - copy_entry != vm_map_copy_to_entry(copy); - copy_entry = copy_entry->vme_next) { + copy_entry != vm_map_copy_to_entry(copy); + copy_entry = copy_entry->vme_next) { /* get a new VM map entry for the map */ new_entry = vm_map_entry_create(map, - !map->hdr.entries_pageable); + !map->hdr.entries_pageable); /* copy the "copy entry" to the new entry */ vm_map_entry_copy(new_entry, copy_entry); /* adjust "start" and "end" */ @@ -10307,7 +10406,7 @@ vm_map_copy_remap( } /* insert the new entry in the map */ vm_map_store_entry_link(map, where, new_entry, - VM_MAP_KERNEL_FLAGS_NONE); + VM_MAP_KERNEL_FLAGS_NONE); /* continue inserting the "copy entries" after the new entry */ where = new_entry; } @@ -10321,19 +10420,21 @@ vm_map_copy_remap( */ boolean_t vm_map_copy_validate_size( - vm_map_t dst_map, - vm_map_copy_t copy, - vm_map_size_t *size) + vm_map_t dst_map, + vm_map_copy_t copy, + vm_map_size_t *size) { - if (copy == VM_MAP_COPY_NULL) + if (copy == VM_MAP_COPY_NULL) { return FALSE; + } vm_map_size_t copy_sz = copy->size; vm_map_size_t sz = *size; switch (copy->type) { case VM_MAP_COPY_OBJECT: case VM_MAP_COPY_KERNEL_BUFFER: - if (sz == copy_sz) + if (sz == copy_sz) { return TRUE; + } break; case VM_MAP_COPY_ENTRY_LIST: /* @@ -10366,16 +10467,16 @@ vm_map_copy_validate_size( */ kern_return_t vm_map_copyout_size( - vm_map_t dst_map, - vm_map_address_t *dst_addr, /* OUT */ - vm_map_copy_t copy, - vm_map_size_t copy_size) + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size) { return vm_map_copyout_internal(dst_map, dst_addr, copy, copy_size, - TRUE, /* 
consume_on_success */ - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); + TRUE, /* consume_on_success */ + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); } /* @@ -10390,35 +10491,35 @@ vm_map_copyout_size( */ kern_return_t vm_map_copyout( - vm_map_t dst_map, - vm_map_address_t *dst_addr, /* OUT */ - vm_map_copy_t copy) + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy) { return vm_map_copyout_internal(dst_map, dst_addr, copy, copy ? copy->size : 0, - TRUE, /* consume_on_success */ - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); + TRUE, /* consume_on_success */ + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); } kern_return_t vm_map_copyout_internal( - vm_map_t dst_map, - vm_map_address_t *dst_addr, /* OUT */ - vm_map_copy_t copy, - vm_map_size_t copy_size, - boolean_t consume_on_success, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size, + boolean_t consume_on_success, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { - vm_map_size_t size; - vm_map_size_t adjustment; - vm_map_offset_t start; - vm_object_offset_t vm_copy_start; - vm_map_entry_t last; - vm_map_entry_t entry; - vm_map_entry_t hole_entry; + vm_map_size_t size; + vm_map_size_t adjustment; + vm_map_offset_t start; + vm_object_offset_t vm_copy_start; + vm_map_entry_t last; + vm_map_entry_t entry; + vm_map_entry_t hole_entry; /* * Check for null copy object. @@ -10426,7 +10527,7 @@ vm_map_copyout_internal( if (copy == VM_MAP_COPY_NULL) { *dst_addr = 0; - return(KERN_SUCCESS); + return KERN_SUCCESS; } if (copy->size != copy_size) { @@ -10440,30 +10541,32 @@ vm_map_copyout_internal( */ if (copy->type == VM_MAP_COPY_OBJECT) { - vm_object_t object = copy->cpy_object; - kern_return_t kr; - vm_object_offset_t offset; + vm_object_t object = copy->cpy_object; + kern_return_t kr; + vm_object_offset_t offset; offset = vm_object_trunc_page(copy->offset); size = vm_map_round_page((copy_size + - (vm_map_size_t)(copy->offset - - offset)), - VM_MAP_PAGE_MASK(dst_map)); + (vm_map_size_t)(copy->offset - + offset)), + VM_MAP_PAGE_MASK(dst_map)); *dst_addr = 0; kr = vm_map_enter(dst_map, dst_addr, size, - (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - object, offset, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, - VM_INHERIT_DEFAULT); - if (kr != KERN_SUCCESS) - return(kr); + (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + object, offset, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT); + if (kr != KERN_SUCCESS) { + return kr; + } /* Account for non-pagealigned copy object */ *dst_addr += (vm_map_offset_t)(copy->offset - offset); - if (consume_on_success) + if (consume_on_success) { zfree(vm_map_copy_zone, copy); - return(KERN_SUCCESS); + } + return KERN_SUCCESS; } /* @@ -10473,8 +10576,8 @@ vm_map_copyout_internal( if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { return vm_map_copyout_kernel_buffer(dst_map, dst_addr, - copy, copy_size, FALSE, - consume_on_success); + copy, copy_size, FALSE, + consume_on_success); } @@ -10483,16 +10586,16 @@ vm_map_copyout_internal( */ vm_copy_start = vm_map_trunc_page((vm_map_size_t)copy->offset, - VM_MAP_COPY_PAGE_MASK(copy)); + VM_MAP_COPY_PAGE_MASK(copy)); size = vm_map_round_page((vm_map_size_t)copy->offset + copy_size, - VM_MAP_COPY_PAGE_MASK(copy)) - - vm_copy_start; + 
VM_MAP_COPY_PAGE_MASK(copy)) + - vm_copy_start; -StartAgain: ; +StartAgain:; vm_map_lock(dst_map); - if( dst_map->disable_vmentry_reuse == TRUE) { + if (dst_map->disable_vmentry_reuse == TRUE) { VM_MAP_HIGHEST_ENTRY(dst_map, entry, start); last = entry; } else { @@ -10504,7 +10607,7 @@ StartAgain: ; * No more space in the map? */ vm_map_unlock(dst_map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } last = hole_entry; @@ -10512,33 +10615,34 @@ StartAgain: ; } else { assert(first_free_is_valid(dst_map)); start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ? - vm_map_min(dst_map) : last->vme_end; + vm_map_min(dst_map) : last->vme_end; } start = vm_map_round_page(start, - VM_MAP_PAGE_MASK(dst_map)); + VM_MAP_PAGE_MASK(dst_map)); } while (TRUE) { - vm_map_entry_t next = last->vme_next; - vm_map_offset_t end = start + size; + vm_map_entry_t next = last->vme_next; + vm_map_offset_t end = start + size; if ((end > dst_map->max_offset) || (end < start)) { if (dst_map->wait_for_space) { if (size <= (dst_map->max_offset - dst_map->min_offset)) { assert_wait((event_t) dst_map, - THREAD_INTERRUPTIBLE); + THREAD_INTERRUPTIBLE); vm_map_unlock(dst_map); thread_block(THREAD_CONTINUE_NULL); goto StartAgain; } } vm_map_unlock(dst_map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } if (dst_map->holelistenabled) { - if (last->vme_end >= end) + if (last->vme_end >= end) { break; + } } else { /* * If there are no more entries, we must win. @@ -10549,11 +10653,13 @@ StartAgain: ; * after the end of the potential new region. */ - if (next == vm_map_to_entry(dst_map)) + if (next == vm_map_to_entry(dst_map)) { break; + } - if (next->vme_start >= end) + if (next->vme_start >= end) { break; + } } last = next; @@ -10564,14 +10670,14 @@ StartAgain: ; * Wrapped around */ vm_map_unlock(dst_map); - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } start = last->vme_start; } else { start = last->vme_end; } start = vm_map_round_page(start, - VM_MAP_PAGE_MASK(dst_map)); + VM_MAP_PAGE_MASK(dst_map)); } if (dst_map->holelistenabled) { @@ -10582,7 +10688,7 @@ StartAgain: ; adjustment = start - vm_copy_start; - if (! consume_on_success) { + if (!consume_on_success) { /* * We're not allowed to consume "copy", so we'll have to * copy its map entries into the destination map below. @@ -10607,8 +10713,8 @@ StartAgain: ; * Mismatches occur when dealing with the default * pager. */ - zone_t old_zone; - vm_map_entry_t next, new; + zone_t old_zone; + vm_map_entry_t next, new; /* * Find the zone that the copies were allocated from @@ -10635,8 +10741,8 @@ StartAgain: ; new->use_pmap = FALSE; } vm_map_copy_entry_link(copy, - vm_map_copy_last_entry(copy), - new); + vm_map_copy_last_entry(copy), + new); next = entry->vme_next; old_zone = entry->from_reserved_zone ? 
vm_map_entry_reserved_zone : vm_map_entry_zone; zfree(old_zone, entry); @@ -10650,8 +10756,8 @@ StartAgain: ; */ for (entry = vm_map_copy_first_entry(copy); - entry != vm_map_copy_to_entry(copy); - entry = entry->vme_next) { + entry != vm_map_copy_to_entry(copy); + entry = entry->vme_next) { if (VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT) { /* * We're injecting this copy entry into a map that @@ -10667,9 +10773,9 @@ StartAgain: ; if (entry->map_aligned) { assert(VM_MAP_PAGE_ALIGNED(entry->vme_start, - VM_MAP_PAGE_MASK(dst_map))); + VM_MAP_PAGE_MASK(dst_map))); assert(VM_MAP_PAGE_ALIGNED(entry->vme_end, - VM_MAP_PAGE_MASK(dst_map))); + VM_MAP_PAGE_MASK(dst_map))); } entry->inheritance = VM_INHERIT_DEFAULT; @@ -10683,22 +10789,22 @@ StartAgain: ; */ if (entry->wired_count != 0) { vm_map_offset_t va; - vm_object_offset_t offset; + vm_object_offset_t offset; vm_object_t object; vm_prot_t prot; - int type_of_fault; + int type_of_fault; object = VME_OBJECT(entry); offset = VME_OFFSET(entry); va = entry->vme_start; pmap_pageable(dst_map->pmap, - entry->vme_start, - entry->vme_end, - TRUE); + entry->vme_start, + entry->vme_end, + TRUE); while (va < entry->vme_end) { - vm_page_t m; + vm_page_t m; struct vm_object_fault_info fault_info = {}; /* @@ -10722,14 +10828,16 @@ StartAgain: ; m = vm_page_lookup(object, offset); if (m == VM_PAGE_NULL || !VM_PAGE_WIRED(m) || - m->vmp_absent) + m->vmp_absent) { panic("vm_map_copyout: wiring %p", m); + } prot = entry->protection; if (override_nx(dst_map, VME_ALIAS(entry)) && - prot) - prot |= VM_PROT_EXECUTE; + prot) { + prot |= VM_PROT_EXECUTE; + } type_of_fault = DBG_CACHE_HIT_FAULT; @@ -10741,16 +10849,16 @@ StartAgain: ; } vm_fault_enter(m, - dst_map->pmap, - va, - prot, - prot, - VM_PAGE_WIRED(m), - FALSE, /* change_wiring */ - VM_KERN_MEMORY_NONE, /* tag - not wiring */ - &fault_info, - NULL, /* need_retry */ - &type_of_fault); + dst_map->pmap, + va, + prot, + prot, + VM_PAGE_WIRED(m), + FALSE, /* change_wiring */ + VM_KERN_MEMORY_NONE, /* tag - not wiring */ + &fault_info, + NULL, /* need_retry */ + &type_of_fault); vm_object_unlock(object); @@ -10792,8 +10900,8 @@ after_adjustments: vm_map_copy_insert(dst_map, last, copy); } else { vm_map_copy_remap(dst_map, last, copy, adjustment, - cur_protection, max_protection, - inheritance); + cur_protection, max_protection, + inheritance); } vm_map_unlock(dst_map); @@ -10802,7 +10910,7 @@ after_adjustments: * XXX If wiring_required, call vm_map_pageable */ - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -10817,14 +10925,14 @@ after_adjustments: kern_return_t vm_map_copyin( - vm_map_t src_map, - vm_map_address_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - vm_map_copy_t *copy_result) /* OUT */ + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result) /* OUT */ { - return(vm_map_copyin_common(src_map, src_addr, len, src_destroy, - FALSE, copy_result, FALSE)); + return vm_map_copyin_common(src_map, src_addr, len, src_destroy, + FALSE, copy_result, FALSE); } /* @@ -10847,22 +10955,22 @@ vm_map_copyin( */ typedef struct submap_map { - vm_map_t parent_map; - vm_map_offset_t base_start; - vm_map_offset_t base_end; - vm_map_size_t base_len; + vm_map_t parent_map; + vm_map_offset_t base_start; + vm_map_offset_t base_end; + vm_map_size_t base_len; struct submap_map *next; } submap_map_t; kern_return_t vm_map_copyin_common( - vm_map_t src_map, + vm_map_t src_map, vm_map_address_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - 
__unused boolean_t src_volatile, - vm_map_copy_t *copy_result, /* OUT */ - boolean_t use_maxprot) + vm_map_size_t len, + boolean_t src_destroy, + __unused boolean_t src_volatile, + vm_map_copy_t *copy_result, /* OUT */ + boolean_t use_maxprot) { int flags; @@ -10874,44 +10982,44 @@ vm_map_copyin_common( flags |= VM_MAP_COPYIN_USE_MAXPROT; } return vm_map_copyin_internal(src_map, - src_addr, - len, - flags, - copy_result); + src_addr, + len, + flags, + copy_result); } kern_return_t vm_map_copyin_internal( - vm_map_t src_map, + vm_map_t src_map, vm_map_address_t src_addr, - vm_map_size_t len, - int flags, - vm_map_copy_t *copy_result) /* OUT */ + vm_map_size_t len, + int flags, + vm_map_copy_t *copy_result) /* OUT */ { - vm_map_entry_t tmp_entry; /* Result of last map lookup -- - * in multi-level lookup, this - * entry contains the actual - * vm_object/offset. - */ - vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */ - - vm_map_offset_t src_start; /* Start of current entry -- - * where copy is taking place now - */ - vm_map_offset_t src_end; /* End of entire region to be - * copied */ + vm_map_entry_t tmp_entry; /* Result of last map lookup -- + * in multi-level lookup, this + * entry contains the actual + * vm_object/offset. + */ + vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */ + + vm_map_offset_t src_start; /* Start of current entry -- + * where copy is taking place now + */ + vm_map_offset_t src_end; /* End of entire region to be + * copied */ vm_map_offset_t src_base; - vm_map_t base_map = src_map; - boolean_t map_share=FALSE; - submap_map_t *parent_maps = NULL; + vm_map_t base_map = src_map; + boolean_t map_share = FALSE; + submap_map_t *parent_maps = NULL; - vm_map_copy_t copy; /* Resulting copy */ + vm_map_copy_t copy; /* Resulting copy */ vm_map_address_t copy_addr; - vm_map_size_t copy_size; - boolean_t src_destroy; - boolean_t use_maxprot; - boolean_t preserve_purgeable; - boolean_t entry_was_shared; - vm_map_entry_t saved_src_entry; + vm_map_size_t copy_size; + boolean_t src_destroy; + boolean_t use_maxprot; + boolean_t preserve_purgeable; + boolean_t entry_was_shared; + vm_map_entry_t saved_src_entry; if (flags & ~VM_MAP_COPYIN_ALL_FLAGS) { return KERN_INVALID_ARGUMENT; @@ -10920,7 +11028,7 @@ vm_map_copyin_internal( src_destroy = (flags & VM_MAP_COPYIN_SRC_DESTROY) ? TRUE : FALSE; use_maxprot = (flags & VM_MAP_COPYIN_USE_MAXPROT) ? TRUE : FALSE; preserve_purgeable = - (flags & VM_MAP_COPYIN_PRESERVE_PURGEABLE) ? TRUE : FALSE; + (flags & VM_MAP_COPYIN_PRESERVE_PURGEABLE) ? TRUE : FALSE; /* * Check for copies of zero bytes. @@ -10928,23 +11036,24 @@ vm_map_copyin_internal( if (len == 0) { *copy_result = VM_MAP_COPY_NULL; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* * Check that the end address doesn't overflow */ src_end = src_addr + len; - if (src_end < src_addr) + if (src_end < src_addr) { return KERN_INVALID_ADDRESS; + } /* * Compute (page aligned) start and end of region */ src_start = vm_map_trunc_page(src_addr, - VM_MAP_PAGE_MASK(src_map)); + VM_MAP_PAGE_MASK(src_map)); src_end = vm_map_round_page(src_end, - VM_MAP_PAGE_MASK(src_map)); + VM_MAP_PAGE_MASK(src_map)); /* * If the copy is sufficiently small, use a kernel buffer instead @@ -10964,11 +11073,12 @@ vm_map_copyin_internal( * of the commpage would now fail when it used to work. 
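
Before doing any real work, vm_map_copyin_internal() above rejects a length that wraps the address space: src_end = src_addr + len can only compare below src_addr if the addition overflowed. The same guard in isolation:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Overflow-safe range validation as in the hunk above: a wrapped end
     * address means src_addr + len overflowed the 64-bit offset type. */
    static bool range_valid(uint64_t src_addr, uint64_t len, uint64_t *src_end)
    {
        if (len == 0) {
            return false;   /* nothing to copy; the kernel returns
                             * KERN_SUCCESS with a null copy object */
        }
        *src_end = src_addr + len;
        if (*src_end < src_addr) {
            return false;   /* KERN_INVALID_ADDRESS */
        }
        return true;
    }

    int main(void)
    {
        uint64_t end;
        printf("%d\n", range_valid(0x1000, 0x2000, &end));            /* 1 */
        printf("%d\n", range_valid(UINT64_MAX - 0x10, 0x100, &end));  /* 0: wraps */
        return 0;
    }

The kernel treats len == 0 as a trivial success (a null copy object); the sketch merely reports that there is nothing to validate.
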
*/ (src_start >= vm_map_min(src_map) && - src_start < vm_map_max(src_map) && - src_end >= vm_map_min(src_map) && - src_end < vm_map_max(src_map))) + src_start < vm_map_max(src_map) && + src_end >= vm_map_min(src_map) && + src_end < vm_map_max(src_map))) { return vm_map_copyin_kernel_buffer(src_map, src_addr, len, - src_destroy, copy_result); + src_destroy, copy_result); + } XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", src_map, src_addr, len, src_destroy, 0); @@ -10995,48 +11105,49 @@ vm_map_copyin_internal( copy->cpy_hdr.page_shift = PAGE_SHIFT; #endif - vm_map_store_init( &(copy->cpy_hdr) ); + vm_map_store_init( &(copy->cpy_hdr)); copy->offset = src_addr; copy->size = len; new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable); -#define RETURN(x) \ - MACRO_BEGIN \ - vm_map_unlock(src_map); \ - if(src_map != base_map) \ - vm_map_deallocate(src_map); \ - if (new_entry != VM_MAP_ENTRY_NULL) \ - vm_map_copy_entry_dispose(copy,new_entry); \ - vm_map_copy_discard(copy); \ - { \ - submap_map_t *_ptr; \ - \ - for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \ - parent_maps=parent_maps->next; \ - if (_ptr->parent_map != base_map) \ - vm_map_deallocate(_ptr->parent_map); \ - kfree(_ptr, sizeof(submap_map_t)); \ - } \ - } \ - MACRO_RETURN(x); \ +#define RETURN(x) \ + MACRO_BEGIN \ + vm_map_unlock(src_map); \ + if(src_map != base_map) \ + vm_map_deallocate(src_map); \ + if (new_entry != VM_MAP_ENTRY_NULL) \ + vm_map_copy_entry_dispose(copy,new_entry); \ + vm_map_copy_discard(copy); \ + { \ + submap_map_t *_ptr; \ + \ + for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \ + parent_maps=parent_maps->next; \ + if (_ptr->parent_map != base_map) \ + vm_map_deallocate(_ptr->parent_map); \ + kfree(_ptr, sizeof(submap_map_t)); \ + } \ + } \ + MACRO_RETURN(x); \ MACRO_END /* * Find the beginning of the region. */ - vm_map_lock(src_map); + vm_map_lock(src_map); /* * Lookup the original "src_addr" rather than the truncated * "src_start", in case "src_start" falls in a non-map-aligned * map entry *before* the map entry that contains "src_addr"... */ - if (!vm_map_lookup_entry(src_map, src_addr, &tmp_entry)) + if (!vm_map_lookup_entry(src_map, src_addr, &tmp_entry)) { RETURN(KERN_INVALID_ADDRESS); - if(!tmp_entry->is_sub_map) { + } + if (!tmp_entry->is_sub_map) { /* * ... but clip to the map-rounded "src_start" rather than * "src_addr" to preserve map-alignment. We'll adjust the @@ -11059,30 +11170,30 @@ vm_map_copyin_internal( */ while (TRUE) { - vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ - vm_map_size_t src_size; /* Size of source - * map entry (in both - * maps) - */ - - vm_object_t src_object; /* Object to copy */ - vm_object_offset_t src_offset; - - boolean_t src_needs_copy; /* Should source map - * be made read-only - * for copy-on-write? - */ - - boolean_t new_entry_needs_copy; /* Will new entry be COW? */ - - boolean_t was_wired; /* Was source wired? */ - vm_map_version_t version; /* Version before locks - * dropped to make copy - */ - kern_return_t result; /* Return value from - * copy_strategically. - */ - while(tmp_entry->is_sub_map) { + vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ + vm_map_size_t src_size; /* Size of source + * map entry (in both + * maps) + */ + + vm_object_t src_object; /* Object to copy */ + vm_object_offset_t src_offset; + + boolean_t src_needs_copy; /* Should source map + * be made read-only + * for copy-on-write? 
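/*
 * The RETURN() macro above is the classic centralized-cleanup idiom:
 * every early exit from a long function funnels through one place that
 * unlocks, disposes of partial state, and frees the submap stack. A
 * minimal self-contained analogue (names are illustrative, not the
 * kernel's):
 */
#include <stdlib.h>

struct node {
	struct node *next;
};

#define CLEANUP_RETURN(stack, rc)                       \
	do {                                            \
		while ((stack) != NULL) {               \
			struct node *_n = (stack);      \
			(stack) = (stack)->next;        \
			free(_n);                       \
		}                                       \
		return (rc);                            \
	} while (0)

static int
long_function(struct node *stack, int fail_early)
{
	if (fail_early) {
		CLEANUP_RETURN(stack, -1);  /* early exit, same cleanup */
	}
	/* ... more work, more early exits ... */
	CLEANUP_RETURN(stack, 0);
}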
+ */ + + boolean_t new_entry_needs_copy; /* Will new entry be COW? */ + + boolean_t was_wired; /* Was source wired? */ + vm_map_version_t version; /* Version before locks + * dropped to make copy + */ + kern_return_t result; /* Return value from + * copy_strategically. + */ + while (tmp_entry->is_sub_map) { vm_map_size_t submap_len; submap_map_t *ptr; @@ -11093,8 +11204,9 @@ vm_map_copyin_internal( ptr->base_start = src_start; ptr->base_end = src_end; submap_len = tmp_entry->vme_end - src_start; - if(submap_len > (src_end-src_start)) - submap_len = src_end-src_start; + if (submap_len > (src_end - src_start)) { + submap_len = src_end - src_start; + } ptr->base_len = submap_len; src_start -= tmp_entry->vme_start; @@ -11107,11 +11219,13 @@ vm_map_copyin_internal( vm_map_reference(src_map); vm_map_unlock(ptr->parent_map); if (!vm_map_lookup_entry( - src_map, src_start, &tmp_entry)) + src_map, src_start, &tmp_entry)) { RETURN(KERN_INVALID_ADDRESS); + } map_share = TRUE; - if(!tmp_entry->is_sub_map) + if (!tmp_entry->is_sub_map) { vm_map_clip_start(src_map, tmp_entry, src_start); + } src_entry = tmp_entry; } /* we are now in the lowest level submap... */ @@ -11142,11 +11256,12 @@ vm_map_copyin_internal( vm_map_lock(src_map); if ((version.main_timestamp + 1) != src_map->timestamp) { if (!vm_map_lookup_entry(src_map, src_start, - &tmp_entry)) { + &tmp_entry)) { RETURN(KERN_INVALID_ADDRESS); } - if (!tmp_entry->is_sub_map) + if (!tmp_entry->is_sub_map) { vm_map_clip_start(src_map, tmp_entry, src_start); + } continue; /* restart w/ new tmp_entry */ } } @@ -11155,9 +11270,10 @@ vm_map_copyin_internal( * Verify that the region can be read. */ if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE && - !use_maxprot) || - (src_entry->max_protection & VM_PROT_READ) == 0) + !use_maxprot) || + (src_entry->max_protection & VM_PROT_READ) == 0) { RETURN(KERN_PROTECTION_FAILURE); + } /* * Clip against the endpoints of the entire region. @@ -11195,11 +11311,11 @@ vm_map_copyin_internal( if (src_destroy && (src_object == VM_OBJECT_NULL || - (src_object->internal && - src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && - src_entry->vme_start <= src_addr && - src_entry->vme_end >= src_end && - !map_share))) { + (src_object->internal && + src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && + src_entry->vme_start <= src_addr && + src_entry->vme_end >= src_end && + !map_share))) { /* * If we are destroying the source, and the object * is internal, we can move the object reference @@ -11225,19 +11341,18 @@ vm_map_copyin_internal( } - RestartCopy: +RestartCopy: XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n", src_object, new_entry, VME_OBJECT(new_entry), was_wired, 0); if ((src_object == VM_OBJECT_NULL || - (!was_wired && !map_share && !tmp_entry->is_shared)) && + (!was_wired && !map_share && !tmp_entry->is_shared)) && vm_object_copy_quickly( &VME_OBJECT(new_entry), src_offset, src_size, &src_needs_copy, &new_entry_needs_copy)) { - new_entry->needs_copy = new_entry_needs_copy; /* @@ -11245,21 +11360,22 @@ vm_map_copyin_internal( */ if (src_needs_copy && !tmp_entry->needs_copy) { - vm_prot_t prot; + vm_prot_t prot; prot = src_entry->protection & ~VM_PROT_WRITE; if (override_nx(src_map, VME_ALIAS(src_entry)) - && prot) - prot |= VM_PROT_EXECUTE; + && prot) { + prot |= VM_PROT_EXECUTE; + } vm_object_pmap_protect( src_object, src_offset, src_size, - (src_entry->is_shared ? - PMAP_NULL - : src_map->pmap), + (src_entry->is_shared ? 
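/*
 * Self-contained analogue of the submap_map_t bookkeeping above: before
 * descending into a submap, the copy loop pushes the parent map and the
 * [base_start, base_end) range it was working on; when a submap range
 * is exhausted it pops back out. Types are illustrative, with vm_map_t
 * reduced to void *.
 */
#include <stdint.h>
#include <stdlib.h>

struct submap_frame {
	void                *parent_map;
	uint64_t             base_start;
	uint64_t             base_end;
	struct submap_frame *next;
};

static int
push_parent(struct submap_frame **top, void *map,
    uint64_t start, uint64_t end)
{
	struct submap_frame *f = malloc(sizeof(*f));

	if (f == NULL) {
		return -1;
	}
	f->parent_map = map;
	f->base_start = start;
	f->base_end = end;
	f->next = *top;
	*top = f;
	return 0;
}

static void *
pop_parent(struct submap_frame **top, uint64_t *start, uint64_t *end)
{
	struct submap_frame *f = *top;
	void *map = f->parent_map;

	*top = f->next;
	*start = f->base_start;
	*end = f->base_end;
	free(f);
	return map;
}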
+ PMAP_NULL + : src_map->pmap), src_entry->vme_start, prot); @@ -11292,7 +11408,7 @@ vm_map_copyin_internal( */ version.main_timestamp = src_map->timestamp; - vm_map_unlock(src_map); /* Increments timestamp once! */ + vm_map_unlock(src_map); /* Increments timestamp once! */ saved_src_entry = src_entry; tmp_entry = VM_MAP_ENTRY_NULL; src_entry = VM_MAP_ENTRY_NULL; @@ -11302,7 +11418,7 @@ vm_map_copyin_internal( */ if (was_wired) { - CopySlowly: +CopySlowly: vm_object_lock(src_object); result = vm_object_copy_slowly( src_object, @@ -11312,10 +11428,9 @@ vm_map_copyin_internal( &VME_OBJECT(new_entry)); VME_OFFSET_SET(new_entry, 0); new_entry->needs_copy = FALSE; - } - else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && - (entry_was_shared || map_share)) { - vm_object_t new_object; + } else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && + (entry_was_shared || map_share)) { + vm_object_t new_object; vm_object_lock_shared(src_object); new_object = vm_object_copy_delayed( @@ -11323,8 +11438,9 @@ vm_map_copyin_internal( src_offset, src_size, TRUE); - if (new_object == VM_OBJECT_NULL) - goto CopySlowly; + if (new_object == VM_OBJECT_NULL) { + goto CopySlowly; + } VME_OBJECT_SET(new_entry, new_object); assert(new_entry->wired_count == 0); @@ -11333,16 +11449,15 @@ vm_map_copyin_internal( assert(new_object->purgable == VM_PURGABLE_DENY); assertf(new_entry->use_pmap, "src_map %p new_entry %p\n", src_map, new_entry); result = KERN_SUCCESS; - } else { vm_object_offset_t new_offset; new_offset = VME_OFFSET(new_entry); result = vm_object_copy_strategically(src_object, - src_offset, - src_size, - &VME_OBJECT(new_entry), - &new_offset, - &new_entry_needs_copy); + src_offset, + src_size, + &VME_OBJECT(new_entry), + &new_offset, + &new_entry_needs_copy); if (new_offset != VME_OFFSET(new_entry)) { VME_OFFSET_SET(new_entry, new_offset); } @@ -11353,7 +11468,7 @@ vm_map_copyin_internal( if (result == KERN_SUCCESS && preserve_purgeable && src_object->purgable != VM_PURGABLE_DENY) { - vm_object_t new_object; + vm_object_t new_object; new_object = VME_OBJECT(new_entry); assert(new_object != src_object); @@ -11435,9 +11550,10 @@ vm_map_copyin_internal( vm_map_clip_start(src_map, src_entry, src_start); if ((((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE) && - !use_maxprot) || - ((src_entry->max_protection & VM_PROT_READ) == 0)) + !use_maxprot) || + ((src_entry->max_protection & VM_PROT_READ) == 0)) { goto VerificationFailed; + } if (src_entry->vme_end < new_entry->vme_end) { /* @@ -11451,7 +11567,7 @@ vm_map_copyin_internal( * no longer points at the same object/offset. */ assert(VM_MAP_PAGE_ALIGNED(src_entry->vme_end, - VM_MAP_COPY_PAGE_MASK(copy))); + VM_MAP_COPY_PAGE_MASK(copy))); new_entry->vme_end = src_entry->vme_end; src_size = new_entry->vme_end - src_start; } else if (src_entry->vme_end > new_entry->vme_end) { @@ -11473,14 +11589,13 @@ vm_map_copyin_internal( if ((VME_OBJECT(src_entry) != src_object) || (VME_OFFSET(src_entry) != src_offset) || (src_entry->vme_end > new_entry->vme_end)) { - /* * Verification failed. * * Start over with this top-level entry. */ - VerificationFailed: ; +VerificationFailed: ; vm_object_deallocate(VME_OBJECT(new_entry)); tmp_entry = src_entry; @@ -11491,23 +11606,24 @@ vm_map_copyin_internal( * Verification succeeded. */ - VerificationSuccessful: ; +VerificationSuccessful:; - if (result == KERN_MEMORY_RESTART_COPY) + if (result == KERN_MEMORY_RESTART_COPY) { goto RestartCopy; + } /* * Copy succeeded. 
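/*
 * The branches above choose between four object-copy strategies. A
 * compilable summary of the decision order; the predicates are
 * simplifications of the kernel's exact conditions, not a drop-in:
 */
#include <stdbool.h>

typedef enum {
	COPY_QUICK,      /* vm_object_copy_quickly: optimistic COW */
	COPY_SLOW,       /* vm_object_copy_slowly: copy pages eagerly */
	COPY_DELAYED,    /* vm_object_copy_delayed: shared symmetric case */
	COPY_STRATEGIC,  /* vm_object_copy_strategically: object decides */
} copy_kind_t;

static copy_kind_t
choose_copy_strategy(bool was_wired, bool shared, bool symmetric)
{
	if (!was_wired && !shared) {
		return COPY_QUICK;   /* may still fall back on failure */
	}
	if (was_wired) {
		return COPY_SLOW;    /* wired pages must be copied now */
	}
	if (symmetric && shared) {
		return COPY_DELAYED; /* falls back to COPY_SLOW if it fails */
	}
	return COPY_STRATEGIC;
}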
*/ - CopySuccessful: ; +CopySuccessful: ; /* * Link in the new copy entry. */ vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), - new_entry); + new_entry); /* * Determine whether the entire region @@ -11517,7 +11633,7 @@ vm_map_copyin_internal( src_start = new_entry->vme_end; new_entry = VM_MAP_ENTRY_NULL; while ((src_start >= src_end) && (src_end != 0)) { - submap_map_t *ptr; + submap_map_t *ptr; if (src_map == base_map) { /* back to the top */ @@ -11530,8 +11646,8 @@ vm_map_copyin_internal( /* fix up the damage we did in that submap */ vm_map_simplify_range(src_map, - src_base, - src_end); + src_base, + src_end); vm_map_unlock(src_map); vm_map_deallocate(src_map); @@ -11541,14 +11657,15 @@ vm_map_copyin_internal( src_start = ptr->base_start + ptr->base_len; src_end = ptr->base_end; if (!vm_map_lookup_entry(src_map, - src_start, - &tmp_entry) && + src_start, + &tmp_entry) && (src_end > src_start)) { RETURN(KERN_INVALID_ADDRESS); } kfree(ptr, sizeof(submap_map_t)); - if (parent_maps == NULL) + if (parent_maps == NULL) { map_share = FALSE; + } src_entry = tmp_entry->vme_prev; } @@ -11571,8 +11688,9 @@ vm_map_copyin_internal( break; } - if ((src_start >= src_end) && (src_end != 0)) + if ((src_start >= src_end) && (src_end != 0)) { break; + } /* * Verify that there are no gaps in the region @@ -11593,20 +11711,20 @@ vm_map_copyin_internal( (void) vm_map_delete( src_map, vm_map_trunc_page(src_addr, - VM_MAP_PAGE_MASK(src_map)), + VM_MAP_PAGE_MASK(src_map)), src_end, ((src_map == kernel_map) ? - VM_MAP_REMOVE_KUNWIRE : - VM_MAP_REMOVE_NO_FLAGS), + VM_MAP_REMOVE_KUNWIRE : + VM_MAP_REMOVE_NO_FLAGS), VM_MAP_NULL); } else { /* fix up the damage we did in the base map */ vm_map_simplify_range( src_map, vm_map_trunc_page(src_addr, - VM_MAP_PAGE_MASK(src_map)), + VM_MAP_PAGE_MASK(src_map)), vm_map_round_page(src_end, - VM_MAP_PAGE_MASK(src_map))); + VM_MAP_PAGE_MASK(src_map))); } vm_map_unlock(src_map); @@ -11627,27 +11745,27 @@ vm_map_copyin_internal( /* map-align the start of the first copy entry... */ adjustment = (tmp_entry->vme_start - - vm_map_trunc_page( - tmp_entry->vme_start, - VM_MAP_PAGE_MASK(src_map))); + vm_map_trunc_page( + tmp_entry->vme_start, + VM_MAP_PAGE_MASK(src_map))); tmp_entry->vme_start -= adjustment; VME_OFFSET_SET(tmp_entry, - VME_OFFSET(tmp_entry) - adjustment); + VME_OFFSET(tmp_entry) - adjustment); copy_addr -= adjustment; assert(tmp_entry->vme_start < tmp_entry->vme_end); /* ... adjust for mis-aligned start of copy range */ adjustment = - (vm_map_trunc_page(copy->offset, - PAGE_MASK) - - vm_map_trunc_page(copy->offset, - VM_MAP_PAGE_MASK(src_map))); + (vm_map_trunc_page(copy->offset, + PAGE_MASK) - + vm_map_trunc_page(copy->offset, + VM_MAP_PAGE_MASK(src_map))); if (adjustment) { assert(page_aligned(adjustment)); assert(adjustment < VM_MAP_PAGE_SIZE(src_map)); tmp_entry->vme_start += adjustment; VME_OFFSET_SET(tmp_entry, - (VME_OFFSET(tmp_entry) + - adjustment)); + (VME_OFFSET(tmp_entry) + + adjustment)); copy_addr += adjustment; assert(tmp_entry->vme_start < tmp_entry->vme_end); } @@ -11663,9 +11781,9 @@ vm_map_copyin_internal( * a single 16K page. */ assert(vm_map_trunc_page(tmp_entry->vme_start, - VM_MAP_PAGE_MASK(src_map)) == - vm_map_trunc_page(original_start, - VM_MAP_PAGE_MASK(src_map))); + VM_MAP_PAGE_MASK(src_map)) == + vm_map_trunc_page(original_start, + VM_MAP_PAGE_MASK(src_map))); } /* adjust alignment of last copy_entry's "vme_end" */ @@ -11677,16 +11795,16 @@ vm_map_copyin_internal( /* map-align the end of the last copy entry... 
*/ tmp_entry->vme_end = - vm_map_round_page(tmp_entry->vme_end, - VM_MAP_PAGE_MASK(src_map)); + vm_map_round_page(tmp_entry->vme_end, + VM_MAP_PAGE_MASK(src_map)); /* ... adjust for mis-aligned end of copy range */ adjustment = - (vm_map_round_page((copy->offset + - copy->size), - VM_MAP_PAGE_MASK(src_map)) - - vm_map_round_page((copy->offset + - copy->size), - PAGE_MASK)); + (vm_map_round_page((copy->offset + + copy->size), + VM_MAP_PAGE_MASK(src_map)) - + vm_map_round_page((copy->offset + + copy->size), + PAGE_MASK)); if (adjustment) { assert(page_aligned(adjustment)); assert(adjustment < VM_MAP_PAGE_SIZE(src_map)); @@ -11704,9 +11822,9 @@ vm_map_copyin_internal( * a single 16K page. */ assert(vm_map_round_page(tmp_entry->vme_end, - VM_MAP_PAGE_MASK(src_map)) == - vm_map_round_page(original_end, - VM_MAP_PAGE_MASK(src_map))); + VM_MAP_PAGE_MASK(src_map)) == + vm_map_round_page(original_end, + VM_MAP_PAGE_MASK(src_map))); } } @@ -11718,12 +11836,12 @@ vm_map_copyin_internal( copy_size = 0; /* compute actual size */ while (tmp_entry != vm_map_copy_to_entry(copy)) { assert(VM_MAP_PAGE_ALIGNED( - copy_addr + (tmp_entry->vme_end - - tmp_entry->vme_start), - VM_MAP_COPY_PAGE_MASK(copy))); + copy_addr + (tmp_entry->vme_end - + tmp_entry->vme_start), + VM_MAP_COPY_PAGE_MASK(copy))); assert(VM_MAP_PAGE_ALIGNED( - copy_addr, - VM_MAP_COPY_PAGE_MASK(copy))); + copy_addr, + VM_MAP_COPY_PAGE_MASK(copy))); /* * The copy_entries will be injected directly into the @@ -11732,7 +11850,7 @@ vm_map_copyin_internal( tmp_entry->map_aligned = FALSE; tmp_entry->vme_end = copy_addr + - (tmp_entry->vme_end - tmp_entry->vme_start); + (tmp_entry->vme_end - tmp_entry->vme_start); tmp_entry->vme_start = copy_addr; assert(tmp_entry->vme_start < tmp_entry->vme_end); copy_addr += tmp_entry->vme_end - tmp_entry->vme_start; @@ -11755,30 +11873,30 @@ vm_map_copyin_internal( * in vm_map_copyout() or vm_map_copy_overwrite(). */ assert(vm_map_round_page(copy_size, - VM_MAP_PAGE_MASK(src_map)) == - vm_map_round_page(copy->size, - VM_MAP_PAGE_MASK(src_map))); + VM_MAP_PAGE_MASK(src_map)) == + vm_map_round_page(copy->size, + VM_MAP_PAGE_MASK(src_map))); copy->size = copy_size; } *copy_result = copy; - return(KERN_SUCCESS); + return KERN_SUCCESS; -#undef RETURN +#undef RETURN } kern_return_t vm_map_copy_extract( - vm_map_t src_map, - vm_map_address_t src_addr, - vm_map_size_t len, - vm_map_copy_t *copy_result, /* OUT */ - vm_prot_t *cur_prot, /* OUT */ - vm_prot_t *max_prot) + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + vm_map_copy_t *copy_result, /* OUT */ + vm_prot_t *cur_prot, /* OUT */ + vm_prot_t *max_prot) { - vm_map_offset_t src_start, src_end; - vm_map_copy_t copy; - kern_return_t kr; + vm_map_offset_t src_start, src_end; + vm_map_copy_t copy; + kern_return_t kr; /* * Check for copies of zero bytes. 
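/*
 * Worked example of the two "adjustment" computations above, for a copy
 * described at 4K granularity being aligned to a 16K map. Compiles on
 * its own; the macros restate vm_map_trunc_page()/vm_map_round_page().
 */
#include <assert.h>
#include <stdint.h>

#define TRUNC_PAGE(x, mask)  ((x) & ~(uint64_t)(mask))
#define ROUND_PAGE(x, mask)  (((x) + (mask)) & ~(uint64_t)(mask))

int
main(void)
{
	uint64_t mask_4k = 0xfff;
	uint64_t mask_16k = 0x3fff;
	uint64_t offset = 0x5000;   /* 4K-aligned but not 16K-aligned */

	/* "mis-aligned start of copy range": how far the 4K-aligned start
	 * sits past the enclosing 16K boundary */
	uint64_t head = TRUNC_PAGE(offset, mask_4k) -
	    TRUNC_PAGE(offset, mask_16k);
	assert(head == 0x1000);

	/* "mis-aligned end of copy range": slack between rounding the end
	 * to 16K versus rounding it to 4K */
	uint64_t end = offset + 0x2000;   /* copy->offset + copy->size */
	uint64_t tail = ROUND_PAGE(end, mask_16k) - ROUND_PAGE(end, mask_4k);
	assert(tail == 0x1000);

	return 0;
}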
@@ -11786,15 +11904,16 @@ vm_map_copy_extract( if (len == 0) { *copy_result = VM_MAP_COPY_NULL; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* * Check that the end address doesn't overflow */ src_end = src_addr + len; - if (src_end < src_addr) + if (src_end < src_addr) { return KERN_INVALID_ADDRESS; + } /* * Compute (page aligned) start and end of region @@ -11819,16 +11938,16 @@ vm_map_copy_extract( copy->size = len; kr = vm_map_remap_extract(src_map, - src_addr, - len, - FALSE, /* copy */ - ©->cpy_hdr, - cur_prot, - max_prot, - VM_INHERIT_SHARE, - TRUE, /* pageable */ - FALSE, /* same_map */ - VM_MAP_KERNEL_FLAGS_NONE); + src_addr, + len, + FALSE, /* copy */ + ©->cpy_hdr, + cur_prot, + max_prot, + VM_INHERIT_SHARE, + TRUE, /* pageable */ + FALSE, /* same_map */ + VM_MAP_KERNEL_FLAGS_NONE); if (kr != KERN_SUCCESS) { vm_map_copy_discard(copy); return kr; @@ -11847,12 +11966,12 @@ vm_map_copy_extract( kern_return_t vm_map_copyin_object( - vm_object_t object, - vm_object_offset_t offset, /* offset of region in object */ - vm_object_size_t size, /* size of region in object */ - vm_map_copy_t *copy_result) /* OUT */ + vm_object_t object, + vm_object_offset_t offset, /* offset of region in object */ + vm_object_size_t size, /* size of region in object */ + vm_map_copy_t *copy_result) /* OUT */ { - vm_map_copy_t copy; /* Resulting copy */ + vm_map_copy_t copy; /* Resulting copy */ /* * We drop the object into a special copy object @@ -11866,17 +11985,17 @@ vm_map_copyin_object( copy->size = size; *copy_result = copy; - return(KERN_SUCCESS); + return KERN_SUCCESS; } static void vm_map_fork_share( - vm_map_t old_map, - vm_map_entry_t old_entry, - vm_map_t new_map) + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map) { - vm_object_t object; - vm_map_entry_t new_entry; + vm_object_t object; + vm_map_entry_t new_entry; /* * New sharing code. New map entry @@ -11892,44 +12011,42 @@ vm_map_fork_share( if (old_entry->is_sub_map) { assert(old_entry->wired_count == 0); #ifndef NO_NESTED_PMAP - if(old_entry->use_pmap) { - kern_return_t result; + if (old_entry->use_pmap) { + kern_return_t result; result = pmap_nest(new_map->pmap, - (VME_SUBMAP(old_entry))->pmap, - (addr64_t)old_entry->vme_start, - (addr64_t)old_entry->vme_start, - (uint64_t)(old_entry->vme_end - old_entry->vme_start)); - if(result) + (VME_SUBMAP(old_entry))->pmap, + (addr64_t)old_entry->vme_start, + (addr64_t)old_entry->vme_start, + (uint64_t)(old_entry->vme_end - old_entry->vme_start)); + if (result) { panic("vm_map_fork_share: pmap_nest failed!"); + } } -#endif /* NO_NESTED_PMAP */ +#endif /* NO_NESTED_PMAP */ } else if (object == VM_OBJECT_NULL) { object = vm_object_allocate((vm_map_size_t)(old_entry->vme_end - - old_entry->vme_start)); + old_entry->vme_start)); VME_OFFSET_SET(old_entry, 0); VME_OBJECT_SET(old_entry, object); old_entry->use_pmap = TRUE; // assert(!old_entry->needs_copy); } else if (object->copy_strategy != - MEMORY_OBJECT_COPY_SYMMETRIC) { - + MEMORY_OBJECT_COPY_SYMMETRIC) { /* * We are already using an asymmetric * copy, and therefore we already have * the right object. */ - assert(! 
old_entry->needs_copy); - } - else if (old_entry->needs_copy || /* case 1 */ - object->shadowed || /* case 2 */ - (!object->true_share && /* case 3 */ - !old_entry->is_shared && - (object->vo_size > - (vm_map_size_t)(old_entry->vme_end - - old_entry->vme_start)))) { - + assert(!old_entry->needs_copy); + } else if (old_entry->needs_copy || /* case 1 */ + object->shadowed || /* case 2 */ + (!object->true_share && /* case 3 */ + !old_entry->is_shared && + (object->vo_size > + (vm_map_size_t)(old_entry->vme_end - + old_entry->vme_start)))) { /* * We need to create a shadow. * There are three cases here. @@ -12007,8 +12124,8 @@ vm_map_fork_share( * case 2.) */ VME_OBJECT_SHADOW(old_entry, - (vm_map_size_t) (old_entry->vme_end - - old_entry->vme_start)); + (vm_map_size_t) (old_entry->vme_end - + old_entry->vme_start)); /* * If we're making a shadow for other than @@ -12018,7 +12135,7 @@ vm_map_fork_share( if (!old_entry->needs_copy && (old_entry->protection & VM_PROT_WRITE)) { - vm_prot_t prot; + vm_prot_t prot; assert(!pmap_has_prot_policy(old_entry->protection)); @@ -12026,8 +12143,9 @@ vm_map_fork_share( assert(!pmap_has_prot_policy(prot)); - if (override_nx(old_map, VME_ALIAS(old_entry)) && prot) - prot |= VM_PROT_EXECUTE; + if (override_nx(old_map, VME_ALIAS(old_entry)) && prot) { + prot |= VM_PROT_EXECUTE; + } if (old_map->mapped_in_other_pmaps) { @@ -12035,15 +12153,15 @@ vm_map_fork_share( VME_OBJECT(old_entry), VME_OFFSET(old_entry), (old_entry->vme_end - - old_entry->vme_start), + old_entry->vme_start), PMAP_NULL, old_entry->vme_start, prot); } else { pmap_protect(old_map->pmap, - old_entry->vme_start, - old_entry->vme_end, - prot); + old_entry->vme_start, + old_entry->vme_end, + prot); } } @@ -12061,7 +12179,7 @@ vm_map_fork_share( * new entry. */ - if(old_entry->is_sub_map) { + if (old_entry->is_sub_map) { vm_map_lock(VME_SUBMAP(old_entry)); vm_map_reference(VME_SUBMAP(old_entry)); vm_map_unlock(VME_SUBMAP(old_entry)); @@ -12080,7 +12198,7 @@ vm_map_fork_share( */ new_entry = vm_map_entry_create(new_map, FALSE); /* Never the kernel - * map or descendants */ + * map or descendants */ vm_map_entry_copy(new_entry, old_entry); old_entry->is_shared = TRUE; new_entry->is_shared = TRUE; @@ -12100,7 +12218,6 @@ vm_map_fork_share( * write permission from the new entry. 
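/*
 * Compilable restatement of the three shadow cases above. The predicate
 * names mirror the kernel's fields, but the struct is illustrative and
 * mixes entry-level and object-level state for brevity:
 */
#include <stdbool.h>
#include <stdint.h>

struct entry_state {
	bool     needs_copy;      /* case 1: symmetric COW in progress */
	bool     shadowed;        /* case 2: someone already COWs this */
	bool     true_share;
	bool     is_shared;
	uint64_t object_size;
	uint64_t entry_size;
};

static bool
fork_share_needs_shadow(const struct entry_state *e)
{
	return e->needs_copy ||                       /* case 1 */
	       e->shadowed ||                         /* case 2 */
	       (!e->true_share && !e->is_shared &&    /* case 3 */
	        e->object_size > e->entry_size);
}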
*/ if (old_entry->inheritance == VM_INHERIT_NONE) { - new_entry->protection &= ~VM_PROT_WRITE; new_entry->max_protection &= ~VM_PROT_WRITE; } @@ -12112,7 +12229,7 @@ vm_map_fork_share( */ vm_map_store_entry_link(new_map, vm_map_last_entry(new_map), new_entry, - VM_MAP_KERNEL_FLAGS_NONE); + VM_MAP_KERNEL_FLAGS_NONE); /* * Update the physical map @@ -12122,17 +12239,17 @@ vm_map_fork_share( /* Bill Angell pmap support goes here */ } else { pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start, - old_entry->vme_end - old_entry->vme_start, - old_entry->vme_start); + old_entry->vme_end - old_entry->vme_start, + old_entry->vme_start); } } static boolean_t vm_map_fork_copy( - vm_map_t old_map, - vm_map_entry_t *old_entry_p, - vm_map_t new_map, - int vm_map_copyin_flags) + vm_map_t old_map, + vm_map_entry_t *old_entry_p, + vm_map_t new_map, + int vm_map_copyin_flags) { vm_map_entry_t old_entry = *old_entry_p; vm_map_size_t entry_size = old_entry->vme_end - old_entry->vme_start; @@ -12149,7 +12266,7 @@ vm_map_fork_copy( */ vm_map_copyin_flags |= VM_MAP_COPYIN_USE_MAXPROT; if (vm_map_copyin_internal(old_map, start, entry_size, - vm_map_copyin_flags, ©) + vm_map_copyin_flags, ©) != KERN_SUCCESS) { /* * The map might have changed while it @@ -12187,7 +12304,7 @@ vm_map_fork_copy( vm_map_lock(old_map); start += entry_size; - if (! vm_map_lookup_entry(old_map, start, &last)) { + if (!vm_map_lookup_entry(old_map, start, &last)) { last = last->vme_next; } else { if (last->vme_start == start) { @@ -12216,37 +12333,37 @@ vm_map_fork_copy( */ vm_map_t vm_map_fork( - ledger_t ledger, - vm_map_t old_map, - int options) + ledger_t ledger, + vm_map_t old_map, + int options) { - pmap_t new_pmap; - vm_map_t new_map; - vm_map_entry_t old_entry; - vm_map_size_t new_size = 0, entry_size; - vm_map_entry_t new_entry; - boolean_t src_needs_copy; - boolean_t new_entry_needs_copy; - boolean_t pmap_is64bit; - int vm_map_copyin_flags; - vm_inherit_t old_entry_inheritance; - int map_create_options; - kern_return_t footprint_collect_kr; + pmap_t new_pmap; + vm_map_t new_map; + vm_map_entry_t old_entry; + vm_map_size_t new_size = 0, entry_size; + vm_map_entry_t new_entry; + boolean_t src_needs_copy; + boolean_t new_entry_needs_copy; + boolean_t pmap_is64bit; + int vm_map_copyin_flags; + vm_inherit_t old_entry_inheritance; + int map_create_options; + kern_return_t footprint_collect_kr; if (options & ~(VM_MAP_FORK_SHARE_IF_INHERIT_NONE | - VM_MAP_FORK_PRESERVE_PURGEABLE | - VM_MAP_FORK_CORPSE_FOOTPRINT)) { + VM_MAP_FORK_PRESERVE_PURGEABLE | + VM_MAP_FORK_CORPSE_FOOTPRINT)) { /* unsupported option */ return VM_MAP_NULL; } pmap_is64bit = #if defined(__i386__) || defined(__x86_64__) - old_map->pmap->pm_task_map != TASK_MAP_32BIT; + old_map->pmap->pm_task_map != TASK_MAP_32BIT; #elif defined(__arm64__) - old_map->pmap->max == MACH_VM_MAX_ADDRESS; + old_map->pmap->max == MACH_VM_MAX_ADDRESS; #elif defined(__arm__) - FALSE; + FALSE; #else #error Unknown architecture. 
#endif @@ -12265,9 +12382,9 @@ vm_map_fork( footprint_collect_kr = KERN_SUCCESS; } new_map = vm_map_create_options(new_pmap, - old_map->min_offset, - old_map->max_offset, - map_create_options); + old_map->min_offset, + old_map->max_offset, + map_create_options); vm_map_lock(new_map); vm_commit_pagezero_status(new_map); /* inherit the parent map's page size */ @@ -12276,7 +12393,6 @@ vm_map_fork( old_entry = vm_map_first_entry(old_map); old_entry != vm_map_to_entry(old_map); ) { - entry_size = old_entry->vme_end - old_entry->vme_start; old_entry_inheritance = old_entry->inheritance; @@ -12288,10 +12404,10 @@ vm_map_fork( if (old_entry_inheritance == VM_INHERIT_NONE && (options & VM_MAP_FORK_SHARE_IF_INHERIT_NONE) && !(!old_entry->is_sub_map && - VME_OBJECT(old_entry) != NULL && - VME_OBJECT(old_entry)->pager != NULL && - is_device_pager_ops( - VME_OBJECT(old_entry)->pager->mo_pager_ops))) { + VME_OBJECT(old_entry) != NULL && + VME_OBJECT(old_entry)->pager != NULL && + is_device_pager_ops( + VME_OBJECT(old_entry)->pager->mo_pager_ops))) { old_entry_inheritance = VM_INHERIT_SHARE; } @@ -12305,9 +12421,9 @@ vm_map_fork( * for later autopsy. */ footprint_collect_kr = - vm_map_corpse_footprint_collect(old_map, - old_entry, - new_map); + vm_map_corpse_footprint_collect(old_map, + old_entry, + new_map); } switch (old_entry_inheritance) { @@ -12327,11 +12443,12 @@ vm_map_fork( * to vm_map_fork_copy. */ - if(old_entry->is_sub_map) + if (old_entry->is_sub_map) { break; + } if ((old_entry->wired_count != 0) || ((VME_OBJECT(old_entry) != NULL) && - (VME_OBJECT(old_entry)->true_share))) { + (VME_OBJECT(old_entry)->true_share))) { goto slow_vm_map_fork_copy; } @@ -12355,11 +12472,11 @@ vm_map_fork( new_entry->use_pmap = TRUE; } - if (! vm_object_copy_quickly( + if (!vm_object_copy_quickly( &VME_OBJECT(new_entry), VME_OFFSET(old_entry), (old_entry->vme_end - - old_entry->vme_start), + old_entry->vme_start), &src_needs_copy, &new_entry_needs_copy)) { vm_map_entry_dispose(new_map, new_entry); @@ -12371,15 +12488,16 @@ vm_map_fork( */ if (src_needs_copy && !old_entry->needs_copy) { - vm_prot_t prot; + vm_prot_t prot; assert(!pmap_has_prot_policy(old_entry->protection)); prot = old_entry->protection & ~VM_PROT_WRITE; if (override_nx(old_map, VME_ALIAS(old_entry)) - && prot) - prot |= VM_PROT_EXECUTE; + && prot) { + prot |= VM_PROT_EXECUTE; + } assert(!pmap_has_prot_policy(prot)); @@ -12387,11 +12505,11 @@ vm_map_fork( VME_OBJECT(old_entry), VME_OFFSET(old_entry), (old_entry->vme_end - - old_entry->vme_start), + old_entry->vme_start), ((old_entry->is_shared - || old_map->mapped_in_other_pmaps) - ? PMAP_NULL : - old_map->pmap), + || old_map->mapped_in_other_pmaps) + ? 
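/*
 * User-visible effect of the inheritance handling above, as a runnable
 * user-space program (macOS): a MAP_PRIVATE page normally forks
 * copy-on-write via the VM_INHERIT_COPY path, but minherit(2) flips the
 * entry to VM_INHERIT_SHARE so the fork-share path keeps parent and
 * child on the same pages. Error handling trimmed for brevity.
 */
#include <mach/vm_inherit.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
	int *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	minherit(p, 4096, VM_INHERIT_SHARE);  /* default is VM_INHERIT_COPY */

	*p = 1;
	if (fork() == 0) {
		*p = 2;          /* child writes the shared entry */
		_exit(0);
	}
	wait(NULL);
	printf("parent sees %d\n", *p);   /* 2: no COW happened */
	return 0;
}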
PMAP_NULL : + old_map->pmap), old_entry->vme_start, prot); @@ -12406,22 +12524,22 @@ vm_map_fork( */ vm_map_store_entry_link(new_map, - vm_map_last_entry(new_map), - new_entry, - VM_MAP_KERNEL_FLAGS_NONE); + vm_map_last_entry(new_map), + new_entry, + VM_MAP_KERNEL_FLAGS_NONE); new_size += entry_size; break; - slow_vm_map_fork_copy: +slow_vm_map_fork_copy: vm_map_copyin_flags = 0; if (options & VM_MAP_FORK_PRESERVE_PURGEABLE) { vm_map_copyin_flags |= - VM_MAP_COPYIN_PRESERVE_PURGEABLE; + VM_MAP_COPYIN_PRESERVE_PURGEABLE; } if (vm_map_fork_copy(old_map, - &old_entry, - new_map, - vm_map_copyin_flags)) { + &old_entry, + new_map, + vm_map_copyin_flags)) { new_size += entry_size; } continue; @@ -12443,43 +12561,43 @@ vm_map_fork( vm_map_unlock(old_map); vm_map_deallocate(old_map); - return(new_map); + return new_map; } /* * vm_map_exec: * - * Setup the "new_map" with the proper execution environment according + * Setup the "new_map" with the proper execution environment according * to the type of executable (platform, 64bit, chroot environment). * Map the comm page and shared region, etc... */ kern_return_t vm_map_exec( - vm_map_t new_map, - task_t task, - boolean_t is64bit, - void *fsroot, - cpu_type_t cpu, - cpu_subtype_t cpu_subtype) + vm_map_t new_map, + task_t task, + boolean_t is64bit, + void *fsroot, + cpu_type_t cpu, + cpu_subtype_t cpu_subtype) { SHARED_REGION_TRACE_DEBUG( ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x,0x%x): ->\n", - (void *)VM_KERNEL_ADDRPERM(current_task()), - (void *)VM_KERNEL_ADDRPERM(new_map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, - cpu_subtype)); + (void *)VM_KERNEL_ADDRPERM(current_task()), + (void *)VM_KERNEL_ADDRPERM(new_map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, + cpu_subtype)); (void) vm_commpage_enter(new_map, task, is64bit); (void) vm_shared_region_enter(new_map, task, is64bit, fsroot, cpu, cpu_subtype); SHARED_REGION_TRACE_DEBUG( ("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x,0x%x): <-\n", - (void *)VM_KERNEL_ADDRPERM(current_task()), - (void *)VM_KERNEL_ADDRPERM(new_map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, - cpu_subtype)); + (void *)VM_KERNEL_ADDRPERM(current_task()), + (void *)VM_KERNEL_ADDRPERM(new_map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, + cpu_subtype)); return KERN_SUCCESS; } @@ -12509,29 +12627,29 @@ vm_map_exec( */ kern_return_t vm_map_lookup_locked( - vm_map_t *var_map, /* IN/OUT */ - vm_map_offset_t vaddr, - vm_prot_t fault_type, - int object_lock_type, - vm_map_version_t *out_version, /* OUT */ - vm_object_t *object, /* OUT */ - vm_object_offset_t *offset, /* OUT */ - vm_prot_t *out_prot, /* OUT */ - boolean_t *wired, /* OUT */ - vm_object_fault_info_t fault_info, /* OUT */ - vm_map_t *real_map) + vm_map_t *var_map, /* IN/OUT */ + vm_map_offset_t vaddr, + vm_prot_t fault_type, + int object_lock_type, + vm_map_version_t *out_version, /* OUT */ + vm_object_t *object, /* OUT */ + vm_object_offset_t *offset, /* OUT */ + vm_prot_t *out_prot, /* OUT */ + boolean_t *wired, /* OUT */ + vm_object_fault_info_t fault_info, /* OUT */ + vm_map_t *real_map) { - vm_map_entry_t entry; - vm_map_t map = *var_map; - vm_map_t old_map = *var_map; - vm_map_t cow_sub_map_parent = VM_MAP_NULL; - vm_map_offset_t cow_parent_vaddr = 0; - vm_map_offset_t old_start = 0; - vm_map_offset_t old_end = 0; - vm_prot_t prot; - boolean_t mask_protections; - boolean_t force_copy; - vm_prot_t 
original_fault_type; + vm_map_entry_t entry; + vm_map_t map = *var_map; + vm_map_t old_map = *var_map; + vm_map_t cow_sub_map_parent = VM_MAP_NULL; + vm_map_offset_t cow_parent_vaddr = 0; + vm_map_offset_t old_start = 0; + vm_map_offset_t old_end = 0; + vm_prot_t prot; + boolean_t mask_protections; + boolean_t force_copy; + vm_prot_t original_fault_type; /* * VM_PROT_MASK means that the caller wants us to use "fault_type" @@ -12556,24 +12674,26 @@ RetryLookup: if ((entry == vm_map_to_entry(map)) || (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) { - vm_map_entry_t tmp_entry; + vm_map_entry_t tmp_entry; /* * Entry was either not a valid hint, or the vaddr * was not contained in the entry, so do a full lookup. */ if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) { - if((cow_sub_map_parent) && (cow_sub_map_parent != map)) + if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) { vm_map_unlock(cow_sub_map_parent); - if((*real_map != map) - && (*real_map != cow_sub_map_parent)) + } + if ((*real_map != map) + && (*real_map != cow_sub_map_parent)) { vm_map_unlock(*real_map); + } return KERN_INVALID_ADDRESS; } entry = tmp_entry; } - if(map == old_map) { + if (map == old_map) { old_start = entry->vme_start; old_end = entry->vme_end; } @@ -12585,28 +12705,29 @@ RetryLookup: submap_recurse: if (entry->is_sub_map) { - vm_map_offset_t local_vaddr; - vm_map_offset_t end_delta; - vm_map_offset_t start_delta; - vm_map_entry_t submap_entry; - vm_prot_t subentry_protection; - vm_prot_t subentry_max_protection; - boolean_t mapped_needs_copy=FALSE; + vm_map_offset_t local_vaddr; + vm_map_offset_t end_delta; + vm_map_offset_t start_delta; + vm_map_entry_t submap_entry; + vm_prot_t subentry_protection; + vm_prot_t subentry_max_protection; + boolean_t mapped_needs_copy = FALSE; local_vaddr = vaddr; if ((entry->use_pmap && - ! ((fault_type & VM_PROT_WRITE) || - force_copy))) { + !((fault_type & VM_PROT_WRITE) || + force_copy))) { /* if real_map equals map we unlock below */ if ((*real_map != map) && - (*real_map != cow_sub_map_parent)) + (*real_map != cow_sub_map_parent)) { vm_map_unlock(*real_map); + } *real_map = VME_SUBMAP(entry); } - if(entry->needs_copy && - ((fault_type & VM_PROT_WRITE) || + if (entry->needs_copy && + ((fault_type & VM_PROT_WRITE) || force_copy)) { if (!mapped_needs_copy) { if (vm_map_lock_read_to_write(map)) { @@ -12627,9 +12748,10 @@ submap_recurse: } else { vm_map_lock_read(VME_SUBMAP(entry)); *var_map = VME_SUBMAP(entry); - if((cow_sub_map_parent != map) && - (*real_map != map)) + if ((cow_sub_map_parent != map) && + (*real_map != map)) { vm_map_unlock(map); + } } } else { vm_map_lock_read(VME_SUBMAP(entry)); @@ -12639,8 +12761,9 @@ submap_recurse: /* follow the maps down to the object */ /* here we unlock knowing we are not */ /* revisiting the map. 
*/ - if((*real_map != map) && (map != cow_sub_map_parent)) + if ((*real_map != map) && (map != cow_sub_map_parent)) { vm_map_unlock_read(map); + } } map = *var_map; @@ -12648,13 +12771,13 @@ submap_recurse: /* calculate the offset in the submap for vaddr */ local_vaddr = (local_vaddr - entry->vme_start) + VME_OFFSET(entry); - RetrySubMap: - if(!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) { - if((cow_sub_map_parent) && (cow_sub_map_parent != map)){ +RetrySubMap: + if (!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) { + if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) { vm_map_unlock(cow_sub_map_parent); } - if((*real_map != map) - && (*real_map != cow_sub_map_parent)) { + if ((*real_map != map) + && (*real_map != cow_sub_map_parent)) { vm_map_unlock(*real_map); } *real_map = map; @@ -12672,33 +12795,32 @@ submap_recurse: /* to be as big as the portion of the underlying entry */ /* which is mapped */ start_delta = submap_entry->vme_start > VME_OFFSET(entry) ? - submap_entry->vme_start - VME_OFFSET(entry) : 0; + submap_entry->vme_start - VME_OFFSET(entry) : 0; end_delta = - (VME_OFFSET(entry) + start_delta + (old_end - old_start)) <= - submap_entry->vme_end ? - 0 : (VME_OFFSET(entry) + - (old_end - old_start)) - - submap_entry->vme_end; + (VME_OFFSET(entry) + start_delta + (old_end - old_start)) <= + submap_entry->vme_end ? + 0 : (VME_OFFSET(entry) + + (old_end - old_start)) + - submap_entry->vme_end; old_start += start_delta; old_end -= end_delta; - if(submap_entry->is_sub_map) { + if (submap_entry->is_sub_map) { entry = submap_entry; vaddr = local_vaddr; goto submap_recurse; } if (((fault_type & VM_PROT_WRITE) || - force_copy) + force_copy) && cow_sub_map_parent) { - - vm_object_t sub_object, copy_object; + vm_object_t sub_object, copy_object; vm_object_offset_t copy_offset; - vm_map_offset_t local_start; - vm_map_offset_t local_end; - boolean_t copied_slowly = FALSE; + vm_map_offset_t local_start; + vm_map_offset_t local_end; + boolean_t copied_slowly = FALSE; if (vm_map_lock_read_to_write(map)) { vm_map_lock_read(map); @@ -12711,19 +12833,19 @@ submap_recurse: sub_object = VME_OBJECT(submap_entry); if (sub_object == VM_OBJECT_NULL) { sub_object = - vm_object_allocate( - (vm_map_size_t) - (submap_entry->vme_end - - submap_entry->vme_start)); + vm_object_allocate( + (vm_map_size_t) + (submap_entry->vme_end - + submap_entry->vme_start)); VME_OBJECT_SET(submap_entry, sub_object); VME_OFFSET_SET(submap_entry, 0); assert(!submap_entry->is_sub_map); assert(submap_entry->use_pmap); } local_start = local_vaddr - - (cow_parent_vaddr - old_start); + (cow_parent_vaddr - old_start); local_end = local_vaddr + - (old_end - cow_parent_vaddr); + (old_end - cow_parent_vaddr); vm_map_clip_start(map, submap_entry, local_start); vm_map_clip_end(map, submap_entry, local_end); if (submap_entry->is_sub_map) { @@ -12737,19 +12859,18 @@ submap_recurse: /* submap. 
*/ - if(submap_entry->wired_count != 0 || - (sub_object->copy_strategy == + if (submap_entry->wired_count != 0 || + (sub_object->copy_strategy == MEMORY_OBJECT_COPY_NONE)) { vm_object_lock(sub_object); vm_object_copy_slowly(sub_object, - VME_OFFSET(submap_entry), - (submap_entry->vme_end - - submap_entry->vme_start), - FALSE, - ©_object); + VME_OFFSET(submap_entry), + (submap_entry->vme_end - + submap_entry->vme_start), + FALSE, + ©_object); copied_slowly = TRUE; } else { - /* set up shadow object */ copy_object = sub_object; vm_object_lock(sub_object); @@ -12766,9 +12887,10 @@ submap_recurse: assert(!pmap_has_prot_policy(prot)); if (override_nx(old_map, - VME_ALIAS(submap_entry)) - && prot) - prot |= VM_PROT_EXECUTE; + VME_ALIAS(submap_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } vm_object_pmap_protect( sub_object, @@ -12776,7 +12898,7 @@ submap_recurse: submap_entry->vme_end - submap_entry->vme_start, (submap_entry->is_shared - || map->mapped_in_other_pmaps) ? + || map->mapped_in_other_pmaps) ? PMAP_NULL : map->pmap, submap_entry->vme_start, prot); @@ -12786,8 +12908,8 @@ submap_recurse: * Adjust the fault offset to the submap entry. */ copy_offset = (local_vaddr - - submap_entry->vme_start + - VME_OFFSET(submap_entry)); + submap_entry->vme_start + + VME_OFFSET(submap_entry)); /* This works diffently than the */ /* normal submap case. We go back */ @@ -12808,8 +12930,8 @@ submap_recurse: vaddr = cow_parent_vaddr; cow_sub_map_parent = NULL; - if(!vm_map_lookup_entry(map, - vaddr, &entry)) { + if (!vm_map_lookup_entry(map, + vaddr, &entry)) { vm_object_deallocate( copy_object); vm_map_lock_write_to_read(map); @@ -12883,19 +13005,19 @@ submap_recurse: #endif /* !CONFIG_EMBEDDED */ !(entry->used_for_jit)) { DTRACE_VM3(cs_wx, - uint64_t, (uint64_t)entry->vme_start, - uint64_t, (uint64_t)entry->vme_end, - vm_prot_t, entry->protection); + uint64_t, (uint64_t)entry->vme_start, + uint64_t, (uint64_t)entry->vme_end, + vm_prot_t, entry->protection); printf("CODE SIGNING: %d[%s] %s can't have both write and exec at the same time\n", - proc_selfpid(), - (current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - __FUNCTION__); + proc_selfpid(), + (current_task()->bsd_info + ? 
proc_name_address(current_task()->bsd_info) + : "?"), + __FUNCTION__); entry->protection &= ~VM_PROT_EXECUTE; } - if(copied_slowly) { + if (copied_slowly) { VME_OFFSET_SET(entry, local_start - old_start); entry->needs_copy = FALSE; entry->is_shared = FALSE; @@ -12903,19 +13025,22 @@ submap_recurse: VME_OFFSET_SET(entry, copy_offset); assert(entry->wired_count == 0); entry->needs_copy = TRUE; - if(entry->inheritance == VM_INHERIT_SHARE) + if (entry->inheritance == VM_INHERIT_SHARE) { entry->inheritance = VM_INHERIT_COPY; - if (map != old_map) + } + if (map != old_map) { entry->is_shared = TRUE; + } } - if(entry->inheritance == VM_INHERIT_SHARE) + if (entry->inheritance == VM_INHERIT_SHARE) { entry->inheritance = VM_INHERIT_COPY; + } vm_map_lock_write_to_read(map); } else { - if((cow_sub_map_parent) - && (cow_sub_map_parent != *real_map) - && (cow_sub_map_parent != map)) { + if ((cow_sub_map_parent) + && (cow_sub_map_parent != *real_map) + && (cow_sub_map_parent != map)) { vm_map_unlock(cow_sub_map_parent); } entry = submap_entry; @@ -12931,10 +13056,10 @@ submap_recurse: prot = entry->protection; if (override_nx(old_map, VME_ALIAS(entry)) && prot) { - /* + /* * HACK -- if not a stack, then allow execution */ - prot |= VM_PROT_EXECUTE; + prot |= VM_PROT_EXECUTE; } if (mask_protections) { @@ -12949,14 +13074,15 @@ submap_recurse: && !(prot == VM_PROT_EXECUTE && fault_type == (VM_PROT_READ | VM_PROT_EXECUTE)) #endif ) { - protection_failure: +protection_failure: if (*real_map != map) { vm_map_unlock(*real_map); } *real_map = map; - if ((fault_type & VM_PROT_EXECUTE) && prot) - log_stack_execution_failure((addr64_t)vaddr, prot); + if ((fault_type & VM_PROT_EXECUTE) && prot) { + log_stack_execution_failure((addr64_t)vaddr, prot); + } DTRACE_VM2(prot_fault, int, 1, (uint64_t *), NULL); return KERN_PROTECTION_FAILURE; @@ -12968,15 +13094,16 @@ submap_recurse: */ *wired = (entry->wired_count != 0); - if (*wired) - fault_type = prot; + if (*wired) { + fault_type = prot; + } /* * If the entry was copy-on-write, we either ... */ if (entry->needs_copy) { - /* + /* * If we want to write the page, we may as well * handle that now since we've got the map locked. * @@ -13003,8 +13130,8 @@ submap_recurse: vm_object_unlock(VME_OBJECT(entry)); } VME_OBJECT_SHADOW(entry, - (vm_map_size_t) (entry->vme_end - - entry->vme_start)); + (vm_map_size_t) (entry->vme_end - + entry->vme_start)); entry->needs_copy = FALSE; vm_map_lock_write_to_read(map); @@ -13023,16 +13150,15 @@ submap_recurse: * Create an object if necessary. */ if (VME_OBJECT(entry) == VM_OBJECT_NULL) { - if (vm_map_lock_read_to_write(map)) { vm_map_lock_read(map); goto RetryLookup; } VME_OBJECT_SET(entry, - vm_object_allocate( - (vm_map_size_t)(entry->vme_end - - entry->vme_start))); + vm_object_allocate( + (vm_map_size_t)(entry->vme_end - + entry->vme_start))); VME_OFFSET_SET(entry, 0); assert(entry->use_pmap); vm_map_lock_write_to_read(map); @@ -13044,24 +13170,24 @@ submap_recurse: * return the protection. */ - *offset = (vaddr - entry->vme_start) + VME_OFFSET(entry); - *object = VME_OBJECT(entry); + *offset = (vaddr - entry->vme_start) + VME_OFFSET(entry); + *object = VME_OBJECT(entry); *out_prot = prot; if (fault_info) { fault_info->interruptible = THREAD_UNINT; /* for now... */ /* ... 
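/*
 * The protection_failure path above fires when the entry's effective
 * protection lacks a right the fault needs. The core test reduces to
 * "every requested bit must be present"; self-contained form using the
 * real vm_prot_t bits:
 */
#include <mach/vm_prot.h>
#include <stdbool.h>

static bool
access_permitted(vm_prot_t prot, vm_prot_t fault_type)
{
	/* e.g. prot = VM_PROT_READ | VM_PROT_WRITE permits a read fault,
	 * but prot = VM_PROT_EXECUTE alone does not */
	return (prot & fault_type) == fault_type;
}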
the caller will change "interruptible" if needed */ - fault_info->cluster_size = 0; + fault_info->cluster_size = 0; fault_info->user_tag = VME_ALIAS(entry); fault_info->pmap_options = 0; if (entry->iokit_acct || (!entry->is_sub_map && !entry->use_pmap)) { fault_info->pmap_options |= PMAP_OPTIONS_ALT_ACCT; } - fault_info->behavior = entry->behavior; + fault_info->behavior = entry->behavior; fault_info->lo_offset = VME_OFFSET(entry); fault_info->hi_offset = - (entry->vme_end - entry->vme_start) + VME_OFFSET(entry); + (entry->vme_end - entry->vme_start) + VME_OFFSET(entry); fault_info->no_cache = entry->no_cache; fault_info->stealth = FALSE; fault_info->io_sync = FALSE; @@ -13088,10 +13214,11 @@ submap_recurse: /* * Lock the object to prevent it from disappearing */ - if (object_lock_type == OBJECT_LOCK_EXCLUSIVE) - vm_object_lock(*object); - else - vm_object_lock_shared(*object); + if (object_lock_type == OBJECT_LOCK_EXCLUSIVE) { + vm_object_lock(*object); + } else { + vm_object_lock_shared(*object); + } /* * Save the version number @@ -13113,15 +13240,15 @@ submap_recurse: */ boolean_t vm_map_verify( - vm_map_t map, - vm_map_version_t *version) /* REF */ + vm_map_t map, + vm_map_version_t *version) /* REF */ { - boolean_t result; + boolean_t result; vm_map_lock_assert_held(map); result = (map->timestamp == version->main_timestamp); - return(result); + return result; } /* @@ -13135,18 +13262,18 @@ vm_map_verify( kern_return_t vm_map_region_recurse_64( - vm_map_t map, - vm_map_offset_t *address, /* IN/OUT */ - vm_map_size_t *size, /* OUT */ - natural_t *nesting_depth, /* IN/OUT */ - vm_region_submap_info_64_t submap_info, /* IN/OUT */ - mach_msg_type_number_t *count) /* IN/OUT */ + vm_map_t map, + vm_map_offset_t *address, /* IN/OUT */ + vm_map_size_t *size, /* OUT */ + natural_t *nesting_depth, /* IN/OUT */ + vm_region_submap_info_64_t submap_info, /* IN/OUT */ + mach_msg_type_number_t *count) /* IN/OUT */ { - mach_msg_type_number_t original_count; - vm_region_extended_info_data_t extended; - vm_map_entry_t tmp_entry; - vm_map_offset_t user_address; - unsigned int user_max_depth; + mach_msg_type_number_t original_count; + vm_region_extended_info_data_t extended; + vm_map_entry_t tmp_entry; + vm_map_offset_t user_address; + unsigned int user_max_depth; /* * "curr_entry" is the VM map entry preceding or including the @@ -13163,15 +13290,15 @@ vm_map_region_recurse_64( * "curr_address") we should take into account in the current (sub)map. * They limit the range to what's visible through the map entries * we've traversed from the top map to the current map. - + * */ - vm_map_entry_t curr_entry; - vm_map_address_t curr_address; - vm_map_offset_t curr_offset; - vm_map_t curr_map; - unsigned int curr_depth; - vm_map_offset_t curr_max_below, curr_max_above; - vm_map_offset_t curr_skip; + vm_map_entry_t curr_entry; + vm_map_address_t curr_address; + vm_map_offset_t curr_offset; + vm_map_t curr_map; + unsigned int curr_depth; + vm_map_offset_t curr_max_below, curr_max_above; + vm_map_offset_t curr_skip; /* * "next_" is the same as "curr_" but for the VM region immediately @@ -13179,17 +13306,17 @@ vm_map_region_recurse_64( * too because we want to return info about that region if the * address we're looking for is not mapped. 
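/*
 * Self-contained analogue of the vm_map_verify() pattern above: snapshot
 * a generation count under a read lock, drop the lock for slow paging
 * work, and re-validate the snapshot before trusting the earlier lookup.
 * Names are illustrative; a real map bumps the timestamp whenever a
 * writer unlocks it.
 */
#include <pthread.h>
#include <stdbool.h>

struct map {
	pthread_rwlock_t lock;
	unsigned         timestamp;   /* bumped by every writer */
};

static bool
map_verify(struct map *m, unsigned snapshot)
{
	/* caller holds m->lock for reading, like vm_map_verify() */
	return m->timestamp == snapshot;
}

static void
fault_like_lookup(struct map *m)
{
	unsigned snapshot;

	for (;;) {
		pthread_rwlock_rdlock(&m->lock);
		snapshot = m->timestamp;
		/* ... look up the entry, record object/offset ... */
		pthread_rwlock_unlock(&m->lock);

		/* slow paging work happens with the map unlocked */

		pthread_rwlock_rdlock(&m->lock);
		if (map_verify(m, snapshot)) {
			break;        /* unchanged: lookup still valid */
		}
		pthread_rwlock_unlock(&m->lock);  /* changed: redo lookup */
	}
	pthread_rwlock_unlock(&m->lock);
}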
*/ - vm_map_entry_t next_entry; - vm_map_offset_t next_offset; - vm_map_offset_t next_address; - vm_map_t next_map; - unsigned int next_depth; - vm_map_offset_t next_max_below, next_max_above; - vm_map_offset_t next_skip; - - boolean_t look_for_pages; + vm_map_entry_t next_entry; + vm_map_offset_t next_offset; + vm_map_offset_t next_address; + vm_map_t next_map; + unsigned int next_depth; + vm_map_offset_t next_max_below, next_max_above; + vm_map_offset_t next_skip; + + boolean_t look_for_pages; vm_region_submap_short_info_64_t short_info; - boolean_t do_region_footprint; + boolean_t do_region_footprint; if (map == VM_MAP_NULL) { /* no address space to work on */ @@ -13251,8 +13378,8 @@ recurse_again: for (;;) { if (vm_map_lookup_entry(curr_map, - curr_address, - &tmp_entry)) { + curr_address, + &tmp_entry)) { /* tmp_entry contains the address we're looking for */ curr_entry = tmp_entry; } else { @@ -13266,7 +13393,7 @@ recurse_again: if (curr_entry == vm_map_to_entry(curr_map) || (curr_entry->vme_start >= - curr_address + curr_max_above)) { + curr_address + curr_max_above)) { /* no next entry at this level: stop looking */ if (not_in_kdp) { vm_map_unlock_read(curr_map); @@ -13299,14 +13426,14 @@ recurse_again: if (tmp_entry == vm_map_to_entry(curr_map)) { /* no next entry at this level */ } else if (tmp_entry->vme_start >= - curr_address + curr_max_above) { + curr_address + curr_max_above) { /* * tmp_entry is beyond the scope of what we mapped of * this submap in the upper level: ignore it. */ } else if ((next_entry == NULL) || - (tmp_entry->vme_start + curr_offset <= - next_entry->vme_start + next_offset)) { + (tmp_entry->vme_start + curr_offset <= + next_entry->vme_start + next_offset)) { /* * We didn't have a "next_entry" or this one is * closer to the address we're looking for: @@ -13328,10 +13455,10 @@ recurse_again: next_offset += (next_address - curr_address); next_max_above = MIN(next_max_above, curr_max_above); next_max_above = MIN(next_max_above, - next_entry->vme_end - next_address); + next_entry->vme_end - next_address); next_max_below = MIN(next_max_below, curr_max_below); next_max_below = MIN(next_max_below, - next_address - next_entry->vme_start); + next_address - next_entry->vme_start); } /* @@ -13343,9 +13470,9 @@ recurse_again: * "VME_OFFSET(curr_entry)" up to the size of "curr_entry". */ curr_max_above = MIN(curr_max_above, - curr_entry->vme_end - curr_address); + curr_entry->vme_end - curr_address); curr_max_below = MIN(curr_max_below, - curr_address - curr_entry->vme_start); + curr_address - curr_entry->vme_start); if (!curr_entry->is_sub_map || curr_depth >= user_max_depth) { @@ -13373,8 +13500,9 @@ recurse_again: /* keep "next_map" locked in case we need it */ } else { /* release this map */ - if (not_in_kdp) + if (not_in_kdp) { vm_map_unlock_read(curr_map); + } } /* @@ -13387,7 +13515,7 @@ recurse_again: * space (i.e. the top-level VM map). */ curr_offset += - (VME_OFFSET(curr_entry) - curr_entry->vme_start); + (VME_OFFSET(curr_entry) - curr_entry->vme_start); curr_address = user_address + curr_offset; /* switch to the submap */ curr_map = VME_SUBMAP(curr_entry); @@ -13474,7 +13602,7 @@ recurse_again: } /* ... gather info about the next VM region */ curr_entry = next_entry; - curr_map = next_map; /* still locked ... */ + curr_map = next_map; /* still locked ... 
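/*
 * The recursion above backs the user-space mach_vm_region_recurse()
 * call. A runnable sketch that walks its own address space the way
 * vmmap(1) does (macOS; error handling trimmed):
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

int
main(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size;
	natural_t depth = 0;
	vm_region_submap_info_data_64_t info;
	mach_msg_type_number_t count;

	for (;;) {
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		if (mach_vm_region_recurse(mach_task_self(), &addr, &size,
		    &depth, (vm_region_recurse_info_t)&info,
		    &count) != KERN_SUCCESS) {
			break;           /* ran past the last region */
		}
		if (info.is_submap) {
			depth++;         /* descend into the submap */
			continue;
		}
		printf("0x%llx-0x%llx depth %u prot %d\n",
		    (unsigned long long)addr,
		    (unsigned long long)(addr + size),
		    depth, info.protection);
		addr += size;
	}
	return 0;
}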
*/ curr_address = next_address; curr_skip = next_skip; curr_offset = next_offset; @@ -13517,7 +13645,7 @@ recurse_again: // LP64todo: all the current tools are 32bit, obviously never worked for 64b // so probably should be a real 32b ID vs. ptr. // Current users just check for equality -#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM(p)) +#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM(p)) if (look_for_pages) { submap_info->user_tag = VME_ALIAS(curr_entry); @@ -13555,18 +13683,18 @@ recurse_again: if (!curr_entry->is_sub_map) { vm_map_offset_t range_start, range_end; range_start = MAX((curr_address - curr_max_below), - curr_entry->vme_start); + curr_entry->vme_start); range_end = MIN((curr_address + curr_max_above), - curr_entry->vme_end); + curr_entry->vme_end); vm_map_region_walk(curr_map, - range_start, - curr_entry, - (VME_OFFSET(curr_entry) + - (range_start - - curr_entry->vme_start)), - range_end - range_start, - &extended, - look_for_pages, VM_REGION_EXTENDED_INFO_COUNT); + range_start, + curr_entry, + (VME_OFFSET(curr_entry) + + (range_start - + curr_entry->vme_start)), + range_end - range_start, + &extended, + look_for_pages, VM_REGION_EXTENDED_INFO_COUNT); if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) { @@ -13586,7 +13714,7 @@ recurse_again: submap_info->pages_resident = extended.pages_resident; submap_info->pages_swapped_out = extended.pages_swapped_out; submap_info->pages_shared_now_private = - extended.pages_shared_now_private; + extended.pages_shared_now_private; submap_info->pages_dirtied = extended.pages_dirtied; submap_info->external_pager = extended.external_pager; submap_info->shadow_depth = extended.shadow_depth; @@ -13624,30 +13752,31 @@ recurse_again: kern_return_t vm_map_region( - vm_map_t map, - vm_map_offset_t *address, /* IN/OUT */ - vm_map_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + vm_map_offset_t *address, /* IN/OUT */ + vm_map_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - vm_map_entry_t tmp_entry; - vm_map_entry_t entry; - vm_map_offset_t start; + vm_map_entry_t tmp_entry; + vm_map_entry_t entry; + vm_map_offset_t start; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } switch (flavor) { - case VM_REGION_BASIC_INFO: /* legacy for old 32-bit objects info */ { - vm_region_basic_info_t basic; + vm_region_basic_info_t basic; - if (*count < VM_REGION_BASIC_INFO_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < VM_REGION_BASIC_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } basic = (vm_region_basic_info_t) info; *count = VM_REGION_BASIC_INFO_COUNT; @@ -13658,7 +13787,7 @@ vm_map_region( if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } } else { entry = tmp_entry; @@ -13676,7 +13805,9 @@ vm_map_region( *address = start; *size = (entry->vme_end - start); - if (object_name) *object_name = IP_NULL; + if (object_name) { + *object_name = IP_NULL; + } if (entry->is_sub_map) { basic->shared = FALSE; } else { @@ -13684,15 +13815,16 @@ vm_map_region( } 
vm_map_unlock_read(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } case VM_REGION_BASIC_INFO_64: { - vm_region_basic_info_64_t basic; + vm_region_basic_info_64_t basic; - if (*count < VM_REGION_BASIC_INFO_COUNT_64) - return(KERN_INVALID_ARGUMENT); + if (*count < VM_REGION_BASIC_INFO_COUNT_64) { + return KERN_INVALID_ARGUMENT; + } basic = (vm_region_basic_info_64_t) info; *count = VM_REGION_BASIC_INFO_COUNT_64; @@ -13703,7 +13835,7 @@ vm_map_region( if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } } else { entry = tmp_entry; @@ -13721,7 +13853,9 @@ vm_map_region( *address = start; *size = (entry->vme_end - start); - if (object_name) *object_name = IP_NULL; + if (object_name) { + *object_name = IP_NULL; + } if (entry->is_sub_map) { basic->shared = FALSE; } else { @@ -13729,71 +13863,76 @@ vm_map_region( } vm_map_unlock_read(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } case VM_REGION_EXTENDED_INFO: - if (*count < VM_REGION_EXTENDED_INFO_COUNT) - return(KERN_INVALID_ARGUMENT); - /*fallthru*/ + if (*count < VM_REGION_EXTENDED_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } + /*fallthru*/ case VM_REGION_EXTENDED_INFO__legacy: - if (*count < VM_REGION_EXTENDED_INFO_COUNT__legacy) + if (*count < VM_REGION_EXTENDED_INFO_COUNT__legacy) { return KERN_INVALID_ARGUMENT; + } - { - vm_region_extended_info_t extended; - mach_msg_type_number_t original_count; + { + vm_region_extended_info_t extended; + mach_msg_type_number_t original_count; - extended = (vm_region_extended_info_t) info; + extended = (vm_region_extended_info_t) info; - vm_map_lock_read(map); + vm_map_lock_read(map); - start = *address; - if (!vm_map_lookup_entry(map, start, &tmp_entry)) { - if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { - vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); - } - } else { - entry = tmp_entry; - } - start = entry->vme_start; - - extended->protection = entry->protection; - extended->user_tag = VME_ALIAS(entry); - extended->pages_resident = 0; - extended->pages_swapped_out = 0; - extended->pages_shared_now_private = 0; - extended->pages_dirtied = 0; - extended->external_pager = 0; - extended->shadow_depth = 0; + start = *address; + if (!vm_map_lookup_entry(map, start, &tmp_entry)) { + if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { + vm_map_unlock_read(map); + return KERN_INVALID_ADDRESS; + } + } else { + entry = tmp_entry; + } + start = entry->vme_start; - original_count = *count; - if (flavor == VM_REGION_EXTENDED_INFO__legacy) { - *count = VM_REGION_EXTENDED_INFO_COUNT__legacy; - } else { - extended->pages_reusable = 0; - *count = VM_REGION_EXTENDED_INFO_COUNT; - } + extended->protection = entry->protection; + extended->user_tag = VME_ALIAS(entry); + extended->pages_resident = 0; + extended->pages_swapped_out = 0; + extended->pages_shared_now_private = 0; + extended->pages_dirtied = 0; + extended->external_pager = 0; + extended->shadow_depth = 0; + + original_count = *count; + if (flavor == VM_REGION_EXTENDED_INFO__legacy) { + *count = VM_REGION_EXTENDED_INFO_COUNT__legacy; + } else { + extended->pages_reusable = 0; + *count = VM_REGION_EXTENDED_INFO_COUNT; + } - vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, extended, TRUE, *count); + vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, extended, TRUE, *count); - if (extended->external_pager && 
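/*
 * The VM_REGION_BASIC_INFO_64 flavor above is reachable from user space
 * through mach_vm_region(). Runnable example querying the region that
 * contains one of our own stack variables (macOS; error handling
 * trimmed):
 */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

int
main(void)
{
	int probe;   /* any address inside the region to inspect */
	mach_vm_address_t addr = (mach_vm_address_t)&probe;
	mach_vm_size_t size;
	vm_region_basic_info_data_64_t info;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t object_name;   /* returned as IP_NULL, per the code above */

	if (mach_vm_region(mach_task_self(), &addr, &size,
	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
	    &count, &object_name) == KERN_SUCCESS) {
		printf("region 0x%llx size 0x%llx prot %d/%d shared %d\n",
		    (unsigned long long)addr, (unsigned long long)size,
		    info.protection, info.max_protection, info.shared);
	}
	return 0;
}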
extended->ref_count == 2 && extended->share_mode == SM_SHARED) - extended->share_mode = SM_PRIVATE; + if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED) { + extended->share_mode = SM_PRIVATE; + } - if (object_name) - *object_name = IP_NULL; - *address = start; - *size = (entry->vme_end - start); + if (object_name) { + *object_name = IP_NULL; + } + *address = start; + *size = (entry->vme_end - start); - vm_map_unlock_read(map); - return(KERN_SUCCESS); - } + vm_map_unlock_read(map); + return KERN_SUCCESS; + } case VM_REGION_TOP_INFO: { - vm_region_top_info_t top; + vm_region_top_info_t top; - if (*count < VM_REGION_TOP_INFO_COUNT) - return(KERN_INVALID_ARGUMENT); + if (*count < VM_REGION_TOP_INFO_COUNT) { + return KERN_INVALID_ARGUMENT; + } top = (vm_region_top_info_t) info; *count = VM_REGION_TOP_INFO_COUNT; @@ -13804,11 +13943,10 @@ vm_map_region( if (!vm_map_lookup_entry(map, start, &tmp_entry)) { if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) { vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } } else { entry = tmp_entry; - } start = entry->vme_start; @@ -13817,31 +13955,31 @@ vm_map_region( vm_map_region_top_walk(entry, top); - if (object_name) + if (object_name) { *object_name = IP_NULL; + } *address = start; *size = (entry->vme_end - start); vm_map_unlock_read(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } default: - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } } -#define OBJ_RESIDENT_COUNT(obj, entry_size) \ - MIN((entry_size), \ - ((obj)->all_reusable ? \ - (obj)->wired_page_count : \ +#define OBJ_RESIDENT_COUNT(obj, entry_size) \ + MIN((entry_size), \ + ((obj)->all_reusable ? \ + (obj)->wired_page_count : \ (obj)->resident_page_count - (obj)->reusable_page_count)) void vm_map_region_top_walk( - vm_map_entry_t entry, + vm_map_entry_t entry, vm_region_top_info_t top) { - if (VME_OBJECT(entry) == 0 || entry->is_sub_map) { top->share_mode = SM_EMPTY; top->ref_count = 0; @@ -13850,9 +13988,9 @@ vm_map_region_top_walk( } { - struct vm_object *obj, *tmp_obj; - int ref_count; - uint32_t entry_size; + struct vm_object *obj, *tmp_obj; + int ref_count; + uint32_t entry_size; entry_size = (uint32_t) ((entry->vme_end - entry->vme_start) / PAGE_SIZE_64); @@ -13860,17 +13998,19 @@ vm_map_region_top_walk( vm_object_lock(obj); - if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) + if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) { ref_count--; + } assert(obj->reusable_page_count <= obj->resident_page_count); if (obj->shadow) { - if (ref_count == 1) + if (ref_count == 1) { top->private_pages_resident = - OBJ_RESIDENT_COUNT(obj, entry_size); - else + OBJ_RESIDENT_COUNT(obj, entry_size); + } else { top->shared_pages_resident = - OBJ_RESIDENT_COUNT(obj, entry_size); + OBJ_RESIDENT_COUNT(obj, entry_size); + } top->ref_count = ref_count; top->share_mode = SM_COW; @@ -13879,12 +14019,13 @@ vm_map_region_top_walk( vm_object_unlock(obj); obj = tmp_obj; - if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) + if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) { ref_count--; + } assert(obj->reusable_page_count <= obj->resident_page_count); top->shared_pages_resident += - OBJ_RESIDENT_COUNT(obj, entry_size); + OBJ_RESIDENT_COUNT(obj, entry_size); top->ref_count += ref_count - 1; } } else { @@ -13895,19 +14036,19 @@ vm_map_region_top_walk( } else if (entry->needs_copy) { top->share_mode = SM_COW; top->shared_pages_resident = - 
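/*
 * Stand-alone restatement of OBJ_RESIDENT_COUNT above: clamp an object's
 * countable pages to the size of the mapping, and treat an all-reusable
 * object as contributing only its wired pages. The struct is
 * illustrative; the field names mirror the kernel's.
 */
#include <stdint.h>

struct obj_counts {
	int      all_reusable;
	uint32_t wired_page_count;
	uint32_t resident_page_count;
	uint32_t reusable_page_count;
};

static uint32_t
obj_resident_count(const struct obj_counts *o, uint32_t entry_size)
{
	uint32_t pages = o->all_reusable
	    ? o->wired_page_count
	    : o->resident_page_count - o->reusable_page_count;

	return pages < entry_size ? pages : entry_size;
}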
OBJ_RESIDENT_COUNT(obj, entry_size); + OBJ_RESIDENT_COUNT(obj, entry_size); } else { if (ref_count == 1 || (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) { top->share_mode = SM_PRIVATE; - top->private_pages_resident = - OBJ_RESIDENT_COUNT(obj, - entry_size); + top->private_pages_resident = + OBJ_RESIDENT_COUNT(obj, + entry_size); } else { top->share_mode = SM_SHARED; top->shared_pages_resident = - OBJ_RESIDENT_COUNT(obj, - entry_size); + OBJ_RESIDENT_COUNT(obj, + entry_size); } } top->ref_count = ref_count; @@ -13921,29 +14062,29 @@ vm_map_region_top_walk( void vm_map_region_walk( - vm_map_t map, - vm_map_offset_t va, - vm_map_entry_t entry, - vm_object_offset_t offset, - vm_object_size_t range, - vm_region_extended_info_t extended, - boolean_t look_for_pages, + vm_map_t map, + vm_map_offset_t va, + vm_map_entry_t entry, + vm_object_offset_t offset, + vm_object_size_t range, + vm_region_extended_info_t extended, + boolean_t look_for_pages, mach_msg_type_number_t count) { - struct vm_object *obj, *tmp_obj; + struct vm_object *obj, *tmp_obj; vm_map_offset_t last_offset; int i; int ref_count; - struct vm_object *shadow_object; - int shadow_depth; - boolean_t do_region_footprint; + struct vm_object *shadow_object; + int shadow_depth; + boolean_t do_region_footprint; do_region_footprint = task_self_region_footprint(); if ((VME_OBJECT(entry) == 0) || (entry->is_sub_map) || (VME_OBJECT(entry)->phys_contiguous && - !entry->superpage_size)) { + !entry->superpage_size)) { extended->share_mode = SM_EMPTY; extended->ref_count = 0; return; @@ -13963,14 +14104,14 @@ vm_map_region_walk( vm_object_lock(obj); - if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) + if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) { ref_count--; + } if (look_for_pages) { for (last_offset = offset + range; - offset < last_offset; - offset += PAGE_SIZE_64, va += PAGE_SIZE) { - + offset < last_offset; + offset += PAGE_SIZE_64, va += PAGE_SIZE) { if (do_region_footprint) { int disp; @@ -13989,8 +14130,8 @@ vm_map_region_walk( * Query the pmap. */ pmap_query_page_info(map->pmap, - va, - &disp); + va, + &disp); } if (disp & PMAP_QUERY_PAGE_PRESENT) { if (!(disp & PMAP_QUERY_PAGE_ALTACCT)) { @@ -13999,7 +14140,7 @@ vm_map_region_walk( if (disp & PMAP_QUERY_PAGE_REUSABLE) { extended->pages_reusable++; } else if (!(disp & PMAP_QUERY_PAGE_INTERNAL) || - (disp & PMAP_QUERY_PAGE_ALTACCT)) { + (disp & PMAP_QUERY_PAGE_ALTACCT)) { /* alternate accounting */ } else { extended->pages_dirtied++; @@ -14017,11 +14158,11 @@ vm_map_region_walk( VM_OBJECT_OWNER(obj) != NULL && VM_OBJECT_OWNER(obj)->map == map) { if ((((va - - entry->vme_start - + VME_OFFSET(entry)) - / PAGE_SIZE) < - (obj->resident_page_count + - vm_compressor_pager_get_count(obj->pager)))) { + - entry->vme_start + + VME_OFFSET(entry)) + / PAGE_SIZE) < + (obj->resident_page_count + + vm_compressor_pager_get_count(obj->pager)))) { /* * Non-volatile purgeable object owned * by this task: report the first @@ -14038,15 +14179,15 @@ vm_map_region_walk( extended->pages_resident++; } } else if ((obj->purgable == VM_PURGABLE_VOLATILE || - obj->purgable == VM_PURGABLE_EMPTY) && - /* && not tagged as no-footprint? */ - VM_OBJECT_OWNER(obj) != NULL && - VM_OBJECT_OWNER(obj)->map == map) { + obj->purgable == VM_PURGABLE_EMPTY) && + /* && not tagged as no-footprint? 
*/ + VM_OBJECT_OWNER(obj) != NULL && + VM_OBJECT_OWNER(obj)->map == map) { if ((((va - - entry->vme_start - + VME_OFFSET(entry)) - / PAGE_SIZE) < - obj->wired_page_count)) { + - entry->vme_start + + VME_OFFSET(entry)) + / PAGE_SIZE) < + obj->wired_page_count)) { /* * Volatile|empty purgeable object owned * by this task: report the first @@ -14065,7 +14206,7 @@ vm_map_region_walk( } else if (obj->purgable != VM_PURGABLE_DENY) { /* * Pages from purgeable objects - * will be reported as dirty + * will be reported as dirty * appropriately in an extra * fake memory region at the end of * the address space. @@ -14082,32 +14223,33 @@ vm_map_region_walk( } vm_map_region_look_for_page(map, va, obj, - offset, ref_count, - 0, extended, count); + offset, ref_count, + 0, extended, count); } if (do_region_footprint) { goto collect_object_info; } - } else { - collect_object_info: +collect_object_info: shadow_object = obj->shadow; shadow_depth = 0; - if ( !(obj->pager_trusted) && !(obj->internal)) + if (!(obj->pager_trusted) && !(obj->internal)) { extended->external_pager = 1; + } if (shadow_object != VM_OBJECT_NULL) { vm_object_lock(shadow_object); for (; - shadow_object != VM_OBJECT_NULL; - shadow_depth++) { - vm_object_t next_shadow; + shadow_object != VM_OBJECT_NULL; + shadow_depth++) { + vm_object_t next_shadow; - if ( !(shadow_object->pager_trusted) && - !(shadow_object->internal)) + if (!(shadow_object->pager_trusted) && + !(shadow_object->internal)) { extended->external_pager = 1; + } next_shadow = shadow_object->shadow; if (next_shadow) { @@ -14120,28 +14262,31 @@ vm_map_region_walk( extended->shadow_depth = shadow_depth; } - if (extended->shadow_depth || entry->needs_copy) + if (extended->shadow_depth || entry->needs_copy) { extended->share_mode = SM_COW; - else { - if (ref_count == 1) + } else { + if (ref_count == 1) { extended->share_mode = SM_PRIVATE; - else { - if (obj->true_share) + } else { + if (obj->true_share) { extended->share_mode = SM_TRUESHARED; - else + } else { extended->share_mode = SM_SHARED; + } } } extended->ref_count = ref_count - extended->shadow_depth; for (i = 0; i < extended->shadow_depth; i++) { - if ((tmp_obj = obj->shadow) == 0) + if ((tmp_obj = obj->shadow) == 0) { break; + } vm_object_lock(tmp_obj); vm_object_unlock(obj); - if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress) + if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress) { ref_count--; + } extended->ref_count += ref_count; obj = tmp_obj; @@ -14149,23 +14294,26 @@ vm_map_region_walk( vm_object_unlock(obj); if (extended->share_mode == SM_SHARED) { - vm_map_entry_t cur; - vm_map_entry_t last; + vm_map_entry_t cur; + vm_map_entry_t last; int my_refs; obj = VME_OBJECT(entry); last = vm_map_to_entry(map); my_refs = 0; - if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) + if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress) { ref_count--; - for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next) + } + for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next) { my_refs += vm_map_region_count_obj_refs(cur, obj); + } - if (my_refs == ref_count) + if (my_refs == ref_count) { extended->share_mode = SM_PRIVATE_ALIASED; - else if (my_refs > 1) + } else if (my_refs > 1) { extended->share_mode = SM_SHARED_ALIASED; + } } } @@ -14175,37 +14323,38 @@ vm_map_region_walk( static void vm_map_region_look_for_page( - __unused vm_map_t map, - __unused vm_map_offset_t va, - vm_object_t object, - vm_object_offset_t offset, - int max_refcnt, - 
int depth, - vm_region_extended_info_t extended, + __unused vm_map_t map, + __unused vm_map_offset_t va, + vm_object_t object, + vm_object_offset_t offset, + int max_refcnt, + int depth, + vm_region_extended_info_t extended, mach_msg_type_number_t count) { - vm_page_t p; - vm_object_t shadow; - int ref_count; - vm_object_t caller_object; + vm_page_t p; + vm_object_t shadow; + int ref_count; + vm_object_t caller_object; shadow = object->shadow; caller_object = object; while (TRUE) { - - if ( !(object->pager_trusted) && !(object->internal)) + if (!(object->pager_trusted) && !(object->internal)) { extended->external_pager = 1; + } if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { - if (shadow && (max_refcnt == 1)) - extended->pages_shared_now_private++; + if (shadow && (max_refcnt == 1)) { + extended->pages_shared_now_private++; + } if (!p->vmp_fictitious && - (p->vmp_dirty || pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) - extended->pages_dirtied++; - else if (count >= VM_REGION_EXTENDED_INFO_COUNT) { + (p->vmp_dirty || pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) { + extended->pages_dirtied++; + } else if (count >= VM_REGION_EXTENDED_INFO_COUNT) { if (p->vmp_reusable || object->all_reusable) { extended->pages_reusable++; } @@ -14213,8 +14362,9 @@ vm_map_region_look_for_page( extended->pages_resident++; - if(object != caller_object) + if (object != caller_object) { vm_object_unlock(object); + } return; } @@ -14222,13 +14372,13 @@ vm_map_region_look_for_page( object->alive && !object->terminating && object->pager_ready) { - if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_EXISTS) { /* the pager has that page */ extended->pages_swapped_out++; - if (object != caller_object) + if (object != caller_object) { vm_object_unlock(object); + } return; } } @@ -14236,61 +14386,69 @@ vm_map_region_look_for_page( if (shadow) { vm_object_lock(shadow); - if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress) - ref_count--; + if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress) { + ref_count--; + } - if (++depth > extended->shadow_depth) - extended->shadow_depth = depth; + if (++depth > extended->shadow_depth) { + extended->shadow_depth = depth; + } - if (ref_count > max_refcnt) - max_refcnt = ref_count; + if (ref_count > max_refcnt) { + max_refcnt = ref_count; + } - if(object != caller_object) + if (object != caller_object) { vm_object_unlock(object); + } offset = offset + object->vo_shadow_offset; object = shadow; shadow = object->shadow; continue; } - if(object != caller_object) + if (object != caller_object) { vm_object_unlock(object); + } break; } } static int vm_map_region_count_obj_refs( - vm_map_entry_t entry, + vm_map_entry_t entry, vm_object_t object) { - int ref_count; + int ref_count; vm_object_t chk_obj; vm_object_t tmp_obj; - if (VME_OBJECT(entry) == 0) - return(0); + if (VME_OBJECT(entry) == 0) { + return 0; + } - if (entry->is_sub_map) - return(0); - else { + if (entry->is_sub_map) { + return 0; + } else { ref_count = 0; chk_obj = VME_OBJECT(entry); vm_object_lock(chk_obj); while (chk_obj) { - if (chk_obj == object) + if (chk_obj == object) { ref_count++; + } tmp_obj = chk_obj->shadow; - if (tmp_obj) + if (tmp_obj) { vm_object_lock(tmp_obj); + } vm_object_unlock(chk_obj); chk_obj = tmp_obj; } } - return(ref_count); + return ref_count; } @@ -14309,10 +14467,10 @@ vm_map_region_count_obj_refs( */ void vm_map_simplify_entry( - vm_map_t map, - vm_map_entry_t this_entry) + vm_map_t map, + vm_map_entry_t this_entry) { - vm_map_entry_t 
prev_entry; + vm_map_entry_t prev_entry; counter(c_vm_map_simplify_entry_called++); @@ -14326,8 +14484,8 @@ vm_map_simplify_entry( (prev_entry->is_sub_map == this_entry->is_sub_map) && (VME_OBJECT(prev_entry) == VME_OBJECT(this_entry)) && ((VME_OFFSET(prev_entry) + (prev_entry->vme_end - - prev_entry->vme_start)) - == VME_OFFSET(this_entry)) && + prev_entry->vme_start)) + == VME_OFFSET(this_entry)) && (prev_entry->behavior == this_entry->behavior) && (prev_entry->needs_copy == this_entry->needs_copy) && @@ -14345,9 +14503,9 @@ vm_map_simplify_entry( /* from_reserved_zone: OK if that field doesn't match */ (prev_entry->iokit_acct == this_entry->iokit_acct) && (prev_entry->vme_resilient_codesign == - this_entry->vme_resilient_codesign) && + this_entry->vme_resilient_codesign) && (prev_entry->vme_resilient_media == - this_entry->vme_resilient_media) && + this_entry->vme_resilient_media) && (prev_entry->wired_count == this_entry->wired_count) && (prev_entry->user_wired_count == this_entry->user_wired_count) && @@ -14361,12 +14519,13 @@ vm_map_simplify_entry( (this_entry->is_shared == FALSE) && (prev_entry->superpage_size == FALSE) && (this_entry->superpage_size == FALSE) - ) { + ) { vm_map_store_entry_unlink(map, prev_entry); assert(prev_entry->vme_start < this_entry->vme_end); - if (prev_entry->map_aligned) + if (prev_entry->map_aligned) { assert(VM_MAP_PAGE_ALIGNED(prev_entry->vme_start, - VM_MAP_PAGE_MASK(map))); + VM_MAP_PAGE_MASK(map))); + } this_entry->vme_start = prev_entry->vme_start; VME_OFFSET_SET(this_entry, VME_OFFSET(prev_entry)); @@ -14387,10 +14546,10 @@ vm_map_simplify_entry( void vm_map_simplify( - vm_map_t map, - vm_map_offset_t start) + vm_map_t map, + vm_map_offset_t start) { - vm_map_entry_t this_entry; + vm_map_entry_t this_entry; vm_map_lock(map); if (vm_map_lookup_entry(map, start, &this_entry)) { @@ -14403,11 +14562,11 @@ vm_map_simplify( static void vm_map_simplify_range( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; + vm_map_entry_t entry; /* * The map should be locked (for "write") by the caller. 
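Aside: the long predicate reindented in the vm_map_simplify_entry() hunks above is easier to follow in miniature. Two neighboring entries may merge only when the second starts exactly where the first ends, both reference the same backing object at contiguous offsets, and every attribute that affects fault handling matches. A trimmed-down sketch of that test follows; the struct and field subset are hypothetical stand-ins for illustration only (the real check compares many more attributes, from wiring counts to resilience flags):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified stand-in for a VM map entry. */
struct mini_entry {
	uint64_t start, end;   /* [start, end) virtual address range */
	void    *object;       /* backing VM object */
	uint64_t offset;       /* offset of 'start' within that object */
	int      protection;   /* current protection bits */
	bool     is_shared;    /* shared mappings are never merged */
};

static bool
can_coalesce(const struct mini_entry *prev, const struct mini_entry *next)
{
	return prev->end == next->start &&                 /* adjacent VA ranges */
	       prev->object == next->object &&             /* same backing object */
	       prev->offset + (prev->end - prev->start)
	           == next->offset &&                      /* contiguous offsets */
	       prev->protection == next->protection &&     /* matching attributes */
	       !prev->is_shared && !next->is_shared;
}

On a successful merge the kernel unlinks the earlier entry and extends the current one backwards, which is why the hunk above asserts prev_entry->vme_start < this_entry->vme_end before relinking.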
@@ -14419,9 +14578,9 @@ vm_map_simplify_range( } start = vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); end = vm_map_round_page(end, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); if (!vm_map_lookup_entry(map, start, &entry)) { /* "start" is not mapped and "entry" ends before "start" */ @@ -14435,7 +14594,7 @@ vm_map_simplify_range( } while (entry != vm_map_to_entry(map) && - entry->vme_start <= end) { + entry->vme_start <= end) { /* try and coalesce "entry" with its previous entry */ vm_map_simplify_entry(map, entry); entry = entry->vme_next; @@ -14459,18 +14618,19 @@ vm_map_simplify_range( */ kern_return_t vm_map_machine_attribute( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value) /* IN/OUT */ + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ { - kern_return_t ret; + kern_return_t ret; vm_map_size_t sync_size; vm_map_entry_t entry; - if (start < vm_map_min(map) || end > vm_map_max(map)) + if (start < vm_map_min(map) || end > vm_map_max(map)) { return KERN_INVALID_ADDRESS; + } /* Figure how much memory we need to flush (in page increments) */ sync_size = end - start; @@ -14480,30 +14640,30 @@ vm_map_machine_attribute( if (attribute != MATTR_CACHE) { /* If we don't have to find physical addresses, we */ /* don't have to do an explicit traversal here. */ - ret = pmap_attribute(map->pmap, start, end-start, - attribute, value); + ret = pmap_attribute(map->pmap, start, end - start, + attribute, value); vm_map_unlock(map); return ret; } - ret = KERN_SUCCESS; /* Assume it all worked */ + ret = KERN_SUCCESS; /* Assume it all worked */ - while(sync_size) { + while (sync_size) { if (vm_map_lookup_entry(map, start, &entry)) { - vm_map_size_t sub_size; - if((entry->vme_end - start) > sync_size) { + vm_map_size_t sub_size; + if ((entry->vme_end - start) > sync_size) { sub_size = sync_size; sync_size = 0; } else { sub_size = entry->vme_end - start; sync_size -= sub_size; } - if(entry->is_sub_map) { + if (entry->is_sub_map) { vm_map_offset_t sub_start; vm_map_offset_t sub_end; sub_start = (start - entry->vme_start) - + VME_OFFSET(entry); + + VME_OFFSET(entry); sub_end = sub_start + sub_size; vm_map_machine_attribute( VME_SUBMAP(entry), @@ -14512,16 +14672,16 @@ vm_map_machine_attribute( attribute, value); } else { if (VME_OBJECT(entry)) { - vm_page_t m; - vm_object_t object; - vm_object_t base_object; - vm_object_t last_object; - vm_object_offset_t offset; - vm_object_offset_t base_offset; - vm_map_size_t range; + vm_page_t m; + vm_object_t object; + vm_object_t base_object; + vm_object_t last_object; + vm_object_offset_t offset; + vm_object_offset_t base_offset; + vm_map_size_t range; range = sub_size; offset = (start - entry->vme_start) - + VME_OFFSET(entry); + + VME_OFFSET(entry); base_offset = offset; object = VME_OBJECT(entry); base_object = object; @@ -14534,14 +14694,13 @@ vm_map_machine_attribute( object, offset); if (m && !m->vmp_fictitious) { - ret = - pmap_attribute_cache_sync( - VM_PAGE_GET_PHYS_PAGE(m), - PAGE_SIZE, - attribute, value); - + ret = + pmap_attribute_cache_sync( + VM_PAGE_GET_PHYS_PAGE(m), + PAGE_SIZE, + attribute, value); } else if (object->shadow) { - offset = offset + object->vo_shadow_offset; + offset = offset + object->vo_shadow_offset; last_object = object; object = object->shadow; vm_object_lock(last_object->shadow); @@ -14551,7 +14710,7 @@ 
vm_map_machine_attribute( range -= PAGE_SIZE; if (base_object != object) { - vm_object_unlock(object); + vm_object_unlock(object); vm_object_lock(base_object); object = base_object; } @@ -14567,7 +14726,6 @@ vm_map_machine_attribute( vm_map_unlock(map); return KERN_FAILURE; } - } vm_map_unlock(map); @@ -14585,13 +14743,13 @@ vm_map_machine_attribute( */ kern_return_t vm_map_behavior_set( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_behavior_t new_behavior) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_behavior_t new_behavior) { - vm_map_entry_t entry; - vm_map_entry_t temp_entry; + vm_map_entry_t entry; + vm_map_entry_t temp_entry; XPR(XPR_VM_MAP, "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d", @@ -14604,7 +14762,6 @@ vm_map_behavior_set( } switch (new_behavior) { - /* * This first block of behaviors all set a persistent state on the specified * memory range. All we have to do here is to record the desired behavior @@ -14620,7 +14777,7 @@ vm_map_behavior_set( /* * The entire address range must be valid for the map. - * Note that vm_map_range_check() does a + * Note that vm_map_range_check() does a * vm_map_lookup_entry() internally and returns the * entry containing the start of the address range if * the entire range is valid. @@ -14628,10 +14785,9 @@ vm_map_behavior_set( if (vm_map_range_check(map, start, end, &temp_entry)) { entry = temp_entry; vm_map_clip_start(map, entry, start); - } - else { + } else { vm_map_unlock(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { @@ -14640,7 +14796,7 @@ vm_map_behavior_set( assert(!entry->use_pmap); } - if( new_behavior == VM_BEHAVIOR_ZERO_WIRED_PAGES ) { + if (new_behavior == VM_BEHAVIOR_ZERO_WIRED_PAGES) { entry->zero_wired_pages = TRUE; } else { entry->behavior = new_behavior; @@ -14681,10 +14837,10 @@ vm_map_behavior_set( #endif /* MACH_ASSERT */ default: - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } - return(KERN_SUCCESS); + return KERN_SUCCESS; } @@ -14699,22 +14855,22 @@ vm_map_behavior_set( static kern_return_t vm_map_willneed( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end -) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end + ) { - vm_map_entry_t entry; - vm_object_t object; - memory_object_t pager; - struct vm_object_fault_info fault_info = {}; - kern_return_t kr; - vm_object_size_t len; - vm_object_offset_t offset; - - fault_info.interruptible = THREAD_UNINT; /* ignored value */ + vm_map_entry_t entry; + vm_object_t object; + memory_object_t pager; + struct vm_object_fault_info fault_info = {}; + kern_return_t kr; + vm_object_size_t len; + vm_object_offset_t offset; + + fault_info.interruptible = THREAD_UNINT; /* ignored value */ fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; - fault_info.stealth = TRUE; + fault_info.stealth = TRUE; /* * The MADV_WILLNEED operation doesn't require any changes to the @@ -14729,7 +14885,7 @@ vm_map_willneed( * an error. */ - if (! vm_map_range_check(map, start, end, &entry)) { + if (!vm_map_range_check(map, start, end, &entry)) { vm_map_unlock_read(map); return KERN_INVALID_ADDRESS; } @@ -14737,8 +14893,7 @@ vm_map_willneed( /* * Examine each vm_map_entry_t in the range. */ - for (; entry != vm_map_to_entry(map) && start < end; ) { - + for (; entry != vm_map_to_entry(map) && start < end;) { /* * The first time through, the start address could be anywhere * within the vm_map_entry we found. 
So adjust the offset to @@ -14821,7 +14976,7 @@ vm_map_willneed( kr = memory_object_data_request( pager, offset + object->paging_offset, - 0, /* ignored */ + 0, /* ignored */ VM_PROT_READ, (memory_object_fault_info_t)&fault_info); @@ -14847,7 +15002,7 @@ vm_map_willneed( /* look up next entry */ vm_map_lock_read(map); - if (! vm_map_lookup_entry(map, start, &entry)) { + if (!vm_map_lookup_entry(map, start, &entry)) { /* * There's a new hole in the address range. */ @@ -14895,17 +15050,17 @@ vm_map_entry_is_reusable( } if (/*entry->is_shared ||*/ - entry->is_sub_map || - entry->in_transition || - entry->protection != VM_PROT_DEFAULT || - entry->max_protection != VM_PROT_ALL || - entry->inheritance != VM_INHERIT_DEFAULT || - entry->no_cache || - entry->permanent || - entry->superpage_size != FALSE || - entry->zero_wired_pages || - entry->wired_count != 0 || - entry->user_wired_count != 0) { + entry->is_sub_map || + entry->in_transition || + entry->protection != VM_PROT_DEFAULT || + entry->max_protection != VM_PROT_ALL || + entry->inheritance != VM_INHERIT_DEFAULT || + entry->no_cache || + entry->permanent || + entry->superpage_size != FALSE || + entry->zero_wired_pages || + entry->wired_count != 0 || + entry->user_wired_count != 0) { return FALSE; } @@ -14927,33 +15082,31 @@ vm_map_entry_is_reusable( * on to it. This allows its "resident size" to not include * the reusable range. */ - object->ref_count == 1 && + object->ref_count == 1 && #endif - object->wired_page_count == 0 && - object->copy == VM_OBJECT_NULL && - object->shadow == VM_OBJECT_NULL && - object->internal && - object->purgable == VM_PURGABLE_DENY && - object->copy_strategy != MEMORY_OBJECT_COPY_DELAY && - !object->true_share && - object->wimg_bits == VM_WIMG_USE_DEFAULT && - !object->code_signed) { + object->wired_page_count == 0 && + object->copy == VM_OBJECT_NULL && + object->shadow == VM_OBJECT_NULL && + object->internal && + object->purgable == VM_PURGABLE_DENY && + object->copy_strategy != MEMORY_OBJECT_COPY_DELAY && + !object->true_share && + object->wimg_bits == VM_WIMG_USE_DEFAULT && + !object->code_signed) { return TRUE; } return FALSE; - - } static kern_return_t vm_map_reuse_pages( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; - vm_object_t object; - vm_object_offset_t start_offset, end_offset; + vm_map_entry_t entry; + vm_object_t object; + vm_object_offset_t start_offset, end_offset; /* * The MADV_REUSE operation doesn't require any changes to the @@ -14961,7 +15114,7 @@ vm_map_reuse_pages( */ vm_map_lock_read(map); - assert(map->pmap != kernel_pmap); /* protect alias access */ + assert(map->pmap != kernel_pmap); /* protect alias access */ /* * The madvise semantics require that the address range be fully @@ -14979,11 +15132,11 @@ vm_map_reuse_pages( * Examine each vm_map_entry_t in the range. */ for (; entry != vm_map_to_entry(map) && entry->vme_start < end; - entry = entry->vme_next) { + entry = entry->vme_next) { /* * Sanity check on the VM map entry. */ - if (! 
vm_map_entry_is_reusable(entry)) { + if (!vm_map_entry_is_reusable(entry)) { vm_map_unlock_read(map); vm_page_stats_reusable.reuse_pages_failure++; return KERN_INVALID_ADDRESS; @@ -15008,7 +15161,7 @@ vm_map_reuse_pages( if (object != VM_OBJECT_NULL) { vm_object_lock(object); vm_object_reuse_pages(object, start_offset, end_offset, - TRUE); + TRUE); vm_object_unlock(object); } @@ -15033,14 +15186,14 @@ vm_map_reuse_pages( static kern_return_t vm_map_reusable_pages( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; - vm_object_t object; - vm_object_offset_t start_offset, end_offset; - vm_map_offset_t pmap_offset; + vm_map_entry_t entry; + vm_object_t object; + vm_object_offset_t start_offset, end_offset; + vm_map_offset_t pmap_offset; /* * The MADV_REUSABLE operation doesn't require any changes to the @@ -15048,7 +15201,7 @@ vm_map_reusable_pages( */ vm_map_lock_read(map); - assert(map->pmap != kernel_pmap); /* protect alias access */ + assert(map->pmap != kernel_pmap); /* protect alias access */ /* * The madvise semantics require that the address range be fully @@ -15066,19 +15219,19 @@ vm_map_reusable_pages( * Examine each vm_map_entry_t in the range. */ for (; entry != vm_map_to_entry(map) && entry->vme_start < end; - entry = entry->vme_next) { + entry = entry->vme_next) { int kill_pages = 0; /* * Sanity check on the VM map entry. */ - if (! vm_map_entry_is_reusable(entry)) { + if (!vm_map_entry_is_reusable(entry)) { vm_map_unlock_read(map); vm_page_stats_reusable.reusable_pages_failure++; return KERN_INVALID_ADDRESS; } - if (! (entry->protection & VM_PROT_WRITE) && !entry->used_for_jit) { + if (!(entry->protection & VM_PROT_WRITE) && !entry->used_for_jit) { /* not writable: can't discard contents */ vm_map_unlock_read(map); vm_page_stats_reusable.reusable_nonwritable++; @@ -15104,14 +15257,15 @@ vm_map_reusable_pages( assert(!entry->is_sub_map); object = VME_OBJECT(entry); - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { continue; + } vm_object_lock(object); if (((object->ref_count == 1) || - (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC && - object->copy == VM_OBJECT_NULL)) && + (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC && + object->copy == VM_OBJECT_NULL)) && object->shadow == VM_OBJECT_NULL && /* * "iokit_acct" entries are billed for their virtual size @@ -15122,7 +15276,7 @@ vm_map_reusable_pages( * ledgers. 
*/ !(entry->iokit_acct || - (!entry->is_sub_map && !entry->use_pmap))) { + (!entry->is_sub_map && !entry->use_pmap))) { if (object->ref_count != 1) { vm_page_stats_reusable.reusable_shared++; } @@ -15132,12 +15286,12 @@ vm_map_reusable_pages( } if (kill_pages != -1) { vm_object_deactivate_pages(object, - start_offset, - end_offset - start_offset, - kill_pages, - TRUE /*reusable_pages*/, - map->pmap, - pmap_offset); + start_offset, + end_offset - start_offset, + kill_pages, + TRUE /*reusable_pages*/, + map->pmap, + pmap_offset); } else { vm_page_stats_reusable.reusable_pages_shared++; } @@ -15165,11 +15319,11 @@ vm_map_reusable_pages( static kern_return_t vm_map_can_reuse( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; + vm_map_entry_t entry; /* * The MADV_REUSABLE operation doesn't require any changes to the @@ -15177,7 +15331,7 @@ vm_map_can_reuse( */ vm_map_lock_read(map); - assert(map->pmap != kernel_pmap); /* protect alias access */ + assert(map->pmap != kernel_pmap); /* protect alias access */ /* * The madvise semantics require that the address range be fully @@ -15195,11 +15349,11 @@ vm_map_can_reuse( * Examine each vm_map_entry_t in the range. */ for (; entry != vm_map_to_entry(map) && entry->vme_start < end; - entry = entry->vme_next) { + entry = entry->vme_next) { /* * Sanity check on the VM map entry. */ - if (! vm_map_entry_is_reusable(entry)) { + if (!vm_map_entry_is_reusable(entry)) { vm_map_unlock_read(map); vm_page_stats_reusable.can_reuse_failure++; return KERN_INVALID_ADDRESS; @@ -15215,11 +15369,11 @@ vm_map_can_reuse( #if MACH_ASSERT static kern_return_t vm_map_pageout( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { - vm_map_entry_t entry; + vm_map_entry_t entry; /* * The MADV_PAGEOUT operation doesn't require any changes to the @@ -15243,8 +15397,8 @@ vm_map_pageout( * Examine each vm_map_entry_t in the range. */ for (; entry != vm_map_to_entry(map) && entry->vme_start < end; - entry = entry->vme_next) { - vm_object_t object; + entry = entry->vme_next) { + vm_object_t object; /* * Sanity check on the VM map entry. @@ -15258,14 +15412,14 @@ vm_map_pageout( submap = VME_SUBMAP(entry); submap_start = VME_OFFSET(entry); submap_end = submap_start + (entry->vme_end - - entry->vme_start); + entry->vme_start); vm_map_lock_read(submap); - if (! 
vm_map_range_check(submap, - submap_start, - submap_end, - &submap_entry)) { + if (!vm_map_range_check(submap, + submap_start, + submap_end, + &submap_entry)) { vm_map_unlock_read(submap); vm_map_unlock_read(map); return KERN_INVALID_ADDRESS; @@ -15310,35 +15464,35 @@ vm_map_pageout( */ vm_map_entry_t vm_map_entry_insert( - vm_map_t map, - vm_map_entry_t insp_entry, - vm_map_offset_t start, - vm_map_offset_t end, - vm_object_t object, - vm_object_offset_t offset, - boolean_t needs_copy, - boolean_t is_shared, - boolean_t in_transition, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_behavior_t behavior, - vm_inherit_t inheritance, - unsigned wired_count, - boolean_t no_cache, - boolean_t permanent, - unsigned int superpage_size, - boolean_t clear_map_aligned, - boolean_t is_submap, - boolean_t used_for_jit, - int alias) + vm_map_t map, + vm_map_entry_t insp_entry, + vm_map_offset_t start, + vm_map_offset_t end, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + boolean_t is_shared, + boolean_t in_transition, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_behavior_t behavior, + vm_inherit_t inheritance, + unsigned wired_count, + boolean_t no_cache, + boolean_t permanent, + unsigned int superpage_size, + boolean_t clear_map_aligned, + boolean_t is_submap, + boolean_t used_for_jit, + int alias) { - vm_map_entry_t new_entry; + vm_map_entry_t new_entry; assert(insp_entry != (vm_map_entry_t)0); vm_map_lock_assert_exclusive(map); #if DEVELOPMENT || DEBUG - vm_object_offset_t end_offset = 0; + vm_object_offset_t end_offset = 0; assertf(!os_add_overflow(end - start, offset, &end_offset), "size 0x%llx, offset 0x%llx caused overflow", (uint64_t)(end - start), offset); #endif /* DEVELOPMENT || DEBUG */ @@ -15350,8 +15504,8 @@ vm_map_entry_insert( new_entry->map_aligned = FALSE; } if (clear_map_aligned && - (! VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)) || - ! 
VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)))) { + (!VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)) || + !VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)))) { new_entry->map_aligned = FALSE; } @@ -15361,9 +15515,9 @@ vm_map_entry_insert( assert(page_aligned(new_entry->vme_end)); if (new_entry->map_aligned) { assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, - VM_MAP_PAGE_MASK(map))); + VM_MAP_PAGE_MASK(map))); assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, - VM_MAP_PAGE_MASK(map))); + VM_MAP_PAGE_MASK(map))); } assert(new_entry->vme_start < new_entry->vme_end); @@ -15397,11 +15551,12 @@ vm_map_entry_insert( new_entry->zero_wired_pages = FALSE; new_entry->no_cache = no_cache; new_entry->permanent = permanent; - if (superpage_size) + if (superpage_size) { new_entry->superpage_size = TRUE; - else + } else { new_entry->superpage_size = FALSE; - if (used_for_jit){ + } + if (used_for_jit) { #if CONFIG_EMBEDDED if (!(map->jit_entry_exists)) #endif /* CONFIG_EMBEDDED */ @@ -15426,7 +15581,7 @@ vm_map_entry_insert( */ vm_map_store_entry_link(map, insp_entry, new_entry, - VM_MAP_KERNEL_FLAGS_NONE); + VM_MAP_KERNEL_FLAGS_NONE); map->size += end - start; /* @@ -15444,42 +15599,42 @@ vm_map_entry_insert( */ static kern_return_t vm_map_remap_extract( - vm_map_t map, - vm_map_offset_t addr, - vm_map_size_t size, - boolean_t copy, - struct vm_map_header *map_header, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, + vm_map_t map, + vm_map_offset_t addr, + vm_map_size_t size, + boolean_t copy, + struct vm_map_header *map_header, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, /* What, no behavior? */ - vm_inherit_t inheritance, - boolean_t pageable, - boolean_t same_map, - vm_map_kernel_flags_t vmk_flags) + vm_inherit_t inheritance, + boolean_t pageable, + boolean_t same_map, + vm_map_kernel_flags_t vmk_flags) { - kern_return_t result; - vm_map_size_t mapped_size; - vm_map_size_t tmp_size; - vm_map_entry_t src_entry; /* result of last map lookup */ - vm_map_entry_t new_entry; - vm_object_offset_t offset; - vm_map_offset_t map_address; - vm_map_offset_t src_start; /* start of entry to map */ - vm_map_offset_t src_end; /* end of region to be mapped */ - vm_object_t object; - vm_map_version_t version; - boolean_t src_needs_copy; - boolean_t new_entry_needs_copy; - vm_map_entry_t saved_src_entry; - boolean_t src_entry_was_wired; - vm_prot_t max_prot_for_prot_copy; + kern_return_t result; + vm_map_size_t mapped_size; + vm_map_size_t tmp_size; + vm_map_entry_t src_entry; /* result of last map lookup */ + vm_map_entry_t new_entry; + vm_object_offset_t offset; + vm_map_offset_t map_address; + vm_map_offset_t src_start; /* start of entry to map */ + vm_map_offset_t src_end; /* end of region to be mapped */ + vm_object_t object; + vm_map_version_t version; + boolean_t src_needs_copy; + boolean_t new_entry_needs_copy; + vm_map_entry_t saved_src_entry; + boolean_t src_entry_was_wired; + vm_prot_t max_prot_for_prot_copy; assert(map != VM_MAP_NULL); assert(size != 0); assert(size == vm_map_round_page(size, PAGE_MASK)); assert(inheritance == VM_INHERIT_NONE || - inheritance == VM_INHERIT_COPY || - inheritance == VM_INHERIT_SHARE); + inheritance == VM_INHERIT_COPY || + inheritance == VM_INHERIT_SHARE); /* * Compute start and end of region. @@ -15517,12 +15672,12 @@ vm_map_remap_extract( */ vm_map_lock(map); while (mapped_size != size) { - vm_map_size_t entry_size; + vm_map_size_t entry_size; /* * Find the beginning of the region. */ - if (! 
vm_map_lookup_entry(map, src_start, &src_entry)) { + if (!vm_map_lookup_entry(map, src_start, &src_entry)) { result = KERN_INVALID_ADDRESS; break; } @@ -15534,13 +15689,14 @@ vm_map_remap_extract( } tmp_size = size - mapped_size; - if (src_end > src_entry->vme_end) + if (src_end > src_entry->vme_end) { tmp_size -= (src_end - src_entry->vme_end); + } entry_size = (vm_map_size_t)(src_entry->vme_end - - src_entry->vme_start); + src_entry->vme_start); - if(src_entry->is_sub_map) { + if (src_entry->is_sub_map) { vm_map_reference(VME_SUBMAP(src_entry)); object = VM_OBJECT_NULL; } else { @@ -15550,34 +15706,34 @@ vm_map_remap_extract( * This entry uses "IOKit accounting". */ } else if (object != VM_OBJECT_NULL && - object->purgable != VM_PURGABLE_DENY) { + object->purgable != VM_PURGABLE_DENY) { /* * Purgeable objects have their own accounting: * no pmap accounting for them. */ assertf(!src_entry->use_pmap, - "map=%p src_entry=%p [0x%llx:0x%llx] 0x%x/0x%x %d", - map, - src_entry, - (uint64_t)src_entry->vme_start, - (uint64_t)src_entry->vme_end, - src_entry->protection, - src_entry->max_protection, - VME_ALIAS(src_entry)); + "map=%p src_entry=%p [0x%llx:0x%llx] 0x%x/0x%x %d", + map, + src_entry, + (uint64_t)src_entry->vme_start, + (uint64_t)src_entry->vme_end, + src_entry->protection, + src_entry->max_protection, + VME_ALIAS(src_entry)); } else { /* * Not IOKit or purgeable: * must be accounted by pmap stats. */ assertf(src_entry->use_pmap, - "map=%p src_entry=%p [0x%llx:0x%llx] 0x%x/0x%x %d", - map, - src_entry, - (uint64_t)src_entry->vme_start, - (uint64_t)src_entry->vme_end, - src_entry->protection, - src_entry->max_protection, - VME_ALIAS(src_entry)); + "map=%p src_entry=%p [0x%llx:0x%llx] 0x%x/0x%x %d", + map, + src_entry, + (uint64_t)src_entry->vme_start, + (uint64_t)src_entry->vme_end, + src_entry->protection, + src_entry->max_protection, + VME_ALIAS(src_entry)); } if (object == VM_OBJECT_NULL) { @@ -15586,7 +15742,7 @@ vm_map_remap_extract( VME_OBJECT_SET(src_entry, object); assert(src_entry->use_pmap); } else if (object->copy_strategy != - MEMORY_OBJECT_COPY_SYMMETRIC) { + MEMORY_OBJECT_COPY_SYMMETRIC) { /* * We are already using an asymmetric * copy, and therefore we already have @@ -15594,29 +15750,29 @@ vm_map_remap_extract( */ assert(!src_entry->needs_copy); } else if (src_entry->needs_copy || object->shadowed || - (object->internal && !object->true_share && - !src_entry->is_shared && - object->vo_size > entry_size)) { - + (object->internal && !object->true_share && + !src_entry->is_shared && + object->vo_size > entry_size)) { VME_OBJECT_SHADOW(src_entry, entry_size); assert(src_entry->use_pmap); if (!src_entry->needs_copy && (src_entry->protection & VM_PROT_WRITE)) { - vm_prot_t prot; + vm_prot_t prot; assert(!pmap_has_prot_policy(src_entry->protection)); - prot = src_entry->protection & ~VM_PROT_WRITE; + prot = src_entry->protection & ~VM_PROT_WRITE; if (override_nx(map, - VME_ALIAS(src_entry)) - && prot) - prot |= VM_PROT_EXECUTE; + VME_ALIAS(src_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } assert(!pmap_has_prot_policy(prot)); - if(map->mapped_in_other_pmaps) { + if (map->mapped_in_other_pmaps) { vm_object_pmap_protect( VME_OBJECT(src_entry), VME_OFFSET(src_entry), @@ -15626,9 +15782,9 @@ vm_map_remap_extract( prot); } else { pmap_protect(vm_map_pmap(map), - src_entry->vme_start, - src_entry->vme_end, - prot); + src_entry->vme_start, + src_entry->vme_end, + prot); } } @@ -15642,13 +15798,13 @@ vm_map_remap_extract( if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { 
object->copy_strategy = - MEMORY_OBJECT_COPY_DELAY; + MEMORY_OBJECT_COPY_DELAY; } vm_object_unlock(object); } offset = (VME_OFFSET(src_entry) + - (src_start - src_entry->vme_start)); + (src_start - src_entry->vme_start)); new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); vm_map_entry_copy(new_entry, src_entry); @@ -15680,7 +15836,7 @@ vm_map_remap_extract( * to convert a read-only mapping into a * copy-on-write version of itself but * with write access: - * keep the original inheritance and add + * keep the original inheritance and add * VM_PROT_WRITE to the max protection. */ new_entry->inheritance = src_entry->inheritance; @@ -15690,11 +15846,11 @@ vm_map_remap_extract( new_entry->inheritance = inheritance; } VME_OFFSET_SET(new_entry, offset); - + /* * The new region has to be copied now if required. */ - RestartCopy: +RestartCopy: if (!copy) { /* * Cannot allow an entry describing a JIT @@ -15708,22 +15864,21 @@ vm_map_remap_extract( } src_entry->is_shared = TRUE; new_entry->is_shared = TRUE; - if (!(new_entry->is_sub_map)) + if (!(new_entry->is_sub_map)) { new_entry->needs_copy = FALSE; - + } } else if (src_entry->is_sub_map) { /* make this a COW sub_map if not already */ assert(new_entry->wired_count == 0); new_entry->needs_copy = TRUE; object = VM_OBJECT_NULL; } else if (src_entry->wired_count == 0 && - vm_object_copy_quickly(&VME_OBJECT(new_entry), - VME_OFFSET(new_entry), - (new_entry->vme_end - - new_entry->vme_start), - &src_needs_copy, - &new_entry_needs_copy)) { - + vm_object_copy_quickly(&VME_OBJECT(new_entry), + VME_OFFSET(new_entry), + (new_entry->vme_end - + new_entry->vme_start), + &src_needs_copy, + &new_entry_needs_copy)) { new_entry->needs_copy = new_entry_needs_copy; new_entry->is_shared = FALSE; assertf(new_entry->use_pmap, "map %p new_entry %p\n", map, new_entry); @@ -15732,27 +15887,28 @@ vm_map_remap_extract( * Handle copy_on_write semantics. */ if (src_needs_copy && !src_entry->needs_copy) { - vm_prot_t prot; + vm_prot_t prot; assert(!pmap_has_prot_policy(src_entry->protection)); prot = src_entry->protection & ~VM_PROT_WRITE; if (override_nx(map, - VME_ALIAS(src_entry)) - && prot) - prot |= VM_PROT_EXECUTE; + VME_ALIAS(src_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } assert(!pmap_has_prot_policy(prot)); vm_object_pmap_protect(object, - offset, - entry_size, - ((src_entry->is_shared - || map->mapped_in_other_pmaps) ? - PMAP_NULL : map->pmap), - src_entry->vme_start, - prot); + offset, + entry_size, + ((src_entry->is_shared + || map->mapped_in_other_pmaps) ? + PMAP_NULL : map->pmap), + src_entry->vme_start, + prot); assert(src_entry->wired_count == 0); src_entry->needs_copy = TRUE; @@ -15761,7 +15917,6 @@ vm_map_remap_extract( * Throw away the old object reference of the new entry. */ vm_object_deallocate(object); - } else { new_entry->is_shared = FALSE; assertf(new_entry->use_pmap, "map %p new_entry %p\n", map, new_entry); @@ -15778,7 +15933,7 @@ vm_map_remap_extract( * verification, and unlock the map. */ version.main_timestamp = map->timestamp; - vm_map_unlock(map); /* Increments timestamp once! */ + vm_map_unlock(map); /* Increments timestamp once! */ /* * Perform the copy. 
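Aside: everything vm_map_remap_extract() arranges in the hunks above (shadow objects via VME_OBJECT_SHADOW(), write-protecting the source with pmap_protect(), the vm_object_copy_quickly() fast path) serves the remap primitive that userspace reaches through vm_remap(). A minimal user-level sketch of the copy=TRUE case, with error handling trimmed and purely for illustration:

#include <mach/mach.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	vm_address_t src = 0, dst = 0;
	vm_prot_t cur, max;

	/* Source page with some recognizable content. */
	vm_allocate(mach_task_self(), &src, vm_page_size, VM_FLAGS_ANYWHERE);
	strcpy((char *)src, "hello");

	/* copy=TRUE asks for copy semantics: rather than copying pages
	 * eagerly, the kernel arms copy-on-write (the shadow/pmap_protect
	 * dance above), so both mappings share physical pages until one
	 * side writes. */
	kern_return_t kr = vm_remap(mach_task_self(), &dst, vm_page_size,
	    0 /* mask */, VM_FLAGS_ANYWHERE, mach_task_self(), src,
	    TRUE /* copy */, &cur, &max, VM_INHERIT_NONE);

	printf("kr=%d, dst=%p reads \"%s\"\n", kr, (void *)dst, (char *)dst);

	/* Writing to either side now faults and resolves the COW. */
	((char *)dst)[0] = 'H';
	printf("src still reads \"%s\"\n", (char *)src);
	return 0;
}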
@@ -15842,8 +15997,9 @@ vm_map_remap_extract( saved_src_entry = VM_MAP_ENTRY_NULL; vm_object_deallocate(VME_OBJECT(new_entry)); _vm_map_entry_dispose(map_header, new_entry); - if (result == KERN_MEMORY_RESTART_COPY) + if (result == KERN_MEMORY_RESTART_COPY) { result = KERN_SUCCESS; + } continue; } /* map hasn't changed: src_entry is still valid */ @@ -15857,17 +16013,16 @@ vm_map_remap_extract( } _vm_map_store_entry_link(map_header, - map_header->links.prev, new_entry); + map_header->links.prev, new_entry); /*Protections for submap mapping are irrelevant here*/ - if( !src_entry->is_sub_map ) { + if (!src_entry->is_sub_map) { *cur_protection &= src_entry->protection; *max_protection &= src_entry->max_protection; } map_address += tmp_size; mapped_size += tmp_size; src_start += tmp_size; - } /* end while */ vm_map_unlock(map); @@ -15876,8 +16031,8 @@ vm_map_remap_extract( * Free all allocated elements. */ for (src_entry = map_header->links.next; - src_entry != CAST_TO_VM_MAP_ENTRY(&map_header->links); - src_entry = new_entry) { + src_entry != CAST_TO_VM_MAP_ENTRY(&map_header->links); + src_entry = new_entry) { new_entry = src_entry->vme_next; _vm_map_store_entry_unlink(map_header, src_entry); if (src_entry->is_sub_map) { @@ -15904,37 +16059,39 @@ vm_map_remap_extract( */ kern_return_t vm_map_remap( - vm_map_t target_map, - vm_map_address_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - vm_map_t src_map, - vm_map_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_map_address_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_t src_map, + vm_map_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { - kern_return_t result; - vm_map_entry_t entry; - vm_map_entry_t insp_entry = VM_MAP_ENTRY_NULL; - vm_map_entry_t new_entry; - struct vm_map_header map_header; - vm_map_offset_t offset_in_mapping; - - if (target_map == VM_MAP_NULL) + kern_return_t result; + vm_map_entry_t entry; + vm_map_entry_t insp_entry = VM_MAP_ENTRY_NULL; + vm_map_entry_t new_entry; + struct vm_map_header map_header; + vm_map_offset_t offset_in_mapping; + + if (target_map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } switch (inheritance) { case VM_INHERIT_NONE: case VM_INHERIT_COPY: case VM_INHERIT_SHARE: - if (size != 0 && src_map != VM_MAP_NULL) + if (size != 0 && src_map != VM_MAP_NULL) { break; - /*FALL THRU*/ + } + /*FALL THRU*/ default: return KERN_INVALID_ARGUMENT; } @@ -15949,9 +16106,9 @@ vm_map_remap( * the highest page that the requested region includes and make * sure that the size will cover it. 
* - * The key example we're worried about it is of the form: + * The key example we're worried about it is of the form: * - * memory_address = 0x1ff0, size = 0x20 + * memory_address = 0x1ff0, size = 0x20 * * With the old semantics, we round down the memory_address to 0x1000 * and round up the size to 0x1000, resulting in our covering *only* @@ -15970,13 +16127,13 @@ vm_map_remap( } result = vm_map_remap_extract(src_map, memory_address, - size, copy, &map_header, - cur_protection, - max_protection, - inheritance, - target_map->hdr.entries_pageable, - src_map == target_map, - vmk_flags); + size, copy, &map_header, + cur_protection, + max_protection, + inheritance, + target_map->hdr.entries_pageable, + src_map == target_map, + vmk_flags); if (result != KERN_SUCCESS) { return result; @@ -15987,15 +16144,15 @@ vm_map_remap( * space for the target */ *address = vm_map_trunc_page(*address, - VM_MAP_PAGE_MASK(target_map)); + VM_MAP_PAGE_MASK(target_map)); vm_map_lock(target_map); result = vm_map_remap_range_allocate(target_map, address, size, - mask, flags, vmk_flags, tag, - &insp_entry); + mask, flags, vmk_flags, tag, + &insp_entry); for (entry = map_header.links.next; - entry != CAST_TO_VM_MAP_ENTRY(&map_header.links); - entry = new_entry) { + entry != CAST_TO_VM_MAP_ENTRY(&map_header.links); + entry = new_entry) { new_entry = entry->vme_next; _vm_map_store_entry_unlink(&map_header, entry); if (result == KERN_SUCCESS) { @@ -16009,7 +16166,7 @@ vm_map_remap( entry->vme_end += *address; assert(!entry->map_aligned); vm_map_store_entry_link(target_map, insp_entry, entry, - vmk_flags); + vmk_flags); insp_entry = entry; } else { if (!entry->is_sub_map) { @@ -16026,9 +16183,9 @@ vm_map_remap( *max_protection = VM_PROT_READ; } - if( target_map->disable_vmentry_reuse == TRUE) { + if (target_map->disable_vmentry_reuse == TRUE) { assert(!target_map->is_nested_map); - if( target_map->highest_entry_end < insp_entry->vme_end ){ + if (target_map->highest_entry_end < insp_entry->vme_end) { target_map->highest_entry_end = insp_entry->vme_end; } } @@ -16056,22 +16213,22 @@ vm_map_remap( *cur_protection = VM_PROT_READ; *max_protection = VM_PROT_READ; printf("mismatched remap of executable range 0x%llx-0x%llx to 0x%llx, " - "region_start 0x%llx, region_size 0x%llx, cd_entry %sNULL, making non-executable.\n", - page_addr, page_addr+assoc_size, *address, - region_start, region_size, - region_cd != NULL ? "not " : "" // Don't leak kernel slide - ); + "region_start 0x%llx, region_size 0x%llx, cd_entry %sNULL, making non-executable.\n", + page_addr, page_addr + assoc_size, *address, + region_start, region_size, + region_cd != NULL ? 
"not " : "" // Don't leak kernel slide + ); } } #endif - } vm_map_unlock(target_map); - if (result == KERN_SUCCESS && target_map->wiring_required) + if (result == KERN_SUCCESS && target_map->wiring_required) { result = vm_map_wire_kernel(target_map, *address, - *address + size, *cur_protection, VM_KERN_MEMORY_MLOCK, - TRUE); + *address + size, *cur_protection, VM_KERN_MEMORY_MLOCK, + TRUE); + } /* * If requested, return the address of the data pointed to by the @@ -16097,36 +16254,34 @@ vm_map_remap( static kern_return_t vm_map_remap_range_allocate( - vm_map_t map, - vm_map_address_t *address, /* IN/OUT */ - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, + vm_map_t map, + vm_map_address_t *address, /* IN/OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, __unused vm_tag_t tag, - vm_map_entry_t *map_entry) /* OUT */ + vm_map_entry_t *map_entry) /* OUT */ { - vm_map_entry_t entry; - vm_map_offset_t start; - vm_map_offset_t end; - vm_map_offset_t desired_empty_end; - kern_return_t kr; - vm_map_entry_t hole_entry; + vm_map_entry_t entry; + vm_map_offset_t start; + vm_map_offset_t end; + vm_map_offset_t desired_empty_end; + kern_return_t kr; + vm_map_entry_t hole_entry; -StartAgain: ; +StartAgain:; start = *address; - if (flags & VM_FLAGS_ANYWHERE) - { - if (flags & VM_FLAGS_RANDOM_ADDR) - { + if (flags & VM_FLAGS_ANYWHERE) { + if (flags & VM_FLAGS_RANDOM_ADDR) { /* * Get a random start address. */ kr = vm_map_random_address_for_size(map, address, size); if (kr != KERN_SUCCESS) { - return(kr); + return kr; } start = *address; } @@ -16135,10 +16290,12 @@ StartAgain: ; * Calculate the first possible address. */ - if (start < map->min_offset) + if (start < map->min_offset) { start = map->min_offset; - if (start > map->max_offset) - return(KERN_NO_SPACE); + } + if (start > map->max_offset) { + return KERN_NO_SPACE; + } /* * Look for the first possible address; @@ -16146,10 +16303,9 @@ StartAgain: ; * address, we have to start after it. */ - if( map->disable_vmentry_reuse == TRUE) { + if (map->disable_vmentry_reuse == TRUE) { VM_MAP_HIGHEST_ENTRY(map, entry, start); } else { - if (map->holelistenabled) { hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list); @@ -16157,9 +16313,8 @@ StartAgain: ; /* * No more space in the map? */ - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } else { - boolean_t found_hole = FALSE; do { @@ -16174,11 +16329,10 @@ StartAgain: ; break; } hole_entry = hole_entry->vme_next; - } while (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)); if (found_hole == FALSE) { - return (KERN_NO_SPACE); + return KERN_NO_SPACE; } entry = hole_entry; @@ -16186,17 +16340,19 @@ StartAgain: ; } else { assert(first_free_is_valid(map)); if (start == map->min_offset) { - if ((entry = map->first_free) != vm_map_to_entry(map)) + if ((entry = map->first_free) != vm_map_to_entry(map)) { start = entry->vme_end; + } } else { - vm_map_entry_t tmp_entry; - if (vm_map_lookup_entry(map, start, &tmp_entry)) + vm_map_entry_t tmp_entry; + if (vm_map_lookup_entry(map, start, &tmp_entry)) { start = tmp_entry->vme_end; + } entry = tmp_entry; } } start = vm_map_round_page(start, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); } /* @@ -16206,7 +16362,7 @@ StartAgain: ; */ while (TRUE) { - vm_map_entry_t next; + vm_map_entry_t next; /* * Find the end of the proposed new region. 
@@ -16216,9 +16372,10 @@ StartAgain: ; end = ((start + mask) & ~mask); end = vm_map_round_page(end, - VM_MAP_PAGE_MASK(map)); - if (end < start) - return(KERN_NO_SPACE); + VM_MAP_PAGE_MASK(map)); + if (end < start) { + return KERN_NO_SPACE; + } start = end; end += size; @@ -16228,7 +16385,7 @@ StartAgain: ; if ((desired_empty_end > map->max_offset) || (desired_empty_end < start)) { if (map->wait_for_space) { if (size <= (map->max_offset - - map->min_offset)) { + map->min_offset)) { assert_wait((event_t) map, THREAD_INTERRUPTIBLE); vm_map_unlock(map); thread_block(THREAD_CONTINUE_NULL); @@ -16237,17 +16394,18 @@ StartAgain: ; } } - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } next = entry->vme_next; if (map->holelistenabled) { - if (entry->vme_end >= desired_empty_end) + if (entry->vme_end >= desired_empty_end) { break; + } } else { /* - * If there are no more entries, we must win. + * If there are no more entries, we must win. * * OR * @@ -16255,11 +16413,13 @@ StartAgain: ; * after the end of the potential new region. */ - if (next == vm_map_to_entry(map)) + if (next == vm_map_to_entry(map)) { break; + } - if (next->vme_start >= desired_empty_end) + if (next->vme_start >= desired_empty_end) { break; + } } /* @@ -16273,7 +16433,7 @@ StartAgain: ; /* * Wrapped around */ - return(KERN_NO_SPACE); + return KERN_NO_SPACE; } start = entry->vme_start; } else { @@ -16282,16 +16442,14 @@ StartAgain: ; } if (map->holelistenabled) { - if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); } } *address = start; - } else { - vm_map_entry_t temp_entry; + vm_map_entry_t temp_entry; /* * Verify that: @@ -16299,8 +16457,9 @@ StartAgain: ; * the mask requirement. */ - if ((start & mask) != 0) - return(KERN_NO_SPACE); + if ((start & mask) != 0) { + return KERN_NO_SPACE; + } /* @@ -16312,7 +16471,7 @@ StartAgain: ; if ((start < map->min_offset) || (end > map->max_offset) || (start >= end)) { - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } /* @@ -16330,9 +16489,9 @@ StartAgain: ; * combination. */ zap_map = vm_map_create(PMAP_NULL, - start, - end, - map->hdr.entries_pageable); + start, + end, + map->hdr.entries_pageable); if (zap_map == VM_MAP_NULL) { return KERN_RESOURCE_SHORTAGE; } @@ -16343,11 +16502,11 @@ StartAgain: ; remove_flags |= VM_MAP_REMOVE_IMMUTABLE; } kr = vm_map_delete(map, start, end, - remove_flags, - zap_map); + remove_flags, + zap_map); if (kr == KERN_SUCCESS) { vm_map_destroy(zap_map, - VM_MAP_REMOVE_NO_PMAP_CLEANUP); + VM_MAP_REMOVE_NO_PMAP_CLEANUP); zap_map = VM_MAP_NULL; } } @@ -16356,8 +16515,9 @@ StartAgain: ; * ... 
the starting address isn't allocated */ - if (vm_map_lookup_entry(map, start, &temp_entry)) - return(KERN_NO_SPACE); + if (vm_map_lookup_entry(map, start, &temp_entry)) { + return KERN_NO_SPACE; + } entry = temp_entry; @@ -16367,11 +16527,12 @@ StartAgain: ; */ if ((entry->vme_next != vm_map_to_entry(map)) && - (entry->vme_next->vme_start < end)) - return(KERN_NO_SPACE); + (entry->vme_next->vme_start < end)) { + return KERN_NO_SPACE; + } } *map_entry = entry; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -16382,11 +16543,11 @@ StartAgain: ; vm_map_t vm_map_switch( - vm_map_t map) + vm_map_t map) { - int mycpu; - thread_t thread = current_thread(); - vm_map_t oldmap = thread->map; + int mycpu; + thread_t thread = current_thread(); + vm_map_t oldmap = thread->map; mp_disable_preemption(); mycpu = cpu_number(); @@ -16397,7 +16558,7 @@ vm_map_switch( PMAP_SWITCH_USER(thread, map, mycpu); mp_enable_preemption(); - return(oldmap); + return oldmap; } @@ -16415,19 +16576,19 @@ vm_map_switch( */ kern_return_t vm_map_write_user( - vm_map_t map, - void *src_p, - vm_map_address_t dst_addr, - vm_size_t size) + vm_map_t map, + void *src_p, + vm_map_address_t dst_addr, + vm_size_t size) { - kern_return_t kr = KERN_SUCCESS; + kern_return_t kr = KERN_SUCCESS; - if(current_map() == map) { + if (current_map() == map) { if (copyout(src_p, dst_addr, size)) { kr = KERN_INVALID_ADDRESS; } } else { - vm_map_t oldmap; + vm_map_t oldmap; /* take on the identity of the target map while doing */ /* the transfer */ @@ -16457,19 +16618,19 @@ vm_map_write_user( */ kern_return_t vm_map_read_user( - vm_map_t map, - vm_map_address_t src_addr, - void *dst_p, - vm_size_t size) + vm_map_t map, + vm_map_address_t src_addr, + void *dst_p, + vm_size_t size) { - kern_return_t kr = KERN_SUCCESS; + kern_return_t kr = KERN_SUCCESS; - if(current_map() == map) { + if (current_map() == map) { if (copyin(src_addr, dst_p, size)) { kr = KERN_INVALID_ADDRESS; } } else { - vm_map_t oldmap; + vm_map_t oldmap; /* take on the identity of the target map while doing */ /* the transfer */ @@ -16495,22 +16656,21 @@ vm_map_read_user( */ boolean_t vm_map_check_protection(vm_map_t map, vm_map_offset_t start, - vm_map_offset_t end, vm_prot_t protection) + vm_map_offset_t end, vm_prot_t protection) { vm_map_entry_t entry; vm_map_entry_t tmp_entry; vm_map_lock(map); - if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) - { + if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) { vm_map_unlock(map); - return (FALSE); + return FALSE; } if (!vm_map_lookup_entry(map, start, &tmp_entry)) { vm_map_unlock(map); - return(FALSE); + return FALSE; } entry = tmp_entry; @@ -16518,7 +16678,7 @@ vm_map_check_protection(vm_map_t map, vm_map_offset_t start, while (start < end) { if (entry == vm_map_to_entry(map)) { vm_map_unlock(map); - return(FALSE); + return FALSE; } /* @@ -16527,7 +16687,7 @@ vm_map_check_protection(vm_map_t map, vm_map_offset_t start, if (start < entry->vme_start) { vm_map_unlock(map); - return(FALSE); + return FALSE; } /* @@ -16536,7 +16696,7 @@ vm_map_check_protection(vm_map_t map, vm_map_offset_t start, if ((entry->protection & protection) != protection) { vm_map_unlock(map); - return(FALSE); + return FALSE; } /* go to next entry */ @@ -16545,33 +16705,35 @@ vm_map_check_protection(vm_map_t map, vm_map_offset_t start, entry = entry->vme_next; } vm_map_unlock(map); - return(TRUE); + return TRUE; } kern_return_t vm_map_purgable_control( - vm_map_t map, - vm_map_offset_t address, - vm_purgable_t control, - 
int *state) + vm_map_t map, + vm_map_offset_t address, + vm_purgable_t control, + int *state) { - vm_map_entry_t entry; - vm_object_t object; - kern_return_t kr; - boolean_t was_nonvolatile; + vm_map_entry_t entry; + vm_object_t object; + kern_return_t kr; + boolean_t was_nonvolatile; /* * Vet all the input parameters and current type and state of the * underlaying object. Return with an error if anything is amiss. */ - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } if (control != VM_PURGABLE_SET_STATE && control != VM_PURGABLE_GET_STATE && control != VM_PURGABLE_PURGE_ALL && - control != VM_PURGABLE_SET_STATE_FROM_KERNEL) - return(KERN_INVALID_ARGUMENT); + control != VM_PURGABLE_SET_STATE_FROM_KERNEL) { + return KERN_INVALID_ARGUMENT; + } if (control == VM_PURGABLE_PURGE_ALL) { vm_purgeable_object_purge_all(); @@ -16579,20 +16741,20 @@ vm_map_purgable_control( } if ((control == VM_PURGABLE_SET_STATE || - control == VM_PURGABLE_SET_STATE_FROM_KERNEL) && + control == VM_PURGABLE_SET_STATE_FROM_KERNEL) && (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) || - ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) - return(KERN_INVALID_ARGUMENT); + ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) { + return KERN_INVALID_ARGUMENT; + } vm_map_lock_read(map); if (!vm_map_lookup_entry(map, address, &entry) || entry->is_sub_map) { - /* * Must pass a valid non-submap address. */ vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } if ((entry->protection & VM_PROT_WRITE) == 0) { @@ -16600,7 +16762,7 @@ vm_map_purgable_control( * Can't apply purgable controls to something you can't write. */ vm_map_unlock_read(map); - return(KERN_PROTECTION_FAILURE); + return KERN_PROTECTION_FAILURE; } object = VME_OBJECT(entry); @@ -16652,21 +16814,21 @@ vm_map_purgable_control( kern_return_t vm_map_page_query_internal( - vm_map_t target_map, - vm_map_offset_t offset, - int *disposition, - int *ref_count) + vm_map_t target_map, + vm_map_offset_t offset, + int *disposition, + int *ref_count) { - kern_return_t kr; - vm_page_info_basic_data_t info; - mach_msg_type_number_t count; + kern_return_t kr; + vm_page_info_basic_data_t info; + mach_msg_type_number_t count; count = VM_PAGE_INFO_BASIC_COUNT; kr = vm_map_page_info(target_map, - offset, - VM_PAGE_INFO_BASIC, - (vm_page_info_t) &info, - &count); + offset, + VM_PAGE_INFO_BASIC, + (vm_page_info_t) &info, + &count); if (kr == KERN_SUCCESS) { *disposition = info.disposition; *ref_count = info.ref_count; @@ -16680,40 +16842,40 @@ vm_map_page_query_internal( kern_return_t vm_map_page_info( - vm_map_t map, - vm_map_offset_t offset, - vm_page_info_flavor_t flavor, - vm_page_info_t info, - mach_msg_type_number_t *count) + vm_map_t map, + vm_map_offset_t offset, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count) { - return (vm_map_page_range_info_internal(map, - offset, /* start of range */ - (offset + 1), /* this will get rounded in the call to the page boundary */ - flavor, - info, - count)); + return vm_map_page_range_info_internal(map, + offset, /* start of range */ + (offset + 1), /* this will get rounded in the call to the page boundary */ + flavor, + info, + count); } kern_return_t vm_map_page_range_info_internal( - vm_map_t map, - vm_map_offset_t start_offset, - vm_map_offset_t end_offset, - vm_page_info_flavor_t flavor, - vm_page_info_t info, - mach_msg_type_number_t *count) + vm_map_t map, + 
vm_map_offset_t start_offset, + vm_map_offset_t end_offset, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count) { - vm_map_entry_t map_entry = VM_MAP_ENTRY_NULL; - vm_object_t object = VM_OBJECT_NULL, curr_object = VM_OBJECT_NULL; - vm_page_t m = VM_PAGE_NULL; - kern_return_t retval = KERN_SUCCESS; - int disposition = 0; - int ref_count = 0; - int depth = 0, info_idx = 0; - vm_page_info_basic_t basic_info = 0; - vm_map_offset_t offset_in_page = 0, offset_in_object = 0, curr_offset_in_object = 0; - vm_map_offset_t start = 0, end = 0, curr_s_offset = 0, curr_e_offset = 0; - boolean_t do_region_footprint; + vm_map_entry_t map_entry = VM_MAP_ENTRY_NULL; + vm_object_t object = VM_OBJECT_NULL, curr_object = VM_OBJECT_NULL; + vm_page_t m = VM_PAGE_NULL; + kern_return_t retval = KERN_SUCCESS; + int disposition = 0; + int ref_count = 0; + int depth = 0, info_idx = 0; + vm_page_info_basic_t basic_info = 0; + vm_map_offset_t offset_in_page = 0, offset_in_object = 0, curr_offset_in_object = 0; + vm_map_offset_t start = 0, end = 0, curr_s_offset = 0, curr_e_offset = 0; + boolean_t do_region_footprint; switch (flavor) { case VM_PAGE_INFO_BASIC: @@ -16723,8 +16885,9 @@ vm_map_page_range_info_internal( * properly padded, so allow the size to be off by * one to maintain backwards binary compatibility... */ - if (*count != VM_PAGE_INFO_BASIC_COUNT - 1) + if (*count != VM_PAGE_INFO_BASIC_COUNT - 1) { return KERN_INVALID_ARGUMENT; + } } break; default: @@ -16742,7 +16905,11 @@ vm_map_page_range_info_internal( start = vm_map_trunc_page(start_offset, PAGE_MASK); end = vm_map_round_page(end_offset, PAGE_MASK); - assert ((end - start) <= MAX_PAGE_RANGE_QUERY); + if (end < start) { + return KERN_INVALID_ARGUMENT; + } + + assert((end - start) <= MAX_PAGE_RANGE_QUERY); vm_map_lock_read(map); @@ -16819,13 +16986,11 @@ vm_map_page_range_info_internal( * Illegal address that falls below map min. */ curr_e_offset = MIN(end, vm_map_min(map)); - } else if (curr_s_offset >= vm_map_max(map)) { /* * Illegal address that falls on/after map max. */ curr_e_offset = end; - } else if (map_entry == vm_map_to_entry(map)) { /* * Hit a hole. @@ -16837,8 +17002,8 @@ vm_map_page_range_info_internal( curr_e_offset = MIN(map->max_offset, end); } else { /* - * Hole at start of the map. - */ + * Hole at start of the map. + */ curr_e_offset = MIN(map_entry->vme_next->vme_start, end); } } else { @@ -16891,11 +17056,11 @@ vm_map_page_range_info_internal( submap_info = (vm_page_info_t) (((uintptr_t) info) + (info_idx * sizeof(struct vm_page_info_basic))); retval = vm_map_page_range_info_internal(sub_map, - submap_s_offset, - submap_e_offset, - VM_PAGE_INFO_BASIC, - (vm_page_info_t) submap_info, - count); + submap_s_offset, + submap_e_offset, + VM_PAGE_INFO_BASIC, + (vm_page_info_t) submap_info, + count); assert(retval == KERN_SUCCESS); @@ -16913,7 +17078,6 @@ vm_map_page_range_info_internal( object = VME_OBJECT(map_entry); if (object == VM_OBJECT_NULL) { - /* * We don't have an object here and, hence, * no pages to inspect. We'll fill up the @@ -16954,19 +17118,19 @@ vm_map_page_range_info_internal( * Query the pmap. */ pmap_query_page_info(map->pmap, - curr_s_offset, - &pmap_disp); + curr_s_offset, + &pmap_disp); } if (object->purgable == VM_PURGABLE_NONVOLATILE && /* && not tagged as no-footprint? 
*/ VM_OBJECT_OWNER(object) != NULL && VM_OBJECT_OWNER(object)->map == map) { if ((((curr_s_offset - - map_entry->vme_start - + VME_OFFSET(map_entry)) - / PAGE_SIZE) < - (object->resident_page_count + - vm_compressor_pager_get_count(object->pager)))) { + - map_entry->vme_start + + VME_OFFSET(map_entry)) + / PAGE_SIZE) < + (object->resident_page_count + + vm_compressor_pager_get_count(object->pager)))) { /* * Non-volatile purgeable object owned * by this task: report the first @@ -16983,15 +17147,15 @@ vm_map_page_range_info_internal( disposition |= VM_PAGE_QUERY_PAGE_PRESENT; } } else if ((object->purgable == VM_PURGABLE_VOLATILE || - object->purgable == VM_PURGABLE_EMPTY) && - /* && not tagged as no-footprint? */ - VM_OBJECT_OWNER(object) != NULL && - VM_OBJECT_OWNER(object)->map == map) { + object->purgable == VM_PURGABLE_EMPTY) && + /* && not tagged as no-footprint? */ + VM_OBJECT_OWNER(object) != NULL && + VM_OBJECT_OWNER(object)->map == map) { if ((((curr_s_offset - - map_entry->vme_start - + VME_OFFSET(map_entry)) - / PAGE_SIZE) < - object->wired_page_count)) { + - map_entry->vme_start + + VME_OFFSET(map_entry)) + / PAGE_SIZE) < + object->wired_page_count)) { /* * Volatile|empty purgeable object owned * by this task: report the first @@ -17008,8 +17172,8 @@ vm_map_page_range_info_internal( disposition |= VM_PAGE_QUERY_PAGE_PRESENT; } } else if (map_entry->iokit_acct && - object->internal && - object->purgable == VM_PURGABLE_DENY) { + object->internal && + object->purgable == VM_PURGABLE_DENY) { /* * Non-purgeable IOKit memory: phys_footprint * includes the entire virtual mapping. @@ -17018,7 +17182,7 @@ vm_map_page_range_info_internal( disposition |= VM_PAGE_QUERY_PAGE_PRESENT; disposition |= VM_PAGE_QUERY_PAGE_DIRTY; } else if (pmap_disp & (PMAP_QUERY_PAGE_ALTACCT | - PMAP_QUERY_PAGE_COMPRESSED_ALTACCT)) { + PMAP_QUERY_PAGE_COMPRESSED_ALTACCT)) { /* alternate accounting */ #if CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) if (map->pmap->footprint_was_suspended || @@ -17038,7 +17202,7 @@ vm_map_page_range_info_internal( */ } else #endif /* CONFIG_EMBEDDED && (DEVELOPMENT || DEBUG) */ - assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); + assertf(!map_entry->use_pmap, "offset 0x%llx map_entry %p", (uint64_t) curr_s_offset, map_entry); pmap_disp = 0; } else { if (pmap_disp & PMAP_QUERY_PAGE_PRESENT) { @@ -17087,7 +17251,6 @@ vm_map_page_range_info_internal( curr_object = object; for (; curr_s_offset < curr_e_offset;) { - if (object == curr_object) { ref_count = curr_object->ref_count - 1; /* account for our object reference above. */ } else { @@ -17100,16 +17263,13 @@ vm_map_page_range_info_internal( m = vm_page_lookup(curr_object, curr_offset_in_object); if (m != VM_PAGE_NULL) { - disposition |= VM_PAGE_QUERY_PAGE_PRESENT; break; - } else { if (curr_object->internal && curr_object->alive && !curr_object->terminating && curr_object->pager_ready) { - if (VM_COMPRESSOR_PAGER_STATE_GET(curr_object, curr_offset_in_object) == VM_EXTERNAL_STATE_EXISTS) { /* the pager has that page */ @@ -17117,7 +17277,7 @@ vm_map_page_range_info_internal( break; } } - + /* * Go down the VM object shadow chain until we find the page * we're looking for. @@ -17136,7 +17296,6 @@ vm_map_page_range_info_internal( depth++; continue; } else { - break; } } @@ -17152,34 +17311,39 @@ vm_map_page_range_info_internal( /* but this would under count as only faulted-in mappings would */ /* show up. 
*/ - if ((curr_object == object) && curr_object->shadow) + if ((curr_object == object) && curr_object->shadow) { disposition |= VM_PAGE_QUERY_PAGE_COPIED; + } - if (! curr_object->internal) + if (!curr_object->internal) { disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL; + } if (m != VM_PAGE_NULL) { - if (m->vmp_fictitious) { - disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS; - } else { - if (m->vmp_dirty || pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m))) + if (m->vmp_dirty || pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m))) { disposition |= VM_PAGE_QUERY_PAGE_DIRTY; + } - if (m->vmp_reference || pmap_is_referenced(VM_PAGE_GET_PHYS_PAGE(m))) + if (m->vmp_reference || pmap_is_referenced(VM_PAGE_GET_PHYS_PAGE(m))) { disposition |= VM_PAGE_QUERY_PAGE_REF; + } - if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) + if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) { disposition |= VM_PAGE_QUERY_PAGE_SPECULATIVE; + } - if (m->vmp_cs_validated) + if (m->vmp_cs_validated) { disposition |= VM_PAGE_QUERY_PAGE_CS_VALIDATED; - if (m->vmp_cs_tainted) + } + if (m->vmp_cs_tainted) { disposition |= VM_PAGE_QUERY_PAGE_CS_TAINTED; - if (m->vmp_cs_nx) + } + if (m->vmp_cs_nx) { disposition |= VM_PAGE_QUERY_PAGE_CS_NX; + } } } @@ -17189,9 +17353,9 @@ vm_map_page_range_info_internal( basic_info->disposition = disposition; basic_info->ref_count = ref_count; basic_info->object_id = (vm_object_id_t) (uintptr_t) - VM_KERNEL_ADDRPERM(curr_object); + VM_KERNEL_ADDRPERM(curr_object); basic_info->offset = - (memory_object_offset_t) curr_offset_in_object + offset_in_page; + (memory_object_offset_t) curr_offset_in_object + offset_in_page; basic_info->depth = depth; info_idx++; @@ -17204,19 +17368,17 @@ vm_map_page_range_info_internal( /* * Move to next offset in the range and in our object. */ - curr_s_offset += PAGE_SIZE; + curr_s_offset += PAGE_SIZE; offset_in_object += PAGE_SIZE; curr_offset_in_object = offset_in_object; if (curr_object != object) { - vm_object_unlock(curr_object); curr_object = object; vm_object_lock_shared(curr_object); } else { - vm_object_lock_yield_shared(curr_object); } } @@ -17271,50 +17433,52 @@ vm_map_page_range_info_internal( kern_return_t vm_map_msync( - vm_map_t map, - vm_map_address_t address, - vm_map_size_t size, - vm_sync_t sync_flags) + vm_map_t map, + vm_map_address_t address, + vm_map_size_t size, + vm_sync_t sync_flags) { - vm_map_entry_t entry; - vm_map_size_t amount_left; - vm_object_offset_t offset; - boolean_t do_sync_req; - boolean_t had_hole = FALSE; - vm_map_offset_t pmap_offset; + vm_map_entry_t entry; + vm_map_size_t amount_left; + vm_object_offset_t offset; + boolean_t do_sync_req; + boolean_t had_hole = FALSE; + vm_map_offset_t pmap_offset; if ((sync_flags & VM_SYNC_ASYNCHRONOUS) && - (sync_flags & VM_SYNC_SYNCHRONOUS)) - return(KERN_INVALID_ARGUMENT); + (sync_flags & VM_SYNC_SYNCHRONOUS)) { + return KERN_INVALID_ARGUMENT; + } /* * align address and size on page boundaries */ size = (vm_map_round_page(address + size, - VM_MAP_PAGE_MASK(map)) - - vm_map_trunc_page(address, - VM_MAP_PAGE_MASK(map))); + VM_MAP_PAGE_MASK(map)) - + vm_map_trunc_page(address, + VM_MAP_PAGE_MASK(map))); address = vm_map_trunc_page(address, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); - if (map == VM_MAP_NULL) - return(KERN_INVALID_TASK); + if (map == VM_MAP_NULL) { + return KERN_INVALID_TASK; + } - if (size == 0) - return(KERN_SUCCESS); + if (size == 0) { + return KERN_SUCCESS; + } amount_left = size; while (amount_left > 0) { - vm_object_size_t flush_size; - vm_object_t object; + vm_object_size_t flush_size; 
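/*
 * The address/size alignment at the top of vm_map_msync() above follows
 * the usual trunc/round pairing: the start is truncated down to a page
 * boundary and the end is rounded up, so every partially-touched page is
 * covered. A minimal standalone sketch of that arithmetic (names are
 * illustrative, not part of this patch); note the round-up can wrap past
 * the top of the address space, which appears to be why this patch adds
 * the explicit "end < start" check in vm_map_page_range_info_internal():
 *
 *     #include <stdint.h>
 *
 *     static inline uint64_t trunc_page_mask(uint64_t a, uint64_t mask)
 *     {
 *         return a & ~mask;            // round start down
 *     }
 *
 *     static inline uint64_t round_page_mask(uint64_t a, uint64_t mask)
 *     {
 *         return (a + mask) & ~mask;   // round end up (may wrap to 0)
 *     }
 *
 *     // covered range: [trunc_page_mask(addr, m),
 *     //                 round_page_mask(addr + size, m))
 */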
+ vm_object_t object; vm_map_lock(map); if (!vm_map_lookup_entry(map, - address, - &entry)) { - - vm_map_size_t skip; + address, + &entry)) { + vm_map_size_t skip; /* * hole in the address map. @@ -17354,10 +17518,11 @@ vm_map_msync( * Move up to the next entry if needed */ skip = (entry->vme_next->vme_start - address); - if (skip >= amount_left) + if (skip >= amount_left) { amount_left = 0; - else + } else { amount_left -= skip; + } address = entry->vme_next->vme_start; vm_map_unlock(map); continue; @@ -17372,7 +17537,7 @@ vm_map_msync( */ if (amount_left + entry->vme_start + offset > entry->vme_end) { flush_size = entry->vme_end - - (entry->vme_start + offset); + (entry->vme_start + offset); } else { flush_size = amount_left; } @@ -17380,8 +17545,8 @@ vm_map_msync( address += flush_size; if (entry->is_sub_map == TRUE) { - vm_map_t local_map; - vm_map_offset_t local_offset; + vm_map_t local_map; + vm_map_offset_t local_offset; local_map = VME_SUBMAP(entry); local_offset = VME_OFFSET(entry); @@ -17407,28 +17572,28 @@ vm_map_msync( } offset += VME_OFFSET(entry); - vm_object_lock(object); + vm_object_lock(object); if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) { - int kill_pages = 0; + int kill_pages = 0; boolean_t reusable_pages = FALSE; if (sync_flags & VM_SYNC_KILLPAGES) { - if (((object->ref_count == 1) || - ((object->copy_strategy != - MEMORY_OBJECT_COPY_SYMMETRIC) && - (object->copy == VM_OBJECT_NULL))) && + if (((object->ref_count == 1) || + ((object->copy_strategy != + MEMORY_OBJECT_COPY_SYMMETRIC) && + (object->copy == VM_OBJECT_NULL))) && (object->shadow == VM_OBJECT_NULL)) { if (object->ref_count != 1) { vm_page_stats_reusable.free_shared++; } - kill_pages = 1; + kill_pages = 1; } else { - kill_pages = -1; + kill_pages = -1; } } - if (kill_pages != -1) - vm_object_deactivate_pages( + if (kill_pages != -1) { + vm_object_deactivate_pages( object, offset, (vm_object_size_t) flush_size, @@ -17436,6 +17601,7 @@ vm_map_msync( reusable_pages, map->pmap, pmap_offset); + } vm_object_unlock(object); vm_map_unlock(map); continue; @@ -17460,18 +17626,18 @@ vm_map_msync( vm_map_unlock(map); do_sync_req = vm_object_sync(object, - offset, - flush_size, - sync_flags & VM_SYNC_INVALIDATE, - ((sync_flags & VM_SYNC_SYNCHRONOUS) || - (sync_flags & VM_SYNC_ASYNCHRONOUS)), - sync_flags & VM_SYNC_SYNCHRONOUS); + offset, + flush_size, + sync_flags & VM_SYNC_INVALIDATE, + ((sync_flags & VM_SYNC_SYNCHRONOUS) || + (sync_flags & VM_SYNC_ASYNCHRONOUS)), + sync_flags & VM_SYNC_SYNCHRONOUS); if ((sync_flags & VM_SYNC_INVALIDATE) && object->resident_page_count == 0) { - /* + /* * clear out the clustering and read-ahead hints */ - vm_object_lock(object); + vm_object_lock(object); object->pages_created = 0; object->pages_used = 0; @@ -17484,10 +17650,11 @@ vm_map_msync( } /* while */ /* for proper msync() behaviour */ - if (had_hole == TRUE && (sync_flags & VM_SYNC_CONTIGUOUS)) - return(KERN_INVALID_ADDRESS); + if (had_hole == TRUE && (sync_flags & VM_SYNC_CONTIGUOUS)) { + return KERN_INVALID_ADDRESS; + } - return(KERN_SUCCESS); + return KERN_SUCCESS; }/* vm_msync */ /* @@ -17504,32 +17671,32 @@ vm_map_msync( vm_map_t convert_port_entry_to_map( - ipc_port_t port) + ipc_port_t port) { vm_map_t map; - vm_named_entry_t named_entry; - uint32_t try_failed_count = 0; + vm_named_entry_t named_entry; + uint32_t try_failed_count = 0; - if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { - while(TRUE) { + if (IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { + while (TRUE) { ip_lock(port); - 
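/*
 * This retry loop in convert_port_entry_to_map() takes the port lock
 * first and then only *tries* the named entry's mutex; on failure it
 * drops the port lock, calls mutex_pause() with an increasing count, and
 * retries from the top. That is the standard trylock-with-backoff way of
 * acquiring two locks without a global ordering. A self-contained
 * userspace sketch of the same pattern, with pthreads and sched_yield()
 * standing in for the kernel primitives (all names illustrative):
 *
 *     #include <pthread.h>
 *     #include <sched.h>
 *
 *     static void lock_both(pthread_mutex_t *outer, pthread_mutex_t *inner)
 *     {
 *         for (;;) {
 *             pthread_mutex_lock(outer);
 *             if (pthread_mutex_trylock(inner) == 0) {
 *                 return;                  // both locks held
 *             }
 *             pthread_mutex_unlock(outer); // drop outer to avoid deadlock
 *             sched_yield();               // back off, then retry
 *         }
 *     }
 */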
if(ip_active(port) && (ip_kotype(port) - == IKOT_NAMED_ENTRY)) { + if (ip_active(port) && (ip_kotype(port) + == IKOT_NAMED_ENTRY)) { named_entry = - (vm_named_entry_t)port->ip_kobject; + (vm_named_entry_t)port->ip_kobject; if (!(lck_mtx_try_lock(&(named_entry)->Lock))) { - ip_unlock(port); + ip_unlock(port); try_failed_count++; - mutex_pause(try_failed_count); - continue; - } + mutex_pause(try_failed_count); + continue; + } named_entry->ref_count++; lck_mtx_unlock(&(named_entry)->Lock); ip_unlock(port); if ((named_entry->is_sub_map) && (named_entry->protection - & VM_PROT_WRITE)) { + & VM_PROT_WRITE)) { map = named_entry->backing.map; } else { mach_destroy_memory_entry(port); @@ -17538,13 +17705,13 @@ convert_port_entry_to_map( vm_map_reference_swap(map); mach_destroy_memory_entry(port); break; - } - else + } else { return VM_MAP_NULL; + } } - } - else + } else { map = convert_port_to_map(port); + } return map; } @@ -17562,15 +17729,15 @@ convert_port_entry_to_map( vm_object_t convert_port_entry_to_object( - ipc_port_t port) + ipc_port_t port) { - vm_object_t object = VM_OBJECT_NULL; - vm_named_entry_t named_entry; - uint32_t try_failed_count = 0; + vm_object_t object = VM_OBJECT_NULL; + vm_named_entry_t named_entry; + uint32_t try_failed_count = 0; if (IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { - try_again: +try_again: ip_lock(port); if (ip_active(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { @@ -17579,7 +17746,7 @@ convert_port_entry_to_object( ip_unlock(port); try_failed_count++; mutex_pause(try_failed_count); - goto try_again; + goto try_again; } named_entry->ref_count++; lck_mtx_unlock(&(named_entry)->Lock); @@ -17605,7 +17772,7 @@ convert_port_entry_to_object( vm_map_t current_map(void) { - return (current_map_fast()); + return current_map_fast(); } /* @@ -17618,13 +17785,14 @@ current_map(void) #undef vm_map_reference void vm_map_reference( - vm_map_t map) + vm_map_t map) { - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return; + } lck_mtx_lock(&map->s_lock); -#if TASK_SWAPPER +#if TASK_SWAPPER assert(map->res_count > 0); assert(map->map_refcnt >= map->res_count); map->res_count++; @@ -17642,12 +17810,13 @@ vm_map_reference( */ void vm_map_deallocate( - vm_map_t map) + vm_map_t map) { - unsigned int ref; + unsigned int ref; - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return; + } lck_mtx_lock(&map->s_lock); ref = --map->map_refcnt; @@ -17659,7 +17828,7 @@ vm_map_deallocate( assert(map->map_refcnt == 0); lck_mtx_unlock(&map->s_lock); -#if TASK_SWAPPER +#if TASK_SWAPPER /* * The map residence count isn't decremented here because * the vm_map_delete below will traverse the entire map, @@ -17675,21 +17844,24 @@ vm_map_deallocate( void vm_map_disable_NX(vm_map_t map) { - if (map == NULL) - return; - if (map->pmap == NULL) - return; + if (map == NULL) { + return; + } + if (map->pmap == NULL) { + return; + } - pmap_disable_NX(map->pmap); + pmap_disable_NX(map->pmap); } void vm_map_disallow_data_exec(vm_map_t map) { - if (map == NULL) - return; + if (map == NULL) { + return; + } - map->map_disallow_data_exec = TRUE; + map->map_disallow_data_exec = TRUE; } /* XXX Consider making these constants (VM_MAX_ADDRESS and MACH_VM_MAX_ADDRESS) @@ -17781,17 +17953,17 @@ vm_map_offset_t vm_compute_max_offset(boolean_t is64) { #if defined(__arm__) || defined(__arm64__) - return (pmap_max_offset(is64, ARM_PMAP_MAX_OFFSET_DEVICE)); + return pmap_max_offset(is64, ARM_PMAP_MAX_OFFSET_DEVICE); #else - return (is64 ? 
(vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS); + return is64 ? (vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS; #endif } void vm_map_get_max_aslr_slide_section( - vm_map_t map __unused, - int64_t *max_sections, - int64_t *section_size) + vm_map_t map __unused, + int64_t *max_sections, + int64_t *section_size) { #if defined(__arm64__) *max_sections = 3; @@ -17810,9 +17982,9 @@ vm_map_get_max_aslr_slide_pages(vm_map_t map) * limited embedded address space; this is also meant to minimize pmap * memory usage on 16KB page systems. */ - return (1 << (24 - VM_MAP_PAGE_SHIFT(map))); + return 1 << (24 - VM_MAP_PAGE_SHIFT(map)); #else - return (1 << (vm_map_is_64bit(map) ? 16 : 8)); + return 1 << (vm_map_is_64bit(map) ? 16 : 8); #endif } @@ -17823,16 +17995,16 @@ vm_map_get_max_loader_aslr_slide_pages(vm_map_t map) /* We limit the loader slide to 4MB, in order to ensure at least 8 bits * of independent entropy on 16KB page systems. */ - return (1 << (22 - VM_MAP_PAGE_SHIFT(map))); + return 1 << (22 - VM_MAP_PAGE_SHIFT(map)); #else - return (1 << (vm_map_is_64bit(map) ? 16 : 8)); + return 1 << (vm_map_is_64bit(map) ? 16 : 8); #endif } -#ifndef __arm__ +#ifndef __arm__ boolean_t vm_map_is_64bit( - vm_map_t map) + vm_map_t map) { return map->max_offset > ((vm_map_offset_t)VM_MAX_ADDRESS); } @@ -17840,8 +18012,8 @@ vm_map_is_64bit( boolean_t vm_map_has_hard_pagezero( - vm_map_t map, - vm_map_offset_t pagezero_size) + vm_map_t map, + vm_map_offset_t pagezero_size) { /* * XXX FBDP @@ -17853,7 +18025,7 @@ vm_map_has_hard_pagezero( * VM map is being torn down, and when a new map is created via * load_machfile()/execve(). */ - return (map->min_offset >= pagezero_size); + return map->min_offset >= pagezero_size; } /* @@ -17861,10 +18033,10 @@ vm_map_has_hard_pagezero( */ kern_return_t vm_map_raise_max_offset( - vm_map_t map, - vm_map_offset_t new_max_offset) + vm_map_t map, + vm_map_offset_t new_max_offset) { - kern_return_t ret; + kern_return_t ret; vm_map_lock(map); ret = KERN_INVALID_ADDRESS; @@ -17894,13 +18066,13 @@ vm_map_raise_max_offset( */ kern_return_t vm_map_raise_min_offset( - vm_map_t map, - vm_map_offset_t new_min_offset) + vm_map_t map, + vm_map_offset_t new_min_offset) { - vm_map_entry_t first_entry; + vm_map_entry_t first_entry; new_min_offset = vm_map_round_page(new_min_offset, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); vm_map_lock(map); @@ -17949,18 +18121,19 @@ vm_map_raise_min_offset( */ void -vm_map_set_user_wire_limit(vm_map_t map, - vm_size_t limit) +vm_map_set_user_wire_limit(vm_map_t map, + vm_size_t limit) { map->user_wire_limit = limit; } -void vm_map_switch_protect(vm_map_t map, - boolean_t val) +void +vm_map_switch_protect(vm_map_t map, + boolean_t val) { vm_map_lock(map); - map->switch_protect=val; + map->switch_protect = val; vm_map_unlock(map); } @@ -17989,9 +18162,10 @@ vm_map_iokit_unmapped_region(vm_map_t map, vm_size_t bytes) /* Add (generate) code signature for memory range */ #if CONFIG_DYNAMIC_CODE_SIGNING -kern_return_t vm_map_sign(vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end) +kern_return_t +vm_map_sign(vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end) { vm_map_entry_t entry; vm_page_t m; @@ -18001,8 +18175,9 @@ kern_return_t vm_map_sign(vm_map_t map, * Vet all the input parameters and current type and state of the * underlaying object. Return with an error if anything is amiss. 
*/ - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } vm_map_lock_read(map); @@ -18011,16 +18186,16 @@ kern_return_t vm_map_sign(vm_map_t map, * Must pass a valid non-submap address. */ vm_map_unlock_read(map); - return(KERN_INVALID_ADDRESS); + return KERN_INVALID_ADDRESS; } - if((entry->vme_start > start) || (entry->vme_end < end)) { + if ((entry->vme_start > start) || (entry->vme_end < end)) { /* * Map entry doesn't cover the requested range. Not handling * this situation currently. */ vm_map_unlock_read(map); - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } object = VME_OBJECT(entry); @@ -18035,12 +18210,12 @@ kern_return_t vm_map_sign(vm_map_t map, vm_object_lock(object); vm_map_unlock_read(map); - while(start < end) { + while (start < end) { uint32_t refmod; m = vm_page_lookup(object, - start - entry->vme_start + VME_OFFSET(entry)); - if (m==VM_PAGE_NULL) { + start - entry->vme_start + VME_OFFSET(entry)); + if (m == VM_PAGE_NULL) { /* shoud we try to fault a page here? we can probably * demand it exists and is locked for this request */ vm_object_unlock(object); @@ -18080,12 +18255,13 @@ kern_return_t vm_map_sign(vm_map_t map, } #endif -kern_return_t vm_map_partial_reap(vm_map_t map, unsigned int *reclaimed_resident, unsigned int *reclaimed_compressed) +kern_return_t +vm_map_partial_reap(vm_map_t map, unsigned int *reclaimed_resident, unsigned int *reclaimed_compressed) { - vm_map_entry_t entry = VM_MAP_ENTRY_NULL; + vm_map_entry_t entry = VM_MAP_ENTRY_NULL; vm_map_entry_t next_entry; - kern_return_t kr = KERN_SUCCESS; - vm_map_t zap_map; + kern_return_t kr = KERN_SUCCESS; + vm_map_t zap_map; vm_map_lock(map); @@ -18094,49 +18270,48 @@ kern_return_t vm_map_partial_reap(vm_map_t map, unsigned int *reclaimed_resident * the "map" in vm_map_delete(). */ zap_map = vm_map_create(PMAP_NULL, - map->min_offset, - map->max_offset, - map->hdr.entries_pageable); + map->min_offset, + map->max_offset, + map->hdr.entries_pageable); if (zap_map == VM_MAP_NULL) { return KERN_RESOURCE_SHORTAGE; } vm_map_set_page_shift(zap_map, - VM_MAP_PAGE_SHIFT(map)); + VM_MAP_PAGE_SHIFT(map)); vm_map_disable_hole_optimization(zap_map); for (entry = vm_map_first_entry(map); - entry != vm_map_to_entry(map); - entry = next_entry) { + entry != vm_map_to_entry(map); + entry = next_entry) { next_entry = entry->vme_next; if (VME_OBJECT(entry) && !entry->is_sub_map && (VME_OBJECT(entry)->internal == TRUE) && (VME_OBJECT(entry)->ref_count == 1)) { - *reclaimed_resident += VME_OBJECT(entry)->resident_page_count; *reclaimed_compressed += vm_compressor_pager_get_count(VME_OBJECT(entry)->pager); (void)vm_map_delete(map, - entry->vme_start, - entry->vme_end, - VM_MAP_REMOVE_SAVE_ENTRIES, - zap_map); + entry->vme_start, + entry->vme_end, + VM_MAP_REMOVE_SAVE_ENTRIES, + zap_map); } } vm_map_unlock(map); - /* + /* * Get rid of the "zap_maps" and all the map entries that - * they may still contain. - */ - if (zap_map != VM_MAP_NULL) { - vm_map_destroy(zap_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP); - zap_map = VM_MAP_NULL; - } + * they may still contain. 
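/*
 * The zap-map idiom in vm_map_partial_reap() here: entries are deleted
 * with VM_MAP_REMOVE_SAVE_ENTRIES so they are parked in the throwaway
 * zap_map rather than torn down while the main map lock is held; the
 * lock is dropped first, and only then is the zap map destroyed, reaping
 * everything it collected. In outline (condensed from the surrounding
 * code, argument names illustrative):
 *
 *     zap = vm_map_create(PMAP_NULL, min, max, pageable);  // scratch map
 *     vm_map_lock(map);
 *     vm_map_delete(map, start, end, VM_MAP_REMOVE_SAVE_ENTRIES, zap);
 *     vm_map_unlock(map);              // drop the lock before reaping
 *     vm_map_destroy(zap, VM_MAP_REMOVE_NO_PMAP_CLEANUP);
 */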
+ */ + if (zap_map != VM_MAP_NULL) { + vm_map_destroy(zap_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP); + zap_map = VM_MAP_NULL; + } return kr; } @@ -18150,16 +18325,15 @@ vm_map_disconnect_page_mappings( boolean_t do_unnest) { vm_map_entry_t entry; - int page_count = 0; + int page_count = 0; if (do_unnest == TRUE) { #ifndef NO_NESTED_PMAP vm_map_lock(map); for (entry = vm_map_first_entry(map); - entry != vm_map_to_entry(map); - entry = entry->vme_next) { - + entry != vm_map_to_entry(map); + entry = entry->vme_next) { if (entry->is_sub_map && entry->use_pmap) { /* * Make sure the range between the start of this entry and @@ -18178,15 +18352,15 @@ vm_map_disconnect_page_mappings( page_count = map->pmap->stats.resident_count; for (entry = vm_map_first_entry(map); - entry != vm_map_to_entry(map); - entry = entry->vme_next) { - + entry != vm_map_to_entry(map); + entry = entry->vme_next) { if (!entry->is_sub_map && ((VME_OBJECT(entry) == 0) || - (VME_OBJECT(entry)->phys_contiguous))) { + (VME_OBJECT(entry)->phys_contiguous))) { continue; } - if (entry->is_sub_map) + if (entry->is_sub_map) { assert(!entry->use_pmap); + } pmap_remove_options(map->pmap, entry->vme_start, entry->vme_end, 0); } @@ -18210,22 +18384,22 @@ extern unsigned int memorystatus_freeze_shared_mb_per_process_max; kern_return_t vm_map_freeze( - vm_map_t map, - unsigned int *purgeable_count, - unsigned int *wired_count, - unsigned int *clean_count, - unsigned int *dirty_count, - __unused unsigned int dirty_budget, - unsigned int *shared_count, - int *freezer_error_code, - boolean_t eval_only) + vm_map_t map, + unsigned int *purgeable_count, + unsigned int *wired_count, + unsigned int *clean_count, + unsigned int *dirty_count, + __unused unsigned int dirty_budget, + unsigned int *shared_count, + int *freezer_error_code, + boolean_t eval_only) { - vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL; - kern_return_t kr = KERN_SUCCESS; - boolean_t evaluation_phase = TRUE; - vm_object_t cur_shared_object = NULL; - int cur_shared_obj_ref_cnt = 0; - unsigned int dirty_private_count = 0, dirty_shared_count = 0, obj_pages_snapshot = 0; + vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL; + kern_return_t kr = KERN_SUCCESS; + boolean_t evaluation_phase = TRUE; + vm_object_t cur_shared_object = NULL; + int cur_shared_obj_ref_cnt = 0; + unsigned int dirty_private_count = 0, dirty_shared_count = 0, obj_pages_snapshot = 0; *purgeable_count = *wired_count = *clean_count = *dirty_count = *shared_count = 0; @@ -18274,10 +18448,9 @@ vm_map_freeze( again: for (entry2 = vm_map_first_entry(map); - entry2 != vm_map_to_entry(map); - entry2 = entry2->vme_next) { - - vm_object_t src_object = VME_OBJECT(entry2); + entry2 != vm_map_to_entry(map); + entry2 = entry2->vme_next) { + vm_object_t src_object = VME_OBJECT(entry2); if (src_object && !entry2->is_sub_map && @@ -18285,7 +18458,6 @@ again: /* If eligible, scan the entry, moving eligible pages over to our parent object */ if (src_object->internal == TRUE) { - if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { /* * Pages belonging to this object could be swapped to disk. 
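 *
 * (vm_map_freeze() walks the map twice: an evaluation pass that only
 * tallies dirty private/shared pages, then, if the tallies pass the
 * checks in the next hunk, a second pass that actually compresses.
 * A condensed sketch of that eligibility test, with illustrative names
 * standing in for the memorystatus globals:
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     static bool freeze_eligible(uint64_t dirty_private,
 *                                 uint64_t dirty_shared,
 *                                 uint64_t shared_page_limit,
 *                                 uint64_t min_private_shared_ratio)
 *     {
 *         if (dirty_shared > shared_page_limit) {
 *             return false;    // too much shared dirty memory
 *         }
 *         if (dirty_shared != 0 &&
 *             dirty_private / dirty_shared < min_private_shared_ratio) {
 *             return false;    // private:shared ratio too low
 *         }
 *         return true;
 *     }
 * )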
@@ -18327,7 +18499,6 @@ again: } if (evaluation_phase == TRUE) { - continue; } } @@ -18353,7 +18524,6 @@ again: } if (evaluation_phase) { - unsigned int shared_pages_threshold = (memorystatus_freeze_shared_mb_per_process_max * 1024 * 1024ULL) / PAGE_SIZE_64; if (dirty_shared_count > shared_pages_threshold) { @@ -18363,7 +18533,7 @@ again: } if (dirty_shared_count && - ((dirty_private_count / dirty_shared_count) < memorystatus_freeze_private_shared_pages_ratio)) { + ((dirty_private_count / dirty_shared_count) < memorystatus_freeze_private_shared_pages_ratio)) { *freezer_error_code = FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO; kr = KERN_FAILURE; goto done; @@ -18371,7 +18541,7 @@ again: evaluation_phase = FALSE; dirty_shared_count = dirty_private_count = 0; - + c_freezer_compression_count = 0; clock_get_uptime(&c_freezer_last_yield_ts); @@ -18381,9 +18551,7 @@ again: } goto again; - } else { - kr = KERN_SUCCESS; *shared_count = (unsigned int) ((dirty_shared_count * PAGE_SIZE_64) / (1024 * 1024ULL)); } @@ -18419,21 +18587,21 @@ done: * For now, we target only the map entries created for the Objective C * Garbage Collector, which initially have the following properties: * - alias == VM_MEMORY_MALLOC - * - wired_count == 0 - * - !needs_copy + * - wired_count == 0 + * - !needs_copy * and a VM object with: - * - internal - * - copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC - * - !true_share - * - vo_size == ANON_CHUNK_SIZE + * - internal + * - copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC + * - !true_share + * - vo_size == ANON_CHUNK_SIZE * * Only non-kernel map entries. */ boolean_t vm_map_entry_should_cow_for_true_share( - vm_map_entry_t entry) + vm_map_entry_t entry) { - vm_object_t object; + vm_object_t object; if (entry->is_sub_map) { /* entry does not point at a VM object */ @@ -18502,24 +18670,24 @@ vm_map_entry_should_cow_for_true_share( vm_map_offset_t vm_map_round_page_mask( - vm_map_offset_t offset, - vm_map_offset_t mask) + vm_map_offset_t offset, + vm_map_offset_t mask) { return VM_MAP_ROUND_PAGE(offset, mask); } vm_map_offset_t vm_map_trunc_page_mask( - vm_map_offset_t offset, - vm_map_offset_t mask) + vm_map_offset_t offset, + vm_map_offset_t mask) { return VM_MAP_TRUNC_PAGE(offset, mask); } boolean_t vm_map_page_aligned( - vm_map_offset_t offset, - vm_map_offset_t mask) + vm_map_offset_t offset, + vm_map_offset_t mask) { return ((offset) & mask) == 0; } @@ -18547,8 +18715,8 @@ vm_map_page_mask( kern_return_t vm_map_set_page_shift( - vm_map_t map, - int pageshift) + vm_map_t map, + int pageshift) { if (map->hdr.nentries != 0) { /* too late to change page size */ @@ -18562,21 +18730,21 @@ vm_map_set_page_shift( kern_return_t vm_map_query_volatile( - vm_map_t map, - mach_vm_size_t *volatile_virtual_size_p, - mach_vm_size_t *volatile_resident_size_p, - mach_vm_size_t *volatile_compressed_size_p, - mach_vm_size_t *volatile_pmap_size_p, - mach_vm_size_t *volatile_compressed_pmap_size_p) + vm_map_t map, + mach_vm_size_t *volatile_virtual_size_p, + mach_vm_size_t *volatile_resident_size_p, + mach_vm_size_t *volatile_compressed_size_p, + mach_vm_size_t *volatile_pmap_size_p, + mach_vm_size_t *volatile_compressed_pmap_size_p) { - mach_vm_size_t volatile_virtual_size; - mach_vm_size_t volatile_resident_count; - mach_vm_size_t volatile_compressed_count; - mach_vm_size_t volatile_pmap_count; - mach_vm_size_t volatile_compressed_pmap_count; - mach_vm_size_t resident_count; - vm_map_entry_t entry; - vm_object_t object; + mach_vm_size_t volatile_virtual_size; + mach_vm_size_t volatile_resident_count; 
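/*
 * vm_map_query_volatile() accumulates five counters over the whole map:
 * volatile virtual size, resident pages, compressor pages, and the
 * pmap-level resident/compressed pages. Submap and non-writable entries
 * are skipped; for each qualifying object the pmap is asked for byte
 * counts, which are divided down to pages as in the reduced sketch below
 * (types illustrative; pmap_query_resident() reports resident bytes and
 * writes compressed bytes through its out-parameter, as in the code
 * further down):
 *
 *     uint64_t compressed_bytes = 0;
 *     uint64_t resident_bytes =
 *         pmap_query_resident(pmap, start, end, &compressed_bytes);
 *     volatile_pmap_count            += resident_bytes   / PAGE_SIZE;
 *     volatile_compressed_pmap_count += compressed_bytes / PAGE_SIZE;
 */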
+ mach_vm_size_t volatile_compressed_count; + mach_vm_size_t volatile_pmap_count; + mach_vm_size_t volatile_compressed_pmap_count; + mach_vm_size_t resident_count; + vm_map_entry_t entry; + vm_object_t object; /* map should be locked by caller */ @@ -18587,14 +18755,14 @@ vm_map_query_volatile( volatile_compressed_pmap_count = 0; for (entry = vm_map_first_entry(map); - entry != vm_map_to_entry(map); - entry = entry->vme_next) { - mach_vm_size_t pmap_resident_bytes, pmap_compressed_bytes; + entry != vm_map_to_entry(map); + entry = entry->vme_next) { + mach_vm_size_t pmap_resident_bytes, pmap_compressed_bytes; if (entry->is_sub_map) { continue; } - if (! (entry->protection & VM_PROT_WRITE)) { + if (!(entry->protection & VM_PROT_WRITE)) { continue; } object = VME_OBJECT(entry); @@ -18626,17 +18794,17 @@ vm_map_query_volatile( volatile_resident_count += resident_count; if (object->pager) { volatile_compressed_count += - vm_compressor_pager_get_count(object->pager); + vm_compressor_pager_get_count(object->pager); } pmap_compressed_bytes = 0; pmap_resident_bytes = - pmap_query_resident(map->pmap, - entry->vme_start, - entry->vme_end, - &pmap_compressed_bytes); + pmap_query_resident(map->pmap, + entry->vme_start, + entry->vme_end, + &pmap_compressed_bytes); volatile_pmap_count += (pmap_resident_bytes / PAGE_SIZE); volatile_compressed_pmap_count += (pmap_compressed_bytes - / PAGE_SIZE); + / PAGE_SIZE); } /* map is still locked on return */ @@ -18652,63 +18820,76 @@ vm_map_query_volatile( void vm_map_sizes(vm_map_t map, - vm_map_size_t * psize, - vm_map_size_t * pfree, - vm_map_size_t * plargest_free) + vm_map_size_t * psize, + vm_map_size_t * pfree, + vm_map_size_t * plargest_free) { - vm_map_entry_t entry; - vm_map_offset_t prev; - vm_map_size_t free, total_free, largest_free; - boolean_t end; - - if (!map) - { - *psize = *pfree = *plargest_free = 0; - return; - } - total_free = largest_free = 0; - - vm_map_lock_read(map); - if (psize) *psize = map->max_offset - map->min_offset; - - prev = map->min_offset; - for (entry = vm_map_first_entry(map);; entry = entry->vme_next) - { - end = (entry == vm_map_to_entry(map)); - - if (end) free = entry->vme_end - prev; - else free = entry->vme_start - prev; - - total_free += free; - if (free > largest_free) largest_free = free; - - if (end) break; - prev = entry->vme_end; - } - vm_map_unlock_read(map); - if (pfree) *pfree = total_free; - if (plargest_free) *plargest_free = largest_free; + vm_map_entry_t entry; + vm_map_offset_t prev; + vm_map_size_t free, total_free, largest_free; + boolean_t end; + + if (!map) { + *psize = *pfree = *plargest_free = 0; + return; + } + total_free = largest_free = 0; + + vm_map_lock_read(map); + if (psize) { + *psize = map->max_offset - map->min_offset; + } + + prev = map->min_offset; + for (entry = vm_map_first_entry(map);; entry = entry->vme_next) { + end = (entry == vm_map_to_entry(map)); + + if (end) { + free = entry->vme_end - prev; + } else { + free = entry->vme_start - prev; + } + + total_free += free; + if (free > largest_free) { + largest_free = free; + } + + if (end) { + break; + } + prev = entry->vme_end; + } + vm_map_unlock_read(map); + if (pfree) { + *pfree = total_free; + } + if (plargest_free) { + *plargest_free = largest_free; + } } #if VM_SCAN_FOR_SHADOW_CHAIN int vm_map_shadow_max(vm_map_t map); -int vm_map_shadow_max( +int +vm_map_shadow_max( vm_map_t map) { - int shadows, shadows_max; - vm_map_entry_t entry; - vm_object_t object, next_object; + int shadows, shadows_max; + vm_map_entry_t entry; + vm_object_t 
object, next_object; - if (map == NULL) + if (map == NULL) { return 0; + } shadows_max = 0; vm_map_lock_read(map); for (entry = vm_map_first_entry(map); - entry != vm_map_to_entry(map); - entry = entry->vme_next) { + entry != vm_map_to_entry(map); + entry = entry->vme_next) { if (entry->is_sub_map) { continue; } @@ -18718,8 +18899,8 @@ int vm_map_shadow_max( } vm_object_lock_shared(object); for (shadows = 0; - object->shadow != NULL; - shadows++, object = next_object) { + object->shadow != NULL; + shadows++, object = next_object) { next_object = object->shadow; vm_object_lock_shared(next_object); vm_object_unlock(object); @@ -18736,15 +18917,17 @@ int vm_map_shadow_max( } #endif /* VM_SCAN_FOR_SHADOW_CHAIN */ -void vm_commit_pagezero_status(vm_map_t lmap) { +void +vm_commit_pagezero_status(vm_map_t lmap) +{ pmap_advise_pagezero_range(lmap->pmap, lmap->min_offset); } #if __x86_64__ void vm_map_set_high_start( - vm_map_t map, - vm_map_offset_t high_start) + vm_map_t map, + vm_map_offset_t high_start) { map->vmmap_high_start = high_start; } @@ -18753,9 +18936,9 @@ vm_map_set_high_start( #if PMAP_CS kern_return_t vm_map_entry_cs_associate( - vm_map_t map, - vm_map_entry_t entry, - vm_map_kernel_flags_t vmk_flags) + vm_map_t map, + vm_map_entry_t entry, + vm_map_kernel_flags_t vmk_flags) { vm_object_t cs_object, cs_shadow; vm_object_offset_t cs_offset; @@ -18766,7 +18949,7 @@ vm_map_entry_cs_associate( if (map->pmap == NULL || entry->is_sub_map || /* XXX FBDP: recurse on sub-range? */ VME_OBJECT(entry) == VM_OBJECT_NULL || - ! (entry->protection & VM_PROT_EXECUTE)) { + !(entry->protection & VM_PROT_EXECUTE)) { return KERN_SUCCESS; } @@ -18774,26 +18957,26 @@ vm_map_entry_cs_associate( if (entry->used_for_jit) { cs_ret = pmap_cs_associate(map->pmap, - PMAP_CS_ASSOCIATE_JIT, - entry->vme_start, - entry->vme_end - entry->vme_start); + PMAP_CS_ASSOCIATE_JIT, + entry->vme_start, + entry->vme_end - entry->vme_start); goto done; } if (vmk_flags.vmkf_remap_prot_copy) { cs_ret = pmap_cs_associate(map->pmap, - PMAP_CS_ASSOCIATE_COW, - entry->vme_start, - entry->vme_end - entry->vme_start); + PMAP_CS_ASSOCIATE_COW, + entry->vme_start, + entry->vme_end - entry->vme_start); goto done; } vm_object_lock_shared(VME_OBJECT(entry)); cs_offset = VME_OFFSET(entry); for (cs_object = VME_OBJECT(entry); - (cs_object != VM_OBJECT_NULL && - !cs_object->code_signed); - cs_object = cs_shadow) { + (cs_object != VM_OBJECT_NULL && + !cs_object->code_signed); + cs_object = cs_shadow) { cs_shadow = cs_object->shadow; if (cs_shadow != VM_OBJECT_NULL) { cs_offset += cs_object->vo_shadow_offset; @@ -18808,22 +18991,22 @@ vm_map_entry_cs_associate( cs_offset += cs_object->paging_offset; cs_vnode = vnode_pager_lookup_vnode(cs_object->pager); cs_ret = vnode_pager_get_cs_blobs(cs_vnode, - &cs_blobs); + &cs_blobs); assert(cs_ret == KERN_SUCCESS); cs_ret = cs_associate_blob_with_mapping(map->pmap, - entry->vme_start, - (entry->vme_end - - entry->vme_start), - cs_offset, - cs_blobs); + entry->vme_start, + (entry->vme_end - + entry->vme_start), + cs_offset, + cs_blobs); vm_object_unlock(cs_object); cs_object = VM_OBJECT_NULL; - done: +done: if (cs_ret == KERN_SUCCESS) { DTRACE_VM2(vm_map_entry_cs_associate_success, - vm_map_offset_t, entry->vme_start, - vm_map_offset_t, entry->vme_end); + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end); if (vm_map_executable_immutable) { /* * Prevent this executable @@ -18850,18 +19033,18 @@ vm_map_entry_cs_associate( * doing it. 
*/ DTRACE_VM3(vm_map_entry_cs_associate_off, - vm_map_offset_t, entry->vme_start, - vm_map_offset_t, entry->vme_end, - int, cs_ret); + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + int, cs_ret); } else { /* * A real error: do not allow * execution in this mapping. */ DTRACE_VM3(vm_map_entry_cs_associate_failure, - vm_map_offset_t, entry->vme_start, - vm_map_offset_t, entry->vme_end, - int, cs_ret); + vm_map_offset_t, entry->vme_start, + vm_map_offset_t, entry->vme_end, + int, cs_ret); entry->protection &= ~VM_PROT_EXECUTE; entry->max_protection &= ~VM_PROT_EXECUTE; } @@ -18919,8 +19102,8 @@ vm_map_entry_cs_associate( * +---------------------------------------+ * * where: - * cf_size: total size of the buffer (rounded to page size) - * cf_last_region: offset in the buffer of the last "region" sub-header + * cf_size: total size of the buffer (rounded to page size) + * cf_last_region: offset in the buffer of the last "region" sub-header * cf_last_zeroes: number of trailing "zero" dispositions at the end * of last region * cfr_vaddr: virtual address of the start of the covered "region" @@ -18950,7 +19133,7 @@ uint64_t vm_map_corpse_footprint_no_buf = 0; /* * vm_map_corpse_footprint_new_region: - * closes the current footprint "region" and creates a new one + * closes the current footprint "region" and creates a new one * * Returns NULL if there's not enough space in the buffer for a new region. */ @@ -18958,24 +19141,24 @@ static struct vm_map_corpse_footprint_region * vm_map_corpse_footprint_new_region( struct vm_map_corpse_footprint_header *footprint_header) { - uintptr_t footprint_edge; - uint32_t new_region_offset; + uintptr_t footprint_edge; + uint32_t new_region_offset; struct vm_map_corpse_footprint_region *footprint_region; struct vm_map_corpse_footprint_region *new_footprint_region; footprint_edge = ((uintptr_t)footprint_header + - footprint_header->cf_size); + footprint_header->cf_size); footprint_region = ((struct vm_map_corpse_footprint_region *) - ((char *)footprint_header + - footprint_header->cf_last_region)); - assert((uintptr_t)footprint_region + sizeof (*footprint_region) <= - footprint_edge); + ((char *)footprint_header + + footprint_header->cf_last_region)); + assert((uintptr_t)footprint_region + sizeof(*footprint_region) <= + footprint_edge); /* get rid of trailing zeroes in the last region */ assert(footprint_region->cfr_num_pages >= - footprint_header->cf_last_zeroes); + footprint_header->cf_last_zeroes); footprint_region->cfr_num_pages -= - footprint_header->cf_last_zeroes; + footprint_header->cf_last_zeroes; footprint_header->cf_last_zeroes = 0; /* reuse this region if it's now empty */ @@ -18985,14 +19168,14 @@ vm_map_corpse_footprint_new_region( /* compute offset of new region */ new_region_offset = footprint_header->cf_last_region; - new_region_offset += sizeof (*footprint_region); + new_region_offset += sizeof(*footprint_region); new_region_offset += footprint_region->cfr_num_pages; - new_region_offset = roundup(new_region_offset, sizeof (int)); + new_region_offset = roundup(new_region_offset, sizeof(int)); /* check if we're going over the edge */ if (((uintptr_t)footprint_header + - new_region_offset + - sizeof (*footprint_region)) >= + new_region_offset + + sizeof(*footprint_region)) >= footprint_edge) { /* over the edge: no new region */ return NULL; @@ -19002,8 +19185,8 @@ vm_map_corpse_footprint_new_region( footprint_header->cf_last_region = new_region_offset; new_footprint_region = (struct vm_map_corpse_footprint_region *) - ((char 
*)footprint_header + - footprint_header->cf_last_region); + ((char *)footprint_header + + footprint_header->cf_last_region); new_footprint_region->cfr_vaddr = 0; new_footprint_region->cfr_num_pages = 0; /* caller needs to initialize new region */ @@ -19018,19 +19201,19 @@ vm_map_corpse_footprint_new_region( */ kern_return_t vm_map_corpse_footprint_collect( - vm_map_t old_map, - vm_map_entry_t old_entry, - vm_map_t new_map) + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map) { - vm_map_offset_t va; - int disp; - kern_return_t kr; + vm_map_offset_t va; + int disp; + kern_return_t kr; struct vm_map_corpse_footprint_header *footprint_header; struct vm_map_corpse_footprint_region *footprint_region; struct vm_map_corpse_footprint_region *new_footprint_region; - unsigned char *next_disp_p; - uintptr_t footprint_edge; - uint32_t num_pages_tmp; + unsigned char *next_disp_p; + uintptr_t footprint_edge; + uint32_t num_pages_tmp; va = old_entry->vme_start; @@ -19049,19 +19232,19 @@ vm_map_corpse_footprint_collect( } if (new_map->vmmap_corpse_footprint == NULL) { - vm_offset_t buf; - vm_size_t buf_size; + vm_offset_t buf; + vm_size_t buf_size; buf = 0; - buf_size = (sizeof (*footprint_header) + - (old_map->hdr.nentries - * - (sizeof (*footprint_region) + - + 3)) /* potential alignment for each region */ - + - ((old_map->size / PAGE_SIZE) - * - sizeof (char))); /* disposition for each page */ + buf_size = (sizeof(*footprint_header) + + (old_map->hdr.nentries + * + (sizeof(*footprint_region) + + +3)) /* potential alignment for each region */ + + + ((old_map->size / PAGE_SIZE) + * + sizeof(char))); /* disposition for each page */ // printf("FBDP corpse map %p guestimate footprint size 0x%llx\n", new_map, (uint64_t) buf_size); buf_size = round_page(buf_size); @@ -19070,9 +19253,9 @@ vm_map_corpse_footprint_collect( /* limit size to a somewhat sane amount */ #if CONFIG_EMBEDDED -#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (256*1024) /* 256KB */ +#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (256*1024) /* 256KB */ #else /* CONFIG_EMBEDDED */ -#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (8*1024*1024) /* 8MB */ +#define VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE (8*1024*1024) /* 8MB */ #endif /* CONFIG_EMBEDDED */ if (buf_size > VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE) { buf_size = VM_MAP_CORPSE_FOOTPRINT_INFO_MAX_SIZE; @@ -19083,12 +19266,12 @@ vm_map_corpse_footprint_collect( * It will be zero-filled on demand. 
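 *
 * (Sizing recap: the estimate computed above budgets, on top of the
 * fixed header, one region sub-header plus up to 3 alignment bytes per
 * map entry, and one disposition byte per page of the old map:
 *
 *     buf_size = sizeof(header)
 *              + nentries * (sizeof(region) + 3)   // worst-case padding
 *              + (map_size / PAGE_SIZE);           // 1 byte per page
 *
 * It is then rounded to a page and clamped to the per-config maximum
 * before the pageable, guard-paged allocation below.)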
*/ kr = kernel_memory_allocate(kernel_map, - &buf, - (buf_size - + PAGE_SIZE), /* trailing guard page */ - 0, /* mask */ - KMA_PAGEABLE | KMA_GUARD_LAST, - VM_KERN_MEMORY_DIAG); + &buf, + (buf_size + + PAGE_SIZE), /* trailing guard page */ + 0, /* mask */ + KMA_PAGEABLE | KMA_GUARD_LAST, + VM_KERN_MEMORY_DIAG); if (kr != KERN_SUCCESS) { vm_map_corpse_footprint_no_buf++; return kr; @@ -19100,28 +19283,28 @@ vm_map_corpse_footprint_collect( footprint_header->cf_size = buf_size; footprint_header->cf_last_region = - sizeof (*footprint_header); + sizeof(*footprint_header); footprint_header->cf_last_zeroes = 0; footprint_region = (struct vm_map_corpse_footprint_region *) - ((char *)footprint_header + - footprint_header->cf_last_region); + ((char *)footprint_header + + footprint_header->cf_last_region); footprint_region->cfr_vaddr = 0; footprint_region->cfr_num_pages = 0; } else { /* retrieve header and last region */ footprint_header = (struct vm_map_corpse_footprint_header *) - new_map->vmmap_corpse_footprint; + new_map->vmmap_corpse_footprint; footprint_region = (struct vm_map_corpse_footprint_region *) - ((char *)footprint_header + - footprint_header->cf_last_region); + ((char *)footprint_header + + footprint_header->cf_last_region); } footprint_edge = ((uintptr_t)footprint_header + - footprint_header->cf_size); + footprint_header->cf_size); if ((footprint_region->cfr_vaddr + - (((vm_map_offset_t)footprint_region->cfr_num_pages) * - PAGE_SIZE)) + (((vm_map_offset_t)footprint_region->cfr_num_pages) * + PAGE_SIZE)) != old_entry->vme_start) { uint64_t num_pages_delta; uint32_t region_offset_delta; @@ -19133,22 +19316,22 @@ vm_map_corpse_footprint_collect( */ /* size of gap in actual page dispositions */ num_pages_delta = (((old_entry->vme_start - - footprint_region->cfr_vaddr) / PAGE_SIZE) - - footprint_region->cfr_num_pages); + footprint_region->cfr_vaddr) / PAGE_SIZE) + - footprint_region->cfr_num_pages); /* size of gap as a new footprint region header */ region_offset_delta = - (sizeof (*footprint_region) + - roundup((footprint_region->cfr_num_pages - - footprint_header->cf_last_zeroes), - sizeof (int)) - - (footprint_region->cfr_num_pages - - footprint_header->cf_last_zeroes)); + (sizeof(*footprint_region) + + roundup((footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes), + sizeof(int)) - + (footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes)); // printf("FBDP %s:%d region 0x%x 0x%llx 0x%x vme_start 0x%llx pages_delta 0x%llx region_delta 0x%x\n", __FUNCTION__, __LINE__, footprint_header->cf_last_region, footprint_region->cfr_vaddr, footprint_region->cfr_num_pages, old_entry->vme_start, num_pages_delta, region_offset_delta); if (region_offset_delta < num_pages_delta || os_add3_overflow(footprint_region->cfr_num_pages, - (uint32_t) num_pages_delta, - 1, - &num_pages_tmp)) { + (uint32_t) num_pages_delta, + 1, + &num_pages_tmp)) { /* * Storing data for this gap would take more space * than inserting a new footprint region header: @@ -19163,7 +19346,7 @@ vm_map_corpse_footprint_collect( */ // printf("FBDP %s:%d new region\n", __FUNCTION__, __LINE__); new_footprint_region = - vm_map_corpse_footprint_new_region(footprint_header); + vm_map_corpse_footprint_new_region(footprint_header); /* check that we're not going over the edge */ if (new_footprint_region == NULL) { goto over_the_edge; @@ -19180,9 +19363,9 @@ vm_map_corpse_footprint_collect( // printf("FBDP %s:%d zero gap\n", __FUNCTION__, __LINE__); for (; num_pages_delta > 0; num_pages_delta--) { next_disp_p = - 
((unsigned char *) footprint_region + - sizeof (*footprint_region) + - footprint_region->cfr_num_pages); + ((unsigned char *) footprint_region + + sizeof(*footprint_region) + + footprint_region->cfr_num_pages); /* check that we're not going over the edge */ if ((uintptr_t)next_disp_p >= footprint_edge) { goto over_the_edge; @@ -19196,9 +19379,9 @@ vm_map_corpse_footprint_collect( } for (va = old_entry->vme_start; - va < old_entry->vme_end; - va += PAGE_SIZE) { - vm_object_t object; + va < old_entry->vme_end; + va += PAGE_SIZE) { + vm_object_t object; object = VME_OBJECT(old_entry); if (!old_entry->is_sub_map && @@ -19215,12 +19398,12 @@ vm_map_corpse_footprint_collect( * shows up in the forked corpse's footprint. */ disp = (PMAP_QUERY_PAGE_PRESENT | - PMAP_QUERY_PAGE_INTERNAL); + PMAP_QUERY_PAGE_INTERNAL); } else { disp = 0; pmap_query_page_info(old_map->pmap, - va, - &disp); + va, + &disp); } // if (va < SHARED_REGION_BASE_ARM64) printf("FBDP collect map %p va 0x%llx disp 0x%x\n", new_map, va, disp); @@ -19236,11 +19419,11 @@ vm_map_corpse_footprint_collect( /* would region's cfr_num_pages overflow? */ if (os_add_overflow(footprint_region->cfr_num_pages, 1, - &num_pages_tmp)) { + &num_pages_tmp)) { /* overflow: create a new region */ new_footprint_region = - vm_map_corpse_footprint_new_region( - footprint_header); + vm_map_corpse_footprint_new_region( + footprint_header); if (new_footprint_region == NULL) { goto over_the_edge; } @@ -19250,8 +19433,8 @@ vm_map_corpse_footprint_collect( } next_disp_p = ((unsigned char *)footprint_region + - sizeof (*footprint_region) + - footprint_region->cfr_num_pages); + sizeof(*footprint_region) + + footprint_region->cfr_num_pages); /* check that we're not going over the edge */ if ((uintptr_t)next_disp_p >= footprint_edge) { goto over_the_edge; @@ -19270,11 +19453,11 @@ vm_map_corpse_footprint_collect( /* zero disp: add to the current streak of zeroes */ footprint_header->cf_last_zeroes++; if ((footprint_header->cf_last_zeroes + - roundup((footprint_region->cfr_num_pages - - footprint_header->cf_last_zeroes) & - (sizeof (int) - 1), - sizeof (int))) < - (sizeof (*footprint_header))) { + roundup((footprint_region->cfr_num_pages - + footprint_header->cf_last_zeroes) & + (sizeof(int) - 1), + sizeof(int))) < + (sizeof(*footprint_header))) { /* * There are not enough trailing "zero" dispositions * (+ the extra padding we would need for the previous @@ -19289,7 +19472,7 @@ vm_map_corpse_footprint_collect( * "zero" dispositions. 
*/ new_footprint_region = - vm_map_corpse_footprint_new_region(footprint_header); + vm_map_corpse_footprint_new_region(footprint_header); if (new_footprint_region == NULL) { goto over_the_edge; } @@ -19316,12 +19499,12 @@ over_the_edge: */ void vm_map_corpse_footprint_collect_done( - vm_map_t new_map) + vm_map_t new_map) { struct vm_map_corpse_footprint_header *footprint_header; struct vm_map_corpse_footprint_region *footprint_region; - vm_size_t buf_size, actual_size; - kern_return_t kr; + vm_size_t buf_size, actual_size; + kern_return_t kr; assert(new_map->has_corpse_footprint); if (!new_map->has_corpse_footprint || @@ -19330,12 +19513,12 @@ vm_map_corpse_footprint_collect_done( } footprint_header = (struct vm_map_corpse_footprint_header *) - new_map->vmmap_corpse_footprint; + new_map->vmmap_corpse_footprint; buf_size = footprint_header->cf_size; footprint_region = (struct vm_map_corpse_footprint_region *) - ((char *)footprint_header + - footprint_header->cf_last_region); + ((char *)footprint_header + + footprint_header->cf_last_region); /* get rid of trailing zeroes in last region */ assert(footprint_region->cfr_num_pages >= footprint_header->cf_last_zeroes); @@ -19343,15 +19526,15 @@ vm_map_corpse_footprint_collect_done( footprint_header->cf_last_zeroes = 0; actual_size = (vm_size_t)(footprint_header->cf_last_region + - sizeof (*footprint_region) + - footprint_region->cfr_num_pages); + sizeof(*footprint_region) + + footprint_region->cfr_num_pages); // printf("FBDP map %p buf_size 0x%llx actual_size 0x%llx\n", new_map, (uint64_t) buf_size, (uint64_t) actual_size); - vm_map_corpse_footprint_size_avg = - (((vm_map_corpse_footprint_size_avg * - vm_map_corpse_footprint_count) + - actual_size) / - (vm_map_corpse_footprint_count + 1)); + vm_map_corpse_footprint_size_avg = + (((vm_map_corpse_footprint_size_avg * + vm_map_corpse_footprint_count) + + actual_size) / + (vm_map_corpse_footprint_count + 1)); vm_map_corpse_footprint_count++; if (actual_size > vm_map_corpse_footprint_size_max) { vm_map_corpse_footprint_size_max = actual_size; @@ -19360,28 +19543,28 @@ vm_map_corpse_footprint_collect_done( actual_size = round_page(actual_size); if (buf_size > actual_size) { kr = vm_deallocate(kernel_map, - ((vm_address_t)footprint_header + - actual_size + - PAGE_SIZE), /* trailing guard page */ - (buf_size - actual_size)); + ((vm_address_t)footprint_header + + actual_size + + PAGE_SIZE), /* trailing guard page */ + (buf_size - actual_size)); assertf(kr == KERN_SUCCESS, - "trim: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n", - footprint_header, - (uint64_t) buf_size, - (uint64_t) actual_size, - kr); + "trim: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n", + footprint_header, + (uint64_t) buf_size, + (uint64_t) actual_size, + kr); kr = vm_protect(kernel_map, - ((vm_address_t)footprint_header + - actual_size), - PAGE_SIZE, - FALSE, /* set_maximum */ - VM_PROT_NONE); + ((vm_address_t)footprint_header + + actual_size), + PAGE_SIZE, + FALSE, /* set_maximum */ + VM_PROT_NONE); assertf(kr == KERN_SUCCESS, - "guard: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n", - footprint_header, - (uint64_t) buf_size, - (uint64_t) actual_size, - kr); + "guard: footprint_header %p buf_size 0x%llx actual_size 0x%llx kr=0x%x\n", + footprint_header, + (uint64_t) buf_size, + (uint64_t) actual_size, + kr); } footprint_header->cf_size = actual_size; @@ -19396,16 +19579,16 @@ vm_map_corpse_footprint_collect_done( */ kern_return_t vm_map_corpse_footprint_query_page_info( - 
vm_map_t map, - vm_map_offset_t va, - int *disp) + vm_map_t map, + vm_map_offset_t va, + int *disp) { struct vm_map_corpse_footprint_header *footprint_header; struct vm_map_corpse_footprint_region *footprint_region; - uint32_t footprint_region_offset; - vm_map_offset_t region_start, region_end; - int disp_idx; - kern_return_t kr; + uint32_t footprint_region_offset; + vm_map_offset_t region_start, region_end; + int disp_idx; + kern_return_t kr; if (!map->has_corpse_footprint) { *disp = 0; @@ -19425,28 +19608,28 @@ vm_map_corpse_footprint_query_page_info( footprint_region_offset = footprint_header->cf_hint_region; lookup_again: - if (footprint_region_offset < sizeof (*footprint_header)) { + if (footprint_region_offset < sizeof(*footprint_header)) { /* hint too low: start from 1st region */ - footprint_region_offset = sizeof (*footprint_header); + footprint_region_offset = sizeof(*footprint_header); } if (footprint_region_offset >= footprint_header->cf_last_region) { /* hint too high: re-start from 1st region */ - footprint_region_offset = sizeof (*footprint_header); + footprint_region_offset = sizeof(*footprint_header); } footprint_region = (struct vm_map_corpse_footprint_region *) - ((char *)footprint_header + footprint_region_offset); + ((char *)footprint_header + footprint_region_offset); region_start = footprint_region->cfr_vaddr; region_end = (region_start + - ((vm_map_offset_t)(footprint_region->cfr_num_pages) * - PAGE_SIZE)); + ((vm_map_offset_t)(footprint_region->cfr_num_pages) * + PAGE_SIZE)); if (va < region_start && - footprint_region_offset != sizeof (*footprint_header)) { + footprint_region_offset != sizeof(*footprint_header)) { /* our range starts before the hint region */ /* reset the hint (in a racy way...) */ - footprint_header->cf_hint_region = sizeof (*footprint_header); + footprint_header->cf_hint_region = sizeof(*footprint_header); /* lookup "va" again from 1st region */ - footprint_region_offset = sizeof (*footprint_header); + footprint_region_offset = sizeof(*footprint_header); goto lookup_again; } @@ -19455,19 +19638,19 @@ lookup_again: break; } /* skip the region's header */ - footprint_region_offset += sizeof (*footprint_region); + footprint_region_offset += sizeof(*footprint_region); /* skip the region's page dispositions */ footprint_region_offset += footprint_region->cfr_num_pages; /* align to next word boundary */ footprint_region_offset = - roundup(footprint_region_offset, - sizeof (int)); + roundup(footprint_region_offset, + sizeof(int)); footprint_region = (struct vm_map_corpse_footprint_region *) - ((char *)footprint_header + footprint_region_offset); + ((char *)footprint_header + footprint_region_offset); region_start = footprint_region->cfr_vaddr; region_end = (region_start + - ((vm_map_offset_t)(footprint_region->cfr_num_pages) * - PAGE_SIZE)); + ((vm_map_offset_t)(footprint_region->cfr_num_pages) * + PAGE_SIZE)); } if (va < region_start || va >= region_end) { /* page not found */ @@ -19489,10 +19672,10 @@ done: // if (va < SHARED_REGION_BASE_ARM64) printf("FBDP %d query map %p va 0x%llx disp 0x%x\n", __LINE__, map, va, *disp); /* dtrace -n 'vminfo:::footprint_query_page_info { printf("map 0x%p va 0x%llx disp 0x%x kr 0x%x", arg0, arg1, arg2, arg3); }' */ DTRACE_VM4(footprint_query_page_info, - vm_map_t, map, - vm_map_offset_t, va, - int, *disp, - kern_return_t, kr); + vm_map_t, map, + vm_map_offset_t, va, + int, *disp, + kern_return_t, kr); return kr; } @@ -19500,7 +19683,7 @@ done: static void vm_map_corpse_footprint_destroy( - vm_map_t map) + vm_map_t 
map) { if (map->has_corpse_footprint && map->vmmap_corpse_footprint != 0) { @@ -19511,9 +19694,9 @@ vm_map_corpse_footprint_destroy( footprint_header = map->vmmap_corpse_footprint; buf_size = footprint_header->cf_size; kr = vm_deallocate(kernel_map, - (vm_offset_t) map->vmmap_corpse_footprint, - ((vm_size_t) buf_size - + PAGE_SIZE)); /* trailing guard page */ + (vm_offset_t) map->vmmap_corpse_footprint, + ((vm_size_t) buf_size + + PAGE_SIZE)); /* trailing guard page */ assertf(kr == KERN_SUCCESS, "kr=0x%x\n", kr); map->vmmap_corpse_footprint = 0; map->has_corpse_footprint = FALSE; @@ -19527,21 +19710,21 @@ vm_map_corpse_footprint_destroy( */ void vm_map_copy_footprint_ledgers( - task_t old_task, - task_t new_task) + task_t old_task, + task_t new_task) { vm_map_copy_ledger(old_task, new_task, task_ledgers.phys_footprint); vm_map_copy_ledger(old_task, new_task, task_ledgers.purgeable_nonvolatile); vm_map_copy_ledger(old_task, new_task, task_ledgers.purgeable_nonvolatile_compressed); - vm_map_copy_ledger(old_task, new_task, task_ledgers.internal); - vm_map_copy_ledger(old_task, new_task, task_ledgers.internal_compressed); - vm_map_copy_ledger(old_task, new_task, task_ledgers.iokit_mapped); - vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting); - vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting_compressed); - vm_map_copy_ledger(old_task, new_task, task_ledgers.page_table); - vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile); - vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile_compressed); - vm_map_copy_ledger(old_task, new_task, task_ledgers.wired_mem); + vm_map_copy_ledger(old_task, new_task, task_ledgers.internal); + vm_map_copy_ledger(old_task, new_task, task_ledgers.internal_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.iokit_mapped); + vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting); + vm_map_copy_ledger(old_task, new_task, task_ledgers.alternate_accounting_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.page_table); + vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile); + vm_map_copy_ledger(old_task, new_task, task_ledgers.network_nonvolatile_compressed); + vm_map_copy_ledger(old_task, new_task, task_ledgers.wired_mem); } /* @@ -19550,40 +19733,41 @@ vm_map_copy_footprint_ledgers( */ void vm_map_copy_ledger( - task_t old_task, - task_t new_task, - int ledger_entry) + task_t old_task, + task_t new_task, + int ledger_entry) { - ledger_amount_t old_balance, new_balance, delta; + ledger_amount_t old_balance, new_balance, delta; assert(new_task->map->has_corpse_footprint); - if (!new_task->map->has_corpse_footprint) + if (!new_task->map->has_corpse_footprint) { return; + } /* turn off sanity checks for the ledger we're about to mess with */ ledger_disable_panic_on_negative(new_task->ledger, - ledger_entry); + ledger_entry); /* adjust "new_task" to match "old_task" */ ledger_get_balance(old_task->ledger, - ledger_entry, - &old_balance); + ledger_entry, + &old_balance); ledger_get_balance(new_task->ledger, - ledger_entry, - &new_balance); + ledger_entry, + &new_balance); if (new_balance == old_balance) { /* new == old: done */ } else if (new_balance > old_balance) { /* new > old ==> new -= new - old */ delta = new_balance - old_balance; ledger_debit(new_task->ledger, - ledger_entry, - delta); + ledger_entry, + delta); } else { /* new < old ==> new += old - new */ delta = old_balance - new_balance; 
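/*
 * The two branches of this if/else drive the new task's ledger balance
 * to the old task's: debit by (new - old) when the copy's balance is
 * higher, credit by (old - new) when it is lower, so afterwards the
 * balances are equal. A minimal standalone sketch of the same
 * reconciliation, folded into one signed delta (names illustrative):
 *
 *     #include <stdint.h>
 *
 *     // Signed amount to add to new_bal so it matches old_bal:
 *     // positive -> credit, negative -> debit.
 *     static int64_t ledger_reconcile_delta(int64_t old_bal,
 *                                           int64_t new_bal)
 *     {
 *         return old_bal - new_bal;
 *     }
 */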
ledger_credit(new_task->ledger, - ledger_entry, - delta); + ledger_entry, + delta); } } diff --git a/osfmk/vm/vm_map.h b/osfmk/vm/vm_map.h index 44cef715d..533b8d78c 100644 --- a/osfmk/vm/vm_map.h +++ b/osfmk/vm/vm_map.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
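The vm_map_copy_ledger() hunk that closes the vm_map.c portion of this patch reduces to a simple reconciliation: read both tasks' balances, then debit or credit the new task by the absolute difference so the corpse's entry matches the original, after first disabling the ledger's panic-on-negative sanity check. A minimal userspace model of that arithmetic (all names below are illustrative stand-ins, not kernel API):

#include <stdio.h>
#include <stdint.h>

typedef int64_t ledger_amount_t;

/* Adjust "new" to match "old" by debiting/crediting the delta,
 * mirroring the new > old and new < old branches in the hunk above. */
static void
reconcile(ledger_amount_t *new_balance, ledger_amount_t old_balance)
{
        ledger_amount_t delta;

        if (*new_balance > old_balance) {
                delta = *new_balance - old_balance;
                *new_balance -= delta;          /* ledger_debit() */
        } else if (*new_balance < old_balance) {
                delta = old_balance - *new_balance;
                *new_balance += delta;          /* ledger_credit() */
        }
        /* new == old: done */
}

int
main(void)
{
        ledger_amount_t old_task = 300, new_task = 500;

        reconcile(&new_task, old_task);
        printf("new task balance: %lld\n", (long long)new_task); /* 300 */
        return 0;
}

Routing the adjustment through debit/credit operations rather than overwriting the balance keeps the ledger's internal bookkeeping consistent with how every other balance change is applied.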
*/ @@ -67,7 +67,7 @@ * avie, dlb, mwyoung */ -#ifndef _VM_VM_MAP_H_ +#ifndef _VM_VM_MAP_H_ #define _VM_VM_MAP_H_ #include @@ -78,29 +78,30 @@ #include #include #include +#include #include -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #include __BEGIN_DECLS -extern void vm_map_reference(vm_map_t map); +extern void vm_map_reference(vm_map_t map); extern vm_map_t current_map(void); /* Setup reserved areas in a new VM map */ -extern kern_return_t vm_map_exec( - vm_map_t new_map, - task_t task, - boolean_t is64bit, - void *fsroot, - cpu_type_t cpu, - cpu_subtype_t cpu_subtype); +extern kern_return_t vm_map_exec( + vm_map_t new_map, + task_t task, + boolean_t is64bit, + void *fsroot, + cpu_type_t cpu, + cpu_subtype_t cpu_subtype); __END_DECLS -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #include @@ -113,8 +114,8 @@ __END_DECLS #include -#define current_map_fast() (current_thread()->map) -#define current_map() (current_map_fast()) +#define current_map_fast() (current_thread()->map) +#define current_map() (current_map_fast()) #include @@ -128,8 +129,8 @@ __END_DECLS * vm_map_copy_t represents memory copied from an address map, * used for inter-map copy operations */ -typedef struct vm_map_entry *vm_map_entry_t; -#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0) +typedef struct vm_map_entry *vm_map_entry_t; +#define VM_MAP_ENTRY_NULL ((vm_map_entry_t) 0) /* @@ -140,14 +141,14 @@ typedef struct vm_map_entry *vm_map_entry_t; * memory object or a sub map (of the kernel map). */ typedef union vm_map_object { - vm_object_t vmo_object; /* object object */ - vm_map_t vmo_submap; /* belongs to another map */ + vm_object_t vmo_object; /* object object */ + vm_map_t vmo_submap; /* belongs to another map */ } vm_map_object_t; -#define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr) -#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp) -#define named_entry_lock(object) lck_mtx_lock(&(object)->Lock) -#define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock) +#define named_entry_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr) +#define named_entry_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp) +#define named_entry_lock(object) lck_mtx_lock(&(object)->Lock) +#define named_entry_unlock(object) lck_mtx_unlock(&(object)->Lock) #if VM_NAMED_ENTRY_LIST extern queue_head_t vm_named_entry_list; #endif /* VM_NAMED_ENTRY_LIST */ @@ -160,7 +161,7 @@ extern queue_head_t vm_named_entry_list; * * Implementation: * While the handle to this object is used as a means to map - * and pass around the right to map regions backed by pagers + * and pass around the right to map regions backed by pagers * of all sorts, the named_entry itself is only manipulated * by the kernel. Named entries hold information on the * right to map a region of a cached object. 
Namely, @@ -171,27 +172,27 @@ extern queue_head_t vm_named_entry_list; */ struct vm_named_entry { - decl_lck_mtx_data(, Lock) /* Synchronization */ + decl_lck_mtx_data(, Lock) /* Synchronization */ union { - vm_object_t object; /* object I point to */ - vm_map_t map; /* map backing submap */ - vm_map_copy_t copy; /* a VM map copy */ + vm_object_t object; /* object I point to */ + vm_map_t map; /* map backing submap */ + vm_map_copy_t copy; /* a VM map copy */ } backing; - vm_object_offset_t offset; /* offset into object */ - vm_object_size_t size; /* size of region */ - vm_object_offset_t data_offset; /* offset to first byte of data */ - vm_prot_t protection; /* access permissions */ - int ref_count; /* Number of references */ - unsigned int /* Is backing.xxx : */ - /* boolean_t */ internal:1, /* ... an internal object */ - /* boolean_t */ is_sub_map:1, /* ... a submap? */ - /* boolean_t */ is_copy:1; /* ... a VM map copy */ + vm_object_offset_t offset; /* offset into object */ + vm_object_size_t size; /* size of region */ + vm_object_offset_t data_offset; /* offset to first byte of data */ + vm_prot_t protection; /* access permissions */ + int ref_count; /* Number of references */ + unsigned int /* Is backing.xxx : */ + /* boolean_t */ internal:1, /* ... an internal object */ + /* boolean_t */ is_sub_map:1, /* ... a submap? */ + /* boolean_t */ is_copy:1; /* ... a VM map copy */ #if VM_NAMED_ENTRY_LIST - queue_chain_t named_entry_list; - int named_entry_alias; - mach_port_t named_entry_port; + queue_chain_t named_entry_list; + int named_entry_alias; + mach_port_t named_entry_port; #define NAMED_ENTRY_BT_DEPTH 16 - void *named_entry_bt[NAMED_ENTRY_BT_DEPTH]; + void *named_entry_bt[NAMED_ENTRY_BT_DEPTH]; #endif /* VM_NAMED_ENTRY_LIST */ }; @@ -210,10 +211,10 @@ struct vm_named_entry { */ struct vm_map_links { - struct vm_map_entry *prev; /* previous entry */ - struct vm_map_entry *next; /* next entry */ - vm_map_offset_t start; /* start address */ - vm_map_offset_t end; /* end address */ + struct vm_map_entry *prev; /* previous entry */ + struct vm_map_entry *next; /* next entry */ + vm_map_offset_t start; /* start address */ + vm_map_offset_t end; /* end address */ }; /* @@ -223,46 +224,46 @@ struct vm_map_links { * updated without the VM map "exclusive" lock. 
*/ #define VME_OBJECT(entry) ((entry)->vme_object.vmo_object) -#define VME_OBJECT_SET(entry, object) \ - MACRO_BEGIN \ - (entry)->vme_object.vmo_object = (object); \ +#define VME_OBJECT_SET(entry, object) \ + MACRO_BEGIN \ + (entry)->vme_object.vmo_object = (object); \ MACRO_END #define VME_SUBMAP(entry) ((entry)->vme_object.vmo_submap) -#define VME_SUBMAP_SET(entry, submap) \ - MACRO_BEGIN \ - (entry)->vme_object.vmo_submap = (submap); \ +#define VME_SUBMAP_SET(entry, submap) \ + MACRO_BEGIN \ + (entry)->vme_object.vmo_submap = (submap); \ MACRO_END #define VME_OFFSET(entry) ((entry)->vme_offset & ~PAGE_MASK) -#define VME_OFFSET_SET(entry, offset) \ - MACRO_BEGIN \ - int __alias; \ - __alias = VME_ALIAS((entry)); \ - assert((offset & PAGE_MASK) == 0); \ - (entry)->vme_offset = offset | __alias; \ +#define VME_OFFSET_SET(entry, offset) \ + MACRO_BEGIN \ + int __alias; \ + __alias = VME_ALIAS((entry)); \ + assert((offset & PAGE_MASK) == 0); \ + (entry)->vme_offset = offset | __alias; \ MACRO_END -#define VME_OBJECT_SHADOW(entry, length) \ - MACRO_BEGIN \ - vm_object_t __object; \ - vm_object_offset_t __offset; \ - __object = VME_OBJECT((entry)); \ - __offset = VME_OFFSET((entry)); \ - vm_object_shadow(&__object, &__offset, (length)); \ - if (__object != VME_OBJECT((entry))) { \ - VME_OBJECT_SET((entry), __object); \ - (entry)->use_pmap = TRUE; \ - } \ - if (__offset != VME_OFFSET((entry))) { \ - VME_OFFSET_SET((entry), __offset); \ - } \ +#define VME_OBJECT_SHADOW(entry, length) \ + MACRO_BEGIN \ + vm_object_t __object; \ + vm_object_offset_t __offset; \ + __object = VME_OBJECT((entry)); \ + __offset = VME_OFFSET((entry)); \ + vm_object_shadow(&__object, &__offset, (length)); \ + if (__object != VME_OBJECT((entry))) { \ + VME_OBJECT_SET((entry), __object); \ + (entry)->use_pmap = TRUE; \ + } \ + if (__offset != VME_OFFSET((entry))) { \ + VME_OFFSET_SET((entry), __offset); \ + } \ MACRO_END #define VME_ALIAS_MASK (PAGE_MASK) #define VME_ALIAS(entry) ((unsigned int)((entry)->vme_offset & VME_ALIAS_MASK)) #define VME_ALIAS_SET(entry, alias) \ - MACRO_BEGIN \ - vm_map_offset_t __offset; \ - __offset = VME_OFFSET((entry)); \ - (entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK); \ + MACRO_BEGIN \ + vm_map_offset_t __offset; \ + __offset = VME_OFFSET((entry)); \ + (entry)->vme_offset = __offset | ((alias) & VME_ALIAS_MASK); \ MACRO_END /* @@ -295,69 +296,69 @@ struct vm_map_links { */ struct vm_map_entry { - struct vm_map_links links; /* links to other entries */ -#define vme_prev links.prev -#define vme_next links.next -#define vme_start links.start -#define vme_end links.end + struct vm_map_links links; /* links to other entries */ +#define vme_prev links.prev +#define vme_next links.next +#define vme_start links.start +#define vme_end links.end - struct vm_map_store store; - union vm_map_object vme_object; /* object I point to */ - vm_object_offset_t vme_offset; /* offset into object */ + struct vm_map_store store; + union vm_map_object vme_object; /* object I point to */ + vm_object_offset_t vme_offset; /* offset into object */ unsigned int - /* boolean_t */ is_shared:1, /* region is shared */ - /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */ - /* boolean_t */ in_transition:1, /* Entry being changed */ - /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */ - /* vm_behavior_t */ behavior:2, /* user paging behavior hint */ - /* behavior is not defined for submap type */ - /* boolean_t */ needs_copy:1, /* object need to be copied? 
*/ - - /* Only in task maps: */ - /* vm_prot_t */ protection:3, /* protection code */ - /* vm_prot_t */ max_protection:3, /* maximum protection */ + /* boolean_t */ is_shared:1, /* region is shared */ + /* boolean_t */ is_sub_map:1, /* Is "object" a submap? */ + /* boolean_t */ in_transition:1, /* Entry being changed */ + /* boolean_t */ needs_wakeup:1, /* Waiters on in_transition */ + /* vm_behavior_t */ behavior:2, /* user paging behavior hint */ + /* behavior is not defined for submap type */ + /* boolean_t */ needs_copy:1, /* object needs to be copied? */ + + /* Only in task maps: */ + /* vm_prot_t */ protection:3, /* protection code */ + /* vm_prot_t */ max_protection:3, /* maximum protection */ /* vm_inherit_t */ inheritance:2, /* inheritance */ - /* boolean_t */ use_pmap:1, /* - * use_pmap is overloaded: - * if "is_sub_map": - * use a nested pmap? - * else (i.e. if object): - * use pmap accounting - * for footprint? - */ - /* boolean_t */ no_cache:1, /* should new pages be cached? */ - /* boolean_t */ permanent:1, /* mapping can not be removed */ - /* boolean_t */ superpage_size:1, /* use superpages of a certain size */ - /* boolean_t */ map_aligned:1, /* align to map's page size */ - /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of - * this entry it is being deleted - * without unwiring them */ - /* boolean_t */ used_for_jit:1, + /* boolean_t */ use_pmap:1, /* + * use_pmap is overloaded: + * if "is_sub_map": + * use a nested pmap? + * else (i.e. if object): + * use pmap accounting + * for footprint? + */ + /* boolean_t */ no_cache:1, /* should new pages be cached? */ + /* boolean_t */ permanent:1, /* mapping cannot be removed */ + /* boolean_t */ superpage_size:1, /* use superpages of a certain size */ + /* boolean_t */ map_aligned:1, /* align to map's page size */ + /* boolean_t */ zero_wired_pages:1, /* zero out the wired pages of + * this entry if it is being deleted + * without unwiring them */ + /* boolean_t */ used_for_jit:1, /* boolean_t */ pmap_cs_associated:1, /* pmap_cs will validate */ - /* boolean_t */ from_reserved_zone:1, /* Allocated from - * kernel reserved zone */ + /* boolean_t */ from_reserved_zone:1, /* Allocated from + * kernel reserved zone */ /* iokit accounting: use the virtual size rather than resident size: */ /* boolean_t */ iokit_acct:1, /* boolean_t */ vme_resilient_codesign:1, /* boolean_t */ vme_resilient_media:1, /* boolean_t */ vme_atomic:1, /* entry cannot be split/coalesced */ - __unused:4; -; + __unused:4; + ; - unsigned short wired_count; /* can be paged if = 0 */ - unsigned short user_wired_count; /* for vm_wire */ -#if DEBUG -#define MAP_ENTRY_CREATION_DEBUG (1) + unsigned short wired_count; /* can be paged if = 0 */ + unsigned short user_wired_count; /* for vm_wire */ +#if DEBUG +#define MAP_ENTRY_CREATION_DEBUG (1) #define MAP_ENTRY_INSERTION_DEBUG (1) -#endif -#if MAP_ENTRY_CREATION_DEBUG - struct vm_map_header *vme_creation_maphdr; - uintptr_t vme_creation_bt[16]; #endif -#if MAP_ENTRY_INSERTION_DEBUG - uintptr_t vme_insertion_bt[16]; +#if MAP_ENTRY_CREATION_DEBUG + struct vm_map_header *vme_creation_maphdr; + uintptr_t vme_creation_bt[16]; +#endif +#if MAP_ENTRY_INSERTION_DEBUG + uintptr_t vme_insertion_bt[16]; #endif }; @@ -374,7 +375,7 @@ struct vm_map_entry { * wired_counts are unsigned short. This value is used to safeguard * against any mishaps due to runaway user programs.
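The VME_OFFSET()/VME_ALIAS() macros reformatted earlier in this hunk pack two values into the single vme_offset field: the offset is asserted page-aligned, so its low VME_ALIAS_MASK (= PAGE_MASK) bits are reused for the entry's alias tag, and each setter re-reads the other half before storing. A standalone sketch of that packing, assuming 4 KiB pages:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK       0xFFFULL        /* assumption: 4 KiB pages */
#define VME_ALIAS_MASK  PAGE_MASK       /* alias lives in the low bits */

static uint64_t vme_offset;             /* models entry->vme_offset */

static uint64_t vme_get_offset(void) { return vme_offset & ~PAGE_MASK; }
static unsigned vme_get_alias(void)  { return (unsigned)(vme_offset & VME_ALIAS_MASK); }

static void
vme_set_offset(uint64_t offset)
{
        unsigned alias = vme_get_alias();   /* preserve the alias, like VME_OFFSET_SET */
        assert((offset & PAGE_MASK) == 0);  /* offset must stay page-aligned */
        vme_offset = offset | alias;
}

static void
vme_set_alias(unsigned alias)
{
        uint64_t offset = vme_get_offset(); /* preserve the offset, like VME_ALIAS_SET */
        vme_offset = offset | (alias & VME_ALIAS_MASK);
}

int
main(void)
{
        vme_set_alias(0x21);
        vme_set_offset(0x5000);
        printf("offset 0x%llx alias 0x%x\n",
            (unsigned long long)vme_get_offset(), vme_get_alias());
        return 0;
}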
*/ -#define MAX_WIRE_COUNT 65535 +#define MAX_WIRE_COUNT 65535 @@ -387,14 +388,14 @@ struct vm_map_entry { struct vm_map_header { - struct vm_map_links links; /* first, last, min, max */ - int nentries; /* Number of entries */ - boolean_t entries_pageable; - /* are map entries pageable? */ + struct vm_map_links links; /* first, last, min, max */ + int nentries; /* Number of entries */ + boolean_t entries_pageable; + /* are map entries pageable? */ #ifdef VM_MAP_STORE_USE_RB - struct rb_head rb_head_store; + struct rb_head rb_head_store; #endif - int page_shift; /* page shift */ + int page_shift; /* page shift */ }; #define VM_MAP_HDR_PAGE_SHIFT(hdr) ((hdr)->page_shift) @@ -417,16 +418,16 @@ struct vm_map_header { * quickly find free space. */ struct _vm_map { - lck_rw_t lock; /* map lock */ - struct vm_map_header hdr; /* Map entry header */ -#define min_offset hdr.links.start /* start of range */ -#define max_offset hdr.links.end /* end of range */ - pmap_t pmap; /* Physical map */ - vm_map_size_t size; /* virtual size */ - vm_map_size_t user_wire_limit;/* rlimit on user locked memory */ - vm_map_size_t user_wire_size; /* current size of user locked memory in this map */ + lck_rw_t lock; /* map lock */ + struct vm_map_header hdr; /* Map entry header */ +#define min_offset hdr.links.start /* start of range */ +#define max_offset hdr.links.end /* end of range */ + pmap_t pmap; /* Physical map */ + vm_map_size_t size; /* virtual size */ + vm_map_size_t user_wire_limit;/* rlimit on user locked memory */ + vm_map_size_t user_wire_size; /* current size of user locked memory in this map */ #if __x86_64__ - vm_map_offset_t vmmap_high_start; + vm_map_offset_t vmmap_high_start; #endif /* __x86_64__ */ union { @@ -434,71 +435,71 @@ struct _vm_map { * If map->disable_vmentry_reuse == TRUE: * the end address of the highest allocated vm_map_entry_t. */ - vm_map_offset_t vmu1_highest_entry_end; + vm_map_offset_t vmu1_highest_entry_end; /* * For a nested VM map: * the lowest address in this nested VM map that we would * expect to be unnested under normal operation (i.e. for * regular copy-on-write on DATA section). 
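Note that struct vm_map_header stores a per-map page_shift rather than a page size; VM_MAP_HDR_PAGE_SHIFT() just reads it back, and consumers derive sizes and masks from the shift (VM_MAP_COPY_PAGE_SIZE() later in this header is literally 1 << shift). A quick model of those derivations, with shifts of 12 and 14 standing in for 4 KiB and 16 KiB pages:

#include <stdio.h>
#include <stdint.h>

/* Derive page size and page truncation from a shift, the way a
 * map header's page_shift is meant to be consumed. */
static uint64_t
map_page_size(int page_shift)
{
        return 1ULL << page_shift;
}

static uint64_t
map_page_trunc(uint64_t addr, int page_shift)
{
        return addr & ~(map_page_size(page_shift) - 1);
}

int
main(void)
{
        printf("%llu %llu\n",
            (unsigned long long)map_page_size(12),      /* 4096 */
            (unsigned long long)map_page_size(14));     /* 16384 */
        printf("0x%llx\n",
            (unsigned long long)map_page_trunc(0x4321, 12)); /* 0x4000 */
        return 0;
}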
*/ - vm_map_offset_t vmu1_lowest_unnestable_start; + vm_map_offset_t vmu1_lowest_unnestable_start; } vmu1; -#define highest_entry_end vmu1.vmu1_highest_entry_end -#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start - decl_lck_mtx_data(, s_lock) /* Lock ref, res fields */ - lck_mtx_ext_t s_lock_ext; - vm_map_entry_t hint; /* hint for quick lookups */ +#define highest_entry_end vmu1.vmu1_highest_entry_end +#define lowest_unnestable_start vmu1.vmu1_lowest_unnestable_start + decl_lck_mtx_data(, s_lock) /* Lock ref, res fields */ + lck_mtx_ext_t s_lock_ext; + vm_map_entry_t hint; /* hint for quick lookups */ union { - struct vm_map_links* vmmap_hole_hint; /* hint for quick hole lookups */ + struct vm_map_links* vmmap_hole_hint; /* hint for quick hole lookups */ struct vm_map_corpse_footprint_header *vmmap_corpse_footprint; } vmmap_u_1; #define hole_hint vmmap_u_1.vmmap_hole_hint #define vmmap_corpse_footprint vmmap_u_1.vmmap_corpse_footprint union{ - vm_map_entry_t _first_free; /* First free space hint */ - struct vm_map_links* _holes; /* links all holes between entries */ - } f_s; /* Union for free space data structures being used */ - -#define first_free f_s._first_free -#define holes_list f_s._holes - - int map_refcnt; /* Reference count */ - -#if TASK_SWAPPER - int res_count; /* Residence count (swap) */ - int sw_state; /* Swap state */ -#endif /* TASK_SWAPPER */ - - unsigned int - /* boolean_t */ wait_for_space:1, /* Should callers wait for space? */ - /* boolean_t */ wiring_required:1, /* All memory wired? */ - /* boolean_t */ no_zero_fill:1, /*No zero fill absent pages */ - /* boolean_t */ mapped_in_other_pmaps:1, /*has this submap been mapped in maps that use a different pmap */ - /* boolean_t */ switch_protect:1, /* Protect map from write faults while switched */ - /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */ - /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */ - /* boolean_t */ holelistenabled:1, - /* boolean_t */ is_nested_map:1, - /* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */ - /* boolean_t */ jit_entry_exists:1, - /* boolean_t */ has_corpse_footprint:1, - /* boolean_t */ warned_delete_gap:1, - /* reserved */ pad:19; - unsigned int timestamp; /* Version number */ + vm_map_entry_t _first_free; /* First free space hint */ + struct vm_map_links* _holes; /* links all holes between entries */ + } f_s; /* Union for free space data structures being used */ + +#define first_free f_s._first_free +#define holes_list f_s._holes + + int map_refcnt; /* Reference count */ + +#if TASK_SWAPPER + int res_count; /* Residence count (swap) */ + int sw_state; /* Swap state */ +#endif /* TASK_SWAPPER */ + + unsigned int + /* boolean_t */ wait_for_space:1, /* Should callers wait for space? */ + /* boolean_t */ wiring_required:1, /* All memory wired? 
*/ + /* boolean_t */ no_zero_fill:1, /* No zero-fill absent pages */ + /* boolean_t */ mapped_in_other_pmaps:1, /* has this submap been mapped in maps that use a different pmap? */ + /* boolean_t */ switch_protect:1, /* Protect map from write faults while switched */ + /* boolean_t */ disable_vmentry_reuse:1, /* All vm entries should keep using newer and higher addresses in the map */ + /* boolean_t */ map_disallow_data_exec:1, /* Disallow execution from data pages on exec-permissive architectures */ + /* boolean_t */ holelistenabled:1, + /* boolean_t */ is_nested_map:1, + /* boolean_t */ map_disallow_new_exec:1, /* Disallow new executable code */ + /* boolean_t */ jit_entry_exists:1, + /* boolean_t */ has_corpse_footprint:1, + /* boolean_t */ warned_delete_gap:1, + /* reserved */ pad:19; + unsigned int timestamp; /* Version number */ }; #define CAST_TO_VM_MAP_ENTRY(x) ((struct vm_map_entry *)(uintptr_t)(x)) #define vm_map_to_entry(map) CAST_TO_VM_MAP_ENTRY(&(map)->hdr.links) -#define vm_map_first_entry(map) ((map)->hdr.links.next) -#define vm_map_last_entry(map) ((map)->hdr.links.prev) +#define vm_map_first_entry(map) ((map)->hdr.links.next) +#define vm_map_last_entry(map) ((map)->hdr.links.prev) -#if TASK_SWAPPER +#if TASK_SWAPPER /* * VM map swap states. There are no transition states. */ -#define MAP_SW_IN 1 /* map is swapped in; residence count > 0 */ -#define MAP_SW_OUT 2 /* map is out (res_count == 0 */ -#endif /* TASK_SWAPPER */ +#define MAP_SW_IN 1 /* map is swapped in; residence count > 0 */ +#define MAP_SW_OUT 2 /* map is out (res_count == 0) */ +#endif /* TASK_SWAPPER */ /* * Type: vm_map_version_t [exported; contents invisible] @@ -515,7 +516,7 @@ struct _vm_map { * Just a timestamp for the main map. */ typedef struct vm_map_version { - unsigned int main_timestamp; + unsigned int main_timestamp; } vm_map_version_t; /* @@ -530,7 +531,7 @@ typedef struct vm_map_version { * at a time. * * Implementation: - * There are three formats for map copy objects. + * There are three formats for map copy objects. * The first is very similar to the main * address map in structure, and as a result, some * of the internal maintenance functions/macros can @@ -545,7 +546,7 @@ typedef struct vm_map_version { * except for placeholder copy objects (see vm_map_copy_copy()). * * The third format is a kernel buffer copy object - for data - * small enough that physical copies were the most efficient + * small enough that physical copies were the most efficient * method. This method uses a zero-sized array unioned with * other format-specific data in the 'c_u' member.
This unsized * array overlaps the other elements and allows us to use this @@ -554,12 +555,12 @@ typedef struct vm_map_version { */ struct vm_map_copy { - int type; -#define VM_MAP_COPY_ENTRY_LIST 1 -#define VM_MAP_COPY_OBJECT 2 -#define VM_MAP_COPY_KERNEL_BUFFER 3 - vm_object_offset_t offset; - vm_map_size_t size; + int type; +#define VM_MAP_COPY_ENTRY_LIST 1 +#define VM_MAP_COPY_OBJECT 2 +#define VM_MAP_COPY_KERNEL_BUFFER 3 + vm_object_offset_t offset; + vm_map_size_t size; union { struct vm_map_header hdr; /* ENTRY_LIST */ vm_object_t object; /* OBJECT */ @@ -568,11 +569,11 @@ struct vm_map_copy { }; -#define cpy_hdr c_u.hdr +#define cpy_hdr c_u.hdr -#define cpy_object c_u.object -#define cpy_kdata c_u.kdata -#define cpy_kdata_hdr_sz (offsetof(struct vm_map_copy, c_u.kdata)) +#define cpy_object c_u.object +#define cpy_kdata c_u.kdata +#define cpy_kdata_hdr_sz (offsetof(struct vm_map_copy, c_u.kdata)) #define VM_MAP_COPY_PAGE_SHIFT(copy) ((copy)->cpy_hdr.page_shift) #define VM_MAP_COPY_PAGE_SIZE(copy) (1 << VM_MAP_COPY_PAGE_SHIFT((copy))) @@ -583,10 +584,10 @@ struct vm_map_copy { */ #define vm_map_copy_to_entry(copy) CAST_TO_VM_MAP_ENTRY(&(copy)->cpy_hdr.links) -#define vm_map_copy_first_entry(copy) \ - ((copy)->cpy_hdr.links.next) -#define vm_map_copy_last_entry(copy) \ - ((copy)->cpy_hdr.links.prev) +#define vm_map_copy_first_entry(copy) \ + ((copy)->cpy_hdr.links.next) +#define vm_map_copy_last_entry(copy) \ + ((copy)->cpy_hdr.links.prev) /* * Macros: vm_map_lock, etc. [internal use only] @@ -596,35 +597,86 @@ struct vm_map_copy { * (See vm_map.c::vm_remap()) */ -#define vm_map_lock_init(map) \ - ((map)->timestamp = 0 , \ +#define vm_map_lock_init(map) \ + ((map)->timestamp = 0 , \ lck_rw_init(&(map)->lock, &vm_map_lck_grp, &vm_map_lck_rw_attr)) -#define vm_map_lock(map) lck_rw_lock_exclusive(&(map)->lock) -#define vm_map_unlock(map) \ - ((map)->timestamp++ , lck_rw_done(&(map)->lock)) -#define vm_map_lock_read(map) lck_rw_lock_shared(&(map)->lock) -#define vm_map_unlock_read(map) lck_rw_done(&(map)->lock) -#define vm_map_lock_write_to_read(map) \ - ((map)->timestamp++ , lck_rw_lock_exclusive_to_shared(&(map)->lock)) -/* lock_read_to_write() returns FALSE on failure. Macro evaluates to +#define vm_map_lock(map) \ + MACRO_BEGIN \ + DTRACE_VM(vm_map_lock_w); \ + lck_rw_lock_exclusive(&(map)->lock); \ + MACRO_END + +#define vm_map_unlock(map) \ + MACRO_BEGIN \ + DTRACE_VM(vm_map_unlock_w); \ + (map)->timestamp++; \ + lck_rw_done(&(map)->lock); \ + MACRO_END + +#define vm_map_lock_read(map) \ + MACRO_BEGIN \ + DTRACE_VM(vm_map_lock_r); \ + lck_rw_lock_shared(&(map)->lock); \ + MACRO_END + +#define vm_map_unlock_read(map) \ + MACRO_BEGIN \ + DTRACE_VM(vm_map_unlock_r); \ + lck_rw_done(&(map)->lock); \ + MACRO_END + +#define vm_map_lock_write_to_read(map) \ + MACRO_BEGIN \ + DTRACE_VM(vm_map_lock_downgrade); \ + (map)->timestamp++; \ + lck_rw_lock_exclusive_to_shared(&(map)->lock); \ + MACRO_END + +/* + * lock_read_to_write() returns FALSE on failure. This function evaluates to * zero on success and non-zero value on failure. 
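As the comment block above explains, struct vm_map_copy is a tagged union: 'type' selects which member of c_u is live, and the zero-sized kdata array (a GNU C extension, as used here) overlays the union so inline data can begin at cpy_kdata_hdr_sz. A hedged sketch of the dispatch that layout implies, with stand-in payload types:

#include <stdio.h>

#define VM_MAP_COPY_ENTRY_LIST    1
#define VM_MAP_COPY_OBJECT        2
#define VM_MAP_COPY_KERNEL_BUFFER 3

struct copy_model {
        int type;
        union {
                const char *hdr;     /* stand-in for struct vm_map_header */
                const char *object;  /* stand-in for vm_object_t */
                char kdata[0];       /* zero-sized array overlays the union */
        } c_u;
};

static void
copy_dispatch(const struct copy_model *copy)
{
        switch (copy->type) {
        case VM_MAP_COPY_ENTRY_LIST:
                printf("walk the entry list in c_u.hdr\n");
                break;
        case VM_MAP_COPY_OBJECT:
                printf("use the backing object in c_u.object\n");
                break;
        case VM_MAP_COPY_KERNEL_BUFFER:
                printf("inline data starts at %p\n", (const void *)copy->c_u.kdata);
                break;
        }
}

int
main(void)
{
        struct copy_model c = { .type = VM_MAP_COPY_KERNEL_BUFFER };

        copy_dispatch(&c);
        return 0;
}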
*/ -#define vm_map_lock_read_to_write(map) (lck_rw_lock_shared_to_exclusive(&(map)->lock) != TRUE) +static inline int +vm_map_lock_read_to_write(vm_map_t map) +{ + if (lck_rw_lock_shared_to_exclusive(&(map)->lock)) { + DTRACE_VM(vm_map_lock_upgrade); + return 0; + } + return 1; +} + +static inline boolean_t +vm_map_try_lock(vm_map_t map) +{ + if (lck_rw_try_lock_exclusive(&(map)->lock)) { + DTRACE_VM(vm_map_lock_w); + return TRUE; + } + return FALSE; +} -#define vm_map_try_lock(map) lck_rw_try_lock_exclusive(&(map)->lock) -#define vm_map_try_lock_read(map) lck_rw_try_lock_shared(&(map)->lock) +static inline boolean_t +vm_map_try_lock_read(vm_map_t map) +{ + if (lck_rw_try_lock_shared(&(map)->lock)) { + DTRACE_VM(vm_map_lock_r); + return TRUE; + } + return FALSE; +} #if MACH_ASSERT || DEBUG #define vm_map_lock_assert_held(map) \ lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_HELD) -#define vm_map_lock_assert_shared(map) \ +#define vm_map_lock_assert_shared(map) \ lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_SHARED) #define vm_map_lock_assert_exclusive(map) \ lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_EXCLUSIVE) #define vm_map_lock_assert_notheld(map) \ lck_rw_assert(&(map)->lock, LCK_RW_ASSERT_NOTHELD) -#else /* MACH_ASSERT || DEBUG */ +#else /* MACH_ASSERT || DEBUG */ #define vm_map_lock_assert_held(map) #define vm_map_lock_assert_shared(map) #define vm_map_lock_assert_exclusive(map) @@ -636,447 +688,447 @@ struct vm_map_copy { */ /* Initialize the module */ -extern void vm_map_init(void); +extern void vm_map_init(void); -extern void vm_kernel_reserved_entry_init(void); +extern void vm_kernel_reserved_entry_init(void); /* Allocate a range in the specified virtual address map and * return the entry allocated for that range. */ extern kern_return_t vm_map_find_space( - vm_map_t map, - vm_map_address_t *address, /* OUT */ - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - vm_map_entry_t *o_entry); /* OUT */ + vm_map_t map, + vm_map_address_t *address, /* OUT */ + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_entry_t *o_entry); /* OUT */ extern void vm_map_clip_start( - vm_map_t map, - vm_map_entry_t entry, - vm_map_offset_t endaddr); + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t endaddr); extern void vm_map_clip_end( - vm_map_t map, - vm_map_entry_t entry, - vm_map_offset_t endaddr); + vm_map_t map, + vm_map_entry_t entry, + vm_map_offset_t endaddr); extern boolean_t vm_map_entry_should_cow_for_true_share( - vm_map_entry_t entry); + vm_map_entry_t entry); /* Lookup map entry containing or the specified address in the given map */ -extern boolean_t vm_map_lookup_entry( - vm_map_t map, - vm_map_address_t address, - vm_map_entry_t *entry); /* OUT */ - -extern void vm_map_copy_remap( - vm_map_t map, - vm_map_entry_t where, - vm_map_copy_t copy, - vm_map_offset_t adjustment, - vm_prot_t cur_prot, - vm_prot_t max_prot, - vm_inherit_t inheritance); +extern boolean_t vm_map_lookup_entry( + vm_map_t map, + vm_map_address_t address, + vm_map_entry_t *entry); /* OUT */ + +extern void vm_map_copy_remap( + vm_map_t map, + vm_map_entry_t where, + vm_map_copy_t copy, + vm_map_offset_t adjustment, + vm_prot_t cur_prot, + vm_prot_t max_prot, + vm_inherit_t inheritance); /* Find the VM object, offset, and protection for a given virtual address * in the specified map, assuming a page fault of the type specified. 
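The rewrite above turns vm_map_lock_read_to_write() from a macro into an inline that keeps the old contract (zero on success, non-zero on failure) while adding a DTrace probe on the successful upgrade. In xnu, a failed shared-to-exclusive upgrade also drops the shared hold, so the caller must retake the lock and re-validate; the userspace model below makes that restart loop explicit (all functions here are stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool upgrade_succeeds = false;   /* toggled to exercise both paths */

static void lock_read(void)    { printf("read-locked\n"); }
static void unlock_write(void) { printf("write-unlocked\n"); }

static int
lock_read_to_write(void)
{
        /* Models the contract: 0 = upgraded to exclusive; nonzero = failed,
         * and the read hold has already been dropped. */
        if (upgrade_succeeds) {
                printf("upgraded to write\n");
                return 0;
        }
        printf("upgrade failed; read hold dropped\n");
        return 1;
}

int
main(void)
{
restart:
        lock_read();
        /* ... inspect state under the shared lock ... */
        if (lock_read_to_write()) {
                /* The failed upgrade already released the lock: start
                 * over and re-validate, since the state may have changed. */
                upgrade_succeeds = true;        /* model: let the retry win */
                goto restart;
        }
        /* ... mutate state under the exclusive lock ... */
        unlock_write();
        return 0;
}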
*/ -extern kern_return_t vm_map_lookup_locked( - vm_map_t *var_map, /* IN/OUT */ - vm_map_address_t vaddr, - vm_prot_t fault_type, - int object_lock_type, - vm_map_version_t *out_version, /* OUT */ - vm_object_t *object, /* OUT */ - vm_object_offset_t *offset, /* OUT */ - vm_prot_t *out_prot, /* OUT */ - boolean_t *wired, /* OUT */ - vm_object_fault_info_t fault_info, /* OUT */ - vm_map_t *real_map); /* OUT */ +extern kern_return_t vm_map_lookup_locked( + vm_map_t *var_map, /* IN/OUT */ + vm_map_address_t vaddr, + vm_prot_t fault_type, + int object_lock_type, + vm_map_version_t *out_version, /* OUT */ + vm_object_t *object, /* OUT */ + vm_object_offset_t *offset, /* OUT */ + vm_prot_t *out_prot, /* OUT */ + boolean_t *wired, /* OUT */ + vm_object_fault_info_t fault_info, /* OUT */ + vm_map_t *real_map); /* OUT */ /* Verifies that the map has not changed since the given version. */ -extern boolean_t vm_map_verify( - vm_map_t map, - vm_map_version_t *version); /* REF */ - -extern vm_map_entry_t vm_map_entry_insert( - vm_map_t map, - vm_map_entry_t insp_entry, - vm_map_offset_t start, - vm_map_offset_t end, - vm_object_t object, - vm_object_offset_t offset, - boolean_t needs_copy, - boolean_t is_shared, - boolean_t in_transition, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_behavior_t behavior, - vm_inherit_t inheritance, - unsigned wired_count, - boolean_t no_cache, - boolean_t permanent, - unsigned int superpage_size, - boolean_t clear_map_aligned, - boolean_t is_submap, - boolean_t used_for_jit, - int alias); +extern boolean_t vm_map_verify( + vm_map_t map, + vm_map_version_t *version); /* REF */ + +extern vm_map_entry_t vm_map_entry_insert( + vm_map_t map, + vm_map_entry_t insp_entry, + vm_map_offset_t start, + vm_map_offset_t end, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + boolean_t is_shared, + boolean_t in_transition, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_behavior_t behavior, + vm_inherit_t inheritance, + unsigned wired_count, + boolean_t no_cache, + boolean_t permanent, + unsigned int superpage_size, + boolean_t clear_map_aligned, + boolean_t is_submap, + boolean_t used_for_jit, + int alias); /* * Functions implemented as macros */ -#define vm_map_min(map) ((map)->min_offset) - /* Lowest valid address in - * a map */ +#define vm_map_min(map) ((map)->min_offset) +/* Lowest valid address in + * a map */ -#define vm_map_max(map) ((map)->max_offset) - /* Highest valid address */ +#define vm_map_max(map) ((map)->max_offset) +/* Highest valid address */ -#define vm_map_pmap(map) ((map)->pmap) - /* Physical map associated - * with this address map */ +#define vm_map_pmap(map) ((map)->pmap) +/* Physical map associated +* with this address map */ /* * Macros/functions for map residence counts and swapin/out of vm maps */ -#if TASK_SWAPPER +#if TASK_SWAPPER -#if MACH_ASSERT +#if MACH_ASSERT /* Gain a reference to an existing map */ -extern void vm_map_reference( - vm_map_t map); +extern void vm_map_reference( + vm_map_t map); /* Lose a residence count */ -extern void vm_map_res_deallocate( - vm_map_t map); +extern void vm_map_res_deallocate( + vm_map_t map); /* Gain a residence count on a map */ -extern void vm_map_res_reference( - vm_map_t map); +extern void vm_map_res_reference( + vm_map_t map); /* Gain reference & residence counts to possibly swapped-out map */ -extern void vm_map_reference_swap( - vm_map_t map); - -#else /* MACH_ASSERT */ - -#define vm_map_reference(map) \ -MACRO_BEGIN \ - vm_map_t Map = (map); \ - if 
(Map) { \ - lck_mtx_lock(&Map->s_lock); \ - Map->res_count++; \ - Map->map_refcnt++; \ - lck_mtx_unlock(&Map->s_lock); \ - } \ +extern void vm_map_reference_swap( + vm_map_t map); + +#else /* MACH_ASSERT */ + +#define vm_map_reference(map) \ +MACRO_BEGIN \ + vm_map_t Map = (map); \ + if (Map) { \ + lck_mtx_lock(&Map->s_lock); \ + Map->res_count++; \ + Map->map_refcnt++; \ + lck_mtx_unlock(&Map->s_lock); \ + } \ MACRO_END -#define vm_map_res_reference(map) \ -MACRO_BEGIN \ - vm_map_t Lmap = (map); \ - if (Lmap->res_count == 0) { \ - lck_mtx_unlock(&Lmap->s_lock);\ - vm_map_lock(Lmap); \ - vm_map_swapin(Lmap); \ - lck_mtx_lock(&Lmap->s_lock); \ - ++Lmap->res_count; \ - vm_map_unlock(Lmap); \ - } else \ - ++Lmap->res_count; \ +#define vm_map_res_reference(map) \ +MACRO_BEGIN \ + vm_map_t Lmap = (map); \ + if (Lmap->res_count == 0) { \ + lck_mtx_unlock(&Lmap->s_lock);\ + vm_map_lock(Lmap); \ + vm_map_swapin(Lmap); \ + lck_mtx_lock(&Lmap->s_lock); \ + ++Lmap->res_count; \ + vm_map_unlock(Lmap); \ + } else \ + ++Lmap->res_count; \ MACRO_END -#define vm_map_res_deallocate(map) \ -MACRO_BEGIN \ - vm_map_t Map = (map); \ - if (--Map->res_count == 0) { \ - lck_mtx_unlock(&Map->s_lock); \ - vm_map_lock(Map); \ - vm_map_swapout(Map); \ - vm_map_unlock(Map); \ - lck_mtx_lock(&Map->s_lock); \ - } \ +#define vm_map_res_deallocate(map) \ +MACRO_BEGIN \ + vm_map_t Map = (map); \ + if (--Map->res_count == 0) { \ + lck_mtx_unlock(&Map->s_lock); \ + vm_map_lock(Map); \ + vm_map_swapout(Map); \ + vm_map_unlock(Map); \ + lck_mtx_lock(&Map->s_lock); \ + } \ MACRO_END -#define vm_map_reference_swap(map) \ -MACRO_BEGIN \ - vm_map_t Map = (map); \ - lck_mtx_lock(&Map->s_lock); \ - ++Map->map_refcnt; \ - vm_map_res_reference(Map); \ - lck_mtx_unlock(&Map->s_lock); \ +#define vm_map_reference_swap(map) \ +MACRO_BEGIN \ + vm_map_t Map = (map); \ + lck_mtx_lock(&Map->s_lock); \ + ++Map->map_refcnt; \ + vm_map_res_reference(Map); \ + lck_mtx_unlock(&Map->s_lock); \ MACRO_END -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ -extern void vm_map_swapin( - vm_map_t map); +extern void vm_map_swapin( + vm_map_t map); -extern void vm_map_swapout( - vm_map_t map); +extern void vm_map_swapout( + vm_map_t map); -#else /* TASK_SWAPPER */ +#else /* TASK_SWAPPER */ -#define vm_map_reference(map) \ -MACRO_BEGIN \ - vm_map_t Map = (map); \ - if (Map) { \ - lck_mtx_lock(&Map->s_lock); \ - Map->map_refcnt++; \ - lck_mtx_unlock(&Map->s_lock); \ - } \ +#define vm_map_reference(map) \ +MACRO_BEGIN \ + vm_map_t Map = (map); \ + if (Map) { \ + lck_mtx_lock(&Map->s_lock); \ + Map->map_refcnt++; \ + lck_mtx_unlock(&Map->s_lock); \ + } \ MACRO_END -#define vm_map_reference_swap(map) vm_map_reference(map) +#define vm_map_reference_swap(map) vm_map_reference(map) #define vm_map_res_reference(map) #define vm_map_res_deallocate(map) -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ /* * Submap object. Must be used to create memory to be put * in a submap by vm_map_submap. */ -extern vm_object_t vm_submap_object; +extern vm_object_t vm_submap_object; /* * Wait and wakeup macros for in_transition map entries. 
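Both expansions of vm_map_reference() above share one shape: tolerate NULL, take the map's s_lock, bump map_refcnt (plus res_count in TASK_SWAPPER builds), release. The non-swapper path as a mutex-protected userspace model:

#include <pthread.h>
#include <stdio.h>

struct map_model {
        pthread_mutex_t s_lock;
        int map_refcnt;
};

static void
map_reference(struct map_model *map)
{
        if (map) {                              /* vm_map_reference tolerates NULL */
                pthread_mutex_lock(&map->s_lock);
                map->map_refcnt++;              /* Map->map_refcnt++ under s_lock */
                pthread_mutex_unlock(&map->s_lock);
        }
}

int
main(void)
{
        struct map_model m;

        pthread_mutex_init(&m.s_lock, NULL);
        m.map_refcnt = 1;
        map_reference(&m);
        printf("refcnt = %d\n", m.map_refcnt);  /* 2 */
        pthread_mutex_destroy(&m.s_lock);
        return 0;
}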
*/ -#define vm_map_entry_wait(map, interruptible) \ - ((map)->timestamp++ , \ +#define vm_map_entry_wait(map, interruptible) \ + ((map)->timestamp++ , \ lck_rw_sleep(&(map)->lock, LCK_SLEEP_EXCLUSIVE|LCK_SLEEP_PROMOTED_PRI, \ - (event_t)&(map)->hdr, interruptible)) + (event_t)&(map)->hdr, interruptible)) #define vm_map_entry_wakeup(map) \ thread_wakeup((event_t)(&(map)->hdr)) -#define vm_map_ref_fast(map) \ - MACRO_BEGIN \ - lck_mtx_lock(&map->s_lock); \ - map->ref_count++; \ - vm_map_res_reference(map); \ - lck_mtx_unlock(&map->s_lock); \ +#define vm_map_ref_fast(map) \ + MACRO_BEGIN \ + lck_mtx_lock(&map->s_lock); \ + map->ref_count++; \ + vm_map_res_reference(map); \ + lck_mtx_unlock(&map->s_lock); \ MACRO_END -#define vm_map_dealloc_fast(map) \ - MACRO_BEGIN \ - int c; \ - \ - lck_mtx_lock(&map->s_lock); \ - c = --map->ref_count; \ - if (c > 0) \ - vm_map_res_deallocate(map); \ - lck_mtx_unlock(&map->s_lock); \ - if (c == 0) \ - vm_map_destroy(map); \ +#define vm_map_dealloc_fast(map) \ + MACRO_BEGIN \ + int c; \ + \ + lck_mtx_lock(&map->s_lock); \ + c = --map->ref_count; \ + if (c > 0) \ + vm_map_res_deallocate(map); \ + lck_mtx_unlock(&map->s_lock); \ + if (c == 0) \ + vm_map_destroy(map); \ MACRO_END /* simplify map entries */ -extern void vm_map_simplify_entry( - vm_map_t map, - vm_map_entry_t this_entry); -extern void vm_map_simplify( - vm_map_t map, - vm_map_offset_t start); +extern void vm_map_simplify_entry( + vm_map_t map, + vm_map_entry_t this_entry); +extern void vm_map_simplify( + vm_map_t map, + vm_map_offset_t start); /* Move the information in a map copy object to a new map copy object */ -extern vm_map_copy_t vm_map_copy_copy( - vm_map_copy_t copy); +extern vm_map_copy_t vm_map_copy_copy( + vm_map_copy_t copy); /* Create a copy object from an object. 
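vm_map_entry_wait() above bumps the map timestamp and sleeps on the map header as the wait event, with lck_rw_sleep() releasing the map lock while blocked; vm_map_entry_wakeup() is the matching thread_wakeup(). Combined with the entry's needs_wakeup bit ("Waiters on in_transition"), this suggests the familiar handshake, modeled here with a condition variable:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Condition-variable model of the in_transition/needs_wakeup handshake. */
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  map_hdr_event = PTHREAD_COND_INITIALIZER;
static bool in_transition = true;
static bool needs_wakeup = false;

static void *
finisher(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&map_lock);
        in_transition = false;          /* the entry change is complete */
        if (needs_wakeup) {
                needs_wakeup = false;
                pthread_cond_broadcast(&map_hdr_event); /* vm_map_entry_wakeup */
        }
        pthread_mutex_unlock(&map_lock);
        return NULL;
}

int
main(void)
{
        pthread_t t;

        pthread_mutex_lock(&map_lock);
        pthread_create(&t, NULL, finisher, NULL);
        while (in_transition) {
                needs_wakeup = true;    /* tell the finisher we are waiting */
                /* vm_map_entry_wait: sleeps with the map lock released */
                pthread_cond_wait(&map_hdr_event, &map_lock);
        }
        pthread_mutex_unlock(&map_lock);
        pthread_join(t, NULL);
        printf("entry transition complete\n");
        return 0;
}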
*/ -extern kern_return_t vm_map_copyin_object( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - vm_map_copy_t *copy_result); /* OUT */ +extern kern_return_t vm_map_copyin_object( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + vm_map_copy_t *copy_result); /* OUT */ -extern kern_return_t vm_map_random_address_for_size( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t size); +extern kern_return_t vm_map_random_address_for_size( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size); /* Enter a mapping */ -extern kern_return_t vm_map_enter( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - vm_object_t object, - vm_object_offset_t offset, - boolean_t needs_copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); +extern kern_return_t vm_map_enter( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); #if __arm64__ -extern kern_return_t vm_map_enter_fourk( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - vm_object_t object, - vm_object_offset_t offset, - boolean_t needs_copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); +extern kern_return_t vm_map_enter_fourk( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_object_t object, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); #endif /* __arm64__ */ /* XXX should go away - replaced with regular enter of contig object */ -extern kern_return_t vm_map_enter_cpm( - vm_map_t map, - vm_map_address_t *addr, - vm_map_size_t size, - int flags); +extern kern_return_t vm_map_enter_cpm( + vm_map_t map, + vm_map_address_t *addr, + vm_map_size_t size, + int flags); extern kern_return_t vm_map_remap( - vm_map_t target_map, - vm_map_offset_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - vm_map_t src_map, - vm_map_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance); + vm_map_t target_map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + vm_map_t src_map, + vm_map_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance); /* * Read and write from a kernel buffer to a specified map. 
*/ -extern kern_return_t vm_map_write_user( - vm_map_t map, - void *src_p, - vm_map_offset_t dst_addr, - vm_size_t size); - -extern kern_return_t vm_map_read_user( - vm_map_t map, - vm_map_offset_t src_addr, - void *dst_p, - vm_size_t size); +extern kern_return_t vm_map_write_user( + vm_map_t map, + void *src_p, + vm_map_offset_t dst_addr, + vm_size_t size); + +extern kern_return_t vm_map_read_user( + vm_map_t map, + vm_map_offset_t src_addr, + void *dst_p, + vm_size_t size); /* Create a new task map using an existing task map as a template. */ -extern vm_map_t vm_map_fork( - ledger_t ledger, - vm_map_t old_map, - int options); -#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001 -#define VM_MAP_FORK_PRESERVE_PURGEABLE 0x00000002 -#define VM_MAP_FORK_CORPSE_FOOTPRINT 0x00000004 +extern vm_map_t vm_map_fork( + ledger_t ledger, + vm_map_t old_map, + int options); +#define VM_MAP_FORK_SHARE_IF_INHERIT_NONE 0x00000001 +#define VM_MAP_FORK_PRESERVE_PURGEABLE 0x00000002 +#define VM_MAP_FORK_CORPSE_FOOTPRINT 0x00000004 /* Change inheritance */ -extern kern_return_t vm_map_inherit( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_inherit_t new_inheritance); +extern kern_return_t vm_map_inherit( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_inherit_t new_inheritance); /* Add or remove machine-dependent attributes from map regions */ -extern kern_return_t vm_map_machine_attribute( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value); /* IN/OUT */ - -extern kern_return_t vm_map_msync( - vm_map_t map, - vm_map_address_t address, - vm_map_size_t size, - vm_sync_t sync_flags); +extern kern_return_t vm_map_machine_attribute( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value); /* IN/OUT */ + +extern kern_return_t vm_map_msync( + vm_map_t map, + vm_map_address_t address, + vm_map_size_t size, + vm_sync_t sync_flags); /* Set paging behavior */ -extern kern_return_t vm_map_behavior_set( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_behavior_t new_behavior); +extern kern_return_t vm_map_behavior_set( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_behavior_t new_behavior); extern kern_return_t vm_map_region( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t *size, - vm_region_flavor_t flavor, - vm_region_info_t info, - mach_msg_type_number_t *count, - mach_port_t *object_name); + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t *size, + vm_region_flavor_t flavor, + vm_region_info_t info, + mach_msg_type_number_t *count, + mach_port_t *object_name); extern kern_return_t vm_map_region_recurse_64( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t *size, - natural_t *nesting_depth, - vm_region_submap_info_64_t info, - mach_msg_type_number_t *count); + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t *size, + natural_t *nesting_depth, + vm_region_submap_info_64_t info, + mach_msg_type_number_t *count); extern kern_return_t vm_map_page_query_internal( - vm_map_t map, - vm_map_offset_t offset, - int *disposition, - int *ref_count); + vm_map_t map, + vm_map_offset_t offset, + int *disposition, + int *ref_count); extern kern_return_t vm_map_query_volatile( - vm_map_t map, - mach_vm_size_t *volatile_virtual_size_p, - mach_vm_size_t *volatile_resident_size_p, - mach_vm_size_t *volatile_compressed_size_p, - mach_vm_size_t 
*volatile_pmap_size_p, - mach_vm_size_t *volatile_compressed_pmap_size_p); - -extern kern_return_t vm_map_submap( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_t submap, - vm_map_offset_t offset, - boolean_t use_pmap); + vm_map_t map, + mach_vm_size_t *volatile_virtual_size_p, + mach_vm_size_t *volatile_resident_size_p, + mach_vm_size_t *volatile_compressed_size_p, + mach_vm_size_t *volatile_pmap_size_p, + mach_vm_size_t *volatile_compressed_pmap_size_p); + +extern kern_return_t vm_map_submap( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_t submap, + vm_map_offset_t offset, + boolean_t use_pmap); extern void vm_map_submap_pmap_clean( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_map_t sub_map, - vm_map_offset_t offset); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_map_t sub_map, + vm_map_offset_t offset); /* Convert from a map entry port to a map */ extern vm_map_t convert_port_entry_to_map( - ipc_port_t port); + ipc_port_t port); /* Convert from a port to a vm_object */ extern vm_object_t convert_port_entry_to_object( - ipc_port_t port); + ipc_port_t port); extern kern_return_t vm_map_set_cache_attr( - vm_map_t map, - vm_map_offset_t va); + vm_map_t map, + vm_map_offset_t va); /* definitions related to overriding the NX behavior */ -#define VM_ABI_32 0x1 -#define VM_ABI_64 0x2 +#define VM_ABI_32 0x1 +#define VM_ABI_64 0x2 extern int override_nx(vm_map_t map, uint32_t user_tag); #if PMAP_CS extern kern_return_t vm_map_entry_cs_associate( - vm_map_t map, - vm_map_entry_t entry, - vm_map_kernel_flags_t vmk_flags); + vm_map_t map, + vm_map_entry_t entry, + vm_map_kernel_flags_t vmk_flags); #endif /* PMAP_CS */ extern void vm_map_region_top_walk( - vm_map_entry_t entry, + vm_map_entry_t entry, vm_region_top_info_t top); extern void vm_map_region_walk( vm_map_t map, @@ -1090,412 +1142,416 @@ extern void vm_map_region_walk( struct vm_map_corpse_footprint_header { - vm_size_t cf_size; /* allocated buffer size */ - uint32_t cf_last_region; /* offset of last region in buffer */ + vm_size_t cf_size; /* allocated buffer size */ + uint32_t cf_last_region; /* offset of last region in buffer */ union { uint32_t cfu_last_zeroes; /* during creation: - * number of "zero" dispositions at - * end of last region */ + * number of "zero" dispositions at + * end of last region */ uint32_t cfu_hint_region; /* during lookup: - * offset of last looked up region */ + * offset of last looked up region */ #define cf_last_zeroes cfu.cfu_last_zeroes #define cf_hint_region cfu.cfu_hint_region } cfu; }; struct vm_map_corpse_footprint_region { - vm_map_offset_t cfr_vaddr; /* region start virtual address */ - uint32_t cfr_num_pages; /* number of pages in this "region" */ - unsigned char cfr_disposition[0]; /* disposition of each page */ + vm_map_offset_t cfr_vaddr; /* region start virtual address */ + uint32_t cfr_num_pages; /* number of pages in this "region" */ + unsigned char cfr_disposition[0]; /* disposition of each page */ } __attribute__((packed)); extern kern_return_t vm_map_corpse_footprint_collect( - vm_map_t old_map, - vm_map_entry_t old_entry, - vm_map_t new_map); + vm_map_t old_map, + vm_map_entry_t old_entry, + vm_map_t new_map); extern void vm_map_corpse_footprint_collect_done( - vm_map_t new_map); + vm_map_t new_map); extern kern_return_t vm_map_corpse_footprint_query_page_info( - vm_map_t map, - vm_map_offset_t va, - int *disp); + vm_map_t map, + vm_map_offset_t va, + int *disp); extern void 
vm_map_copy_footprint_ledgers( - task_t old_task, - task_t new_task); + task_t old_task, + task_t new_task); extern void vm_map_copy_ledger( - task_t old_task, - task_t new_task, - int ledger_entry); + task_t old_task, + task_t new_task, + int ledger_entry); #endif /* MACH_KERNEL_PRIVATE */ __BEGIN_DECLS /* Create an empty map */ -extern vm_map_t vm_map_create( - pmap_t pmap, - vm_map_offset_t min_off, - vm_map_offset_t max_off, - boolean_t pageable); +extern vm_map_t vm_map_create( + pmap_t pmap, + vm_map_offset_t min_off, + vm_map_offset_t max_off, + boolean_t pageable); extern vm_map_t vm_map_create_options( - pmap_t pmap, - vm_map_offset_t min_off, - vm_map_offset_t max_off, - int options); -#define VM_MAP_CREATE_PAGEABLE 0x00000001 -#define VM_MAP_CREATE_CORPSE_FOOTPRINT 0x00000002 + pmap_t pmap, + vm_map_offset_t min_off, + vm_map_offset_t max_off, + int options); +#define VM_MAP_CREATE_PAGEABLE 0x00000001 +#define VM_MAP_CREATE_CORPSE_FOOTPRINT 0x00000002 #define VM_MAP_CREATE_ALL_OPTIONS (VM_MAP_CREATE_PAGEABLE | \ - VM_MAP_CREATE_CORPSE_FOOTPRINT) + VM_MAP_CREATE_CORPSE_FOOTPRINT) -extern void vm_map_disable_hole_optimization(vm_map_t map); +extern void vm_map_disable_hole_optimization(vm_map_t map); /* Get rid of a map */ -extern void vm_map_destroy( - vm_map_t map, - int flags); +extern void vm_map_destroy( + vm_map_t map, + int flags); /* Lose a reference */ -extern void vm_map_deallocate( - vm_map_t map); +extern void vm_map_deallocate( + vm_map_t map); -extern vm_map_t vm_map_switch( - vm_map_t map); +extern vm_map_t vm_map_switch( + vm_map_t map); /* Change protection */ -extern kern_return_t vm_map_protect( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t new_prot, - boolean_t set_max); +extern kern_return_t vm_map_protect( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t new_prot, + boolean_t set_max); /* Check protection */ extern boolean_t vm_map_check_protection( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t protection); + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t protection); /* wire down a region */ #ifdef XNU_KERNEL_PRIVATE -extern kern_return_t vm_map_wire_kernel( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t access_type, - vm_tag_t tag, - boolean_t user_wire); - -extern kern_return_t vm_map_wire_and_extract_kernel( - vm_map_t map, - vm_map_offset_t start, - vm_prot_t access_type, - vm_tag_t tag, - boolean_t user_wire, - ppnum_t *physpage_p); +extern kern_return_t vm_map_wire_kernel( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t access_type, + vm_tag_t tag, + boolean_t user_wire); + +extern kern_return_t vm_map_wire_and_extract_kernel( + vm_map_t map, + vm_map_offset_t start, + vm_prot_t access_type, + vm_tag_t tag, + boolean_t user_wire, + ppnum_t *physpage_p); /* kext exported versions */ -extern kern_return_t vm_map_wire_external( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t access_type, - boolean_t user_wire); +extern kern_return_t vm_map_wire_external( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t access_type, + boolean_t user_wire); -extern kern_return_t vm_map_wire_and_extract_external( - vm_map_t map, - vm_map_offset_t start, - vm_prot_t access_type, - boolean_t user_wire, - ppnum_t *physpage_p); +extern kern_return_t vm_map_wire_and_extract_external( + vm_map_t map, + vm_map_offset_t start, + vm_prot_t access_type, + 
boolean_t user_wire, + ppnum_t *physpage_p); #else /* XNU_KERNEL_PRIVATE */ -extern kern_return_t vm_map_wire( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_prot_t access_type, - boolean_t user_wire); +extern kern_return_t vm_map_wire( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_prot_t access_type, + boolean_t user_wire); -extern kern_return_t vm_map_wire_and_extract( - vm_map_t map, - vm_map_offset_t start, - vm_prot_t access_type, - boolean_t user_wire, - ppnum_t *physpage_p); +extern kern_return_t vm_map_wire_and_extract( + vm_map_t map, + vm_map_offset_t start, + vm_prot_t access_type, + boolean_t user_wire, + ppnum_t *physpage_p); #endif /* !XNU_KERNEL_PRIVATE */ /* unwire a region */ -extern kern_return_t vm_map_unwire( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t user_wire); +extern kern_return_t vm_map_unwire( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t user_wire); #ifdef XNU_KERNEL_PRIVATE /* Enter a mapping of a memory object */ -extern kern_return_t vm_map_enter_mem_object( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t needs_copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); +extern kern_return_t vm_map_enter_mem_object( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); /* Enter a mapping of a memory object */ -extern kern_return_t vm_map_enter_mem_object_prefault( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - vm_prot_t cur_protection, - vm_prot_t max_protection, - upl_page_list_ptr_t page_list, - unsigned int page_list_count); +extern kern_return_t vm_map_enter_mem_object_prefault( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + vm_prot_t cur_protection, + vm_prot_t max_protection, + upl_page_list_ptr_t page_list, + unsigned int page_list_count); /* Enter a mapping of a memory object */ -extern kern_return_t vm_map_enter_mem_object_control( - vm_map_t map, - vm_map_offset_t *address, - vm_map_size_t size, - vm_map_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - memory_object_control_t control, - vm_object_offset_t offset, - boolean_t needs_copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); +extern kern_return_t vm_map_enter_mem_object_control( + vm_map_t map, + vm_map_offset_t *address, + vm_map_size_t size, + vm_map_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + memory_object_control_t control, + vm_object_offset_t offset, + boolean_t needs_copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); #endif /* !XNU_KERNEL_PRIVATE */ /* Deallocate a region */ -extern kern_return_t vm_map_remove( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - 
boolean_t flags); +extern kern_return_t vm_map_remove( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t flags); /* Deallocate a region when the map is already locked */ -extern kern_return_t vm_map_remove_locked( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - boolean_t flags); +extern kern_return_t vm_map_remove_locked( + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + boolean_t flags); /* Discard a copy without using it */ -extern void vm_map_copy_discard( - vm_map_copy_t copy); +extern void vm_map_copy_discard( + vm_map_copy_t copy); /* Overwrite existing memory with a copy */ -extern kern_return_t vm_map_copy_overwrite( - vm_map_t dst_map, - vm_map_address_t dst_addr, - vm_map_copy_t copy, - boolean_t interruptible); +extern kern_return_t vm_map_copy_overwrite( + vm_map_t dst_map, + vm_map_address_t dst_addr, + vm_map_copy_t copy, + boolean_t interruptible); /* returns TRUE if size of vm_map_copy == size parameter FALSE otherwise */ -extern boolean_t vm_map_copy_validate_size( - vm_map_t dst_map, - vm_map_copy_t copy, - vm_map_size_t *size); +extern boolean_t vm_map_copy_validate_size( + vm_map_t dst_map, + vm_map_copy_t copy, + vm_map_size_t *size); /* Place a copy into a map */ -extern kern_return_t vm_map_copyout( - vm_map_t dst_map, - vm_map_address_t *dst_addr, /* OUT */ - vm_map_copy_t copy); +extern kern_return_t vm_map_copyout( + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy); extern kern_return_t vm_map_copyout_size( - vm_map_t dst_map, - vm_map_address_t *dst_addr, /* OUT */ - vm_map_copy_t copy, - vm_map_size_t copy_size); - -extern kern_return_t vm_map_copyout_internal( - vm_map_t dst_map, - vm_map_address_t *dst_addr, /* OUT */ - vm_map_copy_t copy, - vm_map_size_t copy_size, - boolean_t consume_on_success, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); - -extern kern_return_t vm_map_copyin( - vm_map_t src_map, - vm_map_address_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - vm_map_copy_t *copy_result); /* OUT */ - -extern kern_return_t vm_map_copyin_common( - vm_map_t src_map, - vm_map_address_t src_addr, - vm_map_size_t len, - boolean_t src_destroy, - boolean_t src_volatile, - vm_map_copy_t *copy_result, /* OUT */ - boolean_t use_maxprot); - -#define VM_MAP_COPYIN_SRC_DESTROY 0x00000001 -#define VM_MAP_COPYIN_USE_MAXPROT 0x00000002 -#define VM_MAP_COPYIN_ENTRY_LIST 0x00000004 + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size); + +extern kern_return_t vm_map_copyout_internal( + vm_map_t dst_map, + vm_map_address_t *dst_addr, /* OUT */ + vm_map_copy_t copy, + vm_map_size_t copy_size, + boolean_t consume_on_success, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); + +extern kern_return_t vm_map_copyin( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + boolean_t src_destroy, + vm_map_copy_t *copy_result); /* OUT */ + +extern kern_return_t vm_map_copyin_common( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + boolean_t src_destroy, + boolean_t src_volatile, + vm_map_copy_t *copy_result, /* OUT */ + boolean_t use_maxprot); + +#define VM_MAP_COPYIN_SRC_DESTROY 0x00000001 +#define VM_MAP_COPYIN_USE_MAXPROT 0x00000002 +#define VM_MAP_COPYIN_ENTRY_LIST 0x00000004 #define VM_MAP_COPYIN_PRESERVE_PURGEABLE 0x00000008 -#define VM_MAP_COPYIN_ALL_FLAGS 0x0000000F -extern kern_return_t vm_map_copyin_internal( 
- vm_map_t src_map, - vm_map_address_t src_addr, - vm_map_size_t len, - int flags, - vm_map_copy_t *copy_result); /* OUT */ +#define VM_MAP_COPYIN_ALL_FLAGS 0x0000000F +extern kern_return_t vm_map_copyin_internal( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + int flags, + vm_map_copy_t *copy_result); /* OUT */ -extern kern_return_t vm_map_copy_extract( - vm_map_t src_map, - vm_map_address_t src_addr, - vm_map_size_t len, - vm_map_copy_t *copy_result, /* OUT */ - vm_prot_t *cur_prot, /* OUT */ - vm_prot_t *max_prot); +extern kern_return_t vm_map_copy_extract( + vm_map_t src_map, + vm_map_address_t src_addr, + vm_map_size_t len, + vm_map_copy_t *copy_result, /* OUT */ + vm_prot_t *cur_prot, /* OUT */ + vm_prot_t *max_prot); -extern void vm_map_disable_NX( - vm_map_t map); +extern void vm_map_disable_NX( + vm_map_t map); -extern void vm_map_disallow_data_exec( - vm_map_t map); +extern void vm_map_disallow_data_exec( + vm_map_t map); -extern void vm_map_set_64bit( - vm_map_t map); +extern void vm_map_set_64bit( + vm_map_t map); -extern void vm_map_set_32bit( - vm_map_t map); +extern void vm_map_set_32bit( + vm_map_t map); -extern void vm_map_set_jumbo( - vm_map_t map); +extern void vm_map_set_jumbo( + vm_map_t map); -extern void vm_map_set_max_addr( - vm_map_t map, vm_map_offset_t new_max_offset); +extern void vm_map_set_max_addr( + vm_map_t map, vm_map_offset_t new_max_offset); -extern boolean_t vm_map_has_hard_pagezero( - vm_map_t map, - vm_map_offset_t pagezero_size); -extern void vm_commit_pagezero_status(vm_map_t tmap); +extern boolean_t vm_map_has_hard_pagezero( + vm_map_t map, + vm_map_offset_t pagezero_size); +extern void vm_commit_pagezero_status(vm_map_t tmap); #ifdef __arm__ -static inline boolean_t vm_map_is_64bit(__unused vm_map_t map) { return 0; } +static inline boolean_t +vm_map_is_64bit(__unused vm_map_t map) +{ + return 0; +} #else -extern boolean_t vm_map_is_64bit( - vm_map_t map); +extern boolean_t vm_map_is_64bit( + vm_map_t map); #endif -extern kern_return_t vm_map_raise_max_offset( - vm_map_t map, - vm_map_offset_t new_max_offset); +extern kern_return_t vm_map_raise_max_offset( + vm_map_t map, + vm_map_offset_t new_max_offset); -extern kern_return_t vm_map_raise_min_offset( - vm_map_t map, - vm_map_offset_t new_min_offset); +extern kern_return_t vm_map_raise_min_offset( + vm_map_t map, + vm_map_offset_t new_min_offset); #if __x86_64__ extern void vm_map_set_high_start( - vm_map_t map, - vm_map_offset_t high_start); + vm_map_t map, + vm_map_offset_t high_start); #endif /* __x86_64__ */ -extern vm_map_offset_t vm_compute_max_offset( - boolean_t is64); +extern vm_map_offset_t vm_compute_max_offset( + boolean_t is64); -extern void vm_map_get_max_aslr_slide_section( - vm_map_t map, - int64_t *max_sections, - int64_t *section_size); +extern void vm_map_get_max_aslr_slide_section( + vm_map_t map, + int64_t *max_sections, + int64_t *section_size); -extern uint64_t vm_map_get_max_aslr_slide_pages( - vm_map_t map); +extern uint64_t vm_map_get_max_aslr_slide_pages( + vm_map_t map); -extern uint64_t vm_map_get_max_loader_aslr_slide_pages( - vm_map_t map); +extern uint64_t vm_map_get_max_loader_aslr_slide_pages( + vm_map_t map); -extern void vm_map_set_user_wire_limit( - vm_map_t map, - vm_size_t limit); +extern void vm_map_set_user_wire_limit( + vm_map_t map, + vm_size_t limit); extern void vm_map_switch_protect( - vm_map_t map, - boolean_t val); + vm_map_t map, + boolean_t val); extern void vm_map_iokit_mapped_region( - vm_map_t map, - vm_size_t bytes); + 
vm_map_t map, + vm_size_t bytes); extern void vm_map_iokit_unmapped_region( - vm_map_t map, - vm_size_t bytes); + vm_map_t map, + vm_size_t bytes); extern boolean_t first_free_is_valid(vm_map_t); -extern int vm_map_page_shift( - vm_map_t map); +extern int vm_map_page_shift( + vm_map_t map); -extern vm_map_offset_t vm_map_page_mask( - vm_map_t map); +extern vm_map_offset_t vm_map_page_mask( + vm_map_t map); -extern int vm_map_page_size( - vm_map_t map); +extern int vm_map_page_size( + vm_map_t map); -extern vm_map_offset_t vm_map_round_page_mask( - vm_map_offset_t offset, - vm_map_offset_t mask); +extern vm_map_offset_t vm_map_round_page_mask( + vm_map_offset_t offset, + vm_map_offset_t mask); -extern vm_map_offset_t vm_map_trunc_page_mask( - vm_map_offset_t offset, - vm_map_offset_t mask); +extern vm_map_offset_t vm_map_trunc_page_mask( + vm_map_offset_t offset, + vm_map_offset_t mask); -extern boolean_t vm_map_page_aligned( - vm_map_offset_t offset, - vm_map_offset_t mask); +extern boolean_t vm_map_page_aligned( + vm_map_offset_t offset, + vm_map_offset_t mask); #ifdef XNU_KERNEL_PRIVATE extern kern_return_t vm_map_page_info( - vm_map_t map, - vm_map_offset_t offset, - vm_page_info_flavor_t flavor, - vm_page_info_t info, - mach_msg_type_number_t *count); + vm_map_t map, + vm_map_offset_t offset, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count); extern kern_return_t vm_map_page_range_info_internal( - vm_map_t map, - vm_map_offset_t start_offset, - vm_map_offset_t end_offset, - vm_page_info_flavor_t flavor, - vm_page_info_t info, - mach_msg_type_number_t *count); + vm_map_t map, + vm_map_offset_t start_offset, + vm_map_offset_t end_offset, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count); #endif /* XNU_KERNEL_PRIVATE */ -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE /* * Macros to invoke vm_map_copyin_common. vm_map_copyin is the @@ -1506,21 +1562,21 @@ extern kern_return_t vm_map_page_range_info_internal( * BUT possible maximum access is rejected by vm_map_copyin(), but * returned by vm_map_copyin_maxprot. */ -#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \ - vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ - FALSE, copy_result, FALSE) +#define vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) \ + vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ + FALSE, copy_result, FALSE) #define vm_map_copyin_maxprot(src_map, \ - src_addr, len, src_destroy, copy_result) \ - vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ - FALSE, copy_result, TRUE) + src_addr, len, src_destroy, copy_result) \ + vm_map_copyin_common(src_map, src_addr, len, src_destroy, \ + FALSE, copy_result, TRUE) /* * Internal macros for rounding and truncation of vm_map offsets and sizes */ -#define VM_MAP_ROUND_PAGE(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) -#define VM_MAP_TRUNC_PAGE(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) +#define VM_MAP_ROUND_PAGE(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) +#define VM_MAP_TRUNC_PAGE(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) /* * Macros for rounding and truncation of vm_map offsets and sizes @@ -1528,20 +1584,21 @@ extern kern_return_t vm_map_page_range_info_internal( #define VM_MAP_PAGE_SHIFT(map) ((map) ? 
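/*
 * The mask arithmetic behind vm_map_round_page_mask()/vm_map_trunc_page_mask()
 * and the VM_MAP_ROUND_PAGE()/VM_MAP_TRUNC_PAGE() macros above: a page mask
 * is page size minus one, so rounding up is "add the mask, clear the low
 * bits". Stand-alone sketch with a hard-coded 16K mask; note the kernel
 * casts the mask to a signed type so the complement sign-extends to the
 * full width of the offset.
 */
#include <assert.h>
#include <stdint.h>

#define ROUND_PAGE(x, pgmask) (((uint64_t)(x) + (pgmask)) & ~(uint64_t)(pgmask))
#define TRUNC_PAGE(x, pgmask) ((uint64_t)(x) & ~(uint64_t)(pgmask))

int
main(void)
{
	uint64_t pgmask = 0x3fff;              /* 16K page: mask = size - 1 */

	assert(ROUND_PAGE(0x4001, pgmask) == 0x8000);
	assert(TRUNC_PAGE(0x7fff, pgmask) == 0x4000);
	assert((0x8000 & pgmask) == 0);        /* the VM_MAP_PAGE_ALIGNED() test */
	return 0;
}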
(map)->hdr.page_shift : PAGE_SHIFT) #define VM_MAP_PAGE_SIZE(map) (1 << VM_MAP_PAGE_SHIFT((map))) #define VM_MAP_PAGE_MASK(map) (VM_MAP_PAGE_SIZE((map)) - 1) -#define VM_MAP_PAGE_ALIGNED(x,pgmask) (((x) & (pgmask)) == 0) +#define VM_MAP_PAGE_ALIGNED(x, pgmask) (((x) & (pgmask)) == 0) -static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) -{ +static inline void +vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) +{ switch (prot) { - case MAP_MEM_NOOP: break; - case MAP_MEM_IO: *wimg = VM_WIMG_IO; break; - case MAP_MEM_COPYBACK: *wimg = VM_WIMG_USE_DEFAULT; break; - case MAP_MEM_INNERWBACK: *wimg = VM_WIMG_INNERWBACK; break; - case MAP_MEM_POSTED: *wimg = VM_WIMG_POSTED; break; - case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break; - case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break; - default: - panic("Unrecognized mapping type %u\n", prot); + case MAP_MEM_NOOP: break; + case MAP_MEM_IO: *wimg = VM_WIMG_IO; break; + case MAP_MEM_COPYBACK: *wimg = VM_WIMG_USE_DEFAULT; break; + case MAP_MEM_INNERWBACK: *wimg = VM_WIMG_INNERWBACK; break; + case MAP_MEM_POSTED: *wimg = VM_WIMG_POSTED; break; + case MAP_MEM_WTHRU: *wimg = VM_WIMG_WTHRU; break; + case MAP_MEM_WCOMB: *wimg = VM_WIMG_WCOMB; break; + default: + panic("Unrecognized mapping type %u\n", prot); } } @@ -1551,85 +1608,85 @@ static inline void vm_prot_to_wimg(unsigned int prot, unsigned int *wimg) extern kern_return_t vm_map_set_page_shift(vm_map_t map, int pageshift); #endif /* XNU_KERNEL_PRIVATE */ -#define vm_map_round_page(x,pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) -#define vm_map_trunc_page(x,pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) +#define vm_map_round_page(x, pgmask) (((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask))) +#define vm_map_trunc_page(x, pgmask) ((vm_map_offset_t)(x) & ~((signed)(pgmask))) /* * Flags for vm_map_remove() and vm_map_delete() */ -#define VM_MAP_REMOVE_NO_FLAGS 0x0 -#define VM_MAP_REMOVE_KUNWIRE 0x1 -#define VM_MAP_REMOVE_INTERRUPTIBLE 0x2 -#define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4 -#define VM_MAP_REMOVE_SAVE_ENTRIES 0x8 -#define VM_MAP_REMOVE_NO_PMAP_CLEANUP 0x10 -#define VM_MAP_REMOVE_NO_MAP_ALIGN 0x20 -#define VM_MAP_REMOVE_NO_UNNESTING 0x40 -#define VM_MAP_REMOVE_IMMUTABLE 0x80 -#define VM_MAP_REMOVE_GAPS_OK 0x100 +#define VM_MAP_REMOVE_NO_FLAGS 0x0 +#define VM_MAP_REMOVE_KUNWIRE 0x1 +#define VM_MAP_REMOVE_INTERRUPTIBLE 0x2 +#define VM_MAP_REMOVE_WAIT_FOR_KWIRE 0x4 +#define VM_MAP_REMOVE_SAVE_ENTRIES 0x8 +#define VM_MAP_REMOVE_NO_PMAP_CLEANUP 0x10 +#define VM_MAP_REMOVE_NO_MAP_ALIGN 0x20 +#define VM_MAP_REMOVE_NO_UNNESTING 0x40 +#define VM_MAP_REMOVE_IMMUTABLE 0x80 +#define VM_MAP_REMOVE_GAPS_OK 0x100 /* Support for UPLs from vm_maps */ #ifdef XNU_KERNEL_PRIVATE extern kern_return_t vm_map_get_upl( - vm_map_t target_map, - vm_map_offset_t map_offset, - upl_size_t *size, - upl_t *upl, - upl_page_info_array_t page_info, - unsigned int *page_infoCnt, - upl_control_flags_t *flags, - vm_tag_t tag, - int force_data_sync); + vm_map_t target_map, + vm_map_offset_t map_offset, + upl_size_t *size, + upl_t *upl, + upl_page_info_array_t page_info, + unsigned int *page_infoCnt, + upl_control_flags_t *flags, + vm_tag_t tag, + int force_data_sync); #endif /* XNU_KERNEL_PRIVATE */ extern void vm_map_sizes(vm_map_t map, - vm_map_size_t * psize, - vm_map_size_t * pfree, - vm_map_size_t * plargest_free); + vm_map_size_t * psize, + vm_map_size_t * pfree, + vm_map_size_t * plargest_free); #if CONFIG_DYNAMIC_CODE_SIGNING -extern kern_return_t vm_map_sign(vm_map_t 
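/*
 * Shape of the vm_prot_to_wimg() mapping above: memory-entry cache-mode
 * tags are translated into WIMG (Write-through / caching-Inhibited /
 * Memory-coherent / Guarded) attributes for the pmap layer. The constants
 * below are local stand-ins for illustration, not the kernel's
 * MAP_MEM_* / VM_WIMG_* values.
 */
#include <stdio.h>

enum mem_mode { MEM_IO, MEM_COPYBACK, MEM_WCOMB };
enum wimg_attr { WIMG_IO, WIMG_DEFAULT, WIMG_WCOMB };

static enum wimg_attr
mode_to_wimg(enum mem_mode mode)
{
	switch (mode) {
	case MEM_IO:       return WIMG_IO;      /* uncached device memory */
	case MEM_COPYBACK: return WIMG_DEFAULT; /* normal write-back cacheable */
	case MEM_WCOMB:    return WIMG_WCOMB;   /* write-combining, e.g. framebuffers */
	}
	return WIMG_DEFAULT;
}

int
main(void)
{
	printf("MEM_IO maps to wimg %d\n", mode_to_wimg(MEM_IO));
	return 0;
}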
map, - vm_map_offset_t start, - vm_map_offset_t end); +extern kern_return_t vm_map_sign(vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end); #endif extern kern_return_t vm_map_partial_reap( - vm_map_t map, - unsigned int *reclaimed_resident, - unsigned int *reclaimed_compressed); + vm_map_t map, + unsigned int *reclaimed_resident, + unsigned int *reclaimed_compressed); #if DEVELOPMENT || DEBUG extern int vm_map_disconnect_page_mappings( - vm_map_t map, - boolean_t); + vm_map_t map, + boolean_t); #endif #if CONFIG_FREEZE extern kern_return_t vm_map_freeze( - vm_map_t map, - unsigned int *purgeable_count, - unsigned int *wired_count, - unsigned int *clean_count, - unsigned int *dirty_count, - unsigned int dirty_budget, - unsigned int *shared_count, - int *freezer_error_code, - boolean_t eval_only); - - -#define FREEZER_ERROR_GENERIC (-1) -#define FREEZER_ERROR_EXCESS_SHARED_MEMORY (-2) -#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3) -#define FREEZER_ERROR_NO_COMPRESSOR_SPACE (-4) -#define FREEZER_ERROR_NO_SWAP_SPACE (-5) + vm_map_t map, + unsigned int *purgeable_count, + unsigned int *wired_count, + unsigned int *clean_count, + unsigned int *dirty_count, + unsigned int dirty_budget, + unsigned int *shared_count, + int *freezer_error_code, + boolean_t eval_only); + + +#define FREEZER_ERROR_GENERIC (-1) +#define FREEZER_ERROR_EXCESS_SHARED_MEMORY (-2) +#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO (-3) +#define FREEZER_ERROR_NO_COMPRESSOR_SPACE (-4) +#define FREEZER_ERROR_NO_SWAP_SPACE (-5) #endif @@ -1641,8 +1698,8 @@ __END_DECLS * a fake pointer based on the map's ledger and the index of the ledger being * reported. */ -#define INFO_MAKE_FAKE_OBJECT_ID(map,ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id))) +#define INFO_MAKE_FAKE_OBJECT_ID(map, ledger_id) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((int*)((map)->pmap->ledger)+(ledger_id))) + +#endif /* KERNEL_PRIVATE */ -#endif /* KERNEL_PRIVATE */ - -#endif /* _VM_VM_MAP_H_ */ +#endif /* _VM_VM_MAP_H_ */ diff --git a/osfmk/vm/vm_map_store.c b/osfmk/vm/vm_map_store.c index 8690d27ad..e4782aedb 100644 --- a/osfmk/vm/vm_map_store.c +++ b/osfmk/vm/vm_map_store.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
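/*
 * vm_map_freeze() reports failure detail through its freezer_error_code
 * out-parameter using the FREEZER_ERROR_* codes defined above. A
 * hypothetical caller decoding them; the codes are duplicated here so the
 * sketch stands alone.
 */
#include <stdio.h>

#define FREEZER_ERROR_GENERIC                   (-1)
#define FREEZER_ERROR_EXCESS_SHARED_MEMORY      (-2)
#define FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO  (-3)
#define FREEZER_ERROR_NO_COMPRESSOR_SPACE       (-4)
#define FREEZER_ERROR_NO_SWAP_SPACE             (-5)

static const char *
freezer_error_string(int code)
{
	switch (code) {
	case FREEZER_ERROR_EXCESS_SHARED_MEMORY:     return "too much shared memory";
	case FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO: return "private/shared ratio too low";
	case FREEZER_ERROR_NO_COMPRESSOR_SPACE:      return "compressor pool full";
	case FREEZER_ERROR_NO_SWAP_SPACE:            return "no swap space";
	default:                                     return "generic freezer failure";
	}
}

int
main(void)
{
	printf("%s\n", freezer_error_string(FREEZER_ERROR_NO_SWAP_SPACE));
	return 0;
}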
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,7 +35,7 @@ boolean_t first_free_is_valid_store( vm_map_t map ) { - return(first_free_is_valid_ll( map )); + return first_free_is_valid_ll( map ); } #endif @@ -61,15 +61,15 @@ vm_map_store_init( struct vm_map_header *hdr ) boolean_t vm_map_store_lookup_entry( - vm_map_t map, - vm_map_offset_t address, - vm_map_entry_t *entry) /* OUT */ + vm_map_t map, + vm_map_offset_t address, + vm_map_entry_t *entry) /* OUT */ { #ifdef VM_MAP_STORE_USE_LL - return (vm_map_store_lookup_entry_ll( map, address, entry )); + return vm_map_store_lookup_entry_ll( map, address, entry ); #elif defined VM_MAP_STORE_USE_RB if (vm_map_store_has_RB_support( &map->hdr )) { - return (vm_map_store_lookup_entry_rb( map, address, entry )); + return vm_map_store_lookup_entry_rb( map, address, entry ); } else { panic("VM map lookups need RB tree support.\n"); return FALSE; /* For compiler warning.*/ @@ -81,18 +81,18 @@ void vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type ) { switch (update_type) { - case VM_MAP_ENTRY_CREATE: - break; - case VM_MAP_ENTRY_DELETE: - if((map->holelistenabled == FALSE) && ((entry) == (map)->first_free)) { - (map)->first_free = vm_map_to_entry(map); - } - if((entry) == (map)->hint) { - (map)->hint = vm_map_to_entry(map); - } - break; - default: - break; + case VM_MAP_ENTRY_CREATE: + break; + case VM_MAP_ENTRY_DELETE: + if ((map->holelistenabled == FALSE) && ((entry) == (map)->first_free)) { + (map)->first_free = vm_map_to_entry(map); + } + if ((entry) == (map)->hint) { + (map)->hint = vm_map_to_entry(map); + } + break; + default: + break; } } @@ -104,7 +104,7 @@ vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type ) * some places where updating first_free is not needed & * copy maps are being modified. Also note the first argument * is the map header. - * Modifying the vm_map_store_entry_{un,}link functions to + * Modifying the vm_map_store_entry_{un,}link functions to * deal with these call sites made the interface confusing * and clunky. 
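/*
 * The store layer's backend dispatch, reduced to a sketch: a single front
 * end (vm_map_store_lookup_entry() above) with the data-structure choice
 * made at compile time, mirroring the VM_MAP_STORE_USE_LL /
 * VM_MAP_STORE_USE_RB split. The backends here are stubs for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

#define STORE_USE_RB 1

static bool lookup_rb(uint64_t addr) { (void)addr; return false; } /* O(log n) */
static bool lookup_ll(uint64_t addr) { (void)addr; return false; } /* O(n), hint-assisted */

static bool
store_lookup(uint64_t addr)
{
#if STORE_USE_RB
	return lookup_rb(addr);
#else
	return lookup_ll(addr);
#endif
}

int
main(void)
{
	(void)lookup_ll;        /* unused when the RB backend is selected */
	return store_lookup(0x1000) ? 1 : 0;
}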
*/ @@ -113,8 +113,9 @@ void _vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry) { assert(entry->vme_start < entry->vme_end); - if (__improbable(vm_debug_events)) - DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof (lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end); + if (__improbable(vm_debug_events)) { + DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end); + } vm_map_store_entry_link_ll(mapHdr, after_where, entry); #ifdef VM_MAP_STORE_USE_RB @@ -124,24 +125,24 @@ _vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_wh #endif #if MAP_ENTRY_INSERTION_DEBUG backtrace(&entry->vme_insertion_bt[0], - (sizeof (entry->vme_insertion_bt) / sizeof (uintptr_t))); + (sizeof(entry->vme_insertion_bt) / sizeof(uintptr_t))); #endif } void vm_map_store_entry_link( - vm_map_t map, - vm_map_entry_t after_where, - vm_map_entry_t entry, - vm_map_kernel_flags_t vmk_flags) + vm_map_t map, + vm_map_entry_t after_where, + vm_map_entry_t entry, + vm_map_kernel_flags_t vmk_flags) { vm_map_t VMEL_map; vm_map_entry_t VMEL_entry; VMEL_map = (map); VMEL_entry = (entry); - + _vm_map_store_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); - if( VMEL_map->disable_vmentry_reuse == TRUE ) { + if (VMEL_map->disable_vmentry_reuse == TRUE) { UPDATE_HIGHEST_ENTRY_END( VMEL_map, VMEL_entry); } else { update_first_free_ll(VMEL_map, VMEL_map->first_free); @@ -161,8 +162,9 @@ vm_map_store_entry_link( void _vm_map_store_entry_unlink( struct vm_map_header * mapHdr, vm_map_entry_t entry) { - if (__improbable(vm_debug_events)) - DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof (lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end); + if (__improbable(vm_debug_events)) { + DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end); + } vm_map_store_entry_unlink_ll(mapHdr, entry); #ifdef VM_MAP_STORE_USE_RB @@ -182,9 +184,9 @@ vm_map_store_entry_unlink( vm_map_t map, vm_map_entry_t entry) VMEU_entry = (entry); if (map->holelistenabled == FALSE) { - if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start){ + if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) { VMEU_first_free = VMEU_entry->vme_prev; - } else { + } else { VMEU_first_free = VMEU_map->first_free; } } @@ -199,7 +201,7 @@ vm_map_store_entry_unlink( vm_map_t map, vm_map_entry_t entry) } void -vm_map_store_copy_reset( vm_map_copy_t copy,vm_map_entry_t entry) +vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry) { int nentries = copy->cpy_hdr.nentries; vm_map_store_copy_reset_ll(copy, entry, nentries); diff --git a/osfmk/vm/vm_map_store.h b/osfmk/vm/vm_map_store.h index 8a0641c70..d4712a7c3 100644 --- a/osfmk/vm/vm_map_store.h +++ b/osfmk/vm/vm_map_store.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,10 +30,10 @@ #define _VM_VM_MAP_STORE_H /* -#ifndef VM_MAP_STORE_USE_LL -#define VM_MAP_STORE_USE_LL -#endif -*/ + #ifndef VM_MAP_STORE_USE_LL + #define VM_MAP_STORE_USE_LL + #endif + */ #ifndef VM_MAP_STORE_USE_RB #define VM_MAP_STORE_USE_RB #endif @@ -52,41 +52,41 @@ struct vm_map_store { }; #ifdef VM_MAP_STORE_USE_RB - RB_HEAD( rb_head, vm_map_store ); +RB_HEAD( rb_head, vm_map_store ); #endif #include <vm/vm_map.h> #include <vm/vm_map_store_ll.h> #include <vm/vm_map_store_rb.h> -#define UPDATE_HIGHEST_ENTRY_END(map, highest_entry) \ - MACRO_BEGIN \ - struct _vm_map* UHEE_map; \ - struct vm_map_entry* UHEE_entry; \ - UHEE_map = (map); \ - assert(UHEE_map->disable_vmentry_reuse); \ - assert(!UHEE_map->is_nested_map); \ - UHEE_entry = (highest_entry); \ - if( UHEE_map->highest_entry_end < UHEE_entry->vme_end) { \ - UHEE_map->highest_entry_end = UHEE_entry->vme_end; \ - } \ +#define UPDATE_HIGHEST_ENTRY_END(map, highest_entry) \ + MACRO_BEGIN \ + struct _vm_map* UHEE_map; \ + struct vm_map_entry* UHEE_entry; \ + UHEE_map = (map); \ + assert(UHEE_map->disable_vmentry_reuse); \ + assert(!UHEE_map->is_nested_map); \ + UHEE_entry = (highest_entry); \ + if( UHEE_map->highest_entry_end < UHEE_entry->vme_end) { \ + UHEE_map->highest_entry_end = UHEE_entry->vme_end; \ + } \ MACRO_END -#define VM_MAP_HIGHEST_ENTRY(map, entry, start) \ - MACRO_BEGIN \ - struct _vm_map* VMHE_map; \ - struct vm_map_entry* tmp_entry; \ - vm_map_offset_t VMHE_start; \ - VMHE_map = (map); \ - assert(VMHE_map->disable_vmentry_reuse); \ - assert(!VMHE_map->is_nested_map); \ - VMHE_start= VMHE_map->highest_entry_end + PAGE_SIZE_64; \ - while(vm_map_lookup_entry(VMHE_map, VMHE_start, &tmp_entry)){ \ - VMHE_map->highest_entry_end = tmp_entry->vme_end; \ - VMHE_start = VMHE_map->highest_entry_end + PAGE_SIZE_64; \ - } \ - entry = tmp_entry; \ - start = VMHE_start; \ +#define VM_MAP_HIGHEST_ENTRY(map, entry, start) \ + MACRO_BEGIN \ + struct _vm_map* VMHE_map; \ + struct vm_map_entry* tmp_entry; \ + vm_map_offset_t VMHE_start; \ + VMHE_map = (map); \ + assert(VMHE_map->disable_vmentry_reuse); \ + assert(!VMHE_map->is_nested_map); \ + VMHE_start= VMHE_map->highest_entry_end + PAGE_SIZE_64; \ + while(vm_map_lookup_entry(VMHE_map, VMHE_start, &tmp_entry)){ \ + VMHE_map->highest_entry_end = tmp_entry->vme_end; \ + VMHE_start = VMHE_map->highest_entry_end + PAGE_SIZE_64; \ + } \ + entry = tmp_entry; \ + start = VMHE_start; \ MACRO_END /* @@ -94,12 +94,12 @@ struct vm_map_store { * * Saves the specified entry as the hint for * future lookups. only a read lock is held on map, - * so make sure the store is atomic... OSCompareAndSwap + * guarantees this... 
also, we don't care if we collide * and someone else wins and stores their 'hint' */ -#define SAVE_HINT_MAP_READ(map,value) \ - MACRO_BEGIN \ +#define SAVE_HINT_MAP_READ(map, value) \ + MACRO_BEGIN \ OSCompareAndSwapPtr((map)->hint, value, &(map)->hint); \ MACRO_END @@ -109,38 +109,37 @@ struct vm_map_store { * * Saves the specified entry as the hint for * future lookups. write lock held on map, - * so no one else can be writing or looking - * until the lock is dropped, so it's safe - * to just do an assignment + * so no one else can be writing or looking + * until the lock is dropped, so it's safe + * to just do an assignment */ -#define SAVE_HINT_MAP_WRITE(map,value) \ - MACRO_BEGIN \ - (map)->hint = (value); \ +#define SAVE_HINT_MAP_WRITE(map, value) \ + MACRO_BEGIN \ + (map)->hint = (value); \ MACRO_END -#define SAVE_HINT_HOLE_WRITE(map,value) \ - MACRO_BEGIN \ - (map)->hole_hint = (value); \ +#define SAVE_HINT_HOLE_WRITE(map, value) \ + MACRO_BEGIN \ + (map)->hole_hint = (value); \ MACRO_END -#define SKIP_RB_TREE 0xBAADC0D1 +#define SKIP_RB_TREE 0xBAADC0D1 -#define VM_MAP_ENTRY_CREATE 1 -#define VM_MAP_ENTRY_DELETE 2 +#define VM_MAP_ENTRY_CREATE 1 +#define VM_MAP_ENTRY_DELETE 2 void vm_map_store_init( struct vm_map_header* ); boolean_t vm_map_store_lookup_entry( struct _vm_map*, vm_map_offset_t, struct vm_map_entry**); -void vm_map_store_update( struct _vm_map*, struct vm_map_entry*, int); -void _vm_map_store_entry_link( struct vm_map_header *, struct vm_map_entry*, struct vm_map_entry*); -void vm_map_store_entry_link( struct _vm_map*, struct vm_map_entry*, struct vm_map_entry*, vm_map_kernel_flags_t); -void _vm_map_store_entry_unlink( struct vm_map_header *, struct vm_map_entry*); -void vm_map_store_entry_unlink( struct _vm_map*, struct vm_map_entry*); -void vm_map_store_update_first_free( struct _vm_map*, struct vm_map_entry*, boolean_t new_entry_creation); -void vm_map_store_copy_reset( struct vm_map_copy*, struct vm_map_entry*); +void vm_map_store_update( struct _vm_map*, struct vm_map_entry*, int); +void _vm_map_store_entry_link( struct vm_map_header *, struct vm_map_entry*, struct vm_map_entry*); +void vm_map_store_entry_link( struct _vm_map*, struct vm_map_entry*, struct vm_map_entry*, vm_map_kernel_flags_t); +void _vm_map_store_entry_unlink( struct vm_map_header *, struct vm_map_entry*); +void vm_map_store_entry_unlink( struct _vm_map*, struct vm_map_entry*); +void vm_map_store_update_first_free( struct _vm_map*, struct vm_map_entry*, boolean_t new_entry_creation); +void vm_map_store_copy_reset( struct vm_map_copy*, struct vm_map_entry*); #if MACH_ASSERT boolean_t first_free_is_valid_store( struct _vm_map*); #endif boolean_t vm_map_store_has_RB_support( struct vm_map_header *hdr ); #endif /* _VM_VM_MAP_STORE_H */ - diff --git a/osfmk/vm/vm_map_store_ll.c b/osfmk/vm/vm_map_store_ll.c index 5f33f8c0c..ad6ad2bdd 100644 --- a/osfmk/vm/vm_map_store_ll.c +++ b/osfmk/vm/vm_map_store_ll.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
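/*
 * The hint discipline described above, in portable C11: readers hold only
 * a shared lock, so SAVE_HINT_MAP_READ() publishes the new hint with a
 * single atomic compare-and-swap, and losing the race to another reader is
 * harmless because any entry is an acceptable hint. Sketch only; the
 * kernel uses OSCompareAndSwapPtr().
 */
#include <stdatomic.h>

struct entry;

struct map {
	_Atomic(struct entry *) hint;
};

static void
save_hint_read_locked(struct map *m, struct entry *e)
{
	struct entry *old = atomic_load_explicit(&m->hint, memory_order_relaxed);

	/* Failure is fine: a concurrent reader already stored its own hint. */
	atomic_compare_exchange_strong_explicit(&m->hint, &old, e,
	    memory_order_relaxed, memory_order_relaxed);
}
/* Usage: save_hint_read_locked(map, found_entry) after a successful lookup. */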
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,26 +31,27 @@ boolean_t first_free_is_valid_ll( vm_map_t map ) { - vm_map_entry_t entry, next; + vm_map_entry_t entry, next; entry = vm_map_to_entry(map); next = entry->vme_next; while (vm_map_trunc_page(next->vme_start, - VM_MAP_PAGE_MASK(map)) == - vm_map_trunc_page(entry->vme_end, - VM_MAP_PAGE_MASK(map)) || - (vm_map_trunc_page(next->vme_start, - VM_MAP_PAGE_MASK(map)) == - vm_map_trunc_page(entry->vme_start, - VM_MAP_PAGE_MASK(map)) && - next != vm_map_to_entry(map))) { + VM_MAP_PAGE_MASK(map)) == + vm_map_trunc_page(entry->vme_end, + VM_MAP_PAGE_MASK(map)) || + (vm_map_trunc_page(next->vme_start, + VM_MAP_PAGE_MASK(map)) == + vm_map_trunc_page(entry->vme_start, + VM_MAP_PAGE_MASK(map)) && + next != vm_map_to_entry(map))) { entry = next; next = entry->vme_next; - if (entry == vm_map_to_entry(map)) + if (entry == vm_map_to_entry(map)) { break; + } } if (map->first_free != entry) { printf("Bad first_free for map %p: %p should be %p\n", - map, map->first_free, entry); + map, map->first_free, entry); return FALSE; } return TRUE; @@ -61,59 +62,59 @@ first_free_is_valid_ll( vm_map_t map ) * * Updates the map->first_free pointer to the * entry immediately before the first hole in the map. - * The map should be locked. + * The map should be locked. 
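/*
 * The walk in first_free_is_valid_ll() above, restated on a sorted array
 * of page-truncated [start, end) ranges: advance while the next range
 * begins on the same page the current one ends on, i.e. while there is no
 * usable hole between them. Hypothetical stand-alone version; the kernel
 * form also tolerates zero-size entries via a second clause.
 */
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

#define TRUNC(x, mask) ((x) & ~(uint64_t)(mask))

static size_t
first_free_index(const struct range *r, size_t n, uint64_t pgmask)
{
	size_t i = 0;

	while (i + 1 < n &&
	    TRUNC(r[i + 1].start, pgmask) == TRUNC(r[i].end, pgmask)) {
		i++;            /* ranges abut within a page: keep walking */
	}
	return i;               /* entry immediately before the first hole */
}

int
main(void)
{
	struct range r[] = {
		{ 0x1000, 0x2000 }, { 0x2000, 0x3000 }, { 0x8000, 0x9000 },
	};

	printf("first_free at index %zu\n", first_free_index(r, 3, 0xfff));
	return 0;
}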
*/ -#define UPDATE_FIRST_FREE_LL(map, new_first_free) \ - MACRO_BEGIN \ - if( map->disable_vmentry_reuse == FALSE){ \ - vm_map_t UFF_map; \ - vm_map_entry_t UFF_first_free; \ - vm_map_entry_t UFF_next_entry; \ - UFF_map = (map); \ - UFF_first_free = (new_first_free); \ - UFF_next_entry = UFF_first_free->vme_next; \ - while (vm_map_trunc_page(UFF_next_entry->vme_start, \ - VM_MAP_PAGE_MASK(UFF_map)) == \ - vm_map_trunc_page(UFF_first_free->vme_end, \ - VM_MAP_PAGE_MASK(UFF_map)) || \ - (vm_map_trunc_page(UFF_next_entry->vme_start, \ - VM_MAP_PAGE_MASK(UFF_map)) == \ - vm_map_trunc_page(UFF_first_free->vme_start, \ - VM_MAP_PAGE_MASK(UFF_map)) && \ - UFF_next_entry != vm_map_to_entry(UFF_map))) { \ - UFF_first_free = UFF_next_entry; \ - UFF_next_entry = UFF_first_free->vme_next; \ - if (UFF_first_free == vm_map_to_entry(UFF_map)) \ - break; \ - } \ - UFF_map->first_free = UFF_first_free; \ - assert(first_free_is_valid(UFF_map)); \ - } \ +#define UPDATE_FIRST_FREE_LL(map, new_first_free) \ + MACRO_BEGIN \ + if( map->disable_vmentry_reuse == FALSE){ \ + vm_map_t UFF_map; \ + vm_map_entry_t UFF_first_free; \ + vm_map_entry_t UFF_next_entry; \ + UFF_map = (map); \ + UFF_first_free = (new_first_free); \ + UFF_next_entry = UFF_first_free->vme_next; \ + while (vm_map_trunc_page(UFF_next_entry->vme_start, \ + VM_MAP_PAGE_MASK(UFF_map)) == \ + vm_map_trunc_page(UFF_first_free->vme_end, \ + VM_MAP_PAGE_MASK(UFF_map)) || \ + (vm_map_trunc_page(UFF_next_entry->vme_start, \ + VM_MAP_PAGE_MASK(UFF_map)) == \ + vm_map_trunc_page(UFF_first_free->vme_start, \ + VM_MAP_PAGE_MASK(UFF_map)) && \ + UFF_next_entry != vm_map_to_entry(UFF_map))) { \ + UFF_first_free = UFF_next_entry; \ + UFF_next_entry = UFF_first_free->vme_next; \ + if (UFF_first_free == vm_map_to_entry(UFF_map)) \ + break; \ + } \ + UFF_map->first_free = UFF_first_free; \ + assert(first_free_is_valid(UFF_map)); \ + } \ MACRO_END -#define _vm_map_entry_link_ll(hdr, after_where, entry) \ - MACRO_BEGIN \ - if (entry->map_aligned) { \ - assert(VM_MAP_PAGE_ALIGNED((entry->vme_start), \ - VM_MAP_HDR_PAGE_MASK((hdr))));\ - assert(VM_MAP_PAGE_ALIGNED((entry->vme_end), \ - VM_MAP_HDR_PAGE_MASK((hdr))));\ - } \ - (hdr)->nentries++; \ - (entry)->vme_prev = (after_where); \ - (entry)->vme_next = (after_where)->vme_next; \ +#define _vm_map_entry_link_ll(hdr, after_where, entry) \ + MACRO_BEGIN \ + if (entry->map_aligned) { \ + assert(VM_MAP_PAGE_ALIGNED((entry->vme_start), \ + VM_MAP_HDR_PAGE_MASK((hdr))));\ + assert(VM_MAP_PAGE_ALIGNED((entry->vme_end), \ + VM_MAP_HDR_PAGE_MASK((hdr))));\ + } \ + (hdr)->nentries++; \ + (entry)->vme_prev = (after_where); \ + (entry)->vme_next = (after_where)->vme_next; \ (entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \ MACRO_END -#define _vm_map_entry_unlink_ll(hdr, entry) \ - MACRO_BEGIN \ - (hdr)->nentries--; \ - (entry)->vme_next->vme_prev = (entry)->vme_prev; \ - (entry)->vme_prev->vme_next = (entry)->vme_next; \ +#define _vm_map_entry_unlink_ll(hdr, entry) \ + MACRO_BEGIN \ + (hdr)->nentries--; \ + (entry)->vme_next->vme_prev = (entry)->vme_prev; \ + (entry)->vme_prev->vme_next = (entry)->vme_next; \ MACRO_END /* * Macro: vm_map_copy_insert - * + * * Description: * Link a copy chain ("copy") into a map at the * specified location (after "where"). @@ -122,20 +123,20 @@ first_free_is_valid_ll( vm_map_t map ) * Warning: * The arguments are evaluated multiple times. 
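/*
 * _vm_map_entry_link_ll()/_vm_map_entry_unlink_ll() above as plain
 * functions on a circular doubly-linked list, with the map header acting
 * as the sentinel node; the nentries count and alignment assertions are
 * omitted. Local types for illustration.
 */
struct node {
	struct node *prev, *next;
};

static void
link_after(struct node *after_where, struct node *n)
{
	n->prev = after_where;
	n->next = after_where->next;
	n->prev->next = n;
	n->next->prev = n;
}

static void
unlink_node(struct node *n)
{
	n->next->prev = n->prev;
	n->prev->next = n->next;
}

int
main(void)
{
	struct node head = { &head, &head };    /* empty ring: sentinel only */
	struct node a, b;

	link_after(&head, &a);                  /* head <-> a */
	link_after(&a, &b);                     /* head <-> a <-> b */
	unlink_node(&a);                        /* head <-> b */
	return (head.next == &b && b.next == &head) ? 0 : 1;
}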
*/ -#define _vm_map_copy_insert_ll(map, where, copy) \ -MACRO_BEGIN \ - vm_map_t VMCI_map; \ - vm_map_entry_t VMCI_where; \ - vm_map_copy_t VMCI_copy; \ - VMCI_map = (map); \ - VMCI_where = (where); \ - VMCI_copy = (copy); \ +#define _vm_map_copy_insert_ll(map, where, copy) \ +MACRO_BEGIN \ + vm_map_t VMCI_map; \ + vm_map_entry_t VMCI_where; \ + vm_map_copy_t VMCI_copy; \ + VMCI_map = (map); \ + VMCI_where = (where); \ + VMCI_copy = (copy); \ ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\ - ->vme_next = (VMCI_where->vme_next); \ - ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \ - ->vme_prev = VMCI_where; \ - VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \ - update_first_free_ll(VMCI_map, VMCI_map->first_free); \ + ->vme_next = (VMCI_where->vme_next); \ + ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \ + ->vme_prev = VMCI_where; \ + VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \ + update_first_free_ll(VMCI_map, VMCI_map->first_free); \ MACRO_END @@ -157,12 +158,12 @@ vm_map_store_init_ll( __unused struct vm_map_header *hdr) */ boolean_t vm_map_store_lookup_entry_ll( - vm_map_t map, - vm_map_offset_t address, - vm_map_entry_t *entry) /* OUT */ + vm_map_t map, + vm_map_offset_t address, + vm_map_entry_t *entry) /* OUT */ { - vm_map_entry_t cur; - vm_map_entry_t last; + vm_map_entry_t cur; + vm_map_entry_t last; /* * Start looking either from the head of the @@ -170,8 +171,9 @@ vm_map_store_lookup_entry_ll( */ cur = map->hint; - if (cur == vm_map_to_entry(map)) + if (cur == vm_map_to_entry(map)) { cur = cur->vme_next; + } if (address >= cur->vme_start) { /* @@ -188,10 +190,9 @@ vm_map_store_lookup_entry_ll( last = vm_map_to_entry(map); if ((cur != last) && (cur->vme_end > address)) { *entry = cur; - return(TRUE); + return TRUE; } - } - else { + } else { /* * Go from start to hint, *inclusively* */ @@ -214,7 +215,7 @@ vm_map_store_lookup_entry_ll( *entry = cur; SAVE_HINT_MAP_READ(map, cur); - return(TRUE); + return TRUE; } break; } @@ -223,7 +224,7 @@ vm_map_store_lookup_entry_ll( *entry = cur->vme_prev; SAVE_HINT_MAP_READ(map, *entry); - return(FALSE); + return FALSE; } void @@ -243,17 +244,16 @@ vm_map_store_copy_reset_ll( vm_map_copy_t copy, __unused vm_map_entry_t entry, _ { copy->cpy_hdr.nentries = 0; vm_map_copy_first_entry(copy) = - vm_map_copy_last_entry(copy) = - vm_map_copy_to_entry(copy); - + vm_map_copy_last_entry(copy) = + vm_map_copy_to_entry(copy); } void update_first_free_ll( vm_map_t map, vm_map_entry_t new_first_free) { - if (map->holelistenabled) + if (map->holelistenabled) { return; + } UPDATE_FIRST_FREE_LL( map, new_first_free); } - diff --git a/osfmk/vm/vm_map_store_ll.h b/osfmk/vm/vm_map_store_ll.h index 0c15b914d..e122db663 100644 --- a/osfmk/vm/vm_map_store_ll.h +++ b/osfmk/vm/vm_map_store_ll.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
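/*
 * The hint-assisted lookup of vm_map_store_lookup_entry_ll() above,
 * reduced to a sorted array of disjoint [start, end) ranges: scan from the
 * last hit when the target lies at or beyond it, otherwise from the head,
 * and remember a successful result for next time. Stand-alone sketch; the
 * kernel walks a linked list and saves the hint atomically.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct range { uint64_t start, end; };

static size_t lookup_hint;      /* index of the last successful lookup */

static bool
lookup(const struct range *r, size_t n, uint64_t addr, size_t *out)
{
	size_t i = (lookup_hint < n && addr >= r[lookup_hint].start)
	    ? lookup_hint : 0;

	for (; i < n && r[i].start <= addr; i++) {
		if (addr < r[i].end) {
			lookup_hint = i;        /* save the hint for next time */
			*out = i;
			return true;
		}
	}
	return false;
}

int
main(void)
{
	struct range r[] = { { 0x1000, 0x2000 }, { 0x3000, 0x4000 } };
	size_t idx;

	return (lookup(r, 2, 0x3004, &idx) && idx == 1) ? 0 : 1;
}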
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -35,9 +35,9 @@ boolean_t first_free_is_valid_ll( struct _vm_map*); void vm_map_store_init_ll( struct vm_map_header* ); boolean_t vm_map_store_lookup_entry_ll( struct _vm_map*, vm_map_offset_t, struct vm_map_entry**); -void vm_map_store_entry_link_ll( struct vm_map_header*, struct vm_map_entry*, struct vm_map_entry*); -void vm_map_store_entry_unlink_ll( struct vm_map_header*, struct vm_map_entry*); -void update_first_free_ll(struct _vm_map*, struct vm_map_entry*); +void vm_map_store_entry_link_ll( struct vm_map_header*, struct vm_map_entry*, struct vm_map_entry*); +void vm_map_store_entry_unlink_ll( struct vm_map_header*, struct vm_map_entry*); +void update_first_free_ll(struct _vm_map*, struct vm_map_entry*); void vm_map_store_copy_reset_ll( struct vm_map_copy*, struct vm_map_entry*, int); #endif /* _VM_VM_MAP_STORE_LL_H */ diff --git a/osfmk/vm/vm_map_store_rb.c b/osfmk/vm/vm_map_store_rb.c index 9485f0cb8..c66a1446a 100644 --- a/osfmk/vm/vm_map_store_rb.c +++ b/osfmk/vm/vm_map_store_rb.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -31,7 +31,7 @@ RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare); -#define VME_FOR_STORE( store) \ +#define VME_FOR_STORE( store) \ (vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links))) void @@ -40,35 +40,41 @@ vm_map_store_init_rb( struct vm_map_header* hdr ) RB_INIT(&(hdr->rb_head_store)); } -int rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent) +int +rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent) { vm_map_entry_t vme_c; vm_map_entry_t vme_p; vme_c = VME_FOR_STORE(node); vme_p = VME_FOR_STORE(parent); - if (vme_c->vme_start < vme_p->vme_start) + if (vme_c->vme_start < vme_p->vme_start) { return -1; - if (vme_c->vme_start >= vme_p->vme_end) + } + if (vme_c->vme_start >= vme_p->vme_end) { return 1; + } return 0; } -void vm_map_store_walk_rb( vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry) +void +vm_map_store_walk_rb( vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry) { struct vm_map_header hdr = map->hdr; struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store)); vm_map_entry_t cur = *vm_entry; - rb_entry = RB_FIND( rb_head, &(hdr.rb_head_store), &(cur->store)); - if(rb_entry == NULL) + rb_entry = RB_FIND( rb_head, &(hdr.rb_head_store), &(cur->store)); + if (rb_entry == NULL) { panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme); - else - panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry,entry)), VME_FOR_STORE(RB_RIGHT(rb_entry,entry))); + } else { + panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry))); + } } -boolean_t vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry) +boolean_t +vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry) { struct vm_map_header hdr = map->hdr; struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store)); @@ -76,9 +82,10 @@ boolean_t vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, v vm_map_entry_t prev = VM_MAP_ENTRY_NULL; while (rb_entry != (struct vm_map_store*)NULL) { - cur = VME_FOR_STORE(rb_entry); - if(cur == VM_MAP_ENTRY_NULL) + cur = VME_FOR_STORE(rb_entry); + if (cur == VM_MAP_ENTRY_NULL) { panic("no entry"); + } if (address >= cur->vme_start) { if (address < cur->vme_end) { *vm_entry = cur; @@ -90,33 +97,36 @@ boolean_t vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, v rb_entry = RB_LEFT(rb_entry, entry); } } - if( prev == VM_MAP_ENTRY_NULL){ + if (prev == VM_MAP_ENTRY_NULL) { prev = vm_map_to_entry(map); } *vm_entry = prev; return FALSE; } -void vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry) +void +vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry) { struct rb_head *rbh = &(mapHdr->rb_head_store); struct vm_map_store *store = &(entry->store); struct vm_map_store *tmp_store; - if((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) { + if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) { panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end, - (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end); + (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, 
(uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end); } } -void vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry) +void +vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry) { struct rb_head *rbh = &(mapHdr->rb_head_store); struct vm_map_store *rb_entry; struct vm_map_store *store = &(entry->store); - - rb_entry = RB_FIND( rb_head, rbh, store); - if(rb_entry == NULL) + + rb_entry = RB_FIND( rb_head, rbh, store); + if (rb_entry == NULL) { panic("NO ENTRY TO DELETE"); + } RB_REMOVE( rb_head, rbh, store ); } @@ -126,9 +136,9 @@ vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentri struct vm_map_header *mapHdr = &(copy->cpy_hdr); struct rb_head *rbh = &(mapHdr->rb_head_store); struct vm_map_store *store; - int deleted=0; - - while (entry != vm_map_copy_to_entry(copy) && nentries > 0) { + int deleted = 0; + + while (entry != vm_map_copy_to_entry(copy) && nentries > 0) { store = &(entry->store); RB_REMOVE( rb_head, rbh, store ); entry = entry->vme_next; @@ -137,14 +147,13 @@ vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentri } } -extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */ +extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */ void vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry); void vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry) { - vm_map_entry_t middle_hole_entry, last_hole_entry; hole_entry->vme_end = hole_entry->vme_next->vme_end; @@ -174,13 +183,10 @@ void vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry) { if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) { - if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) { - map->holes_list = NULL; SAVE_HINT_HOLE_WRITE(map, NULL); } else { - vm_map_entry_t l_next, l_prev; l_next = (vm_map_entry_t) map->holes_list->next; @@ -193,7 +199,6 @@ vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry) SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next); } } else { - SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev); hole_entry->vme_prev->vme_next = hole_entry->vme_next; @@ -214,11 +219,10 @@ vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry) static void check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry) { - vm_map_entry_t hole_entry, next_hole_entry; - vm_map_entry_t map_entry, next_map_entry; + vm_map_entry_t hole_entry, next_hole_entry; + vm_map_entry_t map_entry, next_map_entry; if (map->holes_list == NULL) { - return; } @@ -228,23 +232,24 @@ check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry) map_entry = vm_map_first_entry(map); next_map_entry = map_entry->vme_next; - while(map_entry->vme_start > hole_entry->vme_start) { + while (map_entry->vme_start > hole_entry->vme_start) { hole_entry = next_hole_entry; next_hole_entry = hole_entry->vme_next; - if (hole_entry == (vm_map_entry_t)map->holes_list) + if (hole_entry == (vm_map_entry_t)map->holes_list) { break; + } } while (map_entry != vm_map_to_entry(map)) { - - if (map_entry->vme_start >= map->max_offset) + if (map_entry->vme_start >= map->max_offset) { break; + } if (map_entry->vme_end != map_entry->vme_next->vme_start) { - - if (map_entry->vme_next == vm_map_to_entry(map)) + if (map_entry->vme_next == vm_map_to_entry(map)) { break; + } if (hole_entry->vme_start != map_entry->vme_end) { panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long 
long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry); @@ -259,8 +264,9 @@ check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry) hole_entry = next_hole_entry; next_hole_entry = hole_entry->vme_next; - if (hole_entry == (vm_map_entry_t)map->holes_list) + if (hole_entry == (vm_map_entry_t)map->holes_list) { break; + } } map_entry = map_entry->vme_next; @@ -289,38 +295,32 @@ update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry) * Dealing with the deletion of an older entry. */ - vm_map_entry_t hole_entry, next_hole_entry; + vm_map_entry_t hole_entry, next_hole_entry; #if DEBUG - struct vm_map_entry old_hole_entry; + struct vm_map_entry old_hole_entry; #endif /* DEBUG */ - boolean_t create_new_hole = TRUE; + boolean_t create_new_hole = TRUE; hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint); if (hole_entry) { - if (hole_entry->vme_end == old_entry->vme_start) { /* * Found a hole right after above our entry. * Hit. */ - } else if (hole_entry->vme_start == old_entry->vme_end) { - if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) { - /* * Found a hole right after below our entry but * make sure we don't erroneously extend backwards. - * + * * Hit. */ hole_entry = hole_entry->vme_prev; } - } else if (hole_entry->vme_start > old_entry->vme_end) { - /* * Useless hint. Start from the top. */ @@ -331,29 +331,27 @@ update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry) if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) { if (hole_entry->vme_start > old_entry->vme_start) { panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n", - (unsigned long long)hole_entry->vme_start, - (unsigned long long)old_entry->vme_start, - (unsigned long long)map->holes_list->start, - (unsigned long long)map->hole_hint->start); + (unsigned long long)hole_entry->vme_start, + (unsigned long long)old_entry->vme_start, + (unsigned long long)map->holes_list->start, + (unsigned long long)map->hole_hint->start); } if (hole_entry->vme_end > old_entry->vme_start) { panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n", - (unsigned long long)hole_entry->vme_end, - (unsigned long long)old_entry->vme_start, - (unsigned long long)map->holes_list->start, - (unsigned long long)map->hole_hint->start); + (unsigned long long)hole_entry->vme_end, + (unsigned long long)old_entry->vme_start, + (unsigned long long)map->holes_list->start, + (unsigned long long)map->hole_hint->start); } } while (1) { - next_hole_entry = hole_entry->vme_next; /* * Hole is right above the entry. */ if (hole_entry->vme_end == old_entry->vme_start) { - #if DEBUG copy_hole_info(hole_entry, &old_hole_entry); #endif /* DEBUG */ @@ -364,10 +362,8 @@ update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry) */ if (old_entry->vme_end == hole_entry->vme_next->vme_start) { - vm_map_combine_hole(map, hole_entry); } else { - hole_entry->vme_end = old_entry->vme_end; } create_new_hole = FALSE; @@ -381,7 +377,6 @@ update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry) * Hole is right below the entry. */ if (hole_entry->vme_start == old_entry->vme_end) { - #if DEBUG copy_hole_info(hole_entry, &old_hole_entry); #endif /* DEBUG */ @@ -401,7 +396,6 @@ update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry) * new hole that will be needed. 
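/*
 * Two idioms from the RB-tree store above: VME_FOR_STORE() recovers the
 * entry from its embedded tree node (the kernel subtracts sizeof(struct
 * vm_map_links); offsetof() below is the portable spelling of the same
 * trick), and rb_node_compare() orders nodes by address range, reporting
 * overlap as equality so a colliding insertion is detected. Local types
 * for illustration.
 */
#include <stddef.h>
#include <stdint.h>

struct store_node { void *left, *right; };

struct map_entry {
	uint64_t start, end;            /* [start, end) */
	struct store_node store;        /* embedded tree linkage */
};

#define ENTRY_FOR_STORE(s) \
	((struct map_entry *)((char *)(s) - offsetof(struct map_entry, store)))

static int
node_compare(struct store_node *node, struct store_node *parent)
{
	struct map_entry *c = ENTRY_FOR_STORE(node);
	struct map_entry *p = ENTRY_FOR_STORE(parent);

	if (c->start < p->start) {
		return -1;
	}
	if (c->start >= p->end) {
		return 1;
	}
	return 0;       /* starts inside the parent: ranges collide */
}

int
main(void)
{
	struct map_entry a = { 0x1000, 0x2000, { 0, 0 } };
	struct map_entry b = { 0x3000, 0x4000, { 0, 0 } };

	return (node_compare(&a.store, &b.store) == -1) ? 0 : 1;
}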
*/ if (hole_entry->vme_start > old_entry->vme_end) { - #if DEBUG copy_hole_info(hole_entry, &old_hole_entry); #endif /* DEBUG */ @@ -423,8 +417,8 @@ update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry) } if (create_new_hole) { - struct vm_map_links *new_hole_entry = NULL; - vm_map_entry_t l_next, l_prev; + struct vm_map_links *new_hole_entry = NULL; + vm_map_entry_t l_next, l_prev; new_hole_entry = zalloc(vm_map_holes_zone); @@ -434,13 +428,10 @@ update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry) * A hole that is located above the current first hole in the map? */ if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) { - if (map->holes_list == NULL) { - map->holes_list = new_hole_entry; new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list); } else { - l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list); l_prev = map->holes_list->prev; map->holes_list = new_hole_entry; @@ -450,7 +441,6 @@ update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry) l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry); } } else { - l_next = hole_entry->vme_next; l_prev = hole_entry->vme_next->vme_prev; @@ -483,12 +473,11 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry); void update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) { - - vm_map_entry_t hole_entry, next_hole_entry; + vm_map_entry_t hole_entry, next_hole_entry; #if DEBUG - struct vm_map_entry old_hole_entry; - vm_map_entry_t tmp_entry; - boolean_t check_map_with_hole_sanity = TRUE; + struct vm_map_entry old_hole_entry; + vm_map_entry_t tmp_entry; + boolean_t check_map_with_hole_sanity = TRUE; #endif /* DEBUG */ /* @@ -507,7 +496,6 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) next_hole_entry = hole_entry->vme_next; while (1) { - #if DEBUG /* * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where @@ -520,7 +508,6 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) if (hole_entry->vme_start == new_entry->vme_start && hole_entry->vme_end == new_entry->vme_end) { - /* Case A */ #if DEBUG copy_hole_info(hole_entry, &old_hole_entry); @@ -534,27 +521,26 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) * But for copy maps, the hole is deleted before the VM entry is * linked (vm_map_store_copy_insert) and so this check is invalid. 
* - if (hole_entry == (vm_map_entry_t) map->holes_list) { - - if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) { - - next_hole_entry = vm_map_last_entry(map); - assert(next_hole_entry->vme_end >= map->max_offset); - } - } - */ + * if (hole_entry == (vm_map_entry_t) map->holes_list) { + * + * if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) { + * + * next_hole_entry = vm_map_last_entry(map); + * assert(next_hole_entry->vme_end >= map->max_offset); + * } + * } + */ vm_map_delete_hole(map, hole_entry); #if DEBUG - if (check_map_with_hole_sanity) + if (check_map_with_hole_sanity) { check_map_sanity(map, &old_hole_entry); + } #endif /* DEBUG */ return; - } else if (hole_entry->vme_start < new_entry->vme_start && - hole_entry->vme_end > new_entry->vme_end) { - + hole_entry->vme_end > new_entry->vme_end) { /* Case B */ struct vm_map_links *new_hole_entry = NULL; @@ -577,15 +563,14 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) assert(new_hole_entry->start < new_hole_entry->end); #if DEBUG - if (check_map_with_hole_sanity) + if (check_map_with_hole_sanity) { check_map_sanity(map, &old_hole_entry); + } #endif /* DEBUG */ SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry); return; - } else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) { - /* * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry. */ @@ -595,7 +580,6 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) #endif /* DEBUG */ if (hole_entry->vme_end <= new_entry->vme_end) { - vm_map_delete_hole(map, hole_entry); } else { hole_entry->vme_start = new_entry->vme_end; @@ -603,14 +587,13 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) } #if DEBUG - if (check_map_with_hole_sanity) + if (check_map_with_hole_sanity) { check_map_sanity(map, &old_hole_entry); + } #endif /* DEBUG */ return; - } else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) { - /* * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry. 
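/*
 * The case analysis of update_holes_on_entry_creation() above as plain
 * interval arithmetic: carving a newly mapped range [s, e) out of an
 * overlapping hole either deletes the hole outright (case A), splits it in
 * two (case B), or clips one end (cases C1/C2). Sketch with local types;
 * the kernel mutates a circular list of holes in place.
 */
#include <stdint.h>

struct hole { uint64_t start, end; };   /* [start, end) */

/* Precondition: [s, e) overlaps h. Returns how many pieces survive. */
static int
carve(struct hole h, uint64_t s, uint64_t e, struct hole out[2])
{
	int n = 0;

	if (h.start < s) {
		out[n++] = (struct hole){ h.start, s };   /* piece below the entry */
	}
	if (e < h.end) {
		out[n++] = (struct hole){ e, h.end };     /* piece above the entry */
	}
	return n;       /* 0: case A, 1: case C1/C2, 2: case B */
}

int
main(void)
{
	struct hole out[2];

	/* Case B: entry [0x2000, 0x3000) splits hole [0x1000, 0x4000) in two. */
	return carve((struct hole){ 0x1000, 0x4000 }, 0x2000, 0x3000, out) == 2 ? 0 : 1;
}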
*/ @@ -627,8 +610,9 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) } #if DEBUG - if (check_map_with_hole_sanity) + if (check_map_with_hole_sanity) { check_map_sanity(map, &old_hole_entry); + } #endif /* DEBUG */ return; @@ -637,29 +621,27 @@ update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry) hole_entry = next_hole_entry; next_hole_entry = hole_entry->vme_next; - if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) + if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) { break; + } } panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n", - hole_entry->vme_prev, - (unsigned long long)hole_entry->vme_prev->vme_start, - (unsigned long long)hole_entry->vme_prev->vme_end, - hole_entry, - (unsigned long long)hole_entry->vme_start, - (unsigned long long)hole_entry->vme_end, - hole_entry->vme_next, - (unsigned long long)hole_entry->vme_next->vme_start, - (unsigned long long)hole_entry->vme_next->vme_end); - + hole_entry->vme_prev, + (unsigned long long)hole_entry->vme_prev->vme_start, + (unsigned long long)hole_entry->vme_prev->vme_end, + hole_entry, + (unsigned long long)hole_entry->vme_start, + (unsigned long long)hole_entry->vme_end, + hole_entry->vme_next, + (unsigned long long)hole_entry->vme_next->vme_start, + (unsigned long long)hole_entry->vme_next->vme_end); } void update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation) { - if (map->holelistenabled) { - /* * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map). */ @@ -694,10 +676,8 @@ update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_cre */ if (new_entry_creation) { - update_holes_on_entry_creation(map, entry); } else { - update_holes_on_entry_deletion(map, entry); } } diff --git a/osfmk/vm/vm_map_store_rb.h b/osfmk/vm/vm_map_store_rb.h index 82ac40321..2a87b2132 100644 --- a/osfmk/vm/vm_map_store_rb.h +++ b/osfmk/vm/vm_map_store_rb.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -37,9 +37,9 @@ void vm_map_store_init_rb( struct vm_map_header* ); int rb_node_compare(struct vm_map_store *, struct vm_map_store *); void vm_map_store_walk_rb( struct _vm_map*, struct vm_map_entry**, struct vm_map_entry**); boolean_t vm_map_store_lookup_entry_rb( struct _vm_map*, vm_map_offset_t, struct vm_map_entry**); -void vm_map_store_entry_link_rb( struct vm_map_header*, struct vm_map_entry*, struct vm_map_entry*); -void vm_map_store_entry_unlink_rb( struct vm_map_header*, struct vm_map_entry*); -void vm_map_store_copy_reset_rb( struct vm_map_copy*, struct vm_map_entry*, int); -void update_first_free_rb(struct _vm_map*, struct vm_map_entry*, boolean_t new_entry_creation); +void vm_map_store_entry_link_rb( struct vm_map_header*, struct vm_map_entry*, struct vm_map_entry*); +void vm_map_store_entry_unlink_rb( struct vm_map_header*, struct vm_map_entry*); +void vm_map_store_copy_reset_rb( struct vm_map_copy*, struct vm_map_entry*, int); +void update_first_free_rb(struct _vm_map*, struct vm_map_entry*, boolean_t new_entry_creation); #endif /* _VM_VM_MAP_STORE_RB_H */ diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c index 84f0ff6e8..099fdea8d 100644 --- a/osfmk/vm/vm_object.c +++ b/osfmk/vm/vm_object.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -124,8 +124,8 @@ vm_object_tracking_init(void) int vm_object_tracking; vm_object_tracking = 1; - PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking, - sizeof (vm_object_tracking)); + PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking, + sizeof(vm_object_tracking)); if (vm_object_tracking) { vm_object_tracking_btlog = btlog_create( @@ -187,7 +187,7 @@ vm_object_tracking_init(void) * that depend on the default memory manager are called * "internal". The "pager_created" field is provided to * indicate whether these ports have ever been allocated. - * + * * The kernel may also create virtual memory objects to * hold changed pages after a copy-on-write operation. * In this case, the virtual memory object (and its @@ -212,37 +212,37 @@ vm_object_tracking_init(void) */ /* Forward declarations for internal functions. */ -static kern_return_t vm_object_terminate( - vm_object_t object); +static kern_return_t vm_object_terminate( + vm_object_t object); -static kern_return_t vm_object_copy_call( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - vm_object_t *_result_object); +static kern_return_t vm_object_copy_call( + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + vm_object_t *_result_object); -static void vm_object_do_collapse( - vm_object_t object, - vm_object_t backing_object); +static void vm_object_do_collapse( + vm_object_t object, + vm_object_t backing_object); -static void vm_object_do_bypass( - vm_object_t object, - vm_object_t backing_object); +static void vm_object_do_bypass( + vm_object_t object, + vm_object_t backing_object); -static void vm_object_release_pager( - memory_object_t pager); +static void vm_object_release_pager( + memory_object_t pager); -zone_t vm_object_zone; /* vm backing store zone */ +zone_t vm_object_zone; /* vm backing store zone */ /* * All wired-down kernel memory belongs to a single virtual * memory object (kernel_object) to avoid wasting data structures. */ -static struct vm_object kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -vm_object_t kernel_object; +static struct vm_object kernel_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_object_t kernel_object; -static struct vm_object compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -vm_object_t compressor_object = &compressor_object_store; +static struct vm_object compressor_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_object_t compressor_object = &compressor_object_store; /* * The submap object is used as a placeholder for vm_map_submap @@ -250,7 +250,7 @@ vm_object_t compressor_object = &compressor_object_store; * is exported by the vm_map module. The storage is declared * here because it must be initialized here. */ -static struct vm_object vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +static struct vm_object vm_submap_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); /* * Virtual memory objects are initialized from @@ -260,75 +260,75 @@ static struct vm_object vm_submap_object_store __attribute__((aligned(VM_PACKE * object structure, be sure to add initialization * (see _vm_object_allocate()). 
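/*
 * Illustrative sketch (not from the patch): the boot-arg idiom
 * vm_object_tracking_init() uses above. PE_parse_boot_argn() leaves the
 * variable untouched when the boot-arg is absent, so the initializer is
 * the default policy. The boot-arg name is the real one from this hunk;
 * the helper wrapping it is hypothetical.
 */
static int vm_object_tracking = 1;              /* default: enabled */

static void
parse_tracking_boot_arg(void)
{
	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
	    sizeof(vm_object_tracking));
	if (vm_object_tracking) {
		/* only now pay for the backtrace log (btlog_create above) */
	}
}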
*/ -static struct vm_object vm_object_template; +static struct vm_object vm_object_template; unsigned int vm_page_purged_wired = 0; unsigned int vm_page_purged_busy = 0; unsigned int vm_page_purged_others = 0; -static queue_head_t vm_object_cached_list; -static uint32_t vm_object_cache_pages_freed = 0; -static uint32_t vm_object_cache_pages_moved = 0; -static uint32_t vm_object_cache_pages_skipped = 0; -static uint32_t vm_object_cache_adds = 0; -static uint32_t vm_object_cached_count = 0; -static lck_mtx_t vm_object_cached_lock_data; -static lck_mtx_ext_t vm_object_cached_lock_data_ext; +static queue_head_t vm_object_cached_list; +static uint32_t vm_object_cache_pages_freed = 0; +static uint32_t vm_object_cache_pages_moved = 0; +static uint32_t vm_object_cache_pages_skipped = 0; +static uint32_t vm_object_cache_adds = 0; +static uint32_t vm_object_cached_count = 0; +static lck_mtx_t vm_object_cached_lock_data; +static lck_mtx_ext_t vm_object_cached_lock_data_ext; -static uint32_t vm_object_page_grab_failed = 0; -static uint32_t vm_object_page_grab_skipped = 0; -static uint32_t vm_object_page_grab_returned = 0; -static uint32_t vm_object_page_grab_pmapped = 0; -static uint32_t vm_object_page_grab_reactivations = 0; +static uint32_t vm_object_page_grab_failed = 0; +static uint32_t vm_object_page_grab_skipped = 0; +static uint32_t vm_object_page_grab_returned = 0; +static uint32_t vm_object_page_grab_pmapped = 0; +static uint32_t vm_object_page_grab_reactivations = 0; -#define vm_object_cache_lock_spin() \ - lck_mtx_lock_spin(&vm_object_cached_lock_data) -#define vm_object_cache_unlock() \ - lck_mtx_unlock(&vm_object_cached_lock_data) +#define vm_object_cache_lock_spin() \ + lck_mtx_lock_spin(&vm_object_cached_lock_data) +#define vm_object_cache_unlock() \ + lck_mtx_unlock(&vm_object_cached_lock_data) -static void vm_object_cache_remove_locked(vm_object_t); +static void vm_object_cache_remove_locked(vm_object_t); static void vm_object_reap(vm_object_t object); static void vm_object_reap_async(vm_object_t object); static void vm_object_reaper_thread(void); -static lck_mtx_t vm_object_reaper_lock_data; -static lck_mtx_ext_t vm_object_reaper_lock_data_ext; +static lck_mtx_t vm_object_reaper_lock_data; +static lck_mtx_ext_t vm_object_reaper_lock_data_ext; static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */ unsigned int vm_object_reap_count = 0; unsigned int vm_object_reap_count_async = 0; -#define vm_object_reaper_lock() \ - lck_mtx_lock(&vm_object_reaper_lock_data) -#define vm_object_reaper_lock_spin() \ - lck_mtx_lock_spin(&vm_object_reaper_lock_data) -#define vm_object_reaper_unlock() \ - lck_mtx_unlock(&vm_object_reaper_lock_data) +#define vm_object_reaper_lock() \ + lck_mtx_lock(&vm_object_reaper_lock_data) +#define vm_object_reaper_lock_spin() \ + lck_mtx_lock_spin(&vm_object_reaper_lock_data) +#define vm_object_reaper_unlock() \ + lck_mtx_unlock(&vm_object_reaper_lock_data) #if CONFIG_IOSCHED /* I/O Re-prioritization request list */ -queue_head_t io_reprioritize_list; -lck_spin_t io_reprioritize_list_lock; +queue_head_t io_reprioritize_list; +lck_spin_t io_reprioritize_list_lock; -#define IO_REPRIORITIZE_LIST_LOCK() \ - lck_spin_lock(&io_reprioritize_list_lock) -#define IO_REPRIORITIZE_LIST_UNLOCK() \ - lck_spin_unlock(&io_reprioritize_list_lock) +#define IO_REPRIORITIZE_LIST_LOCK() \ + lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp) +#define IO_REPRIORITIZE_LIST_UNLOCK() \ + lck_spin_unlock(&io_reprioritize_list_lock) -#define 
MAX_IO_REPRIORITIZE_REQS 8192 -zone_t io_reprioritize_req_zone; +#define MAX_IO_REPRIORITIZE_REQS 8192 +zone_t io_reprioritize_req_zone; /* I/O Re-prioritization thread */ int io_reprioritize_wakeup = 0; static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused); -#define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup) -#define IO_REPRIO_THREAD_CONTINUATION() \ -{ \ - assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \ - thread_block(io_reprioritize_thread); \ +#define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup) +#define IO_REPRIO_THREAD_CONTINUATION() \ +{ \ + assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \ + thread_block(io_reprioritize_thread); \ } void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int); @@ -350,12 +350,12 @@ void vm_decmp_upl_reprioritize(upl_t, int); __private_extern__ void _vm_object_allocate( - vm_object_size_t size, - vm_object_t object) + vm_object_size_t size, + vm_object_t object) { XPR(XPR_VM_OBJECT, - "vm_object_allocate, object 0x%X size 0x%X\n", - object, size, 0,0,0); + "vm_object_allocate, object 0x%X size 0x%X\n", + object, size, 0, 0, 0); *object = vm_object_template; vm_page_queue_init(&object->memq); @@ -367,42 +367,43 @@ _vm_object_allocate( #if VM_OBJECT_TRACKING_OP_CREATED if (vm_object_tracking_inited) { - void *bt[VM_OBJECT_TRACKING_BTDEPTH]; - int numsaved = 0; + void *bt[VM_OBJECT_TRACKING_BTDEPTH]; + int numsaved = 0; numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH); btlog_add_entry(vm_object_tracking_btlog, - object, - VM_OBJECT_TRACKING_OP_CREATED, - bt, - numsaved); + object, + VM_OBJECT_TRACKING_OP_CREATED, + bt, + numsaved); } #endif /* VM_OBJECT_TRACKING_OP_CREATED */ } __private_extern__ vm_object_t vm_object_allocate( - vm_object_size_t size) + vm_object_size_t size) { vm_object_t object; object = (vm_object_t) zalloc(vm_object_zone); - + // dbgLog(object, size, 0, 2); /* (TEST/DEBUG) */ - if (object != VM_OBJECT_NULL) + if (object != VM_OBJECT_NULL) { _vm_object_allocate(size, object); + } return object; } -lck_grp_t vm_object_lck_grp; -lck_grp_t vm_object_cache_lck_grp; -lck_grp_attr_t vm_object_lck_grp_attr; -lck_attr_t vm_object_lck_attr; -lck_attr_t kernel_object_lck_attr; -lck_attr_t compressor_object_lck_attr; +lck_grp_t vm_object_lck_grp; +lck_grp_t vm_object_cache_lck_grp; +lck_grp_attr_t vm_object_lck_grp_attr; +lck_attr_t vm_object_lck_attr; +lck_attr_t kernel_object_lck_attr; +lck_attr_t compressor_object_lck_attr; extern void vm_named_entry_init(void); @@ -416,16 +417,16 @@ int workaround_41447923 = 0; __private_extern__ void vm_object_bootstrap(void) { - vm_size_t vm_object_size; + vm_size_t vm_object_size; - assert(sizeof (mo_ipc_object_bits_t) == sizeof (ipc_object_bits_t)); + assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t)); - vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1); + vm_object_size = (sizeof(struct vm_object) + (VM_PACKED_POINTER_ALIGNMENT - 1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1); vm_object_zone = zinit(vm_object_size, - round_page(512*1024), - round_page(12*1024), - "vm objects"); + round_page(512 * 1024), + round_page(12 * 1024), + "vm objects"); zone_change(vm_object_zone, Z_CALLERACCT, FALSE); /* don't charge caller */ zone_change(vm_object_zone, Z_NOENCRYPT, TRUE); zone_change(vm_object_zone, Z_ALIGNMENT_REQUIRED, TRUE); @@ -435,16 +436,16 @@ vm_object_bootstrap(void) queue_init(&vm_object_cached_list); 
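/*
 * Illustrative sketch (not from the patch): the size computation feeding
 * zinit() in vm_object_bootstrap() above. For a power-of-two alignment A,
 * (x + (A - 1)) & ~(A - 1) rounds x up to the next multiple of A, which is
 * how each "vm objects" zone element keeps VM_PACKED_POINTER_ALIGNMENT.
 */
static inline vm_size_t
round_up_pow2(vm_size_t x, vm_size_t align)
{
	/* e.g. align = 64, x = 200:  (200 + 63) & ~63 == 256 */
	return (x + (align - 1)) & ~(align - 1);
}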
lck_mtx_init_ext(&vm_object_cached_lock_data, - &vm_object_cached_lock_data_ext, - &vm_object_cache_lck_grp, - &vm_object_lck_attr); + &vm_object_cached_lock_data_ext, + &vm_object_cache_lck_grp, + &vm_object_lck_attr); queue_init(&vm_object_reaper_queue); lck_mtx_init_ext(&vm_object_reaper_lock_data, - &vm_object_reaper_lock_data_ext, - &vm_object_lck_grp, - &vm_object_lck_attr); + &vm_object_reaper_lock_data_ext, + &vm_object_lck_grp, + &vm_object_lck_attr); /* @@ -452,7 +453,7 @@ vm_object_bootstrap(void) */ /* memq; Lock; init after allocation */ - + vm_object_template.memq.prev = 0; vm_object_template.memq.next = 0; #if 0 @@ -471,9 +472,9 @@ vm_object_bootstrap(void) vm_object_template.vo_size = 0; vm_object_template.memq_hint = VM_PAGE_NULL; vm_object_template.ref_count = 1; -#if TASK_SWAPPER +#if TASK_SWAPPER vm_object_template.res_count = 1; -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ vm_object_template.resident_page_count = 0; vm_object_template.wired_page_count = 0; vm_object_template.reusable_page_count = 0; @@ -515,7 +516,7 @@ vm_object_bootstrap(void) vm_object_template.cached_list.prev = NULL; vm_object_template.cached_list.next = NULL; - + vm_object_template.last_alloc = (vm_object_offset_t) 0; vm_object_template.sequential = (vm_object_offset_t) 0; vm_object_template.pages_created = 0; @@ -546,7 +547,7 @@ vm_object_bootstrap(void) #endif /* UPL_DEBUG */ #ifdef VM_PIP_DEBUG bzero(&vm_object_template.pip_holders, - sizeof (vm_object_template.pip_holders)); + sizeof(vm_object_template.pip_holders)); #endif /* VM_PIP_DEBUG */ vm_object_template.objq.next = NULL; @@ -560,7 +561,7 @@ vm_object_bootstrap(void) vm_object_template.vo_cache_ts = 0; vm_object_template.wire_tag = VM_KERN_MEMORY_NONE; -#if ! VM_TAG_ACTIVE_UPDATE +#if !VM_TAG_ACTIVE_UPDATE vm_object_template.wired_objq.next = NULL; vm_object_template.wired_objq.prev = NULL; #endif /* ! VM_TAG_ACTIVE_UPDATE */ @@ -573,7 +574,7 @@ vm_object_bootstrap(void) #else /* CONFIG_SECLUDED_MEMORY */ vm_object_template.__object3_unused_bits = 0; #endif /* CONFIG_SECLUDED_MEMORY */ - + #if VM_OBJECT_ACCESS_TRACKING vm_object_template.access_tracking = FALSE; vm_object_template.access_tracking_reads = 0; @@ -582,10 +583,10 @@ vm_object_bootstrap(void) #if DEBUG bzero(&vm_object_template.purgeable_owner_bt[0], - sizeof (vm_object_template.purgeable_owner_bt)); + sizeof(vm_object_template.purgeable_owner_bt)); vm_object_template.vo_purgeable_volatilizer = NULL; bzero(&vm_object_template.purgeable_volatilizer_bt[0], - sizeof (vm_object_template.purgeable_volatilizer_bt)); + sizeof(vm_object_template.purgeable_volatilizer_bt)); #endif /* DEBUG */ /* @@ -595,15 +596,15 @@ vm_object_bootstrap(void) kernel_object = &kernel_object_store; /* - * Note that in the following size specifications, we need to add 1 because + * Note that in the following size specifications, we need to add 1 because * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size. 
*/ _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, - kernel_object); + kernel_object); _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, - compressor_object); + compressor_object); kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; kernel_object->no_tag_update = TRUE; @@ -615,7 +616,7 @@ vm_object_bootstrap(void) vm_submap_object = &vm_submap_object_store; _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, - vm_submap_object); + vm_submap_object); vm_submap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE; /* @@ -627,40 +628,40 @@ vm_object_bootstrap(void) vm_named_entry_init(); - PE_parse_boot_argn("workaround_41447923", &workaround_41447923, - sizeof (workaround_41447923)); + PE_parse_boot_argn("workaround_41447923", &workaround_41447923, + sizeof(workaround_41447923)); } #if CONFIG_IOSCHED void vm_io_reprioritize_init(void) { - kern_return_t result; - thread_t thread = THREAD_NULL; + kern_return_t result; + thread_t thread = THREAD_NULL; /* Initialze the I/O reprioritization subsystem */ - lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr); - queue_init(&io_reprioritize_list); + lck_spin_init(&io_reprioritize_list_lock, &vm_object_lck_grp, &vm_object_lck_attr); + queue_init(&io_reprioritize_list); io_reprioritize_req_zone = zinit(sizeof(struct io_reprioritize_req), - MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req), - 4096, "io_reprioritize_req"); + MAX_IO_REPRIORITIZE_REQS * sizeof(struct io_reprioritize_req), + 4096, "io_reprioritize_req"); zone_change(io_reprioritize_req_zone, Z_COLLECT, FALSE); result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread); - if (result == KERN_SUCCESS) { - thread_deallocate(thread); - } else { - panic("Could not create io_reprioritize_thread"); - } + if (result == KERN_SUCCESS) { + thread_deallocate(thread); + } else { + panic("Could not create io_reprioritize_thread"); + } } #endif void vm_object_reaper_init(void) { - kern_return_t kr; - thread_t thread; + kern_return_t kr; + thread_t thread; kr = kernel_thread_start_priority( (thread_continue_t) vm_object_reaper_thread, @@ -716,15 +717,16 @@ unsigned long vm_object_deallocate_shared_swap_failures = 0; __private_extern__ void vm_object_deallocate( - vm_object_t object) + vm_object_t object) { - vm_object_t shadow = VM_OBJECT_NULL; - + vm_object_t shadow = VM_OBJECT_NULL; + // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */ // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */ - if (object == VM_OBJECT_NULL) - return; + if (object == VM_OBJECT_NULL) { + return; + } if (object == kernel_object || object == compressor_object) { vm_object_lock_shared(object); @@ -732,10 +734,11 @@ vm_object_deallocate( OSAddAtomic(-1, &object->ref_count); if (object->ref_count == 0) { - if (object == kernel_object) + if (object == kernel_object) { panic("vm_object_deallocate: losing kernel_object\n"); - else + } else { panic("vm_object_deallocate: losing compressor_object\n"); + } } vm_object_unlock(object); return; @@ -749,18 +752,18 @@ vm_object_deallocate( * we'll need to call memory_object_last_unmap(). */ } else if (object->ref_count == 2 && - object->internal && - object->shadow != VM_OBJECT_NULL) { + object->internal && + object->shadow != VM_OBJECT_NULL) { /* * This internal object's reference count is about to * drop from 2 to 1 and it has a shadow object: * we'll want to try and collapse this object with its * shadow. 
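/*
 * Illustrative sketch (not from the patch): the kernel-thread spawn idiom
 * from vm_io_reprioritize_init() earlier in this hunk.
 * kernel_thread_start_priority() hands back a referenced thread_t, and a
 * caller that keeps no handle must thread_deallocate() it. The wrapper
 * name is hypothetical; the priority constant is the one the patch uses.
 */
static void
spawn_vm_worker(thread_continue_t fn)
{
	thread_t        thread = THREAD_NULL;
	kern_return_t   result;

	result = kernel_thread_start_priority(fn, NULL,
	    95 /* MAXPRI_KERNEL */, &thread);
	if (result != KERN_SUCCESS) {
		panic("spawn_vm_worker: thread creation failed");
	}
	thread_deallocate(thread);      /* drop the creation reference */
}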
*/ - } else if (object->ref_count >= 2) { - UInt32 original_ref_count; - volatile UInt32 *ref_count_p; - Boolean atomic_swap; + } else if (object->ref_count >= 2) { + UInt32 original_ref_count; + volatile UInt32 *ref_count_p; + Boolean atomic_swap; /* * The object currently looks like it is not being @@ -771,7 +774,7 @@ vm_object_deallocate( * Lock the object "shared" to make sure we don't race with * anyone holding it "exclusive". */ - vm_object_lock_shared(object); + vm_object_lock_shared(object); ref_count_p = (volatile UInt32 *) &object->ref_count; original_ref_count = object->ref_count; /* @@ -783,11 +786,11 @@ vm_object_deallocate( /* need to take slow path for m_o_last_unmap() */ atomic_swap = FALSE; } else if (original_ref_count == 2 && - object->internal && - object->shadow != VM_OBJECT_NULL) { + object->internal && + object->shadow != VM_OBJECT_NULL) { /* need to take slow path for vm_object_collapse() */ atomic_swap = FALSE; - } else if (original_ref_count < 2) { + } else if (original_ref_count < 2) { /* need to take slow path for vm_object_terminate() */ atomic_swap = FALSE; } else { @@ -801,7 +804,7 @@ vm_object_deallocate( /* fall back to the slow path... */ } } - + vm_object_unlock(object); if (atomic_swap) { @@ -821,7 +824,6 @@ vm_object_deallocate( } while (object != VM_OBJECT_NULL) { - vm_object_lock(object); assert(object->ref_count > 0); @@ -831,8 +833,8 @@ vm_object_deallocate( * that reference would remain, inform the pager * about the last "mapping" reference going away. */ - if ((object->ref_count == 2) && (object->named)) { - memory_object_t pager = object->pager; + if ((object->ref_count == 2) && (object->named)) { + memory_object_t pager = object->pager; /* Notify the Pager that there are no */ /* more mappers for this object */ @@ -887,7 +889,7 @@ vm_object_deallocate( */ vm_object_collapse(object, 0, FALSE); } - vm_object_unlock(object); + vm_object_unlock(object); return; } @@ -895,12 +897,12 @@ vm_object_deallocate( * We have to wait for initialization * before destroying or caching the object. */ - - if (object->pager_created && ! object->pager_initialized) { - assert(! object->can_persist); + + if (object->pager_created && !object->pager_initialized) { + assert(!object->can_persist); vm_object_assert_wait(object, - VM_OBJECT_EVENT_INITIALIZED, - THREAD_UNINT); + VM_OBJECT_EVENT_INITIALIZED, + THREAD_UNINT); vm_object_unlock(object); thread_block(THREAD_CONTINUE_NULL); @@ -911,9 +913,9 @@ vm_object_deallocate( "vm_o_deallocate: 0x%X res %d paging_ops %d thread 0x%p ref %d\n", object, object->resident_page_count, object->paging_in_progress, - (void *)current_thread(),object->ref_count); + (void *)current_thread(), object->ref_count); - VM_OBJ_RES_DECR(object); /* XXX ? */ + VM_OBJ_RES_DECR(object); /* XXX ? */ /* * Terminate this object. 
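/*
 * Illustrative sketch (not from the patch): the shape of the lock-free
 * fast path vm_object_deallocate() sets up above. Holding the object lock
 * shared guarantees no exclusive holder, so a compare-and-swap on
 * ref_count is safe whenever the transition needs none of the slow-path
 * work (last unmap, collapse, terminate). This sketch is deliberately
 * more conservative than the real predicate -- it sends every 2 -> 1
 * transition to the slow path -- and the OSCompareAndSwap() call is an
 * assumption; the actual swap sits outside this hunk.
 */
static boolean_t
try_fast_ref_release(vm_object_t object)
{
	UInt32  original_ref_count;

	vm_object_lock_shared(object);
	original_ref_count = object->ref_count;

	if (original_ref_count < 3) {
		/* 2 -> 1 and 1 -> 0 may need collapse/unmap/terminate */
		vm_object_unlock(object);
		return FALSE;
	}
	if (!OSCompareAndSwap(original_ref_count, original_ref_count - 1,
	    (volatile UInt32 *) &object->ref_count)) {
		vm_object_unlock(object);
		return FALSE;           /* raced; caller takes the slow path */
	}
	vm_object_unlock(object);
	return TRUE;
}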
If it had a shadow, * then deallocate it; otherwise, if we need @@ -925,11 +927,11 @@ vm_object_deallocate( shadow = object->pageout?VM_OBJECT_NULL:object->shadow; if (vm_object_terminate(object) != KERN_SUCCESS) { - return; + return; } if (shadow != VM_OBJECT_NULL) { - object = shadow; - continue; + object = shadow; + continue; } return; } @@ -939,11 +941,11 @@ vm_object_deallocate( vm_page_t vm_object_page_grab( - vm_object_t object) + vm_object_t object) { - vm_page_t p, next_p; - int p_limit = 0; - int p_skipped = 0; + vm_page_t p, next_p; + int p_limit = 0; + int p_skipped = 0; vm_object_lock_assert_exclusive(object); @@ -951,12 +953,12 @@ vm_object_page_grab( p_limit = MIN(50, object->resident_page_count); while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) { - p = next_p; next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq); - if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) + if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) { goto move_page_in_obj; + } if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) { vm_page_lockspin_queues(); @@ -967,27 +969,28 @@ vm_object_page_grab( vm_object_page_grab_pmapped++; if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) { - refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p)); - if (refmod_state & VM_MEM_REFERENCED) + if (refmod_state & VM_MEM_REFERENCED) { p->vmp_reference = TRUE; + } if (refmod_state & VM_MEM_MODIFIED) { SET_PAGE_DIRTY(p, FALSE); } } if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) { - refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); - if (refmod_state & VM_MEM_REFERENCED) + if (refmod_state & VM_MEM_REFERENCED) { p->vmp_reference = TRUE; + } if (refmod_state & VM_MEM_MODIFIED) { SET_PAGE_DIRTY(p, FALSE); } - if (p->vmp_dirty == FALSE) + if (p->vmp_dirty == FALSE) { goto take_page; + } } } if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) { @@ -998,8 +1001,8 @@ vm_object_page_grab( } vm_page_unlock_queues(); move_page_in_obj: - vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); - vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); + vm_page_queue_remove(&object->memq, p, vmp_listq); + vm_page_queue_enter(&object->memq, p, vmp_listq); p_skipped++; continue; @@ -1013,25 +1016,25 @@ take_page: vm_page_unlock_queues(); vm_page_free_prepare_object(p, TRUE); - - return (p); + + return p; } vm_object_page_grab_skipped += p_skipped; vm_object_page_grab_failed++; - return (NULL); + return NULL; } -#define EVICT_PREPARE_LIMIT 64 -#define EVICT_AGE 10 +#define EVICT_PREPARE_LIMIT 64 +#define EVICT_AGE 10 -static clock_sec_t vm_object_cache_aging_ts = 0; +static clock_sec_t vm_object_cache_aging_ts = 0; static void vm_object_cache_remove_locked( - vm_object_t object) + vm_object_t object) { assert(object->purgable == VM_PURGABLE_DENY); @@ -1044,28 +1047,30 @@ vm_object_cache_remove_locked( void vm_object_cache_remove( - vm_object_t object) + vm_object_t object) { vm_object_cache_lock_spin(); if (object->cached_list.next && - object->cached_list.prev) + object->cached_list.prev) { vm_object_cache_remove_locked(object); + } vm_object_cache_unlock(); } void vm_object_cache_add( - vm_object_t object) + vm_object_t object) { clock_sec_t sec; clock_nsec_t nsec; assert(object->purgable == VM_PURGABLE_DENY); - if (object->resident_page_count == 0) + if (object->resident_page_count == 0) { return; + } clock_get_system_nanotime(&sec, &nsec); 
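/*
 * Illustrative sketch (not from the patch): the two moves inside
 * vm_object_page_grab() above. Unusable pages rotate to the memq tail so
 * the next grab starts on fresh candidates -- note that
 * vm_page_queue_remove() and vm_page_queue_enter() drop their explicit
 * vm_page_t type argument in this release, one of the few non-whitespace
 * changes in these hunks. Candidate pages fold the hardware
 * referenced/modified bits back into the vm_page before the dirty check.
 * Helper names are hypothetical.
 */
static void
rotate_to_tail(vm_object_t object, vm_page_t p)
{
	/* old form: vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); */
	vm_page_queue_remove(&object->memq, p, vmp_listq);
	vm_page_queue_enter(&object->memq, p, vmp_listq);
}

static void
harvest_refmod(vm_page_t p)
{
	int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));

	if (refmod_state & VM_MEM_REFERENCED) {
		p->vmp_reference = TRUE;        /* hardware saw an access */
	}
	if (refmod_state & VM_MEM_MODIFIED) {
		SET_PAGE_DIRTY(p, FALSE);       /* cannot be grabbed while dirty */
	}
}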
vm_object_cache_lock_spin(); @@ -1084,33 +1089,33 @@ vm_object_cache_add( int vm_object_cache_evict( - int num_to_evict, - int max_objects_to_examine) + int num_to_evict, + int max_objects_to_examine) { - vm_object_t object = VM_OBJECT_NULL; - vm_object_t next_obj = VM_OBJECT_NULL; - vm_page_t local_free_q = VM_PAGE_NULL; - vm_page_t p; - vm_page_t next_p; - int object_cnt = 0; - vm_page_t ep_array[EVICT_PREPARE_LIMIT]; - int ep_count; - int ep_limit; - int ep_index; - int ep_freed = 0; - int ep_moved = 0; - uint32_t ep_skipped = 0; - clock_sec_t sec; - clock_nsec_t nsec; + vm_object_t object = VM_OBJECT_NULL; + vm_object_t next_obj = VM_OBJECT_NULL; + vm_page_t local_free_q = VM_PAGE_NULL; + vm_page_t p; + vm_page_t next_p; + int object_cnt = 0; + vm_page_t ep_array[EVICT_PREPARE_LIMIT]; + int ep_count; + int ep_limit; + int ep_index; + int ep_freed = 0; + int ep_moved = 0; + uint32_t ep_skipped = 0; + clock_sec_t sec; + clock_nsec_t nsec; KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0); /* - * do a couple of quick checks to see if it's + * do a couple of quick checks to see if it's * worthwhile grabbing the lock */ if (queue_empty(&vm_object_cached_list)) { KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0); - return (0); + return 0; } clock_get_system_nanotime(&sec, &nsec); @@ -1120,10 +1125,10 @@ vm_object_cache_evict( */ if (sec < vm_object_cache_aging_ts) { KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0); - return (0); + return 0; } /* - * don't need the queue lock to find + * don't need the queue lock to find * and lock an object on the cached list */ vm_page_unlock_queues(); @@ -1134,12 +1139,11 @@ vm_object_cache_evict( next_obj = (vm_object_t)queue_first(&vm_object_cached_list); while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) { - object = next_obj; next_obj = (vm_object_t)queue_next(&next_obj->cached_list); assert(object->purgable == VM_PURGABLE_DENY); - + if (sec < object->vo_cache_ts) { KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0); @@ -1165,7 +1169,7 @@ vm_object_cache_evict( * the list, we'll never move past it. */ KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0); - + vm_object_cache_remove_locked(object); vm_object_unlock(object); object = VM_OBJECT_NULL; @@ -1179,8 +1183,9 @@ vm_object_cache_evict( } vm_object_cache_unlock(); - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { break; + } /* * object is locked at this point and @@ -1196,27 +1201,27 @@ vm_object_cache_evict( * tenfold... and we may have a 'run' of pages we can't utilize that * needs to be skipped over... 
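/*
 * Illustrative sketch (not from the patch): the lock-free early-outs at
 * the top of vm_object_cache_evict() above, expressed as a predicate. The
 * unlocked queue_empty() peek is tolerable because the result is
 * re-validated under vm_object_cache_lock_spin(); the seconds-granularity
 * timestamp keeps eviction from running before the youngest cached
 * objects have aged.
 */
static boolean_t
cache_evict_worthwhile(void)
{
	clock_sec_t     sec;
	clock_nsec_t    nsec;

	if (queue_empty(&vm_object_cached_list)) {
		return FALSE;
	}
	clock_get_system_nanotime(&sec, &nsec);
	if (sec < vm_object_cache_aging_ts) {
		return FALSE;           /* nothing has aged yet */
	}
	return TRUE;
}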
*/ - if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) + if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) { ep_limit = EVICT_PREPARE_LIMIT; + } ep_count = 0; while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) { - p = next_p; next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq); object->vo_cache_pages_to_scan--; if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) { - vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); - vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); + vm_page_queue_remove(&object->memq, p, vmp_listq); + vm_page_queue_enter(&object->memq, p, vmp_listq); ep_skipped++; continue; } if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) { - vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); - vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); + vm_page_queue_remove(&object->memq, p, vmp_listq); + vm_page_queue_enter(&object->memq, p, vmp_listq); pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p)); } @@ -1227,7 +1232,6 @@ vm_object_cache_evict( vm_page_lockspin_queues(); for (ep_index = 0; ep_index < ep_count; ep_index++) { - p = ep_array[ep_index]; if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) { @@ -1294,7 +1298,7 @@ vm_object_cache_evict( } /* * put the page queues lock back to the caller's - * idea of it + * idea of it */ vm_page_lock_queues(); @@ -1303,7 +1307,7 @@ vm_object_cache_evict( vm_object_cache_pages_skipped += ep_skipped; KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0); - return (ep_freed); + return ep_freed; } /* @@ -1325,12 +1329,12 @@ vm_object_cache_evict( */ static kern_return_t vm_object_terminate( - vm_object_t object) + vm_object_t object) { - vm_object_t shadow_object; + vm_object_t shadow_object; XPR(XPR_VM_OBJECT, "vm_object_terminate, object 0x%X ref %d\n", - object, object->ref_count, 0, 0, 0); + object, object->ref_count, 0, 0, 0); vm_object_lock_assert_exclusive(object); @@ -1378,8 +1382,9 @@ vm_object_terminate( if (!object->internal && object->cached_list.next && - object->cached_list.prev) + object->cached_list.prev) { vm_object_cache_remove(object); + } /* * Detach the object from its shadow if we are the shadow's @@ -1389,8 +1394,9 @@ vm_object_terminate( if (((shadow_object = object->shadow) != VM_OBJECT_NULL) && !(object->pageout)) { vm_object_lock(shadow_object); - if (shadow_object->copy == object) + if (shadow_object->copy == object) { shadow_object->copy = VM_OBJECT_NULL; + } vm_object_unlock(shadow_object); } @@ -1462,7 +1468,7 @@ void vm_object_reap( vm_object_t object) { - memory_object_t pager; + memory_object_t pager; vm_object_lock_assert_exclusive(object); assert(object->paging_in_progress == 0); @@ -1477,28 +1483,29 @@ vm_object_reap( */ if (object->internal && (object->purgable != VM_PURGABLE_DENY || - object->vo_ledger_tag)) { + object->vo_ledger_tag)) { assert(!object->alive); assert(object->terminating); vm_object_ownership_change(object, - object->vo_ledger_tag, /* unchanged */ - NULL, /* no owner */ - FALSE); /* task_objq not locked */ + object->vo_ledger_tag, /* unchanged */ + NULL, /* no owner */ + FALSE); /* task_objq not locked */ assert(object->vo_owner == NULL); } pager = object->pager; object->pager = MEMORY_OBJECT_NULL; - if (pager != MEMORY_OBJECT_NULL) + if (pager != MEMORY_OBJECT_NULL) { memory_object_control_disable(object->pager_control); + } object->ref_count--; -#if TASK_SWAPPER +#if 
TASK_SWAPPER assert(object->res_count == 0); -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ - assert (object->ref_count == 0); + assert(object->ref_count == 0); /* * remove from purgeable queue if it's on @@ -1523,42 +1530,41 @@ vm_object_reap( */ vm_page_lock_queues(); vm_purgeable_token_delete_first(queue); - - assert(queue->debug_count_objects>=0); + + assert(queue->debug_count_objects >= 0); vm_page_unlock_queues(); } /* * Update "vm_page_purgeable_count" in bulk and mark - * object as VM_PURGABLE_EMPTY to avoid updating + * object as VM_PURGABLE_EMPTY to avoid updating * "vm_page_purgeable_count" again in vm_page_remove() * when reaping the pages. */ unsigned int delta; assert(object->resident_page_count >= - object->wired_page_count); + object->wired_page_count); delta = (object->resident_page_count - - object->wired_page_count); + object->wired_page_count); if (delta != 0) { assert(vm_page_purgeable_count >= delta); OSAddAtomic(-delta, - (SInt32 *)&vm_page_purgeable_count); + (SInt32 *)&vm_page_purgeable_count); } if (object->wired_page_count != 0) { assert(vm_page_purgeable_wired_count >= - object->wired_page_count); + object->wired_page_count); OSAddAtomic(-object->wired_page_count, - (SInt32 *)&vm_page_purgeable_wired_count); + (SInt32 *)&vm_page_purgeable_wired_count); } object->purgable = VM_PURGABLE_EMPTY; - } - else if (object->purgable == VM_PURGABLE_NONVOLATILE || - object->purgable == VM_PURGABLE_EMPTY) { + } else if (object->purgable == VM_PURGABLE_NONVOLATILE || + object->purgable == VM_PURGABLE_EMPTY) { /* remove from nonvolatile queue */ vm_purgeable_nonvolatile_dequeue(object); } else { panic("object %p in unexpected purgeable state 0x%x\n", - object, object->purgable); + object, object->purgable); } if (object->transposed && object->cached_list.next != NULL && @@ -1572,7 +1578,7 @@ vm_object_reap( } assert(object->cached_list.prev == NULL); } - + if (object->pageout) { /* * free all remaining pages tabled on @@ -1582,10 +1588,9 @@ vm_object_reap( assert(object->shadow != VM_OBJECT_NULL); vm_pageout_object_terminate(object); - } else if (object->resident_page_count) { /* - * free all remaining pages tabled on + * free all remaining pages tabled on * this object */ vm_object_reap_pages(object, REAP_REAP); @@ -1617,7 +1622,7 @@ vm_object_reap( #if VM_OBJECT_TRACKING if (vm_object_tracking_inited) { btlog_remove_entries_for_element(vm_object_tracking_btlog, - object); + object); } #endif /* VM_OBJECT_TRACKING */ @@ -1634,39 +1639,39 @@ unsigned int vm_max_batch = 256; #define V_O_R_MAX_BATCH 128 -#define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch) - - -#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \ - MACRO_BEGIN \ - if (_local_free_q) { \ - if (do_disconnect) { \ - vm_page_t m; \ - for (m = _local_free_q; \ - m != VM_PAGE_NULL; \ - m = m->vmp_snext) { \ - if (m->vmp_pmapped) { \ - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \ - } \ - } \ - } \ - vm_page_free_list(_local_free_q, TRUE); \ - _local_free_q = VM_PAGE_NULL; \ - } \ +#define BATCH_LIMIT(max) (vm_max_batch >= max ? 
max : vm_max_batch) + + +#define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \ + MACRO_BEGIN \ + if (_local_free_q) { \ + if (do_disconnect) { \ + vm_page_t m; \ + for (m = _local_free_q; \ + m != VM_PAGE_NULL; \ + m = m->vmp_snext) { \ + if (m->vmp_pmapped) { \ + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \ + } \ + } \ + } \ + vm_page_free_list(_local_free_q, TRUE); \ + _local_free_q = VM_PAGE_NULL; \ + } \ MACRO_END void vm_object_reap_pages( - vm_object_t object, - int reap_type) + vm_object_t object, + int reap_type) { - vm_page_t p; - vm_page_t next; - vm_page_t local_free_q = VM_PAGE_NULL; - int loop_count; - boolean_t disconnect_on_release; - pmap_flush_context pmap_flush_context_storage; + vm_page_t p; + vm_page_t next; + vm_page_t local_free_q = VM_PAGE_NULL; + int loop_count; + boolean_t disconnect_on_release; + pmap_flush_context pmap_flush_context_storage; if (reap_type == REAP_DATA_FLUSH) { /* @@ -1684,30 +1689,29 @@ vm_object_reap_pages( */ disconnect_on_release = FALSE; } - + restart_after_sleep: - if (vm_page_queue_empty(&object->memq)) + if (vm_page_queue_empty(&object->memq)) { return; + } loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); - if (reap_type == REAP_PURGEABLE) + if (reap_type == REAP_PURGEABLE) { pmap_flush_context_init(&pmap_flush_context_storage); + } vm_page_lockspin_queues(); next = (vm_page_t)vm_page_queue_first(&object->memq); while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) { - p = next; next = (vm_page_t)vm_page_queue_next(&next->vmp_listq); if (--loop_count == 0) { - vm_page_unlock_queues(); if (local_free_q) { - if (reap_type == REAP_PURGEABLE) { pmap_flush(&pmap_flush_context_storage); pmap_flush_context_init(&pmap_flush_context_storage); @@ -1718,34 +1722,33 @@ restart_after_sleep: * hogging the page queue lock too long */ VM_OBJ_REAP_FREELIST(local_free_q, - disconnect_on_release); - } else + disconnect_on_release); + } else { mutex_pause(0); + } loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH); vm_page_lockspin_queues(); } if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) { - if (p->vmp_busy || p->vmp_cleaning) { - vm_page_unlock_queues(); /* * free the pages reclaimed so far */ VM_OBJ_REAP_FREELIST(local_free_q, - disconnect_on_release); + disconnect_on_release); PAGE_SLEEP(object, p, THREAD_UNINT); goto restart_after_sleep; } - if (p->vmp_laundry) + if (p->vmp_laundry) { vm_pageout_steal_laundry(p, TRUE); + } } switch (reap_type) { - case REAP_DATA_FLUSH: if (VM_PAGE_WIRED(p)) { /* @@ -1757,7 +1760,7 @@ restart_after_sleep: continue; } break; - + case REAP_PURGEABLE: if (VM_PAGE_WIRED(p)) { /* @@ -1766,8 +1769,9 @@ restart_after_sleep: vm_page_purged_wired++; continue; } - if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) + if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) { vm_pageout_steal_laundry(p, TRUE); + } if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) { /* @@ -1784,8 +1788,9 @@ restart_after_sleep: * sure that it gets considered by * vm_pageout_scan() later. 
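/*
 * Illustrative sketch (not from the patch): VM_OBJ_REAP_FREELIST() above
 * rewritten as a function for readability. It optionally severs any
 * residual pmap mappings, then frees the private list with a single
 * vm_page_free_list() call; the macro form exists so the caller's list
 * variable can be reset in place.
 */
static void
reap_free_list(vm_page_t *local_free_q, boolean_t do_disconnect)
{
	vm_page_t m;

	if (*local_free_q == VM_PAGE_NULL) {
		return;
	}
	if (do_disconnect) {
		for (m = *local_free_q; m != VM_PAGE_NULL; m = m->vmp_snext) {
			if (m->vmp_pmapped) {
				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
			}
		}
	}
	vm_page_free_list(*local_free_q, TRUE);
	*local_free_q = VM_PAGE_NULL;
}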
*/ - if (VM_PAGE_PAGEABLE(p)) + if (VM_PAGE_PAGEABLE(p)) { vm_page_deactivate(p); + } vm_page_purged_busy++; continue; } @@ -1817,16 +1822,16 @@ restart_after_sleep: break; } if (p->vmp_fictitious) { - assert (VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr); + assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr); break; } - if (!p->vmp_dirty && p->vmp_wpmapped) + if (!p->vmp_dirty && p->vmp_wpmapped) { p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)); + } if ((p->vmp_dirty || p->vmp_precious) && !p->vmp_error && object->alive) { - assert(!object->internal); - + p->vmp_free_when_done = TRUE; if (!p->vmp_laundry) { @@ -1842,7 +1847,7 @@ restart_after_sleep: * free the pages reclaimed so far */ VM_OBJ_REAP_FREELIST(local_free_q, - disconnect_on_release); + disconnect_on_release); vm_object_paging_wait(object, THREAD_UNINT); @@ -1867,17 +1872,18 @@ restart_after_sleep: /* * Free the remaining reclaimed pages */ - if (reap_type == REAP_PURGEABLE) + if (reap_type == REAP_PURGEABLE) { pmap_flush(&pmap_flush_context_storage); + } VM_OBJ_REAP_FREELIST(local_free_q, - disconnect_on_release); + disconnect_on_release); } void vm_object_reap_async( - vm_object_t object) + vm_object_t object) { vm_object_lock_assert_exclusive(object); @@ -1887,7 +1893,7 @@ vm_object_reap_async( /* enqueue the VM object... */ queue_enter(&vm_object_reaper_queue, object, - vm_object_t, cached_list); + vm_object_t, cached_list); vm_object_reaper_unlock(); @@ -1899,22 +1905,22 @@ vm_object_reap_async( void vm_object_reaper_thread(void) { - vm_object_t object, shadow_object; + vm_object_t object, shadow_object; vm_object_reaper_lock_spin(); while (!queue_empty(&vm_object_reaper_queue)) { queue_remove_first(&vm_object_reaper_queue, - object, - vm_object_t, - cached_list); + object, + vm_object_t, + cached_list); vm_object_reaper_unlock(); vm_object_lock(object); assert(object->terminating); assert(!object->alive); - + /* * The pageout daemon might be playing with our pages. * Now that the object is dead, it won't touch any more @@ -1924,15 +1930,15 @@ vm_object_reaper_thread(void) * itself. */ while (object->paging_in_progress != 0 || - object->activity_in_progress != 0) { + object->activity_in_progress != 0) { vm_object_wait(object, - VM_OBJECT_EVENT_PAGING_IN_PROGRESS, - THREAD_UNINT); + VM_OBJECT_EVENT_PAGING_IN_PROGRESS, + THREAD_UNINT); vm_object_lock(object); } shadow_object = - object->pageout ? VM_OBJECT_NULL : object->shadow; + object->pageout ? VM_OBJECT_NULL : object->shadow; vm_object_reap(object); /* cache is unlocked and object is no longer valid */ @@ -1965,9 +1971,8 @@ vm_object_reaper_thread(void) */ static void vm_object_release_pager( - memory_object_t pager) + memory_object_t pager) { - /* * Terminate the pager. */ @@ -1989,13 +1994,14 @@ vm_object_release_pager( */ kern_return_t vm_object_destroy( - vm_object_t object, - __unused kern_return_t reason) + vm_object_t object, + __unused kern_return_t reason) { - memory_object_t old_pager; + memory_object_t old_pager; - if (object == VM_OBJECT_NULL) - return(KERN_SUCCESS); + if (object == VM_OBJECT_NULL) { + return KERN_SUCCESS; + } /* * Remove the pager association immediately. 
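/*
 * Illustrative sketch (not from the patch): the producer half of the
 * asynchronous reaper shown above. vm_object_reap_async() parks the dead
 * object on vm_object_reaper_queue under the spin lock; the wakeup event
 * below is an assumption, since the actual thread_wakeup() call falls
 * outside this hunk.
 */
static void
reap_async_sketch(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);

	vm_object_reaper_lock_spin();
	queue_enter(&vm_object_reaper_queue, object,
	    vm_object_t, cached_list);          /* reuses the cached_list linkage */
	vm_object_reaper_unlock();

	/* assumed event: vm_object_reaper_thread() drains the queue */
	thread_wakeup((event_t) &vm_object_reaper_queue);
}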
@@ -2013,8 +2019,9 @@ vm_object_destroy( old_pager = object->pager; object->pager = MEMORY_OBJECT_NULL; - if (old_pager != MEMORY_OBJECT_NULL) + if (old_pager != MEMORY_OBJECT_NULL) { memory_object_control_disable(object->pager_control); + } /* * Wait for the existing paging activity (that got @@ -2030,7 +2037,7 @@ vm_object_destroy( if (old_pager != MEMORY_OBJECT_NULL) { vm_object_release_pager(old_pager); - /* + /* * JMM - Release the caller's reference. This assumes the * caller had a reference to release, which is a big (but * currently valid) assumption if this is driven from the @@ -2038,9 +2045,8 @@ vm_object_destroy( * this call).. */ vm_object_deallocate(object); - } - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -2064,31 +2070,31 @@ vm_object_destroy( * out with all the bits set. The macros below hide all these details from the caller. */ -#define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */ - /* be the same as the number of bits in */ - /* the chunk_state_t type. We use 64 */ - /* just for convenience. */ +#define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */ + /* be the same as the number of bits in */ + /* the chunk_state_t type. We use 64 */ + /* just for convenience. */ -#define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */ +#define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */ -typedef uint64_t chunk_state_t; +typedef uint64_t chunk_state_t; /* * The bit map uses negative logic, so we start out with all 64 bits set to indicate * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE, * then we mark pages beyond the len as having been "processed" so that we don't waste time - * looking at pages in that range. This can save us from unnecessarily chasing down the + * looking at pages in that range. This can save us from unnecessarily chasing down the * shadow chain. */ -#define CHUNK_INIT(c, len) \ - MACRO_BEGIN \ - uint64_t p; \ - \ - (c) = 0xffffffffffffffffLL; \ - \ - for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \ - MARK_PAGE_HANDLED(c, p); \ +#define CHUNK_INIT(c, len) \ + MACRO_BEGIN \ + uint64_t p; \ + \ + (c) = 0xffffffffffffffffLL; \ + \ + for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \ + MARK_PAGE_HANDLED(c, p); \ MACRO_END @@ -2096,14 +2102,14 @@ typedef uint64_t chunk_state_t; * Return true if all pages in the chunk have not yet been processed. */ -#define CHUNK_NOT_COMPLETE(c) ((c) != 0) +#define CHUNK_NOT_COMPLETE(c) ((c) != 0) /* * Return true if the page at offset 'p' in the bit map has already been handled * while processing a higher level object in the shadow chain. */ -#define PAGE_ALREADY_HANDLED(c, p) (((c) & (1LL << (p))) == 0) +#define PAGE_ALREADY_HANDLED(c, p) (((c) & (1LL << (p))) == 0) /* * Mark the page at offset 'p' in the bit map as having been processed. 
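/*
 * Illustrative sketch (not from the patch): a worked example of the
 * negative-logic chunk bitmap defined above. CHUNK_INIT() starts with all
 * 64 bits set ("nothing handled") and pre-clears the bits past len so a
 * short tail never drags the scan down the shadow chain. This assumes
 * MARK_PAGE_HANDLED() clears bit p, consistent with the visible
 * PAGE_ALREADY_HANDLED() test.
 */
static void
chunk_bitmap_demo(void)
{
	chunk_state_t c;

	CHUNK_INIT(c, 3 * PAGE_SIZE_64);        /* pages 3..63 pre-handled */

	assert(!PAGE_ALREADY_HANDLED(c, 0));    /* in range: still pending  */
	assert(PAGE_ALREADY_HANDLED(c, 3));     /* past len: never examined */

	MARK_PAGE_HANDLED(c, 0);
	MARK_PAGE_HANDLED(c, 1);
	MARK_PAGE_HANDLED(c, 2);

	assert(!CHUNK_NOT_COMPLETE(c));         /* all bits clear: chunk done */
}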
@@ -2122,15 +2128,14 @@ MACRO_END static boolean_t page_is_paged_out( - vm_object_t object, - vm_object_offset_t offset) + vm_object_t object, + vm_object_offset_t offset) { if (object->internal && - object->alive && - !object->terminating && - object->pager_ready) { - - if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) + object->alive && + !object->terminating && + object->pager_ready) { + if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_EXISTS) { return TRUE; } @@ -2162,24 +2167,24 @@ int madvise_free_debug = 0; static void deactivate_pages_in_object( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, boolean_t kill_page, - boolean_t reusable_page, - boolean_t all_reusable, - chunk_state_t *chunk_state, + boolean_t reusable_page, + boolean_t all_reusable, + chunk_state_t *chunk_state, pmap_flush_context *pfc, - struct pmap *pmap, - vm_map_offset_t pmap_offset) + struct pmap *pmap, + vm_map_offset_t pmap_offset) { - vm_page_t m; - int p; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; - int dw_count; - int dw_limit; - unsigned int reusable = 0; + vm_page_t m; + int p; + struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; + struct vm_page_delayed_work *dwp; + int dw_count; + int dw_limit; + unsigned int reusable = 0; /* * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the @@ -2192,23 +2197,22 @@ deactivate_pages_in_object( dw_count = 0; dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); - for(p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) { - + for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) { /* * If this offset has already been found and handled in a higher level object, then don't * do anything with it in the current shadow object. */ - if (PAGE_ALREADY_HANDLED(*chunk_state, p)) + if (PAGE_ALREADY_HANDLED(*chunk_state, p)) { continue; - + } + /* * See if the page at this offset is around. First check to see if the page is resident, * then if not, check the existence map or with the pager. */ - if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { - + if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { /* * We found a page we were looking for. Mark it as "handled" now in the chunk_state * so that we won't bother looking for a page at this offset again if there are more @@ -2216,12 +2220,12 @@ deactivate_pages_in_object( */ MARK_PAGE_HANDLED(*chunk_state, p); - - if (( !VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) && + + if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) && (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) { - int clear_refmod; - int pmap_options; - + int clear_refmod; + int pmap_options; + dwp->dw_mask = 0; pmap_options = 0; @@ -2237,8 +2241,8 @@ deactivate_pages_in_object( */ pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m)); } - m->vmp_precious = FALSE; - m->vmp_dirty = FALSE; + m->vmp_precious = FALSE; + m->vmp_dirty = FALSE; clear_refmod |= VM_MEM_MODIFIED; if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) { @@ -2266,26 +2270,28 @@ deactivate_pages_in_object( * "reusable" (to update pmap * stats for all mappings). 
*/ - pmap_options |= PMAP_OPTIONS_SET_REUSABLE; + pmap_options |= PMAP_OPTIONS_SET_REUSABLE; } } pmap_options |= PMAP_OPTIONS_NOFLUSH; pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), - clear_refmod, - pmap_options, - (void *)pfc); + clear_refmod, + pmap_options, + (void *)pfc); - if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable)) + if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !(reusable_page || all_reusable)) { dwp->dw_mask |= DW_move_page; - - if (dwp->dw_mask) + } + + if (dwp->dw_mask) { VM_PAGE_ADD_DELAYED_WORK(dwp, m, - dw_count); + dw_count); + } if (dw_count >= dw_limit) { if (reusable) { OSAddAtomic(reusable, - &vm_page_stats_reusable.reusable_count); + &vm_page_stats_reusable.reusable_count); vm_page_stats_reusable.reusable += reusable; reusable = 0; } @@ -2295,9 +2301,7 @@ deactivate_pages_in_object( dw_count = 0; } } - } else { - /* * The page at this offset isn't memory resident, check to see if it's * been paged out. If so, mark it as handled so we don't bother looking @@ -2308,12 +2312,11 @@ deactivate_pages_in_object( MARK_PAGE_HANDLED(*chunk_state, p); /* - * If we're killing a non-resident page, then clear the page in the existence + * If we're killing a non-resident page, then clear the page in the existence * map so we don't bother paging it back in if it's touched again in the future. */ if ((kill_page) && (object->internal)) { - VM_COMPRESSOR_PAGER_STATE_CLR(object, offset); if (pmap != PMAP_NULL) { @@ -2328,7 +2331,7 @@ deactivate_pages_in_object( pmap, pmap_offset, (pmap_offset + - PAGE_SIZE), + PAGE_SIZE), PMAP_OPTIONS_REMOVE); } } @@ -2338,12 +2341,13 @@ deactivate_pages_in_object( if (reusable) { OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count); - vm_page_stats_reusable.reusable += reusable; + vm_page_stats_reusable.reusable += reusable; reusable = 0; } - - if (dw_count) + + if (dw_count) { vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + } } @@ -2358,20 +2362,20 @@ deactivate_pages_in_object( static vm_object_size_t deactivate_a_chunk( - vm_object_t orig_object, - vm_object_offset_t offset, - vm_object_size_t size, + vm_object_t orig_object, + vm_object_offset_t offset, + vm_object_size_t size, boolean_t kill_page, - boolean_t reusable_page, - boolean_t all_reusable, + boolean_t reusable_page, + boolean_t all_reusable, pmap_flush_context *pfc, - struct pmap *pmap, - vm_map_offset_t pmap_offset) + struct pmap *pmap, + vm_map_offset_t pmap_offset) { - vm_object_t object; - vm_object_t tmp_object; - vm_object_size_t length; - chunk_state_t chunk_state; + vm_object_t object; + vm_object_t tmp_object; + vm_object_size_t length; + chunk_state_t chunk_state; /* @@ -2415,18 +2419,20 @@ deactivate_a_chunk( kill_page = FALSE; reusable_page = FALSE; all_reusable = FALSE; - offset += object->vo_shadow_offset; - vm_object_lock(tmp_object); + offset += object->vo_shadow_offset; + vm_object_lock(tmp_object); } - if (object != orig_object) - vm_object_unlock(object); + if (object != orig_object) { + vm_object_unlock(object); + } object = tmp_object; } - if (object && object != orig_object) - vm_object_unlock(object); + if (object && object != orig_object) { + vm_object_unlock(object); + } return length; } @@ -2441,21 +2447,21 @@ deactivate_a_chunk( __private_extern__ void vm_object_deactivate_pages( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, boolean_t kill_page, - boolean_t reusable_page, 
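/*
 * Illustrative sketch (not from the patch): the batching skeleton used by
 * deactivate_pages_in_object() above. Per-page actions accumulate in a
 * fixed delayed-work array and are applied by vm_page_do_delayed_work()
 * once per batch, so the page-queue lock is paid once per
 * DEFAULT_DELAYED_WORK_LIMIT pages instead of once per page. DW_move_page
 * stands in for the real, condition-dependent mask.
 */
static void
batch_page_work(vm_object_t object)
{
	struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work     *dwp = &dw_array[0];
	int             dw_count = 0;
	int             dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	vm_page_t       m, next;

	next = (vm_page_t) vm_page_queue_first(&object->memq);
	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) next)) {
		m = next;
		next = (vm_page_t) vm_page_queue_next(&m->vmp_listq);

		dwp->dw_mask = DW_move_page;    /* placeholder action */
		VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

		if (dw_count >= dw_limit) {     /* array full: apply the batch */
			vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
			    &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count) {                         /* flush the partial batch */
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
		    &dw_array[0], dw_count);
	}
}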
- struct pmap *pmap, - vm_map_offset_t pmap_offset) + boolean_t reusable_page, + struct pmap *pmap, + vm_map_offset_t pmap_offset) { - vm_object_size_t length; - boolean_t all_reusable; - pmap_flush_context pmap_flush_context_storage; + vm_object_size_t length; + boolean_t all_reusable; + pmap_flush_context pmap_flush_context_storage; /* * We break the range up into chunks and do one chunk at a time. This is for - * efficiency and performance while handling the shadow chains and the locks. + * efficiency and performance while handling the shadow chains and the locks. * The deactivate_a_chunk() function returns how much of the range it processed. * We keep calling this routine until the given size is exhausted. */ @@ -2464,7 +2470,7 @@ vm_object_deactivate_pages( all_reusable = FALSE; #if 11 /* - * For the sake of accurate "reusable" pmap stats, we need + * For the sake of accurate "reusable" pmap stats, we need * to tell pmap about each page that is no longer "reusable", * so we can't do the "all_reusable" optimization. */ @@ -2480,7 +2486,7 @@ vm_object_deactivate_pages( #endif if ((reusable_page || all_reusable) && object->all_reusable) { - /* This means MADV_FREE_REUSABLE has been called twice, which + /* This means MADV_FREE_REUSABLE has been called twice, which * is probably illegal. */ return; } @@ -2505,7 +2511,7 @@ vm_object_deactivate_pages( /* update global stats */ reusable = object->resident_page_count; OSAddAtomic(reusable, - &vm_page_stats_reusable.reusable_count); + &vm_page_stats_reusable.reusable_count); vm_page_stats_reusable.reusable += reusable; vm_page_stats_reusable.all_reusable_calls++; } @@ -2516,37 +2522,37 @@ vm_object_deactivate_pages( void vm_object_reuse_pages( - vm_object_t object, - vm_object_offset_t start_offset, - vm_object_offset_t end_offset, - boolean_t allow_partial_reuse) + vm_object_t object, + vm_object_offset_t start_offset, + vm_object_offset_t end_offset, + boolean_t allow_partial_reuse) { - vm_object_offset_t cur_offset; - vm_page_t m; - unsigned int reused, reusable; - -#define VM_OBJECT_REUSE_PAGE(object, m, reused) \ - MACRO_BEGIN \ - if ((m) != VM_PAGE_NULL && \ - (m)->vmp_reusable) { \ - assert((object)->reusable_page_count <= \ - (object)->resident_page_count); \ - assert((object)->reusable_page_count > 0); \ - (object)->reusable_page_count--; \ - (m)->vmp_reusable = FALSE; \ - (reused)++; \ - /* \ - * Tell pmap that this page is no longer \ - * "reusable", to update the "reusable" stats \ - * for all the pmaps that have mapped this \ - * page. \ - */ \ - pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \ - 0, /* refmod */ \ - (PMAP_OPTIONS_CLEAR_REUSABLE \ - | PMAP_OPTIONS_NOFLUSH), \ - NULL); \ - } \ + vm_object_offset_t cur_offset; + vm_page_t m; + unsigned int reused, reusable; + +#define VM_OBJECT_REUSE_PAGE(object, m, reused) \ + MACRO_BEGIN \ + if ((m) != VM_PAGE_NULL && \ + (m)->vmp_reusable) { \ + assert((object)->reusable_page_count <= \ + (object)->resident_page_count); \ + assert((object)->reusable_page_count > 0); \ + (object)->reusable_page_count--; \ + (m)->vmp_reusable = FALSE; \ + (reused)++; \ + /* \ + * Tell pmap that this page is no longer \ + * "reusable", to update the "reusable" stats \ + * for all the pmaps that have mapped this \ + * page. 
\ + */ \ + pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \ + 0, /* refmod */ \ + (PMAP_OPTIONS_CLEAR_REUSABLE \ + | PMAP_OPTIONS_NOFLUSH), \ + NULL); \ + } \ MACRO_END reused = 0; @@ -2556,7 +2562,7 @@ vm_object_reuse_pages( if (object->all_reusable) { panic("object %p all_reusable: can't update pmap stats\n", - object); + object); assert(object->reusable_page_count == 0); object->all_reusable = FALSE; if (end_offset - start_offset == object->vo_size || @@ -2565,7 +2571,7 @@ vm_object_reuse_pages( reused = object->resident_page_count; } else { vm_page_stats_reusable.partial_reuse_calls++; - vm_page_queue_iterate(&object->memq, m, vm_page_t, vmp_listq) { + vm_page_queue_iterate(&object->memq, m, vmp_listq) { if (m->vmp_offset < start_offset || m->vmp_offset >= end_offset) { m->vmp_reusable = TRUE; @@ -2579,11 +2585,11 @@ vm_object_reuse_pages( } } } else if (object->resident_page_count > - ((end_offset - start_offset) >> PAGE_SHIFT)) { + ((end_offset - start_offset) >> PAGE_SHIFT)) { vm_page_stats_reusable.partial_reuse_calls++; for (cur_offset = start_offset; - cur_offset < end_offset; - cur_offset += PAGE_SIZE_64) { + cur_offset < end_offset; + cur_offset += PAGE_SIZE_64) { if (object->reusable_page_count == 0) { break; } @@ -2592,7 +2598,7 @@ vm_object_reuse_pages( } } else { vm_page_stats_reusable.partial_reuse_calls++; - vm_page_queue_iterate(&object->memq, m, vm_page_t, vmp_listq) { + vm_page_queue_iterate(&object->memq, m, vmp_listq) { if (object->reusable_page_count == 0) { break; } @@ -2605,7 +2611,7 @@ vm_object_reuse_pages( } /* update global stats */ - OSAddAtomic(reusable-reused, &vm_page_stats_reusable.reusable_count); + OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count); vm_page_stats_reusable.reused += reused; vm_page_stats_reusable.reusable += reusable; } @@ -2627,7 +2633,7 @@ vm_object_reuse_pages( * remove access to all pages in shadowed objects. * * The object must *not* be locked. The object must - * be internal. + * be internal. 
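/*
 * Illustrative sketch (not from the patch): the scan-strategy predicate
 * vm_object_reuse_pages() applies above. When resident pages outnumber
 * the page slots in [start, end), probing each offset with
 * vm_page_lookup() touches fewer pages than iterating the whole memq;
 * otherwise the queue walk wins.
 */
static boolean_t
lookup_beats_iteration(vm_object_t object,
    vm_object_offset_t start_offset, vm_object_offset_t end_offset)
{
	return object->resident_page_count >
	       ((end_offset - start_offset) >> PAGE_SHIFT);
}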
* * If pmap is not NULL, this routine assumes that * the only mappings for the pages are in that @@ -2636,32 +2642,33 @@ vm_object_reuse_pages( __private_extern__ void vm_object_pmap_protect( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - pmap_t pmap, - vm_map_offset_t pmap_start, - vm_prot_t prot) + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + pmap_t pmap, + vm_map_offset_t pmap_start, + vm_prot_t prot) { vm_object_pmap_protect_options(object, offset, size, - pmap, pmap_start, prot, 0); + pmap, pmap_start, prot, 0); } __private_extern__ void vm_object_pmap_protect_options( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - pmap_t pmap, - vm_map_offset_t pmap_start, - vm_prot_t prot, - int options) + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + pmap_t pmap, + vm_map_offset_t pmap_start, + vm_prot_t prot, + int options) { - pmap_flush_context pmap_flush_context_storage; - boolean_t delayed_pmap_flush = FALSE; + pmap_flush_context pmap_flush_context_storage; + boolean_t delayed_pmap_flush = FALSE; - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return; + } size = vm_object_round_page(size); offset = vm_object_trunc_page(offset); @@ -2671,11 +2678,11 @@ vm_object_pmap_protect_options( if (pmap != NULL) { vm_object_unlock(object); pmap_protect_options(pmap, - pmap_start, - pmap_start + size, - prot, - options & ~PMAP_OPTIONS_NOFLUSH, - NULL); + pmap_start, + pmap_start + size, + prot, + options & ~PMAP_OPTIONS_NOFLUSH, + NULL); } else { vm_object_offset_t phys_start, phys_end, phys_addr; @@ -2689,8 +2696,8 @@ vm_object_pmap_protect_options( delayed_pmap_flush = FALSE; for (phys_addr = phys_start; - phys_addr < phys_end; - phys_addr += PAGE_SIZE_64) { + phys_addr < phys_end; + phys_addr += PAGE_SIZE_64) { pmap_page_protect_options( (ppnum_t) (phys_addr >> PAGE_SHIFT), prot, @@ -2698,8 +2705,9 @@ vm_object_pmap_protect_options( (void *)&pmap_flush_context_storage); delayed_pmap_flush = TRUE; } - if (delayed_pmap_flush == TRUE) + if (delayed_pmap_flush == TRUE) { pmap_flush(&pmap_flush_context_storage); + } } return; } @@ -2707,117 +2715,116 @@ vm_object_pmap_protect_options( assert(object->internal); while (TRUE) { - if (ptoa_64(object->resident_page_count) > size/2 && pmap != PMAP_NULL) { - vm_object_unlock(object); - pmap_protect_options(pmap, pmap_start, pmap_start + size, prot, - options & ~PMAP_OPTIONS_NOFLUSH, NULL); - return; - } - - pmap_flush_context_init(&pmap_flush_context_storage); - delayed_pmap_flush = FALSE; - - /* - * if we are doing large ranges with respect to resident - * page count then we should interate over pages otherwise - * inverse page look-up will be faster - */ - if (ptoa_64(object->resident_page_count / 4) < size) { - vm_page_t p; - vm_object_offset_t end; - - end = offset + size; - - vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { - if (!p->vmp_fictitious && (offset <= p->vmp_offset) && (p->vmp_offset < end)) { - vm_map_offset_t start; - - start = pmap_start + p->vmp_offset - offset; - - if (pmap != PMAP_NULL) - pmap_protect_options( - pmap, - start, - start + PAGE_SIZE_64, - prot, - options | PMAP_OPTIONS_NOFLUSH, - &pmap_flush_context_storage); - else - pmap_page_protect_options( - VM_PAGE_GET_PHYS_PAGE(p), - prot, - options | PMAP_OPTIONS_NOFLUSH, - &pmap_flush_context_storage); + if (ptoa_64(object->resident_page_count) > size / 2 && pmap != PMAP_NULL) { + vm_object_unlock(object); + pmap_protect_options(pmap, 
pmap_start, pmap_start + size, prot, + options & ~PMAP_OPTIONS_NOFLUSH, NULL); + return; + } + + pmap_flush_context_init(&pmap_flush_context_storage); + delayed_pmap_flush = FALSE; + + /* + * if we are doing large ranges with respect to resident + * page count then we should iterate over pages otherwise + * inverse page look-up will be faster + */ + if (ptoa_64(object->resident_page_count / 4) < size) { + vm_page_t p; + vm_object_offset_t end; + + end = offset + size; + + vm_page_queue_iterate(&object->memq, p, vmp_listq) { + if (!p->vmp_fictitious && (offset <= p->vmp_offset) && (p->vmp_offset < end)) { + vm_map_offset_t start; + + start = pmap_start + p->vmp_offset - offset; + + if (pmap != PMAP_NULL) { + pmap_protect_options( + pmap, + start, + start + PAGE_SIZE_64, + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + } else { + pmap_page_protect_options( + VM_PAGE_GET_PHYS_PAGE(p), + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + } delayed_pmap_flush = TRUE; + } } - } + } else { + vm_page_t p; + vm_object_offset_t end; + vm_object_offset_t target_off; - } else { - vm_page_t p; - vm_object_offset_t end; - vm_object_offset_t target_off; + end = offset + size; - end = offset + size; + for (target_off = offset; + target_off < end; target_off += PAGE_SIZE) { + p = vm_page_lookup(object, target_off); - for (target_off = offset; - target_off < end; target_off += PAGE_SIZE) { + if (p != VM_PAGE_NULL) { + vm_object_offset_t start; - p = vm_page_lookup(object, target_off); + start = pmap_start + (p->vmp_offset - offset); - if (p != VM_PAGE_NULL) { - vm_object_offset_t start; - - start = pmap_start + (p->vmp_offset - offset); - - if (pmap != PMAP_NULL) - pmap_protect_options( - pmap, - start, - start + PAGE_SIZE_64, - prot, - options | PMAP_OPTIONS_NOFLUSH, - &pmap_flush_context_storage); - else - pmap_page_protect_options( - VM_PAGE_GET_PHYS_PAGE(p), - prot, - options | PMAP_OPTIONS_NOFLUSH, - &pmap_flush_context_storage); + if (pmap != PMAP_NULL) { + pmap_protect_options( + pmap, + start, + start + PAGE_SIZE_64, + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + } else { + pmap_page_protect_options( + VM_PAGE_GET_PHYS_PAGE(p), + prot, + options | PMAP_OPTIONS_NOFLUSH, + &pmap_flush_context_storage); + } delayed_pmap_flush = TRUE; - } + } + } + } + if (delayed_pmap_flush == TRUE) { + pmap_flush(&pmap_flush_context_storage); } - } - if (delayed_pmap_flush == TRUE) - pmap_flush(&pmap_flush_context_storage); - if (prot == VM_PROT_NONE) { - /* - * Must follow shadow chain to remove access - * to pages in shadowed objects. - */ - vm_object_t next_object; - - next_object = object->shadow; - if (next_object != VM_OBJECT_NULL) { - offset += object->vo_shadow_offset; - vm_object_lock(next_object); - vm_object_unlock(object); - object = next_object; - } - else { - /* - * End of chain - we are done. - */ - break; - } - } - else { - /* - * Pages in shadowed objects may never have - * write permission - we may stop here. - */ - break; - } + if (prot == VM_PROT_NONE) { + /* + * Must follow shadow chain to remove access + * to pages in shadowed objects. + */ + vm_object_t next_object; + + next_object = object->shadow; + if (next_object != VM_OBJECT_NULL) { + offset += object->vo_shadow_offset; + vm_object_lock(next_object); + vm_object_unlock(object); + object = next_object; + } else { + /* + * End of chain - we are done. 
+ */ + break; + } + } else { + /* + * Pages in shadowed objects may never have + * write permission - we may stop here. + */ + break; + } } vm_object_unlock(object); @@ -2857,14 +2864,14 @@ uint32_t vm_page_busy_absent_skipped = 0; */ __private_extern__ kern_return_t vm_object_copy_slowly( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - boolean_t interruptible, - vm_object_t *_result_object) /* OUT */ + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + boolean_t interruptible, + vm_object_t *_result_object) /* OUT */ { - vm_object_t new_object; - vm_object_offset_t new_offset; + vm_object_t new_object; + vm_object_offset_t new_offset; struct vm_object_fault_info fault_info = {}; @@ -2874,7 +2881,7 @@ vm_object_copy_slowly( if (size == 0) { vm_object_unlock(src_object); *_result_object = VM_OBJECT_NULL; - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } /* @@ -2896,7 +2903,7 @@ vm_object_copy_slowly( new_object = vm_object_allocate(size); new_offset = 0; - assert(size == trunc_page_64(size)); /* Will the loop terminate? */ + assert(size == trunc_page_64(size)); /* Will the loop terminate? */ fault_info.interruptible = interruptible; fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; @@ -2904,38 +2911,37 @@ vm_object_copy_slowly( fault_info.hi_offset = src_offset + size; fault_info.stealth = TRUE; - for ( ; - size != 0 ; - src_offset += PAGE_SIZE_64, - new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64 + for (; + size != 0; + src_offset += PAGE_SIZE_64, + new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64 ) { - vm_page_t new_page; + vm_page_t new_page; vm_fault_return_t result; vm_object_lock(new_object); while ((new_page = vm_page_alloc(new_object, new_offset)) - == VM_PAGE_NULL) { - + == VM_PAGE_NULL) { vm_object_unlock(new_object); if (!vm_page_wait(interruptible)) { vm_object_deallocate(new_object); vm_object_deallocate(src_object); *_result_object = VM_OBJECT_NULL; - return(MACH_SEND_INTERRUPTED); + return MACH_SEND_INTERRUPTED; } vm_object_lock(new_object); } vm_object_unlock(new_object); do { - vm_prot_t prot = VM_PROT_READ; - vm_page_t _result_page; - vm_page_t top_page; - vm_page_t result_page; - kern_return_t error_code; - vm_object_t result_page_object; + vm_prot_t prot = VM_PROT_READ; + vm_page_t _result_page; + vm_page_t top_page; + vm_page_t result_page; + kern_return_t error_code; + vm_object_t result_page_object; vm_object_lock(src_object); @@ -2943,13 +2949,13 @@ vm_object_copy_slowly( if (src_object->internal && src_object->shadow == VM_OBJECT_NULL && (src_object->pager == NULL || - (VM_COMPRESSOR_PAGER_STATE_GET(src_object, - src_offset) == - VM_EXTERNAL_STATE_ABSENT))) { + (VM_COMPRESSOR_PAGER_STATE_GET(src_object, + src_offset) == + VM_EXTERNAL_STATE_ABSENT))) { boolean_t can_skip_page; _result_page = vm_page_lookup(src_object, - src_offset); + src_offset); if (_result_page == VM_PAGE_NULL) { /* * This page is neither resident nor @@ -2963,12 +2969,12 @@ vm_object_copy_slowly( */ can_skip_page = TRUE; } else if (workaround_41447923 && - src_object->pager == NULL && - _result_page != VM_PAGE_NULL && - _result_page->vmp_busy && - _result_page->vmp_absent && - src_object->purgable == VM_PURGABLE_DENY && - !src_object->blocked_access) { + src_object->pager == NULL && + _result_page != VM_PAGE_NULL && + _result_page->vmp_busy && + _result_page->vmp_absent && + src_object->purgable == VM_PURGABLE_DENY && + !src_object->blocked_access) { /* * This page is "busy" and "absent" * but not because we're 
waiting for @@ -3012,16 +3018,16 @@ vm_object_copy_slowly( } fault_info.cluster_size = cluster_size; - XPR(XPR_VM_FAULT,"vm_object_copy_slowly -> vm_fault_page",0,0,0,0,0); + XPR(XPR_VM_FAULT, "vm_object_copy_slowly -> vm_fault_page", 0, 0, 0, 0, 0); _result_page = VM_PAGE_NULL; result = vm_fault_page(src_object, src_offset, - VM_PROT_READ, FALSE, - FALSE, /* page not looked up */ - &prot, &_result_page, &top_page, - (int *)0, - &error_code, FALSE, FALSE, &fault_info); + VM_PROT_READ, FALSE, + FALSE, /* page not looked up */ + &prot, &_result_page, &top_page, + (int *)0, + &error_code, FALSE, FALSE, &fault_info); - switch(result) { + switch (result) { case VM_FAULT_SUCCESS: result_page = _result_page; result_page_object = VM_PAGE_OBJECT(result_page); @@ -3064,33 +3070,34 @@ vm_object_copy_slowly( */ vm_fault_cleanup(result_page_object, - top_page); + top_page); break; - + case VM_FAULT_RETRY: break; case VM_FAULT_MEMORY_SHORTAGE: - if (vm_page_wait(interruptible)) + if (vm_page_wait(interruptible)) { break; - /* fall thru */ + } + /* fall thru */ case VM_FAULT_INTERRUPTED: vm_object_lock(new_object); VM_PAGE_FREE(new_page); vm_object_unlock(new_object); - + vm_object_deallocate(new_object); vm_object_deallocate(src_object); *_result_object = VM_OBJECT_NULL; - return(MACH_SEND_INTERRUPTED); + return MACH_SEND_INTERRUPTED; case VM_FAULT_SUCCESS_NO_VM_PAGE: /* success but no VM page: fail */ vm_object_paging_end(src_object); vm_object_unlock(src_object); - /*FALLTHROUGH*/ + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: /* * A policy choice: @@ -3107,12 +3114,12 @@ vm_object_copy_slowly( vm_object_deallocate(new_object); vm_object_deallocate(src_object); *_result_object = VM_OBJECT_NULL; - return(error_code ? error_code: - KERN_MEMORY_ERROR); + return error_code ? 
error_code: + KERN_MEMORY_ERROR; default: panic("vm_object_copy_slowly: unexpected error" - " 0x%x from vm_fault_page()\n", result); + " 0x%x from vm_fault_page()\n", result); } } while (result != VM_FAULT_SUCCESS); } @@ -3122,7 +3129,7 @@ vm_object_copy_slowly( */ vm_object_deallocate(src_object); *_result_object = new_object; - return(KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -3145,13 +3152,13 @@ vm_object_copy_slowly( /*ARGSUSED*/ __private_extern__ boolean_t vm_object_copy_quickly( - vm_object_t *_object, /* INOUT */ - __unused vm_object_offset_t offset, /* IN */ - __unused vm_object_size_t size, /* IN */ - boolean_t *_src_needs_copy, /* OUT */ - boolean_t *_dst_needs_copy) /* OUT */ + vm_object_t *_object, /* INOUT */ + __unused vm_object_offset_t offset, /* IN */ + __unused vm_object_size_t size, /* IN */ + boolean_t *_src_needs_copy, /* OUT */ + boolean_t *_dst_needs_copy) /* OUT */ { - vm_object_t object = *_object; + vm_object_t object = *_object; memory_object_copy_strategy_t copy_strategy; XPR(XPR_VM_OBJECT, "v_o_c_quickly obj 0x%x off 0x%x size 0x%x\n", @@ -3159,7 +3166,7 @@ vm_object_copy_quickly( if (object == VM_OBJECT_NULL) { *_src_needs_copy = FALSE; *_dst_needs_copy = FALSE; - return(TRUE); + return TRUE; } vm_object_lock(object); @@ -3192,13 +3199,13 @@ vm_object_copy_quickly( case MEMORY_OBJECT_COPY_DELAY: vm_object_unlock(object); - return(FALSE); + return FALSE; default: vm_object_unlock(object); - return(FALSE); + return FALSE; } - return(TRUE); + return TRUE; } static int copy_call_count = 0; @@ -3225,15 +3232,15 @@ static int copy_call_restart_count = 0; */ static kern_return_t vm_object_copy_call( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - vm_object_t *_result_object) /* OUT */ + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + vm_object_t *_result_object) /* OUT */ { - kern_return_t kr; - vm_object_t copy; - boolean_t check_ready = FALSE; - uint32_t try_failed_count = 0; + kern_return_t kr; + vm_object_t copy; + boolean_t check_ready = FALSE; + uint32_t try_failed_count = 0; /* * If a copy is already in progress, wait and retry. @@ -3251,7 +3258,7 @@ vm_object_copy_call( copy_call_count++; while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) { vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL, - THREAD_UNINT); + THREAD_UNINT); copy_call_restart_count++; } @@ -3274,7 +3281,7 @@ vm_object_copy_call( * via memory_object_create_copy. */ - kr = KERN_FAILURE; /* XXX need to change memory_object.defs */ + kr = KERN_FAILURE; /* XXX need to change memory_object.defs */ if (kr != KERN_SUCCESS) { return kr; } @@ -3285,7 +3292,7 @@ vm_object_copy_call( vm_object_lock(src_object); while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) { vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL, - THREAD_UNINT); + THREAD_UNINT); copy_call_sleep_count++; } Retry: @@ -3295,16 +3302,18 @@ Retry: vm_object_unlock(src_object); try_failed_count++; - mutex_pause(try_failed_count); /* wait a bit */ + mutex_pause(try_failed_count); /* wait a bit */ vm_object_lock(src_object); goto Retry; } - if (copy->vo_size < src_offset+size) - copy->vo_size = src_offset+size; + if (copy->vo_size < src_offset + size) { + copy->vo_size = src_offset + size; + } - if (!copy->pager_ready) + if (!copy->pager_ready) { check_ready = TRUE; + } /* * Return the copy. 
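A note on the control flow being reindented in the vm_object_copy_slowly() hunks above: the routine copies a source object one page at a time by calling vm_fault_page() in a loop, and the VM_FAULT_* result decides whether to keep the page, retry, wait for free memory, or abandon the copy. The stand-alone C sketch below models only that retry discipline; fault_result_t, fake_fault_page(), and copy_one_page() are invented illustrative names, not XNU interfaces.

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for the kernel's VM_FAULT_* result codes */
typedef enum {
	FAULT_SUCCESS,
	FAULT_RETRY,
	FAULT_MEMORY_SHORTAGE,
	FAULT_INTERRUPTED,
	FAULT_MEMORY_ERROR
} fault_result_t;

/* pretend fault handler: succeeds on the third attempt */
static fault_result_t
fake_fault_page(int *attempts)
{
	return (++*attempts < 3) ? FAULT_RETRY : FAULT_SUCCESS;
}

static int
copy_one_page(void)
{
	int attempts = 0;
	fault_result_t result;

	do {
		result = fake_fault_page(&attempts);
		switch (result) {
		case FAULT_SUCCESS:
			break;          /* got the page: copy it and move on */
		case FAULT_RETRY:
			continue;       /* transient condition: fault again */
		case FAULT_MEMORY_SHORTAGE:
			continue;       /* the kernel blocks in vm_page_wait() first */
		case FAULT_INTERRUPTED:
		case FAULT_MEMORY_ERROR:
			return -1;      /* abandon the copy */
		}
	} while (result != FAULT_SUCCESS);

	printf("page copied after %d fault attempt(s)\n", attempts);
	return 0;
}

int
main(void)
{
	return copy_one_page() == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}

The kernel version differs mainly in what happens between the cases: VM_FAULT_MEMORY_SHORTAGE really does block in vm_page_wait() before retrying, and the interrupted and error paths free the freshly allocated destination page and drop both object references before returning, as the hunks above show.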
@@ -3346,24 +3355,24 @@ static int copy_delayed_protect_iterate = 0; */ __private_extern__ vm_object_t vm_object_copy_delayed( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - boolean_t src_object_shared) + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + boolean_t src_object_shared) { - vm_object_t new_copy = VM_OBJECT_NULL; - vm_object_t old_copy; - vm_page_t p; - vm_object_size_t copy_size = src_offset + size; - pmap_flush_context pmap_flush_context_storage; - boolean_t delayed_pmap_flush = FALSE; + vm_object_t new_copy = VM_OBJECT_NULL; + vm_object_t old_copy; + vm_page_t p; + vm_object_size_t copy_size = src_offset + size; + pmap_flush_context pmap_flush_context_storage; + boolean_t delayed_pmap_flush = FALSE; int collisions = 0; /* * The user-level memory manager wants to see all of the changes * to this object, but it has promised not to make any changes on - * its own. + * its own. * * Perform an asymmetric copy-on-write, as follows: * Create a new object, called a "copy object" to hold @@ -3400,16 +3409,16 @@ vm_object_copy_delayed( */ copy_size = vm_object_round_page(copy_size); - Retry: - +Retry: + /* * Wait for paging in progress. */ if (!src_object->true_share && (src_object->paging_in_progress != 0 || - src_object->activity_in_progress != 0)) { - if (src_object_shared == TRUE) { - vm_object_unlock(src_object); + src_object->activity_in_progress != 0)) { + if (src_object_shared == TRUE) { + vm_object_unlock(src_object); vm_object_lock(src_object); src_object_shared = FALSE; goto Retry; @@ -3423,33 +3432,37 @@ vm_object_copy_delayed( old_copy = src_object->copy; if (old_copy != VM_OBJECT_NULL) { - int lock_granted; + int lock_granted; /* * Try to get the locks (out of order) */ - if (src_object_shared == TRUE) - lock_granted = vm_object_lock_try_shared(old_copy); - else - lock_granted = vm_object_lock_try(old_copy); + if (src_object_shared == TRUE) { + lock_granted = vm_object_lock_try_shared(old_copy); + } else { + lock_granted = vm_object_lock_try(old_copy); + } if (!lock_granted) { vm_object_unlock(src_object); - if (collisions++ == 0) + if (collisions++ == 0) { copy_delayed_lock_contention++; + } mutex_pause(collisions); /* Heisenberg Rules */ copy_delayed_lock_collisions++; - if (collisions > copy_delayed_max_collisions) + if (collisions > copy_delayed_max_collisions) { copy_delayed_max_collisions = collisions; + } - if (src_object_shared == TRUE) - vm_object_lock_shared(src_object); - else - vm_object_lock(src_object); + if (src_object_shared == TRUE) { + vm_object_lock_shared(src_object); + } else { + vm_object_lock(src_object); + } goto Retry; } @@ -3471,10 +3484,10 @@ vm_object_copy_delayed( */ if (old_copy->vo_size < copy_size) { - if (src_object_shared == TRUE) { - vm_object_unlock(old_copy); + if (src_object_shared == TRUE) { + vm_object_unlock(old_copy); vm_object_unlock(src_object); - + vm_object_lock(src_object); src_object_shared = FALSE; goto Retry; @@ -3491,9 +3504,9 @@ vm_object_copy_delayed( pmap_flush_context_init(&pmap_flush_context_storage); delayed_pmap_flush = FALSE; - vm_page_queue_iterate(&src_object->memq, p, vm_page_t, vmp_listq) { - if (!p->vmp_fictitious && - p->vmp_offset >= old_copy->vo_size && + vm_page_queue_iterate(&src_object->memq, p, vmp_listq) { + if (!p->vmp_fictitious && + p->vmp_offset >= old_copy->vo_size && p->vmp_offset < copy_size) { if (VM_PAGE_WIRED(p)) { vm_object_unlock(old_copy); @@ -3503,26 +3516,29 @@ vm_object_copy_delayed( vm_object_unlock(new_copy); 
vm_object_deallocate(new_copy); } - if (delayed_pmap_flush == TRUE) + if (delayed_pmap_flush == TRUE) { pmap_flush(&pmap_flush_context_storage); + } return VM_OBJECT_NULL; } else { pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE), - PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); + PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); delayed_pmap_flush = TRUE; } } } - if (delayed_pmap_flush == TRUE) + if (delayed_pmap_flush == TRUE) { pmap_flush(&pmap_flush_context_storage); + } old_copy->vo_size = copy_size; } - if (src_object_shared == TRUE) - vm_object_reference_shared(old_copy); - else - vm_object_reference_locked(old_copy); + if (src_object_shared == TRUE) { + vm_object_reference_shared(old_copy); + } else { + vm_object_reference_locked(old_copy); + } vm_object_unlock(old_copy); vm_object_unlock(src_object); @@ -3530,18 +3546,19 @@ vm_object_copy_delayed( vm_object_unlock(new_copy); vm_object_deallocate(new_copy); } - return(old_copy); + return old_copy; } - - + + /* - * Adjust the size argument so that the newly-created + * Adjust the size argument so that the newly-created * copy object will be large enough to back either the * old copy object or the new mapping. */ - if (old_copy->vo_size > copy_size) + if (old_copy->vo_size > copy_size) { copy_size = old_copy->vo_size; + } if (new_copy == VM_OBJECT_NULL) { vm_object_unlock(old_copy); @@ -3553,7 +3570,7 @@ vm_object_copy_delayed( src_object_shared = FALSE; goto Retry; } - new_copy->vo_size = copy_size; + new_copy->vo_size = copy_size; /* * The copy-object is always made large enough to @@ -3564,7 +3581,6 @@ vm_object_copy_delayed( assert((old_copy->shadow == src_object) && (old_copy->vo_shadow_offset == (vm_object_offset_t) 0)); - } else if (new_copy == VM_OBJECT_NULL) { vm_object_unlock(src_object); new_copy = vm_object_allocate(copy_size); @@ -3591,28 +3607,31 @@ vm_object_copy_delayed( pmap_flush_context_init(&pmap_flush_context_storage); delayed_pmap_flush = FALSE; - vm_page_queue_iterate(&src_object->memq, p, vm_page_t, vmp_listq) { + vm_page_queue_iterate(&src_object->memq, p, vmp_listq) { if (!p->vmp_fictitious && p->vmp_offset < copy_size) { if (VM_PAGE_WIRED(p)) { - if (old_copy) + if (old_copy) { vm_object_unlock(old_copy); + } vm_object_unlock(src_object); vm_object_unlock(new_copy); vm_object_deallocate(new_copy); - if (delayed_pmap_flush == TRUE) + if (delayed_pmap_flush == TRUE) { pmap_flush(&pmap_flush_context_storage); + } return VM_OBJECT_NULL; } else { pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE), - PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); + PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); delayed_pmap_flush = TRUE; } } } - if (delayed_pmap_flush == TRUE) + if (delayed_pmap_flush == TRUE) { pmap_flush(&pmap_flush_context_storage); + } if (old_copy != VM_OBJECT_NULL) { /* @@ -3629,7 +3648,7 @@ vm_object_copy_delayed( old_copy->shadow = new_copy; vm_object_lock_assert_exclusive(new_copy); assert(new_copy->ref_count > 0); - new_copy->ref_count++; /* for old_copy->shadow ref. */ + new_copy->ref_count++; /* for old_copy->shadow ref. 
*/ #if TASK_SWAPPER if (old_copy->res_count) { @@ -3638,7 +3657,7 @@ vm_object_copy_delayed( } #endif - vm_object_unlock(old_copy); /* done with old_copy */ + vm_object_unlock(old_copy); /* done with old_copy */ } /* @@ -3647,7 +3666,7 @@ vm_object_copy_delayed( vm_object_lock_assert_exclusive(new_copy); new_copy->shadow = src_object; new_copy->vo_shadow_offset = 0; - new_copy->shadowed = TRUE; /* caller must set needs_copy */ + new_copy->shadowed = TRUE; /* caller must set needs_copy */ vm_object_lock_assert_exclusive(src_object); vm_object_reference_locked(src_object); @@ -3656,8 +3675,8 @@ vm_object_copy_delayed( vm_object_unlock(new_copy); XPR(XPR_VM_OBJECT, - "vm_object_copy_delayed: used copy object %X for source %X\n", - new_copy, src_object, 0, 0, 0); + "vm_object_copy_delayed: used copy object %X for source %X\n", + new_copy, src_object, 0, 0, 0); return new_copy; } @@ -3672,16 +3691,16 @@ vm_object_copy_delayed( */ __private_extern__ kern_return_t vm_object_copy_strategically( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - vm_object_t *dst_object, /* OUT */ - vm_object_offset_t *dst_offset, /* OUT */ - boolean_t *dst_needs_copy) /* OUT */ + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + vm_object_t *dst_object, /* OUT */ + vm_object_offset_t *dst_offset, /* OUT */ + boolean_t *dst_needs_copy) /* OUT */ { - boolean_t result; - boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */ - boolean_t object_lock_shared = FALSE; + boolean_t result; + boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */ + boolean_t object_lock_shared = FALSE; memory_object_copy_strategy_t copy_strategy; assert(src_object != VM_OBJECT_NULL); @@ -3689,10 +3708,11 @@ vm_object_copy_strategically( copy_strategy = src_object->copy_strategy; if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) { - vm_object_lock_shared(src_object); + vm_object_lock_shared(src_object); object_lock_shared = TRUE; - } else - vm_object_lock(src_object); + } else { + vm_object_lock(src_object); + } /* * The copy strategy is only valid if the memory manager @@ -3703,20 +3723,20 @@ vm_object_copy_strategically( wait_result_t wait_result; if (object_lock_shared == TRUE) { - vm_object_unlock(src_object); + vm_object_unlock(src_object); vm_object_lock(src_object); object_lock_shared = FALSE; continue; } - wait_result = vm_object_sleep( src_object, - VM_OBJECT_EVENT_PAGER_READY, - interruptible); + wait_result = vm_object_sleep( src_object, + VM_OBJECT_EVENT_PAGER_READY, + interruptible); if (wait_result != THREAD_AWAKENED) { vm_object_unlock(src_object); *dst_object = VM_OBJECT_NULL; *dst_offset = 0; *dst_needs_copy = FALSE; - return(MACH_SEND_INTERRUPTED); + return MACH_SEND_INTERRUPTED; } } @@ -3725,9 +3745,9 @@ vm_object_copy_strategically( */ switch (copy_strategy) { - case MEMORY_OBJECT_COPY_DELAY: + case MEMORY_OBJECT_COPY_DELAY: *dst_object = vm_object_copy_delayed(src_object, - src_offset, size, object_lock_shared); + src_offset, size, object_lock_shared); if (*dst_object != VM_OBJECT_NULL) { *dst_offset = src_offset; *dst_needs_copy = TRUE; @@ -3735,37 +3755,37 @@ vm_object_copy_strategically( break; } vm_object_lock(src_object); - /* fall thru when delayed copy not allowed */ + /* fall thru when delayed copy not allowed */ - case MEMORY_OBJECT_COPY_NONE: + case MEMORY_OBJECT_COPY_NONE: result = vm_object_copy_slowly(src_object, src_offset, size, - interruptible, dst_object); + interruptible, dst_object); if (result == KERN_SUCCESS) { *dst_offset = 0; 
*dst_needs_copy = FALSE; } break; - case MEMORY_OBJECT_COPY_CALL: + case MEMORY_OBJECT_COPY_CALL: result = vm_object_copy_call(src_object, src_offset, size, - dst_object); + dst_object); if (result == KERN_SUCCESS) { *dst_offset = src_offset; *dst_needs_copy = TRUE; } break; - case MEMORY_OBJECT_COPY_SYMMETRIC: + case MEMORY_OBJECT_COPY_SYMMETRIC: XPR(XPR_VM_OBJECT, "v_o_c_strategically obj 0x%x off 0x%x size 0x%x\n", src_object, src_offset, size, 0, 0); vm_object_unlock(src_object); result = KERN_MEMORY_RESTART_COPY; break; - default: + default: panic("copy_strategically: bad strategy"); result = KERN_INVALID_ARGUMENT; } - return(result); + return result; } /* @@ -3782,17 +3802,18 @@ boolean_t vm_object_shadow_check = TRUE; __private_extern__ boolean_t vm_object_shadow( - vm_object_t *object, /* IN/OUT */ - vm_object_offset_t *offset, /* IN/OUT */ - vm_object_size_t length) + vm_object_t *object, /* IN/OUT */ + vm_object_offset_t *offset, /* IN/OUT */ + vm_object_size_t length) { - vm_object_t source; - vm_object_t result; + vm_object_t source; + vm_object_t result; source = *object; assert(source != VM_OBJECT_NULL); - if (source == VM_OBJECT_NULL) + if (source == VM_OBJECT_NULL) { return FALSE; + } #if 0 /* @@ -3826,17 +3847,22 @@ vm_object_shadow( if (vm_object_shadow_check && source->vo_size == length && - source->ref_count == 1 && - (source->shadow == VM_OBJECT_NULL || - source->shadow->copy == VM_OBJECT_NULL) ) - { - /* lock the object and check again */ + source->ref_count == 1) { + /* + * Lock the object and check again. + * We also check to see if there's + * a shadow or copy object involved. + * We can't do that earlier because + * without the object locked, there + * could be a collapse and the chain + * gets modified leaving us with an + * invalid pointer. + */ vm_object_lock(source); if (source->vo_size == length && source->ref_count == 1 && (source->shadow == VM_OBJECT_NULL || - source->shadow->copy == VM_OBJECT_NULL)) - { + source->shadow->copy == VM_OBJECT_NULL)) { source->shadowed = FALSE; vm_object_unlock(source); return FALSE; @@ -3849,8 +3875,9 @@ vm_object_shadow( * Allocate a new object with the given length */ - if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) + if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) { panic("vm_object_shadow: no object for shadowing"); + } /* * The new object shadows the source object, adding @@ -3860,7 +3887,7 @@ vm_object_shadow( * count. */ result->shadow = source; - + /* * Store the offset into the source object, * and fix up the offset into the new object. 
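A note on the vm_object_shadow() hunk above: a shadow object starts out empty and delegates to the object it shadows through its shadow pointer and vo_shadow_offset, so a lookup that misses in the front object falls through to the backing object. That fall-through is also why vm_object_pmap_protect() earlier in this file walks the shadow chain when revoking access. The toy model below illustrates just the chain walk; struct toy_object, toy_lookup(), and TOY_PAGES are invented for illustration and are not XNU types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGES 4

struct toy_object {
	bool               resident[TOY_PAGES]; /* pages this object owns */
	struct toy_object *shadow;              /* backing object, or NULL */
	uint64_t           shadow_offset;       /* offset of this object in it */
};

/* walk the chain until some object can supply the page */
static struct toy_object *
toy_lookup(struct toy_object *obj, uint64_t *offset)
{
	while (obj != NULL) {
		if (*offset < TOY_PAGES && obj->resident[*offset]) {
			return obj;                 /* found at this level */
		}
		*offset += obj->shadow_offset;      /* translate downward */
		obj = obj->shadow;
	}
	return NULL;                                /* zero-fill in the real VM */
}

int
main(void)
{
	struct toy_object backing = { .resident = { true, true, true, true } };
	struct toy_object front = {
		.resident = { [1] = true },         /* only page 1 was written */
		.shadow = &backing,
		.shadow_offset = 0,
	};
	uint64_t off = 2;                           /* untouched in front */
	struct toy_object *owner = toy_lookup(&front, &off);

	printf("page 2 served by the %s object\n",
	    (owner == &front) ? "front" : "backing");
	return 0;
}

In the real VM the walk happens during page faults rather than eagerly, and writes always land in the front object; that is what makes the chain a copy-on-write mechanism and why, as a comment above puts it, pages in shadowed objects may never have write permission.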
@@ -3962,10 +3989,10 @@ vm_object_shadow( */ vm_object_t vm_object_memory_object_associate( - memory_object_t pager, - vm_object_t object, - vm_object_size_t size, - boolean_t named) + memory_object_t pager, + vm_object_t object, + vm_object_size_t size, + boolean_t named) { memory_object_control_t control; @@ -3990,7 +4017,7 @@ vm_object_memory_object_associate( */ control = memory_object_control_allocate(object); - assert (control != MEMORY_OBJECT_CONTROL_NULL); + assert(control != MEMORY_OBJECT_CONTROL_NULL); vm_object_lock(object); @@ -4016,12 +4043,13 @@ vm_object_memory_object_associate( */ (void) memory_object_init(pager, - object->pager_control, - PAGE_SIZE); + object->pager_control, + PAGE_SIZE); vm_object_lock(object); - if (named) + if (named) { object->named = TRUE; + } if (object->internal) { object->pager_ready = TRUE; vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY); @@ -4051,10 +4079,10 @@ vm_object_memory_object_associate( void vm_object_compressor_pager_create( - vm_object_t object) + vm_object_t object) { - memory_object_t pager; - vm_object_t pager_object = VM_OBJECT_NULL; + memory_object_t pager; + vm_object_t pager_object = VM_OBJECT_NULL; assert(object != kernel_object); @@ -4070,21 +4098,21 @@ vm_object_compressor_pager_create( */ while (!object->pager_initialized) { vm_object_sleep(object, - VM_OBJECT_EVENT_INITIALIZED, - THREAD_UNINT); + VM_OBJECT_EVENT_INITIALIZED, + THREAD_UNINT); } vm_object_paging_end(object); return; } - if ((uint32_t) (object->vo_size/PAGE_SIZE) != - (object->vo_size/PAGE_SIZE)) { + if ((uint32_t) (object->vo_size / PAGE_SIZE) != + (object->vo_size / PAGE_SIZE)) { #if DEVELOPMENT || DEBUG printf("vm_object_compressor_pager_create(%p): " - "object size 0x%llx >= 0x%llx\n", - object, - (uint64_t) object->vo_size, - 0x0FFFFFFFFULL*PAGE_SIZE); + "object size 0x%llx >= 0x%llx\n", + object, + (uint64_t) object->vo_size, + 0x0FFFFFFFFULL * PAGE_SIZE); #endif /* DEVELOPMENT || DEBUG */ vm_object_paging_end(object); return; @@ -4097,29 +4125,29 @@ vm_object_compressor_pager_create( object->pager_created = TRUE; object->paging_offset = 0; - + vm_object_unlock(object); /* * Create the [internal] pager, and associate it with this object. * * We make the association here so that vm_object_enter() - * can look up the object to complete initializing it. No + * can look up the object to complete initializing it. No * user will ever map this object. */ { /* create our new memory object */ - assert((uint32_t) (object->vo_size/PAGE_SIZE) == - (object->vo_size/PAGE_SIZE)); + assert((uint32_t) (object->vo_size / PAGE_SIZE) == + (object->vo_size / PAGE_SIZE)); (void) compressor_memory_object_create( (memory_object_size_t) object->vo_size, &pager); if (pager == NULL) { panic("vm_object_compressor_pager_create(): " - "no pager for object %p size 0x%llx\n", - object, (uint64_t) object->vo_size); + "no pager for object %p size 0x%llx\n", + object, (uint64_t) object->vo_size); } - } + } /* * A reference was returned by @@ -4128,9 +4156,9 @@ vm_object_compressor_pager_create( */ pager_object = vm_object_memory_object_associate(pager, - object, - object->vo_size, - FALSE); + object, + object->vo_size, + FALSE); if (pager_object != object) { panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)\n", pager, pager_object, object, (uint64_t) object->vo_size); } @@ -4154,14 +4182,14 @@ vm_object_compressor_pager_create( * Counts for normal collapses and bypasses. 
* Debugging variables, to watch or disable collapse. */ -static long object_collapses = 0; -static long object_bypasses = 0; +static long object_collapses = 0; +static long object_bypasses = 0; -static boolean_t vm_object_collapse_allowed = TRUE; -static boolean_t vm_object_bypass_allowed = TRUE; +static boolean_t vm_object_collapse_allowed = TRUE; +static boolean_t vm_object_bypass_allowed = TRUE; void vm_object_do_collapse_compressor(vm_object_t object, - vm_object_t backing_object); + vm_object_t backing_object); void vm_object_do_collapse_compressor( vm_object_t object, @@ -4183,13 +4211,13 @@ vm_object_do_collapse_compressor( */ for (backing_offset = object->vo_shadow_offset; - backing_offset < object->vo_shadow_offset + object->vo_size; - backing_offset += PAGE_SIZE) { + backing_offset < object->vo_shadow_offset + object->vo_size; + backing_offset += PAGE_SIZE) { memory_object_offset_t backing_pager_offset; /* find the next compressed page at or after this offset */ backing_pager_offset = (backing_offset + - backing_object->paging_offset); + backing_object->paging_offset); backing_pager_offset = vm_compressor_pager_next_compressed( backing_object->pager, backing_pager_offset); @@ -4198,7 +4226,7 @@ vm_object_do_collapse_compressor( break; } backing_offset = (backing_pager_offset - - backing_object->paging_offset); + backing_object->paging_offset); new_offset = backing_offset - object->vo_shadow_offset; @@ -4209,9 +4237,9 @@ vm_object_do_collapse_compressor( if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) || (vm_compressor_pager_state_get(object->pager, - (new_offset + - object->paging_offset)) == - VM_EXTERNAL_STATE_EXISTS)) { + (new_offset + + object->paging_offset)) == + VM_EXTERNAL_STATE_EXISTS)) { /* * This page already exists in object, resident or * compressed. @@ -4272,13 +4300,12 @@ vm_object_do_collapse( * will be overwritten by any of the parent's * pages that shadow them. */ - + while (!vm_page_queue_empty(&backing_object->memq)) { - p = (vm_page_t) vm_page_queue_first(&backing_object->memq); - + new_offset = (p->vmp_offset - backing_offset); - + assert(!p->vmp_busy || p->vmp_absent); /* @@ -4288,15 +4315,14 @@ vm_object_do_collapse( * * Otherwise, move it as planned. */ - + if (p->vmp_offset < backing_offset || new_offset >= size) { VM_PAGE_FREE(p); } else { pp = vm_page_lookup(object, new_offset); if (pp == VM_PAGE_NULL) { - if (VM_COMPRESSOR_PAGER_STATE_GET(object, - new_offset) + new_offset) == VM_EXTERNAL_STATE_EXISTS) { /* * Parent object has this page @@ -4309,12 +4335,12 @@ vm_object_do_collapse( /* * Parent now has no page. * Move the backing object's page - * up. + * up. */ vm_page_rename(p, object, new_offset); } } else { - assert(! pp->vmp_absent); + assert(!pp->vmp_absent); /* * Parent object has a real page. @@ -4329,16 +4355,13 @@ vm_object_do_collapse( if (vm_object_collapse_compressor_allowed && object->pager != MEMORY_OBJECT_NULL && backing_object->pager != MEMORY_OBJECT_NULL) { - /* move compressed pages from backing_object to object */ vm_object_do_collapse_compressor(object, backing_object); - } else if (backing_object->pager != MEMORY_OBJECT_NULL) { - assert((!object->pager_created && - (object->pager == MEMORY_OBJECT_NULL)) || - (!backing_object->pager_created && - (backing_object->pager == MEMORY_OBJECT_NULL))); + (object->pager == MEMORY_OBJECT_NULL)) || + (!backing_object->pager_created && + (backing_object->pager == MEMORY_OBJECT_NULL))); /* * Move the pager from backing_object to object. 
* @@ -4361,7 +4384,7 @@ vm_object_do_collapse( backing_object->paging_offset + backing_offset; if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) { memory_object_control_collapse(object->pager_control, - object); + object); } /* the backing_object has lost its pager: reset all fields */ backing_object->pager_created = FALSE; @@ -4375,7 +4398,7 @@ vm_object_do_collapse( * Note that the reference to backing_object->shadow * moves from within backing_object to within object. */ - + assert(!object->phys_contiguous); assert(!backing_object->phys_contiguous); object->shadow = backing_object->shadow; @@ -4389,7 +4412,7 @@ vm_object_do_collapse( object->vo_shadow_offset = 0; } assert((object->shadow == VM_OBJECT_NULL) || - (object->shadow->copy != backing_object)); + (object->shadow->copy != backing_object)); /* * Discard backing_object. @@ -4399,7 +4422,7 @@ vm_object_do_collapse( * all that is necessary is to dispose of it. */ object_collapses++; - + assert(backing_object->ref_count == 1); assert(backing_object->resident_page_count == 0); assert(backing_object->paging_in_progress == 0); @@ -4422,19 +4445,18 @@ vm_object_do_collapse( vm_object_unlock(backing_object); XPR(XPR_VM_OBJECT, "vm_object_collapse, collapsed 0x%X\n", - backing_object, 0,0,0,0); + backing_object, 0, 0, 0, 0); #if VM_OBJECT_TRACKING if (vm_object_tracking_inited) { btlog_remove_entries_for_element(vm_object_tracking_btlog, - backing_object); + backing_object); } #endif /* VM_OBJECT_TRACKING */ vm_object_lock_destroy(backing_object); zfree(vm_object_zone, backing_object); - } static void @@ -4446,13 +4468,13 @@ vm_object_do_bypass( * Make the parent shadow the next object * in the chain. */ - + vm_object_lock_assert_exclusive(object); vm_object_lock_assert_exclusive(backing_object); -#if TASK_SWAPPER +#if TASK_SWAPPER /* - * Do object reference in-line to + * Do object reference in-line to * conditionally increment shadow's * residence count. If object is not * resident, leave residence count @@ -4462,13 +4484,14 @@ vm_object_do_bypass( vm_object_lock(backing_object->shadow); vm_object_lock_assert_exclusive(backing_object->shadow); backing_object->shadow->ref_count++; - if (object->res_count != 0) + if (object->res_count != 0) { vm_object_res_reference(backing_object->shadow); + } vm_object_unlock(backing_object->shadow); } -#else /* TASK_SWAPPER */ +#else /* TASK_SWAPPER */ vm_object_reference(backing_object->shadow); -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ assert(!object->phys_contiguous); assert(!backing_object->phys_contiguous); @@ -4479,30 +4502,30 @@ vm_object_do_bypass( /* no shadow, therefore no shadow offset... */ object->vo_shadow_offset = 0; } - + /* * Backing object might have had a copy pointer - * to us. If it did, clear it. + * to us. If it did, clear it. */ if (backing_object->copy == object) { backing_object->copy = VM_OBJECT_NULL; } - + /* * Drop the reference count on backing_object. -#if TASK_SWAPPER + #if TASK_SWAPPER * Since its ref_count was at least 2, it * will not vanish; so we don't need to call * vm_object_deallocate. * [with a caveat for "named" objects] - * + * * The res_count on the backing object is * conditionally decremented. It's possible * (via vm_pageout_scan) to get here with * a "swapped" object, which has a 0 res_count, * in which case, the backing object res_count * is already down by one. -#else + #else * Don't call vm_object_deallocate unless * ref_count drops to zero. 
* @@ -4510,31 +4533,31 @@ vm_object_do_bypass( * backing object could be bypassed but not * collapsed, such as when the backing object * is temporary and cachable. -#endif + #endif */ if (backing_object->ref_count > 2 || (!backing_object->named && backing_object->ref_count > 1)) { vm_object_lock_assert_exclusive(backing_object); backing_object->ref_count--; -#if TASK_SWAPPER - if (object->res_count != 0) +#if TASK_SWAPPER + if (object->res_count != 0) { vm_object_res_deallocate(backing_object); + } assert(backing_object->ref_count > 0); -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ vm_object_unlock(backing_object); } else { - /* * Drop locks so that we can deallocate * the backing object. */ -#if TASK_SWAPPER +#if TASK_SWAPPER if (object->res_count == 0) { /* XXX get a reference for the deallocate below */ vm_object_res_reference(backing_object); } -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ /* * vm_object_collapse (the caller of this function) is * now called from contexts that may not guarantee that a @@ -4561,11 +4584,11 @@ vm_object_do_bypass( vm_object_lock(object); vm_object_activity_end(object); } - + object_bypasses++; } - + /* * vm_object_collapse: * @@ -4583,29 +4606,30 @@ static unsigned long vm_object_collapse_do_bypass = 0; __private_extern__ void vm_object_collapse( - vm_object_t object, - vm_object_offset_t hint_offset, - boolean_t can_bypass) + vm_object_t object, + vm_object_offset_t hint_offset, + boolean_t can_bypass) { - vm_object_t backing_object; - unsigned int rcount; - unsigned int size; - vm_object_t original_object; - int object_lock_type; - int backing_object_lock_type; + vm_object_t backing_object; + unsigned int rcount; + unsigned int size; + vm_object_t original_object; + int object_lock_type; + int backing_object_lock_type; vm_object_collapse_calls++; - if (! vm_object_collapse_allowed && - ! (can_bypass && vm_object_bypass_allowed)) { + if (!vm_object_collapse_allowed && + !(can_bypass && vm_object_bypass_allowed)) { return; } - XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n", - object, 0,0,0,0); + XPR(XPR_VM_OBJECT, "vm_object_collapse, obj 0x%X\n", + object, 0, 0, 0, 0); - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return; + } original_object = object; @@ -4632,7 +4656,7 @@ retry: /* * There is a backing object, and */ - + backing_object = object->shadow; if (backing_object == VM_OBJECT_NULL) { if (object != original_object) { @@ -4669,7 +4693,7 @@ retry: * The backing object is internal. 
* */ - + if (!backing_object->internal || backing_object->paging_in_progress != 0 || backing_object->activity_in_progress != 0) { @@ -4693,9 +4717,9 @@ retry: if (object->purgable != VM_PURGABLE_DENY || backing_object->purgable != VM_PURGABLE_DENY) { panic("vm_object_collapse() attempting to collapse " - "purgeable object: %p(%d) %p(%d)\n", - object, object->purgable, - backing_object, backing_object->purgable); + "purgeable object: %p(%d) %p(%d)\n", + object, object->purgable, + backing_object, backing_object->purgable); /* try and collapse the rest of the shadow chain */ if (object != original_object) { vm_object_unlock(object); @@ -4704,7 +4728,7 @@ retry: object_lock_type = backing_object_lock_type; continue; } - + /* * The backing object can't be a copy-object: * the shadow_offset for the copy-object must stay @@ -4741,20 +4765,19 @@ retry: */ if (backing_object->ref_count == 1 && (vm_object_collapse_compressor_allowed || - !object->pager_created - || (!backing_object->pager_created) + !object->pager_created + || (!backing_object->pager_created) ) && vm_object_collapse_allowed) { - /* * We need the exclusive lock on the VM objects. */ if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) { /* - * We have an object and its shadow locked + * We have an object and its shadow locked * "shared". We can't just upgrade the locks * to "exclusive", as some other thread might * also have these objects locked "shared" and - * attempt to upgrade one or the other to + * attempt to upgrade one or the other to * "exclusive". The upgrades would block * forever waiting for the other "shared" locks * to get released. @@ -4763,18 +4786,19 @@ retry: * have changed) with "exclusive" locking. */ vm_object_unlock(backing_object); - if (object != original_object) + if (object != original_object) { vm_object_unlock(object); + } object_lock_type = OBJECT_LOCK_EXCLUSIVE; backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE; goto retry; } - XPR(XPR_VM_OBJECT, - "vm_object_collapse: %x to %x, pager %x, pager_control %x\n", - backing_object, object, - backing_object->pager, - backing_object->pager_control, 0); + XPR(XPR_VM_OBJECT, + "vm_object_collapse: %x to %x, pager %x, pager_control %x\n", + backing_object, object, + backing_object->pager, + backing_object->pager_control, 0); /* * Collapse the object with its backing @@ -4792,7 +4816,7 @@ retry: * or permitted, so let's try bypassing it. */ - if (! 
(can_bypass && vm_object_bypass_allowed)) { + if (!(can_bypass && vm_object_bypass_allowed)) { /* try and collapse the rest of the shadow chain */ if (object != original_object) { vm_object_unlock(object); @@ -4812,9 +4836,9 @@ retry: rcount = object->resident_page_count; if (rcount != size) { - vm_object_offset_t offset; - vm_object_offset_t backing_offset; - unsigned int backing_rcount; + vm_object_offset_t offset; + vm_object_offset_t backing_offset; + unsigned int backing_rcount; /* * If the backing object has a pager but no pagemap, @@ -4849,8 +4873,8 @@ retry: backing_offset = object->vo_shadow_offset; backing_rcount = backing_object->resident_page_count; - if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) { - /* + if ((int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) { + /* * we have enough pages in the backing object to guarantee that * at least 1 of them must be 'uncovered' by a resident page * in the object we're evaluating, so move on and @@ -4877,23 +4901,24 @@ retry: * */ -#define EXISTS_IN_OBJECT(obj, off, rc) \ - ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \ - == VM_EXTERNAL_STATE_EXISTS) || \ +#define EXISTS_IN_OBJECT(obj, off, rc) \ + ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \ + == VM_EXTERNAL_STATE_EXISTS) || \ ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--)) /* * Check the hint location first * (since it is often the quickest way out of here). */ - if (object->cow_hint != ~(vm_offset_t)0) + if (object->cow_hint != ~(vm_offset_t)0) { hint_offset = (vm_object_offset_t)object->cow_hint; - else + } else { hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ? - (hint_offset - 8 * PAGE_SIZE_64) : 0; + (hint_offset - 8 * PAGE_SIZE_64) : 0; + } if (EXISTS_IN_OBJECT(backing_object, hint_offset + - backing_offset, backing_rcount) && + backing_offset, backing_rcount) && !EXISTS_IN_OBJECT(object, hint_offset, rcount)) { /* dependency right at the hint */ object->cow_hint = (vm_offset_t) hint_offset; /* atomic */ @@ -4913,7 +4938,7 @@ retry: * walk the backing_object's resident pages first. * * NOTE: Pages may be in both the existence map and/or - * resident, so if we don't find a dependency while + * resident, so if we don't find a dependency while * walking the backing object's resident page list * directly, and there is an existence map, we'll have * to run the offset based 2nd pass. Because we may @@ -4934,13 +4959,12 @@ retry: !EXISTS_IN_OBJECT(object, offset, rc)) { /* found a dependency */ object->cow_hint = (vm_offset_t) offset; /* atomic */ - + break; } p = (vm_page_t) vm_page_queue_next(&p->vmp_listq); - } while (--backing_rcount); - if (backing_rcount != 0 ) { + if (backing_rcount != 0) { /* try and collapse the rest of the shadow chain */ if (object != original_object) { vm_object_unlock(object); @@ -4957,13 +4981,12 @@ retry: */ if (backing_rcount) { offset = hint_offset; - - while((offset = - (offset + PAGE_SIZE_64 < object->vo_size) ? - (offset + PAGE_SIZE_64) : 0) != hint_offset) { + while ((offset = + (offset + PAGE_SIZE_64 < object->vo_size) ? 
+ (offset + PAGE_SIZE_64) : 0) != hint_offset) { if (EXISTS_IN_OBJECT(backing_object, offset + - backing_offset, backing_rcount) && + backing_offset, backing_rcount) && !EXISTS_IN_OBJECT(object, offset, rcount)) { /* found a dependency */ object->cow_hint = (vm_offset_t) offset; /* atomic */ @@ -4987,8 +5010,9 @@ retry: */ if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) { vm_object_unlock(backing_object); - if (object != original_object) + if (object != original_object) { vm_object_unlock(object); + } object_lock_type = OBJECT_LOCK_EXCLUSIVE; backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE; goto retry; @@ -5015,10 +5039,10 @@ retry: /* NOT REACHED */ /* - if (object != original_object) { - vm_object_unlock(object); - } - */ + * if (object != original_object) { + * vm_object_unlock(object); + * } + */ } /* @@ -5037,11 +5061,11 @@ unsigned int vm_object_page_remove_iterate = 0; __private_extern__ void vm_object_page_remove( - vm_object_t object, - vm_object_offset_t start, - vm_object_offset_t end) + vm_object_t object, + vm_object_offset_t start, + vm_object_offset_t end) { - vm_page_t p, next; + vm_page_t p, next; /* * One and two page removals are most popular. @@ -5049,15 +5073,16 @@ vm_object_page_remove( * It balances vm_object_lookup vs iteration. */ - if (atop_64(end - start) < (unsigned)object->resident_page_count/16) { + if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) { vm_object_page_remove_lookup++; for (; start < end; start += PAGE_SIZE_64) { p = vm_page_lookup(object, start); if (p != VM_PAGE_NULL) { assert(!p->vmp_cleaning && !p->vmp_laundry); - if (!p->vmp_fictitious && p->vmp_pmapped) - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); + if (!p->vmp_fictitious && p->vmp_pmapped) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); + } VM_PAGE_FREE(p); } } @@ -5069,8 +5094,9 @@ vm_object_page_remove( next = (vm_page_t) vm_page_queue_next(&p->vmp_listq); if ((start <= p->vmp_offset) && (p->vmp_offset < end)) { assert(!p->vmp_cleaning && !p->vmp_laundry); - if (!p->vmp_fictitious && p->vmp_pmapped) - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); + if (!p->vmp_fictitious && p->vmp_pmapped) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p)); + } VM_PAGE_FREE(p); } p = next; @@ -5106,30 +5132,30 @@ static int vm_object_coalesce_count = 0; __private_extern__ boolean_t vm_object_coalesce( - vm_object_t prev_object, - vm_object_t next_object, - vm_object_offset_t prev_offset, + vm_object_t prev_object, + vm_object_t next_object, + vm_object_offset_t prev_offset, __unused vm_object_offset_t next_offset, - vm_object_size_t prev_size, - vm_object_size_t next_size) + vm_object_size_t prev_size, + vm_object_size_t next_size) { - vm_object_size_t newsize; + vm_object_size_t newsize; -#ifdef lint +#ifdef lint next_offset++; -#endif /* lint */ +#endif /* lint */ if (next_object != VM_OBJECT_NULL) { - return(FALSE); + return FALSE; } if (prev_object == VM_OBJECT_NULL) { - return(TRUE); + return TRUE; } XPR(XPR_VM_OBJECT, - "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n", - prev_object, prev_offset, prev_size, next_size, 0); + "vm_object_coalesce: 0x%X prev_off 0x%X prev_size 0x%X next_size 0x%X\n", + prev_object, prev_offset, prev_size, next_size, 0); vm_object_lock(prev_object); @@ -5158,7 +5184,7 @@ vm_object_coalesce( (prev_object->paging_in_progress != 0) || (prev_object->activity_in_progress != 0)) { vm_object_unlock(prev_object); - return(FALSE); + return FALSE; } vm_object_coalesce_count++; @@ -5168,8 +5194,8 @@ vm_object_coalesce( * a previous 
deallocation. */ vm_object_page_remove(prev_object, - prev_offset + prev_size, - prev_offset + prev_size + next_size); + prev_offset + prev_size, + prev_offset + prev_size + next_size); /* * Extend the object if necessary. @@ -5180,29 +5206,30 @@ vm_object_coalesce( } vm_object_unlock(prev_object); - return(TRUE); + return TRUE; } kern_return_t vm_object_populate_with_private( - vm_object_t object, - vm_object_offset_t offset, - ppnum_t phys_page, - vm_size_t size) + vm_object_t object, + vm_object_offset_t offset, + ppnum_t phys_page, + vm_size_t size) { - ppnum_t base_page; - vm_object_offset_t base_offset; + ppnum_t base_page; + vm_object_offset_t base_offset; - if (!object->private) + if (!object->private) { return KERN_FAILURE; + } base_page = phys_page; vm_object_lock(object); if (!object->phys_contiguous) { - vm_page_t m; + vm_page_t m; if ((base_offset = trunc_page_64(offset)) != offset) { vm_object_unlock(object); @@ -5216,7 +5243,6 @@ vm_object_populate_with_private( if (m != VM_PAGE_NULL) { if (m->vmp_fictitious) { if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) { - vm_page_lockspin_queues(); m->vmp_private = TRUE; vm_page_unlock_queues(); @@ -5225,25 +5251,24 @@ vm_object_populate_with_private( VM_PAGE_SET_PHYS_PAGE(m, base_page); } } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) { - - if ( !m->vmp_private) { + if (!m->vmp_private) { /* * we'd leak a real page... that can't be right */ panic("vm_object_populate_with_private - %p not private", m); } if (m->vmp_pmapped) { - /* + /* * pmap call to clear old mapping */ - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); } VM_PAGE_SET_PHYS_PAGE(m, base_page); } - } else { - while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) - vm_page_more_fictitious(); + while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL) { + vm_page_more_fictitious(); + } /* * private normally requires lock_queues but since we @@ -5255,19 +5280,19 @@ vm_object_populate_with_private( m->vmp_unusual = TRUE; m->vmp_busy = FALSE; - vm_page_insert(m, object, base_offset); + vm_page_insert(m, object, base_offset); } - base_page++; /* Go to the next physical page */ + base_page++; /* Go to the next physical page */ base_offset += PAGE_SIZE; size -= PAGE_SIZE; } } else { /* NOTE: we should check the original settings here */ /* if we have a size > zero a pmap call should be made */ - /* to disable the range */ + /* to disable the range */ /* pmap_? 
*/ - + /* shadows on contiguous memory are not allowed */ /* we therefore can use the offset field */ object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT; @@ -5281,37 +5306,38 @@ vm_object_populate_with_private( kern_return_t memory_object_create_named( - memory_object_t pager, - memory_object_offset_t size, - memory_object_control_t *control) + memory_object_t pager, + memory_object_offset_t size, + memory_object_control_t *control) { - vm_object_t object; + vm_object_t object; *control = MEMORY_OBJECT_CONTROL_NULL; - if (pager == MEMORY_OBJECT_NULL) + if (pager == MEMORY_OBJECT_NULL) { return KERN_INVALID_ARGUMENT; + } object = vm_object_memory_object_associate(pager, - VM_OBJECT_NULL, - size, - TRUE); + VM_OBJECT_NULL, + size, + TRUE); if (object == VM_OBJECT_NULL) { return KERN_INVALID_OBJECT; } - + /* wait for object (if any) to be ready */ if (object != VM_OBJECT_NULL) { vm_object_lock(object); object->named = TRUE; while (!object->pager_ready) { vm_object_sleep(object, - VM_OBJECT_EVENT_PAGER_READY, - THREAD_UNINT); + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); } *control = object->pager_control; vm_object_unlock(object); } - return (KERN_SUCCESS); + return KERN_SUCCESS; } @@ -5329,22 +5355,22 @@ memory_object_create_named( */ kern_return_t memory_object_recover_named( - memory_object_control_t control, - boolean_t wait_on_terminating) + memory_object_control_t control, + boolean_t wait_on_terminating) { - vm_object_t object; + vm_object_t object; object = memory_object_control_to_vm_object(control); if (object == VM_OBJECT_NULL) { - return (KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } restart: vm_object_lock(object); if (object->terminating && wait_on_terminating) { - vm_object_wait(object, - VM_OBJECT_EVENT_PAGING_IN_PROGRESS, - THREAD_UNINT); + vm_object_wait(object, + VM_OBJECT_EVENT_PAGING_IN_PROGRESS, + THREAD_UNINT); goto restart; } @@ -5363,16 +5389,16 @@ restart: vm_object_res_reference(object); while (!object->pager_ready) { vm_object_sleep(object, - VM_OBJECT_EVENT_PAGER_READY, - THREAD_UNINT); + VM_OBJECT_EVENT_PAGER_READY, + THREAD_UNINT); } vm_object_unlock(object); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* - * vm_object_release_name: + * vm_object_release_name: * * Enforces name semantic on memory_object reference count decrement * This routine should not be called unless the caller holds a name @@ -5383,26 +5409,26 @@ restart: * being the name. * If the decision is made to proceed the name field flag is set to * false and the reference count is decremented. If the RESPECT_CACHE - * flag is set and the reference count has gone to zero, the + * flag is set and the reference count has gone to zero, the * memory_object is checked to see if it is cacheable otherwise when * the reference count is zero, it is simply terminated. 
*/ __private_extern__ kern_return_t vm_object_release_name( - vm_object_t object, - int flags) + vm_object_t object, + int flags) { - vm_object_t shadow; - boolean_t original_object = TRUE; + vm_object_t shadow; + boolean_t original_object = TRUE; while (object != VM_OBJECT_NULL) { - vm_object_lock(object); assert(object->alive); - if (original_object) + if (original_object) { assert(object->named); + } assert(object->ref_count > 0); /* @@ -5413,16 +5439,16 @@ vm_object_release_name( if (object->pager_created && !object->pager_initialized) { assert(!object->can_persist); vm_object_assert_wait(object, - VM_OBJECT_EVENT_INITIALIZED, - THREAD_UNINT); + VM_OBJECT_EVENT_INITIALIZED, + THREAD_UNINT); vm_object_unlock(object); thread_block(THREAD_CONTINUE_NULL); continue; } if (((object->ref_count > 1) - && (flags & MEMORY_OBJECT_TERMINATE_IDLE)) - || (object->terminating)) { + && (flags & MEMORY_OBJECT_TERMINATE_IDLE)) + || (object->terminating)) { vm_object_unlock(object); return KERN_FAILURE; } else { @@ -5431,11 +5457,12 @@ vm_object_release_name( return KERN_SUCCESS; } } - + if ((flags & MEMORY_OBJECT_RESPECT_CACHE) && - (object->ref_count == 1)) { - if (original_object) + (object->ref_count == 1)) { + if (original_object) { object->named = FALSE; + } vm_object_unlock(object); /* let vm_object_deallocate push this thing into */ /* the cache, if that it is where it is bound */ @@ -5463,8 +5490,9 @@ vm_object_release_name( vm_object_lock_assert_exclusive(object); object->ref_count--; assert(object->ref_count > 0); - if(original_object) + if (original_object) { object->named = FALSE; + } vm_object_unlock(object); return KERN_SUCCESS; } @@ -5477,30 +5505,32 @@ vm_object_release_name( __private_extern__ kern_return_t vm_object_lock_request( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - memory_object_return_t should_return, - int flags, - vm_prot_t prot) + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + memory_object_return_t should_return, + int flags, + vm_prot_t prot) { - __unused boolean_t should_flush; + __unused boolean_t should_flush; should_flush = flags & MEMORY_OBJECT_DATA_FLUSH; - XPR(XPR_MEMORY_OBJECT, + XPR(XPR_MEMORY_OBJECT, "vm_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n", - object, offset, size, - (((should_return&1)<<1)|should_flush), prot); + object, offset, size, + (((should_return & 1) << 1) | should_flush), prot); /* * Check for bogus arguments. 
*/ - if (object == VM_OBJECT_NULL) - return (KERN_INVALID_ARGUMENT); + if (object == VM_OBJECT_NULL) { + return KERN_INVALID_ARGUMENT; + } - if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) - return (KERN_INVALID_ARGUMENT); + if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) { + return KERN_INVALID_ARGUMENT; + } size = round_page_64(size); @@ -5512,12 +5542,12 @@ vm_object_lock_request( vm_object_paging_begin(object); (void)vm_object_update(object, - offset, size, NULL, NULL, should_return, flags, prot); + offset, size, NULL, NULL, should_return, flags, prot); vm_object_paging_end(object); vm_object_unlock(object); - return (KERN_SUCCESS); + return KERN_SUCCESS; } /* @@ -5535,14 +5565,15 @@ vm_object_lock_request( uint64_t vm_object_purge(vm_object_t object, int flags) { - unsigned int object_page_count = 0, pgcount = 0; - uint64_t total_purged_pgcount = 0; - boolean_t skipped_object = FALSE; + unsigned int object_page_count = 0, pgcount = 0; + uint64_t total_purged_pgcount = 0; + boolean_t skipped_object = FALSE; - vm_object_lock_assert_exclusive(object); + vm_object_lock_assert_exclusive(object); - if (object->purgable == VM_PURGABLE_DENY) + if (object->purgable == VM_PURGABLE_DENY) { return 0; + } assert(object->copy == VM_OBJECT_NULL); assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); @@ -5565,25 +5596,25 @@ vm_object_purge(vm_object_t object, int flags) if (object->purgable == VM_PURGABLE_VOLATILE) { unsigned int delta; assert(object->resident_page_count >= - object->wired_page_count); + object->wired_page_count); delta = (object->resident_page_count - - object->wired_page_count); + object->wired_page_count); if (delta != 0) { assert(vm_page_purgeable_count >= - delta); + delta); OSAddAtomic(-delta, - (SInt32 *)&vm_page_purgeable_count); + (SInt32 *)&vm_page_purgeable_count); } if (object->wired_page_count != 0) { assert(vm_page_purgeable_wired_count >= - object->wired_page_count); + object->wired_page_count); OSAddAtomic(-object->wired_page_count, - (SInt32 *)&vm_page_purgeable_wired_count); + (SInt32 *)&vm_page_purgeable_wired_count); } object->purgable = VM_PURGABLE_EMPTY; } assert(object->purgable == VM_PURGABLE_EMPTY); - + object_page_count = object->resident_page_count; vm_object_reap_pages(object, REAP_PURGEABLE); @@ -5595,7 +5626,6 @@ vm_object_purge(vm_object_t object, int flags) } if (object->pager != NULL) { - assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); if (object->activity_in_progress == 0 && @@ -5613,15 +5643,15 @@ vm_object_purge(vm_object_t object, int flags) if (pgcount) { pgcount = vm_compressor_pager_reap_pages(object->pager, flags); vm_compressor_pager_count(object->pager, - -pgcount, - FALSE, /* shared */ - object); + -pgcount, + FALSE, /* shared */ + object); vm_object_owner_compressed_update(object, - -pgcount); + -pgcount); } - if ( !(flags & C_DONT_BLOCK)) { + if (!(flags & C_DONT_BLOCK)) { assert(vm_compressor_pager_get_count(object->pager) - == 0); + == 0); } } else { /* @@ -5631,7 +5661,7 @@ vm_object_purge(vm_object_t object, int flags) * the VM object is not locked, so it could race * with us. 
* - * We can't really synchronize this without possibly + * We can't really synchronize this without possibly * causing a deadlock when the compressor needs to * allocate or free memory while compressing or * decompressing a page from a purgeable object @@ -5650,15 +5680,15 @@ vm_object_purge(vm_object_t object, int flags) total_purged_pgcount += pgcount; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)), - VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ - object_page_count, - total_purged_pgcount, - skipped_object, - 0); + VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ + object_page_count, + total_purged_pgcount, + skipped_object, + 0); return total_purged_pgcount; } - + /* * vm_object_purgeable_control() allows the caller to control and investigate the @@ -5737,12 +5767,12 @@ vm_object_purge(vm_object_t object, int flags) */ kern_return_t vm_object_purgable_control( - vm_object_t object, - vm_purgable_t control, - int *state) + vm_object_t object, + vm_purgable_t control, + int *state) { - int old_state; - int new_state; + int old_state; + int new_state; if (object == VM_OBJECT_NULL) { /* @@ -5757,11 +5787,12 @@ vm_object_purgable_control( * Get current state of the purgeable object. */ old_state = object->purgable; - if (old_state == VM_PURGABLE_DENY) + if (old_state == VM_PURGABLE_DENY) { return KERN_INVALID_ARGUMENT; - + } + /* purgeable cant have delayed copies - now or in the future */ - assert(object->copy == VM_OBJECT_NULL); + assert(object->copy == VM_OBJECT_NULL); assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE); /* @@ -5815,28 +5846,28 @@ vm_object_purgable_control( unsigned int delta; assert(object->resident_page_count >= - object->wired_page_count); + object->wired_page_count); delta = (object->resident_page_count - - object->wired_page_count); + object->wired_page_count); assert(vm_page_purgeable_count >= delta); if (delta != 0) { OSAddAtomic(-delta, - (SInt32 *)&vm_page_purgeable_count); + (SInt32 *)&vm_page_purgeable_count); } if (object->wired_page_count != 0) { assert(vm_page_purgeable_wired_count >= - object->wired_page_count); + object->wired_page_count); OSAddAtomic(-object->wired_page_count, - (SInt32 *)&vm_page_purgeable_wired_count); + (SInt32 *)&vm_page_purgeable_wired_count); } vm_page_lock_queues(); /* object should be on a queue */ assert(object->objq.next != NULL && - object->objq.prev != NULL); + object->objq.prev != NULL); purgeable_q_t queue; /* @@ -5849,7 +5880,7 @@ vm_object_purgable_control( if (object->purgeable_when_ripe) { vm_purgeable_token_delete_last(queue); } - assert(queue->debug_count_objects>=0); + assert(queue->debug_count_objects >= 0); vm_page_unlock_queues(); } @@ -5866,10 +5897,10 @@ vm_object_purgable_control( case VM_PURGABLE_VOLATILE: if (object->volatile_fault) { - vm_page_t p; - int refmod; + vm_page_t p; + int refmod; - vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { + vm_page_queue_iterate(&object->memq, p, vmp_listq) { if (p->vmp_busy || VM_PAGE_WIRED(p) || p->vmp_fictitious) { @@ -5886,17 +5917,18 @@ vm_object_purgable_control( assert(old_state != VM_PURGABLE_EMPTY); purgeable_q_t queue; - + /* find the correct queue */ - if ((*state&VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) - queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; - else { - if ((*state&VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) - queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; - else - queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; - } - + if ((*state & 
VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) { + queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; + } else { + if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) { + queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; + } else { + queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; + } + } + if (old_state == VM_PURGABLE_NONVOLATILE || old_state == VM_PURGABLE_EMPTY) { unsigned int delta; @@ -5907,7 +5939,7 @@ vm_object_purgable_control( } else { object->purgeable_when_ripe = TRUE; } - + if (object->purgeable_when_ripe) { kern_return_t result; @@ -5923,17 +5955,17 @@ vm_object_purgable_control( } assert(object->resident_page_count >= - object->wired_page_count); + object->wired_page_count); delta = (object->resident_page_count - - object->wired_page_count); + object->wired_page_count); if (delta != 0) { OSAddAtomic(delta, - &vm_page_purgeable_count); + &vm_page_purgeable_count); } if (object->wired_page_count != 0) { OSAddAtomic(object->wired_page_count, - &vm_page_purgeable_wired_count); + &vm_page_purgeable_wired_count); } object->purgable = new_state; @@ -5941,12 +5973,11 @@ vm_object_purgable_control( /* object should be on "non-volatile" queue */ assert(object->objq.next != NULL); assert(object->objq.prev != NULL); - } - else if (old_state == VM_PURGABLE_VOLATILE) { - purgeable_q_t old_queue; - boolean_t purgeable_when_ripe; + } else if (old_state == VM_PURGABLE_VOLATILE) { + purgeable_q_t old_queue; + boolean_t purgeable_when_ripe; - /* + /* * if reassigning priorities / purgeable groups, we don't change the * token queue. So moving priorities will not make pages stay around longer. * Reasoning is that the algorithm gives most priority to the most important @@ -5954,54 +5985,54 @@ vm_object_purgable_control( * This biases the system already for purgeable queues that move a lot. * It doesn't seem more biasing is neccessary in this case, where no new object is added. */ - assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */ - + assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */ + old_queue = vm_purgeable_object_remove(object); assert(old_queue); - + if ((*state & VM_PURGABLE_NO_AGING_MASK) == VM_PURGABLE_NO_AGING) { purgeable_when_ripe = FALSE; } else { purgeable_when_ripe = TRUE; } - + if (old_queue != queue || (purgeable_when_ripe != - object->purgeable_when_ripe)) { + object->purgeable_when_ripe)) { kern_return_t result; - /* Changing queue. Have to move token. */ - vm_page_lock_queues(); + /* Changing queue. Have to move token. 
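
The restyled chain above maps two bit-fields of the caller's *state word onto one of the three purgeable token queues: the ordering bit selects the OBSOLETE queue outright, otherwise the behavior bit picks FIFO or LIFO. A minimal userland sketch of that selection logic, with toy mask values standing in for the real VM_PURGABLE_* constants defined elsewhere in the tree:

#include <stdio.h>

/* Toy stand-ins for the VM_PURGABLE_* bit masks; values are illustrative only. */
#define ORDERING_MASK      0x200
#define ORDERING_OBSOLETE  0x200
#define BEHAVIOR_MASK      0x100
#define BEHAVIOR_FIFO      0x000

enum queue_type { Q_OBSOLETE, Q_FIFO, Q_LIFO };

/* Mirror of the cleaned-up if/else chain: the ordering bit wins, then behavior. */
static enum queue_type
pick_queue(int state)
{
	if ((state & ORDERING_MASK) == ORDERING_OBSOLETE) {
		return Q_OBSOLETE;
	}
	if ((state & BEHAVIOR_MASK) == BEHAVIOR_FIFO) {
		return Q_FIFO;
	}
	return Q_LIFO;
}

int
main(void)
{
	printf("%d %d %d\n",
	    pick_queue(0x200),   /* obsolete ordering -> Q_OBSOLETE */
	    pick_queue(0x000),   /* FIFO behavior     -> Q_FIFO */
	    pick_queue(0x100));  /* otherwise         -> Q_LIFO */
	return 0;
}
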
*/ + vm_page_lock_queues(); if (object->purgeable_when_ripe) { vm_purgeable_token_delete_last(old_queue); } object->purgeable_when_ripe = purgeable_when_ripe; if (object->purgeable_when_ripe) { result = vm_purgeable_token_add(queue); - assert(result==KERN_SUCCESS); /* this should never fail since we just freed a token */ + assert(result == KERN_SUCCESS); /* this should never fail since we just freed a token */ } vm_page_unlock_queues(); - } - }; - vm_purgeable_object_add(object, queue, (*state&VM_VOLATILE_GROUP_MASK)>>VM_VOLATILE_GROUP_SHIFT ); + } + ; + vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT ); if (old_state == VM_PURGABLE_NONVOLATILE) { vm_purgeable_accounting(object, - VM_PURGABLE_NONVOLATILE); + VM_PURGABLE_NONVOLATILE); } - assert(queue->debug_count_objects>=0); - + assert(queue->debug_count_objects >= 0); + break; case VM_PURGABLE_EMPTY: if (object->volatile_fault) { - vm_page_t p; - int refmod; + vm_page_t p; + int refmod; - vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { + vm_page_queue_iterate(&object->memq, p, vmp_listq) { if (p->vmp_busy || VM_PAGE_WIRED(p) || p->vmp_fictitious) { @@ -6020,7 +6051,7 @@ vm_object_purgable_control( /* object should be on a queue */ assert(object->objq.next != NULL && - object->objq.prev != NULL); + object->objq.prev != NULL); old_queue = vm_purgeable_object_remove(object); assert(old_queue); @@ -6038,7 +6069,7 @@ vm_object_purgable_control( * "volatile". */ vm_purgeable_accounting(object, - VM_PURGABLE_NONVOLATILE); + VM_PURGABLE_NONVOLATILE); /* * Set to VM_PURGABLE_EMPTY because the pages are no * longer accounted in the "non-volatile" ledger @@ -6063,33 +6094,32 @@ vm_object_purgable_control( kern_return_t vm_object_get_page_counts( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - unsigned int *resident_page_count, - unsigned int *dirty_page_count) + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + unsigned int *resident_page_count, + unsigned int *dirty_page_count) { + kern_return_t kr = KERN_SUCCESS; + boolean_t count_dirty_pages = FALSE; + vm_page_t p = VM_PAGE_NULL; + unsigned int local_resident_count = 0; + unsigned int local_dirty_count = 0; + vm_object_offset_t cur_offset = 0; + vm_object_offset_t end_offset = 0; - kern_return_t kr = KERN_SUCCESS; - boolean_t count_dirty_pages = FALSE; - vm_page_t p = VM_PAGE_NULL; - unsigned int local_resident_count = 0; - unsigned int local_dirty_count = 0; - vm_object_offset_t cur_offset = 0; - vm_object_offset_t end_offset = 0; - - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return KERN_INVALID_ARGUMENT; + } cur_offset = offset; - + end_offset = offset + size; vm_object_lock_assert_exclusive(object); if (dirty_page_count != NULL) { - count_dirty_pages = TRUE; } @@ -6100,49 +6130,37 @@ vm_object_get_page_counts( * - the entire object is exactly covered by the request. 
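
vm_object_get_page_counts() below chooses between two traversals: when the object holds fewer resident pages than the request spans, one pass over the object's page list is cheaper; otherwise probing each page-sized offset with a lookup wins. A compilable toy of the same density heuristic; the types and the linear lookup are stand-ins, not the kernel's vm_page_lookup():

#include <stddef.h>

#define TOY_PAGE_SHIFT 12

struct toy_page { unsigned long offset; struct toy_page *next; };
struct toy_object { unsigned resident_page_count; struct toy_page *pages; };

/* Hypothetical per-offset lookup; linear here, hashed in a real VM. */
static struct toy_page *
toy_page_lookup(struct toy_object *obj, unsigned long off)
{
	for (struct toy_page *p = obj->pages; p; p = p->next) {
		if (p->offset == off) {
			return p;
		}
	}
	return NULL;
}

static unsigned
count_resident(struct toy_object *obj, unsigned long start, unsigned long size)
{
	unsigned count = 0;

	if (obj->resident_page_count <= (size >> TOY_PAGE_SHIFT)) {
		/* sparse object: one pass over the resident-page list */
		for (struct toy_page *p = obj->pages; p; p = p->next) {
			if (p->offset >= start && p->offset < start + size) {
				count++;
			}
		}
	} else {
		/* dense object, small window: probe each page-sized offset */
		for (unsigned long off = start; off < start + size;
		    off += (1UL << TOY_PAGE_SHIFT)) {
			if (toy_page_lookup(obj, off) != NULL) {
				count++;
			}
		}
	}
	return count;
}
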
*/ if (offset == 0 && (object->vo_size == size)) { - *resident_page_count = object->resident_page_count; goto out; } } if (object->resident_page_count <= (size >> PAGE_SHIFT)) { - - vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { - + vm_page_queue_iterate(&object->memq, p, vmp_listq) { if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) { - local_resident_count++; if (count_dirty_pages) { - if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) { - local_dirty_count++; } } } } } else { - for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) { - p = vm_page_lookup(object, cur_offset); - - if (p != VM_PAGE_NULL) { + if (p != VM_PAGE_NULL) { local_resident_count++; if (count_dirty_pages) { - if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) { - local_dirty_count++; } } } } - } if (resident_page_count != NULL) { @@ -6158,7 +6176,7 @@ out: } -#if TASK_SWAPPER +#if TASK_SWAPPER /* * vm_object_res_deallocate * @@ -6174,7 +6192,7 @@ out: __private_extern__ void vm_object_res_deallocate( - vm_object_t object) + vm_object_t object) { vm_object_t orig_object = object; /* @@ -6183,22 +6201,25 @@ vm_object_res_deallocate( * unlocked. */ assert(object->res_count > 0); - while (--object->res_count == 0) { + while (--object->res_count == 0) { assert(object->ref_count >= object->res_count); vm_object_deactivate_all_pages(object); /* iterate on shadow, if present */ if (object->shadow != VM_OBJECT_NULL) { vm_object_t tmp_object = object->shadow; vm_object_lock(tmp_object); - if (object != orig_object) + if (object != orig_object) { vm_object_unlock(object); + } object = tmp_object; assert(object->res_count > 0); - } else + } else { break; + } } - if (object != orig_object) + if (object != orig_object) { vm_object_unlock(object); + } } /* @@ -6217,28 +6238,30 @@ vm_object_res_deallocate( __private_extern__ void vm_object_res_reference( - vm_object_t object) + vm_object_t object) { vm_object_t orig_object = object; - /* + /* * Object is locked, so this can be called directly * from vm_object_reference. This lock is never released. 
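
Both res_count walks here use hand-over-hand locking down the shadow chain: take the next object's lock before releasing the current one, so the chain cannot be unlinked mid-walk. A minimal pthread sketch of the same pattern, with toy types; the kernel uses vm_object locks and, as the comment above notes, never releases the original object's lock:

#include <pthread.h>
#include <stddef.h>

struct obj {
	pthread_mutex_t lock;
	int res_count;
	struct obj *shadow;
};

/* Caller holds o->lock on entry; that lock stays held, as in the kernel. */
static void
res_reference(struct obj *o)
{
	struct obj *orig = o;

	while (++o->res_count == 1 && o->shadow != NULL) {
		struct obj *next = o->shadow;
		pthread_mutex_lock(&next->lock);        /* take the child... */
		if (o != orig) {
			pthread_mutex_unlock(&o->lock); /* ...then drop the parent */
		}
		o = next;
	}
	if (o != orig) {
		pthread_mutex_unlock(&o->lock);
	}
}
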
*/ - while ((++object->res_count == 1) && - (object->shadow != VM_OBJECT_NULL)) { + while ((++object->res_count == 1) && + (object->shadow != VM_OBJECT_NULL)) { vm_object_t tmp_object = object->shadow; assert(object->ref_count >= object->res_count); vm_object_lock(tmp_object); - if (object != orig_object) + if (object != orig_object) { vm_object_unlock(object); + } object = tmp_object; } - if (object != orig_object) + if (object != orig_object) { vm_object_unlock(object); + } assert(orig_object->ref_count >= orig_object->res_count); } -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ /* * vm_object_reference: @@ -6250,10 +6273,11 @@ vm_object_res_reference( #endif __private_extern__ void vm_object_reference( - vm_object_t object) + vm_object_t object) { - if (object == VM_OBJECT_NULL) + if (object == VM_OBJECT_NULL) { return; + } vm_object_lock(object); assert(object->ref_count > 0); @@ -6274,15 +6298,15 @@ vm_object_reference( unsigned int vm_object_transpose_count = 0; kern_return_t vm_object_transpose( - vm_object_t object1, - vm_object_t object2, - vm_object_size_t transpose_size) + vm_object_t object1, + vm_object_t object2, + vm_object_size_t transpose_size) { - vm_object_t tmp_object; - kern_return_t retval; - boolean_t object1_locked, object2_locked; - vm_page_t page; - vm_object_offset_t page_offset; + vm_object_t tmp_object; + kern_return_t retval; + boolean_t object1_locked, object2_locked; + vm_page_t page; + vm_object_offset_t page_offset; tmp_object = VM_OBJECT_NULL; object1_locked = FALSE; object2_locked = FALSE; @@ -6303,7 +6327,7 @@ vm_object_transpose( * make sure we always lock them in the same order to * avoid deadlocks. */ - if (object1 > object2) { + if (object1 > object2) { tmp_object = object1; object1 = object2; object2 = tmp_object; @@ -6333,14 +6357,14 @@ vm_object_transpose( goto done; } /* - * We're about to mess with the object's backing store and + * We're about to mess with the object's backing store and * taking a "paging_in_progress" reference wouldn't be enough * to prevent any paging activity on this object, so the caller should * have "quiesced" the objects beforehand, via a UPL operation with * UPL_SET_IO_WIRE (to make sure all the pages are there and wired) * and UPL_BLOCK_ACCESS (to mark the pages "busy"). - * - * Wait for any paging operation to complete (but only paging, not + * + * Wait for any paging operation to complete (but only paging, not * other kind of activities not linked to the pager). After we're * satisfied that there's no more paging in progress, we keep the * object locked, to guarantee that no one tries to access its pager. @@ -6352,7 +6376,7 @@ vm_object_transpose( */ vm_object_lock(object2); object2_locked = TRUE; - if (! object2->alive || object2->terminating || + if (!object2->alive || object2->terminating || object2->copy || object2->shadow || object2->shadowed || object2->purgable != VM_PURGABLE_DENY) { retval = KERN_INVALID_VALUE; @@ -6368,7 +6392,7 @@ vm_object_transpose( * exchange their backing stores or one would overflow. * If their size doesn't match the caller's * "transpose_size", we can't do it either because the - * transpose operation will affect the entire span of + * transpose operation will affect the entire span of 
*/ retval = KERN_INVALID_VALUE; @@ -6409,7 +6433,7 @@ vm_object_transpose( page_offset = page->vmp_offset; vm_page_remove(page, TRUE); page->vmp_offset = page_offset; - vm_page_queue_enter(&tmp_object->memq, page, vm_page_t, vmp_listq); + vm_page_queue_enter(&tmp_object->memq, page, vmp_listq); } assert(vm_page_queue_empty(&object1->memq)); /* transfer object2's pages to object1 */ @@ -6421,18 +6445,17 @@ vm_object_transpose( /* transfer tmp_object's pages to object2 */ while (!vm_page_queue_empty(&tmp_object->memq)) { page = (vm_page_t) vm_page_queue_first(&tmp_object->memq); - vm_page_queue_remove(&tmp_object->memq, page, - vm_page_t, vmp_listq); + vm_page_queue_remove(&tmp_object->memq, page, vmp_listq); vm_page_insert(page, object2, page->vmp_offset); } assert(vm_page_queue_empty(&tmp_object->memq)); } -#define __TRANSPOSE_FIELD(field) \ -MACRO_BEGIN \ - tmp_object->field = object1->field; \ - object1->field = object2->field; \ - object2->field = tmp_object->field; \ +#define __TRANSPOSE_FIELD(field) \ +MACRO_BEGIN \ + tmp_object->field = object1->field; \ + object1->field = object2->field; \ + object2->field = tmp_object->field; \ MACRO_END /* "Lock" refers to the object not its contents */ @@ -6447,7 +6470,7 @@ MACRO_END #endif /* "resident_page_count" was updated above when transposing pages */ /* "wired_page_count" was updated above when transposing pages */ -#if ! VM_TAG_ACTIVE_UPDATE +#if !VM_TAG_ACTIVE_UPDATE /* "wired_objq" was dealt with along with "wired_page_count" */ #endif /* ! VM_TAG_ACTIVE_UPDATE */ /* "reusable_page_count" was updated above when transposing pages */ @@ -6464,11 +6487,11 @@ MACRO_END /* update the memory_objects' pointers back to the VM objects */ if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) { memory_object_control_collapse(object1->pager_control, - object1); + object1); } if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) { memory_object_control_collapse(object2->pager_control, - object2); + object2); } __TRANSPOSE_FIELD(copy_strategy); /* "paging_in_progress" refers to the object not its contents */ @@ -6623,35 +6646,35 @@ extern int speculative_reads_disabled; * are odd multiples of PAGE_SIZE. 
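
The __TRANSPOSE_FIELD macro above swaps one field at a time between the two objects through the scratch tmp_object, three assignments per field. The same shape as a standalone, hypothetical macro; the do/while(0) wrapper plays the role of MACRO_BEGIN/MACRO_END, making the expansion safe to use as a single statement:

#include <assert.h>

struct blob { int size; int flags; };

#define TRANSPOSE_FIELD(tmp, a, b, field)  \
	do {                               \
		(tmp)->field = (a)->field; \
		(a)->field = (b)->field;   \
		(b)->field = (tmp)->field; \
	} while (0)

int
main(void)
{
	struct blob tmp, a = { 4096, 1 }, b = { 8192, 2 };

	TRANSPOSE_FIELD(&tmp, &a, &b, size);
	TRANSPOSE_FIELD(&tmp, &a, &b, flags);
	assert(a.size == 8192 && b.size == 4096);
	assert(a.flags == 2 && b.flags == 1);
	return 0;
}
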
*/ #if CONFIG_EMBEDDED - unsigned int preheat_max_bytes = (1024 * 512); +unsigned int preheat_max_bytes = (1024 * 512); #else /* CONFIG_EMBEDDED */ - unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES; +unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES; #endif /* CONFIG_EMBEDDED */ unsigned int preheat_min_bytes = (1024 * 32); __private_extern__ void vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, - vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming) + vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming) { - vm_size_t pre_heat_size; - vm_size_t tail_size; - vm_size_t head_size; - vm_size_t max_length; - vm_size_t cluster_size; - vm_object_offset_t object_size; - vm_object_offset_t orig_start; - vm_object_offset_t target_start; - vm_object_offset_t offset; - vm_behavior_t behavior; - boolean_t look_behind = TRUE; - boolean_t look_ahead = TRUE; - boolean_t isSSD = FALSE; - uint32_t throttle_limit; - int sequential_run; - int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; - vm_size_t max_ph_size; - vm_size_t min_ph_size; + vm_size_t pre_heat_size; + vm_size_t tail_size; + vm_size_t head_size; + vm_size_t max_length; + vm_size_t cluster_size; + vm_object_offset_t object_size; + vm_object_offset_t orig_start; + vm_object_offset_t target_start; + vm_object_offset_t offset; + vm_behavior_t behavior; + boolean_t look_behind = TRUE; + boolean_t look_ahead = TRUE; + boolean_t isSSD = FALSE; + uint32_t throttle_limit; + int sequential_run; + int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; + vm_size_t max_ph_size; + vm_size_t min_ph_size; assert( !(*length & PAGE_MASK)); assert( !(*start & PAGE_MASK_64)); @@ -6669,10 +6692,10 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, *io_streaming = 0; if (speculative_reads_disabled || fault_info == NULL) { - /* + /* * no cluster... just fault the page in */ - return; + return; } orig_start = *start; target_start = orig_start; @@ -6681,9 +6704,9 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, vm_object_lock(object); - if (object->pager == MEMORY_OBJECT_NULL) - goto out; /* pager is gone for this object, nothing more to do */ - + if (object->pager == MEMORY_OBJECT_NULL) { + goto out; /* pager is gone for this object, nothing more to do */ + } vnode_pager_get_isSSD(object->pager, &isSSD); min_ph_size = round_page(preheat_min_bytes); @@ -6704,142 +6727,151 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, } #endif /* !CONFIG_EMBEDDED */ - if (min_ph_size < PAGE_SIZE) + if (min_ph_size < PAGE_SIZE) { min_ph_size = PAGE_SIZE; + } - if (max_ph_size < PAGE_SIZE) + if (max_ph_size < PAGE_SIZE) { max_ph_size = PAGE_SIZE; - else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) + } else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) { max_ph_size = MAX_UPL_TRANSFER_BYTES; + } - if (max_length > max_ph_size) - max_length = max_ph_size; + if (max_length > max_ph_size) { + max_length = max_ph_size; + } - if (max_length <= PAGE_SIZE) + if (max_length <= PAGE_SIZE) { goto out; + } - if (object->internal) - object_size = object->vo_size; - else - vnode_pager_get_object_size(object->pager, &object_size); + if (object->internal) { + object_size = object->vo_size; + } else { + vnode_pager_get_object_size(object->pager, &object_size); + } object_size = round_page_64(object_size); if (orig_start >= object_size) { - /* + /* * fault occurred beyond the EOF... 
* we need to punt w/o changing the * starting offset */ - goto out; + goto out; } if (object->pages_used > object->pages_created) { - /* + /* * must have wrapped our 32 bit counters * so reset */ - object->pages_used = object->pages_created = 0; + object->pages_used = object->pages_created = 0; } if ((sequential_run = object->sequential)) { - if (sequential_run < 0) { - sequential_behavior = VM_BEHAVIOR_RSEQNTL; - sequential_run = 0 - sequential_run; - } else { - sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; - } - + if (sequential_run < 0) { + sequential_behavior = VM_BEHAVIOR_RSEQNTL; + sequential_run = 0 - sequential_run; + } else { + sequential_behavior = VM_BEHAVIOR_SEQUENTIAL; + } } switch (behavior) { - default: - behavior = VM_BEHAVIOR_DEFAULT; + behavior = VM_BEHAVIOR_DEFAULT; case VM_BEHAVIOR_DEFAULT: - if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) - goto out; + if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) { + goto out; + } if (sequential_run >= (3 * PAGE_SIZE)) { - pre_heat_size = sequential_run + PAGE_SIZE; + pre_heat_size = sequential_run + PAGE_SIZE; - if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) - look_behind = FALSE; - else - look_ahead = FALSE; + if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) { + look_behind = FALSE; + } else { + look_ahead = FALSE; + } *io_streaming = 1; } else { - if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) { - /* + /* * prime the pump */ - pre_heat_size = min_ph_size; + pre_heat_size = min_ph_size; } else { /* * Linear growth in PH size: The maximum size is max_length... - * this cacluation will result in a size that is neither a + * this cacluation will result in a size that is neither a * power of 2 nor a multiple of PAGE_SIZE... so round * it up to the nearest PAGE_SIZE boundary */ pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created; - if (pre_heat_size < min_ph_size) + if (pre_heat_size < min_ph_size) { pre_heat_size = min_ph_size; - else + } else { pre_heat_size = round_page(pre_heat_size); + } } } break; case VM_BEHAVIOR_RANDOM: - if ((pre_heat_size = cluster_size) <= PAGE_SIZE) - goto out; - break; + if ((pre_heat_size = cluster_size) <= PAGE_SIZE) { + goto out; + } + break; case VM_BEHAVIOR_SEQUENTIAL: - if ((pre_heat_size = cluster_size) == 0) - pre_heat_size = sequential_run + PAGE_SIZE; + if ((pre_heat_size = cluster_size) == 0) { + pre_heat_size = sequential_run + PAGE_SIZE; + } look_behind = FALSE; *io_streaming = 1; - break; + break; case VM_BEHAVIOR_RSEQNTL: - if ((pre_heat_size = cluster_size) == 0) - pre_heat_size = sequential_run + PAGE_SIZE; + if ((pre_heat_size = cluster_size) == 0) { + pre_heat_size = sequential_run + PAGE_SIZE; + } look_ahead = FALSE; *io_streaming = 1; - break; - + break; } throttle_limit = (uint32_t) max_length; assert(throttle_limit == max_length); if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) { - if (max_length > throttle_limit) + if (max_length > throttle_limit) { max_length = throttle_limit; + } + } + if (pre_heat_size > max_length) { + pre_heat_size = max_length; } - if (pre_heat_size > max_length) - pre_heat_size = max_length; if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) { - unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count; - + if (consider_free < vm_page_throttle_limit) { pre_heat_size = trunc_page(pre_heat_size / 16); } else if (consider_free < vm_page_free_target) { pre_heat_size = trunc_page(pre_heat_size / 4); } - - 
if (pre_heat_size < min_ph_size) + + if (pre_heat_size < min_ph_size) { pre_heat_size = min_ph_size; + } } if (look_ahead == TRUE) { - if (look_behind == TRUE) { + if (look_behind == TRUE) { /* - * if we get here it's due to a random access... + * if we get here it's due to a random access... * so we want to center the original fault address * within the cluster we will issue... make sure * to calculate 'head_size' as a multiple of PAGE_SIZE... @@ -6849,10 +6881,11 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, */ head_size = trunc_page(pre_heat_size / 2); - if (target_start > head_size) + if (target_start > head_size) { target_start -= head_size; - else + } else { target_start = 0; + } /* * 'target_start' at this point represents the beginning offset @@ -6861,47 +6894,50 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, * due to running into the start of the file */ } - if ((target_start + pre_heat_size) > object_size) - pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start)); + if ((target_start + pre_heat_size) > object_size) { + pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start)); + } /* * at this point calculate the number of pages beyond the original fault * address that we want to consider... this is guaranteed not to extend beyond * the current EOF... */ assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start)); - tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE; + tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE; } else { - if (pre_heat_size > target_start) { + if (pre_heat_size > target_start) { /* * since pre_heat_size is always smaller than 2^32, * if it is larger than target_start (a 64 bit value) * it is safe to clip target_start to 32 bits */ - pre_heat_size = (vm_size_t) target_start; + pre_heat_size = (vm_size_t) target_start; } tail_size = 0; } assert( !(target_start & PAGE_MASK_64)); assert( !(pre_heat_size & PAGE_MASK_64)); - if (pre_heat_size <= PAGE_SIZE) - goto out; + if (pre_heat_size <= PAGE_SIZE) { + goto out; + } if (look_behind == TRUE) { - /* + /* * take a look at the pages before the original * faulting offset... recalculate this in case - * we had to clip 'pre_heat_size' above to keep + * we had to clip 'pre_heat_size' above to keep * from running past the EOF. 
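
For the random-access case just restyled, the cluster is centered on the faulting page: half the preheat size becomes the head, clamped at file offset 0 and truncated to a page multiple, and the tail is whatever remains after the head and the faulting page itself. A self-contained sketch of that arithmetic with illustrative constants (PAGE_SIZE fixed at 4 KB; the kernel works in vm_object_offset_t and vm_size_t, and rounds rather than clips at EOF):

#include <stdint.h>
#include <stdio.h>

#define PGSZ     4096UL
#define TRUNC(x) ((x) & ~(PGSZ - 1))

/* Assumes pre_heat covers the head plus the faulting page, as the kernel
 * guarantees by the time it reaches this computation. */
static void
center_cluster(uint64_t fault, unsigned long pre_heat, uint64_t object_size,
    uint64_t *start, unsigned long *head, unsigned long *tail)
{
	unsigned long head_size = TRUNC(pre_heat / 2);

	*start = (fault > head_size) ? fault - head_size : 0;
	if (*start + pre_heat > object_size) {
		pre_heat = (unsigned long)(object_size - *start); /* EOF clip */
	}
	*head = (unsigned long)(fault - *start);
	*tail = pre_heat - *head - PGSZ;  /* minus the faulting page itself */
}

int
main(void)
{
	uint64_t start;
	unsigned long head, tail;

	/* fault at page 16, 8-page preheat, 64-page file: 4 pages ahead, 3 behind */
	center_cluster(16 * PGSZ, 8 * PGSZ, 64 * PGSZ, &start, &head, &tail);
	printf("start=%llu head=%lu tail=%lu\n",
	    (unsigned long long)start, head, tail);
	return 0;
}
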
*/ - head_size = pre_heat_size - tail_size - PAGE_SIZE; + head_size = pre_heat_size - tail_size - PAGE_SIZE; - for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) { - /* - * don't poke below the lowest offset + for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) { + /* + * don't poke below the lowest offset */ - if (offset < fault_info->lo_offset) - break; + if (offset < fault_info->lo_offset) { + break; + } /* * for external objects or internal objects w/o a pager, * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN @@ -6910,22 +6946,23 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, break; } if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { - /* + /* * don't bridge resident pages */ - break; + break; } *start = offset; *length += PAGE_SIZE; } } if (look_ahead == TRUE) { - for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) { - /* - * don't poke above the highest offset + for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) { + /* + * don't poke above the highest offset */ - if (offset >= fault_info->hi_offset) - break; + if (offset >= fault_info->hi_offset) { + break; + } assert(offset < object_size); /* @@ -6936,20 +6973,21 @@ vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start, break; } if (vm_page_lookup(object, offset) != VM_PAGE_NULL) { - /* + /* * don't bridge resident pages */ - break; + break; } *length += PAGE_SIZE; } } out: - if (*length > max_length) + if (*length > max_length) { *length = max_length; + } vm_object_unlock(object); - + DTRACE_VM1(clustersize, vm_size_t, *length); } @@ -6961,21 +6999,21 @@ out: kern_return_t vm_object_page_op( - vm_object_t object, - vm_object_offset_t offset, - int ops, - ppnum_t *phys_entry, - int *flags) + vm_object_t object, + vm_object_offset_t offset, + int ops, + ppnum_t *phys_entry, + int *flags) { - vm_page_t dst_page; + vm_page_t dst_page; vm_object_lock(object); - if(ops & UPL_POP_PHYSICAL) { - if(object->phys_contiguous) { + if (ops & UPL_POP_PHYSICAL) { + if (object->phys_contiguous) { if (phys_entry) { *phys_entry = (ppnum_t) - (object->vo_shadow_offset >> PAGE_SHIFT); + (object->vo_shadow_offset >> PAGE_SHIFT); } vm_object_unlock(object); return KERN_SUCCESS; @@ -6984,21 +7022,21 @@ vm_object_page_op( return KERN_INVALID_OBJECT; } } - if(object->phys_contiguous) { + if (object->phys_contiguous) { vm_object_unlock(object); return KERN_INVALID_OBJECT; } - while(TRUE) { - if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) { + while (TRUE) { + if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) { vm_object_unlock(object); return KERN_FAILURE; } /* Sync up on getting the busy bit */ - if((dst_page->vmp_busy || dst_page->vmp_cleaning) && - (((ops & UPL_POP_SET) && - (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) { + if ((dst_page->vmp_busy || dst_page->vmp_cleaning) && + (((ops & UPL_POP_SET) && + (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) { /* someone else is playing with the page, we will */ /* have to wait */ PAGE_SLEEP(object, dst_page, THREAD_UNINT); @@ -7006,29 +7044,40 @@ vm_object_page_op( } if (ops & UPL_POP_DUMP) { - if (dst_page->vmp_pmapped == TRUE) - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); + if (dst_page->vmp_pmapped == TRUE) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); + } VM_PAGE_FREE(dst_page); break; } if (flags) { - 
*flags = 0; + *flags = 0; /* Get the condition of flags before requested ops */ /* are undertaken */ - if(dst_page->vmp_dirty) *flags |= UPL_POP_DIRTY; - if(dst_page->vmp_free_when_done) *flags |= UPL_POP_PAGEOUT; - if(dst_page->vmp_precious) *flags |= UPL_POP_PRECIOUS; - if(dst_page->vmp_absent) *flags |= UPL_POP_ABSENT; - if(dst_page->vmp_busy) *flags |= UPL_POP_BUSY; + if (dst_page->vmp_dirty) { + *flags |= UPL_POP_DIRTY; + } + if (dst_page->vmp_free_when_done) { + *flags |= UPL_POP_PAGEOUT; + } + if (dst_page->vmp_precious) { + *flags |= UPL_POP_PRECIOUS; + } + if (dst_page->vmp_absent) { + *flags |= UPL_POP_ABSENT; + } + if (dst_page->vmp_busy) { + *flags |= UPL_POP_BUSY; + } } /* The caller should have made a call either contingent with */ /* or prior to this call to set UPL_POP_BUSY */ - if(ops & UPL_POP_SET) { + if (ops & UPL_POP_SET) { /* The protection granted with this assert will */ /* not be complete. If the caller violates the */ /* convention and attempts to change page state */ @@ -7040,20 +7089,36 @@ vm_object_page_op( if (ops & UPL_POP_DIRTY) { SET_PAGE_DIRTY(dst_page, FALSE); } - if (ops & UPL_POP_PAGEOUT) dst_page->vmp_free_when_done = TRUE; - if (ops & UPL_POP_PRECIOUS) dst_page->vmp_precious = TRUE; - if (ops & UPL_POP_ABSENT) dst_page->vmp_absent = TRUE; - if (ops & UPL_POP_BUSY) dst_page->vmp_busy = TRUE; + if (ops & UPL_POP_PAGEOUT) { + dst_page->vmp_free_when_done = TRUE; + } + if (ops & UPL_POP_PRECIOUS) { + dst_page->vmp_precious = TRUE; + } + if (ops & UPL_POP_ABSENT) { + dst_page->vmp_absent = TRUE; + } + if (ops & UPL_POP_BUSY) { + dst_page->vmp_busy = TRUE; + } } - if(ops & UPL_POP_CLR) { + if (ops & UPL_POP_CLR) { assert(dst_page->vmp_busy); - if (ops & UPL_POP_DIRTY) dst_page->vmp_dirty = FALSE; - if (ops & UPL_POP_PAGEOUT) dst_page->vmp_free_when_done = FALSE; - if (ops & UPL_POP_PRECIOUS) dst_page->vmp_precious = FALSE; - if (ops & UPL_POP_ABSENT) dst_page->vmp_absent = FALSE; + if (ops & UPL_POP_DIRTY) { + dst_page->vmp_dirty = FALSE; + } + if (ops & UPL_POP_PAGEOUT) { + dst_page->vmp_free_when_done = FALSE; + } + if (ops & UPL_POP_PRECIOUS) { + dst_page->vmp_precious = FALSE; + } + if (ops & UPL_POP_ABSENT) { + dst_page->vmp_absent = FALSE; + } if (ops & UPL_POP_BUSY) { - dst_page->vmp_busy = FALSE; + dst_page->vmp_busy = FALSE; PAGE_WAKEUP(dst_page); } } @@ -7071,40 +7136,39 @@ vm_object_page_op( vm_object_unlock(object); return KERN_SUCCESS; - } /* - * vm_object_range_op offers performance enhancement over - * vm_object_page_op for page_op functions which do not require page - * level state to be returned from the call. Page_op was created to provide - * a low-cost alternative to page manipulation via UPLs when only a single - * page was involved. The range_op call establishes the ability in the _op + * vm_object_range_op offers performance enhancement over + * vm_object_page_op for page_op functions which do not require page + * level state to be returned from the call. Page_op was created to provide + * a low-cost alternative to page manipulation via UPLs when only a single + * page was involved. The range_op call establishes the ability in the _op * family of functions to work on multiple pages where the lack of page level * state handling allows the caller to avoid the overhead of the upl structures. 
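
The comment above is the design rationale for vm_object_range_op(): batch page-state work over a span instead of building a UPL, or issuing one page_op call, per page. A toy version of the "report how far the condition holds via *range" convention; page_present() is a hypothetical stand-in for the kernel's per-page lookup:

#include <stdbool.h>
#include <stdint.h>

#define PGSZ 4096UL

/* Pretend the first 8 pages are resident; purely for the sketch. */
static bool
page_present(uint64_t off)
{
	return off < 8 * PGSZ;
}

static void
range_present(uint64_t beg, uint64_t end, uint32_t *range)
{
	uint64_t off;

	for (off = beg & ~(uint64_t)(PGSZ - 1); off < end; off += PGSZ) {
		if (!page_present(off)) {
			break;          /* first absent page ends the run */
		}
	}
	if (off > end) {
		off = end;
	}
	*range = (uint32_t)(off > beg ? off - beg : 0);
}
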
*/ kern_return_t vm_object_range_op( - vm_object_t object, - vm_object_offset_t offset_beg, - vm_object_offset_t offset_end, + vm_object_t object, + vm_object_offset_t offset_beg, + vm_object_offset_t offset_end, int ops, - uint32_t *range) + uint32_t *range) { - vm_object_offset_t offset; - vm_page_t dst_page; + vm_object_offset_t offset; + vm_page_t dst_page; if (offset_end - offset_beg > (uint32_t) -1) { /* range is too big and would overflow "*range" */ return KERN_INVALID_ARGUMENT; - } + } if (object->resident_page_count == 0) { - if (range) { - if (ops & UPL_ROP_PRESENT) { - *range = 0; + if (range) { + if (ops & UPL_ROP_PRESENT) { + *range = 0; } else { - *range = (uint32_t) (offset_end - offset_beg); + *range = (uint32_t) (offset_end - offset_beg); assert(*range == (offset_end - offset_beg)); } } @@ -7114,9 +7178,9 @@ vm_object_range_op( if (object->phys_contiguous) { vm_object_unlock(object); - return KERN_INVALID_OBJECT; + return KERN_INVALID_OBJECT; } - + offset = offset_beg & ~PAGE_MASK_64; while (offset < offset_end) { @@ -7125,10 +7189,10 @@ vm_object_range_op( if (ops & UPL_ROP_DUMP) { if (dst_page->vmp_busy || dst_page->vmp_cleaning) { /* - * someone else is playing with the + * someone else is playing with the * page, we will have to wait */ - PAGE_SLEEP(object, dst_page, THREAD_UNINT); + PAGE_SLEEP(object, dst_page, THREAD_UNINT); /* * need to relook the page up since it's * state may have changed while we slept @@ -7137,29 +7201,32 @@ vm_object_range_op( */ continue; } - if (dst_page->vmp_laundry) + if (dst_page->vmp_laundry) { vm_pageout_steal_laundry(dst_page, FALSE); + } - if (dst_page->vmp_pmapped == TRUE) - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); + if (dst_page->vmp_pmapped == TRUE) { + pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page)); + } VM_PAGE_FREE(dst_page); - } else if ((ops & UPL_ROP_ABSENT) - && (!dst_page->vmp_absent || dst_page->vmp_busy)) { + && (!dst_page->vmp_absent || dst_page->vmp_busy)) { break; } - } else if (ops & UPL_ROP_PRESENT) - break; + } else if (ops & UPL_ROP_PRESENT) { + break; + } offset += PAGE_SIZE; } vm_object_unlock(object); if (range) { - if (offset > offset_end) - offset = offset_end; - if(offset > offset_beg) { + if (offset > offset_end) { + offset = offset_end; + } + if (offset > offset_beg) { *range = (uint32_t) (offset - offset_beg); assert(*range == (offset - offset_beg)); } else { @@ -7174,11 +7241,12 @@ vm_object_range_op( * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently * expect that the virtual address will denote the start of a range that is physically contiguous. 
*/ -kern_return_t pager_map_to_phys_contiguous( - memory_object_control_t object, - memory_object_offset_t offset, - addr64_t base_vaddr, - vm_size_t size) +kern_return_t +pager_map_to_phys_contiguous( + memory_object_control_t object, + memory_object_offset_t offset, + addr64_t base_vaddr, + vm_size_t size) { ppnum_t page_num; boolean_t clobbered_private; @@ -7224,11 +7292,12 @@ uint32_t scan_object_collision = 0; void vm_object_lock(vm_object_t object) { - if (object == vm_pageout_scan_wants_object) { - scan_object_collision++; - mutex_pause(2); + if (object == vm_pageout_scan_wants_object) { + scan_object_collision++; + mutex_pause(2); } - lck_rw_lock_exclusive(&object->Lock); + DTRACE_VM(vm_object_lock_w); + lck_rw_lock_exclusive(&object->Lock); #if DEVELOPMENT || DEBUG object->Lock_owner = current_thread(); #endif @@ -7237,8 +7306,8 @@ vm_object_lock(vm_object_t object) boolean_t vm_object_lock_avoid(vm_object_t object) { - if (object == vm_pageout_scan_wants_object) { - scan_object_collision++; + if (object == vm_pageout_scan_wants_object) { + scan_object_collision++; return TRUE; } return FALSE; @@ -7247,14 +7316,16 @@ vm_object_lock_avoid(vm_object_t object) boolean_t _vm_object_lock_try(vm_object_t object) { - boolean_t retval; + boolean_t retval; retval = lck_rw_try_lock_exclusive(&object->Lock); #if DEVELOPMENT || DEBUG - if (retval == TRUE) + if (retval == TRUE) { + DTRACE_VM(vm_object_lock_w); object->Lock_owner = current_thread(); + } #endif - return (retval); + return retval; } boolean_t @@ -7263,7 +7334,7 @@ vm_object_lock_try(vm_object_t object) /* * Called from hibernate path so check before blocking. */ - if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level()==0) { + if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) { mutex_pause(2); } return _vm_object_lock_try(object); @@ -7272,9 +7343,10 @@ vm_object_lock_try(vm_object_t object) void vm_object_lock_shared(vm_object_t object) { - if (vm_object_lock_avoid(object)) { - mutex_pause(2); + if (vm_object_lock_avoid(object)) { + mutex_pause(2); } + DTRACE_VM(vm_object_lock_r); lck_rw_lock_shared(&object->Lock); } @@ -7288,29 +7360,41 @@ vm_object_lock_yield_shared(vm_object_t object) force_yield = vm_object_lock_avoid(object); retval = lck_rw_lock_yield_shared(&object->Lock, force_yield); + if (retval) { + DTRACE_VM(vm_object_lock_yield); + } - return (retval); + return retval; } boolean_t vm_object_lock_try_shared(vm_object_t object) { - if (vm_object_lock_avoid(object)) { - mutex_pause(2); + boolean_t retval; + + if (vm_object_lock_avoid(object)) { + mutex_pause(2); + } + retval = lck_rw_try_lock_shared(&object->Lock); + if (retval) { + DTRACE_VM(vm_object_lock_r); } - return (lck_rw_try_lock_shared(&object->Lock)); + return retval; } boolean_t vm_object_lock_upgrade(vm_object_t object) -{ boolean_t retval; +{ + boolean_t retval; retval = lck_rw_lock_shared_to_exclusive(&object->Lock); #if DEVELOPMENT || DEBUG - if (retval == TRUE) + if (retval == TRUE) { + DTRACE_VM(vm_object_lock_w); object->Lock_owner = current_thread(); + } #endif - return (retval); + return retval; } void @@ -7318,9 +7402,11 @@ vm_object_unlock(vm_object_t object) { #if DEVELOPMENT || DEBUG if (object->Lock_owner) { - if (object->Lock_owner != current_thread()) + if (object->Lock_owner != current_thread()) { panic("vm_object_unlock: not owner - %p\n", object); + } object->Lock_owner = 0; + DTRACE_VM(vm_object_unlock); } #endif lck_rw_done(&object->Lock); @@ -7341,15 
+7427,16 @@ vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode) vm_object_paging_wait(object, THREAD_UNINT); - vm_page_queue_iterate(&object->memq, p, vm_page_t, vmp_listq) { - - if (!p->vmp_fictitious) + vm_page_queue_iterate(&object->memq, p, vmp_listq) { + if (!p->vmp_fictitious) { pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode); + } } - if (wimg_mode == VM_WIMG_USE_DEFAULT) + if (wimg_mode == VM_WIMG_USE_DEFAULT) { object->set_cache_attr = FALSE; - else + } else { object->set_cache_attr = TRUE; + } object->wimg_bits = wimg_mode; @@ -7371,9 +7458,9 @@ extern char *freezer_compressor_scratch_buf; extern int c_freezer_compression_count; extern AbsoluteTime c_freezer_last_yield_ts; -#define MAX_FREE_BATCH 32 -#define FREEZER_DUTY_CYCLE_ON_MS 5 -#define FREEZER_DUTY_CYCLE_OFF_MS 5 +#define MAX_FREE_BATCH 32 +#define FREEZER_DUTY_CYCLE_ON_MS 5 +#define FREEZER_DUTY_CYCLE_OFF_MS 5 static int c_freezer_should_yield(void); @@ -7381,8 +7468,8 @@ static int c_freezer_should_yield(void); static int c_freezer_should_yield() { - AbsoluteTime cur_time; - uint64_t nsecs; + AbsoluteTime cur_time; + uint64_t nsecs; assert(c_freezer_last_yield_ts); clock_get_uptime(&cur_time); @@ -7390,9 +7477,10 @@ c_freezer_should_yield() SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts); absolutetime_to_nanoseconds(cur_time, &nsecs); - if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) - return (1); - return (0); + if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) { + return 1; + } + return 0; } @@ -7407,35 +7495,34 @@ void vm_object_compressed_freezer_pageout( vm_object_t object) { - vm_page_t p; - vm_page_t local_freeq = NULL; - int local_freed = 0; - kern_return_t retval = KERN_SUCCESS; - int obj_resident_page_count_snapshot = 0; + vm_page_t p; + vm_page_t local_freeq = NULL; + int local_freed = 0; + kern_return_t retval = KERN_SUCCESS; + int obj_resident_page_count_snapshot = 0; assert(object != VM_OBJECT_NULL); assert(object->internal); vm_object_lock(object); - if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { - + if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { if (!object->pager_initialized) { - vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); - if (!object->pager_initialized) + if (!object->pager_initialized) { vm_object_compressor_pager_create(object); + } } - if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { + if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { vm_object_unlock(object); return; } } - + if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { - vm_object_offset_t curr_offset = 0; + vm_object_offset_t curr_offset = 0; /* * Go through the object and make sure that any @@ -7443,16 +7530,17 @@ vm_object_compressed_freezer_pageout( * a compressed segment associated with our "freezer_chead". 
*/ while (curr_offset < object->vo_size) { - curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset); - - if (curr_offset == (vm_object_offset_t) -1) + + if (curr_offset == (vm_object_offset_t) -1) { break; + } retval = vm_compressor_pager_relocate(object->pager, curr_offset, &freezer_chead); - if (retval != KERN_SUCCESS) + if (retval != KERN_SUCCESS) { break; + } curr_offset += PAGE_SIZE_64; } @@ -7476,7 +7564,6 @@ vm_object_compressed_freezer_pageout( vm_object_activity_begin(object); while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq)) { - p = (vm_page_t)vm_page_queue_first(&object->memq); KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0); @@ -7484,13 +7571,12 @@ vm_object_compressed_freezer_pageout( vm_page_lockspin_queues(); if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || p->vmp_error || VM_PAGE_WIRED(p)) { - vm_page_unlock_queues(); KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0); - vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); - vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); + vm_page_queue_remove(&object->memq, p, vmp_listq); + vm_page_queue_enter(&object->memq, p, vmp_listq); continue; } @@ -7509,7 +7595,7 @@ vm_object_compressed_freezer_pageout( SET_PAGE_DIRTY(p, FALSE); } } - + if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) { /* * Clean and non-precious page. @@ -7521,8 +7607,9 @@ vm_object_compressed_freezer_pageout( continue; } - if (p->vmp_laundry) + if (p->vmp_laundry) { vm_pageout_steal_laundry(p, TRUE); + } vm_page_queues_remove(p, TRUE); @@ -7535,8 +7622,8 @@ vm_object_compressed_freezer_pageout( * Make the move here while we have the object lock held. */ - vm_page_queue_remove(&object->memq, p, vm_page_t, vmp_listq); - vm_page_queue_enter(&object->memq, p, vm_page_t, vmp_listq); + vm_page_queue_remove(&object->memq, p, vmp_listq); + vm_page_queue_enter(&object->memq, p, vmp_listq); /* * Grab an activity_in_progress here for vm_pageout_compress_page() to consume. 
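
The freezer loop that follows frees pages in batches: each page is chained onto a thread-local freeq and handed to the bulk free path only once MAX_FREE_BATCH pages have accumulated, amortizing the global queue locking. The same batching pattern as a userland sketch; free_list() stands in for vm_page_free_list():

#include <stddef.h>

#define MAX_FREE_BATCH 32   /* same batch size the freezer path uses */

struct node { struct node *next; };

/* Hypothetical bulk release; a real one would return items in one pass. */
static void
free_list(struct node *head)
{
	(void)head;
}

static void
drain(struct node *pages[], size_t n)
{
	struct node *local_freeq = NULL;
	int local_freed = 0;

	for (size_t i = 0; i < n; i++) {
		pages[i]->next = local_freeq;  /* push onto the local chain */
		local_freeq = pages[i];
		if (++local_freed >= MAX_FREE_BATCH) {
			free_list(local_freeq);
			local_freeq = NULL;
			local_freed = 0;
		}
	}
	if (local_freeq) {
		free_list(local_freeq);        /* flush the remainder */
	}
}
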
@@ -7558,11 +7645,10 @@ vm_object_compressed_freezer_pageout( local_freed++; if (local_freed >= MAX_FREE_BATCH) { - - OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); + OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); vm_page_free_list(local_freeq, TRUE); - + local_freeq = NULL; local_freed = 0; } @@ -7571,7 +7657,6 @@ vm_object_compressed_freezer_pageout( KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0); if (local_freed == 0 && c_freezer_should_yield()) { - thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); clock_get_uptime(&c_freezer_last_yield_ts); } @@ -7580,20 +7665,19 @@ vm_object_compressed_freezer_pageout( } if (local_freeq) { - OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); + OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); vm_page_free_list(local_freeq, TRUE); - + local_freeq = NULL; local_freed = 0; } - + vm_object_activity_end(object); vm_object_unlock(object); if (c_freezer_should_yield()) { - thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS); clock_get_uptime(&c_freezer_last_yield_ts); } @@ -7606,16 +7690,17 @@ void vm_object_pageout( vm_object_t object) { - vm_page_t p, next; - struct vm_pageout_queue *iq; + vm_page_t p, next; + struct vm_pageout_queue *iq; - if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) + if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) { return; + } iq = &vm_pageout_queue_internal; - + assert(object != VM_OBJECT_NULL ); - + vm_object_lock(object); if (!object->internal || @@ -7625,31 +7710,30 @@ vm_object_pageout( return; } - if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { - + if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { if (!object->pager_initialized) { - vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); - if (!object->pager_initialized) + if (!object->pager_initialized) { vm_object_compressor_pager_create(object); + } } - if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { + if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) { vm_object_unlock(object); return; } } - -ReScan: + +ReScan: next = (vm_page_t)vm_page_queue_first(&object->memq); while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) { p = next; next = (vm_page_t)vm_page_queue_next(&next->vmp_listq); - + assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q); - + if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) || p->vmp_cleaning || p->vmp_laundry || @@ -7664,7 +7748,7 @@ ReScan: continue; } if (vm_compressor_low_on_space()) { - break; + break; } /* Throw to the pageout queue */ @@ -7672,14 +7756,13 @@ ReScan: vm_page_lockspin_queues(); if (VM_PAGE_Q_THROTTLED(iq)) { - iq->pgo_draining = TRUE; - + assert_wait((event_t) (&iq->pgo_laundry + 1), - THREAD_INTERRUPTIBLE); + THREAD_INTERRUPTIBLE); vm_page_unlock_queues(); vm_object_unlock(object); - + thread_block(THREAD_CONTINUE_NULL); vm_object_lock(object); @@ -7703,7 +7786,7 @@ ReScan: * for as "compressed" if it's been modified. 
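
c_freezer_should_yield() implements a simple duty cycle: after roughly FREEZER_DUTY_CYCLE_ON_MS of work since the last yield, the thread sleeps for FREEZER_DUTY_CYCLE_OFF_MS before continuing. A minimal userland equivalent using a monotonic clock; nanosleep() stands in for thread_yield_internal():

#include <stdint.h>
#include <time.h>

#define DUTY_ON_MS   5   /* run at most this long... */
#define DUTY_OFF_MS  5   /* ...then rest this long, as in the freezer path */

static uint64_t last_yield_ns;   /* zero-init means the first check yields */

static uint64_t
now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

/* Same shape as c_freezer_should_yield(): elapsed-since-last-yield check. */
static int
should_yield(void)
{
	return (now_ns() - last_yield_ns) > (uint64_t)DUTY_ON_MS * 1000000ULL;
}

static void
yield_if_needed(void)
{
	if (should_yield()) {
		struct timespec off = { 0, DUTY_OFF_MS * 1000000L };
		nanosleep(&off, NULL);
		last_yield_ns = now_ns();
	}
}
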
*/ pmap_options = - PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; + PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; if (p->vmp_dirty || p->vmp_precious) { /* * We already know it's been modified, @@ -7713,8 +7796,8 @@ ReScan: pmap_options = PMAP_OPTIONS_COMPRESSOR; } refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), - pmap_options, - NULL); + pmap_options, + NULL); if (refmod_state & VM_MEM_MODIFIED) { SET_PAGE_DIRTY(p, FALSE); } @@ -7728,7 +7811,7 @@ ReScan: vm_page_queues_remove(p, TRUE); vm_pageout_cluster(p); - + vm_page_unlock_queues(); } vm_object_unlock(object); @@ -7739,20 +7822,22 @@ ReScan: void vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio) { - io_reprioritize_req_t req; - struct vnode *devvp = NULL; + io_reprioritize_req_t req; + struct vnode *devvp = NULL; - if(vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) + if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { return; - + } + /* * Create the request for I/O reprioritization. * We use the noblock variant of zalloc because we're holding the object * lock here and we could cause a deadlock in low memory conditions. */ req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone); - if (req == NULL) + if (req == NULL) { return; + } req->blkno = blkno; req->len = len; req->priority = prio; @@ -7764,38 +7849,40 @@ vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int pr IO_REPRIORITIZE_LIST_UNLOCK(); /* Wakeup reprioritize thread */ - IO_REPRIO_THREAD_WAKEUP(); + IO_REPRIO_THREAD_WAKEUP(); - return; -} + return; +} void vm_decmp_upl_reprioritize(upl_t upl, int prio) { int offset; vm_object_t object; - io_reprioritize_req_t req; + io_reprioritize_req_t req; struct vnode *devvp = NULL; - uint64_t blkno; - uint32_t len; - upl_t io_upl; - uint64_t *io_upl_reprio_info; - int io_upl_size; + uint64_t blkno; + uint32_t len; + upl_t io_upl; + uint64_t *io_upl_reprio_info; + int io_upl_size; - if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) + if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) { return; + } - /* - * We dont want to perform any allocations with the upl lock held since that might - * result in a deadlock. If the system is low on memory, the pageout thread would + /* + * We dont want to perform any allocations with the upl lock held since that might + * result in a deadlock. If the system is low on memory, the pageout thread would * try to pageout stuff and might wait on this lock. If we are waiting for the memory to * be freed up by the pageout thread, it would be a deadlock. */ /* First step is just to get the size of the upl to find out how big the reprio info is */ - if(!upl_try_lock(upl)) + if (!upl_try_lock(upl)) { return; + } if (upl->decmp_io_upl == NULL) { /* The real I/O upl was destroyed by the time we came in here. Nothing to do. 
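
The zalloc_noblock() call above encodes a locking rule: while holding a lock the memory-reclaim path might need, never perform an allocation that can wait for memory, or reclaim can deadlock behind you. A sketch of the same "try, and drop the optimization on failure" pattern; malloc() here merely models a non-blocking allocation attempt:

#include <stdlib.h>

struct req { unsigned long blkno; unsigned len; int prio; };

static struct req *
try_queue_request(unsigned long blkno, unsigned len, int prio)
{
	struct req *r = malloc(sizeof(*r));  /* models a no-block attempt */
	if (r == NULL) {
		return NULL;  /* caller holds a lock: bail out, never wait */
	}
	r->blkno = blkno;
	r->len = len;
	r->prio = prio;
	return r;
}
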
*/ @@ -7807,15 +7894,17 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio) assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0); io_upl_size = io_upl->size; upl_unlock(upl); - + /* Now perform the allocation */ io_upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); - if (io_upl_reprio_info == NULL) + if (io_upl_reprio_info == NULL) { return; + } /* Now again take the lock, recheck the state and grab out the required info */ - if(!upl_try_lock(upl)) + if (!upl_try_lock(upl)) { goto out; + } if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) { /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */ @@ -7832,8 +7921,8 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio) } /* Get the dev vnode ptr for this object */ - if(!object || !object->pager || - vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { + if (!object || !object->pager || + vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) { upl_unlock(upl); goto out; } @@ -7844,29 +7933,29 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio) offset = 0; while (offset < io_upl_size) { - blkno = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK; - len = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK; + blkno = io_upl_reprio_info[(offset / PAGE_SIZE)] & UPL_REPRIO_INFO_MASK; + len = (io_upl_reprio_info[(offset / PAGE_SIZE)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK; /* - * This implementation may cause some spurious expedites due to the - * fact that we dont cleanup the blkno & len from the upl_reprio_info - * even after the I/O is complete. + * This implementation may cause some spurious expedites due to the + * fact that we dont cleanup the blkno & len from the upl_reprio_info + * even after the I/O is complete. */ - + if (blkno != 0 && len != 0) { /* Create the request for I/O reprioritization */ - req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone); - assert(req != NULL); - req->blkno = blkno; - req->len = len; - req->priority = prio; - req->devvp = devvp; - - /* Insert request into the reprioritization list */ - IO_REPRIORITIZE_LIST_LOCK(); - queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); - IO_REPRIORITIZE_LIST_UNLOCK(); - + req = (io_reprioritize_req_t)zalloc(io_reprioritize_req_zone); + assert(req != NULL); + req->blkno = blkno; + req->len = len; + req->priority = prio; + req->devvp = devvp; + + /* Insert request into the reprioritization list */ + IO_REPRIORITIZE_LIST_LOCK(); + queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); + IO_REPRIORITIZE_LIST_UNLOCK(); + offset += len; } else { offset += PAGE_SIZE; @@ -7874,7 +7963,7 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio) } /* Wakeup reprioritize thread */ - IO_REPRIO_THREAD_WAKEUP(); + IO_REPRIO_THREAD_WAKEUP(); out: kfree(io_upl_reprio_info, sizeof(uint64_t) * (io_upl_size / PAGE_SIZE)); @@ -7885,48 +7974,51 @@ void vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m) { upl_t upl; - upl_page_info_t *pl; - unsigned int i, num_pages; - int cur_tier; + upl_page_info_t *pl; + unsigned int i, num_pages; + int cur_tier; cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO); - /* - Scan through all UPLs associated with the object to find the - UPL containing the contended page. - */ + /* + * Scan through all UPLs associated with the object to find the + * UPL containing the contended page. 
+ */ queue_iterate(&o->uplq, upl, upl_t, uplq) { - if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) + if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) { continue; + } pl = UPL_GET_INTERNAL_PAGE_LIST(upl); - num_pages = (upl->size / PAGE_SIZE); - + num_pages = (upl->size / PAGE_SIZE); + /* - For each page in the UPL page list, see if it matches the contended - page and was issued as a low prio I/O. - */ - for(i=0; i < num_pages; i++) { - if(UPL_PAGE_PRESENT(pl,i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) { + * For each page in the UPL page list, see if it matches the contended + * page and was issued as a low prio I/O. + */ + for (i = 0; i < num_pages; i++) { + if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) { if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) { KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), - VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0); + VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0); vm_decmp_upl_reprioritize(upl, cur_tier); break; } KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), - upl->upl_reprio_info[i], upl->upl_priority, 0); - if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) + upl->upl_reprio_info[i], upl->upl_priority, 0); + if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) { vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier); - break; - } - } - /* Check if we found any hits */ - if (i != num_pages) + } + break; + } + } + /* Check if we found any hits */ + if (i != num_pages) { break; + } } - + return; -} +} wait_result_t vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible) @@ -7934,12 +8026,12 @@ vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible) wait_result_t ret; KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0); - + if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) { - /* - Indicates page is busy due to an I/O. Issue a reprioritize request if necessary. - */ - vm_page_handle_prio_inversion(o,m); + /* + * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary. 
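
This reprioritization machinery is a producer/consumer pair: the paths above enqueue requests under a list lock, and io_reprioritize_thread(), restyled just below, drains them, issuing each request only after dropping the lock. A compact pthread sketch of that drain loop; issue() stands in for vnode_pager_issue_reprioritize_io():

#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>

struct rreq { struct rreq *next; int prio; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct rreq *list_head;

static void
issue(struct rreq *r)
{
	(void)r;  /* a real consumer would reprioritize the I/O here */
}

static void
drain_reprioritize_list(void)
{
	for (;;) {
		pthread_mutex_lock(&list_lock);
		struct rreq *r = list_head;
		if (r == NULL) {
			pthread_mutex_unlock(&list_lock);
			break;
		}
		list_head = r->next;
		pthread_mutex_unlock(&list_lock); /* never issue I/O under the lock */
		issue(r);
		free(r);
	}
}
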
+ */ + vm_page_handle_prio_inversion(o, m); } m->vmp_wanted = TRUE; ret = thread_sleep_vm_object(o, m, interruptible); @@ -7951,22 +8043,21 @@ static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused) { io_reprioritize_req_t req = NULL; - - while(1) { + while (1) { IO_REPRIORITIZE_LIST_LOCK(); if (queue_empty(&io_reprioritize_list)) { IO_REPRIORITIZE_LIST_UNLOCK(); break; } - - queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); + + queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list); IO_REPRIORITIZE_LIST_UNLOCK(); - + vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority); - zfree(io_reprioritize_req_zone, req); - } - + zfree(io_reprioritize_req_zone, req); + } + IO_REPRIO_THREAD_CONTINUATION(); } #endif @@ -7974,12 +8065,12 @@ io_reprioritize_thread(void *param __unused, wait_result_t wr __unused) #if VM_OBJECT_ACCESS_TRACKING void vm_object_access_tracking( - vm_object_t object, - int *access_tracking_p, - uint32_t *access_tracking_reads_p, - uint32_t *access_tracking_writes_p) + vm_object_t object, + int *access_tracking_p, + uint32_t *access_tracking_reads_p, + uint32_t *access_tracking_writes_p) { - int access_tracking; + int access_tracking; access_tracking = !!*access_tracking_p; @@ -7998,24 +8089,24 @@ vm_object_access_tracking( if (access_tracking) { vm_object_pmap_protect_options(object, - 0, - object->vo_size, - PMAP_NULL, - 0, - VM_PROT_NONE, - 0); + 0, + object->vo_size, + PMAP_NULL, + 0, + VM_PROT_NONE, + 0); } } #endif /* VM_OBJECT_ACCESS_TRACKING */ void vm_object_ledger_tag_ledgers( - vm_object_t object, - int *ledger_idx_volatile, - int *ledger_idx_nonvolatile, - int *ledger_idx_volatile_compressed, - int *ledger_idx_nonvolatile_compressed, - boolean_t *do_footprint) + vm_object_t object, + int *ledger_idx_volatile, + int *ledger_idx_nonvolatile, + int *ledger_idx_volatile_compressed, + int *ledger_idx_nonvolatile_compressed, + boolean_t *do_footprint) { assert(object->shadow == VM_OBJECT_NULL); @@ -8039,28 +8130,28 @@ vm_object_ledger_tag_ledgers( case VM_OBJECT_LEDGER_TAG_MEDIA: default: panic("%s: object %p has unsupported ledger_tag %d\n", - __FUNCTION__, object, object->vo_ledger_tag); + __FUNCTION__, object, object->vo_ledger_tag); } } kern_return_t vm_object_ownership_change( - vm_object_t object, - int new_ledger_tag, - task_t new_owner, - boolean_t task_objq_locked) + vm_object_t object, + int new_ledger_tag, + task_t new_owner, + boolean_t task_objq_locked) { - int old_ledger_tag; - task_t old_owner; - int resident_count, wired_count; - unsigned int compressed_count; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int ledger_idx_nonvolatile_compressed; - int ledger_idx; - int ledger_idx_compressed; - boolean_t do_footprint; + int old_ledger_tag; + task_t old_owner; + int resident_count, wired_count; + unsigned int compressed_count; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + int ledger_idx; + int ledger_idx_compressed; + boolean_t do_footprint; vm_object_lock_assert_exclusive(object); assert(object->internal); @@ -8076,19 +8167,19 @@ vm_object_ownership_change( * Deal with the old owner and/or ledger tag, if needed. */ if (old_owner != TASK_NULL && - ((old_owner != new_owner) /* new owner ... */ - || /* ... or ... */ - (old_ledger_tag && /* ... 
new ledger */ - old_ledger_tag != new_ledger_tag))) { + ((old_owner != new_owner) /* new owner ... */ + || /* ... or ... */ + (old_ledger_tag && /* ... new ledger */ + old_ledger_tag != new_ledger_tag))) { /* * Take this object off of the old owner's ledgers. */ vm_object_ledger_tag_ledgers(object, - &ledger_idx_volatile, - &ledger_idx_nonvolatile, - &ledger_idx_volatile_compressed, - &ledger_idx_nonvolatile_compressed, - &do_footprint); + &ledger_idx_volatile, + &ledger_idx_nonvolatile, + &ledger_idx_volatile_compressed, + &ledger_idx_nonvolatile_compressed, + &do_footprint); if (object->purgable == VM_PURGABLE_VOLATILE || object->purgable == VM_PURGABLE_EMPTY) { ledger_idx = ledger_idx_volatile; @@ -8103,26 +8194,26 @@ vm_object_ownership_change( * number of resident pages. */ ledger_debit(old_owner->ledger, - ledger_idx, - ptoa_64(resident_count)); + ledger_idx, + ptoa_64(resident_count)); /* adjust old owner's footprint */ if (do_footprint && object->purgable != VM_PURGABLE_VOLATILE && object->purgable != VM_PURGABLE_EMPTY) { ledger_debit(old_owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(resident_count)); + task_ledgers.phys_footprint, + ptoa_64(resident_count)); } } if (wired_count) { /* wired pages are always nonvolatile */ ledger_debit(old_owner->ledger, - ledger_idx_nonvolatile, - ptoa_64(wired_count)); + ledger_idx_nonvolatile, + ptoa_64(wired_count)); if (do_footprint) { ledger_debit(old_owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(wired_count)); + task_ledgers.phys_footprint, + ptoa_64(wired_count)); } } if (compressed_count) { @@ -8131,35 +8222,35 @@ vm_object_ownership_change( * by the number of compressed pages. */ ledger_debit(old_owner->ledger, - ledger_idx_compressed, - ptoa_64(compressed_count)); + ledger_idx_compressed, + ptoa_64(compressed_count)); if (do_footprint && object->purgable != VM_PURGABLE_VOLATILE && object->purgable != VM_PURGABLE_EMPTY) { ledger_debit(old_owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(compressed_count)); + task_ledgers.phys_footprint, + ptoa_64(compressed_count)); } } if (old_owner != new_owner) { /* remove object from old_owner's list of owned objects */ DTRACE_VM2(object_owner_remove, - vm_object_t, object, - task_t, new_owner); + vm_object_t, object, + task_t, new_owner); if (!task_objq_locked) { task_objq_lock(old_owner); } queue_remove(&old_owner->task_objq, object, - vm_object_t, task_objq); + vm_object_t, task_objq); switch (object->purgable) { case VM_PURGABLE_NONVOLATILE: case VM_PURGABLE_EMPTY: vm_purgeable_nonvolatile_owner_update(old_owner, - -1); + -1); break; case VM_PURGABLE_VOLATILE: vm_purgeable_volatile_owner_update(old_owner, - -1); + -1); break; default: break; @@ -8185,19 +8276,19 @@ vm_object_ownership_change( * Deal with the new owner and/or ledger tag, if needed. */ if (new_owner != TASK_NULL && - ((new_owner != old_owner) /* new owner ... */ - || /* ... or ... */ - (new_ledger_tag && /* ... new ledger */ - new_ledger_tag != old_ledger_tag))) { + ((new_owner != old_owner) /* new owner ... */ + || /* ... or ... */ + (new_ledger_tag && /* ... new ledger */ + new_ledger_tag != old_ledger_tag))) { /* * Add this object to the new owner's ledgers. 
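
The hunks above and below form a debit/credit pair: the same amounts leave the old owner's ledgers and land on the new owner's, so totals across task ledgers are conserved by an ownership change. A compilable sketch of that symmetry under a deliberately simplified ledger model (compressed-page ledgers omitted; sim_ledger_t and account_owner are illustrative, not kernel API):

#include <stdint.h>
#include <stdbool.h>

/* One counter per ledger index -- a deliberately simplified ledger model. */
enum { LEDGER_VOLATILE, LEDGER_NONVOLATILE, LEDGER_FOOTPRINT, LEDGER_MAX };

typedef struct { int64_t entry[LEDGER_MAX]; } sim_ledger_t;

/* sign is -1 to debit (old owner) or +1 to credit (new owner). */
static void
account_owner(sim_ledger_t *l, int64_t resident_bytes, int64_t wired_bytes,
    bool is_volatile, bool do_footprint, int sign)
{
    /* resident pages land on the volatile or nonvolatile ledger... */
    l->entry[is_volatile ? LEDGER_VOLATILE : LEDGER_NONVOLATILE] += sign * resident_bytes;
    /* ...but count toward the footprint only when nonvolatile */
    if (do_footprint && !is_volatile) {
        l->entry[LEDGER_FOOTPRINT] += sign * resident_bytes;
    }
    /* wired pages are always nonvolatile and always in the footprint */
    l->entry[LEDGER_NONVOLATILE] += sign * wired_bytes;
    if (do_footprint) {
        l->entry[LEDGER_FOOTPRINT] += sign * wired_bytes;
    }
}

int main(void)
{
    sim_ledger_t old_owner = {{ 0 }}, new_owner = {{ 0 }};
    int64_t resident = 4 * 16384, wired = 16384;   /* byte counts, as ptoa_64 yields */

    account_owner(&old_owner, resident, wired, false, true, -1);  /* debit  */
    account_owner(&new_owner, resident, wired, false, true, +1);  /* credit */
    return 0;
}
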
*/ vm_object_ledger_tag_ledgers(object, - &ledger_idx_volatile, - &ledger_idx_nonvolatile, - &ledger_idx_volatile_compressed, - &ledger_idx_nonvolatile_compressed, - &do_footprint); + &ledger_idx_volatile, + &ledger_idx_nonvolatile, + &ledger_idx_volatile_compressed, + &ledger_idx_nonvolatile_compressed, + &do_footprint); if (object->purgable == VM_PURGABLE_VOLATILE || object->purgable == VM_PURGABLE_EMPTY) { ledger_idx = ledger_idx_volatile; @@ -8212,26 +8303,26 @@ vm_object_ownership_change( * number of resident pages. */ ledger_credit(new_owner->ledger, - ledger_idx, - ptoa_64(resident_count)); + ledger_idx, + ptoa_64(resident_count)); /* adjust new owner's footprint */ if (do_footprint && object->purgable != VM_PURGABLE_VOLATILE && object->purgable != VM_PURGABLE_EMPTY) { ledger_credit(new_owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(resident_count)); + task_ledgers.phys_footprint, + ptoa_64(resident_count)); } } if (wired_count) { /* wired pages are always nonvolatile */ ledger_credit(new_owner->ledger, - ledger_idx_nonvolatile, - ptoa_64(wired_count)); + ledger_idx_nonvolatile, + ptoa_64(wired_count)); if (do_footprint) { ledger_credit(new_owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(wired_count)); + task_ledgers.phys_footprint, + ptoa_64(wired_count)); } } if (compressed_count) { @@ -8240,33 +8331,33 @@ vm_object_ownership_change( * compressed pages. */ ledger_credit(new_owner->ledger, - ledger_idx_compressed, - ptoa_64(compressed_count)); + ledger_idx_compressed, + ptoa_64(compressed_count)); if (do_footprint && object->purgable != VM_PURGABLE_VOLATILE && object->purgable != VM_PURGABLE_EMPTY) { ledger_credit(new_owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(compressed_count)); + task_ledgers.phys_footprint, + ptoa_64(compressed_count)); } } if (new_owner != old_owner) { /* add object to new_owner's list of owned objects */ DTRACE_VM2(object_owner_add, - vm_object_t, object, - task_t, new_owner); + vm_object_t, object, + task_t, new_owner); task_objq_lock(new_owner); queue_enter(&new_owner->task_objq, object, - vm_object_t, task_objq); + vm_object_t, task_objq); switch (object->purgable) { case VM_PURGABLE_NONVOLATILE: case VM_PURGABLE_EMPTY: vm_purgeable_nonvolatile_owner_update(new_owner, - +1); + +1); break; case VM_PURGABLE_VOLATILE: vm_purgeable_volatile_owner_update(new_owner, - +1); + +1); break; default: break; diff --git a/osfmk/vm/vm_object.h b/osfmk/vm/vm_object.h index ccd1de547..3fef567bb 100644 --- a/osfmk/vm/vm_object.h +++ b/osfmk/vm/vm_object.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * Virtual memory object module definitions. */ -#ifndef _VM_VM_OBJECT_H_ +#ifndef _VM_VM_OBJECT_H_ #define _VM_VM_OBJECT_H_ #include @@ -97,11 +97,11 @@ extern void vm_object_tracking_init(void); extern boolean_t vm_object_tracking_inited; extern btlog_t *vm_object_tracking_btlog; -#define VM_OBJECT_TRACKING_NUM_RECORDS 50000 +#define VM_OBJECT_TRACKING_NUM_RECORDS 50000 #define VM_OBJECT_TRACKING_BTDEPTH 7 -#define VM_OBJECT_TRACKING_OP_CREATED 1 -#define VM_OBJECT_TRACKING_OP_MODIFIED 2 -#define VM_OBJECT_TRACKING_OP_TRUESHARE 3 +#define VM_OBJECT_TRACKING_OP_CREATED 1 +#define VM_OBJECT_TRACKING_OP_MODIFIED 2 +#define VM_OBJECT_TRACKING_OP_TRUESHARE 3 #endif /* VM_OBJECT_TRACKING */ struct vm_page; @@ -114,30 +114,30 @@ struct vm_page; */ struct vm_object_fault_info { - int interruptible; - uint32_t user_tag; - vm_size_t cluster_size; - vm_behavior_t behavior; - vm_map_offset_t lo_offset; - vm_map_offset_t hi_offset; + int interruptible; + uint32_t user_tag; + vm_size_t cluster_size; + vm_behavior_t behavior; + vm_map_offset_t lo_offset; + vm_map_offset_t hi_offset; unsigned int - /* boolean_t */ no_cache:1, - /* boolean_t */ stealth:1, - /* boolean_t */ io_sync:1, + /* boolean_t */ no_cache:1, + /* boolean_t */ stealth:1, + /* boolean_t */ io_sync:1, /* boolean_t */ cs_bypass:1, /* boolean_t */ pmap_cs_associated:1, - /* boolean_t */ mark_zf_absent:1, + /* boolean_t */ mark_zf_absent:1, /* boolean_t */ batch_pmap_op:1, - __vm_object_fault_info_unused_bits:25; - int pmap_options; + __vm_object_fault_info_unused_bits:25; + int pmap_options; }; -#define vo_size vo_un1.vou_size -#define vo_cache_pages_to_scan vo_un1.vou_cache_pages_to_scan -#define vo_shadow_offset vo_un2.vou_shadow_offset -#define vo_cache_ts vo_un2.vou_cache_ts -#define vo_owner vo_un2.vou_owner +#define vo_size vo_un1.vou_size +#define vo_cache_pages_to_scan vo_un1.vou_cache_pages_to_scan +#define vo_shadow_offset vo_un2.vou_shadow_offset +#define vo_cache_ts vo_un2.vou_cache_ts +#define vo_owner vo_un2.vou_owner struct vm_object { /* @@ -153,53 +153,53 @@ struct vm_object { * rounding the size of the vm_object element to the nearest * 64 byte size before creating the zone. 
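
The fault-info flags above use the common C idiom of packing several booleans into one word, with the leftover bits reserved so the struct layout stays stable as flags are added. A minimal illustration (the field names are examples only, and the size check holds on the usual ABIs):

#include <assert.h>

/* Example flags only; the real field list is in the struct above. */
struct fault_flags {
    unsigned int no_cache    : 1,
                 stealth     : 1,
                 io_sync     : 1,
                 cs_bypass   : 1,
                 unused_bits : 28;    /* reserved so the layout stays put */
};

int main(void)
{
    struct fault_flags f = { .io_sync = 1 };
    assert(sizeof f == sizeof(unsigned int));   /* all flags fit one word */
    return f.io_sync;
}
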
*/ - vm_page_queue_head_t memq; /* Resident memory - must be first */ - lck_rw_t Lock; /* Synchronization */ + vm_page_queue_head_t memq; /* Resident memory - must be first */ + lck_rw_t Lock; /* Synchronization */ #if DEVELOPMENT || DEBUG - thread_t Lock_owner; + thread_t Lock_owner; #endif union { - vm_object_size_t vou_size; /* Object size (only valid if internal) */ - int vou_cache_pages_to_scan; /* pages yet to be visited in an - * external object in cache - */ + vm_object_size_t vou_size; /* Object size (only valid if internal) */ + int vou_cache_pages_to_scan; /* pages yet to be visited in an + * external object in cache + */ } vo_un1; - struct vm_page *memq_hint; - int ref_count; /* Number of references */ - unsigned int resident_page_count; - /* number of resident pages */ - unsigned int wired_page_count; /* number of wired pages - use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */ - unsigned int reusable_page_count; - - struct vm_object *copy; /* Object that should receive - * a copy of my changed pages, - * for copy_delay, or just the - * temporary object that - * shadows this object, for - * copy_call. - */ - struct vm_object *shadow; /* My shadow */ + struct vm_page *memq_hint; + int ref_count; /* Number of references */ + unsigned int resident_page_count; + /* number of resident pages */ + unsigned int wired_page_count; /* number of wired pages + * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */ + unsigned int reusable_page_count; + + struct vm_object *copy; /* Object that should receive + * a copy of my changed pages, + * for copy_delay, or just the + * temporary object that + * shadows this object, for + * copy_call. + */ + struct vm_object *shadow; /* My shadow */ union { - vm_object_offset_t vou_shadow_offset; /* Offset into shadow */ - clock_sec_t vou_cache_ts; /* age of an external object - * present in cache - */ - task_t vou_owner; /* If the object is purgeable - * or has a "ledger_tag", this - * is the task that owns it. - */ + vm_object_offset_t vou_shadow_offset; /* Offset into shadow */ + clock_sec_t vou_cache_ts; /* age of an external object + * present in cache + */ + task_t vou_owner; /* If the object is purgeable + * or has a "ledger_tag", this + * is the task that owns it. + */ } vo_un2; - memory_object_t pager; /* Where to get data */ - vm_object_offset_t paging_offset; /* Offset into memory object */ - memory_object_control_t pager_control; /* Where data comes back */ + memory_object_t pager; /* Where to get data */ + vm_object_offset_t paging_offset; /* Offset into memory object */ + memory_object_control_t pager_control; /* Where data comes back */ memory_object_copy_strategy_t - copy_strategy; /* How to handle data copy */ + copy_strategy; /* How to handle data copy */ #if __LP64__ /* @@ -209,9 +209,9 @@ struct vm_object { * Since we never enforced any limit there, let's give them 32 bits * for backwards compatibility's sake. */ - unsigned int paging_in_progress:16, - __object1_unused_bits:16; - unsigned int activity_in_progress; + unsigned int paging_in_progress:16, + __object1_unused_bits:16; + unsigned int activity_in_progress; #else /* __LP64__ */ /* * On 32-bit platforms, enlarging "activity_in_progress" would increase @@ -219,185 +219,185 @@ struct vm_object { * overflow of these counters on these platforms, let's keep the * counters as 16-bit integers. 
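
The comment above explains why paging_in_progress and activity_in_progress stay 16-bit on 32-bit platforms; the begin/end macros later in this header compensate by panicking on wrap-around rather than letting the counter silently overflow. A small user-space stand-in for that post-increment check (abort() substitutes for the kernel's panic()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Post-increment wrap check, as the vm_object_paging_begin() macro does. */
static void
counter_begin(uint16_t *count)
{
    (*count)++;
    if (*count == 0) {     /* reading 0 right after ++ means we wrapped */
        fprintf(stderr, "paging counter overflow\n");
        abort();
    }
}

int main(void)
{
    uint16_t paging_in_progress = 0;
    counter_begin(&paging_in_progress);    /* 0 -> 1, fine */
    return paging_in_progress;
}
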
*/ - unsigned short paging_in_progress; - unsigned short activity_in_progress; + unsigned short paging_in_progress; + unsigned short activity_in_progress; #endif /* __LP64__ */ - /* The memory object ports are - * being used (e.g., for pagein - * or pageout) -- don't change - * any of these fields (i.e., - * don't collapse, destroy or - * terminate) - */ + /* The memory object ports are + * being used (e.g., for pagein + * or pageout) -- don't change + * any of these fields (i.e., + * don't collapse, destroy or + * terminate) + */ unsigned int - /* boolean_t array */ all_wanted:11, /* Bit array of "want to be - * awakened" notations. See - * VM_OBJECT_EVENT_* items - * below */ - /* boolean_t */ pager_created:1, /* Has pager been created? */ - /* boolean_t */ pager_initialized:1, /* Are fields ready to use? */ - /* boolean_t */ pager_ready:1, /* Will pager take requests? */ - - /* boolean_t */ pager_trusted:1,/* The pager for this object - * is trusted. This is true for - * all internal objects (backed - * by the default pager) - */ - /* boolean_t */ can_persist:1, /* The kernel may keep the data - * for this object (and rights - * to the memory object) after - * all address map references - * are deallocated? - */ - /* boolean_t */ internal:1, /* Created by the kernel (and - * therefore, managed by the - * default memory manger) - */ - /* boolean_t */ private:1, /* magic device_pager object, - * holds private pages only */ - /* boolean_t */ pageout:1, /* pageout object. contains - * private pages that refer to - * a real memory object. */ - /* boolean_t */ alive:1, /* Not yet terminated */ - - /* boolean_t */ purgable:2, /* Purgable state. See - * VM_PURGABLE_* - */ - /* boolean_t */ purgeable_only_by_kernel:1, - /* boolean_t */ purgeable_when_ripe:1, /* Purgeable when a token - * becomes ripe. - */ - /* boolean_t */ shadowed:1, /* Shadow may exist */ - /* boolean_t */ true_share:1, - /* This object is mapped - * in more than one place - * and hence cannot be - * coalesced */ - /* boolean_t */ terminating:1, - /* Allows vm_object_lookup - * and vm_object_deallocate - * to special case their - * behavior when they are - * called as a result of - * page cleaning during - * object termination - */ - /* boolean_t */ named:1, /* An enforces an internal - * naming convention, by - * calling the right routines - * for allocation and - * destruction, UBC references - * against the vm_object are - * checked. - */ - /* boolean_t */ shadow_severed:1, - /* When a permanent object - * backing a COW goes away - * unexpectedly. This bit - * allows vm_fault to return - * an error rather than a - * zero filled page. - */ - /* boolean_t */ phys_contiguous:1, - /* Memory is wired and - * guaranteed physically - * contiguous. However - * it is not device memory - * and obeys normal virtual - * memory rules w.r.t pmap - * access bits. - */ - /* boolean_t */ nophyscache:1, - /* When mapped at the - * pmap level, don't allow - * primary caching. 
(for - * I/O) - */ - /* boolean_t */ _object5_unused_bits:1; - - queue_chain_t cached_list; /* Attachment point for the - * list of objects cached as a - * result of their can_persist - * value - */ - /* - * the following fields are not protected by any locks - * they are updated via atomic compare and swap - */ - vm_object_offset_t last_alloc; /* last allocation offset */ - int sequential; /* sequential access size */ - - uint32_t pages_created; - uint32_t pages_used; - vm_offset_t cow_hint; /* last page present in */ - /* shadow but not in object */ + /* boolean_t array */ all_wanted:11, /* Bit array of "want to be + * awakened" notations. See + * VM_OBJECT_EVENT_* items + * below */ + /* boolean_t */ pager_created:1, /* Has pager been created? */ + /* boolean_t */ pager_initialized:1, /* Are fields ready to use? */ + /* boolean_t */ pager_ready:1, /* Will pager take requests? */ + + /* boolean_t */ pager_trusted:1, /* The pager for this object + * is trusted. This is true for + * all internal objects (backed + * by the default pager) + */ + /* boolean_t */ can_persist:1, /* The kernel may keep the data + * for this object (and rights + * to the memory object) after + * all address map references + * are deallocated? + */ + /* boolean_t */ internal:1, /* Created by the kernel (and + * therefore, managed by the + * default memory manger) + */ + /* boolean_t */ private:1, /* magic device_pager object, + * holds private pages only */ + /* boolean_t */ pageout:1, /* pageout object. contains + * private pages that refer to + * a real memory object. */ + /* boolean_t */ alive:1, /* Not yet terminated */ + + /* boolean_t */ purgable:2, /* Purgable state. See + * VM_PURGABLE_* + */ + /* boolean_t */ purgeable_only_by_kernel:1, + /* boolean_t */ purgeable_when_ripe:1, /* Purgeable when a token + * becomes ripe. + */ + /* boolean_t */ shadowed:1, /* Shadow may exist */ + /* boolean_t */ true_share:1, + /* This object is mapped + * in more than one place + * and hence cannot be + * coalesced */ + /* boolean_t */ terminating:1, + /* Allows vm_object_lookup + * and vm_object_deallocate + * to special case their + * behavior when they are + * called as a result of + * page cleaning during + * object termination + */ + /* boolean_t */ named:1, /* An enforces an internal + * naming convention, by + * calling the right routines + * for allocation and + * destruction, UBC references + * against the vm_object are + * checked. + */ + /* boolean_t */ shadow_severed:1, + /* When a permanent object + * backing a COW goes away + * unexpectedly. This bit + * allows vm_fault to return + * an error rather than a + * zero filled page. + */ + /* boolean_t */ phys_contiguous:1, + /* Memory is wired and + * guaranteed physically + * contiguous. However + * it is not device memory + * and obeys normal virtual + * memory rules w.r.t pmap + * access bits. + */ + /* boolean_t */ nophyscache:1, + /* When mapped at the + * pmap level, don't allow + * primary caching. 
(for + * I/O) + */ + /* boolean_t */ _object5_unused_bits:1; + + queue_chain_t cached_list; /* Attachment point for the + * list of objects cached as a + * result of their can_persist + * value + */ + /* + * the following fields are not protected by any locks + * they are updated via atomic compare and swap + */ + vm_object_offset_t last_alloc; /* last allocation offset */ + int sequential; /* sequential access size */ + + uint32_t pages_created; + uint32_t pages_used; + vm_offset_t cow_hint; /* last page present in */ + /* shadow but not in object */ /* hold object lock when altering */ - unsigned int - wimg_bits:8, /* cache WIMG bits */ - code_signed:1, /* pages are signed and should be - validated; the signatures are stored - with the pager */ - transposed:1, /* object was transposed with another */ - mapping_in_progress:1, /* pager being mapped/unmapped */ - phantom_isssd:1, - volatile_empty:1, - volatile_fault:1, - all_reusable:1, - blocked_access:1, - set_cache_attr:1, - object_is_shared_cache:1, - purgeable_queue_type:2, - purgeable_queue_group:3, - io_tracking:1, - no_tag_update:1, /* */ + unsigned int + wimg_bits:8, /* cache WIMG bits */ + code_signed:1, /* pages are signed and should be + * validated; the signatures are stored + * with the pager */ + transposed:1, /* object was transposed with another */ + mapping_in_progress:1, /* pager being mapped/unmapped */ + phantom_isssd:1, + volatile_empty:1, + volatile_fault:1, + all_reusable:1, + blocked_access:1, + set_cache_attr:1, + object_is_shared_cache:1, + purgeable_queue_type:2, + purgeable_queue_group:3, + io_tracking:1, + no_tag_update:1, /* */ #if CONFIG_SECLUDED_MEMORY - eligible_for_secluded:1, - can_grab_secluded:1, + eligible_for_secluded:1, + can_grab_secluded:1, #else /* CONFIG_SECLUDED_MEMORY */ - __object3_unused_bits:2, + __object3_unused_bits:2, #endif /* CONFIG_SECLUDED_MEMORY */ #if VM_OBJECT_ACCESS_TRACKING - access_tracking:1, + access_tracking:1, #else /* VM_OBJECT_ACCESS_TRACKING */ - __unused_access_tracking:1, + __unused_access_tracking:1, #endif /* VM_OBJECT_ACCESS_TRACKING */ - vo_ledger_tag:2, - __object2_unused_bits:2; /* for expansion */ + vo_ledger_tag:2, + __object2_unused_bits:2; /* for expansion */ #if VM_OBJECT_ACCESS_TRACKING - uint32_t access_tracking_reads; - uint32_t access_tracking_writes; + uint32_t access_tracking_reads; + uint32_t access_tracking_writes; #endif /* VM_OBJECT_ACCESS_TRACKING */ - uint8_t scan_collisions; - vm_tag_t wire_tag; - uint8_t __object4_unused_bits[2]; + uint8_t scan_collisions; + vm_tag_t wire_tag; + uint8_t __object4_unused_bits[2]; #if CONFIG_PHANTOM_CACHE - uint32_t phantom_object_id; + uint32_t phantom_object_id; #endif #if CONFIG_IOSCHED || UPL_DEBUG - queue_head_t uplq; /* List of outstanding upls */ + queue_head_t uplq; /* List of outstanding upls */ #endif -#ifdef VM_PIP_DEBUG +#ifdef VM_PIP_DEBUG /* * Keep track of the stack traces for the first holders * of a "paging_in_progress" reference for this VM object. 
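
VM_PIP_DEBUG, set up here, keeps a stack trace for each of the first few "paging_in_progress" holders so a stuck reference can be attributed after the fact. A user-space approximation using backtrace(3) in place of the kernel's OSBacktrace(); the constants mirror the defines that follow:

#include <execinfo.h>
#include <string.h>

#define PIP_STACK_FRAMES 25    /* depth of each stack trace */
#define PIP_MAX_REFS     10    /* only the first N holders are recorded */

static struct {
    void *retaddr[PIP_STACK_FRAMES];
} pip_holders[PIP_MAX_REFS];

/* Record the caller's stack for reference number 'pip'. */
static void
pip_debug_begin(int pip)
{
    if (pip < PIP_MAX_REFS) {
        void *frames[PIP_STACK_FRAMES];
        int n = backtrace(frames, PIP_STACK_FRAMES);

        memset(&pip_holders[pip], 0, sizeof(pip_holders[pip]));
        memcpy(pip_holders[pip].retaddr, frames, (size_t)n * sizeof(void *));
    }
}

int main(void)
{
    pip_debug_begin(0);    /* attribute the first reference to this stack */
    return 0;
}
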
*/ -#define VM_PIP_DEBUG_STACK_FRAMES 25 /* depth of each stack trace */ -#define VM_PIP_DEBUG_MAX_REFS 10 /* track that many references */ +#define VM_PIP_DEBUG_STACK_FRAMES 25 /* depth of each stack trace */ +#define VM_PIP_DEBUG_MAX_REFS 10 /* track that many references */ struct __pip_backtrace { void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES]; } pip_holders[VM_PIP_DEBUG_MAX_REFS]; -#endif /* VM_PIP_DEBUG */ +#endif /* VM_PIP_DEBUG */ - queue_chain_t objq; /* object queue - currently used for purgable queues */ - queue_chain_t task_objq; /* objects owned by task - protected by task lock */ + queue_chain_t objq; /* object queue - currently used for purgable queues */ + queue_chain_t task_objq; /* objects owned by task - protected by task lock */ #if !VM_TAG_ACTIVE_UPDATE - queue_chain_t wired_objq; + queue_chain_t wired_objq; #endif /* !VM_TAG_ACTIVE_UPDATE */ #if DEBUG @@ -408,42 +408,42 @@ struct vm_object { }; /* values for object->vo_ledger_tag */ -#define VM_OBJECT_LEDGER_TAG_NONE 0 -#define VM_OBJECT_LEDGER_TAG_NETWORK 1 -#define VM_OBJECT_LEDGER_TAG_MEDIA 2 -#define VM_OBJECT_LEDGER_TAG_RESERVED 3 - -#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object) \ - ((object)->volatile_fault && \ - ((object)->purgable == VM_PURGABLE_VOLATILE || \ +#define VM_OBJECT_LEDGER_TAG_NONE 0 +#define VM_OBJECT_LEDGER_TAG_NETWORK 1 +#define VM_OBJECT_LEDGER_TAG_MEDIA 2 +#define VM_OBJECT_LEDGER_TAG_RESERVED 3 + +#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object) \ + ((object)->volatile_fault && \ + ((object)->purgable == VM_PURGABLE_VOLATILE || \ (object)->purgable == VM_PURGABLE_EMPTY)) #if VM_OBJECT_ACCESS_TRACKING extern uint64_t vm_object_access_tracking_reads; extern uint64_t vm_object_access_tracking_writes; extern void vm_object_access_tracking(vm_object_t object, - int *access_tracking, - uint32_t *access_tracking_reads, - uint32_t *acess_tracking_writes); + int *access_tracking, + uint32_t *access_tracking_reads, + uint32_t *acess_tracking_writes); #endif /* VM_OBJECT_ACCESS_TRACKING */ extern -vm_object_t kernel_object; /* the single kernel object */ +vm_object_t kernel_object; /* the single kernel object */ extern -vm_object_t compressor_object; /* the single compressor object */ +vm_object_t compressor_object; /* the single compressor object */ extern -unsigned int vm_object_absent_max; /* maximum number of absent pages - at a time for each object */ +unsigned int vm_object_absent_max; /* maximum number of absent pages + * at a time for each object */ -# define VM_MSYNC_INITIALIZED 0 -# define VM_MSYNC_SYNCHRONIZING 1 -# define VM_MSYNC_DONE 2 +# define VM_MSYNC_INITIALIZED 0 +# define VM_MSYNC_SYNCHRONIZING 1 +# define VM_MSYNC_DONE 2 -extern lck_grp_t vm_map_lck_grp; -extern lck_attr_t vm_map_lck_attr; +extern lck_grp_t vm_map_lck_grp; +extern lck_attr_t vm_map_lck_attr; #ifndef VM_TAG_ACTIVE_UPDATE #error VM_TAG_ACTIVE_UPDATE @@ -453,76 +453,76 @@ extern lck_attr_t vm_map_lck_attr; #define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE") #define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE") #else /* VM_TAG_ACTIVE_UPDATE */ -#define VM_OBJECT_WIRED_ENQUEUE(object) \ - MACRO_BEGIN \ - lck_spin_lock(&vm_objects_wired_lock); \ - assert(!(object)->wired_objq.next); \ - assert(!(object)->wired_objq.prev); \ - queue_enter(&vm_objects_wired, (object), \ - vm_object_t, wired_objq); \ - lck_spin_unlock(&vm_objects_wired_lock); \ +#define VM_OBJECT_WIRED_ENQUEUE(object) \ + MACRO_BEGIN \ + lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \ + 
assert(!(object)->wired_objq.next); \ + assert(!(object)->wired_objq.prev); \ + queue_enter(&vm_objects_wired, (object), \ + vm_object_t, wired_objq); \ + lck_spin_unlock(&vm_objects_wired_lock); \ MACRO_END -#define VM_OBJECT_WIRED_DEQUEUE(object) \ - MACRO_BEGIN \ - if ((object)->wired_objq.next) { \ - lck_spin_lock(&vm_objects_wired_lock); \ - queue_remove(&vm_objects_wired, (object), \ - vm_object_t, wired_objq); \ - lck_spin_unlock(&vm_objects_wired_lock); \ - } \ +#define VM_OBJECT_WIRED_DEQUEUE(object) \ + MACRO_BEGIN \ + if ((object)->wired_objq.next) { \ + lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \ + queue_remove(&vm_objects_wired, (object), \ + vm_object_t, wired_objq); \ + lck_spin_unlock(&vm_objects_wired_lock); \ + } \ MACRO_END #endif /* VM_TAG_ACTIVE_UPDATE */ -#define VM_OBJECT_WIRED(object, tag) \ - MACRO_BEGIN \ - assert(VM_KERN_MEMORY_NONE != (tag)); \ - assert(VM_KERN_MEMORY_NONE == (object)->wire_tag); \ - (object)->wire_tag = (tag); \ - if (!VM_TAG_ACTIVE_UPDATE) { \ - VM_OBJECT_WIRED_ENQUEUE((object)); \ - } \ +#define VM_OBJECT_WIRED(object, tag) \ + MACRO_BEGIN \ + assert(VM_KERN_MEMORY_NONE != (tag)); \ + assert(VM_KERN_MEMORY_NONE == (object)->wire_tag); \ + (object)->wire_tag = (tag); \ + if (!VM_TAG_ACTIVE_UPDATE) { \ + VM_OBJECT_WIRED_ENQUEUE((object)); \ + } \ MACRO_END -#define VM_OBJECT_UNWIRED(object) \ - MACRO_BEGIN \ - if (!VM_TAG_ACTIVE_UPDATE) { \ - VM_OBJECT_WIRED_DEQUEUE((object)); \ - } \ - if (VM_KERN_MEMORY_NONE != (object)->wire_tag) { \ +#define VM_OBJECT_UNWIRED(object) \ + MACRO_BEGIN \ + if (!VM_TAG_ACTIVE_UPDATE) { \ + VM_OBJECT_WIRED_DEQUEUE((object)); \ + } \ + if (VM_KERN_MEMORY_NONE != (object)->wire_tag) { \ vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count)); \ - (object)->wire_tag = VM_KERN_MEMORY_NONE; \ - } \ + (object)->wire_tag = VM_KERN_MEMORY_NONE; \ + } \ MACRO_END // These two macros start & end a C block -#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object) \ - MACRO_BEGIN \ - { \ +#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object) \ + MACRO_BEGIN \ + { \ int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag; -#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag) \ - if (__wireddelta) { \ - boolean_t __overflow __assert_only = \ - os_add_overflow((object)->wired_page_count, __wireddelta, \ - &(object)->wired_page_count); \ - assert(!__overflow); \ - if (!(object)->pageout && !(object)->no_tag_update) { \ - if (__wireddelta > 0) { \ - assert (VM_KERN_MEMORY_NONE != (tag)); \ - if (VM_KERN_MEMORY_NONE == __waswired) { \ - VM_OBJECT_WIRED((object), (tag)); \ - } \ - vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \ - } else if (VM_KERN_MEMORY_NONE != __waswired) { \ - assert (VM_KERN_MEMORY_NONE != (object)->wire_tag); \ - vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \ - if (!(object)->wired_page_count) { \ - VM_OBJECT_UNWIRED((object)); \ - } \ - } \ - } \ - } \ - } \ +#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag) \ + if (__wireddelta) { \ + boolean_t __overflow __assert_only = \ + os_add_overflow((object)->wired_page_count, __wireddelta, \ + &(object)->wired_page_count); \ + assert(!__overflow); \ + if (!(object)->pageout && !(object)->no_tag_update) { \ + if (__wireddelta > 0) { \ + assert (VM_KERN_MEMORY_NONE != (tag)); \ + if (VM_KERN_MEMORY_NONE == __waswired) { \ + VM_OBJECT_WIRED((object), (tag)); \ + } \ + vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \ + } else if (VM_KERN_MEMORY_NONE != __waswired) { \ 
+ assert (VM_KERN_MEMORY_NONE != (object)->wire_tag); \ + vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta)); \ + if (!(object)->wired_page_count) { \ + VM_OBJECT_UNWIRED((object)); \ + } \ + } \ + } \ + } \ + } \ MACRO_END #define VM_OBJECT_WIRED_PAGE_COUNT(object, delta) \ @@ -536,42 +536,42 @@ extern lck_attr_t vm_map_lck_attr; -#define OBJECT_LOCK_SHARED 0 -#define OBJECT_LOCK_EXCLUSIVE 1 +#define OBJECT_LOCK_SHARED 0 +#define OBJECT_LOCK_EXCLUSIVE 1 -extern lck_grp_t vm_object_lck_grp; -extern lck_grp_attr_t vm_object_lck_grp_attr; -extern lck_attr_t vm_object_lck_attr; -extern lck_attr_t kernel_object_lck_attr; -extern lck_attr_t compressor_object_lck_attr; +extern lck_grp_t vm_object_lck_grp; +extern lck_grp_attr_t vm_object_lck_grp_attr; +extern lck_attr_t vm_object_lck_attr; +extern lck_attr_t kernel_object_lck_attr; +extern lck_attr_t compressor_object_lck_attr; -extern vm_object_t vm_pageout_scan_wants_object; +extern vm_object_t vm_pageout_scan_wants_object; -extern void vm_object_lock(vm_object_t); -extern boolean_t vm_object_lock_try(vm_object_t); -extern boolean_t _vm_object_lock_try(vm_object_t); -extern boolean_t vm_object_lock_avoid(vm_object_t); -extern void vm_object_lock_shared(vm_object_t); -extern boolean_t vm_object_lock_yield_shared(vm_object_t); -extern boolean_t vm_object_lock_try_shared(vm_object_t); -extern void vm_object_unlock(vm_object_t); -extern boolean_t vm_object_lock_upgrade(vm_object_t); +extern void vm_object_lock(vm_object_t); +extern boolean_t vm_object_lock_try(vm_object_t); +extern boolean_t _vm_object_lock_try(vm_object_t); +extern boolean_t vm_object_lock_avoid(vm_object_t); +extern void vm_object_lock_shared(vm_object_t); +extern boolean_t vm_object_lock_yield_shared(vm_object_t); +extern boolean_t vm_object_lock_try_shared(vm_object_t); +extern void vm_object_unlock(vm_object_t); +extern boolean_t vm_object_lock_upgrade(vm_object_t); /* * Object locking macros */ -#define vm_object_lock_init(object) \ - lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \ - (((object) == kernel_object || \ - (object) == vm_submap_object) ? \ - &kernel_object_lck_attr : \ - (((object) == compressor_object) ? \ - &compressor_object_lck_attr : \ - &vm_object_lck_attr))) -#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp) +#define vm_object_lock_init(object) \ + lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \ + (((object) == kernel_object || \ + (object) == vm_submap_object) ? \ + &kernel_object_lck_attr : \ + (((object) == compressor_object) ? \ + &compressor_object_lck_attr : \ + &vm_object_lck_attr))) +#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp) -#define vm_object_lock_try_scan(object) _vm_object_lock_try(object) +#define vm_object_lock_try_scan(object) _vm_object_lock_try(object) /* * CAUTION: the following vm_object_lock_assert_held*() macros merely @@ -587,7 +587,7 @@ extern boolean_t vm_object_lock_upgrade(vm_object_t); lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE) #define vm_object_lock_assert_notheld(object) \ lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD) -#else /* MACH_ASSERT || DEBUG */ +#else /* MACH_ASSERT || DEBUG */ #define vm_object_lock_assert_held(object) #define vm_object_lock_assert_shared(object) #define vm_object_lock_assert_exclusive(object) @@ -599,296 +599,296 @@ extern boolean_t vm_object_lock_upgrade(vm_object_t); * Declare procedures that operate on VM objects. 
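
VM_OBJECT_WIRED_PAGE_UPDATE_END, above, applies the accumulated __wireddelta to wired_page_count through os_add_overflow so an accounting bug trips an assert instead of silently wrapping the counter. A sketch of just that step, assuming a Clang/GCC toolchain for __builtin_add_overflow (which os_add_overflow wraps):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static void
apply_wired_delta(unsigned int *wired_page_count, int64_t delta)
{
    unsigned int next;
    bool overflow = __builtin_add_overflow(*wired_page_count, delta, &next);

    assert(!overflow);     /* an under- or overflow here is an accounting bug */
    (void)overflow;        /* keep -Wunused quiet when built with NDEBUG */
    *wired_page_count = next;
}

int main(void)
{
    unsigned int wired = 0;

    apply_wired_delta(&wired, +8);   /* wire 8 pages   */
    apply_wired_delta(&wired, -3);   /* unwire 3 pages */
    return (int)wired;               /* exits with 5   */
}
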
*/ -__private_extern__ void vm_object_bootstrap(void); +__private_extern__ void vm_object_bootstrap(void); -__private_extern__ void vm_object_init(void); +__private_extern__ void vm_object_init(void); -__private_extern__ void vm_object_init_lck_grp(void); +__private_extern__ void vm_object_init_lck_grp(void); -__private_extern__ void vm_object_reaper_init(void); +__private_extern__ void vm_object_reaper_init(void); -__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size); +__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size); __private_extern__ void _vm_object_allocate(vm_object_size_t size, - vm_object_t object); + vm_object_t object); -#if TASK_SWAPPER +#if TASK_SWAPPER -__private_extern__ void vm_object_res_reference( - vm_object_t object); -__private_extern__ void vm_object_res_deallocate( - vm_object_t object); -#define VM_OBJ_RES_INCR(object) (object)->res_count++ -#define VM_OBJ_RES_DECR(object) (object)->res_count-- +__private_extern__ void vm_object_res_reference( + vm_object_t object); +__private_extern__ void vm_object_res_deallocate( + vm_object_t object); +#define VM_OBJ_RES_INCR(object) (object)->res_count++ +#define VM_OBJ_RES_DECR(object) (object)->res_count-- -#else /* TASK_SWAPPER */ +#else /* TASK_SWAPPER */ -#define VM_OBJ_RES_INCR(object) -#define VM_OBJ_RES_DECR(object) +#define VM_OBJ_RES_INCR(object) +#define VM_OBJ_RES_DECR(object) #define vm_object_res_reference(object) #define vm_object_res_deallocate(object) -#endif /* TASK_SWAPPER */ +#endif /* TASK_SWAPPER */ -#define vm_object_reference_locked(object) \ - MACRO_BEGIN \ - vm_object_t RLObject = (object); \ - vm_object_lock_assert_exclusive(object); \ - assert((RLObject)->ref_count > 0); \ - (RLObject)->ref_count++; \ - assert((RLObject)->ref_count > 1); \ - vm_object_res_reference(RLObject); \ +#define vm_object_reference_locked(object) \ + MACRO_BEGIN \ + vm_object_t RLObject = (object); \ + vm_object_lock_assert_exclusive(object); \ + assert((RLObject)->ref_count > 0); \ + (RLObject)->ref_count++; \ + assert((RLObject)->ref_count > 1); \ + vm_object_res_reference(RLObject); \ MACRO_END -#define vm_object_reference_shared(object) \ - MACRO_BEGIN \ - vm_object_t RLObject = (object); \ - vm_object_lock_assert_shared(object); \ - assert((RLObject)->ref_count > 0); \ - OSAddAtomic(1, &(RLObject)->ref_count); \ - assert((RLObject)->ref_count > 0); \ - /* XXX we would need an atomic version of the following ... */ \ - vm_object_res_reference(RLObject); \ +#define vm_object_reference_shared(object) \ + MACRO_BEGIN \ + vm_object_t RLObject = (object); \ + vm_object_lock_assert_shared(object); \ + assert((RLObject)->ref_count > 0); \ + OSAddAtomic(1, &(RLObject)->ref_count); \ + assert((RLObject)->ref_count > 0); \ + /* XXX we would need an atomic version of the following ... 
*/ \ + vm_object_res_reference(RLObject); \ MACRO_END -__private_extern__ void vm_object_reference( - vm_object_t object); +__private_extern__ void vm_object_reference( + vm_object_t object); -#if !MACH_ASSERT +#if !MACH_ASSERT -#define vm_object_reference(object) \ -MACRO_BEGIN \ - vm_object_t RObject = (object); \ - if (RObject) { \ - vm_object_lock_shared(RObject); \ - vm_object_reference_shared(RObject); \ - vm_object_unlock(RObject); \ - } \ +#define vm_object_reference(object) \ +MACRO_BEGIN \ + vm_object_t RObject = (object); \ + if (RObject) { \ + vm_object_lock_shared(RObject); \ + vm_object_reference_shared(RObject); \ + vm_object_unlock(RObject); \ + } \ MACRO_END -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ -__private_extern__ void vm_object_deallocate( - vm_object_t object); +__private_extern__ void vm_object_deallocate( + vm_object_t object); __private_extern__ kern_return_t vm_object_release_name( - vm_object_t object, - int flags); - -__private_extern__ void vm_object_pmap_protect( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - pmap_t pmap, - vm_map_offset_t pmap_start, - vm_prot_t prot); - -__private_extern__ void vm_object_pmap_protect_options( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - pmap_t pmap, - vm_map_offset_t pmap_start, - vm_prot_t prot, - int options); - -__private_extern__ void vm_object_page_remove( - vm_object_t object, - vm_object_offset_t start, - vm_object_offset_t end); - -__private_extern__ void vm_object_deactivate_pages( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - boolean_t kill_page, - boolean_t reusable_page, - struct pmap *pmap, - vm_map_offset_t pmap_offset); - -__private_extern__ void vm_object_reuse_pages( - vm_object_t object, - vm_object_offset_t start_offset, - vm_object_offset_t end_offset, - boolean_t allow_partial_reuse); - -__private_extern__ uint64_t vm_object_purge( - vm_object_t object, - int flags); + vm_object_t object, + int flags); + +__private_extern__ void vm_object_pmap_protect( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + pmap_t pmap, + vm_map_offset_t pmap_start, + vm_prot_t prot); + +__private_extern__ void vm_object_pmap_protect_options( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + pmap_t pmap, + vm_map_offset_t pmap_start, + vm_prot_t prot, + int options); + +__private_extern__ void vm_object_page_remove( + vm_object_t object, + vm_object_offset_t start, + vm_object_offset_t end); + +__private_extern__ void vm_object_deactivate_pages( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + boolean_t kill_page, + boolean_t reusable_page, + struct pmap *pmap, + vm_map_offset_t pmap_offset); + +__private_extern__ void vm_object_reuse_pages( + vm_object_t object, + vm_object_offset_t start_offset, + vm_object_offset_t end_offset, + boolean_t allow_partial_reuse); + +__private_extern__ uint64_t vm_object_purge( + vm_object_t object, + int flags); __private_extern__ kern_return_t vm_object_purgable_control( - vm_object_t object, - vm_purgable_t control, - int *state); + vm_object_t object, + vm_purgable_t control, + int *state); __private_extern__ kern_return_t vm_object_get_page_counts( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - unsigned int *resident_page_count, - unsigned int *dirty_page_count); - -__private_extern__ boolean_t vm_object_coalesce( - vm_object_t prev_object, - vm_object_t next_object, - 
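
vm_object_reference_shared(), above, bumps ref_count with OSAddAtomic because only the shared lock is held and several readers may take references concurrently, while the exclusive-lock variant can use a plain increment. A C11-atomics rendering of the same rule (refobj is a stand-in type, not the kernel's):

#include <assert.h>
#include <stdatomic.h>

struct refobj {
    atomic_int ref_count;
};

/* Safe with only a shared (read) lock held: concurrent readers may all
 * add references, so the bump itself must be atomic. */
static void
obj_reference_shared(struct refobj *o)
{
    int old = atomic_fetch_add_explicit(&o->ref_count, 1, memory_order_relaxed);

    assert(old > 0);   /* caller must already hold a reference */
    (void)old;
}

int main(void)
{
    struct refobj o = { .ref_count = 1 };

    obj_reference_shared(&o);
    return atomic_load(&o.ref_count);   /* 2 */
}
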
vm_object_offset_t prev_offset, - vm_object_offset_t next_offset, - vm_object_size_t prev_size, - vm_object_size_t next_size); - -__private_extern__ boolean_t vm_object_shadow( - vm_object_t *object, - vm_object_offset_t *offset, - vm_object_size_t length); - -__private_extern__ void vm_object_collapse( - vm_object_t object, - vm_object_offset_t offset, - boolean_t can_bypass); - -__private_extern__ boolean_t vm_object_copy_quickly( - vm_object_t *_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - boolean_t *_src_needs_copy, - boolean_t *_dst_needs_copy); - -__private_extern__ kern_return_t vm_object_copy_strategically( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - vm_object_t *dst_object, - vm_object_offset_t *dst_offset, - boolean_t *dst_needs_copy); - -__private_extern__ kern_return_t vm_object_copy_slowly( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - boolean_t interruptible, - vm_object_t *_result_object); - -__private_extern__ vm_object_t vm_object_copy_delayed( - vm_object_t src_object, - vm_object_offset_t src_offset, - vm_object_size_t size, - boolean_t src_object_shared); - - - -__private_extern__ kern_return_t vm_object_destroy( - vm_object_t object, - kern_return_t reason); - -__private_extern__ void vm_object_pager_create( - vm_object_t object); - -__private_extern__ void vm_object_compressor_pager_create( - vm_object_t object); - -__private_extern__ void vm_object_page_map( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - vm_object_offset_t (*map_fn) - (void *, vm_object_offset_t), - void *map_fn_data); + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + unsigned int *resident_page_count, + unsigned int *dirty_page_count); + +__private_extern__ boolean_t vm_object_coalesce( + vm_object_t prev_object, + vm_object_t next_object, + vm_object_offset_t prev_offset, + vm_object_offset_t next_offset, + vm_object_size_t prev_size, + vm_object_size_t next_size); + +__private_extern__ boolean_t vm_object_shadow( + vm_object_t *object, + vm_object_offset_t *offset, + vm_object_size_t length); + +__private_extern__ void vm_object_collapse( + vm_object_t object, + vm_object_offset_t offset, + boolean_t can_bypass); + +__private_extern__ boolean_t vm_object_copy_quickly( + vm_object_t *_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + boolean_t *_src_needs_copy, + boolean_t *_dst_needs_copy); + +__private_extern__ kern_return_t vm_object_copy_strategically( + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + vm_object_t *dst_object, + vm_object_offset_t *dst_offset, + boolean_t *dst_needs_copy); + +__private_extern__ kern_return_t vm_object_copy_slowly( + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + boolean_t interruptible, + vm_object_t *_result_object); + +__private_extern__ vm_object_t vm_object_copy_delayed( + vm_object_t src_object, + vm_object_offset_t src_offset, + vm_object_size_t size, + boolean_t src_object_shared); + + + +__private_extern__ kern_return_t vm_object_destroy( + vm_object_t object, + kern_return_t reason); + +__private_extern__ void vm_object_pager_create( + vm_object_t object); + +__private_extern__ void vm_object_compressor_pager_create( + vm_object_t object); + +__private_extern__ void vm_object_page_map( + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + vm_object_offset_t (*map_fn) + (void *, 
vm_object_offset_t), + void *map_fn_data); __private_extern__ kern_return_t vm_object_upl_request( - vm_object_t object, - vm_object_offset_t offset, - upl_size_t size, - upl_t *upl, - upl_page_info_t *page_info, - unsigned int *count, - upl_control_flags_t flags, - vm_tag_t tag); + vm_object_t object, + vm_object_offset_t offset, + upl_size_t size, + upl_t *upl, + upl_page_info_t *page_info, + unsigned int *count, + upl_control_flags_t flags, + vm_tag_t tag); __private_extern__ kern_return_t vm_object_transpose( - vm_object_t object1, - vm_object_t object2, - vm_object_size_t transpose_size); + vm_object_t object1, + vm_object_t object2, + vm_object_size_t transpose_size); __private_extern__ boolean_t vm_object_sync( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - boolean_t should_flush, - boolean_t should_return, - boolean_t should_iosync); + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + boolean_t should_flush, + boolean_t should_return, + boolean_t should_iosync); __private_extern__ kern_return_t vm_object_update( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - vm_object_offset_t *error_offset, - int *io_errno, - memory_object_return_t should_return, - int flags, - vm_prot_t prot); + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + vm_object_offset_t *error_offset, + int *io_errno, + memory_object_return_t should_return, + int flags, + vm_prot_t prot); __private_extern__ kern_return_t vm_object_lock_request( - vm_object_t object, - vm_object_offset_t offset, - vm_object_size_t size, - memory_object_return_t should_return, - int flags, - vm_prot_t prot); + vm_object_t object, + vm_object_offset_t offset, + vm_object_size_t size, + memory_object_return_t should_return, + int flags, + vm_prot_t prot); -__private_extern__ vm_object_t vm_object_memory_object_associate( - memory_object_t pager, - vm_object_t object, - vm_object_size_t size, - boolean_t check_named); +__private_extern__ vm_object_t vm_object_memory_object_associate( + memory_object_t pager, + vm_object_t object, + vm_object_size_t size, + boolean_t check_named); -__private_extern__ void vm_object_cluster_size( - vm_object_t object, - vm_object_offset_t *start, - vm_size_t *length, - vm_object_fault_info_t fault_info, - uint32_t *io_streaming); +__private_extern__ void vm_object_cluster_size( + vm_object_t object, + vm_object_offset_t *start, + vm_size_t *length, + vm_object_fault_info_t fault_info, + uint32_t *io_streaming); __private_extern__ kern_return_t vm_object_populate_with_private( - vm_object_t object, - vm_object_offset_t offset, - ppnum_t phys_page, - vm_size_t size); + vm_object_t object, + vm_object_offset_t offset, + ppnum_t phys_page, + vm_size_t size); __private_extern__ void vm_object_change_wimg_mode( - vm_object_t object, - unsigned int wimg_mode); + vm_object_t object, + unsigned int wimg_mode); extern kern_return_t adjust_vm_object_cache( vm_size_t oval, vm_size_t nval); extern kern_return_t vm_object_page_op( - vm_object_t object, - vm_object_offset_t offset, - int ops, - ppnum_t *phys_entry, - int *flags); + vm_object_t object, + vm_object_offset_t offset, + int ops, + ppnum_t *phys_entry, + int *flags); extern kern_return_t vm_object_range_op( - vm_object_t object, - vm_object_offset_t offset_beg, - vm_object_offset_t offset_end, + vm_object_t object, + vm_object_offset_t offset_beg, + vm_object_offset_t offset_end, int ops, - uint32_t *range); + uint32_t *range); -__private_extern__ void 
vm_object_reap_pages( - vm_object_t object, - int reap_type); -#define REAP_REAP 0 -#define REAP_TERMINATE 1 -#define REAP_PURGEABLE 2 -#define REAP_DATA_FLUSH 3 +__private_extern__ void vm_object_reap_pages( + vm_object_t object, + int reap_type); +#define REAP_REAP 0 +#define REAP_TERMINATE 1 +#define REAP_PURGEABLE 2 +#define REAP_DATA_FLUSH 3 #if CONFIG_FREEZE @@ -908,11 +908,11 @@ vm_object_pageout( #if CONFIG_IOSCHED struct io_reprioritize_req { - uint64_t blkno; - uint32_t len; - int priority; - struct vnode *devvp; - queue_chain_t io_reprioritize_list; + uint64_t blkno; + uint32_t len; + int priority; + struct vnode *devvp; + queue_chain_t io_reprioritize_list; }; typedef struct io_reprioritize_req *io_reprioritize_req_t; @@ -923,24 +923,24 @@ extern void vm_io_reprioritize_init(void); * Event waiting handling */ -#define VM_OBJECT_EVENT_INITIALIZED 0 -#define VM_OBJECT_EVENT_PAGER_READY 1 -#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS 2 -#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS 3 -#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS 4 -#define VM_OBJECT_EVENT_UNCACHING 5 -#define VM_OBJECT_EVENT_COPY_CALL 6 -#define VM_OBJECT_EVENT_CACHING 7 -#define VM_OBJECT_EVENT_UNBLOCKED 8 -#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9 +#define VM_OBJECT_EVENT_INITIALIZED 0 +#define VM_OBJECT_EVENT_PAGER_READY 1 +#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS 2 +#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS 3 +#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS 4 +#define VM_OBJECT_EVENT_UNCACHING 5 +#define VM_OBJECT_EVENT_COPY_CALL 6 +#define VM_OBJECT_EVENT_CACHING 7 +#define VM_OBJECT_EVENT_UNBLOCKED 8 +#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9 #define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */ static __inline__ wait_result_t vm_object_assert_wait( - vm_object_t object, - int event, - wait_interrupt_t interruptible) + vm_object_t object, + int event, + wait_interrupt_t interruptible) { wait_result_t wr; @@ -949,15 +949,15 @@ vm_object_assert_wait( object->all_wanted |= 1 << event; wr = assert_wait((event_t)((vm_offset_t)object + event), - interruptible); + interruptible); return wr; } static __inline__ wait_result_t vm_object_wait( - vm_object_t object, - int event, - wait_interrupt_t interruptible) + vm_object_t object, + int event, + wait_interrupt_t interruptible) { wait_result_t wr; @@ -969,21 +969,22 @@ vm_object_wait( static __inline__ wait_result_t thread_sleep_vm_object( - vm_object_t object, - event_t event, - wait_interrupt_t interruptible) + vm_object_t object, + event_t event, + wait_interrupt_t interruptible) { wait_result_t wr; #if DEVELOPMENT || DEBUG - if (object->Lock_owner != current_thread()) + if (object->Lock_owner != current_thread()) { panic("thread_sleep_vm_object: now owner - %p\n", object); + } object->Lock_owner = 0; #endif - wr = lck_rw_sleep(&object->Lock, - LCK_SLEEP_PROMOTED_PRI, - event, - interruptible); + wr = lck_rw_sleep(&object->Lock, + LCK_SLEEP_PROMOTED_PRI, + event, + interruptible); #if DEVELOPMENT || DEBUG object->Lock_owner = current_thread(); #endif @@ -992,9 +993,9 @@ thread_sleep_vm_object( static __inline__ wait_result_t vm_object_sleep( - vm_object_t object, - int event, - wait_interrupt_t interruptible) + vm_object_t object, + int event, + wait_interrupt_t interruptible) { wait_result_t wr; @@ -1003,28 +1004,29 @@ vm_object_sleep( object->all_wanted |= 1 << event; wr = thread_sleep_vm_object(object, - (event_t)((vm_offset_t)object + event), - interruptible); + (event_t)((vm_offset_t)object + event), + interruptible); return wr; } static 
__inline__ void vm_object_wakeup( - vm_object_t object, - int event) + vm_object_t object, + int event) { vm_object_lock_assert_exclusive(object); assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); - if (object->all_wanted & (1 << event)) + if (object->all_wanted & (1 << event)) { thread_wakeup((event_t)((vm_offset_t)object + event)); + } object->all_wanted &= ~(1 << event); } static __inline__ void vm_object_set_wanted( - vm_object_t object, - int event) + vm_object_t object, + int event) { vm_object_lock_assert_exclusive(object); assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); @@ -1034,8 +1036,8 @@ vm_object_set_wanted( static __inline__ int vm_object_wanted( - vm_object_t object, - int event) + vm_object_t object, + int event) { vm_object_lock_assert_held(object); assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX); @@ -1048,129 +1050,129 @@ vm_object_wanted( */ #ifdef VM_PIP_DEBUG #include -#define VM_PIP_DEBUG_BEGIN(object) \ - MACRO_BEGIN \ - int pip = ((object)->paging_in_progress + \ - (object)->activity_in_progress); \ - if (pip < VM_PIP_DEBUG_MAX_REFS) { \ - (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \ - VM_PIP_DEBUG_STACK_FRAMES); \ - } \ +#define VM_PIP_DEBUG_BEGIN(object) \ + MACRO_BEGIN \ + int pip = ((object)->paging_in_progress + \ + (object)->activity_in_progress); \ + if (pip < VM_PIP_DEBUG_MAX_REFS) { \ + (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \ + VM_PIP_DEBUG_STACK_FRAMES); \ + } \ MACRO_END -#else /* VM_PIP_DEBUG */ +#else /* VM_PIP_DEBUG */ #define VM_PIP_DEBUG_BEGIN(object) -#endif /* VM_PIP_DEBUG */ - -#define vm_object_activity_begin(object) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - VM_PIP_DEBUG_BEGIN((object)); \ - (object)->activity_in_progress++; \ - if ((object)->activity_in_progress == 0) { \ - panic("vm_object_activity_begin(%p): overflow\n", (object));\ - } \ +#endif /* VM_PIP_DEBUG */ + +#define vm_object_activity_begin(object) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + VM_PIP_DEBUG_BEGIN((object)); \ + (object)->activity_in_progress++; \ + if ((object)->activity_in_progress == 0) { \ + panic("vm_object_activity_begin(%p): overflow\n", (object));\ + } \ MACRO_END -#define vm_object_activity_end(object) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - if ((object)->activity_in_progress == 0) { \ - panic("vm_object_activity_end(%p): underflow\n", (object));\ - } \ - (object)->activity_in_progress--; \ - if ((object)->paging_in_progress == 0 && \ - (object)->activity_in_progress == 0) \ - vm_object_wakeup((object), \ - VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \ +#define vm_object_activity_end(object) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + if ((object)->activity_in_progress == 0) { \ + panic("vm_object_activity_end(%p): underflow\n", (object));\ + } \ + (object)->activity_in_progress--; \ + if ((object)->paging_in_progress == 0 && \ + (object)->activity_in_progress == 0) \ + vm_object_wakeup((object), \ + VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \ MACRO_END -#define vm_object_paging_begin(object) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - VM_PIP_DEBUG_BEGIN((object)); \ - (object)->paging_in_progress++; \ - if ((object)->paging_in_progress == 0) { \ - panic("vm_object_paging_begin(%p): overflow\n", (object));\ - } \ +#define vm_object_paging_begin(object) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + VM_PIP_DEBUG_BEGIN((object)); \ + (object)->paging_in_progress++; \ + if 
((object)->paging_in_progress == 0) { \ + panic("vm_object_paging_begin(%p): overflow\n", (object));\ + } \ MACRO_END -#define vm_object_paging_end(object) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - if ((object)->paging_in_progress == 0) { \ - panic("vm_object_paging_end(%p): underflow\n", (object));\ - } \ - (object)->paging_in_progress--; \ - if ((object)->paging_in_progress == 0) { \ - vm_object_wakeup((object), \ - VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \ - if ((object)->activity_in_progress == 0) \ - vm_object_wakeup((object), \ - VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \ - } \ +#define vm_object_paging_end(object) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + if ((object)->paging_in_progress == 0) { \ + panic("vm_object_paging_end(%p): underflow\n", (object));\ + } \ + (object)->paging_in_progress--; \ + if ((object)->paging_in_progress == 0) { \ + vm_object_wakeup((object), \ + VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \ + if ((object)->activity_in_progress == 0) \ + vm_object_wakeup((object), \ + VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \ + } \ MACRO_END -#define vm_object_paging_wait(object, interruptible) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - while ((object)->paging_in_progress != 0 || \ - (object)->activity_in_progress != 0) { \ - wait_result_t _wr; \ - \ - _wr = vm_object_sleep((object), \ - VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \ - (interruptible)); \ - \ - /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\ - /*XXX break; */ \ - } \ +#define vm_object_paging_wait(object, interruptible) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + while ((object)->paging_in_progress != 0 || \ + (object)->activity_in_progress != 0) { \ + wait_result_t _wr; \ + \ + _wr = vm_object_sleep((object), \ + VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \ + (interruptible)); \ + \ + /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \ + /*XXX break; */ \ + } \ MACRO_END -#define vm_object_paging_only_wait(object, interruptible) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - while ((object)->paging_in_progress != 0) { \ - wait_result_t _wr; \ - \ - _wr = vm_object_sleep((object), \ - VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\ - (interruptible)); \ - \ - /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\ - /*XXX break; */ \ - } \ +#define vm_object_paging_only_wait(object, interruptible) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + while ((object)->paging_in_progress != 0) { \ + wait_result_t _wr; \ + \ + _wr = vm_object_sleep((object), \ + VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\ + (interruptible)); \ + \ + /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \ + /*XXX break; */ \ + } \ MACRO_END -#define vm_object_mapping_begin(object) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - assert(! (object)->mapping_in_progress); \ - (object)->mapping_in_progress = TRUE; \ +#define vm_object_mapping_begin(object) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + assert(! 
(object)->mapping_in_progress); \ + (object)->mapping_in_progress = TRUE; \ MACRO_END -#define vm_object_mapping_end(object) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - assert((object)->mapping_in_progress); \ - (object)->mapping_in_progress = FALSE; \ - vm_object_wakeup((object), \ - VM_OBJECT_EVENT_MAPPING_IN_PROGRESS); \ +#define vm_object_mapping_end(object) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + assert((object)->mapping_in_progress); \ + (object)->mapping_in_progress = FALSE; \ + vm_object_wakeup((object), \ + VM_OBJECT_EVENT_MAPPING_IN_PROGRESS); \ MACRO_END -#define vm_object_mapping_wait(object, interruptible) \ - MACRO_BEGIN \ - vm_object_lock_assert_exclusive((object)); \ - while ((object)->mapping_in_progress) { \ - wait_result_t _wr; \ - \ - _wr = vm_object_sleep((object), \ - VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \ - (interruptible)); \ - /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/\ - /*XXX break; */ \ - } \ - assert(!(object)->mapping_in_progress); \ +#define vm_object_mapping_wait(object, interruptible) \ + MACRO_BEGIN \ + vm_object_lock_assert_exclusive((object)); \ + while ((object)->mapping_in_progress) { \ + wait_result_t _wr; \ + \ + _wr = vm_object_sleep((object), \ + VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \ + (interruptible)); \ + /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \ + /*XXX break; */ \ + } \ + assert(!(object)->mapping_in_progress); \ MACRO_END @@ -1178,21 +1180,21 @@ vm_object_wanted( #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK)) #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK)) -extern void vm_object_cache_add(vm_object_t); -extern void vm_object_cache_remove(vm_object_t); -extern int vm_object_cache_evict(int, int); +extern void vm_object_cache_add(vm_object_t); +extern void vm_object_cache_remove(vm_object_t); +extern int vm_object_cache_evict(int, int); #define VM_OBJECT_OWNER_DISOWNED ((task_t) -1) -#define VM_OBJECT_OWNER(object) \ - ((((object)->purgable == VM_PURGABLE_DENY && \ - (object)->vo_ledger_tag == 0) || \ - (object)->vo_owner == TASK_NULL) \ - ? TASK_NULL /* not owned */ \ - : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED) \ - ? kernel_task /* disowned -> kernel */ \ - : (object)->vo_owner)) /* explicit owner */ \ - -extern void vm_object_ledger_tag_ledgers( +#define VM_OBJECT_OWNER(object) \ + ((((object)->purgable == VM_PURGABLE_DENY && \ + (object)->vo_ledger_tag == 0) || \ + (object)->vo_owner == TASK_NULL) \ + ? TASK_NULL /* not owned */ \ + : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED) \ + ? kernel_task /* disowned -> kernel */ \ + : (object)->vo_owner)) /* explicit owner */ \ + +extern void vm_object_ledger_tag_ledgers( vm_object_t object, int *ledger_idx_volatile, int *ledger_idx_nonvolatile, @@ -1205,4 +1207,4 @@ extern kern_return_t vm_object_ownership_change( task_t owner, boolean_t task_objq_locked); -#endif /* _VM_VM_OBJECT_H_ */ +#endif /* _VM_VM_OBJECT_H_ */ diff --git a/osfmk/vm/vm_options.h b/osfmk/vm/vm_options.h index 47a456aa6..43a37d0a8 100644 --- a/osfmk/vm/vm_options.h +++ b/osfmk/vm/vm_options.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ diff --git a/osfmk/vm/vm_page.h b/osfmk/vm/vm_page.h index f8fa9c025..9e0304dbf 100644 --- a/osfmk/vm/vm_page.h +++ b/osfmk/vm/vm_page.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,7 +63,7 @@ * Resident memory system definitions. 
*/ -#ifndef _VM_VM_PAGE_H_ +#ifndef _VM_VM_PAGE_H_ #define _VM_VM_PAGE_H_ #include @@ -82,34 +82,34 @@ * vm_page_t array allocation and the zone init code to determine if we can safely pack and unpack * pointers from the 2 ends of these spaces */ -typedef uint32_t vm_page_packed_t; +typedef uint32_t vm_page_packed_t; struct vm_page_packed_queue_entry { - vm_page_packed_t next; /* next element */ - vm_page_packed_t prev; /* previous element */ + vm_page_packed_t next; /* next element */ + vm_page_packed_t prev; /* previous element */ }; -typedef struct vm_page_packed_queue_entry *vm_page_queue_t; -typedef struct vm_page_packed_queue_entry vm_page_queue_head_t; -typedef struct vm_page_packed_queue_entry vm_page_queue_chain_t; -typedef struct vm_page_packed_queue_entry *vm_page_queue_entry_t; +typedef struct vm_page_packed_queue_entry *vm_page_queue_t; +typedef struct vm_page_packed_queue_entry vm_page_queue_head_t; +typedef struct vm_page_packed_queue_entry vm_page_queue_chain_t; +typedef struct vm_page_packed_queue_entry *vm_page_queue_entry_t; -typedef vm_page_packed_t vm_page_object_t; +typedef vm_page_packed_t vm_page_object_t; #else /* - * we can't do the packing trick on 32 bit architectures, so + * we can't do the packing trick on 32 bit architectures, so * just turn the macros into noops. */ -typedef struct vm_page *vm_page_packed_t; +typedef struct vm_page *vm_page_packed_t; -#define vm_page_queue_t queue_t -#define vm_page_queue_head_t queue_head_t -#define vm_page_queue_chain_t queue_chain_t -#define vm_page_queue_entry_t queue_entry_t +#define vm_page_queue_t queue_t +#define vm_page_queue_head_t queue_head_t +#define vm_page_queue_chain_t queue_chain_t +#define vm_page_queue_entry_t queue_entry_t -#define vm_page_object_t vm_object_t +#define vm_page_object_t vm_object_t #endif @@ -122,7 +122,7 @@ typedef struct vm_page *vm_page_packed_t; -#define VM_PAGE_COMPRESSOR_COUNT (compressor_object->resident_page_count) +#define VM_PAGE_COMPRESSOR_COUNT (compressor_object->resident_page_count) /* * Management of resident (logical) pages. @@ -150,38 +150,38 @@ typedef struct vm_page *vm_page_packed_t; * change that field; holding either lock is sufficient to read.] */ -#define VM_PAGE_NULL ((vm_page_t) 0) +#define VM_PAGE_NULL ((vm_page_t) 0) -extern char vm_page_inactive_states[]; -extern char vm_page_pageable_states[]; -extern char vm_page_non_speculative_pageable_states[]; -extern char vm_page_active_or_inactive_states[]; +extern char vm_page_inactive_states[]; +extern char vm_page_pageable_states[]; +extern char vm_page_non_speculative_pageable_states[]; +extern char vm_page_active_or_inactive_states[]; -#define VM_PAGE_INACTIVE(m) (vm_page_inactive_states[m->vmp_q_state]) -#define VM_PAGE_PAGEABLE(m) (vm_page_pageable_states[m->vmp_q_state]) -#define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m) (vm_page_non_speculative_pageable_states[m->vmp_q_state]) -#define VM_PAGE_ACTIVE_OR_INACTIVE(m) (vm_page_active_or_inactive_states[m->vmp_q_state]) +#define VM_PAGE_INACTIVE(m) (vm_page_inactive_states[m->vmp_q_state]) +#define VM_PAGE_PAGEABLE(m) (vm_page_pageable_states[m->vmp_q_state]) +#define VM_PAGE_NON_SPECULATIVE_PAGEABLE(m) (vm_page_non_speculative_pageable_states[m->vmp_q_state]) +#define VM_PAGE_ACTIVE_OR_INACTIVE(m) (vm_page_active_or_inactive_states[m->vmp_q_state]) -#define VM_PAGE_NOT_ON_Q 0 /* page is not present on any queue, nor is it wired... 
mainly a transient state */ -#define VM_PAGE_IS_WIRED 1 /* page is currently wired */ -#define VM_PAGE_USED_BY_COMPRESSOR 2 /* page is in use by the compressor to hold compressed data */ -#define VM_PAGE_ON_FREE_Q 3 /* page is on the main free queue */ -#define VM_PAGE_ON_FREE_LOCAL_Q 4 /* page is on one of the per-CPU free queues */ -#define VM_PAGE_ON_FREE_LOPAGE_Q 5 /* page is on the lopage pool free list */ -#define VM_PAGE_ON_THROTTLED_Q 6 /* page is on the throttled queue... we stash anonymous pages here when not paging */ -#define VM_PAGE_ON_PAGEOUT_Q 7 /* page is on one of the pageout queues (internal/external) awaiting processing */ -#define VM_PAGE_ON_SPECULATIVE_Q 8 /* page is on one of the speculative queues */ -#define VM_PAGE_ON_ACTIVE_LOCAL_Q 9 /* page has recently been created and is being held in one of the per-CPU local queues */ -#define VM_PAGE_ON_ACTIVE_Q 10 /* page is in global active queue */ -#define VM_PAGE_ON_INACTIVE_INTERNAL_Q 11 /* page is on the inactive internal queue a.k.a. anonymous queue */ -#define VM_PAGE_ON_INACTIVE_EXTERNAL_Q 12 /* page in on the inactive external queue a.k.a. file backed queue */ -#define VM_PAGE_ON_INACTIVE_CLEANED_Q 13 /* page has been cleaned to a backing file and is ready to be stolen */ -#define VM_PAGE_ON_SECLUDED_Q 14 /* page is on secluded queue */ -#define VM_PAGE_Q_STATE_LAST_VALID_VALUE 14 /* we currently use 4 bits for the state... don't let this go beyond 15 */ +#define VM_PAGE_NOT_ON_Q 0 /* page is not present on any queue, nor is it wired... mainly a transient state */ +#define VM_PAGE_IS_WIRED 1 /* page is currently wired */ +#define VM_PAGE_USED_BY_COMPRESSOR 2 /* page is in use by the compressor to hold compressed data */ +#define VM_PAGE_ON_FREE_Q 3 /* page is on the main free queue */ +#define VM_PAGE_ON_FREE_LOCAL_Q 4 /* page is on one of the per-CPU free queues */ +#define VM_PAGE_ON_FREE_LOPAGE_Q 5 /* page is on the lopage pool free list */ +#define VM_PAGE_ON_THROTTLED_Q 6 /* page is on the throttled queue... we stash anonymous pages here when not paging */ +#define VM_PAGE_ON_PAGEOUT_Q 7 /* page is on one of the pageout queues (internal/external) awaiting processing */ +#define VM_PAGE_ON_SPECULATIVE_Q 8 /* page is on one of the speculative queues */ +#define VM_PAGE_ON_ACTIVE_LOCAL_Q 9 /* page has recently been created and is being held in one of the per-CPU local queues */ +#define VM_PAGE_ON_ACTIVE_Q 10 /* page is in global active queue */ +#define VM_PAGE_ON_INACTIVE_INTERNAL_Q 11 /* page is on the inactive internal queue a.k.a. anonymous queue */ +#define VM_PAGE_ON_INACTIVE_EXTERNAL_Q 12 /* page is on the inactive external queue a.k.a. file backed queue */ +#define VM_PAGE_ON_INACTIVE_CLEANED_Q 13 /* page has been cleaned to a backing file and is ready to be stolen */ +#define VM_PAGE_ON_SECLUDED_Q 14 /* page is on secluded queue */ +#define VM_PAGE_Q_STATE_LAST_VALID_VALUE 14 /* we currently use 4 bits for the state...
don't let this go beyond 15 */ -#define VM_PAGE_Q_STATE_ARRAY_SIZE (VM_PAGE_Q_STATE_LAST_VALID_VALUE+1) +#define VM_PAGE_Q_STATE_ARRAY_SIZE (VM_PAGE_Q_STATE_LAST_VALID_VALUE+1) /* @@ -199,7 +199,7 @@ struct vm_page { vm_page_queue_chain_t vmp_listq; /* all pages in same object (O) */ #if CONFIG_BACKGROUND_QUEUE - vm_page_queue_chain_t vmp_backgroundq; /* anonymous pages in the background pool (P) */ + vm_page_queue_chain_t vmp_backgroundq; /* anonymous pages in the background pool (P) */ #endif vm_object_offset_t vmp_offset; /* offset into that object (O,P) */ @@ -213,21 +213,21 @@ struct vm_page { */ #define vmp_local_id vmp_wire_count unsigned int vmp_wire_count:16, /* how many wired down maps use me? (O&P) */ - vmp_q_state:4, /* which q is the page on (P) */ - vmp_in_background:1, - vmp_on_backgroundq:1, - vmp_gobbled:1, /* page used internally (P) */ - vmp_laundry:1, /* page is being cleaned now (P)*/ - vmp_no_cache:1, /* page is not to be cached and should */ + vmp_q_state:4, /* which q is the page on (P) */ + vmp_in_background:1, + vmp_on_backgroundq:1, + vmp_gobbled:1, /* page used internally (P) */ + vmp_laundry:1, /* page is being cleaned now (P)*/ + vmp_no_cache:1, /* page is not to be cached and should */ /* be reused ahead of other pages (P) */ - vmp_private:1, /* Page should not be returned to the free list (P) */ - vmp_reference:1, /* page has been used (P) */ - vmp_unused_page_bits:5; + vmp_private:1, /* Page should not be returned to the free list (P) */ + vmp_reference:1, /* page has been used (P) */ + vmp_unused_page_bits:5; /* * MUST keep the 2 32 bit words used as bit fields * separated since the compiler has a nasty habit - * of using 64 bit loads and stores on them as + * of using 64 bit loads and stores on them as * if they were a single 64 bit field... since * they are protected by 2 different locks, this * is a real problem @@ -236,41 +236,41 @@ struct vm_page { /* * The following word of flags is protected by the "VM object" lock. - * + * * IMPORTANT: the "vmp_pmapped", "vmp_xpmapped" and "vmp_clustered" bits can be modified while holding the * VM object "shared" lock + the page lock provided through the pmap_lock_phys_page function. * This is done in vm_fault_enter() and the CONSUME_CLUSTERED macro. * It's also ok to modify them behind just the VM object "exclusive" lock. 
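+ *
+ * Illustration only (a sketch of the convention just described, not code
+ * from this patch): setting one of those three bits under the shared
+ * object lock looks like
+ *
+ *	vm_object_lock_shared(object);
+ *	...
+ *	pmap_lock_phys_page(VM_PAGE_GET_PHYS_PAGE(m));
+ *	m->vmp_pmapped = TRUE;
+ *	pmap_unlock_phys_page(VM_PAGE_GET_PHYS_PAGE(m));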
*/ unsigned int vmp_busy:1, /* page is in transit (O) */ - vmp_wanted:1, /* someone is waiting for page (O) */ - vmp_tabled:1, /* page is in VP table (O) */ - vmp_hashed:1, /* page is in vm_page_buckets[] (O) + the bucket lock */ - vmp_fictitious:1, /* Physical page doesn't exist (O) */ - vmp_clustered:1, /* page is not the faulted page (O) or (O-shared AND pmap_page) */ - vmp_pmapped:1, /* page has at some time been entered into a pmap (O) or */ + vmp_wanted:1, /* someone is waiting for page (O) */ + vmp_tabled:1, /* page is in VP table (O) */ + vmp_hashed:1, /* page is in vm_page_buckets[] (O) + the bucket lock */ + vmp_fictitious:1, /* Physical page doesn't exist (O) */ + vmp_clustered:1, /* page is not the faulted page (O) or (O-shared AND pmap_page) */ + vmp_pmapped:1, /* page has at some time been entered into a pmap (O) or */ /* (O-shared AND pmap_page) */ - vmp_xpmapped:1, /* page has been entered with execute permission (O) or */ + vmp_xpmapped:1, /* page has been entered with execute permission (O) or */ /* (O-shared AND pmap_page) */ - vmp_wpmapped:1, /* page has been entered at some point into a pmap for write (O) */ - vmp_free_when_done:1, /* page is to be freed once cleaning is completed (O) */ - vmp_absent:1, /* Data has been requested, but is not yet available (O) */ - vmp_error:1, /* Data manager was unable to provide data due to error (O) */ - vmp_dirty:1, /* Page must be cleaned (O) */ - vmp_cleaning:1, /* Page clean has begun (O) */ - vmp_precious:1, /* Page is precious; data must be returned even if clean (O) */ - vmp_overwriting:1, /* Request to unlock has been made without having data. (O) */ + vmp_wpmapped:1, /* page has been entered at some point into a pmap for write (O) */ + vmp_free_when_done:1, /* page is to be freed once cleaning is completed (O) */ + vmp_absent:1, /* Data has been requested, but is not yet available (O) */ + vmp_error:1, /* Data manager was unable to provide data due to error (O) */ + vmp_dirty:1, /* Page must be cleaned (O) */ + vmp_cleaning:1, /* Page clean has begun (O) */ + vmp_precious:1, /* Page is precious; data must be returned even if clean (O) */ + vmp_overwriting:1, /* Request to unlock has been made without having data. (O) */ /* [See vm_fault_page_overwrite] */ - vmp_restart:1, /* Page was pushed higher in shadow chain by copy_call-related pagers */ + vmp_restart:1, /* Page was pushed higher in shadow chain by copy_call-related pagers */ /* start again at top of chain */ - vmp_unusual:1, /* Page is absent, error, restart or page locked */ - vmp_cs_validated:1, /* code-signing: page was checked */ - vmp_cs_tainted:1, /* code-signing: page is tainted */ - vmp_cs_nx:1, /* code-signing: page is nx */ - vmp_reusable:1, - vmp_lopage:1, - vmp_written_by_kernel:1, /* page was written by kernel (i.e. decompressed) */ - vmp_unused_object_bits:8; + vmp_unusual:1, /* Page is absent, error, restart or page locked */ + vmp_cs_validated:1, /* code-signing: page was checked */ + vmp_cs_tainted:1, /* code-signing: page is tainted */ + vmp_cs_nx:1, /* code-signing: page is nx */ + vmp_reusable:1, + vmp_lopage:1, + vmp_written_by_kernel:1, /* page was written by kernel (i.e. 
decompressed) */ + vmp_unused_object_bits:8; #if !defined(__arm__) && !defined(__arm64__) ppnum_t vmp_phys_page; /* Physical page number of the page */ @@ -278,81 +278,85 @@ struct vm_page { }; -typedef struct vm_page *vm_page_t; -extern vm_page_t vm_pages; -extern vm_page_t vm_page_array_beginning_addr; -extern vm_page_t vm_page_array_ending_addr; +typedef struct vm_page *vm_page_t; +extern vm_page_t vm_pages; +extern vm_page_t vm_page_array_beginning_addr; +extern vm_page_t vm_page_array_ending_addr; #if defined(__arm__) || defined(__arm64__) -extern unsigned int vm_first_phys_ppnum; +extern unsigned int vm_first_phys_ppnum; struct vm_page_with_ppnum { - struct vm_page vm_page_wo_ppnum; + struct vm_page vm_page_wo_ppnum; - ppnum_t vmp_phys_page; + ppnum_t vmp_phys_page; }; typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t; -static inline ppnum_t VM_PAGE_GET_PHYS_PAGE(vm_page_t m) +static inline ppnum_t +VM_PAGE_GET_PHYS_PAGE(vm_page_t m) { - if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) - return ((ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum)); - else - return (((vm_page_with_ppnum_t)m)->vmp_phys_page); + if (m >= vm_page_array_beginning_addr && m < vm_page_array_ending_addr) { + return (ppnum_t)((uintptr_t)(m - vm_page_array_beginning_addr) + vm_first_phys_ppnum); + } else { + return ((vm_page_with_ppnum_t)m)->vmp_phys_page; + } } -#define VM_PAGE_SET_PHYS_PAGE(m, ppnum) \ - MACRO_BEGIN \ - if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr) \ - ((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum; \ - assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m)); \ +#define VM_PAGE_SET_PHYS_PAGE(m, ppnum) \ + MACRO_BEGIN \ + if ((m) < vm_page_array_beginning_addr || (m) >= vm_page_array_ending_addr) \ + ((vm_page_with_ppnum_t)(m))->vmp_phys_page = ppnum; \ + assert(ppnum == VM_PAGE_GET_PHYS_PAGE(m)); \ MACRO_END #define VM_PAGE_GET_COLOR(m) (VM_PAGE_GET_PHYS_PAGE(m) & vm_color_mask) -#else /* defined(__arm__) || defined(__arm64__) */ +#else /* defined(__arm__) || defined(__arm64__) */ struct vm_page_with_ppnum { - struct vm_page vm_page_with_ppnum; + struct vm_page vm_page_with_ppnum; }; typedef struct vm_page_with_ppnum *vm_page_with_ppnum_t; -#define VM_PAGE_GET_PHYS_PAGE(page) (page)->vmp_phys_page -#define VM_PAGE_SET_PHYS_PAGE(page, ppnum) \ - MACRO_BEGIN \ - (page)->vmp_phys_page = ppnum; \ +#define VM_PAGE_GET_PHYS_PAGE(page) (page)->vmp_phys_page +#define VM_PAGE_SET_PHYS_PAGE(page, ppnum) \ + MACRO_BEGIN \ + (page)->vmp_phys_page = ppnum; \ MACRO_END #define VM_PAGE_GET_CLUMP(m) ((VM_PAGE_GET_PHYS_PAGE(m)) >> vm_clump_shift) #define VM_PAGE_GET_COLOR(m) ((VM_PAGE_GET_CLUMP(m)) & vm_color_mask) -#endif /* defined(__arm__) || defined(__arm64__) */ +#endif /* defined(__arm__) || defined(__arm64__) */ #if defined(__LP64__) -#define VM_VPLQ_ALIGNMENT 128 -#define VM_PACKED_POINTER_ALIGNMENT 64 /* must be a power of 2 */ -#define VM_PACKED_POINTER_SHIFT 6 +#define VM_VPLQ_ALIGNMENT 128 +#define VM_PACKED_POINTER_ALIGNMENT 64 /* must be a power of 2 */ +#define VM_PACKED_POINTER_SHIFT 6 -#define VM_PACKED_FROM_VM_PAGES_ARRAY 0x80000000 +#define VM_PACKED_FROM_VM_PAGES_ARRAY 0x80000000 -static inline vm_page_packed_t vm_page_pack_ptr(uintptr_t p) +static inline vm_page_packed_t +vm_page_pack_ptr(uintptr_t p) { vm_page_packed_t packed_ptr; - if (!p) - return ((vm_page_packed_t)0); + if (!p) { + return (vm_page_packed_t)0; + } if (p >= (uintptr_t)(vm_page_array_beginning_addr) && p < 
(uintptr_t)(vm_page_array_ending_addr)) { packed_ptr = ((vm_page_packed_t)(((vm_page_t)p - vm_page_array_beginning_addr))); - assert(! (packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY)); + assert(!(packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY)); packed_ptr |= VM_PACKED_FROM_VM_PAGES_ARRAY; return packed_ptr; } @@ -361,50 +365,52 @@ static inline vm_page_packed_t vm_page_pack_ptr(uintptr_t p) packed_ptr = ((vm_page_packed_t)(((uintptr_t)(p - (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)) >> VM_PACKED_POINTER_SHIFT)); assert(packed_ptr != 0); - assert(! (packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY)); + assert(!(packed_ptr & VM_PACKED_FROM_VM_PAGES_ARRAY)); return packed_ptr; } -static inline uintptr_t vm_page_unpack_ptr(uintptr_t p) +static inline uintptr_t +vm_page_unpack_ptr(uintptr_t p) { - if (!p) - return ((uintptr_t)0); + if (!p) { + return (uintptr_t)0; + } - if (p & VM_PACKED_FROM_VM_PAGES_ARRAY) - return ((uintptr_t)(&vm_pages[(uint32_t)(p & ~VM_PACKED_FROM_VM_PAGES_ARRAY)])); - return (((p << VM_PACKED_POINTER_SHIFT) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS)); + if (p & VM_PACKED_FROM_VM_PAGES_ARRAY) { + return (uintptr_t)(&vm_pages[(uint32_t)(p & ~VM_PACKED_FROM_VM_PAGES_ARRAY)]); + } + return (p << VM_PACKED_POINTER_SHIFT) + (uintptr_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS; } -#define VM_PAGE_PACK_PTR(p) vm_page_pack_ptr((uintptr_t)(p)) -#define VM_PAGE_UNPACK_PTR(p) vm_page_unpack_ptr((uintptr_t)(p)) +#define VM_PAGE_PACK_PTR(p) vm_page_pack_ptr((uintptr_t)(p)) +#define VM_PAGE_UNPACK_PTR(p) vm_page_unpack_ptr((uintptr_t)(p)) -#define VM_PAGE_OBJECT(p) ((vm_object_t)(VM_PAGE_UNPACK_PTR(p->vmp_object))) -#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) +#define VM_PAGE_OBJECT(p) ((vm_object_t)(VM_PAGE_UNPACK_PTR(p->vmp_object))) +#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) -#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \ -MACRO_BEGIN \ - (p)->vmp_snext = 0; \ +#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \ +MACRO_BEGIN \ + (p)->vmp_snext = 0; \ MACRO_END -#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) VM_PAGE_PACK_PTR(p) +#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) VM_PAGE_PACK_PTR(p) static __inline__ void vm_page_enqueue_tail( - vm_page_queue_t que, - vm_page_queue_entry_t elt) + vm_page_queue_t que, + vm_page_queue_entry_t elt) { - vm_page_queue_entry_t old_tail; + vm_page_queue_entry_t old_tail; old_tail = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(que->prev); elt->next = VM_PAGE_PACK_PTR(que); elt->prev = que->prev; - old_tail->next = VM_PAGE_PACK_PTR(elt); - que->prev = VM_PAGE_PACK_PTR(elt); + que->prev = old_tail->next = VM_PAGE_PACK_PTR(elt); } @@ -412,15 +418,18 @@ static __inline__ void vm_page_remque( vm_page_queue_entry_t elt) { - vm_page_queue_entry_t next_elt, prev_elt; + vm_page_queue_entry_t next; + vm_page_queue_entry_t prev; + vm_page_packed_t next_pck = elt->next; + vm_page_packed_t prev_pck = elt->prev; - next_elt = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(elt->next); + next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(next_pck); - /* next_elt may equal prev_elt (and the queue head) if elt was the only element */ - prev_elt = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(elt->prev); + /* next may equal prev (and the queue head) if elt was the only element */ + prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev_pck); - next_elt->prev = VM_PAGE_PACK_PTR(prev_elt); - prev_elt->next = VM_PAGE_PACK_PTR(next_elt); + next->prev = prev_pck; + prev->next = next_pck; elt->next = 0; elt->prev = 0; @@ -435,68 +444,70 @@ vm_page_remque( * void 
vm_page_queue_init(q) * vm_page_queue_t q; \* MODIFIED *\ */ -#define vm_page_queue_init(q) \ -MACRO_BEGIN \ - assert((((uintptr_t)q) & (VM_PACKED_POINTER_ALIGNMENT-1)) == 0); \ - assert((VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR((uintptr_t)q))) == (uintptr_t)q); \ - (q)->next = VM_PAGE_PACK_PTR(q); \ - (q)->prev = VM_PAGE_PACK_PTR(q); \ +#define vm_page_queue_init(q) \ +MACRO_BEGIN \ + assert((((uintptr_t)q) & (VM_PACKED_POINTER_ALIGNMENT-1)) == 0); \ + assert((VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR((uintptr_t)q))) == (uintptr_t)q); \ + (q)->next = VM_PAGE_PACK_PTR(q); \ + (q)->prev = VM_PAGE_PACK_PTR(q); \ MACRO_END /* - * Macro: vm_page_queue_enter - * Function: - * Insert a new element at the tail of the queue. - * Header: - * void vm_page_queue_enter(q, elt, type, field) - * queue_t q; - * <type> elt; - * <type> is what's in our queue - * <field> is the chain field in (*<type>) - * Note: - * This should only be used with Method 2 queue iteration (element chains) + * Macro: vm_page_queue_enter + * Function: + * Insert a new element at the tail of the vm_page queue. + * Header: + * void vm_page_queue_enter(q, elt, field) + * queue_t q; + * vm_page_t elt; + * <field> is the list field in vm_page_t + * + * This macro's arguments have to match the generic "queue_enter()" macro which is + * what is used for this on 32 bit kernels. */ -#define vm_page_queue_enter(head, elt, type, field) \ -MACRO_BEGIN \ - vm_page_queue_entry_t __prev; \ - \ - __prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->prev)); \ - if ((head) == __prev) { \ - (head)->next = VM_PAGE_PACK_PTR(elt); \ - } \ - else { \ - ((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(elt); \ - } \ - (elt)->field.prev = VM_PAGE_PACK_PTR(__prev); \ - (elt)->field.next = VM_PAGE_PACK_PTR(head); \ - (head)->prev = VM_PAGE_PACK_PTR(elt); \ +#define vm_page_queue_enter(head, elt, field) \ +MACRO_BEGIN \ + vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt); \ + vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \ + vm_page_packed_t __pck_prev = (head)->prev; \ + \ + if (__pck_head == __pck_prev) { \ + (head)->next = __pck_elt; \ + } else { \ + vm_page_t __prev; \ + __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \ + __prev->field.next = __pck_elt; \ + } \ + (elt)->field.prev = __pck_prev; \ + (elt)->field.next = __pck_head; \ + (head)->prev = __pck_elt; \ MACRO_END +#if defined(__x86_64__) /* * These are helper macros for vm_page_queue_enter_clump to assist * with conditional compilation (release / debug / development) */ #if DEVELOPMENT || DEBUG -#define __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field) \ -MACRO_BEGIN \ - if(__check) { /* if first forward buddy..
*/ \ - if(__prev) { /* ..and if a backward buddy was found, verify link consistency */ \ - assert(__p == (vm_page_t) VM_PAGE_UNPACK_PTR(__prev->next)); \ - assert(__prev == (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__p->field.prev)); \ - } \ - __check=0; \ - } \ +#define __DEBUG_CHECK_BUDDIES(__prev, __p, field) \ +MACRO_BEGIN \ + if (__prev != NULL) { \ + assert(__p == (vm_page_t)VM_PAGE_UNPACK_PTR(__prev->next)); \ + assert(__prev == (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__p->field.prev)); \ + } \ MACRO_END -#define __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next) \ -MACRO_BEGIN \ - vm_page_queue_entry_t __tmp; \ - for(__i=0, __tmp=__first; __i<__n_free; __i++) \ - __tmp=(vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__tmp->next); \ - assert(__tmp == __last_next); \ +#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next) \ +MACRO_BEGIN \ + unsigned int __i; \ + vm_page_queue_entry_t __tmp; \ + for (__i = 0, __tmp = __first; __i < __n_free; __i++) { \ + __tmp = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(__tmp->next); \ + } \ + assert(__tmp == __last_next); \ MACRO_END #define __DEBUG_STAT_INCREMENT_INRANGE vm_clump_inrange++ @@ -505,8 +516,8 @@ MACRO_END #else -#define __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field) __check=1 -#define __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next) +#define __DEBUG_CHECK_BUDDIES(__prev, __p, field) +#define __DEBUG_VERIFY_LINKS(__first, __n_free, __last_next) #define __DEBUG_STAT_INCREMENT_INRANGE #define __DEBUG_STAT_INCREMENT_INSERTS #define __DEBUG_STAT_INCREMENT_PROMOTES(__n_free) @@ -514,208 +525,251 @@ MACRO_END #endif /* if DEVELOPMENT || DEBUG */ /* - * Macro: vm_page_queue_enter_clump - * Function: - * Insert a new element into the free queue and clump pages within the same 16K boundary together - * - * Header: - * void vm_page_queue_enter_clump(q, elt, type, field) - * queue_t q; - * <type> elt; - * <type> is what's in our queue - * <field> is the chain field in (*<type>) - * Note: - * This should only be used with Method 2 queue iteration (element chains) + * Insert a new page into a free queue and clump pages within the same 16K boundary together */ -#if defined(__x86_64__) -#define vm_page_queue_enter_clump(head, elt, type, field) \ -MACRO_BEGIN \ - ppnum_t __clump_num; \ - unsigned int __i, __n, __n_free=1, __check=1; \ - vm_page_queue_entry_t __prev=0, __next, __last, __last_next, __first, __first_prev, __head_next; \ - vm_page_t __p; \ - \ - /* if elt is part of vm_pages[] */ \ - if((elt) >= vm_page_array_beginning_addr && (elt) < vm_page_array_boundary) { \ - __first = __last = (vm_page_queue_entry_t) (elt); \ - __clump_num = VM_PAGE_GET_CLUMP(elt); \ - __n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask; \ - /* scan backward looking for a buddy page */ \ - for(__i=0, __p=(elt)-1; __i<__n && __p>=vm_page_array_beginning_addr; __i++, __p--) { \ - if(__p->vmp_q_state == VM_PAGE_ON_FREE_Q && __clump_num == VM_PAGE_GET_CLUMP(__p)) { \ - if(__prev == 0) __prev = (vm_page_queue_entry_t) __p; \ - __first = (vm_page_queue_entry_t) __p; \ - __n_free++; \ - } \ - } \ - /* scan forward looking for a buddy page */ \ - for(__i=__n+1, __p=(elt)+1; __i<vm_clump_size && __p<vm_page_array_boundary; __i++, __p++) { \ - if(__p->vmp_q_state == VM_PAGE_ON_FREE_Q && __clump_num == VM_PAGE_GET_CLUMP(__p)) { \ - __DEBUG_CHECK_BUDDIES(__check, __prev, __p, field); \ - if(__prev == 0) __prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__p->field.prev); \ - __last = (vm_page_queue_entry_t) __p; \ - __n_free++; \ - } \ - } \ - __DEBUG_STAT_INCREMENT_INRANGE; \ - } \ - /* if elt is not part of vm_pages or if 1st page in clump, insert at tail
*/ \ - if(__prev == 0) __prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR((head)->prev); \ - \ - /* insert the element */ \ - __next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__prev->next); \ - (elt)->field.next = __prev->next; \ - (elt)->field.prev = __next->prev; \ - __prev->next = __next->prev = VM_PAGE_PACK_PTR(elt); \ - __DEBUG_STAT_INCREMENT_INSERTS; \ - \ - /* check if clump needs to be promoted to head */ \ - if(__n_free >= vm_clump_promote_threshold && __n_free > 1) { \ - __first_prev = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__first->prev); \ - if(__first_prev != (head)) { /* if not at head already */ \ - __last_next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR(__last->next); \ - /* verify that the links within the clump are consistent */ \ - __DEBUG_VERIFY_LINKS(__i, __first, __n_free, __last_next); \ - /* promote clump to head */ \ - __first_prev->next = __last->next; \ - __last_next->prev = __first->prev; \ - __first->prev = VM_PAGE_PACK_PTR(head); \ - __last->next = (head)->next; \ - __head_next = (vm_page_queue_entry_t) VM_PAGE_UNPACK_PTR((head)->next); \ - __head_next->prev = VM_PAGE_PACK_PTR(__last); \ - (head)->next = VM_PAGE_PACK_PTR(__first); \ - __DEBUG_STAT_INCREMENT_PROMOTES(__n_free); \ - } \ - } \ -MACRO_END +static inline void +vm_page_queue_enter_clump( + vm_page_queue_t head, + vm_page_t elt) +{ + vm_page_queue_entry_t first; /* first page in the clump */ + vm_page_queue_entry_t last; /* last page in the clump */ + vm_page_queue_entry_t prev = NULL; + vm_page_queue_entry_t next; + uint_t n_free = 1; + extern unsigned int vm_pages_count; + extern unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold; + extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes; + + /* + * If elt is part of the vm_pages[] array, find its neighboring buddies in the array. + */ + if (vm_page_array_beginning_addr <= elt && elt < &vm_pages[vm_pages_count]) { + vm_page_t p; + uint_t i; + uint_t n; + ppnum_t clump_num; + + first = last = (vm_page_queue_entry_t)elt; + clump_num = VM_PAGE_GET_CLUMP(elt); + n = VM_PAGE_GET_PHYS_PAGE(elt) & vm_clump_mask; + + /* + * Check for preceding vm_pages[] entries in the same clump + */ + for (i = 0, p = elt - 1; i < n && vm_page_array_beginning_addr <= p; i++, p--) { + if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) { + if (prev == NULL) { + prev = (vm_page_queue_entry_t)p; + } + first = (vm_page_queue_entry_t)p; + n_free++; + } + } + + /* + * Check the following vm_pages[] entries in the same clump + */ + for (i = n + 1, p = elt + 1; i < vm_clump_size && p < &vm_pages[vm_pages_count]; i++, p++) { + if (p->vmp_q_state == VM_PAGE_ON_FREE_Q && clump_num == VM_PAGE_GET_CLUMP(p)) { + if (last == (vm_page_queue_entry_t)elt) { /* first one only */ + __DEBUG_CHECK_BUDDIES(prev, p, vmp_pageq); + } + + if (prev == NULL) { + prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev); + } + last = (vm_page_queue_entry_t)p; + n_free++; + } + } + __DEBUG_STAT_INCREMENT_INRANGE; + } + + /* if elt is not part of vm_pages or if 1st page in clump, insert at tail */ + if (prev == NULL) { + prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->prev); + } + + /* insert the element */ + next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(prev->next); + elt->vmp_pageq.next = prev->next; + elt->vmp_pageq.prev = next->prev; + prev->next = next->prev = VM_PAGE_PACK_PTR(elt); + __DEBUG_STAT_INCREMENT_INSERTS; + + /* + * Check if clump needs to be promoted to head.
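+ * Promotion unlinks the whole run of free buddies (first..last) and
+ * relinks it at the head of the queue, so subsequent removals from the
+ * head hand out the physically contiguous pages of this clump first.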
+ */ + if (n_free >= vm_clump_promote_threshold && n_free > 1) { + vm_page_queue_entry_t first_prev; + + first_prev = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(first->prev); + + /* If not at head already */ + if (first_prev != head) { + vm_page_queue_entry_t last_next; + vm_page_queue_entry_t head_next; + + last_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(last->next); + + /* verify that the links within the clump are consistent */ + __DEBUG_VERIFY_LINKS(first, n_free, last_next); + + /* promote clump to head */ + first_prev->next = last->next; + last_next->prev = first->prev; + first->prev = VM_PAGE_PACK_PTR(head); + last->next = head->next; + + head_next = (vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR(head->next); + head_next->prev = VM_PAGE_PACK_PTR(last); + head->next = VM_PAGE_PACK_PTR(first); + __DEBUG_STAT_INCREMENT_PROMOTES(n_free); + } + } +} #endif /* - * Macro: vm_page_queue_enter_first - * Function: - * Insert a new element at the head of the queue. - * Header: - * void queue_enter_first(q, elt, type, field) - * queue_t q; - * <type> elt; - * <type> is what's in our queue - * <field> is the chain field in (*<type>) - * Note: - * This should only be used with Method 2 queue iteration (element chains) + * Macro: vm_page_queue_enter_first + * Function: + * Insert a new element at the head of the vm_page queue. + * Header: + * void queue_enter_first(q, elt, field) + * queue_t q; + * vm_page_t elt; + * <field> is the linkage field in vm_page + * + * This macro's arguments have to match the generic "queue_enter_first()" macro which is + * what is used for this on 32 bit kernels. */ -#define vm_page_queue_enter_first(head, elt, type, field) \ -MACRO_BEGIN \ - vm_page_queue_entry_t __next; \ - \ - __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((head)->next)); \ - if ((head) == __next) { \ - (head)->prev = VM_PAGE_PACK_PTR(elt); \ - } \ - else { \ - ((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(elt); \ - } \ - (elt)->field.next = VM_PAGE_PACK_PTR(__next); \ - (elt)->field.prev = VM_PAGE_PACK_PTR(head); \ - (head)->next = VM_PAGE_PACK_PTR(elt); \ +#define vm_page_queue_enter_first(head, elt, field) \ +MACRO_BEGIN \ + vm_page_packed_t __pck_next = (head)->next; \ + vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \ + vm_page_packed_t __pck_elt = VM_PAGE_PACK_PTR(elt); \ + \ + if (__pck_head == __pck_next) { \ + (head)->prev = __pck_elt; \ + } else { \ + vm_page_t __next; \ + __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \ + __next->field.prev = __pck_elt; \ + } \ + \ + (elt)->field.next = __pck_next; \ + (elt)->field.prev = __pck_head; \ + (head)->next = __pck_elt; \ MACRO_END /* - * Macro: vm_page_queue_remove - * Function: - * Remove an arbitrary item from the queue. - * Header: - * void vm_page_queue_remove(q, qe, type, field) - * arguments as in vm_page_queue_enter - * Note: - * This should only be used with Method 2 queue iteration (element chains) + * Macro: vm_page_queue_remove + * Function: + * Remove an arbitrary page from a vm_page queue. + * Header: + * void vm_page_queue_remove(q, qe, field) + * arguments as in vm_page_queue_enter + * + * This macro's arguments have to match the generic "queue_remove()" macro which is + * what is used for this on 32 bit kernels.
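+ *
+ * Typical call (a sketch only; vm_page_queue_active and the vmp_pageq
+ * linkage are both declared later in this header):
+ *
+ *	vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);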
*/ -#define vm_page_queue_remove(head, elt, type, field) \ -MACRO_BEGIN \ - vm_page_queue_entry_t __next, __prev; \ - \ - __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.next)); \ - __prev = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((elt)->field.prev)); \ - \ - if ((head) == __next) \ - (head)->prev = VM_PAGE_PACK_PTR(__prev); \ - else \ - ((type)(void *)__next)->field.prev = VM_PAGE_PACK_PTR(__prev); \ - \ - if ((head) == __prev) \ - (head)->next = VM_PAGE_PACK_PTR(__next); \ - else \ - ((type)(void *)__prev)->field.next = VM_PAGE_PACK_PTR(__next); \ - \ - (elt)->field.next = 0; \ - (elt)->field.prev = 0; \ +#define vm_page_queue_remove(head, elt, field) \ +MACRO_BEGIN \ + vm_page_packed_t __pck_next = (elt)->field.next; \ + vm_page_packed_t __pck_prev = (elt)->field.prev; \ + vm_page_t __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \ + vm_page_t __prev = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_prev); \ + \ + if ((void *)(head) == (void *)__next) { \ + (head)->prev = __pck_prev; \ + } else { \ + __next->field.prev = __pck_prev; \ + } \ + \ + if ((void *)(head) == (void *)__prev) { \ + (head)->next = __pck_next; \ + } else { \ + __prev->field.next = __pck_next; \ + } \ + \ + (elt)->field.next = 0; \ + (elt)->field.prev = 0; \ MACRO_END /* - * Macro: vm_page_queue_remove_first - * Function: - * Remove and return the entry at the head of - * the queue. - * Header: - * vm_page_queue_remove_first(head, entry, type, field) - * entry is returned by reference - * Note: - * This should only be used with Method 2 queue iteration (element chains) + * Macro: vm_page_queue_remove_first + * + * Function: + * Remove and return the entry at the head of a vm_page queue. + * + * Header: + * vm_page_queue_remove_first(head, entry, field) + * N.B. entry is returned by reference + * + * This macro's arguments have to match the generic "queue_remove_first()" macro which is + * what is used for this on 32 bit kernels. 
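+ *
+ * Typical call (sketch only; vm_lopage_queue_free is declared later in
+ * this header):
+ *
+ *	vm_page_t mem;
+ *	vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);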
*/ -#define vm_page_queue_remove_first(head, entry, type, field) \ -MACRO_BEGIN \ - vm_page_queue_entry_t __next; \ - \ - (entry) = (type)(void *) VM_PAGE_UNPACK_PTR(((head)->next)); \ - __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((entry)->field.next)); \ - \ - if ((head) == __next) \ - (head)->prev = VM_PAGE_PACK_PTR(head); \ - else \ - ((type)(void *)(__next))->field.prev = VM_PAGE_PACK_PTR(head); \ - (head)->next = VM_PAGE_PACK_PTR(__next); \ - \ - (entry)->field.next = 0; \ - (entry)->field.prev = 0; \ +#define vm_page_queue_remove_first(head, entry, field) \ +MACRO_BEGIN \ + vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \ + vm_page_packed_t __pck_next; \ + vm_page_t __next; \ + \ + (entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next); \ + __pck_next = (entry)->field.next; \ + __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \ + \ + if (__pck_head == __pck_next) { \ + (head)->prev = __pck_head; \ + } else { \ + __next->field.prev = __pck_head; \ + } \ + \ + (head)->next = __pck_next; \ + (entry)->field.next = 0; \ + (entry)->field.prev = 0; \ MACRO_END +#if defined(__x86_64__) /* - * Macro: vm_page_queue_remove_first_with_clump - * Function: - * Remove and return the entry at the head of the free queue - * end is set to 1 to indicate that we just returned the last page in a clump + * Macro: vm_page_queue_remove_first_with_clump + * Function: + * Remove and return the entry at the head of the free queue + * end is set to 1 to indicate that we just returned the last page in a clump * - * Header: - * vm_page_queue_remove_first_with_clump(head, entry, type, field, end) - * entry is returned by reference - * end is returned by reference - * Note: - * This should only be used with Method 2 queue iteration (element chains) + * Header: + * vm_page_queue_remove_first_with_clump(head, entry, end) + * entry is returned by reference + * end is returned by reference */ -#if defined(__x86_64__) -#define vm_page_queue_remove_first_with_clump(head, entry, type, field, end) \ -MACRO_BEGIN \ - vm_page_queue_entry_t __next; \ - \ - (entry) = (type)(void *) VM_PAGE_UNPACK_PTR(((head)->next)); \ - __next = ((vm_page_queue_entry_t)VM_PAGE_UNPACK_PTR((entry)->field.next)); \ - \ - (end)=0; \ - if ((head) == __next) { \ - (head)->prev = VM_PAGE_PACK_PTR(head); \ - (end)=1; \ - } \ - else { \ - ((type)(void *)(__next))->field.prev = VM_PAGE_PACK_PTR(head); \ - if(VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(((type)(void *)(__next)))) (end)=1; \ - } \ - (head)->next = VM_PAGE_PACK_PTR(__next); \ - \ - (entry)->field.next = 0; \ - (entry)->field.prev = 0; \ - \ +#define vm_page_queue_remove_first_with_clump(head, entry, end) \ +MACRO_BEGIN \ + vm_page_packed_t __pck_head = VM_PAGE_PACK_PTR(head); \ + vm_page_packed_t __pck_next; \ + vm_page_t __next; \ + \ + (entry) = (vm_page_t)VM_PAGE_UNPACK_PTR((head)->next); \ + __pck_next = (entry)->vmp_pageq.next; \ + __next = (vm_page_t)VM_PAGE_UNPACK_PTR(__pck_next); \ + \ + (end) = 0; \ + if (__pck_head == __pck_next) { \ + (head)->prev = __pck_head; \ + (end) = 1; \ + } else { \ + __next->vmp_pageq.prev = __pck_head; \ + if (VM_PAGE_GET_CLUMP(entry) != VM_PAGE_GET_CLUMP(__next)) { \ + (end) = 1; \ + } \ + } \ + \ + (head)->next = __pck_next; \ + (entry)->vmp_pageq.next = 0; \ + (entry)->vmp_pageq.prev = 0; \ MACRO_END #endif @@ -729,7 +783,7 @@ MACRO_END * vm_page_queue_t q; * vm_page_queue_entry_t qe; */ -#define vm_page_queue_end(q, qe) ((q) == (qe)) +#define vm_page_queue_end(q, qe) ((q) == (qe)) /* @@ -740,7 +794,7 @@ MACRO_END * boolean_t 
vm_page_queue_empty(q) * vm_page_queue_t q; */ -#define vm_page_queue_empty(q) vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q))) +#define vm_page_queue_empty(q) vm_page_queue_end((q), ((vm_page_queue_entry_t)vm_page_queue_first(q))) @@ -752,7 +806,7 @@ MACRO_END * uintpr_t vm_page_queue_first(q) * vm_page_queue_t q; \* IN *\ */ -#define vm_page_queue_first(q) (VM_PAGE_UNPACK_PTR((q)->next)) +#define vm_page_queue_first(q) (VM_PAGE_UNPACK_PTR((q)->next)) @@ -764,7 +818,7 @@ MACRO_END * vm_page_queue_entry_t queue_last(q) * queue_t q; \* IN *\ */ -#define vm_page_queue_last(q) (VM_PAGE_UNPACK_PTR((q)->prev)) +#define vm_page_queue_last(q) (VM_PAGE_UNPACK_PTR((q)->prev)) @@ -776,7 +830,7 @@ MACRO_END * uintpr_t vm_page_queue_next(qc) * vm_page_queue_t qc; */ -#define vm_page_queue_next(qc) (VM_PAGE_UNPACK_PTR((qc)->next)) +#define vm_page_queue_next(qc) (VM_PAGE_UNPACK_PTR((qc)->next)) @@ -788,88 +842,85 @@ MACRO_END * uinptr_t vm_page_queue_prev(qc) * vm_page_queue_t qc; */ -#define vm_page_queue_prev(qc) (VM_PAGE_UNPACK_PTR((qc)->prev)) +#define vm_page_queue_prev(qc) (VM_PAGE_UNPACK_PTR((qc)->prev)) /* * Macro: vm_page_queue_iterate * Function: - * iterate over each item in the queue. * Generates a 'for' loop, setting elt to * each item in turn (by reference). * Header: - * vm_page_queue_iterate(q, elt, type, field) * queue_t q; - * <type> elt; - * <type> is what's in our queue - * <field> is the chain field in (*<type>) - * Note: - * This should only be used with Method 2 queue iteration (element chains) + * iterate over each item in a vm_page queue. + * Generates a 'for' loop, setting elt to + * each item in turn (by reference). + * Header: + * vm_page_queue_iterate(q, elt, field) + * queue_t q; + * vm_page_t elt; + * <field> is the chain field in vm_page_t */ -#define vm_page_queue_iterate(head, elt, type, field) \ - for ((elt) = (type)(void *) vm_page_queue_first(head); \ - !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \ - (elt) = (type)(void *) vm_page_queue_next(&(elt)->field)) +#define vm_page_queue_iterate(head, elt, field) \ + for ((elt) = (vm_page_t)vm_page_queue_first(head); \ + !vm_page_queue_end((head), (vm_page_queue_entry_t)(elt)); \ + (elt) = (vm_page_t)vm_page_queue_next(&(elt)->field)) \ #else -#define VM_VPLQ_ALIGNMENT 128 -#define VM_PACKED_POINTER_ALIGNMENT 4 -#define VM_PACKED_POINTER_SHIFT 0 +#define VM_VPLQ_ALIGNMENT 128 +#define VM_PACKED_POINTER_ALIGNMENT 4 +#define VM_PACKED_POINTER_SHIFT 0 -#define VM_PACKED_FROM_VM_PAGES_ARRAY 0 +#define VM_PACKED_FROM_VM_PAGES_ARRAY 0 -#define VM_PAGE_PACK_PTR(p) (p) -#define VM_PAGE_UNPACK_PTR(p) ((uintptr_t)(p)) +#define VM_PAGE_PACK_PTR(p) (p) +#define VM_PAGE_UNPACK_PTR(p) ((uintptr_t)(p)) -#define VM_PAGE_OBJECT(p) (vm_object_t)(p->vmp_object) -#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) +#define VM_PAGE_OBJECT(p) (vm_object_t)(p->vmp_object) +#define VM_PAGE_PACK_OBJECT(o) ((vm_page_object_t)(VM_PAGE_PACK_PTR(o))) -#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \ -MACRO_BEGIN \ - (p)->vmp_pageq.next = 0; \ - (p)->vmp_pageq.prev = 0; \ +#define VM_PAGE_ZERO_PAGEQ_ENTRY(p) \ +MACRO_BEGIN \ + (p)->vmp_pageq.next = 0; \ + (p)->vmp_pageq.prev = 0; \ MACRO_END -#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) ((queue_entry_t)(p)) - -#define vm_page_remque remque -#define vm_page_enqueue_tail enqueue_tail -#define vm_page_queue_init queue_init -#define vm_page_queue_enter queue_enter -#define vm_page_queue_enter_first queue_enter_first -#define vm_page_queue_remove queue_remove -#define vm_page_queue_remove_first queue_remove_first -#define vm_page_queue_end queue_end -#define vm_page_queue_empty queue_empty -#define
vm_page_queue_first queue_first -#define vm_page_queue_last queue_last -#define vm_page_queue_next queue_next -#define vm_page_queue_prev queue_prev -#define vm_page_queue_iterate queue_iterate +#define VM_PAGE_CONVERT_TO_QUEUE_ENTRY(p) ((queue_entry_t)(p)) + +#define vm_page_remque remque +#define vm_page_enqueue_tail enqueue_tail +#define vm_page_queue_init queue_init +#define vm_page_queue_enter(h, e, f) queue_enter(h, e, vm_page_t, f) +#define vm_page_queue_enter_first(h, e, f) queue_enter_first(h, e, vm_page_t, f) +#define vm_page_queue_remove(h, e, f) queue_remove(h, e, vm_page_t, f) +#define vm_page_queue_remove_first(h, e, f) queue_remove_first(h, e, vm_page_t, f) +#define vm_page_queue_end queue_end +#define vm_page_queue_empty queue_empty +#define vm_page_queue_first queue_first +#define vm_page_queue_last queue_last +#define vm_page_queue_next queue_next +#define vm_page_queue_prev queue_prev +#define vm_page_queue_iterate(h, e, f) queue_iterate(h, e, vm_page_t, f) #endif -/* +/* * VM_PAGE_MIN_SPECULATIVE_AGE_Q through VM_PAGE_MAX_SPECULATIVE_AGE_Q * represents a set of aging bins that are 'protected'... * * VM_PAGE_SPECULATIVE_AGED_Q is a list of the speculative pages that have * not yet been 'claimed' but have been aged out of the protective bins - * this occurs in vm_page_speculate when it advances to the next bin + * this occurs in vm_page_speculate when it advances to the next bin * and discovers that it is still occupied... at that point, all of the * pages in that bin are moved to the VM_PAGE_SPECULATIVE_AGED_Q. the pages * in that bin are all guaranteed to have reached at least the maximum age * we allow for a protected page... they can be older if there is no * memory pressure to pull them from the bin, or there are no new speculative pages * being generated to push them out. - * this list is the one that vm_pageout_scan will prefer when looking + * this list is the one that vm_pageout_scan will prefer when looking * for pages to move to the underweight free list - * + * * VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS * defines the amount of time a speculative page is normally * allowed to live in the 'protected' state (i.e. 
not available @@ -883,48 +934,48 @@ MACRO_END * vm_pageout_scan is also allowed to pull pages from a protected * bin if the bin has reached the "age of consent" we've set */ -#define VM_PAGE_MAX_SPECULATIVE_AGE_Q 10 -#define VM_PAGE_MIN_SPECULATIVE_AGE_Q 1 -#define VM_PAGE_SPECULATIVE_AGED_Q 0 +#define VM_PAGE_MAX_SPECULATIVE_AGE_Q 10 +#define VM_PAGE_MIN_SPECULATIVE_AGE_Q 1 +#define VM_PAGE_SPECULATIVE_AGED_Q 0 -#define VM_PAGE_SPECULATIVE_Q_AGE_MS 500 +#define VM_PAGE_SPECULATIVE_Q_AGE_MS 500 struct vm_speculative_age_q { /* * memory queue for speculative pages via clustered pageins */ - vm_page_queue_head_t age_q; - mach_timespec_t age_ts; + vm_page_queue_head_t age_q; + mach_timespec_t age_ts; } __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); extern -struct vm_speculative_age_q vm_page_queue_speculative[]; +struct vm_speculative_age_q vm_page_queue_speculative[]; -extern int speculative_steal_index; -extern int speculative_age_index; -extern unsigned int vm_page_speculative_q_age_ms; +extern int speculative_steal_index; +extern int speculative_age_index; +extern unsigned int vm_page_speculative_q_age_ms; typedef struct vm_locks_array { - char pad __attribute__ ((aligned (64))); - lck_mtx_t vm_page_queue_lock2 __attribute__ ((aligned (64))); - lck_mtx_t vm_page_queue_free_lock2 __attribute__ ((aligned (64))); - char pad2 __attribute__ ((aligned (64))); + char pad __attribute__ ((aligned(64))); + lck_mtx_t vm_page_queue_lock2 __attribute__ ((aligned(64))); + lck_mtx_t vm_page_queue_free_lock2 __attribute__ ((aligned(64))); + char pad2 __attribute__ ((aligned(64))); } vm_locks_array_t; #if CONFIG_BACKGROUND_QUEUE extern void vm_page_assign_background_state(vm_page_t mem); -extern void vm_page_update_background_state(vm_page_t mem); -extern void vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first); -extern void vm_page_remove_from_backgroundq(vm_page_t mem); +extern void vm_page_update_background_state(vm_page_t mem); +extern void vm_page_add_to_backgroundq(vm_page_t mem, boolean_t first); +extern void vm_page_remove_from_backgroundq(vm_page_t mem); #endif -#define VM_PAGE_WIRED(m) ((m)->vmp_q_state == VM_PAGE_IS_WIRED) -#define NEXT_PAGE(m) ((m)->vmp_snext) -#define NEXT_PAGE_PTR(m) (&(m)->vmp_snext) +#define VM_PAGE_WIRED(m) ((m)->vmp_q_state == VM_PAGE_IS_WIRED) +#define NEXT_PAGE(m) ((m)->vmp_snext) +#define NEXT_PAGE_PTR(m) (&(m)->vmp_snext) /* * XXX The unusual bit should not be necessary. Most of the bit @@ -938,8 +989,8 @@ extern void vm_page_remove_from_backgroundq(vm_page_t mem); * current call-sites can be left intact for future uses. */ -#define VM_PAGE_CHECK(mem) \ - MACRO_BEGIN \ +#define VM_PAGE_CHECK(mem) \ + MACRO_BEGIN \ MACRO_END /* Page coloring: @@ -951,42 +1002,42 @@ extern void vm_page_remove_from_backgroundq(vm_page_t mem); * The boot-arg "colors" may be used to override vm_colors. * Note that there is little harm in having more colors than needed. 
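+ *
+ * Example (illustration only): on configurations where the color is
+ * (phys_page & vm_color_mask), vm_colors = 32 gives vm_color_mask = 31,
+ * so physical pages 0x1000 and 0x1020 share color 0 and compete for the
+ * same cache sets, while page 0x1001 falls on color 1.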
*/ - + #define MAX_COLORS 128 -#define DEFAULT_COLORS 32 +#define DEFAULT_COLORS 32 extern -unsigned int vm_colors; /* must be in range 1..MAX_COLORS */ +unsigned int vm_colors; /* must be in range 1..MAX_COLORS */ extern -unsigned int vm_color_mask; /* must be (vm_colors-1) */ +unsigned int vm_color_mask; /* must be (vm_colors-1) */ extern -unsigned int vm_cache_geometry_colors; /* optimal #colors based on cache geometry */ +unsigned int vm_cache_geometry_colors; /* optimal #colors based on cache geometry */ /* * Wired memory is a very limited resource and we can't let users exhaust it * and deadlock the entire system. We enforce the following limits: - * + * * vm_user_wire_limit (default: all memory minus vm_global_no_user_wire_amount) - * how much memory can be user-wired in one user task + * how much memory can be user-wired in one user task * * vm_global_user_wire_limit (default: same as vm_user_wire_limit) - * how much memory can be user-wired in all user tasks + * how much memory can be user-wired in all user tasks * * vm_global_no_user_wire_amount (default: VM_NOT_USER_WIREABLE) * how much memory must remain user-unwired at any time */ -#define VM_NOT_USER_WIREABLE (64*1024*1024) /* 64MB */ +#define VM_NOT_USER_WIREABLE (64*1024*1024) /* 64MB */ extern -vm_map_size_t vm_user_wire_limit; +vm_map_size_t vm_user_wire_limit; extern -vm_map_size_t vm_global_user_wire_limit; +vm_map_size_t vm_global_user_wire_limit; extern -vm_map_size_t vm_global_no_user_wire_amount; +vm_map_size_t vm_global_no_user_wire_amount; /* * Each pageable resident page falls into one of three lists: * - * free + * free * Available for allocation now. The free list is * actually an array of lists, one per color. * inactive @@ -1008,384 +1059,389 @@ vm_map_size_t vm_global_no_user_wire_amount; #define VPL_LOCK_SPIN 1 struct vpl { - vm_page_queue_head_t vpl_queue; - unsigned int vpl_count; - unsigned int vpl_internal_count; - unsigned int vpl_external_count; -#ifdef VPL_LOCK_SPIN - lck_spin_t vpl_lock; + vm_page_queue_head_t vpl_queue; + unsigned int vpl_count; + unsigned int vpl_internal_count; + unsigned int vpl_external_count; +#ifdef VPL_LOCK_SPIN + lck_spin_t vpl_lock; #else - lck_mtx_t vpl_lock; - lck_mtx_ext_t vpl_lock_ext; + lck_mtx_t vpl_lock; + lck_mtx_ext_t vpl_lock_ext; #endif }; -struct vplq { +struct vplq { union { char cache_line_pad[VM_VPLQ_ALIGNMENT]; struct vpl vpl; } vpl_un; }; extern -unsigned int vm_page_local_q_count; +unsigned int vm_page_local_q_count; extern -struct vplq *vm_page_local_q; +struct vplq *vm_page_local_q; extern -unsigned int vm_page_local_q_soft_limit; +unsigned int vm_page_local_q_soft_limit; extern -unsigned int vm_page_local_q_hard_limit; +unsigned int vm_page_local_q_hard_limit; extern vm_locks_array_t vm_page_locks; extern -vm_page_queue_head_t vm_lopage_queue_free; /* low memory free queue */ +vm_page_queue_head_t vm_lopage_queue_free; /* low memory free queue */ extern -vm_page_queue_head_t vm_page_queue_active; /* active memory queue */ +vm_page_queue_head_t vm_page_queue_active; /* active memory queue */ extern -vm_page_queue_head_t vm_page_queue_inactive; /* inactive memory queue for normal pages */ +vm_page_queue_head_t vm_page_queue_inactive; /* inactive memory queue for normal pages */ #if CONFIG_SECLUDED_MEMORY extern -vm_page_queue_head_t vm_page_queue_secluded; /* reclaimable pages secluded for Camera */ +vm_page_queue_head_t vm_page_queue_secluded; /* reclaimable pages secluded for Camera */ #endif /* CONFIG_SECLUDED_MEMORY */ extern vm_page_queue_head_t 
vm_page_queue_cleaned; /* clean-queue inactive memory */ extern -vm_page_queue_head_t vm_page_queue_anonymous; /* inactive memory queue for anonymous pages */ +vm_page_queue_head_t vm_page_queue_anonymous; /* inactive memory queue for anonymous pages */ extern -vm_page_queue_head_t vm_page_queue_throttled; /* memory queue for throttled pageout pages */ +vm_page_queue_head_t vm_page_queue_throttled; /* memory queue for throttled pageout pages */ extern -queue_head_t vm_objects_wired; +queue_head_t vm_objects_wired; extern -lck_spin_t vm_objects_wired_lock; +lck_spin_t vm_objects_wired_lock; #if CONFIG_BACKGROUND_QUEUE -#define VM_PAGE_BACKGROUND_TARGET_MAX 50000 +#define VM_PAGE_BACKGROUND_TARGET_MAX 50000 -#define VM_PAGE_BG_DISABLED 0 -#define VM_PAGE_BG_LEVEL_1 1 +#define VM_PAGE_BG_DISABLED 0 +#define VM_PAGE_BG_LEVEL_1 1 extern -vm_page_queue_head_t vm_page_queue_background; +vm_page_queue_head_t vm_page_queue_background; extern -uint64_t vm_page_background_promoted_count; +uint64_t vm_page_background_promoted_count; extern -uint32_t vm_page_background_count; +uint32_t vm_page_background_count; extern -uint32_t vm_page_background_target; +uint32_t vm_page_background_target; extern -uint32_t vm_page_background_internal_count; +uint32_t vm_page_background_internal_count; extern -uint32_t vm_page_background_external_count; +uint32_t vm_page_background_external_count; extern -uint32_t vm_page_background_mode; +uint32_t vm_page_background_mode; extern -uint32_t vm_page_background_exclude_external; +uint32_t vm_page_background_exclude_external; #endif extern -vm_offset_t first_phys_addr; /* physical address for first_page */ +vm_offset_t first_phys_addr; /* physical address for first_page */ extern -vm_offset_t last_phys_addr; /* physical address for last_page */ +vm_offset_t last_phys_addr; /* physical address for last_page */ extern -unsigned int vm_page_free_count; /* How many pages are free? (sum of all colors) */ +unsigned int vm_page_free_count; /* How many pages are free? (sum of all colors) */ extern -unsigned int vm_page_active_count; /* How many pages are active? */ +unsigned int vm_page_active_count; /* How many pages are active? */ extern -unsigned int vm_page_inactive_count; /* How many pages are inactive? */ +unsigned int vm_page_inactive_count; /* How many pages are inactive? */ #if CONFIG_SECLUDED_MEMORY extern -unsigned int vm_page_secluded_count; /* How many pages are secluded? */ +unsigned int vm_page_secluded_count; /* How many pages are secluded? */ extern -unsigned int vm_page_secluded_count_free; +unsigned int vm_page_secluded_count_free; extern -unsigned int vm_page_secluded_count_inuse; +unsigned int vm_page_secluded_count_inuse; #endif /* CONFIG_SECLUDED_MEMORY */ extern unsigned int vm_page_cleaned_count; /* How many pages are in the clean queue? */ extern -unsigned int vm_page_throttled_count;/* How many inactives are throttled */ +unsigned int vm_page_throttled_count;/* How many inactives are throttled */ +extern +unsigned int vm_page_speculative_count; /* How many speculative pages are unclaimed? */ +extern unsigned int vm_page_pageable_internal_count; +extern unsigned int vm_page_pageable_external_count; extern -unsigned int vm_page_speculative_count; /* How many speculative pages are unclaimed? */ -extern unsigned int vm_page_pageable_internal_count; -extern unsigned int vm_page_pageable_external_count; +unsigned int vm_page_xpmapped_external_count; /* How many pages are mapped executable? 
*/ extern -unsigned int vm_page_xpmapped_external_count; /* How many pages are mapped executable? */ +unsigned int vm_page_external_count; /* How many pages are file-backed? */ extern -unsigned int vm_page_external_count; /* How many pages are file-backed? */ +unsigned int vm_page_internal_count; /* How many pages are anonymous? */ extern -unsigned int vm_page_internal_count; /* How many pages are anonymous? */ +unsigned int vm_page_wire_count; /* How many pages are wired? */ extern -unsigned int vm_page_wire_count; /* How many pages are wired? */ +unsigned int vm_page_wire_count_initial; /* How many pages wired at startup */ extern -unsigned int vm_page_wire_count_initial; /* How many pages wired at startup */ +unsigned int vm_page_wire_count_on_boot; /* even earlier than _initial */ extern -unsigned int vm_page_free_target; /* How many do we want free? */ +unsigned int vm_page_free_target; /* How many do we want free? */ extern -unsigned int vm_page_free_min; /* When to wakeup pageout */ +unsigned int vm_page_free_min; /* When to wakeup pageout */ extern -unsigned int vm_page_throttle_limit; /* When to throttle new page creation */ +unsigned int vm_page_throttle_limit; /* When to throttle new page creation */ extern -unsigned int vm_page_inactive_target;/* How many do we want inactive? */ +unsigned int vm_page_inactive_target;/* How many do we want inactive? */ #if CONFIG_SECLUDED_MEMORY extern -unsigned int vm_page_secluded_target;/* How many do we want secluded? */ +unsigned int vm_page_secluded_target;/* How many do we want secluded? */ #endif /* CONFIG_SECLUDED_MEMORY */ extern -unsigned int vm_page_anonymous_min; /* When it's ok to pre-clean */ +unsigned int vm_page_anonymous_min; /* When it's ok to pre-clean */ extern -unsigned int vm_page_free_reserved; /* How many pages reserved to do pageout */ +unsigned int vm_page_free_reserved; /* How many pages reserved to do pageout */ extern -unsigned int vm_page_gobble_count; +unsigned int vm_page_gobble_count; extern -unsigned int vm_page_stolen_count; /* Count of stolen pages not acccounted in zones */ +unsigned int vm_page_stolen_count; /* Count of stolen pages not accounted in zones */ #if DEVELOPMENT || DEBUG extern -unsigned int vm_page_speculative_used; +unsigned int vm_page_speculative_used; #endif extern -unsigned int vm_page_purgeable_count;/* How many pages are purgeable now ? */ +unsigned int vm_page_purgeable_count;/* How many pages are purgeable now? */ extern -unsigned int vm_page_purgeable_wired_count;/* How many purgeable pages are wired now ? */ +unsigned int vm_page_purgeable_wired_count;/* How many purgeable pages are wired now? */ extern -uint64_t vm_page_purged_count; /* How many pages got purged so far ? */ +uint64_t vm_page_purged_count; /* How many pages got purged so far?
*/ -extern unsigned int vm_page_free_wanted; - /* how many threads are waiting for memory */ +extern unsigned int vm_page_free_wanted; +/* how many threads are waiting for memory */ -extern unsigned int vm_page_free_wanted_privileged; - /* how many VM privileged threads are waiting for memory */ +extern unsigned int vm_page_free_wanted_privileged; +/* how many VM privileged threads are waiting for memory */ #if CONFIG_SECLUDED_MEMORY -extern unsigned int vm_page_free_wanted_secluded; - /* how many threads are waiting for secluded memory */ +extern unsigned int vm_page_free_wanted_secluded; +/* how many threads are waiting for secluded memory */ #endif /* CONFIG_SECLUDED_MEMORY */ -extern const ppnum_t vm_page_fictitious_addr; - /* (fake) phys_addr of fictitious pages */ +extern const ppnum_t vm_page_fictitious_addr; +/* (fake) phys_addr of fictitious pages */ -extern const ppnum_t vm_page_guard_addr; - /* (fake) phys_addr of guard pages */ +extern const ppnum_t vm_page_guard_addr; +/* (fake) phys_addr of guard pages */ -extern boolean_t vm_page_deactivate_hint; +extern boolean_t vm_page_deactivate_hint; -extern int vm_compressor_mode; +extern int vm_compressor_mode; /* - 0 = all pages avail ( default. ) - 1 = disable high mem ( cap max pages to 4G) - 2 = prefer himem -*/ -extern int vm_himemory_mode; - -extern boolean_t vm_lopage_needed; -extern uint32_t vm_lopage_free_count; -extern uint32_t vm_lopage_free_limit; -extern uint32_t vm_lopage_lowater; -extern boolean_t vm_lopage_refill; -extern uint64_t max_valid_dma_address; -extern ppnum_t max_valid_low_ppnum; + * Defaults to true, so highest memory is used first. + */ +extern boolean_t vm_himemory_mode; + +extern boolean_t vm_lopage_needed; +extern uint32_t vm_lopage_free_count; +extern uint32_t vm_lopage_free_limit; +extern uint32_t vm_lopage_lowater; +extern boolean_t vm_lopage_refill; +extern uint64_t max_valid_dma_address; +extern ppnum_t max_valid_low_ppnum; /* * Prototypes for functions exported by this module. 
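 *
 * (Rough boot-time calling order, as an illustrative sketch only:
 *  vm_page_bootstrap() carves the initial free list out of physical
 *  memory very early, vm_page_module_init() follows once the zone
 *  subsystem is up, and vm_page_init_local_q() then brings up the
 *  per-CPU local queues; the remaining routines below operate on
 *  pages after that point.)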
*/ -extern void vm_page_bootstrap( - vm_offset_t *startp, - vm_offset_t *endp); +extern void vm_page_bootstrap( + vm_offset_t *startp, + vm_offset_t *endp); + +extern void vm_page_module_init(void); -extern void vm_page_module_init(void); - -extern void vm_page_init_local_q(void); +extern void vm_page_init_local_q(void); -extern void vm_page_create( - ppnum_t start, - ppnum_t end); +extern void vm_page_create( + ppnum_t start, + ppnum_t end); -extern vm_page_t kdp_vm_page_lookup( - vm_object_t object, - vm_object_offset_t offset); +extern vm_page_t kdp_vm_page_lookup( + vm_object_t object, + vm_object_offset_t offset); -extern vm_page_t vm_page_lookup( - vm_object_t object, - vm_object_offset_t offset); +extern vm_page_t vm_page_lookup( + vm_object_t object, + vm_object_offset_t offset); -extern vm_page_t vm_page_grab_fictitious(void); +extern vm_page_t vm_page_grab_fictitious(void); -extern vm_page_t vm_page_grab_guard(void); +extern vm_page_t vm_page_grab_guard(void); -extern void vm_page_release_fictitious( - vm_page_t page); +extern void vm_page_release_fictitious( + vm_page_t page); -extern void vm_page_more_fictitious(void); +extern void vm_free_delayed_pages(void); -extern int vm_pool_low(void); +extern void vm_page_more_fictitious(void); -extern vm_page_t vm_page_grab(void); -extern vm_page_t vm_page_grab_options(int flags); +extern int vm_pool_low(void); + +extern vm_page_t vm_page_grab(void); +extern vm_page_t vm_page_grab_options(int flags); + +#define VM_PAGE_GRAB_OPTIONS_NONE 0x00000000 #if CONFIG_SECLUDED_MEMORY -#define VM_PAGE_GRAB_SECLUDED 0x00000001 +#define VM_PAGE_GRAB_SECLUDED 0x00000001 #endif /* CONFIG_SECLUDED_MEMORY */ +#define VM_PAGE_GRAB_Q_LOCK_HELD 0x00000002 -extern vm_page_t vm_page_grablo(void); +extern vm_page_t vm_page_grablo(void); -extern void vm_page_release( - vm_page_t page, - boolean_t page_queues_locked); +extern void vm_page_release( + vm_page_t page, + boolean_t page_queues_locked); -extern boolean_t vm_page_wait( - int interruptible ); +extern boolean_t vm_page_wait( + int interruptible ); -extern vm_page_t vm_page_alloc( - vm_object_t object, - vm_object_offset_t offset); +extern vm_page_t vm_page_alloc( + vm_object_t object, + vm_object_offset_t offset); -extern vm_page_t vm_page_alloc_guard( - vm_object_t object, - vm_object_offset_t offset); +extern vm_page_t vm_page_alloc_guard( + vm_object_t object, + vm_object_offset_t offset); -extern void vm_page_init( - vm_page_t page, - ppnum_t phys_page, - boolean_t lopage); +extern void vm_page_init( + vm_page_t page, + ppnum_t phys_page, + boolean_t lopage); -extern void vm_page_free( - vm_page_t page); +extern void vm_page_free( + vm_page_t page); -extern void vm_page_free_unlocked( - vm_page_t page, - boolean_t remove_from_hash); +extern void vm_page_free_unlocked( + vm_page_t page, + boolean_t remove_from_hash); extern void vm_page_balance_inactive( - int max_to_move); - -extern void vm_page_activate( - vm_page_t page); - -extern void vm_page_deactivate( - vm_page_t page); - -extern void vm_page_deactivate_internal( - vm_page_t page, - boolean_t clear_hw_reference); - -extern void vm_page_enqueue_cleaned(vm_page_t page); - -extern void vm_page_lru( - vm_page_t page); - -extern void vm_page_speculate( - vm_page_t page, - boolean_t new); - -extern void vm_page_speculate_ageit( - struct vm_speculative_age_q *aq); - -extern void vm_page_reactivate_all_throttled(void); - -extern void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks); - -extern void vm_page_rename( - vm_page_t 
page, - vm_object_t new_object, - vm_object_offset_t new_offset); - -extern void vm_page_insert( - vm_page_t page, - vm_object_t object, - vm_object_offset_t offset); - -extern void vm_page_insert_wired( - vm_page_t page, - vm_object_t object, - vm_object_offset_t offset, - vm_tag_t tag); - -extern void vm_page_insert_internal( - vm_page_t page, - vm_object_t object, - vm_object_offset_t offset, - vm_tag_t tag, - boolean_t queues_lock_held, - boolean_t insert_in_hash, - boolean_t batch_pmap_op, - boolean_t delayed_accounting, - uint64_t *delayed_ledger_update); - -extern void vm_page_replace( - vm_page_t mem, - vm_object_t object, - vm_object_offset_t offset); - -extern void vm_page_remove( - vm_page_t page, - boolean_t remove_from_hash); - -extern void vm_page_zero_fill( - vm_page_t page); - -extern void vm_page_part_zero_fill( - vm_page_t m, - vm_offset_t m_pa, - vm_size_t len); - -extern void vm_page_copy( - vm_page_t src_page, - vm_page_t dest_page); - -extern void vm_page_part_copy( - vm_page_t src_m, - vm_offset_t src_pa, - vm_page_t dst_m, - vm_offset_t dst_pa, - vm_size_t len); - -extern void vm_page_wire( - vm_page_t page, - vm_tag_t tag, - boolean_t check_memorystatus); - -extern void vm_page_unwire( - vm_page_t page, - boolean_t queueit); - -extern void vm_set_page_size(void); - -extern void vm_page_gobble( - vm_page_t page); - -extern void vm_page_validate_cs(vm_page_t page); -extern void vm_page_validate_cs_mapped( - vm_page_t page, - const void *kaddr); -extern void vm_page_validate_cs_mapped_slow( - vm_page_t page, - const void *kaddr); -extern void vm_page_validate_cs_mapped_chunk( - vm_page_t page, - const void *kaddr, - vm_offset_t chunk_offset, - vm_size_t chunk_size, - boolean_t *validated, - unsigned *tainted); - -extern void vm_page_free_prepare_queues( - vm_page_t page); - -extern void vm_page_free_prepare_object( - vm_page_t page, - boolean_t remove_from_hash); + int max_to_move); + +extern void vm_page_activate( + vm_page_t page); + +extern void vm_page_deactivate( + vm_page_t page); + +extern void vm_page_deactivate_internal( + vm_page_t page, + boolean_t clear_hw_reference); + +extern void vm_page_enqueue_cleaned(vm_page_t page); + +extern void vm_page_lru( + vm_page_t page); + +extern void vm_page_speculate( + vm_page_t page, + boolean_t new); + +extern void vm_page_speculate_ageit( + struct vm_speculative_age_q *aq); + +extern void vm_page_reactivate_all_throttled(void); + +extern void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks); + +extern void vm_page_rename( + vm_page_t page, + vm_object_t new_object, + vm_object_offset_t new_offset); + +extern void vm_page_insert( + vm_page_t page, + vm_object_t object, + vm_object_offset_t offset); + +extern void vm_page_insert_wired( + vm_page_t page, + vm_object_t object, + vm_object_offset_t offset, + vm_tag_t tag); + +extern void vm_page_insert_internal( + vm_page_t page, + vm_object_t object, + vm_object_offset_t offset, + vm_tag_t tag, + boolean_t queues_lock_held, + boolean_t insert_in_hash, + boolean_t batch_pmap_op, + boolean_t delayed_accounting, + uint64_t *delayed_ledger_update); + +extern void vm_page_replace( + vm_page_t mem, + vm_object_t object, + vm_object_offset_t offset); + +extern void vm_page_remove( + vm_page_t page, + boolean_t remove_from_hash); + +extern void vm_page_zero_fill( + vm_page_t page); + +extern void vm_page_part_zero_fill( + vm_page_t m, + vm_offset_t m_pa, + vm_size_t len); + +extern void vm_page_copy( + vm_page_t src_page, + vm_page_t dest_page); + 
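/*
 * A minimal usage sketch of the KPIs declared above (illustrative
 * only: the function, object, offset and tag names are hypothetical,
 * and error handling is elided). It grabs a free page, blocking while
 * the free pool is depleted, enters it into an object, and wires it.
 */
static vm_page_t
grab_insert_and_wire(vm_object_t obj, vm_object_offset_t off, vm_tag_t tag)
{
	vm_page_t       m;

	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
		VM_PAGE_WAIT();         /* sleep until pages are freed up */
	}
	vm_object_lock(obj);
	vm_page_insert(m, obj, off);    /* object lock must be held */
	vm_page_lock_queues();
	vm_page_wire(m, tag, TRUE);     /* queues locked; wiring charged to "tag" */
	vm_page_unlock_queues();
	vm_object_unlock(obj);
	return m;
}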
+extern void vm_page_part_copy( + vm_page_t src_m, + vm_offset_t src_pa, + vm_page_t dst_m, + vm_offset_t dst_pa, + vm_size_t len); + +extern void vm_page_wire( + vm_page_t page, + vm_tag_t tag, + boolean_t check_memorystatus); + +extern void vm_page_unwire( + vm_page_t page, + boolean_t queueit); + +extern void vm_set_page_size(void); + +extern void vm_page_gobble( + vm_page_t page); + +extern void vm_page_validate_cs(vm_page_t page); +extern void vm_page_validate_cs_mapped( + vm_page_t page, + const void *kaddr); +extern void vm_page_validate_cs_mapped_slow( + vm_page_t page, + const void *kaddr); +extern void vm_page_validate_cs_mapped_chunk( + vm_page_t page, + const void *kaddr, + vm_offset_t chunk_offset, + vm_size_t chunk_size, + boolean_t *validated, + unsigned *tainted); + +extern void vm_page_free_prepare_queues( + vm_page_t page); + +extern void vm_page_free_prepare_object( + vm_page_t page, + boolean_t remove_from_hash); #if CONFIG_IOSCHED -extern wait_result_t vm_page_sleep( - vm_object_t object, - vm_page_t m, - int interruptible); +extern wait_result_t vm_page_sleep( + vm_object_t object, + vm_page_t m, + int interruptible); #endif extern void vm_pressure_response(void); @@ -1394,11 +1450,11 @@ extern void vm_pressure_response(void); extern void memorystatus_pages_update(unsigned int pages_avail); #define VM_CHECK_MEMORYSTATUS do { \ - memorystatus_pages_update( \ - vm_page_pageable_external_count + \ - vm_page_free_count + \ - (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \ - ); \ + memorystatus_pages_update( \ + vm_page_pageable_external_count + \ + vm_page_free_count + \ + (VM_DYNAMIC_PAGING_ENABLED() ? 0 : vm_page_purgeable_count) \ + ); \ } while(0) #else /* CONFIG_JETSAM */ @@ -1409,7 +1465,7 @@ extern void memorystatus_pages_update(unsigned int pages_avail); #else /* CONFIG_EMBEDDED */ -#define VM_CHECK_MEMORYSTATUS vm_pressure_response() +#define VM_CHECK_MEMORYSTATUS vm_pressure_response() #endif /* CONFIG_EMBEDDED */ @@ -1421,82 +1477,84 @@ extern void memorystatus_pages_update(unsigned int pages_avail); */ #if CONFIG_EMBEDDED -#define SET_PAGE_DIRTY(m, set_pmap_modified) \ - MACRO_BEGIN \ - vm_page_t __page__ = (m); \ - if (__page__->vmp_pmapped == TRUE && \ - __page__->vmp_wpmapped == TRUE && \ - __page__->vmp_dirty == FALSE && \ - (set_pmap_modified)) { \ - pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \ - } \ - __page__->vmp_dirty = TRUE; \ - MACRO_END +#define SET_PAGE_DIRTY(m, set_pmap_modified) \ + MACRO_BEGIN \ + vm_page_t __page__ = (m); \ + if (__page__->vmp_pmapped == TRUE && \ + __page__->vmp_wpmapped == TRUE && \ + __page__->vmp_dirty == FALSE && \ + (set_pmap_modified)) { \ + pmap_set_modify(VM_PAGE_GET_PHYS_PAGE(__page__)); \ + } \ + __page__->vmp_dirty = TRUE; \ + MACRO_END #else /* CONFIG_EMBEDDED */ -#define SET_PAGE_DIRTY(m, set_pmap_modified) \ - MACRO_BEGIN \ - vm_page_t __page__ = (m); \ - __page__->vmp_dirty = TRUE; \ - MACRO_END +#define SET_PAGE_DIRTY(m, set_pmap_modified) \ + MACRO_BEGIN \ + vm_page_t __page__ = (m); \ + __page__->vmp_dirty = TRUE; \ + MACRO_END #endif /* CONFIG_EMBEDDED */ -#define PAGE_ASSERT_WAIT(m, interruptible) \ - (((m)->vmp_wanted = TRUE), \ - assert_wait((event_t) (m), (interruptible))) +#define PAGE_ASSERT_WAIT(m, interruptible) \ + (((m)->vmp_wanted = TRUE), \ + assert_wait((event_t) (m), (interruptible))) #if CONFIG_IOSCHED -#define PAGE_SLEEP(o, m, interruptible) \ - vm_page_sleep(o, m, interruptible) +#define PAGE_SLEEP(o, m, interruptible) \ + vm_page_sleep(o, m, interruptible) #else 
-#define PAGE_SLEEP(o, m, interruptible) \ - (((m)->vmp_wanted = TRUE), \ +#define PAGE_SLEEP(o, m, interruptible) \ + (((m)->vmp_wanted = TRUE), \ thread_sleep_vm_object((o), (m), (interruptible))) #endif -#define PAGE_WAKEUP_DONE(m) \ - MACRO_BEGIN \ - (m)->vmp_busy = FALSE; \ - if ((m)->vmp_wanted) { \ - (m)->vmp_wanted = FALSE; \ - thread_wakeup((event_t) (m)); \ - } \ - MACRO_END - -#define PAGE_WAKEUP(m) \ - MACRO_BEGIN \ - if ((m)->vmp_wanted) { \ - (m)->vmp_wanted = FALSE; \ - thread_wakeup((event_t) (m)); \ - } \ - MACRO_END - -#define VM_PAGE_FREE(p) \ - MACRO_BEGIN \ - vm_page_free_unlocked(p, TRUE); \ - MACRO_END - -#define VM_PAGE_GRAB_FICTITIOUS(M) \ - MACRO_BEGIN \ - while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL) \ - vm_page_more_fictitious(); \ - MACRO_END - -#define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT)) +#define PAGE_WAKEUP_DONE(m) \ + MACRO_BEGIN \ + (m)->vmp_busy = FALSE; \ + if ((m)->vmp_wanted) { \ + (m)->vmp_wanted = FALSE; \ + thread_wakeup((event_t) (m)); \ + } \ + MACRO_END + +#define PAGE_WAKEUP(m) \ + MACRO_BEGIN \ + if ((m)->vmp_wanted) { \ + (m)->vmp_wanted = FALSE; \ + thread_wakeup((event_t) (m)); \ + } \ + MACRO_END + +#define VM_PAGE_FREE(p) \ + MACRO_BEGIN \ + vm_page_free_unlocked(p, TRUE); \ + MACRO_END + +#define VM_PAGE_GRAB_FICTITIOUS(M) \ + MACRO_BEGIN \ + while ((M = vm_page_grab_fictitious()) == VM_PAGE_NULL) \ + vm_page_more_fictitious(); \ + MACRO_END + +#define VM_PAGE_WAIT() ((void)vm_page_wait(THREAD_UNINT)) #define vm_page_queue_lock (vm_page_locks.vm_page_queue_lock2) #define vm_page_queue_free_lock (vm_page_locks.vm_page_queue_free_lock2) -#define vm_page_lock_queues() lck_mtx_lock(&vm_page_queue_lock) +#define vm_page_lock_queues() lck_mtx_lock(&vm_page_queue_lock) #define vm_page_trylock_queues() lck_mtx_try_lock(&vm_page_queue_lock) -#define vm_page_unlock_queues() lck_mtx_unlock(&vm_page_queue_lock) +#define vm_page_unlock_queues() lck_mtx_unlock(&vm_page_queue_lock) -#define vm_page_lockspin_queues() lck_mtx_lock_spin(&vm_page_queue_lock) -#define vm_page_trylockspin_queues() lck_mtx_try_lock_spin(&vm_page_queue_lock) -#define vm_page_lockconvert_queues() lck_mtx_convert_spin(&vm_page_queue_lock) +#define vm_page_lockspin_queues() lck_mtx_lock_spin(&vm_page_queue_lock) +#define vm_page_trylockspin_queues() lck_mtx_try_lock_spin(&vm_page_queue_lock) +#define vm_page_lockconvert_queues() lck_mtx_convert_spin(&vm_page_queue_lock) + +#ifdef VPL_LOCK_SPIN +extern lck_grp_t vm_page_lck_grp_local; -#ifdef VPL_LOCK_SPIN #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_spin_init(&vlq->vpl_lock, vpl_grp, vpl_attr) -#define VPL_LOCK(vpl) lck_spin_lock(vpl) +#define VPL_LOCK(vpl) lck_spin_lock_grp(vpl, &vm_page_lck_grp_local) #define VPL_UNLOCK(vpl) lck_spin_unlock(vpl) #else #define VPL_LOCK_INIT(vlq, vpl_grp, vpl_attr) lck_mtx_init_ext(&vlq->vpl_lock, &vlq->vpl_lock_ext, vpl_grp, vpl_attr) @@ -1506,83 +1564,83 @@ extern void memorystatus_pages_update(unsigned int pages_avail); #if DEVELOPMENT || DEBUG -#define VM_PAGE_SPECULATIVE_USED_ADD() \ - MACRO_BEGIN \ - OSAddAtomic(1, &vm_page_speculative_used); \ +#define VM_PAGE_SPECULATIVE_USED_ADD() \ + MACRO_BEGIN \ + OSAddAtomic(1, &vm_page_speculative_used); \ MACRO_END #else -#define VM_PAGE_SPECULATIVE_USED_ADD() +#define VM_PAGE_SPECULATIVE_USED_ADD() #endif -#define VM_PAGE_CONSUME_CLUSTERED(mem) \ - MACRO_BEGIN \ - ppnum_t __phys_page; \ - __phys_page = VM_PAGE_GET_PHYS_PAGE(mem); \ - pmap_lock_phys_page(__phys_page); \ - if (mem->vmp_clustered) { \ - vm_object_t o; \ - o 
= VM_PAGE_OBJECT(mem); \ - assert(o); \ - o->pages_used++; \ - mem->vmp_clustered = FALSE; \ - VM_PAGE_SPECULATIVE_USED_ADD(); \ - } \ - pmap_unlock_phys_page(__phys_page); \ +#define VM_PAGE_CONSUME_CLUSTERED(mem) \ + MACRO_BEGIN \ + ppnum_t __phys_page; \ + __phys_page = VM_PAGE_GET_PHYS_PAGE(mem); \ + pmap_lock_phys_page(__phys_page); \ + if (mem->vmp_clustered) { \ + vm_object_t o; \ + o = VM_PAGE_OBJECT(mem); \ + assert(o); \ + o->pages_used++; \ + mem->vmp_clustered = FALSE; \ + VM_PAGE_SPECULATIVE_USED_ADD(); \ + } \ + pmap_unlock_phys_page(__phys_page); \ MACRO_END -#define VM_PAGE_COUNT_AS_PAGEIN(mem) \ - MACRO_BEGIN \ - { \ - vm_object_t o; \ - o = VM_PAGE_OBJECT(mem); \ - DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL); \ - current_task()->pageins++; \ - if (o->internal) { \ - DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \ - } else { \ - DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \ - } \ - } \ +#define VM_PAGE_COUNT_AS_PAGEIN(mem) \ + MACRO_BEGIN \ + { \ + vm_object_t o; \ + o = VM_PAGE_OBJECT(mem); \ + DTRACE_VM2(pgin, int, 1, (uint64_t *), NULL); \ + current_task()->pageins++; \ + if (o->internal) { \ + DTRACE_VM2(anonpgin, int, 1, (uint64_t *), NULL); \ + } else { \ + DTRACE_VM2(fspgin, int, 1, (uint64_t *), NULL); \ + } \ + } \ MACRO_END /* adjust for stolen pages accounted elsewhere */ -#define VM_PAGE_MOVE_STOLEN(page_count) \ - MACRO_BEGIN \ - vm_page_stolen_count -= (page_count); \ - vm_page_wire_count_initial -= (page_count); \ +#define VM_PAGE_MOVE_STOLEN(page_count) \ + MACRO_BEGIN \ + vm_page_stolen_count -= (page_count); \ + vm_page_wire_count_initial -= (page_count); \ MACRO_END - -#define DW_vm_page_unwire 0x01 -#define DW_vm_page_wire 0x02 -#define DW_vm_page_free 0x04 -#define DW_vm_page_activate 0x08 -#define DW_vm_page_deactivate_internal 0x10 -#define DW_vm_page_speculate 0x20 -#define DW_vm_page_lru 0x40 -#define DW_vm_pageout_throttle_up 0x80 -#define DW_PAGE_WAKEUP 0x100 -#define DW_clear_busy 0x200 -#define DW_clear_reference 0x400 -#define DW_set_reference 0x800 -#define DW_move_page 0x1000 -#define DW_VM_PAGE_QUEUES_REMOVE 0x2000 -#define DW_enqueue_cleaned 0x4000 -#define DW_vm_phantom_cache_update 0x8000 + +#define DW_vm_page_unwire 0x01 +#define DW_vm_page_wire 0x02 +#define DW_vm_page_free 0x04 +#define DW_vm_page_activate 0x08 +#define DW_vm_page_deactivate_internal 0x10 +#define DW_vm_page_speculate 0x20 +#define DW_vm_page_lru 0x40 +#define DW_vm_pageout_throttle_up 0x80 +#define DW_PAGE_WAKEUP 0x100 +#define DW_clear_busy 0x200 +#define DW_clear_reference 0x400 +#define DW_set_reference 0x800 +#define DW_move_page 0x1000 +#define DW_VM_PAGE_QUEUES_REMOVE 0x2000 +#define DW_enqueue_cleaned 0x4000 +#define DW_vm_phantom_cache_update 0x8000 struct vm_page_delayed_work { - vm_page_t dw_m; - int dw_mask; + vm_page_t dw_m; + int dw_mask; }; void vm_page_do_delayed_work(vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, int dw_count); extern unsigned int vm_max_delayed_work_limit; -#define DEFAULT_DELAYED_WORK_LIMIT 32 +#define DEFAULT_DELAYED_WORK_LIMIT 32 -#define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit)) +#define DELAYED_WORK_LIMIT(max) ((vm_max_delayed_work_limit >= max ? max : vm_max_delayed_work_limit)) /* * vm_page_do_delayed_work may need to drop the object lock... @@ -1593,16 +1651,16 @@ extern unsigned int vm_max_delayed_work_limit; * it once we're done processing the page. 
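 *
 * A typical batching pattern, sketched with illustrative names (the
 * array size and the VM_KERN_MEMORY_NONE tag are just examples):
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *
 *	dwp->dw_mask = DW_vm_page_activate | DW_PAGE_WAKEUP;
 *	VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 *
 *	if (dw_count >= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT)) {
 *		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
 *		    &dw_array[0], dw_count);
 *		dwp = &dw_array[0];
 *		dw_count = 0;
 *	}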
*/ -#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt) \ - MACRO_BEGIN \ - if (mem->vmp_busy == FALSE) { \ - mem->vmp_busy = TRUE; \ - if ( !(dwp->dw_mask & DW_vm_page_free)) \ - dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \ - } \ - dwp->dw_m = mem; \ - dwp++; \ - dw_cnt++; \ +#define VM_PAGE_ADD_DELAYED_WORK(dwp, mem, dw_cnt) \ + MACRO_BEGIN \ + if (mem->vmp_busy == FALSE) { \ + mem->vmp_busy = TRUE; \ + if ( !(dwp->dw_mask & DW_vm_page_free)) \ + dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); \ + } \ + dwp->dw_m = mem; \ + dwp++; \ + dw_cnt++; \ MACRO_END extern vm_page_t vm_object_page_grab(vm_object_t); @@ -1624,4 +1682,4 @@ extern void stop_secluded_suppression(task_t); #endif /* CONFIG_SECLUDED_MEMORY */ -#endif /* _VM_VM_PAGE_H_ */ +#endif /* _VM_VM_PAGE_H_ */ diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c index 7aaef6cdd..6b6e3d04d 100644 --- a/osfmk/vm/vm_pageout.c +++ b/osfmk/vm/vm_pageout.c @@ -145,7 +145,7 @@ extern void consider_vm_pressure_events(void); #ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */ -#ifdef CONFIG_EMBEDDED +#ifdef CONFIG_EMBEDDED #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024 #else #define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096 @@ -153,32 +153,32 @@ extern void consider_vm_pressure_events(void); #endif #ifndef VM_PAGEOUT_DEADLOCK_RELIEF -#define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */ +#define VM_PAGEOUT_DEADLOCK_RELIEF 100 /* number of pages to move to break deadlock */ #endif -#ifndef VM_PAGE_LAUNDRY_MAX -#define VM_PAGE_LAUNDRY_MAX 128UL /* maximum pageouts on a given pageout queue */ -#endif /* VM_PAGEOUT_LAUNDRY_MAX */ +#ifndef VM_PAGE_LAUNDRY_MAX +#define VM_PAGE_LAUNDRY_MAX 128UL /* maximum pageouts on a given pageout queue */ +#endif /* VM_PAGEOUT_LAUNDRY_MAX */ -#ifndef VM_PAGEOUT_BURST_WAIT -#define VM_PAGEOUT_BURST_WAIT 1 /* milliseconds */ -#endif /* VM_PAGEOUT_BURST_WAIT */ +#ifndef VM_PAGEOUT_BURST_WAIT +#define VM_PAGEOUT_BURST_WAIT 1 /* milliseconds */ +#endif /* VM_PAGEOUT_BURST_WAIT */ -#ifndef VM_PAGEOUT_EMPTY_WAIT -#define VM_PAGEOUT_EMPTY_WAIT 50 /* milliseconds */ -#endif /* VM_PAGEOUT_EMPTY_WAIT */ +#ifndef VM_PAGEOUT_EMPTY_WAIT +#define VM_PAGEOUT_EMPTY_WAIT 50 /* milliseconds */ +#endif /* VM_PAGEOUT_EMPTY_WAIT */ -#ifndef VM_PAGEOUT_DEADLOCK_WAIT +#ifndef VM_PAGEOUT_DEADLOCK_WAIT #define VM_PAGEOUT_DEADLOCK_WAIT 100 /* milliseconds */ -#endif /* VM_PAGEOUT_DEADLOCK_WAIT */ +#endif /* VM_PAGEOUT_DEADLOCK_WAIT */ -#ifndef VM_PAGEOUT_IDLE_WAIT -#define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */ -#endif /* VM_PAGEOUT_IDLE_WAIT */ +#ifndef VM_PAGEOUT_IDLE_WAIT +#define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */ +#endif /* VM_PAGEOUT_IDLE_WAIT */ -#ifndef VM_PAGEOUT_SWAP_WAIT -#define VM_PAGEOUT_SWAP_WAIT 10 /* milliseconds */ -#endif /* VM_PAGEOUT_SWAP_WAIT */ +#ifndef VM_PAGEOUT_SWAP_WAIT +#define VM_PAGEOUT_SWAP_WAIT 10 /* milliseconds */ +#endif /* VM_PAGEOUT_SWAP_WAIT */ #ifndef VM_PAGE_SPECULATIVE_TARGET @@ -198,22 +198,22 @@ extern void consider_vm_pressure_events(void); * then the pageout daemon starts running. */ -#ifndef VM_PAGE_INACTIVE_TARGET -#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2) -#endif /* VM_PAGE_INACTIVE_TARGET */ +#ifndef VM_PAGE_INACTIVE_TARGET +#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2) +#endif /* VM_PAGE_INACTIVE_TARGET */ /* * Once the pageout daemon starts running, it keeps going * until vm_page_free_count meets or exceeds vm_page_free_target. 
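 *
 * Rough worked example (assuming 4K pages): with roughly 1 GiB of
 * managed memory, "free" is on the order of 262,144 pages, so the
 * non-embedded formula below yields 15 + 262144/80, i.e. about 3,291
 * pages (~13 MiB) kept free; the embedded divisor of 100 yields a
 * smaller target, and the VM_PAGE_FREE_*_LIMIT values further down
 * bound whatever the formulas produce.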
*/ -#ifndef VM_PAGE_FREE_TARGET -#ifdef CONFIG_EMBEDDED -#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100) +#ifndef VM_PAGE_FREE_TARGET +#ifdef CONFIG_EMBEDDED +#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100) #else -#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80) +#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80) #endif -#endif /* VM_PAGE_FREE_TARGET */ +#endif /* VM_PAGE_FREE_TARGET */ /* @@ -221,22 +221,22 @@ extern void consider_vm_pressure_events(void); * falls below vm_page_free_min. */ -#ifndef VM_PAGE_FREE_MIN -#ifdef CONFIG_EMBEDDED -#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200) +#ifndef VM_PAGE_FREE_MIN +#ifdef CONFIG_EMBEDDED +#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200) #else -#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100) +#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100) #endif -#endif /* VM_PAGE_FREE_MIN */ +#endif /* VM_PAGE_FREE_MIN */ -#ifdef CONFIG_EMBEDDED -#define VM_PAGE_FREE_RESERVED_LIMIT 100 -#define VM_PAGE_FREE_MIN_LIMIT 1500 -#define VM_PAGE_FREE_TARGET_LIMIT 2000 +#ifdef CONFIG_EMBEDDED +#define VM_PAGE_FREE_RESERVED_LIMIT 100 +#define VM_PAGE_FREE_MIN_LIMIT 1500 +#define VM_PAGE_FREE_TARGET_LIMIT 2000 #else -#define VM_PAGE_FREE_RESERVED_LIMIT 1700 -#define VM_PAGE_FREE_MIN_LIMIT 3500 -#define VM_PAGE_FREE_TARGET_LIMIT 4000 +#define VM_PAGE_FREE_RESERVED_LIMIT 1700 +#define VM_PAGE_FREE_MIN_LIMIT 3500 +#define VM_PAGE_FREE_TARGET_LIMIT 4000 #endif /* @@ -247,10 +247,10 @@ extern void consider_vm_pressure_events(void); * operation by dipping into the reserved pool of pages. */ -#ifndef VM_PAGE_FREE_RESERVED -#define VM_PAGE_FREE_RESERVED(n) \ +#ifndef VM_PAGE_FREE_RESERVED +#define VM_PAGE_FREE_RESERVED(n) \ ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n)) -#endif /* VM_PAGE_FREE_RESERVED */ +#endif /* VM_PAGE_FREE_RESERVED */ /* * When we dequeue pages from the inactive list, they are @@ -262,14 +262,14 @@ extern void consider_vm_pressure_events(void); */ #define VM_PAGE_REACTIVATE_LIMIT_MAX 20000 -#ifndef VM_PAGE_REACTIVATE_LIMIT -#ifdef CONFIG_EMBEDDED -#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2) +#ifndef VM_PAGE_REACTIVATE_LIMIT +#ifdef CONFIG_EMBEDDED +#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2) #else -#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX)) +#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX)) #endif -#endif /* VM_PAGE_REACTIVATE_LIMIT */ -#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000 +#endif /* VM_PAGE_REACTIVATE_LIMIT */ +#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000 extern boolean_t hibernate_cleaning_in_progress; @@ -278,9 +278,9 @@ extern boolean_t hibernate_cleaning_in_progress; */ struct cq { struct vm_pageout_queue *q; - void *current_chead; - char *scratch_buf; - int id; + void *current_chead; + char *scratch_buf; + int id; }; struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT]; @@ -319,15 +319,17 @@ struct vm_pageout_vminfo vm_pageout_vminfo; struct vm_pageout_state vm_pageout_state; struct vm_config vm_config; -struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -int vm_upl_wait_for_pages = 0; +int 
vm_upl_wait_for_pages = 0; vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL; -boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL; +boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL; -int vm_debug_events = 0; +int vm_debug_events = 0; + +lck_grp_t vm_pageout_lck_grp; #if CONFIG_MEMORYSTATUS extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async); @@ -350,9 +352,9 @@ uint32_t vm_pageout_memorystatus_fb_factor_dr = 2; */ void vm_pageout_object_terminate( - vm_object_t object) + vm_object_t object) { - vm_object_t shadow_object; + vm_object_t shadow_object; /* * Deal with the deallocation (last reference) of a pageout object @@ -365,8 +367,8 @@ vm_pageout_object_terminate( vm_object_lock(shadow_object); while (!vm_page_queue_empty(&object->memq)) { - vm_page_t p, m; - vm_object_offset_t offset; + vm_page_t p, m; + vm_object_offset_t offset; p = (vm_page_t) vm_page_queue_first(&object->memq); @@ -381,21 +383,23 @@ vm_pageout_object_terminate( p = VM_PAGE_NULL; m = vm_page_lookup(shadow_object, - offset + object->vo_shadow_offset); + offset + object->vo_shadow_offset); - if(m == VM_PAGE_NULL) + if (m == VM_PAGE_NULL) { continue; + } assert((m->vmp_dirty) || (m->vmp_precious) || - (m->vmp_busy && m->vmp_cleaning)); + (m->vmp_busy && m->vmp_cleaning)); /* * Handle the trusted pager throttle. * Also decrement the burst throttle (if external). */ vm_page_lock_queues(); - if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) + if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) { vm_pageout_throttle_up(m); + } /* * Handle the "target" page(s). These pages are to be freed if @@ -428,11 +432,11 @@ vm_pageout_object_terminate( } if (m->vmp_dirty) { - vm_page_unwire(m, TRUE); /* reactivates */ + vm_page_unwire(m, TRUE); /* reactivates */ VM_STAT_INCR(reactivations); PAGE_WAKEUP_DONE(m); } else { - vm_page_free(m); /* clears busy, etc. */ + vm_page_free(m); /* clears busy, etc. */ } vm_page_unlock_queues(); continue; @@ -444,10 +448,11 @@ vm_pageout_object_terminate( * page, so make it active. 
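 *
 * (That is: a page that was referenced while it was being cleaned in
 *  place goes back to the active queue, while an unreferenced one is
 *  deactivated so it can age out through the inactive queue.)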
*/ if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) { - if (m->vmp_reference) + if (m->vmp_reference) { vm_page_activate(m); - else + } else { vm_page_deactivate(m); + } } if (m->vmp_overwriting) { /* @@ -473,8 +478,8 @@ vm_pageout_object_terminate( * Occurs when the original page was wired * at the time of the list request */ - assert(VM_PAGE_WIRED(m)); - vm_page_unwire(m, TRUE); /* reactivates */ + assert(VM_PAGE_WIRED(m)); + vm_page_unwire(m, TRUE); /* reactivates */ } m->vmp_overwriting = FALSE; } else { @@ -514,10 +519,10 @@ vm_pageout_object_terminate( */ static void vm_pageclean_setup( - vm_page_t m, - vm_page_t new_m, - vm_object_t new_object, - vm_object_offset_t new_offset) + vm_page_t m, + vm_page_t new_m, + vm_object_t new_object, + vm_object_offset_t new_offset) { assert(!m->vmp_busy); #if 0 @@ -526,8 +531,8 @@ vm_pageclean_setup( XPR(XPR_VM_PAGEOUT, "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n", - VM_PAGE_OBJECT(m), m->vmp_offset, m, - new_m, new_offset); + VM_PAGE_OBJECT(m), m->vmp_offset, m, + new_m, new_offset); pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m)); @@ -578,15 +583,15 @@ vm_pageclean_setup( */ void vm_pageout_initialize_page( - vm_page_t m) + vm_page_t m) { - vm_object_t object; - vm_object_offset_t paging_offset; - memory_object_t pager; + vm_object_t object; + vm_object_offset_t paging_offset; + memory_object_t pager; XPR(XPR_VM_PAGEOUT, - "vm_pageout_initialize_page, page 0x%X\n", - m, 0, 0, 0, 0); + "vm_pageout_initialize_page, page 0x%X\n", + m, 0, 0, 0, 0); assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); @@ -691,13 +696,13 @@ vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT]; void vm_pageout_cluster(vm_page_t m) { - vm_object_t object = VM_PAGE_OBJECT(m); - struct vm_pageout_queue *q; + vm_object_t object = VM_PAGE_OBJECT(m); + struct vm_pageout_queue *q; XPR(XPR_VM_PAGEOUT, - "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n", - object, m->vmp_offset, m, 0, 0); + "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n", + object, m->vmp_offset, m, 0, 0); VM_PAGE_CHECK(m); LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); @@ -720,9 +725,10 @@ vm_pageout_cluster(vm_page_t m) m->vmp_busy = TRUE; - q = &vm_pageout_queue_internal; - } else - q = &vm_pageout_queue_external; + q = &vm_pageout_queue_internal; + } else { + q = &vm_pageout_queue_external; + } /* * pgo_laundry count is tied to the laundry bit @@ -731,7 +737,7 @@ vm_pageout_cluster(vm_page_t m) q->pgo_laundry++; m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q; - vm_page_queue_enter(&q->pgo_pending, m, vm_page_t, vmp_pageq); + vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq); if (q->pgo_idle == TRUE) { q->pgo_idle = FALSE; @@ -750,49 +756,48 @@ vm_pageout_cluster(vm_page_t m) */ void vm_pageout_throttle_up( - vm_page_t m) + vm_page_t m) { - struct vm_pageout_queue *q; - vm_object_t m_object; - - m_object = VM_PAGE_OBJECT(m); - - assert(m_object != VM_OBJECT_NULL); - assert(m_object != kernel_object); + struct vm_pageout_queue *q; + vm_object_t m_object; - LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - vm_object_lock_assert_exclusive(m_object); + m_object = VM_PAGE_OBJECT(m); - if (m_object->internal == TRUE) - q = &vm_pageout_queue_internal; - else - q = &vm_pageout_queue_external; + assert(m_object != VM_OBJECT_NULL); + assert(m_object != kernel_object); - if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) { + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + vm_object_lock_assert_exclusive(m_object); - vm_page_queue_remove(&q->pgo_pending, 
m, vm_page_t, vmp_pageq); - m->vmp_q_state = VM_PAGE_NOT_ON_Q; + if (m_object->internal == TRUE) { + q = &vm_pageout_queue_internal; + } else { + q = &vm_pageout_queue_external; + } - VM_PAGE_ZERO_PAGEQ_ENTRY(m); + if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) { + vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq); + m->vmp_q_state = VM_PAGE_NOT_ON_Q; - vm_object_activity_end(m_object); + VM_PAGE_ZERO_PAGEQ_ENTRY(m); - VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1); - } - if (m->vmp_laundry == TRUE) { + vm_object_activity_end(m_object); - m->vmp_laundry = FALSE; - q->pgo_laundry--; + VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1); + } + if (m->vmp_laundry == TRUE) { + m->vmp_laundry = FALSE; + q->pgo_laundry--; - if (q->pgo_throttled == TRUE) { - q->pgo_throttled = FALSE; - thread_wakeup((event_t) &q->pgo_laundry); - } - if (q->pgo_draining == TRUE && q->pgo_laundry == 0) { - q->pgo_draining = FALSE; - thread_wakeup((event_t) (&q->pgo_laundry+1)); - } - VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1); + if (q->pgo_throttled == TRUE) { + q->pgo_throttled = FALSE; + thread_wakeup((event_t) &q->pgo_laundry); + } + if (q->pgo_draining == TRUE && q->pgo_laundry == 0) { + q->pgo_draining = FALSE; + thread_wakeup((event_t) (&q->pgo_laundry + 1)); + } + VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1); } } @@ -800,22 +805,22 @@ vm_pageout_throttle_up( static void vm_pageout_throttle_up_batch( struct vm_pageout_queue *q, - int batch_cnt) + int batch_cnt) { - LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt); + VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt); - q->pgo_laundry -= batch_cnt; + q->pgo_laundry -= batch_cnt; - if (q->pgo_throttled == TRUE) { - q->pgo_throttled = FALSE; - thread_wakeup((event_t) &q->pgo_laundry); - } - if (q->pgo_draining == TRUE && q->pgo_laundry == 0) { - q->pgo_draining = FALSE; - thread_wakeup((event_t) (&q->pgo_laundry+1)); - } + if (q->pgo_throttled == TRUE) { + q->pgo_throttled = FALSE; + thread_wakeup((event_t) &q->pgo_laundry); + } + if (q->pgo_draining == TRUE && q->pgo_laundry == 0) { + q->pgo_draining = FALSE; + thread_wakeup((event_t) (&q->pgo_laundry + 1)); + } } @@ -838,41 +843,41 @@ vm_pageout_throttle_up_batch( * moment in time. 
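 *
 * Concretely: with samples taken roughly 8 times per second (the same
 * 8-per-second granularity mach_vm_pressure_monitor() assumes below),
 * (30 * 8) + 1 = 241 slots cover about a 30-second window on
 * DEVELOPMENT/DEBUG kernels, and (1 * 8) + 1 = 9 slots cover about one
 * second otherwise; the extra slot is the in-progress "now" entry.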
*/ #if DEVELOPMENT || DEBUG -#define VM_PAGEOUT_STAT_SIZE (30 * 8) + 1 +#define VM_PAGEOUT_STAT_SIZE (30 * 8) + 1 #else -#define VM_PAGEOUT_STAT_SIZE (1 * 8) + 1 +#define VM_PAGEOUT_STAT_SIZE (1 * 8) + 1 #endif struct vm_pageout_stat { - unsigned long vm_page_active_count; - unsigned long vm_page_speculative_count; - unsigned long vm_page_inactive_count; - unsigned long vm_page_anonymous_count; + unsigned long vm_page_active_count; + unsigned long vm_page_speculative_count; + unsigned long vm_page_inactive_count; + unsigned long vm_page_anonymous_count; - unsigned long vm_page_free_count; - unsigned long vm_page_wire_count; - unsigned long vm_page_compressor_count; + unsigned long vm_page_free_count; + unsigned long vm_page_wire_count; + unsigned long vm_page_compressor_count; - unsigned long vm_page_pages_compressed; - unsigned long vm_page_pageable_internal_count; - unsigned long vm_page_pageable_external_count; - unsigned long vm_page_xpmapped_external_count; + unsigned long vm_page_pages_compressed; + unsigned long vm_page_pageable_internal_count; + unsigned long vm_page_pageable_external_count; + unsigned long vm_page_xpmapped_external_count; - unsigned int pages_grabbed; - unsigned int pages_freed; + unsigned int pages_grabbed; + unsigned int pages_freed; unsigned int pages_compressed; unsigned int pages_grabbed_by_compressor; unsigned int failed_compressions; - unsigned int pages_evicted; - unsigned int pages_purged; + unsigned int pages_evicted; + unsigned int pages_purged; unsigned int considered; - unsigned int considered_bq_internal; - unsigned int considered_bq_external; + unsigned int considered_bq_internal; + unsigned int considered_bq_external; - unsigned int skipped_external; - unsigned int filecache_min_reactivations; + unsigned int skipped_external; + unsigned int filecache_min_reactivations; unsigned int freed_speculative; unsigned int freed_cleaned; @@ -880,19 +885,19 @@ struct vm_pageout_stat { unsigned int freed_external; unsigned int cleaned_dirty_external; - unsigned int cleaned_dirty_internal; + unsigned int cleaned_dirty_internal; - unsigned int inactive_referenced; - unsigned int inactive_nolock; - unsigned int reactivation_limit_exceeded; - unsigned int forced_inactive_reclaim; + unsigned int inactive_referenced; + unsigned int inactive_nolock; + unsigned int reactivation_limit_exceeded; + unsigned int forced_inactive_reclaim; unsigned int throttled_internal_q; unsigned int throttled_external_q; - unsigned int phantom_ghosts_found; - unsigned int phantom_ghosts_added; -} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, }; + unsigned int phantom_ghosts_found; + unsigned int phantom_ghosts_added; +} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }; unsigned int vm_pageout_stat_now = 0; @@ -922,12 +927,12 @@ record_memory_pressure(void) #endif /* VM_PAGE_BUCKETS_CHECK */ vm_pageout_state.vm_memory_pressure = - vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative + - vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned + - vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal + - vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external; + vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative + + vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned + + 
vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal + + vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external; - commpage_set_memory_pressure( (unsigned int)vm_pageout_state.vm_memory_pressure ); + commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure ); /* move "now" forward */ vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now); @@ -972,14 +977,14 @@ mach_vm_ctl_page_free_wanted(void) kern_return_t mach_vm_pressure_monitor( - boolean_t wait_for_pressure, - unsigned int nsecs_monitored, - unsigned int *pages_reclaimed_p, - unsigned int *pages_wanted_p) + boolean_t wait_for_pressure, + unsigned int nsecs_monitored, + unsigned int *pages_reclaimed_p, + unsigned int *pages_wanted_p) { - wait_result_t wr; - unsigned int vm_pageout_then, vm_pageout_now; - unsigned int pages_reclaimed; + wait_result_t wr; + unsigned int vm_pageout_then, vm_pageout_now; + unsigned int pages_reclaimed; unsigned int units_of_monitor; units_of_monitor = 8 * nsecs_monitored; @@ -994,7 +999,7 @@ mach_vm_pressure_monitor( /* wait until there's memory pressure */ while (vm_page_free_count >= vm_page_free_target) { wr = assert_wait((event_t) &vm_page_free_wanted, - THREAD_INTERRUPTIBLE); + THREAD_INTERRUPTIBLE); if (wr == THREAD_WAITING) { wr = thread_block(THREAD_CONTINUE_NULL); } @@ -1026,11 +1031,11 @@ mach_vm_pressure_monitor( vm_pageout_now = vm_pageout_stat_now; pages_reclaimed = 0; for (vm_pageout_then = - VM_PAGEOUT_STAT_BEFORE(vm_pageout_now); - vm_pageout_then != vm_pageout_now && - units_of_monitor-- != 0; - vm_pageout_then = - VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) { + VM_PAGEOUT_STAT_BEFORE(vm_pageout_now); + vm_pageout_then != vm_pageout_now && + units_of_monitor-- != 0; + vm_pageout_then = + VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) { pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative; pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned; pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal; @@ -1052,7 +1057,7 @@ vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int); * condition variable used to make sure there is * only a single sweep going on at a time */ -boolean_t vm_pageout_disconnect_all_pages_active = FALSE; +boolean_t vm_pageout_disconnect_all_pages_active = FALSE; void @@ -1078,23 +1083,22 @@ vm_pageout_disconnect_all_pages() void vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount) { - vm_page_t m; - vm_object_t t_object = NULL; - vm_object_t l_object = NULL; - vm_object_t m_object = NULL; - int delayed_unlock = 0; - int try_failed_count = 0; - int disconnected_count = 0; - int paused_count = 0; - int object_locked_count = 0; + vm_page_t m; + vm_object_t t_object = NULL; + vm_object_t l_object = NULL; + vm_object_t m_object = NULL; + int delayed_unlock = 0; + int try_failed_count = 0; + int disconnected_count = 0; + int paused_count = 0; + int object_locked_count = 0; KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START, - q, qcount, 0, 0, 0); + q, qcount, 0, 0, 0); vm_page_lock_queues(); while (qcount && !vm_page_queue_empty(q)) { - LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); m = (vm_page_t) vm_page_queue_first(q); @@ -1106,17 +1110,18 @@ vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount) * already got the lock */ if (m_object != l_object) { - /* + /* * the object associated with candidate page is * different 
from the one we were just working * with... dump the lock if we still own it */ - if (l_object != NULL) { - vm_object_unlock(l_object); + if (l_object != NULL) { + vm_object_unlock(l_object); l_object = NULL; } - if (m_object != t_object) + if (m_object != t_object) { try_failed_count = 0; + } /* * Try to lock object; since we've alread got the @@ -1125,8 +1130,7 @@ vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount) * to allow the owner of the object lock a chance to * run... */ - if ( !vm_object_lock_try_scan(m_object)) { - + if (!vm_object_lock_try_scan(m_object)) { if (try_failed_count > 20) { goto reenter_pg_on_q; } @@ -1144,27 +1148,25 @@ vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount) l_object = m_object; } - if ( !m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) { + if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) { /* * put it back on the head of its queue */ goto reenter_pg_on_q; } if (m->vmp_pmapped == TRUE) { - pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); disconnected_count++; } reenter_pg_on_q: - vm_page_queue_remove(q, m, vm_page_t, vmp_pageq); - vm_page_queue_enter(q, m, vm_page_t, vmp_pageq); + vm_page_queue_remove(q, m, vmp_pageq); + vm_page_queue_enter(q, m, vmp_pageq); qcount--; try_failed_count = 0; if (delayed_unlock++ > 128) { - if (l_object != NULL) { vm_object_unlock(l_object); l_object = NULL; @@ -1180,7 +1182,7 @@ reenter_pg_on_q: vm_page_unlock_queues(); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END, - q, disconnected_count, object_locked_count, paused_count, 0); + q, disconnected_count, object_locked_count, paused_count, 0); } #endif @@ -1193,14 +1195,13 @@ vm_pageout_page_queue(vm_page_queue_head_t *, int); * condition variable used to make sure there is * only a single sweep going on at a time */ -boolean_t vm_pageout_anonymous_pages_active = FALSE; +boolean_t vm_pageout_anonymous_pages_active = FALSE; void vm_pageout_anonymous_pages() { if (VM_CONFIG_COMPRESSOR_IS_PRESENT) { - vm_page_lock_queues(); if (vm_pageout_anonymous_pages_active == TRUE) { @@ -1214,8 +1215,9 @@ vm_pageout_anonymous_pages() vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count); vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count); - if (VM_CONFIG_SWAP_IS_PRESENT) + if (VM_CONFIG_SWAP_IS_PRESENT) { vm_consider_swapping(); + } vm_page_lock_queues(); vm_pageout_anonymous_pages_active = FALSE; @@ -1227,16 +1229,16 @@ vm_pageout_anonymous_pages() void vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) { - vm_page_t m; - vm_object_t t_object = NULL; - vm_object_t l_object = NULL; - vm_object_t m_object = NULL; - int delayed_unlock = 0; - int try_failed_count = 0; - int refmod_state; - int pmap_options; - struct vm_pageout_queue *iq; - ppnum_t phys_page; + vm_page_t m; + vm_object_t t_object = NULL; + vm_object_t l_object = NULL; + vm_object_t m_object = NULL; + int delayed_unlock = 0; + int try_failed_count = 0; + int refmod_state; + int pmap_options; + struct vm_pageout_queue *iq; + ppnum_t phys_page; iq = &vm_pageout_queue_internal; @@ -1244,13 +1246,11 @@ vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) vm_page_lock_queues(); while (qcount && !vm_page_queue_empty(q)) { - LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); if 
(VM_PAGE_Q_THROTTLED(iq)) { - - if (l_object != NULL) { - vm_object_unlock(l_object); + if (l_object != NULL) { + vm_object_unlock(l_object); l_object = NULL; } iq->pgo_draining = TRUE; @@ -1273,20 +1273,22 @@ vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) * already got the lock */ if (m_object != l_object) { - if ( !m_object->internal) + if (!m_object->internal) { goto reenter_pg_on_q; + } - /* + /* * the object associated with candidate page is * different from the one we were just working * with... dump the lock if we still own it */ - if (l_object != NULL) { - vm_object_unlock(l_object); + if (l_object != NULL) { + vm_object_unlock(l_object); l_object = NULL; } - if (m_object != t_object) + if (m_object != t_object) { try_failed_count = 0; + } /* * Try to lock object; since we've alread got the @@ -1295,8 +1297,7 @@ vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) * to allow the owner of the object lock a chance to * run... */ - if ( !vm_object_lock_try_scan(m_object)) { - + if (!vm_object_lock_try_scan(m_object)) { if (try_failed_count > 20) { goto reenter_pg_on_q; } @@ -1310,7 +1311,7 @@ vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) } l_object = m_object; } - if ( !m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) { + if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) { /* * page is not to be cleaned * put it back on the head of its queue @@ -1322,10 +1323,11 @@ vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) { refmod_state = pmap_get_refmod(phys_page); - if (refmod_state & VM_MEM_REFERENCED) - m->vmp_reference = TRUE; + if (refmod_state & VM_MEM_REFERENCED) { + m->vmp_reference = TRUE; + } if (refmod_state & VM_MEM_MODIFIED) { - SET_PAGE_DIRTY(m, FALSE); + SET_PAGE_DIRTY(m, FALSE); } } if (m->vmp_reference == TRUE) { @@ -1345,7 +1347,7 @@ vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) } } - if ( !m->vmp_dirty && !m->vmp_precious) { + if (!m->vmp_dirty && !m->vmp_precious) { vm_page_unlock_queues(); VM_PAGE_FREE(m); vm_page_lock_queues(); @@ -1353,22 +1355,22 @@ vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) goto next_pg; } - if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) { - + if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) { if (!m_object->pager_initialized) { - vm_page_unlock_queues(); vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE); - if (!m_object->pager_initialized) + if (!m_object->pager_initialized) { vm_object_compressor_pager_create(m_object); + } vm_page_lock_queues(); delayed_unlock = 0; } - if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) + if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) { goto reenter_pg_on_q; + } /* * vm_object_compressor_pager_create will drop the object lock * which means 'm' may no longer be valid to use @@ -1389,14 +1391,13 @@ vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount) goto next_pg; reenter_pg_on_q: - vm_page_queue_remove(q, m, vm_page_t, vmp_pageq); - vm_page_queue_enter(q, m, vm_page_t, vmp_pageq); + vm_page_queue_remove(q, m, vmp_pageq); + vm_page_queue_enter(q, m, vmp_pageq); next_pg: qcount--; try_failed_count = 0; if (delayed_unlock++ > 128) { - if (l_object != NULL) { vm_object_unlock(l_object); l_object = NULL; @@ -1419,37 
+1420,37 @@ next_pg: */ extern void vm_pageout_io_throttle(void); -#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj) \ - MACRO_BEGIN \ - /* \ - * If a "reusable" page somehow made it back into \ - * the active queue, it's been re-used and is not \ - * quite re-usable. \ - * If the VM object was "all_reusable", consider it \ - * as "all re-used" instead of converting it to \ - * "partially re-used", which could be expensive. \ - */ \ - assert(VM_PAGE_OBJECT((m)) == (obj)); \ - if ((m)->vmp_reusable || \ - (obj)->all_reusable) { \ - vm_object_reuse_pages((obj), \ - (m)->vmp_offset, \ - (m)->vmp_offset + PAGE_SIZE_64, \ - FALSE); \ - } \ - MACRO_END - - -#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 64 -#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024 - -#define FCS_IDLE 0 -#define FCS_DELAYED 1 -#define FCS_DEADLOCK_DETECTED 2 +#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj) \ + MACRO_BEGIN \ + /* \ + * If a "reusable" page somehow made it back into \ + * the active queue, it's been re-used and is not \ + * quite re-usable. \ + * If the VM object was "all_reusable", consider it \ + * as "all re-used" instead of converting it to \ + * "partially re-used", which could be expensive. \ + */ \ + assert(VM_PAGE_OBJECT((m)) == (obj)); \ + if ((m)->vmp_reusable || \ + (obj)->all_reusable) { \ + vm_object_reuse_pages((obj), \ + (m)->vmp_offset, \ + (m)->vmp_offset + PAGE_SIZE_64, \ + FALSE); \ + } \ + MACRO_END + + +#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT 64 +#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX 1024 + +#define FCS_IDLE 0 +#define FCS_DELAYED 1 +#define FCS_DEADLOCK_DETECTED 2 struct flow_control { - int state; - mach_timespec_t ts; + int state; + mach_timespec_t ts; }; @@ -1459,7 +1460,7 @@ uint64_t vm_pageout_rejected_bq_external = 0; uint64_t vm_pageout_skipped_bq_internal = 0; #endif -#define ANONS_GRABBED_LIMIT 2 +#define ANONS_GRABBED_LIMIT 2 #if 0 @@ -1467,9 +1468,9 @@ static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *); #endif static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int); -#define VM_PAGEOUT_PB_NO_ACTION 0 -#define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1 -#define VM_PAGEOUT_PB_THREAD_YIELD 2 +#define VM_PAGEOUT_PB_NO_ACTION 0 +#define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1 +#define VM_PAGEOUT_PB_THREAD_YIELD 2 #if 0 @@ -1485,8 +1486,8 @@ vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *loca vm_page_free_list(*local_freeq, TRUE); - VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist,VM_PAGEOUT_FREELIST, DBG_FUNC_END, - vm_page_free_count, *local_freed, 0, 1); + VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END, + vm_page_free_count, *local_freed, 0, 1); *local_freeq = NULL; *local_freed = 0; @@ -1502,7 +1503,7 @@ vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *loca static void vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock, - vm_page_t *local_freeq, int *local_freed, int action) + vm_page_t *local_freeq, int *local_freed, int action) { vm_page_unlock_queues(); @@ -1511,7 +1512,6 @@ vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock, *object = NULL; } if (*local_freeq) { - vm_page_free_list(*local_freeq, TRUE); *local_freeq = NULL; @@ -1520,7 +1520,6 @@ vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock, *delayed_unlock = 1; switch (action) { - case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER: vm_consider_waking_compactor_swapper(); break; @@ -1544,9 +1543,10 @@ extern 
uint32_t c_segment_pages_compressed; extern uint64_t shared_region_pager_reclaimed; extern struct memory_object_pager_ops shared_region_pager_ops; -void update_vm_info(void) +void +update_vm_info(void) { - uint64_t tmp; + uint64_t tmp; vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count; vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count; @@ -1597,16 +1597,15 @@ void update_vm_info(void) if (vm_pageout_stats[vm_pageout_stat_now].considered) { - - tmp = vm_pageout_vminfo.vm_pageout_pages_evicted; + tmp = vm_pageout_vminfo.vm_pageout_pages_evicted; vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted); last.vm_pageout_pages_evicted = tmp; - tmp = vm_pageout_vminfo.vm_pageout_pages_purged; + tmp = vm_pageout_vminfo.vm_pageout_pages_purged; vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged); last.vm_pageout_pages_purged = tmp; - tmp = vm_pageout_vminfo.vm_pageout_freed_speculative; + tmp = vm_pageout_vminfo.vm_pageout_freed_speculative; vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative); last.vm_pageout_freed_speculative = tmp; @@ -1672,72 +1671,70 @@ void update_vm_info(void) } KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count, - vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count, - vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count, - vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count, - 0); + vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count, + vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count, + vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count, + vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count, + 0); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count, - vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count, - vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count, - 0, - 0); + vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count, + vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count, + vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count, + 0, + 0); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed, - vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count, - vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count, - vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count, - 0); + vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed, + vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count, + vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count, + vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count, + 0); if (vm_pageout_stats[vm_pageout_stat_now].considered || vm_pageout_stats[vm_pageout_stat_now].pages_compressed || vm_pageout_stats[vm_pageout_stat_now].failed_compressions) { - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].considered, - vm_pageout_stats[vm_pageout_stat_now].freed_speculative, - vm_pageout_stats[vm_pageout_stat_now].freed_external, - vm_pageout_stats[vm_pageout_stat_now].inactive_referenced, - 0); 
+ vm_pageout_stats[vm_pageout_stat_now].considered, + vm_pageout_stats[vm_pageout_stat_now].freed_speculative, + vm_pageout_stats[vm_pageout_stat_now].freed_external, + vm_pageout_stats[vm_pageout_stat_now].inactive_referenced, + 0); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].throttled_external_q, - vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external, - vm_pageout_stats[vm_pageout_stat_now].freed_cleaned, - vm_pageout_stats[vm_pageout_stat_now].inactive_nolock, - 0); + vm_pageout_stats[vm_pageout_stat_now].throttled_external_q, + vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external, + vm_pageout_stats[vm_pageout_stat_now].freed_cleaned, + vm_pageout_stats[vm_pageout_stat_now].inactive_nolock, + 0); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q, - vm_pageout_stats[vm_pageout_stat_now].pages_compressed, - vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor, - vm_pageout_stats[vm_pageout_stat_now].skipped_external, - 0); + vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q, + vm_pageout_stats[vm_pageout_stat_now].pages_compressed, + vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor, + vm_pageout_stats[vm_pageout_stat_now].skipped_external, + 0); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded, - vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim, - vm_pageout_stats[vm_pageout_stat_now].failed_compressions, - vm_pageout_stats[vm_pageout_stat_now].freed_internal, - 0); + vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded, + vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim, + vm_pageout_stats[vm_pageout_stat_now].failed_compressions, + vm_pageout_stats[vm_pageout_stat_now].freed_internal, + 0); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal, - vm_pageout_stats[vm_pageout_stat_now].considered_bq_external, - vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations, - vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal, - 0); - + vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal, + vm_pageout_stats[vm_pageout_stat_now].considered_bq_external, + vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations, + vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal, + 0); } KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE, - vm_pageout_stats[vm_pageout_stat_now].pages_grabbed, - vm_pageout_stats[vm_pageout_stat_now].pages_freed, - vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found, - vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added, - 0); + vm_pageout_stats[vm_pageout_stat_now].pages_grabbed, + vm_pageout_stats[vm_pageout_stat_now].pages_freed, + vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found, + vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added, + 0); record_memory_pressure(); } @@ -1747,7 +1744,7 @@ extern boolean_t hibernation_vmqueues_inspection; void vm_page_balance_inactive(int max_to_move) { - vm_page_t m; + vm_page_t m; LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); @@ -1763,12 +1760,11 @@ vm_page_balance_inactive(int max_to_move) return; } vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count + - vm_page_inactive_count + 
- vm_page_speculative_count); + vm_page_inactive_count + + vm_page_speculative_count); while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) { - - VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1); + VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1); m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active); @@ -1790,8 +1786,9 @@ vm_page_balance_inactive(int max_to_move) * in the past (TLB caches don't hang around for very long), and of course could just as easily * have happened before we moved the page */ - if (m->vmp_pmapped == TRUE) - pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL); + if (m->vmp_pmapped == TRUE) { + pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL); + } /* * The page might be absent or busy, @@ -1818,45 +1815,45 @@ vm_pageout_scan(void) vm_page_t local_freeq = NULL; int local_freed = 0; int delayed_unlock; - int delayed_unlock_limit = 0; - int refmod_state = 0; - int vm_pageout_deadlock_target = 0; - struct vm_pageout_queue *iq; - struct vm_pageout_queue *eq; - struct vm_speculative_age_q *sq; - struct flow_control flow_control = { 0, { 0, 0 } }; - boolean_t inactive_throttled = FALSE; - mach_timespec_t ts; - unsigned int msecs = 0; - vm_object_t object = NULL; - uint32_t inactive_reclaim_run; - boolean_t exceeded_burst_throttle; - boolean_t grab_anonymous = FALSE; - boolean_t force_anonymous = FALSE; - boolean_t force_speculative_aging = FALSE; - int anons_grabbed = 0; - int page_prev_q_state = 0; + int delayed_unlock_limit = 0; + int refmod_state = 0; + int vm_pageout_deadlock_target = 0; + struct vm_pageout_queue *iq; + struct vm_pageout_queue *eq; + struct vm_speculative_age_q *sq; + struct flow_control flow_control = { 0, { 0, 0 } }; + boolean_t inactive_throttled = FALSE; + mach_timespec_t ts; + unsigned int msecs = 0; + vm_object_t object = NULL; + uint32_t inactive_reclaim_run; + boolean_t exceeded_burst_throttle; + boolean_t grab_anonymous = FALSE; + boolean_t force_anonymous = FALSE; + boolean_t force_speculative_aging = FALSE; + int anons_grabbed = 0; + int page_prev_q_state = 0; #if CONFIG_BACKGROUND_QUEUE - boolean_t page_from_bg_q = FALSE; + boolean_t page_from_bg_q = FALSE; #endif - int cache_evict_throttle = 0; - uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0; + int cache_evict_throttle = 0; + uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0; uint32_t inactive_external_count; - int force_purge = 0; + int force_purge = 0; int divisor; -#define DELAY_SPECULATIVE_AGE 1000 - int delay_speculative_age = 0; - vm_object_t m_object = VM_OBJECT_NULL; +#define DELAY_SPECULATIVE_AGE 1000 + int delay_speculative_age = 0; + vm_object_t m_object = VM_OBJECT_NULL; #if VM_PRESSURE_EVENTS vm_pressure_level_t pressure_level; #endif /* VM_PRESSURE_EVENTS */ VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START, - vm_pageout_vminfo.vm_pageout_freed_speculative, - vm_pageout_state.vm_pageout_inactive_clean, - vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, - vm_pageout_vminfo.vm_pageout_inactive_dirty_external); + vm_pageout_vminfo.vm_pageout_freed_speculative, + vm_pageout_state.vm_pageout_inactive_clean, + vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, + vm_pageout_vminfo.vm_pageout_inactive_dirty_external); flow_control.state = FCS_IDLE; iq = &vm_pageout_queue_internal; @@ -1864,7 +1861,7 @@ vm_pageout_scan(void) sq = 
&vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; - XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0); + XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0); /* Ask the pmap layer to return any pages it no longer needs. */ uint64_t pmap_wired_pages_freed = pmap_release_pages_fast(); @@ -1881,7 +1878,7 @@ vm_pageout_scan(void) */ reactivated_this_call = 0; reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count + - vm_page_inactive_count); + vm_page_inactive_count); inactive_reclaim_run = 0; vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count; @@ -1890,7 +1887,7 @@ vm_pageout_scan(void) * We must limit the rate at which we send pages to the pagers * so that we don't tie up too many pages in the I/O queues. * We implement a throttling mechanism using the laundry count - * to limit the number of pages outstanding to the default + * to limit the number of pages outstanding to the default * and external pagers. We can bypass the throttles and look * for clean pages if the pageout queues don't drain in a timely * fashion since this may indicate that the pageout paths are @@ -1904,26 +1901,29 @@ Restart: vm_page_anonymous_min = vm_page_inactive_target / 20; - if (vm_pageout_state.vm_page_speculative_percentage > 50) + if (vm_pageout_state.vm_page_speculative_percentage > 50) { vm_pageout_state.vm_page_speculative_percentage = 50; - else if (vm_pageout_state.vm_page_speculative_percentage <= 0) + } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) { vm_pageout_state.vm_page_speculative_percentage = 1; + } vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count + - vm_page_inactive_count); + vm_page_inactive_count); for (;;) { vm_page_t m; DTRACE_VM2(rev, int, 1, (uint64_t *), NULL); - if (vm_upl_wait_for_pages < 0) + if (vm_upl_wait_for_pages < 0) { vm_upl_wait_for_pages = 0; + } delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages; - if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) + if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) { delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX; + } #if CONFIG_SECLUDED_MEMORY /* @@ -1938,8 +1938,8 @@ Restart: * will later go to the inactive queue. 
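 *
 * [editor's note — an illustrative restatement, not part of the patch:
 *  the assert just below checks that the two secluded sub-counts
 *  partition the total, i.e.
 *
 *      vm_page_secluded_count_free + vm_page_secluded_count_inuse
 *          == vm_page_secluded_count
 *
 *  every page pulled off the secluded queue must come out of exactly
 *  one of those buckets, landing either on the local free list or at
 *  the head of the active queue.]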
*/ assert((vm_page_secluded_count_free + - vm_page_secluded_count_inuse) == - vm_page_secluded_count); + vm_page_secluded_count_inuse) == + vm_page_secluded_count); secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded); assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q); @@ -1948,14 +1948,14 @@ Restart: assert(!VM_PAGE_WIRED(secluded_page)); if (secluded_page->vmp_object == 0) { - /* transfer to free queue */ - assert(secluded_page->vmp_busy); + /* transfer to free queue */ + assert(secluded_page->vmp_busy); secluded_page->vmp_snext = local_freeq; local_freeq = secluded_page; local_freed++; } else { - /* transfer to head of active queue */ - vm_page_enqueue_active(secluded_page, FALSE); + /* transfer to head of active queue */ + vm_page_enqueue_active(secluded_page, FALSE); secluded_page = VM_PAGE_NULL; } } @@ -1970,17 +1970,16 @@ Restart: /********************************************************************** - * above this point we're playing with the active and secluded queues - * below this point we're playing with the throttling mechanisms - * and the inactive queue - **********************************************************************/ + * above this point we're playing with the active and secluded queues + * below this point we're playing with the throttling mechanisms + * and the inactive queue + **********************************************************************/ - if (vm_page_free_count + local_freed >= vm_page_free_target) - { + if (vm_page_free_count + local_freed >= vm_page_free_target) { vm_pageout_scan_wants_object = VM_OBJECT_NULL; vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed, - VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); + VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); /* * make sure the pageout I/O threads are running * throttled in case there are still requests @@ -1991,7 +1990,7 @@ Restart: */ vm_pageout_adjust_eq_iothrottle(eq, TRUE); - lck_mtx_lock(&vm_page_queue_free_lock); + lck_mtx_lock(&vm_page_queue_free_lock); if ((vm_page_free_count >= vm_page_free_target) && (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) { @@ -2003,13 +2002,13 @@ return_from_scan: assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL); VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE, - vm_pageout_state.vm_pageout_inactive, - vm_pageout_state.vm_pageout_inactive_used, 0, 0); + vm_pageout_state.vm_pageout_inactive, + vm_pageout_state.vm_pageout_inactive_used, 0, 0); VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END, - vm_pageout_vminfo.vm_pageout_freed_speculative, - vm_pageout_state.vm_pageout_inactive_clean, - vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, - vm_pageout_vminfo.vm_pageout_inactive_dirty_external); + vm_pageout_vminfo.vm_pageout_freed_speculative, + vm_pageout_state.vm_pageout_inactive_clean, + vm_pageout_vminfo.vm_pageout_inactive_dirty_internal, + vm_pageout_vminfo.vm_pageout_inactive_dirty_external); return; } @@ -2024,14 +2023,13 @@ return_from_scan: * the new memory situation. 
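 *
 * [editor's note, illustrative: the early-exit test above requires all
 *  three conditions
 *
 *      vm_page_free_count >= vm_page_free_target
 *      vm_page_free_wanted == 0
 *      vm_page_free_wanted_privileged == 0
 *
 *  to hold under vm_page_queue_free_lock before return_from_scan is
 *  taken; failing any one of them, the scan presses on below, starting
 *  with purgeable objects.]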
*/ - assert (available_for_purge>=0); + assert(available_for_purge >= 0); force_purge = 0; /* no force-purging */ #if VM_PRESSURE_EVENTS pressure_level = memorystatus_vm_pressure_level; if (pressure_level > kVMPressureNormal) { - if (pressure_level >= kVMPressureCritical) { force_purge = vm_pageout_state.memorystatus_purge_on_critical; } else if (pressure_level >= kVMPressureUrgent) { @@ -2043,9 +2041,8 @@ return_from_scan: #endif /* VM_PRESSURE_EVENTS */ if (available_for_purge || force_purge) { - - if (object != NULL) { - vm_object_unlock(object); + if (object != NULL) { + vm_object_unlock(object); object = NULL; } @@ -2053,7 +2050,7 @@ return_from_scan: VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0); if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) { - VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1); + VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1); VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0); memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END); continue; @@ -2063,25 +2060,25 @@ return_from_scan: } if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) { - /* + /* * try to pull pages from the aging bins... * see vm_page.h for an explanation of how * this mechanism works */ - struct vm_speculative_age_q *aq; - boolean_t can_steal = FALSE; + struct vm_speculative_age_q *aq; + boolean_t can_steal = FALSE; int num_scanned_queues; aq = &vm_page_queue_speculative[speculative_steal_index]; num_scanned_queues = 0; while (vm_page_queue_empty(&aq->age_q) && - num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) { + num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) { + speculative_steal_index++; - speculative_steal_index++; - - if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) - speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q; + if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) { + speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q; + } aq = &vm_page_queue_speculative[speculative_steal_index]; } @@ -2093,8 +2090,9 @@ return_from_scan: * that is not empty, even though * vm_page_speculative_count is not 0. 
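 *
 * [editor's sketch, roughly equivalent to the steal-index walk above —
 *  a bounded ring scan over the speculative age bins:
 *
 *      for (n = 0; n <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; n++) {
 *          aq = &vm_page_queue_speculative[speculative_steal_index];
 *          if (!vm_page_queue_empty(&aq->age_q))
 *              break;
 *          if (++speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
 *              speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
 *      }
 *
 *  the wrap-around means a stale index never prevents finding the one
 *  non-empty bin.]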
*/ - if (!vm_page_queue_empty(&sq->age_q)) - continue; + if (!vm_page_queue_empty(&sq->age_q)) { + continue; + } #if DEVELOPMENT || DEBUG panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count); #endif @@ -2104,15 +2102,15 @@ return_from_scan: continue; } - if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) - can_steal = TRUE; - else { + if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) { + can_steal = TRUE; + } else { if (!delay_speculative_age) { - mach_timespec_t ts_fully_aged; + mach_timespec_t ts_fully_aged; ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000; ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000) - * 1000 * NSEC_PER_USEC; + * 1000 * NSEC_PER_USEC; ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts); @@ -2122,27 +2120,29 @@ return_from_scan: ts.tv_sec = (unsigned int) sec; ts.tv_nsec = nsec; - if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) + if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) { can_steal = TRUE; - else + } else { delay_speculative_age++; + } } else { delay_speculative_age++; - if (delay_speculative_age == DELAY_SPECULATIVE_AGE) + if (delay_speculative_age == DELAY_SPECULATIVE_AGE) { delay_speculative_age = 0; + } } } - if (can_steal == TRUE) + if (can_steal == TRUE) { vm_page_speculate_ageit(aq); + } } force_speculative_aging = FALSE; if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) { + int pages_evicted; - int pages_evicted; - - if (object != NULL) { - vm_object_unlock(object); + if (object != NULL) { + vm_object_unlock(object); object = NULL; } KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0); @@ -2152,11 +2152,10 @@ return_from_scan: KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0); if (pages_evicted) { - vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted; VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE, - vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0); + vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0); memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE); /* @@ -2165,11 +2164,13 @@ return_from_scan: * and re-evaulate the memory situation */ continue; - } else + } else { cache_evict_throttle = 1000; + } } - if (cache_evict_throttle) + if (cache_evict_throttle) { cache_evict_throttle--; + } divisor = vm_pageout_state.vm_page_filecache_min_divisor; @@ -2185,24 +2186,26 @@ return_from_scan: * throttled queue (which isn't counted as available) which * effectively disables this filter */ - if (vm_compressor_low_on_space() || divisor == 0) + if (vm_compressor_low_on_space() || divisor == 0) { vm_pageout_state.vm_page_filecache_min = 0; - else - vm_pageout_state.vm_page_filecache_min = - ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor; + } else { + vm_pageout_state.vm_page_filecache_min = + ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor; + } #else - if (vm_compressor_out_of_space() || divisor == 0) + if (vm_compressor_out_of_space() || divisor == 0) { vm_pageout_state.vm_page_filecache_min = 0; - else { + } else { /* * don't let the filecache_min fall below the specified critical level */ - vm_pageout_state.vm_page_filecache_min = - ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor; + 
vm_pageout_state.vm_page_filecache_min = + ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor; } #endif - if (vm_page_free_count < (vm_page_free_reserved / 4)) + if (vm_page_free_count < (vm_page_free_reserved / 4)) { vm_pageout_state.vm_page_filecache_min = 0; + } exceeded_burst_throttle = FALSE; /* @@ -2216,48 +2219,45 @@ return_from_scan: vm_page_queue_empty(&vm_page_queue_anonymous) && vm_page_queue_empty(&vm_page_queue_cleaned) && vm_page_queue_empty(&sq->age_q)) { - VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1); + VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1); msecs = vm_pageout_state.vm_pageout_empty_wait; goto vm_pageout_scan_delay; - } else if (inactive_burst_count >= - MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle, - (vm_page_inactive_count + - vm_page_speculative_count))) { - VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1); + MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle, + (vm_page_inactive_count + + vm_page_speculative_count))) { + VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1); msecs = vm_pageout_state.vm_pageout_burst_wait; exceeded_burst_throttle = TRUE; goto vm_pageout_scan_delay; - } else if (VM_PAGE_Q_THROTTLED(iq) && - VM_DYNAMIC_PAGING_ENABLED()) { + VM_DYNAMIC_PAGING_ENABLED()) { clock_sec_t sec; clock_nsec_t nsec; - switch (flow_control.state) { - + switch (flow_control.state) { case FCS_IDLE: if ((vm_page_free_count + local_freed) < vm_page_free_target && vm_pageout_state.vm_restricted_to_single_processor == FALSE) { - /* + /* * since the compressor is running independently of vm_pageout_scan * let's not wait for it just yet... as long as we have a healthy supply * of filecache pages to work with, let's keep stealing those. */ - inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count; + inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count; if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min && (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) { - anons_grabbed = ANONS_GRABBED_LIMIT; + anons_grabbed = ANONS_GRABBED_LIMIT; VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1); goto consider_inactive; } } reset_deadlock_timer: - ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000; + ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000; ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC; - clock_get_system_nanotime(&sec, &nsec); + clock_get_system_nanotime(&sec, &nsec); flow_control.ts.tv_sec = (unsigned int) sec; flow_control.ts.tv_nsec = nsec; ADD_MACH_TIMESPEC(&flow_control.ts, &ts); @@ -2269,12 +2269,12 @@ reset_deadlock_timer: break; case FCS_DELAYED: - clock_get_system_nanotime(&sec, &nsec); + clock_get_system_nanotime(&sec, &nsec); ts.tv_sec = (unsigned int) sec; ts.tv_nsec = nsec; if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) { - /* + /* * the pageout thread for the default pager is potentially * deadlocked since the * default pager queue has been throttled for more than the @@ -2290,8 +2290,8 @@ reset_deadlock_timer: * stop moving pages and allow the system to run to see what * state it settles into. 
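 *
 * [editor's note — an illustrative expansion, not part of the patch:
 *  the window tested here was armed at reset_deadlock_timer above by
 *  converting the millisecond knob into a mach_timespec_t deadline,
 *
 *      ts.tv_sec  = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
 *      ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000)
 *                       * 1000 * NSEC_PER_USEC;
 *      ADD_MACH_TIMESPEC(&flow_control.ts, &ts);   // now + ts
 *
 *  so FCS_DELAYED escalates to FCS_DEADLOCK_DETECTED only once the
 *  current time reaches flow_control.ts.]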
*/ - vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief + - vm_page_free_wanted + vm_page_free_wanted_privileged; + vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief + + vm_page_free_wanted + vm_page_free_wanted_privileged; VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1); flow_control.state = FCS_DEADLOCK_DETECTED; thread_wakeup((event_t) &vm_pageout_garbage_collect); @@ -2307,16 +2307,16 @@ reset_deadlock_timer: break; case FCS_DEADLOCK_DETECTED: - if (vm_pageout_deadlock_target) - goto consider_inactive; + if (vm_pageout_deadlock_target) { + goto consider_inactive; + } goto reset_deadlock_timer; - } vm_pageout_scan_delay: vm_pageout_scan_wants_object = VM_OBJECT_NULL; vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed, - VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); + VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); if (vm_page_free_count >= vm_page_free_target) { /* @@ -2390,11 +2390,12 @@ vm_pageout_scan_delay: VM_CHECK_MEMORYSTATUS; - if (flow_control.state != FCS_IDLE) - VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1); + if (flow_control.state != FCS_IDLE) { + VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1); + } iq->pgo_throttled = TRUE; - assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC); + assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC); counter(c_vm_pageout_scan_block++); @@ -2403,21 +2404,22 @@ vm_pageout_scan_delay: assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL); VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START, - iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0); + iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0); memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START); thread_block(THREAD_CONTINUE_NULL); VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END, - iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0); + iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0); memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END); vm_page_lock_queues(); iq->pgo_throttled = FALSE; - if (loop_count >= vm_page_inactive_count) + if (loop_count >= vm_page_inactive_count) { loop_count = 0; + } inactive_burst_count = 0; goto Restart; @@ -2428,7 +2430,7 @@ vm_pageout_scan_delay: flow_control.state = FCS_IDLE; consider_inactive: vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count), - vm_pageout_inactive_external_forced_reactivate_limit); + vm_pageout_inactive_external_forced_reactivate_limit); loop_count++; inactive_burst_count++; vm_pageout_state.vm_pageout_inactive++; @@ -2437,7 +2439,6 @@ consider_inactive: * Choose a victim. */ while (1) { - #if CONFIG_BACKGROUND_QUEUE page_from_bg_q = FALSE; #endif /* CONFIG_BACKGROUND_QUEUE */ @@ -2472,15 +2473,16 @@ consider_inactive: assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q); - if (!m->vmp_dirty || force_anonymous == FALSE) - break; - else - m = NULL; + if (!m->vmp_dirty || force_anonymous == FALSE) { + break; + } else { + m = NULL; + } } #if CONFIG_BACKGROUND_QUEUE if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) { - vm_object_t bg_m_object = NULL; + vm_object_t bg_m_object = NULL; m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background); @@ -2498,20 +2500,19 @@ consider_inactive: * page. 
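 *
 * [editor's note, illustrative: the background queue is consulted here
 *  only when it is both enabled and over its target,
 *
 *      vm_page_background_mode != VM_PAGE_BG_DISABLED &&
 *      vm_page_background_count > vm_page_background_target
 *
 *  and, as the branch below shows, an internal bg page is still skipped
 *  whenever the compressor path is throttled, out of space, or the free
 *  pool is nearly exhausted (counted in vm_pageout_skipped_bq_internal).]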
*/ } else if (force_anonymous == FALSE || bg_m_object->internal) { - if (bg_m_object->internal && (VM_PAGE_Q_THROTTLED(iq) || - vm_compressor_out_of_space() == TRUE || - vm_page_free_count < (vm_page_free_reserved / 4))) { - - vm_pageout_skipped_bq_internal++; + vm_compressor_out_of_space() == TRUE || + vm_page_free_count < (vm_page_free_reserved / 4))) { + vm_pageout_skipped_bq_internal++; } else { page_from_bg_q = TRUE; - if (bg_m_object->internal) + if (bg_m_object->internal) { vm_pageout_vminfo.vm_pageout_considered_bq_internal++; - else + } else { vm_pageout_vminfo.vm_pageout_considered_bq_external++; + } break; } } @@ -2543,7 +2544,7 @@ consider_inactive: if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min) { if ((vm_page_pageable_external_count * - vm_pageout_memorystatus_fb_factor_dr) > + vm_pageout_memorystatus_fb_factor_dr) > (memorystatus_available_pages_critical * vm_pageout_memorystatus_fb_factor_nr)) { grab_anonymous = FALSE; @@ -2552,38 +2553,36 @@ consider_inactive: } } if (grab_anonymous) { - VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1); + VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1); } } #endif /* CONFIG_JETSAM */ want_anonymous: if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) { - - if ( !vm_page_queue_empty(&vm_page_queue_inactive) ) { + if (!vm_page_queue_empty(&vm_page_queue_inactive)) { m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q); anons_grabbed = 0; if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) { - - if ( !vm_page_queue_empty(&vm_page_queue_anonymous) ) { - if ((++reactivated_this_call % 100)) { - vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++; - goto must_activate_page; - } - /* - * steal 1% of the file backed pages even if - * we are under the limit that has been set - * for a healthy filecache - */ - } + if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { + if ((++reactivated_this_call % 100)) { + vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++; + goto must_activate_page; + } + /* + * steal 1% of the file backed pages even if + * we are under the limit that has been set + * for a healthy filecache + */ + } } break; } } - if ( !vm_page_queue_empty(&vm_page_queue_anonymous) ) { + if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q); @@ -2603,8 +2602,9 @@ want_anonymous: force_anonymous = FALSE; VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1); - if (!vm_page_queue_empty(&sq->age_q)) + if (!vm_page_queue_empty(&sq->age_q)) { goto done_with_inactivepage; + } if (vm_page_speculative_count) { force_speculative_aging = TRUE; @@ -2642,13 +2642,13 @@ want_anonymous: * already got the lock */ if (m_object != object) { - /* + /* * the object associated with candidate page is * different from the one we were just working * with... 
dump the lock if we still own it */ - if (object != NULL) { - vm_object_unlock(object); + if (object != NULL) { + vm_object_unlock(object); object = NULL; } /* @@ -2666,15 +2666,16 @@ want_anonymous: vm_pageout_vminfo.vm_pageout_inactive_nolock++; - if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) - VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1); + if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { + VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1); + } pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m)); m->vmp_reference = FALSE; - if ( !m_object->object_is_shared_cache) { - /* + if (!m_object->object_is_shared_cache) { + /* * don't apply this optimization if this is the shared cache * object, it's too easy to get rid of very hot and important * pages... @@ -2686,26 +2687,28 @@ want_anonymous: * is possible for the value to be a bit non-determistic, but that's ok * since it's only used as a hint */ - m_object->scan_collisions = 1; + m_object->scan_collisions = 1; } - if ( !vm_page_queue_empty(&vm_page_queue_cleaned)) + if (!vm_page_queue_empty(&vm_page_queue_cleaned)) { m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned); - else if ( !vm_page_queue_empty(&sq->age_q)) + } else if (!vm_page_queue_empty(&sq->age_q)) { m_want = (vm_page_t) vm_page_queue_first(&sq->age_q); - else if ( (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || - vm_page_queue_empty(&vm_page_queue_anonymous)) && - !vm_page_queue_empty(&vm_page_queue_inactive)) - m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); - else if ( !vm_page_queue_empty(&vm_page_queue_anonymous)) + } else if ((grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || + vm_page_queue_empty(&vm_page_queue_anonymous)) && + !vm_page_queue_empty(&vm_page_queue_inactive)) { + m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); + } else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) { m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); + } /* * this is the next object we're going to be interested in * try to make sure its available after the mutex_pause * returns control */ - if (m_want) + if (m_want) { vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want); + } goto requeue_page; } @@ -2721,22 +2724,25 @@ want_anonymous: * Put it back on the appropriate queue * */ - VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1); + VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1); - if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) - VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1); + if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { + VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1); + } requeue_page: - if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) + if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) { vm_page_enqueue_inactive(m, FALSE); - else + } else { vm_page_activate(m); + } #if CONFIG_BACKGROUND_QUEUE #if DEVELOPMENT || DEBUG if (page_from_bg_q == TRUE) { - if (m_object->internal) + if (m_object->internal) { vm_pageout_rejected_bq_internal++; - else + } else { vm_pageout_rejected_bq_external++; + } } #endif #endif @@ -2753,8 +2759,8 @@ requeue_page: * if (m->vmp_free_when_done && !m->vmp_cleaning) * an msync INVALIDATE is in progress... * this page has been marked for destruction - * after it has been cleaned, - * but not yet gathered into a UPL + * after it has been cleaned, + * but not yet gathered into a UPL * where 'cleaning' will be set... 
* just leave it off the paging queues * @@ -2776,17 +2782,17 @@ requeue_page: * dealt with */ if (m->vmp_absent || m->vmp_error || !object->alive) { - - if (m->vmp_absent) - VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1); - else if (!object->alive) - VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1); - else - VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1); + if (m->vmp_absent) { + VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1); + } else if (!object->alive) { + VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1); + } else { + VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1); + } reclaim_page: if (vm_pageout_deadlock_target) { - VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1); - vm_pageout_deadlock_target--; + VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1); + vm_pageout_deadlock_target--; } DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL); @@ -2813,28 +2819,30 @@ reclaim_page: * we'd normally do in vm_page_free_prepare_object * until 'vm_page_free_list' is called */ - if (m->vmp_tabled) + if (m->vmp_tabled) { vm_page_remove(m, TRUE); + } assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0); m->vmp_snext = local_freeq; local_freeq = m; local_freed++; - if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) + if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) { vm_pageout_vminfo.vm_pageout_freed_speculative++; - else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) + } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { vm_pageout_vminfo.vm_pageout_freed_cleaned++; - else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) + } else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) { vm_pageout_vminfo.vm_pageout_freed_internal++; - else + } else { vm_pageout_vminfo.vm_pageout_freed_external++; + } inactive_burst_count = 0; goto done_with_inactivepage; } if (object->copy == VM_OBJECT_NULL) { - /* + /* * No one else can have any interest in this page. * If this is an empty purgable object, the page can be * reclaimed even if dirty. @@ -2877,8 +2885,9 @@ reclaim_page: /* just stick it back on! 
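 *
 * [editor's aside, illustrative: contrast with the reclaim_page path
 *  above, where a freed page is chained onto a thread-local batch
 *
 *      m->vmp_snext = local_freeq;
 *      local_freeq  = m;
 *      local_freed++;
 *
 *  so that one vm_page_free_list(local_freeq, TRUE) call later amortizes
 *  the free-queue locking; a volatile-but-nonempty purgeable page takes
 *  the cheap route instead and is simply reactivated.]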
*/ reactivated_this_call++; - if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) - VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1); + if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { + VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1); + } goto reactivate_page; } @@ -2892,32 +2901,34 @@ reclaim_page: refmod_state = -1; if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) { - refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m)); + refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m)); - if (refmod_state & VM_MEM_REFERENCED) - m->vmp_reference = TRUE; - if (refmod_state & VM_MEM_MODIFIED) { + if (refmod_state & VM_MEM_REFERENCED) { + m->vmp_reference = TRUE; + } + if (refmod_state & VM_MEM_MODIFIED) { SET_PAGE_DIRTY(m, FALSE); } } - if (m->vmp_reference || m->vmp_dirty) { - /* deal with a rogue "reusable" page */ - VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object); - } + if (m->vmp_reference || m->vmp_dirty) { + /* deal with a rogue "reusable" page */ + VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object); + } divisor = vm_pageout_state.vm_page_xpmapped_min_divisor; - if (divisor == 0) - vm_pageout_state.vm_page_xpmapped_min = 0; - else - vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / divisor; + if (divisor == 0) { + vm_pageout_state.vm_page_xpmapped_min = 0; + } else { + vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / divisor; + } if (!m->vmp_no_cache && #if CONFIG_BACKGROUND_QUEUE page_from_bg_q == FALSE && #endif (m->vmp_reference || (m->vmp_xpmapped && !object->internal && - (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) { + (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) { /* * The page we pulled off the inactive list has * been referenced. 
It is possible for other @@ -2933,13 +2944,14 @@ reclaim_page: } else { uint32_t isinuse; - if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) - VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1); + if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { + VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1); + } vm_pageout_vminfo.vm_pageout_inactive_referenced++; reactivate_page: - if ( !object->internal && object->pager != MEMORY_OBJECT_NULL && - vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) { + if (!object->internal && object->pager != MEMORY_OBJECT_NULL && + vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) { /* * no explict mappings of this object exist * and it's not open via the filesystem @@ -2958,18 +2970,20 @@ must_activate_page: #if CONFIG_BACKGROUND_QUEUE #if DEVELOPMENT || DEBUG if (page_from_bg_q == TRUE) { - if (m_object->internal) + if (m_object->internal) { vm_pageout_rejected_bq_internal++; - else + } else { vm_pageout_rejected_bq_external++; + } } #endif #endif - if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) - VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1); + if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) { + VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1); + } vm_pageout_state.vm_pageout_inactive_used++; - goto done_with_inactivepage; + goto done_with_inactivepage; } /* * Make sure we call pmap_get_refmod() if it @@ -2984,9 +2998,9 @@ must_activate_page: } } - XPR(XPR_VM_PAGEOUT, - "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n", - object, m->vmp_offset, m, 0,0); + XPR(XPR_VM_PAGEOUT, + "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n", + object, m->vmp_offset, m, 0, 0); /* * we've got a candidate page to steal... @@ -2995,9 +3009,9 @@ must_activate_page: * preceding check for m->vmp_reference... if * we get here, then m->vmp_reference had to be * FALSE (or possibly "reactivate_limit" was - * exceeded), but in either case we called - * pmap_get_refmod() and updated both - * m->vmp_reference and m->vmp_dirty + * exceeded), but in either case we called + * pmap_get_refmod() and updated both + * m->vmp_reference and m->vmp_dirty * * if it's dirty or precious we need to * see if the target queue is throtttled @@ -3008,9 +3022,10 @@ must_activate_page: inactive_throttled = FALSE; if (m->vmp_dirty || m->vmp_precious) { - if (object->internal) { - if (VM_PAGE_Q_THROTTLED(iq)) - inactive_throttled = TRUE; + if (object->internal) { + if (VM_PAGE_Q_THROTTLED(iq)) { + inactive_throttled = TRUE; + } } else if (VM_PAGE_Q_THROTTLED(eq)) { inactive_throttled = TRUE; } @@ -3019,12 +3034,11 @@ throttle_inactive: if (!VM_DYNAMIC_PAGING_ENABLED() && object->internal && m->vmp_dirty && (object->purgable == VM_PURGABLE_DENY || - object->purgable == VM_PURGABLE_NONVOLATILE || - object->purgable == VM_PURGABLE_VOLATILE)) { + object->purgable == VM_PURGABLE_NONVOLATILE || + object->purgable == VM_PURGABLE_VOLATILE)) { vm_page_check_pageable_safe(m); assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q); - vm_page_queue_enter(&vm_page_queue_throttled, m, - vm_page_t, vmp_pageq); + vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq); m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; vm_page_throttled_count++; @@ -3034,9 +3048,8 @@ throttle_inactive: goto done_with_inactivepage; } if (inactive_throttled == TRUE) { - if (object->internal == FALSE) { - /* + /* * we need to break up the following potential deadlock case... 
* a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written. * b) The thread doing the writing is waiting for pages while holding the truncate lock @@ -3065,7 +3078,7 @@ throttle_inactive: vm_page_check_pageable_safe(m); assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q); - vm_page_queue_enter(&vm_page_queue_active, m, vm_page_t, vmp_pageq); + vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq); m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q; vm_page_active_count++; vm_page_pageable_external_count++; @@ -3086,15 +3099,15 @@ throttle_inactive: vm_page_unlock_queues(); VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START, - vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count); + vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count); - /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */ + /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */ if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) { - VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1); + VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1); } VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, - vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count); + vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count); vm_page_lock_queues(); delayed_unlock = 1; @@ -3139,7 +3152,7 @@ throttle_inactive: * 3) This page belongs to a file and hence will not be * sent into the compressor */ - if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE || + if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE || object->internal == FALSE) { pmap_options = 0; } else if (m->vmp_dirty || m->vmp_precious) { @@ -3161,11 +3174,11 @@ throttle_inactive: * modified. */ pmap_options = - PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; + PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; } refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), - pmap_options, - NULL); + pmap_options, + NULL); if (refmod_state & VM_MEM_MODIFIED) { SET_PAGE_DIRTY(m, FALSE); } @@ -3181,15 +3194,15 @@ throttle_inactive: * If it's clean and not precious, we can free the page. */ if (!m->vmp_dirty && !m->vmp_precious) { - vm_pageout_state.vm_pageout_inactive_clean++; /* * OK, at this point we have found a page we are going to free. */ #if CONFIG_PHANTOM_CACHE - if (!object->internal) + if (!object->internal) { vm_phantom_cache_add_ghost(m); + } #endif goto reclaim_page; } @@ -3201,14 +3214,16 @@ throttle_inactive: * disconnected here, we can make one final check. 
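 *
 * [editor's note — a hedged reading, not authoritative: the pmap_options
 *  chosen above encode three cases,
 *
 *      !VM_CONFIG_COMPRESSOR_IS_ACTIVE || !object->internal  ->  0
 *      m->vmp_dirty || m->vmp_precious  ->  compressor-bound (the branch
 *          elided by the hunk above; assumed to be PMAP_OPTIONS_COMPRESSOR)
 *      otherwise  ->  PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED
 *
 *  the last case defers the decision to pmap_disconnect_options(), which
 *  consults the hardware modified bit while it tears down the mappings.]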
*/ if (object->internal) { - if (VM_PAGE_Q_THROTTLED(iq)) + if (VM_PAGE_Q_THROTTLED(iq)) { inactive_throttled = TRUE; + } } else if (VM_PAGE_Q_THROTTLED(eq)) { inactive_throttled = TRUE; } - if (inactive_throttled == TRUE) + if (inactive_throttled == TRUE) { goto throttle_inactive; + } #if VM_PRESSURE_EVENTS #if CONFIG_JETSAM @@ -3227,13 +3242,15 @@ throttle_inactive: #endif /* CONFIG_JETSAM */ #endif /* VM_PRESSURE_EVENTS */ - if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) - VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1); + if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) { + VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1); + } - if (object->internal) + if (object->internal) { vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++; - else + } else { vm_pageout_vminfo.vm_pageout_inactive_dirty_external++; + } /* * internal pages will go to the compressor... @@ -3247,14 +3264,15 @@ throttle_inactive: done_with_inactivepage: if (delayed_unlock++ > delayed_unlock_limit) { - int freed = local_freed; + int freed = local_freed; - vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed, - VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); - if (freed == 0) - lck_mtx_yield(&vm_page_queue_lock); + vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed, + VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER); + if (freed == 0) { + lck_mtx_yield(&vm_page_queue_lock); + } } else if (vm_pageout_scan_wants_object) { - vm_page_unlock_queues(); + vm_page_unlock_queues(); mutex_pause(0); vm_page_lock_queues(); } @@ -3269,37 +3287,40 @@ void vm_page_free_reserve( int pages) { - int free_after_reserve; + int free_after_reserve; if (VM_CONFIG_COMPRESSOR_IS_PRESENT) { - - if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) + if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) { vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT; - else + } else { vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT); - + } } else { - if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) + if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) { vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT; - else + } else { vm_page_free_reserved += pages; + } } free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved; vm_page_free_min = vm_page_free_reserved + - VM_PAGE_FREE_MIN(free_after_reserve); + VM_PAGE_FREE_MIN(free_after_reserve); - if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) - vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT; + if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) { + vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT; + } vm_page_free_target = vm_page_free_reserved + - VM_PAGE_FREE_TARGET(free_after_reserve); + VM_PAGE_FREE_TARGET(free_after_reserve); - if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) - vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT; + if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) { + vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT; + } - if (vm_page_free_target < vm_page_free_min + 5) + if (vm_page_free_target < vm_page_free_min + 5) { vm_page_free_target = vm_page_free_min + 5; + } vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2); } @@ -3352,17 +3373,17 @@ vm_pageout_wait(uint64_t deadline) kern_return_t kr; lck_mtx_lock(&vm_page_queue_free_lock); - for 
(kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr); ) { + for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) { vm_pageout_waiter = TRUE; if (THREAD_AWAKENED != lck_mtx_sleep_deadline( - &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT, - (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) { + &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT, + (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) { kr = KERN_OPERATION_TIMED_OUT; } } lck_mtx_unlock(&vm_page_queue_free_lock); - return (kr); + return kr; } #endif /* !CONFIG_EMBEDDED */ @@ -3370,136 +3391,136 @@ vm_pageout_wait(uint64_t deadline) static void vm_pageout_iothread_external_continue(struct vm_pageout_queue *q) { - vm_page_t m = NULL; - vm_object_t object; + vm_page_t m = NULL; + vm_object_t object; vm_object_offset_t offset; - memory_object_t pager; + memory_object_t pager; /* On systems with a compressor, the external IO thread clears its * VM privileged bit to accommodate large allocations (e.g. bulk UPL * creation) */ - if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) + if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) { current_thread()->options &= ~TH_OPT_VMPRIV; + } vm_page_lockspin_queues(); - while ( !vm_page_queue_empty(&q->pgo_pending) ) { - - q->pgo_busy = TRUE; - vm_page_queue_remove_first(&q->pgo_pending, m, vm_page_t, vmp_pageq); - - assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q); - VM_PAGE_CHECK(m); - /* - * grab a snapshot of the object and offset this - * page is tabled in so that we can relookup this - * page after we've taken the object lock - these - * fields are stable while we hold the page queues lock - * but as soon as we drop it, there is nothing to keep - * this page in this object... we hold an activity_in_progress - * on this object which will keep it from terminating - */ - object = VM_PAGE_OBJECT(m); - offset = m->vmp_offset; - - m->vmp_q_state = VM_PAGE_NOT_ON_Q; - VM_PAGE_ZERO_PAGEQ_ENTRY(m); - - vm_page_unlock_queues(); - - vm_object_lock(object); - - m = vm_page_lookup(object, offset); - - if (m == NULL || - m->vmp_busy || m->vmp_cleaning || !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) { - /* - * it's either the same page that someone else has - * started cleaning (or it's finished cleaning or - * been put back on the pageout queue), or - * the page has been freed or we have found a - * new page at this offset... in all of these cases - * we merely need to release the activity_in_progress - * we took when we put the page on the pageout queue - */ - vm_object_activity_end(object); - vm_object_unlock(object); - - vm_page_lockspin_queues(); - continue; - } - pager = object->pager; - - if (pager == MEMORY_OBJECT_NULL) { - /* - * This pager has been destroyed by either - * memory_object_destroy or vm_object_destroy, and - * so there is nowhere for the page to go. - */ - if (m->vmp_free_when_done) { - /* - * Just free the page... VM_PAGE_FREE takes - * care of cleaning up all the state... - * including doing the vm_pageout_throttle_up - */ - VM_PAGE_FREE(m); - } else { - vm_page_lockspin_queues(); - - vm_pageout_throttle_up(m); - vm_page_activate(m); - - vm_page_unlock_queues(); - - /* - * And we are done with it. 
- */ - } - vm_object_activity_end(object); - vm_object_unlock(object); - - vm_page_lockspin_queues(); - continue; - } + while (!vm_page_queue_empty(&q->pgo_pending)) { + q->pgo_busy = TRUE; + vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq); + + assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q); + VM_PAGE_CHECK(m); + /* + * grab a snapshot of the object and offset this + * page is tabled in so that we can relookup this + * page after we've taken the object lock - these + * fields are stable while we hold the page queues lock + * but as soon as we drop it, there is nothing to keep + * this page in this object... we hold an activity_in_progress + * on this object which will keep it from terminating + */ + object = VM_PAGE_OBJECT(m); + offset = m->vmp_offset; + + m->vmp_q_state = VM_PAGE_NOT_ON_Q; + VM_PAGE_ZERO_PAGEQ_ENTRY(m); + + vm_page_unlock_queues(); + + vm_object_lock(object); + + m = vm_page_lookup(object, offset); + + if (m == NULL || m->vmp_busy || m->vmp_cleaning || + !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) { + /* + * it's either the same page that someone else has + * started cleaning (or it's finished cleaning or + * been put back on the pageout queue), or + * the page has been freed or we have found a + * new page at this offset... in all of these cases + * we merely need to release the activity_in_progress + * we took when we put the page on the pageout queue + */ + vm_object_activity_end(object); + vm_object_unlock(object); + + vm_page_lockspin_queues(); + continue; + } + pager = object->pager; + + if (pager == MEMORY_OBJECT_NULL) { + /* + * This pager has been destroyed by either + * memory_object_destroy or vm_object_destroy, and + * so there is nowhere for the page to go. + */ + if (m->vmp_free_when_done) { + /* + * Just free the page... VM_PAGE_FREE takes + * care of cleaning up all the state... + * including doing the vm_pageout_throttle_up + */ + VM_PAGE_FREE(m); + } else { + vm_page_lockspin_queues(); + + vm_pageout_throttle_up(m); + vm_page_activate(m); + + vm_page_unlock_queues(); + + /* + * And we are done with it. + */ + } + vm_object_activity_end(object); + vm_object_unlock(object); + + vm_page_lockspin_queues(); + continue; + } #if 0 - /* - * we don't hold the page queue lock - * so this check isn't safe to make - */ - VM_PAGE_CHECK(m); + /* + * we don't hold the page queue lock + * so this check isn't safe to make + */ + VM_PAGE_CHECK(m); #endif - /* - * give back the activity_in_progress reference we - * took when we queued up this page and replace it - * it with a paging_in_progress reference that will - * also hold the paging offset from changing and - * prevent the object from terminating - */ - vm_object_activity_end(object); - vm_object_paging_begin(object); - vm_object_unlock(object); - - /* - * Send the data to the pager. - * any pageout clustering happens there - */ - memory_object_data_return(pager, - m->vmp_offset + object->paging_offset, - PAGE_SIZE, - NULL, - NULL, - FALSE, - FALSE, - 0); - - vm_object_lock(object); - vm_object_paging_end(object); - vm_object_unlock(object); - - vm_pageout_io_throttle(); - - vm_page_lockspin_queues(); + /* + * give back the activity_in_progress reference we + * took when we queued up this page and replace it + * it with a paging_in_progress reference that will + * also hold the paging offset from changing and + * prevent the object from terminating + */ + vm_object_activity_end(object); + vm_object_paging_begin(object); + vm_object_unlock(object); + + /* + * Send the data to the pager. 
+ * any pageout clustering happens there + */ + memory_object_data_return(pager, + m->vmp_offset + object->paging_offset, + PAGE_SIZE, + NULL, + NULL, + FALSE, + FALSE, + 0); + + vm_object_lock(object); + vm_object_paging_end(object); + vm_object_unlock(object); + + vm_pageout_io_throttle(); + + vm_page_lockspin_queues(); } q->pgo_busy = FALSE; q->pgo_idle = TRUE; @@ -3512,10 +3533,10 @@ vm_pageout_iothread_external_continue(struct vm_pageout_queue *q) } -#define MAX_FREE_BATCH 32 +#define MAX_FREE_BATCH 32 uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by - * this thread. - */ + * this thread. + */ void @@ -3524,15 +3545,15 @@ void vm_pageout_iothread_internal_continue(struct cq *cq) { struct vm_pageout_queue *q; - vm_page_t m = NULL; - boolean_t pgo_draining; + vm_page_t m = NULL; + boolean_t pgo_draining; vm_page_t local_q; - int local_cnt; + int local_cnt; vm_page_t local_freeq = NULL; int local_freed = 0; - int local_batch_size; + int local_batch_size; #if DEVELOPMENT || DEBUG - int ncomps = 0; + int ncomps = 0; boolean_t marked_active = FALSE; #endif KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0); @@ -3541,11 +3562,12 @@ vm_pageout_iothread_internal_continue(struct cq *cq) local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2); #if RECORD_THE_COMPRESSED_DATA - if (q->pgo_laundry) + if (q->pgo_laundry) { c_compressed_record_init(); + } #endif while (TRUE) { - int pages_left_on_q = 0; + int pages_left_on_q = 0; local_cnt = 0; local_q = NULL; @@ -3567,9 +3589,8 @@ vm_pageout_iothread_internal_continue(struct cq *cq) KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0); - while ( !vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) { - - vm_page_queue_remove_first(&q->pgo_pending, m, vm_page_t, vmp_pageq); + while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) { + vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq); assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q); VM_PAGE_CHECK(m); @@ -3581,16 +3602,18 @@ vm_pageout_iothread_internal_continue(struct cq *cq) local_q = m; local_cnt++; } - if (local_q == NULL) + if (local_q == NULL) { break; + } q->pgo_busy = TRUE; if ((pgo_draining = q->pgo_draining) == FALSE) { vm_pageout_throttle_up_batch(q, local_cnt); pages_left_on_q = q->pgo_laundry; - } else + } else { pages_left_on_q = q->pgo_laundry - local_cnt; + } vm_page_unlock_queues(); @@ -3602,7 +3625,6 @@ vm_pageout_iothread_internal_continue(struct cq *cq) KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0); while (local_q) { - KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0); m = local_q; @@ -3620,8 +3642,7 @@ vm_pageout_iothread_internal_continue(struct cq *cq) local_freed++; if (local_freed >= MAX_FREE_BATCH) { - - OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); + OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); vm_page_free_list(local_freeq, TRUE); @@ -3631,11 +3652,11 @@ vm_pageout_iothread_internal_continue(struct cq *cq) } #if !CONFIG_JETSAM while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) { - kern_return_t wait_result; - int need_wakeup = 0; + kern_return_t wait_result; + int need_wakeup = 0; if (local_freeq) { - OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); + OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); vm_page_free_list(local_freeq, TRUE); local_freeq = NULL; @@ -3646,26 +3667,28 @@ 
vm_pageout_iothread_internal_continue(struct cq *cq) lck_mtx_lock_spin(&vm_page_queue_free_lock); if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) { - - if (vm_page_free_wanted_privileged++ == 0) + if (vm_page_free_wanted_privileged++ == 0) { need_wakeup = 1; + } wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT); lck_mtx_unlock(&vm_page_queue_free_lock); - if (need_wakeup) + if (need_wakeup) { thread_wakeup((event_t)&vm_page_free_wanted); + } - if (wait_result == THREAD_WAITING) - + if (wait_result == THREAD_WAITING) { thread_block(THREAD_CONTINUE_NULL); - } else + } + } else { lck_mtx_unlock(&vm_page_queue_free_lock); + } } #endif } if (local_freeq) { - OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); + OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions); vm_page_free_list(local_freeq, TRUE); local_freeq = NULL; @@ -3728,10 +3751,10 @@ vm_pageout_iothread_internal_continue(struct cq *cq) kern_return_t vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m) { - vm_object_t object; - memory_object_t pager; - int compressed_count_delta; - kern_return_t retval; + vm_object_t object; + memory_object_t pager; + int compressed_count_delta; + kern_return_t retval; object = VM_PAGE_OBJECT(m); @@ -3740,8 +3763,7 @@ vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m) pager = object->pager; - if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) { - + if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) { KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0); vm_object_lock(object); @@ -3751,10 +3773,12 @@ vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m) * one and hand it to the compression pager. */ - if (!object->pager_initialized) + if (!object->pager_initialized) { vm_object_collapse(object, (vm_object_offset_t) 0, TRUE); - if (!object->pager_initialized) + } + if (!object->pager_initialized) { vm_object_compressor_pager_create(object); + } pager = object->pager; @@ -3804,9 +3828,9 @@ vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m) assert( !VM_PAGE_WIRED(m)); vm_compressor_pager_count(pager, - compressed_count_delta, - FALSE, /* shared_lock */ - object); + compressed_count_delta, + FALSE, /* shared_lock */ + object); if (retval == KERN_SUCCESS) { /* @@ -3817,17 +3841,17 @@ vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m) * so account for it as such. 
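 *
 * [editor's note, illustrative: vm_compressor_pager_count() above folds
 *  compressed_count_delta into the pager's compressed-page count, and on
 *  KERN_SUCCESS the owning ledger is charged one more compressed page,
 *
 *      vm_object_owner_compressed_update(object, +1);
 *
 *  which is what the purgeable/ledger-tag test just below gates.]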
*/ if ((object->purgable != VM_PURGABLE_DENY || - object->vo_ledger_tag) && + object->vo_ledger_tag) && object->vo_owner != NULL) { /* one more compressed purgeable/tagged page */ vm_object_owner_compressed_update(object, - +1); + +1); } VM_STAT_INCR(compressions); - if (m->vmp_tabled) + if (m->vmp_tabled) { vm_page_remove(m, TRUE); - + } } else { PAGE_WAKEUP_DONE(m); @@ -3848,13 +3872,13 @@ vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m) static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority) { - uint32_t policy; + uint32_t policy; - if (hibernate_cleaning_in_progress == TRUE) + if (hibernate_cleaning_in_progress == TRUE) { req_lowpriority = FALSE; + } if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) { - vm_page_unlock_queues(); if (req_lowpriority == TRUE) { @@ -3865,7 +3889,7 @@ vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpr DTRACE_VM(laundryunthrottle); } proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid, - TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy); + TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy); eq->pgo_lowpriority = req_lowpriority; @@ -3877,14 +3901,14 @@ vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpr static void vm_pageout_iothread_external(void) { - thread_t self = current_thread(); + thread_t self = current_thread(); self->options |= TH_OPT_VMPRIV; DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL); proc_set_thread_policy(self, TASK_POLICY_EXTERNAL, - TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED); + TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED); vm_page_lock_queues(); @@ -3903,7 +3927,7 @@ vm_pageout_iothread_external(void) static void vm_pageout_iothread_internal(struct cq *cq) { - thread_t self = current_thread(); + thread_t self = current_thread(); self->options |= TH_OPT_VMPRIV; @@ -3915,8 +3939,9 @@ vm_pageout_iothread_internal(struct cq *cq) vm_page_unlock_queues(); - if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) + if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { thread_vm_bind_group_add(); + } thread_set_thread_name(current_thread(), "VM_compressor"); @@ -3938,8 +3963,8 @@ vm_set_buffer_cleanup_callout(boolean_t (*func)(int)) } } -extern boolean_t memorystatus_manual_testing_on; -extern unsigned int memorystatus_level; +extern boolean_t memorystatus_manual_testing_on; +extern unsigned int memorystatus_level; #if VM_PRESSURE_EVENTS @@ -3949,14 +3974,14 @@ boolean_t vm_pressure_events_enabled = FALSE; void vm_pressure_response(void) { + vm_pressure_level_t old_level = kVMPressureNormal; + int new_level = -1; + unsigned int total_pages; + uint64_t available_memory = 0; - vm_pressure_level_t old_level = kVMPressureNormal; - int new_level = -1; - unsigned int total_pages; - uint64_t available_memory = 0; - - if (vm_pressure_events_enabled == FALSE) + if (vm_pressure_events_enabled == FALSE) { return; + } #if CONFIG_EMBEDDED @@ -3982,48 +4007,47 @@ vm_pressure_response(void) old_level = memorystatus_vm_pressure_level; switch (memorystatus_vm_pressure_level) { - - case kVMPressureNormal: - { - if (VM_PRESSURE_WARNING_TO_CRITICAL()) { - new_level = kVMPressureCritical; - } else if (VM_PRESSURE_NORMAL_TO_WARNING()) { - new_level = kVMPressureWarning; - } - break; + case kVMPressureNormal: + { + if (VM_PRESSURE_WARNING_TO_CRITICAL()) { + new_level = kVMPressureCritical; + } else if (VM_PRESSURE_NORMAL_TO_WARNING()) { + new_level = kVMPressureWarning; } + break; + 
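/*
 * [editor's note, illustrative: this switch is a small hysteresis state
 *  machine — kVMPressureNormal above can only escalate, kVMPressureCritical
 *  below can only de-escalate, and Warning/Urgent can move either way;
 *  the VM_PRESSURE_*() predicates supply the thresholds.]
 */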
} - case kVMPressureWarning: - case kVMPressureUrgent: - { - if (VM_PRESSURE_WARNING_TO_NORMAL()) { - new_level = kVMPressureNormal; - } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) { - new_level = kVMPressureCritical; - } - break; + case kVMPressureWarning: + case kVMPressureUrgent: + { + if (VM_PRESSURE_WARNING_TO_NORMAL()) { + new_level = kVMPressureNormal; + } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) { + new_level = kVMPressureCritical; } + break; + } - case kVMPressureCritical: - { - if (VM_PRESSURE_WARNING_TO_NORMAL()) { - new_level = kVMPressureNormal; - } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) { - new_level = kVMPressureWarning; - } - break; + case kVMPressureCritical: + { + if (VM_PRESSURE_WARNING_TO_NORMAL()) { + new_level = kVMPressureNormal; + } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) { + new_level = kVMPressureWarning; } + break; + } - default: - return; + default: + return; } if (new_level != -1) { memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level; - if (new_level != old_level) { + if (new_level != (int) old_level) { VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE, - new_level, old_level, 0, 0); + new_level, old_level, 0, 0); } if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) { @@ -4036,13 +4060,12 @@ vm_pressure_response(void) } } } - } #endif /* VM_PRESSURE_EVENTS */ kern_return_t -mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level) { - +mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level) +{ #if CONFIG_EMBEDDED return KERN_FAILURE; @@ -4053,18 +4076,17 @@ mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused un #else /* VM_PRESSURE_EVENTS */ - kern_return_t kr = KERN_SUCCESS; + kern_return_t kr = KERN_SUCCESS; if (pressure_level != NULL) { - - vm_pressure_level_t old_level = memorystatus_vm_pressure_level; + vm_pressure_level_t old_level = memorystatus_vm_pressure_level; if (wait_for_pressure == TRUE) { - wait_result_t wr = 0; + wait_result_t wr = 0; while (old_level == *pressure_level) { wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed, - THREAD_INTERRUPTIBLE); + THREAD_INTERRUPTIBLE); if (wr == THREAD_WAITING) { wr = thread_block(THREAD_CONTINUE_NULL); } @@ -4072,7 +4094,6 @@ mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused un return KERN_ABORTED; } if (wr == THREAD_AWAKENED) { - old_level = memorystatus_vm_pressure_level; if (old_level != *pressure_level) { @@ -4094,7 +4115,8 @@ mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused un #if VM_PRESSURE_EVENTS void -vm_pressure_thread(void) { +vm_pressure_thread(void) +{ static boolean_t thread_initialized = FALSE; if (thread_initialized == TRUE) { @@ -4118,7 +4140,6 @@ void compute_pageout_gc_throttle(__unused void *arg) { if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) { - vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page; thread_wakeup((event_t) &vm_pageout_garbage_collect); @@ -4171,7 +4192,6 @@ vm_pageout_garbage_collect(int collect) * up again. */ consider_zone_gc(TRUE); - } else { /* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. 
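 * (Clarifying note, inferred from compute_pageout_gc_throttle() above: that
 * wakeup fires only when vm_pageout_considered_page has advanced since the
 * previous check, so an otherwise idle system does not keep re-running the
 * garbage collector.)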
*/ boolean_t buf_large_zfree = FALSE; @@ -4194,7 +4214,6 @@ vm_pageout_garbage_collect(int collect) consider_zone_gc(FALSE); } first_try = FALSE; - } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target); consider_machine_adjust(); @@ -4245,17 +4264,18 @@ vm_set_restrictions() * scheduling mechanism and KPI. */ vm_pageout_state.vm_restricted_to_single_processor = TRUE; - } else + } else { vm_pageout_state.vm_restricted_to_single_processor = FALSE; + } } void vm_pageout(void) { - thread_t self = current_thread(); - thread_t thread; - kern_return_t result; - spl_t s; + thread_t self = current_thread(); + thread_t thread; + kern_return_t result; + spl_t s; /* * Set thread privileges. @@ -4267,11 +4287,13 @@ vm_pageout(void) sched_set_thread_base_priority(self, BASEPRI_VM); thread_unlock(self); - if (!self->reserved_stack) + if (!self->reserved_stack) { self->reserved_stack = self->kernel_stack; + } - if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) + if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) { thread_vm_bind_group_add(); + } splx(s); @@ -4306,7 +4328,7 @@ vm_pageout(void) vm_pageout_state.vm_pageout_inactive_clean = 0; vm_pageout_state.vm_memory_pressure = 0; - vm_pageout_state.vm_page_filecache_min = 0; + vm_pageout_state.vm_page_filecache_min = 0; #if CONFIG_JETSAM vm_pageout_state.vm_page_filecache_min_divisor = 70; vm_pageout_state.vm_page_xpmapped_min_divisor = 40; @@ -4316,28 +4338,35 @@ vm_pageout(void) #endif vm_pageout_state.vm_page_free_count_init = vm_page_free_count; - vm_pageout_state.vm_pageout_considered_page_last = 0; + vm_pageout_state.vm_pageout_considered_page_last = 0; - if (vm_pageout_state.vm_pageout_swap_wait == 0) + if (vm_pageout_state.vm_pageout_swap_wait == 0) { vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT; + } - if (vm_pageout_state.vm_pageout_idle_wait == 0) + if (vm_pageout_state.vm_pageout_idle_wait == 0) { vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT; + } - if (vm_pageout_state.vm_pageout_burst_wait == 0) + if (vm_pageout_state.vm_pageout_burst_wait == 0) { vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT; + } - if (vm_pageout_state.vm_pageout_empty_wait == 0) + if (vm_pageout_state.vm_pageout_empty_wait == 0) { vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT; + } - if (vm_pageout_state.vm_pageout_deadlock_wait == 0) + if (vm_pageout_state.vm_pageout_deadlock_wait == 0) { vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT; + } - if (vm_pageout_state.vm_pageout_deadlock_relief == 0) + if (vm_pageout_state.vm_pageout_deadlock_relief == 0) { vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF; + } - if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) - vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE; + if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) { + vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE; + } /* * even if we've already called vm_page_free_reserve * call it again here to insure that the targets are @@ -4347,8 +4376,9 @@ vm_pageout(void) */ if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) { vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved); - } else + } else { vm_page_free_reserve(0); + } vm_page_queue_init(&vm_pageout_queue_external.pgo_pending); @@ -4377,28 +4407,31 @@ vm_pageout(void) /* external pageout and garbage collection threads 
started here */ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL, - BASEPRI_VM, - &vm_pageout_state.vm_pageout_external_iothread); - if (result != KERN_SUCCESS) + BASEPRI_VM, + &vm_pageout_state.vm_pageout_external_iothread); + if (result != KERN_SUCCESS) { panic("vm_pageout_iothread_external: create failed"); + } thread_deallocate(vm_pageout_state.vm_pageout_external_iothread); result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL, - BASEPRI_DEFAULT, - &thread); - if (result != KERN_SUCCESS) + BASEPRI_DEFAULT, + &thread); + if (result != KERN_SUCCESS) { panic("vm_pageout_garbage_collect: create failed"); + } thread_deallocate(thread); #if VM_PRESSURE_EVENTS result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL, - BASEPRI_DEFAULT, - &thread); + BASEPRI_DEFAULT, + &thread); - if (result != KERN_SUCCESS) + if (result != KERN_SUCCESS) { panic("vm_pressure_thread: create failed"); + } thread_deallocate(thread); #endif @@ -4408,8 +4441,7 @@ vm_pageout(void) bzero(&vm_config, sizeof(vm_config)); - switch(vm_compressor_mode) { - + switch (vm_compressor_mode) { case VM_PAGER_DEFAULT: printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n"); @@ -4448,8 +4480,9 @@ vm_pageout(void) printf("unknown compressor mode - %x\n", vm_compressor_mode); break; } - if (VM_CONFIG_COMPRESSOR_IS_PRESENT) + if (VM_CONFIG_COMPRESSOR_IS_PRESENT) { vm_compressor_pager_init(); + } #if VM_PRESSURE_EVENTS vm_pressure_events_enabled = TRUE; @@ -4461,12 +4494,12 @@ vm_pageout(void) #if VM_PAGE_BUCKETS_CHECK #if VM_PAGE_FAKE_BUCKETS printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n", - (uint64_t) vm_page_fake_buckets_start, - (uint64_t) vm_page_fake_buckets_end); + (uint64_t) vm_page_fake_buckets_start, + (uint64_t) vm_page_fake_buckets_end); pmap_protect(kernel_pmap, - vm_page_fake_buckets_start, - vm_page_fake_buckets_end, - VM_PROT_READ); + vm_page_fake_buckets_start, + vm_page_fake_buckets_end, + VM_PROT_READ); // *(char *) vm_page_fake_buckets_start = 'x'; /* panic! 
*/ #endif /* VM_PAGE_FAKE_BUCKETS */ #endif /* VM_PAGE_BUCKETS_CHECK */ @@ -4508,11 +4541,11 @@ vm_pageout(void) kern_return_t vm_pageout_internal_start(void) { - kern_return_t result; - int i; + kern_return_t result; + int i; host_basic_info_data_t hinfo; - assert (VM_CONFIG_COMPRESSOR_IS_PRESENT); + assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT; #define BSD_HOST 1 @@ -4520,23 +4553,28 @@ vm_pageout_internal_start(void) assert(hinfo.max_cpus > 0); + lck_grp_init(&vm_pageout_lck_grp, "vm_pageout", LCK_GRP_ATTR_NULL); + #if CONFIG_EMBEDDED vm_pageout_state.vm_compressor_thread_count = 1; #else - if (hinfo.max_cpus > 4) - vm_pageout_state.vm_compressor_thread_count = 2; - else - vm_pageout_state.vm_compressor_thread_count = 1; + if (hinfo.max_cpus > 4) { + vm_pageout_state.vm_compressor_thread_count = 2; + } else { + vm_pageout_state.vm_compressor_thread_count = 1; + } #endif PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count, - sizeof(vm_pageout_state.vm_compressor_thread_count)); + sizeof(vm_pageout_state.vm_compressor_thread_count)); - if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) + if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) { vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1; - if (vm_pageout_state.vm_compressor_thread_count <= 0) + } + if (vm_pageout_state.vm_compressor_thread_count <= 0) { vm_pageout_state.vm_compressor_thread_count = 1; - else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) + } else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) { vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT; + } vm_pageout_queue_internal.pgo_maxlaundry = (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX; @@ -4549,12 +4587,13 @@ vm_pageout_internal_start(void) ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE); result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], - BASEPRI_VM, &vm_pageout_state.vm_pageout_internal_iothread); + BASEPRI_VM, &vm_pageout_state.vm_pageout_internal_iothread); - if (result == KERN_SUCCESS) + if (result == KERN_SUCCESS) { thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread); - else + } else { break; + } } return result; } @@ -4576,41 +4615,41 @@ vm_pageout_internal_start(void) static void upl_set_decmp_info(upl_t upl, upl_t src_upl) { - assert((src_upl->flags & UPL_DECMP_REQ) != 0); - - upl_lock(src_upl); - if (src_upl->decmp_io_upl) { - /* - * If there is already an alive real I/O UPL, ignore this new UPL. - * This case should rarely happen and even if it does, it just means - * that we might issue a spurious expedite which the driver is expected - * to handle. - */ - upl_unlock(src_upl); - return; - } - src_upl->decmp_io_upl = (void *)upl; - src_upl->ref_count++; - - upl->flags |= UPL_DECMP_REAL_IO; - upl->decmp_io_upl = (void *)src_upl; + assert((src_upl->flags & UPL_DECMP_REQ) != 0); + + upl_lock(src_upl); + if (src_upl->decmp_io_upl) { + /* + * If there is already an alive real I/O UPL, ignore this new UPL. + * This case should rarely happen and even if it does, it just means + * that we might issue a spurious expedite which the driver is expected + * to handle. 
+ */ + upl_unlock(src_upl); + return; + } + src_upl->decmp_io_upl = (void *)upl; + src_upl->ref_count++; + + upl->flags |= UPL_DECMP_REAL_IO; + upl->decmp_io_upl = (void *)src_upl; upl_unlock(src_upl); } #endif /* CONFIG_IOSCHED */ #if UPL_DEBUG -int upl_debug_enabled = 1; +int upl_debug_enabled = 1; #else -int upl_debug_enabled = 0; +int upl_debug_enabled = 0; #endif static upl_t upl_create(int type, int flags, upl_size_t size) { - upl_t upl; - vm_size_t page_field_size = 0; - int upl_flags = 0; - vm_size_t upl_size = sizeof(struct upl); + upl_t upl; + vm_size_t page_field_size = 0; + int upl_flags = 0; + vm_size_t upl_size = sizeof(struct upl); size = round_page_32(size); @@ -4627,8 +4666,9 @@ upl_create(int type, int flags, upl_size_t size) } upl = (upl_t)kalloc(upl_size + page_field_size); - if (page_field_size) - bzero((char *)upl + upl_size, page_field_size); + if (page_field_size) { + bzero((char *)upl + upl_size, page_field_size); + } upl->flags = upl_flags | flags; upl->kaddr = (vm_offset_t)0; @@ -4654,8 +4694,9 @@ upl_create(int type, int flags, upl_size_t size) upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size)); bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size))); upl->flags |= UPL_EXPEDITE_SUPPORTED; - if (curthread->decmp_upl != NULL) + if (curthread->decmp_upl != NULL) { upl_set_decmp_info(upl, curthread->decmp_upl); + } } #endif #if CONFIG_IOSCHED || UPL_DEBUG @@ -4678,34 +4719,34 @@ upl_create(int type, int flags, upl_size_t size) (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES); #endif /* UPL_DEBUG */ - return(upl); + return upl; } static void upl_destroy(upl_t upl) { - int page_field_size; /* bit field in word size buf */ - int size; + int page_field_size; /* bit field in word size buf */ + int size; if (upl->ext_ref_count) { panic("upl(%p) ext_ref_count", upl); } #if CONFIG_IOSCHED - if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) { - upl_t src_upl; - src_upl = upl->decmp_io_upl; - assert((src_upl->flags & UPL_DECMP_REQ) != 0); - upl_lock(src_upl); - src_upl->decmp_io_upl = NULL; - upl_unlock(src_upl); - upl_deallocate(src_upl); - } + if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) { + upl_t src_upl; + src_upl = upl->decmp_io_upl; + assert((src_upl->flags & UPL_DECMP_REQ) != 0); + upl_lock(src_upl); + src_upl->decmp_io_upl = NULL; + upl_unlock(src_upl); + upl_deallocate(src_upl); + } #endif /* CONFIG_IOSCHED */ #if CONFIG_IOSCHED || UPL_DEBUG if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) { - vm_object_t object; + vm_object_t object; if (upl->flags & UPL_SHADOWED) { object = upl->map_object->shadow; @@ -4724,32 +4765,35 @@ upl_destroy(upl_t upl) * drop a reference on the map_object whether or * not a pageout object is inserted */ - if (upl->flags & UPL_SHADOWED) + if (upl->flags & UPL_SHADOWED) { vm_object_deallocate(upl->map_object); + } - if (upl->flags & UPL_DEVICE_MEMORY) - size = PAGE_SIZE; - else - size = upl->size; + if (upl->flags & UPL_DEVICE_MEMORY) { + size = PAGE_SIZE; + } else { + size = upl->size; + } page_field_size = 0; if (upl->flags & UPL_LITE) { - page_field_size = ((size/PAGE_SIZE) + 7) >> 3; + page_field_size = ((size / PAGE_SIZE) + 7) >> 3; page_field_size = (page_field_size + 3) & 0xFFFFFFFC; } upl_lock_destroy(upl); upl->vector_upl = (vector_upl_t) 0xfeedbeef; #if CONFIG_IOSCHED - if (upl->flags & UPL_EXPEDITE_SUPPORTED) - kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size/PAGE_SIZE)); + if (upl->flags & UPL_EXPEDITE_SUPPORTED) { + 
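/*
 * Sizing sketch (hypothetical helper, not part of this patch): it mirrors
 * the allocation arithmetic used by upl_create() above and the kfree()
 * calls below. An INTERNAL UPL appends one upl_page_info_t per page, and
 * a LITE UPL additionally appends a one-bit-per-page bitmap rounded up to
 * a 4-byte boundary.
 */
static vm_size_t
upl_alloc_size_sketch(int flags, upl_size_t size)
{
	vm_size_t total = sizeof(struct upl);

	if (flags & UPL_CREATE_INTERNAL) {
		/* one page-info entry per page covered by the UPL */
		total += sizeof(struct upl_page_info) * (size / PAGE_SIZE);
	}
	if (flags & UPL_CREATE_LITE) {
		/* one bit per page, rounded up to a 32-bit boundary */
		vm_size_t bitmap = ((size / PAGE_SIZE) + 7) >> 3;
		total += (bitmap + 3) & 0xFFFFFFFC;
	}
	return total;
}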
kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE)); + } #endif if (upl->flags & UPL_INTERNAL) { kfree(upl, - sizeof(struct upl) + - (sizeof(struct upl_page_info) * (size/PAGE_SIZE)) - + page_field_size); + sizeof(struct upl) + + (sizeof(struct upl_page_info) * (size / PAGE_SIZE)) + + page_field_size); } else { kfree(upl, sizeof(struct upl) + page_field_size); } @@ -4761,16 +4805,19 @@ upl_deallocate(upl_t upl) upl_lock(upl); if (--upl->ref_count == 0) { - if(vector_upl_is_valid(upl)) + if (vector_upl_is_valid(upl)) { vector_upl_deallocate(upl); + } upl_unlock(upl); - if (upl->upl_iodone) - upl_callout_iodone(upl); + if (upl->upl_iodone) { + upl_callout_iodone(upl); + } upl_destroy(upl); - } else + } else { upl_unlock(upl); + } } #if CONFIG_IOSCHED @@ -4786,15 +4833,15 @@ upl_mark_decmp(upl_t upl) void upl_unmark_decmp(upl_t upl) { - if(upl && (upl->flags & UPL_DECMP_REQ)) { + if (upl && (upl->flags & UPL_DECMP_REQ)) { upl->upl_creator->decmp_upl = NULL; } } #endif /* CONFIG_IOSCHED */ -#define VM_PAGE_Q_BACKING_UP(q) \ - ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10)) +#define VM_PAGE_Q_BACKING_UP(q) \ + ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10)) boolean_t must_throttle_writes(void); @@ -4802,10 +4849,11 @@ boolean_t must_throttle_writes() { if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) && - vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) - return (TRUE); + vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) { + return TRUE; + } - return (FALSE); + return FALSE; } @@ -4855,37 +4903,40 @@ must_throttle_writes() __private_extern__ kern_return_t vm_object_upl_request( - vm_object_t object, - vm_object_offset_t offset, - upl_size_t size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - upl_control_flags_t cntrl_flags, - vm_tag_t tag) + vm_object_t object, + vm_object_offset_t offset, + upl_size_t size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + upl_control_flags_t cntrl_flags, + vm_tag_t tag) { - vm_page_t dst_page = VM_PAGE_NULL; - vm_object_offset_t dst_offset; - upl_size_t xfer_size; - unsigned int size_in_pages; - boolean_t dirty; - boolean_t hw_dirty; - upl_t upl = NULL; - unsigned int entry; - vm_page_t alias_page = NULL; - int refmod_state = 0; - wpl_array_t lite_list = NULL; - vm_object_t last_copy_object; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; - int dw_count; - int dw_limit; - int io_tracking_flag = 0; - int grab_options; - int page_grab_count = 0; - ppnum_t phys_page; - pmap_flush_context pmap_flush_context_storage; + vm_page_t dst_page = VM_PAGE_NULL; + vm_object_offset_t dst_offset; + upl_size_t xfer_size; + unsigned int size_in_pages; + boolean_t dirty; + boolean_t hw_dirty; + upl_t upl = NULL; + unsigned int entry; + vm_page_t alias_page = NULL; + int refmod_state = 0; + wpl_array_t lite_list = NULL; + vm_object_t last_copy_object; + struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; + struct vm_page_delayed_work *dwp; + int dw_count; + int dw_limit; + int io_tracking_flag = 0; + int grab_options; + int page_grab_count = 0; + ppnum_t phys_page; + pmap_flush_context pmap_flush_context_storage; boolean_t pmap_flushes_delayed = FALSE; +#if DEVELOPMENT || DEBUG + task_t task = current_task(); +#endif /* DEVELOPMENT || DEBUG */ if (cntrl_flags & ~UPL_VALID_FLAGS) { /* @@ -4894,43 +4945,48 @@ vm_object_upl_request( */ 
return KERN_INVALID_VALUE; } - if ( (!object->internal) && (object->paging_offset != 0) ) + if ((!object->internal) && (object->paging_offset != 0)) { panic("vm_object_upl_request: external object with non-zero paging offset\n"); - if (object->phys_contiguous) - panic("vm_object_upl_request: contiguous object specified\n"); + } + if (object->phys_contiguous) { + panic("vm_object_upl_request: contiguous object specified\n"); + } VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0); - if (size > MAX_UPL_SIZE_BYTES) + if (size > MAX_UPL_SIZE_BYTES) { size = MAX_UPL_SIZE_BYTES; + } - if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) - *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT; + if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) { + *page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT; + } #if CONFIG_IOSCHED || UPL_DEBUG - if (object->io_tracking || upl_debug_enabled) + if (object->io_tracking || upl_debug_enabled) { io_tracking_flag |= UPL_CREATE_IO_TRACKING; + } #endif #if CONFIG_IOSCHED - if (object->io_tracking) + if (object->io_tracking) { io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP; + } #endif if (cntrl_flags & UPL_SET_INTERNAL) { - if (cntrl_flags & UPL_SET_LITE) { - + if (cntrl_flags & UPL_SET_LITE) { upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size); user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); lite_list = (wpl_array_t) - (((uintptr_t)user_page_list) + - ((size/PAGE_SIZE) * sizeof(upl_page_info_t))); + (((uintptr_t)user_page_list) + + ((size / PAGE_SIZE) * sizeof(upl_page_info_t))); if (size == 0) { user_page_list = NULL; lite_list = NULL; } } else { - upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size); + upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size); user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); if (size == 0) { @@ -4938,8 +4994,7 @@ vm_object_upl_request( } } } else { - if (cntrl_flags & UPL_SET_LITE) { - + if (cntrl_flags & UPL_SET_LITE) { upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size); lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl)); @@ -4947,18 +5002,19 @@ vm_object_upl_request( lite_list = NULL; } } else { - upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size); + upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size); } } *upl_ptr = upl; - if (user_page_list) - user_page_list[0].device = FALSE; + if (user_page_list) { + user_page_list[0].device = FALSE; + } if (cntrl_flags & UPL_SET_LITE) { - upl->map_object = object; + upl->map_object = object; } else { - upl->map_object = vm_object_allocate(size); + upl->map_object = vm_object_allocate(size); /* * No need to lock the new object: nobody else knows * about it yet, so it's all ours so far. @@ -4974,8 +5030,9 @@ vm_object_upl_request( upl->flags |= UPL_SHADOWED; } - if (cntrl_flags & UPL_FOR_PAGEOUT) + if (cntrl_flags & UPL_FOR_PAGEOUT) { upl->flags |= UPL_PAGEOUT; + } vm_object_lock(object); vm_object_activity_begin(object); @@ -5010,13 +5067,13 @@ * the caller modify them. 
*/ vm_object_update(object, - offset, - size, - NULL, - NULL, - FALSE, /* should_return */ - MEMORY_OBJECT_COPY_SYNC, - VM_PROT_NO_CHANGE); + offset, + size, + NULL, + NULL, + FALSE, /* should_return */ + MEMORY_OBJECT_COPY_SYNC, + VM_PROT_NO_CHANGE); VM_PAGEOUT_DEBUG(upl_cow, 1); VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT)); @@ -5036,11 +5093,12 @@ vm_object_upl_request( dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); if (vm_page_free_count > (vm_page_free_target + size_in_pages) || - object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) + object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) { object->scan_collisions = 0; + } if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) { - boolean_t isSSD = FALSE; + boolean_t isSSD = FALSE; #if CONFIG_EMBEDDED isSSD = TRUE; @@ -5051,17 +5109,17 @@ vm_object_upl_request( OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages); - if (isSSD == TRUE) + if (isSSD == TRUE) { delay(1000 * size_in_pages); - else + } else { delay(5000 * size_in_pages); + } OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages); vm_object_lock(object); } while (xfer_size) { - dwp->dw_mask = 0; if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) { @@ -5070,17 +5128,17 @@ vm_object_upl_request( vm_object_lock(object); } if (cntrl_flags & UPL_COPYOUT_FROM) { - upl->flags |= UPL_PAGE_SYNC_DONE; - - if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) || - dst_page->vmp_fictitious || - dst_page->vmp_absent || - dst_page->vmp_error || - dst_page->vmp_cleaning || - (VM_PAGE_WIRED(dst_page))) { - - if (user_page_list) + upl->flags |= UPL_PAGE_SYNC_DONE; + + if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) || + dst_page->vmp_fictitious || + dst_page->vmp_absent || + dst_page->vmp_error || + dst_page->vmp_cleaning || + (VM_PAGE_WIRED(dst_page))) { + if (user_page_list) { user_page_list[entry].phys_addr = 0; + } goto try_next_page; } @@ -5093,13 +5151,14 @@ vm_object_upl_request( * anyway... so we can eliminate an extra call into * the pmap layer by grabbing it here and recording it */ - if (dst_page->vmp_pmapped) - refmod_state = pmap_get_refmod(phys_page); - else - refmod_state = 0; + if (dst_page->vmp_pmapped) { + refmod_state = pmap_get_refmod(phys_page); + } else { + refmod_state = 0; + } - if ( (refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) { - /* + if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) { + /* * page is on inactive list and referenced... * reactivate it now... 
this gets it out of the * way of vm_pageout_scan which would have to @@ -5108,19 +5167,20 @@ vm_object_upl_request( dwp->dw_mask |= DW_vm_page_activate; } if (cntrl_flags & UPL_RET_ONLY_DIRTY) { - /* + /* * we're only asking for DIRTY pages to be returned */ - if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) { - /* + if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) { + /* * if we were the page stolen by vm_pageout_scan to be * cleaned (as opposed to a buddy being clustered in * or this request is not being driven by a PAGEOUT cluster * then we only need to check for the page being dirty or * precious to decide whether to return it */ - if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) - goto check_busy; + if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) { + goto check_busy; + } goto dont_return; } /* @@ -5129,11 +5189,11 @@ vm_object_upl_request( * does it have to be dirty to be returned, but it also * can't have been referenced recently... */ - if ( (hibernate_cleaning_in_progress == TRUE || - (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) || - (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) && - ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious) ) { - goto check_busy; + if ((hibernate_cleaning_in_progress == TRUE || + (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) || + (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) && + ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) { + goto check_busy; } dont_return: /* @@ -5154,16 +5214,18 @@ dont_return: vm_page_unlock_queues(); } - if (user_page_list) - user_page_list[entry].phys_addr = 0; + if (user_page_list) { + user_page_list[entry].phys_addr = 0; + } goto try_next_page; } check_busy: if (dst_page->vmp_busy) { - if (cntrl_flags & UPL_NOBLOCK) { - if (user_page_list) - user_page_list[entry].phys_addr = 0; + if (cntrl_flags & UPL_NOBLOCK) { + if (user_page_list) { + user_page_list[entry].phys_addr = 0; + } dwp->dw_mask = 0; goto try_next_page; @@ -5177,7 +5239,6 @@ check_busy: continue; } if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) { - vm_page_lockspin_queues(); if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) { @@ -5195,27 +5256,28 @@ check_busy: hw_dirty = refmod_state & VM_MEM_MODIFIED; dirty = hw_dirty ? 
TRUE : dst_page->vmp_dirty; - if (phys_page > upl->highest_page) - upl->highest_page = phys_page; + if (phys_page > upl->highest_page) { + upl->highest_page = phys_page; + } - assert (!pmap_is_noencrypt(phys_page)); + assert(!pmap_is_noencrypt(phys_page)); if (cntrl_flags & UPL_SET_LITE) { - unsigned int pg_num; + unsigned int pg_num; - pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE); - assert(pg_num == (dst_offset-offset)/PAGE_SIZE); - lite_list[pg_num>>5] |= 1 << (pg_num & 31); + pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE); + assert(pg_num == (dst_offset - offset) / PAGE_SIZE); + lite_list[pg_num >> 5] |= 1 << (pg_num & 31); if (hw_dirty) { - if (pmap_flushes_delayed == FALSE) { - pmap_flush_context_init(&pmap_flush_context_storage); + if (pmap_flushes_delayed == FALSE) { + pmap_flush_context_init(&pmap_flush_context_storage); pmap_flushes_delayed = TRUE; } - pmap_clear_refmod_options(phys_page, - VM_MEM_MODIFIED, - PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE, - &pmap_flush_context_storage); + pmap_clear_refmod_options(phys_page, + VM_MEM_MODIFIED, + PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE, + &pmap_flush_context_storage); } /* @@ -5225,12 +5287,12 @@ check_busy: dst_page->vmp_cleaning = TRUE; dst_page->vmp_precious = FALSE; } else { - /* + /* * use pageclean setup, it is more * convenient even for the pageout * cases here */ - vm_object_lock(upl->map_object); + vm_object_lock(upl->map_object); vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size); vm_object_unlock(upl->map_object); @@ -5243,12 +5305,14 @@ check_busy: dst_page->vmp_dirty = FALSE; } - if (!dirty) + if (!dirty) { dst_page->vmp_precious = TRUE; + } - if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) { - if ( !VM_PAGE_WIRED(dst_page)) + if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) { + if (!VM_PAGE_WIRED(dst_page)) { dst_page->vmp_free_when_done = TRUE; + } } } else { if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) { @@ -5280,7 +5344,7 @@ check_busy: xfer_size, /* remaining size */ NULL, NULL, - FALSE, /* should_return */ + FALSE, /* should_return */ MEMORY_OBJECT_COPY_SYNC, VM_PROT_NO_CHANGE); @@ -5295,13 +5359,13 @@ check_busy: dst_page = vm_page_lookup(object, dst_offset); if (dst_page != VM_PAGE_NULL) { - if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) { /* * skip over pages already present in the cache */ - if (user_page_list) + if (user_page_list) { user_page_list[entry].phys_addr = 0; + } goto try_next_page; } @@ -5318,8 +5382,9 @@ check_busy: continue; } - if (dst_page->vmp_laundry) + if (dst_page->vmp_laundry) { vm_pageout_steal_laundry(dst_page, FALSE); + } } else { if (object->private) { /* @@ -5331,8 +5396,9 @@ check_busy: * physical page by asking the * backing device. 
*/ - if (user_page_list) + if (user_page_list) { user_page_list[entry].phys_addr = 0; + } goto try_next_page; } @@ -5350,9 +5416,10 @@ check_busy: */ dst_page = vm_object_page_grab(object); - if (dst_page != VM_PAGE_NULL) + if (dst_page != VM_PAGE_NULL) { vm_page_release(dst_page, - FALSE); + FALSE); + } dst_page = vm_object_page_grab(object); } @@ -5361,22 +5428,24 @@ check_busy: * need to allocate a page */ dst_page = vm_page_grab_options(grab_options); - if (dst_page != VM_PAGE_NULL) + if (dst_page != VM_PAGE_NULL) { page_grab_count++; + } } if (dst_page == VM_PAGE_NULL) { - if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) { - /* - * we don't want to stall waiting for pages to come onto the free list - * while we're already holding absent pages in this UPL - * the caller will deal with the empty slots - */ - if (user_page_list) - user_page_list[entry].phys_addr = 0; + if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) { + /* + * we don't want to stall waiting for pages to come onto the free list + * while we're already holding absent pages in this UPL + * the caller will deal with the empty slots + */ + if (user_page_list) { + user_page_list[entry].phys_addr = 0; + } goto try_next_page; } - /* + /* * no pages available... wait * then try again for the same * offset... @@ -5402,7 +5471,7 @@ check_busy: dst_page->vmp_busy = FALSE; if (cntrl_flags & UPL_RET_ONLY_ABSENT) { - /* + /* * if UPL_RET_ONLY_ABSENT was specified, * then we're definitely setting up a * upl for a clustered read/pagein * so upl_commit_range can put them on the * speculative list */ - dst_page->vmp_clustered = TRUE; + dst_page->vmp_clustered = TRUE; - if ( !(cntrl_flags & UPL_FILE_IO)) + if (!(cntrl_flags & UPL_FILE_IO)) { VM_STAT_INCR(pageins); + } } } phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); @@ -5421,29 +5491,32 @@ check_busy: dst_page->vmp_overwriting = TRUE; if (dst_page->vmp_pmapped) { - if ( !(cntrl_flags & UPL_FILE_IO)) - /* + if (!(cntrl_flags & UPL_FILE_IO)) { + /* * eliminate all mappings from the * original object and its progeny */ - refmod_state = pmap_disconnect(phys_page); - else - refmod_state = pmap_get_refmod(phys_page); - } else - refmod_state = 0; + refmod_state = pmap_disconnect(phys_page); + } else { + refmod_state = pmap_get_refmod(phys_page); + } + } else { + refmod_state = 0; + } hw_dirty = refmod_state & VM_MEM_MODIFIED; dirty = hw_dirty ? 
TRUE : dst_page->vmp_dirty; if (cntrl_flags & UPL_SET_LITE) { - unsigned int pg_num; + unsigned int pg_num; - pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE); - assert(pg_num == (dst_offset-offset)/PAGE_SIZE); - lite_list[pg_num>>5] |= 1 << (pg_num & 31); + pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE); + assert(pg_num == (dst_offset - offset) / PAGE_SIZE); + lite_list[pg_num >> 5] |= 1 << (pg_num & 31); - if (hw_dirty) - pmap_clear_modify(phys_page); + if (hw_dirty) { + pmap_clear_modify(phys_page); + } /* * Mark original page as cleaning @@ -5457,9 +5530,9 @@ check_busy: * convenient even for the pageout * cases here */ - vm_object_lock(upl->map_object); + vm_object_lock(upl->map_object); vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size); - vm_object_unlock(upl->map_object); + vm_object_unlock(upl->map_object); alias_page->vmp_absent = FALSE; alias_page = NULL; @@ -5483,17 +5556,19 @@ check_busy: } dst_page->vmp_dirty = dirty; - if (!dirty) + if (!dirty) { dst_page->vmp_precious = TRUE; + } - if ( !VM_PAGE_WIRED(dst_page)) { - /* + if (!VM_PAGE_WIRED(dst_page)) { + /* * deny access to the target page while * it is being worked on */ dst_page->vmp_busy = TRUE; - } else + } else { dwp->dw_mask |= DW_vm_page_wire; + } /* * We might be about to satisfy a fault which has been @@ -5501,7 +5576,7 @@ check_busy: */ dst_page->vmp_restart = FALSE; if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) { - /* + /* * expect the page to be used */ dwp->dw_mask |= DW_set_reference; @@ -5517,49 +5592,54 @@ check_busy: dst_page->vmp_precious = FALSE; } } - if (dst_page->vmp_busy) + if (dst_page->vmp_busy) { upl->flags |= UPL_HAS_BUSY; + } - if (phys_page > upl->highest_page) - upl->highest_page = phys_page; - assert (!pmap_is_noencrypt(phys_page)); + if (phys_page > upl->highest_page) { + upl->highest_page = phys_page; + } + assert(!pmap_is_noencrypt(phys_page)); if (user_page_list) { user_page_list[entry].phys_addr = phys_page; - user_page_list[entry].free_when_done = dst_page->vmp_free_when_done; - user_page_list[entry].absent = dst_page->vmp_absent; - user_page_list[entry].dirty = dst_page->vmp_dirty; - user_page_list[entry].precious = dst_page->vmp_precious; - user_page_list[entry].device = FALSE; + user_page_list[entry].free_when_done = dst_page->vmp_free_when_done; + user_page_list[entry].absent = dst_page->vmp_absent; + user_page_list[entry].dirty = dst_page->vmp_dirty; + user_page_list[entry].precious = dst_page->vmp_precious; + user_page_list[entry].device = FALSE; user_page_list[entry].needed = FALSE; - if (dst_page->vmp_clustered == TRUE) - user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE; - else - user_page_list[entry].speculative = FALSE; + if (dst_page->vmp_clustered == TRUE) { + user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE; + } else { + user_page_list[entry].speculative = FALSE; + } user_page_list[entry].cs_validated = dst_page->vmp_cs_validated; user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted; user_page_list[entry].cs_nx = dst_page->vmp_cs_nx; user_page_list[entry].mark = FALSE; } - /* + /* * if UPL_RET_ONLY_ABSENT is set, then * we are working with a fresh page and we've * just set the clustered flag on it to * indicate that it was drug in as part of a * speculative cluster... 
so leave it alone */ - if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) { - /* + if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) { + /* * someone is explicitly grabbing this page... * update clustered and speculative state * */ - if (dst_page->vmp_clustered) + if (dst_page->vmp_clustered) { VM_PAGE_CONSUME_CLUSTERED(dst_page); + } } try_next_page: if (dwp->dw_mask) { - if (dwp->dw_mask & DW_vm_page_activate) + if (dwp->dw_mask & DW_vm_page_activate) { VM_STAT_INCR(reactivations); + } VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count); @@ -5574,20 +5654,23 @@ try_next_page: dst_offset += PAGE_SIZE_64; xfer_size -= PAGE_SIZE; } - if (dw_count) + if (dw_count) { vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); + } if (alias_page != NULL) { VM_PAGE_FREE(alias_page); } - if (pmap_flushes_delayed == TRUE) - pmap_flush(&pmap_flush_context_storage); + if (pmap_flushes_delayed == TRUE) { + pmap_flush(&pmap_flush_context_storage); + } if (page_list_count != NULL) { - if (upl->flags & UPL_INTERNAL) + if (upl->flags & UPL_INTERNAL) { *page_list_count = 0; - else if (*page_list_count > entry) + } else if (*page_list_count > entry) { *page_list_count = entry; + } } #if UPL_DEBUG upl->upl_state = 1; @@ -5595,6 +5678,11 @@ try_next_page: vm_object_unlock(object); VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0); +#if DEVELOPMENT || DEBUG + if (task != NULL) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count); + } +#endif /* DEVELOPMENT || DEBUG */ return KERN_SUCCESS; } @@ -5613,38 +5701,38 @@ try_next_page: __private_extern__ kern_return_t vm_object_super_upl_request( vm_object_t object, - vm_object_offset_t offset, - upl_size_t size, - upl_size_t super_cluster, - upl_t *upl, - upl_page_info_t *user_page_list, - unsigned int *page_list_count, - upl_control_flags_t cntrl_flags, - vm_tag_t tag) + vm_object_offset_t offset, + upl_size_t size, + upl_size_t super_cluster, + upl_t *upl, + upl_page_info_t *user_page_list, + unsigned int *page_list_count, + upl_control_flags_t cntrl_flags, + vm_tag_t tag) { - if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)) + if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) { return KERN_FAILURE; + } assert(object->paging_in_progress); offset = offset - object->paging_offset; if (super_cluster > size) { - - vm_object_offset_t base_offset; - upl_size_t super_size; - vm_object_size_t super_size_64; + vm_object_offset_t base_offset; + upl_size_t super_size; + vm_object_size_t super_size_64; base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1)); - super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster; + super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster; super_size_64 = ((base_offset + super_size) > object->vo_size) ? 
(object->vo_size - base_offset) : super_size; super_size = (upl_size_t) super_size_64; assert(super_size == super_size_64); if (offset > (base_offset + super_size)) { - panic("vm_object_super_upl_request: Missed target pageout" - " %#llx,%#llx, %#x, %#x, %#x, %#llx\n", - offset, base_offset, super_size, super_cluster, - size, object->paging_offset); + panic("vm_object_super_upl_request: Missed target pageout" + " %#llx,%#llx, %#x, %#x, %#x, %#llx\n", + offset, base_offset, super_size, super_cluster, + size, object->paging_offset); } /* * apparently there is a case where the vm requests a @@ -5652,7 +5740,7 @@ vm_object_super_upl_request( * object size */ if ((offset + size) > (base_offset + super_size)) { - super_size_64 = (offset + size) - base_offset; + super_size_64 = (offset + size) - base_offset; super_size = (upl_size_t) super_size_64; assert(super_size == super_size_64); } @@ -5671,23 +5759,23 @@ extern char *proc_name_address(void *p); kern_return_t vm_map_create_upl( - vm_map_t map, - vm_map_address_t offset, - upl_size_t *upl_size, - upl_t *upl, - upl_page_info_array_t page_list, - unsigned int *count, - upl_control_flags_t *flags, - vm_tag_t tag) + vm_map_t map, + vm_map_address_t offset, + upl_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count, + upl_control_flags_t *flags, + vm_tag_t tag) { - vm_map_entry_t entry; - upl_control_flags_t caller_flags; - int force_data_sync; - int sync_cow_data; - vm_object_t local_object; - vm_map_offset_t local_offset; - vm_map_offset_t local_start; - kern_return_t ret; + vm_map_entry_t entry; + upl_control_flags_t caller_flags; + int force_data_sync; + int sync_cow_data; + vm_object_t local_object; + vm_map_offset_t local_offset; + vm_map_offset_t local_start; + kern_return_t ret; assert(page_aligned(offset)); @@ -5703,8 +5791,9 @@ vm_map_create_upl( force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC); sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM); - if (upl == NULL) + if (upl == NULL) { return KERN_INVALID_ARGUMENT; + } REDISCOVER_ENTRY: vm_map_lock_read(map); @@ -5724,11 +5813,13 @@ REDISCOVER_ENTRY: if (!entry->is_sub_map && VME_OBJECT(entry) != VM_OBJECT_NULL) { - if (VME_OBJECT(entry)->private) + if (VME_OBJECT(entry)->private) { *flags = UPL_DEV_MEMORY; + } - if (VME_OBJECT(entry)->phys_contiguous) + if (VME_OBJECT(entry)->phys_contiguous) { *flags |= UPL_PHYS_CONTIG; + } } vm_map_unlock_read(map); return KERN_SUCCESS; @@ -5736,22 +5827,23 @@ REDISCOVER_ENTRY: if (VME_OBJECT(entry) == VM_OBJECT_NULL || !VME_OBJECT(entry)->phys_contiguous) { - if (*upl_size > MAX_UPL_SIZE_BYTES) + if (*upl_size > MAX_UPL_SIZE_BYTES) { *upl_size = MAX_UPL_SIZE_BYTES; + } } /* * Create an object if necessary. 
*/ if (VME_OBJECT(entry) == VM_OBJECT_NULL) { - - if (vm_map_lock_read_to_write(map)) + if (vm_map_lock_read_to_write(map)) { goto REDISCOVER_ENTRY; + } VME_OBJECT_SET(entry, - vm_object_allocate((vm_size_t) - (entry->vme_end - - entry->vme_start))); + vm_object_allocate((vm_size_t) + (entry->vme_end - + entry->vme_start))); VME_OFFSET_SET(entry, 0); assert(entry->use_pmap); @@ -5770,8 +5862,8 @@ REDISCOVER_ENTRY: (caller_flags & UPL_COPYOUT_FROM) && (entry->protection & VM_PROT_EXECUTE) && !(entry->protection & VM_PROT_WRITE)) { - vm_offset_t kaddr; - vm_size_t ksize; + vm_offset_t kaddr; + vm_size_t ksize; /* * We're about to create a read-only UPL backed by @@ -5791,9 +5883,9 @@ REDISCOVER_ENTRY: ksize = round_page(*upl_size); kaddr = 0; ret = kmem_alloc_pageable(kernel_map, - &kaddr, - ksize, - tag); + &kaddr, + ksize, + tag); if (ret == KERN_SUCCESS) { /* copyin the user data */ assert(page_aligned(offset)); @@ -5803,12 +5895,12 @@ REDISCOVER_ENTRY: if (ksize > *upl_size) { /* zero out the extra space in kernel buffer */ memset((void *)(kaddr + *upl_size), - 0, - ksize - *upl_size); + 0, + ksize - *upl_size); } /* create the UPL from the kernel buffer */ ret = vm_map_create_upl(kernel_map, kaddr, upl_size, - upl, page_list, count, flags, tag); + upl, page_list, count, flags, tag); } if (kaddr != 0) { /* free the kernel buffer */ @@ -5818,10 +5910,10 @@ REDISCOVER_ENTRY: } #if DEVELOPMENT || DEBUG DTRACE_VM4(create_upl_from_executable, - vm_map_t, map, - vm_map_address_t, offset, - upl_size_t, *upl_size, - kern_return_t, ret); + vm_map_t, map, + vm_map_address_t, offset, + upl_size_t, *upl_size, + kern_return_t, ret); #endif /* DEVELOPMENT || DEBUG */ return ret; } @@ -5837,12 +5929,12 @@ REDISCOVER_ENTRY: entry->wired_count == 0 && /* No COW for entries that are wired */ (map->pmap != kernel_pmap) && /* alias checks */ (vm_map_entry_should_cow_for_true_share(entry) /* case 1 */ - || - (/* case 2 */ - local_object->internal && - (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) && - local_object->ref_count > 1))) { - vm_prot_t prot; + || + ( /* case 2 */ + local_object->internal && + (local_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) && + local_object->ref_count > 1))) { + vm_prot_t prot; /* * Case 1: @@ -5867,30 +5959,31 @@ REDISCOVER_ENTRY: assert(VME_OBJECT(entry) == local_object); vm_map_clip_start(map, - entry, - vm_map_trunc_page(offset, - VM_MAP_PAGE_MASK(map))); + entry, + vm_map_trunc_page(offset, + VM_MAP_PAGE_MASK(map))); vm_map_clip_end(map, - entry, - vm_map_round_page(offset + *upl_size, - VM_MAP_PAGE_MASK(map))); + entry, + vm_map_round_page(offset + *upl_size, + VM_MAP_PAGE_MASK(map))); if ((entry->vme_end - offset) < *upl_size) { *upl_size = (upl_size_t) (entry->vme_end - offset); assert(*upl_size == entry->vme_end - offset); } prot = entry->protection & ~VM_PROT_WRITE; - if (override_nx(map, VME_ALIAS(entry)) && prot) + if (override_nx(map, VME_ALIAS(entry)) && prot) { prot |= VM_PROT_EXECUTE; + } vm_object_pmap_protect(local_object, - VME_OFFSET(entry), - entry->vme_end - entry->vme_start, - ((entry->is_shared || - map->mapped_in_other_pmaps) - ? PMAP_NULL - : map->pmap), - entry->vme_start, - prot); + VME_OFFSET(entry), + entry->vme_end - entry->vme_start, + ((entry->is_shared || + map->mapped_in_other_pmaps) + ? 
PMAP_NULL + : map->pmap), + entry->vme_start, + prot); assert(entry->wired_count == 0); @@ -5903,11 +5996,11 @@ REDISCOVER_ENTRY: if (local_object->true_share) { /* object is already in proper state: no COW needed */ assert(local_object->copy_strategy != - MEMORY_OBJECT_COPY_SYMMETRIC); + MEMORY_OBJECT_COPY_SYMMETRIC); } else { /* not true_share: ask for copy-on-write below */ assert(local_object->copy_strategy == - MEMORY_OBJECT_COPY_SYMMETRIC); + MEMORY_OBJECT_COPY_SYMMETRIC); entry->needs_copy = TRUE; } vm_object_unlock(local_object); @@ -5915,19 +6008,19 @@ REDISCOVER_ENTRY: vm_map_lock_write_to_read(map); } - if (entry->needs_copy) { + if (entry->needs_copy) { /* * Honor copy-on-write for COPY_SYMMETRIC * strategy. */ - vm_map_t local_map; - vm_object_t object; - vm_object_offset_t new_offset; - vm_prot_t prot; - boolean_t wired; - vm_map_version_t version; - vm_map_t real_map; - vm_prot_t fault_type; + vm_map_t local_map; + vm_object_t object; + vm_object_offset_t new_offset; + vm_prot_t prot; + boolean_t wired; + vm_map_version_t version; + vm_map_t real_map; + vm_prot_t fault_type; local_map = map; @@ -5935,17 +6028,17 @@ REDISCOVER_ENTRY: fault_type = VM_PROT_READ | VM_PROT_COPY; vm_counters.create_upl_extra_cow++; vm_counters.create_upl_extra_cow_pages += - (entry->vme_end - entry->vme_start) / PAGE_SIZE; + (entry->vme_end - entry->vme_start) / PAGE_SIZE; } else { fault_type = VM_PROT_WRITE; } if (vm_map_lookup_locked(&local_map, - offset, fault_type, - OBJECT_LOCK_EXCLUSIVE, - &version, &object, - &new_offset, &prot, &wired, - NULL, - &real_map) != KERN_SUCCESS) { + offset, fault_type, + OBJECT_LOCK_EXCLUSIVE, + &version, &object, + &new_offset, &prot, &wired, + NULL, + &real_map) != KERN_SUCCESS) { if (fault_type == VM_PROT_WRITE) { vm_counters.create_upl_lookup_failure_write++; } else { @@ -5954,8 +6047,9 @@ REDISCOVER_ENTRY: vm_map_unlock_read(local_map); return KERN_FAILURE; } - if (real_map != map) + if (real_map != map) { vm_map_unlock(real_map); + } vm_map_unlock_read(local_map); vm_object_unlock(object); @@ -5964,7 +6058,7 @@ REDISCOVER_ENTRY: } if (entry->is_sub_map) { - vm_map_t submap; + vm_map_t submap; submap = VME_SUBMAP(entry); local_start = entry->vme_start; @@ -5974,8 +6068,8 @@ REDISCOVER_ENTRY: vm_map_unlock_read(map); ret = vm_map_create_upl(submap, - local_offset + (offset - local_start), - upl_size, upl, page_list, count, flags, tag); + local_offset + (offset - local_start), + upl_size, upl, page_list, count, flags, tag); vm_map_deallocate(submap); return ret; @@ -5983,7 +6077,7 @@ REDISCOVER_ENTRY: if (sync_cow_data && (VME_OBJECT(entry)->shadow || - VME_OBJECT(entry)->copy)) { + VME_OBJECT(entry)->copy)) { local_object = VME_OBJECT(entry); local_start = entry->vme_start; local_offset = VME_OFFSET(entry); @@ -5993,13 +6087,13 @@ REDISCOVER_ENTRY: if (local_object->shadow && local_object->copy) { vm_object_lock_request(local_object->shadow, - ((vm_object_offset_t) - ((offset - local_start) + - local_offset) + - local_object->vo_shadow_offset), - *upl_size, FALSE, - MEMORY_OBJECT_DATA_SYNC, - VM_PROT_NO_CHANGE); + ((vm_object_offset_t) + ((offset - local_start) + + local_offset) + + local_object->vo_shadow_offset), + *upl_size, FALSE, + MEMORY_OBJECT_DATA_SYNC, + VM_PROT_NO_CHANGE); } sync_cow_data = FALSE; vm_object_deallocate(local_object); @@ -6015,26 +6109,28 @@ REDISCOVER_ENTRY: vm_map_unlock_read(map); vm_object_lock_request(local_object, - ((vm_object_offset_t) - ((offset - local_start) + - local_offset)), - (vm_object_size_t)*upl_size, - FALSE, - 
MEMORY_OBJECT_DATA_SYNC, - VM_PROT_NO_CHANGE); + ((vm_object_offset_t) + ((offset - local_start) + + local_offset)), + (vm_object_size_t)*upl_size, + FALSE, + MEMORY_OBJECT_DATA_SYNC, + VM_PROT_NO_CHANGE); force_data_sync = FALSE; vm_object_deallocate(local_object); goto REDISCOVER_ENTRY; } - if (VME_OBJECT(entry)->private) + if (VME_OBJECT(entry)->private) { *flags = UPL_DEV_MEMORY; - else + } else { *flags = 0; + } - if (VME_OBJECT(entry)->phys_contiguous) + if (VME_OBJECT(entry)->phys_contiguous) { *flags |= UPL_PHYS_CONTIG; + } local_object = VME_OBJECT(entry); local_offset = VME_OFFSET(entry); @@ -6050,18 +6146,18 @@ REDISCOVER_ENTRY: if (entry->protection & VM_PROT_EXECUTE) { #if MACH_ASSERT printf("pid %d[%s] create_upl out of executable range from " - "0x%llx to 0x%llx: side effects may include " - "code-signing violations later on\n", - proc_selfpid(), - (current_task()->bsd_info - ? proc_name_address(current_task()->bsd_info) - : "?"), - (uint64_t) entry->vme_start, - (uint64_t) entry->vme_end); + "0x%llx to 0x%llx: side effects may include " + "code-signing violations later on\n", + proc_selfpid(), + (current_task()->bsd_info + ? proc_name_address(current_task()->bsd_info) + : "?"), + (uint64_t) entry->vme_start, + (uint64_t) entry->vme_end); #endif /* MACH_ASSERT */ DTRACE_VM2(cs_executable_create_upl, - uint64_t, (uint64_t)entry->vme_start, - uint64_t, (uint64_t)entry->vme_end); + uint64_t, (uint64_t)entry->vme_start, + uint64_t, (uint64_t)entry->vme_end); cs_executable_create_upl++; } #endif /* CONFIG_EMBEDDED */ @@ -6077,22 +6173,22 @@ REDISCOVER_ENTRY: */ if (local_object->true_share) { assert(local_object->copy_strategy != - MEMORY_OBJECT_COPY_SYMMETRIC); + MEMORY_OBJECT_COPY_SYMMETRIC); } else if (local_object != kernel_object && - local_object != compressor_object && - !local_object->phys_contiguous) { + local_object != compressor_object && + !local_object->phys_contiguous) { #if VM_OBJECT_TRACKING_OP_TRUESHARE if (!local_object->true_share && vm_object_tracking_inited) { void *bt[VM_OBJECT_TRACKING_BTDEPTH]; int num = 0; num = OSBacktrace(bt, - VM_OBJECT_TRACKING_BTDEPTH); + VM_OBJECT_TRACKING_BTDEPTH); btlog_add_entry(vm_object_tracking_btlog, - local_object, - VM_OBJECT_TRACKING_OP_TRUESHARE, - bt, - num); + local_object, + VM_OBJECT_TRACKING_OP_TRUESHARE, + bt, + num); } #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ local_object->true_share = TRUE; @@ -6108,14 +6204,14 @@ REDISCOVER_ENTRY: vm_map_unlock_read(map); ret = vm_object_iopl_request(local_object, - ((vm_object_offset_t) - ((offset - local_start) + local_offset)), - *upl_size, - upl, - page_list, - count, - caller_flags, - tag); + ((vm_object_offset_t) + ((offset - local_start) + local_offset)), + *upl_size, + upl, + page_list, + count, + caller_flags, + tag); vm_object_deallocate(local_object); return ret; @@ -6129,70 +6225,75 @@ REDISCOVER_ENTRY: */ kern_return_t vm_map_enter_upl( - vm_map_t map, - upl_t upl, - vm_map_offset_t *dst_addr) + vm_map_t map, + upl_t upl, + vm_map_offset_t *dst_addr) { - vm_map_size_t size; - vm_object_offset_t offset; - vm_map_offset_t addr; - vm_page_t m; - kern_return_t kr; - int isVectorUPL = 0, curr_upl=0; - upl_t vector_upl = NULL; - vm_offset_t vector_upl_dst_addr = 0; - vm_map_t vector_upl_submap = NULL; - upl_offset_t subupl_offset = 0; - upl_size_t subupl_size = 0; - - if (upl == UPL_NULL) + vm_map_size_t size; + vm_object_offset_t offset; + vm_map_offset_t addr; + vm_page_t m; + kern_return_t kr; + int isVectorUPL = 0, curr_upl = 0; + upl_t vector_upl = NULL; + 
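/*
 * Minimal sketch (hypothetical helper, not in this patch) of the mapping
 * consistency rule the vector-UPL loop below enforces before any sub-UPL
 * is entered into the submap: nothing mapped yet is fine, everything
 * already mapped fails the call, and a partial mapping is fatal.
 */
static kern_return_t
vector_upl_mapped_state_sketch(int mapped, int valid_upls)
{
	if (mapped == 0) {
		return KERN_SUCCESS;    /* no sub-UPL mapped: OK to proceed */
	}
	if (mapped != valid_upls) {
		panic("partially mapped vector UPL (%d of %d)", mapped, valid_upls);
	}
	return KERN_FAILURE;            /* already fully mapped */
}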
vm_offset_t vector_upl_dst_addr = 0; + vm_map_t vector_upl_submap = NULL; + upl_offset_t subupl_offset = 0; + upl_size_t subupl_size = 0; + + if (upl == UPL_NULL) { return KERN_INVALID_ARGUMENT; + } - if((isVectorUPL = vector_upl_is_valid(upl))) { - int mapped=0,valid_upls=0; + if ((isVectorUPL = vector_upl_is_valid(upl))) { + int mapped = 0, valid_upls = 0; vector_upl = upl; upl_lock(vector_upl); - for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) { + for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) { upl = vector_upl_subupl_byindex(vector_upl, curr_upl ); - if(upl == NULL) + if (upl == NULL) { continue; + } valid_upls++; - if (UPL_PAGE_LIST_MAPPED & upl->flags) + if (UPL_PAGE_LIST_MAPPED & upl->flags) { mapped++; + } } - if(mapped) { - if(mapped != valid_upls) + if (mapped) { + if (mapped != valid_upls) { panic("Only %d of the %d sub-upls within the Vector UPL are already mapped\n", mapped, valid_upls); - else { + } else { upl_unlock(vector_upl); return KERN_FAILURE; } } kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, - VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, - &vector_upl_submap); - if( kr != KERN_SUCCESS ) + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, + &vector_upl_submap); + if (kr != KERN_SUCCESS) { panic("Vector UPL submap allocation failed\n"); + } map = vector_upl_submap; vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr); - curr_upl=0; - } - else + curr_upl = 0; + } else { upl_lock(upl); + } process_upl_to_enter: - if(isVectorUPL){ - if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) { + if (isVectorUPL) { + if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) { *dst_addr = vector_upl_dst_addr; upl_unlock(vector_upl); return KERN_SUCCESS; } upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ ); - if(upl == NULL) + if (upl == NULL) { goto process_upl_to_enter; + } vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size); *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset); @@ -6207,20 +6308,19 @@ process_upl_to_enter: } if ((!(upl->flags & UPL_SHADOWED)) && ((upl->flags & UPL_HAS_BUSY) || - !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) { - - vm_object_t object; - vm_page_t alias_page; - vm_object_offset_t new_offset; - unsigned int pg_num; - wpl_array_t lite_list; + !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) { + vm_object_t object; + vm_page_t alias_page; + vm_object_offset_t new_offset; + unsigned int pg_num; + wpl_array_t lite_list; if (upl->flags & UPL_INTERNAL) { lite_list = (wpl_array_t) - ((((uintptr_t)upl) + sizeof(struct upl)) - + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t))); + ((((uintptr_t)upl) + sizeof(struct upl)) + + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); } else { - lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl)); + lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl)); } object = upl->map_object; upl->map_object = vm_object_allocate(upl->size); @@ -6243,15 +6343,14 @@ process_upl_to_enter: pg_num = (unsigned int) (new_offset / PAGE_SIZE); assert(pg_num == new_offset / PAGE_SIZE); - if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) { - + if (lite_list[pg_num >> 5] & (1 << (pg_num & 31))) { VM_PAGE_GRAB_FICTITIOUS(alias_page); vm_object_lock(object); m = vm_page_lookup(object, offset); if (m == VM_PAGE_NULL) { - panic("vm_upl_map: page missing\n"); + panic("vm_upl_map: page 
missing\n"); } /* @@ -6270,7 +6369,7 @@ process_upl_to_enter: */ VM_PAGE_SET_PHYS_PAGE(alias_page, VM_PAGE_GET_PHYS_PAGE(m)); - vm_object_unlock(object); + vm_object_unlock(object); vm_page_lockspin_queues(); vm_page_wire(alias_page, VM_KERN_MEMORY_NONE, TRUE); @@ -6288,38 +6387,39 @@ process_upl_to_enter: } vm_object_unlock(upl->map_object); } - if (upl->flags & UPL_SHADOWED) - offset = 0; - else - offset = upl->offset - upl->map_object->paging_offset; + if (upl->flags & UPL_SHADOWED) { + offset = 0; + } else { + offset = upl->offset - upl->map_object->paging_offset; + } size = upl->size; vm_object_reference(upl->map_object); - if(!isVectorUPL) { + if (!isVectorUPL) { *dst_addr = 0; /* - * NEED A UPL_MAP ALIAS - */ + * NEED A UPL_MAP ALIAS + */ kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0, - VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK, - upl->map_object, offset, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK, + upl->map_object, offset, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); if (kr != KERN_SUCCESS) { vm_object_deallocate(upl->map_object); upl_unlock(upl); - return(kr); + return kr; } - } - else { + } else { kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0, - VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK, - upl->map_object, offset, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); - if(kr) + VM_FLAGS_FIXED, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_OSFMK, + upl->map_object, offset, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT); + if (kr) { panic("vm_map_enter failed for a Vector UPL\n"); + } } vm_object_lock(upl->map_object); @@ -6354,8 +6454,9 @@ process_upl_to_enter: upl->kaddr = (vm_offset_t) *dst_addr; assert(upl->kaddr == *dst_addr); - if(isVectorUPL) + if (isVectorUPL) { goto process_upl_to_enter; + } upl_unlock(upl); @@ -6374,46 +6475,49 @@ process_upl_to_enter: */ kern_return_t vm_map_remove_upl( - vm_map_t map, - upl_t upl) + vm_map_t map, + upl_t upl) { - vm_address_t addr; - upl_size_t size; - int isVectorUPL = 0, curr_upl = 0; - upl_t vector_upl = NULL; + vm_address_t addr; + upl_size_t size; + int isVectorUPL = 0, curr_upl = 0; + upl_t vector_upl = NULL; - if (upl == UPL_NULL) + if (upl == UPL_NULL) { return KERN_INVALID_ARGUMENT; + } - if((isVectorUPL = vector_upl_is_valid(upl))) { - int unmapped=0, valid_upls=0; + if ((isVectorUPL = vector_upl_is_valid(upl))) { + int unmapped = 0, valid_upls = 0; vector_upl = upl; upl_lock(vector_upl); - for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) { + for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) { upl = vector_upl_subupl_byindex(vector_upl, curr_upl ); - if(upl == NULL) + if (upl == NULL) { continue; + } valid_upls++; - if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) + if (!(UPL_PAGE_LIST_MAPPED & upl->flags)) { unmapped++; + } } - if(unmapped) { - if(unmapped != valid_upls) + if (unmapped) { + if (unmapped != valid_upls) { panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls); - else { + } else { upl_unlock(vector_upl); return KERN_FAILURE; } } - curr_upl=0; - } - else + curr_upl = 0; + } else { upl_lock(upl); + } process_upl_to_remove: - if(isVectorUPL) { - if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) { + if (isVectorUPL) { + if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) { vm_map_t v_upl_submap; vm_offset_t v_upl_submap_dst_addr; 
vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr); @@ -6425,8 +6529,9 @@ process_upl_to_remove: } upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ ); - if(upl == NULL) + if (upl == NULL) { goto process_upl_to_remove; + } } if (upl->flags & UPL_PAGE_LIST_MAPPED) { @@ -6434,29 +6539,28 @@ process_upl_to_remove: size = upl->size; assert(upl->ref_count > 1); - upl->ref_count--; /* removing mapping ref */ + upl->ref_count--; /* removing mapping ref */ upl->flags &= ~UPL_PAGE_LIST_MAPPED; upl->kaddr = (vm_offset_t) 0; - if(!isVectorUPL) { + if (!isVectorUPL) { upl_unlock(upl); vm_map_remove( map, vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(map)), + VM_MAP_PAGE_MASK(map)), vm_map_round_page(addr + size, - VM_MAP_PAGE_MASK(map)), + VM_MAP_PAGE_MASK(map)), VM_MAP_REMOVE_NO_FLAGS); return KERN_SUCCESS; - } - else { + } else { /* - * If it's a Vectored UPL, we'll be removing the entire - * submap anyways, so no need to remove individual UPL - * element mappings from within the submap - */ + * If it's a Vectored UPL, we'll be removing the entire + * submap anyways, so no need to remove individual UPL + * element mappings from within the submap + */ goto process_upl_to_remove; } } @@ -6468,67 +6572,69 @@ process_upl_to_remove: kern_return_t upl_commit_range( - upl_t upl, - upl_offset_t offset, - upl_size_t size, - int flags, - upl_page_info_t *page_list, - mach_msg_type_number_t count, - boolean_t *empty) + upl_t upl, + upl_offset_t offset, + upl_size_t size, + int flags, + upl_page_info_t *page_list, + mach_msg_type_number_t count, + boolean_t *empty) { - upl_size_t xfer_size, subupl_size = size; - vm_object_t shadow_object; - vm_object_t object; - vm_object_t m_object; - vm_object_offset_t target_offset; - upl_offset_t subupl_offset = offset; - int entry; - wpl_array_t lite_list; - int occupied; - int clear_refmod = 0; - int pgpgout_count = 0; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; - int dw_count; - int dw_limit; - int isVectorUPL = 0; - upl_t vector_upl = NULL; - boolean_t should_be_throttled = FALSE; - - vm_page_t nxt_page = VM_PAGE_NULL; - int fast_path_possible = 0; - int fast_path_full_commit = 0; - int throttle_page = 0; - int unwired_count = 0; - int local_queue_count = 0; - vm_page_t first_local, last_local; + upl_size_t xfer_size, subupl_size = size; + vm_object_t shadow_object; + vm_object_t object; + vm_object_t m_object; + vm_object_offset_t target_offset; + upl_offset_t subupl_offset = offset; + int entry; + wpl_array_t lite_list; + int occupied; + int clear_refmod = 0; + int pgpgout_count = 0; + struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; + struct vm_page_delayed_work *dwp; + int dw_count; + int dw_limit; + int isVectorUPL = 0; + upl_t vector_upl = NULL; + boolean_t should_be_throttled = FALSE; + + vm_page_t nxt_page = VM_PAGE_NULL; + int fast_path_possible = 0; + int fast_path_full_commit = 0; + int throttle_page = 0; + int unwired_count = 0; + int local_queue_count = 0; + vm_page_t first_local, last_local; *empty = FALSE; - if (upl == UPL_NULL) + if (upl == UPL_NULL) { return KERN_INVALID_ARGUMENT; + } - if (count == 0) + if (count == 0) { page_list = NULL; + } - if((isVectorUPL = vector_upl_is_valid(upl))) { + if ((isVectorUPL = vector_upl_is_valid(upl))) { vector_upl = upl; upl_lock(vector_upl); - } - else + } else { upl_lock(upl); + } process_upl_to_commit: - if(isVectorUPL) { + if (isVectorUPL) { size = subupl_size; offset = subupl_offset; - if(size == 0) { + if (size 
== 0) { upl_unlock(vector_upl); return KERN_SUCCESS; } upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size); - if(upl == NULL) { + if (upl == NULL) { upl_unlock(vector_upl); return KERN_FAILURE; } @@ -6547,47 +6653,51 @@ process_upl_to_commit: upl->upl_commit_index++; } #endif - if (upl->flags & UPL_DEVICE_MEMORY) + if (upl->flags & UPL_DEVICE_MEMORY) { xfer_size = 0; - else if ((offset + size) <= upl->size) - xfer_size = size; - else { - if(!isVectorUPL) + } else if ((offset + size) <= upl->size) { + xfer_size = size; + } else { + if (!isVectorUPL) { upl_unlock(upl); - else { + } else { upl_unlock(vector_upl); } return KERN_FAILURE; } - if (upl->flags & UPL_SET_DIRTY) + if (upl->flags & UPL_SET_DIRTY) { flags |= UPL_COMMIT_SET_DIRTY; - if (upl->flags & UPL_CLEAR_DIRTY) - flags |= UPL_COMMIT_CLEAR_DIRTY; + } + if (upl->flags & UPL_CLEAR_DIRTY) { + flags |= UPL_COMMIT_CLEAR_DIRTY; + } - if (upl->flags & UPL_INTERNAL) + if (upl->flags & UPL_INTERNAL) { lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl)) - + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t))); - else + + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); + } else { lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl)); + } object = upl->map_object; if (upl->flags & UPL_SHADOWED) { - vm_object_lock(object); + vm_object_lock(object); shadow_object = object->shadow; } else { shadow_object = object; } - entry = offset/PAGE_SIZE; + entry = offset / PAGE_SIZE; target_offset = (vm_object_offset_t)offset; assert(!(target_offset & PAGE_MASK)); assert(!(xfer_size & PAGE_MASK)); - if (upl->flags & UPL_KERNEL_OBJECT) + if (upl->flags & UPL_KERNEL_OBJECT) { vm_object_lock_shared(shadow_object); - else + } else { vm_object_lock(shadow_object); + } VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object); @@ -6607,14 +6717,15 @@ process_upl_to_commit: */ flags &= ~UPL_COMMIT_CS_VALIDATED; } - if (! page_list) { + if (!page_list) { /* * No page list to get the code-signing info from !? 
*/ flags &= ~UPL_COMMIT_CS_VALIDATED; } - if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) + if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal) { should_be_throttled = TRUE; + } dwp = &dw_array[0]; dw_count = 0; @@ -6625,9 +6736,7 @@ process_upl_to_commit: !isVectorUPL && shadow_object->purgable != VM_PURGABLE_VOLATILE && shadow_object->purgable != VM_PURGABLE_EMPTY) { - if (!vm_page_queue_empty(&shadow_object->memq)) { - if (size == shadow_object->vo_size) { nxt_page = (vm_page_t)vm_page_queue_first(&shadow_object->memq); fast_path_full_commit = 1; @@ -6636,8 +6745,8 @@ process_upl_to_commit: if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object->internal && (shadow_object->purgable == VM_PURGABLE_DENY || - shadow_object->purgable == VM_PURGABLE_NONVOLATILE || - shadow_object->purgable == VM_PURGABLE_VOLATILE)) { + shadow_object->purgable == VM_PURGABLE_NONVOLATILE || + shadow_object->purgable == VM_PURGABLE_VOLATILE)) { throttle_page = 1; } } @@ -6646,7 +6755,7 @@ process_upl_to_commit: last_local = VM_PAGE_NULL; while (xfer_size) { - vm_page_t t, m; + vm_page_t t, m; dwp->dw_mask = 0; clear_refmod = 0; @@ -6654,37 +6763,40 @@ process_upl_to_commit: m = VM_PAGE_NULL; if (upl->flags & UPL_LITE) { - unsigned int pg_num; + unsigned int pg_num; if (nxt_page != VM_PAGE_NULL) { m = nxt_page; nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq); target_offset = m->vmp_offset; } - pg_num = (unsigned int) (target_offset/PAGE_SIZE); - assert(pg_num == target_offset/PAGE_SIZE); + pg_num = (unsigned int) (target_offset / PAGE_SIZE); + assert(pg_num == target_offset / PAGE_SIZE); - if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) { - lite_list[pg_num>>5] &= ~(1 << (pg_num & 31)); + if (lite_list[pg_num >> 5] & (1 << (pg_num & 31))) { + lite_list[pg_num >> 5] &= ~(1 << (pg_num & 31)); - if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) + if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) { m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset)); - } else + } + } else { m = NULL; + } } if (upl->flags & UPL_SHADOWED) { - if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) { - + if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) { t->vmp_free_when_done = FALSE; VM_PAGE_FREE(t); - if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) + if (!(upl->flags & UPL_KERNEL_OBJECT) && m == VM_PAGE_NULL) { m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset); + } } } - if (m == VM_PAGE_NULL) + if (m == VM_PAGE_NULL) { goto commit_next_page; + } m_object = VM_PAGE_OBJECT(m); @@ -6705,20 +6817,21 @@ process_upl_to_commit: m->vmp_cs_tainted = page_list[entry].cs_tainted; m->vmp_cs_nx = page_list[entry].cs_nx; } - if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) + if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL) { m->vmp_written_by_kernel = TRUE; + } if (upl->flags & UPL_IO_WIRE) { - - if (page_list) + if (page_list) { page_list[entry].phys_addr = 0; + } if (flags & UPL_COMMIT_SET_DIRTY) { SET_PAGE_DIRTY(m, FALSE); } else if (flags & UPL_COMMIT_CLEAR_DIRTY) { m->vmp_dirty = FALSE; - if (! 
(flags & UPL_COMMIT_CS_VALIDATED) && + if (!(flags & UPL_COMMIT_CS_VALIDATED) && m->vmp_cs_validated && !m->vmp_cs_tainted) { /* * CODE SIGNING: @@ -6754,8 +6867,9 @@ process_upl_to_commit: m->vmp_absent = FALSE; dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); } else { - if (m->vmp_wire_count == 0) + if (m->vmp_wire_count == 0) { panic("wire_count == 0, m = %p, obj = %p\n", m, shadow_object); + } assert(m->vmp_q_state == VM_PAGE_IS_WIRED); /* @@ -6792,12 +6906,14 @@ process_upl_to_commit: m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; } else { if (flags & UPL_COMMIT_INACTIVATE) { - if (shadow_object->internal) + if (shadow_object->internal) { m->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q; - else + } else { m->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q; - } else + } + } else { m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q; + } } } } else { @@ -6806,24 +6922,27 @@ process_upl_to_commit: clear_refmod |= VM_MEM_REFERENCED; } if (m->vmp_absent) { - if (flags & UPL_COMMIT_FREE_ABSENT) + if (flags & UPL_COMMIT_FREE_ABSENT) { dwp->dw_mask |= DW_vm_page_free; - else { + } else { m->vmp_absent = FALSE; dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); - if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal)) + if (!(dwp->dw_mask & DW_vm_page_deactivate_internal)) { dwp->dw_mask |= DW_vm_page_activate; + } } - } else + } else { dwp->dw_mask |= DW_vm_page_unwire; + } } goto commit_next_page; } assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR); - if (page_list) + if (page_list) { page_list[entry].phys_addr = 0; + } /* * make sure to clear the hardware @@ -6837,13 +6956,15 @@ process_upl_to_commit: clear_refmod |= VM_MEM_MODIFIED; } - if (m->vmp_laundry) + if (m->vmp_laundry) { dwp->dw_mask |= DW_vm_pageout_throttle_up; + } - if (VM_PAGE_WIRED(m)) + if (VM_PAGE_WIRED(m)) { m->vmp_free_when_done = FALSE; + } - if (! 
(flags & UPL_COMMIT_CS_VALIDATED) && + if (!(flags & UPL_COMMIT_CS_VALIDATED) && m->vmp_cs_validated && !m->vmp_cs_tainted) { /* * CODE SIGNING: @@ -6864,8 +6985,9 @@ process_upl_to_commit: */ if (m->vmp_busy) { #if CONFIG_PHANTOM_CACHE - if (m->vmp_absent && !m_object->internal) + if (m->vmp_absent && !m_object->internal) { dwp->dw_mask |= DW_vm_phantom_cache_update; + } #endif m->vmp_absent = FALSE; @@ -6935,8 +7057,9 @@ process_upl_to_commit: * this can be used to strip the precious bit * as well as clean */ - if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) + if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS)) { m->vmp_precious = FALSE; + } if (flags & UPL_COMMIT_SET_DIRTY) { SET_PAGE_DIRTY(m, FALSE); @@ -6961,18 +7084,16 @@ process_upl_to_commit: */ SET_PAGE_DIRTY(m, FALSE); dwp->dw_mask |= DW_vm_page_activate; - } else { if ((flags & UPL_COMMIT_INACTIVATE) && !m->vmp_clustered && (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q)) { dwp->dw_mask |= DW_vm_page_deactivate_internal; clear_refmod |= VM_MEM_REFERENCED; - } else if ( !VM_PAGE_PAGEABLE(m)) { - - if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) + } else if (!VM_PAGE_PAGEABLE(m)) { + if (m->vmp_clustered || (flags & UPL_COMMIT_SPECULATE)) { dwp->dw_mask |= DW_vm_page_speculate; - else if (m->vmp_reference) + } else if (m->vmp_reference) { dwp->dw_mask |= DW_vm_page_activate; - else { + } else { dwp->dw_mask |= DW_vm_page_deactivate_internal; clear_refmod |= VM_MEM_REFERENCED; } @@ -6992,8 +7113,9 @@ process_upl_to_commit: dwp->dw_mask |= DW_PAGE_WAKEUP; commit_next_page: - if (clear_refmod) + if (clear_refmod) { pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m), clear_refmod); + } target_offset += PAGE_SIZE_64; xfer_size -= PAGE_SIZE; @@ -7010,38 +7132,41 @@ commit_next_page: dw_count = 0; } } else { - if (dwp->dw_mask & DW_clear_busy) + if (dwp->dw_mask & DW_clear_busy) { m->vmp_busy = FALSE; + } - if (dwp->dw_mask & DW_PAGE_WAKEUP) + if (dwp->dw_mask & DW_PAGE_WAKEUP) { PAGE_WAKEUP(m); + } } } } - if (dw_count) + if (dw_count) { vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + } if (fast_path_possible) { - assert(shadow_object->purgable != VM_PURGABLE_VOLATILE); assert(shadow_object->purgable != VM_PURGABLE_EMPTY); if (local_queue_count || unwired_count) { - if (local_queue_count) { - vm_page_t first_target; - vm_page_queue_head_t *target_queue; + vm_page_t first_target; + vm_page_queue_head_t *target_queue; - if (throttle_page) + if (throttle_page) { target_queue = &vm_page_queue_throttled; - else { + } else { if (flags & UPL_COMMIT_INACTIVATE) { - if (shadow_object->internal) + if (shadow_object->internal) { target_queue = &vm_page_queue_anonymous; - else + } else { target_queue = &vm_page_queue_inactive; - } else + } + } else { target_queue = &vm_page_queue_active; + } } /* * Transfer the entire local queue to a regular LRU page queues. 
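 *
 * (The lite_list manipulations in the commit loop above pack one bit
 * per page into 32-bit words: page N lives in word N >> 5 at bit
 * N & 31.  A sketch of the three operations the commit and abort
 * loops perform on that bitmap; the helper names are illustrative,
 * the kernel open-codes them:
 *
 *	static inline boolean_t
 *	lite_list_test(wpl_array_t bits, unsigned int pg_num)
 *	{
 *		return (bits[pg_num >> 5] & (1U << (pg_num & 31))) != 0;
 *	}
 *
 *	static inline void
 *	lite_list_set(wpl_array_t bits, unsigned int pg_num)
 *	{
 *		bits[pg_num >> 5] |= 1U << (pg_num & 31);
 *	}
 *
 *	static inline void
 *	lite_list_clear(wpl_array_t bits, unsigned int pg_num)
 *	{
 *		bits[pg_num >> 5] &= ~(1U << (pg_num & 31));
 *	})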
@@ -7050,10 +7175,11 @@ commit_next_page: first_target = (vm_page_t) vm_page_queue_first(target_queue); - if (vm_page_queue_empty(target_queue)) + if (vm_page_queue_empty(target_queue)) { target_queue->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local); - else + } else { first_target->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local); + } target_queue->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local); first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue); @@ -7066,18 +7192,21 @@ commit_next_page: vm_page_throttled_count += local_queue_count; } else { if (flags & UPL_COMMIT_INACTIVATE) { - if (shadow_object->internal) + if (shadow_object->internal) { vm_page_anonymous_count += local_queue_count; + } vm_page_inactive_count += local_queue_count; token_new_pagecount += local_queue_count; - } else + } else { vm_page_active_count += local_queue_count; + } - if (shadow_object->internal) + if (shadow_object->internal) { vm_page_pageable_internal_count += local_queue_count; - else + } else { vm_page_pageable_external_count += local_queue_count; + } } } else { vm_page_lockspin_queues(); @@ -7093,16 +7222,16 @@ commit_next_page: } occupied = 1; - if (upl->flags & UPL_DEVICE_MEMORY) { + if (upl->flags & UPL_DEVICE_MEMORY) { occupied = 0; } else if (upl->flags & UPL_LITE) { - int pg_num; - int i; + int pg_num; + int i; occupied = 0; if (!fast_path_full_commit) { - pg_num = upl->size/PAGE_SIZE; + pg_num = upl->size / PAGE_SIZE; pg_num = (pg_num + 31) >> 5; for (i = 0; i < pg_num; i++) { @@ -7113,8 +7242,9 @@ commit_next_page: } } } else { - if (vm_page_queue_empty(&upl->map_object->memq)) + if (vm_page_queue_empty(&upl->map_object->memq)) { occupied = 0; + } } if (occupied == 0) { /* @@ -7125,11 +7255,12 @@ commit_next_page: * should be considered relevant for the Vector UPL and not * the internal UPLs. */ - if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) + if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) { *empty = TRUE; + } if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) { - /* + /* * this is not a paging object * so we need to drop the paging reference * that was taken when we created the UPL @@ -7138,21 +7269,22 @@ commit_next_page: vm_object_activity_end(shadow_object); vm_object_collapse(shadow_object, 0, TRUE); } else { - /* - * we dontated the paging reference to - * the map object... vm_pageout_object_terminate - * will drop this reference - */ + /* + * we dontated the paging reference to + * the map object... vm_pageout_object_terminate + * will drop this reference + */ } } VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object, shadow_object->wire_tag); vm_object_unlock(shadow_object); - if (object != shadow_object) - vm_object_unlock(object); + if (object != shadow_object) { + vm_object_unlock(object); + } - if(!isVectorUPL) + if (!isVectorUPL) { upl_unlock(upl); - else { + } else { /* * If we completed our operations on an UPL that is * part of a Vectored UPL and if empty is TRUE, then @@ -7162,7 +7294,7 @@ commit_next_page: * so that in ubc_upl_commit_range or ubc_upl_commit, we * can go ahead and deallocate the Vector UPL too. 
*/ - if(*empty==TRUE) { + if (*empty == TRUE) { *empty = vector_upl_set_subupl(vector_upl, upl, 0); upl_deallocate(upl); } @@ -7177,53 +7309,55 @@ commit_next_page: kern_return_t upl_abort_range( - upl_t upl, - upl_offset_t offset, - upl_size_t size, - int error, - boolean_t *empty) + upl_t upl, + upl_offset_t offset, + upl_size_t size, + int error, + boolean_t *empty) { - upl_page_info_t *user_page_list = NULL; - upl_size_t xfer_size, subupl_size = size; - vm_object_t shadow_object; - vm_object_t object; - vm_object_offset_t target_offset; - upl_offset_t subupl_offset = offset; - int entry; - wpl_array_t lite_list; - int occupied; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; - int dw_count; - int dw_limit; - int isVectorUPL = 0; - upl_t vector_upl = NULL; + upl_page_info_t *user_page_list = NULL; + upl_size_t xfer_size, subupl_size = size; + vm_object_t shadow_object; + vm_object_t object; + vm_object_offset_t target_offset; + upl_offset_t subupl_offset = offset; + int entry; + wpl_array_t lite_list; + int occupied; + struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; + struct vm_page_delayed_work *dwp; + int dw_count; + int dw_limit; + int isVectorUPL = 0; + upl_t vector_upl = NULL; *empty = FALSE; - if (upl == UPL_NULL) + if (upl == UPL_NULL) { return KERN_INVALID_ARGUMENT; + } - if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) ) + if ((upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES)) { return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty); + } - if((isVectorUPL = vector_upl_is_valid(upl))) { + if ((isVectorUPL = vector_upl_is_valid(upl))) { vector_upl = upl; upl_lock(vector_upl); - } - else + } else { upl_lock(upl); + } process_upl_to_abort: - if(isVectorUPL) { + if (isVectorUPL) { size = subupl_size; offset = subupl_offset; - if(size == 0) { + if (size == 0) { upl_unlock(vector_upl); return KERN_SUCCESS; } upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size); - if(upl == NULL) { + if (upl == NULL) { upl_unlock(vector_upl); return KERN_FAILURE; } @@ -7244,14 +7378,14 @@ process_upl_to_abort: upl->upl_commit_index++; } #endif - if (upl->flags & UPL_DEVICE_MEMORY) + if (upl->flags & UPL_DEVICE_MEMORY) { xfer_size = 0; - else if ((offset + size) <= upl->size) - xfer_size = size; - else { - if(!isVectorUPL) + } else if ((offset + size) <= upl->size) { + xfer_size = size; + } else { + if (!isVectorUPL) { upl_unlock(upl); - else { + } else { upl_unlock(vector_upl); } @@ -7259,32 +7393,34 @@ process_upl_to_abort: } if (upl->flags & UPL_INTERNAL) { lite_list = (wpl_array_t) - ((((uintptr_t)upl) + sizeof(struct upl)) - + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t))); + ((((uintptr_t)upl) + sizeof(struct upl)) + + ((upl->size / PAGE_SIZE) * sizeof(upl_page_info_t))); user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); } else { lite_list = (wpl_array_t) - (((uintptr_t)upl) + sizeof(struct upl)); + (((uintptr_t)upl) + sizeof(struct upl)); } object = upl->map_object; if (upl->flags & UPL_SHADOWED) { - vm_object_lock(object); + vm_object_lock(object); shadow_object = object->shadow; - } else + } else { shadow_object = object; + } - entry = offset/PAGE_SIZE; + entry = offset / PAGE_SIZE; target_offset = (vm_object_offset_t)offset; assert(!(target_offset & PAGE_MASK)); assert(!(xfer_size & PAGE_MASK)); - if (upl->flags & UPL_KERNEL_OBJECT) + if (upl->flags & UPL_KERNEL_OBJECT) { vm_object_lock_shared(shadow_object); - else + } else 
{ vm_object_lock(shadow_object); + } if (upl->flags & UPL_ACCESS_BLOCKED) { assert(shadow_object->blocked_access); @@ -7296,54 +7432,57 @@ process_upl_to_abort: dw_count = 0; dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); - if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) + if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT)) { panic("upl_abort_range: kernel_object being DUMPED"); + } while (xfer_size) { - vm_page_t t, m; - unsigned int pg_num; - boolean_t needed; + vm_page_t t, m; + unsigned int pg_num; + boolean_t needed; - pg_num = (unsigned int) (target_offset/PAGE_SIZE); - assert(pg_num == target_offset/PAGE_SIZE); + pg_num = (unsigned int) (target_offset / PAGE_SIZE); + assert(pg_num == target_offset / PAGE_SIZE); needed = FALSE; - if (user_page_list) + if (user_page_list) { needed = user_page_list[pg_num].needed; + } dwp->dw_mask = 0; m = VM_PAGE_NULL; if (upl->flags & UPL_LITE) { + if (lite_list[pg_num >> 5] & (1 << (pg_num & 31))) { + lite_list[pg_num >> 5] &= ~(1 << (pg_num & 31)); - if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) { - lite_list[pg_num>>5] &= ~(1 << (pg_num & 31)); - - if ( !(upl->flags & UPL_KERNEL_OBJECT)) + if (!(upl->flags & UPL_KERNEL_OBJECT)) { m = vm_page_lookup(shadow_object, target_offset + - (upl->offset - shadow_object->paging_offset)); + (upl->offset - shadow_object->paging_offset)); + } } } if (upl->flags & UPL_SHADOWED) { - if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) { - t->vmp_free_when_done = FALSE; + if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) { + t->vmp_free_when_done = FALSE; VM_PAGE_FREE(t); - if (m == VM_PAGE_NULL) + if (m == VM_PAGE_NULL) { m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset); + } } } - if ((upl->flags & UPL_KERNEL_OBJECT)) + if ((upl->flags & UPL_KERNEL_OBJECT)) { goto abort_next_page; + } if (m != VM_PAGE_NULL) { - assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR); if (m->vmp_absent) { - boolean_t must_free = TRUE; + boolean_t must_free = TRUE; /* * COPYOUT = FALSE case @@ -7398,16 +7537,18 @@ process_upl_to_abort: dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP); - if (must_free == TRUE) + if (must_free == TRUE) { dwp->dw_mask |= DW_vm_page_free; - else + } else { dwp->dw_mask |= DW_vm_page_activate; + } } else { - /* + /* * Handle the trusted pager throttle. */ - if (m->vmp_laundry) + if (m->vmp_laundry) { dwp->dw_mask |= DW_vm_pageout_throttle_up; + } if (upl->flags & UPL_ACCESS_BLOCKED) { /* @@ -7418,9 +7559,9 @@ process_upl_to_abort: dwp->dw_mask |= DW_clear_busy; } if (m->vmp_overwriting) { - if (m->vmp_busy) + if (m->vmp_busy) { dwp->dw_mask |= DW_clear_busy; - else { + } else { /* * deal with the 'alternate' method * of stabilizing the page... 
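 *
 * (Per-page state changes in this loop are not applied immediately:
 * each iteration ORs DW_* flags into dwp->dw_mask, and the batch is
 * flushed through vm_page_do_delayed_work() once dw_limit entries
 * accumulate, so the page-queues lock is taken once per batch rather
 * than once per page.  A sketch of the shape shared by the commit and
 * abort loops; illustrative, eliding the inline fast cases:
 *
 *	dwp = &dw_array[0];
 *	dw_count = 0;
 *	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
 *	while (xfer_size) {
 *		dwp->dw_mask = 0;
 *		// ... decide the page's fate, setting DW_* bits ...
 *		if (dwp->dw_mask) {
 *			VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
 *			if (dw_count >= dw_limit) {
 *				vm_page_do_delayed_work(shadow_object,
 *				    VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 *				dwp = &dw_array[0];
 *				dw_count = 0;
 *			}
 *		}
 *	}
 *	if (dw_count) {
 *		vm_page_do_delayed_work(shadow_object,
 *		    VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 *	})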
@@ -7451,9 +7592,9 @@ process_upl_to_abort: * implementing an LRU on the inactive q */ dwp->dw_mask |= DW_vm_page_lru; - - } else if ( !VM_PAGE_PAGEABLE(m)) + } else if (!VM_PAGE_PAGEABLE(m)) { dwp->dw_mask |= DW_vm_page_deactivate_internal; + } } dwp->dw_mask |= DW_PAGE_WAKEUP; } @@ -7475,26 +7616,29 @@ abort_next_page: dw_count = 0; } } else { - if (dwp->dw_mask & DW_clear_busy) + if (dwp->dw_mask & DW_clear_busy) { m->vmp_busy = FALSE; + } - if (dwp->dw_mask & DW_PAGE_WAKEUP) + if (dwp->dw_mask & DW_PAGE_WAKEUP) { PAGE_WAKEUP(m); + } } } } - if (dw_count) + if (dw_count) { vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count); + } occupied = 1; - if (upl->flags & UPL_DEVICE_MEMORY) { + if (upl->flags & UPL_DEVICE_MEMORY) { occupied = 0; } else if (upl->flags & UPL_LITE) { - int pg_num; - int i; + int pg_num; + int i; - pg_num = upl->size/PAGE_SIZE; + pg_num = upl->size / PAGE_SIZE; pg_num = (pg_num + 31) >> 5; occupied = 0; @@ -7505,8 +7649,9 @@ abort_next_page: } } } else { - if (vm_page_queue_empty(&upl->map_object->memq)) + if (vm_page_queue_empty(&upl->map_object->memq)) { occupied = 0; + } } if (occupied == 0) { /* @@ -7517,11 +7662,12 @@ abort_next_page: * should be considered relevant for the Vector UPL and * not the internal UPLs. */ - if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) + if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) { *empty = TRUE; + } if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) { - /* + /* * this is not a paging object * so we need to drop the paging reference * that was taken when we created the UPL @@ -7530,31 +7676,32 @@ abort_next_page: vm_object_activity_end(shadow_object); vm_object_collapse(shadow_object, 0, TRUE); } else { - /* - * we dontated the paging reference to - * the map object... vm_pageout_object_terminate - * will drop this reference - */ + /* + * we dontated the paging reference to + * the map object... vm_pageout_object_terminate + * will drop this reference + */ } } vm_object_unlock(shadow_object); - if (object != shadow_object) - vm_object_unlock(object); + if (object != shadow_object) { + vm_object_unlock(object); + } - if(!isVectorUPL) + if (!isVectorUPL) { upl_unlock(upl); - else { + } else { /* - * If we completed our operations on an UPL that is - * part of a Vectored UPL and if empty is TRUE, then - * we should go ahead and deallocate this UPL element. - * Then we check if this was the last of the UPL elements - * within that Vectored UPL. If so, set empty to TRUE - * so that in ubc_upl_abort_range or ubc_upl_abort, we - * can go ahead and deallocate the Vector UPL too. - */ - if(*empty == TRUE) { - *empty = vector_upl_set_subupl(vector_upl, upl,0); + * If we completed our operations on an UPL that is + * part of a Vectored UPL and if empty is TRUE, then + * we should go ahead and deallocate this UPL element. + * Then we check if this was the last of the UPL elements + * within that Vectored UPL. If so, set empty to TRUE + * so that in ubc_upl_abort_range or ubc_upl_abort, we + * can go ahead and deallocate the Vector UPL too. 
+ */ + if (*empty == TRUE) { + *empty = vector_upl_set_subupl(vector_upl, upl, 0); upl_deallocate(upl); } goto process_upl_to_abort; @@ -7566,13 +7713,14 @@ abort_next_page: kern_return_t upl_abort( - upl_t upl, - int error) + upl_t upl, + int error) { - boolean_t empty; + boolean_t empty; - if (upl == UPL_NULL) + if (upl == UPL_NULL) { return KERN_INVALID_ARGUMENT; + } return upl_abort_range(upl, 0, upl->size, error, &empty); } @@ -7581,14 +7729,15 @@ upl_abort( /* an option on commit should be wire */ kern_return_t upl_commit( - upl_t upl, - upl_page_info_t *page_list, - mach_msg_type_number_t count) + upl_t upl, + upl_page_info_t *page_list, + mach_msg_type_number_t count) { - boolean_t empty; + boolean_t empty; - if (upl == UPL_NULL) + if (upl == UPL_NULL) { return KERN_INVALID_ARGUMENT; + } return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty); } @@ -7596,44 +7745,49 @@ upl_commit( void iopl_valid_data( - upl_t upl, + upl_t upl, vm_tag_t tag) { - vm_object_t object; - vm_offset_t offset; - vm_page_t m, nxt_page = VM_PAGE_NULL; - upl_size_t size; - int wired_count = 0; + vm_object_t object; + vm_offset_t offset; + vm_page_t m, nxt_page = VM_PAGE_NULL; + upl_size_t size; + int wired_count = 0; - if (upl == NULL) + if (upl == NULL) { panic("iopl_valid_data: NULL upl"); - if (vector_upl_is_valid(upl)) + } + if (vector_upl_is_valid(upl)) { panic("iopl_valid_data: vector upl"); - if ((upl->flags & (UPL_DEVICE_MEMORY|UPL_SHADOWED|UPL_ACCESS_BLOCKED|UPL_IO_WIRE|UPL_INTERNAL)) != UPL_IO_WIRE) + } + if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) { panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags); + } object = upl->map_object; - if (object == kernel_object || object == compressor_object) + if (object == kernel_object || object == compressor_object) { panic("iopl_valid_data: object == kernel or compressor"); + } if (object->purgable == VM_PURGABLE_VOLATILE || - object->purgable == VM_PURGABLE_EMPTY) + object->purgable == VM_PURGABLE_EMPTY) { panic("iopl_valid_data: object %p purgable %d", - object, object->purgable); + object, object->purgable); + } size = upl->size; vm_object_lock(object); VM_OBJECT_WIRED_PAGE_UPDATE_START(object); - if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) + if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) { nxt_page = (vm_page_t)vm_page_queue_first(&object->memq); - else + } else { offset = 0 + upl->offset - object->paging_offset; + } while (size) { - if (nxt_page != VM_PAGE_NULL) { m = nxt_page; nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq); @@ -7641,15 +7795,18 @@ iopl_valid_data( m = vm_page_lookup(object, offset); offset += PAGE_SIZE; - if (m == VM_PAGE_NULL) + if (m == VM_PAGE_NULL) { panic("iopl_valid_data: missing expected page at offset %lx", (long)offset); + } } if (m->vmp_busy) { - if (!m->vmp_absent) + if (!m->vmp_absent) { panic("iopl_valid_data: busy page w/o absent"); + } - if (m->vmp_pageq.next || m->vmp_pageq.prev) + if (m->vmp_pageq.next || m->vmp_pageq.prev) { panic("iopl_valid_data: busy+absent page on page queue"); + } if (m->vmp_reusable) { panic("iopl_valid_data: %p is reusable", m); } @@ -7672,7 +7829,6 @@ iopl_valid_data( size -= PAGE_SIZE; } if (wired_count) { - VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count); assert(object->resident_page_count >= object->wired_page_count); @@ -7691,10 +7847,10 @@ iopl_valid_data( void vm_object_set_pmap_cache_attr( - 
vm_object_t object, - upl_page_info_array_t user_page_list, - unsigned int num_pages, - boolean_t batch_pmap_op) + vm_object_t object, + upl_page_info_array_t user_page_list, + unsigned int num_pages, + boolean_t batch_pmap_op) { unsigned int cache_attr = 0; @@ -7706,21 +7862,21 @@ vm_object_set_pmap_cache_attr( } -boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t); -kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*); +boolean_t vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t); +kern_return_t vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*); boolean_t vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list, - wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag) + wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag) { - vm_page_t dst_page; - unsigned int entry; - int page_count; - int delayed_unlock = 0; - boolean_t retval = TRUE; - ppnum_t phys_page; + vm_page_t dst_page; + unsigned int entry; + int page_count; + int delayed_unlock = 0; + boolean_t retval = TRUE; + ppnum_t phys_page; vm_object_lock_assert_exclusive(object); assert(object->purgable != VM_PURGABLE_VOLATILE); @@ -7735,7 +7891,6 @@ vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t us vm_page_lock_queues(); while (page_count--) { - if (dst_page->vmp_busy || dst_page->vmp_fictitious || dst_page->vmp_absent || @@ -7759,12 +7914,13 @@ vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t us } entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE); assert(entry >= 0 && entry < object->resident_page_count); - lite_list[entry>>5] |= 1 << (entry & 31); + lite_list[entry >> 5] |= 1 << (entry & 31); phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); - if (phys_page > upl->highest_page) + if (phys_page > upl->highest_page) { upl->highest_page = phys_page; + } if (user_page_list) { user_page_list[entry].phys_addr = phys_page; @@ -7776,7 +7932,7 @@ vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t us user_page_list[entry].speculative = FALSE; user_page_list[entry].cs_validated = FALSE; user_page_list[entry].cs_tainted = FALSE; - user_page_list[entry].cs_nx = FALSE; + user_page_list[entry].cs_nx = FALSE; user_page_list[entry].needed = FALSE; user_page_list[entry].mark = FALSE; } @@ -7793,25 +7949,25 @@ done: VM_CHECK_MEMORYSTATUS; - return (retval); + return retval; } kern_return_t vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list, - wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset, - int page_count, int* page_grab_count) + wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag, vm_object_offset_t *dst_offset, + int page_count, int* page_grab_count) { - vm_page_t dst_page; - boolean_t no_zero_fill = FALSE; - int interruptible; - int pages_wired = 0; - int pages_inserted = 0; - int entry = 0; - uint64_t delayed_ledger_update = 0; - kern_return_t ret = KERN_SUCCESS; - int grab_options; - ppnum_t phys_page; + vm_page_t dst_page; + boolean_t no_zero_fill = FALSE; + int interruptible; + int pages_wired = 0; + int pages_inserted = 0; + int entry = 0; + uint64_t 
delayed_ledger_update = 0; + kern_return_t ret = KERN_SUCCESS; + int grab_options; + ppnum_t phys_page; vm_object_lock_assert_exclusive(object); assert(object->purgable != VM_PURGABLE_VOLATILE); @@ -7820,13 +7976,15 @@ vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t u assert(object->copy == NULL); assert(object->shadow == NULL); - if (cntrl_flags & UPL_SET_INTERRUPTIBLE) + if (cntrl_flags & UPL_SET_INTERRUPTIBLE) { interruptible = THREAD_ABORTSAFE; - else + } else { interruptible = THREAD_UNINT; + } - if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) - no_zero_fill = TRUE; + if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) { + no_zero_fill = TRUE; + } grab_options = 0; #if CONFIG_SECLUDED_MEMORY @@ -7836,10 +7994,8 @@ vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t u #endif /* CONFIG_SECLUDED_MEMORY */ while (page_count--) { - while ((dst_page = vm_page_grab_options(grab_options)) - == VM_PAGE_NULL) { - + == VM_PAGE_NULL) { OSAddAtomic(page_count, &vm_upl_wait_for_pages); VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0); @@ -7859,10 +8015,11 @@ vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t u VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0); } - if (no_zero_fill == FALSE) + if (no_zero_fill == FALSE) { vm_page_zero_fill(dst_page); - else + } else { dst_page->vmp_absent = TRUE; + } dst_page->vmp_reference = TRUE; @@ -7882,20 +8039,21 @@ vm_object_iopl_wire_empty(vm_object_t object, upl_t upl, upl_page_info_array_t u vm_page_insert_internal(dst_page, object, *dst_offset, tag, FALSE, TRUE, TRUE, TRUE, &delayed_ledger_update); - lite_list[entry>>5] |= 1 << (entry & 31); + lite_list[entry >> 5] |= 1 << (entry & 31); phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); - if (phys_page > upl->highest_page) + if (phys_page > upl->highest_page) { upl->highest_page = phys_page; + } if (user_page_list) { - user_page_list[entry].phys_addr = phys_page; - user_page_list[entry].absent = dst_page->vmp_absent; - user_page_list[entry].dirty = dst_page->vmp_dirty; - user_page_list[entry].free_when_done = FALSE; - user_page_list[entry].precious = FALSE; - user_page_list[entry].device = FALSE; + user_page_list[entry].phys_addr = phys_page; + user_page_list[entry].absent = dst_page->vmp_absent; + user_page_list[entry].dirty = dst_page->vmp_dirty; + user_page_list[entry].free_when_done = FALSE; + user_page_list[entry].precious = FALSE; + user_page_list[entry].device = FALSE; user_page_list[entry].speculative = FALSE; user_page_list[entry].cs_validated = FALSE; user_page_list[entry].cs_tainted = FALSE; @@ -7920,81 +8078,85 @@ done: } } if (delayed_ledger_update) { - task_t owner; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int ledger_idx_nonvolatile_compressed; - boolean_t do_footprint; + task_t owner; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + boolean_t do_footprint; owner = VM_OBJECT_OWNER(object); assert(owner); vm_object_ledger_tag_ledgers(object, - &ledger_idx_volatile, - &ledger_idx_nonvolatile, - &ledger_idx_volatile_compressed, - &ledger_idx_nonvolatile_compressed, - &do_footprint); + &ledger_idx_volatile, + &ledger_idx_nonvolatile, + &ledger_idx_volatile_compressed, + &ledger_idx_nonvolatile_compressed, + &do_footprint); /* more non-volatile bytes */ 
ledger_credit(owner->ledger, - ledger_idx_nonvolatile, - delayed_ledger_update); + ledger_idx_nonvolatile, + delayed_ledger_update); if (do_footprint) { /* more footprint */ ledger_credit(owner->ledger, - task_ledgers.phys_footprint, - delayed_ledger_update); + task_ledgers.phys_footprint, + delayed_ledger_update); } } assert(page_grab_count); *page_grab_count = pages_inserted; - return (ret); + return ret; } kern_return_t vm_object_iopl_request( - vm_object_t object, - vm_object_offset_t offset, - upl_size_t size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - upl_control_flags_t cntrl_flags, - vm_tag_t tag) + vm_object_t object, + vm_object_offset_t offset, + upl_size_t size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + upl_control_flags_t cntrl_flags, + vm_tag_t tag) { - vm_page_t dst_page; - vm_object_offset_t dst_offset; - upl_size_t xfer_size; - upl_t upl = NULL; - unsigned int entry; - wpl_array_t lite_list = NULL; - int no_zero_fill = FALSE; - unsigned int size_in_pages; - int page_grab_count = 0; - u_int32_t psize; - kern_return_t ret; - vm_prot_t prot; + vm_page_t dst_page; + vm_object_offset_t dst_offset; + upl_size_t xfer_size; + upl_t upl = NULL; + unsigned int entry; + wpl_array_t lite_list = NULL; + int no_zero_fill = FALSE; + unsigned int size_in_pages; + int page_grab_count = 0; + u_int32_t psize; + kern_return_t ret; + vm_prot_t prot; struct vm_object_fault_info fault_info = {}; - struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; - struct vm_page_delayed_work *dwp; - int dw_count; - int dw_limit; - int dw_index; - boolean_t caller_lookup; - int io_tracking_flag = 0; - int interruptible; - ppnum_t phys_page; - - boolean_t set_cache_attr_needed = FALSE; - boolean_t free_wired_pages = FALSE; - boolean_t fast_path_empty_req = FALSE; - boolean_t fast_path_full_req = FALSE; + struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT]; + struct vm_page_delayed_work *dwp; + int dw_count; + int dw_limit; + int dw_index; + boolean_t caller_lookup; + int io_tracking_flag = 0; + int interruptible; + ppnum_t phys_page; + + boolean_t set_cache_attr_needed = FALSE; + boolean_t free_wired_pages = FALSE; + boolean_t fast_path_empty_req = FALSE; + boolean_t fast_path_full_req = FALSE; + +#if DEVELOPMENT || DEBUG + task_t task = current_task(); +#endif /* DEVELOPMENT || DEBUG */ if (cntrl_flags & ~UPL_VALID_FLAGS) { /* @@ -8003,80 +8165,91 @@ vm_object_iopl_request( */ return KERN_INVALID_VALUE; } - if (vm_lopage_needed == FALSE) - cntrl_flags &= ~UPL_NEED_32BIT_ADDR; + if (vm_lopage_needed == FALSE) { + cntrl_flags &= ~UPL_NEED_32BIT_ADDR; + } if (cntrl_flags & UPL_NEED_32BIT_ADDR) { - if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) - return KERN_INVALID_VALUE; + if ((cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE)) { + return KERN_INVALID_VALUE; + } if (object->phys_contiguous) { - if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) - return KERN_INVALID_ADDRESS; + if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address) { + return KERN_INVALID_ADDRESS; + } - if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) - return KERN_INVALID_ADDRESS; + if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address) { + return KERN_INVALID_ADDRESS; + } } } - if (cntrl_flags 
& (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) - no_zero_fill = TRUE; + if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO)) { + no_zero_fill = TRUE; + } - if (cntrl_flags & UPL_COPYOUT_FROM) + if (cntrl_flags & UPL_COPYOUT_FROM) { prot = VM_PROT_READ; - else + } else { prot = VM_PROT_READ | VM_PROT_WRITE; + } - if ((!object->internal) && (object->paging_offset != 0)) + if ((!object->internal) && (object->paging_offset != 0)) { panic("vm_object_iopl_request: external object with non-zero paging offset\n"); + } VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, prot, 0); #if CONFIG_IOSCHED || UPL_DEBUG - if ((object->io_tracking && object != kernel_object) || upl_debug_enabled) + if ((object->io_tracking && object != kernel_object) || upl_debug_enabled) { io_tracking_flag |= UPL_CREATE_IO_TRACKING; + } #endif #if CONFIG_IOSCHED if (object->io_tracking) { /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */ - if (object != kernel_object) + if (object != kernel_object) { io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP; + } } #endif - if (object->phys_contiguous) - psize = PAGE_SIZE; - else - psize = size; + if (object->phys_contiguous) { + psize = PAGE_SIZE; + } else { + psize = size; + } if (cntrl_flags & UPL_SET_INTERNAL) { - upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize); + upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize); user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); lite_list = (wpl_array_t) (((uintptr_t)user_page_list) + - ((psize / PAGE_SIZE) * sizeof(upl_page_info_t))); + ((psize / PAGE_SIZE) * sizeof(upl_page_info_t))); if (size == 0) { user_page_list = NULL; lite_list = NULL; } } else { - upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize); + upl = upl_create(UPL_CREATE_LITE | io_tracking_flag, UPL_IO_WIRE, psize); lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl)); if (size == 0) { lite_list = NULL; } } - if (user_page_list) - user_page_list[0].device = FALSE; + if (user_page_list) { + user_page_list[0].device = FALSE; + } *upl_ptr = upl; if (cntrl_flags & UPL_NOZEROFILLIO) { DTRACE_VM4(upl_nozerofillio, - vm_object_t, object, - vm_object_offset_t, offset, - upl_size_t, size, - upl_t, upl); + vm_object_t, object, + vm_object_offset_t, offset, + upl_size_t, size, + upl_t, upl); } upl->map_object = object; @@ -8117,7 +8290,6 @@ vm_object_iopl_request( #endif if (object->phys_contiguous) { - if (upl->flags & UPL_ACCESS_BLOCKED) { assert(!object->blocked_access); object->blocked_access = TRUE; @@ -8131,20 +8303,26 @@ vm_object_iopl_request( */ upl->flags |= UPL_DEVICE_MEMORY; - upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1)>>PAGE_SHIFT); + upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT); if (user_page_list) { - user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset)>>PAGE_SHIFT); + user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT); user_page_list[0].device = TRUE; } if (page_list_count != NULL) { - if (upl->flags & UPL_INTERNAL) - *page_list_count = 0; - else - *page_list_count = 1; + if (upl->flags & UPL_INTERNAL) { + *page_list_count = 0; + } else { + *page_list_count = 1; + } } VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0); +#if 
DEVELOPMENT || DEBUG + if (task != NULL) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count); + } +#endif /* DEVELOPMENT || DEBUG */ return KERN_SUCCESS; } if (object != kernel_object && object != compressor_object) { @@ -8158,20 +8336,21 @@ vm_object_iopl_request( int num = 0; num = OSBacktrace(bt, - VM_OBJECT_TRACKING_BTDEPTH); + VM_OBJECT_TRACKING_BTDEPTH); btlog_add_entry(vm_object_tracking_btlog, - object, - VM_OBJECT_TRACKING_OP_TRUESHARE, - bt, - num); + object, + VM_OBJECT_TRACKING_OP_TRUESHARE, + bt, + num); } #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ vm_object_lock_assert_exclusive(object); object->true_share = TRUE; - if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + } } if (!(cntrl_flags & UPL_COPYOUT_FROM) && @@ -8194,13 +8373,13 @@ vm_object_iopl_request( * code path for that... */ vm_object_update(object, - offset, - size, - NULL, - NULL, - FALSE, /* should_return */ - MEMORY_OBJECT_COPY_SYNC, - VM_PROT_NO_CHANGE); + offset, + size, + NULL, + NULL, + FALSE, /* should_return */ + MEMORY_OBJECT_COPY_SYNC, + VM_PROT_NO_CHANGE); VM_PAGEOUT_DEBUG(iopl_cow, 1); VM_PAGEOUT_DEBUG(iopl_cow_pages, (size >> PAGE_SHIFT)); } @@ -8211,16 +8390,12 @@ vm_object_iopl_request( size == object->vo_size && offset == 0 && object->shadow == NULL && - object->pager == NULL) - { - if (object->resident_page_count == size_in_pages) - { + object->pager == NULL) { + if (object->resident_page_count == size_in_pages) { assert(object != compressor_object); assert(object != kernel_object); fast_path_full_req = TRUE; - } - else if (object->resident_page_count == 0) - { + } else if (object->resident_page_count == 0) { assert(object != compressor_object); assert(object != kernel_object); fast_path_empty_req = TRUE; @@ -8228,10 +8403,11 @@ vm_object_iopl_request( } } - if (cntrl_flags & UPL_SET_INTERRUPTIBLE) + if (cntrl_flags & UPL_SET_INTERRUPTIBLE) { interruptible = THREAD_ABORTSAFE; - else + } else { interruptible = THREAD_UNINT; + } entry = 0; @@ -8240,16 +8416,14 @@ vm_object_iopl_request( dw_count = 0; if (fast_path_full_req) { - - if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) + if (vm_object_iopl_wire_full(object, upl, user_page_list, lite_list, cntrl_flags, tag) == TRUE) { goto finish; + } /* * we couldn't complete the processing of this request on the fast path * so fall through to the slow path and finish up */ - } else if (fast_path_empty_req) { - if (cntrl_flags & UPL_REQUEST_NO_FAULT) { ret = KERN_MEMORY_ERROR; goto return_err; @@ -8274,7 +8448,7 @@ vm_object_iopl_request( dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT); while (xfer_size) { - vm_fault_return_t result; + vm_fault_return_t result; dwp->dw_mask = 0; @@ -8287,8 +8461,9 @@ vm_object_iopl_request( * the following check is needed to determine whether * this page was already processed in the fast path */ - if (lite_list[entry>>5] & (1 << (entry & 31))) + if (lite_list[entry >> 5] & (1 << (entry & 31))) { goto skip_page; + } } dst_page = vm_page_lookup(object, dst_offset); @@ -8298,140 +8473,140 @@ vm_object_iopl_request( dst_page->vmp_restart || dst_page->vmp_absent || dst_page->vmp_fictitious) { + if (object == kernel_object) { + panic("vm_object_iopl_request: missing/bad page in kernel object\n"); + } + if (object == compressor_object) { + panic("vm_object_iopl_request: missing/bad page in compressor object\n"); + } - if 
(object == kernel_object) - panic("vm_object_iopl_request: missing/bad page in kernel object\n"); - if (object == compressor_object) - panic("vm_object_iopl_request: missing/bad page in compressor object\n"); - - if (cntrl_flags & UPL_REQUEST_NO_FAULT) { - ret = KERN_MEMORY_ERROR; - goto return_err; - } - set_cache_attr_needed = TRUE; - - /* - * We just looked up the page and the result remains valid - * until the object lock is release, so send it to - * vm_fault_page() (as "dst_page"), to avoid having to - * look it up again there. - */ - caller_lookup = TRUE; + if (cntrl_flags & UPL_REQUEST_NO_FAULT) { + ret = KERN_MEMORY_ERROR; + goto return_err; + } + set_cache_attr_needed = TRUE; - do { - vm_page_t top_page; - kern_return_t error_code; + /* + * We just looked up the page and the result remains valid + * until the object lock is release, so send it to + * vm_fault_page() (as "dst_page"), to avoid having to + * look it up again there. + */ + caller_lookup = TRUE; - fault_info.cluster_size = xfer_size; + do { + vm_page_t top_page; + kern_return_t error_code; - vm_object_paging_begin(object); + fault_info.cluster_size = xfer_size; - result = vm_fault_page(object, dst_offset, - prot | VM_PROT_WRITE, FALSE, - caller_lookup, - &prot, &dst_page, &top_page, - (int *)0, - &error_code, no_zero_fill, - FALSE, &fault_info); + vm_object_paging_begin(object); - /* our lookup is no longer valid at this point */ - caller_lookup = FALSE; + result = vm_fault_page(object, dst_offset, + prot | VM_PROT_WRITE, FALSE, + caller_lookup, + &prot, &dst_page, &top_page, + (int *)0, + &error_code, no_zero_fill, + FALSE, &fault_info); - switch (result) { + /* our lookup is no longer valid at this point */ + caller_lookup = FALSE; - case VM_FAULT_SUCCESS: - page_grab_count++; + switch (result) { + case VM_FAULT_SUCCESS: + page_grab_count++; - if ( !dst_page->vmp_absent) { - PAGE_WAKEUP_DONE(dst_page); - } else { + if (!dst_page->vmp_absent) { + PAGE_WAKEUP_DONE(dst_page); + } else { + /* + * we only get back an absent page if we + * requested that it not be zero-filled + * because we are about to fill it via I/O + * + * absent pages should be left BUSY + * to prevent them from being faulted + * into an address space before we've + * had a chance to complete the I/O on + * them since they may contain info that + * shouldn't be seen by the faulting task + */ + } /* - * we only get back an absent page if we - * requested that it not be zero-filled - * because we are about to fill it via I/O - * - * absent pages should be left BUSY - * to prevent them from being faulted - * into an address space before we've - * had a chance to complete the I/O on - * them since they may contain info that - * shouldn't be seen by the faulting task + * Release paging references and + * top-level placeholder page, if any. */ - } - /* - * Release paging references and - * top-level placeholder page, if any. 
- */ - if (top_page != VM_PAGE_NULL) { - vm_object_t local_object; + if (top_page != VM_PAGE_NULL) { + vm_object_t local_object; - local_object = VM_PAGE_OBJECT(top_page); + local_object = VM_PAGE_OBJECT(top_page); - /* - * comparing 2 packed pointers - */ - if (top_page->vmp_object != dst_page->vmp_object) { - vm_object_lock(local_object); - VM_PAGE_FREE(top_page); - vm_object_paging_end(local_object); - vm_object_unlock(local_object); - } else { - VM_PAGE_FREE(top_page); - vm_object_paging_end(local_object); + /* + * comparing 2 packed pointers + */ + if (top_page->vmp_object != dst_page->vmp_object) { + vm_object_lock(local_object); + VM_PAGE_FREE(top_page); + vm_object_paging_end(local_object); + vm_object_unlock(local_object); + } else { + VM_PAGE_FREE(top_page); + vm_object_paging_end(local_object); + } } - } - vm_object_paging_end(object); - break; + vm_object_paging_end(object); + break; - case VM_FAULT_RETRY: - vm_object_lock(object); - break; + case VM_FAULT_RETRY: + vm_object_lock(object); + break; - case VM_FAULT_MEMORY_SHORTAGE: - OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages); + case VM_FAULT_MEMORY_SHORTAGE: + OSAddAtomic((size_in_pages - entry), &vm_upl_wait_for_pages); - VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0); + VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0); - if (vm_page_wait(interruptible)) { - OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages); + if (vm_page_wait(interruptible)) { + OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages); - VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0); - vm_object_lock(object); + VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0); + vm_object_lock(object); - break; - } - OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages); + break; + } + OSAddAtomic(-(size_in_pages - entry), &vm_upl_wait_for_pages); - VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1); + VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1); /* fall thru */ - case VM_FAULT_INTERRUPTED: - error_code = MACH_SEND_INTERRUPTED; - case VM_FAULT_MEMORY_ERROR: - memory_error: - ret = (error_code ? error_code: KERN_MEMORY_ERROR); + case VM_FAULT_INTERRUPTED: + error_code = MACH_SEND_INTERRUPTED; + case VM_FAULT_MEMORY_ERROR: +memory_error: + ret = (error_code ? 
error_code: KERN_MEMORY_ERROR); - vm_object_lock(object); - goto return_err; - - case VM_FAULT_SUCCESS_NO_VM_PAGE: - /* success but no page: fail */ - vm_object_paging_end(object); - vm_object_unlock(object); - goto memory_error; + vm_object_lock(object); + goto return_err; - default: - panic("vm_object_iopl_request: unexpected error" - " 0x%x from vm_fault_page()\n", result); - } - } while (result != VM_FAULT_SUCCESS); + case VM_FAULT_SUCCESS_NO_VM_PAGE: + /* success but no page: fail */ + vm_object_paging_end(object); + vm_object_unlock(object); + goto memory_error; + default: + panic("vm_object_iopl_request: unexpected error" + " 0x%x from vm_fault_page()\n", result); + } + } while (result != VM_FAULT_SUCCESS); } phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); - if (upl->flags & UPL_KERNEL_OBJECT) + if (upl->flags & UPL_KERNEL_OBJECT) { goto record_phys_addr; + } if (dst_page->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) { dst_page->vmp_busy = TRUE; @@ -8449,17 +8624,18 @@ vm_object_iopl_request( * We'd better wait for the cleaning to complete and * then try again. */ - VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1); + VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1); PAGE_SLEEP(object, dst_page, THREAD_UNINT); continue; } - if (dst_page->vmp_laundry) + if (dst_page->vmp_laundry) { vm_pageout_steal_laundry(dst_page, FALSE); + } - if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) && - phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) { - vm_page_t low_page; - int refmod; + if ((cntrl_flags & UPL_NEED_32BIT_ADDR) && + phys_page >= (max_valid_dma_address >> PAGE_SHIFT)) { + vm_page_t low_page; + int refmod; /* * support devices that can't DMA above 32 bits @@ -8470,13 +8646,13 @@ vm_object_iopl_request( * handed out to some other 64 bit capable DMA device to use */ if (VM_PAGE_WIRED(dst_page)) { - ret = KERN_PROTECTION_FAILURE; + ret = KERN_PROTECTION_FAILURE; goto return_err; } low_page = vm_page_grablo(); if (low_page == VM_PAGE_NULL) { - ret = KERN_RESOURCE_SHORTAGE; + ret = KERN_RESOURCE_SHORTAGE; goto return_err; } /* @@ -8486,22 +8662,25 @@ vm_object_iopl_request( * it after we disconnect it... we want the fault * to find the new page being substituted. */ - if (dst_page->vmp_pmapped) - refmod = pmap_disconnect(phys_page); - else - refmod = 0; + if (dst_page->vmp_pmapped) { + refmod = pmap_disconnect(phys_page); + } else { + refmod = 0; + } - if (!dst_page->vmp_absent) + if (!dst_page->vmp_absent) { vm_page_copy(dst_page, low_page); + } low_page->vmp_reference = dst_page->vmp_reference; low_page->vmp_dirty = dst_page->vmp_dirty; low_page->vmp_absent = dst_page->vmp_absent; - if (refmod & VM_MEM_REFERENCED) - low_page->vmp_reference = TRUE; + if (refmod & VM_MEM_REFERENCED) { + low_page->vmp_reference = TRUE; + } if (refmod & VM_MEM_MODIFIED) { - SET_PAGE_DIRTY(low_page, FALSE); + SET_PAGE_DIRTY(low_page, FALSE); } vm_page_replace(low_page, object, dst_offset); @@ -8512,13 +8691,15 @@ vm_object_iopl_request( * BUSY... 
we don't need a PAGE_WAKEUP_DONE * here, because we've never dropped the object lock */ - if ( !dst_page->vmp_absent) + if (!dst_page->vmp_absent) { dst_page->vmp_busy = FALSE; + } phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page); } - if ( !dst_page->vmp_busy) + if (!dst_page->vmp_busy) { dwp->dw_mask |= DW_vm_page_wire; + } if (cntrl_flags & UPL_BLOCK_ACCESS) { /* @@ -8536,7 +8717,7 @@ vm_object_iopl_request( */ dwp->dw_mask |= DW_set_reference; - if (!(cntrl_flags & UPL_COPYOUT_FROM)) { + if (!(cntrl_flags & UPL_COPYOUT_FROM)) { SET_PAGE_DIRTY(dst_page, TRUE); } if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) { @@ -8545,26 +8726,29 @@ vm_object_iopl_request( } record_phys_addr: - if (dst_page->vmp_busy) + if (dst_page->vmp_busy) { upl->flags |= UPL_HAS_BUSY; + } - lite_list[entry>>5] |= 1 << (entry & 31); + lite_list[entry >> 5] |= 1 << (entry & 31); - if (phys_page > upl->highest_page) - upl->highest_page = phys_page; + if (phys_page > upl->highest_page) { + upl->highest_page = phys_page; + } if (user_page_list) { - user_page_list[entry].phys_addr = phys_page; - user_page_list[entry].free_when_done = dst_page->vmp_free_when_done; - user_page_list[entry].absent = dst_page->vmp_absent; - user_page_list[entry].dirty = dst_page->vmp_dirty; - user_page_list[entry].precious = dst_page->vmp_precious; - user_page_list[entry].device = FALSE; + user_page_list[entry].phys_addr = phys_page; + user_page_list[entry].free_when_done = dst_page->vmp_free_when_done; + user_page_list[entry].absent = dst_page->vmp_absent; + user_page_list[entry].dirty = dst_page->vmp_dirty; + user_page_list[entry].precious = dst_page->vmp_precious; + user_page_list[entry].device = FALSE; user_page_list[entry].needed = FALSE; - if (dst_page->vmp_clustered == TRUE) - user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE; - else - user_page_list[entry].speculative = FALSE; + if (dst_page->vmp_clustered == TRUE) { + user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE; + } else { + user_page_list[entry].speculative = FALSE; + } user_page_list[entry].cs_validated = dst_page->vmp_cs_validated; user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted; user_page_list[entry].cs_nx = dst_page->vmp_cs_nx; @@ -8576,8 +8760,9 @@ record_phys_addr: * update clustered and speculative state * */ - if (dst_page->vmp_clustered) + if (dst_page->vmp_clustered) { VM_PAGE_CONSUME_CLUSTERED(dst_page); + } } skip_page: entry++; @@ -8597,17 +8782,20 @@ skip_page: } assert(entry == size_in_pages); - if (dw_count) + if (dw_count) { vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count); + } finish: - if (user_page_list && set_cache_attr_needed == TRUE) + if (user_page_list && set_cache_attr_needed == TRUE) { vm_object_set_pmap_cache_attr(object, user_page_list, size_in_pages, TRUE); + } if (page_list_count != NULL) { - if (upl->flags & UPL_INTERNAL) + if (upl->flags & UPL_INTERNAL) { *page_list_count = 0; - else if (*page_list_count > size_in_pages) + } else if (*page_list_count > size_in_pages) { *page_list_count = size_in_pages; + } } vm_object_unlock(object); @@ -8619,12 +8807,17 @@ finish: * can't be accessed without causing a page fault. 
*/ vm_object_pmap_protect(object, offset, (vm_object_size_t)size, - PMAP_NULL, 0, VM_PROT_NONE); + PMAP_NULL, 0, VM_PROT_NONE); assert(!object->blocked_access); object->blocked_access = TRUE; } VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0); +#if DEVELOPMENT || DEBUG + if (task != NULL) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count); + } +#endif /* DEVELOPMENT || DEBUG */ return KERN_SUCCESS; return_err: @@ -8633,10 +8826,11 @@ return_err: for (; offset < dst_offset; offset += PAGE_SIZE) { boolean_t need_unwire; - dst_page = vm_page_lookup(object, offset); + dst_page = vm_page_lookup(object, offset); - if (dst_page == VM_PAGE_NULL) - panic("vm_object_iopl_request: Wired page missing. \n"); + if (dst_page == VM_PAGE_NULL) { + panic("vm_object_iopl_request: Wired page missing. \n"); + } /* * if we've already processed this page in an earlier @@ -8670,20 +8864,22 @@ return_err: need_unwire = FALSE; } else { - if (need_unwire == TRUE) + if (need_unwire == TRUE) { vm_page_unwire(dst_page, TRUE); + } PAGE_WAKEUP_DONE(dst_page); } vm_page_unlock_queues(); - if (need_unwire == TRUE) + if (need_unwire == TRUE) { VM_STAT_INCR(reactivations); + } } #if UPL_DEBUG upl->upl_state = 2; #endif - if (! (upl->flags & UPL_KERNEL_OBJECT)) { + if (!(upl->flags & UPL_KERNEL_OBJECT)) { vm_object_activity_end(object); vm_object_collapse(object, 0, TRUE); } @@ -8691,19 +8887,24 @@ return_err: upl_destroy(upl); VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, ret, 0, 0); +#if DEVELOPMENT || DEBUG + if (task != NULL) { + ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count); + } +#endif /* DEVELOPMENT || DEBUG */ return ret; } kern_return_t upl_transpose( - upl_t upl1, - upl_t upl2) + upl_t upl1, + upl_t upl2) { - kern_return_t retval; - boolean_t upls_locked; - vm_object_t object1, object2; + kern_return_t retval; + boolean_t upls_locked; + vm_object_t object1, object2; - if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR) || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) { + if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR) == UPL_VECTOR) || ((upl2->flags & UPL_VECTOR) == UPL_VECTOR)) { return KERN_INVALID_ARGUMENT; } @@ -8720,7 +8921,7 @@ upl_transpose( upl_lock(upl2); upl_lock(upl1); } - upls_locked = TRUE; /* the UPLs will need to be unlocked */ + upls_locked = TRUE; /* the UPLs will need to be unlocked */ object1 = upl1->map_object; object2 = upl2->map_object; @@ -8741,7 +8942,7 @@ upl_transpose( * Tranpose the VM objects' backing store. 
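	 * On success, the two UPLs swap their map_object pointers (and,
	 * when UPL_TRACKED_BY_OBJECT is set, their entries on the objects'
	 * upl queues), so each UPL keeps describing the data it was
	 * created against.  A hypothetical caller sketch (upl_a/upl_b are
	 * illustrative names, not a real call site; NULL, identical, or
	 * vectored UPLs are rejected with KERN_INVALID_ARGUMENT):
	 *
	 *	if (upl_transpose(upl_a, upl_b) != KERN_SUCCESS) {
	 *		fall_back_to_page_copy(upl_a, upl_b);	// hypothetical
	 *	}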
*/ retval = vm_object_transpose(object1, object2, - (vm_object_size_t) upl1->size); + (vm_object_size_t) upl1->size); if (retval == KERN_SUCCESS) { /* @@ -8753,19 +8954,23 @@ upl_transpose( vm_object_lock(object1); vm_object_lock(object2); } - if (upl1->flags & UPL_TRACKED_BY_OBJECT) + if (upl1->flags & UPL_TRACKED_BY_OBJECT) { queue_remove(&object1->uplq, upl1, upl_t, uplq); - if (upl2->flags & UPL_TRACKED_BY_OBJECT) + } + if (upl2->flags & UPL_TRACKED_BY_OBJECT) { queue_remove(&object2->uplq, upl2, upl_t, uplq); + } #endif upl1->map_object = object2; upl2->map_object = object1; #if CONFIG_IOSCHED || UPL_DEBUG - if (upl1->flags & UPL_TRACKED_BY_OBJECT) + if (upl1->flags & UPL_TRACKED_BY_OBJECT) { queue_enter(&object2->uplq, upl1, upl_t, uplq); - if (upl2->flags & UPL_TRACKED_BY_OBJECT) + } + if (upl2->flags & UPL_TRACKED_BY_OBJECT) { queue_enter(&object1->uplq, upl2, upl_t, uplq); + } if ((upl1->flags & UPL_TRACKED_BY_OBJECT) || (upl2->flags & UPL_TRACKED_BY_OBJECT)) { vm_object_unlock(object2); vm_object_unlock(object1); @@ -8788,22 +8993,24 @@ done: void upl_range_needed( - upl_t upl, - int index, - int count) + upl_t upl, + int index, + int count) { - upl_page_info_t *user_page_list; - int size_in_pages; + upl_page_info_t *user_page_list; + int size_in_pages; - if ( !(upl->flags & UPL_INTERNAL) || count <= 0) + if (!(upl->flags & UPL_INTERNAL) || count <= 0) { return; + } size_in_pages = upl->size / PAGE_SIZE; user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl)); - while (count-- && index < size_in_pages) + while (count-- && index < size_in_pages) { user_page_list[index++].needed = TRUE; + } } @@ -8816,26 +9023,26 @@ upl_range_needed( * virtaul address space each time we need to work with * a physical page. */ -decl_simple_lock_data(,vm_paging_lock) -#define VM_PAGING_NUM_PAGES 64 +decl_simple_lock_data(, vm_paging_lock) +#define VM_PAGING_NUM_PAGES 64 vm_map_offset_t vm_paging_base_address = 0; -boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, }; -int vm_paging_max_index = 0; -int vm_paging_page_waiter = 0; -int vm_paging_page_waiter_total = 0; +boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, }; +int vm_paging_max_index = 0; +int vm_paging_page_waiter = 0; +int vm_paging_page_waiter_total = 0; -unsigned long vm_paging_no_kernel_page = 0; -unsigned long vm_paging_objects_mapped = 0; -unsigned long vm_paging_pages_mapped = 0; -unsigned long vm_paging_objects_mapped_slow = 0; -unsigned long vm_paging_pages_mapped_slow = 0; +unsigned long vm_paging_no_kernel_page = 0; +unsigned long vm_paging_objects_mapped = 0; +unsigned long vm_paging_pages_mapped = 0; +unsigned long vm_paging_objects_mapped_slow = 0; +unsigned long vm_paging_pages_mapped_slow = 0; void vm_paging_map_init(void) { - kern_return_t kr; - vm_map_offset_t page_map_offset; - vm_map_entry_t map_entry; + kern_return_t kr; + vm_map_offset_t page_map_offset; + vm_map_entry_t map_entry; assert(vm_paging_base_address == 0); @@ -8845,13 +9052,13 @@ vm_paging_map_init(void) */ page_map_offset = 0; kr = vm_map_find_space(kernel_map, - &page_map_offset, - VM_PAGING_NUM_PAGES * PAGE_SIZE, - 0, - 0, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - &map_entry); + &page_map_offset, + VM_PAGING_NUM_PAGES * PAGE_SIZE, + 0, + 0, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + &map_entry); if (kr != KERN_SUCCESS) { panic("vm_paging_map_init: kernel_map full\n"); } @@ -8870,45 +9077,45 @@ vm_paging_map_init(void) /* * vm_paging_map_object: * Maps part of a VM object's pages in the 
kernel - * virtual address space, using the pre-allocated + * virtual address space, using the pre-allocated * kernel virtual addresses, if possible. * Context: - * The VM object is locked. This lock will get - * dropped and re-acquired though, so the caller - * must make sure the VM object is kept alive + * The VM object is locked. This lock will get + * dropped and re-acquired though, so the caller + * must make sure the VM object is kept alive * (by holding a VM map that has a reference - * on it, for example, or taking an extra reference). - * The page should also be kept busy to prevent + * on it, for example, or taking an extra reference). + * The page should also be kept busy to prevent * it from being reclaimed. */ kern_return_t vm_paging_map_object( - vm_page_t page, - vm_object_t object, - vm_object_offset_t offset, - vm_prot_t protection, - boolean_t can_unlock_object, - vm_map_size_t *size, /* IN/OUT */ - vm_map_offset_t *address, /* OUT */ - boolean_t *need_unmap) /* OUT */ + vm_page_t page, + vm_object_t object, + vm_object_offset_t offset, + vm_prot_t protection, + boolean_t can_unlock_object, + vm_map_size_t *size, /* IN/OUT */ + vm_map_offset_t *address, /* OUT */ + boolean_t *need_unmap) /* OUT */ { - kern_return_t kr; - vm_map_offset_t page_map_offset; - vm_map_size_t map_size; - vm_object_offset_t object_offset; - int i; + kern_return_t kr; + vm_map_offset_t page_map_offset; + vm_map_size_t map_size; + vm_object_offset_t object_offset; + int i; if (page != VM_PAGE_NULL && *size == PAGE_SIZE) { /* use permanent 1-to-1 kernel mapping of physical memory ? */ #if __x86_64__ *address = (vm_map_offset_t) - PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << - PAGE_SHIFT); + PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << + PAGE_SHIFT); *need_unmap = FALSE; return KERN_SUCCESS; #elif __arm__ || __arm64__ *address = (vm_map_offset_t) - phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT); + phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(page) << PAGE_SHIFT); *need_unmap = FALSE; return KERN_SUCCESS; #else @@ -8921,7 +9128,7 @@ vm_paging_map_object( * and just enter the VM page in the kernel address space * at that virtual address. */ - simple_lock(&vm_paging_lock); + simple_lock(&vm_paging_lock, &vm_pageout_lck_grp); /* * Try and find an available kernel virtual address @@ -8932,8 +9139,8 @@ vm_paging_map_object( for (i = 0; i < VM_PAGING_NUM_PAGES; i++) { if (vm_paging_page_inuse[i] == FALSE) { page_map_offset = - vm_paging_base_address + - (i * PAGE_SIZE); + vm_paging_base_address + + (i * PAGE_SIZE); break; } } @@ -8959,7 +9166,7 @@ vm_paging_map_object( if (kr == THREAD_WAITING) { simple_unlock(&vm_paging_lock); kr = thread_block(THREAD_CONTINUE_NULL); - simple_lock(&vm_paging_lock); + simple_lock(&vm_paging_lock, &vm_pageout_lck_grp); } vm_paging_page_waiter--; /* ... and try again */ @@ -8985,13 +9192,13 @@ vm_paging_map_object( * vm_object_pmap_protect() call... */ PMAP_ENTER(kernel_pmap, - page_map_offset, - page, - protection, - VM_PROT_NONE, - 0, - TRUE, - kr); + page_map_offset, + page, + protection, + VM_PROT_NONE, + 0, + TRUE, + kr); assert(kr == KERN_SUCCESS); vm_paging_objects_mapped++; vm_paging_pages_mapped++; @@ -9015,7 +9222,7 @@ vm_paging_map_object( simple_unlock(&vm_paging_lock); } - if (! 
can_unlock_object) { + if (!can_unlock_object) { *address = 0; *size = 0; *need_unmap = FALSE; @@ -9024,34 +9231,34 @@ vm_paging_map_object( object_offset = vm_object_trunc_page(offset); map_size = vm_map_round_page(*size, - VM_MAP_PAGE_MASK(kernel_map)); + VM_MAP_PAGE_MASK(kernel_map)); /* * Try and map the required range of the object * in the kernel_map */ - vm_object_reference_locked(object); /* for the map entry */ + vm_object_reference_locked(object); /* for the map entry */ vm_object_unlock(object); kr = vm_map_enter(kernel_map, - address, - map_size, - 0, - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - object, - object_offset, - FALSE, - protection, - VM_PROT_ALL, - VM_INHERIT_NONE); + address, + map_size, + 0, + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + object, + object_offset, + FALSE, + protection, + VM_PROT_ALL, + VM_INHERIT_NONE); if (kr != KERN_SUCCESS) { *address = 0; *size = 0; *need_unmap = FALSE; - vm_object_deallocate(object); /* for the map entry */ + vm_object_deallocate(object); /* for the map entry */ vm_object_lock(object); return kr; } @@ -9070,15 +9277,14 @@ vm_paging_map_object( */ for (page_map_offset = 0; - map_size != 0; - map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) { - + map_size != 0; + map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) { page = vm_page_lookup(object, offset + page_map_offset); if (page == VM_PAGE_NULL) { printf("vm_paging_map_object: no page !?"); vm_object_unlock(object); kr = vm_map_remove(kernel_map, *address, *size, - VM_MAP_REMOVE_NO_FLAGS); + VM_MAP_REMOVE_NO_FLAGS); assert(kr == KERN_SUCCESS); *address = 0; *size = 0; @@ -9090,13 +9296,13 @@ vm_paging_map_object( //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page))); PMAP_ENTER(kernel_pmap, - *address + page_map_offset, - page, - protection, - VM_PROT_NONE, - 0, - TRUE, - kr); + *address + page_map_offset, + page, + protection, + VM_PROT_NONE, + 0, + TRUE, + kr); assert(kr == KERN_SUCCESS); #if KASAN kasan_notify_address(*address + page_map_offset, PAGE_SIZE); @@ -9114,24 +9320,24 @@ vm_paging_map_object( /* * vm_paging_unmap_object: * Unmaps part of a VM object's pages from the kernel - * virtual address space. + * virtual address space. * Context: - * The VM object is locked. This lock will get - * dropped and re-acquired though. + * The VM object is locked. This lock will get + * dropped and re-acquired though. */ void vm_paging_unmap_object( - vm_object_t object, - vm_map_offset_t start, - vm_map_offset_t end) + vm_object_t object, + vm_map_offset_t start, + vm_map_offset_t end) { - kern_return_t kr; - int i; + kern_return_t kr; + int i; if ((vm_paging_base_address == 0) || (start < vm_paging_base_address) || (end > (vm_paging_base_address - + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) { + + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) { /* * We didn't use our pre-allocated pool of * kernel virtual address. 
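		 * (i.e. the range was mapped by the slow vm_map_enter() path
		 * of vm_paging_map_object() rather than carved out of the
		 * pool at vm_paging_base_address.)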
Deallocate the @@ -9141,7 +9347,7 @@ vm_paging_unmap_object( vm_object_unlock(object); } kr = vm_map_remove(kernel_map, start, end, - VM_MAP_REMOVE_NO_FLAGS); + VM_MAP_REMOVE_NO_FLAGS); if (object != VM_OBJECT_NULL) { vm_object_lock(object); } @@ -9159,7 +9365,7 @@ vm_paging_unmap_object( /* undo the pmap mapping */ pmap_remove(kernel_pmap, start, end); - simple_lock(&vm_paging_lock); + simple_lock(&vm_paging_lock, &vm_pageout_lck_grp); vm_paging_page_inuse[i] = FALSE; if (vm_paging_page_waiter) { thread_wakeup(&vm_paging_page_waiter); @@ -9198,24 +9404,23 @@ vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked) upl_t vector_upl_create(vm_offset_t upl_offset) { - int vector_upl_size = sizeof(struct _vector_upl); - int i=0; - upl_t upl; + int vector_upl_size = sizeof(struct _vector_upl); + int i = 0; + upl_t upl; vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size); - upl = upl_create(0,UPL_VECTOR,0); + upl = upl_create(0, UPL_VECTOR, 0); upl->vector_upl = vector_upl; upl->offset = upl_offset; vector_upl->size = 0; vector_upl->offset = upl_offset; - vector_upl->invalid_upls=0; - vector_upl->num_upls=0; + vector_upl->invalid_upls = 0; + vector_upl->num_upls = 0; vector_upl->pagelist = NULL; - for(i=0; i < MAX_VECTOR_UPL_ELEMENTS ; i++) { + for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) { vector_upl->upl_iostates[i].size = 0; vector_upl->upl_iostates[i].offset = 0; - } return upl; } @@ -9223,81 +9428,86 @@ vector_upl_create(vm_offset_t upl_offset) void vector_upl_deallocate(upl_t upl) { - if(upl) { + if (upl) { vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { - if(vector_upl->invalid_upls != vector_upl->num_upls) + if (vector_upl) { + if (vector_upl->invalid_upls != vector_upl->num_upls) { panic("Deallocating non-empty Vectored UPL\n"); - kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE))); - vector_upl->invalid_upls=0; + } + kfree(vector_upl->pagelist, (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE))); + vector_upl->invalid_upls = 0; vector_upl->num_upls = 0; vector_upl->pagelist = NULL; vector_upl->size = 0; vector_upl->offset = 0; kfree(vector_upl, sizeof(struct _vector_upl)); vector_upl = (vector_upl_t)0xfeedfeed; - } - else + } else { panic("vector_upl_deallocate was passed a non-vectored upl\n"); - } - else + } + } else { panic("vector_upl_deallocate was passed a NULL upl\n"); + } } boolean_t vector_upl_is_valid(upl_t upl) { - if(upl && ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) { + if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) { vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) + if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) { return FALSE; - else + } else { return TRUE; + } } return FALSE; } boolean_t -vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size) +vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size) { - if(vector_upl_is_valid(upl)) { + if (vector_upl_is_valid(upl)) { vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { - if(subupl) { - if(io_size) { - if(io_size < PAGE_SIZE) + if (vector_upl) { + if (subupl) { + if (io_size) { + if (io_size < PAGE_SIZE) { io_size = PAGE_SIZE; + } subupl->vector_upl = (void*)vector_upl; vector_upl->upl_elems[vector_upl->num_upls++] = subupl; vector_upl->size += io_size; upl->size += io_size; - } - else { - uint32_t i=0,invalid_upls=0; - for(i = 0; i < 
vector_upl->num_upls; i++) { - if(vector_upl->upl_elems[i] == subupl) + } else { + uint32_t i = 0, invalid_upls = 0; + for (i = 0; i < vector_upl->num_upls; i++) { + if (vector_upl->upl_elems[i] == subupl) { break; + } } - if(i == vector_upl->num_upls) + if (i == vector_upl->num_upls) { panic("Trying to remove sub-upl when none exists"); + } vector_upl->upl_elems[i] = NULL; invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1); - if(invalid_upls == vector_upl->num_upls) + if (invalid_upls == vector_upl->num_upls) { return TRUE; - else + } else { return FALSE; + } } - } - else + } else { panic("vector_upl_set_subupl was passed a NULL upl element\n"); - } - else + } + } else { panic("vector_upl_set_subupl was passed a non-vectored upl\n"); - } - else + } + } else { panic("vector_upl_set_subupl was passed a NULL upl\n"); + } return FALSE; } @@ -9305,43 +9515,44 @@ vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size) void vector_upl_set_pagelist(upl_t upl) { - if(vector_upl_is_valid(upl)) { - uint32_t i=0; + if (vector_upl_is_valid(upl)) { + uint32_t i = 0; vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { - vm_offset_t pagelist_size=0, cur_upl_pagelist_size=0; + if (vector_upl) { + vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0; - vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)); + vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)); - for(i=0; i < vector_upl->num_upls; i++) { - cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE; + for (i = 0; i < vector_upl->num_upls; i++) { + cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size / PAGE_SIZE; bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size); pagelist_size += cur_upl_pagelist_size; - if(vector_upl->upl_elems[i]->highest_page > upl->highest_page) + if (vector_upl->upl_elems[i]->highest_page > upl->highest_page) { upl->highest_page = vector_upl->upl_elems[i]->highest_page; + } } - assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) ); - } - else + assert( pagelist_size == (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE))); + } else { panic("vector_upl_set_pagelist was passed a non-vectored upl\n"); - } - else + } + } else { panic("vector_upl_set_pagelist was passed a NULL upl\n"); - + } } upl_t vector_upl_subupl_byindex(upl_t upl, uint32_t index) { - if(vector_upl_is_valid(upl)) { + if (vector_upl_is_valid(upl)) { vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { - if(index < vector_upl->num_upls) + if (vector_upl) { + if (index < vector_upl->num_upls) { return vector_upl->upl_elems[index]; - } - else + } + } else { panic("vector_upl_subupl_byindex was passed a non-vectored upl\n"); + } } return NULL; } @@ -9349,39 +9560,42 @@ vector_upl_subupl_byindex(upl_t upl, uint32_t index) upl_t vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size) { - if(vector_upl_is_valid(upl)) { - uint32_t i=0; + if (vector_upl_is_valid(upl)) { + uint32_t i = 0; vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { + if (vector_upl) { upl_t subupl = NULL; vector_upl_iostates_t subupl_state; - for(i=0; i < vector_upl->num_upls; i++) { + for (i = 0; i < vector_upl->num_upls; i++) { subupl = vector_upl->upl_elems[i]; subupl_state = 
vector_upl->upl_iostates[i]; - if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) { + if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) { /* We could have been passed an offset/size pair that belongs * to an UPL element that has already been committed/aborted. * If so, return NULL. */ - if(subupl == NULL) + if (subupl == NULL) { return NULL; - if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) { + } + if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) { *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset; - if(*upl_size > subupl_state.size) + if (*upl_size > subupl_state.size) { *upl_size = subupl_state.size; + } } - if(*upl_offset >= subupl_state.offset) + if (*upl_offset >= subupl_state.offset) { *upl_offset -= subupl_state.offset; - else if(i) + } else if (i) { panic("Vector UPL offset miscalculation\n"); + } return subupl; } } - } - else + } else { panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n"); + } } return NULL; } @@ -9391,107 +9605,112 @@ vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst { *v_upl_submap = NULL; - if(vector_upl_is_valid(upl)) { + if (vector_upl_is_valid(upl)) { vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { + if (vector_upl) { *v_upl_submap = vector_upl->submap; *submap_dst_addr = vector_upl->submap_dst_addr; - } - else + } else { panic("vector_upl_get_submap was passed a non-vectored UPL\n"); - } - else + } + } else { panic("vector_upl_get_submap was passed a null UPL\n"); + } } void vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr) { - if(vector_upl_is_valid(upl)) { + if (vector_upl_is_valid(upl)) { vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { + if (vector_upl) { vector_upl->submap = submap; vector_upl->submap_dst_addr = submap_dst_addr; - } - else + } else { panic("vector_upl_get_submap was passed a non-vectored UPL\n"); - } - else + } + } else { panic("vector_upl_get_submap was passed a NULL UPL\n"); + } } void vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size) { - if(vector_upl_is_valid(upl)) { + if (vector_upl_is_valid(upl)) { uint32_t i = 0; vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { - for(i = 0; i < vector_upl->num_upls; i++) { - if(vector_upl->upl_elems[i] == subupl) + if (vector_upl) { + for (i = 0; i < vector_upl->num_upls; i++) { + if (vector_upl->upl_elems[i] == subupl) { break; + } } - if(i == vector_upl->num_upls) + if (i == vector_upl->num_upls) { panic("setting sub-upl iostate when none exists"); + } vector_upl->upl_iostates[i].offset = offset; - if(size < PAGE_SIZE) + if (size < PAGE_SIZE) { size = PAGE_SIZE; + } vector_upl->upl_iostates[i].size = size; - } - else + } else { panic("vector_upl_set_iostate was passed a non-vectored UPL\n"); - } - else + } + } else { panic("vector_upl_set_iostate was passed a NULL UPL\n"); + } } void vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size) { - if(vector_upl_is_valid(upl)) { + if (vector_upl_is_valid(upl)) { uint32_t i = 0; vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { - for(i = 0; i < vector_upl->num_upls; i++) { - if(vector_upl->upl_elems[i] == subupl) + if (vector_upl) { + for (i = 0; i < vector_upl->num_upls; i++) { + if (vector_upl->upl_elems[i] == subupl) { break; + } } - if(i == vector_upl->num_upls) + if (i == vector_upl->num_upls) { panic("getting sub-upl iostate when none exists"); + } 
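			/*
			 * i now indexes the requested sub-UPL; hand back the
			 * offset/size that vector_upl_set_iostate() recorded
			 * for it.
			 */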
*offset = vector_upl->upl_iostates[i].offset; *size = vector_upl->upl_iostates[i].size; - } - else + } else { panic("vector_upl_get_iostate was passed a non-vectored UPL\n"); - } - else + } + } else { panic("vector_upl_get_iostate was passed a NULL UPL\n"); + } } void vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size) { - if(vector_upl_is_valid(upl)) { + if (vector_upl_is_valid(upl)) { vector_upl_t vector_upl = upl->vector_upl; - if(vector_upl) { - if(index < vector_upl->num_upls) { + if (vector_upl) { + if (index < vector_upl->num_upls) { *offset = vector_upl->upl_iostates[index].offset; *size = vector_upl->upl_iostates[index].size; - } - else + } else { *offset = *size = 0; - } - else + } + } else { panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n"); - } - else + } + } else { panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n"); + } } upl_page_info_t * @@ -9514,8 +9733,8 @@ upl_get_internal_pagelist_offset(void) void upl_clear_dirty( - upl_t upl, - boolean_t value) + upl_t upl, + boolean_t value) { if (value) { upl->flags |= UPL_CLEAR_DIRTY; @@ -9526,8 +9745,8 @@ upl_clear_dirty( void upl_set_referenced( - upl_t upl, - boolean_t value) + upl_t upl, + boolean_t value) { upl_lock(upl); if (value) { @@ -9544,69 +9763,78 @@ upl_set_referenced( #if CONFIG_IOSCHED void upl_set_blkno( - upl_t upl, - vm_offset_t upl_offset, - int io_size, - int64_t blkno) + upl_t upl, + vm_offset_t upl_offset, + int io_size, + int64_t blkno) { - int i,j; - if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) - return; + int i, j; + if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) { + return; + } - assert(upl->upl_reprio_info != 0); - for(i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) { - UPL_SET_REPRIO_INFO(upl, i, blkno, io_size); - } + assert(upl->upl_reprio_info != 0); + for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) { + UPL_SET_REPRIO_INFO(upl, i, blkno, io_size); + } } #endif -void inline memoryshot(unsigned int event, unsigned int control) +void inline +memoryshot(unsigned int event, unsigned int control) { if (vm_debug_events) { KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control, - vm_page_active_count, vm_page_inactive_count, - vm_page_free_count, vm_page_speculative_count, - vm_page_throttled_count); + vm_page_active_count, vm_page_inactive_count, + vm_page_free_count, vm_page_speculative_count, + vm_page_throttled_count); } else { (void) event; (void) control; } - } #ifdef MACH_BSD -boolean_t upl_device_page(upl_page_info_t *upl) +boolean_t +upl_device_page(upl_page_info_t *upl) { - return(UPL_DEVICE_PAGE(upl)); + return UPL_DEVICE_PAGE(upl); } -boolean_t upl_page_present(upl_page_info_t *upl, int index) +boolean_t +upl_page_present(upl_page_info_t *upl, int index) { - return(UPL_PAGE_PRESENT(upl, index)); + return UPL_PAGE_PRESENT(upl, index); } -boolean_t upl_speculative_page(upl_page_info_t *upl, int index) +boolean_t +upl_speculative_page(upl_page_info_t *upl, int index) { - return(UPL_SPECULATIVE_PAGE(upl, index)); + return UPL_SPECULATIVE_PAGE(upl, index); } -boolean_t upl_dirty_page(upl_page_info_t *upl, int index) +boolean_t +upl_dirty_page(upl_page_info_t *upl, int index) { - return(UPL_DIRTY_PAGE(upl, index)); + return UPL_DIRTY_PAGE(upl, index); } -boolean_t upl_valid_page(upl_page_info_t *upl, int index) +boolean_t +upl_valid_page(upl_page_info_t *upl, int index) { - return(UPL_VALID_PAGE(upl, index)); + return UPL_VALID_PAGE(upl, 
index); } -ppnum_t upl_phys_page(upl_page_info_t *upl, int index) +ppnum_t +upl_phys_page(upl_page_info_t *upl, int index) { - return(UPL_PHYS_PAGE(upl, index)); + return UPL_PHYS_PAGE(upl, index); } -void upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v) +void +upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v) { upl[index].mark = v; } -boolean_t upl_page_get_mark(upl_page_info_t *upl, int index) +boolean_t +upl_page_get_mark(upl_page_info_t *upl, int index) { return upl[index].mark; } @@ -9620,102 +9848,134 @@ vm_countdirtypages(void) int precpages; - dpages=0; - pgopages=0; - precpages=0; + dpages = 0; + pgopages = 0; + precpages = 0; vm_page_lock_queues(); m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); do { - if (m ==(vm_page_t )0) break; + if (m == (vm_page_t)0) { + break; + } - if(m->vmp_dirty) dpages++; - if(m->vmp_free_when_done) pgopages++; - if(m->vmp_precious) precpages++; + if (m->vmp_dirty) { + dpages++; + } + if (m->vmp_free_when_done) { + pgopages++; + } + if (m->vmp_precious) { + precpages++; + } assert(VM_PAGE_OBJECT(m) != kernel_object); m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq); - if (m ==(vm_page_t )0) break; - + if (m == (vm_page_t)0) { + break; + } } while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m)); vm_page_unlock_queues(); vm_page_lock_queues(); m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled); do { - if (m ==(vm_page_t )0) break; + if (m == (vm_page_t)0) { + break; + } dpages++; assert(m->vmp_dirty); assert(!m->vmp_free_when_done); assert(VM_PAGE_OBJECT(m) != kernel_object); m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq); - if (m ==(vm_page_t )0) break; - + if (m == (vm_page_t)0) { + break; + } } while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m)); vm_page_unlock_queues(); vm_page_lock_queues(); m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); do { - if (m ==(vm_page_t )0) break; + if (m == (vm_page_t)0) { + break; + } - if(m->vmp_dirty) dpages++; - if(m->vmp_free_when_done) pgopages++; - if(m->vmp_precious) precpages++; + if (m->vmp_dirty) { + dpages++; + } + if (m->vmp_free_when_done) { + pgopages++; + } + if (m->vmp_precious) { + precpages++; + } assert(VM_PAGE_OBJECT(m) != kernel_object); m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq); - if (m ==(vm_page_t )0) break; - + if (m == (vm_page_t)0) { + break; + } } while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m)); vm_page_unlock_queues(); printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages); - dpages=0; - pgopages=0; - precpages=0; + dpages = 0; + pgopages = 0; + precpages = 0; vm_page_lock_queues(); m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active); do { - if(m == (vm_page_t )0) break; - if(m->vmp_dirty) dpages++; - if(m->vmp_free_when_done) pgopages++; - if(m->vmp_precious) precpages++; + if (m == (vm_page_t)0) { + break; + } + if (m->vmp_dirty) { + dpages++; + } + if (m->vmp_free_when_done) { + pgopages++; + } + if (m->vmp_precious) { + precpages++; + } assert(VM_PAGE_OBJECT(m) != kernel_object); m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq); - if(m == (vm_page_t )0) break; - + if (m == (vm_page_t)0) { + break; + } } while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m)); vm_page_unlock_queues(); printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages); - } #endif /* MACH_BSD */ #if CONFIG_IOSCHED -int upl_get_cached_tier(upl_t upl) +int +upl_get_cached_tier(upl_t upl) { - assert(upl); - if 
(upl->flags & UPL_TRACKED_BY_OBJECT) - return (upl->upl_priority); - return (-1); + assert(upl); + if (upl->flags & UPL_TRACKED_BY_OBJECT) { + return upl->upl_priority; + } + return -1; } #endif /* CONFIG_IOSCHED */ -void upl_callout_iodone(upl_t upl) +void +upl_callout_iodone(upl_t upl) { - struct upl_io_completion *upl_ctx = upl->upl_iodone; + struct upl_io_completion *upl_ctx = upl->upl_iodone; if (upl_ctx) { - void (*iodone_func)(void *, int) = upl_ctx->io_done; + void (*iodone_func)(void *, int) = upl_ctx->io_done; assert(upl_ctx->io_done); @@ -9723,63 +9983,76 @@ void upl_callout_iodone(upl_t upl) } } -void upl_set_iodone(upl_t upl, void *upl_iodone) +void +upl_set_iodone(upl_t upl, void *upl_iodone) { - upl->upl_iodone = (struct upl_io_completion *)upl_iodone; + upl->upl_iodone = (struct upl_io_completion *)upl_iodone; } -void upl_set_iodone_error(upl_t upl, int error) +void +upl_set_iodone_error(upl_t upl, int error) { - struct upl_io_completion *upl_ctx = upl->upl_iodone; + struct upl_io_completion *upl_ctx = upl->upl_iodone; - if (upl_ctx) - upl_ctx->io_error = error; + if (upl_ctx) { + upl_ctx->io_error = error; + } } -ppnum_t upl_get_highest_page( - upl_t upl) +ppnum_t +upl_get_highest_page( + upl_t upl) { - return upl->highest_page; + return upl->highest_page; } -upl_size_t upl_get_size( - upl_t upl) +upl_size_t +upl_get_size( + upl_t upl) { - return upl->size; + return upl->size; } -upl_t upl_associated_upl(upl_t upl) +upl_t +upl_associated_upl(upl_t upl) { return upl->associated_upl; } -void upl_set_associated_upl(upl_t upl, upl_t associated_upl) +void +upl_set_associated_upl(upl_t upl, upl_t associated_upl) { upl->associated_upl = associated_upl; } -struct vnode * upl_lookup_vnode(upl_t upl) +struct vnode * +upl_lookup_vnode(upl_t upl) { - if (!upl->map_object->internal) + if (!upl->map_object->internal) { return vnode_pager_lookup_vnode(upl->map_object->pager); - else + } else { return NULL; + } } #if UPL_DEBUG -kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2) +kern_return_t +upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2) { upl->ubc_alias1 = alias1; upl->ubc_alias2 = alias2; return KERN_SUCCESS; } -int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2) +int +upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2) { - if(al) + if (al) { *al = upl->ubc_alias1; - if(al2) + } + if (al2) { *al2 = upl->ubc_alias2; + } return KERN_SUCCESS; } #endif /* UPL_DEBUG */ @@ -9791,10 +10064,9 @@ int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2) extern boolean_t vm_compressor_low_on_space(void); boolean_t -VM_PRESSURE_NORMAL_TO_WARNING(void) { - - if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) { - +VM_PRESSURE_NORMAL_TO_WARNING(void) +{ + if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { /* Available pages below our threshold */ if (memorystatus_available_pages < memorystatus_available_pages_pressure) { /* No frozen processes to kill */ @@ -9806,24 +10078,22 @@ VM_PRESSURE_NORMAL_TO_WARNING(void) { } } return FALSE; - } else { - return ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0); + return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 
1 : 0; } } boolean_t -VM_PRESSURE_WARNING_TO_CRITICAL(void) { - - if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) { - +VM_PRESSURE_WARNING_TO_CRITICAL(void) +{ + if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { /* Available pages below our threshold */ if (memorystatus_available_pages < memorystatus_available_pages_critical) { return TRUE; } return FALSE; } else { - return (vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0); + return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0; } } @@ -9831,10 +10101,9 @@ VM_PRESSURE_WARNING_TO_CRITICAL(void) { * Downward trajectory. */ boolean_t -VM_PRESSURE_WARNING_TO_NORMAL(void) { - - if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) { - +VM_PRESSURE_WARNING_TO_NORMAL(void) +{ + if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { /* Available pages above our threshold */ unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100)); if (memorystatus_available_pages > target_threshold) { @@ -9842,15 +10111,14 @@ VM_PRESSURE_WARNING_TO_NORMAL(void) { } return FALSE; } else { - return ((AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0); + return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0; } } boolean_t -VM_PRESSURE_CRITICAL_TO_WARNING(void) { - - if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) { - +VM_PRESSURE_CRITICAL_TO_WARNING(void) +{ + if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { /* Available pages above our threshold */ unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100)); if (memorystatus_available_pages > target_threshold) { @@ -9858,20 +10126,20 @@ VM_PRESSURE_CRITICAL_TO_WARNING(void) { } return FALSE; } else { - return ((AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0); + return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 
1 : 0; } } #endif /* VM_PRESSURE_EVENTS */ -#define VM_TEST_COLLAPSE_COMPRESSOR 0 -#define VM_TEST_WIRE_AND_EXTRACT 0 -#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0 +#define VM_TEST_COLLAPSE_COMPRESSOR 0 +#define VM_TEST_WIRE_AND_EXTRACT 0 +#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0 #if __arm64__ -#define VM_TEST_KERNEL_OBJECT_FAULT 0 +#define VM_TEST_KERNEL_OBJECT_FAULT 0 #endif /* __arm64__ */ -#define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG) +#define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG) #if VM_TEST_COLLAPSE_COMPRESSOR extern boolean_t vm_object_collapse_compressor_allowed; @@ -9879,11 +10147,11 @@ extern boolean_t vm_object_collapse_compressor_allowed; static void vm_test_collapse_compressor(void) { - vm_object_size_t backing_size, top_size; - vm_object_t backing_object, top_object; - vm_map_offset_t backing_offset, top_offset; - unsigned char *backing_address, *top_address; - kern_return_t kr; + vm_object_size_t backing_size, top_size; + vm_object_t backing_object, top_object; + vm_map_offset_t backing_offset, top_offset; + unsigned char *backing_address, *top_address; + kern_return_t kr; printf("VM_TEST_COLLAPSE_COMPRESSOR:\n"); @@ -9892,45 +10160,46 @@ vm_test_collapse_compressor(void) backing_object = vm_object_allocate(backing_size); assert(backing_object != VM_OBJECT_NULL); printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n", - backing_object); + backing_object); /* map backing object */ backing_offset = 0; kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0, - VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, - backing_object, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, + backing_object, 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); assert(kr == KERN_SUCCESS); backing_address = (unsigned char *) backing_offset; printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "mapped backing object %p at 0x%llx\n", - backing_object, (uint64_t) backing_offset); + "mapped backing object %p at 0x%llx\n", + backing_object, (uint64_t) backing_offset); /* populate with pages to be compressed in backing object */ - backing_address[0x1*PAGE_SIZE] = 0xB1; - backing_address[0x4*PAGE_SIZE] = 0xB4; - backing_address[0x7*PAGE_SIZE] = 0xB7; - backing_address[0xa*PAGE_SIZE] = 0xBA; - backing_address[0xd*PAGE_SIZE] = 0xBD; + backing_address[0x1 * PAGE_SIZE] = 0xB1; + backing_address[0x4 * PAGE_SIZE] = 0xB4; + backing_address[0x7 * PAGE_SIZE] = 0xB7; + backing_address[0xa * PAGE_SIZE] = 0xBA; + backing_address[0xd * PAGE_SIZE] = 0xBD; printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "populated pages to be compressed in " - "backing_object %p\n", backing_object); + "populated pages to be compressed in " + "backing_object %p\n", backing_object); /* compress backing object */ vm_object_pageout(backing_object); printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n", - backing_object); + backing_object); /* wait for all the pages to be gone */ - while (*(volatile int *)&backing_object->resident_page_count != 0) + while (*(volatile int *)&backing_object->resident_page_count != 0) { IODelay(10); + } printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n", - backing_object); + backing_object); /* populate with pages to be resident in backing object */ - backing_address[0x0*PAGE_SIZE] = 0xB0; - backing_address[0x3*PAGE_SIZE] = 0xB3; - backing_address[0x6*PAGE_SIZE] = 0xB6; - backing_address[0x9*PAGE_SIZE] = 0xB9; - backing_address[0xc*PAGE_SIZE] = 0xBC; + backing_address[0x0 
* PAGE_SIZE] = 0xB0; + backing_address[0x3 * PAGE_SIZE] = 0xB3; + backing_address[0x6 * PAGE_SIZE] = 0xB6; + backing_address[0x9 * PAGE_SIZE] = 0xB9; + backing_address[0xc * PAGE_SIZE] = 0xBC; printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "populated pages to be resident in " - "backing_object %p\n", backing_object); + "populated pages to be resident in " + "backing_object %p\n", backing_object); /* leave the other pages absent */ /* mess with the paging_offset of the backing_object */ assert(backing_object->paging_offset == 0); @@ -9941,41 +10210,42 @@ vm_test_collapse_compressor(void) top_object = vm_object_allocate(top_size); assert(top_object != VM_OBJECT_NULL); printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n", - top_object); + top_object); /* map top object */ top_offset = 0; kr = vm_map_enter(kernel_map, &top_offset, top_size, 0, - VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, - top_object, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, + top_object, 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT); assert(kr == KERN_SUCCESS); top_address = (unsigned char *) top_offset; printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "mapped top object %p at 0x%llx\n", - top_object, (uint64_t) top_offset); + "mapped top object %p at 0x%llx\n", + top_object, (uint64_t) top_offset); /* populate with pages to be compressed in top object */ - top_address[0x3*PAGE_SIZE] = 0xA3; - top_address[0x4*PAGE_SIZE] = 0xA4; - top_address[0x5*PAGE_SIZE] = 0xA5; + top_address[0x3 * PAGE_SIZE] = 0xA3; + top_address[0x4 * PAGE_SIZE] = 0xA4; + top_address[0x5 * PAGE_SIZE] = 0xA5; printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "populated pages to be compressed in " - "top_object %p\n", top_object); + "populated pages to be compressed in " + "top_object %p\n", top_object); /* compress top object */ vm_object_pageout(top_object); printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n", - top_object); + top_object); /* wait for all the pages to be gone */ - while (top_object->resident_page_count != 0) + while (top_object->resident_page_count != 0) { IODelay(10); + } printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n", - top_object); + top_object); /* populate with pages to be resident in top object */ - top_address[0x0*PAGE_SIZE] = 0xA0; - top_address[0x1*PAGE_SIZE] = 0xA1; - top_address[0x2*PAGE_SIZE] = 0xA2; + top_address[0x0 * PAGE_SIZE] = 0xA0; + top_address[0x1 * PAGE_SIZE] = 0xA1; + top_address[0x2 * PAGE_SIZE] = 0xA2; printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "populated pages to be resident in " - "top_object %p\n", top_object); + "populated pages to be resident in " + "top_object %p\n", top_object); /* leave the other pages absent */ /* link the 2 objects */ @@ -9983,18 +10253,18 @@ vm_test_collapse_compressor(void) top_object->shadow = backing_object; top_object->vo_shadow_offset = 0x3000; printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n", - top_object, backing_object); + top_object, backing_object); /* unmap backing object */ vm_map_remove(kernel_map, - backing_offset, - backing_offset + backing_size, - VM_MAP_REMOVE_NO_FLAGS); + backing_offset, + backing_offset + backing_size, + VM_MAP_REMOVE_NO_FLAGS); printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "unmapped backing_object %p [0x%llx:0x%llx]\n", - backing_object, - (uint64_t) backing_offset, - (uint64_t) (backing_offset + backing_size)); + "unmapped backing_object %p [0x%llx:0x%llx]\n", + backing_object, + (uint64_t) backing_offset, + (uint64_t) (backing_offset + 
backing_size)); /* collapse */ printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object); @@ -10013,32 +10283,32 @@ vm_test_collapse_compressor(void) } else { /* check the contents of the mapping */ unsigned char expect[9] = - { 0xA0, 0xA1, 0xA2, /* resident in top */ - 0xA3, 0xA4, 0xA5, /* compressed in top */ - 0xB9, /* resident in backing + shadow_offset */ - 0xBD, /* compressed in backing + shadow_offset + paging_offset */ - 0x00 }; /* absent in both */ + { 0xA0, 0xA1, 0xA2, /* resident in top */ + 0xA3, 0xA4, 0xA5, /* compressed in top */ + 0xB9, /* resident in backing + shadow_offset */ + 0xBD, /* compressed in backing + shadow_offset + paging_offset */ + 0x00 }; /* absent in both */ unsigned char actual[9]; unsigned int i, errors; errors = 0; - for (i = 0; i < sizeof (actual); i++) { - actual[i] = (unsigned char) top_address[i*PAGE_SIZE]; + for (i = 0; i < sizeof(actual); i++) { + actual[i] = (unsigned char) top_address[i * PAGE_SIZE]; if (actual[i] != expect[i]) { errors++; } } printf("VM_TEST_COLLAPSE_COMPRESSOR: " - "actual [%x %x %x %x %x %x %x %x %x] " - "expect [%x %x %x %x %x %x %x %x %x] " - "%d errors\n", - actual[0], actual[1], actual[2], actual[3], - actual[4], actual[5], actual[6], actual[7], - actual[8], - expect[0], expect[1], expect[2], expect[3], - expect[4], expect[5], expect[6], expect[7], - expect[8], - errors); + "actual [%x %x %x %x %x %x %x %x %x] " + "expect [%x %x %x %x %x %x %x %x %x] " + "%d errors\n", + actual[0], actual[1], actual[2], actual[3], + actual[4], actual[5], actual[6], actual[7], + actual[8], + expect[0], expect[1], expect[2], expect[3], + expect[4], expect[5], expect[6], expect[7], + expect[8], + errors); if (errors) { panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n"); } else { @@ -10051,69 +10321,69 @@ vm_test_collapse_compressor(void) #endif /* VM_TEST_COLLAPSE_COMPRESSOR */ #if VM_TEST_WIRE_AND_EXTRACT -extern ledger_template_t task_ledger_template; +extern ledger_template_t task_ledger_template; #include extern ppnum_t vm_map_get_phys_page(vm_map_t map, - vm_offset_t offset); + vm_offset_t offset); static void vm_test_wire_and_extract(void) { - ledger_t ledger; - vm_map_t user_map, wire_map; - mach_vm_address_t user_addr, wire_addr; - mach_vm_size_t user_size, wire_size; - mach_vm_offset_t cur_offset; - vm_prot_t cur_prot, max_prot; - ppnum_t user_ppnum, wire_ppnum; - kern_return_t kr; + ledger_t ledger; + vm_map_t user_map, wire_map; + mach_vm_address_t user_addr, wire_addr; + mach_vm_size_t user_size, wire_size; + mach_vm_offset_t cur_offset; + vm_prot_t cur_prot, max_prot; + ppnum_t user_ppnum, wire_ppnum; + kern_return_t kr; ledger = ledger_instantiate(task_ledger_template, - LEDGER_CREATE_ACTIVE_ENTRIES); + LEDGER_CREATE_ACTIVE_ENTRIES); user_map = vm_map_create(pmap_create(ledger, 0, PMAP_CREATE_64BIT), - 0x100000000ULL, - 0x200000000ULL, - TRUE); + 0x100000000ULL, + 0x200000000ULL, + TRUE); wire_map = vm_map_create(NULL, - 0x100000000ULL, - 0x200000000ULL, - TRUE); + 0x100000000ULL, + 0x200000000ULL, + TRUE); user_addr = 0; user_size = 0x10000; kr = mach_vm_allocate(user_map, - &user_addr, - user_size, - VM_FLAGS_ANYWHERE); + &user_addr, + user_size, + VM_FLAGS_ANYWHERE); assert(kr == KERN_SUCCESS); wire_addr = 0; wire_size = user_size; kr = mach_vm_remap(wire_map, - &wire_addr, - wire_size, - 0, - VM_FLAGS_ANYWHERE, - user_map, - user_addr, - FALSE, - &cur_prot, - &max_prot, - VM_INHERIT_NONE); + &wire_addr, + wire_size, + 0, + VM_FLAGS_ANYWHERE, + user_map, + user_addr, + FALSE, + &cur_prot, + &max_prot, + VM_INHERIT_NONE); 
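	/*
	 * wire_map now aliases user_map's allocation, so wiring through
	 * wire_map below should surface the same physical page numbers
	 * that vm_map_get_phys_page() reports for user_map; any mismatch
	 * is treated as a test failure.
	 */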
assert(kr == KERN_SUCCESS); for (cur_offset = 0; - cur_offset < wire_size; - cur_offset += PAGE_SIZE) { + cur_offset < wire_size; + cur_offset += PAGE_SIZE) { kr = vm_map_wire_and_extract(wire_map, - wire_addr + cur_offset, - VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK), - TRUE, - &wire_ppnum); + wire_addr + cur_offset, + VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK), + TRUE, + &wire_ppnum); assert(kr == KERN_SUCCESS); user_ppnum = vm_map_get_phys_page(user_map, - user_addr + cur_offset); + user_addr + cur_offset); printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x " - "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", - kr, - user_map, user_addr + cur_offset, user_ppnum, - wire_map, wire_addr + cur_offset, wire_ppnum); + "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", + kr, + user_map, user_addr + cur_offset, user_ppnum, + wire_map, wire_addr + cur_offset, wire_ppnum); if (kr != KERN_SUCCESS || wire_ppnum == 0 || wire_ppnum != user_ppnum) { @@ -10122,16 +10392,16 @@ vm_test_wire_and_extract(void) } cur_offset -= PAGE_SIZE; kr = vm_map_wire_and_extract(wire_map, - wire_addr + cur_offset, - VM_PROT_DEFAULT, - TRUE, - &wire_ppnum); + wire_addr + cur_offset, + VM_PROT_DEFAULT, + TRUE, + &wire_ppnum); assert(kr == KERN_SUCCESS); printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x " - "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", - kr, - user_map, user_addr + cur_offset, user_ppnum, - wire_map, wire_addr + cur_offset, wire_ppnum); + "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n", + kr, + user_map, user_addr + cur_offset, user_ppnum, + wire_map, wire_addr + cur_offset, wire_ppnum); if (kr != KERN_SUCCESS || wire_ppnum == 0 || wire_ppnum != user_ppnum) { @@ -10163,7 +10433,7 @@ vm_test_page_wire_overflow_panic(void) vm_page_unlock_queues(); vm_object_unlock(object); panic("FBDP(%p,%p): wire_count overflow not detected\n", - object, page); + object, page); } #else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */ #define vm_test_page_wire_overflow_panic() @@ -10180,11 +10450,11 @@ vm_test_kernel_object_fault(void) int ret; kr = kernel_memory_allocate(kernel_map, &stack, - kernel_stack_size + (2*PAGE_SIZE), - 0, - (KMA_KSTACK | KMA_KOBJECT | - KMA_GUARD_FIRST | KMA_GUARD_LAST), - VM_KERN_MEMORY_STACK); + kernel_stack_size + (2 * PAGE_SIZE), + 0, + (KMA_KSTACK | KMA_KOBJECT | + KMA_GUARD_FIRST | KMA_GUARD_LAST), + VM_KERN_MEMORY_STACK); if (kr != KERN_SUCCESS) { panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr); } @@ -10195,9 +10465,9 @@ vm_test_kernel_object_fault(void) printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n"); } vm_map_remove(kernel_map, - stack, - stack + kernel_stack_size + (2*PAGE_SIZE), - VM_MAP_REMOVE_KUNWIRE); + stack, + stack + kernel_stack_size + (2 * PAGE_SIZE), + VM_MAP_REMOVE_KUNWIRE); stack = 0; } #else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */ @@ -10208,11 +10478,11 @@ vm_test_kernel_object_fault(void) static void vm_test_device_pager_transpose(void) { - memory_object_t device_pager; - vm_object_t anon_object, device_object; - vm_size_t size; - vm_map_offset_t anon_mapping, device_mapping; - kern_return_t kr; + memory_object_t device_pager; + vm_object_t anon_object, device_object; + vm_size_t size; + vm_map_offset_t device_mapping; + kern_return_t kr; size = 3 * PAGE_SIZE; anon_object = vm_object_allocate(size); @@ -10221,20 +10491,27 @@ vm_test_device_pager_transpose(void) assert(device_pager != NULL); device_object = memory_object_to_vm_object(device_pager); assert(device_object != VM_OBJECT_NULL); - anon_mapping = 0; +#if 0 + /* + * 
Can't actually map this, since another thread might do a + * vm_map_enter() that gets coalesced into this object, which + * would cause the test to fail. + */ + vm_map_offset_t anon_mapping = 0; kr = vm_map_enter(kernel_map, &anon_mapping, size, 0, - VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, - anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, - VM_INHERIT_DEFAULT); + VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE, + anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT); assert(kr == KERN_SUCCESS); +#endif device_mapping = 0; kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0, - VM_FLAGS_ANYWHERE, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - (void *)device_pager, 0, FALSE, - VM_PROT_DEFAULT, VM_PROT_ALL, - VM_INHERIT_DEFAULT); + VM_FLAGS_ANYWHERE, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + (void *)device_pager, 0, FALSE, + VM_PROT_DEFAULT, VM_PROT_ALL, + VM_INHERIT_DEFAULT); assert(kr == KERN_SUCCESS); memory_object_deallocate(device_pager); @@ -10266,8 +10543,10 @@ vm_test_device_pager_transpose(void) assert(anon_object->ref_count == 2); assert(anon_object->named); +#if 0 kr = vm_deallocate(kernel_map, anon_mapping, size); assert(kr == KERN_SUCCESS); +#endif assert(device_object->ref_count == 1); assert(!device_object->named); kr = vm_deallocate(kernel_map, device_mapping, size); diff --git a/osfmk/vm/vm_pageout.h b/osfmk/vm/vm_pageout.h index 237205926..b0608aef5 100644 --- a/osfmk/vm/vm_pageout.h +++ b/osfmk/vm/vm_pageout.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -63,10 +63,10 @@ * Declarations for the pageout daemon interface. */ -#ifndef _VM_VM_PAGEOUT_H_ +#ifndef _VM_VM_PAGEOUT_H_ #define _VM_VM_PAGEOUT_H_ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #include #include @@ -81,13 +81,13 @@ #include -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include #endif #include -#define VM_PAGE_AVAILABLE_COUNT() ((unsigned int)(vm_page_cleaned_count)) +#define VM_PAGE_AVAILABLE_COUNT() ((unsigned int)(vm_page_cleaned_count)) /* externally manipulated counters */ extern unsigned int vm_pageout_cleaned_fault_reactivated; @@ -107,29 +107,29 @@ extern boolean_t vm_pressure_events_enabled; * the following codes are used in the DBG_MACH_WORKINGSET subclass * of the DBG_MACH class */ -#define VM_DISCONNECT_ALL_PAGE_MAPPINGS 0x00 -#define VM_DISCONNECT_TASK_PAGE_MAPPINGS 0x01 -#define VM_REAL_FAULT_ADDR_INTERNAL 0x02 -#define VM_REAL_FAULT_ADDR_PURGABLE 0x03 -#define VM_REAL_FAULT_ADDR_EXTERNAL 0x04 -#define VM_REAL_FAULT_ADDR_SHAREDCACHE 0x05 +#define VM_DISCONNECT_ALL_PAGE_MAPPINGS 0x00 +#define VM_DISCONNECT_TASK_PAGE_MAPPINGS 0x01 +#define VM_REAL_FAULT_ADDR_INTERNAL 0x02 +#define VM_REAL_FAULT_ADDR_PURGABLE 0x03 +#define VM_REAL_FAULT_ADDR_EXTERNAL 0x04 +#define VM_REAL_FAULT_ADDR_SHAREDCACHE 0x05 -extern int vm_debug_events; +extern int vm_debug_events; -#define VMF_CHECK_ZFDELAY 0x100 -#define VMF_COWDELAY 0x101 -#define VMF_ZFDELAY 0x102 -#define VMF_COMPRESSORDELAY 0x103 +#define VMF_CHECK_ZFDELAY 0x100 +#define VMF_COWDELAY 0x101 +#define VMF_ZFDELAY 0x102 +#define VMF_COMPRESSORDELAY 0x103 -#define VM_PAGEOUT_SCAN 0x104 -#define VM_PAGEOUT_BALANCE 0x105 -#define VM_PAGEOUT_FREELIST 0x106 -#define VM_PAGEOUT_PURGEONE 0x107 -#define VM_PAGEOUT_CACHE_EVICT 0x108 -#define VM_PAGEOUT_THREAD_BLOCK 0x109 -#define VM_PAGEOUT_JETSAM 0x10A +#define VM_PAGEOUT_SCAN 0x104 +#define VM_PAGEOUT_BALANCE 0x105 +#define VM_PAGEOUT_FREELIST 0x106 +#define VM_PAGEOUT_PURGEONE 0x107 +#define VM_PAGEOUT_CACHE_EVICT 0x108 +#define VM_PAGEOUT_THREAD_BLOCK 0x109 +#define VM_PAGEOUT_JETSAM 0x10A #define VM_INFO1 0x10B #define VM_INFO2 0x10C #define VM_INFO3 0x10D @@ -140,40 +140,40 @@ extern int vm_debug_events; #define VM_INFO8 0x112 #define VM_INFO9 0x113 -#define VM_UPL_PAGE_WAIT 0x120 -#define VM_IOPL_PAGE_WAIT 0x121 -#define VM_PAGE_WAIT_BLOCK 0x122 +#define VM_UPL_PAGE_WAIT 0x120 +#define VM_IOPL_PAGE_WAIT 0x121 +#define VM_PAGE_WAIT_BLOCK 0x122 #if CONFIG_IOSCHED -#define VM_PAGE_SLEEP 0x123 -#define VM_PAGE_EXPEDITE 0x124 +#define VM_PAGE_SLEEP 0x123 +#define VM_PAGE_EXPEDITE 0x124 #define VM_PAGE_EXPEDITE_NO_MEMORY 0x125 #endif -#define VM_PAGE_GRAB 0x126 -#define VM_PAGE_RELEASE 0x127 +#define VM_PAGE_GRAB 0x126 +#define VM_PAGE_RELEASE 0x127 -#define VM_PRESSURE_EVENT 0x130 -#define VM_EXECVE 0x131 -#define VM_WAKEUP_COMPACTOR_SWAPPER 0x132 -#define VM_UPL_REQUEST 0x133 -#define VM_IOPL_REQUEST 0x134 -#define VM_KERN_REQUEST 0x135 +#define VM_PRESSURE_EVENT 0x130 +#define VM_EXECVE 0x131 +#define VM_WAKEUP_COMPACTOR_SWAPPER 0x132 +#define VM_UPL_REQUEST 0x133 +#define VM_IOPL_REQUEST 0x134 +#define VM_KERN_REQUEST 0x135 -#define VM_DATA_WRITE 0x140 +#define VM_DATA_WRITE 0x140 -#define 
VM_PRESSURE_LEVEL_CHANGE 0x141 +#define VM_PRESSURE_LEVEL_CHANGE 0x141 -#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4) \ - MACRO_BEGIN \ - if (__improbable(vm_debug_events)) { \ - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \ - } \ +#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4) \ + MACRO_BEGIN \ + if (__improbable(vm_debug_events)) { \ + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \ + } \ MACRO_END -#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4) \ - MACRO_BEGIN \ - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \ +#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4) \ + MACRO_BEGIN \ + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \ MACRO_END extern void memoryshot(unsigned int event, unsigned int control); @@ -182,7 +182,7 @@ extern void update_vm_info(void); #if CONFIG_IOSCHED extern int upl_get_cached_tier( - upl_t upl); + upl_t upl); #endif extern void upl_set_iodone(upl_t, void *); @@ -190,64 +190,64 @@ extern void upl_set_iodone_error(upl_t, int); extern void upl_callout_iodone(upl_t); extern ppnum_t upl_get_highest_page( - upl_t upl); + upl_t upl); extern upl_size_t upl_get_size( - upl_t upl); + upl_t upl); extern upl_t upl_associated_upl(upl_t upl); extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl); -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE extern kern_return_t vm_map_create_upl( - vm_map_t map, - vm_map_address_t offset, - upl_size_t *upl_size, - upl_t *upl, - upl_page_info_array_t page_list, - unsigned int *count, - upl_control_flags_t *flags, + vm_map_t map, + vm_map_address_t offset, + upl_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count, + upl_control_flags_t *flags, vm_tag_t tag); extern void iopl_valid_data( - upl_t upl_ptr, + upl_t upl_ptr, vm_tag_t tag); -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ extern struct vnode * upl_lookup_vnode(upl_t upl); -#ifndef MACH_KERNEL_PRIVATE -typedef struct vm_page *vm_page_t; +#ifndef MACH_KERNEL_PRIVATE +typedef struct vm_page *vm_page_t; #endif extern void vm_page_free_list( - vm_page_t mem, - boolean_t prepare_object); + vm_page_t mem, + boolean_t prepare_object); extern kern_return_t vm_page_alloc_list( - int page_count, - int flags, - vm_page_t * list); + int page_count, + int flags, + vm_page_t * list); extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset); extern vm_object_offset_t vm_page_get_offset(vm_page_t page); extern ppnum_t vm_page_get_phys_page(vm_page_t page); extern vm_page_t vm_page_get_next(vm_page_t page); -extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level); +extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level); #if !CONFIG_EMBEDDED -extern kern_return_t vm_pageout_wait(uint64_t deadline); +extern kern_return_t vm_pageout_wait(uint64_t deadline); #endif -#ifdef MACH_KERNEL_PRIVATE +#ifdef MACH_KERNEL_PRIVATE #include -extern unsigned int vm_pageout_scan_event_counter; -extern unsigned int vm_page_anonymous_count; +extern unsigned int vm_pageout_scan_event_counter; +extern unsigned int vm_page_anonymous_count; /* @@ -255,54 +255,54 @@ extern unsigned int vm_page_anonymous_count; * manipulate 
this structure */ struct vm_pageout_queue { - vm_page_queue_head_t pgo_pending; /* laundry pages to be processed by pager's iothread */ - unsigned int pgo_laundry; /* current count of laundry pages on queue or in flight */ - unsigned int pgo_maxlaundry; - uint64_t pgo_tid; /* thread ID of I/O thread that services this queue */ - uint8_t pgo_lowpriority; /* iothread is set to use low priority I/O */ - - unsigned int pgo_idle:1, /* iothread is blocked waiting for work to do */ - pgo_busy:1, /* iothread is currently processing request from pgo_pending */ - pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */ - pgo_draining:1, - pgo_inited:1, - :0; + vm_page_queue_head_t pgo_pending; /* laundry pages to be processed by pager's iothread */ + unsigned int pgo_laundry; /* current count of laundry pages on queue or in flight */ + unsigned int pgo_maxlaundry; + uint64_t pgo_tid; /* thread ID of I/O thread that services this queue */ + uint8_t pgo_lowpriority; /* iothread is set to use low priority I/O */ + + unsigned int pgo_idle:1, /* iothread is blocked waiting for work to do */ + pgo_busy:1, /* iothread is currently processing request from pgo_pending */ + pgo_throttled:1, /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */ + pgo_draining:1, + pgo_inited:1, + :0; }; -#define VM_PAGE_Q_THROTTLED(q) \ - ((q)->pgo_laundry >= (q)->pgo_maxlaundry) +#define VM_PAGE_Q_THROTTLED(q) \ + ((q)->pgo_laundry >= (q)->pgo_maxlaundry) -extern struct vm_pageout_queue vm_pageout_queue_internal; -extern struct vm_pageout_queue vm_pageout_queue_external; +extern struct vm_pageout_queue vm_pageout_queue_internal; +extern struct vm_pageout_queue vm_pageout_queue_external; /* * Routines exported to Mach. */ -extern void vm_pageout(void); +extern void vm_pageout(void); -extern kern_return_t vm_pageout_internal_start(void); +extern kern_return_t vm_pageout_internal_start(void); -extern void vm_pageout_object_terminate( - vm_object_t object); +extern void vm_pageout_object_terminate( + vm_object_t object); -extern void vm_pageout_cluster( - vm_page_t m); +extern void vm_pageout_cluster( + vm_page_t m); -extern void vm_pageout_initialize_page( - vm_page_t m); +extern void vm_pageout_initialize_page( + vm_page_t m); /* UPL exported routines and structures */ -#define upl_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr) -#define upl_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp) -#define upl_lock(object) lck_mtx_lock(&(object)->Lock) -#define upl_unlock(object) lck_mtx_unlock(&(object)->Lock) -#define upl_try_lock(object) lck_mtx_try_lock(&(object)->Lock) +#define upl_lock_init(object) lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr) +#define upl_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp) +#define upl_lock(object) lck_mtx_lock(&(object)->Lock) +#define upl_unlock(object) lck_mtx_unlock(&(object)->Lock) +#define upl_try_lock(object) lck_mtx_try_lock(&(object)->Lock) -#define MAX_VECTOR_UPL_ELEMENTS 8 +#define MAX_VECTOR_UPL_ELEMENTS 8 -struct _vector_upl_iostates{ +struct _vector_upl_iostates { upl_offset_t offset; upl_size_t size; }; @@ -310,16 +310,16 @@ struct _vector_upl_iostates{ typedef struct _vector_upl_iostates vector_upl_iostates_t; struct _vector_upl { - upl_size_t size; - uint32_t num_upls; - uint32_t invalid_upls; - uint32_t _reserved; - vm_map_t submap; - vm_offset_t submap_dst_addr; - vm_object_offset_t offset; - upl_t 
upl_elems[MAX_VECTOR_UPL_ELEMENTS]; - upl_page_info_array_t pagelist; - vector_upl_iostates_t upl_iostates[MAX_VECTOR_UPL_ELEMENTS]; + upl_size_t size; + uint32_t num_upls; + uint32_t invalid_upls; + uint32_t _reserved; + vm_map_t submap; + vm_offset_t submap_dst_addr; + vm_object_offset_t offset; + upl_t upl_elems[MAX_VECTOR_UPL_ELEMENTS]; + upl_page_info_array_t pagelist; + vector_upl_iostates_t upl_iostates[MAX_VECTOR_UPL_ELEMENTS]; }; typedef struct _vector_upl* vector_upl_t; @@ -327,87 +327,86 @@ typedef struct _vector_upl* vector_upl_t; /* universal page list structure */ #if UPL_DEBUG -#define UPL_DEBUG_STACK_FRAMES 16 +#define UPL_DEBUG_STACK_FRAMES 16 #define UPL_DEBUG_COMMIT_RECORDS 4 struct ucd { - upl_offset_t c_beg; - upl_offset_t c_end; - int c_aborted; - void * c_retaddr[UPL_DEBUG_STACK_FRAMES]; + upl_offset_t c_beg; + upl_offset_t c_end; + int c_aborted; + void * c_retaddr[UPL_DEBUG_STACK_FRAMES]; }; #endif struct upl_io_completion { + void *io_context; + void (*io_done)(void *, int); - void *io_context; - void (*io_done)(void *, int); - - int io_error; + int io_error; }; struct upl { - decl_lck_mtx_data(, Lock) /* Synchronization */ - int ref_count; - int ext_ref_count; - int flags; + decl_lck_mtx_data(, Lock) /* Synchronization */ + int ref_count; + int ext_ref_count; + int flags; vm_object_offset_t offset; - upl_size_t size; /* size in bytes of the address space */ - vm_offset_t kaddr; /* secondary mapping in kernel */ - vm_object_t map_object; - ppnum_t highest_page; - void* vector_upl; - upl_t associated_upl; - struct upl_io_completion *upl_iodone; + upl_size_t size; /* size in bytes of the address space */ + vm_offset_t kaddr; /* secondary mapping in kernel */ + vm_object_t map_object; + ppnum_t highest_page; + void* vector_upl; + upl_t associated_upl; + struct upl_io_completion *upl_iodone; #if CONFIG_IOSCHED - int upl_priority; + int upl_priority; uint64_t *upl_reprio_info; - void *decmp_io_upl; + void *decmp_io_upl; #endif #if CONFIG_IOSCHED || UPL_DEBUG thread_t upl_creator; - queue_chain_t uplq; /* List of outstanding upls on an obj */ + queue_chain_t uplq; /* List of outstanding upls on an obj */ #endif -#if UPL_DEBUG - uintptr_t ubc_alias1; - uintptr_t ubc_alias2; - - uint32_t upl_state; - uint32_t upl_commit_index; - void *upl_create_retaddr[UPL_DEBUG_STACK_FRAMES]; - - struct ucd upl_commit_records[UPL_DEBUG_COMMIT_RECORDS]; -#endif /* UPL_DEBUG */ +#if UPL_DEBUG + uintptr_t ubc_alias1; + uintptr_t ubc_alias2; + + uint32_t upl_state; + uint32_t upl_commit_index; + void *upl_create_retaddr[UPL_DEBUG_STACK_FRAMES]; + + struct ucd upl_commit_records[UPL_DEBUG_COMMIT_RECORDS]; +#endif /* UPL_DEBUG */ }; /* upl struct flags */ -#define UPL_PAGE_LIST_MAPPED 0x1 -#define UPL_KERNEL_MAPPED 0x2 -#define UPL_CLEAR_DIRTY 0x4 -#define UPL_COMPOSITE_LIST 0x8 -#define UPL_INTERNAL 0x10 -#define UPL_PAGE_SYNC_DONE 0x20 -#define UPL_DEVICE_MEMORY 0x40 -#define UPL_PAGEOUT 0x80 -#define UPL_LITE 0x100 -#define UPL_IO_WIRE 0x200 -#define UPL_ACCESS_BLOCKED 0x400 -#define UPL_SHADOWED 0x1000 -#define UPL_KERNEL_OBJECT 0x2000 -#define UPL_VECTOR 0x4000 -#define UPL_SET_DIRTY 0x8000 -#define UPL_HAS_BUSY 0x10000 -#define UPL_TRACKED_BY_OBJECT 0x20000 -#define UPL_EXPEDITE_SUPPORTED 0x40000 -#define UPL_DECMP_REQ 0x80000 -#define UPL_DECMP_REAL_IO 0x100000 +#define UPL_PAGE_LIST_MAPPED 0x1 +#define UPL_KERNEL_MAPPED 0x2 +#define UPL_CLEAR_DIRTY 0x4 +#define UPL_COMPOSITE_LIST 0x8 +#define UPL_INTERNAL 0x10 +#define UPL_PAGE_SYNC_DONE 0x20 +#define UPL_DEVICE_MEMORY 0x40 +#define 
UPL_PAGEOUT 0x80 +#define UPL_LITE 0x100 +#define UPL_IO_WIRE 0x200 +#define UPL_ACCESS_BLOCKED 0x400 +#define UPL_SHADOWED 0x1000 +#define UPL_KERNEL_OBJECT 0x2000 +#define UPL_VECTOR 0x4000 +#define UPL_SET_DIRTY 0x8000 +#define UPL_HAS_BUSY 0x10000 +#define UPL_TRACKED_BY_OBJECT 0x20000 +#define UPL_EXPEDITE_SUPPORTED 0x40000 +#define UPL_DECMP_REQ 0x80000 +#define UPL_DECMP_REAL_IO 0x100000 /* flags for upl_create flags parameter */ -#define UPL_CREATE_EXTERNAL 0 -#define UPL_CREATE_INTERNAL 0x1 -#define UPL_CREATE_LITE 0x2 -#define UPL_CREATE_IO_TRACKING 0x4 +#define UPL_CREATE_EXTERNAL 0 +#define UPL_CREATE_INTERNAL 0x1 +#define UPL_CREATE_LITE 0x2 +#define UPL_CREATE_IO_TRACKING 0x4 #define UPL_CREATE_EXPEDITE_SUP 0x8 extern upl_t vector_upl_create(vm_offset_t); @@ -420,46 +419,46 @@ extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*); extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t); extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*); extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*); -extern upl_t vector_upl_subupl_byindex(upl_t , uint32_t); -extern upl_t vector_upl_subupl_byoffset(upl_t , upl_offset_t*, upl_size_t*); +extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t); +extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*); extern void vm_object_set_pmap_cache_attr( - vm_object_t object, - upl_page_info_array_t user_page_list, - unsigned int num_pages, - boolean_t batch_pmap_op); + vm_object_t object, + upl_page_info_array_t user_page_list, + unsigned int num_pages, + boolean_t batch_pmap_op); extern kern_return_t vm_object_iopl_request( - vm_object_t object, - vm_object_offset_t offset, - upl_size_t size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - upl_control_flags_t cntrl_flags, + vm_object_t object, + vm_object_offset_t offset, + upl_size_t size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + upl_control_flags_t cntrl_flags, vm_tag_t tag); extern kern_return_t vm_object_super_upl_request( - vm_object_t object, - vm_object_offset_t offset, - upl_size_t size, - upl_size_t super_cluster, - upl_t *upl, - upl_page_info_t *user_page_list, - unsigned int *page_list_count, - upl_control_flags_t cntrl_flags, + vm_object_t object, + vm_object_offset_t offset, + upl_size_t size, + upl_size_t super_cluster, + upl_t *upl, + upl_page_info_t *user_page_list, + unsigned int *page_list_count, + upl_control_flags_t cntrl_flags, vm_tag_t tag); /* should be just a regular vm_map_enter() */ extern kern_return_t vm_map_enter_upl( - vm_map_t map, - upl_t upl, - vm_map_offset_t *dst_addr); + vm_map_t map, + upl_t upl, + vm_map_offset_t *dst_addr); /* should be just a regular vm_map_remove() */ extern kern_return_t vm_map_remove_upl( - vm_map_t map, - upl_t upl); + vm_map_t map, + upl_t upl); /* wired page list structure */ typedef uint32_t *wpl_array_t; @@ -470,18 +469,18 @@ extern void vm_pageout_throttle_down(vm_page_t page); extern void vm_pageout_throttle_up(vm_page_t page); extern kern_return_t vm_paging_map_object( - vm_page_t page, - vm_object_t object, - vm_object_offset_t offset, - vm_prot_t protection, - boolean_t can_unlock_object, - vm_map_size_t *size, /* IN/OUT */ - vm_map_offset_t *address, /* OUT */ - boolean_t *need_unmap); /* OUT */ + vm_page_t page, + vm_object_t object, + vm_object_offset_t offset, + vm_prot_t protection, + boolean_t can_unlock_object, + 
vm_map_size_t *size, /* IN/OUT */ + vm_map_offset_t *address, /* OUT */ + boolean_t *need_unmap); /* OUT */ extern void vm_paging_unmap_object( - vm_object_t object, - vm_map_offset_t start, - vm_map_offset_t end); + vm_object_t object, + vm_map_offset_t start, + vm_map_offset_t end); decl_simple_lock_data(extern, vm_paging_lock) /* @@ -490,9 +489,9 @@ decl_simple_lock_data(extern, vm_paging_lock) extern unsigned int vm_backing_store_low; extern void vm_pageout_steal_laundry( - vm_page_t page, + vm_page_t page, boolean_t queues_locked); - + #endif /* MACH_KERNEL_PRIVATE */ #if UPL_DEBUG @@ -509,46 +508,46 @@ extern int upl_ubc_alias_get( extern void vm_countdirtypages(void); extern void vm_backing_store_disable( - boolean_t suspend); + boolean_t suspend); extern kern_return_t upl_transpose( - upl_t upl1, - upl_t upl2); + upl_t upl1, + upl_t upl2); extern kern_return_t mach_vm_pressure_monitor( - boolean_t wait_for_pressure, - unsigned int nsecs_monitored, - unsigned int *pages_reclaimed_p, - unsigned int *pages_wanted_p); + boolean_t wait_for_pressure, + unsigned int nsecs_monitored, + unsigned int *pages_reclaimed_p, + unsigned int *pages_wanted_p); extern kern_return_t vm_set_buffer_cleanup_callout( - boolean_t (*func)(int)); + boolean_t (*func)(int)); struct vm_page_stats_reusable { - SInt32 reusable_count; - uint64_t reusable; - uint64_t reused; - uint64_t reused_wire; - uint64_t reused_remove; - uint64_t all_reusable_calls; - uint64_t partial_reusable_calls; - uint64_t all_reuse_calls; - uint64_t partial_reuse_calls; - uint64_t reusable_pages_success; - uint64_t reusable_pages_failure; - uint64_t reusable_pages_shared; - uint64_t reuse_pages_success; - uint64_t reuse_pages_failure; - uint64_t can_reuse_success; - uint64_t can_reuse_failure; - uint64_t reusable_reclaimed; - uint64_t reusable_nonwritable; - uint64_t reusable_shared; - uint64_t free_shared; + SInt32 reusable_count; + uint64_t reusable; + uint64_t reused; + uint64_t reused_wire; + uint64_t reused_remove; + uint64_t all_reusable_calls; + uint64_t partial_reusable_calls; + uint64_t all_reuse_calls; + uint64_t partial_reuse_calls; + uint64_t reusable_pages_success; + uint64_t reusable_pages_failure; + uint64_t reusable_pages_shared; + uint64_t reuse_pages_success; + uint64_t reuse_pages_failure; + uint64_t can_reuse_success; + uint64_t can_reuse_failure; + uint64_t reusable_reclaimed; + uint64_t reusable_nonwritable; + uint64_t reusable_shared; + uint64_t free_shared; }; extern struct vm_page_stats_reusable vm_page_stats_reusable; - + extern int hibernate_flush_memory(void); extern void hibernate_reset_stats(void); extern void hibernate_create_paddr_map(void); @@ -561,76 +560,76 @@ extern void vm_pageout_anonymous_pages(void); extern void vm_pageout_disconnect_all_pages(void); -struct vm_config { - boolean_t compressor_is_present; /* compressor is initialized and can be used by the freezer, the sweep or the pager */ - boolean_t compressor_is_active; /* pager can actively compress pages... 'compressor_is_present' must be set */ - boolean_t swap_is_present; /* swap is initialized and can be used by the freezer, the sweep or the pager */ - boolean_t swap_is_active; /* pager can actively swap out compressed segments... 'swap_is_present' must be set */ - boolean_t freezer_swap_is_active; /* freezer can swap out frozen tasks... 
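
The vm_page_stats_reusable counters above account for pages an application has marked reusable and later reclaimed or reused. On macOS the user-facing side of this machinery is the MADV_FREE_REUSABLE / MADV_FREE_REUSE advice pair; the sketch below is a hedged, macOS-specific illustration (guarded so it still compiles where those flags are absent) and is not taken from xnu itself.

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 16 * (size_t)getpagesize();
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_ANON | MAP_PRIVATE, -1, 0);
        if (buf == MAP_FAILED) { perror("mmap"); return 1; }

        memset(buf, 0xA5, len);                  /* dirty the pages */

    #ifdef MADV_FREE_REUSABLE
        /* Offer the pages back to the VM; this is the path expected to
         * feed the "reusable" side of vm_page_stats_reusable. */
        if (madvise(buf, len, MADV_FREE_REUSABLE) != 0) perror("madvise");

        /* Claim them back before reuse ("reused" side of the stats). */
        if (madvise(buf, len, MADV_FREE_REUSE) != 0) perror("madvise");
    #endif

        memset(buf, 0x5A, len);
        munmap(buf, len);
        return 0;
    }
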
"compressor_is_present + swap_is_present" must be set */ +struct vm_config { + boolean_t compressor_is_present; /* compressor is initialized and can be used by the freezer, the sweep or the pager */ + boolean_t compressor_is_active; /* pager can actively compress pages... 'compressor_is_present' must be set */ + boolean_t swap_is_present; /* swap is initialized and can be used by the freezer, the sweep or the pager */ + boolean_t swap_is_active; /* pager can actively swap out compressed segments... 'swap_is_present' must be set */ + boolean_t freezer_swap_is_active; /* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */ }; -extern struct vm_config vm_config; +extern struct vm_config vm_config; -#define VM_PAGER_NOT_CONFIGURED 0x0 /* no compresser or swap configured */ -#define VM_PAGER_DEFAULT 0x1 /* Use default pager... DEPRECATED */ -#define VM_PAGER_COMPRESSOR_NO_SWAP 0x2 /* Active in-core compressor only. */ -#define VM_PAGER_COMPRESSOR_WITH_SWAP 0x4 /* Active in-core compressor + swap backend. */ -#define VM_PAGER_FREEZER_DEFAULT 0x8 /* Freezer backed by default pager... DEPRECATED */ -#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP 0x10 /* Freezer backed by in-core compressor only i.e. frozen data remain in-core compressed.*/ -#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP 0x20 /* Active in-core compressor + Freezer backed by in-core compressor with swap support too.*/ +#define VM_PAGER_NOT_CONFIGURED 0x0 /* no compresser or swap configured */ +#define VM_PAGER_DEFAULT 0x1 /* Use default pager... DEPRECATED */ +#define VM_PAGER_COMPRESSOR_NO_SWAP 0x2 /* Active in-core compressor only. */ +#define VM_PAGER_COMPRESSOR_WITH_SWAP 0x4 /* Active in-core compressor + swap backend. */ +#define VM_PAGER_FREEZER_DEFAULT 0x8 /* Freezer backed by default pager... DEPRECATED */ +#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP 0x10 /* Freezer backed by in-core compressor only i.e. 
frozen data remain in-core compressed.*/ +#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP 0x20 /* Active in-core compressor + Freezer backed by in-core compressor with swap support too.*/ -#define VM_PAGER_MAX_MODES 6 /* Total number of vm compressor modes supported */ +#define VM_PAGER_MAX_MODES 6 /* Total number of vm compressor modes supported */ -#define VM_CONFIG_COMPRESSOR_IS_PRESENT (vm_config.compressor_is_present == TRUE) -#define VM_CONFIG_COMPRESSOR_IS_ACTIVE (vm_config.compressor_is_active == TRUE) -#define VM_CONFIG_SWAP_IS_PRESENT (vm_config.swap_is_present == TRUE) -#define VM_CONFIG_SWAP_IS_ACTIVE (vm_config.swap_is_active == TRUE) -#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE (vm_config.freezer_swap_is_active == TRUE) +#define VM_CONFIG_COMPRESSOR_IS_PRESENT (vm_config.compressor_is_present == TRUE) +#define VM_CONFIG_COMPRESSOR_IS_ACTIVE (vm_config.compressor_is_active == TRUE) +#define VM_CONFIG_SWAP_IS_PRESENT (vm_config.swap_is_present == TRUE) +#define VM_CONFIG_SWAP_IS_ACTIVE (vm_config.swap_is_active == TRUE) +#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE (vm_config.freezer_swap_is_active == TRUE) -#endif /* KERNEL_PRIVATE */ +#endif /* KERNEL_PRIVATE */ #ifdef XNU_KERNEL_PRIVATE struct vm_pageout_state { - boolean_t vm_pressure_thread_running; - boolean_t vm_pressure_changed; - boolean_t vm_restricted_to_single_processor; - int vm_compressor_thread_count; - - unsigned int vm_page_speculative_q_age_ms; - unsigned int vm_page_speculative_percentage; - unsigned int vm_page_speculative_target; - - unsigned int vm_pageout_swap_wait; - unsigned int vm_pageout_idle_wait; /* milliseconds */ - unsigned int vm_pageout_empty_wait; /* milliseconds */ - unsigned int vm_pageout_burst_wait; /* milliseconds */ - unsigned int vm_pageout_deadlock_wait; /* milliseconds */ - unsigned int vm_pageout_deadlock_relief; - unsigned int vm_pageout_burst_inactive_throttle; - - unsigned int vm_pageout_inactive; - unsigned int vm_pageout_inactive_used; /* debugging */ - unsigned int vm_pageout_inactive_clean; /* debugging */ - - uint32_t vm_page_filecache_min; - uint32_t vm_page_filecache_min_divisor; - uint32_t vm_page_xpmapped_min; - uint32_t vm_page_xpmapped_min_divisor; - uint64_t vm_pageout_considered_page_last; - - int vm_page_free_count_init; - - unsigned int vm_memory_pressure; - - int memorystatus_purge_on_critical; - int memorystatus_purge_on_warning; - int memorystatus_purge_on_urgent; - - thread_t vm_pageout_external_iothread; - thread_t vm_pageout_internal_iothread; + boolean_t vm_pressure_thread_running; + boolean_t vm_pressure_changed; + boolean_t vm_restricted_to_single_processor; + int vm_compressor_thread_count; + + unsigned int vm_page_speculative_q_age_ms; + unsigned int vm_page_speculative_percentage; + unsigned int vm_page_speculative_target; + + unsigned int vm_pageout_swap_wait; + unsigned int vm_pageout_idle_wait; /* milliseconds */ + unsigned int vm_pageout_empty_wait; /* milliseconds */ + unsigned int vm_pageout_burst_wait; /* milliseconds */ + unsigned int vm_pageout_deadlock_wait; /* milliseconds */ + unsigned int vm_pageout_deadlock_relief; + unsigned int vm_pageout_burst_inactive_throttle; + + unsigned int vm_pageout_inactive; + unsigned int vm_pageout_inactive_used; /* debugging */ + unsigned int vm_pageout_inactive_clean; /* debugging */ + + uint32_t vm_page_filecache_min; + uint32_t vm_page_filecache_min_divisor; + uint32_t vm_page_xpmapped_min; + uint32_t vm_page_xpmapped_min_divisor; + uint64_t vm_pageout_considered_page_last; + + int 
vm_page_free_count_init; + + unsigned int vm_memory_pressure; + + int memorystatus_purge_on_critical; + int memorystatus_purge_on_warning; + int memorystatus_purge_on_urgent; + + thread_t vm_pageout_external_iothread; + thread_t vm_pageout_internal_iothread; }; extern struct vm_pageout_state vm_pageout_state; @@ -639,35 +638,35 @@ extern struct vm_pageout_state vm_pageout_state; * This structure is used to track the VM_INFO instrumentation */ struct vm_pageout_vminfo { - unsigned long vm_pageout_considered_page; - unsigned long vm_pageout_considered_bq_internal; - unsigned long vm_pageout_considered_bq_external; - unsigned long vm_pageout_skipped_external; - - unsigned long vm_pageout_pages_evicted; - unsigned long vm_pageout_pages_purged;; - unsigned long vm_pageout_freed_cleaned; - unsigned long vm_pageout_freed_speculative; - unsigned long vm_pageout_freed_external; - unsigned long vm_pageout_freed_internal; - unsigned long vm_pageout_inactive_dirty_internal; - unsigned long vm_pageout_inactive_dirty_external; - unsigned long vm_pageout_inactive_referenced; - unsigned long vm_pageout_reactivation_limit_exceeded; - unsigned long vm_pageout_inactive_force_reclaim; - unsigned long vm_pageout_inactive_nolock; - unsigned long vm_pageout_filecache_min_reactivated; - unsigned long vm_pageout_scan_inactive_throttled_internal; - unsigned long vm_pageout_scan_inactive_throttled_external; - - uint64_t vm_pageout_compressions; - uint64_t vm_compressor_pages_grabbed; - unsigned long vm_compressor_failed; - - unsigned long vm_page_pages_freed; - - unsigned long vm_phantom_cache_found_ghost; - unsigned long vm_phantom_cache_added_ghost; + unsigned long vm_pageout_considered_page; + unsigned long vm_pageout_considered_bq_internal; + unsigned long vm_pageout_considered_bq_external; + unsigned long vm_pageout_skipped_external; + + unsigned long vm_pageout_pages_evicted; + unsigned long vm_pageout_pages_purged;; + unsigned long vm_pageout_freed_cleaned; + unsigned long vm_pageout_freed_speculative; + unsigned long vm_pageout_freed_external; + unsigned long vm_pageout_freed_internal; + unsigned long vm_pageout_inactive_dirty_internal; + unsigned long vm_pageout_inactive_dirty_external; + unsigned long vm_pageout_inactive_referenced; + unsigned long vm_pageout_reactivation_limit_exceeded; + unsigned long vm_pageout_inactive_force_reclaim; + unsigned long vm_pageout_inactive_nolock; + unsigned long vm_pageout_filecache_min_reactivated; + unsigned long vm_pageout_scan_inactive_throttled_internal; + unsigned long vm_pageout_scan_inactive_throttled_external; + + uint64_t vm_pageout_compressions; + uint64_t vm_compressor_pages_grabbed; + unsigned long vm_compressor_failed; + + unsigned long vm_page_pages_freed; + + unsigned long vm_phantom_cache_found_ghost; + unsigned long vm_phantom_cache_added_ghost; }; extern struct vm_pageout_vminfo vm_pageout_vminfo; @@ -681,67 +680,67 @@ extern struct vm_pageout_vminfo vm_pageout_vminfo; * No locking needed because only one thread modifies the fields. 
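
The comment here notes that no locking is needed because a single thread updates these statistics; readers may observe slightly stale values, which is acceptable for instrumentation. A portable user-space analogue of that single-writer pattern, using relaxed C11 atomics to keep loads and stores tear-free (a choice of this sketch, not something the kernel header does):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Single-writer statistics: one thread increments, others read.
     * Relaxed ordering is enough because no reader makes decisions
     * that require synchronizing with the writer. */
    struct scan_stats {
        atomic_ulong pages_considered;
        atomic_ulong pages_freed;
    };

    static struct scan_stats stats;

    static void writer_step(void)   /* called only from the scan thread */
    {
        atomic_fetch_add_explicit(&stats.pages_considered, 1,
                                  memory_order_relaxed);
    }

    static unsigned long reader_snapshot(void)  /* any thread */
    {
        return atomic_load_explicit(&stats.pages_considered,
                                    memory_order_relaxed);
    }

    int main(void)
    {
        for (int i = 0; i < 1000; i++) writer_step();
        printf("considered: %lu\n", reader_snapshot());
        return 0;
    }
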
*/ struct vm_pageout_debug { - uint32_t vm_pageout_balanced; - uint32_t vm_pageout_scan_event_counter; - uint32_t vm_pageout_speculative_dirty; - - uint32_t vm_pageout_inactive_busy; - uint32_t vm_pageout_inactive_absent; - uint32_t vm_pageout_inactive_notalive; - uint32_t vm_pageout_inactive_error; - uint32_t vm_pageout_inactive_deactivated; - - uint32_t vm_pageout_enqueued_cleaned; - - uint32_t vm_pageout_cleaned_busy; - uint32_t vm_pageout_cleaned_nolock; - uint32_t vm_pageout_cleaned_reference_reactivated; - uint32_t vm_pageout_cleaned_volatile_reactivated; - uint32_t vm_pageout_cleaned_reactivated; /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */ - uint32_t vm_pageout_cleaned_fault_reactivated; - - uint32_t vm_pageout_dirty_no_pager; - uint32_t vm_pageout_purged_objects; - - uint32_t vm_pageout_scan_throttle; - uint32_t vm_pageout_scan_reclaimed_throttled; - uint32_t vm_pageout_scan_burst_throttle; - uint32_t vm_pageout_scan_empty_throttle; - uint32_t vm_pageout_scan_swap_throttle; - uint32_t vm_pageout_scan_deadlock_detected; - uint32_t vm_pageout_scan_inactive_throttle_success; - uint32_t vm_pageout_scan_throttle_deferred; - - uint32_t vm_pageout_inactive_external_forced_jetsam_count; - - uint32_t vm_grab_anon_overrides; - uint32_t vm_grab_anon_nops; - - uint32_t vm_pageout_no_victim; - unsigned long vm_pageout_throttle_up_count; - uint32_t vm_page_steal_pageout_page; - - uint32_t vm_cs_validated_resets; - uint32_t vm_object_iopl_request_sleep_for_cleaning; - uint32_t vm_page_slide_counter; - uint32_t vm_page_slide_errors; - uint32_t vm_page_throttle_count; - /* + uint32_t vm_pageout_balanced; + uint32_t vm_pageout_scan_event_counter; + uint32_t vm_pageout_speculative_dirty; + + uint32_t vm_pageout_inactive_busy; + uint32_t vm_pageout_inactive_absent; + uint32_t vm_pageout_inactive_notalive; + uint32_t vm_pageout_inactive_error; + uint32_t vm_pageout_inactive_deactivated; + + uint32_t vm_pageout_enqueued_cleaned; + + uint32_t vm_pageout_cleaned_busy; + uint32_t vm_pageout_cleaned_nolock; + uint32_t vm_pageout_cleaned_reference_reactivated; + uint32_t vm_pageout_cleaned_volatile_reactivated; + uint32_t vm_pageout_cleaned_reactivated; /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */ + uint32_t vm_pageout_cleaned_fault_reactivated; + + uint32_t vm_pageout_dirty_no_pager; + uint32_t vm_pageout_purged_objects; + + uint32_t vm_pageout_scan_throttle; + uint32_t vm_pageout_scan_reclaimed_throttled; + uint32_t vm_pageout_scan_burst_throttle; + uint32_t vm_pageout_scan_empty_throttle; + uint32_t vm_pageout_scan_swap_throttle; + uint32_t vm_pageout_scan_deadlock_detected; + uint32_t vm_pageout_scan_inactive_throttle_success; + uint32_t vm_pageout_scan_throttle_deferred; + + uint32_t vm_pageout_inactive_external_forced_jetsam_count; + + uint32_t vm_grab_anon_overrides; + uint32_t vm_grab_anon_nops; + + uint32_t vm_pageout_no_victim; + unsigned long vm_pageout_throttle_up_count; + uint32_t vm_page_steal_pageout_page; + + uint32_t vm_cs_validated_resets; + uint32_t vm_object_iopl_request_sleep_for_cleaning; + uint32_t vm_page_slide_counter; + uint32_t vm_page_slide_errors; + uint32_t vm_page_throttle_count; + /* * Statistics about UPL enforcement of copy-on-write obligations. 
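
The VM_PAGEOUT_DEBUG(member, value) macro defined just below bumps a named field of vm_pageout_debug and, in configurations without the debug struct, expands to nothing. A generic sketch of that compile-away statistics idiom; DEVELOPMENT_STATS and the field names are hypothetical stand-ins for the kernel's configuration switch (build with -DDEVELOPMENT_STATS=1 to enable):

    #include <stdio.h>

    struct debug_stats {
        unsigned long cleaned_busy;
        unsigned long no_pager;
    };
    static struct debug_stats dbg;

    #if DEVELOPMENT_STATS
    #define STAT_BUMP(member, value)  do { dbg.member += (value); } while (0)
    #else
    #define STAT_BUMP(member, value)  /* compiled away: zero release cost */
    #endif

    int main(void)
    {
        (void)dbg;
        STAT_BUMP(cleaned_busy, 1);
    #if DEVELOPMENT_STATS
        printf("cleaned_busy = %lu\n", dbg.cleaned_busy);
    #endif
        return 0;
    }
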
*/ - unsigned long upl_cow; - unsigned long upl_cow_again; - unsigned long upl_cow_pages; - unsigned long upl_cow_again_pages; - unsigned long iopl_cow; - unsigned long iopl_cow_pages; + unsigned long upl_cow; + unsigned long upl_cow_again; + unsigned long upl_cow_pages; + unsigned long upl_cow_again_pages; + unsigned long iopl_cow; + unsigned long iopl_cow_pages; }; extern struct vm_pageout_debug vm_pageout_debug; -#define VM_PAGEOUT_DEBUG(member, value) \ - MACRO_BEGIN \ - vm_pageout_debug.member += value; \ +#define VM_PAGEOUT_DEBUG(member, value) \ + MACRO_BEGIN \ + vm_pageout_debug.member += value; \ MACRO_END #else #define VM_PAGEOUT_DEBUG(member, value) @@ -760,4 +759,4 @@ typedef struct vmct_stats_s { } vmct_stats_t; #endif #endif -#endif /* _VM_VM_PAGEOUT_H_ */ +#endif /* _VM_VM_PAGEOUT_H_ */ diff --git a/osfmk/vm/vm_phantom_cache.c b/osfmk/vm/vm_phantom_cache.c index 95bdaa27e..01e0711b3 100644 --- a/osfmk/vm/vm_phantom_cache.c +++ b/osfmk/vm/vm_phantom_cache.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -52,31 +52,31 @@ unsigned phantom_cache_contiguous_periods = 4; unsigned phantom_cache_contiguous_periods = 2; #endif -clock_sec_t pc_start_of_eval_period_sec = 0; -clock_nsec_t pc_start_of_eval_period_nsec = 0; -boolean_t pc_need_eval_reset = FALSE; +clock_sec_t pc_start_of_eval_period_sec = 0; +clock_nsec_t pc_start_of_eval_period_nsec = 0; +boolean_t pc_need_eval_reset = FALSE; /* One bit per recent sampling period. Bit 0 = current period. 
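
pc_history keeps one bit per recent sampling period, newest in bit 0: the mask shifts left at each period boundary and pressure is only reported once the low phantom_cache_contiguous_periods bits are all set (that check appears later in vm_phantom_cache_check_pressure). A small sketch of the same bit-history technique, with the shift/set ordering simplified:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sliding window of per-period outcomes, newest in bit 0. */
    static uint32_t history;

    static void end_of_period(bool thrashed)
    {
        history <<= 1;          /* age every prior period by one bit */
        if (thrashed)
            history |= 1;       /* record the period just ended */
    }

    /* True when the last n periods were all flagged. */
    static bool sustained(unsigned n)
    {
        uint32_t mask = (1u << n) - 1;
        return (history & mask) == mask;
    }

    int main(void)
    {
        bool samples[] = { true, true, false, true, true };
        for (size_t i = 0; i < sizeof samples / sizeof samples[0]; i++) {
            end_of_period(samples[i]);
            printf("period %zu: sustained(2)=%d\n", i, sustained(2));
        }
        return 0;
    }
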
*/ -uint32_t pc_history = 0; +uint32_t pc_history = 0; -uint32_t sample_period_ghost_added_count = 0; -uint32_t sample_period_ghost_added_count_ssd = 0; -uint32_t sample_period_ghost_found_count = 0; -uint32_t sample_period_ghost_found_count_ssd = 0; +uint32_t sample_period_ghost_added_count = 0; +uint32_t sample_period_ghost_added_count_ssd = 0; +uint32_t sample_period_ghost_found_count = 0; +uint32_t sample_period_ghost_found_count_ssd = 0; -uint32_t vm_phantom_object_id = 1; -#define VM_PHANTOM_OBJECT_ID_AFTER_WRAP 1000000 +uint32_t vm_phantom_object_id = 1; +#define VM_PHANTOM_OBJECT_ID_AFTER_WRAP 1000000 -vm_ghost_t vm_phantom_cache; -uint32_t vm_phantom_cache_nindx = 1; -uint32_t vm_phantom_cache_num_entries = 0; -uint32_t vm_phantom_cache_size; +vm_ghost_t vm_phantom_cache; +uint32_t vm_phantom_cache_nindx = 1; +uint32_t vm_phantom_cache_num_entries = 0; +uint32_t vm_phantom_cache_size; -typedef uint32_t vm_phantom_hash_entry_t; -vm_phantom_hash_entry_t *vm_phantom_cache_hash; -uint32_t vm_phantom_cache_hash_size; -uint32_t vm_ghost_hash_mask; /* Mask for hash function */ -uint32_t vm_ghost_bucket_hash; /* Basic bucket hash */ +typedef uint32_t vm_phantom_hash_entry_t; +vm_phantom_hash_entry_t *vm_phantom_cache_hash; +uint32_t vm_phantom_cache_hash_size; +uint32_t vm_ghost_hash_mask; /* Mask for hash function */ +uint32_t vm_ghost_bucket_hash; /* Basic bucket hash */ int pg_masks[4] = { @@ -85,20 +85,20 @@ int pg_masks[4] = { #define vm_phantom_hash(obj_id, offset) (\ - ( (natural_t)((uintptr_t)obj_id * vm_ghost_bucket_hash) + (offset ^ vm_ghost_bucket_hash)) & vm_ghost_hash_mask) + ( (natural_t)((uintptr_t)obj_id * vm_ghost_bucket_hash) + (offset ^ vm_ghost_bucket_hash)) & vm_ghost_hash_mask) struct phantom_cache_stats { - uint32_t pcs_wrapped; - uint32_t pcs_added_page_to_entry; - uint32_t pcs_added_new_entry; - uint32_t pcs_replaced_entry; + uint32_t pcs_wrapped; + uint32_t pcs_added_page_to_entry; + uint32_t pcs_added_new_entry; + uint32_t pcs_replaced_entry; - uint32_t pcs_lookup_found_page_in_cache; - uint32_t pcs_lookup_entry_not_in_cache; - uint32_t pcs_lookup_page_not_in_entry; + uint32_t pcs_lookup_found_page_in_cache; + uint32_t pcs_lookup_entry_not_in_cache; + uint32_t pcs_lookup_page_not_in_entry; - uint32_t pcs_updated_phantom_state; + uint32_t pcs_updated_phantom_state; } phantom_cache_stats; @@ -106,12 +106,13 @@ struct phantom_cache_stats { void vm_phantom_cache_init() { - unsigned int num_entries; - unsigned int log1; - unsigned int size; + unsigned int num_entries; + unsigned int log1; + unsigned int size; - if ( !VM_CONFIG_COMPRESSOR_IS_ACTIVE) + if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) { return; + } #if CONFIG_EMBEDDED num_entries = (uint32_t)(((max_mem / PAGE_SIZE) / 10) / VM_GHOST_PAGES_PER_ENTRY); #else @@ -119,18 +120,28 @@ vm_phantom_cache_init() #endif vm_phantom_cache_num_entries = 1; - while (vm_phantom_cache_num_entries < num_entries) + while (vm_phantom_cache_num_entries < num_entries) { vm_phantom_cache_num_entries <<= 1; + } + + /* + * We index this with g_next_index, so don't exceed the width of that bitfield. 
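
vm_phantom_cache_init rounds the requested entry count up to a power of two and, per the newly added comment, clamps it so indices still fit in the g_next_index bitfield (21 bits, given the header's 64 - 39 - 4 layout shown further down). A standalone sketch of that sizing logic:

    #include <stdint.h>
    #include <stdio.h>

    #define INDEX_BITS 21   /* mirrors VM_GHOST_INDEX_BITS = 64 - 39 - 4 */

    static uint32_t table_entries(uint32_t wanted)
    {
        uint32_t n = 1;
        while (n < wanted)            /* round up to a power of two */
            n <<= 1;
        if (n > (1u << INDEX_BITS))   /* don't overflow the index bitfield */
            n = 1u << INDEX_BITS;
        return n;
    }

    int main(void)
    {
        printf("%u\n", table_entries(3000));      /* 4096 */
        printf("%u\n", table_entries(5u << 20));  /* clamped to 2097152 */
        return 0;
    }
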
+ */ + if (vm_phantom_cache_num_entries > (1 << VM_GHOST_INDEX_BITS)) { + vm_phantom_cache_num_entries = (1 << VM_GHOST_INDEX_BITS); + } vm_phantom_cache_size = sizeof(struct vm_ghost) * vm_phantom_cache_num_entries; vm_phantom_cache_hash_size = sizeof(vm_phantom_hash_entry_t) * vm_phantom_cache_num_entries; - if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&vm_phantom_cache), vm_phantom_cache_size, 0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PHANTOM_CACHE) != KERN_SUCCESS) + if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&vm_phantom_cache), vm_phantom_cache_size, 0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PHANTOM_CACHE) != KERN_SUCCESS) { panic("vm_phantom_cache_init: kernel_memory_allocate failed\n"); + } bzero(vm_phantom_cache, vm_phantom_cache_size); - if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&vm_phantom_cache_hash), vm_phantom_cache_hash_size, 0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PHANTOM_CACHE) != KERN_SUCCESS) + if (kernel_memory_allocate(kernel_map, (vm_offset_t *)(&vm_phantom_cache_hash), vm_phantom_cache_hash_size, 0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PHANTOM_CACHE) != KERN_SUCCESS) { panic("vm_phantom_cache_init: kernel_memory_allocate failed\n"); + } bzero(vm_phantom_cache_hash, vm_phantom_cache_hash_size); @@ -144,26 +155,28 @@ vm_phantom_cache_init() * B/2 - O */ size = vm_phantom_cache_num_entries; - for (log1 = 0; size > 1; log1++) + for (log1 = 0; size > 1; log1++) { size /= 2; - - vm_ghost_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */ - vm_ghost_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */ - vm_ghost_bucket_hash |= 1; /* Set bit and add 1 - always must be 1 to insure unique series */ + } - if (vm_ghost_hash_mask & vm_phantom_cache_num_entries) + vm_ghost_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */ + vm_ghost_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */ + vm_ghost_bucket_hash |= 1; /* Set bit and add 1 - always must be 1 to insure unique series */ + + if (vm_ghost_hash_mask & vm_phantom_cache_num_entries) { printf("vm_phantom_cache_init: WARNING -- strange page hash\n"); + } } void vm_phantom_cache_add_ghost(vm_page_t m) { - vm_ghost_t vpce; - vm_object_t object; - int ghost_index; - int pg_mask; - boolean_t isSSD = FALSE; + vm_ghost_t vpce; + vm_object_t object; + int ghost_index; + int pg_mask; + boolean_t isSSD = FALSE; vm_phantom_hash_entry_t ghost_hash_index; object = VM_PAGE_OBJECT(m); @@ -171,26 +184,28 @@ vm_phantom_cache_add_ghost(vm_page_t m) LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); vm_object_lock_assert_exclusive(object); - if (vm_phantom_cache_num_entries == 0) + if (vm_phantom_cache_num_entries == 0) { return; - + } + pg_mask = pg_masks[(m->vmp_offset >> PAGE_SHIFT) & VM_GHOST_PAGE_MASK]; if (object->phantom_object_id == 0) { - vnode_pager_get_isSSD(object->pager, &isSSD); - if (isSSD == TRUE) + if (isSSD == TRUE) { object->phantom_isssd = TRUE; + } object->phantom_object_id = vm_phantom_object_id++; - - if (vm_phantom_object_id == 0) + + if (vm_phantom_object_id == 0) { vm_phantom_object_id = VM_PHANTOM_OBJECT_ID_AFTER_WRAP; + } } else { - if ( (vpce = vm_phantom_cache_lookup_ghost(m, 0)) ) { + if ((vpce = vm_phantom_cache_lookup_ghost(m, 0))) { vpce->g_pages_held |= pg_mask; - + phantom_cache_stats.pcs_added_page_to_entry++; goto done; } @@ -215,7 +230,7 @@ vm_phantom_cache_add_ghost(vm_page_t m) * we're going to replace an existing entry 
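
The initializer above builds vm_ghost_bucket_hash from the table size: roughly the ceiling of its square root OR'd with the ceiling of its fourth root, with the low bit forced on so the multiplier stays odd (the comment's "unique series"). The sketch below reproduces the arithmetic and the vm_phantom_hash mixing step; it assumes the hash mask is entries - 1, which the power-of-two sanity check at the end of init implies but this excerpt does not show being assigned.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t bucket_hash, hash_mask;

    static void hash_init(uint32_t entries)   /* entries: a power of two */
    {
        uint32_t log1 = 0;
        for (uint32_t size = entries; size > 1; log1++)
            size /= 2;

        bucket_hash  = 1u << ((log1 + 1) >> 1);  /* ~ceil(sqrt(entries)) */
        bucket_hash |= 1u << ((log1 + 1) >> 2);  /* ~ceil(entries^(1/4)) */
        bucket_hash |= 1;                        /* keep the multiplier odd */
        hash_mask    = entries - 1;              /* assumption, see above */
    }

    static uint32_t phantom_hash(uint32_t obj_id, uint32_t offset)
    {
        return ((obj_id * bucket_hash) + (offset ^ bucket_hash)) & hash_mask;
    }

    int main(void)
    {
        hash_init(4096);
        printf("bucket_hash=%u\n", bucket_hash);   /* 64 | 8 | 1 = 73 */
        printf("h(1,0)=%u h(1,1)=%u h(2,0)=%u\n",
               phantom_hash(1, 0), phantom_hash(1, 1), phantom_hash(2, 0));
        return 0;
    }
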
* so first remove it from the hash */ - vm_ghost_t nvpce; + vm_ghost_t nvpce; ghost_hash_index = vm_phantom_hash(vpce->g_obj_id, vpce->g_obj_offset); @@ -225,8 +240,9 @@ vm_phantom_cache_add_ghost(vm_page_t m) vm_phantom_cache_hash[ghost_hash_index] = vpce->g_next_index; } else { for (;;) { - if (nvpce->g_next_index == 0) + if (nvpce->g_next_index == 0) { panic("didn't find ghost in hash\n"); + } if (&vm_phantom_cache[nvpce->g_next_index] == vpce) { nvpce->g_next_index = vpce->g_next_index; @@ -236,8 +252,9 @@ vm_phantom_cache_add_ghost(vm_page_t m) } } phantom_cache_stats.pcs_replaced_entry++; - } else + } else { phantom_cache_stats.pcs_added_new_entry++; + } vpce->g_pages_held = pg_mask; vpce->g_obj_offset = (m->vmp_offset >> (PAGE_SHIFT + VM_GHOST_PAGE_SHIFT)) & VM_GHOST_OFFSET_MASK; @@ -250,20 +267,21 @@ vm_phantom_cache_add_ghost(vm_page_t m) done: vm_pageout_vminfo.vm_phantom_cache_added_ghost++; - if (object->phantom_isssd) + if (object->phantom_isssd) { OSAddAtomic(1, &sample_period_ghost_added_count_ssd); - else + } else { OSAddAtomic(1, &sample_period_ghost_added_count); + } } vm_ghost_t vm_phantom_cache_lookup_ghost(vm_page_t m, uint32_t pg_mask) { - uint64_t g_obj_offset; - uint32_t g_obj_id; - uint32_t ghost_index; - vm_object_t object; + uint64_t g_obj_offset; + uint32_t g_obj_id; + uint32_t ghost_index; + vm_object_t object; object = VM_PAGE_OBJECT(m); @@ -271,7 +289,7 @@ vm_phantom_cache_lookup_ghost(vm_page_t m, uint32_t pg_mask) /* * no entries in phantom cache for this object */ - return (NULL); + return NULL; } g_obj_offset = (m->vmp_offset >> (PAGE_SHIFT + VM_GHOST_PAGE_SHIFT)) & VM_GHOST_OFFSET_MASK; @@ -283,21 +301,20 @@ vm_phantom_cache_lookup_ghost(vm_page_t m, uint32_t pg_mask) vpce = &vm_phantom_cache[ghost_index]; if (vpce->g_obj_id == g_obj_id && vpce->g_obj_offset == g_obj_offset) { - if (pg_mask == 0 || (vpce->g_pages_held & pg_mask)) { phantom_cache_stats.pcs_lookup_found_page_in_cache++; - return (vpce); + return vpce; } phantom_cache_stats.pcs_lookup_page_not_in_entry++; - return (NULL); + return NULL; } ghost_index = vpce->g_next_index; } phantom_cache_stats.pcs_lookup_entry_not_in_cache++; - return (NULL); + return NULL; } @@ -305,48 +322,49 @@ vm_phantom_cache_lookup_ghost(vm_page_t m, uint32_t pg_mask) void vm_phantom_cache_update(vm_page_t m) { - int pg_mask; + int pg_mask; vm_ghost_t vpce; - vm_object_t object; + vm_object_t object; object = VM_PAGE_OBJECT(m); LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); vm_object_lock_assert_exclusive(object); - if (vm_phantom_cache_num_entries == 0) + if (vm_phantom_cache_num_entries == 0) { return; - + } + pg_mask = pg_masks[(m->vmp_offset >> PAGE_SHIFT) & VM_GHOST_PAGE_MASK]; - - if ( (vpce = vm_phantom_cache_lookup_ghost(m, pg_mask)) ) { + if ((vpce = vm_phantom_cache_lookup_ghost(m, pg_mask))) { vpce->g_pages_held &= ~pg_mask; phantom_cache_stats.pcs_updated_phantom_state++; vm_pageout_vminfo.vm_phantom_cache_found_ghost++; - if (object->phantom_isssd) + if (object->phantom_isssd) { OSAddAtomic(1, &sample_period_ghost_found_count_ssd); - else + } else { OSAddAtomic(1, &sample_period_ghost_found_count); + } } } -#define PHANTOM_CACHE_DEBUG 1 +#define PHANTOM_CACHE_DEBUG 1 -#if PHANTOM_CACHE_DEBUG +#if PHANTOM_CACHE_DEBUG -int sample_period_ghost_counts_indx = 0; +int sample_period_ghost_counts_indx = 0; struct { - uint32_t added; - uint32_t found; - uint32_t added_ssd; - uint32_t found_ssd; - uint32_t elapsed_ms; - boolean_t pressure_detected; + uint32_t added; + uint32_t found; + uint32_t 
added_ssd; + uint32_t found_ssd; + uint32_t elapsed_ms; + boolean_t pressure_detected; } sample_period_ghost_counts[256]; #endif @@ -362,8 +380,9 @@ static boolean_t is_thrashing(uint32_t added, uint32_t found, uint32_t threshold) { /* Ignore normal activity below the threshold. */ - if (added < threshold || found < threshold) + if (added < threshold || found < threshold) { return FALSE; + } /* * When thrashing in a way that we can mitigate, most of the pages read @@ -377,8 +396,9 @@ is_thrashing(uint32_t added, uint32_t found, uint32_t threshold) * This is not thrashing, or freeing up memory wouldn't help much * anyway. */ - if (found < added / 2) + if (found < added / 2) { return FALSE; + } return TRUE; } @@ -393,10 +413,10 @@ is_thrashing(uint32_t added, uint32_t found, uint32_t threshold) boolean_t vm_phantom_cache_check_pressure() { - clock_sec_t cur_ts_sec; - clock_nsec_t cur_ts_nsec; - uint64_t elapsed_msecs_in_eval; - boolean_t pressure_detected = FALSE; + clock_sec_t cur_ts_sec; + clock_nsec_t cur_ts_nsec; + uint64_t elapsed_msecs_in_eval; + boolean_t pressure_detected = FALSE; clock_get_system_nanotime(&cur_ts_sec, &cur_ts_nsec); @@ -411,7 +431,6 @@ vm_phantom_cache_check_pressure() } if (pc_need_eval_reset == TRUE) { - #if PHANTOM_CACHE_DEBUG /* * maintain some info about the last 256 sample periods @@ -424,8 +443,9 @@ vm_phantom_cache_check_pressure() sample_period_ghost_counts_indx++; - if (sample_period_ghost_counts_indx >= 256) + if (sample_period_ghost_counts_indx >= 256) { sample_period_ghost_counts_indx = 0; + } #endif sample_period_ghost_added_count = 0; sample_period_ghost_found_count = 0; @@ -446,11 +466,11 @@ vm_phantom_cache_check_pressure() * that info to maintains counts for both the SSD and spinning disk cases. */ if (is_thrashing(sample_period_ghost_added_count, - sample_period_ghost_found_count, - phantom_cache_thrashing_threshold) || + sample_period_ghost_found_count, + phantom_cache_thrashing_threshold) || is_thrashing(sample_period_ghost_added_count_ssd, - sample_period_ghost_found_count_ssd, - phantom_cache_thrashing_threshold_ssd)) { + sample_period_ghost_found_count_ssd, + phantom_cache_thrashing_threshold_ssd)) { /* Thrashing in the current period: Set bit 0. */ pc_history |= 1; } @@ -463,16 +483,18 @@ vm_phantom_cache_check_pressure() * in pc_history. The high bits of pc_history are ignored. */ uint32_t bitmask = (1u << phantom_cache_contiguous_periods) - 1; - if ((pc_history & bitmask) == bitmask) + if ((pc_history & bitmask) == bitmask) { pressure_detected = TRUE; + } - if (vm_page_external_count > ((AVAILABLE_MEMORY) * 50) / 100) + if (vm_page_external_count > ((AVAILABLE_MEMORY) * 50) / 100) { pressure_detected = FALSE; + } #if PHANTOM_CACHE_DEBUG sample_period_ghost_counts[sample_period_ghost_counts_indx].pressure_detected = pressure_detected; #endif - return (pressure_detected); + return pressure_detected; } /* diff --git a/osfmk/vm/vm_phantom_cache.h b/osfmk/vm/vm_phantom_cache.h index dcf0e5a54..24d73ac89 100644 --- a/osfmk/vm/vm_phantom_cache.h +++ b/osfmk/vm/vm_phantom_cache.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2013 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,32 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include -#define VM_GHOST_OFFSET_BITS 39 -#define VM_GHOST_OFFSET_MASK 0x7FFFFFFFFF -#define VM_GHOST_PAGES_PER_ENTRY 4 -#define VM_GHOST_PAGE_MASK 0x3 -#define VM_GHOST_PAGE_SHIFT 2 -#define VM_GHOST_INDEX_BITS (64 - VM_GHOST_OFFSET_BITS - VM_GHOST_PAGES_PER_ENTRY) - -struct vm_ghost -{ - uint64_t g_next_index:VM_GHOST_INDEX_BITS, - g_pages_held:VM_GHOST_PAGES_PER_ENTRY, - g_obj_offset:VM_GHOST_OFFSET_BITS; - uint32_t g_obj_id; +#define VM_GHOST_OFFSET_BITS 39 +#define VM_GHOST_OFFSET_MASK 0x7FFFFFFFFF +#define VM_GHOST_PAGES_PER_ENTRY 4 +#define VM_GHOST_PAGE_MASK 0x3 +#define VM_GHOST_PAGE_SHIFT 2 +#define VM_GHOST_INDEX_BITS (64 - VM_GHOST_OFFSET_BITS - VM_GHOST_PAGES_PER_ENTRY) +struct vm_ghost { + uint64_t g_next_index:VM_GHOST_INDEX_BITS, + g_pages_held:VM_GHOST_PAGES_PER_ENTRY, + g_obj_offset:VM_GHOST_OFFSET_BITS; + uint32_t g_obj_id; } __attribute__((packed)); -typedef struct vm_ghost *vm_ghost_t; +typedef struct vm_ghost *vm_ghost_t; -extern void vm_phantom_cache_init(void); -extern void vm_phantom_cache_add_ghost(vm_page_t); -extern vm_ghost_t vm_phantom_cache_lookup_ghost(vm_page_t, uint32_t); -extern void vm_phantom_cache_update(vm_page_t); -extern boolean_t vm_phantom_cache_check_pressure(void); -extern void vm_phantom_cache_restart_sample(void); +extern void vm_phantom_cache_init(void); +extern void vm_phantom_cache_add_ghost(vm_page_t); +extern vm_ghost_t vm_phantom_cache_lookup_ghost(vm_page_t, uint32_t); +extern void vm_phantom_cache_update(vm_page_t); +extern boolean_t vm_phantom_cache_check_pressure(void); +extern void vm_phantom_cache_restart_sample(void); diff --git a/osfmk/vm/vm_protos.h b/osfmk/vm/vm_protos.h index cf4ad88ce..43d45dbbe 100644 --- a/osfmk/vm/vm_protos.h +++ b/osfmk/vm/vm_protos.h @@ -2,7 +2,7 @@ * Copyright (c) 2004-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,11 +22,11 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#ifdef XNU_KERNEL_PRIVATE +#ifdef XNU_KERNEL_PRIVATE #ifndef _VM_VM_PROTOS_H_ #define _VM_VM_PROTOS_H_ @@ -53,10 +53,10 @@ extern "C" { * iokit */ extern kern_return_t device_data_action( - uintptr_t device_handle, + uintptr_t device_handle, ipc_port_t device_pager, - vm_prot_t protection, - vm_object_offset_t offset, + vm_prot_t protection, + vm_object_offset_t offset, vm_size_t size); extern kern_return_t device_close( @@ -69,28 +69,28 @@ extern boolean_t vm_swap_files_pinned(void); */ #ifndef _IPC_IPC_PORT_H_ extern mach_port_name_t ipc_port_copyout_send( - ipc_port_t sright, - ipc_space_t space); + ipc_port_t sright, + ipc_space_t space); extern task_t port_name_to_task( mach_port_name_t name); extern task_t port_name_to_task_inspect( mach_port_name_t name); extern void ipc_port_release_send( - ipc_port_t port); + ipc_port_t port); #endif /* _IPC_IPC_PORT_H_ */ extern ipc_space_t get_task_ipcspace( task_t t); #if CONFIG_MEMORYSTATUS -extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */ +extern int max_task_footprint_mb; /* Per-task limit on physical memory consumption in megabytes */ #endif /* CONFIG_MEMORYSTATUS */ /* Some loose-ends VM stuff */ -extern vm_map_t kalloc_map; -extern vm_size_t msg_ool_size_small; -extern vm_map_t zone_map; +extern vm_map_t kalloc_map; +extern vm_size_t msg_ool_size_small; +extern vm_map_t zone_map; extern void consider_machine_adjust(void); extern vm_map_offset_t get_map_min(vm_map_t); @@ -104,10 +104,10 @@ extern int get_map_nentries(vm_map_t); extern vm_map_offset_t vm_map_page_mask(vm_map_t); extern kern_return_t vm_map_purgable_control( - vm_map_t map, - vm_map_offset_t address, - vm_purgable_t control, - int *state); + vm_map_t map, + vm_map_offset_t address, + vm_purgable_t control, + int *state); extern kern_return_t vnode_pager_get_object_vnode( @@ -152,38 +152,38 @@ extern mach_vm_offset_t mach_get_vm_start(vm_map_t); extern mach_vm_offset_t mach_get_vm_end(vm_map_t); #if CONFIG_CODE_DECRYPTION -#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT +#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT #if VM_MAP_DEBUG_APPLE_PROTECT extern int vm_map_debug_apple_protect; #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ struct pager_crypt_info; extern kern_return_t vm_map_apple_protected( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_object_offset_t crypto_backing_offset, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_object_offset_t crypto_backing_offset, struct pager_crypt_info *crypt_info); extern void apple_protect_pager_bootstrap(void); extern memory_object_t apple_protect_pager_setup( - vm_object_t backing_object, - vm_object_offset_t backing_offset, - vm_object_offset_t crypto_backing_offset, + vm_object_t backing_object, + vm_object_offset_t backing_offset, + vm_object_offset_t crypto_backing_offset, struct pager_crypt_info *crypt_info, - vm_object_offset_t crypto_start, - vm_object_offset_t crypto_end); -#endif /* CONFIG_CODE_DECRYPTION */ + vm_object_offset_t crypto_start, + vm_object_offset_t crypto_end); 
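
vm_map_purgable_control above is the in-kernel worker behind Mach's purgeable-memory interface. From user space on macOS the equivalent pattern is allocating with VM_FLAGS_PURGABLE and flipping state with mach_vm_purgable_control; the sketch below is macOS-specific, minimally error-checked, and meant only to show the state machine (volatile pages may be discarded under pressure, and the old state comes back through the in/out parameter).

    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <mach/vm_purgable.h>
    #include <mach/vm_statistics.h>
    #include <stdio.h>

    int main(void)
    {
        mach_vm_address_t addr = 0;
        mach_vm_size_t    size = 4 * 4096;
        kern_return_t kr;

        /* Purgeable anonymous memory: reclaimable once marked volatile. */
        kr = mach_vm_allocate(mach_task_self(), &addr, size,
                              VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
        if (kr != KERN_SUCCESS) { printf("allocate: %d\n", kr); return 1; }

        int state = VM_PURGABLE_VOLATILE;   /* allow the VM to discard it */
        kr = mach_vm_purgable_control(mach_task_self(), addr,
                                      VM_PURGABLE_SET_STATE, &state);
        printf("set volatile: kr=%d\n", kr);

        state = VM_PURGABLE_NONVOLATILE;    /* reclaim; old state returned */
        kr = mach_vm_purgable_control(mach_task_self(), addr,
                                      VM_PURGABLE_SET_STATE, &state);
        printf("back to nonvolatile: kr=%d, was discarded: %d\n",
               kr, state == VM_PURGABLE_EMPTY);

        mach_vm_deallocate(mach_task_self(), addr, size);
        return 0;
    }
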
+#endif /* CONFIG_CODE_DECRYPTION */ struct vm_shared_region_slide_info; extern kern_return_t vm_map_shared_region( - vm_map_t map, - vm_map_offset_t start, - vm_map_offset_t end, - vm_object_offset_t backing_offset, + vm_map_t map, + vm_map_offset_t start, + vm_map_offset_t end, + vm_object_offset_t backing_offset, struct vm_shared_region_slide_info *slide_info); extern void shared_region_pager_bootstrap(void); extern memory_object_t shared_region_pager_setup( - vm_object_t backing_object, - vm_object_offset_t backing_offset, + vm_object_t backing_object, + vm_object_offset_t backing_offset, struct vm_shared_region_slide_info *slide_info); struct vnode; @@ -192,19 +192,19 @@ extern memory_object_t swapfile_pager_setup(struct vnode *vp); extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj); #if __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) -#define SIXTEENK_PAGE_SIZE 0x4000 -#define SIXTEENK_PAGE_MASK 0x3FFF -#define SIXTEENK_PAGE_SHIFT 14 +#define SIXTEENK_PAGE_SIZE 0x4000 +#define SIXTEENK_PAGE_MASK 0x3FFF +#define SIXTEENK_PAGE_SHIFT 14 #endif /* __arm64__ || ((__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)) */ #if __arm64__ -#define FOURK_PAGE_SIZE 0x1000 -#define FOURK_PAGE_MASK 0xFFF -#define FOURK_PAGE_SHIFT 12 +#define FOURK_PAGE_SIZE 0x1000 +#define FOURK_PAGE_MASK 0xFFF +#define FOURK_PAGE_SHIFT 12 extern unsigned int page_shift_user32; -#define VM_MAP_DEBUG_FOURK MACH_ASSERT +#define VM_MAP_DEBUG_FOURK MACH_ASSERT #if VM_MAP_DEBUG_FOURK extern int vm_map_debug_fourk; #endif /* VM_MAP_DEBUG_FOURK */ @@ -232,20 +232,20 @@ extern void vnode_setswapmount(struct vnode *); extern int64_t vnode_getswappin_avail(struct vnode *); extern void vnode_pager_was_dirtied( - struct vnode *, + struct vnode *, vm_object_offset_t, vm_object_offset_t); typedef int pager_return_t; -extern pager_return_t vnode_pagein( +extern pager_return_t vnode_pagein( struct vnode *, upl_t, upl_offset_t, vm_object_offset_t, upl_size_t, int, int *); -extern pager_return_t vnode_pageout( +extern pager_return_t vnode_pageout( struct vnode *, upl_t, upl_offset_t, vm_object_offset_t, upl_size_t, int, int *); -extern uint32_t vnode_trim (struct vnode *, int64_t offset, unsigned long len); +extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len); extern memory_object_t vnode_pager_setup( struct vnode *, memory_object_t); extern vm_object_offset_t vnode_pager_get_filesize( @@ -260,47 +260,47 @@ extern uint32_t vnode_pager_return_throttle_io_limit( struct vnode *, uint32_t *); extern kern_return_t vnode_pager_get_name( - struct vnode *vp, - char *pathname, - vm_size_t pathname_len, - char *filename, - vm_size_t filename_len, - boolean_t *truncated_path_p); + struct vnode *vp, + char *pathname, + vm_size_t pathname_len, + char *filename, + vm_size_t filename_len, + boolean_t *truncated_path_p); struct timespec; extern kern_return_t vnode_pager_get_mtime( - struct vnode *vp, - struct timespec *mtime, - struct timespec *cs_mtime); + struct vnode *vp, + struct timespec *mtime, + struct timespec *cs_mtime); extern kern_return_t vnode_pager_get_cs_blobs( - struct vnode *vp, - void **blobs); + struct vnode *vp, + void **blobs); #if CONFIG_IOSCHED void vnode_pager_issue_reprioritize_io( - struct vnode *devvp, - uint64_t blkno, - uint32_t len, - int priority); + struct vnode *devvp, + uint64_t blkno, + uint32_t len, + int priority); #endif -#if CHECK_CS_VALIDATION_BITMAP +#if CHECK_CS_VALIDATION_BITMAP /* used by the vnode_pager_cs_validation_bitmap routine*/ 
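
The CS_BITMAP_SET / CS_BITMAP_CLEAR / CS_BITMAP_CHECK constants just below select an operation on the per-vnode code-signing validation bitmap, which records one bit per page already validated. A generic sketch of such a page-granular bitmap (the layout here is illustrative, not the kernel's exact representation):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { BM_SET = 1, BM_CLEAR = 2, BM_CHECK = 3 };  /* mirrors CS_BITMAP_* */

    #define PAGE_SHIFT 12
    #define NBITS      (sizeof(uint64_t) * 8)

    /* One validation bit per page, packed into 64-bit words. */
    static int bitmap_op(uint64_t *bm, uint64_t offset, int op)
    {
        uint64_t page = offset >> PAGE_SHIFT;
        uint64_t word = page / NBITS;
        uint64_t bit  = 1ULL << (page % NBITS);

        switch (op) {
        case BM_SET:   bm[word] |=  bit; return 1;
        case BM_CLEAR: bm[word] &= ~bit; return 1;
        case BM_CHECK: return (bm[word] & bit) != 0;
        }
        return 0;
    }

    int main(void)
    {
        uint64_t bm[4];
        memset(bm, 0, sizeof bm);
        bitmap_op(bm, 0x3000, BM_SET);                           /* page 3 */
        printf("0x3000: %d\n", bitmap_op(bm, 0x3000, BM_CHECK)); /* 1 */
        printf("0x4000: %d\n", bitmap_op(bm, 0x4000, BM_CHECK)); /* 0 */
        return 0;
    }
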
-#define CS_BITMAP_SET 1 -#define CS_BITMAP_CLEAR 2 -#define CS_BITMAP_CHECK 3 +#define CS_BITMAP_SET 1 +#define CS_BITMAP_CLEAR 2 +#define CS_BITMAP_CHECK 3 #endif /* CHECK_CS_VALIDATION_BITMAP */ extern void vnode_pager_bootstrap(void); extern kern_return_t vnode_pager_data_unlock( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t size, - vm_prot_t desired_access); + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_size_t size, + vm_prot_t desired_access); extern kern_return_t vnode_pager_init( - memory_object_t, - memory_object_control_t, + memory_object_t, + memory_object_control_t, memory_object_cluster_size_t); extern kern_return_t vnode_pager_get_object_size( memory_object_t, @@ -308,8 +308,8 @@ extern kern_return_t vnode_pager_get_object_size( #if CONFIG_IOSCHED extern kern_return_t vnode_pager_get_object_devvp( - memory_object_t, - uintptr_t *); + memory_object_t, + uintptr_t *); #endif extern void vnode_pager_dirtied( @@ -326,31 +326,31 @@ extern kern_return_t vnode_pager_get_throttle_io_limit( memory_object_t, uint32_t *); extern kern_return_t vnode_pager_get_object_name( - memory_object_t mem_obj, - char *pathname, - vm_size_t pathname_len, - char *filename, - vm_size_t filename_len, - boolean_t *truncated_path_p); + memory_object_t mem_obj, + char *pathname, + vm_size_t pathname_len, + char *filename, + vm_size_t filename_len, + boolean_t *truncated_path_p); extern kern_return_t vnode_pager_get_object_mtime( - memory_object_t mem_obj, + memory_object_t mem_obj, struct timespec *mtime, - struct timespec *cs_mtime); + struct timespec *cs_mtime); -#if CHECK_CS_VALIDATION_BITMAP -extern kern_return_t vnode_pager_cs_check_validation_bitmap( - memory_object_t mem_obj, - memory_object_offset_t offset, - int optype); +#if CHECK_CS_VALIDATION_BITMAP +extern kern_return_t vnode_pager_cs_check_validation_bitmap( + memory_object_t mem_obj, + memory_object_offset_t offset, + int optype); #endif /*CHECK_CS_VALIDATION_BITMAP*/ -extern kern_return_t ubc_cs_check_validation_bitmap ( - struct vnode *vp, +extern kern_return_t ubc_cs_check_validation_bitmap( + struct vnode *vp, memory_object_offset_t offset, int optype); -extern kern_return_t vnode_pager_data_request( - memory_object_t, +extern kern_return_t vnode_pager_data_request( + memory_object_t, memory_object_offset_t, memory_object_cluster_size_t, vm_prot_t, @@ -369,17 +369,17 @@ extern kern_return_t vnode_pager_data_initialize( memory_object_offset_t, memory_object_cluster_size_t); extern void vnode_pager_reference( - memory_object_t mem_obj); + memory_object_t mem_obj); extern kern_return_t vnode_pager_synchronize( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t length, - vm_sync_t sync_flags); + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_size_t length, + vm_sync_t sync_flags); extern kern_return_t vnode_pager_map( - memory_object_t mem_obj, - vm_prot_t prot); + memory_object_t mem_obj, + vm_prot_t prot); extern kern_return_t vnode_pager_last_unmap( - memory_object_t mem_obj); + memory_object_t mem_obj); extern void vnode_pager_deallocate( memory_object_t); extern kern_return_t vnode_pager_terminate( @@ -401,40 +401,40 @@ extern struct vm_object *find_vnode_object(struct vm_map_entry *entry); extern void device_pager_reference(memory_object_t); extern void device_pager_deallocate(memory_object_t); extern kern_return_t device_pager_init(memory_object_t, - memory_object_control_t, - memory_object_cluster_size_t); 
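
The vnode_pager_* and device_pager_* prototypes around here are parallel implementations of the same memory-object interface; xnu dispatches to whichever pager backs an object through a memory_object_pager_ops table of function pointers. A toy sketch of that dispatch pattern, with invented names standing in for the real table:

    #include <stdio.h>

    typedef unsigned long long mo_offset_t;
    typedef int kr_t;

    /* Function-pointer table in the spirit of memory_object_pager_ops. */
    struct pager_ops {
        kr_t (*data_request)(void *pager, mo_offset_t off, unsigned len);
        kr_t (*terminate)(void *pager);
        const char *name;
    };

    static kr_t toy_data_request(void *pager, mo_offset_t off, unsigned len)
    {
        (void)pager;
        printf("fault: offset 0x%llx len %u\n", off, len);
        return 0;
    }

    static kr_t toy_terminate(void *pager) { (void)pager; return 0; }

    static const struct pager_ops toy_pager_ops = {
        .data_request = toy_data_request,
        .terminate    = toy_terminate,
        .name         = "toy pager",
    };

    int main(void)
    {
        const struct pager_ops *ops = &toy_pager_ops;  /* chosen per object */
        ops->data_request(NULL, 0x4000, 4096);         /* page-fault path */
        ops->terminate(NULL);
        return 0;
    }
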
-extern kern_return_t device_pager_terminate(memory_object_t); -extern kern_return_t device_pager_data_request(memory_object_t, - memory_object_offset_t, - memory_object_cluster_size_t, - vm_prot_t, - memory_object_fault_info_t); + memory_object_control_t, + memory_object_cluster_size_t); +extern kern_return_t device_pager_terminate(memory_object_t); +extern kern_return_t device_pager_data_request(memory_object_t, + memory_object_offset_t, + memory_object_cluster_size_t, + vm_prot_t, + memory_object_fault_info_t); extern kern_return_t device_pager_data_return(memory_object_t, - memory_object_offset_t, - memory_object_cluster_size_t, - memory_object_offset_t *, - int *, - boolean_t, - boolean_t, - int); + memory_object_offset_t, + memory_object_cluster_size_t, + memory_object_offset_t *, + int *, + boolean_t, + boolean_t, + int); extern kern_return_t device_pager_data_initialize(memory_object_t, - memory_object_offset_t, - memory_object_cluster_size_t); + memory_object_offset_t, + memory_object_cluster_size_t); extern kern_return_t device_pager_data_unlock(memory_object_t, - memory_object_offset_t, - memory_object_size_t, - vm_prot_t); + memory_object_offset_t, + memory_object_size_t, + vm_prot_t); extern kern_return_t device_pager_synchronize(memory_object_t, - memory_object_offset_t, - memory_object_size_t, - vm_sync_t); + memory_object_offset_t, + memory_object_size_t, + vm_sync_t); extern kern_return_t device_pager_map(memory_object_t, vm_prot_t); extern kern_return_t device_pager_last_unmap(memory_object_t); extern kern_return_t device_pager_populate_object( - memory_object_t device, - memory_object_offset_t offset, - ppnum_t page_num, - vm_size_t size); + memory_object_t device, + memory_object_offset_t offset, + ppnum_t page_num, + vm_size_t size); extern memory_object_t device_pager_setup( memory_object_t, uintptr_t, @@ -444,25 +444,25 @@ extern void device_pager_bootstrap(void); extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops); extern kern_return_t pager_map_to_phys_contiguous( - memory_object_control_t object, - memory_object_offset_t offset, - addr64_t base_vaddr, - vm_size_t size); + memory_object_control_t object, + memory_object_offset_t offset, + addr64_t base_vaddr, + vm_size_t size); extern kern_return_t memory_object_create_named( - memory_object_t pager, - memory_object_offset_t size, - memory_object_control_t *control); + memory_object_t pager, + memory_object_offset_t size, + memory_object_control_t *control); struct macx_triggers_args; extern int mach_macx_triggers( - struct macx_triggers_args *args); + struct macx_triggers_args *args); extern int macx_swapinfo( - memory_object_size_t *total_p, - memory_object_size_t *avail_p, - vm_size_t *pagesize_p, - boolean_t *encrypted_p); + memory_object_size_t *total_p, + memory_object_size_t *avail_p, + vm_size_t *pagesize_p, + boolean_t *encrypted_p); extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot); extern void log_unnest_badness( @@ -476,14 +476,14 @@ struct proc; extern int cs_allow_invalid(struct proc *p); extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed); -#define CS_VALIDATE_TAINTED 0x00000001 -#define CS_VALIDATE_NX 0x00000002 +#define CS_VALIDATE_TAINTED 0x00000001 +#define CS_VALIDATE_NX 0x00000002 extern boolean_t cs_validate_range(struct vnode *vp, - memory_object_t pager, - memory_object_offset_t offset, - const void *data, - vm_size_t size, - unsigned *result); + memory_object_t pager, + memory_object_offset_t offset, + const void *data, + 
vm_size_t size, + unsigned *result); #if PMAP_CS extern kern_return_t cs_associate_blob_with_mapping( void *pmap, @@ -494,37 +494,37 @@ extern kern_return_t cs_associate_blob_with_mapping( #endif /* PMAP_CS */ extern kern_return_t memory_entry_purgeable_control_internal( - ipc_port_t entry_port, - vm_purgable_t control, - int *state); + ipc_port_t entry_port, + vm_purgable_t control, + int *state); extern kern_return_t memory_entry_access_tracking_internal( - ipc_port_t entry_port, - int *access_tracking, - uint32_t *access_tracking_reads, - uint32_t *access_tracking_writes); + ipc_port_t entry_port, + int *access_tracking, + uint32_t *access_tracking_reads, + uint32_t *access_tracking_writes); extern kern_return_t mach_memory_entry_purgable_control( - ipc_port_t entry_port, - vm_purgable_t control, - int *state); + ipc_port_t entry_port, + vm_purgable_t control, + int *state); extern kern_return_t mach_memory_entry_get_page_counts( - ipc_port_t entry_port, - unsigned int *resident_page_count, - unsigned int *dirty_page_count); + ipc_port_t entry_port, + unsigned int *resident_page_count, + unsigned int *dirty_page_count); extern kern_return_t mach_memory_entry_page_op( - ipc_port_t entry_port, - vm_object_offset_t offset, - int ops, - ppnum_t *phys_entry, - int *flags); + ipc_port_t entry_port, + vm_object_offset_t offset, + int ops, + ppnum_t *phys_entry, + int *flags); extern kern_return_t mach_memory_entry_range_op( - ipc_port_t entry_port, - vm_object_offset_t offset_beg, - vm_object_offset_t offset_end, + ipc_port_t entry_port, + vm_object_offset_t offset_beg, + vm_object_offset_t offset_end, int ops, int *range); @@ -541,14 +541,14 @@ extern unsigned int mach_vm_ctl_page_free_wanted(void); extern int no_paging_space_action(void); -#define VM_TOGGLE_CLEAR 0 -#define VM_TOGGLE_SET 1 -#define VM_TOGGLE_GETVALUE 999 +#define VM_TOGGLE_CLEAR 0 +#define VM_TOGGLE_SET 1 +#define VM_TOGGLE_GETVALUE 999 int vm_toggle_entry_reuse(int, int*); -#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */ -#define SWAP_READ 0x00000001 /* Read buffer. */ -#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */ +#define SWAP_WRITE 0x00000000 /* Write buffer (pseudo flag). */ +#define SWAP_READ 0x00000001 /* Read buffer. */ +#define SWAP_ASYNC 0x00000002 /* Start I/O, do not wait. */ extern void vm_compressor_pager_init(void); extern kern_return_t compressor_memory_object_create( @@ -557,8 +557,8 @@ extern kern_return_t compressor_memory_object_create( extern boolean_t vm_compressor_low_on_space(void); extern boolean_t vm_compressor_out_of_space(void); -extern int vm_swap_low_on_space(void); -void do_fastwake_warmup_all(void); +extern int vm_swap_low_on_space(void); +void do_fastwake_warmup_all(void); #if CONFIG_JETSAM extern int proc_get_memstat_priority(struct proc*, boolean_t); #endif /* CONFIG_JETSAM */ @@ -567,48 +567,48 @@ extern int proc_get_memstat_priority(struct proc*, boolean_t); /* returns TRUE if an object was purged, otherwise FALSE. 
*/ boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group); void vm_purgeable_disown(task_t task); -void vm_purgeable_nonvolatile_owner_update(task_t owner, - int delta); -void vm_purgeable_volatile_owner_update(task_t owner, - int delta); +void vm_purgeable_nonvolatile_owner_update(task_t owner, + int delta); +void vm_purgeable_volatile_owner_update(task_t owner, + int delta); struct trim_list { - uint64_t tl_offset; - uint64_t tl_length; + uint64_t tl_offset; + uint64_t tl_length; struct trim_list *tl_next; }; u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only); -#define MAX_SWAPFILENAME_LEN 1024 -#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */ +#define MAX_SWAPFILENAME_LEN 1024 +#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */ -extern char swapfilename[MAX_SWAPFILENAME_LEN + 1]; +extern char swapfilename[MAX_SWAPFILENAME_LEN + 1]; struct vm_counters { - unsigned int do_collapse_compressor; - unsigned int do_collapse_compressor_pages; - unsigned int do_collapse_terminate; - unsigned int do_collapse_terminate_failure; - unsigned int should_cow_but_wired; - unsigned int create_upl_extra_cow; - unsigned int create_upl_extra_cow_pages; - unsigned int create_upl_lookup_failure_write; - unsigned int create_upl_lookup_failure_copy; + unsigned int do_collapse_compressor; + unsigned int do_collapse_compressor_pages; + unsigned int do_collapse_terminate; + unsigned int do_collapse_terminate_failure; + unsigned int should_cow_but_wired; + unsigned int create_upl_extra_cow; + unsigned int create_upl_extra_cow_pages; + unsigned int create_upl_lookup_failure_write; + unsigned int create_upl_lookup_failure_copy; }; extern struct vm_counters vm_counters; #if CONFIG_SECLUDED_MEMORY struct vm_page_secluded_data { - int eligible_for_secluded; - int grab_success_free; - int grab_success_other; - int grab_failure_locked; - int grab_failure_state; - int grab_failure_dirty; - int grab_for_iokit; - int grab_for_iokit_success; + int eligible_for_secluded; + int grab_success_free; + int grab_success_other; + int grab_failure_locked; + int grab_failure_state; + int grab_failure_dirty; + int grab_for_iokit; + int grab_for_iokit_success; }; extern struct vm_page_secluded_data vm_page_secluded; @@ -622,24 +622,24 @@ extern int secluded_for_filecache; extern int secluded_for_fbdp; #endif -extern void memory_object_mark_eligible_for_secluded( - memory_object_control_t control, - boolean_t eligible_for_secluded); +extern void memory_object_mark_eligible_for_secluded( + memory_object_control_t control, + boolean_t eligible_for_secluded); #endif /* CONFIG_SECLUDED_MEMORY */ -#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */ +#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */ extern kern_return_t mach_make_memory_entry_internal( - vm_map_t target_map, - memory_object_size_t *size, + vm_map_t target_map, + memory_object_size_t *size, memory_object_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_handle); + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_handle); -#define roundup(x, y) ((((x) % (y)) == 0) ? \ - (x) : ((x) + ((y) - ((x) % (y))))) +#define roundup(x, y) ((((x) % (y)) == 0) ? 
\ + (x) : ((x) + ((y) - ((x) % (y))))) #ifdef __cplusplus } @@ -655,6 +655,6 @@ extern kern_return_t mach_make_memory_entry_internal( #define VM_SWAP_FLAGS_FORCE_DEFRAG 1 #define VM_SWAP_FLAGS_FORCE_RECLAIM 2 -#endif /* _VM_VM_PROTOS_H_ */ +#endif /* _VM_VM_PROTOS_H_ */ -#endif /* XNU_KERNEL_PRIVATE */ +#endif /* XNU_KERNEL_PRIVATE */ diff --git a/osfmk/vm/vm_purgeable.c b/osfmk/vm/vm_purgeable.c index 5fc967116..6ebfaf77a 100644 --- a/osfmk/vm/vm_purgeable.c +++ b/osfmk/vm/vm_purgeable.c @@ -32,7 +32,7 @@ #include #include -#include /* kmem_alloc */ +#include /* kmem_alloc */ #include #include #include @@ -58,33 +58,33 @@ extern vm_pressure_level_t memorystatus_vm_pressure_level; struct token { token_cnt_t count; - token_idx_t prev; + token_idx_t prev; token_idx_t next; }; -struct token *tokens; -token_idx_t token_q_max_cnt = 0; -vm_size_t token_q_cur_size = 0; +struct token *tokens; +token_idx_t token_q_max_cnt = 0; +vm_size_t token_q_cur_size = 0; -token_idx_t token_free_idx = 0; /* head of free queue */ -token_idx_t token_init_idx = 1; /* token 0 is reserved!! */ -int32_t token_new_pagecount = 0; /* count of pages that will - * be added onto token queue */ +token_idx_t token_free_idx = 0; /* head of free queue */ +token_idx_t token_init_idx = 1; /* token 0 is reserved!! */ +int32_t token_new_pagecount = 0; /* count of pages that will + * be added onto token queue */ -int available_for_purge = 0; /* increase when ripe token - * added, decrease when ripe - * token removed. - * protected by page_queue_lock - */ +int available_for_purge = 0; /* increase when ripe token + * added, decrease when ripe + * token removed. + * protected by page_queue_lock + */ -static int token_q_allocating = 0; /* flag for singlethreading - * allocator */ +static int token_q_allocating = 0; /* flag for singlethreading + * allocator */ struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX]; queue_head_t purgeable_nonvolatile_queue; int purgeable_nonvolatile_count; -decl_lck_mtx_data(,vm_purgeable_queue_lock) +decl_lck_mtx_data(, vm_purgeable_queue_lock) static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue); @@ -101,7 +101,7 @@ vm_purgeable_token_check_queue(purgeable_q_t queue) int our_inactive_count; #if DEVELOPMENT - static unsigned lightweight_check = 0; + static unsigned lightweight_check = 0; /* * Due to performance impact, only perform this check @@ -123,20 +123,21 @@ vm_purgeable_token_check_queue(purgeable_q_t queue) } page_cnt += tokens[token].count; } - if (tokens[token].next == 0) + if (tokens[token].next == 0) { assert(queue->token_q_tail == token); + } token_cnt++; token = tokens[token].next; } - if (unripe) + if (unripe) { assert(queue->token_q_unripe == unripe); + } assert(token_cnt == queue->debug_count_tokens); - + /* obsolete queue doesn't maintain token counts */ - if(queue->type != PURGEABLE_Q_TYPE_OBSOLETE) - { + if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) { our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount; assert(our_inactive_count >= 0); assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count); @@ -152,94 +153,98 @@ kern_return_t vm_purgeable_token_add(purgeable_q_t queue) { LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - + /* new token */ token_idx_t token; enum purgeable_q_type i; find_available_token: - if (token_free_idx) { /* unused tokens available */ + if (token_free_idx) { /* unused tokens available */ token = token_free_idx; token_free_idx = tokens[token_free_idx].next; - } else if 
(token_init_idx < token_q_max_cnt) { /* lazy token array init */ + } else if (token_init_idx < token_q_max_cnt) { /* lazy token array init */ token = token_init_idx; token_init_idx++; - } else { /* allocate more memory */ + } else { /* allocate more memory */ /* Wait if another thread is inside the memory alloc section */ - while(token_q_allocating) { + while (token_q_allocating) { wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock, - LCK_SLEEP_DEFAULT, - (event_t)&token_q_allocating, - THREAD_UNINT); - if(res != THREAD_AWAKENED) return KERN_ABORTED; - }; - + LCK_SLEEP_DEFAULT, + (event_t)&token_q_allocating, + THREAD_UNINT); + if (res != THREAD_AWAKENED) { + return KERN_ABORTED; + } + } + ; + /* Check whether memory is still maxed out */ - if(token_init_idx < token_q_max_cnt) + if (token_init_idx < token_q_max_cnt) { goto find_available_token; - + } + /* Still no memory. Allocate some. */ token_q_allocating = 1; - + /* Drop page queue lock so we can allocate */ vm_page_unlock_queues(); - + struct token *new_loc; vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE; kern_return_t result; - - if (alloc_size / sizeof (struct token) > TOKEN_COUNT_MAX) { + + if (alloc_size / sizeof(struct token) > TOKEN_COUNT_MAX) { result = KERN_RESOURCE_SHORTAGE; } else { if (token_q_cur_size) { result = kmem_realloc(kernel_map, - (vm_offset_t) tokens, - token_q_cur_size, - (vm_offset_t *) &new_loc, - alloc_size, VM_KERN_MEMORY_OSFMK); + (vm_offset_t) tokens, + token_q_cur_size, + (vm_offset_t *) &new_loc, + alloc_size, VM_KERN_MEMORY_OSFMK); } else { result = kmem_alloc(kernel_map, - (vm_offset_t *) &new_loc, - alloc_size, VM_KERN_MEMORY_OSFMK); + (vm_offset_t *) &new_loc, + alloc_size, VM_KERN_MEMORY_OSFMK); } } - + vm_page_lock_queues(); - + if (result) { /* Unblock waiting threads */ token_q_allocating = 0; thread_wakeup((event_t)&token_q_allocating); return result; } - + /* If we get here, we allocated new memory. Update pointers and * dealloc old range */ - struct token *old_tokens=tokens; - tokens=new_loc; - vm_size_t old_token_q_cur_size=token_q_cur_size; - token_q_cur_size=alloc_size; + struct token *old_tokens = tokens; + tokens = new_loc; + vm_size_t old_token_q_cur_size = token_q_cur_size; + token_q_cur_size = alloc_size; token_q_max_cnt = (token_idx_t) (token_q_cur_size / - sizeof(struct token)); - assert (token_init_idx < token_q_max_cnt); /* We must have a free token now */ - - if (old_token_q_cur_size) { /* clean up old mapping */ + sizeof(struct token)); + assert(token_init_idx < token_q_max_cnt); /* We must have a free token now */ + + if (old_token_q_cur_size) { /* clean up old mapping */ vm_page_unlock_queues(); /* kmem_realloc leaves the old region mapped. Get rid of it. 
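
[Annotation] The hunk above reflows vm_purgeable_token_add()'s grow-the-token-array path: drop the page-queue lock, allocate a larger backing array, retake the lock, and retry, with the token_q_allocating flag serializing concurrent growers. A minimal single-threaded model of that allocate-and-retry shape — plain realloc standing in for kmem_alloc/kmem_realloc, and all names here hypothetical, not kernel API:

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	typedef uint32_t token_idx_t;
	struct tok { uint32_t count; token_idx_t prev, next; };

	static struct tok *toks;          /* backing array, grown on demand   */
	static size_t      tok_cap;       /* capacity, in elements            */
	static token_idx_t tok_free;      /* free-list head; 0 means empty    */
	static token_idx_t tok_init = 1;  /* element 0 is reserved as "NULL"  */

	static token_idx_t
	tok_alloc(void)
	{
		for (;;) {
			if (tok_free) {                 /* reuse a freed slot */
				token_idx_t t = tok_free;
				tok_free = toks[t].next;
				return t;
			}
			if (tok_init < tok_cap) {       /* lazy init of fresh slots */
				return tok_init++;
			}
			/* No slot left: grow the array, then retry the fast paths.
			 * The kernel drops the page-queue lock around this step and
			 * lets one thread at a time do the kmem_realloc(); since
			 * kmem_realloc leaves the old region mapped, the code above
			 * must also kmem_free() the old range. */
			size_t ncap = tok_cap ? tok_cap * 2 : 128;
			struct tok *n = realloc(toks, ncap * sizeof(*n));
			assert(n != NULL);
			toks = n;
			tok_cap = ncap;
		}
	}
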
*/ kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size); vm_page_lock_queues(); } - + /* Unblock waiting threads */ token_q_allocating = 0; thread_wakeup((event_t)&token_q_allocating); - + goto find_available_token; } - - assert (token); - + + assert(token); + /* * the new pagecount we got need to be applied to all queues except * obsolete @@ -254,11 +259,12 @@ find_available_token: token_new_pagecount = 0; /* set token counter value */ - if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) + if (queue->type != PURGEABLE_Q_TYPE_OBSOLETE) { tokens[token].count = queue->new_pages; - else - tokens[token].count = 0; /* all obsolete items are - * ripe immediately */ + } else { + tokens[token].count = 0; /* all obsolete items are + * ripe immediately */ + } queue->new_pages = 0; /* put token on token counter list */ @@ -271,13 +277,14 @@ find_available_token: tokens[queue->token_q_tail].next = token; tokens[token].prev = queue->token_q_tail; } - if (queue->token_q_unripe == 0) { /* only ripe tokens (token - * count == 0) in queue */ - if (tokens[token].count > 0) - queue->token_q_unripe = token; /* first unripe token */ - else - available_for_purge++; /* added a ripe token? - * increase available count */ + if (queue->token_q_unripe == 0) { /* only ripe tokens (token + * count == 0) in queue */ + if (tokens[token].count > 0) { + queue->token_q_unripe = token; /* first unripe token */ + } else { + available_for_purge++; /* added a ripe token? + * increase available count */ + } } queue->token_q_tail = token; @@ -288,12 +295,12 @@ find_available_token: vm_purgeable_token_check_queue(&purgeable_queues[PURGEABLE_Q_TYPE_LIFO]); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_ADD)), - queue->type, - tokens[token].count, /* num pages on token - * (last token) */ - queue->debug_count_tokens, - 0, - 0); + queue->type, + tokens[token].count, /* num pages on token + * (last token) */ + queue->debug_count_tokens, + 0, + 0); #endif return KERN_SUCCESS; @@ -302,13 +309,13 @@ find_available_token: /* * Remove first token from queue and return its index. Add its count to the * count of the next token. - * Call with page queue locked. + * Call with page queue locked. 
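
[Annotation] Throughout these hunks, tokens live in a flat array and link to one another by index rather than by pointer, with index 0 doubling as NULL (hence the "token 0 is reserved!!" comment earlier in this file's diff). A sketch of the removal-with-count-transfer described by the comment above, reusing the hypothetical model types from the previous sketch — a simplified illustration, not the kernel code:

	static token_idx_t q_head, q_tail, q_unripe;

	static token_idx_t
	remove_first(void)
	{
		token_idx_t t = q_head;

		if (t) {
			q_head = toks[t].next;
			if (q_head) {
				/* pages that were queued behind the removed
				 * token now wait on the new head */
				toks[q_head].count += toks[t].count;
				toks[q_head].prev = 0;
			} else {
				q_tail = 0;
			}
			if (q_unripe == t) {
				/* simplified; the kernel re-derives the first
				 * unripe token and fixes available_for_purge */
				q_unripe = q_head;
			}
		}
		return t;   /* caller pushes the slot onto the free list */
	}
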
*/ -static token_idx_t +static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue) { LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - + token_idx_t token; token = queue->token_q_head; @@ -325,8 +332,9 @@ vm_purgeable_token_remove_first(purgeable_q_t queue) assert(available_for_purge >= 0); } - if (queue->token_q_tail == queue->token_q_head) + if (queue->token_q_tail == queue->token_q_head) { assert(tokens[token].next == 0); + } queue->token_q_head = tokens[token].next; if (queue->token_q_head) { @@ -348,23 +356,23 @@ vm_purgeable_token_remove_first(purgeable_q_t queue) vm_purgeable_token_check_queue(queue); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)), - queue->type, - tokens[queue->token_q_head].count, /* num pages on new - * first token */ - token_new_pagecount, /* num pages waiting for - * next token */ - available_for_purge, - 0); + queue->type, + tokens[queue->token_q_head].count, /* num pages on new + * first token */ + token_new_pagecount, /* num pages waiting for + * next token */ + available_for_purge, + 0); #endif } return token; } -static token_idx_t +static token_idx_t vm_purgeable_token_remove_last(purgeable_q_t queue) { LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - + token_idx_t token; token = queue->token_q_tail; @@ -373,8 +381,9 @@ vm_purgeable_token_remove_last(purgeable_q_t queue) if (token) { assert(queue->token_q_head); - if (queue->token_q_tail == queue->token_q_head) + if (queue->token_q_tail == queue->token_q_head) { assert(tokens[token].next == 0); + } if (queue->token_q_unripe == 0) { /* we're removing a ripe token. decrease count */ @@ -384,7 +393,7 @@ vm_purgeable_token_remove_last(purgeable_q_t queue) /* we're removing the only unripe token */ queue->token_q_unripe = 0; } - + if (token == queue->token_q_head) { /* token is the last one in the queue */ queue->token_q_head = 0; @@ -408,21 +417,21 @@ vm_purgeable_token_remove_last(purgeable_q_t queue) vm_purgeable_token_check_queue(queue); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)), - queue->type, - tokens[queue->token_q_head].count, /* num pages on new - * first token */ - token_new_pagecount, /* num pages waiting for - * next token */ - available_for_purge, - 0); + queue->type, + tokens[queue->token_q_head].count, /* num pages on new + * first token */ + token_new_pagecount, /* num pages waiting for + * next token */ + available_for_purge, + 0); #endif } return token; } -/* +/* * Delete first token from queue. Return token to token queue. - * Call with page queue locked. + * Call with page queue locked. */ void vm_purgeable_token_delete_first(purgeable_q_t queue) @@ -458,12 +467,11 @@ void vm_purgeable_q_advance_all() { LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - + /* check queue counters - if they get really large, scale them back. * They tend to get that large when there is no purgeable queue action */ int i; - if(token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) /* a system idling years might get there */ - { + if (token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) { /* a system idling years might get there */ for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) { int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount; assert(pages >= 0); @@ -473,7 +481,7 @@ vm_purgeable_q_advance_all() } token_new_pagecount = 0; } - + /* * Decrement token counters. A token counter can be zero, this means the * object is ripe to be purged. 
It is not purged immediately, because that @@ -488,11 +496,10 @@ vm_purgeable_q_advance_all() for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) { purgeable_q_t queue = &purgeable_queues[i]; uint32_t num_pages = 1; - + /* Iterate over tokens as long as there are unripe tokens. */ while (queue->token_q_unripe) { - if (tokens[queue->token_q_unripe].count && num_pages) - { + if (tokens[queue->token_q_unripe].count && num_pages) { tokens[queue->token_q_unripe].count -= 1; num_pages -= 1; } @@ -501,18 +508,19 @@ vm_purgeable_q_advance_all() queue->token_q_unripe = tokens[queue->token_q_unripe].next; available_for_purge++; KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)), - queue->type, - tokens[queue->token_q_head].count, /* num pages on new - * first token */ - 0, - available_for_purge, - 0); - continue; /* One token ripened. Make sure to - * check the next. */ + queue->type, + tokens[queue->token_q_head].count, /* num pages on new + * first token */ + 0, + available_for_purge, + 0); + continue; /* One token ripened. Make sure to + * check the next. */ + } + if (num_pages == 0) { + break; /* Current token not ripe and no more pages. + * Work done. */ } - if (num_pages == 0) - break; /* Current token not ripe and no more pages. - * Work done. */ } /* @@ -556,8 +564,9 @@ vm_purgeable_token_remove_ripe(purgeable_q_t queue) token_free_idx = queue->token_q_head; queue->token_q_head = new_head; tokens[new_head].prev = 0; - if (new_head == 0) + if (new_head == 0) { queue->token_q_tail = 0; + } #if MACH_ASSERT queue->debug_count_tokens--; @@ -595,8 +604,8 @@ vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t que token_cnt_t count; /* remove token from queue1 */ - assert(queue->token_q_unripe == queue->token_q_head); /* queue1 had no unripe - * tokens, remember? */ + assert(queue->token_q_unripe == queue->token_q_head); /* queue1 had no unripe + * tokens, remember? */ token = vm_purgeable_token_remove_first(queue); assert(token); @@ -613,11 +622,11 @@ vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t que } /* token_to_insert_before is now set correctly */ - - /* should the inserted token become the first unripe token? */ - if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0)) - queue2->token_q_unripe = token; /* if so, must update unripe pointer */ + /* should the inserted token become the first unripe token? */ + if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0)) { + queue2->token_q_unripe = token; /* if so, must update unripe pointer */ + } /* * insert token. * if inserting at end, reduce new_pages by that value; @@ -665,18 +674,18 @@ vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t que /* Call with purgeable queue locked. 
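
[Annotation] The reflowed loop in vm_purgeable_q_advance_all() above is the heart of the aging scheme: each page considered "spends" one unit against the first unripe token, and a token whose count reaches zero ripens, making one more volatile object eligible for purging. The same logic extracted into a standalone model, continuing the hypothetical types from the sketches above:

	static int avail_for_purge_model;   /* mirrors available_for_purge */

	static void
	advance(uint32_t num_pages)
	{
		while (q_unripe) {
			if (toks[q_unripe].count && num_pages) {
				toks[q_unripe].count -= 1;
				num_pages -= 1;
			}
			if (toks[q_unripe].count == 0) {
				q_unripe = toks[q_unripe].next; /* token ripened */
				avail_for_purge_model++;
				continue;   /* the next token may ripen too */
			}
			if (num_pages == 0) {
				break;      /* head token still unripe; done */
			}
		}
	}
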
*/ static vm_object_t vm_purgeable_object_find_and_lock( - purgeable_q_t queue, - int group, - boolean_t pick_ripe) + purgeable_q_t queue, + int group, + boolean_t pick_ripe) { vm_object_t object, best_object; - int object_task_importance; - int best_object_task_importance; - int best_object_skipped; - int num_objects_skipped; - int try_lock_failed = 0; - int try_lock_succeeded = 0; - task_t owner; + int object_task_importance; + int best_object_task_importance; + int best_object_skipped; + int num_objects_skipped; + int try_lock_failed = 0; + int try_lock_succeeded = 0; + task_t owner; best_object = VM_OBJECT_NULL; best_object_task_importance = INT_MAX; @@ -689,18 +698,17 @@ vm_purgeable_object_find_and_lock( */ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START), - pick_ripe, - group, - VM_KERNEL_UNSLIDE_OR_PERM(queue), - 0, - 0); + pick_ripe, + group, + VM_KERNEL_UNSLIDE_OR_PERM(queue), + 0, + 0); num_objects_skipped = 0; for (object = (vm_object_t) queue_first(&queue->objq[group]); - !queue_end(&queue->objq[group], (queue_entry_t) object); - object = (vm_object_t) queue_next(&object->objq), - num_objects_skipped++) { - + !queue_end(&queue->objq[group], (queue_entry_t) object); + object = (vm_object_t) queue_next(&object->objq), + num_objects_skipped++) { /* * To prevent us looping for an excessively long time, choose * the best object we've seen after looking at PURGEABLE_LOOP_MAX elements. @@ -712,7 +720,7 @@ vm_purgeable_object_find_and_lock( } if (pick_ripe && - ! object->purgeable_when_ripe) { + !object->purgeable_when_ripe) { /* we want an object that has a ripe token */ continue; } @@ -728,7 +736,7 @@ vm_purgeable_object_find_and_lock( if (owner != NULL && owner != VM_OBJECT_OWNER_DISOWNED) { #if CONFIG_EMBEDDED #if CONFIG_JETSAM - object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE); + object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE); #endif /* CONFIG_JETSAM */ #else /* CONFIG_EMBEDDED */ object_task_importance = task_importance_estimate(owner); @@ -756,11 +764,11 @@ vm_purgeable_object_find_and_lock( } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END), - num_objects_skipped, /* considered objects */ - try_lock_failed, - try_lock_succeeded, - VM_KERNEL_UNSLIDE_OR_PERM(best_object), - ((best_object == NULL) ? 0 : best_object->resident_page_count)); + num_objects_skipped, /* considered objects */ + try_lock_failed, + try_lock_succeeded, + VM_KERNEL_UNSLIDE_OR_PERM(best_object), + ((best_object == NULL) ? 
0 : best_object->resident_page_count)); object = best_object; @@ -774,7 +782,7 @@ vm_purgeable_object_find_and_lock( vm_object_lock_assert_exclusive(object); queue_remove(&queue->objq[group], object, - vm_object_t, objq); + vm_object_t, objq); object->objq.next = NULL; object->objq.prev = NULL; object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX; @@ -788,7 +796,7 @@ vm_purgeable_object_find_and_lock( /* keep queue of non-volatile objects */ queue_enter(&purgeable_nonvolatile_queue, object, - vm_object_t, objq); + vm_object_t, objq); assert(purgeable_nonvolatile_count >= 0); purgeable_nonvolatile_count++; assert(purgeable_nonvolatile_count > 0); @@ -808,8 +816,8 @@ vm_purgeable_object_purge_all(void) enum purgeable_q_type i; int group; vm_object_t object; - unsigned int purged_count; - uint32_t collisions; + unsigned int purged_count; + uint32_t collisions; purged_count = 0; collisions = 0; @@ -838,7 +846,7 @@ restart: } lck_mtx_unlock(&vm_purgeable_queue_lock); - + /* Lock the page queue here so we don't hold it * over the whole, legthy operation */ if (object->purgeable_when_ripe) { @@ -846,7 +854,7 @@ restart: vm_purgeable_token_remove_first(queue); vm_page_unlock_queues(); } - + (void) vm_object_purge(object, 0); assert(object->purgable == VM_PURGABLE_EMPTY); /* no change in purgeable accounting */ @@ -859,20 +867,20 @@ restart: } } KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)), - purged_count, /* # of purged objects */ - 0, - available_for_purge, - 0, - 0); + purged_count, /* # of purged objects */ + 0, + available_for_purge, + 0, + 0); lck_mtx_unlock(&vm_purgeable_queue_lock); return; } boolean_t vm_purgeable_object_purge_one_unlocked( - int force_purge_below_group) + int force_purge_below_group) { - boolean_t retval; + boolean_t retval; vm_page_lock_queues(); retval = vm_purgeable_object_purge_one(force_purge_below_group, 0); @@ -883,24 +891,24 @@ vm_purgeable_object_purge_one_unlocked( boolean_t vm_purgeable_object_purge_one( - int force_purge_below_group, - int flags) + int force_purge_below_group, + int flags) { enum purgeable_q_type i; int group; vm_object_t object = 0; purgeable_q_t queue, queue2; - boolean_t forced_purge; + boolean_t forced_purge; unsigned int resident_page_count; KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_START, - force_purge_below_group, flags, 0, 0, 0); + force_purge_below_group, flags, 0, 0, 0); /* Need the page queue lock since we'll be changing the token queue. */ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); lck_mtx_lock(&vm_purgeable_queue_lock); - + /* Cycle through all queues */ for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) { queue = &purgeable_queues[i]; @@ -954,7 +962,7 @@ vm_purgeable_object_purge_one( /* nothing to purge in this group: next group */ continue; } - if (!queue_empty(&queue->objq[group]) && + if (!queue_empty(&queue->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) { lck_mtx_unlock(&vm_purgeable_queue_lock); if (object->purgeable_when_ripe) { @@ -963,14 +971,14 @@ vm_purgeable_object_purge_one( forced_purge = FALSE; goto purge_now; } - if (i != PURGEABLE_Q_TYPE_OBSOLETE) { + if (i != PURGEABLE_Q_TYPE_OBSOLETE) { /* This is the token migration case, and it works between * FIFO and LIFO only */ - queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? - PURGEABLE_Q_TYPE_FIFO : - PURGEABLE_Q_TYPE_LIFO]; + queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? 
+ PURGEABLE_Q_TYPE_FIFO : + PURGEABLE_Q_TYPE_LIFO]; - if (!queue_empty(&queue2->objq[group]) && + if (!queue_empty(&queue2->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) { lck_mtx_unlock(&vm_purgeable_queue_lock); if (object->purgeable_when_ripe) { @@ -984,14 +992,14 @@ vm_purgeable_object_purge_one( } } /* - * because we have to do a try_lock on the objects which could fail, - * we could end up with no object to purge at this time, even though - * we have objects in a purgeable state - */ + * because we have to do a try_lock on the objects which could fail, + * we could end up with no object to purge at this time, even though + * we have objects in a purgeable state + */ lck_mtx_unlock(&vm_purgeable_queue_lock); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END, - 0, 0, available_for_purge, 0, 0); + 0, 0, available_for_purge, 0, 0); return FALSE; @@ -1010,12 +1018,12 @@ purge_now: vm_pageout_vminfo.vm_pageout_pages_purged += resident_page_count; KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)) | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ - resident_page_count, - available_for_purge, - 0, - 0); - + VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ + resident_page_count, + available_for_purge, + 0, + 0); + return TRUE; } @@ -1029,7 +1037,7 @@ vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group) assert(object->objq.next != NULL); assert(object->objq.prev != NULL); queue_remove(&purgeable_nonvolatile_queue, object, - vm_object_t, objq); + vm_object_t, objq); object->objq.next = NULL; object->objq.prev = NULL; assert(purgeable_nonvolatile_count > 0); @@ -1038,14 +1046,16 @@ vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group) /* one less nonvolatile object for this object's owner */ vm_purgeable_nonvolatile_owner_update(VM_OBJECT_OWNER(object), -1); - if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE) + if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE) { group = 0; + } - if (queue->type != PURGEABLE_Q_TYPE_LIFO) /* fifo and obsolete are - * fifo-queued */ - queue_enter(&queue->objq[group], object, vm_object_t, objq); /* last to die */ - else - queue_enter_first(&queue->objq[group], object, vm_object_t, objq); /* first to die */ + if (queue->type != PURGEABLE_Q_TYPE_LIFO) { /* fifo and obsolete are + * fifo-queued */ + queue_enter(&queue->objq[group], object, vm_object_t, objq); /* last to die */ + } else { + queue_enter_first(&queue->objq[group], object, vm_object_t, objq); /* first to die */ + } /* one more volatile object for this object's owner */ vm_purgeable_volatile_owner_update(VM_OBJECT_OWNER(object), +1); @@ -1056,17 +1066,17 @@ vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group) assert(object->vo_purgeable_volatilizer == NULL); object->vo_purgeable_volatilizer = current_task(); OSBacktrace(&object->purgeable_volatilizer_bt[0], - ARRAY_COUNT(object->purgeable_volatilizer_bt)); + ARRAY_COUNT(object->purgeable_volatilizer_bt)); #endif /* DEBUG */ #if MACH_ASSERT queue->debug_count_objects++; KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)), - 0, - tokens[queue->token_q_head].count, - queue->type, - group, - 0); + 0, + tokens[queue->token_q_head].count, + queue->type, + group, + 0); #endif lck_mtx_unlock(&vm_purgeable_queue_lock); @@ -1087,12 +1097,14 @@ vm_purgeable_object_remove(vm_object_t object) group = object->purgeable_queue_group; if (type == PURGEABLE_Q_TYPE_MAX) { - if (object->objq.prev || 
object->objq.next) + if (object->objq.prev || object->objq.next) { panic("unmarked object on purgeable q"); + } return NULL; - } else if (!(object->objq.prev && object->objq.next)) + } else if (!(object->objq.prev && object->objq.next)) { panic("marked object not on purgeable q"); + } lck_mtx_lock(&vm_purgeable_queue_lock); @@ -1109,7 +1121,7 @@ vm_purgeable_object_remove(vm_object_t object) /* keep queue of non-volatile objects */ if (object->alive && !object->terminating) { queue_enter(&purgeable_nonvolatile_queue, object, - vm_object_t, objq); + vm_object_t, objq); assert(purgeable_nonvolatile_count >= 0); purgeable_nonvolatile_count++; assert(purgeable_nonvolatile_count > 0); @@ -1120,11 +1132,11 @@ vm_purgeable_object_remove(vm_object_t object) #if MACH_ASSERT queue->debug_count_objects--; KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)), - 0, - tokens[queue->token_q_head].count, - queue->type, - group, - 0); + 0, + tokens[queue->token_q_head].count, + queue->type, + group, + 0); #endif lck_mtx_unlock(&vm_purgeable_queue_lock); @@ -1145,8 +1157,8 @@ vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int gr stat->count = stat->size = 0; vm_object_t object; for (object = (vm_object_t) queue_first(&queue->objq[group]); - !queue_end(&queue->objq[group], (queue_entry_t) object); - object = (vm_object_t) queue_next(&object->objq)) { + !queue_end(&queue->objq[group], (queue_entry_t) object); + object = (vm_object_t) queue_next(&object->objq)) { if (!target_task || VM_OBJECT_OWNER(object) == target_task) { stat->count++; stat->size += (object->resident_page_count * PAGE_SIZE); @@ -1158,20 +1170,22 @@ vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int gr void vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task) { - purgeable_q_t queue; + purgeable_q_t queue; int group; lck_mtx_lock(&vm_purgeable_queue_lock); - + /* Populate fifo_data */ queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; - for (group = 0; group < NUM_VOLATILE_GROUPS; group++) + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task); - + } + /* Populate lifo_data */ queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; - for (group = 0; group < NUM_VOLATILE_GROUPS; group++) + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task); + } /* Populate obsolete data */ queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; @@ -1202,7 +1216,6 @@ vm_purgeable_account_volatile_queue( acnt_info->pvm_nonvolatile_count += object->wired_page_count; } } - } /* @@ -1211,15 +1224,15 @@ vm_purgeable_account_volatile_queue( */ kern_return_t vm_purgeable_account( - task_t task, - pvm_account_info_t acnt_info) + task_t task, + pvm_account_info_t acnt_info) { - queue_head_t *nonvolatile_q; - vm_object_t object; - int group; - int state; - uint64_t compressed_count; - purgeable_q_t volatile_q; + queue_head_t *nonvolatile_q; + vm_object_t object; + int group; + int state; + uint64_t compressed_count; + purgeable_q_t volatile_q; if ((task == NULL) || (acnt_info == NULL)) { @@ -1235,8 +1248,8 @@ vm_purgeable_account( nonvolatile_q = &purgeable_nonvolatile_queue; for (object = (vm_object_t) queue_first(nonvolatile_q); - !queue_end(nonvolatile_q, (queue_entry_t) object); - object = (vm_object_t) queue_next(&object->objq)) { + !queue_end(nonvolatile_q, (queue_entry_t) object); + object = (vm_object_t) 
queue_next(&object->objq)) { if (VM_OBJECT_OWNER(object) == task) { state = object->purgable; compressed_count = vm_compressor_pager_get_count(object->pager); @@ -1276,11 +1289,11 @@ vm_purgeable_account( void vm_purgeable_disown( - task_t task) + task_t task) { - vm_object_t next_object; - vm_object_t object; - int collisions; + vm_object_t next_object; + vm_object_t object; + int collisions; if (task == NULL) { return; @@ -1313,8 +1326,8 @@ again: task->task_purgeable_disowning = TRUE; for (object = (vm_object_t) queue_first(&task->task_objq); - !queue_end(&task->task_objq, (queue_entry_t) object); - object = next_object) { + !queue_end(&task->task_objq, (queue_entry_t) object); + object = next_object) { if (task->task_nonvolatile_objects == 0 && task->task_volatile_objects == 0) { /* no more purgeable objects owned by "task" */ @@ -1343,21 +1356,21 @@ again: object, object->vo_ledger_tag, /* unchanged */ VM_OBJECT_OWNER_DISOWNED, /* new owner */ - TRUE); /* old_owner->task_objq locked */ + TRUE); /* old_owner->task_objq locked */ assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED); vm_object_unlock(object); } if (__improbable(task->task_volatile_objects != 0 || - task->task_nonvolatile_objects != 0)) { + task->task_nonvolatile_objects != 0)) { panic("%s(%p): volatile=%d nonvolatile=%d q=%p q_first=%p q_last=%p", - __FUNCTION__, - task, - task->task_volatile_objects, - task->task_nonvolatile_objects, - &task->task_objq, - queue_first(&task->task_objq), - queue_last(&task->task_objq)); + __FUNCTION__, + task, + task->task_volatile_objects, + task->task_nonvolatile_objects, + &task->task_objq, + queue_first(&task->task_objq), + queue_last(&task->task_objq)); } /* there shouldn't be any purgeable objects owned by task now */ @@ -1375,13 +1388,13 @@ again: static uint64_t vm_purgeable_queue_purge_task_owned( - purgeable_q_t queue, - int group, - task_t task) + purgeable_q_t queue, + int group, + task_t task) { - vm_object_t object = VM_OBJECT_NULL; - int collisions = 0; - uint64_t num_pages_purged = 0; + vm_object_t object = VM_OBJECT_NULL; + int collisions = 0; + uint64_t num_pages_purged = 0; num_pages_purged = 0; collisions = 0; @@ -1390,9 +1403,8 @@ look_again: lck_mtx_lock(&vm_purgeable_queue_lock); for (object = (vm_object_t) queue_first(&queue->objq[group]); - !queue_end(&queue->objq[group], (queue_entry_t) object); - object = (vm_object_t) queue_next(&object->objq)) { - + !queue_end(&queue->objq[group], (queue_entry_t) object); + object = (vm_object_t) queue_next(&object->objq)) { if (object->vo_owner != task) { continue; } @@ -1409,7 +1421,7 @@ look_again: /* remove object from purgeable queue */ queue_remove(&queue->objq[group], object, - vm_object_t, objq); + vm_object_t, objq); object->objq.next = NULL; object->objq.prev = NULL; object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX; @@ -1422,7 +1434,7 @@ look_again: object->vo_purgeable_volatilizer = NULL; #endif /* DEBUG */ queue_enter(&purgeable_nonvolatile_queue, object, - vm_object_t, objq); + vm_object_t, objq); assert(purgeable_nonvolatile_count >= 0); purgeable_nonvolatile_count++; assert(purgeable_nonvolatile_count > 0); @@ -1458,38 +1470,40 @@ look_again: uint64_t vm_purgeable_purge_task_owned( - task_t task) + task_t task) { - purgeable_q_t queue = NULL; - int group = 0; - uint64_t num_pages_purged = 0; + purgeable_q_t queue = NULL; + int group = 0; + uint64_t num_pages_purged = 0; num_pages_purged = 0; queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE]; num_pages_purged += vm_purgeable_queue_purge_task_owned(queue, - 
0, - task); + 0, + task); queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]; - for (group = 0; group < NUM_VOLATILE_GROUPS; group++) + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { num_pages_purged += vm_purgeable_queue_purge_task_owned(queue, - group, - task); - + group, + task); + } + queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]; - for (group = 0; group < NUM_VOLATILE_GROUPS; group++) + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { num_pages_purged += vm_purgeable_queue_purge_task_owned(queue, - group, - task); + group, + task); + } return num_pages_purged; } void vm_purgeable_nonvolatile_enqueue( - vm_object_t object, - task_t owner) + vm_object_t object, + task_t owner) { vm_object_lock_assert_exclusive(object); @@ -1508,20 +1522,20 @@ vm_purgeable_nonvolatile_enqueue( } #if DEBUG OSBacktrace(&object->purgeable_owner_bt[0], - ARRAY_COUNT(object->purgeable_owner_bt)); + ARRAY_COUNT(object->purgeable_owner_bt)); object->vo_purgeable_volatilizer = NULL; #endif /* DEBUG */ vm_object_ownership_change(object, - object->vo_ledger_tag, /* tag unchanged */ - owner, - FALSE); /* task_objq_locked */ + object->vo_ledger_tag, /* tag unchanged */ + owner, + FALSE); /* task_objq_locked */ assert(object->objq.next == NULL); assert(object->objq.prev == NULL); queue_enter(&purgeable_nonvolatile_queue, object, - vm_object_t, objq); + vm_object_t, objq); assert(purgeable_nonvolatile_count >= 0); purgeable_nonvolatile_count++; assert(purgeable_nonvolatile_count > 0); @@ -1532,9 +1546,9 @@ vm_purgeable_nonvolatile_enqueue( void vm_purgeable_nonvolatile_dequeue( - vm_object_t object) + vm_object_t object) { - task_t owner; + task_t owner; vm_object_lock_assert_exclusive(object); @@ -1551,7 +1565,7 @@ vm_purgeable_nonvolatile_dequeue( assert(VM_OBJECT_OWNER(object) != kernel_task); vm_object_ownership_change( object, - object->vo_ledger_tag, /* unchanged */ + object->vo_ledger_tag, /* unchanged */ VM_OBJECT_OWNER_DISOWNED, /* new owner */ FALSE); /* old_owner->task_objq locked */ assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED); @@ -1561,7 +1575,7 @@ vm_purgeable_nonvolatile_dequeue( assert(object->objq.next != NULL); assert(object->objq.prev != NULL); queue_remove(&purgeable_nonvolatile_queue, object, - vm_object_t, objq); + vm_object_t, objq); object->objq.next = NULL; object->objq.prev = NULL; assert(purgeable_nonvolatile_count > 0); @@ -1574,40 +1588,41 @@ vm_purgeable_nonvolatile_dequeue( void vm_purgeable_accounting( - vm_object_t object, - vm_purgable_t old_state) + vm_object_t object, + vm_purgable_t old_state) { - task_t owner; - int resident_page_count; - int wired_page_count; - int compressed_page_count; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int ledger_idx_nonvolatile_compressed; - boolean_t do_footprint; + task_t owner; + int resident_page_count; + int wired_page_count; + int compressed_page_count; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + boolean_t do_footprint; vm_object_lock_assert_exclusive(object); assert(object->purgable != VM_PURGABLE_DENY); owner = VM_OBJECT_OWNER(object); if (owner == NULL || - object->purgable == VM_PURGABLE_DENY) + object->purgable == VM_PURGABLE_DENY) { return; + } vm_object_ledger_tag_ledgers(object, - &ledger_idx_volatile, - &ledger_idx_nonvolatile, - &ledger_idx_volatile_compressed, - &ledger_idx_nonvolatile_compressed, - &do_footprint); + &ledger_idx_volatile, + &ledger_idx_nonvolatile, 
+ &ledger_idx_volatile_compressed, + &ledger_idx_nonvolatile_compressed, + &do_footprint); resident_page_count = object->resident_page_count; wired_page_count = object->wired_page_count; if (VM_CONFIG_COMPRESSOR_IS_PRESENT && - object->pager != NULL) { + object->pager != NULL) { compressed_page_count = - vm_compressor_pager_get_count(object->pager); + vm_compressor_pager_get_count(object->pager); } else { compressed_page_count = 0; } @@ -1616,61 +1631,59 @@ vm_purgeable_accounting( old_state == VM_PURGABLE_EMPTY) { /* less volatile bytes in ledger */ ledger_debit(owner->ledger, - ledger_idx_volatile, - ptoa_64(resident_page_count - wired_page_count)); + ledger_idx_volatile, + ptoa_64(resident_page_count - wired_page_count)); /* less compressed volatile bytes in ledger */ ledger_debit(owner->ledger, - ledger_idx_volatile_compressed, - ptoa_64(compressed_page_count)); + ledger_idx_volatile_compressed, + ptoa_64(compressed_page_count)); /* more non-volatile bytes in ledger */ ledger_credit(owner->ledger, - ledger_idx_nonvolatile, - ptoa_64(resident_page_count - wired_page_count)); + ledger_idx_nonvolatile, + ptoa_64(resident_page_count - wired_page_count)); /* more compressed non-volatile bytes in ledger */ ledger_credit(owner->ledger, - ledger_idx_nonvolatile_compressed, - ptoa_64(compressed_page_count)); + ledger_idx_nonvolatile_compressed, + ptoa_64(compressed_page_count)); if (do_footprint) { /* more footprint */ ledger_credit(owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(resident_page_count - + compressed_page_count - - wired_page_count)); + task_ledgers.phys_footprint, + ptoa_64(resident_page_count + + compressed_page_count + - wired_page_count)); } - } else if (old_state == VM_PURGABLE_NONVOLATILE) { - /* less non-volatile bytes in ledger */ ledger_debit(owner->ledger, - ledger_idx_nonvolatile, - ptoa_64(resident_page_count - wired_page_count)); + ledger_idx_nonvolatile, + ptoa_64(resident_page_count - wired_page_count)); /* less compressed non-volatile bytes in ledger */ ledger_debit(owner->ledger, - ledger_idx_nonvolatile_compressed, - ptoa_64(compressed_page_count)); + ledger_idx_nonvolatile_compressed, + ptoa_64(compressed_page_count)); if (do_footprint) { /* less footprint */ ledger_debit(owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(resident_page_count - + compressed_page_count - - wired_page_count)); + task_ledgers.phys_footprint, + ptoa_64(resident_page_count + + compressed_page_count + - wired_page_count)); } /* more volatile bytes in ledger */ ledger_credit(owner->ledger, - ledger_idx_volatile, - ptoa_64(resident_page_count - wired_page_count)); + ledger_idx_volatile, + ptoa_64(resident_page_count - wired_page_count)); /* more compressed volatile bytes in ledger */ ledger_credit(owner->ledger, - ledger_idx_volatile_compressed, - ptoa_64(compressed_page_count)); + ledger_idx_volatile_compressed, + ptoa_64(compressed_page_count)); } else { panic("vm_purgeable_accounting(%p): " - "unexpected old_state=%d\n", - object, old_state); + "unexpected old_state=%d\n", + object, old_state); } vm_object_lock_assert_exclusive(object); @@ -1678,8 +1691,8 @@ vm_purgeable_accounting( void vm_purgeable_nonvolatile_owner_update( - task_t owner, - int delta) + task_t owner, + int delta) { if (owner == NULL || delta == 0) { return; @@ -1698,8 +1711,8 @@ vm_purgeable_nonvolatile_owner_update( void vm_purgeable_volatile_owner_update( - task_t owner, - int delta) + task_t owner, + int delta) { if (owner == NULL || delta == 0) { return; @@ -1718,15 +1731,15 @@ 
vm_purgeable_volatile_owner_update( void vm_object_owner_compressed_update( - vm_object_t object, - int delta) + vm_object_t object, + int delta) { - task_t owner; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int ledger_idx_nonvolatile_compressed; - boolean_t do_footprint; + task_t owner; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + boolean_t do_footprint; vm_object_lock_assert_exclusive(object); @@ -1735,41 +1748,41 @@ vm_object_owner_compressed_update( if (delta == 0 || !object->internal || (object->purgable == VM_PURGABLE_DENY && - ! object->vo_ledger_tag) || + !object->vo_ledger_tag) || owner == NULL) { /* not an owned purgeable (or tagged) VM object: nothing to update */ return; } - + vm_object_ledger_tag_ledgers(object, - &ledger_idx_volatile, - &ledger_idx_nonvolatile, - &ledger_idx_volatile_compressed, - &ledger_idx_nonvolatile_compressed, - &do_footprint); + &ledger_idx_volatile, + &ledger_idx_nonvolatile, + &ledger_idx_volatile_compressed, + &ledger_idx_nonvolatile_compressed, + &do_footprint); switch (object->purgable) { case VM_PURGABLE_DENY: /* not purgeable: must be ledger-tagged */ assert(object->vo_ledger_tag != VM_OBJECT_LEDGER_TAG_NONE); - /* fallthru */ + /* fallthru */ case VM_PURGABLE_NONVOLATILE: if (delta > 0) { ledger_credit(owner->ledger, - ledger_idx_nonvolatile_compressed, - ptoa_64(delta)); + ledger_idx_nonvolatile_compressed, + ptoa_64(delta)); if (do_footprint) { ledger_credit(owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(delta)); + task_ledgers.phys_footprint, + ptoa_64(delta)); } } else { ledger_debit(owner->ledger, - ledger_idx_nonvolatile_compressed, - ptoa_64(-delta)); + ledger_idx_nonvolatile_compressed, + ptoa_64(-delta)); if (do_footprint) { ledger_debit(owner->ledger, - task_ledgers.phys_footprint, - ptoa_64(-delta)); + task_ledgers.phys_footprint, + ptoa_64(-delta)); } } break; @@ -1777,17 +1790,17 @@ vm_object_owner_compressed_update( case VM_PURGABLE_EMPTY: if (delta > 0) { ledger_credit(owner->ledger, - ledger_idx_volatile_compressed, - ptoa_64(delta)); + ledger_idx_volatile_compressed, + ptoa_64(delta)); } else { ledger_debit(owner->ledger, - ledger_idx_volatile_compressed, - ptoa_64(-delta)); + ledger_idx_volatile_compressed, + ptoa_64(-delta)); } break; default: panic("vm_purgeable_compressed_update(): " - "unexpected purgable %d for object %p\n", - object->purgable, object); + "unexpected purgable %d for object %p\n", + object->purgable, object); } } diff --git a/osfmk/vm/vm_purgeable_internal.h b/osfmk/vm/vm_purgeable_internal.h index 5015ada14..f2599e771 100644 --- a/osfmk/vm/vm_purgeable_internal.h +++ b/osfmk/vm/vm_purgeable_internal.h @@ -2,14 +2,14 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. 
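
[Annotation] A note on the reindented ledger code in the vm_purgeable_accounting() hunks above: every state transition moves bytes between ledger pairs in lockstep — debit (volatile, volatile_compressed), credit (nonvolatile, nonvolatile_compressed) — and, when the ledger tag contributes to footprint, adjusts phys_footprint by resident + compressed - wired pages. A compact model of the volatile-to-nonvolatile direction; struct ledgers is hypothetical, where the kernel calls ledger_credit()/ledger_debit() against per-task ledgers:

	#include <stdint.h>

	struct ledgers {
		int64_t vol, nonvol, vol_comp, nonvol_comp, footprint;
	};

	static void
	became_nonvolatile(struct ledgers *l, int64_t resident, int64_t wired,
	    int64_t compressed, int64_t page_size, int do_footprint)
	{
		int64_t bytes  = (resident - wired) * page_size;
		int64_t cbytes = compressed * page_size;

		l->vol         -= bytes;        /* less volatile                */
		l->vol_comp    -= cbytes;       /* less compressed volatile     */
		l->nonvol      += bytes;        /* more non-volatile            */
		l->nonvol_comp += cbytes;       /* more compressed non-volatile */
		if (do_footprint) {             /* wired pages already counted  */
			l->footprint += (resident + compressed - wired) * page_size;
		}
	}
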
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,13 +17,13 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ /* * Purgeable spelling rules - * It is believed that the correct spelling is + * It is believed that the correct spelling is * { 'p', 'u', 'r', 'g', 'e', 'a', 'b', 'l', 'e' }. * However, there is one published API that likes to spell it without the * first 'e', vm_purgable_control(). Since we can't change that API, @@ -82,7 +82,7 @@ extern int available_for_purge; * mostly used on a user context and we don't want any contention with the * pageout daemon. */ -decl_lck_mtx_data(extern,vm_purgeable_queue_lock) +decl_lck_mtx_data(extern, vm_purgeable_queue_lock) /* add a new token to queue. called by vm_object_purgeable_control */ /* enter with page queue locked */ @@ -122,21 +122,21 @@ kern_return_t vm_purgeable_account(task_t task, pvm_account_info_t acnt_info); uint64_t vm_purgeable_purge_task_owned(task_t task); void vm_purgeable_nonvolatile_enqueue(vm_object_t object, task_t task); void vm_purgeable_nonvolatile_dequeue(vm_object_t object); -void vm_purgeable_accounting(vm_object_t object, - vm_purgable_t old_state); -void vm_object_owner_compressed_update(vm_object_t object, - int delta); +void vm_purgeable_accounting(vm_object_t object, + vm_purgable_t old_state); +void vm_object_owner_compressed_update(vm_object_t object, + int delta); #define PURGEABLE_LOOP_MAX 64 -#define TOKEN_ADD 0x40 /* 0x100 */ -#define TOKEN_DELETE 0x41 /* 0x104 */ -#define TOKEN_RIPEN 0x42 /* 0x108 */ -#define OBJECT_ADD 0x48 /* 0x120 */ -#define OBJECT_REMOVE 0x49 /* 0x124 */ -#define OBJECT_PURGE 0x4a /* 0x128 */ -#define OBJECT_PURGE_ALL 0x4b /* 0x12c */ -#define OBJECT_PURGE_ONE 0x4c /* 0x12d */ -#define OBJECT_PURGE_LOOP 0x4e /* 0x12e */ +#define TOKEN_ADD 0x40 /* 0x100 */ +#define TOKEN_DELETE 0x41 /* 0x104 */ +#define TOKEN_RIPEN 0x42 /* 0x108 */ +#define OBJECT_ADD 0x48 /* 0x120 */ +#define OBJECT_REMOVE 0x49 /* 0x124 */ +#define OBJECT_PURGE 0x4a /* 0x128 */ +#define OBJECT_PURGE_ALL 0x4b /* 0x12c */ +#define OBJECT_PURGE_ONE 0x4c /* 0x12d */ +#define OBJECT_PURGE_LOOP 0x4e /* 0x12e */ #endif /* __VM_PURGEABLE_INTERNAL__ */ diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c index 860bde4d5..4cdb91692 100644 --- a/osfmk/vm/vm_resident.c +++ b/osfmk/vm/vm_resident.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
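
[Annotation] On the trace codes at the end of the vm_purgeable_internal.h hunk above: each hex comment is the code value shifted left by two, which matches the usual kdebug event-ID layout where the low two bits carry DBG_FUNC_START/DBG_FUNC_END (schematically, debugid = class << 24 | subclass << 16 | code << 2). Checking the pattern:

	TOKEN_ADD     0x40  ->  0x40 << 2 == 0x100
	TOKEN_DELETE  0x41  ->  0x41 << 2 == 0x104
	OBJECT_ADD    0x48  ->  0x48 << 2 == 0x120
	OBJECT_PURGE  0x4a  ->  0x4a << 2 == 0x128

The last two comments do not fit the pattern (0x4c << 2 is 0x130, not 0x12d; 0x4e << 2 is 0x138, not 0x12e) and look stale in the upstream source; the diff carries them over unchanged, which is correct for a whitespace-only pass.
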
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -84,7 +84,7 @@ #include #include #include -#include /* kernel_memory_allocate() */ +#include /* kernel_memory_allocate() */ #include #include #include @@ -106,47 +106,56 @@ #include +#if MACH_ASSERT + +#define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem)) + +#else /* MACH_ASSERT */ + +#define ASSERT_PMAP_FREE(mem) /* nothing */ -char vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; -char vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; -char vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; -char vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; +#endif /* MACH_ASSERT */ + +char vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; +char vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; +char vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; +char vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE]; #if CONFIG_SECLUDED_MEMORY struct vm_page_secluded_data vm_page_secluded; void secluded_suppression_init(void); #endif /* CONFIG_SECLUDED_MEMORY */ -boolean_t hibernate_cleaning_in_progress = FALSE; -boolean_t vm_page_free_verify = TRUE; +boolean_t hibernate_cleaning_in_progress = FALSE; +boolean_t vm_page_free_verify = TRUE; -uint32_t vm_lopage_free_count = 0; -uint32_t vm_lopage_free_limit = 0; -uint32_t vm_lopage_lowater = 0; -boolean_t vm_lopage_refill = FALSE; -boolean_t vm_lopage_needed = FALSE; +uint32_t vm_lopage_free_count = 0; +uint32_t vm_lopage_free_limit = 0; +uint32_t vm_lopage_lowater = 0; +boolean_t vm_lopage_refill = FALSE; +boolean_t vm_lopage_needed = FALSE; -lck_mtx_ext_t vm_page_queue_lock_ext; -lck_mtx_ext_t vm_page_queue_free_lock_ext; -lck_mtx_ext_t vm_purgeable_queue_lock_ext; +lck_mtx_ext_t vm_page_queue_lock_ext; +lck_mtx_ext_t vm_page_queue_free_lock_ext; +lck_mtx_ext_t vm_purgeable_queue_lock_ext; -int speculative_age_index = 0; -int speculative_steal_index = 0; +int speculative_age_index = 0; +int speculative_steal_index = 0; struct vm_speculative_age_q 
vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1]; -boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues. - * Updated and checked behind the vm_page_queues_lock. */ +boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues. + * Updated and checked behind the vm_page_queues_lock. */ -__private_extern__ void vm_page_init_lck_grp(void); +__private_extern__ void vm_page_init_lck_grp(void); -static void vm_page_free_prepare(vm_page_t page); -static vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr); +static void vm_page_free_prepare(vm_page_t page); +static vm_page_t vm_page_grab_fictitious_common(ppnum_t phys_addr); static void vm_tag_init(void); -uint64_t vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS; -uint32_t vm_packed_from_vm_pages_array_mask = VM_PACKED_FROM_VM_PAGES_ARRAY; -uint32_t vm_packed_pointer_shift = VM_PACKED_POINTER_SHIFT; +uint64_t vm_min_kernel_and_kext_address = VM_MIN_KERNEL_AND_KEXT_ADDRESS; +uint32_t vm_packed_from_vm_pages_array_mask = VM_PACKED_FROM_VM_PAGES_ARRAY; +uint32_t vm_packed_pointer_shift = VM_PACKED_POINTER_SHIFT; /* * Associated with page of user-allocatable memory is a @@ -161,7 +170,7 @@ uint32_t vm_packed_pointer_shift = VM_PACKED_POINTER_SHIFT; vm_offset_t virtual_space_start; vm_offset_t virtual_space_end; -uint32_t vm_page_pages; +uint32_t vm_page_pages; /* * The vm_page_lookup() routine, which provides for fast @@ -173,21 +182,21 @@ uint32_t vm_page_pages; */ typedef struct { vm_page_packed_t page_list; -#if MACH_PAGE_HASH_STATS - int cur_count; /* current count */ - int hi_count; /* high water mark */ +#if MACH_PAGE_HASH_STATS + int cur_count; /* current count */ + int hi_count; /* high water mark */ #endif /* MACH_PAGE_HASH_STATS */ } vm_page_bucket_t; -#define BUCKETS_PER_LOCK 16 +#define BUCKETS_PER_LOCK 16 -vm_page_bucket_t *vm_page_buckets; /* Array of buckets */ -unsigned int vm_page_bucket_count = 0; /* How big is array? */ -unsigned int vm_page_hash_mask; /* Mask for hash function */ -unsigned int vm_page_hash_shift; /* Shift for hash function */ -uint32_t vm_page_bucket_hash; /* Basic bucket hash */ -unsigned int vm_page_bucket_lock_count = 0; /* How big is array of locks? */ +vm_page_bucket_t *vm_page_buckets; /* Array of buckets */ +unsigned int vm_page_bucket_count = 0; /* How big is array? */ +unsigned int vm_page_hash_mask; /* Mask for hash function */ +unsigned int vm_page_hash_shift; /* Shift for hash function */ +uint32_t vm_page_bucket_hash; /* Basic bucket hash */ +unsigned int vm_page_bucket_lock_count = 0; /* How big is array of locks? */ #ifndef VM_TAG_ACTIVE_UPDATE #error VM_TAG_ACTIVE_UPDATE @@ -197,9 +206,9 @@ unsigned int vm_page_bucket_lock_count = 0; /* How big is array of locks? 
*/ #endif boolean_t vm_tag_active_update = VM_TAG_ACTIVE_UPDATE; -lck_spin_t *vm_page_bucket_locks; -lck_spin_t vm_objects_wired_lock; -lck_spin_t vm_allocation_sites_lock; +lck_spin_t *vm_page_bucket_locks; +lck_spin_t vm_objects_wired_lock; +lck_spin_t vm_allocation_sites_lock; vm_allocation_site_t vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1]; vm_allocation_site_t * vm_allocation_sites[VM_MAX_TAG_VALUE]; @@ -212,14 +221,14 @@ vm_tag_t vm_allocation_tag_highest; #if VM_PAGE_BUCKETS_CHECK boolean_t vm_page_buckets_check_ready = FALSE; #if VM_PAGE_FAKE_BUCKETS -vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */ +vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */ vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end; #endif /* VM_PAGE_FAKE_BUCKETS */ #endif /* VM_PAGE_BUCKETS_CHECK */ -#if MACH_PAGE_HASH_STATS +#if MACH_PAGE_HASH_STATS /* This routine is only for debug. It is intended to be called by * hand by a developer using a kernel debugger. This routine prints * out vm_page_hash table statistics to the kernel debug console. @@ -227,29 +236,30 @@ vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end; void hash_debug(void) { - int i; - int numbuckets = 0; - int highsum = 0; - int maxdepth = 0; + int i; + int numbuckets = 0; + int highsum = 0; + int maxdepth = 0; for (i = 0; i < vm_page_bucket_count; i++) { if (vm_page_buckets[i].hi_count) { numbuckets++; highsum += vm_page_buckets[i].hi_count; - if (vm_page_buckets[i].hi_count > maxdepth) + if (vm_page_buckets[i].hi_count > maxdepth) { maxdepth = vm_page_buckets[i].hi_count; + } } } printf("Total number of buckets: %d\n", vm_page_bucket_count); printf("Number used buckets: %d = %d%%\n", - numbuckets, 100*numbuckets/vm_page_bucket_count); + numbuckets, 100 * numbuckets / vm_page_bucket_count); printf("Number unused buckets: %d = %d%%\n", - vm_page_bucket_count - numbuckets, - 100*(vm_page_bucket_count-numbuckets)/vm_page_bucket_count); + vm_page_bucket_count - numbuckets, + 100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count); printf("Sum of bucket max depth: %d\n", highsum); printf("Average bucket depth: %d.%2d\n", - highsum/vm_page_bucket_count, - highsum%vm_page_bucket_count); + highsum / vm_page_bucket_count, + highsum % vm_page_bucket_count); printf("Maximum bucket depth: %d\n", maxdepth); } #endif /* MACH_PAGE_HASH_STATS */ @@ -266,57 +276,45 @@ hash_debug(void) * constants. */ #if defined(__arm__) || defined(__arm64__) -vm_size_t page_size; -vm_size_t page_mask; -int page_shift; +vm_size_t page_size; +vm_size_t page_mask; +int page_shift; #else -vm_size_t page_size = PAGE_SIZE; -vm_size_t page_mask = PAGE_MASK; -int page_shift = PAGE_SHIFT; +vm_size_t page_size = PAGE_SIZE; +vm_size_t page_mask = PAGE_MASK; +int page_shift = PAGE_SHIFT; #endif -/* - * Resident page structures are initialized from - * a template (see vm_page_alloc). - * - * When adding a new field to the virtual memory - * object structure, be sure to add initialization - * (see vm_page_bootstrap). 
- */ -struct vm_page vm_page_template; - -vm_page_t vm_pages = VM_PAGE_NULL; -vm_page_t vm_page_array_beginning_addr; -vm_page_t vm_page_array_ending_addr; -vm_page_t vm_page_array_boundary; +vm_page_t vm_pages = VM_PAGE_NULL; +vm_page_t vm_page_array_beginning_addr; +vm_page_t vm_page_array_ending_addr; -unsigned int vm_pages_count = 0; -ppnum_t vm_page_lowest = 0; +unsigned int vm_pages_count = 0; /* * Resident pages that represent real memory * are allocated from a set of free lists, * one per color. */ -unsigned int vm_colors; -unsigned int vm_color_mask; /* mask is == (vm_colors-1) */ -unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */ -unsigned int vm_free_magazine_refill_limit = 0; +unsigned int vm_colors; +unsigned int vm_color_mask; /* mask is == (vm_colors-1) */ +unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */ +unsigned int vm_free_magazine_refill_limit = 0; struct vm_page_queue_free_head { - vm_page_queue_head_t qhead; + vm_page_queue_head_t qhead; } __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS]; +struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS]; -unsigned int vm_page_free_wanted; -unsigned int vm_page_free_wanted_privileged; +unsigned int vm_page_free_wanted; +unsigned int vm_page_free_wanted_privileged; #if CONFIG_SECLUDED_MEMORY -unsigned int vm_page_free_wanted_secluded; +unsigned int vm_page_free_wanted_secluded; #endif /* CONFIG_SECLUDED_MEMORY */ -unsigned int vm_page_free_count; +unsigned int vm_page_free_count; /* * Occasionally, the virtual memory system uses @@ -327,15 +325,15 @@ unsigned int vm_page_free_count; * These page structures are allocated the way * most other kernel structures are. */ -zone_t vm_page_array_zone; -zone_t vm_page_zone; +zone_t vm_page_array_zone; +zone_t vm_page_zone; vm_locks_array_t vm_page_locks; -decl_lck_mtx_data(,vm_page_alloc_lock) +decl_lck_mtx_data(, vm_page_alloc_lock) lck_mtx_ext_t vm_page_alloc_lock_ext; -unsigned int vm_page_local_q_count = 0; -unsigned int vm_page_local_q_soft_limit = 250; -unsigned int vm_page_local_q_hard_limit = 500; +unsigned int vm_page_local_q_count = 0; +unsigned int vm_page_local_q_soft_limit = 250; +unsigned int vm_page_local_q_hard_limit = 500; struct vplq *vm_page_local_q = NULL; /* N.B. Guard and fictitious pages must not @@ -351,7 +349,7 @@ const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1; /* * Guard pages are not accessible so they don't - * need a physical address, but we need to enter + * need a physical address, but we need to enter * one in the pmap. * Let's make it recognizable and make sure that * we don't use a real physical page with that @@ -364,80 +362,79 @@ const ppnum_t vm_page_guard_addr = (ppnum_t) -2; * queues that are used by the page replacement * system (pageout daemon). These queues are * defined here, but are shared by the pageout - * module. The inactive queue is broken into - * file backed and anonymous for convenience as the - * pageout daemon often assignes a higher + * module. 
The inactive queue is broken into + * file backed and anonymous for convenience as the + * pageout daemon often assignes a higher * importance to anonymous pages (less likely to pick) */ -vm_page_queue_head_t vm_page_queue_active __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -vm_page_queue_head_t vm_page_queue_inactive __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_active __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_inactive __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); #if CONFIG_SECLUDED_MEMORY -vm_page_queue_head_t vm_page_queue_secluded __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_secluded __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); #endif /* CONFIG_SECLUDED_MEMORY */ -vm_page_queue_head_t vm_page_queue_anonymous __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); /* inactive memory queue for anonymous pages */ -vm_page_queue_head_t vm_page_queue_throttled __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +vm_page_queue_head_t vm_page_queue_anonymous __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); /* inactive memory queue for anonymous pages */ +vm_page_queue_head_t vm_page_queue_throttled __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -queue_head_t vm_objects_wired; +queue_head_t vm_objects_wired; void vm_update_darkwake_mode(boolean_t); #if CONFIG_BACKGROUND_QUEUE -vm_page_queue_head_t vm_page_queue_background __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -uint32_t vm_page_background_target; -uint32_t vm_page_background_target_snapshot; -uint32_t vm_page_background_count; -uint64_t vm_page_background_promoted_count; +vm_page_queue_head_t vm_page_queue_background __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +uint32_t vm_page_background_target; +uint32_t vm_page_background_target_snapshot; +uint32_t vm_page_background_count; +uint64_t vm_page_background_promoted_count; -uint32_t vm_page_background_internal_count; -uint32_t vm_page_background_external_count; +uint32_t vm_page_background_internal_count; +uint32_t vm_page_background_external_count; -uint32_t vm_page_background_mode; -uint32_t vm_page_background_exclude_external; +uint32_t vm_page_background_mode; +uint32_t vm_page_background_exclude_external; #endif -unsigned int vm_page_active_count; -unsigned int vm_page_inactive_count; +unsigned int vm_page_active_count; +unsigned int vm_page_inactive_count; #if CONFIG_SECLUDED_MEMORY -unsigned int vm_page_secluded_count; -unsigned int vm_page_secluded_count_free; -unsigned int vm_page_secluded_count_inuse; +unsigned int vm_page_secluded_count; +unsigned int vm_page_secluded_count_free; +unsigned int vm_page_secluded_count_inuse; #endif /* CONFIG_SECLUDED_MEMORY */ -unsigned int vm_page_anonymous_count; -unsigned int vm_page_throttled_count; -unsigned int vm_page_speculative_count; - -unsigned int vm_page_wire_count; -unsigned int vm_page_wire_count_on_boot = 0; -unsigned int vm_page_stolen_count; -unsigned int vm_page_wire_count_initial; -unsigned int vm_page_pages_initial; -unsigned int vm_page_gobble_count = 0; - -#define VM_PAGE_WIRE_COUNT_WARNING 0 -#define VM_PAGE_GOBBLE_COUNT_WARNING 0 - -unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */ -unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */ -uint64_t vm_page_purged_count = 0; /* total count of purged pages */ - -unsigned int vm_page_xpmapped_external_count = 0; 
-unsigned int vm_page_external_count = 0; -unsigned int vm_page_internal_count = 0; -unsigned int vm_page_pageable_external_count = 0; -unsigned int vm_page_pageable_internal_count = 0; +unsigned int vm_page_anonymous_count; +unsigned int vm_page_throttled_count; +unsigned int vm_page_speculative_count; + +unsigned int vm_page_wire_count; +unsigned int vm_page_wire_count_on_boot = 0; +unsigned int vm_page_stolen_count = 0; +unsigned int vm_page_wire_count_initial; +unsigned int vm_page_gobble_count = 0; + +#define VM_PAGE_WIRE_COUNT_WARNING 0 +#define VM_PAGE_GOBBLE_COUNT_WARNING 0 + +unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */ +unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */ +uint64_t vm_page_purged_count = 0; /* total count of purged pages */ + +unsigned int vm_page_xpmapped_external_count = 0; +unsigned int vm_page_external_count = 0; +unsigned int vm_page_internal_count = 0; +unsigned int vm_page_pageable_external_count = 0; +unsigned int vm_page_pageable_internal_count = 0; #if DEVELOPMENT || DEBUG -unsigned int vm_page_speculative_recreated = 0; -unsigned int vm_page_speculative_created = 0; -unsigned int vm_page_speculative_used = 0; +unsigned int vm_page_speculative_recreated = 0; +unsigned int vm_page_speculative_created = 0; +unsigned int vm_page_speculative_used = 0; #endif vm_page_queue_head_t vm_page_queue_cleaned __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); -unsigned int vm_page_cleaned_count = 0; +unsigned int vm_page_cleaned_count = 0; -uint64_t max_valid_dma_address = 0xffffffffffffffffULL; -ppnum_t max_valid_low_ppnum = 0xffffffff; +uint64_t max_valid_dma_address = 0xffffffffffffffffULL; +ppnum_t max_valid_low_ppnum = PPNUM_MAX; /* @@ -446,15 +443,15 @@ ppnum_t max_valid_low_ppnum = 0xffffffff; * (done here in vm_page_alloc) can trigger the * pageout daemon. 
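 *
 * A minimal sketch of the trigger this comment describes, assuming the
 * counters and targets declared below; the real policy lives in
 * vm_page_grab() / vm_pageout_scan() and is considerably more involved:
 *
 *	if (vm_page_free_count < vm_page_free_min ||
 *	    (vm_page_free_count < vm_page_free_target &&
 *	     vm_page_inactive_count < vm_page_inactive_target))
 *		thread_wakeup((event_t) &vm_page_free_wanted);
 *
 * i.e. an allocation that drags the free count below its watermarks
 * wakes the pageout daemon to start reclaiming pages.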
*/ -unsigned int vm_page_free_target = 0; -unsigned int vm_page_free_min = 0; -unsigned int vm_page_throttle_limit = 0; -unsigned int vm_page_inactive_target = 0; +unsigned int vm_page_free_target = 0; +unsigned int vm_page_free_min = 0; +unsigned int vm_page_throttle_limit = 0; +unsigned int vm_page_inactive_target = 0; #if CONFIG_SECLUDED_MEMORY -unsigned int vm_page_secluded_target = 0; +unsigned int vm_page_secluded_target = 0; #endif /* CONFIG_SECLUDED_MEMORY */ -unsigned int vm_page_anonymous_min = 0; -unsigned int vm_page_free_reserved = 0; +unsigned int vm_page_anonymous_min = 0; +unsigned int vm_page_free_reserved = 0; /* @@ -468,7 +465,7 @@ unsigned int vm_page_free_reserved = 0; boolean_t vm_page_deactivate_hint = TRUE; struct vm_page_stats_reusable vm_page_stats_reusable; - + /* * vm_set_page_size: * @@ -485,12 +482,15 @@ vm_set_page_size(void) page_mask = PAGE_MASK; page_shift = PAGE_SHIFT; - if ((page_mask & page_size) != 0) + if ((page_mask & page_size) != 0) { panic("vm_set_page_size: page size not a power of two"); + } - for (page_shift = 0; ; page_shift++) - if ((1U << page_shift) == page_size) + for (page_shift = 0;; page_shift++) { + if ((1U << page_shift) == page_size) { break; + } + } } #if defined (__x86_64__) @@ -501,13 +501,17 @@ vm_set_page_size(void) unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold; #if DEVELOPMENT || DEBUG -unsigned long vm_clump_stats[MAX_CLUMP_SIZE+1]; +unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1]; unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes; -static inline void vm_clump_update_stats(unsigned int c) { - assert(c<=vm_clump_size); - if(c>0 && c<=vm_clump_size) vm_clump_stats[c]+=c; - vm_clump_allocs+=c; +static inline void +vm_clump_update_stats(unsigned int c) +{ + assert(c <= vm_clump_size); + if (c > 0 && c <= vm_clump_size) { + vm_clump_stats[c] += c; + } + vm_clump_allocs += c; } #endif /* if DEVELOPMENT || DEBUG */ @@ -515,76 +519,298 @@ static inline void vm_clump_update_stats(unsigned int c) { static void vm_page_setup_clump( void ) { - unsigned int override, n; + unsigned int override, n; - vm_clump_size = DEFAULT_CLUMP_SIZE; - if ( PE_parse_boot_argn("clump_size", &override, sizeof (override)) ) vm_clump_size = override; + vm_clump_size = DEFAULT_CLUMP_SIZE; + if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) { + vm_clump_size = override; + } - if(vm_clump_size > MAX_CLUMP_SIZE) panic("vm_page_setup_clump:: clump_size is too large!"); - if(vm_clump_size < 1) panic("vm_page_setup_clump:: clump_size must be >= 1"); - if((vm_clump_size & (vm_clump_size-1)) != 0) panic("vm_page_setup_clump:: clump_size must be a power of 2"); + if (vm_clump_size > MAX_CLUMP_SIZE) { + panic("vm_page_setup_clump:: clump_size is too large!"); + } + if (vm_clump_size < 1) { + panic("vm_page_setup_clump:: clump_size must be >= 1"); + } + if ((vm_clump_size & (vm_clump_size - 1)) != 0) { + panic("vm_page_setup_clump:: clump_size must be a power of 2"); + } - vm_clump_promote_threshold = vm_clump_size; - vm_clump_mask = vm_clump_size - 1; - for(vm_clump_shift=0, n=vm_clump_size; n>1; n>>=1, vm_clump_shift++); + vm_clump_promote_threshold = vm_clump_size; + vm_clump_mask = vm_clump_size - 1; + for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) { + ; + } #if DEVELOPMENT || DEBUG - bzero(vm_clump_stats, sizeof(vm_clump_stats)); - vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0; + bzero(vm_clump_stats, 
sizeof(vm_clump_stats)); + vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0; #endif /* if DEVELOPMENT || DEBUG */ } -#endif /* #if defined (__x86_64__) */ +#endif /* #if defined (__x86_64__) */ -#define COLOR_GROUPS_TO_STEAL 4 +#define COLOR_GROUPS_TO_STEAL 4 /* Called once during statup, once the cache geometry is known. */ static void vm_page_set_colors( void ) { - unsigned int n, override; + unsigned int n, override; -#if defined (__x86_64__) +#if defined (__x86_64__) /* adjust #colors because we need to color outside the clump boundary */ vm_cache_geometry_colors >>= vm_clump_shift; #endif - if ( PE_parse_boot_argn("colors", &override, sizeof (override)) ) /* colors specified as a boot-arg? */ - n = override; - else if ( vm_cache_geometry_colors ) /* do we know what the cache geometry is? */ + if (PE_parse_boot_argn("colors", &override, sizeof(override))) { /* colors specified as a boot-arg? */ + n = override; + } else if (vm_cache_geometry_colors) { /* do we know what the cache geometry is? */ n = vm_cache_geometry_colors; - else n = DEFAULT_COLORS; /* use default if all else fails */ - - if ( n == 0 ) + } else { + n = DEFAULT_COLORS; /* use default if all else fails */ + } + if (n == 0) { n = 1; - if ( n > MAX_COLORS ) + } + if (n > MAX_COLORS) { n = MAX_COLORS; - + } + /* the count must be a power of 2 */ - if ( ( n & (n - 1)) != 0 ) - n = DEFAULT_COLORS; /* use default if all else fails */ - + if ((n & (n - 1)) != 0) { + n = DEFAULT_COLORS; /* use default if all else fails */ + } vm_colors = n; vm_color_mask = n - 1; vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL; #if defined (__x86_64__) - /* adjust for reduction in colors due to clumping and multiple cores */ - if (real_ncpus) + /* adjust for reduction in colors due to clumping and multiple cores */ + if (real_ncpus) { vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus); + } +#endif +} + +/* + * During single threaded early boot we don't initialize all pages. + * This avoids some delay during boot. They'll be initialized and + * added to the free list as needed or after we are multithreaded by + * what becomes the pageout thread. + */ +static boolean_t fill = FALSE; +static unsigned int fillval; +uint_t vm_delayed_count = 0; /* when non-zero, indicates we may have more pages to init */ +ppnum_t delay_above_pnum = PPNUM_MAX; + +/* + * For x86 first 8 Gig initializes quickly and gives us lots of lowmem + mem above to start off with. + * If ARM ever uses delayed page initialization, this value may need to be quite different. + */ +#define DEFAULT_DELAY_ABOVE_PHYS_GB (8) + +/* + * When we have to dip into more delayed pages due to low memory, free up + * a large chunk to get things back to normal. This avoids contention on the + * delayed code allocating page by page. + */ +#define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE) + +/* + * Get and initialize the next delayed page. + */ +static vm_page_t +vm_get_delayed_page(int grab_options) +{ + vm_page_t p; + ppnum_t pnum; + + /* + * Get a new page if we have one. 
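 *
 * (An assumption from the surrounding code, not stated in the patch:
 * callers such as vm_page_grab() reach this fallback only once the
 * regular free lists have been drained, so the common allocation
 * path never pays for the delayed initialization.)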
+ */ + lck_mtx_lock(&vm_page_queue_free_lock); + if (vm_delayed_count == 0) { + lck_mtx_unlock(&vm_page_queue_free_lock); + return NULL; + } + if (!pmap_next_page(&pnum)) { + vm_delayed_count = 0; + lck_mtx_unlock(&vm_page_queue_free_lock); + return NULL; + } + + assert(vm_delayed_count > 0); + --vm_delayed_count; + + p = &vm_pages[vm_pages_count]; + assert(p < vm_page_array_ending_addr); + vm_page_init(p, pnum, FALSE); + ++vm_pages_count; + ++vm_page_pages; + lck_mtx_unlock(&vm_page_queue_free_lock); + + /* + * These pages were initially counted as wired, undo that now. + */ + if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) { + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + } else { + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); + vm_page_lockspin_queues(); + } + --vm_page_wire_count; + --vm_page_wire_count_initial; + if (vm_page_wire_count_on_boot != 0) { + --vm_page_wire_count_on_boot; + } + if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) { + vm_page_unlock_queues(); + } + + + if (fill) { + fillPage(pnum, fillval); + } + return p; +} + +static void vm_page_module_init_delayed(void); + +/* + * Free all remaining delayed pages to the free lists. + */ +void +vm_free_delayed_pages(void) +{ + vm_page_t p; + vm_page_t list = NULL; + uint_t cnt = 0; + vm_offset_t start_free_page; + vm_size_t free_size; + + while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) { + if (vm_himemory_mode) { + vm_page_release(p, FALSE); + } else { + p->vmp_snext = list; + list = p; + } + ++cnt; + } + + /* + * Free the pages in reverse order if not himemory mode. + * Hence the low memory pages will be first on free lists. (LIFO) + */ + while (list != NULL) { + p = list; + list = p->vmp_snext; + p->vmp_snext = NULL; + vm_page_release(p, FALSE); + } +#if DEVELOPMENT || DEBUG + kprintf("vm_free_delayed_pages: freed %d pages\n", cnt); +#endif + + /* + * Free up any unused full pages at the end of the vm_pages[] array + */ + start_free_page = round_page((vm_offset_t)&vm_pages[vm_pages_count]); + if (start_free_page < (vm_offset_t)vm_page_array_ending_addr) { + free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_page); + if (free_size > 0) { +#if DEVELOPMENT || DEBUG + kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n", + (long)free_size, (long)start_free_page); #endif + pmap_pv_fixup(start_free_page, free_size); + ml_static_mfree(start_free_page, free_size); + vm_page_array_ending_addr = (void *)start_free_page; + + /* + * Note there's no locking here, as only this thread will ever change this value. + * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at. + */ + --vm_page_stolen_count; + } + } + + + /* + * now we can create the VM page array zone + */ + vm_page_module_init_delayed(); +} + +/* + * Try and free up enough delayed pages to match a contig memory allocation. + */ +static void +vm_free_delayed_pages_contig( + uint_t npages, + ppnum_t max_pnum, + ppnum_t pnum_mask) +{ + vm_page_t p; + ppnum_t pnum; + uint_t cnt = 0; + + /* + * Treat 0 as the absolute max page number. + */ + if (max_pnum == 0) { + max_pnum = PPNUM_MAX; + } + + /* + * Free till we get a properly aligned start page + */ + for (;;) { + p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE); + if (p == NULL) { + return; + } + pnum = VM_PAGE_GET_PHYS_PAGE(p); + vm_page_release(p, FALSE); + if (pnum >= max_pnum) { + return; + } + if ((pnum & pnum_mask) == 0) { + break; + } + } + + /* + * Having a healthy pool of free pages will help performance. 
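 * (Worked example, assuming 4 KiB pages: VM_DELAY_PAGE_CHUNK above is
 * (1024 * 1024 * 1024) / 4096 = 262,144 pages, i.e. a gigabyte's worth
 * of frames is initialized and freed in one batch.)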
We don't + * want to fall back to the delayed code for every page allocation. + */ + if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) { + npages += VM_DELAY_PAGE_CHUNK; + } + + /* + * Now free up the pages + */ + for (cnt = 1; cnt < npages; ++cnt) { + p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE); + if (p == NULL) { + return; + } + vm_page_release(p, FALSE); + } } -lck_grp_t vm_page_lck_grp_free; -lck_grp_t vm_page_lck_grp_queue; -lck_grp_t vm_page_lck_grp_local; -lck_grp_t vm_page_lck_grp_purge; -lck_grp_t vm_page_lck_grp_alloc; -lck_grp_t vm_page_lck_grp_bucket; -lck_grp_attr_t vm_page_lck_grp_attr; -lck_attr_t vm_page_lck_attr; +lck_grp_t vm_page_lck_grp_free; +lck_grp_t vm_page_lck_grp_queue; +lck_grp_t vm_page_lck_grp_local; +lck_grp_t vm_page_lck_grp_purge; +lck_grp_t vm_page_lck_grp_alloc; +lck_grp_t vm_page_lck_grp_bucket; +lck_grp_attr_t vm_page_lck_grp_attr; +lck_attr_t vm_page_lck_attr; __private_extern__ void @@ -611,9 +837,9 @@ vm_page_init_lck_grp(void) void vm_page_init_local_q() { - unsigned int num_cpus; - unsigned int i; - struct vplq *t_local_q; + unsigned int num_cpus; + unsigned int i; + struct vplq *t_local_q; num_cpus = ml_get_max_cpus(); @@ -625,14 +851,14 @@ vm_page_init_local_q() /* KASAN breaks the expectation of a size-aligned object by adding a * redzone, so explicitly align. */ t_local_q = (struct vplq *)kalloc(num_cpus * sizeof(struct vplq) + VM_PACKED_POINTER_ALIGNMENT); - t_local_q = (void *)(((uintptr_t)t_local_q + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT-1)); + t_local_q = (void *)(((uintptr_t)t_local_q + (VM_PACKED_POINTER_ALIGNMENT - 1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1)); #else /* round the size up to the nearest power of two */ t_local_q = (struct vplq *)kalloc(ROUNDUP_NEXTP2(num_cpus * sizeof(struct vplq))); #endif for (i = 0; i < num_cpus; i++) { - struct vpl *lq; + struct vpl *lq; lq = &t_local_q[i].vpl_un.vpl; VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr); @@ -655,7 +881,9 @@ vm_page_init_local_q() void vm_init_before_launchd() { + vm_page_lockspin_queues(); vm_page_wire_count_on_boot = vm_page_wire_count; + vm_page_unlock_queues(); } @@ -672,90 +900,31 @@ vm_init_before_launchd() void vm_page_bootstrap( - vm_offset_t *startp, - vm_offset_t *endp) + vm_offset_t *startp, + vm_offset_t *endp) { - vm_page_t m; - unsigned int i; - unsigned int log1; - unsigned int log2; - unsigned int size; - - /* - * Initialize the vm_page template. 
- */ - - m = &vm_page_template; - bzero(m, sizeof (*m)); - -#if CONFIG_BACKGROUND_QUEUE - m->vmp_backgroundq.next = 0; - m->vmp_backgroundq.prev = 0; - m->vmp_in_background = FALSE; - m->vmp_on_backgroundq = FALSE; -#endif - - VM_PAGE_ZERO_PAGEQ_ENTRY(m); - m->vmp_listq.next = 0; - m->vmp_listq.prev = 0; - m->vmp_next_m = 0; - - m->vmp_object = 0; /* reset later */ - m->vmp_offset = (vm_object_offset_t) -1; /* reset later */ - - m->vmp_wire_count = 0; - m->vmp_q_state = VM_PAGE_NOT_ON_Q; - m->vmp_laundry = FALSE; - m->vmp_reference = FALSE; - m->vmp_gobbled = FALSE; - m->vmp_private = FALSE; - m->vmp_unused_page_bits = 0; - -#if !defined(__arm__) && !defined(__arm64__) - VM_PAGE_SET_PHYS_PAGE(m, 0); /* reset later */ -#endif - m->vmp_busy = TRUE; - m->vmp_wanted = FALSE; - m->vmp_tabled = FALSE; - m->vmp_hashed = FALSE; - m->vmp_fictitious = FALSE; - m->vmp_pmapped = FALSE; - m->vmp_wpmapped = FALSE; - m->vmp_free_when_done = FALSE; - m->vmp_absent = FALSE; - m->vmp_error = FALSE; - m->vmp_dirty = FALSE; - m->vmp_cleaning = FALSE; - m->vmp_precious = FALSE; - m->vmp_clustered = FALSE; - m->vmp_overwriting = FALSE; - m->vmp_restart = FALSE; - m->vmp_unusual = FALSE; - m->vmp_cs_validated = FALSE; - m->vmp_cs_tainted = FALSE; - m->vmp_cs_nx = FALSE; - m->vmp_no_cache = FALSE; - m->vmp_reusable = FALSE; - m->vmp_xpmapped = FALSE; - m->vmp_written_by_kernel = FALSE; - m->vmp_unused_object_bits = 0; + unsigned int i; + unsigned int log1; + unsigned int log2; + unsigned int size; /* * Initialize the page queues. */ vm_page_init_lck_grp(); - + lck_mtx_init_ext(&vm_page_queue_free_lock, &vm_page_queue_free_lock_ext, &vm_page_lck_grp_free, &vm_page_lck_attr); lck_mtx_init_ext(&vm_page_queue_lock, &vm_page_queue_lock_ext, &vm_page_lck_grp_queue, &vm_page_lck_attr); lck_mtx_init_ext(&vm_purgeable_queue_lock, &vm_purgeable_queue_lock_ext, &vm_page_lck_grp_purge, &vm_page_lck_attr); - + for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) { int group; purgeable_queues[i].token_q_head = 0; purgeable_queues[i].token_q_tail = 0; - for (group = 0; group < NUM_VOLATILE_GROUPS; group++) - queue_init(&purgeable_queues[i].objq[group]); + for (group = 0; group < NUM_VOLATILE_GROUPS; group++) { + queue_init(&purgeable_queues[i].objq[group]); + } purgeable_queues[i].type = i; purgeable_queues[i].new_pages = 0; @@ -763,12 +932,14 @@ vm_page_bootstrap( purgeable_queues[i].debug_count_tokens = 0; purgeable_queues[i].debug_count_objects = 0; #endif - }; + } + ; purgeable_nonvolatile_count = 0; queue_init(&purgeable_nonvolatile_queue); - - for (i = 0; i < MAX_COLORS; i++ ) + + for (i = 0; i < MAX_COLORS; i++) { vm_page_queue_init(&vm_page_queue_free[i].qhead); + } vm_page_queue_init(&vm_lopage_queue_free); vm_page_queue_init(&vm_page_queue_active); @@ -781,7 +952,7 @@ vm_page_bootstrap( vm_page_queue_init(&vm_page_queue_anonymous); queue_init(&vm_objects_wired); - for ( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) { + for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) { vm_page_queue_init(&vm_page_queue_speculative[i].age_q); vm_page_queue_speculative[i].age_ts.tv_sec = 0; @@ -797,8 +968,9 @@ vm_page_bootstrap( vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25); - if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) + if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) { vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX; + } vm_page_background_mode = VM_PAGE_BG_LEVEL_1; vm_page_background_exclude_external = 0; @@ -807,15 +979,16 @@ vm_page_bootstrap( 
PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external)); PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target)); - if (vm_page_background_mode > VM_PAGE_BG_LEVEL_1) + if (vm_page_background_mode > VM_PAGE_BG_LEVEL_1) { vm_page_background_mode = VM_PAGE_BG_LEVEL_1; + } #endif vm_page_free_wanted = 0; vm_page_free_wanted_privileged = 0; #if CONFIG_SECLUDED_MEMORY vm_page_free_wanted_secluded = 0; #endif /* CONFIG_SECLUDED_MEMORY */ - + #if defined (__x86_64__) /* this must be called before vm_page_set_colors() */ vm_page_setup_clump(); @@ -848,7 +1021,7 @@ vm_page_bootstrap( #if CONFIG_SECLUDED_MEMORY vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1; #endif /* CONFIG_SECLUDED_MEMORY */ - + bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states)); vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1; vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1; @@ -858,8 +1031,7 @@ vm_page_bootstrap( vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1; #endif /* CONFIG_SECLUDED_MEMORY */ - for (i = 0; i < VM_KERN_MEMORY_FIRST_DYNAMIC; i++) - { + for (i = 0; i < VM_KERN_MEMORY_FIRST_DYNAMIC; i++) { vm_allocation_sites_static[i].refcount = 2; vm_allocation_sites_static[i].tag = i; vm_allocation_sites[i] = &vm_allocation_sites_static[i]; @@ -891,8 +1063,9 @@ vm_page_bootstrap( unsigned int npages = pmap_free_pages(); vm_page_bucket_count = 1; - while (vm_page_bucket_count < npages) + while (vm_page_bucket_count < npages) { vm_page_bucket_count <<= 1; + } } vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK; @@ -906,19 +1079,22 @@ vm_page_bootstrap( * B/2 - O */ size = vm_page_bucket_count; - for (log1 = 0; size > 1; log1++) + for (log1 = 0; size > 1; log1++) { size /= 2; + } size = sizeof(struct vm_object); - for (log2 = 0; size > 1; log2++) + for (log2 = 0; size > 1; log2++) { size /= 2; - vm_page_hash_shift = log1/2 - log2 + 1; - - vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */ - vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */ - vm_page_bucket_hash |= 1; /* Set bit and add 1 - always must be 1 to insure unique series */ + } + vm_page_hash_shift = log1 / 2 - log2 + 1; + + vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */ + vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */ + vm_page_bucket_hash |= 1; /* Set bit and add 1 - always must be 1 to insure unique series */ - if (vm_page_hash_mask & vm_page_bucket_count) + if (vm_page_hash_mask & vm_page_bucket_count) { printf("vm_page_bootstrap: WARNING -- strange page hash\n"); + } #if VM_PAGE_BUCKETS_CHECK #if VM_PAGE_FAKE_BUCKETS @@ -927,18 +1103,18 @@ vm_page_bootstrap( * any stomping there. 
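 *
 * (On the bucket sizing above, a worked example with assumed numbers:
 * for npages = 2^20 the doubling loop leaves vm_page_bucket_count at
 * 2^20, so log1 = 20 and
 *	vm_page_bucket_hash = (1 << 10) | (1 << 5) | 1 = 0x421,
 * and with BUCKETS_PER_LOCK = 16 there are 2^20 / 16 = 65,536 bucket
 * locks protecting the hash chains.)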
*/ vm_page_fake_buckets = (vm_page_bucket_t *) - pmap_steal_memory(vm_page_bucket_count * - sizeof(vm_page_bucket_t)); + pmap_steal_memory(vm_page_bucket_count * + sizeof(vm_page_bucket_t)); vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets; vm_page_fake_buckets_end = - vm_map_round_page((vm_page_fake_buckets_start + - (vm_page_bucket_count * - sizeof (vm_page_bucket_t))), - PAGE_MASK); + vm_map_round_page((vm_page_fake_buckets_start + + (vm_page_bucket_count * + sizeof(vm_page_bucket_t))), + PAGE_MASK); char *cp; for (cp = (char *)vm_page_fake_buckets_start; - cp < (char *)vm_page_fake_buckets_end; - cp++) { + cp < (char *)vm_page_fake_buckets_end; + cp++) { *cp = 0x5a; } #endif /* VM_PAGE_FAKE_BUCKETS */ @@ -946,13 +1122,13 @@ vm_page_bootstrap( kernel_debug_string_early("vm_page_buckets"); vm_page_buckets = (vm_page_bucket_t *) - pmap_steal_memory(vm_page_bucket_count * - sizeof(vm_page_bucket_t)); + pmap_steal_memory(vm_page_bucket_count * + sizeof(vm_page_bucket_t)); kernel_debug_string_early("vm_page_bucket_locks"); vm_page_bucket_locks = (lck_spin_t *) - pmap_steal_memory(vm_page_bucket_lock_count * - sizeof(lck_spin_t)); + pmap_steal_memory(vm_page_bucket_lock_count * + sizeof(lck_spin_t)); for (i = 0; i < vm_page_bucket_count; i++) { vm_page_bucket_t *bucket = &vm_page_buckets[i]; @@ -964,8 +1140,9 @@ vm_page_bootstrap( #endif /* MACH_PAGE_HASH_STATS */ } - for (i = 0; i < vm_page_bucket_lock_count; i++) - lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr); + for (i = 0; i < vm_page_bucket_lock_count; i++) { + lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr); + } lck_spin_init(&vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); lck_spin_init(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr); @@ -993,27 +1170,27 @@ vm_page_bootstrap( /* * Compute the initial "wire" count. - * Up until now, the pages which have been set aside are not under + * Up until now, the pages which have been set aside are not under * the VM system's control, so although they aren't explicitly * wired, they nonetheless can't be moved. At this moment, * all VM managed pages are "free", courtesy of pmap_startup. */ assert((unsigned int) atop_64(max_mem) == atop_64(max_mem)); - vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - vm_page_free_count - vm_lopage_free_count; /* initial value */ + vm_page_wire_count = ((unsigned int) atop_64(max_mem)) - + vm_page_free_count - vm_lopage_free_count; #if CONFIG_SECLUDED_MEMORY vm_page_wire_count -= vm_page_secluded_count; #endif vm_page_wire_count_initial = vm_page_wire_count; - vm_page_pages_initial = vm_page_pages; - printf("vm_page_bootstrap: %d free pages and %d wired pages\n", - vm_page_free_count, vm_page_wire_count); + printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n", + vm_page_free_count, vm_page_wire_count, vm_delayed_count); kernel_debug_string_early("vm_page_bootstrap complete"); simple_lock_init(&vm_paging_lock, 0); } -#ifndef MACHINE_PAGES +#ifndef MACHINE_PAGES /* * We implement pmap_steal_memory and pmap_startup with the help * of two simpler functions, pmap_virtual_space and pmap_next_page. @@ -1031,7 +1208,7 @@ pmap_steal_memory( * We round the size to a round multiple. 
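 *
 * (Worked example: with 8-byte pointers a request of size 37 becomes
 * (37 + 8 - 1) & ~7 = 40, the next multiple of the pointer size, while
 * an already-aligned size passes through unchanged.)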
*/ - size = (size + sizeof (void *) - 1) &~ (sizeof (void *) - 1); + size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1); /* * If this is the first call to pmap_steal_memory, @@ -1064,18 +1241,18 @@ pmap_steal_memory( */ for (vaddr = round_page(addr); - vaddr < addr + size; - vaddr += PAGE_SIZE) { - - if (!pmap_next_page_hi(&phys_page)) + vaddr < addr + size; + vaddr += PAGE_SIZE) { + if (!pmap_next_page_hi(&phys_page)) { panic("pmap_steal_memory() size: 0x%llx\n", (uint64_t)size); + } /* * XXX Logically, these mappings should be wired, * but some pmap modules barf if they are. */ #if defined(__LP64__) -#ifdef __arm64__ +#ifdef __arm64__ /* ARM64_TODO: verify that we really don't need this */ #else pmap_pre_expand(kernel_pmap, vaddr); @@ -1083,12 +1260,12 @@ pmap_steal_memory( #endif kr = pmap_enter(kernel_pmap, vaddr, phys_page, - VM_PROT_READ|VM_PROT_WRITE, VM_PROT_NONE, - VM_WIMG_USE_DEFAULT, FALSE); + VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, + VM_WIMG_USE_DEFAULT, FALSE); if (kr != KERN_SUCCESS) { panic("pmap_steal_memory() pmap_enter failed, vaddr=%#lx, phys_page=%u", - (unsigned long)vaddr, phys_page); + (unsigned long)vaddr, phys_page); } /* @@ -1106,10 +1283,10 @@ pmap_steal_memory( #if CONFIG_SECLUDED_MEMORY /* boot-args to control secluded memory */ -unsigned int secluded_mem_mb = 0; /* # of MBs of RAM to seclude */ -int secluded_for_iokit = 1; /* IOKit can use secluded memory */ -int secluded_for_apps = 1; /* apps can use secluded memory */ -int secluded_for_filecache = 2; /* filecache can use seclude memory */ +unsigned int secluded_mem_mb = 0; /* # of MBs of RAM to seclude */ +int secluded_for_iokit = 1; /* IOKit can use secluded memory */ +int secluded_for_apps = 1; /* apps can use secluded memory */ +int secluded_for_filecache = 2; /* filecache can use seclude memory */ #if 11 int secluded_for_fbdp = 0; #endif @@ -1122,16 +1299,18 @@ extern void patch_low_glo_vm_page_info(void *, void *, uint32_t); unsigned int vm_first_phys_ppnum = 0; #endif - void vm_page_release_startup(vm_page_t mem); void pmap_startup( - vm_offset_t *startp, - vm_offset_t *endp) + vm_offset_t *startp, + vm_offset_t *endp) { - unsigned int i, npages, pages_initialized, fill, fillval; - ppnum_t phys_page; - addr64_t tmpaddr; + unsigned int i, npages; + ppnum_t phys_page; + uint64_t mem_sz; + uint64_t start_ns; + uint64_t now_ns; + uint_t low_page_count = 0; #if defined(__LP64__) /* @@ -1139,106 +1318,80 @@ pmap_startup( * for VM_PAGE_PACK_PTR (it clips off the low-order * 6 bits of the pointer) */ - if (virtual_space_start != virtual_space_end) + if (virtual_space_start != virtual_space_end) { virtual_space_start = round_page(virtual_space_start); + } #endif /* - * We calculate how many page frames we will have - * and then allocate the page structures in one chunk. + * We calculate how many page frames we will have + * and then allocate the page structures in one chunk. + * + * Note that the calculation here doesn't take into account + * the memory needed to map what's being allocated, i.e. the page + * table entries. So the actual number of pages we get will be + * less than this. To do someday: include that in the computation. 
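 *
 * (Worked example with assumed numbers: with 4 KiB pages and a vm_page
 * structure of roughly 80 bytes, 4 GiB of remaining memory yields
 * npages ~= 2^32 / (4096 + 80) ~= 1,028,000 entries, slightly fewer
 * than the 1,048,576 raw frames, since each frame must also fund its
 * own vm_page_t.)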
*/ - - tmpaddr = (addr64_t)pmap_free_pages() * (addr64_t)PAGE_SIZE; /* Get the amount of memory left */ - tmpaddr = tmpaddr + (addr64_t)(round_page(virtual_space_start) - virtual_space_start); /* Account for any slop */ - npages = (unsigned int)(tmpaddr / (addr64_t)(PAGE_SIZE + sizeof(*vm_pages))); /* Figure size of all vm_page_ts, including enough to hold the vm_page_ts */ + mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE; + mem_sz += round_page(virtual_space_start) - virtual_space_start; /* Account for any slop */ + npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages))); /* scaled to include the vm_page_ts */ vm_pages = (vm_page_t) pmap_steal_memory(npages * sizeof *vm_pages); - /* - * Initialize the page frames. - */ - kernel_debug_string_early("Initialize the page frames"); - - vm_page_array_beginning_addr = &vm_pages[0]; - vm_page_array_ending_addr = &vm_pages[npages]; - - for (i = 0, pages_initialized = 0; i < npages; i++) { - if (!pmap_next_page(&phys_page)) - break; -#if defined(__arm__) || defined(__arm64__) - if (pages_initialized == 0) { - vm_first_phys_ppnum = phys_page; - patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr, (void *)vm_page_array_ending_addr, vm_first_phys_ppnum); - } - assert((i + vm_first_phys_ppnum) == phys_page); -#endif - if (pages_initialized == 0 || phys_page < vm_page_lowest) - vm_page_lowest = phys_page; - - vm_page_init(&vm_pages[i], phys_page, FALSE); - vm_page_pages++; - pages_initialized++; - } - vm_pages_count = pages_initialized; - vm_page_array_boundary = &vm_pages[pages_initialized]; - -#if defined(__LP64__) - - if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) - panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]); - - if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count-1]))) != &vm_pages[vm_pages_count-1]) - panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count-1]); -#endif - kernel_debug_string_early("page fill/release"); /* * Check if we want to initialize pages to a known value */ - fill = 0; /* Assume no fill */ - if (PE_parse_boot_argn("fill", &fillval, sizeof (fillval))) fill = 1; /* Set fill */ -#if DEBUG + if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) { + fill = TRUE; + } +#if DEBUG /* This slows down booting the DEBUG kernel, particularly on * large memory systems, but is worthwhile in deterministically * trapping uninitialized memory usage. */ - if (fill == 0) { - fill = 1; + if (!fill) { + fill = TRUE; fillval = 0xDEB8F177; } #endif - if (fill) + if (fill) { kprintf("Filling vm_pages with pattern: 0x%x\n", fillval); + } #if CONFIG_SECLUDED_MEMORY - /* default: no secluded mem */ + /* + * Figure out how much secluded memory to have before we start + * release pages to free lists. + * The default, if specified nowhere else, is no secluded mem. 
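 *
 * (Arithmetic note: vm_page_secluded_target below converts megabytes
 * to pages, so the 90MB default is 90 * 1024 * 1024 / 16384 = 5,760
 * pages with 16 KiB pages, or 23,040 pages with 4 KiB pages.)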
+ */ secluded_mem_mb = 0; - if (max_mem > 1*1024*1024*1024) { + if (max_mem > 1 * 1024 * 1024 * 1024) { /* default to 90MB for devices with > 1GB of RAM */ secluded_mem_mb = 90; } /* override with value from device tree, if provided */ PE_get_default("kern.secluded_mem_mb", - &secluded_mem_mb, sizeof(secluded_mem_mb)); + &secluded_mem_mb, sizeof(secluded_mem_mb)); /* override with value from boot-args, if provided */ PE_parse_boot_argn("secluded_mem_mb", - &secluded_mem_mb, - sizeof (secluded_mem_mb)); + &secluded_mem_mb, + sizeof(secluded_mem_mb)); vm_page_secluded_target = (unsigned int) - ((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE); + ((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE); PE_parse_boot_argn("secluded_for_iokit", - &secluded_for_iokit, - sizeof (secluded_for_iokit)); + &secluded_for_iokit, + sizeof(secluded_for_iokit)); PE_parse_boot_argn("secluded_for_apps", - &secluded_for_apps, - sizeof (secluded_for_apps)); + &secluded_for_apps, + sizeof(secluded_for_apps)); PE_parse_boot_argn("secluded_for_filecache", - &secluded_for_filecache, - sizeof (secluded_for_filecache)); + &secluded_for_filecache, + sizeof(secluded_for_filecache)); #if 11 PE_parse_boot_argn("secluded_for_fbdp", - &secluded_for_fbdp, - sizeof (secluded_for_fbdp)); + &secluded_for_fbdp, + sizeof(secluded_for_fbdp)); #endif /* @@ -1246,7 +1399,6 @@ pmap_startup( * secluded memory until it exits. */ if (max_mem <= 1 * 1024 * 1024 * 1024 && vm_page_secluded_target != 0) { - /* * Get an amount from boot-args, else use 500MB. * 500MB was chosen from a Peace daemon tentpole test which used munch @@ -1254,133 +1406,179 @@ pmap_startup( */ int secluded_shutoff_mb; if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb, - sizeof (secluded_shutoff_mb))) + sizeof(secluded_shutoff_mb))) { secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024; - else + } else { secluded_shutoff_trigger = 500 * 1024 * 1024; + } - if (secluded_shutoff_trigger != 0) + if (secluded_shutoff_trigger != 0) { secluded_suppression_init(); + } } #endif /* CONFIG_SECLUDED_MEMORY */ +#if defined(__x86_64__) + /* - * By default release pages in reverse order so that physical pages - * initially get allocated in ascending addresses. This keeps - * the devices (which must address physical memory) happy if - * they require several consecutive pages. - * - * For debugging, you can reverse this ordering and/or fill - * all pages with a known value. + * Decide how much memory we delay freeing at boot time. 
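 *
 * (Arithmetic note: with the DEFAULT_DELAY_ABOVE_PHYS_GB default of 8
 * and 4 KiB pages, delay_above_pnum below becomes
 * 8 * (1024 * 1024 * 1024 / 4096) = 2,097,152, i.e. frames above the
 * first 8 GiB stay uninitialized; a boot-arg of 0 pushes the threshold
 * to PPNUM_MAX and disables delayed initialization entirely.)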
*/ - if (vm_himemory_mode == 2) { - for (i = 0; i < pages_initialized; i++) { - if (fill) - fillPage(VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]), fillval); - vm_page_release_startup(&vm_pages[i]); - } + uint32_t delay_above_gb; + if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) { + delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB; + } + + if (delay_above_gb == 0) { + delay_above_pnum = PPNUM_MAX; } else { - for (i = pages_initialized; i-- > 0; ) { - if (fill) - fillPage(VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]), fillval); - vm_page_release_startup(&vm_pages[i]); - } + delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE); } - VM_CHECK_MEMORYSTATUS; - -#if 0 - { - vm_page_t xx, xxo, xxl; - int i, j, k, l; - - j = 0; /* (BRINGUP) */ - xxl = 0; - - for( i = 0; i < vm_colors; i++ ) { - queue_iterate(&vm_page_queue_free[i].qhead, - xx, - vm_page_t, - vmp_pageq) { /* BRINGUP */ - j++; /* (BRINGUP) */ - if(j > vm_page_free_count) { /* (BRINGUP) */ - panic("pmap_startup: too many pages, xx = %08X, xxl = %08X\n", xx, xxl); - } - - l = vm_page_free_count - j; /* (BRINGUP) */ - k = 0; /* (BRINGUP) */ - - if(((j - 1) & 0xFFFF) == 0) kprintf("checking number %d of %d\n", j, vm_page_free_count); - - for(xxo = xx->pageq.next; xxo != &vm_page_queue_free[i].qhead; xxo = xxo->pageq.next) { /* (BRINGUP) */ - k++; - if(k > l) panic("pmap_startup: too many in secondary check %d %d\n", k, l); - if((xx->phys_page & 0xFFFFFFFF) == (xxo->phys_page & 0xFFFFFFFF)) { /* (BRINGUP) */ - panic("pmap_startup: duplicate physaddr, xx = %08X, xxo = %08X\n", xx, xxo); - } - } + /* make sure we have sane breathing room: 1G above low memory */ + if (delay_above_pnum <= max_valid_low_ppnum) { + delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT); + } - xxl = xx; - } - } - - if(j != vm_page_free_count) { /* (BRINGUP) */ - panic("pmap_startup: vm_page_free_count does not match, calc = %d, vm_page_free_count = %08X\n", j, vm_page_free_count); - } + if (delay_above_pnum < PPNUM_MAX) { + printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum); } -#endif +#endif /* defined(__x86_64__) */ /* - * We have to re-align virtual_space_start, - * because pmap_steal_memory has been using it. + * Initialize and release the page frames. */ + kernel_debug_string_early("Initialize and free the page frames"); - virtual_space_start = round_page(virtual_space_start); + vm_page_array_beginning_addr = &vm_pages[0]; + vm_page_array_ending_addr = &vm_pages[npages]; /* used by ptr packing/unpacking code */ - *startp = virtual_space_start; - *endp = virtual_space_end; -} -#endif /* MACHINE_PAGES */ + vm_delayed_count = 0; -/* - * Routine: vm_page_module_init - * Purpose: - * Second initialization pass, to be done after - * the basic VM system is ready. - */ -void -vm_page_module_init(void) -{ - uint64_t vm_page_zone_pages, vm_page_array_zone_data_size; - vm_size_t vm_page_with_ppnum_size; + absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns); + vm_pages_count = 0; + for (i = 0; i < npages; i++) { + /* Did we run out of pages? */ + if (!pmap_next_page(&phys_page)) { + break; + } - vm_page_array_zone = zinit((vm_size_t) sizeof(struct vm_page), - 0, PAGE_SIZE, "vm pages array"); + if (phys_page < max_valid_low_ppnum) { + ++low_page_count; + } - zone_change(vm_page_array_zone, Z_CALLERACCT, FALSE); - zone_change(vm_page_array_zone, Z_EXPAND, FALSE); - zone_change(vm_page_array_zone, Z_EXHAUST, TRUE); + /* Are we at high enough pages to delay the rest? 
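 *
 * (Both conditions below must hold -- enough low pages have been
 * banked to cover vm_lopage_free_limit, and the current frame lies
 * above delay_above_pnum -- before the loop stops early;
 * vm_delayed_count then snapshots pmap_free_pages() so later callers
 * know how many frames remain to be initialized.)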
*/ + if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) { + vm_delayed_count = pmap_free_pages(); + break; + } + +#if defined(__arm__) || defined(__arm64__) + if (i == 0) { + vm_first_phys_ppnum = phys_page; + patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr, + (void *)vm_page_array_ending_addr, vm_first_phys_ppnum); + } + assert((i + vm_first_phys_ppnum) == phys_page); +#endif + + ++vm_pages_count; + vm_page_init(&vm_pages[i], phys_page, FALSE); + if (fill) { + fillPage(phys_page, fillval); + } + if (vm_himemory_mode) { + vm_page_release_startup(&vm_pages[i]); + } + } + vm_page_pages = vm_pages_count; /* used to report to user space */ + + if (!vm_himemory_mode) { + do { + vm_page_release_startup(&vm_pages[--i]); + } while (i != 0); + } + + absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns); + printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC); + printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count); + +#if defined(__LP64__) + + if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) { + panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]); + } + + if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) { + panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]); + } +#endif + + VM_CHECK_MEMORYSTATUS; + + /* + * We have to re-align virtual_space_start, + * because pmap_steal_memory has been using it. + */ + virtual_space_start = round_page(virtual_space_start); + *startp = virtual_space_start; + *endp = virtual_space_end; +} +#endif /* MACHINE_PAGES */ + +/* + * Create the zone that represents the vm_pages[] array. Nothing ever allocates + * or frees to this zone. It's just here for reporting purposes via zprint command. + * This needs to be done after all initially delayed pages are put on the free lists. + */ +static void +vm_page_module_init_delayed(void) +{ + uint64_t vm_page_zone_pages, vm_page_array_zone_data_size; + + vm_page_array_zone = zinit((vm_size_t) sizeof(struct vm_page), + 0, PAGE_SIZE, "vm pages array"); + + zone_change(vm_page_array_zone, Z_CALLERACCT, FALSE); + zone_change(vm_page_array_zone, Z_EXPAND, FALSE); + zone_change(vm_page_array_zone, Z_EXHAUST, TRUE); zone_change(vm_page_array_zone, Z_FOREIGN, TRUE); zone_change(vm_page_array_zone, Z_GZALLOC_EXEMPT, TRUE); + /* - * Adjust zone statistics to account for the real pages allocated - * in vm_page_create(). [Q: is this really what we want?] + * Reflect size and usage information for vm_pages[]. 
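 *
 * (Here count covers the vm_page entries initialized so far, while
 * countfree is the number of unused slots left in the tail of
 * vm_pages[] -- the pointer subtraction below yields a count of array
 * elements, not bytes.)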
*/ - vm_page_array_zone->count += vm_page_pages; - vm_page_array_zone->sum_count += vm_page_pages; - vm_page_array_zone_data_size = vm_page_pages * vm_page_array_zone->elem_size; - vm_page_array_zone->cur_size += vm_page_array_zone_data_size; + vm_page_array_zone->count = vm_pages_count; + vm_page_array_zone->countfree = (int)(vm_page_array_ending_addr - &vm_pages[vm_pages_count]); + vm_page_array_zone->sum_count = vm_pages_count; + vm_page_array_zone_data_size = (uintptr_t)((void *)vm_page_array_ending_addr - (void *)vm_pages); + vm_page_array_zone->cur_size = vm_page_array_zone_data_size; vm_page_zone_pages = ((round_page(vm_page_array_zone_data_size)) / PAGE_SIZE); OSAddAtomic64(vm_page_zone_pages, &(vm_page_array_zone->page_count)); /* since zone accounts for these, take them out of stolen */ VM_PAGE_MOVE_STOLEN(vm_page_zone_pages); +} + +/* + * Create the vm_pages zone. This is used for the vm_page structures for the pages + * that are scavanged from other boot time usages by ml_static_mfree(). As such, + * this needs to happen in early VM bootstrap. + */ +void +vm_page_module_init(void) +{ + vm_size_t vm_page_with_ppnum_size; - vm_page_with_ppnum_size = (sizeof(struct vm_page_with_ppnum) + (VM_PACKED_POINTER_ALIGNMENT-1)) & ~(VM_PACKED_POINTER_ALIGNMENT - 1); + /* + * Since the pointers to elements in this zone will be packed, they + * must have appropriate size. Not strictly what sizeof() reports. + */ + vm_page_with_ppnum_size = + (sizeof(struct vm_page_with_ppnum) + (VM_PACKED_POINTER_ALIGNMENT - 1)) & + ~(VM_PACKED_POINTER_ALIGNMENT - 1); - vm_page_zone = zinit(vm_page_with_ppnum_size, - 0, PAGE_SIZE, "vm pages"); + vm_page_zone = zinit(vm_page_with_ppnum_size, 0, PAGE_SIZE, "vm pages"); zone_change(vm_page_zone, Z_CALLERACCT, FALSE); zone_change(vm_page_zone, Z_EXPAND, FALSE); @@ -1404,20 +1602,23 @@ vm_page_create( ppnum_t start, ppnum_t end) { - ppnum_t phys_page; - vm_page_t m; + ppnum_t phys_page; + vm_page_t m; for (phys_page = start; - phys_page < end; - phys_page++) { + phys_page < end; + phys_page++) { while ((m = (vm_page_t) vm_page_grab_fictitious_common(phys_page)) - == VM_PAGE_NULL) + == VM_PAGE_NULL) { vm_page_more_fictitious(); + } m->vmp_fictitious = FALSE; pmap_clear_noencrypt(phys_page); + lck_mtx_lock(&vm_page_queue_free_lock); vm_page_pages++; + lck_mtx_unlock(&vm_page_queue_free_lock); vm_page_release(m, FALSE); } } @@ -1444,18 +1645,18 @@ vm_page_create( */ void vm_page_insert( - vm_page_t mem, - vm_object_t object, - vm_object_offset_t offset) + vm_page_t mem, + vm_object_t object, + vm_object_offset_t offset) { vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL); } void vm_page_insert_wired( - vm_page_t mem, - vm_object_t object, - vm_object_offset_t offset, + vm_page_t mem, + vm_object_t object, + vm_object_offset_t offset, vm_tag_t tag) { vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL); @@ -1463,29 +1664,29 @@ vm_page_insert_wired( void vm_page_insert_internal( - vm_page_t mem, - vm_object_t object, - vm_object_offset_t offset, + vm_page_t mem, + vm_object_t object, + vm_object_offset_t offset, vm_tag_t tag, - boolean_t queues_lock_held, - boolean_t insert_in_hash, - boolean_t batch_pmap_op, - boolean_t batch_accounting, - uint64_t *delayed_ledger_update) -{ - vm_page_bucket_t *bucket; - lck_spin_t *bucket_lock; - int hash_id; - task_t owner; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int 
ledger_idx_nonvolatile_compressed; - boolean_t do_footprint; - - XPR(XPR_VM_PAGE, - "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n", - object, offset, mem, 0,0); + boolean_t queues_lock_held, + boolean_t insert_in_hash, + boolean_t batch_pmap_op, + boolean_t batch_accounting, + uint64_t *delayed_ledger_update) +{ + vm_page_bucket_t *bucket; + lck_spin_t *bucket_lock; + int hash_id; + task_t owner; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + boolean_t do_footprint; + + XPR(XPR_VM_PAGE, + "vm_page_insert, object 0x%X offset 0x%X page 0x%X\n", + object, offset, mem, 0, 0); #if 0 /* * we may not hold the page queue lock @@ -1503,26 +1704,28 @@ vm_page_insert_internal( vm_object_lock_assert_exclusive(object); LCK_MTX_ASSERT(&vm_page_queue_lock, - queues_lock_held ? LCK_MTX_ASSERT_OWNED - : LCK_MTX_ASSERT_NOTOWNED); + queues_lock_held ? LCK_MTX_ASSERT_OWNED + : LCK_MTX_ASSERT_NOTOWNED); - if (queues_lock_held == FALSE) + if (queues_lock_held == FALSE) { assert(!VM_PAGE_PAGEABLE(mem)); + } if (insert_in_hash == TRUE) { #if DEBUG || VM_PAGE_CHECK_BUCKETS - if (mem->vmp_tabled || mem->vmp_object) + if (mem->vmp_tabled || mem->vmp_object) { panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) " - "already in (obj=%p,off=0x%llx)", - mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset); + "already in (obj=%p,off=0x%llx)", + mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset); + } #endif if (object->internal && (offset >= object->vo_size)) { panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds", - mem, object, offset, object->vo_size); + mem, object, offset, object->vo_size); } assert(vm_page_lookup(object, offset) == VM_PAGE_NULL); - + /* * Record the object/offset pair in this page */ @@ -1542,22 +1745,23 @@ vm_page_insert_internal( hash_id = vm_page_hash(object, offset); bucket = &vm_page_buckets[hash_id]; bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK]; - - lck_spin_lock(bucket_lock); + + lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket); mem->vmp_next_m = bucket->page_list; bucket->page_list = VM_PAGE_PACK_PTR(mem); assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))); #if MACH_PAGE_HASH_STATS - if (++bucket->cur_count > bucket->hi_count) + if (++bucket->cur_count > bucket->hi_count) { bucket->hi_count = bucket->cur_count; + } #endif /* MACH_PAGE_HASH_STATS */ mem->vmp_hashed = TRUE; lck_spin_unlock(bucket_lock); } - { + { unsigned int cache_attr; cache_attr = object->wimg_bits & VM_WIMG_MASK; @@ -1569,7 +1773,7 @@ vm_page_insert_internal( /* * Now link into the object's list of backed pages. 
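 *
 * (memq_hint below caches the most recently inserted page; as an aside
 * not spelled out in the patch, vm_page_lookup() checks this hint
 * first to short-circuit the hash lookup for clustered accesses.)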
*/ - vm_page_queue_enter(&object->memq, mem, vm_page_t, vmp_listq); + vm_page_queue_enter(&object->memq, mem, vmp_listq); object->memq_hint = mem; mem->vmp_tabled = TRUE; @@ -1579,14 +1783,14 @@ vm_page_insert_internal( object->resident_page_count++; if (VM_PAGE_WIRED(mem)) { - assert(mem->vmp_wire_count > 0); - VM_OBJECT_WIRED_PAGE_UPDATE_START(object); - VM_OBJECT_WIRED_PAGE_ADD(object, mem); - VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag); + assert(mem->vmp_wire_count > 0); + VM_OBJECT_WIRED_PAGE_UPDATE_START(object); + VM_OBJECT_WIRED_PAGE_ADD(object, mem); + VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag); } assert(object->resident_page_count >= object->wired_page_count); - if (batch_accounting == FALSE) { + if (batch_accounting == FALSE) { if (object->internal) { OSAddAtomic(1, &vm_page_internal_count); } else { @@ -1612,45 +1816,43 @@ vm_page_insert_internal( } if (object->purgable == VM_PURGABLE_DENY && - ! object->vo_ledger_tag) { + !object->vo_ledger_tag) { owner = TASK_NULL; } else { owner = VM_OBJECT_OWNER(object); vm_object_ledger_tag_ledgers(object, - &ledger_idx_volatile, - &ledger_idx_nonvolatile, - &ledger_idx_volatile_compressed, - &ledger_idx_nonvolatile_compressed, - &do_footprint); + &ledger_idx_volatile, + &ledger_idx_nonvolatile, + &ledger_idx_volatile_compressed, + &ledger_idx_nonvolatile_compressed, + &do_footprint); } if (owner && (object->purgable == VM_PURGABLE_NONVOLATILE || - object->purgable == VM_PURGABLE_DENY || - VM_PAGE_WIRED(mem))) { - - if (delayed_ledger_update) + object->purgable == VM_PURGABLE_DENY || + VM_PAGE_WIRED(mem))) { + if (delayed_ledger_update) { *delayed_ledger_update += PAGE_SIZE; - else { + } else { /* more non-volatile bytes */ ledger_credit(owner->ledger, - ledger_idx_nonvolatile, - PAGE_SIZE); + ledger_idx_nonvolatile, + PAGE_SIZE); if (do_footprint) { /* more footprint */ ledger_credit(owner->ledger, - task_ledgers.phys_footprint, - PAGE_SIZE); + task_ledgers.phys_footprint, + PAGE_SIZE); } } - } else if (owner && - (object->purgable == VM_PURGABLE_VOLATILE || - object->purgable == VM_PURGABLE_EMPTY)) { - assert(! VM_PAGE_WIRED(mem)); + (object->purgable == VM_PURGABLE_VOLATILE || + object->purgable == VM_PURGABLE_EMPTY)) { + assert(!VM_PAGE_WIRED(mem)); /* more volatile bytes */ ledger_credit(owner->ledger, - ledger_idx_volatile, - PAGE_SIZE); + ledger_idx_volatile, + PAGE_SIZE); } if (object->purgable == VM_PURGABLE_VOLATILE) { @@ -1660,7 +1862,7 @@ vm_page_insert_internal( OSAddAtomic(+1, &vm_page_purgeable_count); } } else if (object->purgable == VM_PURGABLE_EMPTY && - mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) { + mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) { /* * This page belongs to a purged VM object but hasn't * been purged (because it was "busy"). @@ -1669,11 +1871,13 @@ vm_page_insert_internal( * queue, so that it can eventually be reclaimed, instead * of lingering in the "empty" object. 
*/ - if (queues_lock_held == FALSE) + if (queues_lock_held == FALSE) { vm_page_lockspin_queues(); + } vm_page_deactivate(mem); - if (queues_lock_held == FALSE) + if (queues_lock_held == FALSE) { vm_page_unlock_queues(); + } } #if VM_OBJECT_TRACKING_OP_MODIFIED @@ -1686,12 +1890,12 @@ vm_page_insert_internal( void *bt[VM_OBJECT_TRACKING_BTDEPTH]; int numsaved = 0; - numsaved =OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH); + numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH); btlog_add_entry(vm_object_tracking_btlog, - object, - VM_OBJECT_TRACKING_OP_MODIFIED, - bt, - numsaved); + object, + VM_OBJECT_TRACKING_OP_MODIFIED, + bt, + numsaved); } #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */ } @@ -1706,14 +1910,14 @@ vm_page_insert_internal( */ void vm_page_replace( - vm_page_t mem, - vm_object_t object, - vm_object_offset_t offset) + vm_page_t mem, + vm_object_t object, + vm_object_offset_t offset) { vm_page_bucket_t *bucket; - vm_page_t found_m = VM_PAGE_NULL; - lck_spin_t *bucket_lock; - int hash_id; + vm_page_t found_m = VM_PAGE_NULL; + lck_spin_t *bucket_lock; + int hash_id; #if 0 /* @@ -1724,10 +1928,11 @@ vm_page_replace( #endif vm_object_lock_assert_exclusive(object); #if DEBUG || VM_PAGE_CHECK_BUCKETS - if (mem->vmp_tabled || mem->vmp_object) + if (mem->vmp_tabled || mem->vmp_object) { panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) " - "already in (obj=%p,off=0x%llx)", - mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset); + "already in (obj=%p,off=0x%llx)", + mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset); + } #endif LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); @@ -1748,7 +1953,7 @@ vm_page_replace( bucket = &vm_page_buckets[hash_id]; bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK]; - lck_spin_lock(bucket_lock); + lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket); if (bucket->page_list) { vm_page_packed_t *mp = &bucket->page_list; @@ -1785,7 +1990,7 @@ vm_page_replace( lck_spin_unlock(bucket_lock); if (found_m) { - /* + /* * there was already a page at the specified * offset for this object... 
remove it from * the object and free it back to the free list @@ -1806,27 +2011,27 @@ vm_page_replace( void vm_page_remove( - vm_page_t mem, - boolean_t remove_from_hash) + vm_page_t mem, + boolean_t remove_from_hash) { vm_page_bucket_t *bucket; - vm_page_t this; - lck_spin_t *bucket_lock; - int hash_id; - task_t owner; - vm_object_t m_object; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int ledger_idx_nonvolatile_compressed; - int do_footprint; + vm_page_t this; + lck_spin_t *bucket_lock; + int hash_id; + task_t owner; + vm_object_t m_object; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + int do_footprint; m_object = VM_PAGE_OBJECT(mem); - XPR(XPR_VM_PAGE, - "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n", - m_object, mem->vmp_offset, - mem, 0,0); + XPR(XPR_VM_PAGE, + "vm_page_remove, object 0x%X offset 0x%X page 0x%X\n", + m_object, mem->vmp_offset, + mem, 0, 0); vm_object_lock_assert_exclusive(m_object); assert(mem->vmp_tabled); @@ -1851,19 +2056,20 @@ vm_page_remove( bucket = &vm_page_buckets[hash_id]; bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK]; - lck_spin_lock(bucket_lock); + lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket); if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) { /* optimize for common case */ bucket->page_list = mem->vmp_next_m; } else { - vm_page_packed_t *prev; + vm_page_packed_t *prev; for (prev = &this->vmp_next_m; - (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem; - prev = &this->vmp_next_m) + (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem; + prev = &this->vmp_next_m) { continue; + } *prev = this->vmp_next_m; } #if MACH_PAGE_HASH_STATS @@ -1905,8 +2111,9 @@ vm_page_remove( if (!m_object->internal && m_object->cached_list.next && m_object->cached_list.prev) { - if (m_object->resident_page_count == 0) + if (m_object->resident_page_count == 0) { vm_object_cache_remove(m_object); + } } if (VM_PAGE_WIRED(mem)) { @@ -1916,12 +2123,12 @@ vm_page_remove( VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag); } assert(m_object->resident_page_count >= - m_object->wired_page_count); + m_object->wired_page_count); if (mem->vmp_reusable) { assert(m_object->reusable_page_count > 0); m_object->reusable_page_count--; assert(m_object->reusable_page_count <= - m_object->resident_page_count); + m_object->resident_page_count); mem->vmp_reusable = FALSE; OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count); vm_page_stats_reusable.reused_remove++; @@ -1931,39 +2138,39 @@ vm_page_remove( } if (m_object->purgable == VM_PURGABLE_DENY && - ! 
m_object->vo_ledger_tag) { + !m_object->vo_ledger_tag) { owner = TASK_NULL; } else { owner = VM_OBJECT_OWNER(m_object); vm_object_ledger_tag_ledgers(m_object, - &ledger_idx_volatile, - &ledger_idx_nonvolatile, - &ledger_idx_volatile_compressed, - &ledger_idx_nonvolatile_compressed, - &do_footprint); + &ledger_idx_volatile, + &ledger_idx_nonvolatile, + &ledger_idx_volatile_compressed, + &ledger_idx_nonvolatile_compressed, + &do_footprint); } if (owner && (m_object->purgable == VM_PURGABLE_NONVOLATILE || - m_object->purgable == VM_PURGABLE_DENY || - VM_PAGE_WIRED(mem))) { + m_object->purgable == VM_PURGABLE_DENY || + VM_PAGE_WIRED(mem))) { /* less non-volatile bytes */ ledger_debit(owner->ledger, - ledger_idx_nonvolatile, - PAGE_SIZE); + ledger_idx_nonvolatile, + PAGE_SIZE); if (do_footprint) { /* less footprint */ ledger_debit(owner->ledger, - task_ledgers.phys_footprint, - PAGE_SIZE); + task_ledgers.phys_footprint, + PAGE_SIZE); } } else if (owner && - (m_object->purgable == VM_PURGABLE_VOLATILE || - m_object->purgable == VM_PURGABLE_EMPTY)) { - assert(! VM_PAGE_WIRED(mem)); + (m_object->purgable == VM_PURGABLE_VOLATILE || + m_object->purgable == VM_PURGABLE_EMPTY)) { + assert(!VM_PAGE_WIRED(mem)); /* less volatile bytes */ ledger_debit(owner->ledger, - ledger_idx_volatile, - PAGE_SIZE); + ledger_idx_volatile, + PAGE_SIZE); } if (m_object->purgable == VM_PURGABLE_VOLATILE) { if (VM_PAGE_WIRED(mem)) { @@ -1975,8 +2182,9 @@ vm_page_remove( } } - if (m_object->set_cache_attr == TRUE) + if (m_object->set_cache_attr == TRUE) { pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0); + } mem->vmp_tabled = FALSE; mem->vmp_object = 0; @@ -1993,34 +2201,34 @@ vm_page_remove( * The object must be locked. No side effects. */ -#define VM_PAGE_HASH_LOOKUP_THRESHOLD 10 +#define VM_PAGE_HASH_LOOKUP_THRESHOLD 10 #if DEBUG_VM_PAGE_LOOKUP struct { - uint64_t vpl_total; - uint64_t vpl_empty_obj; - uint64_t vpl_bucket_NULL; - uint64_t vpl_hit_hint; - uint64_t vpl_hit_hint_next; - uint64_t vpl_hit_hint_prev; - uint64_t vpl_fast; - uint64_t vpl_slow; - uint64_t vpl_hit; - uint64_t vpl_miss; - - uint64_t vpl_fast_elapsed; - uint64_t vpl_slow_elapsed; + uint64_t vpl_total; + uint64_t vpl_empty_obj; + uint64_t vpl_bucket_NULL; + uint64_t vpl_hit_hint; + uint64_t vpl_hit_hint_next; + uint64_t vpl_hit_hint_prev; + uint64_t vpl_fast; + uint64_t vpl_slow; + uint64_t vpl_hit; + uint64_t vpl_miss; + + uint64_t vpl_fast_elapsed; + uint64_t vpl_slow_elapsed; } vm_page_lookup_stats __attribute__((aligned(8))); #endif -#define KDP_VM_PAGE_WALK_MAX 1000 +#define KDP_VM_PAGE_WALK_MAX 1000 vm_page_t kdp_vm_page_lookup( - vm_object_t object, - vm_object_offset_t offset) + vm_object_t object, + vm_object_offset_t offset) { vm_page_t cur_page; int num_traversed = 0; @@ -2029,7 +2237,7 @@ kdp_vm_page_lookup( panic("panic: kdp_vm_page_lookup done outside of kernel debugger"); } - vm_page_queue_iterate(&object->memq, cur_page, vm_page_t, vmp_listq) { + vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) { if (cur_page->vmp_offset == offset) { return cur_page; } @@ -2045,16 +2253,16 @@ kdp_vm_page_lookup( vm_page_t vm_page_lookup( - vm_object_t object, - vm_object_offset_t offset) + vm_object_t object, + vm_object_offset_t offset) { - vm_page_t mem; + vm_page_t mem; vm_page_bucket_t *bucket; - vm_page_queue_entry_t qe; - lck_spin_t *bucket_lock = NULL; - int hash_id; + vm_page_queue_entry_t qe; + lck_spin_t *bucket_lock = NULL; + int hash_id; #if DEBUG_VM_PAGE_LOOKUP - uint64_t start, elapsed; + uint64_t start, elapsed; 
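/*
 * [Editor's illustrative sketch.] The hash unlink in vm_page_remove()
 * above: the head-of-chain hit is special-cased as the common case, and
 * the interior walk carries a pointer to the previous link ("prev") so
 * removal needs no doubly linked list. Types are simplified stand-ins
 * for the kernel's packed-pointer chain.
 */
#include <stddef.h>

typedef struct hpage {
	struct hpage	*next;
} hpage_t;

/* victim is assumed to be present on the chain, as the kernel asserts */
static void
page_hash_unlink(hpage_t **head, hpage_t *victim)
{
	hpage_t **prev;
	hpage_t *cur;

	if (*head == victim) {			/* optimize for common case */
		*head = victim->next;
	} else {
		for (prev = &(*head)->next;
		    (cur = *prev) != victim;
		    prev = &cur->next) {
			continue;
		}
		*prev = victim->next;		/* splice the victim out */
	}
	victim->next = NULL;
}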
OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total); #endif @@ -2064,7 +2272,7 @@ vm_page_lookup( #if DEBUG_VM_PAGE_LOOKUP OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj); #endif - return (VM_PAGE_NULL); + return VM_PAGE_NULL; } mem = object->memq_hint; @@ -2076,12 +2284,12 @@ vm_page_lookup( #if DEBUG_VM_PAGE_LOOKUP OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint); #endif - return (mem); + return mem; } qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq); - if (! vm_page_queue_end(&object->memq, qe)) { - vm_page_t next_page; + if (!vm_page_queue_end(&object->memq, qe)) { + vm_page_t next_page; next_page = (vm_page_t)((uintptr_t)qe); assert(VM_PAGE_OBJECT(next_page) == object); @@ -2091,12 +2299,12 @@ vm_page_lookup( #if DEBUG_VM_PAGE_LOOKUP OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next); #endif - return (next_page); + return next_page; } } qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq); - if (! vm_page_queue_end(&object->memq, qe)) { + if (!vm_page_queue_end(&object->memq, qe)) { vm_page_t prev_page; prev_page = (vm_page_t)((uintptr_t)qe); @@ -2107,7 +2315,7 @@ vm_page_lookup( #if DEBUG_VM_PAGE_LOOKUP OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev); #endif - return (prev_page); + return prev_page; } } } @@ -2122,14 +2330,14 @@ vm_page_lookup( * new pages can be inserted into this object... this in turn * guarantees that the page we're looking for can't exist * if the bucket it hashes to is currently NULL even when looked - * at outside the scope of the hash bucket lock... this is a + * at outside the scope of the hash bucket lock... this is a * really cheap optimization to avoid taking the lock */ if (!bucket->page_list) { #if DEBUG_VM_PAGE_LOOKUP OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL); #endif - return (VM_PAGE_NULL); + return VM_PAGE_NULL; } #if DEBUG_VM_PAGE_LOOKUP @@ -2143,26 +2351,27 @@ vm_page_lookup( mem = (vm_page_t)vm_page_queue_first(&object->memq); while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) { - - if (mem->vmp_offset == offset) + if (mem->vmp_offset == offset) { break; + } mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq); } - if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) + if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) { mem = NULL; + } } else { - vm_page_object_t packed_object; packed_object = VM_PAGE_PACK_OBJECT(object); bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK]; - lck_spin_lock(bucket_lock); + lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket); for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); - mem != VM_PAGE_NULL; - mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) { + mem != VM_PAGE_NULL; + mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) { #if 0 /* * we don't hold the page queue lock * so this check isn't safe to make */ VM_PAGE_CHECK(mem); #endif - if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) + if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) { break; + } } lck_spin_unlock(bucket_lock); } @@ -2186,17 +2396,18 @@ vm_page_lookup( OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast); OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed); } - if (mem != VM_PAGE_NULL) + if (mem != VM_PAGE_NULL) { OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit); - else - OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss); + } else { + OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss); + } #endif if (mem !=
VM_PAGE_NULL) { assert(VM_PAGE_OBJECT(mem) == object); object->memq_hint = mem; } - return (mem); + return mem; } @@ -2210,23 +2421,23 @@ vm_page_lookup( */ void vm_page_rename( - vm_page_t mem, - vm_object_t new_object, - vm_object_offset_t new_offset) + vm_page_t mem, + vm_object_t new_object, + vm_object_offset_t new_offset) { - boolean_t internal_to_external, external_to_internal; - vm_tag_t tag; - vm_object_t m_object; + boolean_t internal_to_external, external_to_internal; + vm_tag_t tag; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(mem); assert(m_object != new_object); - assert(m_object); + assert(m_object); - XPR(XPR_VM_PAGE, - "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n", - new_object, new_offset, - mem, 0,0); + XPR(XPR_VM_PAGE, + "vm_page_rename, new object 0x%X, offset 0x%X page 0x%X\n", + new_object, new_offset, + mem, 0, 0); /* * Changes to mem->vmp_object require the page lock because @@ -2257,7 +2468,7 @@ vm_page_rename( } tag = m_object->wire_tag; - vm_page_remove(mem, TRUE); + vm_page_remove(mem, TRUE); vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL); if (internal_to_external) { @@ -2280,20 +2491,38 @@ vm_page_rename( */ void vm_page_init( - vm_page_t mem, - ppnum_t phys_page, - boolean_t lopage) + vm_page_t mem, + ppnum_t phys_page, + boolean_t lopage) { + uint_t i; + uintptr_t *p; + assert(phys_page); -#if DEBUG +#if DEBUG if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) { if (!(pmap_valid_page(phys_page))) { panic("vm_page_init: non-DRAM phys_page 0x%x\n", phys_page); } } -#endif - *mem = vm_page_template; +#endif /* DEBUG */ + + /* + * Initialize the fields of the vm_page. If adding any new fields to vm_page, + * try to use initial values which match 0. This minimizes the number of writes + * needed for boot-time initialization. + * + * Kernel bzero() isn't an inline yet, so do it by hand for performance. + */ + assert(VM_PAGE_NOT_ON_Q == 0); + assert(sizeof(*mem) % sizeof(uintptr_t) == 0); + for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) { + *p++ = 0; + } + mem->vmp_offset = (vm_object_offset_t)-1; + mem->vmp_busy = TRUE; + mem->vmp_lopage = lopage; VM_PAGE_SET_PHYS_PAGE(mem, phys_page); #if 0 @@ -2309,13 +2538,12 @@ vm_page_init( /* * make sure both the h/w referenced and modified bits are - * clear at this point... we are especially dependent on + * clear at this point... we are especially dependent on * not finding a 'stale' h/w modified in a number of spots * once this page goes back into use */ pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED); #endif - mem->vmp_lopage = lopage; } /* @@ -2324,25 +2552,25 @@ vm_page_init( * Remove a fictitious page from the free list. * Returns VM_PAGE_NULL if there are no free pages. 
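/*
 * [Editor's illustrative sketch.] The hand-rolled zeroing in
 * vm_page_init() above: because every field's default was chosen to be
 * 0 (the patch asserts VM_PAGE_NOT_ON_Q == 0), initialization is a
 * word-wise wipe followed by the few fields whose defaults are nonzero.
 * The struct below is a stand-in, not the real vm_page layout.
 */
#include <assert.h>
#include <stdint.h>

struct fake_page {
	uintptr_t	words[8];	/* pretend this is the field layout */
	uint64_t	offset;
	unsigned	busy : 1;
};

static void
fake_page_init(struct fake_page *mem)
{
	uintptr_t *p;
	unsigned int i;

	/* the struct must be a whole number of words for this to be safe */
	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);

	for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t);
	    i != 0; --i) {
		*p++ = 0;		/* one store per word, no bzero() call */
	}

	/* now the handful of fields whose initial value is not 0 */
	mem->offset = (uint64_t)-1;
	mem->busy = 1;
}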
*/ -int c_vm_page_grab_fictitious = 0; -int c_vm_page_grab_fictitious_failed = 0; -int c_vm_page_release_fictitious = 0; -int c_vm_page_more_fictitious = 0; +int c_vm_page_grab_fictitious = 0; +int c_vm_page_grab_fictitious_failed = 0; +int c_vm_page_release_fictitious = 0; +int c_vm_page_more_fictitious = 0; vm_page_t vm_page_grab_fictitious_common( ppnum_t phys_addr) { - vm_page_t m; + vm_page_t m; if ((m = (vm_page_t)zget(vm_page_zone))) { - vm_page_init(m, phys_addr, FALSE); m->vmp_fictitious = TRUE; c_vm_page_grab_fictitious++; - } else + } else { c_vm_page_grab_fictitious_failed++; + } return m; } @@ -2361,7 +2589,9 @@ vm_page_grab_guard(void) { vm_page_t page; page = vm_page_grab_fictitious_common(vm_page_guard_addr); - if (page) OSAddAtomic(1, &vm_guard_count); + if (page) { + OSAddAtomic(1, &vm_guard_count); + } return page; } @@ -2378,10 +2608,12 @@ vm_page_release_fictitious( assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED)); assert(m->vmp_fictitious); assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr || - VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr); + VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr); -if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) OSAddAtomic(-1, &vm_guard_count); + if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) { + OSAddAtomic(-1, &vm_guard_count); + } c_vm_page_release_fictitious++; @@ -2408,10 +2640,11 @@ if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) OSAddAtomic(-1, &vm_guard_co * zone. */ -void vm_page_more_fictitious(void) +void +vm_page_more_fictitious(void) { - vm_offset_t addr; - kern_return_t retval; + vm_offset_t addr; + kern_return_t retval; c_vm_page_more_fictitious++; @@ -2446,9 +2679,9 @@ void vm_page_more_fictitious(void) } retval = kernel_memory_allocate(zone_map, - &addr, PAGE_SIZE, 0, - KMA_KOBJECT|KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE); - if (retval != KERN_SUCCESS) { + &addr, PAGE_SIZE, 0, + KMA_KOBJECT | KMA_NOPAGEWAIT, VM_KERN_MEMORY_ZONE); + if (retval != KERN_SUCCESS) { /* * No page was available. Drop the * lock to give another thread a chance at it, and @@ -2476,7 +2709,7 @@ int vm_pool_low(void) { /* No locking, at worst we will fib. 
*/ - return( vm_page_free_count <= vm_page_free_reserved ); + return vm_page_free_count <= vm_page_free_reserved; } boolean_t vm_darkwake_mode = FALSE; @@ -2520,7 +2753,6 @@ vm_update_darkwake_mode(boolean_t darkwake_mode) vm_page_background_target = 0; #endif /* CONFIG_BACKGROUND_QUEUE */ - } else if (vm_darkwake_mode == FALSE) { #if CONFIG_BACKGROUND_QUEUE @@ -2537,13 +2769,15 @@ vm_update_darkwake_mode(boolean_t darkwake_mode) void vm_page_update_background_state(vm_page_t mem) { - if (vm_page_background_mode == VM_PAGE_BG_DISABLED) + if (vm_page_background_mode == VM_PAGE_BG_DISABLED) { return; + } - if (mem->vmp_in_background == FALSE) + if (mem->vmp_in_background == FALSE) { return; + } - task_t my_task = current_task(); + task_t my_task = current_task(); if (my_task) { if (task_get_darkwake_mode(my_task)) { @@ -2552,12 +2786,14 @@ vm_page_update_background_state(vm_page_t mem) } #if BACKGROUNDQ_BASED_ON_QOS - if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) + if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) { return; + } #else if (my_task) { - if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) + if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) { return; + } } #endif vm_page_lockspin_queues(); @@ -2574,10 +2810,11 @@ vm_page_update_background_state(vm_page_t mem) void vm_page_assign_background_state(vm_page_t mem) { - if (vm_page_background_mode == VM_PAGE_BG_DISABLED) + if (vm_page_background_mode == VM_PAGE_BG_DISABLED) { return; + } - task_t my_task = current_task(); + task_t my_task = current_task(); if (my_task) { if (task_get_darkwake_mode(my_task)) { @@ -2587,124 +2824,128 @@ vm_page_assign_background_state(vm_page_t mem) } #if BACKGROUNDQ_BASED_ON_QOS - if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) - mem->vmp_in_background = TRUE; - else - mem->vmp_in_background = FALSE; + if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) { + mem->vmp_in_background = TRUE; + } else { + mem->vmp_in_background = FALSE; + } #else - if (my_task) + if (my_task) { mem->vmp_in_background = proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG); + } #endif } void vm_page_remove_from_backgroundq( - vm_page_t mem) + vm_page_t mem) { - vm_object_t m_object; + vm_object_t m_object; LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); if (mem->vmp_on_backgroundq) { - vm_page_queue_remove(&vm_page_queue_background, mem, vm_page_t, vmp_backgroundq); + vm_page_queue_remove(&vm_page_queue_background, mem, vmp_backgroundq); mem->vmp_backgroundq.next = 0; mem->vmp_backgroundq.prev = 0; mem->vmp_on_backgroundq = FALSE; - + vm_page_background_count--; m_object = VM_PAGE_OBJECT(mem); - if (m_object->internal) + if (m_object->internal) { vm_page_background_internal_count--; - else + } else { vm_page_background_external_count--; + } } else { assert(VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.next) == (uintptr_t)NULL && - VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.prev) == (uintptr_t)NULL); + VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.prev) == (uintptr_t)NULL); } } void vm_page_add_to_backgroundq( - vm_page_t mem, - boolean_t first) -{ - vm_object_t m_object; + vm_page_t mem, + boolean_t first) +{ + vm_object_t m_object; LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - if (vm_page_background_mode == VM_PAGE_BG_DISABLED) + if (vm_page_background_mode == VM_PAGE_BG_DISABLED) { return; + } 
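/*
 * [Editor's illustrative sketch.] The vm_pool_low() idiom above reads
 * shared counters with no lock because the answer is purely advisory: a
 * stale value only sends one caller down the slightly wrong path. The
 * kernel reads plain globals; a C11 relaxed atomic load is assumed here
 * as the portable way to express the same "racy read is fine" intent.
 */
#include <stdatomic.h>

static _Atomic unsigned int	pool_free_count;
static unsigned int		pool_free_reserved;

static int
pool_low(void)
{
	/* no locking: at worst we will fib */
	return atomic_load_explicit(&pool_free_count,
	    memory_order_relaxed) <= pool_free_reserved;
}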
if (mem->vmp_on_backgroundq == FALSE) { - m_object = VM_PAGE_OBJECT(mem); - if (vm_page_background_exclude_external && !m_object->internal) + if (vm_page_background_exclude_external && !m_object->internal) { return; + } - if (first == TRUE) - vm_page_queue_enter_first(&vm_page_queue_background, mem, vm_page_t, vmp_backgroundq); - else - vm_page_queue_enter(&vm_page_queue_background, mem, vm_page_t, vmp_backgroundq); + if (first == TRUE) { + vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_backgroundq); + } else { + vm_page_queue_enter(&vm_page_queue_background, mem, vmp_backgroundq); + } mem->vmp_on_backgroundq = TRUE; - + vm_page_background_count++; - if (m_object->internal) + if (m_object->internal) { vm_page_background_internal_count++; - else + } else { vm_page_background_external_count++; + } } } #endif /* CONFIG_BACKGROUND_QUEUE */ /* - * this is an interface to support bring-up of drivers - * on platforms with physical memory > 4G... + * This can be switched to FALSE to help debug drivers + * that are having problems with memory > 4G. */ -int vm_himemory_mode = 2; - +boolean_t vm_himemory_mode = TRUE; /* * this interface exists to support hardware controllers * incapable of generating DMAs with more than 32 bits * of address on platforms with physical memory > 4G... */ -unsigned int vm_lopages_allocated_q = 0; -unsigned int vm_lopages_allocated_cpm_success = 0; -unsigned int vm_lopages_allocated_cpm_failed = 0; -vm_page_queue_head_t vm_lopage_queue_free __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); +unsigned int vm_lopages_allocated_q = 0; +unsigned int vm_lopages_allocated_cpm_success = 0; +unsigned int vm_lopages_allocated_cpm_failed = 0; +vm_page_queue_head_t vm_lopage_queue_free __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); vm_page_t vm_page_grablo(void) { - vm_page_t mem; + vm_page_t mem; - if (vm_lopage_needed == FALSE) - return (vm_page_grab()); + if (vm_lopage_needed == FALSE) { + return vm_page_grab(); + } lck_mtx_lock_spin(&vm_page_queue_free_lock); - if ( !vm_page_queue_empty(&vm_lopage_queue_free)) { - vm_page_queue_remove_first(&vm_lopage_queue_free, - mem, - vm_page_t, - vmp_pageq); + if (!vm_page_queue_empty(&vm_lopage_queue_free)) { + vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq); assert(vm_lopage_free_count); assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q); mem->vmp_q_state = VM_PAGE_NOT_ON_Q; - vm_lopage_free_count--; + vm_lopage_free_count--; vm_lopages_allocated_q++; - if (vm_lopage_free_count < vm_lopage_lowater) + if (vm_lopage_free_count < vm_lopage_lowater) { vm_lopage_refill = TRUE; + } lck_mtx_unlock(&vm_page_queue_free_lock); @@ -2714,20 +2955,19 @@ vm_page_grablo(void) } else { lck_mtx_unlock(&vm_page_queue_free_lock); - if (cpm_allocate(PAGE_SIZE, &mem, atop(0xffffffff), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) { - + if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) { lck_mtx_lock_spin(&vm_page_queue_free_lock); vm_lopages_allocated_cpm_failed++; lck_mtx_unlock(&vm_page_queue_free_lock); - return (VM_PAGE_NULL); + return VM_PAGE_NULL; } assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q); mem->vmp_busy = TRUE; vm_page_lockspin_queues(); - + mem->vmp_gobbled = FALSE; vm_page_gobble_count--; vm_page_wire_count--; @@ -2747,7 +2987,7 @@ vm_page_grablo(void) VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0); enable_preemption(); - return (mem); + return mem; } @@ -2756,18 +2996,18 @@ vm_page_grablo(void) * * first try to grab a page from the per-cpu free 
list... * this must be done while pre-emption is disabled... if - * a page is available, we're done... + * a page is available, we're done... * if no page is available, grab the vm_page_queue_free_lock * and see if current number of free pages would allow us - * to grab at least 1... if not, return VM_PAGE_NULL as before... + * to grab at least 1... if not, return VM_PAGE_NULL as before... * if there are pages available, disable preemption and - * recheck the state of the per-cpu free list... we could + * recheck the state of the per-cpu free list... we could * have been preempted and moved to a different cpu, or - * some other thread could have re-filled it... if still + * some other thread could have re-filled it... if still * empty, figure out how many pages we can steal from the * global free queue and move to the per-cpu queue... * return 1 of these pages when done... only wakeup the - * pageout_scan thread if we moved pages from the global + * pageout_scan thread if we moved pages from the global * list... no need for the wakeup if we've satisfied the * request from the per-cpu queue. */ @@ -2776,10 +3016,13 @@ vm_page_grablo(void) vm_page_t vm_page_grab_secluded(void); #endif /* CONFIG_SECLUDED_MEMORY */ +static inline void +vm_page_grab_diags(void); + vm_page_t vm_page_grab(void) { - return vm_page_grab_options(0); + return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE); } #if HIBERNATION @@ -2790,7 +3033,7 @@ vm_page_t vm_page_grab_options( int grab_options) { - vm_page_t mem; + vm_page_t mem; disable_preemption(); @@ -2803,11 +3046,13 @@ return_page_from_cpu_list: panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__); } #endif /* HIBERNATION */ - PROCESSOR_DATA(current_processor(), page_grab_count) += 1; - PROCESSOR_DATA(current_processor(), free_pages) = mem->vmp_snext; + + vm_page_grab_diags(); + PROCESSOR_DATA(current_processor(), page_grab_count) += 1; + PROCESSOR_DATA(current_processor(), free_pages) = mem->vmp_snext; VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0); - enable_preemption(); + enable_preemption(); VM_PAGE_ZERO_PAGEQ_ENTRY(mem); mem->vmp_q_state = VM_PAGE_NOT_ON_Q; @@ -2815,7 +3060,7 @@ return_page_from_cpu_list: assert(mem->vmp_tabled == FALSE); assert(mem->vmp_object == 0); assert(!mem->vmp_laundry); - assertf(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)), "page = 0x%llx", (uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)); + ASSERT_PMAP_FREE(mem); assert(mem->vmp_busy); assert(!mem->vmp_pmapped); assert(!mem->vmp_wpmapped); @@ -2836,16 +3081,26 @@ return_page_from_cpu_list: #if VM_PAGE_WIRE_COUNT_WARNING if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) { printf("mk: vm_page_grab(): high wired page count of %d\n", - vm_page_wire_count); + vm_page_wire_count); } #endif #if VM_PAGE_GOBBLE_COUNT_WARNING if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) { printf("mk: vm_page_grab(): high gobbled page count of %d\n", - vm_page_gobble_count); + vm_page_gobble_count); } #endif + /* + * If free count is low and we have delayed pages from early boot, + * get one of those instead. + */ + if (__improbable(vm_delayed_count > 0 && + vm_page_free_count <= vm_page_free_target && + (mem = vm_get_delayed_page(grab_options)) != NULL)) { + return mem; + } + lck_mtx_lock_spin(&vm_page_queue_free_lock); /* @@ -2862,7 +3117,7 @@ return_page_from_cpu_list: /* ... but can we try and grab from the secluded queue? 
*/ if (vm_page_secluded_count > 0 && ((grab_options & VM_PAGE_GRAB_SECLUDED) || - task_can_use_secluded_mem(current_task(), TRUE))) { + task_can_use_secluded_mem(current_task(), TRUE))) { mem = vm_page_grab_secluded(); if (grab_options & VM_PAGE_GRAB_SECLUDED) { vm_page_secluded.grab_for_iokit++; @@ -2874,6 +3129,7 @@ return_page_from_cpu_list: VM_CHECK_MEMORYSTATUS; disable_preemption(); + vm_page_grab_diags(); PROCESSOR_DATA(current_processor(), page_grab_count) += 1; VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0); enable_preemption(); @@ -2884,20 +3140,18 @@ return_page_from_cpu_list: #else /* CONFIG_SECLUDED_MEMORY */ (void) grab_options; #endif /* CONFIG_SECLUDED_MEMORY */ - } - else { - vm_page_t head; - vm_page_t tail; - unsigned int pages_to_steal; - unsigned int color; - unsigned int clump_end, sub_count; - - while ( vm_page_free_count == 0 ) { + } else { + vm_page_t head; + vm_page_t tail; + unsigned int pages_to_steal; + unsigned int color; + unsigned int clump_end, sub_count; + while (vm_page_free_count == 0) { lck_mtx_unlock(&vm_page_queue_free_lock); /* * must be a privileged thread to be - * in this state since a non-privileged + * in this state since a non-privileged * thread would have bailed if we were * under the vm_page_free_reserved mark */ @@ -2910,19 +3164,20 @@ return_page_from_cpu_list: if ((mem = PROCESSOR_DATA(current_processor(), free_pages))) { lck_mtx_unlock(&vm_page_queue_free_lock); - /* + /* * we got preempted and moved to another processor * or we got preempted and someone else ran and filled the cache */ goto return_page_from_cpu_list; } - if (vm_page_free_count <= vm_page_free_reserved) - pages_to_steal = 1; - else { - if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) + if (vm_page_free_count <= vm_page_free_reserved) { + pages_to_steal = 1; + } else { + if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) { pages_to_steal = vm_free_magazine_refill_limit; - else - pages_to_steal = (vm_page_free_count - vm_page_free_reserved); + } else { + pages_to_steal = (vm_page_free_count - vm_page_free_reserved); + } } color = PROCESSOR_DATA(current_processor(), start_color); head = tail = NULL; @@ -2931,20 +3186,15 @@ return_page_from_cpu_list: clump_end = sub_count = 0; while (pages_to_steal--) { - - while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) - color = (color + 1) & vm_color_mask; + while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) { + color = (color + 1) & vm_color_mask; + } #if defined(__x86_64__) vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq, - clump_end); + mem, clump_end); #else vm_page_queue_remove_first(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq); + mem, vmp_pageq); #endif assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q); @@ -2964,17 +3214,20 @@ return_page_from_cpu_list: color = (color + 1) & vm_color_mask; } #else - if (clump_end) color = (color + 1) & vm_color_mask; + if (clump_end) { + color = (color + 1) & vm_color_mask; + } #endif /* if DEVELOPMENT || DEBUG */ #endif /* if defined(__arm__) || defined(__arm64__) */ - if (head == NULL) + if (head == NULL) { head = mem; - else - tail->vmp_snext = mem; - tail = mem; + } else { + tail->vmp_snext = mem; + } + tail = mem; assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0); assert(mem->vmp_tabled == FALSE); @@ -2983,7 +3236,7 @@ return_page_from_cpu_list: mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q; 
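/*
 * [Editor's illustrative sketch.] The stealing loop above, which builds
 * the refill batch: pop pages round-robin from per-color free queues,
 * chaining them through head/tail into one local list so the whole
 * batch is handed to the per-CPU cache in one step. The color count and
 * list types are stand-ins for the kernel's clump-aware queues, and the
 * caller is assumed to have verified, under the free-queue lock, that
 * at least pages_to_steal pages exist across the colors.
 */
#include <stddef.h>

typedef struct cpage { struct cpage *next; } cpage_t;

#define NCOLORS	8			/* assumed; really vm_color_mask + 1 */

static cpage_t	*color_queue[NCOLORS];

static cpage_t *
steal_pages(unsigned int *start_color, unsigned int pages_to_steal)
{
	cpage_t *head = NULL, *tail = NULL, *mem;
	unsigned int color = *start_color;

	while (pages_to_steal--) {
		while (color_queue[color] == NULL) {	/* skip empty colors */
			color = (color + 1) & (NCOLORS - 1);
		}
		mem = color_queue[color];
		color_queue[color] = mem->next;
		mem->next = NULL;

		if (head == NULL) {
			head = mem;
		} else {
			tail->next = mem;	/* append to the local chain */
		}
		tail = mem;
		color = (color + 1) & (NCOLORS - 1);	/* round-robin */
	}
	*start_color = color;			/* resume here next time */
	return head;
}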
- assertf(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)), "page = 0x%llx", (uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)); + ASSERT_PMAP_FREE(mem); assert(mem->vmp_busy); assert(!mem->vmp_pmapped); assert(!mem->vmp_wpmapped); @@ -3005,7 +3258,8 @@ return_page_from_cpu_list: /* * satisfy this request */ - PROCESSOR_DATA(current_processor(), page_grab_count) += 1; + vm_page_grab_diags(); + PROCESSOR_DATA(current_processor(), page_grab_count) += 1; VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0); mem = head; assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q); @@ -3025,8 +3279,9 @@ return_page_from_cpu_list: * We don't have the counts locked ... if they change a little, * it doesn't really matter. */ - if (vm_page_free_count < vm_page_free_min) - thread_wakeup((event_t) &vm_page_free_wanted); + if (vm_page_free_count < vm_page_free_min) { + thread_wakeup((event_t) &vm_page_free_wanted); + } VM_CHECK_MEMORYSTATUS; @@ -3044,9 +3299,9 @@ return_page_from_cpu_list: vm_page_t vm_page_grab_secluded(void) { - vm_page_t mem; - vm_object_t object; - int refmod_state; + vm_page_t mem; + vm_object_t object; + int refmod_state; if (vm_page_secluded_count == 0) { /* no secluded pages to grab... */ @@ -3066,7 +3321,7 @@ vm_page_grab_secluded(void) /* can we grab from the secluded queue? */ if (vm_page_secluded_count > vm_page_secluded_target || (vm_page_secluded_count > 0 && - task_can_use_secluded_mem(current_task(), TRUE))) { + task_can_use_secluded_mem(current_task(), TRUE))) { /* OK */ } else { /* can't grab from secluded queue... */ @@ -3077,8 +3332,8 @@ vm_page_grab_secluded(void) /* we can grab a page from secluded queue! */ assert((vm_page_secluded_count_free + - vm_page_secluded_count_inuse) == - vm_page_secluded_count); + vm_page_secluded_count_inuse) == + vm_page_secluded_count); if (current_task()->task_can_use_secluded_mem) { assert(num_tasks_can_use_secluded_mem > 0); } @@ -3118,7 +3373,7 @@ vm_page_grab_secluded(void) if (!vm_object_lock_try(object)) { // printf("SECLUDED: page %p: object %p locked\n", mem, object); vm_page_secluded.grab_failure_locked++; - reactivate_secluded_page: +reactivate_secluded_page: vm_page_activate(mem); vm_page_unlock_queues(); return VM_PAGE_NULL; @@ -3159,7 +3414,7 @@ vm_page_grab_secluded(void) vm_object_unlock(object); object = VM_OBJECT_NULL; if (vm_page_free_verify) { - assertf(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)), "page = 0x%llx", (uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)); + ASSERT_PMAP_FREE(mem); } pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)); vm_page_secluded.grab_success_other++; @@ -3181,6 +3436,20 @@ vm_page_grab_secluded(void) } #endif /* CONFIG_SECLUDED_MEMORY */ + +static inline void +vm_page_grab_diags() +{ +#if DEVELOPMENT || DEBUG + task_t task = current_task(); + if (task == NULL) { + return; + } + + ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1); +#endif /* DEVELOPMENT || DEBUG */ +} + /* * vm_page_release: * @@ -3189,14 +3458,14 @@ vm_page_grab_secluded(void) void vm_page_release( - vm_page_t mem, - boolean_t page_queues_locked) + vm_page_t mem, + boolean_t page_queues_locked) { - unsigned int color; - int need_wakeup = 0; - int need_priv_wakeup = 0; + unsigned int color; + int need_wakeup = 0; + int need_priv_wakeup = 0; #if CONFIG_SECLUDED_MEMORY - int need_secluded_wakeup = 0; + int need_secluded_wakeup = 0; #endif /* CONFIG_SECLUDED_MEMORY */ if (page_queues_locked) { @@ -3207,7 +3476,7 @@ vm_page_release( assert(!mem->vmp_private && !mem->vmp_fictitious); if (vm_page_free_verify) { - 
assertf(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)), "page = 0x%llx", (uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)); + ASSERT_PMAP_FREE(mem); } // dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */ @@ -3223,32 +3492,30 @@ vm_page_release( assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0); #if CONFIG_BACKGROUND_QUEUE assert(mem->vmp_backgroundq.next == 0 && - mem->vmp_backgroundq.prev == 0 && - mem->vmp_on_backgroundq == FALSE); -#endif + mem->vmp_backgroundq.prev == 0 && + mem->vmp_on_backgroundq == FALSE); +#endif if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) && vm_lopage_free_count < vm_lopage_free_limit && VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) { - /* + /* * this exists to support hardware controllers * incapable of generating DMAs with more than 32 bits * of address on platforms with physical memory > 4G... */ - vm_page_queue_enter_first(&vm_lopage_queue_free, - mem, - vm_page_t, - vmp_pageq); + vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq); vm_lopage_free_count++; - if (vm_lopage_free_count >= vm_lopage_free_limit) + if (vm_lopage_free_count >= vm_lopage_free_limit) { vm_lopage_refill = FALSE; + } mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q; mem->vmp_lopage = TRUE; #if CONFIG_SECLUDED_MEMORY } else if (vm_page_free_count > vm_page_free_reserved && - vm_page_secluded_count < vm_page_secluded_target && - num_tasks_can_use_secluded_mem == 0) { + vm_page_secluded_count < vm_page_secluded_target && + num_tasks_can_use_secluded_mem == 0) { /* * XXX FBDP TODO: also avoid refilling secluded queue * when some IOKit objects are already grabbing from it... @@ -3263,10 +3530,7 @@ vm_page_release( } mem->vmp_lopage = FALSE; LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - vm_page_queue_enter_first(&vm_page_queue_secluded, - mem, - vm_page_t, - vmp_pageq); + vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq); mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q; vm_page_secluded_count++; vm_page_secluded_count_free++; @@ -3275,7 +3539,7 @@ vm_page_release( } LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED); if (vm_page_free_wanted_secluded > 0) { - vm_page_free_wanted_secluded--; + vm_page_free_wanted_secluded--; need_secluded_wakeup = 1; } #endif /* CONFIG_SECLUDED_MEMORY */ @@ -3285,15 +3549,9 @@ vm_page_release( color = VM_PAGE_GET_COLOR(mem); #if defined(__x86_64__) - vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq); + vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem); #else - vm_page_queue_enter(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq); + vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq); #endif vm_page_free_count++; /* @@ -3318,17 +3576,17 @@ vm_page_release( assert(vm_page_free_count > 0); if (vm_page_free_wanted_privileged > 0) { - vm_page_free_wanted_privileged--; + vm_page_free_wanted_privileged--; need_priv_wakeup = 1; #if CONFIG_SECLUDED_MEMORY } else if (vm_page_free_wanted_secluded > 0 && - vm_page_free_count > vm_page_free_reserved) { + vm_page_free_count > vm_page_free_reserved) { vm_page_free_wanted_secluded--; need_secluded_wakeup = 1; #endif /* CONFIG_SECLUDED_MEMORY */ } else if (vm_page_free_wanted > 0 && - vm_page_free_count > vm_page_free_reserved) { - vm_page_free_wanted--; + vm_page_free_count > vm_page_free_reserved) { + vm_page_free_wanted--; need_wakeup = 1; } } @@ -3338,29 +3596,32 @@ vm_page_release( lck_mtx_unlock(&vm_page_queue_free_lock); - 
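/*
 * [Editor's illustrative sketch.] The wakeup bookkeeping in
 * vm_page_release() above: decide under the free-queue lock which class
 * of waiter gets the page (privileged first, ordinary only above the
 * reserve), but post the wakeup after dropping the lock so the woken
 * thread does not immediately collide with it. The lock and wakeup
 * calls are stand-ins for lck_mtx and thread_wakeup_one().
 */
static unsigned int	free_count, free_reserved;
static unsigned int	wanted, wanted_privileged;

static void free_queue_lock(void) { }
static void free_queue_unlock(void) { }
static void wake_one(void *event) { (void)event; }

static void
page_release(void)
{
	int need_wakeup = 0, need_priv_wakeup = 0;

	free_queue_lock();
	free_count++;
	if (wanted_privileged > 0) {
		wanted_privileged--;
		need_priv_wakeup = 1;
	} else if (wanted > 0 && free_count > free_reserved) {
		wanted--;
		need_wakeup = 1;
	}
	free_queue_unlock();

	if (need_priv_wakeup) {
		wake_one(&wanted_privileged);
	} else if (need_wakeup) {
		wake_one(&free_count);
	}
}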
if (need_priv_wakeup) + if (need_priv_wakeup) { thread_wakeup_one((event_t) &vm_page_free_wanted_privileged); + } #if CONFIG_SECLUDED_MEMORY - else if (need_secluded_wakeup) + else if (need_secluded_wakeup) { thread_wakeup_one((event_t) &vm_page_free_wanted_secluded); + } #endif /* CONFIG_SECLUDED_MEMORY */ - else if (need_wakeup) + else if (need_wakeup) { thread_wakeup_one((event_t) &vm_page_free_count); + } VM_CHECK_MEMORYSTATUS; } /* * This version of vm_page_release() is used only at startup - * when we are single-threaded and pages are being released + * when we are single-threaded and pages are being released * for the first time. Hence, no locking or unnecessary checks are made. * Note: VM_CHECK_MEMORYSTATUS invoked by the caller. */ void vm_page_release_startup( - vm_page_t mem) + vm_page_t mem) { - vm_page_queue_t queue_free; + vm_page_queue_t queue_free; if (vm_lopage_free_count < vm_lopage_free_limit && VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) { @@ -3384,12 +3645,13 @@ vm_page_release_startup( } if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) { #if defined(__x86_64__) - vm_page_queue_enter_clump(queue_free, mem, vm_page_t, vmp_pageq); + vm_page_queue_enter_clump(queue_free, mem); #else - vm_page_queue_enter(queue_free, mem, vm_page_t, vmp_pageq); + vm_page_queue_enter(queue_free, mem, vmp_pageq); #endif - } else - vm_page_queue_enter_first(queue_free, mem, vm_page_t, vmp_pageq); + } else { + vm_page_queue_enter_first(queue_free, mem, vmp_pageq); + } } /* @@ -3405,7 +3667,7 @@ vm_page_release_startup( boolean_t vm_page_wait( - int interruptible ) + int interruptible ) { /* * We can't use vm_page_free_reserved to make this @@ -3414,9 +3676,9 @@ vm_page_wait( * succeeds, the second fails. After the first page is freed, * a call to vm_page_wait must really block. */ - kern_return_t wait_result; - int need_wakeup = 0; - int is_privileged = current_thread()->options & TH_OPT_VMPRIV; + kern_return_t wait_result; + int need_wakeup = 0; + int is_privileged = current_thread()->options & TH_OPT_VMPRIV; lck_mtx_lock_spin(&vm_page_queue_free_lock); @@ -3431,12 +3693,13 @@ vm_page_wait( } if (is_privileged) { - if (vm_page_free_wanted_privileged++ == 0) + if (vm_page_free_wanted_privileged++ == 0) { need_wakeup = 1; + } wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, interruptible); #if CONFIG_SECLUDED_MEMORY } else if (secluded_for_apps && - task_can_use_secluded_mem(current_task(), FALSE)) { + task_can_use_secluded_mem(current_task(), FALSE)) { #if 00 /* XXX FBDP: need pageq lock for this... 
*/ /* XXX FBDP: might wait even if pages available, */ @@ -3454,33 +3717,35 @@ vm_page_wait( interruptible); #endif /* CONFIG_SECLUDED_MEMORY */ } else { - if (vm_page_free_wanted++ == 0) + if (vm_page_free_wanted++ == 0) { need_wakeup = 1; + } wait_result = assert_wait((event_t)&vm_page_free_count, - interruptible); + interruptible); } lck_mtx_unlock(&vm_page_queue_free_lock); counter(c_vm_page_wait_block++); - if (need_wakeup) + if (need_wakeup) { thread_wakeup((event_t)&vm_page_free_wanted); + } if (wait_result == THREAD_WAITING) { VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START, - vm_page_free_wanted_privileged, - vm_page_free_wanted, + vm_page_free_wanted_privileged, + vm_page_free_wanted, #if CONFIG_SECLUDED_MEMORY - vm_page_free_wanted_secluded, + vm_page_free_wanted_secluded, #else /* CONFIG_SECLUDED_MEMORY */ - 0, + 0, #endif /* CONFIG_SECLUDED_MEMORY */ - 0); + 0); wait_result = thread_block(THREAD_CONTINUE_NULL); VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, - VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0); + VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0); } - return (wait_result == THREAD_AWAKENED); + return wait_result == THREAD_AWAKENED; } /* @@ -3494,11 +3759,11 @@ vm_page_wait( vm_page_t vm_page_alloc( - vm_object_t object, - vm_object_offset_t offset) + vm_object_t object, + vm_object_offset_t offset) { - vm_page_t mem; - int grab_options; + vm_page_t mem; + int grab_options; vm_object_lock_assert_exclusive(object); grab_options = 0; @@ -3508,41 +3773,43 @@ vm_page_alloc( } #endif /* CONFIG_SECLUDED_MEMORY */ mem = vm_page_grab_options(grab_options); - if (mem == VM_PAGE_NULL) + if (mem == VM_PAGE_NULL) { return VM_PAGE_NULL; + } vm_page_insert(mem, object, offset); - return(mem); + return mem; } /* * vm_page_alloc_guard: - * - * Allocate a fictitious page which will be used + * + * Allocate a fictitious page which will be used * as a guard page. The page will be inserted into * the object and returned to the caller. 
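/*
 * [Editor's illustrative sketch.] The waiter side matching the release
 * sketch earlier, following the comment above about why free_reserved
 * cannot decide this: each waiter registers by bumping a "wanted"
 * counter under the lock, and only the first waiter (the 0 -> 1
 * transition) kicks the pageout daemon. assert_wait()/thread_block()
 * are stubbed, and the privileged and secluded variants are omitted.
 */
static unsigned int	page_free_wanted;

static void wanted_lock(void) { }
static void wanted_unlock(void) { }
static void wake_pageout_daemon(void) { }
static int  block_until_woken(void *event) { (void)event; return 1; }

static int
page_wait(void)
{
	int need_wakeup;

	wanted_lock();
	need_wakeup = (page_free_wanted++ == 0);
	wanted_unlock();

	if (need_wakeup) {
		wake_pageout_daemon();	/* only the first waiter kicks it */
	}
	return block_until_woken(&page_free_wanted);
}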
*/ vm_page_t vm_page_alloc_guard( - vm_object_t object, - vm_object_offset_t offset) + vm_object_t object, + vm_object_offset_t offset) { - vm_page_t mem; + vm_page_t mem; vm_object_lock_assert_exclusive(object); mem = vm_page_grab_guard(); - if (mem == VM_PAGE_NULL) + if (mem == VM_PAGE_NULL) { return VM_PAGE_NULL; + } vm_page_insert(mem, object, offset); - return(mem); + return mem; } -counter(unsigned int c_laundry_pages_freed = 0;) +counter(unsigned int c_laundry_pages_freed = 0; ) /* * vm_page_free_prepare: @@ -3554,7 +3821,7 @@ counter(unsigned int c_laundry_pages_freed = 0;) */ static void vm_page_free_prepare( - vm_page_t mem) + vm_page_t mem) { vm_page_free_prepare_queues(mem); vm_page_free_prepare_object(mem, TRUE); @@ -3563,9 +3830,9 @@ vm_page_free_prepare( void vm_page_free_prepare_queues( - vm_page_t mem) + vm_page_t mem) { - vm_object_t m_object; + vm_object_t m_object; VM_PAGE_CHECK(mem); @@ -3588,20 +3855,19 @@ vm_page_free_prepare_queues( vm_pageout_steal_laundry(mem, TRUE); counter(++c_laundry_pages_freed); } - + vm_page_queues_remove(mem, TRUE); if (VM_PAGE_WIRED(mem)) { assert(mem->vmp_wire_count > 0); if (m_object) { - VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object); VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem); VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag); assert(m_object->resident_page_count >= - m_object->wired_page_count); + m_object->wired_page_count); if (m_object->purgable == VM_PURGABLE_VOLATILE) { OSAddAtomic(+1, &vm_page_purgeable_count); @@ -3609,14 +3875,14 @@ vm_page_free_prepare_queues( OSAddAtomic(-1, &vm_page_purgeable_wired_count); } if ((m_object->purgable == VM_PURGABLE_VOLATILE || - m_object->purgable == VM_PURGABLE_EMPTY) && + m_object->purgable == VM_PURGABLE_EMPTY) && m_object->vo_owner != TASK_NULL) { - task_t owner; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int ledger_idx_nonvolatile_compressed; - boolean_t do_footprint; + task_t owner; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + boolean_t do_footprint; owner = VM_OBJECT_OWNER(m_object); vm_object_ledger_tag_ledgers( @@ -3633,29 +3899,31 @@ vm_page_free_prepare_queues( */ /* one less "non-volatile"... */ ledger_debit(owner->ledger, - ledger_idx_nonvolatile, - PAGE_SIZE); + ledger_idx_nonvolatile, + PAGE_SIZE); if (do_footprint) { /* ... 
and "phys_footprint" */ ledger_debit(owner->ledger, - task_ledgers.phys_footprint, - PAGE_SIZE); + task_ledgers.phys_footprint, + PAGE_SIZE); } /* one more "volatile" */ ledger_credit(owner->ledger, - ledger_idx_volatile, - PAGE_SIZE); + ledger_idx_volatile, + PAGE_SIZE); } } - if (!mem->vmp_private && !mem->vmp_fictitious) + if (!mem->vmp_private && !mem->vmp_fictitious) { vm_page_wire_count--; + } mem->vmp_q_state = VM_PAGE_NOT_ON_Q; mem->vmp_wire_count = 0; assert(!mem->vmp_gobbled); } else if (mem->vmp_gobbled) { - if (!mem->vmp_private && !mem->vmp_fictitious) + if (!mem->vmp_private && !mem->vmp_fictitious) { vm_page_wire_count--; + } vm_page_gobble_count--; } } @@ -3663,20 +3931,20 @@ vm_page_free_prepare_queues( void vm_page_free_prepare_object( - vm_page_t mem, - boolean_t remove_from_hash) + vm_page_t mem, + boolean_t remove_from_hash) { - if (mem->vmp_tabled) - vm_page_remove(mem, remove_from_hash); /* clears tabled, object, offset */ - - PAGE_WAKEUP(mem); /* clears wanted */ + if (mem->vmp_tabled) { + vm_page_remove(mem, remove_from_hash); /* clears tabled, object, offset */ + } + PAGE_WAKEUP(mem); /* clears wanted */ if (mem->vmp_private) { mem->vmp_private = FALSE; mem->vmp_fictitious = TRUE; VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr); } - if ( !mem->vmp_fictitious) { + if (!mem->vmp_fictitious) { assert(mem->vmp_pageq.next == 0); assert(mem->vmp_pageq.prev == 0); assert(mem->vmp_listq.next == 0); @@ -3686,6 +3954,7 @@ vm_page_free_prepare_object( assert(mem->vmp_backgroundq.prev == 0); #endif /* CONFIG_BACKGROUND_QUEUE */ assert(mem->vmp_next_m == 0); + ASSERT_PMAP_FREE(mem); vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage); } } @@ -3701,7 +3970,7 @@ vm_page_free_prepare_object( */ void vm_page_free( - vm_page_t mem) + vm_page_t mem) { vm_page_free_prepare(mem); @@ -3709,15 +3978,15 @@ vm_page_free( vm_page_release_fictitious(mem); } else { vm_page_release(mem, - TRUE); /* page queues are locked */ + TRUE); /* page queues are locked */ } } void vm_page_free_unlocked( - vm_page_t mem, - boolean_t remove_from_hash) + vm_page_t mem, + boolean_t remove_from_hash) { vm_page_lockspin_queues(); vm_page_free_prepare_queues(mem); @@ -3744,19 +4013,18 @@ vm_page_free_unlocked( */ void vm_page_free_list( - vm_page_t freeq, - boolean_t prepare_object) + vm_page_t freeq, + boolean_t prepare_object) { - vm_page_t mem; - vm_page_t nxt; - vm_page_t local_freeq; - int pg_count; + vm_page_t mem; + vm_page_t nxt; + vm_page_t local_freeq; + int pg_count; LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED); while (freeq) { - pg_count = 0; local_freeq = VM_PAGE_NULL; mem = freeq; @@ -3768,23 +4036,23 @@ vm_page_free_list( * contention on the global free queue lock */ while (mem && pg_count < 64) { - assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) || - (mem->vmp_q_state == VM_PAGE_IS_WIRED)); + (mem->vmp_q_state == VM_PAGE_IS_WIRED)); #if CONFIG_BACKGROUND_QUEUE assert(mem->vmp_backgroundq.next == 0 && - mem->vmp_backgroundq.prev == 0 && - mem->vmp_on_backgroundq == FALSE); + mem->vmp_backgroundq.prev == 0 && + mem->vmp_on_backgroundq == FALSE); #endif nxt = mem->vmp_snext; mem->vmp_snext = NULL; assert(mem->vmp_pageq.prev == 0); if (vm_page_free_verify && !mem->vmp_fictitious && !mem->vmp_private) { - assertf(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)), "page = 0x%llx", (uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)); + ASSERT_PMAP_FREE(mem); } - if (prepare_object == TRUE) + if (prepare_object == TRUE) { 
vm_page_free_prepare_object(mem, TRUE); + } if (!mem->vmp_fictitious) { assert(mem->vmp_busy); @@ -3795,9 +4063,9 @@ vm_page_free_list( vm_page_release(mem, FALSE); /* page queues are not locked */ #if CONFIG_SECLUDED_MEMORY } else if (vm_page_secluded_count < vm_page_secluded_target && - num_tasks_can_use_secluded_mem == 0) { + num_tasks_can_use_secluded_mem == 0) { vm_page_release(mem, - FALSE); /* page queues are not locked */ + FALSE); /* page queues are not locked */ #endif /* CONFIG_SECLUDED_MEMORY */ } else { /* @@ -3817,25 +4085,25 @@ vm_page_free_list( } } else { assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr || - VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr); + VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr); vm_page_release_fictitious(mem); } mem = nxt; } freeq = mem; - if ( (mem = local_freeq) ) { - unsigned int avail_free_count; - unsigned int need_wakeup = 0; - unsigned int need_priv_wakeup = 0; + if ((mem = local_freeq)) { + unsigned int avail_free_count; + unsigned int need_wakeup = 0; + unsigned int need_priv_wakeup = 0; #if CONFIG_SECLUDED_MEMORY - unsigned int need_wakeup_secluded = 0; + unsigned int need_wakeup_secluded = 0; #endif /* CONFIG_SECLUDED_MEMORY */ - + lck_mtx_lock_spin(&vm_page_queue_free_lock); while (mem) { - int color; + int color; nxt = mem->vmp_snext; @@ -3846,15 +4114,10 @@ vm_page_free_list( color = VM_PAGE_GET_COLOR(mem); #if defined(__x86_64__) - vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq); + vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem); #else vm_page_queue_enter(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq); + mem, vmp_pageq); #endif mem = nxt; } @@ -3865,7 +4128,6 @@ vm_page_free_list( VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0); if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) { - if (avail_free_count < vm_page_free_wanted_privileged) { need_priv_wakeup = avail_free_count; vm_page_free_wanted_privileged -= avail_free_count; @@ -3881,18 +4143,18 @@ vm_page_free_list( avail_free_count > vm_page_free_reserved) { unsigned int available_pages; available_pages = (avail_free_count - - vm_page_free_reserved); + vm_page_free_reserved); if (available_pages < vm_page_free_wanted_secluded) { need_wakeup_secluded = available_pages; vm_page_free_wanted_secluded -= - available_pages; + available_pages; avail_free_count -= available_pages; } else { need_wakeup_secluded = - vm_page_free_wanted_secluded; + vm_page_free_wanted_secluded; avail_free_count -= - vm_page_free_wanted_secluded; + vm_page_free_wanted_secluded; vm_page_free_wanted_secluded = 0; } } @@ -3924,11 +4186,11 @@ vm_page_free_list( if (need_wakeup_secluded != 0 && vm_page_free_wanted_secluded == 0) { thread_wakeup((event_t) - &vm_page_free_wanted_secluded); + &vm_page_free_wanted_secluded); } else { for (; - need_wakeup_secluded != 0; - need_wakeup_secluded--) { + need_wakeup_secluded != 0; + need_wakeup_secluded--) { thread_wakeup_one( (event_t) &vm_page_free_wanted_secluded); @@ -3942,11 +4204,13 @@ vm_page_free_list( * once. */ thread_wakeup((event_t) &vm_page_free_count); - } else for (; need_wakeup != 0; need_wakeup--) { - /* - * Wake up one waiter per page we just released. - */ - thread_wakeup_one((event_t) &vm_page_free_count); + } else { + for (; need_wakeup != 0; need_wakeup--) { + /* + * Wake up one waiter per page we just released. 
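/*
 * [Editor's illustrative sketch.] The chunking in vm_page_free_list()
 * above: walk the caller's chain at most 64 pages at a time, build a
 * local chain with no lock held, then take the global free-queue lock
 * once per chunk rather than once per page. Queue and lock details are
 * stand-ins.
 */
#include <stddef.h>

typedef struct lpage { struct lpage *snext; } lpage_t;

static void free_lock(void) { }
static void free_unlock(void) { }
static void free_queue_insert(lpage_t *p) { (void)p; }

static void
page_free_list(lpage_t *freeq)
{
	lpage_t *mem, *nxt, *local_freeq;
	int pg_count;

	while (freeq) {
		pg_count = 0;
		local_freeq = NULL;
		mem = freeq;

		/* detach up to 64 pages onto a local chain, lock-free */
		while (mem && pg_count < 64) {
			nxt = mem->snext;
			mem->snext = local_freeq;
			local_freeq = mem;
			pg_count++;
			mem = nxt;
		}
		freeq = mem;		/* remainder for the next pass */

		free_lock();		/* one lock round-trip per chunk */
		for (mem = local_freeq; mem; mem = nxt) {
			nxt = mem->snext;
			free_queue_insert(mem);
		}
		free_unlock();
	}
}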
+ */ + thread_wakeup_one((event_t) &vm_page_free_count); + } } VM_CHECK_MEMORYSTATUS; @@ -3970,9 +4234,9 @@ void vm_page_wire( vm_page_t mem, vm_tag_t tag, - boolean_t check_memorystatus) + boolean_t check_memorystatus) { - vm_object_t m_object; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(mem); @@ -3993,10 +4257,10 @@ vm_page_wire( */ } LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - if ( !VM_PAGE_WIRED(mem)) { - - if (mem->vmp_laundry) + if (!VM_PAGE_WIRED(mem)) { + if (mem->vmp_laundry) { vm_pageout_steal_laundry(mem, TRUE); + } vm_page_queues_remove(mem, TRUE); @@ -4004,27 +4268,26 @@ vm_page_wire( mem->vmp_q_state = VM_PAGE_IS_WIRED; if (m_object) { - VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object); VM_OBJECT_WIRED_PAGE_ADD(m_object, mem); VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag); assert(m_object->resident_page_count >= - m_object->wired_page_count); + m_object->wired_page_count); if (m_object->purgable == VM_PURGABLE_VOLATILE) { assert(vm_page_purgeable_count > 0); OSAddAtomic(-1, &vm_page_purgeable_count); OSAddAtomic(1, &vm_page_purgeable_wired_count); } if ((m_object->purgable == VM_PURGABLE_VOLATILE || - m_object->purgable == VM_PURGABLE_EMPTY) && + m_object->purgable == VM_PURGABLE_EMPTY) && m_object->vo_owner != TASK_NULL) { - task_t owner; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int ledger_idx_nonvolatile_compressed; - boolean_t do_footprint; + task_t owner; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + boolean_t do_footprint; owner = VM_OBJECT_OWNER(m_object); vm_object_ledger_tag_ledgers( @@ -4036,17 +4299,17 @@ vm_page_wire( &do_footprint); /* less volatile bytes */ ledger_debit(owner->ledger, - ledger_idx_volatile, - PAGE_SIZE); + ledger_idx_volatile, + PAGE_SIZE); /* more not-quite-volatile bytes */ ledger_credit(owner->ledger, - ledger_idx_nonvolatile, - PAGE_SIZE); + ledger_idx_nonvolatile, + PAGE_SIZE); if (do_footprint) { /* more footprint */ ledger_credit(owner->ledger, - task_ledgers.phys_footprint, - PAGE_SIZE); + task_ledgers.phys_footprint, + PAGE_SIZE); } } if (m_object->all_reusable) { @@ -4062,17 +4325,19 @@ vm_page_wire( * accounting. 
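/*
 * [Editor's illustrative sketch.] The ledger movement when wiring a
 * purgeable page, as in vm_page_wire() above: the page's bytes migrate
 * from the owner's volatile ledger to the nonvolatile one (and onto the
 * physical footprint when the ledger tag asks for it), and
 * vm_page_unwire() performs the exact inverse. Ledgers are modeled as
 * plain counters here, not the kernel's ledger API.
 */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_BYTES	4096

struct owner_ledger {
	uint64_t	volatile_bytes;
	uint64_t	nonvolatile_bytes;
	uint64_t	phys_footprint;
};

static void
wire_account(struct owner_ledger *l, bool do_footprint)
{
	l->volatile_bytes -= PAGE_BYTES;	/* less volatile ... */
	l->nonvolatile_bytes += PAGE_BYTES;	/* ... more not-quite-volatile */
	if (do_footprint) {
		l->phys_footprint += PAGE_BYTES;	/* now costs footprint */
	}
}

static void
unwire_account(struct owner_ledger *l, bool do_footprint)
{
	l->nonvolatile_bytes -= PAGE_BYTES;
	l->volatile_bytes += PAGE_BYTES;
	if (do_footprint) {
		l->phys_footprint -= PAGE_BYTES;
	}
}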
*/ vm_object_reuse_pages(m_object, - mem->vmp_offset, - mem->vmp_offset+PAGE_SIZE_64, - FALSE); + mem->vmp_offset, + mem->vmp_offset + PAGE_SIZE_64, + FALSE); } } assert(!mem->vmp_reusable); - if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) + if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) { vm_page_wire_count++; - if (mem->vmp_gobbled) + } + if (mem->vmp_gobbled) { vm_page_gobble_count--; + } mem->vmp_gobbled = FALSE; if (check_memorystatus == TRUE) { @@ -4098,10 +4363,10 @@ vm_page_wire( */ void vm_page_unwire( - vm_page_t mem, - boolean_t queueit) + vm_page_t mem, + boolean_t queueit) { - vm_object_t m_object; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(mem); @@ -4115,7 +4380,6 @@ vm_page_unwire( vm_object_lock_assert_exclusive(m_object); LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); if (--mem->vmp_wire_count == 0) { - mem->vmp_q_state = VM_PAGE_NOT_ON_Q; VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object); @@ -4126,21 +4390,21 @@ vm_page_unwire( } assert(m_object->resident_page_count >= - m_object->wired_page_count); + m_object->wired_page_count); if (m_object->purgable == VM_PURGABLE_VOLATILE) { OSAddAtomic(+1, &vm_page_purgeable_count); assert(vm_page_purgeable_wired_count > 0); OSAddAtomic(-1, &vm_page_purgeable_wired_count); } if ((m_object->purgable == VM_PURGABLE_VOLATILE || - m_object->purgable == VM_PURGABLE_EMPTY) && + m_object->purgable == VM_PURGABLE_EMPTY) && m_object->vo_owner != TASK_NULL) { - task_t owner; - int ledger_idx_volatile; - int ledger_idx_nonvolatile; - int ledger_idx_volatile_compressed; - int ledger_idx_nonvolatile_compressed; - boolean_t do_footprint; + task_t owner; + int ledger_idx_volatile; + int ledger_idx_nonvolatile; + int ledger_idx_volatile_compressed; + int ledger_idx_nonvolatile_compressed; + boolean_t do_footprint; owner = VM_OBJECT_OWNER(m_object); vm_object_ledger_tag_ledgers( @@ -4152,17 +4416,17 @@ vm_page_unwire( &do_footprint); /* more volatile bytes */ ledger_credit(owner->ledger, - ledger_idx_volatile, - PAGE_SIZE); + ledger_idx_volatile, + PAGE_SIZE); /* less not-quite-volatile bytes */ ledger_debit(owner->ledger, - ledger_idx_nonvolatile, - PAGE_SIZE); + ledger_idx_nonvolatile, + PAGE_SIZE); if (do_footprint) { /* less footprint */ ledger_debit(owner->ledger, - task_ledgers.phys_footprint, - PAGE_SIZE); + task_ledgers.phys_footprint, + PAGE_SIZE); } } assert(m_object != kernel_object); @@ -4177,7 +4441,6 @@ vm_page_unwire( } VM_CHECK_MEMORYSTATUS; - } VM_PAGE_CHECK(mem); } @@ -4193,7 +4456,7 @@ vm_page_unwire( */ void vm_page_deactivate( - vm_page_t m) + vm_page_t m) { vm_page_deactivate_internal(m, TRUE); } @@ -4201,10 +4464,10 @@ vm_page_deactivate( void vm_page_deactivate_internal( - vm_page_t m, - boolean_t clear_hw_reference) + vm_page_t m, + boolean_t clear_hw_reference) { - vm_object_t m_object; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(m); @@ -4221,13 +4484,14 @@ vm_page_deactivate_internal( * inactive queue. Note wired pages should not have * their reference bit cleared. */ - assert ( !(m->vmp_absent && !m->vmp_unusual)); + assert( !(m->vmp_absent && !m->vmp_unusual)); - if (m->vmp_gobbled) { /* can this happen? */ + if (m->vmp_gobbled) { /* can this happen? 
*/ assert( !VM_PAGE_WIRED(m)); - if (!m->vmp_private && !m->vmp_fictitious) + if (!m->vmp_private && !m->vmp_fictitious) { vm_page_wire_count--; + } vm_page_gobble_count--; m->vmp_gobbled = FALSE; } @@ -4243,29 +4507,30 @@ vm_page_deactivate_internal( (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) || VM_PAGE_WIRED(m)) { - return; + return; } - if (!m->vmp_absent && clear_hw_reference == TRUE) + if (!m->vmp_absent && clear_hw_reference == TRUE) { pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m)); + } m->vmp_reference = FALSE; m->vmp_no_cache = FALSE; - if ( !VM_PAGE_INACTIVE(m)) { + if (!VM_PAGE_INACTIVE(m)) { vm_page_queues_remove(m, FALSE); if (!VM_DYNAMIC_PAGING_ENABLED() && m->vmp_dirty && m_object->internal && (m_object->purgable == VM_PURGABLE_DENY || - m_object->purgable == VM_PURGABLE_NONVOLATILE || - m_object->purgable == VM_PURGABLE_VOLATILE)) { + m_object->purgable == VM_PURGABLE_NONVOLATILE || + m_object->purgable == VM_PURGABLE_VOLATILE)) { vm_page_check_pageable_safe(m); - vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, vmp_pageq); + vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq); m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; vm_page_throttled_count++; } else { if (m_object->named && m_object->ref_count == 1) { - vm_page_speculate(m, FALSE); + vm_page_speculate(m, FALSE); #if DEVELOPMENT || DEBUG vm_page_speculative_recreated++; #endif @@ -4286,9 +4551,10 @@ vm_page_deactivate_internal( * Call with the queues lock held. */ -void vm_page_enqueue_cleaned(vm_page_t m) +void +vm_page_enqueue_cleaned(vm_page_t m) { - vm_object_t m_object; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(m); @@ -4301,8 +4567,9 @@ void vm_page_enqueue_cleaned(vm_page_t m) } if (m->vmp_gobbled) { - if (!m->vmp_private && !m->vmp_fictitious) + if (!m->vmp_private && !m->vmp_fictitious) { vm_page_wire_count--; + } vm_page_gobble_count--; m->vmp_gobbled = FALSE; } @@ -4317,12 +4584,12 @@ void vm_page_enqueue_cleaned(vm_page_t m) if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious || (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) || (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) { - return; + return; } vm_page_queues_remove(m, FALSE); vm_page_check_pageable_safe(m); - vm_page_queue_enter(&vm_page_queue_cleaned, m, vm_page_t, vmp_pageq); + vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq); m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q; vm_page_cleaned_count++; @@ -4333,8 +4600,9 @@ void vm_page_enqueue_cleaned(vm_page_t m) vm_page_pageable_external_count++; } #if CONFIG_BACKGROUND_QUEUE - if (m->vmp_in_background) + if (m->vmp_in_background) { vm_page_add_to_backgroundq(m, TRUE); + } #endif VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1); } @@ -4349,14 +4617,14 @@ void vm_page_enqueue_cleaned(vm_page_t m) void vm_page_activate( - vm_page_t m) + vm_page_t m) { - vm_object_t m_object; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(m); VM_PAGE_CHECK(m); -#ifdef FIXME_4778297 +#ifdef FIXME_4778297 assert(m_object != kernel_object); #endif assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr); @@ -4365,8 +4633,9 @@ vm_page_activate( if (m->vmp_gobbled) { assert( !VM_PAGE_WIRED(m)); - if (!m->vmp_private && !m->vmp_fictitious) + if (!m->vmp_private && !m->vmp_fictitious) { vm_page_wire_count--; + } vm_page_gobble_count--; m->vmp_gobbled = FALSE; } @@ -4380,29 +4649,31 @@ vm_page_activate( */ if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious || (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || - (m->vmp_q_state == 
VM_PAGE_ON_PAGEOUT_Q)) + (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) { return; + } #if DEBUG - if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) - panic("vm_page_activate: already active"); + if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) { + panic("vm_page_activate: already active"); + } #endif if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) { DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL); DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL); } - + vm_page_queues_remove(m, FALSE); - if ( !VM_PAGE_WIRED(m)) { + if (!VM_PAGE_WIRED(m)) { vm_page_check_pageable_safe(m); - if (!VM_DYNAMIC_PAGING_ENABLED() && - m->vmp_dirty && m_object->internal && + if (!VM_DYNAMIC_PAGING_ENABLED() && + m->vmp_dirty && m_object->internal && (m_object->purgable == VM_PURGABLE_DENY || - m_object->purgable == VM_PURGABLE_NONVOLATILE || - m_object->purgable == VM_PURGABLE_VOLATILE)) { - vm_page_queue_enter(&vm_page_queue_throttled, m, vm_page_t, vmp_pageq); + m_object->purgable == VM_PURGABLE_NONVOLATILE || + m_object->purgable == VM_PURGABLE_VOLATILE)) { + vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq); m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q; vm_page_throttled_count++; } else { @@ -4411,8 +4682,7 @@ vm_page_activate( vm_page_secluded_target != 0 && num_tasks_can_use_secluded_mem == 0 && m_object->eligible_for_secluded) { - vm_page_queue_enter(&vm_page_queue_secluded, m, - vm_page_t, vmp_pageq); + vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq); m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q; vm_page_secluded_count++; vm_page_secluded_count_inuse++; @@ -4438,11 +4708,11 @@ vm_page_activate( */ void vm_page_speculate( - vm_page_t m, - boolean_t new) + vm_page_t m, + boolean_t new) { - struct vm_speculative_age_q *aq; - vm_object_t m_object; + struct vm_speculative_age_q *aq; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(m); @@ -4464,28 +4734,28 @@ vm_page_speculate( */ if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious || (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || - (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) + (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) { return; + } vm_page_queues_remove(m, FALSE); - if ( !VM_PAGE_WIRED(m)) { - mach_timespec_t ts; + if (!VM_PAGE_WIRED(m)) { + mach_timespec_t ts; clock_sec_t sec; clock_nsec_t nsec; - clock_get_system_nanotime(&sec, &nsec); + clock_get_system_nanotime(&sec, &nsec); ts.tv_sec = (unsigned int) sec; ts.tv_nsec = nsec; if (vm_page_speculative_count == 0) { - speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q; speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q; aq = &vm_page_queue_speculative[speculative_age_index]; - /* + /* * set the timer to begin a new group */ aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000; @@ -4495,21 +4765,23 @@ vm_page_speculate( aq = &vm_page_queue_speculative[speculative_age_index]; if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) { + speculative_age_index++; - speculative_age_index++; - - if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) - speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q; + if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) { + speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q; + } if (speculative_age_index == speculative_steal_index) { - speculative_steal_index = speculative_age_index + 1; + speculative_steal_index = speculative_age_index + 1; - if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) - speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q; + if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) { + speculative_steal_index = 
VM_PAGE_MIN_SPECULATIVE_AGE_Q; + } } aq = &vm_page_queue_speculative[speculative_age_index]; - if (!vm_page_queue_empty(&aq->age_q)) - vm_page_speculate_ageit(aq); + if (!vm_page_queue_empty(&aq->age_q)) { + vm_page_speculate_ageit(aq); + } aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000; aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC; @@ -4524,7 +4796,7 @@ vm_page_speculate( if (new == TRUE) { vm_object_lock_assert_exclusive(m_object); - m_object->pages_created++; + m_object->pages_created++; #if DEVELOPMENT || DEBUG vm_page_speculative_created++; #endif @@ -4543,24 +4815,24 @@ vm_page_speculate( void vm_page_speculate_ageit(struct vm_speculative_age_q *aq) { - struct vm_speculative_age_q *sq; - vm_page_t t; + struct vm_speculative_age_q *sq; + vm_page_t t; sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q]; if (vm_page_queue_empty(&sq->age_q)) { - sq->age_q.next = aq->age_q.next; + sq->age_q.next = aq->age_q.next; sq->age_q.prev = aq->age_q.prev; - + t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next); t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q); t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev); t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q); } else { - t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev); + t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev); t->vmp_pageq.next = aq->age_q.next; - + t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next); t->vmp_pageq.prev = sq->age_q.prev; @@ -4575,7 +4847,7 @@ vm_page_speculate_ageit(struct vm_speculative_age_q *aq) void vm_page_lru( - vm_page_t m) + vm_page_t m) { VM_PAGE_CHECK(m); assert(VM_PAGE_OBJECT(m) != kernel_object); @@ -4584,18 +4856,18 @@ vm_page_lru( LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) { - /* - * we don't need to do all the other work that + /* + * we don't need to do all the other work that * vm_page_queues_remove and vm_page_enqueue_inactive * bring along for the ride */ - assert(!m->vmp_laundry); + assert(!m->vmp_laundry); assert(!m->vmp_private); - + m->vmp_no_cache = FALSE; - vm_page_queue_remove(&vm_page_queue_inactive, m, vm_page_t, vmp_pageq); - vm_page_queue_enter(&vm_page_queue_inactive, m, vm_page_t, vmp_pageq); + vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq); + vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq); return; } @@ -4610,8 +4882,9 @@ vm_page_lru( if (m->vmp_laundry || m->vmp_private || (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) || - VM_PAGE_WIRED(m)) + VM_PAGE_WIRED(m)) { return; + } m->vmp_no_cache = FALSE; @@ -4624,25 +4897,26 @@ vm_page_lru( void vm_page_reactivate_all_throttled(void) { - vm_page_t first_throttled, last_throttled; - vm_page_t first_active; - vm_page_t m; - int extra_active_count; - int extra_internal_count, extra_external_count; - vm_object_t m_object; - - if (!VM_DYNAMIC_PAGING_ENABLED()) + vm_page_t first_throttled, last_throttled; + vm_page_t first_active; + vm_page_t m; + int extra_active_count; + int extra_internal_count, extra_external_count; + vm_object_t m_object; + + if (!VM_DYNAMIC_PAGING_ENABLED()) { return; + } extra_active_count = 0; extra_internal_count = 0; extra_external_count = 0; vm_page_lock_queues(); - if (! vm_page_queue_empty(&vm_page_queue_throttled)) { + if (!vm_page_queue_empty(&vm_page_queue_throttled)) { /* * Switch "throttled" pages to "active". 
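* (each page is restamped VM_PAGE_ON_ACTIVE_Q in the walk below; the list
* itself is then spliced onto the active queue in one operation rather
* than requeueing the pages one at a time)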
*/ - vm_page_queue_iterate(&vm_page_queue_throttled, m, vm_page_t, vmp_pageq) { + vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) { VM_PAGE_CHECK(m); assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q); @@ -4658,8 +4932,9 @@ vm_page_reactivate_all_throttled(void) m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q; VM_PAGE_CHECK(m); #if CONFIG_BACKGROUND_QUEUE - if (m->vmp_in_background) + if (m->vmp_in_background) { vm_page_add_to_backgroundq(m, FALSE); + } #endif } @@ -4709,23 +4984,26 @@ void vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks) { - struct vpl *lq; - vm_page_t first_local, last_local; - vm_page_t first_active; - vm_page_t m; - uint32_t count = 0; + struct vpl *lq; + vm_page_t first_local, last_local; + vm_page_t first_active; + vm_page_t m; + uint32_t count = 0; - if (vm_page_local_q == NULL) + if (vm_page_local_q == NULL) { return; + } lq = &vm_page_local_q[lid].vpl_un.vpl; if (nolocks == FALSE) { if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) { - if ( !vm_page_trylockspin_queues()) + if (!vm_page_trylockspin_queues()) { return; - } else + } + } else { vm_page_lockspin_queues(); + } VPL_LOCK(&lq->vpl_lock); } @@ -4735,26 +5013,29 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks) */ assert(!vm_page_queue_empty(&lq->vpl_queue)); - vm_page_queue_iterate(&lq->vpl_queue, m, vm_page_t, vmp_pageq) { + vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) { VM_PAGE_CHECK(m); vm_page_check_pageable_safe(m); assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q); assert(!m->vmp_fictitious); - if (m->vmp_local_id != lid) + if (m->vmp_local_id != lid) { panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m); - + } + m->vmp_local_id = 0; m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q; VM_PAGE_CHECK(m); #if CONFIG_BACKGROUND_QUEUE - if (m->vmp_in_background) + if (m->vmp_in_background) { vm_page_add_to_backgroundq(m, FALSE); + } #endif count++; } - if (count != lq->vpl_count) + if (count != lq->vpl_count) { panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d\n", count, lq->vpl_count); + } /* * Transfer the entire local queue to the regular LRU page queues. 
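A pattern that repeats through every queue-manipulation hunk in this file: the vm_page_queue_enter / vm_page_queue_remove / vm_page_queue_iterate macros drop their explicit element-type argument, shrinking each call site from four arguments to three. Side by side, as the hunks above show it:

	/* old form: the caller spells out the element type */
	vm_page_queue_iterate(&vm_page_queue_throttled, m, vm_page_t, vmp_pageq) {

	/* new form: three arguments, the type argument is gone */
	vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {

The enter and remove forms change the same way.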
@@ -4801,11 +5082,10 @@ vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks) #define PMAP_ZERO_PART_PAGE_IMPLEMENTED void vm_page_part_zero_fill( - vm_page_t m, - vm_offset_t m_pa, - vm_size_t len) + vm_page_t m, + vm_offset_t m_pa, + vm_size_t len) { - #if 0 /* * we don't hold the page queue lock @@ -4817,27 +5097,26 @@ vm_page_part_zero_fill( #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len); #else - vm_page_t tmp; + vm_page_t tmp; while (1) { - tmp = vm_page_grab(); + tmp = vm_page_grab(); if (tmp == VM_PAGE_NULL) { vm_page_wait(THREAD_UNINT); continue; } - break; + break; } vm_page_zero_fill(tmp); - if(m_pa != 0) { + if (m_pa != 0) { vm_page_part_copy(m, 0, tmp, 0, m_pa); } - if((m_pa + len) < PAGE_SIZE) { - vm_page_part_copy(m, m_pa + len, tmp, - m_pa + len, PAGE_SIZE - (m_pa + len)); + if ((m_pa + len) < PAGE_SIZE) { + vm_page_part_copy(m, m_pa + len, tmp, + m_pa + len, PAGE_SIZE - (m_pa + len)); } - vm_page_copy(tmp,m); - VM_PAGE_FREE(tmp); + vm_page_copy(tmp, m); + VM_PAGE_FREE(tmp); #endif - } /* @@ -4847,11 +5126,11 @@ vm_page_part_zero_fill( */ void vm_page_zero_fill( - vm_page_t m) + vm_page_t m) { - XPR(XPR_VM_PAGE, + XPR(XPR_VM_PAGE, "vm_page_zero_fill, object 0x%X offset 0x%X page 0x%X\n", - VM_PAGE_OBJECT(m), m->vmp_offset, m, 0,0); + VM_PAGE_OBJECT(m), m->vmp_offset, m, 0, 0); #if 0 /* * we don't hold the page queue lock @@ -4872,11 +5151,11 @@ vm_page_zero_fill( void vm_page_part_copy( - vm_page_t src_m, - vm_offset_t src_pa, - vm_page_t dst_m, - vm_offset_t dst_pa, - vm_size_t len) + vm_page_t src_m, + vm_offset_t src_pa, + vm_page_t dst_m, + vm_offset_t dst_pa, + vm_size_t len) { #if 0 /* @@ -4887,7 +5166,7 @@ vm_page_part_copy( VM_PAGE_CHECK(dst_m); #endif pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa, - VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len); + VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len); } /* @@ -4901,16 +5180,16 @@ int vm_page_copy_cs_tainted = 0; void vm_page_copy( - vm_page_t src_m, - vm_page_t dest_m) + vm_page_t src_m, + vm_page_t dest_m) { - vm_object_t src_m_object; + vm_object_t src_m_object; src_m_object = VM_PAGE_OBJECT(src_m); - XPR(XPR_VM_PAGE, + XPR(XPR_VM_PAGE, "vm_page_copy, object 0x%X offset 0x%X to object 0x%X offset 0x%X\n", - src_m_object, src_m->vmp_offset, + src_m_object, src_m->vmp_offset, VM_PAGE_OBJECT(dest_m), dest_m->vmp_offset, 0); #if 0 @@ -4935,12 +5214,11 @@ vm_page_copy( vm_page_validate_cs(src_m); #if DEVELOPMENT || DEBUG DTRACE_VM4(codesigned_copy, - vm_object_t, src_m_object, - vm_object_offset_t, src_m->vmp_offset, - int, src_m->vmp_cs_validated, - int, src_m->vmp_cs_tainted); + vm_object_t, src_m_object, + vm_object_offset_t, src_m->vmp_offset, + int, src_m->vmp_cs_validated, + int, src_m->vmp_cs_tainted); #endif /* DEVELOPMENT || DEBUG */ - } /* @@ -4958,49 +5236,49 @@ vm_page_copy( #if MACH_ASSERT static void _vm_page_print( - vm_page_t p) + vm_page_t p) { printf("vm_page %p: \n", p); printf(" pageq: next=%p prev=%p\n", - (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next), - (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev)); + (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next), + (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev)); printf(" listq: next=%p prev=%p\n", - (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)), - (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev))); + (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)), + (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev))); printf(" next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m))); - 
printf(" object=%p offset=0x%llx\n",VM_PAGE_OBJECT(p), p->vmp_offset); + printf(" object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset); printf(" wire_count=%u\n", p->vmp_wire_count); printf(" q_state=%u\n", p->vmp_q_state); printf(" %slaundry, %sref, %sgobbled, %sprivate\n", - (p->vmp_laundry ? "" : "!"), - (p->vmp_reference ? "" : "!"), - (p->vmp_gobbled ? "" : "!"), - (p->vmp_private ? "" : "!")); + (p->vmp_laundry ? "" : "!"), + (p->vmp_reference ? "" : "!"), + (p->vmp_gobbled ? "" : "!"), + (p->vmp_private ? "" : "!")); printf(" %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n", - (p->vmp_busy ? "" : "!"), - (p->vmp_wanted ? "" : "!"), - (p->vmp_tabled ? "" : "!"), - (p->vmp_fictitious ? "" : "!"), - (p->vmp_pmapped ? "" : "!"), - (p->vmp_wpmapped ? "" : "!")); + (p->vmp_busy ? "" : "!"), + (p->vmp_wanted ? "" : "!"), + (p->vmp_tabled ? "" : "!"), + (p->vmp_fictitious ? "" : "!"), + (p->vmp_pmapped ? "" : "!"), + (p->vmp_wpmapped ? "" : "!")); printf(" %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n", - (p->vmp_free_when_done ? "" : "!"), - (p->vmp_absent ? "" : "!"), - (p->vmp_error ? "" : "!"), - (p->vmp_dirty ? "" : "!"), - (p->vmp_cleaning ? "" : "!"), - (p->vmp_precious ? "" : "!"), - (p->vmp_clustered ? "" : "!")); + (p->vmp_free_when_done ? "" : "!"), + (p->vmp_absent ? "" : "!"), + (p->vmp_error ? "" : "!"), + (p->vmp_dirty ? "" : "!"), + (p->vmp_cleaning ? "" : "!"), + (p->vmp_precious ? "" : "!"), + (p->vmp_clustered ? "" : "!")); printf(" %soverwriting, %srestart, %sunusual\n", - (p->vmp_overwriting ? "" : "!"), - (p->vmp_restart ? "" : "!"), - (p->vmp_unusual ? "" : "!")); + (p->vmp_overwriting ? "" : "!"), + (p->vmp_restart ? "" : "!"), + (p->vmp_unusual ? "" : "!")); printf(" %scs_validated, %scs_tainted, %scs_nx, %sno_cache\n", - (p->vmp_cs_validated ? "" : "!"), - (p->vmp_cs_tainted ? "" : "!"), - (p->vmp_cs_nx ? "" : "!"), - (p->vmp_no_cache ? "" : "!")); + (p->vmp_cs_validated ? "" : "!"), + (p->vmp_cs_tainted ? "" : "!"), + (p->vmp_cs_nx ? "" : "!"), + (p->vmp_no_cache ? 
"" : "!")); printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p)); } @@ -5011,19 +5289,19 @@ _vm_page_print( */ static int vm_page_verify_contiguous( - vm_page_t pages, - unsigned int npages) + vm_page_t pages, + unsigned int npages) { - vm_page_t m; - unsigned int page_count; - vm_offset_t prev_addr; + vm_page_t m; + unsigned int page_count; + vm_offset_t prev_addr; prev_addr = VM_PAGE_GET_PHYS_PAGE(pages); page_count = 1; for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) { if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) { printf("m %p prev_addr 0x%lx, current addr 0x%x\n", - m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m)); + m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m)); printf("pages %p page_count %d npages %d\n", pages, page_count, npages); panic("vm_page_verify_contiguous: not contiguous!"); } @@ -5032,7 +5310,7 @@ vm_page_verify_contiguous( } if (page_count != npages) { printf("pages %p actual count 0x%x but requested 0x%x\n", - pages, page_count, npages); + pages, page_count, npages); panic("vm_page_verify_contiguous: count error"); } return 1; @@ -5045,48 +5323,50 @@ vm_page_verify_contiguous( static boolean_t vm_page_verify_this_free_list_enabled = FALSE; static unsigned int vm_page_verify_free_list( - vm_page_queue_head_t *vm_page_queue, - unsigned int color, - vm_page_t look_for_page, - boolean_t expect_page) + vm_page_queue_head_t *vm_page_queue, + unsigned int color, + vm_page_t look_for_page, + boolean_t expect_page) { - unsigned int npages; - vm_page_t m; - vm_page_t prev_m; - boolean_t found_page; + unsigned int npages; + vm_page_t m; + vm_page_t prev_m; + boolean_t found_page; - if (! vm_page_verify_this_free_list_enabled) + if (!vm_page_verify_this_free_list_enabled) { return 0; + } found_page = FALSE; npages = 0; prev_m = (vm_page_t)((uintptr_t)vm_page_queue); - vm_page_queue_iterate(vm_page_queue, - m, - vm_page_t, - vmp_pageq) { - + vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) { if (m == look_for_page) { found_page = TRUE; } - if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) + if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) { panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p\n", - color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m); - if ( ! 
m->vmp_busy ) + color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m); + } + if (!m->vmp_busy) { panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy\n", - color, npages, m); + color, npages, m); + } if (color != (unsigned int) -1) { - if (VM_PAGE_GET_COLOR(m) != color) + if (VM_PAGE_GET_COLOR(m) != color) { panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u\n", - color, npages, m, VM_PAGE_GET_COLOR(m), color); - if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) + color, npages, m, VM_PAGE_GET_COLOR(m), color); + } + if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) { panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d\n", - color, npages, m, m->vmp_q_state); + color, npages, m, m->vmp_q_state); + } } else { - if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) + if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) { panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d\n", - npages, m, m->vmp_q_state); + npages, m, m->vmp_q_state); + } } ++npages; prev_m = m; @@ -5096,25 +5376,26 @@ vm_page_verify_free_list( if (expect_page && !found_page) { printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n", - color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page)); + color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page)); _vm_page_print(look_for_page); for (other_color = 0; - other_color < vm_colors; - other_color++) { - if (other_color == color) + other_color < vm_colors; + other_color++) { + if (other_color == color) { continue; + } vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead, - other_color, look_for_page, FALSE); + other_color, look_for_page, FALSE); } if (color == (unsigned int) -1) { vm_page_verify_free_list(&vm_lopage_queue_free, - (unsigned int) -1, look_for_page, FALSE); + (unsigned int) -1, look_for_page, FALSE); } panic("vm_page_verify_free_list(color=%u)\n", color); } if (!expect_page && found_page) { printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n", - color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page)); + color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page)); } } return npages; @@ -5124,16 +5405,17 @@ static boolean_t vm_page_verify_all_free_lists_enabled = FALSE; static void vm_page_verify_free_lists( void ) { - unsigned int color, npages, nlopages; - boolean_t toggle = TRUE; + unsigned int color, npages, nlopages; + boolean_t toggle = TRUE; - if (! 
vm_page_verify_all_free_lists_enabled) + return; + } npages = 0; lck_mtx_lock(&vm_page_queue_free_lock); - + if (vm_page_verify_this_free_list_enabled == TRUE) { /* * This variable has been set globally for extra checking of @@ -5147,17 +5429,18 @@ vm_page_verify_free_lists( void ) vm_page_verify_this_free_list_enabled = TRUE; } - for( color = 0; color < vm_colors; color++ ) { + for (color = 0; color < vm_colors; color++) { npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead, - color, VM_PAGE_NULL, FALSE); + color, VM_PAGE_NULL, FALSE); } nlopages = vm_page_verify_free_list(&vm_lopage_queue_free, - (unsigned int) -1, - VM_PAGE_NULL, FALSE); - if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) + (unsigned int) -1, + VM_PAGE_NULL, FALSE); + if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) { panic("vm_page_verify_free_lists: " - "npages %u free_count %d nlopages %u lo_free_count %u", - npages, vm_page_free_count, nlopages, vm_lopage_free_count); + "npages %u free_count %d nlopages %u lo_free_count %u", + npages, vm_page_free_count, nlopages, vm_lopage_free_count); + } if (toggle == TRUE) { vm_page_verify_this_free_list_enabled = FALSE; @@ -5166,49 +5449,10 @@ vm_page_verify_free_lists( void ) lck_mtx_unlock(&vm_page_queue_free_lock); } -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ - -#if __arm64__ -/* - * 1 or more clients (currently only SEP) ask for a large contiguous chunk of memory - * after the system has 'aged'. To ensure that other allocation requests don't mess - * with the chances of that request being satisfied, we pre-allocate a single contiguous - * 10MB buffer and hand it out to the first request of >= 4MB. - */ - -kern_return_t cpm_preallocate_early(void); - -vm_page_t cpm_preallocated_pages_list = NULL; -boolean_t preallocated_buffer_available = FALSE; - -#define PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT ((10 * 1024 * 1024) / PAGE_SIZE_64) /* 10 MB */ -#define MIN_CONTIG_PAGES_REQUEST_FOR_PREALLOCATED_BUFFER ((4 * 1024 *1024) / PAGE_SIZE_64) /* 4 MB */ - -kern_return_t -cpm_preallocate_early(void) -{ - - kern_return_t kr = KERN_SUCCESS; - vm_map_size_t prealloc_size = (PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT * PAGE_SIZE_64); - - printf("cpm_preallocate_early called to preallocate contiguous buffer of %llu pages\n", PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT); - - kr = cpm_allocate(CAST_DOWN(vm_size_t, prealloc_size), &cpm_preallocated_pages_list, 0, 0, TRUE, 0); - - if (kr != KERN_SUCCESS) { - printf("cpm_allocate for preallocated contig buffer failed with %d.\n", kr); - } else { - preallocated_buffer_available = TRUE; - } - - return kr; -} -#endif /* __arm64__ */ - - -extern boolean_t (* volatile consider_buffer_cache_collect)(int); +extern boolean_t(*volatile consider_buffer_cache_collect)(int); /* * CONTIGUOUS PAGE ALLOCATION * * Find a region large enough to contain at least n pages * of contiguous physical memory. * * This is done by traversing the vm_page_t array in a linear fashion * we assume that the vm_page_t array has the available physical pages in an * ordered, ascending list... this is currently true of all our implementations - * and must remain so... there can be 'holes' in the array... we also can + * and must remain so... there can be 'holes' in the array... we also can * no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed - * which used to happen via 'vm_page_convert'... that function was no longer - * being called and was removed... 
- * - * The basic flow consists of stabilizing + * which used to happen via 'vm_page_convert'... that function was no longer + * being called and was removed... + * + * The basic flow consists of stabilizing some of the interesting state of * a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our * sweep at the beginning of the array looking for pages that meet our criteria * for a 'stealable' page... currently we are pretty conservative... if the page * meets these criteria and is physically contiguous to the previous page in the 'run' - * we keep developing it. If we hit a page that doesn't fit, we reset our state + * we keep developing it. If we hit a page that doesn't fit, we reset our state * and start to develop a new run... if at this point we've already considered - * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold, - * and mutex_pause (which will yield the processor), to keep the latency low w/r + * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold, + * and mutex_pause (which will yield the processor), to keep the latency low w/r * to other threads trying to acquire free pages (or move pages from q to q), * and then continue from the spot we left off... we only make 1 pass through the * array. Once we have a 'run' that is long enough, we'll go into the loop - * which steals the pages from the queues they're currently on... pages on the free + * which steals the pages from the queues they're currently on... pages on the free * queue can be stolen directly... pages that are on any of the other queues * must be removed from the object they are tabled on... this requires taking the - * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails + * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails * or if the state of the page behind the vm_object lock is no longer viable, we'll * dump the pages we've currently stolen back to the free list, and pick up our * scan from the point where we aborted the 'current' run. @@ -5253,17 +5497,17 @@ extern boolean_t (* volatile consider_buffer_cache_collect)(int); * Algorithm: */ -#define MAX_CONSIDERED_BEFORE_YIELD 1000 +#define MAX_CONSIDERED_BEFORE_YIELD 1000 -#define RESET_STATE_OF_RUN() \ - MACRO_BEGIN \ - prevcontaddr = -2; \ - start_pnum = -1; \ - free_considered = 0; \ - substitute_needed = 0; \ - npages = 0; \ - MACRO_END +#define RESET_STATE_OF_RUN() \ + MACRO_BEGIN \ + prevcontaddr = -2; \ + start_pnum = -1; \ + free_considered = 0; \ + substitute_needed = 0; \ + npages = 0; \ + MACRO_END 
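In outline, the scan pass that follows reduces to the shape sketched here (a condensed, non-verbatim paraphrase of the loop below, reusing its variable names; the locking, yielding, substitution and free-count bookkeeping are elided):

	for (page_idx = last_idx; npages < contig_pages && page_idx < vm_pages_count; page_idx++) {
		vm_page_t m = &vm_pages[page_idx];

		if (VM_PAGE_WIRED(m) || m->vmp_gobbled ||
		    m->vmp_laundry || m->vmp_wanted) {          /* abbreviated; the real tests check more states */
			RESET_STATE_OF_RUN();                   /* unusable page: the current run dies here */
		} else if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
			if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
				RESET_STATE_OF_RUN();           /* misaligned: a run can't even start here */
			} else {
				npages = 1;                     /* usable but discontiguous: begin a new run */
				start_idx = page_idx;
				start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
			}
		} else {
			npages++;                               /* physically contiguous: the run grows */
		}
		prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
	}

/* * Can we steal in-use (i.e. 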
not free) pages when searching for @@ -5271,42 +5515,43 @@ extern boolean_t (* volatile consider_buffer_cache_collect)(int); */ #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1 -static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0; +static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0; #if DEBUG int vm_page_find_contig_debug = 0; #endif static vm_page_t vm_page_find_contiguous( - unsigned int contig_pages, - ppnum_t max_pnum, + unsigned int contig_pages, + ppnum_t max_pnum, ppnum_t pnum_mask, - boolean_t wire, - int flags) -{ - vm_page_t m = NULL; - ppnum_t prevcontaddr = 0; - ppnum_t start_pnum = 0; - unsigned int npages = 0, considered = 0, scanned = 0; - unsigned int page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0; - unsigned int idx_last_contig_page_found = 0; - int free_considered = 0, free_available = 0; - int substitute_needed = 0; - boolean_t wrapped, zone_gc_called = FALSE; - kern_return_t kr; + boolean_t wire, + int flags) +{ + vm_page_t m = NULL; + ppnum_t prevcontaddr = 0; + ppnum_t start_pnum = 0; + unsigned int npages = 0, considered = 0, scanned = 0; + unsigned int page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0; + unsigned int idx_last_contig_page_found = 0; + int free_considered = 0, free_available = 0; + int substitute_needed = 0; + boolean_t wrapped, zone_gc_called = FALSE; + kern_return_t kr; #if DEBUG - clock_sec_t tv_start_sec = 0, tv_end_sec = 0; - clock_usec_t tv_start_usec = 0, tv_end_usec = 0; + clock_sec_t tv_start_sec = 0, tv_end_sec = 0; + clock_usec_t tv_start_usec = 0, tv_end_usec = 0; #endif - int yielded = 0; - int dumped_run = 0; - int stolen_pages = 0; - int compressed_pages = 0; + int yielded = 0; + int dumped_run = 0; + int stolen_pages = 0; + int compressed_pages = 0; - if (contig_pages == 0) + if (contig_pages == 0) { return VM_PAGE_NULL; + } full_scan_again: @@ -5318,60 +5563,14 @@ full_scan_again: #endif PAGE_REPLACEMENT_ALLOWED(TRUE); - vm_page_lock_queues(); - -#if __arm64__ - if (preallocated_buffer_available) { - - if ((contig_pages >= MIN_CONTIG_PAGES_REQUEST_FOR_PREALLOCATED_BUFFER) && (contig_pages <= PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT)) { - - m = cpm_preallocated_pages_list; - - start_idx = (unsigned int) (m - &vm_pages[0]); - - if (wire == FALSE) { - - last_idx = start_idx; - - for(npages = 0; npages < contig_pages; npages++, last_idx++) { - - assert(vm_pages[last_idx].vmp_gobbled == FALSE); - - vm_pages[last_idx].vmp_gobbled = TRUE; - vm_page_gobble_count++; - - assert(1 == vm_pages[last_idx].vmp_wire_count); - /* - * Gobbled pages are counted as wired pages. So no need to drop - * the global wired page count. Just the page's wire count is fine. - */ - vm_pages[last_idx].vmp_wire_count--; - vm_pages[last_idx].vmp_q_state = VM_PAGE_NOT_ON_Q; - } - - } - - last_idx = start_idx + contig_pages - 1; - - vm_pages[last_idx].vmp_snext = NULL; - - printf("Using preallocated buffer: Requested size (pages):%d... 
index range: %d-%d...freeing %llu pages\n", contig_pages, start_idx, last_idx, PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT - contig_pages); - - last_idx += 1; - for(npages = contig_pages; npages < PREALLOCATED_CONTIG_BUFFER_PAGES_COUNT; npages++, last_idx++) { - - VM_PAGE_ZERO_PAGEQ_ENTRY(&vm_pages[last_idx]); - vm_page_free(&vm_pages[last_idx]); - } - - cpm_preallocated_pages_list = NULL; - preallocated_buffer_available = FALSE; - - goto done_scanning; - } + /* + * If there are still delayed pages, try to free up some that match. + */ + if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) { + vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask); } -#endif /* __arm64__ */ + vm_page_lock_queues(); lck_mtx_lock(&vm_page_queue_free_lock); RESET_STATE_OF_RUN(); @@ -5381,18 +5580,19 @@ full_scan_again: free_available = vm_page_free_count - vm_page_free_reserved; wrapped = FALSE; - - if(flags & KMA_LOMEM) + + if (flags & KMA_LOMEM) { idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx; - else + } else { idx_last_contig_page_found = vm_page_find_contiguous_last_idx; + } orig_last_idx = idx_last_contig_page_found; last_idx = orig_last_idx; for (page_idx = last_idx, start_idx = last_idx; - npages < contig_pages && page_idx < vm_pages_count; - page_idx++) { + npages < contig_pages && page_idx < vm_pages_count; + page_idx++) { retry: if (wrapped && npages == 0 && @@ -5419,10 +5619,9 @@ retry: * not aligned */ RESET_STATE_OF_RUN(); - } else if (VM_PAGE_WIRED(m) || m->vmp_gobbled || - m->vmp_laundry || m->vmp_wanted || - m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) { + m->vmp_laundry || m->vmp_wanted || + m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) { /* * page is in a transient state * or a state we don't want to deal @@ -5430,11 +5629,10 @@ retry: * means starting a new run */ RESET_STATE_OF_RUN(); - } else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || - (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) || - (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) || - (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) { + (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) || + (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) || + (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) { /* * page needs to be on one of our queues (other than the pageout or special free queues) * or it needs to belong to the compressor pool (which is now indicated @@ -5446,7 +5644,6 @@ retry: * means starting a new run */ RESET_STATE_OF_RUN(); - } else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) && (!m->vmp_tabled || m->vmp_busy)) { /* * pages on the free list are always 'busy' * so we couldn't test for 'busy' in the check * for the transient states... pages that are * 'free' are never 'tabled', so we also couldn't * test for 'tabled'. So we check here to make * sure that a non-free page is not busy and is - * tabled on an object... 
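* ('tabled' here means the page has been entered into a vm_object via
* vm_page_insert() and so carries a valid object/offset pairing)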
* if not, don't consider it which * means starting a new run */ RESET_STATE_OF_RUN(); - } else { if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) { if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) { @@ -5475,7 +5671,7 @@ retry: npages++; } prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m); - + VM_PAGE_CHECK(m); if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) { free_considered++; @@ -5498,7 +5694,7 @@ retry: #endif } - if ((free_considered + substitute_needed) > free_available) { + if ((free_considered + substitute_needed) > free_available) { /* * if we let this run continue * we will end up dropping the vm_page_free_count @@ -5525,7 +5721,6 @@ retry: } did_consider: if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) { - PAGE_REPLACEMENT_ALLOWED(FALSE); lck_mtx_unlock(&vm_page_queue_free_lock); @@ -5562,10 +5757,11 @@ did_consider: * Start again from the very first page. */ RESET_STATE_OF_RUN(); - if( flags & KMA_LOMEM) + if (flags & KMA_LOMEM) { idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = 0; - else + } else { idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0; + } last_idx = 0; page_idx = last_idx; wrapped = TRUE; @@ -5573,13 +5769,13 @@ did_consider: } lck_mtx_unlock(&vm_page_queue_free_lock); } else { - vm_page_t m1; - vm_page_t m2; - unsigned int cur_idx; - unsigned int tmp_start_idx; - vm_object_t locked_object = VM_OBJECT_NULL; - boolean_t abort_run = FALSE; - + vm_page_t m1; + vm_page_t m2; + unsigned int cur_idx; + unsigned int tmp_start_idx; + vm_object_t locked_object = VM_OBJECT_NULL; + boolean_t abort_run = FALSE; + assert(page_idx - start_idx == contig_pages); tmp_start_idx = start_idx; @@ -5587,14 +5783,13 @@ did_consider: /* * first pass through to pull the free pages * off of the free queue so that in case we - * need substitute pages, we won't grab any + * need substitute pages, we won't grab any * of the free pages in the run... 
we'll clear * the 'free' bit in the 2nd pass, and even in * an abort_run case, we'll collect all of the * free pages in this run and return them to the free list */ while (start_idx < page_idx) { - m1 = &vm_pages[start_idx++]; #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL @@ -5608,10 +5803,7 @@ did_consider: #if MACH_ASSERT vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE); #endif - vm_page_queue_remove(&vm_page_queue_free[color].qhead, - m1, - vm_page_t, - vmp_pageq); + vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq); VM_PAGE_ZERO_PAGEQ_ENTRY(m1); #if MACH_ASSERT @@ -5628,11 +5820,12 @@ did_consider: vm_page_free_count--; } } - if( flags & KMA_LOMEM) + if (flags & KMA_LOMEM) { vm_page_lomem_find_contiguous_last_idx = page_idx; - else + } else { vm_page_find_contiguous_last_idx = page_idx; - + } + /* * we can drop the free queue lock at this point since * we've pulled any 'free' candidates off of the list @@ -5667,8 +5860,9 @@ did_consider: int refmod; boolean_t disconnected, reusable; - if (abort_run == TRUE) + if (abort_run == TRUE) { continue; + } assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q); @@ -5679,15 +5873,15 @@ did_consider: vm_object_unlock(locked_object); locked_object = VM_OBJECT_NULL; } - if (vm_object_lock_try(object)) + if (vm_object_lock_try(object)) { locked_object = object; + } } - if (locked_object == VM_OBJECT_NULL || + if (locked_object == VM_OBJECT_NULL || (VM_PAGE_WIRED(m1) || m1->vmp_gobbled || - m1->vmp_laundry || m1->vmp_wanted || - m1->vmp_cleaning || m1->vmp_overwriting || m1->vmp_free_when_done || m1->vmp_busy) || + m1->vmp_laundry || m1->vmp_wanted || + m1->vmp_cleaning || m1->vmp_overwriting || m1->vmp_free_when_done || m1->vmp_busy) || (m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) { - if (locked_object) { vm_object_unlock(locked_object); locked_object = VM_OBJECT_NULL; @@ -5701,7 +5895,7 @@ did_consider: reusable = FALSE; if ((m1->vmp_reusable || - object->all_reusable) && + object->all_reusable) && (m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) && !m1->vmp_dirty && !m1->vmp_reference) { @@ -5718,12 +5912,12 @@ did_consider: } if ((m1->vmp_pmapped && - ! reusable) || + !reusable) || m1->vmp_dirty || m1->vmp_precious) { vm_object_offset_t offset; - m2 = vm_page_grab(); + m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD); if (m2 == VM_PAGE_NULL) { if (locked_object) { @@ -5734,11 +5928,12 @@ did_consider: abort_run = TRUE; continue; } - if (! disconnected) { - if (m1->vmp_pmapped) + if (!disconnected) { + if (m1->vmp_pmapped) { refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1)); - else + } else { refmod = 0; + } } /* copy the page's contents */ @@ -5748,29 +5943,29 @@ did_consider: assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q); assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q); assert(!m1->vmp_laundry); - m2->vmp_reference = m1->vmp_reference; + m2->vmp_reference = m1->vmp_reference; assert(!m1->vmp_gobbled); assert(!m1->vmp_private); - m2->vmp_no_cache = m1->vmp_no_cache; - m2->vmp_xpmapped = 0; + m2->vmp_no_cache = m1->vmp_no_cache; + m2->vmp_xpmapped = 0; assert(!m1->vmp_busy); assert(!m1->vmp_wanted); assert(!m1->vmp_fictitious); - m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */ - m2->vmp_wpmapped = m1->vmp_wpmapped; + m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? 
*/ + m2->vmp_wpmapped = m1->vmp_wpmapped; assert(!m1->vmp_free_when_done); - m2->vmp_absent = m1->vmp_absent; - m2->vmp_error = m1->vmp_error; - m2->vmp_dirty = m1->vmp_dirty; + m2->vmp_absent = m1->vmp_absent; + m2->vmp_error = m1->vmp_error; + m2->vmp_dirty = m1->vmp_dirty; assert(!m1->vmp_cleaning); - m2->vmp_precious = m1->vmp_precious; - m2->vmp_clustered = m1->vmp_clustered; + m2->vmp_precious = m1->vmp_precious; + m2->vmp_clustered = m1->vmp_clustered; assert(!m1->vmp_overwriting); - m2->vmp_restart = m1->vmp_restart; - m2->vmp_unusual = m1->vmp_unusual; + m2->vmp_restart = m1->vmp_restart; + m2->vmp_unusual = m1->vmp_unusual; m2->vmp_cs_validated = m1->vmp_cs_validated; - m2->vmp_cs_tainted = m1->vmp_cs_tainted; - m2->vmp_cs_nx = m1->vmp_cs_nx; + m2->vmp_cs_tainted = m1->vmp_cs_tainted; + m2->vmp_cs_nx = m1->vmp_cs_nx; /* * If m1 had really been reusable, @@ -5784,8 +5979,9 @@ did_consider: // assert(!m1->vmp_lopage); - if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) + if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) { m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR; + } /* * page may need to be flushed if @@ -5803,8 +5999,9 @@ did_consider: */ pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED); - if (refmod & VM_MEM_REFERENCED) + if (refmod & VM_MEM_REFERENCED) { m2->vmp_reference = TRUE; + } if (refmod & VM_MEM_MODIFIED) { SET_PAGE_DIRTY(m2, TRUE); } @@ -5830,20 +6027,19 @@ did_consider: m2->vmp_wpmapped = TRUE; PMAP_ENTER(kernel_pmap, m2->vmp_offset, m2, - VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, kr); + VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, kr); assert(kr == KERN_SUCCESS); compressed_pages++; - } else { - if (m2->vmp_reference) + if (m2->vmp_reference) { vm_page_activate(m2); - else + } else { vm_page_deactivate(m2); + } } PAGE_WAKEUP_DONE(m2); - } else { assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR); @@ -5858,7 +6054,6 @@ did_consider: } stolen_pages++; - } #if CONFIG_BACKGROUND_QUEUE vm_page_assign_background_state(m1); @@ -5896,21 +6091,22 @@ did_consider: wrapped = TRUE; } abort_run = FALSE; - + /* * We didn't find a contiguous range but we didn't * start from the very first page. * Start again from the very first page. 
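* ('wrapped' is set at this point, so once this second sweep reaches the
* index the first pass started from, the retry check at the top of the
* loop gives up rather than spinning through the array forever)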
*/ RESET_STATE_OF_RUN(); - - if( flags & KMA_LOMEM) + + if (flags & KMA_LOMEM) { idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = page_idx; - else + } else { idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx; - + } + last_idx = page_idx; - + if (m != VM_PAGE_NULL) { vm_page_unlock_queues(); vm_page_free_list(m, FALSE); @@ -5921,33 +6117,34 @@ did_consider: lck_mtx_lock(&vm_page_queue_free_lock); /* - * reset our free page limit since we - * dropped the lock protecting the vm_page_free_queue - */ + * reset our free page limit since we + * dropped the lock protecting the vm_page_free_queue + */ free_available = vm_page_free_count - vm_page_free_reserved; goto retry; } for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) { - assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q); assert(m1->vmp_wire_count == 0); if (wire == TRUE) { m1->vmp_wire_count++; m1->vmp_q_state = VM_PAGE_IS_WIRED; - } else + } else { m1->vmp_gobbled = TRUE; + } } - if (wire == FALSE) + if (wire == FALSE) { vm_page_gobble_count += npages; + } /* * gobbled pages are also counted as wired pages */ vm_page_wire_count += npages; - assert(vm_page_verify_contiguous(m, npages)); + assert(vm_page_verify_contiguous(m, npages)); } done_scanning: PAGE_REPLACEMENT_ALLOWED(FALSE); @@ -5969,9 +6166,9 @@ done_scanning: } if (vm_page_find_contig_debug) { printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n", - __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT, - (long)tv_end_sec, tv_end_usec, orig_last_idx, - scanned, yielded, dumped_run, stolen_pages, compressed_pages); + __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT, + (long)tv_end_sec, tv_end_usec, orig_last_idx, + scanned, yielded, dumped_run, stolen_pages, compressed_pages); } #endif @@ -5980,8 +6177,8 @@ done_scanning: #endif if (m == NULL && zone_gc_called == FALSE) { printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... 
wired count is %d\n", - __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT, - scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count); + __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT, + scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count); if (consider_buffer_cache_collect != NULL) { (void)(*consider_buffer_cache_collect)(1); @@ -6003,18 +6200,19 @@ done_scanning: */ kern_return_t cpm_allocate( - vm_size_t size, - vm_page_t *list, - ppnum_t max_pnum, - ppnum_t pnum_mask, - boolean_t wire, - int flags) + vm_size_t size, + vm_page_t *list, + ppnum_t max_pnum, + ppnum_t pnum_mask, + boolean_t wire, + int flags) { - vm_page_t pages; - unsigned int npages; + vm_page_t pages; + unsigned int npages; - if (size % PAGE_SIZE != 0) + if (size % PAGE_SIZE != 0) { return KERN_INVALID_ARGUMENT; + } npages = (unsigned int) (size / PAGE_SIZE); if (npages != size / PAGE_SIZE) { @@ -6029,16 +6227,18 @@ cpm_allocate( */ pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags); - if (pages == VM_PAGE_NULL) + if (pages == VM_PAGE_NULL) { return KERN_NO_SPACE; + } /* * determine need for wakeups */ - if (vm_page_free_count < vm_page_free_min) - thread_wakeup((event_t) &vm_page_free_wanted); - + if (vm_page_free_count < vm_page_free_min) { + thread_wakeup((event_t) &vm_page_free_wanted); + } + VM_CHECK_MEMORYSTATUS; - + /* * The CPM pages should now be available and * ordered by ascending physical address. @@ -6053,7 +6253,7 @@ cpm_allocate( unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT; /* - * when working on a 'run' of pages, it is necessary to hold + * when working on a 'run' of pages, it is necessary to hold * the vm_page_queue_lock (a hot global lock) for certain operations * on the page... however, the majority of the work can be done * while merely holding the object lock... in fact there are certain @@ -6073,14 +6273,14 @@ unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT; void vm_page_do_delayed_work( - vm_object_t object, + vm_object_t object, vm_tag_t tag, struct vm_page_delayed_work *dwp, - int dw_count) + int dw_count) { - int j; - vm_page_t m; - vm_page_t local_free_q = VM_PAGE_NULL; + int j; + vm_page_t m; + vm_page_t local_free_q = VM_PAGE_NULL; /* * pageout_scan takes the vm_page_lock_queues first @@ -6100,29 +6300,31 @@ vm_page_do_delayed_work( vm_page_lockspin_queues(); - for (j = 0; ; j++) { + for (j = 0;; j++) { if (!vm_object_lock_avoid(object) && - _vm_object_lock_try(object)) + _vm_object_lock_try(object)) { break; + } vm_page_unlock_queues(); mutex_pause(j); vm_page_lockspin_queues(); } } for (j = 0; j < dw_count; j++, dwp++) { - m = dwp->dw_m; - if (dwp->dw_mask & DW_vm_pageout_throttle_up) + if (dwp->dw_mask & DW_vm_pageout_throttle_up) { vm_pageout_throttle_up(m); + } #if CONFIG_PHANTOM_CACHE - if (dwp->dw_mask & DW_vm_phantom_cache_update) + if (dwp->dw_mask & DW_vm_phantom_cache_update) { vm_phantom_cache_update(m); + } #endif - if (dwp->dw_mask & DW_vm_page_wire) + if (dwp->dw_mask & DW_vm_page_wire) { vm_page_wire(m, tag, FALSE); - else if (dwp->dw_mask & DW_vm_page_unwire) { - boolean_t queueit; + } else if (dwp->dw_mask & DW_vm_page_unwire) { + boolean_t queueit; queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? 
FALSE : TRUE; @@ -6139,16 +6341,15 @@ vm_page_do_delayed_work( m->vmp_snext = local_free_q; local_free_q = m; } else { - if (dwp->dw_mask & DW_vm_page_deactivate_internal) + if (dwp->dw_mask & DW_vm_page_deactivate_internal) { vm_page_deactivate_internal(m, FALSE); - else if (dwp->dw_mask & DW_vm_page_activate) { + } else if (dwp->dw_mask & DW_vm_page_activate) { if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) { vm_page_activate(m); } - } - else if (dwp->dw_mask & DW_vm_page_speculate) + } else if (dwp->dw_mask & DW_vm_page_speculate) { vm_page_speculate(m, TRUE); - else if (dwp->dw_mask & DW_enqueue_cleaned) { + } else if (dwp->dw_mask & DW_enqueue_cleaned) { /* * if we didn't hold the object lock and did this, * we might disconnect the page, then someone might @@ -6163,26 +6364,28 @@ vm_page_do_delayed_work( * this page has been touched since it got cleaned; let's activate it * if it hasn't already been */ - VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1); + VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1); VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1); - if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) + if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) { vm_page_activate(m); + } } else { m->vmp_reference = FALSE; vm_page_enqueue_cleaned(m); } - } - else if (dwp->dw_mask & DW_vm_page_lru) + } else if (dwp->dw_mask & DW_vm_page_lru) { vm_page_lru(m); - else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) { - if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) + } else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) { + if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) { vm_page_queues_remove(m, TRUE); + } } - if (dwp->dw_mask & DW_set_reference) + if (dwp->dw_mask & DW_set_reference) { m->vmp_reference = TRUE; - else if (dwp->dw_mask & DW_clear_reference) + } else if (dwp->dw_mask & DW_clear_reference) { m->vmp_reference = FALSE; + } if (dwp->dw_mask & DW_move_page) { if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) { @@ -6193,53 +6396,56 @@ vm_page_do_delayed_work( vm_page_enqueue_inactive(m, FALSE); } } - if (dwp->dw_mask & DW_clear_busy) + if (dwp->dw_mask & DW_clear_busy) { m->vmp_busy = FALSE; + } - if (dwp->dw_mask & DW_PAGE_WAKEUP) + if (dwp->dw_mask & DW_PAGE_WAKEUP) { PAGE_WAKEUP(m); + } } } vm_page_unlock_queues(); - if (local_free_q) + if (local_free_q) { vm_page_free_list(local_free_q, TRUE); - - VM_CHECK_MEMORYSTATUS; + } + VM_CHECK_MEMORYSTATUS; } kern_return_t vm_page_alloc_list( - int page_count, - int flags, + int page_count, + int flags, vm_page_t *list) { - vm_page_t lo_page_list = VM_PAGE_NULL; - vm_page_t mem; - int i; + vm_page_t lo_page_list = VM_PAGE_NULL; + vm_page_t mem; + int i; - if ( !(flags & KMA_LOMEM)) + if (!(flags & KMA_LOMEM)) { panic("vm_page_alloc_list: called w/o KMA_LOMEM"); + } for (i = 0; i < page_count; i++) { - mem = vm_page_grablo(); if (mem == VM_PAGE_NULL) { - if (lo_page_list) + if (lo_page_list) { vm_page_free_list(lo_page_list, FALSE); + } *list = VM_PAGE_NULL; - return (KERN_RESOURCE_SHORTAGE); + return KERN_RESOURCE_SHORTAGE; } mem->vmp_snext = lo_page_list; lo_page_list = mem; } *list = lo_page_list; - return (KERN_SUCCESS); + return KERN_SUCCESS; } void @@ -6251,22 +6457,22 @@ vm_page_set_offset(vm_page_t page, vm_object_offset_t offset) vm_page_t vm_page_get_next(vm_page_t page) { - return (page->vmp_snext); + return page->vmp_snext; } vm_object_offset_t vm_page_get_offset(vm_page_t page) { - return (page->vmp_offset); + return page->vmp_offset; } ppnum_t vm_page_get_phys_page(vm_page_t page) { - return (VM_PAGE_GET_PHYS_PAGE(page)); + return 
VM_PAGE_GET_PHYS_PAGE(page); } - - + + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #if HIBERNATION @@ -6281,12 +6487,12 @@ void hibernate_flush_wait(void); void hibernate_mark_in_progress(void); void hibernate_clear_in_progress(void); -void hibernate_free_range(int, int); -void hibernate_hash_insert_page(vm_page_t); -uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *); -void hibernate_rebuild_vm_structs(void); -uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *); -ppnum_t hibernate_lookup_paddr(unsigned int); +void hibernate_free_range(int, int); +void hibernate_hash_insert_page(vm_page_t); +uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *); +void hibernate_rebuild_vm_structs(void); +uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *); +ppnum_t hibernate_lookup_paddr(unsigned int); struct hibernate_statistics { int hibernate_considered; @@ -6327,21 +6533,20 @@ struct hibernate_statistics { * so that we don't overrun the estimated image size, which would * result in a hibernation failure. */ -#define HIBERNATE_XPMAPPED_LIMIT 40000 +#define HIBERNATE_XPMAPPED_LIMIT 40000 static int hibernate_drain_pageout_queue(struct vm_pageout_queue *q) { - wait_result_t wait_result; + wait_result_t wait_result; vm_page_lock_queues(); - while ( !vm_page_queue_empty(&q->pgo_pending) ) { - + while (!vm_page_queue_empty(&q->pgo_pending)) { q->pgo_draining = TRUE; - assert_wait_timeout((event_t) (&q->pgo_laundry+1), THREAD_INTERRUPTIBLE, 5000, 1000*NSEC_PER_USEC); + assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC); vm_page_unlock_queues(); @@ -6349,11 +6554,12 @@ hibernate_drain_pageout_queue(struct vm_pageout_queue *q) if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) { hibernate_stats.hibernate_drain_timeout++; - - if (q == &vm_pageout_queue_external) - return (0); - - return (1); + + if (q == &vm_pageout_queue_external) { + return 0; + } + + return 1; } vm_page_lock_queues(); @@ -6361,7 +6567,7 @@ hibernate_drain_pageout_queue(struct vm_pageout_queue *q) } vm_page_unlock_queues(); - return (0); + return 0; } @@ -6370,19 +6576,19 @@ boolean_t hibernate_skip_external = FALSE; static int hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) { - vm_page_t m; - vm_object_t l_object = NULL; - vm_object_t m_object = NULL; - int refmod_state = 0; - int try_failed_count = 0; - int retval = 0; - int current_run = 0; - struct vm_pageout_queue *iq; - struct vm_pageout_queue *eq; - struct vm_pageout_queue *tq; + vm_page_t m; + vm_object_t l_object = NULL; + vm_object_t m_object = NULL; + int refmod_state = 0; + int try_failed_count = 0; + int retval = 0; + int current_run = 0; + struct vm_pageout_queue *iq; + struct vm_pageout_queue *eq; + struct vm_pageout_queue *tq; KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(q), qcount); + VM_KERNEL_UNSLIDE_OR_PERM(q), qcount); iq = &vm_pageout_queue_internal; eq = &vm_pageout_queue_external; @@ -6390,7 +6596,6 @@ hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) vm_page_lock_queues(); while (qcount && !vm_page_queue_empty(q)) { - if (current_run++ == 1000) { if (hibernate_should_abort()) { retval = 1; @@ -6408,13 +6613,13 @@ hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) * already got the lock */ if (m_object != 
l_object) { - /* - * the object associated with candidate page is + /* + * the object associated with candidate page is * different from the one we were just working * with... dump the lock if we still own it */ - if (l_object != NULL) { - vm_object_unlock(l_object); + if (l_object != NULL) { + vm_object_unlock(l_object); l_object = NULL; } /* @@ -6422,10 +6627,9 @@ hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) * page queues lock, we can only 'try' for this one. * if the 'try' fails, we need to do a mutex_pause * to allow the owner of the object lock a chance to - * run... + * run... */ - if ( !vm_object_lock_try_scan(m_object)) { - + if (!vm_object_lock_try_scan(m_object)) { if (try_failed_count > 20) { hibernate_stats.hibernate_queue_nolock++; @@ -6442,15 +6646,16 @@ hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) l_object = m_object; } } - if ( !m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error) { + if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error) { /* * page is not to be cleaned * put it back on the head of its queue */ - if (m->vmp_cleaning) + if (m->vmp_cleaning) { hibernate_stats.hibernate_skipped_cleaning++; - else + } else { hibernate_stats.hibernate_skipped_transient++; + } goto reenter_pg_on_q; } @@ -6463,54 +6668,56 @@ hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) goto reenter_pg_on_q; } } - if ( !m->vmp_dirty && m->vmp_pmapped) { - refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m)); + if (!m->vmp_dirty && m->vmp_pmapped) { + refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m)); if ((refmod_state & VM_MEM_MODIFIED)) { SET_PAGE_DIRTY(m, FALSE); } - } else + } else { refmod_state = 0; + } - if ( !m->vmp_dirty) { + if (!m->vmp_dirty) { /* * page is not to be cleaned * put it back on the head of its queue */ - if (m->vmp_precious) + if (m->vmp_precious) { hibernate_stats.hibernate_skipped_precious++; + } goto reenter_pg_on_q; } if (hibernate_skip_external == TRUE && !m_object->internal) { - hibernate_stats.hibernate_skipped_external++; - + goto reenter_pg_on_q; } tq = NULL; if (m_object->internal) { - if (VM_PAGE_Q_THROTTLED(iq)) + if (VM_PAGE_Q_THROTTLED(iq)) { tq = iq; - } else if (VM_PAGE_Q_THROTTLED(eq)) + } + } else if (VM_PAGE_Q_THROTTLED(eq)) { tq = eq; + } if (tq != NULL) { - wait_result_t wait_result; - int wait_count = 5; + wait_result_t wait_result; + int wait_count = 5; - if (l_object != NULL) { - vm_object_unlock(l_object); + if (l_object != NULL) { + vm_object_unlock(l_object); l_object = NULL; } while (retval == 0) { - tq->pgo_throttled = TRUE; - assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000*NSEC_PER_USEC); + assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC); vm_page_unlock_queues(); @@ -6518,16 +6725,18 @@ hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) vm_page_lock_queues(); - if (wait_result != THREAD_TIMED_OUT) + if (wait_result != THREAD_TIMED_OUT) { + break; + } + if (!VM_PAGE_Q_THROTTLED(tq)) { break; - if (!VM_PAGE_Q_THROTTLED(tq)) - break; + } - if (hibernate_should_abort()) + if (hibernate_should_abort()) { retval = 1; + } if (--wait_count == 0) { - hibernate_stats.hibernate_throttle_timeout++; if (tq == eq) { @@ -6537,8 +6746,9 @@ hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) retval = 1; } } - if (retval) + if (retval) { break; + } hibernate_stats.hibernate_throttled++; @@ -6551,8 +6761,9 @@ 
hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) */ vm_page_queues_remove(m, TRUE); - if (m_object->internal == TRUE) + if (m_object->internal == TRUE) { pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL); + } vm_pageout_cluster(m); @@ -6561,8 +6772,8 @@ hibernate_flush_queue(vm_page_queue_head_t *q, int qcount) goto next_pg; reenter_pg_on_q: - vm_page_queue_remove(q, m, vm_page_t, vmp_pageq); - vm_page_queue_enter(q, m, vm_page_t, vmp_pageq); + vm_page_queue_remove(q, m, vmp_pageq); + vm_page_queue_enter(q, m, vmp_pageq); hibernate_stats.hibernate_reentered_on_q++; next_pg: @@ -6580,77 +6791,85 @@ next_pg: KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0); - return (retval); + return retval; } static int hibernate_flush_dirty_pages(int pass) { - struct vm_speculative_age_q *aq; - uint32_t i; + struct vm_speculative_age_q *aq; + uint32_t i; if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) + for (i = 0; i < vm_page_local_q_count; i++) { vm_page_reactivate_local(i, TRUE, FALSE); + } } for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) { - int qcount; - vm_page_t m; + int qcount; + vm_page_t m; aq = &vm_page_queue_speculative[i]; - if (vm_page_queue_empty(&aq->age_q)) + if (vm_page_queue_empty(&aq->age_q)) { continue; + } qcount = 0; vm_page_lockspin_queues(); - vm_page_queue_iterate(&aq->age_q, - m, - vm_page_t, - vmp_pageq) - { + vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) { qcount++; } vm_page_unlock_queues(); if (qcount) { - if (hibernate_flush_queue(&aq->age_q, qcount)) - return (1); + if (hibernate_flush_queue(&aq->age_q, qcount)) { + return 1; + } } } - if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) - return (1); + if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) { + return 1; + } /* XXX FBDP TODO: flush secluded queue */ - if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) - return (1); - if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) - return (1); - if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) - return (1); - - if (pass == 1) + if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) { + return 1; + } + if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) { + return 1; + } + if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) { + return 1; + } + + if (pass == 1) { vm_compressor_record_warmup_start(); + } if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) { - if (pass == 1) + if (pass == 1) { vm_compressor_record_warmup_end(); - return (1); + } + return 1; } if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) { - if (pass == 1) + if (pass == 1) { vm_compressor_record_warmup_end(); - return (1); + } + return 1; } - if (pass == 1) + if (pass == 1) { vm_compressor_record_warmup_end(); + } - if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) - return (1); + if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) { + return 1; + } - return (0); + return 0; } @@ -6664,7 +6883,7 @@ hibernate_reset_stats() int hibernate_flush_memory() { - int retval; + int retval; assert(VM_CONFIG_COMPRESSOR_IS_PRESENT); @@ -6674,7 +6893,6 @@ hibernate_flush_memory() 
hibernate_skip_external = FALSE; if ((retval = hibernate_flush_dirty_pages(1)) == 0) { - KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0); vm_compressor_flush(); @@ -6686,7 +6904,7 @@ hibernate_flush_memory() KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0); orig_wire_count = vm_page_wire_count; - + (void)(*consider_buffer_cache_collect)(1); consider_zone_gc(FALSE); @@ -6699,831 +6917,861 @@ hibernate_flush_memory() KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0); - if (retval) + if (retval) { HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT); + } - HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n", - hibernate_stats.hibernate_considered, - hibernate_stats.hibernate_reentered_on_q, - hibernate_stats.hibernate_found_dirty); - HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n", - hibernate_stats.hibernate_skipped_cleaning, - hibernate_stats.hibernate_skipped_transient, - hibernate_stats.hibernate_skipped_precious, - hibernate_stats.hibernate_skipped_external, - hibernate_stats.hibernate_queue_nolock); - HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n", - hibernate_stats.hibernate_queue_paused, - hibernate_stats.hibernate_throttled, - hibernate_stats.hibernate_throttle_timeout, - hibernate_stats.hibernate_drained, - hibernate_stats.hibernate_drain_timeout); + HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n", + hibernate_stats.hibernate_considered, + hibernate_stats.hibernate_reentered_on_q, + hibernate_stats.hibernate_found_dirty); + HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n", + hibernate_stats.hibernate_skipped_cleaning, + hibernate_stats.hibernate_skipped_transient, + hibernate_stats.hibernate_skipped_precious, + hibernate_stats.hibernate_skipped_external, + hibernate_stats.hibernate_queue_nolock); + HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n", + hibernate_stats.hibernate_queue_paused, + hibernate_stats.hibernate_throttled, + hibernate_stats.hibernate_throttle_timeout, + hibernate_stats.hibernate_drained, + hibernate_stats.hibernate_drain_timeout); - return (retval); + return retval; } static void hibernate_page_list_zero(hibernate_page_list_t *list) { - uint32_t bank; - hibernate_bitmap_t * bitmap; - - bitmap = &list->bank_bitmap[0]; - for (bank = 0; bank < list->bank_count; bank++) - { - uint32_t last_bit; - - bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2); - // set out-of-bound bits at end of bitmap. - last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31); - if (last_bit) - bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit); + uint32_t bank; + hibernate_bitmap_t * bitmap; + + bitmap = &list->bank_bitmap[0]; + for (bank = 0; bank < list->bank_count; bank++) { + uint32_t last_bit; + + bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2); + // set out-of-bound bits at end of bitmap. 
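/*
 * Each bank bitmap covers pages first_page..last_page, one bit per page,
 * MSB-first within each 32-bit word, and a set bit means the page does not
 * need saving (bits zero => save, per the convention documented ahead of
 * hibernate_page_list_setall()).  (last_page - first_page + 1) & 31 is the
 * count of valid bits in the final word; when non-zero, the right-shifted
 * mask below forces the unused low-order tail of that word to 1 so the
 * image writer never tries to save pages past the end of the bank.
 */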
+ last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31); + if (last_bit) { + bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit); + } - bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; - } + bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords]; + } } void hibernate_free_gobble_pages(void) { - vm_page_t m, next; - uint32_t count = 0; + vm_page_t m, next; + uint32_t count = 0; + + m = (vm_page_t) hibernate_gobble_queue; + while (m) { + next = m->vmp_snext; + vm_page_free(m); + count++; + m = next; + } + hibernate_gobble_queue = VM_PAGE_NULL; - m = (vm_page_t) hibernate_gobble_queue; - while(m) - { - next = m->vmp_snext; - vm_page_free(m); - count++; - m = next; - } - hibernate_gobble_queue = VM_PAGE_NULL; - - if (count) - HIBLOG("Freed %d pages\n", count); + if (count) { + HIBLOG("Freed %d pages\n", count); + } } -static boolean_t +static boolean_t hibernate_consider_discard(vm_page_t m, boolean_t preflight) { - vm_object_t object = NULL; - int refmod_state; - boolean_t discard = FALSE; + vm_object_t object = NULL; + int refmod_state; + boolean_t discard = FALSE; - do - { - if (m->vmp_private) - panic("hibernate_consider_discard: private"); + do{ + if (m->vmp_private) { + panic("hibernate_consider_discard: private"); + } - object = VM_PAGE_OBJECT(m); + object = VM_PAGE_OBJECT(m); - if (!vm_object_lock_try(object)) { - object = NULL; - if (!preflight) hibernate_stats.cd_lock_failed++; - break; - } - if (VM_PAGE_WIRED(m)) { - if (!preflight) hibernate_stats.cd_found_wired++; - break; - } - if (m->vmp_precious) { - if (!preflight) hibernate_stats.cd_found_precious++; - break; - } - if (m->vmp_busy || !object->alive) { - /* - * Somebody is playing with this page. - */ - if (!preflight) hibernate_stats.cd_found_busy++; - break; - } - if (m->vmp_absent || m->vmp_unusual || m->vmp_error) { - /* - * If it's unusual in any way, ignore it - */ - if (!preflight) hibernate_stats.cd_found_unusual++; - break; - } - if (m->vmp_cleaning) { - if (!preflight) hibernate_stats.cd_found_cleaning++; - break; - } - if (m->vmp_laundry) { - if (!preflight) hibernate_stats.cd_found_laundry++; - break; - } - if (!m->vmp_dirty) - { - refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m)); - - if (refmod_state & VM_MEM_REFERENCED) - m->vmp_reference = TRUE; - if (refmod_state & VM_MEM_MODIFIED) { - SET_PAGE_DIRTY(m, FALSE); - } - } - - /* - * If it's clean or purgeable we can discard the page on wakeup. - */ - discard = (!m->vmp_dirty) + if (!vm_object_lock_try(object)) { + object = NULL; + if (!preflight) { + hibernate_stats.cd_lock_failed++; + } + break; + } + if (VM_PAGE_WIRED(m)) { + if (!preflight) { + hibernate_stats.cd_found_wired++; + } + break; + } + if (m->vmp_precious) { + if (!preflight) { + hibernate_stats.cd_found_precious++; + } + break; + } + if (m->vmp_busy || !object->alive) { + /* + * Somebody is playing with this page.
+ */ + if (!preflight) { + hibernate_stats.cd_found_busy++; + } + break; + } + if (m->vmp_absent || m->vmp_unusual || m->vmp_error) { + /* + * If it's unusual in any way, ignore it + */ + if (!preflight) { + hibernate_stats.cd_found_unusual++; + } + break; + } + if (m->vmp_cleaning) { + if (!preflight) { + hibernate_stats.cd_found_cleaning++; + } + break; + } + if (m->vmp_laundry) { + if (!preflight) { + hibernate_stats.cd_found_laundry++; + } + break; + } + if (!m->vmp_dirty) { + refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m)); + + if (refmod_state & VM_MEM_REFERENCED) { + m->vmp_reference = TRUE; + } + if (refmod_state & VM_MEM_MODIFIED) { + SET_PAGE_DIRTY(m, FALSE); + } + } + + /* + * If it's clean or purgeable we can discard the page on wakeup. + */ + discard = (!m->vmp_dirty) || (VM_PURGABLE_VOLATILE == object->purgable) - || (VM_PURGABLE_EMPTY == object->purgable); + || (VM_PURGABLE_EMPTY == object->purgable); - if (discard == FALSE) { - if (!preflight) - hibernate_stats.cd_found_dirty++; - } else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) { - if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) { - if (!preflight) - hibernate_stats.cd_found_xpmapped++; - discard = FALSE; - } else { - if (!preflight) - hibernate_stats.cd_skipped_xpmapped++; + if (discard == FALSE) { + if (!preflight) { + hibernate_stats.cd_found_dirty++; + } + } else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) { + if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) { + if (!preflight) { + hibernate_stats.cd_found_xpmapped++; + } + discard = FALSE; + } else { + if (!preflight) { + hibernate_stats.cd_skipped_xpmapped++; + } + } } - } - } - while (FALSE); + }while (FALSE); - if (object) - vm_object_unlock(object); + if (object) { + vm_object_unlock(object); + } - return (discard); + return discard; } static void hibernate_discard_page(vm_page_t m) { - vm_object_t m_object; + vm_object_t m_object; - if (m->vmp_absent || m->vmp_unusual || m->vmp_error) - /* - * If it's unusual in any way, ignore - */ - return; + if (m->vmp_absent || m->vmp_unusual || m->vmp_error) { + /* + * If it's unusual in any way, ignore + */ + return; + } - m_object = VM_PAGE_OBJECT(m); + m_object = VM_PAGE_OBJECT(m); #if MACH_ASSERT || DEBUG - if (!vm_object_lock_try(m_object)) - panic("hibernate_discard_page(%p) !vm_object_lock_try", m); + if (!vm_object_lock_try(m_object)) { + panic("hibernate_discard_page(%p) !vm_object_lock_try", m); + } #else - /* No need to lock page queue for token delete, hibernate_vm_unlock() - makes sure these locks are uncontended before sleep */ + /* No need to lock page queue for token delete, hibernate_vm_unlock() + * makes sure these locks are uncontended before sleep */ #endif /* MACH_ASSERT || DEBUG */ - if (m->vmp_pmapped == TRUE) - { - __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); - } - - if (m->vmp_laundry) - panic("hibernate_discard_page(%p) laundry", m); - if (m->vmp_private) - panic("hibernate_discard_page(%p) private", m); - if (m->vmp_fictitious) - panic("hibernate_discard_page(%p) fictitious", m); - - if (VM_PURGABLE_VOLATILE == m_object->purgable) - { - /* object should be on a queue */ - assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL)); - purgeable_q_t old_queue = vm_purgeable_object_remove(m_object); - assert(old_queue); - if (m_object->purgeable_when_ripe) { - vm_purgeable_token_delete_first(old_queue); + if (m->vmp_pmapped == TRUE) { + __unused int refmod_state =
pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); } - vm_object_lock_assert_exclusive(m_object); - m_object->purgable = VM_PURGABLE_EMPTY; - /* - * Purgeable ledgers: pages of VOLATILE and EMPTY objects are - * accounted in the "volatile" ledger, so no change here. - * We have to update vm_page_purgeable_count, though, since we're - * effectively purging this object. - */ - unsigned int delta; - assert(m_object->resident_page_count >= m_object->wired_page_count); - delta = (m_object->resident_page_count - m_object->wired_page_count); - assert(vm_page_purgeable_count >= delta); - assert(delta > 0); - OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count); - } - - vm_page_free(m); + if (m->vmp_laundry) { + panic("hibernate_discard_page(%p) laundry", m); + } + if (m->vmp_private) { + panic("hibernate_discard_page(%p) private", m); + } + if (m->vmp_fictitious) { + panic("hibernate_discard_page(%p) fictitious", m); + } + + if (VM_PURGABLE_VOLATILE == m_object->purgable) { + /* object should be on a queue */ + assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL)); + purgeable_q_t old_queue = vm_purgeable_object_remove(m_object); + assert(old_queue); + if (m_object->purgeable_when_ripe) { + vm_purgeable_token_delete_first(old_queue); + } + vm_object_lock_assert_exclusive(m_object); + m_object->purgable = VM_PURGABLE_EMPTY; + + /* + * Purgeable ledgers: pages of VOLATILE and EMPTY objects are + * accounted in the "volatile" ledger, so no change here. + * We have to update vm_page_purgeable_count, though, since we're + * effectively purging this object. + */ + unsigned int delta; + assert(m_object->resident_page_count >= m_object->wired_page_count); + delta = (m_object->resident_page_count - m_object->wired_page_count); + assert(vm_page_purgeable_count >= delta); + assert(delta > 0); + OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count); + } + + vm_page_free(m); #if MACH_ASSERT || DEBUG - vm_object_unlock(m_object); -#endif /* MACH_ASSERT || DEBUG */ + vm_object_unlock(m_object); +#endif /* MACH_ASSERT || DEBUG */ } /* - Grab locks for hibernate_page_list_setall() -*/ + * Grab locks for hibernate_page_list_setall() + */ void hibernate_vm_lock_queues(void) { - vm_object_lock(compressor_object); - vm_page_lock_queues(); - lck_mtx_lock(&vm_page_queue_free_lock); - lck_mtx_lock(&vm_purgeable_queue_lock); + vm_object_lock(compressor_object); + vm_page_lock_queues(); + lck_mtx_lock(&vm_page_queue_free_lock); + lck_mtx_lock(&vm_purgeable_queue_lock); - if (vm_page_local_q) { - uint32_t i; - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; - VPL_LOCK(&lq->vpl_lock); + if (vm_page_local_q) { + uint32_t i; + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_LOCK(&lq->vpl_lock); + } } - } } void hibernate_vm_unlock_queues(void) { - if (vm_page_local_q) { - uint32_t i; - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; - VPL_UNLOCK(&lq->vpl_lock); + if (vm_page_local_q) { + uint32_t i; + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_UNLOCK(&lq->vpl_lock); + } } - } - lck_mtx_unlock(&vm_purgeable_queue_lock); - lck_mtx_unlock(&vm_page_queue_free_lock); - vm_page_unlock_queues(); - vm_object_unlock(compressor_object); + lck_mtx_unlock(&vm_purgeable_queue_lock); + lck_mtx_unlock(&vm_page_queue_free_lock); + vm_page_unlock_queues(); + vm_object_unlock(compressor_object); } 
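The bitmap convention used by everything below is stated in the next comment block: a zero bit means "save this page". Near the end of hibernate_page_list_setall() the wired set is pulled out of the pageable list with a word-wise OR of the complemented wired bitmap. Here is a minimal, self-contained model of that combine step; pull_wired and the single-word bank are illustrative names for this sketch, not xnu code:

#include <stdint.h>
#include <stdio.h>

/*
 * Model of the "pull wired from hibernate_bitmap" loop in
 * hibernate_page_list_setall(): zero bit == "save this page".
 * Any page the wired list still wants saved (bit 0 in wired[]) is
 * flipped to 1 ("don't save") in the pageable list, so each page is
 * written by exactly one of the two image passes.
 */
static void
pull_wired(uint32_t *pageable, const uint32_t *wired, unsigned int nwords)
{
	for (unsigned int i = 0; i < nwords; i++) {
		pageable[i] |= ~wired[i];
	}
}

int
main(void)
{
	uint32_t pageable = 0x00000000;  /* all 32 pages start as "save as pageable" */
	uint32_t wired = ~0x000000F0u;   /* four pages are zero in the wired list: save as wired */

	pull_wired(&pageable, &wired, 1);
	printf("%08x\n", pageable);      /* 000000f0: the wired four are skipped in the pageable pass */
	return 0;
}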
/* - Bits zero in the bitmaps => page needs to be saved. All pages default to be saved, - pages known to VM to not need saving are subtracted. - Wired pages to be saved are present in page_list_wired, pageable in page_list. -*/ + * Bits zero in the bitmaps => page needs to be saved. All pages default to be saved, + * pages known to VM to not need saving are subtracted. + * Wired pages to be saved are present in page_list_wired, pageable in page_list. + */ void hibernate_page_list_setall(hibernate_page_list_t * page_list, - hibernate_page_list_t * page_list_wired, - hibernate_page_list_t * page_list_pal, - boolean_t preflight, - boolean_t will_discard, - uint32_t * pagesOut) -{ - uint64_t start, end, nsec; - vm_page_t m; - vm_page_t next; - uint32_t pages = page_list->page_count; - uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0; - uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0; - uint32_t count_wire = pages; - uint32_t count_discard_active = 0; - uint32_t count_discard_inactive = 0; - uint32_t count_discard_cleaned = 0; - uint32_t count_discard_purgeable = 0; - uint32_t count_discard_speculative = 0; - uint32_t count_discard_vm_struct_pages = 0; - uint32_t i; - uint32_t bank; - hibernate_bitmap_t * bitmap; - hibernate_bitmap_t * bitmap_wired; - boolean_t discard_all; - boolean_t discard; - - HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight); - - if (preflight) { - page_list = NULL; - page_list_wired = NULL; - page_list_pal = NULL; + hibernate_page_list_t * page_list_wired, + hibernate_page_list_t * page_list_pal, + boolean_t preflight, + boolean_t will_discard, + uint32_t * pagesOut) +{ + uint64_t start, end, nsec; + vm_page_t m; + vm_page_t next; + uint32_t pages = page_list->page_count; + uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0; + uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0; + uint32_t count_wire = pages; + uint32_t count_discard_active = 0; + uint32_t count_discard_inactive = 0; + uint32_t count_discard_cleaned = 0; + uint32_t count_discard_purgeable = 0; + uint32_t count_discard_speculative = 0; + uint32_t count_discard_vm_struct_pages = 0; + uint32_t i; + uint32_t bank; + hibernate_bitmap_t * bitmap; + hibernate_bitmap_t * bitmap_wired; + boolean_t discard_all; + boolean_t discard; + + HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight); + + if (preflight) { + page_list = NULL; + page_list_wired = NULL; + page_list_pal = NULL; discard_all = FALSE; - } else { + } else { discard_all = will_discard; - } + } #if MACH_ASSERT || DEBUG - if (!preflight) - { - assert(hibernate_vm_locks_are_safe()); - vm_page_lock_queues(); + if (!preflight) { + assert(hibernate_vm_locks_are_safe()); + vm_page_lock_queues(); + if (vm_page_local_q) { + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_LOCK(&lq->vpl_lock); + } + } + } +#endif /* MACH_ASSERT || DEBUG */ + + + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0); + + clock_get_uptime(&start); + + if (!preflight) { + hibernate_page_list_zero(page_list); + hibernate_page_list_zero(page_list_wired); + hibernate_page_list_zero(page_list_pal); + + hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count; + hibernate_stats.cd_pages = pages; + } + if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; - 
VPL_LOCK(&lq->vpl_lock); - } + for (i = 0; i < vm_page_local_q_count; i++) { + vm_page_reactivate_local(i, TRUE, !preflight); + } + } + + if (preflight) { + vm_object_lock(compressor_object); + vm_page_lock_queues(); + lck_mtx_lock(&vm_page_queue_free_lock); + } + + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + + hibernation_vmqueues_inspection = TRUE; + + m = (vm_page_t) hibernate_gobble_queue; + while (m) { + pages--; + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + m = m->vmp_snext; + } + + if (!preflight) { + for (i = 0; i < real_ncpus; i++) { + if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor) { + for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = m->vmp_snext) { + assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q); + + pages--; + count_wire--; + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + + hibernate_stats.cd_local_free++; + hibernate_stats.cd_total_free++; + } + } + } + } + + for (i = 0; i < vm_colors; i++) { + vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) { + assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q); + + pages--; + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + + hibernate_stats.cd_total_free++; + } + } + } + + vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) { + assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q); + + pages--; + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + + hibernate_stats.cd_total_free++; + } + } + + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled); + while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q); + + next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + discard = FALSE; + if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) + && hibernate_consider_discard(m, preflight)) { + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + count_discard_inactive++; + discard = discard_all; + } else { + count_throttled++; + } + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + + if (discard) { + hibernate_discard_page(m); + } + m = next; + } + + m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous); + while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q); + + next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + discard = FALSE; + if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) && + hibernate_consider_discard(m, preflight)) { + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (m->vmp_dirty) { + count_discard_purgeable++; + } else { + count_discard_inactive++; + } + discard = discard_all; + } else { + count_anonymous++; + } + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (discard) { + hibernate_discard_page(m); + } + m = next; + } + + m = (vm_page_t) 
vm_page_queue_first(&vm_page_queue_cleaned); + while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q); + + next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + discard = FALSE; + if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) && + hibernate_consider_discard(m, preflight)) { + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (m->vmp_dirty) { + count_discard_purgeable++; + } else { + count_discard_cleaned++; + } + discard = discard_all; + } else { + count_cleaned++; + } + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (discard) { + hibernate_discard_page(m); + } + m = next; + } + + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active); + while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q); + + next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + discard = FALSE; + if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) && + hibernate_consider_discard(m, preflight)) { + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (m->vmp_dirty) { + count_discard_purgeable++; + } else { + count_discard_active++; + } + discard = discard_all; + } else { + count_active++; + } + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (discard) { + hibernate_discard_page(m); + } + m = next; + } + + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); + while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q); + + next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + discard = FALSE; + if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) && + hibernate_consider_discard(m, preflight)) { + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (m->vmp_dirty) { + count_discard_purgeable++; + } else { + count_discard_inactive++; + } + discard = discard_all; + } else { + count_inactive++; + } + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (discard) { + hibernate_discard_page(m); + } + m = next; + } + /* XXX FBDP TODO: secluded queue */ + + for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) { + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q); + while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) { + assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q, + "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)", + m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight); + + next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + discard = FALSE; + if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) && + hibernate_consider_discard(m, preflight)) { + if (!preflight) { + hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + count_discard_speculative++; + discard = discard_all; + } else { + count_speculative++; + } + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + if (discard) { + hibernate_discard_page(m); + } + m = next; + } } - } -#endif /* MACH_ASSERT || DEBUG */ + 
vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) { + assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR); - KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0); - - clock_get_uptime(&start); + count_compressor++; + count_wire--; + if (!preflight) { + hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + } + } - if (!preflight) { - hibernate_page_list_zero(page_list); - hibernate_page_list_zero(page_list_wired); - hibernate_page_list_zero(page_list_pal); - - hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count; - hibernate_stats.cd_pages = pages; - } + if (preflight == FALSE && discard_all == TRUE) { + KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START); - if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) - vm_page_reactivate_local(i, TRUE, !preflight); - } + HIBLOG("hibernate_teardown started\n"); + count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired); + HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages); - if (preflight) { - vm_object_lock(compressor_object); - vm_page_lock_queues(); - lck_mtx_lock(&vm_page_queue_free_lock); - } + pages -= count_discard_vm_struct_pages; + count_wire -= count_discard_vm_struct_pages; - LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); + hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages; - hibernation_vmqueues_inspection = TRUE; + KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END); + } - m = (vm_page_t) hibernate_gobble_queue; - while (m) - { - pages--; - count_wire--; if (!preflight) { - hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); + // pull wired from hibernate_bitmap + bitmap = &page_list->bank_bitmap[0]; + bitmap_wired = &page_list_wired->bank_bitmap[0]; + for (bank = 0; bank < page_list->bank_count; bank++) { + for (i = 0; i < bitmap->bitmapwords; i++) { + bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i]; + } + bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords]; + bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords]; + } } - m = m->vmp_snext; - } - - if (!preflight) for( i = 0; i < real_ncpus; i++ ) - { - if (cpu_data_ptr[i] && cpu_data_ptr[i]->cpu_processor) - { - for (m = PROCESSOR_DATA(cpu_data_ptr[i]->cpu_processor, free_pages); m; m = m->vmp_snext) - { - assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q); - pages--; - count_wire--; - hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - - hibernate_stats.cd_local_free++; - hibernate_stats.cd_total_free++; - } - } - } + // machine dependent adjustments + hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages); - for( i = 0; i < vm_colors; i++ ) - { - vm_page_queue_iterate(&vm_page_queue_free[i].qhead, - m, - vm_page_t, - vmp_pageq) - { - assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q); - - pages--; - count_wire--; - if (!preflight) { - hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - - hibernate_stats.cd_total_free++; - } - } - } - - vm_page_queue_iterate(&vm_lopage_queue_free, - m, - vm_page_t, - vmp_pageq) - { - assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q); - - pages--; - count_wire--; if (!preflight) { - 
hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - - hibernate_stats.cd_total_free++; - } - } - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled); - while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q); - - next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - discard = FALSE; - if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m, preflight)) - { - if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - count_discard_inactive++; - discard = discard_all; - } - else - count_throttled++; - count_wire--; - if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - - if (discard) hibernate_discard_page(m); - m = next; - } - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); - while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q); - - next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - discard = FALSE; - if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m, preflight)) - { - if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (m->vmp_dirty) - count_discard_purgeable++; - else - count_discard_inactive++; - discard = discard_all; - } - else - count_anonymous++; - count_wire--; - if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (discard) hibernate_discard_page(m); - m = next; - } - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned); - while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q); - - next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - discard = FALSE; - if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m, preflight)) - { - if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (m->vmp_dirty) - count_discard_purgeable++; - else - count_discard_cleaned++; - discard = discard_all; - } - else - count_cleaned++; - count_wire--; - if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (discard) hibernate_discard_page(m); - m = next; - } - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active); - while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q); - - next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - discard = FALSE; - if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) - && hibernate_consider_discard(m, preflight)) - { - if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (m->vmp_dirty) - count_discard_purgeable++; - else - count_discard_active++; - discard = discard_all; - } - else - count_active++; - count_wire--; - if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (discard) hibernate_discard_page(m); - m = next; - } - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); - while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q); - - next = 
(vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - discard = FALSE; - if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m, preflight)) - { - if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (m->vmp_dirty) - count_discard_purgeable++; - else - count_discard_inactive++; - discard = discard_all; - } - else - count_inactive++; - count_wire--; - if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (discard) hibernate_discard_page(m); - m = next; - } - /* XXX FBDP TODO: secluded queue */ - - for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) - { - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q); - while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q); - assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q, - "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)", - m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight); - - next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - discard = FALSE; - if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) - && hibernate_consider_discard(m, preflight)) - { - if (!preflight) hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - count_discard_speculative++; - discard = discard_all; - } - else - count_speculative++; - count_wire--; - if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - if (discard) hibernate_discard_page(m); - m = next; - } - } - - vm_page_queue_iterate(&compressor_object->memq, m, vm_page_t, vmp_listq) - { - assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR); - - count_compressor++; - count_wire--; - if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m)); - } - - if (preflight == FALSE && discard_all == TRUE) { - KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START); - - HIBLOG("hibernate_teardown started\n"); - count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired); - HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages); - - pages -= count_discard_vm_struct_pages; - count_wire -= count_discard_vm_struct_pages; - - hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages; - - KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END); - } - - if (!preflight) { - // pull wired from hibernate_bitmap - bitmap = &page_list->bank_bitmap[0]; - bitmap_wired = &page_list_wired->bank_bitmap[0]; - for (bank = 0; bank < page_list->bank_count; bank++) - { - for (i = 0; i < bitmap->bitmapwords; i++) - bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i]; - bitmap = (hibernate_bitmap_t *) &bitmap->bitmap [bitmap->bitmapwords]; - bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords]; + hibernate_stats.cd_count_wire = count_wire; + hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable + + count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages; } - } - - // machine dependent adjustments - hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages); - if (!preflight) { - hibernate_stats.cd_count_wire = count_wire; - hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable + - count_discard_speculative 
+ count_discard_cleaned + count_discard_vm_struct_pages; - } + clock_get_uptime(&end); + absolutetime_to_nanoseconds(end - start, &nsec); + HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL); - clock_get_uptime(&end); - absolutetime_to_nanoseconds(end - start, &nsec); - HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL); + HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n", + pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped, + discard_all ? "did" : "could", + count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned); - HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d\n", - pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped, - discard_all ? "did" : "could", - count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned); - - if (hibernate_stats.cd_skipped_xpmapped) - HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped); + if (hibernate_stats.cd_skipped_xpmapped) { + HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped); + } - *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned; + *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned; - if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active; + if (preflight && will_discard) { + *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active; + } - hibernation_vmqueues_inspection = FALSE; + hibernation_vmqueues_inspection = FALSE; #if MACH_ASSERT || DEBUG - if (!preflight) - { - if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; - VPL_UNLOCK(&lq->vpl_lock); - } + if (!preflight) { + if (vm_page_local_q) { + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_UNLOCK(&lq->vpl_lock); + } + } + vm_page_unlock_queues(); } - vm_page_unlock_queues(); - } #endif /* MACH_ASSERT || DEBUG */ - if (preflight) { - lck_mtx_unlock(&vm_page_queue_free_lock); - vm_page_unlock_queues(); - vm_object_unlock(compressor_object); - } + if (preflight) { + lck_mtx_unlock(&vm_page_queue_free_lock); + vm_page_unlock_queues(); + vm_object_unlock(compressor_object); + } - KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0); + KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0); } void hibernate_page_list_discard(hibernate_page_list_t * page_list) { - uint64_t start, end, nsec; - vm_page_t m; - vm_page_t next; - uint32_t i; - uint32_t count_discard_active = 
0; - uint32_t count_discard_inactive = 0; - uint32_t count_discard_purgeable = 0; - uint32_t count_discard_cleaned = 0; - uint32_t count_discard_speculative = 0; + uint64_t start, end, nsec; + vm_page_t m; + vm_page_t next; + uint32_t i; + uint32_t count_discard_active = 0; + uint32_t count_discard_inactive = 0; + uint32_t count_discard_purgeable = 0; + uint32_t count_discard_cleaned = 0; + uint32_t count_discard_speculative = 0; #if MACH_ASSERT || DEBUG - vm_page_lock_queues(); + vm_page_lock_queues(); if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; - VPL_LOCK(&lq->vpl_lock); - } + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_LOCK(&lq->vpl_lock); + } } #endif /* MACH_ASSERT || DEBUG */ - clock_get_uptime(&start); - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); - while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q); - - next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) - { - if (m->vmp_dirty) - count_discard_purgeable++; - else - count_discard_inactive++; - hibernate_discard_page(m); - } - m = next; - } - - for( i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++ ) - { - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q); - while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q); - - next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) - { - count_discard_speculative++; - hibernate_discard_page(m); - } - m = next; - } - } - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); - while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q); - - next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) - { - if (m->vmp_dirty) - count_discard_purgeable++; - else - count_discard_inactive++; - hibernate_discard_page(m); - } - m = next; - } - /* XXX FBDP TODO: secluded queue */ - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active); - while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q); - - next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) - { - if (m->vmp_dirty) - count_discard_purgeable++; - else - count_discard_active++; - hibernate_discard_page(m); - } - m = next; - } - - m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned); - while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) - { - assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q); - - next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); - if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) - { - if (m->vmp_dirty) - count_discard_purgeable++; - else - count_discard_cleaned++; - hibernate_discard_page(m); - } - m = next; - } + clock_get_uptime(&start); + + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous); + while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == 
VM_PAGE_ON_INACTIVE_INTERNAL_Q); + + next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) { + if (m->vmp_dirty) { + count_discard_purgeable++; + } else { + count_discard_inactive++; + } + hibernate_discard_page(m); + } + m = next; + } + + for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) { + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q); + while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q); + + next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) { + count_discard_speculative++; + hibernate_discard_page(m); + } + m = next; + } + } + + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive); + while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q); + + next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) { + if (m->vmp_dirty) { + count_discard_purgeable++; + } else { + count_discard_inactive++; + } + hibernate_discard_page(m); + } + m = next; + } + /* XXX FBDP TODO: secluded queue */ + + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active); + while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q); + + next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) { + if (m->vmp_dirty) { + count_discard_purgeable++; + } else { + count_discard_active++; + } + hibernate_discard_page(m); + } + m = next; + } + + m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned); + while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) { + assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q); + + next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next); + if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) { + if (m->vmp_dirty) { + count_discard_purgeable++; + } else { + count_discard_cleaned++; + } + hibernate_discard_page(m); + } + m = next; + } #if MACH_ASSERT || DEBUG if (vm_page_local_q) { - for (i = 0; i < vm_page_local_q_count; i++) { - struct vpl *lq; - lq = &vm_page_local_q[i].vpl_un.vpl; - VPL_UNLOCK(&lq->vpl_lock); - } + for (i = 0; i < vm_page_local_q_count; i++) { + struct vpl *lq; + lq = &vm_page_local_q[i].vpl_un.vpl; + VPL_UNLOCK(&lq->vpl_lock); + } } - vm_page_unlock_queues(); + vm_page_unlock_queues(); #endif /* MACH_ASSERT || DEBUG */ - clock_get_uptime(&end); - absolutetime_to_nanoseconds(end - start, &nsec); - HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n", - nsec / 1000000ULL, - count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned); + clock_get_uptime(&end); + absolutetime_to_nanoseconds(end - start, &nsec); + HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n", + nsec / 1000000ULL, + count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned); } boolean_t hibernate_paddr_map_inited = FALSE; -unsigned int hibernate_teardown_last_valid_compact_indx = -1; -vm_page_t hibernate_rebuild_hash_list = NULL; +unsigned int 
hibernate_teardown_last_valid_compact_indx = -1; +vm_page_t hibernate_rebuild_hash_list = NULL; -unsigned int hibernate_teardown_found_tabled_pages = 0; -unsigned int hibernate_teardown_found_created_pages = 0; -unsigned int hibernate_teardown_found_free_pages = 0; -unsigned int hibernate_teardown_vm_page_free_count; +unsigned int hibernate_teardown_found_tabled_pages = 0; +unsigned int hibernate_teardown_found_created_pages = 0; +unsigned int hibernate_teardown_found_free_pages = 0; +unsigned int hibernate_teardown_vm_page_free_count; struct ppnum_mapping { - struct ppnum_mapping *ppnm_next; - ppnum_t ppnm_base_paddr; - unsigned int ppnm_sindx; - unsigned int ppnm_eindx; + struct ppnum_mapping *ppnm_next; + ppnum_t ppnm_base_paddr; + unsigned int ppnm_sindx; + unsigned int ppnm_eindx; }; -struct ppnum_mapping *ppnm_head; -struct ppnum_mapping *ppnm_last_found = NULL; +struct ppnum_mapping *ppnm_head; +struct ppnum_mapping *ppnm_last_found = NULL; void -hibernate_create_paddr_map() +hibernate_create_paddr_map() { - unsigned int i; - ppnum_t next_ppnum_in_run = 0; + unsigned int i; + ppnum_t next_ppnum_in_run = 0; struct ppnum_mapping *ppnm = NULL; if (hibernate_paddr_map_inited == FALSE) { - for (i = 0; i < vm_pages_count; i++) { - - if (ppnm) + if (ppnm) { ppnm->ppnm_eindx = i; + } if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) { - ppnm = kalloc(sizeof(struct ppnum_mapping)); ppnm->ppnm_next = ppnm_head; @@ -7544,51 +7792,51 @@ ppnum_t hibernate_lookup_paddr(unsigned int indx) { struct ppnum_mapping *ppnm = NULL; - + ppnm = ppnm_last_found; if (ppnm) { - if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) + if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) { goto done; + } } for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) { - if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) { ppnm_last_found = ppnm; break; } } - if (ppnm == NULL) + if (ppnm == NULL) { panic("hibernate_lookup_paddr of %d failed\n", indx); + } done: - return (ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx)); + return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx); } uint32_t hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired) { - addr64_t saddr_aligned; - addr64_t eaddr_aligned; - addr64_t addr; - ppnum_t paddr; - unsigned int mark_as_unneeded_pages = 0; + addr64_t saddr_aligned; + addr64_t eaddr_aligned; + addr64_t addr; + ppnum_t paddr; + unsigned int mark_as_unneeded_pages = 0; saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64; eaddr_aligned = eaddr & ~PAGE_MASK_64; for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) { - paddr = pmap_find_phys(kernel_pmap, addr); assert(paddr); - hibernate_page_bitset(page_list, TRUE, paddr); + hibernate_page_bitset(page_list, TRUE, paddr); hibernate_page_bitset(page_list_wired, TRUE, paddr); mark_as_unneeded_pages++; } - return (mark_as_unneeded_pages); + return mark_as_unneeded_pages; } @@ -7596,8 +7844,8 @@ void hibernate_hash_insert_page(vm_page_t mem) { vm_page_bucket_t *bucket; - int hash_id; - vm_object_t m_object; + int hash_id; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(mem); @@ -7619,8 +7867,8 @@ hibernate_hash_insert_page(vm_page_t mem) void hibernate_free_range(int sindx, int eindx) { - vm_page_t mem; - unsigned int color; + vm_page_t mem; + unsigned int color; while (sindx < eindx) { mem = &vm_pages[sindx]; @@ -7632,15 +7880,9 @@ hibernate_free_range(int sindx, int eindx) color = VM_PAGE_GET_COLOR(mem); 
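/*
 * The hole of freed vm_page_t's goes back onto the per-color free queues.
 * As the #if below shows, x86_64 uses the clump-aware enqueue, which keeps
 * runs of physically contiguous pages grouped on the free queue; the other
 * architectures take the plain colored enqueue.
 */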
#if defined(__x86_64__) - vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq); + vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem); #else - vm_page_queue_enter(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq); + vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq); #endif vm_page_free_count++; @@ -7654,13 +7896,14 @@ extern void hibernate_rebuild_pmap_structs(void); void hibernate_rebuild_vm_structs(void) { - int i, cindx, sindx, eindx; - vm_page_t mem, tmem, mem_next; - AbsoluteTime startTime, endTime; - uint64_t nsec; + int i, cindx, sindx, eindx; + vm_page_t mem, tmem, mem_next; + AbsoluteTime startTime, endTime; + uint64_t nsec; - if (hibernate_rebuild_needed == FALSE) + if (hibernate_rebuild_needed == FALSE) { return; + } KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START); HIBLOG("hibernate_rebuild started\n"); @@ -7673,15 +7916,15 @@ hibernate_rebuild_vm_structs(void) eindx = vm_pages_count; /* - * Mark all the vm_pages[] that have not been initialized yet as being + * Mark all the vm_pages[] that have not been initialized yet as being * transient. This is needed to ensure that buddy page search is correct. - * Without this, random data in these vm_pages[] can trip the buddy search + * Without this, random data in these vm_pages[] can trip the buddy search */ - for (i = hibernate_teardown_last_valid_compact_indx+1; i < eindx; ++i) + for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) { vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q; + } for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) { - mem = &vm_pages[cindx]; assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q); /* @@ -7701,25 +7944,27 @@ hibernate_rebuild_vm_structs(void) *tmem = *mem; mem = tmem; } - if (mem->vmp_hashed) + if (mem->vmp_hashed) { hibernate_hash_insert_page(mem); + } /* * the 'hole' between this vm_page_t and the previous - * vm_page_t we moved needs to be initialized as + * vm_page_t we moved needs to be initialized as * a range of free vm_page_t's */ hibernate_free_range(sindx + 1, eindx); eindx = sindx; } - if (sindx) + if (sindx) { hibernate_free_range(0, sindx); + } assert(vm_page_free_count == hibernate_teardown_vm_page_free_count); /* * process the list of vm_page_t's that were entered in the hash, - * but were not located in the vm_pages array... these are + * but were not located in the vm_pages array... these are * vm_page_t's that were created on the fly (i.e.
fictitious) */ for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) { @@ -7730,9 +7975,9 @@ hibernate_rebuild_vm_structs(void) } hibernate_rebuild_hash_list = NULL; - clock_get_uptime(&endTime); - SUB_ABSOLUTETIME(&endTime, &startTime); - absolutetime_to_nanoseconds(endTime, &nsec); + clock_get_uptime(&endTime); + SUB_ABSOLUTETIME(&endTime, &startTime); + absolutetime_to_nanoseconds(endTime, &nsec); HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL); @@ -7747,29 +7992,29 @@ extern void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *); uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired) { - unsigned int i; - unsigned int compact_target_indx; - vm_page_t mem, mem_next; + unsigned int i; + unsigned int compact_target_indx; + vm_page_t mem, mem_next; vm_page_bucket_t *bucket; - unsigned int mark_as_unneeded_pages = 0; - unsigned int unneeded_vm_page_bucket_pages = 0; - unsigned int unneeded_vm_pages_pages = 0; - unsigned int unneeded_pmap_pages = 0; - addr64_t start_of_unneeded = 0; - addr64_t end_of_unneeded = 0; + unsigned int mark_as_unneeded_pages = 0; + unsigned int unneeded_vm_page_bucket_pages = 0; + unsigned int unneeded_vm_pages_pages = 0; + unsigned int unneeded_pmap_pages = 0; + addr64_t start_of_unneeded = 0; + addr64_t end_of_unneeded = 0; + - - if (hibernate_should_abort()) - return (0); + if (hibernate_should_abort()) { + return 0; + } hibernate_rebuild_needed = TRUE; HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n", - vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, - vm_page_cleaned_count, compressor_object->resident_page_count); + vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, + vm_page_cleaned_count, compressor_object->resident_page_count); for (i = 0; i < vm_page_bucket_count; i++) { - bucket = &vm_page_buckets[i]; for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) { @@ -7791,7 +8036,6 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l compact_target_indx = 0; for (i = 0; i < vm_pages_count; i++) { - mem = &vm_pages[i]; if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) { @@ -7802,10 +8046,7 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l color = VM_PAGE_GET_COLOR(mem); - vm_page_queue_remove(&vm_page_queue_free[color].qhead, - mem, - vm_page_t, - vmp_pageq); + vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq); VM_PAGE_ZERO_PAGEQ_ENTRY(mem); @@ -7813,8 +8054,9 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l hibernate_teardown_found_free_pages++; - if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) + if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) { compact_target_indx = i; + } } else { /* * record this vm_page_t's original location @@ -7834,12 +8076,13 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l hibernate_teardown_last_valid_compact_indx = compact_target_indx; compact_target_indx++; - } else + } else { hibernate_teardown_last_valid_compact_indx = i; + } } } - unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx+1], - 
(addr64_t)&vm_pages[vm_pages_count-1], page_list, page_list_wired); + unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1], + (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired); mark_as_unneeded_pages += unneeded_vm_pages_pages; hibernate_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded); @@ -7850,7 +8093,7 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l } HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages); - return (mark_as_unneeded_pages); + return mark_as_unneeded_pages; } @@ -7859,7 +8102,7 @@ hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_l /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include -#if MACH_VM_DEBUG +#if MACH_VM_DEBUG #include #include @@ -7881,10 +8124,11 @@ vm_page_info( unsigned int count) { unsigned int i; - lck_spin_t *bucket_lock; + lck_spin_t *bucket_lock; - if (vm_page_bucket_count < count) + if (vm_page_bucket_count < count) { count = vm_page_bucket_count; + } for (i = 0; i < count; i++) { vm_page_bucket_t *bucket = &vm_page_buckets[i]; @@ -7892,12 +8136,13 @@ vm_page_info( vm_page_t m; bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK]; - lck_spin_lock(bucket_lock); + lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket); for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); - m != VM_PAGE_NULL; - m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) + m != VM_PAGE_NULL; + m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) { bucket_count++; + } lck_spin_unlock(bucket_lock); @@ -7907,7 +8152,7 @@ vm_page_info( return vm_page_bucket_count; } -#endif /* MACH_VM_DEBUG */ +#endif /* MACH_VM_DEBUG */ #if VM_PAGE_BUCKETS_CHECK void @@ -7917,7 +8162,7 @@ vm_page_buckets_check(void) vm_page_t p; unsigned int p_hash; vm_page_bucket_t *bucket; - lck_spin_t *bucket_lock; + lck_spin_t *bucket_lock; if (!vm_page_buckets_check_ready) { return; @@ -7927,29 +8172,29 @@ vm_page_buckets_check(void) if (hibernate_rebuild_needed || hibernate_rebuild_hash_list) { panic("BUCKET_CHECK: hibernation in progress: " - "rebuild_needed=%d rebuild_hash_list=%p\n", - hibernate_rebuild_needed, - hibernate_rebuild_hash_list); + "rebuild_needed=%d rebuild_hash_list=%p\n", + hibernate_rebuild_needed, + hibernate_rebuild_hash_list); } #endif /* HIBERNATION */ #if VM_PAGE_FAKE_BUCKETS char *cp; for (cp = (char *) vm_page_fake_buckets_start; - cp < (char *) vm_page_fake_buckets_end; - cp++) { + cp < (char *) vm_page_fake_buckets_end; + cp++) { if (*cp != 0x5a) { panic("BUCKET_CHECK: corruption at %p in fake buckets " - "[0x%llx:0x%llx]\n", - cp, - (uint64_t) vm_page_fake_buckets_start, - (uint64_t) vm_page_fake_buckets_end); + "[0x%llx:0x%llx]\n", + cp, + (uint64_t) vm_page_fake_buckets_start, + (uint64_t) vm_page_fake_buckets_end); } } #endif /* VM_PAGE_FAKE_BUCKETS */ for (i = 0; i < vm_page_bucket_count; i++) { - vm_object_t p_object; + vm_object_t p_object; bucket = &vm_page_buckets[i]; if (!bucket->page_list) { @@ -7957,7 +8202,7 @@ vm_page_buckets_check(void) } bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK]; - lck_spin_lock(bucket_lock); + lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket); p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); while (p != VM_PAGE_NULL) { @@ -7965,18 +8210,18 @@ vm_page_buckets_check(void) if (!p->vmp_hashed) { panic("BUCKET_CHECK: page 
%p (%p,0x%llx) " - "hash %d in bucket %d at %p " - "is not hashed\n", - p, p_object, p->vmp_offset, - p_hash, i, bucket); + "hash %d in bucket %d at %p " + "is not hashed\n", + p, p_object, p->vmp_offset, + p_hash, i, bucket); } p_hash = vm_page_hash(p_object, p->vmp_offset); if (p_hash != i) { panic("BUCKET_CHECK: corruption in bucket %d " - "at %p: page %p object %p offset 0x%llx " - "hash %d\n", - i, bucket, p, p_object, p->vmp_offset, - p_hash); + "at %p: page %p object %p offset 0x%llx " + "hash %d\n", + i, bucket, p, p_object, p->vmp_offset, + p_hash); } p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)); } @@ -8011,15 +8256,14 @@ void vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) #endif { - boolean_t was_pageable = TRUE; - vm_object_t m_object; + boolean_t was_pageable = TRUE; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(mem); LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) - { + if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) { assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0); #if CONFIG_BACKGROUND_QUEUE if (remove_from_backgroundq == TRUE) { @@ -8036,13 +8280,12 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) return; } - if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) - { + if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) { assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0); #if CONFIG_BACKGROUND_QUEUE assert(mem->vmp_backgroundq.next == 0 && - mem->vmp_backgroundq.prev == 0 && - mem->vmp_on_backgroundq == FALSE); + mem->vmp_backgroundq.prev == 0 && + mem->vmp_on_backgroundq == FALSE); #endif return; } @@ -8054,8 +8297,8 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0); #if CONFIG_BACKGROUND_QUEUE assert(mem->vmp_backgroundq.next == 0 && - mem->vmp_backgroundq.prev == 0 && - mem->vmp_on_backgroundq == FALSE); + mem->vmp_backgroundq.prev == 0 && + mem->vmp_on_backgroundq == FALSE); #endif return; } @@ -8065,16 +8308,14 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) assert(m_object != vm_submap_object); assert(!mem->vmp_fictitious); - switch(mem->vmp_q_state) { - + switch (mem->vmp_q_state) { case VM_PAGE_ON_ACTIVE_LOCAL_Q: { - struct vpl *lq; + struct vpl *lq; lq = &vm_page_local_q[mem->vmp_local_id].vpl_un.vpl; VPL_LOCK(&lq->vpl_lock); - vm_page_queue_remove(&lq->vpl_queue, - mem, vm_page_t, vmp_pageq); + vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq); mem->vmp_local_id = 0; lq->vpl_count--; if (m_object->internal) { @@ -8088,8 +8329,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) } case VM_PAGE_ON_ACTIVE_Q: { - vm_page_queue_remove(&vm_page_queue_active, - mem, vm_page_t, vmp_pageq); + vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq); vm_page_active_count--; break; } @@ -8099,8 +8339,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) assert(m_object->internal == TRUE); vm_page_inactive_count--; - vm_page_queue_remove(&vm_page_queue_anonymous, - mem, vm_page_t, vmp_pageq); + vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq); vm_page_anonymous_count--; vm_purgeable_q_advance_all(); @@ -8113,8 +8352,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) assert(m_object->internal == FALSE); vm_page_inactive_count--; - vm_page_queue_remove(&vm_page_queue_inactive, - mem, 
vm_page_t, vmp_pageq); + vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq); vm_purgeable_q_advance_all(); vm_page_balance_inactive(3); break; @@ -8125,8 +8363,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) assert(m_object->internal == FALSE); vm_page_inactive_count--; - vm_page_queue_remove(&vm_page_queue_cleaned, - mem, vm_page_t, vmp_pageq); + vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq); vm_page_cleaned_count--; vm_page_balance_inactive(3); break; @@ -8136,8 +8373,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) { assert(m_object->internal == TRUE); - vm_page_queue_remove(&vm_page_queue_throttled, - mem, vm_page_t, vmp_pageq); + vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq); vm_page_throttled_count--; was_pageable = FALSE; break; @@ -8147,7 +8383,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) { assert(m_object->internal == FALSE); - vm_page_remque(&mem->vmp_pageq); + vm_page_remque(&mem->vmp_pageq); vm_page_speculative_count--; vm_page_balance_inactive(3); break; @@ -8156,8 +8392,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) #if CONFIG_SECLUDED_MEMORY case VM_PAGE_ON_SECLUDED_Q: { - vm_page_queue_remove(&vm_page_queue_secluded, - mem, vm_page_t, vmp_pageq); + vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq); vm_page_secluded_count--; if (m_object == VM_OBJECT_NULL) { vm_page_secluded_count_free--; @@ -8176,10 +8411,10 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) { /* * if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) - * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue... - * the caller is responsible for determing if the page is on that queue, and if so, must - * either first remove it (it needs both the page queues lock and the object lock to do - * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove + * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue... 
+ * the caller is responsible for determining if the page is on that queue, and if so, must + * either first remove it (it needs both the page queues lock and the object lock to do + * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove * * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q * or any of the undefined states @@ -8187,14 +8422,14 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) panic("vm_page_queues_remove - bad page q_state (%p, %d)\n", mem, mem->vmp_q_state); break; } - } VM_PAGE_ZERO_PAGEQ_ENTRY(mem); mem->vmp_q_state = VM_PAGE_NOT_ON_Q; #if CONFIG_BACKGROUND_QUEUE - if (remove_from_backgroundq == TRUE) + if (remove_from_backgroundq == TRUE) { vm_page_remove_from_backgroundq(mem); + } #endif if (was_pageable) { if (m_object->internal) { @@ -8210,8 +8445,8 @@ vm_page_remove_internal(vm_page_t page) { vm_object_t __object = VM_PAGE_OBJECT(page); if (page == __object->memq_hint) { - vm_page_t __new_hint; - vm_page_queue_entry_t __qe; + vm_page_t __new_hint; + vm_page_queue_entry_t __qe; __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq); if (vm_page_queue_end(&__object->memq, __qe)) { __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq); @@ -8222,7 +8457,7 @@ vm_page_remove_internal(vm_page_t page) __new_hint = (vm_page_t)((uintptr_t) __qe); __object->memq_hint = __new_hint; } - vm_page_queue_remove(&__object->memq, page, vm_page_t, vmp_listq); + vm_page_queue_remove(&__object->memq, page, vmp_listq); #if CONFIG_SECLUDED_MEMORY if (__object->eligible_for_secluded) { vm_page_secluded.eligible_for_secluded--; @@ -8233,7 +8468,7 @@ vm_page_remove_internal(vm_page_t page) void vm_page_enqueue_inactive(vm_page_t mem, boolean_t first) { - vm_object_t m_object; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(mem); @@ -8246,20 +8481,22 @@ vm_page_enqueue_inactive(vm_page_t mem, boolean_t first) if (m_object->internal) { mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q; - if (first == TRUE) - vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vm_page_t, vmp_pageq); - else - vm_page_queue_enter(&vm_page_queue_anonymous, mem, vm_page_t, vmp_pageq); + if (first == TRUE) { + vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq); + } else { + vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq); + } vm_page_anonymous_count++; vm_page_pageable_internal_count++; } else { mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q; - if (first == TRUE) - vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vm_page_t, vmp_pageq); - else - vm_page_queue_enter(&vm_page_queue_inactive, mem, vm_page_t, vmp_pageq); + if (first == TRUE) { + vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq); + } else { + vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq); + } vm_page_pageable_external_count++; } @@ -8267,15 +8504,16 @@ vm_page_enqueue_inactive(vm_page_t mem, boolean_t first) token_new_pagecount++; #if CONFIG_BACKGROUND_QUEUE - if (mem->vmp_in_background) + if (mem->vmp_in_background) { vm_page_add_to_backgroundq(mem, FALSE); + } #endif } void vm_page_enqueue_active(vm_page_t mem, boolean_t first) { - vm_object_t m_object; + vm_object_t m_object; m_object = VM_PAGE_OBJECT(mem); @@ -8286,10 +8524,11 @@ vm_page_enqueue_active(vm_page_t mem, boolean_t first) vm_page_check_pageable_safe(mem); mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q; - if (first == TRUE) - vm_page_queue_enter_first(&vm_page_queue_active, mem, 
vm_page_t, vmp_pageq); - else - vm_page_queue_enter(&vm_page_queue_active, mem, vm_page_t, vmp_pageq); + if (first == TRUE) { + vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq); + } else { + vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq); + } vm_page_active_count++; if (m_object->internal) { @@ -8299,8 +8538,9 @@ vm_page_enqueue_active(vm_page_t mem, boolean_t first) } #if CONFIG_BACKGROUND_QUEUE - if (mem->vmp_in_background) + if (mem->vmp_in_background) { vm_page_add_to_backgroundq(mem, FALSE); + } #endif vm_page_balance_inactive(3); } @@ -8312,264 +8552,286 @@ vm_page_enqueue_active(vm_page_t mem, boolean_t first) void vm_page_check_pageable_safe(vm_page_t page) { - vm_object_t page_object; + vm_object_t page_object; page_object = VM_PAGE_OBJECT(page); if (page_object == kernel_object) { panic("vm_page_check_pageable_safe: trying to add page" \ - "from kernel object (%p) to pageable queue", kernel_object); + "from kernel object (%p) to pageable queue", kernel_object); } if (page_object == compressor_object) { panic("vm_page_check_pageable_safe: trying to add page" \ - "from compressor object (%p) to pageable queue", compressor_object); + "from compressor object (%p) to pageable queue", compressor_object); } if (page_object == vm_submap_object) { panic("vm_page_check_pageable_safe: trying to add page" \ - "from submap object (%p) to pageable queue", vm_submap_object); + "from submap object (%p) to pageable queue", vm_submap_object); } } /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * wired page diagnose - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ +* wired page diagnose +* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ #include -#define KA_SIZE(namelen, subtotalscount) \ +#define KA_SIZE(namelen, subtotalscount) \ (sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total))) -#define KA_NAME(alloc) \ +#define KA_NAME(alloc) \ ((char *)(&(alloc)->subtotals[(alloc->subtotalscount)])) -#define KA_NAME_LEN(alloc) \ +#define KA_NAME_LEN(alloc) \ (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT)) -vm_tag_t +vm_tag_t vm_tag_bt(void) { - uintptr_t* frameptr; - uintptr_t* frameptr_next; - uintptr_t retaddr; - uintptr_t kstackb, kstackt; - const vm_allocation_site_t * site; - thread_t cthread; - kern_allocation_name_t name; - - cthread = current_thread(); - if (__improbable(cthread == NULL)) return VM_KERN_MEMORY_OSFMK; - - if ((name = thread_get_kernel_state(cthread)->allocation_name)) - { - if (!name->tag) vm_tag_alloc(name); - return name->tag; - } - - kstackb = cthread->kernel_stack; - kstackt = kstackb + kernel_stack_size; - - /* Load stack frame pointer (EBP on x86) into frameptr */ - frameptr = __builtin_frame_address(0); - site = NULL; - while (frameptr != NULL) - { - /* Verify thread stack bounds */ - if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) break; - - /* Next frame pointer is pointed to by the previous one */ - frameptr_next = (uintptr_t*) *frameptr; - - /* Pull return address from one spot above the frame pointer */ - retaddr = *(frameptr + 1); - - - if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text)) - || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) - { - site = OSKextGetAllocationSiteForCaller(retaddr); - break; + uintptr_t* frameptr; + uintptr_t* frameptr_next; + uintptr_t retaddr; + 
uintptr_t kstackb, kstackt; + const vm_allocation_site_t * site; + thread_t cthread; + kern_allocation_name_t name; + + cthread = current_thread(); + if (__improbable(cthread == NULL)) { + return VM_KERN_MEMORY_OSFMK; + } + + if ((name = thread_get_kernel_state(cthread)->allocation_name)) { + if (!name->tag) { + vm_tag_alloc(name); + } + return name->tag; + } + + kstackb = cthread->kernel_stack; + kstackt = kstackb + kernel_stack_size; + + /* Load stack frame pointer (EBP on x86) into frameptr */ + frameptr = __builtin_frame_address(0); + site = NULL; + while (frameptr != NULL) { + /* Verify thread stack bounds */ + if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) { + break; + } + + /* Next frame pointer is pointed to by the previous one */ + frameptr_next = (uintptr_t*) *frameptr; + + /* Pull return address from one spot above the frame pointer */ + retaddr = *(frameptr + 1); + + + if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text)) + || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) { + site = OSKextGetAllocationSiteForCaller(retaddr); + break; + } + frameptr = frameptr_next; } - frameptr = frameptr_next; - } - return (site ? site->tag : VM_KERN_MEMORY_NONE); + return site ? site->tag : VM_KERN_MEMORY_NONE; } -static uint64_t free_tag_bits[VM_MAX_TAG_VALUE/64]; +static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64]; void vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP) { - vm_tag_t tag; - uint64_t avail; - uint32_t idx; - vm_allocation_site_t * prev; + vm_tag_t tag; + uint64_t avail; + uint32_t idx; + vm_allocation_site_t * prev; - if (site->tag) return; + if (site->tag) { + return; + } - idx = 0; - while (TRUE) - { + idx = 0; + while (TRUE) { avail = free_tag_bits[idx]; - if (avail) - { - tag = __builtin_clzll(avail); - avail &= ~(1ULL << (63 - tag)); - free_tag_bits[idx] = avail; - tag += (idx << 6); - break; + if (avail) { + tag = __builtin_clzll(avail); + avail &= ~(1ULL << (63 - tag)); + free_tag_bits[idx] = avail; + tag += (idx << 6); + break; } idx++; - if (idx >= ARRAY_COUNT(free_tag_bits)) - { - for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) - { + if (idx >= ARRAY_COUNT(free_tag_bits)) { + for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) { prev = vm_allocation_sites[idx]; - if (!prev) continue; - if (!KA_NAME_LEN(prev)) continue; - if (!prev->tag) continue; - if (prev->total) continue; - if (1 != prev->refcount) continue; + if (!prev) { + continue; + } + if (!KA_NAME_LEN(prev)) { + continue; + } + if (!prev->tag) { + continue; + } + if (prev->total) { + continue; + } + if (1 != prev->refcount) { + continue; + } assert(idx == prev->tag); tag = idx; prev->tag = VM_KERN_MEMORY_NONE; *releasesiteP = prev; break; - } - if (idx >= ARRAY_COUNT(vm_allocation_sites)) - { + } + if (idx >= ARRAY_COUNT(vm_allocation_sites)) { tag = VM_KERN_MEMORY_ANY; } - break; + break; } - } - site->tag = tag; + } + site->tag = tag; - OSAddAtomic16(1, &site->refcount); + OSAddAtomic16(1, &site->refcount); - if (VM_KERN_MEMORY_ANY != tag) vm_allocation_sites[tag] = site; + if (VM_KERN_MEMORY_ANY != tag) { + vm_allocation_sites[tag] = site; + } - if (tag > vm_allocation_tag_highest) vm_allocation_tag_highest = tag; + if (tag > vm_allocation_tag_highest) { + vm_allocation_tag_highest = tag; + } } static void vm_tag_free_locked(vm_tag_t tag) { - uint64_t avail; - uint32_t idx; - uint64_t bit; + uint64_t avail; + uint32_t idx; + uint64_t bit; - if (VM_KERN_MEMORY_ANY == 
tag) return; + if (VM_KERN_MEMORY_ANY == tag) { + return; + } - idx = (tag >> 6); - avail = free_tag_bits[idx]; - tag &= 63; - bit = (1ULL << (63 - tag)); - assert(!(avail & bit)); - free_tag_bits[idx] = (avail | bit); + idx = (tag >> 6); + avail = free_tag_bits[idx]; + tag &= 63; + bit = (1ULL << (63 - tag)); + assert(!(avail & bit)); + free_tag_bits[idx] = (avail | bit); } static void vm_tag_init(void) { - vm_tag_t tag; - for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) - { - vm_tag_free_locked(tag); - } + vm_tag_t tag; + for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) { + vm_tag_free_locked(tag); + } - for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) - { - vm_tag_free_locked(tag); - } + for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) { + vm_tag_free_locked(tag); + } } vm_tag_t vm_tag_alloc(vm_allocation_site_t * site) { - vm_tag_t tag; - vm_allocation_site_t * releasesite; + vm_tag_t tag; + vm_allocation_site_t * releasesite; - if (VM_TAG_BT & site->flags) - { + if (VM_TAG_BT & site->flags) { tag = vm_tag_bt(); - if (VM_KERN_MEMORY_NONE != tag) return (tag); - } + if (VM_KERN_MEMORY_NONE != tag) { + return tag; + } + } - if (!site->tag) - { + if (!site->tag) { releasesite = NULL; lck_spin_lock(&vm_allocation_sites_lock); vm_tag_alloc_locked(site, &releasesite); lck_spin_unlock(&vm_allocation_sites_lock); - if (releasesite) kern_allocation_name_release(releasesite); - } + if (releasesite) { + kern_allocation_name_release(releasesite); + } + } - return (site->tag); + return site->tag; } void vm_tag_update_size(vm_tag_t tag, int64_t delta) { - vm_allocation_site_t * allocation; - uint64_t prior; + vm_allocation_site_t * allocation; + uint64_t prior; - assert(VM_KERN_MEMORY_NONE != tag); - assert(tag < VM_MAX_TAG_VALUE); + assert(VM_KERN_MEMORY_NONE != tag); + assert(tag < VM_MAX_TAG_VALUE); - allocation = vm_allocation_sites[tag]; - assert(allocation); + allocation = vm_allocation_sites[tag]; + assert(allocation); - if (delta < 0) { + if (delta < 0) { assertf(allocation->total >= ((uint64_t)-delta), "tag %d, site %p", tag, allocation); - } - prior = OSAddAtomic64(delta, &allocation->total); + } + prior = OSAddAtomic64(delta, &allocation->total); #if DEBUG || DEVELOPMENT - uint64_t new, peak; + uint64_t new, peak; new = prior + delta; - do - { - peak = allocation->peak; - if (new <= peak) break; - } - while (!OSCompareAndSwap64(peak, new, &allocation->peak)); + do{ + peak = allocation->peak; + if (new <= peak) { + break; + } + }while (!OSCompareAndSwap64(peak, new, &allocation->peak)); #endif /* DEBUG || DEVELOPMENT */ - if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) return; + if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) { + return; + } - if (!prior && !allocation->tag) vm_tag_alloc(allocation); + if (!prior && !allocation->tag) { + vm_tag_alloc(allocation); + } } void kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta) { - uint64_t prior; + uint64_t prior; - if (delta < 0) { + if (delta < 0) { assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation); - } - prior = OSAddAtomic64(delta, &allocation->total); + } + prior = OSAddAtomic64(delta, &allocation->total); #if DEBUG || DEVELOPMENT - uint64_t new, peak; + uint64_t new, peak; new = prior + delta; - do - { - peak = allocation->peak; - if (new <= peak) break; - } - while (!OSCompareAndSwap64(peak, new, &allocation->peak)); + do{ + peak = allocation->peak; + if (new <= peak) { + break; + } + }while (!OSCompareAndSwap64(peak, 
new, &allocation->peak)); #endif /* DEBUG || DEVELOPMENT */ - if (!prior && !allocation->tag) vm_tag_alloc(allocation); + if (!prior && !allocation->tag) { + vm_tag_alloc(allocation); + } } #if VM_MAX_TAG_ZONES @@ -8577,86 +8839,91 @@ kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta) void vm_allocation_zones_init(void) { - kern_return_t ret; - vm_offset_t addr; + kern_return_t ret; + vm_offset_t addr; vm_size_t size; - size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **) - + 2 * VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t); + size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *) + + 2 * VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t); ret = kernel_memory_allocate(kernel_map, - &addr, round_page(size), 0, - KMA_ZERO, VM_KERN_MEMORY_DIAG); - assert(KERN_SUCCESS == ret); + &addr, round_page(size), 0, + KMA_ZERO, VM_KERN_MEMORY_DIAG); + assert(KERN_SUCCESS == ret); - vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr; - addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **); + vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr; + addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *); - // prepopulate VM_KERN_MEMORY_DIAG & VM_KERN_MEMORY_KALLOC so allocations - // in vm_tag_update_zone_size() won't recurse - vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG] = (vm_allocation_zone_total_t *) addr; - addr += VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t); - vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr; + // prepopulate VM_KERN_MEMORY_DIAG & VM_KERN_MEMORY_KALLOC so allocations + // in vm_tag_update_zone_size() won't recurse + vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG] = (vm_allocation_zone_total_t *) addr; + addr += VM_MAX_TAG_ZONES * sizeof(vm_allocation_zone_total_t); + vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr; } void vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx) { - vm_allocation_zone_total_t * zone; + vm_allocation_zone_total_t * zone; - assert(VM_KERN_MEMORY_NONE != tag); - assert(tag < VM_MAX_TAG_VALUE); + assert(VM_KERN_MEMORY_NONE != tag); + assert(tag < VM_MAX_TAG_VALUE); - if (zidx >= VM_MAX_TAG_ZONES) return; + if (zidx >= VM_MAX_TAG_ZONES) { + return; + } zone = vm_allocation_zone_totals[tag]; - if (!zone) - { - zone = kalloc_tag(VM_MAX_TAG_ZONES * sizeof(*zone), VM_KERN_MEMORY_DIAG); - if (!zone) return; - bzero(zone, VM_MAX_TAG_ZONES * sizeof(*zone)); - if (!OSCompareAndSwapPtr(NULL, zone, &vm_allocation_zone_totals[tag])) - { + if (!zone) { + zone = kalloc_tag(VM_MAX_TAG_ZONES * sizeof(*zone), VM_KERN_MEMORY_DIAG); + if (!zone) { + return; + } + bzero(zone, VM_MAX_TAG_ZONES * sizeof(*zone)); + if (!OSCompareAndSwapPtr(NULL, zone, &vm_allocation_zone_totals[tag])) { kfree(zone, VM_MAX_TAG_ZONES * sizeof(*zone)); } - } + } } void vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwaste) { - vm_allocation_zone_total_t * zone; - uint32_t new; + vm_allocation_zone_total_t * zone; + uint32_t new; - assert(VM_KERN_MEMORY_NONE != tag); - assert(tag < VM_MAX_TAG_VALUE); + assert(VM_KERN_MEMORY_NONE != tag); + assert(tag < VM_MAX_TAG_VALUE); - if (zidx >= VM_MAX_TAG_ZONES) return; + if (zidx >= VM_MAX_TAG_ZONES) { + return; + } zone = vm_allocation_zone_totals[tag]; - assert(zone); - zone += zidx; + assert(zone); + zone += zidx; - /* the zone is locked */ - if (delta < 0) - { + /* the zone is locked */ + if (delta < 0) { assertf(zone->total >= 
((uint64_t)-delta), "zidx %d, tag %d, %p", zidx, tag, zone); - zone->total += delta; - } - else - { zone->total += delta; - if (zone->total > zone->peak) zone->peak = zone->total; - if (dwaste) - { + } else { + zone->total += delta; + if (zone->total > zone->peak) { + zone->peak = zone->total; + } + if (dwaste) { new = zone->waste; - if (zone->wastediv < 65536) zone->wastediv++; - else new -= (new >> 16); + if (zone->wastediv < 65536) { + zone->wastediv++; + } else { + new -= (new >> 16); + } __assert_only bool ov = os_add_overflow(new, dwaste, &new); assert(!ov); zone->waste = new; - } - } + } + } } #endif /* VM_MAX_TAG_ZONES */ @@ -8664,211 +8931,203 @@ vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, int64_t delta, int64_t dwas void kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta) { - kern_allocation_name_t other; + kern_allocation_name_t other; struct vm_allocation_total * total; - uint32_t subidx; + uint32_t subidx; - subidx = 0; - assert(VM_KERN_MEMORY_NONE != subtag); - for (; subidx < allocation->subtotalscount; subidx++) - { - if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) - { + subidx = 0; + assert(VM_KERN_MEMORY_NONE != subtag); + for (; subidx < allocation->subtotalscount; subidx++) { + if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) { allocation->subtotals[subidx].tag = subtag; break; } - if (subtag == allocation->subtotals[subidx].tag) break; + if (subtag == allocation->subtotals[subidx].tag) { + break; + } + } + assert(subidx < allocation->subtotalscount); + if (subidx >= allocation->subtotalscount) { + return; } - assert(subidx < allocation->subtotalscount); - if (subidx >= allocation->subtotalscount) return; - total = &allocation->subtotals[subidx]; - other = vm_allocation_sites[subtag]; - assert(other); + total = &allocation->subtotals[subidx]; + other = vm_allocation_sites[subtag]; + assert(other); - if (delta < 0) - { + if (delta < 0) { assertf(total->total >= ((uint64_t)-delta), "name %p", allocation); - OSAddAtomic64(delta, &total->total); + OSAddAtomic64(delta, &total->total); assertf(other->mapped >= ((uint64_t)-delta), "other %p", other); - OSAddAtomic64(delta, &other->mapped); - } - else - { - OSAddAtomic64(delta, &other->mapped); - OSAddAtomic64(delta, &total->total); - } + OSAddAtomic64(delta, &other->mapped); + } else { + OSAddAtomic64(delta, &other->mapped); + OSAddAtomic64(delta, &total->total); + } } const char * kern_allocation_get_name(kern_allocation_name_t allocation) { - return (KA_NAME(allocation)); + return KA_NAME(allocation); } kern_allocation_name_t kern_allocation_name_allocate(const char * name, uint32_t subtotalscount) { - uint32_t namelen; + uint32_t namelen; - namelen = (uint32_t) strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1); + namelen = (uint32_t) strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1); - kern_allocation_name_t allocation; - allocation = kalloc(KA_SIZE(namelen, subtotalscount)); - bzero(allocation, KA_SIZE(namelen, subtotalscount)); + kern_allocation_name_t allocation; + allocation = kalloc(KA_SIZE(namelen, subtotalscount)); + bzero(allocation, KA_SIZE(namelen, subtotalscount)); - allocation->refcount = 1; - allocation->subtotalscount = subtotalscount; - allocation->flags = (namelen << VM_TAG_NAME_LEN_SHIFT); - strlcpy(KA_NAME(allocation), name, namelen + 1); + allocation->refcount = 1; + allocation->subtotalscount = subtotalscount; + allocation->flags = (namelen << VM_TAG_NAME_LEN_SHIFT); + strlcpy(KA_NAME(allocation), name, namelen + 1); - 
return (allocation); + return allocation; } void kern_allocation_name_release(kern_allocation_name_t allocation) { - assert(allocation->refcount > 0); - if (1 == OSAddAtomic16(-1, &allocation->refcount)) - { - kfree(allocation, KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount)); - } + assert(allocation->refcount > 0); + if (1 == OSAddAtomic16(-1, &allocation->refcount)) { + kfree(allocation, KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount)); + } } vm_tag_t kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation) { - return (vm_tag_alloc(allocation)); + return vm_tag_alloc(allocation); } -#if ! VM_TAG_ACTIVE_UPDATE -static void +#if !VM_TAG_ACTIVE_UPDATE +static void vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object) { - if (!object->wired_page_count) return; - if (object != kernel_object) - { + if (!object->wired_page_count) { + return; + } + if (object != kernel_object) { assert(object->wire_tag < num_info); info[object->wire_tag].size += ptoa_64(object->wired_page_count); - } + } } typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info, - unsigned int num_info, vm_object_t object); + unsigned int num_info, vm_object_t object); -static void +static void vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info, - vm_page_iterate_proc proc, purgeable_q_t queue, - int group) + vm_page_iterate_proc proc, purgeable_q_t queue, + int group) { - vm_object_t object; + vm_object_t object; - for (object = (vm_object_t) queue_first(&queue->objq[group]); - !queue_end(&queue->objq[group], (queue_entry_t) object); - object = (vm_object_t) queue_next(&object->objq)) - { + for (object = (vm_object_t) queue_first(&queue->objq[group]); + !queue_end(&queue->objq[group], (queue_entry_t) object); + object = (vm_object_t) queue_next(&object->objq)) { proc(info, num_info, object); - } + } } -static void +static void vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info, - vm_page_iterate_proc proc) + vm_page_iterate_proc proc) { - vm_object_t object; + vm_object_t object; - lck_spin_lock(&vm_objects_wired_lock); - queue_iterate(&vm_objects_wired, - object, - vm_object_t, - wired_objq) - { + lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); + queue_iterate(&vm_objects_wired, + object, + vm_object_t, + wired_objq) + { proc(info, num_info, object); - } - lck_spin_unlock(&vm_objects_wired_lock); + } + lck_spin_unlock(&vm_objects_wired_lock); } #endif /* ! 
VM_TAG_ACTIVE_UPDATE */ static uint64_t process_account(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, boolean_t iterated) { - size_t namelen; - unsigned int idx, count, nextinfo; - vm_allocation_site_t * site; + size_t namelen; + unsigned int idx, count, nextinfo; + vm_allocation_site_t * site; lck_spin_lock(&vm_allocation_sites_lock); - for (idx = 0; idx <= vm_allocation_tag_highest; idx++) - { + for (idx = 0; idx <= vm_allocation_tag_highest; idx++) { site = vm_allocation_sites[idx]; - if (!site) continue; + if (!site) { + continue; + } info[idx].mapped = site->mapped; info[idx].tag = site->tag; - if (!iterated) - { + if (!iterated) { info[idx].size = site->total; #if DEBUG || DEVELOPMENT info[idx].peak = site->peak; #endif /* DEBUG || DEVELOPMENT */ - } - else - { - if (!site->subtotalscount && (site->total != info[idx].size)) - { - printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size); - info[idx].size = site->total; - } - } - } - - nextinfo = (vm_allocation_tag_highest + 1); - count = nextinfo; - if (count >= num_info) count = num_info; - - for (idx = 0; idx < count; idx++) - { - site = vm_allocation_sites[idx]; - if (!site) continue; + } else { + if (!site->subtotalscount && (site->total != info[idx].size)) { + printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size); + info[idx].size = site->total; + } + } info[idx].flags |= VM_KERN_SITE_WIRED; - if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) - { - info[idx].site = idx; - info[idx].flags |= VM_KERN_SITE_TAG; - if (VM_KERN_MEMORY_ZONE == idx) - { + if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) { + info[idx].site = idx; + info[idx].flags |= VM_KERN_SITE_TAG; + if (VM_KERN_MEMORY_ZONE == idx) { info[idx].flags |= VM_KERN_SITE_HIDE; info[idx].flags &= ~VM_KERN_SITE_WIRED; info[idx].collectable_bytes = zones_collectable_bytes; } + } else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) { + info[idx].site = 0; + info[idx].flags |= VM_KERN_SITE_NAMED; + if (namelen > sizeof(info[idx].name)) { + namelen = sizeof(info[idx].name); + } + strncpy(&info[idx].name[0], KA_NAME(site), namelen); + } else if (VM_TAG_KMOD & site->flags) { + info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0); + info[idx].flags |= VM_KERN_SITE_KMOD; + } else { + info[idx].site = VM_KERNEL_UNSLIDE(site); + info[idx].flags |= VM_KERN_SITE_KERNEL; } - else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) - { - info[idx].site = 0; - info[idx].flags |= VM_KERN_SITE_NAMED; - if (namelen > sizeof(info[idx].name)) namelen = sizeof(info[idx].name); - strncpy(&info[idx].name[0], KA_NAME(site), namelen); - } - else if (VM_TAG_KMOD & site->flags) - { - info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0); - info[idx].flags |= VM_KERN_SITE_KMOD; - } - else - { - info[idx].site = VM_KERNEL_UNSLIDE(site); - info[idx].flags |= VM_KERN_SITE_KERNEL; + } + + nextinfo = (vm_allocation_tag_highest + 1); + count = nextinfo; + if (count >= num_info) { + count = num_info; + } + + for (idx = 0; idx < count; idx++) { + site = vm_allocation_sites[idx]; + if (!site) { + continue; } #if VM_MAX_TAG_ZONES vm_allocation_zone_total_t * zone; unsigned int zidx; vm_size_t elem_size; - if (vm_allocation_zone_totals - && (zone = vm_allocation_zone_totals[idx]) - && (nextinfo < num_info)) - { - for (zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) - { - if (!zone[zidx].peak) continue; + if (vm_allocation_zone_totals + && (zone = vm_allocation_zone_totals[idx]) + && 
(nextinfo < num_info)) { + for (zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) { + if (!zone[zidx].peak) { + continue; + } info[nextinfo] = info[idx]; info[nextinfo].zone = zone_index_from_tag_index(zidx, &elem_size); info[nextinfo].flags &= ~VM_KERN_SITE_WIRED; @@ -8876,168 +9135,175 @@ process_account(mach_memory_info_t * info, unsigned int num_info, uint64_t zones info[nextinfo].size = zone[zidx].total; info[nextinfo].peak = zone[zidx].peak; info[nextinfo].mapped = 0; - if (zone[zidx].wastediv) - { + if (zone[zidx].wastediv) { info[nextinfo].collectable_bytes = ((zone[zidx].waste * zone[zidx].total / elem_size) / zone[zidx].wastediv); } - nextinfo++; - } - } + nextinfo++; + } + } #endif /* VM_MAX_TAG_ZONES */ - if (site->subtotalscount) - { + if (site->subtotalscount) { uint64_t mapped, mapcost, take; uint32_t sub; vm_tag_t alloctag; - info[idx].size = site->total; - mapped = info[idx].size; - info[idx].mapped = mapped; - mapcost = 0; - for (sub = 0; sub < site->subtotalscount; sub++) - { + info[idx].size = site->total; + mapped = info[idx].size; + info[idx].mapped = mapped; + mapcost = 0; + for (sub = 0; sub < site->subtotalscount; sub++) { alloctag = site->subtotals[sub].tag; assert(alloctag < num_info); - if (info[alloctag].name[0]) continue; - take = info[alloctag].mapped; - if (take > info[alloctag].size) take = info[alloctag].size; - if (take > mapped) take = mapped; + if (info[alloctag].name[0]) { + continue; + } + take = site->subtotals[sub].total; + if (take > info[alloctag].size) { + take = info[alloctag].size; + } + if (take > mapped) { + take = mapped; + } info[alloctag].mapped -= take; info[alloctag].size -= take; mapped -= take; mapcost += take; - } - info[idx].size = mapcost; - } + } + info[idx].size = mapcost; + } } lck_spin_unlock(&vm_allocation_sites_lock); - return (0); + return 0; } uint32_t vm_page_diagnose_estimate(void) { - vm_allocation_site_t * site; - uint32_t count; - uint32_t idx; + vm_allocation_site_t * site; + uint32_t count; + uint32_t idx; lck_spin_lock(&vm_allocation_sites_lock); - for (count = idx = 0; idx < VM_MAX_TAG_VALUE; idx++) - { + for (count = idx = 0; idx < VM_MAX_TAG_VALUE; idx++) { site = vm_allocation_sites[idx]; - if (!site) continue; + if (!site) { + continue; + } count++; #if VM_MAX_TAG_ZONES - if (vm_allocation_zone_totals) - { + if (vm_allocation_zone_totals) { vm_allocation_zone_total_t * zone; zone = vm_allocation_zone_totals[idx]; - if (!zone) continue; - for (uint32_t zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) if (zone[zidx].peak) count++; + if (!zone) { + continue; + } + for (uint32_t zidx = 0; zidx < VM_MAX_TAG_ZONES; zidx++) { + if (zone[zidx].peak) { + count++; + } + } } #endif - } + } lck_spin_unlock(&vm_allocation_sites_lock); - /* some slop for new tags created */ - count += 8; - count += VM_KERN_COUNTER_COUNT; + /* some slop for new tags created */ + count += 8; + count += VM_KERN_COUNTER_COUNT; - return (count); + return count; } -kern_return_t +kern_return_t vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes) { - uint64_t wired_size; - uint64_t wired_managed_size; - uint64_t wired_reserved_size; - uint64_t booter_size; - boolean_t iterate; - mach_memory_info_t * counts; + uint64_t wired_size; + uint64_t wired_managed_size; + uint64_t wired_reserved_size; + uint64_t booter_size; + boolean_t iterate; + mach_memory_info_t * counts; - bzero(info, num_info * sizeof(mach_memory_info_t)); + bzero(info, num_info * sizeof(mach_memory_info_t)); - if (!vm_page_wire_count_initial) return 
(KERN_ABORTED); + if (!vm_page_wire_count_initial) { + return KERN_ABORTED; + } #if CONFIG_EMBEDDED - wired_size = ptoa_64(vm_page_wire_count); - wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count); + wired_size = ptoa_64(vm_page_wire_count); + wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count); #else - wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count); - wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count); + wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count); + wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count); #endif - wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial); + wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial); - booter_size = ml_get_booter_memory_size(); - wired_size += booter_size; + booter_size = ml_get_booter_memory_size(); + wired_size += booter_size; - assert(num_info >= VM_KERN_COUNTER_COUNT); - num_info -= VM_KERN_COUNTER_COUNT; - counts = &info[num_info]; + assert(num_info >= VM_KERN_COUNTER_COUNT); + num_info -= VM_KERN_COUNTER_COUNT; + counts = &info[num_info]; -#define SET_COUNT(xcount, xsize, xflags) \ +#define SET_COUNT(xcount, xsize, xflags) \ counts[xcount].tag = VM_MAX_TAG_VALUE + xcount; \ - counts[xcount].site = (xcount); \ - counts[xcount].size = (xsize); \ - counts[xcount].mapped = (xsize); \ + counts[xcount].site = (xcount); \ + counts[xcount].size = (xsize); \ + counts[xcount].mapped = (xsize); \ counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags; - SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0); - SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0); - SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0); - SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED); - SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED); - SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED); - SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0); - SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED); - -#define SET_MAP(xcount, xsize, xfree, xlargest) \ - counts[xcount].site = (xcount); \ - counts[xcount].size = (xsize); \ - counts[xcount].mapped = (xsize); \ - counts[xcount].free = (xfree); \ - counts[xcount].largest = (xlargest); \ + SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0); + SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0); + SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0); + SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED); + SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED); + SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED); + SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0); + SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED); + +#define SET_MAP(xcount, xsize, xfree, xlargest) \ + counts[xcount].site = (xcount); \ + counts[xcount].size = (xsize); \ + counts[xcount].mapped = (xsize); \ + counts[xcount].free = (xfree); \ + counts[xcount].largest = (xlargest); \ counts[xcount].flags = VM_KERN_SITE_COUNTER; - vm_map_size_t map_size, map_free, map_largest; + vm_map_size_t map_size, map_free, map_largest; - vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest); - 
SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest); + vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest); - vm_map_sizes(zone_map, &map_size, &map_free, &map_largest); - SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest); + vm_map_sizes(zone_map, &map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest); - vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest); - SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest); + vm_map_sizes(kalloc_map, &map_size, &map_free, &map_largest); + SET_MAP(VM_KERN_COUNT_MAP_KALLOC, map_size, map_free, map_largest); - iterate = !VM_TAG_ACTIVE_UPDATE; - if (iterate) - { - enum { kMaxKernelDepth = 1 }; - vm_map_t maps [kMaxKernelDepth]; - vm_map_entry_t entries[kMaxKernelDepth]; - vm_map_t map; - vm_map_entry_t entry; - vm_object_offset_t offset; - vm_page_t page; - int stackIdx, count; - -#if ! VM_TAG_ACTIVE_UPDATE - vm_page_iterate_objects(info, num_info, &vm_page_count_object); + iterate = !VM_TAG_ACTIVE_UPDATE; + if (iterate) { + enum { kMaxKernelDepth = 1 }; + vm_map_t maps[kMaxKernelDepth]; + vm_map_entry_t entries[kMaxKernelDepth]; + vm_map_t map; + vm_map_entry_t entry; + vm_object_offset_t offset; + vm_page_t page; + int stackIdx, count; + +#if !VM_TAG_ACTIVE_UPDATE + vm_page_iterate_objects(info, num_info, &vm_page_count_object); #endif /* ! VM_TAG_ACTIVE_UPDATE */ - map = kernel_map; - stackIdx = 0; - while (map) - { + map = kernel_map; + stackIdx = 0; + while (map) { vm_map_lock(map); - for (entry = map->hdr.links.next; map; entry = entry->links.next) - { - if (entry->is_sub_map) - { + for (entry = map->hdr.links.next; map; entry = entry->links.next) { + if (entry->is_sub_map) { assert(stackIdx < kMaxKernelDepth); maps[stackIdx] = map; entries[stackIdx] = entry; @@ -9045,43 +9311,41 @@ vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zone map = VME_SUBMAP(entry); entry = NULL; break; - } - if (VME_OBJECT(entry) == kernel_object) - { + } + if (VME_OBJECT(entry) == kernel_object) { count = 0; vm_object_lock(VME_OBJECT(entry)); - for (offset = entry->links.start; offset < entry->links.end; offset += page_size) - { + for (offset = entry->links.start; offset < entry->links.end; offset += page_size) { page = vm_page_lookup(VME_OBJECT(entry), offset); - if (page && VM_PAGE_WIRED(page)) count++; + if (page && VM_PAGE_WIRED(page)) { + count++; + } } vm_object_unlock(VME_OBJECT(entry)); - if (count) - { - assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE); - assert(VME_ALIAS(entry) < num_info); - info[VME_ALIAS(entry)].size += ptoa_64(count); + if (count) { + assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE); + assert(VME_ALIAS(entry) < num_info); + info[VME_ALIAS(entry)].size += ptoa_64(count); } - } - while (map && (entry == vm_map_last_entry(map))) - { + } + while (map && (entry == vm_map_last_entry(map))) { vm_map_unlock(map); - if (!stackIdx) map = NULL; - else - { - --stackIdx; - map = maps[stackIdx]; - entry = entries[stackIdx]; + if (!stackIdx) { + map = NULL; + } else { + --stackIdx; + map = maps[stackIdx]; + entry = entries[stackIdx]; } - } + } } - } - } + } + } + + process_account(info, num_info, zones_collectable_bytes, iterate); - process_account(info, num_info, zones_collectable_bytes, iterate); - - return (KERN_SUCCESS); + return KERN_SUCCESS; } #if DEBUG || DEVELOPMENT @@ -9089,40 +9353,45 @@ vm_page_diagnose(mach_memory_info_t * info, unsigned int 
num_info, uint64_t zone kern_return_t vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size) { - kern_return_t ret; - vm_size_t zsize; - vm_map_t map; - vm_map_entry_t entry; + kern_return_t ret; + vm_size_t zsize; + vm_map_t map; + vm_map_entry_t entry; - zsize = zone_element_info((void *) addr, tag); - if (zsize) - { + zsize = zone_element_info((void *) addr, tag); + if (zsize) { *zone_size = *size = zsize; - return (KERN_SUCCESS); - } + return KERN_SUCCESS; + } *zone_size = 0; - ret = KERN_INVALID_ADDRESS; - for (map = kernel_map; map; ) - { + ret = KERN_INVALID_ADDRESS; + for (map = kernel_map; map;) { vm_map_lock(map); - if (!vm_map_lookup_entry(map, addr, &entry)) break; - if (entry->is_sub_map) - { - if (map != kernel_map) break; + if (!vm_map_lookup_entry(map, addr, &entry)) { + break; + } + if (entry->is_sub_map) { + if (map != kernel_map) { + break; + } map = VME_SUBMAP(entry); continue; - } - if (entry->vme_start != addr) break; + } + if (entry->vme_start != addr) { + break; + } *tag = VME_ALIAS(entry); *size = (entry->vme_end - addr); ret = KERN_SUCCESS; break; } - if (map != kernel_map) vm_map_unlock(map); + if (map != kernel_map) { + vm_map_unlock(map); + } vm_map_unlock(kernel_map); - return (ret); + return ret; } #endif /* DEBUG || DEVELOPMENT */ @@ -9130,21 +9399,19 @@ vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_siz uint32_t vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen) { - vm_allocation_site_t * site; - uint32_t kmodId; + vm_allocation_site_t * site; + uint32_t kmodId; - kmodId = 0; - lck_spin_lock(&vm_allocation_sites_lock); - if ((site = vm_allocation_sites[tag])) - { - if (VM_TAG_KMOD & site->flags) - { - kmodId = OSKextGetKmodIDForSite(site, name, namelen); - } - } - lck_spin_unlock(&vm_allocation_sites_lock); + kmodId = 0; + lck_spin_lock(&vm_allocation_sites_lock); + if ((site = vm_allocation_sites[tag])) { + if (VM_TAG_KMOD & site->flags) { + kmodId = OSKextGetKmodIDForSite(site, name, namelen); + } + } + lck_spin_unlock(&vm_allocation_sites_lock); - return (kmodId); + return kmodId; } @@ -9161,17 +9428,17 @@ unsigned int vm_page_secluded_suppress_cnt = 0; unsigned int vm_page_secluded_save_target; -lck_grp_attr_t secluded_suppress_slock_grp_attr; -lck_grp_t secluded_suppress_slock_grp; -lck_attr_t secluded_suppress_slock_attr; -lck_spin_t secluded_suppress_slock; +lck_grp_attr_t secluded_suppress_slock_grp_attr; +lck_grp_t secluded_suppress_slock_grp; +lck_attr_t secluded_suppress_slock_attr; +lck_spin_t secluded_suppress_slock; void secluded_suppression_init(void) { lck_grp_attr_setdefault(&secluded_suppress_slock_grp_attr); lck_grp_init(&secluded_suppress_slock_grp, - "secluded_suppress_slock", &secluded_suppress_slock_grp_attr); + "secluded_suppress_slock", &secluded_suppress_slock_grp_attr); lck_attr_setdefault(&secluded_suppress_slock_attr); lck_spin_init(&secluded_suppress_slock, &secluded_suppress_slock_grp, &secluded_suppress_slock_attr); @@ -9180,8 +9447,9 @@ secluded_suppression_init(void) void start_secluded_suppression(task_t task) { - if (task->task_suppressed_secluded) + if (task->task_suppressed_secluded) { return; + } lck_spin_lock(&secluded_suppress_slock); if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) { task->task_suppressed_secluded = TRUE; diff --git a/osfmk/vm/vm_shared_region.c b/osfmk/vm/vm_shared_region.c index e94960ddc..03624ce07 100644 --- a/osfmk/vm/vm_shared_region.c +++ b/osfmk/vm/vm_shared_region.c @@ -2,14 +2,14 
@@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,7 +17,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ @@ -44,7 +44,7 @@ * the same mappings in their VM map. All they need is contained in the shared * region. * It can also share a pmap (mostly for read-only parts but also for the - * initial version of some writable parts), which gets "nested" into the + * initial version of some writable parts), which gets "nested" into the * process's pmap. This reduces the number of soft faults: once one process * brings in a page in the shared region, all the other processes can access * it without having to enter it in their own pmap. @@ -120,7 +120,7 @@ * the following codes are used in the subclass * of the DBG_MACH_SHAREDREGION class */ -#define PROCESS_SHARED_CACHE_LAYOUT 0x00 +#define PROCESS_SHARED_CACHE_LAYOUT 0x00 /* "dyld" uses this to figure out what the kernel supports */ @@ -130,7 +130,7 @@ int shared_region_version = 3; int shared_region_trace_level = SHARED_REGION_TRACE_ERROR_LVL; /* should local (non-chroot) shared regions persist when no task uses them ? */ -int shared_region_persistence = 0; /* no by default */ +int shared_region_persistence = 0; /* no by default */ /* delay before reclaiming an unused shared region */ int shared_region_destroy_delay = 120; /* in seconds */ @@ -138,9 +138,9 @@ int shared_region_destroy_delay = 120; /* in seconds */ struct vm_shared_region *init_task_shared_region = NULL; #ifndef CONFIG_EMBEDDED -/* +/* * Only one cache gets to slide on Desktop, since we can't - * tear down slide info properly today and the desktop actually + * tear down slide info properly today and the desktop actually * produces lots of shared caches. 
*/ boolean_t shared_region_completed_slide = FALSE; @@ -152,25 +152,25 @@ lck_mtx_t vm_shared_region_lock; #define vm_shared_region_lock() lck_mtx_lock(&vm_shared_region_lock) #define vm_shared_region_unlock() lck_mtx_unlock(&vm_shared_region_lock) -#define vm_shared_region_sleep(event, interruptible) \ - lck_mtx_sleep(&vm_shared_region_lock, \ - LCK_SLEEP_DEFAULT, \ - (event_t) (event), \ - (interruptible)) +#define vm_shared_region_sleep(event, interruptible) \ + lck_mtx_sleep(&vm_shared_region_lock, \ + LCK_SLEEP_DEFAULT, \ + (event_t) (event), \ + (interruptible)) /* the list of currently available shared regions (one per environment) */ -queue_head_t vm_shared_region_queue; +queue_head_t vm_shared_region_queue; static void vm_shared_region_reference_locked(vm_shared_region_t shared_region); static vm_shared_region_t vm_shared_region_create( - void *root_dir, - cpu_type_t cputype, - cpu_subtype_t cpu_subtype, - boolean_t is_64bit); + void *root_dir, + cpu_type_t cputype, + cpu_subtype_t cpu_subtype, + boolean_t is_64bit); static void vm_shared_region_destroy(vm_shared_region_t shared_region); static void vm_shared_region_timeout(thread_call_param_t param0, - thread_call_param_t param1); + thread_call_param_t param1); kern_return_t vm_shared_region_slide_mapping( vm_shared_region_t sr, mach_vm_size_t slide_info_size, @@ -182,7 +182,7 @@ kern_return_t vm_shared_region_slide_mapping( static int __commpage_setup = 0; #if defined(__i386__) || defined(__x86_64__) -static int __system_power_source = 1; /* init to external power source */ +static int __system_power_source = 1; /* init to external power source */ static void post_sys_powersource_internal(int i, int internal); #endif /* __i386__ || __x86_64__ */ @@ -197,10 +197,10 @@ vm_shared_region_init(void) ("shared_region: -> init\n")); vm_shared_region_lck_grp = lck_grp_alloc_init("vm shared region", - LCK_GRP_ATTR_NULL); + LCK_GRP_ATTR_NULL); lck_mtx_init(&vm_shared_region_lock, - vm_shared_region_lck_grp, - LCK_ATTR_NULL); + vm_shared_region_lck_grp, + LCK_ATTR_NULL); queue_init(&vm_shared_region_queue); @@ -209,20 +209,20 @@ vm_shared_region_init(void) } /* - * Retrieve a task's shared region and grab an extra reference to - * make sure it doesn't disappear while the caller is using it. + * Retrieve a task's shared region and grab an extra reference to + * make sure it doesn't disappear while the caller is using it. * The caller is responsible for consuming that extra reference if * necessary. 
*/ vm_shared_region_t vm_shared_region_get( - task_t task) + task_t task) { - vm_shared_region_t shared_region; + vm_shared_region_t shared_region; SHARED_REGION_TRACE_DEBUG( ("shared_region: -> get(%p)\n", - (void *)VM_KERNEL_ADDRPERM(task))); + (void *)VM_KERNEL_ADDRPERM(task))); task_lock(task); vm_shared_region_lock(); @@ -236,8 +236,8 @@ vm_shared_region_get( SHARED_REGION_TRACE_DEBUG( ("shared_region: get(%p) <- %p\n", - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(shared_region))); return shared_region; } @@ -252,16 +252,16 @@ vm_shared_region_get( */ mach_vm_offset_t vm_shared_region_base_address( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { SHARED_REGION_TRACE_DEBUG( ("shared_region: -> base_address(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 1); SHARED_REGION_TRACE_DEBUG( ("shared_region: base_address(%p) <- 0x%llx\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - (long long)shared_region->sr_base_address)); + (void *)VM_KERNEL_ADDRPERM(shared_region), + (long long)shared_region->sr_base_address)); return shared_region->sr_base_address; } @@ -275,16 +275,16 @@ vm_shared_region_base_address( */ mach_vm_size_t vm_shared_region_size( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { SHARED_REGION_TRACE_DEBUG( ("shared_region: -> size(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 1); SHARED_REGION_TRACE_DEBUG( ("shared_region: size(%p) <- 0x%llx\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - (long long)shared_region->sr_size)); + (void *)VM_KERNEL_ADDRPERM(shared_region), + (long long)shared_region->sr_size)); return shared_region->sr_size; } @@ -298,30 +298,30 @@ vm_shared_region_size( */ ipc_port_t vm_shared_region_mem_entry( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { SHARED_REGION_TRACE_DEBUG( ("shared_region: -> mem_entry(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 1); SHARED_REGION_TRACE_DEBUG( ("shared_region: mem_entry(%p) <- %p\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry))); + (void *)VM_KERNEL_ADDRPERM(shared_region), + (void *)VM_KERNEL_ADDRPERM(shared_region->sr_mem_entry))); return shared_region->sr_mem_entry; } vm_map_t vm_shared_region_vm_map( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { - ipc_port_t sr_handle; - vm_named_entry_t sr_mem_entry; - vm_map_t sr_map; + ipc_port_t sr_handle; + vm_named_entry_t sr_mem_entry; + vm_map_t sr_map; SHARED_REGION_TRACE_DEBUG( ("shared_region: -> vm_map(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 1); sr_handle = shared_region->sr_mem_entry; @@ -331,42 +331,42 @@ vm_shared_region_vm_map( SHARED_REGION_TRACE_DEBUG( ("shared_region: vm_map(%p) <- %p\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - (void *)VM_KERNEL_ADDRPERM(sr_map))); + (void *)VM_KERNEL_ADDRPERM(shared_region), + (void *)VM_KERNEL_ADDRPERM(sr_map))); return sr_map; } uint32_t vm_shared_region_get_slide( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { SHARED_REGION_TRACE_DEBUG( ("shared_region: 
-> vm_shared_region_get_slide(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 1); SHARED_REGION_TRACE_DEBUG( ("shared_region: vm_shared_region_get_slide(%p) <- %u\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - shared_region->sr_slide_info.slide)); + (void *)VM_KERNEL_ADDRPERM(shared_region), + shared_region->sr_slide_info.slide)); /* 0 if we haven't slid */ - assert(shared_region->sr_slide_info.slide_object != NULL || - shared_region->sr_slide_info.slide == 0); + assert(shared_region->sr_slide_info.slide_object != NULL || + shared_region->sr_slide_info.slide == 0); - return shared_region->sr_slide_info.slide; + return shared_region->sr_slide_info.slide; } vm_shared_region_slide_info_t vm_shared_region_get_slide_info( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { SHARED_REGION_TRACE_DEBUG( ("shared_region: -> vm_shared_region_get_slide_info(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 1); SHARED_REGION_TRACE_DEBUG( ("shared_region: vm_shared_region_get_slide_info(%p) <- %p\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info))); + (void *)VM_KERNEL_ADDRPERM(shared_region), + (void *)VM_KERNEL_ADDRPERM(&shared_region->sr_slide_info))); return &shared_region->sr_slide_info; } @@ -379,15 +379,15 @@ vm_shared_region_get_slide_info( */ void vm_shared_region_set( - task_t task, - vm_shared_region_t new_shared_region) + task_t task, + vm_shared_region_t new_shared_region) { - vm_shared_region_t old_shared_region; + vm_shared_region_t old_shared_region; SHARED_REGION_TRACE_DEBUG( ("shared_region: -> set(%p, %p)\n", - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(new_shared_region))); + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(new_shared_region))); task_lock(task); vm_shared_region_lock(); @@ -409,9 +409,9 @@ vm_shared_region_set( SHARED_REGION_TRACE_DEBUG( ("shared_region: set(%p) <- old=%p new=%p\n", - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(old_shared_region), - (void *)VM_KERNEL_ADDRPERM(new_shared_region))); + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(old_shared_region), + (void *)VM_KERNEL_ADDRPERM(new_shared_region))); } /* @@ -423,19 +423,19 @@ vm_shared_region_set( */ vm_shared_region_t vm_shared_region_lookup( - void *root_dir, - cpu_type_t cputype, - cpu_subtype_t cpu_subtype, - boolean_t is_64bit) + void *root_dir, + cpu_type_t cputype, + cpu_subtype_t cpu_subtype, + boolean_t is_64bit) { - vm_shared_region_t shared_region; - vm_shared_region_t new_shared_region; + vm_shared_region_t shared_region; + vm_shared_region_t new_shared_region; SHARED_REGION_TRACE_DEBUG( ("shared_region: -> lookup(root=%p,cpu=<%d,%d>,64bit=%d)\n", - (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit)); + (void *)VM_KERNEL_ADDRPERM(root_dir), + cputype, cpu_subtype, is_64bit)); shared_region = NULL; new_shared_region = NULL; @@ -443,9 +443,9 @@ vm_shared_region_lookup( vm_shared_region_lock(); for (;;) { queue_iterate(&vm_shared_region_queue, - shared_region, - vm_shared_region_t, - sr_q) { + shared_region, + vm_shared_region_t, + sr_q) { assert(shared_region->sr_ref_count > 0); if (shared_region->sr_cpu_type == cputype && shared_region->sr_cpu_subtype == cpu_subtype && @@ -460,9 +460,9 @@ vm_shared_region_lookup( /* no match: create a new one 
*/ vm_shared_region_unlock(); new_shared_region = vm_shared_region_create(root_dir, - cputype, - cpu_subtype, - is_64bit); + cputype, + cpu_subtype, + is_64bit); /* do the lookup again, in case we lost a race */ vm_shared_region_lock(); continue; @@ -471,9 +471,9 @@ vm_shared_region_lookup( shared_region = new_shared_region; new_shared_region = NULL; queue_enter(&vm_shared_region_queue, - shared_region, - vm_shared_region_t, - sr_q); + shared_region, + vm_shared_region_t, + sr_q); break; } @@ -493,9 +493,9 @@ done: SHARED_REGION_TRACE_DEBUG( ("shared_region: lookup(root=%p,cpu=<%d,%d>,64bit=%d) <- %p\n", - (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit, - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(root_dir), + cputype, cpu_subtype, is_64bit, + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 0); return shared_region; @@ -507,13 +507,13 @@ done: */ static void vm_shared_region_reference_locked( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { LCK_MTX_ASSERT(&vm_shared_region_lock, LCK_MTX_ASSERT_OWNED); SHARED_REGION_TRACE_DEBUG( ("shared_region: -> reference_locked(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 0); shared_region->sr_ref_count++; @@ -534,8 +534,8 @@ vm_shared_region_reference_locked( SHARED_REGION_TRACE_DEBUG( ("shared_region: reference_locked(%p) <- %d\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - shared_region->sr_ref_count)); + (void *)VM_KERNEL_ADDRPERM(shared_region), + shared_region->sr_ref_count)); } /* @@ -544,14 +544,14 @@ vm_shared_region_reference_locked( */ void vm_shared_region_deallocate( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { SHARED_REGION_TRACE_DEBUG( ("shared_region: -> deallocate(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); vm_shared_region_lock(); - + assert(shared_region->sr_ref_count > 0); if (shared_region->sr_root_dir == NULL) { @@ -570,7 +570,7 @@ vm_shared_region_deallocate( shared_region->sr_ref_count++; shared_region->sr_persists = TRUE; } else if (!shared_region_persistence && - shared_region->sr_persists) { + shared_region->sr_persists) { /* make this one no longer persistent */ assert(shared_region->sr_ref_count > 1); shared_region->sr_ref_count--; @@ -582,8 +582,8 @@ vm_shared_region_deallocate( shared_region->sr_ref_count--; SHARED_REGION_TRACE_DEBUG( ("shared_region: deallocate(%p): ref now %d\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - shared_region->sr_ref_count)); + (void *)VM_KERNEL_ADDRPERM(shared_region), + shared_region->sr_ref_count)); if (shared_region->sr_ref_count == 0) { uint64_t deadline; @@ -592,7 +592,7 @@ vm_shared_region_deallocate( if (shared_region->sr_timer_call == NULL) { /* hold one reference for the timer */ - assert(! 
shared_region->sr_mapping_in_progress); + assert(!shared_region->sr_mapping_in_progress); shared_region->sr_ref_count++; /* set up the timer */ @@ -602,20 +602,20 @@ vm_shared_region_deallocate( /* schedule the timer */ clock_interval_to_deadline(shared_region_destroy_delay, - 1000 * 1000 * 1000, - &deadline); + 1000 * 1000 * 1000, + &deadline); thread_call_enter_delayed(shared_region->sr_timer_call, - deadline); + deadline); SHARED_REGION_TRACE_DEBUG( ("shared_region: deallocate(%p): armed timer\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); vm_shared_region_unlock(); } else { /* timer expired: let go of this shared region */ - /* + /* * We can't properly handle teardown of a slid object today. */ assert(!shared_region->sr_slid); @@ -625,9 +625,9 @@ vm_shared_region_deallocate( * it... */ queue_remove(&vm_shared_region_queue, - shared_region, - vm_shared_region_t, - sr_q); + shared_region, + vm_shared_region_t, + sr_q); vm_shared_region_unlock(); /* ... and destroy it */ @@ -640,15 +640,15 @@ vm_shared_region_deallocate( SHARED_REGION_TRACE_DEBUG( ("shared_region: deallocate(%p) <-\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); } void vm_shared_region_timeout( - thread_call_param_t param0, - __unused thread_call_param_t param1) + thread_call_param_t param0, + __unused thread_call_param_t param1) { - vm_shared_region_t shared_region; + vm_shared_region_t shared_region; shared_region = (vm_shared_region_t) param0; @@ -660,24 +660,24 @@ vm_shared_region_timeout( */ static vm_shared_region_t vm_shared_region_create( - void *root_dir, - cpu_type_t cputype, - cpu_subtype_t cpu_subtype, - boolean_t is_64bit) + void *root_dir, + cpu_type_t cputype, + cpu_subtype_t cpu_subtype, + boolean_t is_64bit) { - kern_return_t kr; - vm_named_entry_t mem_entry; - ipc_port_t mem_entry_port; - vm_shared_region_t shared_region; + kern_return_t kr; + vm_named_entry_t mem_entry; + ipc_port_t mem_entry_port; + vm_shared_region_t shared_region; vm_shared_region_slide_info_t si; - vm_map_t sub_map; - mach_vm_offset_t base_address, pmap_nesting_start; - mach_vm_size_t size, pmap_nesting_size; + vm_map_t sub_map; + mach_vm_offset_t base_address, pmap_nesting_start; + mach_vm_size_t size, pmap_nesting_size; SHARED_REGION_TRACE_INFO( ("shared_region: -> create(root=%p,cpu=<%d,%d>,64bit=%d)\n", - (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit)); + (void *)VM_KERNEL_ADDRPERM(root_dir), + cputype, cpu_subtype, is_64bit)); base_address = 0; size = 0; @@ -686,7 +686,7 @@ vm_shared_region_create( sub_map = VM_MAP_NULL; /* create a new shared region structure... 
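The deallocate/timeout pair retabbed above implements deferred teardown: when the last reference is dropped, the region is not destroyed immediately; instead one reference is re-taken on behalf of a one-shot thread call that re-enters vm_shared_region_deallocate() after shared_region_destroy_delay seconds, so a region that is looked up again soon is reused rather than rebuilt. The same idiom reduced to its core, with hypothetical object and field names:

/* Sketch of deferred teardown via thread_call; "obj" is illustrative. */
static void
obj_release(obj_t obj)
{
	obj_lock(obj);
	if (--obj->ref_count == 0) {
		uint64_t deadline;

		obj->ref_count++;	/* the pending timer now holds one reference */
		clock_interval_to_deadline(obj->destroy_delay_secs,
		    1000 * 1000 * 1000 /* nanosecond scale */, &deadline);
		thread_call_enter_delayed(obj->timer_call, deadline);
	}
	obj_unlock(obj);
}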
*/ - shared_region = kalloc(sizeof (*shared_region)); + shared_region = kalloc(sizeof(*shared_region)); if (shared_region == NULL) { SHARED_REGION_TRACE_ERROR( ("shared_region: create: couldn't allocate\n")); @@ -720,8 +720,8 @@ vm_shared_region_create( default: SHARED_REGION_TRACE_ERROR( ("shared_region: create: unknown cpu type %d\n", - cputype)); - kfree(shared_region, sizeof (*shared_region)); + cputype)); + kfree(shared_region, sizeof(*shared_region)); shared_region = NULL; goto done; } @@ -752,8 +752,8 @@ vm_shared_region_create( default: SHARED_REGION_TRACE_ERROR( ("shared_region: create: unknown cpu type %d\n", - cputype)); - kfree(shared_region, sizeof (*shared_region)); + cputype)); + kfree(shared_region, sizeof(*shared_region)); shared_region = NULL; goto done; } @@ -761,17 +761,17 @@ vm_shared_region_create( /* create a memory entry structure and a Mach port handle */ kr = mach_memory_entry_allocate(&mem_entry, - &mem_entry_port); + &mem_entry_port); if (kr != KERN_SUCCESS) { - kfree(shared_region, sizeof (*shared_region)); + kfree(shared_region, sizeof(*shared_region)); shared_region = NULL; SHARED_REGION_TRACE_ERROR( ("shared_region: create: " - "couldn't allocate mem_entry\n")); + "couldn't allocate mem_entry\n")); goto done; } -#if defined(__arm__) || defined(__arm64__) +#if defined(__arm__) || defined(__arm64__) { struct pmap *pmap_nested; @@ -784,7 +784,7 @@ vm_shared_region_create( page_shift_user32 == SIXTEENK_PAGE_SHIFT) { /* enforce 16KB alignment of VM map entries */ vm_map_set_page_shift(sub_map, - SIXTEENK_PAGE_SHIFT); + SIXTEENK_PAGE_SHIFT); } #elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS) /* enforce 16KB alignment for watch targets with new ABI */ @@ -797,16 +797,16 @@ vm_shared_region_create( #else /* create a VM sub map and its pmap */ sub_map = vm_map_create(pmap_create(NULL, 0, is_64bit), - 0, size, - TRUE); + 0, size, + TRUE); #endif if (sub_map == VM_MAP_NULL) { ipc_port_release_send(mem_entry_port); - kfree(shared_region, sizeof (*shared_region)); + kfree(shared_region, sizeof(*shared_region)); shared_region = NULL; SHARED_REGION_TRACE_ERROR( ("shared_region: create: " - "couldn't allocate map\n")); + "couldn't allocate map\n")); goto done; } @@ -861,25 +861,25 @@ done: if (shared_region) { SHARED_REGION_TRACE_INFO( ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d," - "base=0x%llx,size=0x%llx) <- " - "%p mem=(%p,%p) map=%p pmap=%p\n", - (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit, - (long long)base_address, - (long long)size, - (void *)VM_KERNEL_ADDRPERM(shared_region), - (void *)VM_KERNEL_ADDRPERM(mem_entry_port), - (void *)VM_KERNEL_ADDRPERM(mem_entry), - (void *)VM_KERNEL_ADDRPERM(sub_map), - (void *)VM_KERNEL_ADDRPERM(sub_map->pmap))); + "base=0x%llx,size=0x%llx) <- " + "%p mem=(%p,%p) map=%p pmap=%p\n", + (void *)VM_KERNEL_ADDRPERM(root_dir), + cputype, cpu_subtype, is_64bit, + (long long)base_address, + (long long)size, + (void *)VM_KERNEL_ADDRPERM(shared_region), + (void *)VM_KERNEL_ADDRPERM(mem_entry_port), + (void *)VM_KERNEL_ADDRPERM(mem_entry), + (void *)VM_KERNEL_ADDRPERM(sub_map), + (void *)VM_KERNEL_ADDRPERM(sub_map->pmap))); } else { SHARED_REGION_TRACE_INFO( ("shared_region: create(root=%p,cpu=<%d,%d>,64bit=%d," - "base=0x%llx,size=0x%llx) <- NULL", - (void *)VM_KERNEL_ADDRPERM(root_dir), - cputype, cpu_subtype, is_64bit, - (long long)base_address, - (long long)size)); + "base=0x%llx,size=0x%llx) <- NULL", + (void *)VM_KERNEL_ADDRPERM(root_dir), + cputype, cpu_subtype, is_64bit, + (long 
long)base_address, + (long long)size)); } return shared_region; } @@ -890,18 +890,18 @@ done: */ static void vm_shared_region_destroy( - vm_shared_region_t shared_region) + vm_shared_region_t shared_region) { - vm_named_entry_t mem_entry; - vm_map_t map; + vm_named_entry_t mem_entry; + vm_map_t map; SHARED_REGION_TRACE_INFO( ("shared_region: -> destroy(%p) (root=%p,cpu=<%d,%d>,64bit=%d)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir), - shared_region->sr_cpu_type, - shared_region->sr_cpu_subtype, - shared_region->sr_64bit)); + (void *)VM_KERNEL_ADDRPERM(shared_region), + (void *)VM_KERNEL_ADDRPERM(shared_region->sr_root_dir), + shared_region->sr_cpu_type, + shared_region->sr_cpu_subtype, + shared_region->sr_64bit)); assert(shared_region->sr_ref_count == 0); assert(!shared_region->sr_persists); @@ -923,9 +923,9 @@ vm_shared_region_destroy( */ if (map->pmap) { pmap_remove(map->pmap, - shared_region->sr_base_address, - (shared_region->sr_base_address + - shared_region->sr_size)); + shared_region->sr_base_address, + (shared_region->sr_base_address + + shared_region->sr_size)); } /* @@ -944,27 +944,26 @@ vm_shared_region_destroy( } #if 0 - /* + /* * If slid, free those resources. We'll want this eventually, * but can't handle it properly today. */ si = &shared_region->sr_slide_info; if (si->slide_info_entry) { kmem_free(kernel_map, - (vm_offset_t) si->slide_info_entry, - (vm_size_t) si->slide_info_size); + (vm_offset_t) si->slide_info_entry, + (vm_size_t) si->slide_info_size); vm_object_deallocate(si->slide_object); } -#endif +#endif /* release the shared region structure... */ - kfree(shared_region, sizeof (*shared_region)); + kfree(shared_region, sizeof(*shared_region)); SHARED_REGION_TRACE_DEBUG( ("shared_region: destroy(%p) <-\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); shared_region = NULL; - } /* @@ -972,16 +971,16 @@ vm_shared_region_destroy( */ kern_return_t vm_shared_region_start_address( - vm_shared_region_t shared_region, - mach_vm_offset_t *start_address) + vm_shared_region_t shared_region, + mach_vm_offset_t *start_address) { - kern_return_t kr; - mach_vm_offset_t sr_base_address; - mach_vm_offset_t sr_first_mapping; + kern_return_t kr; + mach_vm_offset_t sr_base_address; + mach_vm_offset_t sr_first_mapping; SHARED_REGION_TRACE_DEBUG( ("shared_region: -> start_address(%p)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region))); + (void *)VM_KERNEL_ADDRPERM(shared_region))); assert(shared_region->sr_ref_count > 1); vm_shared_region_lock(); @@ -995,11 +994,11 @@ vm_shared_region_start_address( /* wait for our turn... */ assert(shared_region->sr_ref_count > 1); vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, - THREAD_UNINT); + THREAD_UNINT); } - assert(! 
shared_region->sr_mapping_in_progress); + assert(!shared_region->sr_mapping_in_progress); assert(shared_region->sr_ref_count > 1); - + sr_base_address = shared_region->sr_base_address; sr_first_mapping = shared_region->sr_first_mapping; @@ -1012,11 +1011,11 @@ vm_shared_region_start_address( } vm_shared_region_unlock(); - + SHARED_REGION_TRACE_DEBUG( ("shared_region: start_address(%p) <- 0x%llx\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), - (long long)shared_region->sr_base_address)); + (void *)VM_KERNEL_ADDRPERM(shared_region), + (long long)shared_region->sr_base_address)); return kr; } @@ -1028,20 +1027,20 @@ vm_shared_region_undo_mappings( struct shared_file_mapping_np *mappings, unsigned int mappings_count) { - unsigned int j = 0; - vm_shared_region_t shared_region = NULL; - boolean_t reset_shared_region_state = FALSE; + unsigned int j = 0; + vm_shared_region_t shared_region = NULL; + boolean_t reset_shared_region_state = FALSE; shared_region = vm_shared_region_get(current_task()); if (shared_region == NULL) { printf("Failed to undo mappings because of NULL shared region.\n"); return; } - + if (sr_map == NULL) { - ipc_port_t sr_handle; - vm_named_entry_t sr_mem_entry; + ipc_port_t sr_handle; + vm_named_entry_t sr_mem_entry; vm_shared_region_lock(); assert(shared_region->sr_ref_count > 1); @@ -1049,9 +1048,9 @@ vm_shared_region_undo_mappings( while (shared_region->sr_mapping_in_progress) { /* wait for our turn... */ vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, - THREAD_UNINT); + THREAD_UNINT); } - assert(! shared_region->sr_mapping_in_progress); + assert(!shared_region->sr_mapping_in_progress); assert(shared_region->sr_ref_count > 1); /* let others know we're working in this shared region */ shared_region->sr_mapping_in_progress = TRUE; @@ -1081,21 +1080,21 @@ vm_shared_region_undo_mappings( } SHARED_REGION_TRACE_INFO( ("shared_region: mapping[%d]: " - "address:0x%016llx " - "size:0x%016llx " - "offset:0x%016llx " - "maxprot:0x%x prot:0x%x: " - "undoing...\n", - j, - (long long)mappings[j].sfm_address, - (long long)mappings[j].sfm_size, - (long long)mappings[j].sfm_file_offset, - mappings[j].sfm_max_prot, - mappings[j].sfm_init_prot)); + "address:0x%016llx " + "size:0x%016llx " + "offset:0x%016llx " + "maxprot:0x%x prot:0x%x: " + "undoing...\n", + j, + (long long)mappings[j].sfm_address, + (long long)mappings[j].sfm_size, + (long long)mappings[j].sfm_file_offset, + mappings[j].sfm_max_prot, + mappings[j].sfm_init_prot)); kr2 = mach_vm_deallocate( sr_map, (mappings[j].sfm_address - - sr_base_address), + sr_base_address), mappings[j].sfm_size); assert(kr2 == KERN_SUCCESS); } @@ -1120,48 +1119,48 @@ vm_shared_region_undo_mappings( * to populate the shared region with the appropriate shared cache. * * One could also call it several times to incrementally load several - * libraries, as long as they do not overlap. + * libraries, as long as they do not overlap. * It will return KERN_SUCCESS if the mappings were successfully established * or if they were already established identically by another process. 
*/ kern_return_t vm_shared_region_map_file( - vm_shared_region_t shared_region, - unsigned int mappings_count, - struct shared_file_mapping_np *mappings, - memory_object_control_t file_control, - memory_object_size_t file_size, - void *root_dir, - uint32_t slide, - user_addr_t slide_start, - user_addr_t slide_size) + vm_shared_region_t shared_region, + unsigned int mappings_count, + struct shared_file_mapping_np *mappings, + memory_object_control_t file_control, + memory_object_size_t file_size, + void *root_dir, + uint32_t slide, + user_addr_t slide_start, + user_addr_t slide_size) { - kern_return_t kr; - vm_object_t file_object; - ipc_port_t sr_handle; - vm_named_entry_t sr_mem_entry; - vm_map_t sr_map; - mach_vm_offset_t sr_base_address; - unsigned int i; - mach_port_t map_port; - vm_map_offset_t target_address; - vm_object_t object; - vm_object_size_t obj_size; - struct shared_file_mapping_np *mapping_to_slide = NULL; - mach_vm_offset_t first_mapping = (mach_vm_offset_t) -1; - mach_vm_offset_t slid_mapping = (mach_vm_offset_t) -1; - vm_map_offset_t lowest_unnestable_addr = 0; - vm_map_kernel_flags_t vmk_flags; - mach_vm_offset_t sfm_min_address = ~0; - mach_vm_offset_t sfm_max_address = 0; + kern_return_t kr; + vm_object_t file_object; + ipc_port_t sr_handle; + vm_named_entry_t sr_mem_entry; + vm_map_t sr_map; + mach_vm_offset_t sr_base_address; + unsigned int i; + mach_port_t map_port; + vm_map_offset_t target_address; + vm_object_t object; + vm_object_size_t obj_size; + struct shared_file_mapping_np *mapping_to_slide = NULL; + mach_vm_offset_t first_mapping = (mach_vm_offset_t) -1; + mach_vm_offset_t slid_mapping = (mach_vm_offset_t) -1; + vm_map_offset_t lowest_unnestable_addr = 0; + vm_map_kernel_flags_t vmk_flags; + mach_vm_offset_t sfm_min_address = ~0; + mach_vm_offset_t sfm_max_address = 0; struct _dyld_cache_header sr_cache_header; #if __arm64__ if ((shared_region->sr_64bit || - page_shift_user32 == SIXTEENK_PAGE_SHIFT) && + page_shift_user32 == SIXTEENK_PAGE_SHIFT) && ((slide & SIXTEENK_PAGE_MASK) != 0)) { printf("FOURK_COMPAT: %s: rejecting mis-aligned slide 0x%x\n", - __FUNCTION__, slide); + __FUNCTION__, slide); kr = KERN_INVALID_ARGUMENT; goto done; } @@ -1176,7 +1175,7 @@ vm_shared_region_map_file( /* * This shared region doesn't match the current root * directory of this process. Deny the mapping to - * avoid tainting the shared region with something that + * avoid tainting the shared region with something that * doesn't quite belong into it. */ vm_shared_region_unlock(); @@ -1192,9 +1191,9 @@ vm_shared_region_map_file( while (shared_region->sr_mapping_in_progress) { /* wait for our turn... */ vm_shared_region_sleep(&shared_region->sr_mapping_in_progress, - THREAD_UNINT); + THREAD_UNINT); } - assert(! 
shared_region->sr_mapping_in_progress); + assert(!shared_region->sr_mapping_in_progress); assert(shared_region->sr_ref_count > 1); /* let others know we're working in this shared region */ shared_region->sr_mapping_in_progress = TRUE; @@ -1209,9 +1208,9 @@ vm_shared_region_map_file( SHARED_REGION_TRACE_DEBUG( ("shared_region: -> map(%p,%d,%p,%p,0x%llx)\n", - (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count, - (void *)VM_KERNEL_ADDRPERM(mappings), - (void *)VM_KERNEL_ADDRPERM(file_control), file_size)); + (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count, + (void *)VM_KERNEL_ADDRPERM(mappings), + (void *)VM_KERNEL_ADDRPERM(file_control), file_size)); /* get the VM object associated with the file to be mapped */ file_object = memory_object_control_to_vm_object(file_control); @@ -1222,14 +1221,14 @@ vm_shared_region_map_file( for (i = 0; i < mappings_count; i++) { SHARED_REGION_TRACE_INFO( ("shared_region: mapping[%d]: " - "address:0x%016llx size:0x%016llx offset:0x%016llx " - "maxprot:0x%x prot:0x%x\n", - i, - (long long)mappings[i].sfm_address, - (long long)mappings[i].sfm_size, - (long long)mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot)); + "address:0x%016llx size:0x%016llx offset:0x%016llx " + "maxprot:0x%x prot:0x%x\n", + i, + (long long)mappings[i].sfm_address, + (long long)mappings[i].sfm_size, + (long long)mappings[i].sfm_file_offset, + mappings[i].sfm_max_prot, + mappings[i].sfm_init_prot)); if (mappings[i].sfm_address < sfm_min_address) { sfm_min_address = mappings[i].sfm_address; @@ -1246,7 +1245,7 @@ vm_shared_region_map_file( /* file-backed memory */ __IGNORE_WCASTALIGN(map_port = (ipc_port_t) file_object->pager); } - + if (mappings[i].sfm_init_prot & VM_PROT_SLIDE) { /* * This is the mapping that needs to be slid. @@ -1254,16 +1253,16 @@ vm_shared_region_map_file( if (mapping_to_slide != NULL) { SHARED_REGION_TRACE_INFO( ("shared_region: mapping[%d]: " - "address:0x%016llx size:0x%016llx " - "offset:0x%016llx " - "maxprot:0x%x prot:0x%x " - "will not be slid as only one such mapping is allowed...\n", - i, - (long long)mappings[i].sfm_address, - (long long)mappings[i].sfm_size, - (long long)mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot)); + "address:0x%016llx size:0x%016llx " + "offset:0x%016llx " + "maxprot:0x%x prot:0x%x " + "will not be slid as only one such mapping is allowed...\n", + i, + (long long)mappings[i].sfm_address, + (long long)mappings[i].sfm_size, + (long long)mappings[i].sfm_file_offset, + mappings[i].sfm_max_prot, + mappings[i].sfm_init_prot)); } else { mapping_to_slide = &mappings[i]; } @@ -1271,7 +1270,7 @@ vm_shared_region_map_file( /* mapping's address is relative to the shared region base */ target_address = - mappings[i].sfm_address - sr_base_address; + mappings[i].sfm_address - sr_base_address; vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; vmk_flags.vmkf_already = TRUE; @@ -1285,7 +1284,7 @@ vm_shared_region_map_file( * can be mapped "copy-on-write". 
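Both vm_shared_region_start_address() and vm_shared_region_map_file() above serialize on sr_mapping_in_progress rather than holding the region lock across the whole mapping operation. The handshake, distilled; the wakeup side is the usual thread_wakeup() counterpart of lck_mtx_sleep(), paraphrased here rather than quoted from the patch:

/* Wait for any mapper ahead of us, then claim the region. */
vm_shared_region_lock();
while (shared_region->sr_mapping_in_progress) {
	/* vm_shared_region_sleep() drops the mutex, blocks, reacquires it */
	vm_shared_region_sleep(&shared_region->sr_mapping_in_progress,
	    THREAD_UNINT);
}
shared_region->sr_mapping_in_progress = TRUE;	/* our turn */
vm_shared_region_unlock();

/* ... establish mappings without holding the region lock ... */

vm_shared_region_lock();
shared_region->sr_mapping_in_progress = FALSE;
thread_wakeup((event_t) &shared_region->sr_mapping_in_progress);
vm_shared_region_unlock();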
*/ obj_size = vm_map_round_page(mappings[i].sfm_size, - VM_MAP_PAGE_MASK(sr_map)); + VM_MAP_PAGE_MASK(sr_map)); object = vm_object_allocate(obj_size); if (object == VM_OBJECT_NULL) { kr = KERN_RESOURCE_SHORTAGE; @@ -1294,7 +1293,7 @@ vm_shared_region_map_file( sr_map, &target_address, vm_map_round_page(mappings[i].sfm_size, - VM_MAP_PAGE_MASK(sr_map)), + VM_MAP_PAGE_MASK(sr_map)), 0, VM_FLAGS_FIXED, vmk_flags, @@ -1312,7 +1311,7 @@ vm_shared_region_map_file( sr_map, &target_address, vm_map_round_page(mappings[i].sfm_size, - VM_MAP_PAGE_MASK(sr_map)), + VM_MAP_PAGE_MASK(sr_map)), 0, VM_FLAGS_FIXED, vmk_flags, @@ -1323,7 +1322,6 @@ vm_shared_region_map_file( mappings[i].sfm_init_prot & VM_PROT_ALL, mappings[i].sfm_max_prot & VM_PROT_ALL, VM_INHERIT_DEFAULT); - } if (kr == KERN_SUCCESS) { @@ -1338,7 +1336,7 @@ vm_shared_region_map_file( } if ((slid_mapping == (mach_vm_offset_t) -1) && - (mapping_to_slide == &mappings[i])) { + (mapping_to_slide == &mappings[i])) { slid_mapping = target_address; } @@ -1350,7 +1348,7 @@ vm_shared_region_map_file( if ((mappings[i].sfm_init_prot & VM_PROT_WRITE) && sr_map->is_nested_map && (lowest_unnestable_addr == 0 || - (target_address < lowest_unnestable_addr))) { + (target_address < lowest_unnestable_addr))) { lowest_unnestable_addr = target_address; } } else { @@ -1369,16 +1367,16 @@ vm_shared_region_map_file( */ SHARED_REGION_TRACE_INFO( ("shared_region: mapping[%d]: " - "address:0x%016llx size:0x%016llx " - "offset:0x%016llx " - "maxprot:0x%x prot:0x%x " - "already mapped...\n", - i, - (long long)mappings[i].sfm_address, - (long long)mappings[i].sfm_size, - (long long)mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot)); + "address:0x%016llx size:0x%016llx " + "offset:0x%016llx " + "maxprot:0x%x prot:0x%x " + "already mapped...\n", + i, + (long long)mappings[i].sfm_address, + (long long)mappings[i].sfm_size, + (long long)mappings[i].sfm_file_offset, + mappings[i].sfm_max_prot, + mappings[i].sfm_init_prot)); /* * We didn't establish this mapping ourselves; * let's reset its size, so that we do not @@ -1390,59 +1388,57 @@ vm_shared_region_map_file( /* this mapping failed ! 
*/ SHARED_REGION_TRACE_ERROR( ("shared_region: mapping[%d]: " - "address:0x%016llx size:0x%016llx " - "offset:0x%016llx " - "maxprot:0x%x prot:0x%x failed 0x%x\n", - i, - (long long)mappings[i].sfm_address, - (long long)mappings[i].sfm_size, - (long long)mappings[i].sfm_file_offset, - mappings[i].sfm_max_prot, - mappings[i].sfm_init_prot, - kr)); + "address:0x%016llx size:0x%016llx " + "offset:0x%016llx " + "maxprot:0x%x prot:0x%x failed 0x%x\n", + i, + (long long)mappings[i].sfm_address, + (long long)mappings[i].sfm_size, + (long long)mappings[i].sfm_file_offset, + mappings[i].sfm_max_prot, + mappings[i].sfm_init_prot, + kr)); vm_shared_region_undo_mappings(sr_map, sr_base_address, mappings, i); break; } - } - } if (kr == KERN_SUCCESS && slide_size != 0 && mapping_to_slide != NULL) { - kr = vm_shared_region_slide(slide, - mapping_to_slide->sfm_file_offset, - mapping_to_slide->sfm_size, - slide_start, - slide_size, - slid_mapping, - file_control); - if (kr != KERN_SUCCESS) { + kr = vm_shared_region_slide(slide, + mapping_to_slide->sfm_file_offset, + mapping_to_slide->sfm_size, + slide_start, + slide_size, + slid_mapping, + file_control); + if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("shared_region: region_slide(" - "slide:0x%x start:0x%016llx " - "size:0x%016llx) failed 0x%x\n", - slide, - (long long)slide_start, - (long long)slide_size, - kr)); + "slide:0x%x start:0x%016llx " + "size:0x%016llx) failed 0x%x\n", + slide, + (long long)slide_start, + (long long)slide_size, + kr)); vm_shared_region_undo_mappings(sr_map, - sr_base_address, - mappings, - mappings_count); + sr_base_address, + mappings, + mappings_count); } } if (kr == KERN_SUCCESS) { /* adjust the map's "lowest_unnestable_start" */ - lowest_unnestable_addr &= ~(pmap_nesting_size_min-1); + lowest_unnestable_addr &= ~(pmap_nesting_size_min - 1); if (lowest_unnestable_addr != sr_map->lowest_unnestable_start) { vm_map_lock(sr_map); sr_map->lowest_unnestable_start = - lowest_unnestable_addr; + lowest_unnestable_addr; vm_map_unlock(sr_map); } } @@ -1451,7 +1447,7 @@ vm_shared_region_map_file( assert(shared_region->sr_ref_count > 1); assert(shared_region->sr_mapping_in_progress); - /* set "sr_first_mapping"; dyld uses it to validate the shared cache */ + /* set "sr_first_mapping"; dyld uses it to validate the shared cache */ if (kr == KERN_SUCCESS && shared_region->sr_first_mapping == (mach_vm_offset_t) -1) { shared_region->sr_first_mapping = first_mapping; @@ -1465,22 +1461,22 @@ vm_shared_region_map_file( */ if (kr == KERN_SUCCESS && !shared_region->sr_uuid_copied) { int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping), - (char *)&sr_cache_header, - sizeof(sr_cache_header)); + (char *)&sr_cache_header, + sizeof(sr_cache_header)); if (error == 0) { memcpy(&shared_region->sr_uuid, &sr_cache_header.uuid, sizeof(shared_region->sr_uuid)); shared_region->sr_uuid_copied = TRUE; } else { #if DEVELOPMENT || DEBUG panic("shared_region: copyin shared_cache_header(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx " - "offset:0 size:0x%016llx) failed with %d\n", - (long long)shared_region->sr_base_address, - (long long)shared_region->sr_first_mapping, - (long long)sizeof(sr_cache_header), - error); + "offset:0 size:0x%016llx) failed with %d\n", + (long long)shared_region->sr_base_address, + (long long)shared_region->sr_first_mapping, + (long long)sizeof(sr_cache_header), + error); #endif /* DEVELOPMENT || DEBUG */ shared_region->sr_uuid_copied = FALSE; - } + } } /* @@ -1495,12 +1491,12 @@ 
vm_shared_region_map_file( size_t image_array_length = (sr_cache_header.imagesTextCount * sizeof(struct _dyld_cache_image_text_info)); struct _dyld_cache_image_text_info *sr_image_layout = kalloc(image_array_length); int error = copyin((shared_region->sr_base_address + shared_region->sr_first_mapping + - sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length); + sr_cache_header.imagesTextOffset), (char *)sr_image_layout, image_array_length); if (error == 0) { shared_region->sr_images = kalloc(sr_cache_header.imagesTextCount * sizeof(struct dyld_uuid_info_64)); for (size_t index = 0; index < sr_cache_header.imagesTextCount; index++) { memcpy((char *)&shared_region->sr_images[index].imageUUID, (char *)&sr_image_layout[index].uuid, - sizeof(shared_region->sr_images[index].imageUUID)); + sizeof(shared_region->sr_images[index].imageUUID)); shared_region->sr_images[index].imageLoadAddress = sr_image_layout[index].loadAddress; } @@ -1509,12 +1505,12 @@ vm_shared_region_map_file( } else { #if DEVELOPMENT || DEBUG panic("shared_region: copyin shared_cache_layout(sr_base_addr:0x%016llx sr_first_mapping:0x%016llx " - "offset:0x%016llx size:0x%016llx) failed with %d\n", - (long long)shared_region->sr_base_address, - (long long)shared_region->sr_first_mapping, - (long long)sr_cache_header.imagesTextOffset, - (long long)image_array_length, - error); + "offset:0x%016llx size:0x%016llx) failed with %d\n", + (long long)shared_region->sr_base_address, + (long long)shared_region->sr_first_mapping, + (long long)sr_cache_header.imagesTextOffset, + (long long)image_array_length, + error); #endif /* DEVELOPMENT || DEBUG */ } KDBG((MACHDBG_CODE(DBG_MACH_SHAREDREGION, PROCESS_SHARED_CACHE_LAYOUT)) | DBG_FUNC_END, shared_region->sr_images_count); @@ -1541,9 +1537,9 @@ vm_shared_region_map_file( done: SHARED_REGION_TRACE_DEBUG( ("shared_region: map(%p,%d,%p,%p,0x%llx) <- 0x%x \n", - (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count, - (void *)VM_KERNEL_ADDRPERM(mappings), - (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr)); + (void *)VM_KERNEL_ADDRPERM(shared_region), mappings_count, + (void *)VM_KERNEL_ADDRPERM(mappings), + (void *)VM_KERNEL_ADDRPERM(file_control), file_size, kr)); return kr; } @@ -1587,29 +1583,29 @@ vm_shared_region_trim_and_get(task_t task) */ kern_return_t vm_shared_region_enter( - struct _vm_map *map, - struct task *task, - boolean_t is_64bit, - void *fsroot, - cpu_type_t cpu, - cpu_subtype_t cpu_subtype) + struct _vm_map *map, + struct task *task, + boolean_t is_64bit, + void *fsroot, + cpu_type_t cpu, + cpu_subtype_t cpu_subtype) { - kern_return_t kr; - vm_shared_region_t shared_region; - vm_map_offset_t sr_address, sr_offset, target_address; - vm_map_size_t sr_size, mapping_size; - vm_map_offset_t sr_pmap_nesting_start; - vm_map_size_t sr_pmap_nesting_size; - ipc_port_t sr_handle; - vm_prot_t cur_prot, max_prot; + kern_return_t kr; + vm_shared_region_t shared_region; + vm_map_offset_t sr_address, sr_offset, target_address; + vm_map_size_t sr_size, mapping_size; + vm_map_offset_t sr_pmap_nesting_start; + vm_map_size_t sr_pmap_nesting_size; + ipc_port_t sr_handle; + vm_prot_t cur_prot, max_prot; SHARED_REGION_TRACE_DEBUG( ("shared_region: -> " - "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit)); + "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d)\n", + (void *)VM_KERNEL_ADDRPERM(map), + (void 
*)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit)); /* lookup (create if needed) the shared region for this environment */ shared_region = vm_shared_region_lookup(fsroot, cpu, cpu_subtype, is_64bit); @@ -1617,16 +1613,16 @@ vm_shared_region_enter( /* this should not happen ! */ SHARED_REGION_TRACE_ERROR( ("shared_region: -> " - "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d): " - "lookup failed !\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit)); + "enter(map=%p,task=%p,root=%p,cpu=<%d,%d>,64bit=%d): " + "lookup failed !\n", + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit)); //panic("shared_region_enter: lookup failed\n"); return KERN_FAILURE; } - + kr = KERN_SUCCESS; /* no need to lock since this data is never modified */ sr_address = shared_region->sr_base_address; @@ -1673,25 +1669,25 @@ vm_shared_region_enter( if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("shared_region: enter(%p,%p,%p,%d,%d,%d): " - "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit, - (long long)target_address, - (long long)mapping_size, - (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); + "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit, + (long long)target_address, + (long long)mapping_size, + (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); goto done; } SHARED_REGION_TRACE_DEBUG( ("shared_region: enter(%p,%p,%p,%d,%d,%d): " - "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit, - (long long)target_address, (long long)mapping_size, - (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); + "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit, + (long long)target_address, (long long)mapping_size, + (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); sr_offset += mapping_size; sr_size -= mapping_size; } @@ -1701,10 +1697,10 @@ vm_shared_region_enter( * The pmap-nesting is triggered by the "VM_MEMORY_SHARED_PMAP" alias... 
*/ for (; - sr_pmap_nesting_size > 0; - sr_offset += mapping_size, - sr_size -= mapping_size, - sr_pmap_nesting_size -= mapping_size) { + sr_pmap_nesting_size > 0; + sr_offset += mapping_size, + sr_size -= mapping_size, + sr_pmap_nesting_size -= mapping_size) { target_address = sr_address + sr_offset; mapping_size = sr_pmap_nesting_size; if (mapping_size > pmap_nesting_size_max) { @@ -1727,25 +1723,25 @@ vm_shared_region_enter( if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("shared_region: enter(%p,%p,%p,%d,%d,%d): " - "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit, - (long long)target_address, - (long long)mapping_size, - (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); + "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit, + (long long)target_address, + (long long)mapping_size, + (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); goto done; } SHARED_REGION_TRACE_DEBUG( ("shared_region: enter(%p,%p,%p,%d,%d,%d): " - "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit, - (long long)target_address, (long long)mapping_size, - (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); + "nested vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit, + (long long)target_address, (long long)mapping_size, + (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); } if (sr_size > 0) { /* and there's some left to be mapped without pmap-nesting */ @@ -1768,25 +1764,25 @@ vm_shared_region_enter( if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("shared_region: enter(%p,%p,%p,%d,%d,%d): " - "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit, - (long long)target_address, - (long long)mapping_size, - (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); + "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit, + (long long)target_address, + (long long)mapping_size, + (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); goto done; } SHARED_REGION_TRACE_DEBUG( ("shared_region: enter(%p,%p,%p,%d,%d,%d): " - "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, cpu_subtype, is_64bit, - (long long)target_address, (long long)mapping_size, - (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); + "vm_map_enter(0x%llx,0x%llx,%p) error 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit, + (long long)target_address, (long long)mapping_size, + (void *)VM_KERNEL_ADDRPERM(sr_handle), kr)); sr_offset += mapping_size; sr_size -= mapping_size; } @@ -1804,18 +1800,18 @@ done: SHARED_REGION_TRACE_DEBUG( ("shared_region: enter(%p,%p,%p,%d,%d,%d) <- 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), - (void *)VM_KERNEL_ADDRPERM(fsroot), - cpu, 
cpu_subtype, is_64bit, kr)); + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), + (void *)VM_KERNEL_ADDRPERM(fsroot), + cpu, cpu_subtype, is_64bit, kr)); return kr; } -#define SANE_SLIDE_INFO_SIZE (2560*1024) /*Can be changed if needed*/ -struct vm_shared_region_slide_info slide_info; +#define SANE_SLIDE_INFO_SIZE (2560*1024) /*Can be changed if needed*/ +struct vm_shared_region_slide_info slide_info; kern_return_t -vm_shared_region_sliding_valid(uint32_t slide) +vm_shared_region_sliding_valid(uint32_t slide) { kern_return_t kr = KERN_SUCCESS; vm_shared_region_t sr = vm_shared_region_get(current_task()); @@ -1826,9 +1822,9 @@ vm_shared_region_sliding_valid(uint32_t slide) } if ((sr->sr_slid == TRUE) && slide) { - if (slide != vm_shared_region_get_slide_info(sr)->slide) { + if (slide != vm_shared_region_get_slide_info(sr)->slide) { printf("Only one shared region can be slid\n"); - kr = KERN_FAILURE; + kr = KERN_FAILURE; } else { /* * Request for sliding when we've @@ -1838,7 +1834,7 @@ vm_shared_region_sliding_valid(uint32_t slide) * we don't want to slide again and * so we return this value. */ - kr = KERN_INVALID_ARGUMENT; + kr = KERN_INVALID_ARGUMENT; } } vm_shared_region_deallocate(sr); @@ -1847,25 +1843,25 @@ vm_shared_region_sliding_valid(uint32_t slide) kern_return_t vm_shared_region_slide_mapping( - vm_shared_region_t sr, - mach_vm_size_t slide_info_size, - mach_vm_offset_t start, - mach_vm_size_t size, - mach_vm_offset_t slid_mapping, - uint32_t slide, - memory_object_control_t sr_file_control) + vm_shared_region_t sr, + mach_vm_size_t slide_info_size, + mach_vm_offset_t start, + mach_vm_size_t size, + mach_vm_offset_t slid_mapping, + uint32_t slide, + memory_object_control_t sr_file_control) { - kern_return_t kr; - vm_object_t object; + kern_return_t kr; + vm_object_t object; vm_shared_region_slide_info_t si; - vm_offset_t slide_info_entry; - vm_map_entry_t slid_entry, tmp_entry; - struct vm_map_entry tmp_entry_store; - memory_object_t sr_pager; - vm_map_t sr_map; - int vm_flags; - vm_map_kernel_flags_t vmk_flags; - vm_map_offset_t map_addr; + vm_offset_t slide_info_entry; + vm_map_entry_t slid_entry, tmp_entry; + struct vm_map_entry tmp_entry_store; + memory_object_t sr_pager; + vm_map_t sr_map; + int vm_flags; + vm_map_kernel_flags_t vmk_flags; + vm_map_offset_t map_addr; tmp_entry = VM_MAP_ENTRY_NULL; sr_pager = MEMORY_OBJECT_NULL; @@ -1888,8 +1884,8 @@ vm_shared_region_slide_mapping( } kr = kmem_alloc(kernel_map, - (vm_offset_t *) &slide_info_entry, - (vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK); + (vm_offset_t *) &slide_info_entry, + (vm_size_t) slide_info_size, VM_KERN_MEMORY_OSFMK); if (kr != KERN_SUCCESS) { return kr; } @@ -1902,7 +1898,7 @@ vm_shared_region_slide_mapping( } vm_object_lock(object); - vm_object_reference_locked(object); /* for si->slide_object */ + vm_object_reference_locked(object); /* for si->slide_object */ object->object_is_shared_cache = TRUE; vm_object_unlock(object); @@ -1920,8 +1916,8 @@ vm_shared_region_slide_mapping( sr_map = vm_shared_region_vm_map(sr); vm_map_lock_read(sr_map); if (!vm_map_lookup_entry(sr_map, - slid_mapping, - &slid_entry)) { + slid_mapping, + &slid_entry)) { /* no mapping there */ vm_map_unlock(sr_map); kr = KERN_INVALID_ARGUMENT; @@ -1942,8 +1938,8 @@ vm_shared_region_slide_mapping( /* create a "shared_region" sliding pager */ sr_pager = shared_region_pager_setup(VME_OBJECT(tmp_entry), - VME_OFFSET(tmp_entry), - si); + VME_OFFSET(tmp_entry), + si); if (sr_pager == NULL) { kr = KERN_RESOURCE_SHORTAGE; 
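vm_shared_region_enter() above maps the region into the task in up to three passes, and only the middle window is tagged VM_MEMORY_SHARED_PMAP, which is what opts those entries into pmap nesting. A sketch of the layout, using the function's own variable names:

/*
 *  sr_address                                      sr_address + sr_size
 *  |--- not nested ---|====== pmap-nested ======|--- not nested ---|
 *                     ^ sr_pmap_nesting_start
 *                     |<--- sr_pmap_nesting_size --->|
 *
 * Portions outside the nesting window stay in the task's own pmap, so
 * the read-mostly parts of the shared cache can share page tables
 * across tasks while the rest remains private.
 */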
goto done; @@ -1955,25 +1951,25 @@ vm_shared_region_slide_mapping( vmk_flags.vmkf_overwrite_immutable = TRUE; map_addr = tmp_entry->vme_start; kr = vm_map_enter_mem_object(sr_map, - &map_addr, - (tmp_entry->vme_end - - tmp_entry->vme_start), - (mach_vm_offset_t) 0, - vm_flags, - vmk_flags, - VM_KERN_MEMORY_NONE, - (ipc_port_t)(uintptr_t) sr_pager, - 0, - TRUE, - tmp_entry->protection, - tmp_entry->max_protection, - tmp_entry->inheritance); + &map_addr, + (tmp_entry->vme_end - + tmp_entry->vme_start), + (mach_vm_offset_t) 0, + vm_flags, + vmk_flags, + VM_KERN_MEMORY_NONE, + (ipc_port_t)(uintptr_t) sr_pager, + 0, + TRUE, + tmp_entry->protection, + tmp_entry->max_protection, + tmp_entry->inheritance); assertf(kr == KERN_SUCCESS, "kr = 0x%x\n", kr); assertf(map_addr == tmp_entry->vme_start, - "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n", - (uint64_t)map_addr, - (uint64_t) tmp_entry->vme_start, - tmp_entry); + "map_addr=0x%llx vme_start=0x%llx tmp_entry=%p\n", + (uint64_t)map_addr, + (uint64_t) tmp_entry->vme_start, + tmp_entry); /* success! */ kr = KERN_SUCCESS; @@ -2009,28 +2005,27 @@ done: return kr; } -void* -vm_shared_region_get_slide_info_entry(vm_shared_region_t sr) { +void* +vm_shared_region_get_slide_info_entry(vm_shared_region_t sr) +{ return (void*)sr->sr_slide_info.slide_info_entry; } static kern_return_t vm_shared_region_slide_sanity_check_v1(vm_shared_region_slide_info_entry_v1_t s_info) { - uint32_t pageIndex=0; - uint16_t entryIndex=0; + uint32_t pageIndex = 0; + uint16_t entryIndex = 0; uint16_t *toc = NULL; toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset); - for (;pageIndex < s_info->toc_count; pageIndex++) { - + for (; pageIndex < s_info->toc_count; pageIndex++) { entryIndex = (uint16_t)(toc[pageIndex]); if (entryIndex >= s_info->entry_count) { printf("No sliding bitmap entry for pageIndex: %d at entryIndex: %d amongst %d entries\n", pageIndex, entryIndex, s_info->entry_count); return KERN_FAILURE; } - } return KERN_SUCCESS; } @@ -2097,35 +2092,35 @@ vm_shared_region_slide_sanity_check_v3(vm_shared_region_slide_info_entry_v3_t s_ static kern_return_t vm_shared_region_slide_sanity_check_v4(vm_shared_region_slide_info_entry_v4_t s_info, mach_vm_size_t slide_info_size) { - if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) { - return KERN_FAILURE; - } - - /* Ensure that the slide info doesn't reference any data outside of its bounds. */ - - uint32_t page_starts_count = s_info->page_starts_count; - uint32_t page_extras_count = s_info->page_extras_count; - mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count; - if (num_trailing_entries < page_starts_count) { - return KERN_FAILURE; - } - - /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */ - mach_vm_size_t trailing_size = num_trailing_entries << 1; - if (trailing_size >> 1 != num_trailing_entries) { - return KERN_FAILURE; - } - - mach_vm_size_t required_size = sizeof(*s_info) + trailing_size; - if (required_size < sizeof(*s_info)) { - return KERN_FAILURE; - } - - if (required_size > slide_info_size) { - return KERN_FAILURE; - } - - return KERN_SUCCESS; + if (s_info->page_size != PAGE_SIZE_FOR_SR_SLIDE) { + return KERN_FAILURE; + } + + /* Ensure that the slide info doesn't reference any data outside of its bounds. 
*/ + + uint32_t page_starts_count = s_info->page_starts_count; + uint32_t page_extras_count = s_info->page_extras_count; + mach_vm_size_t num_trailing_entries = page_starts_count + page_extras_count; + if (num_trailing_entries < page_starts_count) { + return KERN_FAILURE; + } + + /* Scale by sizeof(uint16_t). Hard-coding the size simplifies the overflow check. */ + mach_vm_size_t trailing_size = num_trailing_entries << 1; + if (trailing_size >> 1 != num_trailing_entries) { + return KERN_FAILURE; + } + + mach_vm_size_t required_size = sizeof(*s_info) + trailing_size; + if (required_size < sizeof(*s_info)) { + return KERN_FAILURE; + } + + if (required_size > slide_info_size) { + return KERN_FAILURE; + } + + return KERN_SUCCESS; } @@ -2140,9 +2135,9 @@ vm_shared_region_slide_sanity_check(vm_shared_region_t sr) s_info = si->slide_info_entry; kr = mach_vm_protect(kernel_map, - (mach_vm_offset_t)(vm_offset_t)s_info, - (mach_vm_size_t) si->slide_info_size, - TRUE, VM_PROT_READ); + (mach_vm_offset_t)(vm_offset_t)s_info, + (mach_vm_size_t) si->slide_info_size, + TRUE, VM_PROT_READ); if (kr != KERN_SUCCESS) { panic("vm_shared_region_slide_sanity_check: vm_protect() error 0x%x\n", kr); } @@ -2153,8 +2148,8 @@ vm_shared_region_slide_sanity_check(vm_shared_region_t sr) kr = vm_shared_region_slide_sanity_check_v2(&s_info->v2, si->slide_info_size); } else if (s_info->version == 3) { kr = vm_shared_region_slide_sanity_check_v3(&s_info->v3, si->slide_info_size); - } else if (s_info->version == 4) { - kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size); + } else if (s_info->version == 4) { + kr = vm_shared_region_slide_sanity_check_v4(&s_info->v4, si->slide_info_size); } else { goto fail; } @@ -2166,13 +2161,13 @@ vm_shared_region_slide_sanity_check(vm_shared_region_t sr) fail: if (si->slide_info_entry != NULL) { kmem_free(kernel_map, - (vm_offset_t) si->slide_info_entry, - (vm_size_t) si->slide_info_size); - + (vm_offset_t) si->slide_info_entry, + (vm_size_t) si->slide_info_size); + vm_object_deallocate(si->slide_object); - si->slide_object = NULL; + si->slide_object = NULL; si->start = 0; - si->end = 0; + si->end = 0; si->slide = 0; si->slide_info_entry = NULL; si->slide_info_size = 0; @@ -2185,34 +2180,34 @@ vm_shared_region_slide_page_v1(vm_shared_region_slide_info_t si, vm_offset_t vad { uint16_t *toc = NULL; slide_info_entry_toc_t bitmap = NULL; - uint32_t i=0, j=0; + uint32_t i = 0, j = 0; uint8_t b = 0; uint32_t slide = si->slide; int is_64 = task_has_64Bit_addr(current_task()); vm_shared_region_slide_info_entry_v1_t s_info = &si->slide_info_entry->v1; toc = (uint16_t*)((uintptr_t)s_info + s_info->toc_offset); - + if (pageIndex >= s_info->toc_count) { printf("No slide entry for this page in toc. 
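The v4 sanity check above treats every count in the slide info as untrusted: each arithmetic step on page_starts_count and page_extras_count is followed by a test that the operation did not wrap before the result is compared against slide_info_size. The same overflow-check idiom as a self-contained helper (hypothetical, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

static bool
fits_in_buffer(uint32_t starts, uint32_t extras,
    uint64_t header_size, uint64_t buffer_size)
{
	uint64_t entries = (uint64_t)starts + extras;	/* cannot wrap in 64 bits */
	uint64_t payload = entries << 1;	/* entries * sizeof(uint16_t) */
	if ((payload >> 1) != entries) {
		return false;			/* the shift overflowed */
	}
	uint64_t required = header_size + payload;
	if (required < header_size) {
		return false;			/* the addition wrapped */
	}
	return required <= buffer_size;
}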
PageIndex: %d Toc Count: %d\n", pageIndex, s_info->toc_count); } else { uint16_t entryIndex = (uint16_t)(toc[pageIndex]); slide_info_entry_toc_t slide_info_entries = (slide_info_entry_toc_t)((uintptr_t)s_info + s_info->entry_offset); - + if (entryIndex >= s_info->entry_count) { printf("No sliding bitmap entry for entryIndex: %d amongst %d entries\n", entryIndex, s_info->entry_count); } else { bitmap = &slide_info_entries[entryIndex]; - for(i=0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) { + for (i = 0; i < NUM_SLIDING_BITMAPS_PER_PAGE; ++i) { b = bitmap->entry[i]; - if (b!=0) { - for (j=0; j <8; ++j) { - if (b & (1 <= s_info->page_starts_count) { printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n", - pageIndex, s_info->page_starts_count); + pageIndex, s_info->page_starts_count); return KERN_FAILURE; } page_entry = page_starts[pageIndex]; @@ -2392,7 +2387,7 @@ vm_shared_region_slide_page_v2(vm_shared_region_slide_info_t si, vm_offset_t vad if (chain_index >= s_info->page_extras_count) { printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n", - chain_index, s_info->page_extras_count); + chain_index, s_info->page_extras_count); return KERN_FAILURE; } info = page_extras[chain_index]; @@ -2430,7 +2425,7 @@ vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vad if (pageIndex >= s_info->page_starts_count) { printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n", - pageIndex, s_info->page_starts_count); + pageIndex, s_info->page_starts_count); return KERN_FAILURE; } page_entry = s_info->page_starts[pageIndex]; @@ -2445,7 +2440,7 @@ vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vad rebaseLocation += delta; uint64_t value; memcpy(&value, rebaseLocation, sizeof(value)); - delta = ( (value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t); + delta = ((value & 0x3FF8000000000000) >> 51) * sizeof(uint64_t); // A pointer is one of : // { @@ -2486,7 +2481,7 @@ vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vad // and the bottom 43-bits to be fit in to 51-bits. 
uint64_t top8Bits = value & 0x0007F80000000000ULL; uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL; - uint64_t targetValue = ( top8Bits << 13 ) | bottom43Bits; + uint64_t targetValue = (top8Bits << 13) | bottom43Bits; value = targetValue + slide_amount; } @@ -2498,108 +2493,108 @@ vm_shared_region_slide_page_v3(vm_shared_region_slide_info_t si, vm_offset_t vad static kern_return_t rebase_chainv4( - uint8_t *page_content, - uint16_t start_offset, - uint32_t slide_amount, - vm_shared_region_slide_info_entry_v4_t s_info) + uint8_t *page_content, + uint16_t start_offset, + uint32_t slide_amount, + vm_shared_region_slide_info_entry_v4_t s_info) { - const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t); - - const uint32_t delta_mask = (uint32_t)(s_info->delta_mask); - const uint32_t value_mask = ~delta_mask; - const uint32_t value_add = (uint32_t)(s_info->value_add); - const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2; - - uint32_t page_offset = start_offset; - uint32_t delta = 1; - - while (delta != 0 && page_offset <= last_page_offset) { - uint8_t *loc; - uint32_t value; - - loc = page_content + page_offset; - memcpy(&value, loc, sizeof(value)); - delta = (value & delta_mask) >> delta_shift; - value &= value_mask; - - if ( (value & 0xFFFF8000) == 0 ) { - // small positive non-pointer, use as-is - } else if ( (value & 0x3FFF8000) == 0x3FFF8000 ) { - // small negative non-pointer - value |= 0xC0000000; - } else { - // pointer that needs rebasing - value += value_add; - value += slide_amount; - } - memcpy(loc, &value, sizeof(value)); - page_offset += delta; - } - - /* If the offset went past the end of the page, then the slide data is invalid. */ - if (page_offset > last_page_offset) { - return KERN_FAILURE; - } - return KERN_SUCCESS; + const uint32_t last_page_offset = PAGE_SIZE_FOR_SR_SLIDE - sizeof(uint32_t); + + const uint32_t delta_mask = (uint32_t)(s_info->delta_mask); + const uint32_t value_mask = ~delta_mask; + const uint32_t value_add = (uint32_t)(s_info->value_add); + const uint32_t delta_shift = __builtin_ctzll(delta_mask) - 2; + + uint32_t page_offset = start_offset; + uint32_t delta = 1; + + while (delta != 0 && page_offset <= last_page_offset) { + uint8_t *loc; + uint32_t value; + + loc = page_content + page_offset; + memcpy(&value, loc, sizeof(value)); + delta = (value & delta_mask) >> delta_shift; + value &= value_mask; + + if ((value & 0xFFFF8000) == 0) { + // small positive non-pointer, use as-is + } else if ((value & 0x3FFF8000) == 0x3FFF8000) { + // small negative non-pointer + value |= 0xC0000000; + } else { + // pointer that needs rebasing + value += value_add; + value += slide_amount; + } + memcpy(loc, &value, sizeof(value)); + page_offset += delta; + } + + /* If the offset went past the end of the page, then the slide data is invalid. 
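rebase_chainv4() above walks a chain of tagged 32-bit words: the bits under delta_mask give the byte distance to the next chained location (delta == 0 terminates the chain), and the remaining bits hold either a small non-pointer immediate or a pointer that needs value_add plus the slide. One decode step in isolation, with an illustrative mask rather than the one carried in the slide info header:

#include <stdint.h>

/* One v4 chain step, distilled; delta_mask here is made up. */
static uint32_t
decode_step(uint32_t stored, uint32_t *next_delta)
{
	const uint32_t delta_mask = 0xC0000000;
	const uint32_t delta_shift = (uint32_t)__builtin_ctz(delta_mask) - 2;

	/* subtracting 2 from the shift scales the field to bytes (units of 4) */
	*next_delta = (stored & delta_mask) >> delta_shift;
	return stored & ~delta_mask;	/* the payload value */
}

/*
 * With stored == 0x40001000: next_delta == 4 and the payload is 0x1000;
 * (0x1000 & 0xFFFF8000) == 0, so it is a small positive non-pointer and
 * is written back unchanged, exactly as in the loop above.
 */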
*/ + if (page_offset > last_page_offset) { + return KERN_FAILURE; + } + return KERN_SUCCESS; } static kern_return_t vm_shared_region_slide_page_v4(vm_shared_region_slide_info_t si, vm_offset_t vaddr, uint32_t pageIndex) { - vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4; - const uint32_t slide_amount = si->slide; - - const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset); - const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset); - - uint8_t *page_content = (uint8_t *)vaddr; - uint16_t page_entry; - - if (pageIndex >= s_info->page_starts_count) { - printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n", - pageIndex, s_info->page_starts_count); - return KERN_FAILURE; - } - page_entry = page_starts[pageIndex]; - - if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) { - return KERN_SUCCESS; - } - - if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) { - uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX; - uint16_t info; - - do { - uint16_t page_start_offset; - kern_return_t kr; - - if (chain_index >= s_info->page_extras_count) { - printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n", - chain_index, s_info->page_extras_count); - return KERN_FAILURE; - } - info = page_extras[chain_index]; - page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; - - kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info); - if (kr != KERN_SUCCESS) { - return KERN_FAILURE; - } - - chain_index++; - } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END)); - } else { - const uint32_t page_start_offset = page_entry << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; - kern_return_t kr; - - kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info); - if (kr != KERN_SUCCESS) { - return KERN_FAILURE; - } - } - - return KERN_SUCCESS; + vm_shared_region_slide_info_entry_v4_t s_info = &si->slide_info_entry->v4; + const uint32_t slide_amount = si->slide; + + const uint16_t *page_starts = (uint16_t *)((uintptr_t)s_info + s_info->page_starts_offset); + const uint16_t *page_extras = (uint16_t *)((uintptr_t)s_info + s_info->page_extras_offset); + + uint8_t *page_content = (uint8_t *)vaddr; + uint16_t page_entry; + + if (pageIndex >= s_info->page_starts_count) { + printf("vm_shared_region_slide_page() did not find page start in slide info: pageIndex=%u, count=%u\n", + pageIndex, s_info->page_starts_count); + return KERN_FAILURE; + } + page_entry = page_starts[pageIndex]; + + if (page_entry == DYLD_CACHE_SLIDE4_PAGE_NO_REBASE) { + return KERN_SUCCESS; + } + + if (page_entry & DYLD_CACHE_SLIDE4_PAGE_USE_EXTRA) { + uint16_t chain_index = page_entry & DYLD_CACHE_SLIDE4_PAGE_INDEX; + uint16_t info; + + do { + uint16_t page_start_offset; + kern_return_t kr; + + if (chain_index >= s_info->page_extras_count) { + printf("vm_shared_region_slide_page() out-of-bounds extras index: index=%u, count=%u\n", + chain_index, s_info->page_extras_count); + return KERN_FAILURE; + } + info = page_extras[chain_index]; + page_start_offset = (info & DYLD_CACHE_SLIDE4_PAGE_INDEX) << DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; + + kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info); + if (kr != KERN_SUCCESS) { + return KERN_FAILURE; + } + + chain_index++; + } while (!(info & DYLD_CACHE_SLIDE4_PAGE_EXTRA_END)); + } else { + const uint32_t page_start_offset = page_entry << 
DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT; + kern_return_t kr; + + kr = rebase_chainv4(page_content, page_start_offset, slide_amount, s_info); + if (kr != KERN_SUCCESS) { + return KERN_FAILURE; + } + } + + return KERN_SUCCESS; } @@ -2611,13 +2606,13 @@ vm_shared_region_slide_page(vm_shared_region_slide_info_t si, vm_offset_t vaddr, return vm_shared_region_slide_page_v1(si, vaddr, pageIndex); } else if (si->slide_info_entry->version == 2) { return vm_shared_region_slide_page_v2(si, vaddr, pageIndex); - } else if (si->slide_info_entry->version == 3) { + } else if (si->slide_info_entry->version == 3) { return vm_shared_region_slide_page_v3(si, vaddr, uservaddr, pageIndex); - } else if (si->slide_info_entry->version == 4) { - return vm_shared_region_slide_page_v4(si, vaddr, pageIndex); + } else if (si->slide_info_entry->version == 4) { + return vm_shared_region_slide_page_v4(si, vaddr, pageIndex); } else { - return KERN_FAILURE; - } + return KERN_FAILURE; + } } /******************************************************************************/ @@ -2647,19 +2642,19 @@ user64_addr_t commpage_text64_location = (user64_addr_t) _COMM_PAGE64_TEXT_START */ static void _vm_commpage_init( - ipc_port_t *handlep, - vm_map_size_t size) + ipc_port_t *handlep, + vm_map_size_t size) { - kern_return_t kr; - vm_named_entry_t mem_entry; - vm_map_t new_map; + kern_return_t kr; + vm_named_entry_t mem_entry; + vm_map_t new_map; SHARED_REGION_TRACE_DEBUG( ("commpage: -> _init(0x%llx)\n", - (long long)size)); + (long long)size)); kr = mach_memory_entry_allocate(&mem_entry, - handlep); + handlep); if (kr != KERN_SUCCESS) { panic("_vm_commpage_init: could not allocate mem_entry"); } @@ -2676,16 +2671,16 @@ _vm_commpage_init( SHARED_REGION_TRACE_DEBUG( ("commpage: _init(0x%llx) <- %p\n", - (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep))); + (long long)size, (void *)VM_KERNEL_ADDRPERM(*handlep))); } #endif /* - *Initialize the comm text pages at boot time + * Initialize the comm text pages at boot time */ - extern u_int32_t random(void); - void +extern u_int32_t random(void); +void vm_commpage_text_init(void) { SHARED_REGION_TRACE_DEBUG( @@ -2698,11 +2693,11 @@ vm_commpage_text_init(void) commpage_text32_map = commpage_text32_entry->backing.map; commpage_text32_location = (user32_addr_t) (_COMM_PAGE32_TEXT_START + offset); /* XXX if (cpu_is_64bit_capable()) ? 
*/ - /* create the 64-bit comm page */ + /* create the 64-bit comm page */ offset = (random() % _PFZ64_SLIDE_RANGE) << PAGE_SHIFT; /* restricting sliding upto 2Mb range */ - _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH); - commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject; - commpage_text64_map = commpage_text64_entry->backing.map; + _vm_commpage_init(&commpage_text64_handle, _COMM_PAGE_TEXT_AREA_LENGTH); + commpage_text64_entry = (vm_named_entry_t) commpage_text64_handle->ip_kobject; + commpage_text64_map = commpage_text64_entry->backing.map; commpage_text64_location = (user64_addr_t) (_COMM_PAGE64_TEXT_START + offset); commpage_text_populate(); @@ -2712,8 +2707,7 @@ vm_commpage_text_init(void) #endif /* __i386__ || __x86_64__ */ /* populate the routines in here */ SHARED_REGION_TRACE_DEBUG( - ("commpage text: init() <-\n")); - + ("commpage text: init() <-\n")); } /* @@ -2758,33 +2752,33 @@ vm_commpage_init(void) */ kern_return_t vm_commpage_enter( - vm_map_t map, - task_t task, - boolean_t is64bit) + vm_map_t map, + task_t task, + boolean_t is64bit) { -#if defined(__arm__) +#if defined(__arm__) #pragma unused(is64bit) (void)task; (void)map; return KERN_SUCCESS; -#elif defined(__arm64__) +#elif defined(__arm64__) #pragma unused(is64bit) (void)task; (void)map; pmap_insert_sharedpage(vm_map_pmap(map)); return KERN_SUCCESS; #else - ipc_port_t commpage_handle, commpage_text_handle; - vm_map_offset_t commpage_address, objc_address, commpage_text_address; - vm_map_size_t commpage_size, objc_size, commpage_text_size; - int vm_flags; - vm_map_kernel_flags_t vmk_flags; - kern_return_t kr; + ipc_port_t commpage_handle, commpage_text_handle; + vm_map_offset_t commpage_address, objc_address, commpage_text_address; + vm_map_size_t commpage_size, objc_size, commpage_text_size; + int vm_flags; + vm_map_kernel_flags_t vmk_flags; + kern_return_t kr; SHARED_REGION_TRACE_DEBUG( ("commpage: -> enter(%p,%p)\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task))); + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task))); commpage_text_size = _COMM_PAGE_TEXT_AREA_LENGTH; /* the comm page is likely to be beyond the actual end of the VM map */ @@ -2793,7 +2787,7 @@ vm_commpage_enter( vmk_flags.vmkf_beyond_max = TRUE; /* select the appropriate comm page for this task */ - assert(! 
(is64bit ^ vm_map_is_64bit(map))); + assert(!(is64bit ^ vm_map_is_64bit(map))); if (is64bit) { commpage_handle = commpage64_handle; commpage_address = (vm_map_offset_t) _COMM_PAGE64_BASE_ADDRESS; @@ -2805,7 +2799,7 @@ vm_commpage_enter( } else { commpage_handle = commpage32_handle; commpage_address = - (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS; + (vm_map_offset_t)(unsigned) _COMM_PAGE32_BASE_ADDRESS; commpage_size = _COMM_PAGE32_AREA_LENGTH; objc_size = _COMM_PAGE32_OBJC_SIZE; objc_address = _COMM_PAGE32_OBJC_BASE; @@ -2813,7 +2807,7 @@ vm_commpage_enter( commpage_text_address = (vm_map_offset_t) commpage_text32_location; } - vm_tag_t tag = VM_KERN_MEMORY_NONE; + vm_tag_t tag = VM_KERN_MEMORY_NONE; if ((commpage_address & (pmap_nesting_size_min - 1)) == 0 && (commpage_size & (pmap_nesting_size_min - 1)) == 0) { /* the commpage is properly aligned or sized for pmap-nesting */ @@ -2838,11 +2832,11 @@ vm_commpage_enter( if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("commpage: enter(%p,0x%llx,0x%llx) " - "commpage %p mapping failed 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (long long)commpage_address, - (long long)commpage_size, - (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr)); + "commpage %p mapping failed 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (long long)commpage_address, + (long long)commpage_size, + (void *)VM_KERNEL_ADDRPERM(commpage_handle), kr)); } /* map the comm text page in the task's address space */ @@ -2858,17 +2852,17 @@ vm_commpage_enter( commpage_text_handle, 0, FALSE, - VM_PROT_READ|VM_PROT_EXECUTE, - VM_PROT_READ|VM_PROT_EXECUTE, + VM_PROT_READ | VM_PROT_EXECUTE, + VM_PROT_READ | VM_PROT_EXECUTE, VM_INHERIT_SHARE); if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("commpage text: enter(%p,0x%llx,0x%llx) " - "commpage text %p mapping failed 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (long long)commpage_text_address, - (long long)commpage_text_size, - (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr)); + "commpage text %p mapping failed 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (long long)commpage_text_address, + (long long)commpage_text_size, + (void *)VM_KERNEL_ADDRPERM(commpage_text_handle), kr)); } /* @@ -2893,44 +2887,44 @@ vm_commpage_enter( if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR( ("commpage: enter(%p,0x%llx,0x%llx) " - "objc mapping failed 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (long long)objc_address, - (long long)objc_size, kr)); + "objc mapping failed 0x%x\n", + (void *)VM_KERNEL_ADDRPERM(map), + (long long)objc_address, + (long long)objc_size, kr)); } } SHARED_REGION_TRACE_DEBUG( ("commpage: enter(%p,%p) <- 0x%x\n", - (void *)VM_KERNEL_ADDRPERM(map), - (void *)VM_KERNEL_ADDRPERM(task), kr)); + (void *)VM_KERNEL_ADDRPERM(map), + (void *)VM_KERNEL_ADDRPERM(task), kr)); return kr; #endif } int vm_shared_region_slide(uint32_t slide, - mach_vm_offset_t entry_start_address, - mach_vm_size_t entry_size, - mach_vm_offset_t slide_start, - mach_vm_size_t slide_size, - mach_vm_offset_t slid_mapping, - memory_object_control_t sr_file_control) + mach_vm_offset_t entry_start_address, + mach_vm_size_t entry_size, + mach_vm_offset_t slide_start, + mach_vm_size_t slide_size, + mach_vm_offset_t slid_mapping, + memory_object_control_t sr_file_control) { void *slide_info_entry = NULL; - int error; - vm_shared_region_t sr; + int error; + vm_shared_region_t sr; SHARED_REGION_TRACE_DEBUG( ("vm_shared_region_slide: -> slide %#x, entry_start %#llx, entry_size %#llx, slide_start %#llx, slide_size %#llx\n", - slide, 
entry_start_address, entry_size, slide_start, slide_size)); + slide, entry_start_address, entry_size, slide_start, slide_size)); sr = vm_shared_region_get(current_task()); if (sr == NULL) { printf("%s: no shared region?\n", __FUNCTION__); SHARED_REGION_TRACE_DEBUG( ("vm_shared_region_slide: <- %d (no shared region)\n", - KERN_FAILURE)); + KERN_FAILURE)); return KERN_FAILURE; } @@ -2938,21 +2932,21 @@ vm_shared_region_slide(uint32_t slide, * Protect from concurrent access. */ vm_shared_region_lock(); - while(sr->sr_slide_in_progress) { + while (sr->sr_slide_in_progress) { vm_shared_region_sleep(&sr->sr_slide_in_progress, THREAD_UNINT); } if (sr->sr_slid #ifndef CONFIG_EMBEDDED - || shared_region_completed_slide + || shared_region_completed_slide #endif - ) { + ) { vm_shared_region_unlock(); vm_shared_region_deallocate(sr); printf("%s: shared region already slid?\n", __FUNCTION__); SHARED_REGION_TRACE_DEBUG( ("vm_shared_region_slide: <- %d (already slid)\n", - KERN_FAILURE)); + KERN_FAILURE)); return KERN_FAILURE; } @@ -2960,41 +2954,41 @@ vm_shared_region_slide(uint32_t slide, vm_shared_region_unlock(); error = vm_shared_region_slide_mapping(sr, - slide_size, - entry_start_address, - entry_size, - slid_mapping, - slide, - sr_file_control); + slide_size, + entry_start_address, + entry_size, + slid_mapping, + slide, + sr_file_control); if (error) { printf("slide_info initialization failed with kr=%d\n", error); goto done; } slide_info_entry = vm_shared_region_get_slide_info_entry(sr); - if (slide_info_entry == NULL){ + if (slide_info_entry == NULL) { error = KERN_FAILURE; - } else { + } else { error = copyin((user_addr_t)slide_start, - slide_info_entry, - (vm_size_t)slide_size); - if (error) { + slide_info_entry, + (vm_size_t)slide_size); + if (error) { error = KERN_INVALID_ADDRESS; } } if (error) { goto done; } - + if (vm_shared_region_slide_sanity_check(sr) != KERN_SUCCESS) { - error = KERN_INVALID_ARGUMENT; - printf("Sanity Check failed for slide_info\n"); - } else { + error = KERN_INVALID_ARGUMENT; + printf("Sanity Check failed for slide_info\n"); + } else { #if DEBUG printf("Succesfully init slide_info with start_address: %p region_size: %ld slide_header_size: %ld\n", - (void*)(uintptr_t)entry_start_address, - (unsigned long)entry_size, - (unsigned long)slide_size); + (void*)(uintptr_t)entry_start_address, + (unsigned long)entry_size, + (unsigned long)slide_size); #endif } done: @@ -3011,10 +3005,10 @@ done: /* * We don't know how to tear down a slid shared region today, because * we would have to invalidate all the pages that have been slid - * atomically with respect to anyone mapping the shared region afresh. - * Therefore, take a dangling reference to prevent teardown. + * atomically with respect to anyone mapping the shared region afresh. + * Therefore, take a dangling reference to prevent teardown. */ - sr->sr_ref_count++; + sr->sr_ref_count++; #ifndef CONFIG_EMBEDDED shared_region_completed_slide = TRUE; #endif @@ -3025,12 +3019,12 @@ done: SHARED_REGION_TRACE_DEBUG( ("vm_shared_region_slide: <- %d\n", - error)); + error)); return error; } -/* +/* * This is called from powermanagement code to let kernel know the current source of power. 
* 0 if it is external source (connected to power ) * 1 if it is internal power source ie battery @@ -3052,15 +3046,16 @@ post_sys_powersource(__unused int i) static void post_sys_powersource_internal(int i, int internal) { - if (internal == 0) + if (internal == 0) { __system_power_source = i; + } if (__commpage_setup != 0) { - if (__system_power_source != 0) + if (__system_power_source != 0) { commpage_set_spin_count(0); - else + } else { commpage_set_spin_count(MP_SPIN_TRIES); + } } } #endif /* __i386__ || __x86_64__ */ - diff --git a/osfmk/vm/vm_shared_region.h b/osfmk/vm/vm_shared_region.h index f57b3c891..bfe7f518b 100644 --- a/osfmk/vm/vm_shared_region.h +++ b/osfmk/vm/vm_shared_region.h @@ -2,14 +2,14 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in * compliance with the License. Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this * file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -17,20 +17,20 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_LICENSE_HEADER_END@ */ /* * * File: vm/vm_shared_region.h * - * protos and struct definitions for shared region + * protos and struct definitions for shared region */ #ifndef _VM_SHARED_REGION_H_ #define _VM_SHARED_REGION_H_ -#ifdef KERNEL_PRIVATE +#ifdef KERNEL_PRIVATE #include #include @@ -46,11 +46,11 @@ extern int shared_region_persistence; #if DEBUG extern int shared_region_debug; -#define SHARED_REGION_DEBUG(args) \ - MACRO_BEGIN \ - if (shared_region_debug) { \ - kprintf args; \ - } \ +#define SHARED_REGION_DEBUG(args) \ + MACRO_BEGIN \ + if (shared_region_debug) { \ + kprintf args; \ + } \ MACRO_END #else /* DEBUG */ #define SHARED_REGION_DEBUG(args) @@ -60,31 +60,31 @@ extern int shared_region_trace_level; extern struct vm_shared_region *init_task_shared_region; -#define SHARED_REGION_TRACE_NONE_LVL 0 /* no trace */ -#define SHARED_REGION_TRACE_ERROR_LVL 1 /* trace abnormal events */ -#define SHARED_REGION_TRACE_INFO_LVL 2 /* trace all events */ -#define SHARED_REGION_TRACE_DEBUG_LVL 3 /* extra traces for debug */ -#define SHARED_REGION_TRACE(level, args) \ - MACRO_BEGIN \ - if (shared_region_trace_level >= level) { \ - printf args; \ - } \ +#define SHARED_REGION_TRACE_NONE_LVL 0 /* no trace */ +#define SHARED_REGION_TRACE_ERROR_LVL 1 /* trace abnormal events */ +#define SHARED_REGION_TRACE_INFO_LVL 2 /* trace all events */ +#define SHARED_REGION_TRACE_DEBUG_LVL 3 /* extra traces for debug */ +#define SHARED_REGION_TRACE(level, args) \ + MACRO_BEGIN \ + if (shared_region_trace_level >= level) { \ + printf args; \ + } \ MACRO_END #define SHARED_REGION_TRACE_NONE(args) -#define SHARED_REGION_TRACE_ERROR(args) \ - MACRO_BEGIN \ - SHARED_REGION_TRACE(SHARED_REGION_TRACE_ERROR_LVL, \ - args); \ +#define SHARED_REGION_TRACE_ERROR(args) \ + MACRO_BEGIN \ + SHARED_REGION_TRACE(SHARED_REGION_TRACE_ERROR_LVL, \ + args); \ MACRO_END -#define SHARED_REGION_TRACE_INFO(args) \ - MACRO_BEGIN \ - 
SHARED_REGION_TRACE(SHARED_REGION_TRACE_INFO_LVL, \ - args); \ +#define SHARED_REGION_TRACE_INFO(args) \ + MACRO_BEGIN \ + SHARED_REGION_TRACE(SHARED_REGION_TRACE_INFO_LVL, \ + args); \ MACRO_END -#define SHARED_REGION_TRACE_DEBUG(args) \ - MACRO_BEGIN \ - SHARED_REGION_TRACE(SHARED_REGION_TRACE_DEBUG_LVL, \ - args); \ +#define SHARED_REGION_TRACE_DEBUG(args) \ + MACRO_BEGIN \ + SHARED_REGION_TRACE(SHARED_REGION_TRACE_DEBUG_LVL, \ + args); \ MACRO_END typedef struct vm_shared_region *vm_shared_region_t; @@ -95,75 +95,74 @@ typedef struct vm_shared_region *vm_shared_region_t; #include #include -#define PAGE_SIZE_FOR_SR_SLIDE 4096 +#define PAGE_SIZE_FOR_SR_SLIDE 4096 /* Documentation for the slide info format can be found in the dyld project in * the file 'launch-cache/dyld_cache_format.h'. */ typedef struct vm_shared_region_slide_info_entry_v1 *vm_shared_region_slide_info_entry_v1_t; struct vm_shared_region_slide_info_entry_v1 { - uint32_t version; - uint32_t toc_offset; // offset from start of header to table-of-contents - uint32_t toc_count; // number of entries in toc (same as number of pages in r/w mapping) - uint32_t entry_offset; - uint32_t entry_count; + uint32_t version; + uint32_t toc_offset; // offset from start of header to table-of-contents + uint32_t toc_count; // number of entries in toc (same as number of pages in r/w mapping) + uint32_t entry_offset; + uint32_t entry_count; // uint16_t toc[toc_count]; // entrybitmap entries[entries_count]; }; -#define NBBY 8 -#define NUM_SLIDING_BITMAPS_PER_PAGE (0x1000/sizeof(int)/NBBY) /*128*/ -typedef struct slide_info_entry_toc *slide_info_entry_toc_t; -struct slide_info_entry_toc { +#define NBBY 8 +#define NUM_SLIDING_BITMAPS_PER_PAGE (0x1000/sizeof(int)/NBBY) /*128*/ +typedef struct slide_info_entry_toc *slide_info_entry_toc_t; +struct slide_info_entry_toc { uint8_t entry[NUM_SLIDING_BITMAPS_PER_PAGE]; }; typedef struct vm_shared_region_slide_info_entry_v2 *vm_shared_region_slide_info_entry_v2_t; struct vm_shared_region_slide_info_entry_v2 { - uint32_t version; - uint32_t page_size; - uint32_t page_starts_offset; - uint32_t page_starts_count; - uint32_t page_extras_offset; - uint32_t page_extras_count; - uint64_t delta_mask; // which (contiguous) set of bits contains the delta to the next rebase location - uint64_t value_add; + uint32_t version; + uint32_t page_size; + uint32_t page_starts_offset; + uint32_t page_starts_count; + uint32_t page_extras_offset; + uint32_t page_extras_count; + uint64_t delta_mask; // which (contiguous) set of bits contains the delta to the next rebase location + uint64_t value_add; // uint16_t page_starts[page_starts_count]; // uint16_t page_extras[page_extras_count]; }; -#define DYLD_CACHE_SLIDE_PAGE_ATTRS 0xC000 // high bits of uint16_t are flags -#define DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA 0x8000 // index is into extras array (not starts array) -#define DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE 0x4000 // page has no rebasing -#define DYLD_CACHE_SLIDE_PAGE_ATTR_END 0x8000 // last chain entry for page -#define DYLD_CACHE_SLIDE_PAGE_VALUE 0x3FFF // bitwise negation of DYLD_CACHE_SLIDE_PAGE_ATTRS -#define DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT 2 +#define DYLD_CACHE_SLIDE_PAGE_ATTRS 0xC000 // high bits of uint16_t are flags +#define DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA 0x8000 // index is into extras array (not starts array) +#define DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE 0x4000 // page has no rebasing +#define DYLD_CACHE_SLIDE_PAGE_ATTR_END 0x8000 // last chain entry for page +#define DYLD_CACHE_SLIDE_PAGE_VALUE 0x3FFF // 
bitwise negation of DYLD_CACHE_SLIDE_PAGE_ATTRS +#define DYLD_CACHE_SLIDE_PAGE_OFFSET_SHIFT 2 typedef struct vm_shared_region_slide_info_entry_v3 *vm_shared_region_slide_info_entry_v3_t; -struct vm_shared_region_slide_info_entry_v3 -{ - uint32_t version; // currently 3 - uint32_t page_size; // currently 4096 (may also be 16384) - uint32_t page_starts_count; - uint64_t value_add; - uint16_t page_starts[/* page_starts_count */]; +struct vm_shared_region_slide_info_entry_v3 { + uint32_t version; // currently 3 + uint32_t page_size; // currently 4096 (may also be 16384) + uint32_t page_starts_count; + uint64_t value_add; + uint16_t page_starts[] /* page_starts_count */; }; -#define DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE 0xFFFF // page has no rebasing +#define DYLD_CACHE_SLIDE_V3_PAGE_ATTR_NO_REBASE 0xFFFF // page has no rebasing typedef struct vm_shared_region_slide_info_entry_v4 *vm_shared_region_slide_info_entry_v4_t; struct vm_shared_region_slide_info_entry_v4 { - uint32_t version; // currently 4 - uint32_t page_size; // currently 4096 (may also be 16384) - uint32_t page_starts_offset; - uint32_t page_starts_count; - uint32_t page_extras_offset; - uint32_t page_extras_count; - uint64_t delta_mask; // which (contiguous) set of bits contains the delta to the next rebase location (0xC0000000) - uint64_t value_add; // base address of cache - // uint16_t page_starts[page_starts_count]; - // uint16_t page_extras[page_extras_count]; + uint32_t version; // currently 4 + uint32_t page_size; // currently 4096 (may also be 16384) + uint32_t page_starts_offset; + uint32_t page_starts_count; + uint32_t page_extras_offset; + uint32_t page_extras_count; + uint64_t delta_mask; // which (contiguous) set of bits contains the delta to the next rebase location (0xC0000000) + uint64_t value_add; // base address of cache + // uint16_t page_starts[page_starts_count]; + // uint16_t page_extras[page_extras_count]; }; #define DYLD_CACHE_SLIDE4_PAGE_NO_REBASE 0xFFFF // page has no rebasing @@ -175,54 +174,54 @@ struct vm_shared_region_slide_info_entry_v4 { typedef union vm_shared_region_slide_info_entry *vm_shared_region_slide_info_entry_t; union vm_shared_region_slide_info_entry { - uint32_t version; - struct vm_shared_region_slide_info_entry_v1 v1; - struct vm_shared_region_slide_info_entry_v2 v2; - struct vm_shared_region_slide_info_entry_v3 v3; - struct vm_shared_region_slide_info_entry_v4 v4; + uint32_t version; + struct vm_shared_region_slide_info_entry_v1 v1; + struct vm_shared_region_slide_info_entry_v2 v2; + struct vm_shared_region_slide_info_entry_v3 v3; + struct vm_shared_region_slide_info_entry_v4 v4; }; typedef struct vm_shared_region_slide_info *vm_shared_region_slide_info_t; struct vm_shared_region_slide_info { - mach_vm_address_t slid_address; - mach_vm_offset_t start; - mach_vm_offset_t end; - uint32_t slide; - vm_object_t slide_object; - mach_vm_size_t slide_info_size; - vm_shared_region_slide_info_entry_t slide_info_entry; + mach_vm_address_t slid_address; + mach_vm_offset_t start; + mach_vm_offset_t end; + uint32_t slide; + vm_object_t slide_object; + mach_vm_size_t slide_info_size; + vm_shared_region_slide_info_entry_t slide_info_entry; }; /* address space shared region descriptor */ struct vm_shared_region { - uint32_t sr_ref_count; - queue_chain_t sr_q; - void *sr_root_dir; - cpu_type_t sr_cpu_type; - cpu_subtype_t sr_cpu_subtype; - boolean_t sr_64bit; - boolean_t sr_mapping_in_progress; - boolean_t sr_slide_in_progress; - boolean_t sr_persists; - boolean_t sr_slid; - ipc_port_t sr_mem_entry; 
- mach_vm_offset_t sr_first_mapping; - mach_vm_offset_t sr_base_address; - mach_vm_size_t sr_size; - mach_vm_offset_t sr_pmap_nesting_start; - mach_vm_size_t sr_pmap_nesting_size; - thread_call_t sr_timer_call; + uint32_t sr_ref_count; + queue_chain_t sr_q; + void *sr_root_dir; + cpu_type_t sr_cpu_type; + cpu_subtype_t sr_cpu_subtype; + boolean_t sr_64bit; + boolean_t sr_mapping_in_progress; + boolean_t sr_slide_in_progress; + boolean_t sr_persists; + boolean_t sr_slid; + ipc_port_t sr_mem_entry; + mach_vm_offset_t sr_first_mapping; + mach_vm_offset_t sr_base_address; + mach_vm_size_t sr_size; + mach_vm_offset_t sr_pmap_nesting_start; + mach_vm_size_t sr_pmap_nesting_size; + thread_call_t sr_timer_call; struct vm_shared_region_slide_info sr_slide_info; - uuid_t sr_uuid; - boolean_t sr_uuid_copied; - uint32_t sr_images_count; + uuid_t sr_uuid; + boolean_t sr_uuid_copied; + uint32_t sr_images_count; struct dyld_uuid_info_64 *sr_images; }; extern kern_return_t vm_shared_region_slide_page(vm_shared_region_slide_info_t si, - vm_offset_t vaddr, - mach_vm_offset_t uservaddr, - uint32_t pageIndex); + vm_offset_t vaddr, + mach_vm_offset_t uservaddr, + uint32_t pageIndex); extern vm_shared_region_slide_info_t vm_shared_region_get_slide_info(vm_shared_region_t sr); #else /* !MACH_KERNEL_PRIVATE */ @@ -235,77 +234,77 @@ struct slide_info_entry_toc; extern void vm_shared_region_init(void); extern kern_return_t vm_shared_region_enter( - struct _vm_map *map, - struct task *task, - boolean_t is_64bit, - void *fsroot, - cpu_type_t cpu, - cpu_subtype_t cpu_subtype); + struct _vm_map *map, + struct task *task, + boolean_t is_64bit, + void *fsroot, + cpu_type_t cpu, + cpu_subtype_t cpu_subtype); extern kern_return_t vm_shared_region_remove( - struct _vm_map *map, - struct task *task); + struct _vm_map *map, + struct task *task); extern vm_shared_region_t vm_shared_region_get( - struct task *task); + struct task *task); extern vm_shared_region_t vm_shared_region_trim_and_get( - struct task *task); + struct task *task); extern void vm_shared_region_deallocate( - struct vm_shared_region *shared_region); + struct vm_shared_region *shared_region); extern mach_vm_offset_t vm_shared_region_base_address( - struct vm_shared_region *shared_region); + struct vm_shared_region *shared_region); extern mach_vm_size_t vm_shared_region_size( - struct vm_shared_region *shared_region); + struct vm_shared_region *shared_region); extern ipc_port_t vm_shared_region_mem_entry( - struct vm_shared_region *shared_region); + struct vm_shared_region *shared_region); extern vm_map_t vm_shared_region_vm_map( - struct vm_shared_region *shared_region); + struct vm_shared_region *shared_region); extern uint32_t vm_shared_region_get_slide( - vm_shared_region_t shared_region); + vm_shared_region_t shared_region); extern void vm_shared_region_set( - struct task *task, - struct vm_shared_region *new_shared_region); + struct task *task, + struct vm_shared_region *new_shared_region); extern vm_shared_region_t vm_shared_region_lookup( - void *root_dir, - cpu_type_t cpu, - cpu_subtype_t cpu_subtype, - boolean_t is_64bit); + void *root_dir, + cpu_type_t cpu, + cpu_subtype_t cpu_subtype, + boolean_t is_64bit); extern kern_return_t vm_shared_region_start_address( - struct vm_shared_region *shared_region, - mach_vm_offset_t *start_address); + struct vm_shared_region *shared_region, + mach_vm_offset_t *start_address); extern void vm_shared_region_undo_mappings( - vm_map_t sr_map, - mach_vm_offset_t sr_base_address, - struct shared_file_mapping_np 
*mappings, - unsigned int mappings_count); + vm_map_t sr_map, + mach_vm_offset_t sr_base_address, + struct shared_file_mapping_np *mappings, + unsigned int mappings_count); extern kern_return_t vm_shared_region_map_file( - struct vm_shared_region *shared_region, - unsigned int mappings_count, + struct vm_shared_region *shared_region, + unsigned int mappings_count, struct shared_file_mapping_np *mappings, - memory_object_control_t file_control, - memory_object_size_t file_size, - void *root_dir, - uint32_t slide, - user_addr_t slide_start, - user_addr_t slide_size); + memory_object_control_t file_control, + memory_object_size_t file_size, + void *root_dir, + uint32_t slide, + user_addr_t slide_start, + user_addr_t slide_size); extern kern_return_t vm_shared_region_sliding_valid(uint32_t slide); extern kern_return_t vm_shared_region_slide_sanity_check(vm_shared_region_t sr); extern void* vm_shared_region_get_slide_info_entry(vm_shared_region_t sr); extern void vm_commpage_init(void); extern void vm_commpage_text_init(void); extern kern_return_t vm_commpage_enter( - struct _vm_map *map, - struct task *task, - boolean_t is64bit); + struct _vm_map *map, + struct task *task, + boolean_t is64bit); extern kern_return_t vm_commpage_remove( - struct _vm_map *map, - struct task *task); -int vm_shared_region_slide(uint32_t, - mach_vm_offset_t, - mach_vm_size_t, - mach_vm_offset_t, - mach_vm_size_t, - mach_vm_offset_t, - memory_object_control_t); + struct _vm_map *map, + struct task *task); +int vm_shared_region_slide(uint32_t, + mach_vm_offset_t, + mach_vm_size_t, + mach_vm_offset_t, + mach_vm_size_t, + mach_vm_offset_t, + memory_object_control_t); #endif /* KERNEL_PRIVATE */ -#endif /* _VM_SHARED_REGION_H_ */ +#endif /* _VM_SHARED_REGION_H_ */ diff --git a/osfmk/vm/vm_shared_region_pager.c b/osfmk/vm/vm_shared_region_pager.c index 773233d24..a4d1fc46f 100644 --- a/osfmk/vm/vm_shared_region_pager.c +++ b/osfmk/vm/vm_shared_region_pager.c @@ -82,35 +82,35 @@ void shared_region_pager_reference(memory_object_t mem_obj); void shared_region_pager_deallocate(memory_object_t mem_obj); kern_return_t shared_region_pager_init(memory_object_t mem_obj, - memory_object_control_t control, - memory_object_cluster_size_t pg_size); + memory_object_control_t control, + memory_object_cluster_size_t pg_size); kern_return_t shared_region_pager_terminate(memory_object_t mem_obj); kern_return_t shared_region_pager_data_request(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, - vm_prot_t protection_required, - memory_object_fault_info_t fault_info); + memory_object_offset_t offset, + memory_object_cluster_size_t length, + vm_prot_t protection_required, + memory_object_fault_info_t fault_info); kern_return_t shared_region_pager_data_return(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt, - memory_object_offset_t *resid_offset, - int *io_error, - boolean_t dirty, - boolean_t kernel_copy, - int upl_flags); + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt, + memory_object_offset_t *resid_offset, + int *io_error, + boolean_t dirty, + boolean_t kernel_copy, + int upl_flags); kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt); + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt); kern_return_t shared_region_pager_data_unlock(memory_object_t mem_obj, - memory_object_offset_t offset, - 
memory_object_size_t size, - vm_prot_t desired_access); + memory_object_offset_t offset, + memory_object_size_t size, + vm_prot_t desired_access); kern_return_t shared_region_pager_synchronize(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t length, - vm_sync_t sync_flags); + memory_object_offset_t offset, + memory_object_size_t length, + vm_sync_t sync_flags); kern_return_t shared_region_pager_map(memory_object_t mem_obj, - vm_prot_t prot); + vm_prot_t prot); kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj); /* @@ -142,24 +142,24 @@ typedef struct shared_region_pager { struct memory_object sc_pgr_hdr; /* pager-specific data */ - queue_chain_t pager_queue; /* next & prev pagers */ - unsigned int ref_count; /* reference count */ - boolean_t is_ready; /* is this pager ready ? */ - boolean_t is_mapped; /* is this mem_obj mapped ? */ - vm_object_t backing_object; /* VM obj for shared cache */ - vm_object_offset_t backing_offset; + queue_chain_t pager_queue; /* next & prev pagers */ + unsigned int ref_count; /* reference count */ + boolean_t is_ready; /* is this pager ready ? */ + boolean_t is_mapped; /* is this mem_obj mapped ? */ + vm_object_t backing_object; /* VM obj for shared cache */ + vm_object_offset_t backing_offset; struct vm_shared_region_slide_info *scp_slide_info; } *shared_region_pager_t; -#define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL) +#define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL) /* * List of memory objects managed by this EMM. * The list is protected by the "shared_region_pager_lock" lock. */ -int shared_region_pager_count = 0; /* number of pagers */ -int shared_region_pager_count_mapped = 0; /* number of unmapped pagers */ +int shared_region_pager_count = 0; /* number of pagers */ +int shared_region_pager_count_mapped = 0; /* number of unmapped pagers */ queue_head_t shared_region_pager_queue; -decl_lck_mtx_data(,shared_region_pager_lock) +decl_lck_mtx_data(, shared_region_pager_lock) /* * Maximum number of unmapped pagers we're willing to keep around. 
@@ -175,9 +175,9 @@ int shared_region_pager_num_trim_max = 0; int shared_region_pager_num_trim_total = 0; -lck_grp_t shared_region_pager_lck_grp; -lck_grp_attr_t shared_region_pager_lck_grp_attr; -lck_attr_t shared_region_pager_lck_attr; +lck_grp_t shared_region_pager_lck_grp; +lck_grp_attr_t shared_region_pager_lck_grp_attr; +lck_attr_t shared_region_pager_lck_attr; uint64_t shared_region_pager_copied = 0; uint64_t shared_region_pager_slid = 0; @@ -192,22 +192,22 @@ shared_region_pager_t shared_region_pager_create( shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj); void shared_region_pager_dequeue(shared_region_pager_t pager); void shared_region_pager_deallocate_internal(shared_region_pager_t pager, - boolean_t locked); + boolean_t locked); void shared_region_pager_terminate_internal(shared_region_pager_t pager); void shared_region_pager_trim(void); #if DEBUG int shared_region_pagerdebug = 0; -#define PAGER_ALL 0xffffffff -#define PAGER_INIT 0x00000001 -#define PAGER_PAGEIN 0x00000002 - -#define PAGER_DEBUG(LEVEL, A) \ - MACRO_BEGIN \ - if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) { \ - printf A; \ - } \ +#define PAGER_ALL 0xffffffff +#define PAGER_INIT 0x00000001 +#define PAGER_PAGEIN 0x00000002 + +#define PAGER_DEBUG(LEVEL, A) \ + MACRO_BEGIN \ + if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) { \ + printf A; \ + } \ MACRO_END #else #define PAGER_DEBUG(LEVEL, A) @@ -231,23 +231,24 @@ shared_region_pager_bootstrap(void) */ kern_return_t shared_region_pager_init( - memory_object_t mem_obj, - memory_object_control_t control, + memory_object_t mem_obj, + memory_object_control_t control, #if !DEBUG __unused #endif memory_object_cluster_size_t pg_size) { - shared_region_pager_t pager; - kern_return_t kr; + shared_region_pager_t pager; + kern_return_t kr; memory_object_attr_info_data_t attributes; PAGER_DEBUG(PAGER_ALL, - ("shared_region_pager_init: %p, %p, %x\n", - mem_obj, control, pg_size)); + ("shared_region_pager_init: %p, %p, %x\n", + mem_obj, control, pg_size)); - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } pager = shared_region_pager_lookup(mem_obj); @@ -262,13 +263,14 @@ shared_region_pager_init( attributes.temporary = TRUE; kr = memory_object_change_attributes( - control, - MEMORY_OBJECT_ATTRIBUTE_INFO, - (memory_object_info_t) &attributes, - MEMORY_OBJECT_ATTR_INFO_COUNT); - if (kr != KERN_SUCCESS) + control, + MEMORY_OBJECT_ATTRIBUTE_INFO, + (memory_object_info_t) &attributes, + MEMORY_OBJECT_ATTR_INFO_COUNT); + if (kr != KERN_SUCCESS) { panic("shared_region_pager_init: " - "memory_object_change_attributes() failed"); + "memory_object_change_attributes() failed"); + } #if CONFIG_SECLUDED_MEMORY if (secluded_for_filecache) { @@ -297,14 +299,14 @@ shared_region_pager_init( */ kern_return_t shared_region_pager_data_return( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt, - __unused memory_object_offset_t *resid_offset, - __unused int *io_error, - __unused boolean_t dirty, - __unused boolean_t kernel_copy, - __unused int upl_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt, + __unused memory_object_offset_t *resid_offset, + __unused int *io_error, + __unused boolean_t dirty, + __unused boolean_t kernel_copy, + __unused int upl_flags) { panic("shared_region_pager_data_return: should never get called"); return 
KERN_FAILURE; @@ -312,9 +314,9 @@ shared_region_pager_data_return( kern_return_t shared_region_pager_data_initialize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt) { panic("shared_region_pager_data_initialize: should never get called"); return KERN_FAILURE; @@ -322,10 +324,10 @@ shared_region_pager_data_initialize( kern_return_t shared_region_pager_data_unlock( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t size, - __unused vm_prot_t desired_access) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t size, + __unused vm_prot_t desired_access) { return KERN_FAILURE; } @@ -338,33 +340,33 @@ shared_region_pager_data_unlock( int shared_region_pager_data_request_debug = 0; kern_return_t shared_region_pager_data_request( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t length, #if !DEBUG __unused #endif - vm_prot_t protection_required, + vm_prot_t protection_required, memory_object_fault_info_t mo_fault_info) { - shared_region_pager_t pager; - memory_object_control_t mo_control; - upl_t upl; - int upl_flags; - upl_size_t upl_size; - upl_page_info_t *upl_pl; - unsigned int pl_count; - vm_object_t src_top_object, src_page_object, dst_object; - kern_return_t kr, retval; - vm_offset_t src_vaddr, dst_vaddr; - vm_offset_t cur_offset; - vm_offset_t offset_in_page; - kern_return_t error_code; - vm_prot_t prot; - vm_page_t src_page, top_page; - int interruptible; - struct vm_object_fault_info fault_info; - mach_vm_offset_t slide_start_address; + shared_region_pager_t pager; + memory_object_control_t mo_control; + upl_t upl; + int upl_flags; + upl_size_t upl_size; + upl_page_info_t *upl_pl; + unsigned int pl_count; + vm_object_t src_top_object, src_page_object, dst_object; + kern_return_t kr, retval; + vm_offset_t src_vaddr, dst_vaddr; + vm_offset_t cur_offset; + vm_offset_t offset_in_page; + kern_return_t error_code; + vm_prot_t prot; + vm_page_t src_page, top_page; + int interruptible; + struct vm_object_fault_info fault_info; + mach_vm_offset_t slide_start_address; PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required)); @@ -393,15 +395,15 @@ shared_region_pager_data_request( upl_size = length; upl_flags = - UPL_RET_ONLY_ABSENT | - UPL_SET_LITE | - UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ - UPL_SET_INTERNAL; + UPL_RET_ONLY_ABSENT | + UPL_SET_LITE | + UPL_NO_SYNC | + UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ + UPL_SET_INTERNAL; pl_count = 0; kr = memory_object_upl_request(mo_control, - offset, upl_size, - &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY); + offset, upl_size, + &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY); if (kr != KERN_SUCCESS) { retval = kr; goto done; @@ -429,8 +431,8 @@ shared_region_pager_data_request( upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl); pl_count = length / PAGE_SIZE; for (cur_offset = 0; - retval == KERN_SUCCESS && cur_offset < length; - cur_offset += PAGE_SIZE) { + retval == KERN_SUCCESS && cur_offset < length; + cur_offset += PAGE_SIZE) { ppnum_t dst_pnum; if (!upl_page_present(upl_pl, 
(int)(cur_offset / PAGE_SIZE))) { @@ -443,25 +445,25 @@ shared_region_pager_data_request( * virtual address space. * We already hold a reference on the src_top_object. */ - retry_src_fault: +retry_src_fault: vm_object_lock(src_top_object); vm_object_paging_begin(src_top_object); error_code = 0; prot = VM_PROT_READ; src_page = VM_PAGE_NULL; kr = vm_fault_page(src_top_object, - pager->backing_offset + offset + cur_offset, - VM_PROT_READ, - FALSE, - FALSE, /* src_page not looked up */ - &prot, - &src_page, - &top_page, - NULL, - &error_code, - FALSE, - FALSE, - &fault_info); + pager->backing_offset + offset + cur_offset, + VM_PROT_READ, + FALSE, + FALSE, /* src_page not looked up */ + &prot, + &src_page, + &top_page, + NULL, + &error_code, + FALSE, + FALSE, + &fault_info); switch (kr) { case VM_FAULT_SUCCESS: break; @@ -471,7 +473,7 @@ shared_region_pager_data_request( if (vm_page_wait(interruptible)) { goto retry_src_fault; } - /* fall thru */ + /* fall thru */ case VM_FAULT_INTERRUPTED: retval = MACH_SEND_INTERRUPTED; goto done; @@ -479,7 +481,7 @@ shared_region_pager_data_request( /* success but no VM page: fail */ vm_object_paging_end(src_top_object); vm_object_unlock(src_top_object); - /*FALLTHROUGH*/ + /*FALLTHROUGH*/ case VM_FAULT_MEMORY_ERROR: /* the page is not there ! */ if (error_code) { @@ -490,8 +492,8 @@ shared_region_pager_data_request( goto done; default: panic("shared_region_pager_data_request: " - "vm_fault_page() unexpected error 0x%x\n", - kr); + "vm_fault_page() unexpected error 0x%x\n", + kr); } assert(src_page != VM_PAGE_NULL); assert(src_page->vmp_busy); @@ -499,7 +501,7 @@ shared_region_pager_data_request( if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { vm_page_lockspin_queues(); if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) { - vm_page_speculate(src_page, FALSE); + vm_page_speculate(src_page, FALSE); } vm_page_unlock_queues(); } @@ -509,21 +511,21 @@ shared_region_pager_data_request( * and destination physical pages. */ dst_pnum = (ppnum_t) - upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); - assert(dst_pnum != 0); + upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); + assert(dst_pnum != 0); #if __x86_64__ src_vaddr = (vm_map_offset_t) - PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) - << PAGE_SHIFT); + PHYSMAP_PTOV((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) + << PAGE_SHIFT); dst_vaddr = (vm_map_offset_t) - PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); + PHYSMAP_PTOV((pmap_paddr_t)dst_pnum << PAGE_SHIFT); #elif __arm__ || __arm64__ src_vaddr = (vm_map_offset_t) - phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) - << PAGE_SHIFT); + phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) + << PAGE_SHIFT); dst_vaddr = (vm_map_offset_t) - phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT); + phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT); #else #error "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..." src_vaddr = 0; @@ -543,11 +545,11 @@ shared_region_pager_data_request( * ... and transfer the results to the destination page. */ UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, - src_page->vmp_cs_validated); + src_page->vmp_cs_validated); UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, - src_page->vmp_cs_tainted); + src_page->vmp_cs_tainted); UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, - src_page->vmp_cs_nx); + src_page->vmp_cs_nx); /* * The page provider might access a mapped file, so let's @@ -566,8 +568,8 @@ shared_region_pager_data_request( * into the destination page. 
*/ for (offset_in_page = 0; - offset_in_page < PAGE_SIZE; - offset_in_page += PAGE_SIZE_FOR_SR_SLIDE) { + offset_in_page < PAGE_SIZE; + offset_in_page += PAGE_SIZE_FOR_SR_SLIDE) { vm_object_offset_t chunk_offset; vm_object_offset_t offset_in_backing_object; vm_object_offset_t offset_in_sliding_range; @@ -575,12 +577,12 @@ shared_region_pager_data_request( chunk_offset = offset + cur_offset + offset_in_page; bcopy((const char *)(src_vaddr + - offset_in_page), - (char *)(dst_vaddr + offset_in_page), - PAGE_SIZE_FOR_SR_SLIDE); + offset_in_page), + (char *)(dst_vaddr + offset_in_page), + PAGE_SIZE_FOR_SR_SLIDE); offset_in_backing_object = (chunk_offset + - pager->backing_offset); + pager->backing_offset); if ((offset_in_backing_object < pager->scp_slide_info->start) || (offset_in_backing_object >= pager->scp_slide_info->end)) { /* chunk is outside of sliding range: done */ @@ -589,49 +591,49 @@ shared_region_pager_data_request( } offset_in_sliding_range = - (offset_in_backing_object - - pager->scp_slide_info->start); + (offset_in_backing_object - + pager->scp_slide_info->start); kr = vm_shared_region_slide_page( pager->scp_slide_info, dst_vaddr + offset_in_page, (mach_vm_offset_t) (offset_in_sliding_range + - slide_start_address), + slide_start_address), (uint32_t) (offset_in_sliding_range / - PAGE_SIZE_FOR_SR_SLIDE)); + PAGE_SIZE_FOR_SR_SLIDE)); if (shared_region_pager_data_request_debug) { printf("shared_region_data_request" - "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx " - "in sliding range [0x%llx:0x%llx]: " - "SLIDE offset 0x%llx=" - "(0x%llx+0x%llx+0x%llx+0x%04llx)" - "[0x%016llx 0x%016llx] " - "code_signed=%d " - "cs_validated=%d " - "cs_tainted=%d " - "cs_nx=%d " - "kr=0x%x\n", - pager, - offset, - (uint64_t) cur_offset, - (uint64_t) offset_in_page, - chunk_offset, - pager->scp_slide_info->start, - pager->scp_slide_info->end, - (pager->backing_offset + - offset + - cur_offset + - offset_in_page), - pager->backing_offset, - offset, - (uint64_t) cur_offset, - (uint64_t) offset_in_page, - *(uint64_t *)(dst_vaddr+offset_in_page), - *(uint64_t *)(dst_vaddr+offset_in_page+8), - src_page_object->code_signed, - src_page->vmp_cs_validated, - src_page->vmp_cs_tainted, - src_page->vmp_cs_nx, - kr); + "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx " + "in sliding range [0x%llx:0x%llx]: " + "SLIDE offset 0x%llx=" + "(0x%llx+0x%llx+0x%llx+0x%04llx)" + "[0x%016llx 0x%016llx] " + "code_signed=%d " + "cs_validated=%d " + "cs_tainted=%d " + "cs_nx=%d " + "kr=0x%x\n", + pager, + offset, + (uint64_t) cur_offset, + (uint64_t) offset_in_page, + chunk_offset, + pager->scp_slide_info->start, + pager->scp_slide_info->end, + (pager->backing_offset + + offset + + cur_offset + + offset_in_page), + pager->backing_offset, + offset, + (uint64_t) cur_offset, + (uint64_t) offset_in_page, + *(uint64_t *)(dst_vaddr + offset_in_page), + *(uint64_t *)(dst_vaddr + offset_in_page + 8), + src_page_object->code_signed, + src_page->vmp_cs_validated, + src_page->vmp_cs_tainted, + src_page->vmp_cs_nx, + kr); } if (kr != KERN_SUCCESS) { shared_region_pager_slid_error++; @@ -681,8 +683,8 @@ done: } else { boolean_t empty; upl_commit_range(upl, 0, upl->size, - UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, - upl_pl, pl_count, &empty); + UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL, + upl_pl, pl_count, &empty); } /* and deallocate the UPL */ @@ -704,9 +706,9 @@ done: */ void shared_region_pager_reference( - memory_object_t mem_obj) + memory_object_t mem_obj) { - shared_region_pager_t pager; + shared_region_pager_t pager; pager = 
shared_region_pager_lookup(mem_obj); @@ -731,9 +733,9 @@ shared_region_pager_dequeue( assert(!pager->is_mapped); queue_remove(&shared_region_pager_queue, - pager, - shared_region_pager_t, - pager_queue); + pager, + shared_region_pager_t, + pager_queue); pager->pager_queue.next = NULL; pager->pager_queue.prev = NULL; @@ -778,18 +780,18 @@ shared_region_pager_terminate_internal( */ void shared_region_pager_deallocate_internal( - shared_region_pager_t pager, - boolean_t locked) + shared_region_pager_t pager, + boolean_t locked) { - boolean_t needs_trimming; - int count_unmapped; + boolean_t needs_trimming; + int count_unmapped; - if (! locked) { + if (!locked) { lck_mtx_lock(&shared_region_pager_lock); } count_unmapped = (shared_region_pager_count - - shared_region_pager_count_mapped); + shared_region_pager_count_mapped); if (count_unmapped > shared_region_pager_cache_limit) { /* we have too many unmapped pagers: trim some */ needs_trimming = TRUE; @@ -821,7 +823,7 @@ shared_region_pager_deallocate_internal( memory_object_control_deallocate(pager->sc_pgr_hdr.mo_control); pager->sc_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; } - kfree(pager, sizeof (*pager)); + kfree(pager, sizeof(*pager)); pager = SHARED_REGION_PAGER_NULL; } else { /* there are still plenty of references: keep going... */ @@ -842,9 +844,9 @@ shared_region_pager_deallocate_internal( */ void shared_region_pager_deallocate( - memory_object_t mem_obj) + memory_object_t mem_obj) { - shared_region_pager_t pager; + shared_region_pager_t pager; PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj)); pager = shared_region_pager_lookup(mem_obj); @@ -859,7 +861,7 @@ shared_region_pager_terminate( #if !DEBUG __unused #endif - memory_object_t mem_obj) + memory_object_t mem_obj) { PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj)); @@ -871,10 +873,10 @@ shared_region_pager_terminate( */ kern_return_t shared_region_pager_synchronize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t length, - __unused vm_sync_t sync_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t length, + __unused vm_sync_t sync_flags) { panic("shared_region_pager_synchronize: memory_object_synchronize no longer supported\n"); return KERN_FAILURE; @@ -890,10 +892,10 @@ shared_region_pager_synchronize( */ kern_return_t shared_region_pager_map( - memory_object_t mem_obj, - __unused vm_prot_t prot) + memory_object_t mem_obj, + __unused vm_prot_t prot) { - shared_region_pager_t pager; + shared_region_pager_t pager; PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj)); @@ -924,13 +926,13 @@ shared_region_pager_map( */ kern_return_t shared_region_pager_last_unmap( - memory_object_t mem_obj) + memory_object_t mem_obj) { - shared_region_pager_t pager; - int count_unmapped; + shared_region_pager_t pager; + int count_unmapped; PAGER_DEBUG(PAGER_ALL, - ("shared_region_pager_last_unmap: %p\n", mem_obj)); + ("shared_region_pager_last_unmap: %p\n", mem_obj)); pager = shared_region_pager_lookup(mem_obj); @@ -942,7 +944,7 @@ shared_region_pager_last_unmap( */ shared_region_pager_count_mapped--; count_unmapped = (shared_region_pager_count - - shared_region_pager_count_mapped); + shared_region_pager_count_mapped); if (count_unmapped > shared_region_pager_count_unmapped_max) { shared_region_pager_count_unmapped_max = count_unmapped; } @@ -962,9 +964,9 @@ shared_region_pager_last_unmap( */ 
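/*
 * A minimal sketch, not the kernel code, of the lookup pattern used by
 * shared_region_pager_lookup() just below: the pager embeds its
 * struct memory_object as the first member, so the memory_object_t that
 * VM hands back converts to the pager with a cast, once the ops vector
 * has been checked to confirm the object really belongs to this pager
 * class. All names here are simplified stand-ins.
 */
#include <assert.h>
#include <stdint.h>

struct mo_ops         { int unused; };
struct memory_obj_hdr { const struct mo_ops *mo_pager_ops; };
struct my_pager {
	struct memory_obj_hdr hdr;  /* must remain the first member */
	int ref_count;
};
static const struct mo_ops my_pager_ops = { 0 };

static struct my_pager *
my_pager_lookup(struct memory_obj_hdr *mem_obj)
{
	/* reject memory objects managed by some other pager class */
	assert(mem_obj->mo_pager_ops == &my_pager_ops);
	return (struct my_pager *)(uintptr_t)mem_obj;
}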
shared_region_pager_t shared_region_pager_lookup( - memory_object_t mem_obj) + memory_object_t mem_obj) { - shared_region_pager_t pager; + shared_region_pager_t pager; assert(mem_obj->mo_pager_ops == &shared_region_pager_ops); pager = (shared_region_pager_t)(uintptr_t) mem_obj; @@ -974,15 +976,15 @@ shared_region_pager_lookup( shared_region_pager_t shared_region_pager_create( - vm_object_t backing_object, - vm_object_offset_t backing_offset, + vm_object_t backing_object, + vm_object_offset_t backing_offset, struct vm_shared_region_slide_info *slide_info) { - shared_region_pager_t pager; - memory_object_control_t control; - kern_return_t kr; + shared_region_pager_t pager; + memory_object_control_t control; + kern_return_t kr; - pager = (shared_region_pager_t) kalloc(sizeof (*pager)); + pager = (shared_region_pager_t) kalloc(sizeof(*pager)); if (pager == SHARED_REGION_PAGER_NULL) { return SHARED_REGION_PAGER_NULL; } @@ -999,8 +1001,8 @@ shared_region_pager_create( pager->sc_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; pager->is_ready = FALSE;/* not ready until it has a "name" */ - pager->ref_count = 1; /* existence reference (for the cache) */ - pager->ref_count++; /* for the caller */ + pager->ref_count = 1; /* existence reference (for the cache) */ + pager->ref_count++; /* for the caller */ pager->is_mapped = FALSE; pager->backing_object = backing_object; pager->backing_offset = backing_offset; @@ -1011,9 +1013,9 @@ shared_region_pager_create( lck_mtx_lock(&shared_region_pager_lock); /* enter new pager at the head of our list of pagers */ queue_enter_first(&shared_region_pager_queue, - pager, - shared_region_pager_t, - pager_queue); + pager, + shared_region_pager_t, + pager_queue); shared_region_pager_count++; if (shared_region_pager_count > shared_region_pager_count_max) { shared_region_pager_count_max = shared_region_pager_count; @@ -1021,8 +1023,8 @@ shared_region_pager_create( lck_mtx_unlock(&shared_region_pager_lock); kr = memory_object_create_named((memory_object_t) pager, - 0, - &control); + 0, + &control); assert(kr == KERN_SUCCESS); lck_mtx_lock(&shared_region_pager_lock); @@ -1044,11 +1046,11 @@ shared_region_pager_create( */ memory_object_t shared_region_pager_setup( - vm_object_t backing_object, - vm_object_offset_t backing_offset, + vm_object_t backing_object, + vm_object_offset_t backing_offset, struct vm_shared_region_slide_info *slide_info) { - shared_region_pager_t pager; + shared_region_pager_t pager; /* create new pager */ pager = shared_region_pager_create( @@ -1063,9 +1065,9 @@ shared_region_pager_setup( lck_mtx_lock(&shared_region_pager_lock); while (!pager->is_ready) { lck_mtx_sleep(&shared_region_pager_lock, - LCK_SLEEP_DEFAULT, - &pager->is_ready, - THREAD_UNINT); + LCK_SLEEP_DEFAULT, + &pager->is_ready, + THREAD_UNINT); } lck_mtx_unlock(&shared_region_pager_lock); @@ -1075,10 +1077,10 @@ shared_region_pager_setup( void shared_region_pager_trim(void) { - shared_region_pager_t pager, prev_pager; - queue_head_t trim_queue; - int num_trim; - int count_unmapped; + shared_region_pager_t pager, prev_pager; + queue_head_t trim_queue; + int num_trim; + int count_unmapped; lck_mtx_lock(&shared_region_pager_lock); @@ -1090,13 +1092,13 @@ shared_region_pager_trim(void) num_trim = 0; for (pager = (shared_region_pager_t) - queue_last(&shared_region_pager_queue); - !queue_end(&shared_region_pager_queue, - (queue_entry_t) pager); - pager = prev_pager) { + queue_last(&shared_region_pager_queue); + !queue_end(&shared_region_pager_queue, + (queue_entry_t) pager); + pager = 
prev_pager) { /* get prev elt before we dequeue */ prev_pager = (shared_region_pager_t) - queue_prev(&pager->pager_queue); + queue_prev(&pager->pager_queue); if (pager->ref_count == 2 && pager->is_ready && @@ -1107,12 +1109,12 @@ shared_region_pager_trim(void) shared_region_pager_dequeue(pager); /* ... and add it to our trim queue */ queue_enter_first(&trim_queue, - pager, - shared_region_pager_t, - pager_queue); + pager, + shared_region_pager_t, + pager_queue); count_unmapped = (shared_region_pager_count - - shared_region_pager_count_mapped); + shared_region_pager_count_mapped); if (count_unmapped <= shared_region_pager_cache_limit) { /* we have enough pagers to trim */ break; @@ -1129,9 +1131,9 @@ shared_region_pager_trim(void) /* terminate the trimmed pagers */ while (!queue_empty(&trim_queue)) { queue_remove_first(&trim_queue, - pager, - shared_region_pager_t, - pager_queue); + pager, + shared_region_pager_t, + pager_queue); pager->pager_queue.next = NULL; pager->pager_queue.prev = NULL; assert(pager->ref_count == 2); diff --git a/osfmk/vm/vm_swapfile_pager.c b/osfmk/vm/vm_swapfile_pager.c index 489297724..a8b27af22 100644 --- a/osfmk/vm/vm_swapfile_pager.c +++ b/osfmk/vm/vm_swapfile_pager.c @@ -2,7 +2,7 @@ * Copyright (c) 2008 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -40,8 +41,8 @@ #include -/* - * APPLE SWAPFILE MEMORY PAGER +/* + * APPLE SWAPFILE MEMORY PAGER * * This external memory manager (EMM) handles mappings of the swap files. 
* Swap files are not regular files and are used solely to store contents of @@ -78,35 +79,35 @@ void swapfile_pager_reference(memory_object_t mem_obj); void swapfile_pager_deallocate(memory_object_t mem_obj); kern_return_t swapfile_pager_init(memory_object_t mem_obj, - memory_object_control_t control, - memory_object_cluster_size_t pg_size); + memory_object_control_t control, + memory_object_cluster_size_t pg_size); kern_return_t swapfile_pager_terminate(memory_object_t mem_obj); kern_return_t swapfile_pager_data_request(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, - vm_prot_t protection_required, - memory_object_fault_info_t fault_info); + memory_object_offset_t offset, + memory_object_cluster_size_t length, + vm_prot_t protection_required, + memory_object_fault_info_t fault_info); kern_return_t swapfile_pager_data_return(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt, - memory_object_offset_t *resid_offset, - int *io_error, - boolean_t dirty, - boolean_t kernel_copy, - int upl_flags); + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt, + memory_object_offset_t *resid_offset, + int *io_error, + boolean_t dirty, + boolean_t kernel_copy, + int upl_flags); kern_return_t swapfile_pager_data_initialize(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t data_cnt); + memory_object_offset_t offset, + memory_object_cluster_size_t data_cnt); kern_return_t swapfile_pager_data_unlock(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t size, - vm_prot_t desired_access); + memory_object_offset_t offset, + memory_object_size_t size, + vm_prot_t desired_access); kern_return_t swapfile_pager_synchronize(memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_size_t length, - vm_sync_t sync_flags); + memory_object_offset_t offset, + memory_object_size_t length, + vm_sync_t sync_flags); kern_return_t swapfile_pager_map(memory_object_t mem_obj, - vm_prot_t prot); + vm_prot_t prot); kern_return_t swapfile_pager_last_unmap(memory_object_t mem_obj); /* @@ -138,21 +139,21 @@ typedef struct swapfile_pager { struct memory_object swp_pgr_hdr; /* pager-specific data */ - queue_chain_t pager_queue; /* next & prev pagers */ - unsigned int ref_count; /* reference count */ - boolean_t is_ready; /* is this pager ready ? */ - boolean_t is_mapped; /* is this pager mapped ? */ - struct vnode *swapfile_vnode;/* the swapfile's vnode */ + queue_chain_t pager_queue; /* next & prev pagers */ + struct os_refcnt ref_count; /* reference count */ + boolean_t is_ready; /* is this pager ready ? */ + boolean_t is_mapped; /* is this pager mapped ? */ + struct vnode *swapfile_vnode;/* the swapfile's vnode */ } *swapfile_pager_t; -#define SWAPFILE_PAGER_NULL ((swapfile_pager_t) NULL) +#define SWAPFILE_PAGER_NULL ((swapfile_pager_t) NULL) /* * List of memory objects managed by this EMM. * The list is protected by the "swapfile_pager_lock" lock. */ -int swapfile_pager_count = 0; /* number of pagers */ +int swapfile_pager_count = 0; /* number of pagers */ queue_head_t swapfile_pager_queue; -decl_lck_mtx_data(,swapfile_pager_lock) +decl_lck_mtx_data(, swapfile_pager_lock) /* * Statistics & counters. 
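Alongside the reindentation, this file's hunks make one substantive change that is easy to miss: the swapfile pager's raw unsigned int ref_count becomes a struct os_refcnt, and later hunks read it through os_ref_get_count(). The point of the typed wrapper is that retain/release bugs trip an assertion instead of silently wrapping the counter. Below is a minimal stand-in sketch of that idea only; the kernel's real os_refcnt API is richer than this.

/*
 * Sketch, not kernel code: a refcount type that asserts on misuse.
 */
#include <assert.h>

struct refcnt { unsigned int count; };

static void
refcnt_init(struct refcnt *rc, unsigned int n)
{
	rc->count = n;
}

static void
refcnt_retain(struct refcnt *rc)
{
	assert(rc->count > 0);  /* retaining a dead object is a bug */
	rc->count++;
}

/* returns the new count; 0 means the caller must free the object */
static unsigned int
refcnt_release(struct refcnt *rc)
{
	assert(rc->count > 0);  /* over-release is a bug */
	return --rc->count;
}

static unsigned int
refcnt_get(const struct refcnt *rc)
{
	return rc->count;
}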
@@ -160,9 +161,9 @@ decl_lck_mtx_data(,swapfile_pager_lock) int swapfile_pager_count_max = 0; -lck_grp_t swapfile_pager_lck_grp; -lck_grp_attr_t swapfile_pager_lck_grp_attr; -lck_attr_t swapfile_pager_lck_attr; +lck_grp_t swapfile_pager_lck_grp; +lck_grp_attr_t swapfile_pager_lck_grp_attr; +lck_attr_t swapfile_pager_lck_attr; /* internal prototypes */ @@ -170,21 +171,21 @@ swapfile_pager_t swapfile_pager_create(struct vnode *vp); swapfile_pager_t swapfile_pager_lookup(memory_object_t mem_obj); void swapfile_pager_dequeue(swapfile_pager_t pager); void swapfile_pager_deallocate_internal(swapfile_pager_t pager, - boolean_t locked); + boolean_t locked); void swapfile_pager_terminate_internal(swapfile_pager_t pager); #if DEBUG int swapfile_pagerdebug = 0; -#define PAGER_ALL 0xffffffff -#define PAGER_INIT 0x00000001 -#define PAGER_PAGEIN 0x00000002 - -#define PAGER_DEBUG(LEVEL, A) \ - MACRO_BEGIN \ - if ((swapfile_pagerdebug & LEVEL)==LEVEL) { \ - printf A; \ - } \ +#define PAGER_ALL 0xffffffff +#define PAGER_INIT 0x00000001 +#define PAGER_PAGEIN 0x00000002 + +#define PAGER_DEBUG(LEVEL, A) \ + MACRO_BEGIN \ + if ((swapfile_pagerdebug & LEVEL)==LEVEL) { \ + printf A; \ + } \ MACRO_END #else #define PAGER_DEBUG(LEVEL, A) @@ -208,23 +209,24 @@ swapfile_pager_bootstrap(void) */ kern_return_t swapfile_pager_init( - memory_object_t mem_obj, - memory_object_control_t control, + memory_object_t mem_obj, + memory_object_control_t control, #if !DEBUG __unused #endif memory_object_cluster_size_t pg_size) { - swapfile_pager_t pager; - kern_return_t kr; + swapfile_pager_t pager; + kern_return_t kr; memory_object_attr_info_data_t attributes; PAGER_DEBUG(PAGER_ALL, - ("swapfile_pager_init: %p, %p, %x\n", - mem_obj, control, pg_size)); + ("swapfile_pager_init: %p, %p, %x\n", + mem_obj, control, pg_size)); - if (control == MEMORY_OBJECT_CONTROL_NULL) + if (control == MEMORY_OBJECT_CONTROL_NULL) { return KERN_INVALID_ARGUMENT; + } pager = swapfile_pager_lookup(mem_obj); @@ -238,13 +240,14 @@ swapfile_pager_init( attributes.temporary = TRUE; kr = memory_object_change_attributes( - control, - MEMORY_OBJECT_ATTRIBUTE_INFO, - (memory_object_info_t) &attributes, - MEMORY_OBJECT_ATTR_INFO_COUNT); - if (kr != KERN_SUCCESS) + control, + MEMORY_OBJECT_ATTRIBUTE_INFO, + (memory_object_info_t) &attributes, + MEMORY_OBJECT_ATTR_INFO_COUNT); + if (kr != KERN_SUCCESS) { panic("swapfile_pager_init: " - "memory_object_change_attributes() failed"); + "memory_object_change_attributes() failed"); + } return KERN_SUCCESS; } @@ -259,14 +262,14 @@ swapfile_pager_init( */ kern_return_t swapfile_pager_data_return( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt, - __unused memory_object_offset_t *resid_offset, - __unused int *io_error, - __unused boolean_t dirty, - __unused boolean_t kernel_copy, - __unused int upl_flags) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt, + __unused memory_object_offset_t *resid_offset, + __unused int *io_error, + __unused boolean_t dirty, + __unused boolean_t kernel_copy, + __unused int upl_flags) { panic("swapfile_pager_data_return: should never get called"); return KERN_FAILURE; @@ -274,9 +277,9 @@ swapfile_pager_data_return( kern_return_t swapfile_pager_data_initialize( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_cluster_size_t data_cnt) + __unused memory_object_t mem_obj, + __unused 
memory_object_offset_t offset, + __unused memory_object_cluster_size_t data_cnt) { panic("swapfile_pager_data_initialize: should never get called"); return KERN_FAILURE; @@ -284,10 +287,10 @@ swapfile_pager_data_initialize( kern_return_t swapfile_pager_data_unlock( - __unused memory_object_t mem_obj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t size, - __unused vm_prot_t desired_access) + __unused memory_object_t mem_obj, + __unused memory_object_offset_t offset, + __unused memory_object_size_t size, + __unused vm_prot_t desired_access) { return KERN_FAILURE; } @@ -297,31 +300,31 @@ swapfile_pager_data_unlock( * * Handles page-in requests from VM. */ -kern_return_t +kern_return_t swapfile_pager_data_request( - memory_object_t mem_obj, - memory_object_offset_t offset, - memory_object_cluster_size_t length, + memory_object_t mem_obj, + memory_object_offset_t offset, + memory_object_cluster_size_t length, #if !DEBUG __unused #endif - vm_prot_t protection_required, + vm_prot_t protection_required, __unused memory_object_fault_info_t mo_fault_info) { - swapfile_pager_t pager; - memory_object_control_t mo_control; - upl_t upl; - int upl_flags; - upl_size_t upl_size; - upl_page_info_t *upl_pl = NULL; - unsigned int pl_count; - vm_object_t dst_object; - kern_return_t kr, retval; - vm_map_offset_t kernel_mapping; - vm_offset_t dst_vaddr; - char *dst_ptr; - vm_offset_t cur_offset; - vm_map_entry_t map_entry; + swapfile_pager_t pager; + memory_object_control_t mo_control; + upl_t upl; + int upl_flags; + upl_size_t upl_size; + upl_page_info_t *upl_pl = NULL; + unsigned int pl_count; + vm_object_t dst_object; + kern_return_t kr, retval; + vm_map_offset_t kernel_mapping; + vm_offset_t dst_vaddr; + char *dst_ptr; + vm_offset_t cur_offset; + vm_map_entry_t map_entry; PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required)); @@ -331,7 +334,7 @@ swapfile_pager_data_request( pager = swapfile_pager_lookup(mem_obj); assert(pager->is_ready); - assert(pager->ref_count > 1); /* pager is alive and mapped */ + assert(os_ref_get_count(&pager->ref_count) > 1); /* pager is alive and mapped */ PAGER_DEBUG(PAGER_PAGEIN, ("swapfile_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager)); @@ -342,15 +345,15 @@ swapfile_pager_data_request( upl_size = length; upl_flags = - UPL_RET_ONLY_ABSENT | - UPL_SET_LITE | - UPL_NO_SYNC | - UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ - UPL_SET_INTERNAL; + UPL_RET_ONLY_ABSENT | + UPL_SET_LITE | + UPL_NO_SYNC | + UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */ + UPL_SET_INTERNAL; pl_count = 0; kr = memory_object_upl_request(mo_control, - offset, upl_size, - &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_OSFMK); + offset, upl_size, + &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_OSFMK); if (kr != KERN_SUCCESS) { retval = kr; goto done; @@ -363,15 +366,15 @@ swapfile_pager_data_request( * Reserve a virtual page in the kernel address space to map each * destination physical page when it's its turn to be processed. */ - vm_object_reference(kernel_object); /* ref. for mapping */ + vm_object_reference(kernel_object); /* ref. 
for mapping */ kr = vm_map_find_space(kernel_map, - &kernel_mapping, - PAGE_SIZE_64, - 0, - 0, - VM_MAP_KERNEL_FLAGS_NONE, - VM_KERN_MEMORY_NONE, - &map_entry); + &kernel_mapping, + PAGE_SIZE_64, + 0, + 0, + VM_MAP_KERNEL_FLAGS_NONE, + VM_KERN_MEMORY_NONE, + &map_entry); if (kr != KERN_SUCCESS) { vm_object_deallocate(kernel_object); retval = kr; @@ -403,15 +406,15 @@ swapfile_pager_data_request( * is "busy". */ dst_pnum = (ppnum_t) - upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); + upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE)); assert(dst_pnum != 0); retval = pmap_enter(kernel_pmap, - kernel_mapping, - dst_pnum, - VM_PROT_READ | VM_PROT_WRITE, - VM_PROT_NONE, - 0, - TRUE); + kernel_mapping, + dst_pnum, + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_NONE, + 0, + TRUE); assert(retval == KERN_SUCCESS); @@ -421,16 +424,15 @@ swapfile_pager_data_request( memset(dst_ptr, '\0', PAGE_SIZE); /* add an end-of-line to keep line counters happy */ - dst_ptr[PAGE_SIZE-1] = '\n'; - + dst_ptr[PAGE_SIZE - 1] = '\n'; + /* * Remove the pmap mapping of the destination page * in the kernel. */ pmap_remove(kernel_pmap, - (addr64_t) kernel_mapping, - (addr64_t) (kernel_mapping + PAGE_SIZE_64)); - + (addr64_t) kernel_mapping, + (addr64_t) (kernel_mapping + PAGE_SIZE_64)); } retval = KERN_SUCCESS; @@ -452,9 +454,9 @@ done: upl_abort(upl, 0); } else { boolean_t empty; - upl_commit_range(upl, 0, upl->size, - UPL_COMMIT_CS_VALIDATED, - upl_pl, pl_count, &empty); + upl_commit_range(upl, 0, upl->size, + UPL_COMMIT_CS_VALIDATED, + upl_pl, pl_count, &empty); } /* and deallocate the UPL */ @@ -464,9 +466,9 @@ done: if (kernel_mapping != 0) { /* clean up the mapping of the source and destination pages */ kr = vm_map_remove(kernel_map, - kernel_mapping, - kernel_mapping + PAGE_SIZE_64, - VM_MAP_REMOVE_NO_FLAGS); + kernel_mapping, + kernel_mapping + PAGE_SIZE_64, + VM_MAP_REMOVE_NO_FLAGS); assert(kr == KERN_SUCCESS); kernel_mapping = 0; dst_vaddr = 0; @@ -484,15 +486,14 @@ done: */ void swapfile_pager_reference( - memory_object_t mem_obj) -{ - swapfile_pager_t pager; + memory_object_t mem_obj) +{ + swapfile_pager_t pager; pager = swapfile_pager_lookup(mem_obj); lck_mtx_lock(&swapfile_pager_lock); - assert(pager->ref_count > 0); - pager->ref_count++; + os_ref_retain_locked(&pager->ref_count); lck_mtx_unlock(&swapfile_pager_lock); } @@ -511,12 +512,12 @@ swapfile_pager_dequeue( assert(!pager->is_mapped); queue_remove(&swapfile_pager_queue, - pager, - swapfile_pager_t, - pager_queue); + pager, + swapfile_pager_t, + pager_queue); pager->pager_queue.next = NULL; pager->pager_queue.prev = NULL; - + swapfile_pager_count--; } @@ -558,17 +559,17 @@ swapfile_pager_terminate_internal( */ void swapfile_pager_deallocate_internal( - swapfile_pager_t pager, - boolean_t locked) + swapfile_pager_t pager, + boolean_t locked) { - if (! locked) { + if (!locked) { lck_mtx_lock(&swapfile_pager_lock); } /* drop a reference on this pager */ - pager->ref_count--; + os_ref_count_t refcount = os_ref_release_locked(&pager->ref_count); - if (pager->ref_count == 1) { + if (refcount == 1) { /* * Only the "named" reference is left, which means that * no one is really holding on to this pager anymore. @@ -578,7 +579,7 @@ swapfile_pager_deallocate_internal( /* the pager is all ours: no need for the lock now */ lck_mtx_unlock(&swapfile_pager_lock); swapfile_pager_terminate_internal(pager); - } else if (pager->ref_count == 0) { + } else if (refcount == 0) { /* * Dropped the existence reference; the memory object has * been terminated. 
Do some final cleanup and release the @@ -589,7 +590,7 @@ swapfile_pager_deallocate_internal( memory_object_control_deallocate(pager->swp_pgr_hdr.mo_control); pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; } - kfree(pager, sizeof (*pager)); + kfree(pager, sizeof(*pager)); pager = SWAPFILE_PAGER_NULL; } else { /* there are still plenty of references: keep going... */ @@ -607,9 +608,9 @@ swapfile_pager_deallocate_internal( */ void swapfile_pager_deallocate( - memory_object_t mem_obj) + memory_object_t mem_obj) { - swapfile_pager_t pager; + swapfile_pager_t pager; PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_deallocate: %p\n", mem_obj)); pager = swapfile_pager_lookup(mem_obj); @@ -624,7 +625,7 @@ swapfile_pager_terminate( #if !DEBUG __unused #endif - memory_object_t mem_obj) + memory_object_t mem_obj) { PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_terminate: %p\n", mem_obj)); @@ -637,12 +638,12 @@ swapfile_pager_terminate( kern_return_t swapfile_pager_synchronize( __unused memory_object_t mem_obbj, - __unused memory_object_offset_t offset, - __unused memory_object_size_t length, - __unused vm_sync_t sync_flags) + __unused memory_object_offset_t offset, + __unused memory_object_size_t length, + __unused vm_sync_t sync_flags) { panic("swapfile_pager_synchronize: memory_object_synchronize no longer supported\n"); - return (KERN_FAILURE); + return KERN_FAILURE; } /* @@ -655,10 +656,10 @@ swapfile_pager_synchronize( */ kern_return_t swapfile_pager_map( - memory_object_t mem_obj, - __unused vm_prot_t prot) + memory_object_t mem_obj, + __unused vm_prot_t prot) { - swapfile_pager_t pager; + swapfile_pager_t pager; PAGER_DEBUG(PAGER_ALL, ("swapfile_pager_map: %p\n", mem_obj)); @@ -666,7 +667,7 @@ swapfile_pager_map( lck_mtx_lock(&swapfile_pager_lock); assert(pager->is_ready); - assert(pager->ref_count > 0); /* pager is alive */ + assert(os_ref_get_count(&pager->ref_count) > 0); /* pager is alive */ if (pager->is_mapped == FALSE) { /* * First mapping of this pager: take an extra reference @@ -674,7 +675,7 @@ swapfile_pager_map( * are removed. 
*/ pager->is_mapped = TRUE; - pager->ref_count++; + os_ref_retain_locked(&pager->ref_count); } lck_mtx_unlock(&swapfile_pager_lock); @@ -688,12 +689,12 @@ swapfile_pager_map( */ kern_return_t swapfile_pager_last_unmap( - memory_object_t mem_obj) + memory_object_t mem_obj) { - swapfile_pager_t pager; + swapfile_pager_t pager; PAGER_DEBUG(PAGER_ALL, - ("swapfile_pager_last_unmap: %p\n", mem_obj)); + ("swapfile_pager_last_unmap: %p\n", mem_obj)); pager = swapfile_pager_lookup(mem_obj); @@ -709,7 +710,7 @@ swapfile_pager_last_unmap( } else { lck_mtx_unlock(&swapfile_pager_lock); } - + return KERN_SUCCESS; } @@ -719,25 +720,25 @@ swapfile_pager_last_unmap( */ swapfile_pager_t swapfile_pager_lookup( - memory_object_t mem_obj) + memory_object_t mem_obj) { - swapfile_pager_t pager; + swapfile_pager_t pager; assert(mem_obj->mo_pager_ops == &swapfile_pager_ops); __IGNORE_WCASTALIGN(pager = (swapfile_pager_t) mem_obj); - assert(pager->ref_count > 0); + assert(os_ref_get_count(&pager->ref_count) > 0); return pager; } swapfile_pager_t swapfile_pager_create( - struct vnode *vp) + struct vnode *vp) { - swapfile_pager_t pager, pager2; - memory_object_control_t control; - kern_return_t kr; + swapfile_pager_t pager, pager2; + memory_object_control_t control; + kern_return_t kr; - pager = (swapfile_pager_t) kalloc(sizeof (*pager)); + pager = (swapfile_pager_t) kalloc(sizeof(*pager)); if (pager == SWAPFILE_PAGER_NULL) { return SWAPFILE_PAGER_NULL; } @@ -754,28 +755,28 @@ swapfile_pager_create( pager->swp_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL; pager->is_ready = FALSE;/* not ready until it has a "name" */ - pager->ref_count = 1; /* setup reference */ + os_ref_init(&pager->ref_count, NULL); /* setup reference */ pager->is_mapped = FALSE; pager->swapfile_vnode = vp; - + lck_mtx_lock(&swapfile_pager_lock); /* see if anyone raced us to create a pager for the same object */ queue_iterate(&swapfile_pager_queue, - pager2, - swapfile_pager_t, - pager_queue) { + pager2, + swapfile_pager_t, + pager_queue) { if (pager2->swapfile_vnode == vp) { break; } } - if (! queue_end(&swapfile_pager_queue, - (queue_entry_t) pager2)) { + if (!queue_end(&swapfile_pager_queue, + (queue_entry_t) pager2)) { /* while we hold the lock, transfer our setup ref to winner */ - pager2->ref_count++; + os_ref_retain_locked(&pager2->ref_count); /* we lost the race, down with the loser... */ lck_mtx_unlock(&swapfile_pager_lock); pager->swapfile_vnode = NULL; - kfree(pager, sizeof (*pager)); + kfree(pager, sizeof(*pager)); /* ... 
and go with the winner */ pager = pager2; /* let the winner make sure the pager gets ready */ @@ -784,9 +785,9 @@ swapfile_pager_create( /* enter new pager at the head of our list of pagers */ queue_enter_first(&swapfile_pager_queue, - pager, - swapfile_pager_t, - pager_queue); + pager, + swapfile_pager_t, + pager_queue); swapfile_pager_count++; if (swapfile_pager_count > swapfile_pager_count_max) { swapfile_pager_count_max = swapfile_pager_count; @@ -794,8 +795,8 @@ swapfile_pager_create( lck_mtx_unlock(&swapfile_pager_lock); kr = memory_object_create_named((memory_object_t) pager, - 0, - &control); + 0, + &control); assert(kr == KERN_SUCCESS); lck_mtx_lock(&swapfile_pager_lock); @@ -820,25 +821,25 @@ memory_object_t swapfile_pager_setup( struct vnode *vp) { - swapfile_pager_t pager; + swapfile_pager_t pager; lck_mtx_lock(&swapfile_pager_lock); queue_iterate(&swapfile_pager_queue, - pager, - swapfile_pager_t, - pager_queue) { + pager, + swapfile_pager_t, + pager_queue) { if (pager->swapfile_vnode == vp) { break; } } if (queue_end(&swapfile_pager_queue, - (queue_entry_t) pager)) { + (queue_entry_t) pager)) { /* no existing pager for this backing object */ pager = SWAPFILE_PAGER_NULL; } else { /* make sure pager doesn't disappear */ - pager->ref_count++; + os_ref_retain_locked(&pager->ref_count); } lck_mtx_unlock(&swapfile_pager_lock); @@ -853,20 +854,20 @@ swapfile_pager_setup( lck_mtx_lock(&swapfile_pager_lock); while (!pager->is_ready) { lck_mtx_sleep(&swapfile_pager_lock, - LCK_SLEEP_DEFAULT, - &pager->is_ready, - THREAD_UNINT); + LCK_SLEEP_DEFAULT, + &pager->is_ready, + THREAD_UNINT); } lck_mtx_unlock(&swapfile_pager_lock); return (memory_object_t) pager; -} +} memory_object_control_t swapfile_pager_control( - memory_object_t mem_obj) + memory_object_t mem_obj) { - swapfile_pager_t pager; + swapfile_pager_t pager; if (mem_obj == MEMORY_OBJECT_NULL || mem_obj->mo_pager_ops != &swapfile_pager_ops) { diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c index 93e1374e6..92df95613 100644 --- a/osfmk/vm/vm_user.c +++ b/osfmk/vm/vm_user.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. 
- * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -58,7 +58,7 @@ /* * File: vm/vm_user.c * Author: Avadis Tevanian, Jr., Michael Wayne Young - * + * * User-exported virtual memory functions. */ @@ -90,9 +90,9 @@ #include #include #include -#include /* to get vm_address_t */ +#include /* to get vm_address_t */ #include -#include /* to get pointer_t */ +#include /* to get pointer_t */ #include #include #include @@ -125,9 +125,9 @@ vm_size_t upl_offset_to_pagelist = 0; -#if VM_CPM +#if VM_CPM #include -#endif /* VM_CPM */ +#endif /* VM_CPM */ /* * mach_vm_allocate allocates "zero fill" memory in the specfied @@ -135,39 +135,41 @@ vm_size_t upl_offset_to_pagelist = 0; */ kern_return_t mach_vm_allocate_external( - vm_map_t map, - mach_vm_offset_t *addr, - mach_vm_size_t size, - int flags) + vm_map_t map, + mach_vm_offset_t *addr, + mach_vm_size_t size, + int flags) { - vm_tag_t tag; + vm_tag_t tag; - VM_GET_FLAGS_ALIAS(flags, tag); - return (mach_vm_allocate_kernel(map, addr, size, flags, tag)); + VM_GET_FLAGS_ALIAS(flags, tag); + return mach_vm_allocate_kernel(map, addr, size, flags, tag); } kern_return_t mach_vm_allocate_kernel( - vm_map_t map, - mach_vm_offset_t *addr, - mach_vm_size_t size, - int flags, + vm_map_t map, + mach_vm_offset_t *addr, + mach_vm_size_t size, + int flags, vm_tag_t tag) { vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t result; - boolean_t anywhere; + vm_map_size_t map_size; + kern_return_t result; + boolean_t anywhere; /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_ALLOCATE) + if (flags & ~VM_FLAGS_USER_ALLOCATE) { return KERN_INVALID_ARGUMENT; + } - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } if (size == 0) { *addr = 0; - return(KERN_SUCCESS); + return KERN_SUCCESS; } anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); @@ -183,76 +185,80 @@ mach_vm_allocate_kernel( * memory would tend to confuse those applications. 
*/ map_addr = vm_map_min(map); - if (map_addr == 0) + if (map_addr == 0) { map_addr += VM_MAP_PAGE_SIZE(map); - } else + } + } else { map_addr = vm_map_trunc_page(*addr, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); + } map_size = vm_map_round_page(size, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); if (map_size == 0) { - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } result = vm_map_enter( - map, - &map_addr, - map_size, - (vm_map_offset_t)0, - flags, - VM_MAP_KERNEL_FLAGS_NONE, - tag, - VM_OBJECT_NULL, - (vm_object_offset_t)0, - FALSE, - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); + map, + &map_addr, + map_size, + (vm_map_offset_t)0, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + VM_OBJECT_NULL, + (vm_object_offset_t)0, + FALSE, + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); *addr = map_addr; - return(result); + return result; } /* - * vm_allocate + * vm_allocate * Legacy routine that allocates "zero fill" memory in the specfied * map (which is limited to the same size as the kernel). */ kern_return_t vm_allocate_external( - vm_map_t map, - vm_offset_t *addr, - vm_size_t size, - int flags) + vm_map_t map, + vm_offset_t *addr, + vm_size_t size, + int flags) { vm_tag_t tag; - VM_GET_FLAGS_ALIAS(flags, tag); - return (vm_allocate_kernel(map, addr, size, flags, tag)); + VM_GET_FLAGS_ALIAS(flags, tag); + return vm_allocate_kernel(map, addr, size, flags, tag); } kern_return_t vm_allocate_kernel( - vm_map_t map, - vm_offset_t *addr, - vm_size_t size, + vm_map_t map, + vm_offset_t *addr, + vm_size_t size, int flags, vm_tag_t tag) { vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t result; - boolean_t anywhere; + vm_map_size_t map_size; + kern_return_t result; + boolean_t anywhere; /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_ALLOCATE) + if (flags & ~VM_FLAGS_USER_ALLOCATE) { return KERN_INVALID_ARGUMENT; + } - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } if (size == 0) { *addr = 0; - return(KERN_SUCCESS); + return KERN_SUCCESS; } anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); @@ -268,31 +274,33 @@ vm_allocate_kernel( * memory would tend to confuse those applications. 
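(The comment above explains why vm_allocate_kernel(), like mach_vm_allocate_kernel() before it, refuses to return address 0 for an ANYWHERE allocation: callers commonly treat 0 as an allocation failure. A standalone sketch of that hint-selection logic; pick_start_hint(), PAGE_SIZE_SKETCH, and the plain 64-bit address type are illustrative stand-ins for vm_map_min(), VM_MAP_PAGE_SIZE(map), and vm_map_offset_t:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE_SKETCH 4096ULL       /* stand-in for VM_MAP_PAGE_SIZE(map) */

static uint64_t
pick_start_hint(bool anywhere, uint64_t requested, uint64_t map_min)
{
    if (anywhere) {
        uint64_t hint = map_min;
        if (hint == 0) {
            hint += PAGE_SIZE_SKETCH;  /* never hand out page zero */
        }
        return hint;
    }
    /* Fixed request: truncate to a page boundary, as vm_map_trunc_page()
     * does with VM_MAP_PAGE_MASK(map). */
    return requested & ~(PAGE_SIZE_SKETCH - 1);
}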
*/ map_addr = vm_map_min(map); - if (map_addr == 0) + if (map_addr == 0) { map_addr += VM_MAP_PAGE_SIZE(map); - } else + } + } else { map_addr = vm_map_trunc_page(*addr, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); + } map_size = vm_map_round_page(size, - VM_MAP_PAGE_MASK(map)); + VM_MAP_PAGE_MASK(map)); if (map_size == 0) { - return(KERN_INVALID_ARGUMENT); + return KERN_INVALID_ARGUMENT; } result = vm_map_enter( - map, - &map_addr, - map_size, - (vm_map_offset_t)0, - flags, - VM_MAP_KERNEL_FLAGS_NONE, - tag, - VM_OBJECT_NULL, - (vm_object_offset_t)0, - FALSE, - VM_PROT_DEFAULT, - VM_PROT_ALL, - VM_INHERIT_DEFAULT); + map, + &map_addr, + map_size, + (vm_map_offset_t)0, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + VM_OBJECT_NULL, + (vm_object_offset_t)0, + FALSE, + VM_PROT_DEFAULT, + VM_PROT_ALL, + VM_INHERIT_DEFAULT); #if KASAN if (result == KERN_SUCCESS && map->pmap == kernel_pmap) { @@ -301,7 +309,7 @@ vm_allocate_kernel( #endif *addr = CAST_DOWN(vm_offset_t, map_addr); - return(result); + return result; } /* @@ -311,22 +319,24 @@ vm_allocate_kernel( */ kern_return_t mach_vm_deallocate( - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size) + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } - if (size == (mach_vm_offset_t) 0) - return(KERN_SUCCESS); + if (size == (mach_vm_offset_t) 0) { + return KERN_SUCCESS; + } return vm_map_remove(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - VM_MAP_REMOVE_NO_FLAGS); + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_NO_FLAGS); } /* @@ -337,22 +347,24 @@ mach_vm_deallocate( */ kern_return_t vm_deallocate( - vm_map_t map, - vm_offset_t start, - vm_size_t size) + vm_map_t map, + vm_offset_t start, + vm_size_t size) { - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } - if (size == (vm_offset_t) 0) - return(KERN_SUCCESS); + if (size == (vm_offset_t) 0) { + return KERN_SUCCESS; + } return vm_map_remove(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - VM_MAP_REMOVE_NO_FLAGS); + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + VM_MAP_REMOVE_NO_FLAGS); } /* @@ -362,24 +374,26 @@ vm_deallocate( */ kern_return_t mach_vm_inherit( - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - vm_inherit_t new_inheritance) + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_inherit_t new_inheritance) { if ((map == VM_MAP_NULL) || (start + size < start) || - (new_inheritance > VM_INHERIT_LAST_VALID)) - return(KERN_INVALID_ARGUMENT); + (new_inheritance > VM_INHERIT_LAST_VALID)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return(vm_map_inherit(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - new_inheritance)); + return vm_map_inherit(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + new_inheritance); } /* @@ -389,24 +403,26 @@ 
mach_vm_inherit( */ kern_return_t vm_inherit( - vm_map_t map, - vm_offset_t start, - vm_size_t size, - vm_inherit_t new_inheritance) + vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_inherit_t new_inheritance) { if ((map == VM_MAP_NULL) || (start + size < start) || - (new_inheritance > VM_INHERIT_LAST_VALID)) - return(KERN_INVALID_ARGUMENT); + (new_inheritance > VM_INHERIT_LAST_VALID)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return(vm_map_inherit(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - new_inheritance)); + return vm_map_inherit(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + new_inheritance); } /* @@ -417,26 +433,28 @@ vm_inherit( kern_return_t mach_vm_protect( - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - boolean_t set_maximum, - vm_prot_t new_protection) + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + boolean_t set_maximum, + vm_prot_t new_protection) { if ((map == VM_MAP_NULL) || (start + size < start) || - (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) - return(KERN_INVALID_ARGUMENT); + (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return(vm_map_protect(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - new_protection, - set_maximum)); + return vm_map_protect(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + new_protection, + set_maximum); } /* @@ -448,26 +466,28 @@ mach_vm_protect( kern_return_t vm_protect( - vm_map_t map, - vm_offset_t start, - vm_size_t size, - boolean_t set_maximum, - vm_prot_t new_protection) + vm_map_t map, + vm_offset_t start, + vm_size_t size, + boolean_t set_maximum, + vm_prot_t new_protection) { if ((map == VM_MAP_NULL) || (start + size < start) || - (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) - return(KERN_INVALID_ARGUMENT); + (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } - return(vm_map_protect(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - new_protection, - set_maximum)); + return vm_map_protect(map, + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + new_protection, + set_maximum); } /* @@ -477,24 +497,26 @@ vm_protect( */ kern_return_t mach_vm_machine_attribute( - vm_map_t map, - mach_vm_address_t addr, - mach_vm_size_t size, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value) /* IN/OUT */ + vm_map_t map, + mach_vm_address_t addr, + mach_vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ { - if ((map == VM_MAP_NULL) || (addr + size < addr)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (addr + size < addr)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } return vm_map_machine_attribute( - map, + map, vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(addr+size, - VM_MAP_PAGE_MASK(map)), + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(addr + size, + 
VM_MAP_PAGE_MASK(map)), attribute, value); } @@ -507,24 +529,26 @@ mach_vm_machine_attribute( */ kern_return_t vm_machine_attribute( - vm_map_t map, - vm_address_t addr, - vm_size_t size, - vm_machine_attribute_t attribute, - vm_machine_attribute_val_t* value) /* IN/OUT */ + vm_map_t map, + vm_address_t addr, + vm_size_t size, + vm_machine_attribute_t attribute, + vm_machine_attribute_val_t* value) /* IN/OUT */ { - if ((map == VM_MAP_NULL) || (addr + size < addr)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (addr + size < addr)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } return vm_map_machine_attribute( - map, + map, vm_map_trunc_page(addr, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(addr+size, - VM_MAP_PAGE_MASK(map)), + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(addr + size, + VM_MAP_PAGE_MASK(map)), attribute, value); } @@ -537,47 +561,49 @@ vm_machine_attribute( * the IPC implementation as part of receiving the reply to this call. * If IPC isn't used, the caller must deal with the vm_map_copy_t object * that gets returned. - * + * * JMM - because of mach_msg_type_number_t, this call is limited to a * single 4GB region at this time. * */ kern_return_t mach_vm_read( - vm_map_t map, - mach_vm_address_t addr, - mach_vm_size_t size, - pointer_t *data, - mach_msg_type_number_t *data_size) + vm_map_t map, + mach_vm_address_t addr, + mach_vm_size_t size, + pointer_t *data, + mach_msg_type_number_t *data_size) { - kern_return_t error; - vm_map_copy_t ipc_address; + kern_return_t error; + vm_map_copy_t ipc_address; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } - if ((mach_msg_type_number_t) size != size) + if ((mach_msg_type_number_t) size != size) { return KERN_INVALID_ARGUMENT; - + } + error = vm_map_copyin(map, - (vm_map_address_t)addr, - (vm_map_size_t)size, - FALSE, /* src_destroy */ - &ipc_address); + (vm_map_address_t)addr, + (vm_map_size_t)size, + FALSE, /* src_destroy */ + &ipc_address); if (KERN_SUCCESS == error) { *data = (pointer_t) ipc_address; *data_size = (mach_msg_type_number_t) size; assert(*data_size == size); } - return(error); + return error; } /* * vm_read - * Read/copy a range from one address space and return it to the caller. * Limited addressability (same range limits as for the native kernel map). - * + * * It is assumed that the address for the returned memory is selected by * the IPC implementation as part of receiving the reply to this call. 
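(As the comment above says, when the reply to mach_vm_read() does not travel back through IPC, the pointer_t the caller receives is really a vm_map_copy_t that must be mapped or discarded. The mach_vm_read_list() hunks below show the in-kernel consumption; here is a condensed kernel-context sketch of the same contract, where read_into_current_map() is a hypothetical helper and the vm_map_copyout()/vm_map_copy_discard() calls are the ones visible in those hunks:

static kern_return_t
read_into_current_map(vm_map_t src, mach_vm_address_t addr,
    mach_vm_size_t size, vm_map_address_t *out_addr)
{
    pointer_t data;
    mach_msg_type_number_t data_size;
    kern_return_t kr;

    kr = mach_vm_read(src, addr, size, &data, &data_size);
    if (kr != KERN_SUCCESS) {
        return kr;
    }
    /* The pointer is really a vm_map_copy_t: map it into the current
     * task, or discard it so the pages are not leaked. */
    kr = vm_map_copyout(current_task()->map, out_addr, (vm_map_copy_t)data);
    if (kr != KERN_SUCCESS) {
        vm_map_copy_discard((vm_map_copy_t)data);
    }
    return kr;
}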
* If IPC isn't used, the caller must deal with the vm_map_copy_t object @@ -585,17 +611,18 @@ mach_vm_read( */ kern_return_t vm_read( - vm_map_t map, - vm_address_t addr, - vm_size_t size, - pointer_t *data, - mach_msg_type_number_t *data_size) + vm_map_t map, + vm_address_t addr, + vm_size_t size, + pointer_t *data, + mach_msg_type_number_t *data_size) { - kern_return_t error; - vm_map_copy_t ipc_address; + kern_return_t error; + vm_map_copy_t ipc_address; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } mach_msg_type_number_t dsize; if (os_convert_overflow(size, &dsize)) { @@ -609,20 +636,20 @@ vm_read( } error = vm_map_copyin(map, - (vm_map_address_t)addr, - (vm_map_size_t)size, - FALSE, /* src_destroy */ - &ipc_address); + (vm_map_address_t)addr, + (vm_map_size_t)size, + FALSE, /* src_destroy */ + &ipc_address); if (KERN_SUCCESS == error) { *data = (pointer_t) ipc_address; *data_size = dsize; assert(*data_size == size); } - return(error); + return error; } -/* +/* * mach_vm_read_list - * Read/copy a list of address ranges from specified map. * @@ -632,37 +659,38 @@ vm_read( */ kern_return_t mach_vm_read_list( - vm_map_t map, - mach_vm_read_entry_t data_list, - natural_t count) + vm_map_t map, + mach_vm_read_entry_t data_list, + natural_t count) { - mach_msg_type_number_t i; - kern_return_t error; - vm_map_copy_t copy; + mach_msg_type_number_t i; + kern_return_t error; + vm_map_copy_t copy; if (map == VM_MAP_NULL || - count > VM_MAP_ENTRY_MAX) - return(KERN_INVALID_ARGUMENT); + count > VM_MAP_ENTRY_MAX) { + return KERN_INVALID_ARGUMENT; + } error = KERN_SUCCESS; - for(i=0; imap, - &map_addr, - copy); + current_task()->map, + &map_addr, + copy); if (KERN_SUCCESS == error) { data_list[i].address = map_addr; continue; @@ -673,10 +701,10 @@ mach_vm_read_list( data_list[i].address = (mach_vm_address_t)0; data_list[i].size = (mach_vm_size_t)0; } - return(error); + return error; } -/* +/* * vm_read_list - * Read/copy a list of address ranges from specified map. * @@ -697,39 +725,40 @@ mach_vm_read_list( kern_return_t vm_read_list( - vm_map_t map, - vm_read_entry_t data_list, - natural_t count) + vm_map_t map, + vm_read_entry_t data_list, + natural_t count) { - mach_msg_type_number_t i; - kern_return_t error; - vm_map_copy_t copy; + mach_msg_type_number_t i; + kern_return_t error; + vm_map_copy_t copy; if (map == VM_MAP_NULL || - count > VM_MAP_ENTRY_MAX) - return(KERN_INVALID_ARGUMENT); + count > VM_MAP_ENTRY_MAX) { + return KERN_INVALID_ARGUMENT; + } error = KERN_SUCCESS; - for(i=0; imap, - &map_addr, - copy); + error = vm_map_copyout(current_task()->map, + &map_addr, + copy); if (KERN_SUCCESS == error) { data_list[i].address = - CAST_DOWN(vm_offset_t, map_addr); + CAST_DOWN(vm_offset_t, map_addr); continue; } vm_map_copy_discard(copy); @@ -738,14 +767,14 @@ vm_read_list( data_list[i].address = (mach_vm_address_t)0; data_list[i].size = (mach_vm_size_t)0; } - return(error); + return error; } /* * mach_vm_read_overwrite - * Overwrite a range of the current map with data from the specified * map/address range. - * + * * In making an assumption that the current thread is local, it is * no longer cluster-safe without a fully supportive local proxy * thread/task (but we don't support cluster's anymore so this is moot). 
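(Note the new check in vm_read() above: instead of silently truncating a 64-bit vm_size_t into the 32-bit mach_msg_type_number_t reply field, the size is now converted through os_convert_overflow() and the call rejected on overflow, while mach_vm_read() keeps the equivalent "(mach_msg_type_number_t) size != size" comparison. os_convert_overflow() comes from xnu's <os/overflow.h>; the portable stand-in below is an assumption about its observable behavior, not its implementation:

#include <stdbool.h>
#include <stdint.h>

/* Returns true when size cannot be represented in the 32-bit IPC
 * size type; on success stores the converted value through out. */
static bool
convert_overflows_u32(uint64_t size, uint32_t *out)
{
    if (size > UINT32_MAX) {
        return true;
    }
    *out = (uint32_t)size;
    return false;
}

/* Usage mirroring vm_read(): refuse rather than truncate. */
static int
check_read_size(uint64_t size)
{
    uint32_t dsize;
    if (convert_overflows_u32(size, &dsize)) {
        return 4;                      /* KERN_INVALID_ARGUMENT */
    }
    return 0;                          /* KERN_SUCCESS */
}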
@@ -753,39 +782,40 @@ vm_read_list( kern_return_t mach_vm_read_overwrite( - vm_map_t map, - mach_vm_address_t address, - mach_vm_size_t size, - mach_vm_address_t data, - mach_vm_size_t *data_size) + vm_map_t map, + mach_vm_address_t address, + mach_vm_size_t size, + mach_vm_address_t data, + mach_vm_size_t *data_size) { - kern_return_t error; - vm_map_copy_t copy; + kern_return_t error; + vm_map_copy_t copy; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } error = vm_map_copyin(map, (vm_map_address_t)address, - (vm_map_size_t)size, FALSE, ©); + (vm_map_size_t)size, FALSE, ©); if (KERN_SUCCESS == error) { error = vm_map_copy_overwrite(current_thread()->map, - (vm_map_address_t)data, - copy, FALSE); + (vm_map_address_t)data, + copy, FALSE); if (KERN_SUCCESS == error) { *data_size = size; return error; } vm_map_copy_discard(copy); } - return(error); + return error; } /* * vm_read_overwrite - * Overwrite a range of the current map with data from the specified * map/address range. - * + * * This routine adds the additional limitation that the source and * destination ranges must be describable with vm_address_t values * (i.e. the same size address spaces as the kernel, or at least the @@ -795,32 +825,33 @@ mach_vm_read_overwrite( kern_return_t vm_read_overwrite( - vm_map_t map, - vm_address_t address, - vm_size_t size, - vm_address_t data, - vm_size_t *data_size) + vm_map_t map, + vm_address_t address, + vm_size_t size, + vm_address_t data, + vm_size_t *data_size) { - kern_return_t error; - vm_map_copy_t copy; + kern_return_t error; + vm_map_copy_t copy; - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } error = vm_map_copyin(map, (vm_map_address_t)address, - (vm_map_size_t)size, FALSE, ©); + (vm_map_size_t)size, FALSE, ©); if (KERN_SUCCESS == error) { error = vm_map_copy_overwrite(current_thread()->map, - (vm_map_address_t)data, - copy, FALSE); + (vm_map_address_t)data, + copy, FALSE); if (KERN_SUCCESS == error) { *data_size = size; return error; } vm_map_copy_discard(copy); } - return(error); + return error; } @@ -831,16 +862,17 @@ vm_read_overwrite( */ kern_return_t mach_vm_write( - vm_map_t map, - mach_vm_address_t address, - pointer_t data, - __unused mach_msg_type_number_t size) + vm_map_t map, + mach_vm_address_t address, + pointer_t data, + __unused mach_msg_type_number_t size) { - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } return vm_map_copy_overwrite(map, (vm_map_address_t)address, - (vm_map_copy_t) data, FALSE /* interruptible XXX */); + (vm_map_copy_t) data, FALSE /* interruptible XXX */); } /* @@ -855,16 +887,17 @@ mach_vm_write( */ kern_return_t vm_write( - vm_map_t map, - vm_address_t address, - pointer_t data, - __unused mach_msg_type_number_t size) + vm_map_t map, + vm_address_t address, + pointer_t data, + __unused mach_msg_type_number_t size) { - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } return vm_map_copy_overwrite(map, (vm_map_address_t)address, - (vm_map_copy_t) data, FALSE /* interruptible XXX */); + (vm_map_copy_t) data, FALSE /* interruptible XXX */); } /* @@ -875,54 +908,58 @@ vm_write( */ kern_return_t mach_vm_copy( - vm_map_t map, - mach_vm_address_t source_address, - mach_vm_size_t size, - mach_vm_address_t dest_address) + vm_map_t map, + mach_vm_address_t source_address, + mach_vm_size_t size, + mach_vm_address_t dest_address) { 
vm_map_copy_t copy; kern_return_t kr; - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } kr = vm_map_copyin(map, (vm_map_address_t)source_address, - (vm_map_size_t)size, FALSE, ©); + (vm_map_size_t)size, FALSE, ©); if (KERN_SUCCESS == kr) { kr = vm_map_copy_overwrite(map, - (vm_map_address_t)dest_address, - copy, FALSE /* interruptible XXX */); + (vm_map_address_t)dest_address, + copy, FALSE /* interruptible XXX */); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { vm_map_copy_discard(copy); + } } return kr; } kern_return_t vm_copy( - vm_map_t map, - vm_address_t source_address, - vm_size_t size, - vm_address_t dest_address) + vm_map_t map, + vm_address_t source_address, + vm_size_t size, + vm_address_t dest_address) { vm_map_copy_t copy; kern_return_t kr; - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; + } kr = vm_map_copyin(map, (vm_map_address_t)source_address, - (vm_map_size_t)size, FALSE, ©); + (vm_map_size_t)size, FALSE, ©); if (KERN_SUCCESS == kr) { kr = vm_map_copy_overwrite(map, - (vm_map_address_t)dest_address, - copy, FALSE /* interruptible XXX */); + (vm_map_address_t)dest_address, + copy, FALSE /* interruptible XXX */); - if (KERN_SUCCESS != kr) + if (KERN_SUCCESS != kr) { vm_map_copy_discard(copy); + } } return kr; } @@ -940,66 +977,67 @@ vm_copy( */ kern_return_t mach_vm_map_external( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t initial_size, - mach_vm_offset_t mask, - int flags, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t initial_size, + mach_vm_offset_t mask, + int flags, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { vm_tag_t tag; VM_GET_FLAGS_ALIAS(flags, tag); - return (mach_vm_map_kernel(target_map, address, initial_size, mask, - flags, VM_MAP_KERNEL_FLAGS_NONE, tag, - port, offset, copy, - cur_protection, max_protection, - inheritance)); + return mach_vm_map_kernel(target_map, address, initial_size, mask, + flags, VM_MAP_KERNEL_FLAGS_NONE, tag, + port, offset, copy, + cur_protection, max_protection, + inheritance); } kern_return_t mach_vm_map_kernel( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t initial_size, - mach_vm_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t initial_size, + mach_vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { - kern_return_t kr; - vm_map_offset_t vmmaddr; + kern_return_t kr; + vm_map_offset_t vmmaddr; vmmaddr = (vm_map_offset_t) *address; /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_MAP) + if (flags & ~VM_FLAGS_USER_MAP) { return KERN_INVALID_ARGUMENT; + } kr = vm_map_enter_mem_object(target_map, - &vmmaddr, - initial_size, - mask, - flags, - vmk_flags, - tag, - port, - offset, - copy, - cur_protection, - max_protection, - inheritance); + &vmmaddr, + initial_size, + mask, + flags, + vmk_flags, + 
tag, + port, + offset, + copy, + cur_protection, + max_protection, + inheritance); #if KASAN if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) { @@ -1015,43 +1053,43 @@ mach_vm_map_kernel( /* legacy interface */ kern_return_t vm_map_64_external( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { vm_tag_t tag; VM_GET_FLAGS_ALIAS(flags, tag); - return (vm_map_64_kernel(target_map, address, size, mask, - flags, VM_MAP_KERNEL_FLAGS_NONE, - tag, port, offset, copy, - cur_protection, max_protection, - inheritance)); + return vm_map_64_kernel(target_map, address, size, mask, + flags, VM_MAP_KERNEL_FLAGS_NONE, + tag, port, offset, copy, + cur_protection, max_protection, + inheritance); } kern_return_t vm_map_64_kernel( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { mach_vm_address_t map_addr; mach_vm_size_t map_size; @@ -1063,9 +1101,9 @@ vm_map_64_kernel( map_mask = (mach_vm_offset_t)mask; kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, - flags, vmk_flags, tag, - port, offset, copy, - cur_protection, max_protection, inheritance); + flags, vmk_flags, tag, + port, offset, copy, + cur_protection, max_protection, inheritance); *address = CAST_DOWN(vm_offset_t, map_addr); return kr; } @@ -1073,42 +1111,42 @@ vm_map_64_kernel( /* temporary, until world build */ kern_return_t vm_map_external( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - ipc_port_t port, - vm_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + ipc_port_t port, + vm_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { vm_tag_t tag; VM_GET_FLAGS_ALIAS(flags, tag); - return (vm_map_kernel(target_map, address, size, mask, - flags, VM_MAP_KERNEL_FLAGS_NONE, tag, - port, offset, copy, - cur_protection, max_protection, inheritance)); + return vm_map_kernel(target_map, address, size, mask, + flags, VM_MAP_KERNEL_FLAGS_NONE, tag, + port, offset, copy, + cur_protection, max_protection, inheritance); } kern_return_t vm_map_kernel( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - vm_map_kernel_flags_t vmk_flags, - vm_tag_t tag, - ipc_port_t port, - vm_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_offset_t 
*address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_map_kernel_flags_t vmk_flags, + vm_tag_t tag, + ipc_port_t port, + vm_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { mach_vm_address_t map_addr; mach_vm_size_t map_size; @@ -1122,9 +1160,9 @@ vm_map_kernel( obj_offset = (vm_object_offset_t)offset; kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask, - flags, vmk_flags, tag, - port, obj_offset, copy, - cur_protection, max_protection, inheritance); + flags, vmk_flags, tag, + port, obj_offset, copy, + cur_protection, max_protection, inheritance); *address = CAST_DOWN(vm_offset_t, map_addr); return kr; } @@ -1138,65 +1176,67 @@ vm_map_kernel( */ kern_return_t mach_vm_remap_external( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t size, - mach_vm_offset_t mask, - int flags, - vm_map_t src_map, - mach_vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int flags, + vm_map_t src_map, + mach_vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { vm_tag_t tag; VM_GET_FLAGS_ALIAS(flags, tag); - return (mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address, - copy, cur_protection, max_protection, inheritance)); + return mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address, + copy, cur_protection, max_protection, inheritance); } kern_return_t mach_vm_remap_kernel( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t size, - mach_vm_offset_t mask, - int flags, - vm_tag_t tag, - vm_map_t src_map, - mach_vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int flags, + vm_tag_t tag, + vm_map_t src_map, + mach_vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { - vm_map_offset_t map_addr; - kern_return_t kr; + vm_map_offset_t map_addr; + kern_return_t kr; - if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) + if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) { return KERN_INVALID_ARGUMENT; + } /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_REMAP) + if (flags & ~VM_FLAGS_USER_REMAP) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_offset_t)*address; kr = vm_map_remap(target_map, - &map_addr, - size, - mask, - flags, - VM_MAP_KERNEL_FLAGS_NONE, - tag, - src_map, - memory_address, - copy, - cur_protection, - max_protection, - inheritance); + &map_addr, + size, + mask, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + src_map, + memory_address, + copy, + cur_protection, + max_protection, + inheritance); *address = map_addr; return kr; } @@ -1214,65 +1254,67 @@ mach_vm_remap_kernel( */ kern_return_t vm_remap_external( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - vm_map_t src_map, - vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + 
int flags, + vm_map_t src_map, + vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { vm_tag_t tag; VM_GET_FLAGS_ALIAS(flags, tag); - return (vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, - memory_address, copy, cur_protection, max_protection, inheritance)); + return vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, + memory_address, copy, cur_protection, max_protection, inheritance); } kern_return_t vm_remap_kernel( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - vm_tag_t tag, - vm_map_t src_map, - vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + vm_tag_t tag, + vm_map_t src_map, + vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { - vm_map_offset_t map_addr; - kern_return_t kr; + vm_map_offset_t map_addr; + kern_return_t kr; - if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) + if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) { return KERN_INVALID_ARGUMENT; + } /* filter out any kernel-only flags */ - if (flags & ~VM_FLAGS_USER_REMAP) + if (flags & ~VM_FLAGS_USER_REMAP) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_offset_t)*address; kr = vm_map_remap(target_map, - &map_addr, - size, - mask, - flags, - VM_MAP_KERNEL_FLAGS_NONE, - tag, - src_map, - memory_address, - copy, - cur_protection, - max_protection, - inheritance); + &map_addr, + size, + mask, + flags, + VM_MAP_KERNEL_FLAGS_NONE, + tag, + src_map, + memory_address, + copy, + cur_protection, + max_protection, + inheritance); *address = CAST_DOWN(vm_offset_t, map_addr); return kr; } @@ -1292,52 +1334,55 @@ vm_remap_kernel( */ kern_return_t mach_vm_wire_external( - host_priv_t host_priv, - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - vm_prot_t access) + host_priv_t host_priv, + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_prot_t access) { - return (mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK)); + return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK); } kern_return_t mach_vm_wire_kernel( - host_priv_t host_priv, - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - vm_prot_t access, - vm_tag_t tag) + host_priv_t host_priv, + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_prot_t access, + vm_tag_t tag) { - kern_return_t rc; + kern_return_t rc; - if (host_priv == HOST_PRIV_NULL) + if (host_priv == HOST_PRIV_NULL) { return KERN_INVALID_HOST; + } assert(host_priv == &realhost); - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_TASK; + } - if (access & ~VM_PROT_ALL || (start + size < start)) + if (access & ~VM_PROT_ALL || (start + size < start)) { return KERN_INVALID_ARGUMENT; + } if (access != VM_PROT_NONE) { rc = vm_map_wire_kernel(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - access, tag, - TRUE); + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + access, tag, + TRUE); } else { rc = vm_map_unwire(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - 
vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - TRUE); + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + TRUE); } return rc; } @@ -1352,42 +1397,45 @@ mach_vm_wire_kernel( */ kern_return_t vm_wire( - host_priv_t host_priv, - vm_map_t map, - vm_offset_t start, - vm_size_t size, - vm_prot_t access) + host_priv_t host_priv, + vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_prot_t access) { - kern_return_t rc; + kern_return_t rc; - if (host_priv == HOST_PRIV_NULL) + if (host_priv == HOST_PRIV_NULL) { return KERN_INVALID_HOST; + } assert(host_priv == &realhost); - if (map == VM_MAP_NULL) + if (map == VM_MAP_NULL) { return KERN_INVALID_TASK; + } - if ((access & ~VM_PROT_ALL) || (start + size < start)) + if ((access & ~VM_PROT_ALL) || (start + size < start)) { return KERN_INVALID_ARGUMENT; + } if (size == 0) { rc = KERN_SUCCESS; } else if (access != VM_PROT_NONE) { rc = vm_map_wire_kernel(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - access, VM_KERN_MEMORY_OSFMK, - TRUE); + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + access, VM_KERN_MEMORY_OSFMK, + TRUE); } else { rc = vm_map_unwire(map, - vm_map_trunc_page(start, - VM_MAP_PAGE_MASK(map)), - vm_map_round_page(start+size, - VM_MAP_PAGE_MASK(map)), - TRUE); + vm_map_trunc_page(start, + VM_MAP_PAGE_MASK(map)), + vm_map_round_page(start + size, + VM_MAP_PAGE_MASK(map)), + TRUE); } return rc; } @@ -1425,19 +1473,19 @@ vm_wire( kern_return_t mach_vm_msync( - vm_map_t map, - mach_vm_address_t address, - mach_vm_size_t size, - vm_sync_t sync_flags) + vm_map_t map, + mach_vm_address_t address, + mach_vm_size_t size, + vm_sync_t sync_flags) { - - if (map == VM_MAP_NULL) - return(KERN_INVALID_TASK); + if (map == VM_MAP_NULL) { + return KERN_INVALID_TASK; + } return vm_map_msync(map, (vm_map_address_t)address, - (vm_map_size_t)size, sync_flags); + (vm_map_size_t)size, sync_flags); } - + /* * vm_msync * @@ -1474,17 +1522,17 @@ mach_vm_msync( kern_return_t vm_msync( - vm_map_t map, - vm_address_t address, - vm_size_t size, - vm_sync_t sync_flags) + vm_map_t map, + vm_address_t address, + vm_size_t size, + vm_sync_t sync_flags) { - - if (map == VM_MAP_NULL) - return(KERN_INVALID_TASK); + if (map == VM_MAP_NULL) { + return KERN_INVALID_TASK; + } return vm_map_msync(map, (vm_map_address_t)address, - (vm_map_size_t)size, sync_flags); + (vm_map_size_t)size, sync_flags); } @@ -1492,11 +1540,11 @@ int vm_toggle_entry_reuse(int toggle, int *old_value) { vm_map_t map = current_map(); - + assert(!map->is_nested_map); - if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){ + if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) { *old_value = map->disable_vmentry_reuse; - } else if(toggle == VM_TOGGLE_SET){ + } else if (toggle == VM_TOGGLE_SET) { vm_map_entry_t map_to_entry; vm_map_lock(map); @@ -1509,18 +1557,19 @@ vm_toggle_entry_reuse(int toggle, int *old_value) map->highest_entry_end = map->first_free->vme_end; } vm_map_unlock(map); - } else if (toggle == VM_TOGGLE_CLEAR){ + } else if (toggle == VM_TOGGLE_CLEAR) { vm_map_lock(map); map->disable_vmentry_reuse = FALSE; vm_map_unlock(map); - } else + } else { return KERN_INVALID_ARGUMENT; + } return KERN_SUCCESS; } /* - * mach_vm_behavior_set + * mach_vm_behavior_set * * Sets the paging behavior attribute for the specified range * in the specified map. 
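(Nearly every entry point in this file opens with the same two screens, visible again in the mach_vm_behavior_set() hunk below: the test "start + size < start" rejects ranges that wrap the unsigned address space, and size == 0 short-circuits to KERN_SUCCESS; the surviving range is then widened to page boundaries with vm_map_trunc_page()/vm_map_round_page(). A self-contained sketch of those helpers, where PAGE_MASK_SKETCH stands in for VM_MAP_PAGE_MASK(map):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_MASK_SKETCH 0xfffULL      /* stand-in for VM_MAP_PAGE_MASK(map) */

/* Unsigned wrap-around check: start + size must not pass the top of
 * the address space. */
static bool
range_ok(uint64_t start, uint64_t size)
{
    return start + size >= start;
}

static uint64_t
trunc_page_sketch(uint64_t addr)
{
    return addr & ~PAGE_MASK_SKETCH;
}

static uint64_t
round_page_sketch(uint64_t addr)
{
    return (addr + PAGE_MASK_SKETCH) & ~PAGE_MASK_SKETCH;
}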
@@ -1528,20 +1577,22 @@ vm_toggle_entry_reuse(int toggle, int *old_value) * This routine will fail with KERN_INVALID_ADDRESS if any address * in [start,start+size) is not a valid allocated memory region. */ -kern_return_t +kern_return_t mach_vm_behavior_set( - vm_map_t map, - mach_vm_offset_t start, - mach_vm_size_t size, - vm_behavior_t new_behavior) + vm_map_t map, + mach_vm_offset_t start, + mach_vm_size_t size, + vm_behavior_t new_behavior) { - vm_map_offset_t align_mask; + vm_map_offset_t align_mask; - if ((map == VM_MAP_NULL) || (start + size < start)) - return(KERN_INVALID_ARGUMENT); + if ((map == VM_MAP_NULL) || (start + size < start)) { + return KERN_INVALID_ARGUMENT; + } - if (size == 0) + if (size == 0) { return KERN_SUCCESS; + } switch (new_behavior) { case VM_BEHAVIOR_REUSABLE: @@ -1560,13 +1611,13 @@ mach_vm_behavior_set( } return vm_map_behavior_set(map, - vm_map_trunc_page(start, align_mask), - vm_map_round_page(start+size, align_mask), - new_behavior); + vm_map_trunc_page(start, align_mask), + vm_map_round_page(start + size, align_mask), + new_behavior); } /* - * vm_behavior_set + * vm_behavior_set * * Sets the paging behavior attribute for the specified range * in the specified map. @@ -1578,20 +1629,21 @@ mach_vm_behavior_set( * use of vm_offset_t (if the map provided is larger than the * kernel's). */ -kern_return_t +kern_return_t vm_behavior_set( - vm_map_t map, - vm_offset_t start, - vm_size_t size, - vm_behavior_t new_behavior) + vm_map_t map, + vm_offset_t start, + vm_size_t size, + vm_behavior_t new_behavior) { - if (start + size < start) + if (start + size < start) { return KERN_INVALID_ARGUMENT; + } return mach_vm_behavior_set(map, - (mach_vm_offset_t) start, - (mach_vm_size_t) size, - new_behavior); + (mach_vm_offset_t) start, + (mach_vm_size_t) size, + new_behavior); } /* @@ -1610,32 +1662,34 @@ vm_behavior_set( kern_return_t mach_vm_region( - vm_map_t map, - mach_vm_offset_t *address, /* IN/OUT */ - mach_vm_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + mach_vm_offset_t *address, /* IN/OUT */ + mach_vm_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_offset_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_offset_t)*address; map_size = (vm_map_size_t)*size; /* legacy conversion */ - if (VM_REGION_BASIC_INFO == flavor) + if (VM_REGION_BASIC_INFO == flavor) { flavor = VM_REGION_BASIC_INFO_64; + } kr = vm_map_region(map, - &map_addr, &map_size, - flavor, info, count, - object_name); + &map_addr, &map_size, + flavor, info, count, + object_name); *address = map_addr; *size = map_size; @@ -1658,71 +1712,76 @@ mach_vm_region( kern_return_t vm_region_64( - vm_map_t map, - vm_offset_t *address, /* IN/OUT */ - vm_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + 
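
mach_vm_region above quietly upgrades the legacy VM_REGION_BASIC_INFO flavor to its 64-bit form, so callers may as well request VM_REGION_BASIC_INFO_64 and the matching count up front. A hypothetical user-space helper (assuming <mach/mach.h> and <mach/mach_vm.h> as in the earlier sketch):

/* Find the region containing `addr` in our own map and report its
 * current protection. `start` is in/out: on success it holds the
 * region's base address. */
static kern_return_t
region_prot(mach_vm_address_t addr, vm_prot_t *prot_out)
{
    mach_vm_address_t start = addr;
    mach_vm_size_t size = 0;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
    mach_port_t object_name = MACH_PORT_NULL; /* unused by the kernel */
    kern_return_t kr;

    kr = mach_vm_region(mach_task_self(), &start, &size,
        VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
        &count, &object_name);
    if (kr == KERN_SUCCESS) {
        *prot_out = info.protection;
    }
    return kr;
}
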
mach_port_t *object_name) /* OUT */ { - vm_map_offset_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_offset_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_offset_t)*address; map_size = (vm_map_size_t)*size; /* legacy conversion */ - if (VM_REGION_BASIC_INFO == flavor) + if (VM_REGION_BASIC_INFO == flavor) { flavor = VM_REGION_BASIC_INFO_64; + } kr = vm_map_region(map, - &map_addr, &map_size, - flavor, info, count, - object_name); + &map_addr, &map_size, + flavor, info, count, + object_name); *address = CAST_DOWN(vm_offset_t, map_addr); *size = CAST_DOWN(vm_size_t, map_size); - if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } kern_return_t vm_region( - vm_map_t map, - vm_address_t *address, /* IN/OUT */ - vm_size_t *size, /* OUT */ - vm_region_flavor_t flavor, /* IN */ - vm_region_info_t info, /* OUT */ - mach_msg_type_number_t *count, /* IN/OUT */ - mach_port_t *object_name) /* OUT */ + vm_map_t map, + vm_address_t *address, /* IN/OUT */ + vm_size_t *size, /* OUT */ + vm_region_flavor_t flavor, /* IN */ + vm_region_info_t info, /* OUT */ + mach_msg_type_number_t *count, /* IN/OUT */ + mach_port_t *object_name) /* OUT */ { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; kr = vm_map_region(map, - &map_addr, &map_size, - flavor, info, count, - object_name); + &map_addr, &map_size, + flavor, info, count, + object_name); *address = CAST_DOWN(vm_address_t, map_addr); *size = CAST_DOWN(vm_size_t, map_size); - if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } @@ -1733,30 +1792,31 @@ vm_region( */ kern_return_t mach_vm_region_recurse( - vm_map_t map, - mach_vm_address_t *address, - mach_vm_size_t *size, - uint32_t *depth, - vm_region_recurse_info_t info, - mach_msg_type_number_t *infoCnt) + vm_map_t map, + mach_vm_address_t *address, + mach_vm_size_t *size, + uint32_t *depth, + vm_region_recurse_info_t info, + mach_msg_type_number_t *infoCnt) { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; kr = vm_map_region_recurse_64( - map, - &map_addr, - &map_size, - depth, - (vm_region_submap_info_64_t)info, - infoCnt); + map, + &map_addr, + &map_size, + depth, + (vm_region_submap_info_64_t)info, + infoCnt); *address = map_addr; *size = map_size; @@ -1770,102 +1830,107 @@ mach_vm_region_recurse( */ kern_return_t vm_region_recurse_64( - vm_map_t map, - vm_address_t *address, - vm_size_t *size, - uint32_t *depth, - vm_region_recurse_info_64_t info, - mach_msg_type_number_t *infoCnt) + vm_map_t map, + vm_address_t *address, + vm_size_t *size, + uint32_t *depth, + vm_region_recurse_info_64_t info, + mach_msg_type_number_t *infoCnt) { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + 
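
The recurse variants use *depth as a cursor into nested submaps: a caller that sees is_submap re-queries the same address one level deeper instead of advancing. A sketch of that loop with illustrative names, assuming the same Mach headers as above:

/* Walk every leaf entry in the current task's map, descending into
 * submaps via the depth cursor, until the lookup runs off the end
 * of the address space. */
static void
walk_map(void)
{
    mach_vm_address_t addr = 0;
    natural_t depth = 0;

    for (;;) {
        mach_vm_size_t size = 0;
        vm_region_submap_info_data_64_t info;
        mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;

        if (mach_vm_region_recurse(mach_task_self(), &addr, &size,
            &depth, (vm_region_recurse_info_t)&info,
            &count) != KERN_SUCCESS) {
            break;
        }
        if (info.is_submap) {
            depth++;        /* re-query the same address inside the submap */
            continue;
        }
        /* ... inspect info.protection, info.user_tag, etc. ... */
        addr += size;
    }
}
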
vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; kr = vm_map_region_recurse_64( - map, - &map_addr, - &map_size, - depth, - (vm_region_submap_info_64_t)info, - infoCnt); + map, + &map_addr, + &map_size, + depth, + (vm_region_submap_info_64_t)info, + infoCnt); *address = CAST_DOWN(vm_address_t, map_addr); *size = CAST_DOWN(vm_size_t, map_size); - if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } kern_return_t vm_region_recurse( - vm_map_t map, - vm_offset_t *address, /* IN/OUT */ - vm_size_t *size, /* OUT */ - natural_t *depth, /* IN/OUT */ - vm_region_recurse_info_t info32, /* IN/OUT */ - mach_msg_type_number_t *infoCnt) /* IN/OUT */ + vm_map_t map, + vm_offset_t *address, /* IN/OUT */ + vm_size_t *size, /* OUT */ + natural_t *depth, /* IN/OUT */ + vm_region_recurse_info_t info32, /* IN/OUT */ + mach_msg_type_number_t *infoCnt) /* IN/OUT */ { vm_region_submap_info_data_64_t info64; vm_region_submap_info_t info; - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) + if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) { return KERN_INVALID_ARGUMENT; + } + - map_addr = (vm_map_address_t)*address; map_size = (vm_map_size_t)*size; info = (vm_region_submap_info_t)info32; *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64; - kr = vm_map_region_recurse_64(map, &map_addr,&map_size, - depth, &info64, infoCnt); + kr = vm_map_region_recurse_64(map, &map_addr, &map_size, + depth, &info64, infoCnt); info->protection = info64.protection; info->max_protection = info64.max_protection; info->inheritance = info64.inheritance; info->offset = (uint32_t)info64.offset; /* trouble-maker */ - info->user_tag = info64.user_tag; - info->pages_resident = info64.pages_resident; - info->pages_shared_now_private = info64.pages_shared_now_private; - info->pages_swapped_out = info64.pages_swapped_out; - info->pages_dirtied = info64.pages_dirtied; - info->ref_count = info64.ref_count; - info->shadow_depth = info64.shadow_depth; - info->external_pager = info64.external_pager; - info->share_mode = info64.share_mode; + info->user_tag = info64.user_tag; + info->pages_resident = info64.pages_resident; + info->pages_shared_now_private = info64.pages_shared_now_private; + info->pages_swapped_out = info64.pages_swapped_out; + info->pages_dirtied = info64.pages_dirtied; + info->ref_count = info64.ref_count; + info->shadow_depth = info64.shadow_depth; + info->external_pager = info64.external_pager; + info->share_mode = info64.share_mode; info->is_submap = info64.is_submap; info->behavior = info64.behavior; info->object_id = info64.object_id; - info->user_wired_count = info64.user_wired_count; + info->user_wired_count = info64.user_wired_count; *address = CAST_DOWN(vm_address_t, map_addr); *size = CAST_DOWN(vm_size_t, map_size); *infoCnt = VM_REGION_SUBMAP_INFO_COUNT; - if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) + if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { return KERN_INVALID_ADDRESS; + } return kr; } kern_return_t mach_vm_purgable_control( - vm_map_t map, - mach_vm_offset_t address, - vm_purgable_t control, - int 
*state) + vm_map_t map, + mach_vm_offset_t address, + vm_purgable_t control, + int *state) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { /* not allowed from user-space */ @@ -1873,20 +1938,21 @@ mach_vm_purgable_control( } return vm_map_purgable_control(map, - vm_map_trunc_page(address, PAGE_MASK), - control, - state); + vm_map_trunc_page(address, PAGE_MASK), + control, + state); } kern_return_t vm_purgable_control( - vm_map_t map, - vm_offset_t address, - vm_purgable_t control, - int *state) + vm_map_t map, + vm_offset_t address, + vm_purgable_t control, + int *state) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { /* not allowed from user-space */ @@ -1894,11 +1960,11 @@ vm_purgable_control( } return vm_map_purgable_control(map, - vm_map_trunc_page(address, PAGE_MASK), - control, - state); + vm_map_trunc_page(address, PAGE_MASK), + control, + state); } - + /* * Ordinarily, the right to allocate CPM is restricted @@ -1906,7 +1972,7 @@ vm_purgable_control( * to the host priv port). Set this variable to zero if * you want to let any application allocate CPM. */ -unsigned int vm_allocate_cpm_privileged = 0; +unsigned int vm_allocate_cpm_privileged = 0; /* * Allocate memory in the specified map, with the caveat that @@ -1920,29 +1986,31 @@ unsigned int vm_allocate_cpm_privileged = 0; */ kern_return_t vm_allocate_cpm( - host_priv_t host_priv, - vm_map_t map, - vm_address_t *addr, - vm_size_t size, - int flags) + host_priv_t host_priv, + vm_map_t map, + vm_address_t *addr, + vm_size_t size, + int flags) { - vm_map_address_t map_addr; - vm_map_size_t map_size; - kern_return_t kr; + vm_map_address_t map_addr; + vm_map_size_t map_size; + kern_return_t kr; - if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) + if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) { return KERN_INVALID_HOST; + } - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_addr = (vm_map_address_t)*addr; map_size = (vm_map_size_t)size; kr = vm_map_enter_cpm(map, - &map_addr, - map_size, - flags); + &map_addr, + map_size, + flags); *addr = CAST_DOWN(vm_address_t, map_addr); return kr; @@ -1951,13 +2019,14 @@ vm_allocate_cpm( kern_return_t mach_vm_page_query( - vm_map_t map, - mach_vm_offset_t offset, - int *disposition, - int *ref_count) + vm_map_t map, + mach_vm_offset_t offset, + int *disposition, + int *ref_count) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } return vm_map_page_query_internal( map, @@ -1967,13 +2036,14 @@ mach_vm_page_query( kern_return_t vm_map_page_query( - vm_map_t map, - vm_offset_t offset, - int *disposition, - int *ref_count) + vm_map_t map, + vm_offset_t offset, + int *disposition, + int *ref_count) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } return vm_map_page_query_internal( map, @@ -1983,28 +2053,28 @@ vm_map_page_query( kern_return_t mach_vm_page_range_query( - vm_map_t map, - mach_vm_offset_t address, - mach_vm_size_t size, - mach_vm_address_t dispositions_addr, - mach_vm_size_t *dispositions_count) + vm_map_t map, + mach_vm_offset_t address, + mach_vm_size_t size, + mach_vm_address_t dispositions_addr, + mach_vm_size_t *dispositions_count) { - kern_return_t kr = KERN_SUCCESS; - int num_pages = 0, i = 0; - mach_vm_size_t curr_sz = 0, copy_sz = 0; - 
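
The purgable-control pair above forwards to vm_map_purgable_control after page-truncating the address; on VM_PURGABLE_SET_STATE the previous state is handed back through *state. A hypothetical user-space use, assuming the buffer was allocated with mach_vm_allocate and VM_FLAGS_PURGABLE:

/* Toggle a purgeable cache buffer between volatile and nonvolatile.
 * When pinning it back down, check the returned old state to learn
 * whether the kernel emptied it while it was volatile. */
static kern_return_t
set_cache_volatility(mach_vm_address_t buf, boolean_t volatile_now)
{
    int state = volatile_now ? VM_PURGABLE_VOLATILE
                             : VM_PURGABLE_NONVOLATILE;
    kern_return_t kr;

    kr = mach_vm_purgable_control(mach_task_self(), buf,
        VM_PURGABLE_SET_STATE, &state);
    if (kr == KERN_SUCCESS && !volatile_now &&
        (state & VM_PURGABLE_STATE_MASK) == VM_PURGABLE_EMPTY) {
        /* contents were reclaimed; caller must regenerate them */
        return KERN_MEMORY_ERROR;
    }
    return kr;
}
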
mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0; - mach_msg_type_number_t count = 0; + kern_return_t kr = KERN_SUCCESS; + int num_pages = 0, i = 0; + mach_vm_size_t curr_sz = 0, copy_sz = 0; + mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0; + mach_msg_type_number_t count = 0; - void *info = NULL; - void *local_disp = NULL;; - vm_map_size_t info_size = 0, local_disp_size = 0; - mach_vm_offset_t start = 0, end = 0; + void *info = NULL; + void *local_disp = NULL;; + vm_map_size_t info_size = 0, local_disp_size = 0; + mach_vm_offset_t start = 0, end = 0; if (map == VM_MAP_NULL || dispositions_count == NULL) { return KERN_INVALID_ARGUMENT; } - disp_buf_req_size = ( *dispositions_count * sizeof(int)); + disp_buf_req_size = (*dispositions_count * sizeof(int)); start = mach_vm_trunc_page(address); end = mach_vm_round_page(address + size); @@ -2012,6 +2082,13 @@ mach_vm_page_range_query( return KERN_INVALID_ARGUMENT; } + if ((end - start) < size) { + /* + * Aligned size is less than unaligned size. + */ + return KERN_INVALID_ARGUMENT; + } + if (disp_buf_req_size == 0 || (end == start)) { return KERN_SUCCESS; } @@ -2035,31 +2112,28 @@ mach_vm_page_range_query( local_disp = kalloc(local_disp_size); if (local_disp == NULL) { - kfree(info, info_size); info = NULL; return KERN_RESOURCE_SHORTAGE; } while (size) { - count = VM_PAGE_INFO_BASIC_COUNT; kr = vm_map_page_range_info_internal( - map, - start, - mach_vm_round_page(start + curr_sz), - VM_PAGE_INFO_BASIC, - (vm_page_info_t) info, - &count); + map, + start, + mach_vm_round_page(start + curr_sz), + VM_PAGE_INFO_BASIC, + (vm_page_info_t) info, + &count); assert(kr == KERN_SUCCESS); for (i = 0; i < num_pages; i++) { - ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition; } - copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int)/* an int per page */); + copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */); kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz); start += curr_sz; @@ -2071,7 +2145,6 @@ mach_vm_page_range_query( } if ((disp_buf_req_size == 0) || (curr_sz >= size)) { - /* * We might have inspected the full range OR * more than it esp. 
if the user passed in @@ -2080,9 +2153,7 @@ mach_vm_page_range_query( */ size = 0; - } else { - dispositions_addr += copy_sz; size -= curr_sz; @@ -2105,13 +2176,13 @@ mach_vm_page_range_query( kern_return_t mach_vm_page_info( - vm_map_t map, - mach_vm_address_t address, - vm_page_info_flavor_t flavor, - vm_page_info_t info, - mach_msg_type_number_t *count) + vm_map_t map, + mach_vm_address_t address, + vm_page_info_flavor_t flavor, + vm_page_info_t info, + mach_msg_type_number_t *count) { - kern_return_t kr; + kern_return_t kr; if (map == VM_MAP_NULL) { return KERN_INVALID_ARGUMENT; @@ -2124,15 +2195,16 @@ mach_vm_page_info( /* map a (whole) upl into an address space */ kern_return_t vm_upl_map( - vm_map_t map, - upl_t upl, - vm_address_t *dst_addr) + vm_map_t map, + upl_t upl, + vm_address_t *dst_addr) { - vm_map_offset_t map_addr; - kern_return_t kr; + vm_map_offset_t map_addr; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } kr = vm_map_enter_upl(map, upl, &map_addr); *dst_addr = CAST_DOWN(vm_address_t, map_addr); @@ -2141,47 +2213,50 @@ vm_upl_map( kern_return_t vm_upl_unmap( - vm_map_t map, - upl_t upl) + vm_map_t map, + upl_t upl) { - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } - return (vm_map_remove_upl(map, upl)); + return vm_map_remove_upl(map, upl); } /* Retrieve a upl for an object underlying an address range in a map */ kern_return_t vm_map_get_upl( - vm_map_t map, - vm_map_offset_t map_offset, - upl_size_t *upl_size, - upl_t *upl, - upl_page_info_array_t page_list, - unsigned int *count, - upl_control_flags_t *flags, - vm_tag_t tag, - int force_data_sync) + vm_map_t map, + vm_map_offset_t map_offset, + upl_size_t *upl_size, + upl_t *upl, + upl_page_info_array_t page_list, + unsigned int *count, + upl_control_flags_t *flags, + vm_tag_t tag, + int force_data_sync) { upl_control_flags_t map_flags; - kern_return_t kr; + kern_return_t kr; - if (VM_MAP_NULL == map) + if (VM_MAP_NULL == map) { return KERN_INVALID_ARGUMENT; + } map_flags = *flags & ~UPL_NOZEROFILL; - if (force_data_sync) + if (force_data_sync) { map_flags |= UPL_FORCE_DATA_SYNC; + } kr = vm_map_create_upl(map, - map_offset, - upl_size, - upl, - page_list, - count, - &map_flags, - tag); + map_offset, + upl_size, + upl, + page_list, + count, + &map_flags, + tag); *flags = (map_flags & ~UPL_FORCE_DATA_SYNC); return kr; @@ -2204,12 +2279,12 @@ int log_executable_mem_entry = 0; */ kern_return_t mach_make_memory_entry_64( - vm_map_t target_map, - memory_object_size_t *size, + vm_map_t target_map, + memory_object_size_t *size, memory_object_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_handle) + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_handle) { if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) { /* @@ -2219,64 +2294,64 @@ mach_make_memory_entry_64( } return mach_make_memory_entry_internal(target_map, - size, - offset, - permission, - object_handle, - parent_handle); + size, + offset, + permission, + object_handle, + parent_handle); } kern_return_t mach_make_memory_entry_internal( - vm_map_t target_map, - memory_object_size_t *size, + vm_map_t target_map, + memory_object_size_t *size, memory_object_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_handle) + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_handle) { - vm_map_version_t version; - vm_named_entry_t parent_entry; - 
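
The new (end - start) < size guard in mach_vm_page_range_query above closes an integer-wrap case that the end < start test alone misses: address + size can overflow far enough that the rounded end lands at or beyond the truncated start again. A standalone rendering of the checks, assuming the kernel's mach_vm_trunc_page/mach_vm_round_page helpers and 64-bit mach_vm_* types:

/* Sketch of the bounds validation performed above. With 4K pages,
 * address = 0x1000 and size = 0xFFFFFFFFFFFFFFFF wrap so that end
 * rounds back to 0x1000 == start: end < start is false, but
 * end - start == 0 is less than size, so the request is rejected. */
static kern_return_t
validate_range(mach_vm_offset_t address, mach_vm_size_t size)
{
    mach_vm_offset_t start = mach_vm_trunc_page(address);
    mach_vm_offset_t end = mach_vm_round_page(address + size);

    if (end < start) {
        return KERN_INVALID_ARGUMENT;
    }
    if ((end - start) < size) {
        /* aligned size is less than unaligned size: wrapped */
        return KERN_INVALID_ARGUMENT;
    }
    return KERN_SUCCESS;
}
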
vm_named_entry_t user_entry; - ipc_port_t user_handle; - kern_return_t kr; - vm_map_t real_map; + vm_map_version_t version; + vm_named_entry_t parent_entry; + vm_named_entry_t user_entry; + ipc_port_t user_handle; + kern_return_t kr; + vm_map_t real_map; /* needed for call to vm_map_lookup_locked */ - boolean_t wired; - boolean_t iskernel; - vm_object_offset_t obj_off; - vm_prot_t prot; - struct vm_object_fault_info fault_info = {}; - vm_object_t object; - vm_object_t shadow_object; + boolean_t wired; + boolean_t iskernel; + vm_object_offset_t obj_off; + vm_prot_t prot; + struct vm_object_fault_info fault_info = {}; + vm_object_t object; + vm_object_t shadow_object; /* needed for direct map entry manipulation */ - vm_map_entry_t map_entry; - vm_map_entry_t next_entry; - vm_map_t local_map; - vm_map_t original_map = target_map; - vm_map_size_t total_size, map_size; - vm_map_offset_t map_start, map_end; - vm_map_offset_t local_offset; - vm_object_size_t mappable_size; - - /* + vm_map_entry_t map_entry; + vm_map_entry_t next_entry; + vm_map_t local_map; + vm_map_t original_map = target_map; + vm_map_size_t total_size, map_size; + vm_map_offset_t map_start, map_end; + vm_map_offset_t local_offset; + vm_object_size_t mappable_size; + + /* * Stash the offset in the page for use by vm_map_enter_mem_object() * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case. */ - vm_object_offset_t offset_in_page; + vm_object_offset_t offset_in_page; - unsigned int access; - vm_prot_t protections; - vm_prot_t original_protections, mask_protections; - unsigned int wimg_mode; + unsigned int access; + vm_prot_t protections; + vm_prot_t original_protections, mask_protections; + unsigned int wimg_mode; - boolean_t force_shadow = FALSE; - boolean_t use_data_addr; - boolean_t use_4K_compat; + boolean_t force_shadow = FALSE; + boolean_t use_data_addr; + boolean_t use_4K_compat; #if VM_NAMED_ENTRY_LIST - int alias = -1; + int alias = -1; #endif /* VM_NAMED_ENTRY_LIST */ if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) { @@ -2310,40 +2385,42 @@ mach_make_memory_entry_internal( map_start = vm_map_trunc_page(offset, PAGE_MASK); if (permission & MAP_MEM_ONLY) { - boolean_t parent_is_object; + boolean_t parent_is_object; map_end = vm_map_round_page(offset + *size, PAGE_MASK); map_size = map_end - map_start; - + if (use_data_addr || use_4K_compat || parent_entry == NULL) { return KERN_INVALID_ARGUMENT; } parent_is_object = !parent_entry->is_sub_map; object = parent_entry->backing.object; - if(parent_is_object && object != VM_OBJECT_NULL) + if (parent_is_object && object != VM_OBJECT_NULL) { wimg_mode = object->wimg_bits; - else + } else { wimg_mode = VM_WIMG_USE_DEFAULT; - if((access != GET_MAP_MEM(parent_entry->protection)) && - !(parent_entry->protection & VM_PROT_WRITE)) { + } + if ((access != GET_MAP_MEM(parent_entry->protection)) && + !(parent_entry->protection & VM_PROT_WRITE)) { return KERN_INVALID_RIGHT; } vm_prot_to_wimg(access, &wimg_mode); - if (access != MAP_MEM_NOOP) + if (access != MAP_MEM_NOOP) { SET_MAP_MEM(access, parent_entry->protection); + } if (parent_is_object && object && - (access != MAP_MEM_NOOP) && - (!(object->nophyscache))) { - + (access != MAP_MEM_NOOP) && + (!(object->nophyscache))) { if (object->wimg_bits != wimg_mode) { vm_object_lock(object); vm_object_change_wimg_mode(object, wimg_mode); vm_object_unlock(object); } } - if (object_handle) + if (object_handle) { *object_handle = IP_NULL; + } return KERN_SUCCESS; } else if (permission & MAP_MEM_NAMED_CREATE) { map_end = 
vm_map_round_page(offset + *size, PAGE_MASK); @@ -2377,7 +2454,7 @@ mach_make_memory_entry_internal( if (permission & MAP_MEM_PURGABLE) { task_t owner; - if (! (permission & VM_PROT_WRITE)) { + if (!(permission & VM_PROT_WRITE)) { /* if we can't write, we can't purge */ vm_object_deallocate(object); kr = KERN_INVALID_ARGUMENT; @@ -2422,10 +2499,10 @@ mach_make_memory_entry_internal( if (secluded_for_iokit && /* global boot-arg */ ((permission & MAP_MEM_GRAB_SECLUDED) #if 11 - /* XXX FBDP for my testing only */ - || (secluded_for_fbdp && map_size == 97550336) + /* XXX FBDP for my testing only */ + || (secluded_for_fbdp && map_size == 97550336) #endif - )) { + )) { #if 11 if (!(permission & MAP_MEM_GRAB_SECLUDED) && secluded_for_fbdp) { @@ -2444,9 +2521,9 @@ mach_make_memory_entry_internal( wimg_mode = object->wimg_bits; vm_prot_to_wimg(access, &wimg_mode); - if (access != MAP_MEM_NOOP) { - object->wimg_bits = wimg_mode; - } + if (access != MAP_MEM_NOOP) { + object->wimg_bits = wimg_mode; + } /* the object has no pages, so no WIMG bits to update here */ @@ -2475,13 +2552,13 @@ mach_make_memory_entry_internal( /* when the object field is filled in. */ *size = CAST_DOWN(vm_size_t, (user_entry->size - - user_entry->data_offset)); + user_entry->data_offset)); *object_handle = user_handle; return KERN_SUCCESS; } if (permission & MAP_MEM_VM_COPY) { - vm_map_copy_t copy; + vm_map_copy_t copy; if (target_map == VM_MAP_NULL) { return KERN_INVALID_TASK; @@ -2491,21 +2568,22 @@ mach_make_memory_entry_internal( map_size = map_end - map_start; if (use_data_addr || use_4K_compat) { offset_in_page = offset - map_start; - if (use_4K_compat) + if (use_4K_compat) { offset_in_page &= ~((signed)(0xFFF)); + } } else { offset_in_page = 0; } kr = vm_map_copyin_internal(target_map, - map_start, - map_size, - VM_MAP_COPYIN_ENTRY_LIST, - ©); + map_start, + map_size, + VM_MAP_COPYIN_ENTRY_LIST, + ©); if (kr != KERN_SUCCESS) { return kr; } - + kr = mach_memory_entry_allocate(&user_entry, &user_handle); if (kr != KERN_SUCCESS) { vm_map_copy_discard(copy); @@ -2522,14 +2600,14 @@ mach_make_memory_entry_internal( user_entry->data_offset = offset_in_page; *size = CAST_DOWN(vm_size_t, (user_entry->size - - user_entry->data_offset)); + user_entry->data_offset)); *object_handle = user_handle; return KERN_SUCCESS; } if (permission & MAP_MEM_VM_SHARE) { - vm_map_copy_t copy; - vm_prot_t cur_prot, max_prot; + vm_map_copy_t copy; + vm_prot_t cur_prot, max_prot; if (target_map == VM_MAP_NULL) { return KERN_INVALID_TASK; @@ -2539,26 +2617,27 @@ mach_make_memory_entry_internal( map_size = map_end - map_start; if (use_data_addr || use_4K_compat) { offset_in_page = offset - map_start; - if (use_4K_compat) + if (use_4K_compat) { offset_in_page &= ~((signed)(0xFFF)); + } } else { offset_in_page = 0; } cur_prot = VM_PROT_ALL; kr = vm_map_copy_extract(target_map, - map_start, - map_size, - ©, - &cur_prot, - &max_prot); + map_start, + map_size, + ©, + &cur_prot, + &max_prot); if (kr != KERN_SUCCESS) { return kr; } if (mask_protections) { /* - * We just want as much of "original_protections" + * We just want as much of "original_protections" * as we can get out of the actual "cur_prot". 
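* For example, a caller that set VM_PROT_IS_MASK and asked for
* VM_PROT_READ | VM_PROT_WRITE over a range whose actual protection
* is read-only comes away with a read-only entry here rather than a
* KERN_PROTECTION_FAILURE; only an empty intersection still fails.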
*/ protections &= cur_prot; @@ -2594,20 +2673,20 @@ mach_make_memory_entry_internal( user_entry->data_offset = offset_in_page; *size = CAST_DOWN(vm_size_t, (user_entry->size - - user_entry->data_offset)); + user_entry->data_offset)); *object_handle = user_handle; return KERN_SUCCESS; } if (parent_entry == NULL || (permission & MAP_MEM_NAMED_REUSE)) { - map_end = vm_map_round_page(offset + *size, PAGE_MASK); map_size = map_end - map_start; if (use_data_addr || use_4K_compat) { offset_in_page = offset - map_start; - if (use_4K_compat) + if (use_4K_compat) { offset_in_page &= ~((signed)(0xFFF)); + } } else { offset_in_page = 0; } @@ -2627,12 +2706,12 @@ redo_lookup: /* note we check the permission of the range against */ /* that requested by the caller */ - kr = vm_map_lookup_locked(&target_map, map_start, - protections | mask_protections, - OBJECT_LOCK_EXCLUSIVE, &version, - &object, &obj_off, &prot, &wired, - &fault_info, - &real_map); + kr = vm_map_lookup_locked(&target_map, map_start, + protections | mask_protections, + OBJECT_LOCK_EXCLUSIVE, &version, + &object, &obj_off, &prot, &wired, + &fault_info, + &real_map); if (kr != KERN_SUCCESS) { vm_map_unlock_read(target_map); goto make_mem_done; @@ -2657,19 +2736,19 @@ redo_lookup: void *bsd_info; bsd_info = current_task()->bsd_info; printf("pid %d[%s] making memory entry out of " - "executable range from 0x%llx to 0x%llx:" - "might cause code-signing issues " - "later\n", - proc_selfpid(), - (bsd_info != NULL - ? proc_name_address(bsd_info) - : "?"), - (uint64_t) map_start, - (uint64_t) map_end); + "executable range from 0x%llx to 0x%llx:" + "might cause code-signing issues " + "later\n", + proc_selfpid(), + (bsd_info != NULL + ? proc_name_address(bsd_info) + : "?"), + (uint64_t) map_start, + (uint64_t) map_end); } DTRACE_VM2(cs_executable_mem_entry, - uint64_t, (uint64_t)map_start, - uint64_t, (uint64_t)map_end); + uint64_t, (uint64_t)map_start, + uint64_t, (uint64_t)map_end); cs_executable_mem_entry++; #if 11 @@ -2682,24 +2761,25 @@ redo_lookup: kr = KERN_PROTECTION_FAILURE; vm_object_unlock(object); vm_map_unlock_read(target_map); - if(real_map != target_map) + if (real_map != target_map) { vm_map_unlock_read(real_map); + } goto make_mem_done; #endif /* 11 */ - } #endif /* CONFIG_EMBEDDED */ - if (((prot & protections) != protections) + if (((prot & protections) != protections) || (object == kernel_object)) { kr = KERN_INVALID_RIGHT; vm_object_unlock(object); vm_map_unlock_read(target_map); - if(real_map != target_map) + if (real_map != target_map) { vm_map_unlock_read(real_map); - if(object == kernel_object) { + } + if (object == kernel_object) { printf("Warning: Attempt to create a named" - " entry from the kernel_object\n"); + " entry from the kernel_object\n"); } goto make_mem_done; } @@ -2723,49 +2803,52 @@ redo_lookup: local_map = original_map; local_offset = map_start; - if(target_map != local_map) { + if (target_map != local_map) { vm_map_unlock_read(target_map); - if(real_map != target_map) + if (real_map != target_map) { vm_map_unlock_read(real_map); + } vm_map_lock_read(local_map); target_map = local_map; real_map = local_map; } - while(TRUE) { - if(!vm_map_lookup_entry(local_map, - local_offset, &map_entry)) { - kr = KERN_INVALID_ARGUMENT; - vm_map_unlock_read(target_map); - if(real_map != target_map) - vm_map_unlock_read(real_map); - vm_object_deallocate(object); /* release extra ref */ - object = VM_OBJECT_NULL; - goto make_mem_done; - } - iskernel = (local_map->pmap == kernel_pmap); - if(!(map_entry->is_sub_map)) { - if 
(VME_OBJECT(map_entry) != object) { - kr = KERN_INVALID_ARGUMENT; - vm_map_unlock_read(target_map); - if(real_map != target_map) - vm_map_unlock_read(real_map); - vm_object_deallocate(object); /* release extra ref */ - object = VM_OBJECT_NULL; - goto make_mem_done; - } - break; - } else { - vm_map_t tmap; - tmap = local_map; - local_map = VME_SUBMAP(map_entry); - - vm_map_lock_read(local_map); - vm_map_unlock_read(tmap); - target_map = local_map; - real_map = local_map; - local_offset = local_offset - map_entry->vme_start; - local_offset += VME_OFFSET(map_entry); - } + while (TRUE) { + if (!vm_map_lookup_entry(local_map, + local_offset, &map_entry)) { + kr = KERN_INVALID_ARGUMENT; + vm_map_unlock_read(target_map); + if (real_map != target_map) { + vm_map_unlock_read(real_map); + } + vm_object_deallocate(object); /* release extra ref */ + object = VM_OBJECT_NULL; + goto make_mem_done; + } + iskernel = (local_map->pmap == kernel_pmap); + if (!(map_entry->is_sub_map)) { + if (VME_OBJECT(map_entry) != object) { + kr = KERN_INVALID_ARGUMENT; + vm_map_unlock_read(target_map); + if (real_map != target_map) { + vm_map_unlock_read(real_map); + } + vm_object_deallocate(object); /* release extra ref */ + object = VM_OBJECT_NULL; + goto make_mem_done; + } + break; + } else { + vm_map_t tmap; + tmap = local_map; + local_map = VME_SUBMAP(map_entry); + + vm_map_lock_read(local_map); + vm_map_unlock_read(tmap); + target_map = local_map; + real_map = local_map; + local_offset = local_offset - map_entry->vme_start; + local_offset += VME_OFFSET(map_entry); + } } #if VM_NAMED_ENTRY_LIST @@ -2776,10 +2859,10 @@ redo_lookup: * We found the VM map entry, lock the VM object again. */ vm_object_lock(object); - if(map_entry->wired_count) { - /* JMM - The check below should be reworked instead. */ - object->true_share = TRUE; - } + if (map_entry->wired_count) { + /* JMM - The check below should be reworked instead. 
*/ + object->true_share = TRUE; + } if (mask_protections) { /* * The caller asked us to use the "protections" as @@ -2788,34 +2871,35 @@ redo_lookup: */ protections &= map_entry->max_protection; } - if(((map_entry->max_protection) & protections) != protections) { - kr = KERN_INVALID_RIGHT; - vm_object_unlock(object); - vm_map_unlock_read(target_map); - if(real_map != target_map) + if (((map_entry->max_protection) & protections) != protections) { + kr = KERN_INVALID_RIGHT; + vm_object_unlock(object); + vm_map_unlock_read(target_map); + if (real_map != target_map) { vm_map_unlock_read(real_map); - vm_object_deallocate(object); - object = VM_OBJECT_NULL; - goto make_mem_done; + } + vm_object_deallocate(object); + object = VM_OBJECT_NULL; + goto make_mem_done; } mappable_size = fault_info.hi_offset - obj_off; total_size = map_entry->vme_end - map_entry->vme_start; - if(map_size > mappable_size) { + if (map_size > mappable_size) { /* try to extend mappable size if the entries */ /* following are from the same object and are */ /* compatible */ next_entry = map_entry->vme_next; /* lets see if the next map entry is still */ /* pointing at this object and is contiguous */ - while(map_size > mappable_size) { + while (map_size > mappable_size) { if ((VME_OBJECT(next_entry) == object) && - (next_entry->vme_start == - next_entry->vme_prev->vme_end) && - (VME_OFFSET(next_entry) == - (VME_OFFSET(next_entry->vme_prev) + - (next_entry->vme_prev->vme_end - - next_entry->vme_prev->vme_start)))) { + (next_entry->vme_start == + next_entry->vme_prev->vme_end) && + (VME_OFFSET(next_entry) == + (VME_OFFSET(next_entry->vme_prev) + + (next_entry->vme_prev->vme_end - + next_entry->vme_prev->vme_start)))) { if (mask_protections) { /* * The caller asked us to use @@ -2830,27 +2914,27 @@ redo_lookup: (map_entry->wired_count == 0)) { break; } - if(((next_entry->max_protection) - & protections) != protections) { - break; + if (((next_entry->max_protection) + & protections) != protections) { + break; } if (next_entry->needs_copy != - map_entry->needs_copy) + map_entry->needs_copy) { break; + } mappable_size += next_entry->vme_end - - next_entry->vme_start; + - next_entry->vme_start; total_size += next_entry->vme_end - - next_entry->vme_start; + - next_entry->vme_start; next_entry = next_entry->vme_next; } else { break; } - } } /* vm_map_entry_should_cow_for_true_share() checks for malloc tags, - * never true in kernel */ + * never true in kernel */ if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) && object->vo_size > map_size && map_size != 0) { @@ -2859,7 +2943,7 @@ redo_lookup: * limit the impact of "true_share"/"copy_delay" to * that range instead of the entire VM object... */ - + vm_object_unlock(object); if (vm_map_lock_read_to_write(target_map)) { vm_object_deallocate(object); @@ -2868,13 +2952,13 @@ redo_lookup: } vm_map_clip_start(target_map, - map_entry, - vm_map_trunc_page(map_start, - VM_MAP_PAGE_MASK(target_map))); + map_entry, + vm_map_trunc_page(map_start, + VM_MAP_PAGE_MASK(target_map))); vm_map_clip_end(target_map, - map_entry, - (vm_map_round_page(map_end, - VM_MAP_PAGE_MASK(target_map)))); + map_entry, + (vm_map_round_page(map_end, + VM_MAP_PAGE_MASK(target_map)))); force_shadow = TRUE; if ((map_entry->vme_end - offset) < map_size) { @@ -2887,22 +2971,22 @@ redo_lookup: } if (object->internal) { - /* vm_map_lookup_locked will create a shadow if */ - /* needs_copy is set but does not check for the */ - /* other two conditions shown. 
It is important to */ + /* vm_map_lookup_locked will create a shadow if */ + /* needs_copy is set but does not check for the */ + /* other two conditions shown. It is important to */ /* set up an object which will not be pulled from */ /* under us. */ - if (force_shadow || - ((map_entry->needs_copy || - object->shadowed || - (object->vo_size > total_size && - (VME_OFFSET(map_entry) != 0 || - object->vo_size > - vm_map_round_page(total_size, - VM_MAP_PAGE_MASK(target_map))))) - && !object->true_share - && object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)) { + if (force_shadow || + ((map_entry->needs_copy || + object->shadowed || + (object->vo_size > total_size && + (VME_OFFSET(map_entry) != 0 || + object->vo_size > + vm_map_round_page(total_size, + VM_MAP_PAGE_MASK(target_map))))) + && !object->true_share + && object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)) { /* * We have to unlock the VM object before * trying to upgrade the VM map lock, to @@ -2916,7 +3000,7 @@ redo_lookup: */ vm_object_unlock(object); - if (vm_map_lock_read_to_write(target_map)) { + if (vm_map_lock_read_to_write(target_map)) { /* * We couldn't upgrade our VM map lock * from "read" to "write" and we lost @@ -2925,20 +3009,20 @@ redo_lookup: */ vm_object_deallocate(object); /* extra ref */ target_map = original_map; - goto redo_lookup; - } + goto redo_lookup; + } #if 00 vm_object_lock(object); #endif - /* + /* * JMM - We need to avoid coming here when the object * is wired by anybody, not just the current map. Why * couldn't we use the standard vm_object_copy_quickly() * approach here? */ - - /* create a shadow object */ + + /* create a shadow object */ VME_OBJECT_SHADOW(map_entry, total_size); shadow_object = VME_OBJECT(map_entry); #if 00 @@ -2948,48 +3032,49 @@ redo_lookup: prot = map_entry->protection & ~VM_PROT_WRITE; if (override_nx(target_map, - VME_ALIAS(map_entry)) - && prot) - prot |= VM_PROT_EXECUTE; + VME_ALIAS(map_entry)) + && prot) { + prot |= VM_PROT_EXECUTE; + } vm_object_pmap_protect( object, VME_OFFSET(map_entry), total_size, - ((map_entry->is_shared - || target_map->mapped_in_other_pmaps) - ? PMAP_NULL : - target_map->pmap), + ((map_entry->is_shared + || target_map->mapped_in_other_pmaps) + ? 
PMAP_NULL : + target_map->pmap), map_entry->vme_start, prot); - total_size -= (map_entry->vme_end - - map_entry->vme_start); + total_size -= (map_entry->vme_end + - map_entry->vme_start); next_entry = map_entry->vme_next; map_entry->needs_copy = FALSE; vm_object_lock(shadow_object); while (total_size) { - assert((next_entry->wired_count == 0) || - (map_entry->wired_count)); - - if (VME_OBJECT(next_entry) == object) { - vm_object_reference_locked(shadow_object); - VME_OBJECT_SET(next_entry, - shadow_object); - vm_object_deallocate(object); - VME_OFFSET_SET( - next_entry, - (VME_OFFSET(next_entry->vme_prev) + - (next_entry->vme_prev->vme_end - - next_entry->vme_prev->vme_start))); - next_entry->use_pmap = TRUE; + assert((next_entry->wired_count == 0) || + (map_entry->wired_count)); + + if (VME_OBJECT(next_entry) == object) { + vm_object_reference_locked(shadow_object); + VME_OBJECT_SET(next_entry, + shadow_object); + vm_object_deallocate(object); + VME_OFFSET_SET( + next_entry, + (VME_OFFSET(next_entry->vme_prev) + + (next_entry->vme_prev->vme_end + - next_entry->vme_prev->vme_start))); + next_entry->use_pmap = TRUE; next_entry->needs_copy = FALSE; } else { panic("mach_make_memory_entry_64:" - " map entries out of sync\n"); + " map entries out of sync\n"); } - total_size -= - next_entry->vme_end - - next_entry->vme_start; + total_size -= + next_entry->vme_end + - next_entry->vme_start; next_entry = next_entry->vme_next; } @@ -3002,11 +3087,11 @@ redo_lookup: object = shadow_object; obj_off = ((local_offset - map_entry->vme_start) - + VME_OFFSET(map_entry)); + + VME_OFFSET(map_entry)); vm_map_lock_write_to_read(target_map); - } - } + } + } /* note: in the future we can (if necessary) allow for */ /* memory object lists, this will better support */ @@ -3019,8 +3104,9 @@ redo_lookup: /* against delayed copy, etc. is mostly defensive. */ wimg_mode = object->wimg_bits; - if(!(object->nophyscache)) + if (!(object->nophyscache)) { vm_prot_to_wimg(access, &wimg_mode); + } #if VM_OBJECT_TRACKING_OP_TRUESHARE if (!object->true_share && @@ -3029,19 +3115,20 @@ redo_lookup: int num = 0; num = OSBacktrace(bt, - VM_OBJECT_TRACKING_BTDEPTH); + VM_OBJECT_TRACKING_BTDEPTH); btlog_add_entry(vm_object_tracking_btlog, - object, - VM_OBJECT_TRACKING_OP_TRUESHARE, - bt, - num); + object, + VM_OBJECT_TRACKING_OP_TRUESHARE, + bt, + num); } #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ vm_object_lock_assert_exclusive(object); object->true_share = TRUE; - if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + } /* * The memory entry now points to this VM object and we @@ -3051,11 +3138,13 @@ redo_lookup: */ vm_map_unlock_read(target_map); - if(real_map != target_map) + if (real_map != target_map) { vm_map_unlock_read(real_map); + } - if (object->wimg_bits != wimg_mode) + if (object->wimg_bits != wimg_mode) { vm_object_change_wimg_mode(object, wimg_mode); + } /* the size of mapped entry that overlaps with our region */ /* which is targeted for share. 
*/ @@ -3063,8 +3152,9 @@ redo_lookup: /* offset of our beg addr within entry */ /* it corresponds to this: */ - if(map_size > mappable_size) + if (map_size > mappable_size) { map_size = mappable_size; + } if (permission & MAP_MEM_NAMED_REUSE) { /* @@ -3080,9 +3170,9 @@ redo_lookup: parent_entry->protection == protections && parent_entry->size == map_size && ((!(use_data_addr || use_4K_compat) && - (parent_entry->data_offset == 0)) || - ((use_data_addr || use_4K_compat) && - (parent_entry->data_offset == offset_in_page)))) { + (parent_entry->data_offset == 0)) || + ((use_data_addr || use_4K_compat) && + (parent_entry->data_offset == offset_in_page)))) { /* * We have a match: re-use "parent_entry". */ @@ -3094,8 +3184,8 @@ redo_lookup: ipc_port_copy_send(parent_handle); *size = CAST_DOWN(vm_size_t, - (parent_entry->size - - parent_entry->data_offset)); + (parent_entry->size - + parent_entry->data_offset)); *object_handle = parent_handle; return KERN_SUCCESS; } else { @@ -3130,10 +3220,9 @@ redo_lookup: /* when the object field is filled in. */ *size = CAST_DOWN(vm_size_t, (user_entry->size - - user_entry->data_offset)); + user_entry->data_offset)); *object_handle = user_handle; return KERN_SUCCESS; - } else { /* The new object will be base on an existing named object */ if (parent_entry == NULL) { @@ -3153,15 +3242,16 @@ redo_lookup: * Account for offset to data in parent entry and * compute our own offset to data. */ - if((offset + *size + parent_entry->data_offset) > parent_entry->size) { + if ((offset + *size + parent_entry->data_offset) > parent_entry->size) { kr = KERN_INVALID_ARGUMENT; goto make_mem_done; } map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK); offset_in_page = (offset + parent_entry->data_offset) - map_start; - if (use_4K_compat) + if (use_4K_compat) { offset_in_page &= ~((signed)(0xFFF)); + } map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK); map_size = map_end - map_start; } else { @@ -3169,7 +3259,7 @@ redo_lookup: map_size = map_end - map_start; offset_in_page = 0; - if((offset + map_size) > parent_entry->size) { + if ((offset + map_size) > parent_entry->size) { kr = KERN_INVALID_ARGUMENT; goto make_mem_done; } @@ -3183,7 +3273,7 @@ redo_lookup: */ protections &= parent_entry->protection; } - if((protections & parent_entry->protection) != protections) { + if ((protections & parent_entry->protection) != protections) { kr = KERN_PROTECTION_FAILURE; goto make_mem_done; } @@ -3196,51 +3286,52 @@ redo_lookup: user_entry->size = map_size; user_entry->offset = parent_entry->offset + map_start; - user_entry->data_offset = offset_in_page; + user_entry->data_offset = offset_in_page; user_entry->is_sub_map = parent_entry->is_sub_map; user_entry->is_copy = parent_entry->is_copy; user_entry->internal = parent_entry->internal; user_entry->protection = protections; - if(access != MAP_MEM_NOOP) { - SET_MAP_MEM(access, user_entry->protection); + if (access != MAP_MEM_NOOP) { + SET_MAP_MEM(access, user_entry->protection); } - if(parent_entry->is_sub_map) { - user_entry->backing.map = parent_entry->backing.map; - vm_map_lock(user_entry->backing.map); - user_entry->backing.map->map_refcnt++; - vm_map_unlock(user_entry->backing.map); + if (parent_entry->is_sub_map) { + user_entry->backing.map = parent_entry->backing.map; + vm_map_lock(user_entry->backing.map); + user_entry->backing.map->map_refcnt++; + vm_map_unlock(user_entry->backing.map); } else { - object = parent_entry->backing.object; - assert(object != VM_OBJECT_NULL); - 
user_entry->backing.object = object; - /* we now point to this object, hold on */ - vm_object_lock(object); - vm_object_reference_locked(object); + object = parent_entry->backing.object; + assert(object != VM_OBJECT_NULL); + user_entry->backing.object = object; + /* we now point to this object, hold on */ + vm_object_lock(object); + vm_object_reference_locked(object); #if VM_OBJECT_TRACKING_OP_TRUESHARE - if (!object->true_share && - vm_object_tracking_inited) { - void *bt[VM_OBJECT_TRACKING_BTDEPTH]; - int num = 0; - - num = OSBacktrace(bt, - VM_OBJECT_TRACKING_BTDEPTH); - btlog_add_entry(vm_object_tracking_btlog, - object, - VM_OBJECT_TRACKING_OP_TRUESHARE, - bt, - num); - } + if (!object->true_share && + vm_object_tracking_inited) { + void *bt[VM_OBJECT_TRACKING_BTDEPTH]; + int num = 0; + + num = OSBacktrace(bt, + VM_OBJECT_TRACKING_BTDEPTH); + btlog_add_entry(vm_object_tracking_btlog, + object, + VM_OBJECT_TRACKING_OP_TRUESHARE, + bt, + num); + } #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ - object->true_share = TRUE; - if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) - object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; - vm_object_unlock(object); + object->true_share = TRUE; + if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { + object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; + } + vm_object_unlock(object); } *size = CAST_DOWN(vm_size_t, (user_entry->size - - user_entry->data_offset)); + user_entry->data_offset)); *object_handle = user_handle; return KERN_SUCCESS; } @@ -3259,40 +3350,40 @@ make_mem_done: kern_return_t _mach_make_memory_entry( - vm_map_t target_map, - memory_object_size_t *size, - memory_object_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_entry) + vm_map_t target_map, + memory_object_size_t *size, + memory_object_offset_t offset, + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_entry) { - memory_object_size_t mo_size; - kern_return_t kr; - + memory_object_size_t mo_size; + kern_return_t kr; + mo_size = (memory_object_size_t)*size; - kr = mach_make_memory_entry_64(target_map, &mo_size, - (memory_object_offset_t)offset, permission, object_handle, - parent_entry); + kr = mach_make_memory_entry_64(target_map, &mo_size, + (memory_object_offset_t)offset, permission, object_handle, + parent_entry); *size = mo_size; return kr; } kern_return_t mach_make_memory_entry( - vm_map_t target_map, - vm_size_t *size, - vm_offset_t offset, - vm_prot_t permission, - ipc_port_t *object_handle, - ipc_port_t parent_entry) -{ - memory_object_size_t mo_size; - kern_return_t kr; - + vm_map_t target_map, + vm_size_t *size, + vm_offset_t offset, + vm_prot_t permission, + ipc_port_t *object_handle, + ipc_port_t parent_entry) +{ + memory_object_size_t mo_size; + kern_return_t kr; + mo_size = (memory_object_size_t)*size; - kr = mach_make_memory_entry_64(target_map, &mo_size, - (memory_object_offset_t)offset, permission, object_handle, - parent_entry); + kr = mach_make_memory_entry_64(target_map, &mo_size, + (memory_object_offset_t)offset, permission, object_handle, + parent_entry); *size = CAST_DOWN(vm_size_t, mo_size); return kr; } @@ -3307,38 +3398,40 @@ mach_make_memory_entry( */ kern_return_t task_wire( - vm_map_t map, - boolean_t must_wire) + vm_map_t map, + boolean_t must_wire) { - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } vm_map_lock(map); map->wiring_required = (must_wire == TRUE); vm_map_unlock(map); - return(KERN_SUCCESS); 
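
mach_make_memory_entry_64 above is the user-visible way to mint a named entry against a live mapping; pairing it with mach_vm_map shares the backing object rather than copying it. A hypothetical sketch, where target_task is assumed to be a task send right obtained elsewhere:

/* Create a named entry covering one page of our own map and map it
 * read/write into `target_task`, dropping the entry right once the
 * mapping holds its own reference. */
static kern_return_t
share_page(mach_port_t target_task, mach_vm_address_t local_addr,
    mach_vm_address_t *remote_addr)
{
    memory_object_size_t size = vm_page_size;
    mach_port_t entry = MACH_PORT_NULL;
    kern_return_t kr;

    kr = mach_make_memory_entry_64(mach_task_self(), &size,
        (memory_object_offset_t)local_addr,
        VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    *remote_addr = 0;
    kr = mach_vm_map(target_task, remote_addr, size, 0,
        VM_FLAGS_ANYWHERE, entry, 0, FALSE /* copy */,
        VM_PROT_READ | VM_PROT_WRITE,
        VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
    mach_port_deallocate(mach_task_self(), entry);
    return kr;
}
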
+ return KERN_SUCCESS; } kern_return_t vm_map_exec_lockdown( - vm_map_t map) + vm_map_t map) { - if (map == VM_MAP_NULL) - return(KERN_INVALID_ARGUMENT); + if (map == VM_MAP_NULL) { + return KERN_INVALID_ARGUMENT; + } vm_map_lock(map); map->map_disallow_new_exec = TRUE; vm_map_unlock(map); - return(KERN_SUCCESS); + return KERN_SUCCESS; } #if VM_NAMED_ENTRY_LIST -queue_head_t vm_named_entry_list; -int vm_named_entry_count = 0; -lck_mtx_t vm_named_entry_list_lock_data; -lck_mtx_ext_t vm_named_entry_list_lock_data_ext; +queue_head_t vm_named_entry_list; +int vm_named_entry_count = 0; +lck_mtx_t vm_named_entry_list_lock_data; +lck_mtx_ext_t vm_named_entry_list_lock_data_ext; #endif /* VM_NAMED_ENTRY_LIST */ void vm_named_entry_init(void); @@ -3349,25 +3442,26 @@ vm_named_entry_init(void) queue_init(&vm_named_entry_list); vm_named_entry_count = 0; lck_mtx_init_ext(&vm_named_entry_list_lock_data, - &vm_named_entry_list_lock_data_ext, - &vm_object_lck_grp, - &vm_object_lck_attr); + &vm_named_entry_list_lock_data_ext, + &vm_object_lck_grp, + &vm_object_lck_attr); #endif /* VM_NAMED_ENTRY_LIST */ } __private_extern__ kern_return_t mach_memory_entry_allocate( - vm_named_entry_t *user_entry_p, - ipc_port_t *user_handle_p) + vm_named_entry_t *user_entry_p, + ipc_port_t *user_handle_p) { - vm_named_entry_t user_entry; - ipc_port_t user_handle; - ipc_port_t previous; + vm_named_entry_t user_entry; + ipc_port_t user_handle; + ipc_port_t previous; user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry); - if (user_entry == NULL) + if (user_entry == NULL) { return KERN_FAILURE; - bzero(user_entry, sizeof (*user_entry)); + } + bzero(user_entry, sizeof(*user_entry)); named_entry_lock_init(user_entry); @@ -3383,9 +3477,9 @@ mach_memory_entry_allocate( ip_reference(user_handle); /* make a send right */ - user_handle->ip_mscount++; - user_handle->ip_srights++; - ip_reference(user_handle); + user_handle->ip_mscount++; + user_handle->ip_srights++; + ip_reference(user_handle); ipc_port_nsrequest(user_handle, 1, user_handle, &previous); /* nsrequest unlocks user_handle */ @@ -3401,7 +3495,7 @@ mach_memory_entry_allocate( user_entry->ref_count = 1; ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry, - IKOT_NAMED_ENTRY); + IKOT_NAMED_ENTRY); *user_entry_p = user_entry; *user_handle_p = user_handle; @@ -3411,12 +3505,12 @@ mach_memory_entry_allocate( user_entry->named_entry_port = user_handle; /* backtrace at allocation time, for debugging only */ OSBacktrace(&user_entry->named_entry_bt[0], - NAMED_ENTRY_BT_DEPTH); + NAMED_ENTRY_BT_DEPTH); /* add this new named entry to the global list */ lck_mtx_lock_spin(&vm_named_entry_list_lock_data); queue_enter(&vm_named_entry_list, user_entry, - vm_named_entry_t, named_entry_list); + vm_named_entry_t, named_entry_list); vm_named_entry_count++; lck_mtx_unlock(&vm_named_entry_list_lock_data); #endif /* VM_NAMED_ENTRY_LIST */ @@ -3432,20 +3526,21 @@ mach_memory_entry_allocate( */ kern_return_t mach_memory_object_memory_entry_64( - host_t host, - boolean_t internal, - vm_object_offset_t size, - vm_prot_t permission, - memory_object_t pager, - ipc_port_t *entry_handle) + host_t host, + boolean_t internal, + vm_object_offset_t size, + vm_prot_t permission, + memory_object_t pager, + ipc_port_t *entry_handle) { - unsigned int access; - vm_named_entry_t user_entry; - ipc_port_t user_handle; - vm_object_t object; + unsigned int access; + vm_named_entry_t user_entry; + ipc_port_t user_handle; + vm_object_t object; - if (host == HOST_NULL) - return(KERN_INVALID_HOST); + if (host 
== HOST_NULL) { + return KERN_INVALID_HOST; + } if (pager == MEMORY_OBJECT_NULL && internal) { object = vm_object_allocate(size); @@ -3486,23 +3581,23 @@ mach_memory_object_memory_entry_64( kern_return_t mach_memory_object_memory_entry( - host_t host, - boolean_t internal, - vm_size_t size, - vm_prot_t permission, - memory_object_t pager, - ipc_port_t *entry_handle) + host_t host, + boolean_t internal, + vm_size_t size, + vm_prot_t permission, + memory_object_t pager, + ipc_port_t *entry_handle) { - return mach_memory_object_memory_entry_64( host, internal, - (vm_object_offset_t)size, permission, pager, entry_handle); + return mach_memory_object_memory_entry_64( host, internal, + (vm_object_offset_t)size, permission, pager, entry_handle); } kern_return_t mach_memory_entry_purgable_control( - ipc_port_t entry_port, - vm_purgable_t control, - int *state) + ipc_port_t entry_port, + vm_purgable_t control, + int *state) { if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { /* not allowed from user-space */ @@ -3514,13 +3609,13 @@ mach_memory_entry_purgable_control( kern_return_t memory_entry_purgeable_control_internal( - ipc_port_t entry_port, - vm_purgable_t control, - int *state) + ipc_port_t entry_port, + vm_purgable_t control, + int *state) { - kern_return_t kr; - vm_named_entry_t mem_entry; - vm_object_t object; + kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; if (!IP_VALID(entry_port) || ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { @@ -3528,14 +3623,16 @@ memory_entry_purgeable_control_internal( } if (control != VM_PURGABLE_SET_STATE && control != VM_PURGABLE_GET_STATE && - control != VM_PURGABLE_SET_STATE_FROM_KERNEL) - return(KERN_INVALID_ARGUMENT); + control != VM_PURGABLE_SET_STATE_FROM_KERNEL) { + return KERN_INVALID_ARGUMENT; + } if ((control == VM_PURGABLE_SET_STATE || - control == VM_PURGABLE_SET_STATE_FROM_KERNEL) && + control == VM_PURGABLE_SET_STATE_FROM_KERNEL) && (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) || - ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) - return(KERN_INVALID_ARGUMENT); + ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) { + return KERN_INVALID_ARGUMENT; + } mem_entry = (vm_named_entry_t) entry_port->ip_kobject; @@ -3573,27 +3670,27 @@ memory_entry_purgeable_control_internal( kern_return_t mach_memory_entry_access_tracking( - ipc_port_t entry_port, - int *access_tracking, - uint32_t *access_tracking_reads, - uint32_t *access_tracking_writes) + ipc_port_t entry_port, + int *access_tracking, + uint32_t *access_tracking_reads, + uint32_t *access_tracking_writes) { return memory_entry_access_tracking_internal(entry_port, - access_tracking, - access_tracking_reads, - access_tracking_writes); + access_tracking, + access_tracking_reads, + access_tracking_writes); } kern_return_t memory_entry_access_tracking_internal( - ipc_port_t entry_port, - int *access_tracking, - uint32_t *access_tracking_reads, - uint32_t *access_tracking_writes) + ipc_port_t entry_port, + int *access_tracking, + uint32_t *access_tracking_reads, + uint32_t *access_tracking_writes) { - vm_named_entry_t mem_entry; - vm_object_t object; - kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; + kern_return_t kr; if (!IP_VALID(entry_port) || ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { @@ -3618,9 +3715,9 @@ memory_entry_access_tracking_internal( #if VM_OBJECT_ACCESS_TRACKING vm_object_access_tracking(object, - access_tracking, - access_tracking_reads, - access_tracking_writes); + access_tracking, + access_tracking_reads, + 
access_tracking_writes); kr = KERN_SUCCESS; #else /* VM_OBJECT_ACCESS_TRACKING */ (void) access_tracking; @@ -3636,15 +3733,15 @@ memory_entry_access_tracking_internal( kern_return_t mach_memory_entry_get_page_counts( - ipc_port_t entry_port, - unsigned int *resident_page_count, - unsigned int *dirty_page_count) + ipc_port_t entry_port, + unsigned int *resident_page_count, + unsigned int *dirty_page_count) { - kern_return_t kr; - vm_named_entry_t mem_entry; - vm_object_t object; - vm_object_offset_t offset; - vm_object_size_t size; + kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; + vm_object_offset_t offset; + vm_object_size_t size; if (!IP_VALID(entry_port) || ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { @@ -3690,7 +3787,7 @@ mach_memory_entry_get_page_counts( */ void mach_memory_entry_port_release( - ipc_port_t port) + ipc_port_t port) { assert(ip_kotype(port) == IKOT_NAMED_ENTRY); ipc_port_release_send(port); @@ -3710,9 +3807,9 @@ mach_memory_entry_port_release( */ void mach_destroy_memory_entry( - ipc_port_t port) + ipc_port_t port) { - vm_named_entry_t named_entry; + vm_named_entry_t named_entry; #if MACH_ASSERT assert(ip_kotype(port) == IKOT_NAMED_ENTRY); #endif /* MACH_ASSERT */ @@ -3721,7 +3818,7 @@ mach_destroy_memory_entry( named_entry_lock(named_entry); named_entry->ref_count -= 1; - if(named_entry->ref_count == 0) { + if (named_entry->ref_count == 0) { if (named_entry->is_sub_map) { vm_map_deallocate(named_entry->backing.map); } else if (named_entry->is_copy) { @@ -3737,16 +3834,17 @@ mach_destroy_memory_entry( #if VM_NAMED_ENTRY_LIST lck_mtx_lock_spin(&vm_named_entry_list_lock_data); queue_remove(&vm_named_entry_list, named_entry, - vm_named_entry_t, named_entry_list); + vm_named_entry_t, named_entry_list); assert(vm_named_entry_count > 0); vm_named_entry_count--; lck_mtx_unlock(&vm_named_entry_list_lock_data); #endif /* VM_NAMED_ENTRY_LIST */ - kfree((void *) port->ip_kobject, - sizeof (struct vm_named_entry)); - } else + kfree(port->ip_kobject, + sizeof(struct vm_named_entry)); + } else { named_entry_unlock(named_entry); + } } /* Allow manipulation of individual page state. This is actually part of */ @@ -3754,15 +3852,15 @@ mach_destroy_memory_entry( kern_return_t mach_memory_entry_page_op( - ipc_port_t entry_port, - vm_object_offset_t offset, - int ops, - ppnum_t *phys_entry, - int *flags) + ipc_port_t entry_port, + vm_object_offset_t offset, + int ops, + ppnum_t *phys_entry, + int *flags) { - vm_named_entry_t mem_entry; - vm_object_t object; - kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; + kern_return_t kr; if (!IP_VALID(entry_port) || ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { @@ -3790,32 +3888,32 @@ mach_memory_entry_page_op( kr = vm_object_page_op(object, offset, ops, phys_entry, flags); - vm_object_deallocate(object); + vm_object_deallocate(object); return kr; } /* - * mach_memory_entry_range_op offers performance enhancement over - * mach_memory_entry_page_op for page_op functions which do not require page - * level state to be returned from the call. Page_op was created to provide - * a low-cost alternative to page manipulation via UPLs when only a single - * page was involved. The range_op call establishes the ability in the _op + * mach_memory_entry_range_op offers performance enhancement over + * mach_memory_entry_page_op for page_op functions which do not require page + * level state to be returned from the call. 
Page_op was created to provide + * a low-cost alternative to page manipulation via UPLs when only a single + * page was involved. The range_op call establishes the ability in the _op * family of functions to work on multiple pages where the lack of page level * state handling allows the caller to avoid the overhead of the upl structures. */ kern_return_t mach_memory_entry_range_op( - ipc_port_t entry_port, - vm_object_offset_t offset_beg, - vm_object_offset_t offset_end, + ipc_port_t entry_port, + vm_object_offset_t offset_beg, + vm_object_offset_t offset_end, int ops, int *range) { - vm_named_entry_t mem_entry; - vm_object_t object; - kern_return_t kr; + vm_named_entry_t mem_entry; + vm_object_t object; + kern_return_t kr; if (!IP_VALID(entry_port) || ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { @@ -3842,10 +3940,10 @@ mach_memory_entry_range_op( named_entry_unlock(mem_entry); kr = vm_object_range_op(object, - offset_beg, - offset_end, - ops, - (uint32_t *) range); + offset_beg, + offset_end, + ops, + (uint32_t *) range); vm_object_deallocate(object); @@ -3866,15 +3964,15 @@ extern int kernel_upl_unmap( extern int kernel_upl_commit( upl_t upl, upl_page_info_t *pl, - mach_msg_type_number_t count); + mach_msg_type_number_t count); extern int kernel_upl_commit_range( upl_t upl, upl_offset_t offset, - upl_size_t size, - int flags, - upl_page_info_array_t pl, - mach_msg_type_number_t count); + upl_size_t size, + int flags, + upl_page_info_array_t pl, + mach_msg_type_number_t count); extern int kernel_upl_abort( upl_t upl, @@ -3889,9 +3987,9 @@ extern int kernel_upl_abort_range( kern_return_t kernel_upl_map( - vm_map_t map, - upl_t upl, - vm_offset_t *dst_addr) + vm_map_t map, + upl_t upl, + vm_offset_t *dst_addr) { return vm_upl_map(map, upl, dst_addr); } @@ -3899,8 +3997,8 @@ kernel_upl_map( kern_return_t kernel_upl_unmap( - vm_map_t map, - upl_t upl) + vm_map_t map, + upl_t upl) { return vm_upl_unmap(map, upl); } @@ -3911,7 +4009,7 @@ kernel_upl_commit( upl_page_info_t *pl, mach_msg_type_number_t count) { - kern_return_t kr; + kern_return_t kr; kr = upl_commit(upl, pl, count); upl_deallocate(upl); @@ -3921,18 +4019,19 @@ kernel_upl_commit( kern_return_t kernel_upl_commit_range( - upl_t upl, - upl_offset_t offset, - upl_size_t size, - int flags, + upl_t upl, + upl_offset_t offset, + upl_size_t size, + int flags, upl_page_info_array_t pl, mach_msg_type_number_t count) { - boolean_t finished = FALSE; - kern_return_t kr; + boolean_t finished = FALSE; + kern_return_t kr; - if (flags & UPL_COMMIT_FREE_ON_EMPTY) + if (flags & UPL_COMMIT_FREE_ON_EMPTY) { flags |= UPL_COMMIT_NOTIFY_EMPTY; + } if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) { return KERN_INVALID_ARGUMENT; @@ -3940,39 +4039,42 @@ kernel_upl_commit_range( kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished); - if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) + if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) { upl_deallocate(upl); + } return kr; } - + kern_return_t kernel_upl_abort_range( - upl_t upl, - upl_offset_t offset, - upl_size_t size, - int abort_flags) + upl_t upl, + upl_offset_t offset, + upl_size_t size, + int abort_flags) { - kern_return_t kr; - boolean_t finished = FALSE; + kern_return_t kr; + boolean_t finished = FALSE; - if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) + if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) { abort_flags |= UPL_COMMIT_NOTIFY_EMPTY; + } kr = upl_abort_range(upl, offset, size, abort_flags, &finished); - if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) + if ((abort_flags & 
UPL_COMMIT_FREE_ON_EMPTY) && finished) { upl_deallocate(upl); + } return kr; } kern_return_t kernel_upl_abort( - upl_t upl, - int abort_type) + upl_t upl, + int abort_type) { - kern_return_t kr; + kern_return_t kr; kr = upl_abort(upl, abort_type); upl_deallocate(upl); @@ -3987,15 +4089,15 @@ kernel_upl_abort( kern_return_t vm_region_object_create( - __unused vm_map_t target_map, - vm_size_t size, - ipc_port_t *object_handle) + __unused vm_map_t target_map, + vm_size_t size, + ipc_port_t *object_handle) { - vm_named_entry_t user_entry; - ipc_port_t user_handle; + vm_named_entry_t user_entry; + ipc_port_t user_handle; + + vm_map_t new_map; - vm_map_t new_map; - if (mach_memory_entry_allocate(&user_entry, &user_handle) != KERN_SUCCESS) { return KERN_FAILURE; @@ -4004,9 +4106,9 @@ vm_region_object_create( /* Create a named object based on a submap of specified size */ new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS, - vm_map_round_page(size, - VM_MAP_PAGE_MASK(target_map)), - TRUE); + vm_map_round_page(size, + VM_MAP_PAGE_MASK(target_map)), + TRUE); vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map)); user_entry->backing.map = new_map; @@ -4019,40 +4121,38 @@ vm_region_object_create( *object_handle = user_handle; return KERN_SUCCESS; - } -ppnum_t vm_map_get_phys_page( /* forward */ - vm_map_t map, - vm_offset_t offset); +ppnum_t vm_map_get_phys_page( /* forward */ + vm_map_t map, + vm_offset_t offset); ppnum_t vm_map_get_phys_page( - vm_map_t map, - vm_offset_t addr) + vm_map_t map, + vm_offset_t addr) { - vm_object_offset_t offset; - vm_object_t object; - vm_map_offset_t map_offset; - vm_map_entry_t entry; - ppnum_t phys_page = 0; + vm_object_offset_t offset; + vm_object_t object; + vm_map_offset_t map_offset; + vm_map_entry_t entry; + ppnum_t phys_page = 0; map_offset = vm_map_trunc_page(addr, PAGE_MASK); vm_map_lock(map); while (vm_map_lookup_entry(map, map_offset, &entry)) { - if (VME_OBJECT(entry) == VM_OBJECT_NULL) { vm_map_unlock(map); return (ppnum_t) 0; } if (entry->is_sub_map) { - vm_map_t old_map; + vm_map_t old_map; vm_map_lock(VME_SUBMAP(entry)); old_map = map; map = VME_SUBMAP(entry); map_offset = (VME_OFFSET(entry) + - (map_offset - entry->vme_start)); + (map_offset - entry->vme_start)); vm_map_unlock(old_map); continue; } @@ -4064,27 +4164,26 @@ vm_map_get_phys_page( if (VME_OBJECT(entry)->vo_shadow_offset == 0) { /* need to call vm_fault */ vm_map_unlock(map); - vm_fault(map, map_offset, VM_PROT_NONE, - FALSE /* change_wiring */, VM_KERN_MEMORY_NONE, - THREAD_UNINT, NULL, 0); + vm_fault(map, map_offset, VM_PROT_NONE, + FALSE /* change_wiring */, VM_KERN_MEMORY_NONE, + THREAD_UNINT, NULL, 0); vm_map_lock(map); continue; } offset = (VME_OFFSET(entry) + - (map_offset - entry->vme_start)); + (map_offset - entry->vme_start)); phys_page = (ppnum_t) - ((VME_OBJECT(entry)->vo_shadow_offset - + offset) >> PAGE_SHIFT); + ((VME_OBJECT(entry)->vo_shadow_offset + + offset) >> PAGE_SHIFT); break; - } offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start)); object = VME_OBJECT(entry); vm_object_lock(object); while (TRUE) { - vm_page_t dst_page = vm_page_lookup(object,offset); - if(dst_page == VM_PAGE_NULL) { - if(object->shadow) { + vm_page_t dst_page = vm_page_lookup(object, offset); + if (dst_page == VM_PAGE_NULL) { + if (object->shadow) { vm_object_t old_object; vm_object_lock(object->shadow); old_object = object; @@ -4102,37 +4201,36 @@ vm_map_get_phys_page( } } break; - - } + } vm_map_unlock(map); return phys_page; } #if 0 -kern_return_t 
kernel_object_iopl_request( /* forward */ - vm_named_entry_t named_entry, - memory_object_offset_t offset, - upl_size_t *upl_size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - int *flags); +kern_return_t kernel_object_iopl_request( /* forward */ + vm_named_entry_t named_entry, + memory_object_offset_t offset, + upl_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int *flags); kern_return_t kernel_object_iopl_request( - vm_named_entry_t named_entry, - memory_object_offset_t offset, - upl_size_t *upl_size, - upl_t *upl_ptr, - upl_page_info_array_t user_page_list, - unsigned int *page_list_count, - int *flags) + vm_named_entry_t named_entry, + memory_object_offset_t offset, + upl_size_t *upl_size, + upl_t *upl_ptr, + upl_page_info_array_t user_page_list, + unsigned int *page_list_count, + int *flags) { - vm_object_t object; - kern_return_t ret; + vm_object_t object; + kern_return_t ret; - int caller_flags; + int caller_flags; caller_flags = *flags; @@ -4145,36 +4243,40 @@ kernel_object_iopl_request( } /* a few checks to make sure user is obeying rules */ - if(*upl_size == 0) { - if(offset >= named_entry->size) - return(KERN_INVALID_RIGHT); + if (*upl_size == 0) { + if (offset >= named_entry->size) { + return KERN_INVALID_RIGHT; + } *upl_size = (upl_size_t) (named_entry->size - offset); - if (*upl_size != named_entry->size - offset) + if (*upl_size != named_entry->size - offset) { return KERN_INVALID_ARGUMENT; + } } - if(caller_flags & UPL_COPYOUT_FROM) { - if((named_entry->protection & VM_PROT_READ) - != VM_PROT_READ) { - return(KERN_INVALID_RIGHT); + if (caller_flags & UPL_COPYOUT_FROM) { + if ((named_entry->protection & VM_PROT_READ) + != VM_PROT_READ) { + return KERN_INVALID_RIGHT; } } else { - if((named_entry->protection & - (VM_PROT_READ | VM_PROT_WRITE)) - != (VM_PROT_READ | VM_PROT_WRITE)) { - return(KERN_INVALID_RIGHT); + if ((named_entry->protection & + (VM_PROT_READ | VM_PROT_WRITE)) + != (VM_PROT_READ | VM_PROT_WRITE)) { + return KERN_INVALID_RIGHT; } } - if(named_entry->size < (offset + *upl_size)) - return(KERN_INVALID_ARGUMENT); + if (named_entry->size < (offset + *upl_size)) { + return KERN_INVALID_ARGUMENT; + } /* the callers parameter offset is defined to be the */ /* offset from beginning of named entry offset in object */ offset = offset + named_entry->offset; if (named_entry->is_sub_map || - named_entry->is_copy) + named_entry->is_copy) { return KERN_INVALID_ARGUMENT; - + } + named_entry_lock(named_entry); /* This is the case where we are going to operate */ @@ -4188,8 +4290,9 @@ kernel_object_iopl_request( named_entry_unlock(named_entry); if (!object->private) { - if (*upl_size > MAX_UPL_TRANSFER_BYTES) + if (*upl_size > MAX_UPL_TRANSFER_BYTES) { *upl_size = MAX_UPL_TRANSFER_BYTES; + } if (object->phys_contiguous) { *flags = UPL_PHYS_CONTIG; } else { @@ -4200,12 +4303,12 @@ kernel_object_iopl_request( } ret = vm_object_iopl_request(object, - offset, - *upl_size, - upl_ptr, - user_page_list, - page_list_count, - (upl_control_flags_t)(unsigned int)caller_flags); + offset, + *upl_size, + upl_ptr, + user_page_list, + page_list_count, + (upl_control_flags_t)(unsigned int)caller_flags); vm_object_deallocate(object); return ret; } @@ -4220,103 +4323,103 @@ kernel_object_iopl_request( kern_return_t mach_vm_map( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t initial_size, - mach_vm_offset_t mask, - int flags, - ipc_port_t port, - vm_object_offset_t 
offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t initial_size, + mach_vm_offset_t mask, + int flags, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); kern_return_t mach_vm_remap( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t size, - mach_vm_offset_t mask, - int flags, - vm_map_t src_map, - mach_vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance); + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int flags, + vm_map_t src_map, + mach_vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance); kern_return_t mach_vm_map( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t initial_size, - mach_vm_offset_t mask, - int flags, - ipc_port_t port, - vm_object_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t initial_size, + mach_vm_offset_t mask, + int flags, + ipc_port_t port, + vm_object_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { - return (mach_vm_map_external(target_map, address, initial_size, mask, flags, port, - offset, copy, cur_protection, max_protection, inheritance)); + return mach_vm_map_external(target_map, address, initial_size, mask, flags, port, + offset, copy, cur_protection, max_protection, inheritance); } kern_return_t mach_vm_remap( - vm_map_t target_map, - mach_vm_offset_t *address, - mach_vm_size_t size, - mach_vm_offset_t mask, - int flags, - vm_map_t src_map, - mach_vm_offset_t memory_address, - boolean_t copy, - vm_prot_t *cur_protection, - vm_prot_t *max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + mach_vm_offset_t *address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int flags, + vm_map_t src_map, + mach_vm_offset_t memory_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance) { - return (mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address, - copy, cur_protection, max_protection, inheritance)); + return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address, + copy, cur_protection, max_protection, inheritance); } kern_return_t vm_map( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - ipc_port_t port, - vm_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance); + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t mask, + int flags, + ipc_port_t port, + vm_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance); kern_return_t vm_map( - vm_map_t target_map, - vm_offset_t *address, - vm_size_t size, - vm_offset_t mask, - int flags, - ipc_port_t port, - vm_offset_t offset, - boolean_t copy, - vm_prot_t cur_protection, - vm_prot_t max_protection, - vm_inherit_t inheritance) + vm_map_t target_map, + vm_offset_t *address, + vm_size_t size, + vm_offset_t 
mask, + int flags, + ipc_port_t port, + vm_offset_t offset, + boolean_t copy, + vm_prot_t cur_protection, + vm_prot_t max_protection, + vm_inherit_t inheritance) { vm_tag_t tag; VM_GET_FLAGS_ALIAS(flags, tag); return vm_map_kernel(target_map, address, size, mask, - flags, VM_MAP_KERNEL_FLAGS_NONE, tag, - port, offset, copy, - cur_protection, max_protection, inheritance); + flags, VM_MAP_KERNEL_FLAGS_NONE, tag, + port, offset, copy, + cur_protection, max_protection, inheritance); } #endif /* __x86_64__ */ diff --git a/osfmk/voucher/ipc_pthread_priority.c b/osfmk/voucher/ipc_pthread_priority.c index c9cd2e807..28d3f54b3 100644 --- a/osfmk/voucher/ipc_pthread_priority.c +++ b/osfmk/voucher/ipc_pthread_priority.c @@ -101,7 +101,7 @@ struct ipc_voucher_attr_manager ipc_pthread_priority_manager = { .ivam_release_value = ipc_pthread_priority_release_value, .ivam_get_value = ipc_pthread_priority_get_value, .ivam_extract_content = ipc_pthread_priority_extract_content, - .ivam_command = ipc_pthread_priority_command, + .ivam_command = ipc_pthread_priority_command, .ivam_release = ipc_pthread_priority_release, .ivam_flags = IVAM_FLAGS_NONE, }; @@ -118,15 +118,16 @@ ipc_pthread_priority_init() /* Register the ipc_pthread_priority manager with the Vouchers sub system. */ kr = ipc_register_well_known_mach_voucher_attr_manager( - &ipc_pthread_priority_manager, - 0, - MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, - &ipc_pthread_priority_voucher_attr_control); - if (kr != KERN_SUCCESS ) + &ipc_pthread_priority_manager, + 0, + MACH_VOUCHER_ATTR_KEY_PTHPRIORITY, + &ipc_pthread_priority_voucher_attr_control); + if (kr != KERN_SUCCESS) { panic("IPC_PTHREAD_PRIORITY subsystem initialization failed"); + } kprintf("IPC_PTHREAD_PRIORITY subsystem is initialized\n"); - return ; + return; } /* @@ -142,10 +143,10 @@ ipc_pthread_priority_init() */ kern_return_t ipc_pthread_priority_release_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_t value, - mach_voucher_attr_value_reference_t sync) + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_t value, + mach_voucher_attr_value_reference_t sync) { assert(MACH_VOUCHER_ATTR_KEY_PTHPRIORITY == key); assert(manager == &ipc_pthread_priority_manager); @@ -161,16 +162,16 @@ ipc_pthread_priority_release_value( */ kern_return_t ipc_pthread_priority_get_value( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_recipe_command_t command, - mach_voucher_attr_value_handle_array_t __unused prev_values, - mach_msg_type_number_t __unused prev_value_count, - mach_voucher_attr_content_t recipe, - mach_voucher_attr_content_size_t recipe_size, + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_recipe_command_t command, + mach_voucher_attr_value_handle_array_t __unused prev_values, + mach_msg_type_number_t __unused prev_value_count, + mach_voucher_attr_content_t recipe, + mach_voucher_attr_content_size_t recipe_size, mach_voucher_attr_value_handle_t *out_value, mach_voucher_attr_value_flags_t *out_flags, - ipc_voucher_t *out_value_voucher) + ipc_voucher_t *out_value_voucher) { kern_return_t kr = KERN_SUCCESS; ipc_pthread_priority_value_t ipc_pthread_priority_value; @@ -184,7 +185,6 @@ ipc_pthread_priority_get_value( *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_NONE; switch (command) { - case 
MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE: if (recipe_size != sizeof(ipc_pthread_priority_value_t)) { @@ -200,7 +200,7 @@ ipc_pthread_priority_get_value( /* Callout to pthread kext to get the canonicalized value */ canonicalize_priority_value = (ipc_pthread_priority_value_t) - _pthread_priority_normalize_for_ipc((unsigned long)ipc_pthread_priority_value); + _pthread_priority_normalize_for_ipc((unsigned long)ipc_pthread_priority_value); *out_value = IPC_PTHREAD_PRIORITY_VALUE_TO_HANDLE(canonicalize_priority_value); *out_flags = MACH_VOUCHER_ATTR_VALUE_FLAGS_PERSIST; @@ -225,7 +225,7 @@ ipc_pthread_priority_extract_content( ipc_voucher_attr_manager_t __assert_only manager, mach_voucher_attr_key_t __assert_only key, mach_voucher_attr_value_handle_array_t values, - mach_msg_type_number_t value_count, + mach_msg_type_number_t value_count, mach_voucher_attr_recipe_command_t *out_command, mach_voucher_attr_content_t out_recipe, mach_voucher_attr_content_size_t *in_out_recipe_size) @@ -237,7 +237,7 @@ ipc_pthread_priority_extract_content( assert(MACH_VOUCHER_ATTR_KEY_PTHPRIORITY == key); assert(manager == &ipc_pthread_priority_manager); - for (i = 0; i < value_count; i++) { + for (i = 0; i < value_count && *in_out_recipe_size > 0; i++) { ipc_pthread_priority_value = HANDLE_TO_IPC_PTHREAD_PRIORITY_VALUE(values[i]); if (ipc_pthread_priority_value == PTHPRIORITY_ATTR_DEFAULT_VALUE) { @@ -267,14 +267,14 @@ ipc_pthread_priority_extract_content( */ kern_return_t ipc_pthread_priority_command( - ipc_voucher_attr_manager_t __assert_only manager, - mach_voucher_attr_key_t __assert_only key, - mach_voucher_attr_value_handle_array_t __unused values, - mach_msg_type_number_t __unused value_count, - mach_voucher_attr_command_t __unused command, - mach_voucher_attr_content_t __unused in_content, + ipc_voucher_attr_manager_t __assert_only manager, + mach_voucher_attr_key_t __assert_only key, + mach_voucher_attr_value_handle_array_t __unused values, + mach_msg_type_number_t __unused value_count, + mach_voucher_attr_command_t __unused command, + mach_voucher_attr_content_t __unused in_content, mach_voucher_attr_content_size_t __unused in_content_size, - mach_voucher_attr_content_t __unused out_content, + mach_voucher_attr_content_t __unused out_content, mach_voucher_attr_content_size_t __unused *out_content_size) { assert(MACH_VOUCHER_ATTR_KEY_PTHPRIORITY == key); @@ -285,7 +285,7 @@ ipc_pthread_priority_command( void ipc_pthread_priority_release( - ipc_voucher_attr_manager_t __assert_only manager) + ipc_voucher_attr_manager_t __assert_only manager) { assert(manager == &ipc_pthread_priority_manager); } diff --git a/osfmk/voucher/ipc_pthread_priority_internal.h b/osfmk/voucher/ipc_pthread_priority_internal.h index c399046ff..c7e9977c9 100644 --- a/osfmk/voucher/ipc_pthread_priority_internal.h +++ b/osfmk/voucher/ipc_pthread_priority_internal.h @@ -35,4 +35,3 @@ extern void ipc_pthread_priority_init(void); #endif /* _VOUCHER_IPC_PTHREAD_PRIORITY_INTERNAL_H_ */ - diff --git a/osfmk/voucher/ipc_pthread_priority_types.h b/osfmk/voucher/ipc_pthread_priority_types.h index f59d06e9b..00b018769 100644 --- a/osfmk/voucher/ipc_pthread_priority_types.h +++ b/osfmk/voucher/ipc_pthread_priority_types.h @@ -32,8 +32,8 @@ #include #include -#define MACH_VOUCHER_ATTR_PTHPRIORITY_NULL ((mach_voucher_attr_recipe_command_t)701) -#define MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE ((mach_voucher_attr_recipe_command_t)710) +#define MACH_VOUCHER_ATTR_PTHPRIORITY_NULL ((mach_voucher_attr_recipe_command_t)701) +#define 
MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE ((mach_voucher_attr_recipe_command_t)710) typedef uint32_t ipc_pthread_priority_value_t; diff --git a/osfmk/x86_64/Makefile b/osfmk/x86_64/Makefile index 91dc2251e..90edf3c1c 100644 --- a/osfmk/x86_64/Makefile +++ b/osfmk/x86_64/Makefile @@ -6,7 +6,8 @@ export MakeInc_dir=${SRCROOT}/makedefs/MakeInc.dir include $(MakeInc_cmd) include $(MakeInc_def) -EXPORT_FILES = +EXPORT_FILES = \ + machine_remote_time.h EXPORT_ONLY_FILES = \ diff --git a/osfmk/x86_64/bcopy.s b/osfmk/x86_64/bcopy.s index 3ed02246f..78c45d2e9 100644 --- a/osfmk/x86_64/bcopy.s +++ b/osfmk/x86_64/bcopy.s @@ -62,18 +62,12 @@ /* rdi, rsi, rdx */ /* * Note: memcpy does not support overlapping copies - */ - + */ ENTRY(memcpy) movq %rdi, %rax /* return destination */ movq %rdx,%rcx - shrq $3,%rcx /* copy by 64-bit words */ cld /* copy forwards */ rep - movsq - movq %rdx,%rcx - andq $7,%rcx /* any bytes left? */ - rep movsb ret @@ -98,13 +92,8 @@ ENTRY(bcopy) cmpq %rcx,%rax /* overlapping && src < dst? */ jb 1f - shrq $3,%rcx /* copy by 64-bit words */ cld /* nope, copy forwards */ rep - movsq - movq %rdx,%rcx - andq $7,%rcx /* any bytes left? */ - rep movsb ret diff --git a/osfmk/x86_64/boot_pt.c b/osfmk/x86_64/boot_pt.c index 392c3c1f7..538622bef 100644 --- a/osfmk/x86_64/boot_pt.c +++ b/osfmk/x86_64/boot_pt.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -37,26 +37,26 @@ * * These tables are statically-defined as physical-zero-based. * Startup code in start.s rebases these according to the actual physical - * base address. + * base address. */ /* * NB: This must be located at the kernel's base address! 
*/ #define PML4_PROT (INTEL_PTE_VALID | INTEL_PTE_WRITE) -pml4_entry_t BootPML4[PTE_PER_PAGE] - __attribute__((section("__HIB, __bootPT"))) = { - [0] = ((uint64_t)(PAGE_SIZE) | PML4_PROT), - [KERNEL_PML4_INDEX] = ((uint64_t)(PAGE_SIZE) | PML4_PROT), +pml4_entry_t BootPML4[PTE_PER_PAGE] +__attribute__((section("__HIB, __bootPT"))) = { + [0] = ((uint64_t)(PAGE_SIZE) | PML4_PROT), + [KERNEL_PML4_INDEX] = ((uint64_t)(PAGE_SIZE) | PML4_PROT), }; #define PDPT_PROT (INTEL_PTE_VALID | INTEL_PTE_WRITE) -pdpt_entry_t BootPDPT[PTE_PER_PAGE] - __attribute__((section("__HIB, __bootPT"))) = { - [0] = ((uint64_t)(2*PAGE_SIZE) | PDPT_PROT), - [1] = ((uint64_t)(3*PAGE_SIZE) | PDPT_PROT), - [2] = ((uint64_t)(4*PAGE_SIZE) | PDPT_PROT), - [3] = ((uint64_t)(5*PAGE_SIZE) | PDPT_PROT), +pdpt_entry_t BootPDPT[PTE_PER_PAGE] +__attribute__((section("__HIB, __bootPT"))) = { + [0] = ((uint64_t)(2 * PAGE_SIZE) | PDPT_PROT), + [1] = ((uint64_t)(3 * PAGE_SIZE) | PDPT_PROT), + [2] = ((uint64_t)(4 * PAGE_SIZE) | PDPT_PROT), + [3] = ((uint64_t)(5 * PAGE_SIZE) | PDPT_PROT), }; #if NPGPTD != 4 @@ -66,25 +66,25 @@ pdpt_entry_t BootPDPT[PTE_PER_PAGE] #if MACHINE_BOOTSTRAPPTD #define PDT_PROT (INTEL_PTE_PS | INTEL_PTE_VALID | INTEL_PTE_WRITE) -#define ID_MAP_2MEG(x) [(x)] = ((((uint64_t)(x)) << 21) | (PDT_PROT)), +#define ID_MAP_2MEG(x) [(x)] = ((((uint64_t)(x)) << 21) | (PDT_PROT)), -#define L0(x,n) x(n) -#define L1(x,n) L0(x,n-1) L0(x,n) -#define L2(x,n) L1(x,n-2) L1(x,n) -#define L3(x,n) L2(x,n-4) L2(x,n) -#define L4(x,n) L3(x,n-8) L3(x,n) -#define L5(x,n) L4(x,n-16) L4(x,n) -#define L6(x,n) L5(x,n-32) L5(x,n) -#define L7(x,n) L6(x,n-64) L6(x,n) -#define L8(x,n) L7(x,n-128) L7(x,n) -#define L9(x,n) L8(x,n-256) L8(x,n) -#define L10(x,n) L9(x,n-512) L9(x,n) -#define L11(x,n) L10(x,n-1024) L10(x,n) +#define L0(x, n) x(n) +#define L1(x, n) L0(x,n-1) L0(x,n) +#define L2(x, n) L1(x,n-2) L1(x,n) +#define L3(x, n) L2(x,n-4) L2(x,n) +#define L4(x, n) L3(x,n-8) L3(x,n) +#define L5(x, n) L4(x,n-16) L4(x,n) +#define L6(x, n) L5(x,n-32) L5(x,n) +#define L7(x, n) L6(x,n-64) L6(x,n) +#define L8(x, n) L7(x,n-128) L7(x,n) +#define L9(x, n) L8(x,n-256) L8(x,n) +#define L10(x, n) L9(x,n-512) L9(x,n) +#define L11(x, n) L10(x,n-1024) L10(x,n) #define FOR_0_TO_2047(x) L11(x,2047) -pd_entry_t BootPTD[2048] - __attribute__((section("__HIB, __bootPT"))) = { +pd_entry_t BootPTD[2048] +__attribute__((section("__HIB, __bootPT"))) = { FOR_0_TO_2047(ID_MAP_2MEG) }; #endif /* MACHINE_BOOTSTRAPPTD */ diff --git a/osfmk/x86_64/bzero.s b/osfmk/x86_64/bzero.s index ccaf05f1c..90e4a296a 100644 --- a/osfmk/x86_64/bzero.s +++ b/osfmk/x86_64/bzero.s @@ -68,24 +68,12 @@ ENTRY(secure_memset) /* * void *memset(void * addr, int pattern, size_t length) */ - +/* TODO: add variants for use with non-cacheable ranges */ ENTRY(memset) movq %rdi, %r8 movq %rsi, %rax /* move pattern (arg2) to rax */ - movb %al,%ah /* fill out pattern */ - movw %ax,%cx - shll $16,%eax - movw %cx,%ax - mov %eax, %ecx - shlq $32,%rax - orq %rcx, %rax - cld /* reset direction flag */ - movq %rdx, %rcx /* mov quads first */ - shrq $3, %rcx - rep - stosq - movq %rdx,%rcx /* mov bytes */ - andq $7,%rcx + movq %rdx, %rcx /* mov bytes */ + cld /* reset direction flag */ rep stosb movq %r8 ,%rax /* returns its first argument */ @@ -119,13 +107,8 @@ ENTRY(memset_word) Entry(blkclr) ENTRY2(bzero,__bzero) movq %rsi,%rcx - xorq %rax,%rax - shrq $3,%rcx + xor %eax,%eax cld rep - stosq - movq %rsi,%rcx - andq $7,%rcx - rep stosb ret diff --git a/osfmk/x86_64/copyio.c b/osfmk/x86_64/copyio.c index 
aae293b6c..557fae0ec 100644 --- a/osfmk/x86_64/copyio.c +++ b/osfmk/x86_64/copyio.c @@ -2,7 +2,7 @@ * Copyright (c) 2009-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -87,26 +87,26 @@ extern boolean_t copyio_zalloc_check; /* * Types of copies: */ -#define COPYIN 0 /* from user virtual to kernel virtual */ -#define COPYOUT 1 /* from kernel virtual to user virtual */ -#define COPYINSTR 2 /* string variant of copyout */ -#define COPYINPHYS 3 /* from user virtual to kernel physical */ -#define COPYOUTPHYS 4 /* from kernel physical to user virtual */ -#define COPYINWORD 5 /* from user virtual to kernel virtual */ +#define COPYIN 0 /* from user virtual to kernel virtual */ +#define COPYOUT 1 /* from kernel virtual to user virtual */ +#define COPYINSTR 2 /* string variant of copyin */ +#define COPYINPHYS 3 /* from user virtual to kernel physical */ +#define COPYOUTPHYS 4 /* from kernel physical to user virtual */ +#define COPYINWORD 5 /* from user virtual to kernel virtual */ #if ENABLE_SMAPLOG typedef struct { - uint64_t timestamp; - thread_t thread; - uintptr_t cr4; - uint8_t cpuid; - uint8_t smap_state; - uint8_t copyio_active; + uint64_t timestamp; + thread_t thread; + uintptr_t cr4; + uint8_t cpuid; + uint8_t smap_state; + uint8_t copyio_active; } smaplog_entry_t; #define SMAPLOG_BUFFER_SIZE (50) -static smaplog_entry_t smaplog_cbuf[SMAPLOG_BUFFER_SIZE]; -static uint32_t smaplog_head = 0; +static smaplog_entry_t smaplog_cbuf[SMAPLOG_BUFFER_SIZE]; +static uint32_t smaplog_head = 0; static void smaplog_add_entry(boolean_t enabling) @@ -132,7 +132,9 @@ smaplog_add_entry(boolean_t enabling) #endif /* ENABLE_SMAPLOG */ extern boolean_t pmap_smap_enabled; -static inline void user_access_enable(void) { +static inline void +user_access_enable(void) +{ if (pmap_smap_enabled) { stac(); #if ENABLE_SMAPLOG @@ -140,7 +142,9 @@ static inline void user_access_enable(void) { #endif } } -static inline void user_access_disable(void) { +static inline void +user_access_disable(void) +{ if (pmap_smap_enabled) { clac(); #if ENABLE_SMAPLOG @@ -157,47 +161,51 @@ static inline void user_access_disable(void) { static int copyio(int copy_type, user_addr_t user_addr, char *kernel_addr, - vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map) + vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map) { - thread_t thread = current_thread(); - pmap_t pmap; - vm_size_t bytes_copied; - int error = 0; - boolean_t istate = FALSE; - boolean_t
recursive_CopyIOActive; -#if COPYIO_TRACE_ENABLED - int debug_type = 0xeff70010; + thread_t thread = current_thread(); + pmap_t pmap; + vm_size_t bytes_copied; + int error = 0; + boolean_t istate = FALSE; + boolean_t recursive_CopyIOActive; +#if COPYIO_TRACE_ENABLED + int debug_type = 0xeff70010; debug_type += (copy_type << 2); #endif vm_size_t kernel_buf_size = 0; - if (__improbable(nbytes > copysize_limit_panic)) + if (__improbable(nbytes > copysize_limit_panic)) { panic("%s(%p, %p, %lu) - transfer too large", __func__, - (void *)user_addr, (void *)kernel_addr, nbytes); + (void *)user_addr, (void *)kernel_addr, nbytes); + } COPYIO_TRACE(debug_type | DBG_FUNC_START, user_addr, kernel_addr, nbytes, use_kernel_map, 0); - if (__improbable(nbytes == 0)) + if (__improbable(nbytes == 0)) { goto out; + } pmap = thread->map->pmap; boolean_t nopagezero = thread->map->pmap->pagezero_accessible; if ((copy_type != COPYINPHYS) && (copy_type != COPYOUTPHYS)) { - if (__improbable((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) + if (__improbable((vm_offset_t)kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS)) { panic("Invalid copy parameter, copy type: %d, kernel address: %p", copy_type, kernel_addr); + } if (__probable(copyio_zalloc_check)) { kernel_buf_size = zone_element_size(kernel_addr, NULL); - if (__improbable(kernel_buf_size && kernel_buf_size < nbytes)) + if (__improbable(kernel_buf_size && kernel_buf_size < nbytes)) { panic("copyio: kernel buffer %p has size %lu < nbytes %lu", kernel_addr, kernel_buf_size, nbytes); + } } } - + /* Sanity and security check for addresses to/from a user */ if (__improbable(((pmap != kernel_pmap) && (use_kernel_map == 0)) && - ((nbytes && (user_addr+nbytes <= user_addr)) || ((user_addr + nbytes) > vm_map_max(thread->map))))) { + ((nbytes && (user_addr + nbytes <= user_addr)) || ((user_addr + nbytes) > vm_map_max(thread->map))))) { error = EFAULT; goto out; } @@ -211,7 +219,7 @@ copyio(int copy_type, user_addr_t user_addr, char *kernel_addr, #endif /* - * If the no_shared_cr3 boot-arg is set (true), the kernel runs on + * If the no_shared_cr3 boot-arg is set (true), the kernel runs on * its own pmap and cr3 rather than the user's -- so that wild accesses * from kernel or kexts can be trapped. So, during copyin and copyout, * we need to switch back to the user's map/cr3. The thread is flagged @@ -236,15 +244,15 @@ copyio(int copy_type, user_addr_t user_addr, char *kernel_addr, user_access_enable(); -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG /* * Ensure that we're running on the target thread's cr3. 
*/ if ((pmap != kernel_pmap) && !use_kernel_map && (get_cr3_base() != pmap->pm_cr3)) { panic("copyio(%d,%p,%p,%ld,%p,%d) cr3 is %p expects %p", - copy_type, (void *)user_addr, kernel_addr, nbytes, lencopied, use_kernel_map, - (void *) get_cr3_raw(), (void *) pmap->pm_cr3); + copy_type, (void *)user_addr, kernel_addr, nbytes, lencopied, use_kernel_map, + (void *) get_cr3_raw(), (void *) pmap->pm_cr3); } #endif @@ -253,76 +261,76 @@ copyio(int copy_type, user_addr_t user_addr, char *kernel_addr, } COPYIO_TRACE(0xeff70044 | DBG_FUNC_NONE, user_addr, - kernel_addr, nbytes, 0, 0); - - switch (copy_type) { + kernel_addr, nbytes, 0, 0); + switch (copy_type) { case COPYIN: - error = _bcopy((const void *) user_addr, - kernel_addr, - nbytes); + error = _bcopy((const void *) user_addr, + kernel_addr, + nbytes); break; - + case COPYOUT: - error = _bcopy(kernel_addr, - (void *) user_addr, - nbytes); + error = _bcopy(kernel_addr, + (void *) user_addr, + nbytes); break; case COPYINPHYS: - error = _bcopy((const void *) user_addr, - PHYSMAP_PTOV(kernel_addr), - nbytes); + error = _bcopy((const void *) user_addr, + PHYSMAP_PTOV(kernel_addr), + nbytes); break; case COPYOUTPHYS: - error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr), - (void *) user_addr, - nbytes); + error = _bcopy((const void *) PHYSMAP_PTOV(kernel_addr), + (void *) user_addr, + nbytes); break; case COPYINWORD: error = _copyin_word((const void *) user_addr, - (void *) kernel_addr, - nbytes); + (void *) kernel_addr, + nbytes); break; case COPYINSTR: - error = _bcopystr((const void *) user_addr, - kernel_addr, - (int) nbytes, - &bytes_copied); + error = _bcopystr((const void *) user_addr, + kernel_addr, + (int) nbytes, + &bytes_copied); /* * lencopied should be updated on success * or ENAMETOOLONG... but not EFAULT */ - if (error != EFAULT) - *lencopied = bytes_copied; + if (error != EFAULT) { + *lencopied = bytes_copied; + } if (error) { #if KDEBUG - nbytes = *lencopied; + nbytes = *lencopied; #endif - break; + break; } if (*(kernel_addr + bytes_copied - 1) == 0) { - /* + /* * we found a NULL terminator... 
we're done */ #if KDEBUG - nbytes = *lencopied; + nbytes = *lencopied; #endif break; } else { - /* + /* * no more room in the buffer and we haven't * yet come across a NULL terminator */ #if KDEBUG - nbytes = *lencopied; + nbytes = *lencopied; #endif - error = ENAMETOOLONG; + error = ENAMETOOLONG; break; } } @@ -331,7 +339,7 @@ copyio(int copy_type, user_addr_t user_addr, char *kernel_addr, if (__improbable(pdswitch)) { istate = ml_set_interrupts_enabled(FALSE); - if (!recursive_CopyIOActive && (get_cr3_raw() != kernel_pmap->pm_cr3)) { + if (!recursive_CopyIOActive && (get_cr3_raw() != kernel_pmap->pm_cr3)) { if (nopagezero && pmap_pcid_ncpus) { pmap_pcid_activate(pmap, cpu_number(), TRUE, FALSE); } else { @@ -350,26 +358,26 @@ copyio(int copy_type, user_addr_t user_addr, char *kernel_addr, out: COPYIO_TRACE(debug_type | DBG_FUNC_END, user_addr, kernel_addr, nbytes, error, 0); - return (error); + return error; } static int copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which) { - char *paddr; + char *paddr; user_addr_t vaddr; int ctype; if (which & cppvPsnk) { paddr = (char *)sink; - vaddr = (user_addr_t)source; + vaddr = (user_addr_t)source; ctype = COPYINPHYS; } else { - paddr = (char *)source; + paddr = (char *)source; vaddr = (user_addr_t)sink; ctype = COPYOUTPHYS; - CALL_COPYOUT_SHIM_PHYS((void *)PHYSMAP_PTOV(source),sink,csize) + CALL_COPYOUT_SHIM_PHYS((void *)PHYSMAP_PTOV(source), sink, csize) } return copyio(ctype, vaddr, paddr, csize, NULL, which & cppvKmap); } @@ -377,13 +385,13 @@ copyio_phys(addr64_t source, addr64_t sink, vm_size_t csize, int which) int copyinmsg(const user_addr_t user_addr, char *kernel_addr, mach_msg_size_t nbytes) { - return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0); -} + return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0); +} int copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes) { - return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0); + return copyio(COPYIN, user_addr, kernel_addr, nbytes, NULL, 0); } /* @@ -395,35 +403,37 @@ int copyin_word(const user_addr_t user_addr, uint64_t *kernel_addr, vm_size_t nbytes) { /* Verify sizes */ - if ((nbytes != 4) && (nbytes != 8)) + if ((nbytes != 4) && (nbytes != 8)) { return EINVAL; + } /* Test alignment */ - if (user_addr & (nbytes - 1)) + if (user_addr & (nbytes - 1)) { return EINVAL; + } return copyio(COPYINWORD, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0); } int -copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied) +copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied) { - *lencopied = 0; + *lencopied = 0; - return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0); + return copyio(COPYINSTR, user_addr, kernel_addr, nbytes, lencopied, 0); } int copyoutmsg(const char *kernel_addr, user_addr_t user_addr, mach_msg_size_t nbytes) { - CALL_COPYOUT_SHIM_MSG(kernel_addr,user_addr,(vm_size_t)nbytes) - return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0); + CALL_COPYOUT_SHIM_MSG(kernel_addr, user_addr, (vm_size_t)nbytes) + return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0); } int copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes) { - CALL_COPYOUT_SHIM_NRML(kernel_addr,user_addr,nbytes) - return copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0); + CALL_COPYOUT_SHIM_NRML(kernel_addr, user_addr, nbytes) + return 
copyio(COPYOUT, user_addr, (char *)(uintptr_t)kernel_addr, nbytes, NULL, 0); } @@ -432,68 +442,75 @@ copypv(addr64_t src64, addr64_t snk64, unsigned int size, int which) { unsigned int lop, csize; int bothphys = 0; - - KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64, - (unsigned)snk64, size, which, 0); - - if ((which & (cppvPsrc | cppvPsnk)) == 0 ) /* Make sure that only one is virtual */ - panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */ - if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk)) - bothphys = 1; /* both are physical */ + KERNEL_DEBUG(0xeff7004c | DBG_FUNC_START, (unsigned)src64, + (unsigned)snk64, size, which, 0); + if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that only one is virtual */ + panic("copypv: no more than 1 parameter may be virtual\n"); /* Not allowed */ + } + if ((which & (cppvPsrc | cppvPsnk)) == (cppvPsrc | cppvPsnk)) { + bothphys = 1; /* both are physical */ + } while (size) { - - if (bothphys) { - lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1))); /* Assume sink smallest */ + if (bothphys) { + lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1))); /* Assume sink smallest */ - if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)))) - lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))); /* No, source is smaller */ + if (lop > (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1)))) { + lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))); /* No, source is smaller */ + } } else { - /* + /* * only need to compute the resid for the physical page * address... we don't care about where we start/finish in * the virtual since we just call the normal copyin/copyout */ - if (which & cppvPsrc) - lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))); - else - lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1))); + if (which & cppvPsrc) { + lop = (unsigned int)(PAGE_SIZE - (src64 & (PAGE_SIZE - 1))); + } else { + lop = (unsigned int)(PAGE_SIZE - (snk64 & (PAGE_SIZE - 1))); + } } - csize = size; /* Assume we can copy it all */ - if (lop < size) - csize = lop; /* Nope, we can't do it all */ -#if 0 + csize = size; /* Assume we can copy it all */ + if (lop < size) { + csize = lop; /* Nope, we can't do it all */ + } +#if 0 /* - * flush_dcache64 is currently a nop on the i386... + * flush_dcache64 is currently a nop on the i386... * it's used when copying to non-system memory such * as video capture cards... on PPC there was a need * to flush due to how we mapped this memory... not * sure if it's needed on i386. 
*/ - if (which & cppvFsrc) - flush_dcache64(src64, csize, 1); /* If requested, flush source before move */ - if (which & cppvFsnk) - flush_dcache64(snk64, csize, 1); /* If requested, flush sink before move */ + if (which & cppvFsrc) { + flush_dcache64(src64, csize, 1); /* If requested, flush source before move */ + } + if (which & cppvFsnk) { + flush_dcache64(snk64, csize, 1); /* If requested, flush sink before move */ + } #endif - if (bothphys) - bcopy_phys(src64, snk64, csize); /* Do a physical copy, virtually */ - else { - if (copyio_phys(src64, snk64, csize, which)) - return (KERN_FAILURE); + if (bothphys) { + bcopy_phys(src64, snk64, csize); /* Do a physical copy, virtually */ + } else { + if (copyio_phys(src64, snk64, csize, which)) { + return KERN_FAILURE; + } } #if 0 - if (which & cppvFsrc) - flush_dcache64(src64, csize, 1); /* If requested, flush source after move */ - if (which & cppvFsnk) - flush_dcache64(snk64, csize, 1); /* If requested, flush sink after move */ + if (which & cppvFsrc) { + flush_dcache64(src64, csize, 1); /* If requested, flush source after move */ + } + if (which & cppvFsnk) { + flush_dcache64(snk64, csize, 1); /* If requested, flush sink after move */ + } #endif - size -= csize; /* Calculate what is left */ - snk64 += csize; /* Bump sink to next physical address */ - src64 += csize; /* Bump source to next physical address */ + size -= csize; /* Calculate what is left */ + snk64 += csize; /* Bump sink to next physical address */ + src64 += csize; /* Bump source to next physical address */ } KERNEL_DEBUG(0xeff7004c | DBG_FUNC_END, (unsigned)src64, - (unsigned)snk64, size, which, 0); + (unsigned)snk64, size, which, 0); return KERN_SUCCESS; } diff --git a/osfmk/x86_64/idt64.s b/osfmk/x86_64/idt64.s index b1a1ada4c..d17bb5bb7 100644 --- a/osfmk/x86_64/idt64.s +++ b/osfmk/x86_64/idt64.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Apple Inc. All rights reserved. + * Copyright (c) 2010-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -199,7 +199,8 @@ Entry(idt64_debug) leaq EXT(idt64_hndl_table0)(%rip), %rax mov 16(%rax), %rax /* Offset of per-CPU shadow */ - mov %gs:CPU_TASK_CR3(%rax), %rax + + mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax mov %rax, %cr3 pop %rcx @@ -249,12 +250,81 @@ Entry(idt64_mdep_scall) Entry(idt64_gen_prot) pushq $(HNDL_ALLTRAPS) pushq $(T_GENERAL_PROTECTION) - jmp L_dispatch + jmp L_check_for_kern_flt Entry(idt64_stack_fault) pushq $(HNDL_ALLTRAPS) pushq $(T_STACK_FAULT) - jmp L_dispatch + jmp L_check_for_kern_flt + +L_check_for_kern_flt: + /* + * If we took a #GP or #SS from the kernel, check if we took them + * from either ret32_iret or ret64_iret. If we did, we need to + * jump into L_dispatch at the swapgs so that the code in L_dispatch + * can proceed with the correct GSbase. + */ + pushq %rax + testb $3, 8+ISF64_CS(%rsp) + jnz L_dispatch_from_user_no_push_rax /* Fault from user, go straight to dispatch */ + leaq EXT(ret32_iret)(%rip), %rax + cmpq %rax, 8+ISF64_RIP(%rsp) + je 1f + leaq EXT(ret64_iret)(%rip), %rax + cmpq %rax, 8+ISF64_RIP(%rsp) + je 1f + jmp L_dispatch_from_kernel_no_push_rax + /* + * We hit the fault on iretq, so check the original return %cs. If + * it's a user %cs, fix up the stack and then jump to dispatch.
+ * + * With this type of fault, the stack is laid out as follows: + * + * + * orig %ss saved_rsp+32 + * orig %rsp saved_rsp+24 + * orig %rflags saved_rsp+16 + * orig %cs saved_rsp+8 + * orig %rip saved_rsp + * ^^^^^^^^^ (maybe on another stack, since we switched to IST1) + * %ss +64 -8 + * saved_rsp +56 -16 + * %rflags +48 -24 + * %cs +40 -32 + * %rip +32 -40 + * error code +24 -48 + * handler +16 -56 + * trap number +8 -64 + * <== %rsp -72 + */ +1: + pushq %rbx + movq 16+ISF64_RSP(%rsp), %rbx + movq ISF64_CS-24(%rbx), %rax + testb $3, %al /* If the original return destination was to user */ + jnz 2f + popq %rbx + jmp L_dispatch_from_kernel_no_push_rax /* Fault occurred when trying to return to kernel */ +2: + /* + * Fix the stack so the original trap frame is current, then jump to dispatch + */ + movq %rax, 16+ISF64_CS(%rsp) + + movq ISF64_RSP-24(%rbx), %rax + movq %rax, 16+ISF64_RSP(%rsp) + + movq ISF64_RIP-24(%rbx), %rax + movq %rax, 16+ISF64_RIP(%rsp) + + movq ISF64_SS-24(%rbx), %rax + movq %rax, 16+ISF64_SS(%rsp) + + movq ISF64_RFLAGS-24(%rbx), %rax + movq %rax, 16+ISF64_RFLAGS(%rsp) + + popq %rbx + jmp L_dispatch_from_user_no_push_rax Entry(idt64_segnp) pushq $(HNDL_ALLTRAPS) @@ -300,7 +370,7 @@ Entry(idt64_nmi) leaq EXT(idt64_hndl_table0)(%rip), %rax mov 16(%rax), %rax /* Offset of per-CPU shadow */ - mov %gs:CPU_TASK_CR3(%rax), %rax + mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax mov %rax, %cr3 /* note that SMAP is enabled in L_common_dispatch (on Broadwell+) */ mov %gs:CPU_UBER_ISF, %rcx /* PCB stack addr */ @@ -326,7 +396,7 @@ Entry(idt64_nmi) leaq EXT(idt64_hndl_table0)(%rip), %rax mov 16(%rax), %rax /* Offset of per-CPU shadow */ mov %cr3, %rdx - mov %gs:CPU_TASK_CR3(%rax), %rax + mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax mov %rax, %cr3 /* Unconditionally switch to primary kernel pagetables */ /* @@ -463,15 +533,17 @@ L_dispatch: pushq %rax testb $3, 8+ISF64_CS(%rsp) jz 1f +L_dispatch_from_user_no_push_rax: swapgs leaq EXT(idt64_hndl_table0)(%rip), %rax mov 16(%rax), %rax L_dispatch_kgsb: - mov %gs:CPU_TASK_CR3(%rax), %rax + mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax mov %rax, %cr3 #if DEBUG mov %rax, %gs:CPU_ENTRY_CR3 #endif +L_dispatch_from_kernel_no_push_rax: 1: leaq EXT(idt64_hndl_table0)(%rip), %rax /* The text/data relationship here must be preserved in the doublemap, and the contents must be remapped */ @@ -503,25 +575,77 @@ Entry(ks_64bit_return) push R64_CS(%r15) push R64_RIP(%r15) - mov R64_R15(%r15), %r15 cmpq $(KERNEL64_CS), 8(%rsp) - jz 1f + jne 1f /* Returning to user (%r15 will be restored after the segment checks) */ + mov R64_R15(%r15), %r15 + jmp L_64b_kernel_return /* Returning to kernel */ + +1: + push %rax /* [A] */ + movl %gs:CPU_NEED_SEGCHK, %eax + push %rax /* [B] */ + + /* Returning to user */ + cmpl $0, %gs:CPU_CURTASK_HAS_LDT /* If the current task has an LDT, check and restore segment regs */ + jne L_64b_segops_island + + /* + * Restore %r15, since we're now done accessing saved state + * and (%r15) won't be accessible after the %cr3 load anyway. + * Note that %r15 is restored below for the segment-restore + * case, just after we no longer need to access register state + * relative to %r15. + */ + mov R64_R15(%r15), %r15 + + /* + * Note that this %cr3 sequence is duplicated here to save
+ */ /* Discover user cr3/ASID */ - push %rax mov %gs:CPU_UCR3, %rax #if DEBUG mov %rax, %gs:CPU_EXIT_CR3 #endif mov %rax, %cr3 /* Continue execution on the shared/doublemapped trampoline */ - pop %rax swapgs -1: - cmpl $(SYSCALL_CS), 8(%rsp) /* test for exit via SYSRET */ + +L_chk_sysret: + pop %rax /* Matched to [B], above (segchk required) */ + + /* + * At this point, the stack contains: + * + * +--------------+ + * | Return SS | +40 + * | Return RSP | +32 + * | Return RFL | +24 + * | Return CS | +16 + * | Return RIP | +8 + * | Saved RAX | <-- rsp + * +--------------+ + */ + cmpl $(SYSCALL_CS), 16(%rsp) /* test for exit via SYSRET */ je L_sysret + + cmpl $1, %eax + je L_verw_island_2 + + pop %rax /* Matched to [A], above */ + +L_64b_kernel_return: +.globl EXT(ret64_iret) EXT(ret64_iret): - iretq /* return from interrupt */ + iretq /* return from interrupt */ + + L_sysret: + cmpl $1, %eax + je L_verw_island_3 + + pop %rax /* Matched to [A], above */ /* * Here to restore rcx/r11/rsp and perform the sysret back to user-space. * rcx user rip @@ -532,7 +656,138 @@ L_sysret: add $8, %rsp pop %r11 pop %rsp - sysretq /* return from system call */ + sysretq /* return from system call */ + + +L_verw_island_2: + + pop %rax /* Matched to [A], above */ + verw 40(%rsp) /* verw operates on the %ss value already on the stack */ + jmp EXT(ret64_iret) + + +L_verw_island_3: + + pop %rax /* Matched to [A], above */ + + /* + * Here to restore rcx/r11/rsp and perform the sysret back to user-space. + * rcx user rip + * r11 user rflags + * rsp user stack pointer + */ + pop %rcx + add $8, %rsp + pop %r11 + verw 8(%rsp) /* verw operates on the %ss value already on the stack */ + pop %rsp + sysretq /* return from system call */ + + +L_64b_segops_island: + + /* Validate CS/DS/ES/FS/GS segment selectors with the Load Access Rights instruction prior to restoration */ + /* Exempt "known good" statically configured selectors, e.g. USER64_CS and 0 */ + cmpl $(USER64_CS), R64_CS(%r15) + jz 11f + larw R64_CS(%r15), %ax + jnz L_64_reset_cs + /* Ensure that the segment referenced by CS in the saved state is a code segment (bit 11 == 1) */ + testw $0x800, %ax + jz L_64_reset_cs /* Update stored %cs with known-good selector if ZF == 1 */ + jmp 11f +L_64_reset_cs: + movl $(USER64_CS), R64_CS(%r15) +11: + cmpl $0, R64_DS(%r15) + jz 22f + larw R64_DS(%r15), %ax + jz 22f + movl $0, R64_DS(%r15) +22: + cmpl $0, R64_ES(%r15) + jz 33f + larw R64_ES(%r15), %ax + jz 33f + movl $0, R64_ES(%r15) +33: + cmpl $0, R64_FS(%r15) + jz 44f + larw R64_FS(%r15), %ax + jz 44f + movl $0, R64_FS(%r15) +44: + cmpl $0, R64_GS(%r15) + jz 55f + larw R64_GS(%r15), %ax + jz 55f + movl $0, R64_GS(%r15) +55: + /* + * Pack the segment registers in %rax since (%r15) will not + * be accessible after the %cr3 switch. + * Only restore %gs if cthread_self is zero, (indicate + * this to the code below with a value of 0xffff) + */ + mov %gs:CPU_ACTIVE_THREAD, %rax /* Get the active thread */ + cmpq $0, TH_CTH_SELF(%rax) + je L_restore_gs + movw $0xFFFF, %ax + jmp 1f +L_restore_gs: + movw R64_GS(%r15), %ax +1: + shlq $16, %rax + movw R64_FS(%r15), %ax + shlq $16, %rax + movw R64_ES(%r15), %ax + shlq $16, %rax + movw R64_DS(%r15), %ax + + /* + * Restore %r15, since we're done accessing saved state + * and (%r15) won't be accessible after the %cr3 switch. 
+ */ + mov R64_R15(%r15), %r15 + + /* Discover user cr3/ASID */ + push %rax + mov %gs:CPU_UCR3, %rax +#if DEBUG + mov %rax, %gs:CPU_EXIT_CR3 +#endif + mov %rax, %cr3 + /* Continue execution on the shared/doublemapped trampoline */ + pop %rax + swapgs + + /* + * Returning to user; restore segment registers that might be used + * by compatibility-mode code in a 64-bit user process. + * + * Note that if we take a fault here, it's OK that we haven't yet + * popped %rax from the stack, because %rsp will be reset to + * the value pushed onto the exception stack (above). + */ + movw %ax, %ds + shrq $16, %rax + + movw %ax, %es + shrq $16, %rax + + movw %ax, %fs + shrq $16, %rax + + /* + * 0xFFFF is the sentinel set above that indicates we should + * not restore %gs (because GS.base was already set elsewhere + * (e.g.: in act_machine_set_pcb or machine_thread_set_tsd_base)) + */ + cmpw $0xFFFF, %ax + je L_chk_sysret + movw %ax, %gs /* Restore %gs to user-set value */ + jmp L_chk_sysret + L_u64bit_entry_check: /* @@ -617,11 +872,20 @@ L_dispatch_64bit: movl $(SS_64), SS_FLAVOR(%r15) /* - * Save segment regs - for completeness since theyre not used. + * Save segment regs if a 64-bit task has + * installed customized segments in the LDT */ + cmpl $0, %gs:CPU_CURTASK_HAS_LDT + je L_skip_save_extra_segregs + + mov %ds, R64_DS(%r15) + mov %es, R64_ES(%r15) + +L_skip_save_extra_segregs: mov %fs, R64_FS(%r15) mov %gs, R64_GS(%r15) + /* Save general-purpose registers */ mov %rax, R64_RAX(%r15) mov %rbx, R64_RBX(%r15) @@ -653,6 +917,7 @@ L_dispatch_64bit: mov %cr2, %rax mov %rax, R64_CR2(%r15) +L_dispatch_U64_after_fault: mov R64_TRAPNO(%r15), %ebx /* %ebx := trapno for later */ mov R64_TRAPFN(%r15), %rdx /* %rdx := trapfn for later */ mov R64_CS(%r15), %esi /* %esi := cs for later */ @@ -726,12 +991,7 @@ L_common_dispatch: clac /* Clear EFLAGS.AC if SMAP is present/enabled */ 1: /* - * On entering the kernel, we typically don't switch CR3 - * because the kernel shares the user's address space. - * But we mark the kernel's cr3 as "active" for TLB coherency evaluation - * If, however, the CPU's invalid TLB flag is set, we have to invalidate the TLB - * since the kernel pagetables were changed while we were in userspace. - * + * We mark the kernel's cr3 as "active" for TLB coherency evaluation * For threads with a mapped pagezero (some WINE games) on non-SMAP platforms, * we switch to the kernel's address space on entry. Also, * if the global no_shared_cr3 is TRUE we do switch to the kernel's cr3 @@ -739,9 +999,9 @@ L_common_dispatch: */ mov %gs:CPU_KERNEL_CR3, %rcx mov %rcx, %gs:CPU_ACTIVE_CR3 - test $3, %esi /* user/kernel? */ - jz 2f /* skip cr3 reload from kernel */ - xor %rbp, %rbp + test $3, %esi /* CS: user/kernel? 
*/ + jz 2f /* skip CR3 reload if from kernel */ + xor %ebp, %ebp cmpl $0, %gs:CPU_PAGEZERO_MAPPED jnz 11f cmpl $0, EXT(no_shared_cr3)(%rip) @@ -751,28 +1011,27 @@ L_common_dispatch: movw %gs:CPU_KERNEL_PCID, %ax or %rax, %rcx mov %rcx, %cr3 /* load kernel cr3 */ - jmp 4f /* and skip tlb flush test */ + jmp 4f 2: - mov %gs:CPU_ACTIVE_CR3+4, %rcx - shr $32, %rcx - testl %ecx, %ecx - jz 4f - movl $0, %gs:CPU_TLB_INVALID - mov %cr4, %rcx /* RMWW CR4, for lack of an alternative*/ - and $(~CR4_PGE), %rcx - mov %rcx, %cr4 - or $(CR4_PGE), %rcx - mov %rcx, %cr4 + /* Deferred processing of pending kernel address space TLB invalidations */ + mov %gs:CPU_ACTIVE_CR3+4, %rcx + shr $32, %rcx + testl %ecx, %ecx + jz 4f + movl $0, %gs:CPU_TLB_INVALID + cmpb $0, EXT(invpcid_enabled)(%rip) + jz L_cr4_island + movl $2, %ecx + invpcid %gs:CPU_IP_DESC, %rcx 4: +L_set_act: mov %gs:CPU_ACTIVE_THREAD, %rcx /* Get the active thread */ testq %rcx, %rcx - je 5f + je L_intcnt movl $-1, TH_IOTIER_OVERRIDE(%rcx) /* Reset IO tier override to -1 before handling trap */ cmpq $0, TH_PCB_IDS(%rcx) /* Is there a debug register state? */ - je 5f - xor %ecx, %ecx /* If so, reset DR7 (the control) */ - mov %rcx, %dr7 -5: + jnz L_dr7_island +L_intcnt: incl %gs:hwIntCnt(,%ebx,4) // Bump the trap/intr count /* Dispatch the designated handler */ cmp EXT(dblmap_base)(%rip), %rsp @@ -785,6 +1044,17 @@ L_common_dispatch: leaq EXT(idt64_hndl_table1)(%rip), %rax jmp *(%rax, %rdx, 8) +L_cr4_island: + mov %cr4, %rcx /* RMWW CR4, for lack of an alternative*/ + and $(~CR4_PGE), %rcx + mov %rcx, %cr4 + or $(CR4_PGE), %rcx + mov %rcx, %cr4 + jmp L_set_act +L_dr7_island: + xor %ecx, %ecx /* If so, reset DR7 (the control) */ + mov %rcx, %dr7 + jmp L_intcnt /* * Control is passed here to return to user. */ @@ -792,43 +1062,13 @@ Entry(return_to_user) TIME_TRAP_UEXIT Entry(ret_to_user) -// XXX 'Be nice to tidy up this debug register restore sequence... mov %gs:CPU_ACTIVE_THREAD, %rdx - movq TH_PCB_IDS(%rdx),%rax /* Obtain this thread's debug state */ - - test %rax, %rax /* Is there a debug register context? */ - je 2f /* branch if not */ - cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */ - jne 1f - movl DS_DR0(%rax), %ecx /* If so, load the 32 bit DRs */ - movq %rcx, %dr0 - movl DS_DR1(%rax), %ecx - movq %rcx, %dr1 - movl DS_DR2(%rax), %ecx - movq %rcx, %dr2 - movl DS_DR3(%rax), %ecx - movq %rcx, %dr3 - movl DS_DR7(%rax), %ecx - movq %rcx, %gs:CPU_DR7 - jmp 2f -1: - mov DS64_DR0(%rax), %rcx /* Load the full width DRs*/ - mov %rcx, %dr0 - mov DS64_DR1(%rax), %rcx - mov %rcx, %dr1 - mov DS64_DR2(%rax), %rcx - mov %rcx, %dr2 - mov DS64_DR3(%rax), %rcx - mov %rcx, %dr3 - mov DS64_DR7(%rax), %rcx - mov %rcx, %gs:CPU_DR7 -2: + cmpq $0, TH_PCB_IDS(%rdx) /* Is there a debug register context? */ + jnz L_dr_restore_island +L_post_dr_restore: /* - * On exiting the kernel there's typically no need to switch cr3 since we're - * already running in the user's address space which includes the - * kernel. We now mark the task's cr3 as active, for TLB coherency. - * If the target address space has a pagezero mapping present, or - * if no_shared_cr3 is set, we do need to switch cr3 at this point. + * We now mark the task's address space as active for TLB coherency. + * Handle special cases such as pagezero-less tasks here. 
*/ mov %gs:CPU_TASK_CR3, %rcx mov %rcx, %gs:CPU_ACTIVE_CR3 @@ -873,8 +1113,19 @@ L_32bit_return: movl R32_SS(%r15), %eax movl %eax, R64_SS(%r15) - /* Validate DS/ES/FS/GS segment selectors with the Load Access Rights instruction prior to restoration */ - /* Exempt "known good" statically configured selectors, e.g. USER_DS and 0 */ + /* Validate CS/DS/ES/FS/GS segment selectors with the Load Access Rights instruction prior to restoration */ + /* Exempt "known good" statically configured selectors, e.g. USER_CS, USER_DS and 0 */ + cmpl $(USER_CS), R32_CS(%r15) + jz 11f + larw R32_CS(%r15), %ax + jnz L_32_reset_cs + /* Ensure that the segment referenced by CS in the saved state is a code segment (bit 11 == 1) */ + testw $0x800, %ax + jz L_32_reset_cs /* Update stored %cs with known-good selector if ZF == 1 */ + jmp 11f +L_32_reset_cs: + movl $(USER_CS), R32_CS(%r15) +11: cmpl $(USER_DS), R32_DS(%r15) jz 22f cmpl $0, R32_DS(%r15) @@ -923,6 +1174,7 @@ L_32bit_return: * push state on the IST1 stack and will not affect the "PCB stack". */ mov %r15, %rsp /* Set the PCB as the stack */ + movl %gs:CPU_NEED_SEGCHK, %r14d /* %r14 will be restored below */ swapgs /* Zero 64-bit-exclusive GPRs to prevent data leaks */ @@ -932,35 +1184,98 @@ L_32bit_return: xor %r11, %r11 xor %r12, %r12 xor %r13, %r13 - xor %r14, %r14 xor %r15, %r15 -EXT(ret32_set_ds): movw R32_DS(%rsp), %ds -EXT(ret32_set_es): movw R32_ES(%rsp), %es -EXT(ret32_set_fs): movw R32_FS(%rsp), %fs -EXT(ret32_set_gs): movw R32_GS(%rsp), %gs /* pop compat frame + trapno, trapfn and error */ add $(ISS64_OFFSET)+8+8+8, %rsp - cmpl $(SYSENTER_CS),ISF64_CS-8-8-8(%rsp) - /* test for fast entry/exit */ - je L_fast_exit + + /* + * At this point, the stack contains: + * + * +--------------+ + * | Return SS | +32 + * | Return RSP | +24 + * | Return RFL | +16 + * | Return CS | +8 + * | Return RIP | <-- rsp + * +--------------+ + */ + + cmpl $(SYSENTER_CS), 8(%rsp) + /* test for sysexit */ + je L_rtu_via_sysexit + + cmpl $1, %r14d + je L_verw_island + +L_after_verw: + xor %r14, %r14 + +.globl EXT(ret32_iret) EXT(ret32_iret): iretq /* return from interrupt */ -L_fast_exit: +L_verw_island: + verw 32(%rsp) + jmp L_after_verw + +L_verw_island_1: + verw 16(%rsp) + jmp L_after_verw_1 + +L_rtu_via_sysexit: pop %rdx /* user return eip */ pop %rcx /* pop and toss cs */ andl $(~EFL_IF), (%rsp) /* clear interrupts enable, sti below */ + + /* + * %ss is now at 16(%rsp) + */ + cmpl $1, %r14d + je L_verw_island_1 +L_after_verw_1: + xor %r14, %r14 + popf /* flags - carry denotes failure */ pop %rcx /* user return esp */ + + sti /* interrupts enabled after sysexit */ sysexitl /* 32-bit sysexit */ +L_dr_restore_island: + movq TH_PCB_IDS(%rdx),%rax /* Obtain this thread's debug state */ + cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? 
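For a 32-bit task only the low 32 bits of each debug address
register are meaningful, so the path below loads DS_DR0..DS_DR3
through a 32-bit register, which zero-extends into the full
register, and stages DR7 in the per-cpu CPU_DR7 slot rather than
writing it directly. A hedged sketch, where ds32 is a hypothetical
pointer to the saved 32-bit debug state and the set_drN accessors
are illustrative only:

	set_dr0((uint64_t)ds32->dr0);
	set_dr3((uint64_t)ds32->dr3);
	cpu_datap(cpu_number())->cpu_dr7 = ds32->dr7;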
*/ + jne 1f + movl DS_DR0(%rax), %ecx /* If so, load the 32 bit DRs */ + movq %rcx, %dr0 + movl DS_DR1(%rax), %ecx + movq %rcx, %dr1 + movl DS_DR2(%rax), %ecx + movq %rcx, %dr2 + movl DS_DR3(%rax), %ecx + movq %rcx, %dr3 + movl DS_DR7(%rax), %ecx + movq %rcx, %gs:CPU_DR7 + jmp 2f +1: + mov DS64_DR0(%rax), %rcx /* Load the full width DRs*/ + mov %rcx, %dr0 + mov DS64_DR1(%rax), %rcx + mov %rcx, %dr1 + mov DS64_DR2(%rax), %rcx + mov %rcx, %dr2 + mov DS64_DR3(%rax), %rcx + mov %rcx, %dr3 + mov DS64_DR7(%rax), %rcx + mov %rcx, %gs:CPU_DR7 +2: + jmp L_post_dr_restore L_cr3_switch_island: xor %eax, %eax movw %gs:CPU_ACTIVE_PCID, %ax @@ -992,213 +1307,6 @@ L_64bit_return: leaq EXT(idt64_hndl_table0)(%rip), %rax jmp *8(%rax) -Entry(ks_idt64_debug_kernel) - /* - * trap came from kernel mode - */ - - push %rax /* save %rax temporarily */ - lea EXT(idt64_sysenter)(%rip), %rax - cmp %rax, ISF64_RIP+8(%rsp) - pop %rax - jne EXT(ks_dispatch_kernel) - /* - * Interrupt stack frame has been pushed on the temporary stack. - * We have to switch to pcb stack and patch up the saved state. - */ - mov %rcx, ISF64_ERR(%rsp) /* save %rcx in error slot */ - mov ISF64_SS+8(%rsp), %rcx /* top of temp stack -> pcb stack */ - xchg %rcx,%rsp /* switch to pcb stack */ - push $(USER_DS) /* ss */ - push ISF64_ERR(%rcx) /* saved %rcx into rsp slot */ - push ISF64_RFLAGS(%rcx) /* rflags */ - push $(SYSENTER_TF_CS) /* cs - not SYSENTER_CS for iret path */ - mov ISF64_ERR(%rcx),%rcx /* restore %rcx */ - jmp L_sysenter_continue /* continue sysenter entry */ - -Entry(ks_trap_check_kernel_exit) - testb $3,ISF64_CS(%rsp) - jz L_kernel_gpf - - /* Here for fault from user-space. Copy interrupt state to PCB. */ - swapgs - push %rax - mov %rcx, %gs:CPU_UBER_TMP /* save user RCX */ - mov %gs:CPU_UBER_ISF, %rcx /* PCB stack addr */ - mov ISF64_SS+8(%rsp), %rax - mov %rax, ISF64_SS(%rcx) - mov ISF64_RSP+8(%rsp), %rax - mov %rax, ISF64_RSP(%rcx) - mov ISF64_RFLAGS+8(%rsp), %rax - mov %rax, ISF64_RFLAGS(%rcx) - mov ISF64_CS+8(%rsp), %rax - mov %rax, ISF64_CS(%rcx) - mov ISF64_RIP+8(%rsp), %rax - mov %rax, ISF64_RIP(%rcx) - mov ISF64_ERR+8(%rsp), %rax - mov %rax, ISF64_ERR(%rcx) - mov ISF64_TRAPFN+8(%rsp), %rax - mov %rax, ISF64_TRAPFN(%rcx) - mov ISF64_TRAPNO+8(%rsp), %rax - mov %rax, ISF64_TRAPNO(%rcx) - pop %rax - mov %gs:CPU_UBER_TMP, %rsp /* user RCX into RSP */ - xchg %rcx, %rsp /* to PCB stack with user RCX */ - jmp EXT(ks_dispatch_user) - -L_kernel_gpf: - /* Here for GPF from kernel_space. Check for recoverable cases. */ - push %rax - leaq EXT(ret32_iret)(%rip), %rax - cmp %rax, 8+ISF64_RIP(%rsp) - je L_fault_iret - leaq EXT(ret64_iret)(%rip), %rax - cmp %rax, 8+ISF64_RIP(%rsp) - je L_fault_iret - leaq EXT(ret32_set_ds)(%rip), %rax - cmp %rax, 8+ISF64_RIP(%rsp) - je L_32bit_fault_set_seg - leaq EXT(ret32_set_es)(%rip), %rax - cmp %rax, 8+ISF64_RIP(%rsp) - je L_32bit_fault_set_seg - leaq EXT(ret32_set_fs)(%rip), %rax - cmp %rax, 8+ISF64_RIP(%rsp) - je L_32bit_fault_set_seg - leaq EXT(ret32_set_gs)(%rip), %rax - cmp %rax, 8+ISF64_RIP(%rsp) - je L_32bit_fault_set_seg - jmp EXT(ks_kernel_trap) - /* Fall through */ - -Entry(ks_kernel_trap) - /* - * Here after taking an unexpected trap from kernel mode - perhaps - * while running in the trampolines hereabouts. - * Note: %rax has been pushed on stack. - * Make sure we're not on the PCB stack, if so move to the kernel stack. - * This is likely a fatal condition. - * But first, ensure we have the kernel gs base active... 
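(the deleted sequence below read MSR_IA32_GS_BASE and used the
most significant bit to decide whether the kernel GS base was
already live, since the kernel's per-cpu area sits at a
canonical-high address. A hedged C rendering of that removed test,
with do_swapgs standing in for the bare swapgs instruction:

	uint64_t gsbase = rdmsr64(MSR_IA32_GS_BASE);
	if ((gsbase & (1ULL << 63)) == 0)
		do_swapgs();
)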
- */ - push %rcx - push %rdx - mov $(MSR_IA32_GS_BASE), %ecx - rdmsr /* read kernel gsbase */ - test $0x80000000, %edx /* test MSB of address */ - jne 1f - swapgs /* so swap */ -1: - pop %rdx - pop %rcx - - movq %gs:CPU_UBER_ISF, %rax /* PCB stack addr */ - subq %rsp, %rax - cmpq $(PAGE_SIZE), %rax /* current stack in PCB? */ - jb 2f /* - yes, deal with it */ - pop %rax /* - no, restore %rax */ - jmp EXT(ks_dispatch_kernel) -2: - /* - * Here if %rsp is in the PCB - * Copy the interrupt stack frame from PCB stack to kernel stack - */ - movq %gs:CPU_KERNEL_STACK, %rax - xchgq %rax, %rsp - pushq 8+ISF64_SS(%rax) - pushq 8+ISF64_RSP(%rax) - pushq 8+ISF64_RFLAGS(%rax) - pushq 8+ISF64_CS(%rax) - pushq 8+ISF64_RIP(%rax) - pushq 8+ISF64_ERR(%rax) - pushq 8+ISF64_TRAPFN(%rax) - pushq 8+ISF64_TRAPNO(%rax) - movq (%rax), %rax - jmp EXT(ks_dispatch_kernel) - - -/* - * GP/NP fault on IRET: CS or SS is in error. - * User GSBASE is active. - * On IST1 stack containing: - * (rax saved above, which is immediately popped) - * 0 ISF64_TRAPNO: trap code (NP or GP) - * 8 ISF64_TRAPFN: trap function - * 16 ISF64_ERR: segment number in error (error code) - * 24 ISF64_RIP: kernel RIP - * 32 ISF64_CS: kernel CS - * 40 ISF64_RFLAGS: kernel RFLAGS - * 48 ISF64_RSP: kernel RSP - * 56 ISF64_SS: kernel SS - * On the PCB stack, pointed to by the kernel's RSP is: - * 0 user RIP - * 8 user CS - * 16 user RFLAGS - * 24 user RSP - * 32 user SS - * - * We need to move the kernel's TRAPNO, TRAPFN and ERR to the PCB and handle - * as a user fault with: - * 0 ISF64_TRAPNO: trap code (NP or GP) - * 8 ISF64_TRAPFN: trap function - * 16 ISF64_ERR: segment number in error (error code) - * 24 user RIP - * 32 user CS - * 40 user RFLAGS - * 48 user RSP - * 56 user SS - */ -L_fault_iret: - pop %rax /* recover saved %rax */ - mov %rax, ISF64_RIP(%rsp) /* save rax (we don`t need saved rip) */ - mov ISF64_RSP(%rsp), %rax - xchg %rax, %rsp /* switch to PCB stack */ - push ISF64_ERR(%rax) - push ISF64_TRAPFN(%rax) - push ISF64_TRAPNO(%rax) - mov ISF64_RIP(%rax), %rax /* restore rax */ - /* now treat as fault from user */ - jmp L_dispatch - -/* - * Fault restoring a segment register. All of the saved state is still - * on the stack untouched since we haven't yet moved the stack pointer. 
- * On IST1 stack containing: - * (rax saved above, which is immediately popped) - * 0 ISF64_TRAPNO: trap code (NP or GP) - * 8 ISF64_TRAPFN: trap function - * 16 ISF64_ERR: segment number in error (error code) - * 24 ISF64_RIP: kernel RIP - * 32 ISF64_CS: kernel CS - * 40 ISF64_RFLAGS: kernel RFLAGS - * 48 ISF64_RSP: kernel RSP - * 56 ISF64_SS: kernel SS - * On the PCB stack, pointed to by the kernel's RSP is: - * 0 user trap code - * 8 user trap function - * 16 user err - * 24 user RIP - * 32 user CS - * 40 user RFLAGS - * 48 user RSP - * 56 user SS - */ -L_32bit_fault_set_seg: - swapgs - pop %rax /* toss saved %rax from stack */ - mov ISF64_TRAPNO(%rsp), %rax - mov ISF64_TRAPFN(%rsp), %rcx - mov ISF64_ERR(%rsp), %rdx - mov ISF64_RSP(%rsp), %rsp /* reset stack to saved state */ - mov %rax,R64_TRAPNO(%rsp) - mov %rcx,R64_TRAPFN(%rsp) - mov %rdx,R64_ERR(%rsp) - /* now treat as fault from user */ - /* except that all the state is */ - /* already saved - we just have to */ - /* move the trapno and error into */ - /* the compatibility frame */ - jmp L_dispatch_U32_after_fault - - /* All 'exceptions' enter hndl_alltraps, with: * r15 x86_saved_state_t address * rsp kernel stack if user-space, otherwise interrupt or kernel stack diff --git a/osfmk/x86_64/idt_table.h b/osfmk/x86_64/idt_table.h index 639516a56..af91ae513 100644 --- a/osfmk/x86_64/idt_table.h +++ b/osfmk/x86_64/idt_table.h @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,42 +22,42 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -TRAP(0x00,idt64_zero_div) -TRAP_IST1(0x01,idt64_debug) -TRAP_IST2(0x02,idt64_nmi) -USER_TRAP(0x03,idt64_int3) -USER_TRAP(0x04,idt64_into) -USER_TRAP(0x05,idt64_bounds) -TRAP(0x06,idt64_invop) -TRAP(0x07,idt64_nofpu) -TRAP_IST1(0x08,idt64_double_fault) -TRAP(0x09,idt64_fpu_over) -TRAP_ERR(0x0a,idt64_inv_tss) -TRAP_IST1(0x0b,idt64_segnp) -TRAP_IST1(0x0c,idt64_stack_fault) -TRAP_IST1(0x0d,idt64_gen_prot) -TRAP_SPC(0x0e,idt64_page_fault) -TRAP(0x0f,idt64_trap_0f) -TRAP(0x10,idt64_fpu_err) -TRAP_ERR(0x11,idt64_alignment_check) -TRAP_IST1(0x12,idt64_mc) -TRAP(0x13,idt64_sse_err) -TRAP(0x14,idt64_trap_14) -TRAP(0x15,idt64_trap_15) -TRAP(0x16,idt64_trap_16) -TRAP(0x17,idt64_trap_17) -TRAP(0x18,idt64_trap_18) -TRAP(0x19,idt64_trap_19) -TRAP(0x1a,idt64_trap_1a) -TRAP(0x1b,idt64_trap_1b) -TRAP(0x1c,idt64_trap_1c) -TRAP(0x1d,idt64_trap_1d) -TRAP(0x1e,idt64_trap_1e) -TRAP(0x1f,idt64_trap_1f) +TRAP(0x00, idt64_zero_div) +TRAP_IST1(0x01, idt64_debug) +TRAP_IST2(0x02, idt64_nmi) +USER_TRAP(0x03, idt64_int3) +USER_TRAP(0x04, idt64_into) +USER_TRAP(0x05, idt64_bounds) +TRAP(0x06, idt64_invop) +TRAP(0x07, idt64_nofpu) +TRAP_IST1(0x08, idt64_double_fault) +TRAP(0x09, idt64_fpu_over) +TRAP_ERR(0x0a, idt64_inv_tss) +TRAP_IST1(0x0b, idt64_segnp) +TRAP_IST1(0x0c, idt64_stack_fault) +TRAP_IST1(0x0d, idt64_gen_prot) +TRAP_SPC(0x0e, idt64_page_fault) +TRAP(0x0f, idt64_trap_0f) +TRAP(0x10, idt64_fpu_err) +TRAP_ERR(0x11, idt64_alignment_check) +TRAP_IST1(0x12, idt64_mc) +TRAP(0x13, idt64_sse_err) +TRAP(0x14, idt64_trap_14) +TRAP(0x15, idt64_trap_15) +TRAP(0x16, idt64_trap_16) +TRAP(0x17, idt64_trap_17) +TRAP(0x18, idt64_trap_18) +TRAP(0x19, idt64_trap_19) +TRAP(0x1a, idt64_trap_1a) +TRAP(0x1b, idt64_trap_1b) +TRAP(0x1c, idt64_trap_1c) +TRAP(0x1d, idt64_trap_1d) +TRAP(0x1e, idt64_trap_1e) +TRAP(0x1f, idt64_trap_1f) INTERRUPT(0x20) INTERRUPT(0x21) @@ -161,9 +161,9 @@ INTERRUPT(0x7d) INTERRUPT(0x7e) USER_TRAP(0x7f, idt64_dtrace_ret) /* Required by dtrace "fasttrap" */ -USER_TRAP_SPC(0x80,idt64_unix_scall) -USER_TRAP_SPC(0x81,idt64_mach_scall) -USER_TRAP_SPC(0x82,idt64_mdep_scall) +USER_TRAP_SPC(0x80, idt64_unix_scall) +USER_TRAP_SPC(0x81, idt64_mach_scall) +USER_TRAP_SPC(0x82, idt64_mdep_scall) INTERRUPT(0x83) INTERRUPT(0x84) @@ -297,4 +297,4 @@ INTERRUPT(0xfb) INTERRUPT(0xfc) INTERRUPT(0xfd) INTERRUPT(0xfe) -TRAP(0xff,idt64_preempt) +TRAP(0xff, idt64_preempt) diff --git a/osfmk/x86_64/kpc_x86.c b/osfmk/x86_64/kpc_x86.c index f24bbfa31..da2ccc40d 100644 --- a/osfmk/x86_64/kpc_x86.c +++ b/osfmk/x86_64/kpc_x86.c @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -133,7 +133,7 @@ kpc_is_running_configurable(uint64_t pmc_mask) uint32_t kpc_fixed_count(void) { - i386_cpu_info_t *info = NULL; + i386_cpu_info_t *info = NULL; info = cpuid_info(); return info->cpuid_arch_perf_leaf.fixed_number; } @@ -141,7 +141,7 @@ kpc_fixed_count(void) uint32_t kpc_configurable_count(void) { - i386_cpu_info_t *info = NULL; + i386_cpu_info_t *info = NULL; info = cpuid_info(); return info->cpuid_arch_perf_leaf.number; } @@ -175,8 +175,8 @@ kpc_get_rawpmu_config(__unused kpc_config_t *configv) static uint8_t kpc_fixed_width(void) { - i386_cpu_info_t *info = NULL; - + i386_cpu_info_t *info = NULL; + info = cpuid_info(); return info->cpuid_arch_perf_leaf.fixed_width; @@ -185,7 +185,7 @@ kpc_fixed_width(void) static uint8_t kpc_configurable_width(void) { - i386_cpu_info_t *info = NULL; + i386_cpu_info_t *info = NULL; info = cpuid_info(); @@ -236,12 +236,13 @@ set_running_fixed(boolean_t on) int i; boolean_t enabled; - if( on ) + if (on) { /* these are per-thread in SMT */ fixed_ctrl = IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS | IA32_FIXED_CTR_ENABLE_ALL_PMI; - else + } else { /* don't allow disabling fixed counters */ return; + } wrmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl ); @@ -249,13 +250,15 @@ set_running_fixed(boolean_t on) /* rmw the global control */ global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL); - for( i = 0; i < (int) kpc_fixed_count(); i++ ) - mask |= (1ULL<<(32+i)); + for (i = 0; i < (int) kpc_fixed_count(); i++) { + mask |= (1ULL << (32 + i)); + } - if( on ) + if (on) { global |= mask; - else + } else { global &= ~mask; + } wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global); @@ -283,8 +286,8 @@ set_running_configurable(uint64_t target_mask, uint64_t state_mask) } /* update the global control value */ - global &= ~target_mask; /* clear the targeted PMCs bits */ - global |= state_mask; /* update the targeted PMCs bits with their new states */ + global &= ~target_mask; /* clear the targeted PMCs bits */ + global |= state_mask; /* update the targeted PMCs bits with their new states */ wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global); ml_set_interrupts_enabled(enabled); @@ -296,11 +299,12 @@ kpc_set_running_mp_call( void *vstate ) struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate; assert(mp_config); - if (kpc_controls_fixed_counters()) + if (kpc_controls_fixed_counters()) { set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK); + } set_running_configurable(mp_config->cfg_target_mask, - mp_config->cfg_state_mask); + mp_config->cfg_state_mask); } int @@ -328,9 +332,9 @@ kpc_get_fixed_counters(uint64_t *counterv) uint64_t status; /* snap the counters */ - for( i = 0; i < n; i++ ) { + for (i = 0; i < n; i++) { counterv[i] = FIXED_SHADOW(ctr) + - (IA32_FIXED_CTRx(i) - FIXED_RELOAD(ctr)); + (IA32_FIXED_CTRx(i) - FIXED_RELOAD(ctr)); } /* Grab the overflow bits */ @@ -340,14 +344,16 @@ kpc_get_fixed_counters(uint64_t *counterv) * before the counter overflowed. Re-read any counter with it's overflow bit set so * we know for sure that it has overflowed. 
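Bit (32 + i) of IA32_PERF_GLOBAL_STATUS flags fixed counter i. As
a sketch of the two cases, using this file's macros:

	no overflow: counterv[i] = FIXED_SHADOW(ctr) + (IA32_FIXED_CTRx(i) - FIXED_RELOAD(ctr));
	overflowed:  counterv[i] = FIXED_SHADOW(ctr) + (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1) + IA32_FIXED_CTRx(i);
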
The reason this matters is that the math * is different for a counter that has overflowed. */ - for( i = 0; i < n; i++ ) { - if ((1ull << (i + 32)) & status) + for (i = 0; i < n; i++) { + if ((1ull << (i + 32)) & status) { counterv[i] = FIXED_SHADOW(ctr) + - (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + IA32_FIXED_CTRx(i); + (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + IA32_FIXED_CTRx(i); + } } #else - for( i = 0; i < n; i++ ) + for (i = 0; i < n; i++) { counterv[i] = IA32_FIXED_CTRx(i); + } #endif return 0; @@ -360,9 +366,11 @@ kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask) assert(configv); - for (uint32_t i = 0; i < cfg_count; ++i) - if ((1ULL << i) & pmc_mask) + for (uint32_t i = 0; i < cfg_count; ++i) { + if ((1ULL << i) & pmc_mask) { *configv++ = IA32_PERFEVTSELx(i); + } + } return 0; } @@ -372,9 +380,10 @@ kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask) uint32_t cfg_count = kpc_configurable_count(); uint64_t save; - for (uint32_t i = 0; i < cfg_count; i++ ) { - if (((1ULL << i) & pmc_mask) == 0) + for (uint32_t i = 0; i < cfg_count; i++) { + if (((1ULL << i) & pmc_mask) == 0) { continue; + } /* need to save and restore counter since it resets when reconfigured */ save = IA32_PMCx(i); @@ -421,7 +430,7 @@ kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask) for (uint32_t i = 0; i < cfg_count; ++i) { if ((1ULL << i) & pmc_mask) { *it_counterv++ = CONFIGURABLE_SHADOW(i) + - (IA32_PMCx(i) - CONFIGURABLE_RELOAD(i)); + (IA32_PMCx(i) - CONFIGURABLE_RELOAD(i)); } } @@ -439,10 +448,9 @@ kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask) */ for (uint32_t i = 0; i < cfg_count; ++i) { if (((1ULL << i) & pmc_mask) && - ((1ULL << i) & status)) - { + ((1ULL << i) & status)) { *it_counterv++ = CONFIGURABLE_SHADOW(i) + - (kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i); + (kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i); } } @@ -453,7 +461,7 @@ static void kpc_get_curcpu_counters_mp_call(void *args) { struct kpc_get_counters_remote *handler = args; - int offset=0, r=0; + int offset = 0, r = 0; assert(handler); assert(handler->buf); @@ -479,8 +487,9 @@ kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf) enabled = ml_set_interrupts_enabled(FALSE); - if (curcpu) + if (curcpu) { *curcpu = current_processor()->cpu_id; + } mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_get_curcpu_counters_mp_call, &hdl); ml_set_interrupts_enabled(enabled); @@ -491,7 +500,6 @@ kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf) static void kpc_set_config_mp_call(void *vmp_config) { - struct kpc_config_remote *mp_config = vmp_config; kpc_config_t *new_config = NULL; uint32_t classes = 0, count = 0; @@ -503,9 +511,8 @@ kpc_set_config_mp_call(void *vmp_config) new_config = mp_config->configv; enabled = ml_set_interrupts_enabled(FALSE); - - if (classes & KPC_CLASS_FIXED_MASK) - { + + if (classes & KPC_CLASS_FIXED_MASK) { kpc_set_fixed_config(&new_config[count]); count += kpc_get_config_count(KPC_CLASS_FIXED_MASK); } @@ -547,11 +554,13 @@ kpc_set_reload_mp_call(void *vmp_config) count = kpc_configurable_count(); for (uint32_t i = 0; i < count; ++i) { /* ignore the counter */ - if (((1ULL << i) & mp_config->pmc_mask) == 0) + if (((1ULL << i) & mp_config->pmc_mask) == 0) { continue; + } - if (*new_period == 0) + if (*new_period == 0) { *new_period = kpc_configurable_max(); + } CONFIGURABLE_RELOAD(i) = max - *new_period; @@ -622,7 +631,8 @@ kpc_set_config_arch(struct 
kpc_config_remote *mp_config) } /* PMI stuff */ -void kpc_pmi_handler(__unused x86_saved_state_t *state) +void +kpc_pmi_handler(__unused x86_saved_state_t *state) { uint64_t status, extra; uint32_t ctr; @@ -638,12 +648,13 @@ void kpc_pmi_handler(__unused x86_saved_state_t *state) extra = kpc_reload_fixed(ctr); FIXED_SHADOW(ctr) - += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra; + += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra; BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr)); - if (FIXED_ACTIONID(ctr)) + if (FIXED_ACTIONID(ctr)) { kpc_sample_kperf(FIXED_ACTIONID(ctr)); + } } } #endif @@ -653,16 +664,17 @@ void kpc_pmi_handler(__unused x86_saved_state_t *state) extra = kpc_reload_configurable(ctr); CONFIGURABLE_SHADOW(ctr) - += kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra; + += kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra; /* kperf can grab the PMCs when it samples so we need to make sure the overflow * bits are in the correct state before the call to kperf_sample */ wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr); BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr)); - - if (CONFIGURABLE_ACTIONID(ctr)) + + if (CONFIGURABLE_ACTIONID(ctr)) { kpc_sample_kperf(CONFIGURABLE_ACTIONID(ctr)); + } } } @@ -690,4 +702,3 @@ kpc_get_pmu_version(void) return KPC_PMU_ERROR; } - diff --git a/osfmk/x86_64/loose_ends.c b/osfmk/x86_64/loose_ends.c index 07c07327d..cb63ffcad 100644 --- a/osfmk/x86_64/loose_ends.c +++ b/osfmk/x86_64/loose_ends.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2013 Apple Inc. All rights reserved. + * Copyright (c) 2000-2019 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,34 +22,34 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990,1989 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
- * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -76,12 +76,15 @@ #include #include +#include #include #if !MACH_KDP #include #endif /* !MACH_KDP */ +#include + #include #if CONFIG_DTRACE #include @@ -99,24 +102,24 @@ #undef bcopy /* XXX - should be gone from here */ -extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys); -extern void flush_dcache64(addr64_t addr, unsigned count, int phys); -extern boolean_t phys_page_exists(ppnum_t); -extern void bcopy_no_overwrite(const char *from, char *to,vm_size_t bytes); -extern void pmap_set_reference(ppnum_t pn); -extern void mapping_set_mod(ppnum_t pa); -extern void mapping_set_ref(ppnum_t pn); - -extern void ovbcopy(const char *from, - char *to, - vm_size_t nbytes); +extern void invalidate_icache64(addr64_t addr, unsigned cnt, int phys); +extern void flush_dcache64(addr64_t addr, unsigned count, int phys); +extern boolean_t phys_page_exists(ppnum_t); +extern void bcopy_no_overwrite(const char *from, char *to, vm_size_t bytes); +extern void pmap_set_reference(ppnum_t pn); +extern void mapping_set_mod(ppnum_t pa); +extern void mapping_set_ref(ppnum_t pn); + +extern void ovbcopy(const char *from, + char *to, + vm_size_t nbytes); void machine_callstack(uintptr_t *buf, vm_size_t callstack_max); #define value_64bit(value) ((value) & 0xFFFFFFFF00000000ULL) #define low32(x) ((unsigned int)((x) & 0x00000000FFFFFFFFULL)) -#define INT_SIZE (BYTE_SIZE * sizeof (int)) +#define INT_SIZE (BYTE_SIZE * sizeof (int)) /* * Set indicated bit in bit string. 
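These bit-string helpers treat memory as an array of ints of
INT_SIZE bits each (32 on x86_64). A hedged sketch of the setbit
operation this comment describes, not the exact kernel definition:

	static inline void
	setbit_sketch(int *s, int bitno)
	{
		s[bitno / 32] |= 1 << (bitno % 32);
	}
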
@@ -153,15 +156,18 @@ ffsbit(int *s) { int offset; - for (offset = 0; !*s; offset += (int)INT_SIZE, ++s); + for (offset = 0; !*s; offset += (int)INT_SIZE, ++s) { + ; + } return offset + __builtin_ctz(*s); } int ffs(unsigned int mask) { - if (mask == 0) + if (mask == 0) { return 0; + } /* * NOTE: cannot use __builtin_ffs because it generates a call to @@ -173,8 +179,9 @@ ffs(unsigned int mask) int ffsll(unsigned long long mask) { - if (mask == 0) + if (mask == 0) { return 0; + } /* * NOTE: cannot use __builtin_ffsll because it generates a call to @@ -189,33 +196,35 @@ ffsll(unsigned long long mask) int fls(unsigned int mask) { - if (mask == 0) + if (mask == 0) { return 0; + } - return (sizeof (mask) << 3) - __builtin_clz(mask); + return (sizeof(mask) << 3) - __builtin_clz(mask); } int flsll(unsigned long long mask) { - if (mask == 0) + if (mask == 0) { return 0; + } - return (sizeof (mask) << 3) - __builtin_clzll(mask); + return (sizeof(mask) << 3) - __builtin_clzll(mask); } void bzero_phys_nc( - addr64_t src64, - uint32_t bytes) + addr64_t src64, + uint32_t bytes) { - bzero_phys(src64,bytes); + bzero_phys(src64, bytes); } void bzero_phys( - addr64_t src64, - uint32_t bytes) + addr64_t src64, + uint32_t bytes) { bzero(PHYSMAP_PTOV(src64), bytes); } @@ -227,14 +236,14 @@ bzero_phys( void bcopy_phys( - addr64_t src64, - addr64_t dst64, - vm_size_t bytes) + addr64_t src64, + addr64_t dst64, + vm_size_t bytes) { /* Not necessary for K64 - but ensure we stay within a page */ - if (((((uint32_t)src64 & (NBPG-1)) + bytes) > NBPG) || - ((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) { - panic("bcopy_phys alignment"); + if (((((uint32_t)src64 & (NBPG - 1)) + bytes) > NBPG) || + ((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) { + panic("bcopy_phys alignment"); } bcopy(PHYSMAP_PTOV(src64), PHYSMAP_PTOV(dst64), bytes); } @@ -245,41 +254,42 @@ bcopy_phys( int apply_func_phys( - addr64_t dst64, - vm_size_t bytes, - int (*func)(void * buffer, vm_size_t bytes, void * arg), - void * arg) + addr64_t dst64, + vm_size_t bytes, + int (*func)(void * buffer, vm_size_t bytes, void * arg), + void * arg) { /* Not necessary for K64 - but ensure we stay within a page */ - if (((((uint32_t)dst64 & (NBPG-1)) + bytes) > NBPG) ) { - panic("apply_func_phys alignment"); + if (((((uint32_t)dst64 & (NBPG - 1)) + bytes) > NBPG)) { + panic("apply_func_phys alignment"); } return func(PHYSMAP_PTOV(dst64), bytes, arg); } -/* - * ovbcopy - like bcopy, but recognizes overlapping ranges and handles +/* + * ovbcopy - like bcopy, but recognizes overlapping ranges and handles * them correctly. */ void ovbcopy( - const char *from, - char *to, - vm_size_t bytes) /* num bytes to copy */ + const char *from, + char *to, + vm_size_t bytes) /* num bytes to copy */ { /* Assume that bcopy copies left-to-right (low addr first). */ - if (from + bytes <= to || to + bytes <= from || to == from) - bcopy_no_overwrite(from, to, bytes); /* non-overlapping or no-op*/ - else if (from > to) - bcopy_no_overwrite(from, to, bytes); /* overlapping but OK */ - else { + if (from + bytes <= to || to + bytes <= from || to == from) { + bcopy_no_overwrite(from, to, bytes); /* non-overlapping or no-op*/ + } else if (from > to) { + bcopy_no_overwrite(from, to, bytes); /* overlapping but OK */ + } else { /* to > from: overlapping, and must copy right-to-left. 
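In other words, the backward case applies exactly when the ranges
overlap with the destination above the source; as a one-line
predicate equivalent to the branches above:

	int copy_backward = (to > from) && (from + bytes > to);

When copy_backward is true, both pointers are moved to the last
byte and the loop below copies high-to-low, so no source byte is
clobbered before it is read.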
*/ from += bytes - 1; to += bytes - 1; - while (bytes-- > 0) + while (bytes-- > 0) { *to-- = *from--; + } } } @@ -289,127 +299,170 @@ ovbcopy( */ uint64_t reportphyreaddelayabs; +uint64_t reportphywritedelayabs; uint32_t reportphyreadosbt; +uint32_t reportphywriteosbt; #if DEVELOPMENT || DEBUG uint32_t phyreadpanic = 1; +uint32_t phywritepanic = 1; +uint64_t tracephyreaddelayabs = 50 * NSEC_PER_USEC; +uint64_t tracephywritedelayabs = 50 * NSEC_PER_USEC; +uint64_t simulate_stretched_io = 0; #else uint32_t phyreadpanic = 0; +uint32_t phywritepanic = 0; +uint64_t tracephyreaddelayabs = 0; +uint64_t tracephywritedelayabs = 0; #endif __private_extern__ uint64_t -ml_phys_read_data(pmap_paddr_t paddr, int size) { +ml_phys_read_data(uint64_t paddr, int size) +{ uint64_t result = 0; unsigned char s1; unsigned short s2; boolean_t istate = TRUE, timeread = FALSE; uint64_t sabs = 0, eabs; - if (__improbable(!physmap_enclosed(paddr))) + if (__improbable(!physmap_enclosed(paddr))) { panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr); + } if (__improbable(reportphyreaddelayabs != 0)) { istate = ml_set_interrupts_enabled(FALSE); sabs = mach_absolute_time(); timeread = TRUE; } +#if DEVELOPMENT || DEBUG + if (__improbable(timeread && simulate_stretched_io)) { + sabs -= simulate_stretched_io; + } +#endif /* x86_64 DEVELOPMENT || DEBUG */ - switch (size) { - case 1: + switch (size) { + case 1: s1 = *(volatile unsigned char *)PHYSMAP_PTOV(paddr); result = s1; break; - case 2: + case 2: s2 = *(volatile unsigned short *)PHYSMAP_PTOV(paddr); result = s2; break; - case 4: + case 4: result = *(volatile unsigned int *)PHYSMAP_PTOV(paddr); break; case 8: result = *(volatile unsigned long long *)PHYSMAP_PTOV(paddr); break; default: - panic("Invalid size %d for ml_phys_read_data\n", size); + panic("Invalid size %d for ml_phys_read_data", size); break; - } + } if (__improbable(timeread == TRUE)) { eabs = mach_absolute_time(); - (void)ml_set_interrupts_enabled(istate); + +#if DEVELOPMENT || DEBUG + iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs); +#endif if (__improbable((eabs - sabs) > reportphyreaddelayabs)) { + (void)ml_set_interrupts_enabled(istate); + if (phyreadpanic && (machine_timeout_suspended() == FALSE)) { panic_io_port_read(); - panic("Read from physical addr 0x%llx took %llu ns, result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", paddr, (eabs - sabs), result, sabs, eabs, reportphyreaddelayabs); + panic("Read from physical addr 0x%llx took %llu ns, " + "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", + paddr, (eabs - sabs), result, sabs, eabs, + reportphyreaddelayabs); } if (reportphyreadosbt) { - OSReportWithBacktrace("ml_phys_read_data took %lluus\n", (eabs - sabs) / 1000); + OSReportWithBacktrace("ml_phys_read_data took %lluus", + (eabs - sabs) / NSEC_PER_USEC); } #if CONFIG_DTRACE - DTRACE_PHYSLAT3(physread, uint64_t, (eabs - sabs), - pmap_paddr_t, paddr, uint32_t, size); -#endif + DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs), + uint64_t, paddr, uint32_t, size, uint64_t, result); +#endif /* CONFIG_DTRACE */ + } else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) { + KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ), + (eabs - sabs), sabs, paddr, result); + + (void)ml_set_interrupts_enabled(istate); + } else { + (void)ml_set_interrupts_enabled(istate); } } - return result; + return result; } static unsigned long long -ml_phys_read_long_long(pmap_paddr_t paddr) { +ml_phys_read_long_long(uint64_t paddr) +{ return 
ml_phys_read_data(paddr, 8); } -unsigned int ml_phys_read( vm_offset_t paddr) +unsigned int +ml_phys_read(vm_offset_t paddr) { - return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr, 4); + return (unsigned int) ml_phys_read_data(paddr, 4); } -unsigned int ml_phys_read_word(vm_offset_t paddr) { - - return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr, 4); +unsigned int +ml_phys_read_word(vm_offset_t paddr) +{ + return (unsigned int) ml_phys_read_data(paddr, 4); } -unsigned int ml_phys_read_64(addr64_t paddr64) +unsigned int +ml_phys_read_64(addr64_t paddr64) { - return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr64, 4); + return (unsigned int) ml_phys_read_data(paddr64, 4); } -unsigned int ml_phys_read_word_64(addr64_t paddr64) +unsigned int +ml_phys_read_word_64(addr64_t paddr64) { - return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr64, 4); + return (unsigned int) ml_phys_read_data(paddr64, 4); } -unsigned int ml_phys_read_half(vm_offset_t paddr) +unsigned int +ml_phys_read_half(vm_offset_t paddr) { - return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr, 2); + return (unsigned int) ml_phys_read_data(paddr, 2); } -unsigned int ml_phys_read_half_64(addr64_t paddr64) +unsigned int +ml_phys_read_half_64(addr64_t paddr64) { - return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr64, 2); + return (unsigned int) ml_phys_read_data(paddr64, 2); } -unsigned int ml_phys_read_byte(vm_offset_t paddr) +unsigned int +ml_phys_read_byte(vm_offset_t paddr) { - return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr, 1); + return (unsigned int) ml_phys_read_data(paddr, 1); } -unsigned int ml_phys_read_byte_64(addr64_t paddr64) +unsigned int +ml_phys_read_byte_64(addr64_t paddr64) { - return (unsigned int) ml_phys_read_data((pmap_paddr_t)paddr64, 1); + return (unsigned int) ml_phys_read_data(paddr64, 1); } -unsigned long long ml_phys_read_double(vm_offset_t paddr) +unsigned long long +ml_phys_read_double(vm_offset_t paddr) { - return ml_phys_read_long_long((pmap_paddr_t)paddr); + return ml_phys_read_long_long(paddr); } -unsigned long long ml_phys_read_double_64(addr64_t paddr64) +unsigned long long +ml_phys_read_double_64(addr64_t paddr64) { - return ml_phys_read_long_long((pmap_paddr_t)paddr64); + return ml_phys_read_long_long(paddr64); } @@ -418,87 +471,323 @@ unsigned long long ml_phys_read_double_64(addr64_t paddr64) * Write data to a physical address. Memory should not be cache inhibited. 
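The rewritten path below mirrors the timed-read path: interrupts
are disabled, the store through the physmap window is bracketed
with mach_absolute_time(), and the latency is checked against
reportphywritedelayabs. A condensed, hedged sketch of that pattern
(the real code also emits iotrace, KDBG and DTrace records, and
handles all four access sizes):

	boolean_t istate = ml_set_interrupts_enabled(FALSE);
	uint64_t sabs = mach_absolute_time();
	*(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data;
	uint64_t eabs = mach_absolute_time();
	(void)ml_set_interrupts_enabled(istate);
	if (__improbable((eabs - sabs) > reportphywritedelayabs))
		panic("phys write exceeded latency ceiling");
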
*/ -static inline void -ml_phys_write_data(pmap_paddr_t paddr, unsigned long data, int size) +__private_extern__ void +ml_phys_write_data(uint64_t paddr, unsigned long long data, int size) { - if (!physmap_enclosed(paddr)) + boolean_t istate = TRUE, timewrite = FALSE; + uint64_t sabs = 0, eabs; + + if (__improbable(!physmap_enclosed(paddr))) { panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr); + } - switch (size) { - case 1: - *(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data; - break; - case 2: - *(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data; - break; - case 4: - *(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data; - break; + if (__improbable(reportphywritedelayabs != 0)) { + istate = ml_set_interrupts_enabled(FALSE); + sabs = mach_absolute_time(); + timewrite = TRUE; + } +#if DEVELOPMENT || DEBUG + if (__improbable(timewrite && simulate_stretched_io)) { + sabs -= simulate_stretched_io; + } +#endif /* x86_64 DEVELOPMENT || DEBUG */ + + switch (size) { + case 1: + *(volatile unsigned char *)PHYSMAP_PTOV(paddr) = (unsigned char)data; + break; + case 2: + *(volatile unsigned short *)PHYSMAP_PTOV(paddr) = (unsigned short)data; + break; + case 4: + *(volatile unsigned int *)PHYSMAP_PTOV(paddr) = (unsigned int)data; + break; + case 8: + *(volatile unsigned long *)PHYSMAP_PTOV(paddr) = data; + break; default: - panic("Invalid size %d for ml_phys_write_data\n", size); + panic("Invalid size %d for ml_phys_write_data", size); break; - } + } + + if (__improbable(timewrite == TRUE)) { + eabs = mach_absolute_time(); + +#if DEVELOPMENT || DEBUG + iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs); +#endif + + if (__improbable((eabs - sabs) > reportphywritedelayabs)) { + (void)ml_set_interrupts_enabled(istate); + + if (phywritepanic && (machine_timeout_suspended() == FALSE)) { + panic_io_port_read(); + panic("Write to physical addr 0x%llx took %llu ns, " + "data: 0x%llx (start: %llu, end: %llu), ceiling: %llu", + paddr, (eabs - sabs), data, sabs, eabs, + reportphywritedelayabs); + } + + if (reportphywriteosbt) { + OSReportWithBacktrace("ml_phys_write_data (%p, 0x%llx) " + "took %lluus", + paddr, data, (eabs - sabs) / NSEC_PER_USEC); + } +#if CONFIG_DTRACE + DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs), + uint64_t, paddr, uint32_t, size, uint64_t, data); +#endif /* CONFIG_DTRACE */ + } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) { + KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE), + (eabs - sabs), sabs, paddr, data); + + (void)ml_set_interrupts_enabled(istate); + } else { + (void)ml_set_interrupts_enabled(istate); + } + } } -static void -ml_phys_write_long_long(pmap_paddr_t paddr, unsigned long long data) +void +ml_phys_write_byte(vm_offset_t paddr, unsigned int data) { - if (!physmap_enclosed(paddr)) - panic("%s: 0x%llx out of bounds\n", __FUNCTION__, paddr); + ml_phys_write_data(paddr, data, 1); +} + +void +ml_phys_write_byte_64(addr64_t paddr64, unsigned int data) +{ + ml_phys_write_data(paddr64, data, 1); +} + +void +ml_phys_write_half(vm_offset_t paddr, unsigned int data) +{ + ml_phys_write_data(paddr, data, 2); +} + +void +ml_phys_write_half_64(addr64_t paddr64, unsigned int data) +{ + ml_phys_write_data(paddr64, data, 2); +} + +void +ml_phys_write(vm_offset_t paddr, unsigned int data) +{ + ml_phys_write_data(paddr, data, 4); +} - *(volatile unsigned long long *)PHYSMAP_PTOV(paddr) = data; +void +ml_phys_write_64(addr64_t paddr64, unsigned int data) +{ + 
ml_phys_write_data(paddr64, data, 4); } -void ml_phys_write_byte(vm_offset_t paddr, unsigned int data) +void +ml_phys_write_word(vm_offset_t paddr, unsigned int data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 1); + ml_phys_write_data(paddr, data, 4); } -void ml_phys_write_byte_64(addr64_t paddr64, unsigned int data) +void +ml_phys_write_word_64(addr64_t paddr64, unsigned int data) +{ + ml_phys_write_data(paddr64, data, 4); +} + +void +ml_phys_write_double(vm_offset_t paddr, unsigned long long data) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 1); + ml_phys_write_data(paddr, data, 8); } -void ml_phys_write_half(vm_offset_t paddr, unsigned int data) +void +ml_phys_write_double_64(addr64_t paddr64, unsigned long long data) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 2); + ml_phys_write_data(paddr64, data, 8); } -void ml_phys_write_half_64(addr64_t paddr64, unsigned int data) +uint32_t +ml_port_io_read(uint16_t ioport, int size) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 2); + uint32_t result = 0; + + uint64_t sabs, eabs; + boolean_t istate, timeread = FALSE; + + if (__improbable(reportphyreaddelayabs != 0)) { + istate = ml_set_interrupts_enabled(FALSE); + sabs = mach_absolute_time(); + timeread = TRUE; + } + +#if DEVELOPMENT || DEBUG + if (__improbable(timeread && simulate_stretched_io)) { + sabs -= simulate_stretched_io; + } +#endif /* x86_64 DEVELOPMENT || DEBUG */ + + switch (size) { + case 1: + result = inb(ioport); + break; + case 2: + result = inw(ioport); + break; + case 4: + result = inl(ioport); + break; + default: + panic("Invalid size %d for ml_port_io_read(0x%x)", size, (unsigned)ioport); + break; + } + + if (__improbable(timeread == TRUE)) { + eabs = mach_absolute_time(); + +#if DEVELOPMENT || DEBUG + iotrace(IOTRACE_PORTIO_READ, 0, ioport, size, result, sabs, eabs - sabs); +#endif + + if (__improbable((eabs - sabs) > reportphyreaddelayabs)) { + (void)ml_set_interrupts_enabled(istate); + + if (phyreadpanic && (machine_timeout_suspended() == FALSE)) { + panic_io_port_read(); + panic("Read from IO port 0x%x took %llu ns, " + "result: 0x%x (start: %llu, end: %llu), ceiling: %llu", + ioport, (eabs - sabs), result, sabs, eabs, + reportphyreaddelayabs); + } + + if (reportphyreadosbt) { + OSReportWithBacktrace("ml_port_io_read(0x%x) took %lluus", + ioport, (eabs - sabs) / NSEC_PER_USEC); + } +#if CONFIG_DTRACE + DTRACE_PHYSLAT3(portioread, uint64_t, (eabs - sabs), + uint16_t, ioport, uint32_t, size); +#endif /* CONFIG_DTRACE */ + } else if (__improbable(tracephyreaddelayabs > 0 && (eabs - sabs) > tracephyreaddelayabs)) { + KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_READ), + (eabs - sabs), sabs, ioport, result); + + (void)ml_set_interrupts_enabled(istate); + } else { + (void)ml_set_interrupts_enabled(istate); + } + } + + return result; } -void ml_phys_write(vm_offset_t paddr, unsigned int data) +void +ml_port_io_write(uint16_t ioport, uint32_t val, int size) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 4); + uint64_t sabs, eabs; + boolean_t istate, timewrite = FALSE; + + if (__improbable(reportphywritedelayabs != 0)) { + istate = ml_set_interrupts_enabled(FALSE); + sabs = mach_absolute_time(); + timewrite = TRUE; + } +#if DEVELOPMENT || DEBUG + if (__improbable(timewrite && simulate_stretched_io)) { + sabs -= simulate_stretched_io; + } +#endif /* x86_64 DEVELOPMENT || DEBUG */ + + switch (size) { + case 1: + outb(ioport, (uint8_t)val); + break; + case 2: + outw(ioport, (uint16_t)val); + break; + case 4: + outl(ioport, (uint32_t)val); + break; + 
default: + panic("Invalid size %d for ml_port_io_write(0x%x)", size, (unsigned)ioport); + break; + } + + if (__improbable(timewrite == TRUE)) { + eabs = mach_absolute_time(); + +#if DEVELOPMENT || DEBUG + iotrace(IOTRACE_PORTIO_WRITE, 0, ioport, size, val, sabs, eabs - sabs); +#endif + + if (__improbable((eabs - sabs) > reportphywritedelayabs)) { + (void)ml_set_interrupts_enabled(istate); + + if (phywritepanic && (machine_timeout_suspended() == FALSE)) { + panic_io_port_read(); + panic("Write to IO port 0x%x took %llu ns, val: 0x%x" + " (start: %llu, end: %llu), ceiling: %llu", + ioport, (eabs - sabs), val, sabs, eabs, + reportphywritedelayabs); + } + + if (reportphywriteosbt) { + OSReportWithBacktrace("ml_port_io_write(0x%x, %d, 0x%llx) " + "took %lluus", + ioport, size, val, (eabs - sabs) / NSEC_PER_USEC); + } + +#if CONFIG_DTRACE + DTRACE_PHYSLAT4(portiowrite, uint64_t, (eabs - sabs), + uint16_t, ioport, uint32_t, size, uint64_t, val); +#endif /* CONFIG_DTRACE */ + } else if (__improbable(tracephywritedelayabs > 0 && (eabs - sabs) > tracephywritedelayabs)) { + KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PORTIO_WRITE), + (eabs - sabs), sabs, ioport, val); + + (void)ml_set_interrupts_enabled(istate); + } else { + (void)ml_set_interrupts_enabled(istate); + } + } } -void ml_phys_write_64(addr64_t paddr64, unsigned int data) +uint8_t +ml_port_io_read8(uint16_t ioport) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); + return ml_port_io_read(ioport, 1); } -void ml_phys_write_word(vm_offset_t paddr, unsigned int data) +uint16_t +ml_port_io_read16(uint16_t ioport) { - ml_phys_write_data((pmap_paddr_t)paddr, data, 4); + return ml_port_io_read(ioport, 2); } -void ml_phys_write_word_64(addr64_t paddr64, unsigned int data) +uint32_t +ml_port_io_read32(uint16_t ioport) { - ml_phys_write_data((pmap_paddr_t)paddr64, data, 4); + return ml_port_io_read(ioport, 4); } -void ml_phys_write_double(vm_offset_t paddr, unsigned long long data) +void +ml_port_io_write8(uint16_t ioport, uint8_t val) { - ml_phys_write_long_long((pmap_paddr_t)paddr, data); + ml_port_io_write(ioport, val, 1); } -void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data) +void +ml_port_io_write16(uint16_t ioport, uint16_t val) { - ml_phys_write_long_long((pmap_paddr_t)paddr64, data); + ml_port_io_write(ioport, val, 2); } +void +ml_port_io_write32(uint16_t ioport, uint32_t val) +{ + ml_port_io_write(ioport, val, 4); +} /* PCI config cycle probing * @@ -511,12 +800,13 @@ void ml_phys_write_double_64(addr64_t paddr64, unsigned long long data) boolean_t ml_probe_read(vm_offset_t paddr, unsigned int *val) { - if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) - return FALSE; + if ((PAGE_SIZE - (paddr & PAGE_MASK)) < 4) { + return FALSE; + } - *val = ml_phys_read((pmap_paddr_t)paddr); + *val = ml_phys_read(paddr); - return TRUE; + return TRUE; } /* @@ -525,33 +815,37 @@ ml_probe_read(vm_offset_t paddr, unsigned int *val) * have a machine check here. So we have to be able to handle that. 
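A probe is also refused when the 4-byte read would cross a page
boundary, since the two halves could land in physical pages with
different safety properties; as a sketch of that guard, identical
in spirit to the check in ml_probe_read above:

	if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4)
		return FALSE;
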
* We assume that machine checks are enabled both in MSR and HIDs */ -boolean_t +boolean_t ml_probe_read_64(addr64_t paddr64, unsigned int *val) { - if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) - return FALSE; + if ((PAGE_SIZE - (paddr64 & PAGE_MASK)) < 4) { + return FALSE; + } - *val = ml_phys_read_64((pmap_paddr_t)paddr64); - return TRUE; + *val = ml_phys_read_64(paddr64); + return TRUE; } #undef bcmp -int bcmp( - const void *pa, - const void *pb, - size_t len) +int +bcmp( + const void *pa, + const void *pb, + size_t len) { const char *a = (const char *)pa; const char *b = (const char *)pb; - if (len == 0) + if (len == 0) { return 0; + } - do - if (*a++ != *b++) + do{ + if (*a++ != *b++) { break; - while (--len); + } + } while (--len); return (int)len; } @@ -564,11 +858,12 @@ memcmp(const void *s1, const void *s2, size_t n) const unsigned char *p1 = s1, *p2 = s2; do { - if (*p1++ != *p2++) - return (*--p1 - *--p2); + if (*p1++ != *p2++) { + return *--p1 - *--p2; + } } while (--n != 0); } - return (0); + return 0; } #undef memmove @@ -592,43 +887,49 @@ strlen( { const char *ret = string; - while (*string++ != '\0') + while (*string++ != '\0') { continue; + } return string - 1 - ret; } -#if MACH_ASSERT +#if MACH_ASSERT /* * Machine-dependent routine to fill in an array with up to callstack_max * levels of return pc information. */ -void machine_callstack( - __unused uintptr_t *buf, - __unused vm_size_t callstack_max) +void +machine_callstack( + __unused uintptr_t *buf, + __unused vm_size_t callstack_max) { } -#endif /* MACH_ASSERT */ +#endif /* MACH_ASSERT */ -void fillPage(ppnum_t pa, unsigned int fill) +void +fillPage(ppnum_t pa, unsigned int fill) { - pmap_paddr_t src; + uint64_t src; int i; int cnt = PAGE_SIZE / sizeof(unsigned int); unsigned int *addr; src = i386_ptob(pa); - for (i = 0, addr = (unsigned int *)PHYSMAP_PTOV(src); i < cnt; i++) + for (i = 0, addr = (unsigned int *)PHYSMAP_PTOV(src); i < cnt; i++) { *addr++ = fill; + } } -static inline void __clflush(void *ptr) +static inline void +__clflush(void *ptr) { - __asm__ volatile("clflush (%0)" : : "r" (ptr)); + __asm__ volatile ("clflush (%0)" : : "r" (ptr)); } -void dcache_incoherent_io_store64(addr64_t pa, unsigned int count) +void +dcache_incoherent_io_store64(addr64_t pa, unsigned int count) { addr64_t linesize = cpuid_info()->cache_linesize; addr64_t bound = (pa + count + linesize - 1) & ~(linesize - 1); @@ -643,9 +944,10 @@ void dcache_incoherent_io_store64(addr64_t pa, unsigned int count) mfence(); } -void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count) +void +dcache_incoherent_io_flush64(addr64_t pa, unsigned int count) { - return(dcache_incoherent_io_store64(pa,count)); + return dcache_incoherent_io_store64(pa, count); } void @@ -653,10 +955,9 @@ flush_dcache64(addr64_t addr, unsigned count, int phys) { if (phys) { dcache_incoherent_io_flush64(addr, count); - } - else { + } else { uint64_t linesize = cpuid_info()->cache_linesize; - addr64_t bound = (addr + count + linesize -1) & ~(linesize - 1); + addr64_t bound = (addr + count + linesize - 1) & ~(linesize - 1); mfence(); while (addr < bound) { __clflush((void *) (uintptr_t) addr); @@ -668,8 +969,8 @@ flush_dcache64(addr64_t addr, unsigned count, int phys) void invalidate_icache64(__unused addr64_t addr, - __unused unsigned count, - __unused int phys) + __unused unsigned count, + __unused int phys) { } @@ -679,37 +980,38 @@ addr64_t vm_last_addr; void mapping_set_mod(ppnum_t pn) { - pmap_set_modify(pn); + pmap_set_modify(pn); } void mapping_set_ref(ppnum_t 
pn) { - pmap_set_reference(pn); + pmap_set_reference(pn); } -extern i386_cpu_info_t cpuid_cpu_info; +extern i386_cpu_info_t cpuid_cpu_info; void cache_flush_page_phys(ppnum_t pa) { - boolean_t istate; - unsigned char *cacheline_addr; - i386_cpu_info_t *cpuid_infop = cpuid_info(); - int cacheline_size; - int cachelines_to_flush; + boolean_t istate; + unsigned char *cacheline_addr; + i386_cpu_info_t *cpuid_infop = cpuid_info(); + int cacheline_size; + int cachelines_to_flush; cacheline_size = cpuid_infop->cache_linesize; - if (cacheline_size == 0) + if (cacheline_size == 0) { panic("cacheline_size=0 cpuid_infop=%p\n", cpuid_infop); - cachelines_to_flush = PAGE_SIZE/cacheline_size; + } + cachelines_to_flush = PAGE_SIZE / cacheline_size; mfence(); istate = ml_set_interrupts_enabled(FALSE); for (cacheline_addr = (unsigned char *)PHYSMAP_PTOV(i386_ptob(pa)); - cachelines_to_flush > 0; - cachelines_to_flush--, cacheline_addr += cacheline_size) { + cachelines_to_flush > 0; + cachelines_to_flush--, cacheline_addr += cacheline_size) { __clflush((void *) cacheline_addr); } @@ -728,12 +1030,14 @@ kdp_register_callout(kdp_callout_fn_t fn, void *arg) #endif #if !CONFIG_VMX -int host_vmxon(boolean_t exclusive __unused) +int +host_vmxon(boolean_t exclusive __unused) { return VMX_UNSUPPORTED; } -void host_vmxoff(void) +void +host_vmxoff(void) { return; } diff --git a/osfmk/x86_64/lowglobals.h b/osfmk/x86_64/lowglobals.h index 4ed3792b8..b19589fbc 100644 --- a/osfmk/x86_64/lowglobals.h +++ b/osfmk/x86_64/lowglobals.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,15 +22,15 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* - * Header files for the Low Memory Globals (lg) + * Header files for the Low Memory Globals (lg) */ -#ifndef _LOW_MEMORY_GLOBALS_H_ -#define _LOW_MEMORY_GLOBALS_H_ +#ifndef _LOW_MEMORY_GLOBALS_H_ +#define _LOW_MEMORY_GLOBALS_H_ #include #include @@ -38,35 +38,35 @@ #include #ifndef __x86_64__ -#error Wrong architecture - this file is meant for x86_64 +#error Wrong architecture - this file is meant for x86_64 #endif /* * Don't change these structures unless you change the corresponding assembly code * which is in lowmem_vectors.s */ - -#pragma pack(8) /* Make sure the structure stays as we defined it */ -typedef struct lowglo { - unsigned char lgVerCode[8]; /* 0xffffff8000002000 System verification code */ - uint64_t lgZero; /* 0xffffff8000002008 Double constant 0 */ - uint64_t lgStext; /* 0xffffff8000002010 Start of kernel text */ - uint64_t lgRsv018; /* 0xffffff8000002018 Reserved */ - uint64_t lgRsv020; /* 0xffffff8000002020 Reserved */ - uint64_t lgRsv028; /* 0xffffff8000002028 Reserved */ - uint64_t lgVersion; /* 0xffffff8000002030 Pointer to kernel version string */ - uint64_t lgRsv038[280]; /* 0xffffff8000002038 Reserved */ - uint64_t lgKmodptr; /* 0xffffff80000028f8 Pointer to kmod, debugging aid */ - uint64_t lgTransOff; /* 0xffffff8000002900 Pointer to kdp_trans_off, debugging aid */ - uint64_t lgReadIO; /* 0xffffff8000002908 Pointer to kdp_read_io, debugging aid */ - uint64_t lgDevSlot1; /* 0xffffff8000002910 For developer use */ - uint64_t lgDevSlot2; /* 0xffffff8000002918 For developer use */ - uint64_t lgOSVersion; /* 0xffffff8000002920 Pointer to OS version string */ - uint64_t lgRebootFlag; /* 0xffffff8000002928 Pointer to debugger reboot trigger */ - uint64_t lgManualPktAddr; /* 0xffffff8000002930 Pointer to manual packet structure */ +#pragma pack(8) /* Make sure the structure stays as we defined it */ +typedef struct lowglo { + unsigned char lgVerCode[8]; /* 0xffffff8000002000 System verification code */ + uint64_t lgZero; /* 0xffffff8000002008 Double constant 0 */ + uint64_t lgStext; /* 0xffffff8000002010 Start of kernel text */ + uint64_t lgRsv018; /* 0xffffff8000002018 Reserved */ + uint64_t lgRsv020; /* 0xffffff8000002020 Reserved */ + uint64_t lgRsv028; /* 0xffffff8000002028 Reserved */ + uint64_t lgVersion; /* 0xffffff8000002030 Pointer to kernel version string */ + uint64_t lgRsv038[280]; /* 0xffffff8000002038 Reserved */ + uint64_t lgKmodptr; /* 0xffffff80000028f8 Pointer to kmod, debugging aid */ + uint64_t lgTransOff; /* 0xffffff8000002900 Pointer to kdp_trans_off, debugging aid */ + uint64_t lgReadIO; /* 0xffffff8000002908 Pointer to kdp_read_io, debugging aid */ + uint64_t lgDevSlot1; /* 0xffffff8000002910 For developer use */ + uint64_t lgDevSlot2; /* 0xffffff8000002918 For developer use */ + uint64_t lgOSVersion; /* 0xffffff8000002920 Pointer to OS version string */ + uint64_t lgRebootFlag; /* 0xffffff8000002928 Pointer to debugger reboot trigger */ + uint64_t lgManualPktAddr; /* 0xffffff8000002930 Pointer to manual packet structure */ + uint64_t lgKdpJtagCoredumpAddr; /* 0xffffff8000002938 Pointer to kdp_jtag_coredump_t structure */ - uint64_t lgRsv938[217]; /* 0xffffff8000002938 Reserved - push to 1 page */ + uint64_t lgRsv940[216]; /* 0xffffff8000002940 Reserved - push to 1 page */ } lowglo; #pragma pack() extern lowglo lowGlo; diff --git a/osfmk/x86_64/lowmem_vectors.c b/osfmk/x86_64/lowmem_vectors.c index 09bd405e4..0ba690dcb 100644 --- a/osfmk/x86_64/lowmem_vectors.c +++ 
b/osfmk/x86_64/lowmem_vectors.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,35 +22,35 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_COPYRIGHT@ */ -/* +/* * Mach Operating System * Copyright (c) 1991,1990 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. 
*/ @@ -59,45 +59,46 @@ #include #include -/* +/* * on x86_64 the low mem vectors live here and get mapped to 0xffffff8000002000 at * system startup time */ -extern void *version; -extern void *kmod; -extern void *kdp_trans_off; -extern void *kdp_read_io; -extern void *osversion; -extern void *flag_kdp_trigger_reboot; -extern void *manual_pkt; +extern void *version; +extern void *kmod; +extern void *kdp_trans_off; +extern void *kdp_read_io; +extern void *osversion; +extern void *flag_kdp_trigger_reboot; +extern void *manual_pkt; +extern void *kdp_jtag_coredump; lowglo lowGlo __attribute__ ((aligned(PAGE_SIZE))) = { + .lgVerCode = { 'C', 'a', 't', 'f', 'i', 's', 'h', ' ' }, - .lgVerCode = { 'C','a','t','f','i','s','h',' ' }, - - .lgVersion = (uint64_t) &version, + .lgVersion = (uint64_t) &version, - .lgKmodptr = (uint64_t) &kmod, + .lgKmodptr = (uint64_t) &kmod, #if MACH_KDP - .lgTransOff = (uint64_t) &kdp_trans_off, - .lgReadIO = (uint64_t) &kdp_read_io, + .lgTransOff = (uint64_t) &kdp_trans_off, + .lgReadIO = (uint64_t) &kdp_read_io, #else - .lgTransOff = 0, - .lgReadIO = 0, + .lgTransOff = 0, + .lgReadIO = 0, #endif - .lgDevSlot1 = 0, - .lgDevSlot2 = 0, + .lgDevSlot1 = 0, + .lgDevSlot2 = 0, - .lgOSVersion = (uint64_t) &osversion, + .lgOSVersion = (uint64_t) &osversion, #if MACH_KDP - .lgRebootFlag = (uint64_t) &flag_kdp_trigger_reboot, - .lgManualPktAddr = (uint64_t) &manual_pkt, + .lgRebootFlag = (uint64_t) &flag_kdp_trigger_reboot, + .lgManualPktAddr = (uint64_t) &manual_pkt, #else - .lgRebootFlag = 0, - .lgManualPktAddr = 0, -#endif + .lgRebootFlag = 0, + .lgManualPktAddr = 0, +#endif + .lgKdpJtagCoredumpAddr = (uint64_t) &kdp_jtag_coredump }; diff --git a/osfmk/x86_64/machine_kpc.h b/osfmk/x86_64/machine_kpc.h index 954ac8def..16ca39df2 100644 --- a/osfmk/x86_64/machine_kpc.h +++ b/osfmk/x86_64/machine_kpc.h @@ -2,7 +2,7 @@ * Copyright (c) 2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _MACHINE_X86_64_KPC_H diff --git a/osfmk/x86_64/machine_remote_time.c b/osfmk/x86_64/machine_remote_time.c new file mode 100644 index 000000000..3c834c041 --- /dev/null +++ b/osfmk/x86_64/machine_remote_time.c @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2017 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#include +#include +#include +#include +#include + +void mach_bridge_send_timestamp(uint64_t timestamp); + +extern _Atomic uint32_t bt_init_flag; +extern lck_spin_t *bt_maintenance_lock; +extern void mach_bridge_timer_init(void); +extern uint32_t bt_enable_flag; + +/* + * Delay sending timestamps by certain interval to + * avoid overwriting sentinel values + */ +#define DELAY_INTERVAL_NS (50 * NSEC_PER_MSEC) +static uint64_t bt_delay_timestamp = 0; +static mach_bridge_regwrite_timestamp_func_t bridge_regwrite_timestamp_callback = NULL; + +/* + * This function should only be called by the kext + * responsible for sending timestamps across the link + */ +void +mach_bridge_register_regwrite_timestamp_callback(mach_bridge_regwrite_timestamp_func_t func) +{ + static uint64_t delay_amount = 0; + + if (!atomic_load(&bt_init_flag)) { + mach_bridge_timer_init(); + nanoseconds_to_absolutetime(DELAY_INTERVAL_NS, &delay_amount); + bt_init_flag = 1; + } + + lck_spin_lock(bt_maintenance_lock); + bridge_regwrite_timestamp_callback = func; + bt_enable_flag = (func != NULL) ? 1 : 0; + bt_delay_timestamp = mach_absolute_time() + delay_amount; + lck_spin_unlock(bt_maintenance_lock); +} + +void +mach_bridge_send_timestamp(uint64_t timestamp) +{ + LCK_SPIN_ASSERT(bt_maintenance_lock, LCK_ASSERT_OWNED); + + if (bt_delay_timestamp > 0) { + uint64_t now = mach_absolute_time(); + if (now < bt_delay_timestamp) { + return; + } + bt_delay_timestamp = 0; + } + + if (bridge_regwrite_timestamp_callback) { + bridge_regwrite_timestamp_callback(timestamp); + } +} diff --git a/osfmk/x86_64/machine_remote_time.h b/osfmk/x86_64/machine_remote_time.h new file mode 100644 index 000000000..818aa0d96 --- /dev/null +++ b/osfmk/x86_64/machine_remote_time.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. 
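*
* [Annotation, not part of the original patch] The .c file above gates
* timestamp forwarding on a registered callback and drops anything sent
* within DELAY_INTERVAL_NS (50 ms) of registration, so sentinel values on
* the far side of the link are not overwritten. A sketch of how a bridge
* kext might use the interface; my_regwrite_cb is a hypothetical function,
* not part of the patch:
*
*   static void
*   my_regwrite_cb(uint64_t timestamp)
*   {
*       // write `timestamp` into the bridge's hardware register
*   }
*
*   mach_bridge_register_regwrite_timestamp_callback(my_regwrite_cb);
*   ...
*   mach_bridge_register_regwrite_timestamp_callback(NULL);  // unregister
*
* Registering NULL clears bt_enable_flag, so mach_bridge_send_timestamp()
* becomes a no-op until a new callback is installed.
*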
+ * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +#ifndef MACHINE_X86_64_REMOTE_TIME_H +#define MACHINE_X86_64_REMOTE_TIME_H + +#include +#include + +__BEGIN_DECLS +typedef void (*mach_bridge_regwrite_timestamp_func_t)(uint64_t); +void mach_bridge_register_regwrite_timestamp_callback(mach_bridge_regwrite_timestamp_func_t func); +__END_DECLS + +#endif /* MACHINE_X86_64_REMOTE_TIME_H */ diff --git a/osfmk/x86_64/monotonic_x86_64.c b/osfmk/x86_64/monotonic_x86_64.c index 720560148..9a69f0805 100644 --- a/osfmk/x86_64/monotonic_x86_64.c +++ b/osfmk/x86_64/monotonic_x86_64.c @@ -33,9 +33,10 @@ #include #include /* static_assert, assert */ #include -#include +#include #include #include +#include /* * Sanity check the compiler. @@ -230,11 +231,11 @@ mt_pmi_x86_64(x86_saved_state_t *state) x86_saved_state64_t *state64 = saved_state64(state); bool user_mode = (state64->isf.cs & 0x3) ? true : false; KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1), - mt_microstackshot_ctr, user_mode); + mt_microstackshot_ctr, user_mode); mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx); } else if (mt_debug) { KDBG(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 2), - mt_microstackshot_ctr, i); + mt_microstackshot_ctr, i); } mtc->mtc_snaps[i] = mt_core_reset_values[i]; @@ -274,9 +275,15 @@ mt_microstackshot_start_arch(uint64_t period) return ENOTSUP; } + uint64_t reset_value = 0; + int ovf = os_sub_overflow(CTR_MAX, period, &reset_value); + if (ovf) { + return ERANGE; + } + mt_core_reset_values[mt_microstackshot_ctr] = CTR_MAX - period; mp_cpus_call(CPUMASK_ALL, ASYNC, mt_microstackshot_start_remote, - NULL); + NULL); return 0; } @@ -306,5 +313,5 @@ struct mt_device mt_devices[] = { }; static_assert( - (sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS, - "MT_NDEVS macro should be same as the length of mt_devices"); + (sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS, + "MT_NDEVS macro should be same as the length of mt_devices"); diff --git a/osfmk/x86_64/pmap.c b/osfmk/x86_64/pmap.c index 8be1ce0de..50557b010 100644 --- a/osfmk/x86_64/pmap.c +++ b/osfmk/x86_64/pmap.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
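*
* [Annotation, not part of the original patch] Note the hardening added to
* mt_microstackshot_start_arch() above: the PMI reset value is
* CTR_MAX - period, and the new os_sub_overflow() call rejects any period
* larger than CTR_MAX before the subtraction can wrap:
*
*   uint64_t reset_value = 0;
*   if (os_sub_overflow(CTR_MAX, period, &reset_value)) {
*       return ERANGE;          // period > CTR_MAX would underflow
*   }
*
* os_sub_overflow() returns nonzero when the subtraction overflows, so a
* caller asking for an impossible sampling period now gets ERANGE instead
* of programming a wrapped counter value into every CPU.
*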
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -32,24 +32,24 @@ * Mach Operating System * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University * All Rights Reserved. - * + * * Permission to use, copy, modify and distribute this software and its * documentation is hereby granted, provided that both the copyright * notice and this permission notice appear in all copies of the * software, derivative works or modified versions, and any portions * thereof, and that both notices appear in supporting documentation. - * + * * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. - * + * * Carnegie Mellon requests users of this software to return to - * + * * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU * School of Computer Science * Carnegie Mellon University * Pittsburgh PA 15213-3890 - * + * * any improvements or extensions that they make and grant Carnegie Mellon * the rights to redistribute these changes. */ @@ -116,7 +116,7 @@ #include #include -#include /* prototyping */ +#include /* prototyping */ #include #include #include @@ -151,14 +151,14 @@ int pmap_stats_assert = 1; #endif /* MACH_ASSERT */ #ifdef IWANTTODEBUG -#undef DEBUG +#undef DEBUG #define DEBUG 1 #define POSTCODE_DELAY 1 #include #endif /* IWANTTODEBUG */ -#ifdef PMAP_DEBUG -#define DBG(x...) kprintf("DBG: " x) +#ifdef PMAP_DEBUG +#define DBG(x...) kprintf("DBG: " x) #else #define DBG(x...) #endif @@ -168,42 +168,44 @@ int pmap_stats_assert = 1; char pmap_cpu_data_assert[(((offsetof(cpu_data_t, cpu_tlb_invalid) - offsetof(cpu_data_t, cpu_active_cr3)) == 8) && (offsetof(cpu_data_t, cpu_active_cr3) % 64 == 0)) ? 1 : -1]; boolean_t pmap_trace = FALSE; -boolean_t no_shared_cr3 = DEBUG; /* TRUE for DEBUG by default */ +boolean_t no_shared_cr3 = DEBUG; /* TRUE for DEBUG by default */ -int nx_enabled = 1; /* enable no-execute protection -- set during boot */ +#if DEVELOPMENT || DEBUG +int nx_enabled = 1; /* enable no-execute protection -- set during boot */ +#else +const int nx_enabled = 1; +#endif #if DEBUG || DEVELOPMENT -int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */ -int allow_stack_exec = 0; /* No apps may execute from the stack by default */ +int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */ +int allow_stack_exec = 0; /* No apps may execute from the stack by default */ #else /* DEBUG || DEVELOPMENT */ -const int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */ -const int allow_stack_exec = 0; /* No apps may execute from the stack by default */ +const int allow_data_exec = VM_ABI_32; /* 32-bit apps may execute data by default, 64-bit apps may not */ +const int allow_stack_exec = 0; /* No apps may execute from the stack by default */ #endif /* DEBUG || DEVELOPMENT */ -const boolean_t cpu_64bit = TRUE; /* Mais oui! 
*/ - uint64_t max_preemption_latency_tsc = 0; pv_hashed_entry_t *pv_hash_table; /* hash lists */ uint32_t npvhashmask = 0, npvhashbuckets = 0; -pv_hashed_entry_t pv_hashed_free_list = PV_HASHED_ENTRY_NULL; -pv_hashed_entry_t pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL; -decl_simple_lock_data(,pv_hashed_free_list_lock) -decl_simple_lock_data(,pv_hashed_kern_free_list_lock) -decl_simple_lock_data(,pv_hash_table_lock) +pv_hashed_entry_t pv_hashed_free_list = PV_HASHED_ENTRY_NULL; +pv_hashed_entry_t pv_hashed_kern_free_list = PV_HASHED_ENTRY_NULL; +decl_simple_lock_data(, pv_hashed_free_list_lock) +decl_simple_lock_data(, pv_hashed_kern_free_list_lock) +decl_simple_lock_data(, pv_hash_table_lock) -decl_simple_lock_data(,phys_backup_lock) +decl_simple_lock_data(, phys_backup_lock) -zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */ +zone_t pv_hashed_list_zone; /* zone of pv_hashed_entry structures */ /* * First and last physical addresses that we maintain any information * for. Initialized to zero so that pmap operations done before * pmap_init won't touch any non-existent structures. */ -boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */ +boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */ static struct vm_object kptobj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); static struct vm_object kpml4obj_object_store __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT))); @@ -213,15 +215,8 @@ static struct vm_object kpdptobj_object_store __attribute__((aligned(VM_PACKED_P * Array of physical page attribites for managed pages. * One byte per physical page. */ -char *pmap_phys_attributes; -ppnum_t last_managed_page = 0; - -/* - * Amount of virtual memory mapped by one - * page-directory entry. - */ - -uint64_t pde_mapped_size = PDE_MAPPED_SIZE; +char *pmap_phys_attributes; +ppnum_t last_managed_page = 0; unsigned pmap_memory_region_count; unsigned pmap_memory_region_current; @@ -231,44 +226,44 @@ pmap_memory_region_t pmap_memory_regions[PMAP_MEMORY_REGIONS_SIZE]; /* * Other useful macros. 
*/ -#define current_pmap() (vm_map_pmap(current_thread()->map)) - -struct pmap kernel_pmap_store; -pmap_t kernel_pmap; +#define current_pmap() (vm_map_pmap(current_thread()->map)) -struct zone *pmap_zone; /* zone of pmap structures */ +struct pmap kernel_pmap_store; +pmap_t kernel_pmap; -struct zone *pmap_anchor_zone; -struct zone *pmap_uanchor_zone; -int pmap_debug = 0; /* flag for debugging prints */ +struct zone *pmap_zone; /* zone of pmap structures */ -unsigned int inuse_ptepages_count = 0; -long long alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */ -unsigned int bootstrap_wired_pages = 0; -int pt_fake_zone_index = -1; +struct zone *pmap_anchor_zone; +struct zone *pmap_uanchor_zone; +int pmap_debug = 0; /* flag for debugging prints */ -extern long NMIPI_acks; +unsigned int inuse_ptepages_count = 0; +long long alloc_ptepages_count __attribute__((aligned(8))) = 0; /* aligned for atomic access */ +unsigned int bootstrap_wired_pages = 0; +int pt_fake_zone_index = -1; -boolean_t kernel_text_ps_4K = TRUE; -boolean_t wpkernel = TRUE; +extern long NMIPI_acks; -extern char end; +boolean_t kernel_text_ps_4K = TRUE; -static int nkpt; +extern char end; -pt_entry_t *DMAP1, *DMAP2; -caddr_t DADDR1; -caddr_t DADDR2; +static int nkpt; -boolean_t pmap_disable_kheap_nx = FALSE; -boolean_t pmap_disable_kstack_nx = FALSE; +#if DEVELOPMENT || DEBUG +boolean_t pmap_disable_kheap_nx = FALSE; +boolean_t pmap_disable_kstack_nx = FALSE; +boolean_t wpkernel = TRUE; +#else +const boolean_t wpkernel = TRUE; +#endif extern long __stack_chk_guard[]; static uint64_t pmap_eptp_flags = 0; boolean_t pmap_ept_support_ad = FALSE; - +static void process_pmap_updates(pmap_t, bool, addr64_t, addr64_t); /* * Map memory at initialization. The physical addresses being * mapped are not managed and are never unmapped. 
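*
* [Annotation, not part of the original patch] A recurring change in this
* hunk: tunables such as nx_enabled, wpkernel, pmap_disable_kheap_nx and
* pmap_disable_kstack_nx are now writable only on DEVELOPMENT || DEBUG
* kernels and become const on RELEASE, so a stray write (or an exploit)
* cannot flip them at runtime. The pattern, sketched:
*
*   #if DEVELOPMENT || DEBUG
*   boolean_t wpkernel = TRUE;              // boot-arg can override
*   #else
*   const boolean_t wpkernel = TRUE;        // immutable on RELEASE
*   #endif
*
* Correspondingly, the PE_parse_boot_argn("wpkernel", ...) call in
* pmap_lowmem_finalize() below is compiled only under
* DEVELOPMENT || DEBUG.
*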
@@ -278,44 +273,44 @@ boolean_t pmap_ept_support_ad = FALSE; */ vm_offset_t pmap_map( - vm_offset_t virt, - vm_map_offset_t start_addr, - vm_map_offset_t end_addr, - vm_prot_t prot, - unsigned int flags) + vm_offset_t virt, + vm_map_offset_t start_addr, + vm_map_offset_t end_addr, + vm_prot_t prot, + unsigned int flags) { - kern_return_t kr; - int ps; + kern_return_t kr; + int ps; ps = PAGE_SIZE; while (start_addr < end_addr) { kr = pmap_enter(kernel_pmap, (vm_map_offset_t)virt, - (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE); + (ppnum_t) i386_btop(start_addr), prot, VM_PROT_NONE, flags, TRUE); if (kr != KERN_SUCCESS) { panic("%s: failed pmap_enter, " - "virt=%p, start_addr=%p, end_addr=%p, prot=%#x, flags=%#x", - __FUNCTION__, - (void *)virt, (void *)start_addr, (void *)end_addr, prot, flags); + "virt=%p, start_addr=%p, end_addr=%p, prot=%#x, flags=%#x", + __FUNCTION__, + (void *)virt, (void *)start_addr, (void *)end_addr, prot, flags); } virt += ps; start_addr += ps; } - return(virt); + return virt; } -extern char *first_avail; -extern vm_offset_t virtual_avail, virtual_end; -extern pmap_paddr_t avail_start, avail_end; -extern vm_offset_t sHIB; -extern vm_offset_t eHIB; -extern vm_offset_t stext; -extern vm_offset_t etext; -extern vm_offset_t sdata, edata; -extern vm_offset_t sconst, econst; +extern char *first_avail; +extern vm_offset_t virtual_avail, virtual_end; +extern pmap_paddr_t avail_start, avail_end; +extern vm_offset_t sHIB; +extern vm_offset_t eHIB; +extern vm_offset_t stext; +extern vm_offset_t etext; +extern vm_offset_t sdata, edata; +extern vm_offset_t sconst, econst; -extern void *KPTphys; +extern void *KPTphys; boolean_t pmap_smep_enabled = FALSE; boolean_t pmap_smap_enabled = FALSE; @@ -323,7 +318,7 @@ boolean_t pmap_smap_enabled = FALSE; void pmap_cpu_init(void) { - cpu_data_t *cdp = current_cpu_datap(); + cpu_data_t *cdp = current_cpu_datap(); set_cr4(get_cr4() | CR4_PGE); @@ -333,13 +328,13 @@ pmap_cpu_init(void) cdp->cpu_kernel_cr3 = kernel_pmap->pm_cr3; cpu_shadowp(cdp->cpu_number)->cpu_kernel_cr3 = cdp->cpu_kernel_cr3; cdp->cpu_active_cr3 = kernel_pmap->pm_cr3; - cdp->cpu_tlb_invalid = FALSE; + cdp->cpu_tlb_invalid = 0; cdp->cpu_task_map = TASK_MAP_64BIT; pmap_pcid_configure(); if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMEP) { pmap_smep_enabled = TRUE; -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG boolean_t nsmep; if (PE_parse_boot_argn("-pmap_smep_disable", &nsmep, sizeof(nsmep))) { pmap_smep_enabled = FALSE; @@ -348,7 +343,6 @@ pmap_cpu_init(void) if (pmap_smep_enabled) { set_cr4(get_cr4() | CR4_SMEP); } - } if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_SMAP) { pmap_smap_enabled = TRUE; @@ -371,19 +365,25 @@ pmap_cpu_init(void) #endif /* !MONOTONIC */ } -static uint32_t pmap_scale_shift(void) { +static uint32_t +pmap_scale_shift(void) +{ uint32_t scale = 0; - if (sane_size <= 8*GB) { + if (sane_size <= 8 * GB) { scale = (uint32_t)(sane_size / (2 * GB)); - } else if (sane_size <= 32*GB) { - scale = 4 + (uint32_t)((sane_size - (8 * GB))/ (4 * GB)); + } else if (sane_size <= 32 * GB) { + scale = 4 + (uint32_t)((sane_size - (8 * GB)) / (4 * GB)); } else { - scale = 10 + (uint32_t)MIN(4, ((sane_size - (32 * GB))/ (8 * GB))); + scale = 10 + (uint32_t)MIN(4, ((sane_size - (32 * GB)) / (8 * GB))); } return scale; } +lck_grp_t pmap_lck_grp; +lck_grp_attr_t pmap_lck_grp_attr; +lck_attr_t pmap_lck_rw_attr; + /* * Bootstrap the system enough to run with virtual memory. * Map the kernel's code and data, and allocate the system page table. 
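*
* [Annotation, not part of the original patch] pmap_scale_shift() above
* sizes the PV hash table from sane_size. Worked examples of the three
* ranges:
*
*   4 GB:  scale = 4/2                      = 2
*   16 GB: scale = 4 + (16 - 8)/4           = 6
*   64 GB: scale = 10 + MIN(4, (64 - 32)/8) = 14
*
* and the caller then computes
*
*   npvhashmask = (NPVHASHBUCKETS << scale) - 1;
*
* so the bucket count stays a power of two and the mask can be applied
* with a single AND (the sanity check in pmap_bootstrap() panics about
* exactly this if npvhashbuckets & npvhashmask is nonzero).
*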
@@ -392,17 +392,17 @@ static uint32_t pmap_scale_shift(void) { void pmap_bootstrap( - __unused vm_offset_t load_start, - __unused boolean_t IA32e) + __unused vm_offset_t load_start, + __unused boolean_t IA32e) { #if NCOPY_WINDOWS > 0 - vm_offset_t va; + vm_offset_t va; int i; #endif assert(IA32e); - vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address - * known to VM */ + vm_last_addr = VM_MAX_KERNEL_ADDRESS; /* Set the highest address + * known to VM */ /* * The kernel's pmap is statically allocated so we don't * have to use pmap_create, which is unlikely to work @@ -411,7 +411,9 @@ pmap_bootstrap( kernel_pmap = &kernel_pmap_store; kernel_pmap->ref_count = 1; +#if DEVELOPMENT || DEBUG kernel_pmap->nx_enabled = TRUE; +#endif kernel_pmap->pm_task_map = TASK_MAP_64BIT; kernel_pmap->pm_obj = (vm_object_t) NULL; kernel_pmap->pm_pml4 = IdlePML4; @@ -425,8 +427,8 @@ pmap_bootstrap( current_cpu_datap()->cpu_kernel_cr3 = cpu_shadowp(cpu_number())->cpu_kernel_cr3 = (addr64_t) kernel_pmap->pm_cr3; nkpt = NKPT; - OSAddAtomic(NKPT, &inuse_ptepages_count); - OSAddAtomic64(NKPT, &alloc_ptepages_count); + OSAddAtomic(NKPT, &inuse_ptepages_count); + OSAddAtomic64(NKPT, &alloc_ptepages_count); bootstrap_wired_pages = NKPT; virtual_avail = (vm_offset_t)(VM_MIN_KERNEL_ADDRESS) + (vm_offset_t)first_avail; @@ -437,39 +439,35 @@ pmap_bootstrap( * Reserve some special page table entries/VA space for temporary * mapping of pages. */ -#define SYSMAP(c, p, v, n) \ +#define SYSMAP(c, p, v, n) \ v = (c)va; va += ((n)*INTEL_PGBYTES); va = virtual_avail; - for (i=0; icpu_pmap); - kprintf("mapwindow %p\n", current_cpu_datap()->cpu_pmap->mapwindow); - kprintf("two stuff %p %p\n", - (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP), - (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR)); + kprintf("trying to do SYSMAP idx %d %p\n", i, + current_cpu_datap()); + kprintf("cpu_pmap %p\n", current_cpu_datap()->cpu_pmap); + kprintf("mapwindow %p\n", current_cpu_datap()->cpu_pmap->mapwindow); + kprintf("two stuff %p %p\n", + (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP), + (void *)(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR)); #endif - SYSMAP(caddr_t, - (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP), - (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR), - 1); - current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = - &(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP_store); - *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0; - } - - /* DMAP user for debugger */ - SYSMAP(caddr_t, DMAP1, DADDR1, 1); - SYSMAP(caddr_t, DMAP2, DADDR2, 1); /* XXX temporary - can remove */ + SYSMAP(caddr_t, + (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP), + (current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CADDR), + 1); + current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = + &(current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP_store); + *current_cpu_datap()->cpu_pmap->mapwindow[i].prv_CMAP = 0; + } + virtual_avail = va; #endif - if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof (npvhashmask))) { + if (!PE_parse_boot_argn("npvhash", &npvhashmask, sizeof(npvhashmask))) { npvhashmask = ((NPVHASHBUCKETS) << pmap_scale_shift()) - 1; - } npvhashbuckets = npvhashmask + 1; @@ -479,27 +477,38 @@ pmap_bootstrap( "using default %d\n", npvhashmask, NPVHASHMASK); } - simple_lock_init(&kernel_pmap->lock, 0); + lck_grp_attr_setdefault(&pmap_lck_grp_attr); + lck_grp_init(&pmap_lck_grp, "pmap", &pmap_lck_grp_attr); + + lck_attr_setdefault(&pmap_lck_rw_attr); + 
lck_attr_cleardebug(&pmap_lck_rw_attr); + + lck_rw_init(&kernel_pmap->pmap_rwl, &pmap_lck_grp, &pmap_lck_rw_attr); + kernel_pmap->pmap_rwl.lck_rw_can_sleep = FALSE; + simple_lock_init(&pv_hashed_free_list_lock, 0); simple_lock_init(&pv_hashed_kern_free_list_lock, 0); - simple_lock_init(&pv_hash_table_lock,0); + simple_lock_init(&pv_hash_table_lock, 0); simple_lock_init(&phys_backup_lock, 0); pmap_cpu_init(); - if (pmap_pcid_ncpus) + if (pmap_pcid_ncpus) { printf("PMAP: PCID enabled\n"); + } - if (pmap_smep_enabled) + if (pmap_smep_enabled) { printf("PMAP: Supervisor Mode Execute Protection enabled\n"); - if (pmap_smap_enabled) + } + if (pmap_smap_enabled) { printf("PMAP: Supervisor Mode Access Protection enabled\n"); + } -#if DEBUG +#if DEBUG printf("Stack canary: 0x%lx\n", __stack_chk_guard[0]); printf("early_random(): 0x%qx\n", early_random()); #endif -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG boolean_t ptmp; /* Check if the user has requested disabling stack or heap no-execute * enforcement. These are "const" variables; that qualifier is cast away @@ -524,9 +533,9 @@ pmap_bootstrap( virtual_end = VM_MAX_KERNEL_ADDRESS_EFI32; } kprintf("Kernel virtual space from 0x%lx to 0x%lx.\n", - (long)KERNEL_BASE, (long)virtual_end); + (long)KERNEL_BASE, (long)virtual_end); kprintf("Available physical space from 0x%llx to 0x%llx\n", - avail_start, avail_end); + avail_start, avail_end); /* * The -no_shared_cr3 boot-arg is a debugging feature (set by default @@ -536,20 +545,22 @@ pmap_bootstrap( * a panic. Only copyin and copyout are exempt from this. */ (void) PE_parse_boot_argn("-no_shared_cr3", - &no_shared_cr3, sizeof (no_shared_cr3)); - if (no_shared_cr3) + &no_shared_cr3, sizeof(no_shared_cr3)); + if (no_shared_cr3) { kprintf("Kernel not sharing user map\n"); + } -#ifdef PMAP_TRACES - if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof (pmap_trace))) { +#ifdef PMAP_TRACES + if (PE_parse_boot_argn("-pmap_trace", &pmap_trace, sizeof(pmap_trace))) { kprintf("Kernel traces for pmap operations enabled\n"); } -#endif /* PMAP_TRACES */ +#endif /* PMAP_TRACES */ #if MACH_ASSERT + PE_parse_boot_argn("pmap_asserts", &pmap_asserts_enabled, sizeof(pmap_asserts_enabled)); PE_parse_boot_argn("pmap_stats_assert", - &pmap_stats_assert, - sizeof (pmap_stats_assert)); + &pmap_stats_assert, + sizeof(pmap_stats_assert)); #endif /* MACH_ASSERT */ } @@ -569,36 +580,36 @@ pmap_virtual_space( #include -int32_t pmap_npages; -int32_t pmap_teardown_last_valid_compact_indx = -1; +int32_t pmap_npages; +int32_t pmap_teardown_last_valid_compact_indx = -1; -void hibernate_rebuild_pmap_structs(void); -void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *); -void pmap_pack_index(uint32_t); -int32_t pmap_unpack_index(pv_rooted_entry_t); +void hibernate_rebuild_pmap_structs(void); +void hibernate_teardown_pmap_structs(addr64_t *, addr64_t *); +void pmap_pack_index(uint32_t); +int32_t pmap_unpack_index(pv_rooted_entry_t); int32_t pmap_unpack_index(pv_rooted_entry_t pv_h) { - int32_t indx = 0; + int32_t indx = 0; indx = (int32_t)(*((uint64_t *)(&pv_h->qlink.next)) >> 48); indx = indx << 16; indx |= (int32_t)(*((uint64_t *)(&pv_h->qlink.prev)) >> 48); - + *((uint64_t *)(&pv_h->qlink.next)) |= ((uint64_t)0xffff << 48); *((uint64_t *)(&pv_h->qlink.prev)) |= ((uint64_t)0xffff << 48); - return (indx); + return indx; } void pmap_pack_index(uint32_t indx) { - pv_rooted_entry_t pv_h; + pv_rooted_entry_t pv_h; pv_h = &pv_head_table[indx]; @@ -613,36 +624,37 @@ pmap_pack_index(uint32_t indx) void 
hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end) { - int32_t i; - int32_t compact_target_indx; + int32_t i; + int32_t compact_target_indx; compact_target_indx = 0; for (i = 0; i < pmap_npages; i++) { if (pv_head_table[i].pmap == PMAP_NULL) { - - if (pv_head_table[compact_target_indx].pmap != PMAP_NULL) + if (pv_head_table[compact_target_indx].pmap != PMAP_NULL) { compact_target_indx = i; + } } else { pmap_pack_index((uint32_t)i); if (pv_head_table[compact_target_indx].pmap == PMAP_NULL) { /* - * we've got a hole to fill, so - * move this pv_rooted_entry_t to it's new home - */ + * we've got a hole to fill, so + * move this pv_rooted_entry_t to it's new home + */ pv_head_table[compact_target_indx] = pv_head_table[i]; pv_head_table[i].pmap = PMAP_NULL; - + pmap_teardown_last_valid_compact_indx = compact_target_indx; compact_target_indx++; - } else + } else { pmap_teardown_last_valid_compact_indx = i; + } } } - *unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx+1]; - *unneeded_end = (addr64_t)&pv_head_table[pmap_npages-1]; - + *unneeded_start = (addr64_t)&pv_head_table[pmap_teardown_last_valid_compact_indx + 1]; + *unneeded_end = (addr64_t)&pv_head_table[pmap_npages - 1]; + HIBLOG("hibernate_teardown_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx); } @@ -650,13 +662,12 @@ hibernate_teardown_pmap_structs(addr64_t *unneeded_start, addr64_t *unneeded_end void hibernate_rebuild_pmap_structs(void) { - int32_t cindx, eindx, rindx = 0; - pv_rooted_entry_t pv_h; + int32_t cindx, eindx, rindx = 0; + pv_rooted_entry_t pv_h; eindx = (int32_t)pmap_npages; for (cindx = pmap_teardown_last_valid_compact_indx; cindx >= 0; cindx--) { - pv_h = &pv_head_table[cindx]; rindx = pmap_unpack_index(pv_h); @@ -669,18 +680,19 @@ hibernate_rebuild_pmap_structs(void) */ pv_head_table[rindx] = pv_head_table[cindx]; } - if (rindx+1 != eindx) { + if (rindx + 1 != eindx) { /* * the 'hole' between this vm_rooted_entry_t and the previous - * vm_rooted_entry_t we moved needs to be initialized as + * vm_rooted_entry_t we moved needs to be initialized as * a range of zero'd vm_rooted_entry_t's */ - bzero((char *)&pv_head_table[rindx+1], (eindx - rindx - 1) * sizeof (struct pv_rooted_entry)); + bzero((char *)&pv_head_table[rindx + 1], (eindx - rindx - 1) * sizeof(struct pv_rooted_entry)); } eindx = rindx; } - if (rindx) - bzero ((char *)&pv_head_table[0], rindx * sizeof (struct pv_rooted_entry)); + if (rindx) { + bzero((char *)&pv_head_table[0], rindx * sizeof(struct pv_rooted_entry)); + } HIBLOG("hibernate_rebuild_pmap_structs done: last_valid_compact_indx %d\n", pmap_teardown_last_valid_compact_indx); } @@ -695,10 +707,10 @@ hibernate_rebuild_pmap_structs(void) void pmap_init(void) { - long npages; - vm_offset_t addr; - vm_size_t s, vsize; - vm_map_offset_t vaddr; + long npages; + vm_offset_t addr; + vm_size_t s, vsize; + vm_map_offset_t vaddr; ppnum_t ppn; @@ -724,17 +736,18 @@ pmap_init(void) npages = i386_btop(avail_end); #if HIBERNATION pmap_npages = (uint32_t)npages; -#endif +#endif s = (vm_size_t) (sizeof(struct pv_rooted_entry) * npages - + (sizeof (struct pv_hashed_entry_t *) * (npvhashbuckets)) - + pv_lock_table_size(npages) - + pv_hash_lock_table_size((npvhashbuckets)) - + npages); + + (sizeof(struct pv_hashed_entry_t *) * (npvhashbuckets)) + + pv_lock_table_size(npages) + + pv_hash_lock_table_size((npvhashbuckets)) + + npages); s = round_page(s); if (kernel_memory_allocate(kernel_map, &addr, s, 0, - KMA_KOBJECT | 
KMA_PERMANENT, VM_KERN_MEMORY_PMAP) - != KERN_SUCCESS) + KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_PMAP) + != KERN_SUCCESS) { panic("pmap_init"); + } memset((char *)addr, 0, s); @@ -742,7 +755,9 @@ pmap_init(void) vsize = s; #if PV_DEBUG - if (0 == npvhashmask) panic("npvhashmask not initialized"); + if (0 == npvhashmask) { + panic("npvhashmask not initialized"); + } #endif /* @@ -763,21 +778,24 @@ pmap_init(void) pmap_phys_attributes = (char *) addr; ppnum_t last_pn = i386_btop(avail_end); - unsigned int i; + unsigned int i; pmap_memory_region_t *pmptr = pmap_memory_regions; for (i = 0; i < pmap_memory_region_count; i++, pmptr++) { - if (pmptr->type != kEfiConventionalMemory) + if (pmptr->type != kEfiConventionalMemory) { continue; + } ppnum_t pn; for (pn = pmptr->base; pn <= pmptr->end; pn++) { if (pn < last_pn) { pmap_phys_attributes[pn] |= PHYS_MANAGED; - if (pn > last_managed_page) + if (pn > last_managed_page) { last_managed_page = pn; + } - if (pn >= lowest_hi && pn <= highest_hi) + if (pn >= lowest_hi && pn <= highest_hi) { pmap_phys_attributes[pn] |= PHYS_NOENCRYPT; + } } } } @@ -794,8 +812,8 @@ pmap_init(void) * and of the physical-to-virtual entries. */ s = (vm_size_t) sizeof(struct pmap); - pmap_zone = zinit(s, 400*s, 4096, "pmap"); /* XXX */ - zone_change(pmap_zone, Z_NOENCRYPT, TRUE); + pmap_zone = zinit(s, 400 * s, 4096, "pmap"); /* XXX */ + zone_change(pmap_zone, Z_NOENCRYPT, TRUE); pmap_anchor_zone = zinit(PAGE_SIZE, task_max, PAGE_SIZE, "pagetable anchors"); zone_change(pmap_anchor_zone, Z_NOENCRYPT, TRUE); @@ -820,24 +838,23 @@ pmap_init(void) zone_change(pmap_uanchor_zone, Z_ALIGNMENT_REQUIRED, TRUE); s = (vm_size_t) sizeof(struct pv_hashed_entry); - pv_hashed_list_zone = zinit(s, 10000*s /* Expandable zone */, + pv_hashed_list_zone = zinit(s, 10000 * s /* Expandable zone */, 4096 * 3 /* LCM x86_64*/, "pv_list"); zone_change(pv_hashed_list_zone, Z_NOENCRYPT, TRUE); zone_change(pv_hashed_list_zone, Z_GZALLOC_EXEMPT, TRUE); - /* create pv entries for kernel pages mapped by low level - startup code. these have to exist so we can pmap_remove() - e.g. kext pages from the middle of our addr space */ - + /* create pv entries for kernel pages that might get pmap_remove()ed */ vaddr = (vm_map_offset_t) VM_MIN_KERNEL_ADDRESS; for (ppn = VM_MIN_KERNEL_PAGE; ppn < i386_btop(avail_start); ppn++) { - pv_rooted_entry_t pv_e; + pv_rooted_entry_t pv_h; - pv_e = pai_to_pvh(ppn); - pv_e->va_and_flags = vaddr; + pv_h = pai_to_pvh(ppn); + assert(pv_h->qlink.next == 0); /* shouldn't be init'd yet */ + assert(pv_h->pmap == NULL); + pv_h->va_and_flags = vaddr; vaddr += PAGE_SIZE; - pv_e->pmap = kernel_pmap; - queue_init(&pv_e->qlink); + pv_h->pmap = kernel_pmap; + queue_init(&pv_h->qlink); } pmap_initialized = TRUE; @@ -850,13 +867,40 @@ pmap_init(void) pmap_expand_pml4(kernel_pmap, KERNEL_BASEMENT, PMAP_EXPAND_OPTIONS_NONE); #if CONFIG_VMX - pmap_ept_support_ad = vmx_hv_support() && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE); + pmap_ept_support_ad = vmx_hv_support() && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE); pmap_eptp_flags = HV_VMX_EPTP_MEMORY_TYPE_WB | HV_VMX_EPTP_WALK_LENGTH(4) | (pmap_ept_support_ad ? HV_VMX_EPTP_ENABLE_AD_FLAGS : 0); #endif /* CONFIG_VMX */ } +/* + * Create pv entries for kernel pages mapped by low level + * startup code. These have to exist so we can pmap_remove() them. 
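*
* [Annotation, not part of the original patch] A pv_rooted_entry is the
* head of the reverse (physical-to-virtual) mapping chain for a managed
* page; pmap_remove() walks it to find and clear the PTEs mapping that
* page. Boot-time mappings were entered below the pmap layer, so without
* this fixup their pv heads would still look free (pmap == PMAP_NULL) and
* removal could not find them. The routine below walks
* [start, start + length) a page at a time and, for each VA that
* pmap_find_phys() resolves, stamps the pv head with the VA and
* kernel_pmap, e.g. (hypothetical call):
*
*   pmap_pv_fixup(sHIB, eHIB - sHIB);
*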
+ */ +void +pmap_pv_fixup(vm_offset_t start, vm_size_t length) +{ + ppnum_t ppn; + pv_rooted_entry_t pv_h; + + while (length != 0) { + ppn = pmap_find_phys(kernel_pmap, start); + if (ppn != 0) { + pv_h = pai_to_pvh(ppn); + assert(pv_h->qlink.next == 0); /* shouldn't be init'd yet */ + assert(pv_h->pmap == 0); + pv_h->va_and_flags = start; + pv_h->pmap = kernel_pmap; + queue_init(&pv_h->qlink); + } + start += PAGE_SIZE; + length -= PAGE_SIZE; + } +} + static -void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) { +void +pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, boolean_t ro) +{ uint64_t ev = sv + nxrosz, cv = sv; pd_entry_t *pdep; pt_entry_t *ptep = NULL; @@ -869,10 +913,12 @@ void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, b uint64_t pdev = (cv & ~((uint64_t)PDEMASK)); if (*pdep & INTEL_PTE_PS) { - if (NX) + if (NX) { *pdep |= INTEL_PTE_NX; - if (ro) + } + if (ro) { *pdep &= ~INTEL_PTE_WRITE; + } cv += NBPD; cv &= ~((uint64_t) PDEMASK); pdep = pmap_pde(npmap, cv); @@ -880,10 +926,12 @@ void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, b } for (ptep = pmap_pte(npmap, cv); ptep != NULL && (cv < (pdev + NBPD)) && (cv < ev);) { - if (NX) + if (NX) { *ptep |= INTEL_PTE_NX; - if (ro) + } + if (ro) { *ptep &= ~INTEL_PTE_WRITE; + } cv += NBPT; ptep = pmap_pte(npmap, cv); } @@ -908,14 +956,14 @@ void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, b * through linker directives. Large pages are used only if this alignment * exists (and not overriden by the -kernel_text_page_4K boot-arg). The * memory layout is: - * + * * : : * | __DATA | * sdata: ================== 2Meg * | | * | zero-padding | * | | - * etext: ------------------ + * etext: ------------------ * | | * : : * | | @@ -927,7 +975,7 @@ void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, b * | | * | zero-padding | * | | - * eHIB: ------------------ + * eHIB: ------------------ * | __HIB | * : : * @@ -936,12 +984,14 @@ void pmap_mark_range(pmap_t npmap, uint64_t sv, uint64_t nxrosz, boolean_t NX, b * 4K pages covering [stext,etext] are coalesced as 2M large pages. * The now unused level-1 PTE pages are also freed. */ -extern ppnum_t vm_kernel_base_page; +extern ppnum_t vm_kernel_base_page; static uint32_t constptes = 0, dataptes = 0; -void pmap_lowmem_finalize(void) { +void +pmap_lowmem_finalize(void) +{ spl_t spl; - int i; + int i; /* * Update wired memory statistics for early boot pages @@ -960,34 +1010,38 @@ void pmap_lowmem_finalize(void) { * needs has to be released to VM. */ for (i = 0; - pmap_memory_regions[i].end < vm_kernel_base_page; - i++) { - vm_offset_t pbase = i386_ptob(pmap_memory_regions[i].base); - vm_offset_t pend = i386_ptob(pmap_memory_regions[i].end+1); + pmap_memory_regions[i].end < vm_kernel_base_page; + i++) { + vm_offset_t pbase = i386_ptob(pmap_memory_regions[i].base); + vm_offset_t pend = i386_ptob(pmap_memory_regions[i].end + 1); DBG("pmap region %d [%p..[%p\n", i, (void *) pbase, (void *) pend); - if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED) + if (pmap_memory_regions[i].attribute & EFI_MEMORY_KERN_RESERVED) { continue; + } /* * rdar://6332712 * Adjust limits not to free pages in range 0xc0000-0xff000. 
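*
* [Annotation, not part of the original patch] The clipping below keeps
* the legacy VGA/BIOS window [0xc0000, 0x100000) resident while returning
* the rest of each low region to the VM. In effect:
*
*   region entirely inside the window -> skipped
*   region straddling the window      -> free [pbase, 0xc0000), then
*                                        continue from 0x100000
*   region below/above the window     -> clipped to pend = 0xc0000 or
*                                        pbase = 0x100000
*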
*/ - if (pbase >= 0xc0000 && pend <= 0x100000) + if (pbase >= 0xc0000 && pend <= 0x100000) { continue; + } if (pbase < 0xc0000 && pend > 0x100000) { /* page range entirely within region, free lower part */ DBG("- ml_static_mfree(%p,%p)\n", (void *) ml_static_ptovirt(pbase), - (void *) (0xc0000-pbase)); - ml_static_mfree(ml_static_ptovirt(pbase),0xc0000-pbase); + (void *) (0xc0000 - pbase)); + ml_static_mfree(ml_static_ptovirt(pbase), 0xc0000 - pbase); pbase = 0x100000; } - if (pbase < 0xc0000) + if (pbase < 0xc0000) { pend = MIN(pend, 0xc0000); - if (pend > 0x100000) + } + if (pend > 0x100000) { pbase = MAX(pbase, 0x100000); + } DBG("- ml_static_mfree(%p,%p)\n", (void *) ml_static_ptovirt(pbase), (void *) (pend - pbase)); @@ -1001,7 +1055,7 @@ void pmap_lowmem_finalize(void) { /* * Remove all mappings past the boot-cpu descriptor aliases and low globals. - * Non-boot-cpu GDT aliases will be remapped later as needed. + * Non-boot-cpu GDT aliases will be remapped later as needed. */ pmap_remove(kernel_pmap, LOWGLOBAL_ALIAS + PAGE_SIZE, vm_kernel_base); @@ -1014,24 +1068,27 @@ void pmap_lowmem_finalize(void) { kprintf("Kernel text is 2MB aligned"); kernel_text_ps_4K = FALSE; if (PE_parse_boot_argn("-kernel_text_ps_4K", - &kernel_text_ps_4K, - sizeof (kernel_text_ps_4K))) + &kernel_text_ps_4K, + sizeof(kernel_text_ps_4K))) { kprintf(" but will be mapped with 4K pages\n"); - else + } else { kprintf(" and will be mapped with 2M pages\n"); + } } - - (void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof (wpkernel)); - if (wpkernel) +#if DEVELOPMENT || DEBUG + (void) PE_parse_boot_argn("wpkernel", &wpkernel, sizeof(wpkernel)); +#endif + if (wpkernel) { kprintf("Kernel text %p-%p to be write-protected\n", - (void *) stext, (void *) etext); + (void *) stext, (void *) etext); + } spl = splhigh(); /* * Scan over text if mappings are to be changed: - * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0 - * - Change to large-pages if possible and not overriden. + * - Remap kernel text readonly unless the "wpkernel" boot-arg is 0 + * - Change to large-pages if possible and not overriden. */ if (kernel_text_ps_4K && wpkernel) { vm_offset_t myva; @@ -1039,8 +1096,9 @@ void pmap_lowmem_finalize(void) { pt_entry_t *ptep; ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva); - if (ptep) + if (ptep) { pmap_store_pte(ptep, *ptep & ~INTEL_PTE_WRITE); + } } } @@ -1051,36 +1109,38 @@ void pmap_lowmem_finalize(void) { * Release zero-filled page padding used for 2M-alignment. */ DBG("ml_static_mfree(%p,%p) for padding below text\n", - (void *) eHIB, (void *) (stext - eHIB)); + (void *) eHIB, (void *) (stext - eHIB)); ml_static_mfree(eHIB, stext - eHIB); DBG("ml_static_mfree(%p,%p) for padding above text\n", - (void *) etext, (void *) (sdata - etext)); + (void *) etext, (void *) (sdata - etext)); ml_static_mfree(etext, sdata - etext); /* * Coalesce text pages into large pages. 
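*
* [Annotation, not part of the original patch] For each 2 MB chunk of
* [stext, sdata) the loop below promotes the mapping in place: it keeps
* the attribute bits of the existing PDE, ORs in INTEL_PTE_PS to make it
* a superpage, takes the physical frame from the first 4K PTE, clears
* INTEL_PTE_WRITE when wpkernel is set, and finally hands the
* now-unreferenced page-table page back with ml_static_mfree(). Roughly:
*
*   pde = (*pdep & PTMASK) | INTEL_PTE_PS | (*ptep & PG_FRAME);
*   pmap_store_pte(pdep, pde);
*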
*/ for (myva = stext; myva < sdata; myva += I386_LPGBYTES) { - pt_entry_t *ptep; - vm_offset_t pte_phys; - pt_entry_t *pdep; - pt_entry_t pde; + pt_entry_t *ptep; + vm_offset_t pte_phys; + pt_entry_t *pdep; + pt_entry_t pde; pdep = pmap_pde(kernel_pmap, (vm_map_offset_t)myva); ptep = pmap_pte(kernel_pmap, (vm_map_offset_t)myva); DBG("myva: %p pdep: %p ptep: %p\n", - (void *) myva, (void *) pdep, (void *) ptep); - if ((*ptep & INTEL_PTE_VALID) == 0) + (void *) myva, (void *) pdep, (void *) ptep); + if ((*ptep & INTEL_PTE_VALID) == 0) { continue; + } pte_phys = (vm_offset_t)(*ptep & PG_FRAME); - pde = *pdep & PTMASK; /* page attributes from pde */ - pde |= INTEL_PTE_PS; /* make it a 2M entry */ - pde |= pte_phys; /* take page frame from pte */ + pde = *pdep & PTMASK; /* page attributes from pde */ + pde |= INTEL_PTE_PS; /* make it a 2M entry */ + pde |= pte_phys; /* take page frame from pte */ - if (wpkernel) + if (wpkernel) { pde &= ~INTEL_PTE_WRITE; + } DBG("pmap_store_pte(%p,0x%llx)\n", - (void *)pdep, pde); + (void *)pdep, pde); pmap_store_pte(pdep, pde); /* @@ -1091,9 +1151,9 @@ void pmap_lowmem_finalize(void) { * in the Idle PTEs in "low memory". */ vm_offset_t vm_ptep = (vm_offset_t) KPTphys - + (pte_phys >> PTPGSHIFT); + + (pte_phys >> PTPGSHIFT); DBG("ml_static_mfree(%p,0x%x) for pte\n", - (void *) vm_ptep, PAGE_SIZE); + (void *) vm_ptep, PAGE_SIZE); ml_static_mfree(vm_ptep, PAGE_SIZE); } @@ -1112,7 +1172,7 @@ void pmap_lowmem_finalize(void) { } kprintf("Marking const DATA read-only\n"); } - + vm_offset_t dva; for (dva = sdata; dva < edata; dva += I386_PGBYTES) { @@ -1155,8 +1215,9 @@ void pmap_lowmem_finalize(void) { } if (!strcmp(seg->segname, "__HIB")) { for (sec = firstsect(seg); sec != NULL; sec = nextsect(seg, sec)) { - if (sec->addr & PAGE_MASK) + if (sec->addr & PAGE_MASK) { panic("__HIB segment's sections misaligned"); + } if (!strcmp(sec->sectname, "__text")) { pmap_mark_range(kernel_pmap, sec->addr, round_page(sec->size), FALSE, TRUE); } else { @@ -1174,27 +1235,24 @@ void pmap_lowmem_finalize(void) { */ if (debug_boot_arg) { pt_entry_t *pte = NULL; - if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS))) + if (0 == (pte = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS))) { panic("lowmem pte"); + } /* make sure it is defined on page boundary */ assert(0 == ((vm_offset_t) &lowGlo & PAGE_MASK)); pmap_store_pte(pte, kvtophys((vm_offset_t)&lowGlo) - | INTEL_PTE_REF - | INTEL_PTE_MOD - | INTEL_PTE_WIRED - | INTEL_PTE_VALID - | INTEL_PTE_WRITE - | INTEL_PTE_NX); + | INTEL_PTE_REF + | INTEL_PTE_MOD + | INTEL_PTE_WIRED + | INTEL_PTE_VALID + | INTEL_PTE_WRITE + | INTEL_PTE_NX); } else { pmap_remove(kernel_pmap, - LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE); + LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE); } - + pmap_tlbi_range(0, ~0ULL, true, 0); splx(spl); - if (pmap_pcid_ncpus) - tlb_flush_global(); - else - flush_tlb_raw(); } /* @@ -1202,33 +1260,89 @@ void pmap_lowmem_finalize(void) { */ boolean_t pmap_verify_free( - ppnum_t pn) + ppnum_t pn) { - pv_rooted_entry_t pv_h; - int pai; - boolean_t result; + pv_rooted_entry_t pv_h; + int pai; + boolean_t result; assert(pn != vm_page_fictitious_addr); - if (!pmap_initialized) - return(TRUE); + if (!pmap_initialized) { + return TRUE; + } - if (pn == vm_page_guard_addr) + if (pn == vm_page_guard_addr) { return TRUE; + } pai = ppn_to_pai(pn); - if (!IS_MANAGED_PAGE(pai)) - return(FALSE); + if (!IS_MANAGED_PAGE(pai)) { + return FALSE; + } pv_h = pai_to_pvh(pn); result = (pv_h->pmap == PMAP_NULL); - return(result); + return result; +} + + +#if 
MACH_ASSERT +void +pmap_assert_free(ppnum_t pn) +{ + int pai; + pv_rooted_entry_t pv_h = NULL; + pmap_t pmap = NULL; + vm_offset_t va = 0; + static char buffer[32]; + static char *pr_name = "not managed pn"; + uint_t attr; + pt_entry_t *ptep; + pt_entry_t pte = -1ull; + + if (pmap_verify_free(pn)) { + return; + } + + if (pn > last_managed_page) { + attr = 0xff; + goto done; + } + + pai = ppn_to_pai(pn); + attr = pmap_phys_attributes[pai]; + pv_h = pai_to_pvh(pai); + va = pv_h->va_and_flags; + pmap = pv_h->pmap; + if (pmap == kernel_pmap) { + pr_name = "kernel"; + } else if (pmap == NULL) { + pr_name = "pmap NULL"; + } else if (pmap->pmap_procname[0] != 0) { + pr_name = &pmap->pmap_procname[0]; + } else { + snprintf(buffer, sizeof(buffer), "pmap %p", pv_h->pmap); + pr_name = buffer; + } + + if (pmap != NULL) { + ptep = pmap_pte(pmap, va); + if (ptep != NULL) { + pte = (uintptr_t)*ptep; + } + } + +done: + panic("page not FREE page: 0x%lx attr: 0x%x %s va: 0x%lx PTE: 0x%llx", + (ulong_t)pn, attr, pr_name, va, pte); } +#endif /* MACH_ASSERT */ boolean_t pmap_is_empty( - pmap_t pmap, - vm_map_offset_t va_start, - vm_map_offset_t va_end) + pmap_t pmap, + vm_map_offset_t va_start, + vm_map_offset_t va_end) { vm_map_offset_t offset; ppnum_t phys_page; @@ -1245,17 +1359,18 @@ pmap_is_empty( * This assumes the count is correct * .. the debug kernel ought to be checking perhaps by page table walk. */ - if (pmap->stats.resident_count == 0) + if (pmap->stats.resident_count == 0) { return TRUE; + } for (offset = va_start; - offset < va_end; - offset += PAGE_SIZE_64) { + offset < va_end; + offset += PAGE_SIZE_64) { phys_page = pmap_find_phys(pmap, offset); if (phys_page) { kprintf("pmap_is_empty(%p,0x%llx,0x%llx): " - "page %d at 0x%llx\n", - pmap, va_start, va_end, phys_page, offset); + "page %d at 0x%llx\n", + pmap, va_start, va_end, phys_page, offset); return FALSE; } } @@ -1301,14 +1416,15 @@ hv_ept_pmap_create(void **ept_pmap, void **eptp) pmap_t pmap_create_options( - ledger_t ledger, - vm_map_size_t sz, - int flags) + ledger_t ledger, + vm_map_size_t sz, + int flags) { - pmap_t p; - vm_size_t size; + pmap_t p; + vm_size_t size; pml4_entry_t *pml4; pml4_entry_t *kpml4; + int i; PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START, sz, flags); @@ -1319,28 +1435,33 @@ pmap_create_options( */ if (size != 0) { - return(PMAP_NULL); + return PMAP_NULL; } /* * Return error when unrecognized flags are passed. 
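*
* [Annotation, not part of the original patch] Both guards fail soft: a
* nonzero size or a flag outside PMAP_CREATE_KNOWN_FLAGS returns
* PMAP_NULL rather than panicking, and __improbable() marks the flag
* check as the cold path for branch layout. A typical call therefore
* looks like (hypothetical caller):
*
*   pmap_t p = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
*   if (p == PMAP_NULL) {
*       // reject the request; do not dereference p
*   }
*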
*/ if (__improbable((flags & ~(PMAP_CREATE_KNOWN_FLAGS)) != 0)) { - return(PMAP_NULL); + return PMAP_NULL; } p = (pmap_t) zalloc(pmap_zone); - if (PMAP_NULL == p) + if (PMAP_NULL == p) { panic("pmap_create zalloc"); + } /* Zero all fields */ bzero(p, sizeof(*p)); - /* init counts now since we'll be bumping some */ - simple_lock_init(&p->lock, 0); - bzero(&p->stats, sizeof (p->stats)); + + lck_rw_init(&p->pmap_rwl, &pmap_lck_grp, &pmap_lck_rw_attr); + p->pmap_rwl.lck_rw_can_sleep = FALSE; + + bzero(&p->stats, sizeof(p->stats)); p->ref_count = 1; +#if DEVELOPMENT || DEBUG p->nx_enabled = 1; +#endif p->pm_shared = FALSE; ledger_reference(ledger); p->ledger = ledger; @@ -1373,29 +1494,37 @@ pmap_create_options( /* allocate the vm_objs to hold the pdpt, pde and pte pages */ - p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS) * PAGE_SIZE); - if (NULL == p->pm_obj_pml4) + p->pm_obj_pml4 = vm_object_allocate((vm_object_size_t)(NPML4PGS) *PAGE_SIZE); + if (NULL == p->pm_obj_pml4) { panic("pmap_create pdpt obj"); + } - p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS) * PAGE_SIZE); - if (NULL == p->pm_obj_pdpt) + p->pm_obj_pdpt = vm_object_allocate((vm_object_size_t)(NPDPTPGS) *PAGE_SIZE); + if (NULL == p->pm_obj_pdpt) { panic("pmap_create pdpt obj"); + } - p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS) * PAGE_SIZE); - if (NULL == p->pm_obj) + p->pm_obj = vm_object_allocate((vm_object_size_t)(NPDEPGS) *PAGE_SIZE); + if (NULL == p->pm_obj) { panic("pmap_create pte obj"); + } if (!(flags & PMAP_CREATE_EPT)) { /* All host pmaps share the kernel's pml4 */ pml4 = pmap64_pml4(p, 0ULL); kpml4 = kernel_pmap->pm_pml4; - pml4[KERNEL_PML4_INDEX] = kpml4[KERNEL_PML4_INDEX]; + for (i = KERNEL_PML4_INDEX; i < (KERNEL_PML4_INDEX + KERNEL_PML4_COUNT); i++) { + pml4[i] = kpml4[i]; + } pml4[KERNEL_KEXTS_INDEX] = kpml4[KERNEL_KEXTS_INDEX]; - pml4[KERNEL_PHYSMAP_PML4_INDEX] = kpml4[KERNEL_PHYSMAP_PML4_INDEX]; + for (i = KERNEL_PHYSMAP_PML4_INDEX; i < (KERNEL_PHYSMAP_PML4_INDEX + KERNEL_PHYSMAP_PML4_COUNT); i++) { + pml4[i] = kpml4[i]; + } pml4[KERNEL_DBLMAP_PML4_INDEX] = kpml4[KERNEL_DBLMAP_PML4_INDEX]; #if KASAN - pml4[KERNEL_KASAN_PML4_INDEX0] = kpml4[KERNEL_KASAN_PML4_INDEX0]; - pml4[KERNEL_KASAN_PML4_INDEX1] = kpml4[KERNEL_KASAN_PML4_INDEX1]; + for (i = KERNEL_KASAN_PML4_FIRST; i <= KERNEL_KASAN_PML4_LAST; i++) { + pml4[i] = kpml4[i]; + } #endif pml4_entry_t *pml4u = pmap64_user_pml4(p, 0ULL); pml4u[KERNEL_DBLMAP_PML4_INDEX] = kpml4[KERNEL_DBLMAP_PML4_INDEX]; @@ -1404,20 +1533,20 @@ pmap_create_options( #if MACH_ASSERT p->pmap_stats_assert = TRUE; p->pmap_pid = 0; - strlcpy(p->pmap_procname, "", sizeof (p->pmap_procname)); + strlcpy(p->pmap_procname, "", sizeof(p->pmap_procname)); #endif /* MACH_ASSERT */ PMAP_TRACE(PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END, - VM_KERNEL_ADDRHIDE(p)); + VM_KERNEL_ADDRHIDE(p)); - return(p); + return p; } pmap_t pmap_create( - ledger_t ledger, - vm_map_size_t sz, - boolean_t is_64bit) + ledger_t ledger, + vm_map_size_t sz, + boolean_t is_64bit) { return pmap_create_options(ledger, sz, ((is_64bit) ? 
PMAP_CREATE_64BIT : 0)); } @@ -1435,118 +1564,121 @@ pmap_create( #if MACH_ASSERT struct { - uint64_t num_pmaps_checked; - - int phys_footprint_over; - ledger_amount_t phys_footprint_over_total; - ledger_amount_t phys_footprint_over_max; - int phys_footprint_under; - ledger_amount_t phys_footprint_under_total; - ledger_amount_t phys_footprint_under_max; - - int internal_over; - ledger_amount_t internal_over_total; - ledger_amount_t internal_over_max; - int internal_under; - ledger_amount_t internal_under_total; - ledger_amount_t internal_under_max; - - int internal_compressed_over; - ledger_amount_t internal_compressed_over_total; - ledger_amount_t internal_compressed_over_max; - int internal_compressed_under; - ledger_amount_t internal_compressed_under_total; - ledger_amount_t internal_compressed_under_max; - - int iokit_mapped_over; - ledger_amount_t iokit_mapped_over_total; - ledger_amount_t iokit_mapped_over_max; - int iokit_mapped_under; - ledger_amount_t iokit_mapped_under_total; - ledger_amount_t iokit_mapped_under_max; - - int alternate_accounting_over; - ledger_amount_t alternate_accounting_over_total; - ledger_amount_t alternate_accounting_over_max; - int alternate_accounting_under; - ledger_amount_t alternate_accounting_under_total; - ledger_amount_t alternate_accounting_under_max; - - int alternate_accounting_compressed_over; - ledger_amount_t alternate_accounting_compressed_over_total; - ledger_amount_t alternate_accounting_compressed_over_max; - int alternate_accounting_compressed_under; - ledger_amount_t alternate_accounting_compressed_under_total; - ledger_amount_t alternate_accounting_compressed_under_max; - - int page_table_over; - ledger_amount_t page_table_over_total; - ledger_amount_t page_table_over_max; - int page_table_under; - ledger_amount_t page_table_under_total; - ledger_amount_t page_table_under_max; - - int purgeable_volatile_over; - ledger_amount_t purgeable_volatile_over_total; - ledger_amount_t purgeable_volatile_over_max; - int purgeable_volatile_under; - ledger_amount_t purgeable_volatile_under_total; - ledger_amount_t purgeable_volatile_under_max; - - int purgeable_nonvolatile_over; - ledger_amount_t purgeable_nonvolatile_over_total; - ledger_amount_t purgeable_nonvolatile_over_max; - int purgeable_nonvolatile_under; - ledger_amount_t purgeable_nonvolatile_under_total; - ledger_amount_t purgeable_nonvolatile_under_max; - - int purgeable_volatile_compressed_over; - ledger_amount_t purgeable_volatile_compressed_over_total; - ledger_amount_t purgeable_volatile_compressed_over_max; - int purgeable_volatile_compressed_under; - ledger_amount_t purgeable_volatile_compressed_under_total; - ledger_amount_t purgeable_volatile_compressed_under_max; - - int purgeable_nonvolatile_compressed_over; - ledger_amount_t purgeable_nonvolatile_compressed_over_total; - ledger_amount_t purgeable_nonvolatile_compressed_over_max; - int purgeable_nonvolatile_compressed_under; - ledger_amount_t purgeable_nonvolatile_compressed_under_total; - ledger_amount_t purgeable_nonvolatile_compressed_under_max; - - int network_volatile_over; - ledger_amount_t network_volatile_over_total; - ledger_amount_t network_volatile_over_max; - int network_volatile_under; - ledger_amount_t network_volatile_under_total; - ledger_amount_t network_volatile_under_max; - - int network_nonvolatile_over; - ledger_amount_t network_nonvolatile_over_total; - ledger_amount_t network_nonvolatile_over_max; - int network_nonvolatile_under; - ledger_amount_t network_nonvolatile_under_total; - ledger_amount_t 
network_nonvolatile_under_max; - - int network_volatile_compressed_over; - ledger_amount_t network_volatile_compressed_over_total; - ledger_amount_t network_volatile_compressed_over_max; - int network_volatile_compressed_under; - ledger_amount_t network_volatile_compressed_under_total; - ledger_amount_t network_volatile_compressed_under_max; - - int network_nonvolatile_compressed_over; - ledger_amount_t network_nonvolatile_compressed_over_total; - ledger_amount_t network_nonvolatile_compressed_over_max; - int network_nonvolatile_compressed_under; - ledger_amount_t network_nonvolatile_compressed_under_total; - ledger_amount_t network_nonvolatile_compressed_under_max; + uint64_t num_pmaps_checked; + + int phys_footprint_over; + ledger_amount_t phys_footprint_over_total; + ledger_amount_t phys_footprint_over_max; + int phys_footprint_under; + ledger_amount_t phys_footprint_under_total; + ledger_amount_t phys_footprint_under_max; + + int internal_over; + ledger_amount_t internal_over_total; + ledger_amount_t internal_over_max; + int internal_under; + ledger_amount_t internal_under_total; + ledger_amount_t internal_under_max; + + int internal_compressed_over; + ledger_amount_t internal_compressed_over_total; + ledger_amount_t internal_compressed_over_max; + int internal_compressed_under; + ledger_amount_t internal_compressed_under_total; + ledger_amount_t internal_compressed_under_max; + + int iokit_mapped_over; + ledger_amount_t iokit_mapped_over_total; + ledger_amount_t iokit_mapped_over_max; + int iokit_mapped_under; + ledger_amount_t iokit_mapped_under_total; + ledger_amount_t iokit_mapped_under_max; + + int alternate_accounting_over; + ledger_amount_t alternate_accounting_over_total; + ledger_amount_t alternate_accounting_over_max; + int alternate_accounting_under; + ledger_amount_t alternate_accounting_under_total; + ledger_amount_t alternate_accounting_under_max; + + int alternate_accounting_compressed_over; + ledger_amount_t alternate_accounting_compressed_over_total; + ledger_amount_t alternate_accounting_compressed_over_max; + int alternate_accounting_compressed_under; + ledger_amount_t alternate_accounting_compressed_under_total; + ledger_amount_t alternate_accounting_compressed_under_max; + + int page_table_over; + ledger_amount_t page_table_over_total; + ledger_amount_t page_table_over_max; + int page_table_under; + ledger_amount_t page_table_under_total; + ledger_amount_t page_table_under_max; + + int purgeable_volatile_over; + ledger_amount_t purgeable_volatile_over_total; + ledger_amount_t purgeable_volatile_over_max; + int purgeable_volatile_under; + ledger_amount_t purgeable_volatile_under_total; + ledger_amount_t purgeable_volatile_under_max; + + int purgeable_nonvolatile_over; + ledger_amount_t purgeable_nonvolatile_over_total; + ledger_amount_t purgeable_nonvolatile_over_max; + int purgeable_nonvolatile_under; + ledger_amount_t purgeable_nonvolatile_under_total; + ledger_amount_t purgeable_nonvolatile_under_max; + + int purgeable_volatile_compressed_over; + ledger_amount_t purgeable_volatile_compressed_over_total; + ledger_amount_t purgeable_volatile_compressed_over_max; + int purgeable_volatile_compressed_under; + ledger_amount_t purgeable_volatile_compressed_under_total; + ledger_amount_t purgeable_volatile_compressed_under_max; + + int purgeable_nonvolatile_compressed_over; + ledger_amount_t purgeable_nonvolatile_compressed_over_total; + ledger_amount_t purgeable_nonvolatile_compressed_over_max; + int purgeable_nonvolatile_compressed_under; + ledger_amount_t 
purgeable_nonvolatile_compressed_under_total; + ledger_amount_t purgeable_nonvolatile_compressed_under_max; + + int network_volatile_over; + ledger_amount_t network_volatile_over_total; + ledger_amount_t network_volatile_over_max; + int network_volatile_under; + ledger_amount_t network_volatile_under_total; + ledger_amount_t network_volatile_under_max; + + int network_nonvolatile_over; + ledger_amount_t network_nonvolatile_over_total; + ledger_amount_t network_nonvolatile_over_max; + int network_nonvolatile_under; + ledger_amount_t network_nonvolatile_under_total; + ledger_amount_t network_nonvolatile_under_max; + + int network_volatile_compressed_over; + ledger_amount_t network_volatile_compressed_over_total; + ledger_amount_t network_volatile_compressed_over_max; + int network_volatile_compressed_under; + ledger_amount_t network_volatile_compressed_under_total; + ledger_amount_t network_volatile_compressed_under_max; + + int network_nonvolatile_compressed_over; + ledger_amount_t network_nonvolatile_compressed_over_total; + ledger_amount_t network_nonvolatile_compressed_over_max; + int network_nonvolatile_compressed_under; + ledger_amount_t network_nonvolatile_compressed_under_total; + ledger_amount_t network_nonvolatile_compressed_under_max; } pmap_ledgers_drift; static void pmap_check_ledgers(pmap_t pmap); #else /* MACH_ASSERT */ -static inline void pmap_check_ledgers(__unused pmap_t pmap) {} +static inline void +pmap_check_ledgers(__unused pmap_t pmap) +{ +} #endif /* MACH_ASSERT */ - + /* * Retire the given physical map from service. * Should only be called if the map contains @@ -1555,40 +1687,42 @@ static inline void pmap_check_ledgers(__unused pmap_t pmap) {} extern int vm_wired_objects_page_count; void -pmap_destroy(pmap_t p) +pmap_destroy(pmap_t p) { - int c; + int c; - if (p == PMAP_NULL) + if (p == PMAP_NULL) { return; + } PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDe(p)); + VM_KERNEL_ADDRHIDe(p)); - PMAP_LOCK(p); + PMAP_LOCK_EXCLUSIVE(p); c = --p->ref_count; pmap_assert((current_thread() && (current_thread()->map)) ? (current_thread()->map->pmap != p) : TRUE); if (c == 0) { - /* + /* * If some cpu is not using the physical pmap pointer that it * is supposed to be (see set_dirbase), we might be using the * pmap that is being destroyed! 
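The re-indented pmap_ledgers_drift structure above keeps three numbers per direction for each ledger: an event count, a running total, and the worst single excursion, split into "over" and "under". A minimal userspace sketch of that bookkeeping, with hypothetical names (drift_t, drift_note) standing in for the kernel's anonymous struct:

#include <stdint.h>
#include <stdio.h>

typedef int64_t ledger_amount_t;

/* One ledger's drift record: count, running total, and extreme,
 * kept separately for positive ("over") and negative ("under") drift. */
typedef struct {
	int             over, under;
	ledger_amount_t over_total, under_total;
	ledger_amount_t over_max, under_max;    /* worst single excursions */
} drift_t;

/* Fold one nonzero balance, observed at pmap-destroy time, into the record. */
static void
drift_note(drift_t *d, ledger_amount_t bal)
{
	if (bal > 0) {
		d->over++;
		d->over_total += bal;
		if (bal > d->over_max) {
			d->over_max = bal;
		}
	} else if (bal < 0) {
		d->under++;
		d->under_total += bal;
		if (bal < d->under_max) {
			d->under_max = bal;
		}
	}
}

int
main(void)
{
	drift_t d = {0};
	drift_note(&d, 4096);   /* one page over  */
	drift_note(&d, -8192);  /* two pages under */
	printf("over=%d/%lld under=%d/%lld\n",
	    d.over, (long long)d.over_total, d.under, (long long)d.under_total);
	return 0;
}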
Make sure we are * physically on the right pmap: */ PMAP_UPDATE_TLBS(p, 0x0ULL, 0xFFFFFFFFFFFFF000ULL); - if (pmap_pcid_ncpus) + if (pmap_pcid_ncpus) { pmap_destroy_pcid_sync(p); + } } - PMAP_UNLOCK(p); + PMAP_UNLOCK_EXCLUSIVE(p); if (c != 0) { PMAP_TRACE(PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END); pmap_assert(p == kernel_pmap); - return; /* still in use */ + return; /* still in use */ } /* @@ -1609,7 +1743,7 @@ pmap_destroy(pmap_t p) inuse_ptepages += p->pm_obj->resident_page_count; vm_object_deallocate(p->pm_obj); - OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count); + OSAddAtomic(-inuse_ptepages, &inuse_ptepages_count); PMAP_ZINFO_PFREE(p, inuse_ptepages * PAGE_SIZE); pmap_check_ledgers(p); @@ -1624,12 +1758,12 @@ pmap_destroy(pmap_t p) */ void -pmap_reference(pmap_t p) +pmap_reference(pmap_t p) { if (p != PMAP_NULL) { - PMAP_LOCK(p); + PMAP_LOCK_EXCLUSIVE(p); p->ref_count++; - PMAP_UNLOCK(p);; + PMAP_UNLOCK_EXCLUSIVE(p);; } } @@ -1639,21 +1773,19 @@ pmap_reference(pmap_t p) */ void pmap_remove_some_phys( - __unused pmap_t map, + __unused pmap_t map, __unused ppnum_t pn) { - /* Implement to support working set code */ - } void pmap_protect( - pmap_t map, - vm_map_offset_t sva, - vm_map_offset_t eva, - vm_prot_t prot) + pmap_t map, + vm_map_offset_t sva, + vm_map_offset_t eva, + vm_prot_t prot) { pmap_protect_options(map, sva, eva, prot, 0, NULL); } @@ -1665,34 +1797,35 @@ pmap_protect( * * VERY IMPORTANT: Will *NOT* increase permissions. * pmap_protect_options() should protect the range against any access types - * that are not in "prot" but it should never grant extra access. + * that are not in "prot" but it should never grant extra access. * For example, if "prot" is READ|EXECUTE, that means "remove write - * access" but it does *not* mean "add read and execute" access. + * access" but it does *not* mean "add read and execute" access. * VM relies on getting soft-faults to enforce extra checks (code * signing, for example), for example. * New access permissions are granted via pmap_enter() only. 
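The comment block above states the key invariant: pmap_protect_options() may clear PTE permission bits but never set them, leaving upgrades to soft faults through pmap_enter(). A simplified model of that remove-only rule (the flag values are illustrative, not the real PTE layout):

#include <stdint.h>
#include <assert.h>

#define PROT_READ  0x1
#define PROT_WRITE 0x2
#define PROT_EXEC  0x4

/* Return the new permission set: the old bits intersected with the
 * request.  Bits absent from "prot" are removed; bits absent from
 * "old" are never added, no matter what "prot" asks for. */
static uint32_t
protect_downgrade_only(uint32_t old, uint32_t prot)
{
	return old & prot;
}

int
main(void)
{
	/* R/W mapping, caller asks for R+X: write is dropped ... */
	uint32_t np = protect_downgrade_only(PROT_READ | PROT_WRITE,
	    PROT_READ | PROT_EXEC);
	assert(np == PROT_READ);        /* ... and execute is NOT granted. */
	return 0;
}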
*/ void pmap_protect_options( - pmap_t map, - vm_map_offset_t sva, - vm_map_offset_t eva, - vm_prot_t prot, - unsigned int options, - void *arg) + pmap_t map, + vm_map_offset_t sva, + vm_map_offset_t eva, + vm_prot_t prot, + unsigned int options, + void *arg) { - pt_entry_t *pde; - pt_entry_t *spte, *epte; + pt_entry_t *pde; + pt_entry_t *spte, *epte; vm_map_offset_t lva; vm_map_offset_t orig_sva; boolean_t set_NX; int num_found = 0; - boolean_t is_ept; + boolean_t is_ept; pmap_intr_assert(); - if (map == PMAP_NULL) + if (map == PMAP_NULL) { return; + } if (prot == VM_PROT_NONE) { pmap_remove_options(map, sva, eva, options); @@ -1700,51 +1833,58 @@ pmap_protect_options( } PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START, - VM_KERNEL_ADDRHIDE(map), VM_KERNEL_ADDRHIDE(sva), - VM_KERNEL_ADDRHIDE(eva)); + VM_KERNEL_ADDRHIDE(map), VM_KERNEL_ADDRHIDE(sva), + VM_KERNEL_ADDRHIDE(eva)); - if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !map->nx_enabled) + if (prot & VM_PROT_EXECUTE) { set_NX = FALSE; - else + } else { set_NX = TRUE; + } +#if DEVELOPMENT || DEBUG + if (__improbable(set_NX && (!nx_enabled || !map->nx_enabled))) { + set_NX = FALSE; + } +#endif is_ept = is_ept_pmap(map); - - PMAP_LOCK(map); + PMAP_LOCK_EXCLUSIVE(map); orig_sva = sva; while (sva < eva) { - lva = (sva + pde_mapped_size) & ~(pde_mapped_size - 1); - if (lva > eva) + lva = (sva + PDE_MAPPED_SIZE) & ~(PDE_MAPPED_SIZE - 1); + if (lva > eva) { lva = eva; + } pde = pmap_pde(map, sva); if (pde && (*pde & PTE_VALID_MASK(is_ept))) { if (*pde & PTE_PS) { /* superpage */ spte = pde; - epte = spte+1; /* excluded */ + epte = spte + 1; /* excluded */ } else { - spte = pmap_pte(map, (sva & ~(pde_mapped_size - 1))); + spte = pmap_pte(map, (sva & ~(PDE_MAPPED_SIZE - 1))); spte = &spte[ptenum(sva)]; epte = &spte[intel_btop(lva - sva)]; } for (; spte < epte; spte++) { - if (!(*spte & PTE_VALID_MASK(is_ept))) + if (!(*spte & PTE_VALID_MASK(is_ept))) { continue; + } if (is_ept) { - if (! (prot & VM_PROT_READ)) { + if (!(prot & VM_PROT_READ)) { pmap_update_pte(spte, PTE_READ(is_ept), 0); } } - if (! 
(prot & VM_PROT_WRITE)) { + if (!(prot & VM_PROT_WRITE)) { pmap_update_pte(spte, PTE_WRITE(is_ept), 0); } #if DEVELOPMENT || DEBUG else if ((options & PMAP_OPTIONS_PROTECT_IMMEDIATE) && - map == kernel_pmap) { + map == kernel_pmap) { pmap_update_pte(spte, 0, PTE_WRITE(is_ept)); } #endif /* DEVELOPMENT || DEBUG */ @@ -1762,39 +1902,41 @@ pmap_protect_options( sva = lva; } if (num_found) { - if (options & PMAP_OPTIONS_NOFLUSH) + if (options & PMAP_OPTIONS_NOFLUSH) { PMAP_UPDATE_TLBS_DELAYED(map, orig_sva, eva, (pmap_flush_context *)arg); - else + } else { PMAP_UPDATE_TLBS(map, orig_sva, eva); + } } - PMAP_UNLOCK(map); - PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END); + PMAP_UNLOCK_EXCLUSIVE(map); + PMAP_TRACE(PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END); } /* Map a (possibly) autogenned block */ kern_return_t pmap_map_block( - pmap_t pmap, - addr64_t va, - ppnum_t pa, - uint32_t size, - vm_prot_t prot, - int attr, - __unused unsigned int flags) + pmap_t pmap, + addr64_t va, + ppnum_t pa, + uint32_t size, + vm_prot_t prot, + int attr, + __unused unsigned int flags) { kern_return_t kr; - addr64_t original_va = va; + addr64_t original_va = va; uint32_t page; - int cur_page_size; + int cur_page_size; - if (attr & VM_MEM_SUPERPAGE) + if (attr & VM_MEM_SUPERPAGE) { cur_page_size = SUPERPAGE_SIZE; - else + } else { cur_page_size = PAGE_SIZE; + } - for (page = 0; page < size; page+=cur_page_size/PAGE_SIZE) { + for (page = 0; page < size; page += cur_page_size / PAGE_SIZE) { kr = pmap_enter(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE); if (kr != KERN_SUCCESS) { @@ -1803,16 +1945,16 @@ pmap_map_block( * removing the mappings is correct. */ panic("%s: failed pmap_enter, " - "pmap=%p, va=%#llx, pa=%u, size=%u, prot=%#x, flags=%#x", - __FUNCTION__, - pmap, va, pa, size, prot, flags); + "pmap=%p, va=%#llx, pa=%u, size=%u, prot=%#x, flags=%#x", + __FUNCTION__, + pmap, va, pa, size, prot, flags); pmap_remove(pmap, original_va, va - original_va); return kr; } va += cur_page_size; - pa+=cur_page_size/PAGE_SIZE; + pa += cur_page_size / PAGE_SIZE; } return KERN_SUCCESS; @@ -1820,16 +1962,16 @@ pmap_map_block( kern_return_t pmap_expand_pml4( - pmap_t map, - vm_map_offset_t vaddr, + pmap_t map, + vm_map_offset_t vaddr, unsigned int options) { - vm_page_t m; - pmap_paddr_t pa; - uint64_t i; - ppnum_t pn; - pml4_entry_t *pml4p; - boolean_t is_ept = is_ept_pmap(map); + vm_page_t m; + pmap_paddr_t pa; + uint64_t i; + ppnum_t pn; + pml4_entry_t *pml4p; + boolean_t is_ept = is_ept_pmap(map); DBG("pmap_expand_pml4(%p,%p)\n", map, (void *)vaddr); @@ -1841,8 +1983,9 @@ pmap_expand_pml4( * Allocate a VM page for the pml4 page */ while ((m = vm_page_grab()) == VM_PAGE_NULL) { - if (options & PMAP_EXPAND_OPTIONS_NOWAIT) + if (options & PMAP_EXPAND_OPTIONS_NOWAIT) { return KERN_RESOURCE_SHORTAGE; + } VM_PAGE_WAIT(); } /* @@ -1862,33 +2005,33 @@ pmap_expand_pml4( vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE); vm_page_unlock_queues(); - OSAddAtomic(1, &inuse_ptepages_count); - OSAddAtomic64(1, &alloc_ptepages_count); + OSAddAtomic(1, &inuse_ptepages_count); + OSAddAtomic64(1, &alloc_ptepages_count); PMAP_ZINFO_PALLOC(map, PAGE_SIZE); /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */ vm_object_lock(map->pm_obj_pml4); - PMAP_LOCK(map); + PMAP_LOCK_EXCLUSIVE(map); /* * See if someone else expanded us first */ if (pmap64_pdpt(map, vaddr) != PDPT_ENTRY_NULL) { - PMAP_UNLOCK(map); + PMAP_UNLOCK_EXCLUSIVE(map); vm_object_unlock(map->pm_obj_pml4); VM_PAGE_FREE(m); - OSAddAtomic(-1, &inuse_ptepages_count); + 
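pmap_map_block() above walks "size" (given in 4K pages) one mapping at a time, where a VM_MEM_SUPERPAGE attribute widens each step from a single page to a whole superpage. A sketch of just that stride arithmetic, assuming 4 KiB pages and 2 MiB superpages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE      4096u
#define SUPERPAGE_SIZE (2u * 1024 * 1024)

/* Count how many pmap_enter() calls a block mapping of "size" 4K pages
 * needs; a superpage mapping covers 512 pages per call. */
static unsigned
enters_needed(uint32_t size, int superpage)
{
	uint32_t cur_page_size = superpage ? SUPERPAGE_SIZE : PAGE_SIZE;
	unsigned calls = 0;

	for (uint32_t page = 0; page < size;
	    page += cur_page_size / PAGE_SIZE) {
		calls++;        /* one pmap_enter() per step */
	}
	return calls;
}

int
main(void)
{
	printf("4K steps: %u\n", enters_needed(1024, 0));       /* 1024 */
	printf("2M steps: %u\n", enters_needed(1024, 1));       /* 2    */
	return 0;
}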
OSAddAtomic(-1, &inuse_ptepages_count); PMAP_ZINFO_PFREE(map, PAGE_SIZE); return KERN_SUCCESS; } #if 0 /* DEBUG */ - if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE)) { - panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n", - map, map->pm_obj_pml4, vaddr, i); - } + if (0 != vm_page_lookup(map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE)) { + panic("pmap_expand_pml4: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n", + map, map->pm_obj_pml4, vaddr, i); + } #endif vm_page_insert_wired(m, map->pm_obj_pml4, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE); vm_object_unlock(map->pm_obj_pml4); @@ -1899,18 +2042,18 @@ pmap_expand_pml4( pml4p = pmap64_pml4(map, vaddr); /* refetch under lock */ pmap_store_pte(pml4p, pa_to_pte(pa) - | PTE_READ(is_ept) - | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) - | PTE_WRITE(is_ept)); - pml4_entry_t *upml4p; + | PTE_READ(is_ept) + | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) + | PTE_WRITE(is_ept)); + pml4_entry_t *upml4p; upml4p = pmap64_user_pml4(map, vaddr); pmap_store_pte(upml4p, pa_to_pte(pa) - | PTE_READ(is_ept) - | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) - | PTE_WRITE(is_ept)); + | PTE_READ(is_ept) + | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) + | PTE_WRITE(is_ept)); - PMAP_UNLOCK(map); + PMAP_UNLOCK_EXCLUSIVE(map); return KERN_SUCCESS; } @@ -1918,27 +2061,29 @@ pmap_expand_pml4( kern_return_t pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options) { - vm_page_t m; - pmap_paddr_t pa; - uint64_t i; - ppnum_t pn; - pdpt_entry_t *pdptp; - boolean_t is_ept = is_ept_pmap(map); + vm_page_t m; + pmap_paddr_t pa; + uint64_t i; + ppnum_t pn; + pdpt_entry_t *pdptp; + boolean_t is_ept = is_ept_pmap(map); DBG("pmap_expand_pdpt(%p,%p)\n", map, (void *)vaddr); while ((pdptp = pmap64_pdpt(map, vaddr)) == PDPT_ENTRY_NULL) { kern_return_t pep4kr = pmap_expand_pml4(map, vaddr, options); - if (pep4kr != KERN_SUCCESS) + if (pep4kr != KERN_SUCCESS) { return pep4kr; + } } /* * Allocate a VM page for the pdpt page */ while ((m = vm_page_grab()) == VM_PAGE_NULL) { - if (options & PMAP_EXPAND_OPTIONS_NOWAIT) + if (options & PMAP_EXPAND_OPTIONS_NOWAIT) { return KERN_RESOURCE_SHORTAGE; + } VM_PAGE_WAIT(); } @@ -1959,33 +2104,33 @@ pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options) vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE); vm_page_unlock_queues(); - OSAddAtomic(1, &inuse_ptepages_count); - OSAddAtomic64(1, &alloc_ptepages_count); + OSAddAtomic(1, &inuse_ptepages_count); + OSAddAtomic64(1, &alloc_ptepages_count); PMAP_ZINFO_PALLOC(map, PAGE_SIZE); /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */ vm_object_lock(map->pm_obj_pdpt); - PMAP_LOCK(map); + PMAP_LOCK_EXCLUSIVE(map); /* * See if someone else expanded us first */ - if (pmap64_pde(map, vaddr) != PD_ENTRY_NULL) { - PMAP_UNLOCK(map); + if (pmap_pde(map, vaddr) != PD_ENTRY_NULL) { + PMAP_UNLOCK_EXCLUSIVE(map); vm_object_unlock(map->pm_obj_pdpt); VM_PAGE_FREE(m); - OSAddAtomic(-1, &inuse_ptepages_count); + OSAddAtomic(-1, &inuse_ptepages_count); PMAP_ZINFO_PFREE(map, PAGE_SIZE); return KERN_SUCCESS; } #if 0 /* DEBUG */ - if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE)) { - panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n", - map, map->pm_obj_pdpt, vaddr, i); - } + if (0 != vm_page_lookup(map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE)) { + panic("pmap_expand_pdpt: obj not empty, pmap %p pm_obj %p vaddr 0x%llx i 0x%llx\n", + map, 
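Both expansion paths above follow the same shape: allocate a page outside the lock (allocation may block), take the object lock then the pmap lock, re-check whether another thread already installed the level, and if so free the page and still report success. A condensed pthread-based model of that optimistic allocate/recheck pattern (level_install and table_slot are invented names for the sketch):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;
static void *table_slot;        /* NULL until some thread installs a page */

/* Allocate before locking, then recheck under the lock; the loser
 * frees its page and still returns success, since the level exists. */
static int
level_install(void)
{
	void *page = calloc(1, 4096);
	if (page == NULL) {
		return -1;      /* KERN_RESOURCE_SHORTAGE analogue */
	}

	pthread_mutex_lock(&pmap_lock);
	if (table_slot != NULL) {       /* someone else expanded us first */
		pthread_mutex_unlock(&pmap_lock);
		free(page);
		return 0;       /* KERN_SUCCESS: level now exists */
	}
	table_slot = page;              /* publish the new level */
	pthread_mutex_unlock(&pmap_lock);
	return 0;
}

int
main(void)
{
	level_install();
	level_install();        /* second call hits the recheck path */
	printf("installed: %p\n", table_slot);
	return 0;
}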
map->pm_obj_pdpt, vaddr, i); + } #endif vm_page_insert_wired(m, map->pm_obj_pdpt, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE); vm_object_unlock(map->pm_obj_pdpt); @@ -1996,14 +2141,13 @@ pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options) pdptp = pmap64_pdpt(map, vaddr); /* refetch under lock */ pmap_store_pte(pdptp, pa_to_pte(pa) - | PTE_READ(is_ept) - | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) - | PTE_WRITE(is_ept)); + | PTE_READ(is_ept) + | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) + | PTE_WRITE(is_ept)); - PMAP_UNLOCK(map); + PMAP_UNLOCK_EXCLUSIVE(map); return KERN_SUCCESS; - } @@ -2025,44 +2169,45 @@ pmap_expand_pdpt(pmap_t map, vm_map_offset_t vaddr, unsigned int options) */ kern_return_t pmap_expand( - pmap_t map, - vm_map_offset_t vaddr, + pmap_t map, + vm_map_offset_t vaddr, unsigned int options) { - pt_entry_t *pdp; - vm_page_t m; - pmap_paddr_t pa; - uint64_t i; + pt_entry_t *pdp; + vm_page_t m; + pmap_paddr_t pa; + uint64_t i; ppnum_t pn; - boolean_t is_ept = is_ept_pmap(map); + boolean_t is_ept = is_ept_pmap(map); /* - * For the kernel, the virtual address must be in or above the basement + * For the kernel, the virtual address must be in or above the basement * which is for kexts and is in the 512GB immediately below the kernel.. * XXX - should use VM_MIN_KERNEL_AND_KEXT_ADDRESS not KERNEL_BASEMENT */ - if (__improbable(map == kernel_pmap && - !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))) { + if (__improbable(map == kernel_pmap && + !(vaddr >= KERNEL_BASEMENT && vaddr <= VM_MAX_KERNEL_ADDRESS))) { if ((options & PMAP_EXPAND_OPTIONS_ALIASMAP) == 0) { panic("pmap_expand: bad vaddr 0x%llx for kernel pmap", vaddr); } } - - while ((pdp = pmap64_pde(map, vaddr)) == PD_ENTRY_NULL) { + while ((pdp = pmap_pde(map, vaddr)) == PD_ENTRY_NULL) { assert((options & PMAP_EXPAND_OPTIONS_ALIASMAP) == 0); kern_return_t pepkr = pmap_expand_pdpt(map, vaddr, options); - if (pepkr != KERN_SUCCESS) + if (pepkr != KERN_SUCCESS) { return pepkr; + } } /* * Allocate a VM page for the pde entries. 
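pmap_expand() above loops on "while ((pdp = pmap_pde(...)) == PD_ENTRY_NULL)", and pmap_expand_pdpt() loops the same way on the PML4, so a miss at a low level pulls in every missing upper level first. A compact model of that ensure-parent-then-self loop over a three-level radix table (the layout is purely illustrative):

#include <stdio.h>

#define LEVELS 3

static int present[LEVELS];     /* 1 if the level's table page exists */

/* Look up level "l"; -1 means the entry is missing (PD_ENTRY_NULL). */
static int
lookup(int l)
{
	return present[l] ? l : -1;
}

/* Ensure level "l" exists, expanding upper levels first, the way
 * pmap_expand() keeps calling pmap_expand_pdpt() until pmap_pde() hits. */
static void
expand(int l)
{
	while (lookup(l) == -1) {
		if (l + 1 < LEVELS && lookup(l + 1) == -1) {
			expand(l + 1);  /* pull in the parent level */
			continue;
		}
		present[l] = 1;         /* allocate + install this level */
		printf("installed level %d\n", l);
	}
}

int
main(void)
{
	expand(0);      /* installs levels 2, 1, 0 in order */
	return 0;
}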
*/ while ((m = vm_page_grab()) == VM_PAGE_NULL) { - if (options & PMAP_EXPAND_OPTIONS_NOWAIT) + if (options & PMAP_EXPAND_OPTIONS_NOWAIT) { return KERN_RESOURCE_SHORTAGE; + } VM_PAGE_WAIT(); } @@ -2083,34 +2228,34 @@ pmap_expand( vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE); vm_page_unlock_queues(); - OSAddAtomic(1, &inuse_ptepages_count); - OSAddAtomic64(1, &alloc_ptepages_count); + OSAddAtomic(1, &inuse_ptepages_count); + OSAddAtomic64(1, &alloc_ptepages_count); PMAP_ZINFO_PALLOC(map, PAGE_SIZE); /* Take the oject lock (mutex) before the PMAP_LOCK (spinlock) */ vm_object_lock(map->pm_obj); - PMAP_LOCK(map); + PMAP_LOCK_EXCLUSIVE(map); /* * See if someone else expanded us first */ if (pmap_pte(map, vaddr) != PT_ENTRY_NULL) { - PMAP_UNLOCK(map); + PMAP_UNLOCK_EXCLUSIVE(map); vm_object_unlock(map->pm_obj); VM_PAGE_FREE(m); - OSAddAtomic(-1, &inuse_ptepages_count);//todo replace all with inlines + OSAddAtomic(-1, &inuse_ptepages_count); //todo replace all with inlines PMAP_ZINFO_PFREE(map, PAGE_SIZE); return KERN_SUCCESS; } #if 0 /* DEBUG */ - if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE)) { - panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n", - map, map->pm_obj, vaddr, i); - } + if (0 != vm_page_lookup(map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE)) { + panic("pmap_expand: obj not empty, pmap 0x%x pm_obj 0x%x vaddr 0x%llx i 0x%llx\n", + map, map->pm_obj, vaddr, i); + } #endif vm_page_insert_wired(m, map->pm_obj, (vm_object_offset_t)i * PAGE_SIZE, VM_KERN_MEMORY_PTE); vm_object_unlock(map->pm_obj); @@ -2120,11 +2265,11 @@ pmap_expand( */ pdp = pmap_pde(map, vaddr); pmap_store_pte(pdp, pa_to_pte(pa) - | PTE_READ(is_ept) - | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) - | PTE_WRITE(is_ept)); + | PTE_READ(is_ept) + | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) + | PTE_WRITE(is_ept)); - PMAP_UNLOCK(map); + PMAP_UNLOCK_EXCLUSIVE(map); return KERN_SUCCESS; } @@ -2138,67 +2283,69 @@ void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr) { ppnum_t pn; - pt_entry_t *pte; - boolean_t is_ept = is_ept_pmap(pmap); + pt_entry_t *pte; + boolean_t is_ept = is_ept_pmap(pmap); - PMAP_LOCK(pmap); + PMAP_LOCK_EXCLUSIVE(pmap); - if(pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) { - if (!pmap_next_page_hi(&pn)) + if (pmap64_pdpt(pmap, vaddr) == PDPT_ENTRY_NULL) { + if (!pmap_next_page_hi(&pn)) { panic("pmap_pre_expand"); + } pmap_zero_page(pn); pte = pmap64_pml4(pmap, vaddr); pmap_store_pte(pte, pa_to_pte(i386_ptob(pn)) - | PTE_READ(is_ept) - | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) - | PTE_WRITE(is_ept)); + | PTE_READ(is_ept) + | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) + | PTE_WRITE(is_ept)); pte = pmap64_user_pml4(pmap, vaddr); pmap_store_pte(pte, pa_to_pte(i386_ptob(pn)) - | PTE_READ(is_ept) - | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) - | PTE_WRITE(is_ept)); - + | PTE_READ(is_ept) + | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) + | PTE_WRITE(is_ept)); } - if(pmap64_pde(pmap, vaddr) == PD_ENTRY_NULL) { - if (!pmap_next_page_hi(&pn)) + if (pmap_pde(pmap, vaddr) == PD_ENTRY_NULL) { + if (!pmap_next_page_hi(&pn)) { panic("pmap_pre_expand"); + } pmap_zero_page(pn); pte = pmap64_pdpt(pmap, vaddr); pmap_store_pte(pte, pa_to_pte(i386_ptob(pn)) - | PTE_READ(is_ept) - | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) - | PTE_WRITE(is_ept)); + | PTE_READ(is_ept) + | (is_ept ? 
INTEL_EPT_EX : INTEL_PTE_USER) + | PTE_WRITE(is_ept)); } - if(pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) { - if (!pmap_next_page_hi(&pn)) + if (pmap_pte(pmap, vaddr) == PT_ENTRY_NULL) { + if (!pmap_next_page_hi(&pn)) { panic("pmap_pre_expand"); + } pmap_zero_page(pn); - pte = pmap64_pde(pmap, vaddr); + pte = pmap_pde(pmap, vaddr); pmap_store_pte(pte, pa_to_pte(i386_ptob(pn)) - | PTE_READ(is_ept) - | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) - | PTE_WRITE(is_ept)); + | PTE_READ(is_ept) + | (is_ept ? INTEL_EPT_EX : INTEL_PTE_USER) + | PTE_WRITE(is_ept)); } - PMAP_UNLOCK(pmap); + PMAP_UNLOCK_EXCLUSIVE(pmap); } /* * pmap_sync_page_data_phys(ppnum_t pa) - * + * * Invalidates all of the instruction cache on a physical page and * pushes any dirty data from the data cache for the same physical page * Not required in i386. @@ -2211,7 +2358,7 @@ pmap_sync_page_data_phys(__unused ppnum_t pa) /* * pmap_sync_page_attributes_phys(ppnum_t pa) - * + * * Write back and invalidate all cachelines on a physical page. */ void @@ -2224,8 +2371,8 @@ pmap_sync_page_attributes_phys(ppnum_t pa) #ifdef CURRENTLY_UNUSED_AND_UNTESTED -int collect_ref; -int collect_unref; +int collect_ref; +int collect_unref; /* * Routine: pmap_collect @@ -2240,18 +2387,20 @@ int collect_unref; */ void pmap_collect( - pmap_t p) + pmap_t p) { - pt_entry_t *pdp, *ptp; - pt_entry_t *eptp; - int wired; - boolean_t is_ept; + pt_entry_t *pdp, *ptp; + pt_entry_t *eptp; + int wired; + boolean_t is_ept; - if (p == PMAP_NULL) + if (p == PMAP_NULL) { return; + } - if (p == kernel_pmap) + if (p == kernel_pmap) { return; + } is_ept = is_ept_pmap(p); @@ -2261,9 +2410,8 @@ pmap_collect( PMAP_LOCK(p); for (pdp = (pt_entry_t *)p->dirbase; - pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI+1)]; - pdp++) - { + pdp < (pt_entry_t *)&p->dirbase[(UMAXPTDI + 1)]; + pdp++) { if (*pdp & PTE_VALID_MASK(is_ept)) { if (*pdp & PTE_REF(is_ept)) { pmap_store_pte(pdp, *pdp & ~PTE_REF(is_ept)); @@ -2291,10 +2439,10 @@ pmap_collect( /* * Remove the virtual addresses mapped by this pte page. */ - pmap_remove_range(p, - pdetova(pdp - (pt_entry_t *)p->dirbase), - ptp, - eptp); + pmap_remove_range(p, + pdetova(pdp - (pt_entry_t *)p->dirbase), + ptp, + eptp); /* * Invalidate the page directory pointer. 
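pmap_collect() above (kept under CURRENTLY_UNUSED_AND_UNTESTED, and continuing in the next hunk) garbage-collects page-table pages: it clears the referenced bit on first sight, and on a later pass frees any PTE page containing no wired entries. A toy version of the wired-scan that decides whether a PTE page may be reclaimed, with made-up bit positions:

#include <stdint.h>
#include <stdio.h>

#define PTES_PER_PAGE 512
#define PTE_VALID 0x1ull
#define PTE_WIRED 0x200ull      /* illustrative bit positions */

/* A PTE page may be reclaimed only if no entry in it is wired;
 * pmap_collect() makes a scan like this before pmap_remove_range(). */
static int
pte_page_collectable(const uint64_t pte[PTES_PER_PAGE])
{
	for (int i = 0; i < PTES_PER_PAGE; i++) {
		if ((pte[i] & (PTE_VALID | PTE_WIRED)) ==
		    (PTE_VALID | PTE_WIRED)) {
			return 0;       /* wired mapping: keep the page */
		}
	}
	return 1;
}

int
main(void)
{
	uint64_t page[PTES_PER_PAGE] = {0};
	printf("empty page collectable: %d\n", pte_page_collectable(page));
	page[7] = PTE_VALID | PTE_WIRED;
	printf("wired page collectable: %d\n", pte_page_collectable(page));
	return 0;
}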
@@ -2311,15 +2459,16 @@ pmap_collect( vm_object_lock(p->pm_obj); - m = vm_page_lookup(p->pm_obj,(vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE); - if (m == VM_PAGE_NULL) + m = vm_page_lookup(p->pm_obj, (vm_object_offset_t)(pdp - (pt_entry_t *)&p->dirbase[0]) * PAGE_SIZE); + if (m == VM_PAGE_NULL) { panic("pmap_collect: pte page not in object"); + } vm_object_unlock(p->pm_obj); VM_PAGE_FREE(m); - OSAddAtomic(-1, &inuse_ptepages_count); + OSAddAtomic(-1, &inuse_ptepages_count); PMAP_ZINFO_PFREE(p, PAGE_SIZE); } @@ -2340,8 +2489,8 @@ void pmap_copy_page(ppnum_t src, ppnum_t dst) { bcopy_phys((addr64_t)i386_ptob(src), - (addr64_t)i386_ptob(dst), - PAGE_SIZE); + (addr64_t)i386_ptob(dst), + PAGE_SIZE); } @@ -2361,28 +2510,28 @@ pmap_copy_page(ppnum_t src, ppnum_t dst) */ void pmap_pageable( - __unused pmap_t pmap, - __unused vm_map_offset_t start_addr, - __unused vm_map_offset_t end_addr, - __unused boolean_t pageable) + __unused pmap_t pmap, + __unused vm_map_offset_t start_addr, + __unused vm_map_offset_t end_addr, + __unused boolean_t pageable) { -#ifdef lint +#ifdef lint pmap++; start_addr++; end_addr++; pageable++; -#endif /* lint */ +#endif /* lint */ } -void -invalidate_icache(__unused vm_offset_t addr, - __unused unsigned cnt, - __unused int phys) +void +invalidate_icache(__unused vm_offset_t addr, + __unused unsigned cnt, + __unused int phys) { return; } -void -flush_dcache(__unused vm_offset_t addr, - __unused unsigned count, - __unused int phys) +void +flush_dcache(__unused vm_offset_t addr, + __unused unsigned count, + __unused int phys) { return; } @@ -2394,39 +2543,42 @@ flush_dcache(__unused vm_offset_t addr, extern kern_return_t dtrace_copyio_preflight(addr64_t); extern kern_return_t dtrace_copyio_postflight(addr64_t); -kern_return_t dtrace_copyio_preflight(__unused addr64_t va) +kern_return_t +dtrace_copyio_preflight(__unused addr64_t va) { thread_t thread = current_thread(); uint64_t ccr3; - if (current_map() == kernel_map) + if (current_map() == kernel_map) { return KERN_FAILURE; - else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE)) + } else if (((ccr3 = get_cr3_base()) != thread->map->pmap->pm_cr3) && (no_shared_cr3 == FALSE)) { return KERN_FAILURE; - else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3)) + } else if (no_shared_cr3 && (ccr3 != kernel_pmap->pm_cr3)) { return KERN_FAILURE; - else + } else { return KERN_SUCCESS; + } } - -kern_return_t dtrace_copyio_postflight(__unused addr64_t va) + +kern_return_t +dtrace_copyio_postflight(__unused addr64_t va) { return KERN_SUCCESS; } #endif /* CONFIG_DTRACE */ #include -#if MACH_VM_DEBUG +#if MACH_VM_DEBUG #include int pmap_list_resident_pages( - __unused pmap_t pmap, - __unused vm_offset_t *listp, - __unused int space) + __unused pmap_t pmap, + __unused vm_offset_t *listp, + __unused int space) { return 0; } -#endif /* MACH_VM_DEBUG */ +#endif /* MACH_VM_DEBUG */ #if CONFIG_COREDUMP @@ -2438,9 +2590,10 @@ coredumpok(__unused vm_map_t map, __unused vm_offset_t va) pt_entry_t *ptep; ptep = pmap_pte(map->pmap, va); - if (0 == ptep) + if (0 == ptep) { return FALSE; - return ((*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)); + } + return (*ptep & (INTEL_PTE_NCACHE | INTEL_PTE_WIRED)) != (INTEL_PTE_NCACHE | INTEL_PTE_WIRED); #else return TRUE; #endif @@ -2452,14 +2605,17 @@ phys_page_exists(ppnum_t pn) { assert(pn != vm_page_fictitious_addr); - if (!pmap_initialized) + if (!pmap_initialized) { return TRUE; + } - if (pn == 
vm_page_guard_addr) + if (pn == vm_page_guard_addr) { return FALSE; + } - if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) + if (!IS_MANAGED_PAGE(ppn_to_pai(pn))) { return FALSE; + } return TRUE; } @@ -2469,12 +2625,9 @@ phys_page_exists(ppnum_t pn) void pmap_switch(pmap_t tpmap) { - spl_t s; - PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(tpmap)); - s = splhigh(); /* Make sure interruptions are disabled */ + assert(ml_get_interrupts_enabled() == FALSE); set_dirbase(tpmap, current_thread(), cpu_number()); - splx(s); PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_END); } @@ -2484,12 +2637,14 @@ pmap_switch(pmap_t tpmap) * the specified pmap */ void -pmap_disable_NX(pmap_t pmap) +pmap_disable_NX(__unused pmap_t pmap) { - pmap->nx_enabled = 0; +#if DEVELOPMENT || DEBUG + pmap->nx_enabled = 0; +#endif } -void +void pt_fake_zone_init(int zone_index) { pt_fake_zone_index = zone_index; @@ -2497,22 +2652,22 @@ pt_fake_zone_init(int zone_index) void pt_fake_zone_info( - int *count, - vm_size_t *cur_size, - vm_size_t *max_size, - vm_size_t *elem_size, - vm_size_t *alloc_size, - uint64_t *sum_size, - int *collectable, - int *exhaustable, - int *caller_acct) + int *count, + vm_size_t *cur_size, + vm_size_t *max_size, + vm_size_t *elem_size, + vm_size_t *alloc_size, + uint64_t *sum_size, + int *collectable, + int *exhaustable, + int *caller_acct) { - *count = inuse_ptepages_count; + *count = inuse_ptepages_count; *cur_size = PAGE_SIZE * inuse_ptepages_count; *max_size = PAGE_SIZE * (inuse_ptepages_count + - vm_page_inactive_count + - vm_page_active_count + - vm_page_free_count); + vm_page_inactive_count + + vm_page_active_count + + vm_page_free_count); *elem_size = PAGE_SIZE; *alloc_size = PAGE_SIZE; *sum_size = alloc_ptepages_count * PAGE_SIZE; @@ -2530,19 +2685,53 @@ pmap_flush_context_init(pmap_flush_context *pfc) pfc->pfc_invalid_global = 0; } +static bool +pmap_tlbi_response(uint32_t lcpu, uint32_t rcpu, bool ngflush) +{ + bool responded = false; + bool gflushed = (cpu_datap(rcpu)->cpu_tlb_invalid_global_count != + cpu_datap(lcpu)->cpu_tlb_gen_counts_global[rcpu]); + + if (ngflush) { + if (gflushed) { + responded = true; + } + } else { + if (gflushed) { + responded = true; + } else { + bool lflushed = (cpu_datap(rcpu)->cpu_tlb_invalid_local_count != + cpu_datap(lcpu)->cpu_tlb_gen_counts_local[rcpu]); + if (lflushed) { + responded = true; + } + } + } + + if (responded == false) { + if ((cpu_datap(rcpu)->cpu_tlb_invalid == 0) || + !CPU_CR3_IS_ACTIVE(rcpu) || + !cpu_is_running(rcpu)) { + responded = true; + } + } + return responded; +} + extern uint64_t TLBTimeOut; void pmap_flush( pmap_flush_context *pfc) { - unsigned int my_cpu; - unsigned int cpu; - cpumask_t cpu_bit; - cpumask_t cpus_to_respond = 0; - cpumask_t cpus_to_signal = 0; - cpumask_t cpus_signaled = 0; - boolean_t flush_self = FALSE; - uint64_t deadline; + unsigned int my_cpu; + unsigned int cpu; + cpumask_t cpu_bit; + cpumask_t cpus_to_respond = 0; + cpumask_t cpus_to_signal = 0; + cpumask_t cpus_signaled = 0; + boolean_t flush_self = FALSE; + uint64_t deadline; + bool need_global_flush = false; mp_disable_preemption(); @@ -2550,21 +2739,24 @@ pmap_flush( cpus_to_signal = pfc->pfc_cpus; PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_START, - NULL, cpus_to_signal); + NULL, cpus_to_signal); for (cpu = 0, cpu_bit = 1; cpu < real_ncpus && cpus_to_signal; cpu++, cpu_bit <<= 1) { - if (cpus_to_signal & cpu_bit) { - cpus_to_signal &= ~cpu_bit; - if (!cpu_is_running(cpu)) + if 
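The new pmap_tlbi_response() above replaces the old "did the flag clear" test with per-CPU generation counters: the initiator snapshots each responder's count when posting the request, and any later change counts as an acknowledgment even if the responder has since moved on to another flush. A two-counter sketch of that handshake using C11 atomics (field names abbreviated from the cpu_data ones):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cpu {
	atomic_uint gen_global;    /* bumped when a global flush completes */
	atomic_uint gen_local;     /* bumped when a local flush completes  */
	unsigned    snap_global;   /* initiator's snapshots of the above   */
	unsigned    snap_local;
};

/* Initiator side: record the responder's counters at request time. */
static void
post_request(struct cpu *responder)
{
	responder->snap_global = atomic_load(&responder->gen_global);
	responder->snap_local  = atomic_load(&responder->gen_local);
}

/* Has the responder acknowledged?  For a global flush only a global
 * bump counts; otherwise either kind of completed flush will do. */
static bool
responded(struct cpu *r, bool need_global)
{
	bool g = atomic_load(&r->gen_global) != r->snap_global;
	bool l = atomic_load(&r->gen_local)  != r->snap_local;
	return need_global ? g : (g || l);
}

int
main(void)
{
	struct cpu c = {0};
	post_request(&c);
	printf("before: %d\n", responded(&c, true));    /* 0 */
	atomic_fetch_add(&c.gen_global, 1);     /* responder flushes */
	printf("after:  %d\n", responded(&c, true));    /* 1 */
	return 0;
}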
(!cpu_is_running(cpu)) { continue; + } - if (pfc->pfc_invalid_global & cpu_bit) - cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE; - else - cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE; + if (pfc->pfc_invalid_global & cpu_bit) { + cpu_datap(cpu)->cpu_tlb_invalid_global = 1; + need_global_flush = true; + } else { + cpu_datap(cpu)->cpu_tlb_invalid_local = 1; + } + cpu_datap(my_cpu)->cpu_tlb_gen_counts_global[cpu] = cpu_datap(cpu)->cpu_tlb_invalid_global_count; + cpu_datap(my_cpu)->cpu_tlb_gen_counts_local[cpu] = cpu_datap(cpu)->cpu_tlb_invalid_local_count; mfence(); if (cpu == my_cpu) { @@ -2583,13 +2775,13 @@ pmap_flush( * Flush local tlb if required. * Do this now to overlap with other processors responding. */ - if (flush_self && cpu_datap(my_cpu)->cpu_tlb_invalid != FALSE) - process_pmap_updates(); + if (flush_self) { + process_pmap_updates(NULL, (pfc->pfc_invalid_global != 0), 0ULL, ~0ULL); + } if (cpus_to_respond) { - deadline = mach_absolute_time() + - (TLBTimeOut ? TLBTimeOut : LockTimeOut); + (TLBTimeOut ? TLBTimeOut : LockTimeOut); boolean_t is_timeout_traced = FALSE; /* @@ -2599,29 +2791,30 @@ pmap_flush( long orig_acks = 0; for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { - /* Consider checking local/global invalidity - * as appropriate in the PCID case. - */ + bool responded = false; if ((cpus_to_respond & cpu_bit) != 0) { - if (!cpu_is_running(cpu) || - cpu_datap(cpu)->cpu_tlb_invalid == FALSE || - !CPU_CR3_IS_ACTIVE(cpu)) { + responded = pmap_tlbi_response(my_cpu, cpu, need_global_flush); + if (responded) { cpus_to_respond &= ~cpu_bit; } cpu_pause(); } - if (cpus_to_respond == 0) + + if (cpus_to_respond == 0) { break; + } } if (cpus_to_respond && (mach_absolute_time() > deadline)) { - if (machine_timeout_suspended()) + if (machine_timeout_suspended()) { continue; + } if (TLBTimeOut == 0) { - if (is_timeout_traced) + if (is_timeout_traced) { continue; + } PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO), - NULL, cpus_to_signal, cpus_to_respond); + NULL, cpus_to_signal, cpus_to_respond); is_timeout_traced = TRUE; continue; @@ -2629,13 +2822,13 @@ pmap_flush( orig_acks = NMIPI_acks; NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT); panic("Uninterruptible processor(s): CPU bitmap: 0x%llx, NMIPI acks: 0x%lx, now: 0x%lx, deadline: %llu", - cpus_to_respond, orig_acks, NMIPI_acks, deadline); + cpus_to_respond, orig_acks, NMIPI_acks, deadline); } } } PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_DELAYED_TLBS) | DBG_FUNC_END, - NULL, cpus_signaled, flush_self); + NULL, cpus_signaled, flush_self); mp_enable_preemption(); } @@ -2649,9 +2842,9 @@ invept(void *eptp) uint64_t reserved; } __attribute__((aligned(16), packed)) invept_descriptor = {(uint64_t)eptp, 0}; - __asm__ volatile("invept (%%rax), %%rcx" - : : "c" (PMAP_INVEPT_SINGLE_CONTEXT), "a" (&invept_descriptor) - : "cc", "memory"); + __asm__ volatile ("invept (%%rax), %%rcx" + : : "c" (PMAP_INVEPT_SINGLE_CONTEXT), "a" (&invept_descriptor) + : "cc", "memory"); } /* @@ -2665,43 +2858,48 @@ invept(void *eptp) */ void -pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc) +pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int options, pmap_flush_context *pfc) { - unsigned int cpu; - cpumask_t cpu_bit; - cpumask_t cpus_to_signal = 0; - unsigned int my_cpu = cpu_number(); - pmap_paddr_t pmap_cr3 = pmap->pm_cr3; - boolean_t flush_self = FALSE; - uint64_t deadline; - boolean_t pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap)); - 
boolean_t need_global_flush = FALSE; - uint32_t event_code; - vm_map_offset_t event_startv, event_endv; - boolean_t is_ept = is_ept_pmap(pmap); + unsigned int cpu; + cpumask_t cpu_bit; + cpumask_t cpus_to_signal = 0; + unsigned int my_cpu = cpu_number(); + pmap_paddr_t pmap_cr3 = pmap->pm_cr3; + boolean_t flush_self = FALSE; + uint64_t deadline; + boolean_t pmap_is_shared = (pmap->pm_shared || (pmap == kernel_pmap)); + bool need_global_flush = false; + uint32_t event_code; + vm_map_offset_t event_startv, event_endv; + boolean_t is_ept = is_ept_pmap(pmap); assert((processor_avail_count < 2) || - (ml_get_interrupts_enabled() && get_preemption_level() != 0)); - - if (pmap == kernel_pmap) { - event_code = PMAP_CODE(PMAP__FLUSH_KERN_TLBS); - event_startv = VM_KERNEL_UNSLIDE_OR_PERM(startv); - event_endv = VM_KERNEL_UNSLIDE_OR_PERM(endv); - } else if (is_ept) { - event_code = PMAP_CODE(PMAP__FLUSH_EPT); - event_startv = startv; - event_endv = endv; - } else { - event_code = PMAP_CODE(PMAP__FLUSH_TLBS); - event_startv = startv; - event_endv = endv; + (ml_get_interrupts_enabled() && get_preemption_level() != 0)); + + assert((endv - startv) >= PAGE_SIZE); + assert(((endv | startv) & PAGE_MASK) == 0); + + if (__improbable(kdebug_enable)) { + if (pmap == kernel_pmap) { + event_code = PMAP_CODE(PMAP__FLUSH_KERN_TLBS); + event_startv = VM_KERNEL_UNSLIDE_OR_PERM(startv); + event_endv = VM_KERNEL_UNSLIDE_OR_PERM(endv); + } else if (__improbable(is_ept)) { + event_code = PMAP_CODE(PMAP__FLUSH_EPT); + event_startv = startv; + event_endv = endv; + } else { + event_code = PMAP_CODE(PMAP__FLUSH_TLBS); + event_startv = startv; + event_endv = endv; + } } PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_START, - VM_KERNEL_UNSLIDE_OR_PERM(pmap), options, - event_startv, event_endv); + VM_KERNEL_UNSLIDE_OR_PERM(pmap), options, + event_startv, event_endv); - if (is_ept) { + if (__improbable(is_ept)) { mp_cpus_call(CPUMASK_ALL, ASYNC, invept, (void*)pmap->pm_eptp); goto out; } @@ -2712,36 +2910,44 @@ pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o * don't signal -- they'll check as they go busy. 
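The dispatch logic that begins above (its per-CPU loop follows in the next hunk) only signals CPUs that could actually hold stale translations: those whose active or pre-composed shadow task CR3 matches the pmap being flushed, plus every CPU for a shared pmap; idle CPUs pick the invalidation up lazily as they go busy. A sketch of that target-selection pass, modeling cpumask_t as a 64-bit word:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef uint64_t cpumask_t;

struct cpu_state {
	bool     running;
	uint64_t active_cr3;    /* CR3 currently loaded         */
	uint64_t task_cr3;      /* pre-composed shadow task CR3 */
};

/* Build the set of CPUs that must be interrupted for this flush. */
static cpumask_t
cpus_to_signal(const struct cpu_state *cpus, unsigned ncpus,
    uint64_t pmap_cr3, bool pmap_is_shared)
{
	cpumask_t mask = 0;

	for (unsigned cpu = 0; cpu < ncpus; cpu++) {
		if (!cpus[cpu].running) {
			continue;       /* will revalidate on wake */
		}
		if (pmap_is_shared ||
		    cpus[cpu].active_cr3 == pmap_cr3 ||
		    cpus[cpu].task_cr3 == pmap_cr3) {
			mask |= 1ULL << cpu;
		}
	}
	return mask;
}

int
main(void)
{
	struct cpu_state cpus[2] = {
		{ true, 0x1000, 0x1000 },       /* running this pmap */
		{ true, 0x2000, 0x2000 },       /* running another   */
	};
	printf("mask=0x%llx\n",
	    (unsigned long long)cpus_to_signal(cpus, 2, 0x1000, false));
	return 0;
}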
*/ if (pmap_pcid_ncpus) { - if (pmap_is_shared) - need_global_flush = TRUE; + if (pmap_is_shared) { + need_global_flush = true; + } pmap_pcid_invalidate_all_cpus(pmap); mfence(); } + for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { - if (!cpu_is_running(cpu)) + if (!cpu_is_running(cpu)) { continue; - uint64_t cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu); - uint64_t cpu_task_cr3 = CPU_GET_TASK_CR3(cpu); -//recall that the shadowed task cr3 is pre-composed + } + uint64_t cpu_active_cr3 = CPU_GET_ACTIVE_CR3(cpu); + uint64_t cpu_task_cr3 = CPU_GET_TASK_CR3(cpu); + if ((pmap_cr3 == cpu_task_cr3) || (pmap_cr3 == cpu_active_cr3) || (pmap_is_shared)) { - if (options & PMAP_DELAY_TLB_FLUSH) { - if (need_global_flush == TRUE) + if (need_global_flush == true) { pfc->pfc_invalid_global |= cpu_bit; + } pfc->pfc_cpus |= cpu_bit; continue; } + if (need_global_flush == true) { + cpu_datap(my_cpu)->cpu_tlb_gen_counts_global[cpu] = cpu_datap(cpu)->cpu_tlb_invalid_global_count; + cpu_datap(cpu)->cpu_tlb_invalid_global = 1; + } else { + cpu_datap(my_cpu)->cpu_tlb_gen_counts_local[cpu] = cpu_datap(cpu)->cpu_tlb_invalid_local_count; + cpu_datap(cpu)->cpu_tlb_invalid_local = 1; + } + if (cpu == my_cpu) { flush_self = TRUE; continue; } - if (need_global_flush == TRUE) - cpu_datap(cpu)->cpu_tlb_invalid_global = TRUE; - else - cpu_datap(cpu)->cpu_tlb_invalid_local = TRUE; + mfence(); /* @@ -2760,37 +2966,31 @@ pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o */ if (CPU_CR3_IS_ACTIVE(cpu) && (pmap_cr3 == CPU_GET_ACTIVE_CR3(cpu) || - pmap->pm_shared || - (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) { + pmap->pm_shared || + (pmap_cr3 == CPU_GET_TASK_CR3(cpu)))) { cpus_to_signal |= cpu_bit; i386_signal_cpu(cpu, MP_TLB_FLUSH, ASYNC); } } } - if ((options & PMAP_DELAY_TLB_FLUSH)) + + if ((options & PMAP_DELAY_TLB_FLUSH)) { goto out; + } /* * Flush local tlb if required. * Do this now to overlap with other processors responding. */ if (flush_self) { - if (pmap_pcid_ncpus) { - pmap_pcid_validate_cpu(pmap, my_cpu); - if (pmap_is_shared) - tlb_flush_global(); - else - flush_tlb_raw(); - } - else - flush_tlb_raw(); + process_pmap_updates(pmap, pmap_is_shared, startv, endv); } if (cpus_to_signal) { - cpumask_t cpus_to_respond = cpus_to_signal; + cpumask_t cpus_to_respond = cpus_to_signal; deadline = mach_absolute_time() + - (TLBTimeOut ? TLBTimeOut : LockTimeOut); + (TLBTimeOut ? TLBTimeOut : LockTimeOut); boolean_t is_timeout_traced = FALSE; /* @@ -2800,41 +3000,42 @@ pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o long orig_acks = 0; for (cpu = 0, cpu_bit = 1; cpu < real_ncpus; cpu++, cpu_bit <<= 1) { - /* Consider checking local/global invalidity - * as appropriate in the PCID case. 
- */ + bool responded = false; if ((cpus_to_respond & cpu_bit) != 0) { - if (!cpu_is_running(cpu) || - cpu_datap(cpu)->cpu_tlb_invalid == FALSE || - !CPU_CR3_IS_ACTIVE(cpu)) { + responded = pmap_tlbi_response(my_cpu, cpu, need_global_flush); + if (responded) { cpus_to_respond &= ~cpu_bit; } cpu_pause(); } - if (cpus_to_respond == 0) + if (cpus_to_respond == 0) { break; + } } if (cpus_to_respond && (mach_absolute_time() > deadline)) { - if (machine_timeout_suspended()) + if (machine_timeout_suspended()) { continue; + } if (TLBTimeOut == 0) { /* cut tracepoint but don't panic */ - if (is_timeout_traced) + if (is_timeout_traced) { continue; + } PMAP_TRACE_CONSTANT(PMAP_CODE(PMAP__FLUSH_TLBS_TO), - VM_KERNEL_UNSLIDE_OR_PERM(pmap), - cpus_to_signal, - cpus_to_respond); + VM_KERNEL_UNSLIDE_OR_PERM(pmap), + cpus_to_signal, + cpus_to_respond); is_timeout_traced = TRUE; continue; } orig_acks = NMIPI_acks; - + uint64_t tstamp1 = mach_absolute_time(); NMIPI_panic(cpus_to_respond, TLB_FLUSH_TIMEOUT); - panic("TLB invalidation IPI timeout, unresponsive CPU bitmap: 0x%llx, NMIPI acks: 0x%lx, now: 0x%lx, deadline: %llu", - cpus_to_respond, orig_acks, NMIPI_acks, deadline); + uint64_t tstamp2 = mach_absolute_time(); + panic("IPI timeout, unresponsive CPU bitmap: 0x%llx, NMIPI acks: 0x%lx, now: 0x%lx, deadline: %llu, pre-NMIPI time: 0x%llx, current: 0x%llx, global: %d", + cpus_to_respond, orig_acks, NMIPI_acks, deadline, tstamp1, tstamp2, need_global_flush); } } } @@ -2845,45 +3046,64 @@ pmap_flush_tlbs(pmap_t pmap, vm_map_offset_t startv, vm_map_offset_t endv, int o out: PMAP_TRACE_CONSTANT(event_code | DBG_FUNC_END, - VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal, - event_startv, event_endv); - + VM_KERNEL_UNSLIDE_OR_PERM(pmap), cpus_to_signal, + event_startv, event_endv); } -void -process_pmap_updates(void) +static void +process_pmap_updates(pmap_t p, bool pshared, addr64_t istart, addr64_t iend) { int ccpu = cpu_number(); - pmap_assert(ml_get_interrupts_enabled() == 0 || get_preemption_level() != 0); - if (pmap_pcid_ncpus) { - pmap_pcid_validate_current(); - cpu_datap(ccpu)->cpu_tlb_invalid = FALSE; - tlb_flush_global(); + bool gtlbf = false; + + pmap_assert(ml_get_interrupts_enabled() == 0 || + get_preemption_level() != 0); + + if (cpu_datap(ccpu)->cpu_tlb_invalid_global) { + cpu_datap(ccpu)->cpu_tlb_invalid_global_count++; + cpu_datap(ccpu)->cpu_tlb_invalid = 0; + gtlbf = true; } else { - current_cpu_datap()->cpu_tlb_invalid = FALSE; - flush_tlb_raw(); + cpu_datap(ccpu)->cpu_tlb_invalid_local_count++; + cpu_datap(ccpu)->cpu_tlb_invalid_local = 0; } - mfence(); + if (pmap_pcid_ncpus) { + if (p) { + /* TODO global generation count to + * avoid potentially redundant + * csw invalidations post-global invalidation + */ + pmap_pcid_validate_cpu(p, ccpu); + pmap_tlbi_range(istart, iend, (pshared || gtlbf), p->pmap_pcid_cpus[ccpu]); + } else { + pmap_pcid_validate_current(); + pmap_tlbi_range(istart, iend, true, 0); + } + } else { + pmap_tlbi_range(0, ~0ULL, true, 0); + } } void pmap_update_interrupt(void) { - PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START); + PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_START); - if (current_cpu_datap()->cpu_tlb_invalid) - process_pmap_updates(); + if (current_cpu_datap()->cpu_tlb_invalid) { + process_pmap_updates(NULL, true, 0ULL, ~0ULL); + } - PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END); + PMAP_TRACE(PMAP_CODE(PMAP__UPDATE_INTERRUPT) | DBG_FUNC_END); } -#include /* mach_vm_region_recurse() */ +#include /* 
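The wait loop above spins until every signaled CPU acknowledges or a deadline passes; with TLBTimeOut unset it cuts a tracepoint and keeps waiting, otherwise it NMIs the stragglers and panics with timestamps bracketing the NMI. A stripped-down model of that bounded acknowledgment wait, with a nanosecond clock standing in for mach_absolute_time():

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static uint64_t
now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Poll "acked" until all bits in "waiting" clear or the deadline hits.
 * Returns true on success, false on timeout (where the kernel would
 * either trace once or NMI the unresponsive CPUs and panic). */
static bool
wait_for_acks(volatile uint64_t *acked, uint64_t waiting, uint64_t timeout_ns)
{
	uint64_t deadline = now_ns() + timeout_ns;

	while ((*acked & waiting) != waiting) {
		if (now_ns() > deadline) {
			return false;
		}
		/* a cpu_pause() equivalent would go here */
	}
	return true;
}

int
main(void)
{
	volatile uint64_t acked = 0x3;
	printf("acked in time: %d\n", wait_for_acks(&acked, 0x3, 1000000));
	acked = 0x1;
	printf("timed out:     %d\n", !wait_for_acks(&acked, 0x3, 1000000));
	return 0;
}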
mach_vm_region_recurse() */ /* Scan kernel pmap for W+X PTEs, scan kernel VM map for W+X map entries * and identify ranges with mismatched VM permissions and PTE permissions */ kern_return_t -pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev) { +pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset_t ev) +{ vm_offset_t cv = sv; kern_return_t rv = KERN_SUCCESS; uint64_t skip4 = 0, skip2 = 0; @@ -2894,25 +3114,27 @@ pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset ev &= ~PAGE_MASK_64; while (cv < ev) { if (__improbable((cv > 0x00007FFFFFFFFFFFULL) && - (cv < 0xFFFF800000000000ULL))) { + (cv < 0xFFFF800000000000ULL))) { cv = 0xFFFF800000000000ULL; } /* Potential inconsistencies from not holding pmap lock * but harmless for the moment. */ if (((cv & PML4MASK) == 0) && (pmap64_pml4(ipmap, cv) == 0)) { - if ((cv + NBPML4) > cv) + if ((cv + NBPML4) > cv) { cv += NBPML4; - else + } else { break; + } skip4++; continue; } if (((cv & PDMASK) == 0) && (pmap_pde(ipmap, cv) == 0)) { - if ((cv + NBPD) > cv) + if ((cv + NBPD) > cv) { cv += NBPD; - else + } else { break; + } skip2++; continue; } @@ -2921,7 +3143,7 @@ pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset if (ptep && (*ptep & INTEL_PTE_VALID)) { if (*ptep & INTEL_PTE_WRITE) { if (!(*ptep & INTEL_PTE_NX)) { - kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap64_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep))))); + kprintf("W+X PTE at 0x%lx, P4: 0x%llx, P3: 0x%llx, P2: 0x%llx, PT: 0x%llx, VP: %u\n", cv, *pmap64_pml4(ipmap, cv), *pmap64_pdpt(ipmap, cv), *pmap_pde(ipmap, cv), *ptep, pmap_valid_page((ppnum_t)(i386_btop(pte_to_pa(*ptep))))); rv = KERN_FAILURE; } } @@ -2933,23 +3155,22 @@ pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset struct vm_region_submap_info_64 vbr; mach_msg_type_number_t vbrcount = 0; - mach_vm_size_t vmsize; - vm_prot_t prot; + mach_vm_size_t vmsize; + vm_prot_t prot; uint32_t nesting_depth = 0; kern_return_t kret; - + while (cv < ev) { - for (;;) { vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64; - if((kret = mach_vm_region_recurse(ivmmap, - (mach_vm_address_t *) &cv, &vmsize, &nesting_depth, - (vm_region_recurse_info_t)&vbr, - &vbrcount)) != KERN_SUCCESS) { + if ((kret = mach_vm_region_recurse(ivmmap, + (mach_vm_address_t *) &cv, &vmsize, &nesting_depth, + (vm_region_recurse_info_t)&vbr, + &vbrcount)) != KERN_SUCCESS) { break; } - if(vbr.is_submap) { + if (vbr.is_submap) { nesting_depth++; continue; } else { @@ -2957,8 +3178,9 @@ pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset } } - if(kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { break; + } prot = vbr.protection; @@ -2973,13 +3195,16 @@ pmap_permissions_verify(pmap_t ipmap, vm_map_t ivmmap, vm_offset_t sv, vm_offset pt_entry_t *ptep = pmap_pte(ipmap, pcv); vm_prot_t tprot; - if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID)) + if ((ptep == NULL) || !(*ptep & INTEL_PTE_VALID)) { continue; + } tprot = VM_PROT_READ; - if (*ptep & INTEL_PTE_WRITE) + if (*ptep & INTEL_PTE_WRITE) { tprot |= VM_PROT_WRITE; - if ((*ptep & INTEL_PTE_NX) == 0) + } + if ((*ptep & INTEL_PTE_NX) == 0) { tprot |= VM_PROT_EXECUTE; + } if (tprot != prot) { kprintf("PTE/map entry permissions mismatch at address 0x%lx, pte: 0x%llx, protection: 0x%x\n", pcv, *ptep, prot); rv = KERN_FAILURE; @@ 
-2999,10 +3224,10 @@ static void pmap_check_ledgers( pmap_t pmap) { - ledger_amount_t bal; - int pid; - char *procname; - boolean_t do_panic; + ledger_amount_t bal; + int pid; + char *procname; + boolean_t do_panic; if (pmap->pmap_pid == 0) { /* @@ -3026,40 +3251,40 @@ pmap_check_ledgers( pmap_ledgers_drift.num_pmaps_checked++; -#define LEDGER_CHECK_BALANCE(__LEDGER) \ -MACRO_BEGIN \ - int panic_on_negative = TRUE; \ - ledger_get_balance(pmap->ledger, \ - task_ledgers.__LEDGER, \ - &bal); \ - ledger_get_panic_on_negative(pmap->ledger, \ - task_ledgers.__LEDGER, \ - &panic_on_negative); \ - if (bal != 0) { \ - if (panic_on_negative || \ - (pmap_ledgers_panic && \ - pmap_ledgers_panic_leeway > 0 && \ - (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) || \ - bal < (pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \ - do_panic = TRUE; \ - } \ - printf("LEDGER BALANCE proc %d (%s) " \ - "\"%s\" = %lld\n", \ - pid, procname, #__LEDGER, bal); \ - if (bal > 0) { \ - pmap_ledgers_drift.__LEDGER##_over++; \ - pmap_ledgers_drift.__LEDGER##_over_total += bal; \ - if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \ - pmap_ledgers_drift.__LEDGER##_over_max = bal; \ - } \ - } else if (bal < 0) { \ - pmap_ledgers_drift.__LEDGER##_under++; \ - pmap_ledgers_drift.__LEDGER##_under_total += bal; \ - if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \ - pmap_ledgers_drift.__LEDGER##_under_max = bal; \ - } \ - } \ - } \ +#define LEDGER_CHECK_BALANCE(__LEDGER) \ +MACRO_BEGIN \ + int panic_on_negative = TRUE; \ + ledger_get_balance(pmap->ledger, \ + task_ledgers.__LEDGER, \ + &bal); \ + ledger_get_panic_on_negative(pmap->ledger, \ + task_ledgers.__LEDGER, \ + &panic_on_negative); \ + if (bal != 0) { \ + if (panic_on_negative || \ + (pmap_ledgers_panic && \ + pmap_ledgers_panic_leeway > 0 && \ + (bal > (pmap_ledgers_panic_leeway * PAGE_SIZE) || \ + bal < (pmap_ledgers_panic_leeway * PAGE_SIZE)))) { \ + do_panic = TRUE; \ + } \ + printf("LEDGER BALANCE proc %d (%s) " \ + "\"%s\" = %lld\n", \ + pid, procname, #__LEDGER, bal); \ + if (bal > 0) { \ + pmap_ledgers_drift.__LEDGER##_over++; \ + pmap_ledgers_drift.__LEDGER##_over_total += bal; \ + if (bal > pmap_ledgers_drift.__LEDGER##_over_max) { \ + pmap_ledgers_drift.__LEDGER##_over_max = bal; \ + } \ + } else if (bal < 0) { \ + pmap_ledgers_drift.__LEDGER##_under++; \ + pmap_ledgers_drift.__LEDGER##_under_total += bal; \ + if (bal < pmap_ledgers_drift.__LEDGER##_under_max) { \ + pmap_ledgers_drift.__LEDGER##_under_max = bal; \ + } \ + } \ + } \ MACRO_END LEDGER_CHECK_BALANCE(phys_footprint); @@ -3081,10 +3306,10 @@ MACRO_END if (do_panic) { if (pmap_ledgers_panic) { panic("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n", - pmap, pid, procname); + pmap, pid, procname); } else { printf("pmap_destroy(%p) %d[%s] has imbalanced ledgers\n", - pmap, pid, procname); + pmap, pid, procname); } } @@ -3107,24 +3332,24 @@ MACRO_END if (pmap_stats_assert && pmap->pmap_stats_assert) { panic("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld", - pmap, pid, procname, - pmap->stats.resident_count, - pmap->stats.wired_count, - pmap->stats.device, - pmap->stats.internal, - pmap->stats.external, - pmap->stats.reusable, - pmap->stats.compressed); + pmap, pid, procname, + pmap->stats.resident_count, + pmap->stats.wired_count, + pmap->stats.device, + pmap->stats.internal, + pmap->stats.external, + pmap->stats.reusable, + pmap->stats.compressed); } else { printf("pmap_destroy(%p) %d[%s] imbalanced stats: resident=%d 
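LEDGER_CHECK_BALANCE above leans on token pasting: one macro argument names both the task ledger entry and, via __LEDGER##_over / __LEDGER##_under, the matching fields of the drift structure. A cut-down illustration of that pattern, reusing the drift-record idea from earlier:

#include <stdio.h>

typedef long long ledger_amount_t;

static struct {
	int internal_over, internal_under;
	int phys_footprint_over, phys_footprint_under;
} drift;

/* One argument selects the balance to check AND the drift counters to
 * bump, via ## token pasting, just as LEDGER_CHECK_BALANCE does. */
#define CHECK_BALANCE(__LEDGER, __BAL)                  \
	do {                                            \
		ledger_amount_t bal = (__BAL);          \
		if (bal > 0) {                          \
			drift.__LEDGER##_over++;        \
		} else if (bal < 0) {                   \
			drift.__LEDGER##_under++;       \
		}                                       \
	} while (0)

int
main(void)
{
	CHECK_BALANCE(internal, 4096);
	CHECK_BALANCE(phys_footprint, -4096);
	printf("internal over=%d, footprint under=%d\n",
	    drift.internal_over, drift.phys_footprint_under);
	return 0;
}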
wired=%d device=%d internal=%d external=%d reusable=%d compressed=%lld", - pmap, pid, procname, - pmap->stats.resident_count, - pmap->stats.wired_count, - pmap->stats.device, - pmap->stats.internal, - pmap->stats.external, - pmap->stats.reusable, - pmap->stats.compressed); + pmap, pid, procname, + pmap->stats.resident_count, + pmap->stats.wired_count, + pmap->stats.device, + pmap->stats.internal, + pmap->stats.external, + pmap->stats.reusable, + pmap->stats.compressed); } } } @@ -3135,11 +3360,12 @@ pmap_set_process( int pid, char *procname) { - if (pmap == NULL) + if (pmap == NULL) { return; + } pmap->pmap_pid = pid; - strlcpy(pmap->pmap_procname, procname, sizeof (pmap->pmap_procname)); + strlcpy(pmap->pmap_procname, procname, sizeof(pmap->pmap_procname)); if (pmap_ledgers_panic_leeway) { /* * XXX FBDP @@ -3154,17 +3380,17 @@ pmap_set_process( */ pmap->pmap_stats_assert = FALSE; ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.phys_footprint); + task_ledgers.phys_footprint); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.internal); + task_ledgers.internal); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.internal_compressed); + task_ledgers.internal_compressed); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.iokit_mapped); + task_ledgers.iokit_mapped); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.alternate_accounting); + task_ledgers.alternate_accounting); ledger_disable_panic_on_negative(pmap->ledger, - task_ledgers.alternate_accounting_compressed); + task_ledgers.alternate_accounting_compressed); } } #endif /* MACH_ASSERT */ @@ -3174,7 +3400,9 @@ pmap_set_process( int pmap_pagezero_mitigation = 1; #endif -void pmap_advise_pagezero_range(pmap_t lpmap, uint64_t low_bound) { +void +pmap_advise_pagezero_range(pmap_t lpmap, uint64_t low_bound) +{ #if DEVELOPMENT || DEBUG if (pmap_pagezero_mitigation == 0) { lpmap->pagezero_accessible = FALSE; @@ -3189,18 +3417,50 @@ void pmap_advise_pagezero_range(pmap_t lpmap, uint64_t low_bound) { } } -void pmap_verify_noncacheable(uintptr_t vaddr) { +uintptr_t +pmap_verify_noncacheable(uintptr_t vaddr) +{ pt_entry_t *ptep = NULL; ptep = pmap_pte(kernel_pmap, vaddr); if (ptep == NULL) { panic("pmap_verify_noncacheable: no translation for 0x%lx", vaddr); } /* Non-cacheable OK */ - if (*ptep & (INTEL_PTE_NCACHE)) - return; + if (*ptep & (INTEL_PTE_NCACHE)) { + return pte_to_pa(*ptep) | (vaddr & INTEL_OFFMASK); + } /* Write-combined OK */ - if (*ptep & (INTEL_PTE_PTA)) - return; + if (*ptep & (INTEL_PTE_PAT)) { + return pte_to_pa(*ptep) | (vaddr & INTEL_OFFMASK); + } panic("pmap_verify_noncacheable: IO read from a cacheable address? address: 0x%lx, PTE: %p, *PTE: 0x%llx", vaddr, ptep, *ptep); + /*NOTREACHED*/ + return 0; +} + +void +trust_cache_init(void) +{ + // Unsupported on this architecture. +} + +kern_return_t +pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache __unused *trust_cache, + const vm_size_t __unused trust_cache_len) +{ + // Unsupported on this architecture. + return KERN_NOT_SUPPORTED; +} + +pmap_tc_ret_t +pmap_load_image4_trust_cache(struct pmap_image4_trust_cache __unused *trust_cache, + const vm_size_t __unused trust_cache_len, + uint8_t const * __unused img4_manifest, + const vm_size_t __unused img4_manifest_buffer_len, + const vm_size_t __unused img4_manifest_actual_len, + bool __unused dry_run) +{ + // Unsupported on this architecture. 
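pmap_verify_noncacheable() above changes from void to returning the translated physical address, accepting either an uncached (NCACHE) or write-combined (PAT) mapping and panicking on anything cacheable. A model of the check plus the physical-address composition from PTE frame and page offset (bit positions here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PTE_NCACHE   (1ull << 4)        /* illustrative attribute bits */
#define PTE_PAT      (1ull << 7)
#define PTE_FRAME    0x000FFFFFFFFFF000ull
#define PAGE_OFFMASK 0xFFFull

/* Return the physical address if the mapping is uncached or
 * write-combined; 0 stands in for the panic on a cacheable page. */
static uint64_t
verify_noncacheable(uint64_t pte, uintptr_t vaddr)
{
	if (pte & (PTE_NCACHE | PTE_PAT)) {
		return (pte & PTE_FRAME) | (vaddr & PAGE_OFFMASK);
	}
	return 0;       /* kernel would panic here */
}

int
main(void)
{
	uint64_t pte = 0x5000 | PTE_NCACHE;
	printf("pa=0x%llx\n",
	    (unsigned long long)verify_noncacheable(pte, 0x1234));
	return 0;
}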
+ return PMAP_TC_UNKNOWN_FORMAT; } diff --git a/osfmk/x86_64/pmap_pcid.c b/osfmk/x86_64/pmap_pcid.c index 506d684ce..6ecc6f225 100644 --- a/osfmk/x86_64/pmap_pcid.c +++ b/osfmk/x86_64/pmap_pcid.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -55,34 +55,40 @@ * to physical TLB context IDs in an LRU fashion for efficiency. (DRK '10) */ -uint32_t pmap_pcid_ncpus; -boolean_t pmap_pcid_disabled = FALSE; +uint32_t pmap_pcid_ncpus; +boolean_t pmap_pcid_disabled = FALSE; +bool invpcid_enabled = false; +static uint32_t INP_MAX = 0; pcid_cdata_t pcid_data[MAX_CPUS] __attribute__((aligned(64))); -void pmap_pcid_configure(void) { +void +pmap_pcid_configure(void) +{ int ccpu = cpu_number(); uintptr_t cr4 = get_cr4(); boolean_t pcid_present = FALSE; pmap_pcid_log("PCID configure invoked on CPU %d\n", ccpu); - pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() !=0); + pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() != 0); pmap_assert(cpu_mode_is64bit()); - if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled, sizeof (pmap_pcid_disabled))) { + if (PE_parse_boot_argn("-pmap_pcid_disable", &pmap_pcid_disabled, sizeof(pmap_pcid_disabled))) { pmap_pcid_log("PMAP: PCID feature disabled\n"); printf("PMAP: PCID feature disabled, %u\n", pmap_pcid_disabled); kprintf("PMAP: PCID feature disabled %u\n", pmap_pcid_disabled); } - /* no_shared_cr3+PCID is currently unsupported */ - //todo remove nscr3 -#if DEBUG - if (pmap_pcid_disabled == FALSE) + /* no_shared_cr3+PCID is currently unsupported */ + +#if DEBUG + if (pmap_pcid_disabled == FALSE) { no_shared_cr3 = FALSE; - else + } else { no_shared_cr3 = TRUE; + } #else - if (no_shared_cr3) + if (no_shared_cr3) { pmap_pcid_disabled = TRUE; + } #endif if (pmap_pcid_disabled || no_shared_cr3) { unsigned i; @@ -102,19 +108,27 @@ void pmap_pcid_configure(void) { /* DRKTODO: assert if features haven't been discovered yet. Redundant * invocation of cpu_mode_init and descendants masks this for now. 
*/ - if ((cpuid_features() & CPUID_FEATURE_PCID)) + if ((cpuid_features() & CPUID_FEATURE_PCID)) { pcid_present = TRUE; - else { + } else { cpu_datap(ccpu)->cpu_pmap_pcid_enabled = FALSE; pmap_pcid_log("PMAP: PCID not detected CPU %d\n", ccpu); return; } - if ((cr4 & (CR4_PCIDE | CR4_PGE)) == (CR4_PCIDE|CR4_PGE)) { + if ((cr4 & (CR4_PCIDE | CR4_PGE)) == (CR4_PCIDE | CR4_PGE)) { cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE; pmap_pcid_log("PMAP: PCID already enabled %d\n", ccpu); return; } if (pcid_present == TRUE) { + if (ccpu == 0) { + if (cpuid_leaf7_features() & CPUID_LEAF7_FEATURE_INVPCID) { + invpcid_enabled = true; + } + } +#if DEVELOPMENT || DEBUG + PE_parse_boot_argn("pmap_inp_max", &INP_MAX, sizeof(INP_MAX)); +#endif pmap_pcid_log("Pre-PCID:CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, cr4); if (cpu_number() >= PMAP_PCID_MAX_CPUS) { @@ -126,7 +140,7 @@ void pmap_pcid_configure(void) { } set_cr4(get_cr4() | CR4_PCIDE); pmap_pcid_log("Post PCID: CR0: 0x%lx, CR3: 0x%lx, CR4(CPU %d): 0x%lx\n", get_cr0(), get_cr3_raw(), ccpu, get_cr4()); - tlb_flush_global(); + pmap_tlbi_range(0, ~0ULL, true, 0); cpu_datap(ccpu)->cpu_pmap_pcid_enabled = TRUE; if (OSIncrementAtomic(&pmap_pcid_ncpus) == machine_info.max_cpus) { @@ -140,9 +154,11 @@ void pmap_pcid_configure(void) { } } -void pmap_pcid_initialize(pmap_t p) { +void +pmap_pcid_initialize(pmap_t p) +{ unsigned i; - unsigned nc = sizeof(p->pmap_pcid_cpus)/sizeof(pcid_t); + unsigned nc = sizeof(p->pmap_pcid_cpus) / sizeof(pcid_t); pmap_assert(nc >= real_ncpus); for (i = 0; i < nc; i++) { @@ -153,9 +169,11 @@ void pmap_pcid_initialize(pmap_t p) { } } -void pmap_pcid_initialize_kernel(pmap_t p) { +void +pmap_pcid_initialize_kernel(pmap_t p) +{ unsigned i; - unsigned nc = sizeof(p->pmap_pcid_cpus)/sizeof(pcid_t); + unsigned nc = sizeof(p->pmap_pcid_cpus) / sizeof(pcid_t); for (i = 0; i < nc; i++) { p->pmap_pcid_cpus[i] = 0; @@ -165,12 +183,14 @@ void pmap_pcid_initialize_kernel(pmap_t p) { } } -pcid_t pmap_pcid_allocate_pcid(int ccpu) { +pcid_t +pmap_pcid_allocate_pcid(int ccpu) +{ int i; - pcid_ref_t cur_min = 0xFF; - uint32_t cur_min_index = ~1; - pcid_ref_t *cpu_pcid_refcounts = &cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_refcounts[0]; - pcid_ref_t old_count; + pcid_ref_t cur_min = 0xFF; + uint32_t cur_min_index = ~1; + pcid_ref_t *cpu_pcid_refcounts = &cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_refcounts[0]; + pcid_ref_t old_count; if ((i = cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_free_hint) != 0) { if (cpu_pcid_refcounts[i] == 0) { @@ -209,41 +229,51 @@ pcid_t pmap_pcid_allocate_pcid(int ccpu) { old_count = __sync_fetch_and_add(&cpu_pcid_refcounts[cur_min_index], 1); pmap_assert(old_count < PMAP_PCID_MAX_REFCOUNT); - return (cur_min_index); + return cur_min_index; } -void pmap_pcid_deallocate_pcid(int ccpu, pmap_t tpmap) { +void +pmap_pcid_deallocate_pcid(int ccpu, pmap_t tpmap) +{ pcid_t pcid; pmap_t lp; pcid_ref_t prior_count; pcid = tpmap->pmap_pcid_cpus[ccpu]; pmap_assert(pcid != PMAP_PCID_INVALID_PCID); - if (pcid == PMAP_PCID_INVALID_PCID) + if (pcid == PMAP_PCID_INVALID_PCID) { return; + } lp = cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_last_pmap_dispatched[pcid]; pmap_assert(pcid > 0 && pcid < PMAP_PCID_MAX_PCID); pmap_assert(cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_refcounts[pcid] >= 1); - if (lp == tpmap) + if (lp == tpmap) { (void)__sync_bool_compare_and_swap(&cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_last_pmap_dispatched[pcid], tpmap, PMAP_INVALID); + } if ((prior_count = 
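pmap_pcid_allocate_pcid() above first tries the per-CPU free hint, then scans refcounts for the least-shared PCID, accepting collisions (one PCID may back several pmaps, forcing a flush on conflict at activate time). A reduced version of that hint-then-minimum scan:

#include <stdint.h>
#include <stdio.h>

#define MAX_PCID 8      /* the real limit is PMAP_PCID_MAX_PCID */

static uint8_t refcount[MAX_PCID];
static int     free_hint;

/* Prefer the hinted free slot; otherwise take the PCID with the
 * fewest users.  PCID 0 is reserved for the kernel, so skip it. */
static int
allocate_pcid(void)
{
	if (free_hint != 0 && refcount[free_hint] == 0) {
		int p = free_hint;
		free_hint = 0;
		refcount[p]++;
		return p;
	}

	int best = 1;
	for (int i = 2; i < MAX_PCID; i++) {
		if (refcount[i] < refcount[best]) {
			best = i;
		}
	}
	refcount[best]++;       /* may now be shared: conflict on switch */
	return best;
}

int
main(void)
{
	for (int i = 0; i < 10; i++) {
		printf("pcid=%d\n", allocate_pcid());
	}
	return 0;
}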
__sync_fetch_and_sub(&cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_refcounts[pcid], 1)) == 1) { - cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_free_hint = pcid; + cpu_datap(ccpu)->cpu_pcid_data->cpu_pcid_free_hint = pcid; } pmap_assert(prior_count <= PMAP_PCID_MAX_REFCOUNT); } -void pmap_destroy_pcid_sync(pmap_t p) { +void +pmap_destroy_pcid_sync(pmap_t p) +{ int i; - pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() !=0); - for (i = 0; i < PMAP_PCID_MAX_CPUS; i++) - if (p->pmap_pcid_cpus[i] != PMAP_PCID_INVALID_PCID) + pmap_assert(ml_get_interrupts_enabled() == FALSE || get_preemption_level() != 0); + for (i = 0; i < PMAP_PCID_MAX_CPUS; i++) { + if (p->pmap_pcid_cpus[i] != PMAP_PCID_INVALID_PCID) { pmap_pcid_deallocate_pcid(i, p); + } + } } -pcid_t pcid_for_pmap_cpu_tuple(pmap_t cpmap, thread_t cthread, int ccpu) { +pcid_t +pcid_for_pmap_cpu_tuple(pmap_t cpmap, thread_t cthread, int ccpu) +{ pmap_t active_pmap = cpmap; if (__improbable(cpmap->pagezero_accessible)) { @@ -260,12 +290,15 @@ int npz = 0; #define PCID_RECORD_SIZE 128 uint64_t pcid_record_array[PCID_RECORD_SIZE]; #endif +#define PMAP_UPCIDP(p) ((p ? (p + PMAP_PCID_MAX_PCID) : 0) | 1ULL << 63) -void pmap_pcid_activate(pmap_t tpmap, int ccpu, boolean_t nopagezero, boolean_t copyio) { - pcid_t new_pcid = tpmap->pmap_pcid_cpus[ccpu]; - pmap_t last_pmap; - boolean_t pcid_conflict = FALSE, pending_flush = FALSE; - pcid_cdata_t *pcdata = cpu_datap(ccpu)->cpu_pcid_data; +void +pmap_pcid_activate(pmap_t tpmap, int ccpu, boolean_t nopagezero, boolean_t copyio) +{ + pcid_t new_pcid = tpmap->pmap_pcid_cpus[ccpu]; + pmap_t last_pmap; + boolean_t pcid_conflict = FALSE, pending_flush = FALSE; + pcid_cdata_t *pcdata = cpu_datap(ccpu)->cpu_pcid_data; pmap_assert(cpu_datap(ccpu)->cpu_pmap_pcid_enabled); if (__improbable(new_pcid == PMAP_PCID_INVALID_PCID)) { @@ -273,7 +306,7 @@ void pmap_pcid_activate(pmap_t tpmap, int ccpu, boolean_t nopagezero, boolean_t } pmap_assert(new_pcid != PMAP_PCID_INVALID_PCID); -#ifdef PCID_ASSERT +#ifdef PCID_ASSERT cpu_datap(ccpu)->cpu_last_pcid = cpu_datap(ccpu)->cpu_active_pcid; #endif cpu_datap(ccpu)->cpu_active_pcid = new_pcid; @@ -290,15 +323,16 @@ void pmap_pcid_activate(pmap_t tpmap, int ccpu, boolean_t nopagezero, boolean_t pcdata->cpu_pcid_last_pmap_dispatched[new_pcid] = tpmap; pmap_assert(new_pcid < PMAP_PCID_MAX_PCID); - pmap_assert(((tpmap == kernel_pmap) && new_pcid == 0) || + pmap_assert(((tpmap == kernel_pmap) && new_pcid == 0) || ((new_pcid != PMAP_PCID_INVALID_PCID) && (new_pcid != 0))); -#if PMAP_ASSERT - pcid_record_array[ccpu % PCID_RECORD_SIZE] = tpmap->pm_cr3 | new_pcid | (((uint64_t)(!(pending_flush || pcid_conflict))) <<63); +#if PMAP_ASSERT + pcid_record_array[ccpu % PCID_RECORD_SIZE] = tpmap->pm_cr3 | new_pcid | (((uint64_t)(!(pending_flush || pcid_conflict))) << 63); pml4_entry_t *pml4 = pmap64_pml4(tpmap, 0ULL); /* Diagnostic to detect pagetable anchor corruption */ - if (pml4[KERNEL_PML4_INDEX] != kernel_pmap->pm_pml4[KERNEL_PML4_INDEX]) - __asm__ volatile("int3"); -#endif /* PMAP_ASSERT */ + if (pml4[KERNEL_PML4_INDEX] != kernel_pmap->pm_pml4[KERNEL_PML4_INDEX]) { + __asm__ volatile ("int3"); + } +#endif /* PMAP_ASSERT */ pmap_paddr_t ncr3 = tpmap->pm_cr3; @@ -312,23 +346,11 @@ void pmap_pcid_activate(pmap_t tpmap, int ccpu, boolean_t nopagezero, boolean_t npz++; } - uint64_t preserve = !(pending_flush || pcid_conflict); - set_cr3_composed(ncr3, new_pcid, preserve); -#if DEBUG - cpu_datap(ccpu)->cpu_pcid_last_cr3 = ncr3 | new_pcid | preserve << 63; -#endif - uint64_t 
spcid = (new_pcid + PMAP_PCID_MAX_PCID); - if (new_pcid == 0) { - spcid = 0; - } - uint64_t scr3 = tpmap->pm_ucr3 | spcid; - - cpu_datap(ccpu)->cpu_ucr3 = scr3; - cpu_shadowp(ccpu)->cpu_ucr3 = scr3; - - cpu_shadowp(ccpu)->cpu_task_cr3 = ncr3 | new_pcid; + set_cr3_composed(ncr3, new_pcid, 1ULL); + cpu_shadowp(ccpu)->cpu_shadowtask_cr3 = ncr3 | new_pcid | (1ULL << 63); - if (!pending_flush) { + bool preserve = !pcid_conflict && !pending_flush; + if (preserve == true) { /* We did not previously observe a pending invalidation for this * ASID. However, the load from the coherency vector * could've been reordered ahead of the store to the @@ -341,11 +363,88 @@ void pmap_pcid_activate(pmap_t tpmap, int ccpu, boolean_t nopagezero, boolean_t pending_flush = (tpmap->pmap_pcid_coherency_vector[ccpu] != 0); if (__improbable(pending_flush != 0)) { pmap_pcid_validate_cpu(tpmap, ccpu); - set_cr3_composed(ncr3, new_pcid, FALSE); + preserve = false; } } + + if (preserve == false) { + bool gtlbi = (invpcid_enabled == false); + pmap_tlbi_range(0, ~0ULL, gtlbi, new_pcid); + } + + uint64_t spcid = PMAP_UPCIDP(new_pcid); + uint64_t scr3 = tpmap->pm_ucr3 | spcid; + + cpu_datap(ccpu)->cpu_ucr3 = scr3; + cpu_shadowp(ccpu)->cpu_ucr3 = scr3; + cpu_datap(ccpu)->cpu_pmap_pcid_coherentp = &(tpmap->pmap_pcid_coherency_vector[ccpu]); -#if DEBUG +#if DEBUG + cpu_datap(ccpu)->cpu_pcid_last_cr3 = scr3; KERNEL_DEBUG_CONSTANT(0x9c1d0000, tpmap, new_pcid, pending_flush, pcid_conflict, 0); #endif } + +typedef enum { + INP_ALLG = 2, INP_ASPACE = 1, INP_SINGLE = 0, INP_ALLNG = 3 +} invpcid_type_t; +typedef struct __attribute__((packed)) { + uint64_t ipcid_and_rsvd; + uint64_t iaddr; +} invpcid_desc_t; + +static inline void +invpcid(invpcid_type_t itype, pcid_t ipcid, uint64_t iaddr) +{ + invpcid_desc_t ipcdt; + + ipcdt.ipcid_and_rsvd = ipcid; + ipcdt.iaddr = iaddr; + + uint64_t iptype = itype; //promote to workaround assembler bug + + __asm__ volatile ("invpcid %0, %1" :: "m" (ipcdt), "r" (iptype) : "memory"); +} + + +void +pmap_tlbi_range(uint64_t startv, uint64_t endv, bool global, uint16_t pcid) +{ + assert(ml_get_interrupts_enabled() == FALSE || + get_preemption_level() != 0); + + if (invpcid_enabled) { + if (global) { + invpcid(INP_ALLG, 0, 0ULL); + } else { + /* TODO: separate large page invalidation check */ + if ((endv - startv) >= INP_MAX) { + invpcid(INP_ASPACE, pcid, 0ULL); + if (pcid) { + invpcid(INP_ASPACE, (pcid + PMAP_PCID_MAX_PCID), 0ULL); + } + } else { + uint64_t cv = startv; + for (; cv < endv; cv += PAGE_SIZE) { + invpcid(INP_SINGLE, pcid, cv); + if (pcid) { + invpcid(INP_SINGLE, (pcid + PMAP_PCID_MAX_PCID), cv); + } + } + } + } + } else { + if (pmap_pcid_ncpus) { + uintptr_t cr4 = get_cr4(); + if (__improbable((cr4 & CR4_PGE) == 0)) { + set_cr4(cr4 | CR4_PGE); + } else { + set_cr4(cr4 & ~CR4_PGE); + set_cr4(cr4 | CR4_PGE); + } + } else { + set_cr3_raw(get_cr3_raw()); + } + } + __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); +} diff --git a/osfmk/x86_64/start.s b/osfmk/x86_64/start.s index d94e1ee77..256b9d2fb 100644 --- a/osfmk/x86_64/start.s +++ b/osfmk/x86_64/start.s @@ -224,7 +224,7 @@ Lvstartshim: /* %edi = boot_args_start */ leaq _vstart(%rip), %rcx - movq $0xffffff8000000000, %rax /* adjust pointer up high */ + movq $(KERNEL_BASE), %rax /* adjust pointer up high */ or %rax, %rsp /* and stack pointer up there */ or %rcx, %rax andq $0xfffffffffffffff0, %rsp /* align stack */ @@ -308,7 +308,7 @@ LEXT(hibernate_machine_entrypoint) leaq EXT(hibernate_kernel_entrypoint)(%rip),%rcx /* adjust the pointers to be up 
high */ - movq $0xffffff8000000000, %rax + movq $(KERNEL_BASE), %rax orq %rax, %rsp orq %rcx, %rax diff --git a/pexpert/arm/pe_consistent_debug.c b/pexpert/arm/pe_consistent_debug.c index 28732b8b5..e569811d6 100644 --- a/pexpert/arm/pe_consistent_debug.c +++ b/pexpert/arm/pe_consistent_debug.c @@ -32,13 +32,16 @@ #include #include -static dbg_registry_t * consistent_debug_registry = NULL; +static dbg_registry_t * consistent_debug_registry = NULL; -static dbg_record_header_t* consistent_debug_allocate_entry(void) { +static dbg_record_header_t* +consistent_debug_allocate_entry(void) +{ unsigned int i; - if (!consistent_debug_registry) + if (!consistent_debug_registry) { return NULL; + } for (i = 0; i < consistent_debug_registry->top_level_header.num_records; i++) { dbg_record_header_t *record = &consistent_debug_registry->records[i]; if (OSCompareAndSwap64(kDbgIdUnusedEntry, kDbgIdReservedEntry, &record->record_id)) { @@ -49,37 +52,43 @@ static dbg_record_header_t* consistent_debug_allocate_entry(void) { return NULL; } -int PE_consistent_debug_inherit(void) +int +PE_consistent_debug_inherit(void) { - DTEntry entryP; - uintptr_t *prop_data; - uintptr_t root_pointer = 0; - uint32_t size; + DTEntry entryP; + uintptr_t *prop_data; + uintptr_t root_pointer = 0; + uint32_t size; - if ((DTLookupEntry(NULL, "/chosen", &entryP) == kSuccess)) - if (DTGetProperty(entryP, "consistent-debug-root", (void **)&prop_data, &size) == kSuccess) + if ((DTLookupEntry(NULL, "/chosen", &entryP) == kSuccess)) { + if (DTGetProperty(entryP, "consistent-debug-root", (void **)&prop_data, &size) == kSuccess) { root_pointer = prop_data[0]; - if (root_pointer == 0) + } + } + if (root_pointer == 0) { return -1; + } consistent_debug_registry = (dbg_registry_t *)ml_map_high_window(root_pointer, sizeof(dbg_registry_t)); return 0; } -int PE_consistent_debug_register(uint64_t record_id, uint64_t physaddr, uint64_t length) +int +PE_consistent_debug_register(uint64_t record_id, uint64_t physaddr, uint64_t length) { dbg_record_header_t *allocated_header = consistent_debug_allocate_entry(); - if (allocated_header == NULL) + if (allocated_header == NULL) { return -1; + } allocated_header->length = length; allocated_header->physaddr = physaddr; // Make sure the hdr/length are visible before the record_id. - __asm__ volatile("dmb ish" : : : "memory"); + __asm__ volatile ("dmb ish" : : : "memory"); allocated_header->record_id = record_id; return 0; } -int PE_consistent_debug_enabled(void) +int +PE_consistent_debug_enabled(void) { - return (consistent_debug_registry != NULL); + return consistent_debug_registry != NULL; } - diff --git a/pexpert/arm/pe_identify_machine.c b/pexpert/arm/pe_identify_machine.c index 4e734fbe6..4328c7f2f 100644 --- a/pexpert/arm/pe_identify_machine.c +++ b/pexpert/arm/pe_identify_machine.c @@ -19,41 +19,42 @@ #include #endif /* Local declarations */ -void pe_identify_machine(boot_args * bootArgs); +void pe_identify_machine(boot_args * bootArgs); /* External declarations */ extern void clean_mmu_dcache(void); static char *gPESoCDeviceType; -static char gPESoCDeviceTypeBuffer[SOC_DEVICE_TYPE_BUFFER_SIZE]; +static char gPESoCDeviceTypeBuffer[SOC_DEVICE_TYPE_BUFFER_SIZE]; static vm_offset_t gPESoCBasePhys; -static uint32_t gTCFG0Value; +static uint32_t gTCFG0Value; static uint32_t pe_arm_init_timer(void *args); #if DEVELOPMENT || DEBUG -decl_simple_lock_data(, panic_trace_lock;) +decl_simple_lock_data(, panic_trace_lock; ) #endif /* * pe_identify_machine: - * + * * Sets up platform parameters. 
Returns: nothing */ void pe_identify_machine(boot_args * bootArgs) { OpaqueDTEntryIterator iter; - DTEntry cpus, cpu; - uint32_t mclk = 0, hclk = 0, pclk = 0, tclk = 0, use_dt = 0; + DTEntry cpus, cpu; + uint32_t mclk = 0, hclk = 0, pclk = 0, tclk = 0, use_dt = 0; unsigned long *value; - unsigned int size; - int err; + unsigned int size; + int err; (void)bootArgs; - if (pe_arm_get_soc_base_phys() == 0) + if (pe_arm_get_soc_base_phys() == 0) { return; + } /* Clear the gPEClockFrequencyInfo struct */ bzero((void *)&gPEClockFrequencyInfo, sizeof(clock_frequency_info_t)); @@ -67,15 +68,14 @@ pe_identify_machine(boot_args * bootArgs) gTCFG0Value = tclk - 1; - tclk = pclk / (4 * tclk); /* Calculate the "actual" - * Timer0 frequency in fixed - * point. */ + tclk = pclk / (4 * tclk); /* Calculate the "actual" + * Timer0 frequency in fixed + * point. */ mclk = (mclk >> 17) * (125 * 125); hclk = (hclk >> 17) * (125 * 125); pclk = (pclk >> 17) * (125 * 125); tclk = (((((tclk * 125) + 2) >> 2) * 125) + (1 << 14)) >> 15; - } else if (!strcmp(gPESoCDeviceType, "integratorcp-io")) { mclk = 200000000; hclk = mclk / 2; @@ -87,23 +87,23 @@ pe_identify_machine(boot_args * bootArgs) pclk = hclk / 2; tclk = pclk; } else if (!strcmp(gPESoCDeviceType, "omap3430sdp-io")) { - mclk = 332000000; - hclk = 19200000; - pclk = hclk; - tclk = pclk; + mclk = 332000000; + hclk = 19200000; + pclk = hclk; + tclk = pclk; } else if (!strcmp(gPESoCDeviceType, "s5i3000-io")) { mclk = 400000000; hclk = mclk / 4; pclk = hclk / 2; - tclk = 100000; /* timer is at 100khz */ - + tclk = 100000; /* timer is at 100khz */ } else if (!strcmp(gPESoCDeviceType, "bcm2837-io")) { mclk = 1200000000; hclk = mclk / 4; pclk = hclk / 2; tclk = 1000000; - } else + } else { use_dt = 1; + } if (use_dt) { /* Start with default values. */ @@ -119,8 +119,9 @@ pe_identify_machine(boot_args * bootArgs) while (kSuccess == DTIterateEntries(&iter, &cpu)) { if ((kSuccess != DTGetProperty(cpu, "state", (void **)&value, &size)) || - (strncmp((char*)value, "running", size) != 0)) + (strncmp((char*)value, "running", size) != 0)) { continue; + } /* Find the time base frequency first. */ if (DTGetProperty(cpu, "timebase-frequency", (void **)&value, &size) == kSuccess) { @@ -129,69 +130,77 @@ pe_identify_machine(boot_args * bootArgs) * the device tree should never provide 64 * bits so this if should never be taken. */ - if (size == 8) + if (size == 8) { gPEClockFrequencyInfo.timebase_frequency_hz = *(unsigned long long *)value; - else + } else { gPEClockFrequencyInfo.timebase_frequency_hz = *value; + } } gPEClockFrequencyInfo.dec_clock_rate_hz = gPEClockFrequencyInfo.timebase_frequency_hz; /* Find the bus frequency next. */ if (DTGetProperty(cpu, "bus-frequency", (void **)&value, &size) == kSuccess) { - if (size == 8) + if (size == 8) { gPEClockFrequencyInfo.bus_frequency_hz = *(unsigned long long *)value; - else + } else { gPEClockFrequencyInfo.bus_frequency_hz = *value; + } } gPEClockFrequencyInfo.bus_frequency_min_hz = gPEClockFrequencyInfo.bus_frequency_hz; gPEClockFrequencyInfo.bus_frequency_max_hz = gPEClockFrequencyInfo.bus_frequency_hz; - if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) + if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) { gPEClockFrequencyInfo.bus_clock_rate_hz = gPEClockFrequencyInfo.bus_frequency_hz; - else + } else { gPEClockFrequencyInfo.bus_clock_rate_hz = 0xFFFFFFFF; + } /* Find the memory frequency next. 
*/ if (DTGetProperty(cpu, "memory-frequency", (void **)&value, &size) == kSuccess) { - if (size == 8) + if (size == 8) { gPEClockFrequencyInfo.mem_frequency_hz = *(unsigned long long *)value; - else + } else { gPEClockFrequencyInfo.mem_frequency_hz = *value; + } } gPEClockFrequencyInfo.mem_frequency_min_hz = gPEClockFrequencyInfo.mem_frequency_hz; gPEClockFrequencyInfo.mem_frequency_max_hz = gPEClockFrequencyInfo.mem_frequency_hz; /* Find the peripheral frequency next. */ if (DTGetProperty(cpu, "peripheral-frequency", (void **)&value, &size) == kSuccess) { - if (size == 8) + if (size == 8) { gPEClockFrequencyInfo.prf_frequency_hz = *(unsigned long long *)value; - else + } else { gPEClockFrequencyInfo.prf_frequency_hz = *value; + } } gPEClockFrequencyInfo.prf_frequency_min_hz = gPEClockFrequencyInfo.prf_frequency_hz; gPEClockFrequencyInfo.prf_frequency_max_hz = gPEClockFrequencyInfo.prf_frequency_hz; /* Find the fixed frequency next. */ if (DTGetProperty(cpu, "fixed-frequency", (void **)&value, &size) == kSuccess) { - if (size == 8) + if (size == 8) { gPEClockFrequencyInfo.fix_frequency_hz = *(unsigned long long *)value; - else + } else { gPEClockFrequencyInfo.fix_frequency_hz = *value; + } } /* Find the cpu frequency last. */ if (DTGetProperty(cpu, "clock-frequency", (void **)&value, &size) == kSuccess) { - if (size == 8) + if (size == 8) { gPEClockFrequencyInfo.cpu_frequency_hz = *(unsigned long long *)value; - else + } else { gPEClockFrequencyInfo.cpu_frequency_hz = *value; + } } gPEClockFrequencyInfo.cpu_frequency_min_hz = gPEClockFrequencyInfo.cpu_frequency_hz; gPEClockFrequencyInfo.cpu_frequency_max_hz = gPEClockFrequencyInfo.cpu_frequency_hz; - if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) + if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) { gPEClockFrequencyInfo.cpu_clock_rate_hz = gPEClockFrequencyInfo.cpu_frequency_hz; - else + } else { gPEClockFrequencyInfo.cpu_clock_rate_hz = 0xFFFFFFFF; + } } } else { /* Use the canned values. 
*/ @@ -218,20 +227,20 @@ pe_identify_machine(boot_args * bootArgs) gPEClockFrequencyInfo.bus_clock_rate_den = 1; gPEClockFrequencyInfo.bus_to_cpu_rate_num = - (2 * gPEClockFrequencyInfo.cpu_clock_rate_hz) / gPEClockFrequencyInfo.bus_clock_rate_hz; + (2 * gPEClockFrequencyInfo.cpu_clock_rate_hz) / gPEClockFrequencyInfo.bus_clock_rate_hz; gPEClockFrequencyInfo.bus_to_cpu_rate_den = 2; gPEClockFrequencyInfo.bus_to_dec_rate_num = 1; gPEClockFrequencyInfo.bus_to_dec_rate_den = - gPEClockFrequencyInfo.bus_clock_rate_hz / gPEClockFrequencyInfo.dec_clock_rate_hz; + gPEClockFrequencyInfo.bus_clock_rate_hz / gPEClockFrequencyInfo.dec_clock_rate_hz; } vm_offset_t pe_arm_get_soc_base_phys(void) { - DTEntry entryP; - uintptr_t *ranges_prop; - uint32_t prop_size; + DTEntry entryP; + uintptr_t *ranges_prop; + uint32_t prop_size; char *tmpStr; if (DTFindEntry("name", "arm-io", &entryP) == kSuccess) { @@ -251,22 +260,23 @@ pe_arm_get_soc_base_phys(void) uint32_t pe_arm_get_soc_revision(void) { - DTEntry entryP; - uint32_t *value; - uint32_t size; + DTEntry entryP; + uint32_t *value; + uint32_t size; - if ((DTFindEntry("name", "arm-io", &entryP) == kSuccess) + if ((DTFindEntry("name", "arm-io", &entryP) == kSuccess) && (DTGetProperty(entryP, "chip-revision", (void **)&value, &size) == kSuccess)) { - if (size == 8) - return((uint32_t)*(unsigned long long *)value); - else - return(*value); + if (size == 8) { + return (uint32_t)*(unsigned long long *)value; + } else { + return *value; + } } return 0; } -extern void fleh_fiq_generic(void); +extern void fleh_fiq_generic(void); #if defined(ARM_BOARD_CLASS_S5L8960X) static struct tbd_ops s5l8960x_funcs = {NULL, NULL, NULL}; @@ -278,8 +288,8 @@ static struct tbd_ops t7000_funcs = {NULL, NULL, NULL}; #if defined(ARM_BOARD_CLASS_S7002) extern void fleh_fiq_s7002(void); -extern uint32_t s7002_get_decrementer(void); -extern void s7002_set_decrementer(uint32_t); +extern uint32_t s7002_get_decrementer(void); +extern void s7002_set_decrementer(uint32_t); static struct tbd_ops s7002_funcs = {&fleh_fiq_s7002, &s7002_get_decrementer, &s7002_set_decrementer}; #endif /* defined(ARM_BOARD_CLASS_S7002) */ @@ -289,8 +299,8 @@ static struct tbd_ops s8000_funcs = {NULL, NULL, NULL}; #if defined(ARM_BOARD_CLASS_T8002) extern void fleh_fiq_t8002(void); -extern uint32_t t8002_get_decrementer(void); -extern void t8002_set_decrementer(uint32_t); +extern uint32_t t8002_get_decrementer(void); +extern void t8002_set_decrementer(uint32_t); static struct tbd_ops t8002_funcs = {&fleh_fiq_t8002, &t8002_get_decrementer, &t8002_set_decrementer}; #endif /* defined(ARM_BOARD_CLASS_T8002) */ @@ -315,52 +325,51 @@ static struct tbd_ops t8015_funcs = {NULL, NULL, NULL}; static struct tbd_ops bcm2837_funcs = {NULL, NULL, NULL}; #endif /* defined(ARM_BOARD_CLASS_BCM2837) */ -vm_offset_t gPicBase; -vm_offset_t gTimerBase; -vm_offset_t gSocPhys; +vm_offset_t gPicBase; +vm_offset_t gTimerBase; +vm_offset_t gSocPhys; #if DEVELOPMENT || DEBUG // This block contains the panic trace implementation // These variables are local to this file, and contain the panic trace configuration information -typedef enum -{ - panic_trace_disabled = 0, - panic_trace_unused, - panic_trace_enabled, - panic_trace_alt_enabled, +typedef enum{ + panic_trace_disabled = 0, + panic_trace_unused, + panic_trace_enabled, + panic_trace_alt_enabled, } panic_trace_t; static panic_trace_t bootarg_panic_trace; // The command buffer contains the converted commands from the device tree for commanding cpu_halt, enable_trace, etc. 
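/*
 * [Illustrative aside, not part of this patch] The macros below pack two
 * fields into each device-tree command word: the upper 16 bits are a CPU
 * selector bitmask (0x0000 = all CPUs, bit n = CPU n) and the low bits are
 * a register offset into the mapped debug window. A standalone decoding
 * sketch, with hypothetical names and a made-up sample word:
 */
#if 0   /* self-contained example; compile separately to try it */
#include <stdint.h>
#include <stdio.h>

#define SEL_SHIFT ((sizeof(int) - 2) * 8)   /* 16, mirroring CPU_SELECTOR_SHIFT */

int main(void)
{
    /* Hypothetical command word: offset 0x40, targeting CPUs 0 and 1 (mask 0x0003). */
    uintptr_t prop = ((uintptr_t)0x0003 << SEL_SHIFT) | 0x40;
    uint16_t cpus = (uint16_t)(prop >> SEL_SHIFT);
    uintptr_t offset = prop & ((1u << SEL_SHIFT) - 1);

    printf("cpus=0x%04x offset=0x%lx\n", cpus, (unsigned long)offset);
    return 0;
}
#endif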
#define DEBUG_COMMAND_BUFFER_SIZE 256 -typedef struct command_buffer_element{ +typedef struct command_buffer_element { uintptr_t address; uint16_t destination_cpu_selector; uintptr_t value; } command_buffer_element_t; -static command_buffer_element_t debug_command_buffer[DEBUG_COMMAND_BUFFER_SIZE]; // statically allocate to prevent needing alloc at runtime -static uint32_t next_command_bufffer_entry = 0; // index of next unused slot in debug_command_buffer - -#define CPU_SELECTOR_SHIFT ((sizeof(int)-2)*8) -#define CPU_SELECTOR_MASK (0xFFFF << CPU_SELECTOR_SHIFT) -#define REGISTER_OFFSET_MASK (~CPU_SELECTOR_MASK) -#define REGISTER_OFFSET(register_prop) (register_prop & REGISTER_OFFSET_MASK) -#define CPU_SELECTOR(register_offset) (register_offset >> CPU_SELECTOR_SHIFT) // Upper 16bits holds the cpu selector -#define MAX_WINDOW_SIZE 0xFFFF -#define PE_ISSPACE(c) (c == ' ' || c == '\t' || c == '\n' || c == '\12') +static command_buffer_element_t debug_command_buffer[DEBUG_COMMAND_BUFFER_SIZE]; // statically allocate to prevent needing alloc at runtime +static uint32_t next_command_bufffer_entry = 0; // index of next unused slot in debug_command_buffer + +#define CPU_SELECTOR_SHIFT ((sizeof(int)-2)*8) +#define CPU_SELECTOR_MASK (0xFFFF << CPU_SELECTOR_SHIFT) +#define REGISTER_OFFSET_MASK (~CPU_SELECTOR_MASK) +#define REGISTER_OFFSET(register_prop) (register_prop & REGISTER_OFFSET_MASK) +#define CPU_SELECTOR(register_offset) (register_offset >> CPU_SELECTOR_SHIFT) // Upper 16bits holds the cpu selector +#define MAX_WINDOW_SIZE 0xFFFF +#define PE_ISSPACE(c) (c == ' ' || c == '\t' || c == '\n' || c == '\12') /* -0x0000 - all cpus -0x0001 - cpu 0 -0x0002 - cpu 1 -0x0004 - cpu 2 -0x0003 - cpu 0 and 1 -since it's 16bits, we can have up to 16 cpus -*/ + * 0x0000 - all cpus + * 0x0001 - cpu 0 + * 0x0002 - cpu 1 + * 0x0004 - cpu 2 + * 0x0003 - cpu 0 and 1 + * since it's 16bits, we can have up to 16 cpus + */ #define ALL_CPUS 0x0000 #define IS_CPU_SELECTED(cpu_number, cpu_selector) (cpu_selector == ALL_CPUS || (cpu_selector & (1< DEBUG_COMMAND_BUFFER_SIZE-1) { + if (next_command_bufffer_entry + prop_size / sizeof(uintptr_t) > DEBUG_COMMAND_BUFFER_SIZE - 1) { panic("pe_init_debug_command: property %s is %u bytes, command buffer only has %lu bytes remaining\n", - entry_name, prop_size, ((DEBUG_COMMAND_BUFFER_SIZE-1) - next_command_bufffer_entry) * sizeof(uintptr_t) ); + entry_name, prop_size, ((DEBUG_COMMAND_BUFFER_SIZE - 1) - next_command_bufffer_entry) * sizeof(uintptr_t)); } // Hold the pointer in a temp variable and later assign it to command buffer, in case we panic while half-initialized command_starting_index = next_command_bufffer_entry; // convert to real virt addresses and stuff commands into debug_command_buffer - for( ; prop_size ; reg_prop += 2, prop_size -= 2*sizeof(uintptr_t) ) { + for (; prop_size; reg_prop += 2, prop_size -= 2 * sizeof(uintptr_t)) { if (*reg_prop == RESET_VIRTUAL_ADDRESS_WINDOW) { debug_reg_window = 0; // Create a new window - } - else if (debug_reg_window==0) { + } else if (debug_reg_window == 0) { // create a window from virtual address to the specified physical address reg_window_size = ((uint32_t)*(reg_prop + 1)); if (reg_window_size > MAX_WINDOW_SIZE) { @@ -409,19 +417,19 @@ pe_init_debug_command(DTEntry entryP, command_buffer_element_t **command_buffer, debug_reg_window = ml_io_map(gSocPhys + *reg_prop, reg_window_size); // for debug -- kprintf("pe_init_debug_command: %s registers @ 0x%08lX for 0x%08lX\n", entry_name, debug_reg_window, *(reg_prop + 1) ); } else { - 
if ((REGISTER_OFFSET(*reg_prop)+ sizeof(uintptr_t)) >= reg_window_size) { - panic("pe_init_debug_command: Command Offset is %lx, exceeds allocated size of %x\n", REGISTER_OFFSET(*reg_prop),reg_window_size ); + if ((REGISTER_OFFSET(*reg_prop) + sizeof(uintptr_t)) >= reg_window_size) { + panic("pe_init_debug_command: Command Offset is %lx, exceeds allocated size of %x\n", REGISTER_OFFSET(*reg_prop), reg_window_size ); } debug_command_buffer[next_command_bufffer_entry].address = debug_reg_window + REGISTER_OFFSET(*reg_prop); debug_command_buffer[next_command_bufffer_entry].destination_cpu_selector = CPU_SELECTOR(*reg_prop); - debug_command_buffer[next_command_bufffer_entry++].value = *(reg_prop+1); + debug_command_buffer[next_command_bufffer_entry++].value = *(reg_prop + 1); } } // null terminate the address field of the command to end it debug_command_buffer[next_command_bufffer_entry++].address = 0; - // save pointer into table for this command + // save pointer into table for this command *command_buffer = &debug_command_buffer[command_starting_index]; } @@ -429,12 +437,12 @@ static void pe_run_debug_command(command_buffer_element_t *command_buffer) { // When both the CPUs panic, one will get stuck on the lock and the other CPU will be halted when the first executes the debug command - simple_lock(&panic_trace_lock); + simple_lock(&panic_trace_lock, LCK_GRP_NULL); running_debug_command_on_cpu_number = cpu_number(); - while( command_buffer && command_buffer->address ) { + while (command_buffer && command_buffer->address) { if (IS_CPU_SELECTED(running_debug_command_on_cpu_number, command_buffer->destination_cpu_selector)) { - *((volatile uintptr_t*)(command_buffer->address)) = command_buffer->value; // register = value; + *((volatile uintptr_t*)(command_buffer->address)) = command_buffer->value; // register = value; } command_buffer++; } @@ -447,24 +455,24 @@ pe_run_debug_command(command_buffer_element_t *command_buffer) void PE_arm_debug_enable_trace(void) { - switch (bootarg_panic_trace) { - case panic_trace_enabled: - pe_run_debug_command(enable_trace); - break; - - case panic_trace_alt_enabled: - pe_run_debug_command(enable_alt_trace); - break; - - default: - break; - } + switch (bootarg_panic_trace) { + case panic_trace_enabled: + pe_run_debug_command(enable_trace); + break; + + case panic_trace_alt_enabled: + pe_run_debug_command(enable_alt_trace); + break; + + default: + break; + } } static void PEARMDebugPanicHook(const char *str) { - (void)str; // not used + (void)str; // not used // if panic trace is enabled if (bootarg_panic_trace != 0) { @@ -474,7 +482,7 @@ PEARMDebugPanicHook(const char *str) return; // allow the normal panic operation to occur. } - // Stop tracing to freze the buffer and return to normal panic processing. + // Stop tracing to freze the buffer and return to normal panic processing. 
pe_run_debug_command(trace_halt); } } @@ -490,16 +498,16 @@ void (*PE_arm_debug_panic_hook)(const char *str) = NULL; void pe_arm_init_debug(void *args) { - DTEntry entryP; - uintptr_t *reg_prop; - uint32_t prop_size; + DTEntry entryP; + uintptr_t *reg_prop; + uint32_t prop_size; - if (gSocPhys == 0 ) { + if (gSocPhys == 0) { kprintf("pe_arm_init_debug: failed to initialize gSocPhys == 0\n"); - return; + return; } - - if ( DTFindEntry("device_type", "cpu-debug-interface", &entryP) == kSuccess ) { + + if (DTFindEntry("device_type", "cpu-debug-interface", &entryP) == kSuccess) { if (args != NULL) { if (DTGetProperty(entryP, "reg", (void **)®_prop, &prop_size) == kSuccess) { ml_init_arm_debug_interface(args, ml_io_map(gSocPhys + *reg_prop, *(reg_prop + 1))); @@ -509,20 +517,19 @@ pe_arm_init_debug(void *args) // This controls one-time initialization of the Panic Trace infrastructure simple_lock_init(&panic_trace_lock, 0); //assuming single threaded mode - + // Panic_halt is deprecated. Please use panic_trace istead. unsigned int temp_bootarg_panic_trace; if (PE_parse_boot_argn("panic_trace", &temp_bootarg_panic_trace, sizeof(temp_bootarg_panic_trace)) || PE_parse_boot_argn("panic_halt", &temp_bootarg_panic_trace, sizeof(temp_bootarg_panic_trace))) { - kprintf("pe_arm_init_debug: panic_trace=%d\n", temp_bootarg_panic_trace); - // Prepare debug command buffers. + // Prepare debug command buffers. pe_init_debug_command(entryP, &cpu_halt, "cpu_halt"); pe_init_debug_command(entryP, &enable_trace, "enable_trace"); pe_init_debug_command(entryP, &enable_alt_trace, "enable_alt_trace"); pe_init_debug_command(entryP, &trace_halt, "trace_halt"); - + // now that init's are done, enable the panic halt capture (allows pe_init_debug_command to panic normally if necessary) bootarg_panic_trace = temp_bootarg_panic_trace; @@ -539,17 +546,18 @@ pe_arm_init_debug(void *args) static uint32_t pe_arm_map_interrupt_controller(void) { - DTEntry entryP; - uintptr_t *reg_prop; - uint32_t prop_size; - vm_offset_t soc_phys = 0; + DTEntry entryP; + uintptr_t *reg_prop; + uint32_t prop_size; + vm_offset_t soc_phys = 0; gSocPhys = pe_arm_get_soc_base_phys(); soc_phys = gSocPhys; kprintf("pe_arm_map_interrupt_controller: soc_phys: 0x%lx\n", (unsigned long)soc_phys); - if (soc_phys == 0) + if (soc_phys == 0) { return 0; + } if (DTFindEntry("interrupt-controller", "master", &entryP) == kSuccess) { kprintf("pe_arm_map_interrupt_controller: found interrupt-controller\n"); @@ -591,16 +599,16 @@ pe_arm_init_interrupts(void *args) return pe_arm_init_timer(args); } -static uint32_t +static uint32_t pe_arm_init_timer(void *args) { - vm_offset_t pic_base = 0; - vm_offset_t timer_base = 0; - vm_offset_t soc_phys; - vm_offset_t eoi_addr = 0; - uint32_t eoi_value = 0; + vm_offset_t pic_base = 0; + vm_offset_t timer_base = 0; + vm_offset_t soc_phys; + vm_offset_t eoi_addr = 0; + uint32_t eoi_value = 0; struct tbd_ops generic_funcs = {&fleh_fiq_generic, NULL, NULL}; - tbd_ops_t tbd_funcs = &generic_funcs; + tbd_ops_t tbd_funcs = &generic_funcs; /* The SoC headers expect to use pic_base, timer_base, etc... 
*/ pic_base = gPicBase; @@ -609,22 +617,20 @@ pe_arm_init_timer(void *args) #if defined(ARM_BOARD_CLASS_S5L8960X) if (!strcmp(gPESoCDeviceType, "s5l8960x-io")) { - tbd_funcs = &s5l8960x_funcs; } else -#endif +#endif #if defined(ARM_BOARD_CLASS_T7000) if (!strcmp(gPESoCDeviceType, "t7000-io") || - !strcmp(gPESoCDeviceType, "t7001-io")) { + !strcmp(gPESoCDeviceType, "t7001-io")) { tbd_funcs = &t7000_funcs; } else #endif #if defined(ARM_BOARD_CLASS_S7002) if (!strcmp(gPESoCDeviceType, "s7002-io")) { - #ifdef ARM_BOARD_WFE_TIMEOUT_NS // Enable the WFE Timer - rPMGR_EVENT_TMR_PERIOD = ((uint64_t)(ARM_BOARD_WFE_TIMEOUT_NS) * gPEClockFrequencyInfo.timebase_frequency_hz) / NSEC_PER_SEC; + rPMGR_EVENT_TMR_PERIOD = ((uint64_t)(ARM_BOARD_WFE_TIMEOUT_NS) *gPEClockFrequencyInfo.timebase_frequency_hz) / NSEC_PER_SEC; rPMGR_EVENT_TMR = rPMGR_EVENT_TMR_PERIOD; rPMGR_EVENT_TMR_CTL = PMGR_EVENT_TMR_CTL_EN; #endif /* ARM_BOARD_WFE_TIMEOUT_NS */ @@ -646,14 +652,13 @@ pe_arm_init_timer(void *args) #if defined(ARM_BOARD_CLASS_T8002) if (!strcmp(gPESoCDeviceType, "t8002-io") || !strcmp(gPESoCDeviceType, "t8004-io")) { - /* Enable the Decrementer */ aic_write32(kAICTmrCnt, 0x7FFFFFFF); aic_write32(kAICTmrCfg, kAICTmrCfgEn); aic_write32(kAICTmrIntStat, kAICTmrIntStatPct); #ifdef ARM_BOARD_WFE_TIMEOUT_NS // Enable the WFE Timer - rPMGR_EVENT_TMR_PERIOD = ((uint64_t)(ARM_BOARD_WFE_TIMEOUT_NS) * gPEClockFrequencyInfo.timebase_frequency_hz) / NSEC_PER_SEC; + rPMGR_EVENT_TMR_PERIOD = ((uint64_t)(ARM_BOARD_WFE_TIMEOUT_NS) *gPEClockFrequencyInfo.timebase_frequency_hz) / NSEC_PER_SEC; rPMGR_EVENT_TMR = rPMGR_EVENT_TMR_PERIOD; rPMGR_EVENT_TMR_CTL = PMGR_EVENT_TMR_CTL_EN; #endif /* ARM_BOARD_WFE_TIMEOUT_NS */ @@ -683,11 +688,11 @@ pe_arm_init_timer(void *args) tbd_funcs = &bcm2837_funcs; } else #endif - return 0; + return 0; - if (args != NULL) + if (args != NULL) { ml_init_timebase(args, tbd_funcs, eoi_addr, eoi_value); + } return 1; } - diff --git a/pexpert/arm/pe_init.c b/pexpert/arm/pe_init.c index 6e65a0ab9..1113d5a5c 100644 --- a/pexpert/arm/pe_init.c +++ b/pexpert/arm/pe_init.c @@ -28,7 +28,7 @@ extern void pe_identify_machine(boot_args *bootArgs); /* static references */ -static void pe_prepare_images(void); +static void pe_prepare_images(void); /* private globals */ SECURITY_READ_ONLY_LATE(PE_state_t) PE_state; @@ -37,13 +37,13 @@ char firmware_version[FW_VERS_LEN]; /* * This variable is only modified once, when the BSP starts executing. We put it in __TEXT - * as page protections on kernel text early in startup are read-write. The kernel is - * locked down later in start-up, said mappings become RO and thus this + * as page protections on kernel text early in startup are read-write. The kernel is + * locked down later in start-up, said mappings become RO and thus this * variable becomes immutable. * * See osfmk/arm/arm_vm_init.c for more information. 
*/ -SECURITY_READ_ONLY_SPECIAL_SECTION(volatile uint32_t, "__TEXT,__const") debug_enabled = FALSE; +SECURITY_READ_ONLY_SPECIAL_SECTION(volatile uint32_t, "__TEXT,__const") debug_enabled = FALSE; uint8_t gPlatformECID[8]; uint32_t gPlatformMemoryID; @@ -85,17 +85,21 @@ check_for_panic_log(void) /* * Find the vram node in the device tree */ - if (kSuccess != DTLookupEntry(0, "pram", &entry)) + if (kSuccess != DTLookupEntry(0, "pram", &entry)) { return; + } - if (kSuccess != DTGetProperty(entry, "reg", (void **)®_prop, &size)) + if (kSuccess != DTGetProperty(entry, "reg", (void **)®_prop, &size)) { return; + } - if (kSuccess != DTLookupEntry(0, "/chosen", &chosen)) + if (kSuccess != DTLookupEntry(0, "/chosen", &chosen)) { return; + } - if (kSuccess != DTGetProperty(chosen, "embedded-panic-log-size", (void **) &panic_region_length, &size)) + if (kSuccess != DTGetProperty(chosen, "embedded-panic-log-size", (void **) &panic_region_length, &size)) { return; + } /* * Map the first page of VRAM into the kernel for use in case of @@ -137,10 +141,11 @@ PE_initialize_console(PE_Video * info, int op) { static int last_console = -1; - if (info && (info != &PE_state.video)) info->v_scale = PE_state.video.v_scale; + if (info && (info != &PE_state.video)) { + info->v_scale = PE_state.video.v_scale; + } switch (op) { - case kPEDisableScreen: initialize_screen(info, op); last_console = switch_to_serial_console(); @@ -149,11 +154,13 @@ PE_initialize_console(PE_Video * info, int op) case kPEEnableScreen: initialize_screen(info, op); - if (info) + if (info) { PE_state.video = *info; + } kprintf("kPEEnableScreen %d\n", last_console); - if (last_console != -1) + if (last_console != -1) { switch_to_old_console(last_console); + } break; case kPEReleaseScreen: @@ -165,10 +172,10 @@ PE_initialize_console(PE_Video * info, int op) default_progress.dx = 0; default_progress.dy = 0; vc_progress_initialize(&default_progress, - default_progress_data1x, - default_progress_data2x, - default_progress_data3x, - (unsigned char *) appleClut8); + default_progress_data1x, + default_progress_data2x, + default_progress_data3x, + (unsigned char *) appleClut8); vc_progress_initialized = TRUE; } initialize_screen(info, op); @@ -185,16 +192,16 @@ PE_initialize_console(PE_Video * info, int op) void PE_init_iokit(void) { - DTEntry entry; - unsigned int size, scale; - unsigned long display_size; - void **map; - unsigned int show_progress; - int *delta, image_size, flip; - uint32_t start_time_value = 0; - uint32_t debug_wait_start_value = 0; - uint32_t load_kernel_start_value = 0; - uint32_t populate_registry_time_value = 0; + DTEntry entry; + unsigned int size, scale; + unsigned long display_size; + void **map; + unsigned int show_progress; + int *delta, image_size, flip; + uint32_t start_time_value = 0; + uint32_t debug_wait_start_value = 0; + uint32_t load_kernel_start_value = 0; + uint32_t populate_registry_time_value = 0; PE_init_kprintf(TRUE); PE_init_printf(TRUE); @@ -202,14 +209,13 @@ PE_init_iokit(void) printf("iBoot version: %s\n", firmware_version); if (kSuccess == DTLookupEntry(0, "/chosen/memory-map", &entry)) { - boot_progress_element *bootPict; - if (kSuccess == DTGetProperty(entry, "BootCLUT", (void **) &map, &size)) + if (kSuccess == DTGetProperty(entry, "BootCLUT", (void **) &map, &size)) { bcopy(map[0], appleClut8, sizeof(appleClut8)); + } if (kSuccess == DTGetProperty(entry, "Pict-FailedBoot", (void **) &map, &size)) { - bootPict = (boot_progress_element *) map[0]; default_noroot.width = bootPict->width; 
default_noroot.height = bootPict->height; @@ -224,12 +230,12 @@ PE_init_iokit(void) scale = PE_state.video.v_scale; flip = 1; - if (PE_parse_boot_argn("-progress", &show_progress, sizeof (show_progress)) && show_progress) { + if (PE_parse_boot_argn("-progress", &show_progress, sizeof(show_progress)) && show_progress) { /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */ switch (PE_state.video.v_rotate) { - case 2: + case 2: flip = -1; - /* fall through */ + /* fall through */ case 0: display_size = PE_state.video.v_height; image_size = default_progress.height; @@ -237,7 +243,7 @@ PE_init_iokit(void) break; case 1: flip = -1; - /* fall through */ + /* fall through */ case 3: default: display_size = PE_state.video.v_width; @@ -255,10 +261,10 @@ PE_init_iokit(void) PE_get_default("progress-dy", &default_progress.dy, sizeof(default_progress.dy)); vc_progress_initialize(&default_progress, - default_progress_data1x, - default_progress_data2x, - default_progress_data3x, - (unsigned char *) appleClut8); + default_progress_data1x, + default_progress_data2x, + default_progress_data3x, + (unsigned char *) appleClut8); vc_progress_initialized = TRUE; } @@ -268,23 +274,27 @@ PE_init_iokit(void) uint32_t * value_ptr; if (kSuccess == DTGetProperty(entry, "start-time", (void **)&value_ptr, &size)) { - if (size == sizeof(start_time_value)) + if (size == sizeof(start_time_value)) { start_time_value = *value_ptr; + } } if (kSuccess == DTGetProperty(entry, "debug-wait-start", (void **)&value_ptr, &size)) { - if (size == sizeof(debug_wait_start_value)) + if (size == sizeof(debug_wait_start_value)) { debug_wait_start_value = *value_ptr; + } } if (kSuccess == DTGetProperty(entry, "load-kernel-start", (void **)&value_ptr, &size)) { - if (size == sizeof(load_kernel_start_value)) + if (size == sizeof(load_kernel_start_value)) { load_kernel_start_value = *value_ptr; + } } if (kSuccess == DTGetProperty(entry, "populate-registry-time", (void **)&value_ptr, &size)) { - if (size == sizeof(populate_registry_time_value)) + if (size == sizeof(populate_registry_time_value)) { populate_registry_time_value = *value_ptr; + } } } @@ -306,7 +316,7 @@ void PE_init_platform(boolean_t vm_initialized, void *args) { DTEntry entry; - unsigned int size; + unsigned int size; void **prop; boot_args *boot_args_ptr = (boot_args *) args; @@ -340,26 +350,28 @@ PE_init_platform(boolean_t vm_initialized, void *args) if (!vm_initialized) { if (kSuccess == (DTFindEntry("name", "device-tree", &entry))) { if (kSuccess == DTGetProperty(entry, "target-type", - (void **)&prop, &size)) { - if (size > sizeof(gTargetTypeBuffer)) + (void **)&prop, &size)) { + if (size > sizeof(gTargetTypeBuffer)) { size = sizeof(gTargetTypeBuffer); - bcopy(prop,gTargetTypeBuffer,size); - gTargetTypeBuffer[size-1]='\0'; + } + bcopy(prop, gTargetTypeBuffer, size); + gTargetTypeBuffer[size - 1] = '\0'; } } if (kSuccess == (DTFindEntry("name", "device-tree", &entry))) { if (kSuccess == DTGetProperty(entry, "model", - (void **)&prop, &size)) { - if (size > sizeof(gModelTypeBuffer)) + (void **)&prop, &size)) { + if (size > sizeof(gModelTypeBuffer)) { size = sizeof(gModelTypeBuffer); - bcopy(prop,gModelTypeBuffer,size); - gModelTypeBuffer[size-1]='\0'; + } + bcopy(prop, gModelTypeBuffer, size); + gModelTypeBuffer[size - 1] = '\0'; } } if (kSuccess == DTLookupEntry(NULL, "/chosen", &entry)) { if (kSuccess == DTGetProperty(entry, "debug-enabled", - (void **) &prop, &size)) { - /* + (void **) &prop, &size)) { + /* * We purposefully modify a constified variable as * it will 
get locked down by a trusted monitor or * via page table mappings. We don't want people easily @@ -368,29 +380,33 @@ PE_init_platform(boolean_t vm_initialized, void *args) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-qual" boolean_t *modify_debug_enabled = (boolean_t *) &debug_enabled; - if (size > sizeof(uint32_t)) + if (size > sizeof(uint32_t)) { size = sizeof(uint32_t); + } bcopy(prop, modify_debug_enabled, size); #pragma clang diagnostic pop } if (kSuccess == DTGetProperty(entry, "firmware-version", - (void **) &prop, &size)) { - if (size > sizeof(firmware_version)) + (void **) &prop, &size)) { + if (size > sizeof(firmware_version)) { size = sizeof(firmware_version); + } bcopy(prop, firmware_version, size); firmware_version[size - 1] = '\0'; } if (kSuccess == DTGetProperty(entry, "unique-chip-id", - (void **) &prop, &size)) { - if (size > sizeof(gPlatformECID)) + (void **) &prop, &size)) { + if (size > sizeof(gPlatformECID)) { size = sizeof(gPlatformECID); - bcopy(prop,gPlatformECID,size); + } + bcopy(prop, gPlatformECID, size); } if (kSuccess == DTGetProperty(entry, "dram-vendor-id", - (void **) &prop, &size)) { - if (size > sizeof(gPlatformMemoryID)) + (void **) &prop, &size)) { + if (size > sizeof(gPlatformMemoryID)) { size = sizeof(gPlatformMemoryID); - bcopy(prop,&gPlatformMemoryID,size); + } + bcopy(prop, &gPlatformMemoryID, size); } } pe_init_debug(); @@ -406,30 +422,32 @@ PE_create_console(void) */ check_for_panic_log(); - if (PE_state.video.v_display) + if (PE_state.video.v_display) { PE_initialize_console(&PE_state.video, kPEGraphicsMode); - else + } else { PE_initialize_console(&PE_state.video, kPETextMode); + } } int PE_current_console(PE_Video * info) { *info = PE_state.video; - return (0); + return 0; } void PE_display_icon(__unused unsigned int flags, __unused const char *name) { - if (default_noroot_data) + if (default_noroot_data) { vc_display_icon(&default_noroot, default_noroot_data); + } } extern boolean_t PE_get_hotkey(__unused unsigned char key) { - return (FALSE); + return FALSE; } static timebase_callback_func gTimebaseCallback; @@ -450,8 +468,9 @@ PE_call_timebase_callback(void) timebase_freq.timebase_num = gPEClockFrequencyInfo.timebase_frequency_hz; timebase_freq.timebase_den = 1; - if (gTimebaseCallback) + if (gTimebaseCallback) { gTimebaseCallback(&timebase_freq); + } } /* @@ -461,7 +480,7 @@ static int PE_stub_poll_input(__unused unsigned int options, char *c) { *c = uart_getc(); - return 0; /* 0 for success, 1 for unsupported */ + return 0; /* 0 for success, 1 for unsupported */ } /* @@ -486,12 +505,13 @@ PE_i_can_has_debugger(uint32_t *debug_flags) #if DEVELOPMENT || DEBUG assert(debug_boot_arg_inited); #endif - if (debug_enabled) - *debug_flags = debug_boot_arg; - else + if (debug_enabled) { + *debug_flags = debug_boot_arg; + } else { *debug_flags = 0; + } } - return (debug_enabled); + return debug_enabled; } /* @@ -521,12 +541,13 @@ PE_save_buffer_to_vram(unsigned char *buf, unsigned int *size) } *size = *size > panic_text_len ? panic_text_len : *size; - if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) + if (panic_info->eph_magic != EMBEDDED_PANIC_MAGIC) { printf("Error!! 
Current Magic 0x%X, expected value 0x%x", panic_info->eph_magic, EMBEDDED_PANIC_MAGIC); + } /* CRC everything after the CRC itself - starting with the panic header version */ panic_info->eph_crc = crc32(0L, &panic_info->eph_version, (panic_text_len + - sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version))); + sizeof(struct embedded_panic_header) - offsetof(struct embedded_panic_header, eph_version))); } uint32_t @@ -542,8 +563,9 @@ PE_get_offset_into_panic_region(char *location) void PE_init_panicheader() { - if (!panic_info) + if (!panic_info) { return; + } bzero(panic_info, sizeof(struct embedded_panic_header)); @@ -568,8 +590,9 @@ PE_init_panicheader() void PE_update_panicheader_nestedpanic() { - if (!panic_info) + if (!panic_info) { return; + } /* * If the panic log offset is not set, re-init the panic header @@ -595,7 +618,7 @@ PE_update_panicheader_nestedpanic() /* If this assert fires, it's likely indicative of corruption in the panic region */ assert(((panic_info->eph_stackshot_offset == 0) && (panic_info->eph_stackshot_len == 0)) || - ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0))); + ((panic_info->eph_stackshot_offset != 0) && (panic_info->eph_stackshot_len != 0))); /* * If we haven't set up the other log yet, set the beginning of the other log @@ -617,7 +640,7 @@ PE_reboot_on_panic(void) uint32_t debug_flags; if (PE_i_can_has_debugger(&debug_flags) - && (debug_flags & DB_NMI)) { + && (debug_flags & DB_NMI)) { /* kernel debugging is active */ return FALSE; } else { @@ -637,8 +660,9 @@ PE_sync_panic_buffers(void) * be discarded on reset. If we can make sure the lines are flushed to L3/DRAM, * the platform reset handler will flush any L3. */ - if (gPanicBase) + if (gPanicBase) { CleanPoC_DcacheRegion_Force(gPanicBase, gPanicSize); + } } static void @@ -693,5 +717,5 @@ void PE_mark_hwaccess(uint64_t thread) { last_hwaccess_thread = thread; - asm volatile("dmb ish"); + asm volatile ("dmb ish"); } diff --git a/pexpert/arm/pe_kprintf.c b/pexpert/arm/pe_kprintf.c index c0ec13792..5287e5c86 100644 --- a/pexpert/arm/pe_kprintf.c +++ b/pexpert/arm/pe_kprintf.c @@ -14,7 +14,7 @@ #include /* Globals */ -void (*PE_kputc) (char c) = 0; +void (*PE_kputc)(char c) = 0; SECURITY_READ_ONLY_LATE(unsigned int) disable_serial_output = TRUE; @@ -25,26 +25,30 @@ PE_init_kprintf(boolean_t vm_initialized) { unsigned int boot_arg; - if (PE_state.initialized == FALSE) + if (PE_state.initialized == FALSE) { panic("Platform Expert not initialized"); + } if (!vm_initialized) { simple_lock_init(&kprintf_lock, 0); - if (PE_parse_boot_argn("debug", &boot_arg, sizeof (boot_arg))) - if (boot_arg & DB_KPRT) + if (PE_parse_boot_argn("debug", &boot_arg, sizeof(boot_arg))) { + if (boot_arg & DB_KPRT) { disable_serial_output = FALSE; + } + } - if (serial_init()) + if (serial_init()) { PE_kputc = serial_putc; - else + } else { PE_kputc = cnputc; + } } } #ifdef MP_DEBUG -static void -_kprintf(const char *format,...) +static void +_kprintf(const char *format, ...) { va_list listp; @@ -52,10 +56,10 @@ _kprintf(const char *format,...) _doprnt_log(format, &listp, PE_kputc, 16); va_end(listp); } -#define MP_DEBUG_KPRINTF(x...) _kprintf(x) -#else /* MP_DEBUG */ +#define MP_DEBUG_KPRINTF(x...) _kprintf(x) +#else /* MP_DEBUG */ #define MP_DEBUG_KPRINTF(x...) -#endif /* MP_DEBUG */ +#endif /* MP_DEBUG */ #if CONFIG_NO_KPRINTF_STRINGS /* Prevent CPP from breaking the definition below */ @@ -64,8 +68,9 @@ _kprintf(const char *format,...) 
static int cpu_last_locked = 0; -__attribute__((noinline,not_tail_called)) -void kprintf(const char *fmt,...) +__attribute__((noinline, not_tail_called)) +void +kprintf(const char *fmt, ...) { va_list listp; va_list listp2; @@ -73,14 +78,13 @@ void kprintf(const char *fmt,...) void *caller = __builtin_return_address(0); if (!disable_serial_output) { - /* * Spin to get kprintf lock but re-enable interrupts while failing. * This allows interrupts to be handled while waiting but * interrupts are disabled once we have the lock. */ state = ml_set_interrupts_enabled(FALSE); - while (!simple_lock_try(&kprintf_lock)) { + while (!simple_lock_try(&kprintf_lock, LCK_GRP_NULL)) { ml_set_interrupts_enabled(state); ml_set_interrupts_enabled(FALSE); } @@ -106,8 +110,9 @@ void kprintf(const char *fmt,...) * take the panic when it reenables interrupts. * Hopefully one day this is fixed so that this workaround is unnecessary. */ - if (state == TRUE) + if (state == TRUE) { ml_spin_debug_clear_self(); + } #endif ml_set_interrupts_enabled(state); @@ -116,8 +121,7 @@ void kprintf(const char *fmt,...) os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller); } va_end(listp2); - } - else { + } else { // If interrupts are enabled if (ml_get_interrupts_enabled()) { va_start(listp, fmt); @@ -127,15 +131,14 @@ void kprintf(const char *fmt,...) } } -void +void serial_putc(char c) { uart_putc(c); } -int +int serial_getc(void) { return uart_getc(); } - diff --git a/pexpert/arm/pe_serial.c b/pexpert/arm/pe_serial.c index 18c746773..3e70e3f2d 100644 --- a/pexpert/arm/pe_serial.c +++ b/pexpert/arm/pe_serial.c @@ -39,13 +39,13 @@ struct pe_serial_functions { static struct pe_serial_functions *gPESF; -static int uart_initted = 0; /* 1 if init'ed */ +static int uart_initted = 0; /* 1 if init'ed */ -static vm_offset_t uart_base; +static vm_offset_t uart_base; /*****************************************************************************/ -#ifdef S3CUART +#ifdef S3CUART static int32_t dt_pclk = -1; static int32_t dt_sampling = -1; @@ -54,21 +54,22 @@ static int32_t dt_ubrdiv = -1; static void ln2410_uart_init(void) { - uint32_t ucon0 = 0x405; /* NCLK, No interrupts, No DMA - just polled */ + uint32_t ucon0 = 0x405; /* NCLK, No interrupts, No DMA - just polled */ - rULCON0 = 0x03; /* 81N, not IR */ + rULCON0 = 0x03; /* 81N, not IR */ // Override with pclk dt entry - if (dt_pclk != -1) + if (dt_pclk != -1) { ucon0 = ucon0 & ~0x400; + } rUCON0 = ucon0; - rUMCON0 = 0x00; /* Clear Flow Control */ + rUMCON0 = 0x00; /* Clear Flow Control */ gPESF->uart_set_baud_rate(0, 115200); - rUFCON0 = 0x03; /* Clear & Enable FIFOs */ - rUMCON0 = 0x01; /* Assert RTS on UART0 */ + rUFCON0 = 0x03; /* Clear & Enable FIFOs */ + rUMCON0 = 0x01; /* Assert RTS on UART0 */ } static void @@ -77,35 +78,38 @@ ln2410_uart_set_baud_rate(__unused int unit, uint32_t baud_rate) uint32_t div = 0; uint32_t uart_clock = 0; uint32_t sample_rate = 16; - - if (baud_rate < 300) + + if (baud_rate < 300) { baud_rate = 9600; + } - if (rUCON0 & 0x400) + if (rUCON0 & 0x400) { // NCLK uart_clock = (uint32_t)gPEClockFrequencyInfo.fix_frequency_hz; - else - // PCLK + } else { + // PCLK uart_clock = (uint32_t)gPEClockFrequencyInfo.prf_frequency_hz; + } if (dt_sampling != -1) { // Use the sampling rate specified in the Device Tree sample_rate = dt_sampling & 0xf; } - + if (dt_ubrdiv != -1) { // Use the ubrdiv specified in the Device Tree div = dt_ubrdiv & 0xffff; } else { // Calculate ubrdiv. 
UBRDIV = (SourceClock / (BPS * Sample Rate)) - 1 div = uart_clock / (baud_rate * sample_rate); - + uint32_t actual_baud = uart_clock / ((div + 0) * sample_rate); uint32_t baud_low = uart_clock / ((div + 1) * sample_rate); // Adjust div to get the closest target baudrate - if ((baud_rate - baud_low) > (actual_baud - baud_rate)) + if ((baud_rate - baud_low) > (actual_baud - baud_rate)) { div--; + } } // Sample Rate [19:16], UBRDIV [15:0] @@ -135,9 +139,10 @@ ln2410_rd0(void) static struct pe_serial_functions ln2410_serial_functions = { ln2410_uart_init, ln2410_uart_set_baud_rate, -ln2410_tr0, ln2410_td0, ln2410_rr0, ln2410_rd0}; + ln2410_tr0, ln2410_td0, ln2410_rr0, ln2410_rd0 +}; -#endif /* S3CUART */ +#endif /* S3CUART */ /*****************************************************************************/ @@ -146,10 +151,10 @@ static unsigned int read_dtr(void) { #ifdef __arm__ - unsigned int c; - __asm__ volatile( - "mrc p14, 0, %0, c0, c5\n" -: "=r"(c)); + unsigned int c; + __asm__ volatile ( + "mrc p14, 0, %0, c0, c5\n" + : "=r"(c)); return c; #else /* ARM64_TODO */ @@ -161,10 +166,10 @@ static void write_dtr(unsigned int c) { #ifdef __arm__ - __asm__ volatile( - "mcr p14, 0, %0, c0, c5\n" - : - :"r"(c)); + __asm__ volatile ( + "mcr p14, 0, %0, c0, c5\n" + : + :"r"(c)); #else /* ARM64_TODO */ (void)c; @@ -210,67 +215,68 @@ dcc_rd0(void) static struct pe_serial_functions dcc_serial_functions = { NULL, NULL, -dcc_tr0, dcc_td0, dcc_rr0, dcc_rd0}; + dcc_tr0, dcc_td0, dcc_rr0, dcc_rd0 +}; /*****************************************************************************/ #ifdef SHMCON -#define CPU_CACHELINE_SIZE (1 << MMU_CLINE) +#define CPU_CACHELINE_SIZE (1 << MMU_CLINE) #ifndef SHMCON_NAME -#define SHMCON_NAME "AP-xnu" +#define SHMCON_NAME "AP-xnu" #endif -#define SHMCON_MAGIC 'SHMC' -#define SHMCON_VERSION 2 -#define CBUF_IN 0 -#define CBUF_OUT 1 -#define INBUF_SIZE (panic_size / 16) -#define FULL_ALIGNMENT (64) +#define SHMCON_MAGIC 'SHMC' +#define SHMCON_VERSION 2 +#define CBUF_IN 0 +#define CBUF_OUT 1 +#define INBUF_SIZE (panic_size / 16) +#define FULL_ALIGNMENT (64) -#define FLAG_CACHELINE_32 1 -#define FLAG_CACHELINE_64 2 +#define FLAG_CACHELINE_32 1 +#define FLAG_CACHELINE_64 2 /* Defines to clarify the master/slave fields' use as circular buffer pointers */ -#define head_in sidx[CBUF_IN] -#define tail_in midx[CBUF_IN] -#define head_out midx[CBUF_OUT] -#define tail_out sidx[CBUF_OUT] +#define head_in sidx[CBUF_IN] +#define tail_in midx[CBUF_IN] +#define head_out midx[CBUF_OUT] +#define tail_out sidx[CBUF_OUT] /* TODO: get from device tree/target */ -#define NUM_CHILDREN 5 +#define NUM_CHILDREN 5 #define WRAP_INCR(len, x) do{ (x)++; if((x) >= (len)) (x) = 0; } while(0) #define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1))) -#define MAX(a,b) ((a) > (b) ? (a) : (b)) -#define MIN(a,b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) +#define MIN(a, b) ((a) < (b) ? 
(a) : (b)) #define shmcon_barrier() do {__asm__ volatile("dmb ish" : : : "memory");} while(0) struct shm_buffer_info { - uint64_t base; - uint32_t unused; - uint32_t magic; + uint64_t base; + uint32_t unused; + uint32_t magic; }; struct shmcon_header { - uint32_t magic; - uint8_t version; - uint8_t children; /* number of child entries in child_ent */ - uint16_t flags; - uint64_t buf_paddr[2]; /* Physical address for buffers (in, out) */ - uint32_t buf_len[2]; - uint8_t name[8]; + uint32_t magic; + uint8_t version; + uint8_t children; /* number of child entries in child_ent */ + uint16_t flags; + uint64_t buf_paddr[2]; /* Physical address for buffers (in, out) */ + uint32_t buf_len[2]; + uint8_t name[8]; /* Slave-modified data - invalidate before read */ - uint32_t sidx[2] __attribute__((aligned (FULL_ALIGNMENT))); /* In head, out tail */ + uint32_t sidx[2] __attribute__((aligned(FULL_ALIGNMENT))); /* In head, out tail */ /* Master-modified data - clean after write */ - uint32_t midx[2] __attribute__((aligned (FULL_ALIGNMENT))); /* In tail, out head */ + uint32_t midx[2] __attribute__((aligned(FULL_ALIGNMENT))); /* In tail, out head */ - uint64_t child[0]; /* Physical address of child header pointers */ + uint64_t child[0]; /* Physical address of child header pointers */ }; static volatile struct shmcon_header *shmcon = NULL; @@ -280,12 +286,14 @@ static uint64_t grace = 0; static uint64_t full_timeout = 0; #endif -static void shmcon_set_baud_rate(__unused int unit, __unused uint32_t baud_rate) +static void +shmcon_set_baud_rate(__unused int unit, __unused uint32_t baud_rate) { return; } -static int shmcon_tr0(void) +static int +shmcon_tr0(void) { #ifdef SHMCON_THROTTLED uint32_t head = shmcon->head_out; @@ -303,13 +311,15 @@ static int shmcon_tr0(void) full_timeout = mach_absolute_time() + grace; return 0; } - if (full_timeout > mach_absolute_time()) + if (full_timeout > mach_absolute_time()) { return 0; + } /* Timeout - slave not really there or not keeping up */ tail += (len / 4); - if (tail >= len) + if (tail >= len) { tail -= len; + } shmcon_barrier(); shmcon->tail_out = tail; full_timeout = 0; @@ -317,7 +327,8 @@ static int shmcon_tr0(void) return 1; } -static void shmcon_td0(int c) +static void +shmcon_td0(int c) { uint32_t head = shmcon->head_out; uint32_t len = shmcon->buf_len[CBUF_OUT]; @@ -328,14 +339,17 @@ static void shmcon_td0(int c) shmcon->head_out = head; } -static int shmcon_rr0(void) +static int +shmcon_rr0(void) { - if (shmcon->tail_in == shmcon->head_in) + if (shmcon->tail_in == shmcon->head_in) { return 0; + } return 1; } -static int shmcon_rd0(void) +static int +shmcon_rd0(void) { int c; uint32_t tail = shmcon->tail_in; @@ -348,20 +362,23 @@ static int shmcon_rd0(void) return c; } -static void shmcon_init(void) +static void +shmcon_init(void) { - DTEntry entry; - uintptr_t *reg_prop; - volatile struct shm_buffer_info *end; - size_t i, header_size; - unsigned int size; - vm_offset_t pa_panic_base, panic_size, va_buffer_base, va_buffer_end; + DTEntry entry; + uintptr_t *reg_prop; + volatile struct shm_buffer_info *end; + size_t i, header_size; + unsigned int size; + vm_offset_t pa_panic_base, panic_size, va_buffer_base, va_buffer_end; - if (kSuccess != DTLookupEntry(0, "pram", &entry)) + if (kSuccess != DTLookupEntry(0, "pram", &entry)) { return; + } - if (kSuccess != DTGetProperty(entry, "reg", (void **)®_prop, &size)) + if (kSuccess != DTGetProperty(entry, "reg", (void **)®_prop, &size)) { return; + } pa_panic_base = reg_prop[0]; panic_size = reg_prop[1]; @@ -386,25 
+403,28 @@ static void shmcon_init(void) len = shmcon->buf_len[i]; /* Validate buffers */ if ((pa_buf < pa_buffer_base) || - (pa_buf >= pa_buffer_end) || - ((pa_buf + len) > pa_buffer_end) || - (shmcon->midx[i] >= len) || /* Index out of bounds */ - (shmcon->sidx[i] >= len) || - (pa_buf != ROUNDUP(pa_buf, CPU_CACHELINE_SIZE)) || /* Unaligned pa_buffer */ - (len < 1024) || - (len > (pa_buffer_end - pa_buffer_base)) || - (shmcon->children != NUM_CHILDREN)) + (pa_buf >= pa_buffer_end) || + ((pa_buf + len) > pa_buffer_end) || + (shmcon->midx[i] >= len) || /* Index out of bounds */ + (shmcon->sidx[i] >= len) || + (pa_buf != ROUNDUP(pa_buf, CPU_CACHELINE_SIZE)) || /* Unaligned pa_buffer */ + (len < 1024) || + (len > (pa_buffer_end - pa_buffer_base)) || + (shmcon->children != NUM_CHILDREN)) { goto validation_failure; + } /* Compute the VA offset of the buffer */ shmbuf[i] = (uint8_t *)(uintptr_t)shmcon + ((uintptr_t)pa_buf - (uintptr_t)pa_panic_base); } /* Check that buffers don't overlap */ if ((uintptr_t)shmbuf[0] < (uintptr_t)shmbuf[1]) { - if ((uintptr_t)(shmbuf[0] + shmcon->buf_len[0]) > (uintptr_t)shmbuf[1]) + if ((uintptr_t)(shmbuf[0] + shmcon->buf_len[0]) > (uintptr_t)shmbuf[1]) { goto validation_failure; + } } else { - if ((uintptr_t)(shmbuf[1] + shmcon->buf_len[1]) > (uintptr_t)shmbuf[0]) + if ((uintptr_t)(shmbuf[1] + shmcon->buf_len[1]) > (uintptr_t)shmbuf[0]) { goto validation_failure; + } } shmcon->tail_in = shmcon->head_in; /* Clear input buffer */ shmcon_barrier(); @@ -427,8 +447,9 @@ validation_failure: memset((void *)shmcon->name, ' ', sizeof(shmcon->name)); memcpy((void *)shmcon->name, SHMCON_NAME, MIN(sizeof(shmcon->name), strlen(SHMCON_NAME))); #pragma clang diagnostic pop - for (i = 0; i < NUM_CHILDREN; i++) + for (i = 0; i < NUM_CHILDREN; i++) { shmcon->child[0] = 0; + } shmcon_barrier(); shmcon->magic = SHMCON_MAGIC; } @@ -454,13 +475,16 @@ static struct pe_serial_functions shmcon_serial_functions = .rd0 = shmcon_rd0 }; -int pe_shmcon_set_child(uint64_t paddr, uint32_t entry) +int +pe_shmcon_set_child(uint64_t paddr, uint32_t entry) { - if (shmcon == NULL) + if (shmcon == NULL) { return -1; + } - if (shmcon->children >= entry) + if (shmcon->children >= entry) { return -1; + } shmcon->child[entry] = paddr; return 0; @@ -474,10 +498,10 @@ int pe_shmcon_set_child(uint64_t paddr, uint32_t entry) // Allow a 30ms stall of wall clock time before DockFIFO starts dropping characters -#define DOCKFIFO_WR_MAX_STALL_US (30*1000) +#define DOCKFIFO_WR_MAX_STALL_US (30*1000) static uint64_t prev_dockfifo_drained_time; // Last time we've seen the DockFIFO drained by an external agent -static uint64_t prev_dockfifo_spaces; // Previous w_stat level of the DockFIFO. +static uint64_t prev_dockfifo_spaces; // Previous w_stat level of the DockFIFO. static uint32_t dockfifo_capacity; static uint64_t dockfifo_stall_grace; @@ -486,7 +510,8 @@ static uint64_t dockfifo_stall_grace; // Local funtions //======================= -static int dockfifo_drain_on_stall() +static int +dockfifo_drain_on_stall() { // Called when DockFIFO runs out of spaces. // Check if the DockFIFO reader has stalled. If so, empty the DockFIFO ourselves. @@ -503,40 +528,43 @@ static int dockfifo_drain_on_stall() } -static int dockfifo_uart_tr0(void) +static int +dockfifo_uart_tr0(void) { uint32_t spaces = rDOCKFIFO_W_STAT(DOCKFIFO_UART_WRITE) & 0xffff; if (spaces >= dockfifo_capacity || spaces > prev_dockfifo_spaces) { - // More spaces showed up. That can only mean someone read the FIFO. 
- // Note that if the DockFIFO is empty we cannot tell if someone is listening, - // we can only give them the benefit of the doubt. + // More spaces showed up. That can only mean someone read the FIFO. + // Note that if the DockFIFO is empty we cannot tell if someone is listening, + // we can only give them the benefit of the doubt. - prev_dockfifo_drained_time = mach_absolute_time(); + prev_dockfifo_drained_time = mach_absolute_time(); } prev_dockfifo_spaces = spaces; return spaces || dockfifo_drain_on_stall(); - } -static void dockfifo_uart_td0(int c) +static void +dockfifo_uart_td0(int c) { rDOCKFIFO_W_DATA(DOCKFIFO_UART_WRITE, 1) = (unsigned)(c & 0xff); prev_dockfifo_spaces--; // After writing a byte we have one fewer space than previously expected. - } -static int dockfifo_uart_rr0(void) +static int +dockfifo_uart_rr0(void) { return rDOCKFIFO_R_DATA(DOCKFIFO_UART_READ, 0) & 0x7f; } -static int dockfifo_uart_rd0(void) +static int +dockfifo_uart_rd0(void) { return (int)((rDOCKFIFO_R_DATA(DOCKFIFO_UART_READ, 1) >> 8) & 0xff); } -static void dockfifo_uart_init(void) +static void +dockfifo_uart_init(void) { nanoseconds_to_absolutetime(DOCKFIFO_WR_MAX_STALL_US * 1000, &dockfifo_stall_grace); @@ -544,7 +572,9 @@ static void dockfifo_uart_init(void) rDOCKFIFO_DRAIN(DOCKFIFO_UART_WRITE) = 0; // Empty the DockFIFO by draining it until OCCUPANCY is 0, then measure its capacity - while (rDOCKFIFO_R_DATA(DOCKFIFO_UART_WRITE, 3) & 0x7F); + while (rDOCKFIFO_R_DATA(DOCKFIFO_UART_WRITE, 3) & 0x7F) { + ; + } dockfifo_capacity = rDOCKFIFO_W_STAT(DOCKFIFO_UART_WRITE) & 0xffff; } @@ -563,20 +593,21 @@ static struct pe_serial_functions dockfifo_uart_serial_functions = /*****************************************************************************/ #ifdef DOCKCHANNEL_UART -#define DOCKCHANNEL_WR_MAX_STALL_US (30*1000) +#define DOCKCHANNEL_WR_MAX_STALL_US (30*1000) -static vm_offset_t dock_agent_base; -static uint32_t max_dockchannel_drain_period; -static bool use_sw_drain; -static uint64_t prev_dockchannel_drained_time; // Last time we've seen the DockChannel drained by an external agent -static uint64_t prev_dockchannel_spaces; // Previous w_stat level of the DockChannel. -static uint64_t dockchannel_stall_grace; +static vm_offset_t dock_agent_base; +static uint32_t max_dockchannel_drain_period; +static bool use_sw_drain; +static uint64_t prev_dockchannel_drained_time; // Last time we've seen the DockChannel drained by an external agent +static uint64_t prev_dockchannel_spaces; // Previous w_stat level of the DockChannel. +static uint64_t dockchannel_stall_grace; //======================= // Local funtions //======================= -static int dockchannel_drain_on_stall() +static int +dockchannel_drain_on_stall() { // Called when DockChannel runs out of spaces. // Check if the DockChannel reader has stalled. If so, empty the DockChannel ourselves. 
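Both FIFO-backed consoles above share one watchdog idea: the only observable evidence of a live reader is the free-space count growing, so the transmit path timestamps every observed drain and, once the FIFO has sat full past a 30 ms wall-clock grace period, sheds data itself rather than wedging the kernel console. A minimal standalone sketch of that pattern follows; the names are hypothetical, a caller-supplied clock stands in for mach_absolute_time(), and drop_one() stands in for the hardware drain register:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical names; a distilled sketch of the pattern, not the driver. */
#define TX_STALL_GRACE_NS (30ull * 1000 * 1000) /* 30 ms, as in both drivers */

struct tx_watchdog {
	uint64_t last_drain_ns; /* last time free space was seen to grow */
	uint32_t prev_spaces;   /* free-space count at the previous poll */
	uint32_t capacity;      /* total FIFO slots */
};

/* Returns true when one byte may be written; may drop data on a stall. */
static bool
tx_ready(struct tx_watchdog *w, uint32_t spaces, uint64_t now_ns,
    void (*drop_one)(void))
{
	if (spaces >= w->capacity || spaces > w->prev_spaces) {
		/* FIFO empty, or more room than last poll: either a reader
		 * drained it, or an idle link gets the benefit of the doubt. */
		w->last_drain_ns = now_ns;
	}
	w->prev_spaces = spaces;
	if (spaces != 0) {
		return true;
	}
	if (now_ns - w->last_drain_ns < TX_STALL_GRACE_NS) {
		return false;   /* full, but still inside the grace period */
	}
	drop_one();             /* reader gone or stuck; make room ourselves */
	w->prev_spaces = 1;
	return true;
}
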
@@ -592,7 +623,8 @@ static int dockchannel_drain_on_stall() return 0; } -static int dockchannel_uart_tr0(void) +static int +dockchannel_uart_tr0(void) { if (use_sw_drain) { uint32_t spaces = rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL) & 0x1ff; @@ -607,11 +639,12 @@ static int dockchannel_uart_tr0(void) return spaces || dockchannel_drain_on_stall(); } else { // Returns spaces in dockchannel fifo - return (rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL) & 0x1ff); + return rDOCKCHANNELS_DEV_WSTAT(DOCKCHANNEL_UART_CHANNEL) & 0x1ff; } } -static void dockchannel_uart_td0(int c) +static void +dockchannel_uart_td0(int c) { rDOCKCHANNELS_DEV_WDATA1(DOCKCHANNEL_UART_CHANNEL) = (unsigned)(c & 0xff); if (use_sw_drain) { @@ -619,17 +652,20 @@ static void dockchannel_uart_td0(int c) } } -static int dockchannel_uart_rr0(void) +static int +dockchannel_uart_rr0(void) { return rDOCKCHANNELS_DEV_RDATA0(DOCKCHANNEL_UART_CHANNEL) & 0x7f; } -static int dockchannel_uart_rd0(void) +static int +dockchannel_uart_rd0(void) { - return (int)((rDOCKCHANNELS_DEV_RDATA1(DOCKCHANNEL_UART_CHANNEL)>> 8) & 0xff); + return (int)((rDOCKCHANNELS_DEV_RDATA1(DOCKCHANNEL_UART_CHANNEL) >> 8) & 0xff); } -static void dockchannel_uart_init(void) +static void +dockchannel_uart_init(void) { if (use_sw_drain) { nanoseconds_to_absolutetime(DOCKCHANNEL_WR_MAX_STALL_US * NSEC_PER_USEC, &dockchannel_stall_grace); @@ -645,7 +681,7 @@ static void dockchannel_uart_init(void) rDOCKCHANNELS_DEV_DRAIN_CFG(DOCKCHANNEL_UART_CHANNEL) = max_dockchannel_drain_period; // Drain timer doesn't get loaded with value from drain period register if fifo - // is already full. Drop a character from the fifo. + // is already full. Drop a character from the fifo. rDOCKCHANNELS_DOCK_RDATA1(DOCKCHANNEL_UART_CHANNEL); } @@ -662,30 +698,35 @@ static struct pe_serial_functions dockchannel_uart_serial_functions = #endif /* DOCKCHANNEL_UART */ /****************************************************************************/ -#ifdef PI3_UART +#ifdef PI3_UART vm_offset_t pi3_gpio_base_vaddr; vm_offset_t pi3_aux_base_vaddr; -static int pi3_uart_tr0(void) +static int +pi3_uart_tr0(void) { - return (int) BCM2837_GET32(BCM2837_AUX_MU_LSR_REG_V) & 0x20; + return (int) BCM2837_GET32(BCM2837_AUX_MU_LSR_REG_V) & 0x20; } -static void pi3_uart_td0(int c) +static void +pi3_uart_td0(int c) { - BCM2837_PUT32(BCM2837_AUX_MU_IO_REG_V, (uint32_t) c); + BCM2837_PUT32(BCM2837_AUX_MU_IO_REG_V, (uint32_t) c); } -static int pi3_uart_rr0(void) -{ - return (int) BCM2837_GET32(BCM2837_AUX_MU_LSR_REG_V) & 0x01; +static int +pi3_uart_rr0(void) +{ + return (int) BCM2837_GET32(BCM2837_AUX_MU_LSR_REG_V) & 0x01; } -static int pi3_uart_rd0(void) +static int +pi3_uart_rd0(void) { - return (int) BCM2837_GET32(BCM2837_AUX_MU_IO_REG_V) & 0xff; + return (int) BCM2837_GET32(BCM2837_AUX_MU_IO_REG_V) & 0xff; } -static void pi3_uart_init(void) +static void +pi3_uart_init(void) { // Scratch variable uint32_t i; @@ -699,7 +740,7 @@ static void pi3_uart_init(void) BCM2837_PUT32(BCM2837_AUX_MU_IIR_REG_V, 0xC6); BCM2837_PUT32(BCM2837_AUX_MU_BAUD_REG_V, 270); - i = BCM2837_FSEL_REG(14); + i = BCM2837_FSEL_REG(14); // Configure GPIOs 14 & 15 for alternate function 5 i &= ~(BCM2837_FSEL_MASK(14)); i |= (BCM2837_FSEL_ALT5 << BCM2837_FSEL_OFFS(14)); @@ -713,18 +754,18 @@ static void pi3_uart_init(void) // Barrier before AP spinning for 150 cycles __builtin_arm_isb(ISB_SY); - for(i = 0; i < 150; i++) { - asm volatile("add x0, x0, xzr"); + for (i = 0; i < 150; i++) { + asm volatile ("add x0, x0, xzr"); } 
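// The spin loops bracketing the GPPUDCLK0 write below follow the BCM2835/
// BCM2837 peripherals manual: after the pull control is programmed, the pads
// need roughly 150 cycles of setup time, then GPPUDCLK0 is pulsed for the
// pins being changed (14 and 15 carry the mini-UART TXD1/RXD1), and another
// ~150 cycles must elapse before the clock is deasserted so the new pull
// state latches.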
__builtin_arm_isb(ISB_SY); - BCM2837_PUT32(BCM2837_GPPUDCLK0_V,(1 << 14) | (1 << 15)); + BCM2837_PUT32(BCM2837_GPPUDCLK0_V, (1 << 14) | (1 << 15)); __builtin_arm_isb(ISB_SY); - for(i = 0; i < 150; i++) { - asm volatile("add x0, x0, xzr"); + for (i = 0; i < 150; i++) { + asm volatile ("add x0, x0, xzr"); } __builtin_arm_isb(ISB_SY); @@ -749,23 +790,23 @@ static struct pe_serial_functions pi3_uart_serial_functions = int serial_init(void) { - DTEntry entryP = NULL; - uint32_t prop_size, dccmode; - vm_offset_t soc_base; - uintptr_t *reg_prop; - uint32_t *prop_value = NULL; - char *serial_compat = 0; + DTEntry entryP = NULL; + uint32_t prop_size, dccmode; + vm_offset_t soc_base; + uintptr_t *reg_prop; + uint32_t *prop_value = NULL; + char *serial_compat = 0; #ifdef SHMCON - uint32_t jconmode; + uint32_t jconmode; #endif #ifdef DOCKFIFO_UART - uint32_t no_dockfifo_uart; + uint32_t no_dockfifo_uart; #endif #ifdef DOCKCHANNEL_UART - uint32_t no_dockchannel_uart; + uint32_t no_dockchannel_uart; #endif #ifdef PI3_UART - uint32_t is_pi3; + uint32_t is_pi3; #endif if (uart_initted && gPESF) { @@ -775,7 +816,7 @@ serial_init(void) } dccmode = 0; - if (PE_parse_boot_argn("dcc", &dccmode, sizeof (dccmode))) { + if (PE_parse_boot_argn("dcc", &dccmode, sizeof(dccmode))) { gPESF = &dcc_serial_functions; uart_initted = 1; return 1; @@ -805,8 +846,9 @@ serial_init(void) soc_base = pe_arm_get_soc_base_phys(); - if (soc_base == 0) + if (soc_base == 0) { return 0; + } #ifdef DOCKFIFO_UART no_dockfifo_uart = 0; @@ -815,8 +857,7 @@ serial_init(void) if (DTFindEntry("name", "dockfifo-uart", &entryP) == kSuccess) { DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size); uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); - } - else { + } else { return 0; } gPESF = &dockfifo_uart_serial_functions; @@ -834,8 +875,9 @@ serial_init(void) if (DTFindEntry("name", "dockchannel-uart", &entryP) == kSuccess) { DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size); // Should be two reg entries - if (prop_size/sizeof(uintptr_t) != 4) + if (prop_size / sizeof(uintptr_t) != 4) { panic("Malformed dockchannel-uart property"); + } uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); dock_agent_base = ml_io_map(soc_base + *(reg_prop + 2), *(reg_prop + 3)); gPESF = &dockchannel_uart_serial_functions; @@ -860,46 +902,58 @@ serial_init(void) if (DTFindEntry("boot-console", NULL, &entryP) == kSuccess) { DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size); uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); - if (serial_compat == 0) + if (serial_compat == 0) { DTGetProperty(entryP, "compatible", (void **)&serial_compat, &prop_size); + } } else if (DTFindEntry("name", "uart0", &entryP) == kSuccess) { DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size); uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); - if (serial_compat == 0) + if (serial_compat == 0) { DTGetProperty(entryP, "compatible", (void **)&serial_compat, &prop_size); + } } else if (DTFindEntry("name", "uart1", &entryP) == kSuccess) { DTGetProperty(entryP, "reg", (void **)&reg_prop, &prop_size); uart_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); - if (serial_compat == 0) + if (serial_compat == 0) { DTGetProperty(entryP, "compatible", (void **)&serial_compat, &prop_size); + } } -#ifdef S3CUART +#ifdef S3CUART if (NULL != entryP) { DTGetProperty(entryP, "pclk", (void **)&prop_value, &prop_size); - if (prop_value) dt_pclk = *prop_value; + if (prop_value) { + dt_pclk = *prop_value; + } prop_value = NULL; 
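// dt_pclk, dt_sampling, and dt_ubrdiv let the device tree override the
// compiled-in S3C-style UART clocking; presumably the baud-rate setup
// elsewhere in this file folds them into its divisor calculation.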
DTGetProperty(entryP, "sampling", (void **)&prop_value, &prop_size); - if (prop_value) dt_sampling = *prop_value; + if (prop_value) { + dt_sampling = *prop_value; + } prop_value = NULL; DTGetProperty(entryP, "ubrdiv", (void **)&prop_value, &prop_size); - if (prop_value) dt_ubrdiv = *prop_value; + if (prop_value) { + dt_ubrdiv = *prop_value; + } } - if (!strcmp(serial_compat, "uart,16550")) + if (!strcmp(serial_compat, "uart,16550")) { gPESF = &ln2410_serial_functions; - else if (!strcmp(serial_compat, "uart-16550")) + } else if (!strcmp(serial_compat, "uart-16550")) { gPESF = &ln2410_serial_functions; - else if (!strcmp(serial_compat, "uart,s5i3000")) + } else if (!strcmp(serial_compat, "uart,s5i3000")) { gPESF = &ln2410_serial_functions; - else if (!strcmp(serial_compat, "uart-1,samsung")) + } else if (!strcmp(serial_compat, "uart-1,samsung")) { gPESF = &ln2410_serial_functions; -#elif defined (ARM_BOARD_CONFIG_MV88F6710) - if (!strcmp(serial_compat, "uart16x50,mmio")) + } +#elif defined (ARM_BOARD_CONFIG_MV88F6710) + if (!strcmp(serial_compat, "uart16x50,mmio")) { gPESF = &uart16x50_serial_functions; + } #endif - else + else { return 0; + } gPESF->uart_init(); @@ -912,17 +966,20 @@ void uart_putc(char c) { if (uart_initted) { - while (!gPESF->tr0()); /* Wait until THR is empty. */ + while (!gPESF->tr0()) { + ; /* Wait until THR is empty. */ + } gPESF->td0(c); } } int uart_getc(void) -{ /* returns -1 if no data available */ +{ /* returns -1 if no data available */ if (uart_initted) { - if (!gPESF->rr0()) - return -1; /* Receive data read */ + if (!gPESF->rr0()) { + return -1; /* Receive data read */ + } return gPESF->rd0(); } return -1; diff --git a/pexpert/gen/bootargs.c b/pexpert/gen/bootargs.c index 754513a5c..5bf70059c 100644 --- a/pexpert/gen/bootargs.c +++ b/pexpert/gen/bootargs.c @@ -35,7 +35,7 @@ static boolean_t israngesep( char c); #ifndef CONFIG_EMBEDDED static int argstrcpy(char *from, char *to); #endif -static int argstrcpy2(char *from,char *to, unsigned maxlen); +static int argstrcpy2(char *from, char *to, unsigned maxlen); static int argnumcpy(long long val, void *to, unsigned maxlen); static int getval(char *s, long long *val, argsep_func_t issep, boolean_t skip_equal_sign); boolean_t get_range_bounds(char * c, int64_t * lower, int64_t * upper); @@ -44,18 +44,18 @@ extern int IODTGetDefault(const char *key, void *infoAddr, unsigned int infoSize struct i24 { - int32_t i24 : 24; + int32_t i24 : 24; int32_t _pad : 8; }; -#define NUM 0 -#define STR 1 +#define NUM 0 +#define STR 1 #if !defined(__LP64__) && !defined(__arm__) boolean_t PE_parse_boot_arg( const char *arg_string, - void *arg_ptr) + void *arg_ptr) { int max_len = -1; @@ -83,120 +83,128 @@ PE_parse_boot_argn_internal( boolean_t arg_found; args = PE_boot_args(); - if (*args == '\0') return FALSE; + if (*args == '\0') { + return FALSE; + } #ifdef CONFIG_EMBEDDED - if (max_len == -1) return FALSE; + if (max_len == -1) { + return FALSE; + } #endif arg_found = FALSE; - while(*args && isargsep(*args)) args++; + while (*args && isargsep(*args)) { + args++; + } - while (*args) - { - if (*args == '-') + while (*args) { + if (*args == '-') { arg_boolean = TRUE; - else + } else { arg_boolean = FALSE; + } cp = args; - while (!isargsep (*cp) && *cp != '=') + while (!isargsep(*cp) && *cp != '=') { cp++; - if (*cp != '=' && !arg_boolean) + } + if (*cp != '=' && !arg_boolean) { goto gotit; + } c = *cp; - i = cp-args; + i = cp - args; if (strncmp(args, arg_string, i) || - (i!=strlen(arg_string))) + (i != strlen(arg_string))) { goto 
gotit; + } if (arg_boolean) { if (!force_string) { if (max_len > 0) { argnumcpy(1, arg_ptr, max_len);/* max_len of 0 performs no copy at all*/ arg_found = TRUE; - } - else if (max_len == 0) { + } else if (max_len == 0) { arg_found = TRUE; } } break; } else { - while (*cp && isargsep (*cp)) + while (*cp && isargsep(*cp)) { cp++; + } if (*cp == '=' && c != '=') { - args = cp+1; + args = cp + 1; goto gotit; } - if ('_' == *arg_string) /* Force a string copy if the argument name begins with an underscore */ - { + if ('_' == *arg_string) { /* Force a string copy if the argument name begins with an underscore */ if (max_len > 0) { int hacklen = 17 > max_len ? 17 : max_len; - argstrcpy2 (++cp, (char *)arg_ptr, hacklen - 1); /* Hack - terminate after 16 characters */ + argstrcpy2(++cp, (char *)arg_ptr, hacklen - 1); /* Hack - terminate after 16 characters */ arg_found = TRUE; - } - else if (max_len == 0) { + } else if (max_len == 0) { arg_found = TRUE; } break; } - switch ((force_string && *cp == '=') ? STR : getval(cp, &val, isargsep, FALSE)) - { - case NUM: - if (max_len > 0) { - argnumcpy(val, arg_ptr, max_len); - arg_found = TRUE; - } - else if (max_len == 0) { - arg_found = TRUE; - } - break; - case STR: - if (max_len > 0) { - argstrcpy2(++cp, (char *)arg_ptr, max_len - 1);/*max_len of 0 performs no copy at all*/ - arg_found = TRUE; - } - else if (max_len == 0) { - arg_found = TRUE; - } + switch ((force_string && *cp == '=') ? STR : getval(cp, &val, isargsep, FALSE)) { + case NUM: + if (max_len > 0) { + argnumcpy(val, arg_ptr, max_len); + arg_found = TRUE; + } else if (max_len == 0) { + arg_found = TRUE; + } + break; + case STR: + if (max_len > 0) { + argstrcpy2(++cp, (char *)arg_ptr, max_len - 1); /*max_len of 0 performs no copy at all*/ + arg_found = TRUE; + } else if (max_len == 0) { + arg_found = TRUE; + } #if !CONFIG_EMBEDDED - else if (max_len == -1) { /* unreachable on embedded */ - argstrcpy(++cp, (char *)arg_ptr); - arg_found = TRUE; - } + else if (max_len == -1) { /* unreachable on embedded */ + argstrcpy(++cp, (char *)arg_ptr); + arg_found = TRUE; + } #endif - break; + break; } goto gotit; } gotit: /* Skip over current arg */ - while(!isargsep(*args)) args++; + while (!isargsep(*args)) { + args++; + } /* Skip leading white space (catch end of args) */ - while(*args && isargsep(*args)) args++; + while (*args && isargsep(*args)) { + args++; + } } - return(arg_found); + return arg_found; } boolean_t PE_parse_boot_argn( - const char *arg_string, - void *arg_ptr, - int max_len) + const char *arg_string, + void *arg_ptr, + int max_len) { return PE_parse_boot_argn_internal(arg_string, arg_ptr, max_len, FALSE); } boolean_t PE_parse_boot_arg_str( - const char *arg_string, - char *arg_ptr, - int strlen) + const char *arg_string, + char *arg_ptr, + int strlen) { return PE_parse_boot_argn_internal(arg_string, arg_ptr, strlen, TRUE); } @@ -204,19 +212,21 @@ PE_parse_boot_arg_str( static boolean_t isargsep(char c) { - if (c == ' ' || c == '\0' || c == '\t') - return (TRUE); - else - return (FALSE); + if (c == ' ' || c == '\0' || c == '\t') { + return TRUE; + } else { + return FALSE; + } } static boolean_t israngesep(char c) { - if (isargsep(c) || c == '_' || c == ',') - return (TRUE); - else - return (FALSE); + if (isargsep(c) || c == '_' || c == ',') { + return TRUE; + } else { + return FALSE; + } } #if !CONFIG_EMBEDDED @@ -232,7 +242,7 @@ argstrcpy( *to++ = *from++; } *to = 0; - return(i); + return i; } #endif @@ -249,35 +259,36 @@ argstrcpy2( *to++ = *from++; } *to = 0; - return(i); + return 
i; } -static int argnumcpy(long long val, void *to, unsigned maxlen) +static int +argnumcpy(long long val, void *to, unsigned maxlen) { switch (maxlen) { - case 0: - /* No write-back, caller just wants to know if arg was found */ - break; - case 1: - *(int8_t *)to = val; - break; - case 2: - *(int16_t *)to = val; - break; - case 3: - /* Unlikely in practice */ - ((struct i24 *)to)->i24 = val; - break; - case 4: - *(int32_t *)to = val; - break; - case 8: - *(int64_t *)to = val; - break; - default: - *(int32_t *)to = val; - maxlen = 4; - break; + case 0: + /* No write-back, caller just wants to know if arg was found */ + break; + case 1: + *(int8_t *)to = val; + break; + case 2: + *(int16_t *)to = val; + break; + case 3: + /* Unlikely in practice */ + ((struct i24 *)to)->i24 = val; + break; + case 4: + *(int32_t *)to = val; + break; + case 8: + *(int64_t *)to = val; + break; + default: + *(int32_t *)to = val; + maxlen = 4; + break; } return (int)maxlen; @@ -305,11 +316,10 @@ getval( sign = -1; s++; } - intval = *s++-'0'; + intval = *s++ - '0'; radix = 10; if (intval == 0) { - switch(*s) { - + switch (*s) { case 'x': radix = 16; s++; @@ -322,58 +332,62 @@ getval( case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': - intval = *s-'0'; + intval = *s - '0'; s++; radix = 8; break; default: - if (!issep(*s)) - return (STR); + if (!issep(*s)) { + return STR; + } } - } else if (intval >= radix) { - return (STR); - } - for(;;) { - c = *s++; - if (issep(c)) - break; - if ((radix <= 10) && - ((c >= '0') && (c <= ('9' - (10 - radix))))) { - c -= '0'; - } else if ((radix == 16) && - ((c >= '0') && (c <= '9'))) { + } else if (intval >= radix) { + return STR; + } + for (;;) { + c = *s++; + if (issep(c)) { + break; + } + if ((radix <= 10) && + ((c >= '0') && (c <= ('9' - (10 - radix))))) { + c -= '0'; + } else if ((radix == 16) && + ((c >= '0') && (c <= '9'))) { c -= '0'; - } else if ((radix == 16) && - ((c >= 'a') && (c <= 'f'))) { + } else if ((radix == 16) && + ((c >= 'a') && (c <= 'f'))) { c -= 'a' - 10; - } else if ((radix == 16) && - ((c >= 'A') && (c <= 'F'))) { + } else if ((radix == 16) && + ((c >= 'A') && (c <= 'F'))) { c -= 'A' - 10; - } else if (c == 'k' || c == 'K') { + } else if (c == 'k' || c == 'K') { sign *= 1024; break; } else if (c == 'm' || c == 'M') { sign *= 1024 * 1024; - break; + break; } else if (c == 'g' || c == 'G') { sign *= 1024 * 1024 * 1024; - break; + break; } else { - return (STR); - } - if (c >= radix) - return (STR); + return STR; + } + if (c >= radix) { + return STR; + } intval *= radix; intval += c; } - if (!issep(c) && !issep(*s)) - return STR; + if (!issep(c) && !issep(*s)) { + return STR; + } *val = intval * sign; - return (NUM); + return NUM; } *val = 1; - return (NUM); + return NUM; } boolean_t @@ -384,32 +398,33 @@ PE_imgsrc_mount_supported() boolean_t PE_get_default( - const char *property_name, - void *property_ptr, + const char *property_name, + void *property_ptr, unsigned int max_property) { - DTEntry dte; - void **property_data; + DTEntry dte; + void **property_data; unsigned int property_size; /* * Look for the property using the PE DT support. */ if (kSuccess == DTLookupEntry(NULL, "/defaults", &dte)) { - /* * We have a /defaults node, look for the named property. 
*/ - if (kSuccess != DTGetProperty(dte, property_name, (void **)&property_data, &property_size)) + if (kSuccess != DTGetProperty(dte, property_name, (void **)&property_data, &property_size)) { return FALSE; + } /* * This would be a fine place to do smart argument size management for 32/64 * translation, but for now we'll insist that callers know how big their * default values are. */ - if (property_size > max_property) + if (property_size > max_property) { return FALSE; + } /* * Copy back the precisely-sized result. diff --git a/pexpert/gen/device_tree.c b/pexpert/gen/device_tree.c index 651ccd77b..ef5744688 100644 --- a/pexpert/gen/device_tree.c +++ b/pexpert/gen/device_tree.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -52,8 +52,9 @@ static inline DeviceTreeNodeProperty* next_prop(DeviceTreeNodeProperty* prop) { uintptr_t next_addr; - if (os_add3_overflow((uintptr_t)prop, prop->length, sizeof(DeviceTreeNodeProperty) + 3, &next_addr)) + if (os_add3_overflow((uintptr_t)prop, prop->length, sizeof(DeviceTreeNodeProperty) + 3, &next_addr)) { panic("Device tree property overflow: prop %p, length 0x%x\n", prop, prop->length); + } next_addr &= ~(3ULL); return (DeviceTreeNodeProperty*)next_addr; } @@ -72,7 +73,7 @@ skipProperties(RealDTEntry entry) prop = next_prop(prop); } } - return ((RealDTEntry) prop); + return (RealDTEntry) prop; } static RealDTEntry @@ -127,10 +128,10 @@ GetNextComponent(const char *cp, char *bp) static RealDTEntry FindChild(RealDTEntry cur, char *buf) { - RealDTEntry child; - unsigned long index; - char * str; - unsigned int dummy; + RealDTEntry child; + unsigned long index; + char * str; + unsigned int dummy; if (cur->nChildren == 0) { return NULL; @@ -168,60 +169,63 @@ int DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2) { /* equality of pointers */ - return (ref1 == ref2); + return ref1 == ref2; } -static char *startingP; // needed for find_entry +static char *startingP; // needed for find_entry int find_entry(const char *propName, const char *propValue, DTEntry *entryH); -int DTFindEntry(const char *propName, const char *propValue, DTEntry *entryH) +int +DTFindEntry(const char *propName, const char *propValue, DTEntry *entryH) { if (!DTInitialized) { return kError; } startingP = (char *)DTRootNode; - return(find_entry(propName, propValue, entryH)); + return find_entry(propName, propValue, entryH); } -int find_entry(const char *propName, const char *propValue, DTEntry *entryH) +int +find_entry(const char *propName, const char *propValue, DTEntry *entryH) { DeviceTreeNode *nodeP = (DeviceTreeNode *) (void *) startingP; unsigned int k; - if (nodeP->nProperties == 0) return(kError); // End of the list of nodes + if (nodeP->nProperties == 0) { + return kError; // End of the list of nodes + } startingP = (char *) (nodeP + 1); // Search current entry for (k = 0; k < nodeP->nProperties; ++k) { DeviceTreeNodeProperty *propP = (DeviceTreeNodeProperty *) (void *) startingP; - startingP += sizeof (*propP) + ((propP->length + 3) & -4); + startingP += sizeof(*propP) + ((propP->length + 3) & -4); - if (strcmp (propP->name, propName) == 0) { - if (propValue == NULL || strcmp( (char *)(propP + 1), propValue) == 0) - { + if (strcmp(propP->name, propName) == 0) { + if (propValue == NULL || strcmp((char *)(propP + 1), propValue) == 0) { *entryH = (DTEntry)nodeP; - return(kSuccess); + return kSuccess; } } } // Search child nodes - for (k = 0; k < nodeP->nChildren; ++k) - { - if (find_entry(propName, propValue, entryH) == kSuccess) - return(kSuccess); + for (k = 0; k < nodeP->nChildren; ++k) { + if (find_entry(propName, propValue, entryH) == kSuccess) { + return kSuccess; + } } - return(kError); + return kError; } int DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry) { - DTEntryNameBuf buf; - RealDTEntry cur; - const char * cp; + DTEntryNameBuf buf; + RealDTEntry cur; + const char * cp; if (!DTInitialized) { return kError; @@ -252,7 +256,6 @@ DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEnt } cur = FindChild(cur, buf); - } while (cur != NULL); return kError; @@ -291,7 +294,7 @@ DTEnterEntry(DTEntryIterator iter, DTEntry childEntry) newScope->nextScope = iter->savedScope; 
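/* The freshly linked frame snapshots the current (scope, entry, index)
 * below, so the matching DTExitEntry can pop straight back to this
 * position once the child subtree has been walked. */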
newScope->scope = iter->currentScope; newScope->entry = iter->currentEntry; - newScope->index = iter->currentIndex; + newScope->index = iter->currentIndex; iter->currentScope = childEntry; iter->currentEntry = NULL; @@ -374,7 +377,7 @@ DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValu for (k = 0; k < entry->nProperties; k++) { if (strcmp(prop->name, propertyName) == 0) { *propertyValue = (void *) (((uintptr_t)prop) - + sizeof(DeviceTreeNodeProperty)); + + sizeof(DeviceTreeNodeProperty)); *propertySize = prop->length; return kSuccess; } @@ -387,7 +390,6 @@ DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValu int DTInitPropertyIterator(const DTEntry entry, DTPropertyIterator iter) { - iter->entry = entry; iter->currentProperty = NULL; iter->currentIndex = 0; @@ -419,4 +421,3 @@ DTRestartPropertyIteration(DTPropertyIterator iter) iter->currentIndex = 0; return kSuccess; } - diff --git a/pexpert/gen/pe_gen.c b/pexpert/gen/pe_gen.c index 5515f47b2..b96bf4974 100644 --- a/pexpert/gen/pe_gen.c +++ b/pexpert/gen/pe_gen.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -50,12 +50,14 @@ int32_t gPESerialBaud = -1; int debug_cpu_performance_degradation_factor = 1; -void pe_init_debug(void) +void +pe_init_debug(void) { boolean_t boot_arg_value; - if (!PE_parse_boot_argn("debug", &DEBUGFlag, sizeof (DEBUGFlag))) + if (!PE_parse_boot_argn("debug", &DEBUGFlag, sizeof(DEBUGFlag))) { DEBUGFlag = 0; + } gPEKernelConfigurationBitmask = 0; @@ -82,7 +84,7 @@ void pe_init_debug(void) #else if (!PE_i_can_has_debugger(NULL)) { boot_arg_value = FALSE; - } else if (!PE_parse_boot_argn("diagnostic_api", &boot_arg_value, sizeof(boot_arg_value))) { + } else if (!PE_parse_boot_argn("diagnostic_api", &boot_arg_value, sizeof(boot_arg_value))) { boot_arg_value = TRUE; } #endif @@ -90,11 +92,11 @@ void pe_init_debug(void) int factor = 1; - boolean_t have_bootarg = PE_parse_boot_argn("cpu-factor", &factor, sizeof (factor)); + boolean_t have_bootarg = PE_parse_boot_argn("cpu-factor", &factor, sizeof(factor)); if (have_bootarg) { debug_cpu_performance_degradation_factor = factor; } else { - DTEntry root; + DTEntry root; if (DTLookupEntry(NULL, "/", &root) == kSuccess) { void *prop = NULL; uint32_t size = 0; @@ -105,10 +107,12 @@ void pe_init_debug(void) } } -void PE_enter_debugger(const char *cause) +void +PE_enter_debugger(const char *cause) { - if (DEBUGFlag & DB_NMI) - Debugger(cause); + if (DEBUGFlag & DB_NMI) { + Debugger(cause); + } } uint32_t @@ -123,130 +127,133 @@ extern void vcattach(void); /* Globals */ void (*PE_putc)(char c); -void PE_init_printf(boolean_t vm_initialized) +void +PE_init_printf(boolean_t vm_initialized) { - if (!vm_initialized) { - PE_putc = cnputc; - } else { - vcattach(); - } + if (!vm_initialized) { + PE_putc = cnputc; + } else { + vcattach(); + } } uint32_t PE_get_random_seed(unsigned char *dst_random_seed, uint32_t request_size) { - DTEntry entryP; - uint32_t size = 0; - void *dt_random_seed; + DTEntry entryP; + uint32_t size = 0; + void *dt_random_seed; - if ((DTLookupEntry(NULL, "/chosen", &entryP) == kSuccess) + if ((DTLookupEntry(NULL, "/chosen", &entryP) == kSuccess) && (DTGetProperty(entryP, "random-seed", - (void **)&dt_random_seed, &size) == kSuccess)) { + (void **)&dt_random_seed, &size) == kSuccess)) { unsigned char *src_random_seed; unsigned int i; unsigned int null_count = 0; src_random_seed = (unsigned char *)dt_random_seed; - if (size > request_size) size = request_size; + if (size > request_size) { + size = request_size; + } /* * Copy from the device tree into the destination buffer, * count the number of null bytes and null out the device tree. 
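 * Scrubbing the source copy matters: the device tree stays mapped for
 * the life of the kernel, so a seed left behind could later be read back
 * and used to reconstruct early entropy inputs.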
*/ - for (i=0 ; i< size; i++, src_random_seed++, dst_random_seed++) { + for (i = 0; i < size; i++, src_random_seed++, dst_random_seed++) { *dst_random_seed = *src_random_seed; null_count += *src_random_seed == (unsigned char)0; *src_random_seed = (unsigned char)0; } - if (null_count == size) + if (null_count == size) { /* All nulls is no seed - return 0 */ size = 0; + } } - return(size); + return size; } -unsigned char appleClut8[ 256 * 3 ] = { +unsigned char appleClut8[256 * 3] = { // 00 - 0xFF,0xFF,0xFF, 0xFF,0xFF,0xCC, 0xFF,0xFF,0x99, 0xFF,0xFF,0x66, - 0xFF,0xFF,0x33, 0xFF,0xFF,0x00, 0xFF,0xCC,0xFF, 0xFF,0xCC,0xCC, - 0xFF,0xCC,0x99, 0xFF,0xCC,0x66, 0xFF,0xCC,0x33, 0xFF,0xCC,0x00, - 0xFF,0x99,0xFF, 0xFF,0x99,0xCC, 0xFF,0x99,0x99, 0xFF,0x99,0x66, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xCC, 0xFF, 0xFF, 0x99, 0xFF, 0xFF, 0x66, + 0xFF, 0xFF, 0x33, 0xFF, 0xFF, 0x00, 0xFF, 0xCC, 0xFF, 0xFF, 0xCC, 0xCC, + 0xFF, 0xCC, 0x99, 0xFF, 0xCC, 0x66, 0xFF, 0xCC, 0x33, 0xFF, 0xCC, 0x00, + 0xFF, 0x99, 0xFF, 0xFF, 0x99, 0xCC, 0xFF, 0x99, 0x99, 0xFF, 0x99, 0x66, // 10 - 0xFF,0x99,0x33, 0xFF,0x99,0x00, 0xFF,0x66,0xFF, 0xFF,0x66,0xCC, - 0xFF,0x66,0x99, 0xFF,0x66,0x66, 0xFF,0x66,0x33, 0xFF,0x66,0x00, - 0xFF,0x33,0xFF, 0xFF,0x33,0xCC, 0xFF,0x33,0x99, 0xFF,0x33,0x66, - 0xFF,0x33,0x33, 0xFF,0x33,0x00, 0xFF,0x00,0xFF, 0xFF,0x00,0xCC, + 0xFF, 0x99, 0x33, 0xFF, 0x99, 0x00, 0xFF, 0x66, 0xFF, 0xFF, 0x66, 0xCC, + 0xFF, 0x66, 0x99, 0xFF, 0x66, 0x66, 0xFF, 0x66, 0x33, 0xFF, 0x66, 0x00, + 0xFF, 0x33, 0xFF, 0xFF, 0x33, 0xCC, 0xFF, 0x33, 0x99, 0xFF, 0x33, 0x66, + 0xFF, 0x33, 0x33, 0xFF, 0x33, 0x00, 0xFF, 0x00, 0xFF, 0xFF, 0x00, 0xCC, // 20 - 0xFF,0x00,0x99, 0xFF,0x00,0x66, 0xFF,0x00,0x33, 0xFF,0x00,0x00, - 0xCC,0xFF,0xFF, 0xCC,0xFF,0xCC, 0xCC,0xFF,0x99, 0xCC,0xFF,0x66, - 0xCC,0xFF,0x33, 0xCC,0xFF,0x00, 0xCC,0xCC,0xFF, 0xCC,0xCC,0xCC, - 0xCC,0xCC,0x99, 0xCC,0xCC,0x66, 0xCC,0xCC,0x33, 0xCC,0xCC,0x00, + 0xFF, 0x00, 0x99, 0xFF, 0x00, 0x66, 0xFF, 0x00, 0x33, 0xFF, 0x00, 0x00, + 0xCC, 0xFF, 0xFF, 0xCC, 0xFF, 0xCC, 0xCC, 0xFF, 0x99, 0xCC, 0xFF, 0x66, + 0xCC, 0xFF, 0x33, 0xCC, 0xFF, 0x00, 0xCC, 0xCC, 0xFF, 0xCC, 0xCC, 0xCC, + 0xCC, 0xCC, 0x99, 0xCC, 0xCC, 0x66, 0xCC, 0xCC, 0x33, 0xCC, 0xCC, 0x00, // 30 - 0xCC,0x99,0xFF, 0xCC,0x99,0xCC, 0xCC,0x99,0x99, 0xCC,0x99,0x66, - 0xCC,0x99,0x33, 0xCC,0x99,0x00, 0xCC,0x66,0xFF, 0xCC,0x66,0xCC, - 0xCC,0x66,0x99, 0xCC,0x66,0x66, 0xCC,0x66,0x33, 0xCC,0x66,0x00, - 0xCC,0x33,0xFF, 0xCC,0x33,0xCC, 0xCC,0x33,0x99, 0xCC,0x33,0x66, + 0xCC, 0x99, 0xFF, 0xCC, 0x99, 0xCC, 0xCC, 0x99, 0x99, 0xCC, 0x99, 0x66, + 0xCC, 0x99, 0x33, 0xCC, 0x99, 0x00, 0xCC, 0x66, 0xFF, 0xCC, 0x66, 0xCC, + 0xCC, 0x66, 0x99, 0xCC, 0x66, 0x66, 0xCC, 0x66, 0x33, 0xCC, 0x66, 0x00, + 0xCC, 0x33, 0xFF, 0xCC, 0x33, 0xCC, 0xCC, 0x33, 0x99, 0xCC, 0x33, 0x66, // 40 - 0xCC,0x33,0x33, 0xCC,0x33,0x00, 0xCC,0x00,0xFF, 0xCC,0x00,0xCC, - 0xCC,0x00,0x99, 0xCC,0x00,0x66, 0xCC,0x00,0x33, 0xCC,0x00,0x00, - 0x99,0xFF,0xFF, 0x99,0xFF,0xCC, 0x99,0xFF,0x99, 0x99,0xFF,0x66, - 0x99,0xFF,0x33, 0x99,0xFF,0x00, 0x99,0xCC,0xFF, 0x99,0xCC,0xCC, + 0xCC, 0x33, 0x33, 0xCC, 0x33, 0x00, 0xCC, 0x00, 0xFF, 0xCC, 0x00, 0xCC, + 0xCC, 0x00, 0x99, 0xCC, 0x00, 0x66, 0xCC, 0x00, 0x33, 0xCC, 0x00, 0x00, + 0x99, 0xFF, 0xFF, 0x99, 0xFF, 0xCC, 0x99, 0xFF, 0x99, 0x99, 0xFF, 0x66, + 0x99, 0xFF, 0x33, 0x99, 0xFF, 0x00, 0x99, 0xCC, 0xFF, 0x99, 0xCC, 0xCC, // 50 - 0x99,0xCC,0x99, 0x99,0xCC,0x66, 0x99,0xCC,0x33, 0x99,0xCC,0x00, - 0x99,0x99,0xFF, 0x99,0x99,0xCC, 0x99,0x99,0x99, 0x99,0x99,0x66, - 0x99,0x99,0x33, 0x99,0x99,0x00, 0x99,0x66,0xFF, 0x99,0x66,0xCC, - 0x99,0x66,0x99, 
0x99,0x66,0x66, 0x99,0x66,0x33, 0x99,0x66,0x00, + 0x99, 0xCC, 0x99, 0x99, 0xCC, 0x66, 0x99, 0xCC, 0x33, 0x99, 0xCC, 0x00, + 0x99, 0x99, 0xFF, 0x99, 0x99, 0xCC, 0x99, 0x99, 0x99, 0x99, 0x99, 0x66, + 0x99, 0x99, 0x33, 0x99, 0x99, 0x00, 0x99, 0x66, 0xFF, 0x99, 0x66, 0xCC, + 0x99, 0x66, 0x99, 0x99, 0x66, 0x66, 0x99, 0x66, 0x33, 0x99, 0x66, 0x00, // 60 - 0x99,0x33,0xFF, 0x99,0x33,0xCC, 0x99,0x33,0x99, 0x99,0x33,0x66, - 0x99,0x33,0x33, 0x99,0x33,0x00, 0x99,0x00,0xFF, 0x99,0x00,0xCC, - 0x99,0x00,0x99, 0x99,0x00,0x66, 0x99,0x00,0x33, 0x99,0x00,0x00, - 0x66,0xFF,0xFF, 0x66,0xFF,0xCC, 0x66,0xFF,0x99, 0x66,0xFF,0x66, + 0x99, 0x33, 0xFF, 0x99, 0x33, 0xCC, 0x99, 0x33, 0x99, 0x99, 0x33, 0x66, + 0x99, 0x33, 0x33, 0x99, 0x33, 0x00, 0x99, 0x00, 0xFF, 0x99, 0x00, 0xCC, + 0x99, 0x00, 0x99, 0x99, 0x00, 0x66, 0x99, 0x00, 0x33, 0x99, 0x00, 0x00, + 0x66, 0xFF, 0xFF, 0x66, 0xFF, 0xCC, 0x66, 0xFF, 0x99, 0x66, 0xFF, 0x66, // 70 - 0x66,0xFF,0x33, 0x66,0xFF,0x00, 0x66,0xCC,0xFF, 0x66,0xCC,0xCC, - 0x66,0xCC,0x99, 0x66,0xCC,0x66, 0x66,0xCC,0x33, 0x66,0xCC,0x00, - 0x66,0x99,0xFF, 0x66,0x99,0xCC, 0x66,0x99,0x99, 0x66,0x99,0x66, - 0x66,0x99,0x33, 0x66,0x99,0x00, 0x66,0x66,0xFF, 0x66,0x66,0xCC, + 0x66, 0xFF, 0x33, 0x66, 0xFF, 0x00, 0x66, 0xCC, 0xFF, 0x66, 0xCC, 0xCC, + 0x66, 0xCC, 0x99, 0x66, 0xCC, 0x66, 0x66, 0xCC, 0x33, 0x66, 0xCC, 0x00, + 0x66, 0x99, 0xFF, 0x66, 0x99, 0xCC, 0x66, 0x99, 0x99, 0x66, 0x99, 0x66, + 0x66, 0x99, 0x33, 0x66, 0x99, 0x00, 0x66, 0x66, 0xFF, 0x66, 0x66, 0xCC, // 80 - 0x66,0x66,0x99, 0x66,0x66,0x66, 0x66,0x66,0x33, 0x66,0x66,0x00, - 0x66,0x33,0xFF, 0x66,0x33,0xCC, 0x66,0x33,0x99, 0x66,0x33,0x66, - 0x66,0x33,0x33, 0x66,0x33,0x00, 0x66,0x00,0xFF, 0x66,0x00,0xCC, - 0x66,0x00,0x99, 0x66,0x00,0x66, 0x66,0x00,0x33, 0x66,0x00,0x00, + 0x66, 0x66, 0x99, 0x66, 0x66, 0x66, 0x66, 0x66, 0x33, 0x66, 0x66, 0x00, + 0x66, 0x33, 0xFF, 0x66, 0x33, 0xCC, 0x66, 0x33, 0x99, 0x66, 0x33, 0x66, + 0x66, 0x33, 0x33, 0x66, 0x33, 0x00, 0x66, 0x00, 0xFF, 0x66, 0x00, 0xCC, + 0x66, 0x00, 0x99, 0x66, 0x00, 0x66, 0x66, 0x00, 0x33, 0x66, 0x00, 0x00, // 90 - 0x33,0xFF,0xFF, 0x33,0xFF,0xCC, 0x33,0xFF,0x99, 0x33,0xFF,0x66, - 0x33,0xFF,0x33, 0x33,0xFF,0x00, 0x33,0xCC,0xFF, 0x33,0xCC,0xCC, - 0x33,0xCC,0x99, 0x33,0xCC,0x66, 0x33,0xCC,0x33, 0x33,0xCC,0x00, - 0x33,0x99,0xFF, 0x33,0x99,0xCC, 0x33,0x99,0x99, 0x33,0x99,0x66, + 0x33, 0xFF, 0xFF, 0x33, 0xFF, 0xCC, 0x33, 0xFF, 0x99, 0x33, 0xFF, 0x66, + 0x33, 0xFF, 0x33, 0x33, 0xFF, 0x00, 0x33, 0xCC, 0xFF, 0x33, 0xCC, 0xCC, + 0x33, 0xCC, 0x99, 0x33, 0xCC, 0x66, 0x33, 0xCC, 0x33, 0x33, 0xCC, 0x00, + 0x33, 0x99, 0xFF, 0x33, 0x99, 0xCC, 0x33, 0x99, 0x99, 0x33, 0x99, 0x66, // a0 - 0x33,0x99,0x33, 0x33,0x99,0x00, 0x33,0x66,0xFF, 0x33,0x66,0xCC, - 0x33,0x66,0x99, 0x33,0x66,0x66, 0x33,0x66,0x33, 0x33,0x66,0x00, - 0x33,0x33,0xFF, 0x33,0x33,0xCC, 0x33,0x33,0x99, 0x33,0x33,0x66, - 0x33,0x33,0x33, 0x33,0x33,0x00, 0x33,0x00,0xFF, 0x33,0x00,0xCC, + 0x33, 0x99, 0x33, 0x33, 0x99, 0x00, 0x33, 0x66, 0xFF, 0x33, 0x66, 0xCC, + 0x33, 0x66, 0x99, 0x33, 0x66, 0x66, 0x33, 0x66, 0x33, 0x33, 0x66, 0x00, + 0x33, 0x33, 0xFF, 0x33, 0x33, 0xCC, 0x33, 0x33, 0x99, 0x33, 0x33, 0x66, + 0x33, 0x33, 0x33, 0x33, 0x33, 0x00, 0x33, 0x00, 0xFF, 0x33, 0x00, 0xCC, // b0 - 0x33,0x00,0x99, 0x33,0x00,0x66, 0x33,0x00,0x33, 0x33,0x00,0x00, - 0x00,0xFF,0xFF, 0x00,0xFF,0xCC, 0x00,0xFF,0x99, 0x00,0xFF,0x66, - 0x00,0xFF,0x33, 0x00,0xFF,0x00, 0x00,0xCC,0xFF, 0x00,0xCC,0xCC, - 0x00,0xCC,0x99, 0x00,0xCC,0x66, 0x00,0xCC,0x33, 0x00,0xCC,0x00, + 0x33, 0x00, 0x99, 0x33, 0x00, 0x66, 0x33, 0x00, 0x33, 0x33, 0x00, 0x00, + 0x00, 0xFF, 0xFF, 0x00, 0xFF, 
0xCC, 0x00, 0xFF, 0x99, 0x00, 0xFF, 0x66, + 0x00, 0xFF, 0x33, 0x00, 0xFF, 0x00, 0x00, 0xCC, 0xFF, 0x00, 0xCC, 0xCC, + 0x00, 0xCC, 0x99, 0x00, 0xCC, 0x66, 0x00, 0xCC, 0x33, 0x00, 0xCC, 0x00, // c0 - 0x00,0x99,0xFF, 0x00,0x99,0xCC, 0x00,0x99,0x99, 0x00,0x99,0x66, - 0x00,0x99,0x33, 0x00,0x99,0x00, 0x00,0x66,0xFF, 0x00,0x66,0xCC, - 0x00,0x66,0x99, 0x00,0x66,0x66, 0x00,0x66,0x33, 0x00,0x66,0x00, - 0x00,0x33,0xFF, 0x00,0x33,0xCC, 0x00,0x33,0x99, 0x00,0x33,0x66, + 0x00, 0x99, 0xFF, 0x00, 0x99, 0xCC, 0x00, 0x99, 0x99, 0x00, 0x99, 0x66, + 0x00, 0x99, 0x33, 0x00, 0x99, 0x00, 0x00, 0x66, 0xFF, 0x00, 0x66, 0xCC, + 0x00, 0x66, 0x99, 0x00, 0x66, 0x66, 0x00, 0x66, 0x33, 0x00, 0x66, 0x00, + 0x00, 0x33, 0xFF, 0x00, 0x33, 0xCC, 0x00, 0x33, 0x99, 0x00, 0x33, 0x66, // d0 - 0x00,0x33,0x33, 0x00,0x33,0x00, 0x00,0x00,0xFF, 0x00,0x00,0xCC, - 0x00,0x00,0x99, 0x00,0x00,0x66, 0x00,0x00,0x33, 0xEE,0x00,0x00, - 0xDD,0x00,0x00, 0xBB,0x00,0x00, 0xAA,0x00,0x00, 0x88,0x00,0x00, - 0x77,0x00,0x00, 0x55,0x00,0x00, 0x44,0x00,0x00, 0x22,0x00,0x00, + 0x00, 0x33, 0x33, 0x00, 0x33, 0x00, 0x00, 0x00, 0xFF, 0x00, 0x00, 0xCC, + 0x00, 0x00, 0x99, 0x00, 0x00, 0x66, 0x00, 0x00, 0x33, 0xEE, 0x00, 0x00, + 0xDD, 0x00, 0x00, 0xBB, 0x00, 0x00, 0xAA, 0x00, 0x00, 0x88, 0x00, 0x00, + 0x77, 0x00, 0x00, 0x55, 0x00, 0x00, 0x44, 0x00, 0x00, 0x22, 0x00, 0x00, // e0 - 0x11,0x00,0x00, 0x00,0xEE,0x00, 0x00,0xDD,0x00, 0x00,0xBB,0x00, - 0x00,0xAA,0x00, 0x00,0x88,0x00, 0x00,0x77,0x00, 0x00,0x55,0x00, - 0x00,0x44,0x00, 0x00,0x22,0x00, 0x00,0x11,0x00, 0x00,0x00,0xEE, - 0x00,0x00,0xDD, 0x00,0x00,0xBB, 0x00,0x00,0xAA, 0x00,0x00,0x88, + 0x11, 0x00, 0x00, 0x00, 0xEE, 0x00, 0x00, 0xDD, 0x00, 0x00, 0xBB, 0x00, + 0x00, 0xAA, 0x00, 0x00, 0x88, 0x00, 0x00, 0x77, 0x00, 0x00, 0x55, 0x00, + 0x00, 0x44, 0x00, 0x00, 0x22, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0xEE, + 0x00, 0x00, 0xDD, 0x00, 0x00, 0xBB, 0x00, 0x00, 0xAA, 0x00, 0x00, 0x88, // f0 - 0x00,0x00,0x77, 0x00,0x00,0x55, 0x00,0x00,0x44, 0x00,0x00,0x22, - 0x00,0x00,0x11, 0xEE,0xEE,0xEE, 0xDD,0xDD,0xDD, 0xBB,0xBB,0xBB, - 0xAA,0xAA,0xAA, 0x88,0x88,0x88, 0x77,0x77,0x77, 0x55,0x55,0x55, - 0x44,0x44,0x44, 0x22,0x22,0x22, 0x11,0x11,0x11, 0x00,0x00,0x00 + 0x00, 0x00, 0x77, 0x00, 0x00, 0x55, 0x00, 0x00, 0x44, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x11, 0xEE, 0xEE, 0xEE, 0xDD, 0xDD, 0xDD, 0xBB, 0xBB, 0xBB, + 0xAA, 0xAA, 0xAA, 0x88, 0x88, 0x88, 0x77, 0x77, 0x77, 0x55, 0x55, 0x55, + 0x44, 0x44, 0x44, 0x22, 0x22, 0x22, 0x11, 0x11, 0x11, 0x00, 0x00, 0x00 }; - diff --git a/pexpert/i386/boot_images.h b/pexpert/i386/boot_images.h index 71e0f2860..b8341c27f 100644 --- a/pexpert/i386/boot_images.h +++ b/pexpert/i386/boot_images.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,91 +22,91 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -static const unsigned char bootClut[ 256 * 3 ] = +static const unsigned char bootClut[256 * 3] = { - 0xff,0xff,0xff, 0xbf,0xbf,0xbf, 0xbe,0xbe,0xbe, 0xbd,0xbd,0xbd, - 0xbc,0xbc,0xbc, 0xff,0xff,0x00, 0xba,0xba,0xba, 0xb9,0xb9,0xb9, - 0xb8,0xb8,0xb8, 0xb7,0xb7,0xb7, 0xb6,0xb6,0xb6, 0xb5,0xb5,0xb5, - 0xb4,0xb4,0xb4, 0xb3,0xb3,0xb3, 0xb2,0xb2,0xb2, 0x00,0x00,0x00, - - 0xb1,0xb1,0xb1, 0xb0,0xb0,0xb0, 0xaf,0xaf,0xaf, 0xae,0xae,0xae, - 0xad,0xad,0xad, 0xac,0xac,0xac, 0xab,0xab,0xab, 0xaa,0xaa,0xaa, - 0xff,0x00,0xff, 0xa9,0xa9,0xa9, 0xa8,0xa8,0xa8, 0xa7,0xa7,0xa7, - 0xa6,0xa6,0xa6, 0xa5,0xa5,0xa5, 0xa4,0xa4,0xa4, 0xa3,0xa3,0xa3, - - 0xa2,0xa2,0xa2, 0xa1,0xa1,0xa1, 0xa0,0xa0,0xa0, 0xff,0x00,0x00, - 0x9f,0x9f,0x9f, 0x9e,0x9e,0x9e, 0x9d,0x9d,0x9d, 0x9c,0x9c,0x9c, - 0x9b,0x9b,0x9b, 0x9a,0x9a,0x9a, 0xcc,0xcc,0xff, 0xcc,0xcc,0xcc, - 0x99,0x99,0x99, 0x98,0x98,0x98, 0x97,0x97,0x97, 0x96,0x96,0x96, - - 0x95,0x95,0x95, 0x94,0x94,0x94, 0x93,0x93,0x93, 0x92,0x92,0x92, - 0x91,0x91,0x91, 0x90,0x90,0x90, 0x8f,0x8f,0x8f, 0x8e,0x8e,0x8e, - 0x8d,0x8d,0x8d, 0x8c,0x8c,0x8c, 0x8b,0x8b,0x8b, 0x8a,0x8a,0x8a, - 0x89,0x89,0x89, 0x88,0x88,0x88, 0x86,0x86,0x86, 0x85,0x85,0x85, - - 0x84,0x84,0x84, 0x83,0x83,0x83, 0x82,0x82,0x82, 0x81,0x81,0x81, - 0x80,0x80,0x80, 0x7f,0x7f,0x7f, 0x7e,0x7e,0x7e, 0x7d,0x7d,0x7d, - 0x7c,0x7c,0x7c, 0x7b,0x7b,0x7b, 0x7a,0x7a,0x7a, 0x79,0x79,0x79, - 0x78,0x78,0x78, 0x76,0x76,0x76, 0x75,0x75,0x75, 0x74,0x74,0x74, - - 0x73,0x73,0x73, 0x72,0x72,0x72, 0x71,0x71,0x71, 0x70,0x70,0x70, - 0x6f,0x6f,0x6f, 0x6e,0x6e,0x6e, 0x6d,0x6d,0x6d, 0x6c,0x6c,0x6c, - 0x6b,0x6b,0x6b, 0x6a,0x6a,0x6a, 0x69,0x69,0x69, 0x68,0x68,0x68, - 0x67,0x67,0x67, 0x66,0x66,0x66, 0x64,0x64,0x64, 0x63,0x63,0x63, - - 0x62,0x62,0x62, 0x61,0x61,0x61, 0x60,0x60,0x60, 0x5f,0x5f,0x5f, - 0x5e,0x5e,0x5e, 0x5d,0x5d,0x5d, 0x5c,0x5c,0x5c, 0x5b,0x5b,0x5b, - 0x5a,0x5a,0x5a, 0x59,0x59,0x59, 0x58,0x58,0x58, 0x57,0x57,0x57, - 0x56,0x56,0x56, 0x54,0x54,0x54, 0x53,0x53,0x53, 0x52,0x52,0x52, - - 0x51,0x51,0x51, 0x50,0x50,0x50, 0x4f,0x4f,0x4f, 0x4e,0x4e,0x4e, - 0x4d,0x4d,0x4d, 0x4c,0x4c,0x4c, 0x4b,0x4b,0x4b, 0x4a,0x4a,0x4a, - 0x49,0x49,0x49, 0x48,0x48,0x48, 0x47,0x47,0x47, 0x46,0x46,0x46, - 0x45,0x45,0x45, 0x43,0x43,0x43, 0x42,0x42,0x42, 0x41,0x41,0x41, - - 0x40,0x40,0x40, 0x3f,0x3f,0x3f, 0x3e,0x3e,0x3e, 0x3d,0x3d,0x3d, - 0x3c,0x3c,0x3c, 0x3b,0x3b,0x3b, 0x3a,0x3a,0x3a, 0x39,0x39,0x39, - 0x38,0x38,0x38, 0x37,0x37,0x37, 0x36,0x36,0x36, 0x35,0x35,0x35, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 
0x65,0x65,0x65, - 0x00,0xff,0xff, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x00,0xff,0x00, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x00,0x00,0xff, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0xd0,0x00,0x00, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x00,0xbb,0x00, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, - 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0xbb,0xbb,0xbb, - 0x65,0x65,0x65, 0x88,0x88,0x88, 0x77,0x77,0x77, 0x55,0x55,0x55, - 0x44,0x44,0x44, 0x22,0x22,0x22, 0x65,0x65,0x65, 0x00,0x00,0x00 + 0xff, 0xff, 0xff, 0xbf, 0xbf, 0xbf, 0xbe, 0xbe, 0xbe, 0xbd, 0xbd, 0xbd, + 0xbc, 0xbc, 0xbc, 0xff, 0xff, 0x00, 0xba, 0xba, 0xba, 0xb9, 0xb9, 0xb9, + 0xb8, 0xb8, 0xb8, 0xb7, 0xb7, 0xb7, 0xb6, 0xb6, 0xb6, 0xb5, 0xb5, 0xb5, + 0xb4, 0xb4, 0xb4, 0xb3, 0xb3, 0xb3, 0xb2, 0xb2, 0xb2, 0x00, 0x00, 0x00, + + 0xb1, 0xb1, 0xb1, 0xb0, 0xb0, 0xb0, 0xaf, 0xaf, 0xaf, 0xae, 0xae, 0xae, + 0xad, 0xad, 0xad, 0xac, 0xac, 0xac, 0xab, 0xab, 0xab, 0xaa, 0xaa, 0xaa, + 0xff, 0x00, 0xff, 0xa9, 0xa9, 0xa9, 0xa8, 0xa8, 0xa8, 0xa7, 0xa7, 0xa7, + 0xa6, 0xa6, 0xa6, 0xa5, 0xa5, 0xa5, 0xa4, 0xa4, 0xa4, 0xa3, 0xa3, 0xa3, + + 0xa2, 0xa2, 0xa2, 0xa1, 0xa1, 0xa1, 0xa0, 0xa0, 0xa0, 0xff, 0x00, 0x00, + 0x9f, 0x9f, 0x9f, 0x9e, 0x9e, 0x9e, 0x9d, 0x9d, 0x9d, 0x9c, 0x9c, 0x9c, + 0x9b, 0x9b, 0x9b, 0x9a, 0x9a, 0x9a, 0xcc, 0xcc, 0xff, 0xcc, 0xcc, 0xcc, + 0x99, 0x99, 0x99, 0x98, 0x98, 0x98, 0x97, 0x97, 0x97, 0x96, 0x96, 0x96, + + 0x95, 0x95, 0x95, 0x94, 0x94, 0x94, 0x93, 0x93, 0x93, 0x92, 0x92, 0x92, + 0x91, 0x91, 0x91, 0x90, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, + 0x8d, 0x8d, 0x8d, 0x8c, 0x8c, 0x8c, 0x8b, 0x8b, 0x8b, 0x8a, 0x8a, 0x8a, + 0x89, 0x89, 0x89, 0x88, 0x88, 0x88, 0x86, 0x86, 0x86, 0x85, 0x85, 0x85, + + 0x84, 0x84, 0x84, 0x83, 0x83, 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81, + 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7e, 0x7e, 0x7e, 0x7d, 0x7d, 0x7d, + 0x7c, 0x7c, 0x7c, 0x7b, 0x7b, 0x7b, 0x7a, 0x7a, 0x7a, 0x79, 0x79, 0x79, + 0x78, 0x78, 0x78, 0x76, 0x76, 0x76, 0x75, 0x75, 0x75, 0x74, 0x74, 0x74, + + 0x73, 0x73, 0x73, 0x72, 0x72, 0x72, 0x71, 0x71, 0x71, 0x70, 0x70, 0x70, + 0x6f, 0x6f, 0x6f, 0x6e, 0x6e, 0x6e, 0x6d, 0x6d, 0x6d, 0x6c, 0x6c, 0x6c, + 0x6b, 0x6b, 0x6b, 0x6a, 0x6a, 0x6a, 0x69, 0x69, 0x69, 0x68, 0x68, 0x68, + 0x67, 0x67, 0x67, 0x66, 0x66, 0x66, 0x64, 0x64, 0x64, 0x63, 0x63, 0x63, + + 0x62, 0x62, 0x62, 0x61, 0x61, 0x61, 0x60, 0x60, 0x60, 0x5f, 0x5f, 0x5f, + 0x5e, 0x5e, 0x5e, 0x5d, 0x5d, 0x5d, 0x5c, 0x5c, 0x5c, 0x5b, 0x5b, 0x5b, + 0x5a, 0x5a, 0x5a, 0x59, 0x59, 0x59, 0x58, 0x58, 0x58, 0x57, 0x57, 0x57, + 0x56, 0x56, 0x56, 0x54, 0x54, 0x54, 0x53, 0x53, 0x53, 0x52, 0x52, 0x52, + + 0x51, 0x51, 0x51, 0x50, 0x50, 0x50, 0x4f, 0x4f, 0x4f, 0x4e, 0x4e, 0x4e, + 0x4d, 0x4d, 0x4d, 0x4c, 0x4c, 0x4c, 0x4b, 0x4b, 0x4b, 0x4a, 0x4a, 0x4a, + 0x49, 0x49, 0x49, 0x48, 0x48, 0x48, 0x47, 
0x47, 0x47, 0x46, 0x46, 0x46, + 0x45, 0x45, 0x45, 0x43, 0x43, 0x43, 0x42, 0x42, 0x42, 0x41, 0x41, 0x41, + + 0x40, 0x40, 0x40, 0x3f, 0x3f, 0x3f, 0x3e, 0x3e, 0x3e, 0x3d, 0x3d, 0x3d, + 0x3c, 0x3c, 0x3c, 0x3b, 0x3b, 0x3b, 0x3a, 0x3a, 0x3a, 0x39, 0x39, 0x39, + 0x38, 0x38, 0x38, 0x37, 0x37, 0x37, 0x36, 0x36, 0x36, 0x35, 0x35, 0x35, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x00, 0xff, 0xff, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x00, 0xff, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x00, 0x00, 0xff, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0xd0, 0x00, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x00, 0xbb, 0x00, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, + 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0xbb, 0xbb, 0xbb, + 0x65, 0x65, 0x65, 0x88, 0x88, 0x88, 0x77, 0x77, 0x77, 0x55, 0x55, 0x55, + 0x44, 0x44, 0x44, 0x22, 0x22, 0x22, 0x65, 0x65, 0x65, 0x00, 0x00, 0x00 }; #define kFailedBootWidth 28 @@ -115,32 +115,32 @@ static const unsigned char bootClut[ 256 * 3 ] = static const unsigned char failedBootPict[] = { - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x09,0x13,0x1b,0xc0,0x27,0x27,0xc0,0x1b,0x13,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x15,0xc0,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0xc0,0x15,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x13,0xc0,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x1e,0x0c,0x00,0x00,0x00,0x00,0x00,0x00, - 
0x00,0x00,0x00,0x00,0x00,0x11,0x27,0x29,0x29,0x29,0xc0,0xa0,0x11,0x09,0x09,0x11,0xa0,0xc0,0x29,0x29,0x29,0x20,0x0c,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x07,0x20,0x29,0x29,0x29,0xc0,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x15,0x25,0x29,0x29,0x1e,0x07,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x15,0x29,0x29,0x29,0x29,0x29,0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xa3,0x25,0x29,0x29,0x15,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x09,0xc0,0x29,0x29,0xc0,0x29,0x29,0x29,0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x15,0x29,0x29,0xc0,0x09,0x00,0x00,0x00, - 0x00,0x00,0x00,0x13,0x29,0x29,0xc0,0x09,0x1b,0x29,0x29,0x29,0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0xc0,0x29,0x29,0x13,0x00,0x00,0x00, - 0x00,0x00,0x00,0x1b,0x29,0x29,0xa0,0x00,0x01,0x1b,0x29,0x29,0x29,0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xa0,0x29,0x29,0x1b,0x00,0x00,0x00, - 0x00,0x00,0x00,0xc0,0x29,0x29,0x11,0x00,0x00,0x01,0x1b,0x29,0x29,0x29,0x1b,0x00,0x00,0x00,0x00,0x00,0x00,0x11,0x29,0x29,0xc0,0x00,0x00,0x00, - 0x00,0x00,0x00,0x27,0x29,0x29,0x09,0x00,0x00,0x00,0x01,0x1b,0x29,0x29,0x29,0x1b,0x00,0x00,0x00,0x00,0x00,0x09,0x29,0x29,0x27,0x00,0x00,0x00, - 0x00,0x00,0x00,0x27,0x29,0x29,0x09,0x00,0x00,0x00,0x00,0x01,0x1b,0x29,0x29,0x29,0x1b,0x00,0x00,0x00,0x00,0x09,0x29,0x29,0x27,0x00,0x00,0x00, - 0x00,0x00,0x00,0xc0,0x29,0x29,0x11,0x00,0x00,0x00,0x00,0x00,0x01,0x1b,0x29,0x29,0x29,0x1b,0x00,0x00,0x00,0x11,0x29,0x29,0xc0,0x00,0x00,0x00, - 0x00,0x00,0x00,0x1b,0x29,0x29,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x1b,0x29,0x29,0x29,0x1b,0x00,0x00,0xa0,0x29,0x29,0x1b,0x00,0x00,0x00, - 0x00,0x00,0x00,0x13,0x29,0x29,0xc0,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x1b,0x29,0x29,0x29,0x1b,0x08,0xc0,0x29,0x29,0x13,0x00,0x00,0x00, - 0x00,0x00,0x00,0x09,0xc0,0x29,0x29,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x1b,0x29,0x29,0x29,0xc0,0x29,0x29,0xc0,0x09,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x15,0x29,0x29,0x25,0xa3,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x1b,0x29,0x29,0x29,0x29,0x29,0x15,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x07,0x1e,0x29,0x29,0x25,0x15,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x09,0xc0,0x29,0x29,0x29,0x1e,0x07,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x0c,0x20,0x29,0x29,0x29,0xc0,0xa0,0x11,0x09,0x09,0x11,0xa0,0xc0,0x29,0x29,0x29,0x20,0x0c,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x0c,0x1e,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x1e,0x0c,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x15,0xc0,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0x29,0xc0,0x15,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x09,0x13,0x1b,0xc0,0x27,0x27,0xc0,0x1b,0x13,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00 + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x13, 0x1b, 0xc0, 0x27, 0x27, 0xc0, 0x1b, 0x13, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x15, 0xc0, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0xc0, 0x15, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xc0, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x1e, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x27, 0x29, 0x29, 0x29, 0xc0, 0xa0, 0x11, 0x09, 0x09, 0x11, 0xa0, 0xc0, 0x29, 0x29, 0x29, 0x20, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x20, 0x29, 0x29, 0x29, 0xc0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x15, 0x25, 0x29, 0x29, 0x1e, 0x07, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x15, 0x29, 0x29, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x25, 0x29, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x09, 0xc0, 0x29, 0x29, 0xc0, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x29, 0x29, 0xc0, 0x09, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x13, 0x29, 0x29, 0xc0, 0x09, 0x1b, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0xc0, 0x29, 0x29, 0x13, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x1b, 0x29, 0x29, 0xa0, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xc0, 0x29, 0x29, 0x11, 0x00, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x29, 0x29, 0xc0, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x27, 0x29, 0x29, 0x09, 0x00, 0x00, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x29, 0x29, 0x27, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x27, 0x29, 0x29, 0x09, 0x00, 0x00, 0x00, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, 0x00, 0x09, 0x29, 0x29, 0x27, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0xc0, 0x29, 0x29, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, 0x11, 0x29, 0x29, 0xc0, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x1b, 0x29, 0x29, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0x1b, 0x00, 0x00, 0xa0, 0x29, 0x29, 0x1b, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x13, 0x29, 0x29, 0xc0, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0x1b, 0x08, 0xc0, 0x29, 0x29, 0x13, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x09, 0xc0, 0x29, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0xc0, 0x29, 0x29, 0xc0, 0x09, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x15, 0x29, 0x29, 0x25, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x1b, 0x29, 0x29, 0x29, 0x29, 0x29, 0x15, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x07, 0x1e, 0x29, 0x29, 0x25, 0x15, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0xc0, 0x29, 0x29, 0x29, 0x1e, 0x07, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x20, 0x29, 0x29, 0x29, 0xc0, 0xa0, 0x11, 0x09, 0x09, 0x11, 0xa0, 0xc0, 0x29, 0x29, 0x29, 0x20, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1e, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x1e, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 
0x15, 0xc0, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0x29, 0xc0, 0x15, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x13, 0x1b, 0xc0, 0x27, 0x27, 0xc0, 0x1b, 0x13, 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; diff --git a/pexpert/i386/pe_bootargs.c b/pexpert/i386/pe_bootargs.c index 7c2f531a6..7282cf5f8 100644 --- a/pexpert/i386/pe_bootargs.c +++ b/pexpert/i386/pe_bootargs.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -32,5 +32,5 @@ char * PE_boot_args( void) { - return ((boot_args *)PE_state.bootArgs)->CommandLine; + return ((boot_args *)PE_state.bootArgs)->CommandLine; } diff --git a/pexpert/i386/pe_identify_machine.c b/pexpert/i386/pe_identify_machine.c index a632a7ae7..71e26b08f 100644 --- a/pexpert/i386/pe_identify_machine.c +++ b/pexpert/i386/pe_identify_machine.c @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -37,36 +37,37 @@ void pe_identify_machine(boot_args *args); * Sets up platform parameters. * Returns: nothing */ -void pe_identify_machine(__unused boot_args *args) +void +pe_identify_machine(__unused boot_args *args) { - // Clear the gPEClockFrequencyInfo struct - bzero((void *)&gPEClockFrequencyInfo, sizeof(clock_frequency_info_t)); - - // Start with default values. - gPEClockFrequencyInfo.timebase_frequency_hz = 1000000000; - gPEClockFrequencyInfo.bus_frequency_hz = 100000000; - gPEClockFrequencyInfo.cpu_frequency_hz = 300000000; - - gPEClockFrequencyInfo.bus_frequency_min_hz = gPEClockFrequencyInfo.bus_frequency_hz; - gPEClockFrequencyInfo.bus_frequency_max_hz = gPEClockFrequencyInfo.bus_frequency_hz; - gPEClockFrequencyInfo.cpu_frequency_min_hz = gPEClockFrequencyInfo.cpu_frequency_hz; - gPEClockFrequencyInfo.cpu_frequency_max_hz = gPEClockFrequencyInfo.cpu_frequency_hz; - - gPEClockFrequencyInfo.dec_clock_rate_hz = gPEClockFrequencyInfo.timebase_frequency_hz; - gPEClockFrequencyInfo.bus_clock_rate_hz = gPEClockFrequencyInfo.bus_frequency_hz; - gPEClockFrequencyInfo.cpu_clock_rate_hz = gPEClockFrequencyInfo.cpu_frequency_hz; - - // Get real number from somewhere. - - // Set the num / den pairs from the Hz values. - gPEClockFrequencyInfo.bus_clock_rate_num = gPEClockFrequencyInfo.bus_clock_rate_hz; - gPEClockFrequencyInfo.bus_clock_rate_den = 1; - - gPEClockFrequencyInfo.bus_to_cpu_rate_num = - (2 * gPEClockFrequencyInfo.cpu_clock_rate_hz) / gPEClockFrequencyInfo.bus_clock_rate_hz; - gPEClockFrequencyInfo.bus_to_cpu_rate_den = 2; - - gPEClockFrequencyInfo.bus_to_dec_rate_num = 1; - gPEClockFrequencyInfo.bus_to_dec_rate_den = - gPEClockFrequencyInfo.bus_clock_rate_hz / gPEClockFrequencyInfo.dec_clock_rate_hz; + // Clear the gPEClockFrequencyInfo struct + bzero((void *)&gPEClockFrequencyInfo, sizeof(clock_frequency_info_t)); + + // Start with default values. + gPEClockFrequencyInfo.timebase_frequency_hz = 1000000000; + gPEClockFrequencyInfo.bus_frequency_hz = 100000000; + gPEClockFrequencyInfo.cpu_frequency_hz = 300000000; + + gPEClockFrequencyInfo.bus_frequency_min_hz = gPEClockFrequencyInfo.bus_frequency_hz; + gPEClockFrequencyInfo.bus_frequency_max_hz = gPEClockFrequencyInfo.bus_frequency_hz; + gPEClockFrequencyInfo.cpu_frequency_min_hz = gPEClockFrequencyInfo.cpu_frequency_hz; + gPEClockFrequencyInfo.cpu_frequency_max_hz = gPEClockFrequencyInfo.cpu_frequency_hz; + + gPEClockFrequencyInfo.dec_clock_rate_hz = gPEClockFrequencyInfo.timebase_frequency_hz; + gPEClockFrequencyInfo.bus_clock_rate_hz = gPEClockFrequencyInfo.bus_frequency_hz; + gPEClockFrequencyInfo.cpu_clock_rate_hz = gPEClockFrequencyInfo.cpu_frequency_hz; + + // Get real number from somewhere. + + // Set the num / den pairs from the Hz values.
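[Editor's note] The pe_identify_machine() hunk above is a whitespace-only reindent, but the arithmetic it preserves is easy to miss: ratios between clocks are carried as integer num/den pairs, and the bus-to-CPU ratio is pre-scaled by 2 so that half-step multipliers survive integer division. A minimal standalone sketch of that encoding, using plain C types in place of the kernel's clock_frequency_info_t and the placeholder defaults from the hunk (300 MHz CPU, 100 MHz bus):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t cpu_hz = 300000000;	/* default cpu_frequency_hz above */
	uint64_t bus_hz = 100000000;	/* default bus_frequency_hz above */

	/* Same trick as the hunk: double the numerator before dividing and
	 * fix the denominator at 2, so a hypothetical 3.5x part encodes as
	 * 7/2 instead of truncating to 3/1. */
	uint64_t bus_to_cpu_num = (2 * cpu_hz) / bus_hz;
	uint64_t bus_to_cpu_den = 2;

	printf("bus_to_cpu = %llu/%llu = %.1fx\n",
	    (unsigned long long)bus_to_cpu_num,
	    (unsigned long long)bus_to_cpu_den,
	    (double)bus_to_cpu_num / (double)bus_to_cpu_den);	/* 6/2 = 3.0x */
	return 0;
}

As the "Get real number from somewhere" comment concedes, these defaults are stand-ins until real frequencies are supplied elsewhere.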
+ gPEClockFrequencyInfo.bus_clock_rate_num = gPEClockFrequencyInfo.bus_clock_rate_hz; + gPEClockFrequencyInfo.bus_clock_rate_den = 1; + + gPEClockFrequencyInfo.bus_to_cpu_rate_num = + (2 * gPEClockFrequencyInfo.cpu_clock_rate_hz) / gPEClockFrequencyInfo.bus_clock_rate_hz; + gPEClockFrequencyInfo.bus_to_cpu_rate_den = 2; + + gPEClockFrequencyInfo.bus_to_dec_rate_num = 1; + gPEClockFrequencyInfo.bus_to_dec_rate_den = + gPEClockFrequencyInfo.bus_clock_rate_hz / gPEClockFrequencyInfo.dec_clock_rate_hz; } diff --git a/pexpert/i386/pe_init.c b/pexpert/i386/pe_init.c index 49f9caecf..c2debbbd1 100644 --- a/pexpert/i386/pe_init.c +++ b/pexpert/i386/pe_init.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,6 +30,7 @@ * i386 platform expert initialization. */ +#include #include #include #include @@ -49,7 +50,7 @@ /* extern references */ extern void pe_identify_machine(void * args); -extern int kdb_printf(const char *format, ...) __printflike(1,2); +extern int kdb_printf(const char *format, ...) 
__printflike(1, 2); /* private globals */ PE_state_t PE_state; @@ -62,248 +63,274 @@ void *gPEEFIRuntimeServices; static boot_icon_element* norootIcon_lzss; static const uint8_t* norootClut_lzss; -int PE_initialize_console( PE_Video * info, int op ) +int +PE_initialize_console( PE_Video * info, int op ) { - static int last_console = -1; - - if (info) { - info->v_offset = 0; - info->v_length = 0; - info->v_display = GRAPHICS_MODE; - } - - switch ( op ) { - - case kPEDisableScreen: - initialize_screen(info, op); - kprintf("kPEDisableScreen %d\n", last_console); - if (!console_is_serial()) - last_console = switch_to_serial_console(); - break; - - case kPEEnableScreen: - initialize_screen(info, op); - if (info) PE_state.video = *info; - kprintf("kPEEnableScreen %d\n", last_console); - if( last_console != -1) - switch_to_old_console( last_console); - break; - - case kPEBaseAddressChange: - if (info) PE_state.video = *info; - /* fall thru */ - - default: - initialize_screen(info, op); - break; - } - - return 0; + static int last_console = -1; + + if (info) { + info->v_offset = 0; + info->v_length = 0; + info->v_display = GRAPHICS_MODE; + } + + switch (op) { + case kPEDisableScreen: + initialize_screen(info, op); + kprintf("kPEDisableScreen %d\n", last_console); + if (!console_is_serial()) { + last_console = switch_to_serial_console(); + } + break; + + case kPEEnableScreen: + initialize_screen(info, op); + if (info) { + PE_state.video = *info; + } + kprintf("kPEEnableScreen %d\n", last_console); + if (last_console != -1) { + switch_to_old_console( last_console); + } + break; + + case kPEBaseAddressChange: + if (info) { + PE_state.video = *info; + } + /* fall thru */ + + default: + initialize_screen(info, op); + break; + } + + return 0; } -void PE_init_iokit(void) +void +PE_init_iokit(void) { - enum { kMaxBootVar = 128 }; - - boolean_t bootClutInitialized = FALSE; - boolean_t noroot_rle_Initialized = FALSE; - - DTEntry entry; - unsigned int size; - uint32_t *map; + enum { kMaxBootVar = 128 }; + + boolean_t bootClutInitialized = FALSE; + boolean_t noroot_rle_Initialized = FALSE; + + DTEntry entry; + unsigned int size; + uint32_t *map; boot_progress_element *bootPict; - norootIcon_lzss = NULL; - norootClut_lzss = NULL; - - PE_init_kprintf(TRUE); - PE_init_printf(TRUE); - - kprintf("Kernel boot args: '%s'\n", PE_boot_args()); - - /* - * Fetch the CLUT and the noroot image. 
- */ - - if( kSuccess == DTLookupEntry(NULL, "/chosen/memory-map", &entry)) { - if( kSuccess == DTGetProperty(entry, "BootCLUT", (void **) &map, &size)) { - if (sizeof(appleClut8) <= map[1]) { - bcopy( (void *)ml_static_ptovirt(map[0]), appleClut8, sizeof(appleClut8) ); - bootClutInitialized = TRUE; - } - } - - if( kSuccess == DTGetProperty(entry, "Pict-FailedBoot", (void **) &map, &size)) { - bootPict = (boot_progress_element *) ml_static_ptovirt(map[0]); - default_noroot.width = bootPict->width; - default_noroot.height = bootPict->height; - default_noroot.dx = 0; - default_noroot.dy = bootPict->yOffset; - default_noroot_data = &bootPict->data[0]; - noroot_rle_Initialized = TRUE; - } - - if( kSuccess == DTGetProperty(entry, "FailedCLUT", (void **) &map, &size)) { - norootClut_lzss = (uint8_t*) ml_static_ptovirt(map[0]); - } - - if( kSuccess == DTGetProperty(entry, "FailedImage", (void **) &map, &size)) { - norootIcon_lzss = (boot_icon_element *) ml_static_ptovirt(map[0]); - if (norootClut_lzss == NULL) { - printf("ERROR: No FailedCLUT provided for noroot icon!\n"); - } - } - } - - if (!bootClutInitialized) { - bcopy( (void *) (uintptr_t) bootClut, (void *) appleClut8, sizeof(appleClut8) ); - } - - if (!noroot_rle_Initialized) { - default_noroot.width = kFailedBootWidth; - default_noroot.height = kFailedBootHeight; - default_noroot.dx = 0; - default_noroot.dy = kFailedBootOffset; - default_noroot_data = failedBootPict; - } - - /* - * Initialize the spinning wheel (progress indicator). - */ - vc_progress_initialize(&default_progress, - default_progress_data1x, - default_progress_data2x, - default_progress_data3x, - (unsigned char *) appleClut8); - - StartIOKit( PE_state.deviceTreeHead, PE_state.bootArgs, gPEEFIRuntimeServices, NULL); + norootIcon_lzss = NULL; + norootClut_lzss = NULL; + + PE_init_kprintf(TRUE); + PE_init_printf(TRUE); + + kprintf("Kernel boot args: '%s'\n", PE_boot_args()); + + /* + * Fetch the CLUT and the noroot image. + */ + + if (kSuccess == DTLookupEntry(NULL, "/chosen/memory-map", &entry)) { + if (kSuccess == DTGetProperty(entry, "BootCLUT", (void **) &map, &size)) { + if (sizeof(appleClut8) <= map[1]) { + bcopy((void *)ml_static_ptovirt(map[0]), appleClut8, sizeof(appleClut8)); + bootClutInitialized = TRUE; + } + } + + if (kSuccess == DTGetProperty(entry, "Pict-FailedBoot", (void **) &map, &size)) { + bootPict = (boot_progress_element *) ml_static_ptovirt(map[0]); + default_noroot.width = bootPict->width; + default_noroot.height = bootPict->height; + default_noroot.dx = 0; + default_noroot.dy = bootPict->yOffset; + default_noroot_data = &bootPict->data[0]; + noroot_rle_Initialized = TRUE; + } + + if (kSuccess == DTGetProperty(entry, "FailedCLUT", (void **) &map, &size)) { + norootClut_lzss = (uint8_t*) ml_static_ptovirt(map[0]); + } + + if (kSuccess == DTGetProperty(entry, "FailedImage", (void **) &map, &size)) { + norootIcon_lzss = (boot_icon_element *) ml_static_ptovirt(map[0]); + if (norootClut_lzss == NULL) { + printf("ERROR: No FailedCLUT provided for noroot icon!\n"); + } + } + } + + if (!bootClutInitialized) { + bcopy((void *) (uintptr_t) bootClut, (void *) appleClut8, sizeof(appleClut8)); + } + + if (!noroot_rle_Initialized) { + default_noroot.width = kFailedBootWidth; + default_noroot.height = kFailedBootHeight; + default_noroot.dx = 0; + default_noroot.dy = kFailedBootOffset; + default_noroot_data = failedBootPict; + } + + /* + * Initialize the spinning wheel (progress indicator). 
+ */ + vc_progress_initialize(&default_progress, + default_progress_data1x, + default_progress_data2x, + default_progress_data3x, + (unsigned char *) appleClut8); + + StartIOKit( PE_state.deviceTreeHead, PE_state.bootArgs, gPEEFIRuntimeServices, NULL); } -void PE_init_platform(boolean_t vm_initialized, void * _args) +void +PE_init_platform(boolean_t vm_initialized, void * _args) { - boot_args *args = (boot_args *)_args; - - if (PE_state.initialized == FALSE) { - PE_state.initialized = TRUE; - - // New EFI-style - PE_state.bootArgs = _args; - PE_state.deviceTreeHead = (void *) ml_static_ptovirt(args->deviceTreeP); - if (args->Video.v_baseAddr) { - PE_state.video.v_baseAddr = args->Video.v_baseAddr; // remains physical address - PE_state.video.v_rowBytes = args->Video.v_rowBytes; - PE_state.video.v_width = args->Video.v_width; - PE_state.video.v_height = args->Video.v_height; - PE_state.video.v_depth = args->Video.v_depth; - PE_state.video.v_display = args->Video.v_display; - strlcpy(PE_state.video.v_pixelFormat, "PPPPPPPP", - sizeof(PE_state.video.v_pixelFormat)); - } else { - PE_state.video.v_baseAddr = args->VideoV1.v_baseAddr; // remains physical address - PE_state.video.v_rowBytes = args->VideoV1.v_rowBytes; - PE_state.video.v_width = args->VideoV1.v_width; - PE_state.video.v_height = args->VideoV1.v_height; - PE_state.video.v_depth = args->VideoV1.v_depth; - PE_state.video.v_display = args->VideoV1.v_display; - strlcpy(PE_state.video.v_pixelFormat, "PPPPPPPP", - sizeof(PE_state.video.v_pixelFormat)); - } + boot_args *args = (boot_args *)_args; + + if (PE_state.initialized == FALSE) { + PE_state.initialized = TRUE; + + // New EFI-style + PE_state.bootArgs = _args; + PE_state.deviceTreeHead = (void *) ml_static_ptovirt(args->deviceTreeP); + if (args->Video.v_baseAddr) { + PE_state.video.v_baseAddr = args->Video.v_baseAddr;// remains physical address + PE_state.video.v_rowBytes = args->Video.v_rowBytes; + PE_state.video.v_depth = args->Video.v_depth; + PE_state.video.v_display = args->Video.v_display; + PE_state.video.v_rotate = args->Video.v_rotate; + + /* EFI doesn't have a good way of describing rotation internally, + * so it flips width and height in portrait mode. We flip it back. 
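 * [Editor's note, not part of the original patch: concretely, if the
 * firmware reports v_width = 1024 and v_height = 768 with v_rotate set
 * to kDataRotate90, the assignments below store v_width = 768 and
 * v_height = 1024.]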
*/ + if (PE_state.video.v_rotate == kDataRotate90 || + PE_state.video.v_rotate == kDataRotate270) { + PE_state.video.v_width = args->Video.v_height; + PE_state.video.v_height = args->Video.v_width; + } else { + PE_state.video.v_width = args->Video.v_width; + PE_state.video.v_height = args->Video.v_height; + } + + strlcpy(PE_state.video.v_pixelFormat, "PPPPPPPP", + sizeof(PE_state.video.v_pixelFormat)); + } else { + PE_state.video.v_baseAddr = args->VideoV1.v_baseAddr;// remains physical address + PE_state.video.v_rowBytes = args->VideoV1.v_rowBytes; + PE_state.video.v_width = args->VideoV1.v_width; + PE_state.video.v_height = args->VideoV1.v_height; + PE_state.video.v_depth = args->VideoV1.v_depth; + PE_state.video.v_display = args->VideoV1.v_display; + PE_state.video.v_rotate = kDataRotate0; /* no room for rotation info */ + strlcpy(PE_state.video.v_pixelFormat, "PPPPPPPP", + sizeof(PE_state.video.v_pixelFormat)); + } #ifdef kBootArgsFlagHiDPI - if (args->flags & kBootArgsFlagHiDPI) - PE_state.video.v_scale = kPEScaleFactor2x; - else - PE_state.video.v_scale = kPEScaleFactor1x; + if (args->flags & kBootArgsFlagHiDPI) { + PE_state.video.v_scale = kPEScaleFactor2x; + } else { + PE_state.video.v_scale = kPEScaleFactor1x; + } #else - PE_state.video.v_scale = kPEScaleFactor1x; + PE_state.video.v_scale = kPEScaleFactor1x; #endif - } - - if (!vm_initialized) { - - if (PE_state.deviceTreeHead) { - DTInit(PE_state.deviceTreeHead); - } + } - pe_identify_machine(args); - pe_init_debug(); - } + if (!vm_initialized) { + if (PE_state.deviceTreeHead) { + DTInit(PE_state.deviceTreeHead); + } + pe_identify_machine(args); + pe_init_debug(); + } } -void PE_create_console( void ) +void +PE_create_console( void ) { - if ( PE_state.video.v_display == GRAPHICS_MODE ) - PE_initialize_console( &PE_state.video, kPEGraphicsMode ); - else - PE_initialize_console( &PE_state.video, kPETextMode ); + if (PE_state.video.v_display == GRAPHICS_MODE) { + PE_initialize_console( &PE_state.video, kPEGraphicsMode ); + } else { + PE_initialize_console( &PE_state.video, kPETextMode ); + } } -int PE_current_console( PE_Video * info ) +int +PE_current_console( PE_Video * info ) { - *info = PE_state.video; + *info = PE_state.video; - return (0); + return 0; } -void PE_display_icon( __unused unsigned int flags, __unused const char * name ) +void +PE_display_icon( __unused unsigned int flags, __unused const char * name ) { - if ( norootIcon_lzss && norootClut_lzss ) { - uint32_t width = norootIcon_lzss->width; - uint32_t height = norootIcon_lzss->height; - uint32_t x = ((PE_state.video.v_width - width) / 2); - uint32_t y = ((PE_state.video.v_height - height) / 2) + norootIcon_lzss->y_offset_from_center; - - vc_display_lzss_icon(x, y, width, height, - &norootIcon_lzss->data[0], - norootIcon_lzss->data_size, - norootClut_lzss); - } - else if ( default_noroot_data ) { - vc_display_icon( &default_noroot, default_noroot_data ); - } else { - printf("ERROR: No data found for noroot icon!\n"); - } + if (norootIcon_lzss && norootClut_lzss) { + uint32_t width = norootIcon_lzss->width; + uint32_t height = norootIcon_lzss->height; + uint32_t x = ((PE_state.video.v_width - width) / 2); + uint32_t y = ((PE_state.video.v_height - height) / 2) + norootIcon_lzss->y_offset_from_center; + + vc_display_lzss_icon(x, y, width, height, + &norootIcon_lzss->data[0], + norootIcon_lzss->data_size, + norootClut_lzss); + } else if (default_noroot_data) { + vc_display_icon( &default_noroot, default_noroot_data ); + } else { + printf("ERROR: No data found for 
noroot icon!\n"); + } } boolean_t PE_get_hotkey(__unused unsigned char key) { - return (FALSE); + return FALSE; } static timebase_callback_func gTimebaseCallback; -void PE_register_timebase_callback(timebase_callback_func callback) +void +PE_register_timebase_callback(timebase_callback_func callback) { - gTimebaseCallback = callback; - - PE_call_timebase_callback(); + gTimebaseCallback = callback; + + PE_call_timebase_callback(); } -void PE_call_timebase_callback(void) +void +PE_call_timebase_callback(void) { - struct timebase_freq_t timebase_freq; - unsigned long num, den, cnt; - - num = gPEClockFrequencyInfo.bus_clock_rate_num * gPEClockFrequencyInfo.bus_to_dec_rate_num; - den = gPEClockFrequencyInfo.bus_clock_rate_den * gPEClockFrequencyInfo.bus_to_dec_rate_den; - - cnt = 2; - while (cnt <= den) { - if ((num % cnt) || (den % cnt)) { - cnt++; - continue; - } - - num /= cnt; - den /= cnt; - } - - timebase_freq.timebase_num = num; - timebase_freq.timebase_den = den; - - if (gTimebaseCallback) gTimebaseCallback(&timebase_freq); + struct timebase_freq_t timebase_freq; + unsigned long num, den, cnt; + + num = gPEClockFrequencyInfo.bus_clock_rate_num * gPEClockFrequencyInfo.bus_to_dec_rate_num; + den = gPEClockFrequencyInfo.bus_clock_rate_den * gPEClockFrequencyInfo.bus_to_dec_rate_den; + + cnt = 2; + while (cnt <= den) { + if ((num % cnt) || (den % cnt)) { + cnt++; + continue; + } + + num /= cnt; + den /= cnt; + } + + timebase_freq.timebase_num = num; + timebase_freq.timebase_den = den; + + if (gTimebaseCallback) { + gTimebaseCallback(&timebase_freq); + } } /* @@ -312,8 +339,8 @@ void PE_call_timebase_callback(void) static int PE_stub_poll_input(__unused unsigned int options, char * c) { - *c = 0xff; - return 1; /* 0 for success, 1 for unsupported */ + *c = 0xff; + return 1; /* 0 for success, 1 for unsupported */ } /* @@ -322,17 +349,18 @@ PE_stub_poll_input(__unused unsigned int options, char * c) * with their polled-mode input function. 
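 *
 * [Editor's note, not part of the original patch: a driver would
 * override the stub along these lines; my_poll, input_ready() and
 * read_one_char() are hypothetical names used only for illustration:
 *
 *   static int
 *   my_poll(unsigned int options, char *c)
 *   {
 *       if (!input_ready()) {
 *           return 1;       // 1: nothing to deliver / unsupported
 *       }
 *       *c = read_one_char();
 *       return 0;           // 0: success, *c holds the character
 *   }
 *
 *   PE_poll_input = my_poll;
 * ]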
*/ int (*PE_poll_input)(unsigned int options, char * c) - = PE_stub_poll_input; + = PE_stub_poll_input; boolean_t PE_reboot_on_panic(void) { boot_args *args = (boot_args *)PE_state.bootArgs; - if (args->flags & kBootArgsFlagRebootOnPanic) + if (args->flags & kBootArgsFlagRebootOnPanic) { return TRUE; - else + } else { return FALSE; + } } void @@ -352,8 +380,9 @@ PE_i_can_has_debugger(uint32_t *debug_flags) #if CONFIG_CSR if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) { - if (debug_flags) + if (debug_flags) { *debug_flags = 0; + } return FALSE; } #endif @@ -406,10 +435,10 @@ PE_update_panicheader_nestedpanic() panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_NESTED_PANIC; /* Usually indicative of corruption in the panic region */ - if(!(((panic_info->mph_stackshot_offset == 0) && (panic_info->mph_stackshot_len == 0)) || - ((panic_info->mph_stackshot_offset != 0) && (panic_info->mph_stackshot_len != 0)))) { + if (!(((panic_info->mph_stackshot_offset == 0) && (panic_info->mph_stackshot_len == 0)) || + ((panic_info->mph_stackshot_offset != 0) && (panic_info->mph_stackshot_len != 0)))) { kdb_printf("panic_info contains invalid stackshot metadata: mph_stackshot_offset 0x%x mph_stackshot_len 0x%x\n", - panic_info->mph_stackshot_offset, panic_info->mph_stackshot_len); + panic_info->mph_stackshot_offset, panic_info->mph_stackshot_len); } /* @@ -422,7 +451,7 @@ PE_update_panicheader_nestedpanic() /* Usually indicative of corruption in the panic region */ if (panic_info->mph_other_log_len != 0) { kdb_printf("panic_info contains invalid other log metadata (zero offset but non-zero length), length was 0x%x, zeroing value\n", - panic_info->mph_other_log_len); + panic_info->mph_other_log_len); panic_info->mph_other_log_len = 0; } } diff --git a/pexpert/i386/pe_interrupt.c b/pexpert/i386/pe_interrupt.c index 19b0e003f..df8fa6554 100644 --- a/pexpert/i386/pe_interrupt.c +++ b/pexpert/i386/pe_interrupt.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
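[Editor's note] The stackshot check in the PE_update_panicheader_nestedpanic() hunk above encodes one invariant: mph_stackshot_offset and mph_stackshot_len must be zero together or non-zero together. The double-negated form reads awkwardly; a standalone sketch of an equivalent test (the function name here is illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invalid exactly when one field is zero and the other is not, which is
 * the same condition as the hunk's
 * !((off == 0 && len == 0) || (off != 0 && len != 0)). */
static bool
stackshot_meta_invalid(uint32_t off, uint32_t len)
{
	return (off == 0) != (len == 0);
}

int
main(void)
{
	printf("%d\n", stackshot_meta_invalid(0, 0));     /* 0: both unset, consistent */
	printf("%d\n", stackshot_meta_invalid(64, 512));  /* 0: both set, consistent */
	printf("%d\n", stackshot_meta_invalid(64, 0));    /* 1: corrupt metadata */
	return 0;
}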
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -37,47 +37,48 @@ void PE_incoming_interrupt(int); struct i386_interrupt_handler { - IOInterruptHandler handler; - void *nub; - void *target; - void *refCon; + IOInterruptHandler handler; + void *nub; + void *target; + void *refCon; }; typedef struct i386_interrupt_handler i386_interrupt_handler_t; -i386_interrupt_handler_t PE_interrupt_handler; +i386_interrupt_handler_t PE_interrupt_handler; void PE_incoming_interrupt(int interrupt) { - i386_interrupt_handler_t *vector; + i386_interrupt_handler_t *vector; vector = &PE_interrupt_handler; #if CONFIG_DTRACE && DEVELOPMENT - DTRACE_INT5(interrupt_start, void *, vector->nub, int, 0, - void *, vector->target, IOInterruptHandler, vector->handler, - void *, vector->refCon); + DTRACE_INT5(interrupt_start, void *, vector->nub, int, 0, + void *, vector->target, IOInterruptHandler, vector->handler, + void *, vector->refCon); #endif vector->handler(vector->target, NULL, vector->nub, interrupt); #if CONFIG_DTRACE && DEVELOPMENT - DTRACE_INT5(interrupt_complete, void *, vector->nub, int, 0, - void *, vector->target, IOInterruptHandler, vector->handler, - void *, vector->refCon); + DTRACE_INT5(interrupt_complete, void *, vector->nub, int, 0, + void *, vector->target, IOInterruptHandler, vector->handler, + void *, vector->refCon); #endif } -void PE_install_interrupt_handler(void *nub, - __unused int source, - void *target, - IOInterruptHandler handler, - void *refCon) +void +PE_install_interrupt_handler(void *nub, + __unused int source, + void *target, + IOInterruptHandler handler, + void *refCon) { - i386_interrupt_handler_t *vector; + i386_interrupt_handler_t *vector; vector = &PE_interrupt_handler; diff --git a/pexpert/i386/pe_kprintf.c b/pexpert/i386/pe_kprintf.c index d1d51da43..63e10d9f8 100644 --- a/pexpert/i386/pe_kprintf.c +++ b/pexpert/i386/pe_kprintf.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
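[Editor's note] pe_interrupt.c above keeps exactly one handler record (PE_interrupt_handler), and PE_incoming_interrupt() forwards every interrupt through it. A minimal standalone model of that single-slot dispatch, with invented names and a simplified handler signature (the kernel's IOInterruptHandler takes different arguments):

#include <stddef.h>
#include <stdio.h>

typedef void (*handler_fn)(void *target, void *refcon, int source);

/* One global slot, mirroring the single PE_interrupt_handler record. */
static struct {
	handler_fn handler;
	void      *target;
	void      *refcon;
} g_slot;

static void
install(handler_fn h, void *target, void *refcon)
{
	g_slot.handler = h;
	g_slot.target  = target;
	g_slot.refcon  = refcon;
}

static void
incoming(int source)
{
	/* The real code dispatches unconditionally; checking first makes
	 * the sketch safe to run before install(). */
	if (g_slot.handler) {
		g_slot.handler(g_slot.target, g_slot.refcon, source);
	}
}

static void
demo_handler(void *target, void *refcon, int source)
{
	(void)target;
	(void)refcon;
	printf("interrupt %d dispatched\n", source);
}

int
main(void)
{
	install(demo_handler, NULL, NULL);
	incoming(3);
	return 0;
}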
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -44,7 +44,7 @@ /* Globals */ void (*PE_kputc)(char c); -#if DEVELOPMENT || DEBUG +#if DEVELOPMENT || DEBUG /* DEBUG kernel starts with true serial, but * may later disable or switch to video * console */ @@ -55,29 +55,34 @@ SECURITY_READ_ONLY_LATE(unsigned int) disable_serial_output = TRUE; decl_simple_lock_data(static, kprintf_lock) -void PE_init_kprintf(boolean_t vm_initialized) +void +PE_init_kprintf(boolean_t vm_initialized) { - unsigned int boot_arg; + unsigned int boot_arg; - if (PE_state.initialized == FALSE) + if (PE_state.initialized == FALSE) { panic("Platform Expert not initialized"); + } if (!vm_initialized) { unsigned int new_disable_serial_output = TRUE; simple_lock_init(&kprintf_lock, 0); - if (PE_parse_boot_argn("debug", &boot_arg, sizeof (boot_arg))) - if (boot_arg & DB_KPRT) + if (PE_parse_boot_argn("debug", &boot_arg, sizeof(boot_arg))) { + if (boot_arg & DB_KPRT) { new_disable_serial_output = FALSE; + } + } /* If we are newly enabling serial, make sure we only * call pal_serial_init() if our previous state was * not enabled */ - if (!new_disable_serial_output && (!disable_serial_output || pal_serial_init())) + if (!new_disable_serial_output && (!disable_serial_output || pal_serial_init())) { PE_kputc = pal_serial_putc; - else + } else { PE_kputc = cnputc; + } disable_serial_output = new_disable_serial_output; } @@ -89,23 +94,25 @@ void PE_init_kprintf(boolean_t vm_initialized) #endif #ifdef MP_DEBUG -static void _kprintf(const char *format, ...) +static void +_kprintf(const char *format, ...) { va_list listp; - va_start(listp, format); - _doprnt(format, &listp, PE_kputc, 16); - va_end(listp); + va_start(listp, format); + _doprnt(format, &listp, PE_kputc, 16); + va_end(listp); } -#define MP_DEBUG_KPRINTF(x...) _kprintf(x) +#define MP_DEBUG_KPRINTF(x...) _kprintf(x) #else /* MP_DEBUG */ #define MP_DEBUG_KPRINTF(x...) #endif /* MP_DEBUG */ static int cpu_last_locked = 0; -__attribute__((noinline,not_tail_called)) -void kprintf(const char *fmt, ...) +__attribute__((noinline, not_tail_called)) +void +kprintf(const char *fmt, ...) { va_list listp; va_list listp2; @@ -114,7 +121,8 @@ void kprintf(const char *fmt, ...) if (!disable_serial_output) { boolean_t early = FALSE; - if (rdmsr64(MSR_IA32_GS_BASE) == 0) { + uint64_t gsbase = rdmsr64(MSR_IA32_GS_BASE); + if (gsbase == EARLY_GSBASE_MAGIC || gsbase == 0) { early = TRUE; } /* If PE_kputc has not yet been initialized, don't @@ -142,7 +150,7 @@ void kprintf(const char *fmt, ...) pal_preemption_assert(); - while (!simple_lock_try(&kprintf_lock)) { + while (!simple_lock_try(&kprintf_lock, LCK_GRP_NULL)) { (void) cpu_signal_handler(NULL); } @@ -164,9 +172,7 @@ void kprintf(const char *fmt, ...) os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp2, caller); } va_end(listp2); - - } - else { + } else { if (ml_get_interrupts_enabled()) { va_start(listp, fmt); os_log_with_args(OS_LOG_DEFAULT, OS_LOG_TYPE_DEFAULT, fmt, listp, caller); diff --git a/pexpert/i386/pe_serial.c b/pexpert/i386/pe_serial.c index e35457dde..0c5abf15c 100644 --- a/pexpert/i386/pe_serial.c +++ b/pexpert/i386/pe_serial.c @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). 
You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -36,12 +36,12 @@ #include struct pe_serial_functions { - void (*uart_init) (void); - void (*uart_set_baud_rate) (int unit, uint32_t baud_rate); - int (*tr0) (void); - void (*td0) (int c); - int (*rr0) (void); - int (*rd0) (void); + void (*uart_init) (void); + void (*uart_set_baud_rate) (int unit, uint32_t baud_rate); + int (*tr0) (void); + void (*td0) (int c); + int (*rr0) (void); + int (*rd0) (void); }; static struct pe_serial_functions *gPESF; @@ -52,7 +52,7 @@ static unsigned int legacy_uart_enabled = 0; /* 1 Legacy IO based UART is suppor static boolean_t lpss_uart_supported = 0; /* 1 if LPSS UART is supported on platform */ static unsigned int lpss_uart_enabled = 0; /* 1 if it is LPSS UART is in D0 state */ -static void lpss_uart_re_init (void); +static void lpss_uart_re_init(void); static boolean_t pcie_uart_enabled = 0; /* 1 if PCIe UART is supported on platform */ @@ -71,44 +71,44 @@ static unsigned uart_baud_rate = DEFAULT_UART_BAUD_RATE; #define IO_READ(r) inb(LEGACY_UART_PORT_ADDR + UART_##r) enum { - COM1_PORT_ADDR = 0x3f8, - COM2_PORT_ADDR = 0x2f8 + COM1_PORT_ADDR = 0x3f8, + COM2_PORT_ADDR = 0x2f8 }; enum { - UART_RBR = 0, /* receive buffer Register (R) */ - UART_THR = 0, /* transmit holding register (W) */ - UART_DLL = 0, /* DLAB = 1, divisor latch (LSB) */ - UART_IER = 1, /* interrupt enable register */ - UART_DLM = 1, /* DLAB = 1, divisor latch (MSB) */ - UART_IIR = 2, /* interrupt ident register (R) */ - UART_FCR = 2, /* fifo control register (W) */ - UART_LCR = 3, /* line control register */ - UART_MCR = 4, /* modem control register */ - UART_LSR = 5, /* line status register */ - UART_MSR = 6, /* modem status register */ - UART_SCR = 7 /* scratch register */ + UART_RBR = 0, /* receive buffer Register (R) */ + UART_THR = 0, /* transmit holding register (W) */ + UART_DLL = 0, /* DLAB = 1, divisor latch (LSB) */ + UART_IER = 1, /* interrupt enable register */ + UART_DLM = 1, /* DLAB = 1, divisor latch (MSB) */ + UART_IIR = 2, /* interrupt ident register (R) */ + UART_FCR = 2, /* fifo control register (W) */ + UART_LCR = 3, /* line control register */ + UART_MCR = 4, /* modem control register */ + UART_LSR = 5, /* line status register */ + UART_MSR = 6, /* modem status register */ + UART_SCR = 7 /* scratch register */ }; enum { - UART_LCR_8BITS = 0x03, - UART_LCR_DLAB = 0x80 + UART_LCR_8BITS = 0x03, + UART_LCR_DLAB = 0x80 }; enum { - UART_MCR_DTR = 0x01, - UART_MCR_RTS = 0x02, - UART_MCR_OUT1 = 0x04, - UART_MCR_OUT2 = 0x08, - UART_MCR_LOOP = 0x10 + UART_MCR_DTR = 0x01, + UART_MCR_RTS = 0x02, + UART_MCR_OUT1 = 0x04, + UART_MCR_OUT2 = 0x08, + UART_MCR_LOOP = 0x10 }; enum { - UART_LSR_DR = 0x01, - UART_LSR_OE = 0x02, - UART_LSR_PE = 
0x04, - UART_LSR_FE = 0x08, - UART_LSR_THRE = 0x20 + UART_LSR_DR = 0x01, + UART_LSR_OE = 0x02, + UART_LSR_PE = 0x04, + UART_LSR_FE = 0x08, + UART_LSR_THRE = 0x20 }; enum { @@ -119,101 +119,106 @@ enum { static int legacy_uart_probe( void ) { - /* Verify that the Scratch Register is accessible */ + /* Verify that the Scratch Register is accessible */ - IO_WRITE( SCR, 0x5a ); - if (IO_READ(SCR) != 0x5a) return 0; - IO_WRITE( SCR, 0xa5 ); - if (IO_READ(SCR) != 0xa5) return 0; - return 1; + IO_WRITE( SCR, 0x5a ); + if (IO_READ(SCR) != 0x5a) { + return 0; + } + IO_WRITE( SCR, 0xa5 ); + if (IO_READ(SCR) != 0xa5) { + return 0; + } + return 1; } static void legacy_uart_set_baud_rate( __unused int unit, uint32_t baud_rate ) { - const unsigned char lcr = IO_READ( LCR ); - unsigned long div; - - if (baud_rate == 0) baud_rate = 9600; - div = LEGACY_UART_CLOCK / 16 / baud_rate; - IO_WRITE( LCR, lcr | UART_LCR_DLAB ); - IO_WRITE( DLM, (unsigned char)(div >> 8) ); - IO_WRITE( DLL, (unsigned char) div ); - IO_WRITE( LCR, lcr & ~UART_LCR_DLAB); + const unsigned char lcr = IO_READ( LCR ); + unsigned long div; + + if (baud_rate == 0) { + baud_rate = 9600; + } + div = LEGACY_UART_CLOCK / 16 / baud_rate; + IO_WRITE( LCR, lcr | UART_LCR_DLAB ); + IO_WRITE( DLM, (unsigned char)(div >> 8)); + IO_WRITE( DLL, (unsigned char) div ); + IO_WRITE( LCR, lcr & ~UART_LCR_DLAB); } static int legacy_uart_tr0( void ) { - return (IO_READ(LSR) & UART_LSR_THRE); + return IO_READ(LSR) & UART_LSR_THRE; } static void legacy_uart_td0( int c ) { - IO_WRITE( THR, c ); + IO_WRITE( THR, c ); } static void legacy_uart_init( void ) { - /* Disable hardware interrupts */ + /* Disable hardware interrupts */ - IO_WRITE( MCR, 0 ); - IO_WRITE( IER, 0 ); + IO_WRITE( MCR, 0 ); + IO_WRITE( IER, 0 ); - /* Disable FIFO's for 16550 devices */ + /* Disable FIFO's for 16550 devices */ - IO_WRITE( FCR, 0 ); + IO_WRITE( FCR, 0 ); - /* Set for 8-bit, no parity, DLAB bit cleared */ + /* Set for 8-bit, no parity, DLAB bit cleared */ - IO_WRITE( LCR, UART_LCR_8BITS ); + IO_WRITE( LCR, UART_LCR_8BITS ); - /* Set baud rate */ + /* Set baud rate */ - gPESF->uart_set_baud_rate ( 0, uart_baud_rate ); + gPESF->uart_set_baud_rate( 0, uart_baud_rate ); - /* Assert DTR# and RTS# lines (OUT2?) */ + /* Assert DTR# and RTS# lines (OUT2?) 
*/ - IO_WRITE( MCR, UART_MCR_DTR | UART_MCR_RTS ); + IO_WRITE( MCR, UART_MCR_DTR | UART_MCR_RTS ); - /* Clear any garbage in the input buffer */ + /* Clear any garbage in the input buffer */ - IO_READ( RBR ); + IO_READ( RBR ); - uart_initted = 1; + uart_initted = 1; } static int -legacy_uart_rr0( void ) +legacy_uart_rr0( void ) { - unsigned char lsr; + unsigned char lsr; - lsr = IO_READ( LSR ); + lsr = IO_READ( LSR ); - if ( lsr & (UART_LSR_FE | UART_LSR_PE | UART_LSR_OE) ) - { - IO_READ( RBR ); /* discard */ - return 0; - } + if (lsr & (UART_LSR_FE | UART_LSR_PE | UART_LSR_OE)) { + IO_READ( RBR ); /* discard */ + return 0; + } - return (lsr & UART_LSR_DR); + return lsr & UART_LSR_DR; } static int -legacy_uart_rd0( void ) +legacy_uart_rd0( void ) { - return IO_READ( RBR ); + return IO_READ( RBR ); } static struct pe_serial_functions legacy_uart_serial_functions = { - .uart_init = legacy_uart_init, - .uart_set_baud_rate = legacy_uart_set_baud_rate, - .tr0 = legacy_uart_tr0, - .td0 = legacy_uart_td0, - .rr0 = legacy_uart_rr0, - .rd0 = legacy_uart_rd0 + .uart_init = legacy_uart_init, + .uart_set_baud_rate = legacy_uart_set_baud_rate, + .tr0 = legacy_uart_tr0, + .td0 = legacy_uart_td0, + .rr0 = legacy_uart_rr0, + .rd0 = legacy_uart_rd0 }; // ============================================================================= @@ -228,142 +233,146 @@ static struct pe_serial_functions legacy_uart_serial_functions = { #define MMIO_READ(r) ml_phys_read_word(mmio_uart_base + MMIO_UART_##r) enum { - MMIO_UART_RBR = 0x0, /* receive buffer Register (R) */ - MMIO_UART_THR = 0x0, /* transmit holding register (W) */ - MMIO_UART_DLL = 0x0, /* DLAB = 1, divisor latch (LSB) */ - MMIO_UART_IER = 0x4, /* interrupt enable register */ - MMIO_UART_DLM = 0x4, /* DLAB = 1, divisor latch (MSB) */ - MMIO_UART_FCR = 0x8, /* fifo control register (W) */ - MMIO_UART_LCR = 0xc, /* line control register */ - MMIO_UART_MCR = 0x10, /* modem control register */ - MMIO_UART_LSR = 0x14, /* line status register */ - MMIO_UART_SCR = 0x1c, /* scratch register */ - MMIO_UART_CLK = 0x200, /* clocks register */ - MMIO_UART_RST = 0x204 /* Reset register */ + MMIO_UART_RBR = 0x0, /* receive buffer Register (R) */ + MMIO_UART_THR = 0x0, /* transmit holding register (W) */ + MMIO_UART_DLL = 0x0, /* DLAB = 1, divisor latch (LSB) */ + MMIO_UART_IER = 0x4, /* interrupt enable register */ + MMIO_UART_DLM = 0x4, /* DLAB = 1, divisor latch (MSB) */ + MMIO_UART_FCR = 0x8, /* fifo control register (W) */ + MMIO_UART_LCR = 0xc, /* line control register */ + MMIO_UART_MCR = 0x10, /* modem control register */ + MMIO_UART_LSR = 0x14, /* line status register */ + MMIO_UART_SCR = 0x1c, /* scratch register */ + MMIO_UART_CLK = 0x200, /* clocks register */ + MMIO_UART_RST = 0x204 /* Reset register */ }; static vm_offset_t mmio_uart_base = 0; - + static int mmio_uart_present( void ) { - MMIO_WRITE( SCR, 0x5a ); - if (MMIO_READ(SCR) != 0x5a) return 0; - MMIO_WRITE( SCR, 0xa5 ); - if (MMIO_READ(SCR) != 0xa5) return 0; - return 1; + MMIO_WRITE( SCR, 0x5a ); + if (MMIO_READ(SCR) != 0x5a) { + return 0; + } + MMIO_WRITE( SCR, 0xa5 ); + if (MMIO_READ(SCR) != 0xa5) { + return 0; + } + return 1; } static int mmio_uart_probe( void ) { - unsigned new_mmio_uart_base = 0; - - // if specified, mmio_uart overrides all probing - if (PE_parse_boot_argn("mmio_uart", &new_mmio_uart_base, sizeof (new_mmio_uart_base))) - { - // mmio_uart=0 will disable mmio_uart support - if (new_mmio_uart_base == 0) { - return 0; - } - - mmio_uart_base = new_mmio_uart_base; - return 1; - } - - // 
probe the two possible MMIO_UART2 addresses - mmio_uart_base = MMIO_UART2_BASE; - if (mmio_uart_present()) { - return 1; - } - - mmio_uart_base = MMIO_UART2_BASE_LEGACY; - if (mmio_uart_present()) { - return 1; - } - - // no mmio uart found - return 0; + unsigned new_mmio_uart_base = 0; + + // if specified, mmio_uart overrides all probing + if (PE_parse_boot_argn("mmio_uart", &new_mmio_uart_base, sizeof(new_mmio_uart_base))) { + // mmio_uart=0 will disable mmio_uart support + if (new_mmio_uart_base == 0) { + return 0; + } + + mmio_uart_base = new_mmio_uart_base; + return 1; + } + + // probe the two possible MMIO_UART2 addresses + mmio_uart_base = MMIO_UART2_BASE; + if (mmio_uart_present()) { + return 1; + } + + mmio_uart_base = MMIO_UART2_BASE_LEGACY; + if (mmio_uart_present()) { + return 1; + } + + // no mmio uart found + return 0; } static void mmio_uart_set_baud_rate( __unused int unit, __unused uint32_t baud_rate ) { - const unsigned char lcr = MMIO_READ( LCR ); - unsigned long div; + const unsigned char lcr = MMIO_READ( LCR ); + unsigned long div; - if (baud_rate == 0) baud_rate = 9600; - div = LEGACY_UART_CLOCK / 16 / baud_rate; + if (baud_rate == 0) { + baud_rate = 9600; + } + div = LEGACY_UART_CLOCK / 16 / baud_rate; - MMIO_WRITE( LCR, lcr | UART_LCR_DLAB ); - MMIO_WRITE( DLM, (unsigned char)(div >> 8) ); - MMIO_WRITE( DLL, (unsigned char) div ); - MMIO_WRITE( LCR, lcr & ~UART_LCR_DLAB); + MMIO_WRITE( LCR, lcr | UART_LCR_DLAB ); + MMIO_WRITE( DLM, (unsigned char)(div >> 8)); + MMIO_WRITE( DLL, (unsigned char) div ); + MMIO_WRITE( LCR, lcr & ~UART_LCR_DLAB); } static int mmio_uart_tr0( void ) { - return (MMIO_READ(LSR) & UART_LSR_THRE); + return MMIO_READ(LSR) & UART_LSR_THRE; } static void mmio_uart_td0( int c ) { - MMIO_WRITE( THR, c ); + MMIO_WRITE( THR, c ); } static void mmio_uart_init( void ) { - /* Disable hardware interrupts */ + /* Disable hardware interrupts */ - MMIO_WRITE( MCR, 0 ); - MMIO_WRITE( IER, 0 ); + MMIO_WRITE( MCR, 0 ); + MMIO_WRITE( IER, 0 ); - /* Disable FIFO's for 16550 devices */ + /* Disable FIFO's for 16550 devices */ - MMIO_WRITE( FCR, 0 ); + MMIO_WRITE( FCR, 0 ); - /* Set for 8-bit, no parity, DLAB bit cleared */ + /* Set for 8-bit, no parity, DLAB bit cleared */ - MMIO_WRITE( LCR, UART_LCR_8BITS ); + MMIO_WRITE( LCR, UART_LCR_8BITS ); - /* Leave baud rate as set by firmware unless serialbaud boot-arg overrides */ + /* Leave baud rate as set by firmware unless serialbaud boot-arg overrides */ - if (uart_baud_rate != DEFAULT_UART_BAUD_RATE) - { - gPESF->uart_set_baud_rate ( 0, uart_baud_rate ); - } + if (uart_baud_rate != DEFAULT_UART_BAUD_RATE) { + gPESF->uart_set_baud_rate( 0, uart_baud_rate ); + } - /* Assert DTR# and RTS# lines (OUT2?) */ + /* Assert DTR# and RTS# lines (OUT2?) 
*/ - MMIO_WRITE( MCR, UART_MCR_DTR | UART_MCR_RTS ); + MMIO_WRITE( MCR, UART_MCR_DTR | UART_MCR_RTS ); - /* Clear any garbage in the input buffer */ + /* Clear any garbage in the input buffer */ - MMIO_READ( RBR ); + MMIO_READ( RBR ); - uart_initted = 1; + uart_initted = 1; } static int -mmio_uart_rr0( void ) +mmio_uart_rr0( void ) { - unsigned char lsr; + unsigned char lsr; + + lsr = MMIO_READ( LSR ); - lsr = MMIO_READ( LSR ); + if (lsr & (UART_LSR_FE | UART_LSR_PE | UART_LSR_OE)) { + MMIO_READ( RBR ); /* discard */ + return 0; + } - if ( lsr & (UART_LSR_FE | UART_LSR_PE | UART_LSR_OE) ) - { - MMIO_READ( RBR ); /* discard */ - return 0; - } - - return (lsr & UART_LSR_DR); + return lsr & UART_LSR_DR; } -void lpss_uart_enable( boolean_t on_off ) +void +lpss_uart_enable( boolean_t on_off ) { unsigned int pmcs_reg; @@ -371,7 +380,7 @@ void lpss_uart_enable( boolean_t on_off ) return; } - pmcs_reg = ml_phys_read_byte (PCI_UART2 + 0x84); + pmcs_reg = ml_phys_read_byte(PCI_UART2 + 0x84); if (on_off == FALSE) { pmcs_reg |= 0x03; lpss_uart_enabled = 0; @@ -379,64 +388,65 @@ void lpss_uart_enable( boolean_t on_off ) pmcs_reg &= ~(0x03); } - ml_phys_write_byte (PCI_UART2 + 0x84, pmcs_reg); - pmcs_reg = ml_phys_read_byte (PCI_UART2 + 0x84); - + ml_phys_write_byte(PCI_UART2 + 0x84, pmcs_reg); + pmcs_reg = ml_phys_read_byte(PCI_UART2 + 0x84); + if (on_off == TRUE) { lpss_uart_re_init(); lpss_uart_enabled = 1; } } -static void lpss_uart_re_init( void ) +static void +lpss_uart_re_init( void ) { uint32_t register_read; - - MMIO_WRITE (RST, 0x7); /* LPSS UART2 controller out of reset */ - register_read = MMIO_READ (RST); - MMIO_WRITE (LCR, UART_LCR_DLAB); /* Set DLAB bit to enable reading/writing of DLL, DLH */ - register_read = MMIO_READ (LCR); + MMIO_WRITE(RST, 0x7); /* LPSS UART2 controller out of reset */ + register_read = MMIO_READ(RST); - MMIO_WRITE (DLL, 1); /* Divisor Latch Low Register */ - register_read = MMIO_READ (DLL); + MMIO_WRITE(LCR, UART_LCR_DLAB); /* Set DLAB bit to enable reading/writing of DLL, DLH */ + register_read = MMIO_READ(LCR); - MMIO_WRITE (DLM, 0); /* Divisor Latch High Register */ - register_read = MMIO_READ (DLM); + MMIO_WRITE(DLL, 1); /* Divisor Latch Low Register */ + register_read = MMIO_READ(DLL); - MMIO_WRITE (FCR, 1); /* Enable FIFO */ - register_read = MMIO_READ (FCR); + MMIO_WRITE(DLM, 0); /* Divisor Latch High Register */ + register_read = MMIO_READ(DLM); - MMIO_WRITE (LCR, UART_LCR_8BITS); /* Set 8 bits, clear DLAB */ - register_read = MMIO_READ (LCR); + MMIO_WRITE(FCR, 1); /* Enable FIFO */ + register_read = MMIO_READ(FCR); - MMIO_WRITE (MCR, UART_MCR_RTS); /* Request to send */ - register_read = MMIO_READ (MCR); + MMIO_WRITE(LCR, UART_LCR_8BITS); /* Set 8 bits, clear DLAB */ + register_read = MMIO_READ(LCR); - MMIO_WRITE (CLK, UART_CLK_125M_1); /* 1.25M Clock speed */ - register_read = MMIO_READ (CLK); + MMIO_WRITE(MCR, UART_MCR_RTS); /* Request to send */ + register_read = MMIO_READ(MCR); - MMIO_WRITE (CLK, UART_CLK_125M_2); /* 1.25M Clock speed */ - register_read = MMIO_READ (CLK); + MMIO_WRITE(CLK, UART_CLK_125M_1); /* 1.25M Clock speed */ + register_read = MMIO_READ(CLK); + + MMIO_WRITE(CLK, UART_CLK_125M_2); /* 1.25M Clock speed */ + register_read = MMIO_READ(CLK); } static int -mmio_uart_rd0( void ) +mmio_uart_rd0( void ) { - return MMIO_READ( RBR ); + return MMIO_READ( RBR ); } static struct pe_serial_functions mmio_uart_serial_functions = { - .uart_init = mmio_uart_init, - .uart_set_baud_rate = mmio_uart_set_baud_rate, - .tr0 = mmio_uart_tr0, - .td0 = 
mmio_uart_td0, - .rr0 = mmio_uart_rr0, - .rd0 = mmio_uart_rd0 + .uart_init = mmio_uart_init, + .uart_set_baud_rate = mmio_uart_set_baud_rate, + .tr0 = mmio_uart_tr0, + .td0 = mmio_uart_td0, + .rr0 = mmio_uart_rr0, + .rd0 = mmio_uart_rd0 }; // ============================================================================= -// PCIE_MMIO UART +// PCIE_MMIO UART // ============================================================================= #define PCIE_MMIO_UART_BASE 0xFE410000 @@ -445,119 +455,122 @@ static struct pe_serial_functions mmio_uart_serial_functions = { #define PCIE_MMIO_READ(r) ml_phys_read_byte(pcie_mmio_uart_base + PCIE_MMIO_UART_##r) enum { - PCIE_MMIO_UART_RBR = 0x0, /* receive buffer Register (R) */ - PCIE_MMIO_UART_THR = 0x0, /* transmit holding register (W) */ - PCIE_MMIO_UART_IER = 0x1, /* interrupt enable register */ - PCIE_MMIO_UART_FCR = 0x2, /* fifo control register (W) */ - PCIE_MMIO_UART_LCR = 0x4, /* line control register */ - PCIE_MMIO_UART_MCR = 0x4, /* modem control register */ - PCIE_MMIO_UART_LSR = 0x5, /* line status register */ - PCIE_MMIO_UART_DLL = 0x8, /* DLAB = 1, divisor latch (LSB) */ - PCIE_MMIO_UART_DLM = 0x9, /* DLAB = 1, divisor latch (MSB) */ - PCIE_MMIO_UART_SCR = 0x30, /* scratch register */ + PCIE_MMIO_UART_RBR = 0x0, /* receive buffer Register (R) */ + PCIE_MMIO_UART_THR = 0x0, /* transmit holding register (W) */ + PCIE_MMIO_UART_IER = 0x1, /* interrupt enable register */ + PCIE_MMIO_UART_FCR = 0x2, /* fifo control register (W) */ + PCIE_MMIO_UART_LCR = 0x4, /* line control register */ + PCIE_MMIO_UART_MCR = 0x4, /* modem control register */ + PCIE_MMIO_UART_LSR = 0x5, /* line status register */ + PCIE_MMIO_UART_DLL = 0x8, /* DLAB = 1, divisor latch (LSB) */ + PCIE_MMIO_UART_DLM = 0x9, /* DLAB = 1, divisor latch (MSB) */ + PCIE_MMIO_UART_SCR = 0x30, /* scratch register */ }; static vm_offset_t pcie_mmio_uart_base = 0; - + static int pcie_mmio_uart_present( void ) { + PCIE_MMIO_WRITE( SCR, 0x5a ); + if (PCIE_MMIO_READ(SCR) != 0x5a) { + return 0; + } + PCIE_MMIO_WRITE( SCR, 0xa5 ); + if (PCIE_MMIO_READ(SCR) != 0xa5) { + return 0; + } - PCIE_MMIO_WRITE( SCR, 0x5a ); - if (PCIE_MMIO_READ(SCR) != 0x5a) return 0; - PCIE_MMIO_WRITE( SCR, 0xa5 ); - if (PCIE_MMIO_READ(SCR) != 0xa5) return 0; - - return 1; + return 1; } static int pcie_mmio_uart_probe( void ) { - unsigned new_pcie_mmio_uart_base = 0; - - // if specified, pcie_mmio_uart overrides all probing - if (PE_parse_boot_argn("pcie_mmio_uart", &new_pcie_mmio_uart_base, sizeof (new_pcie_mmio_uart_base))) - { - // pcie_mmio_uart=0 will disable pcie_mmio_uart support - if (new_pcie_mmio_uart_base == 0) { - return 0; - } - pcie_mmio_uart_base = new_pcie_mmio_uart_base; - return 1; - } - - pcie_mmio_uart_base = PCIE_MMIO_UART_BASE; - if (pcie_mmio_uart_present()) { - return 1; - } - - // no pcie_mmio uart found - return 0; + unsigned new_pcie_mmio_uart_base = 0; + + // if specified, pcie_mmio_uart overrides all probing + if (PE_parse_boot_argn("pcie_mmio_uart", &new_pcie_mmio_uart_base, sizeof(new_pcie_mmio_uart_base))) { + // pcie_mmio_uart=0 will disable pcie_mmio_uart support + if (new_pcie_mmio_uart_base == 0) { + return 0; + } + pcie_mmio_uart_base = new_pcie_mmio_uart_base; + return 1; + } + + pcie_mmio_uart_base = PCIE_MMIO_UART_BASE; + if (pcie_mmio_uart_present()) { + return 1; + } + + // no pcie_mmio uart found + return 0; } static void pcie_mmio_uart_set_baud_rate( __unused int unit, __unused uint32_t baud_rate ) { - const unsigned char lcr = PCIE_MMIO_READ( LCR ); - unsigned long div; + 
const unsigned char lcr = PCIE_MMIO_READ( LCR ); + unsigned long div; - if (baud_rate == 0) baud_rate = 9600; - div = LEGACY_UART_CLOCK / 16 / baud_rate; + if (baud_rate == 0) { + baud_rate = 9600; + } + div = LEGACY_UART_CLOCK / 16 / baud_rate; - PCIE_MMIO_WRITE( LCR, lcr | UART_LCR_DLAB ); - PCIE_MMIO_WRITE( DLM, (unsigned char)(div >> 8) ); - PCIE_MMIO_WRITE( DLL, (unsigned char) div ); - PCIE_MMIO_WRITE( LCR, lcr & ~UART_LCR_DLAB); + PCIE_MMIO_WRITE( LCR, lcr | UART_LCR_DLAB ); + PCIE_MMIO_WRITE( DLM, (unsigned char)(div >> 8)); + PCIE_MMIO_WRITE( DLL, (unsigned char) div ); + PCIE_MMIO_WRITE( LCR, lcr & ~UART_LCR_DLAB); } static int pcie_mmio_uart_tr0( void ) { - return (PCIE_MMIO_READ(LSR) & UART_LSR_THRE); + return PCIE_MMIO_READ(LSR) & UART_LSR_THRE; } static void pcie_mmio_uart_td0( int c ) { - PCIE_MMIO_WRITE( THR, c ); + PCIE_MMIO_WRITE( THR, c ); } static void pcie_mmio_uart_init( void ) { - uart_initted = 1; + uart_initted = 1; } static int -pcie_mmio_uart_rr0( void ) +pcie_mmio_uart_rr0( void ) { - unsigned char lsr; + unsigned char lsr; + + lsr = PCIE_MMIO_READ( LSR ); - lsr = PCIE_MMIO_READ( LSR ); + if (lsr & (UART_LSR_FE | UART_LSR_PE | UART_LSR_OE)) { + PCIE_MMIO_READ( RBR ); /* discard */ + return 0; + } - if ( lsr & (UART_LSR_FE | UART_LSR_PE | UART_LSR_OE) ) - { - PCIE_MMIO_READ( RBR ); /* discard */ - return 0; - } - - return (lsr & UART_LSR_DR); + return lsr & UART_LSR_DR; } static int -pcie_mmio_uart_rd0( void ) +pcie_mmio_uart_rd0( void ) { - return PCIE_MMIO_READ( RBR ); + return PCIE_MMIO_READ( RBR ); } static struct pe_serial_functions pcie_mmio_uart_serial_functions = { - .uart_init = pcie_mmio_uart_init, - .uart_set_baud_rate = pcie_mmio_uart_set_baud_rate, - .tr0 = pcie_mmio_uart_tr0, - .td0 = pcie_mmio_uart_td0, - .rr0 = pcie_mmio_uart_rr0, - .rd0 = pcie_mmio_uart_rd0 + .uart_init = pcie_mmio_uart_init, + .uart_set_baud_rate = pcie_mmio_uart_set_baud_rate, + .tr0 = pcie_mmio_uart_tr0, + .td0 = pcie_mmio_uart_td0, + .rr0 = pcie_mmio_uart_rr0, + .rd0 = pcie_mmio_uart_rd0 }; // ============================================================================= @@ -567,73 +580,67 @@ static struct pe_serial_functions pcie_mmio_uart_serial_functions = { int serial_init( void ) { - unsigned new_uart_baud_rate = 0; - - if (PE_parse_boot_argn("serialbaud", &new_uart_baud_rate, sizeof (new_uart_baud_rate))) - { - /* Valid divisor? */ - if (!((LEGACY_UART_CLOCK / 16) % new_uart_baud_rate)) { - uart_baud_rate = new_uart_baud_rate; - } - } - - if ( mmio_uart_probe() ) - { - gPESF = &mmio_uart_serial_functions; - gPESF->uart_init(); - lpss_uart_supported = 1; - lpss_uart_enabled = 1; - return 1; - } - else if ( legacy_uart_probe() ) - { - gPESF = &legacy_uart_serial_functions; - gPESF->uart_init(); - legacy_uart_enabled = 1; - return 1; - } - else if ( pcie_mmio_uart_probe() ) - { - gPESF = &pcie_mmio_uart_serial_functions; - gPESF->uart_init(); - pcie_uart_enabled = 1; - return 1; - } - else - { - return 0; - } + unsigned new_uart_baud_rate = 0; + if (PE_parse_boot_argn("serialbaud", &new_uart_baud_rate, sizeof(new_uart_baud_rate))) { + /* Valid divisor? 
*/ + if (!((LEGACY_UART_CLOCK / 16) % new_uart_baud_rate)) { + uart_baud_rate = new_uart_baud_rate; + } + } + + if (mmio_uart_probe()) { + gPESF = &mmio_uart_serial_functions; + gPESF->uart_init(); + lpss_uart_supported = 1; + lpss_uart_enabled = 1; + return 1; + } else if (legacy_uart_probe()) { + gPESF = &legacy_uart_serial_functions; + gPESF->uart_init(); + legacy_uart_enabled = 1; + return 1; + } else if (pcie_mmio_uart_probe()) { + gPESF = &pcie_mmio_uart_serial_functions; + gPESF->uart_init(); + pcie_uart_enabled = 1; + return 1; + } else { + return 0; + } } static void uart_putc(char c) { if (uart_initted && (legacy_uart_enabled || lpss_uart_enabled || pcie_uart_enabled)) { - while (!gPESF->tr0()); /* Wait until THR is empty. */ - gPESF->td0(c); - } + while (!gPESF->tr0()) { + ; /* Wait until THR is empty. */ + } + gPESF->td0(c); + } } static int uart_getc(void) { - if (uart_initted && (legacy_uart_enabled || lpss_uart_enabled || pcie_uart_enabled)) { - if (!gPESF->rr0()) - return -1; - return gPESF->rd0(); - } - return -1; + if (uart_initted && (legacy_uart_enabled || lpss_uart_enabled || pcie_uart_enabled)) { + if (!gPESF->rr0()) { + return -1; + } + return gPESF->rd0(); + } + return -1; } void serial_putc( char c ) { - uart_putc(c); + uart_putc(c); } int serial_getc( void ) { - return uart_getc(); + return uart_getc(); } diff --git a/pexpert/pexpert/AppleBoot.h b/pexpert/pexpert/AppleBoot.h index e1c6e6073..f34879b2c 100644 --- a/pexpert/pexpert/AppleBoot.h +++ b/pexpert/pexpert/AppleBoot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
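[Editor's note] The three UART back ends in pe_serial.c above (legacy port I/O, LPSS MMIO, PCIe MMIO) share two idioms: presence is probed by writing 0x5a and then 0xa5 to the scratch register and reading each value back, and the baud rate is programmed as the classic 16550 divisor clock / 16 / baud, with serial_init() accepting a serialbaud boot-arg only when it divides the reference evenly. A self-contained sketch of both, assuming the conventional 1.8432 MHz reference clock and simulating the registers with an array (io_read8, io_write8, and fake_regs are inventions for this demo, not kernel names):

#include <stdint.h>
#include <stdio.h>

#define UART_SCR    7          /* scratch register offset */
#define UART_CLOCK  1843200    /* assumed 1.8432 MHz reference clock */

static uint8_t fake_regs[8];   /* simulated register file */

static uint8_t io_read8(unsigned reg)             { return fake_regs[reg]; }
static void    io_write8(unsigned reg, uint8_t v) { fake_regs[reg] = v; }

/* Probe: a live 16550 echoes scratch-register writes back. */
static int
uart_present(void)
{
	io_write8(UART_SCR, 0x5a);
	if (io_read8(UART_SCR) != 0x5a) {
		return 0;
	}
	io_write8(UART_SCR, 0xa5);
	return io_read8(UART_SCR) == 0xa5;
}

/* Divisor math used by every *_set_baud_rate above:
 * 1843200 / 16 / 115200 = 1, 1843200 / 16 / 9600 = 12. */
static uint16_t
uart_divisor(uint32_t baud)
{
	if (baud == 0) {
		baud = 9600;    /* same fallback as the hunks above */
	}
	return (uint16_t)(UART_CLOCK / 16 / baud);
}

int
main(void)
{
	printf("present: %d\n", uart_present());  /* 1: the fake regs echo */
	printf("divisor(115200) = %u\n", (unsigned)uart_divisor(115200));
	printf("divisor(9600)   = %u\n", (unsigned)uart_divisor(9600));
	/* serial_init()'s validity test: accept a requested rate only when
	 * (UART_CLOCK / 16) % rate == 0, i.e. the divisor is exact. */
	printf("115200 ok? %d\n", (UART_CLOCK / 16) % 115200 == 0);
	return 0;
}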
diff --git a/pexpert/pexpert/AppleBoot.h b/pexpert/pexpert/AppleBoot.h
index e1c6e6073..f34879b2c 100644
--- a/pexpert/pexpert/AppleBoot.h
+++ b/pexpert/pexpert/AppleBoot.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -33,139 +33,139 @@
  * DRI: Josh de Cesare
  */
 
-#define kAppleBootWidth   (128)
-#define kAppleBootHeight  (128)
-#define kAppleBootOffset  (0)
-#define kAppleBootFrames  (1)
-#define kAppleBootFPS     (0)
+#define kAppleBootWidth   (128)
+#define kAppleBootHeight  (128)
+#define kAppleBootOffset  (0)
+#define kAppleBootFrames  (1)
+#define kAppleBootFPS     (0)
 
 const unsigned char gAppleBootPict[] = {
[... the removed "-" rows of gAppleBootPict pixel data are elided here: 128 rows of comma-separated byte values, one byte per pixel of the 128x128 Apple boot image. The hunk header (139 old lines, 139 new lines) shows the block is rewritten line-for-line, consistent with a whitespace/indentation-only change; the remaining raw rows continue below ...]
0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x17,0x4a,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x4d,0x44,0x3b,0x34,0x2e,0x2c,0x2e,0x35,0x3e,0x4a,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x2e,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x0e,0x41,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x4a,0x37,0x1f,0x0c,0x03,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x06,0x15,0x2d,0x41,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x4a,0x1f,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0xf7,0x2e,0xfa,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x4f,0xf9,0x1e,0x07,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x02,0x14,0x31,0x48,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x50,0x37,0x0c,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x0d,0x2f,0x45,0x4f,0x50,0x50,0x50,0x50,0x50,0x4f,0x43,0x2e,0x11,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x07,0x1f,0x3c,0x4c,0x50,0x50,0x50,0x50,0x4f,0x46,0x33,0x13,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x04,0x12,0x1f,0x26,0x28,0x26,0x1e,0x10,0x02,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x09,0x16,0x1e,0x1f,0x1c,0x11,0x04,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 
0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 
0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 
0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 
0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01, - 0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x01 + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x17, 0x34, 0x47, 0x50, 0x50, 0x50, 0x13, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x03, 0x20, 0x40, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x13, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1a, 0x42, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0xfa, 0x0a, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x07, 0x34, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x43, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x0e, 0x43, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x36, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x14, 0x4a, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x20, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x12, 0x4b, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x4d, 0x0b, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x0b, 0x48, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0xf9, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x03, 0x3e, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x1f, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x2f, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x48, 0x06, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x15, 0x4f, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x27, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3f, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x45, 0xf7, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x1d, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x19, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x40, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x2f, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x15, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x3b, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x34, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x3f, 0x07, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xf7, 0x47, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0xf9, 0x07, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x14, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x36, 0xf7, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x24, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x4d, 0x28, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x33, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x3f, 0x14, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x3a, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x41, 0x1f, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x3a, 0x50, 0x50, 0x50, 0x48, 0x35, 0x17, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x15, 0x33, 0x27, 0x15, 0xf7, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x08, 0x0e, 0x13, 0x14, 0x12, 0x0d, 0x06, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x0b, 0x12, 0x17, 0x19, 0x15, 0x11, 0x08, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x11, 0x26, 0x39, 0x44, 0x4c, 0x4f, 0x50, 0x50, 0x4f, 0x4d, 0x4a, 0x3f, 0x31, 0x19, 0xf7, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x0b, 0x1c, 0x2e, 0xf9, 0x46, 0x4d, 0x50, 0x50, 0x50, 0x50, 0x4f, 0x4b, 0x42, 0x34, 0x1f, 0x0a, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xf7, 0x21, 0x3f, 0x4f, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x47, 0x2d, 0x0d, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x0d, 0x25, 0x3d, 0xfa, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x4c, 0x37, 0x15, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x25, 0x45, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x4d, 0x39, 0x19, 0x02, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x09, 0x21, 0xf9, 0x4d, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0xf9, 0x15, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x17, 0x42, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x42, 0x27, 0x0e, 0x02, 0x01, 0x01, 0x01, 0x02, 0x0b, 0x1f, 0x38, 0x4b, 0x50, 
0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x33, 0x06, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x03, 0x2e, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x4d, 0x43, 0x3a, 0x37, 0x3a, 0x42, 0x4d, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x42, 0x0e, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x0a, 0xf9, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x48, 0x13, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x0d, 0x45, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x4b, 0x12, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x10, 0x48, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x48, 0x0b, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 
[… remainder of the added pixel-data hunk elided: the flattened "+" lines continue
in the same pattern as above — long runs of 0x01 (background) and 0x50 (foreground)
bytes with anti-aliased edge values between them (0x02–0x4f, 0xf7–0xfa) — until the
initializer closes with "0x01, 0x01 };" …]
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
diff --git a/pexpert/pexpert/Clut.h b/pexpert/pexpert/Clut.h
index 9f38cd31b..797483244 100644
--- a/pexpert/pexpert/Clut.h
+++ b/pexpert/pexpert/Clut.h
@@ -2,7 +2,7 @@
  * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -34,68 +34,68 @@
  */
 const unsigned char gClut[] = {
- 0xff,0xff,0xff, 0xbf,0xbf,0xbf, 0xbe,0xbe,0xbe, 0xbd,0xbd,0xbd,
- 0xbc,0xbc,0xbc, 0xff,0xff,0x00, 0xba,0xba,0xba, 0xb9,0xb9,0xb9,
- 0xb8,0xb8,0xb8, 0xb7,0xb7,0xb7, 0xb6,0xb6,0xb6, 0xb5,0xb5,0xb5,
- 0xb4,0xb4,0xb4, 0xb3,0xb3,0xb3, 0xb2,0xb2,0xb2, 0x00,0x00,0x00,
- 0xb1,0xb1,0xb1, 0xb0,0xb0,0xb0, 0xaf,0xaf,0xaf, 0xae,0xae,0xae,
- 0xad,0xad,0xad, 0xac,0xac,0xac, 0xab,0xab,0xab, 0xaa,0xaa,0xaa,
- 0xff,0x00,0xff, 0xa9,0xa9,0xa9, 0xa8,0xa8,0xa8, 0xa7,0xa7,0xa7,
- 0xa6,0xa6,0xa6, 0xa5,0xa5,0xa5, 0xa4,0xa4,0xa4, 0xa3,0xa3,0xa3,
- 0xa2,0xa2,0xa2, 0xa1,0xa1,0xa1, 0xa0,0xa0,0xa0, 0xff,0x00,0x00,
- 0x9f,0x9f,0x9f, 0x9e,0x9e,0x9e, 0x9d,0x9d,0x9d, 0x9c,0x9c,0x9c,
- 0x9b,0x9b,0x9b, 0x9a,0x9a,0x9a, 0xcc,0xcc,0xff, 0xcc,0xcc,0xcc,
- 0x99,0x99,0x99, 0x98,0x98,0x98, 0x97,0x97,0x97, 0x96,0x96,0x96,
- 0x95,0x95,0x95, 0x94,0x94,0x94, 0x93,0x93,0x93, 0x92,0x92,0x92,
- 0x91,0x91,0x91, 0x90,0x90,0x90, 0x8f,0x8f,0x8f, 0x8e,0x8e,0x8e,
- 0x8d,0x8d,0x8d, 0x8c,0x8c,0x8c, 0x8b,0x8b,0x8b, 0x8a,0x8a,0x8a,
- 0x89,0x89,0x89, 0x87,0x87,0x87, 0x86,0x86,0x86, 0x85,0x85,0x85,
- 0x84,0x84,0x84, 0x83,0x83,0x83, 0x82,0x82,0x82, 0x81,0x81,0x81,
- 0x80,0x80,0x80, 0x7f,0x7f,0x7f, 0x7e,0x7e,0x7e, 0x7d,0x7d,0x7d,
- 0x7c,0x7c,0x7c, 0x7b,0x7b,0x7b, 0x7a,0x7a,0x7a, 0x79,0x79,0x79,
- 0x78,0x78,0x78, 0x76,0x76,0x76, 0x75,0x75,0x75, 0x74,0x74,0x74,
- 0x73,0x73,0x73, 0x72,0x72,0x72, 0x71,0x71,0x71, 0x70,0x70,0x70,
- 0x6f,0x6f,0x6f, 0x6e,0x6e,0x6e, 0x6d,0x6d,0x6d, 0x6c,0x6c,0x6c,
- 0x6b,0x6b,0x6b, 0x6a,0x6a,0x6a, 0x69,0x69,0x69, 0x68,0x68,0x68,
- 0x67,0x67,0x67, 0x66,0x66,0x66, 0x64,0x64,0x64, 0x63,0x63,0x63,
- 0x62,0x62,0x62, 0x61,0x61,0x61, 0x60,0x60,0x60, 0x5f,0x5f,0x5f,
- 0x5e,0x5e,0x5e, 0x5d,0x5d,0x5d, 0x5c,0x5c,0x5c, 0x5b,0x5b,0x5b,
- 0x5a,0x5a,0x5a, 0x59,0x59,0x59, 0x58,0x58,0x58, 0x57,0x57,0x57,
- 0x56,0x56,0x56, 0x54,0x54,0x54, 0x53,0x53,0x53, 0x52,0x52,0x52,
- 0x51,0x51,0x51, 0x50,0x50,0x50, 0x4f,0x4f,0x4f, 0x4e,0x4e,0x4e,
- 0x4d,0x4d,0x4d, 0x4c,0x4c,0x4c, 0x4b,0x4b,0x4b, 0x4a,0x4a,0x4a,
- 0x49,0x49,0x49, 0x48,0x48,0x48, 0x47,0x47,0x47, 0x46,0x46,0x46,
- 0x45,0x45,0x45, 0x43,0x43,0x43, 0x42,0x42,0x42, 0x41,0x41,0x41,
- 0x40,0x40,0x40, 0x3f,0x3f,0x3f, 0x3e,0x3e,0x3e, 0x3d,0x3d,0x3d,
- 0x3c,0x3c,0x3c, 0x3b,0x3b,0x3b, 0x3a,0x3a,0x3a, 0x39,0x39,0x39,
- 0x38,0x38,0x38, 0x37,0x37,0x37, 0x36,0x36,0x36, 0x35,0x35,0x35,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x00,0xff,0xff, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x00,0xff,0x00, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x00,0x00,0xff, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0xdd,0x00,0x00, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x00,0xbb,0x00,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65,
- 0x65,0x65,0x65, 0x65,0x65,0x65, 0x65,0x65,0x65, 0xbb,0xbb,0xbb,
- 0x65,0x65,0x65, 0x88,0x88,0x88, 0x77,0x77,0x77, 0x55,0x55,0x55,
- 0x44,0x44,0x44, 0x22,0x22,0x22, 0x65,0x65,0x65, 0x00,0x00,0x00
+ 0xff, 0xff, 0xff, 0xbf, 0xbf, 0xbf, 0xbe, 0xbe, 0xbe, 0xbd, 0xbd, 0xbd,
+ 0xbc, 0xbc, 0xbc, 0xff, 0xff, 0x00, 0xba, 0xba, 0xba, 0xb9, 0xb9, 0xb9,
+ 0xb8, 0xb8, 0xb8, 0xb7, 0xb7, 0xb7, 0xb6, 0xb6, 0xb6, 0xb5, 0xb5, 0xb5,
+ 0xb4, 0xb4, 0xb4, 0xb3, 0xb3, 0xb3, 0xb2, 0xb2, 0xb2, 0x00, 0x00, 0x00,
+ 0xb1, 0xb1, 0xb1, 0xb0, 0xb0, 0xb0, 0xaf, 0xaf, 0xaf, 0xae, 0xae, 0xae,
+ 0xad, 0xad, 0xad, 0xac, 0xac, 0xac, 0xab, 0xab, 0xab, 0xaa, 0xaa, 0xaa,
+ 0xff, 0x00, 0xff, 0xa9, 0xa9, 0xa9, 0xa8, 0xa8, 0xa8, 0xa7, 0xa7, 0xa7,
+ 0xa6, 0xa6, 0xa6, 0xa5, 0xa5, 0xa5, 0xa4, 0xa4, 0xa4, 0xa3, 0xa3, 0xa3,
+ 0xa2, 0xa2, 0xa2, 0xa1, 0xa1, 0xa1, 0xa0, 0xa0, 0xa0, 0xff, 0x00, 0x00,
+ 0x9f, 0x9f, 0x9f, 0x9e, 0x9e, 0x9e, 0x9d, 0x9d, 0x9d, 0x9c, 0x9c, 0x9c,
+ 0x9b, 0x9b, 0x9b, 0x9a, 0x9a, 0x9a, 0xcc, 0xcc, 0xff, 0xcc, 0xcc, 0xcc,
+ 0x99, 0x99, 0x99, 0x98, 0x98, 0x98, 0x97, 0x97, 0x97, 0x96, 0x96, 0x96,
+ 0x95, 0x95, 0x95, 0x94, 0x94, 0x94, 0x93, 0x93, 0x93, 0x92, 0x92, 0x92,
+ 0x91, 0x91, 0x91, 0x90, 0x90, 0x90, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e,
+ 0x8d, 0x8d, 0x8d, 0x8c, 0x8c, 0x8c, 0x8b, 0x8b, 0x8b, 0x8a, 0x8a, 0x8a,
+ 0x89, 0x89, 0x89, 0x87, 0x87, 0x87, 0x86, 0x86, 0x86, 0x85, 0x85, 0x85,
+ 0x84, 0x84, 0x84, 0x83, 0x83, 0x83, 0x82, 0x82, 0x82, 0x81, 0x81, 0x81,
+ 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7e, 0x7e, 0x7e, 0x7d, 0x7d, 0x7d,
+ 0x7c, 0x7c, 0x7c, 0x7b, 0x7b, 0x7b, 0x7a, 0x7a, 0x7a, 0x79, 0x79, 0x79,
+ 0x78, 0x78, 0x78, 0x76, 0x76, 0x76, 0x75, 0x75, 0x75, 0x74, 0x74, 0x74,
+ 0x73, 0x73, 0x73, 0x72, 0x72, 0x72, 0x71, 0x71, 0x71, 0x70, 0x70, 0x70,
+ 0x6f, 0x6f, 0x6f, 0x6e, 0x6e, 0x6e, 0x6d, 0x6d, 0x6d, 0x6c, 0x6c, 0x6c,
+ 0x6b, 0x6b, 0x6b, 0x6a, 0x6a, 0x6a, 0x69, 0x69, 0x69, 0x68, 0x68, 0x68,
+ 0x67, 0x67, 0x67, 0x66, 0x66, 0x66, 0x64, 0x64, 0x64, 0x63, 0x63, 0x63,
+ 0x62, 0x62, 0x62, 0x61, 0x61, 0x61, 0x60, 0x60, 0x60, 0x5f, 0x5f, 0x5f,
+ 0x5e, 0x5e, 0x5e, 0x5d, 0x5d, 0x5d, 0x5c, 0x5c, 0x5c, 0x5b, 0x5b, 0x5b,
+ 0x5a, 0x5a, 0x5a, 0x59, 0x59, 0x59, 0x58, 0x58, 0x58, 0x57, 0x57, 0x57,
+ 0x56, 0x56, 0x56, 0x54, 0x54, 0x54, 0x53, 0x53, 0x53, 0x52, 0x52, 0x52,
+ 0x51, 0x51, 0x51, 0x50, 0x50, 0x50, 0x4f, 0x4f, 0x4f, 0x4e, 0x4e, 0x4e,
+ 0x4d, 0x4d, 0x4d, 0x4c, 0x4c, 0x4c, 0x4b, 0x4b, 0x4b, 0x4a, 0x4a, 0x4a,
+ 0x49, 0x49, 0x49, 0x48, 0x48, 0x48, 0x47, 0x47, 0x47, 0x46, 0x46, 0x46,
+ 0x45, 0x45, 0x45, 0x43, 0x43, 0x43, 0x42, 0x42, 0x42, 0x41, 0x41, 0x41,
+ 0x40, 0x40, 0x40, 0x3f, 0x3f, 0x3f, 0x3e, 0x3e, 0x3e, 0x3d, 0x3d, 0x3d,
+ 0x3c, 0x3c, 0x3c, 0x3b, 0x3b, 0x3b, 0x3a, 0x3a, 0x3a, 0x39, 0x39, 0x39,
+ 0x38, 0x38, 0x38, 0x37, 0x37, 0x37, 0x36, 0x36, 0x36, 0x35, 0x35, 0x35,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x00, 0xff, 0xff, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x00, 0xff, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x00, 0x00, 0xff, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0xdd, 0x00, 0x00, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x00, 0xbb, 0x00,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65,
+ 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0x65, 0xbb, 0xbb, 0xbb,
+ 0x65, 0x65, 0x65, 0x88, 0x88, 0x88, 0x77, 0x77, 0x77, 0x55, 0x55, 0x55,
+ 0x44, 0x44, 0x44, 0x22, 0x22, 0x22, 0x65, 0x65, 0x65, 0x00, 0x00, 0x00
 };
diff --git a/pexpert/pexpert/GearImage.h b/pexpert/pexpert/GearImage.h
index d79fa3ff4..40a3a896a 100644
--- a/pexpert/pexpert/GearImage.h
+++ b/pexpert/pexpert/GearImage.h
@@ -1,4048 +1,4048 @@
-#define kGearWidth (32)
-#define kGearHeight (32)
-#define kGearOffset (200)
-#define kGearFrames (6)
-#define kGearFPS (24)
+#define kGearWidth (32)
+#define kGearHeight (32)
+#define kGearOffset (200)
+#define kGearFrames (6)
+#define kGearFPS (24)
 #if !PEXPERT_NO_3X_IMAGES
-const unsigned char gGearPict3x[9*kGearFrames*kGearWidth*kGearHeight] = {
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,
- 0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
-
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfd, - 0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfb, - 0xfb,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xf5,0xf0, - 0xf0,0xf5,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfa,0xd7,0xad,0x94, - 0x94,0xac,0xd6,0xfa,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xe1,0xa1,0x76,0x6f, - 0x6f,0x76,0x9f,0xdc,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xed,0xb4,0x7c,0x6d,0x7d, - 0x7d,0x6e,0x79,0xac,0xe8,0xff,0xff,0xfd,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfc,0xfe,0xff,0xff,0xfe,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0xff,0xff,0xd5,0x90,0x75,0x75,0x79, - 0x79,0x75,0x74,0x8e,0xd4,0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf7,0xe7,0xe6,0xf6,0xff, - 0xfe,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf9,0xff,0xff,0xca,0x82,0x76,0x7a,0x72, - 0x72,0x79,0x75,0x83,0xcb,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfd,0xf9,0xfa,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xeb,0xca,0xae,0xac,0xc4,0xe7, - 0xfd,0xff,0xfe,0xff,0xff,0xfe,0xff,0xff,0xf9,0xff,0xff,0xca,0x80,0x73,0x79,0x76, - 0x76,0x79,0x72,0x7f,0xc9,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xfa,0xf2,0xed,0xed,0xf3,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xfc,0xff,0xff,0xdf,0xa4,0x86,0x82,0x7e,0x81,0xa6, - 0xdd,0xfc,0xff,0xfe,0xff,0xff,0xff,0xff,0xf9,0xff,0xff,0xca,0x80,0x73,0x79,0x75, - 0x75,0x78,0x72,0x7f,0xc9,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0xf8, - 0xec,0xe4,0xe3,0xe3,0xe5,0xec,0xf8,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xfc,0xbc,0x78,0x70,0x79,0x76,0x71,0x77, - 0xa8,0xec,0xff,0xfc,0xfe,0xff,0xff,0xff,0xf9,0xff,0xff,0xc9,0x81,0x75,0x7a,0x74, - 0x74,0x7a,0x74,0x80,0xc9,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff,0xfb,0xed, - 0xe2,0xe0,0xe1,0xe1,0xdf,0xe1,0xf0,0xfe,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xa7,0x76,0x7a,0x7e,0x7d,0x80,0x72, - 0x81,0xc4,0xf6,0xff,0xff,0xfe,0xff,0xff,0xf9,0xff,0xff,0xca,0x81,0x74,0x79,0x74, - 0x74,0x79,0x73,0x7f,0xc9,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xfd,0xf2,0xe4, - 0xe1,0xe3,0xe2,0xe2,0xe1,0xe0,0xeb,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xe2,0xa4,0x78,0x79,0x7f,0x7b,0x7c,0x7c, - 0x79,0x91,0xd4,0xff,0xff,0xfa,0xff,0xff,0xf9,0xff,0xff,0xca,0x81,0x74,0x79,0x74, - 0x74,0x79,0x73,0x7f,0xc9,0xff,0xff,0xf9,0xff,0xff,0xfd,0xff,0xff,0xf6,0xe7,0xe2, - 0xe2,0xe2,0xe2,0xe3,0xe1,0xe1,0xeb,0xf9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xf2,0xad,0x72,0x75,0x80,0x77,0x76,0x81, - 0x79,0x77,0xb4,0xf8,0xff,0xfa,0xff,0xff,0xf9,0xff,0xff,0xca,0x81,0x74,0x79,0x74, - 0x74,0x79,0x73,0x7f,0xc9,0xff,0xff,0xf9,0xff,0xff,0xfd,0xff,0xfe,0xef,0xe2,0xe1, - 0xe3,0xe2,0xe2,0xe3,0xe0,0xdf,0xec,0xfc,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xf9,0xff,0xff,0xc1,0x7c,0x7a,0x82,0x79,0x7a,0x7e, - 0x78,0x77,0x93,0xcb,0xf9,0xff,0xff,0xff,0xfb,0xff,0xff,0xca,0x81,0x74,0x79,0x74, - 0x74,0x79,0x73,0x7f,0xc9,0xff,0xff,0xfa,0xff,0xff,0xff,0xfe,0xf3,0xe8,0xe2,0xe2, - 0xe2,0xe2,0xe2,0xe3,0xe1,0xe1,0xf0,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xfc,0xff,0xff,0xdd,0xa0,0x81,0x7c,0x7d,0x7c,0x78, - 0x7c,0x7e,0x76,0x9e,0xef,0xff,0xfc,0xfe,0xfc,0xff,0xff,0xca,0x81,0x74,0x79,0x74, - 0x74,0x79,0x73,0x7f,0xc9,0xff,0xff,0xfa,0xff,0xff,0xff,0xfc,0xea,0xe1,0xe4,0xe3, - 0xe1,0xe2,0xe2,0xe2,0xe2,0xe9,0xf7,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfb,0xd0,0x8b,0x73,0x80,0x7e,0x77, - 0x7e,0x7f,0x6f,0x88,0xd0,0xfb,0xfe,0xff,0xfa,0xff,0xff,0xc9,0x81,0x74,0x79,0x74, - 0x74,0x79,0x73,0x7f,0xc9,0xff,0xff,0xfa,0xff,0xff,0xfe,0xf5,0xe5,0xdf,0xe3,0xe3, - 0xe1,0xe3,0xe3,0xe0,0xe5,0xf4,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfd,0xfb,0xff,0xf1,0xa6,0x7c,0x7f,0x7f,0x7a, - 0x7b,0x7b,0x7a,0x7e,0x9d,0xda,0xff,0xff,0xf6,0xff,0xff,0xc9,0x81,0x74,0x79,0x74, - 0x74,0x79,0x73,0x7f,0xc9,0xff,0xff,0xf9,0xff,0xff,0xf6,0xea,0xe4,0xe2,0xe2,0xe2, - 0xe2,0xe2,0xe3,0xe1,0xea,0xfc,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff,0xfe,0xfe,0xff,0xfa,0xd3,0x99,0x79,0x7b,0x7f, - 0x79,0x79,0x81,0x76,0x77,0xba,0xff,0xff,0xf4,0xff,0xff,0xc9,0x81,0x75,0x7a,0x74, - 0x74,0x7a,0x74,0x80,0xc9,0xff,0xff,0xf8,0xff,0xff,0xf0,0xe3,0xe2,0xe4,0xe1,0xe2, - 0xe3,0xe2,0xe1,0xe8,0xf4,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xfa,0xfd,0xff, - 0xfc,0xfa,0xfc,0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0xff,0xfa,0xb9,0x7b,0x7a,0x82, - 0x7a,0x79,0x7f,0x77,0x73,0x99,0xda,0xfd,0xfc,0xff,0xff,0xcb,0x80,0x72,0x78,0x75, - 0x75,0x78,0x71,0x7f,0xca,0xff,0xff,0xfa,0xff,0xf8,0xe9,0xe2,0xe2,0xe3,0xe2,0xe1, - 0xe3,0xe1,0xe0,0xee,0xfe,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xfe,0xfd,0xff,0xff,0xff,0xff,0xfb,0xff,0xff,0xd9,0x9a,0x7e,0x7c, - 0x7d,0x7c,0x79,0x7c,0x7d,0x7c,0xa8,0xf1,0xff,0xff,0xff,0xcc,0x80,0x75,0x7a,0x73, - 0x73,0x79,0x74,0x80,0xca,0xff,0xff,0xfc,0xfe,0xef,0xe3,0xe3,0xe2,0xe2,0xe2,0xe2, - 0xe2,0xe2,0xe7,0xf6,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xf1,0xe4, - 0xf0,0xff,0xff,0xfb,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf9,0xcc,0x89,0x73, - 0x80,0x80,0x77,0x7e,0x7f,0x70,0x8a,0xd4,0xf9,0xff,0xff,0xcf,0x87,0x77,0x79,0x74, - 0x74,0x78,0x77,0x8a,0xd0,0xff,0xff,0xfb,0xfa,0xe9,0xe0,0xe3,0xe2,0xe1,0xe2,0xe3, - 0xe0,0xe5,0xf3,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfb, - 0xf8,0xfa,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfa,0xdd,0xc2,0xb2,0xa9, - 0xaf,0xc6,0xe2,0xf8,0xff,0xff,0xfc,0xfb,0xfe,0xff,0xfd,0xfa,0xff,0xec,0xa2,0x7c, - 0x81,0x7e,0x79,0x7d,0x7d,0x74,0x7f,0xad,0xe4,0xff,0xff,0xde,0xa0,0x74,0x6e,0x7d, - 0x7d,0x6e,0x73,0xa1,0xe2,0xff,0xff,0xfa,0xf2,0xe6,0xe1,0xe3,0xe3,0xe1,0xe3,0xe3, - 0xe0,0xe9,0xfb,0xff,0xfe,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfe,0xf8,0xef,0xe8, - 0xe7,0xe9,0xed,0xf5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfe,0xfd,0xff,0xe7,0xa4,0x7b,0x7d,0x80, - 0x79,0x82,0xa7,0xd7,0xf3,0xfc,0xfe,0xff,0xfe,0xff,0xfd,0xfe,0xff,0xfa,0xcf,0x96, - 0x7a,0x7c,0x7e,0x79,0x79,0x7c,0x7b,0x8f,0xd3,0xff,0xff,0xf4,0xcd,0x8c,0x6c,0x76, - 0x76,0x6c,0x89,0xca,0xf7,0xff,0xff,0xf8,0xeb,0xe4,0xe2,0xe3,0xe2,0xe2,0xe2,0xe1, - 0xe6,0xf2,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xf5,0xe8,0xdd,0xd9, - 0xda,0xda,0xda,0xe5,0xf8,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf2,0xc2,0x8d,0x7b,0x83,0x84, - 0x7d,0x7f,0x86,0x90,0xaa,0xd6,0xf7,0xff,0xfe,0xfc,0xfe,0xff,0xfb,0xff,0xfa,0xb5, - 0x78,0x7c,0x83,0x77,0x77,0x7e,0x79,0x87,0xce,0xff,0xff,0xfe,0xf5,0xc6,0x93,0x7a, - 0x7a,0x92,0xc3,0xf3,0xff,0xff,0xff,0xf6,0xe7,0xe3,0xe3,0xe2,0xe2,0xe3,0xe1,0xe0, - 0xed,0xfd,0xff,0xfe,0xff,0xfe,0xfe,0xff,0xff,0xfd,0xf5,0xe9,0xe0,0xdd,0xdb,0xda, - 0xdb,0xdc,0xd9,0xde,0xee,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xfd,0xff,0xff,0xe3,0xae,0x8c,0x85,0x89,0x89, - 0x89,0x8a,0x83,0x79,0x81,0x9f,0xbf,0xde,0xfa,0xff,0xfe,0xfb,0xf9,0xff,0xff,0xd8, - 0x97,0x7a,0x7b,0x81,0x81,0x7c,0x79,0x92,0xd5,0xff,0xff,0xfb,0xff,0xf3,0xd7,0xc2, - 0xc2,0xd7,0xf1,0xfe,0xff,0xff,0xff,0xf6,0xe8,0xe2,0xe2,0xe3,0xe3,0xe2,0xe1,0xe6, - 0xf5,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfe,0xf6,0xee,0xe5,0xdc,0xda,0xdc,0xde,0xde, - 0xdd,0xdc,0xdb,0xdd,0xe6,0xf6,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xfd,0xff,0xff,0xe6,0xaf,0x88,0x80,0x86,0x84, - 0x82,0x84,0x85,0x87,0x85,0x7e,0x81,0x9f,0xcd,0xee,0xfa,0xfe,0xff,0xff,0xfe,0xf7, - 0xcd,0x91,0x76,0x7c,0x79,0x73,0x86,0xb7,0xeb,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfa,0xef,0xe4,0xe0,0xe2,0xe2,0xe0,0xe5,0xf3, - 0xfd,0xff,0xff,0xff,0xff,0xfe,0xfa,0xf1,0xe6,0xdc,0xda,0xdc,0xdd,0xdc,0xdb,0xdb, - 0xdb,0xdb,0xda,0xdc,0xe7,0xf7,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf2,0xc2,0x8b,0x7e,0x89,0x85, - 0x80,0x84,0x86,0x88,0x86,0x81,0x80,0x83,0x8b,0xa3,0xd0,0xf9,0xff,0xfc,0xfb,0xff, - 0xf5,0xc7,0x96,0x7d,0x78,0x82,0xad,0xe8,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xec,0xe2,0xe0,0xe0,0xe6,0xf2,0xfc, - 0xff,0xff,0xff,0xff,0xfd,0xf3,0xe7,0xe0,0xdc,0xdb,0xdb,0xdd,0xdd,0xdc,0xdb,0xda, - 0xdc,0xdd,0xd9,0xdc,0xec,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xe2,0xa6,0x7f,0x7e,0x86, - 0x8a,0x88,0x83,0x82,0x84,0x87,0x8a,0x83,0x77,0x80,0x9a,0xb8,0xdb,0xf6,0xff,0xff, - 0xfe,0xf2,0xd8,0xc2,0xbf,0xca,0xe2,0xfb,0xff,0xfe,0xff,0xff,0xff,0xfe,0xfc,0xf8, - 0xf8,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf8,0xf3,0xf1,0xf0,0xf5,0xfc,0xff, - 0xff,0xff,0xfd,0xf6,0xec,0xe5,0xde,0xda,0xdb,0xdd,0xdd,0xdb,0xda,0xda,0xdc,0xdd, - 0xdb,0xd8,0xd8,0xe4,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfa,0xd6,0xa7,0x8b,0x85, - 0x84,0x83,0x86,0x88,0x85,0x83,0x84,0x85,0x87,0x86,0x7d,0x7c,0x9a,0xce,0xf5,0xff, - 0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfd,0xf3,0xe4,0xdb,0xdc,0xde,0xdd,0xdc,0xdc,0xdb,0xdb,0xdc,0xdc,0xda,0xda, - 0xda,0xdb,0xe3,0xf2,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xed,0xc9,0x9b, - 0x82,0x83,0x88,0x88,0x85,0x83,0x84,0x86,0x88,0x84,0x82,0x80,0x7e,0x94,0xc8,0xf3, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfb,0xf0,0xe3,0xdc,0xdb,0xdc,0xdd,0xdd,0xdb,0xda,0xda,0xdb,0xdc,0xdc,0xd9,0xd8, - 0xe0,0xee,0xf9,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xf7,0xd8, - 0xb9,0x9c,0x82,0x7c,0x86,0x8a,0x87,0x83,0x81,0x82,0x86,0x89,0x81,0x7a,0x95,0xd8, - 0xff,0xff,0xf8,0xf9,0xf9,0xf9,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfd,0xfe,0xfe,0xff,0xff, - 0xf5,0xe3,0xdb,0xdc,0xdd,0xdc,0xdb,0xda,0xdb,0xdc,0xdc,0xda,0xd7,0xd9,0xe0,0xe9, - 0xf3,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfd,0xff,0xff, - 0xf6,0xd2,0xa4,0x8e,0x8a,0x84,0x83,0x87,0x87,0x87,0x83,0x80,0x8b,0x80,0x7b,0xc1, - 0xff,0xff,0xf8,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff, - 0xef,0xdb,0xdb,0xdd,0xdb,0xdb,0xdd,0xdc,0xdb,0xda,0xda,0xda,0xdb,0xe3,0xf1,0xfc, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff, - 0xfe,0xfb,0xef,0xd0,0xa5,0x85,0x80,0x89,0x8a,0x88,0x83,0x81,0x8c,0x81,0x79,0xc0, - 0xff,0xff,0xf8,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff, - 0xed,0xd9,0xda,0xde,0xda,0xda,0xdc,0xdc,0xda,0xd8,0xda,0xe3,0xef,0xf9,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xfe,0xf9, - 0xfb,0xff,0xff,0xf4,0xdc,0xbf,0xa0,0x87,0x7d,0x80,0x86,0x87,0x86,0x7c,0x89,0xcd, - 0xff,0xff,0xf9,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff, - 0xf1,0xdc,0xd8,0xdc,0xdc,0xda,0xd8,0xd7,0xd9,0xe0,0xeb,0xf4,0xfb,0xff,0xff,0xfd, - 0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfc,0xff,0xfd,0xda,0xad,0x94,0x8b,0x89,0x87,0x84,0x90,0xb6,0xe8, - 0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff, - 0xf9,0xea,0xdf,0xdb,0xdb,0xda,0xda,0xdd,0xe6,0xf4,0xfe,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xf2,0xda,0xb9,0xa0,0x98,0xa0,0xc2,0xee,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfb,0xee,0xe2,0xdf,0xe1,0xe9,0xf3,0xfb,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfb,0xfb,0xfb,0xfb,0xfa,0xfa,0xfa, - 0xfa,0xfc,0xfd,0xfb,0xf7,0xf7,0xfc,0xff,0xfa,0xea,0xd8,0xd4,0xdd,0xee,0xfd,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfe,0xfa,0xf5,0xf1,0xf3,0xf9,0xfe,0xff,0xfe,0xfd,0xfd,0xfe,0xfe,0xfd,0xfd, - 0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xed,0xdf,0xd7,0xd6,0xd6,0xd6,0xd6,0xd6, - 0xd6,0xd6,0xd6,0xd6,0xd6,0xd6,0xd7,0xd8,0xdc,0xe7,0xf5,0xfd,0xfe,0xfd,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfb,0xf7,0xf3,0xf1,0xf0,0xf0,0xf0,0xf0,0xf0,0xef,0xef, - 0xef,0xef,0xef,0xef,0xef,0xf0,0xf3,0xf9,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe3,0xbc,0xa6,0x9d,0x9a,0x9a,0x9a,0x9a,0x9a, - 0x9a,0x9a,0x9a,0x9a,0x9a,0x99,0x98,0x9a,0xa2,0xb3,0xd5,0xf7,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfc,0xee,0xe3,0xde,0xda,0xda,0xda,0xda,0xda,0xda,0xd8,0xd8, - 0xd8,0xd8,0xd8,0xd8,0xd8,0xd9,0xdd,0xe6,0xf4,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xfd,0xe2,0xb2,0x91,0x8e,0x90,0x8c,0x8c,0x8d,0x8d,0x8d, - 0x8d,0x8d,0x8d,0x8d,0x8c,0x8c,0x8b,0x8d,0x8f,0x8b,0x9f,0xd0,0xf7,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfb,0xeb,0xda,0xd4,0xd5,0xd4,0xd3,0xd4,0xd4,0xd4,0xd4,0xd3,0xd3, - 0xd3,0xd3,0xd3,0xd3,0xd3,0xd4,0xd4,0xd5,0xe1,0xf3,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xff,0xfe,0xfd,0xff,0xf6,0xbb,0x8e,0x87,0x8d,0x90,0x90,0x90,0x90,0x90,0x90, - 0x90,0x90,0x90,0x90,0x90,0x90,0x8f,0x90,0x8f,0x86,0x86,0xa5,0xdf,0xff,0xff,0xfc, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xff,0xff,0xf2,0xdc,0xd2,0xd2,0xd3,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xd5,0xd5, - 0xd5,0xd5,0xd5,0xd5,0xd5,0xd5,0xd3,0xd1,0xd4,0xe6,0xfb,0xff,0xfe,0xff,0xff,0xff, - 0xfe,0xff,0xfd,0xfb,0xff,0xf0,0xa4,0x86,0x94,0x91,0x8b,0x8e,0x8d,0x8d,0x8d,0x8d, - 0x8d,0x8d,0x8d,0x8d,0x8d,0x8d,0x8e,0x8c,0x8c,0x94,0x8d,0x90,0xce,0xff,0xff,0xfa, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfd,0xff,0xff,0xec,0xd4,0xd4,0xd6,0xd2,0xd2,0xd2,0xd2,0xd2,0xd2,0xd2,0xd2,0xd2, - 0xd2,0xd2,0xd2,0xd2,0xd2,0xd2,0xd4,0xd5,0xd0,0xdd,0xfa,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xff,0xfd,0xfb,0xff,0xf1,0xa7,0x88,0x95,0x91,0x8c,0x8f,0x8e,0x8e,0x8e,0x8e, - 0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8f,0x8d,0x8d,0x95,0x8f,0x93,0xcf,0xff,0xff,0xfa, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfd,0xff,0xff,0xec,0xd4,0xd2,0xd4,0xd0,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1, - 0xd1,0xd1,0xd1,0xd1,0xd1,0xd0,0xd2,0xd3,0xcf,0xdc,0xf9,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xff,0xfe,0xfd,0xff,0xf6,0xbd,0x90,0x89,0x8e,0x91,0x91,0x91,0x91,0x91,0x91, - 0x91,0x91,0x91,0x91,0x91,0x91,0x90,0x91,0x90,0x88,0x89,0xa9,0xe1,0xff,0xff,0xfc, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xff,0xff,0xf1,0xd9,0xce,0xce,0xd2,0xd2,0xd2,0xd2,0xd2,0xd2,0xd2,0xd2,0xd2, - 0xd2,0xd2,0xd2,0xd2,0xd2,0xd2,0xd0,0xce,0xd1,0xe4,0xfb,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xfc,0xe0,0xb1,0x91,0x8f,0x91,0x8d,0x8d,0x8e,0x8e,0x8e, - 0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8d,0x8f,0x91,0x8e,0xa1,0xd1,0xf6,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfa,0xea,0xd7,0xd0,0xd1,0xd0,0xcf,0xd0,0xd1,0xd1,0xd1,0xd1,0xd1, - 0xd1,0xd1,0xd1,0xd1,0xd1,0xd2,0xd0,0xd2,0xdf,0xf3,0xfe,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xfe,0xff,0xfe,0xe3,0xbd,0xa7,0x9e,0x9b,0x9b,0x9b,0x9b,0x9b, - 0x9b,0x9b,0x9d,0x9d,0x9d,0x9d,0x9c,0x9c,0xa3,0xb4,0xd6,0xf7,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfb,0xed,0xdf,0xd8,0xd4,0xd3,0xd4,0xd6,0xd6,0xd6,0xd6,0xd6, - 0xd6,0xd6,0xd6,0xd6,0xd6,0xd7,0xda,0xe3,0xf3,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xed,0xdd,0xd5,0xd4,0xd4,0xd4,0xd4,0xd4, - 0xd4,0xd4,0xd5,0xd5,0xd5,0xd5,0xd6,0xd7,0xda,0xe5,0xf5,0xfe,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xff,0xff,0xfb,0xf4,0xef,0xed,0xec,0xec,0xed,0xee,0xee,0xee,0xee, - 0xee,0xee,0xee,0xee,0xee,0xee,0xf1,0xf7,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfb,0xfb,0xfb,0xfb,0xfa,0xfa,0xfa, - 0xfa,0xfb,0xfd,0xfc,0xf8,0xf7,0xfc,0xff,0xfb,0xed,0xe0,0xdd,0xe5,0xf3,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfe,0xf9,0xf1,0xee,0xf0,0xf7,0xfd,0xff,0xfe,0xfc,0xfc,0xfd,0xfe,0xfe,0xfd, - 0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf4,0xe0,0xc4,0xaf,0xa8,0xb0,0xcd,0xf1,0xff, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xf8,0xe5,0xd6,0xd3,0xd8,0xe3,0xf0,0xf9,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfc,0xff,0xfe,0xe0,0xb9,0xa5,0x9d,0x9a,0x98,0x98,0xa4,0xc4,0xed, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff, - 0xf4,0xde,0xce,0xc8,0xca,0xcc,0xce,0xd1,0xdb,0xf0,0xfe,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xfe,0xfa, - 0xfb,0xff,0xff,0xf6,0xe3,0xca,0xae,0x97,0x8f,0x92,0x98,0x9a,0x99,0x92,0x9f,0xd7, - 0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff, - 0xe9,0xca,0xc4,0xc8,0xc9,0xc9,0xc7,0xc6,0xcb,0xd6,0xe5,0xf1,0xfb,0xff,0xff,0xfd, - 0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xfc,0xf1,0xd7,0xb1,0x96,0x92,0x9a,0x9a,0x99,0x97,0x97,0xa1,0x97,0x91,0xcb, - 0xff,0xff,0xfa,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff, - 0xe2,0xc1,0xc6,0xcc,0xc7,0xc7,0xc9,0xcb,0xcc,0xc8,0xcb,0xd9,0xec,0xf8,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff, - 0xf7,0xd8,0xb2,0x9f,0x9a,0x95,0x94,0x99,0x9b,0x9b,0x98,0x98,0xa2,0x99,0x95,0xce, - 0xff,0xff,0xfa,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff, - 0xe3,0xc2,0xc6,0xcb,0xc6,0xc7,0xc9,0xc9,0xca,0xc9,0xc9,0xcc,0xcf,0xda,0xec,0xfb, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xf9,0xe1, - 0xc7,0xac,0x94,0x8e,0x98,0x9d,0x9b,0x98,0x97,0x98,0x9c,0xa0,0x9b,0x96,0xaa,0xdf, - 0xff,0xff,0xf9,0xfb,0xfb,0xfb,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfc,0xfc,0xfc,0xfc,0xff,0xff, - 0xed,0xce,0xc2,0xc5,0xca,0xca,0xc7,0xc6,0xc7,0xca,0xcc,0xca,0xc6,0xc9,0xd5,0xe3, - 0xef,0xfc,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xf0,0xd2,0xab, - 0x95,0x95,0x9a,0x9b,0x99,0x98,0x99,0x9b,0x9e,0x9d,0x9c,0x9a,0x98,0xa9,0xd1,0xf4, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xf9,0xe5,0xcc,0xc2,0xc4,0xc7,0xc8,0xc9,0xc8,0xc7,0xc7,0xc9,0xca,0xca,0xc9,0xc9, - 0xd3,0xe8,0xf8,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfb,0xdd,0xb6,0x9e,0x97, - 0x97,0x98,0x9a,0x9b,0x9a,0x9a,0x9a,0x9b,0x9e,0x9f,0x99,0x97,0xad,0xd7,0xf7,0xff, - 0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfa,0xe7,0xcf,0xc2,0xc4,0xc8,0xc9,0xc8,0xc7,0xc7,0xc8,0xc9,0xc9,0xc8,0xc8, - 0xc8,0xce,0xda,0xed,0xfd,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xe9,0xb5,0x93,0x92,0x99, - 0x9d,0x9b,0x97,0x97,0x9a,0x9f,0xa1,0x9b,0x93,0x99,0xac,0xc4,0xe0,0xf6,0xff,0xff, - 0xfe,0xf7,0xe5,0xd6,0xd5,0xde,0xed,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfc, - 0xfc,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xf2,0xe8,0xe2,0xe3,0xee,0xfb,0xff, - 0xff,0xff,0xfa,0xee,0xdd,0xce,0xc4,0xc1,0xc6,0xca,0xca,0xc7,0xc6,0xc7,0xca,0xcb, - 0xc9,0xc7,0xc7,0xd8,0xf3,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf5,0xcc,0x9d,0x93,0x9d,0x9a, - 0x96,0x9a,0x9b,0x9d,0x9c,0x9a,0x9a,0x9d,0xa2,0xb4,0xd8,0xfa,0xff,0xfd,0xfc,0xff, - 0xf8,0xd8,0xb5,0xa3,0xa1,0xaa,0xc9,0xf2,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xf4,0xd7,0xc2,0xbd,0xbf,0xcd,0xe6,0xfb, - 0xff,0xfd,0xfe,0xff,0xfc,0xe7,0xd2,0xc8,0xc5,0xc5,0xc7,0xc8,0xca,0xc9,0xc8,0xc6, - 0xc8,0xcb,0xc6,0xcc,0xe4,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe9,0xba,0x9b,0x97,0x9b,0x99, - 0x98,0x9b,0x9d,0x9e,0x9d,0x99,0x9b,0xb2,0xd7,0xf1,0xfb,0xfe,0xff,0xff,0xfe,0xf9, - 0xda,0xaf,0x9d,0xa3,0xa2,0x9f,0xae,0xd1,0xf3,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xf5,0xdb,0xc2,0xb9,0xbc,0xbe,0xbc,0xca,0xe7, - 0xfb,0xfe,0xff,0xff,0xff,0xfc,0xf6,0xe7,0xd2,0xc4,0xc4,0xc8,0xca,0xc9,0xc7,0xc6, - 0xc7,0xc8,0xc6,0xc9,0xdb,0xf3,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe7,0xba,0x9e,0x9a,0x9e,0x9e, - 0x9f,0xa1,0x9b,0x94,0x9b,0xb1,0xc9,0xe2,0xfb,0xff,0xfe,0xfc,0xfb,0xff,0xff,0xe4, - 0xb5,0x9f,0xa1,0xaa,0xab,0xa6,0xa5,0xb9,0xe5,0xff,0xff,0xfd,0xff,0xf9,0xeb,0xe0, - 0xe1,0xed,0xfa,0xff,0xfd,0xff,0xff,0xea,0xc8,0xba,0xbc,0xc0,0xc2,0xbe,0xbd,0xcd, - 0xed,0xff,0xff,0xfc,0xfd,0xfe,0xff,0xfd,0xee,0xdf,0xd0,0xc5,0xc3,0xc7,0xca,0xca, - 0xca,0xc9,0xc8,0xcb,0xda,0xf2,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf5,0xce,0xa1,0x93,0x9b,0x9b, - 0x97,0x9a,0x9f,0xa5,0xba,0xdd,0xf8,0xff,0xff,0xfd,0xfe,0xff,0xfc,0xff,0xfc,0xca, - 0x9f,0xa2,0xa9,0xa2,0xa3,0xa8,0xa6,0xb2,0xe0,0xff,0xff,0xfe,0xfa,0xde,0xc2,0xb6, - 0xb7,0xc6,0xe2,0xfa,0xfe,0xff,0xff,0xe6,0xc1,0xba,0xbd,0xba,0xbc,0xc2,0xbe,0xbc, - 0xdb,0xfd,0xff,0xfd,0xff,0xff,0xfd,0xfe,0xff,0xfb,0xec,0xd7,0xcc,0xc8,0xc6,0xc5, - 0xc8,0xc8,0xc4,0xcc,0xe4,0xf9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xed,0xba,0x98,0x96,0x98, - 0x94,0x9c,0xb7,0xdd,0xf5,0xfd,0xfe,0xff,0xfe,0xfe,0xfe,0xfe,0xff,0xfb,0xdc,0xb4, - 0xa1,0xa4,0xa6,0xa3,0xa4,0xa7,0xa9,0xb7,0xe3,0xff,0xff,0xf8,0xe1,0xbb,0xab,0xb2, - 0xb2,0xae,0xc1,0xe4,0xf9,0xff,0xff,0xe8,0xc5,0xbb,0xbc,0xbc,0xbc,0xc0,0xbf,0xbe, - 0xcb,0xe7,0xfc,0xff,0xfe,0xfe,0xff,0xff,0xff,0xfe,0xfe,0xf9,0xec,0xd6,0xc5,0xc2, - 0xc6,0xc6,0xc6,0xd9,0xf6,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfc,0xe6,0xcd,0xbe,0xb8, - 0xbe,0xcf,0xe6,0xf8,0xff,0xff,0xfc,0xfb,0xfe,0xff,0xfd,0xfc,0xff,0xf2,0xbc,0xa2, - 0xa6,0xa4,0xa2,0xa7,0xa8,0xa4,0xab,0xc9,0xed,0xff,0xff,0xed,0xc8,0xad,0xab,0xb5, - 0xb5,0xad,0xb4,0xcf,0xf0,0xff,0xff,0xf0,0xd4,0xbd,0xb8,0xbc,0xbd,0xbb,0xbe,0xc1, - 0xbe,0xd1,0xf6,0xff,0xfc,0xfe,0xff,0xfe,0xfd,0xfe,0xff,0xff,0xfb,0xf0,0xe3,0xd9, - 0xd6,0xda,0xe3,0xf1,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfe,0xf3,0xe9, - 0xf2,0xff,0xff,0xfc,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xdb,0xab,0x9c, - 0xa6,0xa7,0xa2,0xa8,0xaa,0xa2,0xb4,0xe3,0xfb,0xff,0xff,0xe2,0xb8,0xae,0xb1,0xb0, - 0xb0,0xb3,0xb6,0xc1,0xe6,0xff,0xff,0xfb,0xe8,0xc3,0xb5,0xbd,0xbe,0xbb,0xbe,0xc0, - 0xba,0xc5,0xe6,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfd,0xff,0xff,0xf7, - 0xf1,0xf7,0xfe,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xe3,0xb6,0xa4,0xa3, - 0xa4,0xa5,0xa4,0xa7,0xa9,0xaa,0xc7,0xf6,0xff,0xff,0xff,0xe1,0xb4,0xad,0xb2,0xb0, - 0xb0,0xb5,0xb4,0xba,0xe2,0xff,0xff,0xff,0xf7,0xd0,0xba,0xbc,0xbd,0xbb,0xbe,0xbf, - 0xbe,0xbf,0xce,0xed,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xfc,0xfe,0xff, - 0xfd,0xfb,0xfc,0xfe,0xff,0xff,0xfe,0xff,0xff,0xfc,0xff,0xfd,0xce,0xa1,0xa2,0xa9, - 0xa4,0xa4,0xa9,0xa4,0xa2,0xbc,0xe6,0xfe,0xfd,0xff,0xff,0xe0,0xb3,0xab,0xb1,0xb0, - 0xb0,0xb4,0xb2,0xb9,0xe2,0xff,0xff,0xfd,0xfe,0xea,0xca,0xb8,0xb9,0xbe,0xbc,0xbd, - 0xc2,0xbe,0xbf,0xde,0xfd,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfe, - 0xff,0xfe,0xfd,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xfe,0xff,0xfc,0xe1,0xb8,0xa1,0xa2,0xa7, - 0xa4,0xa4,0xab,0xa5,0xa6,0xd2,0xff,0xff,0xf9,0xff,0xff,0xdf,0xb4,0xac,0xb2,0xb0, - 0xb0,0xb4,0xb3,0xba,0xe1,0xff,0xff,0xf9,0xff,0xff,0xdb,0xb8,0xb8,0xbf,0xbb,0xbc, - 0xbf,0xbe,0xbe,0xcd,0xe9,0xfd,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfe,0xfc,0xff,0xf4,0xbf,0xa2,0xa6,0xa6,0xa4, - 0xa6,0xa6,0xa7,0xab,0xbf,0xe7,0xff,0xff,0xf9,0xff,0xff,0xdf,0xb4,0xac,0xb2,0xb0, - 0xb0,0xb4,0xb3,0xba,0xe1,0xff,0xff,0xfa,0xff,0xff,0xea,0xca,0xbd,0xbc,0xbc,0xbd, - 0xbc,0xbe,0xc0,0xbf,0xd3,0xf7,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xe1,0xad,0x9c,0xa7,0xa7,0xa3, - 0xa9,0xa9,0xa0,0xaf,0xdc,0xfb,0xff,0xff,0xfc,0xff,0xff,0xdf,0xb4,0xad,0xb2,0xb0, - 0xb0,0xb4,0xb3,0xba,0xe1,0xff,0xff,0xfc,0xff,0xff,0xfa,0xe3,0xc0,0xb5,0xbd,0xbe, - 0xbb,0xbe,0xbf,0xba,0xc6,0xe9,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe9,0xc0,0xa8,0xa3,0xa6,0xa6,0xa3, - 0xa7,0xa9,0xa5,0xbe,0xf1,0xff,0xfd,0xff,0xfd,0xff,0xff,0xdf,0xb4,0xae,0xb3,0xb0, - 0xb0,0xb4,0xb3,0xba,0xe1,0xff,0xff,0xfd,0xff,0xfe,0xff,0xf4,0xcc,0xb8,0xbd,0xbe, - 0xbc,0xbd,0xbd,0xbd,0xc1,0xd2,0xef,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0xff,0xff,0xd4,0xa4,0xa3,0xa9,0xa4,0xa5,0xa7, - 0xa4,0xa5,0xb8,0xdd,0xfb,0xff,0xff,0xff,0xfc,0xff,0xff,0xe0,0xb5,0xae,0xb3,0xb0, - 0xb0,0xb4,0xb3,0xba,0xe1,0xff,0xff,0xfd,0xff,0xff,0xff,0xfc,0xe4,0xc7,0xb9,0xbb, - 0xbe,0xbe,0xbe,0xc2,0xbd,0xbe,0xe1,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
[elided: a long contiguous run of removed ("-") diff lines consisting solely of hex byte literals (0xff, 0xfe, 0xe2, …) — an embedded binary data array, likely image pixel data, deleted by this patch; the individual byte values carry no reviewable content]
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xf8,0xdb, - 0xbf,0xa5,0x8c,0x85,0x8f,0x94,0x92,0x8d,0x8b,0x8b,0x8f,0x93,0x8b,0x85,0x9f,0xdc, - 0xff,0xff,0xf8,0xfa,0xf9,0xfa,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xf9,0xf8,0xf8,0xfa,0xff,0xff, - 0xf7,0xea,0xe2,0xe1,0xe2,0xe3,0xe2,0xe1,0xe2,0xe3,0xe4,0xe2,0xdf,0xe0,0xe7,0xee, - 0xf5,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff, - 0xf6,0xd5,0xad,0x97,0x92,0x8d,0x8c,0x90,0x91,0x91,0x8c,0x8b,0x95,0x8b,0x87,0xc8, - 0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff, - 0xf3,0xe4,0xe3,0xe4,0xe1,0xe2,0xe3,0xe3,0xe2,0xe1,0xe1,0xe3,0xe3,0xe8,0xf3,0xfc, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff, - 0xfe,0xfb,0xf0,0xd4,0xac,0x8f,0x8a,0x91,0x92,0x91,0x8d,0x8c,0x96,0x8a,0x82,0xc4, - 0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff, - 0xf1,0xe1,0xe2,0xe4,0xe1,0xe2,0xe2,0xe3,0xe3,0xdf,0xdf,0xe8,0xf4,0xfb,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xfe,0xfa, - 0xfb,0xff,0xff,0xf5,0xe0,0xc5,0xa6,0x90,0x87,0x89,0x8e,0x90,0x8f,0x86,0x92,0xd1, - 0xff,0xff,0xfa,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xfe,0xfd,0xff,0xff, - 0xf3,0xe3,0xe0,0xe3,0xe3,0xe2,0xe1,0xe0,0xe1,0xe6,0xed,0xf6,0xfd,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfc,0xff,0xfd,0xdd,0xb4,0x9e,0x95,0x91,0x90,0x8e,0x9a,0xbd,0xea, - 0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfa,0xee,0xe4,0xe2,0xe2,0xe2,0xe3,0xe5,0xea,0xf5,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf3,0xdd,0xc1,0xaa,0xa1,0xa8,0xc8,0xf0,0xff, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfb,0xf0,0xe7,0xe5,0xe7,0xee,0xf6,0xfb,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfc,0xfb,0xfb,0xfb,0xfb,0xfb,0xfa, - 0xfb,0xfc,0xfd,0xfc,0xf8,0xf7,0xfc,0xff,0xfb,0xed,0xde,0xd8,0xe0,0xf0,0xfd,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfe,0xfa,0xf6,0xf4,0xf5,0xfa,0xfe,0xff,0xfe,0xfd,0xfd,0xfe,0xfe,0xfe,0xfe, - 0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xef,0xe2,0xdb,0xda,0xda,0xda,0xda,0xda, - 0xda,0xda,0xda,0xda,0xd9,0xd9,0xda,0xdb,0xdf,0xe9,0xf5,0xfd,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfc,0xf8,0xf4,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf3, - 0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf6,0xfa,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe5,0xc2,0xaf,0xa8,0xa3,0xa3,0xa3,0xa3,0xa3, - 0xa3,0xa3,0xa3,0xa3,0xa3,0xa2,0xa1,0xa3,0xaa,0xb9,0xd7,0xf8,0xff,0xfe,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfc,0xf3,0xe9,0xe4,0xe1,0xe1,0xe1,0xe1,0xe1,0xe1,0xe1,0xe1, - 0xe1,0xe1,0xe1,0xe1,0xe1,0xe2,0xe5,0xeb,0xf7,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfd,0xe4,0xb8,0x9a,0x98,0x9a,0x96,0x96,0x97,0x97,0x97, - 0x97,0x97,0x97,0x97,0x97,0x97,0x96,0x98,0x9a,0x96,0xa8,0xd5,0xf7,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfb,0xf0,0xe3,0xde,0xdf,0xde,0xdd,0xdc,0xdc,0xdc,0xdc,0xdc,0xdc, - 0xdc,0xdc,0xdc,0xdc,0xdc,0xdc,0xdc,0xdd,0xe7,0xf5,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xff,0xfe,0xfd,0xff,0xf7,0xc3,0x99,0x92,0x97,0x9a,0x9a,0x9b,0x9b,0x9a,0x9a, - 0x9a,0x9b,0x9b,0x9a,0x9a,0x9b,0x9a,0x9a,0x98,0x92,0x92,0xad,0xe2,0xff,0xff,0xfc, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xff,0xff,0xf5,0xe4,0xdb,0xdb,0xde,0xde,0xdd,0xdd,0xdd,0xdd,0xdd,0xdc,0xdc, - 0xdc,0xdd,0xdd,0xdd,0xdd,0xdc,0xda,0xd9,0xdc,0xeb,0xfc,0xff,0xfe,0xff,0xff,0xff, - 0xfe,0xff,0xfd,0xfb,0xff,0xf2,0xac,0x92,0xa0,0x9c,0x96,0x99,0x98,0x98,0x99,0x99, - 0x99,0x98,0x98,0x99,0x99,0x98,0x98,0x96,0x96,0x9e,0x99,0x9a,0xd2,0xff,0xff,0xfa, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfd,0xff,0xff,0xf0,0xde,0xdc,0xde,0xdb,0xdb,0xdc,0xdc,0xdc,0xdc,0xdc,0xdc,0xdc, - 0xdc,0xdc,0xdc,0xdc,0xdc,0xdb,0xdc,0xdd,0xda,0xe4,0xfb,0xff,0xfe,0xfe,0xff,0xff, - 
0xfe,0xff,0xfd,0xfb,0xff,0xf2,0xae,0x94,0xa1,0x9d,0x97,0x9a,0x99,0x9a,0x9b,0x9b, - 0x9a,0x99,0x99,0x9a,0x9b,0x99,0x99,0x98,0x99,0xa0,0x9a,0x9c,0xd3,0xff,0xff,0xfa, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfd,0xff,0xff,0xef,0xdc,0xdb,0xdd,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb, - 0xdb,0xdb,0xdb,0xdb,0xdb,0xda,0xdc,0xdd,0xd9,0xe3,0xfa,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xff,0xfe,0xfd,0xff,0xf7,0xc3,0x9c,0x97,0x9b,0x9d,0x9d,0x9d,0x9e,0x9e,0x9e, - 0x9e,0x9d,0x9d,0x9e,0x9e,0x9e,0x9c,0x9e,0x9d,0x96,0x95,0xb0,0xe3,0xff,0xff,0xfc, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xff,0xff,0xf4,0xe1,0xd8,0xd9,0xda,0xdb,0xdb,0xdc,0xdc,0xdc,0xdc,0xda,0xd9, - 0xda,0xdc,0xdc,0xdc,0xdc,0xdc,0xdb,0xd9,0xdb,0xe9,0xfc,0xff,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xfc,0xe3,0xb9,0x9e,0x9b,0x9d,0x9b,0x9b,0x9b,0x9b,0x9b, - 0x9b,0x9b,0x9b,0x9b,0x9b,0x9b,0x9a,0x9c,0x9e,0x9a,0xaa,0xd5,0xf7,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfb,0xee,0xe1,0xda,0xd9,0xd9,0xda,0xdb,0xdb,0xdb,0xdb,0xd9,0xd8, - 0xd9,0xdb,0xdb,0xdb,0xdb,0xdc,0xdb,0xdb,0xe5,0xf5,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfe,0xff,0xfe,0xe5,0xc3,0xb0,0xa9,0xa6,0xa6,0xa6,0xa6,0xa6, - 0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa8,0xae,0xbc,0xd8,0xf7,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfc,0xf1,0xe5,0xdf,0xde,0xde,0xde,0xde,0xde,0xde,0xde,0xde, - 0xde,0xde,0xde,0xde,0xde,0xde,0xe0,0xe8,0xf5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xee,0xe1,0xda,0xd9,0xd9,0xd9,0xd9,0xd9, - 0xd9,0xd9,0xd8,0xd8,0xd8,0xd8,0xd9,0xdb,0xde,0xe8,0xf5,0xfe,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xff,0xff,0xfb,0xf6,0xf2,0xf2,0xf2,0xf1,0xf1,0xf1,0xf1,0xf1,0xf1, - 0xf1,0xf1,0xf1,0xf1,0xf1,0xf1,0xf3,0xf8,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfc,0xfb,0xfb,0xfb,0xfb,0xfb,0xfb, - 0xfb,0xfc,0xfd,0xfb,0xf8,0xf8,0xfc,0xff,0xfb,0xf0,0xe3,0xe0,0xe7,0xf3,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfe,0xf9,0xf3,0xf2,0xf4,0xf9,0xfe,0xff,0xfd,0xfc,0xfd,0xfe,0xfe,0xfe,0xfe, - 0xfd,0xfd,0xfe,0xfe,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf6,0xe3,0xca,0xb7,0xb0,0xb7,0xd3,0xf3,0xff, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xf8,0xea,0xde,0xdc,0xe0,0xe8,0xf2,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfd,0xfc,0xff,0xfe,0xe4,0xc0,0xad,0xa7,0xa6,0xa4,0xa3,0xad,0xca,0xee, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff, - 0xf6,0xe4,0xd8,0xd4,0xd4,0xd5,0xd6,0xd9,0xe2,0xf2,0xfe,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xfe,0xfb, - 0xfc,0xff,0xff,0xf7,0xe6,0xd0,0xb7,0xa3,0x9b,0x9e,0xa4,0xa6,0xa5,0x9e,0xa9,0xdb, - 0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff, - 0xec,0xd2,0xcf,0xd3,0xd4,0xd3,0xd1,0xd0,0xd4,0xdd,0xe9,0xf4,0xfc,0xff,0xff,0xfe, - 0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xfc,0xf4,0xdd,0xba,0xa2,0x9f,0xa6,0xa6,0xa5,0xa3,0xa2,0xab,0xa4,0xa0,0xd2, - 0xff,0xff,0xfa,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff, - 0xe7,0xcc,0xcf,0xd5,0xd1,0xd3,0xd5,0xd5,0xd5,0xd1,0xd3,0xe0,0xef,0xf9,0xfd,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff, - 0xf8,0xdc,0xba,0xaa,0xa6,0xa2,0xa1,0xa5,0xa7,0xa7,0xa4,0xa3,0xab,0xa4,0xa1,0xd4, - 0xff,0xff,0xfa,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff, - 0xe8,0xce,0xd0,0xd4,0xd0,0xd2,0xd4,0xd4,0xd4,0xd2,0xd2,0xd5,0xd8,0xe0,0xf0,0xfc, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfa,0xe4, - 0xcd,0xb6,0x9f,0x9b,0xa5,0xa9,0xa7,0xa3,0xa2,0xa3,0xa7,0xaa,0xa6,0xa1,0xb4,0xe3, - 0xff,0xff,0xfa,0xfc,0xfb,0xfb,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfc,0xfd,0xfd,0xff,0xff, - 0xf0,0xd8,0xce,0xd0,0xd3,0xd2,0xd0,0xd0,0xd3,0xd6,0xd6,0xd4,0xd1,0xd4,0xdd,0xe7, - 0xf2,0xfd,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xf2,0xd6,0xb4, - 0xa1,0xa2,0xa6,0xa6,0xa5,0xa4,0xa4,0xa6,0xa8,0xa7,0xa6,0xa7,0xa5,0xb3,0xd6,0xf6, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfa,0xea,0xd7,0xcf,0xd1,0xd1,0xd1,0xd2,0xd3,0xd3,0xd3,0xd4,0xd5,0xd5,0xd3,0xd2, - 0xdc,0xed,0xfa,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfb,0xe0,0xbe,0xaa,0xa3, - 0xa2,0xa2,0xa5,0xa7,0xa6,0xa4,0xa4,0xa6,0xa9,0xaa,0xa5,0xa4,0xb8,0xdb,0xf7,0xff, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfb,0xec,0xd9,0xcf,0xd0,0xd2,0xd2,0xd1,0xd1,0xd1,0xd4,0xd5,0xd4,0xd3,0xd3, - 0xd3,0xd6,0xe0,0xf0,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xeb,0xbd,0x9f,0x9f,0xa6, - 0xa8,0xa6,0xa3,0xa3,0xa3,0xa7,0xaa,0xa6,0xa0,0xa6,0xb8,0xcb,0xe3,0xf7,0xff,0xff, - 0xfe,0xf8,0xe9,0xdc,0xdb,0xe2,0xf0,0xfd,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfc, - 0xfc,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf5,0xec,0xe7,0xe7,0xf1,0xfb,0xff, - 0xff,0xff,0xfb,0xf1,0xe4,0xd9,0xd0,0xcd,0xd0,0xd2,0xd2,0xd1,0xd1,0xd1,0xd4,0xd6, - 0xd4,0xd1,0xd1,0xe0,0xf5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xd2,0xa9,0x9f,0xa8,0xa5, - 0xa2,0xa5,0xa6,0xa9,0xa7,0xa4,0xa5,0xa9,0xae,0xbd,0xdd,0xfa,0xff,0xfd,0xfd,0xff, - 0xf9,0xde,0xbe,0xaf,0xad,0xb5,0xd0,0xf3,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xf7,0xdf,0xcc,0xc8,0xcb,0xd6,0xe9,0xfb, - 0xff,0xfd,0xfe,0xff,0xfc,0xec,0xdb,0xd4,0xd1,0xd0,0xd0,0xd1,0xd1,0xd1,0xd2,0xd1, - 0xd3,0xd5,0xd2,0xd6,0xe9,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xeb,0xc3,0xa7,0xa2,0xa6,0xa5, - 0xa3,0xa3,0xa6,0xaa,0xa9,0xa5,0xa6,0xbb,0xdd,0xf3,0xfa,0xfe,0xff,0xff,0xfe,0xf9, - 0xdf,0xba,0xa9,0xad,0xae,0xac,0xb8,0xd6,0xf4,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf6,0xe1,0xce,0xc5,0xc7,0xca,0xc8,0xd3,0xeb, - 0xfc,0xfe,0xff,0xff,0xff,0xfd,0xf8,0xec,0xdb,0xd0,0xcf,0xd2,0xd2,0xd1,0xd0,0xd1, - 0xd3,0xd4,0xd2,0xd5,0xe2,0xf6,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xea,0xc2,0xa9,0xa5,0xa8,0xa8, - 0xa8,0xa9,0xa6,0xa1,0xa7,0xba,0xce,0xe5,0xfb,0xff,0xfe,0xfc,0xfc,0xff,0xff,0xe8, - 0xbf,0xaa,0xac,0xb4,0xb5,0xb1,0xb1,0xc1,0xe8,0xff,0xff,0xfd,0xff,0xfa,0xee,0xe4, - 0xe4,0xef,0xfb,0xff,0xfd,0xff,0xff,0xed,0xd0,0xc7,0xc8,0xcb,0xcb,0xc9,0xc9,0xd6, - 0xf0,0xff,0xff,0xfd,0xfd,0xff,0xff,0xfd,0xf1,0xe5,0xda,0xd0,0xce,0xd1,0xd3,0xd3, - 0xd3,0xd3,0xd1,0xd3,0xe1,0xf5,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xd3,0xab,0x9f,0xa6,0xa5, - 0xa1,0xa4,0xaa,0xb0,0xc2,0xe0,0xf8,0xff,0xfe,0xfd,0xff,0xff,0xfc,0xff,0xfc,0xd2, - 0xab,0xac,0xb3,0xaf,0xaf,0xb3,0xb3,0xbc,0xe4,0xff,0xff,0xfe,0xfa,0xe3,0xcb,0xc0, - 0xc1,0xce,0xe6,0xfa,0xfe,0xff,0xff,0xea,0xcb,0xc6,0xc9,0xc6,0xc6,0xcc,0xcb,0xca, - 0xe1,0xfd,0xff,0xfe,0xff,0xfe,0xfe,0xfe,0xff,0xfc,0xf0,0xdf,0xd5,0xd3,0xd1,0xcf, - 0xd1,0xd1,0xce,0xd4,0xe9,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xf0,0xc2,0xa2,0xa1,0xa4, - 0xa1,0xa6,0xbe,0xe0,0xf6,0xfd,0xfe,0xff,0xfe,0xfe,0xfe,0xfe,0xff,0xfc,0xe1,0xbd, - 0xab,0xae,0xb2,0xaf,0xb0,0xb4,0xb6,0xc1,0xe7,0xff,0xff,0xfa,0xe7,0xc6,0xb7,0xbd, - 0xbd,0xba,0xca,0xe9,0xfb,0xff,0xff,0xec,0xce,0xc5,0xc7,0xc8,0xc7,0xc9,0xca,0xca, - 0xd5,0xeb,0xfd,0xff,0xfe,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfb,0xef,0xdd,0xd1,0xce, - 0xcf,0xce,0xcf,0xe0,0xf8,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfc,0xe9,0xd2,0xc4,0xc1, - 0xc5,0xd3,0xe7,0xf8,0xff,0xff,0xfd,0xfc,0xfe,0xff,0xfe,0xfc,0xff,0xf4,0xc6,0xad, - 0xb0,0xb1,0xb0,0xb2,0xb2,0xaf,0xb8,0xd1,0xef,0xff,0xff,0xef,0xd0,0xba,0xb7,0xc0, - 0xc0,0xb9,0xc0,0xd7,0xf2,0xff,0xff,0xf3,0xda,0xc7,0xc4,0xc9,0xc9,0xc6,0xc9,0xcb, - 0xca,0xdb,0xf9,0xff,0xfd,0xfe,0xff,0xfe,0xfd,0xfe,0xff,0xff,0xfb,0xf3,0xe9,0xe1, - 0xde,0xe0,0xe8,0xf3,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfe,0xf4,0xea, - 0xf4,0xff,0xff,0xfc,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xe0,0xb5,0xa7, - 0xb0,0xb2,0xaf,0xb2,0xb3,0xad,0xbe,0xe7,0xfb,0xff,0xff,0xe6,0xc2,0xbb,0xbd,0xbb, - 0xbc,0xc0,0xc1,0xcb,0xea,0xff,0xff,0xfc,0xeb,0xcc,0xc1,0xc9,0xc9,0xc6,0xc9,0xca, - 0xc6,0xd0,0xeb,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xf9, - 0xf4,0xf9,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe7,0xbf,0xae,0xae, - 0xb0,0xb1,0xb0,0xb2,0xb4,0xb5,0xcf,0xf7,0xff,0xff,0xff,0xe4,0xbe,0xba,0xbe,0xbb, - 0xbe,0xc2,0xbf,0xc4,0xe7,0xff,0xff,0xff,0xf9,0xd8,0xc5,0xc7,0xc8,0xc7,0xc8,0xc9, - 0xc9,0xca,0xd6,0xf0,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xfc,0xfe,0xff, - 0xfe,0xfc,0xfd,0xfe,0xff,0xff,0xfe,0xff,0xff,0xfd,0xff,0xfd,0xd4,0xad,0xac,0xb4, - 0xb1,0xb0,0xb3,0xb1,0xb1,0xc6,0xe9,0xfe,0xfe,0xff,0xff,0xe5,0xbf,0xb9,0xbc,0xbc, - 0xbd,0xc0,0xbd,0xc4,0xe7,0xff,0xff,0xfe,0xfe,0xed,0xd1,0xc2,0xc5,0xc9,0xc7,0xc8, - 0xcb,0xc8,0xca,0xe3,0xfd,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfe, - 0xff,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfc,0xe5,0xc0,0xab,0xae,0xb3, - 0xb0,0xb0,0xb6,0xb2,0xb3,0xd8,0xff,0xff,0xfa,0xff,0xff,0xe4,0xc0,0xba,0xbd,0xbb, - 0xbc,0xc0,0xbe,0xc5,0xe6,0xff,0xff,0xfa,0xff,0xff,0xe1,0xc4,0xc5,0xca,0xc7,0xc7, - 0xc9,0xc9,0xc9,0xd6,0xed,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xf5,0xc8,0xae,0xaf,0xb1,0xb0, - 0xb1,0xb2,0xb3,0xb5,0xc6,0xe9,0xff,0xff,0xfa,0xff,0xff,0xe4,0xc0,0xba,0xbd,0xbc, - 0xbe,0xc1,0xbe,0xc5,0xe6,0xff,0xff,0xfb,0xff,0xff,0xee,0xd3,0xc7,0xc7,0xc8,0xc8, - 0xc7,0xc9,0xca,0xc9,0xda,0xf8,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xe4,0xb7,0xa8,0xb1,0xb2,0xae, - 0xb3,0xb4,0xae,0xba,0xe1,0xfb,0xff,0xff,0xfc,0xff,0xff,0xe4,0xc0,0xba,0xbd,0xbc, - 0xbd,0xc0,0xbe,0xc5,0xe6,0xff,0xff,0xfd,0xff,0xff,0xfc,0xe7,0xc9,0xc1,0xc9,0xc9, - 0xc6,0xc9,0xc9,0xc5,0xd0,0xed,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xec,0xc8,0xb1,0xae,0xb2,0xb2,0xaf, - 0xb2,0xb5,0xb2,0xc8,0xf5,0xff,0xfe,0xff,0xfe,0xff,0xff,0xe5,0xc0,0xba,0xbd,0xbb, - 0xbc,0xc0,0xbe,0xc5,0xe6,0xff,0xff,0xfe,0xff,0xfe,0xff,0xf6,0xd3,0xc3,0xc9,0xc9, - 0xc7,0xc8,0xc8,0xc9,0xcc,0xda,0xf1,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xd9,0xaf,0xad,0xb4,0xb0,0xb0,0xb2, - 0xb0,0xb2,0xc2,0xe1,0xfc,0xff,0xff,0xff,0xfd,0xff,0xff,0xe5,0xc0,0xba,0xbe,0xbc, - 0xbc,0xbf,0xbe,0xc5,0xe6,0xff,0xff,0xfd,0xff,0xff,0xff,0xfc,0xe8,0xd0,0xc6,0xc7, - 0xc9,0xc7,0xc7,0xcb,0xc8,0xc9,0xe6,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xf9,0xcd,0xa8,0xac,0xb4,0xae,0xae,0xb4, - 0xb0,0xb1,0xd4,0xfa,0xff,0xfc,0xff,0xff,0xfc,0xff,0xff,0xe5,0xc0,0xba,0xbd,0xbb, - 0xbc,0xc0,0xbe,0xc5,0xe6,0xff,0xff,0xfc,0xff,0xff,0xfd,0xff,0xfb,0xde,0xc4,0xc6, - 0xca,0xc6,0xc7,0xca,0xc5,0xc5,0xde,0xfa,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xee,0xc6,0xab,0xaf,0xb3,0xb0,0xb1,0xb2, - 0xb1,0xc1,0xe7,0xff,0xff,0xfc,0xff,0xff,0xfc,0xff,0xff,0xe5,0xc0,0xba,0xbd,0xbb, - 0xbc,0xc0,0xbe,0xc5,0xe6,0xff,0xff,0xfc,0xff,0xff,0xfd,0xff,0xff,0xeb,0xce,0xc5, - 0xc9,0xc9,0xc8,0xca,0xc8,0xc6,0xd9,0xf4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xc9,0xac,0xb0,0xb3,0xb2,0xb4,0xae, - 0xb8,0xdd,0xfa,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xe5,0xc0,0xba,0xbe,0xbc, - 0xbc,0xbf,0xbe,0xc5,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xfb,0xe6,0xca, - 0xc4,0xca,0xc9,0xca,0xc7,0xc5,0xdb,0xf7,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xfc,0xd5,0xae,0xab,0xb1,0xae,0xab,0xaf, - 0xcc,0xf4,0xff,0xfd,0xfe,0xff,0xff,0xff,0xfc,0xff,0xff,0xe5,0xc0,0xba,0xbe,0xbc, - 0xbc,0xbf,0xbe,0xc5,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xfe,0xfd,0xff,0xf7,0xd8, - 0xc4,0xc3,0xc5,0xc7,0xc3,0xc6,0xe3,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xeb,0xc8,0xb7,0xb5,0xb3,0xb5,0xca, - 0xeb,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xe5,0xc0,0xba,0xbd,0xbb, - 0xbc,0xc0,0xbe,0xc5,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xef, - 0xd7,0xc9,0xc9,0xcb,0xcc,0xd8,0xf1,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xf2,0xde,0xce,0xcd,0xdc,0xf1, - 0xfe,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xfc,0xff,0xff,0xe5,0xc0,0xba,0xbd,0xbb, - 0xbc,0xc0,0xbe,0xc5,0xe6,0xff,0xff,0xfc,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xf4,0xe5,0xdd,0xdd,0xe8,0xf6,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf9,0xef,0xef,0xfa,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xe5,0xc2,0xbc,0xbe,0xba, - 0xbb,0xc0,0xbf,0xc6,0xe7,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfb,0xf4,0xf4,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xea,0xc8,0xbb,0xbc,0xbe, - 0xbe,0xbe,0xbf,0xcb,0xeb,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf5,0xd8,0xbd,0xb8,0xc1, - 0xc1,0xba,0xc1,0xd9,0xf5,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xee,0xcf,0xbb,0xb9, - 0xb9,0xbc,0xd1,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfc,0xeb,0xd6,0xca, - 0xca,0xd7,0xec,0xfd,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xf9,0xf6, - 0xf6,0xf9,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfd, - 0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe, - 0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfd, - 0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfb, - 0xfb,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xf5,0xf0, - 0xf0,0xf5,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfa,0xd9,0xb1,0x99, - 0x99,0xb1,0xd9,0xfa,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xe2,0xa5,0x7b,0x74, - 0x74,0x7b,0xa4,0xde,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfc,0xfc,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xee,0xb8,0x82,0x73,0x82, - 0x81,0x74,0x7f,0xaf,0xe8,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0xff,0xff,0xd7,0x97,0x7d,0x7b,0x7d, - 0x7d,0x7b,0x7a,0x92,0xd5,0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xfe,0xff,0xff,0xfe,0xfc,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf8,0xe9,0xe8,0xf6,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xfa,0xff,0xff,0xce,0x8b,0x7e,0x7f,0x77, - 0x77,0x7f,0x7b,0x87,0xcc,0xff,0xff,0xfa,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xff,0xf6,0xe5,0xe5,0xf5,0xfe,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xed,0xce,0xb4,0xb2,0xc8,0xe9, - 0xfe,0xff,0xfe,0xff,0xff,0xfe,0xff,0xff,0xfa,0xff,0xff,0xcd,0x88,0x7a,0x7f,0x7b, - 0x7b,0x7e,0x78,0x85,0xcc,0xff,0xff,0xfa,0xff,0xff,0xfe,0xff,0xff,0xfe,0xff,0xfe, - 0xe7,0xc2,0xa9,0xa9,0xc6,0xeb,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xfc,0xff,0xff,0xe1,0xab,0x90,0x8c,0x87,0x89,0xab, - 0xdf,0xfc,0xff,0xfe,0xff,0xff,0xff,0xff,0xfa,0xff,0xff,0xcd,0x88,0x7a,0x7e,0x7a, - 0x7a,0x7e,0x78,0x85,0xcc,0xff,0xff,0xfa,0xff,0xff,0xff,0xff,0xfe,0xff,0xfc,0xdc, - 0xa2,0x7c,0x79,0x7d,0x81,0xa1,0xdd,0xff,0xff,0xfc,0xff,0xff,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xfd,0xc0,0x81,0x7a,0x84,0x80,0x7a,0x7f, - 0xad,0xee,0xff,0xfc,0xfe,0xff,0xff,0xff,0xfa,0xff,0xff,0xcd,0x88,0x7c,0x80,0x79, - 0x79,0x7f,0x7a,0x85,0xcb,0xff,0xff,0xf9,0xff,0xff,0xff,0xfd,0xfc,0xff,0xec,0xa6, - 0x72,0x6b,0x70,0x75,0x6a,0x71,0xb9,0xfd,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xea,0xac,0x7f,0x84,0x88,0x86,0x88,0x7a, - 0x89,0xc8,0xf7,0xff,0xff,0xfe,0xff,0xff,0xf9,0xff,0xff,0xcd,0x88,0x7b,0x7f,0x79, - 0x79,0x7e,0x79,0x85,0xcb,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xf6,0xc2,0x7d, - 0x6d,0x7b,0x78,0x7a,0x75,0x6f,0xa3,0xe8,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xe3,0xa9,0x81,0x83,0x88,0x84,0x85,0x85, - 0x81,0x98,0xd7,0xff,0xff,0xfa,0xff,0xff,0xf9,0xff,0xff,0xcd,0x88,0x7b,0x7f,0x79, - 0x79,0x7e,0x79,0x85,0xcb,0xff,0xff,0xf9,0xff,0xff,0xfa,0xff,0xff,0xd3,0x8e,0x74, - 0x77,0x77,0x75,0x7a,0x75,0x70,0x9e,0xe1,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xf3,0xb3,0x7c,0x7f,0x89,0x81,0x81,0x89, - 0x81,0x7e,0xb7,0xf7,0xff,0xfa,0xff,0xff,0xf9,0xff,0xff,0xce,0x89,0x7b,0x80,0x7a, - 0x7a,0x7e,0x79,0x85,0xcb,0xff,0xff,0xf9,0xff,0xff,0xfa,0xff,0xf8,0xb2,0x72,0x74, - 0x7c,0x71,0x72,0x7b,0x6e,0x6a,0xa7,0xf0,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xfa,0xff,0xff,0xc5,0x84,0x82,0x8a,0x83,0x85,0x87, - 0x81,0x7f,0x99,0xce,0xf9,0xff,0xff,0xff,0xfb,0xff,0xff,0xce,0x89,0x7c,0x81,0x7b, - 0x7a,0x7e,0x79,0x85,0xcb,0xff,0xff,0xfb,0xff,0xff,0xff,0xf9,0xca,0x90,0x72,0x73, - 0x79,0x75,0x74,0x7c,0x71,0x72,0xbb,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xfc,0xff,0xff,0xdf,0xa6,0x89,0x84,0x85,0x86,0x82, - 0x85,0x86,0x7e,0xa4,0xf0,0xff,0xfc,0xff,0xfc,0xff,0xff,0xcd,0x88,0x7c,0x81,0x7b, - 0x7a,0x7e,0x79,0x85,0xcb,0xff,0xff,0xfc,0xff,0xfc,0xff,0xed,0x9a,0x71,0x78,0x77, - 0x73,0x77,0x76,0x75,0x78,0x98,0xd9,0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfb,0xd3,0x92,0x7d,0x89,0x87,0x81, - 0x88,0x88,0x79,0x8f,0xd2,0xfb,0xfe,0xff,0xfb,0xff,0xff,0xcd,0x88,0x7b,0x80,0x7a, - 0x7a,0x7e,0x79,0x85,0xcb,0xff,0xff,0xfb,0xff,0xfe,0xfb,0xce,0x83,0x6a,0x7a,0x7a, - 0x72,0x79,0x79,0x6a,0x81,0xcb,0xfb,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfd,0xfc,0xff,0xf1,0xab,0x84,0x88,0x87,0x83, - 0x86,0x86,0x83,0x85,0xa3,0xdd,0xff,0xff,0xf7,0xff,0xff,0xcd,0x89,0x7c,0x7f,0x79, - 0x79,0x7e,0x79,0x85,0xcb,0xff,0xff,0xf6,0xff,0xff,0xda,0x9b,0x7a,0x75,0x76,0x77, - 0x74,0x78,0x78,0x72,0x9c,0xed,0xff,0xfb,0xfd,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xfe,0xff,0xfb,0xd6,0xa1,0x82,0x83,0x88, - 0x83,0x83,0x89,0x7f,0x80,0xbf,0xff,0xff,0xf5,0xff,0xff,0xcd,0x89,0x7d,0x80,0x79, - 0x79,0x7f,0x7a,0x85,0xcb,0xff,0xff,0xf4,0xff,0xff,0xb9,0x74,0x72,0x7c,0x74,0x74, - 0x79,0x74,0x71,0x92,0xcf,0xfa,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xfa,0xfd,0xff, - 0xfd,0xfb,0xfc,0xfe,0xff,0xff,0xfe,0xff,0xff,0xfb,0xff,0xfb,0xbe,0x84,0x83,0x8b, - 0x83,0x83,0x89,0x81,0x7b,0xa0,0xdd,0xfe,0xfc,0xff,0xff,0xce,0x89,0x7c,0x7f,0x7a, - 0x7a,0x7d,0x77,0x84,0xcc,0xff,0xff,0xfb,0xfe,0xda,0x97,0x6d,0x71,0x7a,0x74,0x73, - 0x7d,0x72,0x71,0xb3,0xfa,0xff,0xfa,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xfe,0xfd,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xdc,0xa1,0x87,0x85, - 0x85,0x85,0x84,0x86,0x85,0x82,0xad,0xf1,0xff,0xff,0xff,0xcf,0x89,0x7d,0x80,0x79, - 0x78,0x7e,0x7a,0x86,0xce,0xff,0xff,0xff,0xf0,0xa7,0x78,0x78,0x77,0x74,0x76,0x76, - 0x75,0x75,0x90,0xd5,0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfe,0xf2,0xe7, - 0xf1,0xff,0xff,0xfb,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xcf,0x90,0x7b, - 0x88,0x88,0x82,0x87,0x86,0x78,0x91,0xd6,0xf9,0xff,0xff,0xd2,0x8f,0x80,0x80,0x7a, - 0x79,0x7d,0x7d,0x8f,0xd2,0xff,0xff,0xf9,0xd2,0x87,0x6c,0x7a,0x79,0x72,0x7a,0x7a, - 0x69,0x7e,0xc6,0xf9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfc, - 0xf9,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfb,0xe0,0xc7,0xb9,0xb1, - 0xb6,0xcb,0xe5,0xf8,0xff,0xff,0xfc,0xfb,0xfe,0xff,0xfd,0xfb,0xff,0xed,0xa7,0x84, - 0x8a,0x86,0x81,0x86,0x86,0x7d,0x86,0xb2,0xe5,0xff,0xff,0xe0,0xa5,0x7d,0x77,0x84, - 0x81,0x73,0x7a,0xa5,0xe0,0xff,0xff,0xe3,0xab,0x7b,0x6f,0x78,0x79,0x72,0x76,0x79, - 0x72,0x99,0xea,0xff,0xfb,0xfd,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xfe,0xf9,0xf2,0xec, - 0xeb,0xed,0xf1,0xf7,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xe9,0xab,0x86,0x89,0x8a, - 0x83,0x8d,0xb0,0xda,0xf4,0xfd,0xff,0xff,0xfe,0xff,0xfe,0xfe,0xff,0xfa,0xd2,0x9d, - 0x82,0x84,0x87,0x83,0x84,0x85,0x82,0x96,0xd6,0xff,0xff,0xf4,0xcf,0x92,0x74,0x7d, - 0x7c,0x72,0x8f,0xcc,0xf3,0xff,0xff,0xd2,0x8d,0x77,0x77,0x74,0x74,0x78,0x75,0x71, - 0x8e,0xca,0xf9,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xfc,0xf6,0xec,0xe3,0xe0, - 0xe1,0xe1,0xdf,0xe9,0xfa,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xc9,0x96,0x84,0x8c,0x8c, - 0x88,0x8b,0x91,0x98,0xb1,0xda,0xf8,0xff,0xfe,0xfc,0xfe,0xff,0xfb,0xff,0xfb,0xbb, - 0x80,0x84,0x8b,0x82,0x82,0x87,0x81,0x8c,0xd0,0xff,0xff,0xfe,0xf5,0xc8,0x97,0x80, - 0x80,0x96,0xc6,0xf5,0xfe,0xff,0xff,0xce,0x85,0x76,0x79,0x72,0x71,0x7c,0x73,0x6f, - 0xaf,0xf9,0xff,0xfb,0xff,0xff,0xfe,0xfe,0xff,0xfe,0xf7,0xec,0xe6,0xe5,0xe3,0xe1, - 0xe1,0xe1,0xe0,0xe4,0xf2,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe6,0xb5,0x95,0x8f,0x92,0x92, - 0x92,0x92,0x8c,0x84,0x8c,0xa7,0xc4,0xe0,0xfb,0xff,0xfe,0xfb,0xf9,0xff,0xff,0xda, - 0x9e,0x82,0x84,0x8a,0x8b,0x86,0x82,0x98,0xd7,0xff,0xff,0xfc,0xff,0xf3,0xd9,0xc5, - 0xc5,0xd9,0xf2,0xff,0xfc,0xff,0xff,0xd4,0x8f,0x75,0x77,0x7d,0x7c,0x73,0x70,0x8e, - 0xd5,0xff,0xff,0xfa,0xff,0xff,0xff,0xfe,0xf8,0xf1,0xea,0xe3,0xe0,0xe2,0xe4,0xe4, - 0xe3,0xe3,0xe3,0xe4,0xec,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
-	[... roughly 900 further lines of 16-byte hexadecimal rows (values 0x00-0xff), all deleted
-	 by this hunk: the remainder of an embedded data array that appears to be 8-bit grayscale
-	 image pixels. The individual byte values carry no further information and are not
-	 reproduced here. ...]
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xf8,0xd4,0xb7,0xba,0xc0,0xbc,0xbd,0xc1, - 0xbe,0xc2,0xe3,0xff,0xff,0xfc,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xcb,0xc7,0xc8,0xe6,0xff,0xff,0xfc,0xff,0xff,0xfe,0xff,0xff,0xf4,0xde,0xd1, - 0xd0,0xd2,0xd1,0xd2,0xd3,0xd4,0xde,0xf0,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xfc,0xd7,0xb8,0xbc,0xc0,0xbd,0xc0,0xbe, - 0xc2,0xd8,0xf3,0xff,0xff,0xfe,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xcb,0xc7,0xc8,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xfe,0xff,0xef,0xd3, - 0xcd,0xd3,0xd2,0xd1,0xd2,0xd3,0xdf,0xf2,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xe0,0xbb,0xb7,0xbd,0xbd,0xbc,0xba, - 0xcc,0xef,0xff,0xfe,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xcb,0xc7,0xc8,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xfe,0xff,0xfa,0xe3, - 0xd1,0xcd,0xcf,0xd1,0xce,0xcf,0xe5,0xfb,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf0,0xd3,0xc3,0xc0,0xc1,0xc2,0xcb, - 0xe3,0xfb,0xff,0xfe,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xcb,0xc7,0xc8,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7, - 0xe3,0xd2,0xd2,0xd4,0xd3,0xdb,0xf1,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf6,0xe4,0xd5,0xd3,0xde,0xef, - 0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xca,0xc6,0xc8,0xe7,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xf7,0xeb,0xe3,0xe1,0xe8,0xf5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xf1,0xf0,0xf8,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xef,0xd4,0xc9,0xc8,0xc7, - 0xc6,0xca,0xc8,0xca,0xe8,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfd,0xf6,0xf4,0xf9,0xfe,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf4,0xda,0xc8,0xc6,0xca, - 0xc9,0xc9,0xc9,0xcf,0xeb,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xe7,0xca,0xc2,0xcb, - 0xcb,0xc6,0xc9,0xd9,0xf2,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xf5,0xdc,0xc8,0xc4, - 0xc4,0xc5,0xd3,0xec,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf4,0xe0,0xd2, - 0xd2,0xd9,0xe9,0xfc,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xf7, - 0xf7,0xfa,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd, - 0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd, - 0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfb, - 0xfb,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf8,0xf2, - 0xf1,0xf6,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xea,0xc4,0xa3, - 0x9e,0xb2,0xd7,0xfa,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xed,0xb8,0x8d,0x80, - 0x7d,0x83,0xa6,0xd9,0xf9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf9,0xcc,0x8c,0x79,0x8b, - 0x8b,0x7f,0x84,0xa9,0xe2,0xff,0xff,0xfc,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff,0xff,0xff,0xfe, - 0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe5,0xaf,0x88,0x81,0x87, - 0x87,0x86,0x7f,0x8b,0xcf,0xff,0xff,0xfa,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xfe,0xfc,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xed,0xe8,0xf3,0xfe, - 0xff,0xfe,0xfe,0xff,0xff,0xfe,0xff,0xff,0xfc,0xff,0xff,0xdd,0xa5,0x8c,0x87,0x81, - 0x81,0x89,0x81,0x82,0xc7,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xfb,0xe6,0xe5,0xf5,0xfe,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xd7,0xbb,0xb5,0xc6,0xe7, - 0xfd,0xff,0xfe,0xff,0xff,0xfe,0xff,0xff,0xfc,0xff,0xff,0xde,0xa5,0x88,0x84,0x85, - 0x85,0x89,0x7d,0x7e,0xc6,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xfe,0xff,0xff, - 0xee,0xcb,0xae,0xac,0xc6,0xe4,0xf9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xec,0xc0,0x9d,0x92,0x92,0x94,0xa9, - 0xd5,0xfa,0xff,0xfe,0xfe,0xff,0xff,0xff,0xfc,0xff,0xff,0xde,0xa4,0x86,0x84,0x85, - 0x84,0x88,0x7d,0x7e,0xc6,0xff,0xff,0xf9,0xff,0xff,0xfe,0xff,0xff,0xfe,0xff,0xed, - 0xb3,0x83,0x7f,0x87,0x83,0x94,0xd3,0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0xff,0xff,0xd0,0x93,0x85,0x8b,0x8c,0x88,0x86, - 0xaa,0xe7,0xff,0xfc,0xfe,0xff,0xff,0xff,0xfc,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x83,0x8a,0x80,0x7f,0xc5,0xff,0xff,0xf9,0xff,0xff,0xff,0xfe,0xfd,0xff,0xf4,0xb8, - 0x81,0x71,0x76,0x7c,0x6e,0x6e,0xae,0xf3,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xfb,0xbc,0x85,0x8c,0x95,0x8f,0x91,0x8a, - 0x92,0xbe,0xee,0xff,0xff,0xfd,0xff,0xff,0xfb,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x83,0x8a,0x80,0x7f,0xc5,0xff,0xff,0xf8,0xff,0xff,0xff,0xfe,0xfd,0xff,0xd6,0x88, - 0x72,0x83,0x7e,0x7c,0x7c,0x79,0x9b,0xd5,0xfb,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xf4,0xb8,0x87,0x8c,0x94,0x8d,0x8d,0x91, - 0x8b,0x96,0xd2,0xff,0xff,0xfa,0xff,0xff,0xfb,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x84,0x89,0x7e,0x7e,0xc6,0xff,0xff,0xf8,0xff,0xff,0xfc,0xff,0xff,0xe4,0xaa,0x80, - 0x7a,0x7f,0x7b,0x7c,0x7c,0x7a,0x97,0xd0,0xfa,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xff,0xfd,0xc2,0x89,0x89,0x93,0x8c,0x8b,0x93, - 0x8b,0x87,0xb6,0xf0,0xff,0xfd,0xff,0xff,0xfc,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x84,0x89,0x7e,0x7e,0xc5,0xff,0xff,0xf8,0xff,0xff,0xf9,0xff,0xff,0xc4,0x83,0x7e, - 0x82,0x77,0x78,0x7e,0x74,0x70,0xa1,0xe4,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xff,0xfa,0xff,0xff,0xd3,0x98,0x8e,0x92,0x8e,0x8f,0x90, - 0x8d,0x8e,0x9b,0xc5,0xf7,0xff,0xfe,0xff,0xfd,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x84,0x89,0x7e,0x7e,0xc6,0xff,0xff,0xf9,0xff,0xff,0xfe,0xfe,0xe6,0xa5,0x75,0x78, - 0x81,0x79,0x78,0x82,0x77,0x74,0xb5,0xfb,0xff,0xfb,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xed,0xbf,0x97,0x8b,0x90,0x90,0x8b, - 0x8f,0x91,0x85,0xa1,0xe7,0xff,0xfd,0xfe,0xfe,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x84,0x89,0x7e,0x7e,0xc5,0xff,0xff,0xfb,0xff,0xfd,0xff,0xf4,0xb3,0x85,0x7d,0x7b, - 0x7a,0x7b,0x7a,0x7d,0x7c,0x90,0xd1,0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xfe,0xff,0xfe,0xfc,0xff,0xe3,0xa1,0x86,0x92,0x91,0x8c, - 0x91,0x90,0x84,0x92,0xc6,0xf2,0xff,0xff,0xfb,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x84,0x89,0x7e,0x7e,0xc5,0xff,0xff,0xfa,0xff,0xfd,0xff,0xdd,0x92,0x76,0x82,0x7e, - 0x76,0x7d,0x7e,0x71,0x81,0xbe,0xf2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfe,0xfc,0xff,0xf5,0xbc,0x94,0x8e,0x8f,0x8f, - 0x8f,0x8e,0x8f,0x8e,0x9e,0xd6,0xff,0xff,0xf8,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x84,0x89,0x7e,0x7e,0xc4,0xff,0xff,0xf7,0xff,0xff,0xe6,0xb0,0x84,0x78,0x7d,0x7c, - 0x78,0x7d,0x7e,0x72,0x95,0xe6,0xff,0xfc,0xfd,0xff,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xfe,0xe8,0xb1,0x89,0x8c,0x93, - 0x8d,0x8c,0x92,0x89,0x87,0xbc,0xf8,0xff,0xf8,0xff,0xff,0xdd,0xa4,0x88,0x85,0x84, - 0x84,0x89,0x7e,0x7e,0xc5,0xff,0xff,0xf4,0xff,0xff,0xc6,0x83,0x7b,0x81,0x79,0x7a, - 0x7d,0x7a,0x79,0x8d,0xc3,0xf7,0xff,0xfd,0xfe,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfb,0xfd,0xfe, - 0xfc,0xfa,0xfc,0xff,0xff,0xff,0xfe,0xff,0xff,0xfb,0xff,0xff,0xcc,0x93,0x8d,0x93, - 0x8d,0x8e,0x90,0x8b,0x88,0x9f,0xd1,0xfa,0xff,0xff,0xff,0xdf,0xa4,0x87,0x84,0x84, - 0x84,0x89,0x7d,0x7d,0xc5,0xff,0xff,0xf8,0xff,0xea,0xa6,0x76,0x7a,0x82,0x78,0x79, - 0x81,0x77,0x75,0xaf,0xf1,0xff,0xfc,0xff,0xff,0xfe,0xff,0xff,0xff,0xfd,0xf9,0xfa, - 0xfe,0xfe,0xfb,0xfb,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xe7,0xb5,0x92,0x8b, - 0x90,0x90,0x8d,0x91,0x91,0x88,0xaa,0xee,0xff,0xff,0xff,0xdf,0xa4,0x8a,0x86,0x84, - 0x82,0x89,0x7f,0x7d,0xc6,0xff,0xff,0xff,0xf6,0xba,0x88,0x7e,0x7d,0x7c,0x7a,0x7a, - 0x7e,0x79,0x88,0xcd,0xff,0xff,0xfa,0xff,0xff,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfe,0xf6,0xee, - 0xf5,0xff,0xff,0xfc,0xfc,0xfe,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xde,0x9e,0x86, - 0x92,0x92,0x8c,0x91,0x91,0x85,0x96,0xcd,0xf4,0xff,0xff,0xe2,0xac,0x8d,0x85,0x83, - 0x83,0x88,0x81,0x85,0xcb,0xff,0xff,0xfe,0xe1,0x95,0x76,0x82,0x7e,0x78,0x7e,0x7d, - 0x75,0x82,0xb5,0xeb,0xff,0xff,0xfd,0xfe,0xff,0xff,0xfe,0xfb,0xfc,0xff,0xff,0xf9, - 0xed,0xeb,0xf7,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfd,0xea,0xd1,0xbf,0xba, - 0xc1,0xd0,0xe4,0xf6,0xff,0xff,0xfc,0xfb,0xfe,0xff,0xfd,0xfc,0xff,0xf4,0xb9,0x94, - 0x91,0x8e,0x8c,0x8f,0x8f,0x8c,0x8f,0xa9,0xdf,0xff,0xff,0xee,0xc0,0x89,0x7a,0x8a, - 0x8b,0x82,0x80,0x98,0xd8,0xff,0xff,0xef,0xc1,0x85,0x74,0x81,0x7f,0x76,0x7d,0x7f, - 0x72,0x90,0xdf,0xff,0xfa,0xfd,0xff,0xfe,0xfb,0xfb,0xfe,0xff,0xfc,0xeb,0xcd,0xb4, - 0xa8,0xa7,0xb3,0xd5,0xf9,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xf3,0xc0,0x99,0x93,0x94, - 0x90,0x95,0xb1,0xd8,0xf2,0xfc,0xfe,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfd,0xe2,0xae, - 0x8c,0x8e,0x92,0x8c,0x8b,0x91,0x8b,0x93,0xd2,0xff,0xff,0xfb,0xe0,0x9f,0x7e,0x86, - 0x85,0x7d,0x91,0xc1,0xec,0xff,0xff,0xde,0xa4,0x83,0x7c,0x7c,0x7a,0x7a,0x7c,0x7a, - 0x83,0xb4,0xf4,0xff,0xfc,0xfd,0xff,0xfe,0xff,0xff,0xff,0xf7,0xde,0xae,0x7f,0x6b, - 0x70,0x70,0x6a,0x91,0xdb,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfc,0xda,0xa5,0x91,0x99,0x98, - 0x92,0x95,0x9a,0xa0,0xb4,0xd9,0xf6,0xff,0xff,0xfc,0xfe,0xff,0xfb,0xff,0xff,0xc8, - 0x91,0x90,0x95,0x8b,0x8b,0x92,0x89,0x8b,0xcc,0xff,0xff,0xfc,0xfa,0xd8,0xa9,0x8d, - 0x86,0x94,0xbf,0xee,0xfd,0xff,0xff,0xda,0x9b,0x81,0x7d,0x78,0x77,0x7f,0x78,0x71, - 0xa4,0xe8,0xff,0xfe,0xff,0xfd,0xfd,0xfc,0xfe,0xfe,0xe3,0xaf,0x8a,0x7d,0x74,0x6f, - 0x72,0x73,0x6e,0x7b,0xac,0xe7,0xff,0xff,0xfd,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf1,0xc8,0xa2,0x97,0x9e,0x9e, - 0x9d,0x9e,0x99,0x8f,0x95,0xad,0xc6,0xdf,0xf9,0xff,0xfe,0xfd,0xfb,0xff,0xff,0xe6, - 0xb2,0x8e,0x8b,0x94,0x94,0x93,0x8a,0x91,0xd0,0xff,0xff,0xfa,0xff,0xfa,0xe2,0xcb, - 0xc7,0xd5,0xed,0xfd,0xfe,0xff,0xff,0xe2,0xa6,0x7e,0x79,0x81,0x81,0x7d,0x75,0x82, - 0xc8,0xff,0xff,0xf7,0xfb,0xfe,0xff,0xfb,0xe2,0xc2,0x9f,0x7c,0x6c,0x71,0x7a,0x7d, - 0x7b,0x79,0x7a,0x7b,0x93,0xd5,0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf1,0xc9,0xa1,0x95,0x9c,0x9c, - 0x98,0x97,0x99,0x9b,0x99,0x95,0x94,0xa9,0xd0,0xee,0xfa,0xfe,0xff,0xfe,0xfd,0xfe, - 0xdf,0xa6,0x8a,0x8e,0x8c,0x89,0x94,0xb4,0xe5,0xff,0xff,0xfd,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xf4,0xc6,0x8c,0x73,0x79,0x79,0x73,0x86,0xb6, - 0xeb,0xff,0xff,0xfe,0xff,0xfb,0xf1,0xd6,0xa6,0x7b,0x6e,0x76,0x7a,0x77,0x74,0x73, - 0x74,0x76,0x74,0x76,0x93,0xd7,0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xd9,0xa2,0x93,0xa1,0x9d, - 0x98,0x9b,0x9a,0x9a,0x99,0x97,0x95,0x97,0x9f,0xb0,0xd3,0xf7,0xff,0xfd,0xfc,0xff, - 0xfa,0xda,0xae,0x91,0x8a,0x91,0xb4,0xe8,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xff,0xfe,0xfd,0xff,0xee,0xb7,0x88,0x79,0x78,0x88,0xb8,0xee, - 0xff,0xfc,0xfd,0xff,0xfa,0xd4,0xa4,0x85,0x7a,0x73,0x71,0x75,0x79,0x78,0x75,0x72, - 0x76,0x79,0x71,0x78,0xa7,0xe4,0xff,0xff,0xfd,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xef,0xbb,0x98,0x95,0x9c, - 0xa1,0x9f,0x9a,0x98,0x98,0x9b,0x9e,0x99,0x90,0x94,0xa8,0xc0,0xdb,0xf2,0xfe,0xff, - 0xff,0xfa,0xe4,0xcc,0xc7,0xcf,0xe3,0xfb,0xff,0xfe,0xff,0xff,0xff,0xff,0xfd,0xf9, - 0xf9,0xfb,0xfe,0xff,0xff,0xff,0xfe,0xff,0xfc,0xe8,0xcf,0xbf,0xbf,0xcf,0xe9,0xfc, - 0xff,0xff,0xfb,0xe0,0xb8,0x97,0x79,0x6a,0x71,0x7c,0x7b,0x75,0x72,0x73,0x78,0x7b, - 0x7a,0x71,0x6c,0x91,0xd4,0xfb,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfc,0xe4,0xbc,0xa0,0x9b, - 0x9b,0x9a,0x9c,0x9e,0x9d,0x9a,0x98,0x98,0x9a,0x9c,0x96,0x91,0xa3,0xce,0xf3,0xff, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd, - 0xff,0xf8,0xd6,0x9c,0x74,0x70,0x79,0x7a,0x76,0x74,0x74,0x76,0x78,0x77,0x75,0x72, - 0x72,0x7a,0x93,0xc6,0xf8,0xff,0xfd,0xfe,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xf3,0xd6,0xaf, - 0x99,0x99,0x9d,0x9e,0x9d,0x9a,0x99,0x99,0x9c,0x9b,0x98,0x95,0x94,0xa3,0xc6,0xee, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xf9,0xd2,0x92,0x72,0x74,0x75,0x76,0x79,0x77,0x74,0x73,0x75,0x78,0x78,0x73,0x6e, - 0x80,0xae,0xe0,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfb,0xe0, - 0xc7,0xb1,0x9b,0x93,0x9a,0x9f,0x9f,0x9b,0x97,0x96,0x9a,0x9d,0x9a,0x91,0x9d,0xd5, - 0xff,0xff,0xf9,0xfa,0xfa,0xfa,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfa,0xf9,0xf8,0xf8,0xff,0xff, - 0xe2,0xa1,0x71,0x6e,0x7d,0x79,0x72,0x71,0x73,0x78,0x7c,0x78,0x6b,0x6d,0x87,0xa7, - 0xc8,0xe9,0xfc,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xf8,0xdd,0xba,0xa5,0x9f,0x9a,0x99,0x9d,0x9c,0x9c,0x99,0x97,0x9f,0x95,0x8f,0xc5, - 0xff,0xff,0xfb,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf9,0xff,0xff, - 0xc8,0x81,0x77,0x7b,0x74,0x74,0x77,0x79,0x78,0x73,0x71,0x76,0x7b,0x90,0xbd,0xec, - 0xff,0xff,0xfc,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff, - 0xfe,0xfc,0xf5,0xdf,0xba,0x9c,0x96,0x9d,0x9e,0x9e,0x9b,0x99,0x9f,0x97,0x91,0xc2, - 0xfb,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf9,0xff,0xff, - 0xc5,0x7b,0x75,0x7c,0x73,0x75,0x78,0x7a,0x79,0x6f,0x6e,0x8a,0xbd,0xe6,0xf7,0xfd, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xfe,0xfb, - 0xfc,0xff,0xff,0xf9,0xe6,0xcd,0xb4,0xa1,0x95,0x95,0x9b,0x9d,0x9e,0x94,0x98,0xcd, - 0xff,0xff,0xfb,0xff,0xff,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xff,0xfa,0xff,0xff, - 0xd4,0x8c,0x6f,0x72,0x78,0x77,0x70,0x6a,0x6f,0x8b,0xaf,0xce,0xee,0xff,0xff,0xfa, - 0xf9,0xfd,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfd,0xfc,0xff,0xff,0xe8,0xc3,0xaa,0xa1,0x9e,0x9b,0x9c,0xa3,0xb9,0xe5, - 0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff, - 0xed,0xb9,0x86,0x72,0x76,0x76,0x77,0x7f,0x96,0xca,0xf7,0xff,0xfd,0xfd,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf6,0xe5,0xcc,0xb5,0xa8,0xad,0xc9,0xec,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xee,0xbe,0x96,0x89,0x8d,0xa4,0xc9,0xeb,0xfc,0xff,0xff,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xfc,0xfc,0xfc,0xfb,0xfb,0xfb, - 0xfb,0xfc,0xfd,0xfc,0xf9,0xf8,0xfb,0xff,0xfe,0xf1,0xe1,0xda,0xdf,0xee,0xfd,0xff, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfd,0xee,0xd9,0xcc,0xd0,0xe3,0xf6,0xfe,0xfe,0xfb,0xfa,0xfc,0xff,0xff,0xfe, - 0xfd,0xfd,0xfe,0xfe,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xfe,0xfc,0xfd,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf6,0xe9,0xe0,0xdf,0xdf,0xdf,0xdf,0xdf, - 0xdf,0xdf,0xde,0xdd,0xdd,0xdd,0xdd,0xdf,0xe3,0xe9,0xf4,0xfe,0xff,0xfd,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfb,0xfa,0xfb,0xfa,0xf9,0xf8,0xf7,0xf6,0xf4,0xf4,0xf4,0xf4,0xf4, - 0xf4,0xf5,0xf4,0xf4,0xf5,0xf5,0xf7,0xfa,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xee,0xd2,0xbe,0xb3,0xaf,0xaf,0xaf,0xaf,0xaf, - 0xaf,0xaf,0xad,0xac,0xac,0xac,0xaa,0xac,0xb4,0xbe,0xd7,0xf6,0xff,0xfe,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfa,0xf2,0xea,0xe7,0xe7,0xe7,0xe6,0xe6,0xe6,0xe6,0xe6, - 0xe6,0xe5,0xe6,0xe6,0xe5,0xe6,0xe9,0xee,0xf7,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf1,0xc9,0xa8,0xa5,0xa7,0xa3,0xa4,0xa4,0xa3,0xa3, - 0xa3,0xa3,0xa3,0xa3,0xa3,0xa3,0xa1,0xa3,0xa5,0xa4,0xaf,0xd0,0xf2,0xff,0xff,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xf8,0xea,0xe3,0xe4,0xe3,0xe2,0xe2,0xe3,0xe3,0xe3,0xe3,0xe3, - 0xe3,0xe2,0xe3,0xe3,0xe2,0xe2,0xe2,0xe3,0xe9,0xf4,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfe,0xff,0xfa,0xd7,0xad,0x9c,0xa2,0xa8,0xa7,0xa7,0xa7,0xa6,0xa6, - 0xa6,0xa6,0xa6,0xa7,0xa7,0xa7,0xa7,0xa7,0xa5,0xa2,0x9f,0xae,0xde,0xff,0xff,0xfc, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfa,0xec,0xe1,0xde,0xe2,0xe3,0xe2,0xe2,0xe3,0xe3,0xe3,0xe3,0xe3, - 0xe3,0xe3,0xe3,0xe3,0xe3,0xe3,0xe2,0xe1,0xe0,0xeb,0xfc,0xff,0xfe,0xff,0xff,0xff, - 
0xfe,0xff,0xfe,0xfc,0xff,0xf5,0xbf,0xa5,0xa9,0xa6,0xa3,0xa4,0xa4,0xa4,0xa4,0xa5, - 0xa5,0xa4,0xa4,0xa4,0xa4,0xa4,0xa5,0xa3,0xa3,0xab,0xa3,0x9f,0xd2,0xff,0xff,0xfa, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xff,0xff,0xf5,0xe6,0xe3,0xe3,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2, - 0xe2,0xe2,0xe2,0xe2,0xe2,0xe1,0xe3,0xe4,0xdf,0xe6,0xfa,0xff,0xfe,0xfe,0xff,0xff, - 0xfe,0xff,0xfe,0xfc,0xff,0xf5,0xbf,0xa5,0xaa,0xa8,0xa4,0xa5,0xa5,0xa5,0xa5,0xa5, - 0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa6,0xa4,0xa4,0xac,0xa3,0x9f,0xd2,0xff,0xff,0xfa, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xff,0xff,0xf5,0xe6,0xe3,0xe3,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2, - 0xe2,0xe2,0xe2,0xe2,0xe2,0xe1,0xe3,0xe4,0xdf,0xe5,0xfa,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xfe,0xfe,0xff,0xfa,0xd3,0xac,0x9e,0xa3,0xa8,0xa8,0xa8,0xaa,0xa9,0xa7, - 0xa7,0xa8,0xa9,0xa9,0xa9,0xa9,0xa9,0xa9,0xa7,0xa6,0xa1,0xad,0xdd,0xff,0xff,0xfc, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf9,0xeb,0xe0,0xdf,0xe3,0xe3,0xe3,0xe3,0xe3,0xe3,0xe3,0xe3,0xe3, - 0xe3,0xe3,0xe3,0xe3,0xe3,0xe2,0xe2,0xe2,0xe0,0xea,0xfc,0xff,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xee,0xc7,0xa8,0xa6,0xa8,0xa5,0xa6,0xa8,0xa7,0xa6, - 0xa7,0xa8,0xa8,0xa8,0xa8,0xa8,0xa7,0xa8,0xa9,0xa8,0xb1,0xd0,0xf1,0xff,0xff,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xf5,0xe7,0xe1,0xe2,0xe1,0xe1,0xe1,0xe1,0xe1,0xe1,0xe1,0xe1, - 0xe1,0xe2,0xe2,0xe1,0xe2,0xe2,0xe2,0xe2,0xe8,0xf4,0xfe,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xec,0xd0,0xbd,0xb3,0xb0,0xb0,0xb0,0xb0,0xb2, - 0xb2,0xb2,0xb2,0xb2,0xb2,0xb2,0xb2,0xb2,0xb6,0xbe,0xd6,0xf4,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfe,0xf6,0xec,0xe6,0xe3,0xe3,0xe3,0xe3,0xe3,0xe3,0xe3,0xe3, - 0xe3,0xe4,0xe3,0xe3,0xe4,0xe4,0xe5,0xeb,0xf5,0xfd,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf5,0xe7,0xde,0xdd,0xdd,0xdd,0xdd,0xde, - 0xde,0xde,0xde,0xde,0xde,0xde,0xde,0xdf,0xe1,0xe7,0xf2,0xfe,0xff,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfd,0xf8,0xf4,0xf3,0xf3,0xf3,0xf2,0xf2,0xf3,0xf3,0xf3, - 0xf3,0xf3,0xf3,0xf3,0xf3,0xf3,0xf4,0xf8,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xfc,0xfc,0xfb,0xfb,0xfb,0xfb, - 0xfb,0xfc,0xfd,0xfd,0xfa,0xf9,0xfc,0xff,0xfe,0xf6,0xeb,0xe5,0xe9,0xf4,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfc,0xf8,0xf5,0xf6,0xfa,0xfe,0xff,0xfe,0xfd,0xfd,0xfe,0xfe,0xfe,0xfe, - 0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xea,0xd9,0xc6,0xba,0xbf,0xd8,0xf3,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfb,0xf1,0xe7,0xe4,0xe6,0xec,0xf5,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfc,0xff,0xff,0xef,0xcf,0xba,0xb3,0xb0,0xad,0xae,0xb7,0xca,0xec, - 0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfb,0xee,0xe1,0xdc,0xdc,0xdd,0xde,0xe2,0xe9,0xf4,0xfd,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xfc, - 0xfc,0xff,0xff,0xfc,0xef,0xdb,0xc5,0xb3,0xa7,0xa8,0xae,0xb1,0xb1,0xab,0xaf,0xda, - 0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff, - 0xf4,0xe1,0xd9,0xda,0xdc,0xdb,0xda,0xda,0xdd,0xe4,0xed,0xf4,0xfb,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff, - 0xff,0xfe,0xf9,0xe8,0xca,0xb0,0xa9,0xae,0xb0,0xb2,0xb1,0xaf,0xb4,0xaf,0xad,0xd2, - 0xfc,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff, - 0xef,0xdc,0xdb,0xdd,0xdc,0xdc,0xdc,0xdc,0xdc,0xdc,0xdd,0xe3,0xf0,0xfa,0xfe,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xfe,0xff, - 0xfc,0xe9,0xcc,0xb8,0xb1,0xad,0xad,0xb0,0xb2,0xb3,0xb0,0xaf,0xb5,0xb0,0xae,0xd4, - 0xfe,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff, - 0xf0,0xdc,0xd9,0xdc,0xdb,0xdb,0xdb,0xdc,0xdc,0xdb,0xdb,0xdd,0xdf,0xe5,0xf1,0xfb, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfc,0xeb, - 0xd7,0xc3,0xb0,0xa8,0xad,0xb3,0xb3,0xb0,0xaf,0xaf,0xb1,0xb4,0xb5,0xaf,0xb6,0xde, - 0xff,0xff,0xfb,0xfd,0xfc,0xfc,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xfd,0xfd,0xfd,0xff,0xff, - 0xf7,0xe4,0xd7,0xd7,0xdc,0xdb,0xd9,0xda,0xdb,0xdc,0xdd,0xdd,0xda,0xdb,0xe2,0xea, - 0xf3,0xfc,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf7,0xe2,0xc3, - 0xad,0xab,0xb1,0xb3,0xb0,0xb0,0xb1,0xb1,0xb2,0xb2,0xb3,0xb2,0xb3,0xbb,0xd0,0xef, - 0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xfe,0xf2,0xe0,0xd7,0xd8,0xd9,0xda,0xdc,0xdb,0xdb,0xdb,0xdc,0xdd,0xdd,0xdb,0xdb, - 0xe1,0xed,0xf8,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xee,0xce,0xb4,0xae, - 0xad,0xad,0xb1,0xb3,0xb2,0xb0,0xb0,0xb1,0xb2,0xb5,0xb3,0xb1,0xbb,0xd7,0xf3,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xff,0xfd,0xf2,0xe2,0xd7,0xd7,0xdb,0xdc,0xdb,0xdb,0xdb,0xdb,0xdc,0xdd,0xdb,0xda, - 0xdb,0xde,0xe4,0xf0,0xfd,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xf3,0xcd,0xaf,0xa9,0xae, - 0xb4,0xb3,0xb0,0xae,0xaf,0xb2,0xb5,0xb4,0xaf,0xb0,0xbd,0xce,0xe1,0xf2,0xfe,0xff, - 0xff,0xfd,0xf1,0xe2,0xe0,0xe6,0xf1,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd, - 0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xfe,0xf9,0xf1,0xec,0xec,0xf2,0xfa,0xff, - 0xff,0xff,0xfd,0xf5,0xe9,0xe1,0xda,0xd7,0xd9,0xdb,0xdb,0xda,0xda,0xda,0xdc,0xdd, - 0xdd,0xdb,0xd9,0xe3,0xf5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xe1,0xb5,0xa9,0xb4,0xb3, - 0xae,0xb1,0xb2,0xb2,0xb2,0xb1,0xb1,0xb4,0xb9,0xc4,0xdb,0xf5,0xff,0xfe,0xfd,0xff, - 0xfc,0xea,0xd0,0xbe,0xb9,0xc0,0xd6,0xf4,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xfa,0xe8,0xd9,0xd3,0xd3,0xda,0xeb,0xfa, - 0xff,0xfe,0xfe,0xff,0xfd,0xf2,0xe5,0xde,0xda,0xd8,0xd9,0xdb,0xdc,0xdb,0xdb,0xdb, - 0xdb,0xdc,0xdb,0xde,0xea,0xf9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf4,0xd5,0xb5,0xac,0xb2,0xb1, - 0xae,0xb0,0xb2,0xb3,0xb4,0xb3,0xb2,0xbf,0xda,0xf1,0xfb,0xfe,0xff,0xfe,0xfe,0xfe, - 0xeb,0xc9,0xb9,0xbb,0xba,0xba,0xc4,0xd8,0xf3,0xff,0xff,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfc,0xed,0xd7,0xce,0xd1,0xd2,0xd2,0xd9,0xe8, - 0xf9,0xff,0xff,0xff,0xff,0xfe,0xfa,0xf2,0xe5,0xda,0xd7,0xdb,0xdd,0xdc,0xdb,0xdb, - 0xdb,0xdb,0xdb,0xdd,0xe4,0xf5,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf3,0xd3,0xb6,0xaf,0xb4,0xb3, - 0xb3,0xb5,0xb3,0xad,0xb0,0xc1,0xd3,0xe5,0xf8,0xff,0xff,0xfe,0xfc,0xff,0xff,0xef, - 0xcf,0xb9,0xb7,0xbd,0xbf,0xc0,0xbd,0xc4,0xe6,0xff,0xff,0xfc,0xff,0xfe,0xf4,0xea, - 0xea,0xf0,0xf9,0xff,0xff,0xff,0xff,0xf5,0xe0,0xd1,0xce,0xd3,0xd6,0xd4,0xd1,0xd8, - 0xee,0xff,0xff,0xfd,0xfe,0xff,0xff,0xfe,0xf6,0xed,0xe3,0xda,0xd7,0xd9,0xdc,0xdc, - 0xdc,0xdc,0xdc,0xdc,0xe3,0xf4,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xe1,0xb9,0xaa,0xb1,0xb2, - 0xae,0xb0,0xb3,0xb8,0xc6,0xdf,0xf7,0xff,0xff,0xfd,0xff,0xff,0xfc,0xff,0xff,0xdd, - 0xba,0xba,0xbe,0xba,0xbb,0xc1,0xbd,0xbf,0xe3,0xff,0xff,0xfd,0xfd,0xef,0xd9,0xcd, - 0xcd,0xd5,0xe7,0xfa,0xff,0xff,0xff,0xf1,0xda,0xd2,0xd2,0xd1,0xd2,0xd5,0xd3,0xd2, - 0xe3,0xf8,0xff,0xfe,0xff,0xff,0xfe,0xfe,0xff,0xfe,0xf5,0xe6,0xdd,0xda,0xd8,0xd8, - 0xdb,0xdb,0xd8,0xdc,0xe9,0xf8,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xf5,0xd0,0xb1,0xab,0xaf, - 0xad,0xb0,0xc3,0xe0,0xf5,0xfd,0xfe,0xff,0xff,0xff,0xfe,0xff,0xff,0xfe,0xed,0xcd, - 0xb8,0xba,0xbe,0xbb,0xbd,0xc2,0xbe,0xc2,0xe5,0xff,0xff,0xfd,0xf1,0xd3,0xc4,0xc8, - 0xc8,0xc7,0xd2,0xe7,0xf8,0xff,0xff,0xf2,0xdd,0xd3,0xd1,0xd1,0xd1,0xd3,0xd4,0xd3, - 0xda,0xeb,0xfc,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xf4,0xe6,0xda,0xd7, - 0xda,0xda,0xd8,0xe2,0xf6,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xef,0xdc,0xcd,0xc7, - 0xcb,0xd7,0xe7,0xf8,0xff,0xff,0xfd,0xfc,0xfe,0xff,0xfe,0xfd,0xff,0xf8,0xd3,0xbd, - 0xbc,0xbb,0xbb,0xbe,0xbf,0xbf,0xc1,0xcf,0xed,0xff,0xff,0xf8,0xe2,0xc6,0xc1,0xcb, - 0xca,0xc7,0xca,0xd5,0xef,0xff,0xff,0xf8,0xe8,0xd2,0xcd,0xd2,0xd2,0xcf,0xd2,0xd4, - 0xd3,0xde,0xf7,0xff,0xfd,0xfe,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfe,0xf8,0xef,0xe8, - 0xe5,0xe6,0xeb,0xf4,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xec, - 0xf3,0xff,0xff,0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xea,0xc2,0xb4, - 0xbd,0xbf,0xbb,0xbe,0xc0,0xbb,0xc5,0xe4,0xf9,0xff,0xff,0xf1,0xd7,0xc7,0xc6,0xc8, - 0xc8,0xca,0xca,0xce,0xeb,0xff,0xff,0xff,0xf4,0xd7,0xcc,0xd3,0xd2,0xcf,0xd2,0xd4, - 0xd2,0xd7,0xea,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xfb, - 0xf6,0xf8,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf2,0xd4,0xbd,0xb9, - 0xbd,0xbd,0xbd,0xc0,0xc0,0xbc,0xcf,0xf5,0xff,0xff,0xff,0xf0,0xd3,0xc7,0xc6,0xc7, - 0xc7,0xcb,0xc9,0xca,0xe8,0xff,0xff,0xff,0xfc,0xe4,0xd3,0xd1,0xd1,0xd1,0xd1,0xd2, - 0xd5,0xd3,0xd8,0xef,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfc,0xfe,0xff, - 0xfe,0xfc,0xfc,0xfe,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xe2,0xbf,0xba,0xbf, - 0xbc,0xbc,0xbf,0xbf,0xbe,0xc7,0xe2,0xfc,0xff,0xff,0xff,0xf0,0xd2,0xc5,0xc5,0xc7, - 0xc8,0xc9,0xc7,0xca,0xe8,0xff,0xff,0xfd,0xff,0xf6,0xdf,0xcf,0xd0,0xd3,0xd0,0xd1, - 0xd5,0xd3,0xd1,0xe4,0xfb,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xfe, - 0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xf4,0xd1,0xb6,0xb8,0xbf, - 0xbc,0xbc,0xc0,0xbd,0xbd,0xd7,0xf8,0xff,0xfd,0xff,0xff,0xef,0xd3,0xc6,0xc6,0xc7, - 0xc7,0xca,0xc8,0xca,0xe7,0xff,0xff,0xfb,0xff,0xff,0xea,0xd2,0xd0,0xd3,0xd0,0xd0, - 0xd2,0xd3,0xd3,0xda,0xec,0xfd,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xfa,0xd9,0xc0,0xba,0xbb,0xbc, - 0xbd,0xbe,0xc0,0xbf,0xc8,0xe8,0xff,0xff,0xfb,0xff,0xff,0xee,0xd3,0xc6,0xc6,0xc8, - 0xc8,0xca,0xc8,0xca,0xe7,0xff,0xff,0xfc,0xff,0xff,0xf5,0xe1,0xd3,0xd0,0xd1,0xd1, - 0xd1,0xd4,0xd5,0xd1,0xdc,0xf8,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd,0xff,0xf0,0xc7,0xb6,0xbe,0xbe,0xbb, - 0xbe,0xc0,0xbb,0xc2,0xdd,0xf7,0xff,0xff,0xfd,0xff,0xff,0xef,0xd3,0xc6,0xc6,0xc8, - 0xc8,0xca,0xc8,0xca,0xe8,0xff,0xff,0xfd,0xff,0xff,0xff,0xf1,0xd6,0xcd,0xd2,0xd2, - 0xd0,0xd3,0xd4,0xd0,0xd7,0xed,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf5,0xd9,0xbf,0xb8,0xbd,0xbe,0xbb, - 0xbe,0xc0,0xbb,0xcb,0xf1,0xff,0xfe,0xff,0xfe,0xff,0xff,0xef,0xd3,0xc6,0xc6,0xc8, - 0xc8,0xca,0xc8,0xca,0xe8,0xff,0xff,0xfd,0xff,0xfe,0xff,0xfa,0xe0,0xd1,0xd2,0xd1, - 0xd0,0xd1,0xd3,0xd4,0xd5,0xdd,0xf2,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xe5,0xc1,0xba,0xbe,0xbc,0xbc,0xbe, - 0xbf,0xbe,0xc6,0xe0,0xfb,0xff,0xfe,0xff,0xfe,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xca,0xc8,0xca,0xe8,0xff,0xff,0xfd,0xff,0xff,0xff,0xfe,0xf2,0xdd,0xcf,0xd0, - 0xd2,0xd0,0xd0,0xd4,0xd2,0xd3,0xe8,0xfe,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xfe,0xda,0xb6,0xb7,0xbf,0xbc,0xbb,0xc0, - 0xbd,0xbb,0xd4,0xf6,0xff,0xfe,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xca,0xc7,0xc9,0xe7,0xff,0xff,0xfc,0xff,0xff,0xfd,0xff,0xff,0xe8,0xd1,0xd1, - 0xd3,0xcf,0xcf,0xd3,0xd1,0xd0,0xe0,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xf8,0xd4,0xb7,0xba,0xc0,0xbc,0xbd,0xc1, - 0xbe,0xc2,0xe3,0xff,0xff,0xfc,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xcb,0xc7,0xc8,0xe6,0xff,0xff,0xfc,0xff,0xff,0xfe,0xff,0xff,0xf4,0xde,0xd1, - 0xd0,0xd2,0xd1,0xd2,0xd3,0xd4,0xde,0xf0,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xfc,0xd7,0xb8,0xbc,0xc0,0xbd,0xc0,0xbe, - 0xc2,0xd8,0xf3,0xff,0xff,0xfe,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xcb,0xc7,0xc8,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xfe,0xff,0xef,0xd3, - 0xcd,0xd3,0xd2,0xd1,0xd2,0xd3,0xdf,0xf2,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xff,0xff,0xe0,0xbb,0xb7,0xbd,0xbd,0xbc,0xba, - 0xcc,0xef,0xff,0xfe,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xcb,0xc7,0xc8,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xfe,0xff,0xfa,0xe3, - 0xd1,0xcd,0xcf,0xd1,0xce,0xcf,0xe5,0xfb,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf0,0xd3,0xc3,0xc0,0xc1,0xc2,0xcb, - 0xe3,0xfb,0xff,0xfe,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xcb,0xc7,0xc8,0xe6,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7, - 0xe3,0xd2,0xd2,0xd4,0xd3,0xdb,0xf1,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf6,0xe4,0xd5,0xd3,0xde,0xef, - 0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xee,0xd3,0xc8,0xc8,0xc8, - 0xc7,0xca,0xc6,0xc8,0xe7,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xf7,0xeb,0xe3,0xe1,0xe8,0xf5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xf1,0xf0,0xf8,0xfe, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfd,0xff,0xff,0xef,0xd4,0xc9,0xc8,0xc7, - 0xc6,0xca,0xc8,0xca,0xe8,0xff,0xff,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xfd,0xf6,0xf4,0xf9,0xfe,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xff,0xf4,0xda,0xc8,0xc6,0xca, - 0xc9,0xc9,0xc9,0xcf,0xeb,0xff,0xff,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xe7,0xca,0xc2,0xcb, - 0xcb,0xc6,0xc9,0xd9,0xf2,0xff,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfe,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xff,0xf5,0xdc,0xc8,0xc4, - 0xc4,0xc5,0xd3,0xec,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfe,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf4,0xe0,0xd2, - 0xd2,0xd9,0xe9,0xfc,0xff,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xf7, - 0xf7,0xfa,0xfd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xfd, - 0xfd,0xfd,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe, - 0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, +const unsigned char gGearPict3x[9 * kGearFrames * kGearWidth * 
kGearHeight] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, + 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfb, + 0xfb, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf5, 0xf0, + 0xf0, 0xf5, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd7, 0xad, 0x94, + 0x94, 0xac, 0xd6, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe1, 0xa1, 0x76, 0x6f, + 0x6f, 0x76, 0x9f, 0xdc, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xed, 0xb4, 0x7c, 0x6d, 0x7d, + 0x7d, 0x6e, 0x79, 0xac, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, 0xff, 0xfe, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xd5, 0x90, 0x75, 0x75, 0x79, + 0x79, 0x75, 0x74, 0x8e, 0xd4, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xe7, 0xe6, 0xf6, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xca, 0x82, 0x76, 0x7a, 0x72, + 0x72, 0x79, 0x75, 0x83, 0xcb, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfd, 0xf9, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xeb, 0xca, 0xae, 0xac, 0xc4, 0xe7, + 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xca, 0x80, 0x73, 0x79, 0x76, + 0x76, 0x79, 0x72, 0x7f, 0xc9, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0xf2, 0xed, 0xed, 0xf3, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdf, 0xa4, 0x86, 0x82, 0x7e, 0x81, 0xa6, + 0xdd, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xca, 0x80, 0x73, 0x79, 0x75, + 0x75, 0x78, 0x72, 0x7f, 0xc9, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xf8, + 0xec, 0xe4, 0xe3, 0xe3, 0xe5, 0xec, 0xf8, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfc, 0xbc, 0x78, 0x70, 0x79, 0x76, 0x71, 0x77, + 0xa8, 0xec, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xc9, 0x81, 0x75, 0x7a, 0x74, + 0x74, 0x7a, 0x74, 0x80, 0xc9, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfb, 0xed, + 0xe2, 0xe0, 0xe1, 0xe1, 0xdf, 0xe1, 0xf0, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xa7, 0x76, 0x7a, 0x7e, 0x7d, 0x80, 0x72, + 0x81, 0xc4, 0xf6, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xca, 0x81, 0x74, 0x79, 0x74, + 0x74, 0x79, 0x73, 0x7f, 0xc9, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xf2, 0xe4, + 0xe1, 0xe3, 0xe2, 0xe2, 0xe1, 0xe0, 0xeb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xe2, 0xa4, 0x78, 0x79, 0x7f, 0x7b, 0x7c, 0x7c, + 0x79, 0x91, 0xd4, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xca, 0x81, 0x74, 0x79, 0x74, + 0x74, 0x79, 0x73, 0x7f, 0xc9, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xf6, 0xe7, 0xe2, + 0xe2, 0xe2, 0xe2, 0xe3, 0xe1, 0xe1, 0xeb, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xf2, 0xad, 0x72, 0x75, 0x80, 0x77, 0x76, 0x81, + 0x79, 0x77, 0xb4, 0xf8, 0xff, 0xfa, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xca, 0x81, 0x74, 0x79, 0x74, + 0x74, 0x79, 0x73, 0x7f, 0xc9, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xef, 0xe2, 0xe1, + 0xe3, 0xe2, 0xe2, 0xe3, 0xe0, 0xdf, 0xec, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xc1, 0x7c, 0x7a, 0x82, 0x79, 0x7a, 0x7e, + 0x78, 0x77, 0x93, 0xcb, 0xf9, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xca, 0x81, 0x74, 0x79, 0x74, + 0x74, 0x79, 0x73, 0x7f, 0xc9, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xfe, 0xf3, 0xe8, 0xe2, 0xe2, + 0xe2, 0xe2, 0xe2, 0xe3, 0xe1, 0xe1, 0xf0, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, 0xa0, 0x81, 0x7c, 0x7d, 0x7c, 0x78, + 0x7c, 0x7e, 0x76, 0x9e, 0xef, 0xff, 0xfc, 0xfe, 0xfc, 0xff, 0xff, 0xca, 0x81, 0x74, 0x79, 0x74, + 0x74, 0x79, 0x73, 0x7f, 0xc9, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xfc, 0xea, 0xe1, 0xe4, 0xe3, + 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe9, 0xf7, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfb, 0xd0, 0x8b, 0x73, 0x80, 0x7e, 0x77, + 0x7e, 0x7f, 0x6f, 0x88, 0xd0, 0xfb, 0xfe, 0xff, 0xfa, 0xff, 0xff, 0xc9, 0x81, 0x74, 0x79, 0x74, + 0x74, 0x79, 0x73, 0x7f, 0xc9, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xf5, 0xe5, 0xdf, 0xe3, 0xe3, + 0xe1, 0xe3, 0xe3, 0xe0, 0xe5, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xf1, 0xa6, 0x7c, 0x7f, 0x7f, 0x7a, + 0x7b, 0x7b, 0x7a, 0x7e, 0x9d, 0xda, 0xff, 0xff, 0xf6, 0xff, 0xff, 0xc9, 0x81, 0x74, 0x79, 0x74, + 0x74, 0x79, 0x73, 0x7f, 0xc9, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xf6, 0xea, 0xe4, 0xe2, 0xe2, 0xe2, + 0xe2, 0xe2, 0xe3, 0xe1, 0xea, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd3, 0x99, 0x79, 0x7b, 0x7f, + 0x79, 0x79, 0x81, 0x76, 0x77, 0xba, 0xff, 0xff, 0xf4, 0xff, 0xff, 0xc9, 0x81, 0x75, 0x7a, 0x74, + 0x74, 0x7a, 0x74, 0x80, 0xc9, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xf0, 0xe3, 0xe2, 0xe4, 0xe1, 0xe2, + 0xe3, 0xe2, 0xe1, 0xe8, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfa, 0xfd, 0xff, + 0xfc, 0xfa, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xfa, 0xb9, 0x7b, 0x7a, 0x82, + 0x7a, 0x79, 0x7f, 0x77, 0x73, 0x99, 0xda, 0xfd, 0xfc, 0xff, 0xff, 0xcb, 0x80, 0x72, 0x78, 0x75, + 0x75, 0x78, 0x71, 0x7f, 0xca, 0xff, 0xff, 0xfa, 0xff, 0xf8, 0xe9, 0xe2, 0xe2, 0xe3, 0xe2, 0xe1, + 0xe3, 0xe1, 0xe0, 0xee, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xd9, 0x9a, 0x7e, 0x7c, + 0x7d, 0x7c, 0x79, 0x7c, 0x7d, 0x7c, 0xa8, 0xf1, 0xff, 0xff, 0xff, 0xcc, 0x80, 0x75, 0x7a, 0x73, + 0x73, 0x79, 0x74, 0x80, 0xca, 0xff, 0xff, 0xfc, 0xfe, 0xef, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, + 0xe2, 0xe2, 0xe7, 0xf6, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 
0xfe, 0xf1, 0xe4, + 0xf0, 0xff, 0xff, 0xfb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xcc, 0x89, 0x73, + 0x80, 0x80, 0x77, 0x7e, 0x7f, 0x70, 0x8a, 0xd4, 0xf9, 0xff, 0xff, 0xcf, 0x87, 0x77, 0x79, 0x74, + 0x74, 0x78, 0x77, 0x8a, 0xd0, 0xff, 0xff, 0xfb, 0xfa, 0xe9, 0xe0, 0xe3, 0xe2, 0xe1, 0xe2, 0xe3, + 0xe0, 0xe5, 0xf3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfb, + 0xf8, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xdd, 0xc2, 0xb2, 0xa9, + 0xaf, 0xc6, 0xe2, 0xf8, 0xff, 0xff, 0xfc, 0xfb, 0xfe, 0xff, 0xfd, 0xfa, 0xff, 0xec, 0xa2, 0x7c, + 0x81, 0x7e, 0x79, 0x7d, 0x7d, 0x74, 0x7f, 0xad, 0xe4, 0xff, 0xff, 0xde, 0xa0, 0x74, 0x6e, 0x7d, + 0x7d, 0x6e, 0x73, 0xa1, 0xe2, 0xff, 0xff, 0xfa, 0xf2, 0xe6, 0xe1, 0xe3, 0xe3, 0xe1, 0xe3, 0xe3, + 0xe0, 0xe9, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf8, 0xef, 0xe8, + 0xe7, 0xe9, 0xed, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xe7, 0xa4, 0x7b, 0x7d, 0x80, + 0x79, 0x82, 0xa7, 0xd7, 0xf3, 0xfc, 0xfe, 0xff, 0xfe, 0xff, 0xfd, 0xfe, 0xff, 0xfa, 0xcf, 0x96, + 0x7a, 0x7c, 0x7e, 0x79, 0x79, 0x7c, 0x7b, 0x8f, 0xd3, 0xff, 0xff, 0xf4, 0xcd, 0x8c, 0x6c, 0x76, + 0x76, 0x6c, 0x89, 0xca, 0xf7, 0xff, 0xff, 0xf8, 0xeb, 0xe4, 0xe2, 0xe3, 0xe2, 0xe2, 0xe2, 0xe1, + 0xe6, 0xf2, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf5, 0xe8, 0xdd, 0xd9, + 0xda, 0xda, 0xda, 0xe5, 0xf8, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf2, 0xc2, 0x8d, 0x7b, 0x83, 0x84, + 0x7d, 0x7f, 0x86, 0x90, 0xaa, 0xd6, 0xf7, 0xff, 0xfe, 0xfc, 0xfe, 0xff, 0xfb, 0xff, 0xfa, 0xb5, + 0x78, 0x7c, 0x83, 0x77, 0x77, 0x7e, 0x79, 0x87, 0xce, 0xff, 0xff, 0xfe, 0xf5, 0xc6, 0x93, 0x7a, + 0x7a, 0x92, 0xc3, 0xf3, 0xff, 0xff, 0xff, 0xf6, 0xe7, 0xe3, 0xe3, 0xe2, 0xe2, 0xe3, 0xe1, 0xe0, + 0xed, 0xfd, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfd, 0xf5, 0xe9, 0xe0, 0xdd, 0xdb, 0xda, + 0xdb, 0xdc, 0xd9, 0xde, 0xee, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe3, 0xae, 0x8c, 0x85, 0x89, 0x89, + 0x89, 0x8a, 0x83, 0x79, 0x81, 0x9f, 0xbf, 0xde, 0xfa, 0xff, 0xfe, 0xfb, 0xf9, 0xff, 0xff, 0xd8, + 0x97, 0x7a, 0x7b, 0x81, 0x81, 0x7c, 0x79, 0x92, 0xd5, 0xff, 0xff, 0xfb, 0xff, 0xf3, 0xd7, 0xc2, + 0xc2, 0xd7, 0xf1, 0xfe, 0xff, 0xff, 0xff, 0xf6, 0xe8, 0xe2, 0xe2, 0xe3, 0xe3, 0xe2, 0xe1, 0xe6, + 0xf5, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf6, 0xee, 0xe5, 0xdc, 0xda, 0xdc, 0xde, 0xde, + 0xdd, 0xdc, 0xdb, 0xdd, 0xe6, 0xf6, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe6, 0xaf, 0x88, 0x80, 0x86, 0x84, + 0x82, 0x84, 0x85, 0x87, 0x85, 0x7e, 0x81, 0x9f, 0xcd, 0xee, 0xfa, 0xfe, 0xff, 0xff, 0xfe, 0xf7, + 0xcd, 0x91, 0x76, 0x7c, 0x79, 0x73, 0x86, 0xb7, 0xeb, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfa, 0xef, 0xe4, 0xe0, 0xe2, 0xe2, 0xe0, 0xe5, 0xf3, + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfa, 0xf1, 0xe6, 0xdc, 0xda, 0xdc, 0xdd, 0xdc, 0xdb, 0xdb, + 0xdb, 0xdb, 0xda, 0xdc, 0xe7, 0xf7, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf2, 0xc2, 0x8b, 0x7e, 0x89, 0x85, + 0x80, 
0x84, 0x86, 0x88, 0x86, 0x81, 0x80, 0x83, 0x8b, 0xa3, 0xd0, 0xf9, 0xff, 0xfc, 0xfb, 0xff, + 0xf5, 0xc7, 0x96, 0x7d, 0x78, 0x82, 0xad, 0xe8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xec, 0xe2, 0xe0, 0xe0, 0xe6, 0xf2, 0xfc, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf3, 0xe7, 0xe0, 0xdc, 0xdb, 0xdb, 0xdd, 0xdd, 0xdc, 0xdb, 0xda, + 0xdc, 0xdd, 0xd9, 0xdc, 0xec, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe2, 0xa6, 0x7f, 0x7e, 0x86, + 0x8a, 0x88, 0x83, 0x82, 0x84, 0x87, 0x8a, 0x83, 0x77, 0x80, 0x9a, 0xb8, 0xdb, 0xf6, 0xff, 0xff, + 0xfe, 0xf2, 0xd8, 0xc2, 0xbf, 0xca, 0xe2, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xf8, + 0xf8, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf8, 0xf3, 0xf1, 0xf0, 0xf5, 0xfc, 0xff, + 0xff, 0xff, 0xfd, 0xf6, 0xec, 0xe5, 0xde, 0xda, 0xdb, 0xdd, 0xdd, 0xdb, 0xda, 0xda, 0xdc, 0xdd, + 0xdb, 0xd8, 0xd8, 0xe4, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd6, 0xa7, 0x8b, 0x85, + 0x84, 0x83, 0x86, 0x88, 0x85, 0x83, 0x84, 0x85, 0x87, 0x86, 0x7d, 0x7c, 0x9a, 0xce, 0xf5, 0xff, + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfd, 0xf3, 0xe4, 0xdb, 0xdc, 0xde, 0xdd, 0xdc, 0xdc, 0xdb, 0xdb, 0xdc, 0xdc, 0xda, 0xda, + 0xda, 0xdb, 0xe3, 0xf2, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xed, 0xc9, 0x9b, + 0x82, 0x83, 0x88, 0x88, 0x85, 0x83, 0x84, 0x86, 0x88, 0x84, 0x82, 0x80, 0x7e, 0x94, 0xc8, 0xf3, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfb, 0xf0, 0xe3, 0xdc, 0xdb, 0xdc, 0xdd, 0xdd, 0xdb, 0xda, 0xda, 0xdb, 0xdc, 0xdc, 0xd9, 0xd8, + 0xe0, 0xee, 0xf9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf7, 0xd8, + 0xb9, 0x9c, 0x82, 0x7c, 0x86, 0x8a, 0x87, 0x83, 0x81, 0x82, 0x86, 0x89, 0x81, 0x7a, 0x95, 0xd8, + 0xff, 0xff, 0xf8, 0xf9, 0xf9, 0xf9, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, 0xfe, 0xfe, 0xff, 0xff, + 0xf5, 0xe3, 0xdb, 0xdc, 0xdd, 0xdc, 0xdb, 0xda, 0xdb, 0xdc, 0xdc, 0xda, 0xd7, 0xd9, 0xe0, 0xe9, + 0xf3, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfd, 0xff, 0xff, + 0xf6, 0xd2, 0xa4, 0x8e, 0x8a, 0x84, 0x83, 0x87, 0x87, 0x87, 0x83, 0x80, 0x8b, 0x80, 0x7b, 0xc1, + 0xff, 0xff, 0xf8, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xef, 0xdb, 0xdb, 0xdd, 0xdb, 0xdb, 0xdd, 0xdc, 0xdb, 0xda, 0xda, 0xda, 0xdb, 0xe3, 0xf1, 0xfc, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, + 0xfe, 0xfb, 0xef, 0xd0, 0xa5, 
0x85, 0x80, 0x89, 0x8a, 0x88, 0x83, 0x81, 0x8c, 0x81, 0x79, 0xc0, + 0xff, 0xff, 0xf8, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xed, 0xd9, 0xda, 0xde, 0xda, 0xda, 0xdc, 0xdc, 0xda, 0xd8, 0xda, 0xe3, 0xef, 0xf9, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf9, + 0xfb, 0xff, 0xff, 0xf4, 0xdc, 0xbf, 0xa0, 0x87, 0x7d, 0x80, 0x86, 0x87, 0x86, 0x7c, 0x89, 0xcd, + 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xf1, 0xdc, 0xd8, 0xdc, 0xdc, 0xda, 0xd8, 0xd7, 0xd9, 0xe0, 0xeb, 0xf4, 0xfb, 0xff, 0xff, 0xfd, + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xfd, 0xda, 0xad, 0x94, 0x8b, 0x89, 0x87, 0x84, 0x90, 0xb6, 0xe8, + 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xf9, 0xea, 0xdf, 0xdb, 0xdb, 0xda, 0xda, 0xdd, 0xe6, 0xf4, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf2, 0xda, 0xb9, 0xa0, 0x98, 0xa0, 0xc2, 0xee, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfb, 0xee, 0xe2, 0xdf, 0xe1, 0xe9, 0xf3, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfb, 0xfb, 0xfb, 0xfb, 0xfa, 0xfa, 0xfa, + 0xfa, 0xfc, 0xfd, 0xfb, 0xf7, 0xf7, 0xfc, 0xff, 0xfa, 0xea, 0xd8, 0xd4, 0xdd, 0xee, 0xfd, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfe, 0xfa, 0xf5, 0xf1, 0xf3, 0xf9, 0xfe, 0xff, 0xfe, 0xfd, 0xfd, 0xfe, 0xfe, 0xfd, 0xfd, + 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xed, 0xdf, 0xd7, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, + 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xd8, 0xdc, 0xe7, 0xf5, 0xfd, 0xfe, 0xfd, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf7, 0xf3, 0xf1, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xef, 0xef, + 0xef, 0xef, 0xef, 0xef, 0xef, 0xf0, 0xf3, 0xf9, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe3, 0xbc, 0xa6, 0x9d, 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, + 0x9a, 0x9a, 0x9a, 0x9a, 0x9a, 0x99, 0x98, 0x9a, 0xa2, 0xb3, 0xd5, 0xf7, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xee, 0xe3, 0xde, 0xda, 0xda, 0xda, 0xda, 0xda, 0xda, 0xd8, 0xd8, + 0xd8, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xdd, 0xe6, 0xf4, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfd, 0xe2, 0xb2, 0x91, 0x8e, 0x90, 0x8c, 0x8c, 0x8d, 0x8d, 0x8d, + 0x8d, 0x8d, 0x8d, 0x8d, 0x8c, 0x8c, 0x8b, 0x8d, 0x8f, 0x8b, 0x9f, 0xd0, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfb, 0xeb, 0xda, 0xd4, 0xd5, 0xd4, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd3, 0xd3, + 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xd4, 0xd5, 0xe1, 0xf3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf6, 0xbb, 0x8e, 0x87, 0x8d, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, + 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x8f, 0x90, 0x8f, 0x86, 0x86, 0xa5, 0xdf, 0xff, 0xff, 0xfc, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xf2, 0xdc, 0xd2, 0xd2, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd5, 0xd5, + 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd5, 0xd3, 0xd1, 0xd4, 0xe6, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xf0, 0xa4, 0x86, 0x94, 0x91, 0x8b, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, + 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0x8e, 0x8c, 0x8c, 0x94, 0x8d, 0x90, 0xce, 0xff, 0xff, 0xfa, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfd, 0xff, 0xff, 0xec, 0xd4, 0xd4, 0xd6, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, + 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd4, 0xd5, 0xd0, 0xdd, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xf1, 0xa7, 0x88, 0x95, 0x91, 0x8c, 0x8f, 0x8e, 0x8e, 0x8e, 0x8e, + 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8f, 0x8d, 0x8d, 0x95, 0x8f, 0x93, 0xcf, 
0xff, 0xff, 0xfa, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfd, 0xff, 0xff, 0xec, 0xd4, 0xd2, 0xd4, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, + 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd0, 0xd2, 0xd3, 0xcf, 0xdc, 0xf9, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf6, 0xbd, 0x90, 0x89, 0x8e, 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, + 0x91, 0x91, 0x91, 0x91, 0x91, 0x91, 0x90, 0x91, 0x90, 0x88, 0x89, 0xa9, 0xe1, 0xff, 0xff, 0xfc, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xf1, 0xd9, 0xce, 0xce, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, + 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd2, 0xd0, 0xce, 0xd1, 0xe4, 0xfb, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe0, 0xb1, 0x91, 0x8f, 0x91, 0x8d, 0x8d, 0x8e, 0x8e, 0x8e, + 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x8f, 0x91, 0x8e, 0xa1, 0xd1, 0xf6, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfa, 0xea, 0xd7, 0xd0, 0xd1, 0xd0, 0xcf, 0xd0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, + 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, 0xd0, 0xd2, 0xdf, 0xf3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xe3, 0xbd, 0xa7, 0x9e, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, + 0x9b, 0x9b, 0x9d, 0x9d, 0x9d, 0x9d, 0x9c, 0x9c, 0xa3, 0xb4, 0xd6, 0xf7, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfb, 0xed, 0xdf, 0xd8, 0xd4, 0xd3, 0xd4, 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, + 0xd6, 0xd6, 0xd6, 0xd6, 0xd6, 0xd7, 0xda, 0xe3, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xed, 0xdd, 0xd5, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, + 0xd4, 0xd4, 0xd5, 0xd5, 0xd5, 0xd5, 0xd6, 0xd7, 0xda, 0xe5, 0xf5, 0xfe, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xf4, 0xef, 0xed, 0xec, 0xec, 0xed, 0xee, 0xee, 0xee, 0xee, + 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xf1, 0xf7, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfb, 0xfb, 0xfb, 0xfb, 0xfa, 0xfa, 0xfa, + 0xfa, 0xfb, 0xfd, 0xfc, 0xf8, 0xf7, 0xfc, 0xff, 0xfb, 0xed, 0xe0, 0xdd, 0xe5, 0xf3, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfe, 0xf9, 0xf1, 0xee, 0xf0, 0xf7, 0xfd, 0xff, 0xfe, 0xfc, 0xfc, 0xfd, 0xfe, 0xfe, 0xfd, + 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf4, 0xe0, 0xc4, 0xaf, 0xa8, 0xb0, 0xcd, 0xf1, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf8, 0xe5, 0xd6, 0xd3, 0xd8, 0xe3, 0xf0, 0xf9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xfe, 0xe0, 0xb9, 0xa5, 0x9d, 0x9a, 0x98, 0x98, 0xa4, 0xc4, 0xed, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xf4, 0xde, 0xce, 0xc8, 0xca, 0xcc, 0xce, 0xd1, 0xdb, 0xf0, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfa, + 0xfb, 0xff, 0xff, 0xf6, 0xe3, 0xca, 0xae, 0x97, 0x8f, 0x92, 0x98, 0x9a, 0x99, 0x92, 0x9f, 0xd7, + 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xe9, 0xca, 0xc4, 0xc8, 0xc9, 0xc9, 0xc7, 0xc6, 0xcb, 0xd6, 0xe5, 0xf1, 0xfb, 0xff, 0xff, 0xfd, + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xfc, 0xf1, 0xd7, 0xb1, 0x96, 0x92, 0x9a, 0x9a, 0x99, 0x97, 0x97, 0xa1, 0x97, 0x91, 0xcb, + 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, + 0xe2, 0xc1, 0xc6, 0xcc, 0xc7, 0xc7, 0xc9, 0xcb, 0xcc, 0xc8, 0xcb, 0xd9, 0xec, 0xf8, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, + 0xf7, 0xd8, 0xb2, 0x9f, 0x9a, 0x95, 0x94, 0x99, 0x9b, 0x9b, 0x98, 0x98, 0xa2, 0x99, 0x95, 0xce, + 0xff, 0xff, 0xfa, 0xff, 0xff, 
0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, + 0xe3, 0xc2, 0xc6, 0xcb, 0xc6, 0xc7, 0xc9, 0xc9, 0xca, 0xc9, 0xc9, 0xcc, 0xcf, 0xda, 0xec, 0xfb, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf9, 0xe1, + 0xc7, 0xac, 0x94, 0x8e, 0x98, 0x9d, 0x9b, 0x98, 0x97, 0x98, 0x9c, 0xa0, 0x9b, 0x96, 0xaa, 0xdf, + 0xff, 0xff, 0xf9, 0xfb, 0xfb, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfc, 0xfc, 0xfc, 0xfc, 0xff, 0xff, + 0xed, 0xce, 0xc2, 0xc5, 0xca, 0xca, 0xc7, 0xc6, 0xc7, 0xca, 0xcc, 0xca, 0xc6, 0xc9, 0xd5, 0xe3, + 0xef, 0xfc, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf0, 0xd2, 0xab, + 0x95, 0x95, 0x9a, 0x9b, 0x99, 0x98, 0x99, 0x9b, 0x9e, 0x9d, 0x9c, 0x9a, 0x98, 0xa9, 0xd1, 0xf4, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf9, 0xe5, 0xcc, 0xc2, 0xc4, 0xc7, 0xc8, 0xc9, 0xc8, 0xc7, 0xc7, 0xc9, 0xca, 0xca, 0xc9, 0xc9, + 0xd3, 0xe8, 0xf8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xdd, 0xb6, 0x9e, 0x97, + 0x97, 0x98, 0x9a, 0x9b, 0x9a, 0x9a, 0x9a, 0x9b, 0x9e, 0x9f, 0x99, 0x97, 0xad, 0xd7, 0xf7, 0xff, + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfa, 0xe7, 0xcf, 0xc2, 0xc4, 0xc8, 0xc9, 0xc8, 0xc7, 0xc7, 0xc8, 0xc9, 0xc9, 0xc8, 0xc8, + 0xc8, 0xce, 0xda, 0xed, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe9, 0xb5, 0x93, 0x92, 0x99, + 0x9d, 0x9b, 0x97, 0x97, 0x9a, 0x9f, 0xa1, 0x9b, 0x93, 0x99, 0xac, 0xc4, 0xe0, 0xf6, 0xff, 0xff, + 0xfe, 0xf7, 0xe5, 0xd6, 0xd5, 0xde, 0xed, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, + 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf2, 0xe8, 0xe2, 0xe3, 0xee, 0xfb, 0xff, + 0xff, 0xff, 0xfa, 0xee, 0xdd, 0xce, 0xc4, 0xc1, 0xc6, 0xca, 0xca, 0xc7, 0xc6, 0xc7, 0xca, 0xcb, + 0xc9, 0xc7, 0xc7, 0xd8, 0xf3, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xcc, 0x9d, 0x93, 0x9d, 0x9a, + 0x96, 0x9a, 0x9b, 0x9d, 0x9c, 0x9a, 0x9a, 0x9d, 0xa2, 0xb4, 0xd8, 0xfa, 0xff, 0xfd, 0xfc, 0xff, + 0xf8, 0xd8, 0xb5, 0xa3, 0xa1, 0xaa, 0xc9, 0xf2, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf4, 0xd7, 0xc2, 0xbd, 0xbf, 0xcd, 0xe6, 0xfb, + 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xe7, 0xd2, 0xc8, 0xc5, 0xc5, 0xc7, 0xc8, 0xca, 0xc9, 0xc8, 0xc6, + 0xc8, 0xcb, 0xc6, 0xcc, 0xe4, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe9, 0xba, 0x9b, 0x97, 0x9b, 0x99, + 0x98, 0x9b, 0x9d, 0x9e, 0x9d, 0x99, 0x9b, 0xb2, 0xd7, 0xf1, 0xfb, 0xfe, 0xff, 0xff, 0xfe, 0xf9, + 0xda, 0xaf, 0x9d, 0xa3, 0xa2, 0x9f, 0xae, 0xd1, 0xf3, 
0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xf5, 0xdb, 0xc2, 0xb9, 0xbc, 0xbe, 0xbc, 0xca, 0xe7, + 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xf6, 0xe7, 0xd2, 0xc4, 0xc4, 0xc8, 0xca, 0xc9, 0xc7, 0xc6, + 0xc7, 0xc8, 0xc6, 0xc9, 0xdb, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xba, 0x9e, 0x9a, 0x9e, 0x9e, + 0x9f, 0xa1, 0x9b, 0x94, 0x9b, 0xb1, 0xc9, 0xe2, 0xfb, 0xff, 0xfe, 0xfc, 0xfb, 0xff, 0xff, 0xe4, + 0xb5, 0x9f, 0xa1, 0xaa, 0xab, 0xa6, 0xa5, 0xb9, 0xe5, 0xff, 0xff, 0xfd, 0xff, 0xf9, 0xeb, 0xe0, + 0xe1, 0xed, 0xfa, 0xff, 0xfd, 0xff, 0xff, 0xea, 0xc8, 0xba, 0xbc, 0xc0, 0xc2, 0xbe, 0xbd, 0xcd, + 0xed, 0xff, 0xff, 0xfc, 0xfd, 0xfe, 0xff, 0xfd, 0xee, 0xdf, 0xd0, 0xc5, 0xc3, 0xc7, 0xca, 0xca, + 0xca, 0xc9, 0xc8, 0xcb, 0xda, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xce, 0xa1, 0x93, 0x9b, 0x9b, + 0x97, 0x9a, 0x9f, 0xa5, 0xba, 0xdd, 0xf8, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xff, 0xfc, 0xca, + 0x9f, 0xa2, 0xa9, 0xa2, 0xa3, 0xa8, 0xa6, 0xb2, 0xe0, 0xff, 0xff, 0xfe, 0xfa, 0xde, 0xc2, 0xb6, + 0xb7, 0xc6, 0xe2, 0xfa, 0xfe, 0xff, 0xff, 0xe6, 0xc1, 0xba, 0xbd, 0xba, 0xbc, 0xc2, 0xbe, 0xbc, + 0xdb, 0xfd, 0xff, 0xfd, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xfb, 0xec, 0xd7, 0xcc, 0xc8, 0xc6, 0xc5, + 0xc8, 0xc8, 0xc4, 0xcc, 0xe4, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xed, 0xba, 0x98, 0x96, 0x98, + 0x94, 0x9c, 0xb7, 0xdd, 0xf5, 0xfd, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfb, 0xdc, 0xb4, + 0xa1, 0xa4, 0xa6, 0xa3, 0xa4, 0xa7, 0xa9, 0xb7, 0xe3, 0xff, 0xff, 0xf8, 0xe1, 0xbb, 0xab, 0xb2, + 0xb2, 0xae, 0xc1, 0xe4, 0xf9, 0xff, 0xff, 0xe8, 0xc5, 0xbb, 0xbc, 0xbc, 0xbc, 0xc0, 0xbf, 0xbe, + 0xcb, 0xe7, 0xfc, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xf9, 0xec, 0xd6, 0xc5, 0xc2, + 0xc6, 0xc6, 0xc6, 0xd9, 0xf6, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe6, 0xcd, 0xbe, 0xb8, + 0xbe, 0xcf, 0xe6, 0xf8, 0xff, 0xff, 0xfc, 0xfb, 0xfe, 0xff, 0xfd, 0xfc, 0xff, 0xf2, 0xbc, 0xa2, + 0xa6, 0xa4, 0xa2, 0xa7, 0xa8, 0xa4, 0xab, 0xc9, 0xed, 0xff, 0xff, 0xed, 0xc8, 0xad, 0xab, 0xb5, + 0xb5, 0xad, 0xb4, 0xcf, 0xf0, 0xff, 0xff, 0xf0, 0xd4, 0xbd, 0xb8, 0xbc, 0xbd, 0xbb, 0xbe, 0xc1, + 0xbe, 0xd1, 0xf6, 0xff, 0xfc, 0xfe, 0xff, 0xfe, 0xfd, 0xfe, 0xff, 0xff, 0xfb, 0xf0, 0xe3, 0xd9, + 0xd6, 0xda, 0xe3, 0xf1, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf3, 0xe9, + 0xf2, 0xff, 0xff, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xdb, 0xab, 0x9c, + 0xa6, 0xa7, 0xa2, 0xa8, 0xaa, 0xa2, 0xb4, 0xe3, 0xfb, 0xff, 0xff, 0xe2, 0xb8, 0xae, 0xb1, 0xb0, + 0xb0, 0xb3, 0xb6, 0xc1, 0xe6, 0xff, 0xff, 0xfb, 0xe8, 0xc3, 0xb5, 0xbd, 0xbe, 0xbb, 0xbe, 0xc0, + 0xba, 0xc5, 0xe6, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfd, 0xff, 0xff, 0xf7, + 0xf1, 0xf7, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe3, 0xb6, 0xa4, 0xa3, + 0xa4, 0xa5, 0xa4, 0xa7, 0xa9, 0xaa, 0xc7, 0xf6, 0xff, 0xff, 0xff, 0xe1, 0xb4, 
0xad, 0xb2, 0xb0, + 0xb0, 0xb5, 0xb4, 0xba, 0xe2, 0xff, 0xff, 0xff, 0xf7, 0xd0, 0xba, 0xbc, 0xbd, 0xbb, 0xbe, 0xbf, + 0xbe, 0xbf, 0xce, 0xed, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfe, 0xff, + 0xfd, 0xfb, 0xfc, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xfd, 0xce, 0xa1, 0xa2, 0xa9, + 0xa4, 0xa4, 0xa9, 0xa4, 0xa2, 0xbc, 0xe6, 0xfe, 0xfd, 0xff, 0xff, 0xe0, 0xb3, 0xab, 0xb1, 0xb0, + 0xb0, 0xb4, 0xb2, 0xb9, 0xe2, 0xff, 0xff, 0xfd, 0xfe, 0xea, 0xca, 0xb8, 0xb9, 0xbe, 0xbc, 0xbd, + 0xc2, 0xbe, 0xbf, 0xde, 0xfd, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfe, + 0xff, 0xfe, 0xfd, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe1, 0xb8, 0xa1, 0xa2, 0xa7, + 0xa4, 0xa4, 0xab, 0xa5, 0xa6, 0xd2, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xdf, 0xb4, 0xac, 0xb2, 0xb0, + 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xdb, 0xb8, 0xb8, 0xbf, 0xbb, 0xbc, + 0xbf, 0xbe, 0xbe, 0xcd, 0xe9, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf4, 0xbf, 0xa2, 0xa6, 0xa6, 0xa4, + 0xa6, 0xa6, 0xa7, 0xab, 0xbf, 0xe7, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xdf, 0xb4, 0xac, 0xb2, 0xb0, + 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xea, 0xca, 0xbd, 0xbc, 0xbc, 0xbd, + 0xbc, 0xbe, 0xc0, 0xbf, 0xd3, 0xf7, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe1, 0xad, 0x9c, 0xa7, 0xa7, 0xa3, + 0xa9, 0xa9, 0xa0, 0xaf, 0xdc, 0xfb, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdf, 0xb4, 0xad, 0xb2, 0xb0, + 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfa, 0xe3, 0xc0, 0xb5, 0xbd, 0xbe, + 0xbb, 0xbe, 0xbf, 0xba, 0xc6, 0xe9, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe9, 0xc0, 0xa8, 0xa3, 0xa6, 0xa6, 0xa3, + 0xa7, 0xa9, 0xa5, 0xbe, 0xf1, 0xff, 0xfd, 0xff, 0xfd, 0xff, 0xff, 0xdf, 0xb4, 0xae, 0xb3, 0xb0, + 0xb0, 0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xff, 0xf4, 0xcc, 0xb8, 0xbd, 0xbe, + 0xbc, 0xbd, 0xbd, 0xbd, 0xc1, 0xd2, 0xef, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xd4, 0xa4, 0xa3, 0xa9, 0xa4, 0xa5, 0xa7, + 0xa4, 0xa5, 0xb8, 0xdd, 0xfb, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, + 0xb0, 
0xb4, 0xb3, 0xba, 0xe1, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfc, 0xe4, 0xc7, 0xb9, 0xbb, + 0xbe, 0xbe, 0xbe, 0xc2, 0xbd, 0xbe, 0xe1, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xf7, 0xc6, 0x9d, 0xa0, 0xa8, 0xa2, 0xa3, 0xaa, + 0xa5, 0xa5, 0xcd, 0xfa, 0xff, 0xfc, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xe0, 0xb5, 0xae, 0xb3, 0xb0, + 0xb0, 0xb4, 0xb2, 0xba, 0xe2, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfc, 0xff, 0xfa, 0xd7, 0xb8, 0xba, + 0xbe, 0xba, 0xbc, 0xc2, 0xbb, 0xba, 0xd8, 0xfa, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xeb, 0xbf, 0xa1, 0xa4, 0xa8, 0xa4, 0xa6, 0xa8, + 0xa7, 0xb7, 0xe3, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xe0, 0xb5, 0xaf, 0xb3, 0xb0, + 0xb0, 0xb3, 0xb1, 0xb9, 0xe2, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe8, 0xc5, 0xba, + 0xbc, 0xbd, 0xbc, 0xc0, 0xbe, 0xbd, 0xd3, 0xf2, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xed, 0xc1, 0xa1, 0xa4, 0xa8, 0xa7, 0xa9, 0xa1, + 0xad, 0xd8, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xe0, 0xb5, 0xaf, 0xb3, 0xb0, + 0xb0, 0xb3, 0xb1, 0xb9, 0xe2, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe0, 0xbe, + 0xb7, 0xbf, 0xbd, 0xbe, 0xbd, 0xbb, 0xd4, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfc, 0xcf, 0xa2, 0x9d, 0xa5, 0xa3, 0xa0, 0xa3, + 0xc4, 0xf2, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xaf, 0xb3, 0xb0, + 0xb0, 0xb3, 0xb1, 0xb9, 0xe2, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf5, 0xd1, + 0xb8, 0xb7, 0xba, 0xbc, 0xb7, 0xbb, 0xdd, 0xfe, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe8, 0xc1, 0xad, 0xab, 0xa8, 0xa9, 0xc2, + 0xe7, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xaf, 0xb3, 0xb0, + 0xb0, 0xb3, 0xb1, 0xb9, 0xe2, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xec, + 0xcf, 0xbd, 0xbe, 0xc1, 0xc2, 0xd0, 0xee, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf0, 0xd9, 0xc7, 0xc7, 0xd6, 0xee, + 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xb5, 0xaf, 0xb3, 0xb0, + 0xb0, 0xb3, 0xb1, 0xb9, 0xe2, 
+ [... diff hunk continues: several thousand additional "+"-prefixed lines of embedded grayscale image pixel data (byte array constants in the range 0x6a-0xff, predominantly 0xff), with no other changes in this span ...]
0xff, 0xff, 0xcd, 0x88, 0x7b, 0x80, 0x7a, + 0x7a, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xfb, 0xff, 0xfe, 0xfb, 0xce, 0x83, 0x6a, 0x7a, 0x7a, + 0x72, 0x79, 0x79, 0x6a, 0x81, 0xcb, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfd, 0xfc, 0xff, 0xf1, 0xab, 0x84, 0x88, 0x87, 0x83, + 0x86, 0x86, 0x83, 0x85, 0xa3, 0xdd, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xcd, 0x89, 0x7c, 0x7f, 0x79, + 0x79, 0x7e, 0x79, 0x85, 0xcb, 0xff, 0xff, 0xf6, 0xff, 0xff, 0xda, 0x9b, 0x7a, 0x75, 0x76, 0x77, + 0x74, 0x78, 0x78, 0x72, 0x9c, 0xed, 0xff, 0xfb, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xd6, 0xa1, 0x82, 0x83, 0x88, + 0x83, 0x83, 0x89, 0x7f, 0x80, 0xbf, 0xff, 0xff, 0xf5, 0xff, 0xff, 0xcd, 0x89, 0x7d, 0x80, 0x79, + 0x79, 0x7f, 0x7a, 0x85, 0xcb, 0xff, 0xff, 0xf4, 0xff, 0xff, 0xb9, 0x74, 0x72, 0x7c, 0x74, 0x74, + 0x79, 0x74, 0x71, 0x92, 0xcf, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfa, 0xfd, 0xff, + 0xfd, 0xfb, 0xfc, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xfb, 0xbe, 0x84, 0x83, 0x8b, + 0x83, 0x83, 0x89, 0x81, 0x7b, 0xa0, 0xdd, 0xfe, 0xfc, 0xff, 0xff, 0xce, 0x89, 0x7c, 0x7f, 0x7a, + 0x7a, 0x7d, 0x77, 0x84, 0xcc, 0xff, 0xff, 0xfb, 0xfe, 0xda, 0x97, 0x6d, 0x71, 0x7a, 0x74, 0x73, + 0x7d, 0x72, 0x71, 0xb3, 0xfa, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdc, 0xa1, 0x87, 0x85, + 0x85, 0x85, 0x84, 0x86, 0x85, 0x82, 0xad, 0xf1, 0xff, 0xff, 0xff, 0xcf, 0x89, 0x7d, 0x80, 0x79, + 0x78, 0x7e, 0x7a, 0x86, 0xce, 0xff, 0xff, 0xff, 0xf0, 0xa7, 0x78, 0x78, 0x77, 0x74, 0x76, 0x76, + 0x75, 0x75, 0x90, 0xd5, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf2, 0xe7, + 0xf1, 0xff, 0xff, 0xfb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcf, 0x90, 0x7b, + 0x88, 0x88, 0x82, 0x87, 0x86, 0x78, 0x91, 0xd6, 0xf9, 0xff, 0xff, 0xd2, 0x8f, 0x80, 0x80, 0x7a, + 0x79, 0x7d, 0x7d, 0x8f, 0xd2, 0xff, 0xff, 0xf9, 0xd2, 0x87, 0x6c, 0x7a, 0x79, 0x72, 0x7a, 0x7a, + 0x69, 0x7e, 0xc6, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfc, + 0xf9, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfb, 0xe0, 0xc7, 0xb9, 0xb1, + 0xb6, 0xcb, 0xe5, 0xf8, 0xff, 0xff, 0xfc, 0xfb, 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xed, 0xa7, 0x84, + 0x8a, 0x86, 0x81, 0x86, 0x86, 0x7d, 0x86, 0xb2, 0xe5, 0xff, 0xff, 0xe0, 0xa5, 
0x7d, 0x77, 0x84, + 0x81, 0x73, 0x7a, 0xa5, 0xe0, 0xff, 0xff, 0xe3, 0xab, 0x7b, 0x6f, 0x78, 0x79, 0x72, 0x76, 0x79, + 0x72, 0x99, 0xea, 0xff, 0xfb, 0xfd, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf9, 0xf2, 0xec, + 0xeb, 0xed, 0xf1, 0xf7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xe9, 0xab, 0x86, 0x89, 0x8a, + 0x83, 0x8d, 0xb0, 0xda, 0xf4, 0xfd, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd2, 0x9d, + 0x82, 0x84, 0x87, 0x83, 0x84, 0x85, 0x82, 0x96, 0xd6, 0xff, 0xff, 0xf4, 0xcf, 0x92, 0x74, 0x7d, + 0x7c, 0x72, 0x8f, 0xcc, 0xf3, 0xff, 0xff, 0xd2, 0x8d, 0x77, 0x77, 0x74, 0x74, 0x78, 0x75, 0x71, + 0x8e, 0xca, 0xf9, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf6, 0xec, 0xe3, 0xe0, + 0xe1, 0xe1, 0xdf, 0xe9, 0xfa, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xc9, 0x96, 0x84, 0x8c, 0x8c, + 0x88, 0x8b, 0x91, 0x98, 0xb1, 0xda, 0xf8, 0xff, 0xfe, 0xfc, 0xfe, 0xff, 0xfb, 0xff, 0xfb, 0xbb, + 0x80, 0x84, 0x8b, 0x82, 0x82, 0x87, 0x81, 0x8c, 0xd0, 0xff, 0xff, 0xfe, 0xf5, 0xc8, 0x97, 0x80, + 0x80, 0x96, 0xc6, 0xf5, 0xfe, 0xff, 0xff, 0xce, 0x85, 0x76, 0x79, 0x72, 0x71, 0x7c, 0x73, 0x6f, + 0xaf, 0xf9, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf7, 0xec, 0xe6, 0xe5, 0xe3, 0xe1, + 0xe1, 0xe1, 0xe0, 0xe4, 0xf2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe6, 0xb5, 0x95, 0x8f, 0x92, 0x92, + 0x92, 0x92, 0x8c, 0x84, 0x8c, 0xa7, 0xc4, 0xe0, 0xfb, 0xff, 0xfe, 0xfb, 0xf9, 0xff, 0xff, 0xda, + 0x9e, 0x82, 0x84, 0x8a, 0x8b, 0x86, 0x82, 0x98, 0xd7, 0xff, 0xff, 0xfc, 0xff, 0xf3, 0xd9, 0xc5, + 0xc5, 0xd9, 0xf2, 0xff, 0xfc, 0xff, 0xff, 0xd4, 0x8f, 0x75, 0x77, 0x7d, 0x7c, 0x73, 0x70, 0x8e, + 0xd5, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xfe, 0xf8, 0xf1, 0xea, 0xe3, 0xe0, 0xe2, 0xe4, 0xe4, + 0xe3, 0xe3, 0xe3, 0xe4, 0xec, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xb5, 0x91, 0x8b, 0x90, 0x8e, + 0x8b, 0x8c, 0x8e, 0x91, 0x8f, 0x89, 0x8b, 0xa8, 0xd3, 0xf1, 0xfb, 0xfe, 0xff, 0xff, 0xfe, 0xf8, + 0xd0, 0x98, 0x7e, 0x84, 0x83, 0x7d, 0x8e, 0xbb, 0xec, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xea, 0xb4, 0x82, 0x6d, 0x73, 0x73, 0x6c, 0x88, 0xc8, + 0xf6, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xf5, 0xea, 0xe2, 0xe2, 0xe3, 0xe3, 0xe2, 0xe1, 0xe1, + 0xe2, 0xe2, 0xe1, 0xe3, 0xec, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xc7, 0x93, 0x88, 0x93, 0x8f, + 0x8b, 0x8e, 0x8f, 0x91, 0x8f, 0x8b, 0x8a, 0x8d, 0x95, 0xac, 0xd5, 0xf9, 0xff, 0xfd, 0xfc, 0xff, + 0xf5, 0xcb, 0x9d, 0x85, 0x81, 0x8b, 0xb2, 0xe9, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xe8, 0xa9, 0x7b, 0x6f, 0x72, 0x8d, 0xc3, 0xf3, + 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xf5, 0xeb, 0xe5, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, + 0xe2, 0xe3, 0xe0, 0xe3, 0xf0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xe5, 0xad, 0x88, 0x88, 0x90, + 0x94, 0x91, 0x8d, 0x8c, 0x8c, 0x90, 0x93, 0x8c, 0x82, 0x8b, 0xa4, 0xbf, 0xdf, 0xf7, 0xff, 0xff, + 0xfe, 0xf2, 0xda, 0xc6, 0xc3, 0xce, 0xe4, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xf8, + 0xf8, 
0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xe0, 0xc7, 0xba, 0xbc, 0xd4, 0xf1, 0xfe, + 0xff, 0xff, 0xfe, 0xf8, 0xf0, 0xe9, 0xe2, 0xe0, 0xe2, 0xe4, 0xe4, 0xe1, 0xe1, 0xe1, 0xe3, 0xe3, + 0xe3, 0xe1, 0xe0, 0xe9, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xda, 0xaf, 0x95, 0x8e, + 0x8d, 0x8d, 0x90, 0x91, 0x90, 0x8d, 0x8d, 0x8d, 0x8f, 0x90, 0x89, 0x87, 0xa3, 0xd3, 0xf6, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xff, 0xfd, 0xf6, 0xeb, 0xe4, 0xe3, 0xe4, 0xe4, 0xe2, 0xe1, 0xe1, 0xe2, 0xe3, 0xe3, 0xe1, 0xe1, + 0xe2, 0xe3, 0xe9, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xef, 0xce, 0xa3, + 0x8c, 0x8d, 0x91, 0x92, 0x8f, 0x8d, 0x8d, 0x8e, 0x90, 0x8e, 0x8d, 0x8a, 0x88, 0x9e, 0xcc, 0xf3, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfd, 0xf4, 0xe7, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe2, 0xe1, 0xe1, 0xe2, 0xe3, 0xe3, 0xe2, 0xe1, + 0xe6, 0xf1, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf8, 0xdb, + 0xbf, 0xa5, 0x8c, 0x85, 0x8f, 0x94, 0x92, 0x8d, 0x8b, 0x8b, 0x8f, 0x93, 0x8b, 0x85, 0x9f, 0xdc, + 0xff, 0xff, 0xf8, 0xfa, 0xf9, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf9, 0xf8, 0xf8, 0xfa, 0xff, 0xff, + 0xf7, 0xea, 0xe2, 0xe1, 0xe2, 0xe3, 0xe2, 0xe1, 0xe2, 0xe3, 0xe4, 0xe2, 0xdf, 0xe0, 0xe7, 0xee, + 0xf5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, + 0xf6, 0xd5, 0xad, 0x97, 0x92, 0x8d, 0x8c, 0x90, 0x91, 0x91, 0x8c, 0x8b, 0x95, 0x8b, 0x87, 0xc8, + 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xf3, 0xe4, 0xe3, 0xe4, 0xe1, 0xe2, 0xe3, 0xe3, 0xe2, 0xe1, 0xe1, 0xe3, 0xe3, 0xe8, 0xf3, 0xfc, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, + 0xfe, 0xfb, 0xf0, 0xd4, 0xac, 0x8f, 0x8a, 0x91, 0x92, 0x91, 0x8d, 0x8c, 0x96, 0x8a, 0x82, 0xc4, + 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xf1, 0xe1, 0xe2, 0xe4, 0xe1, 0xe2, 0xe2, 0xe3, 0xe3, 0xdf, 0xdf, 0xe8, 0xf4, 0xfb, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfa, + 0xfb, 0xff, 0xff, 0xf5, 0xe0, 0xc5, 0xa6, 0x90, 0x87, 0x89, 0x8e, 0x90, 0x8f, 0x86, 0x92, 0xd1, + 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xfd, 0xff, 0xff, + 0xf3, 0xe3, 0xe0, 0xe3, 0xe3, 0xe2, 0xe1, 0xe0, 0xe1, 0xe6, 0xed, 0xf6, 0xfd, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xfd, 0xdd, 0xb4, 0x9e, 0x95, 0x91, 0x90, 0x8e, 0x9a, 0xbd, 0xea, + 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0xee, 0xe4, 0xe2, 0xe2, 0xe2, 0xe3, 0xe5, 0xea, 0xf5, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf3, 0xdd, 0xc1, 0xaa, 0xa1, 0xa8, 0xc8, 0xf0, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfb, 0xf0, 0xe7, 0xe5, 0xe7, 0xee, 0xf6, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfa, + 0xfb, 0xfc, 0xfd, 0xfc, 0xf8, 0xf7, 0xfc, 0xff, 0xfb, 0xed, 0xde, 0xd8, 0xe0, 0xf0, 0xfd, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfe, 0xfa, 0xf6, 0xf4, 0xf5, 0xfa, 0xfe, 0xff, 0xfe, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xef, 0xe2, 0xdb, 0xda, 0xda, 0xda, 0xda, 0xda, + 0xda, 0xda, 0xda, 0xda, 0xd9, 0xd9, 0xda, 0xdb, 0xdf, 0xe9, 0xf5, 0xfd, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf8, 0xf4, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, + 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf6, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe5, 0xc2, 0xaf, 0xa8, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, + 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa2, 0xa1, 0xa3, 0xaa, 0xb9, 0xd7, 0xf8, 0xff, 0xfe, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf3, 0xe9, 0xe4, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, + 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe2, 0xe5, 0xeb, 0xf7, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xe4, 0xb8, 0x9a, 0x98, 0x9a, 0x96, 0x96, 0x97, 0x97, 0x97, + 0x97, 0x97, 0x97, 0x97, 0x97, 0x97, 0x96, 0x98, 0x9a, 0x96, 0xa8, 0xd5, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfb, 0xf0, 0xe3, 0xde, 0xdf, 0xde, 0xdd, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, + 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xe7, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf7, 0xc3, 0x99, 0x92, 0x97, 0x9a, 0x9a, 0x9b, 0x9b, 0x9a, 0x9a, + 0x9a, 0x9b, 0x9b, 0x9a, 0x9a, 0x9b, 0x9a, 0x9a, 0x98, 0x92, 0x92, 0xad, 0xe2, 0xff, 0xff, 0xfc, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xf5, 0xe4, 0xdb, 0xdb, 0xde, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xdd, 0xdc, 0xdc, + 0xdc, 0xdd, 0xdd, 0xdd, 0xdd, 0xdc, 0xda, 0xd9, 0xdc, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xf2, 0xac, 0x92, 0xa0, 0x9c, 0x96, 0x99, 0x98, 0x98, 0x99, 0x99, + 0x99, 0x98, 0x98, 0x99, 0x99, 0x98, 0x98, 0x96, 0x96, 0x9e, 0x99, 0x9a, 0xd2, 0xff, 0xff, 0xfa, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfd, 0xff, 0xff, 0xf0, 0xde, 0xdc, 0xde, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, + 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdc, 0xdd, 0xda, 0xe4, 0xfb, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xff, 0xfd, 0xfb, 0xff, 0xf2, 0xae, 0x94, 0xa1, 0x9d, 0x97, 0x9a, 0x99, 0x9a, 0x9b, 0x9b, + 0x9a, 0x99, 0x99, 0x9a, 0x9b, 0x99, 0x99, 0x98, 0x99, 0xa0, 0x9a, 0x9c, 0xd3, 0xff, 0xff, 0xfa, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfd, 0xff, 0xff, 0xef, 0xdc, 0xdb, 0xdd, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, + 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xda, 0xdc, 0xdd, 0xd9, 0xe3, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf7, 0xc3, 0x9c, 0x97, 0x9b, 0x9d, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, + 0x9e, 0x9d, 0x9d, 0x9e, 0x9e, 0x9e, 0x9c, 0x9e, 0x9d, 0x96, 0x95, 0xb0, 0xe3, 0xff, 0xff, 0xfc, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xf4, 0xe1, 0xd8, 0xd9, 0xda, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xda, 0xd9, + 0xda, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xd9, 0xdb, 0xe9, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe3, 0xb9, 0x9e, 0x9b, 0x9d, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, + 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9b, 0x9a, 0x9c, 0x9e, 0x9a, 0xaa, 0xd5, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfb, 0xee, 0xe1, 0xda, 0xd9, 0xd9, 0xda, 0xdb, 0xdb, 0xdb, 0xdb, 0xd9, 0xd8, + 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdb, 0xdb, 0xe5, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xe5, 0xc3, 0xb0, 0xa9, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, + 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xae, 0xbc, 0xd8, 0xf7, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf1, 0xe5, 0xdf, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, + 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xe0, 0xe8, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xee, 0xe1, 0xda, 0xd9, 0xd9, 0xd9, 0xd9, 0xd9, + 0xd9, 0xd9, 0xd8, 0xd8, 0xd8, 0xd8, 0xd9, 0xdb, 0xde, 0xe8, 0xf5, 0xfe, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xf6, 0xf2, 0xf2, 0xf2, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, + 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf3, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, + 0xfb, 0xfc, 0xfd, 0xfb, 0xf8, 0xf8, 0xfc, 0xff, 0xfb, 0xf0, 0xe3, 0xe0, 0xe7, 0xf3, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xfe, 0xf9, 0xf3, 0xf2, 0xf4, 0xf9, 0xfe, 0xff, 0xfd, 0xfc, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe3, 0xca, 0xb7, 0xb0, 0xb7, 0xd3, 0xf3, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xf8, 0xea, 0xde, 0xdc, 0xe0, 0xe8, 0xf2, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfd, 0xfc, 0xff, 0xfe, 0xe4, 0xc0, 0xad, 0xa7, 0xa6, 0xa4, 0xa3, 0xad, 0xca, 0xee, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xf6, 0xe4, 0xd8, 0xd4, 0xd4, 0xd5, 0xd6, 0xd9, 0xe2, 0xf2, 0xfe, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfb, + 0xfc, 0xff, 0xff, 0xf7, 0xe6, 0xd0, 0xb7, 0xa3, 0x9b, 0x9e, 0xa4, 0xa6, 0xa5, 0x9e, 0xa9, 0xdb, + 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xec, 0xd2, 0xcf, 0xd3, 0xd4, 0xd3, 0xd1, 0xd0, 0xd4, 0xdd, 0xe9, 0xf4, 0xfc, 0xff, 0xff, 0xfe, + 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xfc, 0xf4, 0xdd, 0xba, 0xa2, 0x9f, 0xa6, 0xa6, 0xa5, 0xa3, 0xa2, 0xab, 0xa4, 0xa0, 0xd2, + 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, + 0xe7, 0xcc, 0xcf, 0xd5, 0xd1, 0xd3, 0xd5, 0xd5, 0xd5, 0xd1, 0xd3, 0xe0, 0xef, 0xf9, 0xfd, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, + 0xf8, 0xdc, 0xba, 0xaa, 0xa6, 0xa2, 0xa1, 0xa5, 0xa7, 0xa7, 0xa4, 0xa3, 0xab, 0xa4, 0xa1, 0xd4, + 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, + 0xe8, 0xce, 0xd0, 0xd4, 0xd0, 0xd2, 0xd4, 0xd4, 0xd4, 0xd2, 0xd2, 0xd5, 0xd8, 0xe0, 0xf0, 0xfc, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xe4, + 0xcd, 0xb6, 0x9f, 0x9b, 0xa5, 0xa9, 0xa7, 0xa3, 0xa2, 0xa3, 0xa7, 0xaa, 0xa6, 0xa1, 0xb4, 0xe3, + 0xff, 0xff, 0xfa, 0xfc, 0xfb, 0xfb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, 0xfd, 0xfd, 0xff, 0xff, + 0xf0, 0xd8, 0xce, 0xd0, 0xd3, 
0xd2, 0xd0, 0xd0, 0xd3, 0xd6, 0xd6, 0xd4, 0xd1, 0xd4, 0xdd, 0xe7, + 0xf2, 0xfd, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf2, 0xd6, 0xb4, + 0xa1, 0xa2, 0xa6, 0xa6, 0xa5, 0xa4, 0xa4, 0xa6, 0xa8, 0xa7, 0xa6, 0xa7, 0xa5, 0xb3, 0xd6, 0xf6, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfa, 0xea, 0xd7, 0xcf, 0xd1, 0xd1, 0xd1, 0xd2, 0xd3, 0xd3, 0xd3, 0xd4, 0xd5, 0xd5, 0xd3, 0xd2, + 0xdc, 0xed, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xe0, 0xbe, 0xaa, 0xa3, + 0xa2, 0xa2, 0xa5, 0xa7, 0xa6, 0xa4, 0xa4, 0xa6, 0xa9, 0xaa, 0xa5, 0xa4, 0xb8, 0xdb, 0xf7, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfb, 0xec, 0xd9, 0xcf, 0xd0, 0xd2, 0xd2, 0xd1, 0xd1, 0xd1, 0xd4, 0xd5, 0xd4, 0xd3, 0xd3, + 0xd3, 0xd6, 0xe0, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xeb, 0xbd, 0x9f, 0x9f, 0xa6, + 0xa8, 0xa6, 0xa3, 0xa3, 0xa3, 0xa7, 0xaa, 0xa6, 0xa0, 0xa6, 0xb8, 0xcb, 0xe3, 0xf7, 0xff, 0xff, + 0xfe, 0xf8, 0xe9, 0xdc, 0xdb, 0xe2, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfc, + 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xec, 0xe7, 0xe7, 0xf1, 0xfb, 0xff, + 0xff, 0xff, 0xfb, 0xf1, 0xe4, 0xd9, 0xd0, 0xcd, 0xd0, 0xd2, 0xd2, 0xd1, 0xd1, 0xd1, 0xd4, 0xd6, + 0xd4, 0xd1, 0xd1, 0xe0, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xd2, 0xa9, 0x9f, 0xa8, 0xa5, + 0xa2, 0xa5, 0xa6, 0xa9, 0xa7, 0xa4, 0xa5, 0xa9, 0xae, 0xbd, 0xdd, 0xfa, 0xff, 0xfd, 0xfd, 0xff, + 0xf9, 0xde, 0xbe, 0xaf, 0xad, 0xb5, 0xd0, 0xf3, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf7, 0xdf, 0xcc, 0xc8, 0xcb, 0xd6, 0xe9, 0xfb, + 0xff, 0xfd, 0xfe, 0xff, 0xfc, 0xec, 0xdb, 0xd4, 0xd1, 0xd0, 0xd0, 0xd1, 0xd1, 0xd1, 0xd2, 0xd1, + 0xd3, 0xd5, 0xd2, 0xd6, 0xe9, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xeb, 0xc3, 0xa7, 0xa2, 0xa6, 0xa5, + 0xa3, 0xa3, 0xa6, 0xaa, 0xa9, 0xa5, 0xa6, 0xbb, 0xdd, 0xf3, 0xfa, 0xfe, 0xff, 0xff, 0xfe, 0xf9, + 0xdf, 0xba, 0xa9, 0xad, 0xae, 0xac, 0xb8, 0xd6, 0xf4, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf6, 0xe1, 0xce, 0xc5, 0xc7, 0xca, 0xc8, 0xd3, 0xeb, + 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfd, 0xf8, 0xec, 0xdb, 0xd0, 0xcf, 0xd2, 0xd2, 0xd1, 0xd0, 0xd1, + 0xd3, 0xd4, 0xd2, 0xd5, 0xe2, 0xf6, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xea, 0xc2, 0xa9, 0xa5, 0xa8, 0xa8, + 0xa8, 0xa9, 0xa6, 0xa1, 0xa7, 0xba, 0xce, 0xe5, 0xfb, 0xff, 0xfe, 0xfc, 0xfc, 0xff, 0xff, 0xe8, + 0xbf, 0xaa, 0xac, 0xb4, 0xb5, 0xb1, 0xb1, 0xc1, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xfa, 0xee, 0xe4, + 0xe4, 0xef, 0xfb, 0xff, 0xfd, 0xff, 0xff, 0xed, 0xd0, 0xc7, 0xc8, 0xcb, 0xcb, 0xc9, 0xc9, 0xd6, + 0xf0, 0xff, 0xff, 0xfd, 0xfd, 0xff, 0xff, 0xfd, 0xf1, 
0xe5, 0xda, 0xd0, 0xce, 0xd1, 0xd3, 0xd3, + 0xd3, 0xd3, 0xd1, 0xd3, 0xe1, 0xf5, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xd3, 0xab, 0x9f, 0xa6, 0xa5, + 0xa1, 0xa4, 0xaa, 0xb0, 0xc2, 0xe0, 0xf8, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xfc, 0xff, 0xfc, 0xd2, + 0xab, 0xac, 0xb3, 0xaf, 0xaf, 0xb3, 0xb3, 0xbc, 0xe4, 0xff, 0xff, 0xfe, 0xfa, 0xe3, 0xcb, 0xc0, + 0xc1, 0xce, 0xe6, 0xfa, 0xfe, 0xff, 0xff, 0xea, 0xcb, 0xc6, 0xc9, 0xc6, 0xc6, 0xcc, 0xcb, 0xca, + 0xe1, 0xfd, 0xff, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xfc, 0xf0, 0xdf, 0xd5, 0xd3, 0xd1, 0xcf, + 0xd1, 0xd1, 0xce, 0xd4, 0xe9, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf0, 0xc2, 0xa2, 0xa1, 0xa4, + 0xa1, 0xa6, 0xbe, 0xe0, 0xf6, 0xfd, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xfc, 0xe1, 0xbd, + 0xab, 0xae, 0xb2, 0xaf, 0xb0, 0xb4, 0xb6, 0xc1, 0xe7, 0xff, 0xff, 0xfa, 0xe7, 0xc6, 0xb7, 0xbd, + 0xbd, 0xba, 0xca, 0xe9, 0xfb, 0xff, 0xff, 0xec, 0xce, 0xc5, 0xc7, 0xc8, 0xc7, 0xc9, 0xca, 0xca, + 0xd5, 0xeb, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfb, 0xef, 0xdd, 0xd1, 0xce, + 0xcf, 0xce, 0xcf, 0xe0, 0xf8, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe9, 0xd2, 0xc4, 0xc1, + 0xc5, 0xd3, 0xe7, 0xf8, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf4, 0xc6, 0xad, + 0xb0, 0xb1, 0xb0, 0xb2, 0xb2, 0xaf, 0xb8, 0xd1, 0xef, 0xff, 0xff, 0xef, 0xd0, 0xba, 0xb7, 0xc0, + 0xc0, 0xb9, 0xc0, 0xd7, 0xf2, 0xff, 0xff, 0xf3, 0xda, 0xc7, 0xc4, 0xc9, 0xc9, 0xc6, 0xc9, 0xcb, + 0xca, 0xdb, 0xf9, 0xff, 0xfd, 0xfe, 0xff, 0xfe, 0xfd, 0xfe, 0xff, 0xff, 0xfb, 0xf3, 0xe9, 0xe1, + 0xde, 0xe0, 0xe8, 0xf3, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf4, 0xea, + 0xf4, 0xff, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe0, 0xb5, 0xa7, + 0xb0, 0xb2, 0xaf, 0xb2, 0xb3, 0xad, 0xbe, 0xe7, 0xfb, 0xff, 0xff, 0xe6, 0xc2, 0xbb, 0xbd, 0xbb, + 0xbc, 0xc0, 0xc1, 0xcb, 0xea, 0xff, 0xff, 0xfc, 0xeb, 0xcc, 0xc1, 0xc9, 0xc9, 0xc6, 0xc9, 0xca, + 0xc6, 0xd0, 0xeb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xf9, + 0xf4, 0xf9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xbf, 0xae, 0xae, + 0xb0, 0xb1, 0xb0, 0xb2, 0xb4, 0xb5, 0xcf, 0xf7, 0xff, 0xff, 0xff, 0xe4, 0xbe, 0xba, 0xbe, 0xbb, + 0xbe, 0xc2, 0xbf, 0xc4, 0xe7, 0xff, 0xff, 0xff, 0xf9, 0xd8, 0xc5, 0xc7, 0xc8, 0xc7, 0xc8, 0xc9, + 0xc9, 0xca, 0xd6, 0xf0, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfe, 0xff, + 0xfe, 0xfc, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xfd, 0xd4, 0xad, 0xac, 0xb4, + 0xb1, 0xb0, 0xb3, 0xb1, 0xb1, 0xc6, 0xe9, 0xfe, 0xfe, 0xff, 0xff, 0xe5, 0xbf, 0xb9, 0xbc, 0xbc, + 0xbd, 0xc0, 0xbd, 0xc4, 0xe7, 0xff, 0xff, 0xfe, 0xfe, 0xed, 0xd1, 0xc2, 0xc5, 0xc9, 0xc7, 0xc8, + 0xcb, 0xc8, 0xca, 0xe3, 0xfd, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xfe, 0xfd, 0xfe, + 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe5, 0xc0, 0xab, 0xae, 0xb3, + 0xb0, 0xb0, 0xb6, 0xb2, 0xb3, 0xd8, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xe4, 0xc0, 0xba, 0xbd, 0xbb, + 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xe1, 0xc4, 0xc5, 0xca, 0xc7, 0xc7, + 0xc9, 0xc9, 0xc9, 0xd6, 0xed, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf5, 0xc8, 0xae, 0xaf, 0xb1, 0xb0, + 0xb1, 0xb2, 0xb3, 0xb5, 0xc6, 0xe9, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xe4, 0xc0, 0xba, 0xbd, 0xbc, + 0xbe, 0xc1, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xee, 0xd3, 0xc7, 0xc7, 0xc8, 0xc8, + 0xc7, 0xc9, 0xca, 0xc9, 0xda, 0xf8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xe4, 0xb7, 0xa8, 0xb1, 0xb2, 0xae, + 0xb3, 0xb4, 0xae, 0xba, 0xe1, 0xfb, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe4, 0xc0, 0xba, 0xbd, 0xbc, + 0xbd, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0xe7, 0xc9, 0xc1, 0xc9, 0xc9, + 0xc6, 0xc9, 0xc9, 0xc5, 0xd0, 0xed, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xec, 0xc8, 0xb1, 0xae, 0xb2, 0xb2, 0xaf, + 0xb2, 0xb5, 0xb2, 0xc8, 0xf5, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, + 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xf6, 0xd3, 0xc3, 0xc9, 0xc9, + 0xc7, 0xc8, 0xc8, 0xc9, 0xcc, 0xda, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xd9, 0xaf, 0xad, 0xb4, 0xb0, 0xb0, 0xb2, + 0xb0, 0xb2, 0xc2, 0xe1, 0xfc, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbe, 0xbc, + 0xbc, 0xbf, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfc, 0xe8, 0xd0, 0xc6, 0xc7, + 0xc9, 0xc7, 0xc7, 0xcb, 0xc8, 0xc9, 0xe6, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xf9, 0xcd, 0xa8, 0xac, 0xb4, 0xae, 0xae, 0xb4, + 0xb0, 0xb1, 0xd4, 0xfa, 0xff, 0xfc, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, + 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xfb, 0xde, 0xc4, 0xc6, + 0xca, 0xc6, 0xc7, 0xca, 0xc5, 0xc5, 0xde, 0xfa, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xc6, 0xab, 0xaf, 0xb3, 0xb0, 0xb1, 0xb2, + 0xb1, 0xc1, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, + 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xeb, 0xce, 0xc5, + 0xc9, 0xc9, 0xc8, 0xca, 0xc8, 0xc6, 0xd9, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xc9, 0xac, 0xb0, 0xb3, 0xb2, 0xb4, 0xae, + 0xb8, 0xdd, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbe, 0xbc, + 0xbc, 0xbf, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe6, 0xca, + 0xc4, 0xca, 0xc9, 0xca, 0xc7, 0xc5, 0xdb, 0xf7, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfc, 0xd5, 0xae, 0xab, 0xb1, 0xae, 0xab, 0xaf, + 0xcc, 0xf4, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbe, 0xbc, + 0xbc, 0xbf, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf7, 0xd8, + 0xc4, 0xc3, 0xc5, 0xc7, 0xc3, 0xc6, 0xe3, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xeb, 0xc8, 0xb7, 0xb5, 0xb3, 0xb5, 0xca, + 0xeb, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, + 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xef, + 0xd7, 0xc9, 0xc9, 0xcb, 0xcc, 0xd8, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf2, 0xde, 0xce, 0xcd, 0xdc, 0xf1, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc0, 0xba, 0xbd, 0xbb, + 0xbc, 0xc0, 0xbe, 0xc5, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf4, 0xe5, 0xdd, 0xdd, 0xe8, 0xf6, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xef, 0xef, 0xfa, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc2, 0xbc, 0xbe, 0xba, + 0xbb, 0xc0, 0xbf, 0xc6, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfb, 0xf4, 0xf4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xea, 0xc8, 0xbb, 0xbc, 0xbe, + 0xbe, 0xbe, 0xbf, 0xcb, 0xeb, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf5, 0xd8, 0xbd, 0xb8, 0xc1, + 0xc1, 0xba, 0xc1, 0xd9, 0xf5, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xee, 0xcf, 0xbb, 0xb9, + 0xb9, 0xbc, 0xd1, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xeb, 0xd6, 0xca, + 0xca, 0xd7, 0xec, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf9, 0xf6, + 0xf6, 0xf9, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, + 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
+ [... diff hunk continues: embedded image byte array, several thousand added lines of 8-bit pixel values (predominantly 0xff); raw data omitted ...]
0x7a, 0x93, 0xc6, 0xf8, 0xff, 0xfd, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf3, 0xd6, 0xaf, + 0x99, 0x99, 0x9d, 0x9e, 0x9d, 0x9a, 0x99, 0x99, 0x9c, 0x9b, 0x98, 0x95, 0x94, 0xa3, 0xc6, 0xee, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xf9, 0xd2, 0x92, 0x72, 0x74, 0x75, 0x76, 0x79, 0x77, 0x74, 0x73, 0x75, 0x78, 0x78, 0x73, 0x6e, + 0x80, 0xae, 0xe0, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xe0, + 0xc7, 0xb1, 0x9b, 0x93, 0x9a, 0x9f, 0x9f, 0x9b, 0x97, 0x96, 0x9a, 0x9d, 0x9a, 0x91, 0x9d, 0xd5, + 0xff, 0xff, 0xf9, 0xfa, 0xfa, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfa, 0xf9, 0xf8, 0xf8, 0xff, 0xff, + 0xe2, 0xa1, 0x71, 0x6e, 0x7d, 0x79, 0x72, 0x71, 0x73, 0x78, 0x7c, 0x78, 0x6b, 0x6d, 0x87, 0xa7, + 0xc8, 0xe9, 0xfc, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xf8, 0xdd, 0xba, 0xa5, 0x9f, 0x9a, 0x99, 0x9d, 0x9c, 0x9c, 0x99, 0x97, 0x9f, 0x95, 0x8f, 0xc5, + 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, + 0xc8, 0x81, 0x77, 0x7b, 0x74, 0x74, 0x77, 0x79, 0x78, 0x73, 0x71, 0x76, 0x7b, 0x90, 0xbd, 0xec, + 0xff, 0xff, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, + 0xfe, 0xfc, 0xf5, 0xdf, 0xba, 0x9c, 0x96, 0x9d, 0x9e, 0x9e, 0x9b, 0x99, 0x9f, 0x97, 0x91, 0xc2, + 0xfb, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, + 0xc5, 0x7b, 0x75, 0x7c, 0x73, 0x75, 0x78, 0x7a, 0x79, 0x6f, 0x6e, 0x8a, 0xbd, 0xe6, 0xf7, 0xfd, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xfb, + 0xfc, 0xff, 0xff, 0xf9, 0xe6, 0xcd, 0xb4, 0xa1, 0x95, 0x95, 0x9b, 0x9d, 0x9e, 0x94, 0x98, 0xcd, + 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xfa, 0xff, 0xff, + 0xd4, 0x8c, 0x6f, 0x72, 0x78, 0x77, 0x70, 0x6a, 0x6f, 0x8b, 0xaf, 0xce, 0xee, 0xff, 0xff, 0xfa, + 0xf9, 0xfd, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfd, 0xfc, 0xff, 0xff, 0xe8, 0xc3, 0xaa, 0xa1, 0x9e, 0x9b, 0x9c, 0xa3, 0xb9, 0xe5, + 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xed, 0xb9, 0x86, 0x72, 0x76, 0x76, 0x77, 0x7f, 0x96, 0xca, 0xf7, 0xff, 0xfd, 0xfd, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe5, 0xcc, 0xb5, 0xa8, 0xad, 0xc9, 0xec, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xee, 0xbe, 0x96, 0x89, 0x8d, 0xa4, 0xc9, 0xeb, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfb, 0xfb, 0xfb, + 0xfb, 0xfc, 0xfd, 0xfc, 0xf9, 0xf8, 0xfb, 0xff, 0xfe, 0xf1, 0xe1, 0xda, 0xdf, 0xee, 0xfd, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfd, 0xee, 0xd9, 0xcc, 0xd0, 0xe3, 0xf6, 0xfe, 0xfe, 0xfb, 0xfa, 0xfc, 0xff, 0xff, 0xfe, + 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe9, 0xe0, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, + 0xdf, 0xdf, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xdf, 0xe3, 0xe9, 0xf4, 0xfe, 0xff, 0xfd, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfb, 0xfa, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, + 0xf4, 0xf5, 0xf4, 0xf4, 0xf5, 0xf5, 0xf7, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xd2, 0xbe, 0xb3, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, + 0xaf, 0xaf, 0xad, 0xac, 0xac, 0xac, 0xaa, 0xac, 0xb4, 0xbe, 0xd7, 0xf6, 0xff, 0xfe, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf2, 0xea, 0xe7, 0xe7, 0xe7, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, + 0xe6, 0xe5, 0xe6, 0xe6, 0xe5, 0xe6, 0xe9, 0xee, 0xf7, 
0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xc9, 0xa8, 0xa5, 0xa7, 0xa3, 0xa4, 0xa4, 0xa3, 0xa3, + 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa1, 0xa3, 0xa5, 0xa4, 0xaf, 0xd0, 0xf2, 0xff, 0xff, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf8, 0xea, 0xe3, 0xe4, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, + 0xe3, 0xe2, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe3, 0xe9, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd7, 0xad, 0x9c, 0xa2, 0xa8, 0xa7, 0xa7, 0xa7, 0xa6, 0xa6, + 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa5, 0xa2, 0x9f, 0xae, 0xde, 0xff, 0xff, 0xfc, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfa, 0xec, 0xe1, 0xde, 0xe2, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, + 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe1, 0xe0, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbf, 0xa5, 0xa9, 0xa6, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, + 0xa5, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa3, 0xa3, 0xab, 0xa3, 0x9f, 0xd2, 0xff, 0xff, 0xfa, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xf5, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, + 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe1, 0xe3, 0xe4, 0xdf, 0xe6, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbf, 0xa5, 0xaa, 0xa8, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, + 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa4, 0xa4, 0xac, 0xa3, 0x9f, 0xd2, 0xff, 0xff, 0xfa, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xf5, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, + 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe1, 0xe3, 0xe4, 0xdf, 0xe5, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd3, 0xac, 0x9e, 0xa3, 0xa8, 0xa8, 0xa8, 0xaa, 0xa9, 0xa7, + 0xa7, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa7, 0xa6, 0xa1, 0xad, 0xdd, 0xff, 0xff, 0xfc, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf9, 0xeb, 0xe0, 0xdf, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, + 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe0, 0xea, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xc7, 0xa8, 0xa6, 0xa8, 0xa5, 0xa6, 0xa8, 0xa7, 0xa6, + 0xa7, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa7, 0xa8, 0xa9, 0xa8, 0xb1, 0xd0, 0xf1, 0xff, 0xff, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xe7, 0xe1, 0xe2, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, + 0xe1, 0xe2, 0xe2, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe8, 0xf4, 0xfe, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xec, 0xd0, 0xbd, 0xb3, 0xb0, 0xb0, 0xb0, 0xb0, 0xb2, + 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb6, 0xbe, 0xd6, 0xf4, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xec, 0xe6, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, + 0xe3, 0xe4, 0xe3, 0xe3, 0xe4, 0xe4, 0xe5, 0xeb, 0xf5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xe7, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, + 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xe1, 0xe7, 0xf2, 0xfe, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf8, 0xf4, 0xf3, 0xf3, 0xf3, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, + 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfb, 0xfb, 0xfb, 0xfb, + 0xfb, 0xfc, 0xfd, 0xfd, 0xfa, 0xf9, 0xfc, 0xff, 0xfe, 0xf6, 0xeb, 0xe5, 0xe9, 0xf4, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfc, 0xf8, 0xf5, 0xf6, 0xfa, 0xfe, 0xff, 0xfe, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xea, 0xd9, 0xc6, 0xba, 0xbf, 0xd8, 0xf3, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfb, 0xf1, 0xe7, 0xe4, 0xe6, 0xec, 0xf5, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xff, 0xef, 0xcf, 0xba, 0xb3, 0xb0, 0xad, 0xae, 0xb7, 0xca, 0xec, + 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfb, 0xee, 0xe1, 0xdc, 0xdc, 0xdd, 0xde, 0xe2, 0xe9, 0xf4, 0xfd, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfc, + 0xfc, 0xff, 0xff, 0xfc, 0xef, 0xdb, 0xc5, 0xb3, 0xa7, 0xa8, 0xae, 0xb1, 0xb1, 0xab, 0xaf, 0xda, + 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xf4, 0xe1, 0xd9, 0xda, 0xdc, 0xdb, 0xda, 0xda, 0xdd, 0xe4, 0xed, 0xf4, 0xfb, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, + 0xff, 0xfe, 0xf9, 0xe8, 0xca, 0xb0, 0xa9, 0xae, 0xb0, 0xb2, 0xb1, 0xaf, 0xb4, 0xaf, 0xad, 0xd2, + 0xfc, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xef, 0xdc, 0xdb, 0xdd, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xe3, 0xf0, 0xfa, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, + 0xfc, 0xe9, 0xcc, 0xb8, 0xb1, 0xad, 0xad, 0xb0, 0xb2, 0xb3, 0xb0, 0xaf, 0xb5, 0xb0, 0xae, 0xd4, + 0xfe, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xf0, 0xdc, 0xd9, 0xdc, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdb, 0xdb, 0xdd, 0xdf, 0xe5, 0xf1, 0xfb, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xeb, + 0xd7, 0xc3, 0xb0, 0xa8, 0xad, 0xb3, 0xb3, 0xb0, 0xaf, 0xaf, 0xb1, 0xb4, 0xb5, 0xaf, 0xb6, 0xde, + 0xff, 0xff, 0xfb, 0xfd, 0xfc, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, 0xfd, 0xfd, 0xff, 0xff, + 0xf7, 0xe4, 0xd7, 0xd7, 0xdc, 0xdb, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xdd, 0xda, 0xdb, 0xe2, 0xea, + 0xf3, 0xfc, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf7, 0xe2, 0xc3, + 0xad, 0xab, 0xb1, 0xb3, 0xb0, 0xb0, 0xb1, 0xb1, 0xb2, 0xb2, 0xb3, 0xb2, 0xb3, 0xbb, 0xd0, 0xef, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xf2, 0xe0, 0xd7, 0xd8, 0xd9, 0xda, 0xdc, 0xdb, 0xdb, 0xdb, 0xdc, 0xdd, 0xdd, 0xdb, 0xdb, + 0xe1, 0xed, 0xf8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xce, 0xb4, 0xae, + 0xad, 0xad, 0xb1, 0xb3, 0xb2, 0xb0, 0xb0, 0xb1, 0xb2, 0xb5, 0xb3, 0xb1, 0xbb, 0xd7, 0xf3, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xff, 0xfd, 0xf2, 0xe2, 0xd7, 0xd7, 0xdb, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdd, 0xdb, 0xda, + 0xdb, 0xde, 0xe4, 0xf0, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf3, 0xcd, 0xaf, 0xa9, 0xae, + 0xb4, 0xb3, 0xb0, 0xae, 0xaf, 0xb2, 0xb5, 0xb4, 0xaf, 0xb0, 0xbd, 0xce, 0xe1, 0xf2, 0xfe, 0xff, + 0xff, 0xfd, 0xf1, 0xe2, 0xe0, 0xe6, 0xf1, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, + 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf9, 0xf1, 0xec, 0xec, 0xf2, 0xfa, 0xff, + 0xff, 0xff, 0xfd, 0xf5, 0xe9, 0xe1, 0xda, 0xd7, 0xd9, 0xdb, 0xdb, 0xda, 0xda, 0xda, 0xdc, 0xdd, + 0xdd, 0xdb, 0xd9, 0xe3, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xe1, 0xb5, 0xa9, 0xb4, 0xb3, + 0xae, 0xb1, 0xb2, 0xb2, 0xb2, 0xb1, 0xb1, 0xb4, 0xb9, 0xc4, 0xdb, 0xf5, 0xff, 0xfe, 0xfd, 0xff, + 0xfc, 0xea, 0xd0, 0xbe, 0xb9, 0xc0, 0xd6, 0xf4, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfa, 0xe8, 0xd9, 0xd3, 0xd3, 0xda, 0xeb, 0xfa, + 0xff, 0xfe, 0xfe, 0xff, 0xfd, 0xf2, 0xe5, 0xde, 0xda, 0xd8, 0xd9, 0xdb, 0xdc, 0xdb, 0xdb, 0xdb, + 0xdb, 0xdc, 0xdb, 0xde, 0xea, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xd5, 0xb5, 0xac, 0xb2, 0xb1, + 0xae, 0xb0, 0xb2, 0xb3, 0xb4, 0xb3, 0xb2, 0xbf, 0xda, 0xf1, 0xfb, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, + 0xeb, 0xc9, 0xb9, 0xbb, 0xba, 0xba, 0xc4, 0xd8, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfc, 0xed, 0xd7, 0xce, 0xd1, 0xd2, 0xd2, 0xd9, 0xe8, + 0xf9, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfa, 0xf2, 0xe5, 0xda, 0xd7, 0xdb, 0xdd, 0xdc, 0xdb, 0xdb, + 0xdb, 0xdb, 0xdb, 0xdd, 0xe4, 0xf5, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf3, 0xd3, 0xb6, 0xaf, 0xb4, 0xb3, + 0xb3, 0xb5, 0xb3, 0xad, 0xb0, 0xc1, 0xd3, 0xe5, 0xf8, 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xff, 0xef, + 0xcf, 0xb9, 0xb7, 0xbd, 0xbf, 0xc0, 0xbd, 0xc4, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xfe, 0xf4, 0xea, + 0xea, 0xf0, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xe0, 0xd1, 0xce, 0xd3, 0xd6, 0xd4, 0xd1, 0xd8, + 0xee, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xf6, 0xed, 0xe3, 0xda, 0xd7, 0xd9, 0xdc, 0xdc, + 0xdc, 0xdc, 0xdc, 0xdc, 0xe3, 0xf4, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe1, 0xb9, 0xaa, 0xb1, 0xb2, + 0xae, 0xb0, 0xb3, 0xb8, 0xc6, 0xdf, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, + 0xba, 0xba, 0xbe, 0xba, 0xbb, 0xc1, 0xbd, 0xbf, 0xe3, 0xff, 0xff, 0xfd, 0xfd, 0xef, 0xd9, 0xcd, + 0xcd, 0xd5, 0xe7, 0xfa, 0xff, 0xff, 0xff, 0xf1, 0xda, 0xd2, 0xd2, 0xd1, 0xd2, 0xd5, 0xd3, 0xd2, + 0xe3, 0xf8, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf5, 0xe6, 0xdd, 0xda, 0xd8, 0xd8, + 0xdb, 0xdb, 0xd8, 0xdc, 0xe9, 0xf8, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 
0xfe, 0xff, 0xf5, 0xd0, 0xb1, 0xab, 0xaf, + 0xad, 0xb0, 0xc3, 0xe0, 0xf5, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xed, 0xcd, + 0xb8, 0xba, 0xbe, 0xbb, 0xbd, 0xc2, 0xbe, 0xc2, 0xe5, 0xff, 0xff, 0xfd, 0xf1, 0xd3, 0xc4, 0xc8, + 0xc8, 0xc7, 0xd2, 0xe7, 0xf8, 0xff, 0xff, 0xf2, 0xdd, 0xd3, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd3, + 0xda, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf4, 0xe6, 0xda, 0xd7, + 0xda, 0xda, 0xd8, 0xe2, 0xf6, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xef, 0xdc, 0xcd, 0xc7, + 0xcb, 0xd7, 0xe7, 0xf8, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf8, 0xd3, 0xbd, + 0xbc, 0xbb, 0xbb, 0xbe, 0xbf, 0xbf, 0xc1, 0xcf, 0xed, 0xff, 0xff, 0xf8, 0xe2, 0xc6, 0xc1, 0xcb, + 0xca, 0xc7, 0xca, 0xd5, 0xef, 0xff, 0xff, 0xf8, 0xe8, 0xd2, 0xcd, 0xd2, 0xd2, 0xcf, 0xd2, 0xd4, + 0xd3, 0xde, 0xf7, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf8, 0xef, 0xe8, + 0xe5, 0xe6, 0xeb, 0xf4, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xec, + 0xf3, 0xff, 0xff, 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xea, 0xc2, 0xb4, + 0xbd, 0xbf, 0xbb, 0xbe, 0xc0, 0xbb, 0xc5, 0xe4, 0xf9, 0xff, 0xff, 0xf1, 0xd7, 0xc7, 0xc6, 0xc8, + 0xc8, 0xca, 0xca, 0xce, 0xeb, 0xff, 0xff, 0xff, 0xf4, 0xd7, 0xcc, 0xd3, 0xd2, 0xcf, 0xd2, 0xd4, + 0xd2, 0xd7, 0xea, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfb, + 0xf6, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf2, 0xd4, 0xbd, 0xb9, + 0xbd, 0xbd, 0xbd, 0xc0, 0xc0, 0xbc, 0xcf, 0xf5, 0xff, 0xff, 0xff, 0xf0, 0xd3, 0xc7, 0xc6, 0xc7, + 0xc7, 0xcb, 0xc9, 0xca, 0xe8, 0xff, 0xff, 0xff, 0xfc, 0xe4, 0xd3, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, + 0xd5, 0xd3, 0xd8, 0xef, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, + 0xfe, 0xfc, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe2, 0xbf, 0xba, 0xbf, + 0xbc, 0xbc, 0xbf, 0xbf, 0xbe, 0xc7, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xf0, 0xd2, 0xc5, 0xc5, 0xc7, + 0xc8, 0xc9, 0xc7, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xf6, 0xdf, 0xcf, 0xd0, 0xd3, 0xd0, 0xd1, + 0xd5, 0xd3, 0xd1, 0xe4, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf4, 0xd1, 0xb6, 0xb8, 0xbf, + 0xbc, 0xbc, 0xc0, 0xbd, 0xbd, 0xd7, 0xf8, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc7, + 0xc7, 0xca, 0xc8, 0xca, 0xe7, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xea, 0xd2, 0xd0, 0xd3, 0xd0, 0xd0, + 0xd2, 0xd3, 0xd3, 0xda, 0xec, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd9, 0xc0, 0xba, 0xbb, 0xbc, + 0xbd, 0xbe, 0xc0, 0xbf, 0xc8, 0xe8, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xee, 0xd3, 0xc6, 0xc6, 0xc8, + 0xc8, 0xca, 0xc8, 0xca, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xf5, 0xe1, 0xd3, 0xd0, 0xd1, 0xd1, + 0xd1, 0xd4, 0xd5, 0xd1, 0xdc, 0xf8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf0, 0xc7, 0xb6, 0xbe, 0xbe, 0xbb, + 0xbe, 0xc0, 0xbb, 0xc2, 0xdd, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc8, + 0xc8, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xf1, 0xd6, 0xcd, 0xd2, 0xd2, + 0xd0, 0xd3, 0xd4, 0xd0, 0xd7, 0xed, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf5, 0xd9, 0xbf, 0xb8, 0xbd, 0xbe, 0xbb, + 0xbe, 0xc0, 0xbb, 0xcb, 0xf1, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc8, + 0xc8, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xff, 0xfa, 0xe0, 0xd1, 0xd2, 0xd1, + 0xd0, 0xd1, 0xd3, 0xd4, 0xd5, 0xdd, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc1, 0xba, 0xbe, 0xbc, 0xbc, 0xbe, + 0xbf, 0xbe, 0xc6, 0xe0, 0xfb, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfe, 0xf2, 0xdd, 0xcf, 0xd0, + 0xd2, 0xd0, 0xd0, 0xd4, 0xd2, 0xd3, 0xe8, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xda, 0xb6, 0xb7, 0xbf, 0xbc, 0xbb, 0xc0, + 0xbd, 0xbb, 0xd4, 0xf6, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xca, 0xc7, 0xc9, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe8, 0xd1, 0xd1, + 0xd3, 0xcf, 0xcf, 0xd3, 0xd1, 0xd0, 0xe0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf8, 0xd4, 0xb7, 0xba, 0xc0, 0xbc, 0xbd, 0xc1, + 0xbe, 0xc2, 0xe3, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xde, 0xd1, + 0xd0, 0xd2, 0xd1, 0xd2, 0xd3, 0xd4, 0xde, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfc, 0xd7, 0xb8, 0xbc, 0xc0, 0xbd, 0xc0, 0xbe, + 0xc2, 0xd8, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xef, 0xd3, + 0xcd, 0xd3, 0xd2, 0xd1, 0xd2, 0xd3, 0xdf, 0xf2, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xbb, 0xb7, 0xbd, 0xbd, 0xbc, 0xba, + 0xcc, 0xef, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfa, 0xe3, + 0xd1, 0xcd, 0xcf, 0xd1, 0xce, 0xcf, 0xe5, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf0, 0xd3, 0xc3, 0xc0, 0xc1, 0xc2, 0xcb, + 0xe3, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, + 0xe3, 0xd2, 0xd2, 0xd4, 0xd3, 0xdb, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe4, 0xd5, 0xd3, 0xde, 0xef, + 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xca, 0xc6, 0xc8, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf7, 0xeb, 0xe3, 0xe1, 0xe8, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf1, 0xf0, 0xf8, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd4, 0xc9, 0xc8, 0xc7, + 0xc6, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfd, 0xf6, 0xf4, 0xf9, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xda, 0xc8, 0xc6, 0xca, + 0xc9, 0xc9, 0xc9, 0xcf, 0xeb, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe7, 0xca, 0xc2, 0xcb, + 0xcb, 0xc6, 0xc9, 0xd9, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf5, 0xdc, 0xc8, 0xc4, + 0xc4, 0xc5, 0xd3, 0xec, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf4, 0xe0, 0xd2, + 0xd2, 0xd9, 0xe9, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf7, + 0xf7, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, + 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, + 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfb, + 0xfb, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf8, 0xf2, + 0xf1, 0xf6, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xea, 0xc4, 0xa3, + 0x9e, 0xb2, 0xd7, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xed, 0xb8, 0x8d, 0x80, + 0x7d, 0x83, 0xa6, 0xd9, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf9, 0xcc, 0x8c, 0x79, 0x8b, + 0x8b, 0x7f, 0x84, 0xa9, 0xe2, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfd, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe5, 0xaf, 0x88, 0x81, 0x87, + 0x87, 0x86, 0x7f, 0x8b, 0xcf, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xed, 0xe8, 0xf3, 0xfe, + 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, 0xa5, 0x8c, 0x87, 0x81, + 0x81, 0x89, 0x81, 0x82, 0xc7, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xfb, 0xe6, 0xe5, 0xf5, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xd7, 0xbb, 0xb5, 0xc6, 0xe7, + 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xde, 0xa5, 0x88, 0x84, 0x85, + 0x85, 0x89, 0x7d, 0x7e, 0xc6, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xee, 0xcb, 0xae, 0xac, 0xc6, 0xe4, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xec, 0xc0, 0x9d, 0x92, 0x92, 0x94, 0xa9, + 0xd5, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xde, 0xa4, 0x86, 0x84, 0x85, + 0x84, 0x88, 0x7d, 0x7e, 0xc6, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xed, + 0xb3, 0x83, 0x7f, 0x87, 0x83, 0x94, 0xd3, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xd0, 0x93, 0x85, 0x8b, 0x8c, 0x88, 0x86, + 0xaa, 0xe7, 0xff, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x83, 0x8a, 0x80, 0x7f, 0xc5, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf4, 0xb8, + 0x81, 0x71, 0x76, 0x7c, 0x6e, 0x6e, 0xae, 0xf3, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfb, 0xbc, 0x85, 0x8c, 0x95, 0x8f, 0x91, 0x8a, + 0x92, 0xbe, 0xee, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x83, 0x8a, 0x80, 0x7f, 0xc5, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xd6, 0x88, + 0x72, 0x83, 0x7e, 0x7c, 0x7c, 0x79, 0x9b, 0xd5, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xf4, 0xb8, 0x87, 0x8c, 0x94, 0x8d, 0x8d, 0x91, + 0x8b, 0x96, 0xd2, 0xff, 0xff, 
0xfa, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x84, 0x89, 0x7e, 0x7e, 0xc6, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe4, 0xaa, 0x80, + 0x7a, 0x7f, 0x7b, 0x7c, 0x7c, 0x7a, 0x97, 0xd0, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xff, 0xfd, 0xc2, 0x89, 0x89, 0x93, 0x8c, 0x8b, 0x93, + 0x8b, 0x87, 0xb6, 0xf0, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x84, 0x89, 0x7e, 0x7e, 0xc5, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xc4, 0x83, 0x7e, + 0x82, 0x77, 0x78, 0x7e, 0x74, 0x70, 0xa1, 0xe4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xd3, 0x98, 0x8e, 0x92, 0x8e, 0x8f, 0x90, + 0x8d, 0x8e, 0x9b, 0xc5, 0xf7, 0xff, 0xfe, 0xff, 0xfd, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x84, 0x89, 0x7e, 0x7e, 0xc6, 0xff, 0xff, 0xf9, 0xff, 0xff, 0xfe, 0xfe, 0xe6, 0xa5, 0x75, 0x78, + 0x81, 0x79, 0x78, 0x82, 0x77, 0x74, 0xb5, 0xfb, 0xff, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xed, 0xbf, 0x97, 0x8b, 0x90, 0x90, 0x8b, + 0x8f, 0x91, 0x85, 0xa1, 0xe7, 0xff, 0xfd, 0xfe, 0xfe, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x84, 0x89, 0x7e, 0x7e, 0xc5, 0xff, 0xff, 0xfb, 0xff, 0xfd, 0xff, 0xf4, 0xb3, 0x85, 0x7d, 0x7b, + 0x7a, 0x7b, 0x7a, 0x7d, 0x7c, 0x90, 0xd1, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xe3, 0xa1, 0x86, 0x92, 0x91, 0x8c, + 0x91, 0x90, 0x84, 0x92, 0xc6, 0xf2, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x84, 0x89, 0x7e, 0x7e, 0xc5, 0xff, 0xff, 0xfa, 0xff, 0xfd, 0xff, 0xdd, 0x92, 0x76, 0x82, 0x7e, + 0x76, 0x7d, 0x7e, 0x71, 0x81, 0xbe, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbc, 0x94, 0x8e, 0x8f, 0x8f, + 0x8f, 0x8e, 0x8f, 0x8e, 0x9e, 0xd6, 0xff, 0xff, 0xf8, 0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x84, 0x89, 0x7e, 0x7e, 0xc4, 0xff, 0xff, 0xf7, 0xff, 0xff, 0xe6, 0xb0, 0x84, 0x78, 0x7d, 0x7c, + 0x78, 0x7d, 0x7e, 0x72, 0x95, 0xe6, 0xff, 0xfc, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe8, 0xb1, 0x89, 0x8c, 0x93, + 0x8d, 0x8c, 0x92, 0x89, 0x87, 0xbc, 0xf8, 0xff, 0xf8, 
0xff, 0xff, 0xdd, 0xa4, 0x88, 0x85, 0x84, + 0x84, 0x89, 0x7e, 0x7e, 0xc5, 0xff, 0xff, 0xf4, 0xff, 0xff, 0xc6, 0x83, 0x7b, 0x81, 0x79, 0x7a, + 0x7d, 0x7a, 0x79, 0x8d, 0xc3, 0xf7, 0xff, 0xfd, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfb, 0xfd, 0xfe, + 0xfc, 0xfa, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xcc, 0x93, 0x8d, 0x93, + 0x8d, 0x8e, 0x90, 0x8b, 0x88, 0x9f, 0xd1, 0xfa, 0xff, 0xff, 0xff, 0xdf, 0xa4, 0x87, 0x84, 0x84, + 0x84, 0x89, 0x7d, 0x7d, 0xc5, 0xff, 0xff, 0xf8, 0xff, 0xea, 0xa6, 0x76, 0x7a, 0x82, 0x78, 0x79, + 0x81, 0x77, 0x75, 0xaf, 0xf1, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xfd, 0xf9, 0xfa, + 0xfe, 0xfe, 0xfb, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe7, 0xb5, 0x92, 0x8b, + 0x90, 0x90, 0x8d, 0x91, 0x91, 0x88, 0xaa, 0xee, 0xff, 0xff, 0xff, 0xdf, 0xa4, 0x8a, 0x86, 0x84, + 0x82, 0x89, 0x7f, 0x7d, 0xc6, 0xff, 0xff, 0xff, 0xf6, 0xba, 0x88, 0x7e, 0x7d, 0x7c, 0x7a, 0x7a, + 0x7e, 0x79, 0x88, 0xcd, 0xff, 0xff, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf6, 0xee, + 0xf5, 0xff, 0xff, 0xfc, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xde, 0x9e, 0x86, + 0x92, 0x92, 0x8c, 0x91, 0x91, 0x85, 0x96, 0xcd, 0xf4, 0xff, 0xff, 0xe2, 0xac, 0x8d, 0x85, 0x83, + 0x83, 0x88, 0x81, 0x85, 0xcb, 0xff, 0xff, 0xfe, 0xe1, 0x95, 0x76, 0x82, 0x7e, 0x78, 0x7e, 0x7d, + 0x75, 0x82, 0xb5, 0xeb, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xfb, 0xfc, 0xff, 0xff, 0xf9, + 0xed, 0xeb, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfd, 0xea, 0xd1, 0xbf, 0xba, + 0xc1, 0xd0, 0xe4, 0xf6, 0xff, 0xff, 0xfc, 0xfb, 0xfe, 0xff, 0xfd, 0xfc, 0xff, 0xf4, 0xb9, 0x94, + 0x91, 0x8e, 0x8c, 0x8f, 0x8f, 0x8c, 0x8f, 0xa9, 0xdf, 0xff, 0xff, 0xee, 0xc0, 0x89, 0x7a, 0x8a, + 0x8b, 0x82, 0x80, 0x98, 0xd8, 0xff, 0xff, 0xef, 0xc1, 0x85, 0x74, 0x81, 0x7f, 0x76, 0x7d, 0x7f, + 0x72, 0x90, 0xdf, 0xff, 0xfa, 0xfd, 0xff, 0xfe, 0xfb, 0xfb, 0xfe, 0xff, 0xfc, 0xeb, 0xcd, 0xb4, + 0xa8, 0xa7, 0xb3, 0xd5, 0xf9, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf3, 0xc0, 0x99, 0x93, 0x94, + 0x90, 0x95, 0xb1, 0xd8, 0xf2, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfd, 0xe2, 0xae, + 0x8c, 0x8e, 0x92, 0x8c, 0x8b, 0x91, 0x8b, 0x93, 0xd2, 0xff, 0xff, 0xfb, 0xe0, 0x9f, 0x7e, 0x86, + 0x85, 0x7d, 0x91, 0xc1, 0xec, 0xff, 0xff, 0xde, 0xa4, 0x83, 0x7c, 0x7c, 0x7a, 0x7a, 0x7c, 0x7a, + 0x83, 0xb4, 0xf4, 0xff, 0xfc, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xf7, 0xde, 0xae, 0x7f, 0x6b, + 0x70, 0x70, 0x6a, 0x91, 0xdb, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xda, 0xa5, 0x91, 0x99, 0x98, + 0x92, 0x95, 0x9a, 0xa0, 0xb4, 0xd9, 0xf6, 0xff, 0xff, 0xfc, 0xfe, 0xff, 0xfb, 0xff, 0xff, 0xc8, + 0x91, 0x90, 0x95, 0x8b, 0x8b, 0x92, 0x89, 0x8b, 0xcc, 0xff, 0xff, 0xfc, 0xfa, 
0xd8, 0xa9, 0x8d, + 0x86, 0x94, 0xbf, 0xee, 0xfd, 0xff, 0xff, 0xda, 0x9b, 0x81, 0x7d, 0x78, 0x77, 0x7f, 0x78, 0x71, + 0xa4, 0xe8, 0xff, 0xfe, 0xff, 0xfd, 0xfd, 0xfc, 0xfe, 0xfe, 0xe3, 0xaf, 0x8a, 0x7d, 0x74, 0x6f, + 0x72, 0x73, 0x6e, 0x7b, 0xac, 0xe7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf1, 0xc8, 0xa2, 0x97, 0x9e, 0x9e, + 0x9d, 0x9e, 0x99, 0x8f, 0x95, 0xad, 0xc6, 0xdf, 0xf9, 0xff, 0xfe, 0xfd, 0xfb, 0xff, 0xff, 0xe6, + 0xb2, 0x8e, 0x8b, 0x94, 0x94, 0x93, 0x8a, 0x91, 0xd0, 0xff, 0xff, 0xfa, 0xff, 0xfa, 0xe2, 0xcb, + 0xc7, 0xd5, 0xed, 0xfd, 0xfe, 0xff, 0xff, 0xe2, 0xa6, 0x7e, 0x79, 0x81, 0x81, 0x7d, 0x75, 0x82, + 0xc8, 0xff, 0xff, 0xf7, 0xfb, 0xfe, 0xff, 0xfb, 0xe2, 0xc2, 0x9f, 0x7c, 0x6c, 0x71, 0x7a, 0x7d, + 0x7b, 0x79, 0x7a, 0x7b, 0x93, 0xd5, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf1, 0xc9, 0xa1, 0x95, 0x9c, 0x9c, + 0x98, 0x97, 0x99, 0x9b, 0x99, 0x95, 0x94, 0xa9, 0xd0, 0xee, 0xfa, 0xfe, 0xff, 0xfe, 0xfd, 0xfe, + 0xdf, 0xa6, 0x8a, 0x8e, 0x8c, 0x89, 0x94, 0xb4, 0xe5, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xf4, 0xc6, 0x8c, 0x73, 0x79, 0x79, 0x73, 0x86, 0xb6, + 0xeb, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xf1, 0xd6, 0xa6, 0x7b, 0x6e, 0x76, 0x7a, 0x77, 0x74, 0x73, + 0x74, 0x76, 0x74, 0x76, 0x93, 0xd7, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xd9, 0xa2, 0x93, 0xa1, 0x9d, + 0x98, 0x9b, 0x9a, 0x9a, 0x99, 0x97, 0x95, 0x97, 0x9f, 0xb0, 0xd3, 0xf7, 0xff, 0xfd, 0xfc, 0xff, + 0xfa, 0xda, 0xae, 0x91, 0x8a, 0x91, 0xb4, 0xe8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xee, 0xb7, 0x88, 0x79, 0x78, 0x88, 0xb8, 0xee, + 0xff, 0xfc, 0xfd, 0xff, 0xfa, 0xd4, 0xa4, 0x85, 0x7a, 0x73, 0x71, 0x75, 0x79, 0x78, 0x75, 0x72, + 0x76, 0x79, 0x71, 0x78, 0xa7, 0xe4, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xef, 0xbb, 0x98, 0x95, 0x9c, + 0xa1, 0x9f, 0x9a, 0x98, 0x98, 0x9b, 0x9e, 0x99, 0x90, 0x94, 0xa8, 0xc0, 0xdb, 0xf2, 0xfe, 0xff, + 0xff, 0xfa, 0xe4, 0xcc, 0xc7, 0xcf, 0xe3, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf9, + 0xf9, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe8, 0xcf, 0xbf, 0xbf, 0xcf, 0xe9, 0xfc, + 0xff, 0xff, 0xfb, 0xe0, 0xb8, 0x97, 0x79, 0x6a, 0x71, 0x7c, 0x7b, 0x75, 0x72, 0x73, 0x78, 0x7b, + 0x7a, 0x71, 0x6c, 0x91, 0xd4, 0xfb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xe4, 0xbc, 0xa0, 0x9b, + 0x9b, 0x9a, 0x9c, 0x9e, 0x9d, 0x9a, 0x98, 0x98, 0x9a, 0x9c, 0x96, 0x91, 0xa3, 0xce, 0xf3, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, + 0xff, 0xf8, 0xd6, 0x9c, 0x74, 0x70, 0x79, 0x7a, 0x76, 0x74, 0x74, 0x76, 0x78, 0x77, 0x75, 0x72, + 0x72, 0x7a, 0x93, 0xc6, 0xf8, 0xff, 0xfd, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf3, 0xd6, 0xaf, + 0x99, 0x99, 0x9d, 0x9e, 0x9d, 0x9a, 0x99, 0x99, 0x9c, 0x9b, 0x98, 0x95, 0x94, 0xa3, 0xc6, 0xee, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xf9, 0xd2, 0x92, 0x72, 0x74, 0x75, 0x76, 0x79, 0x77, 0x74, 0x73, 0x75, 0x78, 0x78, 0x73, 0x6e, + 0x80, 0xae, 0xe0, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfb, 0xe0, + 0xc7, 0xb1, 0x9b, 0x93, 0x9a, 0x9f, 0x9f, 0x9b, 0x97, 0x96, 0x9a, 0x9d, 0x9a, 0x91, 0x9d, 0xd5, + 0xff, 0xff, 0xf9, 0xfa, 0xfa, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfa, 0xf9, 0xf8, 0xf8, 0xff, 0xff, + 0xe2, 0xa1, 0x71, 0x6e, 0x7d, 0x79, 0x72, 0x71, 0x73, 0x78, 0x7c, 0x78, 0x6b, 0x6d, 0x87, 0xa7, + 0xc8, 0xe9, 0xfc, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xf8, 0xdd, 0xba, 0xa5, 0x9f, 0x9a, 0x99, 0x9d, 0x9c, 0x9c, 0x99, 0x97, 0x9f, 0x95, 0x8f, 0xc5, + 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, + 0xc8, 0x81, 0x77, 0x7b, 0x74, 0x74, 0x77, 0x79, 0x78, 0x73, 0x71, 0x76, 0x7b, 0x90, 0xbd, 0xec, + 0xff, 0xff, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, + 0xfe, 0xfc, 0xf5, 0xdf, 0xba, 0x9c, 0x96, 0x9d, 0x9e, 0x9e, 0x9b, 0x99, 0x9f, 0x97, 0x91, 0xc2, + 0xfb, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf9, 0xff, 0xff, + 0xc5, 0x7b, 0x75, 0x7c, 0x73, 0x75, 0x78, 0x7a, 0x79, 0x6f, 0x6e, 0x8a, 0xbd, 0xe6, 0xf7, 0xfd, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xfb, + 0xfc, 0xff, 0xff, 0xf9, 0xe6, 0xcd, 0xb4, 0xa1, 0x95, 0x95, 0x9b, 0x9d, 0x9e, 0x94, 0x98, 0xcd, + 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xff, 0xfa, 0xff, 0xff, + 0xd4, 0x8c, 0x6f, 0x72, 0x78, 0x77, 0x70, 0x6a, 0x6f, 0x8b, 0xaf, 0xce, 0xee, 0xff, 0xff, 0xfa, + 0xf9, 0xfd, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfd, 0xfc, 0xff, 0xff, 0xe8, 0xc3, 0xaa, 0xa1, 0x9e, 0x9b, 0x9c, 0xa3, 0xb9, 0xe5, + 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xed, 0xb9, 0x86, 0x72, 0x76, 0x76, 0x77, 0x7f, 0x96, 0xca, 0xf7, 0xff, 0xfd, 0xfd, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe5, 0xcc, 0xb5, 0xa8, 0xad, 0xc9, 0xec, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xee, 0xbe, 0x96, 0x89, 0x8d, 0xa4, 0xc9, 0xeb, 0xfc, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfc, 0xfb, 0xfb, 0xfb, + 0xfb, 0xfc, 0xfd, 0xfc, 0xf9, 0xf8, 0xfb, 0xff, 0xfe, 0xf1, 0xe1, 0xda, 0xdf, 0xee, 0xfd, 0xff, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfd, 0xee, 0xd9, 0xcc, 0xd0, 0xe3, 0xf6, 0xfe, 0xfe, 0xfb, 0xfa, 0xfc, 0xff, 0xff, 0xfe, + 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe9, 0xe0, 0xdf, 0xdf, 0xdf, 0xdf, 0xdf, + 0xdf, 0xdf, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xdf, 0xe3, 0xe9, 0xf4, 0xfe, 0xff, 0xfd, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfb, 0xfa, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf4, 0xf4, 0xf4, 0xf4, 0xf4, + 0xf4, 0xf5, 0xf4, 0xf4, 0xf5, 0xf5, 0xf7, 0xfa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xd2, 0xbe, 0xb3, 0xaf, 0xaf, 0xaf, 0xaf, 0xaf, + 0xaf, 0xaf, 0xad, 0xac, 0xac, 0xac, 0xaa, 0xac, 0xb4, 0xbe, 0xd7, 0xf6, 0xff, 0xfe, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf2, 0xea, 0xe7, 0xe7, 0xe7, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, + 0xe6, 0xe5, 0xe6, 0xe6, 0xe5, 0xe6, 0xe9, 0xee, 0xf7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xc9, 0xa8, 0xa5, 0xa7, 0xa3, 0xa4, 0xa4, 0xa3, 0xa3, + 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa3, 0xa1, 0xa3, 0xa5, 0xa4, 0xaf, 0xd0, 0xf2, 0xff, 0xff, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf8, 0xea, 0xe3, 0xe4, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, + 0xe3, 0xe2, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe3, 0xe9, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd7, 0xad, 0x9c, 0xa2, 0xa8, 0xa7, 0xa7, 0xa7, 0xa6, 0xa6, + 0xa6, 0xa6, 0xa6, 0xa7, 0xa7, 0xa7, 0xa7, 0xa7, 0xa5, 0xa2, 0x9f, 0xae, 0xde, 0xff, 0xff, 0xfc, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfa, 0xec, 0xe1, 0xde, 0xe2, 0xe3, 0xe2, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, + 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe1, 0xe0, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbf, 0xa5, 0xa9, 0xa6, 0xa3, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, + 0xa5, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa5, 0xa3, 0xa3, 0xab, 0xa3, 0x9f, 0xd2, 0xff, 0xff, 0xfa, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xf5, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, + 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe1, 0xe3, 0xe4, 0xdf, 0xe6, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xfe, 0xff, 0xfe, 0xfc, 0xff, 0xf5, 0xbf, 0xa5, 0xaa, 0xa8, 0xa4, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, + 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa4, 0xa4, 0xac, 0xa3, 0x9f, 0xd2, 0xff, 0xff, 0xfa, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xff, 0xff, 0xf5, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, + 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe1, 0xe3, 0xe4, 0xdf, 0xe5, 0xfa, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd3, 0xac, 0x9e, 0xa3, 0xa8, 0xa8, 0xa8, 0xaa, 0xa9, 0xa7, + 0xa7, 0xa8, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa9, 0xa7, 0xa6, 0xa1, 0xad, 0xdd, 0xff, 0xff, 0xfc, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf9, 0xeb, 0xe0, 0xdf, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, + 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe0, 0xea, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xc7, 0xa8, 0xa6, 0xa8, 0xa5, 0xa6, 0xa8, 0xa7, 0xa6, + 0xa7, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa7, 0xa8, 0xa9, 0xa8, 0xb1, 0xd0, 0xf1, 0xff, 0xff, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xe7, 0xe1, 0xe2, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, 0xe1, + 0xe1, 0xe2, 0xe2, 0xe1, 0xe2, 0xe2, 0xe2, 0xe2, 0xe8, 0xf4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xec, 0xd0, 0xbd, 0xb3, 0xb0, 0xb0, 0xb0, 0xb0, 0xb2, + 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb2, 0xb6, 0xbe, 0xd6, 0xf4, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xec, 0xe6, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, 0xe3, + 0xe3, 0xe4, 0xe3, 0xe3, 0xe4, 0xe4, 0xe5, 0xeb, 0xf5, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf5, 0xe7, 0xde, 0xdd, 0xdd, 0xdd, 0xdd, 0xde, + 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdf, 0xe1, 0xe7, 0xf2, 0xfe, 0xff, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xf8, 0xf4, 0xf3, 0xf3, 0xf3, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3, + 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf3, 0xf4, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xfc, 0xfc, 0xfb, 0xfb, 0xfb, 0xfb, + 0xfb, 0xfc, 0xfd, 0xfd, 0xfa, 0xf9, 0xfc, 0xff, 0xfe, 0xf6, 0xeb, 0xe5, 0xe9, 0xf4, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfc, 0xf8, 0xf5, 0xf6, 0xfa, 0xfe, 0xff, 0xfe, 0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, + 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xea, 0xd9, 0xc6, 0xba, 0xbf, 0xd8, 0xf3, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfb, 0xf1, 0xe7, 0xe4, 0xe6, 0xec, 0xf5, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xff, 0xef, 0xcf, 0xba, 0xb3, 0xb0, 0xad, 0xae, 0xb7, 0xca, 0xec, + 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfb, 
0xee, 0xe1, 0xdc, 0xdc, 0xdd, 0xde, 0xe2, 0xe9, 0xf4, 0xfd, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xfc, + 0xfc, 0xff, 0xff, 0xfc, 0xef, 0xdb, 0xc5, 0xb3, 0xa7, 0xa8, 0xae, 0xb1, 0xb1, 0xab, 0xaf, 0xda, + 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, + 0xf4, 0xe1, 0xd9, 0xda, 0xdc, 0xdb, 0xda, 0xda, 0xdd, 0xe4, 0xed, 0xf4, 0xfb, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, + 0xff, 0xfe, 0xf9, 0xe8, 0xca, 0xb0, 0xa9, 0xae, 0xb0, 0xb2, 0xb1, 0xaf, 0xb4, 0xaf, 0xad, 0xd2, + 0xfc, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xef, 0xdc, 0xdb, 0xdd, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdd, 0xe3, 0xf0, 0xfa, 0xfe, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, + 0xfc, 0xe9, 0xcc, 0xb8, 0xb1, 0xad, 0xad, 0xb0, 0xb2, 0xb3, 0xb0, 0xaf, 0xb5, 0xb0, 0xae, 0xd4, + 0xfe, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, + 0xf0, 0xdc, 0xd9, 0xdc, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdb, 0xdb, 0xdd, 0xdf, 0xe5, 0xf1, 0xfb, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfc, 0xeb, + 0xd7, 0xc3, 0xb0, 0xa8, 0xad, 0xb3, 0xb3, 0xb0, 0xaf, 0xaf, 0xb1, 0xb4, 0xb5, 0xaf, 0xb6, 0xde, + 0xff, 0xff, 0xfb, 0xfd, 0xfc, 0xfc, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xfd, 0xfd, 0xfd, 0xff, 0xff, + 0xf7, 0xe4, 0xd7, 0xd7, 0xdc, 0xdb, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xdd, 0xda, 0xdb, 0xe2, 0xea, + 0xf3, 0xfc, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf7, 0xe2, 0xc3, + 0xad, 0xab, 0xb1, 0xb3, 0xb0, 0xb0, 0xb1, 0xb1, 0xb2, 0xb2, 0xb3, 0xb2, 0xb3, 0xbb, 0xd0, 0xef, + 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xfe, 0xf2, 0xe0, 0xd7, 0xd8, 0xd9, 0xda, 0xdc, 0xdb, 0xdb, 0xdb, 0xdc, 0xdd, 0xdd, 0xdb, 0xdb, + 0xe1, 0xed, 0xf8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xee, 0xce, 0xb4, 0xae, + 0xad, 0xad, 0xb1, 0xb3, 0xb2, 0xb0, 0xb0, 0xb1, 0xb2, 0xb5, 0xb3, 0xb1, 0xbb, 0xd7, 0xf3, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xff, 0xfd, 0xf2, 0xe2, 0xd7, 
0xd7, 0xdb, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdd, 0xdb, 0xda, + 0xdb, 0xde, 0xe4, 0xf0, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf3, 0xcd, 0xaf, 0xa9, 0xae, + 0xb4, 0xb3, 0xb0, 0xae, 0xaf, 0xb2, 0xb5, 0xb4, 0xaf, 0xb0, 0xbd, 0xce, 0xe1, 0xf2, 0xfe, 0xff, + 0xff, 0xfd, 0xf1, 0xe2, 0xe0, 0xe6, 0xf1, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, + 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf9, 0xf1, 0xec, 0xec, 0xf2, 0xfa, 0xff, + 0xff, 0xff, 0xfd, 0xf5, 0xe9, 0xe1, 0xda, 0xd7, 0xd9, 0xdb, 0xdb, 0xda, 0xda, 0xda, 0xdc, 0xdd, + 0xdd, 0xdb, 0xd9, 0xe3, 0xf5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xe1, 0xb5, 0xa9, 0xb4, 0xb3, + 0xae, 0xb1, 0xb2, 0xb2, 0xb2, 0xb1, 0xb1, 0xb4, 0xb9, 0xc4, 0xdb, 0xf5, 0xff, 0xfe, 0xfd, 0xff, + 0xfc, 0xea, 0xd0, 0xbe, 0xb9, 0xc0, 0xd6, 0xf4, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfa, 0xe8, 0xd9, 0xd3, 0xd3, 0xda, 0xeb, 0xfa, + 0xff, 0xfe, 0xfe, 0xff, 0xfd, 0xf2, 0xe5, 0xde, 0xda, 0xd8, 0xd9, 0xdb, 0xdc, 0xdb, 0xdb, 0xdb, + 0xdb, 0xdc, 0xdb, 0xde, 0xea, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xd5, 0xb5, 0xac, 0xb2, 0xb1, + 0xae, 0xb0, 0xb2, 0xb3, 0xb4, 0xb3, 0xb2, 0xbf, 0xda, 0xf1, 0xfb, 0xfe, 0xff, 0xfe, 0xfe, 0xfe, + 0xeb, 0xc9, 0xb9, 0xbb, 0xba, 0xba, 0xc4, 0xd8, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfc, 0xed, 0xd7, 0xce, 0xd1, 0xd2, 0xd2, 0xd9, 0xe8, + 0xf9, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfa, 0xf2, 0xe5, 0xda, 0xd7, 0xdb, 0xdd, 0xdc, 0xdb, 0xdb, + 0xdb, 0xdb, 0xdb, 0xdd, 0xe4, 0xf5, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf3, 0xd3, 0xb6, 0xaf, 0xb4, 0xb3, + 0xb3, 0xb5, 0xb3, 0xad, 0xb0, 0xc1, 0xd3, 0xe5, 0xf8, 0xff, 0xff, 0xfe, 0xfc, 0xff, 0xff, 0xef, + 0xcf, 0xb9, 0xb7, 0xbd, 0xbf, 0xc0, 0xbd, 0xc4, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xfe, 0xf4, 0xea, + 0xea, 0xf0, 0xf9, 0xff, 0xff, 0xff, 0xff, 0xf5, 0xe0, 0xd1, 0xce, 0xd3, 0xd6, 0xd4, 0xd1, 0xd8, + 0xee, 0xff, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xf6, 0xed, 0xe3, 0xda, 0xd7, 0xd9, 0xdc, 0xdc, + 0xdc, 0xdc, 0xdc, 0xdc, 0xe3, 0xf4, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xe1, 0xb9, 0xaa, 0xb1, 0xb2, + 0xae, 0xb0, 0xb3, 0xb8, 0xc6, 0xdf, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xdd, + 0xba, 0xba, 0xbe, 0xba, 0xbb, 0xc1, 0xbd, 0xbf, 0xe3, 0xff, 0xff, 0xfd, 0xfd, 0xef, 0xd9, 0xcd, + 0xcd, 0xd5, 0xe7, 0xfa, 0xff, 0xff, 0xff, 0xf1, 0xda, 0xd2, 0xd2, 0xd1, 0xd2, 0xd5, 0xd3, 0xd2, + 0xe3, 0xf8, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfe, 0xf5, 0xe6, 0xdd, 0xda, 0xd8, 0xd8, + 0xdb, 0xdb, 0xd8, 0xdc, 0xe9, 0xf8, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xf5, 0xd0, 0xb1, 0xab, 0xaf, + 0xad, 0xb0, 0xc3, 0xe0, 0xf5, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfe, 0xed, 0xcd, + 0xb8, 0xba, 0xbe, 0xbb, 0xbd, 0xc2, 0xbe, 0xc2, 0xe5, 0xff, 0xff, 0xfd, 0xf1, 0xd3, 0xc4, 0xc8, + 0xc8, 0xc7, 0xd2, 0xe7, 0xf8, 0xff, 0xff, 0xf2, 0xdd, 0xd3, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd3, + 0xda, 0xeb, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xfc, 0xf4, 0xe6, 0xda, 0xd7, + 0xda, 0xda, 0xd8, 0xe2, 0xf6, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xef, 0xdc, 0xcd, 0xc7, + 0xcb, 0xd7, 0xe7, 0xf8, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, 0xfe, 0xfd, 0xff, 0xf8, 0xd3, 0xbd, + 0xbc, 0xbb, 0xbb, 0xbe, 0xbf, 0xbf, 0xc1, 0xcf, 0xed, 0xff, 0xff, 0xf8, 0xe2, 0xc6, 0xc1, 0xcb, + 0xca, 0xc7, 0xca, 0xd5, 0xef, 0xff, 0xff, 0xf8, 0xe8, 0xd2, 0xcd, 0xd2, 0xd2, 0xcf, 0xd2, 0xd4, + 0xd3, 0xde, 0xf7, 0xff, 0xfd, 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfe, 0xf8, 0xef, 0xe8, + 0xe5, 0xe6, 0xeb, 0xf4, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xec, + 0xf3, 0xff, 0xff, 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xea, 0xc2, 0xb4, + 0xbd, 0xbf, 0xbb, 0xbe, 0xc0, 0xbb, 0xc5, 0xe4, 0xf9, 0xff, 0xff, 0xf1, 0xd7, 0xc7, 0xc6, 0xc8, + 0xc8, 0xca, 0xca, 0xce, 0xeb, 0xff, 0xff, 0xff, 0xf4, 0xd7, 0xcc, 0xd3, 0xd2, 0xcf, 0xd2, 0xd4, + 0xd2, 0xd7, 0xea, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xfb, + 0xf6, 0xf8, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf2, 0xd4, 0xbd, 0xb9, + 0xbd, 0xbd, 0xbd, 0xc0, 0xc0, 0xbc, 0xcf, 0xf5, 0xff, 0xff, 0xff, 0xf0, 0xd3, 0xc7, 0xc6, 0xc7, + 0xc7, 0xcb, 0xc9, 0xca, 0xe8, 0xff, 0xff, 0xff, 0xfc, 0xe4, 0xd3, 0xd1, 0xd1, 0xd1, 0xd1, 0xd2, + 0xd5, 0xd3, 0xd8, 0xef, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfc, 0xfe, 0xff, + 0xfe, 0xfc, 0xfc, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe2, 0xbf, 0xba, 0xbf, + 0xbc, 0xbc, 0xbf, 0xbf, 0xbe, 0xc7, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xf0, 0xd2, 0xc5, 0xc5, 0xc7, + 0xc8, 0xc9, 0xc7, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xf6, 0xdf, 0xcf, 0xd0, 0xd3, 0xd0, 0xd1, + 0xd5, 0xd3, 0xd1, 0xe4, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xfe, + 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf4, 0xd1, 0xb6, 0xb8, 0xbf, + 0xbc, 0xbc, 0xc0, 0xbd, 0xbd, 0xd7, 0xf8, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc7, + 0xc7, 0xca, 0xc8, 0xca, 0xe7, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xea, 0xd2, 0xd0, 0xd3, 0xd0, 0xd0, + 0xd2, 0xd3, 0xd3, 0xda, 0xec, 0xfd, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xfa, 0xd9, 0xc0, 0xba, 0xbb, 0xbc, + 0xbd, 0xbe, 0xc0, 0xbf, 0xc8, 0xe8, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xee, 0xd3, 0xc6, 0xc6, 0xc8, + 0xc8, 0xca, 0xc8, 0xca, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xf5, 0xe1, 0xd3, 0xd0, 0xd1, 0xd1, + 0xd1, 0xd4, 0xd5, 0xd1, 0xdc, 0xf8, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, 0xff, 0xf0, 0xc7, 0xb6, 0xbe, 0xbe, 0xbb, + 0xbe, 0xc0, 0xbb, 0xc2, 0xdd, 0xf7, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc8, + 0xc8, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xf1, 0xd6, 0xcd, 0xd2, 0xd2, + 0xd0, 0xd3, 0xd4, 0xd0, 0xd7, 0xed, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf5, 0xd9, 0xbf, 0xb8, 0xbd, 0xbe, 0xbb, + 0xbe, 0xc0, 0xbb, 0xcb, 0xf1, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xef, 0xd3, 0xc6, 0xc6, 0xc8, + 0xc8, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xff, 0xfa, 0xe0, 0xd1, 0xd2, 0xd1, + 0xd0, 0xd1, 0xd3, 0xd4, 0xd5, 0xdd, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe5, 0xc1, 0xba, 0xbe, 0xbc, 0xbc, 0xbe, + 0xbf, 0xbe, 0xc6, 0xe0, 0xfb, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xfe, 0xf2, 0xdd, 0xcf, 0xd0, + 0xd2, 0xd0, 0xd0, 0xd4, 0xd2, 0xd3, 0xe8, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfe, 0xda, 0xb6, 0xb7, 0xbf, 0xbc, 0xbb, 0xc0, + 0xbd, 0xbb, 0xd4, 0xf6, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xca, 0xc7, 0xc9, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xe8, 0xd1, 0xd1, + 0xd3, 0xcf, 0xcf, 0xd3, 0xd1, 0xd0, 0xe0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf8, 0xd4, 0xb7, 0xba, 0xc0, 0xbc, 0xbd, 0xc1, + 0xbe, 0xc2, 0xe3, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xde, 0xd1, + 0xd0, 0xd2, 0xd1, 0xd2, 0xd3, 0xd4, 0xde, 0xf0, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xfc, 0xd7, 0xb8, 0xbc, 0xc0, 0xbd, 0xc0, 0xbe, + 0xc2, 0xd8, 0xf3, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xef, 0xd3, + 0xcd, 0xd3, 0xd2, 0xd1, 0xd2, 0xd3, 0xdf, 0xf2, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xe0, 0xbb, 0xb7, 0xbd, 0xbd, 0xbc, 0xba, + 0xcc, 0xef, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xfa, 0xe3, + 0xd1, 0xcd, 0xcf, 0xd1, 0xce, 0xcf, 0xe5, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf0, 0xd3, 0xc3, 0xc0, 0xc1, 0xc2, 0xcb, + 0xe3, 0xfb, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xcb, 0xc7, 0xc8, 0xe6, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, + 0xe3, 0xd2, 0xd2, 0xd4, 0xd3, 0xdb, 0xf1, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf6, 0xe4, 0xd5, 0xd3, 0xde, 0xef, + 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xee, 0xd3, 0xc8, 0xc8, 0xc8, + 0xc7, 0xca, 0xc6, 0xc8, 0xe7, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xf7, 0xeb, 0xe3, 0xe1, 0xe8, 0xf5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xf1, 0xf0, 0xf8, 0xfe, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xef, 0xd4, 0xc9, 0xc8, 0xc7, + 0xc6, 0xca, 0xc8, 0xca, 0xe8, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xfd, 0xf6, 0xf4, 0xf9, 0xfe, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf4, 0xda, 0xc8, 0xc6, 0xca, + 0xc9, 0xc9, 0xc9, 0xcf, 0xeb, 0xff, 0xff, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe7, 0xca, 0xc2, 0xcb, + 0xcb, 0xc6, 0xc9, 0xd9, 0xf2, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xf5, 0xdc, 0xc8, 0xc4, + 0xc4, 0xc5, 0xd3, 0xec, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xfe, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf4, 0xe0, 0xd2, + 0xd2, 0xd9, 0xe9, 0xfc, 0xff, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xf7, + 0xf7, 0xfa, 0xfd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xfd, + 0xfd, 0xfd, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, + 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; #endif /* !PEXPERT_NO_3X_IMAGES */ -const unsigned char gGearPict2x[4*kGearFrames*kGearWidth*kGearHeight] = { - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbc,0x86,0x86,0xbb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcf,0x76,0x76,0x76,0x75,0xc7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x99,0x76,0x76,0x76,0x75,0x98,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xd0,0xb8,0xdc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xef,0xf4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xa2,0x7b,0x7b,0x7b,0xc1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xe3,0xe2,0xe2,0xeb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0x7d,0x7b,0x7b,0x7b,0x7b,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xe3,0xe2,0xe2,0xe2,0xe2,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd7,0x7d,0x7b,0x7b,0x7b,0x7b,0x94,0xfe,0xff,0xff,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xff,0xff,0xff,0xe8,0xe2,0xe2,0xe2,0xe2,0xe2,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0x82,0x7d,0x7b,0x7b,0x7b,0x7b,0xc8,0xff,0xff,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xff,0xff,0xf3,0xe3,0xe2,0xe2,0xe2,0xe2,0xe2,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbb,0x7d,0x7d,0x7b,0x7b,0x7b,0x85,0xfa,0xff,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xff,0xfe,0xe4,0xe3,0xe2,0xe2,0xe2,0xe2,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0x8d,0x7d,0x7d,0x7b,0x7b,0x7b,0xb8,0xff,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xff,0xf0,0xe3,0xe2,0xe2,0xe2,0xe2,0xe4,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd1,0x7e,0x7d,0x7b,0x7b,0x7b,0x7d,0xec,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xfb,0xe4,0xe3,0xe2,0xe2,0xe2,0xe2,0xf4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x9d,0x7d,0x7d,0x7b,0x7b,0x7b,0xa1,0xff,0xff,0xff,0x8e,0x76,0x76,0x76,0x75,0x8d,0xff,0xff,0xff,0xec,0xe3,0xe2,0xe2,0xe2,0xe2,0xe7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xd7,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0x81,0x7d,0x7d,0x7b,0x7b,0x7a,0xd9,0xff,0xff,0x9a,0x76,0x76,0x76,0x75,0x9d,0xff,0xff,0xfa,0xe4,0xe2,0xe2,0xe2,0xe2,0xe2,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xf4,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xa0,0x83,0x83,0x86,0xc1,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xb4,0x7e,0x7d,0x7b,0x7b,0x7a,0xa2,0xff,0xff,0xd1,0x76,0x76,0x76,0x75,0xd0,0xff,0xff,0xef,0xe3,0xe3,0xe2,0xe2,0xe2,0xec,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xef,0xde,0xdc,0xdc,0xe4,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc9,0x85,0x85,0x85,0x83,0x83,0x91,0xd4,0xff,0xff,0xff,0xff,0xff,0xf6,0x87,0x7e,0x7b,0x7b,0x7b,0x95,0xff,0xff,0xff,0xc7,0x8b,0x8b,0xc5,0xff,0xff,0xff,0xea,0xe3,0xe2,0xe2,0xe2,0xe3,0xfc,0xff,0xff,0xff,0xff,0xff,0xf4,0xe2,0xdc,0xdc,0xdc,0xdc,0xdb,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbb,0x85,0x85,0x85,0x85,0x85,0x83,0x82,0xa1,0xe7,0xff,0xff,0xff,0xff,0xcf,0x7f,0x7d,0x7b,0x7b,0xbb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xe2,0xe2,0xe2,0xe2,0xf3,0xff,0xff,0xff,0xff,0xf8,0xe6,0xdc,0xdc,0xdc,0xdc,0xdc,0xdb,0xdb,0xea,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd8,0x86,0x85,0x85,0x85,0x85,0x85,0x83,0x83,0x82,0xb5,0xf6,0xff,0xff,0xff,0xc8,0x8d,0x85,0xad,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xe3,0xe4,0xf2,0xff,0xff,0xff,0xfc,0xec,0xde,0xdc,0xdc,0xdc,0xdb,0xdb,0xdb,0xdb,0xdb,0xf3,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbd,0x86,0x86,0x85,0x85,0x85,0x85,0x85,0x83,0x82,0x89,0xcf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xdf,0xde,0xdc,0xdc,0xdc,0xdb,0xdb,0xdb,0xdb,0xd9,0xea,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe3,0x9e,0x86,0x86,0x85,0x85,0x85,0x85,0x83,0x83,0x82,0xc8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xde,0xdc,0xdc,0xdc,0xdb,0xdb,0xdb,0xdb,0xd9,0xe0,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd1,0x8f,0x87,0x86,0x85,0x85,0x85,0x83,0x83,0x8a,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe0,0xdc,0xdc,0xdc,0xdb,0xdb,0xdb,0xd9,0xdc,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xbd,0x8a,0x87,0x86,0x85,0x85,0x85,0x86,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdc,0xdb,0xdb,0xdb,0xdb,0xd9,0xdb,0xea,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xa9,0x87,0x87,0x86,0x85,0xb7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xdb,0xdb,0xd9,0xd9,0xe4,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe0,0xb2,0xa5,0xc5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xe3,0xe7,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xd1,0xae,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xb0,0xd7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xe3,0xde,0xde,0xde,0xde,0xdc,0xdc,0xdc,0xdc,0xdc,0xe0,0xee,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xcb,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8d,0x8d,0x8d,0x8d,0x8d,0xd1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xd4,0xeb,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0x98,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0x9e,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd9,0xd4,0xd3,0xd3,0xd3,0xd3,0xd3,0xd3,0xd3,0xd3,0xd3,0xd3,0xd3,0xd3,0xd8,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0x9c,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0xa2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd9,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd7,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xcb,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0x91,0xd3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xea,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xd3,0xb0,0xa6,0xa6,0xa6,0xa6,0xa6,0xa8,0xa8,0xa8,0xa8,0xb2,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xde,0xd9,0xd9,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb,0xde,0xec,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe4,0xbd,0xb2,0xcf,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xe6,0xd8,0xdf,0xf2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xb4,0x98,0x98,0x98,0x99,0xc4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xde,0xc9,0xc9,0xcb,0xcb,0xd9,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
[... image-data hunk elided: several hundred rows of comma-separated grayscale pixel bytes (values 0x75–0xff) from an embedded bitmap array, with the old rows (marked "-") replaced by updated byte values; the raw hex rows carry no readable content and are omitted here ...]
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe6,0x91,0x8f,0x8f,0x8e,0x8d,0x8d,0xe4,0xff,0xff,0xff,0xb1,0x86,0x85,0x85,0x83,0x8a,0xff,0xff,0xff,0xf6,0x87,0x7e,0x7b,0x7b,0x7b,0x7b,0xc1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb8,0x8f,0x8f,0x8f,0x8e,0x8d,0xa2,0xfe,0xff,0xff,0xb1,0x87,0x85,0x85,0x83,0x89,0xff,0xff,0xff,0xb4,0x7e,0x7d,0x7b,0x7b,0x7b,0x8d,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xe3,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0x96,0x8f,0x8f,0x8f,0x8e,0x8d,0xd0,0xff,0xff,0xbd,0x87,0x85,0x85,0x83,0x95,0xff,0xff,0xe8,0x81,0x7e,0x7b,0x7b,0x7b,0x7b,0xd1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe2,0xdf,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbd,0x99,0x98,0x9a,0xc5,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xcc,0x91,0x8f,0x8f,0x8e,0x8d,0xa2,0xff,0xff,0xe6,0x89,0x85,0x85,0x83,0xc4,0xff,0xff,0xb7,0x7e,0x7d,0x7b,0x7b,0x7b,0x99,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcb,0x83,0x76,0x76,0x8e,0xf3,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe0,0x9a,0x9a,0x99,0x99,0x98,0xa0,0xd7,0xff,0xff,0xff,0xff,0xff,0xfc,0xa1,0x91,0x8f,0x8f,0x8d,0x96,0xff,0xff,0xff,0xd9,0x9e,0x94,0xbf,0xfc,0xff,0xff,0xaa,0x7e,0x7b,0x7b,0x7b,0x7b,0xe6,0xff,0xff,0xff,0xff,0xff,0xdf,0x91,0x76,0x76,0x76,0x76,0x76,0xb2,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd3,0x9c,0x9a,0x9c,0x99,0x99,0x98,0x98,0xaa,0xe6,0xff,0xff,0xff,0xff,0xe2,0x92,0x8f,0x8f,0x8e,0xb5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcc,0x7d,0x7b,0x7b,0x7a,0xb7,0xff,0xff,0xff,0xff,0xef,0xa6,0x76,0x76,0x76,0x76,0x76,0x76,0x76,0xa0,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0x9c,0x9c,0x9c,0x9c,0x9a,0x99,0x98,0x98,0x98,0xbc,0xf3,0xff,0xff,0xff,0xdc,0xa1,0x94,0xb4,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xb7,0x87,0x86,0xb8,0xff,0xff,0xff,0xf8,0xb8,0x7b,0x76,0x76,0x76,0x76,0x76,0x76,0x76,0x75,0xc1,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd0,0x9c,0x9c,0x9c,0x9c,0x9c,0x9a,0x99,0x98,0x98,0x9a,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd7,0x85,0x76,0x76,0x76,0x76,0x76,0x76,0x76,0x75,0x75,0xa8,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xb1,0x9c,0x9c,0x9c,0x9c,0x9a,0x9a,0x99,0x98,0x98,0xc5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd5,0x79,0x76,0x76,0x76,0x76,0x76,0x76,0x75,0x75,0x85,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdc,0xa8,0x9c,0x9c,0x9c,0x9a,0x9a,0x9a,0x98,0x99,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x94,0x76,0x76,0x76,0x76,0x76,0x76,0x75,0x7a,0xbc,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xcf,0xa0,0x9c,0x9c,0x9c,0x9c,0x9a,0x99,0xf2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x8b,0x76,0x76,0x76,0x76,0x75,0x75,0xa5,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xc0,0x9d,0x9c,0x9c,0x9c,0xb8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbc,0x76,0x75,0x75,0x73,0x91,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xc5,0xb1,0xcb,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xc1,0x98,0xa0,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xe3,0xc3,0xb7,0xb7,0xb7,0xb7,0xb7,0xb5,0xb4,0xb4,0xb4,0xbf,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xef,0xea,0xea,0xe8,0xe8,0xe8,0xe8,0xe8,0xe8,0xe8,0xeb,0xf3,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xe2,0xa5,0xa5,0xa5,0xa5,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xe3,0xe3,0xe3,0xe2,0xe3,0xe3,0xe3,0xe3,0xe2,0xe3,0xe2,0xe2,0xe2,0xee,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xb8,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xaa,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe3,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xb8,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa9,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xdf,0xa6,0xa6,0xa6,0xa8,0xa8,0xa6,0xa8,0xa8,0xa8,0xa8,0xa8,0xa8,0xa8,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xee,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xe2,0xc3,0xb9,0xb9,0xb9,0xbb,0xbb,0xbb,0xbb,0xbb,0xbb,0xc0,0xd8,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xea,0xe6,0xe6,0xe6,0xe6,0xe6,0xe6,0xe7,0xe6,0xe7,0xe8,0xf0,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xd3,0xc1,0xd8,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xe7,0xea,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xcd,0xae,0xae,0xae,0xb0,0xc9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xdc,0xdc,0xdc,0xde,0xe6,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xdb,0xb2,0xae,0xae,0xb1,0xb1,0xb1,0xb2,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe0,0xdb,0xdc,0xdc,0xdc,0xdc,0xde,0xea,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe6,0xbb,0xae,0xb0,0xb0,0xb1,0xb1,0xb1,0xb2,0xb5,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe2,0xd9,0xdb,0xdb,0xdb,0xdc,0xdc,0xdc,0xdf,0xf0,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xc4,0xae,0xb0,0xb0,0xb1,0xb1,0xb1,0xb2,0xb4,0xb5,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xd9,0xd9,0xd9,0xdb,0xdb,0xdc,0xdc,0xdc,0xdc,0xe2,0xf4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xde,0xb0,0xae,0xb0,0xb1,0xb1,0xb1,0xb2,0xb2,0xb4,0xb7,0xd7,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xdc,0xd9,0xdb,0xdb,0xdb,0xdb,0xdc,0xdc,0xdc,0xdc,0xe8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xb0,0xb0,0xb1,0xb1,0xb1,0xb1,0xb2,0xb4,0xb5,0xcc,0xf4,0xff,0xff,0xff,0xeb,0xc7,0xc0,0xd5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe8,0xd8,0xd8,0xeb,0xff,0xff,0xff,0xfc,0xea,0xdb,0xd9,0xd9,0xdb,0xdb,0xdb,0xdc,0xdc,0xdc,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdc,0xb1,0xb1,0xb1,0xb1,0xb2,0xb2,0xb5,0xc1,0xea,0xff,0xff,0xff,0xff,0xec,0xbc,0xbb,0xbc,0xbf,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xd1,0xd1,0xd4,0xd4,0xe8,0xff,0xff,0xff,0xff,0xfa,0xe6,0xd9,0xdb,0xdb,0xdb,0xdb,0xdb,0xdc,0xe7,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe6,0xb1,0xb1,0xb2,0xb2,0xb2,0xb9,0xdf,0xfe,0xff,0xff,0xff,0xff,0xfe,0xc4,0xbb,0xbb,0xbd,0xbf,0xc5,0xff,0xff,0xff,0xef,0xd4,0xd3,0xe7,0xff,0xff,0xff,0xe0,0xd1,0xd1,0xd4,0xd4,0xd5,0xf7,0xff,0xff,0xff,0xff,0xff,0xf4,0xdf,0xd9,0xd9,0xdb,0xdb,0xdb,0xeb,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xcf,0xb1,0xb2,0xb4,0xd3,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xbb,0xbb,0xbd,0xbf,0xc0,0xcb,0xff,0xff,0xf4,0xc7,0xc8,0xc8,0xcb,0xe7,0xff,0xff,0xe4,0xd1,0xd1,0xd1,0xd3,0xd4,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xdc,0xdb,0xdb,0xe2,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xe4,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xbd,0xbb,0xbd,0xbd,0xbf,0xc0,0xe6,0xff,0xff,0xdf,0xc5,0xc8,0xc8,0xcb,0xd4,0xff,0xff,0xf7,0xd0,0xd1,0xd1,0xd1,0xd4,0xd4,0xf3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf3,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd5,0xbb,0xbc,0xbd,0xbf,0xbf,0xcb,0xfe,0xff,0xff,0xd9,0xc5,0xc7,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xe3,0xd1,0xd1,0xd1,0xd3,0xd4,0xd9,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xbb,0xbb,0xbd,0xbd,0xbf,0xc0,0xec,0xff,0xff,0xff,0xd9,0xc5,0xc7,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xfb,0xd4,0xd1,0xd1,0xd1,0xd3,0xd4,0xeb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcb,0xbb,0xbc,0xbd,0xbf,0xbf,0xd3,0xff,0xff,0xff,0xff,0xd9,0xc5,0xc8,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xff,0xeb,0xd1,0xd1,0xd1,0xd3,0xd4,0xd5,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xbb,0xbc,0xbd,0xbd,0xbf,0xc1,0xf6,0xff,0xff,0xff,0xff,0xd9,0xc5,0xc8,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xff,0xfe,0xd7,0xd1,0xd1,0xd1,0xd3,0xd4,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xc3,0xbb,0xbd,0xbd,0xbf,0xbf,0xde,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xff,0xff,0xf2,0xd1,0xd1,0xd1,0xd1,0xd3,0xd4,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xbb,0xbc,0xbd,0xbd,0xbf,0xc4,0xfb,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc9,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xd1,0xd1,0xd1,0xd3,0xd4,0xec,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xbc,0xbd,0xbd,0xbf,0xbf,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc9,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xd1,0xd1,0xd1,0xd1,0xd4,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd3,0xbd,0xbf,0xbf,0xd5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc9,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xd1,0xd1,0xd1,0xdb,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xe7,0xd9,0xea,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc8,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xe7,0xea,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xc7,0xc8,0xc8,0xc9,0xd3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xc7,0xc8,0xc8,0xc8,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xcd,0xcc,0xde,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd5,0x99,0x91,0xbf,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe3,0x86,0x85,0x85,0x83,0xc1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb8,0x86,0x85,0x85,0x83,0x94,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdc,0xc0,0xdb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb1,0x86,0x85,0x85,0x83,0x8a,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe4,0xb7,0xcc,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbf,0x8f,0x8e,0x8e,0xbb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb1,0x85,0x85,0x85,0x83,0x8a,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd4,0x7e,0x7d,0x7b,0x91,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0x8f,0x8f,0x8f,0x8e,0x8d,0xd7,0xff,0xff,0xff,0xff,0xff,0xff,0xb1,0x86,0x85,0x85,0x85,0x8a,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0x81,0x7d,0x7b,0x7b,0x7b,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0x8f,0x8f,0x8f,0x8e,0x8e,0x99,0xfa,0xff,0xff,0xff,0xff,0xff,0xb1,0x86,0x85,0x85,0x83,0x8a,0xff,0xff,0xff,0xff,0xff,0xff,0xac,0x7e,0x7d,0x7b,0x7b,0x7b,0xc5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0x9d,0x8f,0x8f,0x8f,0x8e,0x8e,0xc3,0xff,0xff,0xff,0xff,0xff,0xb1,0x86,0x85,0x85,0x83,0x8a,0xff,0xff,0xff,0xff,0xff,0xe2,0x7e,0x7d,0x7b,0x7b,0x7b,0x7b,0xea,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd7,0x91,0x8f,0x8f,0x8e,0x8e,0x8f,0xef,0xff,0xff,0xff,0xff,0xb1,0x86,0x85,0x85,0x83,0x89,0xff,0xff,0xff,0xff,0xfe,0x98,0x7e,0x7b,0x7b,0x7b,0x7b,0xa9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xa5,0x8f,0x8f,0x8f,0x8e,0x8d,0xb2,0xff,0xff,0xff,0xff,0xb1,0x86,0x85,0x85,0x83,0x89,0xff,0xff,0xff,0xff,0xcb,0x7e,0x7d,0x7b,0x7b,0x7b,0x7f,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe6,0x91,0x8f,0x8f,0x8e,0x8d,0x8d,0xe4,0xff,0xff,0xff,0xb1,0x86,0x85,0x85,0x83,0x8a,0xff,0xff,0xff,0xf6,0x87,0x7e,0x7b,0x7b,0x7b,0x7b,0xc1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb8,0x8f,0x8f,0x8f,0x8e,0x8d,0xa2,0xfe,0xff,0xff,0xb1,0x87,0x85,0x85,0x83,0x89,0xff,0xff,0xff,0xb4,0x7e,0x7d,0x7b,0x7b,0x7b,0x8d,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xe3,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0x96,0x8f,0x8f,0x8f,0x8e,0x8d,0xd0,0xff,0xff,0xbd,0x87,0x85,0x85,0x83,0x95,0xff,0xff,0xe8,0x81,0x7e,0x7b,0x7b,0x7b,0x7b,0xd1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe2,0xdf,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbd,0x99,0x98,0x9a,0xc5,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xcc,0x91,0x8f,0x8f,0x8e,0x8d,0xa2,0xff,0xff,0xe6,0x89,0x85,0x85,0x83,0xc4,0xff,0xff,0xb7,0x7e,0x7d,0x7b,0x7b,0x7b,0x99,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcb,0x83,0x76,0x76,0x8e,0xf3,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe0,0x9a,0x9a,0x99,0x99,0x98,0xa0,0xd7,0xff,0xff,0xff,0xff,0xff,0xfc,0xa1,0x91,0x8f,0x8f,0x8d,0x96,0xff,0xff,0xff,0xd9,0x9e,0x94,0xbf,0xfc,0xff,0xff,0xaa,0x7e,0x7b,0x7b,0x7b,0x7b,0xe6,0xff,0xff,0xff,0xff,0xff,0xdf,0x91,0x76,0x76,0x76,0x76,0x76,0xb2,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd3,0x9c,0x9a,0x9c,0x99,0x99,0x98,0x98,0xaa,0xe6,0xff,0xff,0xff,0xff,0xe2,0x92,0x8f,0x8f,0x8e,0xb5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcc,0x7d,0x7b,0x7b,0x7a,0xb7,0xff,0xff,0xff,0xff,0xef,0xa6,0x76,0x76,0x76,0x76,0x76,0x76,0x76,0xa0,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0x9c,0x9c,0x9c,0x9c,0x9a,0x99,0x98,0x98,0x98,0xbc,0xf3,0xff,0xff,0xff,0xdc,0xa1,0x94,0xb4,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xb7,0x87,0x86,0xb8,0xff,0xff,0xff,0xf8,0xb8,0x7b,0x76,0x76,0x76,0x76,0x76,0x76,0x76,0x75,0xc1,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd0,0x9c,0x9c,0x9c,0x9c,0x9c,0x9a,0x99,0x98,0x98,0x9a,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd7,0x85,0x76,0x76,0x76,0x76,0x76,0x76,0x76,0x75,0x75,0xa8,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xb1,0x9c,0x9c,0x9c,0x9c,0x9a,0x9a,0x99,0x98,0x98,0xc5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd5,0x79,0x76,0x76,0x76,0x76,0x76,0x76,0x75,0x75,0x85,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdc,0xa8,0x9c,0x9c,0x9c,0x9a,0x9a,0x9a,0x98,0x99,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x94,0x76,0x76,0x76,0x76,0x76,0x76,0x75,0x7a,0xbc,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfc,0xcf,0xa0,0x9c,0x9c,0x9c,0x9c,0x9a,0x99,0xf2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x8b,0x76,0x76,0x76,0x76,0x75,0x75,0xa5,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xc0,0x9d,0x9c,0x9c,0x9c,0xb8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbc,0x76,0x75,0x75,0x73,0x91,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xc5,0xb1,0xcb,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xc1,0x98,0xa0,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xe3,0xc3,0xb7,0xb7,0xb7,0xb7,0xb7,0xb5,0xb4,0xb4,0xb4,0xbf,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xef,0xea,0xea,0xe8,0xe8,0xe8,0xe8,0xe8,0xe8,0xe8,0xeb,0xf3,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xe2,0xa5,0xa5,0xa5,0xa5,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xa4,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xe3,0xe3,0xe3,0xe2,0xe3,0xe3,0xe3,0xe3,0xe2,0xe3,0xe2,0xe2,0xe2,0xee,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xb8,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xa5,0xaa,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe3,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xb8,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa6,0xa9,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xdf,0xa6,0xa6,0xa6,0xa8,0xa8,0xa6,0xa8,0xa8,0xa8,0xa8,0xa8,0xa8,0xa8,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xee,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xe2,0xc3,0xb9,0xb9,0xb9,0xbb,0xbb,0xbb,0xbb,0xbb,0xbb,0xc0,0xd8,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xea,0xe6,0xe6,0xe6,0xe6,0xe6,0xe6,0xe7,0xe6,0xe7,0xe8,0xf0,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xd3,0xc1,0xd8,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xe7,0xea,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xcd,0xae,0xae,0xae,0xb0,0xc9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xdc,0xdc,0xdc,0xde,0xe6,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xdb,0xb2,0xae,0xae,0xb1,0xb1,0xb1,0xb2,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe0,0xdb,0xdc,0xdc,0xdc,0xdc,0xde,0xea,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe6,0xbb,0xae,0xb0,0xb0,0xb1,0xb1,0xb1,0xb2,0xb5,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe2,0xd9,0xdb,0xdb,0xdb,0xdc,0xdc,0xdc,0xdf,0xf0,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xc4,0xae,0xb0,0xb0,0xb1,0xb1,0xb1,0xb2,0xb4,0xb5,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xd9,0xd9,0xd9,0xdb,0xdb,0xdc,0xdc,0xdc,0xdc,0xe2,0xf4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xde,0xb0,0xae,0xb0,0xb1,0xb1,0xb1,0xb2,0xb2,0xb4,0xb7,0xd7,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xdc,0xd9,0xdb,0xdb,0xdb,0xdb,0xdc,0xdc,0xdc,0xdc,0xe8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xb0,0xb0,0xb1,0xb1,0xb1,0xb1,0xb2,0xb4,0xb5,0xcc,0xf4,0xff,0xff,0xff,0xeb,0xc7,0xc0,0xd5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe8,0xd8,0xd8,0xeb,0xff,0xff,0xff,0xfc,0xea,0xdb,0xd9,0xd9,0xdb,0xdb,0xdb,0xdc,0xdc,0xdc,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdc,0xb1,0xb1,0xb1,0xb1,0xb2,0xb2,0xb5,0xc1,0xea,0xff,0xff,0xff,0xff,0xec,0xbc,0xbb,0xbc,0xbf,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xd1,0xd1,0xd4,0xd4,0xe8,0xff,0xff,0xff,0xff,0xfa,0xe6,0xd9,0xdb,0xdb,0xdb,0xdb,0xdb,0xdc,0xe7,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe6,0xb1,0xb1,0xb2,0xb2,0xb2,0xb9,0xdf,0xfe,0xff,0xff,0xff,0xff,0xfe,0xc4,0xbb,0xbb,0xbd,0xbf,0xc5,0xff,0xff,0xff,0xef,0xd4,0xd3,0xe7,0xff,0xff,0xff,0xe0,0xd1,0xd1,0xd4,0xd4,0xd5,0xf7,0xff,0xff,0xff,0xff,0xff,0xf4,0xdf,0xd9,0xd9,0xdb,0xdb,0xdb,0xeb,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xcf,0xb1,0xb2,0xb4,0xd3,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xbb,0xbb,0xbd,0xbf,0xc0,0xcb,0xff,0xff,0xf4,0xc7,0xc8,0xc8,0xcb,0xe7,0xff,0xff,0xe4,0xd1,0xd1,0xd1,0xd3,0xd4,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xdc,0xdb,0xdb,0xe2,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xe4,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xbd,0xbb,0xbd,0xbd,0xbf,0xc0,0xe6,0xff,0xff,0xdf,0xc5,0xc8,0xc8,0xcb,0xd4,0xff,0xff,0xf7,0xd0,0xd1,0xd1,0xd1,0xd4,0xd4,0xf3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xf3,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd5,0xbb,0xbc,0xbd,0xbf,0xbf,0xcb,0xfe,0xff,0xff,0xd9,0xc5,0xc7,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xe3,0xd1,0xd1,0xd1,0xd3,0xd4,0xd9,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xbb,0xbb,0xbd,0xbd,0xbf,0xc0,0xec,0xff,0xff,0xff,0xd9,0xc5,0xc7,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xfb,0xd4,0xd1,0xd1,0xd1,0xd3,0xd4,0xeb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcb,0xbb,0xbc,0xbd,0xbf,0xbf,0xd3,0xff,0xff,0xff,0xff,0xd9,0xc5,0xc8,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xff,0xeb,0xd1,0xd1,0xd1,0xd3,0xd4,0xd5,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xbb,0xbc,0xbd,0xbd,0xbf,0xc1,0xf6,0xff,0xff,0xff,0xff,0xd9,0xc5,0xc8,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xff,0xfe,0xd7,0xd1,0xd1,0xd1,0xd3,0xd4,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xc3,0xbb,0xbd,0xbd,0xbf,0xbf,0xde,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc9,0xcf,0xff,0xff,0xff,0xff,0xff,0xf2,0xd1,0xd1,0xd1,0xd1,0xd3,0xd4,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xbb,0xbc,0xbd,0xbd,0xbf,0xc4,0xfb,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc9,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xd1,0xd1,0xd1,0xd3,0xd4,0xec,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xbc,0xbd,0xbd,0xbf,0xbf,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc9,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xd1,0xd1,0xd1,0xd1,0xd4,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd3,0xbd,0xbf,0xbf,0xd5,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc9,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xd1,0xd1,0xd1,0xdb,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfe,0xe7,0xd9,0xea,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd9,0xc7,0xc8,0xc8,0xc8,0xcd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xe7,0xea,0xfc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xc7,0xc8,0xc8,0xc9,0xd3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xc7,0xc8,0xc8,0xc8,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xcd,0xcc,0xde,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, +const unsigned char gGearPict2x[4 * kGearFrames * kGearWidth * kGearHeight] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbc, 0x86, 0x86, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x76, 0x76, 0x76, 0x75, 0xc7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x99, 0x76, 0x76, 0x76, 0x75, 0x98, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd0, 0xb8, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xef, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa2, 0x7b, 0x7b, 0x7b, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe3, 0xe2, 0xe2, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0x94, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0x82, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x7d, 0x7d, 0x7b, 0x7b, 0x7b, 0x85, 0xfa, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe4, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x8d, 0x7d, 0x7d, 0x7b, 0x7b, 0x7b, 0xb8, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7d, 0xec, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xfb, 0xe4, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x9d, 0x7d, 0x7d, 0x7b, 0x7b, 0x7b, 0xa1, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xec, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xd7, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0x81, 0x7d, 0x7d, 0x7b, 0x7b, 0x7a, 0xd9, 0xff, 0xff, 0x9a, 0x76, 0x76, 0x76, 0x75, 0x9d, 0xff, 0xff, 0xfa, 0xe4, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xa0, 0x83, 0x83, 0x86, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x7e, 0x7d, 0x7b, 0x7b, 0x7a, 0xa2, 0xff, 0xff, 0xd1, 0x76, 0x76, 0x76, 0x75, 0xd0, 0xff, 0xff, 0xef, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xef, 0xde, 0xdc, 0xdc, 0xe4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x85, 0x85, 0x85, 0x83, 0x83, 0x91, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0x87, 0x7e, 0x7b, 0x7b, 0x7b, 0x95, 0xff, 0xff, 0xff, 0xc7, 0x8b, 0x8b, 0xc5, 0xff, 0xff, 0xff, 0xea, 0xe3, 0xe2, 0xe2, 0xe2, 0xe3, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe2, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x85, 0x85, 0x85, 0x85, 0x85, 0x83, 0x82, 0xa1, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x7f, 0x7d, 0x7b, 0x7b, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe2, 0xe2, 0xe2, 0xe2, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xe6, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x86, 0x85, 0x85, 0x85, 0x85, 0x85, 0x83, 0x83, 0x82, 0xb5, 0xf6, 0xff, 0xff, 0xff, 0xc8, 0x8d, 0x85, 0xad, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xe3, 0xe4, 0xf2, 0xff, 0xff, 0xff, 0xfc, 0xec, 0xde, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0x86, 0x86, 0x85, 0x85, 0x85, 0x85, 0x85, 0x83, 0x82, 0x89, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xdf, 0xde, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xd9, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x9e, 0x86, 0x86, 0x85, 0x85, 0x85, 0x85, 0x83, 0x83, 0x82, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xde, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xd9, 0xe0, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x8f, 0x87, 0x86, 0x85, 0x85, 0x85, 0x83, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xd9, 0xdc, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xbd, 0x8a, 0x87, 0x86, 0x85, 0x85, 0x85, 0x86, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xd9, 0xdb, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa9, 0x87, 0x87, 0x86, 0x85, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xdb, 0xdb, 0xd9, 0xd9, 0xe4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xb2, 0xa5, 0xc5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xe3, 0xe7, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xae, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xb0, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe3, 0xde, 0xde, 0xde, 0xde, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xe0, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcb, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xeb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x98, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x9e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xd4, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd8, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x9c, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0xa2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd7, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcb, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 
0x91, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xea, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xb0, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xb2, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xde, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xde, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xbd, 0xb2, 0xcf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe6, 0xd8, 0xdf, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xb4, 0x98, 0x98, 0x98, 0x99, 0xc4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xc9, 0xc9, 0xcb, 0xcb, 0xd9, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc5, 0x9a, 0x98, 0x98, 0x98, 0x9a, 0x9a, 0x9c, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xcb, 0xcc, 0xe3, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x9e, 0x98, 0x98, 0x98, 0x99, 0x9a, 0x9c, 0x9c, 0xa2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xcb, 0xcb, 0xcf, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xac, 0x98, 0x98, 0x98, 0x99, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xc5, 0xc5, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xcb, 0xd4, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xa2, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xc9, 0xc5, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xcb, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x98, 0x99, 0x99, 0x9a, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xc3, 0xf8, 0xff, 0xff, 0xff, 0xd8, 0xae, 0xaa, 0xc8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xd7, 0xc3, 0xc7, 0xe6, 0xff, 0xff, 0xff, 0xfb, 0xdb, 0xc5, 0xc5, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 0x99, 0x9a, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xb4, 0xec, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xa2, 0xa4, 0xa5, 0xa6, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xbc, 0xbd, 0xbf, 0xc0, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xd3, 0xc5, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xa8, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xa9, 0xa4, 0xa5, 0xa6, 0xa6, 0xbc, 0xff, 0xff, 0xff, 0xdf, 0xbf, 0xc0, 0xe2, 0xff, 0xff, 0xff, 0xc9, 0xbb, 0xbc, 0xbf, 0xbf, 0xc4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xcc, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xb9, 0x9c, 0x9c, 0xa0, 0xcc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0xa4, 0xa4, 0xa5, 0xa6, 0xa8, 0xc4, 0xff, 0xff, 0xe4, 0xae, 0xb1, 0xb1, 0xb4, 0xe7, 0xff, 0xff, 0xcf, 0xbb, 0xbc, 0xbd, 0xbf, 0xc0, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe2, 0xc8, 0xc7, 0xc8, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xdf, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xa4, 0xa4, 0xa5, 0xa6, 0xa8, 0xa9, 0xe7, 0xff, 0xff, 0xc3, 0xae, 0xb1, 0xb1, 0xb4, 0xcb, 0xff, 0xff, 0xeb, 0xbb, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xec, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa4, 0xa5, 0xa5, 0xa6, 0xa8, 0xc3, 0xff, 0xff, 0xff, 0xbc, 0xae, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xcd, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xa4, 0xa4, 0xa5, 0xa6, 0xa8, 0xaa, 0xf3, 0xff, 0xff, 0xff, 0xbc, 0xae, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xf6, 0xbc, 0xbb, 0xbc, 0xbd, 0xbf, 0xc0, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xad, 0xa4, 0xa5, 0xa6, 0xa6, 0xa8, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xbc, 0xae, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc7, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xa4, 0xa5, 0xa5, 0xa6, 0xa8, 0xae, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xbc, 0xb0, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xbf, 0xbb, 0xbd, 
0xbd, 0xbd, 0xbf, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xa8, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xa4, 0xa5, 0xa5, 0xa6, 0xa8, 0xb9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb2, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb2, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc0, 0xa6, 0xa6, 0xa6, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb2, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xbb, 0xbd, 0xbd, 0xd0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xde, 0xcf, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb2, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xdb, 0xe7, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 0xb0, 0xb1, 0xb1, 0xb2, 0xc7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xb1, 0xb1, 0xb1, 0xb2, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xb9, 0xbb, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbc, 0x86, 0x86, 0xbc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x76, 0x76, 0x76, 0x75, 0xc7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x99, 0x76, 0x76, 0x76, 0x75, 0x98, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd0, 0xb8, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xef, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa2, 0x7b, 0x7b, 0x7b, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe2, 0xe2, 0xe2, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0x94, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0x82, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x7d, 0x7d, 0x7b, 0x7b, 0x7b, 0x85, 0xfa, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe4, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x8b, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xb8, 0xff, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7d, 0xec, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xfb, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0x9d, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0xa1, 0xff, 0xff, 0xff, 0x8e, 0x76, 0x76, 0x76, 0x75, 0x8d, 0xff, 0xff, 0xff, 0xec, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xd7, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0x81, 0x7d, 0x7b, 0x7b, 0x7b, 0x7a, 0xd9, 0xff, 0xff, 0x99, 0x76, 0x76, 0x76, 0x75, 0x9d, 0xff, 0xff, 0xfa, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe0, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xa0, 0x83, 0x83, 0x86, 0xc1, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x7e, 0x7d, 0x7b, 0x7b, 0x7a, 0xa2, 0xff, 0xff, 0xd1, 0x76, 0x76, 0x76, 0x75, 0xcf, 0xff, 0xff, 0xef, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xef, 0xde, 0xdc, 0xdc, 0xe4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0x85, 0x85, 0x85, 0x83, 0x83, 0x91, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0x87, 0x7e, 0x7d, 0x7b, 0x7b, 0x95, 0xff, 0xff, 0xff, 0xc7, 0x8b, 0x8b, 0xc5, 0xff, 0xff, 0xff, 0xeb, 0xe3, 0xe2, 0xe2, 0xe2, 0xe3, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe2, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x85, 0x85, 0x85, 0x85, 0x83, 0x83, 0x82, 0xa1, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x7f, 0x7e, 0x7b, 0x7b, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe2, 0xe2, 0xe2, 0xe0, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xe6, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0x86, 0x85, 0x85, 0x85, 0x85, 0x85, 0x83, 0x82, 0x82, 0xb5, 0xf6, 0xff, 0xff, 0xff, 0xc8, 0x8d, 0x85, 0xad, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xe4, 0xe4, 0xf2, 0xff, 0xff, 0xff, 0xfc, 0xec, 0xde, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0x86, 0x86, 0x85, 0x85, 0x85, 0x85, 0x85, 0x83, 0x82, 0x89, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xdf, 0xde, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xd9, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x9e, 0x86, 0x86, 0x85, 0x85, 0x85, 0x85, 0x83, 0x82, 0x82, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xde, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xd9, 0xe0, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x8f, 0x86, 0x86, 0x85, 0x85, 0x85, 0x85, 0x83, 0x8b, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdc, 0xdc, 0xdb, 0xdb, 0xdb, 0xdb, 0xd9, 0xdc, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xbd, 0x8a, 0x87, 0x86, 0x85, 0x85, 0x85, 0x85, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xdb, 0xdb, 0xdb, 0xd9, 0xd9, 0xdb, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa9, 0x87, 0x86, 0x86, 0x85, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xdb, 0xd9, 0xd9, 0xd9, 0xe4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xb2, 0xa5, 0xc5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xe3, 0xe7, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xae, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xb0, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe3, 0xde, 0xde, 0xde, 0xde, 0xde, 0xdc, 0xdc, 0xdc, 0xdc, 0xe0, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcb, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x8e, 0x8d, 0x8d, 0x8d, 0x8d, 0x8d, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xd4, 0xeb, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x98, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x9e, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xd4, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd3, 0xd8, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0x9c, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0xa2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd7, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcb, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x91, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xea, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xb0, 0xa6, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xb2, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xde, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xde, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xbd, 0xb2, 0xcf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe6, 0xd8, 0xdf, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xb4, 0x98, 0x98, 0x98, 0x99, 0xc4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xc8, 0xc9, 0xcb, 0xcb, 0xd9, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc5, 0x9a, 0x98, 0x98, 0x99, 0x9a, 0x9c, 0x9c, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc8, 0xc8, 0xc8, 0xc9, 0xcb, 0xcb, 0xcc, 0xe3, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0xa0, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9c, 0x9c, 0xa2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xcb, 0xcb, 0xcf, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xac, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xc5, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xc9, 0xcb, 0xd5, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0x98, 0x98, 0x99, 0x99, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xa2, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xc9, 0xc5, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xcb, 0xcb, 0xe3, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x98, 0x99, 0x99, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0xc3, 0xf8, 0xff, 0xff, 0xff, 0xd8, 0xae, 0xac, 0xc8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xd7, 0xc4, 0xc7, 0xe6, 0xff, 0xff, 0xff, 0xfb, 0xdc, 0xc5, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xcb, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 0x99, 0x9a, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xb4, 0xec, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xa4, 0xa4, 0xa5, 0xa6, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xbc, 0xbd, 0xbf, 0xc0, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xd3, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xc9, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9a, 0x9c, 0x9c, 0x9c, 0x9c, 0xa8, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xa9, 0xa4, 0xa5, 0xa6, 0xa8, 0xbc, 0xff, 0xff, 0xff, 0xdf, 0xbf, 0xc0, 0xe2, 0xff, 0xff, 0xff, 0xc9, 0xbc, 0xbd, 0xbf, 0xc0, 0xc4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xcc, 0xc7, 0xc7, 0xc8, 0xc8, 0xc8, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xb9, 0x9c, 0x9c, 0xa0, 0xcc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xa4, 0xa5, 0xa5, 0xa6, 0xa8, 0xc4, 0xff, 0xff, 0xe4, 0xae, 0xb1, 0xb2, 0xb5, 0xe7, 0xff, 0xff, 0xd0, 0xbb, 0xbc, 0xbd, 0xbf, 0xc0, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe2, 0xc8, 0xc7, 0xc8, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xdf, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xa5, 0xa4, 0xa5, 0xa6, 0xa8, 0xa9, 0xe7, 0xff, 0xff, 0xc3, 0xae, 0xb1, 0xb1, 0xb5, 0xcb, 0xff, 0xff, 0xeb, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc0, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xec, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa4, 0xa5, 0xa6, 0xa6, 0xa8, 0xc3, 0xff, 0xff, 0xff, 0xbc, 0xae, 0xb1, 0xb2, 0xb5, 0xc3, 0xff, 0xff, 0xff, 0xcf, 0xbb, 0xbc, 0xbd, 0xbf, 0xc0, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xa4, 0xa5, 0xa5, 0xa6, 0xa8, 0xac, 0xf3, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb2, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xf6, 0xbc, 0xbc, 0xbd, 0xbd, 0xbf, 0xc0, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xad, 0xa4, 0xa5, 0xa6, 0xa6, 0xa8, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xae, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xc7, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xa4, 0xa5, 0xa5, 0xa6, 0xa8, 0xb0, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xbf, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xa8, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc3, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xa4, 0xa5, 0xa6, 0xa6, 0xa8, 0xb9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc7, 0xbb, 0xbd, 0xbd, 0xbf, 0xbf, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xa5, 0xa5, 0xa6, 0xa8, 0xa8, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc0, 0xa6, 0xa6, 0xa8, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xbb, 0xbd, 0xbd, 0xd1, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xde, 0xcf, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0xb0, 0xb1, 0xb1, 0xb4, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xdb, 0xe7, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc4, 0xb1, 0xb1, 0xb1, 0xb4, 0xc8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xb1, 0xb1, 0xb1, 0xb2, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xbb, 0xbb, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8b, 0x8b, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x7d, 0x7b, 0x7b, 0x7b, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x7d, 0x7b, 
0x7b, 0x7b, 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd3, 0xbc, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xb4, 0xcc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa9, 0x85, 0x85, 0x83, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x76, 0x76, 0x76, 0x9e, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x85, 0x85, 0x85, 0x83, 0x82, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x76, 0x76, 0x76, 0x76, 0x76, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0x85, 0x85, 0x85, 0x85, 0x83, 0x9a, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x91, 0x76, 0x76, 0x76, 0x76, 0x75, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8a, 0x85, 0x85, 0x85, 0x83, 0x82, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7d, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0x76, 0x76, 0x76, 0x76, 0x75, 0x79, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x86, 0x85, 0x85, 0x85, 0x83, 0x8d, 0xfa, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7e, 0x7d, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x81, 0x76, 0x76, 0x76, 0x76, 0x75, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x94, 0x86, 0x85, 0x85, 0x85, 0x82, 0xbc, 0xff, 0xff, 0xff, 0xff, 0x96, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x76, 0x76, 0x76, 0x76, 0x76, 0x83, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x86, 0x85, 0x85, 0x85, 0x83, 0x85, 0xee, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xec, 0x79, 0x76, 0x76, 0x76, 0x76, 0x75, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa4, 0x86, 0x85, 0x85, 0x85, 0x82, 0xa6, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xa0, 0x76, 0x76, 0x76, 0x76, 0x75, 0x94, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xdb, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x87, 0x86, 0x85, 0x85, 0x83, 0x82, 0xdb, 0xff, 0xff, 0xa0, 0x7e, 0x7d, 0x7b, 0x7b, 0xa1, 0xff, 0xff, 0xd8, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf6, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xa8, 0x8e, 0x8d, 0x91, 0xc7, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb9, 0x86, 0x85, 0x85, 0x85, 0x82, 0xa8, 0xff, 0xff, 0xd3, 0x7e, 0x7d, 0x7b, 0x7b, 0xd1, 0xff, 0xff, 0xa0, 0x76, 0x76, 0x76, 0x76, 0x75, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf2, 0xe4, 0xe2, 0xe2, 0xe8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x9a, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8f, 0x86, 0x85, 0x85, 0x83, 0x9a, 0xff, 0xff, 0xff, 0xc9, 0x91, 0x91, 0xc8, 0xff, 0xff, 0xff, 0x94, 0x76, 0x76, 0x75, 0x76, 0x7e, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0xa9, 0xea, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x87, 0x85, 0x85, 0x85, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x76, 0x76, 0x75, 0x75, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xea, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0xbc, 0xf7, 0xff, 0xff, 0xff, 0xcc, 0x95, 0x8d, 0xb2, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xa9, 0x7d, 0x83, 0xc4, 0xff, 0xff, 0xff, 0xfc, 0xef, 0xe3, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8d, 0x94, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe7, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xa5, 0x91, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8d, 0x8d, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe3, 0xe2, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe7, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x99, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x95, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc3, 0x94, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe0, 0xee, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xb0, 0x91, 0x8f, 0x8f, 0x8f, 0xbd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xe2, 0xe2, 0xe2, 0xe2, 0xe8, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xbb, 0xad, 0xcb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe8, 0xeb, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xb7, 0xad, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xb7, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xe8, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe7, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd0, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xde, 0xde, 0xde, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xef, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xa2, 0x9a, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x98, 0x98, 0x99, 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xe0, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xa5, 0x9c, 0x9c, 0x9a, 0x9a, 0x9c, 0x9c, 0x9a, 0x9a, 0x9c, 0x9a, 0x9a, 0x9c, 0x9a, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdf, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xee, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0xb8, 
0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xbb, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe4, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc4, 0xb9, 0xd4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xea, 0xe0, 0xe6, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xbc, 0xa2, 0xa4, 0xa4, 0xa4, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xd4, 0xd4, 0xd4, 0xd4, 0xe0, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcc, 0xa5, 0xa4, 0xa4, 0xa4, 0xa5, 0xa6, 0xa8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xd1, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xe8, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xa9, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa6, 0xa6, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd8, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xb5, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xdc, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa8, 0xa8, 0xae, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xd4, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xcb, 0xf8, 0xff, 
0xff, 0xff, 0xde, 0xb8, 0xb5, 0xcf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xde, 0xcd, 0xd1, 0xea, 0xff, 0xff, 0xff, 0xfb, 0xe2, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa8, 0xa8, 0xbd, 0xef, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xae, 0xae, 0xb0, 0xb1, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xc8, 0xc8, 0xcb, 0xcb, 0xec, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xdc, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xb2, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb4, 0xae, 0xb0, 0xb1, 0xb2, 0xc4, 0xff, 0xff, 0xff, 0xe3, 0xc8, 0xc9, 0xe6, 0xff, 0xff, 0xff, 0xd1, 0xc7, 0xc8, 0xc8, 0xcb, 0xcf, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd7, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xc1, 0xa6, 0xa8, 0xaa, 0xd1, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xae, 0xb0, 0xb1, 0xb1, 0xb5, 0xcc, 0xff, 0xff, 0xe8, 0xbb, 0xbc, 0xbd, 0xbf, 0xeb, 0xff, 0xff, 0xd7, 0xc5, 0xc8, 0xc8, 0xc9, 0xcb, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe7, 0xd3, 0xd1, 0xd1, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xe3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb0, 0xae, 0xb1, 0xb1, 0xb2, 0xb5, 0xea, 0xff, 0xff, 0xcb, 0xbb, 0xbc, 0xbd, 0xc0, 0xd3, 0xff, 0xff, 0xee, 0xc5, 0xc7, 0xc8, 0xc8, 0xc9, 0xcc, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0xae, 0xb1, 0xb1, 0xb1, 0xb4, 0xcb, 0xff, 0xff, 0xff, 0xc5, 0xbb, 0xbc, 0xbf, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xd5, 0xc5, 0xc8, 0xc8, 0xc9, 0xc9, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xae, 0xb0, 0xb1, 0xb1, 0xb4, 0xb7, 0xf4, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xf7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc9, 0xcb, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xb8, 0xae, 0xb1, 0xb1, 0xb2, 0xb4, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbf, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc5, 0xc8, 0xc8, 0xc8, 0xc9, 0xd0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xae, 0xb1, 0xb1, 0xb1, 0xb4, 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc9, 0xc7, 0xc8, 0xc8, 0xc8, 0xcb, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb2, 0xb0, 0xb1, 0xb1, 0xb1, 0xb4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbd, 0xbd, 
0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xcc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xae, 0xb0, 0xb1, 0xb1, 0xb2, 0xc3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xb0, 0xb1, 0xb1, 0xb1, 0xb4, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbd, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc8, 0xb1, 0xb1, 0xb2, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc7, 0xc8, 0xc8, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe2, 0xd5, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe2, 0xeb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xbc, 0xbd, 0xbd, 0xbf, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xbb, 0xbd, 0xbd, 0xbf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xc4, 0xc4, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8b, 0x8b, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0x7d, 0x7b, 0x7b, 0x7b, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa0, 0x7d, 0x7b, 0x7b, 0x7b, 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xd3, 0xbc, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xb4, 0xcc, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa9, 0x85, 0x85, 0x83, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x76, 0x76, 0x76, 0x9e, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x85, 0x85, 0x85, 0x83, 0x82, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x76, 0x76, 0x76, 0x76, 0x76, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0x85, 0x85, 0x85, 0x85, 0x83, 0x9a, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x91, 0x76, 0x76, 0x76, 0x76, 0x75, 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8a, 0x85, 0x85, 0x85, 0x83, 0x82, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7d, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0x76, 0x76, 0x76, 0x76, 0x75, 0x79, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x86, 0x85, 0x85, 0x85, 0x83, 0x8d, 0xfa, 0xff, 0xff, 0xff, 0xff, 0x95, 0x7e, 0x7d, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xf8, 0x81, 0x76, 0x76, 0x76, 0x76, 0x75, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0x94, 0x86, 0x85, 0x85, 0x85, 0x82, 0xbc, 0xff, 0xff, 0xff, 0xff, 0x96, 0x7d, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xff, 0xb7, 0x76, 0x76, 0x76, 0x76, 0x76, 0x83, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x86, 0x85, 0x85, 0x85, 0x83, 0x85, 0xee, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xec, 0x79, 0x76, 0x76, 0x76, 0x76, 0x75, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xa4, 0x86, 0x85, 0x85, 0x85, 0x82, 0xa6, 0xff, 0xff, 0xff, 0x96, 0x7e, 0x7b, 0x7b, 0x7b, 0x92, 0xff, 0xff, 0xff, 0xa0, 0x76, 0x76, 0x76, 0x76, 0x75, 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xeb, 0xdb, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0x87, 0x86, 0x85, 0x85, 0x83, 0x82, 0xdb, 0xff, 0xff, 0xa0, 0x7e, 0x7d, 0x7b, 0x7b, 0xa1, 0xff, 0xff, 0xd8, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf6, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xa8, 0x8e, 0x8d, 0x91, 0xc7, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb9, 0x86, 0x85, 0x85, 0x85, 0x82, 0xa8, 0xff, 0xff, 0xd3, 0x7e, 0x7d, 0x7b, 0x7b, 0xd1, 0xff, 0xff, 0xa0, 0x76, 0x76, 0x76, 0x76, 0x75, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf2, 0xe4, 0xe2, 0xe2, 0xe8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcf, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0x9a, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0x8f, 0x86, 0x85, 0x85, 0x83, 0x9a, 0xff, 0xff, 0xff, 0xc9, 0x91, 0x91, 0xc8, 0xff, 0xff, 0xff, 0x94, 0x76, 0x76, 0x75, 0x76, 0x7e, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe6, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8e, 0x8d, 0xa9, 0xea, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x87, 0x85, 0x85, 0x85, 0xbf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x76, 0x76, 0x75, 0x75, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xea, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8d, 0x8d, 0xbc, 0xf7, 0xff, 0xff, 0xff, 0xcc, 0x95, 0x8d, 0xb2, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xa9, 0x7d, 0x83, 0xc4, 0xff, 0xff, 0xff, 0xfc, 0xef, 0xe3, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x8d, 0x94, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe7, 0xe3, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xa5, 0x91, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8e, 0x8d, 0x8d, 0xcc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe3, 0xe2, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe7, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x99, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x8d, 0x95, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc3, 0x94, 0x8f, 0x8f, 0x8f, 0x8f, 0x8e, 0x8f, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe0, 0xee, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xb0, 0x91, 0x8f, 0x8f, 0x8f, 0xbd, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xe2, 0xe2, 0xe2, 0xe2, 0xe8, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xbb, 0xad, 0xcb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe8, 0xeb, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xb7, 0xad, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xb7, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xe8, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe4, 0xe7, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd0, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0x98, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xde, 0xde, 0xde, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xef, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xa2, 0x9a, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x98, 0x98, 0x99, 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdc, 0xdb, 0xdb, 0xe0, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xa5, 0x9c, 0x9c, 0x9a, 0x9a, 0x9c, 0x9c, 0x9a, 0x9a, 0x9c, 0x9a, 0x9a, 0x9c, 0x9a, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdf, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xd0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xee, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0xb8, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb0, 0xb1, 0xbb, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe4, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc4, 0xb9, 0xd4, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xea, 0xe0, 0xe6, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xbc, 0xa2, 0xa4, 0xa4, 0xa4, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xd4, 0xd4, 0xd4, 0xd4, 0xe0, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xcc, 0xa5, 0xa4, 0xa4, 0xa4, 0xa5, 0xa6, 0xa8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd1, 0xd1, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xe8, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdb, 0xa9, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa6, 0xa6, 0xad, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xd8, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xb5, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa6, 0xa6, 0xa8, 0xa8, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd4, 0xd4, 0xd4, 0xdc, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xa4, 0xa4, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa8, 0xa8, 0xae, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xd4, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xd4, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xa4, 0xa5, 0xa5, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xcb, 0xf8, 0xff, 0xff, 0xff, 0xde, 0xb8, 0xb5, 0xcf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xde, 0xcd, 0xd1, 0xea, 0xff, 0xff, 0xff, 0xfb, 0xe2, 
0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd4, 0xd4, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xa5, 0xa5, 0xa5, 0xa5, 0xa6, 0xa8, 0xa8, 0xbd, 0xef, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xae, 0xae, 0xb0, 0xb1, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xc8, 0xc8, 0xcb, 0xcb, 0xec, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xdc, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd3, 0xd3, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xa5, 0xa6, 0xa6, 0xa6, 0xa8, 0xb2, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb4, 0xae, 0xb0, 0xb1, 0xb2, 0xc4, 0xff, 0xff, 0xff, 0xe3, 0xc8, 0xc9, 0xe6, 0xff, 0xff, 0xff, 0xd1, 0xc7, 0xc8, 0xc8, 0xcb, 0xcf, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd7, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xc1, 0xa6, 0xa8, 0xaa, 0xd1, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xae, 0xb0, 0xb1, 0xb1, 0xb5, 0xcc, 0xff, 0xff, 0xe8, 0xbb, 0xbc, 0xbd, 0xbf, 0xeb, 0xff, 0xff, 0xd7, 0xc5, 0xc8, 0xc8, 0xc9, 0xcb, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe7, 0xd3, 0xd1, 0xd1, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xe3, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb0, 0xae, 0xb1, 0xb1, 0xb2, 0xb5, 0xea, 0xff, 0xff, 0xcb, 0xbb, 0xbc, 0xbd, 0xc0, 0xd3, 0xff, 0xff, 0xee, 0xc5, 0xc7, 0xc8, 0xc8, 0xc9, 0xcc, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xf0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0xae, 0xb1, 0xb1, 0xb1, 0xb4, 0xcb, 0xff, 0xff, 0xff, 0xc5, 0xbb, 0xbc, 0xbf, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xd5, 0xc5, 0xc8, 0xc8, 0xc9, 0xc9, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xae, 0xb0, 0xb1, 0xb1, 0xb4, 0xb7, 0xf4, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xf7, 0xc7, 0xc7, 0xc8, 0xc8, 0xc9, 0xcb, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xb8, 0xae, 0xb1, 0xb1, 0xb2, 0xb4, 0xd5, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbf, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc5, 0xc8, 0xc8, 0xc8, 0xc9, 0xd0, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd8, 0xae, 0xb1, 0xb1, 0xb1, 0xb4, 0xbb, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xfb, 0xc9, 0xc7, 0xc8, 0xc8, 0xc8, 0xcb, 0xe4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xb2, 0xb0, 0xb1, 0xb1, 0xb1, 0xb4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbd, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xcc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xae, 0xb0, 0xb1, 0xb1, 0xb2, 0xc3, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0xc7, 0xc8, 0xc8, 0xc8, 0xc9, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xb0, 0xb1, 0xb1, 0xb1, 0xb4, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbd, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc7, 0xc8, 0xc8, 0xc8, 0xc8, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc8, 0xb1, 0xb1, 0xb2, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc7, 0xc8, 0xc8, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xe2, 0xd5, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0xbb, 0xbc, 0xbd, 0xbf, 0xcb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xe2, 0xeb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xbc, 0xbd, 0xbd, 0xbf, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xbb, 0xbd, 0xbd, 0xbf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xc4, 0xc4, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x99, 0x91, 0xbf, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x86, 0x85, 0x85, 0x83, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x86, 0x85, 0x85, 0x83, 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xc0, 0xdb, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xb7, 0xcc, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8f, 0x8e, 0x8e, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x85, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x7e, 0x7d, 0x7b, 0x91, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x85, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x81, 0x7d, 0x7b, 0x7b, 0x7b, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x99, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xac, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x9d, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x91, 0x8f, 0x8f, 0x8e, 0x8e, 0x8f, 0xef, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x98, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xa9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa5, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x91, 0x8f, 0x8f, 0x8e, 0x8d, 0x8d, 0xe4, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xf6, 0x87, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xa2, 0xfe, 0xff, 0xff, 0xb1, 0x87, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xb4, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x8d, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xe3, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x96, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xd0, 0xff, 0xff, 0xbd, 0x87, 0x85, 0x85, 0x83, 0x95, 
0xff, 0xff, 0xe8, 0x81, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0x99, 0x98, 0x9a, 0xc5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x91, 0x8f, 0x8f, 0x8e, 0x8d, 0xa2, 0xff, 0xff, 0xe6, 0x89, 0x85, 0x85, 0x83, 0xc4, 0xff, 0xff, 0xb7, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x99, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x83, 0x76, 0x76, 0x8e, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9a, 0x9a, 0x99, 0x99, 0x98, 0xa0, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xa1, 0x91, 0x8f, 0x8f, 0x8d, 0x96, 0xff, 0xff, 0xff, 0xd9, 0x9e, 0x94, 0xbf, 0xfc, 0xff, 0xff, 0xaa, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x91, 0x76, 0x76, 0x76, 0x76, 0x76, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9c, 0x9a, 0x9c, 0x99, 0x99, 0x98, 0x98, 0xaa, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x92, 0x8f, 0x8f, 0x8e, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x7d, 0x7b, 0x7b, 0x7a, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xa6, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0x98, 0x98, 0x98, 0xbc, 0xf3, 0xff, 0xff, 0xff, 0xdc, 0xa1, 0x94, 0xb4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb7, 0x87, 0x86, 0xb8, 0xff, 0xff, 0xff, 0xf8, 0xb8, 0x7b, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0x98, 0x98, 0x9a, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x85, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0xa8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xb1, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x9a, 0x99, 0x98, 0x98, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x79, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0x85, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xa8, 0x9c, 0x9c, 0x9c, 0x9a, 0x9a, 0x9a, 0x98, 0x99, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x94, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x7a, 0xbc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xcf, 0xa0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8b, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0xa5, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xc0, 0x9d, 0x9c, 0x9c, 0x9c, 0xb8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbc, 0x76, 0x75, 0x75, 0x73, 
0x91, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xc5, 0xb1, 0xcb, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc1, 0x98, 0xa0, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc3, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb5, 0xb4, 0xb4, 0xb4, 0xbf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xef, 0xea, 0xea, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xeb, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xe2, 0xa5, 0xa5, 0xa5, 0xa5, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xe3, 0xe3, 0xe3, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe3, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xaa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xdf, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xc3, 0xb9, 0xb9, 0xb9, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xc0, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xea, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe6, 0xe7, 0xe8, 0xf0, 0xfe, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd3, 0xc1, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe7, 0xea, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xcd, 0xae, 0xae, 0xae, 0xb0, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xdc, 0xdc, 0xdc, 0xde, 0xe6, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xdb, 0xb2, 0xae, 0xae, 0xb1, 0xb1, 0xb1, 0xb2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xde, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xbb, 0xae, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xd9, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdf, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc4, 0xae, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb4, 0xb5, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xd9, 0xd9, 0xd9, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xe2, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xb0, 0xae, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb4, 0xb7, 0xd7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xdc, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb4, 0xb5, 0xcc, 0xf4, 0xff, 0xff, 0xff, 0xeb, 0xc7, 0xc0, 0xd5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xd8, 0xd8, 0xeb, 0xff, 0xff, 0xff, 0xfc, 0xea, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xb1, 0xb1, 
0xb1, 0xb1, 0xb2, 0xb2, 0xb5, 0xc1, 0xea, 0xff, 0xff, 0xff, 0xff, 0xec, 0xbc, 0xbb, 0xbc, 0xbf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd1, 0xd1, 0xd4, 0xd4, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe6, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb9, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc4, 0xbb, 0xbb, 0xbd, 0xbf, 0xc5, 0xff, 0xff, 0xff, 0xef, 0xd4, 0xd3, 0xe7, 0xff, 0xff, 0xff, 0xe0, 0xd1, 0xd1, 0xd4, 0xd4, 0xd5, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdf, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xcf, 0xb1, 0xb2, 0xb4, 0xd3, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xbb, 0xbb, 0xbd, 0xbf, 0xc0, 0xcb, 0xff, 0xff, 0xf4, 0xc7, 0xc8, 0xc8, 0xcb, 0xe7, 0xff, 0xff, 0xe4, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xdc, 0xdb, 0xdb, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe4, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbd, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xe6, 0xff, 0xff, 0xdf, 0xc5, 0xc8, 0xc8, 0xcb, 0xd4, 0xff, 0xff, 0xf7, 0xd0, 0xd1, 0xd1, 0xd1, 0xd4, 0xd4, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf3, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xcb, 0xfe, 0xff, 0xff, 0xd9, 0xc5, 0xc7, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xe3, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbb, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xec, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc7, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xfb, 0xd4, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd5, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc1, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xd7, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc3, 0xbb, 0xbd, 0xbd, 0xbf, 0xbf, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc4, 
0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbc, 0xbd, 0xbd, 0xbf, 0xbf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xd1, 0xd1, 0xd1, 0xd1, 0xd4, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xbd, 0xbf, 0xbf, 0xd5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd1, 0xd1, 0xd1, 0xdb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe7, 0xd9, 0xea, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc8, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe7, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc7, 0xc8, 0xc8, 0xc9, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc7, 0xc8, 0xc8, 0xc8, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xcd, 0xcc, 0xde, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x99, 0x91, 0xbf, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0x86, 0x85, 0x85, 0x83, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x86, 0x85, 0x85, 0x83, 0x94, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xc0, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xb7, 0xcc, 0xf7, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x8f, 0x8e, 0x8e, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x85, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0x7e, 0x7d, 0x7b, 0x91, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x85, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0x81, 0x7d, 0x7b, 0x7b, 0x7b, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0x99, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xac, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x9d, 0x8f, 0x8f, 0x8f, 0x8e, 0x8e, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7b, 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x91, 0x8f, 0x8f, 0x8e, 0x8e, 0x8f, 0xef, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x98, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xa9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xa5, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x7f, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0x91, 0x8f, 0x8f, 0x8e, 0x8d, 0x8d, 0xe4, 0xff, 0xff, 0xff, 0xb1, 0x86, 0x85, 0x85, 0x83, 0x8a, 0xff, 0xff, 0xff, 0xf6, 0x87, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb8, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xa2, 0xfe, 0xff, 0xff, 0xb1, 0x87, 0x85, 0x85, 0x83, 0x89, 0xff, 0xff, 0xff, 0xb4, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x8d, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xe3, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0x96, 0x8f, 0x8f, 0x8f, 0x8e, 0x8d, 0xd0, 0xff, 0xff, 0xbd, 0x87, 0x85, 0x85, 0x83, 0x95, 0xff, 0xff, 0xe8, 0x81, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xd1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbd, 0x99, 0x98, 0x9a, 0xc5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x91, 0x8f, 0x8f, 0x8e, 0x8d, 0xa2, 0xff, 0xff, 0xe6, 0x89, 0x85, 0x85, 0x83, 0xc4, 0xff, 0xff, 0xb7, 0x7e, 0x7d, 0x7b, 0x7b, 0x7b, 0x99, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0x83, 0x76, 0x76, 0x8e, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0x9a, 0x9a, 0x99, 0x99, 0x98, 0xa0, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xa1, 0x91, 0x8f, 0x8f, 0x8d, 0x96, 0xff, 0xff, 0xff, 0xd9, 0x9e, 0x94, 0xbf, 0xfc, 0xff, 0xff, 0xaa, 0x7e, 0x7b, 0x7b, 0x7b, 0x7b, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0x91, 0x76, 0x76, 0x76, 0x76, 0x76, 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0x9c, 0x9a, 0x9c, 0x99, 0x99, 0x98, 0x98, 0xaa, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xe2, 0x92, 0x8f, 0x8f, 0x8e, 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0x7d, 0x7b, 0x7b, 0x7a, 0xb7, 0xff, 0xff, 0xff, 0xff, 0xef, 0xa6, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0x98, 0x98, 0x98, 0xbc, 0xf3, 0xff, 0xff, 0xff, 0xdc, 0xa1, 0x94, 0xb4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xb7, 0x87, 0x86, 0xb8, 0xff, 0xff, 0xff, 0xf8, 0xb8, 0x7b, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0xc1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0x98, 0x98, 0x9a, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd7, 0x85, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0xa8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xb1, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x9a, 0x99, 0x98, 0x98, 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0x79, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0x85, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xa8, 0x9c, 0x9c, 0x9c, 0x9a, 0x9a, 0x9a, 0x98, 0x99, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x94, 0x76, 0x76, 0x76, 0x76, 0x76, 0x76, 0x75, 0x7a, 0xbc, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfc, 0xcf, 0xa0, 0x9c, 0x9c, 0x9c, 0x9c, 0x9a, 0x99, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x8b, 0x76, 0x76, 0x76, 0x76, 0x75, 0x75, 0xa5, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xc0, 0x9d, 0x9c, 0x9c, 0x9c, 0xb8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbc, 0x76, 0x75, 0x75, 0x73, 0x91, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xc5, 0xb1, 0xcb, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc1, 0x98, 0xa0, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xc3, 0xb7, 0xb7, 0xb7, 0xb7, 0xb7, 0xb5, 0xb4, 0xb4, 0xb4, 0xbf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xef, 0xea, 0xea, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xe8, 0xeb, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xe2, 0xa5, 0xa5, 0xa5, 0xa5, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xa4, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xe3, 0xe3, 0xe3, 0xe2, 0xe3, 0xe3, 0xe3, 0xe3, 0xe2, 0xe3, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xaa, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe3, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xb8, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa6, 0xa9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xfe, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xdf, 0xa6, 0xa6, 0xa6, 0xa8, 0xa8, 0xa6, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xa8, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xc3, 0xb9, 0xb9, 0xb9, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xc0, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf6, 0xea, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6, 0xe7, 0xe6, 0xe7, 0xe8, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xd3, 0xc1, 0xd8, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xe7, 0xea, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xcd, 0xae, 0xae, 0xae, 0xb0, 0xc9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xdc, 0xdc, 0xdc, 0xde, 0xe6, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xdb, 0xb2, 0xae, 0xae, 0xb1, 0xb1, 0xb1, 0xb2, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xde, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xbb, 0xae, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb5, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe2, 0xd9, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdf, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc4, 0xae, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb4, 0xb5, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xd9, 0xd9, 0xd9, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xe2, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xde, 0xb0, 0xae, 0xb0, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb4, 0xb7, 0xd7, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xdc, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xdc, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb0, 0xb0, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb4, 0xb5, 0xcc, 0xf4, 0xff, 0xff, 0xff, 0xeb, 0xc7, 0xc0, 0xd5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xd8, 0xd8, 0xeb, 0xff, 0xff, 0xff, 0xfc, 0xea, 0xdb, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xdc, 0xdc, 0xdc, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdc, 0xb1, 0xb1, 0xb1, 0xb1, 0xb2, 0xb2, 0xb5, 0xc1, 0xea, 0xff, 0xff, 0xff, 0xff, 0xec, 0xbc, 0xbb, 0xbc, 0xbf, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xee, 0xd1, 0xd1, 0xd4, 0xd4, 0xe8, 0xff, 0xff, 0xff, 0xff, 0xfa, 0xe6, 0xd9, 0xdb, 0xdb, 0xdb, 0xdb, 0xdb, 0xdc, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xb1, 0xb1, 0xb2, 0xb2, 0xb2, 0xb9, 0xdf, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc4, 0xbb, 0xbb, 0xbd, 0xbf, 0xc5, 0xff, 0xff, 0xff, 0xef, 0xd4, 0xd3, 0xe7, 0xff, 0xff, 0xff, 0xe0, 0xd1, 0xd1, 0xd4, 0xd4, 0xd5, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdf, 0xd9, 0xd9, 0xdb, 0xdb, 0xdb, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xcf, 0xb1, 0xb2, 0xb4, 0xd3, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xbb, 0xbb, 0xbd, 0xbf, 0xc0, 0xcb, 0xff, 0xff, 0xf4, 0xc7, 0xc8, 0xc8, 0xcb, 0xe7, 0xff, 0xff, 0xe4, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xe2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xdc, 0xdb, 0xdb, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe4, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbd, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xe6, 0xff, 0xff, 0xdf, 0xc5, 0xc8, 0xc8, 0xcb, 0xd4, 0xff, 0xff, 0xf7, 0xd0, 0xd1, 0xd1, 0xd1, 0xd4, 0xd4, 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xf3, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd5, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xcb, 0xfe, 0xff, 0xff, 0xd9, 0xc5, 0xc7, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xe3, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd9, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbb, 0xbb, 0xbd, 0xbd, 0xbf, 0xc0, 0xec, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc7, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xfb, 0xd4, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcb, 0xbb, 0xbc, 0xbd, 0xbf, 0xbf, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xd5, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc1, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc5, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xd7, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xc3, 0xbb, 0xbd, 0xbd, 0xbf, 0xbf, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xd1, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xbb, 0xbc, 0xbd, 0xbd, 0xbf, 0xc4, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xd1, 0xd1, 0xd1, 0xd3, 0xd4, 0xec, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbc, 0xbd, 0xbd, 0xbf, 0xbf, 0xe6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf8, 0xd1, 0xd1, 0xd1, 0xd1, 0xd4, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xbd, 0xbf, 0xbf, 0xd5, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc9, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xd1, 0xd1, 0xd1, 0xdb, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xe7, 0xd9, 0xea, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd9, 0xc7, 0xc8, 0xc8, 0xc8, 0xcd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xe7, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xdf, 0xc7, 0xc8, 0xc8, 0xc9, 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xc7, 0xc8, 0xc8, 0xc8, 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xcd, 0xcc, 0xde, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; -const unsigned char gGearPict[kGearFrames*kGearWidth*kGearHeight] = { - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x89,0x89,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe4,0xf6,0xff,0xff,0xff,0xcb,0x76,0x76,0xc9,0xff,0xff,0xff,0xfc,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc1,0x7b,0x8a,0xf6,0xff,0xff,0xc7,0x76,0x76,0xc5,0xff,0xff,0xfe,0xe6,0xe2,0xf2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb4,0x7b,0x7b,0xb5,0xff,0xff,0xc7,0x76,0x76,0xc5,0xff,0xff,0xf0,0xe2,0xe2,0xee,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0x81,0x7b,0x7e,0xec,0xff,0xc7,0x76,0x76,0xc5,0xff,0xfc,0xe4,0xe2,0xe2,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbb,0x7b,0x7b,0xa1,0xff,0xc7,0x76,0x76,0xc4,0xff,0xf2,0xe2,0xe2,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xc3,0xbb,0xee,0xff,0xff,0xfa,0x8b,0x7b,0x7b,0xe2,0xdb,0x76,0x76,0xd9,0xf3,0xe4,0xe2,0xe4,0xfe,0xff,0xff,0xfa,0xec,0xee,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xe4,0x85,0x85,0x85,0xbf,0xfa,0xff,0xd1,0x7e,0x7b,0xd7,0xff,0xdc,0xdb,0xff,0xf4,0xe2,0xe2,0xf4,0xff,0xfe,0xee,0xde,0xdc,0xdb,0xf7,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf8,0x94,0x85,0x85,0x83,0x8e,0xd3,0xff,0xd5,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xf4,0xff,0xf3,0xe0,0xdc,0xdb,0xdb,0xde,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xf8,0xbd,0x89,0x85,0x85,0x83,0xd7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xdc,0xdc,0xdb,0xdb,0xea,0xfc,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xa9,0x87,0x85,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xdb,0xd9,0xe4,0xf8,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe4,0xd9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xf4,0xd1,0xd1,0xd1,0xd1,0xd1,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xef,0xee,0xee,0xee,0xee,0xfb,0xff,0xff, - 0xff,0xff,0xa0,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0xdb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xd4,0xd4,0xd4,0xd3,0xd3,0xd3,0xd9,0xff,0xff, - 0xff,0xff,0xa1,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0xdc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd8,0xff,0xff, - 0xff,0xff,0xf4,0xd3,0xd3,0xd3,0xd3,0xd4,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xec,0xec,0xec,0xec,0xec,0xfb,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe8,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xf4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xb7,0x98,0x99,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xc8,0xc9,0xdb,0xf8,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xf8,0xc5,0x9a,0x99,0x9c,0x9c,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xc5,0xc8,0xc8,0xcb,0xe2,0xfc,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf8,0xa4,0x98,0x9a,0x9c,0xa5,0xde,0xff,0xe2,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xeb,0xff,0xeb,0xcb,0xc8,0xc8,0xc8,0xcf,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xe8,0x9a,0x9c,0x9d,0xcc,0xfb,0xff,0xdf,0xa4,0xa6,0xe4,0xff,0xeb,0xec,0xff,0xea,0xbc,0xbf,0xea,0xff,0xfc,0xe0,0xc8,0xc8,0xc8,0xf3,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xcf,0xc8,0xf2,0xff,0xff,0xfb,0xad,0xa5,0xa8,0xec,0xe8,0xb0,0xb2,0xee,0xef,0xbb,0xbd,0xc5,0xfc,0xff,0xff,0xf7,0xdf,0xe4,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcd,0xa5,0xa6,0xc3,0xff,0xde,0xb0,0xb2,0xe0,0xff,0xcf,0xbb,0xbd,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xa5,0xa5,0xa9,0xf3,0xff,0xde,0xb0,0xb2,0xe0,0xff,0xf6,0xbc,0xbd,0xc0,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc9,0xa5,0xa6,0xd0,0xff,0xff,0xde,0xb0,0xb1,0xe0,0xff,0xff,0xd8,0xbc,0xbd,0xd9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd3,0xa6,0xb1,0xf8,0xff,0xff,0xde,0xb0,0xb1,0xe0,0xff,0xff,0xfa,0xc3,0xbd,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xf8,0xff,0xff,0xff,0xe0,0xb0,0xb1,0xe3,0xff,0xff,0xff,0xfa,0xf0,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xbb,0xbc,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0x89,0x89,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe4,0xf6,0xff,0xff,0xff,0xcb,0x76,0x75,0xc9,0xff,0xff,0xff,0xfc,0xfa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc0,0x7b,0x8a,0xf6,0xff,0xff,0xc7,0x76,0x76,0xc5,0xff,0xff,0xfe,0xe6,0xe2,0xf0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb4,0x7b,0x7b,0xb5,0xff,0xff,0xc7,0x76,0x76,0xc4,0xff,0xff,0xf0,0xe2,0xe2,0xee,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0x7f,0x7b,0x7e,0xec,0xff,0xc7,0x76,0x75,0xc3,0xff,0xfc,0xe3,0xe2,0xe2,0xfb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xbb,0x7d,0x7b,0xa1,0xff,0xc7,0x76,0x75,0xc3,0xff,0xf0,0xe2,0xe2,0xef,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xc3,0xbb,0xee,0xff,0xff,0xfa,0x8b,0x7b,0x7b,0xe2,0xdb,0x76,0x73,0xdb,0xf4,0xe7,0xe2,0xe4,0xfe,0xff,0xff,0xfa,0xec,0xee,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xe4,0x85,0x85,0x85,0xbd,0xfa,0xff,0xd1,0x7e,0x7b,0xd5,0xff,0xdc,0xdb,0xff,0xf6,0xe3,0xe2,0xf4,0xff,0xfe,0xee,0xde,0xdc,0xdb,0xf7,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf8,0x94,0x85,0x85,0x83,0x8e,0xd3,0xff,0xd5,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xf4,0xff,0xf3,0xe0,0xdc,0xdb,0xdb,0xde,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xf8,0xbd,0x89,0x85,0x85,0x83,0xd7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xdc,0xdc,0xdb,0xdb,0xea,0xfc,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xa9,0x86,0x85,0xd0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xdb,0xd9,0xe4,0xf8,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe4,0xd9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xf6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xf4,0xd1,0xd1,0xd1,0xd1,0xd1,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xef,0xee,0xee,0xee,0xee,0xfb,0xff,0xff, - 0xff,0xff,0xa0,0x8e,0x8e,0x8e,0x8e,0x8e,0x8e,0xdb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xd4,0xd4,0xd4,0xd3,0xd3,0xd3,0xd9,0xff,0xff, - 0xff,0xff,0xa1,0x8f,0x8f,0x8f,0x8f,0x8f,0x8f,0xdc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xd1,0xd1,0xd1,0xd1,0xd1,0xd1,0xd8,0xff,0xff, - 0xff,0xff,0xf4,0xd3,0xd3,0xd3,0xd3,0xd4,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xec,0xec,0xec,0xec,0xec,0xfb,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe8,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xf4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xb8,0x98,0x99,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xc8,0xcb,0xdb,0xf8,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xf8,0xc5,0x9a,0x99,0x9c,0x9c,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xec,0xc7,0xc8,0xc8,0xcc,0xe3,0xfc,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf8,0xa4,0x99,0x9a,0x9c,0xa5,0xde,0xff,0xe3,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xeb,0xff,0xeb,0xcb,0xc8,0xc8,0xc8,0xcf,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xe8,0x9a,0x9c,0x9d,0xcc,0xfb,0xff,0xdf,0xa4,0xa6,0xe4,0xff,0xec,0xec,0xff,0xea,0xbd,0xbf,0xea,0xff,0xfc,0xe2,0xc8,0xc8,0xc8,0xf3,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xcf,0xc8,0xf2,0xff,0xff,0xfb,0xad,0xa5,0xa8,0xec,0xe8,0xb0,0xb4,0xee,0xef,0xbb,0xbd,0xc7,0xfc,0xff,0xff,0xf7,0xe0,0xe4,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcd,0xa5,0xa6,0xc3,0xff,0xde,0xb1,0xb4,0xe0,0xff,0xcf,0xbd,0xbf,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xa5,0xa6,0xaa,0xf3,0xff,0xde,0xb0,0xb2,0xe0,0xff,0xf6,0xbd,0xbd,0xc0,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc9,0xa5,0xa6,0xd0,0xff,0xff,0xde,0xb1,0xb2,0xe0,0xff,0xff,0xd8,0xbc,0xbf,0xd9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd4,0xa6,0xb1,0xf8,0xff,0xff,0xde,0xb1,0xb2,0xe0,0xff,0xff,0xfa,0xc3,0xbd,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xf8,0xff,0xff,0xff,0xe0,0xb1,0xb2,0xe3,0xff,0xff,0xff,0xfa,0xf0,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xbb,0xbc,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0x8f,0x8e,0xf2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xf6,0xff,0xff,0xff,0xcd,0x7d,0x7b,0xcc,0xff,0xff,0xff,0xf6,0xe4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc5,0x85,0x92,0xf6,0xff,0xff,0xcb,0x7d,0x7b,0xc8,0xff,0xff,0xf6,0x86,0x76,0xbd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb8,0x85,0x83,0xb9,0xff,0xff,0xcb,0x7d,0x7b,0xc8,0xff,0xff,0xb2,0x76,0x76,0xad,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0x89,0x85,0x86,0xee,0xff,0xcb,0x7d,0x7b,0xc8,0xff,0xec,0x79,0x76,0x79,0xec,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc0,0x85,0x85,0xa6,0xff,0xcb,0x7d,0x7b,0xc8,0xff,0x9e,0x76,0x76,0xb5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xc7,0xc0,0xef,0xff,0xff,0xfa,0x94,0x85,0x82,0xe3,0xdc,0x7e,0x7b,0xdc,0xe0,0x76,0x76,0x83,0xf8,0xff,0xff,0xfb,0xf0,0xf2,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xe6,0x8f,0x8e,0x8f,0xc4,0xfa,0xff,0xd5,0x86,0x85,0xd8,0xff,0xde,0xde,0xff,0xd5,0x76,0x75,0xcd,0xff,0xfc,0xf6,0xe3,0xe2,0xe2,0xf8,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf8,0x9c,0x8f,0x8f,0x8e,0x98,0xd7,0xff,0xd8,0xd3,0xff,0xff,0xff,0xff,0xff,0xff,0xcd,0xd0,0xff,0xec,0xe4,0xe4,0xe2,0xe2,0xe4,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xf8,0xc3,0x92,0x8f,0x8e,0x8d,0xdb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xeb,0xe3,0xe2,0xe3,0xef,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xb0,0x8f,0x8e,0xd4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xe2,0xe2,0xe8,0xfa,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xdc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xf6,0xd5,0xd5,0xd5,0xd4,0xd4,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xf2,0xf2,0xf2,0xf2,0xf2,0xfc,0xff,0xff, - 0xff,0xff,0xa9,0x98,0x98,0x98,0x98,0x98,0x98,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xdc,0xdc,0xdc,0xdc,0xdc,0xdc,0xe2,0xff,0xff, - 0xff,0xff,0xaa,0x9c,0x9c,0x9a,0x9c,0x9c,0x9c,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb,0xe0,0xff,0xff, - 
0xff,0xff,0xf6,0xd7,0xd7,0xd7,0xd7,0xd8,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xf0,0xf0,0xf0,0xf0,0xf0,0xfb,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xbf,0xa2,0xa5,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xd4,0xd4,0xe3,0xfa,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfa,0xcc,0xa5,0xa5,0xa5,0xa6,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xd1,0xd1,0xd3,0xd4,0xe8,0xfc,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfa,0xad,0xa4,0xa5,0xa6,0xb1,0xe2,0xff,0xe6,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xef,0xff,0xee,0xd5,0xd1,0xd1,0xd4,0xd8,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xeb,0xa5,0xa5,0xa8,0xd1,0xfb,0xff,0xe3,0xae,0xb1,0xe8,0xff,0xee,0xef,0xff,0xee,0xc8,0xc9,0xee,0xff,0xfc,0xe7,0xd1,0xd1,0xd3,0xf6,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xd4,0xcf,0xf3,0xff,0xff,0xfb,0xb7,0xb1,0xb2,0xef,0xec,0xbb,0xbf,0xf0,0xf2,0xc5,0xc8,0xd0,0xfc,0xff,0xff,0xf8,0xe6,0xe8,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd4,0xb0,0xb1,0xcb,0xff,0xe2,0xbb,0xbf,0xe6,0xff,0xd7,0xc8,0xc8,0xe4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xb1,0xb1,0xb4,0xf4,0xff,0xe3,0xbb,0xbf,0xe6,0xff,0xf7,0xc7,0xc8,0xcb,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd0,0xb0,0xb1,0xd5,0xff,0xff,0xe3,0xbc,0xbf,0xe6,0xff,0xff,0xdf,0xc8,0xc9,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd8,0xb1,0xbc,0xfa,0xff,0xff,0xe3,0xbc,0xbf,0xe6,0xff,0xff,0xfb,0xcd,0xc8,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xf8,0xff,0xff,0xff,0xe4,0xbc,0xbf,0xe7,0xff,0xff,0xff,0xfa,0xf3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xc5,0xc5,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0x8f,0x8e,0xf2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xf6,0xff,0xff,0xff,0xcd,0x7d,0x7b,0xcc,0xff,0xff,0xff,0xf6,0xe4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc5,0x85,0x92,0xf6,0xff,0xff,0xcb,0x7d,0x7b,0xc8,0xff,0xff,0xf6,0x86,0x76,0xbd,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xb8,0x85,0x83,0xb9,0xff,0xff,0xcb,0x7d,0x7b,0xc8,0xff,0xff,0xb2,0x76,0x76,0xad,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0x89,0x85,0x86,0xee,0xff,0xcb,0x7d,0x7b,0xc8,0xff,0xec,0x79,0x76,0x79,0xec,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc0,0x85,0x85,0xa6,0xff,0xcb,0x7d,0x7b,0xc8,0xff,0x9e,0x76,0x76,0xb5,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfe,0xc7,0xc0,0xef,0xff,0xff,0xfa,0x94,0x85,0x82,0xe3,0xdc,0x7e,0x7b,0xdc,0xe0,0x76,0x76,0x83,0xf8,0xff,0xff,0xfb,0xf0,0xf2,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xe6,0x8f,0x8e,0x8f,0xc4,0xfa,0xff,0xd5,0x86,0x85,0xd8,0xff,0xde,0xde,0xff,0xd5,0x76,0x75,0xcd,0xff,0xfc,0xf6,0xe3,0xe2,0xe2,0xf8,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf8,0x9c,0x8f,0x8f,0x8e,0x98,0xd7,0xff,0xd8,0xd3,0xff,0xff,0xff,0xff,0xff,0xff,0xcd,0xd0,0xff,0xec,0xe4,0xe4,0xe2,0xe2,0xe4,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xf8,0xc3,0x92,0x8f,0x8e,0x8d,0xdb,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xeb,0xe3,0xe2,0xe3,0xef,0xfe,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xb0,0x8f,0x8e,0xd4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xe2,0xe2,0xe8,0xfa,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe7,0xdc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xf6,0xd5,0xd5,0xd5,0xd4,0xd4,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xf2,0xf2,0xf2,0xf2,0xf2,0xfc,0xff,0xff, - 0xff,0xff,0xa9,0x98,0x98,0x98,0x98,0x98,0x98,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xdc,0xdc,0xdc,0xdc,0xdc,0xdc,0xe2,0xff,0xff, - 0xff,0xff,0xaa,0x9c,0x9c,0x9a,0x9c,0x9c,0x9c,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xdb,0xdb,0xdb,0xdb,0xdb,0xdb,0xe0,0xff,0xff, - 0xff,0xff,0xf6,0xd7,0xd7,0xd7,0xd7,0xd8,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf4,0xf0,0xf0,0xf0,0xf0,0xf0,0xfb,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xbf,0xa2,0xa5,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xee,0xd4,0xd4,0xe3,0xfa,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfa,0xcc,0xa5,0xa5,0xa5,0xa6,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf0,0xd1,0xd1,0xd3,0xd4,0xe8,0xfc,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfa,0xad,0xa4,0xa5,0xa6,0xb1,0xe2,0xff,0xe6,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xeb,0xef,0xff,0xee,0xd5,0xd1,0xd1,0xd4,0xd8,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xeb,0xa5,0xa5,0xa8,0xd1,0xfb,0xff,0xe3,0xae,0xb1,0xe8,0xff,0xee,0xef,0xff,0xee,0xc8,0xc9,0xee,0xff,0xfc,0xe7,0xd1,0xd1,0xd3,0xf6,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xd4,0xcf,0xf3,0xff,0xff,0xfb,0xb7,0xb1,0xb2,0xef,0xec,0xbb,0xbf,0xf0,0xf2,0xc5,0xc8,0xd0,0xfc,0xff,0xff,0xf8,0xe6,0xe8,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd4,0xb0,0xb1,0xcb,0xff,0xe2,0xbb,0xbf,0xe6,0xff,0xd7,0xc8,0xc8,0xe4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xb1,0xb1,0xb4,0xf4,0xff,0xe3,0xbb,0xbf,0xe6,0xff,0xf7,0xc7,0xc8,0xcb,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd0,0xb0,0xb1,0xd5,0xff,0xff,0xe3,0xbc,0xbf,0xe6,0xff,0xff,0xdf,0xc8,0xc9,0xe0,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd8,0xb1,0xbc,0xfa,0xff,0xff,0xe3,0xbc,0xbf,0xe6,0xff,0xff,0xfb,0xcd,0xc8,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xef,0xf8,0xff,0xff,0xff,0xe4,0xbc,0xbf,0xe7,0xff,0xff,0xff,0xfa,0xf3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf8,0xc5,0xc5,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0x9c,0x92,0xee,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xf4,0xff,0xff,0xff,0xd8,0x85,0x85,0xc8,0xff,0xff,0xff,0xf7,0xe6,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd0,0x8f,0x99,0xf3,0xff,0xff,0xd5,0x85,0x85,0xc5,0xff,0xff,0xfa,0x92,0x7b,0xb7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc4,0x8f,0x8e,0xb7,0xff,0xff,0xd5,0x85,0x85,0xc4,0xff,0xff,0xc1,0x7d,0x7b,0xaa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0x96,0x8e,0x8f,0xea,0xff,0xd5,0x86,0x85,0xc4,0xff,0xf2,0x83,0x7b,0x7e,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcc,0x8f,0x8e,0xa8,0xff,0xd5,0x86,0x85,0xc4,0xff,0xac,0x7d,0x7b,0xb1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xd1,0xc4,0xee,0xff,0xff,0xfc,0xa1,0x8e,0x8d,0xde,0xe7,0x86,0x85,0xd4,0xeb,0x7e,0x7b,0x83,0xf6,0xff,0xff,0xef,0xb5,0xb8,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf0,0x9c,0x99,0x99,0xc5,0xf8,0xff,0xdf,0x91,0x8f,0xd4,0xff,0xe3,0xdc,0xff,0xe2,0x7d,0x7b,0xc8,0xff,0xfb,0xbd,0x7d,0x76,0x76,0xd8,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfc,0xaa,0x9c,0x9a,0x98,0x9e,0xd7,0xff,0xde,0xd0,0xfe,0xff,0xff,0xff,0xff,0xff,0xd3,0xcd,0xff,0xd5,0x86,0x76,0x76,0x76,0x82,0xf2,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfb,0xcb,0x9d,0x9c,0x9a,0x98,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdc,0x76,0x76,0x76,0x75,0xa9,0xf4,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xbc,0x9c,0x9c,0xd1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd1,0x76,0x75,0x94,0xe4,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xde,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd3,0xd9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfa,0xdb,0xd9,0xd9,0xd9,0xd9,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xef,0xf8,0xf6,0xf4,0xf4,0xfc,0xff,0xff, - 0xff,0xff,0xb8,0xa5,0xa5,0xa5,0xa4,0xa4,0xa4,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xe3,0xe3,0xe2,0xe2,0xe2,0xe2,0xe6,0xff,0xff, - 
0xff,0xff,0xb9,0xa6,0xa6,0xa6,0xa6,0xa6,0xa8,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe4,0xff,0xff, - 0xff,0xff,0xf8,0xdc,0xdc,0xdc,0xdc,0xdc,0xe4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xf2,0xf3,0xf3,0xf3,0xf3,0xfb,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xcb,0xae,0xb1,0xdc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xdc,0xdc,0xe7,0xfa,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfc,0xd8,0xb1,0xb0,0xb1,0xb2,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xd9,0xdb,0xdc,0xdc,0xeb,0xfc,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfc,0xbc,0xb1,0xb1,0xb1,0xb9,0xe3,0xff,0xeb,0xe3,0xfe,0xff,0xff,0xff,0xff,0xff,0xf0,0xf0,0xff,0xf3,0xde,0xdb,0xdb,0xdc,0xe0,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf2,0xb1,0xb1,0xb4,0xd5,0xfb,0xff,0xeb,0xbb,0xbd,0xe7,0xff,0xf3,0xf2,0xff,0xf4,0xd1,0xd4,0xee,0xff,0xfe,0xee,0xdb,0xdb,0xdb,0xf4,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xdc,0xd4,0xf3,0xff,0xff,0xfe,0xc4,0xbc,0xbf,0xee,0xf4,0xc5,0xc8,0xef,0xf7,0xd1,0xd1,0xd7,0xfc,0xff,0xff,0xfb,0xeb,0xec,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xbb,0xbd,0xcf,0xff,0xea,0xc5,0xc8,0xe7,0xff,0xe0,0xd1,0xd1,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xbd,0xbd,0xc0,0xf3,0xff,0xeb,0xc7,0xc8,0xe7,0xff,0xfa,0xd3,0xd1,0xd4,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdb,0xbd,0xbd,0xd7,0xff,0xff,0xeb,0xc7,0xc8,0xe7,0xff,0xff,0xe8,0xd1,0xd3,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe3,0xbd,0xc5,0xf8,0xff,0xff,0xeb,0xc7,0xc8,0xe7,0xff,0xff,0xfc,0xd8,0xd1,0xe8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xfa,0xff,0xff,0xff,0xec,0xc7,0xc8,0xe7,0xff,0xff,0xff,0xfc,0xf4,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xd0,0xcf,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0x9c,0x92,0xee,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xf4,0xff,0xff,0xff,0xd8,0x85,0x85,0xc8,0xff,0xff,0xff,0xf7,0xe6,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd0,0x8f,0x99,0xf3,0xff,0xff,0xd5,0x85,0x85,0xc5,0xff,0xff,0xfa,0x92,0x7b,0xb7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xc4,0x8f,0x8e,0xb7,0xff,0xff,0xd5,0x85,0x85,0xc4,0xff,0xff,0xc1,0x7d,0x7b,0xaa,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0x96,0x8e,0x8f,0xea,0xff,0xd5,0x86,0x85,0xc4,0xff,0xf2,0x83,0x7b,0x7e,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xcc,0x8f,0x8e,0xa8,0xff,0xd5,0x86,0x85,0xc4,0xff,0xac,0x7d,0x7b,0xb1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xd1,0xc4,0xee,0xff,0xff,0xfc,0xa1,0x8e,0x8d,0xde,0xe7,0x86,0x85,0xd4,0xeb,0x7e,0x7b,0x83,0xf6,0xff,0xff,0xef,0xb5,0xb8,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf0,0x9c,0x99,0x99,0xc5,0xf8,0xff,0xdf,0x91,0x8f,0xd4,0xff,0xe3,0xdc,0xff,0xe2,0x7d,0x7b,0xc8,0xff,0xfb,0xbd,0x7d,0x76,0x76,0xd8,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfc,0xaa,0x9c,0x9a,0x98,0x9e,0xd7,0xff,0xde,0xd0,0xfe,0xff,0xff,0xff,0xff,0xff,0xd3,0xcd,0xff,0xd5,0x86,0x76,0x76,0x76,0x82,0xf2,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfb,0xcb,0x9d,0x9c,0x9a,0x98,0xd8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdc,0x76,0x76,0x76,0x75,0xa9,0xf4,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xbc,0x9c,0x9c,0xd1,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd1,0x76,0x75,0x94,0xe4,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xea,0xde,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xd3,0xd9,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xfa,0xdb,0xd9,0xd9,0xd9,0xd9,0xe2,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xef,0xf8,0xf6,0xf4,0xf4,0xfc,0xff,0xff, - 0xff,0xff,0xb8,0xa5,0xa5,0xa5,0xa4,0xa4,0xa4,0xde,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xe3,0xe3,0xe2,0xe2,0xe2,0xe2,0xe6,0xff,0xff, - 0xff,0xff,0xb9,0xa6,0xa6,0xa6,0xa6,0xa6,0xa8,0xdf,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfa,0xe2,0xe2,0xe2,0xe2,0xe2,0xe2,0xe4,0xff,0xff, - 0xff,0xff,0xf8,0xdc,0xdc,0xdc,0xdc,0xdc,0xe4,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xf2,0xf3,0xf3,0xf3,0xf3,0xfb,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xcb,0xae,0xb1,0xdc,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf3,0xdc,0xdc,0xe7,0xfa,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xfc,0xd8,0xb1,0xb0,0xb1,0xb2,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf6,0xd9,0xdb,0xdc,0xdc,0xeb,0xfc,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xfc,0xbc,0xb1,0xb1,0xb1,0xb9,0xe3,0xff,0xeb,0xe3,0xfe,0xff,0xff,0xff,0xff,0xff,0xf0,0xf0,0xff,0xf3,0xde,0xdb,0xdb,0xdc,0xe0,0xfc,0xff,0xff,0xff, - 0xff,0xff,0xff,0xf2,0xb1,0xb1,0xb4,0xd5,0xfb,0xff,0xeb,0xbb,0xbd,0xe7,0xff,0xf3,0xf2,0xff,0xf4,0xd1,0xd4,0xee,0xff,0xfe,0xee,0xdb,0xdb,0xdb,0xf4,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xdc,0xd4,0xf3,0xff,0xff,0xfe,0xc4,0xbc,0xbf,0xee,0xf4,0xc5,0xc8,0xef,0xf7,0xd1,0xd1,0xd7,0xfc,0xff,0xff,0xfb,0xeb,0xec,0xfe,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdf,0xbb,0xbd,0xcf,0xff,0xea,0xc5,0xc8,0xe7,0xff,0xe0,0xd1,0xd1,0xe6,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf7,0xbd,0xbd,0xc0,0xf3,0xff,0xeb,0xc7,0xc8,0xe7,0xff,0xfa,0xd3,0xd1,0xd4,0xf7,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 
0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xdb,0xbd,0xbd,0xd7,0xff,0xff,0xeb,0xc7,0xc8,0xe7,0xff,0xff,0xe8,0xd1,0xd3,0xe3,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xe3,0xbd,0xc5,0xf8,0xff,0xff,0xeb,0xc7,0xc8,0xe7,0xff,0xff,0xfc,0xd8,0xd1,0xe8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xf2,0xfa,0xff,0xff,0xff,0xec,0xc7,0xc8,0xe7,0xff,0xff,0xff,0xfc,0xf4,0xfe,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xfb,0xd0,0xcf,0xf8,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, - 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff, +const unsigned char gGearPict[kGearFrames * kGearWidth * kGearHeight] = { + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x89, 0x89, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xf6, 0xff, 0xff, 0xff, 0xcb, 0x76, 0x76, 0xc9, 0xff, 0xff, 0xff, 0xfc, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc1, 0x7b, 0x8a, 0xf6, 0xff, 0xff, 0xc7, 0x76, 0x76, 0xc5, 0xff, 0xff, 0xfe, 0xe6, 0xe2, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x7b, 0x7b, 0xb5, 0xff, 0xff, 0xc7, 0x76, 0x76, 0xc5, 0xff, 0xff, 0xf0, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0x81, 0x7b, 0x7e, 0xec, 0xff, 0xc7, 0x76, 0x76, 0xc5, 0xff, 0xfc, 0xe4, 0xe2, 0xe2, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x7b, 0x7b, 0xa1, 0xff, 0xc7, 0x76, 0x76, 0xc4, 0xff, 0xf2, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xc3, 0xbb, 0xee, 0xff, 0xff, 0xfa, 0x8b, 0x7b, 0x7b, 0xe2, 0xdb, 0x76, 0x76, 0xd9, 0xf3, 0xe4, 0xe2, 0xe4, 0xfe, 0xff, 0xff, 0xfa, 0xec, 0xee, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xe4, 0x85, 0x85, 0x85, 0xbf, 0xfa, 0xff, 0xd1, 0x7e, 0x7b, 0xd7, 0xff, 0xdc, 0xdb, 0xff, 0xf4, 0xe2, 0xe2, 0xf4, 0xff, 0xfe, 0xee, 0xde, 0xdc, 0xdb, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf8, 0x94, 0x85, 0x85, 0x83, 0x8e, 0xd3, 0xff, 0xd5, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xf4, 0xff, 0xf3, 0xe0, 0xdc, 0xdb, 0xdb, 0xde, 0xfc, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf8, 0xbd, 0x89, 0x85, 0x85, 0x83, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdc, 0xdc, 0xdb, 0xdb, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa9, 0x87, 0x85, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xdb, 0xd9, 0xe4, 0xf8, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xf4, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xef, 0xee, 0xee, 0xee, 0xee, 0xfb, 0xff, 0xff, + 0xff, 0xff, 0xa0, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xd4, 0xd4, 0xd4, 0xd3, 0xd3, 0xd3, 0xd9, 0xff, 0xff, + 0xff, 0xff, 0xa1, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd8, 0xff, 0xff, + 0xff, 0xff, 0xf4, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xec, 0xec, 0xec, 0xec, 0xec, 0xfb, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb7, 0x98, 0x99, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xc8, 0xc9, 0xdb, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc5, 0x9a, 0x99, 0x9c, 0x9c, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xc5, 0xc8, 0xc8, 0xcb, 0xe2, 0xfc, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf8, 0xa4, 0x98, 0x9a, 0x9c, 0xa5, 0xde, 0xff, 0xe2, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xeb, 0xff, 0xeb, 0xcb, 0xc8, 0xc8, 0xc8, 0xcf, 0xfc, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xe8, 0x9a, 0x9c, 0x9d, 0xcc, 0xfb, 0xff, 0xdf, 0xa4, 0xa6, 0xe4, 0xff, 0xeb, 0xec, 0xff, 0xea, 0xbc, 0xbf, 0xea, 0xff, 0xfc, 0xe0, 0xc8, 0xc8, 0xc8, 0xf3, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcf, 0xc8, 0xf2, 0xff, 0xff, 0xfb, 0xad, 0xa5, 0xa8, 0xec, 0xe8, 0xb0, 0xb2, 0xee, 0xef, 0xbb, 0xbd, 0xc5, 0xfc, 0xff, 0xff, 0xf7, 0xdf, 0xe4, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xa5, 0xa6, 0xc3, 0xff, 0xde, 0xb0, 0xb2, 0xe0, 0xff, 0xcf, 0xbb, 0xbd, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xa5, 0xa5, 0xa9, 0xf3, 0xff, 0xde, 0xb0, 0xb2, 0xe0, 0xff, 0xf6, 0xbc, 0xbd, 0xc0, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xa5, 0xa6, 0xd0, 0xff, 0xff, 0xde, 0xb0, 0xb1, 0xe0, 0xff, 0xff, 0xd8, 0xbc, 0xbd, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd3, 0xa6, 0xb1, 0xf8, 0xff, 0xff, 0xde, 0xb0, 0xb1, 0xe0, 0xff, 0xff, 0xfa, 0xc3, 0xbd, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xf8, 0xff, 0xff, 0xff, 0xe0, 0xb0, 0xb1, 0xe3, 0xff, 0xff, 0xff, 0xfa, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbb, 0xbc, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0x89, 0x89, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xf6, 0xff, 0xff, 0xff, 0xcb, 0x76, 0x75, 0xc9, 0xff, 0xff, 0xff, 0xfc, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc0, 0x7b, 0x8a, 0xf6, 0xff, 0xff, 0xc7, 0x76, 0x76, 0xc5, 0xff, 0xff, 0xfe, 0xe6, 0xe2, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xb4, 0x7b, 0x7b, 0xb5, 0xff, 0xff, 0xc7, 0x76, 0x76, 0xc4, 0xff, 0xff, 0xf0, 0xe2, 0xe2, 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0x7f, 0x7b, 0x7e, 0xec, 0xff, 0xc7, 0x76, 0x75, 0xc3, 0xff, 0xfc, 0xe3, 0xe2, 0xe2, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbb, 0x7d, 0x7b, 0xa1, 0xff, 0xc7, 0x76, 0x75, 0xc3, 0xff, 0xf0, 0xe2, 0xe2, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xfe, 0xc3, 0xbb, 0xee, 0xff, 0xff, 0xfa, 0x8b, 0x7b, 0x7b, 0xe2, 0xdb, 0x76, 0x73, 0xdb, 0xf4, 0xe7, 0xe2, 0xe4, 0xfe, 0xff, 0xff, 0xfa, 0xec, 0xee, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xe4, 0x85, 0x85, 0x85, 0xbd, 0xfa, 0xff, 0xd1, 0x7e, 0x7b, 0xd5, 0xff, 0xdc, 0xdb, 0xff, 0xf6, 0xe3, 0xe2, 0xf4, 0xff, 0xfe, 0xee, 0xde, 0xdc, 0xdb, 0xf7, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf8, 0x94, 0x85, 0x85, 0x83, 0x8e, 0xd3, 0xff, 0xd5, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xf4, 0xff, 0xf3, 0xe0, 0xdc, 0xdb, 0xdb, 0xde, 0xfc, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf8, 0xbd, 0x89, 0x85, 0x85, 0x83, 0xd7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf4, 0xdc, 0xdc, 0xdb, 0xdb, 0xea, 0xfc, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa9, 0x86, 0x85, 0xd0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xdb, 0xd9, 0xe4, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe4, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xf4, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf3, 0xef, 0xee, 0xee, 0xee, 0xee, 0xfb, 0xff, 0xff, + 0xff, 0xff, 0xa0, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0x8e, 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xd4, 0xd4, 0xd4, 0xd3, 0xd3, 0xd3, 0xd9, 0xff, 0xff, + 0xff, 0xff, 0xa1, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd1, 0xd8, 0xff, 0xff, + 0xff, 0xff, 0xf4, 0xd3, 0xd3, 0xd3, 0xd3, 0xd4, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xec, 0xec, 0xec, 0xec, 0xec, 0xfb, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe8, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xef, 0xf4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xb8, 0x98, 0x99, 0xd8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xea, 0xc8, 0xcb, 0xdb, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xf8, 0xc5, 0x9a, 0x99, 0x9c, 0x9c, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xc7, 0xc8, 0xc8, 0xcc, 0xe3, 0xfc, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xf8, 0xa4, 0x99, 0x9a, 0x9c, 0xa5, 0xde, 0xff, 0xe3, 0xdf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xeb, 0xff, 0xeb, 0xcb, 0xc8, 0xc8, 0xc8, 0xcf, 0xfc, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xe8, 0x9a, 0x9c, 0x9d, 0xcc, 0xfb, 0xff, 0xdf, 0xa4, 0xa6, 0xe4, 0xff, 0xec, 0xec, 0xff, 0xea, 0xbd, 0xbf, 0xea, 0xff, 0xfc, 0xe2, 0xc8, 0xc8, 0xc8, 0xf3, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xcf, 0xc8, 0xf2, 0xff, 0xff, 0xfb, 0xad, 0xa5, 0xa8, 0xec, 0xe8, 0xb0, 0xb4, 0xee, 0xef, 0xbb, 0xbd, 0xc7, 0xfc, 0xff, 0xff, 0xf7, 0xe0, 0xe4, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcd, 0xa5, 0xa6, 0xc3, 0xff, 0xde, 0xb1, 0xb4, 0xe0, 0xff, 0xcf, 0xbd, 0xbf, 0xde, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0xa5, 0xa6, 0xaa, 0xf3, 0xff, 0xde, 0xb0, 0xb2, 0xe0, 0xff, 0xf6, 0xbd, 0xbd, 0xc0, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc9, 0xa5, 0xa6, 0xd0, 0xff, 0xff, 0xde, 0xb1, 0xb2, 0xe0, 0xff, 0xff, 0xd8, 0xbc, 0xbf, 0xd9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xd4, 0xa6, 0xb1, 0xf8, 0xff, 0xff, 0xde, 0xb1, 0xb2, 0xe0, 0xff, 0xff, 0xfa, 0xc3, 0xbd, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xee, 0xf8, 0xff, 0xff, 0xff, 0xe0, 0xb1, 0xb2, 0xe3, 0xff, 0xff, 0xff, 0xfa, 0xf0, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf7, 0xbb, 0xbc, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf2, 0x8f, 0x8e, 0xf2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe7, 0xf6, 0xff, 0xff, 0xff, 0xcd, 0x7d, 0x7b, 0xcc, 0xff, 0xff, 0xff, 0xf6, 0xe4, 0xff, 0xff, 0xff, 0xff, 
/* ... remaining rows of raw grayscale image pixel data (0x00-0xff bytes) elided ... */ }; diff --git a/pexpert/pexpert/arm/AIC.h b/pexpert/pexpert/arm/AIC.h index df60b1017..002ca6c91 100644 --- a/pexpert/pexpert/arm/AIC.h +++ b/pexpert/pexpert/arm/AIC.h @@ -5,16 +5,18 @@ #ifndef _PEXPERT_ARM_AIC_H #define _PEXPERT_ARM_AIC_H -#ifndef ASSEMBLER +#ifndef ASSEMBLER -static inline unsigned long _aic_read32(unsigned long addr) +static inline unsigned long +_aic_read32(unsigned long addr) { unsigned long data; data = *(volatile unsigned *)addr; return data; } -static inline void _aic_write32(unsigned long addr, unsigned long data) +static inline void +_aic_write32(unsigned long addr, unsigned long data) { *(volatile unsigned *)(addr) = data; } @@ -25,64 +27,64 @@ static inline void _aic_write32(unsigned long addr, unsigned long data) #endif // AIC -#define
kAICAicRev (0x0000) -#define kAICAicCap0 (0x0004) -#define kAICAicCap0Int(n) ((n) & 0x3FF) -#define kAICAicCap0Proc(n) ((((n) >> 16) & 0x1F) + 1) -#define kAICAicCap1 (0x0008) -#define kAICAicRst (0x000C) -#define kAICGlbCfg (0x0010) -#define kAICMainTimLo (0x0020) -#define kAICMainTimHi (0x0028) -#define kAICIPINormalDbg (0x0030) -#define kAICIPISelfDbg (0x0034) +#define kAICAicRev (0x0000) +#define kAICAicCap0 (0x0004) +#define kAICAicCap0Int(n) ((n) & 0x3FF) +#define kAICAicCap0Proc(n) ((((n) >> 16) & 0x1F) + 1) +#define kAICAicCap1 (0x0008) +#define kAICAicRst (0x000C) +#define kAICGlbCfg (0x0010) +#define kAICMainTimLo (0x0020) +#define kAICMainTimHi (0x0028) +#define kAICIPINormalDbg (0x0030) +#define kAICIPISelfDbg (0x0034) -#define kAICWhoAmI (0x2000) -#define kAICIack (0x2004) -#define kAICIackVecType(n) (((n) >> 16) & 0x7) -#define kAICIackVecTypeSpurious (0) -#define kAICIackVecTypeExtInt (1) -#define kAICIackVecTypeIPI (4) -#define kAICIackVecTypeTimer (7) -#define kAICIackVecExtInt(n) ((n) & 0x3FF) -#define kAICIackVecIPIType(n) ((n) & 0x003) -#define kAICIackVecIPITypeNormal (1) -#define kAICIackVecIPITypeSelf (2) -#define kAICIPISet (0x2008) -#define kAICIPIClr (0x200C) -#define kAICIPIClrSelf (0x80000000) -#define kAICTmrCfg (0x2010) -#define kAICTmrCfgEn (1) -#define kAICTmrCfgFslPTI (0 << 4) -#define kAICTmrCfgFslSGTI (1 << 4) -#define kAICTmrCfgFslETI (2 << 4) -#define kAICTmrCnt (0x2014) -#define kAICTmrIntStat (0x2018) -#define kAICTmrIntStatPct (1) -#define kAICTmrStateSet (0x201C) -#define kAICTmrStateClr (0x2020) -#define kAICBankedCoreRegs (0x2000) -#define kAICBankedCoreTmrCnt (0x14) -#define kAICBankedCoreTmrIntStat (0x18) +#define kAICWhoAmI (0x2000) +#define kAICIack (0x2004) +#define kAICIackVecType(n) (((n) >> 16) & 0x7) +#define kAICIackVecTypeSpurious (0) +#define kAICIackVecTypeExtInt (1) +#define kAICIackVecTypeIPI (4) +#define kAICIackVecTypeTimer (7) +#define kAICIackVecExtInt(n) ((n) & 0x3FF) +#define kAICIackVecIPIType(n) ((n) & 0x003) +#define kAICIackVecIPITypeNormal (1) +#define kAICIackVecIPITypeSelf (2) +#define kAICIPISet (0x2008) +#define kAICIPIClr (0x200C) +#define kAICIPIClrSelf (0x80000000) +#define kAICTmrCfg (0x2010) +#define kAICTmrCfgEn (1) +#define kAICTmrCfgFslPTI (0 << 4) +#define kAICTmrCfgFslSGTI (1 << 4) +#define kAICTmrCfgFslETI (2 << 4) +#define kAICTmrCnt (0x2014) +#define kAICTmrIntStat (0x2018) +#define kAICTmrIntStatPct (1) +#define kAICTmrStateSet (0x201C) +#define kAICTmrStateClr (0x2020) +#define kAICBankedCoreRegs (0x2000) +#define kAICBankedCoreTmrCnt (0x14) +#define kAICBankedCoreTmrIntStat (0x18) -#define kAICTgtDst(n) (0x3000 + (n) * 4) -#define kAICSwGenSet(n) (0x4000 + (n) * 4) -#define kAICSwGenClr(n) (0x4080 + (n) * 4) -#define kAICIntMaskSet(n) (0x4100 + (n) * 4) -#define kAICIntMaskClr(n) (0x4180 + (n) * 4) -#define kAICHwIntMon(n) (0x4200 + (n) * 4) +#define kAICTgtDst(n) (0x3000 + (n) * 4) +#define kAICSwGenSet(n) (0x4000 + (n) * 4) +#define kAICSwGenClr(n) (0x4080 + (n) * 4) +#define kAICIntMaskSet(n) (0x4100 + (n) * 4) +#define kAICIntMaskClr(n) (0x4180 + (n) * 4) +#define kAICHwIntMon(n) (0x4200 + (n) * 4) -#define kAICAliasWhoAmI(n) (0x5000 + (n) * 0x80 + 0x00) -#define kAICAliasIack(n) (0x5000 + (n) * 0x80 + 0x04) -#define kAICAliasIPISet(n) (0x5000 + (n) * 0x80 + 0x08) -#define kAICAliasIPIClr(n) (0x5000 + (n) * 0x80 + 0x0C) -#define kAICAliasTmrCfg(n) (0x5000 + (n) * 0x80 + 0x10) -#define kAICAliasTmrCnt(n) (0x5000 + (n) * 0x80 + 0x14) -#define kAICAliasTmrIntStat(n) (0x5000 + (n) * 0x80 + 0x18) 
-#define kAICAliasTmrStateSet(n) (0x5000 + (n) * 0x80 + 0x1C) -#define kAICAliasTmrStateClr(n) (0x5000 + (n) * 0x80 + 0x20) +#define kAICAliasWhoAmI(n) (0x5000 + (n) * 0x80 + 0x00) +#define kAICAliasIack(n) (0x5000 + (n) * 0x80 + 0x04) +#define kAICAliasIPISet(n) (0x5000 + (n) * 0x80 + 0x08) +#define kAICAliasIPIClr(n) (0x5000 + (n) * 0x80 + 0x0C) +#define kAICAliasTmrCfg(n) (0x5000 + (n) * 0x80 + 0x10) +#define kAICAliasTmrCnt(n) (0x5000 + (n) * 0x80 + 0x14) +#define kAICAliasTmrIntStat(n) (0x5000 + (n) * 0x80 + 0x18) +#define kAICAliasTmrStateSet(n) (0x5000 + (n) * 0x80 + 0x1C) +#define kAICAliasTmrStateClr(n) (0x5000 + (n) * 0x80 + 0x20) -#define kAICExtIntShift (5) -#define kAICExtIntMask (0x1F) +#define kAICExtIntShift (5) +#define kAICExtIntMask (0x1F) #endif /* ! _PEXPERT_ARM_AIC_H */ diff --git a/pexpert/pexpert/arm/PL192_VIC.h b/pexpert/pexpert/arm/PL192_VIC.h index 091ab72a6..84a609166 100644 --- a/pexpert/pexpert/arm/PL192_VIC.h +++ b/pexpert/pexpert/arm/PL192_VIC.h @@ -8,26 +8,26 @@ #define ARM_CELL_PL192_VIC // VIC -#define rVICIRQSTATUS (*(volatile unsigned *)(pic_base + 0x000)) // VIC IRQ Status Register -#define rVICFIQSTATUS (*(volatile unsigned *)(pic_base + 0x004)) // VIC FIQ Status Register -#define rVICRAWINTR (*(volatile unsigned *)(pic_base + 0x008)) // VIC Raw Interrupt Status Register -#define rVICINTSELECT (*(volatile unsigned *)(pic_base + 0x00C)) // VIC Interrupt Select Register -#define rVICINTENABLE (*(volatile unsigned *)(pic_base + 0x010)) // VIC Interrupt Enable Register -#define rVICINTENCLEAR (*(volatile unsigned *)(pic_base + 0x014)) // VIC Interrupt Enable Clear Register -#define rVICSOFTINT (*(volatile unsigned *)(pic_base + 0x018)) // VIC Soft Interrupt Register -#define rVICSOFTINTCLEAR (*(volatile unsigned *)(pic_base + 0x01C)) // VIC Soft Interrupt Clear Register -#define rVICPROTECTION (*(volatile unsigned *)(pic_base + 0x020)) // VIC Protection Register -#define rVICSWPRIORITYMASK (*(volatile unsigned *)(pic_base + 0x024)) // VIC Software Priority Mask Register -#define rVICPRIORITYDAISY (*(volatile unsigned *)(pic_base + 0x028)) // VIC Priority Daisy Chain Register -#define rVICVECTOR(x) (*(volatile unsigned *)(pic_base + 0x100 + 4 * (x))) // VIC Vector Registers -#define rVICVECTPRIORITY(x) (*(volatile unsigned *)(pic_base + 0x200 + 4 * (x))) // VIC Vector Priority Registers -#define rVICPERIPHID0 (*(volatile unsigned *)(pic_base + 0xFE0)) // VIC Peripheral ID 0 Register -#define rVICPERIPHID1 (*(volatile unsigned *)(pic_base + 0xFE4)) // VIC Peripheral ID 1 Register -#define rVICPERIPHID2 (*(volatile unsigned *)(pic_base + 0xFE8)) // VIC Peripheral ID 2 Register -#define rVICPERIPHID3 (*(volatile unsigned *)(pic_base + 0xFEC)) // VIC Peripheral ID 3 Register -#define rVICPCELLID0 (*(volatile unsigned *)(pic_base + 0xFF0)) // VIC PrimeCell ID 0 Register -#define rVICPCELLID1 (*(volatile unsigned *)(pic_base + 0xFF4)) // VIC PrimeCell ID 1 Register -#define rVICPCELLID2 (*(volatile unsigned *)(pic_base + 0xFF8)) // VIC PrimeCell ID 2 Register -#define rVICPCELLID3 (*(volatile unsigned *)(pic_base + 0xFFC)) // VIC PrimeCell ID 3 Register +#define rVICIRQSTATUS (*(volatile unsigned *)(pic_base + 0x000)) // VIC IRQ Status Register +#define rVICFIQSTATUS (*(volatile unsigned *)(pic_base + 0x004)) // VIC FIQ Status Register +#define rVICRAWINTR (*(volatile unsigned *)(pic_base + 0x008)) // VIC Raw Interrupt Status Register +#define rVICINTSELECT (*(volatile unsigned *)(pic_base + 0x00C)) // VIC Interrupt Select Register +#define rVICINTENABLE 
(*(volatile unsigned *)(pic_base + 0x010)) // VIC Interrupt Enable Register +#define rVICINTENCLEAR (*(volatile unsigned *)(pic_base + 0x014)) // VIC Interrupt Enable Clear Register +#define rVICSOFTINT (*(volatile unsigned *)(pic_base + 0x018)) // VIC Soft Interrupt Register +#define rVICSOFTINTCLEAR (*(volatile unsigned *)(pic_base + 0x01C)) // VIC Soft Interrupt Clear Register +#define rVICPROTECTION (*(volatile unsigned *)(pic_base + 0x020)) // VIC Protection Register +#define rVICSWPRIORITYMASK (*(volatile unsigned *)(pic_base + 0x024)) // VIC Software Priority Mask Register +#define rVICPRIORITYDAISY (*(volatile unsigned *)(pic_base + 0x028)) // VIC Priority Daisy Chain Register +#define rVICVECTOR(x) (*(volatile unsigned *)(pic_base + 0x100 + 4 * (x))) // VIC Vector Registers +#define rVICVECTPRIORITY(x) (*(volatile unsigned *)(pic_base + 0x200 + 4 * (x))) // VIC Vector Priority Registers +#define rVICPERIPHID0 (*(volatile unsigned *)(pic_base + 0xFE0)) // VIC Peripheral ID 0 Register +#define rVICPERIPHID1 (*(volatile unsigned *)(pic_base + 0xFE4)) // VIC Peripheral ID 1 Register +#define rVICPERIPHID2 (*(volatile unsigned *)(pic_base + 0xFE8)) // VIC Peripheral ID 2 Register +#define rVICPERIPHID3 (*(volatile unsigned *)(pic_base + 0xFEC)) // VIC Peripheral ID 3 Register +#define rVICPCELLID0 (*(volatile unsigned *)(pic_base + 0xFF0)) // VIC PrimeCell ID 0 Register +#define rVICPCELLID1 (*(volatile unsigned *)(pic_base + 0xFF4)) // VIC PrimeCell ID 1 Register +#define rVICPCELLID2 (*(volatile unsigned *)(pic_base + 0xFF8)) // VIC PrimeCell ID 2 Register +#define rVICPCELLID3 (*(volatile unsigned *)(pic_base + 0xFFC)) // VIC PrimeCell ID 3 Register #endif /* ! _PEXPERT_ARM_PL192_VIC_H */ diff --git a/pexpert/pexpert/arm/S3cUART.h b/pexpert/pexpert/arm/S3cUART.h index a8410b685..96cc6ffc9 100644 --- a/pexpert/pexpert/arm/S3cUART.h +++ b/pexpert/pexpert/arm/S3cUART.h @@ -4,7 +4,7 @@ #ifndef _PEXPERT_ARM_S3CUART_H #define _PEXPERT_ARM_S3CUART_H -#define S3CUART +#define S3CUART // UART diff --git a/pexpert/pexpert/arm/S7002.h b/pexpert/pexpert/arm/S7002.h index 9774e01b8..a39829ba3 100644 --- a/pexpert/pexpert/arm/S7002.h +++ b/pexpert/pexpert/arm/S7002.h @@ -5,37 +5,37 @@ #ifndef _PEXPERT_ARM_S7002_H #define _PEXPERT_ARM_S7002_H -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include -#define rPMGR_EVENT_TMR (*(volatile unsigned *) (timer_base + 0x00100)) -#define rPMGR_EVENT_TMR_PERIOD (*(volatile unsigned *) (timer_base + 0x00104)) -#define rPMGR_EVENT_TMR_CTL (*(volatile unsigned *) (timer_base + 0x00108)) -#define rPMGR_INTERVAL_TMR (*(volatile unsigned *) (timer_base + 0x00200)) -#define rPMGR_INTERVAL_TMR_CTL (*(volatile unsigned *) (timer_base + 0x00204)) - -#define PMGR_EVENT_TMR_CTL_EN (1 << 0) -#define PMGR_INTERVAL_TMR_CTL_EN (1 << 0) -#define PMGR_INTERVAL_TMR_CTL_CLR_INT (1 << 8) - -#define DOCKFIFO_UART (1) -#define DOCKFIFO_UART_WRITE (0) -#define DOCKFIFO_UART_READ (1) -#define DOCKFIFO_W_SPACING (0x1000) -#define DOCKFIFO_SPACING (0x3000) - -#define rDOCKFIFO_R_DATA(_f, _n) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + ((_n) * 4))) -#define rDOCKFIFO_R_STAT(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + 0x14)) -#define rDOCKFIFO_W_DATA(_f, _n) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + DOCKFIFO_W_SPACING + ((_n) * 4))) -#define rDOCKFIFO_W_STAT(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + DOCKFIFO_W_SPACING + 0x14)) -#define rDOCKFIFO_CNFG(_f) (*(volatile uint32_t *)(uart_base + ((_f) * 
DOCKFIFO_SPACING) + 0x2000)) -#define rDOCKFIFO_DRAIN(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + 0x2004)) -#define rDOCKFIFO_INTMASK(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + 0x2008)) +#define rPMGR_EVENT_TMR (*(volatile unsigned *) (timer_base + 0x00100)) +#define rPMGR_EVENT_TMR_PERIOD (*(volatile unsigned *) (timer_base + 0x00104)) +#define rPMGR_EVENT_TMR_CTL (*(volatile unsigned *) (timer_base + 0x00108)) +#define rPMGR_INTERVAL_TMR (*(volatile unsigned *) (timer_base + 0x00200)) +#define rPMGR_INTERVAL_TMR_CTL (*(volatile unsigned *) (timer_base + 0x00204)) + +#define PMGR_EVENT_TMR_CTL_EN (1 << 0) +#define PMGR_INTERVAL_TMR_CTL_EN (1 << 0) +#define PMGR_INTERVAL_TMR_CTL_CLR_INT (1 << 8) + +#define DOCKFIFO_UART (1) +#define DOCKFIFO_UART_WRITE (0) +#define DOCKFIFO_UART_READ (1) +#define DOCKFIFO_W_SPACING (0x1000) +#define DOCKFIFO_SPACING (0x3000) + +#define rDOCKFIFO_R_DATA(_f, _n) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + ((_n) * 4))) +#define rDOCKFIFO_R_STAT(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + 0x14)) +#define rDOCKFIFO_W_DATA(_f, _n) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + DOCKFIFO_W_SPACING + ((_n) * 4))) +#define rDOCKFIFO_W_STAT(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + DOCKFIFO_W_SPACING + 0x14)) +#define rDOCKFIFO_CNFG(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + 0x2000)) +#define rDOCKFIFO_DRAIN(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + 0x2004)) +#define rDOCKFIFO_INTMASK(_f) (*(volatile uint32_t *)(uart_base + ((_f) * DOCKFIFO_SPACING) + 0x2008)) #endif -#define PMGR_INTERVAL_TMR_OFFSET (0x200) -#define PMGR_INTERVAL_TMR_CTL_OFFSET (0x204) +#define PMGR_INTERVAL_TMR_OFFSET (0x200) +#define PMGR_INTERVAL_TMR_CTL_OFFSET (0x204) #endif /* ! 
_PEXPERT_ARM_S7002_H */ diff --git a/pexpert/pexpert/arm/T8002.h b/pexpert/pexpert/arm/T8002.h index 19fb3aeff..9f90baead 100644 --- a/pexpert/pexpert/arm/T8002.h +++ b/pexpert/pexpert/arm/T8002.h @@ -7,39 +7,39 @@ #include -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include -#define rPMGR_EVENT_TMR (*(volatile uint32_t *) (timer_base + 0x00000)) -#define rPMGR_EVENT_TMR_PERIOD (*(volatile uint32_t *) (timer_base + 0x00004)) -#define rPMGR_EVENT_TMR_CTL (*(volatile uint32_t *) (timer_base + 0x00008)) +#define rPMGR_EVENT_TMR (*(volatile uint32_t *) (timer_base + 0x00000)) +#define rPMGR_EVENT_TMR_PERIOD (*(volatile uint32_t *) (timer_base + 0x00004)) +#define rPMGR_EVENT_TMR_CTL (*(volatile uint32_t *) (timer_base + 0x00008)) -#define PMGR_EVENT_TMR_CTL_EN (1 << 0) +#define PMGR_EVENT_TMR_CTL_EN (1 << 0) -#define DOCKCHANNEL_UART (1) -#define DOCKCHANNEL_STRIDE (0x10000) +#define DOCKCHANNEL_UART (1) +#define DOCKCHANNEL_STRIDE (0x10000) // Channel index -#define DOCKCHANNEL_UART_CHANNEL (0) +#define DOCKCHANNEL_UART_CHANNEL (0) // AOP_CLOCK frequency * 30 ms -#define DOCKCHANNEL_DRAIN_PERIOD (96000000 * 0.03) +#define DOCKCHANNEL_DRAIN_PERIOD (96000000 * 0.03) -#define rDOCKCHANNELS_AGENT_AP_INTR_CTRL (*(volatile uint32_t *) (dock_agent_base + 0x00)) -#define rDOCKCHANNELS_AGENT_AP_INTR_STATUS (*(volatile uint32_t *) (dock_agent_base + 0x04)) -#define rDOCKCHANNELS_AGENT_AP_ERR_INTR_CTRL (*(volatile uint32_t *) (dock_agent_base + 0x08)) -#define rDOCKCHANNELS_AGENT_AP_ERR_INTR_STATUS (*(volatile uint32_t *) (dock_agent_base + 0x0c)) +#define rDOCKCHANNELS_AGENT_AP_INTR_CTRL (*(volatile uint32_t *) (dock_agent_base + 0x00)) +#define rDOCKCHANNELS_AGENT_AP_INTR_STATUS (*(volatile uint32_t *) (dock_agent_base + 0x04)) +#define rDOCKCHANNELS_AGENT_AP_ERR_INTR_CTRL (*(volatile uint32_t *) (dock_agent_base + 0x08)) +#define rDOCKCHANNELS_AGENT_AP_ERR_INTR_STATUS (*(volatile uint32_t *) (dock_agent_base + 0x0c)) -#define rDOCKCHANNELS_DEV_DRAIN_CFG(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x0008)) +#define rDOCKCHANNELS_DEV_DRAIN_CFG(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x0008)) -#define rDOCKCHANNELS_DEV_WDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4004)) -#define rDOCKCHANNELS_DEV_WSTAT(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4014)) -#define rDOCKCHANNELS_DEV_RDATA0(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4018)) -#define rDOCKCHANNELS_DEV_RDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x401c)) +#define rDOCKCHANNELS_DEV_WDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4004)) +#define rDOCKCHANNELS_DEV_WSTAT(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4014)) +#define rDOCKCHANNELS_DEV_RDATA0(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4018)) +#define rDOCKCHANNELS_DEV_RDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x401c)) -#define rDOCKCHANNELS_DOCK_RDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0xc01c)) -#define rDOCKCHANNELS_DOCK_RDATA3(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0xc024)) +#define rDOCKCHANNELS_DOCK_RDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0xc01c)) +#define rDOCKCHANNELS_DOCK_RDATA3(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * 
DOCKCHANNEL_STRIDE) + 0xc024)) #endif #endif /* ! _PEXPERT_ARM_T8002_H */ diff --git a/pexpert/pexpert/arm/board_config.h b/pexpert/pexpert/arm/board_config.h index 13ccdb597..0cad92467 100644 --- a/pexpert/pexpert/arm/board_config.h +++ b/pexpert/pexpert/arm/board_config.h @@ -13,7 +13,7 @@ #define ARM_BOARD_WFE_TIMEOUT_NS 1000 #define __ARM_L2CACHE_SIZE_LOG__ 18 #define ARM_BOARD_CLASS_S7002 -#define PEXPERT_NO_3X_IMAGES 1 +#define PEXPERT_NO_3X_IMAGES 1 #endif /* ARM_BOARD_CONFIG_S7002 */ #ifdef ARM_BOARD_CONFIG_T8002 @@ -22,7 +22,7 @@ #define ARM_BOARD_WFE_TIMEOUT_NS 1000 #define __ARM_L2CACHE_SIZE_LOG__ 19 #define ARM_BOARD_CLASS_T8002 -#define PEXPERT_NO_3X_IMAGES 1 +#define PEXPERT_NO_3X_IMAGES 1 #endif /* ARM_BOARD_CONFIG_T8002 */ #ifdef ARM_BOARD_CONFIG_T8004 @@ -31,7 +31,7 @@ #define ARM_BOARD_WFE_TIMEOUT_NS 1000 #define __ARM_L2CACHE_SIZE_LOG__ 20 #define ARM_BOARD_CLASS_T8002 -#define PEXPERT_NO_3X_IMAGES 1 +#define PEXPERT_NO_3X_IMAGES 1 #endif /* ARM_BOARD_CONFIG_T8004 */ #endif /* ! _PEXPERT_ARM_BOARD_CONFIG_H */ diff --git a/pexpert/pexpert/arm/boot.h b/pexpert/pexpert/arm/boot.h index dc0605fc8..9a9c31361 100644 --- a/pexpert/pexpert/arm/boot.h +++ b/pexpert/pexpert/arm/boot.h @@ -14,53 +14,53 @@ #define BOOT_LINE_LENGTH 256 /* - * Video information.. + * Video information.. */ struct Boot_Video { - unsigned long v_baseAddr; /* Base address of video memory */ - unsigned long v_display; /* Display Code (if Applicable */ - unsigned long v_rowBytes; /* Number of bytes per pixel row */ - unsigned long v_width; /* Width */ - unsigned long v_height; /* Height */ - unsigned long v_depth; /* Pixel Depth and other parameters */ + unsigned long v_baseAddr; /* Base address of video memory */ + unsigned long v_display; /* Display Code (if Applicable */ + unsigned long v_rowBytes; /* Number of bytes per pixel row */ + unsigned long v_width; /* Width */ + unsigned long v_height; /* Height */ + unsigned long v_depth; /* Pixel Depth and other parameters */ }; -#define kBootVideoDepthMask (0xFF) -#define kBootVideoDepthDepthShift (0) -#define kBootVideoDepthRotateShift (8) -#define kBootVideoDepthScaleShift (16) +#define kBootVideoDepthMask (0xFF) +#define kBootVideoDepthDepthShift (0) +#define kBootVideoDepthRotateShift (8) +#define kBootVideoDepthScaleShift (16) -#define kBootFlagsDarkBoot (1 << 0) +#define kBootFlagsDarkBoot (1 << 0) -typedef struct Boot_Video Boot_Video; +typedef struct Boot_Video Boot_Video; /* Boot argument structure - passed into Mach kernel at boot time. 
*/ -#define kBootArgsRevision 1 -#define kBootArgsRevision2 2 /* added boot_args->bootFlags */ -#define kBootArgsVersion1 1 -#define kBootArgsVersion2 2 +#define kBootArgsRevision 1 +#define kBootArgsRevision2 2 /* added boot_args->bootFlags */ +#define kBootArgsVersion1 1 +#define kBootArgsVersion2 2 typedef struct boot_args { - uint16_t Revision; /* Revision of boot_args structure */ - uint16_t Version; /* Version of boot_args structure */ - uint32_t virtBase; /* Virtual base of memory */ - uint32_t physBase; /* Physical base of memory */ - uint32_t memSize; /* Size of memory */ - uint32_t topOfKernelData; /* Highest physical address used in kernel data area */ - Boot_Video Video; /* Video Information */ - uint32_t machineType; /* Machine Type */ - void *deviceTreeP; /* Base of flattened device tree */ - uint32_t deviceTreeLength; /* Length of flattened tree */ - char CommandLine[BOOT_LINE_LENGTH]; /* Passed in command line */ - uint32_t bootFlags; /* Additional flags specified by the bootloader */ - uint32_t memSizeActual; /* Actual size of memory */ + uint16_t Revision; /* Revision of boot_args structure */ + uint16_t Version; /* Version of boot_args structure */ + uint32_t virtBase; /* Virtual base of memory */ + uint32_t physBase; /* Physical base of memory */ + uint32_t memSize; /* Size of memory */ + uint32_t topOfKernelData; /* Highest physical address used in kernel data area */ + Boot_Video Video; /* Video Information */ + uint32_t machineType; /* Machine Type */ + void *deviceTreeP; /* Base of flattened device tree */ + uint32_t deviceTreeLength; /* Length of flattened tree */ + char CommandLine[BOOT_LINE_LENGTH]; /* Passed in command line */ + uint32_t bootFlags; /* Additional flags specified by the bootloader */ + uint32_t memSizeActual; /* Actual size of memory */ } boot_args; -#define SOC_DEVICE_TYPE_BUFFER_SIZE 32 +#define SOC_DEVICE_TYPE_BUFFER_SIZE 32 -#define PC_TRACE_BUF_SIZE 1024 +#define PC_TRACE_BUF_SIZE 1024 #define CDBG_MEM ((sizeof(dbg_registry_t) + PAGE_SIZE - 1) & ~PAGE_MASK) diff --git a/pexpert/pexpert/arm/consistent_debug.h b/pexpert/pexpert/arm/consistent_debug.h index 3697cd093..ceb317594 100644 --- a/pexpert/pexpert/arm/consistent_debug.h +++ b/pexpert/pexpert/arm/consistent_debug.h @@ -31,18 +31,18 @@ #include -#define DEBUG_RECORD_ID_LONG(a, b,c ,d, e, f, g, h) \ +#define DEBUG_RECORD_ID_LONG(a, b, c, d, e, f, g, h) \ ( ((uint64_t)( (((h) << 24) & 0xFF000000) | \ - (((g) << 16) & 0x00FF0000) | \ - (((f) << 8) & 0x0000FF00) | \ - ((e) & 0x000000FF) ) << 32) | \ + (((g) << 16) & 0x00FF0000) | \ + (((f) << 8) & 0x0000FF00) | \ + ((e) & 0x000000FF) ) << 32) | \ (uint64_t)( (((d) << 24) & 0xFF000000) | \ - (((c) << 16) & 0x00FF0000) | \ - (((b) << 8) & 0x0000FF00) | \ - ((a) & 0x000000FF) ) ) -#define DEBUG_RECORD_ID_SHORT(a,b,c,d) DEBUG_RECORD_ID_LONG(a,b,c,d,0,0,0,0) + (((c) << 16) & 0x00FF0000) | \ + (((b) << 8) & 0x0000FF00) | \ + ((a) & 0x000000FF) ) ) +#define DEBUG_RECORD_ID_SHORT(a, b, c, d) DEBUG_RECORD_ID_LONG(a,b,c,d,0,0,0,0) -/* +/* * Shared Memory Console Descriptors: * Record ID: One per SHMConsole */ @@ -59,22 +59,22 @@ typedef enum { #define DbgIdConsoleHeaderForIOP(which_dbg_processor, which_num) (DEBUG_RECORD_ID_LONG('C','O','N',0,0,0,which_dbg_processor,which_num)) -#define kDbgIdConsoleHeaderAP DbgIdConsoleHeaderForIOP(DBG_PROCESSOR_AP, 0) -#define kDbgIdConsoleHeaderANS DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_ANS, 0) -#define kDbgIdConsoleHeaderSIO DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_SIO, 0) -#define kDbgIdConsoleHeaderSEP 
DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_SEP, 0) -#define kDbgIdConsoleHeaderISP DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_ISP, 0) -#define kDbgIdConsoleHeaderOscar DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_OSCAR, 0) +#define kDbgIdConsoleHeaderAP DbgIdConsoleHeaderForIOP(DBG_PROCESSOR_AP, 0) +#define kDbgIdConsoleHeaderANS DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_ANS, 0) +#define kDbgIdConsoleHeaderSIO DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_SIO, 0) +#define kDbgIdConsoleHeaderSEP DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_SEP, 0) +#define kDbgIdConsoleHeaderISP DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_ISP, 0) +#define kDbgIdConsoleHeaderOscar DbgIdConsoleHeaderForIOP(DBG_COPROCESSOR_OSCAR, 0) -#define kDbgIdAstrisConnection DEBUG_RECORD_ID_LONG('A','S','T','R','C','N','X','N') -#define kDbgIdAstrisConnectionVers DEBUG_RECORD_ID_LONG('A','S','T','R','C','V','E','R') +#define kDbgIdAstrisConnection DEBUG_RECORD_ID_LONG('A','S','T','R','C','N','X','N') +#define kDbgIdAstrisConnectionVers DEBUG_RECORD_ID_LONG('A','S','T','R','C','V','E','R') -#define kDbgIdUnusedEntry 0x0ULL -#define kDbgIdReservedEntry DEBUG_RECORD_ID_LONG('R','E','S','E','R','V','E', 'D') -#define kDbgIdFreeReqEntry DEBUG_RECORD_ID_LONG('F','R','E','E','-','R','E','Q') -#define kDbgIdFreeAckEntry DEBUG_RECORD_ID_LONG('F','R','E','E','-','A','C','K') +#define kDbgIdUnusedEntry 0x0ULL +#define kDbgIdReservedEntry DEBUG_RECORD_ID_LONG('R','E','S','E','R','V','E', 'D') +#define kDbgIdFreeReqEntry DEBUG_RECORD_ID_LONG('F','R','E','E','-','R','E','Q') +#define kDbgIdFreeAckEntry DEBUG_RECORD_ID_LONG('F','R','E','E','-','A','C','K') -#define DEBUG_REGISTRY_MAX_RECORDS 512 +#define DEBUG_REGISTRY_MAX_RECORDS 512 typedef struct { uint64_t record_id; // = kDbgIdTopLevelHeader @@ -106,14 +106,14 @@ typedef struct { } dbg_cpr_t; typedef struct { - dbg_top_level_header_t top_level_header; - dbg_record_header_t records[DEBUG_REGISTRY_MAX_RECORDS]; + dbg_top_level_header_t top_level_header; + dbg_record_header_t records[DEBUG_REGISTRY_MAX_RECORDS]; // Stuff the AP's Progress Report buffer at the end of this // structure. It's currently the only processor that doesn't // have some easier form of persistent memory that survives the // iBoot->iOS handoff (e.g. 
ANS has its private heap) - dbg_cpr_t ap_cpr_region; + dbg_cpr_t ap_cpr_region; } dbg_registry_t; /* @@ -132,4 +132,3 @@ int PE_consistent_debug_register(uint64_t record_id, uint64_t physaddr, uint64_t int PE_consistent_debug_enabled(void); #endif // PE_CONSISTENT_DEBUG_H - diff --git a/pexpert/pexpert/arm/protos.h b/pexpert/pexpert/arm/protos.h index 26b7aece6..2944b793e 100644 --- a/pexpert/pexpert/arm/protos.h +++ b/pexpert/pexpert/arm/protos.h @@ -14,7 +14,7 @@ extern uint32_t pe_arm_init_interrupts(void *args); extern void pe_arm_init_debug(void *args); -#ifdef PEXPERT_KERNEL_PRIVATE +#ifdef PEXPERT_KERNEL_PRIVATE extern void cnputc(char); #endif int serial_init(void); diff --git a/pexpert/pexpert/arm64/AIC.h b/pexpert/pexpert/arm64/AIC.h index db5faabe3..da76cfc29 100644 --- a/pexpert/pexpert/arm64/AIC.h +++ b/pexpert/pexpert/arm64/AIC.h @@ -5,16 +5,18 @@ #ifndef _PEXPERT_ARM_AIC_H #define _PEXPERT_ARM_AIC_H -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include -static inline uint32_t _aic_read32(uintptr_t addr) +static inline uint32_t +_aic_read32(uintptr_t addr) { - return (*(volatile uint32_t *)addr); + return *(volatile uint32_t *)addr; } -static inline void _aic_write32(uintptr_t addr, uint32_t data) +static inline void +_aic_write32(uintptr_t addr, uint32_t data) { *(volatile uint32_t *)(addr) = data; } @@ -25,8 +27,7 @@ static inline void _aic_write32(uintptr_t addr, uint32_t data) #endif // AIC timebase registers (timer base address in DT node is setup as AIC_BASE + 0x1000) -#define kAICMainTimLo (0x20) -#define kAICMainTimHi (0x28) +#define kAICMainTimLo (0x20) +#define kAICMainTimHi (0x28) #endif /* ! _PEXPERT_ARM_AIC_H */ - diff --git a/pexpert/pexpert/arm64/BCM2837.h b/pexpert/pexpert/arm64/BCM2837.h index 5de092054..59148fd86 100644 --- a/pexpert/pexpert/arm64/BCM2837.h +++ b/pexpert/pexpert/arm64/BCM2837.h @@ -8,7 +8,7 @@ #ifdef BCM2837 #include "arm64_common.h" #endif - + #define NO_MONITOR 1 #define NO_ECORE 1 @@ -16,36 +16,36 @@ #define PI3_UART -#define PI3_BREAK asm volatile("brk #0"); +#define PI3_BREAK asm volatile("brk #0"); -#define BCM2837_GPIO_BASE 0x3F200000 -#define BCM2837_GPIO_SIZE 0xA0 -#define BCM2837_GPFSEL0 0x3F200000 -#define BCM2837_GPSET0 0x3F20001C -#define BCM2837_GPCLR0 0x3F200028 -#define BCM2837_GPPUD 0x3F200094 -#define BCM2837_GPPUDCLK0 0x3F200098 +#define BCM2837_GPIO_BASE 0x3F200000 +#define BCM2837_GPIO_SIZE 0xA0 +#define BCM2837_GPFSEL0 0x3F200000 +#define BCM2837_GPSET0 0x3F20001C +#define BCM2837_GPCLR0 0x3F200028 +#define BCM2837_GPPUD 0x3F200094 +#define BCM2837_GPPUDCLK0 0x3F200098 -#define BCM2837_AUX_BASE 0x3F215000 -#define BCM2837_AUX_SIZE 0x70 -#define BCM2837_AUX_ENABLES 0x3F215004 -#define BCM2837_AUX_MU_IO_REG 0x3F215040 -#define BCM2837_AUX_MU_IER_REG 0x3F215044 -#define BCM2837_AUX_MU_IIR_REG 0x3F215048 -#define BCM2837_AUX_MU_LCR_REG 0x3F21504C -#define BCM2837_AUX_MU_MCR_REG 0x3F215050 -#define BCM2837_AUX_MU_LSR_REG 0x3F215054 -#define BCM2837_AUX_MU_MSR_REG 0x3F215058 -#define BCM2837_AUX_MU_SCRATCH 0x3F21505C -#define BCM2837_AUX_MU_CNTL_REG 0x3F215060 -#define BCM2837_AUX_MU_STAT_REG 0x3F215064 -#define BCM2837_AUX_MU_BAUD_REG 0x3F215068 +#define BCM2837_AUX_BASE 0x3F215000 +#define BCM2837_AUX_SIZE 0x70 +#define BCM2837_AUX_ENABLES 0x3F215004 +#define BCM2837_AUX_MU_IO_REG 0x3F215040 +#define BCM2837_AUX_MU_IER_REG 0x3F215044 +#define BCM2837_AUX_MU_IIR_REG 0x3F215048 +#define BCM2837_AUX_MU_LCR_REG 0x3F21504C +#define BCM2837_AUX_MU_MCR_REG 0x3F215050 +#define BCM2837_AUX_MU_LSR_REG 0x3F215054 +#define 
BCM2837_AUX_MU_MSR_REG 0x3F215058 +#define BCM2837_AUX_MU_SCRATCH 0x3F21505C +#define BCM2837_AUX_MU_CNTL_REG 0x3F215060 +#define BCM2837_AUX_MU_STAT_REG 0x3F215064 +#define BCM2837_AUX_MU_BAUD_REG 0x3F215068 -#define BCM2837_GPFSEL0_V (pi3_gpio_base_vaddr + 0x0) -#define BCM2837_GPSET0_V (pi3_gpio_base_vaddr + 0x1C) -#define BCM2837_GPCLR0_V (pi3_gpio_base_vaddr + 0x28) -#define BCM2837_GPPUD_V (pi3_gpio_base_vaddr + 0x94) -#define BCM2837_GPPUDCLK0_V (pi3_gpio_base_vaddr + 0x98) +#define BCM2837_GPFSEL0_V (pi3_gpio_base_vaddr + 0x0) +#define BCM2837_GPSET0_V (pi3_gpio_base_vaddr + 0x1C) +#define BCM2837_GPCLR0_V (pi3_gpio_base_vaddr + 0x28) +#define BCM2837_GPPUD_V (pi3_gpio_base_vaddr + 0x94) +#define BCM2837_GPPUDCLK0_V (pi3_gpio_base_vaddr + 0x98) #define BCM2837_FSEL_INPUT 0x0 #define BCM2837_FSEL_OUTPUT 0x1 @@ -61,23 +61,23 @@ #define BCM2837_FSEL_OFFS(func) (((func) % 10) * 3) #define BCM2837_FSEL_MASK(func) (0x7 << BCM2837_FSEL_OFFS(func)) -#define BCM2837_AUX_ENABLES_V (pi3_aux_base_vaddr + 0x4) -#define BCM2837_AUX_MU_IO_REG_V (pi3_aux_base_vaddr + 0x40) -#define BCM2837_AUX_MU_IER_REG_V (pi3_aux_base_vaddr + 0x44) -#define BCM2837_AUX_MU_IIR_REG_V (pi3_aux_base_vaddr + 0x48) -#define BCM2837_AUX_MU_LCR_REG_V (pi3_aux_base_vaddr + 0x4C) -#define BCM2837_AUX_MU_MCR_REG_V (pi3_aux_base_vaddr + 0x50) -#define BCM2837_AUX_MU_LSR_REG_V (pi3_aux_base_vaddr + 0x54) -#define BCM2837_AUX_MU_MSR_REG_V (pi3_aux_base_vaddr + 0x58) -#define BCM2837_AUX_MU_SCRATCH_V (pi3_aux_base_vaddr + 0x5C) -#define BCM2837_AUX_MU_CNTL_REG_V (pi3_aux_base_vaddr + 0x60) -#define BCM2837_AUX_MU_STAT_REG_V (pi3_aux_base_vaddr + 0x64) -#define BCM2837_AUX_MU_BAUD_REG_V (pi3_aux_base_vaddr + 0x68) +#define BCM2837_AUX_ENABLES_V (pi3_aux_base_vaddr + 0x4) +#define BCM2837_AUX_MU_IO_REG_V (pi3_aux_base_vaddr + 0x40) +#define BCM2837_AUX_MU_IER_REG_V (pi3_aux_base_vaddr + 0x44) +#define BCM2837_AUX_MU_IIR_REG_V (pi3_aux_base_vaddr + 0x48) +#define BCM2837_AUX_MU_LCR_REG_V (pi3_aux_base_vaddr + 0x4C) +#define BCM2837_AUX_MU_MCR_REG_V (pi3_aux_base_vaddr + 0x50) +#define BCM2837_AUX_MU_LSR_REG_V (pi3_aux_base_vaddr + 0x54) +#define BCM2837_AUX_MU_MSR_REG_V (pi3_aux_base_vaddr + 0x58) +#define BCM2837_AUX_MU_SCRATCH_V (pi3_aux_base_vaddr + 0x5C) +#define BCM2837_AUX_MU_CNTL_REG_V (pi3_aux_base_vaddr + 0x60) +#define BCM2837_AUX_MU_STAT_REG_V (pi3_aux_base_vaddr + 0x64) +#define BCM2837_AUX_MU_BAUD_REG_V (pi3_aux_base_vaddr + 0x68) #define BCM2837_PUT32(addr, value) do { *((volatile uint32_t *) addr) = value; } while(0) #define BCM2837_GET32(addr) *((volatile uint32_t *) addr) -#define PLATFORM_PANIC_LOG_PADDR 0x3c0fc000 -#define PLATFORM_PANIC_LOG_SIZE 16384 // 16kb +#define PLATFORM_PANIC_LOG_PADDR 0x3c0fc000 +#define PLATFORM_PANIC_LOG_SIZE 16384 // 16kb #endif /* ! ASSEMBLER */ #endif /* ! 
_PEXPERT_ARM_BCM2837_H */ diff --git a/pexpert/pexpert/arm64/S3c2410x.h b/pexpert/pexpert/arm64/S3c2410x.h index a56cb781d..f4e482b92 100644 --- a/pexpert/pexpert/arm64/S3c2410x.h +++ b/pexpert/pexpert/arm64/S3c2410x.h @@ -11,7 +11,7 @@ // History // 0.0 : Programming start (February 15,2002) -> SOP // INTERRUPT rPRIORITY 0x4a00000a -> 0x4a00000c (May 02, 2002 SOP) -// RTC BCD DAY and DATE Register Name Correction (May 06, 2002 SOP) +// RTC BCD DAY and DATE Register Name Correction (May 06, 2002 SOP) //============================================================================= #ifndef __2410ADDR_H__ @@ -26,9 +26,9 @@ extern "C" { #include #if 0 -#define _ISR_STARTADDRESS 0x30000000 +#define _ISR_STARTADDRESS 0x30000000 -// Memory control +// Memory control #define rBWSCON (*(volatile unsigned *)0x48000000) //Bus width & wait status #define rBANKCON0 (*(volatile unsigned *)0x48000004) //Boot ROM control #define rBANKCON1 (*(volatile unsigned *)0x48000008) //BANK1 control @@ -119,7 +119,7 @@ extern "C" { #define rLCDSADDR2 (*(volatile unsigned *)0x4d000018) //STN/TFT Frame buffer start address 2 #define rLCDSADDR3 (*(volatile unsigned *)0x4d00001c) //STN/TFT Virtual screen address set #define rREDLUT (*(volatile unsigned *)0x4d000020) //STN Red lookup table -#define rGREENLUT (*(volatile unsigned *)0x4d000024) //STN Green lookup table +#define rGREENLUT (*(volatile unsigned *)0x4d000024) //STN Green lookup table #define rBLUELUT (*(volatile unsigned *)0x4d000028) //STN Blue lookup table #define rDITHMODE (*(volatile unsigned *)0x4d00004c) //STN Dithering mode #define rTPAL (*(volatile unsigned *)0x4d000050) //TFT Temporary palette @@ -170,7 +170,7 @@ extern "C" { #if 0 // USB DEVICE #ifdef __BIG_ENDIAN - +< ERROR IF BIG_ENDIAN > #define rFUNC_ADDR_REG (*(volatile unsigned char *)0x52000143) //Function address #define rPWR_REG (*(volatile unsigned char *)0x52000147) //Power management #define rEP_INT_REG (*(volatile unsigned char *)0x5200014b) //EP Interrupt pending and clear @@ -296,38 +296,38 @@ extern "C" { #endif -// I/O PORT +// I/O PORT #define rGPACON (*(volatile unsigned *)0x56000000) //Port A control #define rGPADAT (*(volatile unsigned *)0x56000004) //Port A data - + #define rGPBCON (*(volatile unsigned *)0x56000010) //Port B control #define rGPBDAT (*(volatile unsigned *)0x56000014) //Port B data #define rGPBUP (*(volatile unsigned *)0x56000018) //Pull-up control B - + #define rGPCCON (*(volatile unsigned *)0x56000020) //Port C control #define rGPCDAT (*(volatile unsigned *)0x56000024) //Port C data #define rGPCUP (*(volatile unsigned *)0x56000028) //Pull-up control C - + #define rGPDCON (*(volatile unsigned *)0x56000030) //Port D control #define rGPDDAT (*(volatile unsigned *)0x56000034) //Port D data #define rGPDUP (*(volatile unsigned *)0x56000038) //Pull-up control D - + #define rGPECON (*(volatile unsigned *)0x56000040) //Port E control #define rGPEDAT (*(volatile unsigned *)0x56000044) //Port E data #define rGPEUP (*(volatile unsigned *)0x56000048) //Pull-up control E - + #define rGPFCON (*(volatile unsigned *)0x56000050) //Port F control #define rGPFDAT (*(volatile unsigned *)0x56000054) //Port F data #define rGPFUP (*(volatile unsigned *)0x56000058) //Pull-up control F - + #define rGPGCON (*(volatile unsigned *)0x56000060) //Port G control #define rGPGDAT (*(volatile unsigned *)0x56000064) //Port G data #define rGPGUP (*(volatile unsigned *)0x56000068) //Pull-up control G - + #define rGPHCON (*(volatile unsigned *)0x56000070) //Port H control #define rGPHDAT (*(volatile 
unsigned *)0x56000074) //Port H data #define rGPHUP (*(volatile unsigned *)0x56000078) //Pull-up control H - + #define rMISCCR (*(volatile unsigned *)0x56000080) //Miscellaneous control #define rDCLKCON (*(volatile unsigned *)0x56000084) //DCLK0/1 control #define rEXTINT0 (*(volatile unsigned *)0x56000088) //External interrupt control register 0 @@ -342,8 +342,8 @@ extern "C" { #define rGSTATUS0 (*(volatile unsigned *)0x560000ac) //External pin status #define rGSTATUS1 (*(volatile unsigned *)0x560000b0) //Chip ID(0x32410000) #define rGSTATUS2 (*(volatile unsigned *)0x560000b4) //Reset type -#define rGSTATUS3 (*(volatile unsigned *)0x560000b8) //Saved data0(32-bit) before entering POWER_OFF mode -#define rGSTATUS4 (*(volatile unsigned *)0x560000bc) //Saved data0(32-bit) before entering POWER_OFF mode +#define rGSTATUS3 (*(volatile unsigned *)0x560000b8) //Saved data0(32-bit) before entering POWER_OFF mode +#define rGSTATUS4 (*(volatile unsigned *)0x560000bc) //Saved data0(32-bit) before entering POWER_OFF mode // RTC @@ -392,9 +392,9 @@ extern "C" { #define rADCTSC (*(volatile unsigned *)0x58000004) //ADC touch screen control #define rADCDLY (*(volatile unsigned *)0x58000008) //ADC start or Interval Delay #define rADCDAT0 (*(volatile unsigned *)0x5800000c) //ADC conversion data 0 -#define rADCDAT1 (*(volatile unsigned *)0x58000010) //ADC conversion data 1 - -// SPI +#define rADCDAT1 (*(volatile unsigned *)0x58000010) //ADC conversion data 1 + +// SPI #define rSPCON0 (*(volatile unsigned *)0x59000000) //SPI0 control #define rSPSTA0 (*(volatile unsigned *)0x59000004) //SPI0 status #define rSPPIN0 (*(volatile unsigned *)0x59000008) //SPI0 pin control @@ -435,7 +435,7 @@ extern "C" { #define rSDIDAT (*(volatile unsigned *)0x5a00003c) //SDI data #define SDIDAT 0x5a00003c #endif //SD Interface - + // ISR #define pISR_RESET (*(unsigned *)(_ISR_STARTADDRESS+0x0)) @@ -530,10 +530,10 @@ extern "C" { #define BIT_SUB_RXD0 (0x1<<0) #define ClearPending(bit) {\ - rSRCPND = bit;\ - rINTPND = bit;\ - rINTPND;\ - } + rSRCPND = bit;\ + rINTPND = bit;\ + rINTPND;\ + } //Wait until rINTPND is changed for the case that the ISR is very short. 
#endif diff --git a/pexpert/pexpert/arm64/S5L8960X.h b/pexpert/pexpert/arm64/S5L8960X.h index 782481ff2..82e140383 100644 --- a/pexpert/pexpert/arm64/S5L8960X.h +++ b/pexpert/pexpert/arm64/S5L8960X.h @@ -8,9 +8,9 @@ #include #include -#define WITH_CLASSIC_S2R 1 +#define WITH_CLASSIC_S2R 1 -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include diff --git a/pexpert/pexpert/arm64/S8000.h b/pexpert/pexpert/arm64/S8000.h index 1879560eb..284d239cd 100644 --- a/pexpert/pexpert/arm64/S8000.h +++ b/pexpert/pexpert/arm64/S8000.h @@ -8,7 +8,7 @@ #include #include -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include diff --git a/pexpert/pexpert/arm64/T7000.h b/pexpert/pexpert/arm64/T7000.h index 9c755083d..d6fffc0d9 100644 --- a/pexpert/pexpert/arm64/T7000.h +++ b/pexpert/pexpert/arm64/T7000.h @@ -8,9 +8,9 @@ #include #include -#define WITH_CLASSIC_S2R 1 +#define WITH_CLASSIC_S2R 1 -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include diff --git a/pexpert/pexpert/arm64/T8010.h b/pexpert/pexpert/arm64/T8010.h index 0210e7bd6..ed1ecbb11 100644 --- a/pexpert/pexpert/arm64/T8010.h +++ b/pexpert/pexpert/arm64/T8010.h @@ -8,34 +8,34 @@ #include #include -#ifndef ASSEMBLER +#ifndef ASSEMBLER #include #include -#define DOCKCHANNEL_UART (1) -#define DOCKCHANNEL_STRIDE (0x10000) +#define DOCKCHANNEL_UART (1) +#define DOCKCHANNEL_STRIDE (0x10000) // Channel index -#define DOCKCHANNEL_UART_CHANNEL (0) +#define DOCKCHANNEL_UART_CHANNEL (0) // AOP_CLOCK frequency * 30 ms -#define DOCKCHANNEL_DRAIN_PERIOD (192000000 * 0.03) +#define DOCKCHANNEL_DRAIN_PERIOD (192000000 * 0.03) -#define rDOCKCHANNELS_AGENT_AP_INTR_CTRL (*(volatile uint32_t *) (dock_agent_base + 0x00)) -#define rDOCKCHANNELS_AGENT_AP_INTR_STATUS (*(volatile uint32_t *) (dock_agent_base + 0x04)) -#define rDOCKCHANNELS_AGENT_AP_ERR_INTR_CTRL (*(volatile uint32_t *) (dock_agent_base + 0x08)) -#define rDOCKCHANNELS_AGENT_AP_ERR_INTR_STATUS (*(volatile uint32_t *) (dock_agent_base + 0x0c)) +#define rDOCKCHANNELS_AGENT_AP_INTR_CTRL (*(volatile uint32_t *) (dock_agent_base + 0x00)) +#define rDOCKCHANNELS_AGENT_AP_INTR_STATUS (*(volatile uint32_t *) (dock_agent_base + 0x04)) +#define rDOCKCHANNELS_AGENT_AP_ERR_INTR_CTRL (*(volatile uint32_t *) (dock_agent_base + 0x08)) +#define rDOCKCHANNELS_AGENT_AP_ERR_INTR_STATUS (*(volatile uint32_t *) (dock_agent_base + 0x0c)) -#define rDOCKCHANNELS_DEV_DRAIN_CFG(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x0008)) +#define rDOCKCHANNELS_DEV_DRAIN_CFG(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x0008)) -#define rDOCKCHANNELS_DEV_WDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4004)) -#define rDOCKCHANNELS_DEV_WSTAT(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4014)) -#define rDOCKCHANNELS_DEV_RDATA0(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4018)) -#define rDOCKCHANNELS_DEV_RDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x401c)) +#define rDOCKCHANNELS_DEV_WDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4004)) +#define rDOCKCHANNELS_DEV_WSTAT(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4014)) +#define rDOCKCHANNELS_DEV_RDATA0(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x4018)) +#define rDOCKCHANNELS_DEV_RDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0x401c)) -#define rDOCKCHANNELS_DOCK_RDATA1(_ch) (*(volatile uint32_t *) (uart_base 
+ ((_ch) * DOCKCHANNEL_STRIDE) + 0xc01c)) -#define rDOCKCHANNELS_DOCK_RDATA3(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0xc024)) +#define rDOCKCHANNELS_DOCK_RDATA1(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0xc01c)) +#define rDOCKCHANNELS_DOCK_RDATA3(_ch) (*(volatile uint32_t *) (uart_base + ((_ch) * DOCKCHANNEL_STRIDE) + 0xc024)) #endif diff --git a/pexpert/pexpert/arm64/board_config.h b/pexpert/pexpert/arm64/board_config.h index c7c434d2a..c4a0edd1d 100644 --- a/pexpert/pexpert/arm64/board_config.h +++ b/pexpert/pexpert/arm64/board_config.h @@ -16,8 +16,9 @@ #define ARM_BOARD_WFE_TIMEOUT_NS 1000 #define ARM_BOARD_CLASS_S5L8960X #define KERNEL_INTEGRITY_WT 1 -#define PEXPERT_NO_3X_IMAGES 1 +#define PEXPERT_NO_3X_IMAGES 1 #define CORE_NCTRS 8 +#define CPMU_AIC_PMI 1 #endif /* ARM64_BOARD_CONFIG_S5L8960X */ #ifdef ARM64_BOARD_CONFIG_T7000 @@ -30,6 +31,7 @@ #define ARM_BOARD_CLASS_T7000 #define KERNEL_INTEGRITY_WT 1 #define CORE_NCTRS 8 +#define CPMU_AIC_PMI 1 #endif /* ARM64_BOARD_CONFIG_T7000 */ #ifdef ARM64_BOARD_CONFIG_T7001 @@ -43,6 +45,7 @@ #define KERNEL_INTEGRITY_WT 1 #define CPU_COUNT 3 #define CORE_NCTRS 8 +#define CPMU_AIC_PMI 1 #endif /* ARM64_BOARD_CONFIG_T7001 */ #ifdef ARM64_BOARD_CONFIG_S8000 @@ -61,6 +64,7 @@ #define ARM_BOARD_CLASS_S8000 #define KERNEL_INTEGRITY_WT 1 #define CORE_NCTRS 8 +#define CPMU_AIC_PMI 1 #endif /* ARM64_BOARD_CONFIG_S8000 */ #ifdef ARM64_BOARD_CONFIG_S8001 @@ -79,6 +83,7 @@ #define ARM_BOARD_CLASS_S8000 #define KERNEL_INTEGRITY_WT 1 #define CORE_NCTRS 8 +#define CPMU_AIC_PMI 1 #endif /* ARM64_BOARD_CONFIG_S8001 */ #ifdef ARM64_BOARD_CONFIG_T8010 @@ -97,6 +102,7 @@ #define ARM_BOARD_WFE_TIMEOUT_NS 1000 #define ARM_BOARD_CLASS_T8010 #define CORE_NCTRS 10 +#define CPMU_AIC_PMI 1 #if DEVELOPMENT || DEBUG #define PMAP_CS 1 #define PMAP_CS_ENABLE 0 @@ -114,6 +120,7 @@ #define ARM_BOARD_CLASS_T8011 #define CPU_COUNT 3 #define CORE_NCTRS 10 +#define CPMU_AIC_PMI 1 #if DEVELOPMENT || DEBUG #define PMAP_CS 1 #define PMAP_CS_ENABLE 0 diff --git a/pexpert/pexpert/arm64/boot.h b/pexpert/pexpert/arm64/boot.h index 653b8252d..1bb953297 100644 --- a/pexpert/pexpert/arm64/boot.h +++ b/pexpert/pexpert/arm64/boot.h @@ -16,53 +16,53 @@ #define BOOT_LINE_LENGTH 256 /* - * Video information.. + * Video information.. */ struct Boot_Video { - unsigned long v_baseAddr; /* Base address of video memory */ - unsigned long v_display; /* Display Code (if Applicable */ - unsigned long v_rowBytes; /* Number of bytes per pixel row */ - unsigned long v_width; /* Width */ - unsigned long v_height; /* Height */ - unsigned long v_depth; /* Pixel Depth and other parameters */ + unsigned long v_baseAddr; /* Base address of video memory */ + unsigned long v_display; /* Display Code (if Applicable */ + unsigned long v_rowBytes; /* Number of bytes per pixel row */ + unsigned long v_width; /* Width */ + unsigned long v_height; /* Height */ + unsigned long v_depth; /* Pixel Depth and other parameters */ }; -#define kBootVideoDepthMask (0xFF) -#define kBootVideoDepthDepthShift (0) -#define kBootVideoDepthRotateShift (8) -#define kBootVideoDepthScaleShift (16) +#define kBootVideoDepthMask (0xFF) +#define kBootVideoDepthDepthShift (0) +#define kBootVideoDepthRotateShift (8) +#define kBootVideoDepthScaleShift (16) -#define kBootFlagsDarkBoot (1ULL << 0) +#define kBootFlagsDarkBoot (1ULL << 0) -typedef struct Boot_Video Boot_Video; +typedef struct Boot_Video Boot_Video; /* Boot argument structure - passed into Mach kernel at boot time. 
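Since v_depth above is documented as "Pixel Depth and other parameters", a short sketch of how the kBootVideoDepth* constants carve it up may help. These helpers are illustrative only, assuming each field is one byte wide as the shift spacing suggests; the real consumers live elsewhere in pexpert:

/* Sketch: v_depth packs depth, rotation, and scale as byte-wide fields,
 * per the shift/mask constants defined above. */
static inline unsigned long
boot_video_depth_sketch(const Boot_Video *v)
{
    return (v->v_depth >> kBootVideoDepthDepthShift) & kBootVideoDepthMask;
}

static inline unsigned long
boot_video_rotation_sketch(const Boot_Video *v)
{
    return (v->v_depth >> kBootVideoDepthRotateShift) & kBootVideoDepthMask;
}

static inline unsigned long
boot_video_scale_sketch(const Boot_Video *v)
{
    return (v->v_depth >> kBootVideoDepthScaleShift) & kBootVideoDepthMask;
}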
*/ -#define kBootArgsRevision 1 -#define kBootArgsRevision2 2 /* added boot_args.bootFlags */ -#define kBootArgsVersion1 1 -#define kBootArgsVersion2 2 +#define kBootArgsRevision 1 +#define kBootArgsRevision2 2 /* added boot_args.bootFlags */ +#define kBootArgsVersion1 1 +#define kBootArgsVersion2 2 typedef struct boot_args { - uint16_t Revision; /* Revision of boot_args structure */ - uint16_t Version; /* Version of boot_args structure */ - uint64_t virtBase; /* Virtual base of memory */ - uint64_t physBase; /* Physical base of memory */ - uint64_t memSize; /* Size of memory */ - uint64_t topOfKernelData; /* Highest physical address used in kernel data area */ - Boot_Video Video; /* Video Information */ - uint32_t machineType; /* Machine Type */ - void *deviceTreeP; /* Base of flattened device tree */ - uint32_t deviceTreeLength; /* Length of flattened tree */ - char CommandLine[BOOT_LINE_LENGTH]; /* Passed in command line */ - uint64_t bootFlags; /* Additional flags specified by the bootloader */ - uint64_t memSizeActual; /* Actual size of memory */ + uint16_t Revision; /* Revision of boot_args structure */ + uint16_t Version; /* Version of boot_args structure */ + uint64_t virtBase; /* Virtual base of memory */ + uint64_t physBase; /* Physical base of memory */ + uint64_t memSize; /* Size of memory */ + uint64_t topOfKernelData; /* Highest physical address used in kernel data area */ + Boot_Video Video; /* Video Information */ + uint32_t machineType; /* Machine Type */ + void *deviceTreeP; /* Base of flattened device tree */ + uint32_t deviceTreeLength; /* Length of flattened tree */ + char CommandLine[BOOT_LINE_LENGTH]; /* Passed in command line */ + uint64_t bootFlags; /* Additional flags specified by the bootloader */ + uint64_t memSizeActual; /* Actual size of memory */ } boot_args; -#define SOC_DEVICE_TYPE_BUFFER_SIZE 32 +#define SOC_DEVICE_TYPE_BUFFER_SIZE 32 -#define PC_TRACE_BUF_SIZE 1024 +#define PC_TRACE_BUF_SIZE 1024 #if SHMCON #define SHMCON_MEM 0x4000 @@ -75,4 +75,3 @@ typedef struct boot_args { #define PE_EARLY_BOOT_VA (SHMCON_MEM + CDBG_MEM) #endif /* _PEXPERT_ARM64_BOOT_H_ */ - diff --git a/pexpert/pexpert/arm64/cyclone.h b/pexpert/pexpert/arm64/cyclone.h index 4d39b88cf..6d5d900fa 100644 --- a/pexpert/pexpert/arm64/cyclone.h +++ b/pexpert/pexpert/arm64/cyclone.h @@ -8,17 +8,17 @@ #ifdef APPLECYCLONE #include "arm64_common.h" -#define MONITOR 1 /* Use EL3 monitor */ -#define NO_ECORE 1 -#define HAS_32BIT_DBGWRAP 1 +#define MONITOR 1 /* Use EL3 monitor */ +#define NO_ECORE 1 +#define HAS_32BIT_DBGWRAP 1 /* * Determined by experiment (not described in manual): - * A0 is variant 0, B0 is variant 1. See arm64/proc_reg.h + * A0 is variant 0, B0 is variant 1. See arm64/proc_reg.h * for how these values are constructed from the MIDR. 
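For readers without arm64/proc_reg.h at hand: the construction referred to above is, as an assumption consistent with the A0/B0 values, the MIDR_EL1 Variant field (bits [23:20]) packed above the Revision field (bits [3:0]):

#include <stdint.h>

/* Sketch: a value like CYCLONE_CPU_VERSION_B0 (0x10) falls out of
 * MIDR_EL1 as (variant << 4) | revision -- variant 1, revision 0. */
static inline uint32_t
cpu_version_from_midr_sketch(uint64_t midr_el1)
{
    uint32_t variant  = (uint32_t)((midr_el1 >> 20) & 0xF);
    uint32_t revision = (uint32_t)(midr_el1 & 0xF);
    return (variant << 4) | revision; /* 0x00 => A0, 0x10 => B0 */
}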
*/ -#define CYCLONE_CPU_VERSION_A0 0x00 -#define CYCLONE_CPU_VERSION_B0 0x10 +#define CYCLONE_CPU_VERSION_A0 0x00 +#define CYCLONE_CPU_VERSION_B0 0x10 #endif diff --git a/pexpert/pexpert/arm64/hurricane.h b/pexpert/pexpert/arm64/hurricane.h index 84a8d1bf9..dea4c8f3e 100644 --- a/pexpert/pexpert/arm64/hurricane.h +++ b/pexpert/pexpert/arm64/hurricane.h @@ -5,8 +5,8 @@ #ifndef _PEXPERT_ARM_HURRICANE_H #define _PEXPERT_ARM_HURRICANE_H -#define NO_MONITOR 1 /* No EL3 for this CPU -- ever */ -#define HAS_MIGSTS 1 /* Has MIGSTS register, and supports migration between p-core and e-core */ +#define NO_MONITOR 1 /* No EL3 for this CPU -- ever */ +#define HAS_MIGSTS 1 /* Has MIGSTS register, and supports migration between p-core and e-core */ #define HAS_KTRR 1 /* Has KTRR registers */ #ifdef APPLEHURRICANE @@ -14,11 +14,11 @@ #endif /* - * A0 is variant 0, B0 is variant 1. See arm64/proc_reg.h + * A0 is variant 0, B0 is variant 1. See arm64/proc_reg.h * for how these values are constructed from the MIDR. */ -#define HURRICANE_CPU_VERSION_A0 0x00 -#define HURRICANE_CPU_VERSION_B0 0x10 +#define HURRICANE_CPU_VERSION_A0 0x00 +#define HURRICANE_CPU_VERSION_B0 0x10 // Hurricane and Zephyr require workaround for radar 20619637 #define SINGLE_STEP_RETIRE_ERRATA 1 diff --git a/pexpert/pexpert/arm64/twister.h b/pexpert/pexpert/arm64/twister.h index 759d1ba14..0a17b3f22 100644 --- a/pexpert/pexpert/arm64/twister.h +++ b/pexpert/pexpert/arm64/twister.h @@ -5,9 +5,9 @@ #ifndef _PEXPERT_ARM_TWISTER_H #define _PEXPERT_ARM_TWISTER_H -#define MONITOR 1 /* Use EL3 monitor */ -#define NO_ECORE 1 -#define HAS_32BIT_DBGWRAP 1 +#define MONITOR 1 /* Use EL3 monitor */ +#define NO_ECORE 1 +#define HAS_32BIT_DBGWRAP 1 #ifdef APPLETWISTER #include "arm64_common.h" diff --git a/pexpert/pexpert/arm64/typhoon.h b/pexpert/pexpert/arm64/typhoon.h index 366fe7232..e91c1faa5 100644 --- a/pexpert/pexpert/arm64/typhoon.h +++ b/pexpert/pexpert/arm64/typhoon.h @@ -5,9 +5,9 @@ #ifndef _PEXPERT_ARM_TYPHOON_H #define _PEXPERT_ARM_TYPHOON_H -#define MONITOR 1 /* Use EL3 monitor */ -#define NO_ECORE 1 -#define HAS_32BIT_DBGWRAP 1 +#define MONITOR 1 /* Use EL3 monitor */ +#define NO_ECORE 1 +#define HAS_32BIT_DBGWRAP 1 #ifdef APPLETYPHOON #include "arm64_common.h" diff --git a/pexpert/pexpert/boot.h b/pexpert/pexpert/boot.h index f26ec4aad..2966d1f88 100644 --- a/pexpert/pexpert/boot.h +++ b/pexpert/pexpert/boot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _PEXPERT_BOOT_H_ diff --git a/pexpert/pexpert/device_tree.h b/pexpert/pexpert/device_tree.h index 8b22b9645..427f3a12c 100644 --- a/pexpert/pexpert/device_tree.h +++ b/pexpert/pexpert/device_tree.h @@ -1,9 +1,8 @@ - /* * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -12,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -23,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _PEXPERT_DEVICE_TREE_H_ @@ -38,18 +37,18 @@ extern "C" { #endif /* -------------------------------------------------------------------------------- - Foundation Types -------------------------------------------------------------------------------- -*/ + * ------------------------------------------------------------------------------- + * Foundation Types + * ------------------------------------------------------------------------------- + */ enum { - kDTPathNameSeparator = '/' /* 0x2F */ + kDTPathNameSeparator = '/' /* 0x2F */ }; /* Property Name Definitions (Property Names are C-Strings)*/ enum { - kDTMaxPropertyNameLength=31 /* Max length of Property Name (terminator not included) */ + kDTMaxPropertyNameLength=31 /* Max length of Property Name (terminator not included) */ }; typedef char DTPropertyNameBuf[32]; @@ -57,28 +56,28 @@ typedef char DTPropertyNameBuf[32]; /* Entry Name Definitions (Entry Names are C-Strings)*/ enum { - kDTMaxEntryNameLength = 63 /* Max length of a C-String Entry Name (terminator not included) */ + kDTMaxEntryNameLength = 63 /* Max length of a C-String Entry Name (terminator not included) */ }; /* length of DTEntryNameBuf = kDTMaxEntryNameLength +1*/ -typedef char DTEntryNameBuf[kDTMaxEntryNameLength+1]; +typedef char DTEntryNameBuf[kDTMaxEntryNameLength + 1]; /* -Structures for a Flattened Device Tree + * Structures for a Flattened Device Tree */ -#define kPropNameLength 32 +#define kPropNameLength 32 typedef struct DeviceTreeNodeProperty { - char name[kPropNameLength]; // NUL terminated property name - uint32_t length; // Length (bytes) of folloing prop value + char name[kPropNameLength];// NUL terminated property name + uint32_t length; // Length (bytes) of folloing prop value // unsigned long value[1]; // Variable length value of property - // Padded to a multiple of a longword? + // Padded to a multiple of a longword? 
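On the padding question in the comment above: in the flattened format each value is rounded up to a 4-byte boundary, so stepping from one property to the next looks like the sketch below (an assumption about the layout, not the kernel's own helper):

#include <stdint.h>

/* Sketch: the value bytes follow the fixed 36-byte header (32-byte name
 * plus 4-byte length) and are padded out to the next longword. */
static const void *
next_property_sketch(const DeviceTreeNodeProperty *prop)
{
    uintptr_t value  = (uintptr_t)prop + sizeof(*prop);
    uint32_t  padded = (prop->length + 3) & ~(uint32_t)3;
    return (const void *)(value + padded);
}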
} DeviceTreeNodeProperty; typedef struct OpaqueDTEntry { - uint32_t nProperties; // Number of props[] elements (0 => end) - uint32_t nChildren; // Number of children[] elements + uint32_t nProperties;// Number of props[] elements (0 => end) + uint32_t nChildren; // Number of children[] elements // DeviceTreeNodeProperty props[];// array size == nProperties // DeviceTreeNode children[]; // array size == nChildren } DeviceTreeNode; @@ -89,7 +88,7 @@ typedef struct DTSavedScope { struct DTSavedScope * nextScope; RealDTEntry scope; RealDTEntry entry; - unsigned long index; + unsigned long index; } *DTSavedScopePtr; /* Entry Iterator*/ @@ -98,7 +97,7 @@ typedef struct OpaqueDTEntryIterator { RealDTEntry currentScope; RealDTEntry currentEntry; DTSavedScopePtr savedScope; - unsigned long currentIndex; + unsigned long currentIndex; } OpaqueDTEntryIterator, *DTEntryIterator; /* Property Iterator*/ @@ -120,148 +119,148 @@ typedef struct OpaqueDTPropertyIterator* DTPropertyIterator; /* status values*/ enum { - kError = -1, - kIterationDone = 0, - kSuccess = 1 + kError = -1, + kIterationDone = 0, + kSuccess = 1 }; -#ifndef __MWERKS__ +#ifndef __MWERKS__ /* -------------------------------------------------------------------------------- - Device Tree Calls -------------------------------------------------------------------------------- -*/ + * ------------------------------------------------------------------------------- + * Device Tree Calls + * ------------------------------------------------------------------------------- + */ /* Used to initalize the device tree functions. */ /* base is the base address of the flatened device tree */ void DTInit(void *base); /* -------------------------------------------------------------------------------- - Entry Handling -------------------------------------------------------------------------------- -*/ + * ------------------------------------------------------------------------------- + * Entry Handling + * ------------------------------------------------------------------------------- + */ /* Compare two Entry's for equality. */ extern int DTEntryIsEqual(const DTEntry ref1, const DTEntry ref2); /* -------------------------------------------------------------------------------- - LookUp Entry by Name -------------------------------------------------------------------------------- -*/ + * ------------------------------------------------------------------------------- + * LookUp Entry by Name + * ------------------------------------------------------------------------------- + */ /* - DTFindEntry: - Find the device tree entry that contains propName=propValue. - It currently searches the entire - tree. This function should eventually go in DeviceTree.c. - Returns: kSuccess = entry was found. Entry is in entryH. - kError = entry was not found -*/ + * DTFindEntry: + * Find the device tree entry that contains propName=propValue. + * It currently searches the entire + * tree. This function should eventually go in DeviceTree.c. + * Returns: kSuccess = entry was found. Entry is in entryH. + * kError = entry was not found + */ extern int DTFindEntry(const char *propName, const char *propValue, DTEntry *entryH); /* - Lookup Entry - Locates an entry given a specified subroot (searchPoint) and path name. If the - searchPoint pointer is NULL, the path name is assumed to be an absolute path - name rooted to the root of the device tree. -*/ + * Lookup Entry + * Locates an entry given a specified subroot (searchPoint) and path name. 
If the + * searchPoint pointer is NULL, the path name is assumed to be an absolute path + * name rooted to the root of the device tree. + */ extern int DTLookupEntry(const DTEntry searchPoint, const char *pathName, DTEntry *foundEntry); /* -------------------------------------------------------------------------------- - Entry Iteration -------------------------------------------------------------------------------- -*/ + * ------------------------------------------------------------------------------- + * Entry Iteration + * ------------------------------------------------------------------------------- + */ /* - An Entry Iterator maintains three variables that are of interest to clients. - First is an "OutermostScope" which defines the outer boundry of the iteration. - This is defined by the starting entry and includes that entry plus all of it's - embedded entries. Second is a "currentScope" which is the entry the iterator is - currently in. And third is a "currentPosition" which is the last entry returned - during an iteration. - - Initialize Entry Iterator - Fill out the iterator structure. The outermostScope and currentScope of the iterator - are set to "startEntry". If "startEntry" = NULL, the outermostScope and - currentScope are set to the root entry. The currentPosition for the iterator is - set to "nil". -*/ + * An Entry Iterator maintains three variables that are of interest to clients. + * First is an "OutermostScope" which defines the outer boundry of the iteration. + * This is defined by the starting entry and includes that entry plus all of it's + * embedded entries. Second is a "currentScope" which is the entry the iterator is + * currently in. And third is a "currentPosition" which is the last entry returned + * during an iteration. + * + * Initialize Entry Iterator + * Fill out the iterator structure. The outermostScope and currentScope of the iterator + * are set to "startEntry". If "startEntry" = NULL, the outermostScope and + * currentScope are set to the root entry. The currentPosition for the iterator is + * set to "nil". + */ extern int DTInitEntryIterator(const DTEntry startEntry, DTEntryIterator iter); /* - Enter Child Entry - Move an Entry Iterator into the scope of a specified child entry. The - currentScope of the iterator is set to the entry specified in "childEntry". If - "childEntry" is nil, the currentScope is set to the entry specified by the - currentPosition of the iterator. -*/ + * Enter Child Entry + * Move an Entry Iterator into the scope of a specified child entry. The + * currentScope of the iterator is set to the entry specified in "childEntry". If + * "childEntry" is nil, the currentScope is set to the entry specified by the + * currentPosition of the iterator. + */ extern int DTEnterEntry(DTEntryIterator iterator, DTEntry childEntry); /* - Exit to Parent Entry - Move an Entry Iterator out of the current entry back into the scope of it's parent - entry. The currentPosition of the iterator is reset to the current entry (the - previous currentScope), so the next iteration call will continue where it left off. - This position is returned in parameter "currentPosition". -*/ + * Exit to Parent Entry + * Move an Entry Iterator out of the current entry back into the scope of it's parent + * entry. The currentPosition of the iterator is reset to the current entry (the + * previous currentScope), so the next iteration call will continue where it left off. + * This position is returned in parameter "currentPosition". 
+ */ extern int DTExitEntry(DTEntryIterator iterator, DTEntry *currentPosition); /* - Iterate Entries - Iterate and return entries contained within the entry defined by the current - scope of the iterator. Entries are returned one at a time. When - int == kIterationDone, all entries have been exhausted, and the - value of nextEntry will be Nil. -*/ + * Iterate Entries + * Iterate and return entries contained within the entry defined by the current + * scope of the iterator. Entries are returned one at a time. When + * int == kIterationDone, all entries have been exhausted, and the + * value of nextEntry will be Nil. + */ extern int DTIterateEntries(DTEntryIterator iterator, DTEntry *nextEntry); /* - Restart Entry Iteration - Restart an iteration within the current scope. The iterator is reset such that - iteration of the contents of the currentScope entry can be restarted. The - outermostScope and currentScope of the iterator are unchanged. The currentPosition - for the iterator is set to "nil". -*/ + * Restart Entry Iteration + * Restart an iteration within the current scope. The iterator is reset such that + * iteration of the contents of the currentScope entry can be restarted. The + * outermostScope and currentScope of the iterator are unchanged. The currentPosition + * for the iterator is set to "nil". + */ extern int DTRestartEntryIteration(DTEntryIterator iterator); /* -------------------------------------------------------------------------------- - Get Property Values -------------------------------------------------------------------------------- -*/ + * ------------------------------------------------------------------------------- + * Get Property Values + * ------------------------------------------------------------------------------- + */ /* - Get the value of the specified property for the specified entry. - - Get Property -*/ + * Get the value of the specified property for the specified entry. + * + * Get Property + */ extern int DTGetProperty(const DTEntry entry, const char *propertyName, void **propertyValue, unsigned int *propertySize); /* -------------------------------------------------------------------------------- - Iterating Properties -------------------------------------------------------------------------------- -*/ + * ------------------------------------------------------------------------------- + * Iterating Properties + * ------------------------------------------------------------------------------- + */ /* - Initialize Property Iterator - Fill out the property iterator structure. The target entry is defined by entry. -*/ + * Initialize Property Iterator + * Fill out the property iterator structure. The target entry is defined by entry. + */ extern int DTInitPropertyIterator(const DTEntry entry, DTPropertyIterator iter); /* - Iterate Properites - Iterate and return properties for given entry. - When int == kIterationDone, all properties have been exhausted. -*/ + * Iterate Properites + * Iterate and return properties for given entry. + * When int == kIterationDone, all properties have been exhausted. + */ extern int DTIterateProperties(DTPropertyIterator iterator, - char **foundProperty); + char **foundProperty); /* - Restart Property Iteration - Used to re-iterate over a list of properties. The Property Iterator is - reset to the beginning of the list of properties for an entry. -*/ + * Restart Property Iteration + * Used to re-iterate over a list of properties. The Property Iterator is + * reset to the beginning of the list of properties for an entry. 
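Taken together, the calls documented above compose as in the following usage sketch. The path and property names ("/chosen", "boot-args") are hypothetical placeholders, not something this header guarantees:

/* Sketch: look an entry up by absolute path (searchPoint == NULL), read
 * one property, then walk the entry's children with an iterator. */
static void
dt_walk_sketch(void)
{
    DTEntry entry;
    if (DTLookupEntry(NULL, "/chosen", &entry) != kSuccess) {
        return;
    }

    void *value;
    unsigned int size;
    if (DTGetProperty(entry, "boot-args", &value, &size) == kSuccess) {
        /* value points into the flattened tree; size is its byte length */
    }

    OpaqueDTEntryIterator iter;
    DTEntry child;
    if (DTInitEntryIterator(entry, &iter) == kSuccess) {
        while (DTIterateEntries(&iter, &child) == kSuccess) {
            /* visit child; the loop ends at kIterationDone */
        }
    }
}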
+ */ extern int DTRestartPropertyIteration(DTPropertyIterator iterator); diff --git a/pexpert/pexpert/i386/boot.h b/pexpert/pexpert/i386/boot.h index 55a2cab46..0476e8d33 100644 --- a/pexpert/pexpert/i386/boot.h +++ b/pexpert/pexpert/i386/boot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _PEXPERT_I386_BOOT_H @@ -38,68 +38,70 @@ * Types of boot driver that may be loaded by the booter. */ enum { - kBootDriverTypeInvalid = 0, - kBootDriverTypeKEXT = 1, - kBootDriverTypeMKEXT = 2 + kBootDriverTypeInvalid = 0, + kBootDriverTypeKEXT = 1, + kBootDriverTypeMKEXT = 2 }; enum { - kEfiReservedMemoryType = 0, - kEfiLoaderCode = 1, - kEfiLoaderData = 2, - kEfiBootServicesCode = 3, - kEfiBootServicesData = 4, - kEfiRuntimeServicesCode = 5, - kEfiRuntimeServicesData = 6, - kEfiConventionalMemory = 7, - kEfiUnusableMemory = 8, - kEfiACPIReclaimMemory = 9, - kEfiACPIMemoryNVS = 10, - kEfiMemoryMappedIO = 11, - kEfiMemoryMappedIOPortSpace = 12, - kEfiPalCode = 13, - kEfiMaxMemoryType = 14 + kEfiReservedMemoryType = 0, + kEfiLoaderCode = 1, + kEfiLoaderData = 2, + kEfiBootServicesCode = 3, + kEfiBootServicesData = 4, + kEfiRuntimeServicesCode = 5, + kEfiRuntimeServicesData = 6, + kEfiConventionalMemory = 7, + kEfiUnusableMemory = 8, + kEfiACPIReclaimMemory = 9, + kEfiACPIMemoryNVS = 10, + kEfiMemoryMappedIO = 11, + kEfiMemoryMappedIOPortSpace = 12, + kEfiPalCode = 13, + kEfiMaxMemoryType = 14 }; /* * Memory range descriptor. */ typedef struct EfiMemoryRange { - uint32_t Type; - uint32_t Pad; - uint64_t PhysicalStart; - uint64_t VirtualStart; - uint64_t NumberOfPages; - uint64_t Attribute; + uint32_t Type; + uint32_t Pad; + uint64_t PhysicalStart; + uint64_t VirtualStart; + uint64_t NumberOfPages; + uint64_t Attribute; } EfiMemoryRange; #define BOOT_LINE_LENGTH 1024 #define BOOT_STRING_LEN BOOT_LINE_LENGTH /* - * Video information.. + * Video information.. 
*/ struct Boot_VideoV1 { - uint32_t v_baseAddr; /* Base address of video memory */ - uint32_t v_display; /* Display Code (if Applicable */ - uint32_t v_rowBytes; /* Number of bytes per pixel row */ - uint32_t v_width; /* Width */ - uint32_t v_height; /* Height */ - uint32_t v_depth; /* Pixel Depth */ + uint32_t v_baseAddr; /* Base address of video memory */ + uint32_t v_display; /* Display Code (if Applicable */ + uint32_t v_rowBytes; /* Number of bytes per pixel row */ + uint32_t v_width; /* Width */ + uint32_t v_height; /* Height */ + uint32_t v_depth; /* Pixel Depth */ }; -typedef struct Boot_VideoV1 Boot_VideoV1; +typedef struct Boot_VideoV1 Boot_VideoV1; struct Boot_Video { - uint32_t v_display; /* Display Code (if Applicable */ - uint32_t v_rowBytes; /* Number of bytes per pixel row */ - uint32_t v_width; /* Width */ - uint32_t v_height; /* Height */ - uint32_t v_depth; /* Pixel Depth */ - uint32_t v_resv[7]; /* Reserved */ - uint64_t v_baseAddr; /* Base address of video memory */ + uint32_t v_display; /* Display Code (if Applicable */ + uint32_t v_rowBytes; /* Number of bytes per pixel row */ + uint32_t v_width; /* Width */ + uint32_t v_height; /* Height */ + uint32_t v_depth; /* Pixel Depth */ + uint8_t v_rotate; /* Rotation */ + uint8_t v_resv_byte[3]; /* Reserved */ + uint32_t v_resv[6]; /* Reserved */ + uint64_t v_baseAddr; /* Base address of video memory */ }; -typedef struct Boot_Video Boot_Video; +typedef struct Boot_Video Boot_Video; /* Values for v_display */ @@ -108,97 +110,95 @@ typedef struct Boot_Video Boot_Video; /* Struct describing an image passed in by the booter */ struct boot_icon_element { - unsigned int width; - unsigned int height; - int y_offset_from_center; - unsigned int data_size; - unsigned int __reserved1[4]; - unsigned char data[0]; + unsigned int width; + unsigned int height; + int y_offset_from_center; + unsigned int data_size; + unsigned int __reserved1[4]; + unsigned char data[0]; }; typedef struct boot_icon_element boot_icon_element; /* Boot argument structure - passed into Mach kernel at boot time. 
* "Revision" can be incremented for compatible changes */ -#define kBootArgsRevision 0 -#define kBootArgsVersion 2 +#define kBootArgsRevision 0 +#define kBootArgsVersion 2 /* Snapshot constants of previous revisions that are supported */ -#define kBootArgsVersion1 1 -#define kBootArgsVersion2 2 -#define kBootArgsRevision2_0 0 +#define kBootArgsVersion1 1 +#define kBootArgsVersion2 2 +#define kBootArgsRevision2_0 0 #define kBootArgsEfiMode32 32 #define kBootArgsEfiMode64 64 /* Bitfields for boot_args->flags */ -#define kBootArgsFlagRebootOnPanic (1 << 0) -#define kBootArgsFlagHiDPI (1 << 1) -#define kBootArgsFlagBlack (1 << 2) -#define kBootArgsFlagCSRActiveConfig (1 << 3) -#define kBootArgsFlagCSRConfigMode (1 << 4) -#define kBootArgsFlagCSRBoot (1 << 5) -#define kBootArgsFlagBlackBg (1 << 6) -#define kBootArgsFlagLoginUI (1 << 7) -#define kBootArgsFlagInstallUI (1 << 8) +#define kBootArgsFlagRebootOnPanic (1 << 0) +#define kBootArgsFlagHiDPI (1 << 1) +#define kBootArgsFlagBlack (1 << 2) +#define kBootArgsFlagCSRActiveConfig (1 << 3) +#define kBootArgsFlagCSRConfigMode (1 << 4) +#define kBootArgsFlagCSRBoot (1 << 5) +#define kBootArgsFlagBlackBg (1 << 6) +#define kBootArgsFlagLoginUI (1 << 7) +#define kBootArgsFlagInstallUI (1 << 8) typedef struct boot_args { - uint16_t Revision; /* Revision of boot_args structure */ - uint16_t Version; /* Version of boot_args structure */ - - uint8_t efiMode; /* 32 = 32-bit, 64 = 64-bit */ - uint8_t debugMode; /* Bit field with behavior changes */ - uint16_t flags; - - char CommandLine[BOOT_LINE_LENGTH]; /* Passed in command line */ - - uint32_t MemoryMap; /* Physical address of memory map */ - uint32_t MemoryMapSize; - uint32_t MemoryMapDescriptorSize; - uint32_t MemoryMapDescriptorVersion; - - Boot_VideoV1 VideoV1; /* Video Information */ - - uint32_t deviceTreeP; /* Physical address of flattened device tree */ - uint32_t deviceTreeLength; /* Length of flattened tree */ - - uint32_t kaddr; /* Physical address of beginning of kernel text */ - uint32_t ksize; /* Size of combined kernel text+data+efi */ - - uint32_t efiRuntimeServicesPageStart; /* physical address of defragmented runtime pages */ - uint32_t efiRuntimeServicesPageCount; - uint64_t efiRuntimeServicesVirtualPageStart; /* virtual address of defragmented runtime pages */ - - uint32_t efiSystemTable; /* physical address of system table in runtime area */ - uint32_t kslide; - - uint32_t performanceDataStart; /* physical address of log */ - uint32_t performanceDataSize; - - uint32_t keyStoreDataStart; /* physical address of key store data */ - uint32_t keyStoreDataSize; - uint64_t bootMemStart; - uint64_t bootMemSize; - uint64_t PhysicalMemorySize; - uint64_t FSBFrequency; - uint64_t pciConfigSpaceBaseAddress; - uint32_t pciConfigSpaceStartBusNumber; - uint32_t pciConfigSpaceEndBusNumber; - uint32_t csrActiveConfig; - uint32_t csrCapabilities; - uint32_t boot_SMC_plimit; - uint16_t bootProgressMeterStart; - uint16_t bootProgressMeterEnd; - Boot_Video Video; /* Video Information */ - - uint32_t apfsDataStart; /* Physical address of apfs volume key structure */ - uint32_t apfsDataSize; - - uint32_t __reserved4[710]; - + uint16_t Revision; /* Revision of boot_args structure */ + uint16_t Version; /* Version of boot_args structure */ + + uint8_t efiMode;/* 32 = 32-bit, 64 = 64-bit */ + uint8_t debugMode;/* Bit field with behavior changes */ + uint16_t flags; + + char CommandLine[BOOT_LINE_LENGTH];/* Passed in command line */ + + uint32_t MemoryMap;/* Physical address of memory map */ + uint32_t 
MemoryMapSize; + uint32_t MemoryMapDescriptorSize; + uint32_t MemoryMapDescriptorVersion; + + Boot_VideoV1 VideoV1; /* Video Information */ + + uint32_t deviceTreeP; /* Physical address of flattened device tree */ + uint32_t deviceTreeLength;/* Length of flattened tree */ + + uint32_t kaddr; /* Physical address of beginning of kernel text */ + uint32_t ksize; /* Size of combined kernel text+data+efi */ + + uint32_t efiRuntimeServicesPageStart;/* physical address of defragmented runtime pages */ + uint32_t efiRuntimeServicesPageCount; + uint64_t efiRuntimeServicesVirtualPageStart;/* virtual address of defragmented runtime pages */ + + uint32_t efiSystemTable;/* physical address of system table in runtime area */ + uint32_t kslide; + + uint32_t performanceDataStart;/* physical address of log */ + uint32_t performanceDataSize; + + uint32_t keyStoreDataStart;/* physical address of key store data */ + uint32_t keyStoreDataSize; + uint64_t bootMemStart; + uint64_t bootMemSize; + uint64_t PhysicalMemorySize; + uint64_t FSBFrequency; + uint64_t pciConfigSpaceBaseAddress; + uint32_t pciConfigSpaceStartBusNumber; + uint32_t pciConfigSpaceEndBusNumber; + uint32_t csrActiveConfig; + uint32_t csrCapabilities; + uint32_t boot_SMC_plimit; + uint16_t bootProgressMeterStart; + uint16_t bootProgressMeterEnd; + Boot_Video Video; /* Video Information */ + + uint32_t apfsDataStart;/* Physical address of apfs volume key structure */ + uint32_t apfsDataSize; + + uint32_t __reserved4[710]; } boot_args; extern char assert_boot_args_size_is_4096[sizeof(boot_args) == 4096 ? 1 : -1]; #endif /* _PEXPERT_I386_BOOT_H */ - diff --git a/pexpert/pexpert/i386/efi.h b/pexpert/pexpert/i386/efi.h index 5ef501593..3ab0f1f38 100644 --- a/pexpert/pexpert/i386/efi.h +++ b/pexpert/pexpert/i386/efi.h @@ -2,7 +2,7 @@ * Copyright (c) 2005 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -58,17 +58,17 @@ typedef uint32_t EFI_HANDLE32; typedef uint64_t EFI_PTR64; typedef uint64_t EFI_HANDLE64; /* - -Portions Copyright 2004, Intel Corporation -All rights reserved. This program and the accompanying materials -are licensed and made available under the terms and conditions of the BSD License -which accompanies this distribution. The full text of the license may be found at - http://opensource.org/licenses/bsd-license.php - -THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, -WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. 
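Stepping back to the boot.h hunk just above: assert_boot_args_size_is_4096 is a pre-C11 compile-time assertion. When sizeof(boot_args) is exactly 4096 the array has size 1 and the declaration is legal; any drift makes the size -1, which no compiler accepts. The same idiom generalized (the macro name is mine, not the kernel's):

#include <stdint.h>

/* Sketch: a negative array size is a compile error, so the predicate is
 * checked at build time at zero runtime cost. */
#define COMPILE_TIME_ASSERT_SKETCH(cond, tag) \
    extern char assert_ ## tag[(cond) ? 1 : -1]

COMPILE_TIME_ASSERT_SKETCH(sizeof(uint64_t) == 8, uint64_is_8_bytes);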
- -*/ + * + * Portions Copyright 2004, Intel Corporation + * All rights reserved. This program and the accompanying materials + * are licensed and made available under the terms and conditions of the BSD License + * which accompanies this distribution. The full text of the license may be found at + * http://opensource.org/licenses/bsd-license.php + * + * THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, + * WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + * + */ // @@ -131,10 +131,10 @@ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. #define EFI_SPECIFICATION_MINOR_REVISION 10 typedef struct { - EFI_UINT32 Data1; - EFI_UINT16 Data2; - EFI_UINT16 Data3; - EFI_UINT8 Data4[8]; + EFI_UINT32 Data1; + EFI_UINT16 Data2; + EFI_UINT16 Data3; + EFI_UINT8 Data4[8]; } EFI_GUID; #define APPLE_VENDOR_GUID \ @@ -144,8 +144,8 @@ typedef struct { {0x8BE4DF61, 0x93CA, 0x11d2, {0xAA, 0x0D, 0x00, 0xE0, 0x98, 0x03, 0x2B, 0x8C} } typedef union { - EFI_GUID Guid; - EFI_UINT8 Raw[16]; + EFI_GUID Guid; + EFI_UINT8 Raw[16]; } EFI_GUID_UNION; // @@ -160,17 +160,17 @@ typedef union { // TimeZone: -1440 to 1440 or 2047 // typedef struct { - EFI_UINT16 Year; - EFI_UINT8 Month; - EFI_UINT8 Day; - EFI_UINT8 Hour; - EFI_UINT8 Minute; - EFI_UINT8 Second; - EFI_UINT8 Pad1; - EFI_UINT32 Nanosecond; - EFI_INT16 TimeZone; - EFI_UINT8 Daylight; - EFI_UINT8 Pad2; + EFI_UINT16 Year; + EFI_UINT8 Month; + EFI_UINT8 Day; + EFI_UINT8 Hour; + EFI_UINT8 Minute; + EFI_UINT8 Second; + EFI_UINT8 Pad1; + EFI_UINT32 Nanosecond; + EFI_INT16 TimeZone; + EFI_UINT8 Daylight; + EFI_UINT8 Pad2; } EFI_TIME; // @@ -185,29 +185,29 @@ typedef struct { #define EFI_UNSPECIFIED_TIMEZONE 0x07FF typedef enum { - EfiReservedMemoryType, - EfiLoaderCode, - EfiLoaderData, - EfiBootServicesCode, - EfiBootServicesData, - EfiRuntimeServicesCode, - EfiRuntimeServicesData, - EfiConventionalMemory, - EfiUnusableMemory, - EfiACPIReclaimMemory, - EfiACPIMemoryNVS, - EfiMemoryMappedIO, - EfiMemoryMappedIOPortSpace, - EfiPalCode, - EfiMaxMemoryType + EfiReservedMemoryType, + EfiLoaderCode, + EfiLoaderData, + EfiBootServicesCode, + EfiBootServicesData, + EfiRuntimeServicesCode, + EfiRuntimeServicesData, + EfiConventionalMemory, + EfiUnusableMemory, + EfiACPIReclaimMemory, + EfiACPIMemoryNVS, + EfiMemoryMappedIO, + EfiMemoryMappedIOPortSpace, + EfiPalCode, + EfiMaxMemoryType } EFI_MEMORY_TYPE; typedef struct { - EFI_UINT64 Signature; - EFI_UINT32 Revision; - EFI_UINT32 HeaderSize; - EFI_UINT32 CRC32; - EFI_UINT32 Reserved; + EFI_UINT64 Signature; + EFI_UINT32 Revision; + EFI_UINT32 HeaderSize; + EFI_UINT32 CRC32; + EFI_UINT32 Reserved; } __attribute__((aligned(8))) EFI_TABLE_HEADER; // @@ -236,32 +236,32 @@ typedef EFI_UINT64 EFI_VIRTUAL_ADDRESS; #define EFI_MEMORY_DESCRIPTOR_VERSION 1 typedef struct { - EFI_UINT32 Type; - EFI_UINT32 Pad; - EFI_PHYSICAL_ADDRESS PhysicalStart; - EFI_VIRTUAL_ADDRESS VirtualStart; - EFI_UINT64 NumberOfPages; - EFI_UINT64 Attribute; + EFI_UINT32 Type; + EFI_UINT32 Pad; + EFI_PHYSICAL_ADDRESS PhysicalStart; + EFI_VIRTUAL_ADDRESS VirtualStart; + EFI_UINT64 NumberOfPages; + EFI_UINT64 Attribute; } __attribute__((aligned(8))) EFI_MEMORY_DESCRIPTOR; typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_SET_VIRTUAL_ADDRESS_MAP) ( - IN EFI_UINTN MemoryMapSize, - IN EFI_UINTN DescriptorSize, - IN EFI_UINT32 DescriptorVersion, - IN EFI_MEMORY_DESCRIPTOR * VirtualMap - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_SET_VIRTUAL_ADDRESS_MAP)( + IN EFI_UINTN 
MemoryMapSize, + IN EFI_UINTN DescriptorSize, + IN EFI_UINT32 DescriptorVersion, + IN EFI_MEMORY_DESCRIPTOR * VirtualMap + ); typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_CONVERT_POINTER) ( - IN EFI_UINTN DebugDisposition, - IN OUT VOID **Address - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_CONVERT_POINTER)( + IN EFI_UINTN DebugDisposition, + IN OUT VOID **Address + ); // // Variable attributes @@ -272,104 +272,103 @@ EFI_STATUS #define EFI_VARIABLE_READ_ONLY 0x00000008 typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_GET_VARIABLE) ( - IN EFI_CHAR16 * VariableName, - IN EFI_GUID * VendorGuid, - OUT EFI_UINT32 * Attributes OPTIONAL, - IN OUT EFI_UINTN * DataSize, - OUT VOID * Data - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_GET_VARIABLE)( + IN EFI_CHAR16 * VariableName, + IN EFI_GUID * VendorGuid, + OUT EFI_UINT32 * Attributes OPTIONAL, + IN OUT EFI_UINTN * DataSize, + OUT VOID * Data + ); typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_GET_NEXT_VARIABLE_NAME) ( - IN OUT EFI_UINTN * VariableNameSize, - IN OUT EFI_CHAR16 * VariableName, - IN OUT EFI_GUID * VendorGuid - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_GET_NEXT_VARIABLE_NAME)( + IN OUT EFI_UINTN * VariableNameSize, + IN OUT EFI_CHAR16 * VariableName, + IN OUT EFI_GUID * VendorGuid + ); typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_SET_VARIABLE) ( - IN EFI_CHAR16 * VariableName, - IN EFI_GUID * VendorGuid, - IN EFI_UINT32 Attributes, - IN EFI_UINTN DataSize, - IN VOID * Data - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_SET_VARIABLE)( + IN EFI_CHAR16 * VariableName, + IN EFI_GUID * VendorGuid, + IN EFI_UINT32 Attributes, + IN EFI_UINTN DataSize, + IN VOID * Data + ); // // EFI Time // typedef struct { - EFI_UINT32 Resolution; - EFI_UINT32 Accuracy; - EFI_BOOLEAN SetsToZero; + EFI_UINT32 Resolution; + EFI_UINT32 Accuracy; + EFI_BOOLEAN SetsToZero; } __attribute__((aligned(4))) EFI_TIME_CAPABILITIES; typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_GET_TIME) ( - OUT EFI_TIME * Time, - OUT EFI_TIME_CAPABILITIES * Capabilities OPTIONAL - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_GET_TIME)( + OUT EFI_TIME * Time, + OUT EFI_TIME_CAPABILITIES * Capabilities OPTIONAL + ); typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_SET_TIME) ( - IN EFI_TIME * Time - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_SET_TIME)( + IN EFI_TIME * Time + ); typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_GET_WAKEUP_TIME) ( - OUT EFI_BOOLEAN * Enabled, - OUT EFI_BOOLEAN * Pending, - OUT EFI_TIME * Time - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_GET_WAKEUP_TIME)( + OUT EFI_BOOLEAN * Enabled, + OUT EFI_BOOLEAN * Pending, + OUT EFI_TIME * Time + ); typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_SET_WAKEUP_TIME) ( - IN EFI_BOOLEAN Enable, - IN EFI_TIME * Time OPTIONAL - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_SET_WAKEUP_TIME)( + IN EFI_BOOLEAN Enable, + IN EFI_TIME * Time OPTIONAL + ); typedef enum { - EfiResetCold, - EfiResetWarm, - EfiResetShutdown, + EfiResetCold, + EfiResetWarm, + EfiResetShutdown, #ifdef TIANO_EXTENSION_FLAG - EfiResetUpdate + EfiResetUpdate #endif - } EFI_RESET_TYPE; typedef -EFI_RUNTIMESERVICE -VOID -(EFIAPI *EFI_RESET_SYSTEM) ( - IN EFI_RESET_TYPE ResetType, - IN EFI_STATUS ResetStatus, - IN EFI_UINTN DataSize, - IN EFI_CHAR16 * ResetData OPTIONAL - ); + EFI_RUNTIMESERVICE + VOID +(EFIAPI *EFI_RESET_SYSTEM)( + IN EFI_RESET_TYPE ResetType, + IN EFI_STATUS ResetStatus, + IN EFI_UINTN DataSize, + IN EFI_CHAR16 * ResetData OPTIONAL 
+ ); typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_GET_NEXT_HIGH_MONO_COUNT) ( - OUT EFI_UINT32 * HighCount - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_GET_NEXT_HIGH_MONO_COUNT)( + OUT EFI_UINT32 * HighCount + ); // // Definition of Status Code extended data header @@ -386,15 +385,15 @@ EFI_STATUS #ifdef TIANO_EXTENSION_FLAG typedef -EFI_RUNTIMESERVICE -EFI_STATUS -(EFIAPI *EFI_REPORT_STATUS_CODE) ( - IN EFI_STATUS_CODE_TYPE Type, - IN EFI_STATUS_CODE_VALUE Value, - IN EFI_UINT32 Instance, - IN EFI_GUID * CallerId OPTIONAL, - IN EFI_STATUS_CODE_DATA * Data OPTIONAL - ); + EFI_RUNTIMESERVICE + EFI_STATUS +(EFIAPI *EFI_REPORT_STATUS_CODE)( + IN EFI_STATUS_CODE_TYPE Type, + IN EFI_STATUS_CODE_VALUE Value, + IN EFI_UINT32 Instance, + IN EFI_GUID * CallerId OPTIONAL, + IN EFI_STATUS_CODE_DATA * Data OPTIONAL + ); #endif // @@ -404,98 +403,96 @@ EFI_STATUS #define EFI_RUNTIME_SERVICES_REVISION ((EFI_SPECIFICATION_MAJOR_REVISION << 16) | (EFI_SPECIFICATION_MINOR_REVISION)) typedef struct { - EFI_TABLE_HEADER Hdr; - - // - // Time services - // - EFI_PTR32 GetTime; - EFI_PTR32 SetTime; - EFI_PTR32 GetWakeupTime; - EFI_PTR32 SetWakeupTime; - - // - // Virtual memory services - // - EFI_PTR32 SetVirtualAddressMap; - EFI_PTR32 ConvertPointer; - - // - // Variable services - // - EFI_PTR32 GetVariable; - EFI_PTR32 GetNextVariableName; - EFI_PTR32 SetVariable; - - // - // Misc - // - EFI_PTR32 GetNextHighMonotonicCount; - EFI_PTR32 ResetSystem; + EFI_TABLE_HEADER Hdr; + + // + // Time services + // + EFI_PTR32 GetTime; + EFI_PTR32 SetTime; + EFI_PTR32 GetWakeupTime; + EFI_PTR32 SetWakeupTime; + + // + // Virtual memory services + // + EFI_PTR32 SetVirtualAddressMap; + EFI_PTR32 ConvertPointer; + + // + // Variable services + // + EFI_PTR32 GetVariable; + EFI_PTR32 GetNextVariableName; + EFI_PTR32 SetVariable; + + // + // Misc + // + EFI_PTR32 GetNextHighMonotonicCount; + EFI_PTR32 ResetSystem; #ifdef TIANO_EXTENSION_FLAG - // - // //////////////////////////////////////////////////// - // Extended EFI Services - ////////////////////////////////////////////////////// - // - EFI_PTR32 ReportStatusCode; + // + // //////////////////////////////////////////////////// + // Extended EFI Services + ////////////////////////////////////////////////////// + // + EFI_PTR32 ReportStatusCode; #endif - } __attribute__((aligned(8))) EFI_RUNTIME_SERVICES_32; typedef struct { - EFI_TABLE_HEADER Hdr; - - // - // Time services - // - EFI_PTR64 GetTime; - EFI_PTR64 SetTime; - EFI_PTR64 GetWakeupTime; - EFI_PTR64 SetWakeupTime; - - // - // Virtual memory services - // - EFI_PTR64 SetVirtualAddressMap; - EFI_PTR64 ConvertPointer; - - // - // Variable services - // - EFI_PTR64 GetVariable; - EFI_PTR64 GetNextVariableName; - EFI_PTR64 SetVariable; - - // - // Misc - // - EFI_PTR64 GetNextHighMonotonicCount; - EFI_PTR64 ResetSystem; + EFI_TABLE_HEADER Hdr; + + // + // Time services + // + EFI_PTR64 GetTime; + EFI_PTR64 SetTime; + EFI_PTR64 GetWakeupTime; + EFI_PTR64 SetWakeupTime; + + // + // Virtual memory services + // + EFI_PTR64 SetVirtualAddressMap; + EFI_PTR64 ConvertPointer; + + // + // Variable services + // + EFI_PTR64 GetVariable; + EFI_PTR64 GetNextVariableName; + EFI_PTR64 SetVariable; + + // + // Misc + // + EFI_PTR64 GetNextHighMonotonicCount; + EFI_PTR64 ResetSystem; #ifdef TIANO_EXTENSION_FLAG - // - // //////////////////////////////////////////////////// - // Extended EFI Services - ////////////////////////////////////////////////////// - // - EFI_PTR64 ReportStatusCode; + // + // 
//////////////////////////////////////////////////// + // Extended EFI Services + ////////////////////////////////////////////////////// + // + EFI_PTR64 ReportStatusCode; #endif - } __attribute__((aligned(8))) EFI_RUNTIME_SERVICES_64; // // EFI Configuration Table // typedef struct { - EFI_GUID VendorGuid; - EFI_PTR32 VendorTable; + EFI_GUID VendorGuid; + EFI_PTR32 VendorTable; } EFI_CONFIGURATION_TABLE_32; typedef struct { - EFI_GUID VendorGuid; - EFI_PTR64 VendorTable; + EFI_GUID VendorGuid; + EFI_PTR64 VendorTable; } __attribute__((aligned(8))) EFI_CONFIGURATION_TABLE_64; // @@ -508,51 +505,49 @@ typedef struct { #define EFI_1_10_SYSTEM_TABLE_REVISION ((1 << 16) | 10) typedef struct EFI_SYSTEM_TABLE_32 { - EFI_TABLE_HEADER Hdr; + EFI_TABLE_HEADER Hdr; - EFI_PTR32 FirmwareVendor; - EFI_UINT32 FirmwareRevision; + EFI_PTR32 FirmwareVendor; + EFI_UINT32 FirmwareRevision; - EFI_HANDLE32 ConsoleInHandle; - EFI_PTR32 ConIn; + EFI_HANDLE32 ConsoleInHandle; + EFI_PTR32 ConIn; - EFI_HANDLE32 ConsoleOutHandle; - EFI_PTR32 ConOut; + EFI_HANDLE32 ConsoleOutHandle; + EFI_PTR32 ConOut; - EFI_HANDLE32 StandardErrorHandle; - EFI_PTR32 StdErr; + EFI_HANDLE32 StandardErrorHandle; + EFI_PTR32 StdErr; - EFI_PTR32 RuntimeServices; - EFI_PTR32 BootServices; - - EFI_UINT32 NumberOfTableEntries; - EFI_PTR32 ConfigurationTable; + EFI_PTR32 RuntimeServices; + EFI_PTR32 BootServices; + EFI_UINT32 NumberOfTableEntries; + EFI_PTR32 ConfigurationTable; } __attribute__((aligned(8))) EFI_SYSTEM_TABLE_32; typedef struct EFI_SYSTEM_TABLE_64 { - EFI_TABLE_HEADER Hdr; - - EFI_PTR64 FirmwareVendor; - EFI_UINT32 FirmwareRevision; + EFI_TABLE_HEADER Hdr; - EFI_UINT32 __pad; + EFI_PTR64 FirmwareVendor; + EFI_UINT32 FirmwareRevision; - EFI_HANDLE64 ConsoleInHandle; - EFI_PTR64 ConIn; + EFI_UINT32 __pad; - EFI_HANDLE64 ConsoleOutHandle; - EFI_PTR64 ConOut; + EFI_HANDLE64 ConsoleInHandle; + EFI_PTR64 ConIn; - EFI_HANDLE64 StandardErrorHandle; - EFI_PTR64 StdErr; + EFI_HANDLE64 ConsoleOutHandle; + EFI_PTR64 ConOut; - EFI_PTR64 RuntimeServices; - EFI_PTR64 BootServices; + EFI_HANDLE64 StandardErrorHandle; + EFI_PTR64 StdErr; - EFI_UINT64 NumberOfTableEntries; - EFI_PTR64 ConfigurationTable; + EFI_PTR64 RuntimeServices; + EFI_PTR64 BootServices; + EFI_UINT64 NumberOfTableEntries; + EFI_PTR64 ConfigurationTable; } __attribute__((aligned(8))) EFI_SYSTEM_TABLE_64; #endif /* _PEXPERT_I386_EFI_H */ diff --git a/pexpert/pexpert/i386/protos.h b/pexpert/pexpert/i386/protos.h index 86ba54d31..fb4c5f3dc 100644 --- a/pexpert/pexpert/i386/protos.h +++ b/pexpert/pexpert/i386/protos.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
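An aside on how the tables above fit together: NumberOfTableEntries and ConfigurationTable in EFI_SYSTEM_TABLE_64 describe an array of EFI_CONFIGURATION_TABLE_64 records keyed by vendor GUID. A lookup sketch under that assumption (illustrative; it ignores any physical-to-virtual translation the pointer may need):

#include <stdint.h>
#include <string.h>

/* Sketch: scan the 64-bit configuration table for a vendor GUID and
 * return its VendorTable value, or 0 if the GUID is absent. */
static EFI_PTR64
efi_find_config_table_sketch(const EFI_SYSTEM_TABLE_64 *st,
    const EFI_GUID *guid)
{
    const EFI_CONFIGURATION_TABLE_64 *ct =
        (const EFI_CONFIGURATION_TABLE_64 *)(uintptr_t)st->ConfigurationTable;

    for (EFI_UINT64 i = 0; i < st->NumberOfTableEntries; i++) {
        if (memcmp(&ct[i].VendorGuid, guid, sizeof(EFI_GUID)) == 0) {
            return ct[i].VendorTable;
        }
    }
    return 0;
}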
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _PEXPERT_I386_PROTOS_H @@ -45,9 +45,9 @@ in##s(i386_ioport_t port) \ { \ unsigned u data; \ asm volatile ( \ - "in" #s " %1,%0" \ - : "=a" (data) \ - : "d" (port)); \ + "in" #s " %1,%0" \ + : "=a" (data) \ + : "d" (port)); \ return (data); \ } @@ -56,9 +56,9 @@ static __inline__ void \ out##s(i386_ioport_t port, unsigned u data) \ { \ asm volatile ( \ - "out" #s " %1,%0" \ - : \ - : "d" (port), "a" (data)); \ + "out" #s " %1,%0" \ + : \ + : "d" (port), "a" (data)); \ } __IN(b, char) diff --git a/pexpert/pexpert/machine/boot.h b/pexpert/pexpert/machine/boot.h index 1afdf9167..4d5e34950 100644 --- a/pexpert/pexpert/machine/boot.h +++ b/pexpert/pexpert/machine/boot.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _PEXPERT_MACHINE_BOOT_H diff --git a/pexpert/pexpert/machine/protos.h b/pexpert/pexpert/machine/protos.h index 1ec64a18f..d0aaa6887 100644 --- a/pexpert/pexpert/machine/protos.h +++ b/pexpert/pexpert/machine/protos.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
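The __IN/__OUT generators in the i386/protos.h hunk above expand to the usual inb()/outb() family (port in %dx, data in %al, and so on). A usage sketch, assuming __OUT(b, char) accompanies the __IN(b, char) shown, with the conventional PC CMOS index/data ports as the example device (the port numbers are standard x86 convention, not something this header defines):

/* Sketch: read a CMOS RTC register -- write the index to port 0x70,
 * then read the value back from port 0x71. */
static unsigned char
cmos_read_sketch(unsigned char index)
{
    outb(0x70, index);               /* select the CMOS register */
    return (unsigned char)inb(0x71); /* fetch its contents */
}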
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _PEXPERT_MACHINE_PROTOS_H diff --git a/pexpert/pexpert/pe_images.h b/pexpert/pexpert/pe_images.h index 35f179a19..548791529 100644 --- a/pexpert/pexpert/pe_images.h +++ b/pexpert/pexpert/pe_images.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -30,11 +30,11 @@ #include struct boot_progress_element { - unsigned int width; - unsigned int height; - int yOffset; - unsigned int res[5]; - unsigned char data[0]; + unsigned int width; + unsigned int height; + int yOffset; + unsigned int res[5]; + unsigned char data[0]; }; typedef struct boot_progress_element boot_progress_element; @@ -48,12 +48,12 @@ static const unsigned char * default_progress_data3x = gGearPict3x; static const unsigned char * default_progress_data3x = NULL; #endif -static vc_progress_element default_progress = - { 0, 4|1, 1000 / kGearFPS, kGearFrames, {0, 0, 0}, - kGearWidth, kGearHeight, 0, kGearOffset, - 0, {0, 0, 0} }; +static vc_progress_element default_progress = +{ 0, 4 | 1, 1000 / kGearFPS, kGearFrames, {0, 0, 0}, + kGearWidth, kGearHeight, 0, kGearOffset, + 0, {0, 0, 0} }; -static vc_progress_element default_noroot = - { 0, 1, 0, 0, {0, 0, 0}, - 128, 128, 0, 0, - -1, {0, 0, 0} }; +static vc_progress_element default_noroot = +{ 0, 1, 0, 0, {0, 0, 0}, + 128, 128, 0, 0, + -1, {0, 0, 0} }; diff --git a/pexpert/pexpert/pexpert.h b/pexpert/pexpert/pexpert.h index df4dd8db7..d01a6ee19 100644 --- a/pexpert/pexpert/pexpert.h +++ b/pexpert/pexpert/pexpert.h @@ -2,7 +2,7 @@ * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _PEXPERT_PEXPERT_H_ @@ -45,7 +45,7 @@ __BEGIN_DECLS #endif #include -#if defined(PEXPERT_KERNEL_PRIVATE) || defined(IOKIT_KERNEL_PRIVATE) +#if defined(PEXPERT_KERNEL_PRIVATE) || defined(IOKIT_KERNEL_PRIVATE) typedef void *cpu_id_t; #else typedef void *cpu_id_t; @@ -70,13 +70,13 @@ extern struct macos_panic_header *panic_info; #endif /* CONFIG_EMBEDDED */ #endif /* XNU_KERNEL_PRIVATE */ -extern void lpss_uart_enable (boolean_t on_off); +extern void lpss_uart_enable(boolean_t on_off); void PE_enter_debugger( const char *cause); void PE_init_platform( - boolean_t vm_initialized, + boolean_t vm_initialized, void *args); /* @@ -118,9 +118,9 @@ void PE_update_panicheader_nestedpanic( * participate similarly */ -#define kPEICanHasAssertions 0x00000001 /* Exceptional conditions should panic() instead of printf() */ -#define kPEICanHasStatistics 0x00000002 /* Gather expensive statistics (that don't otherwise change behavior */ -#define kPEICanHasDiagnosticAPI 0x00000004 /* Vend API to userspace or kexts that introspect kernel state */ +#define kPEICanHasAssertions 0x00000001 /* Exceptional conditions should panic() instead of printf() */ +#define kPEICanHasStatistics 0x00000002 /* Gather expensive statistics (that don't otherwise change behavior */ +#define kPEICanHasDiagnosticAPI 0x00000004 /* Vend API to userspace or kexts that introspect kernel state */ extern uint32_t PE_i_can_has_kernel_configuration(void); @@ -148,31 +148,31 @@ void PE_init_iokit( void); struct clock_frequency_info_t { - unsigned long bus_clock_rate_hz; - unsigned long cpu_clock_rate_hz; - unsigned long dec_clock_rate_hz; - unsigned long bus_clock_rate_num; - unsigned long bus_clock_rate_den; - unsigned long bus_to_cpu_rate_num; - unsigned long bus_to_cpu_rate_den; - unsigned long bus_to_dec_rate_num; - unsigned long bus_to_dec_rate_den; - unsigned long timebase_frequency_hz; - unsigned long timebase_frequency_num; - unsigned long timebase_frequency_den; - unsigned long long bus_frequency_hz; - unsigned long long bus_frequency_min_hz; - unsigned long long bus_frequency_max_hz; - unsigned long long cpu_frequency_hz; - unsigned long long cpu_frequency_min_hz; - unsigned long long cpu_frequency_max_hz; - unsigned long long prf_frequency_hz; - unsigned long long prf_frequency_min_hz; - unsigned long long prf_frequency_max_hz; - unsigned long long mem_frequency_hz; - unsigned long long mem_frequency_min_hz; - unsigned long long mem_frequency_max_hz; - unsigned long long fix_frequency_hz; + unsigned long bus_clock_rate_hz; + unsigned long cpu_clock_rate_hz; + unsigned long dec_clock_rate_hz; + unsigned long bus_clock_rate_num; + unsigned long bus_clock_rate_den; + unsigned long bus_to_cpu_rate_num; + unsigned long bus_to_cpu_rate_den; + unsigned long bus_to_dec_rate_num; + unsigned long bus_to_dec_rate_den; + unsigned long timebase_frequency_hz; + unsigned long timebase_frequency_num; + unsigned long timebase_frequency_den; + unsigned long long bus_frequency_hz; + unsigned long long bus_frequency_min_hz; + unsigned long long bus_frequency_max_hz; + unsigned long 
long cpu_frequency_hz; + unsigned long long cpu_frequency_min_hz; + unsigned long long cpu_frequency_max_hz; + unsigned long long prf_frequency_hz; + unsigned long long prf_frequency_min_hz; + unsigned long long prf_frequency_max_hz; + unsigned long long mem_frequency_hz; + unsigned long long mem_frequency_min_hz; + unsigned long long mem_frequency_max_hz; + unsigned long long fix_frequency_hz; }; extern int debug_cpu_performance_degradation_factor; @@ -182,8 +182,8 @@ typedef struct clock_frequency_info_t clock_frequency_info_t; extern clock_frequency_info_t gPEClockFrequencyInfo; struct timebase_freq_t { - unsigned long timebase_num; - unsigned long timebase_den; + unsigned long timebase_num; + unsigned long timebase_den; }; typedef void (*timebase_callback_func)(struct timebase_freq_t *timebase_freq); @@ -195,12 +195,12 @@ void PE_call_timebase_callback(void); #ifdef KERNEL void PE_install_interrupt_handler( void *nub, int source, - void *target, IOInterruptHandler handler, void *refCon); + void *target, IOInterruptHandler handler, void *refCon); #endif #ifndef _FN_KPRINTF -#define _FN_KPRINTF -void kprintf(const char *fmt, ...) __printflike(1,2); +#define _FN_KPRINTF +void kprintf(const char *fmt, ...) __printflike(1, 2); #endif #if KERNEL_PRIVATE @@ -219,57 +219,57 @@ void init_display_putc(unsigned char *baseaddr, int rowbytes, int height); void display_putc(char c); enum { - kPEReadTOD, - kPEWriteTOD + kPEReadTOD, + kPEWriteTOD }; extern int (*PE_read_write_time_of_day)( - unsigned int options, + unsigned int options, long * secs); enum { - kPEWaitForInput = 0x00000001, - kPERawInput = 0x00000002 + kPEWaitForInput = 0x00000001, + kPERawInput = 0x00000002 }; extern int (*PE_poll_input)( - unsigned int options, + unsigned int options, char * c); extern int (*PE_write_IIC)( - unsigned char addr, + unsigned char addr, unsigned char reg, unsigned char data); /* Private Stuff - eventually put in pexpertprivate.h */ enum { - kDebugTypeNone = 0, - kDebugTypeDisplay = 1, - kDebugTypeSerial = 2 + kDebugTypeNone = 0, + kDebugTypeDisplay = 1, + kDebugTypeSerial = 2 }; /* Scale factor values for PE_Video.v_scale */ enum { - kPEScaleFactorUnknown = 0, - kPEScaleFactor1x = 1, - kPEScaleFactor2x = 2 + kPEScaleFactorUnknown = 0, + kPEScaleFactor1x = 1, + kPEScaleFactor2x = 2 }; struct PE_Video { - unsigned long v_baseAddr; /* Base address of video memory */ - unsigned long v_rowBytes; /* Number of bytes per pixel row */ - unsigned long v_width; /* Width */ - unsigned long v_height; /* Height */ - unsigned long v_depth; /* Pixel Depth */ - unsigned long v_display; /* Text or Graphics */ - char v_pixelFormat[64]; - unsigned long v_offset; /* offset into video memory to start at */ - unsigned long v_length; /* length of video memory (0 for v_rowBytes * v_height) */ - unsigned char v_rotate; /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */ - unsigned char v_scale; /* Scale Factor for both X & Y */ - char reserved1[2]; + unsigned long v_baseAddr; /* Base address of video memory */ + unsigned long v_rowBytes; /* Number of bytes per pixel row */ + unsigned long v_width; /* Width */ + unsigned long v_height; /* Height */ + unsigned long v_depth; /* Pixel Depth */ + unsigned long v_display; /* Text or Graphics */ + char v_pixelFormat[64]; + unsigned long v_offset; /* offset into video memory to start at */ + unsigned long v_length; /* length of video memory (0 for v_rowBytes * v_height) */ + unsigned char v_rotate; /* Rotation: 0:normal, 1:right 90, 2:left 180, 3:left 90 */ + unsigned char 
v_scale; /* Scale Factor for both X & Y */ + char reserved1[2]; #ifdef __LP64__ - long reserved2; + long reserved2; #else - long v_baseAddrHigh; + long v_baseAddrHigh; #endif }; @@ -283,30 +283,30 @@ extern int PE_current_console( PE_Video *info); extern void PE_create_console( - void); + void); extern int PE_initialize_console( - PE_Video *newInfo, + PE_Video *newInfo, int op); -#define kPEGraphicsMode 1 -#define kPETextMode 2 -#define kPETextScreen 3 -#define kPEAcquireScreen 4 -#define kPEReleaseScreen 5 -#define kPEEnableScreen 6 -#define kPEDisableScreen 7 -#define kPEBaseAddressChange 8 -#define kPERefreshBootGraphics 9 +#define kPEGraphicsMode 1 +#define kPETextMode 2 +#define kPETextScreen 3 +#define kPEAcquireScreen 4 +#define kPEReleaseScreen 5 +#define kPEEnableScreen 6 +#define kPEDisableScreen 7 +#define kPEBaseAddressChange 8 +#define kPERefreshBootGraphics 9 extern void PE_display_icon( unsigned int flags, - const char * name ); + const char * name ); typedef struct PE_state { - boolean_t initialized; - PE_Video video; - void *deviceTreeHead; - void *bootArgs; + boolean_t initialized; + PE_Video video; + void *deviceTreeHead; + void *bootArgs; } PE_state_t; extern PE_state_t PE_state; @@ -316,14 +316,14 @@ extern char * PE_boot_args( #if !defined(__LP64__) && !defined(__arm__) extern boolean_t PE_parse_boot_arg( - const char *arg_string, - void *arg_ptr) __deprecated; + const char *arg_string, + void *arg_ptr) __deprecated; #endif extern boolean_t PE_parse_boot_argn( - const char *arg_string, - void *arg_ptr, - int max_arg); + const char *arg_string, + void *arg_ptr, + int max_arg); #if XNU_KERNEL_PRIVATE extern boolean_t PE_parse_boot_arg_str( @@ -333,25 +333,25 @@ extern boolean_t PE_parse_boot_arg_str( #endif /* XNU_KERNEL_PRIVATE */ extern boolean_t PE_get_default( - const char *property_name, - void *property_ptr, + const char *property_name, + void *property_ptr, unsigned int max_property); -#define PE_default_value(_key, _variable, _default) \ - do { \ - if (!PE_get_default((_key), &(_variable), sizeof(_variable))) \ - _variable = _default; \ +#define PE_default_value(_key, _variable, _default) \ + do { \ + if (!PE_get_default((_key), &(_variable), sizeof(_variable))) \ + _variable = _default; \ } while(0) enum { - kPEOptionKey = 0x3a, - kPECommandKey = 0x37, - kPEControlKey = 0x36, - kPEShiftKey = 0x38 + kPEOptionKey = 0x3a, + kPECommandKey = 0x37, + kPEControlKey = 0x36, + kPEShiftKey = 0x38 }; extern boolean_t PE_get_hotkey( - unsigned char key); + unsigned char key); extern kern_return_t PE_cpu_start( cpu_id_t target, diff --git a/pexpert/pexpert/protos.h b/pexpert/pexpert/protos.h index c9b6ae694..15bea419a 100644 --- a/pexpert/pexpert/protos.h +++ b/pexpert/pexpert/protos.h @@ -2,7 +2,7 @@ * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #ifndef _PEXPERT_PROTOS_H_ @@ -50,19 +50,19 @@ extern void interrupt_disable(void); //------------------------------------------------------------------------ //from kern/misc_protos.h -extern void +extern void _doprnt( - const char *fmt, - va_list *argp, - void (*putc)(char), - int radix); + const char *fmt, + va_list *argp, + void (*putc)(char), + int radix); -extern void +extern void _doprnt_log( - const char *fmt, - va_list *argp, - void (*putc)(char), - int radix); + const char *fmt, + va_list *argp, + void (*putc)(char), + int radix); #include @@ -88,7 +88,7 @@ void Debugger(const char *message); extern void StartIOKit( void * p1, void * p2, void * p3, void * p4); // from iokit/Families/IOFramebuffer.cpp -extern unsigned char appleClut8[ 256 * 3 ]; +extern unsigned char appleClut8[256 * 3]; #endif /* PEXPERT_KERNEL_PRIVATE */ diff --git a/san/Kasan_kasan.exports b/san/Kasan_kasan.exports index 4372d6af7..4911193fb 100644 --- a/san/Kasan_kasan.exports +++ b/san/Kasan_kasan.exports @@ -100,6 +100,7 @@ ___asan_version_mismatch_check_apple_802 ___asan_version_mismatch_check_apple_900 ___asan_version_mismatch_check_apple_902 ___asan_version_mismatch_check_apple_1000 +___asan_version_mismatch_check_apple_1001 ___asan_init ___asan_memcpy ___asan_memmove diff --git a/san/kasan-arm64.c b/san/kasan-arm64.c index 056e531c4..3d3a23364 100644 --- a/san/kasan-arm64.c +++ b/san/kasan-arm64.c @@ -78,7 +78,7 @@ void flush_mmu_tlb(void); _Static_assert(KASAN_SHIFT == KASAN_SHIFT_ARM64, "KASan inconsistent shadow shift"); _Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM"); _Static_assert((VM_MIN_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM"); -_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM"); +_Static_assert((VM_MAX_KERNEL_ADDRESS >> 3) + KASAN_SHIFT_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM"); static uintptr_t alloc_page(void) @@ -167,11 +167,11 @@ kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, bool is_zero, boo newpte = (uint64_t)alloc_zero_page() | ARM_PTE_AP(AP_RWNA); } newpte |= ARM_PTE_TYPE_VALID - | ARM_PTE_AF - | ARM_PTE_SH(SH_OUTER_MEMORY) - | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT) - | ARM_PTE_NX - | ARM_PTE_PNX; + | ARM_PTE_AF + | ARM_PTE_SH(SH_OUTER_MEMORY) + | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT) + | ARM_PTE_NX + | ARM_PTE_PNX; *pte = newpte; } } @@ -233,7 +233,7 @@ kasan_map_shadow_early(vm_offset_t address, vm_size_t size, bool is_zero) /* lookup L3 entry */ pte = base + ((virt_shadow_target & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT); - if ((*pte & (ARM_PTE_TYPE|ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID|ARM_PTE_AP(AP_RWNA))) { + if ((*pte & (ARM_PTE_TYPE | ARM_PTE_APMASK)) == (ARM_PTE_TYPE_VALID | ARM_PTE_AP(AP_RWNA))) { /* L3 entry valid and mapped RW - do nothing */ } else { /* Not mapped, or mapped RO - create new L3 entry or upgrade to RW */ @@ -251,11 +251,11 @@ kasan_map_shadow_early(vm_offset_t address, 
vm_size_t size, bool is_zero) /* add the default attributes */ newpte |= ARM_PTE_TYPE_VALID - | ARM_PTE_AF - | ARM_PTE_SH(SH_OUTER_MEMORY) - | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT) - | ARM_PTE_NX - | ARM_PTE_PNX; + | ARM_PTE_AF + | ARM_PTE_SH(SH_OUTER_MEMORY) + | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT) + | ARM_PTE_NX + | ARM_PTE_PNX; *pte = newpte; } diff --git a/san/kasan-fakestack.c b/san/kasan-fakestack.c index 0680f0858..add9941a9 100644 --- a/san/kasan-fakestack.c +++ b/san/kasan-fakestack.c @@ -242,7 +242,7 @@ kasan_fakestack_free(int sz_class, uptr dst, size_t realsz) kasan_free_internal((void **)&dst, &sz, KASAN_HEAP_FAKESTACK, &zone, realsz, 1, FAKESTACK_QUARANTINE); if (dst) { - zfree(zone, (void *)dst); + zfree(zone, dst); } kasan_unlock(flags); @@ -286,7 +286,7 @@ kasan_init_fakestack(void) assert(z); zone_change(z, Z_NOCALLOUT, TRUE); zone_change(z, Z_EXHAUST, TRUE); - zone_change(z, Z_EXPAND, FALSE); + zone_change(z, Z_EXPAND, FALSE); zone_change(z, Z_COLLECT, FALSE); zone_change(z, Z_KASAN_QUARANTINE, FALSE); zfill(z, maxsz / sz); @@ -329,7 +329,8 @@ kasan_fakestack_free(int __unused sz_class, uptr __unused dst, size_t __unused r #endif -void kasan_init_thread(struct kasan_thread_data *td) +void +kasan_init_thread(struct kasan_thread_data *td) { LIST_INIT(&td->fakestack_head); } diff --git a/san/kasan-test.c b/san/kasan-test.c index 672a6645e..e64c69d84 100644 --- a/san/kasan-test.c +++ b/san/kasan-test.c @@ -76,11 +76,12 @@ struct kasan_test { #define TEST_START(t) do { t->result = 1; TEST_BARRIER(); } while (0) #define TEST_FAULT(t) do { TEST_BARRIER(); t->result = 0; TEST_BARRIER(); } while (0) #define TEST_NOFAULT(t) do { TEST_BARRIER(); t->result = 1; TEST_BARRIER(); } while (0) -#define TEST_DONE(t,res) do { t->result = (res); kasan_handle_test(); } while (0) -#define DECLARE_TEST(f,s) { .func = f, .name = s } -#define DECLARE_TEST3(f,c,s) { .func = f, .cleanup = c, .name = s } +#define TEST_DONE(t, res) do { t->result = (res); kasan_handle_test(); } while (0) +#define DECLARE_TEST(f, s) { .func = f, .name = s } +#define DECLARE_TEST3(f, c, s) { .func = f, .cleanup = c, .name = s } -static void heap_cleanup(struct kasan_test *t) +static void +heap_cleanup(struct kasan_test *t) { if (t->data) { kfree(t->data, t->datasz); @@ -88,7 +89,8 @@ static void heap_cleanup(struct kasan_test *t) } } -static int test_global_overflow(struct kasan_test __unused *t) +static int +test_global_overflow(struct kasan_test __unused *t) { int i; /* rookie error */ @@ -98,7 +100,8 @@ static int test_global_overflow(struct kasan_test __unused *t) return 0; } -static int test_heap_underflow(struct kasan_test __unused *t) +static int +test_heap_underflow(struct kasan_test __unused *t) { uint8_t *x = kalloc(BUFSZ); if (!x) { @@ -110,7 +113,8 @@ static int test_heap_underflow(struct kasan_test __unused *t) return 0; } -static int test_heap_overflow(struct kasan_test __unused *t) +static int +test_heap_overflow(struct kasan_test __unused *t) { uint8_t *x = kalloc(BUFSZ); if (!x) { @@ -122,7 +126,8 @@ static int test_heap_overflow(struct kasan_test __unused *t) return 0; } -static int test_heap_uaf(struct kasan_test __unused *t) +static int +test_heap_uaf(struct kasan_test __unused *t) { uint8_t *x = kalloc(LBUFSZ); if (!x) { @@ -133,14 +138,17 @@ static int test_heap_uaf(struct kasan_test __unused *t) return 0; } -static int test_heap_inval_free(struct kasan_test __unused *t) +static int +test_heap_inval_free(struct kasan_test __unused *t) { int x; - kfree(&x, BUFSZ); + int *ptr = &x; + 
kfree(ptr, BUFSZ); return 0; } -static int test_heap_double_free(struct kasan_test *t) +static int +test_heap_double_free(struct kasan_test *t) { TEST_START(t); @@ -156,7 +164,8 @@ static int test_heap_double_free(struct kasan_test *t) return 0; } -static int test_heap_small_free(struct kasan_test *t) +static int +test_heap_small_free(struct kasan_test *t) { TEST_START(t); @@ -168,14 +177,15 @@ static int test_heap_small_free(struct kasan_test *t) t->data = x; TEST_FAULT(t); - kfree(x, BUFSZ-2); + kfree(x, BUFSZ - 2); t->data = NULL; t->datasz = 0; return 0; } -static int test_stack_overflow(struct kasan_test *t) +static int +test_stack_overflow(struct kasan_test *t) { TEST_START(t); @@ -195,7 +205,8 @@ static int test_stack_overflow(struct kasan_test *t) return !(a[0] == 0); } -static int test_stack_underflow(struct kasan_test *t) +static int +test_stack_underflow(struct kasan_test *t) { TEST_START(t); @@ -206,9 +217,9 @@ static int test_stack_underflow(struct kasan_test *t) /* generate a negative index without the compiler noticing */ #if __x86_64__ - __asm__ __volatile__("movq $-1, %0" : "=r"(idx) :: "memory"); + __asm__ __volatile__ ("movq $-1, %0" : "=r"(idx) :: "memory"); #else - __asm__ __volatile__("mov %0, #-1" : "=r"(idx) :: "memory"); + __asm__ __volatile__ ("mov %0, #-1" : "=r"(idx) :: "memory"); #endif TEST_FAULT(t); @@ -216,10 +227,11 @@ static int test_stack_underflow(struct kasan_test *t) TEST_NOFAULT(t); TEST_BARRIER(); - return (a[0] == 0); + return a[0] == 0; } -static int test_memcpy(struct kasan_test *t) +static int +test_memcpy(struct kasan_test *t) { TEST_START(t); uint8_t a1[STACK_ARRAY_SZ]; @@ -232,13 +244,14 @@ static int test_memcpy(struct kasan_test *t) /* should fail */ TEST_FAULT(t); - memcpy(a2, a1, STACK_ARRAY_SZ+1); + memcpy(a2, a1, STACK_ARRAY_SZ + 1); TEST_NOFAULT(t); return 0; } -static int test_memmove(struct kasan_test *t) +static int +test_memmove(struct kasan_test *t) { TEST_START(t); uint8_t a1[STACK_ARRAY_SZ]; @@ -251,13 +264,14 @@ static int test_memmove(struct kasan_test *t) /* should fail */ TEST_FAULT(t); - memmove(a2, a1, STACK_ARRAY_SZ+1); + memmove(a2, a1, STACK_ARRAY_SZ + 1); TEST_NOFAULT(t); return 0; } -static int test_bcopy(struct kasan_test *t) +static int +test_bcopy(struct kasan_test *t) { TEST_START(t); uint8_t a1[STACK_ARRAY_SZ]; @@ -270,13 +284,14 @@ static int test_bcopy(struct kasan_test *t) /* should fail */ TEST_FAULT(t); - bcopy(a2, a1, STACK_ARRAY_SZ+1); + bcopy(a2, a1, STACK_ARRAY_SZ + 1); TEST_NOFAULT(t); return 0; } -static int test_memset(struct kasan_test *t) +static int +test_memset(struct kasan_test *t) { TEST_START(t); uint8_t a1[STACK_ARRAY_SZ]; @@ -288,67 +303,74 @@ static int test_memset(struct kasan_test *t) /* should fail */ TEST_FAULT(t); - memset(a1, 'f', STACK_ARRAY_SZ+1); + memset(a1, 'f', STACK_ARRAY_SZ + 1); TEST_NOFAULT(t); return 0; } -static int test_memcmp(struct kasan_test *t) +static int +test_memcmp(struct kasan_test *t) { TEST_START(t); uint8_t *a1; uint8_t *a2; a1 = kalloc(STACK_ARRAY_SZ); - if (!a1) + if (!a1) { return 1; - a2 = kalloc(STACK_ARRAY_SZ+1); - if (!a2) + } + a2 = kalloc(STACK_ARRAY_SZ + 1); + if (!a2) { return 1; + } /* should work */ memcmp(a1, a2, STACK_ARRAY_SZ); - memcmp(a1, a2+1, STACK_ARRAY_SZ); + memcmp(a1, a2 + 1, STACK_ARRAY_SZ); TEST_BARRIER(); /* should fail */ TEST_FAULT(t); - memcmp(a1, a2, STACK_ARRAY_SZ+1); + memcmp(a1, a2, STACK_ARRAY_SZ + 1); TEST_NOFAULT(t); return 0; } -static int test_bcmp(struct kasan_test *t) +static int +test_bcmp(struct kasan_test *t) { 
TEST_START(t); uint8_t *a1; uint8_t *a2; a1 = kalloc(STACK_ARRAY_SZ); - if (!a1) + if (!a1) { return 1; - a2 = kalloc(STACK_ARRAY_SZ+1); - if (!a2) + } + a2 = kalloc(STACK_ARRAY_SZ + 1); + if (!a2) { return 1; + } /* should work */ bcmp(a1, a2, STACK_ARRAY_SZ); - bcmp(a1, a2+1, STACK_ARRAY_SZ); + bcmp(a1, a2 + 1, STACK_ARRAY_SZ); TEST_BARRIER(); /* should fail */ TEST_FAULT(t); - bcmp(a1, a2, STACK_ARRAY_SZ+1); + bcmp(a1, a2, STACK_ARRAY_SZ + 1); TEST_NOFAULT(t); return 0; } -static int test_bzero(struct kasan_test *t) +static int +test_bzero(struct kasan_test *t) { TEST_START(t); uint8_t a1[STACK_ARRAY_SZ]; @@ -360,13 +382,14 @@ static int test_bzero(struct kasan_test *t) /* should fail */ TEST_FAULT(t); - bzero(a1, STACK_ARRAY_SZ+1); + bzero(a1, STACK_ARRAY_SZ + 1); TEST_NOFAULT(t); return 0; } -static int test_strlcpy(struct kasan_test *t) +static int +test_strlcpy(struct kasan_test *t) { TEST_START(t); char a1[8]; @@ -382,7 +405,8 @@ static int test_strlcpy(struct kasan_test *t) return 0; } -static int test_strncpy(struct kasan_test *t) +static int +test_strncpy(struct kasan_test *t) { TEST_START(t); char a1[9]; @@ -398,7 +422,8 @@ static int test_strncpy(struct kasan_test *t) return a1[0] != 'l'; } -static int test_strlcat(struct kasan_test *t) +static int +test_strlcat(struct kasan_test *t) { TEST_START(t); char a1[9] = {}; @@ -418,7 +443,8 @@ static int test_strlcat(struct kasan_test *t) return a1[0] != 'l'; } -static int test_strncat(struct kasan_test *t) +static int +test_strncat(struct kasan_test *t) { TEST_START(t); char a1[9] = {}; @@ -463,20 +489,24 @@ test_blacklist_str(struct kasan_test *t) } #if 0 -static int test_strnlen(struct kasan_test *t) +static int +test_strnlen(struct kasan_test *t) { TEST_START(t); const char *a1 = "abcdef"; /* should not fault */ - if (strnlen(a1, 6) != 6) + if (strnlen(a1, 6) != 6) { return 1; - if (strnlen(a1, 7) != 6) + } + if (strnlen(a1, 7) != 6) { return 1; + } TEST_FAULT(t); - if (strnlen(a1, 8) != 6) + if (strnlen(a1, 8) != 6) { return 1; + } TEST_NOFAULT(t); return a1[0] != 'a'; @@ -486,7 +516,7 @@ static int test_strnlen(struct kasan_test *t) static void OS_NOINLINE force_fakestack(char *x) { - __asm__ __volatile__("" :: "r" (x) : "memory"); + __asm__ __volatile__ ("" :: "r" (x) : "memory"); } OS_NOINLINE @@ -536,7 +566,8 @@ stack_uaf_helper(void) return uaf_ptr; } -static int test_stack_uaf(struct kasan_test __unused *t) +static int +test_stack_uaf(struct kasan_test __unused *t) { int *x = stack_uaf_helper(); *x = 0xb4d; @@ -547,32 +578,32 @@ static int test_stack_uaf(struct kasan_test __unused *t) static struct kasan_test xnu_tests[] = { DECLARE_TEST(NULL, NULL), DECLARE_TEST(test_global_overflow, "Global overflow"), - DECLARE_TEST3(test_heap_underflow, heap_cleanup, "Heap underflow"), - DECLARE_TEST3(test_heap_overflow, heap_cleanup, "Heap overflow"), - DECLARE_TEST(test_heap_uaf, "Heap use-after-free"), + DECLARE_TEST3(test_heap_underflow, heap_cleanup, "Heap underflow"), + DECLARE_TEST3(test_heap_overflow, heap_cleanup, "Heap overflow"), + DECLARE_TEST(test_heap_uaf, "Heap use-after-free"), DECLARE_TEST(test_heap_inval_free, "Heap invalid free"), - DECLARE_TEST(test_heap_double_free,"Heap double free"), + DECLARE_TEST(test_heap_double_free, "Heap double free"), DECLARE_TEST3(test_heap_small_free, heap_cleanup, "Heap small free"), - DECLARE_TEST(test_stack_overflow, "Stack overflow"), + DECLARE_TEST(test_stack_overflow, "Stack overflow"), DECLARE_TEST(test_stack_underflow, "Stack underflow"), - DECLARE_TEST(test_stack_uaf, "Stack 
use-after-return"),
-	DECLARE_TEST(test_memcpy, "memcpy"),
-	DECLARE_TEST(test_memmove, "memmmove"),
-	DECLARE_TEST(test_bcopy, "bcopy"),
-	DECLARE_TEST(test_memset, "memset"),
-	DECLARE_TEST(test_memcmp, "memcmp"),
-	DECLARE_TEST(test_bcmp, "bcmp"),
-	DECLARE_TEST(test_bzero, "bzero"),
-	DECLARE_TEST(test_strlcpy, "strlcpy"),
-	DECLARE_TEST(test_strlcat, "strlcat"),
-	DECLARE_TEST(test_strncpy, "strncpy"),
-	DECLARE_TEST(test_strncat, "strncat"),
-	DECLARE_TEST(test_blacklist, "blacklist"),
-	DECLARE_TEST(test_blacklist_str, "blacklist_str"),
-	DECLARE_TEST(test_fakestack, "fakestack"),
+	DECLARE_TEST(test_stack_uaf, "Stack use-after-return"),
+	DECLARE_TEST(test_memcpy, "memcpy"),
+	DECLARE_TEST(test_memmove, "memmmove"),
+	DECLARE_TEST(test_bcopy, "bcopy"),
+	DECLARE_TEST(test_memset, "memset"),
+	DECLARE_TEST(test_memcmp, "memcmp"),
+	DECLARE_TEST(test_bcmp, "bcmp"),
+	DECLARE_TEST(test_bzero, "bzero"),
+	DECLARE_TEST(test_strlcpy, "strlcpy"),
+	DECLARE_TEST(test_strlcat, "strlcat"),
+	DECLARE_TEST(test_strncpy, "strncpy"),
+	DECLARE_TEST(test_strncat, "strncat"),
+	DECLARE_TEST(test_blacklist, "blacklist"),
+	DECLARE_TEST(test_blacklist_str, "blacklist_str"),
+	DECLARE_TEST(test_fakestack, "fakestack"),
 //	DECLARE_TEST(test_strnlen, "strnlen"),
 };
-static int num_xnutests = sizeof(xnu_tests)/sizeof(xnu_tests[0]);
+static int num_xnutests = sizeof(xnu_tests) / sizeof(xnu_tests[0]);
 
 static int
 kasan_run_test(struct kasan_test *test_list, int testno, int fail)
@@ -629,7 +660,7 @@ kasan_test(int testno, int fail)
 
 	if (testno == -1) {
 		/* shorthand for all tests */
-		testno = (1U << (num_xnutests-1)) - 1;
+		testno = (1U << (num_xnutests - 1)) - 1;
 	}
 
 	while (testno) {
diff --git a/san/kasan-x86_64.c b/san/kasan-x86_64.c
index 4b685e67f..9f266870a 100644
--- a/san/kasan-x86_64.c
+++ b/san/kasan-x86_64.c
@@ -36,6 +36,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
 #include
 #include
@@ -48,27 +50,8 @@
 #include
 
 extern uint64_t *IdlePML4;
-extern uintptr_t physmap_base;
-extern uintptr_t physmap_max;
 #define phys2virt(x) ((uintptr_t)(x) + physmap_base)
 
-#define INTEL_PTE_VALID         0x00000001ULL
-#define INTEL_PTE_WRITE         0x00000002ULL
-#define INTEL_PTE_RW            0x00000002ULL
-#define INTEL_PTE_USER          0x00000004ULL
-#define INTEL_PTE_WTHRU         0x00000008ULL
-#define INTEL_PTE_NCACHE        0x00000010ULL
-#define INTEL_PTE_REF           0x00000020ULL
-#define INTEL_PTE_MOD           0x00000040ULL
-#define INTEL_PTE_PS            0x00000080ULL
-#define INTEL_PTE_PTA           0x00000080ULL
-#define INTEL_PTE_GLOBAL        0x00000100ULL
-#define INTEL_PTE_WIRED         0x00000200ULL
-#define INTEL_PDPTE_NESTED      0x00000400ULL
-#define INTEL_PTE_PFN           PG_FRAME
-#define INTEL_PTE_NX            (1ULL << 63)
-#define INTEL_PTE_INVALID       0
-
 vm_offset_t shadow_pbase;
 vm_offset_t shadow_ptop;
 vm_offset_t shadow_pnext;
@@ -77,11 +60,11 @@ unsigned shadow_stolen_idx;
 
 static vm_offset_t zero_superpage_phys;
 
 typedef struct {
-	unsigned int pml4 : 9;
-	unsigned int pdpt : 9;
-	unsigned int pd : 9;
-	unsigned int pt : 9;
-	unsigned int offset : 12;
+	unsigned int pml4   : 9;
+	unsigned int pdpt   : 9;
+	unsigned int pd     : 9;
+	unsigned int pt     : 9;
+	unsigned int offset : 12;
 } split_addr_t;
 
 static split_addr_t
@@ -143,11 +126,11 @@ kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
 	vm_size_t j;
 	for (j = 0; j < size; j += I386_LPGBYTES * 8) {
-
 		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);
 
 		split_addr_t addr = split_address(virt_shadow_target);
-		assert(addr.pml4 == 507 || addr.pml4 == 508);
+		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
+		    addr.pml4 <= KERNEL_KASAN_PML4_LAST);
 
 		uint64_t *L3;
 		uint64_t *L2;
@@ -158,8 +141,8 @@ kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
 			uintptr_t pmem = alloc_page_zero();
 			L3 = (uint64_t *)phys2virt(pmem);
 			IdlePML4[addr.pml4] = pmem
-				| INTEL_PTE_VALID
-				| INTEL_PTE_WRITE;
+			    | INTEL_PTE_VALID
+			    | INTEL_PTE_WRITE;
 		} else {
 			L3 = (uint64_t *)phys2virt(L3);
 		}
@@ -169,8 +152,8 @@ kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
 			uintptr_t pmem = alloc_page_zero();
 			L2 = (uint64_t *)phys2virt(pmem);
 			L3[addr.pdpt] = pmem
-				| INTEL_PTE_VALID
-				| INTEL_PTE_WRITE;
+			    | INTEL_PTE_VALID
+			    | INTEL_PTE_WRITE;
 		} else {
 			L2 = (uint64_t *)phys2virt(L2);
 		}
@@ -178,12 +161,12 @@ kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
 		L1 = (uint64_t *)(L2[addr.pd] & ~PAGE_MASK);
 		if (L1 == NULL) {
 			L2[addr.pd] = (uint64_t)zero_superpage_phys
-				| INTEL_PTE_VALID
-				| INTEL_PTE_PS
-				| INTEL_PTE_NX;
+			    | INTEL_PTE_VALID
+			    | INTEL_PTE_PS
+			    | INTEL_PTE_NX;
 		} else {
 			panic("Unexpected shadow mapping, addr = %lx, sz = %lu\n",
-					address, size);
+			    address, size);
 		}
 
 		/* adding a new entry, this is not strictly required */
@@ -201,9 +184,9 @@ kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
 	assert((size & 0x7) == 0);
 
 	for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) {
-
 		split_addr_t addr = split_address(shadow_base);
-		assert(addr.pml4 == 507 || addr.pml4 == 508);
+		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
+		    addr.pml4 <= KERNEL_KASAN_PML4_LAST);
 
 		uint64_t *L3;
 		uint64_t *L2;
@@ -215,8 +198,8 @@ kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
 			uintptr_t pmem = alloc_page_zero();
 			L3 = (uint64_t *)phys2virt(pmem);
 			IdlePML4[addr.pml4] = pmem
-				| INTEL_PTE_VALID
-				| INTEL_PTE_WRITE;
+			    | INTEL_PTE_VALID
+			    | INTEL_PTE_WRITE;
 		} else {
 			L3 = (uint64_t *)phys2virt(L3);
 		}
@@ -226,14 +209,14 @@ kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
 			uintptr_t pmem = alloc_page_zero();
 			L2 = (uint64_t *)phys2virt(pmem);
 			L3[addr.pdpt] = pmem
-				| INTEL_PTE_VALID
-				| INTEL_PTE_WRITE;
+			    | INTEL_PTE_VALID
+			    | INTEL_PTE_WRITE;
 		} else {
 			L2 = (uint64_t *)phys2virt(L2);
 		}
 
 		uint64_t pde = L2[addr.pd];
-		if ((pde & (INTEL_PTE_VALID|INTEL_PTE_PS)) == (INTEL_PTE_VALID|INTEL_PTE_PS)) {
+		if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
 			/* Already mapped as a superpage */
 			continue;
 		}
@@ -243,8 +226,8 @@ kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
 			uintptr_t pmem = alloc_page_zero();
 			L1 = (uint64_t *)phys2virt(pmem);
 			L2[addr.pd] = pmem
-				| INTEL_PTE_VALID
-				| INTEL_PTE_WRITE;
+			    | INTEL_PTE_VALID
+			    | INTEL_PTE_WRITE;
 		} else {
 			L1 = (uint64_t *)phys2virt(L1);
 		}
@@ -256,11 +239,11 @@ kasan_map_shadow(vm_offset_t address, vm_size_t size, bool is_zero)
 			newpte = (uint64_t)zero_superpage_phys;
 		} else {
 			newpte = (vm_offset_t)alloc_page_zero()
-				| INTEL_PTE_WRITE;
+			    | INTEL_PTE_WRITE;
 		}
 		L1[addr.pt] = newpte
-			| INTEL_PTE_VALID
-			| INTEL_PTE_NX;
+		    | INTEL_PTE_VALID
+		    | INTEL_PTE_NX;
 
 		/* adding a new entry, this is not strictly required */
 		invlpg(shadow_base);
@@ -339,7 +322,8 @@ bool
 kasan_is_shadow_mapped(uintptr_t shadowp)
 {
 	split_addr_t addr = split_address(shadowp);
-	assert(addr.pml4 == 507 || addr.pml4 == 508);
+	assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
+	    addr.pml4 <= KERNEL_KASAN_PML4_LAST);
 
 	uint64_t *L3;
 	uint64_t *L2;
@@ -358,7 +342,7 @@ kasan_is_shadow_mapped(uintptr_t shadowp)
 	L2 = (uint64_t *)phys2virt(L2);
 
 	uint64_t pde = L2[addr.pd];
-	if ((pde & (INTEL_PTE_VALID|INTEL_PTE_PS)) == (INTEL_PTE_VALID|INTEL_PTE_PS)) {
+	if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
 		/* mapped as superpage */
 		return true;
 	}
diff --git a/san/kasan.c b/san/kasan.c
index a34d479aa..9ec9433df 100644
--- a/san/kasan.c
+++ b/san/kasan.c
@@ -102,7 +102,7 @@ void
 kasan_lock(boolean_t *b)
 {
 	*b = ml_set_interrupts_enabled(false);
-	simple_lock(&kasan_vm_lock);
+	simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
 	kasan_lock_holder = current_thread();
 }
 
@@ -1313,6 +1313,7 @@ UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
 UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
 UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
 UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
 
 void UNSUPPORTED_API(__asan_init_v5, void);
 void UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
diff --git a/san/kasan.h b/san/kasan.h
index 4682692a8..308efa2e9 100644
--- a/san/kasan.h
+++ b/san/kasan.h
@@ -114,8 +114,8 @@ void __kasan_runtests(struct kasan_test *, int numtests);
 
 typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
-                                      vm_map_offset_t end,
-                                      void *context);
+    vm_map_offset_t end,
+    void *context);
 int kasan_traverse_mappings(pmap_traverse_callback, void *context);
 
 #if XNU_KERNEL_PRIVATE
@@ -176,7 +176,7 @@ extern const uintptr_t __asan_shadow_memory_dynamic_address;
 
 __BEGIN_DECLS
 
-KASAN_DECLARE_FOREACH_WIDTH(void, __asan_report_load, uptr);
+	KASAN_DECLARE_FOREACH_WIDTH(void, __asan_report_load, uptr);
 KASAN_DECLARE_FOREACH_WIDTH(void, __asan_report_store, uptr);
 KASAN_DECLARE_FOREACH_WIDTH(void, __asan_store, uptr);
 KASAN_DECLARE_FOREACH_WIDTH(void, __asan_report_exp_load, uptr, int32_t);
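[Editor's note — illustration, not part of the patch.] The kasan-x86_64.c hunks above replace the hard-coded PML4 slots (507 and 508) with the KERNEL_KASAN_PML4_FIRST..KERNEL_KASAN_PML4_LAST range. For readers unfamiliar with the 4-level x86_64 walk, here is a self-contained sketch of the decomposition that split_address() performs, using the same 9/9/9/9/12-bit layout as the split_addr_t bitfield in the patch; the sample address in main() is hypothetical.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	unsigned int pml4   : 9;
	unsigned int pdpt   : 9;
	unsigned int pd     : 9;
	unsigned int pt     : 9;
	unsigned int offset : 12;
} split_addr_t;

static split_addr_t
split_address(uint64_t addr)
{
	split_addr_t entry;

	/* Each table level indexes 512 (2^9) entries; pages are 4 KiB (2^12). */
	entry.offset = addr & 0xfffULL;
	entry.pt     = (addr >> 12) & 0x1ffULL;
	entry.pd     = (addr >> 21) & 0x1ffULL;
	entry.pdpt   = (addr >> 30) & 0x1ffULL;
	entry.pml4   = (addr >> 39) & 0x1ffULL;

	return entry;
}

int
main(void)
{
	/* hypothetical kernel shadow address, purely for illustration */
	uint64_t va = 0xfffffe1000000000ULL;
	split_addr_t a = split_address(va);

	/* pml4 can only be 0..511, which is why the old assert named 507/508 */
	printf("pml4=%u pdpt=%u pd=%u pt=%u offset=%u\n",
	    (unsigned)a.pml4, (unsigned)a.pdpt, (unsigned)a.pd,
	    (unsigned)a.pt, (unsigned)a.offset);
	return 0;
}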
diff --git a/san/kasan_dynamic_blacklist.c b/san/kasan_dynamic_blacklist.c
index 983b83576..04d0973dc 100644
--- a/san/kasan_dynamic_blacklist.c
+++ b/san/kasan_dynamic_blacklist.c
@@ -43,7 +43,7 @@ static void
 dybl_lock(boolean_t *b)
 {
 	*b = ml_set_interrupts_enabled(false);
-	simple_lock(&_dybl_lock);
+	simple_lock(&_dybl_lock, LCK_GRP_NULL);
 }
 
 static void
@@ -314,7 +314,6 @@ addr_to_func(uintptr_t addr, const kernel_mach_header_t *mh)
 	 * iterate the symbols, looking for the closest one to `addr'
 	 */
 	for (i = 0; i < (int)st->nsyms; i++) {
-
 		uint8_t n_type = syms[i].n_type;
 		const char *name = strings + syms[i].n_un.n_strx;
 
@@ -390,7 +389,7 @@ kasan_is_blacklisted(access_t type)
 			blhe->count++;
 			blhe->ble->count++;
 			// printf("KASan: blacklist cache hit (%s:%s [0x%lx] 0x%x)\n",
-			// 		ble->kext_name ?: "" , ble->func_name ?: "", VM_KERNEL_UNSLIDE(bt[i]), mask);
+			//      ble->kext_name ?: "" , ble->func_name ?: "", VM_KERNEL_UNSLIDE(bt[i]), mask);
 			dybl_unlock(flag);
 			return true;
 		}
@@ -398,7 +397,6 @@ kasan_is_blacklisted(access_t type)
 
 	/* no hits - slowpath */
 	for (uint32_t i = 0; i < nframes; i++) {
-
 		const char *kextname = NULL;
 		const char *funcname = NULL;
 
@@ -452,7 +450,7 @@ kasan_is_blacklisted(access_t type)
 
 		if (count == 0) {
 			printf("KASan: ignoring blacklisted violation (%s:%s [0x%lx] %d 0x%x)\n",
-					kextname, funcname, VM_KERNEL_UNSLIDE(bt[i]), i, type);
+			    kextname, funcname, VM_KERNEL_UNSLIDE(bt[i]), i, type);
 		}
 
 		return true;
@@ -525,9 +523,9 @@ static const struct {
 
 	/* convenience aliases */
 	{ .type = TYPE_POISON_GLOBAL, .str = "GLOB" },
-	{ .type = TYPE_POISON_HEAP, .str = "HEAP" },
+	{ .type = TYPE_POISON_HEAP,   .str = "HEAP" },
 };
-static size_t typemap_sz = sizeof(typemap)/sizeof(typemap[0]);
+static size_t typemap_sz = sizeof(typemap) / sizeof(typemap[0]);
 
 static inline access_t
 map_type(const char *str)
diff --git a/san/kasan_internal.h b/san/kasan_internal.h
index f593fbbba..7a920961e 100644
--- a/san/kasan_internal.h
+++ b/san/kasan_internal.h
@@ -55,7 +55,7 @@ typedef uintptr_t uptr;
 #ifdef __arm64__
 /* Works out at about 25% of 512 MiB and 15% of 3GiB system */
 # define STOLEN_MEM_PERCENT	13UL
-# define STOLEN_MEM_BYTES	MiB(62)
+# define STOLEN_MEM_BYTES	MiB(40)
 # define HW_PAGE_SIZE		(ARM_PGBYTES)
 # define HW_PAGE_MASK		(ARM_PGMASK)
 #else
@@ -112,15 +112,15 @@ enum __attribute__((flag_enum)) kasan_access_types {
 	TYPE_TEST = BIT(15),
 
 	/* masks */
-	TYPE_MEM     = TYPE_MEMR|TYPE_MEMW,            /* memory intrinsics */
-	TYPE_STR     = TYPE_STRR|TYPE_STRW,            /* string intrinsics */
-	TYPE_READ    = TYPE_LOAD|TYPE_MEMR|TYPE_STRR,  /* all reads */
-	TYPE_WRITE   = TYPE_STORE|TYPE_MEMW|TYPE_STRW, /* all writes */
-	TYPE_RW      = TYPE_READ|TYPE_WRITE,           /* reads and writes */
-	TYPE_FREE    = TYPE_KFREE|TYPE_ZFREE|TYPE_FSFREE,
-	TYPE_NORMAL  = TYPE_RW|TYPE_FREE,
-	TYPE_DYNAMIC = TYPE_NORMAL|TYPE_UAF,
-	TYPE_POISON  = TYPE_POISON_GLOBAL|TYPE_POISON_HEAP,
+	TYPE_MEM     = TYPE_MEMR | TYPE_MEMW,              /* memory intrinsics */
+	TYPE_STR     = TYPE_STRR | TYPE_STRW,              /* string intrinsics */
+	TYPE_READ    = TYPE_LOAD | TYPE_MEMR | TYPE_STRR,  /* all reads */
+	TYPE_WRITE   = TYPE_STORE | TYPE_MEMW | TYPE_STRW, /* all writes */
+	TYPE_RW      = TYPE_READ | TYPE_WRITE,             /* reads and writes */
+	TYPE_FREE    = TYPE_KFREE | TYPE_ZFREE | TYPE_FSFREE,
+	TYPE_NORMAL  = TYPE_RW | TYPE_FREE,
+	TYPE_DYNAMIC = TYPE_NORMAL | TYPE_UAF,
+	TYPE_POISON  = TYPE_POISON_GLOBAL | TYPE_POISON_HEAP,
 	TYPE_ALL     = ~0U,
 };
diff --git a/san/memintrinsics.h b/san/memintrinsics.h
index 5c7a75a23..9e5f2eda2 100644
--- a/san/memintrinsics.h
+++ b/san/memintrinsics.h
@@ -31,20 +31,72 @@
 /*
  * Non-sanitized versions of memory intrinsics
  */
-static inline void *__nosan_memcpy(void *dst, const void *src, size_t sz) { return memcpy(dst, src, sz); }
-static inline void *__nosan_memset(void *src, int c, size_t sz) { return memset(src, c, sz); }
-static inline void *__nosan_memmove(void *src, const void *dst, size_t sz) { return memmove(src, dst, sz); }
-static inline int __nosan_bcmp(const void *a, const void *b, size_t sz) { return bcmp(a, b, sz); }
-static inline void __nosan_bcopy(const void *src, void *dst, size_t sz) { return bcopy(src, dst, sz); }
-static inline int __nosan_memcmp(const void *a, const void *b, size_t sz) { return memcmp(a, b, sz); }
-static inline void __nosan_bzero(void *dst, size_t sz) { return bzero(dst, sz); }
+static inline void *
+__nosan_memcpy(void *dst, const void *src, size_t sz)
+{
+	return memcpy(dst, src, sz);
+}
+static inline void *
+__nosan_memset(void *src, int c, size_t sz)
+{
+	return memset(src, c, sz);
+}
+static inline void *
+__nosan_memmove(void *src, const void *dst, size_t sz)
+{
+	return memmove(src, dst, sz);
+}
+static inline int
+__nosan_bcmp(const void *a, const void *b, size_t sz)
+{
+	return bcmp(a, b, sz);
+}
+static inline void
+__nosan_bcopy(const void *src, void *dst, size_t sz)
+{
+	return bcopy(src, dst, sz);
+}
+static inline int
+__nosan_memcmp(const void *a, const void *b, size_t sz)
+{
+	return memcmp(a, b, sz);
+}
+static inline void
+__nosan_bzero(void *dst, size_t sz)
+{
+	return bzero(dst, sz);
+}
 
-static inline size_t __nosan_strlcpy(char *dst, const char *src, size_t sz) { return strlcpy(dst, src, sz); }
-static inline char *__nosan_strncpy(char *dst, const char *src, size_t sz) { return strncpy(dst, src, sz); }
-static inline size_t __nosan_strlcat(char *dst, const char *src, size_t sz) { return strlcat(dst, src, sz); }
-static inline char *__nosan_strncat(char *dst, const char *src, size_t sz) { return strncat(dst, src, sz); }
-static inline size_t __nosan_strnlen(const char *src, size_t sz) { return strnlen(src, sz); }
-static inline size_t __nosan_strlen(const char *src) { return strlen(src); }
+static inline size_t
+__nosan_strlcpy(char *dst, const char *src, size_t sz)
+{
+	return strlcpy(dst, src, sz);
+}
+static inline char *
+__nosan_strncpy(char *dst, const char *src, size_t sz)
+{
+	return strncpy(dst, src, sz);
+}
+static inline size_t
+__nosan_strlcat(char *dst, const char *src, size_t sz)
+{
+	return strlcat(dst, src, sz);
+}
+static inline char *
+__nosan_strncat(char *dst, const char *src, size_t sz)
+{
+	return strncat(dst, src, sz);
+}
+static inline size_t
+__nosan_strnlen(const char *src, size_t sz)
+{
+	return strnlen(src, sz);
+}
+static inline size_t
+__nosan_strlen(const char *src)
+{
+	return strlen(src);
+}
 
 #if KASAN
 void *__asan_memcpy(void *src, const void *dst, size_t sz);
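[Editor's note — illustration, not part of the patch.] The memintrinsics.h hunk above is pure reflow, but the wrappers it touches deserve a word: under KASan the ordinary memory intrinsics are interposed with bounds-checked versions, and the __nosan_* forms are the escape hatch that lets the sanitizer runtime itself copy memory without re-entering the checker. A minimal stand-alone sketch of that interposition pattern follows; shadow_check_range() is a hypothetical stand-in for the real shadow-memory lookup, and checked_memcpy likewise does not name a real xnu symbol.

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* hypothetical stand-in for the sanitizer's shadow-memory validation */
static void
shadow_check_range(const void *p, size_t sz)
{
	/* the real check walks shadow bytes; this stub only rejects NULL */
	assert(sz == 0 || p != NULL);
}

/* unchecked variant: goes straight to the underlying implementation */
static inline void *
__nosan_memcpy(void *dst, const void *src, size_t sz)
{
	return memcpy(dst, src, sz);
}

/* checked variant: validate both ranges, then delegate */
void *
checked_memcpy(void *dst, const void *src, size_t sz)
{
	shadow_check_range(src, sz);    /* read side */
	shadow_check_range(dst, sz);    /* write side */
	return __nosan_memcpy(dst, src, sz);
}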
"signed" : "unsigned", + overflow_str[v->ubsan_type], + ty->name, + 1 << ty->width, + v->lhs, + v->rhs + ); } static size_t @@ -91,9 +91,9 @@ format_shift(struct ubsan_violation *v, char *buf, size_t sz) struct san_type_desc *l = v->shift->lhs_t; struct san_type_desc *r = v->shift->rhs_t; - n += snprintf(buf+n, sz-n, "bad shift\n"); - n += snprintf(buf+n, sz-n, " lhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->lhs, l->name, l->issigned, 1 << l->width); - n += snprintf(buf+n, sz-n, " rhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->rhs, r->name, r->issigned, 1 << r->width); + n += snprintf(buf + n, sz - n, "bad shift\n"); + n += snprintf(buf + n, sz - n, " lhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->lhs, l->name, l->issigned, 1 << l->width); + n += snprintf(buf + n, sz - n, " rhs: 0x%llx, ty = %s, signed = %d, width = %d\n", v->rhs, r->name, r->issigned, 1 << r->width); return n; } @@ -113,9 +113,9 @@ format_alignment(struct ubsan_violation *v, char *buf, size_t sz) size_t n = 0; struct san_type_desc *ty = v->align->ty; - n += snprintf(buf+n, sz-n, "mis-aligned %s of 0x%llx\n", align_kinds[v->align->kind], v->lhs); - n += snprintf(buf+n, sz-n, " expected %d-byte alignment, type = %s\n", - 1 << v->align->align, ty->name); + n += snprintf(buf + n, sz - n, "mis-aligned %s of 0x%llx\n", align_kinds[v->align->kind], v->lhs); + n += snprintf(buf + n, sz - n, " expected %d-byte alignment, type = %s\n", + 1 << v->align->align, ty->name); return n; } @@ -127,10 +127,10 @@ format_oob(struct ubsan_violation *v, char *buf, size_t sz) struct san_type_desc *ity = v->oob->index_ty; uintptr_t idx = v->lhs; - n += snprintf(buf+n, sz-n, "OOB array access\n"); - n += snprintf(buf+n, sz-n, " idx %ld\n", idx); - n += snprintf(buf+n, sz-n, " aty: ty = %s, signed = %d, width = %d\n", aty->name, aty->issigned, 1 << aty->width); - n += snprintf(buf+n, sz-n, " ity: ty = %s, signed = %d, width = %d\n", ity->name, ity->issigned, 1 << ity->width); + n += snprintf(buf + n, sz - n, "OOB array access\n"); + n += snprintf(buf + n, sz - n, " idx %ld\n", idx); + n += snprintf(buf + n, sz - n, " aty: ty = %s, signed = %d, width = %d\n", aty->name, aty->issigned, 1 << aty->width); + n += snprintf(buf + n, sz - n, " ity: ty = %s, signed = %d, width = %d\n", ity->name, ity->issigned, 1 << ity->width); return n; } @@ -142,28 +142,28 @@ ubsan_format(struct ubsan_violation *v, char *buf, size_t sz) switch (v->ubsan_type) { case UBSAN_OVERFLOW_add ... 
UBSAN_OVERFLOW_negate: - n += format_overflow(v, buf+n, sz-n); + n += format_overflow(v, buf + n, sz - n); break; case UBSAN_UNREACHABLE: - n += snprintf(buf+n, sz-n, "unreachable\n"); + n += snprintf(buf + n, sz - n, "unreachable\n"); break; case UBSAN_SHIFT: - n += format_shift(v, buf+n, sz-n); + n += format_shift(v, buf + n, sz - n); break; case UBSAN_ALIGN: - n += format_alignment(v, buf+n, sz-n); + n += format_alignment(v, buf + n, sz - n); break; case UBSAN_POINTER_OVERFLOW: - n += snprintf(buf+n, sz-n, "pointer overflow, before = 0x%llx, after = 0x%llx\n", v->lhs, v->rhs); + n += snprintf(buf + n, sz - n, "pointer overflow, before = 0x%llx, after = 0x%llx\n", v->lhs, v->rhs); break; case UBSAN_OOB: - n += format_oob(v, buf+n, sz-n); + n += format_oob(v, buf + n, sz - n); break; default: panic("unknown violation"); } - n += format_loc(v->loc, buf+n, sz-n); + n += format_loc(v->loc, buf + n, sz - n); return n; } @@ -184,7 +184,7 @@ ubsan_handle(struct ubsan_violation *v, bool fatal) ubsan_log_append(v); if (ubsan_print || fatal) { - n += ubsan_format(v, buf+n, sz-n); + n += ubsan_format(v, buf + n, sz - n); } if (ubsan_print) { @@ -219,12 +219,12 @@ __ubsan_handle_shift_out_of_bounds_abort(struct ubsan_shift_desc *desc, uint64_t #define DEFINE_OVERFLOW(op) \ void __ubsan_handle_##op##_overflow(struct ubsan_overflow_desc *desc, uint64_t lhs, uint64_t rhs) { \ - struct ubsan_violation v = { UBSAN_OVERFLOW_##op, lhs, rhs, .overflow = desc, &desc->loc }; \ - ubsan_handle(&v, false); \ + struct ubsan_violation v = { UBSAN_OVERFLOW_##op, lhs, rhs, .overflow = desc, &desc->loc }; \ + ubsan_handle(&v, false); \ } \ void __ubsan_handle_##op##_overflow_abort(struct ubsan_overflow_desc *desc, uint64_t lhs, uint64_t rhs) { \ - struct ubsan_violation v = { UBSAN_OVERFLOW_##op, lhs, rhs, .overflow = desc, &desc->loc }; \ - ubsan_handle(&v, true); \ + struct ubsan_violation v = { UBSAN_OVERFLOW_##op, lhs, rhs, .overflow = desc, &desc->loc }; \ + ubsan_handle(&v, true); \ } DEFINE_OVERFLOW(add) @@ -233,37 +233,43 @@ DEFINE_OVERFLOW(mul) DEFINE_OVERFLOW(divrem) DEFINE_OVERFLOW(negate) -void __ubsan_handle_type_mismatch_v1(struct ubsan_align_desc *desc, uint64_t val) +void +__ubsan_handle_type_mismatch_v1(struct ubsan_align_desc *desc, uint64_t val) { struct ubsan_violation v = { UBSAN_ALIGN, val, 0, .align = desc, &desc->loc }; ubsan_handle(&v, false); } -void __ubsan_handle_type_mismatch_v1_abort(struct ubsan_align_desc *desc, uint64_t val) +void +__ubsan_handle_type_mismatch_v1_abort(struct ubsan_align_desc *desc, uint64_t val) { struct ubsan_violation v = { UBSAN_ALIGN, val, 0, .align = desc, &desc->loc }; ubsan_handle(&v, true); } -void __ubsan_handle_pointer_overflow(struct ubsan_ptroverflow_desc *desc, uint64_t before, uint64_t after) +void +__ubsan_handle_pointer_overflow(struct ubsan_ptroverflow_desc *desc, uint64_t before, uint64_t after) { struct ubsan_violation v = { UBSAN_POINTER_OVERFLOW, before, after, .ptroverflow = desc, &desc->loc }; ubsan_handle(&v, false); } -void __ubsan_handle_pointer_overflow_abort(struct ubsan_ptroverflow_desc *desc, uint64_t before, uint64_t after) +void +__ubsan_handle_pointer_overflow_abort(struct ubsan_ptroverflow_desc *desc, uint64_t before, uint64_t after) { struct ubsan_violation v = { UBSAN_POINTER_OVERFLOW, before, after, .ptroverflow = desc, &desc->loc }; ubsan_handle(&v, true); } -void __ubsan_handle_out_of_bounds(struct ubsan_oob_desc *desc, uint64_t idx) +void +__ubsan_handle_out_of_bounds(struct ubsan_oob_desc *desc, uint64_t idx) { struct 
ubsan_violation v = { UBSAN_OOB, idx, 0, .oob = desc, &desc->loc }; ubsan_handle(&v, false); } -void __ubsan_handle_out_of_bounds_abort(struct ubsan_oob_desc *desc, uint64_t idx) +void +__ubsan_handle_out_of_bounds_abort(struct ubsan_oob_desc *desc, uint64_t idx) { struct ubsan_violation v = { UBSAN_OOB, idx, 0, .oob = desc, &desc->loc }; ubsan_handle(&v, true); diff --git a/san/ubsan_log.c b/san/ubsan_log.c index dc06cd722..a02bf51df 100644 --- a/san/ubsan_log.c +++ b/san/ubsan_log.c @@ -104,7 +104,7 @@ sysctl_ubsan_log_dump SYSCTL_HANDLER_ARGS buf[0] = '\0'; for (size_t i = start; i != end; i = next_entry(i)) { - n += ubsan_format(&ubsan_log[i], buf+n, sz-n); + n += ubsan_format(&ubsan_log[i], buf + n, sz - n); } err = SYSCTL_OUT(req, buf, n); @@ -144,9 +144,9 @@ SYSCTL_NODE(_kern, OID_AUTO, ubsan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, ""); SYSCTL_COMPAT_UINT(_kern_ubsan, OID_AUTO, logsize, CTLFLAG_RD, NULL, (unsigned)ubsan_log_size, ""); SYSCTL_PROC(_kern_ubsan, OID_AUTO, logentries, - CTLTYPE_INT | CTLFLAG_RW, - 0, 0, sysctl_ubsan_log_entries, "I", ""); + CTLTYPE_INT | CTLFLAG_RW, + 0, 0, sysctl_ubsan_log_entries, "I", ""); SYSCTL_PROC(_kern_ubsan, OID_AUTO, log, - CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED, - 0, 0, sysctl_ubsan_log_dump, "A", ""); + CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MASKED, + 0, 0, sysctl_ubsan_log_dump, "A", ""); diff --git a/security/_label.h b/security/_label.h index cb4d9e8a6..2070bf3bd 100644 --- a/security/_label.h +++ b/security/_label.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -62,7 +62,7 @@ * $FreeBSD: src/sys/sys/_label.h,v 1.4 2003/05/08 19:49:42 rwatson Exp $ */ #ifndef _SECURITY_LABEL_H_ -#define _SECURITY_LABEL_H_ +#define _SECURITY_LABEL_H_ /* * XXXMAC: This shouldn't be exported to userland, but is because of ucred.h @@ -70,22 +70,22 @@ */ #if CONFIG_EMBEDDED #if CONFIG_VNGUARD -#define MAC_MAX_SLOTS 4 +#define MAC_MAX_SLOTS 4 #else -#define MAC_MAX_SLOTS 3 +#define MAC_MAX_SLOTS 3 #endif #else -#define MAC_MAX_SLOTS 7 +#define MAC_MAX_SLOTS 7 #endif -#define MAC_FLAG_INITIALIZED 0x0000001 /* Is initialized for use. */ +#define MAC_FLAG_INITIALIZED 0x0000001 /* Is initialized for use. 
*/ struct label { - int l_flags; + int l_flags; union { - void *l_ptr; - long l_long; - } l_perpolicy[MAC_MAX_SLOTS]; + void *l_ptr; + long l_long; + } l_perpolicy[MAC_MAX_SLOTS]; }; #endif /* !_SECURITY_LABEL_H_ */ diff --git a/security/mac.h b/security/mac.h index 0e58baf99..c76796aa7 100644 --- a/security/mac.h +++ b/security/mac.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -72,10 +72,10 @@ */ #ifndef _SECURITY_MAC_H_ -#define _SECURITY_MAC_H_ +#define _SECURITY_MAC_H_ #ifndef _POSIX_MAC -#define _POSIX_MAC +#define _POSIX_MAC #endif #include @@ -83,18 +83,18 @@ /* * MAC framework-related constants and limits. */ -#define MAC_MAX_POLICY_NAME 32 -#define MAC_MAX_LABEL_ELEMENT_NAME 32 -#define MAC_MAX_LABEL_ELEMENT_DATA 4096 -#define MAC_MAX_LABEL_BUF_LEN 8192 -#define MAC_MAX_MANAGED_NAMESPACES 4 +#define MAC_MAX_POLICY_NAME 32 +#define MAC_MAX_LABEL_ELEMENT_NAME 32 +#define MAC_MAX_LABEL_ELEMENT_DATA 4096 +#define MAC_MAX_LABEL_BUF_LEN 8192 +#define MAC_MAX_MANAGED_NAMESPACES 4 struct mac { - size_t m_buflen; - char *m_string; + size_t m_buflen; + char *m_string; }; -typedef struct mac *mac_t; +typedef struct mac *mac_t; #ifdef KERNEL @@ -111,36 +111,36 @@ typedef struct mac *mac_t; #endif struct user_mac { - user_size_t m_buflen; - user_addr_t m_string; + user_size_t m_buflen; + user_addr_t m_string; }; struct user32_mac { - uint32_t m_buflen; - uint32_t m_string; + uint32_t m_buflen; + uint32_t m_string; }; struct user64_mac { - uint64_t m_buflen; - uint64_t m_string; + uint64_t m_buflen; + uint64_t m_string; }; #endif /* KERNEL */ /* * Device types for mac_iokit_check_device() */ -#define MAC_DEVICE_USB "USB" -#define MAC_DEVICE_FIREWIRE "FireWire" -#define MAC_DEVICE_TYPE_KEY "DeviceType" +#define MAC_DEVICE_USB "USB" +#define MAC_DEVICE_FIREWIRE "FireWire" +#define MAC_DEVICE_TYPE_KEY "DeviceType" /* * Flags for mac_proc_check_suspend_resume() */ -#define MAC_PROC_CHECK_SUSPEND 0 -#define MAC_PROC_CHECK_RESUME 1 -#define MAC_PROC_CHECK_HIBERNATE 2 -#define MAC_PROC_CHECK_SHUTDOWN_SOCKETS 3 -#define MAC_PROC_CHECK_PIDBIND 4 +#define MAC_PROC_CHECK_SUSPEND 0 +#define MAC_PROC_CHECK_RESUME 1 +#define MAC_PROC_CHECK_HIBERNATE 2 +#define MAC_PROC_CHECK_SHUTDOWN_SOCKETS 3 +#define MAC_PROC_CHECK_PIDBIND 4 #ifndef KERNEL /* @@ -148,7 +148,7 @@ struct user64_mac { * binds policy names to shared libraries that understand those policies, * as well as setting defaults for MAC-aware applications. 
*/ -#define MAC_CONFFILE "/etc/mac.conf" +#define MAC_CONFFILE "/etc/mac.conf" /* * Extended non-POSIX.1e interfaces that offer additional services @@ -156,20 +156,20 @@ struct user64_mac { */ #ifdef __APPLE_API_PRIVATE __BEGIN_DECLS -int __mac_execve(char *fname, char **argv, char **envv, mac_t _label); -int __mac_get_fd(int _fd, mac_t _label); -int __mac_get_file(const char *_path, mac_t _label); -int __mac_get_link(const char *_path, mac_t _label); -int __mac_get_pid(pid_t _pid, mac_t _label); -int __mac_get_proc(mac_t _label); -int __mac_set_fd(int _fildes, const mac_t _label); -int __mac_set_file(const char *_path, mac_t _label); -int __mac_set_link(const char *_path, mac_t _label); -int __mac_mount(const char *type, const char *path, int flags, void *data, +int __mac_execve(char *fname, char **argv, char **envv, mac_t _label); +int __mac_get_fd(int _fd, mac_t _label); +int __mac_get_file(const char *_path, mac_t _label); +int __mac_get_link(const char *_path, mac_t _label); +int __mac_get_pid(pid_t _pid, mac_t _label); +int __mac_get_proc(mac_t _label); +int __mac_set_fd(int _fildes, const mac_t _label); +int __mac_set_file(const char *_path, mac_t _label); +int __mac_set_link(const char *_path, mac_t _label); +int __mac_mount(const char *type, const char *path, int flags, void *data, struct mac *label); -int __mac_get_mount(const char *path, struct mac *label); -int __mac_set_proc(const mac_t _label); -int __mac_syscall(const char *_policyname, int _call, void *_arg); +int __mac_get_mount(const char *path, struct mac *label); +int __mac_set_proc(const mac_t _label); +int __mac_syscall(const char *_policyname, int _call, void *_arg); __END_DECLS #endif /*__APPLE_API_PRIVATE*/ diff --git a/security/mac_alloc.c b/security/mac_alloc.c index 7c19ae14a..2a113b10c 100644 --- a/security/mac_alloc.c +++ b/security/mac_alloc.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -53,17 +53,17 @@ void * mac_kalloc(vm_size_t size, int how) { - - if (how == M_WAITOK) + if (how == M_WAITOK) { return kalloc(size); - else + } else { return kalloc_noblock(size); + } } /* * for temporary binary compatibility */ -void * mac_kalloc_noblock (vm_size_t size); +void * mac_kalloc_noblock(vm_size_t size); void * mac_kalloc_noblock(vm_size_t size) { @@ -73,8 +73,7 @@ mac_kalloc_noblock(vm_size_t size) void mac_kfree(void * data, vm_size_t size) { - - return kfree(data, size); + kfree(data, size); } /* @@ -88,11 +87,12 @@ mac_mbuf_alloc(int len, int wait) struct m_tag *t; t = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_MAC_POLICY_LABEL, - len, wait); - if (t == NULL) - return (NULL); + len, wait); + if (t == NULL) { + return NULL; + } - return ((void *)(t + 1)); + return (void *)(t + 1); #else #pragma unused(len, wait) return NULL; @@ -121,17 +121,15 @@ extern vm_map_t kalloc_map; int mac_wire(void *start, void *end) { - - return (vm_map_wire_kernel(kalloc_map, CAST_USER_ADDR_T(start), - CAST_USER_ADDR_T(end), VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_SECURITY, FALSE)); + return vm_map_wire_kernel(kalloc_map, CAST_USER_ADDR_T(start), + CAST_USER_ADDR_T(end), VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_SECURITY, FALSE); } int mac_unwire(void *start, void *end) { - - return (vm_map_unwire(kalloc_map, CAST_USER_ADDR_T(start), - CAST_USER_ADDR_T(end), FALSE)); + return vm_map_unwire(kalloc_map, CAST_USER_ADDR_T(start), + CAST_USER_ADDR_T(end), FALSE); } /* @@ -140,30 +138,27 @@ mac_unwire(void *start, void *end) zone_t mac_zinit(vm_size_t size, vm_size_t maxmem, vm_size_t alloc, const char *name) { - return zinit(size, maxmem, alloc, name); } void mac_zone_change(zone_t zone, unsigned int item, boolean_t value) { - zone_change(zone, item, value); } void * mac_zalloc(zone_t zone, int how) { - - if (how == M_WAITOK) + if (how == M_WAITOK) { return zalloc(zone); - else + } else { return zalloc_noblock(zone); + } } void mac_zfree(zone_t zone, void *elem) { - zfree(zone, elem); } diff --git a/security/mac_alloc.h b/security/mac_alloc.h index 956b4344b..63da2e699 100644 --- a/security/mac_alloc.h +++ b/security/mac_alloc.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
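On the mac_kalloc()/mac_kfree() hunk above (the change itself is brace style only): the how argument picks between the blocking kalloc() and the fallible kalloc_noblock(), so a caller that cannot sleep must pass something other than M_WAITOK and must handle NULL. A hedged usage sketch; the 128-byte size and the locked-context scenario are assumed for illustration:

/*
 * Sketch of the two allocation modes mac_kalloc() dispatches on.
 * M_WAITOK may sleep until memory is available; anything else
 * (e.g. M_NOWAIT) falls through to kalloc_noblock(), which can fail.
 */
static void
alloc_example(void)
{
	void *p;

	p = mac_kalloc(128, M_NOWAIT);	/* assumed: called with a lock held */
	if (p == NULL) {
		/* non-blocking path failed; retry from a sleepable context */
		return;
	}
	/* ... use p ... */
	mac_kfree(p, 128);
}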
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -30,7 +30,7 @@ */ #ifndef _SECURITY_MAC_ALLOC_H_ -#define _SECURITY_MAC_ALLOC_H_ +#define _SECURITY_MAC_ALLOC_H_ #include #include @@ -42,36 +42,36 @@ /* * Kernel Memory allocator */ -void * mac_kalloc (vm_size_t size, int how); -void mac_kfree (void *data, vm_size_t size); +void * mac_kalloc(vm_size_t size, int how); +void mac_kfree(void *data, vm_size_t size); /* * Mbuf allocator for mbuf labels. */ -void * mac_mbuf_alloc (int len, int wait); -void mac_mbuf_free (void *data); +void * mac_mbuf_alloc(int len, int wait); +void mac_mbuf_free(void *data); /* - * + * */ -int mac_wire (void *start, void *end); -int mac_unwire (void *start, void *end); +int mac_wire(void *start, void *end); +int mac_unwire(void *start, void *end); /* * Zone allocator */ -zone_t mac_zinit (vm_size_t size, vm_size_t maxmem, - vm_size_t alloc, const char *name); -void mac_zone_change (zone_t zone, unsigned int item, boolean_t value); -void * mac_zalloc (zone_t zone, int how); -void mac_zfree (zone_t zone, void *elem); +zone_t mac_zinit(vm_size_t size, vm_size_t maxmem, + vm_size_t alloc, const char *name); +void mac_zone_change(zone_t zone, unsigned int item, boolean_t value); +void * mac_zalloc(zone_t zone, int how); +void mac_zfree(zone_t zone, void *elem); /* Item definitions */ #define Z_EXHAUST 1 /* Make zone exhaustible */ #define Z_COLLECT 2 /* Make zone collectable */ #define Z_EXPAND 3 /* Make zone expandable */ #define Z_FOREIGN 4 /* Allow collectable zone to contain foreign elements */ -#define Z_CALLERACCT 5 /* Account alloc/free against the caller */ +#define Z_CALLERACCT 5 /* Account alloc/free against the caller */ #endif /* __APPLE_API_EVOLVING */ -#endif /* _SECURITY_MAC_ALLOC_H_ */ +#endif /* _SECURITY_MAC_ALLOC_H_ */ diff --git a/security/mac_audit.c b/security/mac_audit.c index 5459cf54a..504cf4b9f 100644 --- a/security/mac_audit.c +++ b/security/mac_audit.c @@ -2,7 +2,7 @@ * Copyright (c) 2006-2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* @@ -61,11 +61,11 @@ * */ #include -#include -#include +#include +#include #include #include -#include +#include #include #include #include @@ -86,7 +86,7 @@ mac_system_check_audit(struct ucred *cred, void *record, int length) MAC_CHECK(system_check_audit, cred, record, length); - return (error); + return error; } int @@ -96,7 +96,7 @@ mac_system_check_auditon(struct ucred *cred, int cmd) MAC_CHECK(system_check_auditon, cred, cmd); - return (error); + return error; } int @@ -107,7 +107,7 @@ mac_system_check_auditctl(struct ucred *cred, struct vnode *vp) MAC_CHECK(system_check_auditctl, cred, vp, vl); - return (error); + return error; } int @@ -118,18 +118,20 @@ mac_proc_check_getauid(struct proc *curp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - - if (!mac_proc_check_enforce(curp)) + + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_getauid, cred); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -140,38 +142,42 @@ mac_proc_check_setauid(struct proc *curp, uid_t auid) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_setauid, cred, auid); kauth_cred_unref(&cred); - return (error); + return error; } -int -mac_proc_check_getaudit(struct proc *curp) +int +mac_proc_check_getaudit(struct proc *curp) { kauth_cred_t cred; int error; #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_getaudit, cred); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -182,39 +188,42 @@ mac_proc_check_setaudit(struct proc *curp, struct auditinfo_addr *ai) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_setaudit, cred, ai); kauth_cred_unref(&cred); - return (error); + return error; } #if 0 /* * This is the framework entry point for MAC policies to use to add * arbitrary data to the current audit record. 
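The audit checks above all reduce to one shape: MAC_CHECK() fans the call out to each registered policy and folds the per-policy verdicts into a single errno. A simplified paraphrase for mac_system_check_audit(), covering only the static policy list; the real macro (in security/mac_internal.h) also walks dynamically loaded entries under mac_policy_list_conditional_busy():

/*
 * Paraphrase of MAC_CHECK(system_check_audit, cred, record, length).
 * Not the literal macro expansion; static policy list only.
 */
static int
mac_system_check_audit_sketch(struct ucred *cred, void *record, int length)
{
	int error = 0;
	unsigned int i;

	for (i = 0; i < mac_policy_list.staticmax; i++) {
		struct mac_policy_conf *mpc = mac_policy_list.entries[i].mpc;

		if (mpc == NULL || mpc->mpc_ops->mpo_system_check_audit == NULL) {
			continue;	/* empty slot, or check not hooked */
		}
		/* Fold this policy's verdict in via mac_error_select(). */
		error = mac_error_select(
		    mpc->mpc_ops->mpo_system_check_audit(cred, record, length),
		    error);
	}
	return error;
}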
- * (Currently not supported, as no existing audit viewers would + * (Currently not supported, as no existing audit viewers would * display this format) - * + * */ int mac_audit_data(int len, u_char *data, mac_policy_handle_t handle) { char *sanitized; - if ((len <= 0) || (len > MAC_AUDIT_DATA_LIMIT)) - return (EINVAL); + if ((len <= 0) || (len > MAC_AUDIT_DATA_LIMIT)) { + return EINVAL; + } sanitized = (char *)zalloc(mac_audit_data_zone); bcopy(data, sanitized, len); - return (audit_mac_data(MAC_AUDIT_DATA_TYPE, len, sanitized)); + return audit_mac_data(MAC_AUDIT_DATA_TYPE, len, sanitized); } #endif @@ -232,25 +241,28 @@ mac_audit_text(char *text, mac_policy_handle_t handle) name = mac_get_mpc(handle)->mpc_name; len = strlen(text); plen = 2 + strlen(name); - if (plen + len >= MAC_AUDIT_DATA_LIMIT) - return (EINVAL); + if (plen + len >= MAC_AUDIT_DATA_LIMIT) { + return EINVAL; + } /* * Make sure the text is only composed of only ASCII printable * characters. */ - for (i=0; i < len; i++) - if (text[i] < (char) 32 || text[i] > (char) 126) - return (EINVAL); + for (i = 0; i < len; i++) { + if (text[i] < (char) 32 || text[i] > (char) 126) { + return EINVAL; + } + } size = len + plen + 1; - sanitized = (char *)zalloc(mac_audit_data_zone); + sanitized = (char *)zalloc(mac_audit_data_zone); strlcpy(sanitized, name, MAC_AUDIT_DATA_LIMIT); strncat(sanitized, ": ", MAC_AUDIT_DATA_LIMIT - plen + 2); strncat(sanitized, text, MAC_AUDIT_DATA_LIMIT - plen); - return (audit_mac_data(MAC_AUDIT_TEXT_TYPE, size, (u_char *)sanitized)); + return audit_mac_data(MAC_AUDIT_TEXT_TYPE, size, (u_char *)sanitized); } int @@ -263,8 +275,9 @@ mac_audit_check_preselect(struct ucred *cred, unsigned short syscode, void *args ret = MAC_AUDIT_DEFAULT; for (i = 0; i < mac_policy_list.staticmax; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } if (mpc->mpc_ops->mpo_audit_check_preselect != NULL) { error = mpc->mpc_ops->mpo_audit_check_preselect(cred, @@ -275,8 +288,9 @@ mac_audit_check_preselect(struct ucred *cred, unsigned short syscode, void *args if (mac_policy_list_conditional_busy() != 0) { for (; i <= mac_policy_list.maxindex; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } if (mpc->mpc_ops->mpo_audit_check_preselect != NULL) { error = mpc->mpc_ops->mpo_audit_check_preselect(cred, @@ -287,7 +301,7 @@ mac_audit_check_preselect(struct ucred *cred, unsigned short syscode, void *args mac_policy_list_unbusy(); } - return (ret); + return ret; } int @@ -302,14 +316,16 @@ mac_audit_check_postselect(struct ucred *cred, unsigned short syscode, * If the audit was forced by a MAC policy by mac_audit_check_preselect(), * echo that. 
*/ - if (mac_forced) - return (MAC_AUDIT_YES); + if (mac_forced) { + return MAC_AUDIT_YES; + } ret = MAC_AUDIT_DEFAULT; for (i = 0; i < mac_policy_list.staticmax; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } if (mpc->mpc_ops->mpo_audit_check_postselect != NULL) { mac_error = mpc->mpc_ops->mpo_audit_check_postselect(cred, @@ -320,8 +336,9 @@ mac_audit_check_postselect(struct ucred *cred, unsigned short syscode, if (mac_policy_list_conditional_busy() != 0) { for (; i <= mac_policy_list.maxindex; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } if (mpc->mpc_ops->mpo_audit_check_postselect != NULL) { mac_error = mpc->mpc_ops->mpo_audit_check_postselect(cred, @@ -332,10 +349,10 @@ mac_audit_check_postselect(struct ucred *cred, unsigned short syscode, mac_policy_list_unbusy(); } - return (ret); + return ret; } -#else /* !CONFIG_AUDIT */ +#else /* !CONFIG_AUDIT */ /* * Function stubs for when AUDIT isn't defined. @@ -344,72 +361,63 @@ mac_audit_check_postselect(struct ucred *cred, unsigned short syscode, int mac_system_check_audit(__unused struct ucred *cred, __unused void *record, __unused int length) { - - return (0); + return 0; } int mac_system_check_auditon(__unused struct ucred *cred, __unused int cmd) { - - return (0); + return 0; } int mac_system_check_auditctl(__unused struct ucred *cred, __unused struct vnode *vp) { - - return (0); + return 0; } int mac_proc_check_getauid(__unused struct proc *curp) { - - return (0); + return 0; } int mac_proc_check_setauid(__unused struct proc *curp, __unused uid_t auid) { - - return (0); + return 0; } int mac_proc_check_getaudit(__unused struct proc *curp) { - - return (0); + return 0; } int mac_proc_check_setaudit(__unused struct proc *curp, __unused struct auditinfo_addr *ai) { - - return (0); + return 0; } int mac_audit_check_preselect(__unused struct ucred *cred, __unused unsigned short syscode, __unused void *args) { - - return (MAC_AUDIT_DEFAULT); + return MAC_AUDIT_DEFAULT; } int mac_audit_check_postselect(__unused struct ucred *cred, __unused unsigned short syscode, __unused void *args, __unused int error, __unused int retval, __unused int mac_forced) { - - return (MAC_AUDIT_DEFAULT); + return MAC_AUDIT_DEFAULT; } int mac_audit_text(__unused char *text, __unused mac_policy_handle_t handle) { - return (0); + return 0; } -#endif /* !CONFIG_AUDIT */ +#endif /* !CONFIG_AUDIT */ diff --git a/security/mac_base.c b/security/mac_base.c index 0cc9f0b0d..c31e5fdc6 100644 --- a/security/mac_base.c +++ b/security/mac_base.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -109,22 +109,22 @@ #include #endif -/* +/* * define MB_DEBUG to display run-time debugging information * #define MB_DEBUG 1 */ #ifdef MB_DEBUG -#define DPRINTF(x) printf x +#define DPRINTF(x) printf x #else #define MB_DEBUG #define DPRINTF(x) #endif #if CONFIG_MACF -SYSCTL_NODE(, OID_AUTO, security, CTLFLAG_RW|CTLFLAG_LOCKED, 0, +SYSCTL_NODE(, OID_AUTO, security, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Security Controls"); -SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW|CTLFLAG_LOCKED, 0, +SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "TrustedBSD MAC policy controls"); /* @@ -150,7 +150,7 @@ SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD | CTLFLAG_LOCKED, * access to this variable is serialized during the boot process. Following * the end of serialization, we don't update this flag; no locking. */ -int mac_late = 0; +int mac_late = 0; /* * Flag to indicate whether or not we should allocate label storage for @@ -164,9 +164,9 @@ int mac_late = 0; * be a problem. Note: currently no locking. Will this be a problem? */ #if CONFIG_MACF_NET -unsigned int mac_label_mbufs = 1; +unsigned int mac_label_mbufs = 1; SYSCTL_UINT(_security_mac, OID_AUTO, label_mbufs, SECURITY_MAC_CTLFLAGS, - &mac_label_mbufs, 0, "Label all MBUFs"); + &mac_label_mbufs, 0, "Label all MBUFs"); #endif @@ -181,19 +181,19 @@ SYSCTL_UINT(_security_mac, OID_AUTO, label_mbufs, SECURITY_MAC_CTLFLAGS, * already has to deal with uninitialized labels, this probably won't * be a problem. 
*/ -unsigned int mac_label_vnodes = 0; +unsigned int mac_label_vnodes = 0; SYSCTL_UINT(_security_mac, OID_AUTO, labelvnodes, SECURITY_MAC_CTLFLAGS, &mac_label_vnodes, 0, "Label all vnodes"); unsigned int mac_device_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, device_enforce, SECURITY_MAC_CTLFLAGS, - &mac_device_enforce, 0, "Enforce MAC policy on device operations"); + &mac_device_enforce, 0, "Enforce MAC policy on device operations"); -unsigned int mac_pipe_enforce = 1; +unsigned int mac_pipe_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, pipe_enforce, SECURITY_MAC_CTLFLAGS, &mac_pipe_enforce, 0, "Enforce MAC policy on pipe operations"); -unsigned int mac_posixsem_enforce = 1; +unsigned int mac_posixsem_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, posixsem_enforce, SECURITY_MAC_CTLFLAGS, &mac_posixsem_enforce, 0, "Enforce MAC policy on POSIX semaphores"); @@ -201,37 +201,37 @@ unsigned int mac_posixshm_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, posixshm_enforce, SECURITY_MAC_CTLFLAGS, &mac_posixshm_enforce, 0, "Enforce MAC policy on Posix Shared Memory"); -unsigned int mac_proc_enforce = 1; +unsigned int mac_proc_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, proc_enforce, SECURITY_MAC_CTLFLAGS, - &mac_proc_enforce, 0, "Enforce MAC policy on process operations"); + &mac_proc_enforce, 0, "Enforce MAC policy on process operations"); unsigned int mac_socket_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, socket_enforce, SECURITY_MAC_CTLFLAGS, - &mac_socket_enforce, 0, "Enforce MAC policy on socket operations"); + &mac_socket_enforce, 0, "Enforce MAC policy on socket operations"); -unsigned int mac_system_enforce = 1; +unsigned int mac_system_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, system_enforce, SECURITY_MAC_CTLFLAGS, &mac_system_enforce, 0, "Enforce MAC policy on system-wide interfaces"); -unsigned int mac_sysvmsg_enforce = 1; +unsigned int mac_sysvmsg_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, sysvmsg_enforce, SECURITY_MAC_CTLFLAGS, &mac_sysvmsg_enforce, 0, "Enforce MAC policy on System V IPC message queues"); -unsigned int mac_sysvsem_enforce = 1; +unsigned int mac_sysvsem_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, sysvsem_enforce, SECURITY_MAC_CTLFLAGS, &mac_sysvsem_enforce, 0, "Enforce MAC policy on System V IPC semaphores"); -unsigned int mac_sysvshm_enforce = 1; +unsigned int mac_sysvshm_enforce = 1; SYSCTL_INT(_security_mac, OID_AUTO, sysvshm_enforce, SECURITY_MAC_CTLFLAGS, &mac_sysvshm_enforce, 0, "Enforce MAC policy on System V Shared Memory"); -unsigned int mac_vm_enforce = 1; +unsigned int mac_vm_enforce = 1; SYSCTL_INT(_security_mac, OID_AUTO, vm_enforce, SECURITY_MAC_CTLFLAGS, - &mac_vm_enforce, 0, "Enforce MAC policy on VM operations"); + &mac_vm_enforce, 0, "Enforce MAC policy on VM operations"); -unsigned int mac_vnode_enforce = 1; +unsigned int mac_vnode_enforce = 1; SYSCTL_UINT(_security_mac, OID_AUTO, vnode_enforce, SECURITY_MAC_CTLFLAGS, - &mac_vnode_enforce, 0, "Enforce MAC policy on vnode operations"); + &mac_vnode_enforce, 0, "Enforce MAC policy on vnode operations"); #if CONFIG_AUDIT /* @@ -279,7 +279,7 @@ mac_policy_list_t mac_policy_list; * mac_label_element_list holds the master list of label namespaces for * all the policies. When a policy is loaded, each of its label namespace * elements is added to the master list if not already present.
When a - * policy is unloaded, the namespace elements are removed if no other + * policy is unloaded, the namespace elements are removed if no other * policy is interested in that namespace element. */ struct mac_label_element_list_t mac_label_element_list; @@ -291,7 +291,7 @@ mac_policy_grab_exclusive(void) lck_mtx_lock(mac_policy_mtx); while (mac_policy_busy != 0) { lck_mtx_sleep(mac_policy_mtx, LCK_SLEEP_UNLOCK, - (event_t)&mac_policy_busy, THREAD_UNINT); + (event_t)&mac_policy_busy, THREAD_UNINT); lck_mtx_lock(mac_policy_mtx); } } @@ -299,7 +299,6 @@ mac_policy_grab_exclusive(void) static __inline void mac_policy_release_exclusive(void) { - KASSERT(mac_policy_busy == 0, ("mac_policy_release_exclusive(): not exclusive")); lck_mtx_unlock(mac_policy_mtx); @@ -309,7 +308,7 @@ mac_policy_release_exclusive(void) void mac_policy_list_busy(void) { - lck_mtx_lock(mac_policy_mtx); + lck_mtx_lock(mac_policy_mtx); mac_policy_busy++; lck_mtx_unlock(mac_policy_mtx); } @@ -319,17 +318,19 @@ mac_policy_list_conditional_busy(void) { int ret; - if (mac_policy_list.numloaded <= mac_policy_list.staticmax) - return(0); + if (mac_policy_list.numloaded <= mac_policy_list.staticmax) { + return 0; + } lck_mtx_lock(mac_policy_mtx); if (mac_policy_list.numloaded > mac_policy_list.staticmax) { mac_policy_busy++; ret = 1; - } else + } else { ret = 0; + } lck_mtx_unlock(mac_policy_mtx); - return (ret); + return ret; } void @@ -338,8 +339,9 @@ mac_policy_list_unbusy(void) lck_mtx_lock(mac_policy_mtx); mac_policy_busy--; KASSERT(mac_policy_busy >= 0, ("MAC_POLICY_LIST_LOCK")); - if (mac_policy_busy == 0) + if (mac_policy_busy == 0) { thread_wakeup(&mac_policy_busy); + } lck_mtx_unlock(mac_policy_mtx); } @@ -366,13 +368,12 @@ mac_policy_init(void) mac_policy_list.entries = kalloc(sizeof(struct mac_policy_list_element) * MAC_POLICY_LIST_CHUNKSIZE); #endif - bzero(mac_policy_list.entries, sizeof(struct mac_policy_list_element) * MAC_POLICY_LIST_CHUNKSIZE); + bzero(mac_policy_list.entries, sizeof(struct mac_policy_list_element) * MAC_POLICY_LIST_CHUNKSIZE); LIST_INIT(&mac_label_element_list); LIST_INIT(&mac_static_label_element_list); mac_lck_grp_attr = lck_grp_attr_alloc_init(); - lck_grp_attr_setstat(mac_lck_grp_attr); mac_lck_grp = lck_grp_alloc_init("MAC lock", mac_lck_grp_attr); mac_lck_attr = lck_attr_alloc_init(); lck_attr_setdefault(mac_lck_attr); @@ -380,7 +381,7 @@ mac_policy_init(void) lck_attr_free(mac_lck_attr); lck_grp_attr_free(mac_lck_grp_attr); lck_grp_free(mac_lck_grp); - + mac_labelzone_init(); } @@ -397,7 +398,6 @@ void (*load_security_extensions_function)(void) = 0; void mac_policy_initmach(void) { - /* * For the purposes of modules that want to know if they were * loaded "early", set the mac_late flag once we've processed @@ -422,8 +422,8 @@ mac_policy_initbsd(void) #if CONFIG_AUDIT mac_audit_data_zone = zinit(MAC_AUDIT_DATA_LIMIT, - AQ_HIWATER * MAC_AUDIT_DATA_LIMIT, - 8192, "mac_audit_data_zone"); + AQ_HIWATER * MAC_AUDIT_DATA_LIMIT, + 8192, "mac_audit_data_zone"); #endif printf("MAC Framework successfully initialized\n"); @@ -439,8 +439,9 @@ mac_policy_initbsd(void) for (i = 0; i <= mac_policy_list.maxindex; i++) { mpc = mac_get_mpc(i); - if ((mpc != NULL) && (mpc->mpc_ops->mpo_policy_initbsd != NULL)) + if ((mpc != NULL) && (mpc->mpc_ops->mpo_policy_initbsd != NULL)) { (*(mpc->mpc_ops->mpo_policy_initbsd))(mpc); + } } mac_policy_release_exclusive(); @@ -448,8 +449,8 @@ mac_policy_initbsd(void) /* * After a policy has been loaded, add the label namespaces managed by the - * policy to either the 
static or non-static label namespace list. - * A namespace is added to the the list only if it is not already on one of + * policy to either the static or non-static label namespace list. + * A namespace is added to the list only if it is not already on one of * the lists. */ void @@ -464,66 +465,76 @@ mac_policy_addto_labellist(mac_policy_handle_t handle, int static_entry) mpc = mac_get_mpc(handle); - if (mpc->mpc_labelnames == NULL) + if (mpc->mpc_labelnames == NULL) { return; + } - if (mpc->mpc_labelname_count == 0) + if (mpc->mpc_labelname_count == 0) { return; + } - if (static_entry) + if (static_entry) { list = &mac_static_label_element_list; - else + } else { list = &mac_label_element_list; + } /* * Before we grab the policy list lock, allocate enough memory - * to contain the potential new elements so we don't have to + * to contain the potential new elements so we don't have to * give up the lock, or allocate with the lock held. */ MALLOC(new_mles, struct mac_label_element **, sizeof(struct mac_label_element *) * mpc->mpc_labelname_count, M_MACTEMP, M_WAITOK | M_ZERO); - for (idx = 0; idx < mpc->mpc_labelname_count; idx++) - MALLOC(new_mles[idx], struct mac_label_element *, + for (idx = 0; idx < mpc->mpc_labelname_count; idx++) { + MALLOC(new_mles[idx], struct mac_label_element *, sizeof(struct mac_label_element), M_MACTEMP, M_WAITOK); + } mle_free = 0; MALLOC(new_mlls, struct mac_label_listener **, sizeof(struct mac_label_listener *) * mpc->mpc_labelname_count, M_MACTEMP, M_WAITOK); - for (idx = 0; idx < mpc->mpc_labelname_count; idx++) + for (idx = 0; idx < mpc->mpc_labelname_count; idx++) { MALLOC(new_mlls[idx], struct mac_label_listener *, sizeof(struct mac_label_listener), M_MACTEMP, M_WAITOK); + } mll_free = 0; - if (mac_late) + if (mac_late) { mac_policy_grab_exclusive(); + } for (idx = 0; idx < mpc->mpc_labelname_count; idx++) { - - if (*(name = mpc->mpc_labelnames[idx]) == '?') + if (*(name = mpc->mpc_labelnames[idx]) == '?') { name++; + } /* - * Check both label element lists and add to the + * Check both label element lists and add to the * appropriate list only if not already on a list.
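The comment in this hunk names a pattern worth spelling out: do every M_WAITOK allocation before taking the exclusive policy lock, consume only what is needed while holding it, and free the surplus afterwards. The same shape reduced to a skeleton; struct node, lock(), unlock(), already_present() and list_insert() are placeholders, not kernel APIs:

/*
 * Skeleton of the preallocate-then-lock pattern used by
 * mac_policy_addto_labellist(). All names here are placeholders.
 */
static void
prealloc_then_lock(int count)
{
	struct node **prealloc;
	int i, used = 0;

	MALLOC(prealloc, struct node **, count * sizeof(struct node *),
	    M_MACTEMP, M_WAITOK);
	for (i = 0; i < count; i++) {
		/* May sleep: no locks are held yet. */
		MALLOC(prealloc[i], struct node *, sizeof(struct node),
		    M_MACTEMP, M_WAITOK);
	}

	lock();				/* from here on, no allocations */
	for (i = 0; i < count; i++) {
		if (!already_present(i)) {
			list_insert(prealloc[used++]);	/* consume one node */
		}
	}
	unlock();

	/* Return the unused surplus. */
	for (i = used; i < count; i++) {
		FREE(prealloc[i], M_MACTEMP);
	}
	FREE(prealloc, M_MACTEMP);
}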
*/ LIST_FOREACH(mle, &mac_static_label_element_list, mle_list) { - if (*(name2 = mle->mle_name) == '?') + if (*(name2 = mle->mle_name) == '?') { name2++; - if (strcmp(name, name2) == 0) + } + if (strcmp(name, name2) == 0) { break; + } } if (mle == NULL) { LIST_FOREACH(mle, &mac_label_element_list, mle_list) { - if (*(name2 = mle->mle_name) == '?') + if (*(name2 = mle->mle_name) == '?') { name2++; - if (strcmp(name, name2) == 0) + } + if (strcmp(name, name2) == 0) { break; + } } } if (mle == NULL) { mle = new_mles[mle_free]; strlcpy(mle->mle_name, mpc->mpc_labelnames[idx], - MAC_MAX_LABEL_ELEMENT_NAME); + MAC_MAX_LABEL_ELEMENT_NAME); LIST_INIT(&mle->mle_listeners); LIST_INSERT_HEAD(list, mle, mle_list); mle_free++; @@ -534,15 +545,18 @@ mac_policy_addto_labellist(mac_policy_handle_t handle, int static_entry) mll_list); mll_free++; } - if (mac_late) + if (mac_late) { mac_policy_release_exclusive(); + } /* Free up any unused label elements and listeners */ - for (idx = mle_free; idx < mpc->mpc_labelname_count; idx++) + for (idx = mle_free; idx < mpc->mpc_labelname_count; idx++) { FREE(new_mles[idx], M_MACTEMP); + } FREE(new_mles, M_MACTEMP); - for (idx = mll_free; idx < mpc->mpc_labelname_count; idx++) + for (idx = mll_free; idx < mpc->mpc_labelname_count; idx++) { FREE(new_mlls[idx], M_MACTEMP); + } FREE(new_mlls, M_MACTEMP); } @@ -563,11 +577,13 @@ mac_policy_removefrom_labellist(mac_policy_handle_t handle) mpc = mac_get_mpc(handle); - if (mpc->mpc_labelnames == NULL) + if (mpc->mpc_labelnames == NULL) { return; + } - if (mpc->mpc_labelname_count == 0) + if (mpc->mpc_labelname_count == 0) { return; + } /* * Unregister policy as being interested in any label @@ -627,9 +643,10 @@ mac_policy_fixup_mmd_list(struct mac_module_data *new) if (arr->type == MAC_DATA_TYPE_DICT) { dict = (struct mac_module_data_list *)aele->value; DPRINTF(("fixup_mmd: dict @%p\n", dict)); - for (k = 0; k < dict->count; k++) + for (k = 0; k < dict->count; k++) { mmd_fixup_ele(old, new, &(dict->list[k])); + } } } } @@ -651,17 +668,21 @@ mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, * Some preliminary checks to make sure the policy's conf structure * contains the required fields. 
*/ - if (mpc->mpc_name == NULL) + if (mpc->mpc_name == NULL) { panic("policy's name is not set\n"); + } - if (mpc->mpc_fullname == NULL) + if (mpc->mpc_fullname == NULL) { panic("policy's full name is not set\n"); + } - if (mpc->mpc_labelname_count > MAC_MAX_MANAGED_NAMESPACES) + if (mpc->mpc_labelname_count > MAC_MAX_MANAGED_NAMESPACES) { panic("policy's managed label namespaces exceeds maximum\n"); + } - if (mpc->mpc_ops == NULL) + if (mpc->mpc_ops == NULL) { panic("policy's OPs field is NULL\n"); + } error = 0; @@ -669,7 +690,7 @@ mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE) { printf("Module %s does not support late loading.\n", mpc->mpc_name); - return (EPERM); + return EPERM; } mac_policy_grab_exclusive(); } @@ -683,17 +704,17 @@ mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, bzero(&tmac_policy_list_element[mac_policy_list.max], sizeof(struct mac_policy_list_element) * MAC_POLICY_LIST_CHUNKSIZE); - + /* copy old entries into new list */ - memcpy(tmac_policy_list_element, mac_policy_list.entries, - sizeof(struct mac_policy_list_element) * - MAC_POLICY_LIST_CHUNKSIZE * mac_policy_list.chunks); - + memcpy(tmac_policy_list_element, mac_policy_list.entries, + sizeof(struct mac_policy_list_element) * + MAC_POLICY_LIST_CHUNKSIZE * mac_policy_list.chunks); + /* free old array */ kfree(mac_policy_list.entries, sizeof(struct mac_policy_list_element) * MAC_POLICY_LIST_CHUNKSIZE * mac_policy_list.chunks); - + mac_policy_list.entries = tmac_policy_list_element; /* Update maximums, etc */ @@ -701,14 +722,15 @@ mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, mac_policy_list.chunks++; #else printf("out of space in mac_policy_list.\n"); - return (ENOMEM); + return ENOMEM; #endif /* CONFIG_EMBEDDED */ } /* Check for policy with same name already loaded */ for (i = 0; i <= mac_policy_list.maxindex; i++) { - if (mac_policy_list.entries[i].mpc == NULL) - continue; + if (mac_policy_list.entries[i].mpc == NULL) { + continue; + } if (strcmp(mac_policy_list.entries[i].mpc->mpc_name, mpc->mpc_name) == 0) { @@ -768,37 +790,41 @@ mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, mac_policy_list.entries[*handlep].mpc = mpc; /* Update counters, etc */ - if (*handlep > mac_policy_list.maxindex) + if (*handlep > mac_policy_list.maxindex) { mac_policy_list.maxindex = *handlep; + } mac_policy_list.numloaded++; - + /* Per-policy initialization. 
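Registration above first sanity-checks the conf structure, panicking on missing fields, and then, when the table is full on CONFIG_EMBEDDED configs, grows the policy array one chunk at a time: allocate the larger array, zero only the new tail, copy the old entries across, free the old array. The growth step in isolation, with placeholder names and an assumed chunk size (not the literal kernel code):

/*
 * Grow-by-chunk, mirroring the mac_policy_list expansion above.
 * "struct entry", CHUNK, chunks and entries are placeholders.
 */
#define CHUNK 8
struct entry { void *e_mpc; };
static struct entry *entries;
static unsigned int chunks = 1;

static int
grow_table(void)
{
	size_t oldsz = sizeof(struct entry) * CHUNK * chunks;
	size_t newsz = sizeof(struct entry) * CHUNK * (chunks + 1);
	struct entry *tmp = kalloc(newsz);

	if (tmp == NULL) {
		return ENOMEM;
	}
	bzero((char *)tmp + oldsz, newsz - oldsz);	/* zero only the new tail */
	memcpy(tmp, entries, oldsz);			/* carry old entries over */
	kfree(entries, oldsz);				/* release the old array */
	entries = tmp;
	chunks++;
	return 0;
}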
*/ - printf ("calling mpo_policy_init for %s\n", mpc->mpc_name); - if (mpc->mpc_ops->mpo_policy_init != NULL) + printf("calling mpo_policy_init for %s\n", mpc->mpc_name); + if (mpc->mpc_ops->mpo_policy_init != NULL) { (*(mpc->mpc_ops->mpo_policy_init))(mpc); + } if (mac_late && mpc->mpc_ops->mpo_policy_initbsd != NULL) { - printf ("calling mpo_policy_initbsd for %s\n", mpc->mpc_name); + printf("calling mpo_policy_initbsd for %s\n", mpc->mpc_name); (*(mpc->mpc_ops->mpo_policy_initbsd))(mpc); } mac_policy_updateflags(); - if (mac_late) + if (mac_late) { mac_policy_release_exclusive(); + } mac_policy_addto_labellist(*handlep, static_entry); printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname, mpc->mpc_name); - return (0); + return 0; out: - if (mac_late) + if (mac_late) { mac_policy_release_exclusive(); + } - return (error); + return error; } int @@ -815,7 +841,7 @@ mac_policy_unregister(mac_policy_handle_t handle) mpc = mac_get_mpc(handle); if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) { mac_policy_release_exclusive(); - return (0); + return 0; } #if 0 @@ -824,7 +850,7 @@ mac_policy_unregister(mac_policy_handle_t handle) */ if (mpc->mpc_field_off != NULL) { MAC_POLICY_LIST_UNLOCK(); - return (EBUSY); + return EBUSY; } #endif /* @@ -833,26 +859,29 @@ mac_policy_unregister(mac_policy_handle_t handle) */ if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) { mac_policy_release_exclusive(); - return (EBUSY); + return EBUSY; } mac_policy_removefrom_labellist(handle); mac_get_mpc(handle) = NULL; if (handle < mac_policy_list.freehint && - handle >= mac_policy_list.staticmax) + handle >= mac_policy_list.staticmax) { mac_policy_list.freehint = handle; + } - if (handle == mac_policy_list.maxindex) + if (handle == mac_policy_list.maxindex) { mac_policy_list.maxindex--; + } - mac_policy_list.numloaded--; + mac_policy_list.numloaded--; if (mpc->mpc_field_off != NULL) { mac_slot_offsets_free |= (1 << *mpc->mpc_field_off); } - if (mpc->mpc_ops->mpo_policy_destroy != NULL) + if (mpc->mpc_ops->mpo_policy_destroy != NULL) { (*(mpc->mpc_ops->mpo_policy_destroy))(mpc); + } mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED; mac_policy_updateflags(); @@ -868,7 +897,7 @@ mac_policy_unregister(mac_policy_handle_t handle) printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname, mpc->mpc_name); - return (0); + return 0; } /* @@ -878,40 +907,45 @@ mac_policy_unregister(mac_policy_handle_t handle) int mac_error_select(int error1, int error2) { - /* Certain decision-making errors take top priority. */ - if (error1 == EDEADLK || error2 == EDEADLK) - return (EDEADLK); + if (error1 == EDEADLK || error2 == EDEADLK) { + return EDEADLK; + } /* Invalid arguments should be reported where possible. */ - if (error1 == EINVAL || error2 == EINVAL) - return (EINVAL); + if (error1 == EINVAL || error2 == EINVAL) { + return EINVAL; + } /* Precedence goes to "visibility", with both process and file. */ - if (error1 == ESRCH || error2 == ESRCH) - return (ESRCH); + if (error1 == ESRCH || error2 == ESRCH) { + return ESRCH; + } - if (error1 == ENOENT || error2 == ENOENT) - return (ENOENT); + if (error1 == ENOENT || error2 == ENOENT) { + return ENOENT; + } /* Precedence goes to DAC/MAC protections. */ - if (error1 == EACCES || error2 == EACCES) - return (EACCES); + if (error1 == EACCES || error2 == EACCES) { + return EACCES; + } /* Precedence goes to privilege. 
*/ - if (error1 == EPERM || error2 == EPERM) - return (EPERM); + if (error1 == EPERM || error2 == EPERM) { + return EPERM; + } /* Precedence goes to error over success; otherwise, arbitrary. */ - if (error1 != 0) - return (error1); - return (error2); + if (error1 != 0) { + return error1; + } + return error2; } void mac_label_init(struct label *label) { - bzero(label, sizeof(*label)); label->l_flags = MAC_FLAG_INITIALIZED; } @@ -919,7 +953,6 @@ mac_label_init(struct label *label) void mac_label_destroy(struct label *label) { - KASSERT(label->l_flags & MAC_FLAG_INITIALIZED, ("destroying uninitialized label")); @@ -930,11 +963,11 @@ mac_label_destroy(struct label *label) int mac_check_structmac_consistent(struct user_mac *mac) { + if (mac->m_buflen > MAC_MAX_LABEL_BUF_LEN || mac->m_buflen == 0) { + return EINVAL; + } - if (mac->m_buflen > MAC_MAX_LABEL_BUF_LEN || mac->m_buflen == 0) - return (EINVAL); - - return (0); + return 0; } /* @@ -958,37 +991,45 @@ mac_label_externalize(size_t mpo_externalize_off, struct label *label, if (element[0] == '?') { element++; ignorenotfound = 1; - } else if (element[0] == '*' && element[1] == '\0') + } else if (element[0] == '*' && element[1] == '\0') { all_labels = 1; + } element_list = &mac_static_label_element_list; element_loop: LIST_FOREACH(mle, element_list, mle_list) { name = mle->mle_name; if (all_labels) { - if (*name == '?') - continue; + if (*name == '?') { + continue; + } } else { - if (*name == '?') + if (*name == '?') { name++; - if (strcmp(name, element) != 0) + } + if (strcmp(name, element) != 0) { continue; + } } LIST_FOREACH(mll, &mle->mle_listeners, mll_list) { mpc = mac_policy_list.entries[mll->mll_handle].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } mpo_externalize = *(const typeof(mpo_externalize) *) ((const char *)mpc->mpc_ops + mpo_externalize_off); - if (mpo_externalize == NULL) + if (mpo_externalize == NULL) { continue; + } error = sbuf_printf(sb, "%s/", name); - if (error) + if (error) { goto done; + } error = mpo_externalize(label, mle->mle_name, sb); if (error) { - if (error != ENOENT) + if (error != ENOENT) { goto done; + } /* * If a policy doesn't have a label to * externalize it returns ENOENT. This @@ -1002,8 +1043,9 @@ element_loop: continue; } error = sbuf_putc(sb, ','); - if (error) + if (error) { goto done; + } count++; } } @@ -1014,13 +1056,15 @@ element_loop: goto element_loop; } done: - if (busy) + if (busy) { mac_policy_list_unbusy(); + } if (!error && count == 0) { - if (!all_labels && !ignorenotfound) - error = ENOENT; /* XXX: ENOLABEL? */ + if (!all_labels && !ignorenotfound) { + error = ENOENT; /* XXX: ENOLABEL? 
*/ + } } - return (error); + return error; } /* @@ -1040,14 +1084,14 @@ mac_externalize(size_t mpo_externalize_off, struct label *label, int error = 0, len; /* allocate a scratch buffer the size of the string */ - MALLOC(scratch_base, char *, strlen(elementlist)+1, M_MACTEMP, M_WAITOK); + MALLOC(scratch_base, char *, strlen(elementlist) + 1, M_MACTEMP, M_WAITOK); if (scratch_base == NULL) { error = ENOMEM; goto out; } /* copy the elementlist to the scratch buffer */ - strlcpy(scratch_base, elementlist, strlen(elementlist)+1); + strlcpy(scratch_base, elementlist, strlen(elementlist) + 1); /* * set up a temporary pointer that can be used to iterate the @@ -1065,18 +1109,21 @@ mac_externalize(size_t mpo_externalize_off, struct label *label, while ((element = strsep(&scratch, ",")) != NULL) { error = mac_label_externalize(mpo_externalize_off, label, element, &sb); - if (error) + if (error) { break; + } + } + if ((len = sbuf_len(&sb)) > 0) { + sbuf_setpos(&sb, len - 1); /* trim trailing comma */ } - if ((len = sbuf_len(&sb)) > 0) - sbuf_setpos(&sb, len - 1); /* trim trailing comma */ sbuf_finish(&sb); out: - if (scratch_base != NULL) + if (scratch_base != NULL) { FREE(scratch_base, M_MACTEMP); + } - return (error); + return error; } /* @@ -1099,22 +1146,27 @@ mac_label_internalize(size_t mpo_internalize_off, struct label *label, element_list = &mac_static_label_element_list; element_loop: LIST_FOREACH(mle, element_list, mle_list) { - if (*(name = mle->mle_name) == '?') + if (*(name = mle->mle_name) == '?') { name++; - if (strcmp(element_name, name) != 0) + } + if (strcmp(element_name, name) != 0) { continue; + } LIST_FOREACH(mll, &mle->mle_listeners, mll_list) { mpc = mac_policy_list.entries[mll->mll_handle].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } mpo_internalize = *(const typeof(mpo_internalize) *) ((const char *)mpc->mpc_ops + mpo_internalize_off); - if (mpo_internalize == NULL) + if (mpo_internalize == NULL) { continue; + } error = mpo_internalize(label, element_name, element_data); - if (error) + if (error) { goto done; + } count++; } } @@ -1125,11 +1177,13 @@ element_loop: goto element_loop; } done: - if (busy) + if (busy) { mac_policy_list_unbusy(); - if (!error && count == 0) + } + if (!error && count == 0) { error = ENOPOLICY; - return (error); + } + return error; } int @@ -1149,7 +1203,7 @@ mac_internalize(size_t mpo_internalize_off, struct label *label, error = mac_label_internalize(mpo_internalize_off, label, element_name, element_data); } - return (error); + return error; } /* system calls */ @@ -1176,16 +1230,19 @@ __mac_get_pid(struct proc *p, struct __mac_get_pid_args *uap, int *ret __unused) mac.m_buflen = mac32.m_buflen; mac.m_string = mac32.m_string; } - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); + if (error) { + return error; + } tproc = proc_find(uap->pid); - if (tproc == NULL) - return (ESRCH); + if (tproc == NULL) { + return ESRCH; + } tcred = kauth_cred_proc_ref(tproc); proc_rele(tproc); @@ -1194,20 +1251,21 @@ __mac_get_pid(struct proc *p, struct __mac_get_pid_args *uap, int *ret __unused) if (error) { FREE(elements, M_MACTEMP); kauth_cred_unref(&tcred); - return (error); + return error; } AUDIT_ARG(mac_string, elements); MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO); error = mac_cred_label_externalize(tcred->cr_label, elements, buffer, mac.m_buflen, M_WAITOK); - if (error == 0) - error = copyout(buffer, mac.m_string, 
strlen(buffer)+1); + if (error == 0) { + error = copyout(buffer, mac.m_string, strlen(buffer) + 1); + } FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); kauth_cred_unref(&tcred); - return (error); + return error; } int @@ -1230,18 +1288,20 @@ __mac_get_proc(proc_t p, struct __mac_get_proc_args *uap, int *ret __unused) mac.m_buflen = mac32.m_buflen; mac.m_string = mac32.m_string; } - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); + if (error) { + return error; + } MALLOC(elements, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(mac.m_string, elements, mac.m_buflen, &ulen); if (error) { FREE(elements, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, elements); @@ -1250,13 +1310,14 @@ __mac_get_proc(proc_t p, struct __mac_get_proc_args *uap, int *ret __unused) MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO); error = mac_cred_label_externalize(cr->cr_label, elements, buffer, mac.m_buflen, M_WAITOK); - if (error == 0) - error = copyout(buffer, mac.m_string, strlen(buffer)+1); + if (error == 0) { + error = copyout(buffer, mac.m_string, strlen(buffer) + 1); + } FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); kauth_cred_unref(&cr); - return (error); + return error; } int @@ -1279,26 +1340,29 @@ __mac_set_proc(proc_t p, struct __mac_set_proc_args *uap, int *ret __unused) mac.m_buflen = mac32.m_buflen; mac.m_string = mac32.m_string; } - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); + if (error) { + return error; + } MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(mac.m_string, buffer, mac.m_buflen, &ulen); if (error) { FREE(buffer, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, buffer); intlabel = mac_cred_label_alloc(); error = mac_cred_label_internalize(intlabel, buffer); FREE(buffer, M_MACTEMP); - if (error) + if (error) { goto out; + } error = mac_cred_check_label_update(kauth_cred_get(), intlabel); if (error) { @@ -1306,12 +1370,13 @@ __mac_set_proc(proc_t p, struct __mac_set_proc_args *uap, int *ret __unused) } error = kauth_proc_label_update(p, intlabel); - if (error) + if (error) { goto out; + } out: mac_cred_label_free(intlabel); - return (error); + return error; } int @@ -1326,7 +1391,7 @@ __mac_get_fd(proc_t p, struct __mac_get_fd_args *uap, int *ret __unused) kauth_cred_t my_cred; #if CONFIG_MACF_SOCKET struct socket *so; -#endif /* MAC_SOCKET */ +#endif /* MAC_SOCKET */ struct label *intlabel; AUDIT_ARG(fd, uap->fd); @@ -1343,18 +1408,20 @@ __mac_get_fd(proc_t p, struct __mac_get_fd_args *uap, int *ret __unused) mac.m_string = mac32.m_string; } - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); - + if (error) { + return error; + } + MALLOC(elements, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(mac.m_string, elements, mac.m_buflen, &ulen); if (error) { FREE(elements, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, elements); @@ -1363,9 +1430,9 @@ __mac_get_fd(proc_t p, struct __mac_get_fd_args *uap, int *ret __unused) if (error) { FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); - return (error); + return error; } - + my_cred = kauth_cred_proc_ref(p); error = mac_file_check_get(my_cred, fp->f_fglob, elements, mac.m_buflen); kauth_cred_unref(&my_cred); @@ 
-1373,57 +1440,58 @@ __mac_get_fd(proc_t p, struct __mac_get_fd_args *uap, int *ret __unused) fp_drop(p, uap->fd, fp, 0); FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); - return (error); + return error; } - + switch (FILEGLOB_DTYPE(fp->f_fglob)) { - case DTYPE_VNODE: - intlabel = mac_vnode_label_alloc(); - if (intlabel == NULL) { - error = ENOMEM; - break; - } - vp = (struct vnode *)fp->f_fglob->fg_data; - error = vnode_getwithref(vp); - if (error == 0) { - mac_vnode_label_copy(vp->v_label, intlabel); - error = mac_vnode_label_externalize(intlabel, - elements, buffer, - mac.m_buflen, M_WAITOK); - vnode_put(vp); - } - mac_vnode_label_free(intlabel); + case DTYPE_VNODE: + intlabel = mac_vnode_label_alloc(); + if (intlabel == NULL) { + error = ENOMEM; break; - case DTYPE_SOCKET: + } + vp = (struct vnode *)fp->f_fglob->fg_data; + error = vnode_getwithref(vp); + if (error == 0) { + mac_vnode_label_copy(vp->v_label, intlabel); + error = mac_vnode_label_externalize(intlabel, + elements, buffer, + mac.m_buflen, M_WAITOK); + vnode_put(vp); + } + mac_vnode_label_free(intlabel); + break; + case DTYPE_SOCKET: #if CONFIG_MACF_SOCKET - so = (struct socket *) fp->f_fglob->fg_data; - intlabel = mac_socket_label_alloc(MAC_WAITOK); - sock_lock(so, 1); - mac_socket_label_copy(so->so_label, intlabel); - sock_unlock(so, 1); - error = mac_socket_label_externalize(intlabel, elements, buffer, mac.m_buflen); - mac_socket_label_free(intlabel); - break; + so = (struct socket *) fp->f_fglob->fg_data; + intlabel = mac_socket_label_alloc(MAC_WAITOK); + sock_lock(so, 1); + mac_socket_label_copy(so->so_label, intlabel); + sock_unlock(so, 1); + error = mac_socket_label_externalize(intlabel, elements, buffer, mac.m_buflen); + mac_socket_label_free(intlabel); + break; #endif - case DTYPE_PSXSHM: - case DTYPE_PSXSEM: - case DTYPE_PIPE: - case DTYPE_KQUEUE: - case DTYPE_FSEVENTS: - case DTYPE_ATALK: - case DTYPE_NETPOLICY: - default: - error = ENOSYS; // only sockets/vnodes so far - break; + case DTYPE_PSXSHM: + case DTYPE_PSXSEM: + case DTYPE_PIPE: + case DTYPE_KQUEUE: + case DTYPE_FSEVENTS: + case DTYPE_ATALK: + case DTYPE_NETPOLICY: + default: + error = ENOSYS; // only sockets/vnodes so far + break; } fp_drop(p, uap->fd, fp, 0); - - if (error == 0) - error = copyout(buffer, mac.m_string, strlen(buffer)+1); - + + if (error == 0) { + error = copyout(buffer, mac.m_string, strlen(buffer) + 1); + } + FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); - return (error); + return error; } static int @@ -1450,12 +1518,14 @@ mac_get_filelink(proc_t p, user_addr_t mac_p, user_addr_t path_p, int follow) mac.m_string = mac32.m_string; } - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); + if (error) { + return error; + } MALLOC(elements, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO); @@ -1464,20 +1534,20 @@ mac_get_filelink(proc_t p, user_addr_t mac_p, user_addr_t path_p, int follow) if (error) { FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, elements); ctx = vfs_context_current(); NDINIT(&nd, LOOKUP, OP_LOOKUP, - LOCKLEAF | (follow ? FOLLOW : NOFOLLOW) | AUDITVNPATH1, - UIO_USERSPACE, path_p, ctx); + LOCKLEAF | (follow ? 
FOLLOW : NOFOLLOW) | AUDITVNPATH1, + UIO_USERSPACE, path_p, ctx); error = namei(&nd); if (error) { FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); - return (error); + return error; } vp = nd.ni_vp; @@ -1486,39 +1556,37 @@ mac_get_filelink(proc_t p, user_addr_t mac_p, user_addr_t path_p, int follow) intlabel = mac_vnode_label_alloc(); mac_vnode_label_copy(vp->v_label, intlabel); error = mac_vnode_label_externalize(intlabel, elements, buffer, - mac.m_buflen, M_WAITOK); + mac.m_buflen, M_WAITOK); mac_vnode_label_free(intlabel); - if (error == 0) + if (error == 0) { error = copyout(buffer, mac.m_string, strlen(buffer) + 1); + } vnode_put(vp); FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); - return (error); + return error; } int __mac_get_file(proc_t p, struct __mac_get_file_args *uap, - int *ret __unused) + int *ret __unused) { - - return (mac_get_filelink(p, uap->mac_p, uap->path_p, 1)); + return mac_get_filelink(p, uap->mac_p, uap->path_p, 1); } int __mac_get_link(proc_t p, struct __mac_get_link_args *uap, - int *ret __unused) + int *ret __unused) { - - return (mac_get_filelink(p, uap->mac_p, uap->path_p, 0)); + return mac_get_filelink(p, uap->mac_p, uap->path_p, 0); } int __mac_set_fd(proc_t p, struct __mac_set_fd_args *uap, int *ret __unused) { - struct fileproc *fp; struct user_mac mac; struct vfs_context *ctx = vfs_context_current(); @@ -1544,95 +1612,96 @@ __mac_set_fd(proc_t p, struct __mac_set_fd_args *uap, int *ret __unused) mac.m_buflen = mac32.m_buflen; mac.m_string = mac32.m_string; } - if (error) - return (error); - + if (error) { + return error; + } + error = mac_check_structmac_consistent(&mac); - if (error) - return (error); - + if (error) { + return error; + } + MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(mac.m_string, buffer, mac.m_buflen, &ulen); if (error) { FREE(buffer, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, buffer); - + error = fp_lookup(p, uap->fd, &fp, 0); if (error) { FREE(buffer, M_MACTEMP); - return (error); + return error; } - + error = mac_file_check_set(vfs_context_ucred(ctx), fp->f_fglob, buffer, mac.m_buflen); if (error) { fp_drop(p, uap->fd, fp, 0); FREE(buffer, M_MACTEMP); - return (error); + return error; } - - switch (FILEGLOB_DTYPE(fp->f_fglob)) { - case DTYPE_VNODE: - if (mac_label_vnodes == 0) { - error = ENOSYS; - break; - } + switch (FILEGLOB_DTYPE(fp->f_fglob)) { + case DTYPE_VNODE: + if (mac_label_vnodes == 0) { + error = ENOSYS; + break; + } - intlabel = mac_vnode_label_alloc(); + intlabel = mac_vnode_label_alloc(); - error = mac_vnode_label_internalize(intlabel, buffer); - if (error) { - mac_vnode_label_free(intlabel); - break; - } + error = mac_vnode_label_internalize(intlabel, buffer); + if (error) { + mac_vnode_label_free(intlabel); + break; + } - vp = (struct vnode *)fp->f_fglob->fg_data; + vp = (struct vnode *)fp->f_fglob->fg_data; - error = vnode_getwithref(vp); - if (error == 0) { - error = vn_setlabel(vp, intlabel, ctx); - vnode_put(vp); - } - mac_vnode_label_free(intlabel); - break; + error = vnode_getwithref(vp); + if (error == 0) { + error = vn_setlabel(vp, intlabel, ctx); + vnode_put(vp); + } + mac_vnode_label_free(intlabel); + break; - case DTYPE_SOCKET: + case DTYPE_SOCKET: #if CONFIG_MACF_SOCKET - intlabel = mac_socket_label_alloc(MAC_WAITOK); - error = mac_socket_label_internalize(intlabel, buffer); - if (error == 0) { - so = (struct socket *) fp->f_fglob->fg_data; - SOCK_LOCK(so); - error = mac_socket_label_update(vfs_context_ucred(ctx), so, intlabel); - 
SOCK_UNLOCK(so); - } - mac_socket_label_free(intlabel); - break; + intlabel = mac_socket_label_alloc(MAC_WAITOK); + error = mac_socket_label_internalize(intlabel, buffer); + if (error == 0) { + so = (struct socket *) fp->f_fglob->fg_data; + SOCK_LOCK(so); + error = mac_socket_label_update(vfs_context_ucred(ctx), so, intlabel); + SOCK_UNLOCK(so); + } + mac_socket_label_free(intlabel); + break; #endif - case DTYPE_PSXSHM: - case DTYPE_PSXSEM: - case DTYPE_PIPE: - case DTYPE_KQUEUE: - case DTYPE_FSEVENTS: - case DTYPE_ATALK: - case DTYPE_NETPOLICY: - default: - error = ENOSYS; // only sockets/vnodes so far - break; + case DTYPE_PSXSHM: + case DTYPE_PSXSEM: + case DTYPE_PIPE: + case DTYPE_KQUEUE: + case DTYPE_FSEVENTS: + case DTYPE_ATALK: + case DTYPE_NETPOLICY: + default: + error = ENOSYS; // only sockets/vnodes so far + break; } fp_drop(p, uap->fd, fp, 0); FREE(buffer, M_MACTEMP); - return (error); + return error; } static int mac_set_filelink(proc_t p, user_addr_t mac_p, user_addr_t path_p, - int follow) + int follow) { struct vnode *vp; struct vfs_context *ctx = vfs_context_current(); @@ -1643,8 +1712,9 @@ mac_set_filelink(proc_t p, user_addr_t mac_p, user_addr_t path_p, int error; size_t ulen; - if (mac_label_vnodes == 0) + if (mac_label_vnodes == 0) { return ENOSYS; + } if (IS_64BIT_PROCESS(p)) { struct user64_mac mac64; @@ -1657,20 +1727,21 @@ mac_set_filelink(proc_t p, user_addr_t mac_p, user_addr_t path_p, mac.m_buflen = mac32.m_buflen; mac.m_string = mac32.m_string; } - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); if (error) { printf("mac_set_file: failed structure consistency check\n"); - return (error); + return error; } MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(mac.m_string, buffer, mac.m_buflen, &ulen); if (error) { FREE(buffer, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, buffer); @@ -1679,16 +1750,16 @@ mac_set_filelink(proc_t p, user_addr_t mac_p, user_addr_t path_p, FREE(buffer, M_MACTEMP); if (error) { mac_vnode_label_free(intlabel); - return (error); + return error; } NDINIT(&nd, LOOKUP, OP_LOOKUP, - LOCKLEAF | (follow ? FOLLOW : NOFOLLOW) | AUDITVNPATH1, - UIO_USERSPACE, path_p, ctx); + LOCKLEAF | (follow ? 
FOLLOW : NOFOLLOW) | AUDITVNPATH1, + UIO_USERSPACE, path_p, ctx); error = namei(&nd); if (error) { mac_vnode_label_free(intlabel); - return (error); + return error; } vp = nd.ni_vp; @@ -1698,23 +1769,21 @@ mac_set_filelink(proc_t p, user_addr_t mac_p, user_addr_t path_p, vnode_put(vp); mac_vnode_label_free(intlabel); - return (error); + return error; } int __mac_set_file(proc_t p, struct __mac_set_file_args *uap, - int *ret __unused) + int *ret __unused) { - - return (mac_set_filelink(p, uap->mac_p, uap->path_p, 1)); + return mac_set_filelink(p, uap->mac_p, uap->path_p, 1); } int __mac_set_link(proc_t p, struct __mac_set_link_args *uap, - int *ret __unused) + int *ret __unused) { - - return (mac_set_filelink(p, uap->mac_p, uap->path_p, 0)); + return mac_set_filelink(p, uap->mac_p, uap->path_p, 0); } /* @@ -1727,7 +1796,7 @@ __mac_set_link(proc_t p, struct __mac_set_link_args *uap, * Indirect: uap->policy Name of target MAC policy * uap->call MAC policy-specific system call to perform * uap->arg MAC policy-specific system call arguments - * + * * Returns: 0 Success * !0 Not success * @@ -1742,8 +1811,9 @@ __mac_syscall(proc_t p, struct __mac_syscall_args *uap, int *retv __unused) size_t ulen; error = copyinstr(uap->policy, target, sizeof(target), &ulen); - if (error) - return (error); + if (error) { + return error; + } AUDIT_ARG(value32, uap->call); AUDIT_ARG(mac_string, target); @@ -1751,21 +1821,23 @@ __mac_syscall(proc_t p, struct __mac_syscall_args *uap, int *retv __unused) for (i = 0; i < mac_policy_list.staticmax; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } if (strcmp(mpc->mpc_name, target) == 0 && mpc->mpc_ops->mpo_policy_syscall != NULL) { error = mpc->mpc_ops->mpo_policy_syscall(p, uap->call, uap->arg); goto done; - } + } } if (mac_policy_list_conditional_busy() != 0) { for (; i <= mac_policy_list.maxindex; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } if (strcmp(mpc->mpc_name, target) == 0 && mpc->mpc_ops->mpo_policy_syscall != NULL) { @@ -1778,7 +1850,7 @@ __mac_syscall(proc_t p, struct __mac_syscall_args *uap, int *retv __unused) } done: - return (error); + return error; } int @@ -1801,18 +1873,20 @@ mac_mount_label_get(struct mount *mp, user_addr_t mac_p) mac.m_buflen = mac32.m_buflen; mac.m_string = mac32.m_string; } - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); + if (error) { + return error; + } MALLOC(elements, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(mac.m_string, elements, mac.m_buflen, &ulen); if (error) { FREE(elements, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, elements); @@ -1822,11 +1896,12 @@ mac_mount_label_get(struct mount *mp, user_addr_t mac_p) mac.m_buflen); FREE(elements, M_MACTEMP); - if (error == 0) + if (error == 0) { error = copyout(buffer, mac.m_string, strlen(buffer) + 1); + } FREE(buffer, M_MACTEMP); - return (error); + return error; } /* @@ -1837,7 +1912,7 @@ mac_mount_label_get(struct mount *mp, user_addr_t mac_p) * ret (ignored) * * Indirect: uap->path Pathname - * uap->mac_p MAC info + * uap->mac_p MAC info * * Returns: 0 Success * !0 Not success @@ -1852,10 +1927,10 @@ __mac_get_mount(proc_t p __unused, struct __mac_get_mount_args *uap, int error; NDINIT(&nd, LOOKUP, OP_LOOKUP, FOLLOW | AUDITVNPATH1, - UIO_USERSPACE, uap->path, ctx); + UIO_USERSPACE, uap->path, ctx); error = namei(&nd); if (error) { - 
return (error); + return error; } mp = nd.ni_vp->v_mount; vnode_put(nd.ni_vp); @@ -1871,14 +1946,13 @@ __mac_get_mount(proc_t p __unused, struct __mac_get_mount_args *uap, * hook is called just before the thread exit from the kernel in ast_taken(). * * Returns: 0 Success - * !0 Not successful + * !0 Not successful */ int mac_schedule_userret(void) { - act_set_astmacf(current_thread()); - return (0); + return 0; } /* @@ -1888,14 +1962,14 @@ mac_schedule_userret(void) * callback. * * params: code exception code - * subcode exception subcode - * flags flags: - * MAC_DOEXCF_TRACED Only do exception if being - * ptrace()'ed. + * subcode exception subcode + * flags flags: + * MAC_DOEXCF_TRACED Only do exception if being + * ptrace()'ed. * * * Returns: 0 Success - * !0 Not successful + * !0 Not successful */ int mac_do_machexc(int64_t code, int64_t subcode, uint32_t flags) @@ -1904,19 +1978,21 @@ mac_do_machexc(int64_t code, int64_t subcode, uint32_t flags) proc_t p = current_proc(); /* Only allow execption codes in MACF's reserved range. */ - if ((code < EXC_MACF_MIN) || (code > EXC_MACF_MAX)) - return (1); + if ((code < EXC_MACF_MIN) || (code > EXC_MACF_MAX)) { + return 1; + } if (flags & MAC_DOEXCF_TRACED && - !(p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0)) - return (0); + !(p->p_lflag & P_LTRACED && (p->p_lflag & P_LPPWAIT) == 0)) { + return 0; + } /* Send the Mach exception */ codes[0] = (mach_exception_data_type_t)code; codes[1] = (mach_exception_data_type_t)subcode; - return (bsd_exception(EXC_SOFTWARE, codes, 2) != KERN_SUCCESS); + return bsd_exception(EXC_SOFTWARE, codes, 2) != KERN_SUCCESS; } #else /* MAC */ @@ -1926,89 +2002,84 @@ void (*load_security_extensions_function)(void) = 0; struct sysctl_oid_list sysctl__security_mac_children; int -mac_policy_register(struct mac_policy_conf *mpc __unused, - mac_policy_handle_t *handlep __unused, void *xd __unused) +mac_policy_register(struct mac_policy_conf *mpc __unused, + mac_policy_handle_t *handlep __unused, void *xd __unused) { - - return (0); + return 0; } int mac_policy_unregister(mac_policy_handle_t handle __unused) { - - return (0); + return 0; } int mac_audit_text(char *text __unused, mac_policy_handle_t handle __unused) { - - return (0); + return 0; } int mac_vnop_setxattr(struct vnode *vp __unused, const char *name __unused, char *buf __unused, size_t len __unused) { - - return (ENOENT); + return ENOENT; } int -mac_vnop_getxattr(struct vnode *vp __unused, const char *name __unused, - char *buf __unused, size_t len __unused, size_t *attrlen __unused) +mac_vnop_getxattr(struct vnode *vp __unused, const char *name __unused, + char *buf __unused, size_t len __unused, size_t *attrlen __unused) { - - return (ENOENT); + return ENOENT; } int mac_vnop_removexattr(struct vnode *vp __unused, const char *name __unused) { - - return (ENOENT); + return ENOENT; } int mac_file_setxattr(struct fileglob *fg __unused, const char *name __unused, char *buf __unused, size_t len __unused) { - - return (ENOENT); + return ENOENT; } int mac_file_getxattr(struct fileglob *fg __unused, const char *name __unused, - char *buf __unused, size_t len __unused, size_t *attrlen __unused) + char *buf __unused, size_t len __unused, size_t *attrlen __unused) { - - return (ENOENT); + return ENOENT; } int mac_file_removexattr(struct fileglob *fg __unused, const char *name __unused) { - - return (ENOENT); + return ENOENT; } -intptr_t mac_label_get(struct label *l __unused, int slot __unused) +intptr_t +mac_label_get(struct label *l __unused, int slot __unused) 
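/*
 * [Editor's aside -- illustrative sketch, not part of the patch above.]
 * mac_do_machexc() refuses any code outside [EXC_MACF_MIN, EXC_MACF_MAX]
 * and, when MAC_DOEXCF_TRACED is set, fires only while the current
 * process is being ptrace()'d (and is not parked in P_LPPWAIT). A
 * policy would surface a violation to an attached debugger roughly so:
 */
static void
example_report_violation(int64_t detail)
{
	/*
	 * EXC_SOFTWARE with a MACF-range code: visible to a debugger,
	 * a quiet no-op (returns 0) for processes that are not traced.
	 */
	(void)mac_do_machexc(EXC_MACF_MIN, detail, MAC_DOEXCF_TRACED);
}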
{ - return 0; + return 0; } -void mac_label_set(struct label *l __unused, int slot __unused, intptr_t v __unused) +void +mac_label_set(struct label *l __unused, int slot __unused, intptr_t v __unused) { - return; + return; } int mac_iokit_check_hid_control(kauth_cred_t cred __unused); -int mac_iokit_check_hid_control(kauth_cred_t cred __unused) +int +mac_iokit_check_hid_control(kauth_cred_t cred __unused) { - return 0; + return 0; } int mac_vnode_check_trigger_resolve(vfs_context_t ctx __unused, struct vnode *dvp __unused, struct componentname *cnp __unused); -int mac_vnode_check_trigger_resolve(vfs_context_t ctx __unused, struct vnode *dvp __unused, struct componentname *cnp __unused) +int +mac_vnode_check_trigger_resolve(vfs_context_t ctx __unused, struct vnode *dvp __unused, struct componentname *cnp __unused) { return 0; } diff --git a/security/mac_data.c b/security/mac_data.c index ffe50237d..ef8e7b4f8 100644 --- a/security/mac_data.c +++ b/security/mac_data.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -51,7 +51,7 @@ * SUCH DAMAGE. */ -#include +#include #include "mac_internal.h" @@ -62,9 +62,10 @@ mac_find_policy_data(const mac_policy_handle_t handle, const char *key, struct mac_policy_conf *mpc; int error = ENOENT; - if ((mpc = mac_get_mpc(handle)) != NULL) + if ((mpc = mac_get_mpc(handle)) != NULL) { error = mac_find_module_data(mpc->mpc_data, key, valp, sizep); - return (error); + } + return error; } int @@ -85,5 +86,5 @@ mac_find_module_data(struct mac_module_data *mmd, const char *key, } } - return (error); + return error; } diff --git a/security/mac_data.h b/security/mac_data.h index 74df5e6b3..5d58e5a11 100644 --- a/security/mac_data.h +++ b/security/mac_data.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. 
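/*
 * [Editor's aside -- illustrative sketch, not part of the patch above.]
 * mac_find_policy_data() resolves a policy handle to its mpc_data blob
 * and defers to mac_find_module_data() for the key lookup. Assuming the
 * usual void **valp / size_t *sizep out-parameters, a policy could read
 * its OSModuleData during initialization like this ("foo" is the sample
 * key from the mac_data.h comment below):
 */
static void
example_read_module_data(mac_policy_handle_t handle)
{
	void *val;
	size_t size;

	/* ENOENT if the module carries no data or the key is absent */
	if (mac_find_policy_data(handle, "foo", &val, &size) == 0) {
		printf("example policy: foo = %.*s\n", (int)size, (char *)val);
	}
}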
- * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -52,51 +52,51 @@ */ #ifndef _SECURITY_MAC_DATA_H_ -#define _SECURITY_MAC_DATA_H_ +#define _SECURITY_MAC_DATA_H_ #ifndef PRIVATE #warning "MAC policy is not KPI, see Technical Q&A QA1574, this header will be removed in next version" #endif /** - @brief Mac policy module data - - This structure specifies module data that is passed in to the - TrustedBSD MAC policy module by the kernel module loader. The - data is made of up key/value pairs where the key is always a - string and the value is a string, binary data or array. An array - may be a list of values (actually a similar set of key/value pairs, - but in this case the keys are always null), and may also consist of - a set of dictionaries, which in turn are made up of a list of key/value - pairs. - - Module data may be specified in the MAC policy module's - Info.plist file as part of the OSModuleData dictionary. - - E.g. - - <key>OSModuleData</key> - <dict> - <key>foo</key> - <string>bar</string> - <key>Beers</key> - <array> - <dict> - <key>type</key> - <string>lager</string> - <key>Name</key> - <string>Anchor Steam</string> - </dict> - <dict> - <key>type</key> - <string>ale</string> - <key>Name</key> - <string>Sierra Nevada Pale Ale</string> - </dict> - </array> - </dict> - -*/ + * @brief Mac policy module data + * + * This structure specifies module data that is passed in to the + * TrustedBSD MAC policy module by the kernel module loader. The + * data is made up of key/value pairs where the key is always a + * string and the value is a string, binary data or array. An array + * may be a list of values (actually a similar set of key/value pairs, + * but in this case the keys are always null), and may also consist of + * a set of dictionaries, which in turn are made up of a list of key/value + * pairs. + * + * Module data may be specified in the MAC policy module's + * Info.plist file as part of the OSModuleData dictionary. + * + * E.g. + * + * <key>OSModuleData</key> + * <dict> + * <key>foo</key> + * <string>bar</string> + * <key>Beers</key> + * <array> + * <dict> + * <key>type</key> + * <string>lager</string> + * <key>Name</key> + * <string>Anchor Steam</string> + * </dict> + * <dict> + * <key>type</key> + * <string>ale</string> + * <key>Name</key> + * <string>Sierra Nevada Pale Ale</string> + * </dict> + * </array> + * </dict> + * + */ struct mac_module_data_element { unsigned int key_size; unsigned int value_size; @@ -110,15 +110,15 @@ struct mac_module_data_list { struct mac_module_data_element list[1]; }; struct mac_module_data { - void *base_addr; /* Orig base address, for ptr fixup. */ + void *base_addr; /* Orig base address, for ptr fixup. */ unsigned int size; unsigned int count; - struct mac_module_data_element data[1]; /* actually bigger */ + struct mac_module_data_element data[1]; /* actually bigger */ }; -#define MAC_DATA_TYPE_PRIMITIVE 0 /* Primitive type (int, string, etc.) */ -#define MAC_DATA_TYPE_ARRAY 1 /* Array type. */ -#define MAC_DATA_TYPE_DICT 2 /* Dictionary type. */ +#define MAC_DATA_TYPE_PRIMITIVE 0 /* Primitive type (int, string, etc.) */ +#define MAC_DATA_TYPE_ARRAY 1 /* Array type. */ +#define MAC_DATA_TYPE_DICT 2 /* Dictionary type. */ #ifdef _SECURITY_MAC_POLICY_H_ /* XXX mac_policy_handle_t is defined in mac_policy.h, move prototype there? */ @@ -136,7 +136,7 @@ static __inline void mmd_fixup_ele(struct mac_module_data *oldbase, struct mac_module_data *newbase, struct mac_module_data_element *ele) { - if (ele->key != NULL) { /* Array elements have no keys.
*/ + if (ele->key != NULL) { /* Array elements have no keys. */ ele->key -= (uintptr_t)oldbase; ele->key += (uintptr_t)newbase; } diff --git a/security/mac_file.c b/security/mac_file.c index 7f2de809c..afba6a7b3 100644 --- a/security/mac_file.c +++ b/security/mac_file.c @@ -54,23 +54,22 @@ mac_file_label_alloc(void) struct label *label; label = mac_labelzone_alloc(MAC_WAITOK); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_PERFORM(file_label_init, label); - return (label); + return label; } void mac_file_label_init(struct fileglob *fg) { - fg->fg_label = mac_file_label_alloc(); } static void mac_file_label_free(struct label *label) { - MAC_PERFORM(file_label_destroy, label); mac_labelzone_free(label); } @@ -78,14 +77,12 @@ mac_file_label_free(struct label *label) void mac_file_label_associate(struct ucred *cred, struct fileglob *fg) { - MAC_PERFORM(file_label_associate, cred, fg, fg->fg_label); } void mac_file_label_destroy(struct fileglob *fg) { - mac_file_label_free(fg->fg_label); fg->fg_label = NULL; } @@ -96,7 +93,7 @@ mac_file_check_create(struct ucred *cred) int error; MAC_CHECK(file_check_create, cred); - return (error); + return error; } int @@ -105,7 +102,7 @@ mac_file_check_dup(struct ucred *cred, struct fileglob *fg, int newfd) int error; MAC_CHECK(file_check_dup, cred, fg, fg->fg_label, newfd); - return (error); + return error; } int @@ -115,7 +112,7 @@ mac_file_check_fcntl(struct ucred *cred, struct fileglob *fg, int cmd, int error; MAC_CHECK(file_check_fcntl, cred, fg, fg->fg_label, cmd, arg); - return (error); + return error; } int @@ -124,7 +121,7 @@ mac_file_check_ioctl(struct ucred *cred, struct fileglob *fg, u_int cmd) int error; MAC_CHECK(file_check_ioctl, cred, fg, fg->fg_label, cmd); - return (error); + return error; } int @@ -133,7 +130,7 @@ mac_file_check_inherit(struct ucred *cred, struct fileglob *fg) int error; MAC_CHECK(file_check_inherit, cred, fg, fg->fg_label); - return (error); + return error; } int @@ -142,7 +139,7 @@ mac_file_check_receive(struct ucred *cred, struct fileglob *fg) int error; MAC_CHECK(file_check_receive, cred, fg, fg->fg_label); - return (error); + return error; } int @@ -151,7 +148,7 @@ mac_file_check_get_offset(struct ucred *cred, struct fileglob *fg) int error; MAC_CHECK(file_check_get_offset, cred, fg, fg->fg_label); - return (error); + return error; } int @@ -160,17 +157,17 @@ mac_file_check_change_offset(struct ucred *cred, struct fileglob *fg) int error; MAC_CHECK(file_check_change_offset, cred, fg, fg->fg_label); - return (error); + return error; } - + int mac_file_check_get(struct ucred *cred, struct fileglob *fg, char *elements, int len) { int error; - + MAC_CHECK(file_check_get, cred, fg, elements, len); - return (error); + return error; } int @@ -178,9 +175,9 @@ mac_file_check_set(struct ucred *cred, struct fileglob *fg, char *buf, int buflen) { int error; - + MAC_CHECK(file_check_set, cred, fg, buf, buflen); - return (error); + return error; } int @@ -188,20 +185,20 @@ mac_file_check_lock(struct ucred *cred, struct fileglob *fg, int op, struct flock *fl) { int error; - + MAC_CHECK(file_check_lock, cred, fg, fg->fg_label, op, fl); - return (error); + return error; } int mac_file_check_library_validation(struct proc *proc, - struct fileglob *fg, off_t slice_offset, - user_long_t error_message, size_t error_message_size) + struct fileglob *fg, off_t slice_offset, + user_long_t error_message, size_t error_message_size) { int error; MAC_CHECK(file_check_library_validation, proc, fg, slice_offset, 
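/*
 * [Editor's aside -- illustrative sketch, not part of the patch above.]
 * base_addr exists because the module-data blob is copied wholesale to
 * a new kernel address: every embedded key/value pointer still refers
 * to the old mapping and must be rebased (ptr - oldbase + newbase),
 * which is exactly what mmd_fixup_ele() does. Rebasing the top-level
 * elements of a relocated blob looks roughly like this (array/dict
 * values need the same treatment applied to their own element lists):
 */
static void
example_fixup_blob(struct mac_module_data *mmd)	/* already at new address */
{
	struct mac_module_data *oldbase = mmd->base_addr;
	unsigned int i;

	for (i = 0; i < mmd->count; i++) {
		mmd_fixup_ele(oldbase, mmd, &mmd->data[i]);
	}
	mmd->base_addr = mmd;	/* blob is now self-consistent */
}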
error_message, error_message_size); - return (error); + return error; } /* @@ -222,10 +219,11 @@ mac_file_check_mmap(struct ucred *cred, struct fileglob *fg, int prot, maxp = *maxprot; MAC_CHECK(file_check_mmap, cred, fg, fg->fg_label, prot, flags, offset, &maxp); - if ((maxp | *maxprot) != *maxprot) + if ((maxp | *maxprot) != *maxprot) { panic("file_check_mmap increased max protections"); + } *maxprot = maxp; - return (error); + return error; } void @@ -246,7 +244,8 @@ mac_file_check_mmap_downgrade(struct ucred *cred, struct fileglob *fg, */ int -mac_file_setxattr(struct fileglob *fg, const char *name, char *buf, size_t len) { +mac_file_setxattr(struct fileglob *fg, const char *name, char *buf, size_t len) +{ struct vnode *vp = NULL; if (!fg || FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { @@ -259,7 +258,8 @@ mac_file_setxattr(struct fileglob *fg, const char *name, char *buf, size_t len) int mac_file_getxattr(struct fileglob *fg, const char *name, char *buf, size_t len, - size_t *attrlen) { + size_t *attrlen) +{ struct vnode *vp = NULL; if (!fg || FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { @@ -271,7 +271,8 @@ mac_file_getxattr(struct fileglob *fg, const char *name, char *buf, size_t len, } int -mac_file_removexattr(struct fileglob *fg, const char *name) { +mac_file_removexattr(struct fileglob *fg, const char *name) +{ struct vnode *vp = NULL; if (!fg || FILEGLOB_DTYPE(fg) != DTYPE_VNODE) { diff --git a/security/mac_framework.h b/security/mac_framework.h index d735e0124..b40928bb0 100644 --- a/security/mac_framework.h +++ b/security/mac_framework.h @@ -71,7 +71,7 @@ */ #ifndef _SECURITY_MAC_FRAMEWORK_H_ -#define _SECURITY_MAC_FRAMEWORK_H_ +#define _SECURITY_MAC_FRAMEWORK_H_ #ifndef KERNEL #error "no user-serviceable parts inside" @@ -145,456 +145,456 @@ typedef struct OSObject *io_object_t; #endif /* __IOKIT_PORTS_DEFINED__ */ /*@ macros */ -#define VNODE_LABEL_CREATE 1 +#define VNODE_LABEL_CREATE 1 /*@ === */ -int mac_audit_check_postselect(kauth_cred_t cred, unsigned short syscode, - void *args, int error, int retval, int mac_forced); -int mac_audit_check_preselect(kauth_cred_t cred, unsigned short syscode, - void *args); -int mac_bpfdesc_check_receive(struct bpf_d *bpf_d, struct ifnet *ifp); -void mac_bpfdesc_label_destroy(struct bpf_d *bpf_d); -void mac_bpfdesc_label_init(struct bpf_d *bpf_d); -void mac_bpfdesc_label_associate(kauth_cred_t cred, struct bpf_d *bpf_d); -int mac_cred_check_label_update(kauth_cred_t cred, - struct label *newlabel); -int mac_cred_check_label_update_execve(vfs_context_t ctx, - struct vnode *vp, off_t offset, struct vnode *scriptvp, - struct label *scriptvnodelabel, struct label *execlabel, - proc_t proc, void *macextensions); -int mac_cred_check_visible(kauth_cred_t u1, kauth_cred_t u2); -struct label *mac_cred_label_alloc(void); -void mac_cred_label_associate(kauth_cred_t cred_parent, - kauth_cred_t cred_child); -void mac_cred_label_associate_fork(kauth_cred_t cred, proc_t child); -void mac_cred_label_associate_kernel(kauth_cred_t cred); -void mac_cred_label_associate_user(kauth_cred_t cred); -void mac_cred_label_destroy(kauth_cred_t cred); -int mac_cred_label_externalize_audit(proc_t p, struct mac *mac); -void mac_cred_label_free(struct label *label); -void mac_cred_label_init(kauth_cred_t cred); -int mac_cred_label_compare(struct label *a, struct label *b); -void mac_cred_label_update(kauth_cred_t cred, struct label *newlabel); -void mac_cred_label_update_execve(vfs_context_t ctx, kauth_cred_t newcred, - struct vnode *vp, off_t offset, struct vnode *scriptvp, - struct 
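/*
 * [Editor's aside -- illustrative sketch, not part of the patch above.]
 * Every wrapper above has the same shape because MAC_CHECK() expands to
 * a walk over all registered policies, folding their verdicts into the
 * local "error" via mac_error_select(). The mmap wrapper additionally
 * panics when (maxp | *maxprot) != *maxprot, i.e. when any policy tried
 * to widen the maximum protections. A conforming (hypothetical) policy
 * hook may therefore only clear bits:
 */
static int
example_file_check_mmap(kauth_cred_t cred, struct fileglob *fg,
    struct label *label, int prot, int flags, uint64_t file_pos,
    int *maxprot)
{
	if (prot & PROT_EXEC) {
		/* allowed: narrowing -- executable mappings lose write */
		*maxprot &= ~PROT_WRITE;
	}
	/* forbidden: *maxprot |= PROT_EXEC; would trip the panic above */
	return 0;
}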
label *scriptvnodelabel, struct label *execlabel, u_int *csflags, - void *macextensions, int *disjoint, int *labelupdateerror); -void mac_devfs_label_associate_device(dev_t dev, struct devnode *de, - const char *fullpath); -void mac_devfs_label_associate_directory(const char *dirname, int dirnamelen, - struct devnode *de, const char *fullpath); -void mac_devfs_label_copy(struct label *, struct label *label); -void mac_devfs_label_destroy(struct devnode *de); -void mac_devfs_label_init(struct devnode *de); -void mac_devfs_label_update(struct mount *mp, struct devnode *de, - struct vnode *vp); -int mac_execve_enter(user_addr_t mac_p, struct image_params *imgp); -int mac_file_check_change_offset(kauth_cred_t cred, struct fileglob *fg); -int mac_file_check_create(kauth_cred_t cred); -int mac_file_check_dup(kauth_cred_t cred, struct fileglob *fg, int newfd); -int mac_file_check_fcntl(kauth_cred_t cred, struct fileglob *fg, int cmd, - user_long_t arg); -int mac_file_check_get(kauth_cred_t cred, struct fileglob *fg, - char *elements, int len); -int mac_file_check_get_offset(kauth_cred_t cred, struct fileglob *fg); -int mac_file_check_inherit(kauth_cred_t cred, struct fileglob *fg); -int mac_file_check_ioctl(kauth_cred_t cred, struct fileglob *fg, - unsigned int cmd); -int mac_file_check_lock(kauth_cred_t cred, struct fileglob *fg, int op, - struct flock *fl); -int mac_file_check_library_validation(struct proc *proc, - struct fileglob *fg, off_t slice_offset, - user_long_t error_message, size_t error_message_size); -int mac_file_check_mmap(kauth_cred_t cred, struct fileglob *fg, - int prot, int flags, uint64_t file_pos, int *maxprot); -void mac_file_check_mmap_downgrade(kauth_cred_t cred, struct fileglob *fg, - int *prot); -int mac_file_check_receive(kauth_cred_t cred, struct fileglob *fg); -int mac_file_check_set(kauth_cred_t cred, struct fileglob *fg, - char *bufp, int buflen); -void mac_file_label_associate(kauth_cred_t cred, struct fileglob *fg); -void mac_file_label_destroy(struct fileglob *fg); -void mac_file_label_init(struct fileglob *fg); -int mac_ifnet_check_transmit(struct ifnet *ifp, struct mbuf *mbuf, - int family, int type); -void mac_ifnet_label_associate(struct ifnet *ifp); -void mac_ifnet_label_destroy(struct ifnet *ifp); -int mac_ifnet_label_get(kauth_cred_t cred, struct ifreq *ifr, - struct ifnet *ifp); -void mac_ifnet_label_init(struct ifnet *ifp); -void mac_ifnet_label_recycle(struct ifnet *ifp); -int mac_ifnet_label_set(kauth_cred_t cred, struct ifreq *ifr, - struct ifnet *ifp); -int mac_inpcb_check_deliver(struct inpcb *inp, struct mbuf *mbuf, - int family, int type); -void mac_inpcb_label_associate(struct socket *so, struct inpcb *inp); -void mac_inpcb_label_destroy(struct inpcb *inp); -int mac_inpcb_label_init(struct inpcb *inp, int flag); -void mac_inpcb_label_recycle(struct inpcb *inp); -void mac_inpcb_label_update(struct socket *so); -int mac_iokit_check_device(char *devtype, struct mac_module_data *mdata); -int mac_iokit_check_open(kauth_cred_t cred, io_object_t user_client, unsigned int user_client_type); -int mac_iokit_check_set_properties(kauth_cred_t cred, io_object_t registry_entry, io_object_t properties); -int mac_iokit_check_filter_properties(kauth_cred_t cred, io_object_t registry_entry); -int mac_iokit_check_get_property(kauth_cred_t cred, io_object_t registry_entry, const char *name); -int mac_iokit_check_hid_control(kauth_cred_t cred); -void mac_ipq_label_associate(struct mbuf *fragment, struct ipq *ipq); -int mac_ipq_label_compare(struct mbuf *fragment, 
struct ipq *ipq); -void mac_ipq_label_destroy(struct ipq *ipq); -int mac_ipq_label_init(struct ipq *ipq, int flag); -void mac_ipq_label_update(struct mbuf *fragment, struct ipq *ipq); -void mac_mbuf_label_associate_bpfdesc(struct bpf_d *bpf_d, struct mbuf *m); -void mac_mbuf_label_associate_ifnet(struct ifnet *ifp, struct mbuf *m); -void mac_mbuf_label_associate_inpcb(struct inpcb *inp, struct mbuf *m); -void mac_mbuf_label_associate_ipq(struct ipq *ipq, struct mbuf *mbuf); -void mac_mbuf_label_associate_linklayer(struct ifnet *ifp, struct mbuf *m); -void mac_mbuf_label_associate_multicast_encap(struct mbuf *oldmbuf, - struct ifnet *ifp, struct mbuf *newmbuf); -void mac_mbuf_label_associate_netlayer(struct mbuf *oldmbuf, - struct mbuf *newmbuf); -void mac_mbuf_label_associate_socket(struct socket *so, struct mbuf *m); -void mac_mbuf_label_copy(struct mbuf *m_from, struct mbuf *m_to); -void mac_mbuf_label_destroy(struct mbuf *m); -int mac_mbuf_label_init(struct mbuf *m, int flag); -void mac_mbuf_tag_copy(struct m_tag *m, struct m_tag *mtag); -void mac_mbuf_tag_destroy(struct m_tag *mtag); -int mac_mbuf_tag_init(struct m_tag *, int how); -int mac_mount_check_fsctl(vfs_context_t ctx, struct mount *mp, - unsigned int cmd); -int mac_mount_check_getattr(vfs_context_t ctx, struct mount *mp, - struct vfs_attr *vfa); -int mac_mount_check_label_update(vfs_context_t ctx, struct mount *mp); -int mac_mount_check_mount(vfs_context_t ctx, struct vnode *vp, - struct componentname *cnp, const char *vfc_name); -int mac_mount_check_snapshot_create(vfs_context_t ctx, struct mount *mp, - const char *name); -int mac_mount_check_snapshot_delete(vfs_context_t ctx, struct mount *mp, - const char *name); -int mac_mount_check_snapshot_revert(vfs_context_t ctx, struct mount *mp, - const char *name); -int mac_mount_check_remount(vfs_context_t ctx, struct mount *mp); -int mac_mount_check_setattr(vfs_context_t ctx, struct mount *mp, - struct vfs_attr *vfa); -int mac_mount_check_stat(vfs_context_t ctx, struct mount *mp); -int mac_mount_check_umount(vfs_context_t ctx, struct mount *mp); -void mac_mount_label_associate(vfs_context_t ctx, struct mount *mp); -void mac_mount_label_destroy(struct mount *mp); -int mac_mount_label_externalize(struct label *label, char *elements, - char *outbuf, size_t outbuflen); -int mac_mount_label_get(struct mount *mp, user_addr_t mac_p); -void mac_mount_label_init(struct mount *); -int mac_mount_label_internalize(struct label *, char *string); -void mac_netinet_fragment(struct mbuf *datagram, struct mbuf *fragment); -void mac_netinet_icmp_reply(struct mbuf *m); -void mac_netinet_tcp_reply(struct mbuf *m); -int mac_pipe_check_ioctl(kauth_cred_t cred, struct pipe *cpipe, - unsigned int cmd); -int mac_pipe_check_kqfilter(kauth_cred_t cred, struct knote *kn, - struct pipe *cpipe); -int mac_pipe_check_read(kauth_cred_t cred, struct pipe *cpipe); -int mac_pipe_check_select(kauth_cred_t cred, struct pipe *cpipe, - int which); -int mac_pipe_check_stat(kauth_cred_t cred, struct pipe *cpipe); -int mac_pipe_check_write(kauth_cred_t cred, struct pipe *cpipe); -struct label *mac_pipe_label_alloc(void); -void mac_pipe_label_associate(kauth_cred_t cred, struct pipe *cpipe); -void mac_pipe_label_copy(struct label *src, struct label *dest); -void mac_pipe_label_destroy(struct pipe *cpipe); -void mac_pipe_label_free(struct label *label); -void mac_pipe_label_init(struct pipe *cpipe); -int mac_pipe_label_update(kauth_cred_t cred, struct pipe *cpipe, - struct label *label); +int 
mac_audit_check_postselect(kauth_cred_t cred, unsigned short syscode, + void *args, int error, int retval, int mac_forced); +int mac_audit_check_preselect(kauth_cred_t cred, unsigned short syscode, + void *args); +int mac_bpfdesc_check_receive(struct bpf_d *bpf_d, struct ifnet *ifp); +void mac_bpfdesc_label_destroy(struct bpf_d *bpf_d); +void mac_bpfdesc_label_init(struct bpf_d *bpf_d); +void mac_bpfdesc_label_associate(kauth_cred_t cred, struct bpf_d *bpf_d); +int mac_cred_check_label_update(kauth_cred_t cred, + struct label *newlabel); +int mac_cred_check_label_update_execve(vfs_context_t ctx, + struct vnode *vp, off_t offset, struct vnode *scriptvp, + struct label *scriptvnodelabel, struct label *execlabel, + proc_t proc, void *macextensions); +int mac_cred_check_visible(kauth_cred_t u1, kauth_cred_t u2); +struct label *mac_cred_label_alloc(void); +void mac_cred_label_associate(kauth_cred_t cred_parent, + kauth_cred_t cred_child); +void mac_cred_label_associate_fork(kauth_cred_t cred, proc_t child); +void mac_cred_label_associate_kernel(kauth_cred_t cred); +void mac_cred_label_associate_user(kauth_cred_t cred); +void mac_cred_label_destroy(kauth_cred_t cred); +int mac_cred_label_externalize_audit(proc_t p, struct mac *mac); +void mac_cred_label_free(struct label *label); +void mac_cred_label_init(kauth_cred_t cred); +int mac_cred_label_compare(struct label *a, struct label *b); +void mac_cred_label_update(kauth_cred_t cred, struct label *newlabel); +void mac_cred_label_update_execve(vfs_context_t ctx, kauth_cred_t newcred, + struct vnode *vp, off_t offset, struct vnode *scriptvp, + struct label *scriptvnodelabel, struct label *execlabel, u_int *csflags, + void *macextensions, int *disjoint, int *labelupdateerror); +void mac_devfs_label_associate_device(dev_t dev, struct devnode *de, + const char *fullpath); +void mac_devfs_label_associate_directory(const char *dirname, int dirnamelen, + struct devnode *de, const char *fullpath); +void mac_devfs_label_copy(struct label *, struct label *label); +void mac_devfs_label_destroy(struct devnode *de); +void mac_devfs_label_init(struct devnode *de); +void mac_devfs_label_update(struct mount *mp, struct devnode *de, + struct vnode *vp); +int mac_execve_enter(user_addr_t mac_p, struct image_params *imgp); +int mac_file_check_change_offset(kauth_cred_t cred, struct fileglob *fg); +int mac_file_check_create(kauth_cred_t cred); +int mac_file_check_dup(kauth_cred_t cred, struct fileglob *fg, int newfd); +int mac_file_check_fcntl(kauth_cred_t cred, struct fileglob *fg, int cmd, + user_long_t arg); +int mac_file_check_get(kauth_cred_t cred, struct fileglob *fg, + char *elements, int len); +int mac_file_check_get_offset(kauth_cred_t cred, struct fileglob *fg); +int mac_file_check_inherit(kauth_cred_t cred, struct fileglob *fg); +int mac_file_check_ioctl(kauth_cred_t cred, struct fileglob *fg, + unsigned int cmd); +int mac_file_check_lock(kauth_cred_t cred, struct fileglob *fg, int op, + struct flock *fl); +int mac_file_check_library_validation(struct proc *proc, + struct fileglob *fg, off_t slice_offset, + user_long_t error_message, size_t error_message_size); +int mac_file_check_mmap(kauth_cred_t cred, struct fileglob *fg, + int prot, int flags, uint64_t file_pos, int *maxprot); +void mac_file_check_mmap_downgrade(kauth_cred_t cred, struct fileglob *fg, + int *prot); +int mac_file_check_receive(kauth_cred_t cred, struct fileglob *fg); +int mac_file_check_set(kauth_cred_t cred, struct fileglob *fg, + char *bufp, int buflen); +void 
mac_file_label_associate(kauth_cred_t cred, struct fileglob *fg); +void mac_file_label_destroy(struct fileglob *fg); +void mac_file_label_init(struct fileglob *fg); +int mac_ifnet_check_transmit(struct ifnet *ifp, struct mbuf *mbuf, + int family, int type); +void mac_ifnet_label_associate(struct ifnet *ifp); +void mac_ifnet_label_destroy(struct ifnet *ifp); +int mac_ifnet_label_get(kauth_cred_t cred, struct ifreq *ifr, + struct ifnet *ifp); +void mac_ifnet_label_init(struct ifnet *ifp); +void mac_ifnet_label_recycle(struct ifnet *ifp); +int mac_ifnet_label_set(kauth_cred_t cred, struct ifreq *ifr, + struct ifnet *ifp); +int mac_inpcb_check_deliver(struct inpcb *inp, struct mbuf *mbuf, + int family, int type); +void mac_inpcb_label_associate(struct socket *so, struct inpcb *inp); +void mac_inpcb_label_destroy(struct inpcb *inp); +int mac_inpcb_label_init(struct inpcb *inp, int flag); +void mac_inpcb_label_recycle(struct inpcb *inp); +void mac_inpcb_label_update(struct socket *so); +int mac_iokit_check_device(char *devtype, struct mac_module_data *mdata); +int mac_iokit_check_open(kauth_cred_t cred, io_object_t user_client, unsigned int user_client_type); +int mac_iokit_check_set_properties(kauth_cred_t cred, io_object_t registry_entry, io_object_t properties); +int mac_iokit_check_filter_properties(kauth_cred_t cred, io_object_t registry_entry); +int mac_iokit_check_get_property(kauth_cred_t cred, io_object_t registry_entry, const char *name); +int mac_iokit_check_hid_control(kauth_cred_t cred); +void mac_ipq_label_associate(struct mbuf *fragment, struct ipq *ipq); +int mac_ipq_label_compare(struct mbuf *fragment, struct ipq *ipq); +void mac_ipq_label_destroy(struct ipq *ipq); +int mac_ipq_label_init(struct ipq *ipq, int flag); +void mac_ipq_label_update(struct mbuf *fragment, struct ipq *ipq); +void mac_mbuf_label_associate_bpfdesc(struct bpf_d *bpf_d, struct mbuf *m); +void mac_mbuf_label_associate_ifnet(struct ifnet *ifp, struct mbuf *m); +void mac_mbuf_label_associate_inpcb(struct inpcb *inp, struct mbuf *m); +void mac_mbuf_label_associate_ipq(struct ipq *ipq, struct mbuf *mbuf); +void mac_mbuf_label_associate_linklayer(struct ifnet *ifp, struct mbuf *m); +void mac_mbuf_label_associate_multicast_encap(struct mbuf *oldmbuf, + struct ifnet *ifp, struct mbuf *newmbuf); +void mac_mbuf_label_associate_netlayer(struct mbuf *oldmbuf, + struct mbuf *newmbuf); +void mac_mbuf_label_associate_socket(struct socket *so, struct mbuf *m); +void mac_mbuf_label_copy(struct mbuf *m_from, struct mbuf *m_to); +void mac_mbuf_label_destroy(struct mbuf *m); +int mac_mbuf_label_init(struct mbuf *m, int flag); +void mac_mbuf_tag_copy(struct m_tag *m, struct m_tag *mtag); +void mac_mbuf_tag_destroy(struct m_tag *mtag); +int mac_mbuf_tag_init(struct m_tag *, int how); +int mac_mount_check_fsctl(vfs_context_t ctx, struct mount *mp, + unsigned int cmd); +int mac_mount_check_getattr(vfs_context_t ctx, struct mount *mp, + struct vfs_attr *vfa); +int mac_mount_check_label_update(vfs_context_t ctx, struct mount *mp); +int mac_mount_check_mount(vfs_context_t ctx, struct vnode *vp, + struct componentname *cnp, const char *vfc_name); +int mac_mount_check_snapshot_create(vfs_context_t ctx, struct mount *mp, + const char *name); +int mac_mount_check_snapshot_delete(vfs_context_t ctx, struct mount *mp, + const char *name); +int mac_mount_check_snapshot_revert(vfs_context_t ctx, struct mount *mp, + const char *name); +int mac_mount_check_remount(vfs_context_t ctx, struct mount *mp); +int mac_mount_check_setattr(vfs_context_t 
ctx, struct mount *mp, + struct vfs_attr *vfa); +int mac_mount_check_stat(vfs_context_t ctx, struct mount *mp); +int mac_mount_check_umount(vfs_context_t ctx, struct mount *mp); +void mac_mount_label_associate(vfs_context_t ctx, struct mount *mp); +void mac_mount_label_destroy(struct mount *mp); +int mac_mount_label_externalize(struct label *label, char *elements, + char *outbuf, size_t outbuflen); +int mac_mount_label_get(struct mount *mp, user_addr_t mac_p); +void mac_mount_label_init(struct mount *); +int mac_mount_label_internalize(struct label *, char *string); +void mac_netinet_fragment(struct mbuf *datagram, struct mbuf *fragment); +void mac_netinet_icmp_reply(struct mbuf *m); +void mac_netinet_tcp_reply(struct mbuf *m); +int mac_pipe_check_ioctl(kauth_cred_t cred, struct pipe *cpipe, + unsigned int cmd); +int mac_pipe_check_kqfilter(kauth_cred_t cred, struct knote *kn, + struct pipe *cpipe); +int mac_pipe_check_read(kauth_cred_t cred, struct pipe *cpipe); +int mac_pipe_check_select(kauth_cred_t cred, struct pipe *cpipe, + int which); +int mac_pipe_check_stat(kauth_cred_t cred, struct pipe *cpipe); +int mac_pipe_check_write(kauth_cred_t cred, struct pipe *cpipe); +struct label *mac_pipe_label_alloc(void); +void mac_pipe_label_associate(kauth_cred_t cred, struct pipe *cpipe); +void mac_pipe_label_copy(struct label *src, struct label *dest); +void mac_pipe_label_destroy(struct pipe *cpipe); +void mac_pipe_label_free(struct label *label); +void mac_pipe_label_init(struct pipe *cpipe); +int mac_pipe_label_update(kauth_cred_t cred, struct pipe *cpipe, + struct label *label); void mac_policy_initbsd(void); -int mac_posixsem_check_create(kauth_cred_t cred, const char *name); -int mac_posixsem_check_open(kauth_cred_t cred, struct pseminfo *psem); -int mac_posixsem_check_post(kauth_cred_t cred, struct pseminfo *psem); -int mac_posixsem_check_unlink(kauth_cred_t cred, struct pseminfo *psem, - const char *name); -int mac_posixsem_check_wait(kauth_cred_t cred, struct pseminfo *psem); -void mac_posixsem_vnode_label_associate(kauth_cred_t cred, - struct pseminfo *psem, struct label *plabel, - vnode_t vp, struct label *vlabel); -void mac_posixsem_label_associate(kauth_cred_t cred, - struct pseminfo *psem, const char *name); -void mac_posixsem_label_destroy(struct pseminfo *psem); -void mac_posixsem_label_init(struct pseminfo *psem); -int mac_posixshm_check_create(kauth_cred_t cred, const char *name); -int mac_posixshm_check_mmap(kauth_cred_t cred, struct pshminfo *pshm, - int prot, int flags); -int mac_posixshm_check_open(kauth_cred_t cred, struct pshminfo *pshm, - int fflags); -int mac_posixshm_check_stat(kauth_cred_t cred, struct pshminfo *pshm); -int mac_posixshm_check_truncate(kauth_cred_t cred, struct pshminfo *pshm, - off_t s); -int mac_posixshm_check_unlink(kauth_cred_t cred, struct pshminfo *pshm, - const char *name); -void mac_posixshm_vnode_label_associate(kauth_cred_t cred, - struct pshminfo *pshm, struct label *plabel, - vnode_t vp, struct label *vlabel); -void mac_posixshm_label_associate(kauth_cred_t cred, - struct pshminfo *pshm, const char *name); -void mac_posixshm_label_destroy(struct pshminfo *pshm); -void mac_posixshm_label_init(struct pshminfo *pshm); -int mac_priv_check(kauth_cred_t cred, int priv); -int mac_priv_grant(kauth_cred_t cred, int priv); -int mac_proc_check_debug(proc_t proc1, proc_t proc2); -int mac_proc_check_proc_info(proc_t curp, proc_t target, int callnum, int flavor); -int mac_proc_check_get_cs_info(proc_t curp, proc_t target, unsigned int op); -int 
mac_proc_check_set_cs_info(proc_t curp, proc_t target, unsigned int op); -int mac_proc_check_fork(proc_t proc); -int mac_proc_check_suspend_resume(proc_t proc, int sr); -int mac_proc_check_get_task_name(kauth_cred_t cred, struct proc *p); -int mac_proc_check_get_task(kauth_cred_t cred, struct proc *p); -int mac_proc_check_expose_task(kauth_cred_t cred, struct proc *p); -int mac_proc_check_inherit_ipc_ports(struct proc *p, struct vnode *cur_vp, off_t cur_offset, struct vnode *img_vp, off_t img_offset, struct vnode *scriptvp); -int mac_proc_check_getaudit(proc_t proc); -int mac_proc_check_getauid(proc_t proc); +int mac_posixsem_check_create(kauth_cred_t cred, const char *name); +int mac_posixsem_check_open(kauth_cred_t cred, struct pseminfo *psem); +int mac_posixsem_check_post(kauth_cred_t cred, struct pseminfo *psem); +int mac_posixsem_check_unlink(kauth_cred_t cred, struct pseminfo *psem, + const char *name); +int mac_posixsem_check_wait(kauth_cred_t cred, struct pseminfo *psem); +void mac_posixsem_vnode_label_associate(kauth_cred_t cred, + struct pseminfo *psem, struct label *plabel, + vnode_t vp, struct label *vlabel); +void mac_posixsem_label_associate(kauth_cred_t cred, + struct pseminfo *psem, const char *name); +void mac_posixsem_label_destroy(struct pseminfo *psem); +void mac_posixsem_label_init(struct pseminfo *psem); +int mac_posixshm_check_create(kauth_cred_t cred, const char *name); +int mac_posixshm_check_mmap(kauth_cred_t cred, struct pshminfo *pshm, + int prot, int flags); +int mac_posixshm_check_open(kauth_cred_t cred, struct pshminfo *pshm, + int fflags); +int mac_posixshm_check_stat(kauth_cred_t cred, struct pshminfo *pshm); +int mac_posixshm_check_truncate(kauth_cred_t cred, struct pshminfo *pshm, + off_t s); +int mac_posixshm_check_unlink(kauth_cred_t cred, struct pshminfo *pshm, + const char *name); +void mac_posixshm_vnode_label_associate(kauth_cred_t cred, + struct pshminfo *pshm, struct label *plabel, + vnode_t vp, struct label *vlabel); +void mac_posixshm_label_associate(kauth_cred_t cred, + struct pshminfo *pshm, const char *name); +void mac_posixshm_label_destroy(struct pshminfo *pshm); +void mac_posixshm_label_init(struct pshminfo *pshm); +int mac_priv_check(kauth_cred_t cred, int priv); +int mac_priv_grant(kauth_cred_t cred, int priv); +int mac_proc_check_debug(proc_t proc1, proc_t proc2); +int mac_proc_check_proc_info(proc_t curp, proc_t target, int callnum, int flavor); +int mac_proc_check_get_cs_info(proc_t curp, proc_t target, unsigned int op); +int mac_proc_check_set_cs_info(proc_t curp, proc_t target, unsigned int op); +int mac_proc_check_fork(proc_t proc); +int mac_proc_check_suspend_resume(proc_t proc, int sr); +int mac_proc_check_get_task_name(kauth_cred_t cred, struct proc *p); +int mac_proc_check_get_task(kauth_cred_t cred, struct proc *p); +int mac_proc_check_expose_task(kauth_cred_t cred, struct proc *p); +int mac_proc_check_inherit_ipc_ports(struct proc *p, struct vnode *cur_vp, off_t cur_offset, struct vnode *img_vp, off_t img_offset, struct vnode *scriptvp); +int mac_proc_check_getaudit(proc_t proc); +int mac_proc_check_getauid(proc_t proc); int mac_proc_check_getlcid(proc_t proc1, proc_t proc2, - pid_t pid); + pid_t pid); int mac_proc_check_ledger(proc_t curp, proc_t target, int op); -int mac_proc_check_map_anon(proc_t proc, user_addr_t u_addr, - user_size_t u_size, int prot, int flags, int *maxprot); -int mac_proc_check_mprotect(proc_t proc, - user_addr_t addr, user_size_t size, int prot); -int mac_proc_check_run_cs_invalid(proc_t proc); -int 
mac_proc_check_sched(proc_t proc, proc_t proc2); -int mac_proc_check_setaudit(proc_t proc, struct auditinfo_addr *ai); -int mac_proc_check_setauid(proc_t proc, uid_t auid); +int mac_proc_check_map_anon(proc_t proc, user_addr_t u_addr, + user_size_t u_size, int prot, int flags, int *maxprot); +int mac_proc_check_mprotect(proc_t proc, + user_addr_t addr, user_size_t size, int prot); +int mac_proc_check_run_cs_invalid(proc_t proc); +int mac_proc_check_sched(proc_t proc, proc_t proc2); +int mac_proc_check_setaudit(proc_t proc, struct auditinfo_addr *ai); +int mac_proc_check_setauid(proc_t proc, uid_t auid); int mac_proc_check_setlcid(proc_t proc1, proc_t proc2, - pid_t pid1, pid_t pid2); -int mac_proc_check_signal(proc_t proc1, proc_t proc2, - int signum); -int mac_proc_check_wait(proc_t proc1, proc_t proc2); -void mac_proc_notify_exit(proc_t proc); -int mac_setsockopt_label(kauth_cred_t cred, struct socket *so, - struct mac *extmac); + pid_t pid1, pid_t pid2); +int mac_proc_check_signal(proc_t proc1, proc_t proc2, + int signum); +int mac_proc_check_wait(proc_t proc1, proc_t proc2); +void mac_proc_notify_exit(proc_t proc); +int mac_setsockopt_label(kauth_cred_t cred, struct socket *so, + struct mac *extmac); int mac_socket_check_accept(kauth_cred_t cred, struct socket *so); int mac_socket_check_accepted(kauth_cred_t cred, struct socket *so); -int mac_socket_check_bind(kauth_cred_t cred, struct socket *so, - struct sockaddr *addr); -int mac_socket_check_connect(kauth_cred_t cred, struct socket *so, - struct sockaddr *addr); -int mac_socket_check_create(kauth_cred_t cred, int domain, - int type, int protocol); -int mac_socket_check_deliver(struct socket *so, struct mbuf *m); -int mac_socket_check_ioctl(kauth_cred_t cred, struct socket *so, - unsigned int cmd); -int mac_socket_check_kqfilter(kauth_cred_t cred, struct knote *kn, - struct socket *so); -int mac_socket_check_listen(kauth_cred_t cred, struct socket *so); -int mac_socket_check_receive(kauth_cred_t cred, struct socket *so); -int mac_socket_check_received(kauth_cred_t cred, struct socket *so, - struct sockaddr *saddr); +int mac_socket_check_bind(kauth_cred_t cred, struct socket *so, + struct sockaddr *addr); +int mac_socket_check_connect(kauth_cred_t cred, struct socket *so, + struct sockaddr *addr); +int mac_socket_check_create(kauth_cred_t cred, int domain, + int type, int protocol); +int mac_socket_check_deliver(struct socket *so, struct mbuf *m); +int mac_socket_check_ioctl(kauth_cred_t cred, struct socket *so, + unsigned int cmd); +int mac_socket_check_kqfilter(kauth_cred_t cred, struct knote *kn, + struct socket *so); +int mac_socket_check_listen(kauth_cred_t cred, struct socket *so); +int mac_socket_check_receive(kauth_cred_t cred, struct socket *so); +int mac_socket_check_received(kauth_cred_t cred, struct socket *so, + struct sockaddr *saddr); int mac_socket_check_select(kauth_cred_t cred, struct socket *so, - int which); -int mac_socket_check_send(kauth_cred_t cred, struct socket *so, - struct sockaddr *addr); -int mac_socket_check_getsockopt(kauth_cred_t cred, struct socket *so, - struct sockopt *sopt); -int mac_socket_check_setsockopt(kauth_cred_t cred, struct socket *so, - struct sockopt *sopt); -int mac_socket_check_stat(kauth_cred_t cred, struct socket *so); -void mac_socket_label_associate(kauth_cred_t cred, struct socket *so); -void mac_socket_label_associate_accept(struct socket *oldsocket, - struct socket *newsocket); -void mac_socket_label_copy(struct label *from, struct label *to); -void 
mac_socket_label_destroy(struct socket *); -int mac_socket_label_get(kauth_cred_t cred, struct socket *so, - struct mac *extmac); -int mac_socket_label_init(struct socket *, int waitok); -void mac_socketpeer_label_associate_mbuf(struct mbuf *m, struct socket *so); -void mac_socketpeer_label_associate_socket(struct socket *peersocket, - struct socket *socket_to_modify); -int mac_socketpeer_label_get(kauth_cred_t cred, struct socket *so, - struct mac *extmac); -int mac_system_check_acct(kauth_cred_t cred, struct vnode *vp); -int mac_system_check_audit(kauth_cred_t cred, void *record, int length); -int mac_system_check_auditctl(kauth_cred_t cred, struct vnode *vp); -int mac_system_check_auditon(kauth_cred_t cred, int cmd); -int mac_system_check_chud(kauth_cred_t cred); -int mac_system_check_host_priv(kauth_cred_t cred); -int mac_system_check_info(kauth_cred_t, const char *info_type); -int mac_system_check_nfsd(kauth_cred_t cred); -int mac_system_check_reboot(kauth_cred_t cred, int howto); -int mac_system_check_settime(kauth_cred_t cred); -int mac_system_check_swapoff(kauth_cred_t cred, struct vnode *vp); -int mac_system_check_swapon(kauth_cred_t cred, struct vnode *vp); -int mac_system_check_sysctlbyname(kauth_cred_t cred, const char *namestring, int *name, - u_int namelen, user_addr_t oldctl, size_t oldlen, - user_addr_t newctl, size_t newlen); -int mac_system_check_kas_info(kauth_cred_t cred, int selector); -void mac_sysvmsg_label_associate(kauth_cred_t cred, - struct msqid_kernel *msqptr, struct msg *msgptr); -void mac_sysvmsg_label_init(struct msg *msgptr); -void mac_sysvmsg_label_recycle(struct msg *msgptr); -int mac_sysvmsq_check_enqueue(kauth_cred_t cred, struct msg *msgptr, - struct msqid_kernel *msqptr); -int mac_sysvmsq_check_msgrcv(kauth_cred_t cred, struct msg *msgptr); -int mac_sysvmsq_check_msgrmid(kauth_cred_t cred, struct msg *msgptr); -int mac_sysvmsq_check_msqctl(kauth_cred_t cred, - struct msqid_kernel *msqptr, int cmd); -int mac_sysvmsq_check_msqget(kauth_cred_t cred, - struct msqid_kernel *msqptr); -int mac_sysvmsq_check_msqrcv(kauth_cred_t cred, - struct msqid_kernel *msqptr); -int mac_sysvmsq_check_msqsnd(kauth_cred_t cred, - struct msqid_kernel *msqptr); -void mac_sysvmsq_label_associate(kauth_cred_t cred, - struct msqid_kernel *msqptr); -void mac_sysvmsq_label_init(struct msqid_kernel *msqptr); -void mac_sysvmsq_label_recycle(struct msqid_kernel *msqptr); -int mac_sysvsem_check_semctl(kauth_cred_t cred, - struct semid_kernel *semakptr, int cmd); -int mac_sysvsem_check_semget(kauth_cred_t cred, - struct semid_kernel *semakptr); -int mac_sysvsem_check_semop(kauth_cred_t cred, - struct semid_kernel *semakptr, size_t accesstype); -void mac_sysvsem_label_associate(kauth_cred_t cred, - struct semid_kernel *semakptr); -void mac_sysvsem_label_destroy(struct semid_kernel *semakptr); -void mac_sysvsem_label_init(struct semid_kernel *semakptr); -void mac_sysvsem_label_recycle(struct semid_kernel *semakptr); -int mac_sysvshm_check_shmat(kauth_cred_t cred, - struct shmid_kernel *shmsegptr, int shmflg); -int mac_sysvshm_check_shmctl(kauth_cred_t cred, - struct shmid_kernel *shmsegptr, int cmd); -int mac_sysvshm_check_shmdt(kauth_cred_t cred, - struct shmid_kernel *shmsegptr); -int mac_sysvshm_check_shmget(kauth_cred_t cred, - struct shmid_kernel *shmsegptr, int shmflg); -void mac_sysvshm_label_associate(kauth_cred_t cred, - struct shmid_kernel *shmsegptr); -void mac_sysvshm_label_destroy(struct shmid_kernel *shmsegptr); -void mac_sysvshm_label_init(struct shmid_kernel* shmsegptr); 
-void mac_sysvshm_label_recycle(struct shmid_kernel *shmsegptr); -int mac_vnode_check_access(vfs_context_t ctx, struct vnode *vp, - int acc_mode); -int mac_vnode_check_chdir(vfs_context_t ctx, struct vnode *dvp); -int mac_vnode_check_chroot(vfs_context_t ctx, struct vnode *dvp, - struct componentname *cnp); -int mac_vnode_check_clone(vfs_context_t ctx, struct vnode *dvp, - struct vnode *vp, struct componentname *cnp); -int mac_vnode_check_create(vfs_context_t ctx, struct vnode *dvp, - struct componentname *cnp, struct vnode_attr *vap); -int mac_vnode_check_deleteextattr(vfs_context_t ctx, struct vnode *vp, - const char *name); -int mac_vnode_check_exchangedata(vfs_context_t ctx, struct vnode *v1, - struct vnode *v2); -int mac_vnode_check_exec(vfs_context_t ctx, struct vnode *vp, - struct image_params *imgp); -int mac_vnode_check_fsgetpath(vfs_context_t ctx, struct vnode *vp); -int mac_vnode_check_getattr(vfs_context_t ctx, struct ucred *file_cred, - struct vnode *vp, struct vnode_attr *va); + int which); +int mac_socket_check_send(kauth_cred_t cred, struct socket *so, + struct sockaddr *addr); +int mac_socket_check_getsockopt(kauth_cred_t cred, struct socket *so, + struct sockopt *sopt); +int mac_socket_check_setsockopt(kauth_cred_t cred, struct socket *so, + struct sockopt *sopt); +int mac_socket_check_stat(kauth_cred_t cred, struct socket *so); +void mac_socket_label_associate(kauth_cred_t cred, struct socket *so); +void mac_socket_label_associate_accept(struct socket *oldsocket, + struct socket *newsocket); +void mac_socket_label_copy(struct label *from, struct label *to); +void mac_socket_label_destroy(struct socket *); +int mac_socket_label_get(kauth_cred_t cred, struct socket *so, + struct mac *extmac); +int mac_socket_label_init(struct socket *, int waitok); +void mac_socketpeer_label_associate_mbuf(struct mbuf *m, struct socket *so); +void mac_socketpeer_label_associate_socket(struct socket *peersocket, + struct socket *socket_to_modify); +int mac_socketpeer_label_get(kauth_cred_t cred, struct socket *so, + struct mac *extmac); +int mac_system_check_acct(kauth_cred_t cred, struct vnode *vp); +int mac_system_check_audit(kauth_cred_t cred, void *record, int length); +int mac_system_check_auditctl(kauth_cred_t cred, struct vnode *vp); +int mac_system_check_auditon(kauth_cred_t cred, int cmd); +int mac_system_check_chud(kauth_cred_t cred); +int mac_system_check_host_priv(kauth_cred_t cred); +int mac_system_check_info(kauth_cred_t, const char *info_type); +int mac_system_check_nfsd(kauth_cred_t cred); +int mac_system_check_reboot(kauth_cred_t cred, int howto); +int mac_system_check_settime(kauth_cred_t cred); +int mac_system_check_swapoff(kauth_cred_t cred, struct vnode *vp); +int mac_system_check_swapon(kauth_cred_t cred, struct vnode *vp); +int mac_system_check_sysctlbyname(kauth_cred_t cred, const char *namestring, int *name, + u_int namelen, user_addr_t oldctl, size_t oldlen, + user_addr_t newctl, size_t newlen); +int mac_system_check_kas_info(kauth_cred_t cred, int selector); +void mac_sysvmsg_label_associate(kauth_cred_t cred, + struct msqid_kernel *msqptr, struct msg *msgptr); +void mac_sysvmsg_label_init(struct msg *msgptr); +void mac_sysvmsg_label_recycle(struct msg *msgptr); +int mac_sysvmsq_check_enqueue(kauth_cred_t cred, struct msg *msgptr, + struct msqid_kernel *msqptr); +int mac_sysvmsq_check_msgrcv(kauth_cred_t cred, struct msg *msgptr); +int mac_sysvmsq_check_msgrmid(kauth_cred_t cred, struct msg *msgptr); +int mac_sysvmsq_check_msqctl(kauth_cred_t cred, + struct 
msqid_kernel *msqptr, int cmd); +int mac_sysvmsq_check_msqget(kauth_cred_t cred, + struct msqid_kernel *msqptr); +int mac_sysvmsq_check_msqrcv(kauth_cred_t cred, + struct msqid_kernel *msqptr); +int mac_sysvmsq_check_msqsnd(kauth_cred_t cred, + struct msqid_kernel *msqptr); +void mac_sysvmsq_label_associate(kauth_cred_t cred, + struct msqid_kernel *msqptr); +void mac_sysvmsq_label_init(struct msqid_kernel *msqptr); +void mac_sysvmsq_label_recycle(struct msqid_kernel *msqptr); +int mac_sysvsem_check_semctl(kauth_cred_t cred, + struct semid_kernel *semakptr, int cmd); +int mac_sysvsem_check_semget(kauth_cred_t cred, + struct semid_kernel *semakptr); +int mac_sysvsem_check_semop(kauth_cred_t cred, + struct semid_kernel *semakptr, size_t accesstype); +void mac_sysvsem_label_associate(kauth_cred_t cred, + struct semid_kernel *semakptr); +void mac_sysvsem_label_destroy(struct semid_kernel *semakptr); +void mac_sysvsem_label_init(struct semid_kernel *semakptr); +void mac_sysvsem_label_recycle(struct semid_kernel *semakptr); +int mac_sysvshm_check_shmat(kauth_cred_t cred, + struct shmid_kernel *shmsegptr, int shmflg); +int mac_sysvshm_check_shmctl(kauth_cred_t cred, + struct shmid_kernel *shmsegptr, int cmd); +int mac_sysvshm_check_shmdt(kauth_cred_t cred, + struct shmid_kernel *shmsegptr); +int mac_sysvshm_check_shmget(kauth_cred_t cred, + struct shmid_kernel *shmsegptr, int shmflg); +void mac_sysvshm_label_associate(kauth_cred_t cred, + struct shmid_kernel *shmsegptr); +void mac_sysvshm_label_destroy(struct shmid_kernel *shmsegptr); +void mac_sysvshm_label_init(struct shmid_kernel* shmsegptr); +void mac_sysvshm_label_recycle(struct shmid_kernel *shmsegptr); +int mac_vnode_check_access(vfs_context_t ctx, struct vnode *vp, + int acc_mode); +int mac_vnode_check_chdir(vfs_context_t ctx, struct vnode *dvp); +int mac_vnode_check_chroot(vfs_context_t ctx, struct vnode *dvp, + struct componentname *cnp); +int mac_vnode_check_clone(vfs_context_t ctx, struct vnode *dvp, + struct vnode *vp, struct componentname *cnp); +int mac_vnode_check_create(vfs_context_t ctx, struct vnode *dvp, + struct componentname *cnp, struct vnode_attr *vap); +int mac_vnode_check_deleteextattr(vfs_context_t ctx, struct vnode *vp, + const char *name); +int mac_vnode_check_exchangedata(vfs_context_t ctx, struct vnode *v1, + struct vnode *v2); +int mac_vnode_check_exec(vfs_context_t ctx, struct vnode *vp, + struct image_params *imgp); +int mac_vnode_check_fsgetpath(vfs_context_t ctx, struct vnode *vp); +int mac_vnode_check_getattr(vfs_context_t ctx, struct ucred *file_cred, + struct vnode *vp, struct vnode_attr *va); int mac_vnode_check_getattrlist(vfs_context_t ctx, struct vnode *vp, - struct attrlist *alist); -int mac_vnode_check_getextattr(vfs_context_t ctx, struct vnode *vp, - const char *name, struct uio *uio); -int mac_vnode_check_ioctl(vfs_context_t ctx, struct vnode *vp, - unsigned int cmd); -int mac_vnode_check_kqfilter(vfs_context_t ctx, - kauth_cred_t file_cred, struct knote *kn, struct vnode *vp); -int mac_vnode_check_label_update(vfs_context_t ctx, struct vnode *vp, - struct label *newlabel); -int mac_vnode_check_link(vfs_context_t ctx, struct vnode *dvp, - struct vnode *vp, struct componentname *cnp); -int mac_vnode_check_listextattr(vfs_context_t ctx, struct vnode *vp); -int mac_vnode_check_lookup(vfs_context_t ctx, struct vnode *dvp, - struct componentname *cnp); -int mac_vnode_check_lookup_preflight(vfs_context_t ctx, struct vnode *dvp, - const char *path, size_t pathlen); -int mac_vnode_check_open(vfs_context_t 
ctx, struct vnode *vp, - int acc_mode); -int mac_vnode_check_read(vfs_context_t ctx, - kauth_cred_t file_cred, struct vnode *vp); -int mac_vnode_check_readdir(vfs_context_t ctx, struct vnode *vp); -int mac_vnode_check_readlink(vfs_context_t ctx, struct vnode *vp); -int mac_vnode_check_rename(vfs_context_t ctx, struct vnode *dvp, - struct vnode *vp, struct componentname *cnp, struct vnode *tdvp, - struct vnode *tvp, struct componentname *tcnp); -int mac_vnode_check_revoke(vfs_context_t ctx, struct vnode *vp); -int mac_vnode_check_searchfs(vfs_context_t ctx, struct vnode *vp, - struct attrlist *alist); + struct attrlist *alist); +int mac_vnode_check_getextattr(vfs_context_t ctx, struct vnode *vp, + const char *name, struct uio *uio); +int mac_vnode_check_ioctl(vfs_context_t ctx, struct vnode *vp, + unsigned int cmd); +int mac_vnode_check_kqfilter(vfs_context_t ctx, + kauth_cred_t file_cred, struct knote *kn, struct vnode *vp); +int mac_vnode_check_label_update(vfs_context_t ctx, struct vnode *vp, + struct label *newlabel); +int mac_vnode_check_link(vfs_context_t ctx, struct vnode *dvp, + struct vnode *vp, struct componentname *cnp); +int mac_vnode_check_listextattr(vfs_context_t ctx, struct vnode *vp); +int mac_vnode_check_lookup(vfs_context_t ctx, struct vnode *dvp, + struct componentname *cnp); +int mac_vnode_check_lookup_preflight(vfs_context_t ctx, struct vnode *dvp, + const char *path, size_t pathlen); +int mac_vnode_check_open(vfs_context_t ctx, struct vnode *vp, + int acc_mode); +int mac_vnode_check_read(vfs_context_t ctx, + kauth_cred_t file_cred, struct vnode *vp); +int mac_vnode_check_readdir(vfs_context_t ctx, struct vnode *vp); +int mac_vnode_check_readlink(vfs_context_t ctx, struct vnode *vp); +int mac_vnode_check_rename(vfs_context_t ctx, struct vnode *dvp, + struct vnode *vp, struct componentname *cnp, struct vnode *tdvp, + struct vnode *tvp, struct componentname *tcnp); +int mac_vnode_check_revoke(vfs_context_t ctx, struct vnode *vp); +int mac_vnode_check_searchfs(vfs_context_t ctx, struct vnode *vp, + struct attrlist *alist); int mac_vnode_check_select(vfs_context_t ctx, struct vnode *vp, - int which); -int mac_vnode_check_setacl(vfs_context_t ctx, struct vnode *vp, - struct kauth_acl *acl); + int which); +int mac_vnode_check_setacl(vfs_context_t ctx, struct vnode *vp, + struct kauth_acl *acl); int mac_vnode_check_setattrlist(vfs_context_t ctxd, struct vnode *vp, - struct attrlist *alist); -int mac_vnode_check_setextattr(vfs_context_t ctx, struct vnode *vp, - const char *name, struct uio *uio); -int mac_vnode_check_setflags(vfs_context_t ctx, struct vnode *vp, - u_long flags); -int mac_vnode_check_setmode(vfs_context_t ctx, struct vnode *vp, - mode_t mode); -int mac_vnode_check_setowner(vfs_context_t ctx, struct vnode *vp, - uid_t uid, gid_t gid); -int mac_vnode_check_setutimes(vfs_context_t ctx, struct vnode *vp, - struct timespec atime, struct timespec mtime); -int mac_vnode_check_signature(struct vnode *vp, - struct cs_blob *cs_blob, struct image_params *imgp, - unsigned int *cs_flags, unsigned int *signer_type, - int flags); -int mac_vnode_check_stat(vfs_context_t ctx, - kauth_cred_t file_cred, struct vnode *vp); -int mac_vnode_check_trigger_resolve(vfs_context_t ctx, struct vnode *dvp, - struct componentname *cnp); -int mac_vnode_check_truncate(vfs_context_t ctx, - kauth_cred_t file_cred, struct vnode *vp); -int mac_vnode_check_uipc_bind(vfs_context_t ctx, struct vnode *dvp, - struct componentname *cnp, struct vnode_attr *vap); -int 
mac_vnode_check_uipc_connect(vfs_context_t ctx, struct vnode *vp, struct socket *so); -int mac_vnode_check_unlink(vfs_context_t ctx, struct vnode *dvp, - struct vnode *vp, struct componentname *cnp); -int mac_vnode_check_write(vfs_context_t ctx, - kauth_cred_t file_cred, struct vnode *vp); -struct label *mac_vnode_label_alloc(void); -int mac_vnode_label_associate(struct mount *mp, struct vnode *vp, - vfs_context_t ctx); -void mac_vnode_label_associate_devfs(struct mount *mp, struct devnode *de, - struct vnode *vp); -int mac_vnode_label_associate_extattr(struct mount *mp, struct vnode *vp); -int mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, - struct vnode *vp, vfs_context_t ctx); -void mac_vnode_label_associate_singlelabel(struct mount *mp, - struct vnode *vp); -void mac_vnode_label_copy(struct label *l1, struct label *l2); -void mac_vnode_label_destroy(struct vnode *vp); -int mac_vnode_label_externalize_audit(struct vnode *vp, struct mac *mac); -void mac_vnode_label_free(struct label *label); -void mac_vnode_label_init(struct vnode *vp); -int mac_vnode_label_init_needed(struct vnode *vp); -void mac_vnode_label_recycle(struct vnode *vp); -void mac_vnode_label_update(vfs_context_t ctx, struct vnode *vp, - struct label *newlabel); -void mac_vnode_label_update_extattr(struct mount *mp, struct vnode *vp, - const char *name); -int mac_vnode_notify_create(vfs_context_t ctx, struct mount *mp, - struct vnode *dvp, struct vnode *vp, struct componentname *cnp); -void mac_vnode_notify_deleteextattr(vfs_context_t ctx, struct vnode *vp, const char *name); -void mac_vnode_notify_link(vfs_context_t ctx, struct vnode *vp, - struct vnode *dvp, struct componentname *cnp); -void mac_vnode_notify_open(vfs_context_t ctx, struct vnode *vp, int acc_flags); -void mac_vnode_notify_rename(vfs_context_t ctx, struct vnode *vp, - struct vnode *dvp, struct componentname *cnp); -void mac_vnode_notify_setacl(vfs_context_t ctx, struct vnode *vp, struct kauth_acl *acl); -void mac_vnode_notify_setattrlist(vfs_context_t ctx, struct vnode *vp, struct attrlist *alist); -void mac_vnode_notify_setextattr(vfs_context_t ctx, struct vnode *vp, const char *name, struct uio *uio); -void mac_vnode_notify_setflags(vfs_context_t ctx, struct vnode *vp, u_long flags); -void mac_vnode_notify_setmode(vfs_context_t ctx, struct vnode *vp, mode_t mode); -void mac_vnode_notify_setowner(vfs_context_t ctx, struct vnode *vp, uid_t uid, gid_t gid); -void mac_vnode_notify_setutimes(vfs_context_t ctx, struct vnode *vp, struct timespec atime, struct timespec mtime); -void mac_vnode_notify_truncate(vfs_context_t ctx, kauth_cred_t file_cred, struct vnode *vp); -int mac_vnode_find_sigs(struct proc *p, struct vnode *vp, off_t offsetInMacho); -int vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, - struct componentname *cnp, int flags, vfs_context_t ctx); -void vnode_relabel(struct vnode *vp); -void mac_pty_notify_grant(proc_t p, struct tty *tp, dev_t dev, struct label *label); -void mac_pty_notify_close(proc_t p, struct tty *tp, dev_t dev, struct label *label); -int mac_kext_check_load(kauth_cred_t cred, const char *identifier); -int mac_kext_check_unload(kauth_cred_t cred, const char *identifier); -int mac_kext_check_query(kauth_cred_t cred); -int mac_skywalk_flow_check_connect(proc_t p, void *flow, const struct sockaddr *addr, int type, int protocol); -int mac_skywalk_flow_check_listen(proc_t p, void *flow, const struct sockaddr *addr, int type, int protocol); + struct attrlist *alist); +int 
mac_vnode_check_setextattr(vfs_context_t ctx, struct vnode *vp, + const char *name, struct uio *uio); +int mac_vnode_check_setflags(vfs_context_t ctx, struct vnode *vp, + u_long flags); +int mac_vnode_check_setmode(vfs_context_t ctx, struct vnode *vp, + mode_t mode); +int mac_vnode_check_setowner(vfs_context_t ctx, struct vnode *vp, + uid_t uid, gid_t gid); +int mac_vnode_check_setutimes(vfs_context_t ctx, struct vnode *vp, + struct timespec atime, struct timespec mtime); +int mac_vnode_check_signature(struct vnode *vp, + struct cs_blob *cs_blob, struct image_params *imgp, + unsigned int *cs_flags, unsigned int *signer_type, + int flags); +int mac_vnode_check_stat(vfs_context_t ctx, + kauth_cred_t file_cred, struct vnode *vp); +int mac_vnode_check_trigger_resolve(vfs_context_t ctx, struct vnode *dvp, + struct componentname *cnp); +int mac_vnode_check_truncate(vfs_context_t ctx, + kauth_cred_t file_cred, struct vnode *vp); +int mac_vnode_check_uipc_bind(vfs_context_t ctx, struct vnode *dvp, + struct componentname *cnp, struct vnode_attr *vap); +int mac_vnode_check_uipc_connect(vfs_context_t ctx, struct vnode *vp, struct socket *so); +int mac_vnode_check_unlink(vfs_context_t ctx, struct vnode *dvp, + struct vnode *vp, struct componentname *cnp); +int mac_vnode_check_write(vfs_context_t ctx, + kauth_cred_t file_cred, struct vnode *vp); +struct label *mac_vnode_label_alloc(void); +int mac_vnode_label_associate(struct mount *mp, struct vnode *vp, + vfs_context_t ctx); +void mac_vnode_label_associate_devfs(struct mount *mp, struct devnode *de, + struct vnode *vp); +int mac_vnode_label_associate_extattr(struct mount *mp, struct vnode *vp); +int mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, + struct vnode *vp, vfs_context_t ctx); +void mac_vnode_label_associate_singlelabel(struct mount *mp, + struct vnode *vp); +void mac_vnode_label_copy(struct label *l1, struct label *l2); +void mac_vnode_label_destroy(struct vnode *vp); +int mac_vnode_label_externalize_audit(struct vnode *vp, struct mac *mac); +void mac_vnode_label_free(struct label *label); +void mac_vnode_label_init(struct vnode *vp); +int mac_vnode_label_init_needed(struct vnode *vp); +void mac_vnode_label_recycle(struct vnode *vp); +void mac_vnode_label_update(vfs_context_t ctx, struct vnode *vp, + struct label *newlabel); +void mac_vnode_label_update_extattr(struct mount *mp, struct vnode *vp, + const char *name); +int mac_vnode_notify_create(vfs_context_t ctx, struct mount *mp, + struct vnode *dvp, struct vnode *vp, struct componentname *cnp); +void mac_vnode_notify_deleteextattr(vfs_context_t ctx, struct vnode *vp, const char *name); +void mac_vnode_notify_link(vfs_context_t ctx, struct vnode *vp, + struct vnode *dvp, struct componentname *cnp); +void mac_vnode_notify_open(vfs_context_t ctx, struct vnode *vp, int acc_flags); +void mac_vnode_notify_rename(vfs_context_t ctx, struct vnode *vp, + struct vnode *dvp, struct componentname *cnp); +void mac_vnode_notify_setacl(vfs_context_t ctx, struct vnode *vp, struct kauth_acl *acl); +void mac_vnode_notify_setattrlist(vfs_context_t ctx, struct vnode *vp, struct attrlist *alist); +void mac_vnode_notify_setextattr(vfs_context_t ctx, struct vnode *vp, const char *name, struct uio *uio); +void mac_vnode_notify_setflags(vfs_context_t ctx, struct vnode *vp, u_long flags); +void mac_vnode_notify_setmode(vfs_context_t ctx, struct vnode *vp, mode_t mode); +void mac_vnode_notify_setowner(vfs_context_t ctx, struct vnode *vp, uid_t uid, gid_t gid); +void 
mac_vnode_notify_setutimes(vfs_context_t ctx, struct vnode *vp, struct timespec atime, struct timespec mtime); +void mac_vnode_notify_truncate(vfs_context_t ctx, kauth_cred_t file_cred, struct vnode *vp); +int mac_vnode_find_sigs(struct proc *p, struct vnode *vp, off_t offsetInMacho); +int vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, + struct componentname *cnp, int flags, vfs_context_t ctx); +void vnode_relabel(struct vnode *vp); +void mac_pty_notify_grant(proc_t p, struct tty *tp, dev_t dev, struct label *label); +void mac_pty_notify_close(proc_t p, struct tty *tp, dev_t dev, struct label *label); +int mac_kext_check_load(kauth_cred_t cred, const char *identifier); +int mac_kext_check_unload(kauth_cred_t cred, const char *identifier); +int mac_kext_check_query(kauth_cred_t cred); +int mac_skywalk_flow_check_connect(proc_t p, void *flow, const struct sockaddr *addr, int type, int protocol); +int mac_skywalk_flow_check_listen(proc_t p, void *flow, const struct sockaddr *addr, int type, int protocol); void psem_label_associate(struct fileproc *fp, struct vnode *vp, struct vfs_context *ctx); void pshm_label_associate(struct fileproc *fp, struct vnode *vp, struct vfs_context *ctx); @@ -604,6 +604,6 @@ struct label *mac_bpfdesc_label_get(struct bpf_d *d); void mac_bpfdesc_label_set(struct bpf_d *d, struct label *label); #endif -#endif /* CONFIG_MACF */ +#endif /* CONFIG_MACF */ #endif /* !_SECURITY_MAC_FRAMEWORK_H_ */ diff --git a/security/mac_inet.c b/security/mac_inet.c index 1f57fdc79..ccbc1995e 100644 --- a/security/mac_inet.c +++ b/security/mac_inet.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -86,25 +86,26 @@ mac_inpcb_label_alloc(int flag) int error; label = mac_labelzone_alloc(flag); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_CHECK(inpcb_label_init, label, flag); if (error) { MAC_PERFORM(inpcb_label_destroy, label); mac_labelzone_free(label); - return (NULL); + return NULL; } - return (label); + return label; } int mac_inpcb_label_init(struct inpcb *inp, int flag) { - inp->inp_label = mac_inpcb_label_alloc(flag); - if (inp->inp_label == NULL) - return (ENOMEM); - return (0); + inp->inp_label = mac_inpcb_label_alloc(flag); + if (inp->inp_label == NULL) { + return ENOMEM; + } + return 0; } static struct label * @@ -114,32 +115,32 @@ mac_ipq_label_alloc(int flag) int error; label = mac_labelzone_alloc(flag); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_CHECK(ipq_label_init, label, flag); if (error) { MAC_PERFORM(ipq_label_destroy, label); mac_labelzone_free(label); - return (NULL); + return NULL; } - return (label); + return label; } int mac_ipq_label_init(struct ipq *ipq, int flag) { - ipq->ipq_label = mac_ipq_label_alloc(flag); - if (ipq->ipq_label == NULL) - return (ENOMEM); - return (0); + ipq->ipq_label = mac_ipq_label_alloc(flag); + if (ipq->ipq_label == NULL) { + return ENOMEM; + } + return 0; } static void mac_inpcb_label_free(struct label *label) { - MAC_PERFORM(inpcb_label_destroy, label); mac_labelzone_free(label); } @@ -147,7 +148,6 @@ mac_inpcb_label_free(struct label *label) void mac_inpcb_label_destroy(struct inpcb *inp) { - mac_inpcb_label_free(inp->inp_label); inp->inp_label = NULL; } @@ -155,14 +155,12 @@ mac_inpcb_label_destroy(struct inpcb *inp) void mac_inpcb_label_recycle(struct inpcb *inp) { - MAC_PERFORM(inpcb_label_recycle, inp->inp_label); } static void mac_ipq_label_free(struct label *label) { - MAC_PERFORM(ipq_label_destroy, label); mac_labelzone_free(label); } @@ -170,7 +168,6 @@ mac_ipq_label_free(struct label *label) void mac_ipq_label_destroy(struct ipq *ipq) { - mac_ipq_label_free(ipq->ipq_label); ipq->ipq_label = NULL; } @@ -178,7 +175,6 @@ mac_ipq_label_destroy(struct ipq *ipq) void mac_inpcb_label_associate(struct socket *so, struct inpcb *inp) { - MAC_PERFORM(inpcb_label_associate, so, so->so_label, inp, inp->inp_label); } @@ -237,7 +233,7 @@ mac_ipq_label_compare(struct mbuf *fragment, struct ipq *ipq) result = 1; MAC_BOOLEAN(ipq_label_compare, &&, fragment, label, ipq, ipq->ipq_label); - return (result); + return result; } void @@ -276,15 +272,16 @@ mac_inpcb_check_deliver(struct inpcb *inp, struct mbuf *m, int family, int type) struct label *label; int error; - if ((m->m_flags & M_PKTHDR) == 0) + if ((m->m_flags & M_PKTHDR) == 0) { panic("%s: no mbuf packet header!", __func__); + } label = mac_mbuf_to_label(m); MAC_CHECK(inpcb_check_deliver, inp, inp->inp_label, m, label, family, type); - return (error); + return error; } /* @@ -296,7 +293,7 @@ mac_inpcb_label_update(struct socket *so) struct inpcb *inp; /* XXX: assert socket lock. */ - inp = sotoinpcb(so); /* XXX: inp locking */ + inp = sotoinpcb(so); /* XXX: inp locking */ if (inp != NULL) { /* INP_LOCK_ASSERT(inp); */ diff --git a/security/mac_internal.h b/security/mac_internal.h index 0f034d23f..ec457f0cb 100644 --- a/security/mac_internal.h +++ b/security/mac_internal.h @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved.
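The mac_internal.h changes that follow are whitespace-only: they re-indent the MAC_CHECK(), MAC_GRANT(), MAC_BOOLEAN(), and MAC_PERFORM() policy-walk macros without changing behavior. Because the escaped-newline form is hard to read in a diff, here is a de-macroized sketch of roughly what a MAC_CHECK() invocation expands to. The entry point mpo_example_check is hypothetical; mac_policy_list, mac_error_select(), mac_policy_list_conditional_busy(), and mac_policy_list_unbusy() are the names the macros themselves use.

/* Sketch only: roughly what MAC_CHECK(example_check, cred) expands to. */
static int
mac_example_check_sketch(kauth_cred_t cred)
{
	struct mac_policy_conf *mpc;
	u_int i;
	int error = 0;

	/* First walk the static policies, which can never unload. */
	for (i = 0; i < mac_policy_list.staticmax; i++) {
		mpc = mac_policy_list.entries[i].mpc;
		if (mpc == NULL || mpc->mpc_ops->mpo_example_check == NULL) {
			continue;
		}
		/* Fold each policy's verdict into a single errno;
		 * mac_error_select() decides which error "wins" when
		 * policies disagree. */
		error = mac_error_select(
		    mpc->mpc_ops->mpo_example_check(cred), error);
	}
	/* Then the dynamically loaded policies, under a busy reference
	 * so that no policy can unload while it is being consulted. */
	if (mac_policy_list_conditional_busy() != 0) {
		for (; i <= mac_policy_list.maxindex; i++) {
			mpc = mac_policy_list.entries[i].mpc;
			if (mpc == NULL || mpc->mpc_ops->mpo_example_check == NULL) {
				continue;
			}
			error = mac_error_select(
			    mpc->mpc_ops->mpo_example_check(cred), error);
		}
		mac_policy_list_unbusy();
	}
	return error;
}

The two-pass walk is the design point: static policies, registered before boot completed, are consulted without synchronization, while dynamically loaded policies are consulted only while a busy count blocks unloading.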
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -100,17 +100,17 @@ SYSCTL_DECL(_security_mac); extern int mac_late; struct mac_policy_list_element { - struct mac_policy_conf *mpc; -}; + struct mac_policy_conf *mpc; +}; struct mac_policy_list { - u_int numloaded; - u_int max; - u_int maxindex; - u_int staticmax; - u_int chunks; - u_int freehint; - struct mac_policy_list_element *entries; + u_int numloaded; + u_int max; + u_int maxindex; + u_int staticmax; + u_int chunks; + u_int freehint; + struct mac_policy_list_element *entries; }; typedef struct mac_policy_list mac_policy_list_t; @@ -121,19 +121,19 @@ typedef struct mac_policy_list mac_policy_list_t; * label namespace name. */ struct mac_label_listener { - mac_policy_handle_t mll_handle; - LIST_ENTRY(mac_label_listener) mll_list; + mac_policy_handle_t mll_handle; + LIST_ENTRY(mac_label_listener) mll_list; }; LIST_HEAD(mac_label_listeners_t, mac_label_listener); -/* +/* * Type of list used to manage label namespace names. - */ + */ struct mac_label_element { - char mle_name[MAC_MAX_LABEL_ELEMENT_NAME]; - struct mac_label_listeners_t mle_listeners; - LIST_ENTRY(mac_label_element) mle_list; + char mle_name[MAC_MAX_LABEL_ELEMENT_NAME]; + struct mac_label_listeners_t mle_listeners; + LIST_ENTRY(mac_label_element) mle_list; }; LIST_HEAD(mac_label_element_list_t, mac_label_element); @@ -172,11 +172,12 @@ extern unsigned int mac_label_vnodes; static bool mac_proc_check_enforce(proc_t p); -static __inline__ bool mac_proc_check_enforce(proc_t p) +static __inline__ bool +mac_proc_check_enforce(proc_t p) { #if CONFIG_MACF // Don't apply policies to the kernel itself. 
- return (p != kernproc); + return p != kernproc; #else #pragma unused(p) return false; @@ -185,10 +186,11 @@ static __inline__ bool mac_proc_check_enforce(proc_t p) static bool mac_cred_check_enforce(kauth_cred_t cred); -static __inline__ bool mac_cred_check_enforce(kauth_cred_t cred) +static __inline__ bool +mac_cred_check_enforce(kauth_cred_t cred) { #if CONFIG_MACF - return (cred != proc_ucred(kernproc)); + return cred != proc_ucred(kernproc); #else #pragma unused(p) return false; @@ -216,14 +218,14 @@ int mac_check_structmac_consistent(struct user_mac *mac); #else int mac_check_structmac_consistent(struct mac *mac); #endif - + int mac_cred_label_externalize(struct label *, char *e, char *out, size_t olen, int flags); #if CONFIG_MACF_SOCKET int mac_socket_label_externalize(struct label *, char *e, char *out, size_t olen); #endif /* CONFIG_MACF_SOCKET */ int mac_vnode_label_externalize(struct label *, char *e, char *out, size_t olen, int flags); int mac_pipe_label_externalize(struct label *label, char *elements, - char *outbuf, size_t outbuflen); + char *outbuf, size_t outbuflen); int mac_cred_label_internalize(struct label *label, char *string); #if CONFIG_MACF_SOCKET @@ -251,34 +253,34 @@ struct label *mac_mbuf_to_label(struct mbuf *m); * request. Note that it returns its value via 'error' in the scope * of the caller. */ -#define MAC_CHECK(check, args...) do { \ - struct mac_policy_conf *mpc; \ - u_int i; \ - \ - error = 0; \ - for (i = 0; i < mac_policy_list.staticmax; i++) { \ - mpc = mac_policy_list.entries[i].mpc; \ - if (mpc == NULL) \ - continue; \ - \ - if (mpc->mpc_ops->mpo_ ## check != NULL) \ - error = mac_error_select( \ - mpc->mpc_ops->mpo_ ## check (args), \ - error); \ - } \ - if (mac_policy_list_conditional_busy() != 0) { \ - for (; i <= mac_policy_list.maxindex; i++) { \ - mpc = mac_policy_list.entries[i].mpc; \ - if (mpc == NULL) \ - continue; \ +#define MAC_CHECK(check, args...) do { \ + struct mac_policy_conf *mpc; \ + u_int i; \ + \ + error = 0; \ + for (i = 0; i < mac_policy_list.staticmax; i++) { \ + mpc = mac_policy_list.entries[i].mpc; \ + if (mpc == NULL) \ + continue; \ \ - if (mpc->mpc_ops->mpo_ ## check != NULL) \ - error = mac_error_select( \ - mpc->mpc_ops->mpo_ ## check (args), \ - error); \ - } \ - mac_policy_list_unbusy(); \ - } \ + if (mpc->mpc_ops->mpo_ ## check != NULL) \ + error = mac_error_select( \ + mpc->mpc_ops->mpo_ ## check (args), \ + error); \ + } \ + if (mac_policy_list_conditional_busy() != 0) { \ + for (; i <= mac_policy_list.maxindex; i++) { \ + mpc = mac_policy_list.entries[i].mpc; \ + if (mpc == NULL) \ + continue; \ + \ + if (mpc->mpc_ops->mpo_ ## check != NULL) \ + error = mac_error_select( \ + mpc->mpc_ops->mpo_ ## check (args), \ + error); \ + } \ + mac_policy_list_unbusy(); \ + } \ } while (0) /* @@ -288,35 +290,35 @@ struct label *mac_mbuf_to_label(struct mbuf *m); * and otherwise returns EPERM. Note that it returns its value via * 'error' in the scope of the caller. */ -#define MAC_GRANT(check, args...) 
do { \ - struct mac_policy_conf *mpc; \ - u_int i; \ - \ - error = EPERM; \ - for (i = 0; i < mac_policy_list.staticmax; i++) { \ - mpc = mac_policy_list.entries[i].mpc; \ - if (mpc == NULL) \ - continue; \ - \ - if (mpc->mpc_ops->mpo_ ## check != NULL) { \ - if (mpc->mpc_ops->mpo_ ## check (args) == 0) \ - error = 0; \ - } \ - } \ - if (mac_policy_list_conditional_busy() != 0) { \ - for (; i <= mac_policy_list.maxindex; i++) { \ - mpc = mac_policy_list.entries[i].mpc; \ - if (mpc == NULL) \ - continue; \ - \ - if (mpc->mpc_ops->mpo_ ## check != NULL) { \ - if (mpc->mpc_ops->mpo_ ## check (args) \ - == 0) \ - error = 0; \ - } \ - } \ - mac_policy_list_unbusy(); \ - } \ +#define MAC_GRANT(check, args...) do { \ + struct mac_policy_conf *mpc; \ + u_int i; \ + \ + error = EPERM; \ + for (i = 0; i < mac_policy_list.staticmax; i++) { \ + mpc = mac_policy_list.entries[i].mpc; \ + if (mpc == NULL) \ + continue; \ + \ + if (mpc->mpc_ops->mpo_ ## check != NULL) { \ + if (mpc->mpc_ops->mpo_ ## check (args) == 0) \ + error = 0; \ + } \ + } \ + if (mac_policy_list_conditional_busy() != 0) { \ + for (; i <= mac_policy_list.maxindex; i++) { \ + mpc = mac_policy_list.entries[i].mpc; \ + if (mpc == NULL) \ + continue; \ + \ + if (mpc->mpc_ops->mpo_ ## check != NULL) { \ + if (mpc->mpc_ops->mpo_ ## check (args) \ + == 0) \ + error = 0; \ + } \ + } \ + mac_policy_list_unbusy(); \ + } \ } while (0) /* @@ -327,71 +329,71 @@ struct label *mac_mbuf_to_label(struct mbuf *m); * should be initialized by the caller in a meaningful way to get * a meaningful result. */ -#define MAC_BOOLEAN(operation, composition, args...) do { \ - struct mac_policy_conf *mpc; \ - u_int i; \ - \ - for (i = 0; i < mac_policy_list.staticmax; i++) { \ - mpc = mac_policy_list.entries[i].mpc; \ - if (mpc == NULL) \ - continue; \ - \ - if (mpc->mpc_ops->mpo_ ## operation != NULL) \ - result = result composition \ - mpc->mpc_ops->mpo_ ## operation \ - (args); \ - } \ - if (mac_policy_list_conditional_busy() != 0) { \ - for (; i <= mac_policy_list.maxindex; i++) { \ - mpc = mac_policy_list.entries[i].mpc; \ - if (mpc == NULL) \ - continue; \ +#define MAC_BOOLEAN(operation, composition, args...) 
do { \ + struct mac_policy_conf *mpc; \ + u_int i; \ + \ + for (i = 0; i < mac_policy_list.staticmax; i++) { \ + mpc = mac_policy_list.entries[i].mpc; \ + if (mpc == NULL) \ + continue; \ \ - if (mpc->mpc_ops->mpo_ ## operation != NULL) \ - result = result composition \ - mpc->mpc_ops->mpo_ ## operation \ - (args); \ - } \ - mac_policy_list_unbusy(); \ - } \ + if (mpc->mpc_ops->mpo_ ## operation != NULL) \ + result = result composition \ + mpc->mpc_ops->mpo_ ## operation \ + (args); \ + } \ + if (mac_policy_list_conditional_busy() != 0) { \ + for (; i <= mac_policy_list.maxindex; i++) { \ + mpc = mac_policy_list.entries[i].mpc; \ + if (mpc == NULL) \ + continue; \ + \ + if (mpc->mpc_ops->mpo_ ## operation != NULL) \ + result = result composition \ + mpc->mpc_ops->mpo_ ## operation \ + (args); \ + } \ + mac_policy_list_unbusy(); \ + } \ } while (0) -#define MAC_INTERNALIZE(obj, label, instring) \ +#define MAC_INTERNALIZE(obj, label, instring) \ mac_internalize(offsetof(struct mac_policy_ops, mpo_ ## obj ## _label_internalize), label, instring) -#define MAC_EXTERNALIZE(obj, label, elementlist, outbuf, outbuflen) \ +#define MAC_EXTERNALIZE(obj, label, elementlist, outbuf, outbuflen) \ mac_externalize(offsetof(struct mac_policy_ops, mpo_ ## obj ## _label_externalize), label, elementlist, outbuf, outbuflen) -#define MAC_EXTERNALIZE_AUDIT(obj, label, outbuf, outbuflen) \ +#define MAC_EXTERNALIZE_AUDIT(obj, label, outbuf, outbuflen) \ mac_externalize(offsetof(struct mac_policy_ops, mpo_ ## obj ## _label_externalize_audit), label, "*", outbuf, outbuflen) /* * MAC_PERFORM performs the designated operation by walking the policy * module list and invoking that operation for each policy. */ -#define MAC_PERFORM(operation, args...) do { \ - struct mac_policy_conf *mpc; \ - u_int i; \ - \ - for (i = 0; i < mac_policy_list.staticmax; i++) { \ - mpc = mac_policy_list.entries[i].mpc; \ - if (mpc == NULL) \ - continue; \ - \ - if (mpc->mpc_ops->mpo_ ## operation != NULL) \ - mpc->mpc_ops->mpo_ ## operation (args); \ - } \ - if (mac_policy_list_conditional_busy() != 0) { \ - for (; i <= mac_policy_list.maxindex; i++) { \ - mpc = mac_policy_list.entries[i].mpc; \ - if (mpc == NULL) \ - continue; \ - \ - if (mpc->mpc_ops->mpo_ ## operation != NULL) \ - mpc->mpc_ops->mpo_ ## operation (args); \ - } \ - mac_policy_list_unbusy(); \ - } \ +#define MAC_PERFORM(operation, args...) do { \ + struct mac_policy_conf *mpc; \ + u_int i; \ + \ + for (i = 0; i < mac_policy_list.staticmax; i++) { \ + mpc = mac_policy_list.entries[i].mpc; \ + if (mpc == NULL) \ + continue; \ + \ + if (mpc->mpc_ops->mpo_ ## operation != NULL) \ + mpc->mpc_ops->mpo_ ## operation (args); \ + } \ + if (mac_policy_list_conditional_busy() != 0) { \ + for (; i <= mac_policy_list.maxindex; i++) { \ + mpc = mac_policy_list.entries[i].mpc; \ + if (mpc == NULL) \ + continue; \ + \ + if (mpc->mpc_ops->mpo_ ## operation != NULL) \ + mpc->mpc_ops->mpo_ ## operation (args); \ + } \ + mac_policy_list_unbusy(); \ + } \ } while (0) struct __mac_get_pid_args; @@ -412,4 +414,4 @@ int mac_externalize(size_t mpo_externalize_off, struct label *label, const char *elementlist, char *outbuf, size_t outbuflen); int mac_internalize(size_t mpo_internalize_off, struct label *label, char *elementlist); -#endif /* !_SECURITY_MAC_INTERNAL_H_ */ +#endif /* !_SECURITY_MAC_INTERNAL_H_ */ diff --git a/security/mac_iokit.c b/security/mac_iokit.c index fd41b7538..f23467676 100644 --- a/security/mac_iokit.c +++ b/security/mac_iokit.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. 
All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -52,10 +52,10 @@ */ #include -#include -#include +#include +#include #include -#include +#include #include #include #include @@ -71,7 +71,7 @@ mac_iokit_check_device(char *devtype, struct mac_module_data *mdata) int error; MAC_CHECK(iokit_check_device, devtype, mdata); - return (error); + return error; } int @@ -80,7 +80,7 @@ mac_iokit_check_open(kauth_cred_t cred, io_object_t user_client, unsigned int us int error; MAC_CHECK(iokit_check_open, cred, user_client, user_client_type); - return (error); + return error; } int @@ -89,7 +89,7 @@ mac_iokit_check_set_properties(kauth_cred_t cred, io_object_t registry_entry, io int error; MAC_CHECK(iokit_check_set_properties, cred, registry_entry, properties); - return (error); + return error; } int @@ -98,7 +98,7 @@ mac_iokit_check_filter_properties(kauth_cred_t cred, io_object_t registry_entry) int error; MAC_CHECK(iokit_check_filter_properties, cred, registry_entry); - return (error); + return error; } int @@ -107,7 +107,7 @@ mac_iokit_check_get_property(kauth_cred_t cred, io_object_t registry_entry, cons int error; MAC_CHECK(iokit_check_get_property, cred, registry_entry, name); - return (error); + return error; } int @@ -116,6 +116,5 @@ mac_iokit_check_hid_control(kauth_cred_t cred) int error; MAC_CHECK(iokit_check_hid_control, cred); - return (error); + return error; } - diff --git a/security/mac_kext.c b/security/mac_kext.c index f84a6cc99..5506b4712 100644 --- a/security/mac_kext.c +++ b/security/mac_kext.c @@ -32,29 +32,31 @@ #include int -mac_kext_check_load(kauth_cred_t cred, const char *identifier) { +mac_kext_check_load(kauth_cred_t cred, const char *identifier) +{ int error; MAC_CHECK(kext_check_load, cred, identifier); - return (error); + return error; } int -mac_kext_check_unload(kauth_cred_t cred, const char *identifier) { +mac_kext_check_unload(kauth_cred_t cred, const char *identifier) +{ int error; MAC_CHECK(kext_check_unload, cred, identifier); - return (error); + return error; } int -mac_kext_check_query(kauth_cred_t cred) { +mac_kext_check_query(kauth_cred_t cred) +{ int error; MAC_CHECK(kext_check_query, cred); - return (error); + return error; } - diff --git a/security/mac_label.c b/security/mac_label.c index b05c43b84..ca6777d0d 100644 --- a/security/mac_label.c +++ b/security/mac_label.c @@ -42,7 +42,6 @@ static zone_t zone_label; void mac_labelzone_init(void) { - zone_label = zinit(sizeof(struct label), 8192 * sizeof(struct label), 
sizeof(struct label), "MAC Labels"); @@ -56,27 +55,30 @@ mac_labelzone_alloc(int flags) { struct label *l; - if (flags & MAC_NOWAIT) + if (flags & MAC_NOWAIT) { l = (struct label *) zalloc_noblock(zone_label); - else + } else { l = (struct label *) zalloc(zone_label); - if (l == NULL) - return (NULL); + } + if (l == NULL) { + return NULL; + } bzero(l, sizeof(struct label)); l->l_flags = MAC_FLAG_INITIALIZED; - return (l); + return l; } void mac_labelzone_free(struct label *l) { - - if (l == NULL) + if (l == NULL) { panic("Free of NULL MAC label\n"); + } - if ((l->l_flags & MAC_FLAG_INITIALIZED) == 0) + if ((l->l_flags & MAC_FLAG_INITIALIZED) == 0) { panic("Free of uninitialized label\n"); + } bzero(l, sizeof(struct label)); zfree(zone_label, l); } @@ -89,7 +91,7 @@ mac_label_get(struct label *l, int slot) { KASSERT(l != NULL, ("mac_label_get: NULL label")); - return ((intptr_t) (l->l_perpolicy[slot].l_ptr)); + return (intptr_t) (l->l_perpolicy[slot].l_ptr); } void @@ -99,4 +101,3 @@ mac_label_set(struct label *l, int slot, intptr_t v) l->l_perpolicy[slot].l_ptr = (void *) v; } - diff --git a/security/mac_mach.c b/security/mac_mach.c index 4ae380665..0030e5931 100644 --- a/security/mac_mach.c +++ b/security/mac_mach.c @@ -54,8 +54,9 @@ static struct proc * mac_task_get_proc(struct task *task) { - if (task == current_task()) + if (task == current_task()) { return proc_self(); + } /* * Tasks don't really hold a reference on a proc unless the @@ -65,8 +66,9 @@ mac_task_get_proc(struct task *task) struct proc *p = proc_find(pid); if (p != NULL) { - if (proc_task(p) == task) + if (proc_task(p) == task) { return p; + } proc_rele(p); } return NULL; @@ -78,13 +80,14 @@ mac_task_check_expose_task(struct task *task) int error; struct proc *p = mac_task_get_proc(task); - if (p == NULL) + if (p == NULL) { return ESRCH; + } struct ucred *cred = kauth_cred_get(); MAC_CHECK(proc_check_expose_task, cred, p); proc_rele(p); - return (error); + return error; } int @@ -93,14 +96,15 @@ mac_task_check_set_host_special_port(struct task *task, int id, struct ipc_port int error; struct proc *p = mac_task_get_proc(task); - if (p == NULL) + if (p == NULL) { return ESRCH; + } kauth_cred_t cred = kauth_cred_proc_ref(p); MAC_CHECK(proc_check_set_host_special_port, cred, id, port); kauth_cred_unref(&cred); proc_rele(p); - return (error); + return error; } int @@ -109,14 +113,15 @@ mac_task_check_set_host_exception_port(struct task *task, unsigned int exception int error; struct proc *p = mac_task_get_proc(task); - if (p == NULL) + if (p == NULL) { return ESRCH; + } kauth_cred_t cred = kauth_cred_proc_ref(p); MAC_CHECK(proc_check_set_host_exception_port, cred, exception); kauth_cred_unref(&cred); proc_rele(p); - return (error); + return error; } int @@ -126,26 +131,27 @@ mac_task_check_set_host_exception_ports(struct task *task, unsigned int exceptio int exception; struct proc *p = mac_task_get_proc(task); - if (p == NULL) + if (p == NULL) { return ESRCH; + } kauth_cred_t cred = kauth_cred_proc_ref(p); for (exception = FIRST_EXCEPTION; exception < EXC_TYPES_COUNT; exception++) { if (exception_mask & (1 << exception)) { MAC_CHECK(proc_check_set_host_exception_port, cred, exception); - if (error) + if (error) { break; + } } } kauth_cred_unref(&cred); proc_rele(p); - return (error); + return error; } void mac_thread_userret(struct thread *td) { - MAC_PERFORM(thread_userret, td); } @@ -188,7 +194,7 @@ mac_exc_create_label(void) // Policy initialization of the label, typically performs allocations as well. 
// (Unless the policy's full data really fits into a pointer size.) MAC_PERFORM(exc_action_label_init, label); - + return label; } @@ -219,21 +225,25 @@ mac_exc_free_action_label(struct exception_action *action) int mac_exc_update_action_label(struct exception_action *action, - struct label *newlabel) { + struct label *newlabel) +{ int error; - + MAC_CHECK(exc_action_label_update, action, action->label, newlabel); - - return (error); + + return error; } int mac_exc_inherit_action_label(struct exception_action *parent, - struct exception_action *child) { + struct exception_action *child) +{ return mac_exc_update_action_label(child, parent->label); } -int mac_exc_update_task_crash_label(struct task *task, struct label *label) { +int +mac_exc_update_task_crash_label(struct task *task, struct label *label) +{ int error; assert(task != kernel_task); @@ -241,8 +251,8 @@ int mac_exc_update_task_crash_label(struct task *task, struct label *label) { struct label *crash_label = get_task_crash_label(task); MAC_CHECK(exc_action_label_update, NULL, crash_label, label); - - return (error); + + return error; } // Process label creation, may sleep. @@ -291,5 +301,5 @@ mac_exc_action_check_exception_send(struct task *victim_task, struct exception_a mac_exc_free_label(bsd_label); } - return (error); + return error; } diff --git a/security/mac_net.c b/security/mac_net.c index e06837b06..1169fb516 100644 --- a/security/mac_net.c +++ b/security/mac_net.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -83,23 +83,24 @@ mac_mbuf_to_label(struct mbuf *mbuf) struct m_tag *tag; struct label *label; - if (mbuf == NULL) - return (NULL); + if (mbuf == NULL) { + return NULL; + } if ((mbuf->m_flags & M_PKTHDR) == 0) { printf("%s() got non-header MBUF!\n", __func__); - return (NULL); + return NULL; } tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_MACLABEL, - NULL); + NULL); if (tag == NULL) { printf("%s() m_tag_locate() returned NULL! 
(m->flags %04x)\n", - __func__, mbuf->m_flags); - return (NULL); + __func__, mbuf->m_flags); + return NULL; } - label = (struct label *)(tag+1); - return (label); + label = (struct label *)(tag + 1); + return label; } static struct label * @@ -108,10 +109,11 @@ mac_bpfdesc_label_alloc(void) struct label *label; label = mac_labelzone_alloc(M_WAITOK); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_PERFORM(bpfdesc_label_init, label); - return (label); + return label; } void @@ -129,16 +131,16 @@ mac_ifnet_label_alloc(void) struct label *label; label = mac_labelzone_alloc(M_WAITOK); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_PERFORM(ifnet_label_init, label); - return (label); + return label; } void mac_ifnet_label_init(struct ifnet *ifp) { - ifp->if_label = mac_ifnet_label_alloc(); } @@ -155,16 +157,16 @@ mac_mbuf_tag_init(struct m_tag *tag, int flag) mac_label_init(label); MAC_CHECK(mbuf_label_init, label, flag); - if (error) + if (error) { printf("%s(): mpo_mbuf_label_init() failed!\n", __func__); + } - return (error); + return error; } static void mac_bpfdesc_label_free(struct label *label) { - MAC_PERFORM(bpfdesc_label_destroy, label); mac_labelzone_free(label); } @@ -182,7 +184,6 @@ mac_bpfdesc_label_destroy(struct bpf_d *bpf_d) static void mac_ifnet_label_free(struct label *label) { - MAC_PERFORM(ifnet_label_destroy, label); mac_labelzone_free(label); } @@ -190,7 +191,6 @@ mac_ifnet_label_free(struct label *label) void mac_ifnet_label_destroy(struct ifnet *ifp) { - mac_ifnet_label_free(ifp->if_label); ifp->if_label = NULL; } @@ -198,7 +198,6 @@ mac_ifnet_label_destroy(struct ifnet *ifp) void mac_ifnet_label_recycle(struct ifnet *ifp) { - MAC_PERFORM(ifnet_label_recycle, ifp->if_label); } @@ -222,8 +221,9 @@ mac_mbuf_tag_copy(struct m_tag *src, struct m_tag *dest) src_label = (struct label *)(src + 1); dest_label = (struct label *)(dest + 1); - if (src_label == NULL || dest_label == NULL) + if (src_label == NULL || dest_label == NULL) { return; + } /* * mac_mbuf_tag_init() is called on the target tag @@ -248,7 +248,6 @@ mac_mbuf_label_copy(struct mbuf *m_from, struct mbuf *m_to) static void mac_ifnet_label_copy(struct label *src, struct label *dest) { - MAC_PERFORM(ifnet_label_copy, src, dest); } @@ -256,21 +255,18 @@ static int mac_ifnet_label_externalize(struct label *label, char *elements, char *outbuf, size_t outbuflen) { - - return (MAC_EXTERNALIZE(ifnet, label, elements, outbuf, outbuflen)); + return MAC_EXTERNALIZE(ifnet, label, elements, outbuf, outbuflen); } static int mac_ifnet_label_internalize(struct label *label, char *string) { - - return (MAC_INTERNALIZE(ifnet, label, string)); + return MAC_INTERNALIZE(ifnet, label, string); } void mac_ifnet_label_associate(struct ifnet *ifp) { - MAC_PERFORM(ifnet_label_associate, ifp, ifp->if_label); } @@ -295,7 +291,7 @@ mac_bpfdesc_check_receive(struct bpf_d *bpf_d, struct ifnet *ifp) ifp->if_label); ifnet_lock_done(ifp); - return (error); + return error; } int @@ -304,23 +300,24 @@ mac_mbuf_label_init(struct mbuf *m, int flag) struct m_tag *tag; int error; - if (mac_label_mbufs == 0) - return (0); + if (mac_label_mbufs == 0) { + return 0; + } tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_MACLABEL, - sizeof(struct label), flag, m); + sizeof(struct label), flag, m); if (tag == NULL) { printf("%s(): m_tag_alloc() failed!\n", __func__); - return (ENOBUFS); + return ENOBUFS; } error = mac_mbuf_tag_init(tag, flag); if (error) { printf("%s(): 
mac_mbuf_tag_init() failed!\n", __func__); m_tag_free(tag); - return (error); + return error; } m_tag_prepend(m, tag); - return (0); + return 0; } void @@ -402,7 +399,7 @@ mac_mbuf_label_associate_socket(struct socket *socket, struct mbuf *mbuf) sotoxsocket(socket, &xso); MAC_PERFORM(mbuf_label_associate_socket, &xso, socket->so_label, - mbuf, label); + mbuf, label); } int @@ -419,7 +416,7 @@ mac_ifnet_check_transmit(struct ifnet *ifp, struct mbuf *mbuf, int family, family, type); ifnet_lock_done(ifp); - return (error); + return error; } int @@ -434,19 +431,21 @@ mac_ifnet_label_get(__unused struct ucred *cred, struct ifreq *ifr, error = copyin(CAST_USER_ADDR_T(ifr->ifr_ifru.ifru_data), &mac, sizeof(mac)); - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); + if (error) { + return error; + } MALLOC(elements, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(CAST_USER_ADDR_T(mac.m_string), elements, mac.m_buflen, &len); if (error) { FREE(elements, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, elements); @@ -460,12 +459,13 @@ mac_ifnet_label_get(__unused struct ucred *cred, struct ifreq *ifr, mac_ifnet_label_free(intlabel); FREE(elements, M_MACTEMP); - if (error == 0) + if (error == 0) { error = copyout(buffer, CAST_USER_ADDR_T(mac.m_string), strlen(buffer) + 1); + } FREE(buffer, M_MACTEMP); - return (error); + return error; } int @@ -480,19 +480,21 @@ mac_ifnet_label_set(struct ucred *cred, struct ifreq *ifr, error = copyin(CAST_USER_ADDR_T(ifr->ifr_ifru.ifru_data), &mac, sizeof(mac)); - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); + if (error) { + return error; + } MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(CAST_USER_ADDR_T(mac.m_string), buffer, mac.m_buflen, &len); if (error) { FREE(buffer, M_MACTEMP); - return (error); + return error; } AUDIT_ARG(mac_string, buffer); @@ -501,7 +503,7 @@ mac_ifnet_label_set(struct ucred *cred, struct ifreq *ifr, FREE(buffer, M_MACTEMP); if (error) { mac_ifnet_label_free(intlabel); - return (error); + return error; } /* @@ -512,7 +514,7 @@ mac_ifnet_label_set(struct ucred *cred, struct ifreq *ifr, error = suser(cred, NULL); if (error) { mac_ifnet_label_free(intlabel); - return (error); + return error; } ifnet_lock_exclusive(ifp); @@ -521,12 +523,12 @@ mac_ifnet_label_set(struct ucred *cred, struct ifreq *ifr, if (error) { ifnet_lock_done(ifp); mac_ifnet_label_free(intlabel); - return (error); + return error; } MAC_PERFORM(ifnet_label_update, cred, ifp, ifp->if_label, intlabel); ifnet_lock_done(ifp); mac_ifnet_label_free(intlabel); - return (0); + return 0; } diff --git a/security/mac_pipe.c b/security/mac_pipe.c index 31fa3b436..78d733696 100644 --- a/security/mac_pipe.c +++ b/security/mac_pipe.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. 
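The mac_net.c hunks above preserve the mbuf labeling scheme unchanged: mac_mbuf_label_init() creates an m_tag whose payload is a struct label, and mac_mbuf_to_label() recovers it with (tag + 1), meaning the label sits immediately after the m_tag header in the same allocation. A compact sketch of the two halves, using hypothetical example_ names and omitting the per-policy initialization step (mac_mbuf_tag_init()) shown above:

/* Sketch only: attaching and finding an mbuf's MAC label via m_tag. */
static int
example_mbuf_label_attach(struct mbuf *m, int flag)
{
	struct m_tag *tag;

	/* One allocation: the m_tag header followed by
	 * sizeof(struct label) bytes of payload. */
	tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_MACLABEL,
	    sizeof(struct label), flag, m);
	if (tag == NULL) {
		return ENOBUFS;
	}
	m_tag_prepend(m, tag);       /* label now travels with the chain */
	return 0;
}

static struct label *
example_mbuf_label_find(struct mbuf *m)
{
	struct m_tag *tag;

	if ((m->m_flags & M_PKTHDR) == 0) {
		return NULL;         /* tags hang off the packet header */
	}
	tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
	    KERNEL_TAG_TYPE_MACLABEL, NULL);
	/* The payload was laid out immediately after the tag header. */
	return (tag != NULL) ? (struct label *)(tag + 1) : NULL;
}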
- * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -78,23 +78,22 @@ mac_pipe_label_alloc(void) struct label *label; label = mac_labelzone_alloc(MAC_WAITOK); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_PERFORM(pipe_label_init, label); - return (label); + return label; } void mac_pipe_label_init(struct pipe *cpipe) { - cpipe->pipe_label = mac_pipe_label_alloc(); } void mac_pipe_label_free(struct label *label) { - MAC_PERFORM(pipe_label_destroy, label); mac_labelzone_free(label); } @@ -102,7 +101,6 @@ mac_pipe_label_free(struct label *label) void mac_pipe_label_destroy(struct pipe *cpipe) { - mac_pipe_label_free(cpipe->pipe_label); cpipe->pipe_label = NULL; } @@ -110,7 +108,6 @@ mac_pipe_label_destroy(struct pipe *cpipe) void mac_pipe_label_copy(struct label *src, struct label *dest) { - MAC_PERFORM(pipe_label_copy, src, dest); } @@ -122,7 +119,7 @@ mac_pipe_label_externalize(struct label *label, char *elements, error = MAC_EXTERNALIZE(pipe, label, elements, outbuf, outbuflen); - return (error); + return error; } int @@ -132,13 +129,12 @@ mac_pipe_label_internalize(struct label *label, char *string) error = MAC_INTERNALIZE(pipe, label, string); - return (error); + return error; } void mac_pipe_label_associate(kauth_cred_t cred, struct pipe *cpipe) { - MAC_PERFORM(pipe_label_associate, cred, cpipe, cpipe->pipe_label); } @@ -149,12 +145,13 @@ mac_pipe_check_kqfilter(kauth_cred_t cred, struct knote *kn, int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_pipe_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_pipe_enforce) { + return 0; + } #endif MAC_CHECK(pipe_check_kqfilter, cred, kn, cpipe, cpipe->pipe_label); - return (error); + return error; } int mac_pipe_check_ioctl(kauth_cred_t cred, struct pipe *cpipe, u_int cmd) @@ -162,14 +159,15 @@ mac_pipe_check_ioctl(kauth_cred_t cred, struct pipe *cpipe, u_int cmd) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_pipe_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_pipe_enforce) { + return 0; + } #endif MAC_CHECK(pipe_check_ioctl, cred, cpipe, cpipe->pipe_label, cmd); - return (error); + return error; } int @@ -178,14 +176,15 @@ mac_pipe_check_read(kauth_cred_t cred, struct pipe *cpipe) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_pipe_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_pipe_enforce) { + return 0; + } #endif MAC_CHECK(pipe_check_read, cred, cpipe, cpipe->pipe_label); - return (error); + return error; } static int @@ -195,14 +194,15 @@ mac_pipe_check_label_update(kauth_cred_t cred, struct pipe *cpipe, int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_pipe_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_pipe_enforce) { + 
return 0; + } #endif MAC_CHECK(pipe_check_label_update, cred, cpipe, cpipe->pipe_label, newlabel); - return (error); + return error; } int @@ -211,14 +211,15 @@ mac_pipe_check_select(kauth_cred_t cred, struct pipe *cpipe, int which) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_pipe_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_pipe_enforce) { + return 0; + } #endif MAC_CHECK(pipe_check_select, cred, cpipe, cpipe->pipe_label, which); - return (error); + return error; } int @@ -227,14 +228,15 @@ mac_pipe_check_stat(kauth_cred_t cred, struct pipe *cpipe) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_pipe_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_pipe_enforce) { + return 0; + } #endif MAC_CHECK(pipe_check_stat, cred, cpipe, cpipe->pipe_label); - return (error); + return error; } int @@ -243,14 +245,15 @@ mac_pipe_check_write(kauth_cred_t cred, struct pipe *cpipe) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_pipe_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_pipe_enforce) { + return 0; + } #endif MAC_CHECK(pipe_check_write, cred, cpipe, cpipe->pipe_label); - return (error); + return error; } int @@ -260,10 +263,11 @@ mac_pipe_label_update(kauth_cred_t cred, struct pipe *cpipe, int error; error = mac_pipe_check_label_update(cred, cpipe, label); - if (error) - return (error); + if (error) { + return error; + } MAC_PERFORM(pipe_label_update, cred, cpipe, cpipe->pipe_label, label); - return (0); + return 0; } diff --git a/security/mac_policy.h b/security/mac_policy.h index a36ebe953..09a8bec12 100644 --- a/security/mac_policy.h +++ b/security/mac_policy.h @@ -2,7 +2,7 @@ * Copyright (c) 2007-2016 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -66,16 +66,16 @@ */ /** - @file mac_policy.h - @brief Kernel Interfaces for MAC policy modules - - This header defines the list of operations that are defined by the - TrustedBSD MAC Framwork on Darwin. MAC Policy modules register - with the framework to declare interest in a specific set of - operations. If interest in an entry point is not declared, then - the policy will be ignored when the Framework evaluates that entry - point. 
-*/ + * @file mac_policy.h + * @brief Kernel Interfaces for MAC policy modules + * + * This header defines the list of operations that are defined by the + * TrustedBSD MAC Framework on Darwin. MAC Policy modules register + * with the framework to declare interest in a specific set of + * operations. If interest in an entry point is not declared, then + * the policy will be ignored when the Framework evaluates that entry + * point. + */ #ifndef _SECURITY_MAC_POLICY_H_ #define _SECURITY_MAC_POLICY_H_ @@ -122,9 +122,9 @@ struct vnode; #ifndef _KAUTH_CRED_T -#define _KAUTH_CRED_T +#define _KAUTH_CRED_T typedef struct ucred *kauth_cred_t; -#endif /* !_KAUTH_CRED_T */ +#endif /* !_KAUTH_CRED_T */ #ifndef __IOKIT_PORTS_DEFINED__ #define __IOKIT_PORTS_DEFINED__ @@ -153,190 +153,190 @@ typedef struct OSObject *io_object_t; */ /** - @name Entry Points for Label Management - - These are the entry points corresponding to the life cycle events for - kernel objects, such as initialization, creation, and destruction. - - Most policies (that use labels) will initialize labels by allocating - space for policy-specific data. In most cases, it is permitted to - sleep during label initialization operations; it will be noted when - it is not permitted. - - Initialization usually will not require doing more than allocating a - generic label for the given object. What follows initialization is - creation, where a label is made specific to the object it is associated - with. Destruction occurs when the label is no longer needed, such as - when the corresponding object is destroyed. All necessary cleanup should - be performed in label destroy operations. - - Where possible, the label entry points have identical parameters. If - the policy module does not require structure-specific label - information, the same function may be registered in the policy - operation vector. Many policies will implement two such generic - allocation calls: one to handle sleepable requests, and one to handle - potentially non-sleepable requests.
+ */ /** - @brief Audit event postselection - @param cred Subject credential - @param syscode Syscall number - @param args Syscall arguments - @param error Syscall errno - @param retval Syscall return value - - This is the MAC Framework audit postselect, which is called before - exiting a syscall to determine if an audit event should be committed. - A return value of MAC_AUDIT_NO forces the audit record to be suppressed. - Any other return value results in the audit record being committed. - - @warning The suppression behavior will probably go away in Apple's - future version of the audit implementation. - - @return Return MAC_AUDIT_NO to force suppression of the audit record. - Any other value results in the audit record being committed. - -*/ + * @brief Audit event postselection + * @param cred Subject credential + * @param syscode Syscall number + * @param args Syscall arguments + * @param error Syscall errno + * @param retval Syscall return value + * + * This is the MAC Framework audit postselect, which is called before + * exiting a syscall to determine if an audit event should be committed. + * A return value of MAC_AUDIT_NO forces the audit record to be suppressed. + * Any other return value results in the audit record being committed. + * + * @warning The suppression behavior will probably go away in Apple's + * future version of the audit implementation. + * + * @return Return MAC_AUDIT_NO to force suppression of the audit record. + * Any other value results in the audit record being committed. + * + */ typedef int mpo_audit_check_postselect_t( kauth_cred_t cred, unsigned short syscode, void *args, int error, int retval -); + ); /** - @brief Audit event preselection - @param cred Subject credential - @param syscode Syscall number - @param args Syscall arguments - - This is the MAC Framework audit preselect, which is called before a - syscall is entered to determine if an audit event should be created. - If the MAC policy forces the syscall to be audited, MAC_AUDIT_YES should be - returned. A return value of MAC_AUDIT_NO causes the audit record to - be suppressed. Returning MAC_POLICY_DEFAULT indicates that the policy wants - to defer to the system's existing preselection mechanism. - - When policies return different preferences, the Framework decides what action - to take based on the following policy. If any policy returns MAC_AUDIT_YES, - then create an audit record, else if any policy returns MAC_AUDIT_NO, then - suppress the creations of an audit record, else defer to the system's - existing preselection mechanism. - - @warning The audit implementation in Apple's current version is - incomplete, so the MAC policies have priority over the system's existing - mechanisms. This will probably change in the future version where - the audit implementation is more complete. - - @return Return MAC_AUDIT_YES to force auditing of the syscall, - MAC_AUDIT_NO to force no auditing of the syscall, MAC_AUDIT_DEFAULT - to allow auditing mechanisms to determine if the syscall is audited. - -*/ + * @brief Audit event preselection + * @param cred Subject credential + * @param syscode Syscall number + * @param args Syscall arguments + * + * This is the MAC Framework audit preselect, which is called before a + * syscall is entered to determine if an audit event should be created. + * If the MAC policy forces the syscall to be audited, MAC_AUDIT_YES should be + * returned. A return value of MAC_AUDIT_NO causes the audit record to + * be suppressed. 
Returning MAC_POLICY_DEFAULT indicates that the policy wants + * to defer to the system's existing preselection mechanism. + * + * When policies return different preferences, the Framework decides what action + * to take based on the following policy. If any policy returns MAC_AUDIT_YES, + * then create an audit record, else if any policy returns MAC_AUDIT_NO, then + * suppress the creation of an audit record, else defer to the system's + * existing preselection mechanism. + * + * @warning The audit implementation in Apple's current version is + * incomplete, so the MAC policies have priority over the system's existing + * mechanisms. This will probably change in the future version where + * the audit implementation is more complete. + * + * @return Return MAC_AUDIT_YES to force auditing of the syscall, + * MAC_AUDIT_NO to force no auditing of the syscall, MAC_AUDIT_DEFAULT + * to allow auditing mechanisms to determine if the syscall is audited. + * + */ typedef int mpo_audit_check_preselect_t( kauth_cred_t cred, unsigned short syscode, void *args -); + ); /** - @brief Initialize BPF descriptor label - @param label New label to initialize - - Initialize the label for a newly instantiated BPF descriptor. - Sleeping is permitted. -*/ + * @brief Initialize BPF descriptor label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated BPF descriptor. + * Sleeping is permitted. + */ typedef void mpo_bpfdesc_label_init_t( struct label *label -); + ); /** - @brief Destroy BPF descriptor label - @param label The label to be destroyed - - Destroy a BPF descriptor label. Since the BPF descriptor - is going out of scope, policy modules should free any internal - storage associated with the label so that it may be destroyed. -*/ + * @brief Destroy BPF descriptor label + * @param label The label to be destroyed + * + * Destroy a BPF descriptor label. Since the BPF descriptor + * is going out of scope, policy modules should free any internal + * storage associated with the label so that it may be destroyed. + */ typedef void mpo_bpfdesc_label_destroy_t( struct label *label -); + ); /** - @brief Associate a BPF descriptor with a label - @param cred User credential creating the BPF descriptor - @param bpf_d The BPF descriptor - @param bpflabel The new label - - Set the label on a newly created BPF descriptor from the passed - subject credential. This call will be made when a BPF device node - is opened by a process with the passed subject credential. -*/ + * @brief Associate a BPF descriptor with a label + * @param cred User credential creating the BPF descriptor + * @param bpf_d The BPF descriptor + * @param bpflabel The new label + * + * Set the label on a newly created BPF descriptor from the passed + * subject credential. This call will be made when a BPF device node + * is opened by a process with the passed subject credential. + */ typedef void mpo_bpfdesc_label_associate_t( kauth_cred_t cred, struct bpf_d *bpf_d, struct label *bpflabel -); + ); /** - @brief Check whether BPF can read from a network interface - @param bpf_d Subject; the BPF descriptor - @param bpflabel Policy label for bpf_d - @param ifp Object; the network interface - @param ifnetlabel Policy label for ifp - - Determine whether the MAC framework should permit datagrams from - the passed network interface to be delivered to the buffers of - the passed BPF descriptor. Return (0) for success, or an errno - value for failure.
Suggested failure: EACCES for label mismatches, - EPERM for lack of privilege. -*/ + * @brief Check whether BPF can read from a network interface + * @param bpf_d Subject; the BPF descriptor + * @param bpflabel Policy label for bpf_d + * @param ifp Object; the network interface + * @param ifnetlabel Policy label for ifp + * + * Determine whether the MAC framework should permit datagrams from + * the passed network interface to be delivered to the buffers of + * the passed BPF descriptor. Return (0) for success, or an errno + * value for failure. Suggested failure: EACCES for label mismatches, + * EPERM for lack of privilege. + */ typedef int mpo_bpfdesc_check_receive_t( struct bpf_d *bpf_d, struct label *bpflabel, struct ifnet *ifp, struct label *ifnetlabel -); -/** - @brief Indicate desire to change the process label at exec time - @param old Existing subject credential - @param vp File being executed - @param offset Offset of binary within file being executed - @param scriptvp Script being executed by interpreter, if any. - @param vnodelabel Label corresponding to vp - @param scriptvnodelabel Script vnode label - @param execlabel Userspace provided execution label - @param p Object process - @param macpolicyattr MAC policy-specific spawn attribute data - @param macpolicyattrlen Length of policy-specific spawn attribute data - @see mac_execve - @see mpo_cred_label_update_execve_t - @see mpo_vnode_check_exec_t - - Indicate whether this policy intends to update the label of a newly - created credential from the existing subject credential (old). This - call occurs when a process executes the passed vnode. If a policy - returns success from this entry point, the mpo_cred_label_update_execve - entry point will later be called with the same parameters. Access - has already been checked via the mpo_vnode_check_exec entry point, - this entry point is necessary to preserve kernel locking constraints - during program execution. - - The supplied vnode and vnodelabel correspond with the file actually - being executed; in the case that the file is interpreted (for - example, a script), the label of the original exec-time vnode has - been preserved in scriptvnodelabel. - - The final label, execlabel, corresponds to a label supplied by a - user space application through the use of the mac_execve system call. - - The vnode lock is held during this operation. No changes should be - made to the old credential structure. - - @warning Even if a policy returns 0, it should behave correctly in - the presence of an invocation of mpo_cred_label_update_execve, as that - call may happen as a result of another policy requesting a transition. - - @return Non-zero if a transition is required, 0 otherwise. -*/ + ); +/** + * @brief Indicate desire to change the process label at exec time + * @param old Existing subject credential + * @param vp File being executed + * @param offset Offset of binary within file being executed + * @param scriptvp Script being executed by interpreter, if any. 
+ * @param vnodelabel Label corresponding to vp + * @param scriptvnodelabel Script vnode label + * @param execlabel Userspace provided execution label + * @param p Object process + * @param macpolicyattr MAC policy-specific spawn attribute data + * @param macpolicyattrlen Length of policy-specific spawn attribute data + * @see mac_execve + * @see mpo_cred_label_update_execve_t + * @see mpo_vnode_check_exec_t + * + * Indicate whether this policy intends to update the label of a newly + * created credential from the existing subject credential (old). This + * call occurs when a process executes the passed vnode. If a policy + * returns success from this entry point, the mpo_cred_label_update_execve + * entry point will later be called with the same parameters. Access + * has already been checked via the mpo_vnode_check_exec entry point; + * this entry point is necessary to preserve kernel locking constraints + * during program execution. + * + * The supplied vnode and vnodelabel correspond with the file actually + * being executed; in the case that the file is interpreted (for + * example, a script), the label of the original exec-time vnode has + * been preserved in scriptvnodelabel. + * + * The final label, execlabel, corresponds to a label supplied by a + * user space application through the use of the mac_execve system call. + * + * The vnode lock is held during this operation. No changes should be + * made to the old credential structure. + * + * @warning Even if a policy returns 0, it should behave correctly in + * the presence of an invocation of mpo_cred_label_update_execve, as that + * call may happen as a result of another policy requesting a transition. + * + * @return Non-zero if a transition is required, 0 otherwise. + */ typedef int mpo_cred_check_label_update_execve_t( kauth_cred_t old, struct vnode *vp, @@ -348,230 +348,230 @@ typedef int mpo_cred_check_label_update_execve_t( struct proc *p, void *macpolicyattr, size_t macpolicyattrlen -); + ); /** - @brief Access control check for relabelling processes - @param cred Subject credential - @param newlabel New label to apply to the user credential - @see mpo_cred_label_update_t - @see mac_set_proc - - Determine whether the subject identified by the credential can relabel - itself to the supplied new label (newlabel). This access control check - is called when the mac_set_proc system call is invoked. A user space - application will supply a new value, the value will be internalized - and provided in newlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for relabelling processes + * @param cred Subject credential + * @param newlabel New label to apply to the user credential + * @see mpo_cred_label_update_t + * @see mac_set_proc + * + * Determine whether the subject identified by the credential can relabel + * itself to the supplied new label (newlabel). This access control check + * is called when the mac_set_proc system call is invoked. A user space + * application will supply a new value; the value will be internalized + * and provided in newlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned.
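The entry point above splits an exec-time label transition into a check phase and an update phase: returning non-zero here only registers intent, and the actual relabel happens later in mpo_cred_label_update_execve. A minimal sketch of the check side, assuming a hypothetical slot-based label scheme (EXAMPLE_SLOT and policy_wants_transition are invented for the example; mac_label_get is xnu's label slot accessor):

#define EXAMPLE_SLOT 1  /* hypothetical slot index owned by this policy */

/* Hypothetical predicate: does the executable's label request a transition? */
static int
policy_wants_transition(struct label *vnodelabel)
{
	return mac_label_get(vnodelabel, EXAMPLE_SLOT) != 0;
}

static int
example_cred_check_label_update_execve(kauth_cred_t old, struct vnode *vp,
    off_t offset, struct vnode *scriptvp, struct label *vnodelabel,
    struct label *scriptvnodelabel, struct label *execlabel,
    struct proc *p, void *macpolicyattr, size_t macpolicyattrlen)
{
#pragma unused(old, vp, offset, scriptvp, scriptvnodelabel, execlabel, p, macpolicyattr, macpolicyattrlen)
	/* Non-zero registers intent only; no credential is modified here. */
	return policy_wants_transition(vnodelabel);
}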
+ */ typedef int mpo_cred_check_label_update_t( kauth_cred_t cred, struct label *newlabel -); + ); /** - @brief Access control check for visibility of other subjects - @param u1 Subject credential - @param u2 Object credential - - Determine whether the subject identified by the credential u1 can - "see" other subjects with the passed subject credential u2. This call - may be made in a number of situations, including inter-process status - sysctls used by ps, and in procfs lookups. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch, - EPERM for lack of privilege, or ESRCH to hide visibility. -*/ + * @brief Access control check for visibility of other subjects + * @param u1 Subject credential + * @param u2 Object credential + * + * Determine whether the subject identified by the credential u1 can + * "see" other subjects with the passed subject credential u2. This call + * may be made in a number of situations, including inter-process status + * sysctls used by ps, and in procfs lookups. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch, + * EPERM for lack of privilege, or ESRCH to hide visibility. + */ typedef int mpo_cred_check_visible_t( kauth_cred_t u1, kauth_cred_t u2 -); + ); /** - @brief Associate a credential with a new process at fork - @param cred credential to inherited by new process - @param proc the new process - - Allow a process to associate the credential with a new - process for reference countng purposes. - NOTE: the credential can be dis-associated in ways other - than exit - so this strategy is flawed - should just - catch label destroy callback. -*/ + * @brief Associate a credential with a new process at fork + * @param cred credential to be inherited by the new process + * @param proc the new process + * + * Allow a process to associate the credential with a new + * process for reference counting purposes. + * NOTE: the credential can be dis-associated in ways other + * than exit - so this strategy is flawed - should just + * catch label destroy callback. + */ typedef void mpo_cred_label_associate_fork_t( kauth_cred_t cred, proc_t proc -); + ); /** - @brief Create the first process - @param cred Subject credential to be labeled - - Create the subject credential of process 0, the parent of all BSD - kernel processes. Policies should update the label in the - previously initialized credential structure. -*/ + * @brief Create the first process + * @param cred Subject credential to be labeled + * + * Create the subject credential of process 0, the parent of all BSD + * kernel processes. Policies should update the label in the + * previously initialized credential structure. + */ typedef void mpo_cred_label_associate_kernel_t( kauth_cred_t cred -); + ); /** - @brief Create a credential label - @param parent_cred Parent credential - @param child_cred Child credential - - Set the label of a newly created credential, most likely using the - information in the supplied parent credential. - - @warning This call is made when crcopy or crdup is invoked on a - newly created struct ucred, and should not be confused with a - process fork or creation event.
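The suggested errno values for the visibility check above have visible consequences: ESRCH makes the subject disappear entirely from tools like ps, while EACCES or EPERM surfaces as a permission error. A minimal sketch, with a deliberately naive same_compartment() comparison standing in for a real policy's label comparison:

#include <sys/errno.h>

/* Naive stand-in for a real label comparison; illustrative only. */
static int
same_compartment(kauth_cred_t u1, kauth_cred_t u2)
{
	return kauth_cred_getuid(u1) == kauth_cred_getuid(u2);
}

static int
example_cred_check_visible(kauth_cred_t u1, kauth_cred_t u2)
{
	/* Hide foreign subjects entirely rather than denying access. */
	return same_compartment(u1, u2) ? 0 : ESRCH;
}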
-*/ + * @brief Create a credential label + * @param parent_cred Parent credential + * @param child_cred Child credential + * + * Set the label of a newly created credential, most likely using the + * information in the supplied parent credential. + * + * @warning This call is made when crcopy or crdup is invoked on a + * newly created struct ucred, and should not be confused with a + * process fork or creation event. + */ typedef void mpo_cred_label_associate_t( kauth_cred_t parent_cred, kauth_cred_t child_cred -); + ); /** - @brief Create the first process - @param cred Subject credential to be labeled - - Create the subject credential of process 1, the parent of all BSD - user processes. Policies should update the label in the previously - initialized credential structure. This is the 'init' process. -*/ + * @brief Create the first process + * @param cred Subject credential to be labeled + * + * Create the subject credential of process 1, the parent of all BSD + * user processes. Policies should update the label in the previously + * initialized credential structure. This is the 'init' process. + */ typedef void mpo_cred_label_associate_user_t( kauth_cred_t cred -); + ); /** - @brief Destroy credential label - @param label The label to be destroyed - - Destroy a user credential label. Since the user credential - is going out of scope, policy modules should free any internal - storage associated with the label so that it may be destroyed. -*/ + * @brief Destroy credential label + * @param label The label to be destroyed + * + * Destroy a user credential label. Since the user credential + * is going out of scope, policy modules should free any internal + * storage associated with the label so that it may be destroyed. + */ typedef void mpo_cred_label_destroy_t( struct label *label -); + ); /** - @brief Externalize a user credential label for auditing - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of the label - - Produce an external representation of the label on a user credential for - inclusion in an audit record. An externalized label consists of a text - representation of the label contents that will be added to the audit record - as part of a text token. Policy-agnostic user space tools will display - this externalized version. - - @return 0 on success, return non-zero if an error occurs while - externalizing the label data. - -*/ + * @brief Externalize a user credential label for auditing + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of the label + * + * Produce an external representation of the label on a user credential for + * inclusion in an audit record. An externalized label consists of a text + * representation of the label contents that will be added to the audit record + * as part of a text token. Policy-agnostic user space tools will display + * this externalized version. + * + * @return 0 on success, return non-zero if an error occurs while + * externalizing the label data. 
+ * + */ typedef int mpo_cred_label_externalize_audit_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Externalize a user credential label - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of the label - - Produce an external representation of the label on a user - credential. An externalized label consists of a text representation - of the label contents that can be used with user applications. - Policy-agnostic user space tools will display this externalized - version. - - @return 0 on success, return non-zero if an error occurs while - externalizing the label data. - -*/ + * @brief Externalize a user credential label + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of the label + * + * Produce an external representation of the label on a user + * credential. An externalized label consists of a text representation + * of the label contents that can be used with user applications. + * Policy-agnostic user space tools will display this externalized + * version. + * + * @return 0 on success, return non-zero if an error occurs while + * externalizing the label data. + * + */ typedef int mpo_cred_label_externalize_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Initialize user credential label - @param label New label to initialize - - Initialize the label for a newly instantiated user credential. - Sleeping is permitted. -*/ + * @brief Initialize user credential label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated user credential. + * Sleeping is permitted. + */ typedef void mpo_cred_label_init_t( struct label *label -); + ); /** - @brief Internalize a user credential label - @param label Label to be internalized - @param element_name Name of the label namespace for which the label should - be internalized - @param element_data Text data to be internalized - - Produce a user credential label from an external representation. An - externalized label consists of a text representation of the label - contents that can be used with user applications. Policy-agnostic - user space tools will forward text version to the kernel for - processing by individual policy modules. - - The policy's internalize entry points will be called only if the - policy has registered interest in the label namespace. - - @return 0 on success, Otherwise, return non-zero if an error occurs - while internalizing the label data. - -*/ + * @brief Internalize a user credential label + * @param label Label to be internalized + * @param element_name Name of the label namespace for which the label should + * be internalized + * @param element_data Text data to be internalized + * + * Produce a user credential label from an external representation. An + * externalized label consists of a text representation of the label + * contents that can be used with user applications. Policy-agnostic + * user space tools will forward the text version to the kernel for + * processing by individual policy modules. + * + * The policy's internalize entry points will be called only if the + * policy has registered interest in the label namespace.
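Externalization renders the policy's private label state into the supplied sbuf as text; internalization is the inverse parse. A minimal externalize sketch, assuming the policy keeps an integer level in a hypothetical label slot (EXAMPLE_SLOT) and assuming sbuf_printf's usual convention of returning a negative value on failure:

#include <sys/errno.h>

#define EXAMPLE_SLOT 1  /* hypothetical slot index owned by this policy */

static int
example_cred_label_externalize(struct label *label, char *element_name,
    struct sbuf *sb)
{
#pragma unused(element_name)
	long level = (long)mac_label_get(label, EXAMPLE_SLOT);

	/* Emit e.g. "3"; user space sees it qualified by the namespace name. */
	return sbuf_printf(sb, "%ld", level) < 0 ? ENOMEM : 0;
}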
+ * + * @return 0 on success. Otherwise, return non-zero if an error occurs + * while internalizing the label data. + * + */ typedef int mpo_cred_label_internalize_t( struct label *label, char *element_name, char *element_data -); -/** - @brief Update credential at exec time - @param old_cred Existing subject credential - @param new_cred New subject credential to be labeled - @param p Object process. - @param vp File being executed - @param offset Offset of binary within file being executed - @param scriptvp Script being executed by interpreter, if any. - @param vnodelabel Label corresponding to vp - @param scriptvnodelabel Script vnode label - @param execlabel Userspace provided execution label - @param csflags Code signing flags to be set after exec - @param macpolicyattr MAC policy-specific spawn attribute data. - @param macpolicyattrlen Length of policy-specific spawn attribute data. - @see mac_execve - @see mpo_cred_check_label_update_execve_t - @see mpo_vnode_check_exec_t - - Update the label of a newly created credential (new) from the - existing subject credential (old). This call occurs when a process - executes the passed vnode and one of the loaded policy modules has - returned success from the mpo_cred_check_label_update_execve entry point. - Access has already been checked via the mpo_vnode_check_exec entry - point, this entry point is only used to update any policy state. - - The supplied vnode and vnodelabel correspond with the file actually - being executed; in the case that the file is interpreted (for - example, a script), the label of the original exec-time vnode has - been preserved in scriptvnodelabel. - - The final label, execlabel, corresponds to a label supplied by a - user space application through the use of the mac_execve system call. - - If non-NULL, the value pointed to by disjointp will be set to 0 to - indicate that the old and new credentials are not disjoint, or 1 to - indicate that they are. - - The vnode lock is held during this operation. No changes should be - made to the old credential structure. - @return 0 on success, Otherwise, return non-zero if update results in - termination of child. -*/ + ); +/** + * @brief Update credential at exec time + * @param old_cred Existing subject credential + * @param new_cred New subject credential to be labeled + * @param p Object process. + * @param vp File being executed + * @param offset Offset of binary within file being executed + * @param scriptvp Script being executed by interpreter, if any. + * @param vnodelabel Label corresponding to vp + * @param scriptvnodelabel Script vnode label + * @param execlabel Userspace provided execution label + * @param csflags Code signing flags to be set after exec + * @param macpolicyattr MAC policy-specific spawn attribute data. + * @param macpolicyattrlen Length of policy-specific spawn attribute data. + * @see mac_execve + * @see mpo_cred_check_label_update_execve_t + * @see mpo_vnode_check_exec_t + * + * Update the label of a newly created credential (new) from the + * existing subject credential (old). This call occurs when a process + * executes the passed vnode and one of the loaded policy modules has + * returned success from the mpo_cred_check_label_update_execve entry point. + * Access has already been checked via the mpo_vnode_check_exec entry + * point; this entry point is only used to update any policy state.
+ * + * The supplied vnode and vnodelabel correspond with the file actually + * being executed; in the case that the file is interpreted (for + * example, a script), the label of the original exec-time vnode has + * been preserved in scriptvnodelabel. + * + * The final label, execlabel, corresponds to a label supplied by a + * user space application through the use of the mac_execve system call. + * + * If non-NULL, the value pointed to by disjointp will be set to 0 to + * indicate that the old and new credentials are not disjoint, or 1 to + * indicate that they are. + * + * The vnode lock is held during this operation. No changes should be + * made to the old credential structure. + * @return 0 on success. Otherwise, return non-zero if the update results in + * termination of the child. + */ typedef int mpo_cred_label_update_execve_t( kauth_cred_t old_cred, kauth_cred_t new_cred, @@ -586,415 +586,415 @@ typedef int mpo_cred_label_update_execve_t( void *macpolicyattr, size_t macpolicyattrlen, int *disjointp -); + ); /** - @brief Update a credential label - @param cred The existing credential - @param newlabel A new label to apply to the credential - @see mpo_cred_check_label_update_t - @see mac_set_proc - - Update the label on a user credential, using the supplied new label. - This is called as a result of a process relabel operation. Access - control was already confirmed by mpo_cred_check_label_update. -*/ + * @brief Update a credential label + * @param cred The existing credential + * @param newlabel A new label to apply to the credential + * @see mpo_cred_check_label_update_t + * @see mac_set_proc + * + * Update the label on a user credential, using the supplied new label. + * This is called as a result of a process relabel operation. Access + * control was already confirmed by mpo_cred_check_label_update. + */ typedef void mpo_cred_label_update_t( kauth_cred_t cred, struct label *newlabel -); + ); /** - @brief Create a new devfs device - @param dev Major and minor numbers of special file - @param de "inode" of new device file - @param label Destination label - @param fullpath Path relative to mount (e.g. /dev) of new device file - - This entry point labels a new devfs device. The label will likely be based - on the path to the device, or the major and minor numbers. - The policy should store an appropriate label into 'label'. -*/ + * @brief Create a new devfs device + * @param dev Major and minor numbers of special file + * @param de "inode" of new device file + * @param label Destination label + * @param fullpath Path relative to mount (e.g. /dev) of new device file + * + * This entry point labels a new devfs device. The label will likely be based + * on the path to the device, or the major and minor numbers. + * The policy should store an appropriate label into 'label'. + */ typedef void mpo_devfs_label_associate_device_t( dev_t dev, struct devnode *de, struct label *label, const char *fullpath -); -/** - @brief Create a new devfs directory - @param dirname Name of new directory - @param dirnamelen Length of 'dirname' - @param de "inode" of new directory - @param label Destination label - @param fullpath Path relative to mount (e.g. /dev) of new directory - - This entry point labels a new devfs directory. The label will likely be - based on the path of the new directory. The policy should store an appropriate - label into 'label'. The devfs root directory is labelled in this way.
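Because devfs passes the path relative to the mount, a path-keyed policy can label device nodes directly in the associate hook above. A minimal sketch, reusing the hypothetical EXAMPLE_SLOT and treating the "tty" prefix and the level values as purely illustrative:

#include <string.h>

#define EXAMPLE_SLOT 1  /* hypothetical slot index owned by this policy */

static void
example_devfs_label_associate_device(dev_t dev, struct devnode *de,
    struct label *label, const char *fullpath)
{
#pragma unused(dev, de)
	/* fullpath is relative to the mount, e.g. "tty" or "disk0s1". */
	if (strncmp(fullpath, "tty", 3) == 0) {
		mac_label_set(label, EXAMPLE_SLOT, 2);  /* illustrative level */
	} else {
		mac_label_set(label, EXAMPLE_SLOT, 1);
	}
}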
-*/ + ); +/** + * @brief Create a new devfs directory + * @param dirname Name of new directory + * @param dirnamelen Length of 'dirname' + * @param de "inode" of new directory + * @param label Destination label + * @param fullpath Path relative to mount (e.g. /dev) of new directory + * + * This entry point labels a new devfs directory. The label will likely be + * based on the path of the new directory. The policy should store an appropriate + * label into 'label'. The devfs root directory is labelled in this way. + */ typedef void mpo_devfs_label_associate_directory_t( const char *dirname, int dirnamelen, struct devnode *de, struct label *label, const char *fullpath -); + ); /** - @brief Copy a devfs label - @param src Source devfs label - @param dest Destination devfs label - - Copy the label information from src to dest. The devfs file system - often duplicates (splits) existing device nodes rather than creating - new ones. -*/ + * @brief Copy a devfs label + * @param src Source devfs label + * @param dest Destination devfs label + * + * Copy the label information from src to dest. The devfs file system + * often duplicates (splits) existing device nodes rather than creating + * new ones. + */ typedef void mpo_devfs_label_copy_t( struct label *src, struct label *dest -); + ); /** - @brief Destroy devfs label - @param label The label to be destroyed - - Destroy a devfs entry label. Since the object is going out - of scope, policy modules should free any internal storage associated - with the label so that it may be destroyed. -*/ + * @brief Destroy devfs label + * @param label The label to be destroyed + * + * Destroy a devfs entry label. Since the object is going out + * of scope, policy modules should free any internal storage associated + * with the label so that it may be destroyed. + */ typedef void mpo_devfs_label_destroy_t( struct label *label -); + ); /** - @brief Initialize devfs label - @param label New label to initialize - - Initialize the label for a newly instantiated devfs entry. Sleeping - is permitted. -*/ + * @brief Initialize devfs label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated devfs entry. Sleeping + * is permitted. + */ typedef void mpo_devfs_label_init_t( struct label *label -); -/** - @brief Update a devfs label after relabelling its vnode - @param mp Devfs mount point - @param de Affected devfs directory entry - @param delabel Label of devfs directory entry - @param vp Vnode associated with de - @param vnodelabel New label of vnode - - Update a devfs label when its vnode is manually relabelled, - for example with setfmac(1). Typically, this will simply copy - the vnode label into the devfs label. -*/ + ); +/** + * @brief Update a devfs label after relabelling its vnode + * @param mp Devfs mount point + * @param de Affected devfs directory entry + * @param delabel Label of devfs directory entry + * @param vp Vnode associated with de + * @param vnodelabel New label of vnode + * + * Update a devfs label when its vnode is manually relabelled, + * for example with setfmac(1). Typically, this will simply copy + * the vnode label into the devfs label. 
+ */ typedef void mpo_devfs_label_update_t( struct mount *mp, struct devnode *de, struct label *delabel, struct vnode *vp, struct label *vnodelabel -); + ); /** - @brief Access control for sending an exception to an exception action - @param crashlabel The crashing process's label - @param action Exception action - @param exclabel Policy label for exception action - - Determine whether the the exception message caused by the victim - process can be sent to the exception action. The policy may compare - credentials in the crashlabel, which are derived from the process at - the time the exception occurs, with the credentials in the exclabel, - which was set at the time the exception port was set, to determine - its decision. Note that any process from which the policy derived - any credentials may not exist anymore at the time of this policy - operation. Sleeping is permitted. - - @return Return 0 if the message can be sent, otherwise an - appropriate value for errno should be returned. -*/ + * @brief Access control for sending an exception to an exception action + * @param crashlabel The crashing process's label + * @param action Exception action + * @param exclabel Policy label for exception action + * + * Determine whether the exception message caused by the victim + * process can be sent to the exception action. The policy may compare + * credentials in the crashlabel, which are derived from the process at + * the time the exception occurs, with the credentials in the exclabel, + * which was set at the time the exception port was set, to determine + * its decision. Note that any process from which the policy derived + * any credentials may not exist anymore at the time of this policy + * operation. Sleeping is permitted. + * + * @return Return 0 if the message can be sent, otherwise an + * appropriate value for errno should be returned. + */ typedef int mpo_exc_action_check_exception_send_t( struct label *crashlabel, struct exception_action *action, struct label *exclabel -); + ); /** - @brief Associate an exception action label - @param action Exception action to label - @param exclabel Policy label to be filled in for exception action - - Set the label on an exception action. -*/ + * @brief Associate an exception action label + * @param action Exception action to label + * @param exclabel Policy label to be filled in for exception action + * + * Set the label on an exception action. + */ typedef void mpo_exc_action_label_associate_t( struct exception_action *action, struct label *exclabel -); + ); /** - @brief Destroy exception action label - @param label The label to be destroyed - - Destroy the label on an exception action. Since the object is going - out of scope, policy modules should free any internal storage - associated with the label so that it may be destroyed. Sleeping is - permitted. -*/ + * @brief Destroy exception action label + * @param label The label to be destroyed + * + * Destroy the label on an exception action. Since the object is going + * out of scope, policy modules should free any internal storage + * associated with the label so that it may be destroyed. Sleeping is + * permitted. + */ typedef void mpo_exc_action_label_destroy_t( struct label *label -); + ); /** - @brief Populate an exception action label with process credentials - @param label The label to be populated - @param proc Process to derive credentials from - - Populate a label with credentials derived from a process.
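Since the populate hook's documentation stresses that the labeled process may already be gone at exception-delivery time, a policy typically snapshots a referenced credential into the label instead of remembering the proc. A minimal sketch, assuming the kauth_cred_proc_ref() referencing primitive and the hypothetical EXAMPLE_SLOT; the matching destroy hook would drop the reference with kauth_cred_unref():

#define EXAMPLE_SLOT 1  /* hypothetical slot index owned by this policy */

static void
example_exc_action_label_populate(struct label *label, struct proc *proc)
{
	/*
	 * Take our own credential reference so the label remains valid
	 * even if the process exits before the exception is delivered.
	 */
	kauth_cred_t cred = kauth_cred_proc_ref(proc);

	mac_label_set(label, EXAMPLE_SLOT, (intptr_t)cred);
}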
At - exception delivery time, the policy should compare credentials of the - process that set an exception ports with the credentials of the - process or corpse that experienced the exception. Note that the - process that set the port may not exist at that time anymore, so - labels should carry copies of live credentials if necessary. -*/ + * @brief Populate an exception action label with process credentials + * @param label The label to be populated + * @param proc Process to derive credentials from + * + * Populate a label with credentials derived from a process. At + * exception delivery time, the policy should compare credentials of the + * process that set an exception ports with the credentials of the + * process or corpse that experienced the exception. Note that the + * process that set the port may not exist at that time anymore, so + * labels should carry copies of live credentials if necessary. + */ typedef void mpo_exc_action_label_populate_t( struct label *label, struct proc *proc -); + ); /** - @brief Initialize exception action label - @param label New label to initialize - - Initialize a label for an exception action. Usually performs - policy specific allocations. Sleeping is permitted. -*/ + * @brief Initialize exception action label + * @param label New label to initialize + * + * Initialize a label for an exception action. Usually performs + * policy specific allocations. Sleeping is permitted. + */ typedef int mpo_exc_action_label_init_t( struct label *label -); + ); /** - @brief Update the label on an exception action - @param action Exception action that the label belongs to (may be - NULL if none) - @param label Policy label to update - @param newlabel New label for update - - Update the credentials of an exception action from the given - label. The policy should copy over any credentials (process and - otherwise) from the new label into the label to update. Must not - sleep, must be quick and can be called with locks held. -*/ + * @brief Update the label on an exception action + * @param action Exception action that the label belongs to (may be + * NULL if none) + * @param label Policy label to update + * @param newlabel New label for update + * + * Update the credentials of an exception action from the given + * label. The policy should copy over any credentials (process and + * otherwise) from the new label into the label to update. Must not + * sleep, must be quick and can be called with locks held. + */ typedef int mpo_exc_action_label_update_t( struct exception_action *action, struct label *label, struct label *newlabel -); + ); /** - @brief Access control for changing the offset of a file descriptor - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg - - Determine whether the subject identified by the credential can - change the offset of the file represented by fg. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control for changing the offset of a file descriptor + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * + * Determine whether the subject identified by the credential can + * change the offset of the file represented by fg. + * + * @return Return 0 if access if granted, otherwise an appropriate + * value for errno should be returned. 
+ */ typedef int mpo_file_check_change_offset_t( kauth_cred_t cred, struct fileglob *fg, struct label *label -); + ); /** - @brief Access control for creating a file descriptor - @param cred Subject credential - - Determine whether the subject identified by the credential can - allocate a new file descriptor. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control for creating a file descriptor + * @param cred Subject credential + * + * Determine whether the subject identified by the credential can + * allocate a new file descriptor. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_file_check_create_t( kauth_cred_t cred -); + ); /** - @brief Access control for duplicating a file descriptor - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg - @param newfd New file descriptor number - - Determine whether the subject identified by the credential can - duplicate the fileglob structure represented by fg and as file - descriptor number newfd. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control for duplicating a file descriptor + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * @param newfd New file descriptor number + * + * Determine whether the subject identified by the credential can + * duplicate the fileglob structure represented by fg as file + * descriptor number newfd. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_file_check_dup_t( kauth_cred_t cred, struct fileglob *fg, struct label *label, int newfd -); -/** - @brief Access control check for fcntl - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg - @param cmd Control operation to be performed; see fcntl(2) - @param arg fcnt arguments; see fcntl(2) - - Determine whether the subject identified by the credential can perform - the file control operation indicated by cmd. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for fcntl + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * @param cmd Control operation to be performed; see fcntl(2) + * @param arg fcntl arguments; see fcntl(2) + * + * Determine whether the subject identified by the credential can perform + * the file control operation indicated by cmd. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_file_check_fcntl_t( kauth_cred_t cred, struct fileglob *fg, struct label *label, int cmd, user_long_t arg -); + ); /** - @brief Access control check for mac_get_fd - @param cred Subject credential - @param fg Fileglob structure - @param elements Element buffer - @param len Length of buffer - - Determine whether the subject identified by the credential should be allowed - to get an externalized version of the label on the object indicated by fd. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned.
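fcntl checks are naturally per-command: a policy switches on cmd and passes through everything it does not reason about. The sketch below shows the shape with an arbitrary rule against clearing FD_CLOEXEC via F_SETFD; the rule itself is invented for the example:

#include <sys/fcntl.h>
#include <sys/errno.h>

static int
example_file_check_fcntl(kauth_cred_t cred, struct fileglob *fg,
    struct label *label, int cmd, user_long_t arg)
{
#pragma unused(cred, fg, label)
	switch (cmd) {
	case F_SETFD:
		/* Invented rule: keep close-on-exec set once set. */
		return (arg & FD_CLOEXEC) ? 0 : EPERM;
	default:
		return 0;       /* commands this policy ignores pass through */
	}
}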
-*/ + * @brief Access control check for mac_get_fd + * @param cred Subject credential + * @param fg Fileglob structure + * @param elements Element buffer + * @param len Length of buffer + * + * Determine whether the subject identified by the credential should be allowed + * to get an externalized version of the label on the object indicated by fd. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_file_check_get_t( kauth_cred_t cred, struct fileglob *fg, char *elements, int len -); + ); /** - @brief Access control for getting the offset of a file descriptor - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg - - Determine whether the subject identified by the credential can - get the offset of the file represented by fg. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control for getting the offset of a file descriptor + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * + * Determine whether the subject identified by the credential can + * get the offset of the file represented by fg. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_file_check_get_offset_t( kauth_cred_t cred, struct fileglob *fg, struct label *label -); + ); /** - @brief Access control for inheriting a file descriptor - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg - - Determine whether the subject identified by the credential can - inherit the fileglob structure represented by fg. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control for inheriting a file descriptor + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * + * Determine whether the subject identified by the credential can + * inherit the fileglob structure represented by fg. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_file_check_inherit_t( kauth_cred_t cred, struct fileglob *fg, struct label *label -); + ); /** - @brief Access control check for file ioctl - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg - @param cmd The ioctl command; see ioctl(2) - - Determine whether the subject identified by the credential can perform - the ioctl operation indicated by cmd. - - @warning Since ioctl data is opaque from the standpoint of the MAC - framework, policies must exercise extreme care when implementing - access control checks. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - -*/ + * @brief Access control check for file ioctl + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * @param cmd The ioctl command; see ioctl(2) + * + * Determine whether the subject identified by the credential can perform + * the ioctl operation indicated by cmd. + * + * @warning Since ioctl data is opaque from the standpoint of the MAC + * framework, policies must exercise extreme care when implementing + * access control checks.
+ * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + * + */ typedef int mpo_file_check_ioctl_t( kauth_cred_t cred, struct fileglob *fg, struct label *label, unsigned int cmd -); -/** - @brief Access control check for file locking - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg - @param op The lock operation (F_GETLK, F_SETLK, F_UNLK) - @param fl The flock structure - - Determine whether the subject identified by the credential can perform - the lock operation indicated by op and fl on the file represented by fg. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - -*/ + ); +/** + * @brief Access control check for file locking + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * @param op The lock operation (F_GETLK, F_SETLK, F_UNLCK) + * @param fl The flock structure + * + * Determine whether the subject identified by the credential can perform + * the lock operation indicated by op and fl on the file represented by fg. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + * + */ typedef int mpo_file_check_lock_t( kauth_cred_t cred, struct fileglob *fg, struct label *label, int op, struct flock *fl -); -/** - @brief Check with library validation if a macho slice is allowed to be combined into a proc. - @param p Subject process - @param fg Fileglob structure - @param slice_offset offset of the code slice - @param error_message error message returned to user-space in case of error (userspace pointer) - @param error_message_size error message size - - Its a little odd that the MAC/kext writes into userspace since this - implies there is only one MAC module that implements this, however - the alterantive is to allocate memory in xnu, on the hope that - the MAC module will use it, or allocated in the MAC module and then - free it in xnu. Either of these are very appeling, so lets go with - the slightly more hacky way. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Check with library validation if a Mach-O slice is allowed to be combined into a proc. + * @param p Subject process + * @param fg Fileglob structure + * @param slice_offset offset of the code slice + * @param error_message error message returned to user-space in case of error (userspace pointer) + * @param error_message_size error message size + * + * It's a little odd that the MAC/kext writes into userspace, since this + * implies there is only one MAC module that implements this; however, + * the alternative is to allocate memory in xnu, in the hope that + * the MAC module will use it, or to allocate it in the MAC module and then + * free it in xnu. Neither of these is very appealing, so let's go with + * the slightly more hacky way. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned.
+ */ typedef int mpo_file_check_library_validation_t( struct proc *p, struct fileglob *fg, off_t slice_offset, user_long_t error_message, size_t error_message_size -); -/** - @brief Access control check for mapping a file - @param cred Subject credential - @param fg fileglob representing file to map - @param label Policy label associated with vp - @param prot mmap protections; see mmap(2) - @param flags Type of mapped object; see mmap(2) - @param maxprot Maximum rights - - Determine whether the subject identified by the credential should be - allowed to map the file represented by fg with the protections specified - in prot. The maxprot field holds the maximum permissions on the new - mapping, a combination of VM_PROT_READ, VM_PROT_WRITE, and VM_PROT_EXECUTE. - To avoid overriding prior access control checks, a policy should only - remove flags from maxprot. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for mapping a file + * @param cred Subject credential + * @param fg fileglob representing file to map + * @param label Policy label associated with fg + * @param prot mmap protections; see mmap(2) + * @param flags Type of mapped object; see mmap(2) + * @param maxprot Maximum rights + * + * Determine whether the subject identified by the credential should be + * allowed to map the file represented by fg with the protections specified + * in prot. The maxprot field holds the maximum permissions on the new + * mapping, a combination of VM_PROT_READ, VM_PROT_WRITE, and VM_PROT_EXECUTE. + * To avoid overriding prior access control checks, a policy should only + * remove flags from maxprot. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_file_check_mmap_t( kauth_cred_t cred, struct fileglob *fg, @@ -1003,125 +1003,125 @@ typedef int mpo_file_check_mmap_t( int flags, uint64_t file_pos, int *maxprot -); + ); /** - @brief Downgrade the mmap protections - @param cred Subject credential - @param fg file to map - @param label Policy label associated with vp - @param prot mmap protections to be downgraded - - Downgrade the mmap protections based on the subject and object labels. -*/ + * @brief Downgrade the mmap protections + * @param cred Subject credential + * @param fg file to map + * @param label Policy label associated with fg + * @param prot mmap protections to be downgraded + * + * Downgrade the mmap protections based on the subject and object labels. + */ typedef void mpo_file_check_mmap_downgrade_t( kauth_cred_t cred, struct fileglob *fg, struct label *label, int *prot -); + ); /** - @brief Access control for receiving a file descriptor - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg - - Determine whether the subject identified by the credential can - receive the fileglob structure represented by fg. + * @brief Access control for receiving a file descriptor + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + * + * Determine whether the subject identified by the credential can + * receive the fileglob structure represented by fg.
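The maxprot convention above deserves a concrete illustration: a conforming check never adds rights, it only clears bits from *maxprot, and it may reject an explicit request outright. A sketch, assuming a hypothetical label_forbids_exec() predicate over the file's label:

#include <mach/vm_prot.h>
#include <sys/errno.h>

#define EXAMPLE_SLOT 1  /* hypothetical slot index owned by this policy */

/* Hypothetical predicate over the policy's private label state. */
static int
label_forbids_exec(struct label *label)
{
	return mac_label_get(label, EXAMPLE_SLOT) == 0;
}

static int
example_file_check_mmap(kauth_cred_t cred, struct fileglob *fg,
    struct label *label, int prot, int flags, uint64_t file_pos, int *maxprot)
{
#pragma unused(cred, fg, flags, file_pos)
	if (label_forbids_exec(label)) {
		if (prot & VM_PROT_EXECUTE) {
			return EPERM;   /* caller explicitly asked to execute */
		}
		*maxprot &= ~VM_PROT_EXECUTE;   /* only ever remove bits */
	}
	return 0;
}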
+ * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_file_check_receive_t( kauth_cred_t cred, struct fileglob *fg, struct label *label -); + ); /** - @brief Access control check for mac_set_fd - @param cred Subject credential - @param fg Fileglob structure - @param elements Elements buffer - @param len Length of elements buffer - - Determine whether the subject identified by the credential can - perform the mac_set_fd operation. The mac_set_fd operation is used - to associate a MAC label with a file. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for mac_set_fd + * @param cred Subject credential + * @param fg Fileglob structure + * @param elements Elements buffer + * @param len Length of elements buffer + * + * Determine whether the subject identified by the credential can + * perform the mac_set_fd operation. The mac_set_fd operation is used + * to associate a MAC label with a file. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_file_check_set_t( kauth_cred_t cred, struct fileglob *fg, char *elements, int len -); + ); /** - @brief Create file label - @param cred Subject credential - @param fg Fileglob structure - @param label Policy label for fg -*/ + * @brief Create file label + * @param cred Subject credential + * @param fg Fileglob structure + * @param label Policy label for fg + */ typedef void mpo_file_label_associate_t( kauth_cred_t cred, struct fileglob *fg, struct label *label -); + ); /** - @brief Destroy file label - @param label The label to be destroyed - - Destroy the label on a file descriptor. In this entry point, a - policy module should free any internal storage associated with - label so that it may be destroyed. -*/ + * @brief Destroy file label + * @param label The label to be destroyed + * + * Destroy the label on a file descriptor. In this entry point, a + * policy module should free any internal storage associated with + * label so that it may be destroyed. + */ typedef void mpo_file_label_destroy_t( struct label *label -); + ); /** - @brief Initialize file label - @param label New label to initialize -*/ + * @brief Initialize file label + * @param label New label to initialize + */ typedef void mpo_file_label_init_t( struct label *label -); -/** - @brief Access control check for relabeling network interfaces - @param cred Subject credential - @param ifp network interface being relabeled - @param ifnetlabel Current label of the network interfaces - @param newlabel New label to apply to the network interfaces - @see mpo_ifnet_label_update_t - - Determine whether the subject identified by the credential can - relabel the network interface represented by ifp to the supplied - new label (newlabel). - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for relabeling network interfaces + * @param cred Subject credential + * @param ifp Network interface being relabeled + * @param ifnetlabel Current label of the network interface + * @param newlabel New label to apply to the network interface + * @see mpo_ifnet_label_update_t + * + * Determine whether the subject identified by the credential can + * relabel the network interface represented by ifp to the supplied + * new label (newlabel).
+ * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_ifnet_check_label_update_t( kauth_cred_t cred, struct ifnet *ifp, struct label *ifnetlabel, struct label *newlabel -); -/** - @brief Access control check for relabeling network interfaces - @param ifp Network interface mbuf will be transmitted through - @param ifnetlabel Label of the network interfaces - @param m The mbuf to be transmitted - @param mbuflabel Label of the mbuf to be transmitted - @param family Address Family, AF_* - @param type Type of socket, SOCK_{STREAM,DGRAM,RAW} - - Determine whether the mbuf with label mbuflabel may be transmitted - through the network interface represented by ifp that has the - label ifnetlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for transmitting a packet through a network interface + * @param ifp Network interface mbuf will be transmitted through + * @param ifnetlabel Label of the network interface + * @param m The mbuf to be transmitted + * @param mbuflabel Label of the mbuf to be transmitted + * @param family Address family, AF_* + * @param type Type of socket, SOCK_{STREAM,DGRAM,RAW} + * + * Determine whether the mbuf with label mbuflabel may be transmitted + * through the network interface represented by ifp that has the + * label ifnetlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_ifnet_check_transmit_t( struct ifnet *ifp, struct label *ifnetlabel, @@ -1129,138 +1129,138 @@ typedef int mpo_ifnet_check_transmit_t( struct label *mbuflabel, int family, int type -); + ); /** - @brief Create a network interface label - @param ifp Network interface labeled - @param ifnetlabel Label for the network interface - - Set the label of a newly created network interface, most likely - using the information in the supplied network interface struct. -*/ + * @brief Create a network interface label + * @param ifp Network interface labeled + * @param ifnetlabel Label for the network interface + * + * Set the label of a newly created network interface, most likely + * using the information in the supplied network interface struct. + */ typedef void mpo_ifnet_label_associate_t( struct ifnet *ifp, struct label *ifnetlabel -); + ); /** - @brief Copy an ifnet label - @param src Source ifnet label - @param dest Destination ifnet label - - Copy the label information from src to dest. -*/ + * @brief Copy an ifnet label + * @param src Source ifnet label + * @param dest Destination ifnet label + * + * Copy the label information from src to dest. + */ typedef void mpo_ifnet_label_copy_t( struct label *src, struct label *dest -); + ); /** - @brief Destroy ifnet label - @param label The label to be destroyed - - Destroy the label on an ifnet label. In this entry point, a - policy module should free any internal storage associated with - label so that it may be destroyed. -*/ + * @brief Destroy ifnet label + * @param label The label to be destroyed + * + * Destroy the label on an ifnet. In this entry point, a + * policy module should free any internal storage associated with + * label so that it may be destroyed.
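A transmit check usually reduces to an ordering comparison between the mbuf's label and the outgoing interface's label. A minimal sketch in the same hypothetical slot/level scheme as the other examples in this section:

#include <sys/errno.h>

#define EXAMPLE_SLOT 1  /* hypothetical slot index owned by this policy */

static int
example_ifnet_check_transmit(struct ifnet *ifp, struct label *ifnetlabel,
    struct mbuf *m, struct label *mbuflabel, int family, int type)
{
#pragma unused(ifp, m, family, type)
	/* Do not let "higher" traffic leave through a "lower" interface. */
	if (mac_label_get(mbuflabel, EXAMPLE_SLOT) >
	    mac_label_get(ifnetlabel, EXAMPLE_SLOT)) {
		return EACCES;
	}
	return 0;
}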
+ */ typedef void mpo_ifnet_label_destroy_t( struct label *label -); + ); /** - @brief Externalize an ifnet label - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of the label - - Produce an external representation of the label on an interface. - An externalized label consists of a text representation of the - label contents that can be used with user applications. - Policy-agnostic user space tools will display this externalized - version. - - @return 0 on success, return non-zero if an error occurs while - externalizing the label data. - -*/ + * @brief Externalize an ifnet label + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of the label + * + * Produce an external representation of the label on an interface. + * An externalized label consists of a text representation of the + * label contents that can be used with user applications. + * Policy-agnostic user space tools will display this externalized + * version. + * + * @return 0 on success, return non-zero if an error occurs while + * externalizing the label data. + * + */ typedef int mpo_ifnet_label_externalize_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Initialize ifnet label - @param label New label to initialize -*/ + * @brief Initialize ifnet label + * @param label New label to initialize + */ typedef void mpo_ifnet_label_init_t( struct label *label -); + ); /** - @brief Internalize an interface label - @param label Label to be internalized - @param element_name Name of the label namespace for which the label should - be internalized - @param element_data Text data to be internalized - - Produce an interface label from an external representation. An - externalized label consists of a text representation of the label - contents that can be used with user applications. Policy-agnostic - user space tools will forward text version to the kernel for - processing by individual policy modules. - - The policy's internalize entry points will be called only if the - policy has registered interest in the label namespace. - - @return 0 on success, Otherwise, return non-zero if an error occurs - while internalizing the label data. - -*/ + * @brief Internalize an interface label + * @param label Label to be internalized + * @param element_name Name of the label namespace for which the label should + * be internalized + * @param element_data Text data to be internalized + * + * Produce an interface label from an external representation. An + * externalized label consists of a text representation of the label + * contents that can be used with user applications. Policy-agnostic + * user space tools will forward the text version to the kernel for + * processing by individual policy modules. + * + * The policy's internalize entry points will be called only if the + * policy has registered interest in the label namespace. + * + * @return 0 on success. Otherwise, return non-zero if an error occurs + * while internalizing the label data. + * + */ typedef int mpo_ifnet_label_internalize_t( struct label *label, char *element_name, char *element_data -); + ); /** - @brief Recycle up a network interface label - @param label The label to be recycled - - Recycle a network interface label.
Darwin caches the struct ifnet - of detached ifnets in a "free pool". Before ifnets are returned - to the "free pool", policies can cleanup or overwrite any information - present in the label. -*/ + * @brief Recycle a network interface label + * @param label The label to be recycled + * + * Recycle a network interface label. Darwin caches the struct ifnet + * of detached ifnets in a "free pool". Before ifnets are returned + * to the "free pool", policies can clean up or overwrite any information + * present in the label. + */ typedef void mpo_ifnet_label_recycle_t( struct label *label -); -/** - @brief Update a network interface label - @param cred Subject credential - @param ifp The network interface to be relabeled - @param ifnetlabel The current label of the network interface - @param newlabel A new label to apply to the network interface - @see mpo_ifnet_check_label_update_t - - Update the label on a network interface, using the supplied new label. -*/ + ); +/** + * @brief Update a network interface label + * @param cred Subject credential + * @param ifp The network interface to be relabeled + * @param ifnetlabel The current label of the network interface + * @param newlabel A new label to apply to the network interface + * @see mpo_ifnet_check_label_update_t + * + * Update the label on a network interface, using the supplied new label. + */ typedef void mpo_ifnet_label_update_t( kauth_cred_t cred, struct ifnet *ifp, struct label *ifnetlabel, struct label *newlabel -); -/** - @brief Access control check for delivering a packet to a socket - @param inp inpcb the socket is associated with - @param inplabel Label of the inpcb - @param m The mbuf being received - @param mbuflabel Label of the mbuf being received - @param family Address family, AF_* - @param type Type of socket, SOCK_{STREAM,DGRAM,RAW} - - Determine whether the mbuf with label mbuflabel may be received - by the socket associated with inpcb that has the label inplabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for delivering a packet to a socket + * @param inp inpcb the socket is associated with + * @param inplabel Label of the inpcb + * @param m The mbuf being received + * @param mbuflabel Label of the mbuf being received + * @param family Address family, AF_* + * @param type Type of socket, SOCK_{STREAM,DGRAM,RAW} + * + * Determine whether the mbuf with label mbuflabel may be received + * by the socket associated with inpcb that has the label inplabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_inpcb_check_deliver_t( struct inpcb *inp, struct label *inplabel, @@ -1268,367 +1268,367 @@ typedef int mpo_inpcb_check_deliver_t( struct label *mbuflabel, int family, int type -); + ); /** - @brief Create an inpcb label - @param so Socket containing the inpcb to be labeled - @param solabel Label of the socket - @param inp inpcb to be labeled - @param inplabel Label for the inpcb - - Set the label of a newly created inpcb, most likely - using the information in the socket and/or socket label. -*/ + * @brief Create an inpcb label + * @param so Socket containing the inpcb to be labeled + * @param solabel Label of the socket + * @param inp inpcb to be labeled + * @param inplabel Label for the inpcb + * + * Set the label of a newly created inpcb, most likely + * using the information in the socket and/or socket label.
+ */ typedef void mpo_inpcb_label_associate_t( struct socket *so, struct label *solabel, struct inpcb *inp, struct label *inplabel -); + ); /** - @brief Destroy inpcb label - @param label The label to be destroyed - - Destroy the label on an inpcb label. In this entry point, a - policy module should free any internal storage associated with - label so that it may be destroyed. -*/ + * @brief Destroy inpcb label + * @param label The label to be destroyed + * + * Destroy the label on an inpcb. In this entry point, a + * policy module should free any internal storage associated with + * label so that it may be destroyed. + */ typedef void mpo_inpcb_label_destroy_t( struct label *label -); + ); /** - @brief Initialize inpcb label - @param label New label to initialize - @param flag M_WAITOK or M_NOWAIT -*/ + * @brief Initialize inpcb label + * @param label New label to initialize + * @param flag M_WAITOK or M_NOWAIT + */ typedef int mpo_inpcb_label_init_t( struct label *label, int flag -); + ); /** - @brief Recycle up an inpcb label - @param label The label to be recycled - - Recycle an inpcb label. Darwin allocates the inpcb as part of - the socket structure in some cases. For this case we must recycle - rather than destroy the inpcb as it will be reused later. -*/ + * @brief Recycle an inpcb label + * @param label The label to be recycled + * + * Recycle an inpcb label. Darwin allocates the inpcb as part of + * the socket structure in some cases. For this case we must recycle + * rather than destroy the inpcb as it will be reused later. + */ typedef void mpo_inpcb_label_recycle_t( struct label *label -); + ); /** - @brief Update an inpcb label from a socket label - @param so Socket containing the inpcb to be relabeled - @param solabel New label of the socket - @param inp inpcb to be labeled - @param inplabel Label for the inpcb - - Set the label of a newly created inpcb due to a change in the - underlying socket label. -*/ + * @brief Update an inpcb label from a socket label + * @param so Socket containing the inpcb to be relabeled + * @param solabel New label of the socket + * @param inp inpcb to be labeled + * @param inplabel Label for the inpcb + * + * Set the label of a newly created inpcb due to a change in the + * underlying socket label. + */ typedef void mpo_inpcb_label_update_t( struct socket *so, struct label *solabel, struct inpcb *inp, struct label *inplabel -); + ); /** - @brief Device hardware access control - @param devtype Type of device connected - - This is the MAC Framework device access control, which is called by the I/O - Kit when a new device is connected to the system to determine whether that - device should be trusted. A list of properties associated with the device - is passed as an XML-formatted string. The routine should examine these - properties to determine the trustworthiness of the device. A return value - of EPERM forces the device to be claimed by a special device driver that - will prevent its operation. - - @warning This is an experimental interface and may change in the future. - - @return Return EPERM to indicate that the device is untrusted and should - not be allowed to operate. Return zero to indicate that the device is - trusted and should be allowed to operate normally.
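A minimal sketch of the recycle pattern just described (illustrative, not from the patch): because the inpcb is reused rather than freed, the policy drops its private state and resets its slot. It assumes the hypothetical my_slot index and the mac_label_get()/mac_label_set() accessors.

#include <security/mac_policy.h>
#include <sys/malloc.h>

static int my_slot;	/* hypothetical per-policy label slot */

static void
my_inpcb_label_recycle(struct label *label)
{
	void *data = (void *)mac_label_get(label, my_slot);

	if (data != NULL)
		FREE(data, M_MACTEMP);	/* drop policy-private state */
	mac_label_set(label, my_slot, 0);	/* the label object is reused */
}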
- -*/ + * @brief Device hardware access control + * @param devtype Type of device connected + * + * This is the MAC Framework device access control, which is called by the I/O + * Kit when a new device is connected to the system to determine whether that + * device should be trusted. A list of properties associated with the device + * is passed as an XML-formatted string. The routine should examine these + * properties to determine the trustworthiness of the device. A return value + * of EPERM forces the device to be claimed by a special device driver that + * will prevent its operation. + * + * @warning This is an experimental interface and may change in the future. + * + * @return Return EPERM to indicate that the device is untrusted and should + * not be allowed to operate. Return zero to indicate that the device is + * trusted and should be allowed to operate normally. + * + */ typedef int mpo_iokit_check_device_t( char *devtype, struct mac_module_data *mdata -); + ); /** - @brief Access control check for opening an I/O Kit device - @param cred Subject credential - @param user_client User client instance - @param user_client_type User client type - - Determine whether the subject identified by the credential can open an - I/O Kit device at the passed path of the passed user client class and - type. - - @return Return 0 if access is granted, or an appropriate value for - errno should be returned. -*/ + * @brief Access control check for opening an I/O Kit device + * @param cred Subject credential + * @param user_client User client instance + * @param user_client_type User client type + * + * Determine whether the subject identified by the credential can open an + * I/O Kit device at the passed path of the passed user client class and + * type. + * + * @return Return 0 if access is granted, or an appropriate value for + * errno should be returned. + */ typedef int mpo_iokit_check_open_t( kauth_cred_t cred, io_object_t user_client, unsigned int user_client_type -); + ); /** - @brief Access control check for setting I/O Kit device properties - @param cred Subject credential - @param entry Target device - @param properties Property list - - Determine whether the subject identified by the credential can set - properties on an I/O Kit device. - - @return Return 0 if access is granted, or an appropriate value for - errno should be returned. -*/ + * @brief Access control check for setting I/O Kit device properties + * @param cred Subject credential + * @param entry Target device + * @param properties Property list + * + * Determine whether the subject identified by the credential can set + * properties on an I/O Kit device. + * + * @return Return 0 if access is granted, or an appropriate value for + * errno should be returned. + */ typedef int mpo_iokit_check_set_properties_t( kauth_cred_t cred, io_object_t entry, io_object_t properties -); + ); /** - @brief Indicate desire to filter I/O Kit devices properties - @param cred Subject credential - @param entry Target device - @see mpo_iokit_check_get_property_t - - Indicate whether this policy may restrict the subject credential - from reading properties of the target device. - If a policy returns success from this entry point, the - mpo_iokit_check_get_property entry point will later be called - for each property that the subject credential tries to read from - the target device. 
- - This entry point is primarilly to optimize bulk property reads - by skipping calls to the mpo_iokit_check_get_property entry point - for credentials / devices no MAC policy is interested in. - - @warning Even if a policy returns 0, it should behave correctly in - the presence of an invocation of mpo_iokit_check_get_property, as that - call may happen as a result of another policy requesting a transition. - - @return Non-zero if a transition is required, 0 otherwise. + * @brief Indicate desire to filter I/O Kit device properties + * @param cred Subject credential + * @param entry Target device + * @see mpo_iokit_check_get_property_t + * + * Indicate whether this policy may restrict the subject credential + * from reading properties of the target device. + * If a policy returns success from this entry point, the + * mpo_iokit_check_get_property entry point will later be called + * for each property that the subject credential tries to read from + * the target device. + * + * This entry point is primarily to optimize bulk property reads + * by skipping calls to the mpo_iokit_check_get_property entry point + * for credentials / devices that no MAC policy is interested in. + * + * @warning Even if a policy returns 0, it should behave correctly in + * the presence of an invocation of mpo_iokit_check_get_property, as that + * call may happen as a result of another policy requesting a transition. + * + * @return Non-zero if a transition is required, 0 otherwise. */ typedef int mpo_iokit_check_filter_properties_t( kauth_cred_t cred, io_object_t entry -); + ); /** - @brief Access control check for getting I/O Kit device properties - @param cred Subject credential - @param entry Target device - @param name Property name - - Determine whether the subject identified by the credential can get - properties on an I/O Kit device. - - @return Return 0 if access is granted, or an appropriate value for - errno. -*/ + * @brief Access control check for getting I/O Kit device properties + * @param cred Subject credential + * @param entry Target device + * @param name Property name + * + * Determine whether the subject identified by the credential can get + * properties on an I/O Kit device. + * + * @return Return 0 if access is granted, or an appropriate value for + * errno. + */ typedef int mpo_iokit_check_get_property_t( kauth_cred_t cred, io_object_t entry, const char *name -); + ); /** - @brief Access control check for software HID control - @param cred Subject credential - - Determine whether the subject identified by the credential can - control the HID (Human Interface Device) subsystem, such as to - post synthetic keypresses, pointer movement and clicks. - - @return Return 0 if access is granted, or an appropriate value for - errno. -*/ + * @brief Access control check for software HID control + * @param cred Subject credential + * + * Determine whether the subject identified by the credential can + * control the HID (Human Interface Device) subsystem, such as to + * post synthetic keypresses, pointer movement and clicks. + * + * @return Return 0 if access is granted, or an appropriate value for + * errno.
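To illustrate how the filter and per-property checks above compose (a hedged sketch, not part of the patch; the property name and the superuser test are purely illustrative):

#include <security/mac_policy.h>
#include <sys/errno.h>
#include <sys/kauth.h>
#include <string.h>

static int
my_iokit_check_filter_properties(kauth_cred_t cred, io_object_t entry)
{
	/* Non-zero asks the framework to consult the per-property check. */
	return (kauth_cred_issuser(cred) ? 0 : 1);
}

static int
my_iokit_check_get_property(kauth_cred_t cred, io_object_t entry,
    const char *name)
{
	/* Hide one illustrative property from non-root callers. */
	if (strcmp(name, "SecretCalibrationData") == 0)
		return (EPERM);
	return (0);
}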
+ */ typedef int mpo_iokit_check_hid_control_t( kauth_cred_t cred -); + ); /** - @brief Create an IP reassembly queue label - @param fragment First received IP fragment - @param fragmentlabel Policy label for fragment - @param ipq IP reassembly queue to be labeled - @param ipqlabel Policy label to be filled in for ipq - - Set the label on a newly created IP reassembly queue from - the mbuf header of the first received fragment. -*/ + * @brief Create an IP reassembly queue label + * @param fragment First received IP fragment + * @param fragmentlabel Policy label for fragment + * @param ipq IP reassembly queue to be labeled + * @param ipqlabel Policy label to be filled in for ipq + * + * Set the label on a newly created IP reassembly queue from + * the mbuf header of the first received fragment. + */ typedef void mpo_ipq_label_associate_t( struct mbuf *fragment, struct label *fragmentlabel, struct ipq *ipq, struct label *ipqlabel -); + ); /** - @brief Compare an mbuf header label to an ipq label - @param fragment IP datagram fragment - @param fragmentlabel Policy label for fragment - @param ipq IP fragment reassembly queue - @param ipqlabel Policy label for ipq - - Compare the label of the mbuf header containing an IP datagram - (fragment) fragment with the label of the passed IP fragment - reassembly queue (ipq). Return (1) for a successful match, or (0) - for no match. This call is made when the IP stack attempts to - find an existing fragment reassembly queue for a newly received - fragment; if this fails, a new fragment reassembly queue may be - instantiated for the fragment. Policies may use this entry point - to prevent the reassembly of otherwise matching IP fragments if - policy does not permit them to be reassembled based on the label - or other information. -*/ + * @brief Compare an mbuf header label to an ipq label + * @param fragment IP datagram fragment + * @param fragmentlabel Policy label for fragment + * @param ipq IP fragment reassembly queue + * @param ipqlabel Policy label for ipq + * + * Compare the label of the mbuf header containing an IP datagram + * (fragment) fragment with the label of the passed IP fragment + * reassembly queue (ipq). Return (1) for a successful match, or (0) + * for no match. This call is made when the IP stack attempts to + * find an existing fragment reassembly queue for a newly received + * fragment; if this fails, a new fragment reassembly queue may be + * instantiated for the fragment. Policies may use this entry point + * to prevent the reassembly of otherwise matching IP fragments if + * policy does not permit them to be reassembled based on the label + * or other information. + */ typedef int mpo_ipq_label_compare_t( struct mbuf *fragment, struct label *fragmentlabel, struct ipq *ipq, struct label *ipqlabel -); + ); /** - @brief Destroy IP reassembly queue label - @param label The label to be destroyed - - Destroy the label on an IP fragment queue. In this entry point, a - policy module should free any internal storage associated with - label so that it may be destroyed. -*/ + * @brief Destroy IP reassembly queue label + * @param label The label to be destroyed + * + * Destroy the label on an IP fragment queue. In this entry point, a + * policy module should free any internal storage associated with + * label so that it may be destroyed. 
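A minimal illustration of the 1/0 match convention described for mpo_ipq_label_compare_t above (a sketch under the assumption that the policy keeps one integer per label in the hypothetical slot my_slot):

#include <security/mac_policy.h>

static int my_slot;	/* hypothetical per-policy label slot */

static int
my_ipq_label_compare(struct mbuf *fragment, struct label *fragmentlabel,
    struct ipq *ipq, struct label *ipqlabel)
{
	/* 1 = fragment may join this reassembly queue, 0 = no match. */
	return (mac_label_get(fragmentlabel, my_slot) ==
	    mac_label_get(ipqlabel, my_slot) ? 1 : 0);
}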
+ */ typedef void mpo_ipq_label_destroy_t( struct label *label -); + ); /** - @brief Initialize IP reassembly queue label - @param label New label to initialize - @param flag M_WAITOK or M_NOWAIT - - Initialize the label on a newly instantiated IP fragment reassembly - queue. The flag field may be one of M_WAITOK and M_NOWAIT, and - should be employed to avoid performing a sleeping malloc(9) during - this initialization call. IP fragment reassembly queue allocation - frequently occurs in performance sensitive environments, and the - implementation should be careful to avoid sleeping or long-lived - operations. This entry point is permitted to fail resulting in - the failure to allocate the IP fragment reassembly queue. -*/ + * @brief Initialize IP reassembly queue label + * @param label New label to initialize + * @param flag M_WAITOK or M_NOWAIT + * + * Initialize the label on a newly instantiated IP fragment reassembly + * queue. The flag field may be one of M_WAITOK and M_NOWAIT, and + * should be employed to avoid performing a sleeping malloc(9) during + * this initialization call. IP fragment reassembly queue allocation + * frequently occurs in performance sensitive environments, and the + * implementation should be careful to avoid sleeping or long-lived + * operations. This entry point is permitted to fail resulting in + * the failure to allocate the IP fragment reassembly queue. + */ typedef int mpo_ipq_label_init_t( struct label *label, int flag -); + ); /** - @brief Update the label on an IP fragment reassembly queue - @param fragment IP fragment - @param fragmentlabel Policy label for fragment - @param ipq IP fragment reassembly queue - @param ipqlabel Policy label to be updated for ipq - - Update the label on an IP fragment reassembly queue (ipq) based - on the acceptance of the passed IP fragment mbuf header (fragment). -*/ + * @brief Update the label on an IP fragment reassembly queue + * @param fragment IP fragment + * @param fragmentlabel Policy label for fragment + * @param ipq IP fragment reassembly queue + * @param ipqlabel Policy label to be updated for ipq + * + * Update the label on an IP fragment reassembly queue (ipq) based + * on the acceptance of the passed IP fragment mbuf header (fragment). + */ typedef void mpo_ipq_label_update_t( struct mbuf *fragment, struct label *fragmentlabel, struct ipq *ipq, struct label *ipqlabel -); + ); /** - @brief Assign a label to a new mbuf - @param bpf_d BPF descriptor - @param b_label Policy label for bpf_d - @param m Object; mbuf - @param m_label Policy label to fill in for m - - Set the label on the mbuf header of a newly created datagram - generated using the passed BPF descriptor. This call is made when - a write is performed to the BPF device associated with the passed - BPF descriptor. -*/ + * @brief Assign a label to a new mbuf + * @param bpf_d BPF descriptor + * @param b_label Policy label for bpf_d + * @param m Object; mbuf + * @param m_label Policy label to fill in for m + * + * Set the label on the mbuf header of a newly created datagram + * generated using the passed BPF descriptor. This call is made when + * a write is performed to the BPF device associated with the passed + * BPF descriptor. 
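The M_WAITOK/M_NOWAIT contract described for mpo_ipq_label_init_t above can be honored along these lines (an illustrative sketch; struct my_ipq_state and my_slot are hypothetical):

#include <security/mac_policy.h>
#include <sys/malloc.h>
#include <sys/errno.h>

static int my_slot;	/* hypothetical per-policy label slot */

struct my_ipq_state {	/* hypothetical per-queue policy data */
	int sensitivity;
};

static int
my_ipq_label_init(struct label *label, int flag)
{
	struct my_ipq_state *st;

	/* Pass the caller's M_WAITOK/M_NOWAIT through so we never sleep. */
	MALLOC(st, struct my_ipq_state *, sizeof(*st), M_MACTEMP,
	    flag | M_ZERO);
	if (st == NULL)
		return (ENOMEM);	/* the documented, permitted failure */
	mac_label_set(label, my_slot, (intptr_t)st);
	return (0);
}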
+ */ typedef void mpo_mbuf_label_associate_bpfdesc_t( struct bpf_d *bpf_d, struct label *b_label, struct mbuf *m, struct label *m_label -); + ); /** - @brief Assign a label to a new mbuf - @param ifp Interface descriptor - @param i_label Existing label of ifp - @param m Object; mbuf - @param m_label Policy label to fill in for m - - Label an mbuf based on the interface from which it was received. -*/ + * @brief Assign a label to a new mbuf + * @param ifp Interface descriptor + * @param i_label Existing label of ifp + * @param m Object; mbuf + * @param m_label Policy label to fill in for m + * + * Label an mbuf based on the interface from which it was received. + */ typedef void mpo_mbuf_label_associate_ifnet_t( struct ifnet *ifp, struct label *i_label, struct mbuf *m, struct label *m_label -); + ); /** - @brief Assign a label to a new mbuf - @param inp inpcb structure - @param i_label Existing label of inp - @param m Object; mbuf - @param m_label Policy label to fill in for m - - Label an mbuf based on the inpcb from which it was derived. -*/ + * @brief Assign a label to a new mbuf + * @param inp inpcb structure + * @param i_label Existing label of inp + * @param m Object; mbuf + * @param m_label Policy label to fill in for m + * + * Label an mbuf based on the inpcb from which it was derived. + */ typedef void mpo_mbuf_label_associate_inpcb_t( struct inpcb *inp, struct label *i_label, struct mbuf *m, struct label *m_label -); + ); /** - @brief Set the label on a newly reassembled IP datagram - @param ipq IP fragment reassembly queue - @param ipqlabel Policy label for ipq - @param mbuf IP datagram to be labeled - @param mbuflabel Policy label to be filled in for mbuf - - Set the label on a newly reassembled IP datagram (mbuf) from the IP - fragment reassembly queue (ipq) from which it was generated. -*/ + * @brief Set the label on a newly reassembled IP datagram + * @param ipq IP fragment reassembly queue + * @param ipqlabel Policy label for ipq + * @param mbuf IP datagram to be labeled + * @param mbuflabel Policy label to be filled in for mbuf + * + * Set the label on a newly reassembled IP datagram (mbuf) from the IP + * fragment reassembly queue (ipq) from which it was generated. + */ typedef void mpo_mbuf_label_associate_ipq_t( struct ipq *ipq, struct label *ipqlabel, struct mbuf *mbuf, struct label *mbuflabel -); + ); /** - @brief Assign a label to a new mbuf - @param ifp Subject; network interface - @param i_label Existing label of ifp - @param m Object; mbuf - @param m_label Policy label to fill in for m - - Set the label on the mbuf header of a newly created datagram - generated for the purposes of a link layer response for the passed - interface. This call may be made in a number of situations, including - for ARP or ND6 responses in the IPv4 and IPv6 stacks. -*/ + * @brief Assign a label to a new mbuf + * @param ifp Subject; network interface + * @param i_label Existing label of ifp + * @param m Object; mbuf + * @param m_label Policy label to fill in for m + * + * Set the label on the mbuf header of a newly created datagram + * generated for the purposes of a link layer response for the passed + * interface. This call may be made in a number of situations, including + * for ARP or ND6 responses in the IPv4 and IPv6 stacks. 
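The mbuf association entry points above usually just propagate the source object's label into the mbuf label; for instance (sketch, same hypothetical slot convention as the earlier examples):

#include <security/mac_policy.h>

static int my_slot;	/* hypothetical per-policy label slot */

static void
my_mbuf_label_associate_ifnet(struct ifnet *ifp, struct label *i_label,
    struct mbuf *m, struct label *m_label)
{
	/* Tag the datagram with the label of the receiving interface. */
	mac_label_set(m_label, my_slot, mac_label_get(i_label, my_slot));
}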
+ */ typedef void mpo_mbuf_label_associate_linklayer_t( struct ifnet *ifp, struct label *i_label, struct mbuf *m, struct label *m_label -); -/** - @brief Assign a label to a new mbuf - @param oldmbuf mbuf headerder for existing datagram for existing datagram - @param oldmbuflabel Policy label for oldmbuf - @param ifp Network interface - @param ifplabel Policy label for ifp - @param newmbuf mbuf header to be labeled for new datagram - @param newmbuflabel Policy label for newmbuf - - Set the label on the mbuf header of a newly created datagram - generated from the existing passed datagram when it is processed - by the passed multicast encapsulation interface. This call is made - when an mbuf is to be delivered using the virtual interface. -*/ + ); +/** + * @brief Assign a label to a new mbuf + * @param oldmbuf mbuf header for existing datagram + * @param oldmbuflabel Policy label for oldmbuf + * @param ifp Network interface + * @param ifplabel Policy label for ifp + * @param newmbuf mbuf header to be labeled for new datagram + * @param newmbuflabel Policy label for newmbuf + * + * Set the label on the mbuf header of a newly created datagram + * generated from the existing passed datagram when it is processed + * by the passed multicast encapsulation interface. This call is made + * when an mbuf is to be delivered using the virtual interface. + */ typedef void mpo_mbuf_label_associate_multicast_encap_t( struct mbuf *oldmbuf, struct label *oldmbuflabel, @@ -1636,1319 +1636,1319 @@ typedef void mpo_mbuf_label_associate_multicast_encap_t( struct label *ifplabel, struct mbuf *newmbuf, struct label *newmbuflabel -); + ); /** - @brief Assign a label to a new mbuf - @param oldmbuf Received datagram - @param oldmbuflabel Policy label for oldmbuf - @param newmbuf Newly created datagram - @param newmbuflabel Policy label for newmbuf - - Set the label on the mbuf header of a newly created datagram generated - by the IP stack in response to an existing received datagram (oldmbuf). - This call may be made in a number of situations, including when responding - to ICMP request datagrams. -*/ + * @brief Assign a label to a new mbuf + * @param oldmbuf Received datagram + * @param oldmbuflabel Policy label for oldmbuf + * @param newmbuf Newly created datagram + * @param newmbuflabel Policy label for newmbuf + * + * Set the label on the mbuf header of a newly created datagram generated + * by the IP stack in response to an existing received datagram (oldmbuf). + * This call may be made in a number of situations, including when responding + * to ICMP request datagrams. + */ typedef void mpo_mbuf_label_associate_netlayer_t( struct mbuf *oldmbuf, struct label *oldmbuflabel, struct mbuf *newmbuf, struct label *newmbuflabel -); + ); /** - @brief Assign a label to a new mbuf - @param so Socket to label - @param so_label Policy label for socket - @param m Object; mbuf - @param m_label Policy label to fill in for m - - An mbuf structure is used to store network traffic in transit. - When an application sends data to a socket or a pipe, it is wrapped - in an mbuf first. This function sets the label on a newly created mbuf header - based on the socket sending the data. The contents of the label should be - suitable for performing an access check on the receiving side of the - communication. - - Only labeled MBUFs will be presented to the policy via this entrypoint.
-*/ + * @brief Assign a label to a new mbuf + * @param so Socket to label + * @param so_label Policy label for socket + * @param m Object; mbuf + * @param m_label Policy label to fill in for m + * + * An mbuf structure is used to store network traffic in transit. + * When an application sends data to a socket or a pipe, it is wrapped + * in an mbuf first. This function sets the label on a newly created mbuf header + * based on the socket sending the data. The contents of the label should be + * suitable for performing an access check on the receiving side of the + * communication. + * + * Only labeled MBUFs will be presented to the policy via this entrypoint. + */ typedef void mpo_mbuf_label_associate_socket_t( socket_t so, struct label *so_label, struct mbuf *m, struct label *m_label -); + ); /** - @brief Copy a mbuf label - @param src Source label - @param dest Destination label - - Copy the mbuf label information in src into dest. - - Only called when both source and destination mbufs have labels. -*/ + * @brief Copy a mbuf label + * @param src Source label + * @param dest Destination label + * + * Copy the mbuf label information in src into dest. + * + * Only called when both source and destination mbufs have labels. + */ typedef void mpo_mbuf_label_copy_t( struct label *src, struct label *dest -); + ); /** - @brief Destroy mbuf label - @param label The label to be destroyed - - Destroy a mbuf label. Since the - object is going out of scope, policy modules should free any - internal storage associated with the label so that it may be - destroyed. -*/ + * @brief Destroy mbuf label + * @param label The label to be destroyed + * + * Destroy a mbuf label. Since the + * object is going out of scope, policy modules should free any + * internal storage associated with the label so that it may be + * destroyed. + */ typedef void mpo_mbuf_label_destroy_t( struct label *label -); + ); /** - @brief Initialize mbuf label - @param label New label to initialize - @param flag Malloc flags - - Initialize the label for a newly instantiated mbuf. - - @warning Since it is possible for the flags to be set to - M_NOWAIT, the malloc operation may fail. - - @return On success, 0, otherwise, an appropriate errno return value. -*/ + * @brief Initialize mbuf label + * @param label New label to initialize + * @param flag Malloc flags + * + * Initialize the label for a newly instantiated mbuf. + * + * @warning Since it is possible for the flags to be set to + * M_NOWAIT, the malloc operation may fail. + * + * @return On success, 0, otherwise, an appropriate errno return value. + */ typedef int mpo_mbuf_label_init_t( struct label *label, int flag -); + ); /** - @brief Access control check for fsctl - @param cred Subject credential - @param mp The mount point - @param label Label associated with the mount point - @param cmd Filesystem-dependent request code; see fsctl(2) - - Determine whether the subject identified by the credential can perform - the volume operation indicated by com. - - @warning The fsctl() system call is directly analogous to ioctl(); since - the associated data is opaque from the standpoint of the MAC framework - and since these operations can affect many aspects of system operation, - policies must exercise extreme care when implementing access control checks. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. 
-*/ + * @brief Access control check for fsctl + * @param cred Subject credential + * @param mp The mount point + * @param label Label associated with the mount point + * @param cmd Filesystem-dependent request code; see fsctl(2) + * + * Determine whether the subject identified by the credential can perform + * the volume operation indicated by cmd. + * + * @warning The fsctl() system call is directly analogous to ioctl(); since + * the associated data is opaque from the standpoint of the MAC framework + * and since these operations can affect many aspects of system operation, + * policies must exercise extreme care when implementing access control checks. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_mount_check_fsctl_t( kauth_cred_t cred, struct mount *mp, struct label *label, unsigned int cmd -); + ); /** - @brief Access control check for the retrieval of file system attributes - @param cred Subject credential - @param mp The mount structure of the file system - @param vfa The attributes requested - - This entry point determines whether given subject can get information - about the given file system. This check happens during statfs() syscalls, - but is also used by other parts within the kernel such as the audit system. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - - @note Policies may change the contents of vfa to alter the list of - file system attributes returned. -*/ + * @brief Access control check for the retrieval of file system attributes + * @param cred Subject credential + * @param mp The mount structure of the file system + * @param vfa The attributes requested + * + * This entry point determines whether the given subject can get information + * about the given file system. This check happens during statfs() syscalls, + * but is also used by other parts within the kernel such as the audit system. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + * + * @note Policies may change the contents of vfa to alter the list of + * file system attributes returned. + */ typedef int mpo_mount_check_getattr_t( kauth_cred_t cred, struct mount *mp, struct label *mp_label, struct vfs_attr *vfa -); + ); /** - @brief Access control check for mount point relabeling - @param cred Subject credential - @param mp Object file system mount point - @param mntlabel Policy label for fle system mount point - - Determine whether the subject identified by the credential can relabel - the mount point. This call is made when a file system mount is updated. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch - or EPERM for lack of privilege. -*/ + * @brief Access control check for mount point relabeling + * @param cred Subject credential + * @param mp Object file system mount point + * @param mntlabel Policy label for file system mount point + * + * Determine whether the subject identified by the credential can relabel + * the mount point. This call is made when a file system mount is updated. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch + * or EPERM for lack of privilege.
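Given the opacity warning above, fsctl checks tend to be conservative allow/deny gates on the command value alone. A hedged sketch follows; the request code is hypothetical, not a real fsctl:

#include <security/mac_policy.h>
#include <sys/errno.h>
#include <sys/kauth.h>

#define MY_RISKY_FSCTL	0x80086801u	/* hypothetical request code */

static int
my_mount_check_fsctl(kauth_cred_t cred, struct mount *mp,
    struct label *label, unsigned int cmd)
{
	/* The payload is opaque, so gate on the command value alone. */
	if (cmd == MY_RISKY_FSCTL && !kauth_cred_issuser(cred))
		return (EPERM);
	return (0);
}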
+ */ typedef int mpo_mount_check_label_update_t( kauth_cred_t cred, struct mount *mp, struct label *mntlabel -); -/** - @brief Access control check for mounting a file system - @param cred Subject credential - @param vp Vnode that is to be the mount point - @param vlabel Label associated with the vnode - @param cnp Component name for vp - @param vfc_name Filesystem type name - - Determine whether the subject identified by the credential can perform - the mount operation on the target vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for mounting a file system + * @param cred Subject credential + * @param vp Vnode that is to be the mount point + * @param vlabel Label associated with the vnode + * @param cnp Component name for vp + * @param vfc_name Filesystem type name + * + * Determine whether the subject identified by the credential can perform + * the mount operation on the target vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_mount_check_mount_t( kauth_cred_t cred, struct vnode *vp, struct label *vlabel, struct componentname *cnp, const char *vfc_name -); + ); /** - @brief Access control check for fs_snapshot_create - @param cred Subject credential - @mp Filesystem mount point to create snapshot of - @name Name of snapshot to create - - Determine whether the subject identified by the credential can - create a snapshot of the filesystem at the given mount point. - - @return Return 0 if access is granted, otherwise an appropriate value - for errno should be returned. -*/ + * @brief Access control check for fs_snapshot_create + * @param cred Subject credential + * @param mp Filesystem mount point to create snapshot of + * @param name Name of snapshot to create + * + * Determine whether the subject identified by the credential can + * create a snapshot of the filesystem at the given mount point. + * + * @return Return 0 if access is granted, otherwise an appropriate value + * for errno should be returned. + */ typedef int mpo_mount_check_snapshot_create_t( kauth_cred_t cred, struct mount *mp, const char *name -); + ); /** - @brief Access control check for fs_snapshot_delete - @param cred Subject credential - @mp Filesystem mount point to delete snapshot of - @name Name of snapshot to delete - - Determine whether the subject identified by the credential can - delete the named snapshot from the filesystem at the given - mount point. - - @return Return 0 if access is granted, otherwise an appropriate value - for errno should be returned. -*/ + * @brief Access control check for fs_snapshot_delete + * @param cred Subject credential + * @param mp Filesystem mount point to delete snapshot of + * @param name Name of snapshot to delete + * + * Determine whether the subject identified by the credential can + * delete the named snapshot from the filesystem at the given + * mount point. + * + * @return Return 0 if access is granted, otherwise an appropriate value + * for errno should be returned. + */ typedef int mpo_mount_check_snapshot_delete_t( kauth_cred_t cred, struct mount *mp, const char *name -); + ); /** - @brief Access control check for fs_snapshot_revert - @param cred Subject credential - @mp Filesystem mount point to revert to snapshot - @name Name of snapshot to revert to - - Determine whether the subject identified by the credential can - revert the filesystem at the given mount point to the named snapshot.
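For the snapshot checks just described, a policy that simply restricts snapshot creation to the superuser could look like this sketch (illustrative only; the root-only rule is an assumption, not framework policy):

#include <security/mac_policy.h>
#include <sys/errno.h>
#include <sys/kauth.h>

static int
my_mount_check_snapshot_create(kauth_cred_t cred, struct mount *mp,
    const char *name)
{
	/* Illustrative rule: only the superuser may create snapshots. */
	return (kauth_cred_issuser(cred) ? 0 : EPERM);
}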
- - @return Return 0 if access is granted, otherwise an appropriate value - for errno should be returned. -*/ + * @brief Access control check for fs_snapshot_revert + * @param cred Subject credential + * @param mp Filesystem mount point to revert to snapshot + * @param name Name of snapshot to revert to + * + * Determine whether the subject identified by the credential can + * revert the filesystem at the given mount point to the named snapshot. + * + * @return Return 0 if access is granted, otherwise an appropriate value + * for errno should be returned. + */ typedef int mpo_mount_check_snapshot_revert_t( kauth_cred_t cred, struct mount *mp, const char *name -); + ); /** - @brief Access control check remounting a filesystem - @param cred Subject credential - @param mp The mount point - @param mlabel Label currently associated with the mount point - - Determine whether the subject identified by the credential can perform - the remount operation on the target vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for remounting a filesystem + * @param cred Subject credential + * @param mp The mount point + * @param mlabel Label currently associated with the mount point + * + * Determine whether the subject identified by the credential can perform + * the remount operation on the target vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_mount_check_remount_t( kauth_cred_t cred, struct mount *mp, struct label *mlabel -); + ); /** - @brief Access control check for the settting of file system attributes - @param cred Subject credential - @param mp The mount structure of the file system - @param vfa The attributes requested - - This entry point determines whether given subject can set information - about the given file system, for example the volume name. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for the setting of file system attributes + * @param cred Subject credential + * @param mp The mount structure of the file system + * @param vfa The attributes requested + * + * This entry point determines whether the given subject can set information + * about the given file system, for example the volume name. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_mount_check_setattr_t( kauth_cred_t cred, struct mount *mp, struct label *mp_label, struct vfs_attr *vfa -); + ); /** - @brief Access control check for file system statistics - @param cred Subject credential - @param mp Object file system mount - @param mntlabel Policy label for mp - - Determine whether the subject identified by the credential can see - the results of a statfs performed on the file system. This call may - be made in a number of situations, including during invocations of - statfs(2) and related calls, as well as to determine what file systems - to exclude from listings of file systems, such as when getfsstat(2) - is invoked. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch - or EPERM for lack of privilege.
-*/ + * @brief Access control check for file system statistics + * @param cred Subject credential + * @param mp Object file system mount + * @param mntlabel Policy label for mp + * + * Determine whether the subject identified by the credential can see + * the results of a statfs performed on the file system. This call may + * be made in a number of situations, including during invocations of + * statfs(2) and related calls, as well as to determine what file systems + * to exclude from listings of file systems, such as when getfsstat(2) + * is invoked. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch + * or EPERM for lack of privilege. + */ typedef int mpo_mount_check_stat_t( kauth_cred_t cred, struct mount *mp, struct label *mntlabel -); + ); /** - @brief Access control check for unmounting a filesystem - @param cred Subject credential - @param mp The mount point - @param mlabel Label associated with the mount point - - Determine whether the subject identified by the credential can perform - the unmount operation on the target vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for unmounting a filesystem + * @param cred Subject credential + * @param mp The mount point + * @param mlabel Label associated with the mount point + * + * Determine whether the subject identified by the credential can perform + * the unmount operation on the target vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_mount_check_umount_t( kauth_cred_t cred, struct mount *mp, struct label *mlabel -); + ); /** - @brief Create mount labels - @param cred Subject credential - @param mp Mount point of file system being mounted - @param mntlabel Label to associate with the new mount point - @see mpo_mount_label_init_t - - Fill out the labels on the mount point being created by the supplied - user credential. This call is made when file systems are first mounted. -*/ + * @brief Create mount labels + * @param cred Subject credential + * @param mp Mount point of file system being mounted + * @param mntlabel Label to associate with the new mount point + * @see mpo_mount_label_init_t + * + * Fill out the labels on the mount point being created by the supplied + * user credential. This call is made when file systems are first mounted. + */ typedef void mpo_mount_label_associate_t( kauth_cred_t cred, struct mount *mp, struct label *mntlabel -); + ); /** - @brief Destroy mount label - @param label The label to be destroyed - - Destroy a file system mount label. Since the - object is going out of scope, policy modules should free any - internal storage associated with the label so that it may be - destroyed. -*/ + * @brief Destroy mount label + * @param label The label to be destroyed + * + * Destroy a file system mount label. Since the + * object is going out of scope, policy modules should free any + * internal storage associated with the label so that it may be + * destroyed. 
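The mount label association described above commonly derives the mount's label from the mounting credential. A sketch under the same hypothetical slot convention (recording the uid is purely illustrative):

#include <security/mac_policy.h>
#include <sys/kauth.h>

static int my_slot;	/* hypothetical per-policy label slot */

static void
my_mount_label_associate(kauth_cred_t cred, struct mount *mp,
    struct label *mntlabel)
{
	/* Record who mounted the file system as this policy's label data. */
	mac_label_set(mntlabel, my_slot, (intptr_t)kauth_cred_getuid(cred));
}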
+ */ typedef void mpo_mount_label_destroy_t( struct label *label -); + ); /** - @brief Externalize a mount point label - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of the label - - Produce an external representation of the mount point label. An - externalized label consists of a text representation of the label - contents that can be used with user applications. Policy-agnostic - user space tools will display this externalized version. - - The policy's externalize entry points will be called only if the - policy has registered interest in the label namespace. - - @return 0 on success, return non-zero if an error occurs while - externalizing the label data. - -*/ + * @brief Externalize a mount point label + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of the label + * + * Produce an external representation of the mount point label. An + * externalized label consists of a text representation of the label + * contents that can be used with user applications. Policy-agnostic + * user space tools will display this externalized version. + * + * The policy's externalize entry points will be called only if the + * policy has registered interest in the label namespace. + * + * @return 0 on success, return non-zero if an error occurs while + * externalizing the label data. + * + */ typedef int mpo_mount_label_externalize_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Initialize mount point label - @param label New label to initialize - - Initialize the label for a newly instantiated mount structure. - This label is typically used to store a default label in the case - that the file system has been mounted singlelabel. Since some - file systems do not support persistent labels (extended attributes) - or are read-only (such as CD-ROMs), it is often necessary to store - a default label separately from the label of the mount point - itself. Sleeping is permitted. -*/ + * @brief Initialize mount point label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated mount structure. + * This label is typically used to store a default label in the case + * that the file system has been mounted singlelabel. Since some + * file systems do not support persistent labels (extended attributes) + * or are read-only (such as CD-ROMs), it is often necessary to store + * a default label separately from the label of the mount point + * itself. Sleeping is permitted. + */ typedef void mpo_mount_label_init_t( struct label *label -); + ); /** - @brief Internalize a mount point label - @param label Label to be internalized - @param element_name Name of the label namespace for which the label should - be internalized - @param element_data Text data to be internalized - - Produce a mount point file system label from an external representation. - An externalized label consists of a text representation of the label - contents that can be used with user applications. Policy-agnostic - user space tools will forward text version to the kernel for - processing by individual policy modules. - - The policy's internalize entry points will be called only if the - policy has registered interest in the label namespace. 
- - @return 0 on success, Otherwise, return non-zero if an error occurs - while internalizing the label data. - -*/ + * @brief Internalize a mount point label + * @param label Label to be internalized + * @param element_name Name of the label namespace for which the label should + * be internalized + * @param element_data Text data to be internalized + * + * Produce a mount point file system label from an external representation. + * An externalized label consists of a text representation of the label + * contents that can be used with user applications. Policy-agnostic + * user space tools will forward the text version to the kernel for + * processing by individual policy modules. + * + * The policy's internalize entry points will be called only if the + * policy has registered interest in the label namespace. + * + * @return 0 on success; otherwise, return non-zero if an error occurs + * while internalizing the label data. + * + */ typedef int mpo_mount_label_internalize_t( struct label *label, char *element_name, char *element_data -); + ); /** - @brief Set the label on an IPv4 datagram fragment - @param datagram Datagram being fragmented - @param datagramlabel Policy label for datagram - @param fragment New fragment - @param fragmentlabel Policy label for fragment - - Called when an IPv4 datagram is fragmented into several smaller datagrams. - Policies implementing mbuf labels will typically copy the label from the - source datagram to the new fragment. -*/ + * @brief Set the label on an IPv4 datagram fragment + * @param datagram Datagram being fragmented + * @param datagramlabel Policy label for datagram + * @param fragment New fragment + * @param fragmentlabel Policy label for fragment + * + * Called when an IPv4 datagram is fragmented into several smaller datagrams. + * Policies implementing mbuf labels will typically copy the label from the + * source datagram to the new fragment. + */ typedef void mpo_netinet_fragment_t( struct mbuf *datagram, struct label *datagramlabel, struct mbuf *fragment, struct label *fragmentlabel -); + ); /** - @brief Set the label on an ICMP reply - @param m mbuf containing the ICMP reply - @param mlabel Policy label for m - - A policy may wish to update the label of an mbuf that refers to - an ICMP packet being sent in response to an IP packet. This may - be called in response to a bad packet or an ICMP request. -*/ + * @brief Set the label on an ICMP reply + * @param m mbuf containing the ICMP reply + * @param mlabel Policy label for m + * + * A policy may wish to update the label of an mbuf that refers to + * an ICMP packet being sent in response to an IP packet. This may + * be called in response to a bad packet or an ICMP request. + */ typedef void mpo_netinet_icmp_reply_t( struct mbuf *m, struct label *mlabel -); + ); /** - @brief Set the label on a TCP reply - @param m mbuf containing the TCP reply - @param mlabel Policy label for m - - Called for outgoing TCP packets not associated with an actual socket. -*/ + * @brief Set the label on a TCP reply + * @param m mbuf containing the TCP reply + * @param mlabel Policy label for m + * + * Called for outgoing TCP packets not associated with an actual socket.
+ */ typedef void mpo_netinet_tcp_reply_t( struct mbuf *m, struct label *mlabel -); + ); /** - @brief Access control check for pipe ioctl - @param cred Subject credential - @param cpipe Object to be accessed - @param pipelabel The label on the pipe - @param cmd The ioctl command; see ioctl(2) - - Determine whether the subject identified by the credential can perform - the ioctl operation indicated by cmd. - - @warning Since ioctl data is opaque from the standpoint of the MAC - framework, policies must exercise extreme care when implementing - access control checks. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - -*/ + * @brief Access control check for pipe ioctl + * @param cred Subject credential + * @param cpipe Object to be accessed + * @param pipelabel The label on the pipe + * @param cmd The ioctl command; see ioctl(2) + * + * Determine whether the subject identified by the credential can perform + * the ioctl operation indicated by cmd. + * + * @warning Since ioctl data is opaque from the standpoint of the MAC + * framework, policies must exercise extreme care when implementing + * access control checks. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + * + */ typedef int mpo_pipe_check_ioctl_t( kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel, unsigned int cmd -); + ); /** - @brief Access control check for pipe kqfilter - @param cred Subject credential - @param kn Object knote - @param cpipe Object to be accessed - @param pipelabel Policy label for the pipe - - Determine whether the subject identified by the credential can - receive the knote on the passed pipe. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for pipe kqfilter + * @param cred Subject credential + * @param kn Object knote + * @param cpipe Object to be accessed + * @param pipelabel Policy label for the pipe + * + * Determine whether the subject identified by the credential can + * receive the knote on the passed pipe. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_pipe_check_kqfilter_t( kauth_cred_t cred, struct knote *kn, struct pipe *cpipe, struct label *pipelabel -); + ); /** - @brief Access control check for pipe relabel - @param cred Subject credential - @param cpipe Object to be accessed - @param pipelabel The current label on the pipe - @param newlabel The new label to be used - - Determine whether the subject identified by the credential can - perform a relabel operation on the passed pipe. The cred object holds - the credentials of the subject performing the operation. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - -*/ -typedef int mpo_pipe_check_label_update_t( - kauth_cred_t cred, + * @brief Access control check for pipe relabel + * @param cred Subject credential + * @param cpipe Object to be accessed + * @param pipelabel The current label on the pipe + * @param newlabel The new label to be used + * + * Determine whether the subject identified by the credential can + * perform a relabel operation on the passed pipe. The cred object holds + * the credentials of the subject performing the operation. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned.
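As with fsctl, the pipe ioctl check above treats cmd as opaque, so policies usually gate on the credential and the pipe's label rather than the command payload. A sketch (the root-only rule and my_slot are assumptions):

#include <security/mac_policy.h>
#include <sys/errno.h>
#include <sys/kauth.h>

static int my_slot;	/* hypothetical per-policy label slot */

static int
my_pipe_check_ioctl(kauth_cred_t cred, struct pipe *cpipe,
    struct label *pipelabel, unsigned int cmd)
{
	/* Illustrative rule: ioctls on labeled pipes are root-only. */
	if (mac_label_get(pipelabel, my_slot) != 0 &&
	    !kauth_cred_issuser(cred))
		return (EPERM);
	return (0);
}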
+ * + */ +typedef int mpo_pipe_check_label_update_t( + kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel, struct label *newlabel -); + ); /** - @brief Access control check for pipe read - @param cred Subject credential - @param cpipe Object to be accessed - @param pipelabel The label on the pipe - - Determine whether the subject identified by the credential can - perform a read operation on the passed pipe. The cred object holds - the credentials of the subject performing the operation. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - -*/ + * @brief Access control check for pipe read + * @param cred Subject credential + * @param cpipe Object to be accessed + * @param pipelabel The label on the pipe + * + * Determine whether the subject identified by the credential can + * perform a read operation on the passed pipe. The cred object holds + * the credentials of the subject performing the operation. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + * + */ typedef int mpo_pipe_check_read_t( kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel -); + ); /** - @brief Access control check for pipe select - @param cred Subject credential - @param cpipe Object to be accessed - @param pipelabel The label on the pipe - @param which The operation selected on: FREAD or FWRITE - - Determine whether the subject identified by the credential can - perform a select operation on the passed pipe. The cred object holds - the credentials of the subject performing the operation. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - -*/ + * @brief Access control check for pipe select + * @param cred Subject credential + * @param cpipe Object to be accessed + * @param pipelabel The label on the pipe + * @param which The operation selected on: FREAD or FWRITE + * + * Determine whether the subject identified by the credential can + * perform a select operation on the passed pipe. The cred object holds + * the credentials of the subject performing the operation. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + * + */ typedef int mpo_pipe_check_select_t( kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel, int which -); + ); /** - @brief Access control check for pipe stat - @param cred Subject credential - @param cpipe Object to be accessed - @param pipelabel The label on the pipe - - Determine whether the subject identified by the credential can - perform a stat operation on the passed pipe. The cred object holds - the credentials of the subject performing the operation. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - -*/ + * @brief Access control check for pipe stat + * @param cred Subject credential + * @param cpipe Object to be accessed + * @param pipelabel The label on the pipe + * + * Determine whether the subject identified by the credential can + * perform a stat operation on the passed pipe. The cred object holds + * the credentials of the subject performing the operation. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. 
+ * + */ typedef int mpo_pipe_check_stat_t( kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel -); + ); /** - @brief Access control check for pipe write - @param cred Subject credential - @param cpipe Object to be accessed - @param pipelabel The label on the pipe - - Determine whether the subject identified by the credential can - perform a write operation on the passed pipe. The cred object holds - the credentials of the subject performing the operation. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - -*/ + * @brief Access control check for pipe write + * @param cred Subject credential + * @param cpipe Object to be accessed + * @param pipelabel The label on the pipe + * + * Determine whether the subject identified by the credential can + * perform a write operation on the passed pipe. The cred object holds + * the credentials of the subject performing the operation. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + * + */ typedef int mpo_pipe_check_write_t( kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel -); + ); /** - @brief Create a pipe label - @param cred Subject credential - @param cpipe object to be labeled - @param pipelabel Label for the pipe object - - Create a label for the pipe object being created by the supplied - user credential. This call is made when the pipe is being created - XXXPIPE(for one or both sides of the pipe?). - -*/ + * @brief Create a pipe label + * @param cred Subject credential + * @param cpipe object to be labeled + * @param pipelabel Label for the pipe object + * + * Create a label for the pipe object being created by the supplied + * user credential. This call is made when the pipe is being created + * XXXPIPE(for one or both sides of the pipe?). + * + */ typedef void mpo_pipe_label_associate_t( kauth_cred_t cred, struct pipe *cpipe, struct label *pipelabel -); + ); /** - @brief Copy a pipe label - @param src Source pipe label - @param dest Destination pipe label - - Copy the pipe label associated with src to dest. - XXXPIPE Describe when this is used: most likely during pipe creation to - copy from rpipe to wpipe. -*/ + * @brief Copy a pipe label + * @param src Source pipe label + * @param dest Destination pipe label + * + * Copy the pipe label associated with src to dest. + * XXXPIPE Describe when this is used: most likely during pipe creation to + * copy from rpipe to wpipe. + */ typedef void mpo_pipe_label_copy_t( struct label *src, struct label *dest -); + ); /** - @brief Destroy pipe label - @param label The label to be destroyed - - Destroy a pipe label. Since the object is going out of scope, - policy modules should free any internal storage associated with the - label so that it may be destroyed. -*/ + * @brief Destroy pipe label + * @param label The label to be destroyed + * + * Destroy a pipe label. Since the object is going out of scope, + * policy modules should free any internal storage associated with the + * label so that it may be destroyed. + */ typedef void mpo_pipe_label_destroy_t( struct label *label -); + ); /** - @brief Externalize a pipe label - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of the label - - Produce an external representation of the label on a pipe. 
- An externalized label consists of a text representation - of the label contents that can be used with user applications. - Policy-agnostic user space tools will display this externalized - version. - - The policy's externalize entry points will be called only if the - policy has registered interest in the label namespace. - - @return 0 on success, return non-zero if an error occurs while - externalizing the label data. - -*/ + * @brief Externalize a pipe label + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of the label + * + * Produce an external representation of the label on a pipe. + * An externalized label consists of a text representation + * of the label contents that can be used with user applications. + * Policy-agnostic user space tools will display this externalized + * version. + * + * The policy's externalize entry points will be called only if the + * policy has registered interest in the label namespace. + * + * @return 0 on success; return non-zero if an error occurs while + * externalizing the label data. + * + */ typedef int mpo_pipe_label_externalize_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Initialize pipe label - @param label New label to initialize - - Initialize label storage for use with a newly instantiated pipe object. - Sleeping is permitted. -*/ + * @brief Initialize pipe label + * @param label New label to initialize + * + * Initialize label storage for use with a newly instantiated pipe object. + * Sleeping is permitted. + */ typedef void mpo_pipe_label_init_t( struct label *label -); + ); /** - @brief Internalize a pipe label - @param label Label to be internalized - @param element_name Name of the label namespace for which the label should - be internalized - @param element_data Text data to be internalized - - Produce a pipe label from an external representation. An - externalized label consists of a text representation of the label - contents that can be used with user applications. Policy-agnostic - user space tools will forward text version to the kernel for - processing by individual policy modules. - - The policy's internalize entry points will be called only if the - policy has registered interest in the label namespace. - - @return 0 on success, Otherwise, return non-zero if an error occurs - while internalizing the label data. - -*/ + * @brief Internalize a pipe label + * @param label Label to be internalized + * @param element_name Name of the label namespace for which the label should + * be internalized + * @param element_data Text data to be internalized + * + * Produce a pipe label from an external representation. An + * externalized label consists of a text representation of the label + * contents that can be used with user applications. Policy-agnostic + * user space tools will forward the text version to the kernel for + * processing by individual policy modules. + * + * The policy's internalize entry points will be called only if the + * policy has registered interest in the label namespace. + * + * @return 0 on success; otherwise, return non-zero if an error occurs + * while internalizing the label data.
+ * + */ typedef int mpo_pipe_label_internalize_t( struct label *label, char *element_name, char *element_data -); -/** - @brief Update a pipe label - @param cred Subject credential - @param cpipe Object to be labeled - @param oldlabel Existing pipe label - @param newlabel New label to replace existing label - @see mpo_pipe_check_label_update_t - - The subject identified by the credential has previously requested - and was authorized to relabel the pipe; this entry point allows - policies to perform the actual relabel operation. Policies should - update oldlabel using the label stored in the newlabel parameter. - -*/ + ); +/** + * @brief Update a pipe label + * @param cred Subject credential + * @param cpipe Object to be labeled + * @param oldlabel Existing pipe label + * @param newlabel New label to replace existing label + * @see mpo_pipe_check_label_update_t + * + * The subject identified by the credential has previously requested + * and was authorized to relabel the pipe; this entry point allows + * policies to perform the actual relabel operation. Policies should + * update oldlabel using the label stored in the newlabel parameter. + * + */ typedef void mpo_pipe_label_update_t( kauth_cred_t cred, struct pipe *cpipe, struct label *oldlabel, struct label *newlabel -); + ); /** - @brief Policy unload event - @param mpc MAC policy configuration - - This is the MAC Framework policy unload event. This entry point will - only be called if the module's policy configuration allows unload (if - the MPC_LOADTIME_FLAG_UNLOADOK is set). Most security policies won't - want to be unloaded; they should set their flags to prevent this - entry point from being called. - - @warning During this call, the mac policy list mutex is held, so - sleep operations cannot be performed, and calls out to other kernel - subsystems must be made with caution. - - @see MPC_LOADTIME_FLAG_UNLOADOK -*/ + * @brief Policy unload event + * @param mpc MAC policy configuration + * + * This is the MAC Framework policy unload event. This entry point will + * only be called if the module's policy configuration allows unload (if + * the MPC_LOADTIME_FLAG_UNLOADOK is set). Most security policies won't + * want to be unloaded; they should set their flags to prevent this + * entry point from being called. + * + * @warning During this call, the mac policy list mutex is held, so + * sleep operations cannot be performed, and calls out to other kernel + * subsystems must be made with caution. + * + * @see MPC_LOADTIME_FLAG_UNLOADOK + */ typedef void mpo_policy_destroy_t( struct mac_policy_conf *mpc -); + ); /** - @brief Policy initialization event - @param mpc MAC policy configuration - @see mac_policy_register - @see mpo_policy_initbsd_t - - This is the MAC Framework policy initialization event. This entry - point is called during mac_policy_register, when the policy module - is first registered with the MAC Framework. This is often done very - early in the boot process, after the kernel Mach subsystem has been - initialized, but prior to the BSD subsystem being initialized. - Since the kernel BSD services are not yet available, it is possible - that some initialization must occur later, possibly in the - mpo_policy_initbsd_t policy entry point, such as registering BSD system - controls (sysctls). Policy modules loaded at boot time will be - registered and initialized before labeled Mach objects are created. 
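The externalize/internalize pair documented above round-trips a label through text. A minimal sketch, assuming the same hypothetical SEALED_SLOT and a hypothetical namespace named "example"; sbuf_printf() appends to the string buffer supplied by the framework.

    #include <string.h>
    #include <sys/errno.h>
    #include <sys/sbuf.h>
    #include <security/mac_policy.h>

    #define SEALED_SLOT 1   /* hypothetical slot, as above */

    /* Emit the label word for the hypothetical "example" namespace. */
    static int
    example_pipe_label_externalize(struct label *label, char *element_name,
        struct sbuf *sb)
    {
            if (strcmp(element_name, "example") != 0) {
                    return 0;       /* not our namespace; nothing to add */
            }
            if (sbuf_printf(sb, "%ld", (long)mac_label_get(label, SEALED_SLOT)) == -1) {
                    return ENOMEM;  /* assumption: treat buffer overflow as an error */
            }
            return 0;
    }

    /* Parse the text form produced above back into the label. */
    static int
    example_pipe_label_internalize(struct label *label, char *element_name,
        char *element_data)
    {
            if (strcmp(element_name, "example") != 0) {
                    return 0;
            }
            mac_label_set(label, SEALED_SLOT, element_data[0] == '1' ? 1 : 0);
            return 0;
    }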
- - @warning During this call, the mac policy list mutex is held, so - sleep operations cannot be performed, and calls out to other kernel - subsystems must be made with caution. -*/ + * @brief Policy initialization event + * @param mpc MAC policy configuration + * @see mac_policy_register + * @see mpo_policy_initbsd_t + * + * This is the MAC Framework policy initialization event. This entry + * point is called during mac_policy_register, when the policy module + * is first registered with the MAC Framework. This is often done very + * early in the boot process, after the kernel Mach subsystem has been + * initialized, but prior to the BSD subsystem being initialized. + * Since the kernel BSD services are not yet available, it is possible + * that some initialization must occur later, possibly in the + * mpo_policy_initbsd_t policy entry point, such as registering BSD system + * controls (sysctls). Policy modules loaded at boot time will be + * registered and initialized before labeled Mach objects are created. + * + * @warning During this call, the mac policy list mutex is held, so + * sleep operations cannot be performed, and calls out to other kernel + * subsystems must be made with caution. + */ typedef void mpo_policy_init_t( struct mac_policy_conf *mpc -); + ); /** - @brief Policy BSD initialization event - @param mpc MAC policy configuration - @see mpo_policy_init_t - - This entry point is called after the kernel BSD subsystem has been - initialized. By this point, the module should already be loaded, - registered, and initialized. Since policy modules are initialized - before kernel BSD services are available, this second initialization - phase is necessary. At this point, BSD services (memory management, - synchronization primitives, vfs, etc.) are available, but the first - process has not yet been created. Mach-related objects and tasks - will already be fully initialized and may be in use--policies requiring - ubiquitous labeling may also want to implement mpo_policy_init_t. - - @warning During this call, the mac policy list mutex is held, so - sleep operations cannot be performed, and calls out to other kernel - subsystems must be made with caution. -*/ + * @brief Policy BSD initialization event + * @param mpc MAC policy configuration + * @see mpo_policy_init_t + * + * This entry point is called after the kernel BSD subsystem has been + * initialized. By this point, the module should already be loaded, + * registered, and initialized. Since policy modules are initialized + * before kernel BSD services are available, this second initialization + * phase is necessary. At this point, BSD services (memory management, + * synchronization primitives, vfs, etc.) are available, but the first + * process has not yet been created. Mach-related objects and tasks + * will already be fully initialized and may be in use--policies requiring + * ubiquitous labeling may also want to implement mpo_policy_init_t. + * + * @warning During this call, the mac policy list mutex is held, so + * sleep operations cannot be performed, and calls out to other kernel + * subsystems must be made with caution. + */ typedef void mpo_policy_initbsd_t( struct mac_policy_conf *mpc -); + ); /** - @brief Policy extension service - @param p Calling process - @param call Policy-specific syscall number - @param arg Pointer to syscall arguments - - This entry point provides a policy-multiplexed system call so that - policies may provide additional services to user processes without - registering specific system calls. 
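To show how mpo_policy_init_t and mpo_policy_initbsd_t fit together, here is a hedged sketch of a policy registration. All names are hypothetical; the split matters because only the initbsd phase may rely on BSD services.

    #include <security/mac_policy.h>

    static void
    example_policy_init(__unused struct mac_policy_conf *mpc)
    {
            /* Mach-only environment: no BSD services, policy list mutex held. */
    }

    static void
    example_policy_initbsd(__unused struct mac_policy_conf *mpc)
    {
            /* BSD is up: safe to register sysctls, use VFS, and so on. */
    }

    static struct mac_policy_ops example_ops = {
            .mpo_policy_init    = example_policy_init,
            .mpo_policy_initbsd = example_policy_initbsd,
    };

    static mac_policy_handle_t example_handle;

    static struct mac_policy_conf example_conf = {
            .mpc_name           = "example",
            .mpc_fullname       = "Example MAC policy",
            .mpc_ops            = &example_ops,
            .mpc_loadtime_flags = 0, /* MPC_LOADTIME_FLAG_UNLOADOK omitted: not unloadable */
    };

    /* Typically called from a kext's start routine. */
    static int
    example_start(void)
    {
            return mac_policy_register(&example_conf, &example_handle, NULL);
    }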
The policy name provided during - registration is used to demux calls from userland, and the arguments - will be forwarded to this entry point. When implementing new - services, security modules should be sure to invoke appropriate - access control checks from the MAC framework as needed. For - example, if a policy implements an augmented signal functionality, - it should call the necessary signal access control checks to invoke - the MAC framework and other registered policies. - - @warning Since the format and contents of the policy-specific - arguments are unknown to the MAC Framework, modules must perform the - required copyin() of the syscall data on their own. No policy - mediation is performed, so policies must perform any necessary - access control checks themselves. If multiple policies are loaded, - they will currently be unable to mediate calls to other policies. - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. -*/ + * @brief Policy extension service + * @param p Calling process + * @param call Policy-specific syscall number + * @param arg Pointer to syscall arguments + * + * This entry point provides a policy-multiplexed system call so that + * policies may provide additional services to user processes without + * registering specific system calls. The policy name provided during + * registration is used to demux calls from userland, and the arguments + * will be forwarded to this entry point. When implementing new + * services, security modules should be sure to invoke appropriate + * access control checks from the MAC framework as needed. For + * example, if a policy implements an augmented signal functionality, + * it should call the necessary signal access control checks to invoke + * the MAC framework and other registered policies. + * + * @warning Since the format and contents of the policy-specific + * arguments are unknown to the MAC Framework, modules must perform the + * required copyin() of the syscall data on their own. No policy + * mediation is performed, so policies must perform any necessary + * access control checks themselves. If multiple policies are loaded, + * they will currently be unable to mediate calls to other policies. + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success. + */ typedef int mpo_policy_syscall_t( struct proc *p, int call, user_addr_t arg -); + ); /** - @brief Access control check for POSIX semaphore create - @param cred Subject credential - @param name String name of the semaphore - - Determine whether the subject identified by the credential can create - a POSIX semaphore specified by name. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX semaphore create + * @param cred Subject credential + * @param name String name of the semaphore + * + * Determine whether the subject identified by the credential can create + * a POSIX semaphore specified by name. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. 
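A sketch of an mpo_policy_syscall_t handler. As the warning above notes, the framework performs no copyin() on the policy's behalf; the sub-call number and argument struct here are hypothetical, and userland would reach this handler through __mac_syscall() using the policy's registered name.

    #include <sys/errno.h>
    #include <sys/systm.h>           /* copyin() */
    #include <security/mac_policy.h>

    #define EXAMPLE_CALL_GET_FLAG 1  /* hypothetical sub-call number */

    struct example_args {            /* hypothetical userland argument block */
            int flag;
    };

    /* Demuxed from __mac_syscall("example", call, arg) in userland. */
    static int
    example_policy_syscall(__unused struct proc *p, int call, user_addr_t arg)
    {
            struct example_args args;
            int error;

            if (call != EXAMPLE_CALL_GET_FLAG) {
                    return EINVAL;
            }
            /* The framework does no copyin for us; fetch the args ourselves. */
            error = copyin(arg, &args, sizeof(args));
            if (error) {
                    return error;
            }
            /* ... validate args.flag and perform any needed access checks ... */
            return 0;
    }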
+ */ typedef int mpo_posixsem_check_create_t( kauth_cred_t cred, const char *name -); + ); /** - @brief Access control check for POSIX semaphore open - @param cred Subject credential - @param ps Pointer to semaphore information structure - @param semlabel Label associated with the semaphore - - Determine whether the subject identified by the credential can open - the named POSIX semaphore with label semlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX semaphore open + * @param cred Subject credential + * @param ps Pointer to semaphore information structure + * @param semlabel Label associated with the semaphore + * + * Determine whether the subject identified by the credential can open + * the named POSIX semaphore with label semlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixsem_check_open_t( kauth_cred_t cred, struct pseminfo *ps, struct label *semlabel -); + ); /** - @brief Access control check for POSIX semaphore post - @param cred Subject credential - @param ps Pointer to semaphore information structure - @param semlabel Label associated with the semaphore - - Determine whether the subject identified by the credential can unlock - the named POSIX semaphore with label semlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX semaphore post + * @param cred Subject credential + * @param ps Pointer to semaphore information structure + * @param semlabel Label associated with the semaphore + * + * Determine whether the subject identified by the credential can unlock + * the named POSIX semaphore with label semlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixsem_check_post_t( kauth_cred_t cred, struct pseminfo *ps, struct label *semlabel -); + ); /** - @brief Access control check for POSIX semaphore unlink - @param cred Subject credential - @param ps Pointer to semaphore information structure - @param semlabel Label associated with the semaphore - @param name String name of the semaphore - - Determine whether the subject identified by the credential can remove - the named POSIX semaphore with label semlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX semaphore unlink + * @param cred Subject credential + * @param ps Pointer to semaphore information structure + * @param semlabel Label associated with the semaphore + * @param name String name of the semaphore + * + * Determine whether the subject identified by the credential can remove + * the named POSIX semaphore with label semlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixsem_check_unlink_t( kauth_cred_t cred, struct pseminfo *ps, struct label *semlabel, const char *name -); + ); /** - @brief Access control check for POSIX semaphore wait - @param cred Subject credential - @param ps Pointer to semaphore information structure - @param semlabel Label associated with the semaphore - - Determine whether the subject identified by the credential can lock - the named POSIX semaphore with label semlabel. 
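For the semaphore checks above, a minimal sketch of an mpo_posixsem_check_create_t hook; the "app." name-prefix rule is purely hypothetical.

    #include <string.h>
    #include <sys/errno.h>
    #include <security/mac_policy.h>

    /*
     * Hypothetical rule: only semaphore names beginning with "app."
     * may be created; everything else is rejected outright.
     */
    static int
    example_posixsem_check_create(__unused kauth_cred_t cred, const char *name)
    {
            if (strncmp(name, "app.", 4) != 0) {
                    return EPERM;
            }
            return 0;
    }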
- - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX semaphore wait + * @param cred Subject credential + * @param ps Pointer to semaphore information structure + * @param semlabel Label associated with the semaphore + * + * Determine whether the subject identified by the credential can lock + * the named POSIX semaphore with label semlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixsem_check_wait_t( kauth_cred_t cred, struct pseminfo *ps, struct label *semlabel -); + ); /** - @brief Create a POSIX semaphore label - @param cred Subject credential - @param ps Pointer to semaphore information structure - @param semlabel Label to associate with the new semaphore - @param name String name of the semaphore - - Label a new POSIX semaphore. The label was previously - initialized and associated with the semaphore. At this time, an - appropriate initial label value should be assigned to the object and - stored in semalabel. -*/ + * @brief Create a POSIX semaphore label + * @param cred Subject credential + * @param ps Pointer to semaphore information structure + * @param semlabel Label to associate with the new semaphore + * @param name String name of the semaphore + * + * Label a new POSIX semaphore. The label was previously + * initialized and associated with the semaphore. At this time, an + * appropriate initial label value should be assigned to the object and + * stored in semlabel. + */ typedef void mpo_posixsem_label_associate_t( kauth_cred_t cred, struct pseminfo *ps, struct label *semlabel, const char *name -); + ); /** - @brief Destroy POSIX semaphore label - @param label The label to be destroyed - - Destroy a POSIX semaphore label. Since the object is - going out of scope, policy modules should free any internal storage - associated with the label so that it may be destroyed. -*/ + * @brief Destroy POSIX semaphore label + * @param label The label to be destroyed + * + * Destroy a POSIX semaphore label. Since the object is + * going out of scope, policy modules should free any internal storage + * associated with the label so that it may be destroyed. + */ typedef void mpo_posixsem_label_destroy_t( struct label *label -); + ); /** - @brief Initialize POSIX semaphore label - @param label New label to initialize - - Initialize the label for a newly instantiated POSIX semaphore. Sleeping - is permitted. -*/ + * @brief Initialize POSIX semaphore label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated POSIX semaphore. Sleeping + * is permitted. + */ typedef void mpo_posixsem_label_init_t( struct label *label -); + ); /** - @brief Access control check for POSIX shared memory region create - @param cred Subject credential - @param name String name of the shared memory region - - Determine whether the subject identified by the credential can create - the POSIX shared memory region referenced by name. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX shared memory region create + * @param cred Subject credential + * @param name String name of the shared memory region + * + * Determine whether the subject identified by the credential can create + * the POSIX shared memory region referenced by name.
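The label_init/label_associate/label_destroy triple above forms the label lifecycle for a semaphore. A sketch that stores the creating uid directly in the label word, assuming a hypothetical OWNER_SLOT slot index; a policy that allocated storage in init would free it in destroy.

    #include <sys/kauth.h>
    #include <security/mac_policy.h>

    #define OWNER_SLOT 2    /* hypothetical slot reserved by this policy */

    static void
    example_posixsem_label_init(struct label *label)
    {
            mac_label_set(label, OWNER_SLOT, 0);
    }

    /* Record the creating uid as the initial label value. */
    static void
    example_posixsem_label_associate(kauth_cred_t cred, __unused struct pseminfo *ps,
        struct label *semlabel, __unused const char *name)
    {
            mac_label_set(semlabel, OWNER_SLOT, (intptr_t)kauth_cred_getuid(cred));
    }

    static void
    example_posixsem_label_destroy(struct label *label)
    {
            /* Nothing allocated here; a policy holding storage would free it now. */
            mac_label_set(label, OWNER_SLOT, 0);
    }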
+ * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixshm_check_create_t( kauth_cred_t cred, const char *name -); -/** - @brief Access control check for mapping POSIX shared memory - @param cred Subject credential - @param ps Pointer to shared memory information structure - @param shmlabel Label associated with the shared memory region - @param prot mmap protections; see mmap(2) - @param flags shmat flags; see shmat(2) - - Determine whether the subject identified by the credential can map - the POSIX shared memory segment associated with shmlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for mapping POSIX shared memory + * @param cred Subject credential + * @param ps Pointer to shared memory information structure + * @param shmlabel Label associated with the shared memory region + * @param prot mmap protections; see mmap(2) + * @param flags shmat flags; see shmat(2) + * + * Determine whether the subject identified by the credential can map + * the POSIX shared memory segment associated with shmlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixshm_check_mmap_t( kauth_cred_t cred, struct pshminfo *ps, struct label *shmlabel, int prot, int flags -); + ); /** - @brief Access control check for POSIX shared memory region open - @param cred Subject credential - @param ps Pointer to shared memory information structure - @param shmlabel Label associated with the shared memory region - @param fflags shm_open(2) open flags ('fflags' encoded) - - Determine whether the subject identified by the credential can open - the POSIX shared memory region. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX shared memory region open + * @param cred Subject credential + * @param ps Pointer to shared memory information structure + * @param shmlabel Label associated with the shared memory region + * @param fflags shm_open(2) open flags ('fflags' encoded) + * + * Determine whether the subject identified by the credential can open + * the POSIX shared memory region. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixshm_check_open_t( kauth_cred_t cred, struct pshminfo *ps, struct label *shmlabel, int fflags -); + ); /** - @brief Access control check for POSIX shared memory stat - @param cred Subject credential - @param ps Pointer to shared memory information structure - @param shmlabel Label associated with the shared memory region - - Determine whether the subject identified by the credential can obtain - status for the POSIX shared memory segment associated with shmlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX shared memory stat + * @param cred Subject credential + * @param ps Pointer to shared memory information structure + * @param shmlabel Label associated with the shared memory region + * + * Determine whether the subject identified by the credential can obtain + * status for the POSIX shared memory segment associated with shmlabel. 
+ * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixshm_check_stat_t( kauth_cred_t cred, struct pshminfo *ps, struct label *shmlabel -); + ); /** - @brief Access control check for POSIX shared memory truncate - @param cred Subject credential - @param ps Pointer to shared memory information structure - @param shmlabel Label associated with the shared memory region - @param len Length to truncate or extend shared memory segment - - Determine whether the subject identified by the credential can truncate - or extend (to len) the POSIX shared memory segment associated with shmlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX shared memory truncate + * @param cred Subject credential + * @param ps Pointer to shared memory information structure + * @param shmlabel Label associated with the shared memory region + * @param len Length to truncate or extend shared memory segment + * + * Determine whether the subject identified by the credential can truncate + * or extend (to len) the POSIX shared memory segment associated with shmlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixshm_check_truncate_t( kauth_cred_t cred, struct pshminfo *ps, struct label *shmlabel, off_t len -); + ); /** - @brief Access control check for POSIX shared memory unlink - @param cred Subject credential - @param ps Pointer to shared memory information structure - @param shmlabel Label associated with the shared memory region - @param name String name of the shared memory region - - Determine whether the subject identified by the credential can delete - the POSIX shared memory segment associated with shmlabel. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for POSIX shared memory unlink + * @param cred Subject credential + * @param ps Pointer to shared memory information structure + * @param shmlabel Label associated with the shared memory region + * @param name String name of the shared memory region + * + * Determine whether the subject identified by the credential can delete + * the POSIX shared memory segment associated with shmlabel. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_posixshm_check_unlink_t( kauth_cred_t cred, struct pshminfo *ps, struct label *shmlabel, const char *name -); + ); /** - @brief Create a POSIX shared memory region label - @param cred Subject credential - @param ps Pointer to shared memory information structure - @param shmlabel Label to associate with the new shared memory region - @param name String name of the shared memory region - - Label a new POSIX shared memory region. The label was previously - initialized and associated with the shared memory region. At this - time, an appropriate initial label value should be assigned to the - object and stored in shmlabel. -*/ + * @brief Create a POSIX shared memory region label + * @param cred Subject credential + * @param ps Pointer to shared memory information structure + * @param shmlabel Label to associate with the new shared memory region + * @param name String name of the shared memory region + * + * Label a new POSIX shared memory region. 
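A sketch of the truncate check above, with a purely hypothetical size cap.

    #include <sys/errno.h>
    #include <security/mac_policy.h>

    #define EXAMPLE_SHM_MAX (64 * 1024 * 1024)   /* hypothetical 64 MB cap */

    /* Refuse to let shared memory segments grow beyond a fixed budget. */
    static int
    example_posixshm_check_truncate(__unused kauth_cred_t cred,
        __unused struct pshminfo *ps, __unused struct label *shmlabel, off_t len)
    {
            if (len > (off_t)EXAMPLE_SHM_MAX) {
                    return EPERM;
            }
            return 0;
    }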
The label was previously + * initialized and associated with the shared memory region. At this + * time, an appropriate initial label value should be assigned to the + * object and stored in shmlabel. + */ typedef void mpo_posixshm_label_associate_t( kauth_cred_t cred, struct pshminfo *ps, struct label *shmlabel, const char *name -); + ); /** - @brief Destroy POSIX shared memory label - @param label The label to be destroyed - - Destroy a POSIX shared memory region label. Since the - object is going out of scope, policy modules should free any - internal storage associated with the label so that it may be - destroyed. -*/ + * @brief Destroy POSIX shared memory label + * @param label The label to be destroyed + * + * Destroy a POSIX shared memory region label. Since the + * object is going out of scope, policy modules should free any + * internal storage associated with the label so that it may be + * destroyed. + */ typedef void mpo_posixshm_label_destroy_t( struct label *label -); + ); /** - @brief Initialize POSIX Shared Memory region label - @param label New label to initialize - - Initialize the label for newly a instantiated POSIX Shared Memory - region. Sleeping is permitted. -*/ + * @brief Initialize POSIX Shared Memory region label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated POSIX Shared Memory + * region. Sleeping is permitted. + */ typedef void mpo_posixshm_label_init_t( struct label *label -); + ); /** - @brief Access control check for privileged operations - @param cred Subject credential - @param priv Requested privilege (see sys/priv.h) - - Determine whether the subject identified by the credential can perform - a privileged operation. Privileged operations are allowed if the cred - is the superuser or any policy returns zero for mpo_priv_grant, unless - any policy returns nonzero for mpo_priv_check. - - @return Return 0 if access is granted, otherwise EPERM should be returned. -*/ + * @brief Access control check for privileged operations + * @param cred Subject credential + * @param priv Requested privilege (see sys/priv.h) + * + * Determine whether the subject identified by the credential can perform + * a privileged operation. Privileged operations are allowed if the cred + * is the superuser or any policy returns zero for mpo_priv_grant, unless + * any policy returns nonzero for mpo_priv_check. + * + * @return Return 0 if access is granted, otherwise EPERM should be returned. + */ typedef int mpo_priv_check_t( kauth_cred_t cred, int priv -); + ); /** - @brief Grant regular users the ability to perform privileged operations - @param cred Subject credential - @param priv Requested privilege (see sys/priv.h) - - Determine whether the subject identified by the credential should be - allowed to perform a privileged operation that in the absense of any - MAC policy it would not be able to perform. Privileged operations are - allowed if the cred is the superuser or any policy returns zero for - mpo_priv_grant, unless any policy returns nonzero for mpo_priv_check. - - Unlike other MAC hooks which can only reduce the privilege of a - credential, this hook raises the privilege of a credential when it - returns 0. Extreme care must be taken when implementing this hook to - avoid undermining the security of the system. - - @return Return 0 if additional privilege is granted, otherwise EPERM - should be returned.
-*/ + * @brief Grant regular users the ability to perform privileged operations + * @param cred Subject credential + * @param priv Requested privilege (see sys/priv.h) + * + * Determine whether the subject identified by the credential should be + * allowed to perform a privileged operation that in the absence of any + * MAC policy it would not be able to perform. Privileged operations are + * allowed if the cred is the superuser or any policy returns zero for + * mpo_priv_grant, unless any policy returns nonzero for mpo_priv_check. + * + * Unlike other MAC hooks which can only reduce the privilege of a + * credential, this hook raises the privilege of a credential when it + * returns 0. Extreme care must be taken when implementing this hook to + * avoid undermining the security of the system. + * + * @return Return 0 if additional privilege is granted, otherwise EPERM + * should be returned. + */ typedef int mpo_priv_grant_t( kauth_cred_t cred, int priv -); + ); /** - @brief Access control check for debugging process - @param cred Subject credential - @param proc Object process - - Determine whether the subject identified by the credential can debug - the passed process. This call may be made in a number of situations, - including use of the ptrace(2) and ktrace(2) APIs, as well as for some - types of procfs operations. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch, - EPERM for lack of privilege, or ESRCH to hide visibility of the target. -*/ + * @brief Access control check for debugging process + * @param cred Subject credential + * @param proc Object process + * + * Determine whether the subject identified by the credential can debug + * the passed process. This call may be made in a number of situations, + * including use of the ptrace(2) and ktrace(2) APIs, as well as for some + * types of procfs operations. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch, + * EPERM for lack of privilege, or ESRCH to hide visibility of the target. + */ typedef int mpo_proc_check_debug_t( kauth_cred_t cred, struct proc *proc -); + ); /** - @brief Access control over fork - @param cred Subject credential - @param proc Subject process trying to fork - - Determine whether the subject identified is allowed to fork. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control over fork + * @param cred Subject credential + * @param proc Subject process trying to fork + * + * Determine whether the subject identified is allowed to fork. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_fork_t( kauth_cred_t cred, struct proc *proc -); + ); /** - @brief Access control check for setting host special ports. - @param cred Subject credential - @param id The host special port to set - @param port The new value to set for the special port - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for setting host special ports.
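Because mpo_priv_grant_t raises privilege, an implementation should match as narrowly as possible. A sketch assuming a hypothetical trusted daemon uid; PRIV_ADJTIME is one of the privilege identifiers from sys/priv.h referenced above.

    #include <sys/errno.h>
    #include <sys/priv.h>
    #include <sys/kauth.h>
    #include <security/mac_policy.h>

    #define TIME_DAEMON_UID 266   /* hypothetical uid of a trusted time daemon */

    /*
     * Grant a single extra privilege to one specific uid and nothing else.
     * Returning 0 here RAISES privilege, so the match must be exact.
     */
    static int
    example_priv_grant(kauth_cred_t cred, int priv)
    {
            if (priv == PRIV_ADJTIME &&
                kauth_cred_getuid(cred) == TIME_DAEMON_UID) {
                    return 0;       /* grant */
            }
            return EPERM;           /* leave privilege unchanged */
    }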
+ * @param cred Subject credential + * @param id The host special port to set + * @param port The new value to set for the special port + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_set_host_special_port_t( kauth_cred_t cred, int id, - struct ipc_port *port -); + struct ipc_port *port + ); /** - @brief Access control check for setting host exception ports. - @param cred Subject credential - @param exception Exception port to set - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for setting host exception ports. + * @param cred Subject credential + * @param exception Exception port to set + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_set_host_exception_port_t( kauth_cred_t cred, unsigned int exception -); + ); /** - @brief Access control over pid_suspend and pid_resume - @param cred Subject credential - @param proc Subject process trying to run pid_suspend or pid_resume - @param sr Call is suspend (0) or resume (1) - - Determine whether the subject identified is allowed to suspend or resume - other processes. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control over pid_suspend and pid_resume + * @param cred Subject credential + * @param proc Subject process trying to run pid_suspend or pid_resume + * @param sr Call is suspend (0) or resume (1) + * + * Determine whether the subject identified is allowed to suspend or resume + * other processes. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_suspend_resume_t( kauth_cred_t cred, struct proc *proc, int sr -); + ); /** - @brief Access control check for retrieving audit information - @param cred Subject credential - - Determine whether the subject identified by the credential can get - audit information such as the audit user ID, the preselection mask, - the terminal ID and the audit session ID, using the getaudit() system call. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for retrieving audit information + * @param cred Subject credential + * + * Determine whether the subject identified by the credential can get + * audit information such as the audit user ID, the preselection mask, + * the terminal ID and the audit session ID, using the getaudit() system call. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_getaudit_t( kauth_cred_t cred -); + ); /** - @brief Access control check for retrieving audit user ID - @param cred Subject credential - - Determine whether the subject identified by the credential can get - the user identity being used by the auditing system, using the getauid() - system call. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for retrieving audit user ID + * @param cred Subject credential + * + * Determine whether the subject identified by the credential can get + * the user identity being used by the auditing system, using the getauid() + * system call. 
+ * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_getauid_t( kauth_cred_t cred -); + ); /** - @brief Access control check for retrieving Login Context ID - @param p0 Calling process - @param p Effected process - @param pid syscall PID argument - - Determine if getlcid(2) system call is permitted. - - Information returned by this system call is similar to that returned via - process listings etc. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for retrieving Login Context ID + * @param p0 Calling process + * @param p Affected process + * @param pid syscall PID argument + * + * Determine if the getlcid(2) system call is permitted. + * + * Information returned by this system call is similar to that returned via + * process listings etc. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_getlcid_t( struct proc *p0, struct proc *p, pid_t pid -); + ); /** - @brief Access control check for retrieving ledger information - @param cred Subject credential - @param target Object process - @param op ledger operation - - Determine if ledger(2) system call is permitted. - - Information returned by this system call is similar to that returned via - process listings etc. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for retrieving ledger information + * @param cred Subject credential + * @param target Object process + * @param op ledger operation + * + * Determine if the ledger(2) system call is permitted. + * + * Information returned by this system call is similar to that returned via + * process listings etc. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_ledger_t( kauth_cred_t cred, struct proc *target, int op -); + ); /** - @brief Access control check for retrieving process information. - @param cred Subject credential - @param target Target process (may be null, may be zombie) - - Determine if a credential has permission to access process information as defined - by call number and flavor on target process - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for retrieving process information. + * @param cred Subject credential + * @param target Target process (may be null, may be zombie) + * + * Determine if a credential has permission to access process information as defined + * by call number and flavor on the target process. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_proc_info_t( kauth_cred_t cred, struct proc *target, int callnum, int flavor -); + ); /** - @brief Access control check for retrieving code signing information. - @param cred Subject credential - @param target Target process - @param op Code signing operation being performed - - Determine whether the subject identified by the credential should be - allowed to get code signing information about the target process. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for retrieving code signing information.
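A sketch of mpo_proc_check_proc_info_t that uses ESRCH to mask a process's existence, as the surrounding checks suggest. HIDDEN_PID is hypothetical, and the null-target case from the docs is handled first.

    #include <sys/errno.h>
    #include <sys/proc.h>
    #include <sys/kauth.h>
    #include <security/mac_policy.h>

    #define HIDDEN_PID 123   /* hypothetical pid this policy conceals */

    /* Hide one process from proc_info() callers other than root. */
    static int
    example_proc_check_proc_info(kauth_cred_t cred, struct proc *target,
        __unused int callnum, __unused int flavor)
    {
            if (target == NULL) {    /* target may be null per the docs */
                    return 0;
            }
            if (proc_pid(target) == HIDDEN_PID && kauth_cred_getuid(cred) != 0) {
                    return ESRCH;    /* deny while masking the target's existence */
            }
            return 0;
    }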
+ * @param cred Subject credential + * @param target Target process + * @param op Code signing operation being performed + * + * Determine whether the subject identified by the credential should be + * allowed to get code signing information about the target process. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_get_cs_info_t( kauth_cred_t cred, struct proc *target, unsigned int op -); + ); /** - @brief Access control check for setting code signing information. - @param cred Subject credential - @param target Target process - @param op Code signing operation being performed. - - Determine whether the subject identified by the credential should be - allowed to set code signing information about the target process. - - @return Return 0 if permission is granted, otherwise an appropriate - value of errno should be returned. -*/ + * @brief Access control check for setting code signing information. + * @param cred Subject credential + * @param target Target process + * @param op Code signing operation being performed. + * + * Determine whether the subject identified by the credential should be + * allowed to set code signing information about the target process. + * + * @return Return 0 if permission is granted, otherwise an appropriate + * value of errno should be returned. + */ typedef int mpo_proc_check_set_cs_info_t( kauth_cred_t cred, struct proc *target, unsigned int op -); -/** - @brief Access control check for mmap MAP_ANON - @param proc User process requesting the memory - @param cred Subject credential - @param u_addr Start address of the memory range - @param u_size Length address of the memory range - @param prot mmap protections; see mmap(2) - @param flags Type of mapped object; see mmap(2) - @param maxprot Maximum rights - - Determine whether the subject identified by the credential should be - allowed to obtain anonymous memory using the specified flags and - protections on the new mapping. MAP_ANON will always be present in the - flags. Certain combinations of flags with a non-NULL addr may - cause a mapping to be rejected before this hook is called. The maxprot field - holds the maximum permissions on the new mapping, a combination of - VM_PROT_READ, VM_PROT_WRITE and VM_PROT_EXECUTE. To avoid overriding prior - access control checks, a policy should only remove flags from maxprot. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for mmap MAP_ANON + * @param proc User process requesting the memory + * @param cred Subject credential + * @param u_addr Start address of the memory range + * @param u_size Length of the memory range + * @param prot mmap protections; see mmap(2) + * @param flags Type of mapped object; see mmap(2) + * @param maxprot Maximum rights + * + * Determine whether the subject identified by the credential should be + * allowed to obtain anonymous memory using the specified flags and + * protections on the new mapping. MAP_ANON will always be present in the + * flags. Certain combinations of flags with a non-NULL addr may + * cause a mapping to be rejected before this hook is called. The maxprot field + * holds the maximum permissions on the new mapping, a combination of + * VM_PROT_READ, VM_PROT_WRITE and VM_PROT_EXECUTE.
To avoid overriding prior + * access control checks, a policy should only remove flags from maxprot. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EPERM for lack of privilege. + */ typedef int mpo_proc_check_map_anon_t( struct proc *proc, kauth_cred_t cred, @@ -2957,1575 +2957,1575 @@ typedef int mpo_proc_check_map_anon_t( int prot, int flags, int *maxprot -); -/** - @brief Access control check for setting memory protections - @param cred Subject credential - @param proc User process requesting the change - @param addr Start address of the memory range - @param size Length address of the memory range - @param prot Memory protections, see mmap(2) - - Determine whether the subject identified by the credential should - be allowed to set the specified memory protections on memory mapped - in the process proc. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for setting memory protections + * @param cred Subject credential + * @param proc User process requesting the change + * @param addr Start address of the memory range + * @param size Length of the memory range + * @param prot Memory protections, see mmap(2) + * + * Determine whether the subject identified by the credential should + * be allowed to set the specified memory protections on memory mapped + * in the process proc. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_mprotect_t( kauth_cred_t cred, struct proc *proc, user_addr_t addr, user_size_t size, int prot -); + ); /** - @brief Access control check for changing scheduling parameters - @param cred Subject credential - @param proc Object process - - Determine whether the subject identified by the credential can change - the scheduling parameters of the passed process. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch, - EPERM for lack of privilege, or ESRCH to limit visibility. -*/ + * @brief Access control check for changing scheduling parameters + * @param cred Subject credential + * @param proc Object process + * + * Determine whether the subject identified by the credential can change + * the scheduling parameters of the passed process. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch, + * EPERM for lack of privilege, or ESRCH to limit visibility. + */ typedef int mpo_proc_check_sched_t( kauth_cred_t cred, struct proc *proc -); + ); /** - @brief Access control check for setting audit information - @param cred Subject credential - @param ai Audit information - - Determine whether the subject identified by the credential can set - audit information such as the the preselection mask, the terminal ID - and the audit session ID, using the setaudit() system call. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned.
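Two sketches tying the mmap/mprotect hooks above together: one trims execute from maxprot (only ever removing bits, per the docs), the other denies write-plus-execute protections. Both are illustrative policy choices, not xnu behavior.

    #include <sys/errno.h>
    #include <sys/mman.h>            /* PROT_WRITE, PROT_EXEC */
    #include <mach/vm_prot.h>        /* VM_PROT_EXECUTE */
    #include <security/mac_policy.h>

    /* Strip execute from the maximum protections of anonymous mappings. */
    static int
    example_proc_check_map_anon(__unused struct proc *proc,
        __unused kauth_cred_t cred, __unused user_addr_t u_addr,
        __unused user_size_t u_size, __unused int prot, __unused int flags,
        int *maxprot)
    {
            /* Only remove bits from maxprot, never add them (see above). */
            *maxprot &= ~VM_PROT_EXECUTE;
            return 0;
    }

    /* Deny making an existing mapping simultaneously writable and executable. */
    static int
    example_proc_check_mprotect(__unused kauth_cred_t cred,
        __unused struct proc *proc, __unused user_addr_t addr,
        __unused user_size_t size, int prot)
    {
            if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC)) {
                    return EPERM;
            }
            return 0;
    }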
-*/ + * @brief Access control check for setting audit information + * @param cred Subject credential + * @param ai Audit information + * + * Determine whether the subject identified by the credential can set + * audit information such as the preselection mask, the terminal ID + * and the audit session ID, using the setaudit() system call. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_setaudit_t( kauth_cred_t cred, struct auditinfo_addr *ai -); + ); /** - @brief Access control check for setting audit user ID - @param cred Subject credential - @param auid Audit user ID - - Determine whether the subject identified by the credential can set - the user identity used by the auditing system, using the setauid() - system call. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for setting audit user ID + * @param cred Subject credential + * @param auid Audit user ID + * + * Determine whether the subject identified by the credential can set + * the user identity used by the auditing system, using the setauid() + * system call. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_setauid_t( kauth_cred_t cred, uid_t auid -); + ); /** - @brief Access control check for setting the Login Context - @param p0 Calling process - @param p Effected process - @param pid syscall PID argument - @param lcid syscall LCID argument - - Determine if setlcid(2) system call is permitted. - - See xnu/bsd/kern/kern_prot.c:setlcid() implementation for example of - decoding syscall arguments to determine action desired by caller. - - Five distinct actions are possible: CREATE JOIN LEAVE ADOPT ORPHAN - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for setting the Login Context + * @param p0 Calling process + * @param p Affected process + * @param pid syscall PID argument + * @param lcid syscall LCID argument + * + * Determine if the setlcid(2) system call is permitted. + * + * See the xnu/bsd/kern/kern_prot.c:setlcid() implementation for an example of + * decoding syscall arguments to determine the action desired by the caller. + * + * Five distinct actions are possible: CREATE JOIN LEAVE ADOPT ORPHAN + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_setlcid_t( struct proc *p0, struct proc *p, pid_t pid, pid_t lcid -); + ); /** - @brief Access control check for delivering signal - @param cred Subject credential - @param proc Object process - @param signum Signal number; see kill(2) - - Determine whether the subject identified by the credential can deliver - the passed signal to the passed process. - - @warning Programs typically expect to be able to send and receive - signals as part or their normal process lifecycle; caution should be - exercised when implementing access controls over signal events. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch, - EPERM for lack of privilege, or ESRCH to limit visibility.
-*/ + * @brief Access control check for delivering signal + * @param cred Subject credential + * @param proc Object process + * @param signum Signal number; see kill(2) + * + * Determine whether the subject identified by the credential can deliver + * the passed signal to the passed process. + * + * @warning Programs typically expect to be able to send and receive + * signals as part of their normal process lifecycle; caution should be + * exercised when implementing access controls over signal events. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch, + * EPERM for lack of privilege, or ESRCH to limit visibility. + */ typedef int mpo_proc_check_signal_t( kauth_cred_t cred, struct proc *proc, int signum -); + ); /** - @brief Access control check for wait - @param cred Subject credential - @param proc Object process - - Determine whether the subject identified by the credential can wait - for process termination. - - @warning Caution should be exercised when implementing access - controls for wait, since programs often wait for child processes to - exit. Failure to be notified of a child process terminating may - cause the parent process to hang, or may produce zombie processes. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for wait + * @param cred Subject credential + * @param proc Object process + * + * Determine whether the subject identified by the credential can wait + * for process termination. + * + * @warning Caution should be exercised when implementing access + * controls for wait, since programs often wait for child processes to + * exit. Failure to be notified of a child process terminating may + * cause the parent process to hang, or may produce zombie processes. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_proc_check_wait_t( kauth_cred_t cred, struct proc *proc -); + ); /** - @brief Inform MAC policies that a process has exited. - @param proc Object process - - Called after all of the process's threads have terminated and - it has been removed from the process list. KPI that identifies - the process by pid will fail to find the process; KPI that - identifies the process by the object process pointer functions - normally. proc_exiting() returns true for the object process. -*/ + * @brief Inform MAC policies that a process has exited. + * @param proc Object process + * + * Called after all of the process's threads have terminated and + * it has been removed from the process list. KPI that identifies + * the process by pid will fail to find the process; KPI that + * identifies the process by the object process pointer functions + * normally. proc_exiting() returns true for the object process. + */ typedef void mpo_proc_notify_exit_t( struct proc *proc -); + ); /** - @brief Destroy process label - @param label The label to be destroyed - - Destroy a process label. Since the object is going - out of scope, policy modules should free any internal storage - associated with the label so that it may be destroyed. -*/ + * @brief Destroy process label + * @param label The label to be destroyed + * + * Destroy a process label. Since the object is going + * out of scope, policy modules should free any internal storage + * associated with the label so that it may be destroyed.
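A sketch of an mpo_proc_check_signal_t hook that heeds the warning above by staying permissive: SIGCHLD and root-originated signals always pass, and only cross-uid deliveries are denied. Obtaining the target's credential via kauth_cred_proc_ref()/kauth_cred_unref() is one reasonable approach.

    #include <sys/errno.h>
    #include <sys/proc.h>
    #include <sys/kauth.h>
    #include <sys/signal.h>
    #include <security/mac_policy.h>

    static int
    example_proc_check_signal(kauth_cred_t cred, struct proc *proc, int signum)
    {
            kauth_cred_t target_cred;
            int error = 0;

            if (signum == SIGCHLD || kauth_cred_getuid(cred) == 0) {
                    return 0;
            }
            target_cred = kauth_cred_proc_ref(proc);
            if (kauth_cred_getuid(cred) != kauth_cred_getuid(target_cred)) {
                    error = EPERM;   /* deny cross-uid signal delivery */
            }
            kauth_cred_unref(&target_cred);
            return error;
    }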
+ */ typedef void mpo_proc_label_destroy_t( struct label *label -); + ); /** - @brief Initialize process label - @param label New label to initialize - @see mpo_cred_label_init_t - - Initialize the label for a newly instantiated BSD process structure. - Normally, security policies will store the process label in the user - credential rather than here in the process structure. However, - there are some floating label policies that may need to temporarily - store a label in the process structure until it is safe to update - the user credential label. Sleeping is permitted. -*/ + * @brief Initialize process label + * @param label New label to initialize + * @see mpo_cred_label_init_t + * + * Initialize the label for a newly instantiated BSD process structure. + * Normally, security policies will store the process label in the user + * credential rather than here in the process structure. However, + * there are some floating label policies that may need to temporarily + * store a label in the process structure until it is safe to update + * the user credential label. Sleeping is permitted. + */ typedef void mpo_proc_label_init_t( struct label *label -); -/** - @brief Access control check for skywalk flow connect - @param cred Subject credential - @param flow Flow object - @param addr Remote address for flow to send data to - @param type Flow type (e.g. SOCK_STREAM or SOCK_DGRAM) - @param protocol Network protocol (e.g. IPPROTO_TCP) - - Determine whether the subject identified by the credential can - create a flow for sending data to the remote host specified by - addr. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + ); +/** + * @brief Access control check for skywalk flow connect + * @param cred Subject credential + * @param flow Flow object + * @param addr Remote address for flow to send data to + * @param type Flow type (e.g. SOCK_STREAM or SOCK_DGRAM) + * @param protocol Network protocol (e.g. IPPROTO_TCP) + * + * Determine whether the subject identified by the credential can + * create a flow for sending data to the remote host specified by + * addr. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_skywalk_flow_check_connect_t( kauth_cred_t cred, void *flow, const struct sockaddr *addr, int type, int protocol -); -/** - @brief Access control check for skywalk flow listen - @param cred Subject credential - @param flow Flow object - @param addr Local address for flow to listen on - @param type Flow type (e.g. SOCK_STREAM or SOCK_DGRAM) - @param protocol Network protocol (e.g. IPPROTO_TCP) - - Determine whether the subject identified by the credential can - create a flow for receiving data on the local address specified - by addr. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + ); +/** + * @brief Access control check for skywalk flow listen + * @param cred Subject credential + * @param flow Flow object + * @param addr Local address for flow to listen on + * @param type Flow type (e.g. SOCK_STREAM or SOCK_DGRAM) + * @param protocol Network protocol (e.g. IPPROTO_TCP) + * + * Determine whether the subject identified by the credential can + * create a flow for receiving data on the local address specified + * by addr. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned.
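A sketch of the skywalk connect check above. The port-25 rule is hypothetical, and addr needs a family check before it may be cast to a concrete sockaddr type.

    #include <sys/errno.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <security/mac_policy.h>

    /* Hypothetical rule: no outbound TCP flows to port 25 (SMTP). */
    static int
    example_skywalk_flow_check_connect(__unused kauth_cred_t cred,
        __unused void *flow, const struct sockaddr *addr, __unused int type,
        int protocol)
    {
            const struct sockaddr_in *sin;

            if (protocol != IPPROTO_TCP || addr == NULL ||
                addr->sa_family != AF_INET) {
                    return 0;
            }
            sin = (const struct sockaddr_in *)(const void *)addr;
            if (ntohs(sin->sin_port) == 25) {
                    return EPERM;
            }
            return 0;
    }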
+ */ typedef int mpo_skywalk_flow_check_listen_t( kauth_cred_t cred, void *flow, const struct sockaddr *addr, int type, int protocol -); + ); /** - @brief Access control check for socket accept - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for socket - - Determine whether the subject identified by the credential can accept() - a new connection on the socket from the host specified by addr. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for socket accept + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for socket + * + * Determine whether the subject identified by the credential can accept() + * a new connection on the socket from the host specified by addr. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_accept_t( kauth_cred_t cred, socket_t so, struct label *socklabel -); + ); /** - @brief Access control check for a pending socket accept - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for socket - @param addr Address of the listening socket (coming soon) - - Determine whether the subject identified by the credential can accept() - a pending connection on the socket from the host specified by addr. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for a pending socket accept + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for socket + * @param addr Address of the listening socket (coming soon) + * + * Determine whether the subject identified by the credential can accept() + * a pending connection on the socket from the host specified by addr. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_accepted_t( kauth_cred_t cred, socket_t so, struct label *socklabel, struct sockaddr *addr -); + ); /** - @brief Access control check for socket bind - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for socket - @param addr Name to assign to the socket - - Determine whether the subject identified by the credential can bind() - the name (addr) to the socket. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for socket bind + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for socket + * @param addr Name to assign to the socket + * + * Determine whether the subject identified by the credential can bind() + * the name (addr) to the socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_bind_t( kauth_cred_t cred, socket_t so, struct label *socklabel, struct sockaddr *addr -); + ); /** - @brief Access control check for socket connect - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for socket - @param addr Name to assign to the socket - - Determine whether the subject identified by the credential can - connect() the passed socket to the remote host specified by addr.
- -  @return Return 0 if access if granted, otherwise an appropriate -  value for errno should be returned. -*/ + * @brief Access control check for socket connect + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for socket + * @param addr Name to assign to the socket + * + * Determine whether the subject identified by the credential can + * connect() the passed socket to the remote host specified by addr. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_connect_t( kauth_cred_t cred, socket_t so, struct label *socklabel, struct sockaddr *addr -); + ); /** - @brief Access control check for socket() system call. - @param cred Subject credential - @param domain communication domain - @param type socket type - @param protocol socket protocol - - Determine whether the subject identified by the credential can - make the socket() call. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for socket() system call. + * @param cred Subject credential + * @param domain communication domain + * @param type socket type + * @param protocol socket protocol + * + * Determine whether the subject identified by the credential can + * make the socket() call. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_create_t( kauth_cred_t cred, int domain, int type, int protocol -); + ); /** - @brief Access control check for delivering data to a user's receieve queue - @param so The socket data is being delivered to - @param so_label The label of so - @param m The mbuf whose data will be deposited into the receive queue - @param m_label The label of the sender of the data. - - A socket has a queue for receiving incoming data. When a packet arrives - on the wire, it eventually gets deposited into this queue, which the - owner of the socket drains when they read from the socket's file descriptor. - - This function determines whether the socket can receive data from - the sender specified by m_label. - - @warning There is an outstanding design issue surrounding the placement - of this function. The check must be placed either before or after the - TCP sequence and ACK counters are updated. Placing the check before - the counters are updated causes the incoming packet to be resent by - the remote if the check rejects it. Placing the check after the counters - are updated results in a completely silent drop. As far as each TCP stack - is concerned the packet was received, however, the data will not be in the - socket's receive queue. Another consideration is that the current design - requires using the "failed label" occasionally. In that case, on rejection, - we want the remote TCP to resend the data. Because of this, we chose to - place this check before the counters are updated, so rejected packets will be - resent by the remote host. - - If a policy keeps rejecting the same packet, eventually the connection will - be dropped. Policies have several options if this design causes problems. - For example, one options is to sanitize the mbuf such that it is acceptable, - then accept it. That may require negotiation between policies as the - Framework will not know to re-check the packet. - - The policy must handle NULL MBUF labels. This will likely be the case - for non-local TCP sockets for example.
- -  @return Return 0 if access if granted, otherwise an appropriate -  value for errno should be returned. -*/ + * @brief Access control check for delivering data to a user's receive queue + * @param so The socket data is being delivered to + * @param so_label The label of so + * @param m The mbuf whose data will be deposited into the receive queue + * @param m_label The label of the sender of the data. + * + * A socket has a queue for receiving incoming data. When a packet arrives + * on the wire, it eventually gets deposited into this queue, which the + * owner of the socket drains when they read from the socket's file descriptor. + * + * This function determines whether the socket can receive data from + * the sender specified by m_label. + * + * @warning There is an outstanding design issue surrounding the placement + * of this function. The check must be placed either before or after the + * TCP sequence and ACK counters are updated. Placing the check before + * the counters are updated causes the incoming packet to be resent by + * the remote if the check rejects it. Placing the check after the counters + * are updated results in a completely silent drop. As far as each TCP stack + * is concerned the packet was received; however, the data will not be in the + * socket's receive queue. Another consideration is that the current design + * requires using the "failed label" occasionally. In that case, on rejection, + * we want the remote TCP to resend the data. Because of this, we chose to + * place this check before the counters are updated, so rejected packets will be + * resent by the remote host. + * + * If a policy keeps rejecting the same packet, eventually the connection will + * be dropped. Policies have several options if this design causes problems. + * For example, one option is to sanitize the mbuf such that it is acceptable, + * then accept it. That may require negotiation between policies as the + * Framework will not know to re-check the packet. + * + * The policy must handle NULL MBUF labels. This will likely be the case + * for non-local TCP sockets for example. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_deliver_t( socket_t so, struct label *so_label, struct mbuf *m, struct label *m_label -); + ); /** - @brief Access control check for socket ioctl. - @param cred Subject credential - @param so Object socket - @param cmd The ioctl command; see ioctl(2) - @param socklabel Policy label for socket - - Determine whether the subject identified by the credential can perform - the ioctl operation indicated by cmd on the given socket. - - @warning Since ioctl data is opaque from the standpoint of the MAC - framework, and since ioctls can affect many aspects of system - operation, policies must exercise extreme care when implementing - access control checks. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for socket ioctl. + * @param cred Subject credential + * @param so Object socket + * @param cmd The ioctl command; see ioctl(2) + * @param socklabel Policy label for socket + * + * Determine whether the subject identified by the credential can perform + * the ioctl operation indicated by cmd on the given socket.
+ * + * @warning Since ioctl data is opaque from the standpoint of the MAC + * framework, and since ioctls can affect many aspects of system + * operation, policies must exercise extreme care when implementing + * access control checks. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_socket_check_ioctl_t( kauth_cred_t cred, socket_t so, unsigned int cmd, struct label *socklabel -); + ); /** - @brief Access control check for socket kqfilter - @param cred Subject credential - @param kn Object knote - @param so Object socket - @param socklabel Policy label for socket - - Determine whether the subject identified by the credential can - receive the knote on the passed socket. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for socket kqfilter + * @param cred Subject credential + * @param kn Object knote + * @param so Object socket + * @param socklabel Policy label for socket + * + * Determine whether the subject identified by the credential can + * receive the knote on the passed socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_kqfilter_t( kauth_cred_t cred, struct knote *kn, socket_t so, struct label *socklabel -); + ); /** - @brief Access control check for socket relabel - @param cred Subject credential - @param so Object socket - @param so_label The current label of so - @param newlabel The label to be assigned to so - - Determine whether the subject identified by the credential can - change the label on the socket. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for socket relabel + * @param cred Subject credential + * @param so Object socket + * @param so_label The current label of so + * @param newlabel The label to be assigned to so + * + * Determine whether the subject identified by the credential can + * change the label on the socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_label_update_t( kauth_cred_t cred, socket_t so, struct label *so_label, struct label *newlabel -); + ); /** - @brief Access control check for socket listen - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for socket - - Determine whether the subject identified by the credential can - listen() on the passed socket. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for socket listen + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for socket + * + * Determine whether the subject identified by the credential can + * listen() on the passed socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_listen_t( kauth_cred_t cred, socket_t so, struct label *socklabel -); + ); /** - @brief Access control check for socket receive - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for socket - - Determine whether the subject identified by the credential can - receive data from the socket.
- -  @return Return 0 if access if granted, otherwise an appropriate -  value for errno should be returned. -*/ + * @brief Access control check for socket receive + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for socket + * + * Determine whether the subject identified by the credential can + * receive data from the socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_receive_t( kauth_cred_t cred, socket_t so, struct label *socklabel -); + ); -/** - @brief Access control check for socket receive - @param cred Subject credential - @param sock Object socket - @param socklabel Policy label for socket - @param saddr Name of the remote socket - - Determine whether the subject identified by the credential can - receive data from the remote host specified by addr. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ +/** + * @brief Access control check for socket receive + * @param cred Subject credential + * @param sock Object socket + * @param socklabel Policy label for socket + * @param saddr Name of the remote socket + * + * Determine whether the subject identified by the credential can + * receive data from the remote host specified by addr. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_received_t( - kauth_cred_t cred, - struct socket *sock, - struct label *socklabel, - struct sockaddr *saddr - ); + kauth_cred_t cred, + struct socket *sock, + struct label *socklabel, + struct sockaddr *saddr + ); /** - @brief Access control check for socket select - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for socket - @param which The operation selected on: FREAD or FWRITE - - Determine whether the subject identified by the credential can use the - socket in a call to select(). - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for socket select + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for socket + * @param which The operation selected on: FREAD or FWRITE + * + * Determine whether the subject identified by the credential can use the + * socket in a call to select(). + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_select_t( kauth_cred_t cred, socket_t so, struct label *socklabel, int which -); + ); /** - @brief Access control check for socket send - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for socket - @param addr Address being sent to - - Determine whether the subject identified by the credential can send - data to the socket. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for socket send + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for socket + * @param addr Address being sent to + * + * Determine whether the subject identified by the credential can send + * data to the socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned.
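The socket I/O checks above all share one shape; here is a sketch of the receive hook, assuming a policy that stored a sensitivity level in its registered label slot. The slot index my_slot and the MY_PUBLIC_LEVEL threshold are invented for the example; mac_label_get() is the framework accessor for per-policy slot data.

static int
my_socket_check_receive(kauth_cred_t cred, socket_t so,
    struct label *socklabel)
{
#pragma unused(cred, so)
	/* Unlabeled sockets are outside this illustrative policy. */
	if (socklabel == NULL) {
		return 0;
	}
	/* Read the slot this policy obtained at registration time. */
	if (mac_label_get(socklabel, my_slot) > MY_PUBLIC_LEVEL) {
		return EACCES;
	}
	return 0;
}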
+ */ typedef int mpo_socket_check_send_t( kauth_cred_t cred, socket_t so, struct label *socklabel, struct sockaddr *addr -); + ); /** - @brief Access control check for retrieving socket status - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for so - - Determine whether the subject identified by the credential can - execute the stat() system call on the given socket. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for retrieving socket status + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for so + * + * Determine whether the subject identified by the credential can + * execute the stat() system call on the given socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_stat_t( kauth_cred_t cred, socket_t so, struct label *socklabel -); + ); /** - @brief Access control check for setting socket options - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for so - @param sopt The options being set - - Determine whether the subject identified by the credential can - execute the setsockopt system call on the given socket. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for setting socket options + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for so + * @param sopt The options being set + * + * Determine whether the subject identified by the credential can + * execute the setsockopt system call on the given socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_setsockopt_t( kauth_cred_t cred, socket_t so, struct label *socklabel, struct sockopt *sopt -); + ); /** - @brief Access control check for getting socket options - @param cred Subject credential - @param so Object socket - @param socklabel Policy label for so - @param sopt The options to get - - Determine whether the subject identified by the credential can - execute the getsockopt system call on the given socket. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for getting socket options + * @param cred Subject credential + * @param so Object socket + * @param socklabel Policy label for so + * @param sopt The options to get + * + * Determine whether the subject identified by the credential can + * execute the getsockopt system call on the given socket. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_socket_check_getsockopt_t( kauth_cred_t cred, socket_t so, struct label *socklabel, struct sockopt *sopt -); + ); /** - @brief Label a socket - @param oldsock Listening socket - @param oldlabel Policy label associated with oldsock - @param newsock New socket - @param newlabel Policy label associated with newsock - - A new socket is created when a connection is accept(2)ed. This - function labels the new socket based on the existing listen(2)ing - socket.
-*/ + * @brief Label a socket + * @param oldsock Listening socket + * @param oldlabel Policy label associated with oldsock + * @param newsock New socket + * @param newlabel Policy label associated with newsock + * + * A new socket is created when a connection is accept(2)ed. This + * function labels the new socket based on the existing listen(2)ing + * socket. + */ typedef void mpo_socket_label_associate_accept_t( socket_t oldsock, struct label *oldlabel, socket_t newsock, struct label *newlabel -); + ); /** - @brief Assign a label to a new socket - @param cred Credential of the owning process - @param so The socket being labeled - @param solabel The label - @warning cred can be NULL - - Set the label on a newly created socket from the passed subject - credential. This call is made when a socket is created. The - credentials may be null if the socket is being created by the - kernel. -*/ + * @brief Assign a label to a new socket + * @param cred Credential of the owning process + * @param so The socket being labeled + * @param solabel The label + * @warning cred can be NULL + * + * Set the label on a newly created socket from the passed subject + * credential. This call is made when a socket is created. The + * credentials may be null if the socket is being created by the + * kernel. + */ typedef void mpo_socket_label_associate_t( kauth_cred_t cred, socket_t so, struct label *solabel -); + ); /** - @brief Copy a socket label - @param src Source label - @param dest Destination label - - Copy the socket label information in src into dest. -*/ + * @brief Copy a socket label + * @param src Source label + * @param dest Destination label + * + * Copy the socket label information in src into dest. + */ typedef void mpo_socket_label_copy_t( struct label *src, struct label *dest -); + ); /** - @brief Destroy socket label - @param label The label to be destroyed - - Destroy a socket label. Since the object is going out of - scope, policy modules should free any internal storage associated - with the label so that it may be destroyed. -*/ + * @brief Destroy socket label + * @param label The label to be destroyed + * + * Destroy a socket label. Since the object is going out of + * scope, policy modules should free any internal storage associated + * with the label so that it may be destroyed. + */ typedef void mpo_socket_label_destroy_t( struct label *label -); + ); /** - @brief Externalize a socket label - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of label - - Produce an externalized socket label based on the label structure passed. - An externalized label consists of a text representation of the label - contents that can be used with userland applications and read by the - user. If element_name does not match a namespace managed by the policy, - simply return 0. Only return nonzero if an error occurs while externalizing - the label data. - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. -*/ + * @brief Externalize a socket label + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of label + * + * Produce an externalized socket label based on the label structure passed. 
+ * An externalized label consists of a text representation of the label + * contents that can be used with userland applications and read by the + * user. If element_name does not match a namespace managed by the policy, + * simply return 0. Only return nonzero if an error occurs while externalizing + * the label data. + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success. + */ typedef int mpo_socket_label_externalize_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Initialize socket label - @param label New label to initialize - @param waitok Malloc flags - - Initialize the label of a newly instantiated socket. The waitok - field may be one of M_WAITOK and M_NOWAIT, and should be employed to - avoid performing a sleeping malloc(9) during this initialization - call. It it not always safe to sleep during this entry point. - - @warning Since it is possible for the waitok flags to be set to - M_NOWAIT, the malloc operation may fail. - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. -*/ + * @brief Initialize socket label + * @param label New label to initialize + * @param waitok Malloc flags + * + * Initialize the label of a newly instantiated socket. The waitok + * field may be one of M_WAITOK and M_NOWAIT, and should be employed to + * avoid performing a sleeping malloc(9) during this initialization + * call. It is not always safe to sleep during this entry point. + * + * @warning Since it is possible for the waitok flags to be set to + * M_NOWAIT, the malloc operation may fail. + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success. + */ typedef int mpo_socket_label_init_t( struct label *label, int waitok -); + ); /** - @brief Internalize a socket label - @param label Label to be filled in - @param element_name Name of the label namespace for which the label should - be internalized - @param element_data Text data to be internalized - - Produce an internal socket label structure based on externalized label - data in text format. - - The policy's internalize entry points will be called only if the - policy has registered interest in the label namespace. - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. -*/ + * @brief Internalize a socket label + * @param label Label to be filled in + * @param element_name Name of the label namespace for which the label should + * be internalized + * @param element_data Text data to be internalized + * + * Produce an internal socket label structure based on externalized label + * data in text format. + * + * The policy's internalize entry points will be called only if the + * policy has registered interest in the label namespace. + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success.
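A sketch of the externalize side, assuming a policy namespace "mypolicy" and a numeric value in slot my_slot (both invented). Note the namespace-mismatch case returns 0, as the contract above requires; sbuf_printf() reports overflow with -1.

static int
my_socket_label_externalize(struct label *label, char *element_name,
    struct sbuf *sb)
{
	/* Not our namespace: not an error, just decline. */
	if (strcmp(element_name, "mypolicy") != 0) {
		return 0;
	}
	/* Emit a text form of the slot value for userland. */
	if (sbuf_printf(sb, "level=%ld",
	    (long)mac_label_get(label, my_slot)) == -1) {
		return ENOMEM;
	}
	return 0;
}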
+ */ typedef int mpo_socket_label_internalize_t( struct label *label, char *element_name, char *element_data -); + ); /** - @brief Relabel socket - @param cred Subject credential - @param so Object; socket - @param so_label Current label of the socket - @param newlabel The label to be assigned to so - - The subject identified by the credential has previously requested - and was authorized to relabel the socket; this entry point allows - policies to perform the actual label update operation. - - @warning XXX This entry point will likely change in future versions. -*/ + * @brief Relabel socket + * @param cred Subject credential + * @param so Object; socket + * @param so_label Current label of the socket + * @param newlabel The label to be assigned to so + * + * The subject identified by the credential has previously requested + * and was authorized to relabel the socket; this entry point allows + * policies to perform the actual label update operation. + * + * @warning XXX This entry point will likely change in future versions. + */ typedef void mpo_socket_label_update_t( kauth_cred_t cred, socket_t so, struct label *so_label, struct label *newlabel -); + ); /** - @brief Set the peer label on a socket from mbuf - @param m Mbuf chain received on socket so - @param m_label Label for m - @param so Current label for the socket - @param so_label Policy label to be filled out for the socket - - Set the peer label of a socket based on the label of the sender of the - mbuf. - - This is called for every TCP/IP packet received. The first call for a given - socket operates on a newly initialized label, and subsequent calls operate - on existing label data. - - @warning Because this can affect performance significantly, it has - different sematics than other 'set' operations. Typically, 'set' operations - operate on newly initialzed labels and policies do not need to worry about - clobbering existing values. In this case, it is too inefficient to - initialize and destroy a label every time data is received for the socket. - Instead, it is up to the policies to determine how to replace the label data. - Most policies should be able to replace the data inline. -*/ + * @brief Set the peer label on a socket from mbuf + * @param m Mbuf chain received on socket so + * @param m_label Label for m + * @param so Current label for the socket + * @param so_label Policy label to be filled out for the socket + * + * Set the peer label of a socket based on the label of the sender of the + * mbuf. + * + * This is called for every TCP/IP packet received. The first call for a given + * socket operates on a newly initialized label, and subsequent calls operate + * on existing label data. + * + * @warning Because this can affect performance significantly, it has + * different semantics than other 'set' operations. Typically, 'set' operations + * operate on newly initialized labels and policies do not need to worry about + * clobbering existing values. In this case, it is too inefficient to + * initialize and destroy a label every time data is received for the socket. + * Instead, it is up to the policies to determine how to replace the label data. + * Most policies should be able to replace the data inline.
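Because the authorization decision happened earlier in mpo_socket_check_label_update_t, the update hook itself is typically a plain copy of the policy's slot; a minimal sketch, with my_slot again an invented slot index:

static void
my_socket_label_update(kauth_cred_t cred, socket_t so,
    struct label *so_label, struct label *newlabel)
{
#pragma unused(cred, so)
	/* The access check already ran; just commit the new value. */
	mac_label_set(so_label, my_slot, mac_label_get(newlabel, my_slot));
}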
+ */ typedef void mpo_socketpeer_label_associate_mbuf_t( struct mbuf *m, struct label *m_label, socket_t so, struct label *so_label -); + ); /** - @brief Set the peer label on a socket from socket - @param source Local socket - @param sourcelabel Policy label for source - @param target Peer socket - @param targetlabel Policy label to fill in for target - - Set the peer label on a stream UNIX domain socket from the passed - remote socket endpoint. This call will be made when the socket pair - is connected, and will be made for both endpoints. - - Note that this call is only made on connection; it is currently not updated - during communication. -*/ + * @brief Set the peer label on a socket from socket + * @param source Local socket + * @param sourcelabel Policy label for source + * @param target Peer socket + * @param targetlabel Policy label to fill in for target + * + * Set the peer label on a stream UNIX domain socket from the passed + * remote socket endpoint. This call will be made when the socket pair + * is connected, and will be made for both endpoints. + * + * Note that this call is only made on connection; it is currently not updated + * during communication. + */ typedef void mpo_socketpeer_label_associate_socket_t( socket_t source, struct label *sourcelabel, socket_t target, struct label *targetlabel -); + ); /** - @brief Destroy socket peer label - @param label The peer label to be destroyed - - Destroy a socket peer label. Since the object is going out of - scope, policy modules should free any internal storage associated - with the label so that it may be destroyed. -*/ + * @brief Destroy socket peer label + * @param label The peer label to be destroyed + * + * Destroy a socket peer label. Since the object is going out of + * scope, policy modules should free any internal storage associated + * with the label so that it may be destroyed. + */ typedef void mpo_socketpeer_label_destroy_t( struct label *label -); + ); /** - @brief Externalize a socket peer label - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of label - - Produce an externalized socket peer label based on the label structure - passed. An externalized label consists of a text representation of the - label contents that can be used with userland applications and read by the - user. If element_name does not match a namespace managed by the policy, - simply return 0. Only return nonzero if an error occurs while externalizing - the label data. - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. -*/ + * @brief Externalize a socket peer label + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of label + * + * Produce an externalized socket peer label based on the label structure + * passed. An externalized label consists of a text representation of the + * label contents that can be used with userland applications and read by the + * user. If element_name does not match a namespace managed by the policy, + * simply return 0. Only return nonzero if an error occurs while externalizing + * the label data. + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success. 
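Given the per-packet warning on the mbuf hook above, a conforming implementation replaces its label data in place and tolerates NULL mbuf labels; a sketch (my_slot invented):

static void
my_socketpeer_label_associate_mbuf(struct mbuf *m, struct label *m_label,
    socket_t so, struct label *so_label)
{
#pragma unused(m, so)
	/* Overwrite in place; no init/destroy cycle per packet. A NULL
	 * mbuf label (e.g. a remote TCP peer) maps to a default value. */
	intptr_t v = (m_label != NULL) ? mac_label_get(m_label, my_slot) : 0;
	mac_label_set(so_label, my_slot, v);
}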
+ */ typedef int mpo_socketpeer_label_externalize_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Initialize socket peer label - @param label New label to initialize - @param waitok Malloc flags - - Initialize the peer label of a newly instantiated socket. The - waitok field may be one of M_WAITOK and M_NOWAIT, and should be - employed to avoid performing a sleeping malloc(9) during this - initialization call. It it not always safe to sleep during this - entry point. - - @warning Since it is possible for the waitok flags to be set to - M_NOWAIT, the malloc operation may fail. - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. -*/ + * @brief Initialize socket peer label + * @param label New label to initialize + * @param waitok Malloc flags + * + * Initialize the peer label of a newly instantiated socket. The + * waitok field may be one of M_WAITOK and M_NOWAIT, and should be + * employed to avoid performing a sleeping malloc(9) during this + * initialization call. It is not always safe to sleep during this + * entry point. + * + * @warning Since it is possible for the waitok flags to be set to + * M_NOWAIT, the malloc operation may fail. + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success. + */ typedef int mpo_socketpeer_label_init_t( struct label *label, int waitok -); + ); /** - @brief Access control check for enabling accounting - @param cred Subject credential - @param vp Accounting file - @param vlabel Label associated with vp - - Determine whether the subject should be allowed to enable accounting, - based on its label and the label of the accounting log file. See - acct(5) for more information. - - As accounting is disabled by passing NULL to the acct(2) system call, - the policy should be prepared for both 'vp' and 'vlabel' to be NULL. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for enabling accounting + * @param cred Subject credential + * @param vp Accounting file + * @param vlabel Label associated with vp + * + * Determine whether the subject should be allowed to enable accounting, + * based on its label and the label of the accounting log file. See + * acct(5) for more information. + * + * As accounting is disabled by passing NULL to the acct(2) system call, + * the policy should be prepared for both 'vp' and 'vlabel' to be NULL. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_acct_t( kauth_cred_t cred, struct vnode *vp, struct label *vlabel -); + ); /** - @brief Access control check for audit - @param cred Subject credential - @param record Audit record - @param length Audit record length - - Determine whether the subject identified by the credential can submit - an audit record for inclusion in the audit log via the audit() system call. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for audit + * @param cred Subject credential + * @param record Audit record + * @param length Audit record length + * + * Determine whether the subject identified by the credential can submit + * an audit record for inclusion in the audit log via the audit() system call.
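The waitok-taking init entry points (this one and mpo_socket_label_init_t above) typically just forward the flag to the allocator; a sketch assuming an invented per-label state struct my_state and slot my_slot:

static int
my_socket_label_init(struct label *label, int waitok)
{
	struct my_state *s; /* invented per-label state */

	/* Forward the caller's M_WAITOK/M_NOWAIT; with M_NOWAIT the
	 * allocation may legitimately fail, and we must report that
	 * rather than sleep. */
	s = _MALLOC(sizeof(*s), M_MACTEMP, waitok | M_ZERO);
	if (s == NULL) {
		return ENOMEM;
	}
	mac_label_set(label, my_slot, (intptr_t)s);
	return 0;
}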
+ * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_audit_t( kauth_cred_t cred, void *record, int length -); + ); /** - @brief Access control check for controlling audit - @param cred Subject credential - @param vp Audit file - @param vl Label associated with vp - - Determine whether the subject should be allowed to enable auditing using - the auditctl() system call, based on its label and the label of the proposed - audit file. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for controlling audit + * @param cred Subject credential + * @param vp Audit file + * @param vl Label associated with vp + * + * Determine whether the subject should be allowed to enable auditing using + * the auditctl() system call, based on its label and the label of the proposed + * audit file. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_auditctl_t( kauth_cred_t cred, struct vnode *vp, struct label *vl -); + ); /** - @brief Access control check for manipulating auditing - @param cred Subject credential - @param cmd Audit control command - - Determine whether the subject identified by the credential can perform - the audit subsystem control operation cmd via the auditon() system call. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for manipulating auditing + * @param cred Subject credential + * @param cmd Audit control command + * + * Determine whether the subject identified by the credential can perform + * the audit subsystem control operation cmd via the auditon() system call. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_auditon_t( kauth_cred_t cred, int cmd -); + ); /** - @brief Access control check for using CHUD facilities - @param cred Subject credential - - Determine whether the subject identified by the credential can perform - performance-related tasks using the CHUD system call. This interface is - deprecated. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for using CHUD facilities + * @param cred Subject credential + * + * Determine whether the subject identified by the credential can perform + * performance-related tasks using the CHUD system call. This interface is + * deprecated. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_chud_t( - kauth_cred_t cred -); + kauth_cred_t cred + ); /** - @brief Access control check for obtaining the host control port - @param cred Subject credential - - Determine whether the subject identified by the credential can - obtain the host control port. - - @return Return 0 if access is granted, or non-zero otherwise. -*/ + * @brief Access control check for obtaining the host control port + * @param cred Subject credential + * + * Determine whether the subject identified by the credential can + * obtain the host control port. + * + * @return Return 0 if access is granted, or non-zero otherwise. 
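Several of the system checks reduce to gating a few privileged subcommands; for instance, a sketch of the auditon() hook that (illustratively) reserves configuration changes for root, using the A_SETPOLICY and A_SETKMASK commands from bsm/audit.h:

static int
my_system_check_auditon(kauth_cred_t cred, int cmd)
{
	switch (cmd) {
	case A_SETPOLICY:	/* change global audit policy */
	case A_SETKMASK:	/* change kernel preselection mask */
		/* Illustrative rule: configuration requires uid 0. */
		return (kauth_cred_getuid(cred) == 0) ? 0 : EPERM;
	default:
		return 0;
	}
}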
+ */ typedef int mpo_system_check_host_priv_t( kauth_cred_t cred -); + ); /** - @brief Access control check for obtaining system information - @param cred Subject credential - @param info_type A description of the information requested - - Determine whether the subject identified by the credential should be - allowed to obtain information about the system. - - This is a generic hook that can be used in a variety of situations where - information is being returned that might be considered sensitive. - Rather than adding a new MAC hook for every such interface, this hook can - be called with a string identifying the type of information requested. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for obtaining system information + * @param cred Subject credential + * @param info_type A description of the information requested + * + * Determine whether the subject identified by the credential should be + * allowed to obtain information about the system. + * + * This is a generic hook that can be used in a variety of situations where + * information is being returned that might be considered sensitive. + * Rather than adding a new MAC hook for every such interface, this hook can + * be called with a string identifying the type of information requested. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_info_t( kauth_cred_t cred, const char *info_type -); + ); /** - @brief Access control check for calling NFS services - @param cred Subject credential - - Determine whether the subject identified by the credential should be - allowed to call nfssrv(2). - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for calling NFS services + * @param cred Subject credential + * + * Determine whether the subject identified by the credential should be + * allowed to call nfssrv(2). + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_nfsd_t( kauth_cred_t cred -); + ); /** - @brief Access control check for reboot - @param cred Subject credential - @param howto howto parameter from reboot(2) - - Determine whether the subject identified by the credential should be - allowed to reboot the system in the specified manner. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for reboot + * @param cred Subject credential + * @param howto howto parameter from reboot(2) + * + * Determine whether the subject identified by the credential should be + * allowed to reboot the system in the specified manner. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_reboot_t( kauth_cred_t cred, int howto -); + ); /** - @brief Access control check for setting system clock - @param cred Subject credential - - Determine whether the subject identified by the credential should be - allowed to set the system clock. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. 
-*/ + * @brief Access control check for setting system clock + * @param cred Subject credential + * + * Determine whether the subject identified by the credential should be + * allowed to set the system clock. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_settime_t( kauth_cred_t cred -); + ); /** - @brief Access control check for removing swap devices - @param cred Subject credential - @param vp Swap device - @param label Label associated with vp - - Determine whether the subject identified by the credential should be - allowed to remove vp as a swap device. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for removing swap devices + * @param cred Subject credential + * @param vp Swap device + * @param label Label associated with vp + * + * Determine whether the subject identified by the credential should be + * allowed to remove vp as a swap device. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_swapoff_t( kauth_cred_t cred, struct vnode *vp, struct label *label -); + ); /** - @brief Access control check for adding swap devices - @param cred Subject credential - @param vp Swap device - @param label Label associated with vp - - Determine whether the subject identified by the credential should be - allowed to add vp as a swap device. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for adding swap devices + * @param cred Subject credential + * @param vp Swap device + * @param label Label associated with vp + * + * Determine whether the subject identified by the credential should be + * allowed to add vp as a swap device. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_swapon_t( kauth_cred_t cred, struct vnode *vp, struct label *label -); -/** - @brief Access control check for sysctl - @param cred Subject credential - @param namestring String representation of sysctl name. - @param name Integer name; see sysctl(3) - @param namelen Length of name array of integers; see sysctl(3) - @param old 0 or address where to store old value; see sysctl(3) - @param oldlen Length of old buffer; see sysctl(3) - @param newvalue 0 or address of new value; see sysctl(3) - @param newlen Length of new buffer; see sysctl(3) - - Determine whether the subject identified by the credential should be - allowed to make the specified sysctl(3) transaction. - - The sysctl(3) call specifies that if the old value is not desired, - oldp and oldlenp should be set to NULL. Likewise, if a new value is - not to be set, newp should be set to NULL and newlen set to 0. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for sysctl + * @param cred Subject credential + * @param namestring String representation of sysctl name. 
+ * @param name Integer name; see sysctl(3) + * @param namelen Length of name array of integers; see sysctl(3) + * @param old 0 or address where to store old value; see sysctl(3) + * @param oldlen Length of old buffer; see sysctl(3) + * @param newvalue 0 or address of new value; see sysctl(3) + * @param newlen Length of new buffer; see sysctl(3) + * + * Determine whether the subject identified by the credential should be + * allowed to make the specified sysctl(3) transaction. + * + * The sysctl(3) call specifies that if the old value is not desired, + * oldp and oldlenp should be set to NULL. Likewise, if a new value is + * not to be set, newp should be set to NULL and newlen set to 0. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_sysctlbyname_t( kauth_cred_t cred, const char *namestring, int *name, u_int namelen, - user_addr_t old, /* NULLOK */ + user_addr_t old, /* NULLOK */ size_t oldlen, - user_addr_t newvalue, /* NULLOK */ + user_addr_t newvalue, /* NULLOK */ size_t newlen -); + ); /** - @brief Access control check for kas_info - @param cred Subject credential - @param selector Category of information to return. See kas_info.h - - Determine whether the subject identified by the credential can perform - introspection of the kernel address space layout for - debugging/performance analysis. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for kas_info + * @param cred Subject credential + * @param selector Category of information to return. See kas_info.h + * + * Determine whether the subject identified by the credential can perform + * introspection of the kernel address space layout for + * debugging/performance analysis. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_system_check_kas_info_t( kauth_cred_t cred, int selector -); -/** - @brief Create a System V message label - @param cred Subject credential - @param msqptr The message queue the message will be placed in - @param msqlabel The label of the message queue - @param msgptr The message - @param msglabel The label of the message - - Label the message as its placed in the message queue. -*/ + ); +/** + * @brief Create a System V message label + * @param cred Subject credential + * @param msqptr The message queue the message will be placed in + * @param msqlabel The label of the message queue + * @param msgptr The message + * @param msglabel The label of the message + * + * Label the message as it is placed in the message queue. + */ typedef void mpo_sysvmsg_label_associate_t( kauth_cred_t cred, struct msqid_kernel *msqptr, struct label *msqlabel, struct msg *msgptr, struct label *msglabel -); + ); /** - @brief Destroy System V message label - @param label The label to be destroyed - - Destroy a System V message label. Since the object is - going out of scope, policy modules should free any internal storage - associated with the label so that it may be destroyed. -*/ + * @brief Destroy System V message label + * @param label The label to be destroyed + * + * Destroy a System V message label. Since the object is + * going out of scope, policy modules should free any internal storage + * associated with the label so that it may be destroyed.
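Referring back to mpo_system_check_sysctlbyname_t above: per sysctl(3), a read-only transaction passes newp as NULL, so the newvalue argument distinguishes reads from writes. A sketch that gates only mutations (the kern.* rule is invented):

static int
my_system_check_sysctlbyname(kauth_cred_t cred, const char *namestring,
    int *name, u_int namelen, user_addr_t old, size_t oldlen,
    user_addr_t newvalue, size_t newlen)
{
#pragma unused(name, namelen, old, oldlen, newlen)
	/* A non-NULL new-value pointer marks this as a write. */
	int is_write = (newvalue != USER_ADDR_NULL);

	if (is_write && strncmp(namestring, "kern.", 5) == 0 &&
	    kauth_cred_getuid(cred) != 0) {
		return EPERM;
	}
	return 0;
}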
+ */ typedef void mpo_sysvmsg_label_destroy_t( struct label *label -); + ); /** - @brief Initialize System V message label - @param label New label to initialize - - Initialize the label for a newly instantiated System V message. -*/ + * @brief Initialize System V message label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated System V message. + */ typedef void mpo_sysvmsg_label_init_t( struct label *label -); + ); /** - @brief Clean up a System V message label - @param label The label to be destroyed - - Clean up a System V message label. Darwin pre-allocates - messages at system boot time and re-uses them rather than - allocating new ones. Before messages are returned to the "free - pool", policies can cleanup or overwrite any information present in - the label. -*/ + * @brief Clean up a System V message label + * @param label The label to be destroyed + * + * Clean up a System V message label. Darwin pre-allocates + * messages at system boot time and re-uses them rather than + * allocating new ones. Before messages are returned to the "free + * pool", policies can cleanup or overwrite any information present in + * the label. + */ typedef void mpo_sysvmsg_label_recycle_t( struct label *label -); -/** - @brief Access control check for System V message enqueuing - @param cred Subject credential - @param msgptr The message - @param msglabel The message's label - @param msqptr The message queue - @param msqlabel The message queue's label - - Determine whether the subject identified by the credential can add the - given message to the given message queue. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for System V message enqueuing + * @param cred Subject credential + * @param msgptr The message + * @param msglabel The message's label + * @param msqptr The message queue + * @param msqlabel The message queue's label + * + * Determine whether the subject identified by the credential can add the + * given message to the given message queue. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvmsq_check_enqueue_t( kauth_cred_t cred, struct msg *msgptr, struct label *msglabel, struct msqid_kernel *msqptr, struct label *msqlabel -); + ); /** - @brief Access control check for System V message reception - @param cred The credential of the intended recipient - @param msgptr The message - @param msglabel The message's label - - Determine whether the subject identified by the credential can receive - the given message. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for System V message reception + * @param cred The credential of the intended recipient + * @param msgptr The message + * @param msglabel The message's label + * + * Determine whether the subject identified by the credential can receive + * the given message. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. 
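Because Darwin recycles pooled System V messages rather than destroying them, the recycle hook above, not the destroy hook, is where per-message state gets scrubbed between uses; a minimal sketch (my_slot invented):

static void
my_sysvmsg_label_recycle(struct label *label)
{
	/* Reset our slot so the next user of this pooled message
	 * starts from a clean label. */
	mac_label_set(label, my_slot, 0);
}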
+ */ typedef int mpo_sysvmsq_check_msgrcv_t( kauth_cred_t cred, struct msg *msgptr, struct label *msglabel -); + ); /** - @brief Access control check for System V message queue removal - @param cred The credential of the caller - @param msgptr The message - @param msglabel The message's label - - System V message queues are removed using the msgctl() system call. - The system will iterate over each messsage in the queue, calling this - function for each, to determine whether the caller has the appropriate - credentials. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for System V message queue removal + * @param cred The credential of the caller + * @param msgptr The message + * @param msglabel The message's label + * + * System V message queues are removed using the msgctl() system call. + * The system will iterate over each message in the queue, calling this + * function for each, to determine whether the caller has the appropriate + * credentials. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvmsq_check_msgrmid_t( kauth_cred_t cred, struct msg *msgptr, struct label *msglabel -); + ); /** - @brief Access control check for msgctl() - @param cred The credential of the caller - @param msqptr The message queue - @param msqlabel The message queue's label - - This access check is performed to validate calls to msgctl(). - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for msgctl() + * @param cred The credential of the caller + * @param msqptr The message queue + * @param msqlabel The message queue's label + * + * This access check is performed to validate calls to msgctl(). + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvmsq_check_msqctl_t( kauth_cred_t cred, struct msqid_kernel *msqptr, struct label *msqlabel, int cmd -); + ); /** - @brief Access control check to get a System V message queue - @param cred The credential of the caller - @param msqptr The message queue requested - @param msqlabel The message queue's label - - On a call to msgget(), if the queue requested already exists, - and it is a public queue, this check will be performed before the - queue's ID is returned to the user. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check to get a System V message queue + * @param cred The credential of the caller + * @param msqptr The message queue requested + * @param msqlabel The message queue's label + * + * On a call to msgget(), if the queue requested already exists, + * and it is a public queue, this check will be performed before the + * queue's ID is returned to the user. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned.
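Since the msqctl() check receives the control command, policies commonly switch on it; a sketch that (illustratively) restricts the destructive commands from sys/ipc.h to root:

static int
my_sysvmsq_check_msqctl(kauth_cred_t cred, struct msqid_kernel *msqptr,
    struct label *msqlabel, int cmd)
{
#pragma unused(msqptr, msqlabel)
	switch (cmd) {
	case IPC_RMID:	/* remove the queue */
	case IPC_SET:	/* change permissions/limits */
		/* Illustrative rule: mutations require uid 0. */
		return (kauth_cred_getuid(cred) == 0) ? 0 : EPERM;
	default:	/* IPC_STAT and other read-only commands */
		return 0;
	}
}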
+ */ typedef int mpo_sysvmsq_check_msqget_t( kauth_cred_t cred, struct msqid_kernel *msqptr, struct label *msqlabel -); + ); /** - @brief Access control check to receive a System V message from the given queue - @param cred The credential of the caller - @param msqptr The message queue to receive from - @param msqlabel The message queue's label - - On a call to msgrcv(), this check is performed to determine whether the - caller has receive rights on the given queue. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check to receive a System V message from the given queue + * @param cred The credential of the caller + * @param msqptr The message queue to receive from + * @param msqlabel The message queue's label + * + * On a call to msgrcv(), this check is performed to determine whether the + * caller has receive rights on the given queue. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvmsq_check_msqrcv_t( kauth_cred_t cred, struct msqid_kernel *msqptr, struct label *msqlabel -); + ); /** - @brief Access control check to send a System V message to the given queue - @param cred The credential of the caller - @param msqptr The message queue to send to - @param msqlabel The message queue's label - - On a call to msgsnd(), this check is performed to determine whether the - caller has send rights on the given queue. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check to send a System V message to the given queue + * @param cred The credential of the caller + * @param msqptr The message queue to send to + * @param msqlabel The message queue's label + * + * On a call to msgsnd(), this check is performed to determine whether the + * caller has send rights on the given queue. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvmsq_check_msqsnd_t( kauth_cred_t cred, struct msqid_kernel *msqptr, struct label *msqlabel -); + ); /** - @brief Create a System V message queue label - @param cred Subject credential - @param msqptr The message queue - @param msqlabel The label of the message queue - -*/ + * @brief Create a System V message queue label + * @param cred Subject credential + * @param msqptr The message queue + * @param msqlabel The label of the message queue + * + */ typedef void mpo_sysvmsq_label_associate_t( kauth_cred_t cred, struct msqid_kernel *msqptr, struct label *msqlabel -); + ); /** - @brief Destroy System V message queue label - @param label The label to be destroyed - - Destroy a System V message queue label. Since the object is - going out of scope, policy modules should free any internal storage - associated with the label so that it may be destroyed. -*/ + * @brief Destroy System V message queue label + * @param label The label to be destroyed + * + * Destroy a System V message queue label. Since the object is + * going out of scope, policy modules should free any internal storage + * associated with the label so that it may be destroyed. + */ typedef void mpo_sysvmsq_label_destroy_t( struct label *label -); + ); /** - @brief Initialize System V message queue label - @param label New label to initialize - - Initialize the label for a newly instantiated System V message queue. 
-*/ + * @brief Initialize System V message queue label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated System V message queue. + */ typedef void mpo_sysvmsq_label_init_t( struct label *label -); + ); /** - @brief Clean up a System V message queue label - @param label The label to be destroyed - - Clean up a System V message queue label. Darwin pre-allocates - message queues at system boot time and re-uses them rather than - allocating new ones. Before message queues are returned to the "free - pool", policies can cleanup or overwrite any information present in - the label. -*/ + * @brief Clean up a System V message queue label + * @param label The label to be destroyed + * + * Clean up a System V message queue label. Darwin pre-allocates + * message queues at system boot time and re-uses them rather than + * allocating new ones. Before message queues are returned to the "free + * pool", policies can clean up or overwrite any information present in + * the label. + */ typedef void mpo_sysvmsq_label_recycle_t( struct label *label -); + ); /** - @brief Access control check for System V semaphore control operation - @param cred Subject credential - @param semakptr Pointer to semaphore identifier - @param semaklabel Label associated with semaphore - @param cmd Control operation to be performed; see semctl(2) - - Determine whether the subject identified by the credential can perform - the operation indicated by cmd on the System V semaphore semakptr. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for System V semaphore control operation + * @param cred Subject credential + * @param semakptr Pointer to semaphore identifier + * @param semaklabel Label associated with semaphore + * @param cmd Control operation to be performed; see semctl(2) + * + * Determine whether the subject identified by the credential can perform + * the operation indicated by cmd on the System V semaphore semakptr. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvsem_check_semctl_t( kauth_cred_t cred, struct semid_kernel *semakptr, struct label *semaklabel, int cmd -); + ); /** - @brief Access control check for obtaining a System V semaphore - @param cred Subject credential - @param semakptr Pointer to semaphore identifier - @param semaklabel Label to associate with the semaphore - - Determine whether the subject identified by the credential can - obtain a System V semaphore. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for obtaining a System V semaphore + * @param cred Subject credential + * @param semakptr Pointer to semaphore identifier + * @param semaklabel Label to associate with the semaphore + * + * Determine whether the subject identified by the credential can + * obtain a System V semaphore. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned.
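The init/destroy/recycle triple documented above is one allocation lifecycle, not three unrelated hooks: init runs when a queue is instantiated, recycle when it returns to Darwin's pre-allocated free pool, and destroy when the label itself goes out of scope. A minimal sketch of how a policy keeping a single word of state in its label slot might wire all three; example_slot is hypothetical, and mac_label_set() is the framework's slot mutator from security/mac_policy.h.

#include <security/mac_policy.h>

static int example_slot;	/* assumed: assigned when the policy loads */

static void
example_sysvmsq_label_init(struct label *label)
{
	/* Newly instantiated queue: start from a known-clean state. */
	mac_label_set(label, example_slot, 0);
}

static void
example_sysvmsq_label_recycle(struct label *label)
{
	/* Queue headed back to the free pool: erase policy state so the
	 * next user of the pre-allocated object inherits nothing. */
	mac_label_set(label, example_slot, 0);
}

static void
example_sysvmsq_label_destroy(struct label *label)
{
	/* Label going out of scope: free any internal storage. A policy
	 * storing a bare integer in its slot has nothing to release. */
	mac_label_set(label, example_slot, 0);
}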
+ */ typedef int mpo_sysvsem_check_semget_t( kauth_cred_t cred, struct semid_kernel *semakptr, struct label *semaklabel -); + ); /** - @brief Access control check for System V semaphore operations - @param cred Subject credential - @param semakptr Pointer to semaphore identifier - @param semaklabel Label associated with the semaphore - @param accesstype Flags to indicate access (read and/or write) - - Determine whether the subject identified by the credential can - perform the operations on the System V semaphore indicated by - semakptr. The accesstype flags hold the maximum set of permissions - from the sem_op array passed to the semop system call. It may - contain SEM_R for read-only operations or SEM_A for read/write - operations. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for System V semaphore operations + * @param cred Subject credential + * @param semakptr Pointer to semaphore identifier + * @param semaklabel Label associated with the semaphore + * @param accesstype Flags to indicate access (read and/or write) + * + * Determine whether the subject identified by the credential can + * perform the operations on the System V semaphore indicated by + * semakptr. The accesstype flags hold the maximum set of permissions + * from the sem_op array passed to the semop system call. It may + * contain SEM_R for read-only operations or SEM_A for read/write + * operations. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvsem_check_semop_t( kauth_cred_t cred, struct semid_kernel *semakptr, struct label *semaklabel, size_t accesstype -); + ); /** - @brief Create a System V semaphore label - @param cred Subject credential - @param semakptr The semaphore being created - @param semalabel Label to associate with the new semaphore - - Label a new System V semaphore. The label was previously - initialized and associated with the semaphore. At this time, an - appropriate initial label value should be assigned to the object and - stored in semalabel. -*/ + * @brief Create a System V semaphore label + * @param cred Subject credential + * @param semakptr The semaphore being created + * @param semalabel Label to associate with the new semaphore + * + * Label a new System V semaphore. The label was previously + * initialized and associated with the semaphore. At this time, an + * appropriate initial label value should be assigned to the object and + * stored in semalabel. + */ typedef void mpo_sysvsem_label_associate_t( kauth_cred_t cred, struct semid_kernel *semakptr, struct label *semalabel -); + ); /** - @brief Destroy System V semaphore label - @param label The label to be destroyed - - Destroy a System V semaphore label. Since the object is - going out of scope, policy modules should free any internal storage - associated with the label so that it may be destroyed. -*/ + * @brief Destroy System V semaphore label + * @param label The label to be destroyed + * + * Destroy a System V semaphore label. Since the object is + * going out of scope, policy modules should free any internal storage + * associated with the label so that it may be destroyed. + */ typedef void mpo_sysvsem_label_destroy_t( struct label *label -); + ); /** - @brief Initialize System V semaphore label - @param label New label to initialize - - Initialize the label for a newly instantiated System V semaphore. Sleeping - is permitted. 
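For the semop check described above, the interesting input is accesstype, which folds the whole sem_op array down to at most the read (SEM_R) and alter (SEM_A) permission bits. A hypothetical policy that lets anyone read but reserves alters for a privileged subject might look like the sketch below; example_cred_is_privileged() is an invented placeholder for whatever subject test the policy actually uses, and SEM_R/SEM_A come from the kernel-internal SysV semaphore header.

#include <sys/errno.h>
#include <sys/sem_internal.h>	/* kernel-internal; defines SEM_R/SEM_A */
#include <security/mac_policy.h>

/* Hypothetical subject test; stands in for the policy's real check. */
static int example_cred_is_privileged(kauth_cred_t cred);

static int
example_sysvsem_check_semop(kauth_cred_t cred,
    struct semid_kernel *semakptr, struct label *semaklabel,
    size_t accesstype)
{
#pragma unused(semakptr, semaklabel)
	/* SEM_A is set when any sem_op entry would alter the semaphore;
	 * a read-only operation sets only SEM_R. */
	if ((accesstype & SEM_A) && !example_cred_is_privileged(cred))
		return (EACCES);
	return (0);
}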
-*/ + * @brief Initialize System V semaphore label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated System V semaphore. Sleeping + * is permitted. + */ typedef void mpo_sysvsem_label_init_t( struct label *label -); + ); /** - @brief Clean up a System V semaphore label - @param label The label to be cleaned - - Clean up a System V semaphore label. Darwin pre-allocates - semaphores at system boot time and re-uses them rather than - allocating new ones. Before semaphores are returned to the "free - pool", policies can cleanup or overwrite any information present in - the label. -*/ + * @brief Clean up a System V semaphore label + * @param label The label to be cleaned + * + * Clean up a System V semaphore label. Darwin pre-allocates + * semaphores at system boot time and re-uses them rather than + * allocating new ones. Before semaphores are returned to the "free + * pool", policies can clean up or overwrite any information present in + * the label. + */ typedef void mpo_sysvsem_label_recycle_t( struct label *label -); + ); /** - @brief Access control check for mapping System V shared memory - @param cred Subject credential - @param shmsegptr Pointer to shared memory segment identifier - @param shmseglabel Label associated with the shared memory segment - @param shmflg shmat flags; see shmat(2) - - Determine whether the subject identified by the credential can map - the System V shared memory segment associated with shmsegptr. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for mapping System V shared memory + * @param cred Subject credential + * @param shmsegptr Pointer to shared memory segment identifier + * @param shmseglabel Label associated with the shared memory segment + * @param shmflg shmat flags; see shmat(2) + * + * Determine whether the subject identified by the credential can map + * the System V shared memory segment associated with shmsegptr. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvshm_check_shmat_t( kauth_cred_t cred, struct shmid_kernel *shmsegptr, struct label *shmseglabel, int shmflg -); + ); /** - @brief Access control check for System V shared memory control operation - @param cred Subject credential - @param shmsegptr Pointer to shared memory segment identifier - @param shmseglabel Label associated with the shared memory segment - @param cmd Control operation to be performed; see shmctl(2) - - Determine whether the subject identified by the credential can perform - the operation indicated by cmd on the System V shared memory segment - shmsegptr. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for System V shared memory control operation + * @param cred Subject credential + * @param shmsegptr Pointer to shared memory segment identifier + * @param shmseglabel Label associated with the shared memory segment + * @param cmd Control operation to be performed; see shmctl(2) + * + * Determine whether the subject identified by the credential can perform + * the operation indicated by cmd on the System V shared memory segment + * shmsegptr. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned.
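The shmat check above also receives the caller's flags, so a policy can distinguish read-only attaches from writable ones. A sketch under that assumption, with the label test reduced to a hypothetical example_label_is_restricted() helper; SHM_RDONLY is the standard flag from shmat(2).

#include <sys/errno.h>
#include <sys/shm.h>
#include <security/mac_policy.h>

/* Hypothetical object test; stands in for the policy's real label logic. */
static int example_label_is_restricted(struct label *l);

static int
example_sysvshm_check_shmat(kauth_cred_t cred,
    struct shmid_kernel *shmsegptr, struct label *shmseglabel, int shmflg)
{
#pragma unused(cred, shmsegptr)
	/* Writable mappings of restricted segments are refused; a
	 * SHM_RDONLY attach is considered harmless by this policy. */
	if (example_label_is_restricted(shmseglabel) &&
	    (shmflg & SHM_RDONLY) == 0)
		return (EACCES);
	return (0);
}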
+ */ typedef int mpo_sysvshm_check_shmctl_t( kauth_cred_t cred, struct shmid_kernel *shmsegptr, struct label *shmseglabel, int cmd -); + ); /** - @brief Access control check for unmapping System V shared memory - @param cred Subject credential - @param shmsegptr Pointer to shared memory segment identifier - @param shmseglabel Label associated with the shared memory segment - - Determine whether the subject identified by the credential can unmap - the System V shared memory segment associated with shmsegptr. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for unmapping System V shared memory + * @param cred Subject credential + * @param shmsegptr Pointer to shared memory segment identifier + * @param shmseglabel Label associated with the shared memory segment + * + * Determine whether the subject identified by the credential can unmap + * the System V shared memory segment associated with shmsegptr. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvshm_check_shmdt_t( kauth_cred_t cred, struct shmid_kernel *shmsegptr, struct label *shmseglabel -); + ); /** - @brief Access control check obtaining System V shared memory identifier - @param cred Subject credential - @param shmsegptr Pointer to shared memory segment identifier - @param shmseglabel Label associated with the shared memory segment - @param shmflg shmget flags; see shmget(2) - - Determine whether the subject identified by the credential can get - the System V shared memory segment address. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for obtaining System V shared memory identifier + * @param cred Subject credential + * @param shmsegptr Pointer to shared memory segment identifier + * @param shmseglabel Label associated with the shared memory segment + * @param shmflg shmget flags; see shmget(2) + * + * Determine whether the subject identified by the credential can get + * the System V shared memory segment address. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_sysvshm_check_shmget_t( kauth_cred_t cred, struct shmid_kernel *shmsegptr, struct label *shmseglabel, int shmflg -); + ); /** - @brief Create a System V shared memory region label - @param cred Subject credential - @param shmsegptr The shared memory region being created - @param shmlabel Label to associate with the new shared memory region - - Label a new System V shared memory region. The label was previously - initialized and associated with the shared memory region. At this - time, an appropriate initial label value should be assigned to the - object and stored in shmlabel. -*/ + * @brief Create a System V shared memory region label + * @param cred Subject credential + * @param shmsegptr The shared memory region being created + * @param shmlabel Label to associate with the new shared memory region + * + * Label a new System V shared memory region. The label was previously + * initialized and associated with the shared memory region. At this + * time, an appropriate initial label value should be assigned to the + * object and stored in shmlabel.
+ */ typedef void mpo_sysvshm_label_associate_t( kauth_cred_t cred, struct shmid_kernel *shmsegptr, struct label *shmlabel -); + ); /** - @brief Destroy System V shared memory label - @param label The label to be destroyed - - Destroy a System V shared memory region label. Since the - object is going out of scope, policy modules should free any - internal storage associated with the label so that it may be - destroyed. -*/ + * @brief Destroy System V shared memory label + * @param label The label to be destroyed + * + * Destroy a System V shared memory region label. Since the + * object is going out of scope, policy modules should free any + * internal storage associated with the label so that it may be + * destroyed. + */ typedef void mpo_sysvshm_label_destroy_t( struct label *label -); + ); /** - @brief Initialize System V Shared Memory region label - @param label New label to initialize - - Initialize the label for a newly instantiated System V Shared Memory - region. Sleeping is permitted. -*/ + * @brief Initialize System V Shared Memory region label + * @param label New label to initialize + * + * Initialize the label for a newly instantiated System V Shared Memory + * region. Sleeping is permitted. + */ typedef void mpo_sysvshm_label_init_t( struct label *label -); + ); /** - @brief Clean up a System V Share Memory Region label - @param shmlabel The label to be cleaned - - Clean up a System V Shared Memory Region label. Darwin - pre-allocates these objects at system boot time and re-uses them - rather than allocating new ones. Before the memory regions are - returned to the "free pool", policies can cleanup or overwrite any - information present in the label. -*/ + * @brief Clean up a System V Shared Memory Region label + * @param shmlabel The label to be cleaned + * + * Clean up a System V Shared Memory Region label. Darwin + * pre-allocates these objects at system boot time and re-uses them + * rather than allocating new ones. Before the memory regions are + * returned to the "free pool", policies can clean up or overwrite any + * information present in the label. + */ typedef void mpo_sysvshm_label_recycle_t( struct label *shmlabel -); + ); /** - @brief Access control check for getting a process's task name - @param cred Subject credential - @param p Object process - - Determine whether the subject identified by the credential can get - the passed process's task name port. - This call is used by the task_name_for_pid(2) API. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch, - EPERM for lack of privilege, or ESRCH to hide visibility of the target. -*/ + * @brief Access control check for getting a process's task name + * @param cred Subject credential + * @param p Object process + * + * Determine whether the subject identified by the credential can get + * the passed process's task name port. + * This call is used by the task_name_for_pid(2) API. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch, + * EPERM for lack of privilege, or ESRCH to hide visibility of the target.
+ */ typedef int mpo_proc_check_get_task_name_t( kauth_cred_t cred, struct proc *p -); + ); /** - @brief Access control check for getting a process's task port - @param cred Subject credential - @param p Object process - - Determine whether the subject identified by the credential can get - the passed process's task control port. - This call is used by the task_for_pid(2) API. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch, - EPERM for lack of privilege, or ESRCH to hide visibility of the target. -*/ + * @brief Access control check for getting a process's task port + * @param cred Subject credential + * @param p Object process + * + * Determine whether the subject identified by the credential can get + * the passed process's task control port. + * This call is used by the task_for_pid(2) API. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch, + * EPERM for lack of privilege, or ESRCH to hide visibility of the target. + */ typedef int mpo_proc_check_get_task_t( kauth_cred_t cred, struct proc *p -); + ); /** - @brief Access control check for exposing a process's task port - @param cred Subject credential - @param p Object process - - Determine whether the subject identified by the credential can expose - the passed process's task control port. - This call is used by the accessor APIs like processor_set_tasks() and - processor_set_threads(). - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch, - EPERM for lack of privilege, or ESRCH to hide visibility of the target. -*/ + * @brief Access control check for exposing a process's task port + * @param cred Subject credential + * @param p Object process + * + * Determine whether the subject identified by the credential can expose + * the passed process's task control port. + * This call is used by the accessor APIs like processor_set_tasks() and + * processor_set_threads(). + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch, + * EPERM for lack of privilege, or ESRCH to hide visibility of the target. + */ typedef int mpo_proc_check_expose_task_t( kauth_cred_t cred, struct proc *p -); - -/** - @brief Check whether task's IPC may inherit across process exec - @param p current process instance - @param cur_vp vnode pointer to current instance - @param cur_offset offset of binary of currently executing image - @param img_vp vnode pointer to to be exec'ed image - @param img_offset offset into file which is selected for execution - @param scriptvp vnode pointer of script file if any. - @return Return 0 if access is granted. - EPERM if parent does not have any entitlements. - EACCESS if mismatch in entitlements -*/ + ); + +/** + * @brief Check whether task's IPC may inherit across process exec + * @param p current process instance + * @param cur_vp vnode pointer to current instance + * @param cur_offset offset of binary of currently executing image + * @param img_vp vnode pointer to the image to be exec'ed + * @param img_offset offset into file which is selected for execution + * @param scriptvp vnode pointer of script file if any. + * @return Return 0 if access is granted. + * EPERM if parent does not have any entitlements.
+ * EACCES if mismatch in entitlements + */ typedef int mpo_proc_check_inherit_ipc_ports_t( struct proc *p, struct vnode *cur_vp, @@ -4533,122 +4533,122 @@ typedef int mpo_proc_check_inherit_ipc_ports_t( struct vnode *img_vp, off_t img_offset, struct vnode *scriptvp -); + ); /** - @brief Privilege check for a process to run invalid - @param p Object process - - Determine whether the process may execute even though the system determined - that it is untrusted (eg unidentified / modified code). - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. + * @brief Privilege check for a process to run invalid + * @param p Object process + * + * Determine whether the process may execute even though the system determined + * that it is untrusted (e.g., unidentified or modified code). + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. */ typedef int mpo_proc_check_run_cs_invalid_t( struct proc *p -); + ); /** - @brief Notification a process is finished with exec and will jump to userspace - @param p Object process - - Notifies all MAC policies that a process has completed an exec and is about to - jump to userspace to continue execution. This may result in process termination - via signals. Hook is designed to hold no/minimal locks so it can be used for any - necessary upcalls. + * @brief Notification a process is finished with exec and will jump to userspace + * @param p Object process + * + * Notifies all MAC policies that a process has completed an exec and is about to + * jump to userspace to continue execution. This may result in process termination + * via signals. Hook is designed to hold no/minimal locks so it can be used for any + * necessary upcalls. */ typedef void mpo_proc_notify_exec_complete_t( struct proc *p -); + ); /** - @brief Perform MAC-related events when a thread returns to user space - @param thread Mach (not BSD) thread that is returning - - This entry point permits policy modules to perform MAC-related - events when a thread returns to user space, via a system call - return or trap return. -*/ + * @brief Perform MAC-related events when a thread returns to user space + * @param thread Mach (not BSD) thread that is returning + * + * This entry point permits policy modules to perform MAC-related + * events when a thread returns to user space, via a system call + * return or trap return. + */ typedef void mpo_thread_userret_t( struct thread *thread -); + ); /** - @brief Check vnode access - @param cred Subject credential - @param vp Object vnode - @param label Label for vp - @param acc_mode access(2) flags - - Determine how invocations of access(2) and related calls by the - subject identified by the credential should return when performed - on the passed vnode using the passed access flags. This should - generally be implemented using the same semantics used in - mpo_vnode_check_open. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Check vnode access + * @param cred Subject credential + * @param vp Object vnode + * @param label Label for vp + * @param acc_mode access(2) flags + * + * Determine how invocations of access(2) and related calls by the + * subject identified by the credential should return when performed + * on the passed vnode using the passed access flags.
This should + * generally be implemented using the same semantics used in + * mpo_vnode_check_open. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_access_t( kauth_cred_t cred, struct vnode *vp, struct label *label, int acc_mode -); + ); /** - @brief Access control check for changing working directory - @param cred Subject credential - @param dvp Object; vnode to chdir(2) into - @param dlabel Policy label for dvp - - Determine whether the subject identified by the credential can change - the process working directory to the passed vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for changing working directory + * @param cred Subject credential + * @param dvp Object; vnode to chdir(2) into + * @param dlabel Policy label for dvp + * + * Determine whether the subject identified by the credential can change + * the process working directory to the passed vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_chdir_t( kauth_cred_t cred, struct vnode *dvp, struct label *dlabel -); + ); /** - @brief Access control check for changing root directory - @param cred Subject credential - @param dvp Directory vnode - @param dlabel Policy label associated with dvp - @param cnp Component name for dvp - - Determine whether the subject identified by the credential should be - allowed to chroot(2) into the specified directory (dvp). - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. -*/ + * @brief Access control check for changing root directory + * @param cred Subject credential + * @param dvp Directory vnode + * @param dlabel Policy label associated with dvp + * @param cnp Component name for dvp + * + * Determine whether the subject identified by the credential should be + * allowed to chroot(2) into the specified directory (dvp). + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success. + */ typedef int mpo_vnode_check_chroot_t( kauth_cred_t cred, struct vnode *dvp, struct label *dlabel, struct componentname *cnp -); -/** - @brief Access control check for creating clone - @param cred Subject credential - @param dvp Vnode of directory to create the clone in - @param dlabel Policy label associated with dvp - @param vp Vnode of the file to clone from - @param label Policy label associated with vp - @param cnp Component name for the clone being created - - Determine whether the subject identified by the credential should be - allowed to create a clone of the vnode vp with the name specified by cnp. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. 
-*/ + ); +/** + * @brief Access control check for creating clone + * @param cred Subject credential + * @param dvp Vnode of directory to create the clone in + * @param dlabel Policy label associated with dvp + * @param vp Vnode of the file to clone from + * @param label Policy label associated with vp + * @param cnp Component name for the clone being created + * + * Determine whether the subject identified by the credential should be + * allowed to create a clone of the vnode vp with the name specified by cnp. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_clone_t( kauth_cred_t cred, struct vnode *dvp, @@ -4656,286 +4656,286 @@ typedef int mpo_vnode_check_clone_t( struct vnode *vp, struct label *label, struct componentname *cnp -); -/** - @brief Access control check for creating vnode - @param cred Subject credential - @param dvp Directory vnode - @param dlabel Policy label for dvp - @param cnp Component name for dvp - @param vap vnode attributes for vap - - Determine whether the subject identified by the credential can create - a vnode with the passed parent directory, passed name information, - and passed attribute information. This call may be made in a number of - situations, including as a result of calls to open(2) with O_CREAT, - mknod(2), mkfifo(2), and others. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for creating vnode + * @param cred Subject credential + * @param dvp Directory vnode + * @param dlabel Policy label for dvp + * @param cnp Component name for dvp + * @param vap vnode attributes for the new vnode + * + * Determine whether the subject identified by the credential can create + * a vnode with the passed parent directory, passed name information, + * and passed attribute information. This call may be made in a number of + * situations, including as a result of calls to open(2) with O_CREAT, + * mknod(2), mkfifo(2), and others. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_create_t( kauth_cred_t cred, struct vnode *dvp, struct label *dlabel, struct componentname *cnp, struct vnode_attr *vap -); + ); /** - @brief Access control check for deleting extended attribute - @param cred Subject credential - @param vp Object vnode - @param vlabel Label associated with vp - @param name Extended attribute name - - Determine whether the subject identified by the credential can delete - the extended attribute from the passed vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege.
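The create check above is one of the few that sees the requested attributes before the vnode exists, so a policy can veto properties rather than names. A sketch that refuses to create setuid or setgid objects anywhere, with the usual caveat that a real policy would scope this far more narrowly; VATTR_IS_ACTIVE() and va_mode are the standard vnode_attr accessors from sys/vnode.h.

#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <security/mac_policy.h>

static int
example_vnode_check_create(kauth_cred_t cred, struct vnode *dvp,
    struct label *dlabel, struct componentname *cnp,
    struct vnode_attr *vap)
{
#pragma unused(cred, dvp, dlabel, cnp)
	/* Veto creation requests that ask for a setuid/setgid mode. */
	if (VATTR_IS_ACTIVE(vap, va_mode) &&
	    (vap->va_mode & (S_ISUID | S_ISGID)) != 0)
		return (EPERM);
	return (0);
}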
+ */ typedef int mpo_vnode_check_deleteextattr_t( kauth_cred_t cred, struct vnode *vp, struct label *vlabel, const char *name -); -/** - @brief Access control check for exchanging file data - @param cred Subject credential - @param v1 vnode 1 to swap - @param vl1 Policy label for v1 - @param v2 vnode 2 to swap - @param vl2 Policy label for v2 - - Determine whether the subject identified by the credential can swap the data - in the two supplied vnodes. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for exchanging file data + * @param cred Subject credential + * @param v1 vnode 1 to swap + * @param vl1 Policy label for v1 + * @param v2 vnode 2 to swap + * @param vl2 Policy label for v2 + * + * Determine whether the subject identified by the credential can swap the data + * in the two supplied vnodes. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_exchangedata_t( kauth_cred_t cred, struct vnode *v1, struct label *vl1, struct vnode *v2, struct label *vl2 -); -/** - @brief Access control check for executing the vnode - @param cred Subject credential - @param vp Object vnode to execute - @param scriptvp Script being executed by interpreter, if any. - @param vnodelabel Label corresponding to vp - @param scriptlabel Script vnode label - @param execlabel Userspace provided execution label - @param cnp Component name for file being executed - @param macpolicyattr MAC policy-specific spawn attribute data. - @param macpolicyattrlen Length of policy-specific spawn attribute data. - - Determine whether the subject identified by the credential can execute - the passed vnode. Determination of execute privilege is made separately - from decisions about any process label transitioning event. - - The final label, execlabel, corresponds to a label supplied by a - user space application through the use of the mac_execve system call. - This label will be NULL if the user application uses the the vendor - execve(2) call instead of the MAC Framework mac_execve() call. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for executing the vnode + * @param cred Subject credential + * @param vp Object vnode to execute + * @param scriptvp Script being executed by interpreter, if any. + * @param vnodelabel Label corresponding to vp + * @param scriptlabel Script vnode label + * @param execlabel Userspace provided execution label + * @param cnp Component name for file being executed + * @param macpolicyattr MAC policy-specific spawn attribute data. + * @param macpolicyattrlen Length of policy-specific spawn attribute data. + * + * Determine whether the subject identified by the credential can execute + * the passed vnode. Determination of execute privilege is made separately + * from decisions about any process label transitioning event. + * + * The final label, execlabel, corresponds to a label supplied by a + * user space application through the use of the mac_execve system call. 
+ * This label will be NULL if the user application uses the vendor + * execve(2) call instead of the MAC Framework mac_execve() call. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_exec_t( kauth_cred_t cred, struct vnode *vp, struct vnode *scriptvp, struct label *vnodelabel, struct label *scriptlabel, - struct label *execlabel, /* NULLOK */ + struct label *execlabel, /* NULLOK */ struct componentname *cnp, u_int *csflags, void *macpolicyattr, size_t macpolicyattrlen -); + ); /** - @brief Access control check for fsgetpath - @param cred Subject credential - @param vp Vnode for which a path will be returned - @param label Label associated with the vnode - - Determine whether the subject identified by the credential can get the path - of the given vnode with fsgetpath. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for fsgetpath + * @param cred Subject credential + * @param vp Vnode for which a path will be returned + * @param label Label associated with the vnode + * + * Determine whether the subject identified by the credential can get the path + * of the given vnode with fsgetpath. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_fsgetpath_t( kauth_cred_t cred, struct vnode *vp, struct label *label -); -/** - @brief Access control check for retrieving file attributes - @param active_cred Subject credential - @param file_cred Credential associated with the struct fileproc - @param vp Object vnode - @param vlabel Policy label for vp - @param va Vnode attributes to retrieve - - Determine whether the subject identified by the credential can - get information about the passed vnode. The active_cred hold - the credentials of the subject performing the operation, and - file_cred holds the credentials of the subject that originally - opened the file. This check happens during stat(), lstat(), - fstat(), and getattrlist() syscalls. See <sys/vnode.h> for - definitions of the attributes. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. - - @note Policies may change the contents of va to alter the list of - file attributes returned. -*/ + ); +/** + * @brief Access control check for retrieving file attributes + * @param active_cred Subject credential + * @param file_cred Credential associated with the struct fileproc + * @param vp Object vnode + * @param vlabel Policy label for vp + * @param va Vnode attributes to retrieve + * + * Determine whether the subject identified by the credential can + * get information about the passed vnode. The active_cred holds + * the credentials of the subject performing the operation, and + * file_cred holds the credentials of the subject that originally + * opened the file. This check happens during stat(), lstat(), + * fstat(), and getattrlist() syscalls. See <sys/vnode.h> for + * definitions of the attributes. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + * + * @note Policies may change the contents of va to alter the list of + * file attributes returned.
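The @note above is the unusual part of this hook: unlike most checks, getattr lets a policy edit the request itself rather than only approve or deny it. A sketch of a policy trimming one attribute out of a stat-family request; the "restricted" label test is a hypothetical placeholder, while VATTR_IS_ACTIVE()/VATTR_CLEAR_ACTIVE() and va_uuuid come from sys/vnode.h.

#include <sys/vnode.h>
#include <security/mac_policy.h>

/* Hypothetical object test; stands in for the policy's real label logic. */
static int example_label_is_restricted(struct label *l);

static int
example_vnode_check_getattr(kauth_cred_t active_cred,
    kauth_cred_t file_cred, struct vnode *vp, struct label *vlabel,
    struct vnode_attr *va)
{
#pragma unused(active_cred, file_cred, vp)
	/* Rather than denying outright, drop the owner-UUID attribute
	 * from the request so it is simply not returned. */
	if (example_label_is_restricted(vlabel) &&
	    VATTR_IS_ACTIVE(va, va_uuuid))
		VATTR_CLEAR_ACTIVE(va, va_uuuid);
	return (0);
}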
+ */ typedef int mpo_vnode_check_getattr_t( kauth_cred_t active_cred, kauth_cred_t file_cred, /* NULLOK */ struct vnode *vp, struct label *vlabel, struct vnode_attr *va -); + ); /** - @brief Access control check for retrieving file attributes - @param cred Subject credential - @param vp Object vnode - @param vlabel Policy label for vp - @param alist List of attributes to retrieve - - Determine whether the subject identified by the credential can read - various attributes of the specified vnode, or the filesystem or volume on - which that vnode resides. See <sys/attr.h> for definitions of the - attributes. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. Access control covers all attributes requested - with this call; the security policy is not permitted to change the set of - attributes requested. -*/ + * @brief Access control check for retrieving file attributes + * @param cred Subject credential + * @param vp Object vnode + * @param vlabel Policy label for vp + * @param alist List of attributes to retrieve + * + * Determine whether the subject identified by the credential can read + * various attributes of the specified vnode, or the filesystem or volume on + * which that vnode resides. See <sys/attr.h> for definitions of the + * attributes. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. Access control covers all attributes requested + * with this call; the security policy is not permitted to change the set of + * attributes requested. + */ typedef int mpo_vnode_check_getattrlist_t( kauth_cred_t cred, struct vnode *vp, struct label *vlabel, struct attrlist *alist -); -/** - @brief Access control check for retrieving an extended attribute - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param name Extended attribute name - @param uio I/O structure pointer - - Determine whether the subject identified by the credential can retrieve - the extended attribute from the passed vnode. The uio parameter - will be NULL when the getxattr(2) call has been made with a NULL data - value; this is done to request the size of the data only. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege.
+ */ typedef int mpo_vnode_check_getextattr_t( kauth_cred_t cred, struct vnode *vp, - struct label *label, /* NULLOK */ + struct label *label, /* NULLOK */ const char *name, - struct uio *uio /* NULLOK */ -); -/** - @brief Access control check for ioctl - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param cmd Device-dependent request code; see ioctl(2) - - Determine whether the subject identified by the credential can perform - the ioctl operation indicated by com. - - @warning Since ioctl data is opaque from the standpoint of the MAC - framework, and since ioctls can affect many aspects of system - operation, policies must exercise extreme care when implementing - access control checks. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + struct uio *uio /* NULLOK */ + ); +/** + * @brief Access control check for ioctl + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param cmd Device-dependent request code; see ioctl(2) + * + * Determine whether the subject identified by the credential can perform + * the ioctl operation indicated by cmd. + * + * @warning Since ioctl data is opaque from the standpoint of the MAC + * framework, and since ioctls can affect many aspects of system + * operation, policies must exercise extreme care when implementing + * access control checks. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_ioctl_t( kauth_cred_t cred, struct vnode *vp, struct label *label, unsigned int cmd -); + ); /** - @brief Access control check for vnode kqfilter - @param active_cred Subject credential - @param kn Object knote - @param vp Object vnode - @param label Policy label for vp - - Determine whether the subject identified by the credential can - receive the knote on the passed vnode. - - @return Return 0 if access if granted, otherwise an appropriate - value for errno should be returned. -*/ + * @brief Access control check for vnode kqfilter + * @param active_cred Subject credential + * @param kn Object knote + * @param vp Object vnode + * @param label Policy label for vp + * + * Determine whether the subject identified by the credential can + * receive the knote on the passed vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate + * value for errno should be returned. + */ typedef int mpo_vnode_check_kqfilter_t( kauth_cred_t active_cred, - kauth_cred_t file_cred, /* NULLOK */ + kauth_cred_t file_cred, /* NULLOK */ struct knote *kn, struct vnode *vp, struct label *label -); -/** - @brief Access control check for relabel - @param cred Subject credential - @param vp Object vnode - @param vnodelabel Existing policy label for vp - @param newlabel Policy label update to later be applied to vp - @see mpo_relable_vnode_t - - Determine whether the subject identified by the credential can relabel - the passed vnode to the passed label update. If all policies permit - the label change, the actual relabel entry point (mpo_vnode_label_update) - will follow. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned.
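Given the @warning above, the only information a policy can safely reason about is the command word itself. A sketch that allowlists a few benign commands and otherwise falls back to a label test; example_label_is_restricted() is a hypothetical placeholder, while FIOCLEX, FIONCLEX, and FIONREAD are standard codes from sys/filio.h.

#include <sys/errno.h>
#include <sys/filio.h>
#include <security/mac_policy.h>

static int example_label_is_restricted(struct label *l);	/* hypothetical */

static int
example_vnode_check_ioctl(kauth_cred_t cred, struct vnode *vp,
    struct label *label, unsigned int cmd)
{
#pragma unused(cred, vp)
	switch (cmd) {
	case FIOCLEX:	/* close-on-exec housekeeping: treated as harmless */
	case FIONCLEX:
	case FIONREAD:
		return (0);
	default:
		/* Opaque command against a restricted vnode: refuse. */
		if (example_label_is_restricted(label))
			return (EPERM);
		return (0);
	}
}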
-*/ + ); +/** + * @brief Access control check for relabel + * @param cred Subject credential + * @param vp Object vnode + * @param vnodelabel Existing policy label for vp + * @param newlabel Policy label update to later be applied to vp + * @see mpo_vnode_label_update_t + * + * Determine whether the subject identified by the credential can relabel + * the passed vnode to the passed label update. If all policies permit + * the label change, the actual relabel entry point (mpo_vnode_label_update) + * will follow. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_label_update_t( struct ucred *cred, struct vnode *vp, struct label *vnodelabel, struct label *newlabel -); -/** - @brief Access control check for creating link - @param cred Subject credential - @param dvp Directory vnode - @param dlabel Policy label associated with dvp - @param vp Link destination vnode - @param label Policy label associated with vp - @param cnp Component name for the link being created - - Determine whether the subject identified by the credential should be - allowed to create a link to the vnode vp with the name specified by cnp. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for creating link + * @param cred Subject credential + * @param dvp Directory vnode + * @param dlabel Policy label associated with dvp + * @param vp Link destination vnode + * @param label Policy label associated with vp + * @param cnp Component name for the link being created + * + * Determine whether the subject identified by the credential should be + * allowed to create a link to the vnode vp with the name specified by cnp. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_link_t( kauth_cred_t cred, struct vnode *dvp, @@ -4943,171 +4943,171 @@ typedef int mpo_vnode_check_link_t( struct vnode *vp, struct label *label, struct componentname *cnp -); + ); /** - @brief Access control check for listing extended attributes - @param cred Subject credential - @param vp Object vnode - @param vlabel Policy label associated with vp - - Determine whether the subject identified by the credential can retrieve - a list of named extended attributes from a vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for listing extended attributes + * @param cred Subject credential + * @param vp Object vnode + * @param vlabel Policy label associated with vp + * + * Determine whether the subject identified by the credential can retrieve + * a list of named extended attributes from a vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_listextattr_t( kauth_cred_t cred, struct vnode *vp, struct label *vlabel -); -/** - @brief Access control check for lookup - @param cred Subject credential - @param dvp Directory vnode - @param dlabel Policy label for dvp - @param path Path being looked up - @param pathlen Length of path in bytes - - Determine whether the subject identified by the credential can perform - a lookup of the passed path relative to the passed directory vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned.
Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. - - @note The path may contain untrusted input. If approved, lookup proceeds - on the path; if a component is found to be a symlink then this hook is - called again with the updated path. -*/ + ); +/** + * @brief Access control check for lookup + * @param cred Subject credential + * @param dvp Directory vnode + * @param dlabel Policy label for dvp + * @param path Path being looked up + * @param pathlen Length of path in bytes + * + * Determine whether the subject identified by the credential can perform + * a lookup of the passed path relative to the passed directory vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + * + * @note The path may contain untrusted input. If approved, lookup proceeds + * on the path; if a component is found to be a symlink then this hook is + * called again with the updated path. + */ typedef int mpo_vnode_check_lookup_preflight_t( kauth_cred_t cred, struct vnode *dvp, struct label *dlabel, const char *path, size_t pathlen -); + ); /** - @brief Access control check for lookup - @param cred Subject credential - @param dvp Object vnode - @param dlabel Policy label for dvp - @param cnp Component name being looked up - - Determine whether the subject identified by the credential can perform - a lookup in the passed directory vnode for the passed name (cnp). - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for lookup + * @param cred Subject credential + * @param dvp Object vnode + * @param dlabel Policy label for dvp + * @param cnp Component name being looked up + * + * Determine whether the subject identified by the credential can perform + * a lookup in the passed directory vnode for the passed name (cnp). + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_lookup_t( kauth_cred_t cred, struct vnode *dvp, struct label *dlabel, struct componentname *cnp -); + ); /** - @brief Access control check for open - @param cred Subject credential - @param vp Object vnode - @param label Policy label associated with vp - @param acc_mode open(2) access mode - - Determine whether the subject identified by the credential can perform - an open operation on the passed vnode with the passed access mode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for open + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label associated with vp + * @param acc_mode open(2) access mode + * + * Determine whether the subject identified by the credential can perform + * an open operation on the passed vnode with the passed access mode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. 
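Since the preflight hook above sees a raw, possibly attacker-supplied path and is re-invoked as symlink components are expanded, string matching here is a coarse first gate rather than a final decision. A sketch that refuses lookups under one protected prefix; the prefix itself is purely illustrative.

#include <sys/errno.h>
#include <string.h>
#include <security/mac_policy.h>

static int
example_vnode_check_lookup_preflight(kauth_cred_t cred,
    struct vnode *dvp, struct label *dlabel, const char *path,
    size_t pathlen)
{
#pragma unused(cred, dvp, dlabel)
	static const char prefix[] = "/protected/";	/* illustrative */

	/* pathlen is the byte count of the untrusted path; never assume
	 * NUL placement beyond it. Re-invocation after each symlink
	 * expansion means link-based escapes are re-checked, not missed. */
	if (pathlen >= sizeof(prefix) - 1 &&
	    strncmp(path, prefix, sizeof(prefix) - 1) == 0)
		return (EACCES);
	return (0);
}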
+ */ typedef int mpo_vnode_check_open_t( kauth_cred_t cred, struct vnode *vp, struct label *label, int acc_mode -); + ); /** - @brief Access control check for read - @param active_cred Subject credential - @param file_cred Credential associated with the struct fileproc - @param vp Object vnode - @param label Policy label for vp - - Determine whether the subject identified by the credential can perform - a read operation on the passed vnode. The active_cred hold the credentials - of the subject performing the operation, and file_cred holds the - credentials of the subject that originally opened the file. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for read + * @param active_cred Subject credential + * @param file_cred Credential associated with the struct fileproc + * @param vp Object vnode + * @param label Policy label for vp + * + * Determine whether the subject identified by the credential can perform + * a read operation on the passed vnode. The active_cred holds the credentials + * of the subject performing the operation, and file_cred holds the + * credentials of the subject that originally opened the file. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_read_t( - kauth_cred_t active_cred, /* SUBJECT */ - kauth_cred_t file_cred, /* NULLOK */ - struct vnode *vp, /* OBJECT */ - struct label *label /* LABEL */ -); -/** - @brief Access control check for read directory - @param cred Subject credential - @param dvp Object directory vnode - @param dlabel Policy label for dvp - - Determine whether the subject identified by the credential can - perform a readdir operation on the passed directory vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + kauth_cred_t active_cred, /* SUBJECT */ + kauth_cred_t file_cred, /* NULLOK */ + struct vnode *vp, /* OBJECT */ + struct label *label /* LABEL */ + ); +/** + * @brief Access control check for read directory + * @param cred Subject credential + * @param dvp Object directory vnode + * @param dlabel Policy label for dvp + * + * Determine whether the subject identified by the credential can + * perform a readdir operation on the passed directory vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_readdir_t( - kauth_cred_t cred, /* SUBJECT */ - struct vnode *dvp, /* OBJECT */ - struct label *dlabel /* LABEL */ -); -/** - @brief Access control check for read link - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - - Determine whether the subject identified by the credential can perform - a readlink operation on the passed symlink vnode. This call can be made - in a number of situations, including an explicit readlink call by the - user process, or as a result of an implicit readlink during a name - lookup by the process. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned.
Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + kauth_cred_t cred, /* SUBJECT */ + struct vnode *dvp, /* OBJECT */ + struct label *dlabel /* LABEL */ + ); +/** + * @brief Access control check for read link + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * + * Determine whether the subject identified by the credential can perform + * a readlink operation on the passed symlink vnode. This call can be made + * in a number of situations, including an explicit readlink call by the + * user process, or as a result of an implicit readlink during a name + * lookup by the process. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_readlink_t( kauth_cred_t cred, struct vnode *vp, struct label *label -); -/** - @brief Access control check for rename - @param cred Subject credential - @param dvp Directory vnode - @param dlabel Policy label associated with dvp - @param vp vnode to be renamed - @param label Policy label associated with vp - @param cnp Component name for vp - @param tdvp Destination directory vnode - @param tdlabel Policy label associated with tdvp - @param tvp Overwritten vnode - @param tlabel Policy label associated with tvp - @param tcnp Destination component name - - Determine whether the subject identified by the credential should be allowed - to rename the vnode vp to something else. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for rename + * @param cred Subject credential + * @param dvp Directory vnode + * @param dlabel Policy label associated with dvp + * @param vp vnode to be renamed + * @param label Policy label associated with vp + * @param cnp Component name for vp + * @param tdvp Destination directory vnode + * @param tdlabel Policy label associated with tdvp + * @param tvp Overwritten vnode + * @param tlabel Policy label associated with tvp + * @param tcnp Destination component name + * + * Determine whether the subject identified by the credential should be allowed + * to rename the vnode vp to something else. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_rename_t( kauth_cred_t cred, struct vnode *dvp, @@ -5120,30 +5120,30 @@ typedef int mpo_vnode_check_rename_t( struct vnode *tvp, struct label *tlabel, struct componentname *tcnp -); -/** - @brief Access control check for rename from - @param cred Subject credential - @param dvp Directory vnode - @param dlabel Policy label associated with dvp - @param vp vnode to be renamed - @param label Policy label associated with vp - @param cnp Component name for vp - @see mpo_vnode_check_rename_t - @see mpo_vnode_check_rename_to_t - - Determine whether the subject identified by the credential should be - allowed to rename the vnode vp to something else. - - Due to VFS locking constraints (to make sure proper vnode locks are - held during this entry point), the vnode relabel checks had to be - split into two parts: relabel_from and relabel to. - - This hook is deprecated, mpo_vnode_check_rename_t should be used instead. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. 
-*/ + ); +/** + * @brief Access control check for rename from + * @param cred Subject credential + * @param dvp Directory vnode + * @param dlabel Policy label associated with dvp + * @param vp vnode to be renamed + * @param label Policy label associated with vp + * @param cnp Component name for vp + * @see mpo_vnode_check_rename_t + * @see mpo_vnode_check_rename_to_t + * + * Determine whether the subject identified by the credential should be + * allowed to rename the vnode vp to something else. + * + * Due to VFS locking constraints (to make sure proper vnode locks are + * held during this entry point), the vnode rename checks had to be + * split into two parts: rename_from and rename_to. + * + * This hook is deprecated; mpo_vnode_check_rename_t should be used instead. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_rename_from_t( kauth_cred_t cred, struct vnode *dvp, @@ -5151,268 +5151,268 @@ typedef int mpo_vnode_check_rename_from_t( struct vnode *vp, struct label *label, struct componentname *cnp -); -/** - @brief Access control check for rename to - @param cred Subject credential - @param dvp Directory vnode - @param dlabel Policy label associated with dvp - @param vp Overwritten vnode - @param label Policy label associated with vp - @param samedir Boolean; 1 if the source and destination directories are the same - @param cnp Destination component name - @see mpo_vnode_check_rename_t - @see mpo_vnode_check_rename_from_t - - Determine whether the subject identified by the credential should be - allowed to rename to the vnode vp, into the directory dvp, or to the - name represented by cnp. If there is no existing file to overwrite, - vp and label will be NULL. - - Due to VFS locking constraints (to make sure proper vnode locks are - held during this entry point), the vnode relabel checks had to be - split into two parts: relabel_from and relabel to. - - This hook is deprecated, mpo_vnode_check_rename_t should be used instead. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + ); +/** + * @brief Access control check for rename to + * @param cred Subject credential + * @param dvp Directory vnode + * @param dlabel Policy label associated with dvp + * @param vp Overwritten vnode + * @param label Policy label associated with vp + * @param samedir Boolean; 1 if the source and destination directories are the same + * @param cnp Destination component name + * @see mpo_vnode_check_rename_t + * @see mpo_vnode_check_rename_from_t + * + * Determine whether the subject identified by the credential should be + * allowed to rename to the vnode vp, into the directory dvp, or to the + * name represented by cnp. If there is no existing file to overwrite, + * vp and label will be NULL. + * + * Due to VFS locking constraints (to make sure proper vnode locks are + * held during this entry point), the vnode rename checks had to be + * split into two parts: rename_from and rename_to. + * + * This hook is deprecated; mpo_vnode_check_rename_t should be used instead. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned.
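+ *
+ * Purely as illustration (my_label_is_protected() is a hypothetical
+ * policy-private helper, not framework API), an implementation of this
+ * deprecated hook might refuse to overwrite objects it protects:
+ *
+ * @code
+ * static int
+ * my_vnode_check_rename_to(kauth_cred_t cred, struct vnode *dvp,
+ *     struct label *dlabel, struct vnode *vp, struct label *label,
+ *     int samedir, struct componentname *cnp)
+ * {
+ *         // vp and label are NULL when no existing file is overwritten.
+ *         if (vp != NULL && my_label_is_protected(label))
+ *                 return EACCES;
+ *         return 0;
+ * }
+ * @endcode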
+ */ typedef int mpo_vnode_check_rename_to_t( kauth_cred_t cred, struct vnode *dvp, struct label *dlabel, - struct vnode *vp, /* NULLOK */ - struct label *label, /* NULLOK */ + struct vnode *vp, /* NULLOK */ + struct label *label, /* NULLOK */ int samedir, struct componentname *cnp -); + ); /** - @brief Access control check for revoke - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - - Determine whether the subject identified by the credential can revoke - access to the passed vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for revoke + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * + * Determine whether the subject identified by the credential can revoke + * access to the passed vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_revoke_t( kauth_cred_t cred, struct vnode *vp, struct label *label -); + ); /** - @brief Access control check for searchfs - @param cred Subject credential - @param vp Object vnode - @param vlabel Policy label for vp - @param alist List of attributes used as search criteria - - Determine whether the subject identified by the credential can search the - vnode using the searchfs system call. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for searchfs + * @param cred Subject credential + * @param vp Object vnode + * @param vlabel Policy label for vp + * @param alist List of attributes used as search criteria + * + * Determine whether the subject identified by the credential can search the + * vnode using the searchfs system call. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_searchfs_t( kauth_cred_t cred, struct vnode *vp, struct label *vlabel, struct attrlist *alist -); + ); /** - @brief Access control check for select - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param which The operation selected on: FREAD or FWRITE - - Determine whether the subject identified by the credential can select - the vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. -*/ + * @brief Access control check for select + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param which The operation selected on: FREAD or FWRITE + * + * Determine whether the subject identified by the credential can select + * the vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. + */ typedef int mpo_vnode_check_select_t( kauth_cred_t cred, struct vnode *vp, struct label *label, int which -); + ); /** - @brief Access control check for setting ACL - @param cred Subject credential - @param vp Object node - @param label Policy label for vp - @param acl ACL structure pointer - - Determine whether the subject identified by the credential can set an ACL - on the specified vnode. The ACL pointer will be NULL when removing an ACL. 
- - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for setting ACL + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param acl ACL structure pointer + * + * Determine whether the subject identified by the credential can set an ACL + * on the specified vnode. The ACL pointer will be NULL when removing an ACL. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_setacl_t( kauth_cred_t cred, struct vnode *vp, struct label *label, struct kauth_acl *acl -); + ); /** - @brief Access control check for setting file attributes - @param cred Subject credential - @param vp Object vnode - @param vlabel Policy label for vp - @param alist List of attributes to set - - Determine whether the subject identified by the credential can set - various attributes of the specified vnode, or the filesystem or volume on - which that vnode resides. See <sys/attr.h> for definitions of the - attributes. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. Access control covers all attributes requested - with this call. -*/ + * @brief Access control check for setting file attributes + * @param cred Subject credential + * @param vp Object vnode + * @param vlabel Policy label for vp + * @param alist List of attributes to set + * + * Determine whether the subject identified by the credential can set + * various attributes of the specified vnode, or the filesystem or volume on + * which that vnode resides. See <sys/attr.h> for definitions of the + * attributes. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. Access control covers all attributes requested + * with this call. + */ typedef int mpo_vnode_check_setattrlist_t( kauth_cred_t cred, struct vnode *vp, struct label *vlabel, struct attrlist *alist -); -/** - @brief Access control check for setting extended attribute - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param name Extended attribute name - @param uio I/O structure pointer - - Determine whether the subject identified by the credential can set the - extended attribute of passed name and passed namespace on the passed - vnode. Policies implementing security labels backed into extended - attributes may want to provide additional protections for those - attributes. Additionally, policies should avoid making decisions based - on the data referenced from uio, as there is a potential race condition - between this check and the actual operation. The uio may also be NULL - if a delete operation is being performed. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege.
-*/ + ); +/** + * @brief Access control check for setting extended attribute + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param name Extended attribute name + * @param uio I/O structure pointer + * + * Determine whether the subject identified by the credential can set the + * extended attribute of passed name and passed namespace on the passed + * vnode. Policies implementing security labels backed into extended + * attributes may want to provide additional protections for those + * attributes. Additionally, policies should avoid making decisions based + * on the data referenced from uio, as there is a potential race condition + * between this check and the actual operation. The uio may also be NULL + * if a delete operation is being performed. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_setextattr_t( kauth_cred_t cred, struct vnode *vp, struct label *label, const char *name, struct uio *uio -); + ); /** - @brief Access control check for setting flags - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param flags File flags; see chflags(2) - - Determine whether the subject identified by the credential can set - the passed flags on the passed vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for setting flags + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param flags File flags; see chflags(2) + * + * Determine whether the subject identified by the credential can set + * the passed flags on the passed vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_setflags_t( kauth_cred_t cred, struct vnode *vp, struct label *label, u_long flags -); + ); /** - @brief Access control check for setting mode - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param mode File mode; see chmod(2) - - Determine whether the subject identified by the credential can set - the passed mode on the passed vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for setting mode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param mode File mode; see chmod(2) + * + * Determine whether the subject identified by the credential can set + * the passed mode on the passed vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. 
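+ *
+ * As an illustrative sketch only (my_label_is_protected() is a
+ * hypothetical policy-private helper, not framework API), a policy
+ * could reject mode changes that would add setuid/setgid bits to
+ * vnodes it protects:
+ *
+ * @code
+ * static int
+ * my_vnode_check_setmode(kauth_cred_t cred, struct vnode *vp,
+ *     struct label *label, mode_t mode)
+ * {
+ *         // Refuse to let protected files become setuid or setgid.
+ *         if ((mode & (S_ISUID | S_ISGID)) && my_label_is_protected(label))
+ *                 return EPERM;
+ *         return 0;
+ * }
+ * @endcode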
+ */ typedef int mpo_vnode_check_setmode_t( kauth_cred_t cred, struct vnode *vp, struct label *label, mode_t mode -); -/** - @brief Access control check for setting uid and gid - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param uid User ID - @param gid Group ID - - Determine whether the subject identified by the credential can set - the passed uid and passed gid as file uid and file gid on the passed - vnode. The IDs may be set to (-1) to request no update. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for setting uid and gid + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param uid User ID + * @param gid Group ID + * + * Determine whether the subject identified by the credential can set + * the passed uid and passed gid as file uid and file gid on the passed + * vnode. The IDs may be set to (-1) to request no update. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_setowner_t( kauth_cred_t cred, struct vnode *vp, struct label *label, uid_t uid, gid_t gid -); -/** - @brief Access control check for setting timestamps - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param atime Access time; see utimes(2) - @param mtime Modification time; see utimes(2) - - Determine whether the subject identified by the credential can set - the passed access timestamps on the passed vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for setting timestamps + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param atime Access time; see utimes(2) + * @param mtime Modification time; see utimes(2) + * + * Determine whether the subject identified by the credential can set + * the passed access timestamps on the passed vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_setutimes_t( kauth_cred_t cred, struct vnode *vp, struct label *label, struct timespec atime, struct timespec mtime -); -/** - @brief Access control check after determining the code directory hash - @param vp vnode vnode to combine into proc - @param label label associated with the vnode - @param cpu_type cpu type of the signature being checked - @param cs_blob the code signature to check - @param cs_flags update code signing flags if needed - @param signer_type output parameter for the code signature's signer type - @param flags operational flag to mpo_vnode_check_signature - @param fatal_failure_desc description of fatal failure - @param fatal_failure_desc_len failure description len, failure is fatal if non-0 - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. 
+ ); +/** + * @brief Access control check after determining the code directory hash + * @param vp vnode to combine into proc + * @param label label associated with the vnode + * @param cpu_type cpu type of the signature being checked + * @param cs_blob the code signature to check + * @param cs_flags update code signing flags if needed + * @param signer_type output parameter for the code signature's signer type + * @param flags operational flag to mpo_vnode_check_signature + * @param fatal_failure_desc description of fatal failure + * @param fatal_failure_desc_len failure description len, failure is fatal if non-0 + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. */ typedef int mpo_vnode_check_signature_t( struct vnode *vp, @@ -5423,138 +5423,138 @@ typedef int mpo_vnode_check_signature_t( unsigned int *signer_type, int flags, char **fatal_failure_desc, size_t *fatal_failure_desc_len -); + ); /** - @brief Access control check for stat - @param active_cred Subject credential - @param file_cred Credential associated with the struct fileproc - @param vp Object vnode - @param label Policy label for vp - - Determine whether the subject identified by the credential can stat - the passed vnode. See stat(2) for more information. The active_cred - hold the credentials of the subject performing the operation, and - file_cred holds the credentials of the subject that originally - opened the file. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for stat + * @param active_cred Subject credential + * @param file_cred Credential associated with the struct fileproc + * @param vp Object vnode + * @param label Policy label for vp + * + * Determine whether the subject identified by the credential can stat + * the passed vnode. See stat(2) for more information. The active_cred + * holds the credentials of the subject performing the operation, and + * file_cred holds the credentials of the subject that originally + * opened the file. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_stat_t( struct ucred *active_cred, - struct ucred *file_cred, /* NULLOK */ + struct ucred *file_cred, /* NULLOK */ struct vnode *vp, struct label *label -); + ); /** - @brief Access control check for vnode trigger resolution - @param cred Subject credential - @param dvp Object vnode - @param dlabel Policy label for dvp - @param cnp Component name that triggered resolution - - Determine whether the subject identified by the credential can trigger - resolution of the passed name (cnp) in the passed directory vnode - via an external trigger resolver. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege.
-*/ + * @brief Access control check for vnode trigger resolution + * @param cred Subject credential + * @param dvp Object vnode + * @param dlabel Policy label for dvp + * @param cnp Component name that triggered resolution + * + * Determine whether the subject identified by the credential can trigger + * resolution of the passed name (cnp) in the passed directory vnode + * via an external trigger resolver. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_trigger_resolve_t( kauth_cred_t cred, struct vnode *dvp, struct label *dlabel, struct componentname *cnp -); -/** - @brief Access control check for truncate/ftruncate - @param active_cred Subject credential - @param file_cred Credential associated with the struct fileproc - @param vp Object vnode - @param label Policy label for vp - - Determine whether the subject identified by the credential can - perform a truncate operation on the passed vnode. The active_cred hold - the credentials of the subject performing the operation, and - file_cred holds the credentials of the subject that originally - opened the file. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for truncate/ftruncate + * @param active_cred Subject credential + * @param file_cred Credential associated with the struct fileproc + * @param vp Object vnode + * @param label Policy label for vp + * + * Determine whether the subject identified by the credential can + * perform a truncate operation on the passed vnode. The active_cred holds + * the credentials of the subject performing the operation, and + * file_cred holds the credentials of the subject that originally + * opened the file. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_truncate_t( kauth_cred_t active_cred, - kauth_cred_t file_cred, /* NULLOK */ + kauth_cred_t file_cred, /* NULLOK */ struct vnode *vp, struct label *label -); -/** - @brief Access control check for binding UNIX domain socket - @param cred Subject credential - @param dvp Directory vnode - @param dlabel Policy label for dvp - @param cnp Component name for dvp - @param vap vnode attributes for vap - - Determine whether the subject identified by the credential can perform a - bind operation on a UNIX domain socket with the passed parent directory, - passed name information, and passed attribute information. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for binding UNIX domain socket + * @param cred Subject credential + * @param dvp Directory vnode + * @param dlabel Policy label for dvp + * @param cnp Component name for dvp + * @param vap vnode attributes for vap + * + * Determine whether the subject identified by the credential can perform a + * bind operation on a UNIX domain socket with the passed parent directory, + * passed name information, and passed attribute information.
+ * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_uipc_bind_t( kauth_cred_t cred, struct vnode *dvp, struct label *dlabel, struct componentname *cnp, struct vnode_attr *vap -); + ); /** - @brief Access control check for connecting UNIX domain socket - @param cred Subject credential - @param vp Object vnode - @param label Policy label associated with vp - @param so Socket - - Determine whether the subject identified by the credential can perform a - connect operation on the passed UNIX domain socket vnode. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for connecting UNIX domain socket + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label associated with vp + * @param so Socket + * + * Determine whether the subject identified by the credential can perform a + * connect operation on the passed UNIX domain socket vnode. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_uipc_connect_t( kauth_cred_t cred, struct vnode *vp, struct label *label, socket_t so -); -/** - @brief Access control check for deleting vnode - @param cred Subject credential - @param dvp Parent directory vnode - @param dlabel Policy label for dvp - @param vp Object vnode to delete - @param label Policy label for vp - @param cnp Component name for vp - @see mpo_check_rename_to_t - - Determine whether the subject identified by the credential can delete - a vnode from the passed parent directory and passed name information. - This call may be made in a number of situations, including as a - results of calls to unlink(2) and rmdir(2). Policies implementing - this entry point should also implement mpo_check_rename_to to - authorize deletion of objects as a result of being the target of a rename. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + ); +/** + * @brief Access control check for deleting vnode + * @param cred Subject credential + * @param dvp Parent directory vnode + * @param dlabel Policy label for dvp + * @param vp Object vnode to delete + * @param label Policy label for vp + * @param cnp Component name for vp + * @see mpo_vnode_check_rename_to_t + * + * Determine whether the subject identified by the credential can delete + * a vnode from the passed parent directory and passed name information. + * This call may be made in a number of situations, including as a + * result of calls to unlink(2) and rmdir(2). Policies implementing + * this entry point should also implement mpo_vnode_check_rename_to to + * authorize deletion of objects as a result of being the target of a rename. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege.
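+ *
+ * A minimal sketch of an implementation, assuming a hypothetical
+ * my_label_is_protected() policy-private helper (not framework API):
+ *
+ * @code
+ * static int
+ * my_vnode_check_unlink(kauth_cred_t cred, struct vnode *dvp,
+ *     struct label *dlabel, struct vnode *vp, struct label *label,
+ *     struct componentname *cnp)
+ * {
+ *         // Objects this policy protects may not be removed,
+ *         // regardless of the DAC decision.
+ *         if (my_label_is_protected(label))
+ *                 return EACCES;
+ *         return 0;
+ * }
+ * @endcode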
+ */ typedef int mpo_vnode_check_unlink_t( kauth_cred_t cred, struct vnode *dvp, @@ -5562,43 +5562,43 @@ typedef int mpo_vnode_check_unlink_t( struct vnode *vp, struct label *label, struct componentname *cnp -); + ); /** - @brief Access control check for write - @param active_cred Subject credential - @param file_cred Credential associated with the struct fileproc - @param vp Object vnode - @param label Policy label for vp - - Determine whether the subject identified by the credential can - perform a write operation on the passed vnode. The active_cred hold - the credentials of the subject performing the operation, and - file_cred holds the credentials of the subject that originally - opened the file. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EACCES for label mismatch or - EPERM for lack of privilege. -*/ + * @brief Access control check for write + * @param active_cred Subject credential + * @param file_cred Credential associated with the struct fileproc + * @param vp Object vnode + * @param label Policy label for vp + * + * Determine whether the subject identified by the credential can + * perform a write operation on the passed vnode. The active_cred holds + * the credentials of the subject performing the operation, and + * file_cred holds the credentials of the subject that originally + * opened the file. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EACCES for label mismatch or + * EPERM for lack of privilege. + */ typedef int mpo_vnode_check_write_t( kauth_cred_t active_cred, - kauth_cred_t file_cred, /* NULLOK */ + kauth_cred_t file_cred, /* NULLOK */ struct vnode *vp, struct label *label -); -/** - @brief Associate a vnode with a devfs entry - @param mp Devfs mount point - @param mntlabel Devfs mount point label - @param de Devfs directory entry - @param delabel Label associated with de - @param vp vnode associated with de - @param vlabel Label associated with vp - - Fill in the label (vlabel) for a newly created devfs vnode. The - label is typically derived from the label on the devfs directory - entry or the label on the filesystem, supplied as parameters. -*/ + ); +/** + * @brief Associate a vnode with a devfs entry + * @param mp Devfs mount point + * @param mntlabel Devfs mount point label + * @param de Devfs directory entry + * @param delabel Label associated with de + * @param vp vnode associated with de + * @param vlabel Label associated with vp + * + * Fill in the label (vlabel) for a newly created devfs vnode. The + * label is typically derived from the label on the devfs directory + * entry or the label on the filesystem, supplied as parameters. + */ typedef void mpo_vnode_label_associate_devfs_t( struct mount *mp, struct label *mntlabel, @@ -5606,50 +5606,50 @@ typedef void mpo_vnode_label_associate_devfs_t( struct label *delabel, struct vnode *vp, struct label *vlabel -); + ); /** - @brief Associate a label with a vnode - @param mp File system mount point - @param mntlabel File system mount point label - @param vp Vnode to label - @param vlabel Label associated with vp - - Attempt to retrieve label information for the vnode, vp, from the - file system extended attribute store. The label should be stored in - the supplied vlabel parameter. If a policy cannot retrieve an - extended attribute, sometimes it is acceptible to fallback to using - the mntlabel.
- - If the policy requires vnodes to have a valid label elsewhere it - MUST NOT return other than temporary errors, and must always provide - a valid label of some sort. Returning an error will cause vnode - labeling to be retried at a later access. Failure to handle policy - centric errors internally (corrupt labels etc.) will result in - inaccessible files. - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. -*/ + * @brief Associate a label with a vnode + * @param mp File system mount point + * @param mntlabel File system mount point label + * @param vp Vnode to label + * @param vlabel Label associated with vp + * + * Attempt to retrieve label information for the vnode, vp, from the + * file system extended attribute store. The label should be stored in + * the supplied vlabel parameter. If a policy cannot retrieve an + * extended attribute, sometimes it is acceptable to fall back to using + * the mntlabel. + * + * If the policy requires vnodes to have a valid label elsewhere it + * MUST NOT return anything other than temporary errors, and must always provide + * a valid label of some sort. Returning an error will cause vnode + * labeling to be retried at a later access. Failure to handle policy + * centric errors internally (corrupt labels etc.) will result in + * inaccessible files. + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success. + */ typedef int mpo_vnode_label_associate_extattr_t( struct mount *mp, struct label *mntlabel, struct vnode *vp, struct label *vlabel -); -/** - @brief Associate a file label with a vnode - @param cred User credential - @param mp Fdesc mount point - @param mntlabel Fdesc mount point label - @param fg Fileglob structure - @param label Policy label for fg - @param vp Vnode to label - @param vlabel Label associated with vp - - Associate label information for the vnode, vp, with the label of - the open file descriptor described by fg. - The label should be stored in the supplied vlabel parameter. -*/ + ); +/** + * @brief Associate a file label with a vnode + * @param cred User credential + * @param mp Fdesc mount point + * @param mntlabel Fdesc mount point label + * @param fg Fileglob structure + * @param label Policy label for fg + * @param vp Vnode to label + * @param vlabel Label associated with vp + * + * Associate label information for the vnode, vp, with the label of + * the open file descriptor described by fg. + * The label should be stored in the supplied vlabel parameter. + */ typedef void mpo_vnode_label_associate_file_t( struct ucred *cred, struct mount *mp, @@ -5658,327 +5658,327 @@ typedef void mpo_vnode_label_associate_file_t( struct label *label, struct vnode *vp, struct label *vlabel -); -/** - @brief Associate a pipe label with a vnode - @param cred User credential for the process that opened the pipe - @param cpipe Pipe structure - @param pipelabel Label associated with pipe - @param vp Vnode to label - @param vlabel Label associated with vp - - Associate label information for the vnode, vp, with the label of - the pipe described by the pipe structure cpipe. - The label should be stored in the supplied vlabel parameter.
-*/ + ); +/** + * @brief Associate a pipe label with a vnode + * @param cred User credential for the process that opened the pipe + * @param cpipe Pipe structure + * @param pipelabel Label associated with pipe + * @param vp Vnode to label + * @param vlabel Label associated with vp + * + * Associate label information for the vnode, vp, with the label of + * the pipe described by the pipe structure cpipe. + * The label should be stored in the supplied vlabel parameter. + */ typedef void mpo_vnode_label_associate_pipe_t( struct ucred *cred, struct pipe *cpipe, struct label *pipelabel, struct vnode *vp, struct label *vlabel -); -/** - @brief Associate a POSIX semaphore label with a vnode - @param cred User credential for the process that create psem - @param psem POSIX semaphore structure - @param psemlabel Label associated with psem - @param vp Vnode to label - @param vlabel Label associated with vp - - Associate label information for the vnode, vp, with the label of - the POSIX semaphore described by psem. - The label should be stored in the supplied vlabel parameter. -*/ + ); +/** + * @brief Associate a POSIX semaphore label with a vnode + * @param cred User credential for the process that created psem + * @param psem POSIX semaphore structure + * @param psemlabel Label associated with psem + * @param vp Vnode to label + * @param vlabel Label associated with vp + * + * Associate label information for the vnode, vp, with the label of + * the POSIX semaphore described by psem. + * The label should be stored in the supplied vlabel parameter. + */ typedef void mpo_vnode_label_associate_posixsem_t( struct ucred *cred, struct pseminfo *psem, struct label *psemlabel, struct vnode *vp, struct label *vlabel -); -/** - @brief Associate a POSIX shared memory label with a vnode - @param cred User credential for the process that created pshm - @param pshm POSIX shared memory structure - @param pshmlabel Label associated with pshm - @param vp Vnode to label - @param vlabel Label associated with vp - - Associate label information for the vnode, vp, with the label of - the POSIX shared memory region described by pshm. - The label should be stored in the supplied vlabel parameter. -*/ + ); +/** + * @brief Associate a POSIX shared memory label with a vnode + * @param cred User credential for the process that created pshm + * @param pshm POSIX shared memory structure + * @param pshmlabel Label associated with pshm + * @param vp Vnode to label + * @param vlabel Label associated with vp + * + * Associate label information for the vnode, vp, with the label of + * the POSIX shared memory region described by pshm. + * The label should be stored in the supplied vlabel parameter. + */ typedef void mpo_vnode_label_associate_posixshm_t( struct ucred *cred, struct pshminfo *pshm, struct label *pshmlabel, struct vnode *vp, struct label *vlabel -); + ); /** - @brief Associate a label with a vnode - @param mp File system mount point - @param mntlabel File system mount point label - @param vp Vnode to label - @param vlabel Label associated with vp - - On non-multilabel file systems, set the label for a vnode. The - label will most likely be based on the file system label. -*/ + * @brief Associate a label with a vnode + * @param mp File system mount point + * @param mntlabel File system mount point label + * @param vp Vnode to label + * @param vlabel Label associated with vp + * + * On non-multilabel file systems, set the label for a vnode. The + * label will most likely be based on the file system label.
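+ *
+ * A minimal sketch, assuming a hypothetical my_label_copy() helper
+ * that copies this policy's label data (not framework API):
+ *
+ * @code
+ * static void
+ * my_vnode_label_associate_singlelabel(struct mount *mp,
+ *     struct label *mntlabel, struct vnode *vp, struct label *vlabel)
+ * {
+ *         // Single-label filesystem: every vnode inherits the
+ *         // label of the mount point.
+ *         my_label_copy(mntlabel, vlabel);
+ * }
+ * @endcode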
+ */ typedef void mpo_vnode_label_associate_singlelabel_t( struct mount *mp, struct label *mntlabel, struct vnode *vp, struct label *vlabel -); -/** - @brief Associate a socket label with a vnode - @param cred User credential for the process that opened the socket - @param so Socket structure - @param solabel Label associated with so - @param vp Vnode to label - @param vlabel Label associated with vp - - Associate label information for the vnode, vp, with the label of - the open socket described by the socket structure so. - The label should be stored in the supplied vlabel parameter. -*/ + ); +/** + * @brief Associate a socket label with a vnode + * @param cred User credential for the process that opened the socket + * @param so Socket structure + * @param solabel Label associated with so + * @param vp Vnode to label + * @param vlabel Label associated with vp + * + * Associate label information for the vnode, vp, with the label of + * the open socket described by the socket structure so. + * The label should be stored in the supplied vlabel parameter. + */ typedef void mpo_vnode_label_associate_socket_t( kauth_cred_t cred, socket_t so, struct label *solabel, struct vnode *vp, struct label *vlabel -); + ); /** - @brief Copy a vnode label - @param src Source vnode label - @param dest Destination vnode label - - Copy the vnode label information from src to dest. On Darwin, this - is currently only necessary when executing interpreted scripts, but - will later be used if vnode label externalization cannot be an - atomic operation. -*/ + * @brief Copy a vnode label + * @param src Source vnode label + * @param dest Destination vnode label + * + * Copy the vnode label information from src to dest. On Darwin, this + * is currently only necessary when executing interpreted scripts, but + * will later be used if vnode label externalization cannot be an + * atomic operation. + */ typedef void mpo_vnode_label_copy_t( struct label *src, struct label *dest -); + ); /** - @brief Destroy vnode label - @param label The label to be destroyed - - Destroy a vnode label. Since the object is going out of scope, - policy modules should free any internal storage associated with the - label so that it may be destroyed. -*/ + * @brief Destroy vnode label + * @param label The label to be destroyed + * + * Destroy a vnode label. Since the object is going out of scope, + * policy modules should free any internal storage associated with the + * label so that it may be destroyed. + */ typedef void mpo_vnode_label_destroy_t( struct label *label -); + ); /** - @brief Externalize a vnode label for auditing - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of the label - - Produce an external representation of the label on a vnode suitable for - inclusion in an audit record. An externalized label consists of a text - representation of the label contents that will be added to the audit record - as part of a text token. Policy-agnostic user space tools will display - this externalized version. - - @return 0 on success, return non-zero if an error occurs while - externalizing the label data. 
- -*/ + * @brief Externalize a vnode label for auditing + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of the label + * + * Produce an external representation of the label on a vnode suitable for + * inclusion in an audit record. An externalized label consists of a text + * representation of the label contents that will be added to the audit record + * as part of a text token. Policy-agnostic user space tools will display + * this externalized version. + * + * @return 0 on success, return non-zero if an error occurs while + * externalizing the label data. + * + */ typedef int mpo_vnode_label_externalize_audit_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Externalize a vnode label - @param label Label to be externalized - @param element_name Name of the label namespace for which labels should be - externalized - @param sb String buffer to be filled with a text representation of the label - - Produce an external representation of the label on a vnode. An - externalized label consists of a text representation of the label - contents that can be used with user applications. Policy-agnostic - user space tools will display this externalized version. - - @return 0 on success, return non-zero if an error occurs while - externalizing the label data. - -*/ + * @brief Externalize a vnode label + * @param label Label to be externalized + * @param element_name Name of the label namespace for which labels should be + * externalized + * @param sb String buffer to be filled with a text representation of the label + * + * Produce an external representation of the label on a vnode. An + * externalized label consists of a text representation of the label + * contents that can be used with user applications. Policy-agnostic + * user space tools will display this externalized version. + * + * @return 0 on success, return non-zero if an error occurs while + * externalizing the label data. + * + */ typedef int mpo_vnode_label_externalize_t( struct label *label, char *element_name, struct sbuf *sb -); + ); /** - @brief Initialize vnode label - @param label New label to initialize - - Initialize label storage for use with a newly instantiated vnode, or - for temporary storage associated with the copying in or out of a - vnode label. While it is necessary to allocate space for a - kernel-resident vnode label, it is not yet necessary to link this vnode - with persistent label storage facilities, such as extended attributes. - Sleeping is permitted. -*/ + * @brief Initialize vnode label + * @param label New label to initialize + * + * Initialize label storage for use with a newly instantiated vnode, or + * for temporary storage associated with the copying in or out of a + * vnode label. While it is necessary to allocate space for a + * kernel-resident vnode label, it is not yet necessary to link this vnode + * with persistent label storage facilities, such as extended attributes. + * Sleeping is permitted. + */ typedef void mpo_vnode_label_init_t( struct label *label -); + ); /** - @brief Internalize a vnode label - @param label Label to be internalized - @param element_name Name of the label namespace for which the label should - be internalized - @param element_data Text data to be internalized - - Produce a vnode label from an external representation. 
An - externalized label consists of a text representation of the label - contents that can be used with user applications. Policy-agnostic - user space tools will forward text version to the kernel for - processing by individual policy modules. - - The policy's internalize entry points will be called only if the - policy has registered interest in the label namespace. - - @return 0 on success, Otherwise, return non-zero if an error occurs - while internalizing the label data. -*/ + * @brief Internalize a vnode label + * @param label Label to be internalized + * @param element_name Name of the label namespace for which the label should + * be internalized + * @param element_data Text data to be internalized + * + * Produce a vnode label from an external representation. An + * externalized label consists of a text representation of the label + * contents that can be used with user applications. Policy-agnostic + * user space tools will forward the text version to the kernel for + * processing by individual policy modules. + * + * The policy's internalize entry points will be called only if the + * policy has registered interest in the label namespace. + * + * @return 0 on success. Otherwise, return non-zero if an error occurs + * while internalizing the label data. + */ typedef int mpo_vnode_label_internalize_t( struct label *label, char *element_name, char *element_data -); + ); /** - @brief Clean up a vnode label - @param label The label to be cleaned for re-use - - Clean up a vnode label. Darwin (Tiger, 8.x) allocates vnodes on demand, but - typically never frees them. Before vnodes are placed back on free lists for - re-use, policies can cleanup or overwrite any information present in the label. -*/ + * @brief Clean up a vnode label + * @param label The label to be cleaned for re-use + * + * Clean up a vnode label. Darwin (Tiger, 8.x) allocates vnodes on demand, but + * typically never frees them. Before vnodes are placed back on free lists for + * re-use, policies can clean up or overwrite any information present in the label. + */ typedef void mpo_vnode_label_recycle_t( struct label *label -); + ); /** - @brief Write a label to a extended attribute - @param cred Subject credential - @param vp The vnode for which the label is being stored - @param vlabel Label associated with vp - @param intlabel The new label to store - - Store a new label in the extended attribute corresponding to the - supplied vnode. The policy has already authorized the operation; - this call must be implemented in order to perform the actual - operation. - - @return In the event of an error, an appropriate value for errno - should be returned, otherwise return 0 upon success. - - @warning XXX After examining the extended attribute implementation on - Apple's future release, this entry point may be changed. -*/ + * @brief Write a label to an extended attribute + * @param cred Subject credential + * @param vp The vnode for which the label is being stored + * @param vlabel Label associated with vp + * @param intlabel The new label to store + * + * Store a new label in the extended attribute corresponding to the + * supplied vnode. The policy has already authorized the operation; + * this call must be implemented in order to perform the actual + * operation. + * + * @return In the event of an error, an appropriate value for errno + * should be returned, otherwise return 0 upon success. + * + * @warning XXX After examining the extended attribute implementation on + * Apple's future release, this entry point may be changed.
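+ *
+ * A minimal sketch, assuming hypothetical my_label_externalize(),
+ * my_write_label_xattr() and my_label_copy() helpers and a
+ * MY_LABEL_MAX constant (none of these are framework API):
+ *
+ * @code
+ * static int
+ * my_vnode_label_store(kauth_cred_t cred, struct vnode *vp,
+ *     struct label *vlabel, struct label *intlabel)
+ * {
+ *         char buf[MY_LABEL_MAX];
+ *         int error;
+ *
+ *         // Serialize the new label and persist it in this
+ *         // policy's extended attribute.
+ *         error = my_label_externalize(intlabel, buf, sizeof(buf));
+ *         if (error == 0)
+ *                 error = my_write_label_xattr(vp, buf);
+ *         if (error == 0)
+ *                 my_label_copy(intlabel, vlabel);
+ *         return error;
+ * }
+ * @endcode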
+ */ typedef int mpo_vnode_label_store_t( kauth_cred_t cred, struct vnode *vp, struct label *vlabel, struct label *intlabel -); -/** - @brief Update vnode label from extended attributes - @param mp File system mount point - @param mntlabel Mount point label - @param vp Vnode to label - @param vlabel Label associated with vp - @param name Name of the xattr - @see mpo_vnode_check_setextattr_t - - When an extended attribute is updated via the Vendor attribute management - functions, the MAC vnode label might also require an update. - Policies should first determine if 'name' matches their xattr label - name. If it does, the kernel is has either replaced or removed the - named extended attribute that was previously associated with the - vnode. Normally labels should only be modified via MAC Framework label - management calls, but sometimes the user space components will directly - modify extended attributes. For example, 'cp', 'tar', etc. manage - extended attributes in userspace, not the kernel. - - This entry point is called after the label update has occurred, so - it cannot return a failure. However, the operation is preceded by - the mpo_vnode_check_setextattr() access control check. - - If the vnode label needs to be updated the policy should return - a non-zero value. The vnode label will be marked for re-association - by the framework. -*/ + ); +/** + * @brief Update vnode label from extended attributes + * @param mp File system mount point + * @param mntlabel Mount point label + * @param vp Vnode to label + * @param vlabel Label associated with vp + * @param name Name of the xattr + * @see mpo_vnode_check_setextattr_t + * + * When an extended attribute is updated via the Vendor attribute management + * functions, the MAC vnode label might also require an update. + * Policies should first determine if 'name' matches their xattr label + * name. If it does, the kernel has either replaced or removed the + * named extended attribute that was previously associated with the + * vnode. Normally labels should only be modified via MAC Framework label + * management calls, but sometimes the user space components will directly + * modify extended attributes. For example, 'cp', 'tar', etc. manage + * extended attributes in userspace, not the kernel. + * + * This entry point is called after the label update has occurred, so + * it cannot return a failure. However, the operation is preceded by + * the mpo_vnode_check_setextattr() access control check. + * + * If the vnode label needs to be updated the policy should return + * a non-zero value. The vnode label will be marked for re-association + * by the framework. + */ typedef int mpo_vnode_label_update_extattr_t( struct mount *mp, struct label *mntlabel, struct vnode *vp, struct label *vlabel, const char *name -); -/** - @brief Update a vnode label - @param cred Subject credential - @param vp The vnode to relabel - @param vnodelabel Existing vnode label - @param label New label to replace existing label - @see mpo_vnode_check_label_update_t - - The subject identified by the credential has previously requested - and was authorized to relabel the vnode; this entry point allows - policies to perform the actual relabel operation. Policies should - update vnodelabel using the label stored in the label parameter.
-*/ + * @brief Update a vnode label + * @param cred Subject credential + * @param vp The vnode to relabel + * @param vnodelabel Existing vnode label + * @param label New label to replace existing label + * @see mpo_vnode_check_label_update_t + * + * The subject identified by the credential has previously requested + * and was authorized to relabel the vnode; this entry point allows + * policies to perform the actual relabel operation. Policies should + * update vnodelabel using the label stored in the label parameter. + */ typedef void mpo_vnode_label_update_t( kauth_cred_t cred, struct vnode *vp, struct label *vnodelabel, struct label *label -); + ); /** - @brief Find deatched signatures for a shared library - @param p file trying to find the signature - @param vp The vnode to relabel - @param offset offset in the macho that the signature is requested for (for fat binaries) - @param label Existing vnode label - -*/ + * @brief Find detached signatures for a shared library + * @param p process trying to find the signature + * @param vp The vnode to check + * @param offset offset in the Mach-O that the signature is requested for (for fat binaries) + * @param label Existing vnode label + * + */ typedef int mpo_vnode_find_sigs_t( struct proc *p, struct vnode *vp, off_t offset, struct label *label -); -/** - @brief Create a new vnode, backed by extended attributes - @param cred User credential for the creating process - @param mp File system mount point - @param mntlabel File system mount point label - @param dvp Parent directory vnode - @param dlabel Parent directory vnode label - @param vp Newly created vnode - @param vlabel Label to associate with the new vnode - @param cnp Component name for vp - - Write out the label for the newly created vnode, most likely storing - the results in a file system extended attribute. Most policies will - derive the new vnode label using information from a combination - of the subject (user) credential, the file system label, the parent - directory label, and potentially the path name component. - - @return If the operation succeeds, store the new label in vlabel and - return 0. Otherwise, return an appropriate errno value. -*/ + * @brief Create a new vnode, backed by extended attributes + * @param cred User credential for the creating process + * @param mp File system mount point + * @param mntlabel File system mount point label + * @param dvp Parent directory vnode + * @param dlabel Parent directory vnode label + * @param vp Newly created vnode + * @param vlabel Label to associate with the new vnode + * @param cnp Component name for vp + * + * Write out the label for the newly created vnode, most likely storing + * the results in a file system extended attribute. Most policies will + * derive the new vnode label using information from a combination + * of the subject (user) credential, the file system label, the parent + * directory label, and potentially the path name component. + * + * @return If the operation succeeds, store the new label in vlabel and + * return 0. Otherwise, return an appropriate errno value.
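+ *
+ * A minimal sketch, assuming hypothetical my_label_copy() and
+ * my_store_label_xattr() helpers (not framework API):
+ *
+ * @code
+ * static int
+ * my_vnode_notify_create(kauth_cred_t cred, struct mount *mp,
+ *     struct label *mntlabel, struct vnode *dvp, struct label *dlabel,
+ *     struct vnode *vp, struct label *vlabel, struct componentname *cnp)
+ * {
+ *         // Derive the new object's label from its parent directory
+ *         // and write it out to this policy's extended attribute.
+ *         my_label_copy(dlabel, vlabel);
+ *         return my_store_label_xattr(vp, vlabel);
+ * }
+ * @endcode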
+ */ typedef int mpo_vnode_notify_create_t( kauth_cred_t cred, struct mount *mp, @@ -5988,35 +5988,35 @@ typedef int mpo_vnode_notify_create_t( struct vnode *vp, struct label *vlabel, struct componentname *cnp -); + ); /** - @brief Inform MAC policies that a vnode has been opened - @param cred User credential for the creating process - @param vp vnode opened - @param label Policy label for the vp - @param acc_mode open(2) access mode used - - Inform Mac policies that a vnode have been successfully opened - (passing all MAC polices and DAC). -*/ + * @brief Inform MAC policies that a vnode has been opened + * @param cred User credential for the opening process + * @param vp vnode opened + * @param label Policy label for the vp + * @param acc_mode open(2) access mode used + * + * Inform MAC policies that a vnode has been successfully opened + * (passing all MAC policies and DAC). + */ typedef void mpo_vnode_notify_open_t( kauth_cred_t cred, struct vnode *vp, struct label *label, int acc_mode -); + ); /** - @brief Inform MAC policies that a vnode has been renamed - @param cred User credential for the renaming process - @param vp Vnode that's being renamed - @param label Policy label for vp - @param dvp Parent directory for the destination - @param dlabel Policy label for dvp - @param cnp Component name for the destination - - Inform MAC policies that a vnode has been renamed. + * @brief Inform MAC policies that a vnode has been renamed + * @param cred User credential for the renaming process + * @param vp Vnode that's being renamed + * @param label Policy label for vp + * @param dvp Parent directory for the destination + * @param dlabel Policy label for dvp + * @param cnp Component name for the destination + * + * Inform MAC policies that a vnode has been renamed. */ typedef void mpo_vnode_notify_rename_t( kauth_cred_t cred, @@ -6025,18 +6025,18 @@ typedef void mpo_vnode_notify_rename_t( struct vnode *dvp, struct label *dlabel, struct componentname *cnp -); + ); /** - @brief Inform MAC policies that a vnode has been linked - @param cred User credential for the renaming process - @param dvp Parent directory for the destination - @param dlabel Policy label for dvp - @param vp Vnode that's being linked - @param vlabel Policy label for vp - @param cnp Component name for the destination - - Inform MAC policies that a vnode has been linked. + * @brief Inform MAC policies that a vnode has been linked + * @param cred User credential for the linking process + * @param dvp Parent directory for the destination + * @param dlabel Policy label for dvp + * @param vp Vnode that's being linked + * @param vlabel Policy label for vp + * @param cnp Component name for the destination + * + * Inform MAC policies that a vnode has been linked. */ typedef void mpo_vnode_notify_link_t( kauth_cred_t cred, @@ -6045,237 +6045,237 @@ typedef void mpo_vnode_notify_link_t( struct vnode *dvp, struct label *dlabel, struct vnode *vp, struct label *vlabel, struct componentname *cnp -); + ); /** - @brief Inform MAC policies that an extended attribute has been removed from a vnode - @param cred Subject credential - @param vp Object node - @param label Policy label for vp - @param name Extended attribute name - - Inform MAC policies that an extended attribute has been removed from a vnode.
-*/ + * @brief Inform MAC policies that an extended attribute has been removed from a vnode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param name Extended attribute name + * + * Inform MAC policies that an extended attribute has been removed from a vnode. + */ typedef void mpo_vnode_notify_deleteextattr_t( kauth_cred_t cred, struct vnode *vp, struct label *label, const char *name -); + ); /** - @brief Inform MAC policies that an ACL has been set on a vnode - @param cred Subject credential - @param vp Object node - @param label Policy label for vp - @param acl ACL structure pointer - - Inform MAC policies that an ACL has been set on a vnode. -*/ + * @brief Inform MAC policies that an ACL has been set on a vnode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param acl ACL structure pointer + * + * Inform MAC policies that an ACL has been set on a vnode. + */ typedef void mpo_vnode_notify_setacl_t( kauth_cred_t cred, struct vnode *vp, struct label *label, struct kauth_acl *acl -); + ); /** - @brief Inform MAC policies that an attributes have been set on a vnode - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param alist List of attributes to set - - Inform MAC policies that an attributes have been set on a vnode. -*/ + * @brief Inform MAC policies that attributes have been set on a vnode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param alist List of attributes to set + * + * Inform MAC policies that attributes have been set on a vnode. + */ typedef void mpo_vnode_notify_setattrlist_t( kauth_cred_t cred, struct vnode *vp, struct label *label, struct attrlist *alist -); + ); /** - @brief Inform MAC policies that an extended attribute has been set on a vnode - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param name Extended attribute name - @param uio I/O structure pointer - - Inform MAC policies that an extended attribute has been set on a vnode. -*/ + * @brief Inform MAC policies that an extended attribute has been set on a vnode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param name Extended attribute name + * @param uio I/O structure pointer + * + * Inform MAC policies that an extended attribute has been set on a vnode. + */ typedef void mpo_vnode_notify_setextattr_t( kauth_cred_t cred, struct vnode *vp, struct label *label, const char *name, struct uio *uio -); + ); /** - @brief Inform MAC policies that flags have been set on a vnode - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param flags File flags; see chflags(2) - - Inform MAC policies that flags have been set on a vnode. -*/ + * @brief Inform MAC policies that flags have been set on a vnode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param flags File flags; see chflags(2) + * + * Inform MAC policies that flags have been set on a vnode.
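Notify entry points such as the setflags notification documented above return void and run after the operation has already succeeded, so a policy can only record the event. A purely illustrative sketch (the log prefix is arbitrary; a real policy would more likely update per-label state than log):

static void
example_vnode_notify_setflags(kauth_cred_t cred, struct vnode *vp,
    struct label *label, u_long flags)
{
	/* The chflags(2) bits are already applied; just account for them. */
	printf("example_mac: flags 0x%lx set on vnode %p\n", flags, vp);
}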
+ */ typedef void mpo_vnode_notify_setflags_t( kauth_cred_t cred, struct vnode *vp, struct label *label, u_long flags -); + ); /** - @brief Inform MAC policies that a new mode has been set on a vnode - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param mode File mode; see chmod(2) - - Inform MAC policies that a new mode has been set on a vnode. -*/ + * @brief Inform MAC policies that a new mode has been set on a vnode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param mode File mode; see chmod(2) + * + * Inform MAC policies that a new mode has been set on a vnode. + */ typedef void mpo_vnode_notify_setmode_t( kauth_cred_t cred, struct vnode *vp, struct label *label, mode_t mode -); + ); /** - @brief Inform MAC policies that new uid/gid have been set on a vnode - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param uid User ID - @param gid Group ID - - Inform MAC policies that new uid/gid have been set on a vnode. -*/ + * @brief Inform MAC policies that new uid/gid have been set on a vnode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param uid User ID + * @param gid Group ID + * + * Inform MAC policies that new uid/gid have been set on a vnode. + */ typedef void mpo_vnode_notify_setowner_t( kauth_cred_t cred, struct vnode *vp, struct label *label, uid_t uid, gid_t gid -); + ); /** - @brief Inform MAC policies that new timestamps have been set on a vnode - @param cred Subject credential - @param vp Object vnode - @param label Policy label for vp - @param atime Access time; see utimes(2) - @param mtime Modification time; see utimes(2) - - Inform MAC policies that new timestamps have been set on a vnode. -*/ + * @brief Inform MAC policies that new timestamps have been set on a vnode + * @param cred Subject credential + * @param vp Object vnode + * @param label Policy label for vp + * @param atime Access time; see utimes(2) + * @param mtime Modification time; see utimes(2) + * + * Inform MAC policies that new timestamps have been set on a vnode. + */ typedef void mpo_vnode_notify_setutimes_t( kauth_cred_t cred, struct vnode *vp, struct label *label, struct timespec atime, struct timespec mtime -); + ); /** - @brief Inform MAC policies that a vnode has been truncated - @param cred Subject credential - @param file_cred Credential associated with the struct fileproc - @param vp Object vnode - @param label Policy label for vp - - Inform MAC policies that a vnode has been truncated. -*/ + * @brief Inform MAC policies that a vnode has been truncated + * @param cred Subject credential + * @param file_cred Credential associated with the struct fileproc + * @param vp Object vnode + * @param label Policy label for vp + * + * Inform MAC policies that a vnode has been truncated. + */ typedef void mpo_vnode_notify_truncate_t( kauth_cred_t cred, kauth_cred_t file_cred, struct vnode *vp, struct label *label -); + ); /** - @brief Inform MAC policies that a pty slave has been granted - @param p Responsible process - @param tp tty data structure - @param dev Major and minor numbers of device - @param label Policy label for tp - - Inform MAC policies that a pty slave has been granted. 
-*/ + * @brief Inform MAC policies that a pty slave has been granted + * @param p Responsible process + * @param tp tty data structure + * @param dev Major and minor numbers of device + * @param label Policy label for tp + * + * Inform MAC policies that a pty slave has been granted. + */ typedef void mpo_pty_notify_grant_t( proc_t p, struct tty *tp, dev_t dev, struct label *label -); + ); /** - @brief Inform MAC policies that a pty master has been closed - @param p Responsible process - @param tp tty data structure - @param dev Major and minor numbers of device - @param label Policy label for tp - - Inform MAC policies that a pty master has been closed. -*/ + * @brief Inform MAC policies that a pty master has been closed + * @param p Responsible process + * @param tp tty data structure + * @param dev Major and minor numbers of device + * @param label Policy label for tp + * + * Inform MAC policies that a pty master has been closed. + */ typedef void mpo_pty_notify_close_t( proc_t p, struct tty *tp, dev_t dev, struct label *label -); + ); /** - @brief Access control check for kext loading - @param cred Subject credential - @param identifier Kext identifier - - Determine whether the subject identified by the credential can load the - specified kext. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EPERM for lack of privilege. -*/ + * @brief Access control check for kext loading + * @param cred Subject credential + * @param identifier Kext identifier + * + * Determine whether the subject identified by the credential can load the + * specified kext. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EPERM for lack of privilege. + */ typedef int mpo_kext_check_load_t( kauth_cred_t cred, const char *identifier -); + ); /** - @brief Access control check for kext unloading - @param cred Subject credential - @param identifier Kext identifier - - Determine whether the subject identified by the credential can unload the - specified kext. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EPERM for lack of privilege. -*/ + * @brief Access control check for kext unloading + * @param cred Subject credential + * @param identifier Kext identifier + * + * Determine whether the subject identified by the credential can unload the + * specified kext. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EPERM for lack of privilege. + */ typedef int mpo_kext_check_unload_t( kauth_cred_t cred, const char *identifier -); + ); /** - @brief Access control check for querying information about loaded kexts - @param cred Subject credential - - Determine whether the subject identified by the credential can query - information about loaded kexts. - - @return Return 0 if access is granted, otherwise an appropriate value for - errno should be returned. Suggested failure: EPERM for lack of privilege. -*/ + * @brief Access control check for querying information about loaded kexts + * @param cred Subject credential + * + * Determine whether the subject identified by the credential can query + * information about loaded kexts. + * + * @return Return 0 if access is granted, otherwise an appropriate value for + * errno should be returned. Suggested failure: EPERM for lack of privilege. 
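The kext load/unload/query checks above all follow the same contract: 0 grants access, an errno value (EPERM suggested) denies it. A sketch of the load check, assuming a hypothetical allowlist keyed on a bundle-identifier prefix:

static int
example_kext_check_load(kauth_cred_t cred, const char *identifier)
{
	/* Grant loads only under a (hypothetical) trusted prefix. */
	if (strncmp(identifier, "com.example.", strlen("com.example.")) == 0) {
		return 0;	/* access granted */
	}
	return EPERM;		/* suggested failure for lack of privilege */
}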
+ */ typedef int mpo_kext_check_query_t( kauth_cred_t cred -); + ); /* * Placeholder for future events that may need mac hooks. @@ -6290,78 +6290,78 @@ typedef void mpo_reserved_hook_t(void); */ #define MAC_POLICY_OPS_VERSION 55 /* inc when new reserved slots are taken */ struct mac_policy_ops { - mpo_audit_check_postselect_t *mpo_audit_check_postselect; - mpo_audit_check_preselect_t *mpo_audit_check_preselect; - - mpo_bpfdesc_label_associate_t *mpo_bpfdesc_label_associate; - mpo_bpfdesc_label_destroy_t *mpo_bpfdesc_label_destroy; - mpo_bpfdesc_label_init_t *mpo_bpfdesc_label_init; - mpo_bpfdesc_check_receive_t *mpo_bpfdesc_check_receive; - - mpo_cred_check_label_update_execve_t *mpo_cred_check_label_update_execve; - mpo_cred_check_label_update_t *mpo_cred_check_label_update; - mpo_cred_check_visible_t *mpo_cred_check_visible; - mpo_cred_label_associate_fork_t *mpo_cred_label_associate_fork; - mpo_cred_label_associate_kernel_t *mpo_cred_label_associate_kernel; - mpo_cred_label_associate_t *mpo_cred_label_associate; - mpo_cred_label_associate_user_t *mpo_cred_label_associate_user; - mpo_cred_label_destroy_t *mpo_cred_label_destroy; - mpo_cred_label_externalize_audit_t *mpo_cred_label_externalize_audit; - mpo_cred_label_externalize_t *mpo_cred_label_externalize; - mpo_cred_label_init_t *mpo_cred_label_init; - mpo_cred_label_internalize_t *mpo_cred_label_internalize; - mpo_cred_label_update_execve_t *mpo_cred_label_update_execve; - mpo_cred_label_update_t *mpo_cred_label_update; - - mpo_devfs_label_associate_device_t *mpo_devfs_label_associate_device; - mpo_devfs_label_associate_directory_t *mpo_devfs_label_associate_directory; - mpo_devfs_label_copy_t *mpo_devfs_label_copy; - mpo_devfs_label_destroy_t *mpo_devfs_label_destroy; - mpo_devfs_label_init_t *mpo_devfs_label_init; - mpo_devfs_label_update_t *mpo_devfs_label_update; - - mpo_file_check_change_offset_t *mpo_file_check_change_offset; - mpo_file_check_create_t *mpo_file_check_create; - mpo_file_check_dup_t *mpo_file_check_dup; - mpo_file_check_fcntl_t *mpo_file_check_fcntl; - mpo_file_check_get_offset_t *mpo_file_check_get_offset; - mpo_file_check_get_t *mpo_file_check_get; - mpo_file_check_inherit_t *mpo_file_check_inherit; - mpo_file_check_ioctl_t *mpo_file_check_ioctl; - mpo_file_check_lock_t *mpo_file_check_lock; - mpo_file_check_mmap_downgrade_t *mpo_file_check_mmap_downgrade; - mpo_file_check_mmap_t *mpo_file_check_mmap; - mpo_file_check_receive_t *mpo_file_check_receive; - mpo_file_check_set_t *mpo_file_check_set; - mpo_file_label_init_t *mpo_file_label_init; - mpo_file_label_destroy_t *mpo_file_label_destroy; - mpo_file_label_associate_t *mpo_file_label_associate; - - mpo_ifnet_check_label_update_t *mpo_ifnet_check_label_update; - mpo_ifnet_check_transmit_t *mpo_ifnet_check_transmit; - mpo_ifnet_label_associate_t *mpo_ifnet_label_associate; - mpo_ifnet_label_copy_t *mpo_ifnet_label_copy; - mpo_ifnet_label_destroy_t *mpo_ifnet_label_destroy; - mpo_ifnet_label_externalize_t *mpo_ifnet_label_externalize; - mpo_ifnet_label_init_t *mpo_ifnet_label_init; - mpo_ifnet_label_internalize_t *mpo_ifnet_label_internalize; - mpo_ifnet_label_update_t *mpo_ifnet_label_update; - mpo_ifnet_label_recycle_t *mpo_ifnet_label_recycle; - - mpo_inpcb_check_deliver_t *mpo_inpcb_check_deliver; - mpo_inpcb_label_associate_t *mpo_inpcb_label_associate; - mpo_inpcb_label_destroy_t *mpo_inpcb_label_destroy; - mpo_inpcb_label_init_t *mpo_inpcb_label_init; - mpo_inpcb_label_recycle_t *mpo_inpcb_label_recycle; - mpo_inpcb_label_update_t *mpo_inpcb_label_update; 
- - mpo_iokit_check_device_t *mpo_iokit_check_device; - - mpo_ipq_label_associate_t *mpo_ipq_label_associate; - mpo_ipq_label_compare_t *mpo_ipq_label_compare; - mpo_ipq_label_destroy_t *mpo_ipq_label_destroy; - mpo_ipq_label_init_t *mpo_ipq_label_init; - mpo_ipq_label_update_t *mpo_ipq_label_update; + mpo_audit_check_postselect_t *mpo_audit_check_postselect; + mpo_audit_check_preselect_t *mpo_audit_check_preselect; + + mpo_bpfdesc_label_associate_t *mpo_bpfdesc_label_associate; + mpo_bpfdesc_label_destroy_t *mpo_bpfdesc_label_destroy; + mpo_bpfdesc_label_init_t *mpo_bpfdesc_label_init; + mpo_bpfdesc_check_receive_t *mpo_bpfdesc_check_receive; + + mpo_cred_check_label_update_execve_t *mpo_cred_check_label_update_execve; + mpo_cred_check_label_update_t *mpo_cred_check_label_update; + mpo_cred_check_visible_t *mpo_cred_check_visible; + mpo_cred_label_associate_fork_t *mpo_cred_label_associate_fork; + mpo_cred_label_associate_kernel_t *mpo_cred_label_associate_kernel; + mpo_cred_label_associate_t *mpo_cred_label_associate; + mpo_cred_label_associate_user_t *mpo_cred_label_associate_user; + mpo_cred_label_destroy_t *mpo_cred_label_destroy; + mpo_cred_label_externalize_audit_t *mpo_cred_label_externalize_audit; + mpo_cred_label_externalize_t *mpo_cred_label_externalize; + mpo_cred_label_init_t *mpo_cred_label_init; + mpo_cred_label_internalize_t *mpo_cred_label_internalize; + mpo_cred_label_update_execve_t *mpo_cred_label_update_execve; + mpo_cred_label_update_t *mpo_cred_label_update; + + mpo_devfs_label_associate_device_t *mpo_devfs_label_associate_device; + mpo_devfs_label_associate_directory_t *mpo_devfs_label_associate_directory; + mpo_devfs_label_copy_t *mpo_devfs_label_copy; + mpo_devfs_label_destroy_t *mpo_devfs_label_destroy; + mpo_devfs_label_init_t *mpo_devfs_label_init; + mpo_devfs_label_update_t *mpo_devfs_label_update; + + mpo_file_check_change_offset_t *mpo_file_check_change_offset; + mpo_file_check_create_t *mpo_file_check_create; + mpo_file_check_dup_t *mpo_file_check_dup; + mpo_file_check_fcntl_t *mpo_file_check_fcntl; + mpo_file_check_get_offset_t *mpo_file_check_get_offset; + mpo_file_check_get_t *mpo_file_check_get; + mpo_file_check_inherit_t *mpo_file_check_inherit; + mpo_file_check_ioctl_t *mpo_file_check_ioctl; + mpo_file_check_lock_t *mpo_file_check_lock; + mpo_file_check_mmap_downgrade_t *mpo_file_check_mmap_downgrade; + mpo_file_check_mmap_t *mpo_file_check_mmap; + mpo_file_check_receive_t *mpo_file_check_receive; + mpo_file_check_set_t *mpo_file_check_set; + mpo_file_label_init_t *mpo_file_label_init; + mpo_file_label_destroy_t *mpo_file_label_destroy; + mpo_file_label_associate_t *mpo_file_label_associate; + + mpo_ifnet_check_label_update_t *mpo_ifnet_check_label_update; + mpo_ifnet_check_transmit_t *mpo_ifnet_check_transmit; + mpo_ifnet_label_associate_t *mpo_ifnet_label_associate; + mpo_ifnet_label_copy_t *mpo_ifnet_label_copy; + mpo_ifnet_label_destroy_t *mpo_ifnet_label_destroy; + mpo_ifnet_label_externalize_t *mpo_ifnet_label_externalize; + mpo_ifnet_label_init_t *mpo_ifnet_label_init; + mpo_ifnet_label_internalize_t *mpo_ifnet_label_internalize; + mpo_ifnet_label_update_t *mpo_ifnet_label_update; + mpo_ifnet_label_recycle_t *mpo_ifnet_label_recycle; + + mpo_inpcb_check_deliver_t *mpo_inpcb_check_deliver; + mpo_inpcb_label_associate_t *mpo_inpcb_label_associate; + mpo_inpcb_label_destroy_t *mpo_inpcb_label_destroy; + mpo_inpcb_label_init_t *mpo_inpcb_label_init; + mpo_inpcb_label_recycle_t *mpo_inpcb_label_recycle; + mpo_inpcb_label_update_t 
*mpo_inpcb_label_update; + + mpo_iokit_check_device_t *mpo_iokit_check_device; + + mpo_ipq_label_associate_t *mpo_ipq_label_associate; + mpo_ipq_label_compare_t *mpo_ipq_label_compare; + mpo_ipq_label_destroy_t *mpo_ipq_label_destroy; + mpo_ipq_label_init_t *mpo_ipq_label_init; + mpo_ipq_label_update_t *mpo_ipq_label_update; mpo_file_check_library_validation_t *mpo_file_check_library_validation; mpo_vnode_notify_setacl_t *mpo_vnode_notify_setacl; @@ -6373,547 +6373,547 @@ struct mac_policy_ops { mpo_vnode_notify_setutimes_t *mpo_vnode_notify_setutimes; mpo_vnode_notify_truncate_t *mpo_vnode_notify_truncate; - mpo_mbuf_label_associate_bpfdesc_t *mpo_mbuf_label_associate_bpfdesc; - mpo_mbuf_label_associate_ifnet_t *mpo_mbuf_label_associate_ifnet; - mpo_mbuf_label_associate_inpcb_t *mpo_mbuf_label_associate_inpcb; - mpo_mbuf_label_associate_ipq_t *mpo_mbuf_label_associate_ipq; - mpo_mbuf_label_associate_linklayer_t *mpo_mbuf_label_associate_linklayer; + mpo_mbuf_label_associate_bpfdesc_t *mpo_mbuf_label_associate_bpfdesc; + mpo_mbuf_label_associate_ifnet_t *mpo_mbuf_label_associate_ifnet; + mpo_mbuf_label_associate_inpcb_t *mpo_mbuf_label_associate_inpcb; + mpo_mbuf_label_associate_ipq_t *mpo_mbuf_label_associate_ipq; + mpo_mbuf_label_associate_linklayer_t *mpo_mbuf_label_associate_linklayer; mpo_mbuf_label_associate_multicast_encap_t *mpo_mbuf_label_associate_multicast_encap; - mpo_mbuf_label_associate_netlayer_t *mpo_mbuf_label_associate_netlayer; - mpo_mbuf_label_associate_socket_t *mpo_mbuf_label_associate_socket; - mpo_mbuf_label_copy_t *mpo_mbuf_label_copy; - mpo_mbuf_label_destroy_t *mpo_mbuf_label_destroy; - mpo_mbuf_label_init_t *mpo_mbuf_label_init; - - mpo_mount_check_fsctl_t *mpo_mount_check_fsctl; - mpo_mount_check_getattr_t *mpo_mount_check_getattr; - mpo_mount_check_label_update_t *mpo_mount_check_label_update; - mpo_mount_check_mount_t *mpo_mount_check_mount; - mpo_mount_check_remount_t *mpo_mount_check_remount; - mpo_mount_check_setattr_t *mpo_mount_check_setattr; - mpo_mount_check_stat_t *mpo_mount_check_stat; - mpo_mount_check_umount_t *mpo_mount_check_umount; - mpo_mount_label_associate_t *mpo_mount_label_associate; - mpo_mount_label_destroy_t *mpo_mount_label_destroy; - mpo_mount_label_externalize_t *mpo_mount_label_externalize; - mpo_mount_label_init_t *mpo_mount_label_init; - mpo_mount_label_internalize_t *mpo_mount_label_internalize; - - mpo_netinet_fragment_t *mpo_netinet_fragment; - mpo_netinet_icmp_reply_t *mpo_netinet_icmp_reply; - mpo_netinet_tcp_reply_t *mpo_netinet_tcp_reply; - - mpo_pipe_check_ioctl_t *mpo_pipe_check_ioctl; - mpo_pipe_check_kqfilter_t *mpo_pipe_check_kqfilter; - mpo_pipe_check_label_update_t *mpo_pipe_check_label_update; - mpo_pipe_check_read_t *mpo_pipe_check_read; - mpo_pipe_check_select_t *mpo_pipe_check_select; - mpo_pipe_check_stat_t *mpo_pipe_check_stat; - mpo_pipe_check_write_t *mpo_pipe_check_write; - mpo_pipe_label_associate_t *mpo_pipe_label_associate; - mpo_pipe_label_copy_t *mpo_pipe_label_copy; - mpo_pipe_label_destroy_t *mpo_pipe_label_destroy; - mpo_pipe_label_externalize_t *mpo_pipe_label_externalize; - mpo_pipe_label_init_t *mpo_pipe_label_init; - mpo_pipe_label_internalize_t *mpo_pipe_label_internalize; - mpo_pipe_label_update_t *mpo_pipe_label_update; - - mpo_policy_destroy_t *mpo_policy_destroy; - mpo_policy_init_t *mpo_policy_init; - mpo_policy_initbsd_t *mpo_policy_initbsd; - mpo_policy_syscall_t *mpo_policy_syscall; - - mpo_system_check_sysctlbyname_t *mpo_system_check_sysctlbyname; - mpo_proc_check_inherit_ipc_ports_t 
*mpo_proc_check_inherit_ipc_ports; - mpo_vnode_check_rename_t *mpo_vnode_check_rename; - mpo_kext_check_query_t *mpo_kext_check_query; - mpo_proc_notify_exec_complete_t *mpo_proc_notify_exec_complete; - mpo_reserved_hook_t *mpo_reserved5; - mpo_reserved_hook_t *mpo_reserved6; - mpo_proc_check_expose_task_t *mpo_proc_check_expose_task; - mpo_proc_check_set_host_special_port_t *mpo_proc_check_set_host_special_port; + mpo_mbuf_label_associate_netlayer_t *mpo_mbuf_label_associate_netlayer; + mpo_mbuf_label_associate_socket_t *mpo_mbuf_label_associate_socket; + mpo_mbuf_label_copy_t *mpo_mbuf_label_copy; + mpo_mbuf_label_destroy_t *mpo_mbuf_label_destroy; + mpo_mbuf_label_init_t *mpo_mbuf_label_init; + + mpo_mount_check_fsctl_t *mpo_mount_check_fsctl; + mpo_mount_check_getattr_t *mpo_mount_check_getattr; + mpo_mount_check_label_update_t *mpo_mount_check_label_update; + mpo_mount_check_mount_t *mpo_mount_check_mount; + mpo_mount_check_remount_t *mpo_mount_check_remount; + mpo_mount_check_setattr_t *mpo_mount_check_setattr; + mpo_mount_check_stat_t *mpo_mount_check_stat; + mpo_mount_check_umount_t *mpo_mount_check_umount; + mpo_mount_label_associate_t *mpo_mount_label_associate; + mpo_mount_label_destroy_t *mpo_mount_label_destroy; + mpo_mount_label_externalize_t *mpo_mount_label_externalize; + mpo_mount_label_init_t *mpo_mount_label_init; + mpo_mount_label_internalize_t *mpo_mount_label_internalize; + + mpo_netinet_fragment_t *mpo_netinet_fragment; + mpo_netinet_icmp_reply_t *mpo_netinet_icmp_reply; + mpo_netinet_tcp_reply_t *mpo_netinet_tcp_reply; + + mpo_pipe_check_ioctl_t *mpo_pipe_check_ioctl; + mpo_pipe_check_kqfilter_t *mpo_pipe_check_kqfilter; + mpo_pipe_check_label_update_t *mpo_pipe_check_label_update; + mpo_pipe_check_read_t *mpo_pipe_check_read; + mpo_pipe_check_select_t *mpo_pipe_check_select; + mpo_pipe_check_stat_t *mpo_pipe_check_stat; + mpo_pipe_check_write_t *mpo_pipe_check_write; + mpo_pipe_label_associate_t *mpo_pipe_label_associate; + mpo_pipe_label_copy_t *mpo_pipe_label_copy; + mpo_pipe_label_destroy_t *mpo_pipe_label_destroy; + mpo_pipe_label_externalize_t *mpo_pipe_label_externalize; + mpo_pipe_label_init_t *mpo_pipe_label_init; + mpo_pipe_label_internalize_t *mpo_pipe_label_internalize; + mpo_pipe_label_update_t *mpo_pipe_label_update; + + mpo_policy_destroy_t *mpo_policy_destroy; + mpo_policy_init_t *mpo_policy_init; + mpo_policy_initbsd_t *mpo_policy_initbsd; + mpo_policy_syscall_t *mpo_policy_syscall; + + mpo_system_check_sysctlbyname_t *mpo_system_check_sysctlbyname; + mpo_proc_check_inherit_ipc_ports_t *mpo_proc_check_inherit_ipc_ports; + mpo_vnode_check_rename_t *mpo_vnode_check_rename; + mpo_kext_check_query_t *mpo_kext_check_query; + mpo_proc_notify_exec_complete_t *mpo_proc_notify_exec_complete; + mpo_reserved_hook_t *mpo_reserved5; + mpo_reserved_hook_t *mpo_reserved6; + mpo_proc_check_expose_task_t *mpo_proc_check_expose_task; + mpo_proc_check_set_host_special_port_t *mpo_proc_check_set_host_special_port; mpo_proc_check_set_host_exception_port_t *mpo_proc_check_set_host_exception_port; - mpo_exc_action_check_exception_send_t *mpo_exc_action_check_exception_send; - mpo_exc_action_label_associate_t *mpo_exc_action_label_associate; - mpo_exc_action_label_populate_t *mpo_exc_action_label_populate; - mpo_exc_action_label_destroy_t *mpo_exc_action_label_destroy; - mpo_exc_action_label_init_t *mpo_exc_action_label_init; - mpo_exc_action_label_update_t *mpo_exc_action_label_update; - - mpo_vnode_check_trigger_resolve_t *mpo_vnode_check_trigger_resolve; - 
mpo_reserved_hook_t *mpo_reserved1; - mpo_reserved_hook_t *mpo_reserved2; - mpo_reserved_hook_t *mpo_reserved3; - mpo_skywalk_flow_check_connect_t *mpo_skywalk_flow_check_connect; - mpo_skywalk_flow_check_listen_t *mpo_skywalk_flow_check_listen; - - mpo_posixsem_check_create_t *mpo_posixsem_check_create; - mpo_posixsem_check_open_t *mpo_posixsem_check_open; - mpo_posixsem_check_post_t *mpo_posixsem_check_post; - mpo_posixsem_check_unlink_t *mpo_posixsem_check_unlink; - mpo_posixsem_check_wait_t *mpo_posixsem_check_wait; - mpo_posixsem_label_associate_t *mpo_posixsem_label_associate; - mpo_posixsem_label_destroy_t *mpo_posixsem_label_destroy; - mpo_posixsem_label_init_t *mpo_posixsem_label_init; - mpo_posixshm_check_create_t *mpo_posixshm_check_create; - mpo_posixshm_check_mmap_t *mpo_posixshm_check_mmap; - mpo_posixshm_check_open_t *mpo_posixshm_check_open; - mpo_posixshm_check_stat_t *mpo_posixshm_check_stat; - mpo_posixshm_check_truncate_t *mpo_posixshm_check_truncate; - mpo_posixshm_check_unlink_t *mpo_posixshm_check_unlink; - mpo_posixshm_label_associate_t *mpo_posixshm_label_associate; - mpo_posixshm_label_destroy_t *mpo_posixshm_label_destroy; - mpo_posixshm_label_init_t *mpo_posixshm_label_init; - - mpo_proc_check_debug_t *mpo_proc_check_debug; - mpo_proc_check_fork_t *mpo_proc_check_fork; - mpo_proc_check_get_task_name_t *mpo_proc_check_get_task_name; - mpo_proc_check_get_task_t *mpo_proc_check_get_task; - mpo_proc_check_getaudit_t *mpo_proc_check_getaudit; - mpo_proc_check_getauid_t *mpo_proc_check_getauid; - mpo_proc_check_getlcid_t *mpo_proc_check_getlcid; - mpo_proc_check_mprotect_t *mpo_proc_check_mprotect; - mpo_proc_check_sched_t *mpo_proc_check_sched; - mpo_proc_check_setaudit_t *mpo_proc_check_setaudit; - mpo_proc_check_setauid_t *mpo_proc_check_setauid; - mpo_proc_check_setlcid_t *mpo_proc_check_setlcid; - mpo_proc_check_signal_t *mpo_proc_check_signal; - mpo_proc_check_wait_t *mpo_proc_check_wait; - mpo_proc_label_destroy_t *mpo_proc_label_destroy; - mpo_proc_label_init_t *mpo_proc_label_init; - - mpo_socket_check_accept_t *mpo_socket_check_accept; - mpo_socket_check_accepted_t *mpo_socket_check_accepted; - mpo_socket_check_bind_t *mpo_socket_check_bind; - mpo_socket_check_connect_t *mpo_socket_check_connect; - mpo_socket_check_create_t *mpo_socket_check_create; - mpo_socket_check_deliver_t *mpo_socket_check_deliver; - mpo_socket_check_kqfilter_t *mpo_socket_check_kqfilter; - mpo_socket_check_label_update_t *mpo_socket_check_label_update; - mpo_socket_check_listen_t *mpo_socket_check_listen; - mpo_socket_check_receive_t *mpo_socket_check_receive; - mpo_socket_check_received_t *mpo_socket_check_received; - mpo_socket_check_select_t *mpo_socket_check_select; - mpo_socket_check_send_t *mpo_socket_check_send; - mpo_socket_check_stat_t *mpo_socket_check_stat; - mpo_socket_check_setsockopt_t *mpo_socket_check_setsockopt; - mpo_socket_check_getsockopt_t *mpo_socket_check_getsockopt; - mpo_socket_label_associate_accept_t *mpo_socket_label_associate_accept; - mpo_socket_label_associate_t *mpo_socket_label_associate; - mpo_socket_label_copy_t *mpo_socket_label_copy; - mpo_socket_label_destroy_t *mpo_socket_label_destroy; - mpo_socket_label_externalize_t *mpo_socket_label_externalize; - mpo_socket_label_init_t *mpo_socket_label_init; - mpo_socket_label_internalize_t *mpo_socket_label_internalize; - mpo_socket_label_update_t *mpo_socket_label_update; - - mpo_socketpeer_label_associate_mbuf_t *mpo_socketpeer_label_associate_mbuf; - mpo_socketpeer_label_associate_socket_t 
*mpo_socketpeer_label_associate_socket; - mpo_socketpeer_label_destroy_t *mpo_socketpeer_label_destroy; - mpo_socketpeer_label_externalize_t *mpo_socketpeer_label_externalize; - mpo_socketpeer_label_init_t *mpo_socketpeer_label_init; - - mpo_system_check_acct_t *mpo_system_check_acct; - mpo_system_check_audit_t *mpo_system_check_audit; - mpo_system_check_auditctl_t *mpo_system_check_auditctl; - mpo_system_check_auditon_t *mpo_system_check_auditon; - mpo_system_check_host_priv_t *mpo_system_check_host_priv; - mpo_system_check_nfsd_t *mpo_system_check_nfsd; - mpo_system_check_reboot_t *mpo_system_check_reboot; - mpo_system_check_settime_t *mpo_system_check_settime; - mpo_system_check_swapoff_t *mpo_system_check_swapoff; - mpo_system_check_swapon_t *mpo_system_check_swapon; - mpo_socket_check_ioctl_t *mpo_socket_check_ioctl; - - mpo_sysvmsg_label_associate_t *mpo_sysvmsg_label_associate; - mpo_sysvmsg_label_destroy_t *mpo_sysvmsg_label_destroy; - mpo_sysvmsg_label_init_t *mpo_sysvmsg_label_init; - mpo_sysvmsg_label_recycle_t *mpo_sysvmsg_label_recycle; - mpo_sysvmsq_check_enqueue_t *mpo_sysvmsq_check_enqueue; - mpo_sysvmsq_check_msgrcv_t *mpo_sysvmsq_check_msgrcv; - mpo_sysvmsq_check_msgrmid_t *mpo_sysvmsq_check_msgrmid; - mpo_sysvmsq_check_msqctl_t *mpo_sysvmsq_check_msqctl; - mpo_sysvmsq_check_msqget_t *mpo_sysvmsq_check_msqget; - mpo_sysvmsq_check_msqrcv_t *mpo_sysvmsq_check_msqrcv; - mpo_sysvmsq_check_msqsnd_t *mpo_sysvmsq_check_msqsnd; - mpo_sysvmsq_label_associate_t *mpo_sysvmsq_label_associate; - mpo_sysvmsq_label_destroy_t *mpo_sysvmsq_label_destroy; - mpo_sysvmsq_label_init_t *mpo_sysvmsq_label_init; - mpo_sysvmsq_label_recycle_t *mpo_sysvmsq_label_recycle; - mpo_sysvsem_check_semctl_t *mpo_sysvsem_check_semctl; - mpo_sysvsem_check_semget_t *mpo_sysvsem_check_semget; - mpo_sysvsem_check_semop_t *mpo_sysvsem_check_semop; - mpo_sysvsem_label_associate_t *mpo_sysvsem_label_associate; - mpo_sysvsem_label_destroy_t *mpo_sysvsem_label_destroy; - mpo_sysvsem_label_init_t *mpo_sysvsem_label_init; - mpo_sysvsem_label_recycle_t *mpo_sysvsem_label_recycle; - mpo_sysvshm_check_shmat_t *mpo_sysvshm_check_shmat; - mpo_sysvshm_check_shmctl_t *mpo_sysvshm_check_shmctl; - mpo_sysvshm_check_shmdt_t *mpo_sysvshm_check_shmdt; - mpo_sysvshm_check_shmget_t *mpo_sysvshm_check_shmget; - mpo_sysvshm_label_associate_t *mpo_sysvshm_label_associate; - mpo_sysvshm_label_destroy_t *mpo_sysvshm_label_destroy; - mpo_sysvshm_label_init_t *mpo_sysvshm_label_init; - mpo_sysvshm_label_recycle_t *mpo_sysvshm_label_recycle; - - mpo_proc_notify_exit_t *mpo_proc_notify_exit; - mpo_mount_check_snapshot_revert_t *mpo_mount_check_snapshot_revert; - mpo_vnode_check_getattr_t *mpo_vnode_check_getattr; - mpo_mount_check_snapshot_create_t *mpo_mount_check_snapshot_create; - mpo_mount_check_snapshot_delete_t *mpo_mount_check_snapshot_delete; - mpo_vnode_check_clone_t *mpo_vnode_check_clone; - mpo_proc_check_get_cs_info_t *mpo_proc_check_get_cs_info; - mpo_proc_check_set_cs_info_t *mpo_proc_check_set_cs_info; - - mpo_iokit_check_hid_control_t *mpo_iokit_check_hid_control; - - mpo_vnode_check_access_t *mpo_vnode_check_access; - mpo_vnode_check_chdir_t *mpo_vnode_check_chdir; - mpo_vnode_check_chroot_t *mpo_vnode_check_chroot; - mpo_vnode_check_create_t *mpo_vnode_check_create; - mpo_vnode_check_deleteextattr_t *mpo_vnode_check_deleteextattr; - mpo_vnode_check_exchangedata_t *mpo_vnode_check_exchangedata; - mpo_vnode_check_exec_t *mpo_vnode_check_exec; - mpo_vnode_check_getattrlist_t *mpo_vnode_check_getattrlist; - 
mpo_vnode_check_getextattr_t *mpo_vnode_check_getextattr; - mpo_vnode_check_ioctl_t *mpo_vnode_check_ioctl; - mpo_vnode_check_kqfilter_t *mpo_vnode_check_kqfilter; - mpo_vnode_check_label_update_t *mpo_vnode_check_label_update; - mpo_vnode_check_link_t *mpo_vnode_check_link; - mpo_vnode_check_listextattr_t *mpo_vnode_check_listextattr; - mpo_vnode_check_lookup_t *mpo_vnode_check_lookup; - mpo_vnode_check_open_t *mpo_vnode_check_open; - mpo_vnode_check_read_t *mpo_vnode_check_read; - mpo_vnode_check_readdir_t *mpo_vnode_check_readdir; - mpo_vnode_check_readlink_t *mpo_vnode_check_readlink; - mpo_vnode_check_rename_from_t *mpo_vnode_check_rename_from; - mpo_vnode_check_rename_to_t *mpo_vnode_check_rename_to; - mpo_vnode_check_revoke_t *mpo_vnode_check_revoke; - mpo_vnode_check_select_t *mpo_vnode_check_select; - mpo_vnode_check_setattrlist_t *mpo_vnode_check_setattrlist; - mpo_vnode_check_setextattr_t *mpo_vnode_check_setextattr; - mpo_vnode_check_setflags_t *mpo_vnode_check_setflags; - mpo_vnode_check_setmode_t *mpo_vnode_check_setmode; - mpo_vnode_check_setowner_t *mpo_vnode_check_setowner; - mpo_vnode_check_setutimes_t *mpo_vnode_check_setutimes; - mpo_vnode_check_stat_t *mpo_vnode_check_stat; - mpo_vnode_check_truncate_t *mpo_vnode_check_truncate; - mpo_vnode_check_unlink_t *mpo_vnode_check_unlink; - mpo_vnode_check_write_t *mpo_vnode_check_write; - mpo_vnode_label_associate_devfs_t *mpo_vnode_label_associate_devfs; - mpo_vnode_label_associate_extattr_t *mpo_vnode_label_associate_extattr; - mpo_vnode_label_associate_file_t *mpo_vnode_label_associate_file; - mpo_vnode_label_associate_pipe_t *mpo_vnode_label_associate_pipe; - mpo_vnode_label_associate_posixsem_t *mpo_vnode_label_associate_posixsem; - mpo_vnode_label_associate_posixshm_t *mpo_vnode_label_associate_posixshm; - mpo_vnode_label_associate_singlelabel_t *mpo_vnode_label_associate_singlelabel; - mpo_vnode_label_associate_socket_t *mpo_vnode_label_associate_socket; - mpo_vnode_label_copy_t *mpo_vnode_label_copy; - mpo_vnode_label_destroy_t *mpo_vnode_label_destroy; - mpo_vnode_label_externalize_audit_t *mpo_vnode_label_externalize_audit; - mpo_vnode_label_externalize_t *mpo_vnode_label_externalize; - mpo_vnode_label_init_t *mpo_vnode_label_init; - mpo_vnode_label_internalize_t *mpo_vnode_label_internalize; - mpo_vnode_label_recycle_t *mpo_vnode_label_recycle; - mpo_vnode_label_store_t *mpo_vnode_label_store; - mpo_vnode_label_update_extattr_t *mpo_vnode_label_update_extattr; - mpo_vnode_label_update_t *mpo_vnode_label_update; - mpo_vnode_notify_create_t *mpo_vnode_notify_create; - mpo_vnode_check_signature_t *mpo_vnode_check_signature; - mpo_vnode_check_uipc_bind_t *mpo_vnode_check_uipc_bind; - mpo_vnode_check_uipc_connect_t *mpo_vnode_check_uipc_connect; - - mpo_proc_check_run_cs_invalid_t *mpo_proc_check_run_cs_invalid; - mpo_proc_check_suspend_resume_t *mpo_proc_check_suspend_resume; - - mpo_thread_userret_t *mpo_thread_userret; - - mpo_iokit_check_set_properties_t *mpo_iokit_check_set_properties; - - mpo_system_check_chud_t *mpo_system_check_chud; - - mpo_vnode_check_searchfs_t *mpo_vnode_check_searchfs; - - mpo_priv_check_t *mpo_priv_check; - mpo_priv_grant_t *mpo_priv_grant; - - mpo_proc_check_map_anon_t *mpo_proc_check_map_anon; - - mpo_vnode_check_fsgetpath_t *mpo_vnode_check_fsgetpath; - - mpo_iokit_check_open_t *mpo_iokit_check_open; - - mpo_proc_check_ledger_t *mpo_proc_check_ledger; - - mpo_vnode_notify_rename_t *mpo_vnode_notify_rename; - - mpo_vnode_check_setacl_t *mpo_vnode_check_setacl; + 
mpo_exc_action_check_exception_send_t *mpo_exc_action_check_exception_send; + mpo_exc_action_label_associate_t *mpo_exc_action_label_associate; + mpo_exc_action_label_populate_t *mpo_exc_action_label_populate; + mpo_exc_action_label_destroy_t *mpo_exc_action_label_destroy; + mpo_exc_action_label_init_t *mpo_exc_action_label_init; + mpo_exc_action_label_update_t *mpo_exc_action_label_update; + + mpo_vnode_check_trigger_resolve_t *mpo_vnode_check_trigger_resolve; + mpo_reserved_hook_t *mpo_reserved1; + mpo_reserved_hook_t *mpo_reserved2; + mpo_reserved_hook_t *mpo_reserved3; + mpo_skywalk_flow_check_connect_t *mpo_skywalk_flow_check_connect; + mpo_skywalk_flow_check_listen_t *mpo_skywalk_flow_check_listen; + + mpo_posixsem_check_create_t *mpo_posixsem_check_create; + mpo_posixsem_check_open_t *mpo_posixsem_check_open; + mpo_posixsem_check_post_t *mpo_posixsem_check_post; + mpo_posixsem_check_unlink_t *mpo_posixsem_check_unlink; + mpo_posixsem_check_wait_t *mpo_posixsem_check_wait; + mpo_posixsem_label_associate_t *mpo_posixsem_label_associate; + mpo_posixsem_label_destroy_t *mpo_posixsem_label_destroy; + mpo_posixsem_label_init_t *mpo_posixsem_label_init; + mpo_posixshm_check_create_t *mpo_posixshm_check_create; + mpo_posixshm_check_mmap_t *mpo_posixshm_check_mmap; + mpo_posixshm_check_open_t *mpo_posixshm_check_open; + mpo_posixshm_check_stat_t *mpo_posixshm_check_stat; + mpo_posixshm_check_truncate_t *mpo_posixshm_check_truncate; + mpo_posixshm_check_unlink_t *mpo_posixshm_check_unlink; + mpo_posixshm_label_associate_t *mpo_posixshm_label_associate; + mpo_posixshm_label_destroy_t *mpo_posixshm_label_destroy; + mpo_posixshm_label_init_t *mpo_posixshm_label_init; + + mpo_proc_check_debug_t *mpo_proc_check_debug; + mpo_proc_check_fork_t *mpo_proc_check_fork; + mpo_proc_check_get_task_name_t *mpo_proc_check_get_task_name; + mpo_proc_check_get_task_t *mpo_proc_check_get_task; + mpo_proc_check_getaudit_t *mpo_proc_check_getaudit; + mpo_proc_check_getauid_t *mpo_proc_check_getauid; + mpo_proc_check_getlcid_t *mpo_proc_check_getlcid; + mpo_proc_check_mprotect_t *mpo_proc_check_mprotect; + mpo_proc_check_sched_t *mpo_proc_check_sched; + mpo_proc_check_setaudit_t *mpo_proc_check_setaudit; + mpo_proc_check_setauid_t *mpo_proc_check_setauid; + mpo_proc_check_setlcid_t *mpo_proc_check_setlcid; + mpo_proc_check_signal_t *mpo_proc_check_signal; + mpo_proc_check_wait_t *mpo_proc_check_wait; + mpo_proc_label_destroy_t *mpo_proc_label_destroy; + mpo_proc_label_init_t *mpo_proc_label_init; + + mpo_socket_check_accept_t *mpo_socket_check_accept; + mpo_socket_check_accepted_t *mpo_socket_check_accepted; + mpo_socket_check_bind_t *mpo_socket_check_bind; + mpo_socket_check_connect_t *mpo_socket_check_connect; + mpo_socket_check_create_t *mpo_socket_check_create; + mpo_socket_check_deliver_t *mpo_socket_check_deliver; + mpo_socket_check_kqfilter_t *mpo_socket_check_kqfilter; + mpo_socket_check_label_update_t *mpo_socket_check_label_update; + mpo_socket_check_listen_t *mpo_socket_check_listen; + mpo_socket_check_receive_t *mpo_socket_check_receive; + mpo_socket_check_received_t *mpo_socket_check_received; + mpo_socket_check_select_t *mpo_socket_check_select; + mpo_socket_check_send_t *mpo_socket_check_send; + mpo_socket_check_stat_t *mpo_socket_check_stat; + mpo_socket_check_setsockopt_t *mpo_socket_check_setsockopt; + mpo_socket_check_getsockopt_t *mpo_socket_check_getsockopt; + mpo_socket_label_associate_accept_t *mpo_socket_label_associate_accept; + mpo_socket_label_associate_t *mpo_socket_label_associate; + 
mpo_socket_label_copy_t *mpo_socket_label_copy; + mpo_socket_label_destroy_t *mpo_socket_label_destroy; + mpo_socket_label_externalize_t *mpo_socket_label_externalize; + mpo_socket_label_init_t *mpo_socket_label_init; + mpo_socket_label_internalize_t *mpo_socket_label_internalize; + mpo_socket_label_update_t *mpo_socket_label_update; + + mpo_socketpeer_label_associate_mbuf_t *mpo_socketpeer_label_associate_mbuf; + mpo_socketpeer_label_associate_socket_t *mpo_socketpeer_label_associate_socket; + mpo_socketpeer_label_destroy_t *mpo_socketpeer_label_destroy; + mpo_socketpeer_label_externalize_t *mpo_socketpeer_label_externalize; + mpo_socketpeer_label_init_t *mpo_socketpeer_label_init; + + mpo_system_check_acct_t *mpo_system_check_acct; + mpo_system_check_audit_t *mpo_system_check_audit; + mpo_system_check_auditctl_t *mpo_system_check_auditctl; + mpo_system_check_auditon_t *mpo_system_check_auditon; + mpo_system_check_host_priv_t *mpo_system_check_host_priv; + mpo_system_check_nfsd_t *mpo_system_check_nfsd; + mpo_system_check_reboot_t *mpo_system_check_reboot; + mpo_system_check_settime_t *mpo_system_check_settime; + mpo_system_check_swapoff_t *mpo_system_check_swapoff; + mpo_system_check_swapon_t *mpo_system_check_swapon; + mpo_socket_check_ioctl_t *mpo_socket_check_ioctl; + + mpo_sysvmsg_label_associate_t *mpo_sysvmsg_label_associate; + mpo_sysvmsg_label_destroy_t *mpo_sysvmsg_label_destroy; + mpo_sysvmsg_label_init_t *mpo_sysvmsg_label_init; + mpo_sysvmsg_label_recycle_t *mpo_sysvmsg_label_recycle; + mpo_sysvmsq_check_enqueue_t *mpo_sysvmsq_check_enqueue; + mpo_sysvmsq_check_msgrcv_t *mpo_sysvmsq_check_msgrcv; + mpo_sysvmsq_check_msgrmid_t *mpo_sysvmsq_check_msgrmid; + mpo_sysvmsq_check_msqctl_t *mpo_sysvmsq_check_msqctl; + mpo_sysvmsq_check_msqget_t *mpo_sysvmsq_check_msqget; + mpo_sysvmsq_check_msqrcv_t *mpo_sysvmsq_check_msqrcv; + mpo_sysvmsq_check_msqsnd_t *mpo_sysvmsq_check_msqsnd; + mpo_sysvmsq_label_associate_t *mpo_sysvmsq_label_associate; + mpo_sysvmsq_label_destroy_t *mpo_sysvmsq_label_destroy; + mpo_sysvmsq_label_init_t *mpo_sysvmsq_label_init; + mpo_sysvmsq_label_recycle_t *mpo_sysvmsq_label_recycle; + mpo_sysvsem_check_semctl_t *mpo_sysvsem_check_semctl; + mpo_sysvsem_check_semget_t *mpo_sysvsem_check_semget; + mpo_sysvsem_check_semop_t *mpo_sysvsem_check_semop; + mpo_sysvsem_label_associate_t *mpo_sysvsem_label_associate; + mpo_sysvsem_label_destroy_t *mpo_sysvsem_label_destroy; + mpo_sysvsem_label_init_t *mpo_sysvsem_label_init; + mpo_sysvsem_label_recycle_t *mpo_sysvsem_label_recycle; + mpo_sysvshm_check_shmat_t *mpo_sysvshm_check_shmat; + mpo_sysvshm_check_shmctl_t *mpo_sysvshm_check_shmctl; + mpo_sysvshm_check_shmdt_t *mpo_sysvshm_check_shmdt; + mpo_sysvshm_check_shmget_t *mpo_sysvshm_check_shmget; + mpo_sysvshm_label_associate_t *mpo_sysvshm_label_associate; + mpo_sysvshm_label_destroy_t *mpo_sysvshm_label_destroy; + mpo_sysvshm_label_init_t *mpo_sysvshm_label_init; + mpo_sysvshm_label_recycle_t *mpo_sysvshm_label_recycle; + + mpo_proc_notify_exit_t *mpo_proc_notify_exit; + mpo_mount_check_snapshot_revert_t *mpo_mount_check_snapshot_revert; + mpo_vnode_check_getattr_t *mpo_vnode_check_getattr; + mpo_mount_check_snapshot_create_t *mpo_mount_check_snapshot_create; + mpo_mount_check_snapshot_delete_t *mpo_mount_check_snapshot_delete; + mpo_vnode_check_clone_t *mpo_vnode_check_clone; + mpo_proc_check_get_cs_info_t *mpo_proc_check_get_cs_info; + mpo_proc_check_set_cs_info_t *mpo_proc_check_set_cs_info; + + mpo_iokit_check_hid_control_t *mpo_iokit_check_hid_control; + + 
mpo_vnode_check_access_t *mpo_vnode_check_access; + mpo_vnode_check_chdir_t *mpo_vnode_check_chdir; + mpo_vnode_check_chroot_t *mpo_vnode_check_chroot; + mpo_vnode_check_create_t *mpo_vnode_check_create; + mpo_vnode_check_deleteextattr_t *mpo_vnode_check_deleteextattr; + mpo_vnode_check_exchangedata_t *mpo_vnode_check_exchangedata; + mpo_vnode_check_exec_t *mpo_vnode_check_exec; + mpo_vnode_check_getattrlist_t *mpo_vnode_check_getattrlist; + mpo_vnode_check_getextattr_t *mpo_vnode_check_getextattr; + mpo_vnode_check_ioctl_t *mpo_vnode_check_ioctl; + mpo_vnode_check_kqfilter_t *mpo_vnode_check_kqfilter; + mpo_vnode_check_label_update_t *mpo_vnode_check_label_update; + mpo_vnode_check_link_t *mpo_vnode_check_link; + mpo_vnode_check_listextattr_t *mpo_vnode_check_listextattr; + mpo_vnode_check_lookup_t *mpo_vnode_check_lookup; + mpo_vnode_check_open_t *mpo_vnode_check_open; + mpo_vnode_check_read_t *mpo_vnode_check_read; + mpo_vnode_check_readdir_t *mpo_vnode_check_readdir; + mpo_vnode_check_readlink_t *mpo_vnode_check_readlink; + mpo_vnode_check_rename_from_t *mpo_vnode_check_rename_from; + mpo_vnode_check_rename_to_t *mpo_vnode_check_rename_to; + mpo_vnode_check_revoke_t *mpo_vnode_check_revoke; + mpo_vnode_check_select_t *mpo_vnode_check_select; + mpo_vnode_check_setattrlist_t *mpo_vnode_check_setattrlist; + mpo_vnode_check_setextattr_t *mpo_vnode_check_setextattr; + mpo_vnode_check_setflags_t *mpo_vnode_check_setflags; + mpo_vnode_check_setmode_t *mpo_vnode_check_setmode; + mpo_vnode_check_setowner_t *mpo_vnode_check_setowner; + mpo_vnode_check_setutimes_t *mpo_vnode_check_setutimes; + mpo_vnode_check_stat_t *mpo_vnode_check_stat; + mpo_vnode_check_truncate_t *mpo_vnode_check_truncate; + mpo_vnode_check_unlink_t *mpo_vnode_check_unlink; + mpo_vnode_check_write_t *mpo_vnode_check_write; + mpo_vnode_label_associate_devfs_t *mpo_vnode_label_associate_devfs; + mpo_vnode_label_associate_extattr_t *mpo_vnode_label_associate_extattr; + mpo_vnode_label_associate_file_t *mpo_vnode_label_associate_file; + mpo_vnode_label_associate_pipe_t *mpo_vnode_label_associate_pipe; + mpo_vnode_label_associate_posixsem_t *mpo_vnode_label_associate_posixsem; + mpo_vnode_label_associate_posixshm_t *mpo_vnode_label_associate_posixshm; + mpo_vnode_label_associate_singlelabel_t *mpo_vnode_label_associate_singlelabel; + mpo_vnode_label_associate_socket_t *mpo_vnode_label_associate_socket; + mpo_vnode_label_copy_t *mpo_vnode_label_copy; + mpo_vnode_label_destroy_t *mpo_vnode_label_destroy; + mpo_vnode_label_externalize_audit_t *mpo_vnode_label_externalize_audit; + mpo_vnode_label_externalize_t *mpo_vnode_label_externalize; + mpo_vnode_label_init_t *mpo_vnode_label_init; + mpo_vnode_label_internalize_t *mpo_vnode_label_internalize; + mpo_vnode_label_recycle_t *mpo_vnode_label_recycle; + mpo_vnode_label_store_t *mpo_vnode_label_store; + mpo_vnode_label_update_extattr_t *mpo_vnode_label_update_extattr; + mpo_vnode_label_update_t *mpo_vnode_label_update; + mpo_vnode_notify_create_t *mpo_vnode_notify_create; + mpo_vnode_check_signature_t *mpo_vnode_check_signature; + mpo_vnode_check_uipc_bind_t *mpo_vnode_check_uipc_bind; + mpo_vnode_check_uipc_connect_t *mpo_vnode_check_uipc_connect; + + mpo_proc_check_run_cs_invalid_t *mpo_proc_check_run_cs_invalid; + mpo_proc_check_suspend_resume_t *mpo_proc_check_suspend_resume; + + mpo_thread_userret_t *mpo_thread_userret; + + mpo_iokit_check_set_properties_t *mpo_iokit_check_set_properties; + + mpo_system_check_chud_t *mpo_system_check_chud; + + mpo_vnode_check_searchfs_t 
*mpo_vnode_check_searchfs; + + mpo_priv_check_t *mpo_priv_check; + mpo_priv_grant_t *mpo_priv_grant; + + mpo_proc_check_map_anon_t *mpo_proc_check_map_anon; + + mpo_vnode_check_fsgetpath_t *mpo_vnode_check_fsgetpath; + + mpo_iokit_check_open_t *mpo_iokit_check_open; + + mpo_proc_check_ledger_t *mpo_proc_check_ledger; + + mpo_vnode_notify_rename_t *mpo_vnode_notify_rename; + + mpo_vnode_check_setacl_t *mpo_vnode_check_setacl; mpo_vnode_notify_deleteextattr_t *mpo_vnode_notify_deleteextattr; - mpo_system_check_kas_info_t *mpo_system_check_kas_info; + mpo_system_check_kas_info_t *mpo_system_check_kas_info; - mpo_vnode_check_lookup_preflight_t *mpo_vnode_check_lookup_preflight; + mpo_vnode_check_lookup_preflight_t *mpo_vnode_check_lookup_preflight; - mpo_vnode_notify_open_t *mpo_vnode_notify_open; + mpo_vnode_notify_open_t *mpo_vnode_notify_open; - mpo_system_check_info_t *mpo_system_check_info; + mpo_system_check_info_t *mpo_system_check_info; - mpo_pty_notify_grant_t *mpo_pty_notify_grant; - mpo_pty_notify_close_t *mpo_pty_notify_close; + mpo_pty_notify_grant_t *mpo_pty_notify_grant; + mpo_pty_notify_close_t *mpo_pty_notify_close; - mpo_vnode_find_sigs_t *mpo_vnode_find_sigs; + mpo_vnode_find_sigs_t *mpo_vnode_find_sigs; - mpo_kext_check_load_t *mpo_kext_check_load; - mpo_kext_check_unload_t *mpo_kext_check_unload; + mpo_kext_check_load_t *mpo_kext_check_load; + mpo_kext_check_unload_t *mpo_kext_check_unload; - mpo_proc_check_proc_info_t *mpo_proc_check_proc_info; - mpo_vnode_notify_link_t *mpo_vnode_notify_link; - mpo_iokit_check_filter_properties_t *mpo_iokit_check_filter_properties; - mpo_iokit_check_get_property_t *mpo_iokit_check_get_property; + mpo_proc_check_proc_info_t *mpo_proc_check_proc_info; + mpo_vnode_notify_link_t *mpo_vnode_notify_link; + mpo_iokit_check_filter_properties_t *mpo_iokit_check_filter_properties; + mpo_iokit_check_get_property_t *mpo_iokit_check_get_property; }; /** - @brief MAC policy handle type - - The MAC handle is used to uniquely identify a loaded policy within - the MAC Framework. - - A variable of this type is set by mac_policy_register(). + * @brief MAC policy handle type + * + * The MAC handle is used to uniquely identify a loaded policy within + * the MAC Framework. + * + * A variable of this type is set by mac_policy_register(). */ typedef unsigned int mac_policy_handle_t; -#define mpc_t struct mac_policy_conf * +#define mpc_t struct mac_policy_conf * /** - @brief Mac policy configuration - - This structure specifies the configuration information for a - MAC policy module. A policy module developer must supply - a short unique policy name, a more descriptive full name, a list of label - namespaces and count, a pointer to the registered enty point operations, - any load time flags, and optionally, a pointer to a label slot identifier. - - The Framework will update the runtime flags (mpc_runtime_flags) to - indicate that the module has been registered. - - If the label slot identifier (mpc_field_off) is NULL, the Framework - will not provide label storage for the policy. Otherwise, the - Framework will store the label location (slot) in this field. - - The mpc_list field is used by the Framework and should not be - modified by policies. -*/ + * @brief Mac policy configuration + * + * This structure specifies the configuration information for a + * MAC policy module. 
A policy module developer must supply + * a short unique policy name, a more descriptive full name, a list of label + * namespaces and count, a pointer to the registered entry point operations, + * any load time flags, and optionally, a pointer to a label slot identifier. + * + * The Framework will update the runtime flags (mpc_runtime_flags) to + * indicate that the module has been registered. + * + * If the label slot identifier (mpc_field_off) is NULL, the Framework + * will not provide label storage for the policy. Otherwise, the + * Framework will store the label location (slot) in this field. + * + * The mpc_list field is used by the Framework and should not be + * modified by policies. + */ /* XXX - reorder these for better aligment on 64bit platforms */ struct mac_policy_conf { - const char *mpc_name; /** policy name */ - const char *mpc_fullname; /** full name */ - char const * const *mpc_labelnames; /** managed label namespaces */ - unsigned int mpc_labelname_count; /** number of managed label namespaces */ - const struct mac_policy_ops *mpc_ops; /** operation vector */ - int mpc_loadtime_flags; /** load time flags */ - int *mpc_field_off; /** label slot */ - int mpc_runtime_flags; /** run time flags */ - mpc_t mpc_list; /** List reference */ - void *mpc_data; /** module data */ + const char *mpc_name; /** policy name */ + const char *mpc_fullname; /** full name */ + char const * const *mpc_labelnames; /** managed label namespaces */ + unsigned int mpc_labelname_count; /** number of managed label namespaces */ + const struct mac_policy_ops *mpc_ops; /** operation vector */ + int mpc_loadtime_flags; /** load time flags */ + int *mpc_field_off; /** label slot */ + int mpc_runtime_flags; /** run time flags */ + mpc_t mpc_list; /** List reference */ + void *mpc_data; /** module data */ }; /** - @brief MAC policy module registration routine - - This function is called to register a policy with the - MAC framework. A policy module will typically call this from the - Darwin KEXT registration routine. + * @brief MAC policy module registration routine + * + * This function is called to register a policy with the + * MAC framework. A policy module will typically call this from the + * Darwin KEXT registration routine. */ -int mac_policy_register(struct mac_policy_conf *mpc, +int mac_policy_register(struct mac_policy_conf *mpc, mac_policy_handle_t *handlep, void *xd); /** - @brief MAC policy module de-registration routine - - This function is called to de-register a policy with theD - MAC framework. A policy module will typically call this from the - Darwin KEXT de-registration routine. + * @brief MAC policy module de-registration routine + * + * This function is called to de-register a policy with the + * MAC framework. A policy module will typically call this from the + * Darwin KEXT de-registration routine. */ -int mac_policy_unregister(mac_policy_handle_t handle); +int mac_policy_unregister(mac_policy_handle_t handle); /* * Framework entry points for the policies to add audit data. */ -int mac_audit_text(char *text, mac_policy_handle_t handle); +int mac_audit_text(char *text, mac_policy_handle_t handle); /* * Calls to assist with use of Apple XATTRs within policy modules.
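A hedged usage sketch for the xattr helpers declared just below; only the xattr name is invented:

static int
example_read_label(struct vnode *vp, char *buf, size_t buflen)
{
	size_t attrlen = 0;
	int error;

	error = mac_vnop_getxattr(vp, "com.example.mac-label",
	    buf, buflen, &attrlen);
	if (error != 0) {
		return error;	/* e.g. no such attribute on this vnode */
	}
	/* As with mac_file_getxattr(), the final argument reports the full
	 * stored value size, which may exceed the buffer supplied. */
	return 0;
}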
*/ -int mac_vnop_setxattr(struct vnode *, const char *, char *, size_t); -int mac_vnop_getxattr(struct vnode *, const char *, char *, size_t, - size_t *); -int mac_vnop_removexattr(struct vnode *, const char *); +int mac_vnop_setxattr(struct vnode *, const char *, char *, size_t); +int mac_vnop_getxattr(struct vnode *, const char *, char *, size_t, + size_t *); +int mac_vnop_removexattr(struct vnode *, const char *); /** - @brief Set an extended attribute on a vnode-based fileglob. - @param fg fileglob representing file to attach the extended attribute - @param name extended attribute name - @param buf buffer of data to use as the extended attribute value - @param len size of buffer - - Sets the value of an extended attribute on a file. - - Caller must hold an iocount on the vnode represented by the fileglob. -*/ -int mac_file_setxattr(struct fileglob *fg, const char *name, char *buf, size_t len); + * @brief Set an extended attribute on a vnode-based fileglob. + * @param fg fileglob representing file to attach the extended attribute + * @param name extended attribute name + * @param buf buffer of data to use as the extended attribute value + * @param len size of buffer + * + * Sets the value of an extended attribute on a file. + * + * Caller must hold an iocount on the vnode represented by the fileglob. + */ +int mac_file_setxattr(struct fileglob *fg, const char *name, char *buf, size_t len); /** - @brief Get an extended attribute from a vnode-based fileglob. - @param fg fileglob representing file to read the extended attribute - @param name extended attribute name - @param buf buffer of data to hold the extended attribute value - @param len size of buffer - @param attrlen size of full extended attribute value - - Gets the value of an extended attribute on a file. - - Caller must hold an iocount on the vnode represented by the fileglob. -*/ -int mac_file_getxattr(struct fileglob *fg, const char *name, char *buf, size_t len, - size_t *attrlen); + * @brief Get an extended attribute from a vnode-based fileglob. + * @param fg fileglob representing file to read the extended attribute + * @param name extended attribute name + * @param buf buffer of data to hold the extended attribute value + * @param len size of buffer + * @param attrlen size of full extended attribute value + * + * Gets the value of an extended attribute on a file. + * + * Caller must hold an iocount on the vnode represented by the fileglob. + */ +int mac_file_getxattr(struct fileglob *fg, const char *name, char *buf, size_t len, + size_t *attrlen); /** - @brief Remove an extended attribute from a vnode-based fileglob. - @param fg fileglob representing file to remove the extended attribute - @param name extended attribute name - - Removes the named extended attribute from the file. - - Caller must hold an iocount on the vnode represented by the fileglob. -*/ -int mac_file_removexattr(struct fileglob *fg, const char *name); + * @brief Remove an extended attribute from a vnode-based fileglob. + * @param fg fileglob representing file to remove the extended attribute + * @param name extended attribute name + * + * Removes the named extended attribute from the file. + * + * Caller must hold an iocount on the vnode represented by the fileglob. + */ +int mac_file_removexattr(struct fileglob *fg, const char *name); /* * Arbitrary limit on how much data will be logged by the audit * entry points above. */ -#define MAC_AUDIT_DATA_LIMIT 1024 +#define MAC_AUDIT_DATA_LIMIT 1024 /* * Values returned by mac_audit_{pre,post}select. 
To combine the responses * of the security policies into a single decision, * mac_audit_{pre,post}select() choose the greatest value returned. */ -#define MAC_AUDIT_DEFAULT 0 /* use system behavior */ -#define MAC_AUDIT_NO 1 /* force not auditing this event */ -#define MAC_AUDIT_YES 2 /* force auditing this event */ +#define MAC_AUDIT_DEFAULT 0 /* use system behavior */ +#define MAC_AUDIT_NO 1 /* force not auditing this event */ +#define MAC_AUDIT_YES 2 /* force auditing this event */ // \defgroup mpc_loadtime_flags Flags for the mpc_loadtime_flags field /** - @name Flags for the mpc_loadtime_flags field - @see mac_policy_conf - - This is the complete list of flags that are supported by the - mpc_loadtime_flags field of the mac_policy_conf structure. These - flags specify the load time behavior of MAC Framework policy - modules. -*/ + * @name Flags for the mpc_loadtime_flags field + * @see mac_policy_conf + * + * This is the complete list of flags that are supported by the + * mpc_loadtime_flags field of the mac_policy_conf structure. These + * flags specify the load time behavior of MAC Framework policy + * modules. + */ /*@{*/ /** - @brief Flag to indicate registration preference - - This flag indicates that the policy module must be loaded and - initialized early in the boot process. If the flag is specified, - attempts to register the module following boot will be rejected. The - flag may be used by policies that require pervasive labeling of all - system objects, and cannot handle objects that have not been - properly initialized by the policy. - */ -#define MPC_LOADTIME_FLAG_NOTLATE 0x00000001 + * @brief Flag to indicate registration preference + * + * This flag indicates that the policy module must be loaded and + * initialized early in the boot process. If the flag is specified, + * attempts to register the module following boot will be rejected. The + * flag may be used by policies that require pervasive labeling of all + * system objects, and cannot handle objects that have not been + * properly initialized by the policy. + */ +#define MPC_LOADTIME_FLAG_NOTLATE 0x00000001 /** - @brief Flag to indicate unload preference - - This flag indicates that the policy module may be unloaded. If this - flag is not set, then the policy framework will reject requests to - unload the module. This flag might be used by modules that allocate - label state and are unable to free that state at runtime, or for - modules that simply do not want to permit unload operations. -*/ -#define MPC_LOADTIME_FLAG_UNLOADOK 0x00000002 + * @brief Flag to indicate unload preference + * + * This flag indicates that the policy module may be unloaded. If this + * flag is not set, then the policy framework will reject requests to + * unload the module. This flag might be used by modules that allocate + * label state and are unable to free that state at runtime, or for + * modules that simply do not want to permit unload operations. + */ +#define MPC_LOADTIME_FLAG_UNLOADOK 0x00000002 /** - @brief Unsupported - - XXX This flag is not yet supported. -*/ -#define MPC_LOADTIME_FLAG_LABELMBUFS 0x00000004 + * @brief Unsupported + * + * XXX This flag is not yet supported. + */ +#define MPC_LOADTIME_FLAG_LABELMBUFS 0x00000004 /** - @brief Flag to indicate a base policy - - This flag indicates that the policy module is a base policy. Only - one module can declare itself as base, otherwise the boot process - will be halted. 
+ * @brief Flag to indicate a base policy + * + * This flag indicates that the policy module is a base policy. Only + * one module can declare itself as base, otherwise the boot process + * will be halted. */ -#define MPC_LOADTIME_BASE_POLICY 0x00000008 +#define MPC_LOADTIME_BASE_POLICY 0x00000008 /*@}*/ /** - @brief Policy registration flag - @see mac_policy_conf - - This flag indicates that the policy module has been successfully - registered with the TrustedBSD MAC Framework. The Framework will - set this flag in the mpc_runtime_flags field of the policy's - mac_policy_conf structure after registering the policy. + * @brief Policy registration flag + * @see mac_policy_conf + * + * This flag indicates that the policy module has been successfully + * registered with the TrustedBSD MAC Framework. The Framework will + * set this flag in the mpc_runtime_flags field of the policy's + * mac_policy_conf structure after registering the policy. */ -#define MPC_RUNTIME_FLAG_REGISTERED 0x00000001 +#define MPC_RUNTIME_FLAG_REGISTERED 0x00000001 /* * Depends on POLICY_VER */ #ifndef POLICY_VER -#define POLICY_VER 1.0 +#define POLICY_VER 1.0 #endif -#define MAC_POLICY_SET(handle, mpops, mpname, mpfullname, lnames, lcount, slot, lflags, rflags) \ - static struct mac_policy_conf mpname##_mac_policy_conf = { \ - .mpc_name = #mpname, \ - .mpc_fullname = mpfullname, \ - .mpc_labelnames = lnames, \ - .mpc_labelname_count = lcount, \ - .mpc_ops = mpops, \ - .mpc_loadtime_flags = lflags, \ - .mpc_field_off = slot, \ - .mpc_runtime_flags = rflags \ - }; \ - \ - static kern_return_t \ - kmod_start(kmod_info_t *ki, void *xd) \ - { \ - return mac_policy_register(&mpname##_mac_policy_conf, \ - &handle, xd); \ - } \ - \ - static kern_return_t \ - kmod_stop(kmod_info_t *ki, void *xd) \ - { \ - return mac_policy_unregister(handle); \ - } \ - \ - extern kern_return_t _start(kmod_info_t *ki, void *data); \ - extern kern_return_t _stop(kmod_info_t *ki, void *data); \ - \ - KMOD_EXPLICIT_DECL(security.mpname, POLICY_VER, _start, _stop) \ - kmod_start_func_t *_realmain = kmod_start; \ - kmod_stop_func_t *_antimain = kmod_stop; \ +#define MAC_POLICY_SET(handle, mpops, mpname, mpfullname, lnames, lcount, slot, lflags, rflags) \ + static struct mac_policy_conf mpname##_mac_policy_conf = { \ + .mpc_name = #mpname, \ + .mpc_fullname = mpfullname, \ + .mpc_labelnames = lnames, \ + .mpc_labelname_count = lcount, \ + .mpc_ops = mpops, \ + .mpc_loadtime_flags = lflags, \ + .mpc_field_off = slot, \ + .mpc_runtime_flags = rflags \ + }; \ + \ + static kern_return_t \ + kmod_start(kmod_info_t *ki, void *xd) \ + { \ + return mac_policy_register(&mpname##_mac_policy_conf, \ + &handle, xd); \ + } \ + \ + static kern_return_t \ + kmod_stop(kmod_info_t *ki, void *xd) \ + { \ + return mac_policy_unregister(handle); \ + } \ + \ + extern kern_return_t _start(kmod_info_t *ki, void *data); \ + extern kern_return_t _stop(kmod_info_t *ki, void *data); \ + \ + KMOD_EXPLICIT_DECL(security.mpname, POLICY_VER, _start, _stop) \ + kmod_start_func_t *_realmain = kmod_start; \ + kmod_stop_func_t *_antimain = kmod_stop; \ int _kext_apple_cc = __APPLE_CC__ -#define LABEL_TO_SLOT(l, s) (l)->l_perpolicy[s] +#define LABEL_TO_SLOT(l, s) (l)->l_perpolicy[s] /* * Policy interface to map a struct label pointer to per-policy data. 
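
For illustration, here is a minimal sketch of how a policy module would use MAC_POLICY_SET and the registration interfaces above. The policy name "demopolicy", its empty ops vector, and the slot variable are assumptions made for this example, not part of the header:

    /*
     * Minimal (hypothetical) MAC policy module. Because a non-NULL
     * mpc_field_off is supplied, the framework stores the assigned
     * label slot in demopolicy_slot at registration time.
     */
    #include <security/mac_policy.h>

    static mac_policy_handle_t demopolicy_handle;
    static int demopolicy_slot;

    static const char *demopolicy_labelnames[] = { "demopolicy" };

    /* No hooks wired up in this sketch; a real policy fills these in. */
    static const struct mac_policy_ops demopolicy_ops = { 0 };

    /*
     * Expands to the mac_policy_conf initializer plus the kmod
     * start/stop glue that calls mac_policy_register() and
     * mac_policy_unregister(), as shown in the macro above.
     */
    MAC_POLICY_SET(demopolicy_handle, &demopolicy_ops, demopolicy,
        "Demo policy (illustrative)", demopolicy_labelnames, 1,
        &demopolicy_slot, MPC_LOADTIME_FLAG_UNLOADOK, 0);
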
@@ -6923,35 +6923,35 @@ int mac_file_removexattr(struct fileglob *fg, const char *name); intptr_t mac_label_get(struct label *l, int slot); void mac_label_set(struct label *l, int slot, intptr_t v); -#define mac_get_mpc(h) (mac_policy_list.entries[h].mpc) +#define mac_get_mpc(h) (mac_policy_list.entries[h].mpc) /** - @name Flags for MAC allocator interfaces - - These flags are passed to the Darwin kernel allocator routines to - indicate whether the allocation is permitted to block or not. - Caution should be taken; some operations are not permitted to sleep, - and some types of locks cannot be held when sleeping. + * @name Flags for MAC allocator interfaces + * + * These flags are passed to the Darwin kernel allocator routines to + * indicate whether the allocation is permitted to block or not. + * Caution should be taken; some operations are not permitted to sleep, + * and some types of locks cannot be held when sleeping. */ /*@{*/ /** - @brief Allocation operations may block - - If memory is not immediately available, the allocation routine - will block (typically sleeping) until memory is available. - - @warning Inappropriate use of this flag may cause kernel panics. + * @brief Allocation operations may block + * + * If memory is not immediately available, the allocation routine + * will block (typically sleeping) until memory is available. + * + * @warning Inappropriate use of this flag may cause kernel panics. */ #define MAC_WAITOK 0 /** - @brief Allocation operations may not block - - Rather than blocking, the allocator may return an error if memory - is not immediately available. This type of allocation will not - sleep, preserving locking semantics. + * @brief Allocation operations may not block + * + * Rather than blocking, the allocator may return an error if memory + * is not immediately available. This type of allocation will not + * sleep, preserving locking semantics. */ #define MAC_NOWAIT 1 diff --git a/security/mac_posix_sem.c b/security/mac_posix_sem.c index f17db238a..969e9ab91 100644 --- a/security/mac_posix_sem.c +++ b/security/mac_posix_sem.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
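
Returning to the label-slot accessors and allocator flags above: the framework allocates the label storage itself (from its label zone, typically with MAC_WAITOK, as the mac_posixsem_label_alloc() hunk below shows), and a policy reads and writes only its own slot. A sketch under the assumption of the hypothetical demopolicy_slot from the earlier example; the hook bodies and the taint-bit scheme are illustrative only:

    #include <sys/errno.h>

    #define DEMO_TAINTED    ((intptr_t)1)   /* hypothetical per-label flag */

    /* mpo_cred_label_init_t-shaped hook: start labels out untainted. */
    static void
    demopolicy_cred_label_init(struct label *label)
    {
        mac_label_set(label, demopolicy_slot, 0);
    }

    /* mpo_cred_check_label_update_t-shaped hook: veto tainted updates. */
    static int
    demopolicy_cred_check_label_update(kauth_cred_t cred, struct label *newlabel)
    {
        (void)cred;
        if (mac_label_get(newlabel, demopolicy_slot) == DEMO_TAINTED) {
            return EPERM;
        }
        return 0;
    }
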
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -75,23 +75,22 @@ mac_posixsem_label_alloc(void) struct label *label; label = mac_labelzone_alloc(MAC_WAITOK); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_PERFORM(posixsem_label_init, label); - return (label); + return label; } void mac_posixsem_label_init(struct pseminfo *psem) { - psem->psem_label = mac_posixsem_label_alloc(); } static void mac_posixsem_label_free(struct label *label) { - MAC_PERFORM(posixsem_label_destroy, label); mac_labelzone_free(label); } @@ -99,7 +98,6 @@ mac_posixsem_label_free(struct label *label) void mac_posixsem_label_destroy(struct pseminfo *psem) { - mac_posixsem_label_free(psem->psem_label); psem->psem_label = NULL; } @@ -108,18 +106,17 @@ void mac_posixsem_label_associate(kauth_cred_t cred, struct pseminfo *psem, const char *name) { - MAC_PERFORM(posixsem_label_associate, cred, psem, psem->psem_label, name); } void -mac_posixsem_vnode_label_associate(kauth_cred_t cred, - struct pseminfo *psem, struct label *plabel, - vnode_t vp, struct label *vlabel) +mac_posixsem_vnode_label_associate(kauth_cred_t cred, + struct pseminfo *psem, struct label *plabel, + vnode_t vp, struct label *vlabel) { MAC_PERFORM(vnode_label_associate_posixsem, cred, - psem, plabel, vp, vlabel); + psem, plabel, vp, vlabel); } int @@ -128,14 +125,15 @@ mac_posixsem_check_create(kauth_cred_t cred, const char *name) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixsem_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_posixsem_enforce) { + return 0; + } #endif MAC_CHECK(posixsem_check_create, cred, name); - return (error); + return error; } int @@ -144,15 +142,16 @@ mac_posixsem_check_open(kauth_cred_t cred, struct pseminfo *psem) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixsem_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_posixsem_enforce) { + return 0; + } #endif MAC_CHECK(posixsem_check_open, cred, psem, psem->psem_label); - return (error); + return error; } int @@ -161,14 +160,15 @@ mac_posixsem_check_post(kauth_cred_t cred, struct pseminfo *psem) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixsem_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_posixsem_enforce) { + return 0; + } #endif MAC_CHECK(posixsem_check_post, cred, psem, psem->psem_label); - return (error); + return error; } int @@ -178,14 +178,15 @@ mac_posixsem_check_unlink(kauth_cred_t cred, struct pseminfo *psem, int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixsem_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_posixsem_enforce) { + return 0; + } #endif MAC_CHECK(posixsem_check_unlink, cred, psem, psem->psem_label, name); - return (error); + return error; } int @@ -194,12 +195,13 @@ mac_posixsem_check_wait(kauth_cred_t cred, struct pseminfo *psem) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixsem_enforce) - return (0); + /* 21167099 - only check if we allow write */ + if (!mac_posixsem_enforce) { + return 0; + } #endif MAC_CHECK(posixsem_check_wait, cred, psem, psem->psem_label); - return (error); + return error; } diff --git a/security/mac_posix_shm.c b/security/mac_posix_shm.c index cc4e281c1..4e3294016 
100644 --- a/security/mac_posix_shm.c +++ b/security/mac_posix_shm.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -76,23 +76,22 @@ mac_posixshm_label_alloc(void) struct label *label; label = mac_labelzone_alloc(MAC_WAITOK); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_PERFORM(posixshm_label_init, label); - return (label); + return label; } void mac_posixshm_label_init(struct pshminfo *pshm) { - pshm->pshm_label = mac_posixshm_label_alloc(); } static void mac_posixshm_label_free(struct label *label) { - MAC_PERFORM(posixshm_label_destroy, label); mac_labelzone_free(label); } @@ -100,25 +99,23 @@ mac_posixshm_label_free(struct label *label) void mac_posixshm_label_destroy(struct pshminfo *pshm) { - mac_posixshm_label_free(pshm->pshm_label); pshm->pshm_label = NULL; } void mac_posixshm_vnode_label_associate(kauth_cred_t cred, - struct pshminfo *pshm, struct label *plabel, - vnode_t vp, struct label *vlabel) + struct pshminfo *pshm, struct label *plabel, + vnode_t vp, struct label *vlabel) { - MAC_PERFORM(vnode_label_associate_posixshm, cred, - pshm, plabel, vp, vlabel); + MAC_PERFORM(vnode_label_associate_posixshm, cred, + pshm, plabel, vp, vlabel); } void mac_posixshm_label_associate(kauth_cred_t cred, struct pshminfo *pshm, const char *name) { - MAC_PERFORM(posixshm_label_associate, cred, pshm, pshm->pshm_label, name); } @@ -128,9 +125,10 @@ mac_posixshm_check_create(kauth_cred_t cred, const char *name) int error = 0; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixshm_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_posixshm_enforce) { + return 0; + } #endif MAC_CHECK(posixshm_check_create, cred, name); @@ -144,14 +142,15 @@ mac_posixshm_check_open(kauth_cred_t cred, struct pshminfo *shm, int fflags) int error = 0; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixshm_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_posixshm_enforce) { + return 0; + } #endif MAC_CHECK(posixshm_check_open, cred, shm, shm->pshm_label, fflags); - return (error); + return error; } int @@ -161,15 +160,16 @@ mac_posixshm_check_mmap(kauth_cred_t cred, struct pshminfo *shm, int error = 0; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixshm_enforce) - return 0; + /* 21167099 - only 
check if we allow write */ + if (!mac_posixshm_enforce) { + return 0; + } #endif MAC_CHECK(posixshm_check_mmap, cred, shm, shm->pshm_label, - prot, flags); + prot, flags); - return (error); + return error; } int @@ -178,14 +178,15 @@ mac_posixshm_check_stat(kauth_cred_t cred, struct pshminfo *shm) int error = 0; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixshm_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_posixshm_enforce) { + return 0; + } #endif MAC_CHECK(posixshm_check_stat, cred, shm, shm->pshm_label); - return (error); + return error; } int @@ -195,14 +196,15 @@ mac_posixshm_check_truncate(kauth_cred_t cred, struct pshminfo *shm, int error = 0; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixshm_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_posixshm_enforce) { + return 0; + } #endif MAC_CHECK(posixshm_check_truncate, cred, shm, shm->pshm_label, size); - return (error); + return error; } int @@ -212,12 +214,13 @@ mac_posixshm_check_unlink(kauth_cred_t cred, struct pshminfo *shm, int error = 0; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_posixshm_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_posixshm_enforce) { + return 0; + } #endif MAC_CHECK(posixshm_check_unlink, cred, shm, shm->pshm_label, name); - return (error); + return error; } diff --git a/security/mac_priv.c b/security/mac_priv.c index 59f14de12..a88eb460b 100644 --- a/security/mac_priv.c +++ b/security/mac_priv.c @@ -2,7 +2,7 @@ * Copyright (c) 2010 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -86,12 +86,13 @@ mac_priv_check(kauth_cred_t cred, int priv) { int error; - if (!mac_cred_check_enforce(cred)) + if (!mac_cred_check_enforce(cred)) { return 0; + } MAC_CHECK(priv_check, cred, priv); - return (error); + return error; } /* @@ -103,10 +104,11 @@ mac_priv_grant(kauth_cred_t cred, int priv) { int error; - if (!mac_cred_check_enforce(cred)) + if (!mac_cred_check_enforce(cred)) { return 0; + } MAC_GRANT(priv_grant, cred, priv); - return (error); + return error; } diff --git a/security/mac_process.c b/security/mac_process.c index f3ea32890..3552fe991 100644 --- a/security/mac_process.c +++ b/security/mac_process.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2010 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -86,10 +86,11 @@ mac_cred_label_alloc(void) struct label *label; label = mac_labelzone_alloc(MAC_WAITOK); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_PERFORM(cred_label_init, label); - return (label); + return label; } void @@ -108,7 +109,7 @@ mac_cred_label_free(struct label *label) int mac_cred_label_compare(struct label *a, struct label *b) { - return (bcmp(a, b, sizeof (*a)) == 0); + return bcmp(a, b, sizeof(*a)) == 0; } int @@ -123,13 +124,12 @@ mac_cred_label_externalize_audit(struct proc *p, struct mac *mac) mac->m_string, mac->m_buflen); kauth_cred_unref(&cr); - return (error); + return error; } void mac_cred_label_destroy(kauth_cred_t cred) { - mac_cred_label_free(cred->cr_label); cred->cr_label = NULL; } @@ -142,7 +142,7 @@ mac_cred_label_externalize(struct label *label, char *elements, error = MAC_EXTERNALIZE(cred, label, elements, outbuf, outbuflen); - return (error); + return error; } int @@ -152,7 +152,7 @@ mac_cred_label_internalize(struct label *label, char *string) error = MAC_INTERNALIZE(cred, label, string); - return (error); + return error; } /* @@ -166,7 +166,7 @@ mac_cred_label_associate_fork(kauth_cred_t cred, proc_t proc) { MAC_PERFORM(cred_label_associate_fork, cred, proc); } - + /* * Initialize MAC label for the first kernel process, from which other * kernel processes and threads are spawned. 
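
The process-level checks that follow all share one shape, which this patch's cleanup makes uniform: an optional compile-time enforcement gate, a per-process enforcement gate, a credential reference held across the policy call, and an unparenthesized return. A sketch of that shape, using the hypothetical hook name proc_check_example (MAC_CHECK() expands to invoke the named hook in every registered policy and combine the results into error):

    int
    mac_proc_check_example(proc_t curp, struct proc *proc)
    {
        kauth_cred_t cred;
        int error;

    #if SECURITY_MAC_CHECK_ENFORCE
        /* 21167099 - only check if we allow write */
        if (!mac_proc_enforce) {
            return 0;
        }
    #endif
        if (!mac_proc_check_enforce(curp)) {
            return 0;
        }

        cred = kauth_cred_proc_ref(curp);
        MAC_CHECK(proc_check_example, cred, proc);
        kauth_cred_unref(&cred);

        return error;
    }
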
@@ -174,7 +174,6 @@ mac_cred_label_associate_fork(kauth_cred_t cred, proc_t proc) void mac_cred_label_associate_kernel(kauth_cred_t cred) { - MAC_PERFORM(cred_label_associate_kernel, cred); } @@ -185,7 +184,6 @@ mac_cred_label_associate_kernel(kauth_cred_t cred) void mac_cred_label_associate_user(kauth_cred_t cred) { - MAC_PERFORM(cred_label_associate_user, cred); } @@ -197,7 +195,6 @@ mac_cred_label_associate_user(kauth_cred_t cred) void mac_cred_label_associate(struct ucred *parent_cred, struct ucred *child_cred) { - MAC_PERFORM(cred_label_associate, parent_cred, child_cred); } @@ -210,8 +207,9 @@ mac_execve_enter(user_addr_t mac_p, struct image_params *imgp) int error; size_t ulen; - if (mac_p == USER_ADDR_NULL) - return (0); + if (mac_p == USER_ADDR_NULL) { + return 0; + } if (IS_64BIT_PROCESS(current_proc())) { struct user64_mac mac64; @@ -224,18 +222,21 @@ mac_execve_enter(user_addr_t mac_p, struct image_params *imgp) mac.m_buflen = mac32.m_buflen; mac.m_string = mac32.m_string; } - if (error) - return (error); + if (error) { + return error; + } error = mac_check_structmac_consistent(&mac); - if (error) - return (error); + if (error) { + return error; + } execlabel = mac_cred_label_alloc(); MALLOC(buffer, char *, mac.m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(CAST_USER_ADDR_T(mac.m_string), buffer, mac.m_buflen, &ulen); - if (error) + if (error) { goto out; + } AUDIT_ARG(mac_string, buffer); error = mac_cred_label_internalize(execlabel, buffer); @@ -246,7 +247,7 @@ out: } imgp->ip_execlabelp = execlabel; FREE(buffer, M_MACTEMP); - return (error); + return error; } /* @@ -275,14 +276,15 @@ mac_cred_check_label_update(kauth_cred_t cred, struct label *newlabel) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_proc_enforce) { + return 0; + } #endif MAC_CHECK(cred_check_label_update, cred, newlabel); - return (error); + return error; } int @@ -292,13 +294,14 @@ mac_cred_check_visible(kauth_cred_t u1, kauth_cred_t u2) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif MAC_CHECK(cred_check_visible, u1, u2); - return (error); + return error; } int @@ -309,17 +312,19 @@ mac_proc_check_debug(proc_t curp, struct proc *proc) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_debug, cred, proc); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -330,17 +335,19 @@ mac_proc_check_fork(proc_t curp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_fork, cred, curp); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -350,7 +357,7 @@ mac_proc_check_get_task_name(struct ucred *cred, struct proc *p) MAC_CHECK(proc_check_get_task_name, cred, p); - return (error); + return error; } int @@ -360,7 +367,7 @@ mac_proc_check_get_task(struct ucred *cred, struct proc *p) MAC_CHECK(proc_check_get_task, cred, p); - return (error); + return error; } int @@ 
-370,7 +377,7 @@ mac_proc_check_expose_task(struct ucred *cred, struct proc *p) MAC_CHECK(proc_check_expose_task, cred, p); - return (error); + return error; } int @@ -380,7 +387,7 @@ mac_proc_check_inherit_ipc_ports(struct proc *p, struct vnode *cur_vp, off_t cur MAC_CHECK(proc_check_inherit_ipc_ports, p, cur_vp, cur_offset, img_vp, img_offset, scriptvp); - return (error); + return error; } /* @@ -397,17 +404,19 @@ mac_proc_check_map_anon(proc_t proc, user_addr_t u_addr, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vm_enforce) + if (!mac_vm_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(proc)) - return (0); + if (!mac_proc_check_enforce(proc)) { + return 0; + } cred = kauth_cred_proc_ref(proc); MAC_CHECK(proc_check_map_anon, proc, cred, u_addr, u_size, prot, flags, maxprot); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -419,35 +428,38 @@ mac_proc_check_mprotect(proc_t proc, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vm_enforce) + if (!mac_vm_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(proc)) - return (0); + if (!mac_proc_check_enforce(proc)) { + return 0; + } cred = kauth_cred_proc_ref(proc); MAC_CHECK(proc_check_mprotect, cred, proc, addr, size, prot); kauth_cred_unref(&cred); - return (error); + return error; } int mac_proc_check_run_cs_invalid(proc_t proc) { int error; - + #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_vm_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_vm_enforce) { + return 0; + } #endif - + MAC_CHECK(proc_check_run_cs_invalid, proc); - - return (error); + + return error; } - + int mac_proc_check_sched(proc_t curp, struct proc *proc) { @@ -456,17 +468,19 @@ mac_proc_check_sched(proc_t curp, struct proc *proc) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_sched, cred, proc); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -477,17 +491,19 @@ mac_proc_check_signal(proc_t curp, struct proc *proc, int signum) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_signal, cred, proc, signum); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -498,17 +514,19 @@ mac_proc_check_wait(proc_t curp, struct proc *proc) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_wait, cred, proc); kauth_cred_unref(&cred); - return (error); + return error; } void @@ -525,17 +543,19 @@ mac_proc_check_suspend_resume(proc_t curp, int sr) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); 
MAC_CHECK(proc_check_suspend_resume, cred, curp, sr); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -546,17 +566,19 @@ mac_proc_check_ledger(proc_t curp, proc_t proc, int ledger_op) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_ledger, cred, proc, ledger_op); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -567,17 +589,19 @@ mac_proc_check_proc_info(proc_t curp, proc_t target, int callnum, int flavor) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_proc_info, cred, target, callnum, flavor); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -588,17 +612,19 @@ mac_proc_check_get_cs_info(proc_t curp, proc_t target, unsigned int op) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_get_cs_info, cred, target, op); kauth_cred_unref(&cred); - return (error); + return error; } int @@ -609,16 +635,17 @@ mac_proc_check_set_cs_info(proc_t curp, proc_t target, unsigned int op) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce) + if (!mac_proc_enforce) { return 0; + } #endif - if (!mac_proc_check_enforce(curp)) + if (!mac_proc_check_enforce(curp)) { return 0; + } cred = kauth_cred_proc_ref(curp); MAC_CHECK(proc_check_set_cs_info, cred, target, op); kauth_cred_unref(&cred); - return (error); + return error; } - diff --git a/security/mac_pty.c b/security/mac_pty.c index bbd535272..679948b29 100644 --- a/security/mac_pty.c +++ b/security/mac_pty.c @@ -34,11 +34,13 @@ #include void -mac_pty_notify_grant(proc_t p, struct tty *tp, dev_t dev, struct label *label) { +mac_pty_notify_grant(proc_t p, struct tty *tp, dev_t dev, struct label *label) +{ MAC_PERFORM(pty_notify_grant, p, tp, dev, label); } void -mac_pty_notify_close(proc_t p, struct tty *tp, dev_t dev, struct label *label) { +mac_pty_notify_close(proc_t p, struct tty *tp, dev_t dev, struct label *label) +{ MAC_PERFORM(pty_notify_close, p, tp, dev, label); } diff --git a/security/mac_skywalk.c b/security/mac_skywalk.c index ba53dfe93..fa0ca3255 100644 --- a/security/mac_skywalk.c +++ b/security/mac_skywalk.c @@ -38,7 +38,7 @@ mac_skywalk_flow_check_connect(proc_t proc, void *flow, const struct sockaddr *a int error; MAC_CHECK(skywalk_flow_check_connect, proc_ucred(proc), flow, addr, type, protocol); - return (error); + return error; } int @@ -47,6 +47,5 @@ mac_skywalk_flow_check_listen(proc_t proc, void *flow, const struct sockaddr *ad int error; MAC_CHECK(skywalk_flow_check_listen, proc_ucred(proc), flow, addr, type, protocol); - return (error); + return error; } - diff --git a/security/mac_socket.c b/security/mac_socket.c index e935f6f78..925e8f23c 100644 --- a/security/mac_socket.c +++ b/security/mac_socket.c @@ -2,7 +2,7 @@ * Copyright (c) 2007-2012 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /*- @@ -39,7 +39,7 @@ * Research, the Technology Research Division of Network Associates, Inc. * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the * DARPA CHATS research program. - * + * * This software was enhanced by SPARTA ISSO under SPAWAR contract * N66001-04-C-6019 ("SEFOS"). * @@ -92,17 +92,18 @@ mac_socket_label_alloc(int flag) int error; label = mac_labelzone_alloc(flag); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_CHECK(socket_label_init, label, flag); if (error) { MAC_PERFORM(socket_label_destroy, label); mac_labelzone_free(label); - return (NULL); + return NULL; } - return (label); + return label; } static struct label * @@ -112,39 +113,39 @@ mac_socket_peer_label_alloc(int flag) int error; label = mac_labelzone_alloc(flag); - if (label == NULL) - return (NULL); + if (label == NULL) { + return NULL; + } MAC_CHECK(socketpeer_label_init, label, flag); if (error) { MAC_PERFORM(socketpeer_label_destroy, label); mac_labelzone_free(label); - return (NULL); + return NULL; } - return (label); + return label; } int mac_socket_label_init(struct socket *so, int flag) { - so->so_label = mac_socket_label_alloc(flag); - if (so->so_label == NULL) - return (ENOMEM); + if (so->so_label == NULL) { + return ENOMEM; + } so->so_peerlabel = mac_socket_peer_label_alloc(flag); if (so->so_peerlabel == NULL) { mac_socket_label_free(so->so_label); so->so_label = NULL; - return (ENOMEM); + return ENOMEM; } - return (0); + return 0; } void mac_socket_label_free(struct label *label) { - MAC_PERFORM(socket_label_destroy, label); mac_labelzone_free(label); } @@ -152,7 +153,6 @@ mac_socket_label_free(struct label *label) static void mac_socket_peer_label_free(struct label *label) { - MAC_PERFORM(socketpeer_label_destroy, label); mac_labelzone_free(label); } @@ -160,7 +160,6 @@ mac_socket_peer_label_free(struct label *label) void mac_socket_label_destroy(struct socket *so) { - if (so->so_label != NULL) { mac_socket_label_free(so->so_label); so->so_label = NULL; @@ -174,7 +173,6 @@ mac_socket_label_destroy(struct socket *so) void mac_socket_label_copy(struct label *src, struct label *dest) { - MAC_PERFORM(socket_label_copy, src, dest); } @@ -186,7 +184,7 @@ mac_socket_label_externalize(struct label *label, char *elements, error = MAC_EXTERNALIZE(socket, label, elements, outbuf, outbuflen); - return (error); + return error; } static int @@ -197,7 +195,7 @@ 
mac_socketpeer_label_externalize(struct label *label, char *elements, error = MAC_EXTERNALIZE(socketpeer, label, elements, outbuf, outbuflen); - return (error); + return error; } int @@ -207,20 +205,21 @@ mac_socket_label_internalize(struct label *label, char *string) error = MAC_INTERNALIZE(socket, label, string); - return (error); + return error; } void mac_socket_label_associate(struct ucred *cred, struct socket *so) { #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return; + } #endif - MAC_PERFORM(socket_label_associate, cred, - (socket_t)so, so->so_label); + MAC_PERFORM(socket_label_associate, cred, + (socket_t)so, so->so_label); } void @@ -228,14 +227,15 @@ mac_socket_label_associate_accept(struct socket *oldsocket, struct socket *newsocket) { #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return; + } #endif - MAC_PERFORM(socket_label_associate_accept, - (socket_t)oldsocket, oldsocket->so_label, - (socket_t)newsocket, newsocket->so_label); + MAC_PERFORM(socket_label_associate_accept, + (socket_t)oldsocket, oldsocket->so_label, + (socket_t)newsocket, newsocket->so_label); } #if CONFIG_MACF_SOCKET && CONFIG_MACF_NET @@ -245,21 +245,22 @@ mac_socketpeer_label_associate_mbuf(struct mbuf *mbuf, struct socket *so) struct label *label; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce && !mac_net_enforce) - return; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce && !mac_net_enforce) { + return; + } #endif label = mac_mbuf_to_label(mbuf); /* Policy must deal with NULL label (unlabeled mbufs) */ MAC_PERFORM(socketpeer_label_associate_mbuf, mbuf, label, - (socket_t)so, so->so_peerlabel); + (socket_t)so, so->so_peerlabel); } #else void -mac_socketpeer_label_associate_mbuf(__unused struct mbuf *mbuf, - __unused struct socket *so) +mac_socketpeer_label_associate_mbuf(__unused struct mbuf *mbuf, + __unused struct socket *so) { return; } @@ -270,31 +271,33 @@ mac_socketpeer_label_associate_socket(struct socket *oldsocket, struct socket *newsocket) { #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return; + } #endif MAC_PERFORM(socketpeer_label_associate_socket, - (socket_t)oldsocket, oldsocket->so_label, - (socket_t)newsocket, newsocket->so_peerlabel); + (socket_t)oldsocket, oldsocket->so_label, + (socket_t)newsocket, newsocket->so_peerlabel); } int mac_socket_check_kqfilter(kauth_cred_t cred, struct knote *kn, - struct socket *so) + struct socket *so) { int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif - MAC_CHECK(socket_check_kqfilter, cred, kn, - (socket_t)so, so->so_label); - return (error); + MAC_CHECK(socket_check_kqfilter, cred, kn, + (socket_t)so, so->so_label); + return error; } static int @@ -304,14 +307,15 @@ mac_socket_check_select(kauth_cred_t cred, struct socket *so, int which) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - 
return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_select, cred, - (socket_t)so, so->so_label, which); - return (error); + (socket_t)so, so->so_label, which); + return error; } mac_socket_check_label_update(kauth_cred_t cred, struct socket *so, @@ -320,15 +324,16 @@ mac_socket_check_label_update(kauth_cred_t cred, struct socket *so, int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_label_update, cred, - (socket_t)so, so->so_label, - newlabel); - return (error); + (socket_t)so, so->so_label, + newlabel); + return error; } int @@ -337,17 +342,19 @@ mac_socket_label_update(kauth_cred_t cred, struct socket *so, struct label *labe int error; #if 0 #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif #endif error = mac_socket_check_label_update(cred, so, label); - if (error) - return (error); + if (error) { + return error; + } MAC_PERFORM(socket_label_update, cred, - (socket_t)so, so->so_label, label); + (socket_t)so, so->so_label, label); #if CONFIG_MACF_NET /* @@ -359,7 +366,7 @@ mac_socket_label_update(kauth_cred_t cred, struct socket *so, struct label *labe */ mac_inpcb_label_update(so); #endif - return (0); + return 0; } int @@ -371,27 +378,29 @@ mac_setsockopt_label(kauth_cred_t cred, struct socket *so, struct mac *mac) size_t len; error = mac_check_structmac_consistent(mac); - if (error) - return (error); + if (error) { + return error; + } MALLOC(buffer, char *, mac->m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(CAST_USER_ADDR_T(mac->m_string), buffer, - mac->m_buflen, &len); + mac->m_buflen, &len); if (error) { FREE(buffer, M_MACTEMP); - return (error); + return error; } intlabel = mac_socket_label_alloc(MAC_WAITOK); error = mac_socket_label_internalize(intlabel, buffer); FREE(buffer, M_MACTEMP); - if (error) + if (error) { goto out; + } error = mac_socket_label_update(cred, so, intlabel); out: mac_socket_label_free(intlabel); - return (error); + return error; } int @@ -404,15 +413,16 @@ mac_socket_label_get(__unused kauth_cred_t cred, struct socket *so, size_t len; error = mac_check_structmac_consistent(mac); - if (error) - return (error); + if (error) { + return error; + } MALLOC(elements, char *, mac->m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(CAST_USER_ADDR_T(mac->m_string), elements, - mac->m_buflen, &len); + mac->m_buflen, &len); if (error) { FREE(elements, M_MACTEMP); - return (error); + return error; } MALLOC(buffer, char *, mac->m_buflen, M_MACTEMP, M_WAITOK | M_ZERO); @@ -421,14 +431,15 @@ mac_socket_label_get(__unused kauth_cred_t cred, struct socket *so, error = mac_socket_label_externalize(intlabel, elements, buffer, mac->m_buflen); mac_socket_label_free(intlabel); - if (error == 0) + if (error == 0) { error = copyout(buffer, CAST_USER_ADDR_T(mac->m_string), - strlen(buffer)+1); + strlen(buffer) + 1); + } FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); - return (error); + return error; } int @@ -441,15 +452,16 @@ mac_socketpeer_label_get(__unused kauth_cred_t cred, struct socket *so, size_t len; error = mac_check_structmac_consistent(mac); - if (error) - return (error); + if (error) { + return error; + } 
MALLOC(elements, char *, mac->m_buflen, M_MACTEMP, M_WAITOK); error = copyinstr(CAST_USER_ADDR_T(mac->m_string), elements, - mac->m_buflen, &len); + mac->m_buflen, &len); if (error) { FREE(elements, M_MACTEMP); - return (error); + return error; } MALLOC(buffer, char *, mac->m_buflen, M_MACTEMP, M_WAITOK | M_ZERO); @@ -458,14 +470,15 @@ mac_socketpeer_label_get(__unused kauth_cred_t cred, struct socket *so, error = mac_socketpeer_label_externalize(intlabel, elements, buffer, mac->m_buflen); mac_socket_label_free(intlabel); - if (error == 0) + if (error == 0) { error = copyout(buffer, CAST_USER_ADDR_T(mac->m_string), - strlen(buffer)+1); + strlen(buffer) + 1); + } FREE(buffer, M_MACTEMP); FREE(elements, M_MACTEMP); - return (error); + return error; } #endif /* MAC_SOCKET */ @@ -476,14 +489,15 @@ mac_socket_check_accept(kauth_cred_t cred, struct socket *so) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_accept, cred, - (socket_t)so, so->so_label); - return (error); + (socket_t)so, so->so_label); + return error; } #if CONFIG_MACF_SOCKET_SUBSET @@ -494,19 +508,20 @@ mac_socket_check_accepted(kauth_cred_t cred, struct socket *so) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif if (sock_getaddr((socket_t)so, &sockaddr, 1) != 0) { error = ECONNABORTED; } else { MAC_CHECK(socket_check_accepted, cred, - (socket_t)so, so->so_label, sockaddr); + (socket_t)so, so->so_label, sockaddr); sock_freeaddr(sockaddr); } - return (error); + return error; } #endif @@ -517,14 +532,15 @@ mac_socket_check_bind(kauth_cred_t ucred, struct socket *so, int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_bind, ucred, - (socket_t)so, so->so_label, sockaddr); - return (error); + (socket_t)so, so->so_label, sockaddr); + return error; } int @@ -534,15 +550,16 @@ mac_socket_check_connect(kauth_cred_t cred, struct socket *so, int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_connect, cred, - (socket_t)so, so->so_label, - sockaddr); - return (error); + (socket_t)so, so->so_label, + sockaddr); + return error; } int @@ -551,13 +568,14 @@ mac_socket_check_create(kauth_cred_t cred, int domain, int type, int protocol) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_create, cred, domain, type, protocol); - return (error); + return error; } #if CONFIG_MACF_SOCKET && CONFIG_MACF_NET @@ -568,41 +586,43 @@ mac_socket_check_deliver(struct socket *so, struct mbuf *mbuf) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if 
(!mac_socket_enforce) { + return 0; + } #endif label = mac_mbuf_to_label(mbuf); /* Policy must deal with NULL label (unlabeled mbufs) */ MAC_CHECK(socket_check_deliver, - (socket_t)so, so->so_label, mbuf, label); - return (error); + (socket_t)so, so->so_label, mbuf, label); + return error; } #else int mac_socket_check_deliver(__unused struct socket *so, __unused struct mbuf *mbuf) { - return (0); + return 0; } #endif int mac_socket_check_ioctl(kauth_cred_t cred, struct socket *so, - unsigned int cmd) + unsigned int cmd) { int error; #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) + if (!mac_socket_enforce) { return 0; + } #endif MAC_CHECK(socket_check_ioctl, cred, - (socket_t)so, cmd, so->so_label); - return (error); + (socket_t)so, cmd, so->so_label); + return error; } int @@ -611,14 +631,15 @@ mac_socket_check_stat(kauth_cred_t cred, struct socket *so) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_stat, cred, - (socket_t)so, so->so_label); - return (error); + (socket_t)so, so->so_label); + return error; } int @@ -627,14 +648,15 @@ mac_socket_check_listen(kauth_cred_t cred, struct socket *so) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_listen, cred, - (socket_t)so, so->so_label); - return (error); + (socket_t)so, so->so_label); + return error; } int @@ -643,14 +665,15 @@ mac_socket_check_receive(kauth_cred_t cred, struct socket *so) int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_receive, cred, - (socket_t)so, so->so_label); - return (error); + (socket_t)so, so->so_label); + return error; } int @@ -659,62 +682,67 @@ mac_socket_check_received(kauth_cred_t cred, struct socket *so, struct sockaddr int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif - + MAC_CHECK(socket_check_received, cred, - so, so->so_label, saddr); - return (error); + so, so->so_label, saddr); + return error; } int mac_socket_check_send(kauth_cred_t cred, struct socket *so, - struct sockaddr *sockaddr) + struct sockaddr *sockaddr) { int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_send, cred, - (socket_t)so, so->so_label, sockaddr); - return (error); + (socket_t)so, so->so_label, sockaddr); + return error; } int mac_socket_check_setsockopt(kauth_cred_t cred, struct socket *so, - struct sockopt *sopt) + struct sockopt *sopt) { int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_setsockopt, 
cred, - (socket_t)so, so->so_label, sopt); - return (error); + (socket_t)so, so->so_label, sopt); + return error; } -int mac_socket_check_getsockopt(kauth_cred_t cred, struct socket *so, - struct sockopt *sopt) +int +mac_socket_check_getsockopt(kauth_cred_t cred, struct socket *so, + struct sockopt *sopt) { int error; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_socket_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_socket_enforce) { + return 0; + } #endif MAC_CHECK(socket_check_getsockopt, cred, - (socket_t)so, so->so_label, sopt); - return (error); + (socket_t)so, so->so_label, sopt); + return error; } diff --git a/security/mac_system.c b/security/mac_system.c index bae108765..a0c00105b 100644 --- a/security/mac_system.c +++ b/security/mac_system.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -76,14 +76,15 @@ mac_system_check_acct(kauth_cred_t cred, struct vnode *vp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_acct, cred, vp, vp != NULL ? 
vp->v_label : NULL); - return (error); + return error; } int @@ -93,13 +94,14 @@ mac_system_check_chud(kauth_cred_t cred) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_chud, cred); - return (error); + return error; } int @@ -109,13 +111,14 @@ mac_system_check_host_priv(kauth_cred_t cred) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_host_priv, cred); - return (error); + return error; } int @@ -125,13 +128,14 @@ mac_system_check_info(kauth_cred_t cred, const char *info_type) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_info, cred, info_type); - return (error); + return error; } int @@ -141,13 +145,14 @@ mac_system_check_nfsd(kauth_cred_t cred) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_nfsd, cred); - return (error); + return error; } int @@ -157,13 +162,14 @@ mac_system_check_reboot(kauth_cred_t cred, int howto) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_reboot, cred, howto); - return (error); + return error; } @@ -174,13 +180,14 @@ mac_system_check_settime(kauth_cred_t cred) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_settime, cred); - return (error); + return error; } int @@ -190,12 +197,13 @@ mac_system_check_swapon(kauth_cred_t cred, struct vnode *vp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_swapon, cred, vp, vp->v_label); - return (error); + return error; } int @@ -205,31 +213,33 @@ mac_system_check_swapoff(kauth_cred_t cred, struct vnode *vp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_swapoff, cred, vp, vp->v_label); - return (error); + return error; } int mac_system_check_sysctlbyname(kauth_cred_t cred, const char *namestring, int *name, - u_int namelen, user_addr_t oldctl, size_t oldlen, - user_addr_t newctl, size_t newlen) + u_int namelen, user_addr_t oldctl, size_t oldlen, + user_addr_t newctl, size_t newlen) { int error; - + #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { + return 0; + } #endif MAC_CHECK(system_check_sysctlbyname, cred, namestring, - name, namelen, oldctl, oldlen, newctl, newlen); - - return (error); + name, namelen, oldctl, oldlen, newctl, newlen); + + return error; } int @@ -239,11 +249,12 @@ mac_system_check_kas_info(kauth_cred_t cred, int selector) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_system_enforce) - return (0); + if (!mac_system_enforce) { 
+        return 0;
+    }
 #endif

     MAC_CHECK(system_check_kas_info, cred, selector);

-    return (error);
+    return error;
 }
diff --git a/security/mac_sysv_msg.c b/security/mac_sysv_msg.c
index 5a4016a7c..20e69ea3e 100644
--- a/security/mac_sysv_msg.c
+++ b/security/mac_sysv_msg.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*-
@@ -80,16 +80,16 @@ mac_sysv_msgmsg_label_alloc(void)
     struct label *label;

     label = mac_labelzone_alloc(MAC_WAITOK);
-    if (label == NULL)
-        return (NULL);
+    if (label == NULL) {
+        return NULL;
+    }
     MAC_PERFORM(sysvmsg_label_init, label);
-    return (label);
+    return label;
 }

 void
 mac_sysvmsg_label_init(struct msg *msgptr)
 {
-
     msgptr->label = mac_sysv_msgmsg_label_alloc();
 }

@@ -99,65 +99,62 @@ mac_sysv_msgqueue_label_alloc(void)
     struct label *label;

     label = mac_labelzone_alloc(MAC_WAITOK);
-    if (label == NULL)
-        return (NULL);
+    if (label == NULL) {
+        return NULL;
+    }
     MAC_PERFORM(sysvmsq_label_init, label);
-    return (label);
+    return label;
 }

 void
 mac_sysvmsq_label_init(struct msqid_kernel *msqptr)
 {
-
     msqptr->label = mac_sysv_msgqueue_label_alloc();
 }

 void
-mac_sysvmsg_label_associate(kauth_cred_t cred, struct msqid_kernel *msqptr, 
+mac_sysvmsg_label_associate(kauth_cred_t cred, struct msqid_kernel *msqptr,
     struct msg *msgptr)
 {
-
-    MAC_PERFORM(sysvmsg_label_associate, cred, msqptr, msqptr->label, 
-        msgptr, msgptr->label);
+    MAC_PERFORM(sysvmsg_label_associate, cred, msqptr, msqptr->label,
+        msgptr, msgptr->label);
 }

 void
 mac_sysvmsq_label_associate(kauth_cred_t cred, struct msqid_kernel *msqptr)
 {
-
     MAC_PERFORM(sysvmsq_label_associate, cred, msqptr, msqptr->label);
 }

 void
 mac_sysvmsg_label_recycle(struct msg *msgptr)
 {
-
     MAC_PERFORM(sysvmsg_label_recycle, msgptr->label);
 }

 void
 mac_sysvmsq_label_recycle(struct msqid_kernel *msqptr)
 {
-
     MAC_PERFORM(sysvmsq_label_recycle, msqptr->label);
 }

 int
 mac_sysvmsq_check_enqueue(kauth_cred_t cred, struct msg *msgptr,
-    struct msqid_kernel *msqptr)
+    struct msqid_kernel *msqptr)
 {
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvmsg_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvmsg_enforce) {
+        return 0;
+    }
 #endif

-    MAC_CHECK(sysvmsq_check_enqueue, cred, msgptr, msgptr->label, msqptr,
+    MAC_CHECK(sysvmsq_check_enqueue, cred, msgptr, msgptr->label, msqptr,
         msqptr->label);

-    return(error);
+    return error;
 }

 int
@@ -166,14 +163,15 @@ mac_sysvmsq_check_msgrcv(kauth_cred_t cred, struct msg *msgptr)
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvmsg_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvmsg_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvmsq_check_msgrcv, cred, msgptr, msgptr->label);

-    return(error);
+    return error;
 }

 int
@@ -182,14 +180,15 @@ mac_sysvmsq_check_msgrmid(kauth_cred_t cred, struct msg *msgptr)
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvmsg_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvmsg_enforce) {
+        return 0;
+    }
 #endif

-    MAC_CHECK(sysvmsq_check_msgrmid, cred, msgptr, msgptr->label); 
+    MAC_CHECK(sysvmsq_check_msgrmid, cred, msgptr, msgptr->label);

-    return(error);
+    return error;
 }

 int
@@ -198,14 +197,15 @@ mac_sysvmsq_check_msqget(kauth_cred_t cred, struct msqid_kernel *msqptr)
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvmsg_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvmsg_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvmsq_check_msqget, cred, msqptr, msqptr->label);

-    return(error);
+    return error;
 }

 int
@@ -214,14 +214,15 @@ mac_sysvmsq_check_msqsnd(kauth_cred_t cred, struct msqid_kernel *msqptr)
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvmsg_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvmsg_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvmsq_check_msqsnd, cred, msqptr, msqptr->label);

-    return(error);
+    return error;
 }

 int
@@ -230,14 +231,15 @@ mac_sysvmsq_check_msqrcv(kauth_cred_t cred, struct msqid_kernel *msqptr)
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvmsg_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvmsg_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvmsq_check_msqrcv, cred, msqptr, msqptr->label);

-    return(error);
+    return error;
 }

 int
@@ -247,12 +249,13 @@ mac_sysvmsq_check_msqctl(kauth_cred_t cred, struct msqid_kernel *msqptr,
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvmsg_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvmsg_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvmsq_check_msqctl, cred, msqptr, msqptr->label, cmd);

-    return(error);
+    return error;
 }
diff --git a/security/mac_sysv_sem.c b/security/mac_sysv_sem.c
index a136a0502..de5d463eb 100644
--- a/security/mac_sysv_sem.c
+++ b/security/mac_sysv_sem.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*-
@@ -81,23 +81,22 @@ mac_sysv_sem_label_alloc(void)
     struct label *label;

     label = mac_labelzone_alloc(MAC_WAITOK);
-    if (label == NULL)
-        return (NULL);
+    if (label == NULL) {
+        return NULL;
+    }
     MAC_PERFORM(sysvsem_label_init, label);
-    return (label);
+    return label;
 }

 void
 mac_sysvsem_label_init(struct semid_kernel *semakptr)
 {
-
     semakptr->label = mac_sysv_sem_label_alloc();
 }

 static void
 mac_sysv_sem_label_free(struct label *label)
 {
-
     MAC_PERFORM(sysvsem_label_destroy, label);
     mac_labelzone_free(label);
 }
@@ -105,7 +104,6 @@ mac_sysv_sem_label_free(struct label *label)
 void
 mac_sysvsem_label_destroy(struct semid_kernel *semakptr)
 {
-
     mac_sysv_sem_label_free(semakptr->label);
     semakptr->label = NULL;
 }
@@ -113,14 +111,12 @@ mac_sysvsem_label_destroy(struct semid_kernel *semakptr)
 void
 mac_sysvsem_label_associate(kauth_cred_t cred, struct semid_kernel *semakptr)
 {
-
     MAC_PERFORM(sysvsem_label_associate, cred, semakptr, semakptr->label);
 }

 void
 mac_sysvsem_label_recycle(struct semid_kernel *semakptr)
 {
-
     MAC_PERFORM(sysvsem_label_recycle, semakptr->label);
 }

@@ -131,14 +127,15 @@ mac_sysvsem_check_semctl(kauth_cred_t cred, struct semid_kernel *semakptr,
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvsem_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvsem_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvsem_check_semctl, cred, semakptr, semakptr->label, cmd);

-    return(error);
+    return error;
 }

 int
@@ -147,14 +144,15 @@ mac_sysvsem_check_semget(kauth_cred_t cred, struct semid_kernel *semakptr)
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvsem_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvsem_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvsem_check_semget, cred, semakptr, semakptr->label);

-    return(error);
+    return error;
 }

 int
@@ -164,13 +162,14 @@ mac_sysvsem_check_semop(kauth_cred_t cred, struct semid_kernel *semakptr,
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvsem_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvsem_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvsem_check_semop, cred, semakptr, semakptr->label,
         accesstype);

-    return(error);
+    return error;
 }
diff --git a/security/mac_sysv_shm.c b/security/mac_sysv_shm.c
index b6777cb74..21ff04ac5 100644
--- a/security/mac_sysv_shm.c
+++ b/security/mac_sysv_shm.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*-
@@ -83,23 +83,22 @@ mac_sysv_shm_label_alloc(void)
     struct label *label;

     label = mac_labelzone_alloc(MAC_WAITOK);
-    if (label == NULL)
-        return (NULL);
+    if (label == NULL) {
+        return NULL;
+    }
     MAC_PERFORM(sysvshm_label_init, label);
-    return (label);
+    return label;
 }

 void
 mac_sysvshm_label_init(struct shmid_kernel *shmsegptr)
 {
-
     shmsegptr->label = mac_sysv_shm_label_alloc();
 }

 static void
 mac_sysv_shm_label_free(struct label *label)
 {
-
     MAC_PERFORM(sysvshm_label_destroy, label);
     mac_labelzone_free(label);
 }
@@ -107,7 +106,6 @@ mac_sysv_shm_label_free(struct label *label)
 void
 mac_sysvshm_label_destroy(struct shmid_kernel *shmsegptr)
 {
-
     mac_sysv_shm_label_free(shmsegptr->label);
     shmsegptr->label = NULL;
 }
@@ -115,14 +113,12 @@ mac_sysvshm_label_destroy(struct shmid_kernel *shmsegptr)
 void
 mac_sysvshm_label_associate(struct ucred *cred, struct shmid_kernel *shmsegptr)
 {
-
     MAC_PERFORM(sysvshm_label_associate, cred, shmsegptr, shmsegptr->label);
 }

 void
 mac_sysvshm_label_recycle(struct shmid_kernel *shmsegptr)
 {
-
     MAC_PERFORM(sysvshm_label_recycle, shmsegptr->label);
 }

@@ -133,15 +129,16 @@ mac_sysvshm_check_shmat(struct ucred *cred, struct shmid_kernel *shmsegptr,
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvshm_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvshm_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvshm_check_shmat, cred, shmsegptr, shmsegptr->label,
         shmflg);

-    return(error);
+    return error;
 }

 int
@@ -151,15 +148,16 @@ mac_sysvshm_check_shmctl(struct ucred *cred, struct shmid_kernel *shmsegptr,
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvshm_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvshm_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvshm_check_shmctl, cred, shmsegptr, shmsegptr->label,
         cmd);

-    return(error);
+    return error;
 }

 int
@@ -168,14 +166,15 @@ mac_sysvshm_check_shmdt(struct ucred *cred, struct shmid_kernel *shmsegptr)
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvshm_enforce)
-        return (0);
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvshm_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvshm_check_shmdt, cred, shmsegptr, shmsegptr->label);

-    return(error);
+    return error;
 }

 int
@@ -185,13 +184,14 @@ mac_sysvshm_check_shmget(struct ucred *cred, struct shmid_kernel *shmsegptr,
     int error;

 #if SECURITY_MAC_CHECK_ENFORCE
-    /* 21167099 - only check if we allow write */
-    if (!mac_sysvshm_enforce) {
+    /* 21167099 - only check if we allow write */
+    if (!mac_sysvshm_enforce) {
+        return 0;
+    }
 #endif

     MAC_CHECK(sysvshm_check_shmget, cred, shmsegptr, shmsegptr->label,
         shmflg);

-    return(error);
+    return error;
 }
diff --git a/security/mac_vfs.c b/security/mac_vfs.c
index b18fc092c..81b311012 100644
--- a/security/mac_vfs.c
+++ b/security/mac_vfs.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
 /*-
@@ -87,7 +87,7 @@
 #include 

 /* convert {R,W,X}_OK values to V{READ,WRITE,EXEC} */
-#define ACCESS_MODE_TO_VNODE_MASK(m)    (m << 6)
+#define ACCESS_MODE_TO_VNODE_MASK(m)    (m << 6)

 static struct label *
 mac_devfsdirent_label_alloc(void)
@@ -95,16 +95,16 @@ mac_devfsdirent_label_alloc(void)
     struct label *label;

     label = mac_labelzone_alloc(MAC_WAITOK);
-    if (label == NULL)
-        return (NULL);
+    if (label == NULL) {
+        return NULL;
+    }
     MAC_PERFORM(devfs_label_init, label);
-    return (label);
+    return label;
 }

 void
 mac_devfs_label_init(struct devnode *de)
 {
-
     de->dn_label = mac_devfsdirent_label_alloc();
 }

@@ -114,16 +114,16 @@ mac_mount_label_alloc(void)
     struct label *label;

     label = mac_labelzone_alloc(MAC_WAITOK);
-    if (label == NULL)
-        return (NULL);
+    if (label == NULL) {
+        return NULL;
+    }
     MAC_PERFORM(mount_label_init, label);
-    return (label);
+    return label;
 }

 void
 mac_mount_label_init(struct mount *mp)
 {
-
     mp->mnt_mntlabel = mac_mount_label_alloc();
 }

@@ -133,10 +133,11 @@ mac_vnode_label_alloc(void)
     struct label *label;

     label = mac_labelzone_alloc(MAC_WAITOK);
-    if (label == NULL)
-        return (NULL);
+    if (label == NULL) {
+        return NULL;
+    }
     MAC_PERFORM(vnode_label_init, label);
-    return (label);
+    return label;
 }

 void
@@ -148,18 +149,17 @@ mac_vnode_label_init(vnode_t vp)

 int
 mac_vnode_label_init_needed(vnode_t vp)
 {
-    return (mac_label_vnodes != 0 && vp->v_label == NULL);
+    return mac_label_vnodes != 0 && vp->v_label == NULL;
 }

-/* 
+/*
  * vnode labels are allocated at the same time as vnodes, but vnodes are never
  * freed. Instead, we want to remove any sensitive information before putting
  * them on the free list for reuse.
-*/
+ */
 void
 mac_vnode_label_recycle(vnode_t vp)
 {
-
     MAC_PERFORM(vnode_label_recycle, vp->v_label);
 }

@@ -182,7 +182,6 @@ mac_devfs_label_destroy(struct devnode *de)
 static void
 mac_mount_label_free(struct label *label)
 {
-
     MAC_PERFORM(mount_label_destroy, label);
     mac_labelzone_free(label);
 }
@@ -233,7 +232,7 @@ mac_vnode_label_externalize_audit(struct vnode *vp, struct mac *mac)
     error = MAC_EXTERNALIZE_AUDIT(vnode, vp->v_label,
         mac->m_string, mac->m_buflen);

-    return (error);
+    return error;
 }

 int
@@ -244,7 +243,7 @@ mac_vnode_label_externalize(struct label *label, char *elements,
     error = MAC_EXTERNALIZE(vnode, label, elements, outbuf, outbuflen);

-    return (error);
+    return error;
 }

 int
@@ -254,7 +253,7 @@ mac_vnode_label_internalize(struct label *label, char *string)
     error = MAC_INTERNALIZE(vnode, label, string);

-    return (error);
+    return error;
 }

 int
@@ -264,7 +263,7 @@ mac_mount_label_internalize(struct label *label, char *string)
     error = MAC_INTERNALIZE(mount, label, string);

-    return (error);
+    return error;
 }

 int
@@ -275,7 +274,7 @@ mac_mount_label_externalize(struct label *label, char *elements,
     error = MAC_EXTERNALIZE(mount, label, elements, outbuf, outbuflen);

-    return (error);
+    return error;
 }

 void
@@ -283,8 +282,9 @@ mac_devfs_label_copy(struct label *src, struct label *dest)
 {
 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_device_enforce)
+    if (!mac_device_enforce) {
         return;
+    }
 #endif

     MAC_PERFORM(devfs_label_copy, src, dest);
@@ -296,8 +296,9 @@ mac_devfs_label_update(struct mount *mp, struct devnode *de,
 {
 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_device_enforce)
+    if (!mac_device_enforce) {
         return;
+    }
 #endif

     MAC_PERFORM(devfs_label_update, mp, de, de->dn_label, vp,
@@ -313,8 +314,9 @@ mac_vnode_label_associate(struct mount *mp, struct vnode *vp, vfs_context_t ctx)

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
-        return (error);
+    if (!mac_vnode_enforce) {
+        return error;
+    }
 #endif

     /* XXX: should not inspect v_tag in kernel! */
@@ -332,7 +334,7 @@ mac_vnode_label_associate(struct mount *mp, struct vnode *vp, vfs_context_t ctx)
         break;
     }

-    return (error);
+    return error;
 }

 void
@@ -341,8 +343,9 @@ mac_vnode_label_associate_devfs(struct mount *mp, struct devnode *de,
 {
 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_device_enforce)
+    if (!mac_device_enforce) {
         return;
+    }
 #endif

     MAC_PERFORM(vnode_label_associate_devfs,
@@ -359,7 +362,7 @@ mac_vnode_label_associate_extattr(struct mount *mp, struct vnode *vp)
     MAC_CHECK(vnode_label_associate_extattr, mp, mp->mnt_mntlabel, vp,
         vp->v_label);

-    return (error);
+    return error;
 }

 void
@@ -367,11 +370,13 @@ mac_vnode_label_associate_singlelabel(struct mount *mp, struct vnode *vp)
 {
 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif
-    if (!mac_label_vnodes)
+    if (!mac_label_vnodes) {
         return;
+    }

     MAC_PERFORM(vnode_label_associate_singlelabel, mp, mp ?
         mp->mnt_mntlabel : NULL, vp, vp->v_label);
@@ -386,16 +391,18 @@ mac_vnode_notify_create(vfs_context_t ctx, struct mount *mp,

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
-        return (0);
+    if (!mac_vnode_enforce) {
+        return 0;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
-        return (0);
+    if (!mac_cred_check_enforce(cred)) {
+        return 0;
+    }
     MAC_CHECK(vnode_notify_create, cred, mp, mp->mnt_mntlabel, dvp,
         dvp->v_label, vp, vp->v_label, cnp);

-    return (error);
+    return error;
 }

 void
@@ -406,12 +413,14 @@ mac_vnode_notify_rename(vfs_context_t ctx, struct vnode *vp,

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_rename, cred, vp, vp->v_label,
         dvp, dvp->v_label, cnp);
 }
@@ -423,29 +432,33 @@ mac_vnode_notify_open(vfs_context_t ctx, struct vnode *vp, int acc_flags)

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_open, cred, vp, vp->v_label, acc_flags);
 }

 void
 mac_vnode_notify_link(vfs_context_t ctx, struct vnode *vp,
-    struct vnode *dvp, struct componentname *cnp)
+    struct vnode *dvp, struct componentname *cnp)
 {
     kauth_cred_t cred;

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_link, cred, dvp, dvp->v_label, vp, vp->v_label, cnp);
 }
@@ -456,12 +469,14 @@ mac_vnode_notify_deleteextattr(vfs_context_t ctx, struct vnode *vp, const char *

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_deleteextattr, cred, vp, vp->v_label, name);
 }
@@ -472,12 +487,14 @@ mac_vnode_notify_setacl(vfs_context_t ctx, struct vnode *vp, struct kauth_acl *a

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_setacl, cred, vp, vp->v_label, acl);
 }
@@ -488,12 +505,14 @@ mac_vnode_notify_setattrlist(vfs_context_t ctx, struct vnode *vp, struct attrlis

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_setattrlist, cred, vp, vp->v_label, alist);
 }
@@ -504,12 +523,14 @@ mac_vnode_notify_setextattr(vfs_context_t ctx, struct vnode *vp, const char *nam

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_setextattr, cred, vp, vp->v_label, name, uio);
 }
@@ -520,12 +541,14 @@ mac_vnode_notify_setflags(vfs_context_t ctx, struct vnode *vp, u_long flags)

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_setflags, cred, vp, vp->v_label, flags);
 }
@@ -536,12 +559,14 @@ mac_vnode_notify_setmode(vfs_context_t ctx, struct vnode *vp, mode_t mode)

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_setmode, cred, vp, vp->v_label, mode);
 }
@@ -552,12 +577,14 @@ mac_vnode_notify_setowner(vfs_context_t ctx, struct vnode *vp, uid_t uid, gid_t 

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_setowner, cred, vp, vp->v_label, uid, gid);
 }
@@ -568,12 +595,14 @@ mac_vnode_notify_setutimes(vfs_context_t ctx, struct vnode *vp, struct timespec 

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_setutimes, cred, vp, vp->v_label, atime, mtime);
 }
@@ -584,12 +613,14 @@ mac_vnode_notify_truncate(vfs_context_t ctx, kauth_cred_t file_cred, struct vnod

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
+    if (!mac_cred_check_enforce(cred)) {
         return;
+    }
     MAC_PERFORM(vnode_notify_truncate, cred, file_cred, vp, vp->v_label);
 }
@@ -606,16 +637,19 @@ mac_vnode_label_update_extattr(struct mount *mp, struct vnode *vp,

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return;
+    }
 #endif
-    if (!mac_label_vnodes)
+    if (!mac_label_vnodes) {
         return;
+    }

     MAC_PERFORM(vnode_label_update_extattr, mp, mp->mnt_mntlabel, vp,
-        vp->v_label, name);
-    if (error == 0)
+        vp->v_label, name);
+    if (error == 0) {
         return;
+    }

     vnode_lock(vp);
     vnode_relabel(vp);
@@ -632,24 +666,27 @@ mac_vnode_label_store(vfs_context_t ctx, struct vnode *vp,

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_vnode_enforce)
+    if (!mac_vnode_enforce) {
         return 0;
+    }
 #endif
-    if (!mac_label_vnodes)
+    if (!mac_label_vnodes) {
         return 0;
+    }

     cred = vfs_context_ucred(ctx);
-    if (!mac_cred_check_enforce(cred))
-        return (0);
+    if (!mac_cred_check_enforce(cred)) {
+        return 0;
+    }
     MAC_CHECK(vnode_label_store, cred, vp, vp->v_label, intlabel);

-    return (error);
+    return error;
 }

 void
 mac_cred_label_update_execve(vfs_context_t ctx, kauth_cred_t new, struct vnode *vp, off_t offset,
-    struct vnode *scriptvp, struct label *scriptvnodelabel, struct label *execl, u_int *csflags,
-    void *macextensions, int *disjoint, int *labelupdateerror)
+    struct vnode *scriptvp, struct label *scriptvnodelabel, struct label *execl, u_int *csflags,
+    void *macextensions, int *disjoint, int *labelupdateerror)
 {
     kauth_cred_t cred;
     *disjoint = 0;
@@ -658,8 +695,9 @@ mac_cred_label_update_execve(vfs_context_t ctx, kauth_cred_t new, struct vnode *

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_proc_enforce || !mac_vnode_enforce)
+    if (!mac_proc_enforce || !mac_vnode_enforce) {
         return;
+    }
 #endif

     /* mark the new cred to indicate "matching" includes the label */
@@ -677,38 +715,42 @@ mac_cred_label_update_execve(vfs_context_t ctx, kauth_cred_t new, struct vnode *
         u_int i;

         error = 0;
-        for (i = 0; i< mac_policy_list.staticmax; i++) {
+        for (i = 0; i < mac_policy_list.staticmax; i++) {
             mpc = mac_policy_list.entries[i].mpc;
-            if (mpc == NULL)
+            if (mpc == NULL) {
                 continue;
+            }

             mpo_cred_label_update_execve_t *hook = mpc->mpc_ops->mpo_cred_label_update_execve;
-            if (hook == NULL)
+            if (hook == NULL) {
                 continue;
+            }

             size_t spawnattrlen = 0;
             void *spawnattr = exec_spawnattr_getmacpolicyinfo(macextensions, mpc->mpc_name, &spawnattrlen);

             error = mac_error_select(hook(cred, new, vfs_context_proc(ctx), vp, offset, scriptvp,
-                vp->v_label, scriptvnodelabel, execl, csflags, spawnattr, spawnattrlen, disjoint),
-                error);
+                vp->v_label, scriptvnodelabel, execl, csflags, spawnattr, spawnattrlen, disjoint),
+                error);
         }
-        if (mac_policy_list_conditional_busy() != 0) {
+        if (mac_policy_list_conditional_busy() != 0) {
             for (; i <= mac_policy_list.maxindex; i++) {
                 mpc = mac_policy_list.entries[i].mpc;
-                if (mpc == NULL)
+                if (mpc == NULL) {
                     continue;
+                }

                 mpo_cred_label_update_execve_t *hook = mpc->mpc_ops->mpo_cred_label_update_execve;
-                if (hook == NULL)
+                if (hook == NULL) {
                     continue;
+                }

                 size_t spawnattrlen = 0;
                 void *spawnattr = exec_spawnattr_getmacpolicyinfo(macextensions, mpc->mpc_name, &spawnattrlen);

                 error = mac_error_select(hook(cred, new, vfs_context_proc(ctx), vp, offset, scriptvp,
-                    vp->v_label, scriptvnodelabel, execl, csflags, spawnattr, spawnattrlen, disjoint),
-                    error);
+                    vp->v_label, scriptvnodelabel, execl, csflags, spawnattr, spawnattrlen, disjoint),
+                    error);
             }
             mac_policy_list_unbusy();
         }
@@ -718,16 +760,17 @@ mac_cred_label_update_execve(vfs_context_t ctx, kauth_cred_t new, struct vnode *

 int
 mac_cred_check_label_update_execve(vfs_context_t ctx, struct vnode *vp, off_t offset,
-    struct vnode *scriptvp, struct label *scriptvnodelabel, struct label *execlabel,
-    struct proc *p, void *macextensions)
+    struct vnode *scriptvp, struct label *scriptvnodelabel, struct label *execlabel,
+    struct proc *p, void *macextensions)
 {
     kauth_cred_t cred;
     int result = 0;

 #if SECURITY_MAC_CHECK_ENFORCE
     /* 21167099 - only check if we allow write */
-    if (!mac_proc_enforce || !mac_vnode_enforce)
+    if (!mac_proc_enforce || !mac_vnode_enforce) {
         return result;
+    }
 #endif

     cred = vfs_context_ucred(ctx);
@@ -741,29 +784,33 @@ mac_cred_check_label_update_execve(vfs_context_t ctx, struct vnode *vp, off_t of
         struct mac_policy_conf *mpc;
         u_int i;

-        for (i = 0; i< mac_policy_list.staticmax; i++) {
+        for (i = 0; i < mac_policy_list.staticmax; i++) {
            mpc = mac_policy_list.entries[i].mpc;
-            if (mpc == NULL)
+            if (mpc == NULL) {
                 continue;
+            }

             mpo_cred_check_label_update_execve_t *hook = mpc->mpc_ops->mpo_cred_check_label_update_execve;
-            if (hook == NULL)
+            if (hook == NULL) {
                 continue;
+            }

             size_t spawnattrlen = 0;
void *spawnattr = exec_spawnattr_getmacpolicyinfo(macextensions, mpc->mpc_name, &spawnattrlen); result = result || hook(cred, vp, offset, scriptvp, vp->v_label, scriptvnodelabel, execlabel, p, spawnattr, spawnattrlen); } - if (mac_policy_list_conditional_busy() != 0) { + if (mac_policy_list_conditional_busy() != 0) { for (; i <= mac_policy_list.maxindex; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } mpo_cred_check_label_update_execve_t *hook = mpc->mpc_ops->mpo_cred_check_label_update_execve; - if (hook == NULL) + if (hook == NULL) { continue; + } size_t spawnattrlen = 0; void *spawnattr = exec_spawnattr_getmacpolicyinfo(macextensions, mpc->mpc_name, &spawnattrlen); @@ -774,7 +821,7 @@ mac_cred_check_label_update_execve(vfs_context_t ctx, struct vnode *vp, off_t of } } - return (result); + return result; } int @@ -787,17 +834,19 @@ mac_vnode_check_access(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } /* Convert {R,W,X}_OK values to V{READ,WRITE,EXEC} for entry points */ mask = ACCESS_MODE_TO_VNODE_MASK(acc_mode); MAC_CHECK(vnode_check_access, cred, vp, vp->v_label, mask); - return (error); - } + return error; +} int mac_vnode_check_chdir(vfs_context_t ctx, struct vnode *dvp) @@ -807,14 +856,16 @@ mac_vnode_check_chdir(vfs_context_t ctx, struct vnode *dvp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_chdir, cred, dvp, dvp->v_label); - return (error); + return error; } int @@ -826,14 +877,16 @@ mac_vnode_check_chroot(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_chroot, cred, dvp, dvp->v_label, cnp); - return (error); + return error; } int @@ -845,15 +898,17 @@ mac_vnode_check_clone(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_clone, cred, dvp, dvp->v_label, vp, vp->v_label, cnp); - return (error); + return error; } int mac_vnode_check_create(vfs_context_t ctx, struct vnode *dvp, @@ -864,14 +919,16 @@ mac_vnode_check_create(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_create, cred, dvp, dvp->v_label, cnp, vap); - return (error); + return error; } int @@ -883,15 +940,17 @@ mac_vnode_check_unlink(vfs_context_t 
ctx, struct vnode *dvp, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_unlink, cred, dvp, dvp->v_label, vp, vp->v_label, cnp); - return (error); + return error; } #if 0 int @@ -903,14 +962,16 @@ mac_vnode_check_deleteacl(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_deleteacl, cred, vp, vp->v_label, type); - return (error); + return error; } #endif @@ -923,14 +984,16 @@ mac_vnode_check_deleteextattr(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_deleteextattr, cred, vp, vp->v_label, name); - return (error); + return error; } int mac_vnode_check_exchangedata(vfs_context_t ctx, @@ -941,16 +1004,18 @@ mac_vnode_check_exchangedata(vfs_context_t ctx, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); - MAC_CHECK(vnode_check_exchangedata, cred, v1, v1->v_label, + if (!mac_cred_check_enforce(cred)) { + return 0; + } + MAC_CHECK(vnode_check_exchangedata, cred, v1, v1->v_label, v2, v2->v_label); - return (error); + return error; } #if 0 @@ -962,14 +1027,16 @@ mac_vnode_check_getacl(vfs_context_t ctx, struct vnode *vp, acl_type_t type) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_getacl, cred, vp, vp->v_label, type); - return (error); + return error; } #endif @@ -982,14 +1049,16 @@ mac_vnode_check_getattr(vfs_context_t ctx, struct ucred *file_cred, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_getattr, cred, file_cred, vp, vp->v_label, va); - return (error); + return error; } int @@ -1001,16 +1070,18 @@ mac_vnode_check_getattrlist(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_getattrlist, cred, vp, vp->v_label, alist); /* Falsify results instead of returning error? 
*/ - return (error); + return error; } int @@ -1022,8 +1093,9 @@ mac_vnode_check_exec(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce || !mac_vnode_enforce) + if (!mac_proc_enforce || !mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); @@ -1037,48 +1109,52 @@ mac_vnode_check_exec(vfs_context_t ctx, struct vnode *vp, struct mac_policy_conf *mpc; u_int i; - for (i = 0; i< mac_policy_list.staticmax; i++) { + for (i = 0; i < mac_policy_list.staticmax; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } mpo_vnode_check_exec_t *hook = mpc->mpc_ops->mpo_vnode_check_exec; - if (hook == NULL) + if (hook == NULL) { continue; + } size_t spawnattrlen = 0; void *spawnattr = exec_spawnattr_getmacpolicyinfo(imgp->ip_px_smpx, mpc->mpc_name, &spawnattrlen); error = mac_error_select( - hook(cred, - vp, imgp->ip_scriptvp, vp->v_label, imgp->ip_scriptlabelp, - imgp->ip_execlabelp, &imgp->ip_ndp->ni_cnd, &imgp->ip_csflags, - spawnattr, spawnattrlen), error); + hook(cred, + vp, imgp->ip_scriptvp, vp->v_label, imgp->ip_scriptlabelp, + imgp->ip_execlabelp, &imgp->ip_ndp->ni_cnd, &imgp->ip_csflags, + spawnattr, spawnattrlen), error); } - if (mac_policy_list_conditional_busy() != 0) { + if (mac_policy_list_conditional_busy() != 0) { for (; i <= mac_policy_list.maxindex; i++) { mpc = mac_policy_list.entries[i].mpc; - if (mpc == NULL) + if (mpc == NULL) { continue; + } mpo_vnode_check_exec_t *hook = mpc->mpc_ops->mpo_vnode_check_exec; - if (hook == NULL) + if (hook == NULL) { continue; + } size_t spawnattrlen = 0; void *spawnattr = exec_spawnattr_getmacpolicyinfo(imgp->ip_px_smpx, mpc->mpc_name, &spawnattrlen); error = mac_error_select( - hook(cred, - vp, imgp->ip_scriptvp, vp->v_label, imgp->ip_scriptlabelp, - imgp->ip_execlabelp, &imgp->ip_ndp->ni_cnd, &imgp->ip_csflags, - spawnattr, spawnattrlen), error); + hook(cred, + vp, imgp->ip_scriptvp, vp->v_label, imgp->ip_scriptlabelp, + imgp->ip_execlabelp, &imgp->ip_ndp->ni_cnd, &imgp->ip_csflags, + spawnattr, spawnattrlen), error); } mac_policy_list_unbusy(); } } - return (error); + return error; } int @@ -1089,125 +1165,127 @@ mac_vnode_check_fsgetpath(vfs_context_t ctx, struct vnode *vp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_fsgetpath, cred, vp, vp->v_label); - return (error); + return error; } int mac_vnode_check_signature(struct vnode *vp, struct cs_blob *cs_blob, - struct image_params *imgp, - unsigned int *cs_flags, unsigned int *signer_type, - int flags) + struct image_params *imgp, + unsigned int *cs_flags, unsigned int *signer_type, + int flags) { - int error; - char *fatal_failure_desc = NULL; - size_t fatal_failure_desc_len = 0; + int error; + char *fatal_failure_desc = NULL; + size_t fatal_failure_desc_len = 0; - char *vn_path = NULL; - vm_size_t vn_pathlen = MAXPATHLEN; - cpu_type_t cpu_type = (imgp == NULL) ? CPU_TYPE_ANY : imgp->ip_origcputype; + char *vn_path = NULL; + vm_size_t vn_pathlen = MAXPATHLEN; + cpu_type_t cpu_type = (imgp == NULL) ? 
CPU_TYPE_ANY : imgp->ip_origcputype; #if SECURITY_MAC_CHECK_ENFORCE - /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce || !mac_vnode_enforce) - return 0; + /* 21167099 - only check if we allow write */ + if (!mac_proc_enforce || !mac_vnode_enforce) { + return 0; + } #endif - MAC_CHECK(vnode_check_signature, vp, vp->v_label, cpu_type, cs_blob, - cs_flags, signer_type, flags, &fatal_failure_desc, &fatal_failure_desc_len); + MAC_CHECK(vnode_check_signature, vp, vp->v_label, cpu_type, cs_blob, + cs_flags, signer_type, flags, &fatal_failure_desc, &fatal_failure_desc_len); - if (fatal_failure_desc_len) { - // A fatal code signature validation failure occured, formulate a crash - // reason. + if (fatal_failure_desc_len) { + // A fatal code signature validation failure occured, formulate a crash + // reason. - char const *path = NULL; + char const *path = NULL; - vn_path = (char *)kalloc(MAXPATHLEN); - if (vn_path != NULL) { - if (vn_getpath(vp, vn_path, (int*)&vn_pathlen) == 0) { - path = vn_path; - } else { - path = "(get vnode path failed)"; - } - } else { - path = "(path alloc failed)"; - } - - if (error == 0) { - panic("mac_vnode_check_signature: MAC hook returned no error, " - "but status is claimed to be fatal? " - "path: '%s', fatal_failure_desc_len: %ld, fatal_failure_desc:\n%s\n", - path, fatal_failure_desc_len, fatal_failure_desc); - } - - printf("mac_vnode_check_signature: %s: code signature validation failed fatally: %s", - path, fatal_failure_desc); - - if (imgp == NULL) { - goto out; - } - - os_reason_t reason = os_reason_create(OS_REASON_CODESIGNING, - CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG); - - if (reason == OS_REASON_NULL) { - printf("mac_vnode_check_signature: %s: failure to allocate exit reason for validation failure: %s\n", - path, fatal_failure_desc); - goto out; - } - - imgp->ip_cs_error = reason; - reason->osr_flags = (OS_REASON_FLAG_GENERATE_CRASH_REPORT | - OS_REASON_FLAG_CONSISTENT_FAILURE); - - if (fatal_failure_desc == NULL) { - // This may happen if allocation for the buffer failed. - printf("mac_vnode_check_signature: %s: fatal failure is missing its description.\n", path); - } else { - mach_vm_address_t data_addr = 0; - - int reason_error = 0; - int kcdata_error = 0; - - if ((reason_error = os_reason_alloc_buffer_noblock(reason, kcdata_estimate_required_buffer_size - (1, fatal_failure_desc_len))) == 0 && - (kcdata_error = kcdata_get_memory_addr(&reason->osr_kcd_descriptor, - EXIT_REASON_USER_DESC, fatal_failure_desc_len, - &data_addr)) == KERN_SUCCESS) { - kern_return_t mc_error = kcdata_memcpy(&reason->osr_kcd_descriptor, (mach_vm_address_t)data_addr, - fatal_failure_desc, fatal_failure_desc_len); - - if (mc_error != KERN_SUCCESS) { - printf("mac_vnode_check_signature: %s: failed to copy reason string " - "(kcdata_memcpy error: %d, length: %ld)\n", - path, mc_error, fatal_failure_desc_len); - } - } else { - printf("mac_vnode_check_signature: %s: failed to allocate space for reason string " - "(os_reason_alloc_buffer error: %d, kcdata error: %d, length: %ld)\n", - path, reason_error, kcdata_error, fatal_failure_desc_len); - } + vn_path = (char *)kalloc(MAXPATHLEN); + if (vn_path != NULL) { + if (vn_getpath(vp, vn_path, (int*)&vn_pathlen) == 0) { + path = vn_path; + } else { + path = "(get vnode path failed)"; + } + } else { + path = "(path alloc failed)"; + } + + if (error == 0) { + panic("mac_vnode_check_signature: MAC hook returned no error, " + "but status is claimed to be fatal? 
" + "path: '%s', fatal_failure_desc_len: %ld, fatal_failure_desc:\n%s\n", + path, fatal_failure_desc_len, fatal_failure_desc); + } + + printf("mac_vnode_check_signature: %s: code signature validation failed fatally: %s", + path, fatal_failure_desc); + + if (imgp == NULL) { + goto out; + } - } - } + os_reason_t reason = os_reason_create(OS_REASON_CODESIGNING, + CODESIGNING_EXIT_REASON_TASKGATED_INVALID_SIG); + + if (reason == OS_REASON_NULL) { + printf("mac_vnode_check_signature: %s: failure to allocate exit reason for validation failure: %s\n", + path, fatal_failure_desc); + goto out; + } + + imgp->ip_cs_error = reason; + reason->osr_flags = (OS_REASON_FLAG_GENERATE_CRASH_REPORT | + OS_REASON_FLAG_CONSISTENT_FAILURE); + + if (fatal_failure_desc == NULL) { + // This may happen if allocation for the buffer failed. + printf("mac_vnode_check_signature: %s: fatal failure is missing its description.\n", path); + } else { + mach_vm_address_t data_addr = 0; + + int reason_error = 0; + int kcdata_error = 0; + + if ((reason_error = os_reason_alloc_buffer_noblock(reason, kcdata_estimate_required_buffer_size + (1, fatal_failure_desc_len))) == 0 && + (kcdata_error = kcdata_get_memory_addr(&reason->osr_kcd_descriptor, + EXIT_REASON_USER_DESC, fatal_failure_desc_len, + &data_addr)) == KERN_SUCCESS) { + kern_return_t mc_error = kcdata_memcpy(&reason->osr_kcd_descriptor, (mach_vm_address_t)data_addr, + fatal_failure_desc, fatal_failure_desc_len); + + if (mc_error != KERN_SUCCESS) { + printf("mac_vnode_check_signature: %s: failed to copy reason string " + "(kcdata_memcpy error: %d, length: %ld)\n", + path, mc_error, fatal_failure_desc_len); + } + } else { + printf("mac_vnode_check_signature: %s: failed to allocate space for reason string " + "(os_reason_alloc_buffer error: %d, kcdata error: %d, length: %ld)\n", + path, reason_error, kcdata_error, fatal_failure_desc_len); + } + } + } out: - if (vn_path) { - kfree(vn_path, MAXPATHLEN); - } + if (vn_path) { + kfree(vn_path, MAXPATHLEN); + } - if (fatal_failure_desc_len > 0 && fatal_failure_desc != NULL) { - kfree(fatal_failure_desc, fatal_failure_desc_len); - } + if (fatal_failure_desc_len > 0 && fatal_failure_desc != NULL) { + kfree(fatal_failure_desc, fatal_failure_desc_len); + } - return (error); + return error; } #if 0 @@ -1219,14 +1297,16 @@ mac_vnode_check_getacl(vfs_context_t ctx, struct vnode *vp, acl_type_t type) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_getacl, cred, vp, vp->v_label, type); - return (error); + return error; } #endif @@ -1239,15 +1319,17 @@ mac_vnode_check_getextattr(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_getextattr, cred, vp, vp->v_label, name, uio); - return (error); + return error; } int @@ -1258,14 +1340,16 @@ mac_vnode_check_ioctl(vfs_context_t ctx, struct vnode *vp, u_int cmd) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if 
(!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_ioctl, cred, vp, vp->v_label, cmd); - return (error); + return error; } int @@ -1277,16 +1361,18 @@ mac_vnode_check_kqfilter(vfs_context_t ctx, kauth_cred_t file_cred, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_kqfilter, cred, file_cred, kn, vp, vp->v_label); - return (error); + return error; } int @@ -1298,15 +1384,17 @@ mac_vnode_check_link(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_link, cred, dvp, dvp->v_label, vp, vp->v_label, cnp); - return (error); + return error; } int @@ -1317,14 +1405,16 @@ mac_vnode_check_listextattr(vfs_context_t ctx, struct vnode *vp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_listextattr, cred, vp, vp->v_label); - return (error); + return error; } int @@ -1336,14 +1426,16 @@ mac_vnode_check_lookup_preflight(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_lookup_preflight, cred, dvp, dvp->v_label, path, pathlen); - return (error); + return error; } int @@ -1355,14 +1447,16 @@ mac_vnode_check_lookup(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_lookup, cred, dvp, dvp->v_label, cnp); - return (error); + return error; } int @@ -1373,14 +1467,16 @@ mac_vnode_check_open(vfs_context_t ctx, struct vnode *vp, int acc_mode) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_open, cred, vp, vp->v_label, acc_mode); - return (error); + return error; } int @@ -1392,16 +1488,18 @@ mac_vnode_check_read(vfs_context_t ctx, struct ucred *file_cred, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_read, cred, file_cred, vp, 
vp->v_label); - return (error); + return error; } int @@ -1412,14 +1510,16 @@ mac_vnode_check_readdir(vfs_context_t ctx, struct vnode *dvp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_readdir, cred, dvp, dvp->v_label); - return (error); + return error; } int @@ -1430,14 +1530,16 @@ mac_vnode_check_readlink(vfs_context_t ctx, struct vnode *vp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_readlink, cred, vp, vp->v_label); - return (error); + return error; } int @@ -1449,15 +1551,17 @@ mac_vnode_check_label_update(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_label_update, cred, vp, vp->v_label, newlabel); - return (error); + return error; } int @@ -1470,27 +1574,31 @@ mac_vnode_check_rename(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_rename_from, cred, dvp, dvp->v_label, vp, vp->v_label, cnp); - if (error) - return (error); + if (error) { + return error; + } MAC_CHECK(vnode_check_rename_to, cred, tdvp, tdvp->v_label, tvp, tvp != NULL ? tvp->v_label : NULL, dvp == tdvp, tcnp); - if (error) - return (error); + if (error) { + return error; + } MAC_CHECK(vnode_check_rename, cred, dvp, dvp->v_label, vp, vp->v_label, cnp, tdvp, tdvp->v_label, tvp, tvp != NULL ? 
tvp->v_label : NULL, tcnp); - return (error); + return error; } int @@ -1501,14 +1609,16 @@ mac_vnode_check_revoke(vfs_context_t ctx, struct vnode *vp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_revoke, cred, vp, vp->v_label); - return (error); + return error; } int @@ -1519,14 +1629,16 @@ mac_vnode_check_searchfs(vfs_context_t ctx, struct vnode *vp, struct attrlist *a #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_searchfs, cred, vp, vp->v_label, alist); - return (error); + return error; } int @@ -1537,14 +1649,16 @@ mac_vnode_check_select(vfs_context_t ctx, struct vnode *vp, int which) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_select, cred, vp, vp->v_label, which); - return (error); + return error; } int @@ -1556,14 +1670,16 @@ mac_vnode_check_setacl(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_setacl, cred, vp, vp->v_label, acl); - return (error); + return error; } int @@ -1575,14 +1691,16 @@ mac_vnode_check_setattrlist(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_setattrlist, cred, vp, vp->v_label, alist); - return (error); + return error; } int @@ -1594,15 +1712,17 @@ mac_vnode_check_setextattr(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_setextattr, cred, vp, vp->v_label, name, uio); - return (error); + return error; } int @@ -1613,14 +1733,16 @@ mac_vnode_check_setflags(vfs_context_t ctx, struct vnode *vp, u_long flags) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_setflags, cred, vp, vp->v_label, flags); - return (error); + return error; } int @@ -1631,14 +1753,16 @@ mac_vnode_check_setmode(vfs_context_t ctx, struct vnode *vp, mode_t mode) #if 
SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_setmode, cred, vp, vp->v_label, mode); - return (error); + return error; } int @@ -1650,14 +1774,16 @@ mac_vnode_check_setowner(vfs_context_t ctx, struct vnode *vp, uid_t uid, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_setowner, cred, vp, vp->v_label, uid, gid); - return (error); + return error; } int @@ -1669,15 +1795,17 @@ mac_vnode_check_setutimes(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_setutimes, cred, vp, vp->v_label, atime, mtime); - return (error); + return error; } int @@ -1689,15 +1817,17 @@ mac_vnode_check_stat(vfs_context_t ctx, struct ucred *file_cred, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_stat, cred, file_cred, vp, vp->v_label); - return (error); + return error; } int @@ -1709,14 +1839,16 @@ mac_vnode_check_trigger_resolve(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_trigger_resolve, cred, dvp, dvp->v_label, cnp); - return (error); + return error; } int @@ -1728,16 +1860,18 @@ mac_vnode_check_truncate(vfs_context_t ctx, struct ucred *file_cred, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_truncate, cred, file_cred, vp, vp->v_label); - return (error); + return error; } int @@ -1749,15 +1883,17 @@ mac_vnode_check_write(vfs_context_t ctx, struct ucred *file_cred, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_write, cred, file_cred, vp, vp->v_label); - return (error); + return error; } int @@ -1769,14 +1905,16 @@ mac_vnode_check_uipc_bind(vfs_context_t ctx, struct vnode *dvp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif 
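Every mac_vfs.c hunk above and below applies the same mechanical cleanup: single-statement `if` bodies gain braces, and `return (x);` becomes `return x;`, leaving each MAC hook wrapper with one canonical shape. The sketch below shows that shape in isolation; `vnode_check_example` is a hypothetical hook name, not a function in this patch, and the code is only meaningful inside xnu, where MAC_CHECK() expands to a loop over the registered policies that accumulates its result in the local `error`.

	int
	mac_vnode_check_example(vfs_context_t ctx, struct vnode *vp)
	{
		kauth_cred_t cred;
		int error;

	#if SECURITY_MAC_CHECK_ENFORCE
		/* 21167099 - only check if we allow write */
		if (!mac_vnode_enforce) {
			return 0;
		}
	#endif
		cred = vfs_context_ucred(ctx);
		if (!mac_cred_check_enforce(cred)) {
			return 0;
		}
		/* MAC_CHECK() sets `error` from the registered policies' answers. */
		MAC_CHECK(vnode_check_example, cred, vp, vp->v_label);
		return error;
	}
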
cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_uipc_bind, cred, dvp, dvp->v_label, cnp, vap); - return (error); + return error; } int @@ -1787,14 +1925,16 @@ mac_vnode_check_uipc_connect(vfs_context_t ctx, struct vnode *vp, struct socket #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(vnode_check_uipc_connect, cred, vp, vp->v_label, (socket_t) so); - return (error); + return error; } void @@ -1803,8 +1943,9 @@ mac_vnode_label_update(vfs_context_t ctx, struct vnode *vp, struct label *newlab kauth_cred_t cred = vfs_context_ucred(ctx); struct label *tmpl = NULL; - if (vp->v_label == NULL) + if (vp->v_label == NULL) { tmpl = mac_vnode_label_alloc(); + } vnode_lock(vp); @@ -1817,8 +1958,9 @@ mac_vnode_label_update(vfs_context_t ctx, struct vnode *vp, struct label *newlab MAC_PERFORM(vnode_label_update, cred, vp, vp->v_label, newlabel); vnode_unlock(vp); - if (tmpl != NULL) + if (tmpl != NULL) { mac_vnode_label_free(tmpl); + } } int @@ -1828,13 +1970,14 @@ mac_vnode_find_sigs(struct proc *p, struct vnode *vp, off_t offset) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_proc_enforce || !mac_vnode_enforce) + if (!mac_proc_enforce || !mac_vnode_enforce) { return 0; + } #endif MAC_CHECK(vnode_find_sigs, p, vp, offset, vp->v_label); - return (error); + return error; } void @@ -1845,44 +1988,51 @@ mac_mount_label_associate(vfs_context_t ctx, struct mount *mp) /* XXX: eventually this logic may be handled by the policy? */ /* We desire MULTILABEL for the root filesystem. */ - if ((mp->mnt_flag & MNT_ROOTFS) && - (strcmp(mp->mnt_vfsstat.f_fstypename, "hfs") == 0)) + if ((mp->mnt_flag & MNT_ROOTFS) && + (strcmp(mp->mnt_vfsstat.f_fstypename, "hfs") == 0)) { mp->mnt_flag |= MNT_MULTILABEL; + } /* MULTILABEL on DEVFS. */ - if (strcmp(mp->mnt_vfsstat.f_fstypename, "devfs") == 0) + if (strcmp(mp->mnt_vfsstat.f_fstypename, "devfs") == 0) { mp->mnt_flag |= MNT_MULTILABEL; + } /* MULTILABEL on FDESC pseudo-filesystem. */ - if (strcmp(mp->mnt_vfsstat.f_fstypename, "fdesc") == 0) + if (strcmp(mp->mnt_vfsstat.f_fstypename, "fdesc") == 0) { mp->mnt_flag |= MNT_MULTILABEL; + } /* MULTILABEL on all NFS filesystems. */ - if (strcmp(mp->mnt_vfsstat.f_fstypename, "nfs") == 0) + if (strcmp(mp->mnt_vfsstat.f_fstypename, "nfs") == 0) { mp->mnt_flag |= MNT_MULTILABEL; + } /* MULTILABEL on all AFP filesystems. */ - if (strcmp(mp->mnt_vfsstat.f_fstypename, "afpfs") == 0) + if (strcmp(mp->mnt_vfsstat.f_fstypename, "afpfs") == 0) { mp->mnt_flag |= MNT_MULTILABEL; + } if (mp->mnt_vtable != NULL) { /* Any filesystem that supports native XATTRs. */ - if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR)) + if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR)) { mp->mnt_flag |= MNT_MULTILABEL; + } /* Filesystem does not support multilabel. */ if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFSNOMACLABEL) && - (mp->mnt_flag & MNT_MULTILABEL)) + (mp->mnt_flag & MNT_MULTILABEL)) { mp->mnt_flag &= ~MNT_MULTILABEL; + } } MAC_PERFORM(mount_label_associate, cred, mp, mp->mnt_mntlabel); #if DEBUG printf("MAC Framework enabling %s support: %s -> %s (%s)\n", - mp->mnt_flag & MNT_MULTILABEL ? 
"multilabel" : "singlelabel", - mp->mnt_vfsstat.f_mntfromname, - mp->mnt_vfsstat.f_mntonname, - mp->mnt_vfsstat.f_fstypename); + mp->mnt_flag & MNT_MULTILABEL ? "multilabel" : "singlelabel", + mp->mnt_vfsstat.f_mntfromname, + mp->mnt_vfsstat.f_mntonname, + mp->mnt_vfsstat.f_fstypename); #endif } @@ -1895,15 +2045,17 @@ mac_mount_check_mount(vfs_context_t ctx, struct vnode *vp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_mount, cred, vp, vp->v_label, cnp, vfc_name); - return (error); + return error; } int @@ -1915,14 +2067,16 @@ mac_mount_check_snapshot_create(vfs_context_t ctx, struct mount *mp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_snapshot_create, cred, mp, name); - return (error); + return error; } int @@ -1934,14 +2088,16 @@ mac_mount_check_snapshot_delete(vfs_context_t ctx, struct mount *mp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_snapshot_delete, cred, mp, name); - return (error); + return error; } int @@ -1953,14 +2109,16 @@ mac_mount_check_snapshot_revert(vfs_context_t ctx, struct mount *mp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_snapshot_revert, cred, mp, name); - return (error); + return error; } int @@ -1971,15 +2129,17 @@ mac_mount_check_remount(vfs_context_t ctx, struct mount *mp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_remount, cred, mp, mp->mnt_mntlabel); - return (error); + return error; } int @@ -1990,19 +2150,21 @@ mac_mount_check_umount(vfs_context_t ctx, struct mount *mp) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_umount, cred, mp, mp->mnt_mntlabel); - return (error); + return error; } int -mac_mount_check_getattr(vfs_context_t ctx, struct mount *mp, +mac_mount_check_getattr(vfs_context_t ctx, struct mount *mp, struct vfs_attr *vfa) { kauth_cred_t cred; @@ -2010,18 +2172,20 @@ mac_mount_check_getattr(vfs_context_t ctx, struct mount *mp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } 
#endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_getattr, cred, mp, mp->mnt_mntlabel, vfa); - return (error); + return error; } int -mac_mount_check_setattr(vfs_context_t ctx, struct mount *mp, +mac_mount_check_setattr(vfs_context_t ctx, struct mount *mp, struct vfs_attr *vfa) { kauth_cred_t cred; @@ -2029,14 +2193,16 @@ mac_mount_check_setattr(vfs_context_t ctx, struct mount *mp, #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_setattr, cred, mp, mp->mnt_mntlabel, vfa); - return (error); + return error; } int @@ -2047,15 +2213,17 @@ mac_mount_check_stat(vfs_context_t ctx, struct mount *mount) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_stat, cred, mount, mount->mnt_mntlabel); - return (error); + return error; } int @@ -2066,15 +2234,17 @@ mac_mount_check_label_update(vfs_context_t ctx, struct mount *mount) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_label_update, cred, mount, mount->mnt_mntlabel); - return (error); + return error; } int @@ -2085,15 +2255,17 @@ mac_mount_check_fsctl(vfs_context_t ctx, struct mount *mp, u_int cmd) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif cred = vfs_context_ucred(ctx); - if (!mac_cred_check_enforce(cred)) - return (0); + if (!mac_cred_check_enforce(cred)) { + return 0; + } MAC_CHECK(mount_check_fsctl, cred, mp, mp->mnt_mntlabel, cmd); - return (error); + return error; } void @@ -2102,8 +2274,9 @@ mac_devfs_label_associate_device(dev_t dev, struct devnode *de, { #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_device_enforce) + if (!mac_device_enforce) { return; + } #endif MAC_PERFORM(devfs_label_associate_device, dev, de, de->dn_label, @@ -2116,8 +2289,9 @@ mac_devfs_label_associate_directory(const char *dirname, int dirnamelen, { #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_device_enforce) + if (!mac_device_enforce) { return; + } #endif MAC_PERFORM(devfs_label_associate_directory, dirname, dirnamelen, de, @@ -2131,21 +2305,25 @@ vn_setlabel(struct vnode *vp, struct label *intlabel, vfs_context_t context) #if SECURITY_MAC_CHECK_ENFORCE /* 21167099 - only check if we allow write */ - if (!mac_vnode_enforce) + if (!mac_vnode_enforce) { return 0; + } #endif - if (!mac_label_vnodes) - return (0); + if (!mac_label_vnodes) { + return 0; + } if (vp->v_mount == NULL) { printf("vn_setlabel: null v_mount\n"); - if (vp->v_type != VNON) + if (vp->v_type != VNON) { printf("vn_setlabel: null v_mount with non-VNON\n"); - return (EBADF); + } + return EBADF; } - if ((vp->v_mount->mnt_flag & 
MNT_MULTILABEL) == 0) - return (ENOTSUP); + if ((vp->v_mount->mnt_flag & MNT_MULTILABEL) == 0) { + return ENOTSUP; + } /* * Multi-phase commit. First check the policies to confirm the @@ -2154,26 +2332,26 @@ vn_setlabel(struct vnode *vp, struct label *intlabel, vfs_context_t context) * should update the vnode at the end as part of VNOP_SETLABEL()? */ error = mac_vnode_check_label_update(context, vp, intlabel); - if (error) - return (error); + if (error) { + return error; + } error = VNOP_SETLABEL(vp, intlabel, context); if (error == ENOTSUP) { error = mac_vnode_label_store(context, vp, - intlabel); + intlabel); if (error) { printf("%s: mac_vnode_label_store failed %d\n", - __func__, error); - return (error); + __func__, error); + return error; } mac_vnode_label_update(context, vp, intlabel); - } else - if (error) { + } else if (error) { printf("vn_setlabel: vop setlabel failed %d\n", error); - return (error); + return error; } - return (0); + return 0; } int @@ -2197,13 +2375,14 @@ mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, if (fnp->fd_fd == -1) { MAC_PERFORM(vnode_label_associate_file, vfs_context_ucred(ctx), mp, mp->mnt_mntlabel, NULL, NULL, vp, vp->v_label); - return (0); + return 0; } p = vfs_context_proc(ctx); error = fp_lookup(p, fnp->fd_fd, &fp, 0); - if (error) - return (error); + if (error) { + return error; + } if (fp->f_fglob == NULL) { error = EBADF; @@ -2213,8 +2392,9 @@ mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, switch (FILEGLOB_DTYPE(fp->f_fglob)) { case DTYPE_VNODE: fvp = (struct vnode *)fp->f_fglob->fg_data; - if ((error = vnode_getwithref(fvp))) + if ((error = vnode_getwithref(fvp))) { goto out; + } MAC_PERFORM(vnode_label_copy, fvp->v_label, vp->v_label); (void)vnode_put(fvp); break; @@ -2223,7 +2403,7 @@ mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, so = (struct socket *)fp->f_fglob->fg_data; socket_lock(so, 1); MAC_PERFORM(vnode_label_associate_socket, - vfs_context_ucred(ctx), (socket_t)so, so->so_label, + vfs_context_ucred(ctx), (socket_t)so, so->so_label, vp, vp->v_label); socket_unlock(so, 1); break; @@ -2258,5 +2438,5 @@ mac_vnode_label_associate_fdesc(struct mount *mp, struct fdescnode *fnp, } out: fp_drop(p, fnp->fd_fd, fp, 0); - return (error); + return error; } diff --git a/security/mac_vfs_subr.c b/security/mac_vfs_subr.c index f8b2263b9..6f4c096c7 100644 --- a/security/mac_vfs_subr.c +++ b/security/mac_vfs_subr.c @@ -2,7 +2,7 @@ * Copyright (c) 2007 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
* Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -43,7 +43,7 @@ */ int vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, - struct componentname *cnp, int flags, vfs_context_t ctx) + struct componentname *cnp, int flags, vfs_context_t ctx) { int error = 0; @@ -52,15 +52,17 @@ vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, /* are we labeling vnodes? If not still notify of create */ if (mac_label_vnodes == 0) { - if (flags & VNODE_LABEL_CREATE) + if (flags & VNODE_LABEL_CREATE) { error = mac_vnode_notify_create(ctx, mp, dvp, vp, cnp); + } return 0; } /* if already VL_LABELED */ - if (vp->v_lflag & VL_LABELED) - return (0); + if (vp->v_lflag & VL_LABELED) { + return 0; + } vnode_lock_spin(vp); @@ -71,7 +73,7 @@ vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, */ if (vp->v_lflag & VL_LABELED) { vnode_unlock(vp); - return (0); + return 0; } if ((vp->v_lflag & VL_LABEL) == 0) { @@ -80,19 +82,22 @@ vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, /* Could sleep on disk I/O, drop lock. */ vnode_unlock(vp); - if (vp->v_label == NULL) + if (vp->v_label == NULL) { vp->v_label = mac_vnode_label_alloc(); + } - if (flags & VNODE_LABEL_CREATE) + if (flags & VNODE_LABEL_CREATE) { error = mac_vnode_notify_create(ctx, mp, dvp, vp, cnp); - else + } else { error = mac_vnode_label_associate(mp, vp, ctx); + } vnode_lock_spin(vp); - if ((error == 0) && (vp->v_flag & VNCACHEABLE)) + if ((error == 0) && (vp->v_flag & VNCACHEABLE)) { vp->v_lflag |= VL_LABELED; + } vp->v_lflag &= ~VL_LABEL; if (vp->v_lflag & VL_LABELWAIT) { @@ -108,8 +113,8 @@ vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, while (vp->v_lflag & VL_LABEL) { vp->v_lflag |= VL_LABELWAIT; - error = msleep(&vp->v_label, &vp->v_lock, PVFS|PDROP, - "vnode_label", &ts); + error = msleep(&vp->v_label, &vp->v_lock, PVFS | PDROP, + "vnode_label", &ts); vnode_lock_spin(vp); if (error == EWOULDBLOCK) { @@ -121,7 +126,7 @@ vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, } vnode_unlock(vp); - return (error); + return error; } @@ -136,7 +141,6 @@ vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp, void vnode_relabel(struct vnode *vp) { - /* Wait for any other labeling to complete. 
*/ while (vp->v_lflag & VL_LABEL) { vp->v_lflag |= VL_LABELWAIT; @@ -154,20 +158,21 @@ vnode_relabel(struct vnode *vp) */ int -mac_vnop_setxattr (struct vnode *vp, const char *name, char *buf, size_t len) +mac_vnop_setxattr(struct vnode *vp, const char *name, char *buf, size_t len) { vfs_context_t ctx; int options = XATTR_NOSECURITY; - char uio_buf[ UIO_SIZEOF(1) ]; - uio_t auio; + char uio_buf[UIO_SIZEOF(1)]; + uio_t auio; int error; - if (vfs_isrdonly(vp->v_mount)) - return (EROFS); + if (vfs_isrdonly(vp->v_mount)) { + return EROFS; + } ctx = vfs_context_current(); auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, CAST_USER_ADDR_T(buf), len); error = vn_setxattr(vp, name, auio, options, ctx); @@ -179,38 +184,39 @@ mac_vnop_setxattr (struct vnode *vp, const char *name, char *buf, size_t len) } #endif - return (error); + return error; } int -mac_vnop_getxattr (struct vnode *vp, const char *name, char *buf, size_t len, - size_t *attrlen) +mac_vnop_getxattr(struct vnode *vp, const char *name, char *buf, size_t len, + size_t *attrlen) { vfs_context_t ctx = vfs_context_current(); int options = XATTR_NOSECURITY; - char uio_buf[ UIO_SIZEOF(1) ]; - uio_t auio; + char uio_buf[UIO_SIZEOF(1)]; + uio_t auio; int error; auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, - &uio_buf[0], sizeof(uio_buf)); + &uio_buf[0], sizeof(uio_buf)); uio_addiov(auio, CAST_USER_ADDR_T(buf), len); error = vn_getxattr(vp, name, auio, attrlen, options, ctx); *attrlen = len - uio_resid(auio); - return (error); + return error; } int -mac_vnop_removexattr (struct vnode *vp, const char *name) +mac_vnop_removexattr(struct vnode *vp, const char *name) { vfs_context_t ctx = vfs_context_current(); int options = XATTR_NOSECURITY; int error; - if (vfs_isrdonly(vp->v_mount)) - return (EROFS); + if (vfs_isrdonly(vp->v_mount)) { + return EROFS; + } error = vn_removexattr(vp, name, options, ctx); #if CONFIG_FSE @@ -221,5 +227,5 @@ mac_vnop_removexattr (struct vnode *vp, const char *name) } #endif - return (error); + return error; } diff --git a/tests/Makefile b/tests/Makefile index 019b19433..5f165b8b7 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -24,7 +24,7 @@ include $(DEVELOPER_DIR)/AppleInternal/Makefiles/darwintest/Makefile.common OTHER_CFLAGS = -Weverything -Wno-gnu-union-cast -Wno-missing-field-initializers -Wno-partial-availability OTHER_CFLAGS += -Wno-missing-noreturn -Wno-vla -Wno-reserved-id-macro -Wno-documentation-unknown-command OTHER_CFLAGS += -Wno-padded -Wno-used-but-marked-unused -Wno-covered-switch-default -Wno-nullability-extension -OTHER_CFLAGS += -Wno-gnu-empty-initializer -Wno-unused-macros +OTHER_CFLAGS += -Wno-gnu-empty-initializer -Wno-unused-macros -Wno-undef OTHER_CFLAGS += --std=gnu11 -isystem $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders OTHER_CFLAGS += -UT_NAMESPACE_PREFIX -DT_NAMESPACE_PREFIX=xnu OTHER_CFLAGS += -F $(SDKROOT)/System/Library/PrivateFrameworks @@ -37,6 +37,8 @@ CODESIGN_ALLOCATE:=$(shell xcrun -sdk "$(TARGETSDK)" -find codesign_allocate) atm_diagnostic_flag: OTHER_CFLAGS += drop_priv.c +testposixshm: INVALID_ARCHS = i386 + avx: INVALID_ARCHS = i386 avx: OTHER_CFLAGS += -mavx512f -mavx512bw -mavx512vl avx: OTHER_CFLAGS += -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders @@ -59,25 +61,20 @@ kdebug: OTHER_LDFLAGS = -framework ktrace -ldarwintest_utils -framework kperf EXCLUDED_SOURCES += drop_priv.c kperf_helpers.c xnu_quick_test_helpers.c 
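The mac_vfs_subr.c hunk just above reflows mac_vnop_setxattr(), mac_vnop_getxattr() and mac_vnop_removexattr(), the KPIs a MAC policy uses to persist label data in a vnode's extended attributes. Two behaviors are worth noting: the write paths refuse read-only mounts with EROFS before doing any work, and on success mac_vnop_getxattr() reports the number of bytes actually copied out through *attrlen. A minimal round-trip sketch follows, assuming the declarations come from xnu's security headers and using a hypothetical attribute name; the helper is illustrative only and compiles only inside a kernel policy module, not as a standalone program.

	#include <security/mac_policy.h>

	#define EXAMPLE_XATTR "com.example.maclabel"	/* hypothetical name */

	static int
	example_label_roundtrip(struct vnode *vp)
	{
		char in[8] = "label";
		char out[8];
		size_t outlen = 0;
		int error;

		/* Fails with EROFS when vp's mount is read-only. */
		error = mac_vnop_setxattr(vp, EXAMPLE_XATTR, in, sizeof(in));
		if (error) {
			return error;
		}
		/* On success, outlen holds the number of bytes read back. */
		error = mac_vnop_getxattr(vp, EXAMPLE_XATTR, out, sizeof(out), &outlen);
		if (error) {
			return error;
		}
		return mac_vnop_removexattr(vp, EXAMPLE_XATTR);
	}
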
-ifeq ($(PLATFORM),iPhoneOS) -CONFIG_FREEZE_DEFINE:= -DCONFIG_FREEZE -else -CONFIG_FREEZE_DEFINE:= -EXCLUDED_SOURCES += jumbo_va_spaces_28530648.c +ifneq ($(PLATFORM),iPhoneOS) +EXCLUDED_SOURCES += jumbo_va_spaces_28530648.c perf_compressor.c memorystatus_freeze_test.c endif -perf_compressor: OTHER_CFLAGS += $(CONFIG_FREEZE_DEFINE) perf_compressor: OTHER_LDFLAGS += -ldarwintest_utils perf_compressor: CODE_SIGN_ENTITLEMENTS=./private_entitlement.plist -memorystatus_freeze_test: OTHER_CFLAGS += $(CONFIG_FREEZE_DEFINE) memorystatus_freeze_test: OTHER_LDFLAGS += -ldarwintest_utils stackshot: OTHER_CFLAGS += -Wno-objc-messaging-id stackshot: OTHER_LDFLAGS += -lkdd -framework Foundation stackshot: INVALID_ARCHS = i386 -telemetry: OTHER_LDFLAGS = -framework ktrace +telemetry: OTHER_LDFLAGS = -framework ktrace -framework CoreFoundation telemetry: INVALID_ARCHS = i386 memorystatus_zone_test: INVALID_ARCHS = i386 @@ -105,7 +102,7 @@ monotonic_core: OTHER_LDFLAGS += -framework ktrace monotonic_core: INVALID_ARCHS = i386 perf_exit: perf_exit_proc -perf_exit: OTHER_LDFLAGS = -framework ktrace +perf_exit: OTHER_LDFLAGS = -framework ktrace -ldarwintest_utils perf_exit: INVALID_ARCHS = i386 perf_exit: CODE_SIGN_ENTITLEMENTS=./private_entitlement.plist @@ -230,10 +227,30 @@ install-excserver: ; exc_resource_threads: excserver exc_resource_threads: OTHER_CFLAGS += $(OBJROOT)/excserver.c -I $(OBJROOT) +ifneq (osx,$(TARGET_NAME)) +EXCLUDED_SOURCES += ldt_code32.s ldt.c +else +$(OBJROOT)/ldt_mach_exc_server.c: + $(MIG) $(CFLAGS) \ + -user /dev/null \ + -server $(OBJROOT)/ldt_mach_exc_server.c \ + -header $(OBJROOT)/ldt_mach_exc.h \ + mach_exc.defs + +ldt: INVALID_ARCHS = i386 +ldt: $(OBJROOT)/ldt_mach_exc_server.c +ldt: OTHER_CFLAGS += -I $(OBJROOT) $(SRCROOT)/ldt_code32.s -Wl,-pagezero_size,0x1000 +endif + ifneq ($(PLATFORM),BridgeOS) EXCLUDED_SOURCES += remote_time.c else remote_time: INVALID_ARCHS = armv7 armv7s arm64_32 endif +vm_phys_footprint: OTHER_LDFLAGS += -framework CoreFoundation -framework IOSurface +vm_phys_footprint_legacy: legacy_footprint.entitlement +vm_phys_footprint_legacy: OTHER_LDFLAGS += -framework CoreFoundation -framework IOSurface +vm_phys_footprint_legacy: CODE_SIGN_ENTITLEMENTS=./legacy_footprint.entitlement + include $(DEVELOPER_DIR)/AppleInternal/Makefiles/darwintest/Makefile.targets diff --git a/tests/atm_diagnostic_flag.c b/tests/atm_diagnostic_flag.c index 864ffd643..021930149 100644 --- a/tests/atm_diagnostic_flag.c +++ b/tests/atm_diagnostic_flag.c @@ -7,7 +7,7 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.debugging")); /* * The low 8 bits may be in use, so modify one - * of the upper 8 bits to ensure round-tripping. + * of the upper 8 bits to ensure round-tripping. 
*/ #define LIBTRACE_PRIVATE_DATA 0x01000000 @@ -19,60 +19,64 @@ static uint32_t _original; static uint32_t _save_atm_diagnostic_flag(void) { - kern_return_t kr; - kr = host_get_atm_diagnostic_flag(mach_host_self(), &_original); - T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "host_get_atm_diagnostic_flag()"); - T_LOG("Original ATM diagnostic flag: 0x%08x", _original); - return _original; + kern_return_t kr; + kr = host_get_atm_diagnostic_flag(mach_host_self(), &_original); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "host_get_atm_diagnostic_flag()"); + T_LOG("Original ATM diagnostic flag: 0x%08x", _original); + return _original; } static kern_return_t _mutate_atm_diagnostic_flag(uint32_t v) { - T_LOG("Try to set ATM diagnostic flag to: 0x%08x", v); - kern_return_t kr = host_set_atm_diagnostic_flag(mach_host_self(), v); - if (kr == KERN_SUCCESS) _needs_reset = true; - return kr; + T_LOG("Try to set ATM diagnostic flag to: 0x%08x", v); + kern_return_t kr = host_set_atm_diagnostic_flag(mach_host_self(), v); + if (kr == KERN_SUCCESS) { + _needs_reset = true; + } + return kr; } static void _reset_atm_diagnostic_flag(void) { - if (!_needs_reset) return; - T_LOG("Reset ATM diagnostic flag to: 0x%08x", _original); - kern_return_t kr; - kr = host_set_atm_diagnostic_flag(mach_host_self(), _original); - if (kr != KERN_SUCCESS) { - T_ASSERT_FAIL("host_set_atm_diagnostic_flag() failed: %s", - mach_error_string(kr)); - } + if (!_needs_reset) { + return; + } + T_LOG("Reset ATM diagnostic flag to: 0x%08x", _original); + kern_return_t kr; + kr = host_set_atm_diagnostic_flag(mach_host_self(), _original); + if (kr != KERN_SUCCESS) { + T_ASSERT_FAIL("host_set_atm_diagnostic_flag() failed: %s", + mach_error_string(kr)); + } } T_DECL(toggle_atm_diagnostic_flag, - "change the atm_diagnostic_flag, which should use the commpage", - T_META_ASROOT(true)) + "change the atm_diagnostic_flag, which should use the commpage", + T_META_ASROOT(true)) { - T_ATEND(_reset_atm_diagnostic_flag); - uint32_t f = _save_atm_diagnostic_flag(); - f ^= LIBTRACE_PRIVATE_DATA; - kern_return_t kr = _mutate_atm_diagnostic_flag(f); - if (kr == KERN_NOT_SUPPORTED) { - T_SKIP("Seems ATM is disabled on this platform. " - "Ignoring host_set_atm_diagnostic_flag functionality. " - "Bailing gracefully."); - } - T_EXPECT_MACH_SUCCESS(kr, "Set atm_diagnostic_flag"); + T_ATEND(_reset_atm_diagnostic_flag); + uint32_t f = _save_atm_diagnostic_flag(); + f ^= LIBTRACE_PRIVATE_DATA; + kern_return_t kr = _mutate_atm_diagnostic_flag(f); + if (kr == KERN_NOT_SUPPORTED) { + T_SKIP("Seems ATM is disabled on this platform. " + "Ignoring host_set_atm_diagnostic_flag functionality. 
" + "Bailing gracefully."); + } + T_EXPECT_MACH_SUCCESS(kr, "Set atm_diagnostic_flag"); } T_DECL(unprivileged_atm_diagnostic_flag, - "expect to fail to set the atm_diagnostic_flag", - T_META_ASROOT(false)) + "expect to fail to set the atm_diagnostic_flag", + T_META_ASROOT(false)) { - drop_priv(); - T_ATEND(_reset_atm_diagnostic_flag); - uint32_t f = _save_atm_diagnostic_flag(); - f ^= LIBTRACE_PRIVATE_DATA; - kern_return_t kr = _mutate_atm_diagnostic_flag(f); - T_EXPECT_MACH_ERROR(KERN_INVALID_ARGUMENT, kr, - "Deny change to atm_diagnostic_flag"); + drop_priv(); + T_ATEND(_reset_atm_diagnostic_flag); + uint32_t f = _save_atm_diagnostic_flag(); + f ^= LIBTRACE_PRIVATE_DATA; + kern_return_t kr = _mutate_atm_diagnostic_flag(f); + T_EXPECT_MACH_ERROR(KERN_INVALID_ARGUMENT, kr, + "Deny change to atm_diagnostic_flag"); } diff --git a/tests/avx.c b/tests/avx.c index 0041e999a..345361957 100644 --- a/tests/avx.c +++ b/tests/avx.c @@ -17,7 +17,7 @@ T_GLOBAL_META( T_META_NAMESPACE("xnu.intel"), T_META_CHECK_LEAKS(false) -); + ); #define NORMAL_RUN_TIME (10) #define LONG_RUN_TIME (10*60) @@ -30,45 +30,45 @@ char karray_str_buf[1024]; /* * ymm defines/globals/prototypes */ -#define STOP_COOKIE_256 0x01234567 +#define STOP_COOKIE_256 0x01234567 #if defined(__x86_64__) -#define YMM_MAX 16 -#define X86_AVX_STATE_T x86_avx_state64_t -#define X86_AVX_STATE_COUNT x86_AVX_STATE64_COUNT -#define X86_AVX_STATE_FLAVOR x86_AVX_STATE64 -#define MCONTEXT_SIZE_256 sizeof(struct __darwin_mcontext_avx64) +#define YMM_MAX 16 +#define X86_AVX_STATE_T x86_avx_state64_t +#define X86_AVX_STATE_COUNT x86_AVX_STATE64_COUNT +#define X86_AVX_STATE_FLAVOR x86_AVX_STATE64 +#define MCONTEXT_SIZE_256 sizeof(struct __darwin_mcontext_avx64) #else -#define YMM_MAX 8 -#define X86_AVX_STATE_T x86_avx_state32_t -#define X86_AVX_STATE_COUNT x86_AVX_STATE32_COUNT -#define X86_AVX_STATE_FLAVOR x86_AVX_STATE32 -#define MCONTEXT_SIZE_256 sizeof(struct __darwin_mcontext_avx32) +#define YMM_MAX 8 +#define X86_AVX_STATE_T x86_avx_state32_t +#define X86_AVX_STATE_COUNT x86_AVX_STATE32_COUNT +#define X86_AVX_STATE_FLAVOR x86_AVX_STATE32 +#define MCONTEXT_SIZE_256 sizeof(struct __darwin_mcontext_avx32) #endif #define VECTOR256 __m256 #define VEC256ALIGN __attribute ((aligned(32))) static inline void populate_ymm(void); static inline void check_ymm(void); -VECTOR256 vec256array0[YMM_MAX] VEC256ALIGN; -VECTOR256 vec256array1[YMM_MAX] VEC256ALIGN; -VECTOR256 vec256array2[YMM_MAX] VEC256ALIGN; -VECTOR256 vec256array3[YMM_MAX] VEC256ALIGN; +VECTOR256 vec256array0[YMM_MAX] VEC256ALIGN; +VECTOR256 vec256array1[YMM_MAX] VEC256ALIGN; +VECTOR256 vec256array2[YMM_MAX] VEC256ALIGN; +VECTOR256 vec256array3[YMM_MAX] VEC256ALIGN; /* * zmm defines/globals/prototypes */ #define STOP_COOKIE_512 0x0123456789abcdefULL #if defined(__x86_64__) -#define ZMM_MAX 32 -#define X86_AVX512_STATE_T x86_avx512_state64_t -#define X86_AVX512_STATE_COUNT x86_AVX512_STATE64_COUNT -#define X86_AVX512_STATE_FLAVOR x86_AVX512_STATE64 -#define MCONTEXT_SIZE_512 sizeof(struct __darwin_mcontext_avx512_64) +#define ZMM_MAX 32 +#define X86_AVX512_STATE_T x86_avx512_state64_t +#define X86_AVX512_STATE_COUNT x86_AVX512_STATE64_COUNT +#define X86_AVX512_STATE_FLAVOR x86_AVX512_STATE64 +#define MCONTEXT_SIZE_512 sizeof(struct __darwin_mcontext_avx512_64) #else -#define ZMM_MAX 8 -#define X86_AVX512_STATE_T x86_avx512_state32_t -#define X86_AVX512_STATE_COUNT x86_AVX512_STATE32_COUNT -#define X86_AVX512_STATE_FLAVOR x86_AVX512_STATE32 -#define MCONTEXT_SIZE_512 sizeof(struct 
__darwin_mcontext_avx512_32) +#define ZMM_MAX 8 +#define X86_AVX512_STATE_T x86_avx512_state32_t +#define X86_AVX512_STATE_COUNT x86_AVX512_STATE32_COUNT +#define X86_AVX512_STATE_FLAVOR x86_AVX512_STATE32 +#define MCONTEXT_SIZE_512 sizeof(struct __darwin_mcontext_avx512_32) #endif #define VECTOR512 __m512 #define VEC512ALIGN __attribute ((aligned(64))) @@ -77,34 +77,41 @@ VECTOR256 vec256array3[YMM_MAX] VEC256ALIGN; static inline void populate_zmm(void); static inline void populate_opmask(void); static inline void check_zmm(void); -VECTOR512 vec512array0[ZMM_MAX] VEC512ALIGN; -VECTOR512 vec512array1[ZMM_MAX] VEC512ALIGN; -VECTOR512 vec512array2[ZMM_MAX] VEC512ALIGN; -VECTOR512 vec512array3[ZMM_MAX] VEC512ALIGN; +VECTOR512 vec512array0[ZMM_MAX] VEC512ALIGN; +VECTOR512 vec512array1[ZMM_MAX] VEC512ALIGN; +VECTOR512 vec512array2[ZMM_MAX] VEC512ALIGN; +VECTOR512 vec512array3[ZMM_MAX] VEC512ALIGN; OPMASK karray0[8]; OPMASK karray1[8]; OPMASK karray2[8]; OPMASK karray3[8]; +kern_return_t _thread_get_state_avx(thread_t thread, int flavor, thread_state_t state, + mach_msg_type_number_t *state_count); +kern_return_t _thread_get_state_avx512(thread_t thread, int flavor, thread_state_t state, + mach_msg_type_number_t *state_count); /* * Common functions */ int -memcmp_unoptimized(const void *s1, const void *s2, size_t n) { +memcmp_unoptimized(const void *s1, const void *s2, size_t n) +{ if (n != 0) { const unsigned char *p1 = s1, *p2 = s2; do { - if (*p1++ != *p2++) - return (*--p1 - *--p2); + if (*p1++ != *p2++) { + return *--p1 - *--p2; + } } while (--n != 0); } - return (0); + return 0; } void -start_timer(int seconds, void (*handler)(int, siginfo_t *, void *)) { +start_timer(int seconds, void (*handler)(int, siginfo_t *, void *)) +{ struct sigaction sigalrm_action = { .sa_sigaction = handler, .sa_flags = SA_RESTART, @@ -123,15 +130,17 @@ start_timer(int seconds, void (*handler)(int, siginfo_t *, void *)) { } void -require_avx(void) { - if((_get_cpu_capabilities() & kHasAVX1_0) != kHasAVX1_0) { +require_avx(void) +{ + if ((_get_cpu_capabilities() & kHasAVX1_0) != kHasAVX1_0) { T_SKIP("AVX not supported on this system"); } } void -require_avx512(void) { - if((_get_cpu_capabilities() & kHasAVX512F) != kHasAVX512F) { +require_avx512(void) +{ + if ((_get_cpu_capabilities() & kHasAVX512F) != kHasAVX512F) { T_SKIP("AVX-512 not supported on this system"); } } @@ -141,71 +150,101 @@ require_avx512(void) { */ static inline void -store_ymm(VECTOR256 *vec256array) { +store_ymm(VECTOR256 *vec256array) +{ int i = 0; - __asm__ volatile("vmovaps %%ymm0, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm1, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm2, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm3, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm4, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm5, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm6, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm7, %0" :"=m" (vec256array[i])); + __asm__ volatile ("vmovaps %%ymm0, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm1, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm2, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm3, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm4, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm5, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm6, 
%0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm7, %0" :"=m" (vec256array[i])); +#if defined(__x86_64__) + i++; __asm__ volatile ("vmovaps %%ymm8, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm9, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm10, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm11, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm12, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm13, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm14, %0" :"=m" (vec256array[i])); + i++; __asm__ volatile ("vmovaps %%ymm15, %0" :"=m" (vec256array[i])); +#endif +} + +static inline void +restore_ymm(VECTOR256 *vec256array) +{ + VECTOR256 *p = vec256array; + + __asm__ volatile ("vmovaps %0, %%ymm0" :: "m" (*(__m256i*)p) : "ymm0"); p++; + __asm__ volatile ("vmovaps %0, %%ymm1" :: "m" (*(__m256i*)p) : "ymm1"); p++; + __asm__ volatile ("vmovaps %0, %%ymm2" :: "m" (*(__m256i*)p) : "ymm2"); p++; + __asm__ volatile ("vmovaps %0, %%ymm3" :: "m" (*(__m256i*)p) : "ymm3"); p++; + __asm__ volatile ("vmovaps %0, %%ymm4" :: "m" (*(__m256i*)p) : "ymm4"); p++; + __asm__ volatile ("vmovaps %0, %%ymm5" :: "m" (*(__m256i*)p) : "ymm5"); p++; + __asm__ volatile ("vmovaps %0, %%ymm6" :: "m" (*(__m256i*)p) : "ymm6"); p++; + __asm__ volatile ("vmovaps %0, %%ymm7" :: "m" (*(__m256i*)p) : "ymm7"); + #if defined(__x86_64__) - i++;__asm__ volatile("vmovaps %%ymm8, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm9, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm10, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm11, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm12, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm13, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm14, %0" :"=m" (vec256array[i])); - i++;__asm__ volatile("vmovaps %%ymm15, %0" :"=m" (vec256array[i])); + ++p; __asm__ volatile ("vmovaps %0, %%ymm8" :: "m" (*(__m256i*)p) : "ymm8"); p++; + __asm__ volatile ("vmovaps %0, %%ymm9" :: "m" (*(__m256i*)p) : "ymm9"); p++; + __asm__ volatile ("vmovaps %0, %%ymm10" :: "m" (*(__m256i*)p) : "ymm10"); p++; + __asm__ volatile ("vmovaps %0, %%ymm11" :: "m" (*(__m256i*)p) : "ymm11"); p++; + __asm__ volatile ("vmovaps %0, %%ymm12" :: "m" (*(__m256i*)p) : "ymm12"); p++; + __asm__ volatile ("vmovaps %0, %%ymm13" :: "m" (*(__m256i*)p) : "ymm13"); p++; + __asm__ volatile ("vmovaps %0, %%ymm14" :: "m" (*(__m256i*)p) : "ymm14"); p++; + __asm__ volatile ("vmovaps %0, %%ymm15" :: "m" (*(__m256i*)p) : "ymm15"); #endif } static inline void -populate_ymm(void) { +populate_ymm(void) +{ int j; uint32_t p[8] VEC256ALIGN; - for (j = 0; j < (int) (sizeof(p)/sizeof(p[0])); j++) + for (j = 0; j < (int) (sizeof(p) / sizeof(p[0])); j++) { p[j] = getpid(); + } p[0] = 0x22222222; p[7] = 0x77777777; - __asm__ volatile("vmovaps %0, %%ymm0" :: "m" (*(__m256i*)p) : "ymm0"); - __asm__ volatile("vmovaps %0, %%ymm1" :: "m" (*(__m256i*)p) : "ymm1"); - __asm__ volatile("vmovaps %0, %%ymm2" :: "m" (*(__m256i*)p) : "ymm2"); - __asm__ volatile("vmovaps %0, %%ymm3" :: "m" (*(__m256i*)p) : "ymm3"); + __asm__ volatile ("vmovaps %0, %%ymm0" :: "m" (*(__m256i*)p) : "ymm0"); + __asm__ volatile ("vmovaps %0, %%ymm1" :: "m" (*(__m256i*)p) : "ymm1"); + __asm__ volatile ("vmovaps %0, %%ymm2" :: "m" (*(__m256i*)p) : "ymm2"); + __asm__ volatile ("vmovaps %0, %%ymm3" :: "m" (*(__m256i*)p) : "ymm3"); p[0] = 0x44444444; p[7] = 
0xEEEEEEEE; - __asm__ volatile("vmovaps %0, %%ymm4" :: "m" (*(__m256i*)p) : "ymm4"); - __asm__ volatile("vmovaps %0, %%ymm5" :: "m" (*(__m256i*)p) : "ymm5"); - __asm__ volatile("vmovaps %0, %%ymm6" :: "m" (*(__m256i*)p) : "ymm6"); - __asm__ volatile("vmovaps %0, %%ymm7" :: "m" (*(__m256i*)p) : "ymm7"); + __asm__ volatile ("vmovaps %0, %%ymm4" :: "m" (*(__m256i*)p) : "ymm4"); + __asm__ volatile ("vmovaps %0, %%ymm5" :: "m" (*(__m256i*)p) : "ymm5"); + __asm__ volatile ("vmovaps %0, %%ymm6" :: "m" (*(__m256i*)p) : "ymm6"); + __asm__ volatile ("vmovaps %0, %%ymm7" :: "m" (*(__m256i*)p) : "ymm7"); #if defined(__x86_64__) p[0] = 0x88888888; p[7] = 0xAAAAAAAA; - __asm__ volatile("vmovaps %0, %%ymm8" :: "m" (*(__m256i*)p) : "ymm8"); - __asm__ volatile("vmovaps %0, %%ymm9" :: "m" (*(__m256i*)p) : "ymm9"); - __asm__ volatile("vmovaps %0, %%ymm10" :: "m" (*(__m256i*)p) : "ymm10"); - __asm__ volatile("vmovaps %0, %%ymm11" :: "m" (*(__m256i*)p) : "ymm11"); + __asm__ volatile ("vmovaps %0, %%ymm8" :: "m" (*(__m256i*)p) : "ymm8"); + __asm__ volatile ("vmovaps %0, %%ymm9" :: "m" (*(__m256i*)p) : "ymm9"); + __asm__ volatile ("vmovaps %0, %%ymm10" :: "m" (*(__m256i*)p) : "ymm10"); + __asm__ volatile ("vmovaps %0, %%ymm11" :: "m" (*(__m256i*)p) : "ymm11"); p[0] = 0xBBBBBBBB; p[7] = 0xCCCCCCCC; - __asm__ volatile("vmovaps %0, %%ymm12" :: "m" (*(__m256i*)p) : "ymm12"); - __asm__ volatile("vmovaps %0, %%ymm13" :: "m" (*(__m256i*)p) : "ymm13"); - __asm__ volatile("vmovaps %0, %%ymm14" :: "m" (*(__m256i*)p) : "ymm14"); - __asm__ volatile("vmovaps %0, %%ymm15" :: "m" (*(__m256i*)p) : "ymm15"); + __asm__ volatile ("vmovaps %0, %%ymm12" :: "m" (*(__m256i*)p) : "ymm12"); + __asm__ volatile ("vmovaps %0, %%ymm13" :: "m" (*(__m256i*)p) : "ymm13"); + __asm__ volatile ("vmovaps %0, %%ymm14" :: "m" (*(__m256i*)p) : "ymm14"); + __asm__ volatile ("vmovaps %0, %%ymm15" :: "m" (*(__m256i*)p) : "ymm15"); #endif store_ymm(vec256array0); } void -vec256_to_string(VECTOR256 *vec, char *buf) { +vec256_to_string(VECTOR256 *vec, char *buf) +{ unsigned int vec_idx = 0; unsigned int buf_idx = 0; int ret = 0; @@ -217,15 +256,16 @@ vec256_to_string(VECTOR256 *vec, char *buf) { buf + buf_idx, "0x%016llx:%016llx:%016llx:%016llx\n", a[0], a[1], a[2], a[3] - ); + ); T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sprintf()"); buf_idx += ret; } } void -assert_ymm_eq(void *a, void *b, int c) { - if(memcmp_unoptimized(a, b, c)) { +assert_ymm_eq(void *a, void *b, int c) +{ + if (memcmp_unoptimized(a, b, c)) { vec256_to_string(a, vec_str_buf); T_LOG("Compare failed, vector A:\n%s", vec_str_buf); vec256_to_string(b, vec_str_buf); @@ -235,7 +275,8 @@ assert_ymm_eq(void *a, void *b, int c) { } void -check_ymm(void) { +check_ymm(void) +{ uint32_t *p = (uint32_t *) &vec256array1[7]; store_ymm(vec256array1); if (p[0] == STOP_COOKIE_256) { @@ -245,13 +286,14 @@ check_ymm(void) { } static void -copy_ymm_state_to_vector(X86_AVX_STATE_T *sp, VECTOR256 *vp) { +copy_ymm_state_to_vector(X86_AVX_STATE_T *sp, VECTOR256 *vp) +{ int i; struct __darwin_xmm_reg *xmm = &sp->__fpu_xmm0; struct __darwin_xmm_reg *ymmh = &sp->__fpu_ymmh0; - for (i = 0; i < YMM_MAX; i++ ) { - bcopy(&xmm[i], &vp[i], sizeof(*xmm)); + for (i = 0; i < YMM_MAX; i++) { + bcopy(&xmm[i], &vp[i], sizeof(*xmm)); bcopy(&ymmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*ymmh)), sizeof(*ymmh)); } } @@ -281,8 +323,38 @@ ymm_sigalrm_handler(int signum __unused, siginfo_t *info __unused, void *ctx) checking = FALSE; } +kern_return_t +_thread_get_state_avx( + thread_t thread, + int flavor, + thread_state_t state, /* 
pointer to OUT array */ + mach_msg_type_number_t *state_count) /*IN/OUT*/ +{ + kern_return_t rv; + VECTOR256 ymms[YMM_MAX]; + + /* + * We must save and restore the YMMs across thread_get_state() because + * code in thread_get_state changes at least one xmm register AFTER the + * thread_get_state has saved the state in userspace. While it's still + * possible for something to muck with %xmms BEFORE making the mach + * system call (and rendering this save/restore useless), that does not + * currently occur, and since we depend on the avx state saved in the + * thread_get_state to be the same as that manually copied from YMMs after + * thread_get_state returns, we have to go through these machinations. + */ + store_ymm(ymms); + + rv = thread_get_state(thread, flavor, state, state_count); + + restore_ymm(ymms); + + return rv; +} + void -ymm_integrity(int time) { +ymm_integrity(int time) +{ mach_msg_type_number_t avx_count = X86_AVX_STATE_COUNT; kern_return_t kret; X86_AVX_STATE_T avx_state, avx_state2; @@ -291,9 +363,9 @@ ymm_integrity(int time) { bzero(&avx_state, sizeof(avx_state)); bzero(&avx_state2, sizeof(avx_state)); - kret = thread_get_state( + kret = _thread_get_state_avx( ts, X86_AVX_STATE_FLAVOR, (thread_state_t)&avx_state, &avx_count - ); + ); store_ymm(vec256array2); @@ -306,9 +378,9 @@ ymm_integrity(int time) { populate_ymm(); - kret = thread_get_state( + kret = _thread_get_state_avx( ts, X86_AVX_STATE_FLAVOR, (thread_state_t)&avx_state2, &avx_count - ); + ); store_ymm(vec256array2); @@ -326,7 +398,7 @@ ymm_integrity(int time) { populate_ymm(); /* Check state until timer fires */ - while(checking) { + while (checking) { check_ymm(); } @@ -350,142 +422,220 @@ ymm_integrity(int time) { */ static inline void -store_opmask(OPMASK k[]) { - __asm__ volatile("kmovq %%k0, %0" :"=m" (k[0])); - __asm__ volatile("kmovq %%k1, %0" :"=m" (k[1])); - __asm__ volatile("kmovq %%k2, %0" :"=m" (k[2])); - __asm__ volatile("kmovq %%k3, %0" :"=m" (k[3])); - __asm__ volatile("kmovq %%k4, %0" :"=m" (k[4])); - __asm__ volatile("kmovq %%k5, %0" :"=m" (k[5])); - __asm__ volatile("kmovq %%k6, %0" :"=m" (k[6])); - __asm__ volatile("kmovq %%k7, %0" :"=m" (k[7])); +store_opmask(OPMASK k[]) +{ + __asm__ volatile ("kmovq %%k0, %0" :"=m" (k[0])); + __asm__ volatile ("kmovq %%k1, %0" :"=m" (k[1])); + __asm__ volatile ("kmovq %%k2, %0" :"=m" (k[2])); + __asm__ volatile ("kmovq %%k3, %0" :"=m" (k[3])); + __asm__ volatile ("kmovq %%k4, %0" :"=m" (k[4])); + __asm__ volatile ("kmovq %%k5, %0" :"=m" (k[5])); + __asm__ volatile ("kmovq %%k6, %0" :"=m" (k[6])); + __asm__ volatile ("kmovq %%k7, %0" :"=m" (k[7])); } static inline void -store_zmm(VECTOR512 *vecarray) { +store_zmm(VECTOR512 *vecarray) +{ int i = 0; - __asm__ volatile("vmovaps %%zmm0, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm1, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm2, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm3, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm4, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm5, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm6, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm7, %0" :"=m" (vecarray[i])); + __asm__ volatile ("vmovaps %%zmm0, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm1, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm2, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm3, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps 
%%zmm4, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm5, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm6, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm7, %0" :"=m" (vecarray[i])); #if defined(__x86_64__) - i++;__asm__ volatile("vmovaps %%zmm8, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm9, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm10, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm11, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm12, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm13, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm14, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm15, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm16, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm17, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm18, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm19, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm20, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm21, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm22, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm23, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm24, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm25, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm26, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm27, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm28, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm29, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm30, %0" :"=m" (vecarray[i])); - i++;__asm__ volatile("vmovaps %%zmm31, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm8, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm9, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm10, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm11, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm12, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm13, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm14, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm15, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm16, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm17, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm18, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm19, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm20, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm21, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm22, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm23, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm24, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm25, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm26, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm27, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm28, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm29, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm30, %0" :"=m" (vecarray[i])); + i++; __asm__ volatile ("vmovaps %%zmm31, %0" :"=m" (vecarray[i])); #endif } static inline void 
-populate_opmask(void) { +restore_zmm(VECTOR512 *vecarray) +{ + VECTOR512 *p = vecarray; + + __asm__ volatile ("vmovaps %0, %%zmm0" :: "m" (*(__m512i*)p) : "zmm0"); p++; + __asm__ volatile ("vmovaps %0, %%zmm1" :: "m" (*(__m512i*)p) : "zmm1"); p++; + __asm__ volatile ("vmovaps %0, %%zmm2" :: "m" (*(__m512i*)p) : "zmm2"); p++; + __asm__ volatile ("vmovaps %0, %%zmm3" :: "m" (*(__m512i*)p) : "zmm3"); p++; + __asm__ volatile ("vmovaps %0, %%zmm4" :: "m" (*(__m512i*)p) : "zmm4"); p++; + __asm__ volatile ("vmovaps %0, %%zmm5" :: "m" (*(__m512i*)p) : "zmm5"); p++; + __asm__ volatile ("vmovaps %0, %%zmm6" :: "m" (*(__m512i*)p) : "zmm6"); p++; + __asm__ volatile ("vmovaps %0, %%zmm7" :: "m" (*(__m512i*)p) : "zmm7"); + +#if defined(__x86_64__) + ++p; __asm__ volatile ("vmovaps %0, %%zmm8" :: "m" (*(__m512i*)p) : "zmm8"); p++; + __asm__ volatile ("vmovaps %0, %%zmm9" :: "m" (*(__m512i*)p) : "zmm9"); p++; + __asm__ volatile ("vmovaps %0, %%zmm10" :: "m" (*(__m512i*)p) : "zmm10"); p++; + __asm__ volatile ("vmovaps %0, %%zmm11" :: "m" (*(__m512i*)p) : "zmm11"); p++; + __asm__ volatile ("vmovaps %0, %%zmm12" :: "m" (*(__m512i*)p) : "zmm12"); p++; + __asm__ volatile ("vmovaps %0, %%zmm13" :: "m" (*(__m512i*)p) : "zmm13"); p++; + __asm__ volatile ("vmovaps %0, %%zmm14" :: "m" (*(__m512i*)p) : "zmm14"); p++; + __asm__ volatile ("vmovaps %0, %%zmm15" :: "m" (*(__m512i*)p) : "zmm15"); p++; + __asm__ volatile ("vmovaps %0, %%zmm16" :: "m" (*(__m512i*)p) : "zmm16"); p++; + __asm__ volatile ("vmovaps %0, %%zmm17" :: "m" (*(__m512i*)p) : "zmm17"); p++; + __asm__ volatile ("vmovaps %0, %%zmm18" :: "m" (*(__m512i*)p) : "zmm18"); p++; + __asm__ volatile ("vmovaps %0, %%zmm19" :: "m" (*(__m512i*)p) : "zmm19"); p++; + __asm__ volatile ("vmovaps %0, %%zmm20" :: "m" (*(__m512i*)p) : "zmm20"); p++; + __asm__ volatile ("vmovaps %0, %%zmm21" :: "m" (*(__m512i*)p) : "zmm21"); p++; + __asm__ volatile ("vmovaps %0, %%zmm22" :: "m" (*(__m512i*)p) : "zmm22"); p++; + __asm__ volatile ("vmovaps %0, %%zmm23" :: "m" (*(__m512i*)p) : "zmm23"); p++; + __asm__ volatile ("vmovaps %0, %%zmm24" :: "m" (*(__m512i*)p) : "zmm24"); p++; + __asm__ volatile ("vmovaps %0, %%zmm25" :: "m" (*(__m512i*)p) : "zmm25"); p++; + __asm__ volatile ("vmovaps %0, %%zmm26" :: "m" (*(__m512i*)p) : "zmm26"); p++; + __asm__ volatile ("vmovaps %0, %%zmm27" :: "m" (*(__m512i*)p) : "zmm27"); p++; + __asm__ volatile ("vmovaps %0, %%zmm28" :: "m" (*(__m512i*)p) : "zmm28"); p++; + __asm__ volatile ("vmovaps %0, %%zmm29" :: "m" (*(__m512i*)p) : "zmm29"); p++; + __asm__ volatile ("vmovaps %0, %%zmm30" :: "m" (*(__m512i*)p) : "zmm30"); p++; + __asm__ volatile ("vmovaps %0, %%zmm31" :: "m" (*(__m512i*)p) : "zmm31"); +#endif +} + +static inline void +populate_opmask(void) +{ uint64_t k[8]; - for (int j = 0; j < 8; j++) - k[j] = ((uint64_t) getpid() << 32) + (0x11111111 * j); + for (int j = 0; j < 8; j++) { + k[j] = ((uint64_t) getpid() << 32) + (0x11111111 * j); + } - __asm__ volatile("kmovq %0, %%k0" : :"m" (k[0])); - __asm__ volatile("kmovq %0, %%k1" : :"m" (k[1])); - __asm__ volatile("kmovq %0, %%k2" : :"m" (k[2])); - __asm__ volatile("kmovq %0, %%k3" : :"m" (k[3])); - __asm__ volatile("kmovq %0, %%k4" : :"m" (k[4])); - __asm__ volatile("kmovq %0, %%k5" : :"m" (k[5])); - __asm__ volatile("kmovq %0, %%k6" : :"m" (k[6])); - __asm__ volatile("kmovq %0, %%k7" : :"m" (k[7])); + __asm__ volatile ("kmovq %0, %%k0" : :"m" (k[0])); + __asm__ volatile ("kmovq %0, %%k1" : :"m" (k[1])); + __asm__ volatile ("kmovq %0, %%k2" : :"m" (k[2])); + __asm__ volatile ("kmovq %0, %%k3" : 
:"m" (k[3])); + __asm__ volatile ("kmovq %0, %%k4" : :"m" (k[4])); + __asm__ volatile ("kmovq %0, %%k5" : :"m" (k[5])); + __asm__ volatile ("kmovq %0, %%k6" : :"m" (k[6])); + __asm__ volatile ("kmovq %0, %%k7" : :"m" (k[7])); store_opmask(karray0); } +kern_return_t +_thread_get_state_avx512( + thread_t thread, + int flavor, + thread_state_t state, /* pointer to OUT array */ + mach_msg_type_number_t *state_count) /*IN/OUT*/ +{ + kern_return_t rv; + VECTOR512 zmms[ZMM_MAX]; + + /* + * We must save and restore the ZMMs across thread_get_state() because + * code in thread_get_state changes at least one xmm register AFTER the + * thread_get_state has saved the state in userspace. While it's still + * possible for something to muck with %XMMs BEFORE making the mach + * system call (and rendering this save/restore useless), that does not + * currently occur, and since we depend on the avx512 state saved in the + * thread_get_state to be the same as that manually copied from ZMMs after + * thread_get_state returns, we have to go through these machinations. + */ + store_zmm(zmms); + + rv = thread_get_state(thread, flavor, state, state_count); + + restore_zmm(zmms); + + return rv; +} + static inline void -populate_zmm(void) { +populate_zmm(void) +{ int j; uint64_t p[8] VEC512ALIGN; - for (j = 0; j < (int) (sizeof(p)/sizeof(p[0])); j++) - p[j] = ((uint64_t) getpid() << 32) + getpid(); + for (j = 0; j < (int) (sizeof(p) / sizeof(p[0])); j++) { + p[j] = ((uint64_t) getpid() << 32) + getpid(); + } p[0] = 0x0000000000000000ULL; p[2] = 0x4444444444444444ULL; p[4] = 0x8888888888888888ULL; p[7] = 0xCCCCCCCCCCCCCCCCULL; - __asm__ volatile("vmovaps %0, %%zmm0" :: "m" (*(__m256i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm1" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm2" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm3" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm4" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm5" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm6" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm7" :: "m" (*(__m512i*)p) ); + __asm__ volatile ("vmovaps %0, %%zmm0" :: "m" (*(__m256i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm1" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm2" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm3" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm4" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm5" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm6" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm7" :: "m" (*(__m512i*)p)); #if defined(__x86_64__) p[0] = 0x1111111111111111ULL; p[2] = 0x5555555555555555ULL; p[4] = 0x9999999999999999ULL; p[7] = 0xDDDDDDDDDDDDDDDDULL; - __asm__ volatile("vmovaps %0, %%zmm8" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm9" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm10" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm11" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm12" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm13" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm14" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm15" :: "m" (*(__m512i*)p) ); + __asm__ volatile ("vmovaps %0, %%zmm8" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm9" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm10" :: "m" (*(__m512i*)p)); + __asm__ volatile 
("vmovaps %0, %%zmm11" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm12" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm13" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm14" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm15" :: "m" (*(__m512i*)p)); p[0] = 0x2222222222222222ULL; p[2] = 0x6666666666666666ULL; p[4] = 0xAAAAAAAAAAAAAAAAULL; p[7] = 0xEEEEEEEEEEEEEEEEULL; - __asm__ volatile("vmovaps %0, %%zmm16" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm17" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm18" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm19" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm20" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm21" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm22" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm23" :: "m" (*(__m512i*)p) ); + __asm__ volatile ("vmovaps %0, %%zmm16" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm17" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm18" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm19" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm20" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm21" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm22" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm23" :: "m" (*(__m512i*)p)); p[0] = 0x3333333333333333ULL; p[2] = 0x7777777777777777ULL; p[4] = 0xBBBBBBBBBBBBBBBBULL; p[7] = 0xFFFFFFFFFFFFFFFFULL; - __asm__ volatile("vmovaps %0, %%zmm24" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm25" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm26" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm27" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm28" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm29" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm30" :: "m" (*(__m512i*)p) ); - __asm__ volatile("vmovaps %0, %%zmm31" :: "m" (*(__m512i*)p) ); + __asm__ volatile ("vmovaps %0, %%zmm24" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm25" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm26" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm27" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm28" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm29" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm30" :: "m" (*(__m512i*)p)); + __asm__ volatile ("vmovaps %0, %%zmm31" :: "m" (*(__m512i*)p)); #endif store_zmm(vec512array0); } void -vec512_to_string(VECTOR512 *vec, char *buf) { +vec512_to_string(VECTOR512 *vec, char *buf) +{ unsigned int vec_idx = 0; unsigned int buf_idx = 0; int ret = 0; @@ -499,33 +649,35 @@ vec512_to_string(VECTOR512 *vec, char *buf) { "%016llx:%016llx:%016llx:%016llx%s", a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], vec_idx < ZMM_MAX - 1 ? "\n" : "" - ); + ); T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sprintf()"); buf_idx += ret; } } void -opmask_to_string(OPMASK *karray, char *buf) { +opmask_to_string(OPMASK *karray, char *buf) +{ unsigned int karray_idx = 0; unsigned int buf_idx = 0; int ret = 0; - for(karray_idx = 0; karray_idx < KARRAY_MAX; karray_idx++) { + for (karray_idx = 0; karray_idx < KARRAY_MAX; karray_idx++) { ret = sprintf( buf + buf_idx, "k%d: 0x%016llx%s", karray_idx, karray[karray_idx], karray_idx < KARRAY_MAX ? 
"\n" : "" - ); + ); T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sprintf()"); buf_idx += ret; } } static void -assert_zmm_eq(void *a, void *b, int c) { - if(memcmp_unoptimized(a, b, c)) { +assert_zmm_eq(void *a, void *b, int c) +{ + if (memcmp_unoptimized(a, b, c)) { vec512_to_string(a, vec_str_buf); T_LOG("Compare failed, vector A:\n%s", vec_str_buf); vec512_to_string(b, vec_str_buf); @@ -535,7 +687,8 @@ assert_zmm_eq(void *a, void *b, int c) { } static void -assert_opmask_eq(OPMASK *a, OPMASK *b) { +assert_opmask_eq(OPMASK *a, OPMASK *b) +{ for (int i = 0; i < KARRAY_MAX; i++) { if (a[i] != b[i]) { opmask_to_string(a, karray_str_buf); @@ -548,7 +701,8 @@ assert_opmask_eq(OPMASK *a, OPMASK *b) { } void -check_zmm(void) { +check_zmm(void) +{ uint64_t *p = (uint64_t *) &vec512array1[7]; store_opmask(karray1); store_zmm(vec512array1); @@ -560,14 +714,18 @@ check_zmm(void) { assert_opmask_eq(karray0, karray1); } -static void copy_state_to_opmask(X86_AVX512_STATE_T *sp, OPMASK *op) { +static void +copy_state_to_opmask(X86_AVX512_STATE_T *sp, OPMASK *op) +{ OPMASK *k = (OPMASK *) &sp->__fpu_k0; for (int i = 0; i < KARRAY_MAX; i++) { bcopy(&k[i], &op[i], sizeof(*op)); } } -static void copy_zmm_state_to_vector(X86_AVX512_STATE_T *sp, VECTOR512 *vp) { +static void +copy_zmm_state_to_vector(X86_AVX512_STATE_T *sp, VECTOR512 *vp) +{ int i; struct __darwin_xmm_reg *xmm = &sp->__fpu_xmm0; struct __darwin_xmm_reg *ymmh = &sp->__fpu_ymmh0; @@ -575,15 +733,15 @@ static void copy_zmm_state_to_vector(X86_AVX512_STATE_T *sp, VECTOR512 *vp) { #if defined(__x86_64__) struct __darwin_zmm_reg *zmm = &sp->__fpu_zmm16; - for (i = 0; i < ZMM_MAX/2; i++ ) { - bcopy(&xmm[i], &vp[i], sizeof(*xmm)); + for (i = 0; i < ZMM_MAX / 2; i++) { + bcopy(&xmm[i], &vp[i], sizeof(*xmm)); bcopy(&ymmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*ymmh)), sizeof(*ymmh)); bcopy(&zmmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*zmmh)), sizeof(*zmmh)); - bcopy(&zmm[i], &vp[(ZMM_MAX/2)+i], sizeof(*zmm)); + bcopy(&zmm[i], &vp[(ZMM_MAX / 2) + i], sizeof(*zmm)); } #else - for (i = 0; i < ZMM_MAX; i++ ) { - bcopy(&xmm[i], &vp[i], sizeof(*xmm)); + for (i = 0; i < ZMM_MAX; i++) { + bcopy(&xmm[i], &vp[i], sizeof(*xmm)); bcopy(&ymmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*ymmh)), sizeof(*ymmh)); bcopy(&zmmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*zmmh)), sizeof(*zmmh)); } @@ -620,7 +778,8 @@ zmm_sigalrm_handler(int signum __unused, siginfo_t *info __unused, void *ctx) } void -zmm_integrity(int time) { +zmm_integrity(int time) +{ mach_msg_type_number_t avx_count = X86_AVX512_STATE_COUNT; kern_return_t kret; X86_AVX512_STATE_T avx_state, avx_state2; @@ -632,9 +791,9 @@ zmm_integrity(int time) { store_zmm(vec512array2); store_opmask(karray2); - kret = thread_get_state( + kret = _thread_get_state_avx512( ts, X86_AVX512_STATE_FLAVOR, (thread_state_t)&avx_state, &avx_count - ); + ); T_QUIET; T_ASSERT_MACH_SUCCESS(kret, "thread_get_state()"); vec512_to_string(vec512array2, vec_str_buf); @@ -649,9 +808,9 @@ zmm_integrity(int time) { populate_zmm(); populate_opmask(); - kret = thread_get_state( + kret = _thread_get_state_avx512( ts, X86_AVX512_STATE_FLAVOR, (thread_state_t)&avx_state2, &avx_count - ); + ); store_zmm(vec512array2); store_opmask(karray2); @@ -674,7 +833,7 @@ zmm_integrity(int time) { populate_opmask(); /* Check state until timer fires */ - while(checking) { + while (checking) { check_zmm(); } @@ -701,36 +860,35 @@ zmm_integrity(int time) { * Main test declarations */ T_DECL(ymm_integrity, - "Quick soak test to verify that AVX " - "register state is 
maintained correctly", - T_META_TIMEOUT(NORMAL_RUN_TIME + TIMEOUT_OVERHEAD)) { + "Quick soak test to verify that AVX " + "register state is maintained correctly", + T_META_TIMEOUT(NORMAL_RUN_TIME + TIMEOUT_OVERHEAD)) { require_avx(); ymm_integrity(NORMAL_RUN_TIME); } T_DECL(ymm_integrity_stress, - "Extended soak test to verify that AVX " - "register state is maintained correctly", - T_META_TIMEOUT(LONG_RUN_TIME + TIMEOUT_OVERHEAD), - T_META_ENABLED(false)) { + "Extended soak test to verify that AVX " + "register state is maintained correctly", + T_META_TIMEOUT(LONG_RUN_TIME + TIMEOUT_OVERHEAD), + T_META_ENABLED(false)) { require_avx(); ymm_integrity(LONG_RUN_TIME); } T_DECL(zmm_integrity, - "Quick soak test to verify that AVX-512 " - "register state is maintained correctly", - T_META_TIMEOUT(LONG_RUN_TIME + TIMEOUT_OVERHEAD)) { + "Quick soak test to verify that AVX-512 " + "register state is maintained correctly", + T_META_TIMEOUT(LONG_RUN_TIME + TIMEOUT_OVERHEAD)) { require_avx512(); zmm_integrity(NORMAL_RUN_TIME); } T_DECL(zmm_integrity_stress, - "Extended soak test to verify that AVX-512 " - "register state is maintained correctly", - T_META_TIMEOUT(NORMAL_RUN_TIME + TIMEOUT_OVERHEAD), - T_META_ENABLED(false)) { + "Extended soak test to verify that AVX-512 " + "register state is maintained correctly", + T_META_TIMEOUT(NORMAL_RUN_TIME + TIMEOUT_OVERHEAD), + T_META_ENABLED(false)) { require_avx512(); zmm_integrity(LONG_RUN_TIME); } - diff --git a/tests/backtracing.c b/tests/backtracing.c index 614ec12b6..379960766 100644 --- a/tests/backtracing.c +++ b/tests/backtracing.c @@ -1,8 +1,12 @@ +/* Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. */ + #include #include #include #include #include +#include +#include #include #define USER_FRAMES (12) @@ -10,163 +14,252 @@ #define NON_RECURSE_FRAMES (5) static const char *user_bt[USER_FRAMES] = { - NULL, NULL, - "backtrace_thread", - "recurse_a", "recurse_b", "recurse_a", "recurse_b", - "recurse_a", "recurse_b", "recurse_a", - "expect_stack", NULL + NULL, NULL, + "backtrace_thread", + "recurse_a", "recurse_b", "recurse_a", "recurse_b", + "recurse_a", "recurse_b", "recurse_a", + "expect_stack", NULL }; static void expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol, unsigned long addr, unsigned int bt_idx, unsigned int max_frames) { - const char *name; - unsigned int frame_idx = max_frames - bt_idx - 1; - - if (bt[frame_idx] == NULL) { - T_LOG("frame %2u: skipping system frame", frame_idx); - return; - } - - if (CSIsNull(symbol)) { - T_FAIL("invalid symbol for address %#lx at frame %d", addr, frame_idx); - return; - } - - if (frame_idx >= bt_len) { - T_FAIL("unexpected frame '%s' (%#lx) at index %u", - CSSymbolGetName(symbol), addr, frame_idx); - return; - } - - name = CSSymbolGetName(symbol); - T_QUIET; T_ASSERT_NOTNULL(name, NULL); - T_EXPECT_EQ_STR(name, bt[frame_idx], - "frame %2u: saw '%s', expected '%s'", - frame_idx, name, bt[frame_idx]); + const char *name; + unsigned int frame_idx = max_frames - bt_idx - 1; + + if (bt[frame_idx] == NULL) { + T_LOG("frame %2u: skipping system frame", frame_idx); + return; + } + + if (CSIsNull(symbol)) { + T_FAIL("invalid symbol for address %#lx at frame %d", addr, frame_idx); + return; + } + + if (frame_idx >= bt_len) { + T_FAIL("unexpected frame '%s' (%#lx) at index %u", + CSSymbolGetName(symbol), addr, frame_idx); + return; + } + + name = CSSymbolGetName(symbol); + T_QUIET; T_ASSERT_NOTNULL(name, NULL); + T_EXPECT_EQ_STR(name, bt[frame_idx], + "frame %2u: saw '%s', 
expected '%s'", + frame_idx, name, bt[frame_idx]); } -static void __attribute__((noinline,not_tail_called)) +static bool +is_kernel_64_bit(void) +{ + static dispatch_once_t k64_once; + static bool k64 = false; + dispatch_once(&k64_once, ^{ + int errb; + int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ }; + + struct kinfo_proc kp; + size_t len = sizeof(kp); + + errb = sysctl(mib, sizeof(mib) / sizeof(mib[0]), &kp, &len, NULL, 0); + T_QUIET; T_ASSERT_POSIX_SUCCESS(errb, + "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})"); + + k64 = kp.kp_proc.p_flag & P_LP64; + T_LOG("executing with a %s-bit kernel", k64 ? "64" : "32"); + }); + return k64; +} + +static void __attribute__((noinline, not_tail_called)) expect_stack(void) { - uint64_t bt[USER_FRAMES] = { 0 }; - unsigned int bt_len = USER_FRAMES; - int err; - size_t bt_filled; - - static dispatch_once_t expect_stacks_once; - static bool k64; - static CSSymbolicatorRef user_symb; - - dispatch_once(&expect_stacks_once, ^(void) { - int errb; - int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ }; - - struct kinfo_proc kp; - size_t len; - - len = sizeof(kp); - errb = sysctl(mib, sizeof(mib) / sizeof(mib[0]), &kp, &len, NULL, 0); - T_QUIET; T_ASSERT_POSIX_SUCCESS(errb, - "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})"); - - k64 = kp.kp_proc.p_flag & P_LP64; - T_LOG("executing with a %s-bit kernel", k64 ? "64" : "32"); - - user_symb = CSSymbolicatorCreateWithTask(mach_task_self()); - T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL); - T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL); - }); - - bt_filled = USER_FRAMES; - err = sysctlbyname("kern.backtrace.user", bt, &bt_filled, NULL, 0); - if (err == ENOENT) { - T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT"); - } - T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(\"kern.backtrace.user\")"); - - bt_len = (unsigned int)bt_filled; - T_EXPECT_EQ(bt_len, (unsigned int)USER_FRAMES, - "%u frames should be present in backtrace", (unsigned int)USER_FRAMES); - - for (unsigned int i = 0; i < bt_len; i++) { - uintptr_t addr; + uint64_t bt[USER_FRAMES] = { 0 }; + unsigned int bt_len = USER_FRAMES; + int err; + size_t bt_filled; + bool k64; + + static CSSymbolicatorRef user_symb; + static dispatch_once_t expect_stack_once; + dispatch_once(&expect_stack_once, ^{ + user_symb = CSSymbolicatorCreateWithTask(mach_task_self()); + T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL); + T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL); + }); + + k64 = is_kernel_64_bit(); + bt_filled = USER_FRAMES; + err = sysctlbyname("kern.backtrace.user", bt, &bt_filled, NULL, 0); + if (err == ENOENT) { + T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT"); + } + T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(\"kern.backtrace.user\")"); + + bt_len = (unsigned int)bt_filled; + T_EXPECT_EQ(bt_len, (unsigned int)USER_FRAMES, + "%u frames should be present in backtrace", (unsigned int)USER_FRAMES); + + for (unsigned int i = 0; i < bt_len; i++) { + uintptr_t addr; #if !defined(__LP64__) - /* - * Backtrace frames come out as kernel words; convert them back to user - * uintptr_t for 32-bit processes. - */ - if (k64) { - addr = (uintptr_t)(bt[i]); - } else { - addr = (uintptr_t)(((uint32_t *)bt)[i]); - } + /* + * Backtrace frames come out as kernel words; convert them back to user + * uintptr_t for 32-bit processes. 
+ */ + if (k64) { + addr = (uintptr_t)(bt[i]); + } else { + addr = (uintptr_t)(((uint32_t *)bt)[i]); + } #else /* defined(__LP32__) */ - addr = (uintptr_t)bt[i]; + addr = (uintptr_t)bt[i]; #endif /* defined(__LP32__) */ - CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime( - user_symb, addr, kCSNow); - expect_frame(user_bt, USER_FRAMES, symbol, addr, i, bt_len); - } + CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime( + user_symb, addr, kCSNow); + expect_frame(user_bt, USER_FRAMES, symbol, addr, i, bt_len); + } } -static int __attribute__((noinline,not_tail_called)) +static int __attribute__((noinline, not_tail_called)) recurse_a(unsigned int frames); -static int __attribute__((noinline,not_tail_called)) +static int __attribute__((noinline, not_tail_called)) recurse_b(unsigned int frames); -static int __attribute__((noinline,not_tail_called)) +static int __attribute__((noinline, not_tail_called)) recurse_a(unsigned int frames) { - if (frames == 1) { - expect_stack(); - getpid(); - return 0; - } + if (frames == 1) { + expect_stack(); + getpid(); + return 0; + } - return recurse_b(frames - 1) + 1; + return recurse_b(frames - 1) + 1; } -static int __attribute__((noinline,not_tail_called)) +static int __attribute__((noinline, not_tail_called)) recurse_b(unsigned int frames) { - if (frames == 1) { - expect_stack(); - getpid(); - return 0; - } + if (frames == 1) { + expect_stack(); + getpid(); + return 0; + } - return recurse_a(frames - 1) + 1; + return recurse_a(frames - 1) + 1; } static void * backtrace_thread(void *arg) { #pragma unused(arg) - unsigned int calls; - - /* - * backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname - * - * Always make one less call for this frame (backtrace_thread). - */ - calls = USER_FRAMES - NON_RECURSE_FRAMES; - - T_LOG("backtrace thread calling into %d frames (already at %d frames)", - calls, NON_RECURSE_FRAMES); - (void)recurse_a(calls); - return NULL; + unsigned int calls; + + /* + * backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname + * + * Always make one less call for this frame (backtrace_thread). + */ + calls = USER_FRAMES - NON_RECURSE_FRAMES; + + T_LOG("backtrace thread calling into %d frames (already at %d frames)", + calls, NON_RECURSE_FRAMES); + (void)recurse_a(calls); + return NULL; } T_DECL(backtrace_user, "test that the kernel can backtrace user stacks", T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true)) { - pthread_t thread; + pthread_t thread; + + T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread, + NULL), "create additional thread to backtrace"); + + T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL); +} + +T_DECL(backtrace_user_bounds, + "test that the kernel doesn't write frames out of expected bounds") +{ + uint64_t bt_init[USER_FRAMES] = {}; + size_t bt_filled = USER_FRAMES, bt_filled_after = 0; + int error = 0; + kern_return_t kr = KERN_FAILURE; + void *bt_page = NULL; + void *guard_page = NULL; + void *bt_start = NULL; + + /* + * The backtrace addresses come back as kernel words. + */ + size_t kword_size = is_kernel_64_bit() ? 8 : 4; + + /* + * Get an idea of how many frames to expect. 
+ */ + error = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL, + 0); + if (error == ENOENT) { + T_SKIP("release kernel: kern.backtrace.user missing"); + } + T_ASSERT_POSIX_SUCCESS(error, "sysctlbyname(\"kern.backtrace.user\")"); + + /* + * Allocate two pages -- a first one that's valid and a second that + * will be non-writeable to catch a copyout that's too large. + */ + + bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE, -1, 0); + T_WITH_ERRNO; + T_ASSERT_NE(bt_page, MAP_FAILED, "allocated backtrace pages"); + guard_page = (char *)bt_page + vm_page_size; + + error = mprotect(guard_page, vm_page_size, PROT_READ); + T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page"); + + /* + * Ensure the pages are set up as expected. + */ + + kr = vm_write(mach_task_self(), (vm_address_t)bt_page, + (vm_offset_t)&(int){ 12345 }, sizeof(int)); + T_ASSERT_MACH_SUCCESS(kr, + "should succeed in writing to backtrace page"); + + kr = vm_write(mach_task_self(), (vm_address_t)guard_page, + (vm_offset_t)&(int){ 12345 }, sizeof(int)); + T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page"); + + /* + * Ask the kernel to write the backtrace just before the guard page. + */ + + bt_start = (char *)guard_page - (kword_size * bt_filled); + bt_filled_after = bt_filled; + + error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after, + NULL, 0); + T_EXPECT_POSIX_SUCCESS(error, + "sysctlbyname(\"kern.backtrace.user\") just before guard page"); + T_EXPECT_EQ(bt_filled, bt_filled_after, + "both calls to backtrace should have filled in the same number of " + "frames"); - T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread, - NULL), "create additional thread to backtrace"); + /* + * Expect the kernel to fault when writing too far. 
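	 *
	 * [Editor's note:] the earlier call placed the buffer at
	 * guard_page - (kword_size * bt_filled), so its final frame ends
	 * flush with the guard page. Advancing bt_start by one byte pushes
	 * the last kernel copyout one byte into the PROT_READ page, which
	 * should surface as EFAULT rather than a silent out-of-bounds
	 * write.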
+ */ - T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL); + bt_start = (char *)bt_start + 1; + bt_filled_after = bt_filled; + error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after, + NULL, 0); + T_EXPECT_POSIX_FAILURE(error, EFAULT, + "sysctlbyname(\"kern.backtrace.user\") should fault one byte into " + "guard page"); } diff --git a/tests/contextswitch.c b/tests/contextswitch.c index 3969ead2e..b2ec16624 100644 --- a/tests/contextswitch.c +++ b/tests/contextswitch.c @@ -23,10 +23,10 @@ #include #include -#define MAX_THREADS 32 -#define SPIN_SECS 6 -#define THR_SPINNER_PRI 63 -#define THR_MANAGER_PRI 62 +#define MAX_THREADS 32 +#define SPIN_SECS 6 +#define THR_SPINNER_PRI 63 +#define THR_MANAGER_PRI 62 #define WARMUP_ITERATIONS 100 #define POWERCTRL_SUCCESS_STR "Factor1: 1.000000" @@ -38,14 +38,14 @@ static _Atomic uint32_t keep_going = 1; static dt_stat_time_t s; static struct { - pthread_t thread; - bool measure_thread; + pthread_t thread; + bool measure_thread; } threads[MAX_THREADS]; -static uint64_t -nanos_to_abs(uint64_t nanos) -{ - return nanos * timebase_info.denom / timebase_info.numer; +static uint64_t +nanos_to_abs(uint64_t nanos) +{ + return nanos * timebase_info.denom / timebase_info.numer; } extern char **environ; @@ -53,137 +53,141 @@ extern char **environ; static void csw_perf_test_init(void) { - int spawn_ret, pid; - char *const clpcctrl_args[] = {"/usr/local/bin/clpcctrl", "-f", "5000", NULL}; - spawn_ret = posix_spawn(&pid, clpcctrl_args[0], NULL, NULL, clpcctrl_args, environ); - waitpid(pid, &spawn_ret, 0); + int spawn_ret, pid; + char *const clpcctrl_args[] = {"/usr/local/bin/clpcctrl", "-f", "5000", NULL}; + spawn_ret = posix_spawn(&pid, clpcctrl_args[0], NULL, NULL, clpcctrl_args, environ); + waitpid(pid, &spawn_ret, 0); } static void csw_perf_test_cleanup(void) { - int spawn_ret, pid; - char *const clpcctrl_args[] = {"/usr/local/bin/clpcctrl", "-d", NULL}; - spawn_ret = posix_spawn(&pid, clpcctrl_args[0], NULL, NULL, clpcctrl_args, environ); - waitpid(pid, &spawn_ret, 0); + int spawn_ret, pid; + char *const clpcctrl_args[] = {"/usr/local/bin/clpcctrl", "-d", NULL}; + spawn_ret = posix_spawn(&pid, clpcctrl_args[0], NULL, NULL, clpcctrl_args, environ); + waitpid(pid, &spawn_ret, 0); } static pthread_t -create_thread(uint32_t thread_id, uint32_t priority, bool fixpri, - void *(*start_routine)(void *)) +create_thread(uint32_t thread_id, uint32_t priority, bool fixpri, + void *(*start_routine)(void *)) { - int rv; - pthread_t new_thread; - struct sched_param param = { .sched_priority = (int)priority }; - pthread_attr_t attr; + int rv; + pthread_t new_thread; + struct sched_param param = { .sched_priority = (int)priority }; + pthread_attr_t attr; - T_ASSERT_POSIX_ZERO(pthread_attr_init(&attr), "pthread_attr_init"); + T_ASSERT_POSIX_ZERO(pthread_attr_init(&attr), "pthread_attr_init"); - T_ASSERT_POSIX_ZERO(pthread_attr_setschedparam(&attr, ¶m), - "pthread_attr_setschedparam"); + T_ASSERT_POSIX_ZERO(pthread_attr_setschedparam(&attr, ¶m), + "pthread_attr_setschedparam"); - if (fixpri) { - T_ASSERT_POSIX_ZERO(pthread_attr_setschedpolicy(&attr, SCHED_RR), - "pthread_attr_setschedpolicy"); - } + if (fixpri) { + T_ASSERT_POSIX_ZERO(pthread_attr_setschedpolicy(&attr, SCHED_RR), + "pthread_attr_setschedpolicy"); + } - T_ASSERT_POSIX_ZERO(pthread_create(&new_thread, &attr, start_routine, - (void*)(uintptr_t)thread_id), "pthread_create"); + T_ASSERT_POSIX_ZERO(pthread_create(&new_thread, &attr, start_routine, + (void*)(uintptr_t)thread_id), 
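/*
 * [Editor's worked example; the timebase values are assumed, not taken
 * from this patch.] nanos_to_abs() above inverts the usual mach timebase
 * scaling: abs_ticks = nanos * denom / numer. With numer = 125 and
 * denom = 3 (a common arm64 timebase), the SPIN_SECS = 6 second spin
 * becomes 6,000,000,000 * 3 / 125 = 144,000,000 absolute-time ticks.
 */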
"pthread_create"); - T_ASSERT_POSIX_ZERO(pthread_attr_destroy(&attr), "pthread_attr_destroy"); + T_ASSERT_POSIX_ZERO(pthread_attr_destroy(&attr), "pthread_attr_destroy"); - threads[thread_id].thread = new_thread; + threads[thread_id].thread = new_thread; - return new_thread; + return new_thread; } /* Spin until a specified number of seconds elapses */ static void spin_for_duration(uint32_t seconds) { - uint64_t duration = nanos_to_abs((uint64_t)seconds * NSEC_PER_SEC); - uint64_t current_time = mach_absolute_time(); - uint64_t timeout = duration + current_time; + uint64_t duration = nanos_to_abs((uint64_t)seconds * NSEC_PER_SEC); + uint64_t current_time = mach_absolute_time(); + uint64_t timeout = duration + current_time; - uint64_t spin_count = 0; + uint64_t spin_count = 0; - while (mach_absolute_time() < timeout && atomic_load_explicit(&keep_going, - memory_order_relaxed)) { - spin_count++; - } + while (mach_absolute_time() < timeout && atomic_load_explicit(&keep_going, + memory_order_relaxed)) { + spin_count++; + } } static void * spin_thread(void *arg) { - uint32_t thread_id = (uint32_t) arg; - char name[30] = ""; + uint32_t thread_id = (uint32_t) arg; + char name[30] = ""; - snprintf(name, sizeof(name), "spin thread %2d", thread_id); - pthread_setname_np(name); - T_ASSERT_MACH_SUCCESS(semaphore_wait_signal(semaphore, worker_sem), + snprintf(name, sizeof(name), "spin thread %2d", thread_id); + pthread_setname_np(name); + T_ASSERT_MACH_SUCCESS(semaphore_wait_signal(semaphore, worker_sem), "semaphore_wait_signal"); - spin_for_duration(SPIN_SECS); - return NULL; + spin_for_duration(SPIN_SECS); + return NULL; } static void * thread(void *arg) { - uint32_t thread_id = (uint32_t) arg; - char name[30] = ""; - - snprintf(name, sizeof(name), "thread %2d", thread_id); - pthread_setname_np(name); - T_ASSERT_MACH_SUCCESS(semaphore_wait_signal(semaphore, worker_sem), "semaphore_wait"); - - if (threads[thread_id].measure_thread) { - for (int i = 0; i < WARMUP_ITERATIONS; i++) { - thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0); - } - T_STAT_MEASURE_LOOP(s) { - if(thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0)) - T_ASSERT_FAIL("thread_switch"); - } - atomic_store_explicit(&keep_going, 0, memory_order_relaxed); - } else { - while (atomic_load_explicit(&keep_going, memory_order_relaxed)) { - if (thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0)) - T_ASSERT_FAIL("thread_switch"); - } - } - return NULL; + uint32_t thread_id = (uint32_t) arg; + char name[30] = ""; + + snprintf(name, sizeof(name), "thread %2d", thread_id); + pthread_setname_np(name); + T_ASSERT_MACH_SUCCESS(semaphore_wait_signal(semaphore, worker_sem), "semaphore_wait"); + + if (threads[thread_id].measure_thread) { + for (int i = 0; i < WARMUP_ITERATIONS; i++) { + thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0); + } + T_STAT_MEASURE_LOOP(s) { + if (thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0)) { + T_ASSERT_FAIL("thread_switch"); + } + } + atomic_store_explicit(&keep_going, 0, memory_order_relaxed); + } else { + while (atomic_load_explicit(&keep_going, memory_order_relaxed)) { + if (thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0)) { + T_ASSERT_FAIL("thread_switch"); + } + } + } + return NULL; } -void check_device_temperature(void) +void +check_device_temperature(void) { - char buffer[256]; - FILE *pipe = popen("powerctrl Factor1", "r"); - - if (pipe == NULL) { - T_FAIL("Failed to check device temperature"); - T_END; - } - - fgets(buffer, sizeof(buffer), pipe); - - if (strncmp(POWERCTRL_SUCCESS_STR, buffer, 
strlen(POWERCTRL_SUCCESS_STR))) { - T_PERF("temperature", 0.0, "factor", "device temperature"); - } else { - T_PASS("Device temperature check pass"); - T_PERF("temperature", 1.0, "factor", "device temperature"); - } - pclose(pipe); + char buffer[256]; + FILE *pipe = popen("powerctrl Factor1", "r"); + + if (pipe == NULL) { + T_FAIL("Failed to check device temperature"); + T_END; + } + + fgets(buffer, sizeof(buffer), pipe); + + if (strncmp(POWERCTRL_SUCCESS_STR, buffer, strlen(POWERCTRL_SUCCESS_STR))) { + T_PERF("temperature", 0.0, "factor", "device temperature"); + } else { + T_PASS("Device temperature check pass"); + T_PERF("temperature", 1.0, "factor", "device temperature"); + } + pclose(pipe); } -void record_perfcontrol_stats(const char *sysctlname, const char *units, const char *info) +void +record_perfcontrol_stats(const char *sysctlname, const char *units, const char *info) { - int data = 0; - size_t data_size = sizeof(data); - T_ASSERT_POSIX_ZERO(sysctlbyname(sysctlname, - &data, &data_size, NULL, 0), + int data = 0; + size_t data_size = sizeof(data); + T_ASSERT_POSIX_ZERO(sysctlbyname(sysctlname, + &data, &data_size, NULL, 0), "%s", sysctlname); - T_PERF(info, data, units, info); + T_PERF(info, data, units, info); } @@ -192,94 +196,93 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler")); /* Disable the test on MacOS for now */ T_DECL(perf_csw, "context switch performance", T_META_TAG_PERF, T_META_CHECK_LEAKS(false), T_META_ASROOT(true)) { - #if !CONFIG_EMBEDDED - T_SKIP("Not supported on MacOS"); - return; + T_SKIP("Not supported on MacOS"); + return; #endif /* CONFIG_EMBEDDED */ - check_device_temperature(); - - T_ATEND(csw_perf_test_cleanup); - - csw_perf_test_init(); - pthread_setname_np("main thread"); - - T_ASSERT_MACH_SUCCESS(mach_timebase_info(&timebase_info), "mach_timebase_info"); - - struct sched_param param = {.sched_priority = 48}; - - T_ASSERT_POSIX_ZERO(pthread_setschedparam(pthread_self(), SCHED_FIFO, &param), - "pthread_setschedparam"); - - T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &semaphore, - SYNC_POLICY_FIFO, 0), "semaphore_create"); - - T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &worker_sem, - SYNC_POLICY_FIFO, 0), "semaphore_create"); - - size_t ncpu_size = sizeof(g_numcpus); - T_ASSERT_POSIX_ZERO(sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0), - "sysctlbyname hw.ncpu"); - - printf("hw.ncpu: %d\n", g_numcpus); - uint32_t n_spinners = g_numcpus - 1; - - int mt_supported = 0; - size_t mt_supported_size = sizeof(mt_supported); - T_ASSERT_POSIX_ZERO(sysctlbyname("kern.monotonic.supported", &mt_supported, - &mt_supported_size, NULL, 0), "sysctlbyname kern.monotonic.supported"); - - for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) { - threads[thread_id].thread = create_thread(thread_id, THR_SPINNER_PRI, - true, &spin_thread); - } - - s = dt_stat_time_create("context switch time"); - - create_thread(n_spinners, THR_MANAGER_PRI, true, &thread); - threads[n_spinners].measure_thread = true; - create_thread(n_spinners + 1, THR_MANAGER_PRI, true, &thread); - - /* Allow the context switch threads to get into sem_wait() */ - for (uint32_t thread_id = 0; thread_id < n_spinners + 2; thread_id++) { - T_ASSERT_MACH_SUCCESS(semaphore_wait(worker_sem), "semaphore_wait"); - } - - int enable_callout_stats = 1; - size_t enable_size = sizeof(enable_callout_stats); - - if (mt_supported) { - /* Enable callout stat collection */ - T_ASSERT_POSIX_ZERO(sysctlbyname("kern.perfcontrol_callout.stats_enabled", - NULL, 0, &enable_callout_stats, 
enable_size), - "sysctlbyname kern.perfcontrol_callout.stats_enabled"); - } - - T_ASSERT_MACH_SUCCESS(semaphore_signal_all(semaphore), "semaphore_signal"); - - - for (uint32_t thread_id = 0; thread_id < n_spinners + 2; thread_id++) { - T_ASSERT_POSIX_ZERO(pthread_join(threads[thread_id].thread, NULL), - "pthread_join %d", thread_id); - } - - if (mt_supported) { - record_perfcontrol_stats("kern.perfcontrol_callout.oncore_instr", - "instructions", "oncore.instructions"); - record_perfcontrol_stats("kern.perfcontrol_callout.offcore_instr", - "instructions", "offcore.instructions"); - record_perfcontrol_stats("kern.perfcontrol_callout.oncore_cycles", - "cycles", "oncore.cycles"); - record_perfcontrol_stats("kern.perfcontrol_callout.offcore_cycles", - "cycles", "offcore.cycles"); - - /* Disable callout stat collection */ - enable_callout_stats = 0; - T_ASSERT_POSIX_ZERO(sysctlbyname("kern.perfcontrol_callout.stats_enabled", - NULL, 0, &enable_callout_stats, enable_size), - "sysctlbyname kern.perfcontrol_callout.stats_enabled"); - } - - check_device_temperature(); - dt_stat_finalize(s); + check_device_temperature(); + + T_ATEND(csw_perf_test_cleanup); + + csw_perf_test_init(); + pthread_setname_np("main thread"); + + T_ASSERT_MACH_SUCCESS(mach_timebase_info(&timebase_info), "mach_timebase_info"); + + struct sched_param param = {.sched_priority = 48}; + + T_ASSERT_POSIX_ZERO(pthread_setschedparam(pthread_self(), SCHED_FIFO, &param), + "pthread_setschedparam"); + + T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &semaphore, + SYNC_POLICY_FIFO, 0), "semaphore_create"); + + T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &worker_sem, + SYNC_POLICY_FIFO, 0), "semaphore_create"); + + size_t ncpu_size = sizeof(g_numcpus); + T_ASSERT_POSIX_ZERO(sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0), + "sysctlbyname hw.ncpu"); + + printf("hw.ncpu: %d\n", g_numcpus); + uint32_t n_spinners = g_numcpus - 1; + + int mt_supported = 0; + size_t mt_supported_size = sizeof(mt_supported); + T_ASSERT_POSIX_ZERO(sysctlbyname("kern.monotonic.supported", &mt_supported, + &mt_supported_size, NULL, 0), "sysctlbyname kern.monotonic.supported"); + + for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) { + threads[thread_id].thread = create_thread(thread_id, THR_SPINNER_PRI, + true, &spin_thread); + } + + s = dt_stat_time_create("context switch time"); + + create_thread(n_spinners, THR_MANAGER_PRI, true, &thread); + threads[n_spinners].measure_thread = true; + create_thread(n_spinners + 1, THR_MANAGER_PRI, true, &thread); + + /* Allow the context switch threads to get into sem_wait() */ + for (uint32_t thread_id = 0; thread_id < n_spinners + 2; thread_id++) { + T_ASSERT_MACH_SUCCESS(semaphore_wait(worker_sem), "semaphore_wait"); + } + + int enable_callout_stats = 1; + size_t enable_size = sizeof(enable_callout_stats); + + if (mt_supported) { + /* Enable callout stat collection */ + T_ASSERT_POSIX_ZERO(sysctlbyname("kern.perfcontrol_callout.stats_enabled", + NULL, 0, &enable_callout_stats, enable_size), + "sysctlbyname kern.perfcontrol_callout.stats_enabled"); + } + + T_ASSERT_MACH_SUCCESS(semaphore_signal_all(semaphore), "semaphore_signal"); + + + for (uint32_t thread_id = 0; thread_id < n_spinners + 2; thread_id++) { + T_ASSERT_POSIX_ZERO(pthread_join(threads[thread_id].thread, NULL), + "pthread_join %d", thread_id); + } + + if (mt_supported) { + record_perfcontrol_stats("kern.perfcontrol_callout.oncore_instr", + "instructions", "oncore.instructions"); + 
record_perfcontrol_stats("kern.perfcontrol_callout.offcore_instr", + "instructions", "offcore.instructions"); + record_perfcontrol_stats("kern.perfcontrol_callout.oncore_cycles", + "cycles", "oncore.cycles"); + record_perfcontrol_stats("kern.perfcontrol_callout.offcore_cycles", + "cycles", "offcore.cycles"); + + /* Disable callout stat collection */ + enable_callout_stats = 0; + T_ASSERT_POSIX_ZERO(sysctlbyname("kern.perfcontrol_callout.stats_enabled", + NULL, 0, &enable_callout_stats, enable_size), + "sysctlbyname kern.perfcontrol_callout.stats_enabled"); + } + + check_device_temperature(); + dt_stat_finalize(s); } diff --git a/tests/cpucount.c b/tests/cpucount.c index 47159c1c9..a3641bdbe 100644 --- a/tests/cpucount.c +++ b/tests/cpucount.c @@ -3,8 +3,8 @@ * * * -xcrun -sdk macosx.internal clang -o cpucount cpucount.c -ldarwintest -g -Weverything -xcrun -sdk iphoneos.internal clang -arch arm64 -o cpucount-ios cpucount.c -ldarwintest -g -Weverything + * xcrun -sdk macosx.internal clang -o cpucount cpucount.c -ldarwintest -g -Weverything + * xcrun -sdk iphoneos.internal clang -arch arm64 -o cpucount-ios cpucount.c -ldarwintest -g -Weverything */ #include @@ -59,9 +59,15 @@ static semaphore_t g_readysem, g_go_sem; static mach_timebase_info_data_t timebase_info; -static uint64_t nanos_to_abs(uint64_t nanos) { return nanos * timebase_info.denom / timebase_info.numer; } +static uint64_t +nanos_to_abs(uint64_t nanos) +{ + return nanos * timebase_info.denom / timebase_info.numer; +} -static void set_realtime(pthread_t thread) { +static void +set_realtime(pthread_t thread) +{ kern_return_t kr; thread_time_constraint_policy_data_t pol; @@ -75,7 +81,7 @@ static void set_realtime(pthread_t thread) { pol.preemptible = 0; /* Ignored by OS */ kr = thread_policy_set(target_thread, THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t) &pol, - THREAD_TIME_CONSTRAINT_POLICY_COUNT); + THREAD_TIME_CONSTRAINT_POLICY_COUNT); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_set(THREAD_TIME_CONSTRAINT_POLICY)"); } @@ -100,8 +106,9 @@ create_thread(void *(*start_routine)(void *), uint32_t priority) rv = pthread_create(&new_thread, &attr, start_routine, NULL); T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_create"); - if (priority == 97) + if (priority == 97) { set_realtime(new_thread); + } rv = pthread_attr_destroy(&attr); T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_destroy"); @@ -128,7 +135,9 @@ thread_fn(__unused void *arg) * spin to force the other threads to spread out across the cores * may take some time if cores are masked and CLPC needs to warm up to unmask them */ - while (g_ready_threads < g_threads && mach_absolute_time() < timeout); + while (g_ready_threads < g_threads && mach_absolute_time() < timeout) { + ; + } T_QUIET; T_ASSERT_GE(timeout, mach_absolute_time(), "waiting for all threads took too long"); @@ -148,14 +157,16 @@ thread_fn(__unused void *arg) if (iteration++ % 10000) { uint32_t cpus_seen = 0; - for (uint32_t i = 0 ; i < g_threads; i++) { - if (g_cpu_seen[i]) + for (uint32_t i = 0; i < g_threads; i++) { + if (g_cpu_seen[i]) { cpus_seen++; + } } /* bail out early if we saw all CPUs */ - if (cpus_seen == g_threads) + if (cpus_seen == g_threads) { break; + } } } @@ -190,7 +201,9 @@ spin_fn(__unused void *arg) uint64_t inner_timeout = nanos_to_abs(1 * NSEC_PER_MSEC) + mach_absolute_time(); - while (mach_absolute_time() < inner_timeout && g_bail == false); + while (mach_absolute_time() < inner_timeout && g_bail == false) { + ; + } } kr = semaphore_wait_signal(g_go_sem, g_readysem); @@ -203,7 
+216,7 @@ spin_fn(__unused void *arg) #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wgnu-flexible-array-initializer" T_DECL(count_cpus, "Tests we can schedule threads on all hw.ncpus cores according to _os_cpu_number", - T_META_CHECK_LEAKS(false), T_META_ENABLED(false)) + T_META_CHECK_LEAKS(false), T_META_ENABLED(false)) #pragma clang diagnostic pop { setvbuf(stdout, NULL, _IONBF, 0); @@ -228,13 +241,15 @@ T_DECL(count_cpus, "Tests we can schedule threads on all hw.ncpus cores accordin assert(g_threads < max_threads); - for (uint32_t i = 0; i < g_threads; i++) + for (uint32_t i = 0; i < g_threads; i++) { create_thread(&thread_fn, g_thread_pri); + } - for (uint32_t i = 0; i < g_spin_threads; i++) + for (uint32_t i = 0; i < g_spin_threads; i++) { create_thread(&spin_fn, g_spin_threads_pri); + } - for (uint32_t i = 0 ; i < g_threads + g_spin_threads; i++) { + for (uint32_t i = 0; i < g_threads + g_spin_threads; i++) { kr = semaphore_wait(g_readysem); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_wait"); } @@ -242,25 +257,27 @@ T_DECL(count_cpus, "Tests we can schedule threads on all hw.ncpus cores accordin uint64_t timeout = nanos_to_abs(g_spin_ms * NSEC_PER_MSEC) + mach_absolute_time(); /* spin to warm up CLPC :) */ - while (mach_absolute_time() < timeout); + while (mach_absolute_time() < timeout) { + ; + } kr = semaphore_signal_all(g_go_sem); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_signal_all"); - for (uint32_t i = 0 ; i < g_threads + g_spin_threads; i++) { + for (uint32_t i = 0; i < g_threads + g_spin_threads; i++) { kr = semaphore_wait(g_readysem); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_wait"); } uint32_t cpus_seen = 0; - for (uint32_t i = 0 ; i < g_threads; i++) { - if (g_cpu_seen[i]) + for (uint32_t i = 0; i < g_threads; i++) { + if (g_cpu_seen[i]) { cpus_seen++; + } printf("cpu %2d: %d\n", i, g_cpu_seen[i]); } T_ASSERT_EQ(cpus_seen, g_threads, "test should have run threads on all CPUS"); } - diff --git a/tests/data_protection.c b/tests/data_protection.c index c9a69fee7..7a7e4dc8a 100644 --- a/tests/data_protection.c +++ b/tests/data_protection.c @@ -47,7 +47,7 @@ int apple_key_store( size_t input_struct_count, uint64_t * outputs, uint32_t * output_count -); + ); int spawn_proc(char * const command[]); int supports_content_prot(void); char* dp_class_num_to_string(int num); @@ -60,7 +60,7 @@ void setup(void); void cleanup(void); T_DECL(data_protection, - "Verify behavior of the various data protection classes") { + "Verify behavior of the various data protection classes") { int local_result = -1; int new_prot_class = -1; int old_prot_class = -1; @@ -74,16 +74,16 @@ T_DECL(data_protection, * Ensure we can freely read and change * protection classes when unlocked. */ - for( + for ( new_prot_class = PROTECTION_CLASS_A; new_prot_class <= PROTECTION_CLASS_F; new_prot_class++ - ) { + ) { T_ASSERT_NE( old_prot_class = GET_PROT_CLASS(g_fd), -1, "Get protection class when locked" - ); + ); T_WITH_ERRNO; T_ASSERT_NE( SET_PROT_CLASS(g_fd, new_prot_class), @@ -92,7 +92,7 @@ T_DECL(data_protection, "from %s to %s while unlocked", dp_class_num_to_string(old_prot_class), dp_class_num_to_string(new_prot_class) - ); + ); } /* Query the filesystem for the default CP level (Is it C?) */ @@ -105,7 +105,7 @@ T_DECL(data_protection, old_prot_class = fcntl(g_fd, F_GETDEFAULTPROTLEVEL), -1, "Get default protection level for filesystem" - ); + ); /* XXX: Do we want to do anything with the level? What should it be? 
*/ @@ -119,10 +119,10 @@ T_DECL(data_protection, /* re-create the file */ T_WITH_ERRNO; T_ASSERT_GE( - g_fd = open(g_filepath, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC), + g_fd = open(g_filepath, O_CREAT | O_EXCL | O_RDWR | O_CLOEXEC), 0, "Recreate test file" - ); + ); /* Try making a class A file while locked. */ T_ASSERT_EQ(lock_device(), 0, "*** Lock device ***"); @@ -133,7 +133,7 @@ T_DECL(data_protection, -1, "Should not be able to change protection " "from class D to class A when locked" - ); + ); T_ASSERT_EQ(unlock_device(TEST_PASSCODE), 0, "*** Unlock device ***"); /* Attempt opening/IO to a class A file while unlocked. */ @@ -143,13 +143,13 @@ T_DECL(data_protection, 0, "Should be able to change protection " "from class D to class A when unlocked" - ); + ); close(g_fd); T_WITH_ERRNO; T_ASSERT_GE( - g_fd = open(g_filepath, O_RDWR|O_CLOEXEC), + g_fd = open(g_filepath, O_RDWR | O_CLOEXEC), 0, "Should be able to open a class A file when unlocked"); @@ -159,13 +159,13 @@ T_DECL(data_protection, */ current_byte = 0; - while(current_byte < CPT_IO_SIZE) { + while (current_byte < CPT_IO_SIZE) { local_result = pwrite( g_fd, &wr_buffer[current_byte], CPT_IO_SIZE - current_byte, current_byte - ); + ); T_WITH_ERRNO; T_ASSERT_NE( @@ -173,20 +173,20 @@ T_DECL(data_protection, -1, "Should be able to write to " "a class A file when unlocked" - ); + ); current_byte += local_result; } current_byte = 0; - while(current_byte < CPT_IO_SIZE) { + while (current_byte < CPT_IO_SIZE) { local_result = pread( g_fd, &rd_buffer[current_byte], CPT_IO_SIZE - current_byte, current_byte - ); + ); T_WITH_ERRNO; T_ASSERT_NE( @@ -194,7 +194,7 @@ T_DECL(data_protection, -1, "Should be able to read from " "a class A file when unlocked" - ); + ); current_byte += local_result; } @@ -209,46 +209,46 @@ T_DECL(data_protection, pread(g_fd, rd_buffer, CPT_IO_SIZE, 0), 0, "Should not be able to read from a class A file when locked" - ); + ); T_ASSERT_LE( pwrite(g_fd, wr_buffer, CPT_IO_SIZE, 0), 0, "Should not be able to write to a class A file when locked" - ); + ); T_ASSERT_EQ( SET_PROT_CLASS(g_fd, PROTECTION_CLASS_D), -1, "Should not be able to change protection " "from class A to class D when locked" - ); + ); /* Try to open and truncate the file. */ close(g_fd); T_ASSERT_EQ( - g_fd = open(g_filepath, O_RDWR|O_TRUNC|O_CLOEXEC), + g_fd = open(g_filepath, O_RDWR | O_TRUNC | O_CLOEXEC), -1, "Should not be able to open and truncate " "a class A file when locked" - ); + ); /* Try to open the file */ T_ASSERT_EQ( - g_fd = open(g_filepath, O_RDWR|O_CLOEXEC), + g_fd = open(g_filepath, O_RDWR | O_CLOEXEC), -1, "Should not be able to open a class A file when locked" - ); + ); /* What about class B files? 
*/ T_ASSERT_EQ(unlock_device(TEST_PASSCODE), 0, "*** Unlock device ***"); T_ASSERT_GE( - g_fd = open(g_filepath, O_RDWR|O_CLOEXEC), + g_fd = open(g_filepath, O_RDWR | O_CLOEXEC), 0, "Should be able to open a class A file when unlocked" - ); + ); T_WITH_ERRNO; T_ASSERT_EQ( @@ -256,7 +256,7 @@ T_DECL(data_protection, 0, "Should be able to change protection " "class from A to D when unlocked" - ); + ); T_ASSERT_EQ(lock_device(), 0, "*** Lock device ***"); @@ -266,13 +266,13 @@ T_DECL(data_protection, 0, "Should be able to change protection " "class from D to B when locked" - ); + ); T_ASSERT_EQ( GET_PROT_CLASS(g_fd), PROTECTION_CLASS_B, "File should now have class B protection" - ); + ); /* * We should also be able to read/write to the @@ -280,13 +280,13 @@ T_DECL(data_protection, */ current_byte = 0; - while(current_byte < CPT_IO_SIZE) { + while (current_byte < CPT_IO_SIZE) { local_result = pwrite( g_fd, &wr_buffer[current_byte], CPT_IO_SIZE - current_byte, current_byte - ); + ); T_WITH_ERRNO; T_ASSERT_NE( @@ -294,27 +294,27 @@ T_DECL(data_protection, -1, "Should be able to write to a " "new class B file when locked" - ); + ); current_byte += local_result; } current_byte = 0; - while(current_byte < CPT_IO_SIZE) { + while (current_byte < CPT_IO_SIZE) { local_result = pread( g_fd, &rd_buffer[current_byte], CPT_IO_SIZE - current_byte, current_byte - ); + ); T_ASSERT_NE( local_result, -1, "Should be able to read from a " "new class B file when locked" - ); + ); current_byte += local_result; } @@ -323,10 +323,10 @@ T_DECL(data_protection, close(g_fd); T_WITH_ERRNO; T_ASSERT_EQ( - g_fd = open(g_filepath, O_RDWR|O_CLOEXEC), + g_fd = open(g_filepath, O_RDWR | O_CLOEXEC), -1, "Should not be able to open a class B file when locked" - ); + ); unlink(g_filepath); @@ -336,34 +336,34 @@ T_DECL(data_protection, mkdir(g_dirpath, 0x0777), -1, "Should be able to create a new directory when locked" - ); + ); /* The newly created directory should not have a protection class. 
*/ T_ASSERT_NE( - g_dir_fd = open(g_dirpath, O_RDONLY|O_CLOEXEC), + g_dir_fd = open(g_dirpath, O_RDONLY | O_CLOEXEC), -1, "Should be able to open an unclassed directory when locked" - ); + ); T_ASSERT_TRUE( GET_PROT_CLASS(g_dir_fd) == PROTECTION_CLASS_D || GET_PROT_CLASS(g_dir_fd) == PROTECTION_CLASS_DIR_NONE, "Directory protection class should be D or NONE" - ); + ); T_ASSERT_EQ( SET_PROT_CLASS(g_dir_fd, PROTECTION_CLASS_A), 0, "Should be able to change a directory from " "class D to class A while locked" - ); + ); T_ASSERT_EQ( SET_PROT_CLASS(g_dir_fd, PROTECTION_CLASS_D), 0, "Should be able to change a directory from " "class A to class D while locked" - ); + ); /* * Do all files created in the directory properly inherit the @@ -374,21 +374,21 @@ T_DECL(data_protection, strlcpy(g_filepath, g_dirpath, PATH_MAX), PATH_MAX, "Construct path for file in the directory" - ); + ); T_ASSERT_LT( strlcat(g_filepath, "test_file", PATH_MAX), PATH_MAX, "Construct path for file in the directory" - ); + ); T_SETUPEND; T_ASSERT_EQ(unlock_device(TEST_PASSCODE), 0, "*** Unlock device ***"); - for( + for ( new_prot_class = PROTECTION_CLASS_A; new_prot_class <= PROTECTION_CLASS_D; new_prot_class++ - ) { + ) { int getclass_dir; T_WITH_ERRNO; @@ -396,7 +396,7 @@ T_DECL(data_protection, old_prot_class = GET_PROT_CLASS(g_dir_fd), -1, "Get protection class for the directory" - ); + ); T_WITH_ERRNO; T_ASSERT_EQ( @@ -406,36 +406,36 @@ T_DECL(data_protection, "protection from %s to %s", dp_class_num_to_string(old_prot_class), dp_class_num_to_string(new_prot_class) - ); + ); T_EXPECT_EQ( getclass_dir = GET_PROT_CLASS(g_dir_fd), new_prot_class, "Get protection class for the directory" - ); + ); T_WITH_ERRNO; T_ASSERT_GE( - g_fd = open(g_filepath, O_CREAT|O_EXCL|O_CLOEXEC, 0777), + g_fd = open(g_filepath, O_CREAT | O_EXCL | O_CLOEXEC, 0777), 0, "Should be able to create file in " "%s directory when unlocked", dp_class_num_to_string(new_prot_class) - ); + ); T_WITH_ERRNO; T_ASSERT_NE( local_result = GET_PROT_CLASS(g_fd), -1, "Get the new file's protection class" - ); + ); T_ASSERT_EQ( local_result, new_prot_class, "File should have %s protection", dp_class_num_to_string(new_prot_class) - ); + ); close(g_fd); unlink(g_filepath); @@ -446,7 +446,7 @@ T_DECL(data_protection, SET_PROT_CLASS(g_dir_fd, PROTECTION_CLASS_F), 0, "Should not be able to create class F directory" - ); + ); /* * Are class A and class B semantics followed for when @@ -458,16 +458,16 @@ T_DECL(data_protection, 0, "Should be able to change protection " "from class F to class A when unlocked" - ); + ); T_ASSERT_EQ(lock_device(), 0, "*** Lock device ***"); T_ASSERT_EQ( - g_fd = open(g_filepath, O_CREAT|O_EXCL|O_CLOEXEC, 0777), + g_fd = open(g_filepath, O_CREAT | O_EXCL | O_CLOEXEC, 0777), -1, "Should not be able to create a new file " "in a class A directory when locked" - ); + ); T_ASSERT_EQ(unlock_device(TEST_PASSCODE), 0, "*** Unlock device ***"); @@ -477,44 +477,44 @@ T_DECL(data_protection, 0, "Should be able to change directory " "from class A to class B when unlocked" - ); + ); T_ASSERT_EQ(lock_device(), 0, "*** Lock device ***"); T_ASSERT_GE( - g_fd = open(g_filepath, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, 0777), + g_fd = open(g_filepath, O_CREAT | O_EXCL | O_RDWR | O_CLOEXEC, 0777), 0, "Should be able to create a new file " "in class B directory when locked" - ); + ); T_ASSERT_NE( local_result = GET_PROT_CLASS(g_fd), -1, "Get the new file's protection class" - ); + ); T_ASSERT_EQ( local_result, PROTECTION_CLASS_B, "File should inherit protection class 
of class B directory" - ); + ); /* What happens when we try to create new subdirectories? */ T_ASSERT_EQ(unlock_device(TEST_PASSCODE), 0, "*** Unlock device ***"); - for( + for ( new_prot_class = PROTECTION_CLASS_A; new_prot_class <= PROTECTION_CLASS_D; new_prot_class++ - ) { + ) { T_WITH_ERRNO; T_ASSERT_EQ( SET_PROT_CLASS(g_dir_fd, new_prot_class), 0, "Change directory to %s", dp_class_num_to_string(new_prot_class) - ); + ); T_WITH_ERRNO; T_ASSERT_NE( @@ -522,15 +522,15 @@ T_DECL(data_protection, -1, "Create subdirectory in %s directory", dp_class_num_to_string(new_prot_class) - ); + ); T_WITH_ERRNO; T_ASSERT_NE( - g_subdir_fd = open(g_subdirpath, O_RDONLY|O_CLOEXEC), + g_subdir_fd = open(g_subdirpath, O_RDONLY | O_CLOEXEC), -1, "Should be able to open subdirectory in %s directory", dp_class_num_to_string(new_prot_class) - ); + ); T_ASSERT_NE( local_result = GET_PROT_CLASS(g_subdir_fd), @@ -538,14 +538,14 @@ T_DECL(data_protection, "Get protection class of new subdirectory " "of %s directory", dp_class_num_to_string(new_prot_class) - ); + ); T_ASSERT_EQ( local_result, new_prot_class, "New subdirectory should have same class as %s parent", dp_class_num_to_string(new_prot_class) - ); + ); close(g_subdir_fd); rmdir(g_subdirpath); @@ -553,7 +553,8 @@ T_DECL(data_protection, } void -setup(void) { +setup(void) +{ int ret = 0; int local_result = -1; @@ -565,16 +566,16 @@ setup(void) { T_ASSERT_NOTNULL( mkdtemp(g_test_tempdir), "Create temporary directory for test" - ); + ); T_LOG("Test temp dir: %s", g_test_tempdir); T_ASSERT_NE( local_result = supports_content_prot(), -1, "Get content protection support status" - ); + ); - if(local_result == 0) { + if (local_result == 0) { T_SKIP("Data protection not supported on this system"); } @@ -582,13 +583,13 @@ setup(void) { has_passcode(), 0, "Device should not have existing passcode" - ); + ); T_ASSERT_EQ( set_passcode(TEST_PASSCODE, NULL), 0, "Set test passcode" - ); + ); bzero(g_filepath, PATH_MAX); bzero(g_dirpath, PATH_MAX); @@ -607,68 +608,70 @@ setup(void) { T_WITH_ERRNO; T_ASSERT_GE( - g_fd = open(g_filepath, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, 0777), + g_fd = open(g_filepath, O_CREAT | O_EXCL | O_RDWR | O_CLOEXEC, 0777), 0, "Create test file" - ); + ); T_SETUPEND; } void -cleanup(void) { +cleanup(void) +{ T_LOG("Cleaning up…"); - if(g_subdir_fd >= 0) { + if (g_subdir_fd >= 0) { T_LOG("Cleanup: closing fd %d", g_subdir_fd); close(g_subdir_fd); } - if(g_subdirpath[0]) { + if (g_subdirpath[0]) { T_LOG("Cleanup: removing %s", g_subdirpath); rmdir(g_subdirpath); } - if(g_fd >= 0) { + if (g_fd >= 0) { T_LOG("Cleanup: closing fd %d", g_fd); close(g_fd); } - if(g_filepath[0]) { + if (g_filepath[0]) { T_LOG("Cleanup: removing %s", g_filepath); unlink(g_filepath); } - if(g_dir_fd >= 0) { + if (g_dir_fd >= 0) { T_LOG("Cleanup: closing fd %d", g_dir_fd); close(g_dir_fd); } - if(g_dirpath[0]) { + if (g_dirpath[0]) { T_LOG("Cleanup: removing %s", g_dirpath); rmdir(g_dirpath); } - if(strcmp(g_test_tempdir, TEMP_DIR_TEMPLATE)) { + if (strcmp(g_test_tempdir, TEMP_DIR_TEMPLATE)) { T_LOG("Cleanup: removing %s", g_test_tempdir); rmdir(g_test_tempdir); } - if(g_passcode_set) { + if (g_passcode_set) { T_LOG("Cleanup: unlocking device"); - if(unlock_device(TEST_PASSCODE)) { + if (unlock_device(TEST_PASSCODE)) { T_LOG("Warning: failed to unlock device in cleanup"); } T_LOG("Cleanup: clearing passcode"); - if(clear_passcode(TEST_PASSCODE)) { + if (clear_passcode(TEST_PASSCODE)) { T_LOG("Warning: failed to clear passcode in cleanup"); } } } int -set_passcode(char * 
new_passcode, char * old_passcode) { +set_passcode(char * new_passcode, char * old_passcode) +{ int result = -1; #ifdef KEYBAG_ENTITLEMENTS @@ -687,11 +690,11 @@ set_passcode(char * new_passcode, char * old_passcode) { old_passcode_len = strnlen(old_passcode, CPT_MAX_PASS_LEN); new_passcode_len = strnlen(new_passcode, CPT_MAX_PASS_LEN); - if((old_passcode == NULL) || (old_passcode_len == CPT_MAX_PASS_LEN)) { + if ((old_passcode == NULL) || (old_passcode_len == CPT_MAX_PASS_LEN)) { old_passcode = ""; old_passcode_len = 0; } - if((new_passcode == NULL) || (new_passcode_len == CPT_MAX_PASS_LEN)) { + if ((new_passcode == NULL) || (new_passcode_len == CPT_MAX_PASS_LEN)) { new_passcode = ""; new_passcode_len = 0; } @@ -704,14 +707,14 @@ set_passcode(char * new_passcode, char * old_passcode) { memcpy(buffer_ptr, old_passcode, old_passcode_len); buffer_ptr += ((old_passcode_len + sizeof(uint32_t) - 1) & - ~(sizeof(uint32_t) - 1)); + ~(sizeof(uint32_t) - 1)); *((uint32_t *) buffer_ptr) = new_passcode_len; buffer_ptr += sizeof(uint32_t); memcpy(buffer_ptr, new_passcode, new_passcode_len); buffer_ptr += ((new_passcode_len + sizeof(uint32_t) - 1) & - ~(sizeof(uint32_t) - 1)); + ~(sizeof(uint32_t) - 1)); input_structs = buffer; input_struct_count = (buffer_ptr - buffer); @@ -724,7 +727,7 @@ set_passcode(char * new_passcode, char * old_passcode) { input_struct_count, NULL, NULL - ); + ); #else /* * If we aren't entitled, we'll need to use @@ -732,17 +735,17 @@ set_passcode(char * new_passcode, char * old_passcode) { */ T_LOG("%s(): using keystorectl", __func__); - if( + if ( (old_passcode == NULL) || (strnlen(old_passcode, CPT_MAX_PASS_LEN) == CPT_MAX_PASS_LEN) - ) { + ) { old_passcode = ""; } - if( + if ( (new_passcode == NULL) || (strnlen(new_passcode, CPT_MAX_PASS_LEN) == CPT_MAX_PASS_LEN) - ) { + ) { new_passcode = ""; } @@ -755,17 +758,18 @@ set_passcode(char * new_passcode, char * old_passcode) { }; result = spawn_proc(keystorectl_args); #endif /* KEYBAG_ENTITLEMENTS */ - if(result == 0 && new_passcode != NULL) { + if (result == 0 && new_passcode != NULL) { g_passcode_set = 1; - } else if(result == 0 && new_passcode == NULL) { + } else if (result == 0 && new_passcode == NULL) { g_passcode_set = 0; } - return(result); + return result; } int -clear_passcode(char * passcode) { +clear_passcode(char * passcode) +{ /* * For the moment, this will set the passcode to the empty string * (a known value); this will most likely need to change, or running @@ -775,12 +779,14 @@ clear_passcode(char * passcode) { } int -has_passcode(void) { +has_passcode(void) +{ return set_passcode(NULL, NULL); } int -lock_device(void) { +lock_device(void) +{ int result = -1; /* @@ -796,7 +802,7 @@ lock_device(void) { */ char * const kbd_args[] = {KEYBAGDTEST_PATH, "lock", NULL}; result = spawn_proc(kbd_args); - if(result) { + if (result) { return result; } @@ -807,22 +813,22 @@ lock_device(void) { */ (void) unlink("/private/var/foo_test_file"); - while(1) { + while (1) { int dp_fd; dp_fd = open_dprotected_np( "/private/var/foo_test_file", - O_RDWR|O_CREAT, + O_RDWR | O_CREAT, PROTECTION_CLASS_A, 0 - ); + ); - if(dp_fd >= 0) { + if (dp_fd >= 0) { /* delete it and sleep */ close(dp_fd); result = unlink("/private/var/foo_test_file"); - if(result) { + if (result) { return result; } @@ -845,7 +851,8 @@ lock_device(void) { } int -unlock_device(char * passcode) { +unlock_device(char * passcode) +{ int result = -1; #ifdef KEYBAG_ENTITLEMENTS @@ -857,7 +864,7 @@ unlock_device(char * passcode) { T_LOG("%s(): using keybag entitlements", 
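/*
 * [Editor's note -- background assumption, not stated in the patch.]
 * lock_device() above probes with a class A file because class A keys
 * are evicted only once the lock actually takes effect; the first failed
 * open_dprotected_np() is therefore a signal that the device has
 * finished locking rather than merely been asked to lock.
 */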
__func__); input_struct_count = strnlen(passcode, CPT_MAX_PASS_LEN); - if((passcode == NULL) || (input_struct_count == CPT_MAX_PASS_LEN)) { + if ((passcode == NULL) || (input_struct_count == CPT_MAX_PASS_LEN)) { passcode = ""; input_struct_count = 0; } @@ -870,7 +877,7 @@ unlock_device(char * passcode) { input_struct_count, NULL, NULL - ); + ); #else /* * If we aren't entitled, we'll need to use @@ -878,10 +885,10 @@ unlock_device(char * passcode) { */ T_LOG("%s(): using keystorectl", __func__); - if( + if ( (passcode == NULL) || (strnlen(passcode, CPT_MAX_PASS_LEN) == CPT_MAX_PASS_LEN) - ) { + ) { passcode = ""; } @@ -892,7 +899,7 @@ unlock_device(char * passcode) { result = spawn_proc(keystorectl_args); #endif /* KEYBAG_ENTITLEMENTS */ - return(result); + return result; } /* @@ -904,7 +911,8 @@ unlock_device(char * passcode) { * we are formatted for it. */ int -supports_content_prot(void) { +supports_content_prot(void) +{ int local_result = -1; int result = -1; uint32_t buffer_size = 1; @@ -916,14 +924,14 @@ supports_content_prot(void) { defaults = IORegistryEntryFromPath( kIOMasterPortDefault, kIODeviceTreePlane ":/defaults" - ); + ); - if(defaults == IO_OBJECT_NULL) { + if (defaults == IO_OBJECT_NULL) { /* Assume data protection is unsupported */ T_LOG( "%s(): no defaults entry in IORegistry", __func__ - ); + ); return 0; } @@ -932,14 +940,14 @@ supports_content_prot(void) { "content-protect", buffer, &buffer_size - ); + ); - if(k_result != KERN_SUCCESS) { + if (k_result != KERN_SUCCESS) { /* Assume data protection is unsupported */ T_LOG( "%s(): no content-protect property in IORegistry", __func__ - ); + ); return 0; } @@ -950,19 +958,19 @@ supports_content_prot(void) { */ local_result = statfs(g_test_tempdir, &statfs_results); - if(local_result == -1) { + if (local_result == -1) { T_LOG( "%s(): failed to statfs the test directory, errno = %s", __func__, strerror(errno) - ); + ); return -1; - } else if(statfs_results.f_flags & MNT_CPROTECT) { + } else if (statfs_results.f_flags & MNT_CPROTECT) { return 1; } else { T_LOG( "%s(): filesystem not formatted for data protection", __func__ - ); + ); return 0; } } @@ -973,12 +981,13 @@ supports_content_prot(void) { */ int apple_key_store(uint32_t command, - uint64_t * inputs, - uint32_t input_count, - void * input_structs, - size_t input_struct_count, - uint64_t * outputs, - uint32_t * output_count) { + uint64_t * inputs, + uint32_t input_count, + void * input_structs, + size_t input_struct_count, + uint64_t * outputs, + uint32_t * output_count) +{ int result = -1; io_connect_t connection = IO_OBJECT_NULL; io_registry_entry_t apple_key_bag_service = IO_OBJECT_NULL; @@ -988,12 +997,12 @@ apple_key_store(uint32_t command, apple_key_bag_service = IOServiceGetMatchingService( kIOMasterPortDefault, IOServiceMatching(kAppleKeyStoreServiceName) - ); - if(apple_key_bag_service == IO_OBJECT_NULL) { + ); + if (apple_key_bag_service == IO_OBJECT_NULL) { T_LOG( "%s: failed to match kAppleKeyStoreServiceName", __func__ - ); + ); goto end; } @@ -1002,13 +1011,13 @@ apple_key_store(uint32_t command, mach_task_self(), 0, &connection - ); - if(k_result != KERN_SUCCESS) { + ); + if (k_result != KERN_SUCCESS) { T_LOG( "%s: failed to open AppleKeyStore: " "IOServiceOpen() returned %d", __func__, k_result - ); + ); goto end; } @@ -1016,21 +1025,21 @@ apple_key_store(uint32_t command, connection, kAppleKeyStoreUserClientOpen, NULL, 0, NULL, 0, NULL, NULL, NULL, NULL - ); - if(k_result != KERN_SUCCESS) { + ); + if (k_result != KERN_SUCCESS) { T_LOG( "%s: call to 
AppleKeyStore method " "kAppleKeyStoreUserClientOpen failed", __func__ - ); + ); goto close; } io_result = IOConnectCallMethod( connection, command, inputs, input_count, input_structs, input_struct_count, outputs, output_count, NULL, NULL - ); - if(io_result != kIOReturnSuccess) { + ); + if (io_result != kIOReturnSuccess) { T_LOG("%s: call to AppleKeyStore method %d failed", __func__, command); goto close; } @@ -1040,14 +1049,15 @@ close: IOServiceClose(apple_key_bag_service); end: - return(result); + return result; } /* * Helper function for launching tools */ int -spawn_proc(char * const command[]) { +spawn_proc(char * const command[]) +{ pid_t pid = 0; int launch_tool_ret = 0; bool waitpid_ret = true; @@ -1057,17 +1067,17 @@ spawn_proc(char * const command[]) { launch_tool_ret = dt_launch_tool(&pid, command, false, NULL, NULL); T_EXPECT_EQ(launch_tool_ret, 0, "launch tool: %s", command[0]); - if(launch_tool_ret != 0) { + if (launch_tool_ret != 0) { return 1; } waitpid_ret = dt_waitpid(pid, &status, &signal, timeout); T_EXPECT_TRUE(waitpid_ret, "%s should succeed", command[0]); - if(waitpid_ret == false) { - if(status != 0) { + if (waitpid_ret == false) { + if (status != 0) { T_LOG("%s exited %d", command[0], status); } - if(signal != 0) { + if (signal != 0) { T_LOG("%s received signal %d", command[0], signal); } return 1; @@ -1077,29 +1087,32 @@ spawn_proc(char * const command[]) { } char* -dp_class_num_to_string(int num) { - switch(num) { - case 0: - return "unclassed"; - case PROTECTION_CLASS_A: - return "class A"; - case PROTECTION_CLASS_B: - return "class B"; - case PROTECTION_CLASS_C: - return "class C"; - case PROTECTION_CLASS_D: - return "class D"; - case PROTECTION_CLASS_E: - return "class E"; - case PROTECTION_CLASS_F: - return "class F"; - default: - return ""; +dp_class_num_to_string(int num) +{ + switch (num) { + case 0: + return "unclassed"; + case PROTECTION_CLASS_A: + return "class A"; + case PROTECTION_CLASS_B: + return "class B"; + case PROTECTION_CLASS_C: + return "class C"; + case PROTECTION_CLASS_D: + return "class D"; + case PROTECTION_CLASS_E: + return "class E"; + case PROTECTION_CLASS_F: + return "class F"; + default: + return ""; } } #if 0 -int device_lock_state(void) { +int +device_lock_state(void) +{ /* * TODO: Actually implement this. * @@ -1109,11 +1122,13 @@ int device_lock_state(void) { */ int result = -1; - return(result); + return result; } /* Determines if we will try to test class C semantics. */ -int unlocked_since_boot() { +int +unlocked_since_boot() +{ /* * TODO: Actually implement this. 
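 * (One possible approach, untested: class C keys only become usable after the first unlock following boot, so this could probe that state the same way lock_device() above probes class A -- create a PROTECTION_CLASS_C file with open_dprotected_np() while the device is locked and treat a successful open as having been unlocked since boot.)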
* @@ -1124,7 +1139,6 @@ int unlocked_since_boot() { */ int result = 1; - return(result); + return result; } #endif - diff --git a/tests/disk_mount_conditioner.c b/tests/disk_mount_conditioner.c index fc3db9f89..6c733f451 100644 --- a/tests/disk_mount_conditioner.c +++ b/tests/disk_mount_conditioner.c @@ -25,14 +25,14 @@ static void perf_setup(char **path, int *fd); T_GLOBAL_META( T_META_NAMESPACE("xnu.vfs.dmc"), T_META_ASROOT(true) -); + ); #pragma mark Entitled Tests #ifndef TEST_UNENTITLED T_DECL(fsctl_get_uninitialized, - "Initial fsctl.get should return zeros", - T_META_ASROOT(false)) + "Initial fsctl.get should return zeros", + T_META_ASROOT(false)) { int err; char *mount_path; @@ -54,7 +54,7 @@ T_DECL(fsctl_get_uninitialized, } T_DECL(fsctl_set, - "fsctl.set should succeed and fsctl.get should verify") + "fsctl.set should succeed and fsctl.get should verify") { int err; char *mount_path; @@ -118,7 +118,7 @@ verify_mount_fallback_values(const char *mount_path, disk_conditioner_info *info } T_DECL(fsctl_set_zero, - "fsctl.set zero values should fall back to original mount settings") + "fsctl.set zero values should fall back to original mount settings") { char *mount_path; disk_conditioner_info info = {0}; @@ -135,7 +135,7 @@ T_DECL(fsctl_set_zero, } T_DECL(fsctl_set_out_of_bounds, - "fsctl.set out-of-bounds values should fall back to original mount settings") + "fsctl.set out-of-bounds values should fall back to original mount settings") { char *mount_path; disk_conditioner_info info; @@ -156,7 +156,7 @@ T_DECL(fsctl_set_out_of_bounds, } T_DECL(fsctl_restore_mount_fields, - "fsctl.set should restore fields on mount_t that it temporarily overrides") + "fsctl.set should restore fields on mount_t that it temporarily overrides") { int err; char *mount_path; @@ -212,8 +212,8 @@ T_DECL(fsctl_restore_mount_fields, } T_DECL(fsctl_get_nonroot, - "fsctl.get should not require root", - T_META_ASROOT(false)) + "fsctl.get should not require root", + T_META_ASROOT(false)) { int err; char *mount_path; @@ -234,8 +234,8 @@ T_DECL(fsctl_get_nonroot, } T_DECL(fsctl_set_nonroot, - "fsctl.set should require root", - T_META_ASROOT(false)) + "fsctl.set should require root", + T_META_ASROOT(false)) { int err; char *mount_path; @@ -271,7 +271,7 @@ T_DECL(fsctl_set_nonroot, } T_DECL(fsctl_delays, - "Validate I/O delays when DMC is enabled") + "Validate I/O delays when DMC is enabled") { char *path; int fd; @@ -323,7 +323,7 @@ T_DECL(fsctl_delays, #pragma mark Unentitled Tests T_DECL(fsctl_get_unentitled, - "fsctl.get should not require entitlement") + "fsctl.get should not require entitlement") { int err; char *mount_path; @@ -339,7 +339,7 @@ T_DECL(fsctl_get_unentitled, } T_DECL(fsctl_set_unentitled, - "fsctl.set should require entitlement") + "fsctl.set should require entitlement") { int err; char *mount_path; @@ -373,7 +373,9 @@ T_DECL(fsctl_set_unentitled, #pragma mark Helpers -static char *mktempdir(void) { +static char * +mktempdir(void) +{ char *path = malloc(PATH_MAX); strcpy(path, "/tmp/dmc.XXXXXXXX"); atexit_b(^{ free(path); }); @@ -394,7 +396,9 @@ static char *mktempdir(void) { * Faster than creating a ram disk to test with * when access to the filesystem is not necessary */ -static char *mktempmount(void) { +static char * +mktempmount(void) +{ char *mount_path = mktempdir(); T_WITH_ERRNO; @@ -410,7 +414,9 @@ static char *mktempmount(void) { * Wrapper around dt_launch_tool/dt_waitpid * that works like libc:system() */ -static int system_legal(const char *command) { +static int +system_legal(const 
char *command) +{ pid_t pid = -1; int exit_status = 0; const char *argv[] = { @@ -439,7 +445,9 @@ static int system_legal(const char *command) { * that contains a usable HFS+ filesystem * mounted via a ram disk */ -static char *mkramdisk(void) { +static char * +mkramdisk(void) +{ char cmd[1024]; char *mount_path = mktempdir(); char *dev_disk_file = malloc(256); @@ -473,7 +481,9 @@ static char *mkramdisk(void) { return mount_path; } -static uint64_t time_for_read(int fd, const char *expected) { +static uint64_t +time_for_read(int fd, const char *expected) +{ int err; ssize_t ret; char buf[READSIZE]; @@ -491,10 +501,12 @@ static uint64_t time_for_read(int fd, const char *expected) { err = memcmp(buf, expected, sizeof(buf)); T_ASSERT_EQ_INT(0, err, "read expected contents from temporary file"); - return (stop - start); + return stop - start; } -static void perf_setup(char **path, int *fd) { +static void +perf_setup(char **path, int *fd) +{ int temp_fd; char *temp_path; diff --git a/tests/drop_priv.c b/tests/drop_priv.c index 7bb499c53..13d4681af 100644 --- a/tests/drop_priv.c +++ b/tests/drop_priv.c @@ -21,22 +21,22 @@ static unsigned _get_sudo_invoker(const char *var) { - char *value_str = getenv(var); - T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(value_str, - "Not running under sudo, getenv(\"%s\") failed", var); - T_QUIET; T_ASSERT_NE_CHAR(*value_str, '\0', - "getenv(\"%s\") returned an empty string", var); + char *value_str = getenv(var); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(value_str, + "Not running under sudo, getenv(\"%s\") failed", var); + T_QUIET; T_ASSERT_NE_CHAR(*value_str, '\0', + "getenv(\"%s\") returned an empty string", var); - char *endp; - unsigned long value = strtoul(value_str, &endp, 10); - T_QUIET; T_WITH_ERRNO; T_ASSERT_EQ_CHAR(*endp, '\0', - "strtoul(\"%s\") not called on a valid number", value_str); - T_QUIET; T_WITH_ERRNO; T_ASSERT_NE_ULONG(value, ULONG_MAX, - "strtoul(\"%s\") overflow", value_str); + char *endp; + unsigned long value = strtoul(value_str, &endp, 10); + T_QUIET; T_WITH_ERRNO; T_ASSERT_EQ_CHAR(*endp, '\0', + "strtoul(\"%s\") not called on a valid number", value_str); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NE_ULONG(value, ULONG_MAX, + "strtoul(\"%s\") overflow", value_str); - T_QUIET; T_ASSERT_NE_ULONG(value, 0ul, "%s invalid", var); - T_QUIET; T_ASSERT_LT_ULONG(value, ID_MAX, "%s invalid", var); - return (unsigned)value; + T_QUIET; T_ASSERT_NE_ULONG(value, 0ul, "%s invalid", var); + T_QUIET; T_ASSERT_LT_ULONG(value, ID_MAX, "%s invalid", var); + return (unsigned)value; } #endif /* TARGET_OS_OSX */ @@ -46,14 +46,14 @@ void drop_priv(void) { #if TARGET_OS_OSX - uid_t lower_uid = _get_sudo_invoker(INVOKER_UID); - gid_t lower_gid = _get_sudo_invoker(INVOKER_GID); + uid_t lower_uid = _get_sudo_invoker(INVOKER_UID); + gid_t lower_gid = _get_sudo_invoker(INVOKER_GID); #else - struct passwd *pw = getpwnam("mobile"); - T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(pw, "getpwnam(\"mobile\")"); - uid_t lower_uid = pw->pw_uid; - gid_t lower_gid = pw->pw_gid; + struct passwd *pw = getpwnam("mobile"); + T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(pw, "getpwnam(\"mobile\")"); + uid_t lower_uid = pw->pw_uid; + gid_t lower_gid = pw->pw_gid; #endif - T_ASSERT_POSIX_SUCCESS(setgid(lower_gid), "Change group to %u", lower_gid); - T_ASSERT_POSIX_SUCCESS(setuid(lower_uid), "Change user to %u", lower_uid); + T_ASSERT_POSIX_SUCCESS(setgid(lower_gid), "Change group to %u", lower_gid); + T_ASSERT_POSIX_SUCCESS(setuid(lower_uid), "Change user to %u", lower_uid); } diff --git 
a/tests/exc_resource_threads.c b/tests/exc_resource_threads.c index 4b247c6bc..173a8ef82 100644 --- a/tests/exc_resource_threads.c +++ b/tests/exc_resource_threads.c @@ -23,11 +23,11 @@ static dispatch_semaphore_t sync_sema; kern_return_t catch_mach_exception_raise(mach_port_t exception_port, - mach_port_t thread, - mach_port_t task, - exception_type_t exception, - mach_exception_data_t code, - mach_msg_type_number_t code_count) + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + mach_exception_data_t code, + mach_msg_type_number_t code_count) { #pragma unused(exception_port, thread, task, code, code_count) pid_t pid; @@ -40,14 +40,14 @@ catch_mach_exception_raise(mach_port_t exception_port, kern_return_t catch_mach_exception_raise_state(mach_port_t exception_port, - exception_type_t exception, - const mach_exception_data_t code, - mach_msg_type_number_t code_count, - int * flavor, - const thread_state_t old_state, - mach_msg_type_number_t old_state_count, - thread_state_t new_state, - mach_msg_type_number_t * new_state_count) + exception_type_t exception, + const mach_exception_data_t code, + mach_msg_type_number_t code_count, + int * flavor, + const thread_state_t old_state, + mach_msg_type_number_t old_state_count, + thread_state_t new_state, + mach_msg_type_number_t * new_state_count) { #pragma unused(exception_port, exception, code, code_count, flavor, old_state, old_state_count, new_state, new_state_count) T_FAIL("Unsupported catch_mach_exception_raise_state"); @@ -56,16 +56,16 @@ catch_mach_exception_raise_state(mach_port_t exception_port, kern_return_t catch_mach_exception_raise_state_identity(mach_port_t exception_port, - mach_port_t thread, - mach_port_t task, - exception_type_t exception, - mach_exception_data_t code, - mach_msg_type_number_t code_count, - int * flavor, - thread_state_t old_state, - mach_msg_type_number_t old_state_count, - thread_state_t new_state, - mach_msg_type_number_t * new_state_count) + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + mach_exception_data_t code, + mach_msg_type_number_t code_count, + int * flavor, + thread_state_t old_state, + mach_msg_type_number_t old_state_count, + thread_state_t new_state, + mach_msg_type_number_t * new_state_count) { #pragma unused(exception_port, thread, task, exception, code, code_count, flavor, old_state, old_state_count, new_state, new_state_count) T_FAIL("Unsupported catch_mach_exception_raise_state_identity"); @@ -85,29 +85,34 @@ exc_handler(void * arg) mach_port_t exception_port; kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exception_port); - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { T_FAIL("mach_port_allocate: %s (%d)", mach_error_string(kret), kret); + } kret = mach_port_insert_right(mach_task_self(), exception_port, exception_port, MACH_MSG_TYPE_MAKE_SEND); - if (kret != KERN_SUCCESS) + if (kret != KERN_SUCCESS) { T_FAIL("mach_port_insert_right: %s (%d)", mach_error_string(kret), kret); + } kret = task_set_exception_ports(mach_task_self(), EXC_MASK_CRASH | EXC_MASK_CORPSE_NOTIFY, exception_port, - (exception_behavior_t)(EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES), 0); - if (kret != KERN_SUCCESS) + (exception_behavior_t)(EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES), 0); + if (kret != KERN_SUCCESS) { T_FAIL("task_set_exception_ports: %s (%d)", mach_error_string(kret), kret); + } dispatch_semaphore_signal(sync_sema); kret = mach_msg_server(mach_exc_server, MACH_MSG_SIZE_RELIABLE, exception_port, 0); - if (kret != KERN_SUCCESS) + if 
(kret != KERN_SUCCESS) { T_FAIL("mach_msg_server: %s (%d)", mach_error_string(kret), kret); + } return NULL; } static void* -dummy_thread(void *arg) { +dummy_thread(void *arg) +{ #pragma unused(arg) while (1) { sleep(60); @@ -136,14 +141,14 @@ check_exc_resource_threads_enabled() size_t enabled_size = sizeof(enabled); err = sysctlbyname("kern.exc_resource_threads_enabled", &enabled, &enabled_size, NULL, 0); - if (err || !enabled) + if (err || !enabled) { T_SKIP("EXC_RESOURCE RESOURCE_TYPE_THREADS not enabled on this system"); - + } } T_DECL(exc_resource_threads, "Ensures that a process with a thread_limit set will receive an exc_resource when it crosses its thread limit", - T_META_ASROOT(true), - T_META_CHECK_LEAKS(false)) + T_META_ASROOT(true), + T_META_CHECK_LEAKS(false)) { pthread_t handle_thread; diff --git a/tests/freebsd_waitpid_nohang.c b/tests/freebsd_waitpid_nohang.c index 9aa55e19e..815abe79e 100644 --- a/tests/freebsd_waitpid_nohang.c +++ b/tests/freebsd_waitpid_nohang.c @@ -57,6 +57,6 @@ T_DECL(waitpid_nohang, "FreeBSDarwin--waitpid_nohang") status = -1; pid = waitpid(child, &status, WNOHANG); T_ASSERT_EQ(pid, child, "waitpid returns correct pid"); - T_EXPECT_EQ(WIFSIGNALED(status), true, "child was signaled"); + T_EXPECT_EQ(WIFSIGNALED(status), true, "child was signaled"); T_EXPECT_EQ(WTERMSIG(status), SIGTERM, "child was sent SIGTERM"); } diff --git a/tests/gettimeofday.c b/tests/gettimeofday.c index e2b8c3a19..e2f792b4c 100644 --- a/tests/gettimeofday.c +++ b/tests/gettimeofday.c @@ -7,7 +7,7 @@ extern int __gettimeofday(struct timeval *, struct timezone *); T_DECL(gettimeofday, "gettimeofday()", - T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true), T_META_LTEPHASE(LTE_POSTINIT)) + T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true), T_META_LTEPHASE(LTE_POSTINIT)) { struct timeval tv_a, tv_b, tv_c; @@ -29,7 +29,7 @@ T_DECL(gettimeofday, "gettimeofday()", extern int __gettimeofday_with_mach(struct timeval *, struct timezone *, uint64_t *mach_time); T_DECL(gettimeofday_with_mach, "gettimeofday_with_mach()", - T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true)) + T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true)) { struct timeval gtod_ts; diff --git a/tests/host_notifications.c b/tests/host_notifications.c index c4463b3c4..16c6c4b3a 100644 --- a/tests/host_notifications.c +++ b/tests/host_notifications.c @@ -4,7 +4,9 @@ #include -static void do_test(int notify_type, void (^trigger_block)(void)){ +static void +do_test(int notify_type, void (^trigger_block)(void)) +{ mach_port_t port; T_ASSERT_MACH_SUCCESS(mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port), NULL); @@ -16,13 +18,13 @@ static void do_test(int notify_type, void (^trigger_block)(void)){ mach_msg_header_t hdr; mach_msg_trailer_t trailer; } message = { .hdr = { - .msgh_bits = 0, - .msgh_size = sizeof(mach_msg_header_t), - .msgh_remote_port = MACH_PORT_NULL, - .msgh_local_port = port, - .msgh_voucher_port = MACH_PORT_NULL, - .msgh_id = 0, - }}; + .msgh_bits = 0, + .msgh_size = sizeof(mach_msg_header_t), + .msgh_remote_port = MACH_PORT_NULL, + .msgh_local_port = port, + .msgh_voucher_port = MACH_PORT_NULL, + .msgh_id = 0, + }}; T_ASSERT_EQ(MACH_RCV_TOO_LARGE, mach_msg_receive(&message.hdr), NULL); mach_msg_destroy(&message.hdr); @@ -32,8 +34,8 @@ T_DECL(host_notify_calendar_change, "host_request_notification(HOST_NOTIFY_CALEN { do_test(HOST_NOTIFY_CALENDAR_CHANGE, ^{ struct timeval tm; - if (gettimeofday(&tm, NULL) != 0 || settimeofday(&tm, NULL) != 0){ - T_SKIP("Unable to 
settimeofday()"); + if (gettimeofday(&tm, NULL) != 0 || settimeofday(&tm, NULL) != 0) { + T_SKIP("Unable to settimeofday()"); } }); } @@ -42,8 +44,8 @@ T_DECL(host_notify_calendar_set, "host_request_notification(HOST_NOTIFY_CALENDAR { do_test(HOST_NOTIFY_CALENDAR_SET, ^{ struct timeval tm; - if (gettimeofday(&tm, NULL) != 0 || settimeofday(&tm, NULL) != 0){ - T_SKIP("Unable to settimeofday()"); + if (gettimeofday(&tm, NULL) != 0 || settimeofday(&tm, NULL) != 0) { + T_SKIP("Unable to settimeofday()"); } }); } diff --git a/tests/host_statistics_rate_limiting.c b/tests/host_statistics_rate_limiting.c index 8376db7b3..27809e747 100644 --- a/tests/host_statistics_rate_limiting.c +++ b/tests/host_statistics_rate_limiting.c @@ -17,7 +17,8 @@ #define RETRY 5 static int -remove_platform_binary(void){ +remove_platform_binary(void) +{ int ret; uint32_t my_csflags; @@ -64,30 +65,31 @@ struct all_host_info { }; static void -check_host_info(struct all_host_info* data, unsigned long iter, char lett){ +check_host_info(struct all_host_info* data, unsigned long iter, char lett) +{ char* datap; - unsigned long i,j; + unsigned long i, j; /* check that for the shorter revisions no data is copied on the bytes of diff with the longer */ - for ( j = 0 ; j < iter; j++) { + for (j = 0; j < iter; j++) { datap = (char*) &data[j].host_vm_info64_rev0; - for ( i = (HOST_VM_INFO64_REV0_COUNT * sizeof(int)); i< (HOST_VM_INFO64_REV1_COUNT * sizeof(int)); i++) { - T_QUIET;T_ASSERT_EQ(datap[i], lett, "HOST_VM_INFO64_REV0 byte %lu iter %lu", i, j); + for (i = (HOST_VM_INFO64_REV0_COUNT * sizeof(int)); i < (HOST_VM_INFO64_REV1_COUNT * sizeof(int)); i++) { + T_QUIET; T_ASSERT_EQ(datap[i], lett, "HOST_VM_INFO64_REV0 byte %lu iter %lu", i, j); } datap = (char*) &data[j].host_vm_info_rev0; - for ( i = (HOST_VM_INFO_REV0_COUNT * sizeof(int)); i< (HOST_VM_INFO_REV2_COUNT * sizeof(int)); i++) { - T_QUIET;T_ASSERT_EQ(datap[i], lett, "HOST_VM_INFO_REV0 byte %lu iter %lu", i, j); + for (i = (HOST_VM_INFO_REV0_COUNT * sizeof(int)); i < (HOST_VM_INFO_REV2_COUNT * sizeof(int)); i++) { + T_QUIET; T_ASSERT_EQ(datap[i], lett, "HOST_VM_INFO_REV0 byte %lu iter %lu", i, j); } datap = (char*) &data[j].host_vm_info_rev1; - for ( i = (HOST_VM_INFO_REV1_COUNT * sizeof(int)); i< (HOST_VM_INFO_REV2_COUNT * sizeof(int)); i++) { - T_QUIET;T_ASSERT_EQ(datap[i], lett, "HOST_VM_INFO_REV1 byte %lu iter %lu", i, j); + for (i = (HOST_VM_INFO_REV1_COUNT * sizeof(int)); i < (HOST_VM_INFO_REV2_COUNT * sizeof(int)); i++) { + T_QUIET; T_ASSERT_EQ(datap[i], lett, "HOST_VM_INFO_REV1 byte %lu iter %lu", i, j); } datap = (char*) &data[j].host_expired_task_info; - for ( i = (TASK_POWER_INFO_COUNT * sizeof(int)); i< (TASK_POWER_INFO_V2_COUNT * sizeof(int)); i++) { - T_QUIET;T_ASSERT_EQ(datap[i], lett, "TASK_POWER_INFO_COUNT byte %lu iter %lu", i, j); + for (i = (TASK_POWER_INFO_COUNT * sizeof(int)); i < (TASK_POWER_INFO_V2_COUNT * sizeof(int)); i++) { + T_QUIET; T_ASSERT_EQ(datap[i], lett, "TASK_POWER_INFO_COUNT byte %lu iter %lu", i, j); } } T_LOG("No data overflow"); @@ -95,48 +97,47 @@ check_host_info(struct all_host_info* data, unsigned long iter, char lett){ datap = (char*) data; /* check that after MAX_ATTEMP_PER_SEC data are all the same */ - for ( i = 0 ; i < sizeof(struct all_host_info) ; i++ ) - for ( j = MAX_ATTEMP_PER_SEC - 1 ; j < iter - 1; j++) { - T_QUIET; T_ASSERT_EQ(datap[i+(j * sizeof(struct all_host_info))], datap[i+((j+1) * sizeof(struct all_host_info))], "all_host_info iter %lu does not match iter %lu", j, j+1); + for (i = 0; i < sizeof(struct 
all_host_info); i++) { + for (j = MAX_ATTEMP_PER_SEC - 1; j < iter - 1; j++) { + T_QUIET; T_ASSERT_EQ(datap[i + (j * sizeof(struct all_host_info))], datap[i + ((j + 1) * sizeof(struct all_host_info))], "all_host_info iter %lu does not match iter %lu", j, j + 1); } + } T_LOG("Data was cached"); } static void -get_host_info(struct all_host_info* data, host_t self, int iter){ +get_host_info(struct all_host_info* data, host_t self, int iter) +{ int i; unsigned int count; - for (i = 0; i < iter; i++){ + for (i = 0; i < iter; i++) { count = HOST_VM_INFO64_REV0_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics64(self, HOST_VM_INFO64, (host_info64_t)&data[i].host_vm_info64_rev0, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics64(self, HOST_VM_INFO64, (host_info64_t)&data[i].host_vm_info64_rev0, &count), NULL); count = HOST_VM_INFO64_REV1_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics64(self, HOST_VM_INFO64, (host_info64_t)&data[i].host_vm_info64_rev1, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics64(self, HOST_VM_INFO64, (host_info64_t)&data[i].host_vm_info64_rev1, &count), NULL); count = HOST_EXTMOD_INFO64_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics64(self, HOST_EXTMOD_INFO64, (host_info64_t)&data[i].host_extmod_info64, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics64(self, HOST_EXTMOD_INFO64, (host_info64_t)&data[i].host_extmod_info64, &count), NULL); count = HOST_LOAD_INFO_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_LOAD_INFO, (host_info_t)&data[i].host_load_info, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_LOAD_INFO, (host_info_t)&data[i].host_load_info, &count), NULL); count = HOST_VM_INFO_REV0_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_VM_INFO, (host_info_t)&data[i].host_vm_info_rev0, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_VM_INFO, (host_info_t)&data[i].host_vm_info_rev0, &count), NULL); count = HOST_VM_INFO_REV1_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_VM_INFO, (host_info_t)&data[i].host_vm_info_rev1, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_VM_INFO, (host_info_t)&data[i].host_vm_info_rev1, &count), NULL); count = HOST_VM_INFO_REV2_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_VM_INFO, (host_info_t)&data[i].host_vm_info_rev2, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_VM_INFO, (host_info_t)&data[i].host_vm_info_rev2, &count), NULL); count = HOST_CPU_LOAD_INFO_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_CPU_LOAD_INFO, (host_info_t)&data[i].host_cpu_load_info, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_CPU_LOAD_INFO, (host_info_t)&data[i].host_cpu_load_info, &count), NULL); count = TASK_POWER_INFO_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_EXPIRED_TASK_INFO, (host_info_t)&data[i].host_expired_task_info, &count), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_EXPIRED_TASK_INFO, (host_info_t)&data[i].host_expired_task_info, &count), NULL); count = TASK_POWER_INFO_V2_COUNT; - T_QUIET;T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_EXPIRED_TASK_INFO, (host_info_t)&data[i].host_expired_task_info2, &count), NULL); - + T_QUIET; T_ASSERT_POSIX_ZERO(host_statistics(self, HOST_EXPIRED_TASK_INFO, (host_info_t)&data[i].host_expired_task_info2, &count), NULL); } - } T_DECL(test_host_statistics, "testing rate limit for host_statistics", - 
T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true)) + T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true)) { - unsigned long long start, end, window; int retry = 0; host_t self; @@ -144,34 +145,35 @@ T_DECL(test_host_statistics, "testing rate limit for host_statistics", struct all_host_info* data; mach_timebase_info_data_t timebaseInfo = { 0, 0 }; - if (remove_platform_binary()) + if (remove_platform_binary()) { T_SKIP("Failed to remove platform binary"); + } data = malloc(ITER * sizeof(struct all_host_info)); - T_QUIET;T_ASSERT_NE(data, NULL, "malloc"); + T_QUIET; T_ASSERT_NE(data, NULL, "malloc"); /* check the size of the data structure against the bytes in COUNT*/ - T_QUIET;T_ASSERT_EQ(sizeof(data[0].host_vm_info64_rev0), HOST_VM_INFO64_COUNT * sizeof(int), "HOST_VM_INFO64_COUNT"); - T_QUIET;T_ASSERT_EQ(sizeof(data[0].host_extmod_info64), HOST_EXTMOD_INFO64_COUNT * sizeof(int), "HOST_EXTMOD_INFO64_COUNT"); - T_QUIET;T_ASSERT_EQ(sizeof(data[0].host_load_info), HOST_LOAD_INFO_COUNT * sizeof(int), "HOST_LOAD_INFO_COUNT"); - T_QUIET;T_ASSERT_EQ(sizeof(data[0].host_vm_info_rev0), HOST_VM_INFO_COUNT * sizeof(int), "HOST_VM_INFO_COUNT"); - T_QUIET;T_ASSERT_EQ(sizeof(data[0].host_cpu_load_info), HOST_CPU_LOAD_INFO_COUNT * sizeof(int), "HOST_CPU_LOAD_INFO_COUNT"); - T_QUIET;T_ASSERT_EQ(sizeof(data[0].host_expired_task_info2), TASK_POWER_INFO_V2_COUNT * sizeof(int), "TASK_POWER_INFO_V2_COUNT"); + T_QUIET; T_ASSERT_EQ(sizeof(data[0].host_vm_info64_rev0), HOST_VM_INFO64_COUNT * sizeof(int), "HOST_VM_INFO64_COUNT"); + T_QUIET; T_ASSERT_EQ(sizeof(data[0].host_extmod_info64), HOST_EXTMOD_INFO64_COUNT * sizeof(int), "HOST_EXTMOD_INFO64_COUNT"); + T_QUIET; T_ASSERT_EQ(sizeof(data[0].host_load_info), HOST_LOAD_INFO_COUNT * sizeof(int), "HOST_LOAD_INFO_COUNT"); + T_QUIET; T_ASSERT_EQ(sizeof(data[0].host_vm_info_rev0), HOST_VM_INFO_COUNT * sizeof(int), "HOST_VM_INFO_COUNT"); + T_QUIET; T_ASSERT_EQ(sizeof(data[0].host_cpu_load_info), HOST_CPU_LOAD_INFO_COUNT * sizeof(int), "HOST_CPU_LOAD_INFO_COUNT"); + T_QUIET; T_ASSERT_EQ(sizeof(data[0].host_expired_task_info2), TASK_POWER_INFO_V2_COUNT * sizeof(int), "TASK_POWER_INFO_V2_COUNT"); /* check that the latest revision is the COUNT */ - T_QUIET;T_ASSERT_EQ(HOST_VM_INFO64_REV1_COUNT, HOST_VM_INFO64_COUNT, "HOST_VM_INFO64_REV1_COUNT"); - T_QUIET;T_ASSERT_EQ(HOST_VM_INFO_REV2_COUNT, HOST_VM_INFO_COUNT, "HOST_VM_INFO_REV2_COUNT"); + T_QUIET; T_ASSERT_EQ(HOST_VM_INFO64_REV1_COUNT, HOST_VM_INFO64_COUNT, "HOST_VM_INFO64_REV1_COUNT"); + T_QUIET; T_ASSERT_EQ(HOST_VM_INFO_REV2_COUNT, HOST_VM_INFO_COUNT, "HOST_VM_INFO_REV2_COUNT"); /* check that the previous revision are smaller than the latest */ - T_QUIET;T_ASSERT_LE(HOST_VM_INFO64_REV0_COUNT, HOST_VM_INFO64_REV1_COUNT, "HOST_VM_INFO64_REV0"); - T_QUIET;T_ASSERT_LE(HOST_VM_INFO_REV0_COUNT, HOST_VM_INFO_REV2_COUNT, "HOST_VM_INFO_REV0_COUNT"); - T_QUIET;T_ASSERT_LE(HOST_VM_INFO_REV1_COUNT, HOST_VM_INFO_REV2_COUNT, "HOST_VM_INFO_REV1_COUNT"); - T_QUIET;T_ASSERT_LE(TASK_POWER_INFO_COUNT,TASK_POWER_INFO_V2_COUNT, "TASK_POWER_INFO_COUNT"); + T_QUIET; T_ASSERT_LE(HOST_VM_INFO64_REV0_COUNT, HOST_VM_INFO64_REV1_COUNT, "HOST_VM_INFO64_REV0"); + T_QUIET; T_ASSERT_LE(HOST_VM_INFO_REV0_COUNT, HOST_VM_INFO_REV2_COUNT, "HOST_VM_INFO_REV0_COUNT"); + T_QUIET; T_ASSERT_LE(HOST_VM_INFO_REV1_COUNT, HOST_VM_INFO_REV2_COUNT, "HOST_VM_INFO_REV1_COUNT"); + T_QUIET; T_ASSERT_LE(TASK_POWER_INFO_COUNT, TASK_POWER_INFO_V2_COUNT, "TASK_POWER_INFO_COUNT"); memset(data, lett, ITER * sizeof(struct all_host_info)); self = mach_host_self(); - 
T_QUIET;T_ASSERT_EQ(mach_timebase_info(&timebaseInfo), KERN_SUCCESS, NULL); + T_QUIET; T_ASSERT_EQ(mach_timebase_info(&timebaseInfo), KERN_SUCCESS, NULL); window = (WINDOW * NSEC_PER_SEC * timebaseInfo.denom) / timebaseInfo.numer; retry = 0; @@ -181,11 +183,11 @@ T_DECL(test_host_statistics, "testing rate limit for host_statistics", get_host_info(data, self, ITER); end = mach_continuous_time(); retry++; - } while( (end - start > window) && retry <= RETRY); + } while ((end - start > window) && retry <= RETRY); - if (retry <= RETRY) + if (retry <= RETRY) { check_host_info(data, ITER, lett); - else + } else { T_SKIP("Failed to find window for test"); + } } - diff --git a/tests/ioperf.c b/tests/ioperf.c index 1eb2e8cf2..6db459e70 100644 --- a/tests/ioperf.c +++ b/tests/ioperf.c @@ -43,157 +43,156 @@ char *data_buf = NULL; extern char **environ; static struct { - pthread_t thread; + pthread_t thread; } threads[MAX_THREADS]; -static uint64_t -nanos_to_abs(uint64_t nanos) -{ - return nanos * timebase_info.denom / timebase_info.numer; +static uint64_t +nanos_to_abs(uint64_t nanos) +{ + return nanos * timebase_info.denom / timebase_info.numer; } static void io_perf_test_io_init(void) { - int spawn_ret, pid; - char *const mount_args[] = {"/usr/local/sbin/mount_nand.sh", NULL}; - spawn_ret = posix_spawn(&pid, mount_args[0], NULL, NULL, mount_args, environ); - if (spawn_ret < 0) { - T_SKIP("NAND mounting in LTE not possible on this device. Skipping test!"); - } - waitpid(pid, &spawn_ret, 0); - if (WIFEXITED(spawn_ret) && !WEXITSTATUS(spawn_ret)) { - T_PASS("NAND mounted successfully"); - } else { - T_SKIP("Unable to mount NAND. Skipping test!"); - } - - /* Mark the main thread as fixed priority */ - struct sched_param param = {.sched_priority = THR_MANAGER_PRI}; - T_ASSERT_POSIX_ZERO(pthread_setschedparam(pthread_self(), SCHED_FIFO, &param), - "pthread_setschedparam"); - - /* Set I/O Policy to Tier 0 */ - T_ASSERT_POSIX_ZERO(setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, - IOPOL_IMPORTANT), "setiopolicy"); - - /* Create data buffer */ - data_buf = malloc(IO_SIZE * 16); - T_ASSERT_NOTNULL(data_buf, "Data buffer allocation"); - - int rndfd = open("/dev/urandom", O_RDONLY, S_IRUSR); - T_ASSERT_POSIX_SUCCESS(rndfd, "Open /dev/urandom"); - T_ASSERT_GE_INT((int)read(rndfd, data_buf, IO_SIZE * 16), 0, "read /dev/urandom"); - close(rndfd); - - /* Create test file */ - int fd = open("/mnt2/test", O_CREAT | O_WRONLY, S_IRUSR); - T_ASSERT_POSIX_SUCCESS(fd, 0, "Open /mnt2/test for writing!"); - - T_ASSERT_POSIX_ZERO(fcntl(fd, F_NOCACHE, 1), "fcntl F_NOCACHE enable"); - for (int size = 0; size < FILE_SIZE;) { - T_QUIET; - T_ASSERT_GE_INT((int)write(fd, data_buf, IO_SIZE * 16), 0, "write test file"); - size += (IO_SIZE * 16); - } - close(fd); - sync(); - + int spawn_ret, pid; + char *const mount_args[] = {"/usr/local/sbin/mount_nand.sh", NULL}; + spawn_ret = posix_spawn(&pid, mount_args[0], NULL, NULL, mount_args, environ); + if (spawn_ret < 0) { + T_SKIP("NAND mounting in LTE not possible on this device. Skipping test!"); + } + waitpid(pid, &spawn_ret, 0); + if (WIFEXITED(spawn_ret) && !WEXITSTATUS(spawn_ret)) { + T_PASS("NAND mounted successfully"); + } else { + T_SKIP("Unable to mount NAND. 
Skipping test!"); + } + + /* Mark the main thread as fixed priority */ + struct sched_param param = {.sched_priority = THR_MANAGER_PRI}; + T_ASSERT_POSIX_ZERO(pthread_setschedparam(pthread_self(), SCHED_FIFO, &param), + "pthread_setschedparam"); + + /* Set I/O Policy to Tier 0 */ + T_ASSERT_POSIX_ZERO(setiopolicy_np(IOPOL_TYPE_DISK, IOPOL_SCOPE_PROCESS, + IOPOL_IMPORTANT), "setiopolicy"); + + /* Create data buffer */ + data_buf = malloc(IO_SIZE * 16); + T_ASSERT_NOTNULL(data_buf, "Data buffer allocation"); + + int rndfd = open("/dev/urandom", O_RDONLY, S_IRUSR); + T_ASSERT_POSIX_SUCCESS(rndfd, "Open /dev/urandom"); + T_ASSERT_GE_INT((int)read(rndfd, data_buf, IO_SIZE * 16), 0, "read /dev/urandom"); + close(rndfd); + + /* Create test file */ + int fd = open("/mnt2/test", O_CREAT | O_WRONLY, S_IRUSR); + T_ASSERT_POSIX_SUCCESS(fd, 0, "Open /mnt2/test for writing!"); + + T_ASSERT_POSIX_ZERO(fcntl(fd, F_NOCACHE, 1), "fcntl F_NOCACHE enable"); + for (int size = 0; size < FILE_SIZE;) { + T_QUIET; + T_ASSERT_GE_INT((int)write(fd, data_buf, IO_SIZE * 16), 0, "write test file"); + size += (IO_SIZE * 16); + } + close(fd); + sync(); } static pthread_t -create_thread(uint32_t thread_id, uint32_t priority, bool fixpri, - void *(*start_routine)(void *)) +create_thread(uint32_t thread_id, uint32_t priority, bool fixpri, + void *(*start_routine)(void *)) { - int rv; - pthread_t new_thread; - struct sched_param param = { .sched_priority = (int)priority }; - pthread_attr_t attr; + int rv; + pthread_t new_thread; + struct sched_param param = { .sched_priority = (int)priority }; + pthread_attr_t attr; - T_ASSERT_POSIX_ZERO(pthread_attr_init(&attr), "pthread_attr_init"); + T_ASSERT_POSIX_ZERO(pthread_attr_init(&attr), "pthread_attr_init"); - T_ASSERT_POSIX_ZERO(pthread_attr_setschedparam(&attr, &param), - "pthread_attr_setschedparam"); + T_ASSERT_POSIX_ZERO(pthread_attr_setschedparam(&attr, &param), + "pthread_attr_setschedparam"); - if (fixpri) { - T_ASSERT_POSIX_ZERO(pthread_attr_setschedpolicy(&attr, SCHED_RR), - "pthread_attr_setschedpolicy"); - } + if (fixpri) { + T_ASSERT_POSIX_ZERO(pthread_attr_setschedpolicy(&attr, SCHED_RR), + "pthread_attr_setschedpolicy"); + } - T_ASSERT_POSIX_ZERO(pthread_create(&new_thread, &attr, start_routine, - (void*)(uintptr_t)thread_id), "pthread_create"); + T_ASSERT_POSIX_ZERO(pthread_create(&new_thread, &attr, start_routine, + (void*)(uintptr_t)thread_id), "pthread_create"); - T_ASSERT_POSIX_ZERO(pthread_attr_destroy(&attr), "pthread_attr_destroy"); + T_ASSERT_POSIX_ZERO(pthread_attr_destroy(&attr), "pthread_attr_destroy"); - threads[thread_id].thread = new_thread; + threads[thread_id].thread = new_thread; - return new_thread; + return new_thread; } /* Spin until a specified number of seconds elapses */ static void spin_for_duration(uint32_t seconds) { - uint64_t duration = nanos_to_abs((uint64_t)seconds * NSEC_PER_SEC); - uint64_t current_time = mach_absolute_time(); - uint64_t timeout = duration + current_time; + uint64_t duration = nanos_to_abs((uint64_t)seconds * NSEC_PER_SEC); + uint64_t current_time = mach_absolute_time(); + uint64_t timeout = duration + current_time; - uint64_t spin_count = 0; + uint64_t spin_count = 0; - while (mach_absolute_time() < timeout && atomic_load_explicit(&keep_going, - memory_order_relaxed)) { - spin_count++; - } + while (mach_absolute_time() < timeout && atomic_load_explicit(&keep_going, + memory_order_relaxed)) { + spin_count++; + } } static void * spin_thread(void *arg) { - uint32_t thread_id = (uint32_t) arg; - char name[30] = ""; - 
snprintf(name, sizeof(name), "spin thread %2d", thread_id); - pthread_setname_np(name); - T_ASSERT_MACH_SUCCESS(semaphore_wait_signal(semaphore, worker_sem), - "semaphore_wait_signal"); - spin_for_duration(SPIN_SECS); - return NULL; + uint32_t thread_id = (uint32_t) arg; + char name[30] = ""; + + snprintf(name, sizeof(name), "spin thread %2d", thread_id); + pthread_setname_np(name); + T_ASSERT_MACH_SUCCESS(semaphore_wait_signal(semaphore, worker_sem), + "semaphore_wait_signal"); + spin_for_duration(SPIN_SECS); + return NULL; } void perform_io(dt_stat_time_t stat) { - /* Open the test data file */ - int test_file_fd = open("/mnt2/test", O_RDONLY); - T_WITH_ERRNO; - T_ASSERT_POSIX_SUCCESS(test_file_fd, "Open test data file"); - - /* Disable caching and read-ahead for the file */ - T_ASSERT_POSIX_ZERO(fcntl(test_file_fd, F_NOCACHE, 1), "fcntl F_NOCACHE enable"); - T_ASSERT_POSIX_ZERO(fcntl(test_file_fd, F_RDAHEAD, 0), "fcntl F_RDAHEAD disable"); - - uint32_t count = 0; - int ret; - - for (int i=0; i < WARMUP_ITERATIONS; i++) { - /* Warmup loop */ - read(test_file_fd, data_buf, IO_SIZE); - } - - do { - T_STAT_MEASURE(stat) { - ret = read(test_file_fd, data_buf, IO_SIZE); - } - if (ret == 0) { - T_QUIET; - T_ASSERT_POSIX_SUCCESS(lseek(test_file_fd, 0, SEEK_SET), "lseek begin"); - } else if (ret < 0) { - T_FAIL("read failure"); - T_END; - } - count++; - } while(count < IO_COUNT); - close(test_file_fd); + /* Open the test data file */ + int test_file_fd = open("/mnt2/test", O_RDONLY); + T_WITH_ERRNO; + T_ASSERT_POSIX_SUCCESS(test_file_fd, "Open test data file"); + + /* Disable caching and read-ahead for the file */ + T_ASSERT_POSIX_ZERO(fcntl(test_file_fd, F_NOCACHE, 1), "fcntl F_NOCACHE enable"); + T_ASSERT_POSIX_ZERO(fcntl(test_file_fd, F_RDAHEAD, 0), "fcntl F_RDAHEAD disable"); + + uint32_t count = 0; + int ret; + + for (int i = 0; i < WARMUP_ITERATIONS; i++) { + /* Warmup loop */ + read(test_file_fd, data_buf, IO_SIZE); + } + + do { + T_STAT_MEASURE(stat) { + ret = read(test_file_fd, data_buf, IO_SIZE); + } + if (ret == 0) { + T_QUIET; + T_ASSERT_POSIX_SUCCESS(lseek(test_file_fd, 0, SEEK_SET), "lseek begin"); + } else if (ret < 0) { + T_FAIL("read failure"); + T_END; + } + count++; + } while (count < IO_COUNT); + close(test_file_fd); } T_GLOBAL_META(T_META_NAMESPACE("xnu.io"), T_META_TAG_PERF); @@ -201,56 +200,55 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.io"), T_META_TAG_PERF); /* Disable the test on MacOS for now */ T_DECL(read_perf, "Sequential Uncached Read Performance", T_META_TYPE_PERF, T_META_CHECK_LEAKS(NO), T_META_ASROOT(YES), T_META_LTEPHASE(LTE_POSTINIT)) { - #if !CONFIG_EMBEDDED - T_SKIP("Not supported on MacOS"); + T_SKIP("Not supported on MacOS"); #endif /* !CONFIG_EMBEDDED */ - io_perf_test_io_init(); - pthread_setname_np("main thread"); - - T_ASSERT_MACH_SUCCESS(mach_timebase_info(&timebase_info), "mach_timebase_info"); - - dt_stat_time_t seq_noload = dt_stat_time_create("sequential read latency (CPU idle)"); - perform_io(seq_noload); - dt_stat_finalize(seq_noload); - - /* - * We create spinner threads for this test so that all other cores are - * busy. That way the I/O issue thread has to context switch to the - * IOWorkLoop thread and back for the I/O. 
- */ - T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &semaphore, - SYNC_POLICY_FIFO, 0), "semaphore_create"); - - T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &worker_sem, - SYNC_POLICY_FIFO, 0), "semaphore_create"); - - size_t ncpu_size = sizeof(g_numcpus); - T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0), - "sysctlbyname(hw.ncpu)"); - - T_LOG("hw.ncpu: %d\n", g_numcpus); - uint32_t n_spinners = g_numcpus - 1; - - for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) { - threads[thread_id].thread = create_thread(thread_id, THR_SPINNER_PRI, - true, &spin_thread); - } - - for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) { - T_ASSERT_MACH_SUCCESS(semaphore_wait(worker_sem), "semaphore_wait"); - } - - T_ASSERT_MACH_SUCCESS(semaphore_signal_all(semaphore), "semaphore_signal"); - - dt_stat_time_t seq_load = dt_stat_time_create("sequential read latency (Single CPU)"); - perform_io(seq_load); - dt_stat_finalize(seq_load); - - atomic_store_explicit(&keep_going, 0, memory_order_relaxed); - for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) { - T_ASSERT_POSIX_ZERO(pthread_join(threads[thread_id].thread, NULL), - "pthread_join %d", thread_id); - } + io_perf_test_io_init(); + pthread_setname_np("main thread"); + + T_ASSERT_MACH_SUCCESS(mach_timebase_info(&timebase_info), "mach_timebase_info"); + + dt_stat_time_t seq_noload = dt_stat_time_create("sequential read latency (CPU idle)"); + perform_io(seq_noload); + dt_stat_finalize(seq_noload); + + /* + * We create spinner threads for this test so that all other cores are + * busy. That way the I/O issue thread has to context switch to the + * IOWorkLoop thread and back for the I/O. + */ + T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &semaphore, + SYNC_POLICY_FIFO, 0), "semaphore_create"); + + T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &worker_sem, + SYNC_POLICY_FIFO, 0), "semaphore_create"); + + size_t ncpu_size = sizeof(g_numcpus); + T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0), + "sysctlbyname(hw.ncpu)"); + + T_LOG("hw.ncpu: %d\n", g_numcpus); + uint32_t n_spinners = g_numcpus - 1; + + for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) { + threads[thread_id].thread = create_thread(thread_id, THR_SPINNER_PRI, + true, &spin_thread); + } + + for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) { + T_ASSERT_MACH_SUCCESS(semaphore_wait(worker_sem), "semaphore_wait"); + } + + T_ASSERT_MACH_SUCCESS(semaphore_signal_all(semaphore), "semaphore_signal"); + + dt_stat_time_t seq_load = dt_stat_time_create("sequential read latency (Single CPU)"); + perform_io(seq_load); + dt_stat_finalize(seq_load); + + atomic_store_explicit(&keep_going, 0, memory_order_relaxed); + for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) { + T_ASSERT_POSIX_ZERO(pthread_join(threads[thread_id].thread, NULL), + "pthread_join %d", thread_id); + } } diff --git a/tests/jumbo_va_spaces_28530648.c b/tests/jumbo_va_spaces_28530648.c index aa081f3d8..6f76a7a73 100644 --- a/tests/jumbo_va_spaces_28530648.c +++ b/tests/jumbo_va_spaces_28530648.c @@ -17,7 +17,11 @@ */ #define ALLOC_TEST_GB 54 +#if defined(ENTITLED) T_DECL(jumbo_va_spaces_28530648, +#else +T_DECL(jumbo_va_spaces_28530648_unentitled, +#endif "Verify that the \"dynamic-codesigning\" entitlement is required to utilize an extra-large " "VA space on arm64", T_META_NAMESPACE("xnu.vm"), diff --git a/tests/kdebug.c b/tests/kdebug.c index 
6be5164d1..cd9b6c776 100644 --- a/tests/kdebug.c +++ b/tests/kdebug.c @@ -18,8 +18,8 @@ #include T_GLOBAL_META( - T_META_NAMESPACE("xnu.ktrace"), - T_META_ASROOT(true)); + T_META_NAMESPACE("xnu.ktrace"), + T_META_ASROOT(true)); #define KDBG_TEST_MACROS 1 #define KDBG_TEST_OLD_TIMES 2 @@ -30,7 +30,7 @@ assert_kdebug_test(unsigned int flavor) { size_t size = flavor; int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDTEST }; T_ASSERT_POSIX_SUCCESS(sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL, - &size, NULL, 0), "KERN_KDTEST sysctl"); + &size, NULL, 0), "KERN_KDTEST sysctl"); } #pragma mark kdebug syscalls @@ -45,7 +45,7 @@ T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events") { ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){}); __block int events_seen = 0; - ktrace_events_single(s, TRACE_DEBUGID, ^void(struct trace_point *tp) { + ktrace_events_single(s, TRACE_DEBUGID, ^void (struct trace_point *tp) { events_seen++; T_PASS("saw traced event"); @@ -76,7 +76,7 @@ T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events") #define SIGNPOST_PAIRED_CODE (0x20U) T_DECL(kdebug_signpost_syscall, - "test that kdebug_signpost(2) emits correct events") + "test that kdebug_signpost(2) emits correct events") { ktrace_session_t s = ktrace_session_create(); T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session"); @@ -87,8 +87,8 @@ T_DECL(kdebug_signpost_syscall, /* make sure to get enough events for the KDBUFWAIT to trigger */ // ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){}); ktrace_events_single(s, - APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE), - ^(struct trace_point *tp) { + APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE), + ^(struct trace_point *tp) { single_seen++; T_PASS("single signpost is traced"); @@ -99,8 +99,8 @@ T_DECL(kdebug_signpost_syscall, }); ktrace_events_single_paired(s, - APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE), - ^(struct trace_point *start, struct trace_point *end) { + APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE), + ^(struct trace_point *start, struct trace_point *end) { paired_seen++; T_PASS("paired signposts are traced"); @@ -115,16 +115,16 @@ T_DECL(kdebug_signpost_syscall, T_EXPECT_EQ(end->arg4, 12UL, "argument 4 of end signpost is correct"); T_EXPECT_EQ(single_seen, 1, - "signposts are traced in the correct order"); + "signposts are traced in the correct order"); ktrace_end(s, 1); }); ktrace_set_completion_handler(s, ^(void) { T_QUIET; T_EXPECT_NE(single_seen, 0, - "did not see single tracepoint before timeout"); + "did not see single tracepoint before timeout"); T_QUIET; T_EXPECT_NE(paired_seen, 0, - "did not see single tracepoint before timeout"); + "did not see paired tracepoint before timeout"); ktrace_session_destroy(s); T_END; }); @@ -132,16 +132,16 @@ T_DECL(kdebug_signpost_syscall, ktrace_filter_pid(s, getpid()); T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), - "started tracing"); + "started tracing"); T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE, 1, 2, 3, 4), - "emitted single signpost"); + "emitted single signpost"); T_EXPECT_POSIX_SUCCESS( - kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8), - "emitted start signpost"); + kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8), + "emitted start signpost"); T_EXPECT_POSIX_SUCCESS( - kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12), - "emitted end signpost"); + kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12), + "emitted end signpost"); ktrace_end(s, 
0); dispatch_main(); @@ -154,24 +154,24 @@ T_DECL(kdebug_signpost_syscall, #define WRAPPING_EVENTS_THRESHOLD (100) T_DECL(wrapping, - "ensure that wrapping traces lost events and no events prior to the wrap", - T_META_CHECK_LEAKS(false)) + "ensure that wrapping traces lost events and no events prior to the wrap", + T_META_CHECK_LEAKS(false)) { - int mib[4]; kbufinfo_t buf_info; int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5; int current_secs = wait_wrapping_secs; /* use sysctls manually to bypass libktrace assumptions */ - mib[0] = CTL_KERN; mib[1] = KERN_KDEBUG; mib[2] = KERN_KDSETUP; mib[3] = 0; - size_t needed = 0; - T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0), - "KERN_KDSETUP"); - + int mib[4] = { CTL_KERN, KERN_KDEBUG }; mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT; T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF"); + mib[2] = KERN_KDSETUP; mib[3] = 0; + size_t needed = 0; + T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0), + "KERN_KDSETUP"); + mib[2] = KERN_KDENABLE; mib[3] = 1; T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE"); @@ -189,12 +189,12 @@ T_DECL(wrapping, } T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0), - NULL); + NULL); } while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0); T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED, - "trace wrapped (after %d seconds within %d second timeout)", - wait_wrapping_secs - current_secs, wait_wrapping_secs); + "trace wrapped (after %d seconds within %d second timeout)", + wait_wrapping_secs - current_secs, wait_wrapping_secs); ktrace_session_t s = ktrace_session_create(); T_QUIET; T_ASSERT_NOTNULL(s, NULL); @@ -204,21 +204,21 @@ T_DECL(wrapping, ktrace_events_all(s, ^(struct trace_point *tp) { if (events == 0) { - T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS, - "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS", - tp->debugid, - ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK)); + T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS, + "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS", + tp->debugid, + ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK)); } else { - T_QUIET; - T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS, - "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS", - tp->debugid, - ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK)); + T_QUIET; + T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS, + "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS", + tp->debugid, + ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK)); } events++; if (events > WRAPPING_EVENTS_THRESHOLD) { - ktrace_end(s, 1); + ktrace_end(s, 1); } }); @@ -228,14 +228,14 @@ T_DECL(wrapping, }); T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), - "started tracing"); + "started tracing"); dispatch_main(); } T_DECL(reject_old_events, - "ensure that kdebug rejects events from before tracing began", - T_META_CHECK_LEAKS(false)) + "ensure that kdebug rejects events from before tracing began", + T_META_CHECK_LEAKS(false)) { __block uint64_t event_horizon_ts; @@ -244,10 +244,10 @@ T_DECL(reject_old_events, __block int events = 0; ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0), - KDBG_EVENTID(DBG_BSD + 1, 0, 0), ^(struct trace_point *tp) { + KDBG_EVENTID(DBG_BSD + 1, 0, 0), ^(struct trace_point *tp) { events++; T_EXPECT_GT(tp->timestamp, event_horizon_ts, - "events in trace 
should be from after tracing began"); + "events in trace should be from after tracing began"); }); ktrace_set_completion_handler(s, ^{ @@ -271,8 +271,8 @@ T_DECL(reject_old_events, #define ORDERING_TIMEOUT_SEC 5 T_DECL(ascending_time_order, - "ensure that kdebug events are in ascending order based on time", - T_META_CHECK_LEAKS(false)) + "ensure that kdebug events are in ascending order based on time", + T_META_CHECK_LEAKS(false)) { __block uint64_t prev_ts = 0; __block uint32_t prev_debugid = 0; @@ -284,12 +284,12 @@ T_DECL(ascending_time_order, ktrace_events_all(s, ^(struct trace_point *tp) { if (tp->timestamp < prev_ts) { - in_order = false; - T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)", - prev_ts, prev_debugid, prev_cpu); - T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)", - tp->timestamp, tp->debugid, tp->cpuid); - ktrace_end(s, 1); + in_order = false; + T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)", + prev_ts, prev_debugid, prev_cpu); + T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)", + tp->timestamp, tp->debugid, tp->cpuid); + ktrace_end(s, 1); } }); @@ -300,13 +300,13 @@ T_DECL(ascending_time_order, }); T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), - "started tracing"); + "started tracing"); /* try to inject old timestamps into trace */ assert_kdebug_test(KDBG_TEST_OLD_TIMES); dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ORDERING_TIMEOUT_SEC * NSEC_PER_SEC), - dispatch_get_main_queue(), ^{ + dispatch_get_main_queue(), ^{ T_LOG("ending test after timeout"); ktrace_end(s, 1); }); @@ -317,15 +317,15 @@ T_DECL(ascending_time_order, #pragma mark dyld tracing __attribute__((aligned(8))) - static const char map_uuid[16] = "map UUID"; +static const char map_uuid[16] = "map UUID"; __attribute__((aligned(8))) - static const char unmap_uuid[16] = "unmap UUID"; +static const char unmap_uuid[16] = "unmap UUID"; __attribute__((aligned(8))) - static const char sc_uuid[16] = "shared UUID"; +static const char sc_uuid[16] = "shared UUID"; - static fsid_t map_fsid = { .val = { 42, 43 } }; +static fsid_t map_fsid = { .val = { 42, 43 } }; static fsid_t unmap_fsid = { .val = { 44, 45 } }; static fsid_t sc_fsid = { .val = { 46, 47 } }; @@ -340,8 +340,8 @@ static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 }; __unused static void expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid, - uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid, - int order) + uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid, + int order) { #if defined(__LP64__) || defined(__arm64__) if (order == 0) { @@ -368,7 +368,7 @@ expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid, T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL); T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation, - exp_fsobjid->fid_generation, NULL); + exp_fsobjid->fid_generation, NULL); } else { T_ASSERT_FAIL("unrecognized order of events %d", order); } @@ -405,7 +405,7 @@ expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid, fsobjid.fid_generation = tp->arg1; T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation, - exp_fsobjid->fid_generation, NULL); + exp_fsobjid->fid_generation, NULL); } else { T_ASSERT_FAIL("unrecognized order of events %d", order); } @@ -422,16 +422,16 @@ expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid, static void expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code, - const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid, - fsobj_id_t *exp_fsobjid, uint8_t *saw_events) + 
const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid, + fsobj_id_t *exp_fsobjid, uint8_t *saw_events) { for (int i = 0; i < DYLD_EVENTS; i++) { ktrace_events_single(s, KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, - base_code + DYLD_CODE_OFFSET + (unsigned int)i), - ^(struct trace_point *tp) { + base_code + DYLD_CODE_OFFSET + (unsigned int)i), + ^(struct trace_point *tp) { T_LOG("checking %s event %c", name, 'A' + i); expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr, - exp_fsid, exp_fsobjid, i); + exp_fsid, exp_fsobjid, i); *saw_events |= (1U << i); }); } @@ -456,14 +456,14 @@ T_DECL(dyld_events, "test that dyld registering libraries emits events") T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), - "filtered to current process"); + "filtered to current process"); expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid, - MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping); + MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping); expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid, - UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping); + UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping); expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A, - sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache); + sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache); ktrace_set_completion_handler(s, ^{ ktrace_session_destroy(s); @@ -481,22 +481,22 @@ T_DECL(dyld_events, "test that dyld registering libraries emits events") info.fsid = map_fsid; info.fsobjid = map_fsobjid; T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(), - &info, 1), "registered dyld image info"); + &info, 1), "registered dyld image info"); info.load_addr = UNMAP_LOAD_ADDR; memcpy(info.uuid, unmap_uuid, sizeof(info.uuid)); info.fsid = unmap_fsid; info.fsobjid = unmap_fsobjid; T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(), - &info, 1), "unregistered dyld image info"); + &info, 1), "unregistered dyld image info"); info.load_addr = SC_LOAD_ADDR; memcpy(info.uuid, sc_uuid, sizeof(info.uuid)); info.fsid = sc_fsid; info.fsobjid = sc_fsobjid; T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info( - mach_task_self(), info, FALSE, FALSE), - "registered dyld shared cache image info"); + mach_task_self(), info, FALSE, FALSE), + "registered dyld shared cache image info"); ktrace_end(s, 0); @@ -551,7 +551,7 @@ is_development_kernel(void) T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &dev, - &dev_size, NULL, 0), NULL); + &dev_size, NULL, 0), NULL); is_development = (dev != 0); }); @@ -560,7 +560,7 @@ is_development_kernel(void) static void expect_event(struct trace_point *tp, const char *name, unsigned int *events, - const uint32_t *event_ids, size_t event_ids_len) + const uint32_t *event_ids, size_t event_ids_len) { unsigned int event_idx = *events; bool event_found = false; @@ -579,7 +579,7 @@ expect_event(struct trace_point *tp, const char *name, unsigned int *events, *events += 1; for (i = 0; i < event_idx; i++) { T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)i + 1, - NULL); + NULL); } for (; i < 4; i++) { T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)0, NULL); @@ -590,7 +590,7 @@ static void expect_release_event(struct trace_point *tp, unsigned int *events) { expect_event(tp, "release", events, rel_evts, - sizeof(rel_evts) / sizeof(rel_evts[0])); + sizeof(rel_evts) / sizeof(rel_evts[0])); } static void @@ -603,20 +603,20 @@ static void 
expect_filtered_event(struct trace_point *tp, unsigned int *events) { expect_event(tp, "filtered", events, filt_evts, - sizeof(filt_evts) / sizeof(filt_evts[0])); + sizeof(filt_evts) / sizeof(filt_evts[0])); } static void expect_noprocfilt_event(struct trace_point *tp, unsigned int *events) { expect_event(tp, "noprocfilt", events, noprocfilt_evts, - sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0])); + sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0])); } static void expect_kdbg_test_events(ktrace_session_t s, bool use_all_callback, - void (^cb)(unsigned int dev_seen, unsigned int rel_seen, - unsigned int filt_seen, unsigned int noprocfilt_seen)) + void (^cb)(unsigned int dev_seen, unsigned int rel_seen, + unsigned int filt_seen, unsigned int noprocfilt_seen)) { __block unsigned int dev_seen = 0; __block unsigned int rel_seen = 0; @@ -634,7 +634,7 @@ expect_kdbg_test_events(ktrace_session_t s, bool use_all_callback, ktrace_events_all(s, evtcb); } else { ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0), - KDBG_EVENTID(DBG_BSD + 1, 0, 0), evtcb); + KDBG_EVENTID(DBG_BSD + 1, 0, 0), evtcb); } ktrace_set_completion_handler(s, ^{ @@ -655,11 +655,11 @@ T_DECL(kernel_events, "ensure kernel macros work") T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session"); T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), - "filtered events to current process"); + "filtered events to current process"); expect_kdbg_test_events(s, false, - ^(unsigned int dev_seen, unsigned int rel_seen, - unsigned int filt_seen, unsigned int noprocfilt_seen) { + ^(unsigned int dev_seen, unsigned int rel_seen, + unsigned int filt_seen, unsigned int noprocfilt_seen) { /* * Development-only events are only filtered if running on an embedded * OS. @@ -672,12 +672,12 @@ T_DECL(kernel_events, "ensure kernel macros work") #endif T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, - "release and development events seen"); + "release and development events seen"); T_EXPECT_EQ(dev_seen, dev_exp, "development-only events %sseen", - dev_exp ? "" : "not "); + dev_exp ? "" : "not "); T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen"); T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS, - "process filter-agnostic events seen"); + "process filter-agnostic events seen"); }); dispatch_main(); @@ -689,29 +689,29 @@ T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work") T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session"); T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), - "filtered events to current process"); + "filtered events to current process"); expect_kdbg_test_events(s, true, - ^(unsigned int dev_seen, unsigned int rel_seen, - unsigned int filt_seen, unsigned int noprocfilt_seen) { + ^(unsigned int dev_seen, unsigned int rel_seen, + unsigned int filt_seen, unsigned int noprocfilt_seen) { T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL); #if defined(__arm__) || defined(__arm64__) T_EXPECT_EQ(dev_seen, is_development_kernel() ? 
EXP_KERNEL_EVENTS : 0U, - NULL); + NULL); #else T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS, - "development-only events seen"); + "development-only events seen"); #endif /* defined(__arm__) || defined(__arm64__) */ T_EXPECT_EQ(filt_seen, 0U, "no filter-only events seen"); T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS, - "process filter-agnostic events seen"); + "process filter-agnostic events seen"); }); dispatch_main(); } T_DECL(kernel_events_noprocfilt, - "ensure that the no process filter kernel macros work") + "ensure that the no process filter kernel macros work") { ktrace_session_t s = ktrace_session_create(); T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session"); @@ -723,19 +723,19 @@ T_DECL(kernel_events_noprocfilt, for (size_t i = 0; i < sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]); i++) { T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s, - noprocfilt_evts[i]), - "ignored process filter for noprocfilt event"); + noprocfilt_evts[i]), + "ignored process filter for noprocfilt event"); } expect_kdbg_test_events(s, false, - ^(unsigned int dev_seen, unsigned int rel_seen, - unsigned int filt_seen, unsigned int noprocfilt_seen) { + ^(unsigned int dev_seen, unsigned int rel_seen, + unsigned int filt_seen, unsigned int noprocfilt_seen) { T_EXPECT_EQ(rel_seen, 0U, "release and development events not seen"); T_EXPECT_EQ(dev_seen, 0U, "development-only events not seen"); T_EXPECT_EQ(filt_seen, 0U, "filter-only events not seen"); T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS, - "process filter-agnostic events seen"); + "process filter-agnostic events seen"); }); dispatch_main(); @@ -765,7 +765,7 @@ kdebug_abuser_thread(void *ctx) } T_DECL(stress, "emit events on all but one CPU with a small buffer", - T_META_CHECK_LEAKS(false)) + T_META_CHECK_LEAKS(false)) { T_SETUPBEGIN; ktrace_session_t s = ktrace_session_create(); @@ -782,15 +782,15 @@ T_DECL(stress, "emit events on all but one CPU with a small buffer", (void)kperf_timer_count_set(1); int kperror = kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS)); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_period_set %llu ns", - TIMER_NS); + TIMER_NS); kperror = kperf_timer_action_set(0, 1); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_action_set"); kperror = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO | - KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK | - KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO | - KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH | - KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM | - KPERF_SAMPLER_TH_INSTRS_CYCLES); + KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK | + KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO | + KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH | + KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM | + KPERF_SAMPLER_TH_INSTRS_CYCLES); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_action_samplers_set"); /* You monster... 
*/ @@ -810,7 +810,7 @@ T_DECL(stress, "emit events on all but one CPU with a small buffer", pthread_t *threads = calloc((unsigned int)ncpus - 1, sizeof(pthread_t)); T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(threads, "calloc(%d threads)", - ncpus - 1); + ncpus - 1); ktrace_set_completion_handler(s, ^{ T_SETUPBEGIN; @@ -826,7 +826,7 @@ T_DECL(stress, "emit events on all but one CPU with a small buffer", ktrace_file_t f = ktrace_file_open(filepath, false); T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(f, "ktrace_file_open %s", - filepath); + filepath); uint64_t first_timestamp = 0; error = ktrace_file_earliest_timestamp(f, &first_timestamp); T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_file_earliest_timestamp"); @@ -840,35 +840,35 @@ T_DECL(stress, "emit events on all but one CPU with a small buffer", nevents++; uint64_t delta_ns = 0; T_QUIET; T_EXPECT_GE(tp->timestamp, prev_timestamp, - "timestamps are monotonically increasing"); + "timestamps are monotonically increasing"); int converror = ktrace_convert_timestamp_to_nanoseconds(sread, - tp->timestamp - prev_timestamp, &delta_ns); + tp->timestamp - prev_timestamp, &delta_ns); T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns"); if (prev_timestamp && delta_ns > GAP_THRESHOLD_NS) { - if (tp->debugname) { - T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)", - (double)delta_ns / 1e9, prev_timestamp, - tp->timestamp, tp->cpuid, tp->debugname, tp->debugid); + if (tp->debugname) { + T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)", + (double)delta_ns / 1e9, prev_timestamp, + tp->timestamp, tp->cpuid, tp->debugname, tp->debugid); } else { - T_LOG("gap: %gs at %llu - %llu on %d: %#x", - (double)delta_ns / 1e9, prev_timestamp, - tp->timestamp, tp->cpuid, tp->debugid); + T_LOG("gap: %gs at %llu - %llu on %d: %#x", + (double)delta_ns / 1e9, prev_timestamp, + tp->timestamp, tp->cpuid, tp->debugid); } - /* - * These gaps are ok -- they appear after CPUs are brought back - * up. - */ + /* + * These gaps are ok -- they appear after CPUs are brought back + * up. 
+ */ #define INTERRUPT (0x1050000) #define PERF_CPU_IDLE (0x27001000) #define INTC_HANDLER (0x5000004) #define DECR_TRAP (0x1090000) - uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK; - if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE && - eventid != INTC_HANDLER && eventid != DECR_TRAP) { - unsigned int lost_events = TRACE_LOST_EVENTS; - T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events, - "gaps should end with lost events"); + uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK; + if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE && + eventid != INTC_HANDLER && eventid != DECR_TRAP) { + unsigned int lost_events = TRACE_LOST_EVENTS; + T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events, + "gaps should end with lost events"); } } @@ -880,31 +880,31 @@ T_DECL(stress, "emit events on all but one CPU with a small buffer", __block uint64_t last_write = 0; ktrace_events_single_paired(sread, TRACE_WRITING_EVENTS, - ^(struct trace_point *start, struct trace_point *end) { + ^(struct trace_point *start, struct trace_point *end) { uint64_t delta_ns; int converror = ktrace_convert_timestamp_to_nanoseconds(sread, - start->timestamp - last_write, &delta_ns); + start->timestamp - last_write, &delta_ns); T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns"); uint64_t dur_ns; converror = ktrace_convert_timestamp_to_nanoseconds(sread, - end->timestamp - start->timestamp, &dur_ns); + end->timestamp - start->timestamp, &dur_ns); T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns"); T_LOG("write: %llu (+%gs): %gus on %d: %lu events", start->timestamp, - (double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1); + (double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1); last_write = end->timestamp; }); ktrace_set_completion_handler(sread, ^{ uint64_t duration_ns = 0; if (last_timestamp) { - int converror = ktrace_convert_timestamp_to_nanoseconds(sread, - last_timestamp - first_timestamp, &duration_ns); - T_QUIET; T_ASSERT_POSIX_ZERO(converror, - "convert timestamp to ns"); - T_LOG("file was %gs long, %llu events: %g events/msec/cpu", - (double)duration_ns / 1e9, nevents, - (double)nevents / ((double)duration_ns / 1e6) / ncpus); + int converror = ktrace_convert_timestamp_to_nanoseconds(sread, + last_timestamp - first_timestamp, &duration_ns); + T_QUIET; T_ASSERT_POSIX_ZERO(converror, + "convert timestamp to ns"); + T_LOG("file was %gs long, %llu events: %g events/msec/cpu", + (double)duration_ns / 1e9, nevents, + (double)nevents / ((double)duration_ns / 1e6) / ncpus); } (void)unlink(filepath); ktrace_session_destroy(sread); @@ -921,32 +921,32 @@ T_DECL(stress, "emit events on all but one CPU with a small buffer", #if 0 kperror = kperf_sample_set(1); T_ASSERT_POSIX_SUCCESS(kperror, - "started kperf timer sampling every %llu ns", TIMER_NS); + "started kperf timer sampling every %llu ns", TIMER_NS); #endif for (int i = 0; i < (ncpus - 1); i++) { int error = pthread_create(&threads[i], NULL, kdebug_abuser_thread, - (void *)(uintptr_t)i); + (void *)(uintptr_t)i); T_QUIET; T_ASSERT_POSIX_ZERO(error, - "pthread_create abuser thread %d", i); + "pthread_create abuser thread %d", i); } int error = ktrace_start_writing_file(s, filepath, - ktrace_compression_none, NULL, NULL); + ktrace_compression_none, NULL, NULL); T_ASSERT_POSIX_ZERO(error, "started writing ktrace to %s", filepath); T_SETUPEND; dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ABUSE_SECS * NSEC_PER_SEC), - dispatch_get_main_queue(), ^{ + dispatch_get_main_queue(), ^{ T_LOG("ending trace"); ktrace_end(s, 1); 
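/* Note: stopping the trace here hands control to the completion handler above, which replays the written file and expects any timestamp gap larger than GAP_THRESHOLD_NS to end in a TRACE_LOST_EVENTS event (interrupt, idle, interrupt-handler, and decrementer-trap events excepted). */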
continue_abuse = false; for (int i = 0; i < (ncpus - 1); i++) { - int joinerror = pthread_join(threads[i], NULL); - T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d", - i); + int joinerror = pthread_join(threads[i], NULL); + T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d", + i); } }); @@ -972,7 +972,7 @@ T_DECL(stress, "emit events on all but one CPU with a small buffer", * never see. */ T_DECL(round_trips, - "test sustained tracing with multiple round-trips through the kernel") + "test sustained tracing with multiple round-trips through the kernel") { ktrace_session_t s = ktrace_session_create(); T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session"); @@ -990,9 +990,9 @@ T_DECL(round_trips, ktrace_events_all(s, ^(__unused struct trace_point *tp) { events++; if (events % ROUND_TRIP_PERIOD == 0) { - T_LOG("emitting round-trip event %" PRIu64, emitted); - kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0); - emitted++; + T_LOG("emitting round-trip event %" PRIu64, emitted); + kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0); + emitted++; } }); @@ -1000,17 +1000,17 @@ T_DECL(round_trips, T_LOG("saw round-trip event after %" PRIu64 " events", events); seen++; if (seen >= ROUND_TRIPS_THRESHOLD) { - T_LOG("ending trace after seeing %" PRIu64 " events, " - "emitting %" PRIu64, seen, emitted); - ktrace_end(s, 1); + T_LOG("ending trace after seeing %" PRIu64 " events, " + "emitting %" PRIu64, seen, emitted); + ktrace_end(s, 1); } }); ktrace_set_completion_handler(s, ^{ T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD, - "emitted %" PRIu64 " round-trip events", emitted); + "emitted %" PRIu64 " round-trip events", emitted); T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD, - "saw %" PRIu64 " round-trip events", seen); + "saw %" PRIu64 " round-trip events", seen); ktrace_session_destroy(s); T_END; }); @@ -1019,8 +1019,8 @@ T_DECL(round_trips, T_ASSERT_POSIX_ZERO(error, "started tracing"); dispatch_after(dispatch_time(DISPATCH_TIME_NOW, - ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(), - ^{ + ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(), + ^{ T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS); ktrace_end(s, 0); }); @@ -1061,10 +1061,10 @@ T_DECL(event_coverage, "ensure events appear up to the end of tracing") }); dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, - 0, 0, dispatch_get_main_queue()); + 0, 0, dispatch_get_main_queue()); dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW, - HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC), - HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0); + HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC), + HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0); dispatch_source_set_cancel_handler(timer, ^{ dispatch_release(timer); }); @@ -1073,22 +1073,22 @@ T_DECL(event_coverage, "ensure events appear up to the end of tracing") heartbeats++; T_LOG("heartbeat %u at time %lld, seen %" PRIu64 " events, " - "current event time %lld", heartbeats, mach_absolute_time(), - events, current_timestamp); + "current event time %lld", heartbeats, mach_absolute_time(), + events, current_timestamp); if (current_timestamp > 0) { - T_EXPECT_GT(current_timestamp, last_timestamp, - "event timestamps should be increasing"); - T_QUIET; T_EXPECT_GT(events, last_events, - "number of events should be increasing"); + T_EXPECT_GT(current_timestamp, last_timestamp, + "event timestamps should be increasing"); + T_QUIET; T_EXPECT_GT(events, last_events, + "number of events should be increasing"); } last_timestamp = 
current_timestamp; last_events = events; if (heartbeats >= HEARTBEAT_COUNT) { - T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT); - ktrace_end(s, 0); + T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT); + ktrace_end(s, 0); } }); @@ -1099,3 +1099,62 @@ T_DECL(event_coverage, "ensure events appear up to the end of tracing") dispatch_main(); } + +static unsigned int +set_nevents(unsigned int nevents) +{ + T_QUIET; + T_ASSERT_POSIX_SUCCESS(sysctl( + (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, (int)nevents }, 4, + NULL, 0, NULL, 0), "set kdebug buffer size"); + + T_QUIET; + T_ASSERT_POSIX_SUCCESS(sysctl( + (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETUP, (int)nevents }, 4, + NULL, 0, NULL, 0), "setup kdebug buffers"); + + kbufinfo_t bufinfo = { 0 }; + T_QUIET; + T_ASSERT_POSIX_SUCCESS(sysctl( + (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF }, 3, + &bufinfo, &(size_t){ sizeof(bufinfo) }, NULL, 0), + "get kdebug buffer size"); + + T_QUIET; + T_ASSERT_POSIX_SUCCESS(sysctl( + (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE }, 3, + NULL, 0, NULL, 0), + "remove kdebug buffers"); + + return (unsigned int)bufinfo.nkdbufs; +} + +T_DECL(set_buffer_size, "ensure large buffer sizes can be set") +{ + uint64_t memsize = 0; + T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &memsize, + &(size_t){ sizeof(memsize) }, NULL, 0), "get memory size"); + + /* + * Try to allocate up to one-eighth of available memory towards + * tracing. + */ + uint64_t maxevents_u64 = memsize / 8 / sizeof(kd_buf); + if (maxevents_u64 > UINT32_MAX) { + maxevents_u64 = UINT32_MAX; + } + unsigned int maxevents = (unsigned int)maxevents_u64; + + unsigned int minevents = set_nevents(0); + T_ASSERT_GT(minevents, 0, "saw non-zero minimum event count of %u", + minevents); + + unsigned int step = ((maxevents - minevents - 1) / 4); + T_ASSERT_GT(step, 0, "stepping by %u events", step); + + for (unsigned int i = minevents + step; i < maxevents; i += step) { + unsigned int actualevents = set_nevents(i); + T_ASSERT_GE(actualevents, i - minevents, + "%u events in kernel when %u requested", actualevents, i); + } +} diff --git a/tests/kernel_mtx_perf.c b/tests/kernel_mtx_perf.c index 396104fd2..76af0603a 100644 --- a/tests/kernel_mtx_perf.c +++ b/tests/kernel_mtx_perf.c @@ -20,10 +20,10 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.kernel_mtx_perf_test")); #define ITER 100000 -#define TEST_MTX_MAX_STATS 8 +#define TEST_MTX_MAX_STATS 8 -#define TEST_MTX_LOCK_STATS 0 -#define TEST_MTX_UNLOCK_MTX_STATS 6 +#define TEST_MTX_LOCK_STATS 0 +#define TEST_MTX_UNLOCK_MTX_STATS 6 static void test_from_kernel_lock_unlock_contended(void) @@ -39,7 +39,7 @@ test_from_kernel_lock_unlock_contended(void) size = 1000; buff = calloc(size, sizeof(char)); - T_QUIET;T_ASSERT_NOTNULL(buff, "Allocating buffer fo sysctl"); + T_QUIET; T_ASSERT_NOTNULL(buff, "Allocating buffer for sysctl"); snprintf(iter, sizeof(iter), "%d", ITER); ret = sysctlbyname("kern.test_mtx_contended", buff, &size, iter, sizeof(iter)); @@ -49,7 +49,9 @@ test_from_kernel_lock_unlock_contended(void) /* first line is "STATS INNER LOOP" */ buff_p = buff; - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; /* @@ -61,8 +63,8 @@ test_from_kernel_lock_unlock_contended(void) avg_p = strstr(buff_p, "avg "); /* contended test records statistics only for lock/unlock for now */ - if (i == TEST_MTX_LOCK_STATS || i == TEST_MTX_UNLOCK_MTX_STATS ) { - T_QUIET;T_ASSERT_NOTNULL(avg_p, "contended %i average not found", i); + if (i == TEST_MTX_LOCK_STATS || i ==
TEST_MTX_UNLOCK_MTX_STATS) { + T_QUIET; T_ASSERT_NOTNULL(avg_p, "contended %i average not found", i); sscanf(avg_p, "avg %llu", &avg); name = strstr(buff_p, "TEST_MTX_"); @@ -79,31 +81,37 @@ test_from_kernel_lock_unlock_contended(void) } buff_p = avg_p; - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; - } - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; /* next line is "STATS OUTER LOOP" */ - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; /* contended test records statistics only for lock/unlock for now */ avg_p = strstr(buff_p, "run time "); - T_QUIET;T_ASSERT_NOTNULL(avg_p, "contended %d loop run time not found", 0); + T_QUIET; T_ASSERT_NOTNULL(avg_p, "contended %d loop run time not found", 0); sscanf(avg_p, "run time %llu", &run); avg_p = strstr(buff_p, "total time "); - T_QUIET;T_ASSERT_NOTNULL(avg_p, "uncontended %d loop total time not found", 0); + T_QUIET; T_ASSERT_NOTNULL(avg_p, "uncontended %d loop total time not found", 0); sscanf(avg_p, "total time %llu", &tot); - if (run < tot) + if (run < tot) { avg = run; - else + } else { avg = tot; + } name = strstr(buff_p, "TEST_MTX_"); end_name = strstr(buff_p, "_STATS"); @@ -115,7 +123,7 @@ test_from_kernel_lock_unlock_contended(void) snprintf(name_string, name_size + strlen(pre_string), "%s%s", pre_string, &name[strlen("TEST_MTX_")]); pre_string = "avg time contended loop "; snprintf(avg_name_string, name_size + strlen(pre_string), "%s%s", pre_string, &name[strlen("TEST_MTX_")]); - T_PERF(name_string, avg/ITER, "ns", avg_name_string); + T_PERF(name_string, avg / ITER, "ns", avg_name_string); free(buff); } @@ -134,7 +142,7 @@ test_from_kernel_lock_unlock_uncontended(void) size = 2000; buff = calloc(size, sizeof(char)); - T_QUIET;T_ASSERT_NOTNULL(buff, "Allocating buffer fo sysctl"); + T_QUIET; T_ASSERT_NOTNULL(buff, "Allocating buffer for sysctl"); snprintf(iter, sizeof(iter), "%d", ITER); ret = sysctlbyname("kern.test_mtx_uncontended", buff, &size, iter, sizeof(iter)); @@ -144,7 +152,9 @@ test_from_kernel_lock_unlock_uncontended(void) /* first line is "STATS INNER LOOP" */ buff_p = buff; - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; /* @@ -154,7 +164,7 @@ test_from_kernel_lock_unlock_uncontended(void) */ for (i = 0; i < TEST_MTX_MAX_STATS; i++) { avg_p = strstr(buff_p, "avg "); - T_QUIET;T_ASSERT_NOTNULL(avg_p, "uncontended %i average not found", i); + T_QUIET; T_ASSERT_NOTNULL(avg_p, "uncontended %i average not found", i); sscanf(avg_p, "avg %llu", &avg); name = strstr(buff_p, "TEST_MTX_"); @@ -170,15 +180,21 @@ test_from_kernel_lock_unlock_uncontended(void) T_PERF(name_string, avg, "ns", avg_name_string); buff_p = avg_p; - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; } - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; /* next line is "STATS OUTER LOOP" */ - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; /* @@ -188,17 +204,18 @@ test_from_kernel_lock_unlock_uncontended(void) */ for (i = 0; i < TEST_MTX_MAX_STATS - 2; i++) { avg_p = strstr(buff_p, "run time "); - T_QUIET;T_ASSERT_NOTNULL(avg_p, "uncontended %d loop run time not found", i); + T_QUIET; T_ASSERT_NOTNULL(avg_p, "uncontended %d loop run time not found", i); sscanf(avg_p, "run time %llu", &run); avg_p = strstr(buff_p, "total time "); - T_QUIET;T_ASSERT_NOTNULL(avg_p, "uncontended %d
loop total time not found", i); + T_QUIET; T_ASSERT_NOTNULL(avg_p, "uncontended %d loop total time not found", i); sscanf(avg_p, "total time %llu", &tot); - if (run < tot) + if (run < tot) { avg = run; - else + } else { avg = tot; + } name = strstr(buff_p, "TEST_MTX_"); end_name = strstr(buff_p, "_STATS"); @@ -210,12 +227,13 @@ test_from_kernel_lock_unlock_uncontended(void) snprintf(name_string, name_size + strlen(pre_string), "%s%s", pre_string, &name[strlen("TEST_MTX_")]); pre_string = "avg time uncontended loop "; snprintf(avg_name_string, name_size + strlen(pre_string), "%s%s", pre_string, &name[strlen("TEST_MTX_")]); - T_PERF(name_string, avg/ITER, "ns", avg_name_string); + T_PERF(name_string, avg / ITER, "ns", avg_name_string); buff_p = avg_p; - while( *buff_p != '\n' ) buff_p++; + while (*buff_p != '\n') { + buff_p++; + } buff_p++; - } free(buff); } @@ -244,12 +262,12 @@ fix_cpu_frequency(void) char str_val[10]; ret = sysctlbyname("machdep.cpu.brand_string", NULL, &len, NULL, 0); - T_QUIET;T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname machdep.cpu.brand_string"); + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname machdep.cpu.brand_string"); - buffer = malloc(len+2); + buffer = malloc(len + 2); ret = sysctlbyname("machdep.cpu.brand_string", buffer, &len, NULL, 0); - T_QUIET;T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname machdep.cpu.brand_string"); - buffer[len+1] = '\0'; + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname machdep.cpu.brand_string"); + buffer[len + 1] = '\0'; cpu_freq = strstr(buffer, "CPU @ "); if (cpu_freq == NULL) { @@ -268,14 +286,14 @@ fix_cpu_frequency(void) } } - switch(scale){ + switch (scale) { case 'M': case 'm': nom_freq = (int) val; break; case 'G': case 'g': - nom_freq = (int) (val*1000); + nom_freq = (int) (val * 1000); break; default: T_LOG("Could not fix frequency, scale field is %c\n", scale); @@ -295,12 +313,11 @@ out: } T_DECL(kernel_mtx_perf_test, - "Kernel mutex performance test", - T_META_ASROOT(YES), T_META_CHECK_LEAKS(NO)) + "Kernel mutex performance test", + T_META_ASROOT(YES), T_META_CHECK_LEAKS(NO)) { fix_cpu_frequency(); test_from_kernel_lock_unlock_uncontended(); test_from_kernel_lock_unlock_contended(); } - diff --git a/tests/kevent_continuous_time.c b/tests/kevent_continuous_time.c index 607cce682..d9cd1cb1c 100644 --- a/tests/kevent_continuous_time.c +++ b/tests/kevent_continuous_time.c @@ -18,7 +18,7 @@ extern char **environ; static mach_timebase_info_data_t tb_info; -static const uint64_t one_mil = 1000LL*1000LL; +static const uint64_t one_mil = 1000LL * 1000LL; #define tick_to_ns(ticks) (((ticks) * tb_info.numer) / (tb_info.denom)) #define tick_to_ms(ticks) (tick_to_ns(ticks)/one_mil) @@ -26,7 +26,9 @@ static const uint64_t one_mil = 1000LL*1000LL; #define ns_to_tick(ns) ((ns) * tb_info.denom / tb_info.numer) #define ms_to_tick(ms) (ns_to_tick((ms) * one_mil)) -static uint64_t time_delta_ms(void){ +static uint64_t +time_delta_ms(void) +{ uint64_t abs_now = mach_absolute_time(); uint64_t cnt_now = mach_continuous_time();; return tick_to_ms(cnt_now) - tick_to_ms(abs_now); @@ -34,8 +36,12 @@ static uint64_t time_delta_ms(void){ static int run_sleep_tests = 0; -static int trigger_sleep(int for_secs) { - if(!run_sleep_tests) return 0; +static int +trigger_sleep(int for_secs) +{ + if (!run_sleep_tests) { + return 0; + } // sleep for 1 seconds each iteration char buf[10]; @@ -46,13 +52,13 @@ static int trigger_sleep(int for_secs) { int spawn_ret, pid; char *const pmset1_args[] = {"/usr/bin/pmset", "relative", "wake", buf, NULL}; 
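/* Note for readers: the pmset(1) calls below simulate a sleep/wake cycle. "relative wake <secs>" schedules a wake-up that many seconds out and "sleepnow" puts the system to sleep immediately, so mach_continuous_time() advances past mach_absolute_time() while the system is asleep. */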
T_ASSERT_POSIX_ZERO((spawn_ret = posix_spawn(&pid, pmset1_args[0], NULL, NULL, pmset1_args, environ)), NULL); - + T_ASSERT_EQ(waitpid(pid, &spawn_ret, 0), pid, NULL); T_ASSERT_EQ(spawn_ret, 0, NULL); char *const pmset2_args[] = {"/usr/bin/pmset", "sleepnow", NULL}; T_ASSERT_POSIX_ZERO((spawn_ret = posix_spawn(&pid, pmset2_args[0], NULL, NULL, pmset2_args, environ)), NULL); - + T_ASSERT_EQ(waitpid(pid, &spawn_ret, 0), pid, NULL); T_ASSERT_EQ(spawn_ret, 0, NULL); @@ -62,22 +68,26 @@ static int trigger_sleep(int for_secs) { // waits up to 30 seconds for system to sleep // returns number of seconds it took for sleep to be entered // or -1 if sleep wasn't accomplished -static int wait_for_sleep() { - if(!run_sleep_tests) return 0; +static int +wait_for_sleep() +{ + if (!run_sleep_tests) { + return 0; + } uint64_t before_diff = time_delta_ms(); - - for(int i = 0; i < 30; i++) { + + for (int i = 0; i < 30; i++) { uint64_t after_diff = time_delta_ms(); // on OSX, there's enough latency between calls to MCT and MAT // when the system is going down for sleep for values to diverge a few ms - if(llabs((int64_t)before_diff - (int64_t)after_diff) > 2) { + if (llabs((int64_t)before_diff - (int64_t)after_diff) > 2) { return i + 1; } - + sleep(1); - T_LOG("waited %d seconds for sleep...", i+1); + T_LOG("waited %d seconds for sleep...", i + 1); } return -1; } @@ -149,7 +159,7 @@ T_DECL(kevent_continuous_time_absolute, "kevent(EVFILT_TIMER with NOTE_MACH_CONT struct timeval tv; gettimeofday(&tv, NULL); int64_t nowus = (int64_t)tv.tv_sec * USEC_PER_SEC + (int64_t)tv.tv_usec; - int64_t fire_at = (3*USEC_PER_SEC) + nowus; + int64_t fire_at = (3 * USEC_PER_SEC) + nowus; uint64_t cnt_now = mach_continuous_time(); uint64_t cnt_then = cnt_now + ms_to_tick(3000); @@ -231,10 +241,9 @@ T_DECL(kevent_continuous_time_pops, "kevent(EVFILT_TIMER with NOTE_MACH_CONTINUO trigger_sleep(2); int sleep_secs = 0; - if(run_sleep_tests) { + if (run_sleep_tests) { sleep_secs = wait_for_sleep(); - } - else { + } else { // simulate 2 seconds of system "sleep" sleep(2); } @@ -242,7 +251,7 @@ T_DECL(kevent_continuous_time_pops, "kevent(EVFILT_TIMER with NOTE_MACH_CONTINUO uint64_t cnt_now = mach_continuous_time(); uint64_t ms_elapsed = tick_to_ms(cnt_now - cnt_then); - if(run_sleep_tests) { + if (run_sleep_tests) { T_ASSERT_LT(llabs((int64_t)ms_elapsed - 2000LL), 500LL, "slept for %llums, expected 2000ms (astris is connected?)", ms_elapsed); } diff --git a/tests/kevent_pty.c b/tests/kevent_pty.c index 2fad75e6c..734de7902 100644 --- a/tests/kevent_pty.c +++ b/tests/kevent_pty.c @@ -16,8 +16,8 @@ #include T_GLOBAL_META( - T_META_NAMESPACE("xnu.kevent"), - T_META_CHECK_LEAKS(false)); + T_META_NAMESPACE("xnu.kevent"), + T_META_CHECK_LEAKS(false)); #define TIMEOUT_SECS 10 @@ -31,7 +31,7 @@ child_tty_client(void) ssize_t bytes_wr; src = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, - (uintptr_t)STDIN_FILENO, 0, NULL); + (uintptr_t)STDIN_FILENO, 0, NULL); if (!src) { exit(1); } @@ -73,8 +73,8 @@ pty_master(void) } T_DECL(pty_master_teardown, - "try removing a TTY master out from under a PTY slave holding a kevent", - T_META_ASROOT(true)) + "try removing a TTY master out from under a PTY slave holding a kevent", + T_META_ASROOT(true)) { __block pid_t master_pid; char buf[16] = ""; @@ -95,8 +95,8 @@ T_DECL(pty_master_teardown, __builtin_unreachable(); } T_ASSERT_POSIX_SUCCESS(master_pid, - "forked child master PTY with pid %d, at pty %s", master_pid, - pty_filename); + "forked child master PTY with pid %d, at pty %s", master_pid, + 
pty_filename); close(child_ready[1]); @@ -209,16 +209,16 @@ redispatch(dispatch_group_t grp, dispatch_source_type_t type, int fd) __block void (^redispatch_blk)(void) = Block_copy(^{ if (iters++ > ATTACH_ITERATIONS) { - return; + return; } else if (iters == ATTACH_ITERATIONS) { - dispatch_group_leave(grp); - T_PASS("created %d %s sources on busy PTY", iters, - type == DISPATCH_SOURCE_TYPE_READ ? "read" : "write"); + dispatch_group_leave(grp); + T_PASS("created %d %s sources on busy PTY", iters, + type == DISPATCH_SOURCE_TYPE_READ ? "read" : "write"); } dispatch_source_t src = dispatch_source_create( - type, (uintptr_t)fd, 0, - dispatch_get_main_queue()); + type, (uintptr_t)fd, 0, + dispatch_get_main_queue()); dispatch_source_set_event_handler(src, ^{ dispatch_cancel(src); @@ -234,18 +234,18 @@ redispatch(dispatch_group_t grp, dispatch_source_type_t type, int fd) } T_DECL(attach_while_tty_wakeups, - "try to attach knotes while a TTY is getting wakeups") + "try to attach knotes while a TTY is getting wakeups") { dispatch_group_t grp = dispatch_group_create(); T_SETUPBEGIN; T_ASSERT_POSIX_SUCCESS(openpty(&attach_master, &attach_slave, NULL, NULL, - NULL), NULL); + NULL), NULL); T_ASSERT_POSIX_ZERO(pthread_create(&reader, NULL, reader_thread, - (void *)(uintptr_t)attach_master), NULL); + (void *)(uintptr_t)attach_master), NULL); T_ASSERT_POSIX_ZERO(pthread_create(&writer, NULL, writer_thread, - (void *)(uintptr_t)attach_slave), NULL); + (void *)(uintptr_t)attach_slave), NULL); T_ATEND(join_threads); T_SETUPEND; @@ -261,7 +261,7 @@ T_DECL(attach_while_tty_wakeups, } T_DECL(master_read_data_set, - "check that the data is set on read sources of master fds") + "check that the data is set on read sources of master fds") { int master = -1, slave = -1; @@ -271,12 +271,12 @@ T_DECL(master_read_data_set, T_QUIET; T_ASSERT_GE(slave, 0, "slave fd is valid"); dispatch_source_t src = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, - (uintptr_t)master, 0, dispatch_get_main_queue()); + (uintptr_t)master, 0, dispatch_get_main_queue()); dispatch_source_set_event_handler(src, ^{ unsigned long len = dispatch_source_get_data(src); T_EXPECT_GT(len, (unsigned long)0, - "the amount of data to read was set for the master source"); + "the amount of data to read was set for the master source"); dispatch_cancel(src); }); @@ -292,7 +292,9 @@ T_DECL(master_read_data_set, char buf[512] = ""; int ret = 0; - while ((ret = write(slave, buf, sizeof(buf)) == -1 && errno == EAGAIN)); + while ((ret = write(slave, buf, sizeof(buf)) == -1 && errno == EAGAIN)) { + ; + } T_ASSERT_POSIX_SUCCESS(ret, "slave wrote data"); dispatch_main(); diff --git a/tests/kevent_qos.c b/tests/kevent_qos.c index df021e3ac..9bbb7d62e 100644 --- a/tests/kevent_qos.c +++ b/tests/kevent_qos.c @@ -35,7 +35,7 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.kevent_qos")); #define HELPER_TIMEOUT_SECS (15) #define ENV_VAR_QOS (3) -static const char *qos_env[ENV_VAR_QOS] = {"XNU_TEST_QOS_BO", "XNU_TEST_QOS_QO", "XNU_TEST_QOS_AO"}; +static const char *qos_env[ENV_VAR_QOS] = {"XNU_TEST_QOS_BO", "XNU_TEST_QOS_QO", "XNU_TEST_QOS_AO"}; static const char *qos_name_env[ENV_VAR_QOS] = {"XNU_TEST_QOS_NAME_BO", "XNU_TEST_QOS_NAME_QO", "XNU_TEST_QOS_NAME_AO"}; #define ENV_VAR_FUNCTION (1) @@ -88,30 +88,30 @@ get_user_promotion_basepri(void) mach_port_t thread_port = pthread_mach_thread_np(pthread_self()); kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE, - (thread_policy_t)&thread_policy, &count, &get_default); + (thread_policy_t)&thread_policy, &count, 
&get_default); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get"); return thread_policy.thps_user_promotion_basepri; } #define EXPECT_QOS_EQ(qos, ...) do { \ - if ((qos) == QOS_CLASS_USER_INTERACTIVE) { \ - T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_USER_INITIATED, __VA_ARGS__); \ - T_EXPECT_EQ(47u, get_user_promotion_basepri(), __VA_ARGS__); \ - } else { \ - T_EXPECT_EFFECTIVE_QOS_EQ(qos, __VA_ARGS__); \ - } \ + if ((qos) == QOS_CLASS_USER_INTERACTIVE) { \ + T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_USER_INITIATED, __VA_ARGS__); \ + T_EXPECT_EQ(47u, get_user_promotion_basepri(), __VA_ARGS__); \ + } else { \ + T_EXPECT_EFFECTIVE_QOS_EQ(qos, __VA_ARGS__); \ + } \ } while (0) #define EXPECT_TEST_MSG(_ke) do { \ - struct kevent_qos_s *ke = _ke; \ - mach_msg_header_t *hdr = (mach_msg_header_t *)ke->ext[0]; \ - T_ASSERT_NOTNULL(hdr, "has a message"); \ - T_ASSERT_EQ(hdr->msgh_size, (uint32_t)sizeof(struct test_msg), "of the right size"); \ - struct test_msg *tmsg = (struct test_msg *)hdr; \ - if (tmsg->opts & MACH_SEND_PROPAGATE_QOS) { \ - T_EXPECT_EQ(tmsg->qos, ((uint32_t)(ke->ext[2] >> 32)), \ - "propagation works"); \ - } \ + struct kevent_qos_s *ke = _ke; \ + mach_msg_header_t *hdr = (mach_msg_header_t *)ke->ext[0]; \ + T_ASSERT_NOTNULL(hdr, "has a message"); \ + T_ASSERT_EQ(hdr->msgh_size, (uint32_t)sizeof(struct test_msg), "of the right size"); \ + struct test_msg *tmsg = (struct test_msg *)hdr; \ + if (tmsg->opts & MACH_SEND_PROPAGATE_QOS) { \ + T_EXPECT_EQ(tmsg->qos, ((uint32_t)(ke->ext[2] >> 32)), \ + "propagation works"); \ + } \ } while (0) /* @@ -122,8 +122,8 @@ static void workloop_cb_test_intransit(uint64_t *workloop_id __unused, void **eventslist, int *events) { T_LOG("Workloop handler workloop_cb_test_intransit called. " - "Will wait for %d seconds to make sure client enqueues the sync msg \n", - 2 * RECV_TIMEOUT_SECS); + "Will wait for %d seconds to make sure client enqueues the sync msg \n", + 2 * RECV_TIMEOUT_SECS); EXPECT_TEST_MSG(*eventslist); @@ -137,7 +137,7 @@ workloop_cb_test_intransit(uint64_t *workloop_id __unused, void **eventslist, in /* The effective Qos should be the one expected after override */ EXPECT_QOS_EQ(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], - "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); + "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); *events = 0; T_END; @@ -159,7 +159,7 @@ workloop_cb_test_sync_send(uint64_t *workloop_id __unused, void **eventslist, in /* The effective Qos should be the one expected after override */ EXPECT_QOS_EQ(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], - "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); + "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); *events = 0; T_END; @@ -185,7 +185,7 @@ workloop_cb_test_sync_send_and_enable(uint64_t *workloop_id, struct kevent_qos_s /* The effective Qos should be the one expected after override */ EXPECT_QOS_EQ(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], - "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); + "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); /* Snapshot the current override priority */ override_priority = get_user_promotion_basepri(); @@ -200,8 +200,8 @@ workloop_cb_test_sync_send_and_enable(uint64_t *workloop_id, struct kevent_qos_s */ reenable_priority = get_user_promotion_basepri(); 
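/* Note: re-enabling the knote is expected to hand the sync IPC push back to the kevent, so the user promotion basepri sampled here should be lower than the snapshot taken before the enable; that is what the expectation below checks. */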
T_EXPECT_LT(reenable_priority, override_priority, - "thread's current override priority %d should be less than override priority prior to enabling knote %d", - reenable_priority, override_priority); + "thread's current override priority %d should be less than override priority prior to enabling knote %d", + reenable_priority, override_priority); *events = 0; T_END; @@ -228,7 +228,7 @@ workloop_cb_test_send_two_sync(uint64_t *workloop_id __unused, struct kevent_qos if (send_two_sync_handler_called == 0) { /* The effective Qos should be the one expected after override */ EXPECT_QOS_EQ(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], - "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); + "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); /* Enable the knote to get 2nd message */ struct kevent_qos_s *kev = *eventslist; @@ -237,7 +237,7 @@ workloop_cb_test_send_two_sync(uint64_t *workloop_id __unused, struct kevent_qos *events = 1; } else { EXPECT_QOS_EQ(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], - "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_BEFORE_OVERRIDE]); + "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_BEFORE_OVERRIDE]); *events = 0; T_END; } @@ -267,13 +267,13 @@ workloop_cb_test_two_send_and_destroy(uint64_t *workloop_id __unused, struct kev /* The effective Qos should be the one expected after override */ EXPECT_QOS_EQ(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], - "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); + "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); sleep(SEND_TIMEOUT_SECS); /* Special reply port should have been destroyed, check Qos again */ EXPECT_QOS_EQ(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], - "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_BEFORE_OVERRIDE]); + "dispatch_source event handler QoS should be %s", g_expected_qos_name[ENV_QOS_BEFORE_OVERRIDE]); two_send_and_destroy_test_passed = TRUE; } else { @@ -303,7 +303,7 @@ get_reply_port(struct kevent_qos_s *kev) T_QUIET; T_ASSERT_NOTNULL(hdr, "msg hdr"); reply_port = hdr->msgh_remote_port; - T_QUIET;T_ASSERT_TRUE(MACH_PORT_VALID(reply_port), "reply port valid"); + T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(reply_port), "reply port valid"); kr = mach_port_type(mach_task_self(), reply_port, &type); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_type"); T_QUIET; T_ASSERT_TRUE(type & MACH_PORT_TYPE_SEND_ONCE, "send once received"); @@ -319,22 +319,22 @@ send_reply(mach_port_t reply_port) struct { mach_msg_header_t header; } send_msg = { - .header = { - .msgh_remote_port = reply_port, - .msgh_local_port = MACH_PORT_NULL, - .msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0), - .msgh_id = 0x100, - .msgh_size = sizeof(send_msg), + .header = { + .msgh_remote_port = reply_port, + .msgh_local_port = MACH_PORT_NULL, + .msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0), + .msgh_id = 0x100, + .msgh_size = sizeof(send_msg), }, }; kr = mach_msg(&(send_msg.header), - MACH_SEND_MSG, - send_msg.header.msgh_size, - 0, - MACH_PORT_NULL, - 0, - 0); + MACH_SEND_MSG, + send_msg.header.msgh_size, + 0, + MACH_PORT_NULL, + 0, + 0); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "server mach_msg"); } @@ -342,16 +342,14 @@ send_reply(mach_port_t reply_port) static void populate_kevent(struct kevent_qos_s *kev, unsigned long long port) { - memset(kev, 0, sizeof(struct 
kevent_qos_s)); kev->ident = port; kev->filter = EVFILT_MACHPORT; kev->flags = EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED; kev->fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | - MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | - MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)); + MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)); kev->data = 1; - } static void @@ -364,7 +362,7 @@ enable_kevent(uint64_t *workloop_id, unsigned long long port) struct kevent_qos_s kev_err[] = {{ 0 }}; kr = kevent_id(*workloop_id, &kev, 1, kev_err, 1, NULL, - NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS | KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST); + NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS | KEVENT_FLAG_DYNAMIC_KQ_MUST_EXIST); T_QUIET; T_ASSERT_POSIX_SUCCESS(kr, "kevent_id"); } @@ -374,7 +372,6 @@ enable_kevent(uint64_t *workloop_id, unsigned long long port) static void workloop_cb_test_sync_send_reply(uint64_t *workloop_id __unused, struct kevent_qos_s **eventslist, int *events) { - T_LOG("Workloop handler workloop_cb_test_sync_send_reply called"); if (geteuid() != 0) { @@ -427,7 +424,6 @@ workloop_cb_test_sync_send_deallocate(uint64_t *workloop_id __unused, struct kev static void workloop_cb_test_sync_send_reply_kevent(uint64_t *workloop_id, struct kevent_qos_s **eventslist, int *events) { - T_LOG("Workloop handler workloop_cb_test_sync_send_reply_kevent called"); if (geteuid() != 0) { @@ -456,7 +452,6 @@ workloop_cb_test_sync_send_reply_kevent(uint64_t *workloop_id, struct kevent_qos static void workloop_cb_test_sync_send_reply_kevent_pthread(uint64_t *workloop_id __unused, struct kevent_qos_s **eventslist, int *events) { - T_LOG("Workloop handler workloop_cb_test_sync_send_reply_kevent_pthread called"); if (geteuid() != 0) { @@ -484,7 +479,6 @@ workloop_cb_test_sync_send_reply_kevent_pthread(uint64_t *workloop_id __unused, static void workloop_cb_test_sync_send_kevent_reply(uint64_t *workloop_id, struct kevent_qos_s **eventslist, int *events) { - T_LOG("workloop handler workloop_cb_test_sync_send_kevent_reply called"); if (geteuid() != 0) { @@ -514,7 +508,6 @@ workloop_cb_test_sync_send_kevent_reply(uint64_t *workloop_id, struct kevent_qos static void workloop_cb_test_sync_send_do_nothing(uint64_t *workloop_id __unused, struct kevent_qos_s **eventslist, int *events) { - T_LOG("Workloop handler workloop_cb_test_sync_send_do_nothing called"); if (geteuid() != 0) { @@ -537,7 +530,6 @@ workloop_cb_test_sync_send_do_nothing(uint64_t *workloop_id __unused, struct kev static void workloop_cb_test_sync_send_do_nothing_kevent_pthread(uint64_t *workloop_id __unused, struct kevent_qos_s **eventslist, int *events) { - T_LOG("Workloop handler workloop_cb_test_sync_send_do_nothing_kevent_pthread called"); if (geteuid() != 0) { @@ -561,7 +553,6 @@ workloop_cb_test_sync_send_do_nothing_kevent_pthread(uint64_t *workloop_id __unu static void workloop_cb_test_sync_send_do_nothing_exit(uint64_t *workloop_id __unused, struct kevent_qos_s **eventslist, __unused int *events) { - T_LOG("workloop handler workloop_cb_test_sync_send_do_nothing_exit called"); if (geteuid() != 0) { @@ -583,7 +574,6 @@ workloop_cb_test_sync_send_do_nothing_exit(uint64_t *workloop_id __unused, struc static void workloop_cb_test_sync_send_reply_kevent_reply_kevent(uint64_t *workloop_id __unused, struct kevent_qos_s **eventslist, int *events) { - T_LOG("Workloop handler workloop_cb_test_sync_send_reply_kevent_reply_kevent called"); if (geteuid() 
!= 0) { @@ -636,7 +626,6 @@ workloop_cb_test_sync_send_kevent_reply_reply_kevent(uint64_t *workloop_id, stru send_reply(reply_port); *events = 0; - } else { /* send reply */ send_reply(reply_port); @@ -710,9 +699,7 @@ workloop_cb_test_sync_send_reply_kevent_kevent_reply(uint64_t *workloop_id, stru populate_kevent(kev, kev->ident); *events = 1; - } else { - /* Enable the knote */ enable_kevent(workloop_id, kev->ident); /* send reply */ @@ -732,7 +719,7 @@ get_server_port(void) { mach_port_t port; kern_return_t kr = bootstrap_check_in(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &port); + KEVENT_QOS_SERVICE_NAME, &port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in"); return port; } @@ -743,15 +730,15 @@ env_set_qos(char **env, qos_class_t qos[], const char *qos_name[], const char *w int i; char *qos_str, *qos_name_str; for (i = 0; i < ENV_VAR_QOS; i++) { - T_QUIET; T_ASSERT_POSIX_SUCCESS(asprintf(&qos_str, "%s=%d", qos_env[i] , qos[i]), - NULL); + T_QUIET; T_ASSERT_POSIX_SUCCESS(asprintf(&qos_str, "%s=%d", qos_env[i], qos[i]), + NULL); T_QUIET; T_ASSERT_POSIX_SUCCESS( asprintf(&qos_name_str, "%s=%s", qos_name_env[i], qos_name[i]), NULL); env[2 * i] = qos_str; env[2 * i + 1] = qos_name_str; } T_QUIET; T_ASSERT_POSIX_SUCCESS(asprintf(&env[2 * i], "%s=%s", wl_function_name, wl_function), - NULL); + NULL); env[2 * i + 1] = NULL; } @@ -768,10 +755,10 @@ environ_get_qos(qos_class_t qos[], const char *qos_name[], const char **wl_funct unsigned long qos_l = strtoul(qos_str, &qos_end, 10); T_QUIET; T_ASSERT_EQ(*qos_end, '\0', "getenv(%s) = '%s' should be an " - "integer", qos_env[i], qos_str); + "integer", qos_env[i], qos_str); T_QUIET; T_ASSERT_LT(qos_l, (unsigned long)100, "getenv(%s) = '%s' should " - "be less than 100", qos_env[i], qos_str); + "be less than 100", qos_env[i], qos_str); qos[i] = (qos_class_t)qos_l; qos_name[i] = getenv(qos_name_env[i]); @@ -789,12 +776,12 @@ create_pthpriority_voucher(mach_msg_priority_t qos) mach_voucher_t voucher = MACH_PORT_NULL; kern_return_t ret; ipc_pthread_priority_value_t ipc_pthread_priority_value = - (ipc_pthread_priority_value_t)qos; + (ipc_pthread_priority_value_t)qos; mach_voucher_attr_raw_recipe_array_t recipes; mach_voucher_attr_raw_recipe_size_t recipe_size = 0; mach_voucher_attr_recipe_t recipe = - (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size]; + (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size]; recipe->key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY; recipe->command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE; @@ -806,9 +793,9 @@ create_pthpriority_voucher(mach_msg_priority_t qos) recipes = (mach_voucher_attr_raw_recipe_array_t)&voucher_buf[0]; ret = host_create_mach_voucher(mach_host_self(), - recipes, - recipe_size, - &voucher); + recipes, + recipe_size, + &voucher); T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client host_create_mach_voucher"); return voucher; @@ -825,21 +812,21 @@ send( kern_return_t ret = 0; struct test_msg send_msg = { - .header = { - .msgh_remote_port = send_port, - .msgh_local_port = reply_port, - .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, - reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0, - MACH_MSG_TYPE_MOVE_SEND, - MACH_MSGH_BITS_COMPLEX), - .msgh_id = 0x100, - .msgh_size = sizeof(send_msg), + .header = { + .msgh_remote_port = send_port, + .msgh_local_port = reply_port, + .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, + reply_port ? 
MACH_MSG_TYPE_MAKE_SEND_ONCE : 0, + MACH_MSG_TYPE_MOVE_SEND, + MACH_MSGH_BITS_COMPLEX), + .msgh_id = 0x100, + .msgh_size = sizeof(send_msg), }, - .body = { - .msgh_descriptor_count = 1, + .body = { + .msgh_descriptor_count = 1, }, - .port_descriptor = { - .name = msg_port, + .port_descriptor = { + .name = msg_port, .disposition = MACH_MSG_TYPE_MOVE_RECEIVE, .type = MACH_MSG_PORT_DESCRIPTOR, }, @@ -861,15 +848,15 @@ send( } ret = mach_msg(&(send_msg.header), - MACH_SEND_MSG | - MACH_SEND_TIMEOUT | - MACH_SEND_OVERRIDE| - ((reply_port ? MACH_SEND_SYNC_OVERRIDE : 0) | options), - send_msg.header.msgh_size, - 0, - MACH_PORT_NULL, - 10000, - 0); + MACH_SEND_MSG | + MACH_SEND_TIMEOUT | + MACH_SEND_OVERRIDE | + ((reply_port ? MACH_SEND_SYNC_OVERRIDE : 0) | options), + send_msg.header.msgh_size, + 0, + MACH_PORT_NULL, + 10000, + 0); T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client mach_msg"); } @@ -882,30 +869,30 @@ receive( kern_return_t ret = 0; struct test_msg rcv_msg = { - .header = { - .msgh_remote_port = MACH_PORT_NULL, - .msgh_local_port = rcv_port, - .msgh_size = sizeof(rcv_msg), + .header = { + .msgh_remote_port = MACH_PORT_NULL, + .msgh_local_port = rcv_port, + .msgh_size = sizeof(rcv_msg), }, }; T_LOG("Client: Starting sync receive\n"); ret = mach_msg(&(rcv_msg.header), - MACH_RCV_MSG | - MACH_RCV_TIMEOUT | - MACH_RCV_SYNC_WAIT, - 0, - rcv_msg.header.msgh_size, - rcv_port, - SEND_TIMEOUT_SECS * 1000, - notify_port); + MACH_RCV_MSG | + MACH_RCV_TIMEOUT | + MACH_RCV_SYNC_WAIT, + 0, + rcv_msg.header.msgh_size, + rcv_port, + SEND_TIMEOUT_SECS * 1000, + notify_port); return ret; } T_HELPER_DECL(qos_get_special_reply_port, - "Test get_special_reply_port and it's corner cases.") + "Test get_special_reply_port and its corner cases.") { mach_port_t special_reply_port; mach_port_t new_special_reply_port; @@ -933,7 +920,7 @@ qos_client_send_to_intransit(void *arg __unused) mach_port_t special_reply_port; kern_return_t kr = bootstrap_look_up(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &qos_send_port); + KEVENT_QOS_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); special_reply_port = thread_get_special_reply_port(); @@ -941,39 +928,39 @@ qos_client_send_to_intransit(void *arg __unused) /* Create a rcv right to send in a msg */ kr = mach_port_allocate(mach_task_self(), - MACH_PORT_RIGHT_RECEIVE, - &msg_port); + MACH_PORT_RIGHT_RECEIVE, + &msg_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client mach_port_allocate"); kr = mach_port_insert_right(mach_task_self(), - msg_port, - msg_port, - MACH_MSG_TYPE_MAKE_SEND); + msg_port, + msg_port, + MACH_MSG_TYPE_MAKE_SEND); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client mach_port_insert_right"); /* Send an empty msg on the port to fire the WL thread */ send(qos_send_port, MACH_PORT_NULL, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); /* Sleep 3 seconds for the server to start */ sleep(3); /* Send the message with msg port as in-transit port, this msg will not be dequeued */ send(qos_send_port, MACH_PORT_NULL, msg_port, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); /* Send 5 messages to msg port to make sure the port is full */ for (int i = 0; i < 5; i++) { send(msg_port, MACH_PORT_NULL, MACH_PORT_NULL, -
(uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); } T_LOG("Sent 5 msgs, now trying to send sync ipc message, which will block with a timeout\n"); /* Send the message to the in-transit port, it should block and override the rcv's workloop */ send(msg_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); T_LOG("Client done sending messages, now waiting for server to end the test"); T_ASSERT_FAIL("client timed out"); @@ -981,7 +968,7 @@ qos_client_send_to_intransit(void *arg __unused) } T_HELPER_DECL(qos_client_send_to_intransit_with_thr_pri, - "Send synchronous messages from a pri thread to an intransit port") + "Send synchronous messages from a pri thread to an intransit port") { thread_create_at_qos(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], qos_client_send_to_intransit); sleep(HELPER_TIMEOUT_SECS); @@ -992,7 +979,7 @@ thread_create_at_qos(qos_class_t qos, void * (*function)(void *)) { qos_class_t qos_thread; pthread_t thread; - pthread_attr_t attr; + pthread_attr_t attr; int ret; ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL); @@ -1000,13 +987,13 @@ thread_create_at_qos(qos_class_t qos, void * (*function)(void *)) T_LOG("set priority failed\n"); } - pthread_attr_init(&attr); - pthread_attr_set_qos_class_np(&attr, qos, 0); - pthread_create(&thread, &attr, function, NULL); + pthread_attr_init(&attr); + pthread_attr_set_qos_class_np(&attr, qos, 0); + pthread_create(&thread, &attr, function, NULL); T_LOG("pthread created\n"); pthread_get_qos_class_np(thread, &qos_thread, NULL); - T_EXPECT_EQ(qos_thread, (qos_class_t)qos, NULL); + T_EXPECT_EQ(qos_thread, (qos_class_t)qos, NULL); } static void * @@ -1017,10 +1004,10 @@ qos_send_and_sync_rcv(void *arg __unused) T_LOG("Client: from created thread\n"); T_EXPECT_EFFECTIVE_QOS_EQ(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], - "pthread QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); + "pthread QoS should be %s", g_expected_qos_name[ENV_QOS_AFTER_OVERRIDE]); kern_return_t kr = bootstrap_look_up(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &qos_send_port); + KEVENT_QOS_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); special_reply_port = thread_get_special_reply_port(); @@ -1028,10 +1015,10 @@ qos_send_and_sync_rcv(void *arg __unused) /* enqueue two messages to make sure that mqueue is not empty */ send(qos_send_port, MACH_PORT_NULL, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_QUEUE_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_QUEUE_OVERRIDE], 0, 0), 0); send(qos_send_port, MACH_PORT_NULL, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_QUEUE_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_QUEUE_OVERRIDE], 0, 0), 0); sleep(SEND_TIMEOUT_SECS); @@ -1046,7 +1033,7 @@ qos_send_and_sync_rcv(void *arg __unused) } T_HELPER_DECL(qos_client_send_sync_and_sync_rcv, - "Send messages and syncronously wait for rcv") + "Send messages and synchronously wait for rcv") { thread_create_at_qos(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], qos_send_and_sync_rcv); sleep(HELPER_TIMEOUT_SECS); @@ -1062,7 +1049,7 @@ qos_client_send_sync_msg_and_test_link(void *arg) unsigned long
expected_result = (unsigned long) arg; kr = bootstrap_look_up(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &qos_send_port); + KEVENT_QOS_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); /* start monitoring sync ipc link */ @@ -1071,7 +1058,7 @@ qos_client_send_sync_msg_and_test_link(void *arg) /* Send the message to msg port */ send(qos_send_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); /* * wait for the reply @@ -1084,15 +1071,17 @@ qos_client_send_sync_msg_and_test_link(void *arg) kr = mach_sync_ipc_link_monitoring_stop(special_reply_port, &in_effect); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_sync_ipc_link_monitoring_stop"); - if (!in_effect) + if (!in_effect) { T_LOG("Link was broken"); - else + } else { T_LOG("Link correct"); + } - if (expected_result == 1) + if (expected_result == 1) { T_ASSERT_TRUE(in_effect, "special reply port link after rcv"); - else + } else { T_ASSERT_FALSE(in_effect, "special reply port link after rcv"); + } T_END; } @@ -1106,7 +1095,7 @@ qos_client_send_2sync_msg_and_test_link(void *arg) unsigned long expected_result = (unsigned long) arg; kr = bootstrap_look_up(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &qos_send_port); + KEVENT_QOS_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); /* start monitoring sync ipc link */ @@ -1115,37 +1104,39 @@ qos_client_send_2sync_msg_and_test_link(void *arg) /* Send the first message to msg port */ send(qos_send_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); /* wait for the reply */ kr = receive(special_reply_port, qos_send_port); - T_QUIET;T_ASSERT_MACH_SUCCESS(kr, "receive"); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "receive"); /* Send the second message to msg port */ send(qos_send_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); /* wait for the reply */ kr = receive(special_reply_port, qos_send_port); - T_QUIET;T_ASSERT_MACH_SUCCESS(kr, "receive"); + T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "receive"); /* stop monitoring link */ kr = mach_sync_ipc_link_monitoring_stop(special_reply_port, &in_effect); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_sync_ipc_link_monitoring_stop"); - if (!in_effect) + if (!in_effect) { T_LOG("Link was broken"); - else + } else { T_LOG("Link correct"); + } - if (expected_result == 1) + if (expected_result == 1) { T_ASSERT_TRUE(in_effect, "special reply port link after rcv"); - else + } else { T_ASSERT_FALSE(in_effect, "special reply port link after rcv"); + } T_END; } T_HELPER_DECL(qos_client_send_sync_msg_with_link_check_correct_server, - "Send sync message, wait for reply and check sync ipc link") + "Send sync message, wait for reply and check sync ipc link") { pthread_t thread; pthread_attr_t attr; @@ -1158,7 +1149,7 @@ T_HELPER_DECL(qos_client_send_sync_msg_with_link_check_correct_server, } T_HELPER_DECL(qos_client_send_sync_msg_with_link_check_incorrect_server, - "Send sync message, wait for reply and check sync ipc link") + "Send sync message, wait for reply and check sync ipc link") { pthread_t thread; 
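/* Note: each link-check helper passes the expected outcome through the thread argument (1 means the sync IPC link should still be in effect after the receive, 0 means it should be broken); the spawned thread verifies this via mach_sync_ipc_link_monitoring_stop(). */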
pthread_attr_t attr; @@ -1171,7 +1162,7 @@ T_HELPER_DECL(qos_client_send_sync_msg_with_link_check_incorrect_server, } T_HELPER_DECL(qos_client_send_2sync_msg_with_link_check_correct_server, - "Send sync message, wait for reply and check sync ipc link") + "Send sync message, wait for reply and check sync ipc link") { pthread_t thread; pthread_attr_t attr; @@ -1184,7 +1175,7 @@ T_HELPER_DECL(qos_client_send_2sync_msg_with_link_check_correct_server, } T_HELPER_DECL(qos_client_send_2sync_msg_with_link_check_incorrect_server, - "Send sync message, wait for reply and check sync ipc link") + "Send sync message, wait for reply and check sync ipc link") { pthread_t thread; pthread_attr_t attr; @@ -1203,7 +1194,7 @@ qos_client_send_sync_msg(void *arg __unused) mach_port_t special_reply_port; kern_return_t kr = bootstrap_look_up(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &qos_send_port); + KEVENT_QOS_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); special_reply_port = thread_get_special_reply_port(); @@ -1211,7 +1202,7 @@ qos_client_send_sync_msg(void *arg __unused) /* Send the message to msg port */ send(qos_send_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); /* wait for the reply */ receive(special_reply_port, qos_send_port); @@ -1224,7 +1215,7 @@ qos_client_send_sync_msg(void *arg __unused) } T_HELPER_DECL(qos_client_send_sync_msg_with_pri, - "Send sync message and wait for reply") + "Send sync message and wait for reply") { thread_create_at_qos(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], qos_client_send_sync_msg); sleep(HELPER_TIMEOUT_SECS); @@ -1237,7 +1228,7 @@ qos_client_send_two_sync_msg_high_qos(void *arg __unused) mach_port_t special_reply_port; kern_return_t kr = bootstrap_look_up(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &qos_send_port); + KEVENT_QOS_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); special_reply_port = thread_get_special_reply_port(); @@ -1245,7 +1236,7 @@ qos_client_send_two_sync_msg_high_qos(void *arg __unused) /* Send the message to msg port */ send(qos_send_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], 0, 0), 0); /* wait for the reply */ receive(special_reply_port, qos_send_port); @@ -1264,7 +1255,7 @@ qos_client_send_two_sync_msg_low_qos(void *arg __unused) mach_port_t special_reply_port; kern_return_t kr = bootstrap_look_up(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &qos_send_port); + KEVENT_QOS_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); special_reply_port = thread_get_special_reply_port(); @@ -1272,7 +1263,7 @@ qos_client_send_two_sync_msg_low_qos(void *arg __unused) /* Send the message to msg port */ send(qos_send_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); /* wait for the reply */ receive(special_reply_port, qos_send_port); @@ -1285,7 +1276,7 @@ qos_client_send_two_sync_msg_low_qos(void *arg __unused) } T_HELPER_DECL(qos_client_send_two_sync_msg_with_thr_pri, - "Send messages sync msgs from 2 threads at given thread 
pri") + "Send messages sync msgs from 2 threads at given thread pri") { thread_create_at_qos(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], qos_client_send_two_sync_msg_high_qos); sleep(INTERMITTENT_TIMEOUT_SEC); @@ -1317,7 +1308,7 @@ qos_client_create_sepcial_reply_and_spawn_thread(void *arg __unused) mach_port_t special_reply_port; kern_return_t kr = bootstrap_look_up(bootstrap_port, - KEVENT_QOS_SERVICE_NAME, &qos_send_port); + KEVENT_QOS_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); special_reply_port = thread_get_special_reply_port(); @@ -1327,11 +1318,11 @@ qos_client_create_sepcial_reply_and_spawn_thread(void *arg __unused) /* Send an async message */ send(qos_send_port, MACH_PORT_NULL, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); /* Send the sync ipc message */ send(qos_send_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), 0); /* Create a new thread to send the sync message on our special reply port */ thread_create_at_qos(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], qos_client_destroy_other_threads_port); @@ -1346,7 +1337,7 @@ qos_client_create_sepcial_reply_and_spawn_thread(void *arg __unused) } T_HELPER_DECL(qos_client_send_two_msg_and_destroy, - "Send a message with another threads special reply port while that thread destroys the port") + "Send a message with another threads special reply port while that thread destroys the port") { thread_create_at_qos(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], qos_client_create_sepcial_reply_and_spawn_thread); sleep(HELPER_TIMEOUT_SECS); @@ -1370,8 +1361,8 @@ qos_client_send_complex_msg_to_service_port(void *arg __unused) T_LOG("Sending to the service port with a sync IPC"); send(svc_port, tsr_port, conn_port, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), - MACH_SEND_PROPAGATE_QOS); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), + MACH_SEND_PROPAGATE_QOS); receive(tsr_port, svc_port); @@ -1399,11 +1390,11 @@ qos_client_send_to_connection_then_service_port(void *arg __unused) T_LOG("Sending to the connection port with a sync IPC"); send(conn_port, tsr_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), - MACH_SEND_PROPAGATE_QOS); + (uint32_t)_pthread_qos_class_encode(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], 0, 0), + MACH_SEND_PROPAGATE_QOS); thread_create_at_qos(g_expected_qos[ENV_QOS_AFTER_OVERRIDE], - qos_client_send_complex_msg_to_service_port); + qos_client_send_complex_msg_to_service_port); receive(tsr_port, conn_port); @@ -1414,16 +1405,16 @@ qos_client_send_to_connection_then_service_port(void *arg __unused) } T_HELPER_DECL(qos_client_send_complex_msg_with_pri, - "Send a message with several ports causing links") + "Send a message with several ports causing links") { thread_create_at_qos(g_expected_qos[ENV_QOS_BEFORE_OVERRIDE], - qos_client_send_to_connection_then_service_port); + qos_client_send_to_connection_then_service_port); sleep(HELPER_TIMEOUT_SECS); } static void run_client_server(const char *server_name, const char *client_name, qos_class_t qos[], - const char *qos_name[], const char *wl_function) + const char *qos_name[], const char *wl_function) 
{ char *env[2 * ENV_VAR_QOS + ENV_VAR_FUNCTION + 1]; env_set_qos(env, qos, qos_name, wl_function); @@ -1435,7 +1426,7 @@ run_client_server(const char *server_name, const char *client_name, qos_class_t dt_helper_t helpers[] = { dt_launchd_helper_env("com.apple.xnu.test.kevent_qos.plist", - server_name, env), + server_name, env), dt_fork_helper(client_name) }; dt_run_helpers(helpers, 2, HELPER_TIMEOUT_SECS); @@ -1456,92 +1447,92 @@ expect_kevent_id_recv(mach_port_t port, qos_class_t qos[], const char *qos_name[ if (strcmp(wl_function, "workloop_cb_test_intransit") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_and_enable") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_and_enable, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_and_enable, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_send_two_sync") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_send_two_sync, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_send_two_sync, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_two_send_and_destroy") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_two_send_and_destroy, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_two_send_and_destroy, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_reply") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_deallocate") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_deallocate, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_deallocate, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_reply_kevent") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply_kevent, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply_kevent, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_reply_kevent_pthread") == 
0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply_kevent_pthread, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply_kevent_pthread, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_kevent_reply") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_kevent_reply, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_kevent_reply, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_do_nothing") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_do_nothing, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_do_nothing, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_do_nothing_kevent_pthread") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_do_nothing_kevent_pthread, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_do_nothing_kevent_pthread, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_do_nothing_exit") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_do_nothing_exit, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_do_nothing_exit, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_reply_kevent_reply_kevent") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply_kevent_reply_kevent, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply_kevent_reply_kevent, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_kevent_reply_reply_kevent") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_kevent_reply_reply_kevent, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_kevent_reply_reply_kevent, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_kevent_reply_kevent_reply") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_kevent_reply_kevent_reply, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_kevent_reply_kevent_reply, 0, 0), NULL); } else if (strcmp(wl_function, "workloop_cb_test_sync_send_reply_kevent_kevent_reply") == 0) { T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply_kevent_kevent_reply, 0, 0), NULL); + worker_cb, event_cb, + 
(pthread_workqueue_function_workloop_t)workloop_cb_test_sync_send_reply_kevent_kevent_reply, 0, 0), NULL); } else { T_ASSERT_FAIL("no workloop function specified \n"); } struct kevent_qos_s kev[] = {{ - .ident = port, - .filter = EVFILT_MACHPORT, - .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, - .fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | - MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | - MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)), - .data = 1, - .qos = (int32_t)_pthread_qos_class_encode(qos[ENV_QOS_QUEUE_OVERRIDE], 0, 0) - }}; + .ident = port, + .filter = EVFILT_MACHPORT, + .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, + .fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | + MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0)), + .data = 1, + .qos = (int32_t)_pthread_qos_class_encode(qos[ENV_QOS_QUEUE_OVERRIDE], 0, 0) + }}; struct kevent_qos_s kev_err[] = {{ 0 }}; /* Setup workloop for mach msg rcv */ r = kevent_id(25, kev, 1, kev_err, 1, NULL, - NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS); + NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS); T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "kevent_id"); T_QUIET; T_ASSERT_EQ(r, 0, "no errors returned from kevent_id"); @@ -1549,7 +1540,7 @@ expect_kevent_id_recv(mach_port_t port, qos_class_t qos[], const char *qos_name[ } T_HELPER_DECL(server_kevent_id, - "Reply with the QoS that a dispatch source event handler ran with") + "Reply with the QoS that a dispatch source event handler ran with") { qos_class_t qos[ENV_VAR_QOS]; const char *qos_name[ENV_VAR_QOS]; @@ -1559,17 +1550,17 @@ T_HELPER_DECL(server_kevent_id, expect_kevent_id_recv(get_server_port(), qos, qos_name, wl_function); sleep(HELPER_TIMEOUT_SECS); T_ASSERT_FAIL("should receive a message within %d seconds", - RECV_TIMEOUT_SECS); + RECV_TIMEOUT_SECS); } #define TEST_QOS(server_name, client_name, name, wl_function_name, qos_bo, qos_bo_name, qos_qo, qos_qo_name, qos_ao, qos_ao_name) \ T_DECL(server_kevent_id_##name, \ - "Event delivery at " qos_ao_name " QoS using a kevent_id", \ - T_META_ASROOT(YES)) \ + "Event delivery at " qos_ao_name " QoS using a kevent_id", \ + T_META_ASROOT(YES)) \ { \ - qos_class_t qos_array[ENV_VAR_QOS] = {qos_bo, qos_qo, qos_ao}; \ - const char *qos_name_array[ENV_VAR_QOS] = {qos_bo_name, qos_qo_name, qos_ao_name}; \ - run_client_server(server_name, client_name, qos_array, qos_name_array, wl_function_name); \ + qos_class_t qos_array[ENV_VAR_QOS] = {qos_bo, qos_qo, qos_ao}; \ + const char *qos_name_array[ENV_VAR_QOS] = {qos_bo_name, qos_qo_name, qos_ao_name}; \ + run_client_server(server_name, client_name, qos_array, qos_name_array, wl_function_name); \ } /* * Test 1: Test special reply port SPI @@ -1578,9 +1569,9 @@ T_HELPER_DECL(server_kevent_id, * the same should return MACH_PORT_NULL, unless the reply port is destroyed. */ TEST_QOS("server_kevent_id", "qos_get_special_reply_port", special_reply_port, "workloop_cb_test_intransit", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") /* * Test 2: Test sync ipc send to an in-transit port @@ -1590,9 +1581,9 @@ TEST_QOS("server_kevent_id", "qos_get_special_reply_port", special_reply_port, " * sync ipc override. 
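 *
 * (Each TEST_QOS() invocation is one self-contained darwintest: per the
 * macro above, the transit_IN case just below expands to roughly
 *
 *     T_DECL(server_kevent_id_transit_IN,
 *         "Event delivery at user initiated QoS using a kevent_id",
 *         T_META_ASROOT(YES))
 *     {
 *         qos_class_t qos_array[ENV_VAR_QOS] =
 *             {QOS_CLASS_DEFAULT, QOS_CLASS_MAINTENANCE, QOS_CLASS_USER_INITIATED};
 *         const char *qos_name_array[ENV_VAR_QOS] =
 *             {"default", "maintenance", "user initiated"};
 *         run_client_server("server_kevent_id",
 *             "qos_client_send_to_intransit_with_thr_pri",
 *             qos_array, qos_name_array, "workloop_cb_test_intransit");
 *     }
 *
 * where the three QoS/name pairs are, in order, the before-override,
 * queue-override and after-override values handed to the client and server
 * helpers through the environment.)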
*/ TEST_QOS("server_kevent_id", "qos_client_send_to_intransit_with_thr_pri", transit_IN, "workloop_cb_test_intransit", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_USER_INITIATED, "user initiated") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_USER_INITIATED, "user initiated") /* * Test 3: Test sync ipc send to an in-transit port @@ -1602,9 +1593,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_to_intransit_with_thr_pri", transi * sync ipc override. */ TEST_QOS("server_kevent_id", "qos_client_send_to_intransit_with_thr_pri", transit_UI, "workloop_cb_test_intransit", - QOS_CLASS_USER_INITIATED, "user initiated", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") + QOS_CLASS_USER_INITIATED, "user initiated", + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") /* * Test 4: Test starting a sync rcv overrides the servicer @@ -1614,9 +1605,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_to_intransit_with_thr_pri", transi * servicer of the workloop gets sync ipc override. */ TEST_QOS("server_kevent_id", "qos_client_send_sync_and_sync_rcv", rcv_IN, "workloop_cb_test_intransit", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_USER_INITIATED, "user initiated") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_USER_INITIATED, "user initiated") /* * Test 5: Test starting a sync rcv overrides the servicer @@ -1626,9 +1617,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_sync_and_sync_rcv", rcv_IN, "workl * servicer of the workloop gets sync ipc override. */ TEST_QOS("server_kevent_id", "qos_client_send_sync_and_sync_rcv", rcv_UI, "workloop_cb_test_intransit", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_USER_INTERACTIVE, "user interactive with 47 basepri promotion") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_USER_INTERACTIVE, "user interactive with 47 basepri promotion") /* * Test 6: test sending sync ipc message (at IN qos) to port will override the servicer @@ -1637,9 +1628,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_sync_and_sync_rcv", rcv_UI, "workl * of the workloop on other side gets sync ipc override. */ TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_pri", send_sync_IN, "workloop_cb_test_sync_send", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_USER_INITIATED, "user initiated") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_USER_INITIATED, "user initiated") /* * Test 7: test sending sync ipc message (at UI qos) to port will override the servicer @@ -1648,9 +1639,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_pri", send_sync_IN, * of the workloop on other side gets sync ipc override. 
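 *
 * (In each TEST_QOS() below, the third QoS/name pair is the value the
 * handler should observe once the override is in place, i.e. it becomes
 * g_expected_qos[ENV_QOS_AFTER_OVERRIDE]; the name strings feed only the
 * generated test name and log output.  The "user initiated with 47 basepri
 * promotion" label on QOS_CLASS_USER_INTERACTIVE reflects how the sync IPC
 * boost manifests: as a promotion of the servicer to base priority 47.)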
*/ TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_pri", send_sync_UI, "workloop_cb_test_sync_send", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") /* * Test 8: test enabling a knote in workloop handler will drop the sync ipc override of delivered message @@ -1660,9 +1651,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_pri", send_sync_UI, * that sync ipc override is dropped. */ TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_pri", send_sync_UI_and_enable, "workloop_cb_test_sync_send_and_enable", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") /* * Test 9: test returning to begin processing drops sync ipc override of delivered message @@ -1671,9 +1662,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_pri", send_sync_UI_a * the delivered message, but should still have the override of an enqueued message. */ TEST_QOS("server_kevent_id", "qos_client_send_two_sync_msg_with_thr_pri", send_two_sync_UI, "workloop_cb_test_send_two_sync", - QOS_CLASS_BACKGROUND, "background", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") + QOS_CLASS_BACKGROUND, "background", + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") /* * Test 10: test destroying the special reply port drops the override @@ -1683,9 +1674,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_two_sync_msg_with_thr_pri", send_t * reply port drops the sync ipc override on the servicer. */ TEST_QOS("server_kevent_id", "qos_client_send_two_msg_and_destroy", send_two_UI_and_destroy, "workloop_cb_test_two_send_and_destroy", - QOS_CLASS_BACKGROUND, "background", - QOS_CLASS_MAINTENANCE, "maintenance", - QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") + QOS_CLASS_BACKGROUND, "background", + QOS_CLASS_MAINTENANCE, "maintenance", + QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") /* * Test 11: test sending two ports with chaining * @@ -1693,9 +1684,9 @@ TEST_QOS("server_kevent_id", "qos_client_send_two_msg_and_destroy", send_two_UI_ * sent as a sync IPC to a service port. */ TEST_QOS("server_kevent_id", "qos_client_send_complex_msg_with_pri", send_complex_sync_UI_and_enable, "workloop_cb_test_sync_send_and_enable", - QOS_CLASS_USER_INITIATED, "user initiated", - QOS_CLASS_USER_INITIATED, "user initiated", - QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") + QOS_CLASS_USER_INITIATED, "user initiated", + QOS_CLASS_USER_INITIATED, "user initiated", + QOS_CLASS_USER_INTERACTIVE, "user initiated with 47 basepri promotion") /* * Test 12 - 19 @@ -1703,65 +1694,65 @@ TEST_QOS("server_kevent_id", "qos_client_send_complex_msg_with_pri", send_comple * Test single sync ipc link with server that breaks/preserves the link in different ways. 
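 *
 * (Going by the workloop helper names, the test-name suffixes decode as:
 * s = send the reply, k = call kevent again, d = deallocate the reply
 * port, p = finish on a plain pthread, n = do nothing, e = exit.  The
 * "correct" servers reply, deallocate, or simply do nothing before
 * rearming with kevent; the "incorrect" ones (_ks, _kp) rearm first,
 * which is what breaks the link.)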
*/ TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_link_check_correct_server", send_sync_link_correct_server_s, "workloop_cb_test_sync_send_reply", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_link_check_correct_server", send_sync_link_correct_server_d, "workloop_cb_test_sync_send_deallocate", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_link_check_correct_server", send_sync_link_correct_server_sk, "workloop_cb_test_sync_send_reply_kevent", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_link_check_correct_server", send_sync_link_correct_server_skp, "workloop_cb_test_sync_send_reply_kevent_pthread", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_link_check_incorrect_server", send_sync_link_incorrect_server_ks, "workloop_cb_test_sync_send_kevent_reply", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_link_check_correct_server", send_sync_link_correct_server_n, "workloop_cb_test_sync_send_do_nothing", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_link_check_incorrect_server", send_sync_link_incorrect_server_kp, "workloop_cb_test_sync_send_do_nothing_kevent_pthread", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_sync_msg_with_link_check_correct_server", send_sync_link_correct_server_e, "workloop_cb_test_sync_send_do_nothing_exit", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") /* * Test 20 - 23 * * Test sequential sync ipc link with server that breaks/preserves the link. 
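 *
 * (Same suffix legend as tests 12 - 19, applied across two back-to-back
 * sync messages: only the server that replies before each kevent call
 * (sksk) keeps the link intact both times; ordering kevent before either
 * reply (kssk, ksks, skks) breaks it.)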
*/ TEST_QOS("server_kevent_id", "qos_client_send_2sync_msg_with_link_check_correct_server", send_2sync_link_correct_server_sksk, "workloop_cb_test_sync_send_reply_kevent_reply_kevent", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_2sync_msg_with_link_check_incorrect_server", send_2sync_link_incorrect_server_kssk, "workloop_cb_test_sync_send_kevent_reply_reply_kevent", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_2sync_msg_with_link_check_incorrect_server", send_2sync_link_incorrect_server_ksks, "workloop_cb_test_sync_send_kevent_reply_kevent_reply", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") TEST_QOS("server_kevent_id", "qos_client_send_2sync_msg_with_link_check_incorrect_server", send_2sync_link_incorrect_server_skks, "workloop_cb_test_sync_send_reply_kevent_kevent_reply", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default", - QOS_CLASS_DEFAULT, "default") + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default", + QOS_CLASS_DEFAULT, "default") diff --git a/tests/kpc.c b/tests/kpc.c index 52009508a..1f74ada51 100644 --- a/tests/kpc.c +++ b/tests/kpc.c @@ -5,15 +5,15 @@ #include T_DECL(fixed_counters, - "test that fixed counters return monotonically increasing values", - T_META_ASROOT(YES)) + "test that fixed counters return monotonically increasing values", + T_META_ASROOT(YES)) { T_SKIP("unimplemented"); } T_DECL(fixed_thread_counters, - "test that fixed thread counters return monotonically increasing values", - T_META_ASROOT(YES)) + "test that fixed thread counters return monotonically increasing values", + T_META_ASROOT(YES)) { int err; uint32_t ctrs_cnt; @@ -30,9 +30,9 @@ T_DECL(fixed_thread_counters, T_QUIET; T_ASSERT_POSIX_SUCCESS(kpc_force_all_ctrs_set(1), NULL); T_ASSERT_POSIX_SUCCESS(kpc_set_counting(KPC_CLASS_FIXED_MASK), - "kpc_set_counting"); + "kpc_set_counting"); T_ASSERT_POSIX_SUCCESS(kpc_set_thread_counting(KPC_CLASS_FIXED_MASK), - "kpc_set_thread_counting"); + "kpc_set_thread_counting"); T_SETUPEND; @@ -56,11 +56,11 @@ T_DECL(fixed_thread_counters, for (uint32_t i = 0; i < ctrs_cnt; i++) { T_LOG("checking counter %d with value %" PRIu64 - " > previous value %" PRIu64, i, ctrs_b[i], ctrs_a[i]); + " > previous value %" PRIu64, i, ctrs_b[i], ctrs_a[i]); T_QUIET; T_EXPECT_GT(ctrs_b[i], UINT64_C(0), "counter %d is non-zero", i); T_QUIET; T_EXPECT_LT(ctrs_a[i], ctrs_b[i], - "counter %d is increasing", i); + "counter %d is increasing", i); } free(ctrs_a); diff --git a/tests/kperf.c b/tests/kperf.c index 81e3e4db9..0c6684ae2 100644 --- a/tests/kperf.c +++ b/tests/kperf.c @@ -18,8 +18,9 @@ #include "kperf_helpers.h" T_GLOBAL_META( - T_META_NAMESPACE("xnu.kperf"), - T_META_CHECK_LEAKS(false)); + T_META_NAMESPACE("xnu.kperf"), + T_META_CHECK_LEAKS(false), + T_META_ASROOT(true)); #define MAX_CPUS 64 #define MAX_THREADS 64 @@ -33,7 +34,9 @@ spinning_thread(void *semp) T_ASSERT_NOTNULL(semp, "semaphore passed to thread should not be NULL"); dispatch_semaphore_signal(*(dispatch_semaphore_t *)semp); - while (running_threads); + while 
(running_threads) { + ; + } return NULL; } @@ -45,7 +48,7 @@ spinning_thread(void *semp) #define PERF_TMR_SKIP KDBG_EVENTID(DBG_PERF, 3, 4) #define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, \ - MACH_STACK_HANDOFF) + MACH_STACK_HANDOFF) #define SCHED_SWITCH KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED) #define SCHED_IDLE KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE) @@ -69,19 +72,18 @@ reset_ktrace(void) */ T_DECL(ipi_active_cpus, - "make sure that kperf IPIs all active CPUs", - T_META_ASROOT(true)) + "make sure that kperf IPIs all active CPUs") { int ncpus = dt_ncpu(); T_QUIET; T_ASSERT_LT(ncpus, MAX_CPUS, - "only supports up to %d CPUs", MAX_CPUS); + "only supports up to %d CPUs", MAX_CPUS); T_LOG("found %d CPUs", ncpus); int nthreads = ncpus - 1; T_QUIET; T_ASSERT_LT(nthreads, MAX_THREADS, - "only supports up to %d threads", MAX_THREADS); + "only supports up to %d threads", MAX_THREADS); static pthread_t threads[MAX_THREADS]; @@ -98,8 +100,8 @@ T_DECL(ipi_active_cpus, for (int i = 0; i < nthreads; i++) { T_QUIET; T_ASSERT_POSIX_ZERO( - pthread_create(&threads[i], NULL, &spinning_thread, - &thread_spinning), NULL); + pthread_create(&threads[i], NULL, &spinning_thread, + &thread_spinning), NULL); dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER); } @@ -117,10 +119,10 @@ T_DECL(ipi_active_cpus, */ ktrace_events_single(s, DISPATCH_AFTER_EVENT, - ^(__unused struct trace_point *tp) + ^(__unused struct trace_point *tp) { dispatch_after(dispatch_time(DISPATCH_TIME_NOW, - TIMEOUT_SECS * NSEC_PER_SEC), q, ^{ + TIMEOUT_SECS * NSEC_PER_SEC), q, ^{ ktrace_end(s, 0); }); }); @@ -136,17 +138,17 @@ T_DECL(ipi_active_cpus, running_threads = false; for (int i = 0; i < nthreads; i++) { - T_QUIET; - T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL); + T_QUIET; + T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL); } for (int i = 0; i < nidles; i++) { - T_LOG("CPU %d idle thread: %#" PRIx64, i, idle_tids[i]); + T_LOG("CPU %d idle thread: %#" PRIx64, i, idle_tids[i]); } T_LOG("saw %" PRIu64 " timer fires, %" PRIu64 " samples, " - "%g samples/fire", nfires, nsamples, - (double)nsamples / (double)nfires); + "%g samples/fire", nfires, nsamples, + (double)nsamples / (double)nfires); T_END; }); @@ -183,8 +185,8 @@ T_DECL(ipi_active_cpus, tids_on_cpu[tp->cpuid] = 0; for (int i = 0; i < nidles; i++) { - if (idle_tids[i] == idle_thread) { - return; + if (idle_tids[i] == idle_thread) { + return; } } @@ -211,18 +213,18 @@ T_DECL(ipi_active_cpus, nexpected = 0; for (int i = 0; i < ncpus; i++) { - uint64_t i_bit = UINT64_C(1) << i; - if (sample_missing & i_bit) { - T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)", - tp->cpuid, tids_snap[i], last_fire_cpu, - xcall_mask, last_expected); - sample_missing &= ~i_bit; + uint64_t i_bit = UINT64_C(1) << i; + if (sample_missing & i_bit) { + T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)", + tp->cpuid, tids_snap[i], last_fire_cpu, + xcall_mask, last_expected); + sample_missing &= ~i_bit; } - if (tids_on_cpu[i] != 0) { - tids_snap[i] = tids_on_cpu[i]; - sample_missing |= i_bit; - nexpected++; + if (tids_on_cpu[i] != 0) { + tids_snap[i] = tids_on_cpu[i]; + sample_missing |= i_bit; + nexpected++; } } @@ -242,7 +244,7 @@ T_DECL(ipi_active_cpus, ktrace_events_single(s, MP_CPUS_CALL, ^(struct trace_point *tp) { if (xcall_from_cpu != (int)tp->cpuid) { - return; + return; } xcall_mask = tp->arg1; @@ -257,8 +259,8 @@ 
T_DECL(ipi_active_cpus, ktrace_events_single(s, PERF_TMR_HNDLR, ^(struct trace_point *tp) { nsamples++; if ((int)tp->cpuid > ncpus) { - /* skip IOPs; they're not scheduling our threads */ - return; + /* skip IOPs; they're not scheduling our threads */ + return; } sample_missing &= ~(UINT64_C(1) << tp->cpuid); @@ -271,11 +273,11 @@ T_DECL(ipi_active_cpus, (void)kperf_action_count_set(1); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_KSTACK), - NULL); + NULL); (void)kperf_timer_count_set(1); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0, - kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL); + kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL); @@ -283,8 +285,8 @@ T_DECL(ipi_active_cpus, T_ATEND(reset_ktrace); T_ASSERT_POSIX_ZERO(ktrace_start(s, - dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), - "start ktrace"); + dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), + "start ktrace"); kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0); @@ -300,12 +302,12 @@ T_DECL(ipi_active_cpus, #define NON_TRIGGER_CODE UINT8_C(0xff) #define NON_TRIGGER_EVENT \ - (KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \ - NON_TRIGGER_CODE)) + (KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \ + NON_TRIGGER_CODE)) static void expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, - unsigned int n_debugids) + unsigned int n_debugids) { __block int missing_kernel_stacks = 0; __block int missing_user_stacks = 0; @@ -316,37 +318,37 @@ expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, T_QUIET; T_ASSERT_NOTNULL(s, NULL); ktrace_events_single(s, PERF_STK_KHDR, ^(struct trace_point *tp) { - missing_kernel_stacks--; - T_LOG("saw kernel stack with %lu frames, flags = %#lx", tp->arg2, - tp->arg1); - }); + missing_kernel_stacks--; + T_LOG("saw kernel stack with %lu frames, flags = %#lx", tp->arg2, + tp->arg1); + }); ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) { - missing_user_stacks--; - T_LOG("saw user stack with %lu frames, flags = %#lx", tp->arg2, - tp->arg1); - }); + missing_user_stacks--; + T_LOG("saw user stack with %lu frames, flags = %#lx", tp->arg2, + tp->arg1); + }); for (unsigned int i = 0; i < n_debugids; i++) { ktrace_events_single(s, debugids[i], ^(struct trace_point *tp) { - missing_kernel_stacks++; - missing_user_stacks++; - T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid); - }); + missing_kernel_stacks++; + missing_user_stacks++; + T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid); + }); } ktrace_events_single(s, NON_TRIGGER_EVENT, - ^(__unused struct trace_point *tp) - { - ktrace_end(s, 0); - }); + ^(__unused struct trace_point *tp) + { + ktrace_end(s, 0); + }); ktrace_set_completion_handler(s, ^{ - T_EXPECT_LE(missing_kernel_stacks, 0, NULL); - T_EXPECT_LE(missing_user_stacks, 0, NULL); + T_EXPECT_LE(missing_kernel_stacks, 0, NULL); + T_EXPECT_LE(missing_user_stacks, 0, NULL); - ktrace_session_destroy(s); - T_END; - }); + ktrace_session_destroy(s); + T_END; + }); /* configure kperf */ @@ -354,14 +356,14 @@ expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, (void)kperf_action_count_set(1); T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, - KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL); + KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL); filter = kperf_kdebug_filter_create(); T_ASSERT_NOTNULL(filter, NULL); T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL); 
T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_desc(filter, filter_desc), - NULL); + NULL); T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL); kperf_kdebug_filter_destroy(filter); @@ -378,10 +380,10 @@ expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, T_ASSERT_POSIX_SUCCESS(kdebug_trace(NON_TRIGGER_EVENT, 0, 0, 0, 0), NULL); dispatch_after(dispatch_time(DISPATCH_TIME_NOW, KDEBUG_TRIGGER_TIMEOUT_NS), - dispatch_get_main_queue(), ^(void) - { - ktrace_end(s, 1); - }); + dispatch_get_main_queue(), ^(void) + { + ktrace_end(s, 1); + }); } #define TRIGGER_CLASS UINT8_C(0xfe) @@ -389,11 +391,10 @@ expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids, #define TRIGGER_SUBCLASS UINT8_C(0xff) #define TRIGGER_CODE UINT8_C(0) #define TRIGGER_DEBUGID \ - (KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE)) + (KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE)) T_DECL(kdebug_trigger_classes, - "test that kdebug trigger samples on classes", - T_META_ASROOT(true)) + "test that kdebug trigger samples on classes") { const uint32_t class_debugids[] = { KDBG_EVENTID(TRIGGER_CLASS, 1, 1), @@ -403,13 +404,12 @@ T_DECL(kdebug_trigger_classes, }; expect_kdebug_trigger("C0xfe,C0xfdr", class_debugids, - sizeof(class_debugids) / sizeof(class_debugids[0])); + sizeof(class_debugids) / sizeof(class_debugids[0])); dispatch_main(); } T_DECL(kdebug_trigger_subclasses, - "test that kdebug trigger samples on subclasses", - T_META_ASROOT(true)) + "test that kdebug trigger samples on subclasses") { const uint32_t subclass_debugids[] = { KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 0), @@ -419,20 +419,19 @@ T_DECL(kdebug_trigger_subclasses, }; expect_kdebug_trigger("S0xfeff,S0xfdffr", subclass_debugids, - sizeof(subclass_debugids) / sizeof(subclass_debugids[0])); + sizeof(subclass_debugids) / sizeof(subclass_debugids[0])); dispatch_main(); } T_DECL(kdebug_trigger_debugids, - "test that kdebug trigger samples on debugids", - T_META_ASROOT(true)) + "test that kdebug trigger samples on debugids") { const uint32_t debugids[] = { TRIGGER_DEBUGID }; expect_kdebug_trigger("D0xfeff0000", debugids, - sizeof(debugids) / sizeof(debugids[0])); + sizeof(debugids) / sizeof(debugids[0])); dispatch_main(); } @@ -442,8 +441,7 @@ T_DECL(kdebug_trigger_debugids, */ T_DECL(kdbg_callstacks, - "test that the kdbg_callstacks samples on syscalls", - T_META_ASROOT(true)) + "test that the kdbg_callstacks samples on syscalls") { ktrace_session_t s; __block bool saw_user_stack = false; @@ -454,7 +452,7 @@ T_DECL(kdbg_callstacks, /* * Make sure BSD events are traced in order to trigger samples on syscalls. 
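 *
 * (Registering a handler for DBG_BSD, even an empty one, is what makes
 * libktrace enable that class so syscall events are emitted at all; the
 * PERF_STK_UHDR handler below then only needs to see a single user stack
 * sample for the test to pass.)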
*/ - ktrace_events_class(s, DBG_BSD, ^void(__unused struct trace_point *tp) {}); + ktrace_events_class(s, DBG_BSD, ^void (__unused struct trace_point *tp) {}); ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) { saw_user_stack = true; @@ -465,7 +463,7 @@ T_DECL(kdbg_callstacks, ktrace_session_destroy(s); T_EXPECT_TRUE(saw_user_stack, - "saw user stack after configuring kdbg_callstacks"); + "saw user stack after configuring kdbg_callstacks"); T_END; }); @@ -478,7 +476,7 @@ T_DECL(kdbg_callstacks, T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL); dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC), - dispatch_get_main_queue(), ^(void) { + dispatch_get_main_queue(), ^(void) { ktrace_end(s, 1); }); @@ -501,58 +499,80 @@ expect_stacks_traced(void (^cb)(void)) __block unsigned int kernel_stacks = 0; ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) { - user_stacks++; - }); + user_stacks++; + }); ktrace_events_single(s, PERF_STK_KHDR, ^(__unused struct trace_point *tp) { - kernel_stacks++; - }); + kernel_stacks++; + }); ktrace_set_completion_handler(s, ^(void) { - ktrace_session_destroy(s); - T_EXPECT_GT(user_stacks, 0U, NULL); - T_EXPECT_GT(kernel_stacks, 0U, NULL); - cb(); - }); + ktrace_session_destroy(s); + T_EXPECT_GT(user_stacks, 0U, NULL); + T_EXPECT_GT(kernel_stacks, 0U, NULL); + cb(); + }); T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL); T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL); dispatch_after(dispatch_time(DISPATCH_TIME_NOW, STACKS_WAIT_DURATION_NS), - dispatch_get_main_queue(), ^(void) - { - kperf_reset(); - ktrace_end(s, 0); - }); + dispatch_get_main_queue(), ^(void) + { + kperf_reset(); + ktrace_end(s, 0); + }); } -T_DECL(pet, "test that PET mode samples kernel and user stacks", - T_META_ASROOT(true)) +T_DECL(pet, "test that PET mode samples kernel and user stacks") { configure_kperf_stacks_timer(-1, 10); T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL); expect_stacks_traced(^(void) { - T_END; - }); + T_END; + }); dispatch_main(); } T_DECL(lightweight_pet, - "test that lightweight PET mode samples kernel and user stacks", - T_META_ASROOT(true)) + "test that lightweight PET mode samples kernel and user stacks", + T_META_ASROOT(true)) { int set = 1; configure_kperf_stacks_timer(-1, 10); T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL, - &set, sizeof(set)), NULL); + &set, sizeof(set)), NULL); T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL); expect_stacks_traced(^(void) { - T_END; - }); + T_END; + }); dispatch_main(); } + +T_DECL(pet_stress, "repeatedly enable and disable PET mode") +{ + int niters = 1000; + while (niters--) { + configure_kperf_stacks_timer(-1, 10); + T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL); + usleep(20); + kperf_reset(); + } + ; +} + +T_DECL(timer_stress, "repeatedly enable and disable timers") +{ + int niters = 1000; + while (niters--) { + configure_kperf_stacks_timer(-1, 1); + usleep(20); + kperf_reset(); + } + ; +} diff --git a/tests/kperf_backtracing.c b/tests/kperf_backtracing.c index 1d3d46d08..a586569c3 100644 --- a/tests/kperf_backtracing.c +++ b/tests/kperf_backtracing.c @@ -18,37 +18,37 @@ #define PERF_STK_UDATA UINT32_C(0x25020010) T_GLOBAL_META( - T_META_NAMESPACE("xnu.kperf"), - T_META_CHECK_LEAKS(false)); + T_META_NAMESPACE("xnu.kperf"), + T_META_CHECK_LEAKS(false)); static void expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol, unsigned long addr, unsigned 
int bt_idx, unsigned int max_frames) { - const char *name; - unsigned int frame_idx = max_frames - bt_idx - 1; - - if (!bt[frame_idx]) { - T_LOG("frame %2u: skipping system frame", frame_idx); - return; - } - - if (CSIsNull(symbol)) { - T_FAIL("invalid symbol for address %#lx at frame %d", addr, frame_idx); - return; - } - - if (frame_idx >= bt_len) { - T_FAIL("unexpected frame '%s' (%#lx) at index %u", - CSSymbolGetName(symbol), addr, frame_idx); - return; - } - - name = CSSymbolGetName(symbol); - T_QUIET; T_ASSERT_NOTNULL(name, NULL); - T_EXPECT_EQ_STR(name, bt[frame_idx], - "frame %2u: saw '%s', expected '%s'", - frame_idx, name, bt[frame_idx]); + const char *name; + unsigned int frame_idx = max_frames - bt_idx - 1; + + if (!bt[frame_idx]) { + T_LOG("frame %2u: skipping system frame", frame_idx); + return; + } + + if (CSIsNull(symbol)) { + T_FAIL("invalid symbol for address %#lx at frame %d", addr, frame_idx); + return; + } + + if (frame_idx >= bt_len) { + T_FAIL("unexpected frame '%s' (%#lx) at index %u", + CSSymbolGetName(symbol), addr, frame_idx); + return; + } + + name = CSSymbolGetName(symbol); + T_QUIET; T_ASSERT_NOTNULL(name, NULL); + T_EXPECT_EQ_STR(name, bt[frame_idx], + "frame %2u: saw '%s', expected '%s'", + frame_idx, name, bt[frame_idx]); } /* @@ -65,100 +65,100 @@ static void expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen, bool kern, const char **bt, unsigned int bt_len, unsigned int allow_larger_by) { - CSSymbolicatorRef symb; - uint32_t hdr_debugid; - uint32_t data_debugid; - __block unsigned int stacks = 0; - __block unsigned int frames = 0; - __block unsigned int hdr_frames = 0; - __block unsigned int allow_larger = allow_larger_by; - - if (kern) { - static CSSymbolicatorRef kern_symb; - static dispatch_once_t kern_symb_once; - - hdr_debugid = PERF_STK_KHDR; - data_debugid = PERF_STK_KDATA; - - dispatch_once(&kern_symb_once, ^(void) { - kern_symb = CSSymbolicatorCreateWithMachKernel(); - T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL); - }); - symb = kern_symb; - } else { - static CSSymbolicatorRef user_symb; - static dispatch_once_t user_symb_once; - - hdr_debugid = PERF_STK_UHDR; - data_debugid = PERF_STK_UDATA; - - dispatch_once(&user_symb_once, ^(void) { - user_symb = CSSymbolicatorCreateWithTask(mach_task_self()); - T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL); - T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL); - }); - symb = user_symb; - } - - ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) { - if (tid != 0 && tid != tp->threadid) { - return; - } - - T_LOG("found stack from thread %#lx", tp->threadid); - stacks++; - if (!(tp->arg1 & 1)) { - T_FAIL("invalid %s stack on thread %#lx", kern ? "kernel" : "user", - tp->threadid); - return; - } - - hdr_frames = (unsigned int)tp->arg2; - /* ignore extra link register or value pointed to by stack pointer */ - hdr_frames -= 1; - - T_QUIET; T_EXPECT_GE(hdr_frames, bt_len, - "number of frames in header"); - T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger, - "number of frames in header"); - if (hdr_frames > bt_len && allow_larger > 0) { - allow_larger = hdr_frames - bt_len; - hdr_frames = bt_len; - } - - T_LOG("%s stack seen", kern ? 
"kernel" : "user"); - frames = 0; - }); - - ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) { - if (tid != 0 && tid != tp->threadid) { - return; - } - - int i = 0; - - if (frames == 0 && hdr_frames > bt_len) { - /* skip frames near the PC */ - i = (int)allow_larger; - allow_larger -= 4; - } - - for (; i < 4 && frames < hdr_frames; i++, frames++) { - unsigned long addr = (&tp->arg1)[i]; - CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime( - symb, addr, kCSNow); - - expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames); - } - - /* saw the end of the user stack */ - if (hdr_frames == frames) { - *stacks_seen += 1; - if (!kern) { - ktrace_end(s, 1); - } - } - }); + CSSymbolicatorRef symb; + uint32_t hdr_debugid; + uint32_t data_debugid; + __block unsigned int stacks = 0; + __block unsigned int frames = 0; + __block unsigned int hdr_frames = 0; + __block unsigned int allow_larger = allow_larger_by; + + if (kern) { + static CSSymbolicatorRef kern_symb; + static dispatch_once_t kern_symb_once; + + hdr_debugid = PERF_STK_KHDR; + data_debugid = PERF_STK_KDATA; + + dispatch_once(&kern_symb_once, ^(void) { + kern_symb = CSSymbolicatorCreateWithMachKernel(); + T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL); + }); + symb = kern_symb; + } else { + static CSSymbolicatorRef user_symb; + static dispatch_once_t user_symb_once; + + hdr_debugid = PERF_STK_UHDR; + data_debugid = PERF_STK_UDATA; + + dispatch_once(&user_symb_once, ^(void) { + user_symb = CSSymbolicatorCreateWithTask(mach_task_self()); + T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL); + T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL); + }); + symb = user_symb; + } + + ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) { + if (tid != 0 && tid != tp->threadid) { + return; + } + + T_LOG("found stack from thread %#lx", tp->threadid); + stacks++; + if (!(tp->arg1 & 1)) { + T_FAIL("invalid %s stack on thread %#lx", kern ? "kernel" : "user", + tp->threadid); + return; + } + + hdr_frames = (unsigned int)tp->arg2; + /* ignore extra link register or value pointed to by stack pointer */ + hdr_frames -= 1; + + T_QUIET; T_EXPECT_GE(hdr_frames, bt_len, + "number of frames in header"); + T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger, + "number of frames in header"); + if (hdr_frames > bt_len && allow_larger > 0) { + allow_larger = hdr_frames - bt_len; + hdr_frames = bt_len; + } + + T_LOG("%s stack seen", kern ? "kernel" : "user"); + frames = 0; + }); + + ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) { + if (tid != 0 && tid != tp->threadid) { + return; + } + + int i = 0; + + if (frames == 0 && hdr_frames > bt_len) { + /* skip frames near the PC */ + i = (int)allow_larger; + allow_larger -= 4; + } + + for (; i < 4 && frames < hdr_frames; i++, frames++) { + unsigned long addr = (&tp->arg1)[i]; + CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime( + symb, addr, kCSNow); + + expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames); + } + + /* saw the end of the user stack */ + if (hdr_frames == frames) { + *stacks_seen += 1; + if (!kern) { + ktrace_end(s, 1); + } + } + }); } #define TRIGGERING_DEBUGID (0xfeff0f00) @@ -168,41 +168,45 @@ expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen, * hoisted out of the path to the spin (breaking being able to get a good * backtrace). 
*/ -static int __attribute__((noinline,not_tail_called)) +static int __attribute__((noinline, not_tail_called)) recurse_a(dispatch_semaphore_t spinning, unsigned int frames); -static int __attribute__((noinline,not_tail_called)) +static int __attribute__((noinline, not_tail_called)) recurse_b(dispatch_semaphore_t spinning, unsigned int frames); -static int __attribute__((noinline,not_tail_called)) +static int __attribute__((noinline, not_tail_called)) recurse_a(dispatch_semaphore_t spinning, unsigned int frames) { - if (frames == 0) { - if (spinning) { - dispatch_semaphore_signal(spinning); - for (;;); - } else { - kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0); - return 0; - } - } - - return recurse_b(spinning, frames - 1) + 1; + if (frames == 0) { + if (spinning) { + dispatch_semaphore_signal(spinning); + for (;;) { + ; + } + } else { + kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0); + return 0; + } + } + + return recurse_b(spinning, frames - 1) + 1; } -static int __attribute__((noinline,not_tail_called)) +static int __attribute__((noinline, not_tail_called)) recurse_b(dispatch_semaphore_t spinning, unsigned int frames) { - if (frames == 0) { - if (spinning) { - dispatch_semaphore_signal(spinning); - for (;;); - } else { - kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0); - return 0; - } - } - - return recurse_a(spinning, frames - 1) + 1; + if (frames == 0) { + if (spinning) { + dispatch_semaphore_signal(spinning); + for (;;) { + ; + } + } else { + kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0); + return 0; + } + } + + return recurse_a(spinning, frames - 1) + 1; } #define USER_FRAMES (12) @@ -215,37 +219,37 @@ recurse_b(dispatch_semaphore_t spinning, unsigned int frames) static const char *user_bt[USER_FRAMES] = { #if defined(__x86_64__) - NULL, + NULL, #endif /* defined(__x86_64__) */ - NULL, NULL, - "backtrace_thread", - "recurse_a", "recurse_b", "recurse_a", "recurse_b", - "recurse_a", "recurse_b", "recurse_a", + NULL, NULL, + "backtrace_thread", + "recurse_a", "recurse_b", "recurse_a", "recurse_b", + "recurse_a", "recurse_b", "recurse_a", #if !defined(__x86_64__) - "recurse_b", + "recurse_b", #endif /* !defined(__x86_64__) */ - NULL + NULL }; #if defined(__arm__) #define KERNEL_FRAMES (2) static const char *kernel_bt[KERNEL_FRAMES] = { - "unix_syscall", "kdebug_trace64" + "unix_syscall", "kdebug_trace64" }; #elif defined(__arm64__) #define KERNEL_FRAMES (4) static const char *kernel_bt[KERNEL_FRAMES] = { - "fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64" + "fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64" }; #elif defined(__x86_64__) #define KERNEL_FRAMES (2) static const char *kernel_bt[KERNEL_FRAMES] = { - "unix_syscall64", "kdebug_trace64" + "unix_syscall64", "kdebug_trace64" }; #else @@ -266,71 +270,71 @@ static dispatch_semaphore_t backtrace_go; static void * backtrace_thread(void *arg) { - dispatch_semaphore_t notify_spinning; - unsigned int calls; - - notify_spinning = (dispatch_semaphore_t)arg; - - dispatch_semaphore_signal(backtrace_started); - if (!notify_spinning) { - dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER); - } - - /* - * backtrace_thread, recurse_a, recurse_b, ...[, __kdebug_trace64] - * - * Always make one less call for this frame (backtrace_thread). - */ - calls = USER_FRAMES - RECURSE_START_OFFSET - 1 /* backtrace_thread */; - if (notify_spinning) { - /* - * Spinning doesn't end up calling __kdebug_trace64. 
- */ - calls -= 1; - } - - T_LOG("backtrace thread calling into %d frames (already at %d frames)", - calls, RECURSE_START_OFFSET); - (void)recurse_a(notify_spinning, calls); - return NULL; + dispatch_semaphore_t notify_spinning; + unsigned int calls; + + notify_spinning = (dispatch_semaphore_t)arg; + + dispatch_semaphore_signal(backtrace_started); + if (!notify_spinning) { + dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER); + } + + /* + * backtrace_thread, recurse_a, recurse_b, ...[, __kdebug_trace64] + * + * Always make one less call for this frame (backtrace_thread). + */ + calls = USER_FRAMES - RECURSE_START_OFFSET - 1 /* backtrace_thread */; + if (notify_spinning) { + /* + * Spinning doesn't end up calling __kdebug_trace64. + */ + calls -= 1; + } + + T_LOG("backtrace thread calling into %d frames (already at %d frames)", + calls, RECURSE_START_OFFSET); + (void)recurse_a(notify_spinning, calls); + return NULL; } static uint64_t create_backtrace_thread(dispatch_semaphore_t notify_spinning) { - pthread_t thread = NULL; - uint64_t tid; + pthread_t thread = NULL; + uint64_t tid; - dispatch_once(&backtrace_once, ^{ - backtrace_started = dispatch_semaphore_create(0); - T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL); + dispatch_once(&backtrace_once, ^{ + backtrace_started = dispatch_semaphore_create(0); + T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL); - if (!notify_spinning) { - backtrace_go = dispatch_semaphore_create(0); - T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL); - } - }); + if (!notify_spinning) { + backtrace_go = dispatch_semaphore_create(0); + T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL); + } + }); - T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread, - (void *)notify_spinning), NULL); - T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created"); - dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER); + T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread, + (void *)notify_spinning), NULL); + T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created"); + dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER); - T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL); - T_QUIET; T_ASSERT_NE(tid, UINT64_C(0), - "backtrace thread created does not have ID 0"); + T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL); + T_QUIET; T_ASSERT_NE(tid, UINT64_C(0), + "backtrace thread created does not have ID 0"); - T_LOG("starting thread with ID 0x%" PRIx64, tid); + T_LOG("starting thread with ID 0x%" PRIx64, tid); - return tid; + return tid; } static void start_backtrace_thread(void) { - T_QUIET; T_ASSERT_NOTNULL(backtrace_go, - "thread to backtrace created before starting it"); - dispatch_semaphore_signal(backtrace_go); + T_QUIET; T_ASSERT_NOTNULL(backtrace_go, + "thread to backtrace created before starting it"); + dispatch_semaphore_signal(backtrace_go); } #if TARGET_OS_WATCH @@ -343,104 +347,104 @@ T_DECL(backtraces_kdebug_trigger, "test that backtraces from kdebug trigger are correct", T_META_ASROOT(true)) { - static unsigned int stacks_seen = 0; - ktrace_session_t s; - kperf_kdebug_filter_t filter; - uint64_t tid; - - s = ktrace_session_create(); - T_ASSERT_NOTNULL(s, "ktrace session was created"); - - T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL); - - tid = create_backtrace_thread(NULL); - expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0); - expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 0); - - /* - * 
The triggering event must be traced (and thus registered with libktrace) - * to get backtraces. - */ - ktrace_events_single(s, TRIGGERING_DEBUGID, - ^(__unused struct trace_point *tp){ }); - - ktrace_set_completion_handler(s, ^(void) { - T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks"); - ktrace_session_destroy(s); - kperf_reset(); - T_END; - }); - - filter = kperf_kdebug_filter_create(); - T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created"); - - T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter, - TRIGGERING_DEBUGID), NULL); - T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL); - (void)kperf_action_count_set(1); - T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, - KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL); - T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL); - kperf_kdebug_filter_destroy(filter); - - T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL); - - T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL); - - start_backtrace_thread(); - - dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS), - dispatch_get_main_queue(), ^(void) - { - T_LOG("ending test after timeout"); - ktrace_end(s, 0); - }); - - dispatch_main(); + static unsigned int stacks_seen = 0; + ktrace_session_t s; + kperf_kdebug_filter_t filter; + uint64_t tid; + + s = ktrace_session_create(); + T_ASSERT_NOTNULL(s, "ktrace session was created"); + + T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL); + + tid = create_backtrace_thread(NULL); + expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0); + expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 0); + + /* + * The triggering event must be traced (and thus registered with libktrace) + * to get backtraces. 
+ */ + ktrace_events_single(s, TRIGGERING_DEBUGID, + ^(__unused struct trace_point *tp){ }); + + ktrace_set_completion_handler(s, ^(void) { + T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks"); + ktrace_session_destroy(s); + kperf_reset(); + T_END; + }); + + filter = kperf_kdebug_filter_create(); + T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created"); + + T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter, + TRIGGERING_DEBUGID), NULL); + T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL); + (void)kperf_action_count_set(1); + T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, + KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL); + T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL); + kperf_kdebug_filter_destroy(filter); + + T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL); + + T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL); + + start_backtrace_thread(); + + dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS), + dispatch_get_main_queue(), ^(void) + { + T_LOG("ending test after timeout"); + ktrace_end(s, 0); + }); + + dispatch_main(); } T_DECL(backtraces_user_timer, "test that user backtraces on a timer are correct", T_META_ASROOT(true)) { - static unsigned int stacks_seen = 0; - ktrace_session_t s; - uint64_t tid; - dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0); + static unsigned int stacks_seen = 0; + ktrace_session_t s; + uint64_t tid; + dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0); - s = ktrace_session_create(); - T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create"); + s = ktrace_session_create(); + T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create"); - ktrace_filter_pid(s, getpid()); + ktrace_filter_pid(s, getpid()); - configure_kperf_stacks_timer(getpid(), 10); + configure_kperf_stacks_timer(getpid(), 10); - tid = create_backtrace_thread(wait_for_spinning); - /* potentially calling dispatch function and system call */ - expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2); + tid = create_backtrace_thread(wait_for_spinning); + /* potentially calling dispatch function and system call */ + expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2); - ktrace_set_completion_handler(s, ^(void) { - T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack"); - ktrace_session_destroy(s); - kperf_reset(); - T_END; - }); + ktrace_set_completion_handler(s, ^(void) { + T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack"); + ktrace_session_destroy(s); + kperf_reset(); + T_END; + }); - T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL); + T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL); - /* wait until the thread that will be backtraced is spinning */ - dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER); + /* wait until the thread that will be backtraced is spinning */ + dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER); - T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL); + T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL); - dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS), - dispatch_get_main_queue(), ^(void) - { - T_LOG("ending test after timeout"); - ktrace_end(s, 0); - }); + dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS), + dispatch_get_main_queue(), ^(void) + { + T_LOG("ending test after timeout"); + ktrace_end(s, 0); + }); - dispatch_main(); + dispatch_main(); } /* 
TODO test kernel stacks in all modes */ diff --git a/tests/kperf_helpers.c b/tests/kperf_helpers.c index bf64f6bb8..c6b4d6d5a 100644 --- a/tests/kperf_helpers.c +++ b/tests/kperf_helpers.c @@ -7,19 +7,19 @@ void configure_kperf_stacks_timer(pid_t pid, unsigned int period_ms) { - kperf_reset(); + kperf_reset(); - (void)kperf_action_count_set(1); - (void)kperf_timer_count_set(1); + (void)kperf_action_count_set(1); + (void)kperf_timer_count_set(1); - T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, - KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL); + T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, + KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL); - if (pid != -1) { - T_ASSERT_POSIX_SUCCESS(kperf_action_filter_set_by_pid(1, pid), NULL); - } + if (pid != -1) { + T_ASSERT_POSIX_SUCCESS(kperf_action_filter_set_by_pid(1, pid), NULL); + } - T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL); - T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0, - kperf_ns_to_ticks(period_ms * NSEC_PER_MSEC)), NULL); + T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL); + T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0, + kperf_ns_to_ticks(period_ms * NSEC_PER_MSEC)), NULL); } diff --git a/tests/kqueue_add_and_trigger.c b/tests/kqueue_add_and_trigger.c index 15243a789..8dded5ed3 100644 --- a/tests/kqueue_add_and_trigger.c +++ b/tests/kqueue_add_and_trigger.c @@ -16,7 +16,7 @@ T_DECL(kqueue_add_and_trigger_evfilt_user, "Add and trigger EVFILT_USER events w const struct kevent kev = { .ident = 1, .filter = EVFILT_USER, - .flags = EV_ADD|EV_CLEAR, + .flags = EV_ADD | EV_CLEAR, .fflags = NOTE_TRIGGER, }; const struct timespec timeout = { @@ -32,6 +32,4 @@ T_DECL(kqueue_add_and_trigger_evfilt_user, "Add and trigger EVFILT_USER events w T_ASSERT_EQ(ret, 1, "kevent with add and trigger, ret"); T_ASSERT_EQ(ret_kev.ident, 1, "kevent with add and trigger, ident"); T_ASSERT_EQ(ret_kev.filter, EVFILT_USER, "kevent with add and trigger, filter"); - } - diff --git a/tests/kqueue_close.c b/tests/kqueue_close.c index 3682d91e3..5678d3251 100644 --- a/tests/kqueue_close.c +++ b/tests/kqueue_close.c @@ -56,7 +56,7 @@ run_test() pthread_t thread; int rv = pthread_create(&thread, NULL, poll_kqueue, - (void *)(uintptr_t)fd); + (void *)(uintptr_t)fd); T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_create"); usleep(timeout_ms * 1000); @@ -69,9 +69,9 @@ run_test() } T_DECL(kqueue_close_race, "Races kqueue close with kqueue process", - T_META_LTEPHASE(LTE_POSTINIT), T_META_TIMEOUT(5)) + T_META_LTEPHASE(LTE_POSTINIT), T_META_TIMEOUT(5)) { - for (uint32_t i = 1 ; i < 100 ; i++) { + for (uint32_t i = 1; i < 100; i++) { run_test(); } } diff --git a/tests/kqueue_fifo_18776047.c b/tests/kqueue_fifo_18776047.c index fe45758fd..d2a285d7c 100644 --- a/tests/kqueue_fifo_18776047.c +++ b/tests/kqueue_fifo_18776047.c @@ -41,10 +41,11 @@ write_some_data(int fd) retval = (int)write(fd, data, (size_t)len); if (retval < 0) { if (errno == EAGAIN) { - if (len == 1) + if (len == 1) { return count; - else + } else { len--; + } } else { T_ASSERT_FAIL("write to fd %d of %s of len %d failed.", fd, data, len); abort(); diff --git a/tests/kqueue_file_tests.c b/tests/kqueue_file_tests.c index dcd2c4793..6293e16c8 100644 --- a/tests/kqueue_file_tests.c +++ b/tests/kqueue_file_tests.c @@ -22,8 +22,8 @@ #include T_GLOBAL_META( - T_META_NAMESPACE("xnu.kevent") - ); + T_META_NAMESPACE("xnu.kevent") + ); #define PDIR "/tmp" #define DIR1 PDIR "/dir1" @@ -32,24 +32,24 @@ T_GLOBAL_META( #define FILE1 PDIR "/file1" #define FILE2 PDIR "/file2" 
-#define KEY "somekey"
-#define VAL "someval"
+#define KEY "somekey"
+#define VAL "someval"
 
-#define NOSLEEP 0
-#define SLEEP 1
-#define NO_EVENT 0
-#define YES_EVENT 1
+#define NOSLEEP 0
+#define SLEEP 1
+#define NO_EVENT 0
+#define YES_EVENT 1
 
-#define OUTPUT_LEVEL 0
-#define RESULT_LEVEL 3
+#define OUTPUT_LEVEL 0
+#define RESULT_LEVEL 3
 
-#define TEST_STRING "Some text!!! Yes indeed, some of that very structure which has passed on man's knowledge for generations."
-#define HELLO_WORLD "Hello, World!"
-#define USLEEP_TIME 5000
-#define WAIT_TIME (4l)
-#define LENGTHEN_SIZE 500
-#define FIFO_SPACE 8192 /* FIFOS have 8K of buffer space */
+#define TEST_STRING "Some text!!! Yes indeed, some of that very structure which has passed on man's knowledge for generations."
+#define HELLO_WORLD "Hello, World!"
+#define USLEEP_TIME 5000
+#define WAIT_TIME (4l)
+#define LENGTHEN_SIZE 500
+#define FIFO_SPACE 8192 /* FIFOS have 8K of buffer space */
 
 /*
  * These two variables are the non local memory for holding the return
@@ -62,17 +62,17 @@ int fifo_read_fd;
  * Types of actions for setup, cleanup, and execution of tests
  */
 typedef enum {CREAT, MKDIR, READ, WRITE, WRITEFD, FILLFD, UNLINK, LSKEE, RMDIR, MKFIFO, LENGTHEN, TRUNC,
-	SYMLINK, CHMOD, CHOWN, EXCHANGEDATA, RENAME, LSEEK, OPEN, MMAP, NOTHING,
-	SETXATTR, UTIMES, STAT, HARDLINK, REVOKE, FUNLOCK} action_id_t;
+	SYMLINK, CHMOD, CHOWN, EXCHANGEDATA, RENAME, LSEEK, OPEN, MMAP, NOTHING,
+	SETXATTR, UTIMES, STAT, HARDLINK, REVOKE, FUNLOCK} action_id_t;
 
-/*
+/*
  * Directs an action as mentioned above
  */
 typedef struct _action {
-	int act_dosleep;
-	action_id_t act_id;
-	void *act_args[5];
-	int act_fd;
+	int act_dosleep;
+	action_id_t act_id;
+	void *act_args[5];
+	int act_fd;
 } action_t;
 
 /*
@@ -81,7 +81,7 @@ typedef struct _action {
  */
 typedef struct _test {
 	char *t_testname;
-
+
 	/* Is this test an expected failure? */
 	int t_known_failure;
@@ -89,39 +89,39 @@
 	int t_nondeterministic;
 
 	/* Test kevent() or poll() */
-	int t_is_poll_test;
-
+	int t_is_poll_test;
+
 	/* Actions for setting up test */
-	int t_n_prep_actions;
+	int t_n_prep_actions;
 	action_t t_prep_actions[5];
-
+
 	/* Actions for cleaning up test */
-	int t_n_cleanup_actions;
+	int t_n_cleanup_actions;
 	action_t t_cleanup_actions[5];
-
+
 	/* Action for thread to take while we wait */
 	action_t t_helpthreadact;
-
+
 	/* File to look for event on */
-	char *t_watchfile; /* set event ident IN TEST (can't know fd beforehand)*/
-	int t_file_is_fifo;/* FIFOs are handled in a special manner */
-
+	char *t_watchfile; /* set event ident IN TEST (can't know fd beforehand)*/
+	int t_file_is_fifo;/* FIFOs are handled in a special manner */
+
 	/* Different parameters for poll() vs kevent() */
-	union {
-		struct kevent tu_kev;
-		short tu_pollevents;
+	union {
+		struct kevent tu_kev;
+		short tu_pollevents;
 	} t_union;
-
+
 	/* Do we expect results?
*/ - int t_want_event; - + int t_want_event; + /* Not always used--how much data should we find (EVFILT_{READ,WRITE}) */ - int t_nbytes; - + int t_nbytes; + /* Hacks for FILT_READ and pipes */ - int t_read_to_end_first; /* Consume all data in file before waiting for event */ - int t_write_some_data; /* Write some data to file before waiting for event (FIFO hack) */ - int t_extra_sleep_hack; /* Sleep before waiting, to let a fifo fill up with data */ + int t_read_to_end_first; /* Consume all data in file before waiting for event */ + int t_write_some_data; /* Write some data to file before waiting for event (FIFO hack) */ + int t_extra_sleep_hack; /* Sleep before waiting, to let a fifo fill up with data */ } test_t; char * @@ -189,75 +189,73 @@ get_action_name(action_id_t a) * Initialize an action struct. Whether to sleep, what action to take, * and arguments for that action. */ -void -init_action(action_t *act, int sleep, action_id_t call, int nargs, ...) +void +init_action(action_t *act, int sleep, action_id_t call, int nargs, ...) { int i; va_list ap; va_start(ap, nargs); act->act_dosleep = sleep; act->act_id = call; - - for (i = 0; i < nargs; i++) - { + + for (i = 0; i < nargs; i++) { act->act_args[i] = va_arg(ap, void*); } - + va_end(ap); - } /* - * Opening a fifo is complicated: need to open both sides at once + * Opening a fifo is complicated: need to open both sides at once */ void * -open_fifo_readside(void *arg) +open_fifo_readside(void *arg) { if ((fifo_read_fd = open((char*)arg, O_RDONLY)) == -1) { T_LOG("open(%s, O_RDONLY) failed: %d (%s)\n", arg, errno, strerror(errno)); } - return (&fifo_read_fd); + return &fifo_read_fd; } /* * Open a fifo, setting read and write descriptors. Return 0 for success, -1 for failure. * Only set FD args upon success; they will be unmodified on failure. */ -int -open_fifo(const char *path, int *readfd, int *writefd) +int +open_fifo(const char *path, int *readfd, int *writefd) { pthread_t thread; int waitres; int res; int *tmpreadfd, tmpwritefd; - + fifo_read_fd = -1; res = pthread_create(&thread, 0, open_fifo_readside, (void*)path); if (res == 0) { if ((tmpwritefd = open(path, O_WRONLY)) == -1) { T_LOG("open(%s, O_WRONLY) failed: %d (%s)\n", path, errno, strerror(errno)); - return (-1); + return -1; } waitres = pthread_join(thread, (void**) &tmpreadfd); - + fcntl(tmpwritefd, F_SETFL, O_WRONLY | O_NONBLOCK); - + if ((waitres == 0) && (tmpwritefd >= 0) && (*tmpreadfd >= 0)) { *readfd = *tmpreadfd; *writefd = tmpwritefd; } else { - res = -1; + res = -1; } } - + return res; } /* * Just concatenate a directory and a filename, sticking a "/" betwixt them */ -void -makepath(char *buf, const char *dir, const char *file) +void +makepath(char *buf, const char *dir, const char *file) { strcpy(buf, dir); strcat(buf, "/"); @@ -267,8 +265,8 @@ makepath(char *buf, const char *dir, const char *file) /* Execute a prep, cleanup, or test action; specific tricky notes below. * - * CREAT: comes to life and given length 1 - * READ: try to read one char + * CREAT: comes to life and given length 1 + * READ: try to read one char * WRITE: try to write TEST_STRING to file * LENGTHEN: make longer by LENGTHEN_SIZE * MMAP: mmap first 20 bytes of file, write HELLO_WORLD in @@ -278,8 +276,8 @@ makepath(char *buf, const char *dir, const char *file) * * * Several of these have hard-coded sizes. 
*/ -void* -execute_action(void *actionptr) +void* +execute_action(void *actionptr) { action_t *act = (action_t*)actionptr; void **args = act->act_args; @@ -289,186 +287,190 @@ execute_action(void *actionptr) void *addr; struct timeval tv; struct stat sstat; - + T_LOG("Beginning action of type %d: %s\n", act->act_id, get_action_name(act->act_id)); - + /* Let other thread get into kevent() sleep */ - if(SLEEP == act->act_dosleep) { + if (SLEEP == act->act_dosleep) { usleep(USLEEP_TIME); } - switch(act->act_id) { - case NOTHING: - res = 0; - break; - case CREAT: - if ((tmpfd = creat((char*)args[0], 0755)) == -1) { - T_LOG("creat() failed on \"%s\": %d (%s)\n", args[0], errno, strerror(errno)); - res = -1; - break; - } - ftruncate(tmpfd, 1); /* So that mmap() doesn't fool us */ - close(tmpfd); - res = 0; - break; - case MKDIR: - res = mkdir((char*)args[0], 0755); - break; - case READ: - if ((tmpfd = open((char*)args[0], O_RDONLY)) == -1) { - T_LOG("open(%s, O_RDONLY) failed: %d (%s)\n", args[0], errno, strerror(errno)); - res = -1; - break; - } - res = read(tmpfd, &c, 1); - res = (res == 1 ? 0 : -1); - close(tmpfd); + switch (act->act_id) { + case NOTHING: + res = 0; + break; + case CREAT: + if ((tmpfd = creat((char*)args[0], 0755)) == -1) { + T_LOG("creat() failed on \"%s\": %d (%s)\n", args[0], errno, strerror(errno)); + res = -1; break; - case WRITE: - if ((tmpfd = open((char*)args[0], O_RDWR)) == -1) { - T_LOG("open(%s, O_RDWR) failed: %d (%s)\n", args[0], errno, strerror(errno)); - res = -1; - break; - } - res = write(tmpfd, TEST_STRING, strlen(TEST_STRING)); - if (res == strlen(TEST_STRING)) { - res = 0; - } else { - res = -1; - } - close(tmpfd); + } + ftruncate(tmpfd, 1); /* So that mmap() doesn't fool us */ + close(tmpfd); + res = 0; + break; + case MKDIR: + res = mkdir((char*)args[0], 0755); + break; + case READ: + if ((tmpfd = open((char*)args[0], O_RDONLY)) == -1) { + T_LOG("open(%s, O_RDONLY) failed: %d (%s)\n", args[0], errno, strerror(errno)); + res = -1; break; - case WRITEFD: - res = write((int)act->act_fd, TEST_STRING, strlen(TEST_STRING)); - if (res == strlen(TEST_STRING)) { - res = 0; - } else { - res = -1; - } + } + res = read(tmpfd, &c, 1); + res = (res == 1 ? 0 : -1); + close(tmpfd); + break; + case WRITE: + if ((tmpfd = open((char*)args[0], O_RDWR)) == -1) { + T_LOG("open(%s, O_RDWR) failed: %d (%s)\n", args[0], errno, strerror(errno)); + res = -1; break; - case FILLFD: - while (write((int)act->act_fd, "a", 1) > 0); + } + res = write(tmpfd, TEST_STRING, strlen(TEST_STRING)); + if (res == strlen(TEST_STRING)) { res = 0; + } else { + res = -1; + } + close(tmpfd); + break; + case WRITEFD: + res = write((int)act->act_fd, TEST_STRING, strlen(TEST_STRING)); + if (res == strlen(TEST_STRING)) { + res = 0; + } else { + res = -1; + } + break; + case FILLFD: + while (write((int)act->act_fd, "a", 1) > 0) { + ; + } + res = 0; + break; + case UNLINK: + res = unlink((char*)args[0]); + break; + case LSEEK: + res = lseek((int)act->act_fd, (int)args[0], SEEK_SET); + res = (res == (int)args[0] ? 
0 : -1); + break; + case RMDIR: + res = rmdir((char*)args[0]); + break; + case MKFIFO: + res = mkfifo((char*)args[0], 0755); + break; + case LENGTHEN: + res = truncate((char*)args[0], LENGTHEN_SIZE); + break; + case TRUNC: + res = truncate((char*)args[0], 0); + break; + case SYMLINK: + res = symlink((char*)args[0], (char*)args[1]); + break; + case CHMOD: + res = chmod((char*)args[0], (int)args[1]); + break; + case CHOWN: + /* path, uid, gid */ + res = chown((char*)args[0], (int) args[1], (int) args[2]); + break; + case EXCHANGEDATA: + res = exchangedata((char*)args[0], (char*)args[1], 0); + break; + case RENAME: + res = rename((char*)args[0], (char*)args[1]); + break; + case OPEN: + if ((tmpfd = open((char*)args[0], O_RDONLY | O_CREAT)) == -1) { + T_LOG("open(%s, O_RDONLY | O_CREAT) failed: %d (%s)\n", args[0], errno, strerror(errno)); + res = -1; break; - case UNLINK: - res = unlink((char*)args[0]); - break; - case LSEEK: - res = lseek((int)act->act_fd, (int)args[0], SEEK_SET); - res = (res == (int)args[0] ? 0 : -1); - break; - case RMDIR: - res = rmdir((char*)args[0]); - break; - case MKFIFO: - res = mkfifo((char*)args[0], 0755); - break; - case LENGTHEN: - res = truncate((char*)args[0], LENGTHEN_SIZE); - break; - case TRUNC: - res = truncate((char*)args[0], 0); - break; - case SYMLINK: - res = symlink((char*)args[0], (char*)args[1]); - break; - case CHMOD: - res = chmod((char*)args[0], (int)args[1]); - break; - case CHOWN: - /* path, uid, gid */ - res = chown((char*)args[0], (int) args[1], (int) args[2]); - break; - case EXCHANGEDATA: - res = exchangedata((char*)args[0], (char*)args[1], 0); - break; - case RENAME: - res = rename((char*)args[0], (char*)args[1]); - break; - case OPEN: - if ((tmpfd = open((char*)args[0], O_RDONLY | O_CREAT)) == -1) { - T_LOG("open(%s, O_RDONLY | O_CREAT) failed: %d (%s)\n", args[0], errno, strerror(errno)); - res = -1; - break; - } - res = close(tmpfd); + } + res = close(tmpfd); + break; + case MMAP: + /* It had best already exist with nonzero size */ + if ((tmpfd = open((char*)args[0], O_RDWR)) == -1) { + T_LOG("open(%s, O_RDWR) failed: %d (%s)\n", args[0], errno, strerror(errno)); + res = -1; break; - case MMAP: - /* It had best already exist with nonzero size */ - if ((tmpfd = open((char*)args[0], O_RDWR)) == -1) { - T_LOG("open(%s, O_RDWR) failed: %d (%s)\n", args[0], errno, strerror(errno)); - res = -1; - break; - } - addr = mmap(0, 20, PROT_WRITE | PROT_READ, MAP_FILE | MAP_SHARED, tmpfd, 0); - if (addr != ((void*)-1)) { - res = 0; - if ((int)args[1]) { - strcpy((char*)addr, HELLO_WORLD); - msync(addr, 20, MS_SYNC); - } + } + addr = mmap(0, 20, PROT_WRITE | PROT_READ, MAP_FILE | MAP_SHARED, tmpfd, 0); + if (addr != ((void*)-1)) { + res = 0; + if ((int)args[1]) { + strcpy((char*)addr, HELLO_WORLD); + msync(addr, 20, MS_SYNC); } - close(tmpfd); - munmap(addr, 20); - break; - case SETXATTR: - res = setxattr((char*)args[0], KEY, (void*)VAL, strlen(VAL), - 0, 0); - break; - case UTIMES: - tv.tv_sec = time(NULL); - tv.tv_usec = 0; - res = utimes((char*)args[0], &tv); - break; - case STAT: - res = lstat((char*)args[0], &sstat); + } + close(tmpfd); + munmap(addr, 20); + break; + case SETXATTR: + res = setxattr((char*)args[0], KEY, (void*)VAL, strlen(VAL), + 0, 0); + break; + case UTIMES: + tv.tv_sec = time(NULL); + tv.tv_usec = 0; + res = utimes((char*)args[0], &tv); + break; + case STAT: + res = lstat((char*)args[0], &sstat); + break; + case HARDLINK: + res = link((char*)args[0], (char*)args[1]); + break; + case REVOKE: + if ((tmpfd = open((char*)args[0], 
O_RDONLY)) == -1) { + T_LOG("open(%s, O_RDONLY) failed: %d (%s)\n", args[0], errno, strerror(errno)); + res = -1; break; - case HARDLINK: - res = link((char*)args[0], (char*)args[1]); + } + res = revoke((char*)args[0]); + close(tmpfd); + break; + case FUNLOCK: + if ((tmpfd = open((char*)args[0], O_RDONLY)) == -1) { + T_LOG("open(%s, O_RDONLY) failed: %d (%s)\n", args[0], errno, strerror(errno)); + res = -1; break; - case REVOKE: - if ((tmpfd = open((char*)args[0], O_RDONLY)) == -1) { - T_LOG("open(%s, O_RDONLY) failed: %d (%s)\n", args[0], errno, strerror(errno)); - res = -1; - break; - } - res = revoke((char*)args[0]); + } + if ((res = flock(tmpfd, LOCK_EX)) == -1) { + T_LOG("flock() LOCK_EX failed: %d (%s)\n", errno, strerror(errno)); close(tmpfd); break; - case FUNLOCK: - if ((tmpfd = open((char*)args[0], O_RDONLY)) == -1) { - T_LOG("open(%s, O_RDONLY) failed: %d (%s)\n", args[0], errno, strerror(errno)); - res = -1; - break; - } - if ((res = flock(tmpfd, LOCK_EX)) == -1) { - T_LOG("flock() LOCK_EX failed: %d (%s)\n", errno, strerror(errno)); - close(tmpfd); - break; - } - if ((res = flock(tmpfd, LOCK_UN)) == -1) { - T_LOG("flock() LOCK_UN failed: %d (%s)\n", errno, strerror(errno)); - close(tmpfd); - break; - } + } + if ((res = flock(tmpfd, LOCK_UN)) == -1) { + T_LOG("flock() LOCK_UN failed: %d (%s)\n", errno, strerror(errno)); close(tmpfd); break; - default: - res = -1; - break; + } + close(tmpfd); + break; + default: + res = -1; + break; } thread_status = res; - return (&thread_status); + return &thread_status; } /* * Read until the end of a file, for EVFILT_READ purposes (considers file position) */ -void -read_to_end(int fd) +void +read_to_end(int fd) { char buf[50]; - while (read(fd, buf, sizeof(buf)) > 0); + while (read(fd, buf, sizeof(buf)) > 0) { + ; + } } /* @@ -476,15 +478,15 @@ read_to_end(int fd) * of actions. "failout" parameter indicates whether to stop if one fails. */ int -execute_action_list(action_t *actions, int nactions, int failout) +execute_action_list(action_t *actions, int nactions, int failout) { int i, res; for (i = 0, res = 0; (0 == res || (!failout)) && (i < nactions); i++) { T_LOG("Starting prep action %d\n", i); res = *((int *) execute_action(&(actions[i]))); - if(res != 0) { + if (res != 0) { T_LOG("Action list failed on step %d. res = %d errno = %d (%s)\n", i, res, - errno, strerror(errno)); + errno, strerror(errno)); } else { T_LOG("Action list work succeeded on step %d.\n", i); } @@ -507,13 +509,13 @@ execute_test(test_t *test) int *status; memset(&evlist, 0, sizeof(evlist)); - + T_LOG("[BEGIN] %s\n", test->t_testname); T_LOG(test->t_want_event ? "Expecting an event.\n" : "Not expecting events.\n"); - + res = execute_action_list(test->t_prep_actions, test->t_n_prep_actions, 1); - + /* If prep succeeded */ if (0 == res) { /* Create kqueue for kqueue tests*/ @@ -522,9 +524,8 @@ execute_test(test_t *test) T_LOG("kqueue() failed: %d (%s)\n", errno, strerror(errno)); } } - + if ((test->t_is_poll_test) || kqfd >= 0) { - /* Open the file we're to monitor. 
Fifos get special handling */ if (test->t_file_is_fifo) { filefd = -1; @@ -532,19 +533,19 @@ execute_test(test_t *test) } else { if ((filefd = open(test->t_watchfile, O_RDONLY | O_SYMLINK)) == -1) { T_LOG("open() of watchfile %s failed: %d (%s)\n", test->t_watchfile, - errno, strerror(errno)); + errno, strerror(errno)); } } - + if (filefd >= 0) { T_LOG("Opened file to monitor.\n"); - - /* - * Fill in the fd to monitor once you know it + + /* + * Fill in the fd to monitor once you know it * If it's a fifo test, then the helper is definitely going to want the write end. */ test->t_helpthreadact.act_fd = (writefd >= 0 ? writefd : filefd); - + if (test->t_read_to_end_first) { read_to_end(filefd); } else if (test->t_write_some_data) { @@ -553,18 +554,18 @@ execute_test(test_t *test) dowr.act_fd = writefd; (void)execute_action(&dowr); } - + /* Helper modifies the file that we're listening on (sleeps first, in general) */ thread_status = 0; res = pthread_create(&thr, NULL, execute_action, (void*) &test->t_helpthreadact); if (0 == res) { T_LOG("Created helper thread.\n"); - + /* This is ugly business to hack on filling up a FIFO */ if (test->t_extra_sleep_hack) { usleep(USLEEP_TIME); } - + if (test->t_is_poll_test) { struct pollfd pl; pl.fd = filefd; @@ -576,11 +577,11 @@ execute_test(test_t *test) res = cnt; } } else { - test->t_union.tu_kev.ident = filefd; - cnt = kevent(kqfd, &test->t_union.tu_kev, 1, &evlist, 1, &ts); + test->t_union.tu_kev.ident = filefd; + cnt = kevent(kqfd, &test->t_union.tu_kev, 1, &evlist, 1, &ts); T_LOG("Finished kevent() call.\n"); - - if ((cnt < 0) || (evlist.flags & EV_ERROR)) { + + if ((cnt < 0) || (evlist.flags & EV_ERROR)) { T_LOG("kevent() call failed.\n"); if (cnt < 0) { T_LOG("error is in errno, %s\n", strerror(errno)); @@ -590,13 +591,13 @@ execute_test(test_t *test) res = cnt; } } - + /* Success only if you've succeeded to this point AND joined AND other thread is happy*/ status = NULL; res2 = pthread_join(thr, (void **)&status); if (res2 != 0) { T_LOG("Couldn't join helper thread: %d (%s).\n", res2, - strerror(res2)); + strerror(res2)); } else if (*status) { T_LOG("Helper action had result %d\n", *status); } @@ -604,7 +605,7 @@ execute_test(test_t *test) } else { T_LOG("Couldn't start thread: %d (%s).\n", res, strerror(res)); } - + close(filefd); if (test->t_file_is_fifo) { close(writefd); @@ -621,24 +622,23 @@ execute_test(test_t *test) res = -1; } } - + /* Cleanup work */ execute_action_list(test->t_cleanup_actions, test->t_n_cleanup_actions, 0); - + /* Success if nothing failed and we either received or did not receive event, - * as expected + * as expected */ if (0 == res) { T_LOG(cnt > 0 ? "Got an event.\n" : "Did not get an event.\n"); if (((cnt > 0) && (test->t_want_event)) || ((cnt == 0) && (!test->t_want_event))) { if ((!test->t_is_poll_test) && (test->t_union.tu_kev.filter == EVFILT_READ || test->t_union.tu_kev.filter == EVFILT_WRITE) - && (test->t_nbytes) && (test->t_nbytes != evlist.data)) { + && (test->t_nbytes) && (test->t_nbytes != evlist.data)) { T_LOG("Read wrong number of bytes available. Wanted %d, got %d\n", test->t_nbytes, evlist.data); retval = -1; } else { retval = 0; } - } else { T_LOG("Got unexpected event or lack thereof.\n"); retval = -1; @@ -650,7 +650,7 @@ execute_test(test_t *test) if (test->t_nondeterministic) { T_LOG("XXX non-deterministic test result = %d (%s)\n", retval, - (retval == 0) ? "pass" : "fail"); + (retval == 0) ? 
"pass" : "fail"); T_MAYFAIL; } else { if (test->t_known_failure) { @@ -666,7 +666,7 @@ execute_test(test_t *test) } T_LOG("Test %s done with result %d.\n", test->t_testname, retval); - return (retval); + return retval; } @@ -682,7 +682,7 @@ init_test_common(test_t *tst, char *testname, char *watchfile, int nprep, int nc tst->t_n_prep_actions = nprep; tst->t_n_cleanup_actions = nclean; tst->t_want_event = (want > 0); - + if (ispoll) { tst->t_is_poll_test = 1; tst->t_union.tu_pollevents = (short)event; @@ -703,9 +703,9 @@ init_test_common(test_t *tst, char *testname, char *watchfile, int nprep, int nc * * "want" does double duty as whether you want an event and how many bytes you might want to read * "event" is either an event flag (e.g. NOTE_WRITE) or EVFILT_READ - */ -void -init_test(test_t *tst, char *testname, char *watchfile, int nprep, int nclean, int event, int want) + */ +void +init_test(test_t *tst, char *testname, char *watchfile, int nprep, int nclean, int event, int want) { init_test_common(tst, testname, watchfile, nprep, nclean, event, want, 0); } @@ -714,28 +714,28 @@ init_test(test_t *tst, char *testname, char *watchfile, int nprep, int nclean, i * Same as above, but for a poll() test */ void -init_poll_test(test_t *tst, char *testname, char *watchfile, int nprep, int nclean, int event, int want) +init_poll_test(test_t *tst, char *testname, char *watchfile, int nprep, int nclean, int event, int want) { init_test_common(tst, testname, watchfile, nprep, nclean, event, want, 1); } -void -run_note_delete_tests() +void +run_note_delete_tests() { test_t test; - + init_test(&test, "1.1.2: unlink a file", FILE1, 1, 0, NOTE_DELETE, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "1.1.3: rmdir a dir", DIR1, 1, 0, NOTE_DELETE, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + init_test(&test, "1.1.4: rename one file over another", FILE2, 2, 1, NOTE_DELETE, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); @@ -743,7 +743,7 @@ run_note_delete_tests() init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - + init_test(&test, "1.1.5: rename one dir over another", DIR2, 2, 1, NOTE_DELETE, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); @@ -751,14 +751,14 @@ run_note_delete_tests() init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)DIR1, (void*)DIR2); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR2, NULL); execute_test(&test); - + /* Do FIFO stuff here */ init_test(&test, "1.1.6: make a fifo, unlink it", FILE1, 1, 0, NOTE_DELETE, YES_EVENT); test.t_file_is_fifo = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, UNLINK, 1, (void*)FILE1); execute_test(&test); - + init_test(&test, "1.1.7: rename a file over a fifo", FILE1, 2, 1, NOTE_DELETE, YES_EVENT); test.t_nondeterministic = 1; test.t_file_is_fifo = 1; @@ -767,7 +767,7 @@ 
run_note_delete_tests() init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE2, (void*)FILE1); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "1.1.8: unlink a symlink to a file", FILE2, 2, 1, NOTE_DELETE, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); @@ -775,34 +775,34 @@ run_note_delete_tests() init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)FILE2, NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + /* ================= */ - + init_test(&test, "1.2.1: Straight-up rename file", FILE1, 1, 1, NOTE_DELETE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); + init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, (void*)NULL); execute_test(&test); - + init_test(&test, "1.2.2: Straight-up rename dir", DIR1, 1, 1, NOTE_DELETE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)DIR1, (void*)DIR2); + init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)DIR1, (void*)DIR2); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR2, (void*)NULL); execute_test(&test); - + init_test(&test, "1.2.3: Null action on file", FILE1, 1, 1, NOTE_DELETE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, NOTHING, 2, NULL, NULL); /* The null action */ init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + init_test(&test, "1.2.4: Rename one file over another: watch the file that lives", FILE1, 2, 1, NOTE_DELETE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, CREAT, 2, (void*)FILE2, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - + init_test(&test, "1.2.5: Rename one dir over another, watch the dir that lives", DIR1, 2, 1, NOTE_DELETE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, MKDIR, 2, (void*)DIR2, (void*)NULL); @@ -815,24 +815,24 @@ path_on_apfs(const char *path) { struct statfs sfs = {}; T_QUIET; T_ASSERT_POSIX_SUCCESS(statfs(path, &sfs), NULL); - return (memcmp(&sfs.f_fstypename[0], "apfs", strlen("apfs")) == 0); + return memcmp(&sfs.f_fstypename[0], "apfs", strlen("apfs")) == 0; } -void +void run_note_write_tests() { char pathbuf[50]; char otherpathbuf[50]; - + test_t test; - + init_test(&test, "2.1.1: Straight-up write to a file", FILE1, 1, 1, NOTE_WRITE, YES_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, WRITE, 2, (void*)FILE1, NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - - + + makepath(pathbuf, DIR1, FILE1); init_test(&test, "2.1.2: creat() file inside a dir", DIR1, 1, 2, 
NOTE_WRITE, YES_EVENT); test.t_known_failure = 1; @@ -841,7 +841,7 @@ run_note_write_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "2.1.3: open() file inside a dir", DIR1, 1, 2, NOTE_WRITE, YES_EVENT); test.t_known_failure = 1; @@ -850,7 +850,7 @@ run_note_write_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "2.1.4: unlink a file from a dir", DIR1, 2, 1, NOTE_WRITE, YES_EVENT); test.t_known_failure = 1; @@ -859,7 +859,7 @@ run_note_write_tests() init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)pathbuf, NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); makepath(otherpathbuf, DIR1, FILE2); init_test(&test, "2.1.5: rename a file in a dir", DIR1, 2, 2, NOTE_WRITE, YES_EVENT); @@ -870,7 +870,7 @@ run_note_write_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)otherpathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "2.1.6: rename a file to outside of a dir", DIR1, 2, 2, NOTE_WRITE, YES_EVENT); test.t_known_failure = 1; @@ -880,7 +880,7 @@ run_note_write_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "2.1.7: rename a file into a dir", DIR1, 2, 2, NOTE_WRITE, YES_EVENT); test.t_known_failure = 1; @@ -890,7 +890,7 @@ run_note_write_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "2.1.9: unlink a fifo from a dir", DIR1, 2, 1, NOTE_WRITE, YES_EVENT); test.t_known_failure = 1; @@ -899,7 +899,7 @@ run_note_write_tests() init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)pathbuf, NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "2.1.10: make symlink in a dir", DIR1, 1, 2, NOTE_WRITE, YES_EVENT); test.t_known_failure = 1; @@ -908,7 +908,7 @@ run_note_write_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + init_test(&test, "2.1.12: write to a FIFO", FILE1, 1, 1, NOTE_WRITE, YES_EVENT); test.t_known_failure = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 2, (void*)FILE1, (void*)NULL); @@ -916,8 +916,8 @@ run_note_write_tests() init_action(&test.t_helpthreadact, SLEEP, WRITEFD, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - - + + makepath(pathbuf, DIR1, FILE1); init_test(&test, "2.1.13: delete a symlink in a dir", DIR1, 2, 1, NOTE_WRITE, YES_EVENT); 
test.t_known_failure = 1; @@ -949,59 +949,59 @@ run_note_write_tests() init_action(&test.t_helpthreadact, SLEEP, MMAP, 2, (void*)FILE1, (void*)1); /* 1 -> "modify it"*/ init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + /*================= no-event tests ==================*/ init_test(&test, "2.2.1: just open and close existing file", FILE1, 1, 1, NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, OPEN, 2, (void*)FILE1, NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + init_test(&test, "2.2.2: read from existing file", FILE1, 1, 1, NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, READ, 2, (void*)FILE1, NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + init_test(&test, "2.2.3: rename existing file", FILE1, 1, 1, NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, (void*)NULL); execute_test(&test); - + init_test(&test, "2.2.4: just open and close dir", DIR1, 1, 1, NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, OPEN, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + /* There are no tests 2.2.5 or 2.2.6 */ - + init_test(&test, "2.2.7: rename a dir", DIR1, 1, 1, NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)DIR1, (void*)DIR2); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR2, (void*)NULL); execute_test(&test); - + init_test(&test, "2.2.8: rename a fifo", FILE1, 1, 1, NOTE_WRITE, NO_EVENT); test.t_file_is_fifo = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, (void*)NULL); execute_test(&test); - + init_test(&test, "2.2.9: unlink a fifo", FILE1, 1, 0, NOTE_WRITE, NO_EVENT); test.t_file_is_fifo = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, UNLINK,1, (void*)FILE1); + init_action(&test.t_helpthreadact, SLEEP, UNLINK, 1, (void*)FILE1); execute_test(&test); - + init_test(&test, "2.2.10: chmod a file", FILE1, 1, 1, NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, CHMOD, 2, (void*)FILE1, (void*)0700); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + struct passwd *pwd = getpwnam("local"); if (pwd != NULL) { @@ -1012,13 +1012,13 @@ run_note_write_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); } - + init_test(&test, "2.2.12: chmod a dir", DIR1, 1, 1, 
NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, CHMOD, 2, (void*)DIR1, (void*)0700); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + if (pwd != NULL) { init_test(&test, "2.2.13: chown a dir", DIR1, 2, 1, NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); @@ -1027,11 +1027,11 @@ run_note_write_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); } - + T_LOG("MMAP will never give a notification on HFS.\n"); init_test(&test, "2.1.14: mmap() a file but do not change it", FILE1, 1, 1, NOTE_WRITE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, MMAP, 2, (void*)FILE1, (void*)0); + init_action(&test.t_helpthreadact, SLEEP, MMAP, 2, (void*)FILE1, (void*)0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); } @@ -1041,77 +1041,77 @@ run_note_extend_tests() { test_t test; char pathbuf[50]; - + T_LOG("THESE TESTS MAY FAIL ON HFS\n"); - + init_test(&test, "3.1.1: write beyond the end of a file", FILE1, 1, 1, NOTE_EXTEND, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, WRITE, 2, (void*)FILE1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, WRITE, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + /* - * We won't concern ourselves with lengthening directories: commenting these out + * We won't concern ourselves with lengthening directories: commenting these out + * * - - makepath(pathbuf, DIR1, FILE1); - init_test(&test, "3.1.2: add a file to a directory with creat()", DIR1, 1, 2, NOTE_EXTEND, YES_EVENT); - init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, CREAT, 2, (void*)pathbuf, (void*)NULL); - init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); - init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); - execute_test(&test); - - makepath(pathbuf, DIR1, FILE1); - init_test(&test, "3.1.3: add a file to a directory with open()", DIR1, 1, 2, NOTE_EXTEND, YES_EVENT); - init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, CREAT, 2, (void*)pathbuf, (void*)NULL); - init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); - init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); - execute_test(&test); - - makepath(pathbuf, DIR1, FILE1); - init_test(&test, "3.1.4: add a file to a directory with rename()", DIR1, 2, 2, NOTE_EXTEND, YES_EVENT); - init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&(test.t_prep_actions[1]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)pathbuf); - init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); - init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); - 
execute_test(&test); + * makepath(pathbuf, DIR1, FILE1); + * init_test(&test, "3.1.2: add a file to a directory with creat()", DIR1, 1, 2, NOTE_EXTEND, YES_EVENT); + * init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); + * init_action(&test.t_helpthreadact, SLEEP, CREAT, 2, (void*)pathbuf, (void*)NULL); + * init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); + * init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); + * execute_test(&test); + * + * makepath(pathbuf, DIR1, FILE1); + * init_test(&test, "3.1.3: add a file to a directory with open()", DIR1, 1, 2, NOTE_EXTEND, YES_EVENT); + * init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); + * init_action(&test.t_helpthreadact, SLEEP, CREAT, 2, (void*)pathbuf, (void*)NULL); + * init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); + * init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); + * execute_test(&test); + * + * makepath(pathbuf, DIR1, FILE1); + * init_test(&test, "3.1.4: add a file to a directory with rename()", DIR1, 2, 2, NOTE_EXTEND, YES_EVENT); + * init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); + * init_action(&(test.t_prep_actions[1]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); + * init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)pathbuf); + * init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); + * init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); + * execute_test(&test); */ - + /* 3.1.5: a placeholder for a potential kernel test */ /* - makepath(pathbuf, DIR1, DIR2); - init_test(&test, "3.1.6: add a file to a directory with mkdir()", DIR1, 1, 2, NOTE_EXTEND, YES_EVENT); - init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, MKDIR, 2, (void*)pathbuf, (void*)NULL); - init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)pathbuf, (void*)NULL); - init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); - execute_test(&test); + * makepath(pathbuf, DIR1, DIR2); + * init_test(&test, "3.1.6: add a file to a directory with mkdir()", DIR1, 1, 2, NOTE_EXTEND, YES_EVENT); + * init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); + * init_action(&test.t_helpthreadact, SLEEP, MKDIR, 2, (void*)pathbuf, (void*)NULL); + * init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)pathbuf, (void*)NULL); + * init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); + * execute_test(&test); */ init_test(&test, "3.1.7: lengthen a file with truncate()", FILE1, 1, 1, NOTE_EXTEND, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, LENGTHEN, 2, FILE1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, LENGTHEN, 2, FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - - + + /** ========== NO EVENT SECTION ============== **/ init_test(&test, "3.2.1: setxattr() a file", FILE1, 1, 1, NOTE_EXTEND, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, 
SETXATTR, 2, FILE1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, SETXATTR, 2, FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + init_test(&test, "3.2.2: chmod a file", FILE1, 1, 1, NOTE_EXTEND, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, CHMOD, 2, (void*)FILE1, (void*)0700); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + struct passwd *pwd = getpwnam("local"); if (pwd != NULL) { init_test(&test, "3.2.3: chown a file", FILE1, 2, 1, NOTE_EXTEND, NO_EVENT); @@ -1123,13 +1123,13 @@ run_note_extend_tests() } else { T_LOG("Couldn't getpwnam for user \"local\"\n"); } - + init_test(&test, "3.2.4: chmod a dir", DIR1, 1, 1, NOTE_EXTEND, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, CHMOD, 2, (void*)DIR1, (void*)0700); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + if (pwd != NULL) { init_test(&test, "3.2.5: chown a dir", DIR1, 2, 1, NOTE_EXTEND, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); @@ -1138,10 +1138,10 @@ run_note_extend_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); } - + init_test(&test, "3.2.6: TRUNC a file with truncate()", FILE1, 1, 1, NOTE_EXTEND, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, TRUNC, 2, FILE1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, TRUNC, 2, FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); } @@ -1151,14 +1151,14 @@ run_note_attrib_tests() { test_t test; char pathbuf[50]; - + init_test(&test, "4.1.1: chmod a file", FILE1, 1, 1, NOTE_ATTRIB, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, CHMOD, 2, FILE1, (void*)0700); + init_action(&test.t_helpthreadact, SLEEP, CHMOD, 2, FILE1, (void*)0700); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + struct passwd *pwd = getpwnam("local"); if (pwd != NULL) { init_test(&test, "4.1.2: chown a file", FILE1, 2, 1, NOTE_ATTRIB, YES_EVENT); @@ -1174,7 +1174,7 @@ run_note_attrib_tests() init_action(&(test.t_helpthreadact), SLEEP, CHMOD, 2, (void*)DIR1, (void*)0700); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + if (pwd != NULL) { init_test(&test, "4.1.4: chown a dir", DIR1, 2, 1, NOTE_ATTRIB, YES_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); @@ -1183,18 +1183,18 @@ run_note_attrib_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); } - + init_test(&test, "4.1.5: setxattr on a file", FILE1, 1, 1, NOTE_ATTRIB, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, SETXATTR, 2, (void*)FILE1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, SETXATTR, 
2, (void*)FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + init_test(&test, "4.1.6: setxattr on a dir", DIR1, 1, 1, NOTE_ATTRIB, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, SETXATTR, 2, (void*)DIR1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, SETXATTR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); @@ -1203,7 +1203,7 @@ run_note_attrib_tests() init_test(&test, "4.1.7: exchangedata", FILE1, 2, 2, NOTE_ATTRIB, YES_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, CREAT, 2, (void*)FILE2, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, EXCHANGEDATA, 2, (void*)FILE1, (void*)FILE2); + init_action(&test.t_helpthreadact, SLEEP, EXCHANGEDATA, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, UNLINK, 2, (void*)FILE2, (void*)NULL); execute_test(&test); @@ -1212,130 +1212,128 @@ run_note_attrib_tests() init_test(&test, "4.1.8: utimes on a file", FILE1, 1, 1, NOTE_ATTRIB, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, UTIMES, 2, (void*)FILE1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, UTIMES, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + init_test(&test, "4.1.9: utimes on a dir", DIR1, 1, 1, NOTE_ATTRIB, YES_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, UTIMES, 2, (void*)DIR1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, UTIMES, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - - + + /* ====== NO EVENT TESTS ========== */ - + init_test(&test, "4.2.1: rename a file", FILE1, 1, 1, NOTE_ATTRIB, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - + init_test(&test, "4.2.2: open (do not change) a file", FILE1, 1, 1, NOTE_ATTRIB, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, OPEN, 2, (void*)FILE1, NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "4.2.3: stat a file", FILE1, 1, 1, NOTE_ATTRIB, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, STAT, 2, (void*)FILE1, NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "4.2.4: unlink a file", FILE1, 1, 0, NOTE_ATTRIB, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, 
UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "4.2.5: write to a file", FILE1, 1, 1, NOTE_ATTRIB, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, WRITE, 2, (void*)FILE1, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, WRITE, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - + T_LOG("EXPECT SPURIOUS NOTE_ATTRIB EVENTS FROM DIRECTORY OPERATIONS on HFS.\n"); init_test(&test, "4.2.6: add a file to a directory with creat()", DIR1, 1, 2, NOTE_ATTRIB, NO_EVENT); test.t_known_failure = 1; makepath(pathbuf, DIR1, FILE1); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, CREAT, 2, (void*)pathbuf, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, CREAT, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + init_test(&test, "4.2.7: mkdir in a dir", DIR1, 1, 2, NOTE_ATTRIB, NO_EVENT); test.t_known_failure = 1; makepath(pathbuf, DIR1, DIR2); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, MKDIR, 2, (void*)pathbuf, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, MKDIR, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + init_test(&test, "4.2.8: add a symlink to a directory", DIR1, 1, 2, NOTE_ATTRIB, NO_EVENT); test.t_known_failure = 1; makepath(pathbuf, DIR1, FILE1); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, SYMLINK, 2, (void*)DOTDOT, (void*)pathbuf); + init_action(&test.t_helpthreadact, SLEEP, SYMLINK, 2, (void*)DOTDOT, (void*)pathbuf); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + init_test(&test, "4.2.9: rename into a dir()", DIR1, 2, 2, NOTE_ATTRIB, NO_EVENT); test.t_known_failure = 1; makepath(pathbuf, DIR1, FILE1); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)pathbuf); + init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)pathbuf); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + init_test(&test, "4.2.10: unlink() file from dir", DIR1, 2, 1, NOTE_ATTRIB, NO_EVENT); test.t_known_failure = 1; makepath(pathbuf, DIR1, FILE1); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, CREAT, 2, (void*)pathbuf, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); + init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)pathbuf, 
(void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + init_test(&test, "4.2.11: mkfifo in a directory", DIR1, 1, 2, NOTE_ATTRIB, NO_EVENT); test.t_known_failure = 1; makepath(pathbuf, DIR1, FILE1); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); - init_action(&test.t_helpthreadact, SLEEP, MKFIFO, 1, (void*)pathbuf); + init_action(&test.t_helpthreadact, SLEEP, MKFIFO, 1, (void*)pathbuf); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - - } -void +void run_note_link_tests() { test_t test; char pathbuf[50]; char otherpathbuf[50]; - + T_LOG("HFS DOES NOT HANDLE UNLINK CORRECTLY...\n"); init_test(&test, "5.1.1: unlink() a file", FILE1, 1, 0, NOTE_LINK, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)FILE1, (void*)NULL); execute_test(&test); - - + + init_test(&test, "5.1.1.5: link A to B, watch A, remove B", FILE1, 2, 1, NOTE_LINK, YES_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, HARDLINK, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)FILE2, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "5.1.2: link() to a file", FILE1, 1, 2, NOTE_LINK, YES_EVENT); #if TARGET_OS_WATCH test.t_nondeterministic = 1; @@ -1345,7 +1343,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - + makepath(pathbuf, DIR1, DIR2); init_test(&test, "5.1.3: make one dir in another", DIR1, 1, 2, NOTE_LINK, YES_EVENT); test.t_known_failure = 1; @@ -1354,7 +1352,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)pathbuf, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + makepath(pathbuf, DIR1, DIR2); init_test(&test, "5.1.4: rmdir a dir from within another", DIR1, 2, 1, NOTE_LINK, YES_EVENT); test.t_known_failure = 1; @@ -1363,7 +1361,7 @@ run_note_link_tests() init_action(&test.t_helpthreadact, SLEEP, RMDIR, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + makepath(pathbuf, DIR1, DIR2); makepath(otherpathbuf, DIR1, DIR1); init_test(&test, "5.1.5: rename dir A over dir B inside dir C", DIR1, 3, 2, NOTE_LINK, YES_EVENT); @@ -1375,7 +1373,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)otherpathbuf, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + T_LOG("HFS bypasses hfs_makenode to create in target, so misses knote.\n"); makepath(pathbuf, DIR1, DIR2); init_test(&test, "5.1.6: rename one dir into another", DIR1, 2, 2, NOTE_LINK, YES_EVENT); @@ -1386,7 +1384,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)pathbuf, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + 
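/*
 * Aside: a minimal, standalone sketch (not part of this patch) of the
 * registration that execute_test() ultimately performs for these
 * NOTE_LINK cases -- open the watched directory, arm an EVFILT_VNODE
 * filter for link-count changes, and wait for the helper thread's
 * mkdir/rmdir to fire it. The helper name and the inlined 4-second
 * timeout (WAIT_TIME above) are illustrative only; error handling is
 * elided.
 */
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>

static int
watch_note_link_once(const char *dirpath)
{
	struct kevent kev, out;
	struct timespec ts = { .tv_sec = 4, .tv_nsec = 0 };

	int kq = kqueue();
	/* EVFILT_VNODE takes an open fd; O_EVTONLY is the usual watch-only mode */
	int fd = open(dirpath, O_RDONLY);

	/*
	 * NOTE_LINK: deliver an event when the vnode's link count changes,
	 * e.g. a subdirectory is created or removed inside dirpath.
	 * EV_CLEAR makes delivery edge-triggered (re-arms after each read).
	 */
	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR, NOTE_LINK, 0, NULL);

	/* A single kevent() call both registers the change and waits for one event */
	int n = kevent(kq, &kev, 1, &out, 1, &ts);

	close(fd);
	close(kq);
	return n; /* 1 = event fired, 0 = timed out, -1 = error */
}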
T_LOG("HFS bypasses hfs_removedir to remove from source, so misses knote.\n"); makepath(pathbuf, DIR1, DIR2); init_test(&test, "5.1.7: rename one dir out of another", DIR1, 2, 2, NOTE_LINK, YES_EVENT); @@ -1397,13 +1395,13 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR2, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + init_test(&test, "5.1.8: rmdir a dir", DIR1, 1, 0, NOTE_LINK, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RMDIR, 2, (void*)DIR1, (void*)NULL); execute_test(&test); - + /* ============= NO EVENT SECTION ============== */ makepath(pathbuf, DIR1, FILE1); init_test(&test, "5.2.1: make a file in a dir", DIR1, 1, 2, NOTE_LINK, NO_EVENT); @@ -1413,7 +1411,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "5.2.2: unlink a file in a dir", DIR1, 2, 1, NOTE_LINK, NO_EVENT); test.t_known_failure = 1; @@ -1422,7 +1420,7 @@ run_note_link_tests() init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)pathbuf, (void*)NULL); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); makepath(otherpathbuf, DIR1, FILE2); init_test(&test, "5.2.3: rename a file within a dir", DIR1, 2, 2, NOTE_LINK, NO_EVENT); @@ -1433,7 +1431,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)otherpathbuf, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "5.2.4: rename a file into a dir", DIR1, 2, 2, NOTE_LINK, NO_EVENT); test.t_known_failure = 1; @@ -1443,7 +1441,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + makepath(pathbuf, DIR1, FILE1); init_test(&test, "5.2.5: make a symlink in a dir", DIR1, 1, 2, NOTE_LINK, NO_EVENT); test.t_known_failure = 1; @@ -1452,7 +1450,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)pathbuf, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + init_test(&test, "5.2.6: make a symlink to a dir", DIR1, 1, 2, NOTE_LINK, NO_EVENT); test.t_known_failure = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); @@ -1460,7 +1458,7 @@ run_note_link_tests() init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + init_test(&test, "5.2.7: make a symlink to a file", FILE1, 1, 2, NOTE_LINK, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, SYMLINK, 2, (void*)FILE1, (void*)FILE2); @@ -1470,31 +1468,31 @@ run_note_link_tests() } void -run_note_rename_tests() +run_note_rename_tests() { test_t test; - + init_test(&test, "6.1.1: rename a file", FILE1, 1, 1, NOTE_RENAME, YES_EVENT); test.t_nondeterministic = 1; 
init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - + init_test(&test, "6.1.2: rename a dir", DIR1, 1, 1, NOTE_RENAME, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)DIR1, (void*)DIR2); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR2, NULL); execute_test(&test); - + init_test(&test, "6.1.3: rename one file over another", FILE1, 2, 1, NOTE_RENAME, YES_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, CREAT, 2, (void*)FILE2, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - + init_test(&test, "6.1.4: rename one dir over another", DIR1, 2, 1, NOTE_RENAME, YES_EVENT); test.t_nondeterministic = 1; init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); @@ -1502,48 +1500,48 @@ run_note_rename_tests() init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)DIR1, (void*)DIR2); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR2, NULL); execute_test(&test); - + /* ========= NO EVENT SECTION =========== */ - + init_test(&test, "6.2.1: unlink a file", FILE1, 1, 0, NOTE_RENAME, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "6.2.2: rmdir a dir", DIR1, 1, 0, NOTE_RENAME, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RMDIR, 2, (void*)DIR1, NULL); execute_test(&test); - + init_test(&test, "6.2.3: link() to a file", FILE1, 1, 2, NOTE_RENAME, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, HARDLINK, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); init_action(&test.t_cleanup_actions[1], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - - init_test(&test, "6.2.4: rename one file over another: watch deceased", - FILE2, 2, 1, NOTE_RENAME, NO_EVENT); + + init_test(&test, "6.2.4: rename one file over another: watch deceased", + FILE2, 2, 1, NOTE_RENAME, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, CREAT, 2, (void*)FILE2, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - - init_test(&test, "6.2.5: rename one dir over another: watch deceased", - DIR2, 2, 1, NOTE_RENAME, NO_EVENT); + + init_test(&test, "6.2.5: rename one dir over another: watch deceased", + DIR2, 2, 1, NOTE_RENAME, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, MKDIR, 2, (void*)DIR2, (void*)NULL); 
init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)DIR1, (void*)DIR2); init_action(&test.t_cleanup_actions[0], NOSLEEP, RMDIR, 2, (void*)DIR2, NULL); execute_test(&test); - + init_test(&test, "6.2.6: rename a file to itself", FILE1, 1, 1, NOTE_RENAME, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 2, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE1); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "6.2.7: rename a dir to itself", DIR1, 1, 1, NOTE_RENAME, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKDIR, 2, (void*)DIR1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)DIR1, (void*)DIR1); @@ -1551,8 +1549,8 @@ run_note_rename_tests() execute_test(&test); } -void -run_note_revoke_tests() +void +run_note_revoke_tests() { test_t test; init_test(&test, "7.1.1: revoke file", FILE1, 1, 1, NOTE_REVOKE, YES_EVENT); @@ -1560,7 +1558,7 @@ run_note_revoke_tests() init_action(&test.t_helpthreadact, SLEEP, REVOKE, 1, (void*)FILE1); init_action(&(test.t_cleanup_actions[0]), NOSLEEP, UNLINK, 1, (void*)FILE1); execute_test(&test); - + init_test(&test, "7.2.1: delete file", FILE1, 1, 0, NOTE_REVOKE, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1); init_action(&test.t_helpthreadact, SLEEP, UNLINK, 1, (void*)FILE1); @@ -1569,7 +1567,7 @@ run_note_revoke_tests() void -run_evfilt_read_tests() +run_evfilt_read_tests() { test_t test; init_test(&test, "8.1.1: how much data in file of length LENGTHEN_SIZE?", FILE1, 2, 1, EVFILT_READ, LENGTHEN_SIZE); @@ -1578,21 +1576,21 @@ run_evfilt_read_tests() init_action(&test.t_helpthreadact, SLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "8.1.2: block, then write to file", FILE1, 2, 1, EVFILT_READ, strlen(TEST_STRING)); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1); init_action(&(test.t_prep_actions[1]), NOSLEEP, TRUNC, 1, (void*)FILE1); init_action(&test.t_helpthreadact, SLEEP, WRITE, 1, (void*)FILE1); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "8.1.3: block, then extend", FILE1, 2, 1, EVFILT_READ, LENGTHEN_SIZE); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1); init_action(&(test.t_prep_actions[1]), NOSLEEP, TRUNC, 1, (void*)FILE1); init_action(&test.t_helpthreadact, SLEEP, LENGTHEN, 1, (void*)FILE1); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "8.1.4: block, then seek to beginning", FILE1, 2, 1, EVFILT_READ, strlen(TEST_STRING)); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1); init_action(&(test.t_prep_actions[1]), NOSLEEP, WRITE, 1, (void*)FILE1); @@ -1600,15 +1598,15 @@ run_evfilt_read_tests() init_action(&test.t_helpthreadact, SLEEP, LSEEK, 1, (void*)0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - - + + init_test(&test, "8.1.5: block, then write to fifo", FILE1, 1, 1, EVFILT_READ, strlen(TEST_STRING)); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1); test.t_file_is_fifo = 1; init_action(&test.t_helpthreadact, SLEEP, WRITE, 1, (void*)FILE1); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); 
execute_test(&test); - + /* No result section... */ init_test(&test, "8.2.1: just rename", FILE1, 2, 1, EVFILT_READ, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1); @@ -1616,13 +1614,13 @@ run_evfilt_read_tests() init_action(&test.t_helpthreadact, SLEEP, RENAME, 2, (void*)FILE1, (void*)FILE2); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE2, NULL); execute_test(&test); - + init_test(&test, "8.2.2: delete file", FILE1, 2, 0, EVFILT_READ, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1); init_action(&(test.t_prep_actions[1]), NOSLEEP, TRUNC, 1, (void*)FILE1); init_action(&test.t_helpthreadact, SLEEP, UNLINK, 1, (void*)FILE1); execute_test(&test); - + init_test(&test, "8.2.3: write to beginning", FILE1, 2, 1, EVFILT_READ, NO_EVENT); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1); init_action(&(test.t_prep_actions[1]), NOSLEEP, WRITE, 1, (void*)FILE1); @@ -1630,7 +1628,7 @@ run_evfilt_read_tests() init_action(&test.t_helpthreadact, SLEEP, WRITE, 1, (void*)FILE1); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 1, (void*)FILE1); execute_test(&test); - + init_test(&test, "8.1.4: block, then seek to current location", FILE1, 2, 1, EVFILT_READ, 0); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1); init_action(&(test.t_prep_actions[1]), NOSLEEP, WRITE, 1, (void*)FILE1); @@ -1638,14 +1636,13 @@ run_evfilt_read_tests() init_action(&test.t_helpthreadact, SLEEP, LSEEK, 1, (void*)strlen(TEST_STRING)); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "8.2.5: trying to read from empty fifo", FILE1, 1, 1, EVFILT_READ, 0); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1); test.t_file_is_fifo = 1; init_action(&test.t_helpthreadact, SLEEP, NOTHING, 1, (void*)0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - } @@ -1671,10 +1668,9 @@ write_to_fd(void *arg) /* * We don't (in principle) support EVFILT_WRITE for vnodes; thusly, no tests here */ -void +void run_evfilt_write_tests() { - test_t test; init_test(&test, "9.1.1: how much space in empty fifo?", FILE1, 1, 1, EVFILT_WRITE, FIFO_SPACE); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1, (void*)NULL); @@ -1682,7 +1678,7 @@ run_evfilt_write_tests() init_action(&test.t_helpthreadact, SLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "9.1.2: how much space in slightly written fifo?", FILE1, 1, 1, EVFILT_WRITE, FIFO_SPACE - strlen(TEST_STRING)); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1, (void*)NULL); test.t_file_is_fifo = 1; @@ -1690,7 +1686,7 @@ run_evfilt_write_tests() init_action(&(test.t_helpthreadact), NOSLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_test(&test, "9.2.1: how much space in a full fifo?", FILE1, 1, 1, EVFILT_WRITE, 0); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1, (void*)NULL); test.t_nondeterministic = 1; @@ -1710,14 +1706,14 @@ run_poll_tests() init_action(&test.t_helpthreadact, SLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_poll_test(&test, "10.1.2: does poll say I can write an empty FIFO?", FILE1, 1, 1, 
POLLWRNORM, 1); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1, (void*)NULL); test.t_file_is_fifo = 1; init_action(&test.t_helpthreadact, SLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_poll_test(&test, "10.1.3: does poll say I can read a nonempty FIFO?", FILE1, 1, 1, POLLRDNORM, 1); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1, (void*)NULL); test.t_file_is_fifo = 1; @@ -1725,30 +1721,30 @@ run_poll_tests() init_action(&test.t_helpthreadact, SLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_poll_test(&test, "10.1.4: does poll say I can read a nonempty regular file?", FILE1, 2, 1, POLLRDNORM, 1); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1, (void*)NULL); init_action(&(test.t_prep_actions[1]), NOSLEEP, LENGTHEN, 1, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_poll_test(&test, "10.1.5: does poll say I can read an empty file?", FILE1, 1, 1, POLLRDNORM, 1); init_action(&(test.t_prep_actions[0]), NOSLEEP, CREAT, 1, (void*)FILE1, (void*)NULL); init_action(&test.t_helpthreadact, SLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - - - - + + + + init_poll_test(&test, "10.2.2: does poll say I can read an empty FIFO?", FILE1, 1, 1, POLLRDNORM, 0); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1, (void*)NULL); test.t_file_is_fifo = 1; init_action(&test.t_helpthreadact, SLEEP, NOTHING, 0); init_action(&test.t_cleanup_actions[0], NOSLEEP, UNLINK, 2, (void*)FILE1, NULL); execute_test(&test); - + init_poll_test(&test, "10.2.3: does poll say I can write a full FIFO?", FILE1, 1, 1, POLLWRNORM, 0); init_action(&(test.t_prep_actions[0]), NOSLEEP, MKFIFO, 1, (void*)FILE1, (void*)NULL); test.t_nondeterministic = 1; @@ -1773,7 +1769,7 @@ run_note_funlock_tests() } void -run_all_tests() +run_all_tests() { run_note_delete_tests(); run_note_write_tests(); @@ -1790,48 +1786,47 @@ run_all_tests() run_note_funlock_tests(); } - T_DECL(kqueue_file_tests, - "Tests assorted kqueue operations for file-related events") +T_DECL(kqueue_file_tests, + "Tests assorted kqueue operations for file-related events") { char *which = NULL; if (argc > 1) { which = argv[1]; } - + T_SETUPBEGIN; rmdir(DIR1); rmdir(DIR2); T_SETUPEND; - if ((!which) || (strcmp(which, "all") == 0)) + if ((!which) || (strcmp(which, "all") == 0)) { run_all_tests(); - else if (strcmp(which, "delete") == 0) + } else if (strcmp(which, "delete") == 0) { run_note_delete_tests(); - else if (strcmp(which, "write") == 0) + } else if (strcmp(which, "write") == 0) { run_note_write_tests(); - else if (strcmp(which, "extend") == 0) + } else if (strcmp(which, "extend") == 0) { run_note_extend_tests(); - else if (strcmp(which, "attrib") == 0) + } else if (strcmp(which, "attrib") == 0) { run_note_attrib_tests(); - else if (strcmp(which, "link") == 0) + } else if (strcmp(which, "link") == 0) { run_note_link_tests(); - else if (strcmp(which, "rename") == 0) + } else if (strcmp(which, "rename") == 0) { run_note_rename_tests(); - else if (strcmp(which, "revoke") == 0) + } else if (strcmp(which, "revoke") == 0) { run_note_revoke_tests(); - else if (strcmp(which, "evfiltread") == 0) + } else if 
(strcmp(which, "evfiltread") == 0) { run_evfilt_read_tests(); - else if (strcmp(which, "evfiltwrite") == 0) + } else if (strcmp(which, "evfiltwrite") == 0) { run_evfilt_write_tests(); - else if (strcmp(which, "poll") == 0) + } else if (strcmp(which, "poll") == 0) { run_poll_tests(); - else if (strcmp(which, "funlock") == 0) + } else if (strcmp(which, "funlock") == 0) { run_note_funlock_tests(); - else { + } else { fprintf(stderr, "Valid options are:\n\tdelete, write, extend, " - "attrib, link, rename, revoke, evfiltread, " - "fifo, all, evfiltwrite, funlock\n"); + "attrib, link, rename, revoke, evfiltread, " + "fifo, all, evfiltwrite, funlock\n"); exit(1); } } - diff --git a/tests/kqueue_timer_tests.c b/tests/kqueue_timer_tests.c index e02deb400..874335995 100644 --- a/tests/kqueue_timer_tests.c +++ b/tests/kqueue_timer_tests.c @@ -18,8 +18,16 @@ static mach_timebase_info_data_t timebase_info; -static uint64_t nanos_to_abs(uint64_t nanos) { return nanos * timebase_info.denom / timebase_info.numer; } -static uint64_t abs_to_nanos(uint64_t abs) { return abs * timebase_info.numer / timebase_info.denom; } +static uint64_t +nanos_to_abs(uint64_t nanos) +{ + return nanos * timebase_info.denom / timebase_info.numer; +} +static uint64_t +abs_to_nanos(uint64_t abs) +{ + return abs * timebase_info.numer / timebase_info.denom; +} static int kq, passed, failed; @@ -50,7 +58,7 @@ do_simple_kevent(struct kevent64_s *kev, uint64_t expected) if (ret < 1 || (kev->flags & EV_ERROR)) { T_LOG("%s() failure: kevent returned %d, error %d\n", __func__, ret, - (ret == -1 ? errno : (int) kev->data)); + (ret == -1 ? errno : (int) kev->data)); return 0; } @@ -58,13 +66,13 @@ do_simple_kevent(struct kevent64_s *kev, uint64_t expected) /* did it work? */ elapsed_usecs = (after.tv_sec - before.tv_sec) * (int64_t)USEC_PER_SEC + - (after.tv_usec - before.tv_usec); + (after.tv_usec - before.tv_usec); delta_usecs = (uint64_t)llabs(elapsed_usecs - ((int64_t)expected)); /* failure if we're 30% off, or 50 mics late */ if (delta_usecs > (30 * expected / 100.0) && delta_usecs > 50) { T_LOG("\tfailure: expected %lld usec, measured %lld usec.\n", - expected, elapsed_usecs); + expected, elapsed_usecs); return 0; } else { T_LOG("\tsuccess, measured %lld usec.\n", elapsed_usecs); @@ -118,11 +126,12 @@ test_absolute_kevent(int time, int scale) } /* deadlines in the past should fire immediately */ - if (time < 0) + if (time < 0) { expected = 0; + } EV_SET64(&kev, 1, EVFILT_TIMER, EV_ADD, - NOTE_ABSOLUTE | scale, deadline, 0,0,0); + NOTE_ABSOLUTE | scale, deadline, 0, 0, 0); ret = do_simple_kevent(&kev, expected); if (ret) { @@ -172,11 +181,12 @@ test_oneshot_kevent(int time, int scale) T_SETUPEND; /* deadlines in the past should fire immediately */ - if (time < 0) + if (time < 0) { expected = 0; + } EV_SET64(&kev, 2, EVFILT_TIMER, EV_ADD | EV_ONESHOT, scale, time, - 0, 0, 0); + 0, 0, 0); ret = do_simple_kevent(&kev, expected); if (ret) { @@ -200,13 +210,14 @@ test_interval_kevent(int usec) uint64_t test_duration_us = USEC_PER_SEC; /* 1 second */ uint64_t expected_pops; - if (usec < 0) + if (usec < 0) { expected_pops = 1; /* TODO: test 'and only once' */ - else + } else { expected_pops = test_duration_us / (uint64_t)usec; + } T_LOG("Testing interval kevent at %d usec intervals (%lld pops/second)...\n", - usec, expected_pops); + usec, expected_pops); EV_SET64(&kev, 3, EVFILT_TIMER, EV_ADD, NOTE_USECONDS, usec, 0, 0, 0); ret = kevent64(kq, &kev, 1, NULL, 0, 0, NULL); @@ -238,15 +249,16 @@ test_interval_kevent(int usec) pops += 
(uint64_t)kev.data; gettimeofday(&after, NULL); elapsed_usecs = (uint64_t)((after.tv_sec - before.tv_sec) * (int64_t)USEC_PER_SEC + - (after.tv_usec - before.tv_usec)); + (after.tv_usec - before.tv_usec)); - if (elapsed_usecs > test_duration_us) + if (elapsed_usecs > test_duration_us) { break; + } } /* check how many times the timer fired: within 5%? */ if (pops > expected_pops + (expected_pops / 20) || - pops < expected_pops - (expected_pops / 20)) { + pops < expected_pops - (expected_pops / 20)) { T_FAIL("%s() usec:%d (saw %lld of %lld expected pops)", __func__, usec, pops, expected_pops); failed++; } else { @@ -274,7 +286,7 @@ test_repeating_kevent(int usec) uint64_t expected_pops = test_duration_us / (uint64_t)usec; T_LOG("Testing repeating kevent at %d usec intervals (%lld pops/second)...\n", - usec, expected_pops); + usec, expected_pops); EV_SET64(&kev, 4, EVFILT_TIMER, EV_ADD, NOTE_USECONDS, usec, 0, 0, 0); ret = kevent64(kq, &kev, 1, NULL, 0, 0, NULL); @@ -299,7 +311,7 @@ test_repeating_kevent(int usec) /* check how many times the timer fired: within 5%? */ if (pops > expected_pops + (expected_pops / 20) || - pops < expected_pops - (expected_pops / 20)) { + pops < expected_pops - (expected_pops / 20)) { T_FAIL("%s() usec:%d (saw %lld of %lld expected pops)", __func__, usec, pops, expected_pops); failed++; } else { @@ -325,7 +337,7 @@ test_updated_kevent(int first, int second) T_SETUPBEGIN; - EV_SET64(&kev, 4, EVFILT_TIMER, EV_ADD|EV_ONESHOT, 0, first, 0, 0, 0); + EV_SET64(&kev, 4, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, first, 0, 0, 0); ret = kevent64(kq, &kev, 1, NULL, 0, 0, NULL); if (ret != 0) { T_FAIL("%s() failure: initial kevent returned %d\n", __func__, ret); @@ -339,8 +351,9 @@ test_updated_kevent(int first, int second) uint64_t expected_us = (uint64_t)second * 1000; - if (second < 0) + if (second < 0) { expected_us = 0; + } ret = do_simple_kevent(&kev, expected_us); @@ -356,8 +369,8 @@ test_updated_kevent(int first, int second) static void disable_timer_coalescing(void) { - struct task_qos_policy qosinfo; - kern_return_t kr; + struct task_qos_policy qosinfo; + kern_return_t kr; T_SETUPBEGIN; @@ -365,7 +378,7 @@ disable_timer_coalescing(void) qosinfo.task_throughput_qos_tier = THROUGHPUT_QOS_TIER_0; kr = task_policy_set(mach_task_self(), TASK_OVERRIDE_QOS_POLICY, (task_policy_t)&qosinfo, - TASK_QOS_POLICY_COUNT); + TASK_QOS_POLICY_COUNT); if (kr != KERN_SUCCESS) { T_FAIL("task_policy_set(... TASK_OVERRIDE_QOS_POLICY ...) failed: %d (%s)", kr, mach_error_string(kr)); } @@ -374,7 +387,7 @@ disable_timer_coalescing(void) } T_DECL(kqueue_timer_tests, - "Tests assorted kqueue operations for timer-related events") + "Tests assorted kqueue operations for timer-related events") { /* * Since we're trying to test timers here, disable timer coalescing @@ -433,5 +446,4 @@ T_DECL(kqueue_timer_tests, test_updated_kevent(1000, 2000); test_updated_kevent(2000, 1000); test_updated_kevent(1000, -1); - } diff --git a/tests/ldt.c b/tests/ldt.c new file mode 100644 index 000000000..3f2378e31 --- /dev/null +++ b/tests/ldt.c @@ -0,0 +1,1164 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +// #define STANDALONE + +#ifndef STANDALONE +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef STANDALONE +T_GLOBAL_META( + T_META_NAMESPACE("xnu.intel"), + T_META_CHECK_LEAKS(false) + ); +#endif + +#define COMPAT_MODE_CS_SELECTOR 0x1f +#define SYSENTER_SELECTOR 0xb +/* #define DEBUG 1 */ +#define P2ROUNDUP(x, align) (-(-((long)x) & -((long)align))) +#define MSG 2048 + +#define NORMAL_RUN_TIME (10) +#define TIMEOUT_OVERHEAD (10) + +/* + * General theory of operation: + * ---------------------------- + * (1) Ensure that all code and data to be accessed from compatibility mode is + * located in the low 4GiB of virtual address space. + * (2) Allocate required segments via the i386_set_ldt() system call, making + * sure to set the descriptor type correctly (code vs. data). Creating + * 64-bit code segments is not allowed (just use the existing 0x2b selector.) + * (3) Once you know which selector is associated with the desired code, use a + * trampoline (or thunk) to (a) switch to a stack that's located below 4GiB + * and (b) save ABI-mandated caller-saved state so that if it's trashed by + * compatibility-mode code, it can be restored before returning to 64-bit + * mode (if desired), and finally (c) long-jump or long-call (aka far call) + * to the segment and desired offset (this example uses an offset of 0 for + * simplicity.) + * (4) Once in compatibility mode, if a framework call or system call is required, + * the code must trampoline back to 64-bit mode to do so. System calls from + * compatibility mode code are not supported and will result in invalid opcode + * exceptions. This example includes a simple 64-bit trampoline (which must + * be located in the low 4GiB of virtual address space, since it's executed + * by compatibility-mode code.) Note that since the 64-bit ABI mandates that + * the stack must be aligned to a 16-byte boundary, the sample trampoline + * performs that rounding, to simplify compatibility-mode code. Additionally, + * since 64-bit native code makes use of thread-local storage, the user-mode + * GSbase must be restored. 
This sample includes two ways to do that-- (a) by + * calling into a C implementation that associates the thread-local storage + * pointer with a stack range (which will be unique for each thread.), and + * (b) by storing the original GSbase in a block of memory installed into + * GSbase before calling into compatibility-mode code. A special machdep + * system call restores GSbase as needed. Note that the sample trampoline + * does not save and restore %gs (or most other register state, so that is an + * area that may be tailored to the application's requirements.) + * (5) Once running in compatibility mode, should synchronous or asynchronous + * exceptions occur, this sample shows how a mach exception handler (running + * in a detached thread, handling exceptions for the entire task) can catch + * such exceptions and manipulate thread state to perform recovery (or not.) + * Other ways to handle exceptions include installing per-thread exception + * servers. Alternatively, BSD signal handlers can be used. Note that once a + * process installs a custom LDT, *ALL* future signal deliveries will include + * ucontext pointers to mcontext structures that include enhanced thread + * state embedded (e.g. the %ds, %es, %ss, and GSBase registers) [This assumes + * that the SA_SIGINFO is passed to sigaction(2) when registering handlers]. + * The mcontext size (part of the ucontext) can be used to differentiate between + * different mcontext flavors (e.g. those with/without full thread state plus + * x87 FP state, AVX state, or AVX2/3 state). + */ + +/* + * This test exercises the custom LDT functionality exposed via the i386_{get,set}_ldt + * system calls. + * + * Tests include: + * (1a) Exception handling (due to an exception or another thread sending a signal) while + * running in compatibility mode; + * (1b) Signal handling while running in compatibility mode; + * (2) Thunking back to 64-bit mode and executing a framework function (e.g. printf) + * (3) Ensuring that transitions to compatibility mode and back to 64-bit mode + * do not negatively impact system calls and framework calls in 64-bit mode + * (4) Use of thread_get_state / thread_set_state to configure a thread to + * execute in compatibility mode with the proper LDT code segment (this is + * effectively what the exception handler does when the passed-in new_state + * is changed (or what the BSD signal handler return handling does when the + * mcontext is modified).) + * (5) Ensure that compatibility mode code cannot make system calls via sysenter or + * old-style int {0x80..0x82}. + * (6) Negative testing to ensure errors are returned if the consumer tries + * to set a disallowed segment type / Long flag. [TBD] + */ + +/* + * Note that these addresses are not necessarily available due to ASLR, so + * a robust implementation should determine the proper range to use via + * another means. 
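+ * One hypothetical way to do that (illustrative only; this test instead
+ * relies on the fixed FIXED_STACK_ADDR/FIXED_TRAMP_ADDR choices below) is
+ * to let mmap() choose near a hinted address without MAP_FIXED, then accept
+ * the mapping only if it landed entirely below 4GiB:
+ *
+ *   void *p = mmap((void *)hint, len, PROT_READ | PROT_WRITE,
+ *       MAP_PRIVATE | MAP_ANON, -1, 0);      // hint, len are placeholders
+ *   if (p != MAP_FAILED && (uintptr_t)p + len <= 0x100000000ULL)
+ *       use p;                               // entirely below 4GiB
+ *   else
+ *       munmap(p, len) if mapped, and retry with a different hint.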
+ */ +#define FIXED_STACK_ADDR ((uintptr_t)0x10000000) /* must be page-aligned */ +#ifndef STANDALONE +/* libdarwintest needs LOTs of stack */ +#endif +#define FIXED_STACK_SIZE (PAGE_SIZE * 16) + +#define FIXED_TRAMP_ADDR (FIXED_STACK_ADDR + FIXED_STACK_SIZE + PAGE_SIZE) +#define FIXED_TRAMP_MAXLEN (PAGE_SIZE * 8) + +#pragma pack(1) +typedef struct { + uint64_t off; + uint16_t seg; +} far_call_t; +#pragma pack() + +typedef struct { + uint64_t stack_base; + uint64_t stack_limit; + uint64_t GSbase; +} stackaddr_to_gsbase_t; + +typedef struct thread_arg { + pthread_mutex_t mutex; + pthread_cond_t condvar; + volatile boolean_t done; + uint32_t compat_stackaddr; /* Compatibility mode stack address */ +} thread_arg_t; + +typedef struct custom_tsd { + struct custom_tsd * this_tsd_base; + uint64_t orig_tsd_base; +} custom_tsd_t; + +typedef uint64_t (*compat_tramp_t)(far_call_t *fcp, void *lowmemstk, uint64_t arg_for_32bit, + uint64_t callback, uint64_t absolute_addr_of_thunk64); + +#define GS_RELATIVE volatile __attribute__((address_space(256))) +static custom_tsd_t GS_RELATIVE *mytsd = (custom_tsd_t GS_RELATIVE *)0; + +static far_call_t input_desc = { .seg = COMPAT_MODE_CS_SELECTOR, .off = 0 }; +static uint64_t stackAddr = 0; +static compat_tramp_t thunkit = NULL; +static uint64_t thunk64_addr; +static stackaddr_to_gsbase_t stack2gs[] = { { FIXED_STACK_ADDR, FIXED_STACK_ADDR + FIXED_STACK_SIZE, 0 } }; + +extern int compat_mode_trampoline(far_call_t *, void *, uint64_t); +extern void long_mode_trampoline(void); +extern boolean_t mach_exc_server(mach_msg_header_t *InHeadP, mach_msg_header_t *OutHeadP); + +extern void code_32(void); + +kern_return_t catch_mach_exception_raise_state_identity(mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + mach_exception_data_t code, + mach_msg_type_number_t code_count, + int * flavor, + thread_state_t old_state, + mach_msg_type_number_t old_state_count, + thread_state_t new_state, + mach_msg_type_number_t * new_state_count); + +kern_return_t +catch_mach_exception_raise_state(mach_port_t exception_port, + exception_type_t exception, + const mach_exception_data_t code, + mach_msg_type_number_t codeCnt, + int *flavor, + const thread_state_t old_state, + mach_msg_type_number_t old_stateCnt, + thread_state_t new_state, + mach_msg_type_number_t *new_stateCnt); + +kern_return_t +catch_mach_exception_raise(mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + mach_exception_data_t code, + mach_msg_type_number_t codeCnt, + int *flavor, + thread_state_t old_state, + mach_msg_type_number_t old_stateCnt, + thread_state_t new_state, + mach_msg_type_number_t *new_stateCnt); + +extern void _thread_set_tsd_base(uint64_t); +static uint64_t stack_range_to_GSbase(uint64_t stackptr, uint64_t GSbase); +void restore_gsbase(uint64_t stackptr); + +static uint64_t +get_gsbase(void) +{ + struct thread_identifier_info tiinfo; + unsigned int info_count = THREAD_IDENTIFIER_INFO_COUNT; + kern_return_t kr; + + if ((kr = thread_info(mach_thread_self(), THREAD_IDENTIFIER_INFO, + (thread_info_t) &tiinfo, &info_count)) != KERN_SUCCESS) { + fprintf(stderr, "Could not get tsd base address. 
This will not end well.\n"); + return 0; + } + + return (uint64_t)tiinfo.thread_handle; +} + +void +restore_gsbase(uint64_t stackptr) +{ + /* Restore GSbase so tsd is accessible in long mode */ + uint64_t orig_GSbase = stack_range_to_GSbase(stackptr, 0); + + assert(orig_GSbase != 0); + _thread_set_tsd_base(orig_GSbase); +} + +/* + * Though we've directed all exceptions through the catch_mach_exception_raise_state_identity + * entry point, we still must provide these two other entry points, otherwise a linker error + * will occur. + */ +kern_return_t +catch_mach_exception_raise(mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + mach_exception_data_t code, + mach_msg_type_number_t codeCnt, + int *flavor, + thread_state_t old_state, + mach_msg_type_number_t old_stateCnt, + thread_state_t new_state, + mach_msg_type_number_t *new_stateCnt) +{ +#pragma unused(exception_port, thread, task, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt) + fprintf(stderr, "Unexpected exception handler called: %s\n", __func__); + return KERN_FAILURE; +} + +kern_return_t +catch_mach_exception_raise_state(mach_port_t exception_port, + exception_type_t exception, + const mach_exception_data_t code, + mach_msg_type_number_t codeCnt, + int *flavor, + const thread_state_t old_state, + mach_msg_type_number_t old_stateCnt, + thread_state_t new_state, + mach_msg_type_number_t *new_stateCnt) +{ +#pragma unused(exception_port, exception, code, codeCnt, flavor, old_state, old_stateCnt, new_state, new_stateCnt) + fprintf(stderr, "Unexpected exception handler called: %s\n", __func__); + return KERN_FAILURE; +} + +static void +handle_arithmetic_exception(_STRUCT_X86_THREAD_FULL_STATE64 *xtfs64, uint64_t *ip_skip_countp) +{ + fprintf(stderr, "Caught divide-error exception\n"); + fprintf(stderr, "cs=0x%x rip=0x%x gs=0x%x ss=0x%x rsp=0x%llx\n", + (unsigned)xtfs64->ss64.__cs, + (unsigned)xtfs64->ss64.__rip, (unsigned)xtfs64->ss64.__gs, + (unsigned)xtfs64->__ss, xtfs64->ss64.__rsp); + *ip_skip_countp = 2; +} + +static void +handle_badinsn_exception(_STRUCT_X86_THREAD_FULL_STATE64 *xtfs64, uint64_t __unused *ip_skip_countp) +{ + extern void first_invalid_opcode(void); + extern void last_invalid_opcode(void); + + uint64_t start_addr = ((uintptr_t)first_invalid_opcode - (uintptr_t)code_32); + uint64_t end_addr = ((uintptr_t)last_invalid_opcode - (uintptr_t)code_32); + + fprintf(stderr, "Caught invalid opcode exception\n"); + fprintf(stderr, "cs=%x rip=%x gs=%x ss=0x%x rsp=0x%llx | handling between 0x%llx and 0x%llx\n", + (unsigned)xtfs64->ss64.__cs, + (unsigned)xtfs64->ss64.__rip, (unsigned)xtfs64->ss64.__gs, + (unsigned)xtfs64->__ss, xtfs64->ss64.__rsp, + start_addr, end_addr); + + /* + * We expect to handle 4 invalid opcode exceptions: + * (1) sysenter + * (2) int $0x80 + * (3) int $0x81 + * (4) int $0x82 + * (Note that due to the way the invalid opcode indication was implemented, + * %rip is already set to the next instruction.) + */ + if (xtfs64->ss64.__rip >= start_addr && xtfs64->ss64.__rip <= end_addr) { + /* + * On return from the failed sysenter, %cs is changed to the + * sysenter code selector and %ss is set to 0x23, so switch them + * back to sane values. 
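+ * (0x23 is, as far as we can tell, the standard 32-bit user data segment
+ * selector, USER_DS, in xnu's GDT layout; hence the XXX note on the magic
+ * number below.)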
+ */ + if ((unsigned)xtfs64->ss64.__cs == SYSENTER_SELECTOR) { + xtfs64->ss64.__cs = COMPAT_MODE_CS_SELECTOR; + xtfs64->__ss = 0x23; /* XXX */ + } + } +} + +kern_return_t +catch_mach_exception_raise_state_identity(mach_port_t exception_port, + mach_port_t thread, + mach_port_t task, + exception_type_t exception, + mach_exception_data_t code, + mach_msg_type_number_t codeCnt, + int * flavor, + thread_state_t old_state, + mach_msg_type_number_t old_stateCnt, + thread_state_t new_state, + mach_msg_type_number_t * new_stateCnt) +{ +#pragma unused(exception_port, thread, task) + + _STRUCT_X86_THREAD_FULL_STATE64 *xtfs64 = (_STRUCT_X86_THREAD_FULL_STATE64 *)(void *)old_state; + _STRUCT_X86_THREAD_FULL_STATE64 *new_xtfs64 = (_STRUCT_X86_THREAD_FULL_STATE64 *)(void *)new_state; + uint64_t rip_skip_count = 0; + + /* + * Check the exception code and thread state. + * If we were executing 32-bit code (or 64-bit code on behalf of + * 32-bit code), we could update the thread state to effectively longjmp + * back to a safe location where the victim thread can recover. + * Then again, we could return KERN_NOT_SUPPORTED and allow the process + * to be nuked. + */ + + switch (exception) { + case EXC_ARITHMETIC: + if (codeCnt >= 1 && code[0] == EXC_I386_DIV) { + handle_arithmetic_exception(xtfs64, &rip_skip_count); + } + break; + + case EXC_BAD_INSTRUCTION: + { + if (codeCnt >= 1 && code[0] == EXC_I386_INVOP) { + handle_badinsn_exception(xtfs64, &rip_skip_count); + } + break; + } + + default: + fprintf(stderr, "Unsupported catch_mach_exception_raise_state_identity: code 0x%llx sub 0x%llx\n", + code[0], codeCnt > 1 ? code[1] : 0LL); + fprintf(stderr, "flavor=%d %%cs=0x%x %%rip=0x%llx\n", *flavor, (unsigned)xtfs64->ss64.__cs, + xtfs64->ss64.__rip); + } + + /* + * If this exception happened in compatibility mode, + * assume it was one of the intentional faults (the division-by-zero or + * a probed invalid opcode) and advance the new state's %rip to just + * past the faulting instruction + * to enable the thread to resume. 
+ */ + if ((unsigned)xtfs64->ss64.__cs == COMPAT_MODE_CS_SELECTOR) { + *new_stateCnt = old_stateCnt; + *new_xtfs64 = *xtfs64; + new_xtfs64->ss64.__rip += rip_skip_count; + fprintf(stderr, "new cs=0x%x rip=0x%llx\n", (unsigned)new_xtfs64->ss64.__cs, + new_xtfs64->ss64.__rip); + return KERN_SUCCESS; + } else { + return KERN_NOT_SUPPORTED; + } +} + +static void * +handle_exceptions(void *arg) +{ + mach_port_t ePort = (mach_port_t)arg; + kern_return_t kret; + + kret = mach_msg_server(mach_exc_server, MACH_MSG_SIZE_RELIABLE, ePort, 0); + if (kret != KERN_SUCCESS) { + fprintf(stderr, "mach_msg_server: %s (%d)", mach_error_string(kret), kret); + } + + return NULL; +} + +static void +init_task_exception_server(void) +{ + kern_return_t kr; + task_t me = mach_task_self(); + pthread_t handler_thread; + pthread_attr_t attr; + mach_port_t ePort; + + kr = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &ePort); + if (kr != KERN_SUCCESS) { + fprintf(stderr, "allocate receive right: %d\n", kr); + return; + } + + kr = mach_port_insert_right(me, ePort, ePort, MACH_MSG_TYPE_MAKE_SEND); + if (kr != KERN_SUCCESS) { + fprintf(stderr, "insert right into port=[%d]: %d\n", ePort, kr); + return; + } + + kr = task_set_exception_ports(me, EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC, ePort, + (exception_behavior_t)(EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES), x86_THREAD_FULL_STATE64); + if (kr != KERN_SUCCESS) { + fprintf(stderr, "abort: error setting task exception ports on task=[%d], handler=[%d]: %d\n", me, ePort, kr); + exit(1); + } + + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + + if (pthread_create(&handler_thread, &attr, handle_exceptions, (void *)(uintptr_t)ePort) != 0) { + perror("pthread create error"); + return; + } + + pthread_attr_destroy(&attr); +} + +static union ldt_entry *descs = 0; +static uint64_t idx; +static int saw_ud2 = 0; +static boolean_t ENV_set_ldt_in_sighandler = FALSE; + +static void +signal_handler(int signo, siginfo_t *sinfop, void *ucontext) +{ + uint64_t rip_skip_count = 0; + ucontext_t *uctxp = (ucontext_t *)ucontext; + union { + _STRUCT_MCONTEXT_AVX512_64 *avx512_basep; + _STRUCT_MCONTEXT_AVX512_64_FULL *avx512_fullp; + _STRUCT_MCONTEXT_AVX64 *avx64_basep; + _STRUCT_MCONTEXT_AVX64_FULL *avx64_fullp; + _STRUCT_MCONTEXT64 *fp_basep; + _STRUCT_MCONTEXT64_FULL *fp_fullp; + } mctx; + + mctx.fp_fullp = (_STRUCT_MCONTEXT64_FULL *)uctxp->uc_mcontext; + + /* + * Note that GSbase must be restored before calling into any frameworks + * that might access anything %gs-relative (e.g. TSD) if the signal + * handler was triggered while the thread was running with a non-default + * (system-established) GSbase. + */ + + if ((signo != SIGFPE && signo != SIGILL) || sinfop->si_signo != signo) { +#ifndef STANDALONE + T_ASSERT_FAIL("Unexpected signal %d\n", signo); +#else + restore_gsbase(mctx.fp_fullp->__ss.ss64.__rsp); + fprintf(stderr, "Not handling signal %d\n", signo); + abort(); +#endif + } + + if (uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX512_64) || + uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX64) || + uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT64)) { + _STRUCT_X86_THREAD_STATE64 *ss64 = &mctx.fp_basep->__ss; + + /* + * The following block is an illustration of what NOT to do. + * Configuring an LDT for the first time in a signal handler + * will likely cause the process to crash. 
+ */ + if (ENV_set_ldt_in_sighandler == TRUE && !saw_ud2) { + /* Set the LDT: */ + int cnt = i386_set_ldt((int)idx, &descs[idx], 1); + if (cnt != (int)idx) { +#ifdef DEBUG + fprintf(stderr, "i386_set_ldt unexpectedly returned %d\n", cnt); +#endif +#ifndef STANDALONE + T_LOG("i386_set_ldt unexpectedly returned %d\n", cnt); + T_ASSERT_FAIL("i386_set_ldt failure"); +#else + exit(1); +#endif + } +#ifdef DEBUG + printf("i386_set_ldt returned %d\n", cnt); +#endif + ss64->__rip += 2; /* ud2 is 2 bytes */ + + saw_ud2 = 1; + + /* + * When we return here, the sigreturn processing code will try to copy a FULL + * thread context from the signal stack, which will likely cause the resumed + * thread to fault and be terminated. + */ + return; + } + + restore_gsbase(ss64->__rsp); + + /* + * If we're in this block, either we are dispatching a signal received + * before we installed a custom LDT, or we are running on a kernel that + * does not support delivering full thread state with BSD signals. The + * latter is the likely case. + */ +#ifndef STANDALONE + T_ASSERT_FAIL("This system doesn't support BSD signals with full thread state."); +#else + fprintf(stderr, "This system doesn't support BSD signals with full thread state. Aborting.\n"); + abort(); +#endif + } else if (uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX512_64_FULL) || + uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX64_FULL) || + uctxp->uc_mcsize == sizeof(_STRUCT_MCONTEXT64_FULL)) { + _STRUCT_X86_THREAD_FULL_STATE64 *ss64 = &mctx.fp_fullp->__ss; + + /* + * Since we're handling this signal on the same thread, we may need to + * restore GSbase. + */ + uint64_t orig_gsbase = stack_range_to_GSbase(ss64->ss64.__rsp, 0); + if (orig_gsbase != 0 && orig_gsbase != ss64->__gsbase) { + restore_gsbase(ss64->ss64.__rsp); + } + + if (signo == SIGFPE) { + handle_arithmetic_exception(ss64, &rip_skip_count); + } else if (signo == SIGILL) { + handle_badinsn_exception(ss64, &rip_skip_count); + } + + /* + * If this exception happened in compatibility mode, + * assume it was one of the intentional faults (the division-by-zero or + * a probed invalid opcode) and advance the new state's %rip to just + * past the faulting instruction + * to enable the thread to resume. 
+ */ + if ((unsigned)ss64->ss64.__cs == COMPAT_MODE_CS_SELECTOR) { + ss64->ss64.__rip += rip_skip_count; + fprintf(stderr, "new cs=0x%x rip=0x%llx\n", (unsigned)ss64->ss64.__cs, + ss64->ss64.__rip); + } + } else { + _STRUCT_X86_THREAD_STATE64 *ss64 = &mctx.fp_basep->__ss; + + restore_gsbase(ss64->__rsp); +#ifndef STANDALONE + T_ASSERT_FAIL("Unknown mcontext size %lu: Aborting.", uctxp->uc_mcsize); +#else + fprintf(stderr, "Unknown mcontext size %lu: Aborting.\n", uctxp->uc_mcsize); + abort(); +#endif + } +} + +static void +setup_signal_handling(void) +{ + int rv; + + struct sigaction sa = { + .__sigaction_u = { .__sa_sigaction = signal_handler }, + .sa_flags = SA_SIGINFO + }; + + sigfillset(&sa.sa_mask); + + rv = sigaction(SIGFPE, &sa, NULL); + if (rv != 0) { +#ifndef STANDALONE + T_ASSERT_FAIL("Failed to configure SIGFPE signal handler\n"); +#else + fprintf(stderr, "Failed to configure SIGFPE signal handler\n"); + abort(); +#endif + } + + rv = sigaction(SIGILL, &sa, NULL); + if (rv != 0) { +#ifndef STANDALONE + T_ASSERT_FAIL("Failed to configure SIGILL signal handler\n"); +#else + fprintf(stderr, "Failed to configure SIGILL signal handler\n"); + abort(); +#endif + } +} + +static void +teardown_signal_handling(void) +{ + if (signal(SIGFPE, SIG_DFL) == SIG_ERR) { +#ifndef STANDALONE + T_ASSERT_FAIL("Error resetting SIGFPE signal disposition\n"); +#else + fprintf(stderr, "Error resetting SIGFPE signal disposition\n"); + abort(); +#endif + } + + if (signal(SIGILL, SIG_DFL) == SIG_ERR) { +#ifndef STANDALONE + T_ASSERT_FAIL("Error resetting SIGILL signal disposition\n"); +#else + fprintf(stderr, "Error resetting SIGILL signal disposition\n"); + abort(); +#endif + } +} + +#ifdef DEBUG +static void +dump_desc(union ldt_entry *entp) +{ + printf("base %p lim %p type 0x%x dpl %x present %x opsz %x granular %x\n", + (void *)(uintptr_t)(entp->code.base00 + (entp->code.base16 << 16) + (entp->code.base24 << 24)), + (void *)(uintptr_t)(entp->code.limit00 + (entp->code.limit16 << 16)), + entp->code.type, + entp->code.dpl, + entp->code.present, + entp->code.opsz, + entp->code.granular); +} +#endif + +static int +map_lowmem_stack(void **lowmemstk) +{ + void *addr, *redzone; + + if ((redzone = mmap((void *)(FIXED_STACK_ADDR - PAGE_SIZE), PAGE_SIZE, PROT_READ, + MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0)) == MAP_FAILED) { + return errno; + } + + if ((addr = mmap((void *)FIXED_STACK_ADDR, FIXED_STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0)) == MAP_FAILED) { + (void)munmap(redzone, PAGE_SIZE); + return errno; + } + + if (lowmemstk) { + *lowmemstk = addr; + } + + return 0; +} + +static int +map_32bit_code_impl(uint8_t *code_src, size_t code_len, void **codeptr, void *baseaddr, + size_t szlimit) +{ + void *addr; + size_t sz = (size_t)P2ROUNDUP(code_len, (unsigned)PAGE_SIZE); + + if (code_len > szlimit) { + return E2BIG; + } + +#ifdef DEBUG + printf("baseaddr = %p, size = %lu, szlimit = %u\n", baseaddr, sz, (unsigned)szlimit); +#endif + + if ((addr = mmap(baseaddr, sz, PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0)) == MAP_FAILED) { + return errno; + } + +#ifdef DEBUG + printf("Mapping code @%p..%p => %p..%p\n", (void *)code_src, + (void *)((uintptr_t)code_src + (unsigned)code_len), + addr, (void *)((uintptr_t)addr + (unsigned)code_len)); +#endif + + bcopy(code_src, addr, code_len); + + /* Fill the rest of the page with NOPs */ + memset((void *)((uintptr_t)addr + code_len), 0x90, sz - code_len); + + if (codeptr) { + *codeptr = addr; + } + + 
return 0; +} + +static int +map_32bit_trampoline(compat_tramp_t *lowmemtrampp) +{ + extern int compat_mode_trampoline_len; + + return map_32bit_code_impl((uint8_t *)&compat_mode_trampoline, + (size_t)compat_mode_trampoline_len, (void **)lowmemtrampp, + (void *)FIXED_TRAMP_ADDR, FIXED_TRAMP_MAXLEN); +} + +static int +enable_ldt64(int *val) +{ + int ldt64_enable_value = 1; + int ldt64_enable_old = 0; + size_t ldt64_value_sz = sizeof(ldt64_enable_value); + int err; + + /* Enable the feature for this test (development kernels only) */ + if ((err = sysctlbyname("machdep.ldt64", 0, 0, &ldt64_enable_value, + ldt64_value_sz)) != 0) { + if (errno == EPERM) { + if ((err = sysctlbyname("machdep.ldt64", &ldt64_enable_old, + &ldt64_value_sz, 0, 0)) == 0) { + *val = ldt64_enable_old; + } + } + return errno; + } + + *val = ldt64_enable_value; + return 0; +} + +static uint64_t +stack_range_to_GSbase(uint64_t stackptr, uint64_t GSbase) +{ + unsigned long i; + + for (i = 0; i < sizeof(stack2gs) / sizeof(stack2gs[0]); i++) { + if (stackptr >= stack2gs[i].stack_base && + stackptr < stack2gs[i].stack_limit) { + if (GSbase != 0) { +#ifdef DEBUG + fprintf(stderr, "Updated gsbase for stack at 0x%llx..0x%llx to 0x%llx\n", + stack2gs[i].stack_base, stack2gs[i].stack_limit, GSbase); +#endif + stack2gs[i].GSbase = GSbase; + } + return stack2gs[i].GSbase; + } + } + return 0; +} + +static uint64_t +call_compatmode(uint32_t stackaddr, uint64_t compat_arg, uint64_t callback) +{ + uint64_t rv; + + /* + * Depending on how this is used, this allocation may need to be + * made with an allocator that returns virtual addresses below 4G. + */ + custom_tsd_t *new_GSbase = malloc(PAGE_SIZE); + + /* + * Change the GSbase (so things like printf will fail unless GSbase is + * restored) + */ + if (new_GSbase != NULL) { +#ifdef DEBUG + fprintf(stderr, "Setting new GS base: %p\n", (void *)new_GSbase); +#endif + new_GSbase->this_tsd_base = new_GSbase; + new_GSbase->orig_tsd_base = get_gsbase(); + _thread_set_tsd_base((uintptr_t)new_GSbase); + } else { +#ifndef STANDALONE + T_ASSERT_FAIL("Failed to allocate a page for new GSbase"); +#else + fprintf(stderr, "Failed to allocate a page for new GSbase"); + abort(); +#endif + } + + rv = thunkit(&input_desc, (void *)(uintptr_t)stackaddr, compat_arg, + callback, thunk64_addr); + + restore_gsbase(stackaddr); + + free(new_GSbase); + + return rv; +} + +static uint64_t +get_cursp(void) +{ + uint64_t curstk; + __asm__ __volatile__ ("movq %%rsp, %0" : "=r" (curstk) :: "memory"); + return curstk; +} + +static void +hello_from_32bit(void) +{ + uint64_t cur_tsd_base = (uint64_t)(uintptr_t)mytsd->this_tsd_base; + restore_gsbase(get_cursp()); + + printf("Hello on behalf of 32-bit compatibility mode!\n"); + + _thread_set_tsd_base(cur_tsd_base); +} + +/* + * Thread for executing 32-bit code + */ +static void * +thread_32bit(void *arg) +{ + thread_arg_t *targp = (thread_arg_t *)arg; + uint64_t cthread_self = 0; + + /* Save the GSbase for context switch back to 64-bit mode */ + cthread_self = get_gsbase(); + + /* + * Associate GSbase with the compat-mode stack (which will be used for long mode + * thunk calls as well.) 
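+ * The stack range works as the lookup key because the stack pointer is
+ * essentially the only thread-identifying state still in hand after a
+ * mode switch, so restore_gsbase() can later recover this thread's
+ * original TSD base from nothing more than the current %rsp.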
+ */ + (void)stack_range_to_GSbase(targp->compat_stackaddr, cthread_self); + +#ifdef DEBUG + printf("[thread %p] tsd base => %p\n", (void *)pthread_self(), (void *)cthread_self); +#endif + + pthread_mutex_lock(&targp->mutex); + + do { + if (targp->done == FALSE) { + pthread_cond_wait(&targp->condvar, &targp->mutex); + } + + /* Finally, execute the test */ + if (call_compatmode(targp->compat_stackaddr, 0, + (uint64_t)&hello_from_32bit) == 1) { + printf("32-bit code test passed\n"); + } else { + printf("32-bit code test failed\n"); + } + } while (targp->done == FALSE); + + pthread_mutex_unlock(&targp->mutex); + + return 0; +} + +static void +join_32bit_thread(pthread_t *thridp, thread_arg_t *cmargp) +{ + (void)pthread_mutex_lock(&cmargp->mutex); + cmargp->done = TRUE; + (void)pthread_cond_signal(&cmargp->condvar); + (void)pthread_mutex_unlock(&cmargp->mutex); + (void)pthread_join(*thridp, NULL); + *thridp = 0; +} + +static int +create_worker_thread(thread_arg_t *cmargp, uint32_t stackaddr, pthread_t *cmthreadp) +{ + *cmargp = (thread_arg_t) { .mutex = PTHREAD_MUTEX_INITIALIZER, + .condvar = PTHREAD_COND_INITIALIZER, + .done = FALSE, + .compat_stackaddr = stackaddr }; + + return pthread_create(cmthreadp, NULL, thread_32bit, cmargp); +} + +static void +ldt64_test_setup(pthread_t *cmthreadp, thread_arg_t *cmargp, boolean_t setldt_in_sighandler) +{ + extern void thunk64(void); + extern void thunk64_movabs(void); + int cnt = 0, err; + void *addr; + uintptr_t code_addr; + uintptr_t thunk64_movabs_addr; + int enable_status = 0; + + descs = malloc(sizeof(union ldt_entry) * 256); + if (descs == 0) { +#ifndef STANDALONE + T_ASSERT_FAIL("Could not allocate descriptor storage"); +#else + fprintf(stderr, "Could not allocate descriptor storage\n"); + abort(); +#endif + } + + if ((err = enable_ldt64(&enable_status)) != 0 && enable_status == 0) { +#ifndef STANDALONE + T_LOG("Warning: Couldn't set ldt64=1 via sysctl: %s\n", + strerror(err)); + T_ASSERT_FAIL("Couldn't enable ldt64 feature.\n"); +#else + fprintf(stderr, "Warning: Couldn't set ldt64=1 via sysctl: %s\n", + strerror(err)); + exit(1); +#endif + } + +#ifdef DEBUG + printf("32-bit code is at %p\n", (void *)&code_32); +#endif + + if ((err = map_lowmem_stack(&addr)) != 0) { +#ifdef DEBUG + fprintf(stderr, "Failed to mmap lowmem stack: %s\n", strerror(err)); +#endif +#ifndef STANDALONE + T_ASSERT_FAIL("failed to mmap lowmem stack"); +#else + exit(1); +#endif + } + + stackAddr = (uintptr_t)addr + FIXED_STACK_SIZE - 16; +#ifdef DEBUG + printf("lowstack addr = %p\n", (void *)stackAddr); +#endif + + if ((err = create_worker_thread(cmargp, (uint32_t)stackAddr, cmthreadp)) != 0) { +#ifdef DEBUG + fprintf(stderr, "Fatal: Could not create thread: %s\n", strerror(err)); +#endif +#ifndef STANDALONE + T_LOG("Fatal: Could not create thread: %s\n", strerror(err)); + T_ASSERT_FAIL("Thread creation failure"); +#else + exit(1); +#endif + } + + + if ((err = map_32bit_trampoline(&thunkit)) != 0) { +#ifdef DEBUG + fprintf(stderr, "Failed to map trampoline into lowmem: %s\n", strerror(err)); +#endif + join_32bit_thread(cmthreadp, cmargp); +#ifndef STANDALONE + T_LOG("Failed to map trampoline into lowmem: %s\n", strerror(err)); + T_ASSERT_FAIL("Failed to map trampoline into lowmem"); +#else + exit(1); +#endif + } + + /* + * Store long_mode_trampoline's address into the constant part of the movabs + * instruction in thunk64 + */ + thunk64_movabs_addr = (uintptr_t)thunkit + ((uintptr_t)thunk64_movabs - (uintptr_t)compat_mode_trampoline); + *((uint64_t *)(thunk64_movabs_addr 
+ 2)) = (uint64_t)&long_mode_trampoline; + + bzero(descs, sizeof(union ldt_entry) * 256); + + if ((cnt = i386_get_ldt(0, descs, 1)) <= 0) { + fprintf(stderr, "i386_get_ldt unexpectedly returned %d\n", cnt); + join_32bit_thread(cmthreadp, cmargp); +#ifndef STANDALONE + T_LOG("i386_get_ldt unexpectedly returned %d\n", cnt); + T_ASSERT_FAIL("i386_get_ldt failure"); +#else + exit(1); +#endif + } + +#ifdef DEBUG + printf("i386_get_ldt returned %d\n", cnt); +#endif + + idx = (unsigned)cnt; /* Put the desired descriptor in the first available slot */ + + /* + * code_32's address for the purposes of this descriptor is the base mapped address of + * the thunkit function + the offset of code_32 from compat_mode_trampoline. + */ + code_addr = (uintptr_t)thunkit + ((uintptr_t)code_32 - (uintptr_t)compat_mode_trampoline); + thunk64_addr = (uintptr_t)thunkit + ((uintptr_t)thunk64 - (uintptr_t)compat_mode_trampoline); + + /* Initialize desired descriptor */ + descs[idx].code.limit00 = (unsigned short)(((code_addr >> 12) + 1) & 0xFFFF); + descs[idx].code.limit16 = (unsigned char)((((code_addr >> 12) + 1) >> 16) & 0xF); + descs[idx].code.base00 = (unsigned short)((code_addr) & 0xFFFF); + descs[idx].code.base16 = (unsigned char)((code_addr >> 16) & 0xFF); + descs[idx].code.base24 = (unsigned char)((code_addr >> 24) & 0xFF); + descs[idx].code.type = DESC_CODE_READ; + descs[idx].code.opsz = DESC_CODE_32B; + descs[idx].code.granular = DESC_GRAN_PAGE; + descs[idx].code.dpl = 3; + descs[idx].code.present = 1; + + if (setldt_in_sighandler == FALSE) { + /* Set the LDT: */ + cnt = i386_set_ldt((int)idx, &descs[idx], 1); + if (cnt != (int)idx) { +#ifdef DEBUG + fprintf(stderr, "i386_set_ldt unexpectedly returned %d\n", cnt); +#endif + join_32bit_thread(cmthreadp, cmargp); +#ifndef STANDALONE + T_LOG("i386_set_ldt unexpectedly returned %d\n", cnt); + T_ASSERT_FAIL("i386_set_ldt failure"); +#else + exit(1); +#endif + } +#ifdef DEBUG + printf("i386_set_ldt returned %d\n", cnt); +#endif + } else { + __asm__ __volatile__ ("ud2" ::: "memory"); + } + + + /* Read back the LDT to ensure it was set properly */ + if ((cnt = i386_get_ldt(0, descs, (int)idx)) > 0) { +#ifdef DEBUG + for (int i = 0; i < cnt; i++) { + dump_desc(&descs[i]); + } +#endif + } else { +#ifdef DEBUG + fprintf(stderr, "i386_get_ldt unexpectedly returned %d\n", cnt); +#endif + join_32bit_thread(cmthreadp, cmargp); +#ifndef STANDALONE + T_LOG("i386_get_ldt unexpectedly returned %d\n", cnt); + T_ASSERT_FAIL("i386_get_ldt failure"); +#else + exit(1); +#endif + } + + free(descs); +} + +#ifdef STANDALONE +static void +test_ldt64_with_bsdsig(void) +#else +/* + * Main test declarations + */ +T_DECL(ldt64_with_bsd_sighandling, + "Ensures that a 64-bit process can create LDT entries and can execute code in " + "compatibility mode with BSD signal handling", + T_META_TIMEOUT(NORMAL_RUN_TIME + TIMEOUT_OVERHEAD)) +#endif +{ + pthread_t cmthread; + thread_arg_t cmarg; + + setup_signal_handling(); + +#ifndef STANDALONE + T_SETUPBEGIN; +#endif + ENV_set_ldt_in_sighandler = (getenv("LDT_SET_IN_SIGHANDLER") != NULL) ? 
TRUE : FALSE; + ldt64_test_setup(&cmthread, &cmarg, ENV_set_ldt_in_sighandler); +#ifndef STANDALONE + T_SETUPEND; +#endif + + join_32bit_thread(&cmthread, &cmarg); + + teardown_signal_handling(); + +#ifndef STANDALONE + T_PASS("Successfully completed ldt64 test with BSD signal handling"); +#else + fprintf(stderr, "PASSED: ldt64_with_bsd_signal_handling\n"); +#endif +} + +#ifdef STANDALONE +static void +test_ldt64_with_machexc(void) +#else +T_DECL(ldt64_with_mach_exception_handling, + "Ensures that a 64-bit process can create LDT entries and can execute code in " + "compatibility mode with Mach exception handling", + T_META_TIMEOUT(NORMAL_RUN_TIME + TIMEOUT_OVERHEAD)) +#endif +{ + pthread_t cmthread; + thread_arg_t cmarg; + +#ifndef STANDALONE + T_SETUPBEGIN; +#endif + ldt64_test_setup(&cmthread, &cmarg, FALSE); +#ifndef STANDALONE + T_SETUPEND; +#endif + + /* Now repeat with Mach exception handling */ + init_task_exception_server(); + + join_32bit_thread(&cmthread, &cmarg); + +#ifndef STANDALONE + T_PASS("Successfully completed ldt64 test with mach exception handling"); +#else + fprintf(stderr, "PASSED: ldt64_with_mach_exception_handling\n"); +#endif +} + +#ifdef STANDALONE +int +main(int __unused argc, char ** __unused argv) +{ + test_ldt64_with_bsdsig(); + test_ldt64_with_machexc(); +} +#endif diff --git a/tests/ldt_code32.s b/tests/ldt_code32.s new file mode 100644 index 000000000..690de6f7f --- /dev/null +++ b/tests/ldt_code32.s @@ -0,0 +1,285 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +.code64 +.globl _compat_mode_trampoline +_compat_mode_trampoline: + /* + * %rdi => address of far_call_t (64-bit offset, then 16-bit selector) + * %rsi => lowmem stack + * %rdx => argument to 32-bit function + * %rcx => address of long mode callback + * %r8 => 64-bit address of _thunk64 + */ + movq %rsp, %rax + movq %rsi, %rsp + pushq %rax /* Save 64-bit stack pointer */ + leaq 1f(%rip), %rax + movq %rdx, %r9 + xorq %rdx, %rdx + movw %cs, %dx + shlq $32, %rdx + orq %rdx, %rax + movq %r9, %rdx + /* + * Save all callee-saved regs before calling down to compat mode, + * as there's no guarantee that the top 32 bits are preserved + * across compat mode/long mode switches. 
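For reference, the far_call_t operand that the `ljmpq *(%rdi)` below dereferences is the standard m16:64 far pointer: an 8-byte target offset followed by a 2-byte segment selector. A minimal C mirror of that layout (the struct name follows the register comments above; this is an illustrative sketch, not part of the patch):

    #include <stdint.h>

    /* Sketch of the m16:64 operand ljmpq dereferences: 8-byte offset,
     * then 2-byte selector (here, an LDT code-segment selector at RPL 3). */
    typedef struct __attribute__((packed)) far_call {
        uint64_t offset;
        uint16_t selector;
    } far_call_t;

    _Static_assert(sizeof(far_call_t) == 10, "ljmpq expects a 10-byte operand");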
+ */ + pushq %rbp + pushq %rbx + pushq %r12 + pushq %r13 + pushq %r14 + pushq %r15 + + pushq %r8 /* Push the absolute address of _thunk64 below */ + pushq %rcx /* Push the 64-bit fn ptr that compat mode will call */ + pushq %rdx /* Push arg to 32-bit code */ + pushq %rax /* Push the return offset + segment onto the stack */ + + ljmpq *(%rdi) +1: + /* + * lretl from compat mode pops off the first 8 bytes, + * so manually reclaim the remaining 24 bytes + */ + addq $0x18, %rsp + + /* Restore callee-saved registers */ + popq %r15 + popq %r14 + popq %r13 + popq %r12 + popq %rbx + popq %rbp + + popq %rsp + retq + + +.code32 +.globl _code_32 +.align 12 +_code_32: + /* + * After the standard stack frame is established, the stack layout is as follows: + * + * (%esp) -> old %ebp + * 4(%ebp) -> return %eip + * 8(%ebp) -> return %cs + * 0xc(%ebp) -> function arg (value to increment and return) + * 0x14(%ebp) -> 8-byte long mode function pointer to call via trampoline (with 0 args) + * 0x1c(%ebp) -> absolute (32-bit) base address of the 64-bit thunk + * (Note that the caller pushed a 64-bit value here, so the 4 bytes + * at 0x20(%ebp) are zeroes.) + */ + pushl %ebp + movl %esp, %ebp + pushl %ebx + call 1f +1: + popl %ebx /* save EIP for use in PIC calculation below */ + subl $8, %esp + + movl 0x1c(%ebp), %eax + + /* Populate the far call descriptor: */ + movl %eax, -8(%ebp) + movl 8(%ebp), %eax /* The long-mode %cs from whence we came */ + movl %eax, -4(%ebp) + + pushl $0 /* number of arguments */ + pushl 0x18(%ebp) /* high 32-bits of long mode funcptr */ + pushl 0x14(%ebp) /* low 32-bits of long mode funcptr */ + + /* + * The next 2 instructions are necessary because clang cannot deal with + * a "leal offset(index_reg), dest_reg" construct despite the fact that + * this code is marked .code32 (because the target is 64-bit and cannot + * process this uniquely-32-bit construct.) + */ + leal 2f - 1b, %eax + addl %ebx, %eax + + pushl $0 + pushl %cs + pushl $0 + pushl %eax + + /* + * Note that the long-mode-based function that is called will need + * to restore GSbase before calling into any frameworks that might + * access %gs-relative data. + */ + ljmpl *-8(%ebp) /* far call to the long mode trampoline */ +2: + /* + * lretq from long mode pops 16 bytes, so reclaim the remaining 12 + */ + addl $12, %esp + + /* + * Do a division-by-zero so the exception handler can catch it and + * restore execution right after. If a signal handler is used, + * it must restore GSbase first if it intends to call into any + * frameworks / APIs that access %gs-relative data. + */ + xorl %eax, %eax + div %eax + +.globl _first_invalid_opcode +_first_invalid_opcode: + /* + * Next, try to perform a sysenter syscall -- which should result in + * a #UD. + */ + leal 3f - 1b, %edx + addl %ebx, %edx /* return address is expected in %edx */ + pushl %ecx + movl %esp, %ecx /* stack ptr is expected in %ecx */ + sysenter +3: + popl %ecx + + /* + * Do the same with each of the old-style INT syscalls. + */ + int $0x80 + int $0x81 +.globl _last_invalid_opcode +_last_invalid_opcode: + int $0x82 + + /* + * discard the return value from the trampolined function and + * increment the value passed in as this function's first argument + * then return that value + 1 so caller can verify a successful + * thunk. 
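The descriptor initialization in ldt64_test_setup earlier in this patch packs a 32-bit base address and a page-granular limit into the split base00/base16/base24 and limit00/limit16 fields. A restatement of that packing as plain shifts and masks (the helper name is illustrative only; the field math mirrors the test code):

    #include <stdint.h>

    /* Mirrors how ldt64_test_setup fills descs[idx].code: with 4K (page)
     * granularity the stored limit counts pages, so (addr >> 12) + 1 covers
     * the page that contains code_addr. */
    static void
    pack_code_descriptor(uintptr_t code_addr,
        uint16_t *base00, uint8_t *base16, uint8_t *base24,
        uint16_t *limit00, uint8_t *limit16)
    {
        uintptr_t limit_pages = (code_addr >> 12) + 1;

        *base00  = (uint16_t)(code_addr & 0xFFFF);
        *base16  = (uint8_t)((code_addr >> 16) & 0xFF);
        *base24  = (uint8_t)((code_addr >> 24) & 0xFF);
        *limit00 = (uint16_t)(limit_pages & 0xFFFF);
        *limit16 = (uint8_t)((limit_pages >> 16) & 0xF); /* only 4 limit bits here */
    }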
+ */ + movl 0xc(%ebp), %eax + incl %eax + addl $8, %esp + popl %ebx + popl %ebp + lret + +.code64 + +.globl _thunk64 +_thunk64: + /* + * The thunk is a very simple code fragment that uses an + * absolute address modified at setup time to call into + * the long mode trampoline.far call data passed on the stack to jump to long mode + * code (where %rip-relative addressing will work properly.) + * + */ +.globl _thunk64_movabs +_thunk64_movabs: + movabs $0xdeadbeeffeedface, %rax + jmpq *%rax + + +.globl _compat_mode_trampoline_len +_compat_mode_trampoline_len: + .long (. - _compat_mode_trampoline) + + +.globl _long_mode_trampoline +_long_mode_trampoline: + /* + * After creating a standard stack frame, the stack layout is: + * + * 8(%rbp) => %eip of far return to compat mode + * 0x10(%rbp) => %cs of far return to compat mode + * 0x18(%rbp) => low 32-bits of function pointer + * 0x1C(%rbp) => high 32-bits of function pointer + * 0x20(%rbp) => number of parameters (0..4) + * 0x24(%rbp) => first argument [low 32-bits] (if needed) + * 0x28(%rbp) => first argument [high 32-bits] (if needed) + * 0x2c(%rbp) => second argument [low 32-bits] (if needed) + * 0x30(%rbp) => second argument [high 32-bits] (if needed) + * 0x34(%rbp) => third argument [low 32-bits] (if needed) + * 0x38(%rbp) => third argument [high 32-bits] (if needed) + * 0x3c(%rbp) => fourth argument [low 32-bits] (if needed) + * 0x40(%rbp) => fourth argument [high 32-bits] (if needed) + * + * Note that we continue to use the existing (<4G) stack + * after the call into long mode. + */ + pushq %rbp + movq %rsp, %rbp + pushq %rdi + pushq %rsi + pushq %rcx + movl 0x20(%rbp), %eax + + testl %eax, %eax + jz 5f + + movq 0x24(%rbp), %rdi + decl %eax + +2: + testl %eax, %eax + jz 5f + + movq 0x2c(%rbp), %rsi + decl %eax + +3: + testl %eax, %eax + jz 5f + + movq 0x34(%rbp), %rdx + decl %eax + +4: + testl %eax, %eax + jnz 1f /* too many arguments specified -- bail out and return */ + + movq 0x3c(%rbp), %rcx + +5: /* Call passed-in function */ + /* Note that the stack MUST be 16-byte aligned before we call into frameworks in long mode */ + + pushq %rbx + movq %rsp, %rbx + subq $0x10, %rsp + andq $0xffffffffffffffe0, %rsp + + callq *0x18(%rbp) + movq %rbx, %rsp + popq %rbx +1: + popq %rcx + popq %rsi + popq %rdi + popq %rbp + lretq diff --git a/tests/legacy_footprint.entitlement b/tests/legacy_footprint.entitlement new file mode 100644 index 000000000..63294709f --- /dev/null +++ b/tests/legacy_footprint.entitlement @@ -0,0 +1,8 @@ + + + + + com.apple.private.memory.legacy_footprint + + + diff --git a/tests/ltable_exhaustion_test.c b/tests/ltable_exhaustion_test.c index 9bfeba8ea..b2aecff53 100644 --- a/tests/ltable_exhaustion_test.c +++ b/tests/ltable_exhaustion_test.c @@ -8,10 +8,10 @@ #define ITER 100 T_DECL(ltable_exhaustion_test, - "check if allocating not used ltable entries can panic the system", - T_META_ASROOT(true)) + "check if allocating not used ltable entries can panic the system", + T_META_ASROOT(true)) { - int n_ltable_entries,n_ltable_entries_after; + int n_ltable_entries, n_ltable_entries_after; size_t len = sizeof(int); int i; mach_port_name_t portset; @@ -31,5 +31,5 @@ T_DECL(ltable_exhaustion_test, */ T_EXPECT_POSIX_SUCCESS(sysctlbyname("kern.n_ltable_entries", &n_ltable_entries_after, &len, NULL, 0), "kern.n_ltable_entries"); - T_EXPECT_LE(n_ltable_entries_after, n_ltable_entries+ITER, "ltable before %d after %d iter %d", n_ltable_entries, n_ltable_entries_after, ITER); + T_EXPECT_LE(n_ltable_entries_after, n_ltable_entries + 
ITER, "ltable before %d after %d iter %d", n_ltable_entries, n_ltable_entries_after, ITER); } diff --git a/tests/mach_boottime_usec.c b/tests/mach_boottime_usec.c index ad0ac3265..7a9a47277 100644 --- a/tests/mach_boottime_usec.c +++ b/tests/mach_boottime_usec.c @@ -8,7 +8,7 @@ #include T_DECL(mach_boottime_usec, "mach_boottime_usec()", - T_META_ALL_VALID_ARCHS(true), T_META_LTEPHASE(LTE_POSTINIT)) + T_META_ALL_VALID_ARCHS(true), T_META_LTEPHASE(LTE_POSTINIT)) { uint64_t bt_usec = mach_boottime_usec(); diff --git a/tests/mach_continuous_time.c b/tests/mach_continuous_time.c index a7d773bfb..0d49b7bdf 100644 --- a/tests/mach_continuous_time.c +++ b/tests/mach_continuous_time.c @@ -19,10 +19,10 @@ extern uint64_t mach_absolute_time_kernel(void); extern uint64_t mach_continuous_time_kernel(void); #endif - + extern char **environ; -static const int64_t one_mil = 1000*1000; +static const int64_t one_mil = 1000 * 1000; #define to_ns(ticks) (((ticks) * tb_info.numer) / (tb_info.denom)) #define to_ms(ticks) (to_ns(ticks)/one_mil) @@ -30,12 +30,13 @@ static const int64_t one_mil = 1000*1000; static mach_timebase_info_data_t tb_info; static void -update(uint64_t *a, uint64_t *c) { - mach_get_times(a,c,NULL); +update(uint64_t *a, uint64_t *c) +{ + mach_get_times(a, c, NULL); } T_DECL(mct_monotonic, "Testing mach_continuous_time returns sane, monotonic values", - T_META_ALL_VALID_ARCHS(true)) + T_META_ALL_VALID_ARCHS(true)) { mach_timebase_info(&tb_info); #ifdef HAS_KERNEL_TIME_TRAPS @@ -43,15 +44,16 @@ T_DECL(mct_monotonic, "Testing mach_continuous_time returns sane, monotonic valu #endif volatile uint64_t multiple_test = to_ms(mach_continuous_time()); - for(int i = 0; i < 20; i++) { + for (int i = 0; i < 20; i++) { uint64_t tmp; const char *test_type = "user"; #ifdef HAS_KERNEL_TIME_TRAPS if (kernel) { test_type = "kernel"; tmp = mach_continuous_time_kernel(); - } else + } else { tmp = mach_continuous_time(); + } kernel = !kernel; #else tmp = mach_continuous_time(); @@ -67,7 +69,7 @@ T_DECL(mct_monotonic, "Testing mach_continuous_time returns sane, monotonic valu } T_DECL(mat_monotonic, "Testing mach_absolute_time returns sane, monotonic values", - T_META_ALL_VALID_ARCHS(true)) + T_META_ALL_VALID_ARCHS(true)) { mach_timebase_info(&tb_info); #ifdef HAS_KERNEL_TIME_TRAPS @@ -75,15 +77,16 @@ T_DECL(mat_monotonic, "Testing mach_absolute_time returns sane, monotonic values #endif volatile uint64_t multiple_test = to_ms(mach_absolute_time()); - for(int i = 0; i < 20; i++) { + for (int i = 0; i < 20; i++) { uint64_t tmp; const char *test_type = "user"; #ifdef HAS_KERNEL_TIME_TRAPS if (kernel) { test_type = "kernel"; tmp = mach_absolute_time_kernel(); - } else + } else { tmp = mach_absolute_time(); + } kernel = !kernel; #endif tmp = mach_absolute_time(); @@ -117,7 +120,8 @@ T_DECL(mct_pause, "Testing mach_continuous_time and mach_absolute_time don't div } #ifdef HAS_KERNEL_TIME_TRAPS -static void update_kern(uint64_t *abs, uint64_t *cont) +static void +update_kern(uint64_t *abs, uint64_t *cont) { uint64_t abs1, abs2, cont1, cont2; do { @@ -190,40 +194,40 @@ T_DECL(mct_sleep, "Testing mach_continuous_time behavior over system sleep"){ T_ASSERT_EQ(spawn_ret, 0, "pmset relative wait 5 failed"); // wait for device to sleep (up to 30 seconds) - for(int i = 0; i < 30; i++) { + for (int i = 0; i < 30; i++) { update(&abs_now, &cnt_now); after_diff = (int)(to_ms(cnt_now) - to_ms(abs_now)); // on OSX, there's enough latency between calls to MCT and MAT // when the system is going down for sleep for values to diverge 
a few ms - if(abs(before_diff - after_diff) > 2) { + if (abs(before_diff - after_diff) > 2) { break; } sleep(1); - T_LOG("waited %d seconds for sleep...", i+1); + T_LOG("waited %d seconds for sleep...", i + 1); } - if((after_diff - before_diff) < 4000) { + if ((after_diff - before_diff) < 4000) { T_LOG("Device slept for less than 4 seconds, did it really sleep? (%d ms change between abs and cont)", - after_diff - before_diff); + after_diff - before_diff); } time_t after_sleep = time(NULL); int cal_sleep_diff = (int)(double)difftime(after_sleep, before_sleep); - int ct_sleep_diff = ((int)to_ms(cnt_now) - ct_ms_before_sleep)/1000; - int ab_sleep_diff = ((int)to_ms(abs_now) - ab_ms_before_sleep)/1000; + int ct_sleep_diff = ((int)to_ms(cnt_now) - ct_ms_before_sleep) / 1000; + int ab_sleep_diff = ((int)to_ms(abs_now) - ab_ms_before_sleep) / 1000; T_LOG("Calendar progressed: %d sec; continuous time progressed: %d sec; absolute time progressed %d sec", - cal_sleep_diff, ct_sleep_diff, ab_sleep_diff); + cal_sleep_diff, ct_sleep_diff, ab_sleep_diff); T_ASSERT_LE(abs(ct_sleep_diff - cal_sleep_diff), 2, - "continuous time should progress at ~ same rate as calendar"); + "continuous time should progress at ~ same rate as calendar"); } T_DECL(mct_settimeofday, "Testing mach_continuous_time behavior over settimeofday"){ - if (geteuid() != 0){ + if (geteuid() != 0) { T_SKIP("The settimeofday() test requires root privileges to run."); } mach_timebase_info(&tb_info); @@ -237,7 +241,7 @@ T_DECL(mct_settimeofday, "Testing mach_continuous_time behavior over settimeofda struct timeval forward_tv = saved_tv; // move time forward by two minutes, ensure mach_continuous_time keeps // chugging along with mach_absolute_time - forward_tv.tv_sec += 2*60; + forward_tv.tv_sec += 2 * 60; before = (int)to_ms(mach_continuous_time()); T_ASSERT_POSIX_ZERO(settimeofday(&forward_tv, &saved_tz), NULL); @@ -250,7 +254,7 @@ T_DECL(mct_settimeofday, "Testing mach_continuous_time behavior over settimeofda #ifdef HAS_KERNEL_TIME_TRAPS T_DECL(mct_settimeofday_kern, "Testing kernel mach_continuous_time behavior over settimeofday"){ - if (geteuid() != 0){ + if (geteuid() != 0) { T_SKIP("The settimeofday() test requires root privileges to run."); } mach_timebase_info(&tb_info); @@ -264,7 +268,7 @@ T_DECL(mct_settimeofday_kern, "Testing kernel mach_continuous_time behavior over struct timeval forward_tv = saved_tv; // move time forward by two minutes, ensure mach_continuous_time keeps // chugging along with mach_absolute_time - forward_tv.tv_sec += 2*60; + forward_tv.tv_sec += 2 * 60; before = (int)to_ms(mach_continuous_time_kernel()); T_ASSERT_POSIX_ZERO(settimeofday(&forward_tv, &saved_tz), NULL); @@ -277,14 +281,14 @@ T_DECL(mct_settimeofday_kern, "Testing kernel mach_continuous_time behavior over #endif T_DECL(mct_aproximate, "Testing mach_continuous_approximate_time()", - T_META_ALL_VALID_ARCHS(true)) + T_META_ALL_VALID_ARCHS(true)) { mach_timebase_info(&tb_info); uint64_t absolute = to_ns(mach_continuous_time()); uint64_t approximate = to_ns(mach_continuous_approximate_time()); - T_EXPECT_LE(llabs((long long)absolute - (long long)approximate), (long long)(25*NSEC_PER_MSEC), NULL); + T_EXPECT_LE(llabs((long long)absolute - (long long)approximate), (long long)(25 * NSEC_PER_MSEC), NULL); } T_DECL(mach_time_perf, "mach_time performance") { @@ -364,4 +368,3 @@ T_DECL(mach_time_perf_instructions_kern, "instructions retired for kernel mach_t } } #endif - diff --git a/tests/mach_exc.defs b/tests/mach_exc.defs new file mode 100644 index 
000000000..7f58187fe --- /dev/null +++ b/tests/mach_exc.defs @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2016 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ +/* + * @OSF_COPYRIGHT@ + */ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. 
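Stepping back to the mach_continuous_time tests above: the invariant they probe is that mach_absolute_time() and mach_continuous_time() tick in the same timebase units and advance in lockstep while the machine stays awake; continuous time additionally accumulates time spent asleep. A standalone sketch of that check, assuming no sleep occurs between samples:

    #include <mach/mach_time.h>
    #include <stdio.h>

    int
    main(void)
    {
        mach_timebase_info_data_t tb;
        mach_timebase_info(&tb);

        uint64_t abs0 = mach_absolute_time();
        uint64_t cont0 = mach_continuous_time();
        /* ... awake-only work here ... */
        uint64_t abs1 = mach_absolute_time();
        uint64_t cont1 = mach_continuous_time();

        /* While awake the two deltas should agree to within sampling skew;
         * they diverge only across system sleep. */
        printf("abs delta %llu ticks, cont delta %llu ticks (%u/%u ns per tick)\n",
            (unsigned long long)(abs1 - abs0),
            (unsigned long long)(cont1 - cont0),
            tb.numer, tb.denom);
        return 0;
    }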
+ */ + +#include diff --git a/tests/mach_get_times.c b/tests/mach_get_times.c index c40badada..d6fe33a7b 100644 --- a/tests/mach_get_times.c +++ b/tests/mach_get_times.c @@ -11,7 +11,7 @@ #define timespec2nanosec(ts) ((uint64_t)((ts)->tv_sec) * NSEC_PER_SEC + (uint64_t)((ts)->tv_nsec)) T_DECL(mach_get_times, "mach_get_times()", - T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true)) + T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true)) { const int ITERATIONS = 500000 * dt_ncpu(); struct timespec gtod_ts; @@ -26,9 +26,9 @@ T_DECL(mach_get_times, "mach_get_times()", gtod = timespec2nanosec(>od_ts); T_LOG_VERBOSE("[%d] abs: %llu.%09llu(+%llu)\tcont: %llu.%09llu(+%llu)\tgtod:%llu.%09llu(+%llu)", i, - absolute / NSEC_PER_SEC, absolute % NSEC_PER_SEC, absolute - last_absolute, - continuous / NSEC_PER_SEC, continuous % NSEC_PER_SEC, continuous - last_continuous, - gtod / NSEC_PER_SEC, gtod % NSEC_PER_SEC, gtod - last_gtod); + absolute / NSEC_PER_SEC, absolute % NSEC_PER_SEC, absolute - last_absolute, + continuous / NSEC_PER_SEC, continuous % NSEC_PER_SEC, continuous - last_continuous, + gtod / NSEC_PER_SEC, gtod % NSEC_PER_SEC, gtod - last_gtod); T_QUIET; T_EXPECT_EQ(absolute - last_absolute, continuous - last_continuous, NULL); diff --git a/tests/mach_port_deallocate_21692215.c b/tests/mach_port_deallocate_21692215.c index 4b84428f6..0b1510600 100644 --- a/tests/mach_port_deallocate_21692215.c +++ b/tests/mach_port_deallocate_21692215.c @@ -9,16 +9,16 @@ T_DECL(mach_port_deallocate, "mach_port_deallocate deallocates also PORT_SET"){ mach_port_t port_set; mach_port_t port[NR_PORTS]; - int i,ret; + int i, ret; - ret= mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &port_set); + ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &port_set); T_ASSERT_MACH_SUCCESS(ret, "mach_port_allocate MACH_PORT_RIGHT_PORT_SET"); - for(i=0;i #include -T_DECL(mach_port_insert_right,"insert send right for an existing right", T_META_CHECK_LEAKS(false)) +T_DECL(mach_port_insert_right, "insert send right for an existing right", T_META_CHECK_LEAKS(false)) { mach_port_t port = MACH_PORT_NULL; mach_port_t port2 = MACH_PORT_NULL; diff --git a/tests/mach_port_mod_refs.c b/tests/mach_port_mod_refs.c index 3e5d2f321..acb7d119a 100644 --- a/tests/mach_port_mod_refs.c +++ b/tests/mach_port_mod_refs.c @@ -84,9 +84,9 @@ T_DECL(mach_port_mod_refs, "mach_port_mod_refs"){ /* * deallocate the ports/sets */ - ret= mach_port_mod_refs(mach_task_self(), port_set, MACH_PORT_RIGHT_PORT_SET, -1); + ret = mach_port_mod_refs(mach_task_self(), port_set, MACH_PORT_RIGHT_PORT_SET, -1); T_ASSERT_MACH_SUCCESS(ret, "mach_port_mod_refs(PORT_SET, -1)"); - ret= mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1); + ret = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1); T_ASSERT_MACH_SUCCESS(ret, "mach_port_mod_refs(RECV_RIGHT, -1)"); } diff --git a/tests/mach_timebase_info.c b/tests/mach_timebase_info.c index 51f3bb4d9..d58dd85c4 100644 --- a/tests/mach_timebase_info.c +++ b/tests/mach_timebase_info.c @@ -5,7 +5,7 @@ extern kern_return_t mach_timebase_info_trap(mach_timebase_info_t info); T_DECL(mach_timebase_info, "mach_timebase_info(_trap)", - T_META_ALL_VALID_ARCHS(true), T_META_LTEPHASE(LTE_POSTINIT)) + T_META_ALL_VALID_ARCHS(true), T_META_LTEPHASE(LTE_POSTINIT)) { mach_timebase_info_data_t a, b, c; diff --git a/tests/memorystatus_freeze_test.c b/tests/memorystatus_freeze_test.c index d41c66465..abd029e11 100644 --- a/tests/memorystatus_freeze_test.c +++ 
b/tests/memorystatus_freeze_test.c @@ -13,18 +13,16 @@ T_GLOBAL_META( T_META_NAMESPACE("xnu.vm"), T_META_CHECK_LEAKS(false) -); + ); -#define MEM_SIZE_MB 10 -#define NUM_ITERATIONS 5 +#define MEM_SIZE_MB 10 +#define NUM_ITERATIONS 5 #define CREATE_LIST(X) \ X(SUCCESS) \ X(TOO_FEW_ARGUMENTS) \ X(SYSCTL_VM_PAGESIZE_FAILED) \ X(VM_PAGESIZE_IS_ZERO) \ - X(SYSCTL_VM_FREEZE_ENABLED_FAILED) \ - X(FREEZER_DISABLED) \ X(DISPATCH_SOURCE_CREATE_FAILED) \ X(INITIAL_SIGNAL_TO_PARENT_FAILED) \ X(SIGNAL_TO_PARENT_FAILED) \ @@ -52,8 +50,9 @@ void run_freezer_test(int size_mb); void freeze_helper_process(void); -void move_to_idle_band(void) { - +void +move_to_idle_band(void) +{ memorystatus_priority_properties_t props; /* * Freezing a process also moves it to an elevated jetsam band in order to protect it from idle exits. @@ -71,19 +70,34 @@ void move_to_idle_band(void) { } } -void freeze_helper_process(void) { - int ret; +void +freeze_helper_process(void) +{ + size_t length; + int ret, freeze_enabled, errno_freeze_sysctl; T_LOG("Freezing child pid %d", pid); ret = sysctlbyname("kern.memorystatus_freeze", NULL, NULL, &pid, sizeof(pid)); + errno_freeze_sysctl = errno; sleep(1); + /* + * The child process toggles its freezable state on each iteration. + * So a failure for every alternate freeze is expected. + */ if (freeze_count % 2 == 0) { - /* - * The child process toggles its freezable state on each iteration. - * So a failure for every alternate freeze is expected. - */ - T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.memorystatus_freeze failed"); + length = sizeof(freeze_enabled); + T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.freeze_enabled", &freeze_enabled, &length, NULL, 0), + "failed to query vm.freeze_enabled"); + if (freeze_enabled) { + errno = errno_freeze_sysctl; + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.memorystatus_freeze failed"); + } else { + /* If freezer is disabled, skip the test. This can happen due to disk space shortage. */ + T_LOG("Freeze has been disabled. Terminating early."); + T_END; + } + T_LOG("Freeze succeeded. Thawing child pid %d", pid); ret = sysctlbyname("kern.memorystatus_thaw", NULL, NULL, &pid, sizeof(pid)); T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.memorystatus_thaw failed"); @@ -97,17 +111,24 @@ void freeze_helper_process(void) { T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGUSR1), "failed to send SIGUSR1 to child process"); } -void run_freezer_test(int size_mb) { - int ret; +void +run_freezer_test(int size_mb) +{ + int ret, freeze_enabled; char sz_str[50]; char **launch_tool_args; char testpath[PATH_MAX]; uint32_t testpath_buf_size; dispatch_source_t ds_freeze, ds_proc; - -#ifndef CONFIG_FREEZE - T_SKIP("Task freeze not supported."); -#endif + size_t length; + + length = sizeof(freeze_enabled); + T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.freeze_enabled", &freeze_enabled, &length, NULL, 0), + "failed to query vm.freeze_enabled"); + if (!freeze_enabled) { + /* If freezer is disabled, skip the test. This can happen due to disk space shortage. */ + T_SKIP("Freeze has been disabled. 
Skipping test."); + } signal(SIGUSR1, SIG_IGN); ds_freeze = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dispatch_get_main_queue()); @@ -115,10 +136,10 @@ void run_freezer_test(int size_mb) { dispatch_source_set_event_handler(ds_freeze, ^{ if (freeze_count < NUM_ITERATIONS) { - freeze_helper_process(); + freeze_helper_process(); } else { - kill(pid, SIGKILL); - dispatch_source_cancel(ds_freeze); + kill(pid, SIGKILL); + dispatch_source_cancel(ds_freeze); } }); dispatch_activate(ds_freeze); @@ -155,11 +176,11 @@ void run_freezer_test(int size_mb) { code = WEXITSTATUS(status); if (code == 0) { - T_END; + T_END; } else if (code > 0 && code < EXIT_CODE_MAX) { - T_ASSERT_FAIL("Child exited with %s", exit_codes_str[code]); + T_ASSERT_FAIL("Child exited with %s", exit_codes_str[code]); } else { - T_ASSERT_FAIL("Child exited with unknown exit code %d", code); + T_ASSERT_FAIL("Child exited with unknown exit code %d", code); } }); dispatch_activate(ds_proc); @@ -169,9 +190,9 @@ void run_freezer_test(int size_mb) { } T_HELPER_DECL(allocate_pages, - "allocates pages to freeze", - T_META_ASROOT(true)) { - int i, j, temp, ret, size_mb, vmpgsize; + "allocates pages to freeze", + T_META_ASROOT(true)) { + int i, j, ret, size_mb, vmpgsize; size_t len; char val; __block int num_pages, num_iter = 0; @@ -191,15 +212,6 @@ T_HELPER_DECL(allocate_pages, exit(TOO_FEW_ARGUMENTS); } - len = sizeof(temp); - ret = sysctlbyname("vm.freeze_enabled", &temp, &len, NULL, 0); - if (ret != 0) { - exit(SYSCTL_VM_FREEZE_ENABLED_FAILED); - } - if (temp == 0) { - exit(FREEZER_DISABLED); - } - size_mb = atoi(argv[0]); num_pages = size_mb * 1024 * 1024 / vmpgsize; buf = (char**)malloc(sizeof(char*) * (size_t)num_pages); @@ -218,9 +230,9 @@ T_HELPER_DECL(allocate_pages, dispatch_after(dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC), dispatch_get_main_queue(), ^{ /* Signal to the parent that we're done allocating and it's ok to freeze us */ - printf("Sending initial signal to parent to begin freezing\n"); + printf("[%d] Sending initial signal to parent to begin freezing\n", getpid()); if (kill(getppid(), SIGUSR1) != 0) { - exit(INITIAL_SIGNAL_TO_PARENT_FAILED); + exit(INITIAL_SIGNAL_TO_PARENT_FAILED); } }); @@ -236,27 +248,28 @@ T_HELPER_DECL(allocate_pages, /* Make sure all the pages are accessed before trying to freeze again */ for (int x = 0; x < num_pages; x++) { - tmp = buf[x][0]; + tmp = buf[x][0]; } current_state = memorystatus_control(MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE, getpid(), 0, NULL, 0); /* Toggle freezable state */ new_state = (current_state) ? 0: 1; - printf("Changing state from %s to %s\n", (current_state) ? "freezable": "unfreezable", (new_state) ? "freezable": "unfreezable"); + printf("[%d] Changing state from %s to %s\n", getpid(), + (current_state) ? "freezable": "unfreezable", (new_state) ? 
"freezable": "unfreezable"); if (memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE, getpid(), (uint32_t)new_state, NULL, 0) != KERN_SUCCESS) { - exit(MEMORYSTATUS_CONTROL_FAILED); + exit(MEMORYSTATUS_CONTROL_FAILED); } /* Verify that the state has been set correctly */ current_state = memorystatus_control(MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE, getpid(), 0, NULL, 0); if (new_state != current_state) { - exit(IS_FREEZABLE_NOT_AS_EXPECTED); + exit(IS_FREEZABLE_NOT_AS_EXPECTED); } num_iter++; if (kill(getppid(), SIGUSR1) != 0) { - exit(SIGNAL_TO_PARENT_FAILED); + exit(SIGNAL_TO_PARENT_FAILED); } }); dispatch_activate(ds_signal); diff --git a/tests/memorystatus_vm_map_fork.c b/tests/memorystatus_vm_map_fork.c index e321bea17..1c450f8a7 100644 --- a/tests/memorystatus_vm_map_fork.c +++ b/tests/memorystatus_vm_map_fork.c @@ -19,7 +19,7 @@ T_GLOBAL_META( T_META_NAMESPACE("xnu.vm"), T_META_CHECK_LEAKS(false) -); + ); extern char **environ; @@ -39,7 +39,7 @@ extern char **environ; */ /* Test variants */ -#define TEST_ALLOWED 0x1 +#define TEST_ALLOWED 0x1 #define TEST_NOT_ALLOWED 0x2 /* @@ -47,7 +47,7 @@ extern char **environ; * is either allowed or disallowed for the * kern.memorystatus_vm_map_fork_pidwatch sysctl. */ -#define MEMORYSTATUS_VM_MAP_FORK_ALLOWED 0x100000000ul +#define MEMORYSTATUS_VM_MAP_FORK_ALLOWED 0x100000000ul #define MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED 0x200000000ul /* @@ -58,7 +58,7 @@ extern char **environ; static char testpath[PATH_MAX]; static uint32_t testpath_size = sizeof(testpath); #define LIMIT_DELTA_MB 5 /* an arbitrary limit delta */ -#define MEGABYTE (1024 * 1024) +#define MEGABYTE (1024 * 1024) /* * The child process communicates back to parent via an exit() code. @@ -93,7 +93,7 @@ is_development_kernel(void) return FALSE; } - return (dev != 0); + return dev != 0; } /* @@ -134,12 +134,12 @@ get_memorystatus_vm_map_fork_pidwatch() static void wait_for_free_mem(int need_mb) { - int64_t memsize; - int memorystatus_level; - size_t size; - int64_t avail; - int err; - int try; + int64_t memsize; + int memorystatus_level; + size_t size; + int64_t avail; + int err; + int try; /* * get amount of memory in the machine @@ -153,14 +153,14 @@ wait_for_free_mem(int need_mb) */ try = 1; for (;;) { - /* * memorystatus_level is a percentage of memory available. For example 20 means 1/5 of memory. * It currently doesn't exist on macOS but neither does jetsam, so pass the test there. */ size = sizeof(memorystatus_level); - if (sysctlbyname("kern.memorystatus_level", &memorystatus_level, &size, NULL, 0) != 0) + if (sysctlbyname("kern.memorystatus_level", &memorystatus_level, &size, NULL, 0) != 0) { return; + } T_QUIET; T_ASSERT_LE(memorystatus_level, 100, "memorystatus_level too high"); T_QUIET; T_ASSERT_GT(memorystatus_level, 0, "memorystatus_level negative"); @@ -172,14 +172,16 @@ wait_for_free_mem(int need_mb) /* * We're good to go if there's more than enough available. */ - if ((int64_t)need_mb * MEGABYTE < avail) + if ((int64_t)need_mb * MEGABYTE < avail) { return; + } /* * issue a message to log and sleep briefly to see if we can get more memory */ - if (try-- == 0) + if (try-- == 0) { break; + } T_LOG("Need %d MB, only %d MB available. sleeping 5 seconds for more to free. 
memorystatus_level %d", need_mb, (int)(avail / MEGABYTE), memorystatus_level); sleep(5); @@ -253,8 +255,8 @@ test_child_process(pid_t child_pid, int *status, struct rusage *ru) */ T_HELPER_DECL(child_process, "child allocates memory to failure") { -#define BYTESPERALLOC MEGABYTE -#define BYTESINEXCESS (2 * MEGABYTE) /* 2 MB - arbitrary */ +#define BYTESPERALLOC MEGABYTE +#define BYTESINEXCESS (2 * MEGABYTE) /* 2 MB - arbitrary */ char *limit; long limit_mb = 0; long max_bytes_to_munch, bytes_remaining, bytes_this_munch; @@ -264,14 +266,16 @@ T_HELPER_DECL(child_process, "child allocates memory to failure") * This helper is run in a child process. The helper sees one argument * as a string which is the amount of memory in megabytes to allocate. */ - if (argc != 1) + if (argc != 1) { exit(NO_MEMSIZE_ARG); + } limit = argv[0]; errno = 0; limit_mb = strtol(limit, NULL, 10); - if (errno != 0 || limit_mb <= 0) + if (errno != 0 || limit_mb <= 0) { exit(INVALID_MEMSIZE); + } /* Compute in excess of assigned limit */ max_bytes_to_munch = limit_mb * MEGABYTE; @@ -281,8 +285,9 @@ T_HELPER_DECL(child_process, "child allocates memory to failure") bytes_this_munch = MIN(bytes_remaining, BYTESPERALLOC); mem = malloc((size_t)bytes_this_munch); - if (mem == NULL) + if (mem == NULL) { exit(MALLOC_FAILED); + } arc4random_buf(mem, (size_t)bytes_this_munch); } @@ -297,18 +302,18 @@ T_HELPER_DECL(child_process, "child allocates memory to failure") static void memorystatus_vm_map_fork_parent(int test_variant) { - int max_task_pmem = 0; /* MB */ - size_t size = 0; - int active_limit_mb = 0; - int inactive_limit_mb = 0; - short flags = 0; - char memlimit_str[16]; - pid_t child_pid; - int child_status; - uint64_t kernel_pidwatch_val; - uint64_t expected_pidwatch_val; - int ret; - struct rusage ru; + int max_task_pmem = 0; /* MB */ + size_t size = 0; + int active_limit_mb = 0; + int inactive_limit_mb = 0; + short flags = 0; + char memlimit_str[16]; + pid_t child_pid; + int child_status; + uint64_t kernel_pidwatch_val; + uint64_t expected_pidwatch_val; + int ret; + struct rusage ru; enum child_exits exit_val; /* @@ -324,11 +329,11 @@ memorystatus_vm_map_fork_parent(int test_variant) */ size = sizeof(max_task_pmem); (void)sysctlbyname("kern.max_task_pmem", &max_task_pmem, &size, NULL, 0); - if (max_task_pmem <= 0) + if (max_task_pmem <= 0) { max_task_pmem = 0; + } if (test_variant == TEST_ALLOWED) { - /* * Tell the child to allocate less than 1/4 the system wide limit. */ @@ -338,9 +343,7 @@ memorystatus_vm_map_fork_parent(int test_variant) active_limit_mb = max_task_pmem / 4 - LIMIT_DELTA_MB; } expected_pidwatch_val = MEMORYSTATUS_VM_MAP_FORK_ALLOWED; - } else { /* TEST_NOT_ALLOWED */ - /* * Tell the child to allocate more than 1/4 the system wide limit. */ @@ -350,7 +353,6 @@ memorystatus_vm_map_fork_parent(int test_variant) } else { expected_pidwatch_val = MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED; } - } inactive_limit_mb = active_limit_mb; T_LOG("using limit of %d Meg", active_limit_mb); @@ -374,7 +376,7 @@ memorystatus_vm_map_fork_parent(int test_variant) /* * Prepare the arguments needed to spawn the child process. 
*/ - memset (memlimit_str, 0, sizeof(memlimit_str)); + memset(memlimit_str, 0, sizeof(memlimit_str)); (void)sprintf(memlimit_str, "%d", active_limit_mb); ret = _NSGetExecutablePath(testpath, &testpath_size); @@ -419,8 +421,9 @@ memorystatus_vm_map_fork_parent(int test_variant) if (!WIFEXITED(child_status)) { if (WIFSIGNALED(child_status)) { /* jetsam kills a process with SIGKILL */ - if (WTERMSIG(child_status) == SIGKILL) + if (WTERMSIG(child_status) == SIGKILL) { T_LOG("Child appears to have been a jetsam victim"); + } T_SKIP("Child terminated by signal %d test result invalid", WTERMSIG(child_status)); } T_SKIP("child did not exit normally (status=%d) test result invalid", child_status); @@ -430,7 +433,7 @@ memorystatus_vm_map_fork_parent(int test_variant) * We don't expect the child to exit for any other reason than success */ exit_val = (enum child_exits)WEXITSTATUS(child_status); - T_QUIET; T_ASSERT_EQ(exit_val, NORMAL_EXIT, "child exit due to: %s", + T_QUIET; T_ASSERT_EQ(exit_val, NORMAL_EXIT, "child exit due to: %s", (0 < exit_val && exit_val < NUM_CHILD_EXIT) ? child_exit_why[exit_val] : "unknown"); /* @@ -462,6 +465,5 @@ T_DECL(memorystatus_vm_map_fork_test_not_allowed, "test that corpse generation w T_DECL(memorystatus_vm_map_fork_test_allowed, "test corpse generation allowed") { - memorystatus_vm_map_fork_parent(TEST_ALLOWED); } diff --git a/tests/memorystatus_zone_test.c b/tests/memorystatus_zone_test.c index 007970ec9..bc376ee57 100644 --- a/tests/memorystatus_zone_test.c +++ b/tests/memorystatus_zone_test.c @@ -20,36 +20,36 @@ T_GLOBAL_META( T_META_NAMESPACE("xnu.vm"), T_META_CHECK_LEAKS(false) -); + ); -#define TIMEOUT_SECS 1500 +#define TIMEOUT_SECS 1500 #if TARGET_OS_EMBEDDED -#define ALLOCATION_SIZE_VM_REGION (16*1024) /* 16 KB */ -#define ALLOCATION_SIZE_VM_OBJECT ALLOCATION_SIZE_VM_REGION +#define ALLOCATION_SIZE_VM_REGION (16*1024) /* 16 KB */ +#define ALLOCATION_SIZE_VM_OBJECT ALLOCATION_SIZE_VM_REGION #else -#define ALLOCATION_SIZE_VM_REGION (1024*1024*100) /* 100 MB */ -#define ALLOCATION_SIZE_VM_OBJECT (16*1024) /* 16 KB */ +#define ALLOCATION_SIZE_VM_REGION (1024*1024*100) /* 100 MB */ +#define ALLOCATION_SIZE_VM_OBJECT (16*1024) /* 16 KB */ #endif -#define MAX_CHILD_PROCS 100 +#define MAX_CHILD_PROCS 100 -#define ZONEMAP_JETSAM_LIMIT_SYSCTL "kern.zone_map_jetsam_limit=60" +#define ZONEMAP_JETSAM_LIMIT_SYSCTL "kern.zone_map_jetsam_limit=60" -#define VME_ZONE_TEST_OPT "allocate_vm_regions" -#define VM_OBJECTS_ZONE_TEST_OPT "allocate_vm_objects" -#define GENERIC_ZONE_TEST_OPT "allocate_from_generic_zone" +#define VME_ZONE_TEST_OPT "allocate_vm_regions" +#define VM_OBJECTS_ZONE_TEST_OPT "allocate_vm_objects" +#define GENERIC_ZONE_TEST_OPT "allocate_from_generic_zone" -#define VME_ZONE "VM map entries" -#define VMOBJECTS_ZONE "vm objects" -#define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98 +#define VME_ZONE "VM map entries" +#define VMOBJECTS_ZONE "vm objects" +#define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98 -#define VM_TAG1 100 -#define VM_TAG2 101 +#define VM_TAG1 100 +#define VM_TAG2 101 enum { - VME_ZONE_TEST = 0, - VM_OBJECTS_ZONE_TEST, - GENERIC_ZONE_TEST, + VME_ZONE_TEST = 0, + VM_OBJECTS_ZONE_TEST, + GENERIC_ZONE_TEST, }; typedef struct test_config_struct { @@ -62,10 +62,9 @@ typedef struct test_config_struct { static test_config_struct current_test; static int num_children = 0; static bool test_ending = false; -static bool within_dispatch_signal_handler = false; -static bool within_dispatch_timer_handler = false; static dispatch_source_t ds_signal = NULL; static 
dispatch_source_t ds_timer = NULL; +static dispatch_queue_t dq_spawn = NULL; static ktrace_session_t session = NULL; static mach_zone_info_array_t zone_info_array = NULL; @@ -79,6 +78,7 @@ static pthread_mutex_t test_ending_mtx; static void allocate_vm_regions(void); static void allocate_vm_objects(void); static void allocate_from_generic_zone(void); +static void begin_test_teardown(void); static void cleanup_and_end_test(void); static void setup_ktrace_session(void); static void spawn_child_process(void); @@ -94,14 +94,15 @@ extern kern_return_t mach_zone_info_for_largest_zone( host_priv_t host, mach_zone_name_t *name, mach_zone_info_t *info -); + ); -static void allocate_vm_regions(void) +static void +allocate_vm_regions(void) { uint64_t alloc_size = ALLOCATION_SIZE_VM_REGION, i = 0; - printf("[%d] Allocating VM regions, each of size %lld KB\n", getpid(), (alloc_size>>10)); - for (i = 0; ; i++) { + printf("[%d] Allocating VM regions, each of size %lld KB\n", getpid(), (alloc_size >> 10)); + for (i = 0;; i++) { mach_vm_address_t addr = (mach_vm_address_t)NULL; /* Alternate VM tags between consecutive regions to prevent coalescing */ @@ -117,16 +118,21 @@ static void allocate_vm_regions(void) kill(getppid(), SIGUSR1); while (1) { - pause(); + sleep(2); + /* Exit if parent has exited. Ensures child processes don't linger around after the test exits */ + if (getppid() == 1) { + exit(0); + } } } -static void allocate_vm_objects(void) +static void +allocate_vm_objects(void) { uint64_t alloc_size = ALLOCATION_SIZE_VM_OBJECT, i = 0; - printf("[%d] Allocating VM regions, each of size %lld KB, each backed by a VM object\n", getpid(), (alloc_size>>10)); - for (i = 0; ; i++) { + printf("[%d] Allocating VM regions, each of size %lld KB, each backed by a VM object\n", getpid(), (alloc_size >> 10)); + for (i = 0;; i++) { mach_vm_address_t addr = (mach_vm_address_t)NULL; /* Alternate VM tags between consecutive regions to prevent coalescing */ @@ -146,16 +152,21 @@ static void allocate_vm_objects(void) kill(getppid(), SIGUSR1); while (1) { - pause(); + sleep(2); + /* Exit if parent has exited. Ensures child processes don't linger around after the test exits */ + if (getppid() == 1) { + exit(0); + } } } -static void allocate_from_generic_zone(void) +static void +allocate_from_generic_zone(void) { uint64_t i = 0; printf("[%d] Allocating mach_ports\n", getpid()); - for (i = 0; ; i++) { + for (i = 0;; i++) { mach_port_t port; if ((mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port)) != KERN_SUCCESS) { @@ -168,17 +179,23 @@ static void allocate_from_generic_zone(void) kill(getppid(), SIGUSR1); while (1) { - pause(); + sleep(2); + /* Exit if parent has exited. 
Ensures child processes don't linger around after the test exits */ + if (getppid() == 1) { + exit(0); + } } } -static void print_zone_info(mach_zone_name_t *zn, mach_zone_info_t *zi) +static void +print_zone_info(mach_zone_name_t *zn, mach_zone_info_t *zi) { T_LOG("ZONE NAME: %-35sSIZE: %-25lluELEMENTS: %llu", - zn->mzn_name, zi->mzi_cur_size, zi->mzi_count); + zn->mzn_name, zi->mzi_cur_size, zi->mzi_count); } -static void query_zone_info(void) +static void +query_zone_info(void) { int i; kern_return_t kr; @@ -202,7 +219,8 @@ static void query_zone_info(void) } } -static bool vme_zone_compares_to_vm_objects(void) +static bool +vme_zone_compares_to_vm_objects(void) { int i; uint64_t vm_object_element_count = 0, vm_map_entry_element_count = 0; @@ -217,7 +235,7 @@ static bool vme_zone_compares_to_vm_objects(void) print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i])); } - T_LOG("# VM map entries as percentage of # vm objects = %llu", (vm_map_entry_element_count * 100)/ vm_object_element_count); + T_LOG("# VM map entries as percentage of # vm objects = %llu", (vm_map_entry_element_count * 100) / vm_object_element_count); if (vm_map_entry_element_count >= ((vm_object_element_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) { T_LOG("Number of VM map entries is comparable to vm objects\n\n"); return true; @@ -226,7 +244,8 @@ static bool vme_zone_compares_to_vm_objects(void) return false; } -static bool verify_generic_jetsam_criteria(void) +static bool +verify_generic_jetsam_criteria(void) { T_LOG("Largest zone info"); print_zone_info(&largest_zone_name, &largest_zone_info); @@ -242,7 +261,37 @@ static bool verify_generic_jetsam_criteria(void) return false; } -static void cleanup_and_end_test(void) +static void +begin_test_teardown(void) +{ + /* End ktrace session */ + if (session != NULL) { + T_LOG("Ending ktrace session..."); + ktrace_end(session, 1); + } + + dispatch_sync(dq_spawn, ^{ + T_LOG("Cancelling dispatch sources..."); + + /* Disable the timer that queries and prints zone info periodically */ + if (ds_timer != NULL) { + dispatch_source_cancel(ds_timer); + } + + /* Disable signal handler that spawns child processes */ + if (ds_signal != NULL) { + /* + * No need for a dispatch_source_cancel_and_wait here. + * We're queueing this on the spawn queue, so no further + * processes will be spawned after the source is cancelled. + */ + dispatch_source_cancel(ds_signal); + } + }); +} + +static void +cleanup_and_end_test(void) { int i; @@ -258,18 +307,26 @@ static void cleanup_and_end_test(void) test_ending = true; pthread_mutex_unlock(&test_ending_mtx); - T_LOG("Number of processes spawned: %d", num_children); - T_LOG("Cleaning up..."); + dispatch_async(dq_spawn, ^{ + /* + * If the test succeeds, we will call dispatch_source_cancel twice, which is fine since + * the operation is idempotent. Just make sure to not drop all references to the dispatch sources + * (in this case we're not, we have globals holding references to them), or we can end up with + * use-after-frees which would be a problem. 
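The idempotence argument in this comment generalizes. A small self-contained sketch of the same pattern — keep a strong (here, global) reference to the source and funnel cancellation through one queue, so a second dispatch_source_cancel is a harmless no-op:

    #include <dispatch/dispatch.h>

    static dispatch_source_t g_source; /* global reference keeps the source alive */

    static void
    cancel_source_on(dispatch_queue_t q)
    {
        dispatch_async(q, ^{
            if (g_source != NULL) {
                /* Safe even if another path already cancelled it, as long
                 * as the source itself has not been released. */
                dispatch_source_cancel(g_source);
            }
        });
    }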
+ */ + /* Disable the timer that queries and prints zone info periodically */ + if (ds_timer != NULL) { + dispatch_source_cancel(ds_timer); + } - /* Disable the timer that queries and prints zone info periodically */ - if (ds_timer != NULL && !within_dispatch_timer_handler) { - dispatch_source_cancel(ds_timer); - } + /* Disable signal handler that spawns child processes */ + if (ds_signal != NULL) { + dispatch_source_cancel(ds_signal); + } + }); - /* Disable signal handler that spawns child processes, only if we're not in the event handler's context */ - if (ds_signal != NULL && !within_dispatch_signal_handler) { - dispatch_source_cancel_and_wait(ds_signal); - } + T_LOG("Number of processes spawned: %d", num_children); + T_LOG("Killing child processes..."); /* Kill all the child processes that were spawned */ for (i = 0; i < num_children; i++) { @@ -296,12 +353,16 @@ static void cleanup_and_end_test(void) ktrace_end(session, 1); } - for (i = 0; i < current_test.num_zones; i++) { - print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i])); + if (current_test.num_zones > 0) { + T_LOG("Relevant zone info at the end of the test:"); + for (i = 0; i < current_test.num_zones; i++) { + print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i])); + } } } -static void setup_ktrace_session(void) +static void +setup_ktrace_session(void) { int ret = 0; @@ -311,6 +372,10 @@ static void setup_ktrace_session(void) ktrace_set_interactive(session); + ktrace_set_dropped_events_handler(session, ^{ + T_FAIL("Dropped ktrace events; might have missed an expected jetsam event. Terminating early."); + }); + ktrace_set_completion_handler(session, ^{ ktrace_session_destroy(session); T_END; @@ -323,36 +388,39 @@ static void setup_ktrace_session(void) /* We don't care about jetsams for any other reason except zone-map-exhaustion */ if (event->arg2 == kMemorystatusKilledZoneMapExhaustion) { - cleanup_and_end_test(); - T_LOG("[memorystatus_do_kill] jetsam reason: zone-map-exhaustion, pid: %lu\n\n", event->arg1); - if (current_test.test_index == VME_ZONE_TEST || current_test.test_index == VM_OBJECTS_ZONE_TEST) { - /* - * For the VM map entries zone we try to kill the leaking process. - * Verify that we jetsammed one of the processes we spawned. - * - * For the vm objects zone we pick the leaking process via the VM map entries - * zone, if the number of vm objects and VM map entries are comparable. - * The test simulates this scenario, we should see a targeted jetsam for the - * vm objects zone too. - */ - for (i = 0; i < num_children; i++) { - if (child_pids[i] == (pid_t)event->arg1) { - received_jetsam_event = true; - break; + begin_test_teardown(); + T_LOG("[memorystatus_do_kill] jetsam reason: zone-map-exhaustion, pid: %d\n\n", (int)event->arg1); + if (current_test.test_index == VME_ZONE_TEST || current_test.test_index == VM_OBJECTS_ZONE_TEST) { + /* + * For the VM map entries zone we try to kill the leaking process. + * Verify that we jetsammed one of the processes we spawned. + * + * For the vm objects zone we pick the leaking process via the VM map entries + * zone, if the number of vm objects and VM map entries are comparable. + * The test simulates this scenario, we should see a targeted jetsam for the + * vm objects zone too. 
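For readers who haven't used it, the jetsam handler above runs inside a libktrace session. A pared-down skeleton of that flow — the header path and event ID are assumptions (ktrace is Apple-internal SPI), and the handler body is a placeholder; the call sequence mirrors what this patch already uses:

    #include <ktrace/ktrace.h>
    #include <dispatch/dispatch.h>
    #include <stdio.h>

    static void
    watch_one_event(uint32_t eventid)
    {
        ktrace_session_t s = ktrace_session_create();
        if (s == NULL) {
            return;
        }

        ktrace_events_single(s, eventid, ^(struct trace_point *event) {
            /* The real test inspects event->arg1 (pid) and arg2 (kill reason). */
            printf("event: arg1=%llu arg2=%llu\n",
                (unsigned long long)event->arg1,
                (unsigned long long)event->arg2);
            ktrace_end(s, 1);
        });

        ktrace_set_completion_handler(s, ^{
            ktrace_session_destroy(s);
        });

        if (ktrace_start(s, dispatch_get_main_queue()) != 0) {
            ktrace_session_destroy(s);
            return;
        }
        dispatch_main(); /* the completion handler fires after ktrace_end */
    }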
+ */ + for (i = 0; i < num_children; i++) { + if (child_pids[i] == (pid_t)event->arg1) { + received_jetsam_event = true; + T_LOG("Received jetsam event for a child"); + break; } } - /* - * If we didn't see a targeted jetsam, verify that the largest zone actually - * fulfilled the criteria for generic jetsams. - */ - if (!received_jetsam_event && verify_generic_jetsam_criteria()) { - received_jetsam_event = true; + /* + * If we didn't see a targeted jetsam, verify that the largest zone actually + * fulfilled the criteria for generic jetsams. + */ + if (!received_jetsam_event && verify_generic_jetsam_criteria()) { + received_jetsam_event = true; + T_LOG("Did not receive jetsam event for a child, but generic jetsam criteria holds"); } } else { - received_jetsam_event = true; + received_jetsam_event = true; + T_LOG("Received generic jetsam event"); } - T_ASSERT_TRUE(received_jetsam_event, "Received zone-map-exhaustion jetsam event as expected"); + T_QUIET; T_ASSERT_TRUE(received_jetsam_event, "Jetsam event not as expected"); } }); T_QUIET; T_ASSERT_POSIX_ZERO(ret, "ktrace_events_single"); @@ -361,7 +429,8 @@ static void setup_ktrace_session(void) T_QUIET; T_ASSERT_POSIX_ZERO(ret, "ktrace_start"); } -static void print_zone_map_size(void) +static void +print_zone_map_size(void) { int ret; uint64_t zstats[2]; @@ -370,10 +439,11 @@ static void print_zone_map_size(void) ret = sysctlbyname("kern.zone_map_size_and_capacity", &zstats, &zstats_size, NULL, 0); T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_size_and_capacity failed"); - T_LOG("Zone map capacity: %-30lldZone map size: %lld [%lld%% full]", zstats[1], zstats[0], (zstats[0] * 100)/zstats[1]); + T_LOG("Zone map capacity: %-30lldZone map size: %lld [%lld%% full]", zstats[1], zstats[0], (zstats[0] * 100) / zstats[1]); } -static void spawn_child_process(void) +static void +spawn_child_process(void) { pid_t pid = -1; char helper_func[50]; @@ -397,7 +467,8 @@ static void spawn_child_process(void) child_pids[num_children++] = pid; } -static void run_test(void) +static void +run_test(void) { uint64_t mem; uint32_t testpath_buf_size, pages; @@ -453,30 +524,27 @@ static void run_test(void) * spawning many children at once and creating a lot of memory pressure. */ signal(SIGUSR1, SIG_IGN); - ds_signal = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dispatch_get_main_queue()); + dq_spawn = dispatch_queue_create("spawn_queue", DISPATCH_QUEUE_SERIAL); + ds_signal = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq_spawn); T_QUIET; T_ASSERT_NOTNULL(ds_signal, "dispatch_source_create: signal"); dispatch_source_set_event_handler(ds_signal, ^{ - within_dispatch_signal_handler = true; print_zone_map_size(); /* Wait a few seconds before spawning another child. Keeps us from allocating too aggressively */ sleep(5); spawn_child_process(); - within_dispatch_signal_handler = false; }); dispatch_activate(ds_signal); /* Timer to query jetsam-relevant zone info every second. Print it every 10 seconds. 
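The timer wiring that follows is the stock libdispatch pattern; a self-contained sketch (the function name is illustrative):

    #include <dispatch/dispatch.h>

    /* 1 Hz timer: first fire one second from now, then every second, no leeway. */
    static dispatch_source_t
    make_poll_timer(dispatch_queue_t q, dispatch_block_t tick)
    {
        dispatch_source_t t =
            dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, q);
        if (t == NULL) {
            return NULL;
        }
        dispatch_source_set_timer(t,
            dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC), NSEC_PER_SEC, 0);
        dispatch_source_set_event_handler(t, tick);
        dispatch_activate(t);
        return t;
    }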
*/ ds_timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_queue_create("timer_queue", NULL)); T_QUIET; T_ASSERT_NOTNULL(ds_timer, "dispatch_source_create: timer"); - dispatch_source_set_timer(ds_timer, dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC), NSEC_PER_SEC, 0); + dispatch_source_set_timer(ds_timer, dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC), NSEC_PER_SEC, 0); dispatch_source_set_event_handler(ds_timer, ^{ - within_dispatch_timer_handler = true; query_zone_info(); - within_dispatch_timer_handler = false; - }); + }); dispatch_activate(ds_timer); /* Set up a ktrace session to listen for jetsam events */ @@ -491,7 +559,8 @@ static void run_test(void) dispatch_main(); } -static void move_to_idle_band(void) +static void +move_to_idle_band(void) { memorystatus_priority_properties_t props; @@ -534,38 +603,38 @@ T_HELPER_DECL(allocate_from_generic_zone, "allocates from a generic zone") * The test allocates zone memory pretty aggressively which can cause the system to panic * if the jetsam limit is quite high; a lower value keeps us from panicking. */ -T_DECL( memorystatus_vme_zone_test, - "allocates elements from the VM map entries zone, verifies zone-map-exhaustion jetsams", - T_META_ASROOT(true), - T_META_TIMEOUT(1800), +T_DECL( memorystatus_vme_zone_test, + "allocates elements from the VM map entries zone, verifies zone-map-exhaustion jetsams", + T_META_ASROOT(true), + T_META_TIMEOUT(1800), /* T_META_LTEPHASE(LTE_POSTINIT), */ - T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL)) + T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL)) { current_test = (test_config_struct) { .test_index = VME_ZONE_TEST, .helper_func = VME_ZONE_TEST_OPT, .num_zones = 1, - .zone_names = (mach_zone_name_t []){ + .zone_names = (mach_zone_name_t[]){ { .mzn_name = VME_ZONE } } }; run_test(); } -T_DECL( memorystatus_vm_objects_zone_test, - "allocates elements from the VM objects and the VM map entries zones, verifies zone-map-exhaustion jetsams", - T_META_ASROOT(true), - T_META_TIMEOUT(1800), +T_DECL( memorystatus_vm_objects_zone_test, + "allocates elements from the VM objects and the VM map entries zones, verifies zone-map-exhaustion jetsams", + T_META_ASROOT(true), + T_META_TIMEOUT(1800), /* T_META_LTEPHASE(LTE_POSTINIT), */ - T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL)) + T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL)) { current_test = (test_config_struct) { .test_index = VM_OBJECTS_ZONE_TEST, .helper_func = VM_OBJECTS_ZONE_TEST_OPT, .num_zones = 2, - .zone_names = (mach_zone_name_t []){ + .zone_names = (mach_zone_name_t[]){ { .mzn_name = VME_ZONE }, { .mzn_name = VMOBJECTS_ZONE} } @@ -573,13 +642,13 @@ T_DECL( memorystatus_vm_objects_zone_test, run_test(); } -T_DECL( memorystatus_generic_zone_test, - "allocates elements from a zone that doesn't have an optimized jetsam path, verifies zone-map-exhaustion jetsams", - T_META_ASROOT(true), - T_META_TIMEOUT(1800), +T_DECL( memorystatus_generic_zone_test, + "allocates elements from a zone that doesn't have an optimized jetsam path, verifies zone-map-exhaustion jetsams", + T_META_ASROOT(true), + T_META_TIMEOUT(1800), /* T_META_LTEPHASE(LTE_POSTINIT), */ - T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL)) + T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL)) { current_test = (test_config_struct) { .test_index = GENERIC_ZONE_TEST, diff --git a/tests/mktimer_kobject.c b/tests/mktimer_kobject.c index 54b24a0d5..a66986363 100644 --- a/tests/mktimer_kobject.c +++ b/tests/mktimer_kobject.c @@ -35,7 +35,7 @@ T_DECL(mktimer_kobject, "mktimer_kobject()", 
T_META_ALL_VALID_ARCHS(true)) // request a port-destroyed notification on the timer port kr = mach_port_request_notification(mach_task_self(), timer_port, MACH_NOTIFY_PORT_DESTROYED, - 0, notify_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); + 0, notify_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous); // this should fail! T_ASSERT_NE(kr, KERN_SUCCESS, "notifications should NOT work on mk_timer ports!"); @@ -47,4 +47,3 @@ T_DECL(mktimer_kobject, "mktimer_kobject()", T_META_ALL_VALID_ARCHS(true)) T_LOG("done"); } - diff --git a/tests/monotonic_core.c b/tests/monotonic_core.c index 9c04f491a..9574db602 100644 --- a/tests/monotonic_core.c +++ b/tests/monotonic_core.c @@ -29,9 +29,9 @@ #include T_GLOBAL_META( - T_META_NAMESPACE("xnu.monotonic"), - T_META_CHECK_LEAKS(false) -); + T_META_NAMESPACE("xnu.monotonic"), + T_META_CHECK_LEAKS(false) + ); static void skip_if_unsupported(void) @@ -41,7 +41,7 @@ skip_if_unsupported(void) size_t supported_size = sizeof(supported); r = sysctlbyname("kern.monotonic.supported", &supported, &supported_size, - NULL, 0); + NULL, 0); if (r < 0) { T_WITH_ERRNO; T_SKIP("could not find \"kern.monotonic.supported\" sysctl"); @@ -65,7 +65,7 @@ check_fixed_counts(uint64_t counts[2][2]) } T_DECL(core_fixed_thread_self, "check the current thread's fixed counters", - T_META_ASROOT(true)) + T_META_ASROOT(true)) { int err; extern int thread_selfcounts(int type, void *buf, size_t nbytes); @@ -84,7 +84,7 @@ T_DECL(core_fixed_thread_self, "check the current thread's fixed counters", } T_DECL(core_fixed_task, "check that task counting is working", - T_META_ASROOT(true)) + T_META_ASROOT(true)) { task_t task = mach_task_self(); kern_return_t kr; @@ -94,21 +94,21 @@ T_DECL(core_fixed_task, "check that task counting is working", skip_if_unsupported(); kr = task_inspect(task, TASK_INSPECT_BASIC_COUNTS, - (task_inspect_info_t)&counts[0], &size); + (task_inspect_info_t)&counts[0], &size); T_ASSERT_MACH_SUCCESS(kr, - "task_inspect(... TASK_INSPECT_BASIC_COUNTS ...)"); + "task_inspect(... TASK_INSPECT_BASIC_COUNTS ...)"); size = TASK_INSPECT_BASIC_COUNTS_COUNT; kr = task_inspect(task, TASK_INSPECT_BASIC_COUNTS, - (task_inspect_info_t)&counts[1], &size); + (task_inspect_info_t)&counts[1], &size); T_ASSERT_MACH_SUCCESS(kr, - "task_inspect(... TASK_INSPECT_BASIC_COUNTS ...)"); + "task_inspect(... 
TASK_INSPECT_BASIC_COUNTS ...)"); check_fixed_counts(counts); } T_DECL(core_fixed_kdebug, "check that the kdebug macros for monotonic work", - T_META_ASROOT(true)) + T_META_ASROOT(true)) { __block bool saw_events = false; ktrace_session_t s; @@ -122,8 +122,8 @@ T_DECL(core_fixed_kdebug, "check that the kdebug macros for monotonic work", T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create"); ktrace_events_single_paired(s, - KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_TMPCPU, 0x3fff), - ^(struct trace_point *start, struct trace_point *end) + KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_TMPCPU, 0x3fff), + ^(struct trace_point *start, struct trace_point *end) { uint64_t counts[2][2]; @@ -144,12 +144,12 @@ T_DECL(core_fixed_kdebug, "check that the kdebug macros for monotonic work", T_SETUPEND; T_ASSERT_POSIX_ZERO(ktrace_start(s, - dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), NULL); + dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), NULL); r = sysctlbyname("kern.monotonic.kdebug_test", NULL, NULL, &set, - sizeof(set)); + sizeof(set)); T_ASSERT_POSIX_SUCCESS(r, - "sysctlbyname(\"kern.monotonic.kdebug_test\", ...)"); + "sysctlbyname(\"kern.monotonic.kdebug_test\", ...)"); ktrace_end(s, 0); dispatch_main(); @@ -174,26 +174,26 @@ spin_task_inspect(__unused void *arg) while (true) { size = (unsigned int)sizeof(counts); (void)task_inspect(task, TASK_INSPECT_BASIC_COUNTS, - (task_inspect_info_t)&counts[0], &size); + (task_inspect_info_t)&counts[0], &size); /* * Not realistic for a process to see count values with the high bit * set, but kernel pointers will be that high. */ T_QUIET; T_ASSERT_LT(counts[0], 1ULL << 63, - "check for valid count entry 1"); + "check for valid count entry 1"); T_QUIET; T_ASSERT_LT(counts[1], 1ULL << 63, - "check for valid count entry 2"); + "check for valid count entry 2"); } } T_DECL(core_fixed_stack_leak_race, - "ensure no stack data is leaked by TASK_INSPECT_BASIC_COUNTS") + "ensure no stack data is leaked by TASK_INSPECT_BASIC_COUNTS") { T_SETUPBEGIN; int ncpus = 0; T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.logicalcpu_max", &ncpus, - &(size_t){ sizeof(ncpus) }, NULL, 0), "get number of CPUs"); + &(size_t){ sizeof(ncpus) }, NULL, 0), "get number of CPUs"); T_QUIET; T_ASSERT_GT(ncpus, 0, "got non-zero number of CPUs"); pthread_t *threads = calloc((unsigned long)ncpus, sizeof(*threads)); @@ -207,8 +207,8 @@ T_DECL(core_fixed_stack_leak_race, */ for (int i = 0; i < ncpus; i++) { T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&threads[i], NULL, - i & 1 ? spin_task_inspect : spin_thread_self_counts, NULL), - NULL); + i & 1 ? 
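/*
 * Note (not part of the patch): the 1ULL << 63 bound asserted above is the
 * leak detector. On arm64/x86_64 kernel pointers sit in the upper half of the
 * address space, so any stack word leaked through TASK_INSPECT_BASIC_COUNTS
 * would have its top bit set, while a genuine instruction or cycle count
 * cannot plausibly reach 2^63.
 */
#include <stdbool.h>
#include <stdint.h>

static bool
looks_like_kernel_pointer(uint64_t word)
{
	return (word & (1ULL << 63)) != 0;
}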
spin_task_inspect : spin_thread_self_counts, NULL), + NULL); } T_SETUPEND; @@ -228,7 +228,7 @@ perf_sysctl_deltas(const char *sysctl_name, const char *stat_name) skip_if_unsupported(); dt_stat_t instrs = dt_stat_create("instructions", "%s_instrs", - stat_name); + stat_name); dt_stat_t cycles = dt_stat_create("cycles", "%s_cycles", stat_name); T_SETUPEND; @@ -246,26 +246,26 @@ perf_sysctl_deltas(const char *sysctl_name, const char *stat_name) } T_DECL(perf_core_fixed_cpu, "test the performance of fixed CPU counter access", - T_META_ASROOT(true), T_META_TAG_PERF) + T_META_ASROOT(true), T_META_TAG_PERF) { perf_sysctl_deltas("kern.monotonic.fixed_cpu_perf", "fixed_cpu_counters"); } T_DECL(perf_core_fixed_thread, "test the performance of fixed thread counter access", - T_META_ASROOT(true), T_META_TAG_PERF) + T_META_ASROOT(true), T_META_TAG_PERF) { perf_sysctl_deltas("kern.monotonic.fixed_thread_perf", - "fixed_thread_counters"); + "fixed_thread_counters"); } T_DECL(perf_core_fixed_task, "test the performance of fixed task counter access", - T_META_ASROOT(true), T_META_TAG_PERF) + T_META_ASROOT(true), T_META_TAG_PERF) { perf_sysctl_deltas("kern.monotonic.fixed_task_perf", "fixed_task_counters"); } T_DECL(perf_core_fixed_thread_self, "test the performance of thread self counts", - T_META_TAG_PERF) + T_META_TAG_PERF) { extern int thread_selfcounts(int type, void *buf, size_t nbytes); uint64_t counts[2][2]; @@ -286,11 +286,11 @@ T_DECL(perf_core_fixed_thread_self, "test the performance of thread self counts" T_QUIET; T_ASSERT_POSIX_ZERO(r2, "__thread_selfcounts"); T_QUIET; T_ASSERT_GT(counts[1][0], counts[0][0], - "instructions increase monotonically"); + "instructions increase monotonically"); dt_stat_add(instrs, counts[1][0] - counts[0][0]); T_QUIET; T_ASSERT_GT(counts[1][1], counts[0][1], - "cycles increase monotonically"); + "cycles increase monotonically"); dt_stat_add(cycles, counts[1][1] - counts[0][1]); } diff --git a/tests/net_tun_pr_35136664.c b/tests/net_tun_pr_35136664.c index 366f066bd..c644f2ad8 100644 --- a/tests/net_tun_pr_35136664.c +++ b/tests/net_tun_pr_35136664.c @@ -1,4 +1,3 @@ - #include #include #include @@ -13,7 +12,7 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.net")); T_DECL(PR_35136664_utun, - "This bind a utun and close it without connecting") + "This bind a utun and close it without connecting") { int tunsock; struct ctl_info kernctl_info; @@ -38,7 +37,7 @@ T_DECL(PR_35136664_utun, } T_DECL(PR_35136664_ipsec, - "This bind a ipsec and close it without connecting") + "This bind a ipsec and close it without connecting") { int tunsock; struct ctl_info kernctl_info; diff --git a/tests/net_tuntests.c b/tests/net_tuntests.c index 91363ab63..d4b2477f5 100644 --- a/tests/net_tuntests.c +++ b/tests/net_tuntests.c @@ -1,4 +1,3 @@ - #include #include #include @@ -38,16 +37,19 @@ static void log_hexdump(const void *inp, size_t len) { unsigned i, off = 0; - char buf[9+16*3+1]; + char buf[9 + 16 * 3 + 1]; for (i = 0; i < len; i++) { - if (i % 16 == 0) + if (i % 16 == 0) { off = (unsigned)snprintf(buf, sizeof(buf), "%08x:", i); - off += (unsigned)snprintf(buf+off, sizeof(buf)-off, " %02x", (((const uint8_t *)inp)[i]) & 0xff); - if (i % 16 == 15) - T_LOG("%s", buf); } - if (len % 16) + off += (unsigned)snprintf(buf + off, sizeof(buf) - off, " %02x", (((const uint8_t *)inp)[i]) & 0xff); + if (i % 16 == 15) { T_LOG("%s", buf); + } + } + if (len % 16) { + T_LOG("%s", buf); + } } #endif @@ -111,14 +113,14 @@ check_enables(int tunsock, int enable_netif, int enable_flowswitch, int enable_c 
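/*
 * Illustrative sketch (not part of the patch): a standalone equivalent of the
 * log_hexdump() helper reformatted above — 16 bytes per line with an offset
 * prefix, flushing any final partial line. printf stands in for T_LOG so the
 * sketch runs outside darwintest.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void
hexdump(const void *inp, size_t len)
{
	const uint8_t *p = inp;
	char buf[9 + 16 * 3 + 1];
	unsigned off = 0;

	for (size_t i = 0; i < len; i++) {
		if (i % 16 == 0) {
			off = (unsigned)snprintf(buf, sizeof(buf), "%08zx:", i);
		}
		off += (unsigned)snprintf(buf + off, sizeof(buf) - off, " %02x", p[i]);
		if (i % 16 == 15) {
			printf("%s\n", buf);
		}
	}
	if (len % 16) {
		printf("%s\n", buf);
	}
}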
scratchlen = sizeof(scratch); T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF, - &scratch, &scratchlen), NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )scratchlen, sizeof(scratch), NULL); + &scratch, &scratchlen), NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL); T_QUIET; T_EXPECT_EQ(scratch, enable_netif, NULL); scratchlen = sizeof(scratch); T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH, - &scratch, &scratchlen), NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )scratchlen, sizeof(scratch), NULL); + &scratch, &scratchlen), NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL); if (get_skywalk_features() & SK_FEATURE_NETNS) { if (enable_netif) { T_QUIET; T_EXPECT_EQ(scratch, enable_flowswitch, NULL); @@ -131,8 +133,8 @@ check_enables(int tunsock, int enable_netif, int enable_flowswitch, int enable_c scratchlen = sizeof(scratch); T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL, - &scratch, &scratchlen), NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )scratchlen, sizeof(scratch), NULL); + &scratch, &scratchlen), NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL); if (g_is_ipsec_test && !enable_netif) { T_QUIET; T_EXPECT_EQ(scratch, 0, NULL); } else { @@ -143,15 +145,15 @@ check_enables(int tunsock, int enable_netif, int enable_flowswitch, int enable_c uuid_clear(uuid); uuidlen = sizeof(uuid_t); T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID, - uuid, &uuidlen), NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )uuidlen, sizeof(uuid_t), NULL); + uuid, &uuidlen), NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t), NULL); T_QUIET; T_EXPECT_FALSE(uuid_is_null(uuid), NULL); } else { uuid_clear(uuid); uuidlen = sizeof(uuid_t); T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID, - uuid, &uuidlen), ENXIO, NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )uuidlen, sizeof(uuid_t), NULL); + uuid, &uuidlen), ENXIO, NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t), NULL); T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid), NULL); } } @@ -162,14 +164,14 @@ tunsock_get_ifname(int s, char ifname[IFXNAMSIZ]) socklen_t optlen = IFXNAMSIZ; T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(getsockopt(s, SYSPROTO_CONTROL, g_OPT_IFNAME, ifname, &optlen), NULL); T_QUIET; T_ASSERT_TRUE(optlen > 0, NULL); - T_QUIET; T_ASSERT_TRUE(ifname[optlen-1] == '\0', NULL); - T_QUIET; T_ASSERT_TRUE(strlen(ifname)+1 == optlen, "got ifname \"%s\" len %zd expected %u", ifname, strlen(ifname), optlen); + T_QUIET; T_ASSERT_TRUE(ifname[optlen - 1] == '\0', NULL); + T_QUIET; T_ASSERT_TRUE(strlen(ifname) + 1 == optlen, "got ifname \"%s\" len %zd expected %u", ifname, strlen(ifname), optlen); } static short ifnet_get_flags(int s, const char ifname[IFNAMSIZ]) { - struct ifreq ifr; + struct ifreq ifr; memset(&ifr, 0, sizeof(ifr)); strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(ioctl(s, SIOCGIFFLAGS, (caddr_t)&ifr), NULL); @@ -203,7 +205,7 @@ ifnet_add_addr4(const char ifname[IFNAMSIZ], struct in_addr *addr, struct in_add } if (broadaddr != NULL || (addr != NULL && - (ifnet_get_flags(s, ifname) & IFF_POINTOPOINT) != 0)) { + (ifnet_get_flags(s, ifname) & IFF_POINTOPOINT) != 0)) { sin = 
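/*
 * Illustrative sketch (not part of the patch): how a kernel-control socket
 * like the tunsock interrogated above is typically opened — resolve the
 * control name to an id via CTLIOCGINFO, then connect a PF_SYSTEM socket.
 * UTUN_CONTROL_NAME is the stock utun control; error handling is elided and
 * the helper name is ours.
 */
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/kern_control.h>
#include <sys/sys_domain.h>
#include <net/if_utun.h>
#include <string.h>

static int
open_utun_socket(void)
{
	struct ctl_info info;
	struct sockaddr_ctl addr;
	int s = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);

	memset(&info, 0, sizeof(info));
	strlcpy(info.ctl_name, UTUN_CONTROL_NAME, sizeof(info.ctl_name));
	ioctl(s, CTLIOCGINFO, &info);

	memset(&addr, 0, sizeof(addr));
	addr.sc_len = sizeof(addr);
	addr.sc_family = AF_SYSTEM;
	addr.ss_sysaddr = AF_SYS_CONTROL;
	addr.sc_id = info.ctl_id;
	addr.sc_unit = 0; /* 0 lets the kernel pick the next free utunN */
	connect(s, (struct sockaddr *)&addr, sizeof(addr));
	return s;
}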
&ifra.ifra_broadaddr; sin->sin_len = sizeof(*sin); sin->sin_family = AF_INET; @@ -226,23 +228,23 @@ create_sa(const char ifname[IFXNAMSIZ], uint8_t type, uint32_t spi, struct in_ad } /* - - */ + * + */ struct { - struct sadb_msg msg __attribute((aligned(sizeof (uint64_t)))); - struct sadb_key key __attribute((aligned(sizeof (uint64_t)))); - struct sadb_sa sa __attribute((aligned(sizeof (uint64_t)))); - struct sadb_x_sa2 sa2 __attribute((aligned(sizeof (uint64_t)))); - struct sadb_x_ipsecif ipsecif __attribute((aligned(sizeof (uint64_t)))); + struct sadb_msg msg __attribute((aligned(sizeof(uint64_t)))); + struct sadb_key key __attribute((aligned(sizeof(uint64_t)))); + struct sadb_sa sa __attribute((aligned(sizeof(uint64_t)))); + struct sadb_x_sa2 sa2 __attribute((aligned(sizeof(uint64_t)))); + struct sadb_x_ipsecif ipsecif __attribute((aligned(sizeof(uint64_t)))); struct { - struct sadb_address addr __attribute((aligned(sizeof (uint64_t)))); - struct sockaddr_in saddr __attribute((aligned(sizeof (uint64_t)))); + struct sadb_address addr __attribute((aligned(sizeof(uint64_t)))); + struct sockaddr_in saddr __attribute((aligned(sizeof(uint64_t)))); } src; struct { - struct sadb_address addr __attribute((aligned(sizeof (uint64_t)))); - struct sockaddr_in saddr __attribute((aligned(sizeof (uint64_t)))); + struct sadb_address addr __attribute((aligned(sizeof(uint64_t)))); + struct sockaddr_in saddr __attribute((aligned(sizeof(uint64_t)))); } dst; } addcmd; @@ -259,17 +261,17 @@ create_sa(const char ifname[IFXNAMSIZ], uint8_t type, uint32_t spi, struct in_ad addcmd.key.sadb_key_len = PFKEY_UNIT64(sizeof(addcmd.key)); addcmd.key.sadb_key_exttype = SADB_EXT_KEY_ENCRYPT; - addcmd.key.sadb_key_bits = 0; - addcmd.key.sadb_key_reserved = 0; - - addcmd.sa.sadb_sa_len = PFKEY_UNIT64(sizeof(addcmd.sa)); - addcmd.sa.sadb_sa_exttype = SADB_EXT_SA; - addcmd.sa.sadb_sa_spi = htonl(spi); - addcmd.sa.sadb_sa_replay = 0; - addcmd.sa.sadb_sa_state = 0; - addcmd.sa.sadb_sa_auth = SADB_AALG_NONE; - addcmd.sa.sadb_sa_encrypt = SADB_EALG_NULL; - addcmd.sa.sadb_sa_flags = SADB_X_EXT_CYCSEQ; + addcmd.key.sadb_key_bits = 0; + addcmd.key.sadb_key_reserved = 0; + + addcmd.sa.sadb_sa_len = PFKEY_UNIT64(sizeof(addcmd.sa)); + addcmd.sa.sadb_sa_exttype = SADB_EXT_SA; + addcmd.sa.sadb_sa_spi = htonl(spi); + addcmd.sa.sadb_sa_replay = 0; + addcmd.sa.sadb_sa_state = 0; + addcmd.sa.sadb_sa_auth = SADB_AALG_NONE; + addcmd.sa.sadb_sa_encrypt = SADB_EALG_NULL; + addcmd.sa.sadb_sa_flags = SADB_X_EXT_CYCSEQ; addcmd.sa2.sadb_x_sa2_len = PFKEY_UNIT64(sizeof(addcmd.sa2)); addcmd.sa2.sadb_x_sa2_exttype = SADB_X_EXT_SA2; @@ -287,19 +289,19 @@ create_sa(const char ifname[IFXNAMSIZ], uint8_t type, uint32_t spi, struct in_ad addcmd.ipsecif.sadb_x_ipsecif_init_disabled = 0; addcmd.ipsecif.reserved = 0; - addcmd.src.addr.sadb_address_len = PFKEY_UNIT64(sizeof(addcmd.src)); - addcmd.src.addr.sadb_address_exttype = SADB_EXT_ADDRESS_SRC; - addcmd.src.addr.sadb_address_proto = IPSEC_ULPROTO_ANY; - addcmd.src.addr.sadb_address_prefixlen = sizeof(struct in_addr) << 3; //XXX Why? + addcmd.src.addr.sadb_address_len = PFKEY_UNIT64(sizeof(addcmd.src)); + addcmd.src.addr.sadb_address_exttype = SADB_EXT_ADDRESS_SRC; + addcmd.src.addr.sadb_address_proto = IPSEC_ULPROTO_ANY; + addcmd.src.addr.sadb_address_prefixlen = sizeof(struct in_addr) << 3; //XXX Why? 
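/*
 * Note (not part of the patch, and an assumption about the header): the
 * sadb_*_len fields set below are counted in 64-bit units, which is what the
 * KAME-derived PFKEY_UNIT64() macro from <net/pfkeyv2.h> encodes —
 * effectively a byte length shifted right by 3:
 *
 *   #define PFKEY_UNIT64(a) ((a) >> 3)
 *
 * so a 16-byte extension header gets sadb_..._len == 2.
 */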
addcmd.src.addr.sadb_address_reserved = 0; addcmd.src.saddr.sin_len = sizeof(addcmd.src.saddr); addcmd.src.saddr.sin_family = AF_INET; addcmd.src.saddr.sin_port = htons(0); addcmd.src.saddr.sin_addr = *src; - addcmd.dst.addr.sadb_address_len = PFKEY_UNIT64(sizeof(addcmd.dst)); - addcmd.dst.addr.sadb_address_exttype = SADB_EXT_ADDRESS_DST; - addcmd.dst.addr.sadb_address_proto = IPSEC_ULPROTO_ANY; + addcmd.dst.addr.sadb_address_len = PFKEY_UNIT64(sizeof(addcmd.dst)); + addcmd.dst.addr.sadb_address_exttype = SADB_EXT_ADDRESS_DST; + addcmd.dst.addr.sadb_address_proto = IPSEC_ULPROTO_ANY; addcmd.dst.addr.sadb_address_prefixlen = sizeof(struct in_addr) << 3; //XXX Why? addcmd.dst.addr.sadb_address_reserved = 0; addcmd.dst.saddr.sin_len = sizeof(addcmd.dst.saddr); @@ -342,31 +344,31 @@ startover: //enable_netif, enable_channel, enable_flowswitch); T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF, - &enable_netif, sizeof(enable_netif)), EINVAL, NULL); + &enable_netif, sizeof(enable_netif)), EINVAL, NULL); T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH, - &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL); + &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL); T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL, - &enable_channel, sizeof(enable_channel)), EINVAL, NULL); + &enable_channel, sizeof(enable_channel)), EINVAL, NULL); uuid_clear(uuid); uuidlen = sizeof(uuid_t); T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID, - uuid, &uuidlen), EINVAL, NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )uuidlen, sizeof(uuid_t), NULL); + uuid, &uuidlen), EINVAL, NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t), NULL); T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid), NULL); T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(bind(tunsock, (struct sockaddr *)&kernctl_addr, sizeof(kernctl_addr)), NULL); T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF, - &enable_netif, sizeof(enable_netif)), NULL); + &enable_netif, sizeof(enable_netif)), NULL); T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH, - &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL); + &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL); T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL, - &enable_channel, sizeof(enable_channel)), EINVAL, NULL); + &enable_channel, sizeof(enable_channel)), EINVAL, NULL); uuid_clear(uuid); uuidlen = sizeof(uuid_t); T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID, - uuid, &uuidlen), ENXIO, NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )uuidlen, sizeof(uuid_t), NULL); + uuid, &uuidlen), ENXIO, NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t), NULL); T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid), NULL); int error = connect(tunsock, (struct sockaddr *)&kernctl_addr, sizeof(kernctl_addr)); @@ -382,50 +384,50 @@ startover: T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(error, "connect()"); T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF, - &enable_netif, sizeof(enable_netif)), EINVAL, NULL); + &enable_netif, sizeof(enable_netif)), EINVAL, NULL); if (get_skywalk_features() & SK_FEATURE_NETNS) { if (enable_netif) { T_QUIET; T_WITH_ERRNO; 
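/*
 * Note (not part of the patch): the expectations in this hunk encode the tun
 * control socket's option state machine, summarized:
 *
 *   unbound:    ENABLE_NETIF / ENABLE_FLOWSWITCH / ENABLE_CHANNEL and the
 *               channel-UUID getsockopt all fail with EINVAL
 *   bound:      ENABLE_NETIF becomes settable; the other two still EINVAL,
 *               and the UUID reads back ENXIO (no channel yet)
 *   connected:  ENABLE_NETIF is now rejected (EINVAL, too late), while
 *               flowswitch/channel become settable subject to
 *               SK_FEATURE_NETNS and, for ipsec, a netif being enabled
 */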
T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH, - &enable_flowswitch, sizeof(enable_flowswitch)), NULL); + &enable_flowswitch, sizeof(enable_flowswitch)), NULL); } else { T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH, - &enable_flowswitch, sizeof(enable_flowswitch)), ENOENT, NULL); + &enable_flowswitch, sizeof(enable_flowswitch)), ENOENT, NULL); } } else { T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH, - &enable_flowswitch, sizeof(enable_flowswitch)), ENOTSUP, NULL); + &enable_flowswitch, sizeof(enable_flowswitch)), ENOTSUP, NULL); } if (enable_channel) { if (g_is_ipsec_test && !enable_netif) { /* ipsec doesn't support channels without a netif */ T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL, - &enable_channel, sizeof(enable_channel)), EOPNOTSUPP, NULL); + &enable_channel, sizeof(enable_channel)), EOPNOTSUPP, NULL); uuid_clear(uuid); uuidlen = sizeof(uuid_t); T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID, - uuid, &uuidlen), ENXIO, NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )uuidlen, sizeof(uuid_t), NULL); + uuid, &uuidlen), ENXIO, NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t), NULL); T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid), NULL); } else { T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL, - &enable_channel, sizeof(enable_channel)), NULL); + &enable_channel, sizeof(enable_channel)), NULL); uuid_clear(uuid); uuidlen = sizeof(uuid_t); T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID, - uuid, &uuidlen), NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )uuidlen, sizeof(uuid_t), NULL); + uuid, &uuidlen), NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t), NULL); T_QUIET; T_EXPECT_FALSE(uuid_is_null(uuid), NULL); } } else { T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL, - &enable_channel, sizeof(enable_channel)), ENXIO, NULL); + &enable_channel, sizeof(enable_channel)), ENXIO, NULL); uuid_clear(uuid); uuidlen = sizeof(uuid_t); T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID, - uuid, &uuidlen), ENXIO, NULL); - T_QUIET; T_EXPECT_EQ_ULONG((unsigned long )uuidlen, sizeof(uuid_t), NULL); + uuid, &uuidlen), ENXIO, NULL); + T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t), NULL); T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid), NULL); } @@ -442,12 +444,13 @@ ipsec_stats(void) { struct ifmibdata ifmd; - len = sizeof(struct ifmibdata); - name[3] = IFMIB_IFDATA; - name[4] = interesting_row; - name[5] = IpFDATA_GENERAL; - if (sysctl(name, 6, &ifmd, &len, (void *)0, 0) == -1) - err(1, "sysctl IFDATA_GENERAL %d", interesting_row); + len = sizeof(struct ifmibdata); + name[3] = IFMIB_IFDATA; + name[4] = interesting_row; + name[5] = IpFDATA_GENERAL; + if (sysctl(name, 6, &ifmd, &len, (void *)0, 0) == -1) { + err(1, "sysctl IFDATA_GENERAL %d", interesting_row); + } } #endif @@ -509,10 +512,10 @@ setup_tunsock(void) T_LOG("Created interface %s", ifname); - uint32_t ifaddr = (10 << 24) | ((unsigned)getpid()&0xffff) << 8 | 160; + uint32_t ifaddr = (10 << 24) | ((unsigned)getpid() & 0xffff) << 8 | 160; struct in_addr mask; g_addr1.s_addr = htonl(ifaddr); - g_addr2.s_addr = htonl(ifaddr+1); + 
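/*
 * Note (not part of the patch): the interface address computed in
 * setup_tunsock() above packs the test's pid into a 10/8 network so that
 * concurrent runs pick disjoint addresses. For example, with
 * getpid() == 0x1234:
 *
 *   ifaddr = (10 << 24) | (0x1234 << 8) | 160   ->  10.18.52.160
 *   peer   = ifaddr + 1                         ->  10.18.52.161
 */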
g_addr2.s_addr = htonl(ifaddr + 1); mask.s_addr = htonl(0xffffffff); ifnet_add_addr4(ifname, &g_addr1, &mask, &g_addr2); diff --git a/tests/netbsd_utimensat.c b/tests/netbsd_utimensat.c index c14f92a6f..4dc3c29b5 100644 --- a/tests/netbsd_utimensat.c +++ b/tests/netbsd_utimensat.c @@ -52,12 +52,13 @@ __RCSID("$NetBSD: t_utimensat.c,v 1.6 2017/01/10 15:13:56 christos Exp $"); #define BASELINK "symlink" #define FILEERR "dir/symlink" -static const struct timespec tptr[] = { +static const struct timespec tptr[] = { { 0x12345678, 987654321 }, { 0x15263748, 123456789 }, }; -static void chtmpdir(void) +static void +chtmpdir(void) { T_SETUPBEGIN; T_ASSERT_POSIX_ZERO(chdir(dt_tmpdir()), NULL); @@ -86,7 +87,7 @@ T_DECL(netbsd_utimensat_fd, "See that utimensat works with fd") struct stat st; T_ASSERT_POSIX_ZERO(mkdir(DIRPATH, 0755), NULL); - T_ASSERT_POSIX_SUCCESS((fd = open(FILEPATH, O_CREAT|O_RDWR, 0644)), NULL); + T_ASSERT_POSIX_SUCCESS((fd = open(FILEPATH, O_CREAT | O_RDWR, 0644)), NULL); T_ASSERT_POSIX_ZERO(close(fd), NULL); T_ASSERT_POSIX_SUCCESS((dfd = open(DIRPATH, O_RDONLY, 0)), NULL); @@ -108,7 +109,7 @@ T_DECL(netbsd_utimensat_fdcwd, "See that utimensat works with fd as AT_FDCWD") struct stat st; T_ASSERT_POSIX_ZERO(mkdir(DIRPATH, 0755), NULL); - T_ASSERT_POSIX_SUCCESS((fd = open(FILEPATH, O_CREAT|O_RDWR, 0644)), NULL); + T_ASSERT_POSIX_SUCCESS((fd = open(FILEPATH, O_CREAT | O_RDWR, 0644)), NULL); T_ASSERT_POSIX_ZERO(close(fd), NULL); T_ASSERT_POSIX_ZERO(chdir(DIRPATH), NULL); @@ -150,7 +151,7 @@ T_DECL(netbsd_utimensat_fderr2, "See that utimensat fails with bad fdat") char cwd[MAXPATHLEN]; T_ASSERT_POSIX_ZERO(mkdir(DIRPATH, 0755), NULL); - T_ASSERT_POSIX_SUCCESS((fd = open(FILEPATH, O_CREAT|O_RDWR, 0644)), NULL); + T_ASSERT_POSIX_SUCCESS((fd = open(FILEPATH, O_CREAT | O_RDWR, 0644)), NULL); T_ASSERT_POSIX_ZERO(close(fd), NULL); T_ASSERT_POSIX_SUCCESS((dfd = open(getcwd(cwd, MAXPATHLEN), O_RDONLY, 0)), NULL); @@ -165,7 +166,7 @@ T_DECL(netbsd_utimensat_fderr3, "See that utimensat fails with fd as -1") int fd; T_ASSERT_POSIX_ZERO(mkdir(DIRPATH, 0755), NULL); - T_ASSERT_POSIX_SUCCESS((fd = open(FILEPATH, O_CREAT|O_RDWR, 0644)), NULL); + T_ASSERT_POSIX_SUCCESS((fd = open(FILEPATH, O_CREAT | O_RDWR, 0644)), NULL); T_ASSERT_POSIX_ZERO(close(fd), NULL); T_ASSERT_EQ(utimensat(-1, FILEPATH, tptr, 0), -1, NULL); diff --git a/tests/ntp_adjtime_29192647.c b/tests/ntp_adjtime_29192647.c index 28663859e..c29db4bfe 100644 --- a/tests/ntp_adjtime_29192647.c +++ b/tests/ntp_adjtime_29192647.c @@ -14,15 +14,15 @@ #define ERROR 2 /*2 us of error tolerance*/ T_DECL(settimeofday_29192647, - "Verify that the syscall settimeofday is effective", - T_META_ASROOT(true), T_META_CHECK_LEAKS(NO), T_META_LTEPHASE(LTE_POSTINIT)) + "Verify that the syscall settimeofday is effective", + T_META_ASROOT(true), T_META_CHECK_LEAKS(NO), T_META_LTEPHASE(LTE_POSTINIT)) { struct timeval time; long new_time; - if (geteuid() != 0){ - T_SKIP("settimeofday_29192647 test requires root privileges to run."); - } + if (geteuid() != 0) { + T_SKIP("settimeofday_29192647 test requires root privileges to run."); + } T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&time, NULL), NULL); @@ -54,7 +54,9 @@ T_DECL(settimeofday_29192647, } } -static void get_abs_to_us_scale_factor(uint64_t* numer, uint64_t* denom){ +static void +get_abs_to_us_scale_factor(uint64_t* numer, uint64_t* denom) +{ struct timespec time; uint64_t old_abstime, new_abstime; uint64_t old_time_usec, new_time_usec; @@ -63,13 +65,13 @@ static void get_abs_to_us_scale_factor(uint64_t* 
numer, uint64_t* denom){ T_QUIET; T_ASSERT_EQ(mach_get_times(&old_abstime, NULL, &time), KERN_SUCCESS, NULL); - old_time_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec/1000; + old_time_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec / 1000; sleep(1); T_QUIET; T_ASSERT_EQ(mach_get_times(&new_abstime, NULL, &time), KERN_SUCCESS, NULL); - new_time_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec/1000; + new_time_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec / 1000; /* this is conversion factors from abs to nanos */ T_ASSERT_EQ(mach_timebase_info(&timebaseInfo), KERN_SUCCESS, NULL); @@ -81,10 +83,11 @@ static void get_abs_to_us_scale_factor(uint64_t* numer, uint64_t* denom){ time_conv1 *= timebaseInfo.numer; time_conv1 /= timebaseInfo.denom * 1000; - if (time_conv1 > new_time_usec) + if (time_conv1 > new_time_usec) { diff = time_conv1 - new_time_usec; - else + } else { diff = new_time_usec - time_conv1; + } T_EXPECT_LE_ULLONG(diff, (unsigned long long)ERROR, "Check scale factor time base (%u/%u) delta read usec %llu delta converted %llu delta abs %llu", timebaseInfo.numer, timebaseInfo.denom, time_conv1, new_time_usec, new_abstime); @@ -97,8 +100,8 @@ static void get_abs_to_us_scale_factor(uint64_t* numer, uint64_t* denom){ #define ADJTIME_OFFSET_PER_SEC 500 T_DECL(adjtime_29192647, - "Verify that the syscall adjtime is effective", - T_META_CHECK_LEAKS(NO), T_META_LTEPHASE(LTE_POSTINIT), T_META_ASROOT(true)) + "Verify that the syscall adjtime is effective", + T_META_CHECK_LEAKS(NO), T_META_LTEPHASE(LTE_POSTINIT), T_META_ASROOT(true)) { struct timespec time; struct timeval adj; @@ -113,17 +116,16 @@ T_DECL(adjtime_29192647, #endif if (geteuid() != 0) { - T_SKIP("adjtime_29192647 test requires root privileges to run."); - } + T_SKIP("adjtime_29192647 test requires root privileges to run."); + } lterdos_env = getenv("LTERDOS"); - if (lterdos_env != NULL){ + if (lterdos_env != NULL) { if (!(strcmp(lterdos_env, "YES") == 0)) { - T_SKIP("adjtime_29192647 test requires LTE to run."); + T_SKIP("adjtime_29192647 test requires LTE to run."); } - } - else { + } else { T_SKIP("adjtime_29192647 test requires LTE to run."); } @@ -134,7 +136,7 @@ T_DECL(adjtime_29192647, T_QUIET; T_ASSERT_EQ(mach_get_times(&old_abstime, NULL, &time), KERN_SUCCESS, NULL); - old_time_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec/1000; + old_time_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec / 1000; adj.tv_sec = 0; adj.tv_usec = ADJSTMENT; @@ -147,21 +149,21 @@ T_DECL(adjtime_29192647, * until the last second is slewed the final < 500 usecs. */ T_WITH_ERRNO; - T_ASSERT_POSIX_ZERO(adjtime(&adj, NULL),NULL); + T_ASSERT_POSIX_ZERO(adjtime(&adj, NULL), NULL); /* * Wait that the full adjustment is applied. * Note, add 2 more secs for take into account division error * and that the last block of adj is fully elapsed. 
*/ - sleep_time = (ADJSTMENT)/(ADJTIME_OFFSET_PER_SEC)+2; + sleep_time = (ADJSTMENT) / (ADJTIME_OFFSET_PER_SEC)+2; T_LOG("Waiting for %u sec\n", sleep_time); sleep(sleep_time); T_QUIET; T_ASSERT_EQ(mach_get_times(&new_abstime, NULL, &time), KERN_SUCCESS, NULL); - new_time_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec/1000; + new_time_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec / 1000; us_delta = new_time_usec - old_time_usec; us_delta -= ADJSTMENT; @@ -178,7 +180,6 @@ T_DECL(adjtime_29192647, T_EXPECT_LE_LONG(diff, (long) ERROR, "Check abs time vs calendar time"); T_EXPECT_GE_LONG(diff, (long) -ERROR, "Check abs time vs calendar time"); - } #define FREQ_PPM 222 /*222 PPM(us/s)*/ @@ -186,8 +187,8 @@ T_DECL(adjtime_29192647, #define OFFSET_US 123 /*123us*/ T_DECL(ntp_adjtime_29192647, - "Verify that the syscall ntp_adjtime is effective", - T_META_CHECK_LEAKS(NO), T_META_LTEPHASE(LTE_POSTINIT), T_META_ASROOT(true)) + "Verify that the syscall ntp_adjtime is effective", + T_META_CHECK_LEAKS(NO), T_META_LTEPHASE(LTE_POSTINIT), T_META_ASROOT(true)) { struct timespec time; struct timex ntptime; @@ -202,18 +203,17 @@ T_DECL(ntp_adjtime_29192647, T_SKIP("ntp_adjtime_29192647 test requires LTE to run."); #endif - if (geteuid() != 0){ - T_SKIP("ntp_adjtime_29192647 test requires root privileges to run."); - } + if (geteuid() != 0) { + T_SKIP("ntp_adjtime_29192647 test requires root privileges to run."); + } lterdos_env = getenv("LTERDOS"); - if (lterdos_env != NULL){ + if (lterdos_env != NULL) { if (!(strcmp(lterdos_env, "YES") == 0)) { - T_SKIP("adjtime_29192647 test requires LTE to run."); + T_SKIP("adjtime_29192647 test requires LTE to run."); } - } - else { + } else { T_SKIP("adjtime_29192647 test requires LTE to run."); } @@ -229,10 +229,10 @@ T_DECL(ntp_adjtime_29192647, ntptime.modes = MOD_STATUS; ntptime.status = TIME_OK; - /* ntp input freq is in ppm (us/s) * 2^16, max freq is 500 ppm */ - freq = (FREQ_PPM) * 65536; + /* ntp input freq is in ppm (us/s) * 2^16, max freq is 500 ppm */ + freq = (FREQ_PPM) * 65536; ntptime.modes |= MOD_FREQUENCY; - ntptime.freq = freq; + ntptime.freq = freq; T_LOG("Attemping to change calendar frequency of %d ppm", FREQ_PPM); @@ -246,13 +246,13 @@ T_DECL(ntp_adjtime_29192647, T_QUIET; T_ASSERT_EQ(mach_get_times(&abstime1, NULL, &time), KERN_SUCCESS, NULL); - time1_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec/1000; + time1_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec / 1000; sleep(1); T_QUIET; T_ASSERT_EQ(mach_get_times(&abstime2, NULL, &time), KERN_SUCCESS, NULL); - time2_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec/1000; + time2_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec / 1000; abs_delta = abstime2 - abstime1; us_delta = time2_usec - time1_usec; @@ -261,14 +261,14 @@ T_DECL(ntp_adjtime_29192647, time_conv *= num; time_conv /= den; - app = time_conv/USEC_PER_SEC; //sec elapsed + app = time_conv / USEC_PER_SEC; //sec elapsed time_delta = time_conv; time_delta += app * (FREQ_PPM); - app = time_conv%USEC_PER_SEC; + app = time_conv % USEC_PER_SEC; - time_delta += (app*(FREQ_PPM))/USEC_PER_SEC; + time_delta += (app * (FREQ_PPM)) / USEC_PER_SEC; diff = (long) us_delta - (long) time_delta; @@ -283,8 +283,8 @@ T_DECL(ntp_adjtime_29192647, freq = 0; ntptime.modes = MOD_STATUS; ntptime.status = TIME_OK; - ntptime.modes |= MOD_FREQUENCY; - ntptime.freq = freq; + ntptime.modes |= MOD_FREQUENCY; + ntptime.freq = freq; T_WITH_ERRNO; 
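/*
 * Note (not part of the patch): ntp_adjtime(2) expresses frequency in parts
 * per million scaled by 2^16 — a 16.16 fixed-point value capped at 500 ppm —
 * which is what the "* 65536" above implements. With FREQ_PPM == 222:
 *
 *   freq = 222 * 65536 = 14548992
 *
 * i.e. the calendar clock is asked to run 222 us/s fast until the test resets
 * freq to 0.
 */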
T_ASSERT_EQ(ntp_adjtime(&ntptime), TIME_OK, NULL); @@ -300,28 +300,28 @@ T_DECL(ntp_adjtime_29192647, memset(&ntptime, 0, sizeof(ntptime)); ntptime.modes |= MOD_STATUS; ntptime.status = TIME_OK; - ntptime.status |= STA_PLL|STA_FREQHOLD; + ntptime.status |= STA_PLL | STA_FREQHOLD; /* ntp input phase can be both ns or us (MOD_MICRO), max offset is 500 ms */ - ntptime.offset = OFFSET_US; - ntptime.modes |= MOD_OFFSET|MOD_MICRO; + ntptime.offset = OFFSET_US; + ntptime.modes |= MOD_OFFSET | MOD_MICRO; /* * The system will slew each sec of: * slew = ntp.offset >> (SHIFT_PLL + time_constant); * ntp.offset -= slew; */ - offset= (OFFSET_US) * 1000; + offset = (OFFSET_US) * 1000; sleep_time = 2; - while((offset>>SHIFT_PLL)>0){ + while ((offset >> SHIFT_PLL) > 0) { offset -= offset >> SHIFT_PLL; sleep_time++; } T_QUIET; T_ASSERT_EQ(mach_get_times(&abstime1, NULL, &time), KERN_SUCCESS, NULL); - time1_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec/1000; + time1_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec / 1000; T_LOG("Attemping to change calendar phase of %d us", OFFSET_US); @@ -336,7 +336,7 @@ T_DECL(ntp_adjtime_29192647, T_QUIET; T_ASSERT_EQ(mach_get_times(&abstime2, NULL, &time), KERN_SUCCESS, NULL); - time2_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec/1000; + time2_usec = (uint64_t)time.tv_sec * USEC_PER_SEC + (uint64_t)time.tv_nsec / 1000; abs_delta = abstime2 - abstime1; us_delta = time2_usec - time1_usec; @@ -356,16 +356,13 @@ T_DECL(ntp_adjtime_29192647, memset(&ntptime, 0, sizeof(ntptime)); ntptime.modes = MOD_STATUS; ntptime.status = TIME_OK; - ntptime.modes |= MOD_FREQUENCY; - ntptime.freq = 0; + ntptime.modes |= MOD_FREQUENCY; + ntptime.freq = 0; ntptime.status |= STA_PLL; - ntptime.offset = 0; + ntptime.offset = 0; ntptime.modes |= MOD_OFFSET; T_WITH_ERRNO; T_ASSERT_EQ(ntp_adjtime(&ntptime), TIME_OK, NULL); - } - - diff --git a/tests/perf_compressor.c b/tests/perf_compressor.c index 1a8a57f3f..3e8aa68d8 100644 --- a/tests/perf_compressor.c +++ b/tests/perf_compressor.c @@ -14,7 +14,7 @@ T_GLOBAL_META( T_META_NAMESPACE("xnu.vm.perf"), T_META_CHECK_LEAKS(false), T_META_TAG_PERF -); + ); enum { ALL_ZEROS, @@ -44,6 +44,7 @@ static const char *exit_codes_str[] = { CREATE_LIST(EXIT_CODES_STRING) }; +#define SYSCTL_FREEZE_TO_MEMORY "kern.memorystatus_freeze_to_memory=1" static pid_t pid = -1; static dt_stat_t r; @@ -55,8 +56,11 @@ void allocate_random_pages(char **buf, int num_pages, int vmpgsize); void allocate_representative_pages(char **buf, int num_pages, int vmpgsize); void run_compressor_test(int size_mb, int page_type); void freeze_helper_process(void); +void cleanup(void); -void allocate_zero_pages(char **buf, int num_pages, int vmpgsize) { +void +allocate_zero_pages(char **buf, int num_pages, int vmpgsize) +{ int i; for (i = 0; i < num_pages; i++) { @@ -65,19 +69,23 @@ void allocate_zero_pages(char **buf, int num_pages, int vmpgsize) { } } -void allocate_mostly_zero_pages(char **buf, int num_pages, int vmpgsize) { +void +allocate_mostly_zero_pages(char **buf, int num_pages, int vmpgsize) +{ int i, j; for (i = 0; i < num_pages; i++) { buf[i] = (char*)malloc((size_t)vmpgsize * sizeof(char)); memset(buf[i], 0, vmpgsize); for (j = 0; j < 40; j++) { - buf[i][j] = (char)(j+1); + buf[i][j] = (char)(j + 1); } } } -void allocate_random_pages(char **buf, int num_pages, int vmpgsize) { +void +allocate_random_pages(char **buf, int num_pages, int vmpgsize) +{ int i; for (i = 0; i < num_pages; i++) { @@ -87,7 +95,9 @@ void 
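/*
 * Illustrative sketch (not part of the patch): how the wait loop above sizes
 * sleep_time. Each second the kernel slews offset >> (SHIFT_PLL +
 * time_constant) and subtracts that from the pending offset, so the loop
 * replays the decay until the per-second slew rounds to zero. SHIFT_PLL is
 * the kernel PLL shift constant — assumed to be 4 here, matching the
 * kern_ntptime default.
 */
#include <stdio.h>

int
main(void)
{
	const int shift_pll = 4;   /* assumed value of SHIFT_PLL */
	long offset = 123 * 1000;  /* OFFSET_US worth of phase, in ns */
	unsigned sleep_time = 2;   /* slack for division error */

	while ((offset >> shift_pll) > 0) {
		offset -= offset >> shift_pll;
		sleep_time++;
	}
	printf("wait %u s, residual %ld ns\n", sleep_time, offset);
	return 0;
}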
allocate_random_pages(char **buf, int num_pages, int vmpgsize) { } // Gives us the compression ratio we see in the typical case (~2.7) -void allocate_representative_pages(char **buf, int num_pages, int vmpgsize) { +void +allocate_representative_pages(char **buf, int num_pages, int vmpgsize) +{ int i, j; char val; @@ -103,32 +113,46 @@ void allocate_representative_pages(char **buf, int num_pages, int vmpgsize) { } } -void freeze_helper_process(void) { - int ret; +void +freeze_helper_process(void) +{ + int ret, freeze_enabled; int64_t compressed_before, compressed_after, input_before, input_after; size_t length; + int errno_sysctl_freeze; length = sizeof(compressed_before); T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.compressor_compressed_bytes", &compressed_before, &length, NULL, 0), - "failed to query vm.compressor_compressed_bytes"); + "failed to query vm.compressor_compressed_bytes"); length = sizeof(input_before); T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.compressor_input_bytes", &input_before, &length, NULL, 0), - "failed to query vm.compressor_input_bytes"); + "failed to query vm.compressor_input_bytes"); T_STAT_MEASURE(s) { ret = sysctlbyname("kern.memorystatus_freeze", NULL, NULL, &pid, sizeof(pid)); + errno_sysctl_freeze = errno; }; length = sizeof(compressed_after); T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.compressor_compressed_bytes", &compressed_after, &length, NULL, 0), - "failed to query vm.compressor_compressed_bytes"); + "failed to query vm.compressor_compressed_bytes"); length = sizeof(input_after); T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.compressor_input_bytes", &input_after, &length, NULL, 0), - "failed to query vm.compressor_input_bytes"); - - T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.memorystatus_freeze failed"); + "failed to query vm.compressor_input_bytes"); + + length = sizeof(freeze_enabled); + T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.freeze_enabled", &freeze_enabled, &length, NULL, 0), + "failed to query vm.freeze_enabled"); + if (freeze_enabled) { + errno = errno_sysctl_freeze; + T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.memorystatus_freeze failed"); + } else { + /* If freezer is disabled, skip the test. This can happen due to disk space shortage. */ + T_LOG("Freeze has been disabled. Terminating early."); + T_END; + } - dt_stat_add(r, (double)(input_after - input_before)/(double)(compressed_after - compressed_before)); + dt_stat_add(r, (double)(input_after - input_before) / (double)(compressed_after - compressed_before)); ret = sysctlbyname("kern.memorystatus_thaw", NULL, NULL, &pid, sizeof(pid)); T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.memorystatus_thaw failed"); @@ -136,7 +160,22 @@ void freeze_helper_process(void) { T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGUSR1), "failed to send SIGUSR1 to child process"); } -void run_compressor_test(int size_mb, int page_type) { +void +cleanup(void) +{ + int status = 0; + + /* No helper process. */ + if (pid == -1) { + return; + } + /* Kill the helper process. 
*/ + kill(pid, SIGKILL); +} + +void +run_compressor_test(int size_mb, int page_type) +{ int ret; char sz_str[50]; char pt_str[50]; @@ -144,10 +183,18 @@ void run_compressor_test(int size_mb, int page_type) { char testpath[PATH_MAX]; uint32_t testpath_buf_size; dispatch_source_t ds_freeze, ds_proc; + int freeze_enabled; + size_t length; -#ifndef CONFIG_FREEZE - T_SKIP("Task freeze not supported."); -#endif + length = sizeof(freeze_enabled); + T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.freeze_enabled", &freeze_enabled, &length, NULL, 0), + "failed to query vm.freeze_enabled"); + if (!freeze_enabled) { + /* If freezer is disabled, skip the test. This can happen due to disk space shortage. */ + T_SKIP("Freeze has been disabled. Skipping test."); + } + + T_ATEND(cleanup); r = dt_stat_create("(input bytes / compressed bytes)", "compression_ratio"); s = dt_stat_time_create("compressor_latency"); @@ -160,13 +207,13 @@ void run_compressor_test(int size_mb, int page_type) { dispatch_source_set_event_handler(ds_freeze, ^{ if (!dt_stat_stable(s)) { - freeze_helper_process(); + freeze_helper_process(); } else { - dt_stat_finalize(s); - dt_stat_finalize(r); + dt_stat_finalize(s); + dt_stat_finalize(r); - kill(pid, SIGKILL); - dispatch_source_cancel(ds_freeze); + kill(pid, SIGKILL); + dispatch_source_cancel(ds_freeze); } }); dispatch_activate(ds_freeze); @@ -205,11 +252,11 @@ void run_compressor_test(int size_mb, int page_type) { code = WEXITSTATUS(status); if (code == 0) { - T_END; + T_END; } else if (code > 0 && code < EXIT_CODE_MAX) { - T_ASSERT_FAIL("Child exited with %s", exit_codes_str[code]); + T_ASSERT_FAIL("Child exited with %s", exit_codes_str[code]); } else { - T_ASSERT_FAIL("Child exited with unknown exit code %d", code); + T_ASSERT_FAIL("Child exited with unknown exit code %d", code); } }); dispatch_activate(ds_proc); @@ -244,21 +291,21 @@ T_HELPER_DECL(allocate_pages, "allocates pages to compress") { buf = (char**)malloc(sizeof(char*) * (size_t)num_pages); // Switch on the type of page requested - switch(page_type) { - case ALL_ZEROS: - allocate_zero_pages(buf, num_pages, vmpgsize); - break; - case MOSTLY_ZEROS: - allocate_mostly_zero_pages(buf, num_pages, vmpgsize); - break; - case RANDOM: - allocate_random_pages(buf, num_pages, vmpgsize); - break; - case TYPICAL: - allocate_representative_pages(buf, num_pages, vmpgsize); - break; - default: - exit(UNKNOWN_PAGE_TYPE); + switch (page_type) { + case ALL_ZEROS: + allocate_zero_pages(buf, num_pages, vmpgsize); + break; + case MOSTLY_ZEROS: + allocate_mostly_zero_pages(buf, num_pages, vmpgsize); + break; + case RANDOM: + allocate_random_pages(buf, num_pages, vmpgsize); + break; + case TYPICAL: + allocate_representative_pages(buf, num_pages, vmpgsize); + break; + default: + exit(UNKNOWN_PAGE_TYPE); } for (j = 0; j < num_pages; j++) { @@ -267,9 +314,9 @@ T_HELPER_DECL(allocate_pages, "allocates pages to compress") { dispatch_after(dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC), dispatch_get_main_queue(), ^{ /* Signal to the parent that we're done allocating and it's ok to freeze us */ - printf("Sending initial signal to parent to begin freezing\n"); + printf("[%d] Sending initial signal to parent to begin freezing\n", getpid()); if (kill(getppid(), SIGUSR1) != 0) { - exit(INITIAL_SIGNAL_TO_PARENT_FAILED); + exit(INITIAL_SIGNAL_TO_PARENT_FAILED); } }); @@ -284,10 +331,10 @@ T_HELPER_DECL(allocate_pages, "allocates pages to compress") { /* Make sure all the pages are accessed before trying to freeze again */ for (int x = 0; x < num_pages; 
x++) { - tmp = buf[x][0]; + tmp = buf[x][0]; } if (kill(getppid(), SIGUSR1) != 0) { - exit(SIGNAL_TO_PARENT_FAILED); + exit(SIGNAL_TO_PARENT_FAILED); } }); dispatch_activate(ds_signal); @@ -299,36 +346,51 @@ T_HELPER_DECL(allocate_pages, "allocates pages to compress") { // Keeping just the 100MB version for iOSMark #ifndef DT_IOSMARK -T_DECL(compr_10MB_zero, "Compressor latencies") { +T_DECL(compr_10MB_zero, + "Compression latency for 10MB - zero pages", + T_META_SYSCTL_INT(SYSCTL_FREEZE_TO_MEMORY)) { run_compressor_test(10, ALL_ZEROS); } -T_DECL(compr_10MB_mostly_zero, "Compressor latencies") { +T_DECL(compr_10MB_mostly_zero, + "Compression latency for 10MB - mostly zero pages", + T_META_SYSCTL_INT(SYSCTL_FREEZE_TO_MEMORY)) { run_compressor_test(10, MOSTLY_ZEROS); } -T_DECL(compr_10MB_random, "Compressor latencies") { +T_DECL(compr_10MB_random, + "Compression latency for 10MB - random pages", + T_META_SYSCTL_INT(SYSCTL_FREEZE_TO_MEMORY)) { run_compressor_test(10, RANDOM); } -T_DECL(compr_10MB_typical, "Compressor latencies") { +T_DECL(compr_10MB_typical, + "Compression latency for 10MB - typical pages", + T_META_SYSCTL_INT(SYSCTL_FREEZE_TO_MEMORY)) { run_compressor_test(10, TYPICAL); } -T_DECL(compr_100MB_zero, "Compressor latencies") { +T_DECL(compr_100MB_zero, + "Compression latency for 100MB - zero pages", + T_META_SYSCTL_INT(SYSCTL_FREEZE_TO_MEMORY)) { run_compressor_test(100, ALL_ZEROS); } -T_DECL(compr_100MB_mostly_zero, "Compressor latencies") { +T_DECL(compr_100MB_mostly_zero, + "Compression latency for 100MB - mostly zero pages", + T_META_SYSCTL_INT(SYSCTL_FREEZE_TO_MEMORY)) { run_compressor_test(100, MOSTLY_ZEROS); } -T_DECL(compr_100MB_random, "Compressor latencies") { +T_DECL(compr_100MB_random, + "Compression latency for 100MB - random pages", + T_META_SYSCTL_INT(SYSCTL_FREEZE_TO_MEMORY)) { run_compressor_test(100, RANDOM); } #endif -T_DECL(compr_100MB_typical, "Compressor latencies") { +T_DECL(compr_100MB_typical, + "Compression latency for 100MB - typical pages", + T_META_SYSCTL_INT(SYSCTL_FREEZE_TO_MEMORY)) { run_compressor_test(100, TYPICAL); } - diff --git a/tests/perf_exit.c b/tests/perf_exit.c index 1dba37cd0..df71ffd86 100644 --- a/tests/perf_exit.c +++ b/tests/perf_exit.c @@ -16,7 +16,7 @@ T_GLOBAL_META( T_META_ASROOT(true), T_META_LTEPHASE(LTE_SINGLEUSER), T_META_TAG_PERF -); + ); #if TARGET_OS_WATCH #define TEST_TIMEOUT 3600 * (NSEC_PER_SEC) #else @@ -58,7 +58,9 @@ static _Atomic bool tracing_on = false; void run_exit_test(int proc_wired_mem, int nthreads); -static void cleanup(void) { +static void +cleanup(void) +{ free(begin_ts); dispatch_release(spawn_queue); dispatch_release(processing_queue); @@ -113,10 +115,9 @@ T_DECL(exit, "exit(2) time from syscall start to end", T_META_TIMEOUT(TEST_TIMEO consumer_i++; dt_stat_finalize(s); if (consumer_i >= TEST_CASES_COUNT) { - ktrace_end(session, 1); - } - else { - s = create_stat(test_cases[consumer_i].wired_mem, test_cases[consumer_i].threads); + ktrace_end(session, 1); + } else { + s = create_stat(test_cases[consumer_i].wired_mem, test_cases[consumer_i].threads); } }); @@ -129,7 +130,7 @@ T_DECL(exit, "exit(2) time from syscall start to end", T_META_TIMEOUT(TEST_TIMEO T_ASSERT_LE(e->pid, PID_MAX, "pid %d is valid in end tracepoint", e->pid); if (begin_ts[e->pid] == 0) { - return; + return; } T_QUIET; T_ASSERT_LE(begin_ts[e->pid], e->timestamp, "timestamps are monotonically increasing"); @@ -137,7 +138,7 @@ T_DECL(exit, "exit(2) time from syscall start to end", T_META_TIMEOUT(TEST_TIMEO if (dt_stat_stable(s) && 
producer_i == consumer_i) { - dispatch_sync(spawn_queue, ^(void) { + dispatch_sync(spawn_queue, ^(void) { producer_i++; T_ASSERT_POSIX_ZERO(kdebug_trace(NEXT_CASE_EVENTID, producer_i, 0, 0, 0), "kdebug_trace returns 0"); }); @@ -154,7 +155,7 @@ T_DECL(exit, "exit(2) time from syscall start to end", T_META_TIMEOUT(TEST_TIMEO char nthreads_buf[32], mem_buf[32]; if (producer_i >= TEST_CASES_COUNT || !tracing_on) { - return; + return; } snprintf(nthreads_buf, 32, "%d", test_cases[producer_i].threads); @@ -170,8 +171,9 @@ T_DECL(exit, "exit(2) time from syscall start to end", T_META_TIMEOUT(TEST_TIMEO bret = waitpid(pid, &status, 0); T_QUIET; T_ASSERT_POSIX_SUCCESS(bret, "waited for process %d\n", pid); - if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) - T_ASSERT_FAIL("child process failed to run"); + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + T_ASSERT_FAIL("child process failed to run"); + } // Avoid saturating the CPU with new processes usleep(1000); @@ -187,4 +189,3 @@ T_DECL(exit, "exit(2) time from syscall start to end", T_META_TIMEOUT(TEST_TIMEO dispatch_main(); } - diff --git a/tests/perf_exit_proc.c b/tests/perf_exit_proc.c index b8bb88a7f..c709bec64 100644 --- a/tests/perf_exit_proc.c +++ b/tests/perf_exit_proc.c @@ -6,18 +6,21 @@ #include #include -static void* loop(__attribute__ ((unused)) void *arg) { +static void* +loop(__attribute__ ((unused)) void *arg) +{ while (1) { - } } -static int run_additional_threads(int nthreads) { +static int +run_additional_threads(int nthreads) +{ for (int i = 0; i < nthreads; i++) { pthread_t pthread; int err; - + err = pthread_create(&pthread, NULL, loop, NULL); if (err) { return err; @@ -27,13 +30,16 @@ static int run_additional_threads(int nthreads) { return 0; } -static int allocate_and_wire_memory(mach_vm_size_t size) { +static int +allocate_and_wire_memory(mach_vm_size_t size) +{ int err; task_t task = mach_task_self(); mach_vm_address_t addr; - if (size <= 0) + if (size <= 0) { return 0; + } err = mach_vm_allocate(task, &addr, size, VM_FLAGS_ANYWHERE); if (err != KERN_SUCCESS) { @@ -60,7 +66,9 @@ static int allocate_and_wire_memory(mach_vm_size_t size) { return 0; } -int main(int argc, char *argv[]) { +int +main(int argc, char *argv[]) +{ int nthreads = 0; int err; mach_vm_size_t wired_mem = 0; @@ -71,7 +79,7 @@ int main(int argc, char *argv[]) { if (argc > 2) { wired_mem = (mach_vm_size_t)strtoul(argv[2], NULL, 10); } - + err = allocate_and_wire_memory(wired_mem); if (err) { return err; diff --git a/tests/perf_kdebug.c b/tests/perf_kdebug.c index 0b8240ec4..d2861ba66 100644 --- a/tests/perf_kdebug.c +++ b/tests/perf_kdebug.c @@ -14,34 +14,41 @@ T_GLOBAL_META( T_META_ASROOT(true), T_META_CHECK_LEAKS(false), T_META_TAG_PERF -); + ); // // Helper functions for direct control over the kernel trace facility. 
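/*
 * Note (not part of the patch): the helpers below drive the legacy kdebug
 * sysctl interface directly. The bring-up order, mirrored by test() further
 * down, is:
 *
 *   KERN_KDREMOVE         tear down any existing trace state
 *   KERN_KDSETBUF         size the trace buffer
 *   KERN_KDSETUP          allocate it
 *   KERN_KDENABLE         start (value KDEBUG_ENABLE_TRACE) or stop (0)
 *
 * with KERN_KDSET_TYPEFILTER optionally narrowing which debugids are kept.
 */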
// -static void _sysctl_reset() { +static void +_sysctl_reset() +{ int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE }; - if(sysctl(mib, 3, NULL, NULL, NULL, 0)) { + if (sysctl(mib, 3, NULL, NULL, NULL, 0)) { T_FAIL("KERN_KDREMOVE sysctl failed"); } } -static void _sysctl_setbuf(uint32_t capacity) { +static void +_sysctl_setbuf(uint32_t capacity) +{ int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, (int)capacity }; if (sysctl(mib, 4, NULL, NULL, NULL, 0)) { T_FAIL("KERN_KDSETBUF sysctl failed"); } } -static void _sysctl_setup() { +static void +_sysctl_setup() +{ int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETUP }; if (sysctl(mib, 3, NULL, NULL, NULL, 0)) { T_FAIL("KERN_KDSETUP sysctl failed"); } } -static void _sysctl_enable(int value) +static void +_sysctl_enable(int value) { int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDENABLE, value }; if (sysctl(mib, 4, NULL, NULL, NULL, 0) < 0) { @@ -49,41 +56,53 @@ static void _sysctl_enable(int value) } } -static void _sysctl_enable_typefilter(uint8_t* type_filter_bitmap) { +static void +_sysctl_enable_typefilter(uint8_t* type_filter_bitmap) +{ int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDSET_TYPEFILTER }; size_t needed = KDBG_TYPEFILTER_BITMAP_SIZE; - if(sysctl(mib, 3, type_filter_bitmap, &needed, NULL, 0)) { + if (sysctl(mib, 3, type_filter_bitmap, &needed, NULL, 0)) { T_FAIL("KERN_KDSET_TYPEFILTER sysctl failed"); } } -static void _sysctl_nowrap(bool is_nowrap) { +static void +_sysctl_nowrap(bool is_nowrap) +{ int mib[] = { CTL_KERN, KERN_KDEBUG, is_nowrap ? KERN_KDEFLAGS : KERN_KDDFLAGS, KDBG_NOWRAP }; if (sysctl(mib, 4, NULL, NULL, NULL, 0)) { T_FAIL("KDBG_NOWRAP sysctl failed"); } } -static void enable_tracing(bool value) { +static void +enable_tracing(bool value) +{ _sysctl_enable(value ? KDEBUG_ENABLE_TRACE : 0); } -static void enable_typefilter_all_reject() { +static void +enable_typefilter_all_reject() +{ uint8_t type_filter_bitmap[KDBG_TYPEFILTER_BITMAP_SIZE]; memset(type_filter_bitmap, 0, sizeof(type_filter_bitmap)); _sysctl_enable_typefilter(type_filter_bitmap); } -static void enable_typefilter_all_pass() { +static void +enable_typefilter_all_pass() +{ uint8_t type_filter_bitmap[KDBG_TYPEFILTER_BITMAP_SIZE]; memset(type_filter_bitmap, 0xff, sizeof(type_filter_bitmap)); _sysctl_enable_typefilter(type_filter_bitmap); } -static void loop_kdebug_trace(dt_stat_time_t s) { +static void +loop_kdebug_trace(dt_stat_time_t s) +{ do { dt_stat_token start = dt_stat_time_begin(s); - for (uint32_t i = 0; i<100; i++) { + for (uint32_t i = 0; i < 100; i++) { kdebug_trace(0x97000000 | DBG_FUNC_NONE, i, i, i, i); kdebug_trace(0x97000000 | DBG_FUNC_NONE, i, i, i, i); kdebug_trace(0x97000000 | DBG_FUNC_NONE, i, i, i, i); @@ -99,10 +118,12 @@ static void loop_kdebug_trace(dt_stat_time_t s) { } while (!dt_stat_stable(s)); } -static void loop_getppid(dt_stat_time_t s) { +static void +loop_getppid(dt_stat_time_t s) +{ do { dt_stat_token start = dt_stat_time_begin(s); - for (uint32_t i = 0; i<100; i++) { + for (uint32_t i = 0; i < 100; i++) { getppid(); getppid(); getppid(); @@ -118,11 +139,15 @@ static void loop_getppid(dt_stat_time_t s) { } while (!dt_stat_stable(s)); } -static void reset_kdebug_trace(void) { +static void +reset_kdebug_trace(void) +{ _sysctl_reset(); } -static void test(const char* test_name, void (^pretest_setup)(void), void (*test)(dt_stat_time_t s)) { +static void +test(const char* test_name, void (^pretest_setup)(void), void (*test)(dt_stat_time_t s)) +{ T_ATEND(reset_kdebug_trace); _sysctl_reset(); _sysctl_setbuf(1000000); @@ -143,26 
+168,26 @@ static void test(const char* test_name, void (^pretest_setup)(void), void (*test // T_DECL(kdebug_trace_baseline_syscall, - "Test the latency of a syscall while kernel tracing is disabled") { + "Test the latency of a syscall while kernel tracing is disabled") { test("kdebug_trace_baseline_syscall", ^{ enable_tracing(false); }, loop_getppid); } T_DECL(kdebug_trace_kdbg_disabled, - "Test the latency of kdebug_trace while kernel tracing is disabled") { + "Test the latency of kdebug_trace while kernel tracing is disabled") { test("kdebug_trace_kdbg_disabled", ^{ enable_tracing(false); }, loop_kdebug_trace); } T_DECL(kdebug_trace_kdbg_enabled, - "Test the latency of kdebug_trace while kernel tracing is enabled with no typefilter") { + "Test the latency of kdebug_trace while kernel tracing is enabled with no typefilter") { test("kdebug_trace_kdbg_enabled", ^{ enable_tracing(true); }, loop_kdebug_trace); } T_DECL(kdebug_trace_kdbg_enabled_typefilter_pass, - "Test the latency of kdebug_trace while kernel tracing is enabled with a typefilter that passes the event") { + "Test the latency of kdebug_trace while kernel tracing is enabled with a typefilter that passes the event") { test("kdebug_trace_kdbg_enabled_typefilter_pass", ^{ enable_tracing(true); enable_typefilter_all_pass(); }, loop_kdebug_trace); } T_DECL(kdebug_trace_kdbg_enabled_typefilter_reject, - "Test the latency of kdebug_trace while kernel tracing is enabled with a typefilter that rejects the event") { + "Test the latency of kdebug_trace while kernel tracing is enabled with a typefilter that rejects the event") { test("kdebug_trace_kdbg_enabled_typefilter_reject", ^{ enable_tracing(true); enable_typefilter_all_reject(); }, loop_kdebug_trace); } diff --git a/tests/perf_spawn_fork.c b/tests/perf_spawn_fork.c index fad33b2ae..1d0a0ff00 100644 --- a/tests/perf_spawn_fork.c +++ b/tests/perf_spawn_fork.c @@ -11,7 +11,7 @@ T_GLOBAL_META( T_META_NAMESPACE("xnu.perf"), T_META_CHECK_LEAKS(false), T_META_TAG_PERF -); + ); #define SPAWN_MEASURE_LOOP(s) \ char *args[] = {"/usr/bin/true", NULL}; \ @@ -19,16 +19,16 @@ T_GLOBAL_META( pid_t pid; \ int status; \ while (!dt_stat_stable(s)) { \ - T_STAT_MEASURE(s) { \ - err = posix_spawn(&pid, args[0], NULL, NULL, args, NULL); \ - } \ - if (err) { \ - T_FAIL("posix_spawn returned %d", err); \ - } \ - waitpid(pid, &status, 0); \ - if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { \ - T_FAIL("Child process of posix_spawn failed to run"); \ - } \ + T_STAT_MEASURE(s) { \ + err = posix_spawn(&pid, args[0], NULL, NULL, args, NULL); \ + } \ + if (err) { \ + T_FAIL("posix_spawn returned %d", err); \ + } \ + waitpid(pid, &status, 0); \ + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { \ + T_FAIL("Child process of posix_spawn failed to run"); \ + } \ } T_DECL(posix_spawn_platform_binary_latency, "posix_spawn platform binary latency") { @@ -49,17 +49,17 @@ T_DECL(posix_spawn_platform_binary_latency, "posix_spawn platform binary latency pid_t pid; \ int status; \ while (!dt_stat_stable(s)) { \ - T_STAT_MEASURE(s) { \ - pid = fork(); \ - if (pid == 0) \ - exit(0); \ - else if (pid == -1) \ - T_FAIL("fork returned -1"); \ - } \ - waitpid(pid, &status, 0); \ - if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { \ - T_FAIL("forked process failed to exit properly"); \ - } \ + T_STAT_MEASURE(s) { \ + pid = fork(); \ + if (pid == 0) \ + exit(0); \ + else if (pid == -1) \ + T_FAIL("fork returned -1"); \ + } \ + waitpid(pid, &status, 0); \ + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { \ + 
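/*
 * Illustrative sketch (not part of the patch): the body that
 * SPAWN_MEASURE_LOOP above expands to, as a plain function — one
 * posix_spawn(2) of /usr/bin/true plus a waitpid is the unit of latency being
 * sampled. The helper name is ours.
 */
#include <spawn.h>
#include <sys/wait.h>
#include <stdbool.h>
#include <stddef.h>

static bool
spawn_true_once(void)
{
	char *args[] = { "/usr/bin/true", NULL };
	pid_t pid;
	int status;

	if (posix_spawn(&pid, args[0], NULL, NULL, args, NULL) != 0) {
		return false;
	}
	if (waitpid(pid, &status, 0) < 0) {
		return false;
	}
	return WIFEXITED(status) && WEXITSTATUS(status) == 0;
}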
T_FAIL("forked process failed to exit properly"); \ + } \ } T_DECL(fork, "fork latency") { diff --git a/tests/perf_vmfault.c b/tests/perf_vmfault.c index e3e81f1bd..384d35862 100644 --- a/tests/perf_vmfault.c +++ b/tests/perf_vmfault.c @@ -12,102 +12,278 @@ T_GLOBAL_META( T_META_NAMESPACE("xnu.vm.perf"), T_META_CHECK_LEAKS(false), T_META_TAG_PERF -); + ); #ifdef DT_IOSMARK -#define MEMSIZE (1UL<<29) /* 512 MB */ +#define MEMSIZE (1UL<<29) /* 512 MB */ #else -#define MEMSIZE (1UL<<27) /* 128 MB */ +#define MEMSIZE (1UL<<27) /* 128 MB */ #endif +#define VM_TAG1 100 +#define VM_TAG2 101 + enum { SOFT_FAULT, ZERO_FILL, - NUM_TESTS + NUM_FAULT_TYPES +}; + +enum { + VARIANT_DEFAULT = 1, + VARIANT_SINGLE_REGION, + VARIANT_MULTIPLE_REGIONS, + NUM_MAPPING_VARIANTS +}; + +static char *variant_str[] = { + "none", + "default", + "single-region", + "multiple-regions" }; -static int test_type; + +typedef struct { + char *region_addr; + char *shared_region_addr; + size_t region_len; +} memregion_config; + +static memregion_config *memregion_config_per_thread; + +static size_t pgsize; static int num_threads; static int ready_thread_count; -static size_t pgsize; -static size_t num_pages; -static char *memblock; -static char *memblock_share; -static dt_stat_time_t t; +static dt_stat_time_t runtime; static pthread_cond_t start_cvar; static pthread_cond_t threads_ready_cvar; static pthread_mutex_t ready_thread_count_lock; -static void map_mem_regions(void); -static void unmap_mem_regions(void); +static void map_mem_regions_default(int fault_type, size_t memsize); +static void map_mem_regions_single(int fault_type, size_t memsize); +static void map_mem_regions_multiple(int fault_type, size_t memsize); +static void map_mem_regions(int fault_type, int mapping_variant, size_t memsize); +static void unmap_mem_regions(int mapping_variant, size_t memsize); +static void setup_per_thread_regions(char *memblock, char *memblock_share, int fault_type, size_t memsize); static void fault_pages(int thread_id); static void execute_threads(void); static void *thread_setup(void *arg); -static void run_test(int test, int threads, int cpus); +static void run_test(int fault_type, int mapping_variant, size_t memsize); +static void setup_and_run_test(int test, int threads); static int get_ncpu(void); -static void map_mem_regions(void) +/* Allocates memory using the default mmap behavior. Each VM region created is capped at 128 MB. */ +static void +map_mem_regions_default(int fault_type, size_t memsize) { - char *ptr; volatile char val; vm_prot_t curprot, maxprot; + char *ptr, *memblock, *memblock_share = NULL; - memblock = (char *)mmap(NULL, MEMSIZE, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); + memblock = (char *)mmap(NULL, memsize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); T_QUIET; T_ASSERT_NE((void *)memblock, MAP_FAILED, "mmap"); - if (test_type == SOFT_FAULT) { - + if (fault_type == SOFT_FAULT) { /* Fault in all the pages of the original region. */ - for(ptr = memblock; ptr < memblock + MEMSIZE; ptr += pgsize) { + for (ptr = memblock; ptr < memblock + memsize; ptr += pgsize) { val = *ptr; } /* Remap the region so that subsequent accesses result in read soft faults. 
*/ T_QUIET; T_ASSERT_MACH_SUCCESS(vm_remap(mach_task_self(), (vm_address_t *)&memblock_share, - MEMSIZE, 0, VM_FLAGS_ANYWHERE, mach_task_self(), (vm_address_t)memblock, FALSE, - &curprot, &maxprot, VM_INHERIT_DEFAULT), "vm_remap"); + memsize, 0, VM_FLAGS_ANYWHERE, mach_task_self(), (vm_address_t)memblock, FALSE, + &curprot, &maxprot, VM_INHERIT_DEFAULT), "vm_remap"); } + setup_per_thread_regions(memblock, memblock_share, fault_type, memsize); } -static void unmap_mem_regions(void) +/* Creates a single VM region by mapping in a named memory entry. */ +static void +map_mem_regions_single(int fault_type, size_t memsize) { - if (test_type == SOFT_FAULT) { - T_QUIET; T_ASSERT_MACH_SUCCESS(munmap(memblock_share, MEMSIZE), "munmap"); + volatile char val; + vm_prot_t curprot, maxprot; + char *ptr, *memblock = NULL, *memblock_share = NULL; + vm_size_t size = memsize; + vm_offset_t addr1 = 0; + mach_port_t mem_handle = MACH_PORT_NULL; + + /* Allocate a region and fault in all the pages. */ + T_QUIET; T_ASSERT_MACH_SUCCESS(vm_allocate(mach_task_self(), &addr1, size, VM_FLAGS_ANYWHERE), "vm_allocate"); + for (ptr = (char *)addr1; ptr < (char *)addr1 + memsize; ptr += pgsize) { + val = *ptr; } - T_QUIET; T_ASSERT_MACH_SUCCESS(munmap(memblock, MEMSIZE), "munmap"); + + /* Create a named memory entry from the region allocated above, and de-allocate said region. */ + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry(mach_task_self(), &size, addr1, VM_PROT_ALL | MAP_MEM_NAMED_CREATE, + &mem_handle, MACH_PORT_NULL), "mach_make_memory_entry"); + T_QUIET; T_ASSERT_MACH_SUCCESS(vm_deallocate(mach_task_self(), addr1, size), "vm_deallocate"); + + /* Map in the named entry and deallocate it. */ + T_QUIET; T_ASSERT_MACH_SUCCESS(vm_map(mach_task_self(), (vm_address_t *)&memblock, size, 0, VM_FLAGS_ANYWHERE, mem_handle, 0, + FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_NONE), "vm_map"); + T_QUIET; T_ASSERT_MACH_SUCCESS(mach_port_deallocate(mach_task_self(), mem_handle), "mach_port_deallocate"); + + if (fault_type == SOFT_FAULT) { + /* Fault in all the pages of the original region. */ + for (ptr = memblock; ptr < memblock + memsize; ptr += pgsize) { + val = *ptr; + } + /* Remap the region so that subsequent accesses result in read soft faults. */ + T_QUIET; T_ASSERT_MACH_SUCCESS(vm_remap(mach_task_self(), (vm_address_t *)&memblock_share, + memsize, 0, VM_FLAGS_ANYWHERE, mach_task_self(), (vm_address_t)memblock, FALSE, + &curprot, &maxprot, VM_INHERIT_DEFAULT), "vm_remap"); + } + setup_per_thread_regions(memblock, memblock_share, fault_type, memsize); } -static void fault_pages(int thread_id) +/* Allocates a separate VM region for each thread. */ +static void +map_mem_regions_multiple(int fault_type, size_t memsize) { - size_t region_len, region_start, region_end; - char *ptr, *block; + int i; + size_t region_len, num_pages; volatile char val; + char *ptr, *memblock, *memblock_share; + vm_prot_t curprot, maxprot; - region_len = num_pages / (size_t)num_threads; - region_start = region_len * (size_t)thread_id; + num_pages = memsize / pgsize; - if((size_t)thread_id < num_pages % (size_t)num_threads) { - region_start += (size_t)thread_id; - region_len++; + for (i = 0; i < num_threads; i++) { + memblock = NULL; + + region_len = num_pages / (size_t)num_threads; + if ((size_t)i < num_pages % (size_t)num_threads) { + region_len++; + } + region_len *= pgsize; + + int flags = VM_MAKE_TAG((i % 2)? 
VM_TAG1 : VM_TAG2) | MAP_ANON | MAP_PRIVATE; + + memblock = (char *)mmap(NULL, region_len, PROT_READ | PROT_WRITE, flags, -1, 0); + T_QUIET; T_ASSERT_NE((void *)memblock, MAP_FAILED, "mmap"); + memregion_config_per_thread[i].region_addr = memblock; + memregion_config_per_thread[i].shared_region_addr = 0; + memregion_config_per_thread[i].region_len = region_len; + + if (fault_type == SOFT_FAULT) { + /* Fault in all the pages of the original region. */ + for (ptr = memblock; ptr < memblock + region_len; ptr += pgsize) { + val = *ptr; + } + memblock_share = NULL; + /* Remap the region so that subsequent accesses result in read soft faults. */ + T_QUIET; T_ASSERT_MACH_SUCCESS(vm_remap(mach_task_self(), (vm_address_t *)&memblock_share, + region_len, 0, VM_FLAGS_ANYWHERE, mach_task_self(), (vm_address_t)memblock, FALSE, + &curprot, &maxprot, VM_INHERIT_DEFAULT), "vm_remap"); + memregion_config_per_thread[i].shared_region_addr = memblock_share; + } } - else { - region_start += num_pages % (size_t)num_threads; +} + +static void +map_mem_regions(int fault_type, int mapping_variant, size_t memsize) +{ + memregion_config_per_thread = (memregion_config *)malloc(sizeof(*memregion_config_per_thread) * (size_t)num_threads); + switch (mapping_variant) { + case VARIANT_SINGLE_REGION: + map_mem_regions_single(fault_type, memsize); + break; + case VARIANT_MULTIPLE_REGIONS: + map_mem_regions_multiple(fault_type, memsize); + break; + case VARIANT_DEFAULT: + default: + map_mem_regions_default(fault_type, memsize); } +} + +static void +setup_per_thread_regions(char *memblock, char *memblock_share, int fault_type, size_t memsize) +{ + int i; + size_t region_len, region_start, num_pages; + + num_pages = memsize / pgsize; + for (i = 0; i < num_threads; i++) { + region_len = num_pages / (size_t)num_threads; + region_start = region_len * (size_t)i; + + if ((size_t)i < num_pages % (size_t)num_threads) { + region_start += (size_t)i; + region_len++; + } else { + region_start += num_pages % (size_t)num_threads; + } - region_start *= pgsize; - region_len *= pgsize; - region_end = region_start + region_len; + region_start *= pgsize; + region_len *= pgsize; - block = (test_type == SOFT_FAULT)? memblock_share: memblock; - for(ptr = block + region_start; ptr < block + region_end; ptr += pgsize) { + memregion_config_per_thread[i].region_addr = memblock + region_start; + memregion_config_per_thread[i].shared_region_addr = ((fault_type == SOFT_FAULT) ? 
+ memblock_share + region_start : 0); + memregion_config_per_thread[i].region_len = region_len; + } +} + +static void +unmap_mem_regions(int mapping_variant, size_t memsize) +{ + if (mapping_variant == VARIANT_MULTIPLE_REGIONS) { + int i; + for (i = 0; i < num_threads; i++) { + if (memregion_config_per_thread[i].shared_region_addr != 0) { + T_QUIET; T_ASSERT_MACH_SUCCESS(munmap(memregion_config_per_thread[i].shared_region_addr, + memregion_config_per_thread[i].region_len), "munmap"); + } + T_QUIET; T_ASSERT_MACH_SUCCESS(munmap(memregion_config_per_thread[i].region_addr, + memregion_config_per_thread[i].region_len), "munmap"); + } + } else { + if (memregion_config_per_thread[0].shared_region_addr != 0) { + T_QUIET; T_ASSERT_MACH_SUCCESS(munmap(memregion_config_per_thread[0].shared_region_addr, memsize), "munmap"); + } + T_QUIET; T_ASSERT_MACH_SUCCESS(munmap(memregion_config_per_thread[0].region_addr, memsize), "munmap"); + } +} + +static void +fault_pages(int thread_id) +{ + char *ptr, *block; + volatile char val; + + block = memregion_config_per_thread[thread_id].shared_region_addr ? + memregion_config_per_thread[thread_id].shared_region_addr : + memregion_config_per_thread[thread_id].region_addr; + for (ptr = block; ptr < block + memregion_config_per_thread[thread_id].region_len; ptr += pgsize) { val = *ptr; } } -static void execute_threads(void) +static void * +thread_setup(void *arg) +{ + int my_index = *((int *)arg); + + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&ready_thread_count_lock), "pthread_mutex_lock"); + ready_thread_count++; + if (ready_thread_count == num_threads) { + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_cond_signal(&threads_ready_cvar), "pthread_cond_signal"); + } + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_cond_wait(&start_cvar, &ready_thread_count_lock), "pthread_cond_wait"); + T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&ready_thread_count_lock), "pthread_mutex_unlock"); + + fault_pages(my_index); + return NULL; +} + +static void +execute_threads(void) { int thread_index, thread_retval; int *thread_indices; - void *thread_retval_ptr = &thread_retval; + void *thread_retval_ptr = &thread_retval; pthread_t* threads; T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_cond_init(&threads_ready_cvar, NULL), "pthread_cond_init"); @@ -117,24 +293,24 @@ static void execute_threads(void) threads = (pthread_t *)malloc(sizeof(*threads) * (size_t)num_threads); thread_indices = (int *)malloc(sizeof(*thread_indices) * (size_t)num_threads); - for(thread_index = 0; thread_index < num_threads; thread_index++) { + for (thread_index = 0; thread_index < num_threads; thread_index++) { thread_indices[thread_index] = thread_index; T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&threads[thread_index], NULL, - thread_setup, (void *)&thread_indices[thread_index]), "pthread_create"); + thread_setup, (void *)&thread_indices[thread_index]), "pthread_create"); } T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&ready_thread_count_lock), "pthread_mutex_lock"); - if(ready_thread_count != num_threads) { + if (ready_thread_count != num_threads) { T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_cond_wait(&threads_ready_cvar, &ready_thread_count_lock), - "pthread_cond_wait"); + "pthread_cond_wait"); } T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&ready_thread_count_lock), "pthread_mutex_unlock"); - T_STAT_MEASURE(t) { + T_STAT_MEASURE(runtime) { T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_cond_broadcast(&start_cvar), "pthread_cond_broadcast"); - for(thread_index = 0; thread_index < num_threads; 
thread_index++) { + for (thread_index = 0; thread_index < num_threads; thread_index++) { T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_join(threads[thread_index], &thread_retval_ptr), - "pthread_join"); + "pthread_join"); } }; @@ -142,72 +318,97 @@ static void execute_threads(void) free(thread_indices); } -static void *thread_setup(void *arg) -{ - int my_index = *((int *)arg); - - T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_lock(&ready_thread_count_lock), "pthread_mutex_lock"); - ready_thread_count++; - if(ready_thread_count == num_threads) { - T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_cond_signal(&threads_ready_cvar), "pthread_cond_signal"); - } - T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_cond_wait(&start_cvar, &ready_thread_count_lock), "pthread_cond_wait"); - T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_mutex_unlock(&ready_thread_count_lock), "pthread_mutex_unlock"); - - fault_pages(my_index); - return NULL; -} - -static void run_test(int test, int threads, int cpus) +static void +run_test(int fault_type, int mapping_variant, size_t memsize) { + char metric_str[32]; + size_t num_pages; size_t sysctl_size = sizeof(pgsize); int ret = sysctlbyname("vm.pagesize", &pgsize, &sysctl_size, NULL, 0); T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl vm.pagesize failed"); - test_type = test; - num_threads = threads; - num_pages = MEMSIZE / pgsize; + num_pages = memsize / pgsize; - T_QUIET; T_ASSERT_LT(test_type, NUM_TESTS, "invalid test type"); + T_QUIET; T_ASSERT_LT(fault_type, NUM_FAULT_TYPES, "invalid test type"); + T_QUIET; T_ASSERT_LT(mapping_variant, NUM_MAPPING_VARIANTS, "invalid mapping variant"); T_QUIET; T_ASSERT_GT(num_threads, 0, "num_threads <= 0"); - T_QUIET; T_ASSERT_GT((int)num_pages/ num_threads, 0, "num_pages/num_threads <= 0"); + T_QUIET; T_ASSERT_GT((int)num_pages / num_threads, 0, "num_pages/num_threads <= 0"); - T_LOG("No. of cpus: %d", cpus); + T_LOG("No. of cpus: %d", get_ncpu()); T_LOG("No. of threads: %d", num_threads); T_LOG("No. of pages: %ld", num_pages); T_LOG("Pagesize: %ld", pgsize); + T_LOG("Allocation size: %ld MB", memsize / (1024 * 1024)); + T_LOG("Mapping variant: %s", variant_str[mapping_variant]); + + snprintf(metric_str, 32, "Runtime-%s", variant_str[mapping_variant]); + runtime = dt_stat_time_create(metric_str); - t = dt_stat_time_create("Runtime"); // This sets the A/B failure threshold at 50% of baseline for Runtime - dt_stat_set_variable(t, kPCFailureThresholdPctVar, 50.0); - while (!dt_stat_stable(t)) { - map_mem_regions(); + dt_stat_set_variable((dt_stat_t)runtime, kPCFailureThresholdPctVar, 50.0); + while (!dt_stat_stable(runtime)) { + map_mem_regions(fault_type, mapping_variant, memsize); execute_threads(); - unmap_mem_regions(); + unmap_mem_regions(mapping_variant, memsize); + } + + dt_stat_finalize(runtime); + T_LOG("Throughput-%s (MB/s): %lf\n\n", variant_str[mapping_variant], (double)memsize / (1024 * 1024) / dt_stat_mean((dt_stat_t)runtime)); +} + +static void +setup_and_run_test(int fault_type, int threads) +{ + int i, mapping_variant; + size_t memsize; + char *e; + + mapping_variant = VARIANT_DEFAULT; + memsize = MEMSIZE; + num_threads = threads; + + if ((e = getenv("NTHREADS"))) { + if (threads == 1) { + T_SKIP("Custom environment variables specified. 
Skipping single threaded version."); + } + num_threads = (int)strtol(e, NULL, 0); + } + + if ((e = getenv("MEMSIZEMB"))) { + memsize = (size_t)strtol(e, NULL, 0) * 1024 * 1024; + } + + if ((e = getenv("VARIANT"))) { + mapping_variant = (int)strtol(e, NULL, 0); + run_test(fault_type, mapping_variant, memsize); + } else { + for (i = VARIANT_DEFAULT; i < NUM_MAPPING_VARIANTS; i++) { + run_test(fault_type, i, memsize); + } } - dt_stat_finalize(t); T_END; } -static int get_ncpu(void) +static int +get_ncpu(void) { int ncpu; size_t length = sizeof(ncpu); T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.ncpu", &ncpu, &length, NULL, 0), - "failed to query hw.ncpu"); + "failed to query hw.ncpu"); return ncpu; } T_DECL(read_soft_fault, - "Read soft faults (single thread)") + "Read soft faults (single thread)") { - run_test(SOFT_FAULT, 1, get_ncpu()); + setup_and_run_test(SOFT_FAULT, 1); } T_DECL(read_soft_fault_multithreaded, - "Read soft faults (multi-threaded)") + "Read soft faults (multi-threaded)") { char *e; int nthreads; @@ -218,17 +419,17 @@ T_DECL(read_soft_fault_multithreaded, } else { nthreads = get_ncpu(); } - run_test(SOFT_FAULT, nthreads, get_ncpu()); + setup_and_run_test(SOFT_FAULT, nthreads); } T_DECL(zero_fill_fault, - "Zero fill faults (single thread)") + "Zero fill faults (single thread)") { - run_test(ZERO_FILL, 1, get_ncpu()); + setup_and_run_test(ZERO_FILL, 1); } T_DECL(zero_fill_fault_multithreaded, - "Zero fill faults (multi-threaded)") + "Zero fill faults (multi-threaded)") { char *e; int nthreads; @@ -239,5 +440,5 @@ T_DECL(zero_fill_fault_multithreaded, } else { nthreads = get_ncpu(); } - run_test(ZERO_FILL, nthreads, get_ncpu()); + setup_and_run_test(ZERO_FILL, nthreads); } diff --git a/tests/phys_footprint_interval_max.c b/tests/phys_footprint_interval_max.c index 846b59151..10a64fbe5 100644 --- a/tests/phys_footprint_interval_max.c +++ b/tests/phys_footprint_interval_max.c @@ -1,4 +1,3 @@ - /* * Copyright (c) 2018 Apple Inc. All rights reserved. 
* @@ -40,7 +39,7 @@ int proc_rlimit_control(pid_t pid, int flavor, void *arg); T_DECL(phys_footprint_interval_max, - "Validate physical footprint interval tracking") + "Validate physical footprint interval tracking") { int ret; struct rusage_info_v4 ru; @@ -50,7 +49,7 @@ T_DECL(phys_footprint_interval_max, T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "proc_pid_rusage"); T_ASSERT_EQ(ru.ri_lifetime_max_phys_footprint, ru.ri_interval_max_phys_footprint, - "Max footprint and interval footprint are equal prior to dirtying memory"); + "Max footprint and interval footprint are equal prior to dirtying memory"); ret = mach_vm_allocate(mach_task_self(), &addr, (mach_vm_size_t)ALLOC_SIZE_LARGE, VM_FLAGS_ANYWHERE); T_QUIET; @@ -62,7 +61,7 @@ T_DECL(phys_footprint_interval_max, T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "proc_pid_rusage"); T_ASSERT_EQ(ru.ri_lifetime_max_phys_footprint, ru.ri_interval_max_phys_footprint, - "Max footprint and interval footprint are equal after dirtying large memory region"); + "Max footprint and interval footprint are equal after dirtying large memory region"); mach_vm_deallocate(mach_task_self(), addr, (mach_vm_size_t)ALLOC_SIZE_LARGE); @@ -70,7 +69,7 @@ T_DECL(phys_footprint_interval_max, T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "proc_pid_rusage"); T_ASSERT_EQ(ru.ri_lifetime_max_phys_footprint, ru.ri_interval_max_phys_footprint, - "Max footprint and interval footprint are still equal after freeing large memory region"); + "Max footprint and interval footprint are still equal after freeing large memory region"); ret = proc_reset_footprint_interval(getpid()); T_ASSERT_POSIX_SUCCESS(ret, "proc_reset_footprint_interval()"); @@ -79,7 +78,7 @@ T_DECL(phys_footprint_interval_max, T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "proc_pid_rusage"); T_ASSERT_GT(ru.ri_lifetime_max_phys_footprint, ru.ri_interval_max_phys_footprint, - "Max footprint is greater than interval footprint after resetting interval"); + "Max footprint is greater than interval footprint after resetting interval"); ret = mach_vm_allocate(mach_task_self(), &addr, (mach_vm_size_t)ALLOC_SIZE_SMALL, VM_FLAGS_ANYWHERE); T_QUIET; @@ -90,5 +89,5 @@ T_DECL(phys_footprint_interval_max, T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "proc_pid_rusage"); T_ASSERT_GT(ru.ri_lifetime_max_phys_footprint, ru.ri_interval_max_phys_footprint, - "Max footprint is still greater than interval footprint after dirtying small memory region"); + "Max footprint is still greater than interval footprint after dirtying small memory region"); } diff --git a/tests/poll.c b/tests/poll.c index 8ff8806c2..49e4be65f 100644 --- a/tests/poll.c +++ b/tests/poll.c @@ -15,7 +15,7 @@ T_GLOBAL_META(T_META_NAMESPACE("xnu.poll")); #define SLEEP_TIME_SECS 1 #define POLL_TIMEOUT_MS 1800 static_assert(POLL_TIMEOUT_MS > (SLEEP_TIME_SECS * 1000), - "poll timeout should be longer than sleep time"); + "poll timeout should be longer than sleep time"); /* * This matches the behavior of other UNIXes, but is under-specified in POSIX. @@ -23,7 +23,7 @@ static_assert(POLL_TIMEOUT_MS > (SLEEP_TIME_SECS * 1000), * See . 
*/ T_DECL(sleep_with_no_fds, - "poll() called with no fds provided should act like sleep") + "poll() called with no fds provided should act like sleep") { uint64_t begin_time, sleep_time, poll_time; struct pollfd pfd = { 0 }; @@ -35,11 +35,11 @@ T_DECL(sleep_with_no_fds, begin_time = mach_absolute_time(); T_ASSERT_POSIX_SUCCESS(poll(&pfd, 0, POLL_TIMEOUT_MS), - "poll() with 0 events and timeout %d ms", POLL_TIMEOUT_MS); + "poll() with 0 events and timeout %d ms", POLL_TIMEOUT_MS); poll_time = mach_absolute_time() - begin_time; T_EXPECT_GT(poll_time, sleep_time, - "poll(... %d) should wait longer than sleep(1)", POLL_TIMEOUT_MS); + "poll(... %d) should wait longer than sleep(1)", POLL_TIMEOUT_MS); } #define LAUNCHD_PATH "/sbin/launchd" @@ -49,7 +49,7 @@ T_DECL(sleep_with_no_fds, * See . */ T_DECL(directories, - "poll() with directories should return an error") + "poll() with directories should return an error") { int file, dir, pipes[2]; struct pollfd pfd[] = { @@ -68,44 +68,44 @@ T_DECL(directories, pfd[0].fd = dir; T_EXPECT_POSIX_SUCCESS(poll(pfd, 1, -1), "poll() with a directory"); T_QUIET; T_EXPECT_TRUE(pfd[0].revents & POLLNVAL, - "directory should be an invalid event"); + "directory should be an invalid event"); /* file and directory */ pfd[0].fd = file; pfd[0].revents = 0; pfd[1].fd = dir; pfd[1].revents = 0; T_EXPECT_POSIX_SUCCESS(poll(pfd, 2, -1), - "poll() with a file and directory"); + "poll() with a file and directory"); T_QUIET; T_EXPECT_TRUE(pfd[0].revents & POLLIN, "file should be readable"); T_QUIET; T_EXPECT_TRUE(pfd[1].revents & POLLNVAL, - "directory should be an invalid event"); + "directory should be an invalid event"); /* directory and file */ pfd[0].fd = dir; pfd[0].revents = 0; pfd[1].fd = file; pfd[1].revents = 0; T_EXPECT_POSIX_SUCCESS(poll(pfd, 2, -1), - "poll() with a directory and a file"); + "poll() with a directory and a file"); T_QUIET; T_EXPECT_TRUE(pfd[0].revents & POLLNVAL, - "directory should be an invalid event"); + "directory should be an invalid event"); T_QUIET; T_EXPECT_TRUE(pfd[1].revents & POLLIN, "file should be readable"); /* file and pipe */ pfd[0].fd = file; pfd[0].revents = 0; pfd[1].fd = pipes[0]; pfd[0].revents = 0; T_EXPECT_POSIX_SUCCESS(poll(pfd, 2, -1), - "poll() with a file and pipe"); + "poll() with a file and pipe"); T_QUIET; T_EXPECT_TRUE(pfd[0].revents & POLLIN, "file should be readable"); T_QUIET; T_EXPECT_FALSE(pfd[1].revents & POLLIN, - "pipe should not be readable"); + "pipe should not be readable"); /* file, directory, and pipe */ pfd[0].fd = file; pfd[0].revents = 0; pfd[1].fd = dir; pfd[1].revents = 0; pfd[2].fd = pipes[0]; pfd[2].revents = 0; T_EXPECT_POSIX_SUCCESS(poll(pfd, 3, -1), - "poll() with a file, directory, and pipe"); + "poll() with a file, directory, and pipe"); T_QUIET; T_EXPECT_TRUE(pfd[0].revents & POLLIN, "file should be readable"); T_QUIET; T_EXPECT_TRUE(pfd[1].revents & POLLNVAL, - "directory should be an invalid event"); + "directory should be an invalid event"); T_QUIET; T_EXPECT_FALSE(pfd[2].revents & POLLIN, "pipe should not be readable"); /* directory and pipe */ @@ -113,17 +113,17 @@ T_DECL(directories, pfd[0].fd = dir; pfd[0].revents = 0; pfd[1].fd = pipes[0]; pfd[1].revents = 0; dispatch_after(dispatch_time(DISPATCH_TIME_NOW, - PIPE_DIR_TIMEOUT_SECS * NSEC_PER_SEC), - dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{ + PIPE_DIR_TIMEOUT_SECS * NSEC_PER_SEC), + dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{ T_ASSERT_FALSE(timed_out, "poll timed out after %d seconds", - PIPE_DIR_TIMEOUT_SECS); + 
PIPE_DIR_TIMEOUT_SECS); }); T_EXPECT_POSIX_SUCCESS(poll(pfd, 3, -1), - "poll() with a directory and pipe"); + "poll() with a directory and pipe"); timed_out = false; T_QUIET; T_EXPECT_TRUE(pfd[0].revents & POLLNVAL, - "directory should be an invalid event"); + "directory should be an invalid event"); T_QUIET; T_EXPECT_FALSE(pfd[1].revents & POLLIN, "pipe should not be readable"); } diff --git a/tests/poll_select_kevent_paired_fds.c b/tests/poll_select_kevent_paired_fds.c index bd9a5e727..880e9a7b6 100644 --- a/tests/poll_select_kevent_paired_fds.c +++ b/tests/poll_select_kevent_paired_fds.c @@ -30,9 +30,9 @@ #include /* kevent_qos */ T_GLOBAL_META( - T_META_NAMESPACE("xnu.kevent"), - T_META_CHECK_LEAKS(false), - T_META_LTEPHASE(LTE_POSTINIT)); + T_META_NAMESPACE("xnu.kevent"), + T_META_CHECK_LEAKS(false), + T_META_LTEPHASE(LTE_POSTINIT)); /* * Test to validate that monitoring a PTY device, FIFO, pipe, or socket pair in @@ -118,7 +118,7 @@ static struct { static bool handle_reading(enum fd_pair fd_pair, int fd); static bool handle_writing(enum fd_pair fd_pair, int fd); static void drive_kq(bool reading, union mode mode, enum fd_pair fd_pair, - int fd); + int fd); #pragma mark writing @@ -136,7 +136,7 @@ wake_writer(void) char tmp = 'a'; close(shared.wr_wait.out_fd); T_QUIET; T_ASSERT_POSIX_SUCCESS(write( - shared.wr_wait.in_fd, &tmp, 1), NULL); + shared.wr_wait.in_fd, &tmp, 1), NULL); break; } } @@ -161,7 +161,7 @@ writer_wait(void) char tmp; close(shared.wr_wait.in_fd); T_QUIET; T_ASSERT_POSIX_SUCCESS(read( - shared.wr_wait.out_fd, &tmp, 1), NULL); + shared.wr_wait.out_fd, &tmp, 1), NULL); break; } } @@ -174,10 +174,10 @@ handle_writing(enum fd_pair __unused fd_pair, int fd) { static unsigned int cur_char = 0; T_QUIET; T_ASSERT_POSIX_SUCCESS(write(fd, - &(EXPECTED_STRING[cur_char]), 1), NULL); + &(EXPECTED_STRING[cur_char]), 1), NULL); cur_char++; - return (cur_char < EXPECTED_LEN); + return cur_char < EXPECTED_LEN; } #define EXPECTED_QOS QOS_CLASS_USER_INITIATED @@ -186,17 +186,17 @@ static void reenable_workq(int fd, int16_t filt) { struct kevent_qos_s events[] = {{ - .ident = (uint64_t)fd, - .filter = filt, - .flags = EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH, - .qos = (int32_t)_pthread_qos_class_encode(EXPECTED_QOS, - 0, 0), - .fflags = NOTE_LOWAT, - .data = 1 - }}; + .ident = (uint64_t)fd, + .filter = filt, + .flags = EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH, + .qos = (int32_t)_pthread_qos_class_encode(EXPECTED_QOS, + 0, 0), + .fflags = NOTE_LOWAT, + .data = 1 + }}; int kev = kevent_qos(-1, events, 1, events, 1, NULL, NULL, - KEVENT_FLAG_WORKQ | KEVENT_FLAG_ERROR_EVENTS); + KEVENT_FLAG_WORKQ | KEVENT_FLAG_ERROR_EVENTS); T_QUIET; T_ASSERT_POSIX_SUCCESS(kev, "reenable workq in kevent_qos"); } @@ -205,7 +205,7 @@ workqueue_write_fn(void ** __unused buf, int * __unused count) { // T_MAYFAIL; // T_QUIET; T_ASSERT_EFFECTIVE_QOS_EQ(EXPECTED_QOS, - // "writer thread should be woken up at correct QoS"); + // "writer thread should be woken up at correct QoS"); if (!handle_writing(shared.fd_pair, shared.wr_fd)) { /* finished handling the fd, tear down the source */ T_LOG("signal shared.wr_finished"); @@ -230,23 +230,23 @@ drive_kq(bool reading, union mode mode, enum fd_pair fd_pair, int fd) struct kevent events; EV_SET(&events, fd, reading ? EVFILT_READ : EVFILT_WRITE, EV_ADD, - NOTE_LOWAT, 1, NULL); + NOTE_LOWAT, 1, NULL); struct kevent64_s events64; EV_SET64(&events64, fd, reading ? 
EVFILT_READ : EVFILT_WRITE, EV_ADD, - NOTE_LOWAT, 1, 0, 0, 0); + NOTE_LOWAT, 1, 0, 0, 0); struct kevent_qos_s events_qos[] = {{ - .ident = (uint64_t)fd, - .filter = reading ? EVFILT_READ : EVFILT_WRITE, - .flags = EV_ADD, - .fflags = NOTE_LOWAT, - .data = 1 - }, { - .ident = 0, - .filter = EVFILT_TIMER, - .flags = EV_ADD, - .fflags = NOTE_SECONDS, - .data = READ_TIMEOUT_SECS - }}; + .ident = (uint64_t)fd, + .filter = reading ? EVFILT_READ : EVFILT_WRITE, + .flags = EV_ADD, + .fflags = NOTE_LOWAT, + .data = 1 + }, { + .ident = 0, + .filter = EVFILT_TIMER, + .flags = EV_ADD, + .fflags = NOTE_SECONDS, + .data = READ_TIMEOUT_SECS + }}; /* determine which variant of kevent to use */ enum read_mode which_kevent; @@ -355,18 +355,18 @@ write_to_fd(void * __unused ctx) T_LOG("write from child was interrupted"); } bytes_wr = write(shared.wr_fd, EXPECTED_STRING, - EXPECTED_LEN); + EXPECTED_LEN); } while (bytes_wr == -1 && errno == EINTR); T_QUIET; T_ASSERT_POSIX_SUCCESS(bytes_wr, "write"); T_QUIET; T_ASSERT_EQ(bytes_wr, (ssize_t)EXPECTED_LEN, - "wrote enough bytes"); + "wrote enough bytes"); break; case INCREMENTAL_WRITE: - for (unsigned int i = 0; i < EXPECTED_LEN ; i++) { + for (unsigned int i = 0; i < EXPECTED_LEN; i++) { T_QUIET; T_ASSERT_POSIX_SUCCESS(write(shared.wr_fd, - &(EXPECTED_STRING[i]), 1), NULL); + &(EXPECTED_STRING[i]), 1), NULL); usleep(INCREMENTAL_WRITE_SLEEP_USECS); } break; @@ -385,7 +385,7 @@ write_to_fd(void * __unused ctx) int changes = 1; T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &shared.wr_finished, SYNC_POLICY_FIFO, 0), - "semaphore_create shared.wr_finished"); + "semaphore_create shared.wr_finished"); T_QUIET; T_ASSERT_NE_UINT(shared.wr_finished, (unsigned)MACH_PORT_NULL, "wr_finished semaphore_create"); @@ -394,19 +394,19 @@ write_to_fd(void * __unused ctx) T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_kevent(workqueue_fn, workqueue_write_fn, 0, 0), NULL); struct kevent_qos_s events[] = {{ - .ident = (uint64_t)shared.wr_fd, - .filter = EVFILT_WRITE, - .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, - .fflags = NOTE_LOWAT, - .data = 1, - .qos = (int32_t)_pthread_qos_class_encode(EXPECTED_QOS, - 0, 0) - }}; + .ident = (uint64_t)shared.wr_fd, + .filter = EVFILT_WRITE, + .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, + .fflags = NOTE_LOWAT, + .data = 1, + .qos = (int32_t)_pthread_qos_class_encode(EXPECTED_QOS, + 0, 0) + }}; for (;;) { int kev = kevent_qos(-1, changes == 0 ? 
NULL : events, changes, - events, 1, NULL, NULL, - KEVENT_FLAG_WORKQ | KEVENT_FLAG_ERROR_EVENTS); + events, 1, NULL, NULL, + KEVENT_FLAG_WORKQ | KEVENT_FLAG_ERROR_EVENTS); if (kev == -1 && errno == EINTR) { changes = 0; T_LOG("kevent_qos was interrupted"); @@ -423,29 +423,29 @@ write_to_fd(void * __unused ctx) dispatch_source_t write_src; T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &shared.wr_finished, SYNC_POLICY_FIFO, 0), - "semaphore_create shared.wr_finished"); + "semaphore_create shared.wr_finished"); T_QUIET; T_ASSERT_NE_UINT(shared.wr_finished, (unsigned)MACH_PORT_NULL, "semaphore_create"); write_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE, - (uintptr_t)shared.wr_fd, 0, NULL); + (uintptr_t)shared.wr_fd, 0, NULL); T_QUIET; T_ASSERT_NOTNULL(write_src, - "dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE ...)"); + "dispatch_source_create(DISPATCH_SOURCE_TYPE_WRITE ...)"); dispatch_block_t handler = dispatch_block_create_with_qos_class( - DISPATCH_BLOCK_ENFORCE_QOS_CLASS, EXPECTED_QOS, 0, ^{ - // T_MAYFAIL; - // T_QUIET; T_ASSERT_EFFECTIVE_QOS_EQ(EXPECTED_QOS, - // "write handler block should run at correct QoS"); - if (!handle_writing(shared.fd_pair, shared.wr_fd)) { - /* finished handling the fd, tear down the source */ - dispatch_source_cancel(write_src); - dispatch_release(write_src); - T_LOG("signal shared.wr_finished"); - semaphore_signal(shared.wr_finished); - } - }); + DISPATCH_BLOCK_ENFORCE_QOS_CLASS, EXPECTED_QOS, 0, ^{ + // T_MAYFAIL; + // T_QUIET; T_ASSERT_EFFECTIVE_QOS_EQ(EXPECTED_QOS, + // "write handler block should run at correct QoS"); + if (!handle_writing(shared.fd_pair, shared.wr_fd)) { + /* finished handling the fd, tear down the source */ + dispatch_source_cancel(write_src); + dispatch_release(write_src); + T_LOG("signal shared.wr_finished"); + semaphore_signal(shared.wr_finished); + } + }); dispatch_source_set_event_handler(write_src, handler); dispatch_activate(write_src); @@ -502,7 +502,7 @@ handle_reading(enum fd_pair fd_pair, int fd) T_QUIET; T_ASSERT_POSIX_SUCCESS(bytes_rd, "reading from file"); T_QUIET; T_ASSERT_LE(bytes_rd, (ssize_t)EXPECTED_LEN, - "read too much from file"); + "read too much from file"); if (bytes_rd == 0) { T_LOG("read EOF from file"); @@ -511,16 +511,15 @@ handle_reading(enum fd_pair fd_pair, int fd) read_buf[bytes_rd] = '\0'; strlcpy(&(final_string[final_length]), read_buf, - sizeof(final_string) - final_length); + sizeof(final_string) - final_length); final_length += (size_t)bytes_rd; T_QUIET; T_ASSERT_LE(final_length, EXPECTED_LEN, - "should not read more from file than what can be sent"); + "should not read more from file than what can be sent"); /* FIFOs don't send EOF when the write side closes */ if (final_length == strlen(EXPECTED_STRING) && - (fd_pair == FIFO_PAIR)) - { + (fd_pair == FIFO_PAIR)) { T_LOG("read all expected bytes from FIFO"); return false; } @@ -532,7 +531,7 @@ workqueue_read_fn(void ** __unused buf, int * __unused count) { // T_MAYFAIL; // T_QUIET; T_ASSERT_EFFECTIVE_QOS_EQ(EXPECTED_QOS, - // "reader thread should be requested at correct QoS"); + // "reader thread should be requested at correct QoS"); if (!handle_reading(shared.fd_pair, shared.rd_fd)) { T_LOG("signal shared.rd_finished"); semaphore_signal(shared.rd_finished); @@ -556,7 +555,7 @@ read_from_fd(int fd, enum fd_pair fd_pair, enum read_mode mode) if (!(fd_flags & O_NONBLOCK)) { T_QUIET; T_ASSERT_POSIX_SUCCESS(fcntl(fd, F_SETFL, - fd_flags | O_NONBLOCK), NULL); + fd_flags | O_NONBLOCK), NULL); } switch (mode) { @@ -569,13 
+568,13 @@ read_from_fd(int fd, enum fd_pair fd_pair, enum read_mode mode) int pol = poll(fds, 1, READ_TIMEOUT_SECS * 1000); T_QUIET; T_ASSERT_POSIX_SUCCESS(pol, "poll"); T_QUIET; T_ASSERT_NE(pol, 0, - "poll should not time out after %d seconds, read %zd out " - "of %zu bytes", - READ_TIMEOUT_SECS, final_length, strlen(EXPECTED_STRING)); + "poll should not time out after %d seconds, read %zd out " + "of %zu bytes", + READ_TIMEOUT_SECS, final_length, strlen(EXPECTED_STRING)); T_QUIET; T_ASSERT_FALSE(fds[0].revents & POLLERR, - "should not see an error on the device"); + "should not see an error on the device"); T_QUIET; T_ASSERT_FALSE(fds[0].revents & POLLNVAL, - "should not set up an invalid poll"); + "should not set up an invalid poll"); if (!handle_reading(fd_pair, fd)) { break; @@ -597,7 +596,7 @@ read_from_fd(int fd, enum fd_pair fd_pair, enum read_mode mode) FD_ZERO(&err_fd); FD_SET(fd, &err_fd); - int sel = select(fd + 1, &read_fd, NULL, NULL/*&err_fd*/, &tv); + int sel = select(fd + 1, &read_fd, NULL, NULL /*&err_fd*/, &tv); if (sel == -1 && errno == EINTR) { T_LOG("select interrupted"); continue; @@ -607,12 +606,12 @@ read_from_fd(int fd, enum fd_pair fd_pair, enum read_mode mode) T_QUIET; T_ASSERT_POSIX_SUCCESS(sel, "select"); T_QUIET; T_ASSERT_NE(sel, 0, - "select waited for %d seconds and timed out", - READ_TIMEOUT_SECS); + "select waited for %d seconds and timed out", + READ_TIMEOUT_SECS); /* didn't fail or time out, therefore data is ready */ T_QUIET; T_ASSERT_NE(FD_ISSET(fd, &read_fd), 0, - "select should show reading fd as readable"); + "select should show reading fd as readable"); if (!handle_reading(fd_pair, fd)) { break; @@ -632,29 +631,29 @@ read_from_fd(int fd, enum fd_pair fd_pair, enum read_mode mode) // prohibit ourselves from going multi-threaded see:rdar://33296008 _dispatch_prohibit_transition_to_multithreaded(true); T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_kevent( - workqueue_fn, workqueue_read_fn, 0, 0), NULL); + workqueue_fn, workqueue_read_fn, 0, 0), NULL); T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &shared.rd_finished, SYNC_POLICY_FIFO, 0), - "semaphore_create shared.rd_finished"); + "semaphore_create shared.rd_finished"); T_QUIET; T_ASSERT_NE_UINT(shared.rd_finished, (unsigned)MACH_PORT_NULL, "semaphore_create"); int changes = 1; struct kevent_qos_s events[] = {{ - .ident = (uint64_t)shared.rd_fd, - .filter = EVFILT_READ, - .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, - .fflags = NOTE_LOWAT, - .data = 1, - .qos = (int32_t)_pthread_qos_class_encode(EXPECTED_QOS, - 0, 0) - }}; + .ident = (uint64_t)shared.rd_fd, + .filter = EVFILT_READ, + .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, + .fflags = NOTE_LOWAT, + .data = 1, + .qos = (int32_t)_pthread_qos_class_encode(EXPECTED_QOS, + 0, 0) + }}; for (;;) { int kev = kevent_qos(-1, changes == 0 ? 
NULL : events, changes, - events, 1, NULL, NULL, - KEVENT_FLAG_WORKQ | KEVENT_FLAG_ERROR_EVENTS); + events, 1, NULL, NULL, + KEVENT_FLAG_WORKQ | KEVENT_FLAG_ERROR_EVENTS); if (kev == -1 && errno == EINTR) { changes = 0; T_LOG("kevent_qos was interrupted"); @@ -673,30 +672,30 @@ read_from_fd(int fd, enum fd_pair fd_pair, enum read_mode mode) dispatch_source_t read_src; T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &shared.rd_finished, SYNC_POLICY_FIFO, 0), - "semaphore_create shared.rd_finished"); + "semaphore_create shared.rd_finished"); T_QUIET; T_ASSERT_NE_UINT(shared.rd_finished, (unsigned)MACH_PORT_NULL, "semaphore_create"); read_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, - (uintptr_t)fd, 0, NULL); + (uintptr_t)fd, 0, NULL); T_QUIET; T_ASSERT_NOTNULL(read_src, - "dispatch_source_create(DISPATCH_SOURCE_TYPE_READ)"); + "dispatch_source_create(DISPATCH_SOURCE_TYPE_READ)"); dispatch_block_t handler = dispatch_block_create_with_qos_class( - DISPATCH_BLOCK_ENFORCE_QOS_CLASS, EXPECTED_QOS, 0, ^{ - // T_MAYFAIL; - // T_QUIET; T_ASSERT_EFFECTIVE_QOS_EQ(EXPECTED_QOS, - // "read handler block should run at correct QoS"); - - if (!handle_reading(fd_pair, fd)) { - /* finished handling the fd, tear down the source */ - dispatch_source_cancel(read_src); - dispatch_release(read_src); - T_LOG("signal shared.rd_finished"); - semaphore_signal(shared.rd_finished); - } - }); + DISPATCH_BLOCK_ENFORCE_QOS_CLASS, EXPECTED_QOS, 0, ^{ + // T_MAYFAIL; + // T_QUIET; T_ASSERT_EFFECTIVE_QOS_EQ(EXPECTED_QOS, + // "read handler block should run at correct QoS"); + + if (!handle_reading(fd_pair, fd)) { + /* finished handling the fd, tear down the source */ + dispatch_source_cancel(read_src); + dispatch_release(read_src); + T_LOG("signal shared.rd_finished"); + semaphore_signal(shared.rd_finished); + } + }); dispatch_source_set_event_handler(read_src, handler); dispatch_activate(read_src); @@ -721,7 +720,7 @@ read_from_fd(int fd, enum fd_pair fd_pair, enum read_mode mode) } T_EXPECT_EQ_STR(final_string, EXPECTED_STRING, - "reader should receive valid string"); + "reader should receive valid string"); T_QUIET; T_ASSERT_POSIX_SUCCESS(close(fd), NULL); } @@ -733,7 +732,7 @@ fd_pair_init(enum fd_pair fd_pair, int *rd_fd, int *wr_fd) switch (fd_pair) { case PTY_PAIR: T_ASSERT_POSIX_SUCCESS(openpty(rd_fd, wr_fd, NULL, NULL, NULL), - NULL); + NULL); break; case FIFO_PAIR: { @@ -741,7 +740,7 @@ fd_pair_init(enum fd_pair fd_pair, int *rd_fd, int *wr_fd) T_QUIET; T_ASSERT_NOTNULL(mktemp(fifo_path), NULL); T_ASSERT_POSIX_SUCCESS(mkfifo(fifo_path, 0700), "mkfifo(%s, 0700)", - fifo_path); + fifo_path); /* * Opening the read side of a pipe will block until the write * side opens -- use O_NONBLOCK. 
@@ -764,7 +763,7 @@ fd_pair_init(enum fd_pair fd_pair, int *rd_fd, int *wr_fd) case SOCKET_PAIR: { int sock_fds[2]; T_ASSERT_POSIX_SUCCESS(socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds), - NULL); + NULL); *rd_fd = sock_fds[0]; *wr_fd = sock_fds[1]; break; @@ -783,7 +782,7 @@ fd_pair_init(enum fd_pair fd_pair, int *rd_fd, int *wr_fd) static void drive_threads(enum fd_pair fd_pair, enum read_mode rd_mode, - enum write_mode wr_mode) + enum write_mode wr_mode) { pthread_t thread; @@ -794,11 +793,11 @@ drive_threads(enum fd_pair fd_pair, enum read_mode rd_mode, shared.wr_kind = THREAD_WRITER; T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &shared.wr_wait.sem, SYNC_POLICY_FIFO, 0), - "semaphore_create shared.wr_wait.sem"); + "semaphore_create shared.wr_wait.sem"); T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, write_to_fd, NULL), - NULL); + NULL); T_LOG("created writer thread"); read_from_fd(shared.rd_fd, fd_pair, rd_mode); @@ -852,69 +851,69 @@ T_HELPER_DECL(writer_helper, "Write asynchronously") #pragma mark tests #define WR_DECL_PROCESSES(desc_name, fd_pair, write_name, write_str, \ - write_mode, read_name, read_mode) \ - T_DECL(desc_name##_r##read_name##_w##write_name##_procs, "read changes to a " \ - #desc_name " with " #read_name " and writing " #write_str \ - " across two processes") \ - { \ - drive_processes(fd_pair, read_mode, write_mode); \ - } + write_mode, read_name, read_mode) \ + T_DECL(desc_name##_r##read_name##_w##write_name##_procs, "read changes to a " \ + #desc_name " with " #read_name " and writing " #write_str \ + " across two processes") \ + { \ + drive_processes(fd_pair, read_mode, write_mode); \ + } #define WR_DECL_THREADS(desc_name, fd_pair, write_name, write_str, \ - write_mode, read_name, read_mode) \ - T_DECL(desc_name##_r##read_name##_w##write_name##_thds, "read changes to a " \ - #desc_name " with " #read_name " and writing " #write_str) \ - { \ - drive_threads(fd_pair, read_mode, write_mode); \ - } + write_mode, read_name, read_mode) \ + T_DECL(desc_name##_r##read_name##_w##write_name##_thds, "read changes to a " \ + #desc_name " with " #read_name " and writing " #write_str) \ + { \ + drive_threads(fd_pair, read_mode, write_mode); \ + } #define WR_DECL(desc_name, fd_pair, write_name, write_str, write_mode, \ - read_name, read_mode) \ - WR_DECL_PROCESSES(desc_name, fd_pair, write_name, write_str, \ - write_mode, read_name, read_mode) \ - WR_DECL_THREADS(desc_name, fd_pair, write_name, write_str, \ - write_mode, read_name, read_mode) + read_name, read_mode) \ + WR_DECL_PROCESSES(desc_name, fd_pair, write_name, write_str, \ + write_mode, read_name, read_mode) \ + WR_DECL_THREADS(desc_name, fd_pair, write_name, write_str, \ + write_mode, read_name, read_mode) #define RD_DECL_SAFE(desc_name, fd_pair, read_name, read_mode) \ - WR_DECL(desc_name, fd_pair, full, "the full string", FULL_WRITE, \ - read_name, read_mode) \ - WR_DECL(desc_name, fd_pair, inc, "incrementally", \ - INCREMENTAL_WRITE, read_name, read_mode) + WR_DECL(desc_name, fd_pair, full, "the full string", FULL_WRITE, \ + read_name, read_mode) \ + WR_DECL(desc_name, fd_pair, inc, "incrementally", \ + INCREMENTAL_WRITE, read_name, read_mode) #define RD_DECL_DISPATCH_ONLY(suffix, desc_name, fd_pair, read_name, \ - read_mode) \ - WR_DECL##suffix(desc_name, fd_pair, inc_dispatch, \ - "incrementally with a dispatch source", \ - DISPATCH_INCREMENTAL_WRITE, read_name, read_mode) + read_mode) \ + WR_DECL##suffix(desc_name, fd_pair, inc_dispatch, \ + "incrementally with a dispatch source", \ + 
DISPATCH_INCREMENTAL_WRITE, read_name, read_mode) #define RD_DECL_WORKQ_ONLY(suffix, desc_name, fd_pair, read_name, \ - read_mode) \ - WR_DECL##suffix(desc_name, fd_pair, inc_workq, \ - "incrementally with the workqueue", \ - WORKQ_INCREMENTAL_WRITE, read_name, read_mode) + read_mode) \ + WR_DECL##suffix(desc_name, fd_pair, inc_workq, \ + "incrementally with the workqueue", \ + WORKQ_INCREMENTAL_WRITE, read_name, read_mode) #define RD_DECL(desc_name, fd_pair, read_name, read_mode) \ - RD_DECL_SAFE(desc_name, fd_pair, read_name, read_mode) \ - RD_DECL_DISPATCH_ONLY(, desc_name, fd_pair, read_name, read_mode) - // RD_DECL_WORKQ_ONLY(, desc_name, fd_pair, read_name, read_mode) + RD_DECL_SAFE(desc_name, fd_pair, read_name, read_mode) \ + RD_DECL_DISPATCH_ONLY(, desc_name, fd_pair, read_name, read_mode) +// RD_DECL_WORKQ_ONLY(, desc_name, fd_pair, read_name, read_mode) /* * dispatch_source tests cannot share the same process as other workqueue * tests. */ #define RD_DECL_DISPATCH(desc_name, fd_pair, read_name, read_mode) \ - RD_DECL_SAFE(desc_name, fd_pair, read_name, read_mode) \ - RD_DECL_DISPATCH_ONLY(, desc_name, fd_pair, read_name, read_mode) \ - RD_DECL_WORKQ_ONLY(_PROCESSES, desc_name, fd_pair, read_name, \ - read_mode) + RD_DECL_SAFE(desc_name, fd_pair, read_name, read_mode) \ + RD_DECL_DISPATCH_ONLY(, desc_name, fd_pair, read_name, read_mode) \ + RD_DECL_WORKQ_ONLY(_PROCESSES, desc_name, fd_pair, read_name, \ + read_mode) /* * Workqueue tests cannot share the same process as other workqueue or * dispatch_source tests. -#define RD_DECL_WORKQ(desc_name, fd_pair, read_name, read_mode) \ - RD_DECL_SAFE(desc_name, fd_pair, read_name, read_mode) \ - RD_DECL_DISPATCH_ONLY(_PROCESSES, desc_name, fd_pair, read_name, \ - read_mode) \ - RD_DECL_WORKQ_ONLY(_PROCESSES, desc_name, fd_pair, read_name, \ - read_mode) + #define RD_DECL_WORKQ(desc_name, fd_pair, read_name, read_mode) \ + * RD_DECL_SAFE(desc_name, fd_pair, read_name, read_mode) \ + * RD_DECL_DISPATCH_ONLY(_PROCESSES, desc_name, fd_pair, read_name, \ + * read_mode) \ + * RD_DECL_WORKQ_ONLY(_PROCESSES, desc_name, fd_pair, read_name, \ + * read_mode) */ #define PAIR_DECL(desc_name, fd_pair) \ @@ -924,7 +923,7 @@ T_HELPER_DECL(writer_helper, "Write asynchronously") RD_DECL(desc_name, fd_pair, kevent64, KEVENT64_READ) \ RD_DECL(desc_name, fd_pair, kevent_qos, KEVENT_QOS_READ) \ RD_DECL_DISPATCH(desc_name, fd_pair, dispatch_source, DISPATCH_READ) - // RD_DECL_WORKQ(desc_name, fd_pair, workq, WORKQ_READ) +// RD_DECL_WORKQ(desc_name, fd_pair, workq, WORKQ_READ) PAIR_DECL(tty, PTY_PAIR) PAIR_DECL(pipe, PIPE_PAIR) diff --git a/tests/port_descriptions.c b/tests/port_descriptions.c index a42ab29be..3f1f96ea3 100644 --- a/tests/port_descriptions.c +++ b/tests/port_descriptions.c @@ -30,22 +30,22 @@ static void expect_special_port_description(const char *(*fn)(mach_port_t), - mach_port_t port, const char *namestr) + mach_port_t port, const char *namestr) { const char *desc = fn(port); T_EXPECT_NOTNULL(desc, "%s is %s", namestr, desc); if (desc) { T_QUIET; T_EXPECT_GT(strlen(desc), strlen(""), - "%s's description string is not empty", namestr); + "%s's description string is not empty", namestr); } } T_DECL(host_special_port_descriptions, - "verify that host special ports can be described") + "verify that host special ports can be described") { #define TEST_HSP(portdef) \ - expect_special_port_description(mach_host_special_port_description, \ - portdef, #portdef) + expect_special_port_description(mach_host_special_port_description, \ + portdef, 
#portdef) TEST_HSP(HOST_PORT); TEST_HSP(HOST_PRIV_PORT); @@ -76,20 +76,20 @@ T_DECL(host_special_port_descriptions, #undef TEST_HSP T_EXPECT_EQ(HOST_SYSPOLICYD_PORT, HOST_MAX_SPECIAL_PORT, - "checked all of the ports"); + "checked all of the ports"); const char *invalid_hsp = - mach_host_special_port_description(HOST_MAX_SPECIAL_PORT + 1); + mach_host_special_port_description(HOST_MAX_SPECIAL_PORT + 1); T_EXPECT_NULL(invalid_hsp, - "invalid host special port description should be NULL"); + "invalid host special port description should be NULL"); } T_DECL(task_special_port_descriptions, - "verify that task special ports can be described") + "verify that task special ports can be described") { #define TEST_TSP(portdef) \ - expect_special_port_description(mach_task_special_port_description, \ - portdef, #portdef) + expect_special_port_description(mach_task_special_port_description, \ + portdef, #portdef) TEST_TSP(TASK_KERNEL_PORT); TEST_TSP(TASK_HOST_PORT); @@ -103,12 +103,12 @@ T_DECL(task_special_port_descriptions, #undef TEST_TSP T_EXPECT_EQ(TASK_RESOURCE_NOTIFY_PORT, TASK_MAX_SPECIAL_PORT, - "checked all of the ports"); + "checked all of the ports"); const char *invalid_tsp = - mach_task_special_port_description(TASK_MAX_SPECIAL_PORT + 1); + mach_task_special_port_description(TASK_MAX_SPECIAL_PORT + 1); T_EXPECT_NULL(invalid_tsp, - "invalid task special port description should be NULL"); + "invalid task special port description should be NULL"); } static void @@ -120,11 +120,11 @@ expect_special_port_id(int (*fn)(const char *id), int port, const char *portid) } T_DECL(host_special_port_mapping, - "verify that task special port names can be mapped to numbers") + "verify that task special port names can be mapped to numbers") { #define TEST_HSP(portdef) \ - expect_special_port_id(mach_host_special_port_for_id, \ - portdef, #portdef) + expect_special_port_id(mach_host_special_port_for_id, \ + portdef, #portdef) TEST_HSP(HOST_PORT); TEST_HSP(HOST_PRIV_PORT); @@ -156,15 +156,15 @@ T_DECL(host_special_port_mapping, int invalid_tsp = mach_host_special_port_for_id("BOGUS_SPECIAL_PORT_NAME"); T_EXPECT_EQ(invalid_tsp, -1, - "invalid host special port IDs should return -1"); + "invalid host special port IDs should return -1"); } T_DECL(task_special_port_mapping, - "verify that task special port names can be mapped to numbers") + "verify that task special port names can be mapped to numbers") { #define TEST_TSP(portdef) \ - expect_special_port_id(mach_task_special_port_for_id, \ - portdef, #portdef) + expect_special_port_id(mach_task_special_port_for_id, \ + portdef, #portdef) TEST_TSP(TASK_KERNEL_PORT); TEST_TSP(TASK_HOST_PORT); @@ -179,5 +179,5 @@ T_DECL(task_special_port_mapping, int invalid_tsp = mach_task_special_port_for_id("BOGUS_SPECIAL_PORT_NAME"); T_EXPECT_EQ(invalid_tsp, -1, - "invalid task special port IDs should return -1"); + "invalid task special port IDs should return -1"); } diff --git a/tests/proc_core_name_24152432.c b/tests/proc_core_name_24152432.c index 11317c629..773735dea 100644 --- a/tests/proc_core_name_24152432.c +++ b/tests/proc_core_name_24152432.c @@ -21,7 +21,7 @@ static const char corefile_ctl[] = "kern.corefile"; static const char coredump_ctl[] = "kern.coredump"; /* The directory where coredumps will be */ -static const char dump_dir[] = "/cores"; +static const char dump_dir[] = "/cores"; /* The default coredump location if the kern.coredump ctl is invalid */ static const char default_dump_fmt[] = "/cores/core.%d"; /* The coredump location when we set kern.coredump 
ctl to something valid */ @@ -48,14 +48,16 @@ static const struct timespec timeout = { #if TARGET_OS_OSX static int fork_and_wait_for_segfault(void); -static void sigalrm_handler(int sig) +static void +sigalrm_handler(int sig) { (void)sig; stop_looking = 1; return; } -static void list_coredump_files() +static void +list_coredump_files() { int ret; char buf[BUFFLEN] = { 0 }; @@ -67,7 +69,9 @@ static void list_coredump_files() return; } -static int fork_and_wait_for_segfault() { +static int +fork_and_wait_for_segfault() +{ int pid, ret; pid = fork(); if (pid == 0) { @@ -82,7 +86,8 @@ static int fork_and_wait_for_segfault() { return pid; } -static int setup_coredump_kevent(struct kevent *kev, int dir) +static int +setup_coredump_kevent(struct kevent *kev, int dir) { int ret; int kqfd; @@ -96,7 +101,8 @@ static int setup_coredump_kevent(struct kevent *kev, int dir) return kqfd; } -static void look_for_coredump(const char *format, int pid, int kqfd, struct kevent *kev) +static void +look_for_coredump(const char *format, int pid, int kqfd, struct kevent *kev) { int ret = 0; int i = 0; @@ -115,10 +121,11 @@ static void look_for_coredump(const char *format, int pid, int kqfd, struct keve snprintf(buf, BUFFLEN, format, pid); ret = remove(buf); - if (ret != -1) + if (ret != -1) { break; + } - T_LOG("Couldn't find coredump file (try #%d).", i+1); + T_LOG("Couldn't find coredump file (try #%d).", i + 1); i++; } alarm(0); @@ -130,7 +137,8 @@ static void look_for_coredump(const char *format, int pid, int kqfd, struct keve T_ASSERT_POSIX_SUCCESS(ret, "Removing coredump file (should be at %s)", buf); } -static void sysctl_enable_coredumps(void) +static void +sysctl_enable_coredumps(void) { int ret; int enable_core_dump = 1; diff --git a/tests/proc_info.c b/tests/proc_info.c index cb5799d29..8502f2d87 100644 --- a/tests/proc_info.c +++ b/tests/proc_info.c @@ -55,7 +55,8 @@ #define CONF_OPN_FILE_COUNT 3 #define CONF_TMP_FILE_PFX "/tmp/xnu.tests.proc_info." 
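For context on the hunk that follows: CONF_TMP_FILE_OPEN() (reformatted just below) is the test suite's helper for creating uniquely named scratch files under the CONF_TMP_FILE_PFX prefix defined above. A minimal sketch of that pattern, assuming mkstemp() semantics — the name tmp_file_open_sketch and the "XXXXXX" suffix handling are illustrative placeholders, not the helper's actual internals:

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <limits.h>  /* PATH_MAX */
#include <stdlib.h>  /* mkstemp */
#include <string.h>  /* strlcpy, strlcat */

static int
tmp_file_open_sketch(char path[PATH_MAX])
{
	char tmpl[PATH_MAX];

	/* Build "/tmp/xnu.tests.proc_info.XXXXXX" as a mkstemp() template. */
	strlcpy(tmpl, CONF_TMP_FILE_PFX, sizeof(tmpl));
	strlcat(tmpl, "XXXXXX", sizeof(tmpl));

	/* mkstemp() replaces the Xs and opens the file O_RDWR | O_CREAT | O_EXCL. */
	int fd = mkstemp(tmpl);
	if (fd >= 0 && path != NULL) {
		strlcpy(path, tmpl, PATH_MAX);  /* report the chosen pathname back */
	}
	return fd;  /* -1 with errno set on failure */
}

Using mkstemp() rather than mktemp() closes the classic race between choosing a name and opening it, which matters here because several proc_info tests map and unlink these files while children are still running.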
-static int CONF_TMP_FILE_OPEN(char path[PATH_MAX]) +static int +CONF_TMP_FILE_OPEN(char path[PATH_MAX]) { static char stmp_path[PATH_MAX] = {}; char *nm; @@ -75,43 +76,43 @@ uint32_t get_tty_dev(void); #define WAIT_FOR_CHILDREN(pipefd, action, child_count) \ do { \ - long ret; \ - if (child_count == 1) { \ - int child_ret_action = 999; \ - while (child_ret_action != action) { \ - ret = read(pipefd, &child_ret_action, sizeof(child_ret_action)); \ - } \ - } else { \ - int child_ready_count = child_count * (int)sizeof(action); \ + long ret; \ + if (child_count == 1) { \ + int child_ret_action = 999; \ + while (child_ret_action != action) { \ + ret = read(pipefd, &child_ret_action, sizeof(child_ret_action)); \ + } \ + } else { \ + int child_ready_count = child_count * (int)sizeof(action); \ \ - action = 0; \ - while (child_ready_count) { \ - ret = read(pipefd, &action, (int)sizeof(action)); \ - if (ret != -1) { \ - child_ready_count -= ret; \ - } else { \ - T_FAIL("ERROR: Could not read from pipe() : %d", errno); \ - } \ - if (action) { \ - T_FAIL("ERROR: Child action failed with error %d", action); \ - } \ - } \ - } \ + action = 0; \ + while (child_ready_count) { \ + ret = read(pipefd, &action, (int)sizeof(action)); \ + if (ret != -1) { \ + child_ready_count -= ret; \ + } else { \ + T_FAIL("ERROR: Could not read from pipe() : %d", errno); \ + } \ + if (action) { \ + T_FAIL("ERROR: Child action failed with error %d", action); \ + } \ + } \ + } \ } while (0) #define PROC_INFO_CALL(struct_name, pid, flavor, proc_arg) \ do { \ - struct struct_name * struct_var = malloc(sizeof(struct struct_name)); \ - T_QUIET; \ - T_ASSERT_NOTNULL(struct_var, "malloc() for " #flavor); \ - retval = __proc_info(PROC_INFO_CALL_PIDINFO, pid, flavor, (uint64_t)proc_arg, (user_addr_t)struct_var, \ - (uint32_t)sizeof(struct struct_name)); \ + struct struct_name * struct_var = malloc(sizeof(struct struct_name)); \ + T_QUIET; \ + T_ASSERT_NOTNULL(struct_var, "malloc() for " #flavor); \ + retval = __proc_info(PROC_INFO_CALL_PIDINFO, pid, flavor, (uint64_t)proc_arg, (user_addr_t)struct_var, \ + (uint32_t)sizeof(struct struct_name)); \ \ - T_QUIET; \ - T_EXPECT_POSIX_SUCCESS(retval, "__proc_info call for " #flavor); \ - T_ASSERT_EQ_INT(retval, (int)sizeof(struct struct_name), "__proc_info call for " #flavor); \ - ret_structs[i] = (void *)struct_var; \ - i++; \ + T_QUIET; \ + T_EXPECT_POSIX_SUCCESS(retval, "__proc_info call for " #flavor); \ + T_ASSERT_EQ_INT(retval, (int)sizeof(struct struct_name), "__proc_info call for " #flavor); \ + ret_structs[i] = (void *)struct_var; \ + i++; \ } while (0) uint32_t @@ -119,7 +120,7 @@ get_tty_dev() { struct stat buf; stat(ttyname(1), &buf); - return ((uint32_t)buf.st_rdev); + return (uint32_t)buf.st_rdev; } /* @@ -165,183 +166,187 @@ enum proc_info_opt { static int tmp_fd = -1; -static child_action_handler_t proc_info_listpids_handler = ^void(proc_config_t proc_config, int child_id) { - close(proc_config->parent_pipe[PIPE_IN]); - close(proc_config->child_pipe[child_id][PIPE_OUT]); - long retval = 0; - int child_action = 0; - retval = write(proc_config->parent_pipe[PIPE_OUT], &child_action, sizeof(child_action)); - if (retval != -1) { - while (child_action != ACT_EXIT) { - retval = read(proc_config->child_pipe[child_id][PIPE_IN], &child_action, sizeof(child_action)); - if (retval == 0 || (retval == -1 && errno == EAGAIN)) { - continue; - } - if (retval != -1) { - switch (child_action) { - case ACT_CHANGE_UID: - /* - * Change uid - */ - retval = setuid(CONF_UID_VAL); - break; - case 
ACT_CHANGE_RUID: - /* - * Change ruid - */ - retval = setreuid(CONF_RUID_VAL, (uid_t)-1); - break; - case ACT_EXIT: - /* - * Exit - */ - break; - } - } - if (child_action != ACT_EXIT) { - retval = write(proc_config->parent_pipe[PIPE_OUT], &retval, sizeof(retval)); - if (retval == -1) - break; - } - } - } - close(proc_config->parent_pipe[PIPE_OUT]); - close(proc_config->child_pipe[child_id][PIPE_IN]); - exit(0); +static child_action_handler_t proc_info_listpids_handler = ^void (proc_config_t proc_config, int child_id) { + close(proc_config->parent_pipe[PIPE_IN]); + close(proc_config->child_pipe[child_id][PIPE_OUT]); + long retval = 0; + int child_action = 0; + retval = write(proc_config->parent_pipe[PIPE_OUT], &child_action, sizeof(child_action)); + if (retval != -1) { + while (child_action != ACT_EXIT) { + retval = read(proc_config->child_pipe[child_id][PIPE_IN], &child_action, sizeof(child_action)); + if (retval == 0 || (retval == -1 && errno == EAGAIN)) { + continue; + } + if (retval != -1) { + switch (child_action) { + case ACT_CHANGE_UID: + /* + * Change uid + */ + retval = setuid(CONF_UID_VAL); + break; + case ACT_CHANGE_RUID: + /* + * Change ruid + */ + retval = setreuid(CONF_RUID_VAL, (uid_t)-1); + break; + case ACT_EXIT: + /* + * Exit + */ + break; + } + } + if (child_action != ACT_EXIT) { + retval = write(proc_config->parent_pipe[PIPE_OUT], &retval, sizeof(retval)); + if (retval == -1) { + break; + } + } + } + } + close(proc_config->parent_pipe[PIPE_OUT]); + close(proc_config->child_pipe[child_id][PIPE_IN]); + exit(0); }; -static child_action_handler_t proc_info_call_pidinfo_handler = ^void(proc_config_t proc_config, int child_id) { - close(proc_config->parent_pipe[PIPE_IN]); - close(proc_config->child_pipe[child_id][PIPE_OUT]); - int action = 0; - long retval = 0; - int i; - void * tmp_map = NULL; - dispatch_queue_t q = NULL; - dispatch_semaphore_t sem = NULL; - /* - * PHASE 1: Child ready and waits for parent to send next action - */ - T_LOG("Child ready to accept action from parent"); - retval = write(proc_config->parent_pipe[PIPE_OUT], &action, sizeof(action)); - if (retval != -1) { - while (action != ACT_EXIT) { - retval = read(proc_config->child_pipe[child_id][PIPE_IN], &action, sizeof(action)); - - if (retval != -1) { - retval = 0; - switch (action) { - case ACT_PHASE2: { - /* - * Change uid, euid, guid, rgid, nice value - * Also change the svuid and svgid - */ - T_LOG("Child changing uid, euid, rguid, svuid, svgid and nice value"); - retval = nice(CONF_NICE_VAL); - if (retval == -1) { - T_LOG("(child) ERROR: nice() failed"); - break; - } - retval = setgid(CONF_GID_VAL); - if (retval == -1) { - T_LOG("(child) ERROR: setgid() failed"); - break; - } - retval = setreuid((uid_t)-1, CONF_RUID_VAL); - if (retval == -1) { - T_LOG("(child) ERROR: setreuid() failed"); - break; - } - break; - } - case ACT_PHASE3: { - /* - * Allocate a page of memory - * Copy on write shared memory - */ - T_LOG("Child allocating a page of memory, and causing a copy-on-write"); - retval = 0; - tmp_map = mmap(0, PAGE_SIZE, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); - if (tmp_map == MAP_FAILED) { - T_LOG("(child) ERROR: mmap() failed"); - retval = 1; - break; - } - /* - * Get the page allocated - */ - int * map_ptr = (int *)tmp_map; - for (i = 0; i < (int)(PAGE_SIZE / sizeof(int)); i++) { - *map_ptr++ = i; - } - /* - * Cause copy on write to the page - */ - *((int *)(proc_config->cow_map)) = 20; - - break; - } - case ACT_PHASE4: { - T_LOG("Child spending CPU cycles and changing thread name"); - retval = 
0; - int number = 1000; - unsigned long long factorial = 1; - int j; - for (j = 1; j <= number; j++) { - factorial *= (unsigned long long)j; - } - sysctlbyname("kern.threadname", NULL, 0, CONF_THREAD_NAME, strlen(CONF_THREAD_NAME)); - break; - } - case ACT_PHASE5: { - /* - * Dispatch for Workq test - */ - T_LOG("Child creating a dispatch queue, and dispatching blocks on it"); - q = dispatch_queue_create("com.apple.test_proc_info.workqtest", - DISPATCH_QUEUE_CONCURRENT); // dispatch_get_global_queue(0, 0); - sem = dispatch_semaphore_create(0); - - for (i = 0; i < CONF_NUM_THREADS; i++) { - dispatch_async(q, ^{ - /* - * Block the thread, do nothing - */ - dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); - }); - } - break; - } - case ACT_EXIT: { - /* - * Exit - */ - if (sem) { - for (i = 0; i < CONF_NUM_THREADS; i++) { - dispatch_semaphore_signal(sem); - } - } - - if (tmp_map) - munmap(tmp_map, PAGE_SIZE); - - if (proc_config->cow_map) - munmap(proc_config->cow_map, PAGE_SIZE); - - break; - } - } - } - if (action != ACT_EXIT) { - retval = write(proc_config->parent_pipe[PIPE_OUT], &action, sizeof(action)); - if (retval == -1) - break; - } - } - close(proc_config->parent_pipe[PIPE_OUT]); - close(proc_config->child_pipe[child_id][PIPE_IN]); - exit(0); - } +static child_action_handler_t proc_info_call_pidinfo_handler = ^void (proc_config_t proc_config, int child_id) { + close(proc_config->parent_pipe[PIPE_IN]); + close(proc_config->child_pipe[child_id][PIPE_OUT]); + int action = 0; + long retval = 0; + int i; + void * tmp_map = NULL; + dispatch_queue_t q = NULL; + dispatch_semaphore_t sem = NULL; + /* + * PHASE 1: Child ready and waits for parent to send next action + */ + T_LOG("Child ready to accept action from parent"); + retval = write(proc_config->parent_pipe[PIPE_OUT], &action, sizeof(action)); + if (retval != -1) { + while (action != ACT_EXIT) { + retval = read(proc_config->child_pipe[child_id][PIPE_IN], &action, sizeof(action)); + + if (retval != -1) { + retval = 0; + switch (action) { + case ACT_PHASE2: { + /* + * Change uid, euid, guid, rgid, nice value + * Also change the svuid and svgid + */ + T_LOG("Child changing uid, euid, rguid, svuid, svgid and nice value"); + retval = nice(CONF_NICE_VAL); + if (retval == -1) { + T_LOG("(child) ERROR: nice() failed"); + break; + } + retval = setgid(CONF_GID_VAL); + if (retval == -1) { + T_LOG("(child) ERROR: setgid() failed"); + break; + } + retval = setreuid((uid_t)-1, CONF_RUID_VAL); + if (retval == -1) { + T_LOG("(child) ERROR: setreuid() failed"); + break; + } + break; + } + case ACT_PHASE3: { + /* + * Allocate a page of memory + * Copy on write shared memory + */ + T_LOG("Child allocating a page of memory, and causing a copy-on-write"); + retval = 0; + tmp_map = mmap(0, PAGE_SIZE, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); + if (tmp_map == MAP_FAILED) { + T_LOG("(child) ERROR: mmap() failed"); + retval = 1; + break; + } + /* + * Get the page allocated + */ + int * map_ptr = (int *)tmp_map; + for (i = 0; i < (int)(PAGE_SIZE / sizeof(int)); i++) { + *map_ptr++ = i; + } + /* + * Cause copy on write to the page + */ + *((int *)(proc_config->cow_map)) = 20; + + break; + } + case ACT_PHASE4: { + T_LOG("Child spending CPU cycles and changing thread name"); + retval = 0; + int number = 1000; + unsigned long long factorial = 1; + int j; + for (j = 1; j <= number; j++) { + factorial *= (unsigned long long)j; + } + sysctlbyname("kern.threadname", NULL, 0, CONF_THREAD_NAME, strlen(CONF_THREAD_NAME)); + break; + } + case ACT_PHASE5: { + /* + * 
Dispatch for Workq test + */ + T_LOG("Child creating a dispatch queue, and dispatching blocks on it"); + q = dispatch_queue_create("com.apple.test_proc_info.workqtest", + DISPATCH_QUEUE_CONCURRENT); // dispatch_get_global_queue(0, 0); + sem = dispatch_semaphore_create(0); + + for (i = 0; i < CONF_NUM_THREADS; i++) { + dispatch_async(q, ^{ + /* + * Block the thread, do nothing + */ + dispatch_semaphore_wait(sem, DISPATCH_TIME_FOREVER); + }); + } + break; + } + case ACT_EXIT: { + /* + * Exit + */ + if (sem) { + for (i = 0; i < CONF_NUM_THREADS; i++) { + dispatch_semaphore_signal(sem); + } + } + + if (tmp_map) { + munmap(tmp_map, PAGE_SIZE); + } + + if (proc_config->cow_map) { + munmap(proc_config->cow_map, PAGE_SIZE); + } + + break; + } + } + } + if (action != ACT_EXIT) { + retval = write(proc_config->parent_pipe[PIPE_OUT], &action, sizeof(action)); + if (retval == -1) { + break; + } + } + } + close(proc_config->parent_pipe[PIPE_OUT]); + close(proc_config->child_pipe[child_id][PIPE_IN]); + exit(0); + } }; static void @@ -452,6 +457,7 @@ proc_info_caller(int proc_info_opts, void ** ret_structs, int * ret_child_pid) int retval, i = 0; uint64_t * thread_addr = NULL; void * map_tmp = NULL; + static char tmp_path[PATH_MAX] = {}; proc_config_t proc_config = spawn_child_processes(1, proc_info_call_pidinfo_handler); int child_pid = proc_config->child_pids[0]; @@ -522,7 +528,7 @@ proc_info_caller(int proc_info_opts, void ** ret_structs, int * ret_child_pid) T_ASSERT_NOTNULL(pall, "malloc() for PROC_TASKALLINFO"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDTASKALLINFO, (uint32_t)0, (user_addr_t)pall, - (uint32_t)sizeof(struct proc_taskallinfo)); + (uint32_t)sizeof(struct proc_taskallinfo)); T_QUIET; T_ASSERT_EQ_INT(retval, (int)sizeof(struct proc_taskallinfo), "__proc_info call for PROC_PIDTASKALLINFO in THREAD_ADDR"); @@ -532,11 +538,11 @@ proc_info_caller(int proc_info_opts, void ** ret_structs, int * ret_child_pid) T_ASSERT_NOTNULL(thread_addr, "malloc() for PROC_PIDLISTTHREADS"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDLISTTHREADS, (uint32_t)0, (user_addr_t)thread_addr, - (int32_t)(sizeof(uint64_t) * (unsigned long)(pall->ptinfo.pti_threadnum + 1))); + (int32_t)(sizeof(uint64_t) * (unsigned long)(pall->ptinfo.pti_threadnum + 1))); T_LOG("(int)((unsigned long)retval / PROC_PIDLISTTHREADS_SIZE: %d", - (int)((unsigned long)retval / PROC_PIDLISTTHREADS_SIZE)); + (int)((unsigned long)retval / PROC_PIDLISTTHREADS_SIZE)); T_ASSERT_GE_INT((int)((unsigned long)retval / PROC_PIDLISTTHREADS_SIZE), pall->ptinfo.pti_threadnum, - "__proc_info call for PROC_PIDLISTTHREADS"); + "__proc_info call for PROC_PIDLISTTHREADS"); free(pall); } @@ -574,7 +580,7 @@ proc_info_caller(int proc_info_opts, void ** ret_structs, int * ret_child_pid) T_ASSERT_MACH_SUCCESS(retval, "thread_info call for PROC_PIDTHREADID64INFO"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDTHREADID64INFO, (uint64_t)child_thread_threadinfo.thread_id, - (user_addr_t)pthinfo_64, (uint32_t)sizeof(struct proc_threadinfo)); + (user_addr_t)pthinfo_64, (uint32_t)sizeof(struct proc_threadinfo)); T_ASSERT_EQ_INT(retval, (int)sizeof(struct proc_threadinfo), "__proc_info call for PROC_PIDTHREADID64INFO"); ret_structs[i] = (void *)pthinfo_64; @@ -596,7 +602,6 @@ proc_info_caller(int proc_info_opts, void ** ret_structs, int * ret_child_pid) vm_map_size_t map_tmp_sz = 0; if ((proc_info_opts & PREGINFO) | (proc_info_opts & PREGINFO_PATH) | (proc_info_opts & PREGINFO_PATH_2) | (proc_info_opts & 
PREGINFO_PATH_3)) { - static char tmp_path[PATH_MAX] = {}; tmp_fd = CONF_TMP_FILE_OPEN(tmp_path); /* @@ -634,7 +639,7 @@ proc_info_caller(int proc_info_opts, void ** ret_structs, int * ret_child_pid) T_ASSERT_NE_PTR(map_tmp, MAP_FAILED, "mmap() for PROC_PIDREGIONINFO"); T_LOG("file: %s is opened as fd %d and mapped at %llx with size %lu", tmp_path, tmp_fd, (uint64_t)map_tmp, - (unsigned long)PAGE_SIZE); + (unsigned long)PAGE_SIZE); /* * unlink() the file to be nice, but do it _after_ we've @@ -675,25 +680,26 @@ proc_info_caller(int proc_info_opts, void ** ret_structs, int * ret_child_pid) struct proc_regionwithpathinfo * preginfo_path = malloc(sizeof(struct proc_regionwithpathinfo)); retval = __proc_info(PROC_INFO_CALL_PIDINFO, getpid(), PROC_PIDREGIONPATHINFO2, (uint64_t)map_tmp, - (user_addr_t)preginfo_path, (uint32_t)sizeof(struct proc_regionwithpathinfo)); + (user_addr_t)preginfo_path, (uint32_t)sizeof(struct proc_regionwithpathinfo)); T_ASSERT_EQ_INT(retval, (int)sizeof(struct proc_regionwithpathinfo), "__proc_info call for PROC_PIDREGIONPATHINFO2"); T_LOG("preginfo_path.prp_vip.vip_vi.vi_fsid.val 0: %d", preginfo_path->prp_vip.vip_vi.vi_fsid.val[0]); T_LOG("preginfo_path.prp_vip.vip_vi.vi_fsid.val 1: %d", preginfo_path->prp_vip.vip_vi.vi_fsid.val[1]); + ret_structs[3] = (void *)(uintptr_t)preginfo_path->prp_vip.vip_vi.vi_fsid.val[0]; + ret_structs[4] = (void *)(uintptr_t)preginfo_path->prp_vip.vip_vi.vi_fsid.val[1]; retval = __proc_info(PROC_INFO_CALL_PIDINFO, getpid(), PROC_PIDREGIONPATHINFO3, - (uint64_t)(*(uint32_t *)(preginfo_path->prp_vip.vip_vi.vi_fsid.val)), (user_addr_t)preginfo_path, - (uint32_t)sizeof(struct proc_regionwithpathinfo)); + (uint64_t)preginfo_path->prp_vip.vip_vi.vi_fsid.val[0] + + ((uint64_t)preginfo_path->prp_vip.vip_vi.vi_fsid.val[1] << 32), + (user_addr_t)preginfo_path, + (uint32_t)sizeof(struct proc_regionwithpathinfo)); T_ASSERT_EQ_INT(retval, (int)sizeof(struct proc_regionwithpathinfo), "__proc_info call for PROC_PIDREGIONPATHWITHINFO3"); - ret_structs[i] = (void *)preginfo_path; - i++; - ret_structs[i] = (void *)map_tmp; - i++; - ret_structs[i] = (void *)(uintptr_t)map_tmp_sz; - i++; + ret_structs[0] = (void *)preginfo_path; + ret_structs[1] = (void *)map_tmp; + ret_structs[2] = (void *)(uintptr_t)map_tmp_sz; - retval = unlink(preginfo_path->prp_vip.vip_path); + retval = unlink(tmp_path); T_QUIET; T_ASSERT_POSIX_SUCCESS(retval, "unlink(%s) failed", preginfo_path->prp_vip.vip_path); } @@ -725,9 +731,9 @@ free_proc_info(void ** proc_info, int num) */ T_DECL(proc_info_listpids_all_pids, - "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { /* * Get the value of nprocs with no buffer sent in @@ -747,7 +753,7 @@ T_DECL(proc_info_listpids_all_pids, } pid_t * proc_ids = malloc(sizeof(pid_t) * (unsigned long)proc_count); num_procs = __proc_info(PROC_INFO_CALL_LISTPIDS, PROC_ALL_PIDS, (uint32_t)getpid(), (uint32_t)0, (user_addr_t)proc_ids, - (int32_t)(proc_count * (int)sizeof(pid_t))); + (int32_t)(proc_count * (int)sizeof(pid_t))); num_procs = num_procs / (int)sizeof(pid_t); T_ASSERT_GE_INT(num_procs, proc_count, "Valid number of pids obtained for PROC_ALL_PIDS."); @@ -759,7 +765,7 @@ T_DECL(proc_info_listpids_all_pids, proc_ids = malloc(sizeof(pid_t) * (unsigned long)proc_count_all); num_procs = __proc_info(PROC_INFO_CALL_LISTPIDS, PROC_ALL_PIDS, (uint32_t)getpid(), (uint32_t)0, 
(user_addr_t)proc_ids, - (int32_t)(proc_count_all * (int)sizeof(pid_t))); + (int32_t)(proc_count_all * (int)sizeof(pid_t))); num_procs = num_procs / (int)sizeof(pid_t); int pid_match = 1; @@ -788,14 +794,14 @@ T_DECL(proc_info_listpids_all_pids, errno = 0; num_procs = __proc_info(PROC_INFO_CALL_LISTPIDS, PROC_ALL_PIDS, (uint32_t)getpid(), (uint32_t)0, (user_addr_t)proc_ids, - (uint32_t)(sizeof(pid_t) - 1)); + (uint32_t)(sizeof(pid_t) - 1)); T_EXPECT_POSIX_ERROR(errno, ENOMEM, "Valid proc_info behavior when bufsize < sizeof(pid_t)."); } T_DECL(proc_info_listpids_pgrp_only, - "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { proc_config_t proc_config = spawn_child_processes(CONF_PROC_COUNT, proc_info_listpids_handler); T_LOG("Test to verify PROC_PGRP_ONLY returns correct value"); @@ -807,7 +813,7 @@ T_DECL(proc_info_listpids_pgrp_only, int proc_count = CONF_PROC_COUNT + 2; pid_t * proc_ids = malloc(sizeof(*proc_ids) * (unsigned long)proc_count); int num_procs = __proc_info(PROC_INFO_CALL_LISTPIDS, PROC_PGRP_ONLY, (uint32_t)proc_config->proc_grp_id, (uint32_t)0, - (user_addr_t)proc_ids, (int32_t)(proc_count * (int)sizeof(*proc_ids))); + (user_addr_t)proc_ids, (int32_t)(proc_count * (int)sizeof(*proc_ids))); num_procs = num_procs / (int)sizeof(pid_t); T_ASSERT_EQ_INT(num_procs, CONF_PROC_COUNT + 1, "Valid number of pids obtained for PROC_PGRP_ONLY."); kill_child_processes(proc_config); @@ -816,9 +822,9 @@ T_DECL(proc_info_listpids_pgrp_only, } T_DECL(proc_info_listpids_ppid_only, - "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { proc_config_t proc_config = spawn_child_processes(CONF_PROC_COUNT, proc_info_listpids_handler); T_LOG("Test to verify PROC_PPID_ONLY returns correct value"); @@ -828,7 +834,7 @@ T_DECL(proc_info_listpids_ppid_only, int proc_count = CONF_PROC_COUNT + 2; pid_t * proc_ids = malloc(sizeof(*proc_ids) * (unsigned long)proc_count); int num_procs = __proc_info(PROC_INFO_CALL_LISTPIDS, PROC_PPID_ONLY, (uint32_t)getpid(), (uint32_t)0, (user_addr_t)proc_ids, - (int32_t)(proc_count * (int)sizeof(*proc_ids))); + (int32_t)(proc_count * (int)sizeof(*proc_ids))); num_procs = num_procs / (int)sizeof(pid_t); T_ASSERT_EQ_INT(num_procs, CONF_PROC_COUNT, "Valid number of pids obtained for PROC_PPID_ONLY."); kill_child_processes(proc_config); @@ -837,9 +843,9 @@ T_DECL(proc_info_listpids_ppid_only, } T_DECL(proc_info_listpids_uid_only, - "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { proc_config_t proc_config = spawn_child_processes(CONF_PROC_COUNT, proc_info_listpids_handler); T_LOG("Test to verify PROC_UID_ONLY returns correct value"); @@ -848,18 +854,18 @@ T_DECL(proc_info_listpids_uid_only, send_action_to_child_processes(proc_config, ACT_CHANGE_UID); usleep(10000); int num_procs = __proc_info(PROC_INFO_CALL_LISTPIDS, PROC_UID_ONLY, CONF_UID_VAL, (uint32_t)0, (user_addr_t)proc_ids, - (int32_t)(proc_count * (int)sizeof(*proc_ids))); + (int32_t)(proc_count * (int)sizeof(*proc_ids))); T_ASSERT_GE_ULONG((unsigned long)num_procs / sizeof(pid_t), (unsigned 
long)CONF_PROC_COUNT, - "Valid number of pids obtained for PROC_UID_ONLY."); + "Valid number of pids obtained for PROC_UID_ONLY."); kill_child_processes(proc_config); free_proc_config(proc_config); free(proc_ids); } T_DECL(proc_info_listpids_ruid_only, - "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { proc_config_t proc_config = spawn_child_processes(CONF_PROC_COUNT, proc_info_listpids_handler); T_LOG("Test to verify PROC_RUID_ONLY returns correct value"); @@ -868,18 +874,18 @@ T_DECL(proc_info_listpids_ruid_only, send_action_to_child_processes(proc_config, ACT_CHANGE_RUID); usleep(10000); int num_procs = __proc_info(PROC_INFO_CALL_LISTPIDS, PROC_RUID_ONLY, CONF_RUID_VAL, (uint32_t)0, (user_addr_t)proc_ids, - (int32_t)(proc_count * (int)sizeof(*proc_ids))); + (int32_t)(proc_count * (int)sizeof(*proc_ids))); T_ASSERT_GE_ULONG((unsigned long)num_procs / sizeof(pid_t), (unsigned long)CONF_PROC_COUNT, - "Valid number of pids obtained for PROC_RUID_ONLY."); + "Valid number of pids obtained for PROC_RUID_ONLY."); kill_child_processes(proc_config); free_proc_config(proc_config); free(proc_ids); } T_DECL(proc_info_listpids_tty_only, - "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "proc_info API test to verify PROC_INFO_CALL_LISTPIDS", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { int ret = isatty(STDOUT_FILENO); if (ret != 1) { @@ -892,7 +898,7 @@ T_DECL(proc_info_listpids_tty_only, int proc_count = CONF_PROC_COUNT + 2; pid_t * proc_ids = malloc(sizeof(*proc_ids) * (unsigned long)proc_count); int num_procs = __proc_info(PROC_INFO_CALL_LISTPIDS, PROC_TTY_ONLY, get_tty_dev(), (uint32_t)0, (user_addr_t)proc_ids, - (int32_t)(proc_count * (int)sizeof(*proc_ids))); + (int32_t)(proc_count * (int)sizeof(*proc_ids))); num_procs = num_procs / (int)sizeof(pid_t); T_ASSERT_GE_INT(num_procs, 0, "Valid number of pids returned by PROC_TTY_ONLY."); kill_child_processes(proc_config); @@ -908,9 +914,9 @@ T_DECL(proc_info_listpids_tty_only, */ T_DECL(proc_info_pidinfo_proc_piduniqidentifierinfo, - "Test to identify PROC_PIDUNIQIDENTIFIERINFO returns correct unique identifiers for process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to identify PROC_PIDUNIQIDENTIFIERINFO returns correct unique identifiers for process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[2]; proc_info_caller(P_UNIQIDINFO | C_UNIQIDINFO, proc_info, NULL); @@ -923,15 +929,15 @@ T_DECL(proc_info_pidinfo_proc_piduniqidentifierinfo, T_EXPECT_EQ_UCHAR(c_uniqidinfo->p_uuid[i], p_uniqidinfo->p_uuid[i], "p_uuid should be the same unique id"); } T_EXPECT_EQ_ULLONG(c_uniqidinfo->p_puniqueid, p_uniqidinfo->p_uniqueid, - "p_puniqueid of child should be same as p_uniqueid for parent"); + "p_puniqueid of child should be same as p_uniqueid for parent"); free_proc_info(proc_info, 2); } T_DECL(proc_info_pidinfo_proc_pidtbsdinfo, - "Test to verify PROC_PIDTBSDINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDTBSDINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[2]; int child_pid = 0; @@ -962,9 +968,9 @@ T_DECL(proc_info_pidinfo_proc_pidtbsdinfo, } T_DECL(proc_info_pidt_shortbsdinfo, - "Test to 
verify PROC_PIDT_SHORTBSDINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDT_SHORTBSDINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[2]; int child_pid = 0; @@ -981,7 +987,7 @@ T_DECL(proc_info_pidt_shortbsdinfo, * The short variant returns all flags except session flags, hence ignoring them here. */ T_EXPECT_EQ_UINT(pbsd_short->pbsi_flags, (pbsd->pbi_flags & (unsigned int)(~PROC_FLAG_CTTY)), - "PROC_PIDT_SHORTBSDINFO returns valid flags"); + "PROC_PIDT_SHORTBSDINFO returns valid flags"); T_EXPECT_EQ_UINT(pbsd_short->pbsi_uid, CONF_RUID_VAL, "PROC_PIDT_SHORTBSDINFO returns valid uid"); T_EXPECT_EQ_UINT(pbsd_short->pbsi_gid, CONF_GID_VAL, "PROC_PIDT_SHORTBSDINFO returns valid gid"); T_EXPECT_EQ_UINT(pbsd_short->pbsi_ruid, 0U, "PROC_PIDT_SHORTBSDINFO returns valid ruid"); @@ -992,9 +998,9 @@ T_DECL(proc_info_pidt_shortbsdinfo, } T_DECL(proc_info_pidt_bsdinfowithuniqid, - "Test to verify PROC_PIDT_BSDINFOWITHUNIQID returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDT_BSDINFOWITHUNIQID returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[4]; int child_pid = 0; @@ -1018,28 +1024,28 @@ T_DECL(proc_info_pidt_bsdinfowithuniqid, T_EXPECT_EQ_STR(pbsd_uniqid->pbsd.pbi_comm, CONF_CMD_NAME, "PROC_PIDT_BSDINFOWITHUNIQID returns valid p_comm name"); T_EXPECT_EQ_STR(pbsd_uniqid->pbsd.pbi_name, CONF_CMD_NAME, "PROC_PIDT_BSDINFOWITHUNIQID returns valid p_name name"); T_EXPECT_EQ_UINT(pbsd_uniqid->pbsd.pbi_flags, (pbsd_old->pbi_flags | PROC_FLAG_PSUGID), - "PROC_PIDT_BSDINFOWITHUNIQID returns valid flags"); + "PROC_PIDT_BSDINFOWITHUNIQID returns valid flags"); T_EXPECT_EQ_UINT(pbsd_uniqid->pbsd.pbi_nfiles, pbsd_old->pbi_nfiles, "PROC_PIDT_BSDINFOWITHUNIQID returned valid pbi_nfiles"); T_EXPECT_EQ_UINT(pbsd_uniqid->pbsd.pbi_pgid, (uint32_t)getpgid(getpid()), - "PROC_PIDT_BSDINFOWITHUNIQID returned valid pbi_pgid"); + "PROC_PIDT_BSDINFOWITHUNIQID returned valid pbi_pgid"); T_EXPECT_EQ_UINT(pbsd_uniqid->pbsd.pbi_pjobc, pbsd->pbi_pjobc, "PROC_PIDT_BSDINFOWITHUNIQID returned valid pbi_pjobc"); T_EXPECT_NE_UINT(pbsd_uniqid->pbsd.e_tdev, 0U, "PROC_PIDT_BSDINFOWITHUNIQID returned valid e_tdev"); T_EXPECT_NE_ULLONG(pbsd_uniqid->p_uniqidentifier.p_uniqueid, p_uniqidinfo->p_uniqueid, - "PROC_PIDT_BSDINFOWITHUNIQID returned valid p_uniqueid"); + "PROC_PIDT_BSDINFOWITHUNIQID returned valid p_uniqueid"); for (int i = 0; i < 16; i++) { T_EXPECT_EQ_UCHAR(pbsd_uniqid->p_uniqidentifier.p_uuid[i], p_uniqidinfo->p_uuid[i], - "PROC_PIDT_BSDINFOWITHUNIQID reported valid p_uniqueid"); + "PROC_PIDT_BSDINFOWITHUNIQID reported valid p_uniqueid"); } T_EXPECT_EQ_ULLONG(pbsd_uniqid->p_uniqidentifier.p_puniqueid, p_uniqidinfo->p_uniqueid, - "p_puniqueid of child should be same as p_uniqueid for parent"); + "p_puniqueid of child should be same as p_uniqueid for parent"); free_proc_info(proc_info, 4); } T_DECL(proc_info_proc_pidtask_info, - "Test to verify PROC_PIDTASKINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDTASKINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[2]; proc_info_caller(P_TASK_INFO | P_TASK_INFO_NEW, proc_info, NULL); @@ -1047,36 +1053,36 @@ 
T_DECL(proc_info_proc_pidtask_info, struct proc_taskinfo * p_task_info_new = (struct proc_taskinfo *)proc_info[1]; T_EXPECT_GE_ULLONG((p_task_info_new->pti_virtual_size - p_task_info->pti_virtual_size), (unsigned long long)PAGE_SIZE, - "PROC_PIDTASKINFO returned valid value for pti_virtual_size"); + "PROC_PIDTASKINFO returned valid value for pti_virtual_size"); T_EXPECT_GE_ULLONG((p_task_info_new->pti_resident_size - p_task_info->pti_resident_size), (unsigned long long)PAGE_SIZE, - "PROC_PIDTASKINFO returned valid value for pti_virtual_size"); + "PROC_PIDTASKINFO returned valid value for pti_virtual_size"); T_EXPECT_EQ_INT(p_task_info_new->pti_policy, POLICY_TIMESHARE, "PROC_PIDTASKINFO returned valid value for pti_virtual_size"); T_EXPECT_GE_ULLONG(p_task_info->pti_threads_user, 1ULL, "PROC_PIDTASKINFO returned valid value for pti_threads_user"); #if defined(__arm__) || defined(__arm64__) T_EXPECT_GE_ULLONG(p_task_info->pti_threads_system, 0ULL, "PROC_PIDTASKINFO returned valid value for pti_threads_system"); T_EXPECT_GE_ULLONG((p_task_info_new->pti_total_system - p_task_info->pti_total_system), 0ULL, - "PROC_PIDTASKINFO returned valid value for pti_total_system"); + "PROC_PIDTASKINFO returned valid value for pti_total_system"); #else T_EXPECT_GE_ULLONG(p_task_info->pti_threads_system, 1ULL, "PROC_PIDTASKINFO returned valid value for pti_threads_system"); T_EXPECT_GT_ULLONG((p_task_info_new->pti_total_system - p_task_info->pti_total_system), 0ULL, - "PROC_PIDTASKINFO returned valid value for pti_total_system"); + "PROC_PIDTASKINFO returned valid value for pti_total_system"); #endif T_EXPECT_GT_ULLONG((p_task_info_new->pti_total_user - p_task_info->pti_total_user), 0ULL, - "PROC_PIDTASKINFO returned valid value for pti_total_user"); + "PROC_PIDTASKINFO returned valid value for pti_total_user"); T_EXPECT_GE_INT((p_task_info_new->pti_faults - p_task_info->pti_faults), 1, - "PROC_PIDTASKINFO returned valid value for pti_faults"); + "PROC_PIDTASKINFO returned valid value for pti_faults"); T_EXPECT_GE_INT((p_task_info_new->pti_cow_faults - p_task_info->pti_cow_faults), 1, - "PROC_PIDTASKINFO returned valid value for pti_cow_faults"); + "PROC_PIDTASKINFO returned valid value for pti_cow_faults"); T_EXPECT_GE_INT((p_task_info_new->pti_syscalls_mach - p_task_info->pti_syscalls_mach), 0, - "PROC_PIDTASKINFO returned valid value for pti_syscalls_mach"); + "PROC_PIDTASKINFO returned valid value for pti_syscalls_mach"); T_EXPECT_GE_INT((p_task_info_new->pti_syscalls_unix - p_task_info->pti_syscalls_unix), 2, - "PROC_PIDTASKINFO returned valid value for pti_syscalls_unix"); + "PROC_PIDTASKINFO returned valid value for pti_syscalls_unix"); T_EXPECT_EQ_INT((p_task_info_new->pti_messages_sent - p_task_info->pti_messages_sent), 0, - "PROC_PIDTASKINFO returned valid value for pti_messages_sent"); + "PROC_PIDTASKINFO returned valid value for pti_messages_sent"); T_EXPECT_EQ_INT((p_task_info_new->pti_messages_received - p_task_info->pti_messages_received), 0, - "PROC_PIDTASKINFO returned valid value for pti_messages_received"); + "PROC_PIDTASKINFO returned valid value for pti_messages_received"); T_EXPECT_EQ_INT(p_task_info_new->pti_priority, p_task_info->pti_priority, - "PROC_PIDTASKINFO returned valid value for pti_priority"); + "PROC_PIDTASKINFO returned valid value for pti_priority"); T_EXPECT_GE_INT(p_task_info_new->pti_threadnum, 1, "PROC_PIDTASKINFO returned valid value for pti_threadnum"); if (p_task_info_new->pti_threadnum > 1) { @@ -1095,9 +1101,9 @@ T_DECL(proc_info_proc_pidtask_info, } 
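
Aside for reviewers (a minimal sketch, not part of the patch): every T_DECL in this file funnels through the raw __proc_info() syscall stub with a flavor constant and a caller-supplied buffer, and treats the returned byte count as the success signal. The same PROC_PIDTASKALLINFO query can be issued from ordinary userland through the public libproc wrapper proc_pidinfo(3); the helper name dump_taskallinfo below is hypothetical.

	#include <libproc.h>
	#include <stdio.h>
	#include <sys/proc_info.h>
	#include <unistd.h>

	/*
	 * Minimal sketch, assuming the public libproc API: the userland
	 * equivalent of the __proc_info(PROC_INFO_CALL_PIDINFO, pid,
	 * PROC_PIDTASKALLINFO, ...) calls exercised by these tests.
	 * proc_pidinfo() returns the number of bytes copied out; anything
	 * short of sizeof(tai) is treated as failure, mirroring the
	 * T_ASSERT_EQ_INT(retval, (int)sizeof(...)) checks above.
	 * dump_taskallinfo is a hypothetical helper, not part of the patch.
	 */
	static void
	dump_taskallinfo(pid_t pid)
	{
		struct proc_taskallinfo tai;
		int ret = proc_pidinfo(pid, PROC_PIDTASKALLINFO, 0, &tai, (int)sizeof(tai));
		if (ret != (int)sizeof(tai)) {
			perror("proc_pidinfo(PROC_PIDTASKALLINFO)");
			return;
		}
		printf("%s: %d thread(s), vsize %llu bytes\n", tai.pbsd.pbi_comm,
		    tai.ptinfo.pti_threadnum, tai.ptinfo.pti_virtual_size);
	}

The same return-length convention is what the list-style flavors in this file rely on: the kernel copies out at most buffersize bytes, so the tests divide the returned count by the element size (sizeof(pid_t) for PROC_INFO_CALL_LISTPIDS, sizeof(struct proc_fdinfo) for PROC_PIDLISTFDS) to recover the number of entries.
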
T_DECL(proc_info_proc_pidtaskallinfo, - "Test to verify PROC_PIDTASKALLINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDTASKALLINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[4]; int child_pid = 0; @@ -1129,35 +1135,35 @@ T_DECL(proc_info_proc_pidtaskallinfo, #if defined(__arm__) || defined(__arm64__) T_EXPECT_GE_ULLONG(pall->ptinfo.pti_threads_system, 0ULL, "PROC_PIDTASKALLINFO returned valid value for pti_threads_system"); T_EXPECT_GE_ULLONG((pall->ptinfo.pti_total_system - p_task_info->pti_total_system), 0ULL, - "PROC_PIDTASKALLINFO returned valid value for pti_total_system"); + "PROC_PIDTASKALLINFO returned valid value for pti_total_system"); #else T_EXPECT_GE_ULLONG(pall->ptinfo.pti_threads_system, 1ULL, "PROC_PIDTASKALLINFO returned valid value for pti_threads_system"); T_EXPECT_GT_ULLONG((pall->ptinfo.pti_total_system - p_task_info->pti_total_system), 0ULL, - "PROC_PIDTASKALLINFO returned valid value for pti_total_system"); + "PROC_PIDTASKALLINFO returned valid value for pti_total_system"); #endif /* ARM */ T_EXPECT_GE_ULLONG((pall->ptinfo.pti_virtual_size - p_task_info->pti_virtual_size), (unsigned long long)PAGE_SIZE, - "PROC_PIDTASKALLINFO returned valid value for pti_virtual_size"); + "PROC_PIDTASKALLINFO returned valid value for pti_virtual_size"); T_EXPECT_GE_ULLONG((pall->ptinfo.pti_resident_size - p_task_info->pti_resident_size), (unsigned long long)PAGE_SIZE, - "PROC_PIDTASKALLINFO returned valid value for pti_virtual_size"); + "PROC_PIDTASKALLINFO returned valid value for pti_virtual_size"); T_EXPECT_EQ_INT(pall->ptinfo.pti_policy, POLICY_TIMESHARE, "PROC_PIDTASKALLINFO returned valid value for pti_virtual_size"); T_EXPECT_GE_ULLONG(pall->ptinfo.pti_threads_user, 1ULL, "PROC_PIDTASKALLINFO returned valid value for pti_threads_user "); T_EXPECT_GT_ULLONG((pall->ptinfo.pti_total_user - p_task_info->pti_total_user), 0ULL, - "PROC_PIDTASKALLINFO returned valid value for pti_total_user"); + "PROC_PIDTASKALLINFO returned valid value for pti_total_user"); T_EXPECT_GE_INT((pall->ptinfo.pti_faults - p_task_info->pti_faults), 1, - "PROC_PIDTASKALLINFO returned valid value for pti_faults"); + "PROC_PIDTASKALLINFO returned valid value for pti_faults"); T_EXPECT_GE_INT((pall->ptinfo.pti_cow_faults - p_task_info->pti_cow_faults), 1, - "PROC_PIDTASKALLINFO returned valid value for pti_cow_faults"); + "PROC_PIDTASKALLINFO returned valid value for pti_cow_faults"); T_EXPECT_GE_INT((pall->ptinfo.pti_syscalls_mach - p_task_info->pti_syscalls_mach), 0, - "PROC_PIDTASKALLINFO returned valid value for pti_syscalls_mach"); + "PROC_PIDTASKALLINFO returned valid value for pti_syscalls_mach"); T_EXPECT_GE_INT((pall->ptinfo.pti_syscalls_unix - p_task_info->pti_syscalls_unix), 2, - "PROC_PIDTASKALLINFO returned valid value for pti_syscalls_unix"); + "PROC_PIDTASKALLINFO returned valid value for pti_syscalls_unix"); T_EXPECT_EQ_INT((pall->ptinfo.pti_messages_sent - p_task_info->pti_messages_sent), 0, - "PROC_PIDTASKALLINFO returned valid value for pti_messages_sent"); + "PROC_PIDTASKALLINFO returned valid value for pti_messages_sent"); T_EXPECT_EQ_INT((pall->ptinfo.pti_messages_received - p_task_info->pti_messages_received), 0, - "PROC_PIDTASKALLINFO returned valid value for pti_messages_received"); + "PROC_PIDTASKALLINFO returned valid value for pti_messages_received"); T_EXPECT_EQ_INT(pall->ptinfo.pti_priority, p_task_info->pti_priority, - 
"PROC_PIDTASKALLINFO returned valid value for pti_priority"); + "PROC_PIDTASKALLINFO returned valid value for pti_priority"); T_EXPECT_GE_INT(pall->ptinfo.pti_threadnum, 1, "PROC_PIDTASKALLINFO returned valid value for pti_threadnum"); if (pall->ptinfo.pti_threadnum > 1) { T_LOG("WARN: PROC_PIDTASKALLINFO returned threadnum greater than 1"); @@ -1173,18 +1179,18 @@ T_DECL(proc_info_proc_pidtaskallinfo, } T_DECL(proc_info_proc_pidlistthreads, - "Test to verify PROC_PIDLISTTHREADS returns valid information about process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDLISTTHREADS returns valid information about process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[1]; proc_info_caller(THREAD_ADDR, proc_info, NULL); } T_DECL(proc_info_proc_pidthreadinfo, - "Test to verify PROC_PIDTHREADINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDTHREADINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[2]; int child_pid = 0; @@ -1193,9 +1199,9 @@ T_DECL(proc_info_proc_pidthreadinfo, struct proc_threadinfo * pthinfo = (struct proc_threadinfo *)proc_info[1]; T_EXPECT_GT_ULLONG((pthinfo->pth_user_time - pthinfo_old->pth_user_time), 0ULL, - "PROC_PIDTHREADINFO returns valid value for pth_user_time"); + "PROC_PIDTHREADINFO returns valid value for pth_user_time"); T_EXPECT_GE_ULLONG((pthinfo->pth_system_time - pthinfo_old->pth_system_time), 0ULL, - "PROC_PIDTHREADINFO returns valid value for pth_system_time"); + "PROC_PIDTHREADINFO returns valid value for pth_system_time"); /* * This is the scaled cpu usage percentage, since we are not * doing a really long CPU bound task, it is (nearly) zero @@ -1211,9 +1217,9 @@ T_DECL(proc_info_proc_pidthreadinfo, */ T_EXPECT_EQ_INT(pthinfo->pth_sleep_time, 0, "PROC_PIDTHREADINFO returns valid value for pth_sleep_time"); T_EXPECT_LE_INT(pthinfo->pth_curpri, (BASEPRI_DEFAULT - CONF_NICE_VAL), - "PROC_PIDTHREADINFO returns valid value for pth_curpri"); + "PROC_PIDTHREADINFO returns valid value for pth_curpri"); T_EXPECT_EQ_INT(pthinfo->pth_priority, (BASEPRI_DEFAULT - CONF_NICE_VAL), - "PROC_PIDTHREADINFO returns valid value for pth_priority"); + "PROC_PIDTHREADINFO returns valid value for pth_priority"); T_EXPECT_EQ_INT(pthinfo->pth_maxpriority, MAXPRI_USER, "PROC_PIDTHREADINFO returns valid value for pth_maxpriority"); T_EXPECT_EQ_STR(pthinfo->pth_name, CONF_THREAD_NAME, "PROC_PIDTHREADINFO returns valid value for pth_name"); @@ -1221,20 +1227,20 @@ T_DECL(proc_info_proc_pidthreadinfo, } T_DECL(proc_info_proc_threadid64info, - "Test to verify PROC_PIDTHREADID64INFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDTHREADID64INFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[2]; proc_info_caller(PTHINFO | PTHINFO_64, proc_info, NULL); struct proc_threadinfo pthinfo = *((struct proc_threadinfo *)proc_info[0]); struct proc_threadinfo pthinfo_64 = *((struct proc_threadinfo *)proc_info[1]); T_EXPECT_GE_ULLONG(pthinfo_64.pth_user_time, pthinfo.pth_user_time, - "PROC_PIDTHREADID64INFO returns valid value for pth_user_time"); + "PROC_PIDTHREADID64INFO returns valid value for pth_user_time"); T_EXPECT_GE_ULLONG(pthinfo_64.pth_system_time, pthinfo.pth_system_time, - "PROC_PIDTHREADID64INFO returns valid value for 
pth_system_time"); + "PROC_PIDTHREADID64INFO returns valid value for pth_system_time"); T_EXPECT_GE_INT(pthinfo_64.pth_cpu_usage, pthinfo.pth_cpu_usage, - "PROC_PIDTHREADID64INFO returns valid value for pth_cpu_usage"); + "PROC_PIDTHREADID64INFO returns valid value for pth_cpu_usage"); T_EXPECT_EQ_INT(pthinfo_64.pth_policy, POLICY_TIMESHARE, "PROC_PIDTHREADID64INFO returns valid value for pth_policy"); if (!(pthinfo_64.pth_run_state == TH_STATE_WAITING) && !(pthinfo_64.pth_run_state == TH_STATE_RUNNING)) { T_EXPECT_EQ_INT(pthinfo_64.pth_run_state, -1, "PROC_PIDTHREADID64INFO returns valid value for pth_run_state"); @@ -1243,16 +1249,16 @@ T_DECL(proc_info_proc_threadid64info, T_EXPECT_EQ_INT(pthinfo_64.pth_curpri, pthinfo.pth_curpri, "PROC_PIDTHREADID64INFO returns valid value for pth_curpri"); T_EXPECT_EQ_INT(pthinfo_64.pth_priority, pthinfo.pth_priority, "PROC_PIDTHREADID64INFO returns valid value for pth_priority"); T_EXPECT_EQ_INT(pthinfo_64.pth_maxpriority, pthinfo.pth_maxpriority, - "PROC_PIDTHREADID64INFO returns valid value for pth_maxpriority"); + "PROC_PIDTHREADID64INFO returns valid value for pth_maxpriority"); T_EXPECT_EQ_STR(pthinfo_64.pth_name, CONF_THREAD_NAME, "PROC_PIDTHREADID64INFO returns valid value for pth_name"); free_proc_info(proc_info, 2); } T_DECL(proc_info_proc_pidthreadpathinfo, - "Test to verify PROC_PIDTHREADPATHINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDTHREADPATHINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[2]; proc_info_caller(PTHINFO | PINFO_PATH, proc_info, NULL); @@ -1260,11 +1266,11 @@ T_DECL(proc_info_proc_pidthreadpathinfo, struct proc_threadwithpathinfo pinfo_path = *((struct proc_threadwithpathinfo *)proc_info[1]); T_EXPECT_GE_ULLONG(pinfo_path.pt.pth_user_time, pthinfo.pth_user_time, - "PROC_PIDTHREADPATHINFO returns valid value for pth_user_time"); + "PROC_PIDTHREADPATHINFO returns valid value for pth_user_time"); T_EXPECT_GE_ULLONG(pinfo_path.pt.pth_system_time, pthinfo.pth_system_time, - "PROC_PIDTHREADPATHINFO returns valid value for pth_system_time"); + "PROC_PIDTHREADPATHINFO returns valid value for pth_system_time"); T_EXPECT_GE_INT(pinfo_path.pt.pth_cpu_usage, pthinfo.pth_cpu_usage, - "PROC_PIDTHREADPATHINFO returns valid value for pth_cpu_usage"); + "PROC_PIDTHREADPATHINFO returns valid value for pth_cpu_usage"); T_EXPECT_EQ_INT(pinfo_path.pt.pth_policy, POLICY_TIMESHARE, "PROC_PIDTHREADPATHINFO returns valid value for pth_policy"); if (!(pinfo_path.pt.pth_run_state == TH_STATE_WAITING) && !(pinfo_path.pt.pth_run_state == TH_STATE_RUNNING)) { T_EXPECT_EQ_INT(pinfo_path.pt.pth_run_state, -1, "PROC_PIDTHREADPATHINFO returns valid value for pth_run_state"); @@ -1272,9 +1278,9 @@ T_DECL(proc_info_proc_pidthreadpathinfo, T_EXPECT_EQ_INT(pinfo_path.pt.pth_sleep_time, 0, "PROC_PIDTHREADPATHINFO returns valid value for pth_sleep_time"); T_EXPECT_EQ_INT(pinfo_path.pt.pth_curpri, pthinfo.pth_curpri, "PROC_PIDTHREADPATHINFO returns valid value for pth_curpri"); T_EXPECT_EQ_INT(pinfo_path.pt.pth_priority, pthinfo.pth_priority, - "PROC_PIDTHREADPATHINFO returns valid value for pth_priority"); + "PROC_PIDTHREADPATHINFO returns valid value for pth_priority"); T_EXPECT_EQ_INT(pinfo_path.pt.pth_maxpriority, pthinfo.pth_maxpriority, - "PROC_PIDTHREADPATHINFO returns valid value for pth_maxpriority"); + "PROC_PIDTHREADPATHINFO returns valid value for pth_maxpriority"); 
T_EXPECT_EQ_STR(pinfo_path.pt.pth_name, CONF_THREAD_NAME, "PROC_PIDTHREADPATHINFO returns valid value for pth_name"); T_EXPECT_EQ_INT(pinfo_path.pvip.vip_vi.vi_type, VNON, "PROC_PIDTHREADPATHINFO valid vnode information"); @@ -1282,9 +1288,9 @@ T_DECL(proc_info_proc_pidthreadpathinfo, } T_DECL(proc_info_proc_pidarchinfo, - "Test to verify PROC_PIDARCHINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDARCHINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[1]; proc_info_caller(PAI, proc_info, NULL); @@ -1295,7 +1301,7 @@ T_DECL(proc_info_proc_pidarchinfo, T_EXPECT_EQ_INT(pai.p_cputype, CPU_TYPE_ARM, "PROC_PIDARCHINFO returned valid value for p_cputype"); } T_EXPECT_EQ_INT((pai.p_cpusubtype & CPU_SUBTYPE_ARM_ALL), CPU_SUBTYPE_ARM_ALL, - "PROC_PIDARCHINFO returned valid value for p_cpusubtype"); + "PROC_PIDARCHINFO returned valid value for p_cpusubtype"); #else if (!((pai.p_cputype & CPU_TYPE_X86) == CPU_TYPE_X86) && !((pai.p_cputype & CPU_TYPE_X86_64) == CPU_TYPE_X86_64)) { T_EXPECT_EQ_INT(pai.p_cputype, CPU_TYPE_X86, "PROC_PIDARCHINFO returned valid value for p_cputype"); @@ -1305,9 +1311,9 @@ T_DECL(proc_info_proc_pidarchinfo, } T_DECL(proc_info_proc_pidregioninfo, - "Test to verify PROC_PIDREGIONINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDREGIONINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[3]; proc_info_caller(PREGINFO, proc_info, NULL); @@ -1321,28 +1327,28 @@ T_DECL(proc_info_proc_pidregioninfo, T_EXPECT_EQ_ULLONG(preginfo.pri_offset, (unsigned long long)PAGE_SIZE, "PROC_PIDREGIONINFO returns valid value for pri_offset"); T_EXPECT_EQ_UINT((preginfo.pri_protection ^ (VM_PROT_READ | VM_PROT_WRITE)), 0U, - "PROC_PIDREGIONINFO returns valid value for pri_protection, expected read/write only"); + "PROC_PIDREGIONINFO returns valid value for pri_protection, expected read/write only"); T_EXPECT_EQ_UINT((preginfo.pri_max_protection & (VM_PROT_READ | VM_PROT_WRITE)), (unsigned int)(VM_PROT_READ | VM_PROT_WRITE), - "PROC_PIDREGIONINFO returns valid value for pri_max_protection"); + "PROC_PIDREGIONINFO returns valid value for pri_max_protection"); T_EXPECT_EQ_UINT((preginfo.pri_inheritance ^ VM_INHERIT_COPY), 0U, - "PROC_PIDREGIONINFO returns valid value for pri_inheritance"); + "PROC_PIDREGIONINFO returns valid value for pri_inheritance"); T_EXPECT_EQ_UINT((preginfo.pri_behavior ^ VM_BEHAVIOR_DEFAULT), 0U, "PROC_PIDREGIONINFO returns valid value for pri_behavior"); T_EXPECT_EQ_UINT(preginfo.pri_user_wired_count, 0U, "PROC_PIDREGIONINFO returns valid value for pri_user_wired_count"); T_EXPECT_EQ_UINT(preginfo.pri_user_tag, 0U, "PROC_PIDREGIONINFO returns valid value for pri_user_tag"); T_EXPECT_NE_UINT((preginfo.pri_flags ^ (PROC_REGION_SUBMAP | PROC_REGION_SHARED)), 0U, - "PROC_PIDREGIONINFO returns valid value for pri_flags"); + "PROC_PIDREGIONINFO returns valid value for pri_flags"); T_EXPECT_EQ_UINT(preginfo.pri_pages_resident, 0U, "PROC_PIDREGIONINFO returns valid value for pri_pages_resident"); T_EXPECT_EQ_UINT(preginfo.pri_pages_shared_now_private, 0U, - "PROC_PIDREGIONINFO returns valid value for pri_pages_shared_now_private"); + "PROC_PIDREGIONINFO returns valid value for pri_pages_shared_now_private"); T_EXPECT_EQ_UINT(preginfo.pri_pages_swapped_out, 0U, 
"PROC_PIDREGIONINFO returns valid value for pri_pages_swapped_out"); T_EXPECT_EQ_UINT(preginfo.pri_pages_dirtied, 0U, "PROC_PIDREGIONINFO returns valid value for pri_pages_dirtied"); T_EXPECT_EQ_UINT(preginfo.pri_ref_count, 2U, "PROC_PIDREGIONINFO returns valid value for pri_ref_count"); T_EXPECT_EQ_UINT(preginfo.pri_shadow_depth, 1U, "PROC_PIDREGIONINFO returns valid value for pri_shadow_depth"); T_EXPECT_EQ_UINT(preginfo.pri_share_mode, (unsigned int)SM_COW, "PROC_PIDREGIONINFO returns valid value for pri_share_mode"); T_EXPECT_EQ_UINT(preginfo.pri_private_pages_resident, 0U, - "PROC_PIDREGIONINFO returns valid value for pri_private_pages_resident"); + "PROC_PIDREGIONINFO returns valid value for pri_private_pages_resident"); T_EXPECT_GE_UINT(preginfo.pri_shared_pages_resident, 0U, - "PROC_PIDREGIONINFO returns valid value for pri_shared_pages_resident"); + "PROC_PIDREGIONINFO returns valid value for pri_shared_pages_resident"); T_EXPECT_EQ_ULLONG(preginfo.pri_address, (uint64_t)map_tmp, "PROC_PIDREGIONINFO returns valid value for pri_addr"); T_EXPECT_NE_UINT(preginfo.pri_obj_id, 0U, "PROC_PIDREGIONINFO returns valid value for pri_obj_id"); T_EXPECT_EQ_ULLONG(preginfo.pri_size, (unsigned long long)map_tmp_sz, "PROC_PIDREGIONINFO returns valid value for pri_size"); @@ -1356,9 +1362,9 @@ T_DECL(proc_info_proc_pidregioninfo, } T_DECL(proc_info_proc_pidregionpathinfo, - "Test to verify PROC_PIDREGIONPATHINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_INSTALLEDUSEROS)) + "Test to verify PROC_PIDREGIONPATHINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_INSTALLEDUSEROS)) { void * proc_info[3]; proc_info_caller(PREGINFO_PATH, proc_info, NULL); @@ -1366,75 +1372,75 @@ T_DECL(proc_info_proc_pidregionpathinfo, struct proc_regionwithpathinfo preginfo_path = *((struct proc_regionwithpathinfo *)proc_info[0]); /* * map_tmp isn't a struct like the rest of our ret_structs, but we sneak it back because we need it - */ + */ void *map_tmp = proc_info[1]; vm_map_size_t map_tmp_sz = (vm_map_size_t)(uintptr_t)proc_info[2]; T_EXPECT_EQ_ULLONG(preginfo_path.prp_prinfo.pri_offset, (uint64_t)PAGE_SIZE, - "PROC_PIDREGIONPATHINFO returns valid value for pri_offset"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_offset"); T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_protection ^ (VM_PROT_READ | VM_PROT_WRITE)), 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_protection, expected read/write only"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_protection, expected read/write only"); T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_max_protection & (VM_PROT_READ | VM_PROT_WRITE)), - (unsigned int)(VM_PROT_READ | VM_PROT_WRITE), - "PROC_PIDREGIONPATHINFO returns valid value for pri_max_protection"); + (unsigned int)(VM_PROT_READ | VM_PROT_WRITE), + "PROC_PIDREGIONPATHINFO returns valid value for pri_max_protection"); T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_inheritance ^ VM_INHERIT_COPY), 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_inheritance"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_inheritance"); T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_behavior ^ VM_BEHAVIOR_DEFAULT), 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_behavior"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_behavior"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_user_wired_count, 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_user_wired_count"); + 
"PROC_PIDREGIONPATHINFO returns valid value for pri_user_wired_count"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_user_tag, 0U, "PROC_PIDREGIONPATHINFO returns valid value for pri_user_tag"); T_EXPECT_NE_UINT((preginfo_path.prp_prinfo.pri_flags ^ (PROC_REGION_SUBMAP | PROC_REGION_SHARED)), 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_flags"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_flags"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_pages_resident"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_pages_resident"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_shared_now_private, 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_pages_shared_now_private"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_pages_shared_now_private"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_swapped_out, 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_pages_swapped_out"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_pages_swapped_out"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_dirtied, 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_pages_dirtied"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_pages_dirtied"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_ref_count, 2U, "PROC_PIDREGIONPATHINFO returns valid value for pri_ref_count"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_shadow_depth, 1U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_shadow_depth"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_shadow_depth"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_share_mode, (unsigned int)SM_COW, - "PROC_PIDREGIONPATHINFO returns valid value for pri_share_mode"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_share_mode"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_private_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_private_pages_resident"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_private_pages_resident"); T_EXPECT_GE_UINT(preginfo_path.prp_prinfo.pri_shared_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO returns valid value for pri_shared_pages_resident"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_shared_pages_resident"); T_EXPECT_EQ_ULLONG(preginfo_path.prp_prinfo.pri_address, (uint64_t)map_tmp, - "PROC_PIDREGIONPATHINFO returns valid value for pri_addr"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_addr"); T_EXPECT_NE_UINT(preginfo_path.prp_prinfo.pri_obj_id, 0U, "PROC_PIDREGIONPATHINFO returns valid value for pri_obj_id"); T_EXPECT_EQ_ULLONG(preginfo_path.prp_prinfo.pri_size, (uint64_t)map_tmp_sz, - "PROC_PIDREGIONPATHINFO returns valid value for pri_size"); + "PROC_PIDREGIONPATHINFO returns valid value for pri_size"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_depth, 0U, "PROC_PIDREGIONPATHINFO returns valid value for pri_depth"); T_EXPECT_EQ_INT(preginfo_path.prp_vip.vip_vi.vi_type, VREG, "PROC_PIDREGIONPATHINFO returns valid value for vi_type"); T_EXPECT_EQ_INT(preginfo_path.prp_vip.vip_vi.vi_pad, 0, "PROC_PIDREGIONPATHINFO returns valid value for vi_pad"); T_EXPECT_NE_INT(preginfo_path.prp_vip.vip_vi.vi_fsid.val[0], 0, - "PROC_PIDREGIONPATHINFO returns valid value for vi_fsid.val[0]"); + "PROC_PIDREGIONPATHINFO returns valid value for vi_fsid.val[0]"); T_EXPECT_NE_INT(preginfo_path.prp_vip.vip_vi.vi_fsid.val[1], 0, - "PROC_PIDREGIONPATHINFO returns valid value for vi_fsid.val[1]"); + "PROC_PIDREGIONPATHINFO returns valid 
value for vi_fsid.val[1]"); T_EXPECT_NE_PTR((void *)(strcasestr(preginfo_path.prp_vip.vip_path, CONF_TMP_FILE_PFX)), NULL, - "PROC_PIDREGIONPATHINFO returns valid value for vi_path"); + "PROC_PIDREGIONPATHINFO returns valid value for vi_path"); /* * Basic sanity checks for vnode stat returned by the API */ T_EXPECT_NE_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_dev, 0U, "PROC_PIDREGIONPATHINFO returns valid value for vst_dev"); T_EXPECT_EQ_INT(((preginfo_path.prp_vip.vip_vi.vi_stat.vst_mode & S_IFMT) ^ S_IFREG), 0, - "PROC_PIDREGIONPATHINFO returns valid value for vst_mode"); + "PROC_PIDREGIONPATHINFO returns valid value for vst_mode"); T_EXPECT_EQ_USHORT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_nlink, (unsigned short)0, /* the file was unlink()'d! */ - "PROC_PIDREGIONPATHINFO returns valid value for vst_nlink"); + "PROC_PIDREGIONPATHINFO returns valid value for vst_nlink"); T_EXPECT_NE_ULLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_ino, 0ULL, - "PROC_PIDREGIONPATHINFO returns valid value for vst_ino"); + "PROC_PIDREGIONPATHINFO returns valid value for vst_ino"); T_EXPECT_EQ_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_uid, 0U, "PROC_PIDREGIONPATHINFO returns valid value for vst_uid"); T_EXPECT_EQ_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_gid, 0U, "PROC_PIDREGIONPATHINFO returns valid value for vst_gid"); T_EXPECT_GE_LLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_size, (off_t)CONF_BLK_SIZE, - "PROC_PIDREGIONPATHINFO returns valid value for vst_size"); + "PROC_PIDREGIONPATHINFO returns valid value for vst_size"); T_EXPECT_GE_LLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_blocks, 1LL, - "PROC_PIDREGIONPATHINFO returns valid value for vst_blocks"); + "PROC_PIDREGIONPATHINFO returns valid value for vst_blocks"); T_EXPECT_GE_INT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_blksize, CONF_BLK_SIZE, - "PROC_PIDREGIONPATHINFO returns valid value for vst_blksize"); + "PROC_PIDREGIONPATHINFO returns valid value for vst_blksize"); int ret = 0; ret = munmap(map_tmp, (size_t)map_tmp_sz); @@ -1444,9 +1450,9 @@ T_DECL(proc_info_proc_pidregionpathinfo, } T_DECL(proc_info_proc_pidregionpathinfo2, - "Test to verify PROC_PIDREGIONPATHINFO2 returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_INSTALLEDUSEROS)) + "Test to verify PROC_PIDREGIONPATHINFO2 returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_INSTALLEDUSEROS)) { void * proc_info[3]; proc_info_caller(PREGINFO_PATH_2, proc_info, NULL); @@ -1454,80 +1460,80 @@ T_DECL(proc_info_proc_pidregionpathinfo2, struct proc_regionwithpathinfo preginfo_path = *((struct proc_regionwithpathinfo *)proc_info[0]); /* * map_tmp isn't a struct like the rest of our ret_structs, but we sneak it back because we need it - */ + */ void *map_tmp = proc_info[1]; vm_map_size_t map_tmp_sz = (vm_map_size_t)(uintptr_t)proc_info[2]; T_EXPECT_EQ_ULLONG(preginfo_path.prp_prinfo.pri_offset, (uint64_t)PAGE_SIZE, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_offset"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_offset"); T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_protection ^ (VM_PROT_READ | VM_PROT_WRITE)), 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_protection, expected read/write only"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_protection, expected read/write only"); T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_max_protection & (VM_PROT_READ | VM_PROT_WRITE)), - (unsigned int)(VM_PROT_READ | VM_PROT_WRITE), - "PROC_PIDREGIONPATHINFO2 returns valid 
value for pri_max_protection"); + (unsigned int)(VM_PROT_READ | VM_PROT_WRITE), + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_max_protection"); T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_inheritance ^ VM_INHERIT_COPY), 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_inheritance"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_inheritance"); T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_behavior ^ VM_BEHAVIOR_DEFAULT), 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_behavior"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_behavior"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_user_wired_count, 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_user_wired_count"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_user_wired_count"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_user_tag, 0U, "PROC_PIDREGIONPATHINFO2 returns valid value for pri_user_tag"); T_EXPECT_NE_UINT((preginfo_path.prp_prinfo.pri_flags ^ (PROC_REGION_SUBMAP | PROC_REGION_SHARED)), 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_flags"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_flags"); /* * Following values are hard-coded to be zero in source */ T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_pages_resident"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_pages_resident"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_shared_now_private, 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_pages_shared_now_private"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_pages_shared_now_private"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_swapped_out, 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_pages_swapped_out"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_pages_swapped_out"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_dirtied, 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_pages_dirtied"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_pages_dirtied"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_ref_count, 0U, "PROC_PIDREGIONPATHINFO2 returns valid value for pri_ref_count"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_shadow_depth, 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_shadow_depth"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_shadow_depth"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_share_mode, 0U, "PROC_PIDREGIONPATHINFO2 returns valid value for pri_share_mode"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_private_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_private_pages_resident"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_private_pages_resident"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_shared_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_shared_pages_resident"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_shared_pages_resident"); T_EXPECT_EQ_ULLONG(preginfo_path.prp_prinfo.pri_address, (uint64_t)map_tmp, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_addr"); + "PROC_PIDREGIONPATHINFO2 returns valid value for pri_addr"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_obj_id, 0U, "PROC_PIDREGIONPATHINFO2 returns valid value for pri_obj_id"); T_EXPECT_EQ_ULLONG(preginfo_path.prp_prinfo.pri_size, (unsigned long long)map_tmp_sz, - "PROC_PIDREGIONPATHINFO2 returns valid value for pri_size"); + "PROC_PIDREGIONPATHINFO2 returns 
valid value for pri_size"); T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_depth, 0U, "PROC_PIDREGIONPATHINFO2 returns valid value for pri_depth"); T_EXPECT_EQ_INT(preginfo_path.prp_vip.vip_vi.vi_type, VREG, "PROC_PIDREGIONPATHINFO2 returns valid value for vi_type"); T_EXPECT_EQ_INT(preginfo_path.prp_vip.vip_vi.vi_pad, 0, "PROC_PIDREGIONPATHINFO2 returns valid value for vi_pad"); T_EXPECT_NE_INT(preginfo_path.prp_vip.vip_vi.vi_fsid.val[0], 0, - "PROC_PIDREGIONPATHINFO2 returns valid value for vi_fsid.val[0]:%d", - preginfo_path.prp_vip.vip_vi.vi_fsid.val[0]); + "PROC_PIDREGIONPATHINFO2 returns valid value for vi_fsid.val[0]:%d", + preginfo_path.prp_vip.vip_vi.vi_fsid.val[0]); T_EXPECT_NE_INT(preginfo_path.prp_vip.vip_vi.vi_fsid.val[1], 0, - "PROC_PIDREGIONPATHINFO2 returns valid value for vi_fsid.val[1]:%d", - preginfo_path.prp_vip.vip_vi.vi_fsid.val[1]); + "PROC_PIDREGIONPATHINFO2 returns valid value for vi_fsid.val[1]:%d", + preginfo_path.prp_vip.vip_vi.vi_fsid.val[1]); T_EXPECT_NE_PTR((void *)(strcasestr(preginfo_path.prp_vip.vip_path, CONF_TMP_FILE_PFX)), NULL, - "PROC_PIDREGIONPATHINFO2 returns valid value for vi_path"); + "PROC_PIDREGIONPATHINFO2 returns valid value for vi_path"); /* * Basic sanity checks for vnode stat returned by the API */ T_EXPECT_NE_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_dev, 0U, "PROC_PIDREGIONPATHINFO2 returns valid value for vst_dev"); T_EXPECT_EQ_UINT(((preginfo_path.prp_vip.vip_vi.vi_stat.vst_mode & S_IFMT) ^ S_IFREG), 0, - "PROC_PIDREGIONPATHINFO2 returns valid value for vst_mode"); + "PROC_PIDREGIONPATHINFO2 returns valid value for vst_mode"); T_EXPECT_EQ_USHORT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_nlink, (unsigned short)0, /* the file was unlink()'d! */ - "PROC_PIDREGIONPATHINFO2 returns valid value for vst_nlink"); + "PROC_PIDREGIONPATHINFO2 returns valid value for vst_nlink"); T_EXPECT_NE_ULLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_ino, 0ULL, - "PROC_PIDREGIONPATHINFO2 returns valid value for vst_ino"); + "PROC_PIDREGIONPATHINFO2 returns valid value for vst_ino"); T_EXPECT_EQ_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_uid, 0U, "PROC_PIDREGIONPATHINFO2 returns valid value for vst_uid"); T_EXPECT_EQ_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_gid, 0U, "PROC_PIDREGIONPATHINFO2 returns valid value for vst_gid"); T_EXPECT_GE_LLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_size, (off_t)CONF_BLK_SIZE, - "PROC_PIDREGIONPATHINFO2 returns valid value for vst_size"); + "PROC_PIDREGIONPATHINFO2 returns valid value for vst_size"); T_EXPECT_GE_LLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_blocks, 1LL, - "PROC_PIDREGIONPATHINFO2 returns valid value for vst_blocks"); + "PROC_PIDREGIONPATHINFO2 returns valid value for vst_blocks"); T_EXPECT_GE_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_blksize, CONF_BLK_SIZE, - "PROC_PIDREGIONPATHINFO2 returns valid value for vst_blksize"); + "PROC_PIDREGIONPATHINFO2 returns valid value for vst_blksize"); int ret = 0; ret = munmap(map_tmp, (size_t)map_tmp_sz); @@ -1537,86 +1543,22 @@ T_DECL(proc_info_proc_pidregionpathinfo2, } T_DECL(proc_info_proc_pidregionpathinfo3, - "Test to verify PROC_PIDREGIONPATHINFO3 returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_INSTALLEDUSEROS)) + "Test to verify PROC_PIDREGIONPATHINFO3 returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_INSTALLEDUSEROS)) { - void * proc_info[3]; + void * proc_info[5]; proc_info_caller(PREGINFO_PATH_3, proc_info, NULL); struct proc_regionwithpathinfo preginfo_path = 
*((struct proc_regionwithpathinfo *)proc_info[0]); void *map_tmp = proc_info[1]; vm_map_size_t map_tmp_sz = (vm_map_size_t)(uintptr_t)proc_info[2]; - T_EXPECT_GE_ULLONG(preginfo_path.prp_prinfo.pri_offset, (uint64_t)PAGE_SIZE, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_offset"); - T_EXPECT_NE_UINT((preginfo_path.prp_prinfo.pri_protection ^ (VM_PROT_WRITE | VM_PROT_EXECUTE)), 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_protection"); -#if defined(__arm__) || defined(__arm64__) - T_EXPECT_GT_UINT(preginfo_path.prp_prinfo.pri_max_protection, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_max_protection"); -#else - T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_max_protection ^ VM_PROT_ALL), 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_max_protection"); -#endif - T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_inheritance ^ VM_INHERIT_COPY), 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_inheritance"); - T_EXPECT_EQ_UINT((preginfo_path.prp_prinfo.pri_behavior ^ VM_BEHAVIOR_DEFAULT), 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_behavior"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_user_wired_count, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_user_wired_count"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_user_tag, 0U, "PROC_PIDREGIONPATHINFO3 returns valid value for pri_user_tag"); - T_EXPECT_NE_UINT((preginfo_path.prp_prinfo.pri_flags ^ (PROC_REGION_SUBMAP | PROC_REGION_SHARED)), 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_flags"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_pages_resident"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_shared_now_private, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_pages_shared_now_private"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_swapped_out, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_pages_swapped_out"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_pages_dirtied, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_pages_dirtied"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_ref_count, 0U, "PROC_PIDREGIONPATHINFO3 returns valid value for pri_ref_count"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_shadow_depth, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_shadow_depth"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_share_mode, 0U, "PROC_PIDREGIONPATHINFO3 returns valid value for pri_share_mode"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_private_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_private_pages_resident"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_shared_pages_resident, 0U, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_shared_pages_resident"); - T_EXPECT_NE_ULLONG(preginfo_path.prp_prinfo.pri_address, 0ULL, "PROC_PIDREGIONPATHINFO3 returns valid value for pri_addr"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_obj_id, 0U, "PROC_PIDREGIONPATHINFO3 returns valid value for pri_obj_id"); - T_EXPECT_GE_ULLONG(preginfo_path.prp_prinfo.pri_size, (uint64_t)map_tmp_sz, - "PROC_PIDREGIONPATHINFO3 returns valid value for pri_size"); - T_EXPECT_EQ_UINT(preginfo_path.prp_prinfo.pri_depth, 0U, "PROC_PIDREGIONPATHINFO3 returns valid value for pri_depth"); - - T_EXPECT_EQ_INT(preginfo_path.prp_vip.vip_vi.vi_type, VREG, "PROC_PIDREGIONPATHINFO3 returns valid value for vi_type"); - 
T_EXPECT_EQ_INT(preginfo_path.prp_vip.vip_vi.vi_pad, 0, "PROC_PIDREGIONPATHINFO3 returns valid value for vi_pad"); - T_EXPECT_NE_INT(preginfo_path.prp_vip.vip_vi.vi_fsid.val[0], 0, - "PROC_PIDREGIONPATHINFO3 returns valid value for vi_fsid.val[0]"); - T_EXPECT_NE_INT(preginfo_path.prp_vip.vip_vi.vi_fsid.val[1], 0, - "PROC_PIDREGIONPATHINFO3 returns valid value for vi_fsid.val[1]"); - /* - * Basic sanity checks for vnode stat returned by the API - */ - T_EXPECT_NE_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_dev, 0U, "PROC_PIDREGIONPATHINFO3 returns valid value for vst_dev"); - T_EXPECT_EQ_UINT(((preginfo_path.prp_vip.vip_vi.vi_stat.vst_mode & S_IFMT) ^ S_IFREG), 0, - "PROC_PIDREGIONPATHINFO3 returns valid value for vst_mode"); - T_EXPECT_EQ_USHORT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_nlink, (unsigned short)1, /* the file was unlink()'d _after_ calling proc_info */ - "PROC_PIDREGIONPATHINFO3 returns valid value for vst_nlink"); - T_EXPECT_NE_ULLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_ino, 0ULL, - "PROC_PIDREGIONPATHINFO3 returns valid value for vst_ino"); - /* - * No way to confirm actual ownership or binary. Just log the value - */ - T_EXPECT_GE_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_uid, 0U, "PROC_PIDREGIONPATHINFO3 returns valid value for vst_uid"); - T_EXPECT_GE_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_gid, 0U, "PROC_PIDREGIONPATHINFO3 returns valid value for vst_gid"); - T_EXPECT_GE_LLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_size, (off_t)CONF_BLK_SIZE, - "PROC_PIDREGIONPATHINFO3 returns valid value for vst_size"); - T_EXPECT_GE_LLONG(preginfo_path.prp_vip.vip_vi.vi_stat.vst_blocks, 1LL, - "PROC_PIDREGIONPATHINFO3 returns valid value for vst_blocks"); - T_EXPECT_GE_UINT(preginfo_path.prp_vip.vip_vi.vi_stat.vst_blksize, CONF_BLK_SIZE, - "PROC_PIDREGIONPATHINFO3 returns valid value for vst_blksize"); + /* The *info3 version of this call returns any open file that lives on the same file system */ + T_EXPECT_EQ_INT(preginfo_path.prp_vip.vip_vi.vi_fsid.val[0], (int)(uintptr_t)proc_info[3], + "PROC_PIDREGIONPATHINFO3 returns valid value for vi_fsid.val[0]"); + T_EXPECT_EQ_INT(preginfo_path.prp_vip.vip_vi.vi_fsid.val[1], (int)(uintptr_t)proc_info[4], + "PROC_PIDREGIONPATHINFO3 returns valid value for vi_fsid.val[1]"); int ret = 0; ret = munmap(map_tmp, (size_t)map_tmp_sz); @@ -1626,9 +1568,9 @@ T_DECL(proc_info_proc_pidregionpathinfo3, } T_DECL(proc_info_proc_pidvnodepathinfo, - "Test to verify PROC_PIDVNODEPATHINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDVNODEPATHINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { void * proc_info[1]; proc_info_caller(PVNINFO, proc_info, NULL); @@ -1643,16 +1585,16 @@ T_DECL(proc_info_proc_pidvnodepathinfo, */ T_EXPECT_NE_UINT(pvninfo.pvi_cdir.vip_vi.vi_stat.vst_dev, 0U, "PROC_PIDVNODEPATHINFO returns valid value for vst_dev"); T_EXPECT_EQ_INT(((pvninfo.pvi_cdir.vip_vi.vi_stat.vst_mode & S_IFMT) ^ S_IFDIR), 0, - "PROC_PIDVNODEPATHINFO returns valid value for vst_mode"); + "PROC_PIDVNODEPATHINFO returns valid value for vst_mode"); T_EXPECT_GE_USHORT(pvninfo.pvi_cdir.vip_vi.vi_stat.vst_nlink, (unsigned short)2, - "PROC_PIDVNODEPATHINFO returns valid value for vst_nlink"); + "PROC_PIDVNODEPATHINFO returns valid value for vst_nlink"); T_EXPECT_NE_ULLONG(pvninfo.pvi_cdir.vip_vi.vi_stat.vst_ino, 0ULL, "PROC_PIDVNODEPATHINFO returns valid value for vst_ino"); 
T_EXPECT_GE_UINT(pvninfo.pvi_cdir.vip_vi.vi_stat.vst_uid, 0U, "PROC_PIDVNODEPATHINFO returns valid value for vst_uid"); T_EXPECT_GE_UINT(pvninfo.pvi_cdir.vip_vi.vi_stat.vst_gid, 0U, "PROC_PIDVNODEPATHINFO returns valid value for vst_gid"); T_EXPECT_GT_LLONG(pvninfo.pvi_cdir.vip_vi.vi_stat.vst_size, 0LL, "PROC_PIDVNODEPATHINFO returns valid value for vst_size"); T_EXPECT_GE_LLONG(pvninfo.pvi_cdir.vip_vi.vi_stat.vst_blocks, 0LL, "PROC_PIDVNODEPATHINFO returns valid value for vst_blocks"); T_EXPECT_GE_UINT(pvninfo.pvi_cdir.vip_vi.vi_stat.vst_blksize, CONF_BLK_SIZE, - "PROC_PIDVNODEPATHINFO returns valid value for vst_blksize"); + "PROC_PIDVNODEPATHINFO returns valid value for vst_blksize"); free_proc_info(proc_info, 1); } @@ -1662,9 +1604,9 @@ T_DECL(proc_info_proc_pidvnodepathinfo, */ T_DECL(proc_info_pidinfo_proc_pidlistfds, - "proc_info API tests to verify PROC_INFO_CALL_PIDINFO/PROC_PIDLISTFDS", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "proc_info API tests to verify PROC_INFO_CALL_PIDINFO/PROC_PIDLISTFDS", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { int retval; int orig_nfiles = 0; @@ -1676,10 +1618,10 @@ T_DECL(proc_info_pidinfo_proc_pidlistfds, T_EXPECT_GE_INT(orig_nfiles, CONF_OPN_FILE_COUNT, "The number of open files is lower than expected."); /* - * Allocate a buffer of expected size + 1 to ensure that - * the API still returns expected size - * i.e. 3 + 1 = 4 open fds - */ + * Allocate a buffer of expected size + 1 to ensure that + * the API still returns expected size + * i.e. 3 + 1 = 4 open fds + */ T_LOG("Test to verify PROC_PIDLISTFDS returns valid fd information"); fd_info = malloc(sizeof(*fd_info) * 5); tmp_fd = CONF_TMP_FILE_OPEN(NULL); @@ -1688,7 +1630,7 @@ T_DECL(proc_info_pidinfo_proc_pidlistfds, T_EXPECT_POSIX_SUCCESS(tmp_fd, "open() for PROC_PIDLISTFDS"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, getpid(), PROC_PIDLISTFDS, (uint32_t)0, (user_addr_t)fd_info, - (uint32_t)(sizeof(*fd_info) * 5)); + (uint32_t)(sizeof(*fd_info) * 5)); retval = retval / (int)sizeof(struct proc_fdinfo); close(tmp_fd); @@ -1711,15 +1653,15 @@ T_DECL(proc_info_pidinfo_proc_pidlistfds, } T_DECL(proc_info_proc_pidpathinfo, - "Test to verify PROC_PIDPATHINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDPATHINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { char * pid_path = NULL; pid_path = malloc(sizeof(char) * PROC_PIDPATHINFO_MAXSIZE); T_EXPECT_NOTNULL(pid_path, "malloc for PROC_PIDPATHINFO"); int retval = __proc_info(PROC_INFO_CALL_PIDINFO, getpid(), PROC_PIDPATHINFO, (uint64_t)0, (user_addr_t)pid_path, - (uint32_t)PROC_PIDPATHINFO_MAXSIZE); + (uint32_t)PROC_PIDPATHINFO_MAXSIZE); T_EXPECT_EQ_INT(retval, 0, "__proc_info call for PROC_PIDPATHINFO"); T_EXPECT_NE_PTR((void *)(strcasestr(pid_path, CONF_CMD_NAME)), NULL, "PROC_PIDPATHINFO returns valid value for pid_path"); @@ -1728,9 +1670,9 @@ T_DECL(proc_info_proc_pidpathinfo, } T_DECL(proc_info_proc_pidlistfileports, - "Test to verify PROC_PIDLISTFILEPORTS returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDLISTFILEPORTS returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { struct proc_fileportinfo * fileport_info = NULL; mach_port_t tmp_file_port = MACH_PORT_NULL; @@ -1749,23 +1691,23 @@ T_DECL(proc_info_proc_pidlistfileports, */ retval =
__proc_info(PROC_INFO_CALL_PIDINFO, getpid(), PROC_PIDLISTFILEPORTS, (uint64_t)0, (user_addr_t)0, (uint32_t)0); T_EXPECT_GE_INT(retval / (int)sizeof(fileport_info), 1, - "__proc_info call for PROC_PIDLISTFILEPORTS to get total ports in parent"); + "__proc_info call for PROC_PIDLISTFILEPORTS to get total ports in parent"); /* * Child doesn't have any fileports, should return zero */ retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDLISTFILEPORTS, (uint64_t)0, (user_addr_t)0, (uint32_t)0); T_EXPECT_EQ_INT(retval / (int)sizeof(fileport_info), 0, - "__proc_info call for PROC_PIDLISTFILEPORTS to get total ports in child"); + "__proc_info call for PROC_PIDLISTFILEPORTS to get total ports in child"); fileport_info = malloc(sizeof(*fileport_info) * (size_t)retval); retval = __proc_info(PROC_INFO_CALL_PIDINFO, getpid(), PROC_PIDLISTFILEPORTS, (uint64_t)0, (user_addr_t)fileport_info, - (uint32_t)sizeof(*fileport_info)); + (uint32_t)sizeof(*fileport_info)); T_EXPECT_EQ_INT(retval, (int)sizeof(*fileport_info), "__proc_info call for PROC_PIDLISTFILEPORTS"); T_EXPECT_NE_UINT(fileport_info->proc_fileport, (uint32_t)0, "PROC_PIDLISTFILEPORTS returns valid value for proc_fileport"); T_EXPECT_EQ_UINT(fileport_info->proc_fdtype, (uint32_t)PROX_FDTYPE_VNODE, - "PROC_PIDLISTFILEPORTS returns valid value for proc_fdtype"); + "PROC_PIDLISTFILEPORTS returns valid value for proc_fdtype"); /* * Cleanup for the fileport @@ -1780,9 +1722,9 @@ T_DECL(proc_info_proc_pidlistfileports, } T_DECL(proc_info_proc_pidcoalitioninfo, - "Test to verify PROC_PIDCOALITIONINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDCOALITIONINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { proc_config_t proc_config = spawn_child_processes(1, proc_info_call_pidinfo_handler); int child_pid = proc_config->child_pids[0]; @@ -1790,10 +1732,10 @@ T_DECL(proc_info_proc_pidcoalitioninfo, struct proc_pidcoalitioninfo pci_parent; struct proc_pidcoalitioninfo pci_child; int retval = __proc_info(PROC_INFO_CALL_PIDINFO, getpid(), PROC_PIDCOALITIONINFO, (uint64_t)0, (user_addr_t)&pci_parent, - (uint32_t)sizeof(pci_parent)); + (uint32_t)sizeof(pci_parent)); T_EXPECT_EQ_INT(retval, (int)sizeof(pci_parent), "__proc_info call for PROC_PIDCOALITIONINFO (parent)"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDCOALITIONINFO, (uint64_t)0, (user_addr_t)&pci_child, - (uint32_t)sizeof(pci_child)); + (uint32_t)sizeof(pci_child)); T_EXPECT_EQ_INT(retval, (int)sizeof(pci_child), "__proc_info call for PROC_PIDCOALITIONINFO (child)"); /* @@ -1801,16 +1743,16 @@ T_DECL(proc_info_proc_pidcoalitioninfo, */ for (int i = 0; i < COALITION_NUM_TYPES; i++) { T_EXPECT_EQ_ULLONG(pci_parent.coalition_id[i], pci_child.coalition_id[i], - "PROC_PIDCOALITIONINFO returns valid value for coalition_id"); + "PROC_PIDCOALITIONINFO returns valid value for coalition_id"); } free_proc_config(proc_config); } T_DECL(proc_info_proc_pidworkqueueinfo, - "Test to verify PROC_PIDWORKQUEUEINFO returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDWORKQUEUEINFO returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { proc_config_t proc_config = spawn_child_processes(1, proc_info_call_pidinfo_handler); int child_pid = proc_config->child_pids[0]; @@ -1819,7 +1761,7 @@ T_DECL(proc_info_proc_pidworkqueueinfo, struct 
proc_workqueueinfo pwqinfo; usleep(10000); int retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDWORKQUEUEINFO, (uint64_t)0, (user_addr_t)&pwqinfo, - (uint32_t)sizeof(pwqinfo)); + (uint32_t)sizeof(pwqinfo)); T_EXPECT_EQ_INT(retval, (int)sizeof(pwqinfo), "__proc_info call for PROC_PIDWORKQUEUEINFO"); int ncpu = 0; @@ -1828,16 +1770,16 @@ T_DECL(proc_info_proc_pidworkqueueinfo, T_EXPECT_EQ_INT(retval, 0, "sysctl() for PROC_PIDWORKQUEUEINFO"); T_EXPECT_GE_UINT(pwqinfo.pwq_nthreads, (uint32_t)1, "PROC_PIDWORKQUEUEINFO returns valid value for pwq_nthreads"); T_EXPECT_GE_UINT(pwqinfo.pwq_blockedthreads + pwqinfo.pwq_runthreads, (uint32_t)1, - "PROC_PIDWORKQUEUEINFO returns valid value for pwqinfo.pwq_runthreads/pwq_blockedthreads"); + "PROC_PIDWORKQUEUEINFO returns valid value for pwqinfo.pwq_runthreads/pwq_blockedthreads"); T_EXPECT_EQ_UINT(pwqinfo.pwq_state, (uint32_t)0, "PROC_PIDWORKQUEUEINFO returns valid value for pwq_state"); kill_child_processes(proc_config); free_proc_config(proc_config); } T_DECL(proc_info_proc_pidnoteexit, - "Test to verify PROC_PIDNOTEEXIT returns valid information about the process", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to verify PROC_PIDNOTEEXIT returns valid information about the process", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { /* * Ask the child to close pipe and quit, cleanup pipes for parent @@ -1848,7 +1790,7 @@ T_DECL(proc_info_proc_pidnoteexit, uint32_t exit_data = 0; int retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDNOTEEXIT, (uint64_t)(NOTE_EXITSTATUS | NOTE_EXIT_DETAIL), - (user_addr_t)&exit_data, (uint32_t)sizeof(exit_data)); + (user_addr_t)&exit_data, (uint32_t)sizeof(exit_data)); T_EXPECT_EQ_INT(retval, (int)sizeof(exit_data), "__proc_info call for PROC_PIDNOTEEXIT"); T_EXPECT_EQ_UINT(exit_data, 0U, "PROC_PIDNOTEEXIT returned valid value for exit_data"); @@ -1857,9 +1799,9 @@ T_DECL(proc_info_proc_pidnoteexit, } T_DECL(proc_info_negative_tests, - "Test to validate PROC_INFO_CALL_PIDINFO for invalid arguments", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "Test to validate PROC_INFO_CALL_PIDINFO for invalid arguments", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { proc_config_t proc_config = spawn_child_processes(1, proc_info_call_pidinfo_handler); int child_pid = proc_config->child_pids[0]; @@ -1869,17 +1811,17 @@ T_DECL(proc_info_negative_tests, __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDNOTEEXIT, (uint64_t)0, (user_addr_t)&exit_data, (uint32_t)0); T_EXPECT_EQ_INT(errno, ENOMEM, "PROC_INFO_CALL_PIDINFO call should fail with ENOMEM if buffersize is zero"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, PROC_PIDPATHINFO, (uint64_t)0, (user_addr_t)&exit_data, - (uint32_t)PROC_PIDPATHINFO_MAXSIZE + 1); + (uint32_t)PROC_PIDPATHINFO_MAXSIZE + 1); T_EXPECT_EQ_INT(errno, EOVERFLOW, - "PROC_INFO_CALL_PIDINFO call should fail with EOVERFLOW if buffersize is larger than PROC_PIDPATHINFO_MAXSIZE"); + "PROC_INFO_CALL_PIDINFO call should fail with EOVERFLOW if buffersize is larger than PROC_PIDPATHINFO_MAXSIZE"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, -1, PROC_PIDNOTEEXIT, (uint64_t)0, (user_addr_t)&exit_data, - (uint32_t)sizeof(exit_data)); + (uint32_t)sizeof(exit_data)); T_EXPECT_EQ_INT(errno, ESRCH, "PROC_INFO_CALL_PIDINFO call should fail with ESRCH for invalid process id"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, child_pid, -1U, (uint64_t)0, (user_addr_t)&exit_data, (uint32_t)sizeof(exit_data)); T_EXPECT_EQ_INT(errno, EINVAL, 
"PROC_INFO_CALL_PIDINFO call should fail with EINVAL for invalid flavor"); retval = __proc_info(PROC_INFO_CALL_PIDINFO, 0, PROC_PIDWORKQUEUEINFO, (uint64_t)0, (user_addr_t)0, (uint32_t)0); T_EXPECT_EQ_INT(errno, EINVAL, - "PROC_INFO_CALL_PIDINFO call should fail with EINVAL if flavor is PROC_PIDWORKQUEUEINFO and pid=0"); + "PROC_INFO_CALL_PIDINFO call should fail with EINVAL if flavor is PROC_PIDWORKQUEUEINFO and pid=0"); free_proc_config(proc_config); } @@ -1972,12 +1914,12 @@ T_DECL(proc_list_uptrs, "the kernel should return any up-pointers it knows about */ uptr_names[cur_uptr] = "dynamic kqueue non-file-backed knote"; struct kevent_qos_s events_id[] = {{ - .filter = EVFILT_USER, - .ident = 1, - .flags = EV_ADD, - .qos = (int)_pthread_qos_class_encode(QOS_CLASS_DEFAULT, 0, 0), - .udata = uptrs[cur_uptr++] - }}; + .filter = EVFILT_USER, + .ident = 1, + .flags = EV_ADD, + .qos = (int)_pthread_qos_class_encode(QOS_CLASS_DEFAULT, 0, 0), + .udata = uptrs[cur_uptr++] + }}; uptr_names[cur_uptr] = "dynamic kqueue ID"; kev_err = kevent_id(uptrs[cur_uptr++], events_id, 1, NULL, 0, NULL, NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_IMMEDIATE); @@ -2004,15 +1946,15 @@ T_DECL(proc_list_uptrs, "the kernel should return any up-pointers it knows about } } T_FAIL("unexpected up-pointer found: %#" PRIx64, uptrs_obs[i]); - next:; +next: ; if (found != -1) { T_PASS("found up-pointer for %s", uptr_names[found]); } } uint64_t up_overflow[2] = {0}; - uptrs_count = proc_list_uptrs(getpid(), up_overflow, sizeof(uint64_t)+1); - T_ASSERT_EQ(up_overflow[1], (uint64_t)0 , "overflow check"); + uptrs_count = proc_list_uptrs(getpid(), up_overflow, sizeof(uint64_t) + 1); + T_ASSERT_EQ(up_overflow[1], (uint64_t)0, "overflow check"); } #pragma mark dynamic kqueue info @@ -2027,12 +1969,12 @@ static void setup_kevent_id(kqueue_id_t id) { struct kevent_qos_s events_id[] = {{ - .filter = EVFILT_USER, - .ident = 1, - .flags = EV_ADD, - .qos = (int)_pthread_qos_class_encode(QOS_CLASS_DEFAULT, 0, 0), - .udata = EXPECTED_UDATA - }}; + .filter = EVFILT_USER, + .ident = 1, + .flags = EV_ADD, + .qos = (int)_pthread_qos_class_encode(QOS_CLASS_DEFAULT, 0, 0), + .udata = EXPECTED_UDATA + }}; int err = kevent_id(id, events_id, 1, NULL, 0, NULL, NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_IMMEDIATE); T_ASSERT_POSIX_SUCCESS(err, "register event with kevent_id"); diff --git a/tests/proc_info_list_kthreads.c b/tests/proc_info_list_kthreads.c index f7c410550..8af5647fe 100644 --- a/tests/proc_info_list_kthreads.c +++ b/tests/proc_info_list_kthreads.c @@ -19,14 +19,14 @@ #if TARGET_OS_OSX T_DECL(proc_info_list_kthreads, - "Test to verify PROC_PIDLISTTHREADIDS returns kernel thread IDs for pid 0", - T_META_ASROOT(true), - T_META_CHECK_LEAKS(false)) + "Test to verify PROC_PIDLISTTHREADIDS returns kernel thread IDs for pid 0", + T_META_ASROOT(true), + T_META_CHECK_LEAKS(false)) #else T_DECL(proc_info_list_kthreads, - "Test to verify PROC_PIDLISTTHREADIDS returns kernel thread IDs for pid 0", - T_META_ASROOT(false), - T_META_CHECK_LEAKS(false)) + "Test to verify PROC_PIDLISTTHREADIDS returns kernel thread IDs for pid 0", + T_META_ASROOT(false), + T_META_CHECK_LEAKS(false)) #endif /* TARGET_OS_OSX */ { int buf_used = 0; @@ -57,7 +57,7 @@ T_DECL(proc_info_list_kthreads, int expected_size = ti.pti_threadnum * (int)sizeof(uint64_t); /* tack on five extra to detect newly allocated threads */ - int allocated_size = expected_size + EXTRA_THREADS*(int)sizeof(uint64_t); + int allocated_size = expected_size + EXTRA_THREADS * (int)sizeof(uint64_t); uint64_t 
*thread_list_tmp = malloc((size_t)allocated_size); T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(thread_list_tmp, "malloc(size = %d) failed", allocated_size); @@ -87,7 +87,7 @@ T_DECL(proc_info_list_kthreads, * threads than we thought, try again! */ T_LOG("expected %d threads, but saw an extra thread: %d", - expected_size / (int)sizeof(uint64_t), buf_used / (int)sizeof(uint64_t)); + expected_size / (int)sizeof(uint64_t), buf_used / (int)sizeof(uint64_t)); free(thread_list_tmp); } } @@ -98,13 +98,12 @@ T_DECL(proc_info_list_kthreads, T_QUIET; T_ASSERT_GT(thread_count, 0, "thread_count > 0"); struct proc_threadinfo pthinfo_64; - for (int i = 0 ; i < thread_count ; i++) { + for (int i = 0; i < thread_count; i++) { bzero(&pthinfo_64, sizeof(struct proc_threadinfo)); int retval = proc_pidinfo(0, PROC_PIDTHREADID64INFO, thread_list[i], - (void *)&pthinfo_64, (uint32_t)sizeof(pthinfo_64)); + (void *)&pthinfo_64, (uint32_t)sizeof(pthinfo_64)); T_QUIET; T_WITH_ERRNO; T_EXPECT_GT(retval, 0, "proc_pidinfo(PROC_PIDTASKINFO) returned %d", retval); T_QUIET; T_EXPECT_EQ(retval, (int)sizeof(pthinfo_64), "proc_pidinfo(PROC_PIDTASKINFO) returned size %d == %lu", - retval, sizeof(pthinfo_64)); + retval, sizeof(pthinfo_64)); } } - diff --git a/tests/proc_info_udata.c b/tests/proc_info_udata.c index 3a37cbf37..4482e275c 100644 --- a/tests/proc_info_udata.c +++ b/tests/proc_info_udata.c @@ -10,7 +10,7 @@ T_DECL(proc_udata_info, "Get and set a proc udata token"){ int ret; udata = token; - ret = proc_udata_info(getpid(), PROC_UDATA_INFO_SET, &udata, sizeof (udata)); + ret = proc_udata_info(getpid(), PROC_UDATA_INFO_SET, &udata, sizeof(udata)); #if CONFIG_EMBEDDED T_WITH_ERRNO; @@ -23,24 +23,24 @@ T_DECL(proc_udata_info, "Get and set a proc udata token"){ T_LOG("udata set to %#llx", udata); - bzero(&udata, sizeof (udata)); - ret = proc_udata_info(getpid(), PROC_UDATA_INFO_GET, &udata, sizeof (udata)); + bzero(&udata, sizeof(udata)); + ret = proc_udata_info(getpid(), PROC_UDATA_INFO_GET, &udata, sizeof(udata)); T_WITH_ERRNO; T_ASSERT_EQ_INT(ret, 0, "proc_udata_info PROC_UDATA_INFO_GET"); T_ASSERT_EQ_ULLONG(token, udata, "proc_udata_info(): retrieved value matches token"); - ret = proc_udata_info(getpid(), PROC_UDATA_INFO_SET, &udata, sizeof (uint32_t)); + ret = proc_udata_info(getpid(), PROC_UDATA_INFO_SET, &udata, sizeof(uint32_t)); T_WITH_ERRNO; T_ASSERT_EQ_INT(ret, -1, "proc_udata_info PROC_UDATA_INFO_SET with invalid size returned -1"); T_ASSERT_EQ_INT(errno, EINVAL, "proc_udata_info PROC_UDATA_INFO_SET with invalid size returned EINVAL"); - ret = proc_udata_info(getppid(), PROC_UDATA_INFO_GET, &udata, sizeof (udata)); + ret = proc_udata_info(getppid(), PROC_UDATA_INFO_GET, &udata, sizeof(udata)); T_WITH_ERRNO; T_ASSERT_EQ_INT(ret, -1, "proc_udata_info PROC_UDATA_INFO_GET returned -1 on attempt against non-self pid"); T_ASSERT_EQ_INT(errno, EACCES, "proc_udata_info PROC_UDATA_INFO_GET set errno to EACCES on attempt against non-self pid"); - ret = proc_udata_info(getppid(), PROC_UDATA_INFO_SET, &udata, sizeof (udata)); + ret = proc_udata_info(getppid(), PROC_UDATA_INFO_SET, &udata, sizeof(udata)); T_WITH_ERRNO; T_ASSERT_EQ_INT(ret, -1, "proc_udata_info PROC_UDATA_INFO_SET returned -1 on attempt against non-self pid"); T_ASSERT_EQ_INT(errno, EACCES, "proc_udata_info PROC_UDATA_INFO_SET set errno to EACCES on attempt against non-self pid");
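/*
 * [Editor's illustrative sketch -- not part of the patch.] The retry loop in
 * proc_info_list_kthreads above deals with the race between sizing the buffer
 * and fetching the list: threads can appear in between, and a completely full
 * buffer may mean the result was truncated. A minimal standalone version of
 * that grow-and-retry pattern, assuming only proc_pidinfo(3) from libproc,
 * could look like this (list_tids() is a hypothetical helper):
 */
#include <libproc.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/proc_info.h>
#include <sys/types.h>

static uint64_t *
list_tids(pid_t pid, int *count)
{
	int cap = 16; /* arbitrary initial guess; doubles on every retry */

	for (;;) {
		uint64_t *tids = malloc((size_t)cap * sizeof(*tids));
		if (tids == NULL) {
			return NULL;
		}
		int used = proc_pidinfo(pid, PROC_PIDLISTTHREADIDS, 0, tids,
		    cap * (int)sizeof(*tids));
		if (used > 0 && used < cap * (int)sizeof(*tids)) {
			/* the buffer had slack, so the snapshot is complete */
			*count = used / (int)sizeof(*tids);
			return tids;
		}
		free(tids);
		if (used <= 0) {
			return NULL; /* hard failure from proc_pidinfo */
		}
		cap *= 2; /* full buffer: threads may have been missed, retry bigger */
	}
}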
diff --git a/tests/processor_info.c b/tests/processor_info.c new file mode 100644 index 000000000..b8eebdb35 --- /dev/null +++ b/tests/processor_info.c @@ -0,0 +1,103 @@ +#include <darwintest.h> +#include <inttypes.h> +#if __arm64__ +#include <mach/arm/processor_info.h> +#endif /* __arm64__ */ +#include <mach/mach.h> +#include <stdlib.h> +#include <unistd.h> + +T_GLOBAL_META(T_META_ASROOT(true)); + +T_DECL(processor_cpu_stat64, + "ensure 64-bit processor statistics are reported correctly", + T_META_NAMESPACE("xnu.arm")) +{ +#if !__arm64__ + T_SKIP("processor statistics only available on ARM"); +#else /* !__arm64__ */ + host_t host = mach_host_self(); + host_t priv_port = MACH_PORT_NULL; + + kern_return_t kr = host_get_host_priv_port(host, &priv_port); + T_QUIET; + T_ASSERT_MACH_SUCCESS(kr, "host_get_host_priv_port"); + T_QUIET; + T_ASSERT_NE(priv_port, MACH_PORT_NULL, "valid host priv port"); + + processor_port_array_t cpu_ports = NULL; + mach_msg_type_number_t cpu_count = 0; + kr = host_processors(priv_port, &cpu_ports, &cpu_count); + T_QUIET; + T_ASSERT_MACH_SUCCESS(kr, "host_processors"); + T_QUIET; + T_ASSERT_NOTNULL(cpu_ports, "valid processor port array"); + T_QUIET; + T_ASSERT_GT(cpu_count, (mach_msg_type_number_t)0, + "non-zero CPU count"); + + T_LOG("found %d CPUs", cpu_count); + + struct processor_cpu_stat64 *prestats = calloc(cpu_count, + sizeof(*prestats)); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_NOTNULL(prestats, "allocate space for stats (pre)"); + memset(prestats, 0xff, cpu_count * sizeof(*prestats)); + + for (int i = 0; i < (int)cpu_count; i++) { + mach_msg_type_number_t info_count = PROCESSOR_CPU_STAT64_COUNT; + kr = processor_info(cpu_ports[i], PROCESSOR_CPU_STAT64, &host, + (processor_info_t)&prestats[i], &info_count); + T_ASSERT_MACH_SUCCESS(kr, + "processor_info(%d, PROCESSOR_CPU_STAT64, ...)", i); + + T_QUIET; + T_ASSERT_EQ(info_count, PROCESSOR_CPU_STAT64_COUNT, + "received enough CPU statistics"); + } + + sleep(1); + + struct processor_cpu_stat64 *poststats = calloc(cpu_count, + sizeof(*poststats)); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_NOTNULL(poststats, "allocate space for stats (post)"); + + for (int i = 0; i < (int)cpu_count; i++) { + mach_msg_type_number_t info_count = PROCESSOR_CPU_STAT64_COUNT; + kr = processor_info(cpu_ports[i], PROCESSOR_CPU_STAT64, &host, + (processor_info_t)&poststats[i], &info_count); + T_ASSERT_MACH_SUCCESS(kr, + "processor_info(%d, PROCESSOR_CPU_STAT64, ...)", i); + + T_QUIET; + T_ASSERT_EQ(info_count, PROCESSOR_CPU_STAT64_COUNT, + "received enough CPU statistics"); + } + + for (int i = 0; i < (int)cpu_count; i++) { +#define CHECK_STAT_FIELD(field) \ + T_EXPECT_GE(poststats[i].field, prestats[i].field, \ + "CPU %d's " #field " is monotonically increasing (+%" PRIu64 \ + ")", i, poststats[i].field - prestats[i].field) + + CHECK_STAT_FIELD(irq_ex_cnt); + CHECK_STAT_FIELD(ipi_cnt); + CHECK_STAT_FIELD(timer_cnt); + CHECK_STAT_FIELD(undef_ex_cnt); + CHECK_STAT_FIELD(unaligned_cnt); + CHECK_STAT_FIELD(vfp_cnt); + CHECK_STAT_FIELD(vfp_shortv_cnt); + CHECK_STAT_FIELD(data_ex_cnt); + CHECK_STAT_FIELD(instr_ex_cnt); + CHECK_STAT_FIELD(pmi_cnt); + +#undef CHECK_STAT_FIELD + } + + free(prestats); + free(poststats); +#endif /* __arm64__ */ +} diff --git a/tests/pwrite_avoid_sigxfsz_28581610.c b/tests/pwrite_avoid_sigxfsz_28581610.c index 9c39e55ba..abeff39dc 100644 --- a/tests/pwrite_avoid_sigxfsz_28581610.c +++ b/tests/pwrite_avoid_sigxfsz_28581610.c @@ -24,7 +24,7 @@ xfsz_signal(__unused int signo) } T_DECL(pwrite, "Tests avoiding SIGXFSZ with pwrite and odd offsets", - T_META_ASROOT(true)) + T_META_ASROOT(true)) { int fd, x; off_t ret; @@ -56,40 +56,40 @@ T_DECL(pwrite, "Tests avoiding SIGXFSZ with pwrite and odd offsets", for (x = 0; offs[x] != 0; x++) { ret =
ftruncate(fd, offs[x]); T_ASSERT_TRUE(((ret == -1) && (errno == EINVAL)), - "negative offset %d", offs[x]); + "negative offset %d", offs[x]); } T_SETUPEND; /* we want to get the EFBIG errno but without a SIGXFSZ signal */ - T_EXPECTFAIL; + T_EXPECTFAIL; if (!sigsetjmp(xfsz_jmpbuf, 1)) { signal(SIGXFSZ, xfsz_signal); ret = pwrite(fd, buffer, sizeof buffer, LONG_MAX); T_ASSERT_TRUE(((ret == -1) && (errno == EFBIG)), - "large offset %d", 13); + "large offset %d", 13); } else { signal(SIGXFSZ, SIG_DFL); T_FAIL("%s unexpected SIGXFSZ with offset %lX", - "", LONG_MAX); + "", LONG_MAX); } /* Negative offsets are invalid, no SIGXFSZ signals required */ for (x = 0; offs[x] != 0; x++) { - /* only -1 gives the correct result */ - if (-1 != offs[x]) { - T_EXPECTFAIL; - } + /* only -1 gives the correct result */ + if (-1 != offs[x]) { + T_EXPECTFAIL; + } if (!sigsetjmp(xfsz_jmpbuf, 1)) { signal(SIGXFSZ, xfsz_signal); ret = pwrite(fd, buffer, sizeof buffer, offs[x]); T_ASSERT_TRUE(((ret == -1) && (errno == EINVAL)), - "negative offset %d", offs[x]); + "negative offset %d", offs[x]); } else { signal(SIGXFSZ, SIG_DFL); T_FAIL("%s unexpected SIGXFSZ with negative offset %d", - "", offs[x]); + "", offs[x]); } } diff --git a/tests/quiesce_counter.c b/tests/quiesce_counter.c index 563d13d04..c10df2ad9 100644 --- a/tests/quiesce_counter.c +++ b/tests/quiesce_counter.c @@ -44,7 +44,7 @@ #ifndef _COMM_PAGE_CPU_QUIESCENT_COUNTER T_DECL(test_quiescent_counter, "Validate that _COMM_PAGE_CPU_QUIESCENT_COUNTER increments", - T_META_CHECK_LEAKS(false)) + T_META_CHECK_LEAKS(false)) { T_SKIP("_COMM_PAGE_CPU_QUIESCENT_COUNTER doesn't exist on this system"); } @@ -52,7 +52,7 @@ T_DECL(test_quiescent_counter, "Validate that _COMM_PAGE_CPU_QUIESCENT_COUNTER i #else /* _COMM_PAGE_CPU_QUIESCENT_COUNTER */ T_DECL(test_quiescent_counter, "Validate that _COMM_PAGE_CPU_QUIESCENT_COUNTER increments", - T_META_CHECK_LEAKS(false)) + T_META_CHECK_LEAKS(false)) { int rv; @@ -74,8 +74,7 @@ T_DECL(test_quiescent_counter, "Validate that _COMM_PAGE_CPU_QUIESCENT_COUNTER i uint64_t last_counter = counter; T_LOG("first value of _COMM_PAGE_CPU_QUIESCENT_COUNTER is %llu", counter); - for (int i = 0 ; i < 10 ; i++) - { + for (int i = 0; i < 10; i++) { sleep(1); last_counter = counter; @@ -88,4 +87,3 @@ T_DECL(test_quiescent_counter, "Validate that _COMM_PAGE_CPU_QUIESCENT_COUNTER i } #endif /* _COMM_PAGE_CPU_QUIESCENT_COUNTER */ - diff --git a/tests/regression_17272465.c b/tests/regression_17272465.c index ed2dc105c..1bd261d9e 100644 --- a/tests/regression_17272465.c +++ b/tests/regression_17272465.c @@ -5,7 +5,7 @@ T_DECL(regression_17272465, - "Test for host_set_special_port Mach port over-release, rdr: 17272465", T_META_CHECK_LEAKS(false)) + "Test for host_set_special_port Mach port over-release, rdr: 17272465", T_META_CHECK_LEAKS(false)) { kern_return_t kr; mach_port_t port = MACH_PORT_NULL; diff --git a/tests/remote_time.c b/tests/remote_time.c index cd028a9de..1cb3f94cc 100644 --- a/tests/remote_time.c +++ b/tests/remote_time.c @@ -7,14 +7,14 @@ extern uint64_t __mach_bridge_remote_time(uint64_t); T_DECL(remote_time_syscall, "test mach_bridge_remote_time syscall", - T_META_CHECK_LEAKS(false)) + T_META_CHECK_LEAKS(false)) { #if TARGET_OS_BRIDGE uint64_t local_time = mach_absolute_time(); uint64_t remote_time1 = mach_bridge_remote_time(local_time); uint64_t remote_time2 = __mach_bridge_remote_time(local_time); T_LOG("local_time = %llu, remote_time1 = %llu, remote_time2 = %llu", - local_time, remote_time1, remote_time2); + local_time, 
remote_time1, remote_time2); T_ASSERT_EQ(remote_time1, remote_time2, "syscall works"); #else T_SKIP("Skipping test"); diff --git a/tests/settimeofday_29193041.c b/tests/settimeofday_29193041.c index 6bb495ddb..fe04a2ec5 100644 --- a/tests/settimeofday_29193041.c +++ b/tests/settimeofday_29193041.c @@ -23,16 +23,16 @@ #define DAY 86400 //1 day in sec T_DECL(settime_32089962_not_entitled_root, - "Verify that root privileges can allow to change the time", - T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) + "Verify that root privileges can allow to change the time", + T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) { struct timeval settimeofdaytime; struct timeval adj_time; struct timex ntptime; - if (geteuid() != 0){ - T_SKIP("settimeofday_root_29193041 test requires root privileges to run."); - } + if (geteuid() != 0) { + T_SKIP("settimeofday_root_29193041 test requires root privileges to run."); + } /* test settimeofday */ T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&settimeofdaytime, NULL), NULL); @@ -41,7 +41,7 @@ T_DECL(settime_32089962_not_entitled_root, /* test adjtime */ adj_time.tv_sec = 1; adj_time.tv_usec = 0; - T_ASSERT_POSIX_ZERO(adjtime(&adj_time, NULL),NULL); + T_ASSERT_POSIX_ZERO(adjtime(&adj_time, NULL), NULL); /* test ntp_adjtime */ memset(&ntptime, 0, sizeof(ntptime)); @@ -52,17 +52,17 @@ T_DECL(settime_32089962_not_entitled_root, } T_DECL(settime_32089962_not_entitled_not_root, - "Verify that the \"com.apple.settime\" entitlement can allow to change the time", - T_META_ASROOT(false), T_META_CHECK_LEAKS(false)) + "Verify that the \"com.apple.settime\" entitlement can allow to change the time", + T_META_ASROOT(false), T_META_CHECK_LEAKS(false)) { struct timeval settimeofdaytime; struct timeval adj_time; struct timex ntptime; int res; - if (geteuid() == 0){ - T_SKIP("settimeofday_29193041 test requires no root privileges to run."); - } + if (geteuid() == 0) { + T_SKIP("settimeofday_29193041 test requires no root privileges to run."); + } T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&settimeofdaytime, NULL), NULL); @@ -89,15 +89,15 @@ T_DECL(settime_32089962_not_entitled_not_root, } T_DECL(settimeofday_29193041_not_entitled_root, - "Verify that root privileges can allow to change the time", - T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) + "Verify that root privileges can allow to change the time", + T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) { struct timeval time; long new_time; - if (geteuid() != 0){ - T_SKIP("settimeofday_root_29193041 test requires root privileges to run."); - } + if (geteuid() != 0) { + T_SKIP("settimeofday_root_29193041 test requires root privileges to run."); + } T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&time, NULL), NULL); @@ -115,19 +115,19 @@ T_DECL(settimeofday_29193041_not_entitled_root, T_EXPECT_GE_LONG(time.tv_sec, new_time, "Time changed with root and without entitlement"); time.tv_sec -= DAY; - T_QUIET;T_ASSERT_POSIX_ZERO(settimeofday(&time, NULL), NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(settimeofday(&time, NULL), NULL); } T_DECL(settimeofday_29193041_not_entitled_not_root, - "Verify that the \"com.apple.settime\" entitlement can allow to change the time", - T_META_ASROOT(false), T_META_CHECK_LEAKS(false)) + "Verify that the \"com.apple.settime\" entitlement can allow to change the time", + T_META_ASROOT(false), T_META_CHECK_LEAKS(false)) { struct timeval time; long new_time; - if (geteuid() == 0){ - T_SKIP("settimeofday_29193041 test requires no root privileges to run."); - } + if (geteuid() == 0) { + T_SKIP("settimeofday_29193041 test 
requires no root privileges to run."); + } T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&time, NULL), NULL); @@ -154,5 +154,4 @@ T_DECL(settimeofday_29193041_not_entitled_not_root, #else T_EXPECT_LT_LONG(time.tv_sec, new_time, "Not permitted to change time without root and without entitlement"); #endif - } diff --git a/tests/settimeofday_29193041_entitled.c b/tests/settimeofday_29193041_entitled.c index 51ca5a5ed..38bc47ad0 100644 --- a/tests/settimeofday_29193041_entitled.c +++ b/tests/settimeofday_29193041_entitled.c @@ -23,16 +23,16 @@ #define DAY 86400 //1 day in sec T_DECL(settime_32089962_entitled_root, - "Verify that root privileges can allow to change the time", - T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) + "Verify that root privileges can allow to change the time", + T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) { struct timeval settimeofdaytime; struct timeval adj_time; struct timex ntptime; - if (geteuid() != 0){ - T_SKIP("settime_32089962_entitled_root test requires root privileges to run."); - } + if (geteuid() != 0) { + T_SKIP("settime_32089962_entitled_root test requires root privileges to run."); + } /* test settimeofday */ T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&settimeofdaytime, NULL), NULL); @@ -41,7 +41,7 @@ T_DECL(settime_32089962_entitled_root, /* test adjtime */ adj_time.tv_sec = 1; adj_time.tv_usec = 0; - T_ASSERT_POSIX_ZERO(adjtime(&adj_time, NULL),NULL); + T_ASSERT_POSIX_ZERO(adjtime(&adj_time, NULL), NULL); /* test ntp_adjtime */ memset(&ntptime, 0, sizeof(ntptime)); @@ -52,17 +52,16 @@ T_DECL(settime_32089962_entitled_root, } T_DECL(settime_32089962_entitled_not_root, - "Verify that the \"com.apple.settime\" entitlement can allow to change the time", - T_META_ASROOT(false), T_META_CHECK_LEAKS(false)) + "Verify that the \"com.apple.settime\" entitlement can allow to change the time", + T_META_ASROOT(false), T_META_CHECK_LEAKS(false)) { - struct timeval settimeofdaytime; struct timeval adj_time; struct timex ntptime; - if (geteuid() == 0){ - T_SKIP("settime_32089962_entitled_root test requires no root privileges to run."); - } + if (geteuid() == 0) { + T_SKIP("settime_32089962_entitled_root test requires no root privileges to run."); + } /* test settimeofday */ T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&settimeofdaytime, NULL), NULL); @@ -71,7 +70,7 @@ T_DECL(settime_32089962_entitled_not_root, /* test adjtime */ adj_time.tv_sec = 1; adj_time.tv_usec = 0; - T_ASSERT_POSIX_ZERO(adjtime(&adj_time, NULL),NULL); + T_ASSERT_POSIX_ZERO(adjtime(&adj_time, NULL), NULL); /* test ntp_adjtime */ memset(&ntptime, 0, sizeof(ntptime)); @@ -79,19 +78,18 @@ T_DECL(settime_32089962_entitled_not_root, ntptime.status = TIME_OK; T_ASSERT_EQ(ntp_adjtime(&ntptime), TIME_OK, NULL); - } T_DECL(settimeofday_29193041_entitled_root, - "Verify that root privileges can allow to change the time", - T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) + "Verify that root privileges can allow to change the time", + T_META_ASROOT(true), T_META_CHECK_LEAKS(false)) { struct timeval time; long new_time; - if (geteuid() != 0){ - T_SKIP("settimeofday_root_29193041 test requires root privileges to run."); - } + if (geteuid() != 0) { + T_SKIP("settimeofday_root_29193041 test requires root privileges to run."); + } T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&time, NULL), NULL); @@ -109,19 +107,19 @@ T_DECL(settimeofday_29193041_entitled_root, T_EXPECT_GE_LONG(time.tv_sec, new_time, "Time changed with root and entitlement"); time.tv_sec -= DAY; - T_QUIET;T_ASSERT_POSIX_ZERO(settimeofday(&time, NULL), 
NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(settimeofday(&time, NULL), NULL); } T_DECL(settimeofday_29193041_entitled_not_root, - "Verify that the \"com.apple.settime\" entitlement can allow to change the time", - T_META_ASROOT(false), T_META_CHECK_LEAKS(false)) + "Verify that the \"com.apple.settime\" entitlement can allow to change the time", + T_META_ASROOT(false), T_META_CHECK_LEAKS(false)) { struct timeval time; long new_time; - if (geteuid() == 0){ - T_SKIP("settimeofday_29193041 test requires no root privileges to run."); - } + if (geteuid() == 0) { + T_SKIP("settimeofday_29193041 test requires no root privileges to run."); + } T_QUIET; T_ASSERT_POSIX_ZERO(gettimeofday(&time, NULL), NULL); @@ -137,7 +135,7 @@ T_DECL(settimeofday_29193041_entitled_not_root, /* expect to be past new_time */ T_EXPECT_GE_LONG(time.tv_sec, new_time, "Time successfully changed without root and with entitlement"); - + time.tv_sec -= DAY; T_QUIET; T_ASSERT_POSIX_ZERO(settimeofday(&time, NULL), NULL); } diff --git a/tests/sigchld_return.c b/tests/sigchld_return.c index 6a3cc6bcf..25ec4f275 100644 --- a/tests/sigchld_return.c +++ b/tests/sigchld_return.c @@ -10,41 +10,41 @@ static int exitcode = 0x6789BEEF; int should_exit = 0; -void handler (int sig, siginfo_t *sip, __unused void *uconp) +void +handler(int sig, siginfo_t *sip, __unused void *uconp) { - /* Should handle the SIGCHLD signal */ - T_ASSERT_EQ_INT(sig, SIGCHLD, "Captured signal returns 0x%x, expected SIGCHLD (0x%x).", sig, SIGCHLD); - T_QUIET; T_ASSERT_NOTNULL(sip, "siginfo_t returned NULL but should have returned data."); - T_ASSERT_EQ_INT(sip->si_code, CLD_EXITED, "si_code returns 0x%x, expected CLD_EXITED (0x%x).", sip->si_code, CLD_EXITED); - T_ASSERT_EQ_INT(sip->si_status, exitcode, "si_status returns 0x%08X, expected the child's exit code (0x%08X).", sip->si_status, exitcode); - should_exit = 1; + /* Should handle the SIGCHLD signal */ + T_ASSERT_EQ_INT(sig, SIGCHLD, "Captured signal returns 0x%x, expected SIGCHLD (0x%x).", sig, SIGCHLD); + T_QUIET; T_ASSERT_NOTNULL(sip, "siginfo_t returned NULL but should have returned data."); + T_ASSERT_EQ_INT(sip->si_code, CLD_EXITED, "si_code returns 0x%x, expected CLD_EXITED (0x%x).", sip->si_code, CLD_EXITED); + T_ASSERT_EQ_INT(sip->si_status, exitcode, "si_status returns 0x%08X, expected the child's exit code (0x%08X).", sip->si_status, exitcode); + should_exit = 1; } T_DECL(sigchldreturn, "checks that a child process exited with an exitcode returns correctly to parent", T_META_CHECK_LEAKS(false)) { - struct sigaction act; - int pid; + struct sigaction act; + int pid; - act.sa_sigaction = handler; - act.sa_flags = SA_SIGINFO; + act.sa_sigaction = handler; + act.sa_flags = SA_SIGINFO; - /* Set action for signal */ - T_QUIET; T_ASSERT_POSIX_SUCCESS(sigaction (SIGCHLD, &act, NULL), "Calling sigaction() failed for SIGCHLD"); + /* Set action for signal */ + T_QUIET; T_ASSERT_POSIX_SUCCESS(sigaction(SIGCHLD, &act, NULL), "Calling sigaction() failed for SIGCHLD"); - /* Now fork a child that just exits */ - pid = fork(); - T_QUIET; T_ASSERT_NE_INT(pid, -1, "fork() failed!"); + /* Now fork a child that just exits */ + pid = fork(); + T_QUIET; T_ASSERT_NE_INT(pid, -1, "fork() failed!"); - if (pid == 0) { - /* Child process!
*/ + exit(exitcode); + } - /* Main program that did the fork */ - /* We should process the signal, then exit */ - while (!should_exit) { - sleep(1); - } + /* Main program that did the fork */ + /* We should process the signal, then exit */ + while (!should_exit) { + sleep(1); + } } - diff --git a/tests/sigcont_return.c b/tests/sigcont_return.c index 606caa910..5e9258923 100644 --- a/tests/sigcont_return.c +++ b/tests/sigcont_return.c @@ -8,21 +8,22 @@ T_DECL(sigcontreturn, "checks that a call to waitid() for a child that is stopped and then continued returns correctly") { - pid_t pid; - siginfo_t siginfo; - pid = fork(); - T_QUIET; T_ASSERT_NE_INT(pid, -1, "fork() failed!"); + pid_t pid; + siginfo_t siginfo; + pid = fork(); + T_QUIET; T_ASSERT_NE_INT(pid, -1, "fork() failed!"); - if (pid == 0) { - while(1){} - } + if (pid == 0) { + while (1) { + } + } - kill(pid, SIGSTOP); - kill(pid, SIGCONT); - sleep(1); + kill(pid, SIGSTOP); + kill(pid, SIGCONT); + sleep(1); - T_QUIET; T_ASSERT_POSIX_SUCCESS(waitid(P_PID, pid, &siginfo, WCONTINUED), "Calling waitid() failed for pid %d", pid); + T_QUIET; T_ASSERT_POSIX_SUCCESS(waitid(P_PID, pid, &siginfo, WCONTINUED), "Calling waitid() failed for pid %d", pid); - T_ASSERT_EQ_INT(siginfo.si_status, SIGCONT, "A call to waitid() for stopped and continued child returns 0x%x, expected SIGCONT (0x%x)", siginfo.si_status, SIGCONT ); - kill(pid, SIGKILL); + T_ASSERT_EQ_INT(siginfo.si_status, SIGCONT, "A call to waitid() for stopped and continued child returns 0x%x, expected SIGCONT (0x%x)", siginfo.si_status, SIGCONT ); + kill(pid, SIGKILL); } diff --git a/tests/socket_bind_35243417.c b/tests/socket_bind_35243417.c index cb44aa53f..0d650e11a 100644 --- a/tests/socket_bind_35243417.c +++ b/tests/socket_bind_35243417.c @@ -11,31 +11,31 @@ static int sockv6_open(void) { - int s; + int s; s = socket(AF_INET6, SOCK_DGRAM, 0); T_QUIET; T_ASSERT_POSIX_SUCCESS(s, "socket(AF_INET6, SOCK_DGRAM, 0)"); - return (s); + return s; } static int sockv6_bind(int s, in_port_t port) { - struct sockaddr_in6 sin6; + struct sockaddr_in6 sin6; bzero(&sin6, sizeof(sin6)); sin6.sin6_len = sizeof(sin6); sin6.sin6_family = AF_INET6; sin6.sin6_port = port; - return (bind(s, (const struct sockaddr *)&sin6, sizeof(sin6))); + return bind(s, (const struct sockaddr *)&sin6, sizeof(sin6)); } static void sockv6_set_v6only(int s) { - int on = 1; - int ret; + int on = 1; + int ret; ret = setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on)); T_QUIET; @@ -44,15 +44,15 @@ sockv6_set_v6only(int s) static bool alloc_and_bind_ports(in_port_t port_start, in_port_t port_end, - int bind_attempts) + int bind_attempts) { - int bound_count = 0; - bool success = true; + int bound_count = 0; + bool success = true; for (in_port_t i = port_start; success && i <= port_end; i++) { - int s6 = -1; - int s6_other = -1; - int ret; + int s6 = -1; + int s6_other = -1; + int ret; s6 = sockv6_open(); sockv6_set_v6only(s6); @@ -76,7 +76,7 @@ alloc_and_bind_ports(in_port_t port_start, in_port_t port_end, break; } } - loop_done: +loop_done: if (s6 >= 0) { close(s6); } @@ -85,23 +85,23 @@ alloc_and_bind_ports(in_port_t port_start, in_port_t port_end, } } T_ASSERT_TRUE(bound_count == bind_attempts, - "number of successful binds %d (out of %d)", - bound_count, bind_attempts); - return (success); + "number of successful binds %d (out of %d)", + bound_count, bind_attempts); + return success; } T_DECL(socket_bind_35243417, - "bind IPv6 only UDP socket, then bind IPv6 socket.", - T_META_ASROOT(false), - T_META_CHECK_LEAKS(false)) 
+ "bind IPv6 only UDP socket, then bind IPv6 socket.", + T_META_ASROOT(false), + T_META_CHECK_LEAKS(false)) { alloc_and_bind_ports(1, 65534, 10); } T_DECL(socket_bind_35243417_root, - "bind IPv6 only UDP socket, then bind IPv6 socket.", - T_META_ASROOT(true)) + "bind IPv6 only UDP socket, then bind IPv6 socket.", + T_META_ASROOT(true)) { alloc_and_bind_ports(1, 65534, 10); } diff --git a/tests/socket_bind_35685803.c b/tests/socket_bind_35685803.c index d0e22a941..b1173c2e2 100644 --- a/tests/socket_bind_35685803.c +++ b/tests/socket_bind_35685803.c @@ -17,57 +17,57 @@ static bool debug; static int sock_open_common(int pf, int type) { - int s; + int s; s = socket(pf, type, 0); T_QUIET; T_ASSERT_POSIX_SUCCESS(s, "socket(%d, %d, 0)", pf, type); - return (s); + return s; } static int sock_open(int type) { - return (sock_open_common(PF_INET, type)); + return sock_open_common(PF_INET, type); } static int sock_bind(int s, int port) { - struct sockaddr_in sin = { + struct sockaddr_in sin = { .sin_len = sizeof(sin), .sin_family = AF_INET, }; sin.sin_port = htons(port); - return (bind(s, (const struct sockaddr *)&sin, sizeof(sin))); + return bind(s, (const struct sockaddr *)&sin, sizeof(sin)); } static int sockv6_open(int type) { - return (sock_open_common(PF_INET6, type)); + return sock_open_common(PF_INET6, type); } static int sockv6_bind(int s, int port) { - struct sockaddr_in6 sin6 = { + struct sockaddr_in6 sin6 = { .sin6_len = sizeof(sin6), .sin6_family = AF_INET6, }; sin6.sin6_port = htons(port); - return (bind(s, (const struct sockaddr *)&sin6, sizeof(sin6))); + return bind(s, (const struct sockaddr *)&sin6, sizeof(sin6)); } static uint16_t sock_get_port(int sockfd) { - int error; - uint16_t p; - union sockaddr_in_4_6 sin; - socklen_t sin_len; + int error; + uint16_t p; + union sockaddr_in_4_6 sin; + socklen_t sin_len; sin_len = sizeof(sin); bzero(&sin, sin_len); @@ -75,7 +75,7 @@ sock_get_port(int sockfd) T_QUIET; T_EXPECT_POSIX_ZERO(error, "getsockname(%d)", sockfd); if (error != 0) { - return (0); + return 0; } switch (sin.sa.sa_family) { case AF_INET: @@ -86,36 +86,35 @@ sock_get_port(int sockfd) break; default: T_ASSERT_FAIL("unknown address family %d\n", - sin.sa.sa_family); + sin.sa.sa_family); p = 0; break; } - return (p); + return p; } typedef struct { - bool v6; - int socket_count; - int * socket_list; + bool v6; + int socket_count; + int * socket_list; } SocketInfo, * SocketInfoRef; static void bind_sockets(SocketInfoRef info, const char * msg) { for (int i = 0; i < info->socket_count; i++) { - int error; - uint16_t port; + int error; + uint16_t port; if (info->v6) { error = sockv6_bind(info->socket_list[i], 0); - } - else { + } else { error = sock_bind(info->socket_list[i], 0); } port = sock_get_port(info->socket_list[i]); if (debug) { T_LOG( "%s: fd %d port is %d error %d", - msg, info->socket_list[i], ntohs(port), error); + msg, info->socket_list[i], ntohs(port), error); } } return; @@ -124,19 +123,19 @@ bind_sockets(SocketInfoRef info, const char * msg) static void * second_thread(void * arg) { - SocketInfoRef info = (SocketInfoRef)arg; + SocketInfoRef info = (SocketInfoRef)arg; bind_sockets(info, "second"); - return (NULL); + return NULL; } static void multithreaded_bind_test(bool v6, int socket_count) { - int error; - SocketInfo info; - int socket_list[socket_count]; - pthread_t thread; + int error; + SocketInfo info; + int socket_list[socket_count]; + pthread_t thread; info.v6 = v6; for (int i = 0; i < socket_count; i++) { @@ -159,7 +158,7 @@ multithreaded_bind_test(bool v6, 
int socket_count) T_ASSERT_POSIX_ZERO(error, "pthread_join"); for (int i = 0; i < socket_count; i++) { - error = close(socket_list[i]); + error = close(socket_list[i]); T_QUIET; T_ASSERT_POSIX_ZERO(error, "close socket %d", socket_list[i]); } @@ -175,31 +174,31 @@ run_multithreaded_bind_test(int number_of_runs, bool v6, int socket_count) } T_DECL(socket_bind_35685803, - "multithreaded bind IPv4 socket as root", - T_META_ASROOT(false), - T_META_CHECK_LEAKS(false)) + "multithreaded bind IPv4 socket", + T_META_ASROOT(false), + T_META_CHECK_LEAKS(false)) { run_multithreaded_bind_test(100, false, 100); } T_DECL(socket_bind_35685803_root, - "multithreaded bind IPv4 socket", - T_META_ASROOT(true)) + "multithreaded bind IPv4 socket as root", + T_META_ASROOT(true)) { run_multithreaded_bind_test(100, false, 100); } T_DECL(socket_bind_35685803_v6, - "multithreaded bind IPv6 socket as root", - T_META_ASROOT(false), - T_META_CHECK_LEAKS(false)) + "multithreaded bind IPv6 socket", + T_META_ASROOT(false), + T_META_CHECK_LEAKS(false)) { run_multithreaded_bind_test(100, true, 100); } T_DECL(socket_bind_35685803_v6_root, - "multithreaded bind IPv6 socket", - T_META_ASROOT(true)) + "multithreaded bind IPv6 socket as root", + T_META_ASROOT(true)) { run_multithreaded_bind_test(100, true, 100); } diff --git a/tests/socket_poll_close_25786011.c b/tests/socket_poll_close_25786011.c index b39b936ea..5454e80b1 100644 --- a/tests/socket_poll_close_25786011.c +++ b/tests/socket_poll_close_25786011.c @@ -17,7 +17,7 @@ T_DECL(socket_poll_close_25786011, "Tests an invalid poll call to a socket and t */ struct pollfd my_pollfd = { .fd = my_socket, - .events = POLLEXTEND + .events = POLLEXTEND }; /* diff --git a/tests/stackshot_spawn_exit_stress.c b/tests/stackshot_spawn_exit_stress.c index 2a0be2b37..448095598 100644 --- a/tests/stackshot_spawn_exit_stress.c +++ b/tests/stackshot_spawn_exit_stress.c @@ -13,10 +13,10 @@ #include T_GLOBAL_META( - T_META_NAMESPACE("xnu.stackshot"), - T_META_CHECK_LEAKS(false), - T_META_ASROOT(true) - ); + T_META_NAMESPACE("xnu.stackshot"), + T_META_CHECK_LEAKS(false), + T_META_ASROOT(true) + ); #if TARGET_OS_WATCH #define SPAWN_ITERATIONS 1999 @@ -28,7 +28,9 @@ T_GLOBAL_META( #define REAP_INTERVAL 10 -static void* loop(__attribute__ ((unused)) void *arg) { +static void* +loop(__attribute__ ((unused)) void *arg) +{ exit(0); } @@ -38,14 +40,16 @@ T_HELPER_DECL(spawn_children_helper, "spawn_children helper") T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&pthread, NULL, loop, NULL), "pthread_create"); - while (1) { ; } + while (1) { + ; + } } static void take_stackshot(void) { uint32_t stackshot_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | - STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT); + STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT); void *config = stackshot_config_create(); T_QUIET; T_ASSERT_NOTNULL(config, "created stackshot config"); @@ -64,10 +68,10 @@ retry: goto retry; } else { T_QUIET; T_ASSERT_POSIX_ZERO(ret, - "called stackshot_capture_with_config (no retries remaining)"); + "called stackshot_capture_with_config (no retries remaining)"); } } else { - T_QUIET; T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config"); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config"); } ret = stackshot_config_dealloc(config); @@ -86,22 +90,22 @@ T_DECL(stackshot_spawn_exit, "tests taking many stackshots while children proces int num_stackshots = 0; while (1) { - take_stackshot(); - num_stackshots++; - if ((num_stackshots
% 100) == 0) { - T_LOG("completed %d stackshots", num_stackshots); + take_stackshot(); + num_stackshots++; + if ((num_stackshots % 100) == 0) { + T_LOG("completed %d stackshots", num_stackshots); } - // Sleep between each stackshot - usleep(100); + // Sleep between each stackshot + usleep(100); } }); // META option for T_HELPER_DECL to not output test begin on start posix_spawn_file_actions_t actions; T_QUIET; T_ASSERT_POSIX_SUCCESS(posix_spawn_file_actions_init(&actions), "create spawn actions"); - T_QUIET; T_ASSERT_POSIX_SUCCESS(posix_spawn_file_actions_addopen (&actions, STDOUT_FILENO, "/dev/null", O_WRONLY, 0), - "set stdout of child to NULL"); + T_QUIET; T_ASSERT_POSIX_SUCCESS(posix_spawn_file_actions_addopen(&actions, STDOUT_FILENO, "/dev/null", O_WRONLY, 0), + "set stdout of child to NULL"); int children_unreaped = 0, status; for (int iterations_remaining = SPAWN_ITERATIONS; iterations_remaining > 0; iterations_remaining--) { diff --git a/tests/suspended_spawn_26184412.c b/tests/suspended_spawn_26184412.c index 977e96dc8..6a8977bb5 100644 --- a/tests/suspended_spawn_26184412.c +++ b/tests/suspended_spawn_26184412.c @@ -1,5 +1,3 @@ - - #include #include @@ -57,7 +55,7 @@ spawn_and_signal(int signal) T_ASSERT_POSIX_SUCCESS(ret, "posix_spawnattr_destroy"); int status = 0; - int waitpid_result = waitpid(child_pid, &status, WUNTRACED|WNOHANG); + int waitpid_result = waitpid(child_pid, &status, WUNTRACED | WNOHANG); T_ASSERT_POSIX_SUCCESS(waitpid_result, "waitpid"); T_ASSERT_EQ(waitpid_result, child_pid, "waitpid should return child we spawned"); @@ -98,4 +96,3 @@ T_DECL(suspended_spawn_kill, "Tests spawning a suspended process and killing it" { spawn_and_signal(SIGKILL); } - diff --git a/tests/task_info.c b/tests/task_info.c index c440036cb..74ab31f23 100644 --- a/tests/task_info.c +++ b/tests/task_info.c @@ -86,19 +86,19 @@ T_DECL(task_vm_info, "tests task vm info", T_META_ASROOT(true), T_META_LTEPHASE( T_EXPECT_NE(vm_info.virtual_size, 0ULL, "task_info --rev0 call does not return 0 for virtual_size"); T_EXPECT_EQ(vm_info.phys_footprint, (unsigned long long)TESTPHYSFOOTPRINTVAL, - "task_info --rev0 call returned value %llu for vm_info.phys_footprint. Expected %u since this value should not be " - "modified by rev0", - vm_info.phys_footprint, TESTPHYSFOOTPRINTVAL); + "task_info --rev0 call returned value %llu for vm_info.phys_footprint. Expected %u since this value should not be " + "modified by rev0", + vm_info.phys_footprint, TESTPHYSFOOTPRINTVAL); T_EXPECT_EQ(vm_info.min_address, CANARY, - "task_info --rev0 call returned value 0x%llx for vm_info.min_address. Expected 0x%llx since this value should not " - "be modified by rev0", - vm_info.min_address, CANARY); + "task_info --rev0 call returned value 0x%llx for vm_info.min_address. Expected 0x%llx since this value should not " + "be modified by rev0", + vm_info.min_address, CANARY); T_EXPECT_EQ(vm_info.max_address, CANARY, - "task_info --rev0 call returned value 0x%llx for vm_info.max_address. Expected 0x%llx since this value should not " - "be modified by rev0", - vm_info.max_address, CANARY); + "task_info --rev0 call returned value 0x%llx for vm_info.max_address. Expected 0x%llx since this value should not " + "be modified by rev0", + vm_info.max_address, CANARY); /* * Test the REV1 version of TASK_VM_INFO. 
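/*
 * [Editor's illustrative sketch -- not part of the patch.] The rev0 checks
 * above and the rev1/rev2 hunks that follow exercise variants of TASK_VM_INFO
 * that are selected purely by the count the caller passes in: task_info()
 * fills only as many fields as the count covers, which is why a rev0-sized
 * request must leave phys_footprint and the min/max_address canaries
 * untouched. Assuming the TASK_VM_INFO_REV*_COUNT constants from
 * <mach/task_info.h>:
 */
#include <mach/mach.h>

static kern_return_t
query_task_vm_info_rev0(task_vm_info_data_t *info)
{
	/* a truncated count requests only the rev0 prefix of the struct */
	mach_msg_type_number_t count = TASK_VM_INFO_REV0_COUNT;

	return task_info(mach_task_self(), TASK_VM_INFO,
	           (task_info_t)info, &count);
}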
@@ -118,19 +118,19 @@ T_DECL(task_vm_info, "tests task vm info", T_META_ASROOT(true), T_META_LTEPHASE( T_EXPECT_NE(vm_info.virtual_size, 0ULL, "task_info --rev1 call does not return 0 for virtual_size"); T_EXPECT_NE(vm_info.phys_footprint, (unsigned long long)TESTPHYSFOOTPRINTVAL, - "task_info --rev1 call returned value %llu for vm_info.phys_footprint. Expected value is anything other than %u " - "since this value should not be modified by rev1", - vm_info.phys_footprint, TESTPHYSFOOTPRINTVAL); + "task_info --rev1 call returned value %llu for vm_info.phys_footprint. Expected value is anything other than %u " + "since this value should be modified by rev1", + vm_info.phys_footprint, TESTPHYSFOOTPRINTVAL); T_EXPECT_EQ(vm_info.min_address, CANARY, - "task_info --rev1 call returned value 0x%llx for vm_info.min_address. Expected 0x%llx since this value should not " - "be modified by rev1", - vm_info.min_address, CANARY); + "task_info --rev1 call returned value 0x%llx for vm_info.min_address. Expected 0x%llx since this value should not " + "be modified by rev1", + vm_info.min_address, CANARY); T_EXPECT_EQ(vm_info.max_address, CANARY, - "task_info --rev1 call returned value 0x%llx for vm_info.max_address. Expected 0x%llx since this value should not " - "be modified by rev1", - vm_info.max_address, CANARY); + "task_info --rev1 call returned value 0x%llx for vm_info.max_address. Expected 0x%llx since this value should not " + "be modified by rev1", + vm_info.max_address, CANARY); /* * Test the REV2 version of TASK_VM_INFO. @@ -150,19 +150,19 @@ T_DECL(task_vm_info, "tests task vm info", T_META_ASROOT(true), T_META_LTEPHASE( T_EXPECT_NE(vm_info.virtual_size, 0ULL, "task_info --rev2 call does not return 0 for virtual_size\n"); T_EXPECT_NE(vm_info.phys_footprint, (unsigned long long)TESTPHYSFOOTPRINTVAL, - "task_info --rev2 call returned value %llu for vm_info.phys_footprint. Expected anything other than %u since this " - "value should be modified by rev2", - vm_info.phys_footprint, TESTPHYSFOOTPRINTVAL); + "task_info --rev2 call returned value %llu for vm_info.phys_footprint. Expected anything other than %u since this " + "value should be modified by rev2", + vm_info.phys_footprint, TESTPHYSFOOTPRINTVAL); T_EXPECT_NE(vm_info.min_address, CANARY, - "task_info --rev2 call returned value 0x%llx for vm_info.min_address. Expected anything other than 0x%llx since " - "this value should be modified by rev2", - vm_info.min_address, CANARY); + "task_info --rev2 call returned value 0x%llx for vm_info.min_address. Expected anything other than 0x%llx since " + "this value should be modified by rev2", + vm_info.min_address, CANARY); T_EXPECT_NE(vm_info.max_address, CANARY, - "task_info --rev2 call returned value 0x%llx for vm_info.max_address. Expected anything other than 0x%llx since " - "this value should be modified by rev2", - vm_info.max_address, CANARY); + "task_info --rev2 call returned value 0x%llx for vm_info.max_address. 
Expected anything other than 0x%llx since " + "this value should be modified by rev2", + vm_info.max_address, CANARY); } T_DECL(host_debug_info, "tests host debug info", T_META_ASROOT(true), T_META_LTEPHASE(LTE_POSTINIT)) @@ -262,8 +262,8 @@ T_DECL(task_thread_times_info, "tests task thread times info", T_META_ASROOT(tru */ T_EXPECT_FALSE((thread_times_info_data_new.user_time.seconds - thread_times_info_data.user_time.seconds) != 0 || - (thread_times_info_data_new.system_time.seconds - thread_times_info_data.system_time.seconds) != 0, - "Tests whether the difference between thread times is greater than the allowed limit"); + (thread_times_info_data_new.system_time.seconds - thread_times_info_data.system_time.seconds) != 0, + "Tests whether the difference between thread times is greater than the allowed limit"); /* * This is a negative case. @@ -272,7 +272,7 @@ T_DECL(task_thread_times_info, "tests task thread times info", T_META_ASROOT(tru count--; err = task_info(mach_task_self(), TASK_THREAD_TIMES_INFO, (task_info_t)&thread_times_info_data, &count); T_ASSERT_MACH_ERROR(err, KERN_INVALID_ARGUMENT, - "Negative test case: task_info should verify that count is at least equal to what is defined in API."); + "Negative test case: task_info should verify that count is at least equal to what is defined in API."); } T_DECL(task_absolutetime_info, "tests task absolute time info", T_META_ASROOT(true), T_META_LTEPHASE(LTE_POSTINIT)) @@ -309,7 +309,7 @@ T_DECL(task_absolutetime_info, "tests task absolute time info", T_META_ASROOT(tr */ T_EXPECT_FALSE(user_time_diff < ABSOLUTE_MIN_USER_TIME_DIFF || system_time_diff < ABSOLUTE_MIN_SYSTEM_TIME_DIFF, - "Tests whether the difference between thread times is greater than the expected range"); + "Tests whether the difference between thread times is greater than the expected range"); #endif if (absolute_time_info_data.threads_user <= 0) { @@ -335,7 +335,7 @@ T_DECL(task_absolutetime_info, "tests task absolute time info", T_META_ASROOT(tr * There is no real way to estimate the exact amount. */ T_EXPECT_NE(absolute_time_info_data.threads_system, 0ULL, - "task_info should return non-zero value for system threads time = %llu", absolute_time_info_data.threads_system); + "task_info should return non-zero value for system threads time = %llu", absolute_time_info_data.threads_system); #endif /* @@ -344,7 +344,7 @@ T_DECL(task_absolutetime_info, "tests task absolute time info", T_META_ASROOT(tr count--; err = task_info(mach_task_self(), TASK_ABSOLUTETIME_INFO, (task_info_t)&absolute_time_info_data_new, &count); T_ASSERT_MACH_ERROR(err, KERN_INVALID_ARGUMENT, - "Negative test case: task_info should verify that count is at least equal to what is defined in API."); + "Negative test case: task_info should verify that count is at least equal to what is defined in API."); } T_DECL(task_affinity_tag_info, "tests task_affinity_tag_info", T_META_ASROOT(true), T_META_LTEPHASE(LTE_POSTINIT)) @@ -367,15 +367,15 @@ T_DECL(task_affinity_tag_info, "tests task_affinity_tag_info", T_META_ASROOT(tru * The affinity is not set by default, hence expecting a zero value. */ T_ASSERT_FALSE(affinity_tag_info_data.min != 0 || affinity_tag_info_data.max != 0, - "task_info call returns non-zero min or max value"); + "task_info call returns non-zero min or max value"); /* - * This is a negative case. - */ + * This is a negative case. 
+ */ count--; err = task_info(mach_task_self(), TASK_AFFINITY_TAG_INFO, (task_info_t)&affinity_tag_info_data, &count); T_ASSERT_MACH_ERROR(err, KERN_INVALID_ARGUMENT, - "Negative test case: task_info should verify that count is at least equal to what is defined in API."); + "Negative test case: task_info should verify that count is at least equal to what is defined in API."); } T_DECL(task_flags_info, "tests task_flags_info", T_META_ASROOT(true), T_META_LTEPHASE(LTE_POSTINIT)) @@ -396,7 +396,7 @@ T_DECL(task_flags_info, "tests task_flags_info", T_META_ASROOT(true), T_META_LTE /* Change for 32-bit arch possibility?*/ T_ASSERT_EQ((flags_info_data.flags & (unsigned int)(~(TF_LP64 | TF_64B_DATA))), 0U, - "task_info should only give out 64-bit addr/data flags"); + "task_info should only give out 64-bit addr/data flags"); /* * This is a negative case. @@ -405,7 +405,7 @@ T_DECL(task_flags_info, "tests task_flags_info", T_META_ASROOT(true), T_META_LTE count--; err = task_info(mach_task_self(), TASK_FLAGS_INFO, (task_info_t)&flags_info_data, &count); T_ASSERT_MACH_ERROR(err, KERN_INVALID_ARGUMENT, - "Negative test case: task_info should verify that count is at least equal to what is defined in API."); + "Negative test case: task_info should verify that count is at least equal to what is defined in API."); } T_DECL(task_power_info_v2, "tests task_power_info_v2", T_META_ASROOT(true), T_META_LTEPHASE(LTE_POSTINIT)) @@ -428,7 +428,7 @@ T_DECL(task_power_info_v2, "tests task_power_info_v2", T_META_ASROOT(true), T_ME T_ASSERT_MACH_SUCCESS(err, "verify task_info call succeeded"); T_ASSERT_LE(power_info_data_v2.gpu_energy.task_gpu_utilisation, 0ULL, - "verified task_info call shows zero GPU utilization for non-GPU task"); + "verified task_info call shows zero GPU utilization for non-GPU task"); do_factorial_task(); @@ -443,14 +443,14 @@ T_DECL(task_power_info_v2, "tests task_power_info_v2", T_META_ASROOT(true), T_ME * iOS does not have system_time. */ T_ASSERT_GT(power_info_data_v2_new.cpu_energy.total_user, power_info_data_v2.cpu_energy.total_user, - "task_info call returns valid user time"); + "task_info call returns valid user time"); T_ASSERT_GT(power_info_data_v2_new.cpu_energy.total_system, power_info_data_v2.cpu_energy.total_system, - "task_info call returns valid system time"); + "task_info call returns valid system time"); #endif T_ASSERT_GE(power_info_data_v2.cpu_energy.task_interrupt_wakeups, 1ULL, - "verify task_info call returns non-zero value for interrupt_wakeup (ret value = %llu)", - power_info_data_v2.cpu_energy.task_interrupt_wakeups); + "verify task_info call returns non-zero value for interrupt_wakeup (ret value = %llu)", + power_info_data_v2.cpu_energy.task_interrupt_wakeups); #if !(defined(__arm__) || defined(__arm64__)) if (power_info_data_v2.cpu_energy.task_platform_idle_wakeups != 0) { @@ -470,9 +470,9 @@ T_DECL(task_power_info_v2, "tests task_power_info_v2", T_META_ASROOT(true), T_ME err = task_info(mach_task_self(), TASK_POWER_INFO_V2, (task_info_t)&power_info_data_v2, &count); T_ASSERT_MACH_ERROR(err, KERN_INVALID_ARGUMENT, - "Negative test case: task_info should verify that count is at least equal to what is defined in API. Call " - "returns errno %d:%s", - err, mach_error_string(err)); + "Negative test case: task_info should verify that count is at least equal to what is defined in API. 
Call " + "returns errno %d:%s", + err, mach_error_string(err)); } T_DECL(test_task_basic_info_32, "tests TASK_BASIC_INFO_32", T_META_ASROOT(true), T_META_LTEPHASE(LTE_POSTINIT)) @@ -670,7 +670,7 @@ test_task_basic_info(enum info_kind kind) T_EXPECT_EQ(resident_size_diff % 4096, 0ULL, "verify task_info returns valid max resident_size"); T_EXPECT_GE(resident_size_diff, 0ULL, "verify task_info returns non-negative max resident_size"); T_EXPECT_GE(info_get(kind, GET_MAX_RES, info_data[AFTER]), info_get(kind, GET_RESIDENT_SIZE, info_data[AFTER]), - "verify max resident size is greater than or equal to curr resident size"); + "verify max resident size is greater than or equal to curr resident size"); } do_factorial_task(); @@ -701,7 +701,7 @@ test_task_basic_info(enum info_kind kind) kr = task_info(mach_task_self(), flavor, info_data[AFTER], &count); T_ASSERT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, - "Negative test case: task_info should verify that count is at least equal to what is defined in API"); + "Negative test case: task_info should verify that count is at least equal to what is defined in API"); /* * deallocate memory @@ -715,9 +715,9 @@ test_task_basic_info(enum info_kind kind) } T_DECL(test_sigcont_task_suspend_resume, - "test to verify that SIGCONT on task_suspend()-ed process works", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "test to verify that SIGCONT on task_suspend()-ed process works", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { T_SETUPBEGIN; int is_dev = is_development_kernel(); @@ -795,13 +795,12 @@ T_DECL(test_sigcont_task_suspend_resume, T_ASSERT_EQ(signal_no, 0, "child should be resumed and exit without signal"); T_ASSERT_EQ(exit_status, 0, "child should exit with 0"); - } T_DECL(test_sigcont_task_suspend2_resume, - "test to verify that SIGCONT on task_suspend2()-ed process doesn't work", - T_META_ASROOT(true), - T_META_LTEPHASE(LTE_POSTINIT)) + "test to verify that SIGCONT on task_suspend2()-ed process doesn't work", + T_META_ASROOT(true), + T_META_LTEPHASE(LTE_POSTINIT)) { T_SETUPBEGIN; int is_dev = is_development_kernel(); @@ -925,7 +924,6 @@ T_DECL(test_sigcont_task_suspend2_resume, T_ASSERT_EQ(signal_no, 0, "child should be resumed and no signal should be returned"); T_ASSERT_EQ(exit_status, 0, "child should exit with 0"); - } uint64_t @@ -1015,24 +1013,24 @@ info_get(enum info_kind kind, enum info_get get, void * data) switch (kind) { case INFO_32: case INFO_32_2: - return (uint64_t) & (((task_basic_info_32_t)data)->user_time); + return (uint64_t) &(((task_basic_info_32_t)data)->user_time); #if defined(__arm__) || defined(__arm64__) case INFO_64: T_ASSERT_FAIL("illegal info_get %d %d", kind, get); break; case INFO_64_2: - return (uint64_t) & (((task_basic_info_64_2_t)data)->user_time); + return (uint64_t) &(((task_basic_info_64_2_t)data)->user_time); #else case INFO_64: - return (uint64_t) & (((task_basic_info_64_t)data)->user_time); + return (uint64_t) &(((task_basic_info_64_t)data)->user_time); case INFO_64_2: T_ASSERT_FAIL("illegal info_get %d %d", kind, get); break; #endif /* defined(__arm__) || defined(__arm64__) */ case INFO_MACH: - return (uint64_t) & (((mach_task_basic_info_t)data)->user_time); + return (uint64_t) &(((mach_task_basic_info_t)data)->user_time); case INFO_MAX: default: @@ -1042,24 +1040,24 @@ info_get(enum info_kind kind, enum info_get get, void * data) switch (kind) { case INFO_32: case INFO_32_2: - return (uint64_t) & (((task_basic_info_32_t)data)->system_time); + return (uint64_t) 
&(((task_basic_info_32_t)data)->system_time); #if defined(__arm__) || defined(__arm64__) case INFO_64: T_ASSERT_FAIL("illegal info_get %d %d", kind, get); break; case INFO_64_2: - return (uint64_t) & (((task_basic_info_64_2_t)data)->system_time); + return (uint64_t) &(((task_basic_info_64_2_t)data)->system_time); #else case INFO_64: - return (uint64_t) & (((task_basic_info_64_t)data)->system_time); + return (uint64_t) &(((task_basic_info_64_t)data)->system_time); case INFO_64_2: T_ASSERT_FAIL("illegal info_get %d %d", kind, get); break; #endif /* defined(__arm__) || defined(__arm64__) */ case INFO_MACH: - return (uint64_t) & (((mach_task_basic_info_t)data)->user_time); + return (uint64_t) &(((mach_task_basic_info_t)data)->user_time); case INFO_MAX: default: T_ASSERT_FAIL("unhandled info_get %d %d", kind, get); diff --git a/tests/task_info_28439149.c b/tests/task_info_28439149.c index 9102ba600..f56e38da5 100644 --- a/tests/task_info_28439149.c +++ b/tests/task_info_28439149.c @@ -12,7 +12,9 @@ #include #include -static void do_child(int *pipefd){ +static void +do_child(int *pipefd) +{ int exit = 0; close(pipefd[1]); @@ -22,7 +24,7 @@ static void do_child(int *pipefd){ } T_DECL(task_info_28439149, "ensure that task_info has the correct permission", - T_META_CHECK_LEAKS(false), T_META_ASROOT(true)) + T_META_CHECK_LEAKS(false), T_META_ASROOT(true)) { int pipefd[2]; @@ -40,7 +42,7 @@ T_DECL(task_info_28439149, "ensure that task_info has the correct permission", int exit; mach_msg_type_number_t count; - struct task_basic_info_64 ti; + struct task_basic_info_64 ti; task_dyld_info_data_t di; task_t self = mach_task_self(); @@ -53,21 +55,21 @@ T_DECL(task_info_28439149, "ensure that task_info has the correct permission", count = TASK_BASIC_INFO_64_COUNT; T_EXPECT_MACH_SUCCESS(task_info(self, TASK_BASIC_INFO_64, (task_info_t)&ti, - &count), "task_info(self, TASK_BASIC_INFO_64 ...)"); + &count), "task_info(self, TASK_BASIC_INFO_64 ...)"); count = TASK_BASIC_INFO_64_COUNT; T_EXPECT_MACH_SUCCESS(task_info(other, TASK_BASIC_INFO_64, (task_info_t)&ti, - &count), "task_info(other_name, TASK_BASIC_INFO_64 ...)"); + &count), "task_info(other_name, TASK_BASIC_INFO_64 ...)"); count = TASK_BASIC_INFO_64_COUNT; T_EXPECT_MACH_SUCCESS(task_info(other_name, TASK_BASIC_INFO_64, (task_info_t)&ti, - &count), "task_info(other_name, TASK_BASIC_INFO_64 ...)"); + &count), "task_info(other_name, TASK_BASIC_INFO_64 ...)"); count = TASK_DYLD_INFO_COUNT; T_EXPECT_MACH_SUCCESS(task_info(self, TASK_DYLD_INFO, (task_info_t)&di, - &count), "task_info(self, TASK_DYLD_INFO ...)"); + &count), "task_info(self, TASK_DYLD_INFO ...)"); count = TASK_DYLD_INFO_COUNT; T_EXPECT_MACH_SUCCESS(task_info(other, TASK_DYLD_INFO, (task_info_t)&di, - &count), "task_info(other_name, TASK_DYLD_INFO ...)"); + &count), "task_info(other_name, TASK_DYLD_INFO ...)"); count = TASK_DYLD_INFO_COUNT; ret = task_info(other_name, TASK_DYLD_INFO, (task_info_t)&di, &count); T_EXPECT_EQ_INT(ret, KERN_INVALID_ARGUMENT, "task info TASK_DYLD_INFO should fail with mach_port_name"); @@ -78,4 +80,3 @@ T_DECL(task_info_28439149, "ensure that task_info has the correct permission", wait(NULL); } - diff --git a/tests/task_inspect.c b/tests/task_inspect.c index f16064a1f..5b3dff783 100644 --- a/tests/task_inspect.c +++ b/tests/task_inspect.c @@ -27,7 +27,7 @@ check_secure_kernel(void) size_t secure_kern_size = sizeof(secure_kern); T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.secure_kernel", &secure_kern, - &secure_kern_size, NULL, 0), NULL); + &secure_kern_size, NULL, 0), 
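
The permission model under test in task_info_28439149: a task name port (from task_name_for_pid()) is sufficient for the accounting flavors, but TASK_DYLD_INFO exposes addresses, so it requires a full control port and fails with KERN_INVALID_ARGUMENT on a name port. A sketch of the same probe against our own pid (assumes task_name_for_pid() succeeds for self, which needs no special entitlement):

#include <assert.h>
#include <mach/mach.h>
#include <unistd.h>

int
main(void)
{
    mach_port_t name_port = MACH_PORT_NULL;
    struct task_basic_info_64 bi;
    task_dyld_info_data_t di;
    mach_msg_type_number_t count;

    /* A task *name* port for ourselves. */
    assert(task_name_for_pid(mach_task_self(), getpid(),
        &name_port) == KERN_SUCCESS);

    /* Accounting flavors are allowed through a name port... */
    count = TASK_BASIC_INFO_64_COUNT;
    assert(task_info(name_port, TASK_BASIC_INFO_64,
        (task_info_t)&bi, &count) == KERN_SUCCESS);

    /* ...but TASK_DYLD_INFO exposes addresses and wants a control port. */
    count = TASK_DYLD_INFO_COUNT;
    assert(task_info(name_port, TASK_DYLD_INFO,
        (task_info_t)&di, &count) == KERN_INVALID_ARGUMENT);
    return 0;
}
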
NULL); if (secure_kern) { T_SKIP("secure kernel: processor_set_tasks will not return kernel_task"); @@ -54,7 +54,7 @@ attempt_kernel_inspection(task_t task) count = TASK_BASIC_INFO_64_COUNT; T_EXPECT_MACH_SUCCESS(task_info(task, TASK_BASIC_INFO_64, (task_info_t)&ti, - &count), "task_info(... TASK_BASIC_INFO_64 ...)"); + &count), "task_info(... TASK_BASIC_INFO_64 ...)"); T_EXPECT_MACH_SUCCESS(task_threads(task, &threads, &thcnt), "task_threads"); T_LOG("Found %d kernel threads.", thcnt); @@ -64,7 +64,7 @@ attempt_kernel_inspection(task_t task) mach_msg_type_number_t bi_count = THREAD_BASIC_INFO_COUNT; kr = thread_info(threads[i], THREAD_BASIC_INFO, - (thread_info_t)&basic_info, &bi_count); + (thread_info_t)&basic_info, &bi_count); /* * Ignore threads that have gone away. */ @@ -76,8 +76,8 @@ attempt_kernel_inspection(task_t task) (void)mach_port_deallocate(mach_task_self(), threads[i]); } mach_vm_deallocate(mach_task_self(), - (mach_vm_address_t)(uintptr_t)threads, - thcnt * sizeof(*threads)); + (mach_vm_address_t)(uintptr_t)threads, + thcnt * sizeof(*threads)); ipc_info_space_basic_t basic_info; T_EXPECT_MACH_SUCCESS(mach_port_space_basic_info(task, &basic_info), "mach_port_space_basic_info"); @@ -87,28 +87,28 @@ attempt_kernel_inspection(task_t task) ipc_info_tree_name_array_t tree; mach_msg_type_number_t tblcnt = 0, treecnt = 0; T_EXPECT_MACH_SUCCESS(mach_port_space_info(task, &info_space, &table, - &tblcnt, &tree, &treecnt), "mach_port_space_info"); + &tblcnt, &tree, &treecnt), "mach_port_space_info"); if (tblcnt > 0) { mach_vm_deallocate(mach_task_self(), - (mach_vm_address_t)(uintptr_t)table, - tblcnt * sizeof(*table)); + (mach_vm_address_t)(uintptr_t)table, + tblcnt * sizeof(*table)); } if (treecnt > 0) { mach_vm_deallocate(mach_task_self(), - (mach_vm_address_t)(uintptr_t)tree, - treecnt * sizeof(*tree)); + (mach_vm_address_t)(uintptr_t)tree, + treecnt * sizeof(*tree)); } T_END; } T_DECL(inspect_kernel_task, - "ensure that kernel task can be inspected", - T_META_CHECK_LEAKS(false), - T_META_ASROOT(true)) + "ensure that kernel task can be inspected", + T_META_CHECK_LEAKS(false), + T_META_ASROOT(true)) { processor_set_name_array_t psets; - processor_set_t pset; + processor_set_t pset; task_array_t tasks; mach_msg_type_number_t i, j, tcnt, pcnt = 0; mach_port_t self = mach_host_self(); @@ -116,7 +116,7 @@ T_DECL(inspect_kernel_task, check_secure_kernel(); T_ASSERT_MACH_SUCCESS(host_processor_sets(self, &psets, &pcnt), - NULL); + NULL); for (i = 0; i < pcnt; i++) { T_ASSERT_MACH_SUCCESS(host_processor_set_priv(self, psets[i], &pset), NULL); @@ -132,14 +132,14 @@ T_DECL(inspect_kernel_task, /* free tasks array */ mach_vm_deallocate(mach_task_self(), - (mach_vm_address_t)(uintptr_t)tasks, - tcnt * sizeof(*tasks)); + (mach_vm_address_t)(uintptr_t)tasks, + tcnt * sizeof(*tasks)); mach_port_deallocate(mach_task_self(), pset); mach_port_deallocate(mach_task_self(), psets[i]); } mach_vm_deallocate(mach_task_self(), - (mach_vm_address_t)(uintptr_t)psets, - pcnt * sizeof(*psets)); + (mach_vm_address_t)(uintptr_t)psets, + pcnt * sizeof(*psets)); T_FAIL("could not find kernel_task in list of tasks returned"); } diff --git a/tests/telemetry.c b/tests/telemetry.c index ab45d147f..810dcf2d8 100644 --- a/tests/telemetry.c +++ b/tests/telemetry.c @@ -1,7 +1,11 @@ +/* Copyright (c) 2018 Apple Inc. All rights reserved. 
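
inspect_kernel_task walks every processor set because kernel_task is only reachable through the privileged pset interface, not through an ordinary pid lookup. A condensed sketch of that discovery loop; it must run as root, and port deallocation is elided for brevity:

#include <mach/mach.h>
#include <stdio.h>

/* Walk all processor sets looking for the task whose pid is 0 (kernel_task). */
static task_t
find_kernel_task(void)
{
    processor_set_name_array_t psets;
    processor_set_t pset;
    task_array_t tasks;
    mach_msg_type_number_t pcnt, tcnt;
    mach_port_t host = mach_host_self();

    if (host_processor_sets(host, &psets, &pcnt) != KERN_SUCCESS) {
        return TASK_NULL;
    }
    for (mach_msg_type_number_t i = 0; i < pcnt; i++) {
        if (host_processor_set_priv(host, psets[i], &pset) != KERN_SUCCESS) {
            continue;
        }
        if (processor_set_tasks(pset, &tasks, &tcnt) != KERN_SUCCESS) {
            continue;
        }
        for (mach_msg_type_number_t j = 0; j < tcnt; j++) {
            int pid;
            if (pid_for_task(tasks[j], &pid) == KERN_SUCCESS && pid == 0) {
                return tasks[j];
            }
        }
    }
    return TASK_NULL;
}

int
main(void)
{
    printf("kernel_task port: 0x%x\n", find_kernel_task());
    return 0;
}
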
*/ + +#include #include #include #include #include +#include #include #include @@ -13,20 +17,76 @@ enum telemetry_pmi { #define TELEMETRY_CMD_PMI_SETUP 3 T_GLOBAL_META(T_META_NAMESPACE("xnu.debugging.telemetry"), - T_META_CHECK_LEAKS(false), - T_META_ASROOT(true)); + T_META_CHECK_LEAKS(false), + T_META_ASROOT(true)); extern int __telemetry(uint64_t cmd, uint64_t deadline, uint64_t interval, - uint64_t leeway, uint64_t arg4, uint64_t arg5); + uint64_t leeway, uint64_t arg4, uint64_t arg5); + +/* + * Data Analytics (da) also has a microstackshot configuration -- set a PMI + * cycle interval of 0 to force it to disable microstackshot on PMI. + */ + +static void +set_da_microstackshot_period(CFNumberRef num) +{ + CFPreferencesSetValue(CFSTR("microstackshotPMICycleInterval"), num, + CFSTR("com.apple.da"), +#if TARGET_OS_IPHONE + CFSTR("mobile"), +#else // TARGET_OS_IPHONE + CFSTR("root"), +#endif // !TARGET_OS_IPHONE + kCFPreferencesCurrentHost); + + notify_post("com.apple.da.tasking_changed"); +} + +static void +disable_da_microstackshots(void) +{ + int64_t zero = 0; + CFNumberRef num = CFNumberCreate(NULL, kCFNumberSInt64Type, &zero); + set_da_microstackshot_period(num); + T_LOG("notified da of tasking change, sleeping"); + sleep(3); +} + +/* + * Unset the preference to allow da to reset its configuration. + */ +static void +reenable_da_microstackshots(void) +{ + set_da_microstackshot_period(NULL); +} +/* + * Clean up the test's configuration and allow da to activate again. + */ static void telemetry_cleanup(void) { int ret = __telemetry(TELEMETRY_CMD_PMI_SETUP, TELEMETRY_PMI_NONE, 0, 0, 0, 0); T_EXPECT_POSIX_SUCCESS(ret, "telemetry(... NONE ...)"); + reenable_da_microstackshots(); +} + +/* + * Make sure da hasn't configured the microstackshots -- otherwise the PMI + * setup command will return EBUSY. 
+ */ +static void +telemetry_init(void) +{ + disable_da_microstackshots(); + T_LOG("installing cleanup handler"); + T_ATEND(telemetry_cleanup); } volatile static bool spinning = true; + static void * thread_spin(__unused void *arg) { @@ -37,7 +97,7 @@ thread_spin(__unused void *arg) #define MT_MICROSTACKSHOT KDBG_EVENTID(DBG_MONOTONIC, 2, 1) #define MS_RECORD MACHDBG_CODE(DBG_MACH_STACKSHOT, \ - MICROSTACKSHOT_RECORD) + MICROSTACKSHOT_RECORD) #if defined(__arm64__) || defined(__arm__) #define INSTRS_PERIOD (100ULL * 1000 * 1000) #else /* defined(__arm64__) || defined(__arm__) */ @@ -62,33 +122,29 @@ T_DECL(microstackshot_pmi, "attempt to configure microstackshots on PMI") __block int interrupt_records = 0; __block int timer_arm_records = 0; __block int unknown_records = 0; - __block int multi_records = 0; ktrace_events_single(s, MT_MICROSTACKSHOT, ^(__unused struct trace_point *tp) { pmi_events++; }); ktrace_events_single_paired(s, MS_RECORD, - ^(struct trace_point *start, __unused struct trace_point *end) { + ^(struct trace_point *start, __unused struct trace_point *end) { if (start->arg1 & kPMIRecord) { - pmi_records++; + pmi_records++; } if (start->arg1 & kIORecord) { - io_records++; + io_records++; } if (start->arg1 & kInterruptRecord) { - interrupt_records++; + interrupt_records++; } if (start->arg1 & kTimerArmingRecord) { - timer_arm_records++; + timer_arm_records++; } const uint8_t any_record = kPMIRecord | kIORecord | kInterruptRecord | - kTimerArmingRecord; + kTimerArmingRecord; if ((start->arg1 & any_record) == 0) { - unknown_records++; - } - if (__builtin_popcount(start->arg1 & any_record) != 1) { - multi_records++; + unknown_records++; } microstackshot_record_events++; @@ -97,32 +153,31 @@ T_DECL(microstackshot_pmi, "attempt to configure microstackshots on PMI") ktrace_set_completion_handler(s, ^{ ktrace_session_destroy(s); T_EXPECT_GT(pmi_events, 0, - "saw non-zero PMIs (%g/sec)", pmi_events / (double)SLEEP_SECS); + "saw non-zero PMIs (%g/sec)", pmi_events / (double)SLEEP_SECS); T_EXPECT_GT(pmi_records, 0, "saw non-zero PMI record events (%g/sec)", - pmi_records / (double)SLEEP_SECS); + pmi_records / (double)SLEEP_SECS); T_EXPECT_EQ(unknown_records, 0, "saw zero unknown record events"); - T_EXPECT_EQ(multi_records, 0, "saw zero multiple record events"); T_EXPECT_GT(microstackshot_record_events, 0, - "saw non-zero microstackshot record events (%g/sec)", - microstackshot_record_events / (double)SLEEP_SECS); + "saw non-zero microstackshot record events (%g/sec)", + microstackshot_record_events / (double)SLEEP_SECS); if (interrupt_records > 0) { - T_LOG("saw %g interrupt records per second", - interrupt_records / (double)SLEEP_SECS); + T_LOG("saw %g interrupt records per second", + interrupt_records / (double)SLEEP_SECS); } else { - T_LOG("saw no interrupt records"); + T_LOG("saw no interrupt records"); } if (io_records > 0) { - T_LOG("saw %g I/O records per second", - io_records / (double)SLEEP_SECS); + T_LOG("saw %g I/O records per second", + io_records / (double)SLEEP_SECS); } else { - T_LOG("saw no I/O records"); + T_LOG("saw no I/O records"); } if (timer_arm_records > 0) { - T_LOG("saw %g timer arming records per second", - timer_arm_records / (double)SLEEP_SECS); + T_LOG("saw %g timer arming records per second", + timer_arm_records / (double)SLEEP_SECS); } else { - T_LOG("saw no timer arming records"); + T_LOG("saw no timer arming records"); } T_END; @@ -130,19 +185,15 @@ T_DECL(microstackshot_pmi, "attempt to configure microstackshots on PMI") T_SETUPEND; + 
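
The counting in microstackshot_pmi is plain libktrace usage: register a per-event block with ktrace_events_single(), start the session on a queue, and let the completion handler report after ktrace_end(). A stripped-down sketch of that pattern, reusing the test's PMI event id and assuming the same private ktrace/session.h API these tests link against:

#include <dispatch/dispatch.h>
#include <ktrace/session.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/kdebug.h>

int
main(void)
{
    __block int seen = 0;
    ktrace_session_t s = ktrace_session_create();
    if (!s) {
        exit(1);
    }

    /* Count every microstackshot PMI tracepoint (same id the test uses). */
    ktrace_events_single(s, KDBG_EVENTID(DBG_MONOTONIC, 2, 1),
        ^(__unused struct trace_point *tp) {
        seen++;
    });

    ktrace_set_completion_handler(s, ^{
        printf("saw %d PMI events\n", seen);
        exit(0);
    });

    if (ktrace_start(s, dispatch_get_main_queue()) != 0) {
        exit(1);
    }
    /* Stop tracing after one second; the completion handler then fires. */
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)NSEC_PER_SEC),
        dispatch_get_main_queue(), ^{
        ktrace_end(s, 0);
    });
    dispatch_main();
}
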
telemetry_init(); + /* * Start sampling via telemetry on the instructions PMI. */ int ret = __telemetry(TELEMETRY_CMD_PMI_SETUP, TELEMETRY_PMI_INSTRS, - INSTRS_PERIOD, 0, 0, 0); - if (ret < 0 && errno == EBUSY) { - T_PASS("telemetry is busy/active, maybe the events will be seen"); - } else { - T_ASSERT_POSIX_SUCCESS(ret, - "telemetry syscall succeeded, started microstackshots"); - T_LOG("installing cleanup handler"); - T_ATEND(telemetry_cleanup); - } + INSTRS_PERIOD, 0, 0, 0); + T_ASSERT_POSIX_SUCCESS(ret, + "telemetry syscall succeeded, started microstackshots"); pthread_t thread; int error = pthread_create(&thread, NULL, thread_spin, NULL); @@ -152,7 +203,7 @@ T_DECL(microstackshot_pmi, "attempt to configure microstackshots on PMI") T_ASSERT_POSIX_ZERO(error, "started tracing"); dispatch_after(dispatch_time(DISPATCH_TIME_NOW, SLEEP_SECS * NSEC_PER_SEC), - dispatch_get_main_queue(), ^{ + dispatch_get_main_queue(), ^{ spinning = false; ktrace_end(s, 0); (void)pthread_join(thread, NULL); @@ -163,23 +214,33 @@ T_DECL(microstackshot_pmi, "attempt to configure microstackshots on PMI") } T_DECL(error_handling, - "ensure that error conditions for the telemetry syscall are observed") + "ensure that error conditions for the telemetry syscall are observed") { + telemetry_init(); + int ret = __telemetry(TELEMETRY_CMD_PMI_SETUP, TELEMETRY_PMI_INSTRS, - 1, 0, 0, 0); + 1, 0, 0, 0); T_EXPECT_EQ(ret, -1, "telemetry shouldn't allow PMI every instruction"); ret = __telemetry(TELEMETRY_CMD_PMI_SETUP, TELEMETRY_PMI_INSTRS, - 1000 * 1000, 0, 0, 0); + 1000 * 1000, 0, 0, 0); T_EXPECT_EQ(ret, -1, - "telemetry shouldn't allow PMI every million instructions"); + "telemetry shouldn't allow PMI every million instructions"); ret = __telemetry(TELEMETRY_CMD_PMI_SETUP, TELEMETRY_PMI_CYCLES, - 1, 0, 0, 0); + 1, 0, 0, 0); T_EXPECT_EQ(ret, -1, "telemetry shouldn't allow PMI every cycle"); ret = __telemetry(TELEMETRY_CMD_PMI_SETUP, TELEMETRY_PMI_CYCLES, - 1000 * 1000, 0, 0, 0); + 1000 * 1000, 0, 0, 0); T_EXPECT_EQ(ret, -1, - "telemetry shouldn't allow PMI every million cycles"); + "telemetry shouldn't allow PMI every million cycles"); + + ret = __telemetry(TELEMETRY_CMD_PMI_SETUP, TELEMETRY_PMI_CYCLES, + UINT64_MAX, 0, 0, 0); + T_EXPECT_EQ(ret, -1, "telemetry shouldn't allow PMI every UINT64_MAX cycles"); + + ret = __telemetry(TELEMETRY_CMD_PMI_SETUP, TELEMETRY_PMI_CYCLES, + (1ULL << 55), 0, 0, 0); + T_EXPECT_EQ(ret, -1, "telemetry shouldn't allow PMI with extremely long periods"); } diff --git a/tests/testposixshm.c b/tests/testposixshm.c new file mode 100644 index 000000000..e715b428d --- /dev/null +++ b/tests/testposixshm.c @@ -0,0 +1,218 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static int nthreads = 0; +static int fd; +static _Atomic int phase = 0; +static _Atomic int pass_count = 0; +static _Atomic int fail_count = 0; + +static void * +worker_thread_func(__unused void *arg) +{ + int myfd; + int error; + + /* test racing shm_open */ + while (atomic_load(&phase) == 0) { + ; + } + myfd = shm_open("abcd", O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR); + if (myfd == -1) { + T_QUIET; T_EXPECT_EQ(errno, EEXIST, "Expected EEXIST"); + atomic_fetch_add(&fail_count, 1); + } else { + fd = myfd; + atomic_fetch_add(&pass_count, 1); + } + + /* test racing ftruncate */ + while (atomic_load(&phase) == 1) { + ; + } + error = ftruncate(fd, 8 * 1024); + if (error == -1) { + T_QUIET; T_EXPECT_EQ(errno, EINVAL, "Expected EINVAL"); + atomic_fetch_add(&fail_count, 1); + } else { + 
atomic_fetch_add(&pass_count, 1); + } + + /* test racing close */ + while (atomic_load(&phase) == 2) { + ; + } + error = close(fd); + if (error == -1) { + T_QUIET; T_EXPECT_EQ(errno, EBADF, "Expected EBADF"); + atomic_fetch_add(&fail_count, 1); + } else { + atomic_fetch_add(&pass_count, 1); + } + + /* test racing shm_unlink() */ + while (atomic_load(&phase) == 3) { + ; + } + error = shm_unlink("abcd"); + if (error == -1) { + T_QUIET; T_EXPECT_EQ(errno, ENOENT, "Expected ENOENT"); + atomic_fetch_add(&fail_count, 1); + } else { + atomic_fetch_add(&pass_count, 1); + } + return NULL; +} + +static void +create_threads(void) +{ + int ret; + int ncpu; + size_t ncpu_size = sizeof(ncpu); + int i; + pthread_attr_t attr; + + ret = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0); + T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(hw.ncpu)"); + + T_QUIET; T_LOG("%s: Detected %d CPUs\n", __FUNCTION__, ncpu); + + nthreads = ncpu; + T_QUIET; T_LOG("%s: Will create %d threads\n", __FUNCTION__, nthreads); + + ret = pthread_attr_init(&attr); + T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_init"); + + for (i = 0; i < nthreads; i++) { + pthread_t thread; + ret = pthread_create(&thread, &attr, worker_thread_func, NULL); + T_QUIET; T_ASSERT_POSIX_ZERO(ret, "pthread_create"); + } +} + + +T_DECL(testposixshm, "Posix Shared Memory tests") +{ + int fd1; + int fd2; + int *addr; + char *noname = ""; + char *toolong = "12345678901234567890123456789012"; + char *maxname = "1234567890123456789012345678901"; + + /* must have O_CREAT */ + fd1 = shm_open(maxname, O_RDWR, S_IRUSR | S_IWUSR); + T_EXPECT_EQ(fd1, -1, "shm_open() missing O_CREAT"); + T_WITH_ERRNO; + T_EXPECT_EQ(errno, ENOENT, "Expected ENOENT"); + + /* name too long */ + fd1 = shm_open(toolong, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); + T_EXPECT_EQ(fd1, -1, "shm_open() name too long"); + T_WITH_ERRNO; + T_EXPECT_EQ(errno, ENAMETOOLONG, "Expected ENAMETOOLONG"); + + /* invalid name */ + fd1 = shm_open(noname, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); + T_EXPECT_EQ(fd1, -1, "shm_open() invalid name"); + T_WITH_ERRNO; + T_EXPECT_EQ(errno, EINVAL, "Expected EINVAL"); + + /* valid open */ + fd1 = shm_open(maxname, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); + T_EXPECT_POSIX_SUCCESS(fd1, "valid shm_open() result"); + + /* O_CREAT, but not O_EXCL should work */ + fd2 = shm_open(maxname, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); + T_EXPECT_POSIX_SUCCESS(fd2, "shm_open() no O_EXCL"); + + /* close should work */ + T_EXPECT_POSIX_ZERO(close(fd2), "close()"); + + /* O_CREAT | O_EXCL should fail */ + fd2 = shm_open(maxname, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR); + T_WITH_ERRNO; + T_EXPECT_EQ(fd2, -1, "shm_open() existing but O_EXCL"); + T_EXPECT_EQ(errno, EEXIST, "Expected EEXIST"); + + /* use ftruncate to create the memory */ + T_EXPECT_POSIX_ZERO(ftruncate(fd1, 16 * 1024), NULL); + + /* a second ftruncate should fail */ + T_WITH_ERRNO; + T_EXPECT_EQ(ftruncate(fd1, 8 * 1024), -1, "second ftruncate() should fail"); + T_EXPECT_EQ(errno, EINVAL, "Expected EINVAL"); + + /* Map the memory object */ + addr = mmap(0, 4 * 1024, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0); + T_WITH_ERRNO; + T_EXPECT_NE((void *)addr, MAP_FAILED, "mmap() should work"); + + /* close should work */ + T_EXPECT_POSIX_ZERO(close(fd1), "close()"); + + /* unlink should work */ + T_EXPECT_POSIX_SUCCESS(shm_unlink(maxname), "shm_unlink()"); + + /* shm_open() after unlink/close should fail */ + fd2 = shm_open(maxname, O_RDWR, S_IRUSR | S_IWUSR); + T_WITH_ERRNO; + T_EXPECT_EQ(fd2, -1, "shm_open() but removed"); + 
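
For reference, the lifecycle these assertions exercise is the standard POSIX one: shm_open() creates the object, exactly one ftruncate() sizes it (a Darwin restriction the test checks), mmap() maps it, and shm_unlink() removes the name while live mappings persist. A minimal sketch with a hypothetical object name:

#include <assert.h>
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
    const char *name = "/shm_sketch";   /* hypothetical object name */

    (void)shm_unlink(name);             /* tolerate leftovers from a prior run */
    int fd = shm_open(name, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
    assert(fd >= 0);

    /* On Darwin the object may be sized exactly once. */
    assert(ftruncate(fd, 16 * 1024) == 0);

    char *p = mmap(NULL, 4 * 1024, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    assert(p != MAP_FAILED);
    memset(p, 'x', 4 * 1024);

    /* Unlinking removes the name; the mapping stays valid until munmap. */
    assert(close(fd) == 0);
    assert(shm_unlink(name) == 0);
    p[0] = 'y';
    assert(munmap(p, 4 * 1024) == 0);
    return 0;
}
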
T_EXPECT_EQ(errno, ENOENT, "Expected ENOENT"); + + /* + * second phase of tests, try to create race conditions for + * shm_open() - multiple threads do shm_open(, ... O_EXCL, ...) + * ftruncate() - multiple threads, only 1 should succeed. + * fclose() - multiple threads, only 1 should succeed. + * shm_unlink() - multiple threads, only 1 should succeed. + */ + create_threads(); + sleep(1); + T_LOG("Race testing shm_open"); + atomic_fetch_add(&phase, 1); + while (pass_count + fail_count < nthreads) { + sleep(1); + } + T_EXPECT_EQ(pass_count, 1, "racing shm_open()"); + T_EXPECT_EQ(fail_count, nthreads - 1, "racing shm_open()"); + + atomic_store(&pass_count, 0); + atomic_store(&fail_count, 0); + T_LOG("Race testing ftruncate\n"); + atomic_fetch_add(&phase, 1); + while (pass_count + fail_count < nthreads) { + sleep(1); + } + T_EXPECT_EQ(pass_count, 1, "racing ftruncate()"); + T_EXPECT_EQ(fail_count, nthreads - 1, "racing ftruncate()"); + + atomic_store(&pass_count, 0); + atomic_store(&fail_count, 0); + T_LOG("Race testing fclose\n"); + atomic_fetch_add(&phase, 1); + while (pass_count + fail_count < nthreads) { + sleep(1); + } + T_EXPECT_EQ(pass_count, 1, "racing fclose()"); + T_EXPECT_EQ(fail_count, nthreads - 1, "racing fclose()"); + + atomic_store(&pass_count, 0); + atomic_store(&fail_count, 0); + T_LOG("Race testing shm_unlink\n"); + atomic_fetch_add(&phase, 1); + while (pass_count + fail_count < nthreads) { + sleep(1); + } + T_EXPECT_EQ(pass_count, 1, "racing shm_unlink()"); + T_EXPECT_EQ(fail_count, nthreads - 1, "racing shm_unlink()"); +} diff --git a/tests/tty_hang.c b/tests/tty_hang.c index 19dc4d23a..7bde7e1d0 100644 --- a/tests/tty_hang.c +++ b/tests/tty_hang.c @@ -40,7 +40,7 @@ #include #include -#define TEST_TIMEOUT 10 +#define TEST_TIMEOUT 10 /* * Receiving SIGTTIN (from the blocked read) is the passing condition, we just @@ -70,23 +70,23 @@ get_new_session_and_terminal_and_fork_child_to_read(char *pty_name) * child. */ T_ASSERT_POSIX_SUCCESS(socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fd), - NULL); - + NULL); + /* * New session, lose any existing controlling terminal and become * session leader. */ T_ASSERT_POSIX_SUCCESS(setsid(), NULL); - + /* now open pty, become controlling terminal of new session */ T_ASSERT_POSIX_SUCCESS(pty_fd = open(pty_name, O_RDWR), NULL); - + T_ASSERT_POSIX_SUCCESS(pid = fork(), NULL); if (pid == 0) { /* child */ int pty_fd_child; char buf[10]; - + T_ASSERT_POSIX_SUCCESS(close(sock_fd[0]), NULL); T_ASSERT_POSIX_SUCCESS(close(pty_fd), NULL); @@ -122,12 +122,12 @@ get_new_session_and_terminal_and_fork_child_to_read(char *pty_name) */ exit(0); } - + T_ASSERT_POSIX_SUCCESS(close(sock_fd[1]), NULL); - + /* wait for child to open slave side and set its pgid to its pid */ T_ASSERT_POSIX_SUCCESS(read(sock_fd[0], buf, sizeof(buf)), NULL); - + /* * We need this to happen and in the order shown * @@ -142,17 +142,17 @@ get_new_session_and_terminal_and_fork_child_to_read(char *pty_name) */ T_ASSERT_POSIX_SUCCESS(tcsetpgrp(pty_fd, pid), NULL); - + /* let child know you have set it to be the foreground process group */ T_ASSERT_POSIX_SUCCESS(write(sock_fd[0], "done", sizeof("done")), NULL); - + /* * give it a second to do the read of the terminal in response. * * XXX : Find a way to detect that the child is blocked in read(2). 
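
The race coordination in the shm test above is a simple phase gate: every worker spins on an atomic phase counter, the main thread bumps it to release them all at once, then polls the pass/fail tallies until each worker has reported. The gate in isolation, as a sketch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NTHREADS 4

static _Atomic int phase = 0;
static _Atomic int done = 0;

static void *
worker(void *arg __unused)
{
    /* Spin until the main thread opens the gate... */
    while (atomic_load(&phase) == 0) {
        ;
    }
    /* ...then all workers race into the operation under test at once. */
    atomic_fetch_add(&done, 1);
    return NULL;
}

int
main(void)
{
    pthread_t t[NTHREADS];
    for (int i = 0; i < NTHREADS; i++) {
        pthread_create(&t[i], NULL, worker, NULL);
    }
    sleep(1);                       /* let everyone reach the gate */
    atomic_fetch_add(&phase, 1);    /* open it */
    while (atomic_load(&done) < NTHREADS) {
        sleep(1);
    }
    for (int i = 0; i < NTHREADS; i++) {
        pthread_join(t[i], NULL);
    }
    printf("all %d workers raced\n", NTHREADS);
    return 0;
}
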
*/ sleep(1); - + /* * now change the foreground process group to ourselves - * Note we are now in the background process group and we need to ignore @@ -164,7 +164,7 @@ get_new_session_and_terminal_and_fork_child_to_read(char *pty_name) signal(SIGTTOU, SIG_IGN); T_ASSERT_POSIX_SUCCESS(tcsetpgrp(pty_fd, getpid()), NULL); - return (0); + return 0; } /* @@ -187,19 +187,19 @@ run_test(int do_revoke) T_QUIET; T_SETUPBEGIN; - - slave_pty= NULL; + + slave_pty = NULL; T_ASSERT_POSIX_SUCCESS(master_fd = posix_openpt(O_RDWR | O_NOCTTY), NULL); (void)fcntl(master_fd, F_SETFL, O_NONBLOCK); T_ASSERT_POSIX_SUCCESS(grantpt(master_fd), NULL); T_ASSERT_POSIX_SUCCESS(unlockpt(master_fd), NULL); - slave_pty= ptsname(master_fd); + slave_pty = ptsname(master_fd); T_ASSERT_NOTNULL(slave_pty, NULL); T_LOG("slave pty is %s\n", slave_pty); T_SETUPEND; - + /* * We get the stdin and stdout redirection but we don't have visibility * into the child (nor can we wait for it). To get around that, we fork @@ -207,7 +207,7 @@ run_test(int do_revoke) * returning to the caller. */ T_ASSERT_POSIX_SUCCESS(pid = fork(), NULL); - + if (pid == 0) { /* child */ T_ASSERT_POSIX_SUCCESS(close(master_fd), NULL); get_new_session_and_terminal_and_fork_child_to_read(slave_pty); @@ -243,10 +243,10 @@ run_test(int do_revoke) dt_waitpid(pid, &status, &sig, 0); if (sig) { T_FAIL("Test failed because child received signal %s\n", - strsignal(sig)); + strsignal(sig)); } else if (status) { T_FAIL("Test failed because child exited with status %d\n", - status); + status); } else { T_PASS("test_passed\n"); } @@ -265,7 +265,7 @@ T_HELPER_DECL(create_new_session_and_exit, "create_new_session_and_exit") { T_DECL(tty_exit_bgread_hang_test, "test for background read hang on ttys with proc exit") { dt_helper_t helpers[1]; - + helpers[0] = dt_fork_helper("create_new_session_and_exit"); dt_run_helpers(helpers, 1, TEST_TIMEOUT); } @@ -279,9 +279,8 @@ T_HELPER_DECL(create_new_session_and_revoke_terminal, "create_new_session_and_re T_DECL(tty_revoke_bgread_hang_test, "test for background read hang on ttys with revoke") { dt_helper_t helpers[1]; - + helpers[0] = dt_fork_helper("create_new_session_and_revoke_terminal"); dt_run_helpers(helpers, 1, TEST_TIMEOUT); } /*********************** END TEST 2 *********************************/ - diff --git a/tests/turnstile_multihop.c b/tests/turnstile_multihop.c index 339cfe8c2..95dfadeb0 100644 --- a/tests/turnstile_multihop.c +++ b/tests/turnstile_multihop.c @@ -60,7 +60,8 @@ struct load_token_context { static struct mach_timebase_info sched_mti; static pthread_once_t sched_mti_once_control = PTHREAD_ONCE_INIT; -static void sched_mti_init(void) +static void +sched_mti_init(void) { mach_timebase_info(&sched_mti); } @@ -103,7 +104,9 @@ sched_create_load_at_qos(qos_class_t qos, void **load_token) } context = calloc(1, sizeof(*context)); - if (context == NULL) { T_QUIET; T_LOG("calloc returned error"); return ENOMEM; } + if (context == NULL) { + T_QUIET; T_LOG("calloc returned error"); return ENOMEM; + } context->threads_should_exit = 0; context->thread_count = nthreads; @@ -112,7 +115,7 @@ sched_create_load_at_qos(qos_class_t qos, void **load_token) OSMemoryBarrier(); - for (i=0; i < nthreads; i++) { + for (i = 0; i < nthreads; i++) { ret = pthread_create(&context->threads[i], &attr, sched_load_thread, context); T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_create"); T_QUIET; T_LOG("%s: Created thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]); @@ -137,7 +140,9 @@ sched_load_thread(void *arg) uint64_t start 
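
run_test()'s setup is the standard BSD pty dance: posix_openpt() yields the master, grantpt()/unlockpt() prepare the slave, ptsname() names it, and a session leader that later opens that name without O_NOCTTY acquires it as its controlling terminal. Just the master-side setup, as a sketch:

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
    int master = posix_openpt(O_RDWR | O_NOCTTY);
    assert(master >= 0);

    /* Fix up slave ownership/permissions, then unlock it. */
    assert(grantpt(master) == 0);
    assert(unlockpt(master) == 0);

    char *slave = ptsname(master);
    assert(slave != NULL);
    printf("slave pty is %s\n", slave);

    /*
     * A child that calls setsid() and then opens `slave` (without
     * O_NOCTTY) acquires it as its controlling terminal.
     */
    close(master);
    return 0;
}
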
= mach_absolute_time(); uint64_t end = start + nanoseconds_to_absolutetime(900ULL * NSEC_PER_MSEC); - while ((mach_absolute_time() < end) && !context->threads_should_exit); + while ((mach_absolute_time() < end) && !context->threads_should_exit) { + ; + } } T_QUIET; T_LOG("%s: Thread terminating %p\n", __FUNCTION__, (void *)pthread_self()); @@ -155,7 +160,7 @@ sched_terminate_load(void *load_token) context->threads_should_exit = 1; OSMemoryBarrier(); - for (i=0; i < context->thread_count; i++) { + for (i = 0; i < context->thread_count; i++) { T_QUIET; T_LOG("%s: Joining thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]); ret = pthread_join(context->threads[i], NULL); T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_join"); @@ -169,17 +174,18 @@ sched_terminate_load(void *load_token) // Find the first num primes, simply as a means of doing work -static void do_work(int num) +static void +do_work(int num) { volatile int i = 3, count, c; - for(count = 2; count <= num; ) { - for(c = 2; c <= i; c++) { - if(i%c == 0) { + for (count = 2; count <= num;) { + for (c = 2; c <= i; c++) { + if (i % c == 0) { break; } } - if(c == i) { + if (c == i) { count++; } i++; @@ -209,7 +215,7 @@ get_user_promotion_basepri(void) mach_port_t thread_port = pthread_mach_thread_np(pthread_self()); kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE, - (thread_policy_t)&thread_policy, &count, &get_default); + (thread_policy_t)&thread_policy, &count, &get_default); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get"); return thread_policy.thps_user_promotion_basepri; } @@ -224,7 +230,7 @@ workloop_cb_test_intransit(uint64_t *workloop_id __unused, void **eventslist __u { messages_received++; T_LOG("Workloop handler workloop_cb_test_intransit called. Received message no %d", - messages_received); + messages_received); /* Skip the test if we can't check Qos */ @@ -233,24 +239,23 @@ workloop_cb_test_intransit(uint64_t *workloop_id __unused, void **eventslist __u } if (messages_received == 1) { - sleep(5); T_LOG("Do some CPU work."); do_work(5000); /* Check if the override now is IN + 60 boost */ T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_USER_INITIATED, - "dispatch_source event handler QoS should be QOS_CLASS_USER_INITIATED"); + "dispatch_source event handler QoS should be QOS_CLASS_USER_INITIATED"); T_EXPECT_EQ(get_user_promotion_basepri(), 60u, - "dispatch_source event handler should be overridden at 60"); + "dispatch_source event handler should be overridden at 60"); /* Enable the knote to get 2nd message */ struct kevent_qos_s *kev = *eventslist; kev->flags = EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED; kev->fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | - MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | - MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | - MACH_RCV_VOUCHER); + MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | + MACH_RCV_VOUCHER); *events = 1; } else { *events = 0; @@ -263,7 +268,7 @@ run_client_server(const char *server_name, const char *client_name) { dt_helper_t helpers[] = { dt_launchd_helper_domain("com.apple.xnu.test.turnstile_multihop.plist", - server_name, NULL, LAUNCH_SYSTEM_DOMAIN), + server_name, NULL, LAUNCH_SYSTEM_DOMAIN), dt_fork_helper(client_name) }; dt_run_helpers(helpers, 2, HELPER_TIMEOUT_SECS); @@ -278,7 +283,7 @@ get_server_port(void) { mach_port_t port; kern_return_t kr = bootstrap_check_in(bootstrap_port, - TURNSTILE_MULTIHOP_SERVICE_NAME, &port); + 
TURNSTILE_MULTIHOP_SERVICE_NAME, &port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in"); return port; } @@ -291,12 +296,12 @@ create_pthpriority_voucher(mach_msg_priority_t qos) mach_voucher_t voucher = MACH_PORT_NULL; kern_return_t ret; ipc_pthread_priority_value_t ipc_pthread_priority_value = - (ipc_pthread_priority_value_t)qos; + (ipc_pthread_priority_value_t)qos; mach_voucher_attr_raw_recipe_array_t recipes; mach_voucher_attr_raw_recipe_size_t recipe_size = 0; mach_voucher_attr_recipe_t recipe = - (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size]; + (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size]; recipe->key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY; recipe->command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE; @@ -308,9 +313,9 @@ create_pthpriority_voucher(mach_msg_priority_t qos) recipes = (mach_voucher_attr_raw_recipe_array_t)&voucher_buf[0]; ret = host_create_mach_voucher(mach_host_self(), - recipes, - recipe_size, - &voucher); + recipes, + recipe_size, + &voucher); T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client host_create_mach_voucher"); return voucher; @@ -331,21 +336,21 @@ send( mach_msg_body_t body; mach_msg_port_descriptor_t port_descriptor; } send_msg = { - .header = { - .msgh_remote_port = send_port, - .msgh_local_port = reply_port, - .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, - reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0, - MACH_MSG_TYPE_MOVE_SEND, - MACH_MSGH_BITS_COMPLEX), - .msgh_id = 0x100, - .msgh_size = sizeof(send_msg), + .header = { + .msgh_remote_port = send_port, + .msgh_local_port = reply_port, + .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, + reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0, + MACH_MSG_TYPE_MOVE_SEND, + MACH_MSGH_BITS_COMPLEX), + .msgh_id = 0x100, + .msgh_size = sizeof(send_msg), }, - .body = { - .msgh_descriptor_count = 1, + .body = { + .msgh_descriptor_count = 1, }, - .port_descriptor = { - .name = msg_port, + .port_descriptor = { + .name = msg_port, .disposition = MACH_MSG_TYPE_MOVE_RECEIVE, .type = MACH_MSG_PORT_DESCRIPTOR, }, @@ -360,15 +365,15 @@ send( } ret = mach_msg(&(send_msg.header), - MACH_SEND_MSG | - MACH_SEND_TIMEOUT | - MACH_SEND_OVERRIDE| - ((reply_port ? MACH_SEND_SYNC_OVERRIDE : 0) | options), - send_msg.header.msgh_size, - 0, - MACH_PORT_NULL, - 10000, - 0); + MACH_SEND_MSG | + MACH_SEND_TIMEOUT | + MACH_SEND_OVERRIDE | + ((reply_port ? 
MACH_SEND_SYNC_OVERRIDE : 0) | options), + send_msg.header.msgh_size, + 0, + MACH_PORT_NULL, + 10000, + 0); T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client mach_msg"); } @@ -385,24 +390,24 @@ receive( mach_msg_body_t body; mach_msg_port_descriptor_t port_descriptor; } rcv_msg = { - .header = + .header = { - .msgh_remote_port = MACH_PORT_NULL, - .msgh_local_port = rcv_port, - .msgh_size = sizeof(rcv_msg), + .msgh_remote_port = MACH_PORT_NULL, + .msgh_local_port = rcv_port, + .msgh_size = sizeof(rcv_msg), }, }; T_LOG("Client: Starting sync receive\n"); ret = mach_msg(&(rcv_msg.header), - MACH_RCV_MSG | - MACH_RCV_SYNC_WAIT, - 0, - rcv_msg.header.msgh_size, - rcv_port, - 0, - notify_port); + MACH_RCV_MSG | + MACH_RCV_SYNC_WAIT, + 0, + rcv_msg.header.msgh_size, + rcv_port, + 0, + notify_port); } static lock_t lock_DEF; @@ -417,13 +422,15 @@ static mach_port_t sixty_thread_port; static uint64_t dispatch_sync_owner; -static int get_pri(thread_t thread_port) { +static int +get_pri(thread_t thread_port) +{ kern_return_t kr; thread_extended_info_data_t extended_info; mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT; kr = thread_info(thread_port, THREAD_EXTENDED_INFO, - (thread_info_t)&extended_info, &count); + (thread_info_t)&extended_info, &count); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info"); @@ -452,7 +459,7 @@ thread_wait_to_block(mach_port_t thread_port) while (1) { mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT; kr = thread_info(thread_port, THREAD_EXTENDED_INFO, - (thread_info_t)&extended_info, &count); + (thread_info_t)&extended_info, &count); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info"); @@ -473,7 +480,7 @@ thread_wait_to_boost(mach_port_t thread_port, mach_port_t yield_thread, int prio while (1) { mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT; kr = thread_info(thread_port, THREAD_EXTENDED_INFO, - (thread_info_t)&extended_info, &count); + (thread_info_t)&extended_info, &count); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info"); @@ -500,20 +507,20 @@ dispatch_sync_wait(mach_port_t owner_thread, qos_class_t promote_qos) dispatch_sync_owner = owner_thread; struct kevent_qos_s kev[] = {{ - .ident = mach_thread_self(), - .filter = EVFILT_WORKLOOP, - .flags = action, - .fflags = fflags, - .udata = (uintptr_t) &def_thread_port, - .qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0), - .ext[EV_EXTIDX_WL_MASK] = mask, - .ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner, - .ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner, - }}; + .ident = mach_thread_self(), + .filter = EVFILT_WORKLOOP, + .flags = action, + .fflags = fflags, + .udata = (uintptr_t) &def_thread_port, + .qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0), + .ext[EV_EXTIDX_WL_MASK] = mask, + .ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner, + .ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner, + }}; /* Setup workloop to fake dispatch sync wait on a workloop */ r = kevent_id(30, kev, 1, kev_err, 1, NULL, - NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS); + NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS); T_QUIET; T_LOG("dispatch_sync_wait returned\n"); } @@ -532,22 +539,21 @@ dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos) dispatch_sync_owner = owner_thread; struct kevent_qos_s kev[] = {{ - .ident = def_thread_port, - .filter = EVFILT_WORKLOOP, - .flags = action, - .fflags = fflags, - .udata = (uintptr_t) &def_thread_port, - .qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0), - .ext[EV_EXTIDX_WL_MASK] = mask, - 
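
thread_wait_to_block() polls THREAD_EXTENDED_INFO until the target leaves the run queue; the diff context elides the loop's exit condition. The usual shape is below; pth_run_state and TH_STATE_WAITING come from mach/thread_info.h, and this is an approximation of the test's helper, not a copy of it:

#include <assert.h>
#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
sleeper(void *arg __unused)
{
    sleep(60);
    return NULL;
}

int
main(void)
{
    pthread_t t;
    assert(pthread_create(&t, NULL, sleeper, NULL) == 0);
    mach_port_t port = pthread_mach_thread_np(t);

    thread_extended_info_data_t info;
    for (;;) {
        mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
        kern_return_t kr = thread_info(port, THREAD_EXTENDED_INFO,
            (thread_info_t)&info, &count);
        assert(kr == KERN_SUCCESS);
        if (info.pth_run_state == TH_STATE_WAITING) {
            break;
        }
        usleep(1000);   /* don't burn the CPU the target needs to block */
    }
    printf("sleeper is blocked at priority %d\n", info.pth_curpri);
    return 0;
}
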
.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner, - .ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner, - }}; + .ident = def_thread_port, + .filter = EVFILT_WORKLOOP, + .flags = action, + .fflags = fflags, + .udata = (uintptr_t) &def_thread_port, + .qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0), + .ext[EV_EXTIDX_WL_MASK] = mask, + .ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner, + .ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner, + }}; /* Setup workloop to fake dispatch sync wake on a workloop */ r = kevent_id(30, kev, 1, kev_err, 1, NULL, - NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS); + NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS); T_QUIET; T_LOG("dispatch_sync_cancel returned\n"); - } static void * @@ -591,7 +597,7 @@ thread_at_sixty(void *arg __unused) after_lock_time = mach_absolute_time(); T_QUIET; T_LOG("The time for priority 60 thread to acquire lock was %llu \n", - (after_lock_time - before_lock_time)); + (after_lock_time - before_lock_time)); exit(0); } @@ -671,7 +677,7 @@ thread_at_maintenance(void *arg __unused) set_thread_name(__FUNCTION__); kern_return_t kr = bootstrap_look_up(bootstrap_port, - TURNSTILE_MULTIHOP_SERVICE_NAME, &qos_send_port); + TURNSTILE_MULTIHOP_SERVICE_NAME, &qos_send_port); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up"); special_reply_port = thread_get_special_reply_port(); @@ -681,11 +687,11 @@ thread_at_maintenance(void *arg __unused) /* Send an async message */ send(qos_send_port, MACH_PORT_NULL, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0); /* Send a sync message */ send(qos_send_port, special_reply_port, MACH_PORT_NULL, - (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0); + (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0); /* Create a new thread at QOS_CLASS_DEFAULT qos */ thread_create_at_qos(QOS_CLASS_DEFAULT, thread_at_default); @@ -698,7 +704,7 @@ thread_at_maintenance(void *arg __unused) } T_HELPER_DECL(three_ulock_sync_ipc_hop, - "Create chain of 4 threads with 3 ulocks and 1 sync IPC at different qos") + "Create chain of 4 threads with 3 ulocks and 1 sync IPC at different qos") { dt_stat_time_t roundtrip_stat = dt_stat_time_create("multihop_lock_acquire"); @@ -720,7 +726,7 @@ thread_create_at_qos(qos_class_t qos, void * (*function)(void *)) { qos_class_t qos_thread; pthread_t thread; - pthread_attr_t attr; + pthread_attr_t attr; int ret; ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL); @@ -728,9 +734,9 @@ thread_create_at_qos(qos_class_t qos, void * (*function)(void *)) T_LOG("set priority failed\n"); } - pthread_attr_init(&attr); - pthread_attr_set_qos_class_np(&attr, qos, 0); - pthread_create(&thread, &attr, function, NULL); + pthread_attr_init(&attr); + pthread_attr_set_qos_class_np(&attr, qos, 0); + pthread_create(&thread, &attr, function, NULL); T_LOG("pthread created\n"); pthread_get_qos_class_np(thread, &qos_thread, NULL); @@ -744,33 +750,33 @@ expect_kevent_id_recv(mach_port_t port) int r; T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop( - worker_cb, event_cb, - (pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL); + worker_cb, event_cb, + (pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL); struct kevent_qos_s kev[] = {{ - .ident = port, - .filter = EVFILT_MACHPORT, - .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, - 
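
thread_create_at_qos() is the stock pattern for spawning a thread at a fixed QoS class: encode the class into the attribute before pthread_create(), then confirm it with pthread_get_qos_class_np(). In isolation:

#include <assert.h>
#include <pthread.h>
#include <pthread/qos.h>
#include <stdio.h>

static void *
work(void *arg __unused)
{
    printf("running at qos class 0x%x\n", qos_class_self());
    return NULL;
}

int
main(void)
{
    pthread_t thread;
    pthread_attr_t attr;
    qos_class_t qos_thread;

    pthread_attr_init(&attr);
    /* Encode the requested class (relative priority 0) into the attr. */
    assert(pthread_attr_set_qos_class_np(&attr,
        QOS_CLASS_USER_INITIATED, 0) == 0);
    assert(pthread_create(&thread, &attr, work, NULL) == 0);

    /* The created thread should report the class we asked for. */
    assert(pthread_get_qos_class_np(thread, &qos_thread, NULL) == 0);
    assert(qos_thread == QOS_CLASS_USER_INITIATED);

    pthread_join(thread, NULL);
    return 0;
}
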
.fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | - MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | - MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | - MACH_RCV_VOUCHER), - .data = 1, - .qos = (int32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0) - }}; + .ident = port, + .filter = EVFILT_MACHPORT, + .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED, + .fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY | + MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) | + MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) | + MACH_RCV_VOUCHER), + .data = 1, + .qos = (int32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0) + }}; struct kevent_qos_s kev_err[] = {{ 0 }}; /* Setup workloop for mach msg rcv */ r = kevent_id(25, kev, 1, kev_err, 1, NULL, - NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS); + NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS); T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "kevent_id"); T_QUIET; T_ASSERT_EQ(r, 0, "no errors returned from kevent_id"); } T_HELPER_DECL(server_kevent_id, - "Reply with the QoS that a dispatch source event handler ran with") + "Reply with the QoS that a dispatch source event handler ran with") { expect_kevent_id_recv(get_server_port()); sigsuspend(0); @@ -779,20 +785,20 @@ T_HELPER_DECL(server_kevent_id, #define TEST_MULTIHOP(server_name, client_name, name) \ T_DECL(server_kevent_id_##name, \ - "Event delivery using a kevent_id", \ - T_META_ASROOT(YES)) \ + "Event delivery using a kevent_id", \ + T_META_ASROOT(YES)) \ { \ - run_client_server(server_name, client_name); \ + run_client_server(server_name, client_name); \ } #define TEST_MULTIHOP_SPIN(server_name, client_name, name) \ T_DECL(server_kevent_id_##name, \ - "Event delivery using a kevent_id", \ - T_META_ASROOT(YES), T_META_ENABLED(FALSE)) \ + "Event delivery using a kevent_id", \ + T_META_ASROOT(YES), T_META_ENABLED(FALSE)) \ { \ - spin_for_ever = true; \ - run_client_server(server_name, client_name); \ - spin_for_ever = false; \ + spin_for_ever = true; \ + run_client_server(server_name, client_name); \ + spin_for_ever = false; \ } /* diff --git a/tests/turnstile_multihop_helper.h b/tests/turnstile_multihop_helper.h index 0652b27b0..28b5becd8 100644 --- a/tests/turnstile_multihop_helper.h +++ b/tests/turnstile_multihop_helper.h @@ -18,9 +18,9 @@ __inline static void yield(void) { #if !defined(__x86_64__) && !defined(__i386__) - __asm volatile("yield"); + __asm volatile ("yield"); #else - __asm volatile("pause"); + __asm volatile ("pause"); #endif } @@ -28,9 +28,9 @@ __inline static void wfe(void) { #if !defined(__x86_64__) && !defined(__i386__) - __asm volatile("wfe"); + __asm volatile ("wfe"); #else - __asm volatile("pause"); + __asm volatile ("pause"); #endif } @@ -38,9 +38,9 @@ __inline static void wfi(void) { #if !defined(__x86_64__) && !defined(__i386__) - __asm volatile("wfi"); + __asm volatile ("wfi"); #else - __asm volatile("pause"); + __asm volatile ("pause"); #endif } @@ -48,7 +48,7 @@ __inline static void sev(void) { #if !defined(__x86_64__) && !defined(__i386__) - __asm volatile("sev"); + __asm volatile ("sev"); #endif } @@ -65,7 +65,7 @@ _os_get_self(void) return self; } -#define ULL_WAITERS 1U +#define ULL_WAITERS 1U static uint32_t lock_no_wait[4] = { 0, 0, 0, 0}; static uint32_t lock_wait[4] = { 0, 0, 0, 0}; @@ -89,7 +89,7 @@ ull_lock(lock_t *lock, int id, uint opcode, uint flags) if ((count % 100000) == 0) { printf("[%d,%d]%s>top of loop count=%d\n", id, mach_id, __FUNCTION__, count); } - u32 new = waiters ? 
(ULL_WAITERS|ull_locked) : ull_locked; + u32 new = waiters ? (ULL_WAITERS | ull_locked) : ull_locked; prev = 0; __c11_atomic_compare_exchange_strong(lock, &prev, new, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); if (prev == 0) { @@ -166,7 +166,7 @@ ull_unlock(lock_t *lock, int id, uint opcode, uint flags) exit(1); } - if (prev == (ULL_WAITERS|ull_locked)) { + if (prev == (ULL_WAITERS | ull_locked)) { /* locked with waiters */ *lock = 0; __c11_atomic_thread_fence(__ATOMIC_ACQ_REL); diff --git a/tests/turnstile_multihop_types.h b/tests/turnstile_multihop_types.h index fc21b00e1..31d4c37fc 100644 --- a/tests/turnstile_multihop_types.h +++ b/tests/turnstile_multihop_types.h @@ -5,28 +5,28 @@ #include #include -typedef signed char s8; -typedef unsigned char u8; -typedef uint16_t u16; -typedef int16_t s16; -typedef uint32_t u32; -typedef uint64_t u64; -typedef int32_t s32; -typedef int64_t s64; +typedef signed char s8; +typedef unsigned char u8; +typedef uint16_t u16; +typedef int16_t s16; +typedef uint32_t u32; +typedef uint64_t u64; +typedef int32_t s32; +typedef int64_t s64; #if defined(__arm64__) || defined(__x86_64__) -typedef u64 un; -typedef s64 sn; +typedef u64 un; +typedef s64 sn; #else -typedef u32 un; -typedef s32 sn; +typedef u32 un; +typedef s32 sn; #endif #ifndef __DRT_H__ -typedef u32 uint; +typedef u32 uint; #endif -#define volatile_read(atom) (*((volatile typeof(*(atom)) *)(atom))) -#define volatile_write(atom, value) (*((volatile typeof(*(atom)) *)(atom)) = value) +#define volatile_read(atom) (*((volatile typeof(*(atom)) *)(atom))) +#define volatile_write(atom, value) (*((volatile typeof(*(atom)) *)(atom)) = value) #endif diff --git a/tests/turnstiles_test.c b/tests/turnstiles_test.c index 0494ba146..34b9667f3 100644 --- a/tests/turnstiles_test.c +++ b/tests/turnstiles_test.c @@ -30,7 +30,7 @@ thread_create_at_qos(qos_class_t qos, void * (*function)(void *), int type) { qos_class_t qos_thread; pthread_t thread; - pthread_attr_t attr; + pthread_attr_t attr; int ret; ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL); @@ -38,23 +38,24 @@ thread_create_at_qos(qos_class_t qos, void * (*function)(void *), int type) T_LOG("set priority failed\n"); } - pthread_attr_init(&attr); - pthread_attr_set_qos_class_np(&attr, qos, 0); - pthread_create(&thread, &attr, function, (void *)type); + pthread_attr_init(&attr); + pthread_attr_set_qos_class_np(&attr, qos, 0); + pthread_create(&thread, &attr, function, (void *)type); T_LOG("pthread created\n"); pthread_get_qos_class_np(thread, &qos_thread, NULL); - T_EXPECT_EQ(qos_thread, (qos_class_t)qos, NULL); + T_EXPECT_EQ(qos_thread, (qos_class_t)qos, NULL); } static int -get_pri(thread_t thread_port) { +get_pri(thread_t thread_port) +{ kern_return_t kr; thread_extended_info_data_t extended_info; mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT; kr = thread_info(thread_port, THREAD_EXTENDED_INFO, - (thread_info_t)&extended_info, &count); + (thread_info_t)&extended_info, &count); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info"); return extended_info.pth_curpri; @@ -134,7 +135,7 @@ try_to_take_lock_and_unlock(void *arg) /* Try taking the test lock */ turnstile_prim_lock(type); - sleep (2); + sleep(2); turnstile_prim_unlock(type); return NULL; } @@ -254,5 +255,4 @@ T_DECL(turnstile_test, "Turnstile test", T_META_ASROOT(YES)) test1(SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE); test2(SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE); test3(SYSCTL_TURNSTILE_TEST_GLOBAL_HASHTABLE); - } diff --git a/tests/utimensat.c b/tests/utimensat.c index 
d5baad67f..c534bde51 100644 --- a/tests/utimensat.c +++ b/tests/utimensat.c @@ -45,13 +45,13 @@ T_DECL(utimensat, "Try various versions of utimensat") struct stat pre_st, post_st; int fd; - T_ASSERT_POSIX_SUCCESS((fd = open(FILENAME, O_CREAT|O_RDWR, 0644)), NULL); + T_ASSERT_POSIX_SUCCESS((fd = open(FILENAME, O_CREAT | O_RDWR, 0644)), NULL); T_ASSERT_POSIX_ZERO(close(fd), NULL); - for (size_t i = 0; i < sizeof(tptr)/sizeof(tptr[0]); i++) { - T_LOG("=== {%ld, %ld} {%ld, %ld} ===", - tptr[i][0].tv_sec, tptr[i][0].tv_nsec, - tptr[i][1].tv_sec, tptr[i][1].tv_nsec); + for (size_t i = 0; i < sizeof(tptr) / sizeof(tptr[0]); i++) { + T_LOG("=== {%ld, %ld} {%ld, %ld} ===", + tptr[i][0].tv_sec, tptr[i][0].tv_nsec, + tptr[i][1].tv_sec, tptr[i][1].tv_nsec); struct timespec now; clock_gettime(CLOCK_REALTIME, &now); diff --git a/tests/verify_kalloc_config.c b/tests/verify_kalloc_config.c index 14ce3c974..64a9f6901 100644 --- a/tests/verify_kalloc_config.c +++ b/tests/verify_kalloc_config.c @@ -7,11 +7,12 @@ T_GLOBAL_META( T_META_NAMESPACE("xnu.vm"), T_META_CHECK_LEAKS(false) -); + ); static void run_test(void); -static void run_test(void) +static void +run_test(void) { kern_return_t kr; uint64_t size, i; @@ -24,8 +25,8 @@ static void run_test(void) const char kalloc_str[] = "kalloc."; kr = mach_memory_info(mach_host_self(), - &name, &nameCnt, &info, &infoCnt, - &wiredInfo, &wiredInfoCnt); + &name, &nameCnt, &info, &infoCnt, + &wiredInfo, &wiredInfoCnt); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_memory_info"); T_QUIET; T_ASSERT_EQ(nameCnt, infoCnt, "zone name and info counts don't match"); @@ -40,19 +41,19 @@ static void run_test(void) if ((name != NULL) && (nameCnt != 0)) { kr = vm_deallocate(mach_task_self(), (vm_address_t) name, - (vm_size_t) (nameCnt * sizeof *name)); + (vm_size_t) (nameCnt * sizeof *name)); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate name"); } if ((info != NULL) && (infoCnt != 0)) { kr = vm_deallocate(mach_task_self(), (vm_address_t) info, - (vm_size_t) (infoCnt * sizeof *info)); + (vm_size_t) (infoCnt * sizeof *info)); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate info"); } if ((wiredInfo != NULL) && (wiredInfoCnt != 0)) { kr = vm_deallocate(mach_task_self(), (vm_address_t) wiredInfo, - (vm_size_t) (wiredInfoCnt * sizeof *wiredInfo)); + (vm_size_t) (wiredInfoCnt * sizeof *wiredInfo)); T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate wiredInfo"); } @@ -60,9 +61,8 @@ static void run_test(void) } T_DECL( verify_kalloc_config, - "verifies that the kalloc zones are configured correctly", - T_META_ASROOT(true)) + "verifies that the kalloc zones are configured correctly", + T_META_ASROOT(true)) { run_test(); } - diff --git a/tests/vm_phys_footprint.c b/tests/vm_phys_footprint.c new file mode 100644 index 000000000..4dbea7be7 --- /dev/null +++ b/tests/vm_phys_footprint.c @@ -0,0 +1,1221 @@ +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +extern int ledger(int cmd, caddr_t arg1, caddr_t arg2, caddr_t arg3); + +#if ENTITLED && defined(__arm64__) +#define LEGACY_FOOTPRINT 1 +#else /* ENTITLED && __arm64__ */ +#define LEGACY_FOOTPRINT 0 +#endif /* ENTITLED && __arm64__ */ + +#define MEM_SIZE (100 * 1024 * 1024) /* 100 MB */ + +static int64_t ledger_count = -1; +static int footprint_index = -1; +static int pagetable_index = -1; +static struct ledger_entry_info *lei = NULL; + +static void +ledger_init(void) +{ + static int ledger_inited = 0; + struct ledger_info li; + struct ledger_template_info *templateInfo; + 
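
The utimensat cases iterate over timespec pairs because the syscall pivots on sentinel tv_nsec values: UTIME_NOW stamps the current time, UTIME_OMIT leaves that timestamp untouched, and anything else is applied literally. A small sketch with a hypothetical file name:

#include <assert.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int
main(void)
{
    const char *path = "utimensat_sketch";   /* hypothetical test file */
    int fd = open(path, O_CREAT | O_RDWR, 0644);
    assert(fd >= 0);
    assert(close(fd) == 0);

    /* Set atime explicitly, leave mtime alone. */
    struct timespec times[2] = {
        { .tv_sec = 1000000000, .tv_nsec = 0 },  /* atime, literal */
        { .tv_nsec = UTIME_OMIT },               /* mtime untouched */
    };
    assert(utimensat(AT_FDCWD, path, times, 0) == 0);

    struct stat st;
    assert(stat(path, &st) == 0);
    assert(st.st_atimespec.tv_sec == 1000000000);

    assert(unlink(path) == 0);
    return 0;
}
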
int64_t templateCnt; + int i; + + if (ledger_inited) { + return; + } + ledger_inited = 1; + + T_SETUPBEGIN; + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_EQ(ledger(LEDGER_INFO, + (caddr_t)(uintptr_t)getpid(), + (caddr_t)&li, + NULL), + 0, + "ledger(LEDGER_INFO)"); + + templateCnt = li.li_entries; + templateInfo = malloc((size_t)li.li_entries * sizeof(struct ledger_template_info)); + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_NE(templateInfo, NULL, "malloc()"); + + ledger_count = li.li_entries; + footprint_index = -1; + pagetable_index = -1; + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_GE(ledger(LEDGER_TEMPLATE_INFO, + (caddr_t)templateInfo, + (caddr_t)&templateCnt, + NULL), + 0, + "ledger(LEDGER_TEMPLATE_INFO)"); + for (i = 0; i < templateCnt; i++) { + if (!strncmp(templateInfo[i].lti_name, + "phys_footprint", + strlen("phys_footprint"))) { + footprint_index = i; + } else if (!strncmp(templateInfo[i].lti_name, + "page_table", + strlen("page_table"))) { + pagetable_index = i; + } + } + free(templateInfo); + + lei = (struct ledger_entry_info *) + malloc((size_t)ledger_count * sizeof(*lei)); + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_NE(lei, NULL, "malloc(ledger_entry_info)"); + + T_QUIET; + T_ASSERT_NE(footprint_index, -1, "no footprint_index"); + T_QUIET; + T_ASSERT_NE(pagetable_index, -1, "no pagetable_index"); + + T_SETUPEND; +} + +static void +get_ledger_info( + uint64_t *phys_footprint, + uint64_t *page_table) +{ + int64_t count; + + count = ledger_count; + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_GE(ledger(LEDGER_ENTRY_INFO, + (caddr_t)(uintptr_t)getpid(), + (caddr_t)lei, + (caddr_t)&count), + 0, + "ledger(LEDGER_ENTRY_INFO)"); + T_QUIET; + T_ASSERT_GT(count, (int64_t)footprint_index, "no entry for footprint"); + T_QUIET; + T_ASSERT_GT(count, (int64_t)pagetable_index, "no entry for pagetable"); + if (phys_footprint) { + *phys_footprint = (uint64_t)(lei[footprint_index].lei_balance); + } + if (page_table) { + *page_table = (uint64_t)(lei[pagetable_index].lei_balance); + } +} + +static mach_vm_address_t +pre_warm( + mach_vm_size_t vm_size) +{ + kern_return_t kr; + mach_vm_address_t vm_addr; + unsigned char BigBufOnStack[100 * 1024]; + uint64_t footprint, page_table; + + /* make sure ledgers are ready to be queried */ + ledger_init(); + + T_SETUPBEGIN; + + /* + * Touch a few pages ahead on the stack, to make + * sure we don't see a footprint increase due to + * an extra stack page later. + */ + memset(BigBufOnStack, 0xb, sizeof(BigBufOnStack)); + T_QUIET; + T_EXPECT_EQ(BigBufOnStack[0], 0xb, + "BigBufOnStack[0] == 0x%x", + BigBufOnStack[0]); + T_QUIET; + T_EXPECT_EQ(BigBufOnStack[sizeof(BigBufOnStack) - 1], 0xb, + "BigBufOnStack[%lu] == 0x%x", + sizeof(BigBufOnStack), + BigBufOnStack[sizeof(BigBufOnStack) - 1]); + + /* + * Pre-allocate, touch and then release the same amount + * of memory we'll be allocating later during the test, + * to account for any memory overhead (page tables, global + * variables, ...). + */ + vm_addr = 0; + kr = mach_vm_allocate(mach_task_self(), + &vm_addr, + vm_size, + VM_FLAGS_ANYWHERE); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate(%lld) error 0x%x (%s)", + vm_size, kr, mach_error_string(kr)); + memset((char *)(uintptr_t)vm_addr, 'p', (size_t)vm_size); + kr = mach_vm_deallocate(mach_task_self(), + vm_addr, + vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + + /* + * Exercise the ledger code to make sure it's ready to run + * without any extra memory overhead later. 
+ */ + get_ledger_info(&footprint, &page_table); + + T_SETUPEND; + + /* + * Return the start of the virtual range we pre-warmed, so that the + * test can check that it's using the same range. + */ + return vm_addr; +} + +T_DECL(phys_footprint_anonymous, + "phys_footprint for anonymous memory", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* allocating virtual memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + vm_size = MEM_SIZE; + kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("virtual allocation does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "virtual allocation of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying anonymous memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... 
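freeing the range releases its dirty anonymous pages, so it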
should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating dirty anonymous memory decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +#define TEMP_FILE_TEMPLATE "/tmp/phys_footprint_data.XXXXXXXX" +#define TEMP_FILE_SIZE (1 * 1024 * 1024) + +T_DECL(phys_footprint_file, + "phys_footprint for mapped file", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + mach_vm_address_t pre_vm_addr; + int fd; + char *map_addr; + size_t map_size, dirty_size; + ssize_t nbytes; + char tmp_file_name[PATH_MAX] = TEMP_FILE_TEMPLATE; + char *buf; + size_t buf_size; + + T_SETUPBEGIN; + buf_size = TEMP_FILE_SIZE; + T_QUIET; + T_ASSERT_NOTNULL(buf = (char *)malloc(buf_size), + "allocate %zu-byte buffer", buf_size); + memset(buf, 'f', buf_size); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_NOTNULL(mktemp(tmp_file_name), + "create temporary file name"); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_GE(fd = open(tmp_file_name, O_CREAT | O_RDWR), + 0, + "create temp file"); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_EQ(nbytes = write(fd, buf, buf_size), + (ssize_t)buf_size, + "write %zu bytes", buf_size); + free(buf); + T_SETUPEND; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(TEMP_FILE_SIZE); + + /* mapping a file does not impact footprint... */ + get_ledger_info(&footprint_before, &pagetable_before); + map_size = TEMP_FILE_SIZE; + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_NOTNULL(map_addr = (char *)mmap(NULL, map_size, + PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, fd, 0), + "mmap()"); + T_QUIET; + T_EXPECT_EQ((mach_vm_address_t)map_addr, pre_vm_addr, + "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("mapping file does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "mapping file with %zu bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + map_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching file-backed memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = map_size / 2; + memset(map_addr, 'F', dirty_size); + /* ... should not impact footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying file-backed memory does not impact phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %zu bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating file-backed memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_EQ(munmap(map_addr, map_size), + 0, + "unmap file"); + /* ... 
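the dirty pages remain backed by the file, not by this process, so it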
should not impact footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("unmapping file-backed memory does not impact phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "unmapped %zu dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +T_DECL(phys_footprint_purgeable, + "phys_footprint for purgeable memory", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size; + int state; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* allocating purgeable virtual memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + vm_size = MEM_SIZE; + kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("purgeable virtual allocation does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "purgeable virtual allocation of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying anonymous memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_VOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(VOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, + "memory was non-volatile"); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making volatile decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it non-volatile... 
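(the dirty pages get re-charged in full; SET_STATE also returns the previous state in 'state', hence the assert below that it was VM_PURGABLE_VOLATILE)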
*/ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_NONVOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(NONVOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_VOLATILE, + "memory was volatile"); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making non-volatile increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made non-volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating memory decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +T_DECL(phys_footprint_purgeable_ownership, + "phys_footprint for owned purgeable memory", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size, me_size; + int state; + mach_port_t me_port; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* allocating purgeable virtual memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + vm_size = MEM_SIZE; + kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("purgeable virtual allocation does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "purgeable virtual allocation of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... 
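every page the memset touches becomes resident and charged, so it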
should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying anonymous memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_VOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(VOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, + "memory was non-volatile"); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making volatile decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it non-volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_NONVOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(NONVOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_VOLATILE, + "memory was volatile"); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making non-volatile increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made non-volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making a memory entry... */ + get_ledger_info(&footprint_before, &pagetable_before); + me_size = vm_size; + me_port = MACH_PORT_NULL; + kr = mach_make_memory_entry_64(mach_task_self(), + &me_size, + vm_addr, + VM_PROT_READ | VM_PROT_WRITE, + &me_port, + MACH_PORT_NULL); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "make_memory_entry() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(me_size, vm_size, "memory entry size mismatch"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making a memory entry does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "making a memory entry of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory while holding memory entry... 
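(the named entry keeps the purgeable object alive and this task remains its owner, so the charge stays)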
*/ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating owned memory while holding memory entry " + "does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* releasing the memory entry... */ + kr = mach_port_deallocate(mach_task_self(), me_port); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "mach_port_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("releasing memory entry decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +#ifdef MAP_MEM_LEDGER_TAGGED +T_DECL(phys_footprint_ledger_purgeable_owned, + "phys_footprint for ledger-tagged purgeable memory ownership", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size, me_size; + int state; + mach_port_t me_port; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* making a memory entry... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_size = MEM_SIZE; + me_size = vm_size; + me_port = MACH_PORT_NULL; + kr = mach_make_memory_entry_64(mach_task_self(), + &me_size, + 0, + (MAP_MEM_NAMED_CREATE | + MAP_MEM_LEDGER_TAGGED | + MAP_MEM_PURGABLE | + VM_PROT_READ | VM_PROT_WRITE), + &me_port, + MACH_PORT_NULL); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "make_memory_entry() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(me_size, vm_size, "memory entry size mismatch"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making a memory entry does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "making a memory entry of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* mapping ledger-tagged virtual memory... 
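(mapping only reserves virtual address space; nothing is resident yet)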
*/ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + me_port, + 0, /* offset */ + FALSE, /* copy */ + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE, + VM_INHERIT_DEFAULT); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_map() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("mapping ledger-tagged memory does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "ledger-tagged mapping of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying ledger-tagged memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_VOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(VOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, + "memory was non-volatile"); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making volatile decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it non-volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_NONVOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(NONVOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_VOLATILE, + "memory was volatile"); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making non-volatile increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made non-volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory while holding memory entry... 
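(the ledger-tagged entry still owns the resident pages, so the charge stays with this task)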
*/ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating owned memory while holding memory entry " + "does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* releasing the memory entry... */ + kr = mach_port_deallocate(mach_task_self(), me_port); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "mach_port_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("releasing memory entry decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +T_DECL(phys_footprint_ledger_owned, + "phys_footprint for ledger-tagged memory ownership", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size, me_size; + int state; + mach_port_t me_port; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* making a memory entry... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_size = MEM_SIZE; + me_size = vm_size; + me_port = MACH_PORT_NULL; + kr = mach_make_memory_entry_64(mach_task_self(), + &me_size, + 0, + (MAP_MEM_NAMED_CREATE | + MAP_MEM_LEDGER_TAGGED | + VM_PROT_READ | VM_PROT_WRITE), + &me_port, + MACH_PORT_NULL); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "make_memory_entry() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(me_size, vm_size, "memory entry size mismatch"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making a memory entry does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "making a memory entry of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* mapping ledger-tagged virtual memory... 
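(as above, a virtual-only operation until the pages are touched)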
*/ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + me_port, + 0, /* offset */ + FALSE, /* copy */ + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE, + VM_INHERIT_DEFAULT); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_map() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("mapping ledger-tagged memory does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "ledger-tagged mapping of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying ledger-tagged memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory while holding memory entry... */ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating owned memory while holding memory entry " + "does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* releasing the memory entry... */ + kr = mach_port_deallocate(mach_task_self(), me_port); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "mach_port_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... 
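dropping the last reference lets the kernel free the entry's pages, so it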
should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("releasing memory entry decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} +#endif /* MAP_MEM_LEDGER_TAGGED */ + +/* IOSurface code from: CoreImage/CoreImageTests/CIRender/SurfaceUtils.c */ +#include +#include +#include +static size_t +bytes_per_element(uint32_t format) +{ + size_t bpe = 0; + switch (format) { + case 32: // kCVPixelFormatType_32ARGB (ARGB8) + bpe = 4; + break; + default: + bpe = 0; + break; + } + return bpe; +} +static size_t +bytes_per_pixel(uint32_t format) +{ + size_t bpe = 0; + switch (format) { + case 32: // kCVPixelFormatType_32ARGB (ARGB8) + bpe = 4; + break; + default: + bpe = 0; + break; + } + return bpe; +} +static inline size_t +roundSizeToMultiple(size_t size, size_t mult) +{ + return ((size + mult - 1) / mult) * mult; +} +static inline void +setIntValue(CFMutableDictionaryRef dict, const CFStringRef key, int value) +{ + CFNumberRef number = CFNumberCreate(0, kCFNumberIntType, &value); + CFDictionarySetValue(dict, key, number); + CFRelease(number); +} +typedef void (^SurfacePlaneBlock)(void *data, size_t planeIndex, size_t width, size_t height, size_t rowbytes); +static IOReturn +SurfaceApplyPlaneBlock(IOSurfaceRef surface, SurfacePlaneBlock block) +{ + if (surface == nil || block == nil) { + return kIOReturnBadArgument; + } + + IOReturn result = kIOReturnSuccess; + size_t planeCount = IOSurfaceGetPlaneCount(surface); + + if (planeCount == 0) { + result = IOSurfaceLock(surface, 0, NULL); + if (result != kIOReturnSuccess) { + return result; + } + + void* base = IOSurfaceGetBaseAddress(surface); + size_t rb = IOSurfaceGetBytesPerRow(surface); + size_t w = IOSurfaceGetWidth(surface); + size_t h = IOSurfaceGetHeight(surface); + + if (base && rb && w && h) { + block(base, 0, w, h, rb); + } + + IOSurfaceUnlock(surface, 0, NULL); + } else if (planeCount == 2) { + for (size_t i = 0; i < planeCount; i++) { + result = IOSurfaceLock(surface, 0, NULL); + if (result != kIOReturnSuccess) { + return result; + } + + void* base = IOSurfaceGetBaseAddressOfPlane(surface, i); + size_t rb = IOSurfaceGetBytesPerRowOfPlane(surface, i); + size_t w = IOSurfaceGetWidthOfPlane(surface, i); + size_t h = IOSurfaceGetHeightOfPlane(surface, i); + + if (base && rb && w && h) { + block(base, i, w, h, rb); + } + + IOSurfaceUnlock(surface, 0, NULL); + } + } + return result; +} +static void +ClearSurface(IOSurfaceRef surface) +{ + const int zero = 0; + (void) SurfaceApplyPlaneBlock(surface, ^(void *p, size_t i, __unused size_t w, size_t h, size_t rb) + { + if (i == 0) { + memset(p, zero, rb * h); + } else { + memset(p, 128, rb * h); + } + }); +} +static IOSurfaceRef +CreateSurface(uint32_t pixelsWide, uint32_t pixelsHigh, uint32_t rowBytesAlignment, uint32_t fmt, bool purgeable, bool clear) +{ + IOSurfaceRef surface = nil; + + if (pixelsWide < 1 || pixelsHigh < 1 || fmt == 0) { + return nil; + } + + size_t bpp = bytes_per_pixel(fmt); + size_t bpe = bytes_per_element(fmt); + if (bpp == 0 || bpe == 0) { + return nil; + } + + size_t rowbytes = pixelsWide * bpp; + if (rowBytesAlignment == 0) { + rowBytesAlignment = 16; + } + rowbytes = roundSizeToMultiple(rowbytes, 
rowBytesAlignment); + + CFMutableDictionaryRef props = CFDictionaryCreateMutable(0, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks); + setIntValue(props, kIOSurfaceBytesPerRow, (int)rowbytes); + setIntValue(props, kIOSurfaceWidth, (int)pixelsWide); + setIntValue(props, kIOSurfaceHeight, (int)pixelsHigh); + setIntValue(props, kIOSurfacePixelFormat, (int)fmt); +#if TARGET_OS_IPHONE + setIntValue(props, kIOSurfaceNonPurgeable, purgeable); +#else /* TARGET_OS_IPHONE */ + (void)purgeable; +#endif /* TARGET_OS_IPHONE */ + { + if (bpe != bpp) { // i.e. a 422 format such as 'yuvf' etc. + setIntValue(props, kIOSurfaceElementWidth, 2); + setIntValue(props, kIOSurfaceElementHeight, 1); + } + setIntValue(props, kIOSurfaceBytesPerElement, (int)bpe); + } + + surface = IOSurfaceCreate(props); + + if (clear) { + ClearSurface(surface); + } + + CFRelease(props); + return surface; +} +T_DECL(phys_footprint_purgeable_iokit, + "phys_footprint for purgeable IOKit memory", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + IOSurfaceRef surface; + uint32_t old_state; + uint64_t surface_size; + + T_SETUPBEGIN; + ledger_init(); + surface = CreateSurface(1024, 1024, 0, 32, true, true); + IOSurfaceSetPurgeable(surface, kIOSurfacePurgeableVolatile, &old_state); + IOSurfaceSetPurgeable(surface, kIOSurfacePurgeableNonVolatile, &old_state); + CFRelease(surface); + T_SETUPEND; + + surface_size = 1024 * 1024 * 4; + + /* create IOsurface: footprint grows */ + get_ledger_info(&footprint_before, &pagetable_before); + surface = CreateSurface(1024, 1024, 0, 32, true, true); + get_ledger_info(&footprint_after, &pagetable_after); +#if LEGACY_FOOTPRINT + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("LEGACY FOOTPRINT: creating IOSurface: no footprint impact"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "create IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#else /* LEGACY_FOOTPRINT */ + footprint_expected = footprint_before + surface_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("creating IOSurface increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "create IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#endif /* LEGACY_FOOTPRINT */ + + /* make IOSurface volatile: footprint shrinks */ + get_ledger_info(&footprint_before, &pagetable_before); + IOSurfaceSetPurgeable(surface, kIOSurfacePurgeableVolatile, &old_state); + get_ledger_info(&footprint_after, &pagetable_after); +#if LEGACY_FOOTPRINT + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("LEGACY FOOTPRINT: volatile IOSurface: no footprint impact"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "volatile IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#else /* LEGACY_FOOTPRINT */ + footprint_expected = footprint_before - surface_size; + footprint_expected += (pagetable_after - pagetable_before); + 
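/* fold any page-table delta into the expected value, as elsewhere */ +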
T_LOG("making IOSurface volatile decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#endif /* LEGACY_FOOTPRINT */ + + /* make IOSurface non-volatile: footprint grows */ + get_ledger_info(&footprint_before, &pagetable_before); + IOSurfaceSetPurgeable(surface, kIOSurfacePurgeableNonVolatile, &old_state); + get_ledger_info(&footprint_after, &pagetable_after); +#if LEGACY_FOOTPRINT + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("LEGACY FOOTPRINT: non-volatile IOSurface: no footprint impact"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "non-volatile IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#else /* LEGACY_FOOTPRINT */ + footprint_expected = footprint_before + surface_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making IOSurface non-volatile increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made non-volatile %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#endif /* LEGACY_FOOTPRINT */ + + /* accessing IOSurface re-mapping: no footprint impact */ + + /* deallocating IOSurface re-mapping: no footprint impact */ + + /* release IOSurface: footprint shrinks */ + get_ledger_info(&footprint_before, &pagetable_before); + CFRelease(surface); + get_ledger_info(&footprint_after, &pagetable_after); +#if LEGACY_FOOTPRINT + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("LEGACY FOOTPRINT: release IOSurface: no footprint impact"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "releasing IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#else /* LEGACY_FOOTPRINT */ + footprint_expected = footprint_before - surface_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("releasing IOSurface decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "released IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#endif /* LEGACY_FOOTPRINT */ +} diff --git a/tests/vm_phys_footprint_legacy.c b/tests/vm_phys_footprint_legacy.c new file mode 100644 index 000000000..c6357797f --- /dev/null +++ b/tests/vm_phys_footprint_legacy.c @@ -0,0 +1,1223 @@ +#define ENTITLED 1 + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +extern int ledger(int cmd, caddr_t arg1, caddr_t arg2, caddr_t arg3); + +#if ENTITLED && defined(__arm64__) +#define LEGACY_FOOTPRINT 1 +#else /* ENTITLED && __arm64__ */ +#define LEGACY_FOOTPRINT 0 +#endif /* ENTITLED && __arm64__ */ + +#define MEM_SIZE (100 * 1024 * 1024) /* 100 MB */ + +static int64_t ledger_count = -1; +static int footprint_index = -1; +static int pagetable_index = -1; +static struct ledger_entry_info *lei = 
NULL; + +static void +ledger_init(void) +{ + static int ledger_inited = 0; + struct ledger_info li; + struct ledger_template_info *templateInfo; + int64_t templateCnt; + int i; + + if (ledger_inited) { + return; + } + ledger_inited = 1; + + T_SETUPBEGIN; + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_EQ(ledger(LEDGER_INFO, + (caddr_t)(uintptr_t)getpid(), + (caddr_t)&li, + NULL), + 0, + "ledger(LEDGER_INFO)"); + + templateCnt = li.li_entries; + templateInfo = malloc((size_t)li.li_entries * sizeof(struct ledger_template_info)); + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_NE(templateInfo, NULL, "malloc()"); + + ledger_count = li.li_entries; + footprint_index = -1; + pagetable_index = -1; + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_GE(ledger(LEDGER_TEMPLATE_INFO, + (caddr_t)templateInfo, + (caddr_t)&templateCnt, + NULL), + 0, + "ledger(LEDGER_TEMPLATE_INFO)"); + for (i = 0; i < templateCnt; i++) { + if (!strncmp(templateInfo[i].lti_name, + "phys_footprint", + strlen("phys_footprint"))) { + footprint_index = i; + } else if (!strncmp(templateInfo[i].lti_name, + "page_table", + strlen("page_table"))) { + pagetable_index = i; + } + } + free(templateInfo); + + lei = (struct ledger_entry_info *) + malloc((size_t)ledger_count * sizeof(*lei)); + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_NE(lei, NULL, "malloc(ledger_entry_info)"); + + T_QUIET; + T_ASSERT_NE(footprint_index, -1, "no footprint_index"); + T_QUIET; + T_ASSERT_NE(pagetable_index, -1, "no pagetable_index"); + + T_SETUPEND; +} + +static void +get_ledger_info( + uint64_t *phys_footprint, + uint64_t *page_table) +{ + int64_t count; + + count = ledger_count; + T_QUIET; + T_WITH_ERRNO; + T_ASSERT_GE(ledger(LEDGER_ENTRY_INFO, + (caddr_t)(uintptr_t)getpid(), + (caddr_t)lei, + (caddr_t)&count), + 0, + "ledger(LEDGER_ENTRY_INFO)"); + T_QUIET; + T_ASSERT_GT(count, (int64_t)footprint_index, "no entry for footprint"); + T_QUIET; + T_ASSERT_GT(count, (int64_t)pagetable_index, "no entry for pagetable"); + if (phys_footprint) { + *phys_footprint = (uint64_t)(lei[footprint_index].lei_balance); + } + if (page_table) { + *page_table = (uint64_t)(lei[pagetable_index].lei_balance); + } +} + +static mach_vm_address_t +pre_warm( + mach_vm_size_t vm_size) +{ + kern_return_t kr; + mach_vm_address_t vm_addr; + unsigned char BigBufOnStack[100 * 1024]; + uint64_t footprint, page_table; + + /* make sure ledgers are ready to be queried */ + ledger_init(); + + T_SETUPBEGIN; + + /* + * Touch a few pages ahead on the stack, to make + * sure we don't see a footprint increase due to + * an extra stack page later. + */ + memset(BigBufOnStack, 0xb, sizeof(BigBufOnStack)); + T_QUIET; + T_EXPECT_EQ(BigBufOnStack[0], 0xb, + "BigBufOnStack[0] == 0x%x", + BigBufOnStack[0]); + T_QUIET; + T_EXPECT_EQ(BigBufOnStack[sizeof(BigBufOnStack) - 1], 0xb, + "BigBufOnStack[%lu] == 0x%x", + sizeof(BigBufOnStack), + BigBufOnStack[sizeof(BigBufOnStack) - 1]); + + /* + * Pre-allocate, touch and then release the same amount + * of memory we'll be allocating later during the test, + * to account for any memory overhead (page tables, global + * variables, ...). 
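+ * Page tables created here stay in place; any later growth shows up + * in the page_table ledger entry, which the tests factor out.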
+ */ + vm_addr = 0; + kr = mach_vm_allocate(mach_task_self(), + &vm_addr, + vm_size, + VM_FLAGS_ANYWHERE); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate(%lld) error 0x%x (%s)", + vm_size, kr, mach_error_string(kr)); + memset((char *)(uintptr_t)vm_addr, 'p', (size_t)vm_size); + kr = mach_vm_deallocate(mach_task_self(), + vm_addr, + vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + + /* + * Exercise the ledger code to make sure it's ready to run + * without any extra memory overhead later. + */ + get_ledger_info(&footprint, &page_table); + + T_SETUPEND; + + /* + * Return the start of the virtual range we pre-warmed, so that the + * test can check that it's using the same range. + */ + return vm_addr; +} + +T_DECL(legacy_phys_footprint_anonymous, + "phys_footprint for anonymous memory", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* allocating virtual memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + vm_size = MEM_SIZE; + kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("virtual allocation does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "virtual allocation of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying anonymous memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... 
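the dirty anonymous pages are freed along with the range, so it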
should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating dirty anonymous memory decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +#define TEMP_FILE_TEMPLATE "/tmp/phys_footprint_data.XXXXXXXX" +#define TEMP_FILE_SIZE (1 * 1024 * 1024) + +T_DECL(legacy_phys_footprint_file, + "phys_footprint for mapped file", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + mach_vm_address_t pre_vm_addr; + int fd; + char *map_addr; + size_t map_size, dirty_size; + ssize_t nbytes; + char tmp_file_name[PATH_MAX] = TEMP_FILE_TEMPLATE; + char *buf; + size_t buf_size; + + T_SETUPBEGIN; + buf_size = TEMP_FILE_SIZE; + T_QUIET; + T_ASSERT_NOTNULL(buf = (char *)malloc(buf_size), + "allocate %zu-byte buffer", buf_size); + memset(buf, 'f', buf_size); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_NOTNULL(mktemp(tmp_file_name), + "create temporary file name"); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_GE(fd = open(tmp_file_name, O_CREAT | O_RDWR), + 0, + "create temp file"); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_EQ(nbytes = write(fd, buf, buf_size), + (ssize_t)buf_size, + "write %zu bytes", buf_size); + free(buf); + T_SETUPEND; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(TEMP_FILE_SIZE); + + /* mapping a file does not impact footprint... */ + get_ledger_info(&footprint_before, &pagetable_before); + map_size = TEMP_FILE_SIZE; + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_NOTNULL(map_addr = (char *)mmap(NULL, map_size, + PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, fd, 0), + "mmap()"); + T_QUIET; + T_EXPECT_EQ((mach_vm_address_t)map_addr, pre_vm_addr, + "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("mapping file does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "mapping file with %zu bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + map_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching file-backed memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = map_size / 2; + memset(map_addr, 'F', dirty_size); + /* ... should not impact footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying file-backed memory does not impact phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %zu bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating file-backed memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + T_WITH_ERRNO; + T_QUIET; + T_ASSERT_EQ(munmap(map_addr, map_size), + 0, + "unmap file"); + /* ... 
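the file keeps backing those pages after the unmap, so it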
should not impact footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("unmapping file-backed memory does not impact phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "unmapped %zu dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +T_DECL(legacy_phys_footprint_purgeable, + "phys_footprint for purgeable memory", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size; + int state; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* allocating purgeable virtual memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + vm_size = MEM_SIZE; + kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("purgeable virtual allocation does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "purgeable virtual allocation of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying anonymous memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_VOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(VOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, + "memory was non-volatile"); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making volatile decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it non-volatile... 
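(nothing was purged in between, so the full dirty amount is re-charged; 'state' again reports the previous state)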
*/ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_NONVOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(NONVOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_VOLATILE, + "memory was volatile"); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making non-volatile increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made non-volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating memory decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +T_DECL(legacy_phys_footprint_purgeable_ownership, + "phys_footprint for owned purgeable memory", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size, me_size; + int state; + mach_port_t me_port; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* allocating purgeable virtual memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + vm_size = MEM_SIZE; + kr = mach_vm_allocate(mach_task_self(), &vm_addr, vm_size, + VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_allocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("purgeable virtual allocation does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "purgeable virtual allocation of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... 
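dirtying the purgeable pages makes them resident and charged, so it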
should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying anonymous memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_VOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(VOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, + "memory was non-volatile"); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making volatile decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it non-volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_NONVOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(NONVOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_VOLATILE, + "memory was volatile"); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making non-volatile increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made non-volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making a memory entry... */ + get_ledger_info(&footprint_before, &pagetable_before); + me_size = vm_size; + me_port = MACH_PORT_NULL; + kr = mach_make_memory_entry_64(mach_task_self(), + &me_size, + vm_addr, + VM_PROT_READ | VM_PROT_WRITE, + &me_port, + MACH_PORT_NULL); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "make_memory_entry() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(me_size, vm_size, "memory entry size mismatch"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making a memory entry does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "making a memory entry of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory while holding memory entry... 
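(ownership follows the named entry, so the charge remains on this task)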
*/ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating owned memory while holding memory entry " + "does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* releasing the memory entry... */ + kr = mach_port_deallocate(mach_task_self(), me_port); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "mach_port_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("releasing memory entry decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +#ifdef MAP_MEM_LEDGER_TAGGED +T_DECL(legacy_phys_footprint_ledger_purgeable_owned, + "phys_footprint for ledger-tagged purgeable memory ownership", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size, me_size; + int state; + mach_port_t me_port; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* making a memory entry... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_size = MEM_SIZE; + me_size = vm_size; + me_port = MACH_PORT_NULL; + kr = mach_make_memory_entry_64(mach_task_self(), + &me_size, + 0, + (MAP_MEM_NAMED_CREATE | + MAP_MEM_LEDGER_TAGGED | + MAP_MEM_PURGABLE | + VM_PROT_READ | VM_PROT_WRITE), + &me_port, + MACH_PORT_NULL); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "make_memory_entry() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(me_size, vm_size, "memory entry size mismatch"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making a memory entry does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "making a memory entry of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* mapping ledger-tagged virtual memory... 
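(mapping the entry creates only virtual address space; pages are charged to the ledger when they are first dirtied)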
*/ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + me_port, + 0, /* offset */ + FALSE, /* copy */ + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE, + VM_INHERIT_DEFAULT); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_map() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("mapping ledger-tagged memory does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "ledger-tagged mapping of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying ledger-tagged memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_VOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(VOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, + "memory was non-volatile"); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making volatile decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* making it non-volatile... */ + get_ledger_info(&footprint_before, &pagetable_before); + state = VM_PURGABLE_NONVOLATILE; + T_QUIET; + T_ASSERT_EQ(mach_vm_purgable_control(mach_task_self(), + vm_addr, + VM_PURGABLE_SET_STATE, + &state), + KERN_SUCCESS, + "vm_purgable_control(NONVOLATILE)"); + T_QUIET; + T_ASSERT_EQ(state, VM_PURGABLE_VOLATILE, + "memory was volatile"); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making non-volatile increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made non-volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory while holding memory entry... 
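(as above, the surviving memory entry keeps the ledger-tagged pages charged to this task)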
*/ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating owned memory while holding memory entry " + "does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* releasing the memory entry... */ + kr = mach_port_deallocate(mach_task_self(), me_port); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "mach_port_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("releasing memory entry decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} + +T_DECL(legacy_phys_footprint_ledger_owned, + "phys_footprint for ledger-tagged memory ownership", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + kern_return_t kr; + mach_vm_address_t pre_vm_addr, vm_addr; + mach_vm_size_t vm_size, dirty_size, me_size; + int state; + mach_port_t me_port; + + /* pre-warm to account for page table expansion */ + pre_vm_addr = pre_warm(MEM_SIZE); + + /* making a memory entry... */ + get_ledger_info(&footprint_before, &pagetable_before); + vm_size = MEM_SIZE; + me_size = vm_size; + me_port = MACH_PORT_NULL; + kr = mach_make_memory_entry_64(mach_task_self(), + &me_size, + 0, + (MAP_MEM_NAMED_CREATE | + MAP_MEM_LEDGER_TAGGED | + VM_PROT_READ | VM_PROT_WRITE), + &me_port, + MACH_PORT_NULL); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "make_memory_entry() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(me_size, vm_size, "memory entry size mismatch"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making a memory entry does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "making a memory entry of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* mapping ledger-tagged virtual memory... 
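(again a purely virtual operation; the footprint charge comes later, when the pages are touched)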
*/ + get_ledger_info(&footprint_before, &pagetable_before); + vm_addr = 0; + kr = mach_vm_map(mach_task_self(), &vm_addr, vm_size, + 0, /* mask */ + VM_FLAGS_ANYWHERE, + me_port, + 0, /* offset */ + FALSE, /* copy */ + VM_PROT_READ | VM_PROT_WRITE, + VM_PROT_READ | VM_PROT_WRITE, + VM_INHERIT_DEFAULT); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_map() error 0x%x (%s)", + kr, mach_error_string(kr)); + T_QUIET; + T_EXPECT_EQ(vm_addr, pre_vm_addr, "pre-warm mishap"); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("mapping ledger-tagged memory does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "ledger-tagged mapping of %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + vm_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* touching memory... */ + get_ledger_info(&footprint_before, &pagetable_before); + dirty_size = vm_size / 2; + memset((char *)(uintptr_t)vm_addr, 'x', (size_t)dirty_size); + /* ... should increase footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before + dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("modifying ledger-tagged memory increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "touched %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* deallocating memory while holding memory entry... */ + get_ledger_info(&footprint_before, &pagetable_before); + kr = mach_vm_deallocate(mach_task_self(), vm_addr, vm_size); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "vm_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... should not change footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("deallocating owned memory while holding memory entry " + "does not change phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "deallocated %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); + + /* releasing the memory entry... */ + kr = mach_port_deallocate(mach_task_self(), me_port); + T_QUIET; + T_EXPECT_EQ(kr, KERN_SUCCESS, "mach_port_deallocate() error 0x%x (%s)", + kr, mach_error_string(kr)); + /* ... 
should decrease footprint */ + get_ledger_info(&footprint_after, &pagetable_after); + footprint_expected = footprint_before - dirty_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("releasing memory entry decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld dirty bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + dirty_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +} +#endif /* MAP_MEM_LEDGER_TAGGED */ + +/* IOSurface code from: CoreImage/CoreImageTests/CIRender/SurfaceUtils.c */ +#include +#include +#include +static size_t +bytes_per_element(uint32_t format) +{ + size_t bpe = 0; + switch (format) { + case 32: // kCVPixelFormatType_32ARGB (ARGB8) + bpe = 4; + break; + default: + bpe = 0; + break; + } + return bpe; +} +static size_t +bytes_per_pixel(uint32_t format) +{ + size_t bpe = 0; + switch (format) { + case 32: // kCVPixelFormatType_32ARGB (ARGB8) + bpe = 4; + break; + default: + bpe = 0; + break; + } + return bpe; +} +static inline size_t +roundSizeToMultiple(size_t size, size_t mult) +{ + return ((size + mult - 1) / mult) * mult; +} +static inline void +setIntValue(CFMutableDictionaryRef dict, const CFStringRef key, int value) +{ + CFNumberRef number = CFNumberCreate(0, kCFNumberIntType, &value); + CFDictionarySetValue(dict, key, number); + CFRelease(number); +} +typedef void (^SurfacePlaneBlock)(void *data, size_t planeIndex, size_t width, size_t height, size_t rowbytes); +static IOReturn +SurfaceApplyPlaneBlock(IOSurfaceRef surface, SurfacePlaneBlock block) +{ + if (surface == nil || block == nil) { + return kIOReturnBadArgument; + } + + IOReturn result = kIOReturnSuccess; + size_t planeCount = IOSurfaceGetPlaneCount(surface); + + if (planeCount == 0) { + result = IOSurfaceLock(surface, 0, NULL); + if (result != kIOReturnSuccess) { + return result; + } + + void* base = IOSurfaceGetBaseAddress(surface); + size_t rb = IOSurfaceGetBytesPerRow(surface); + size_t w = IOSurfaceGetWidth(surface); + size_t h = IOSurfaceGetHeight(surface); + + if (base && rb && w && h) { + block(base, 0, w, h, rb); + } + + IOSurfaceUnlock(surface, 0, NULL); + } else if (planeCount == 2) { + for (size_t i = 0; i < planeCount; i++) { + result = IOSurfaceLock(surface, 0, NULL); + if (result != kIOReturnSuccess) { + return result; + } + + void* base = IOSurfaceGetBaseAddressOfPlane(surface, i); + size_t rb = IOSurfaceGetBytesPerRowOfPlane(surface, i); + size_t w = IOSurfaceGetWidthOfPlane(surface, i); + size_t h = IOSurfaceGetHeightOfPlane(surface, i); + + if (base && rb && w && h) { + block(base, i, w, h, rb); + } + + IOSurfaceUnlock(surface, 0, NULL); + } + } + return result; +} +static void +ClearSurface(IOSurfaceRef surface) +{ + const int zero = 0; + (void) SurfaceApplyPlaneBlock(surface, ^(void *p, size_t i, __unused size_t w, size_t h, size_t rb) + { + if (i == 0) { + memset(p, zero, rb * h); + } else { + memset(p, 128, rb * h); + } + }); +} +static IOSurfaceRef +CreateSurface(uint32_t pixelsWide, uint32_t pixelsHigh, uint32_t rowBytesAlignment, uint32_t fmt, bool purgeable, bool clear) +{ + IOSurfaceRef surface = nil; + + if (pixelsWide < 1 || pixelsHigh < 1 || fmt == 0) { + return nil; + } + + size_t bpp = bytes_per_pixel(fmt); + size_t bpe = bytes_per_element(fmt); + if (bpp == 0 || bpe == 0) { + return nil; + } + + size_t rowbytes = pixelsWide * bpp; + if (rowBytesAlignment == 0) { + rowBytesAlignment = 16; + } + rowbytes = roundSizeToMultiple(rowbytes, 
rowBytesAlignment); + + CFMutableDictionaryRef props = CFDictionaryCreateMutable(0, 0, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks); + setIntValue(props, kIOSurfaceBytesPerRow, (int)rowbytes); + setIntValue(props, kIOSurfaceWidth, (int)pixelsWide); + setIntValue(props, kIOSurfaceHeight, (int)pixelsHigh); + setIntValue(props, kIOSurfacePixelFormat, (int)fmt); +#if TARGET_OS_IPHONE + setIntValue(props, kIOSurfaceNonPurgeable, purgeable); +#else /* TARGET_OS_IPHONE */ + (void)purgeable; +#endif /* TARGET_OS_IPHONE */ + { + if (bpe != bpp) { // i.e. a 422 format such as 'yuvf' etc. + setIntValue(props, kIOSurfaceElementWidth, 2); + setIntValue(props, kIOSurfaceElementHeight, 1); + } + setIntValue(props, kIOSurfaceBytesPerElement, (int)bpe); + } + + surface = IOSurfaceCreate(props); + + if (clear) { + ClearSurface(surface); + } + + CFRelease(props); + return surface; +} +T_DECL(legacy_phys_footprint_purgeable_iokit, + "phys_footprint for purgeable IOKit memory", + T_META_NAMESPACE("xnu.vm"), + T_META_LTEPHASE(LTE_POSTINIT)) +{ + uint64_t footprint_before, pagetable_before; + uint64_t footprint_after, pagetable_after; + uint64_t footprint_expected; + IOSurfaceRef surface; + uint32_t old_state; + uint64_t surface_size; + + T_SETUPBEGIN; + ledger_init(); + surface = CreateSurface(1024, 1024, 0, 32, true, true); + IOSurfaceSetPurgeable(surface, kIOSurfacePurgeableVolatile, &old_state); + IOSurfaceSetPurgeable(surface, kIOSurfacePurgeableNonVolatile, &old_state); + CFRelease(surface); + T_SETUPEND; + + surface_size = 1024 * 1024 * 4; + + /* create IOsurface: footprint grows */ + get_ledger_info(&footprint_before, &pagetable_before); + surface = CreateSurface(1024, 1024, 0, 32, true, true); + get_ledger_info(&footprint_after, &pagetable_after); +#if LEGACY_FOOTPRINT + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("LEGACY FOOTPRINT: creating IOSurface: no footprint impact"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "create IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#else /* LEGACY_FOOTPRINT */ + footprint_expected = footprint_before + surface_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("creating IOSurface increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "create IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#endif /* LEGACY_FOOTPRINT */ + + /* make IOSurface volatile: footprint shrinks */ + get_ledger_info(&footprint_before, &pagetable_before); + IOSurfaceSetPurgeable(surface, kIOSurfacePurgeableVolatile, &old_state); + get_ledger_info(&footprint_after, &pagetable_after); +#if LEGACY_FOOTPRINT + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("LEGACY FOOTPRINT: volatile IOSurface: no footprint impact"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "volatile IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#else /* LEGACY_FOOTPRINT */ + footprint_expected = footprint_before - surface_size; + footprint_expected += (pagetable_after - pagetable_before); + 
T_LOG("making IOSurface volatile decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made volatile %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#endif /* LEGACY_FOOTPRINT */ + + /* make IOSurface non-volatile: footprint grows */ + get_ledger_info(&footprint_before, &pagetable_before); + IOSurfaceSetPurgeable(surface, kIOSurfacePurgeableNonVolatile, &old_state); + get_ledger_info(&footprint_after, &pagetable_after); +#if LEGACY_FOOTPRINT + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("LEGACY FOOTPRINT: non-volatile IOSurface: no footprint impact"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "non-volatile IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#else /* LEGACY_FOOTPRINT */ + footprint_expected = footprint_before + surface_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("making IOSurface non-volatile increases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "made non-volatile %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#endif /* LEGACY_FOOTPRINT */ + + /* accessing IOSurface re-mapping: no footprint impact */ + + /* deallocating IOSurface re-mapping: no footprint impact */ + + /* release IOSurface: footprint shrinks */ + get_ledger_info(&footprint_before, &pagetable_before); + CFRelease(surface); + get_ledger_info(&footprint_after, &pagetable_after); +#if LEGACY_FOOTPRINT + footprint_expected = footprint_before; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("LEGACY FOOTPRINT: release IOSurface: no footprint impact"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "releasing IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#else /* LEGACY_FOOTPRINT */ + footprint_expected = footprint_before - surface_size; + footprint_expected += (pagetable_after - pagetable_before); + T_LOG("releasing IOSurface decreases phys_footprint"); + T_EXPECT_EQ(footprint_after, footprint_expected, + "released IOSurface %lld bytes: " + "footprint %lld -> %lld expected %lld delta %lld", + surface_size, footprint_before, footprint_after, + footprint_expected, footprint_after - footprint_expected); +#endif /* LEGACY_FOOTPRINT */ +} diff --git a/tests/vm_set_max_addr_helper.c b/tests/vm_set_max_addr_helper.c index 5a06a3e22..5ea7818b1 100644 --- a/tests/vm_set_max_addr_helper.c +++ b/tests/vm_set_max_addr_helper.c @@ -2,7 +2,8 @@ #include #include -int main(void) +int +main(void) { kern_return_t kr; mach_vm_address_t addr = 50ULL * 1024ULL * 1024ULL * 1024ULL; @@ -15,4 +16,3 @@ int main(void) return 1; } } - diff --git a/tests/vm_set_max_addr_test.c b/tests/vm_set_max_addr_test.c index 325227d51..ac03b77ff 100644 --- a/tests/vm_set_max_addr_test.c +++ b/tests/vm_set_max_addr_test.c @@ -11,9 +11,9 @@ extern char * testpath; T_DECL(set_max_addr, - "Description", - T_META_NAMESPACE("xnu.vm"), - T_META_CHECK_LEAKS(false)) + "Description", + T_META_NAMESPACE("xnu.vm"), + 
T_META_CHECK_LEAKS(false)) { #if (defined(__arm64__) && defined(__LP64__)) int result = 0; @@ -54,4 +54,3 @@ T_DECL(set_max_addr, T_SKIP("Not supported on this architecture"); #endif /* (defined(__arm64__) && defined(__LP64__)) */ } - diff --git a/tests/voucher_entry_18826844.c b/tests/voucher_entry_18826844.c index 24e246ad0..f5107ff84 100644 --- a/tests/voucher_entry_18826844.c +++ b/tests/voucher_entry_18826844.c @@ -27,8 +27,8 @@ T_DECL(voucher_entry, "voucher_entry", T_META_CHECK_LEAKS(false), T_META_ALL_VAL }; kr = host_create_mach_voucher(mach_host_self(), - (mach_voucher_attr_raw_recipe_array_t)&recipe, - sizeof(recipe), &voucher); + (mach_voucher_attr_raw_recipe_array_t)&recipe, + sizeof(recipe), &voucher); T_ASSERT_MACH_SUCCESS(kr, "host_create_mach_voucher"); @@ -75,4 +75,3 @@ T_DECL(voucher_entry, "voucher_entry", T_META_CHECK_LEAKS(false), T_META_ALL_VAL T_ASSERT_MACH_ERROR(KERN_INVALID_NAME, kr, "voucher should now be invalid name"); } - diff --git a/tests/voucher_traps.c b/tests/voucher_traps.c index f3e5a0a20..0b4967720 100644 --- a/tests/voucher_traps.c +++ b/tests/voucher_traps.c @@ -21,7 +21,8 @@ #include -static mach_port_t get_atm_voucher(void) +static mach_port_t +get_atm_voucher(void) { mach_voucher_attr_recipe_data_t r = { .key = MACH_VOUCHER_ATTR_KEY_ATM, @@ -29,8 +30,8 @@ static mach_port_t get_atm_voucher(void) }; mach_port_t port = MACH_PORT_NULL; kern_return_t kr = host_create_mach_voucher(mach_host_self(), - (mach_voucher_attr_raw_recipe_array_t)&r, - sizeof(r), &port); + (mach_voucher_attr_raw_recipe_array_t)&r, + sizeof(r), &port); T_ASSERT_MACH_SUCCESS(kr, "Create ATM voucher: 0x%x", (unsigned int)port); return port; @@ -54,7 +55,7 @@ T_DECL(voucher_extract_attr_recipe, "voucher_extract_attr_recipe") */ alloc_addr = (mach_vm_address_t)round_page(MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE + 1); kr = mach_vm_allocate(mach_task_self(), &alloc_addr, - alloc_sz, VM_FLAGS_ANYWHERE); + alloc_sz, VM_FLAGS_ANYWHERE); /* * Make sure that the address of the allocation is larger than the @@ -62,19 +63,20 @@ T_DECL(voucher_extract_attr_recipe, "voucher_extract_attr_recipe") * . */ T_ASSERT_GT_ULLONG((uint64_t)alloc_addr, - (uint64_t)MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE, - "Recipe addr (%llu bytes): 0x%llx > max recipe sz: %llu", - (uint64_t)alloc_sz, (uint64_t)alloc_addr, - (uint64_t)MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE); + (uint64_t)MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE, + "Recipe addr (%llu bytes): 0x%llx > max recipe sz: %llu", + (uint64_t)alloc_sz, (uint64_t)alloc_addr, + (uint64_t)MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE); /* make the allocation look like a pointer to an int */ mach_msg_type_number_t *recipe_size; recipe_size = (mach_msg_type_number_t *)((uintptr_t)alloc_addr); bzero(recipe_size, (unsigned long)alloc_sz); - if (alloc_sz > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) + if (alloc_sz > MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE) { *recipe_size = MACH_VOUCHER_ATTR_MAX_RAW_RECIPE_ARRAY_SIZE; - else + } else { *recipe_size = (mach_msg_type_number_t)alloc_sz; + } /* recipe buffer on the heap: memset it so panics show up loudly */ size_t size = (size_t)(10 * 1024 * 1024); @@ -88,7 +90,7 @@ T_DECL(voucher_extract_attr_recipe, "voucher_extract_attr_recipe") * kernel heap (probably zone memory). 
*/ kr = mach_voucher_extract_attr_recipe_trap(port, MACH_VOUCHER_ATTR_KEY_ATM, - recipe, recipe_size); + recipe, recipe_size); T_ASSERT_MACH_SUCCESS(kr, "Extract attribute data with recipe: heap"); /* reset the recipe memory */ @@ -101,7 +103,7 @@ T_DECL(voucher_extract_attr_recipe, "voucher_extract_attr_recipe") * kernel stack. */ kr = mach_voucher_extract_attr_recipe_trap(port, MACH_VOUCHER_ATTR_KEY_ATM, - recipe, recipe_size); + recipe, recipe_size); T_ASSERT_MACH_SUCCESS(kr, "Extract attribute data with recipe: stack"); /* cleanup */ diff --git a/tests/wired_mem_bench.c b/tests/wired_mem_bench.c index 91fe03a22..0e6f2bda5 100644 --- a/tests/wired_mem_bench.c +++ b/tests/wired_mem_bench.c @@ -33,19 +33,19 @@ #define WIRED_MEM_THRESHOLD_PERCENTAGE 30 T_DECL(wired_mem_bench, - "report the amount of wired memory consumed by the booted OS; guard against egregious or unexpected regressions", - T_META_CHECK_LEAKS(false), - T_META_ASROOT(true), - T_META_REQUIRES_REBOOT(true)) // Help reduce noise by asking for a clean boot + "report the amount of wired memory consumed by the booted OS; guard against egregious or unexpected regressions", + T_META_CHECK_LEAKS(false), + T_META_ASROOT(true), + T_META_REQUIRES_REBOOT(true)) // Help reduce noise by asking for a clean boot // T_META_TAG_PERF) { - vm_statistics64_data_t stat; - uint64_t memsize; - vm_size_t page_size = 0; - unsigned int count = HOST_VM_INFO64_COUNT; - kern_return_t ret; - int wired_mem_pct; - struct utsname uname_vers; + vm_statistics64_data_t stat; + uint64_t memsize; + vm_size_t page_size = 0; + unsigned int count = HOST_VM_INFO64_COUNT; + kern_return_t ret; + int wired_mem_pct; + struct utsname uname_vers; T_SETUPBEGIN; ret = uname(&uname_vers); @@ -75,10 +75,10 @@ T_DECL(wired_mem_bench, T_SETUPEND; T_PERF("wired_memory", (double)(stat.wire_count * (mach_vm_size_t)vm_kernel_page_size >> 10), "kB", - "Wired memory at boot"); + "Wired memory at boot"); T_LOG("\nwired memory: %llu kB (%llu MB)\n", stat.wire_count * (mach_vm_size_t)vm_kernel_page_size >> 10, - stat.wire_count * (mach_vm_size_t)vm_kernel_page_size >> 20); + stat.wire_count * (mach_vm_size_t)vm_kernel_page_size >> 20); #if TARGET_OS_IOS || TARGET_OS_OSX // zprint is not mastered onto other platforms. 
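/*
 * For reference, the wired-memory check above boils down to the following
 * calculation. This is a minimal standalone sketch, not part of the patch;
 * wired_mem_percentage() is a hypothetical helper that assumes the same
 * HOST_VM_INFO64 and hw.memsize interfaces the test uses.
 */
#include <mach/mach.h>
#include <sys/sysctl.h>

/* hypothetical helper: wired memory as a percentage of DRAM, -1 on error */
static int
wired_mem_percentage(void)
{
	vm_statistics64_data_t stat;
	unsigned int count = HOST_VM_INFO64_COUNT;
	vm_size_t page_size = 0;
	uint64_t memsize = 0;
	size_t len = sizeof(memsize);

	/* wire_count comes back in pages, not bytes */
	if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
	    (host_info64_t)&stat, &count) != KERN_SUCCESS) {
		return -1;
	}
	if (host_page_size(mach_host_self(), &page_size) != KERN_SUCCESS) {
		return -1;
	}
	/* total device DRAM */
	if (sysctlbyname("hw.memsize", &memsize, &len, NULL, 0) != 0 ||
	    memsize == 0) {
		return -1;
	}
	return (int)(((uint64_t)stat.wire_count * page_size * 100) / memsize);
}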
@@ -95,6 +95,6 @@ T_DECL(wired_mem_bench, T_PERF("wired_memory_percentage", wired_mem_pct, "%", "Wired memory as percentage of device DRAM size"); T_ASSERT_LT(wired_mem_pct, WIRED_MEM_THRESHOLD_PERCENTAGE, - "Wired memory percentage is below allowable threshold (%llu bytes / %u pages / %llu total device memory)", - (uint64_t)stat.wire_count * page_size, stat.wire_count, memsize); + "Wired memory percentage is below allowable threshold (%llu bytes / %u pages / %llu total device memory)", + (uint64_t)stat.wire_count * page_size, stat.wire_count, memsize); } diff --git a/tests/work_interval_test.c b/tests/work_interval_test.c index cc6925056..c80267309 100644 --- a/tests/work_interval_test.c +++ b/tests/work_interval_test.c @@ -1,4 +1,3 @@ - /* test that the header doesn't implicitly depend on others */ #include @@ -64,7 +63,7 @@ T_DECL(work_interval, "work interval interface") WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_FLAG_GROUP, }; - for (uint32_t i = 0 ; i < sizeof(flags) / sizeof(flags[0]) ; i++) { + for (uint32_t i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) { ret = work_interval_create(&handle, flags[i]); T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create, joinable"); @@ -117,6 +116,4 @@ T_DECL(work_interval, "work interval interface") ret = work_interval_destroy(handle); T_ASSERT_POSIX_SUCCESS(ret, "work_interval_destroy"); - } - diff --git a/tests/workq_sigprof.c b/tests/workq_sigprof.c index 6ea38a8c9..458307962 100644 --- a/tests/workq_sigprof.c +++ b/tests/workq_sigprof.c @@ -15,56 +15,61 @@ static pthread_t workq_thread; static bool signal_received; -static void signal_handler(int sig __unused, siginfo_t *b __unused, void* unused __unused) { - if (pthread_self() == workq_thread) { - signal_received = true; - } +static void +signal_handler(int sig __unused, siginfo_t *b __unused, void* unused __unused) +{ + if (pthread_self() == workq_thread) { + signal_received = true; + } } -static void workq_block(void *unused __unused) { - workq_thread = pthread_self(); +static void +workq_block(void *unused __unused) +{ + workq_thread = pthread_self(); - /* - sigset_t set; - sigemptyset(&set); - sigaddset(&set, SIGPROF); - pthread_sigmask(SIG_UNBLOCK, &set, NULL); - */ + /* + * sigset_t set; + * sigemptyset(&set); + * sigaddset(&set, SIGPROF); + * pthread_sigmask(SIG_UNBLOCK, &set, NULL); + */ - uint64_t spin_start = mach_absolute_time(); - while (mach_absolute_time() - spin_start < 30 * NSEC_PER_SEC) - if (signal_received) { - T_PASS("Got SIGPROF!"); - T_END; - } - } + uint64_t spin_start = mach_absolute_time(); + while (mach_absolute_time() - spin_start < 30 * NSEC_PER_SEC) { + if (signal_received) { + T_PASS("Got SIGPROF!"); + T_END; + } + } +} T_DECL(workq_sigprof, "test that workqueue threads can receive sigprof") { - struct sigaction sa = { - .sa_sigaction = signal_handler - }; - sigfillset(&sa.sa_mask); - T_ASSERT_POSIX_ZERO(sigaction(SIGPROF, &sa, NULL), NULL); + struct sigaction sa = { + .sa_sigaction = signal_handler + }; + sigfillset(&sa.sa_mask); + T_ASSERT_POSIX_ZERO(sigaction(SIGPROF, &sa, NULL), NULL); - dispatch_queue_t q = dispatch_get_global_queue(0, 0); - dispatch_async_f(q, NULL, workq_block); + dispatch_queue_t q = dispatch_get_global_queue(0, 0); + dispatch_async_f(q, NULL, workq_block); - struct itimerval timerval = { - .it_interval = {.tv_usec = 10000}, - .it_value = {.tv_usec = 10000} - }; - T_ASSERT_POSIX_ZERO(setitimer(ITIMER_PROF, &timerval, NULL), NULL); + struct itimerval timerval = { + .it_interval = {.tv_usec = 10000}, + .it_value = {.tv_usec = 10000} + }; 
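/*
 * Why the following setitimer() call is expected to work: ITIMER_PROF ticks
 * against the process's combined user+system CPU time, and on expiry SIGPROF
 * is normally delivered to a thread that is actually running -- here, the
 * workqueue thread spinning in workq_block(). That is also why
 * signal_handler() only counts the signal when pthread_self() matches
 * workq_thread. Below is a small sketch of the per-thread unblocking
 * approach left commented out in workq_block() above; unblock_sigprof() is
 * an illustrative helper, not part of this patch.
 */
#include <signal.h>
#include <pthread.h>

static void
unblock_sigprof(void)
{
	sigset_t set;
	sigemptyset(&set);
	sigaddset(&set, SIGPROF);
	/* pthread_sigmask() changes only the calling thread's signal mask */
	pthread_sigmask(SIG_UNBLOCK, &set, NULL);
}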
+ T_ASSERT_POSIX_ZERO(setitimer(ITIMER_PROF, &timerval, NULL), NULL);
- dispatch_main();
+ dispatch_main();
 }
 #else //!TARGET_OS_IPHONE
 T_DECL(workq_sigprof, "test that workqueue threads can receive sigprof")
 {
- T_EXPECTFAIL;
- T_FAIL(" setitimer/sigprof doesn't seem to be delivered on embeded platforms");
+ T_EXPECTFAIL;
+ T_FAIL(" setitimer/sigprof doesn't seem to be delivered on embedded platforms");
 }
 #endif //!TARGET_OS_IPHONE
diff --git a/tests/xnu_quick_test.c b/tests/xnu_quick_test.c
index 7698b3fc3..412f8558a 100644
--- a/tests/xnu_quick_test.c
+++ b/tests/xnu_quick_test.c
@@ -10,28 +10,28 @@
 #include
 #include
-T_GLOBAL_META (T_META_NAMESPACE("xnu.quicktest"), T_META_CHECK_LEAKS(false));
-char g_target_path[ PATH_MAX ];
+T_GLOBAL_META(T_META_NAMESPACE("xnu.quicktest"), T_META_CHECK_LEAKS(false));
+char g_target_path[PATH_MAX];
 /* **************************************************************************************************************
 * Test the syscall system call.
 * **************************************************************************************************************
 */
 T_DECL(syscall,
- "xnu_quick_test for syscall", T_META_CHECK_LEAKS(NO))
+ "xnu_quick_test for syscall", T_META_CHECK_LEAKS(NO))
 {
- int my_fd = -1;
- char * my_pathp;
+ int my_fd = -1;
+ char * my_pathp;
 kern_return_t my_kr;
 T_SETUPBEGIN;
 create_target_directory(TEST_DIRECTORY);
-
+
 T_SETUPEND;
- my_kr = vm_allocate((vm_map_t) mach_task_self(), (vm_address_t*)&my_pathp,
- PATH_MAX, VM_FLAGS_ANYWHERE);
+ my_kr = vm_allocate((vm_map_t) mach_task_self(), (vm_address_t*)&my_pathp,
+ PATH_MAX, VM_FLAGS_ANYWHERE);
 T_ASSERT_MACH_SUCCESS(my_kr, "Allocating vm to path %s", my_pathp);
 *my_pathp = 0x00;
@@ -39,19 +39,20 @@
 strcat( my_pathp, "/" );
 /* create a test file */
-
+
 T_ASSERT_MACH_SUCCESS( create_random_name( my_pathp, 1), "Create random test file" );
 /* use an indirect system call to open our test file.
 * I picked open since it uses a path pointer which grows to 64 bits in an LP64 environment.
 */
 T_EXPECT_NE(my_fd = syscall( SYS_open, my_pathp, (O_RDWR | O_EXCL), 0 ),
- -1, "Attempt to open file using indirect syscall %s", my_pathp);
+ -1, "Attempt to open file using indirect syscall %s", my_pathp);
- if (my_fd != -1)
+ if (my_fd != -1) {
 close(my_fd);
-
+ }
+
 if (my_pathp != NULL) {
- remove(my_pathp);
+ remove(my_pathp);
 vm_deallocate(mach_task_self(), (vm_address_t)my_pathp, PATH_MAX);
 }
@@ -62,51 +63,51 @@
 * Test fork wait4, and exit system calls.
* ************************************************************************************************************** */ -T_DECL(fork_wait4_exit, - "Tests forking off a process and waiting for the child to exit", T_META_CHECK_LEAKS(false)) +T_DECL(fork_wait4_exit, + "Tests forking off a process and waiting for the child to exit", T_META_CHECK_LEAKS(false)) { - int my_err, my_status; - pid_t my_pid, my_wait_pid; - struct rusage my_usage; - + int my_err, my_status; + pid_t my_pid, my_wait_pid; + struct rusage my_usage; + strncpy(g_target_path, "/", 2); /* spin off another process */ T_ASSERT_NE(my_pid = fork(), -1, "Fork off a process"); - - if ( my_pid == 0 ) { - struct stat my_sb; - + + if (my_pid == 0) { + struct stat my_sb; + /* child process does very little then exits */ my_err = stat( &g_target_path[0], &my_sb ); T_WITH_ERRNO; - T_ASSERT_TRUE(my_err == 0, "stat call with path: \"%s\" returned \"%d\"", &g_target_path[0], errno); + T_ASSERT_TRUE(my_err == 0, "stat call with path: \"%s\" returned \"%d\"", &g_target_path[0], errno); exit( 44 ); } - + /* parent process waits for child to exit */ T_ASSERT_NE(my_wait_pid = wait4( my_pid, &my_status, 0, &my_usage ), -1, - "Wait for child to exit\n"); + "Wait for child to exit\n"); /* wait4 should return our child's pid when it exits */ - T_ASSERT_EQ(my_wait_pid, my_pid, - "wait4 should return our child's pid when it exits"); - - /* kind of just guessing on these values so if this fails we should take a closer - * look at the returned rusage structure. + T_ASSERT_EQ(my_wait_pid, my_pid, + "wait4 should return our child's pid when it exits"); + + /* kind of just guessing on these values so if this fails we should take a closer + * look at the returned rusage structure. */ - T_ASSERT_FALSE(( my_usage.ru_utime.tv_sec > 1 || - my_usage.ru_stime.tv_sec > 1 || my_usage.ru_majflt > 1000 || - my_usage.ru_msgsnd > 100 ), "wait4 returned rusage structure"); + T_ASSERT_FALSE((my_usage.ru_utime.tv_sec > 1 || + my_usage.ru_stime.tv_sec > 1 || my_usage.ru_majflt > 1000 || + my_usage.ru_msgsnd > 100), "wait4 returned rusage structure"); - T_ASSERT_TRUE(( WIFEXITED( my_status ) && WEXITSTATUS( my_status ) == 44 ), - "check if wait4 returns right exit status"); + T_ASSERT_TRUE((WIFEXITED( my_status ) && WEXITSTATUS( my_status ) == 44), + "check if wait4 returns right exit status"); } -T_DECL (getrusage, "Sanity check of getrusage") +T_DECL(getrusage, "Sanity check of getrusage") { - struct rusage my_rusage; - + struct rusage my_rusage; + T_WITH_ERRNO; T_ASSERT_EQ(getrusage( RUSAGE_SELF, &my_rusage ), 0, NULL); T_LOG("Checking that getrusage returned sane values"); @@ -115,4 +116,3 @@ T_DECL (getrusage, "Sanity check of getrusage") T_EXPECT_LT(my_rusage.ru_nsignals, 1000, NULL); T_EXPECT_GE(my_rusage.ru_nsignals, 0, NULL); } - diff --git a/tests/xnu_quick_test_entitled.c b/tests/xnu_quick_test_entitled.c index ec1252fb4..b3d6a9d4b 100644 --- a/tests/xnu_quick_test_entitled.c +++ b/tests/xnu_quick_test_entitled.c @@ -13,7 +13,7 @@ #include #endif -T_GLOBAL_META (T_META_NAMESPACE("xnu.quicktest"), T_META_CHECK_LEAKS(false)); +T_GLOBAL_META(T_META_NAMESPACE("xnu.quicktest"), T_META_CHECK_LEAKS(false)); /* ************************************************************************************************************** @@ -21,15 +21,15 @@ T_GLOBAL_META (T_META_NAMESPACE("xnu.quicktest"), T_META_CHECK_LEAKS(false)); * ************************************************************************************************************** */ T_DECL(ioctl, "Sanity check of ioctl by 
exercising DKIOCGETBLOCKCOUNT and DKIOCGETBLOCKSIZE", - T_META_ASROOT(true)) + T_META_ASROOT(true)) { - int my_err; - int my_fd = -1; - struct statfs * my_infop; - char * my_ptr; - int my_blksize; - long long my_block_count; - char my_name[ MAXPATHLEN ]; + int my_err; + int my_fd = -1; + struct statfs * my_infop; + char * my_ptr; + int my_blksize; + long long my_block_count; + char my_name[MAXPATHLEN]; #if !TARGET_OS_EMBEDDED /* @@ -37,7 +37,7 @@ T_DECL(ioctl, "Sanity check of ioctl by exercising DKIOCGETBLOCKCOUNT and DKIOCG * disabled or in AppleInternal mode */ if (csr_check( CSR_ALLOW_UNRESTRICTED_FS ) && - csr_check( CSR_ALLOW_APPLE_INTERNAL ) ) { + csr_check( CSR_ALLOW_APPLE_INTERNAL )) { T_SKIP("System Integrity Protection is enabled"); } #endif @@ -48,11 +48,11 @@ T_DECL(ioctl, "Sanity check of ioctl by exercising DKIOCGETBLOCKCOUNT and DKIOCG T_ASSERT_GT(getmntinfo( &my_infop, MNT_NOWAIT ), 0, "getmntinfo"); /* make this a raw device */ - strlcpy( &my_name[0], &my_infop->f_mntfromname[0], sizeof(my_name) ); - if ( (my_ptr = strrchr( &my_name[0], '/' )) != 0 ) { - if ( my_ptr[1] != 'r' ) { - my_ptr[ strlen( my_ptr ) ] = 0x00; - memmove( &my_ptr[2], &my_ptr[1], (strlen( &my_ptr[1] ) + 1) ); + strlcpy( &my_name[0], &my_infop->f_mntfromname[0], sizeof(my_name)); + if ((my_ptr = strrchr( &my_name[0], '/' )) != 0) { + if (my_ptr[1] != 'r') { + my_ptr[strlen( my_ptr )] = 0x00; + memmove( &my_ptr[2], &my_ptr[1], (strlen( &my_ptr[1] ) + 1)); my_ptr[1] = 'r'; } } @@ -63,11 +63,11 @@ T_DECL(ioctl, "Sanity check of ioctl by exercising DKIOCGETBLOCKCOUNT and DKIOCG /* obtain the size of the media (in blocks) */ T_EXPECT_POSIX_SUCCESS(my_err = ioctl( my_fd, DKIOCGETBLOCKCOUNT, &my_block_count ), - "ioctl DKIOCGETBLOCKCOUNT"); + "ioctl DKIOCGETBLOCKCOUNT"); /* obtain the block size of the media */ T_EXPECT_POSIX_SUCCESS(my_err = ioctl( my_fd, DKIOCGETBLOCKSIZE, &my_blksize ), - "ioctl DKIOCGETBLOCKSIZE"); + "ioctl DKIOCGETBLOCKSIZE"); T_LOG( "my_block_count %qd my_blksize %d \n", my_block_count, my_blksize ); diff --git a/tests/xnu_quick_test_helpers.c b/tests/xnu_quick_test_helpers.c index 08670d831..9ea7b94ab 100644 --- a/tests/xnu_quick_test_helpers.c +++ b/tests/xnu_quick_test_helpers.c @@ -5,50 +5,51 @@ #include #include -void create_target_directory( const char * the_targetp ) +void +create_target_directory( const char * the_targetp ) { - int err; - const char * my_targetp; - - my_targetp = getenv("TMPDIR"); - if ( my_targetp == NULL ) - my_targetp = "/tmp"; - - T_ASSERT_LT( strlen( the_targetp ), (unsigned long)( PATH_MAX - 1 ), - "check target path too long - \"%s\"", the_targetp ); - - for ( ;; ) { - int my_rand; - char my_name[64]; - - my_rand = rand( ); - sprintf( &my_name[0], "xnu_quick_test-%d", my_rand ); - T_ASSERT_LT( strlen( &my_name[0] ) + strlen( the_targetp ) + 2, (unsigned long)PATH_MAX, - "check target path plus our test directory name is too long: " - "target path - \"%s\" test directory name - \"%s\"", - the_targetp, &my_name[0] ); - - /* append generated directory name onto our path */ - g_target_path[0] = 0x00; - strcat( &g_target_path[0], the_targetp ); - if ( g_target_path[ (strlen(the_targetp) - 1) ] != '/' ) { - strcat( &g_target_path[0], "/" ); - } - strcat( &g_target_path[0], &my_name[0] ); - - /* try to create the test directory */ - err = mkdir( &g_target_path[0], (S_IRWXU | S_IRWXG | S_IROTH) ); - if ( err == 0 ) { - break; - } - err = errno; - if ( EEXIST != err ) { - T_ASSERT_FAIL( "test directory creation failed - \"%s\" \n" - "mkdir call failed with error 
%d - \"%s\"", - &g_target_path[0], errno, strerror( err) ); - } - } + int err; + const char * my_targetp; + my_targetp = getenv("TMPDIR"); + if (my_targetp == NULL) { + my_targetp = "/tmp"; + } + + T_ASSERT_LT( strlen( the_targetp ), (unsigned long)(PATH_MAX - 1), + "check target path too long - \"%s\"", the_targetp ); + + for (;;) { + int my_rand; + char my_name[64]; + + my_rand = rand(); + sprintf( &my_name[0], "xnu_quick_test-%d", my_rand ); + T_ASSERT_LT( strlen( &my_name[0] ) + strlen( the_targetp ) + 2, (unsigned long)PATH_MAX, + "check target path plus our test directory name is too long: " + "target path - \"%s\" test directory name - \"%s\"", + the_targetp, &my_name[0] ); + + /* append generated directory name onto our path */ + g_target_path[0] = 0x00; + strcat( &g_target_path[0], the_targetp ); + if (g_target_path[(strlen(the_targetp) - 1)] != '/') { + strcat( &g_target_path[0], "/" ); + } + strcat( &g_target_path[0], &my_name[0] ); + + /* try to create the test directory */ + err = mkdir( &g_target_path[0], (S_IRWXU | S_IRWXG | S_IROTH)); + if (err == 0) { + break; + } + err = errno; + if (EEXIST != err) { + T_ASSERT_FAIL( "test directory creation failed - \"%s\" \n" + "mkdir call failed with error %d - \"%s\"", + &g_target_path[0], errno, strerror( err)); + } + } } /* create_target_directory */ /* @@ -59,56 +60,63 @@ void create_target_directory( const char * the_targetp ) * WARNING - assumes caller has appended a trailing '/' on the path passed to us. * RAND_MAX is currently 2147483647 (ten characters plus one for a slash) */ -int create_random_name( char *the_pathp, int do_open ) { - int i, my_err; - int my_fd = -1; - - for ( i = 0; i < 1; i++ ) { - int my_rand; - char *myp; - char my_name[32]; - - my_rand = rand( ); - sprintf( &my_name[0], "%d", my_rand ); - T_ASSERT_LT_ULONG((strlen( &my_name[0] ) + strlen( the_pathp ) + 2), (unsigned long)PATH_MAX, - "check if path to test file is less than PATH_MAX"); - - // append generated file name onto our path - myp = strrchr( the_pathp, '/' ); - *(myp + 1) = 0x00; - strcat( the_pathp, &my_name[0] ); - if ( do_open ) { - /* create a file with this name */ - my_fd = open( the_pathp, (O_RDWR | O_CREAT | O_EXCL), - (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH) ); - T_EXPECT_TRUE((my_fd != -1 || errno == EEXIST), "open file with name %s", the_pathp); - - if( errno == EEXIST ) - continue; - } - else { - /* make sure the name is unique */ - struct stat my_sb; - my_err = stat( the_pathp, &my_sb ); - T_EXPECT_TRUE((my_err == 0 || errno == ENOENT), "make sure the name is unique"); - - if(errno == ENOENT) break; - /* name already exists, try another */ - i--; - continue; - } - } - - if ( my_fd != -1 ) - close( my_fd ); - - if(do_open && my_fd == -1) - return 1; - - return 0; +int +create_random_name( char *the_pathp, int do_open ) +{ + int i, my_err; + int my_fd = -1; + + for (i = 0; i < 1; i++) { + int my_rand; + char *myp; + char my_name[32]; + + my_rand = rand(); + sprintf( &my_name[0], "%d", my_rand ); + T_ASSERT_LT_ULONG((strlen( &my_name[0] ) + strlen( the_pathp ) + 2), (unsigned long)PATH_MAX, + "check if path to test file is less than PATH_MAX"); + + // append generated file name onto our path + myp = strrchr( the_pathp, '/' ); + *(myp + 1) = 0x00; + strcat( the_pathp, &my_name[0] ); + if (do_open) { + /* create a file with this name */ + my_fd = open( the_pathp, (O_RDWR | O_CREAT | O_EXCL), + (S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)); + T_EXPECT_TRUE((my_fd != -1 || errno == EEXIST), "open file with name %s", the_pathp); + + if (errno == 
EEXIST) { + continue; + } + } else { + /* make sure the name is unique */ + struct stat my_sb; + my_err = stat( the_pathp, &my_sb ); + T_EXPECT_TRUE((my_err == 0 || errno == ENOENT), "make sure the name is unique"); + + if (errno == ENOENT) { + break; + } + /* name already exists, try another */ + i--; + continue; + } + } + + if (my_fd != -1) { + close( my_fd ); + } + + if (do_open && my_fd == -1) { + return 1; + } + + return 0; } /* create_random_name */ -void remove_target_directory() { - rmdir(&g_target_path[0]); +void +remove_target_directory() +{ + rmdir(&g_target_path[0]); } - diff --git a/tests/xnu_quick_test_helpers.h b/tests/xnu_quick_test_helpers.h index b6a25ed9a..90fcc2d2f 100644 --- a/tests/xnu_quick_test_helpers.h +++ b/tests/xnu_quick_test_helpers.h @@ -7,7 +7,7 @@ #define TEST_DIRECTORY "/tmp" -extern char g_target_path[ PATH_MAX ]; +extern char g_target_path[PATH_MAX]; int create_random_name( char *the_pathp, int do_open ); void create_target_directory( const char * the_targetp ); diff --git a/tools/cred_dump_backtraces.c b/tools/cred_dump_backtraces.c index 12daa525d..0b290b084 100644 --- a/tools/cred_dump_backtraces.c +++ b/tools/cred_dump_backtraces.c @@ -17,64 +17,63 @@ #include /* bad! this is replicated in kern_credential.c. make sure they stay in sync! - * Or better yet have commone header file? + * Or better yet have commone header file? */ #define MAX_STACK_DEPTH 8 struct cred_backtrace { - int depth; - uint32_t stack[ MAX_STACK_DEPTH ]; + int depth; + uint32_t stack[MAX_STACK_DEPTH]; }; typedef struct cred_backtrace cred_backtrace; struct cred_debug_buffer { - int next_slot; - cred_backtrace stack_buffer[ 1 ]; + int next_slot; + cred_backtrace stack_buffer[1]; }; typedef struct cred_debug_buffer cred_debug_buffer; main( int argc, char *argv[] ) { - int err, i, j; - size_t len; - char *my_bufferp = NULL; - cred_debug_buffer *bt_buffp; - cred_backtrace *btp; + int err, i, j; + size_t len; + char *my_bufferp = NULL; + cred_debug_buffer *bt_buffp; + cred_backtrace *btp; /* get size of buffer we will need */ len = 0; - err = sysctlbyname( "kern.cred_bt", NULL, &len, NULL, 0 ); - if ( err != 0 ) { + err = sysctlbyname( "kern.cred_bt", NULL, &len, NULL, 0 ); + if (err != 0) { printf( "sysctl failed \n" ); - printf( "\terrno %d - \"%s\" \n", errno, strerror( errno ) ); + printf( "\terrno %d - \"%s\" \n", errno, strerror( errno )); return; } - + /* get a buffer for our back traces */ my_bufferp = malloc( len ); - if ( my_bufferp == NULL ) { - printf( "malloc error %d - \"%s\" \n", errno, strerror( errno ) ); + if (my_bufferp == NULL) { + printf( "malloc error %d - \"%s\" \n", errno, strerror( errno )); return; } - err = sysctlbyname( "kern.cred_bt", my_bufferp, &len, NULL, 0 ); - if ( err != 0 ) { + err = sysctlbyname( "kern.cred_bt", my_bufferp, &len, NULL, 0 ); + if (err != 0) { printf( "sysctl 2 failed \n" ); - printf( "\terrno %d - \"%s\" \n", errno, strerror( errno ) ); + printf( "\terrno %d - \"%s\" \n", errno, strerror( errno )); return; } bt_buffp = (cred_debug_buffer *) my_bufferp; - btp = &bt_buffp->stack_buffer[ 0 ]; - + btp = &bt_buffp->stack_buffer[0]; + printf("number of traces %d \n", bt_buffp->next_slot); - for ( i = 0; i < bt_buffp->next_slot; i++, btp++ ) { + for (i = 0; i < bt_buffp->next_slot; i++, btp++) { printf("[%d] ", i); - for ( j = 0; j < btp->depth; j++ ) { - printf("%p ", btp->stack[ j ]); + for (j = 0; j < btp->depth; j++) { + printf("%p ", btp->stack[j]); } printf("\n"); } - + return; } - diff --git a/tools/cred_dump_creds.c 
b/tools/cred_dump_creds.c index e5fe91f7e..8d417fe69 100644 --- a/tools/cred_dump_creds.c +++ b/tools/cred_dump_creds.c @@ -16,67 +16,68 @@ #include /* bad! this is replicated in kern_credential.c. make sure they stay in sync! - * Or better yet have commone header file? + * Or better yet have commone header file? */ struct debug_ucred { - uint32_t credp; - uint32_t cr_ref; /* reference count */ - uid_t cr_uid; /* effective user id */ - uid_t cr_ruid; /* real user id */ - uid_t cr_svuid; /* saved user id */ - short cr_ngroups; /* number of groups in advisory list */ - gid_t cr_groups[NGROUPS]; /* advisory group list */ - gid_t cr_rgid; /* real group id */ - gid_t cr_svgid; /* saved group id */ - uid_t cr_gmuid; /* UID for group membership purposes */ - struct auditinfo_addr cr_audit; /* user auditing data */ - uint32_t cr_label; /* MACF label */ - int cr_flags; /* flags on credential */ + uint32_t credp; + uint32_t cr_ref; /* reference count */ + uid_t cr_uid; /* effective user id */ + uid_t cr_ruid; /* real user id */ + uid_t cr_svuid; /* saved user id */ + short cr_ngroups; /* number of groups in advisory list */ + gid_t cr_groups[NGROUPS]; /* advisory group list */ + gid_t cr_rgid; /* real group id */ + gid_t cr_svgid; /* saved group id */ + uid_t cr_gmuid; /* UID for group membership purposes */ + struct auditinfo_addr cr_audit; /* user auditing data */ + uint32_t cr_label; /* MACF label */ + int cr_flags; /* flags on credential */ }; typedef struct debug_ucred debug_ucred; -void dump_cred_hash_table( debug_ucred * credp, size_t buf_size ); +void dump_cred_hash_table( debug_ucred * credp, size_t buf_size ); void dump_cred( debug_ucred * credp ); main( int argc, char *argv[] ) { - int err; - size_t len; - char *my_bufferp = NULL; + int err; + size_t len; + char *my_bufferp = NULL; /* get size of buffer we will need */ len = 0; - err = sysctlbyname( "kern.dump_creds", NULL, &len, NULL, 0 ); - if ( err != 0 ) { + err = sysctlbyname( "kern.dump_creds", NULL, &len, NULL, 0 ); + if (err != 0) { printf( "sysctl failed \n" ); - printf( "\terrno %d - \"%s\" \n", errno, strerror( errno ) ); + printf( "\terrno %d - \"%s\" \n", errno, strerror( errno )); return; } - + /* get a buffer for our credentials. 
need some spare room since table could have grown */ my_bufferp = malloc( len ); - if ( my_bufferp == NULL ) { - printf( "malloc error %d - \"%s\" \n", errno, strerror( errno ) ); + if (my_bufferp == NULL) { + printf( "malloc error %d - \"%s\" \n", errno, strerror( errno )); return; } - err = sysctlbyname( "kern.dump_creds", my_bufferp, &len, NULL, 0 ); - if ( err != 0 ) { + err = sysctlbyname( "kern.dump_creds", my_bufferp, &len, NULL, 0 ); + if (err != 0) { printf( "sysctl 2 failed \n" ); - printf( "\terrno %d - \"%s\" \n", errno, strerror( errno ) ); + printf( "\terrno %d - \"%s\" \n", errno, strerror( errno )); return; } - dump_cred_hash_table( (debug_ucred *)my_bufferp, len ); + dump_cred_hash_table((debug_ucred *)my_bufferp, len ); return; } -void dump_cred_hash_table( debug_ucred * credp, size_t buf_size ) +void +dump_cred_hash_table( debug_ucred * credp, size_t buf_size ) { - int i, my_count = (buf_size / sizeof(debug_ucred)); - - printf("\n\t dumping credential hash table - total creds %d \n", - my_count); + int i, my_count = (buf_size / sizeof(debug_ucred)); + + printf("\n\t dumping credential hash table - total creds %d \n", + my_count); for (i = 0; i < my_count; i++) { printf("[%02d] ", i); dump_cred( credp ); @@ -85,9 +86,10 @@ void dump_cred_hash_table( debug_ucred * credp, size_t buf_size ) return; } -void dump_cred( debug_ucred * credp ) +void +dump_cred( debug_ucred * credp ) { - int i; + int i; printf("%p ", credp->credp); printf("%lu ", credp->cr_ref); printf("%d ", credp->cr_uid); @@ -96,7 +98,7 @@ void dump_cred( debug_ucred * credp ) printf("%d g[", credp->cr_ngroups); for (i = 0; i < credp->cr_ngroups; i++) { printf("%d", credp->cr_groups[i]); - if ( (i + 1) < credp->cr_ngroups ) { + if ((i + 1) < credp->cr_ngroups) { printf(" "); } } diff --git a/tools/lldbmacros/Makefile b/tools/lldbmacros/Makefile index 26e79ffb4..aec946f22 100644 --- a/tools/lldbmacros/Makefile +++ b/tools/lldbmacros/Makefile @@ -15,7 +15,7 @@ LLDBMACROS_BOOTSTRAP_DEST:=$(OBJPATH)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSD LLDBMACROS_DEST:=$(LLDBMACROS_BOOTSTRAP_DEST)/lldbmacros/ LLDBMACROS_USERDEBUG_FILES= ifeq ($(BUILD_STATIC_LINK),1) -KERNEL_STATIC_DSYM_LLDBMACROS := $(OBJPATH)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros +KERNEL_STATIC_DSYM_LLDBMACROS := $(OBJPATH)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros/ endif LLDBMACROS_USERDEBUG_FILES:= \ @@ -96,7 +96,13 @@ $(eval $(call INSTALLPYTHON_RULE_template,$(INSTALL_STATIC_DSYM_LLDBMACROS_PYTHO $(eval $(call INSTALLPYTHON_RULE_template,$(KERNEL_STATIC_DSYM_LLDBMACROS)/../$(KERNEL_LLDBBOOTSTRAP_NAME),$(LLDBMACROS_SOURCE)/core/xnu_lldb_init.py,kbsdpydir,$(DATA_UNIFDEF),$(KERNEL_STATIC_DSYM_LLDBMACROS)/../)) endif -lldbmacros_install: $(INSTALL_LLDBMACROS_PYTHON_FILES) $(LLDBMACROS_BOOTSTRAP_DEST)/$(KERNEL_LLDBBOOTSTRAP_NAME) +ifeq ($(BUILD_STATIC_LINK),1) +STATIC_DSYM_LLDBMACROS_INSTALL_TARGETS := \ + $(INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES) \ + $(KERNEL_STATIC_DSYM_LLDBMACROS)/../$(KERNEL_LLDBBOOTSTRAP_NAME) +endif + +lldbmacros_install: $(INSTALL_LLDBMACROS_PYTHON_FILES) $(LLDBMACROS_BOOTSTRAP_DEST)/$(KERNEL_LLDBBOOTSTRAP_NAME) $(STATIC_DSYM_LLDBMACROS_INSTALL_TARGETS) $(_v)$(MKDIR) $(LLDBMACROS_DEST)/builtinkexts ifeq ($(BUILD_STATIC_LINK),1) $(_v)$(MKDIR) $(KERNEL_STATIC_DSYM_LLDBMACROS)/builtinkexts diff --git a/tools/lldbmacros/kauth.py b/tools/lldbmacros/kauth.py index 89ee5845c..5c0bce329 100755 --- a/tools/lldbmacros/kauth.py +++ b/tools/lldbmacros/kauth.py 
@@ -19,16 +19,13 @@ def PrintKauthCache(cmd_args=None): and print out usage information. """ anchor = unsigned(kern.globals.kauth_cred_table_anchor) - alloc_info_struct = anchor - sizeof('struct _mhead') - alloc_info = kern.GetValueFromAddress(alloc_info_struct, 'struct _mhead*') - alloc_size = unsigned(alloc_info.mlen) - (sizeof('struct _mhead')) - table_entries = alloc_size / sizeof('struct kauth_cred_entry_head') + table_entries = 128 # KAUTH_CRED_TABLE_SIZE anchor = kern.globals.kauth_cred_table_anchor print "Cred cache has: " + str(table_entries) + " buckets\n" print "Number of items in each bucket ... \n" for i in range(0, table_entries): numinbucket = 0 - for kauth_cred in IterateTAILQ_HEAD(anchor[i], "cr_link"): + for kauth_cred in IterateListEntry(anchor[i], 'kauth_cred_t', "cr_link"): numinbucket += 1 #print str(kauth_cred.cr_posix) #print str(kauth_cred.cr_ref) diff --git a/tools/lldbmacros/kcdata.py b/tools/lldbmacros/kcdata.py index f6db996cc..66fc2e8c4 100755 --- a/tools/lldbmacros/kcdata.py +++ b/tools/lldbmacros/kcdata.py @@ -1378,7 +1378,7 @@ def formatWaitInfo(info): return s -def SaveStackshotReport(j, outfile_name, dsc_uuid, dsc_libs_arr, incomplete): +def SaveStackshotReport(j, outfile_name, incomplete): import time from operator import itemgetter, attrgetter ss = j.get('kcdata_stackshot') @@ -1395,41 +1395,18 @@ def SaveStackshotReport(j, outfile_name, dsc_uuid, dsc_libs_arr, incomplete): os_version = ss.get('osversion', 'Unknown') timebase = ss.get('mach_timebase_info', {"denom": 1, "numer": 1}) - if not dsc_uuid and 'imageSlidBaseAddress' not in ss.get('shared_cache_dyld_load_info'): - print "Stackshot format does not include slid shared cache base address and no UUID provided. Skipping writing report." - return - - # If a shared cache UUID is provided, treat the slide as the base address - # for compatibility with existing tools that operate based on this logic - if dsc_uuid: - shared_cache_base_addr = ss.get('shared_cache_dyld_load_info')['imageLoadAddress'] - elif 'imageSlidBaseAddress' in ss.get('shared_cache_dyld_load_info'): - shared_cache_base_addr = ss.get('shared_cache_dyld_load_info')['imageSlidBaseAddress'] - else: - print "No shared cache UUID provided and data doesn't include imageSlidBaseAddress. Skipping writing report." - return - dsc_common = [format_uuid(ss.get('shared_cache_dyld_load_info')['imageUUID']), - shared_cache_base_addr, "S" ] + dsc_common = None + shared_cache_info = ss.get('shared_cache_dyld_load_info') + if shared_cache_info: + shared_cache_base_addr = shared_cache_info['imageSlidBaseAddress'] + dsc_common = [format_uuid(shared_cache_info['imageUUID']), shared_cache_info['imageSlidBaseAddress'], "S" ] + print "Shared cache UUID found from the binary data is <%s> " % str(dsc_common[0]) dsc_layout = ss.get('system_shared_cache_layout') dsc_libs = [] - print "Shared cache UUID found from the binary data is <%s> " % str(dsc_common[0]) - if dsc_common[0].replace('-', '').lower() == dsc_uuid: - print "SUCCESS: Found Matching dyld shared cache uuid. Loading library load addresses from layout provided." - _load_addr = dsc_common[1] - #print _load_addr - #print dsc_libs_arr - for i in dsc_libs_arr: - _uuid = i[2].lower().replace('-','').strip() - _addr = int(i[0], 16) + _load_addr - dsc_libs.append([_uuid, _addr, "C"]) - #print "adding ", [_uuid, _addr, "C"] - elif dsc_uuid: - print "Provided shared cache UUID does not match. Skipping writing report." 
- return - elif dsc_layout: + if dsc_layout: print "Found in memory system shared cache layout with {} images".format(len(dsc_layout)) slide = ss.get('shared_cache_dyld_load_info')['imageLoadAddress'] @@ -1478,11 +1455,10 @@ def SaveStackshotReport(j, outfile_name, dsc_uuid, dsc_libs_arr, incomplete): pr_lib_dsc = [format_uuid(tsnap.get('shared_cache_dyld_load_info')['imageUUID']), tsnap.get('shared_cache_dyld_load_info')['imageSlidBaseAddress'], - "S" - ] + "S"] pr_libs = [] - if len(dsc_libs) == 0: + if len(dsc_libs) == 0 and pr_lib_dsc: pr_libs.append(pr_lib_dsc) _lib_type = "P" if int(pid) == 0: @@ -1598,38 +1574,6 @@ def RunCommand(bash_cmd_string, get_stderr = True): finally: return (exit_code, output_str) -def ProcessDyldSharedCacheFile(shared_cache_file_path, sdk_str=""): - """ returns (uuid, text_info) output from shared_cache_util. - In case of error None is returned and err message is printed to stdout. - """ - if not os.path.exists(shared_cache_file_path): - print "File path: %s does not exists" % shared_cache_file_path - return None - if sdk_str: - sdk_str = ' -sdk "%s" ' % sdk_str - (c, so) = RunCommand("xcrun {} -find dyld_shared_cache_util".format(sdk_str)) - if c: - print "Failed to find path to dyld_shared_cache_util. Exit code: %d , message: %s" % (c,so) - return None - dyld_shared_cache_util = so.strip() - (c, so) = RunCommand("{} -info {}".format(dyld_shared_cache_util, shared_cache_file_path)) - if c: - print "Failed to get uuid info from %s" % shared_cache_file_path - print so - return None - - uuid = so.splitlines()[0].split(": ")[-1].strip().replace("-","").lower() - - (c, so) = RunCommand("{} -text_info {}".format(dyld_shared_cache_util, shared_cache_file_path)) - if c: - print "Failed to get text_info from %s" % shared_cache_file_path - print so - return None - - print "Found %s uuid: %s" % (shared_cache_file_path, uuid) - text_info = so - - return (uuid, so) parser = argparse.ArgumentParser(description="Decode a kcdata binary file.") parser.add_argument("-l", "--listtypes", action="store_true", required=False, default=False, @@ -1645,10 +1589,7 @@ parser.add_argument("--multiple", help="look for multiple stackshots in a single parser.add_argument("-p", "--plist", required=False, default=False, help="output as plist", action="store_true") -parser.add_argument("-U", "--uuid", required=False, default="", help="UUID of dyld shared cache to be analysed and filled in libs of stackshot report", dest="uuid") -parser.add_argument("-L", "--layout", required=False, type=argparse.FileType("r"), help="Path to layout file for DyldSharedCache. You can generate one by doing \n\tbash$xcrun -sdk dyld_shared_cache_util -text_info ", dest="layout") parser.add_argument("-S", "--sdk", required=False, default="", help="sdk property passed to xcrun command to find the required tools. 
Default is empty string.", dest="sdk") -parser.add_argument("-D", "--dyld_shared_cache", required=False, default="", help="Path to dyld_shared_cache built by B&I", dest="dsc") parser.add_argument("--pretty", default=False, action='store_true', help="make the output a little more human readable") parser.add_argument("--incomplete", action='store_true', help="accept incomplete data") parser.add_argument("kcdata_file", type=argparse.FileType('r'), help="Path to a kcdata binary file.") @@ -1767,21 +1708,8 @@ if __name__ == '__main__': if args.pretty: json_obj = prettify(json_obj) - dsc_uuid = None - dsc_libs_arr = [] - libs_re = re.compile("^\s*(0x[a-fA-F0-9]+)\s->\s(0x[a-fA-F0-9]+)\s+<([a-fA-F0-9\-]+)>\s+.*$", re.MULTILINE) - if args.uuid and args.layout: - dsc_uuid = args.uuid.strip().replace("-",'').lower() - dsc_libs_arr = libs_re.findall(args.layout.read()) - - if args.dsc: - _ret = ProcessDyldSharedCacheFile(args.dsc, args.sdk) - if _ret: - dsc_uuid = _ret[0] - dsc_libs_arr = libs_re.findall(_ret[1]) - if args.stackshot_file: - SaveStackshotReport(json_obj, args.stackshot_file, dsc_uuid, dsc_libs_arr, G.data_was_incomplete) + SaveStackshotReport(json_obj, args.stackshot_file, G.data_was_incomplete) elif args.plist: import Foundation plist = Foundation.NSPropertyListSerialization.dataWithPropertyList_format_options_error_( diff --git a/tools/lldbmacros/memory.py b/tools/lldbmacros/memory.py index a39837ff2..e157a5db3 100755 --- a/tools/lldbmacros/memory.py +++ b/tools/lldbmacros/memory.py @@ -3293,7 +3293,10 @@ def _vm_page_unpack_ptr(page): #ARM64 - min_addr = 0xffffff8000000000 if unsigned(page) & unsigned(ptr_mask) : masked_page = (unsigned(page) & ~ptr_mask) - return (unsigned(addressof(kern.globals.vm_pages[masked_page]))) + # can't use addressof(kern.globals.vm_pages[masked_page]) due to 32 bit limitation in SB bridge + vm_pages_addr = unsigned(addressof(kern.globals.vm_pages[0])) + element_size = unsigned(addressof(kern.globals.vm_pages[1])) - vm_pages_addr + return (vm_pages_addr + masked_page * element_size) return ((unsigned(page) << unsigned(ptr_shift)) + unsigned(min_addr)) @lldb_command('calcvmpagehash') @@ -3398,7 +3401,7 @@ def VMObjectWalkPages(cmd_args=None, cmd_options={}): if (page_count % 1000) == 0: print "traversed %d pages ...\n" % (page_count) else: - out_string += format_string.format(page_count, res_page_count, vmp, vmp.vmp_offset, _vm_page_unpack_ptr(vmp.listq.next), _vm_page_get_phys_page(vmp), vmp.vmp_wire_count) + out_string += format_string.format(page_count, res_page_count, vmp, vmp.vmp_offset, _vm_page_unpack_ptr(vmp.vmp_listq.next), _vm_page_get_phys_page(vmp), vmp.vmp_wire_count) out_string += first_bitfield_format_string.format(vmp.vmp_q_state, vmp.vmp_in_background, vmp.vmp_on_backgroundq, vmp.vmp_gobbled, vmp.vmp_laundry, vmp.vmp_no_cache, vmp.vmp_private, vmp.vmp_reference) diff --git a/tools/lldbmacros/misc.py b/tools/lldbmacros/misc.py index fd5382f1a..effb5ea28 100755 --- a/tools/lldbmacros/misc.py +++ b/tools/lldbmacros/misc.py @@ -7,6 +7,37 @@ import xnudefines from scheduler import * +@lldb_command('showlogstream') +def showLogStream(cmd_args=None): + """ + Dump the state of the kernel log stream + """ + mbp = kern.globals.oslog_streambufp + print "streaming buffer space avail: {0:>#x} of {1:>#x} bytes\n".format(kern.globals.oslog_stream_buf_bytesavail, kern.globals.oslog_stream_buf_size) + print " read head: offset {0:>#x}\nwrite head: offset {1:>#x}\n".format(mbp.msg_bufr, mbp.msg_bufx) + count = 0 + print " id timestamp offset size off+size 
type metadata" + for entry in IterateSTAILQ_HEAD(kern.globals.oslog_stream_buf_head, "buf_entries"): + next_start = entry.offset + entry.size + if (next_start > 0x1000): + next_start = next_start - 0x1000 + print "{0:>4d}: {1:5x} {3:>4d} {4:>5x} {5: inner.offset) and + (outer.offset < inner.offset + inner.size)): + print "error: overlapping entries: {:>3x} <--> {:>3x}".format(outer.offset, inner.offset) + count = count + 1 + @lldb_command('showmcastate') def showMCAstate(cmd_args=None): """ diff --git a/tools/lldbmacros/pmap.py b/tools/lldbmacros/pmap.py index 40529c70c..2424d2d93 100755 --- a/tools/lldbmacros/pmap.py +++ b/tools/lldbmacros/pmap.py @@ -381,12 +381,11 @@ def _PmapL4Walk(pmap_addr_val,vaddr, ept_pmap, verbose_level = vSCRIPT): params: pmap_addr_val - core.value representing kernel data of type pmap_addr_t vaddr : int - virtual address to walk """ - is_cpu64_bit = int(kern.globals.cpu_64bit) pt_paddr = unsigned(pmap_addr_val) pt_valid = (unsigned(pmap_addr_val) != 0) pt_large = 0 pframe_offset = 0 - if pt_valid and is_cpu64_bit: + if pt_valid: # Lookup bits 47:39 of linear address in PML4T pt_index = (vaddr >> 39) & 0x1ff pframe_offset = vaddr & 0x7fffffffff diff --git a/tools/lldbmacros/scheduler.py b/tools/lldbmacros/scheduler.py index 36a37c328..ed22c6ae8 100755 --- a/tools/lldbmacros/scheduler.py +++ b/tools/lldbmacros/scheduler.py @@ -43,6 +43,7 @@ def ShowInterrupts(cmd_args=None): cpu_data_entry = Cast(element, 'cpu_data_t *') print "CPU {} IRQ: {:d}\n".format(y, cpu_data_entry.cpu_stat.irq_ex_cnt) print "CPU {} IPI: {:d}\n".format(y, cpu_data_entry.cpu_stat.ipi_cnt) + print "CPU {} PMI: {:d}\n".format(y, cpu_data_entry.cpu_stat.pmi_cnt) print "CPU {} TMR: {:d}\n".format(y, cpu_data_entry.cpu_stat.timer_cnt) x = x + 1 y = y + 1 @@ -489,6 +490,9 @@ def ShowRunQSummary(runq): print "\t" + GetThreadBackTrace(thread, prefix="\t\t") + "\n" def ShowRTRunQSummary(rt_runq): + if (hex(rt_runq.count) == hex(0xfdfdfdfd)) : + print " Realtime Queue ({:<#012x}) uninitialized\n".format(addressof(rt_runq.queue)) + return print " Realtime Queue ({:<#012x}) Count {:d}\n".format(addressof(rt_runq.queue), rt_runq.count) if rt_runq.count != 0: print "\t" + GetThreadSummary.header + "\n" diff --git a/tools/lldbmacros/xnu.py b/tools/lldbmacros/xnu.py index 1d30ad890..688001678 100755 --- a/tools/lldbmacros/xnu.py +++ b/tools/lldbmacros/xnu.py @@ -793,6 +793,135 @@ def WalkList(cmd_args=[], cmd_options={}): else: print "{0: <#020x}".format(i) +def iotrace_parse_Copt(Copt): + """Parses the -C option argument and returns a list of CPUs + """ + cpusOpt = Copt + cpuList = cpusOpt.split(",") + chosen_cpus = [] + for cpu_num_string in cpuList: + try: + if '-' in cpu_num_string: + parts = cpu_num_string.split('-') + if len(parts) != 2 or not (parts[0].isdigit() and parts[1].isdigit()): + raise ArgumentError("Invalid cpu specification: %s" % cpu_num_string) + firstRange = int(parts[0]) + lastRange = int(parts[1]) + if firstRange >= kern.globals.real_ncpus or lastRange >= kern.globals.real_ncpus: + raise ValueError() + if lastRange < firstRange: + raise ArgumentError("Invalid CPU range specified: `%s'" % cpu_num_string) + for cpu_num in range(firstRange, lastRange + 1): + if cpu_num not in chosen_cpus: + chosen_cpus.append(cpu_num) + else: + chosen_cpu = int(cpu_num_string) + if chosen_cpu < 0 or chosen_cpu >= kern.globals.real_ncpus: + raise ValueError() + if chosen_cpu not in chosen_cpus: + chosen_cpus.append(chosen_cpu) + except ValueError: + raise ArgumentError("Invalid CPU number specified. 
Valid range is 0..%d" % (kern.globals.real_ncpus - 1)) + + return chosen_cpus + + +@lldb_command('iotrace', 'C:N:S:RB') +def IOTrace_cmd(cmd_args=[], cmd_options={}): + """ Prints the iotrace ring buffers for all CPUs by default. + Arguments: + -B : Print backtraces for each ring entry + -C [,...,] : Limit trace entries to those generated by the specified CPUs (each cpuSpec can be a + single CPU number or a range separated by a dash (e.g. "0-3")) + -N : Limit output to the first entries (across all chosen CPUs) + -R : Display results in reverse-sorted order (oldest first; default is newest-first) + -S : Sort output by specified iotrace_entry_t field name (instead of by timestamp) + """ + IDX_CPU = 0 + IDX_RINGPOS = 1 + IDX_RINGENTRY = 2 + MAX_IOTRACE_BACKTRACES = 16 + + if kern.arch != "x86_64": + print "Sorry, iotrace is an x86-only command." + return + + if '-S' in cmd_options: + field_arg = cmd_options['-S'] + try: + getattr(kern.globals.iotrace_ring[0][0], field_arg) + sort_key_field_name = field_arg + except AttributeError: + raise ArgumentError("Invalid sort key field name `%s'" % field_arg) + else: + sort_key_field_name = 'start_time_abs' + + if '-C' in cmd_options: + chosen_cpus = iotrace_parse_Copt(cmd_options['-C']) + else: + chosen_cpus = [x for x in range(kern.globals.real_ncpus)] + + try: + limit_output_count = int(cmd_options['-N']) + except ValueError: + raise ArgumentError("Invalid output count `%s'" % cmd_options['-N']); + except KeyError: + limit_output_count = None + + reverse_sort = '-R' in cmd_options + backtraces = '-B' in cmd_options + + # entries will be a list of 3-tuples, each holding the CPU on which the iotrace entry was collected, + # the original ring index, and the iotrace entry. + entries = [] + for x in chosen_cpus: + ring_slice = [(x, y, kern.globals.iotrace_ring[x][y]) for y in range(kern.globals.iotrace_entries_per_cpu)] + entries.extend(ring_slice) + + total_entries = len(entries) + + entries.sort(key=lambda x: getattr(x[IDX_RINGENTRY], sort_key_field_name), reverse=reverse_sort) + + if limit_output_count is not None and limit_output_count > total_entries: + print ("NOTE: Output count `%d' is too large; showing all %d entries" % (limit_output_count, total_entries)); + limit_output_count = total_entries + + if len(chosen_cpus) < kern.globals.real_ncpus: + print "NOTE: Limiting to entries from cpu%s %s" % ("s" if len(chosen_cpus) > 1 else "", str(chosen_cpus)) + + if limit_output_count is not None and limit_output_count < total_entries: + entries_to_display = limit_output_count + print "NOTE: Limiting to the %s" % ("first entry" if entries_to_display == 1 else ("first %d entries" % entries_to_display)) + else: + entries_to_display = total_entries + + print "%-19s %-8s %-10s %-20s SZ %-18s %-17s DATA" % ( + "START TIME", + "DURATION", + "CPU#[RIDX]", + " TYPE", + " VIRT ADDR", + " PHYS ADDR") + + for x in xrange(entries_to_display): + print "%-20u(%6u) %6s[%02d] %-20s %d 0x%016x 0x%016x 0x%x" % ( + entries[x][IDX_RINGENTRY].start_time_abs, + entries[x][IDX_RINGENTRY].duration, + "CPU%d" % entries[x][IDX_CPU], + entries[x][IDX_RINGPOS], + str(entries[x][IDX_RINGENTRY].iotype).split("=")[1].strip(), + entries[x][IDX_RINGENTRY].size, + entries[x][IDX_RINGENTRY].vaddr, + entries[x][IDX_RINGENTRY].paddr, + entries[x][IDX_RINGENTRY].val) + if backtraces: + for btidx in range(MAX_IOTRACE_BACKTRACES): + nextbt = entries[x][IDX_RINGENTRY].backtrace[btidx] + if nextbt == 0: + break + print "\t" + GetSourceInformationForAddress(nextbt) + + from memory import * diff 
--git a/tools/lockstat/lockstat.c b/tools/lockstat/lockstat.c index 1f2b5af32..50e788108 100644 --- a/tools/lockstat/lockstat.c +++ b/tools/lockstat/lockstat.c @@ -81,37 +81,36 @@ void get_lockgroup_deltas(void); char *pgmname; mach_port_t host_control; -lockgroup_info_t *lockgroup_info, *lockgroup_start, *lockgroup_deltas; -unsigned int count; +lockgroup_info_t *lockgroup_info, *lockgroup_start, *lockgroup_deltas; +unsigned int count; -unsigned int gDebug = 1; +unsigned int gDebug = 1; int main(int argc, char **argv) { - kern_return_t kr; - int arg2; - unsigned int i; - int found; + kern_return_t kr; + int arg2; + unsigned int i; + int found; setlinebuf(stdout); pgmname = argv[0]; gDebug = (NULL != strstr(argv[0], "debug")); - host_control = mach_host_self(); + host_control = mach_host_self(); kr = host_lockgroup_info(host_control, &lockgroup_info, &count); - if (kr != KERN_SUCCESS) - { + if (kr != KERN_SUCCESS) { mach_error("host_statistics", kr); - exit (EXIT_FAILURE); + exit(EXIT_FAILURE); } if (gDebug) { printf("count = %d\n", count); for (i = 0; i < count; i++) { - printf("%s\n",lockgroup_info[i].lockgroup_name); + printf("%s\n", lockgroup_info[i].lockgroup_name); } } @@ -124,22 +123,18 @@ main(int argc, char **argv) print_all_mutex(lockgroup_info); print_rw_hdr(); print_all_rw(lockgroup_info); - } - else if (strcmp(argv[1], "spin") == 0) { + } else if (strcmp(argv[1], "spin") == 0) { print_spin_hdr(); print_all_spin(lockgroup_info); - } - else if (strcmp(argv[1], "mutex") == 0) { + } else if (strcmp(argv[1], "mutex") == 0) { print_mutex_hdr(); print_all_mutex(lockgroup_info); - } - else if (strcmp(argv[1], "rw") == 0) { + } else if (strcmp(argv[1], "rw") == 0) { print_rw_hdr(); print_all_rw(lockgroup_info); - } - else { + } else { found = 0; - for (i = 0;i < count;i++) { + for (i = 0; i < count; i++) { if (strcmp(argv[1], lockgroup_info[i].lockgroup_name) == 0) { found = 1; print_spin_hdr(); @@ -151,10 +146,11 @@ main(int argc, char **argv) break; } } - if (found == 0) - { usage(); } + if (found == 0) { + usage(); + } } - break; + break; case 3: if (sscanf(argv[2], "%d", &arg2) != 1) { usage(); @@ -164,7 +160,6 @@ main(int argc, char **argv) } prime_lockgroup_deltas(); if (strcmp(argv[1], "all") == 0) { - while (1) { sleep(arg2); get_lockgroup_deltas(); @@ -175,38 +170,30 @@ main(int argc, char **argv) print_rw_hdr(); print_all_rw(lockgroup_deltas); } - } - else if (strcmp(argv[1], "spin") == 0) { - + } else if (strcmp(argv[1], "spin") == 0) { while (1) { sleep(arg2); get_lockgroup_deltas(); print_spin_hdr(); print_all_spin(lockgroup_deltas); } - } - else if (strcmp(argv[1], "mutex") == 0) { - + } else if (strcmp(argv[1], "mutex") == 0) { while (1) { sleep(arg2); get_lockgroup_deltas(); print_mutex_hdr(); print_all_mutex(lockgroup_deltas); } - } - else if (strcmp(argv[1], "rw") == 0) { - + } else if (strcmp(argv[1], "rw") == 0) { while (1) { sleep(arg2); get_lockgroup_deltas(); print_rw_hdr(); print_all_rw(lockgroup_deltas); } - } - else { - + } else { found = 0; - for (i = 0;i < count;i++) { + for (i = 0; i < count; i++) { if (strcmp(argv[1], lockgroup_info[i].lockgroup_name) == 0) { found = 1; while (1) { @@ -221,18 +208,20 @@ main(int argc, char **argv) } } } - if (found == 0) - { usage(); } + if (found == 0) { + usage(); + } } break; case 4: - if (strcmp(argv[3], "abs") != 0) - { usage(); } - if (sscanf(argv[2], "%d", &arg2) != 1) - { usage(); } + if (strcmp(argv[3], "abs") != 0) { + usage(); + } + if (sscanf(argv[2], "%d", &arg2) != 1) { + usage(); + } if (strcmp(argv[1], 
"all") == 0) { - while (1) - { + while (1) { print_spin_hdr(); print_all_spin(lockgroup_info); print_mutex_hdr(); @@ -241,34 +230,29 @@ main(int argc, char **argv) print_all_rw(lockgroup_info); sleep(arg2); } - } - else if (strcmp(argv[1], "spin") == 0) { - while (1) - {print_all_spin(lockgroup_info); + } else if (strcmp(argv[1], "spin") == 0) { + while (1) { + print_all_spin(lockgroup_info); sleep(arg2); } - } - else if (strcmp(argv[1], "mutex") == 0) { + } else if (strcmp(argv[1], "mutex") == 0) { print_mutex_hdr(); - while (1) - {print_all_mutex(lockgroup_info); + while (1) { + print_all_mutex(lockgroup_info); sleep(arg2); } - } - else if (strcmp(argv[1], "rw") == 0) { + } else if (strcmp(argv[1], "rw") == 0) { print_rw_hdr(); - while (1) - {print_all_rw(lockgroup_info); + while (1) { + print_all_rw(lockgroup_info); sleep(arg2); } - } - else { + } else { found = 0; - for (i = 0;i < count;i++) { + for (i = 0; i < count; i++) { if (strcmp(argv[1], lockgroup_info[i].lockgroup_name) == 0) { found = 1; - while (1) - { + while (1) { print_spin_hdr(); print_spin(i, lockgroup_info); print_mutex_hdr(); @@ -279,19 +263,20 @@ main(int argc, char **argv) } } } - if (found == 0) - { usage(); } + if (found == 0) { + usage(); + } } break; default: usage(); break; - } + } exit(0); } - -void + +void usage() { fprintf(stderr, "Usage: %s [all, spin, mutex, rw, ] {} {abs}\n", pgmname); @@ -307,7 +292,7 @@ print_spin_hdr(void) void print_spin(int requested, lockgroup_info_t *lockgroup) { - lockgroup_info_t *curptr = &lockgroup[requested]; + lockgroup_info_t *curptr = &lockgroup[requested]; if (curptr->lock_spin_cnt != 0 && curptr->lock_spin_util_cnt != 0) { printf("%16lld ", curptr->lock_spin_util_cnt); @@ -319,11 +304,12 @@ print_spin(int requested, lockgroup_info_t *lockgroup) void print_all_spin(lockgroup_info_t *lockgroup) { - unsigned int i; + unsigned int i; - for (i = 0;i < count;i++) + for (i = 0; i < count; i++) { print_spin(i, lockgroup); - printf("\n"); + } + printf("\n"); } void @@ -332,21 +318,21 @@ print_mutex_hdr(void) #if defined(__i386__) || defined(__x86_64__) printf("Mutex lock attempts Misses Waits Direct Waits Name\n"); #else - printf(" mutex locks misses waits name\n"); + printf(" mutex locks misses waits name\n"); #endif } void print_mutex(int requested, lockgroup_info_t *lockgroup) { - lockgroup_info_t *curptr = &lockgroup[requested]; + lockgroup_info_t *curptr = &lockgroup[requested]; if (curptr->lock_mtx_cnt != 0 && curptr->lock_mtx_util_cnt != 0) { printf("%16lld ", curptr->lock_mtx_util_cnt); #if defined(__i386__) || defined(__x86_64__) - printf("%10lld %10lld %10lld ", curptr->lock_mtx_miss_cnt, curptr->lock_mtx_wait_cnt, curptr->lock_mtx_held_cnt); + printf("%10lld %10lld %10lld ", curptr->lock_mtx_miss_cnt, curptr->lock_mtx_wait_cnt, curptr->lock_mtx_held_cnt); #else - printf("%16lld %16lld ", curptr->lock_mtx_miss_cnt, curptr->lock_mtx_wait_cnt); + printf("%16lld %16lld ", curptr->lock_mtx_miss_cnt, curptr->lock_mtx_wait_cnt); #endif printf("%-14s\n", curptr->lockgroup_name); } @@ -355,12 +341,12 @@ print_mutex(int requested, lockgroup_info_t *lockgroup) void print_all_mutex(lockgroup_info_t *lockgroup) { - unsigned int i; + unsigned int i; - for (i = 0;i < count;i++) + for (i = 0; i < count; i++) { print_mutex(i, lockgroup); - printf("\n"); - + } + printf("\n"); } void @@ -372,11 +358,11 @@ print_rw_hdr(void) void print_rw(int requested, lockgroup_info_t *lockgroup) { - lockgroup_info_t *curptr = &lockgroup[requested]; + lockgroup_info_t *curptr = &lockgroup[requested]; if 
(curptr->lock_rw_cnt != 0 && curptr->lock_rw_util_cnt != 0) { printf("%16lld ", curptr->lock_rw_util_cnt); - printf("%16lld %16lld ", curptr->lock_rw_miss_cnt, curptr->lock_rw_wait_cnt); + printf("%16lld %16lld ", curptr->lock_rw_miss_cnt, curptr->lock_rw_wait_cnt); printf("%-14s\n", curptr->lockgroup_name); } } @@ -384,12 +370,12 @@ print_rw(int requested, lockgroup_info_t *lockgroup) void print_all_rw(lockgroup_info_t *lockgroup) { - unsigned int i; + unsigned int i; - for (i = 0;i < count;i++) + for (i = 0; i < count; i++) { print_rw(i, lockgroup); - printf("\n"); - + } + printf("\n"); } void @@ -398,29 +384,28 @@ prime_lockgroup_deltas(void) lockgroup_start = calloc(count, sizeof(lockgroup_info_t)); if (lockgroup_start == NULL) { fprintf(stderr, "Can't allocate memory for lockgroup info\n"); - exit (EXIT_FAILURE); + exit(EXIT_FAILURE); } memcpy(lockgroup_start, lockgroup_info, count * sizeof(lockgroup_info_t)); - lockgroup_deltas = calloc(count, sizeof(lockgroup_info_t)); + lockgroup_deltas = calloc(count, sizeof(lockgroup_info_t)); if (lockgroup_deltas == NULL) { fprintf(stderr, "Can't allocate memory for lockgroup info\n"); - exit (EXIT_FAILURE); + exit(EXIT_FAILURE); } } void get_lockgroup_deltas(void) { - kern_return_t kr; - unsigned int i; + kern_return_t kr; + unsigned int i; kr = host_lockgroup_info(host_control, &lockgroup_info, &count); - if (kr != KERN_SUCCESS) - { + if (kr != KERN_SUCCESS) { mach_error("host_statistics", kr); - exit (EXIT_FAILURE); + exit(EXIT_FAILURE); } memcpy(lockgroup_deltas, lockgroup_info, count * sizeof(lockgroup_info_t)); diff --git a/tools/reindent.sh b/tools/reindent.sh deleted file mode 100755 index 80cb4dde6..000000000 --- a/tools/reindent.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -# -# Reindent CWD recursively using clang-format (assumed to be in your PATH), -# and per-component or per-directory .clang-format style specifications. -# - -CPUS=`sysctl -n hw.logicalcpu` -CLANGFORMAT=`xcrun -find clang-format` - -if [ ! -x "${CLANGFORMAT}" ]; then - echo "Could not find clang-format" 1>&2 - exit 1 -fi - -echo "Using ${CLANGFORMAT} to reindent, using concurrency of ${CPUS}" - -find -x . \! \( \( -name BUILD -o -name EXTERNAL_HEADERS -o -name zlib -o -name .svn -o -name .git -o -name cscope.\* -o -name \*~ \) -prune \) -type f \( -name \*.c -o -name \*.cpp \) -print0 | \ - xargs -0 -P "${CPUS}" -n 10 "${CLANGFORMAT}" -style=file -i -ret=$? 
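Nearly all of the lockstat.c churn above is mechanical restyling of the kind the reindent.sh being deleted here automated (a parallel clang-format pass over the tree); the tool's logic is unchanged. It still takes one baseline snapshot of the lockgroup counters (prime_lockgroup_deltas) and, on each interval, re-fetches them and diffs against that baseline (get_lockgroup_deltas). A reduced sketch of that sampling step, showing the subtraction for the utilization counters only — the exact field-by-field bookkeeping in the tool is outside this hunk's context:

	#include <stdlib.h>
	#include <mach/mach.h>
	#include <mach/mach_error.h>

	static lockgroup_info_t *baseline;      /* snapshot taken once at startup */
	static lockgroup_info_t *deltas;
	static unsigned int count;

	static void
	sample_deltas(host_t host)
	{
		lockgroup_info_t *fresh;
		kern_return_t kr;

		kr = host_lockgroup_info(host, &fresh, &count);
		if (kr != KERN_SUCCESS) {
			mach_error("host_lockgroup_info", kr);
			exit(EXIT_FAILURE);
		}
		for (unsigned int i = 0; i < count; i++) {
			deltas[i] = fresh[i];
			deltas[i].lock_spin_util_cnt -= baseline[i].lock_spin_util_cnt;
			deltas[i].lock_mtx_util_cnt  -= baseline[i].lock_mtx_util_cnt;
			deltas[i].lock_rw_util_cnt   -= baseline[i].lock_rw_util_cnt;
		}
	}
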
- -if [ $ret -ne 0 ]; then - echo "reindent failed: $ret" 1>&2 - exit 1 -fi - -exit 0 diff --git a/tools/tests/MPMMTest/KQMPMMtest.c b/tools/tests/MPMMTest/KQMPMMtest.c index 66f15b320..c62b694fa 100644 --- a/tools/tests/MPMMTest/KQMPMMtest.c +++ b/tools/tests/MPMMTest/KQMPMMtest.c @@ -26,21 +26,21 @@ typedef struct { - mach_msg_header_t header; - mach_msg_trailer_t trailer; // subtract this when sending + mach_msg_header_t header; + mach_msg_trailer_t trailer; // subtract this when sending } ipc_trivial_message; typedef struct { - mach_msg_header_t header; - u_int32_t numbers[0]; - mach_msg_trailer_t trailer; // subtract this when sending + mach_msg_header_t header; + u_int32_t numbers[0]; + mach_msg_trailer_t trailer; // subtract this when sending } ipc_inline_message; typedef struct { - mach_msg_header_t header; - mach_msg_body_t body; - mach_msg_ool_descriptor_t descriptor; - mach_msg_trailer_t trailer; // subtract this when sending + mach_msg_header_t header; + mach_msg_body_t body; + mach_msg_ool_descriptor_t descriptor; + mach_msg_trailer_t trailer; // subtract this when sending } ipc_complex_message; enum { @@ -60,33 +60,37 @@ struct port_args { }; typedef union { - pid_t pid; - pthread_t tid; + pid_t pid; + pthread_t tid; } thread_id_t; /* Global options */ -static boolean_t verbose = FALSE; -static boolean_t affinity = FALSE; -static boolean_t timeshare = FALSE; -static boolean_t threaded = FALSE; -static boolean_t oneway = FALSE; -static boolean_t do_select = FALSE; +static boolean_t verbose = FALSE; +static boolean_t affinity = FALSE; +static boolean_t timeshare = FALSE; +static boolean_t threaded = FALSE; +static boolean_t oneway = FALSE; +static boolean_t do_select = FALSE; static boolean_t save_perfdata = FALSE; -int msg_type; -int num_ints; -int num_msgs; -int num_clients; -int num_servers; -int client_delay; -int client_spin; -int client_pages; -char **server_port_name; - -void signal_handler(int sig) { +int msg_type; +int num_ints; +int num_msgs; +int num_clients; +int num_servers; +int client_delay; +int client_spin; +int client_pages; +char **server_port_name; + +void +signal_handler(int sig) +{ } -void usage(const char *progname) { +void +usage(const char *progname) +{ fprintf(stderr, "usage: %s [options]\n", progname); fprintf(stderr, "where options are:\n"); fprintf(stderr, " -affinity\t\tthreads use affinity\n"); @@ -118,10 +122,12 @@ void usage(const char *progname) { exit(1); } -void parse_args(int argc, char *argv[]) { - host_basic_info_data_t info; - mach_msg_type_number_t count; - kern_return_t result; +void +parse_args(int argc, char *argv[]) +{ + host_basic_info_data_t info; + mach_msg_type_number_t count; + kern_return_t result; /* Initialize defaults */ msg_type = msg_type_trivial; @@ -131,12 +137,13 @@ void parse_args(int argc, char *argv[]) { num_clients = 4; count = HOST_BASIC_INFO_COUNT; - result = host_info(mach_host_self(), HOST_BASIC_INFO, - (host_info_t)&info, &count); - if (result == KERN_SUCCESS && info.avail_cpus > 1) + result = host_info(mach_host_self(), HOST_BASIC_INFO, + (host_info_t)&info, &count); + if (result == KERN_SUCCESS && info.avail_cpus > 1) { num_servers = info.avail_cpus / 2; - else + } else { num_servers = 1; + } const char *progname = argv[0]; argc--; argv++; @@ -157,50 +164,59 @@ void parse_args(int argc, char *argv[]) { oneway = TRUE; argc--; argv++; } else if (0 == strcmp("-type", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } if (0 == strcmp("trivial", argv[1])) { msg_type = msg_type_trivial; } else if (0 
== strcmp("inline", argv[1])) { msg_type = msg_type_inline; } else if (0 == strcmp("complex", argv[1])) { msg_type = msg_type_complex; - } else + } else { usage(progname); + } argc -= 2; argv += 2; } else if (0 == strcmp("-numints", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } num_ints = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-count", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } num_msgs = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; - } else if (0 == strcmp("-clients", argv[0])) { - if (argc < 2) + } else if (0 == strcmp("-clients", argv[0])) { + if (argc < 2) { usage(progname); + } num_clients = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; - } else if (0 == strcmp("-servers", argv[0])) { - if (argc < 2) + } else if (0 == strcmp("-servers", argv[0])) { + if (argc < 2) { usage(progname); + } num_servers = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-delay", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } client_delay = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-spin", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } client_spin = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-pages", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } client_pages = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-select", argv[0])) { @@ -209,52 +225,54 @@ void parse_args(int argc, char *argv[]) { } else if (0 == strcmp("-perf", argv[0])) { save_perfdata = TRUE; argc--; argv++; - } else + } else { usage(progname); + } } } -void setup_server_ports(struct port_args *ports) +void +setup_server_ports(struct port_args *ports) { kern_return_t ret = 0; mach_port_t bsport; - ports->req_size = MAX(sizeof(ipc_inline_message) + - sizeof(u_int32_t) * num_ints, - sizeof(ipc_complex_message)); - ports->reply_size = sizeof(ipc_trivial_message) - - sizeof(mach_msg_trailer_t); + ports->req_size = MAX(sizeof(ipc_inline_message) + + sizeof(u_int32_t) * num_ints, + sizeof(ipc_complex_message)); + ports->reply_size = sizeof(ipc_trivial_message) - + sizeof(mach_msg_trailer_t); ports->req_msg = malloc(ports->req_size); ports->reply_msg = malloc(ports->reply_size); - ret = mach_port_allocate(mach_task_self(), - MACH_PORT_RIGHT_RECEIVE, - &(ports->port)); + ret = mach_port_allocate(mach_task_self(), + MACH_PORT_RIGHT_RECEIVE, + &(ports->port)); if (KERN_SUCCESS != ret) { mach_error("mach_port_allocate(): ", ret); exit(1); } - ret = mach_port_allocate(mach_task_self(), - MACH_PORT_RIGHT_PORT_SET, - &(ports->pset)); + ret = mach_port_allocate(mach_task_self(), + MACH_PORT_RIGHT_PORT_SET, + &(ports->pset)); if (KERN_SUCCESS != ret) { mach_error("mach_port_allocate(): ", ret); exit(1); } - + ret = mach_port_insert_member(mach_task_self(), - ports->port, - ports->pset); + ports->port, + ports->pset); if (KERN_SUCCESS != ret) { mach_error("mach_port_insert_member(): ", ret); exit(1); } - ret = mach_port_insert_right(mach_task_self(), - ports->port, - ports->port, - MACH_MSG_TYPE_MAKE_SEND); + ret = mach_port_insert_right(mach_task_self(), + ports->port, + ports->port, + MACH_MSG_TYPE_MAKE_SEND); if (KERN_SUCCESS != ret) { mach_error("mach_port_insert_right(): ", ret); exit(1); @@ -268,85 +286,88 @@ void setup_server_ports(struct port_args *ports) if (verbose) { printf("server waiting for IPC messages from client on port '%s'.\n", - server_port_name[ports->server_num]); + 
server_port_name[ports->server_num]); } ret = bootstrap_register(bsport, - server_port_name[ports->server_num], - ports->port); + server_port_name[ports->server_num], + ports->port); if (KERN_SUCCESS != ret) { mach_error("bootstrap_register(): ", ret); exit(1); } } -void setup_client_ports(struct port_args *ports) +void +setup_client_ports(struct port_args *ports) { kern_return_t ret = 0; - switch(msg_type) { - case msg_type_trivial: - ports->req_size = sizeof(ipc_trivial_message); - break; - case msg_type_inline: - ports->req_size = sizeof(ipc_inline_message) + - sizeof(u_int32_t) * num_ints; - break; - case msg_type_complex: - ports->req_size = sizeof(ipc_complex_message); - break; + switch (msg_type) { + case msg_type_trivial: + ports->req_size = sizeof(ipc_trivial_message); + break; + case msg_type_inline: + ports->req_size = sizeof(ipc_inline_message) + + sizeof(u_int32_t) * num_ints; + break; + case msg_type_complex: + ports->req_size = sizeof(ipc_complex_message); + break; } ports->req_size -= sizeof(mach_msg_trailer_t); ports->reply_size = sizeof(ipc_trivial_message); ports->req_msg = malloc(ports->req_size); ports->reply_msg = malloc(ports->reply_size); - ret = mach_port_allocate(mach_task_self(), - MACH_PORT_RIGHT_RECEIVE, - &(ports->port)); + ret = mach_port_allocate(mach_task_self(), + MACH_PORT_RIGHT_RECEIVE, + &(ports->port)); if (KERN_SUCCESS != ret) { mach_error("mach_port_allocate(): ", ret); exit(1); } if (verbose) { printf("Client sending %d %s IPC messages to port '%s' in %s mode.\n", - num_msgs, (msg_type == msg_type_inline) ? - "inline" : ((msg_type == msg_type_complex) ? - "complex" : "trivial"), - server_port_name[ports->server_num], - (oneway ? "oneway" : "rpc")); + num_msgs, (msg_type == msg_type_inline) ? + "inline" : ((msg_type == msg_type_complex) ? + "complex" : "trivial"), + server_port_name[ports->server_num], + (oneway ? 
"oneway" : "rpc")); } - } static void -thread_setup(int tag) { - kern_return_t ret; - thread_extended_policy_data_t epolicy; - thread_affinity_policy_data_t policy; +thread_setup(int tag) +{ + kern_return_t ret; + thread_extended_policy_data_t epolicy; + thread_affinity_policy_data_t policy; if (!timeshare) { epolicy.timeshare = FALSE; ret = thread_policy_set( - mach_thread_self(), THREAD_EXTENDED_POLICY, - (thread_policy_t) &epolicy, - THREAD_EXTENDED_POLICY_COUNT); - if (ret != KERN_SUCCESS) + mach_thread_self(), THREAD_EXTENDED_POLICY, + (thread_policy_t) &epolicy, + THREAD_EXTENDED_POLICY_COUNT); + if (ret != KERN_SUCCESS) { printf("thread_policy_set(THREAD_EXTENDED_POLICY) returned %d\n", ret); + } } - if (affinity) { - policy.affinity_tag = tag; - ret = thread_policy_set( - mach_thread_self(), THREAD_AFFINITY_POLICY, - (thread_policy_t) &policy, - THREAD_AFFINITY_POLICY_COUNT); - if (ret != KERN_SUCCESS) - printf("thread_policy_set(THREAD_AFFINITY_POLICY) returned %d\n", ret); - } + if (affinity) { + policy.affinity_tag = tag; + ret = thread_policy_set( + mach_thread_self(), THREAD_AFFINITY_POLICY, + (thread_policy_t) &policy, + THREAD_AFFINITY_POLICY_COUNT); + if (ret != KERN_SUCCESS) { + printf("thread_policy_set(THREAD_AFFINITY_POLICY) returned %d\n", ret); + } + } } void * -server(void *serverarg) +server(void *serverarg) { int kq; struct kevent64_s kev[1]; @@ -362,35 +383,36 @@ server(void *serverarg) setup_server_ports(&args); thread_setup(args.server_num + 1); - + kq = kqueue(); if (kq == -1) { perror("kqueue"); exit(1); } - EV_SET64(&kev[0], args.pset, EVFILT_MACHPORT, (EV_ADD | EV_CLEAR | EV_DISPATCH), + EV_SET64(&kev[0], args.pset, EVFILT_MACHPORT, (EV_ADD | EV_CLEAR | EV_DISPATCH), #if DIRECT_MSG_RCV - MACH_RCV_MSG|MACH_RCV_LARGE, 0, 0, (mach_vm_address_t)args.req_msg, args.req_size); + MACH_RCV_MSG | MACH_RCV_LARGE, 0, 0, (mach_vm_address_t)args.req_msg, args.req_size); #else - 0, 0, 0, 0, 0); + 0, 0, 0, 0, 0); #endif err = kevent64(kq, kev, 1, NULL, 0, 0, NULL); if (err == -1) { perror("kevent"); exit(1); } - - for (idx = 0; idx < totalmsg; idx++) { - if (verbose) + for (idx = 0; idx < totalmsg; idx++) { + if (verbose) { printf("server awaiting message %d\n", idx); - retry: + } +retry: if (do_select) { FD_ZERO(&readfds); FD_SET(kq, &readfds); - if (verbose) + if (verbose) { printf("Calling select() prior to kevent64().\n"); + } count = select(kq + 1, &readfds, NULL, NULL, NULL); if (count == -1) { @@ -399,77 +421,83 @@ server(void *serverarg) } } - EV_SET64(&kev[0], args.pset, EVFILT_MACHPORT, EV_ENABLE, + EV_SET64(&kev[0], args.pset, EVFILT_MACHPORT, EV_ENABLE, #if DIRECT_MSG_RCV - MACH_RCV_MSG|MACH_RCV_LARGE, 0, 0, (mach_vm_address_t)args.req_msg, args.req_size); + MACH_RCV_MSG | MACH_RCV_LARGE, 0, 0, (mach_vm_address_t)args.req_msg, args.req_size); #else - 0, 0, 0, 0, 0); + 0, 0, 0, 0, 0); #endif err = kevent64(kq, kev, 1, kev, 1, 0, NULL); if (err == -1) { perror("kevent64"); exit(1); - } + } if (err == 0) { - // printf("kevent64: returned zero\n"); + // printf("kevent64: returned zero\n"); goto retry; } #if DIRECT_MSG_RCV ret = kev[0].fflags; if (MACH_MSG_SUCCESS != ret) { - if (verbose) + if (verbose) { printf("kevent64() mach_msg_return=%d", ret); + } mach_error("kevent64 (msg receive): ", ret); exit(1); - } + } #else - if (kev[0].data != args.port) + if (kev[0].data != args.port) { printf("kevent64(MACH_PORT_NULL) port name (%lld) != expected (0x%x)\n", kev[0].data, args.port); + } args.req_msg->msgh_bits = 0; args.req_msg->msgh_size = args.req_size; 
args.req_msg->msgh_local_port = args.port; - ret = mach_msg(args.req_msg, - MACH_RCV_MSG|MACH_RCV_INTERRUPT|MACH_RCV_LARGE, - 0, - args.req_size, - args.pset, - MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); - if (MACH_RCV_INTERRUPTED == ret) + ret = mach_msg(args.req_msg, + MACH_RCV_MSG | MACH_RCV_INTERRUPT | MACH_RCV_LARGE, + 0, + args.req_size, + args.pset, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + if (MACH_RCV_INTERRUPTED == ret) { break; + } if (MACH_MSG_SUCCESS != ret) { - if (verbose) + if (verbose) { printf("mach_msg() ret=%d", ret); + } mach_error("mach_msg (receive): ", ret); exit(1); } #endif - if (verbose) + if (verbose) { printf("server received message %d\n", idx); + } if (args.req_msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) { - ret = vm_deallocate(mach_task_self(), - (vm_address_t)((ipc_complex_message *)args.req_msg)->descriptor.address, - ((ipc_complex_message *)args.req_msg)->descriptor.size); + ret = vm_deallocate(mach_task_self(), + (vm_address_t)((ipc_complex_message *)args.req_msg)->descriptor.address, + ((ipc_complex_message *)args.req_msg)->descriptor.size); } if (1 == args.req_msg->msgh_id) { - if (verbose) + if (verbose) { printf("server sending reply %d\n", idx); - args.reply_msg->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, - MACH_MSG_TYPE_MAKE_SEND); + } + args.reply_msg->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, + MACH_MSG_TYPE_MAKE_SEND); args.reply_msg->msgh_size = args.reply_size; args.reply_msg->msgh_remote_port = args.req_msg->msgh_remote_port; args.reply_msg->msgh_local_port = args.req_msg->msgh_local_port; args.reply_msg->msgh_id = 2; - ret = mach_msg(args.reply_msg, - MACH_SEND_MSG, - args.reply_size, - 0, - MACH_PORT_NULL, - MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); + ret = mach_msg(args.reply_msg, + MACH_SEND_MSG, + args.reply_size, + 0, + MACH_PORT_NULL, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); if (MACH_MSG_SUCCESS != ret) { mach_error("mach_msg (send): ", ret); exit(1); @@ -480,34 +508,36 @@ server(void *serverarg) } static inline void -client_spin_loop(unsigned count, void (fn)(void)) +client_spin_loop(unsigned count, void(fn)(void)) { - while (count--) + while (count--) { fn(); + } } -static long dummy_memory; -static long *client_memory = &dummy_memory; +static long dummy_memory; +static long *client_memory = &dummy_memory; static void client_work_atom(void) { - static int i; + static int i; - if (++i > client_pages * PAGE_SIZE / sizeof(long)) + if (++i > client_pages * PAGE_SIZE / sizeof(long)) { i = 0; + } client_memory[i] = 0; } -static int calibration_count = 10000; -static int calibration_usec; +static int calibration_count = 10000; +static int calibration_usec; static void * calibrate_client_work(void) { - long dummy; - struct timeval nowtv; - struct timeval warmuptv = { 0, 100 * 1000 }; /* 100ms */ - struct timeval starttv; - struct timeval endtv; + long dummy; + struct timeval nowtv; + struct timeval warmuptv = { 0, 100 * 1000 }; /* 100ms */ + struct timeval starttv; + struct timeval endtv; if (client_spin) { /* Warm-up the stepper first... 
*/ @@ -516,8 +546,8 @@ calibrate_client_work(void) do { client_spin_loop(calibration_count, client_work_atom); gettimeofday(&nowtv, NULL); - } while (timercmp(&nowtv, &endtv, < )); - + } while (timercmp(&nowtv, &endtv, < )); + /* Now do the calibration */ while (TRUE) { gettimeofday(&starttv, NULL); @@ -538,41 +568,43 @@ calibrate_client_work(void) calibration_count /= calibration_usec; break; } - if (verbose) + if (verbose) { printf("calibration_count=%d calibration_usec=%d\n", - calibration_count, calibration_usec); + calibration_count, calibration_usec); + } } - return NULL; + return NULL; } static void * client_work(void) { - if (client_spin) { - client_spin_loop(calibration_count*client_spin, - client_work_atom); + client_spin_loop(calibration_count * client_spin, + client_work_atom); } - + if (client_delay) { usleep(client_delay); } return NULL; } -void *client(void *threadarg) +void * +client(void *threadarg) { struct port_args args; int idx; - mach_msg_header_t *req, *reply; + mach_msg_header_t *req, *reply; mach_port_t bsport, servport; kern_return_t ret; int server_num = (int) threadarg; void *ints = malloc(sizeof(u_int32_t) * num_ints); - if (verbose) + if (verbose) { printf("client(%d) started, server port name %s\n", - server_num, server_port_name[server_num]); + server_num, server_port_name[server_num]); + } args.server_num = server_num; thread_setup(server_num + 1); @@ -584,8 +616,8 @@ void *client(void *threadarg) exit(1); } ret = bootstrap_look_up(bsport, - server_port_name[server_num], - &servport); + server_port_name[server_num], + &servport); if (KERN_SUCCESS != ret) { mach_error("bootstrap_look_up(): ", ret); exit(1); @@ -595,19 +627,20 @@ void *client(void *threadarg) /* Allocate and touch memory */ if (client_pages) { - unsigned i; + unsigned i; client_memory = (long *) malloc(client_pages * PAGE_SIZE); - for (i = 0; i < client_pages; i++) + for (i = 0; i < client_pages; i++) { client_memory[i * PAGE_SIZE / sizeof(long)] = 0; + } } - + /* start message loop */ for (idx = 0; idx < num_msgs; idx++) { req = args.req_msg; reply = args.reply_msg; - req->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, - MACH_MSG_TYPE_MAKE_SEND); + req->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, + MACH_MSG_TYPE_MAKE_SEND); req->msgh_size = args.req_size; req->msgh_remote_port = servport; req->msgh_local_port = args.port; @@ -616,21 +649,22 @@ void *client(void *threadarg) (req)->msgh_bits |= MACH_MSGH_BITS_COMPLEX; ((ipc_complex_message *)req)->body.msgh_descriptor_count = 1; ((ipc_complex_message *)req)->descriptor.address = ints; - ((ipc_complex_message *)req)->descriptor.size = - num_ints * sizeof(u_int32_t); + ((ipc_complex_message *)req)->descriptor.size = + num_ints * sizeof(u_int32_t); ((ipc_complex_message *)req)->descriptor.deallocate = FALSE; ((ipc_complex_message *)req)->descriptor.copy = MACH_MSG_VIRTUAL_COPY; ((ipc_complex_message *)req)->descriptor.type = MACH_MSG_OOL_DESCRIPTOR; } - if (verbose) + if (verbose) { printf("client sending message %d\n", idx); - ret = mach_msg(req, - MACH_SEND_MSG, - args.req_size, - 0, - MACH_PORT_NULL, - MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); + } + ret = mach_msg(req, + MACH_SEND_MSG, + args.req_size, + 0, + MACH_PORT_NULL, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); if (MACH_MSG_SUCCESS != ret) { mach_error("mach_msg (send): ", ret); fprintf(stderr, "bailing after %u iterations\n", idx); @@ -638,26 +672,28 @@ void *client(void *threadarg) break; } if (!oneway) { - if (verbose) + if (verbose) { printf("client awaiting reply 
%d\n", idx); + } reply->msgh_bits = 0; reply->msgh_size = args.reply_size; reply->msgh_local_port = args.port; - ret = mach_msg(args.reply_msg, - MACH_RCV_MSG|MACH_RCV_INTERRUPT, - 0, - args.reply_size, - args.port, - MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); + ret = mach_msg(args.reply_msg, + MACH_RCV_MSG | MACH_RCV_INTERRUPT, + 0, + args.reply_size, + args.port, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); if (MACH_MSG_SUCCESS != ret) { mach_error("mach_msg (receive): ", ret); fprintf(stderr, "bailing after %u iterations\n", - idx); + idx); exit(1); } - if (verbose) + if (verbose) { printf("client received reply %d\n", idx); + } } client_work(); @@ -668,44 +704,53 @@ void *client(void *threadarg) } static void -thread_spawn(thread_id_t *thread, void *(fn)(void *), void *arg) { +thread_spawn(thread_id_t *thread, void *(fn)(void *), void *arg) +{ if (threaded) { - kern_return_t ret; + kern_return_t ret; ret = pthread_create( - &thread->tid, - NULL, - fn, - arg); - if (ret != 0) + &thread->tid, + NULL, + fn, + arg); + if (ret != 0) { err(1, "pthread_create()"); - if (verbose) + } + if (verbose) { printf("created pthread %p\n", thread->tid); + } } else { thread->pid = fork(); if (thread->pid == 0) { - if (verbose) + if (verbose) { printf("calling %p(%p)\n", fn, arg); + } fn(arg); exit(0); } - if (verbose) + if (verbose) { printf("forked pid %d\n", thread->pid); + } } } static void -thread_join(thread_id_t *thread) { +thread_join(thread_id_t *thread) +{ if (threaded) { - kern_return_t ret; - if (verbose) + kern_return_t ret; + if (verbose) { printf("joining thread %p\n", thread->tid); + } ret = pthread_join(thread->tid, NULL); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { err(1, "pthread_join(%p)", thread->tid); + } } else { - int stat; - if (verbose) + int stat; + if (verbose) { printf("waiting for pid %d\n", thread->pid); + } waitpid(thread->pid, &stat, 0); } } @@ -713,10 +758,10 @@ thread_join(thread_id_t *thread) { static void wait_for_servers(void) { - int i; - int retry_count = 10; - mach_port_t bsport, servport; - kern_return_t ret; + int i; + int retry_count = 10; + mach_port_t bsport, servport; + kern_return_t ret; /* find server port */ ret = task_get_bootstrap_port(mach_task_self(), &bsport); @@ -728,27 +773,29 @@ wait_for_servers(void) while (retry_count-- > 0) { for (i = 0; i < num_servers; i++) { ret = bootstrap_look_up(bsport, - server_port_name[i], - &servport); + server_port_name[i], + &servport); if (ret != KERN_SUCCESS) { break; } } - if (ret == KERN_SUCCESS) + if (ret == KERN_SUCCESS) { return; - usleep(100 * 1000); /* 100ms */ + } + usleep(100 * 1000); /* 100ms */ } fprintf(stderr, "Server(s) failed to register\n"); exit(1); } -int main(int argc, char *argv[]) +int +main(int argc, char *argv[]) { - int i; - int j; - thread_id_t *client_id; - thread_id_t *server_id; + int i; + int j; + thread_id_t *client_id; + thread_id_t *server_id; signal(SIGINT, signal_handler); parse_args(argc, argv); @@ -759,13 +806,15 @@ int main(int argc, char *argv[]) * If we're using affinity create an empty namespace now * so this is shared by all our offspring. 
*/ - if (affinity) + if (affinity) { thread_setup(0); + } server_id = (thread_id_t *) malloc(num_servers * sizeof(thread_id_t)); server_port_name = (char **) malloc(num_servers * sizeof(char *)); - if (verbose) + if (verbose) { printf("creating %d servers\n", num_servers); + } for (i = 0; i < num_servers; i++) { server_port_name[i] = (char *) malloc(sizeof("PORT.pppppp.xx")); /* PORT names include pid of main process for disambiguation */ @@ -782,12 +831,12 @@ int main(int argc, char *argv[]) * the clients and the clock. */ wait_for_servers(); - - printf("%d server%s, %d client%s per server (%d total) %u messages...", - num_servers, (num_servers > 1)? "s" : "", - num_clients, (num_clients > 1)? "s" : "", - totalclients, - totalmsg); + + printf("%d server%s, %d client%s per server (%d total) %u messages...", + num_servers, (num_servers > 1)? "s" : "", + num_clients, (num_clients > 1)? "s" : "", + totalclients, + totalmsg); fflush(stdout); /* Call gettimeofday() once and throw away result; some implementations @@ -797,12 +846,13 @@ int main(int argc, char *argv[]) gettimeofday(&starttv, NULL); client_id = (thread_id_t *) malloc(totalclients * sizeof(thread_id_t)); - if (verbose) + if (verbose) { printf("creating %d clients\n", totalclients); + } for (i = 0; i < num_servers; i++) { for (j = 0; j < num_clients; j++) { thread_spawn( - &client_id[(i*num_clients) + j], + &client_id[(i * num_clients) + j], client, (void *) (long) i); } @@ -827,25 +877,24 @@ int main(int argc, char *argv[]) deltatv.tv_usec += 1000000; } - double dsecs = (double) deltatv.tv_sec + - 1.0E-6 * (double) deltatv.tv_usec; + double dsecs = (double) deltatv.tv_sec + + 1.0E-6 * (double) deltatv.tv_usec; - double time_in_sec = (double)deltatv.tv_sec + (double)deltatv.tv_usec/1000.0; - double throughput_msg_p_sec = (double) totalmsg/dsecs; - double avg_msg_latency = dsecs*1.0E6 / (double)totalmsg; + double time_in_sec = (double)deltatv.tv_sec + (double)deltatv.tv_usec / 1000.0; + double throughput_msg_p_sec = (double) totalmsg / dsecs; + double avg_msg_latency = dsecs * 1.0E6 / (double)totalmsg; - printf(" in %ld.%03u seconds\n", - (long)deltatv.tv_sec, deltatv.tv_usec/1000); + printf(" in %ld.%03u seconds\n", + (long)deltatv.tv_sec, deltatv.tv_usec / 1000); printf(" throughput in messages/sec: %g\n", - (double)totalmsg / dsecs); - printf(" average message latency (usec): %2.3g\n", - dsecs * 1.0E6 / (double) totalmsg); + (double)totalmsg / dsecs); + printf(" average message latency (usec): %2.3g\n", + dsecs * 1.0E6 / (double) totalmsg); if (save_perfdata == TRUE) { char name[256]; snprintf(name, sizeof(name), "%s_avg_msg_latency", basename(argv[0])); record_perf_data(name, "usec", avg_msg_latency, "Message latency measured in microseconds. 
Lower is better", stderr); } - return (0); - + return 0; } diff --git a/tools/tests/MPMMTest/MPMMtest.c b/tools/tests/MPMMTest/MPMMtest.c index 7dc344fd4..09401d502 100644 --- a/tools/tests/MPMMTest/MPMMtest.c +++ b/tools/tests/MPMMTest/MPMMtest.c @@ -27,21 +27,21 @@ typedef struct { - mach_msg_header_t header; - mach_msg_trailer_t trailer; // subtract this when sending + mach_msg_header_t header; + mach_msg_trailer_t trailer; // subtract this when sending } ipc_trivial_message; typedef struct { - mach_msg_header_t header; - u_int32_t numbers[0]; - mach_msg_trailer_t trailer; // subtract this when sending + mach_msg_header_t header; + u_int32_t numbers[0]; + mach_msg_trailer_t trailer; // subtract this when sending } ipc_inline_message; typedef struct { - mach_msg_header_t header; - mach_msg_body_t body; - mach_msg_ool_descriptor_t descriptor; - mach_msg_trailer_t trailer; // subtract this when sending + mach_msg_header_t header; + mach_msg_body_t body; + mach_msg_ool_descriptor_t descriptor; + mach_msg_trailer_t trailer; // subtract this when sending } ipc_complex_message; enum { @@ -64,52 +64,58 @@ struct port_args { }; typedef union { - pid_t pid; - pthread_t tid; + pid_t pid; + pthread_t tid; } thread_id_t; /* Global options */ -static int verbose = 0; -static boolean_t affinity = FALSE; -static boolean_t timeshare = FALSE; -static boolean_t threaded = FALSE; -static boolean_t oneway = FALSE; -static boolean_t useset = FALSE; -static boolean_t save_perfdata = FALSE; -int msg_type; -int num_ints; -int num_msgs; -int num_clients; -int num_servers; -int client_delay; -int client_spin; -int client_pages; -int portcount = 1; -int setcount = 0; -boolean_t stress_prepost = FALSE; -char **server_port_name; - -struct port_args *server_port_args; +static int verbose = 0; +static boolean_t affinity = FALSE; +static boolean_t timeshare = FALSE; +static boolean_t threaded = FALSE; +static boolean_t oneway = FALSE; +static boolean_t useset = FALSE; +static boolean_t save_perfdata = FALSE; +int msg_type; +int num_ints; +int num_msgs; +int num_clients; +int num_servers; +int client_delay; +int client_spin; +int client_pages; +int portcount = 1; +int setcount = 0; +boolean_t stress_prepost = FALSE; +char **server_port_name; + +struct port_args *server_port_args; /* global data */ mach_timebase_info_data_t g_timebase; int64_t g_client_send_time = 0; -static inline uint64_t ns_to_abs(uint64_t ns) +static inline uint64_t +ns_to_abs(uint64_t ns) { return ns * g_timebase.denom / g_timebase.numer; } -static inline uint64_t abs_to_ns(uint64_t abs) +static inline uint64_t +abs_to_ns(uint64_t abs) { return abs * g_timebase.numer / g_timebase.denom; } -void signal_handler(int sig) { +void +signal_handler(int sig) +{ } -void usage(const char *progname) { +void +usage(const char *progname) +{ fprintf(stderr, "usage: %s [options]\n", progname); fprintf(stderr, "where options are:\n"); fprintf(stderr, " -affinity\t\tthreads use affinity\n"); @@ -146,10 +152,12 @@ void usage(const char *progname) { exit(1); } -void parse_args(int argc, char *argv[]) { - host_basic_info_data_t info; - mach_msg_type_number_t count; - kern_return_t result; +void +parse_args(int argc, char *argv[]) +{ + host_basic_info_data_t info; + mach_msg_type_number_t count; + kern_return_t result; /* Initialize defaults */ msg_type = msg_type_trivial; @@ -159,12 +167,13 @@ void parse_args(int argc, char *argv[]) { num_clients = 4; count = HOST_BASIC_INFO_COUNT; - result = host_info(mach_host_self(), HOST_BASIC_INFO, - (host_info_t)&info, &count); - 
if (result == KERN_SUCCESS && info.avail_cpus > 1) + result = host_info(mach_host_self(), HOST_BASIC_INFO, + (host_info_t)&info, &count); + if (result == KERN_SUCCESS && info.avail_cpus > 1) { num_servers = info.avail_cpus / 2; - else + } else { num_servers = 1; + } const char *progname = argv[0]; argc--; argv++; @@ -188,59 +197,70 @@ void parse_args(int argc, char *argv[]) { save_perfdata = TRUE; argc--; argv++; } else if (0 == strcmp("-type", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } if (0 == strcmp("trivial", argv[1])) { msg_type = msg_type_trivial; } else if (0 == strcmp("inline", argv[1])) { msg_type = msg_type_inline; } else if (0 == strcmp("complex", argv[1])) { msg_type = msg_type_complex; - } else + } else { usage(progname); + } argc -= 2; argv += 2; } else if (0 == strcmp("-numints", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } num_ints = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-count", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } num_msgs = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; - } else if (0 == strcmp("-clients", argv[0])) { - if (argc < 2) + } else if (0 == strcmp("-clients", argv[0])) { + if (argc < 2) { usage(progname); + } num_clients = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; - } else if (0 == strcmp("-servers", argv[0])) { - if (argc < 2) + } else if (0 == strcmp("-servers", argv[0])) { + if (argc < 2) { usage(progname); + } num_servers = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-delay", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } client_delay = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-spin", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } client_spin = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-pages", argv[0])) { - if (argc < 2) + if (argc < 2) { usage(progname); + } client_pages = strtoul(argv[1], NULL, 0); argc -= 2; argv += 2; } else if (0 == strcmp("-set", argv[0])) { - if (argc < 3) + if (argc < 3) { usage(progname); + } setcount = strtoul(argv[1], NULL, 0); portcount = strtoul(argv[2], NULL, 0); - if (setcount <= 0 || portcount <= 0) + if (setcount <= 0 || portcount <= 0) { usage(progname); + } useset = TRUE; argc -= 3; argv += 3; } else if (0 == strcmp("-prepost", argv[0])) { @@ -265,17 +285,18 @@ void parse_args(int argc, char *argv[]) { } } -void setup_server_ports(struct port_args *ports) +void +setup_server_ports(struct port_args *ports) { kern_return_t ret = 0; mach_port_t bsport; mach_port_t port; - ports->req_size = MAX(sizeof(ipc_inline_message) + - sizeof(u_int32_t) * num_ints, - sizeof(ipc_complex_message)); - ports->reply_size = sizeof(ipc_trivial_message) - - sizeof(mach_msg_trailer_t); + ports->req_size = MAX(sizeof(ipc_inline_message) + + sizeof(u_int32_t) * num_ints, + sizeof(ipc_complex_message)); + ports->reply_size = sizeof(ipc_trivial_message) - + sizeof(mach_msg_trailer_t); ports->req_msg = malloc(ports->req_size); ports->reply_msg = malloc(ports->reply_size); if (setcount > 0) { @@ -302,15 +323,16 @@ void setup_server_ports(struct port_args *ports) for (int ns = 0; ns < setcount; ns++) { ret = mach_port_allocate(mach_task_self(), - MACH_PORT_RIGHT_PORT_SET, - &ports->set[ns]); + MACH_PORT_RIGHT_PORT_SET, + &ports->set[ns]); if (KERN_SUCCESS != ret) { mach_error("mach_port_allocate(SET): ", ret); exit(1); } - if (verbose > 1) + if (verbose > 1) { printf("SVR[%d] allocated set[%d] %#x\n", - 
ports->server_num, ns, ports->set[ns]); + ports->server_num, ns, ports->set[ns]); + } set = ports->set[ns]; } @@ -321,25 +343,27 @@ void setup_server_ports(struct port_args *ports) /* stuff the portset(s) with ports */ for (int i = 0; i < portcount; i++) { - ret = mach_port_allocate(mach_task_self(), - MACH_PORT_RIGHT_RECEIVE, - &port); + ret = mach_port_allocate(mach_task_self(), + MACH_PORT_RIGHT_RECEIVE, + &port); if (KERN_SUCCESS != ret) { mach_error("mach_port_allocate(PORT): ", ret); exit(1); } - if (stress_prepost) + if (stress_prepost) { ports->port_list[i] = port; + } if (useset) { /* insert the port into _all_ allocated lowest-level sets */ for (int ns = 0; ns < setcount; ns++) { - if (verbose > 1) + if (verbose > 1) { printf("SVR[%d] moving port %#x into set %#x...\n", - ports->server_num, port, ports->set[ns]); + ports->server_num, port, ports->set[ns]); + } ret = mach_port_insert_member(mach_task_self(), - port, ports->set[ns]); + port, ports->set[ns]); if (KERN_SUCCESS != ret) { mach_error("mach_port_insert_member(): ", ret); exit(1); @@ -355,9 +379,9 @@ void setup_server_ports(struct port_args *ports) /* insert a send right for _each_ port */ for (int i = 0; i < portcount; i++) { ret = mach_port_insert_right(mach_task_self(), - ports->port_list[i], - ports->port_list[i], - MACH_MSG_TYPE_MAKE_SEND); + ports->port_list[i], + ports->port_list[i], + MACH_MSG_TYPE_MAKE_SEND); if (KERN_SUCCESS != ret) { mach_error("mach_port_insert_right(): ", ret); exit(1); @@ -365,9 +389,9 @@ void setup_server_ports(struct port_args *ports) } } else { ret = mach_port_insert_right(mach_task_self(), - ports->port, - ports->port, - MACH_MSG_TYPE_MAKE_SEND); + ports->port, + ports->port, + MACH_MSG_TYPE_MAKE_SEND); if (KERN_SUCCESS != ret) { mach_error("mach_port_insert_right(): ", ret); exit(1); @@ -382,80 +406,84 @@ void setup_server_ports(struct port_args *ports) if (verbose) { printf("server waiting for IPC messages from client on port '%s' (%#x).\n", - server_port_name[ports->server_num], ports->port); + server_port_name[ports->server_num], ports->port); } ret = bootstrap_register(bsport, - server_port_name[ports->server_num], - ports->port); + server_port_name[ports->server_num], + ports->port); if (KERN_SUCCESS != ret) { mach_error("bootstrap_register(): ", ret); exit(1); } } -void setup_client_ports(struct port_args *ports) +void +setup_client_ports(struct port_args *ports) { kern_return_t ret = 0; - switch(msg_type) { - case msg_type_trivial: - ports->req_size = sizeof(ipc_trivial_message); - break; - case msg_type_inline: - ports->req_size = sizeof(ipc_inline_message) + - sizeof(u_int32_t) * num_ints; - break; - case msg_type_complex: - ports->req_size = sizeof(ipc_complex_message); - break; + switch (msg_type) { + case msg_type_trivial: + ports->req_size = sizeof(ipc_trivial_message); + break; + case msg_type_inline: + ports->req_size = sizeof(ipc_inline_message) + + sizeof(u_int32_t) * num_ints; + break; + case msg_type_complex: + ports->req_size = sizeof(ipc_complex_message); + break; } ports->req_size -= sizeof(mach_msg_trailer_t); ports->reply_size = sizeof(ipc_trivial_message); ports->req_msg = malloc(ports->req_size); ports->reply_msg = malloc(ports->reply_size); - ret = mach_port_allocate(mach_task_self(), - MACH_PORT_RIGHT_RECEIVE, - &(ports->port)); + ret = mach_port_allocate(mach_task_self(), + MACH_PORT_RIGHT_RECEIVE, + &(ports->port)); if (KERN_SUCCESS != ret) { mach_error("mach_port_allocate(): ", ret); exit(1); } if (verbose) { printf("Client sending %d %s IPC messages to 
port '%s' in %s mode\n", - num_msgs, (msg_type == msg_type_inline) ? - "inline" : ((msg_type == msg_type_complex) ? - "complex" : "trivial"), - server_port_name[ports->server_num], - (oneway ? "oneway" : "rpc")); + num_msgs, (msg_type == msg_type_inline) ? + "inline" : ((msg_type == msg_type_complex) ? + "complex" : "trivial"), + server_port_name[ports->server_num], + (oneway ? "oneway" : "rpc")); } } static void -thread_setup(int tag) { - kern_return_t ret; - thread_extended_policy_data_t epolicy; - thread_affinity_policy_data_t policy; +thread_setup(int tag) +{ + kern_return_t ret; + thread_extended_policy_data_t epolicy; + thread_affinity_policy_data_t policy; if (!timeshare) { epolicy.timeshare = FALSE; ret = thread_policy_set( - mach_thread_self(), THREAD_EXTENDED_POLICY, - (thread_policy_t) &epolicy, - THREAD_EXTENDED_POLICY_COUNT); - if (ret != KERN_SUCCESS) + mach_thread_self(), THREAD_EXTENDED_POLICY, + (thread_policy_t) &epolicy, + THREAD_EXTENDED_POLICY_COUNT); + if (ret != KERN_SUCCESS) { printf("thread_policy_set(THREAD_EXTENDED_POLICY) returned %d\n", ret); + } } - if (affinity) { - policy.affinity_tag = tag; - ret = thread_policy_set( - mach_thread_self(), THREAD_AFFINITY_POLICY, - (thread_policy_t) &policy, - THREAD_AFFINITY_POLICY_COUNT); - if (ret != KERN_SUCCESS) - printf("thread_policy_set(THREAD_AFFINITY_POLICY) returned %d\n", ret); - } + if (affinity) { + policy.affinity_tag = tag; + ret = thread_policy_set( + mach_thread_self(), THREAD_AFFINITY_POLICY, + (thread_policy_t) &policy, + THREAD_AFFINITY_POLICY_COUNT); + if (ret != KERN_SUCCESS) { + printf("thread_policy_set(THREAD_AFFINITY_POLICY) returned %d\n", ret); + } + } } void * @@ -478,46 +506,51 @@ server(void *serverarg) recv_port = (useset) ? args->rcv_set : args->port; for (idx = 0; idx < totalmsg; idx++) { - if (verbose > 2) + if (verbose > 2) { printf("server awaiting message %d\n", idx); + } ret = mach_msg(args->req_msg, - MACH_RCV_MSG|MACH_RCV_INTERRUPT|MACH_RCV_LARGE, - 0, - args->req_size, - recv_port, - MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); - if (MACH_RCV_INTERRUPTED == ret) + MACH_RCV_MSG | MACH_RCV_INTERRUPT | MACH_RCV_LARGE, + 0, + args->req_size, + recv_port, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); + if (MACH_RCV_INTERRUPTED == ret) { break; + } if (MACH_MSG_SUCCESS != ret) { - if (verbose) + if (verbose) { printf("mach_msg() ret=%d", ret); + } mach_error("mach_msg (receive): ", ret); exit(1); } - if (verbose > 2) + if (verbose > 2) { printf("server received message %d\n", idx); + } if (args->req_msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) { - ret = vm_deallocate(mach_task_self(), - (vm_address_t)((ipc_complex_message *)args->req_msg)->descriptor.address, - ((ipc_complex_message *)args->req_msg)->descriptor.size); + ret = vm_deallocate(mach_task_self(), + (vm_address_t)((ipc_complex_message *)args->req_msg)->descriptor.address, + ((ipc_complex_message *)args->req_msg)->descriptor.size); } if (1 == args->req_msg->msgh_id) { - if (verbose > 2) + if (verbose > 2) { printf("server sending reply %d\n", idx); + } args->reply_msg->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0); args->reply_msg->msgh_size = args->reply_size; args->reply_msg->msgh_remote_port = args->req_msg->msgh_remote_port; args->reply_msg->msgh_local_port = MACH_PORT_NULL; args->reply_msg->msgh_id = 2; ret = mach_msg(args->reply_msg, - MACH_SEND_MSG, - args->reply_size, - 0, - MACH_PORT_NULL, - MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); + MACH_SEND_MSG, + args->reply_size, + 0, + MACH_PORT_NULL, + MACH_MSG_TIMEOUT_NONE, 
+ MACH_PORT_NULL); if (MACH_MSG_SUCCESS != ret) { mach_error("mach_msg (send): ", ret); exit(1); @@ -525,11 +558,13 @@ server(void *serverarg) } } - if (!useset) + if (!useset) { return NULL; + } - if (verbose < 1) + if (verbose < 1) { return NULL; + } uint64_t deltans = 0; /* @@ -537,8 +572,9 @@ server(void *serverarg) * and measure the time. */ for (int ns = 0; ns < setcount; ns++) { - if (verbose > 1) + if (verbose > 1) { printf("\tTearing down set[%d] %#x...\n", ns, args->set[ns]); + } starttm = mach_absolute_time(); ret = mach_port_mod_refs(mach_task_self(), args->set[ns], MACH_PORT_RIGHT_PORT_SET, -1); endtm = mach_absolute_time(); @@ -558,34 +594,36 @@ server(void *serverarg) } static inline void -client_spin_loop(unsigned count, void (fn)(void)) +client_spin_loop(unsigned count, void(fn)(void)) { - while (count--) + while (count--) { fn(); + } } -static long dummy_memory; -static long *client_memory = &dummy_memory; +static long dummy_memory; +static long *client_memory = &dummy_memory; static void client_work_atom(void) { - static int i; + static int i; - if (++i > client_pages * PAGE_SIZE / sizeof(long)) + if (++i > client_pages * PAGE_SIZE / sizeof(long)) { i = 0; + } client_memory[i] = 0; } -static int calibration_count = 10000; -static int calibration_usec; +static int calibration_count = 10000; +static int calibration_usec; static void * calibrate_client_work(void) { - long dummy; - struct timeval nowtv; - struct timeval warmuptv = { 0, 100 * 1000 }; /* 100ms */ - struct timeval starttv; - struct timeval endtv; + long dummy; + struct timeval nowtv; + struct timeval warmuptv = { 0, 100 * 1000 }; /* 100ms */ + struct timeval starttv; + struct timeval endtv; if (client_spin) { /* Warm-up the stepper first... */ @@ -594,8 +632,8 @@ calibrate_client_work(void) do { client_spin_loop(calibration_count, client_work_atom); gettimeofday(&nowtv, NULL); - } while (timercmp(&nowtv, &endtv, < )); - + } while (timercmp(&nowtv, &endtv, < )); + /* Now do the calibration */ while (TRUE) { gettimeofday(&starttv, NULL); @@ -616,9 +654,10 @@ calibrate_client_work(void) calibration_count /= calibration_usec; break; } - if (verbose > 1) + if (verbose > 1) { printf("calibration_count=%d calibration_usec=%d\n", - calibration_count, calibration_usec); + calibration_count, calibration_usec); + } } return NULL; } @@ -626,38 +665,40 @@ calibrate_client_work(void) static void * client_work(void) { - if (client_spin) { - client_spin_loop(calibration_count*client_spin, - client_work_atom); + client_spin_loop(calibration_count * client_spin, + client_work_atom); } - + if (client_delay) { usleep(client_delay); } return NULL; } -void *client(void *threadarg) +void * +client(void *threadarg) { struct port_args args; struct port_args *svr_args = NULL; int idx; - mach_msg_header_t *req, *reply; + mach_msg_header_t *req, *reply; mach_port_t bsport, servport; kern_return_t ret; int server_num = (int)(uintptr_t)threadarg; void *ints = malloc(sizeof(u_int32_t) * num_ints); - if (verbose) + if (verbose) { printf("client(%d) started, server port name %s\n", - server_num, server_port_name[server_num]); + server_num, server_port_name[server_num]); + } args.server_num = server_num; thread_setup(server_num + 1); - if (stress_prepost) + if (stress_prepost) { svr_args = &server_port_args[server_num]; + } /* find server port */ ret = task_get_bootstrap_port(mach_task_self(), &bsport); @@ -666,8 +707,8 @@ void *client(void *threadarg) exit(1); } ret = bootstrap_look_up(bsport, - server_port_name[server_num], - &servport); + 
server_port_name[server_num], + &servport); if (KERN_SUCCESS != ret) { mach_error("bootstrap_look_up(): ", ret); exit(1); @@ -677,14 +718,15 @@ void *client(void *threadarg) /* Allocate and touch memory */ if (client_pages) { - unsigned i; + unsigned i; client_memory = (long *) malloc(client_pages * PAGE_SIZE); - for (i = 0; i < client_pages; i++) + for (i = 0; i < client_pages; i++) { client_memory[i * PAGE_SIZE / sizeof(long)] = 0; + } } uint64_t starttm, endtm; - + /* start message loop */ for (idx = 0; idx < num_msgs; idx++) { req = args.req_msg; @@ -701,7 +743,7 @@ void *client(void *threadarg) req->msgh_local_port = MACH_PORT_NULL; } else { req->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, - MACH_MSG_TYPE_MAKE_SEND_ONCE); + MACH_MSG_TYPE_MAKE_SEND_ONCE); req->msgh_local_port = args.port; } req->msgh_id = oneway ? 0 : 1; @@ -709,23 +751,24 @@ void *client(void *threadarg) (req)->msgh_bits |= MACH_MSGH_BITS_COMPLEX; ((ipc_complex_message *)req)->body.msgh_descriptor_count = 1; ((ipc_complex_message *)req)->descriptor.address = ints; - ((ipc_complex_message *)req)->descriptor.size = - num_ints * sizeof(u_int32_t); + ((ipc_complex_message *)req)->descriptor.size = + num_ints * sizeof(u_int32_t); ((ipc_complex_message *)req)->descriptor.deallocate = FALSE; ((ipc_complex_message *)req)->descriptor.copy = MACH_MSG_VIRTUAL_COPY; ((ipc_complex_message *)req)->descriptor.type = MACH_MSG_OOL_DESCRIPTOR; } - if (verbose > 2) + if (verbose > 2) { printf("client sending message %d to port %#x\n", - idx, req->msgh_remote_port); + idx, req->msgh_remote_port); + } starttm = mach_absolute_time(); - ret = mach_msg(req, - MACH_SEND_MSG, - args.req_size, - 0, - MACH_PORT_NULL, - MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); + ret = mach_msg(req, + MACH_SEND_MSG, + args.req_size, + 0, + MACH_PORT_NULL, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); endtm = mach_absolute_time(); if (MACH_MSG_SUCCESS != ret) { mach_error("mach_msg (send): ", ret); @@ -733,30 +776,33 @@ void *client(void *threadarg) exit(1); break; } - if (stress_prepost) + if (stress_prepost) { OSAtomicAdd64(endtm - starttm, &g_client_send_time); + } if (!oneway) { - if (verbose > 2) + if (verbose > 2) { printf("client awaiting reply %d\n", idx); + } reply->msgh_bits = 0; reply->msgh_size = args.reply_size; reply->msgh_local_port = args.port; - ret = mach_msg(args.reply_msg, - MACH_RCV_MSG|MACH_RCV_INTERRUPT, - 0, - args.reply_size, - args.port, - MACH_MSG_TIMEOUT_NONE, - MACH_PORT_NULL); + ret = mach_msg(args.reply_msg, + MACH_RCV_MSG | MACH_RCV_INTERRUPT, + 0, + args.reply_size, + args.port, + MACH_MSG_TIMEOUT_NONE, + MACH_PORT_NULL); if (MACH_MSG_SUCCESS != ret) { mach_error("mach_msg (receive): ", ret); fprintf(stderr, "bailing after %u iterations\n", - idx); + idx); exit(1); } - if (verbose > 2) + if (verbose > 2) { printf("client received reply %d\n", idx); + } } client_work(); @@ -767,44 +813,53 @@ void *client(void *threadarg) } static void -thread_spawn(thread_id_t *thread, void *(fn)(void *), void *arg) { +thread_spawn(thread_id_t *thread, void *(fn)(void *), void *arg) +{ if (threaded) { - kern_return_t ret; + kern_return_t ret; ret = pthread_create( - &thread->tid, - NULL, - fn, - arg); - if (ret != 0) + &thread->tid, + NULL, + fn, + arg); + if (ret != 0) { err(1, "pthread_create()"); - if (verbose > 1) + } + if (verbose > 1) { printf("created pthread %p\n", thread->tid); + } } else { thread->pid = fork(); if (thread->pid == 0) { - if (verbose > 1) + if (verbose > 1) { printf("calling %p(%p)\n", fn, arg); + } fn(arg); exit(0); } 
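
/*
 * Illustrative sketch, not part of the patch above: the mach_msg() core
 * that MPMMtest's client and server loops exercise, reduced to a single
 * oneway round trip on a self-owned port. All names here are ad hoc and
 * error handling is minimal; the real test adds OOL descriptors, port
 * sets, and reply handling as shown in the surrounding hunks.
 */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

static void
oneway_roundtrip(void)
{
	mach_port_t port;
	kern_return_t kr;
	struct {
		mach_msg_header_t header;
	} req;
	struct {
		mach_msg_header_t header;
		mach_msg_trailer_t trailer;
	} rcv;

	kr = mach_port_allocate(mach_task_self(),
	    MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS) {
		mach_error("mach_port_allocate: ", kr);
		return;
	}
	kr = mach_port_insert_right(mach_task_self(), port, port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS) {
		mach_error("mach_port_insert_right: ", kr);
		return;
	}

	req.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	req.header.msgh_size = sizeof(req);
	req.header.msgh_remote_port = port;
	req.header.msgh_local_port = MACH_PORT_NULL;
	req.header.msgh_id = 0; /* 0 == oneway in MPMMtest's convention */

	/* Send to ourselves, then receive the same message back. */
	kr = mach_msg(&req.header, MACH_SEND_MSG, sizeof(req), 0,
	    MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	if (kr == MACH_MSG_SUCCESS) {
		kr = mach_msg(&rcv.header, MACH_RCV_MSG, 0, sizeof(rcv),
		    port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
	printf("roundtrip: %s\n", mach_error_string(kr));
}
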
- if (verbose > 1) + if (verbose > 1) { printf("forked pid %d\n", thread->pid); + } } } static void -thread_join(thread_id_t *thread) { +thread_join(thread_id_t *thread) +{ if (threaded) { - kern_return_t ret; - if (verbose > 1) + kern_return_t ret; + if (verbose > 1) { printf("joining thread %p\n", thread->tid); + } ret = pthread_join(thread->tid, NULL); - if (ret != KERN_SUCCESS) + if (ret != KERN_SUCCESS) { err(1, "pthread_join(%p)", thread->tid); + } } else { - int stat; - if (verbose > 1) + int stat; + if (verbose > 1) { printf("waiting for pid %d\n", thread->pid); + } waitpid(thread->pid, &stat, 0); } } @@ -812,10 +867,10 @@ thread_join(thread_id_t *thread) { static void wait_for_servers(void) { - int i; - int retry_count = 10; - mach_port_t bsport, servport; - kern_return_t ret; + int i; + int retry_count = 10; + mach_port_t bsport, servport; + kern_return_t ret; /* find server port */ ret = task_get_bootstrap_port(mach_task_self(), &bsport); @@ -827,26 +882,28 @@ wait_for_servers(void) while (retry_count-- > 0) { for (i = 0; i < num_servers; i++) { ret = bootstrap_look_up(bsport, - server_port_name[i], - &servport); + server_port_name[i], + &servport); if (ret != KERN_SUCCESS) { break; } } - if (ret == KERN_SUCCESS) + if (ret == KERN_SUCCESS) { return; - usleep(100 * 1000); /* 100ms */ + } + usleep(100 * 1000); /* 100ms */ } fprintf(stderr, "Server(s) failed to register\n"); exit(1); } -int main(int argc, char *argv[]) +int +main(int argc, char *argv[]) { - int i; - int j; - thread_id_t *client_id; - thread_id_t *server_id; + int i; + int j; + thread_id_t *client_id; + thread_id_t *server_id; signal(SIGINT, signal_handler); parse_args(argc, argv); @@ -862,8 +919,9 @@ int main(int argc, char *argv[]) * If we're using affinity create an empty namespace now * so this is shared by all our offspring. */ - if (affinity) + if (affinity) { thread_setup(0); + } server_id = (thread_id_t *) malloc(num_servers * sizeof(thread_id_t)); server_port_name = (char **) malloc(num_servers * sizeof(char *)); @@ -873,8 +931,9 @@ int main(int argc, char *argv[]) exit(1); } - if (verbose) + if (verbose) { printf("creating %d servers\n", num_servers); + } for (i = 0; i < num_servers; i++) { server_port_name[i] = (char *) malloc(sizeof("PORT.pppppp.xx")); /* PORT names include pid of main process for disambiguation */ @@ -891,12 +950,12 @@ int main(int argc, char *argv[]) * the clients and the clock. */ wait_for_servers(); - - printf("%d server%s, %d client%s per server (%d total) %u messages...", - num_servers, (num_servers > 1)? "s" : "", - num_clients, (num_clients > 1)? "s" : "", - totalclients, - totalmsg); + + printf("%d server%s, %d client%s per server (%d total) %u messages...", + num_servers, (num_servers > 1)? "s" : "", + num_clients, (num_clients > 1)? 
"s" : "", + totalclients, + totalmsg); fflush(stdout); /* Call gettimeofday() once and throw away result; some implementations @@ -906,12 +965,13 @@ int main(int argc, char *argv[]) gettimeofday(&starttv, NULL); client_id = (thread_id_t *) malloc(totalclients * sizeof(thread_id_t)); - if (verbose) + if (verbose) { printf("creating %d clients\n", totalclients); + } for (i = 0; i < num_servers; i++) { for (j = 0; j < num_clients; j++) { thread_spawn( - &client_id[(i*num_clients) + j], + &client_id[(i * num_clients) + j], client, (void *) (long) i); } @@ -923,8 +983,9 @@ int main(int argc, char *argv[]) } gettimeofday(&endtv, NULL); - if (verbose) + if (verbose) { printf("all servers complete: waiting for clients...\n"); + } for (i = 0; i < totalclients; i++) { thread_join(&client_id[i]); @@ -938,19 +999,19 @@ int main(int argc, char *argv[]) deltatv.tv_usec += 1000000; } - double dsecs = (double) deltatv.tv_sec + - 1.0E-6 * (double) deltatv.tv_usec; + double dsecs = (double) deltatv.tv_sec + + 1.0E-6 * (double) deltatv.tv_usec; printf(" in %lu.%03u seconds\n", - deltatv.tv_sec, deltatv.tv_usec/1000); + deltatv.tv_sec, deltatv.tv_usec / 1000); printf(" throughput in messages/sec: %g\n", - (double)totalmsg / dsecs); - printf(" average message latency (usec): %2.3g\n", - dsecs * 1.0E6 / (double) totalmsg); + (double)totalmsg / dsecs); + printf(" average message latency (usec): %2.3g\n", + dsecs * 1.0E6 / (double) totalmsg); - double time_in_sec = (double)deltatv.tv_sec + (double)deltatv.tv_usec/1000.0; - double throughput_msg_p_sec = (double) totalmsg/dsecs; - double avg_msg_latency = dsecs*1.0E6 / (double)totalmsg; + double time_in_sec = (double)deltatv.tv_sec + (double)deltatv.tv_usec / 1000.0; + double throughput_msg_p_sec = (double) totalmsg / dsecs; + double avg_msg_latency = dsecs * 1.0E6 / (double)totalmsg; if (save_perfdata == TRUE) { char name[256]; @@ -963,9 +1024,8 @@ int main(int argc, char *argv[]) dsecs = (double)sendns / (double)NSEC_PER_SEC; printf(" total send time: %2.3gs\n", dsecs); printf(" average send time (usec): %2.3g\n", - dsecs * 1.0E6 / (double)totalmsg); + dsecs * 1.0E6 / (double)totalmsg); } - return (0); - + return 0; } diff --git a/tools/tests/TLBcoherency/TLBcoherency.c b/tools/tests/TLBcoherency/TLBcoherency.c index a4165baac..f185f779c 100644 --- a/tools/tests/TLBcoherency/TLBcoherency.c +++ b/tools/tests/TLBcoherency/TLBcoherency.c @@ -2,7 +2,7 @@ * Copyright (c) 2011 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ @@ -97,28 +97,33 @@ int sleepus; pthread_t threads[MAX_THREADS]; uint32_t roles[MAX_THREADS]; -void usage(char **a) { +void +usage(char **a) +{ exit(1); } -void set_enable(int val) +void +set_enable(int val) { int mib[6]; size_t needed; - mib[0] = CTL_KERN; - mib[1] = KERN_KDEBUG; - mib[2] = KERN_KDENABLE; - mib[3] = val; - mib[4] = 0; - mib[5] = 0; + mib[0] = CTL_KERN; + mib[1] = KERN_KDEBUG; + mib[2] = KERN_KDENABLE; + mib[3] = val; + mib[4] = 0; + mib[5] = 0; - if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0) { - printf("trace facility failure, KERN_KDENABLE\n"); + if (sysctl(mib, 4, NULL, &needed, NULL, 0) < 0) { + printf("trace facility failure, KERN_KDENABLE\n"); } } -void initialize_arena_element(int i) { +void +initialize_arena_element(int i) +{ __unused int sysret; void *hint = reuse_addrs ? (void *)0x1000 : NULL; parray[i].taddr = (uintptr_t)mmap(hint, mapping_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED, -1, 0); @@ -128,7 +133,7 @@ void initialize_arena_element(int i) { exit(2); } -#if !defined(__LP64__) +#if !defined(__LP64__) uint32_t pattern = parray[i].taddr; pattern |= cpid & 0xFFF; // memset_pattern4((void *)parray[i].taddr, &pattern, PAGE_SIZE); // @@ -141,7 +146,7 @@ void initialize_arena_element(int i) { // memset_pattern8(parray[i].taddr, &pattern, PAGE_SIZE); #endif - uint64_t val = (*(uintptr_t *)parray[i].taddr); + uint64_t val = (*(uintptr_t *)parray[i].taddr); if (val != 0) { CONSISTENCY("Mismatch, actual: 0x%llx, expected: 0x%llx\n", (unsigned long long)val, 0ULL); @@ -162,25 +167,31 @@ void initialize_arena_element(int i) { } } -void initialize_arena(void) { +void +initialize_arena(void) +{ for (int i = 0; i < arenasize; i++) { initialize_arena_element(i); } } -void *tlbexerciser(void *targs) { +void * +tlbexerciser(void *targs) +{ uint32_t role = *(uint32_t *)targs; __unused int sysret; printf("Starting thread %p, role: %u\n", pthread_self(), role); - for(;;) { + for (;;) { for (int i = 0; i < arenasize; i++) { - if (all_stop) + if (all_stop) { return NULL; + } if (trymode) { - if (OSSpinLockTry(&parray[i].tlock) == false) + if (OSSpinLockTry(&parray[i].tlock) == false) { continue; + } } else { OSSpinLockLock(&parray[i].tlock); } @@ -191,8 +202,9 @@ void *tlbexerciser(void *targs) { uintptr_t val = *(uintptr_t *)parray[i].taddr; if (val != ad) { - if (stop_on_failure) + if (stop_on_failure) { all_stop = true; + } syscall(180, 0x71BC0000, (ad >> 32), (ad & ~0), 0, 0, 0); CONSISTENCY("Mismatch, actual: 0x%llx, expected: 0x%llx\n", (unsigned long long)val, (unsigned long long)ad); if (stop_on_failure) { @@ -223,15 +235,18 @@ void *tlbexerciser(void *targs) { parray[i].tlock = 0; //unlock - if (sleepus) + if (sleepus) { usleep(sleepus); + } } } return NULL; } -int main(int argc, char **argv) { +int +main(int argc, char **argv) +{ extern char *optarg; int arg; unsigned nthreads = NTHREADS; @@ -248,7 +263,7 @@ int main(int argc, char **argv) { break; case 's': arenasize = atoi(optarg); // we typically want this to - // be sized < 2nd level TLB + // be sized < 2nd level TLB break; case 'f': stop_on_failure = true; @@ -270,7 +285,7 @@ int main(int argc, char **argv) { } } - if(optind != argc) { + if (optind != argc) { usage(argv); } @@ -283,25 +298,26 @@ int main(int argc, char **argv) { for (int dex = 0; dex < nthreads; dex++) { roles[dex] = LOOPER; - if (dex == 0) + if (dex == 0) { roles[dex] = OBSERVER; + } int result = pthread_create(&threads[dex], NULL, tlbexerciser, &roles[dex]); - if(result) { + if 
(result) { printf("pthread_create: %d starting worker thread; aborting.\n", result); return result; } } - for(int dex = 0; dex < nthreads; dex++) { + for (int dex = 0; dex < nthreads; dex++) { void *rtn; int result = pthread_join(threads[dex], &rtn); - if(result) { + if (result) { printf("pthread_join(): %d, aborting\n", result); return result; } - if(rtn) { + if (rtn) { printf("***Aborting on worker error\n"); exit(1); } diff --git a/tools/tests/affinity/pool.c b/tools/tests/affinity/pool.c index 449ff23af..8637c22f6 100644 --- a/tools/tests/affinity/pool.c +++ b/tools/tests/affinity/pool.c @@ -45,88 +45,88 @@ * use as input and output and what function to call for processing is * data-driven. */ - + pthread_mutex_t funnel; -pthread_cond_t barrier; +pthread_cond_t barrier; -uint64_t timer; -int threads; -int threads_ready = 0; +uint64_t timer; +int threads; +int threads_ready = 0; -int iterations = 10000; -boolean_t affinity = FALSE; -boolean_t halting = FALSE; -int verbosity = 1; +int iterations = 10000; +boolean_t affinity = FALSE; +boolean_t halting = FALSE; +int verbosity = 1; typedef struct work { - TAILQ_ENTRY(work) link; - int *data; - int isize; - int tag; - int number; + TAILQ_ENTRY(work) link; + int *data; + int isize; + int tag; + int number; } work_t; /* * A work queue, complete with pthread objects for its management */ typedef struct work_queue { - pthread_mutex_t mtx; - pthread_cond_t cnd; - TAILQ_HEAD(, work) queue; - unsigned int waiters; + pthread_mutex_t mtx; + pthread_cond_t cnd; + TAILQ_HEAD(, work) queue; + unsigned int waiters; } work_queue_t; /* Worker functions take a integer array and size */ -typedef void (worker_fn_t)(int *, int); +typedef void (worker_fn_t)(int *, int); /* This struct controls the function of a stage */ #define WORKERS_MAX 10 typedef struct { - int stagenum; - char *name; - worker_fn_t *fn; - work_queue_t *input; - work_queue_t *output; - work_queue_t bufq; - int work_todo; + int stagenum; + char *name; + worker_fn_t *fn; + work_queue_t *input; + work_queue_t *output; + work_queue_t bufq; + int work_todo; } stage_info_t; /* This defines a worker thread */ typedef struct worker_info { - int setnum; - stage_info_t *stage; - pthread_t thread; + int setnum; + stage_info_t *stage; + pthread_t thread; } worker_info_t; -#define DBG(x...) do { \ - if (verbosity > 1) { \ - pthread_mutex_lock(&funnel); \ - printf(x); \ - pthread_mutex_unlock(&funnel); \ - } \ +#define DBG(x...) do { \ + if (verbosity > 1) { \ + pthread_mutex_lock(&funnel); \ + printf(x); \ + pthread_mutex_unlock(&funnel); \ + } \ } while (0) -#define mutter(x...) do { \ - if (verbosity > 0) { \ - printf(x); \ - } \ +#define mutter(x...) do { \ + if (verbosity > 0) { \ + printf(x); \ + } \ } while (0) -#define s_if_plural(x) (((x) > 1) ? "s" : "") +#define s_if_plural(x) (((x) > 1) ? 
"s" : "") static void usage() { fprintf(stderr, - "usage: pool [-a] Turn affinity on (off)\n" - " [-b B] Number of buffers per producer (2)\n" - " [-i I] Number of buffers to produce (10000)\n" - " [-s S] Number of stages (2)\n" - " [-p P] Number of pages per buffer (256=1MB)]\n" - " [-w] Consumer writes data\n" - " [-v V] Verbosity level 0..2 (1)\n" - " [N [M]] Number of producer and consumers (2)\n" - ); + "usage: pool [-a] Turn affinity on (off)\n" + " [-b B] Number of buffers per producer (2)\n" + " [-i I] Number of buffers to produce (10000)\n" + " [-s S] Number of stages (2)\n" + " [-p P] Number of pages per buffer (256=1MB)]\n" + " [-w] Consumer writes data\n" + " [-v V] Verbosity level 0..2 (1)\n" + " [N [M]] Number of producer and consumers (2)\n" + ); exit(1); } @@ -134,7 +134,7 @@ usage() void writer_fn(int *data, int isize) { - int i; + int i; for (i = 0; i < isize; i++) { data[i] = i; @@ -145,8 +145,8 @@ writer_fn(int *data, int isize) void reader_fn(int *data, int isize) { - int i; - int datum; + int i; + int datum; for (i = 0; i < isize; i++) { datum = data[i]; @@ -157,7 +157,7 @@ reader_fn(int *data, int isize) void reader_writer_fn(int *data, int isize) { - int i; + int i; for (i = 0; i < isize; i++) { data[i] += 1; @@ -167,16 +167,17 @@ reader_writer_fn(int *data, int isize) void affinity_set(int tag) { - kern_return_t ret; - thread_affinity_policy_data_t policy; + kern_return_t ret; + thread_affinity_policy_data_t policy; if (affinity) { policy.affinity_tag = tag; ret = thread_policy_set( - mach_thread_self(), THREAD_AFFINITY_POLICY, - (thread_policy_t) &policy, - THREAD_AFFINITY_POLICY_COUNT); - if (ret != KERN_SUCCESS) + mach_thread_self(), THREAD_AFFINITY_POLICY, + (thread_policy_t) &policy, + THREAD_AFFINITY_POLICY_COUNT); + if (ret != KERN_SUCCESS) { printf("thread_policy_set(THREAD_AFFINITY_POLICY) returned %d\n", ret); + } } } @@ -187,22 +188,23 @@ affinity_set(int tag) void * manager_fn(void *arg) { - worker_info_t *wp = (worker_info_t *) arg; - stage_info_t *sp = wp->stage; - boolean_t is_producer = (sp->stagenum == 0); - long iteration = 0; - int current_tag = 0; - - kern_return_t ret; - thread_extended_policy_data_t epolicy; + worker_info_t *wp = (worker_info_t *) arg; + stage_info_t *sp = wp->stage; + boolean_t is_producer = (sp->stagenum == 0); + long iteration = 0; + int current_tag = 0; + + kern_return_t ret; + thread_extended_policy_data_t epolicy; epolicy.timeshare = FALSE; ret = thread_policy_set( - mach_thread_self(), THREAD_EXTENDED_POLICY, - (thread_policy_t) &epolicy, - THREAD_EXTENDED_POLICY_COUNT); - if (ret != KERN_SUCCESS) + mach_thread_self(), THREAD_EXTENDED_POLICY, + (thread_policy_t) &epolicy, + THREAD_EXTENDED_POLICY_COUNT); + if (ret != KERN_SUCCESS) { printf("thread_policy_set(THREAD_EXTENDED_POLICY) returned %d\n", ret); - + } + /* * If we're using affinity sets and we're a producer * set our tag to by our thread set number. @@ -224,7 +226,7 @@ manager_fn(void *arg) pthread_mutex_unlock(&funnel); if (halting) { printf(" all threads ready for process %d, " - "hit any key to start", getpid()); + "hit any key to start", getpid()); fflush(stdout); (void) getchar(); } @@ -236,7 +238,7 @@ manager_fn(void *arg) } do { - work_t *workp; + work_t *workp; /* * Get a buffer from the input queue. 
@@ -250,10 +252,11 @@ manager_fn(void *arg) goto out; } workp = TAILQ_FIRST(&(sp->input->queue)); - if (workp != NULL) + if (workp != NULL) { break; + } DBG(" %s[%d,%d] todo %d waiting for buffer\n", - sp->name, wp->setnum, sp->stagenum, sp->work_todo); + sp->name, wp->setnum, sp->stagenum, sp->work_todo); sp->input->waiters++; pthread_cond_wait(&sp->input->cnd, &sp->input->mtx); sp->input->waiters--; @@ -273,7 +276,7 @@ manager_fn(void *arg) } DBG(" %s[%d,%d] todo %d work %p data %p\n", - sp->name, wp->setnum, sp->stagenum, iteration, workp, workp->data); + sp->name, wp->setnum, sp->stagenum, iteration, workp, workp->data); /* Do our stuff with the buffer */ (void) sp->fn(workp->data, workp->isize); @@ -286,11 +289,10 @@ manager_fn(void *arg) TAILQ_INSERT_TAIL(&(sp->output->queue), workp, link); if (sp->output->waiters) { DBG(" %s[%d,%d] todo %d signaling work\n", - sp->name, wp->setnum, sp->stagenum, iteration); + sp->name, wp->setnum, sp->stagenum, iteration); pthread_cond_signal(&sp->output->cnd); } pthread_mutex_unlock(&sp->output->mtx); - } while (1); out: @@ -307,24 +309,24 @@ void (*consumer_fnp)(int *data, int isize) = &reader_fn; int main(int argc, char *argv[]) { - int i; - int j; - int k; - int pages = 256; /* 1MB */ - int buffers = 2; - int producers = 2; - int consumers = 2; - int stages = 2; - int *status; - stage_info_t *stage_info; - stage_info_t *sp; - worker_info_t *worker_info; - worker_info_t *wp; - kern_return_t ret; - int c; + int i; + int j; + int k; + int pages = 256; /* 1MB */ + int buffers = 2; + int producers = 2; + int consumers = 2; + int stages = 2; + int *status; + stage_info_t *stage_info; + stage_info_t *sp; + worker_info_t *worker_info; + worker_info_t *wp; + kern_return_t ret; + int c; /* Do switch parsing: */ - while ((c = getopt (argc, argv, "ab:i:p:s:twv:")) != -1) { + while ((c = getopt(argc, argv, "ab:i:p:s:twv:")) != -1) { switch (c) { case 'a': affinity = !affinity; @@ -340,8 +342,9 @@ main(int argc, char *argv[]) break; case 's': stages = atoi(optarg); - if (stages >= WORKERS_MAX) + if (stages >= WORKERS_MAX) { usage(); + } break; case 't': halting = TRUE; @@ -359,38 +362,42 @@ main(int argc, char *argv[]) } } argc -= optind; argv += optind; - if (argc > 0) + if (argc > 0) { producers = atoi(*argv); + } argc--; argv++; - if (argc > 0) + if (argc > 0) { consumers = atoi(*argv); - + } + pthread_mutex_init(&funnel, NULL); pthread_cond_init(&barrier, NULL); /* - * Fire up the worker threads. + * Fire up the worker threads. */ threads = consumers * (stages - 1) + producers; mutter("Launching %d producer%s with %d stage%s of %d consumer%s\n" - " with %saffinity, consumer reads%s data\n", - producers, s_if_plural(producers), - stages - 1, s_if_plural(stages - 1), - consumers, s_if_plural(consumers), - affinity? "": "no ", - (consumer_fnp == &reader_writer_fn)? " and writes" : ""); - if (pages < 256) + " with %saffinity, consumer reads%s data\n", + producers, s_if_plural(producers), + stages - 1, s_if_plural(stages - 1), + consumers, s_if_plural(consumers), + affinity? "": "no ", + (consumer_fnp == &reader_writer_fn)? 
" and writes" : ""); + if (pages < 256) { mutter(" %dkB bytes per buffer, ", pages * 4); - else + } else { mutter(" %dMB bytes per buffer, ", pages / 256); + } mutter("%d buffer%s per producer ", - buffers, s_if_plural(buffers)); - if (buffers * pages < 256) + buffers, s_if_plural(buffers)); + if (buffers * pages < 256) { mutter("(total %dkB)\n", buffers * pages * 4); - else + } else { mutter("(total %dMB)\n", buffers * pages / 256); + } mutter(" processing %d buffer%s...\n", - iterations, s_if_plural(iterations)); + iterations, s_if_plural(iterations)); stage_info = (stage_info_t *) malloc(stages * sizeof(stage_info_t)); worker_info = (worker_info_t *) malloc(threads * sizeof(worker_info_t)); @@ -414,12 +421,12 @@ main(int argc, char *argv[]) sp->output = &stage_info[(i + 1) % stages].bufq; stage_info[i].work_todo = iterations; } - + /* Create the producers */ for (i = 0; i < producers; i++) { - work_t *work_array; - int *data; - int isize; + work_t *work_array; + int *data; + int isize; isize = pages * 4096 / sizeof(int); data = (int *) malloc(buffers * pages * 4096); @@ -427,34 +434,36 @@ main(int argc, char *argv[]) /* Set up the empty work buffers */ work_array = (work_t *) malloc(buffers * sizeof(work_t)); for (j = 0; j < buffers; j++) { - work_array[j].data = data + (isize * j); + work_array[j].data = data + (isize * j); work_array[j].isize = isize; work_array[j].tag = 0; TAILQ_INSERT_TAIL(&stage_info[0].bufq.queue, &work_array[j], link); DBG(" empty work item %p for data %p\n", - &work_array[j], work_array[j].data); + &work_array[j], work_array[j].data); } wp = &worker_info[i]; wp->setnum = i + 1; wp->stage = &stage_info[0]; if (ret = pthread_create(&wp->thread, - NULL, - &manager_fn, - (void *) wp)) + NULL, + &manager_fn, + (void *) wp)) { err(1, "pthread_create %d,%d", 0, i); + } } /* Create consumers */ for (i = 1; i < stages; i++) { for (j = 0; j < consumers; j++) { - wp = &worker_info[producers + (consumers*(i-1)) + j]; + wp = &worker_info[producers + (consumers * (i - 1)) + j]; wp->setnum = j + 1; wp->stage = &stage_info[i]; if (ret = pthread_create(&wp->thread, - NULL, - &manager_fn, - (void *) wp)) + NULL, + &manager_fn, + (void *) wp)) { err(1, "pthread_create %d,%d", i, j); + } } } @@ -462,8 +471,8 @@ main(int argc, char *argv[]) * We sit back anf wait for the slaves to finish. */ for (k = 0; k < threads; k++) { - int i; - int j; + int i; + int j; wp = &worker_info[k]; if (k < producers) { @@ -473,8 +482,9 @@ main(int argc, char *argv[]) i = (k - producers) / consumers; j = (k - producers) % consumers; } - if(ret = pthread_join(wp->thread, (void **)&status)) - err(1, "pthread_join %d,%d", i, j); + if (ret = pthread_join(wp->thread, (void **)&status)) { + err(1, "pthread_join %d,%d", i, j); + } DBG("Thread %d,%d status %d\n", i, j, status); } @@ -484,7 +494,7 @@ main(int argc, char *argv[]) timer = mach_absolute_time() - timer; timer = timer / 1000000ULL; printf("%d.%03d seconds elapsed.\n", - (int) (timer/1000ULL), (int) (timer % 1000ULL)); + (int) (timer / 1000ULL), (int) (timer % 1000ULL)); return 0; } diff --git a/tools/tests/affinity/sets.c b/tools/tests/affinity/sets.c index 4631d09c1..028244ea3 100644 --- a/tools/tests/affinity/sets.c +++ b/tools/tests/affinity/sets.c @@ -18,14 +18,14 @@ * affinity set placement in Leopard. 
* * The picture here, for each set, is: - * + * * free work * -> queue --> producer --> queue --> consumer -- * | | * ----------------------------------------------- * * <------ "stage" -----> <------ "stage" -----> - + * * We spin off sets of production line threads (2 sets by default). * All threads of each line sets the same affinity tag (unless disabled). * By default there are 2 stage (worker) threads per production line. @@ -46,91 +46,91 @@ * use as input and output and what function to call for processing is * data-driven. */ - + pthread_mutex_t funnel; -pthread_cond_t barrier; +pthread_cond_t barrier; -uint64_t timer; -int threads; -int threads_ready = 0; +uint64_t timer; +int threads; +int threads_ready = 0; -int iterations = 10000; -boolean_t affinity = FALSE; -boolean_t halting = FALSE; -boolean_t cache_config = FALSE; -int verbosity = 1; +int iterations = 10000; +boolean_t affinity = FALSE; +boolean_t halting = FALSE; +boolean_t cache_config = FALSE; +int verbosity = 1; typedef struct work { - TAILQ_ENTRY(work) link; - int *data; + TAILQ_ENTRY(work) link; + int *data; } work_t; /* * A work queue, complete with pthread objects for its management */ typedef struct work_queue { - pthread_mutex_t mtx; - pthread_cond_t cnd; - TAILQ_HEAD(, work) queue; - boolean_t waiters; + pthread_mutex_t mtx; + pthread_cond_t cnd; + TAILQ_HEAD(, work) queue; + boolean_t waiters; } work_queue_t; /* Worker functions take a integer array and size */ -typedef void (worker_fn_t)(int *, int); +typedef void (worker_fn_t)(int *, int); /* This struct controls the function of a thread */ typedef struct { - int stagenum; - char *name; - worker_fn_t *fn; - work_queue_t *input; - work_queue_t *output; - struct line_info *set; - pthread_t thread; - work_queue_t bufq; + int stagenum; + char *name; + worker_fn_t *fn; + work_queue_t *input; + work_queue_t *output; + struct line_info *set; + pthread_t thread; + work_queue_t bufq; } stage_info_t; /* This defines a thread set */ #define WORKERS_MAX 10 typedef struct line_info { - int setnum; - int *data; - int isize; - stage_info_t *stage[WORKERS_MAX]; + int setnum; + int *data; + int isize; + stage_info_t *stage[WORKERS_MAX]; } line_info_t; -#define DBG(x...) do { \ - if (verbosity > 1) { \ - pthread_mutex_lock(&funnel); \ - printf(x); \ - pthread_mutex_unlock(&funnel); \ - } \ +#define DBG(x...) do { \ + if (verbosity > 1) { \ + pthread_mutex_lock(&funnel); \ + printf(x); \ + pthread_mutex_unlock(&funnel); \ + } \ } while (0) -#define mutter(x...) do { \ - if (verbosity > 0) { \ - printf(x); \ - } \ +#define mutter(x...) do { \ + if (verbosity > 0) { \ + printf(x); \ + } \ } while (0) -#define s_if_plural(x) (((x) > 1) ? "s" : "") +#define s_if_plural(x) (((x) > 1) ? 
"s" : "") static void usage() { fprintf(stderr, - "usage: sets [-a] Turn affinity on (off)\n" - " [-b B] Number of buffers per set/line (2)\n" - " [-c] Configure for max cache performance\n" - " [-h] Print this\n" - " [-i I] Number of items/buffers to process (1000)\n" - " [-s S] Number of stages per set/line (2)\n" - " [-t] Halt for keyboard input to start\n" - " [-p P] Number of pages per buffer (256=1MB)]\n" - " [-w] Consumer writes data\n" - " [-v V] Level of verbosity 0..2 (1)\n" - " [N] Number of sets/lines (2)\n" - ); + "usage: sets [-a] Turn affinity on (off)\n" + " [-b B] Number of buffers per set/line (2)\n" + " [-c] Configure for max cache performance\n" + " [-h] Print this\n" + " [-i I] Number of items/buffers to process (1000)\n" + " [-s S] Number of stages per set/line (2)\n" + " [-t] Halt for keyboard input to start\n" + " [-p P] Number of pages per buffer (256=1MB)]\n" + " [-w] Consumer writes data\n" + " [-v V] Level of verbosity 0..2 (1)\n" + " [N] Number of sets/lines (2)\n" + ); exit(1); } @@ -138,7 +138,7 @@ usage() void writer_fn(int *data, int isize) { - int i; + int i; for (i = 0; i < isize; i++) { data[i] = i; @@ -149,8 +149,8 @@ writer_fn(int *data, int isize) void reader_fn(int *data, int isize) { - int i; - int datum; + int i; + int datum; for (i = 0; i < isize; i++) { datum = data[i]; @@ -161,7 +161,7 @@ reader_fn(int *data, int isize) void reader_writer_fn(int *data, int isize) { - int i; + int i; for (i = 0; i < isize; i++) { data[i] += 1; @@ -175,34 +175,36 @@ reader_writer_fn(int *data, int isize) void * manager_fn(void *arg) { - stage_info_t *sp = (stage_info_t *) arg; - line_info_t *lp = sp->set; - kern_return_t ret; - long iteration = 0; + stage_info_t *sp = (stage_info_t *) arg; + line_info_t *lp = sp->set; + kern_return_t ret; + long iteration = 0; /* * If we're using affinity sets (we are by default) * set our tag to by our thread set number. */ - thread_extended_policy_data_t epolicy; - thread_affinity_policy_data_t policy; + thread_extended_policy_data_t epolicy; + thread_affinity_policy_data_t policy; epolicy.timeshare = FALSE; ret = thread_policy_set( - mach_thread_self(), THREAD_EXTENDED_POLICY, - (thread_policy_t) &epolicy, - THREAD_EXTENDED_POLICY_COUNT); - if (ret != KERN_SUCCESS) + mach_thread_self(), THREAD_EXTENDED_POLICY, + (thread_policy_t) &epolicy, + THREAD_EXTENDED_POLICY_COUNT); + if (ret != KERN_SUCCESS) { printf("thread_policy_set(THREAD_EXTENDED_POLICY) returned %d\n", ret); - + } + if (affinity) { policy.affinity_tag = lp->setnum; ret = thread_policy_set( - mach_thread_self(), THREAD_AFFINITY_POLICY, - (thread_policy_t) &policy, - THREAD_AFFINITY_POLICY_COUNT); - if (ret != KERN_SUCCESS) + mach_thread_self(), THREAD_AFFINITY_POLICY, + (thread_policy_t) &policy, + THREAD_AFFINITY_POLICY_COUNT); + if (ret != KERN_SUCCESS) { printf("thread_policy_set(THREAD_AFFINITY_POLICY) returned %d\n", ret); + } } DBG("Starting %s set: %d stage: %d\n", sp->name, lp->setnum, sp->stagenum); @@ -217,7 +219,7 @@ manager_fn(void *arg) pthread_mutex_unlock(&funnel); if (halting) { printf(" all threads ready for process %d, " - "hit any key to start", getpid()); + "hit any key to start", getpid()); fflush(stdout); (void) getchar(); } @@ -229,8 +231,8 @@ manager_fn(void *arg) } do { - int i; - work_t *workp; + int i; + work_t *workp; /* * Get a buffer from the input queue. 
@@ -239,10 +241,11 @@ manager_fn(void *arg) pthread_mutex_lock(&sp->input->mtx); while (1) { workp = TAILQ_FIRST(&(sp->input->queue)); - if (workp != NULL) + if (workp != NULL) { break; + } DBG(" %s[%d,%d] iteration %d waiting for buffer\n", - sp->name, lp->setnum, sp->stagenum, iteration); + sp->name, lp->setnum, sp->stagenum, iteration); sp->input->waiters = TRUE; pthread_cond_wait(&sp->input->cnd, &sp->input->mtx); sp->input->waiters = FALSE; @@ -251,7 +254,7 @@ manager_fn(void *arg) pthread_mutex_unlock(&sp->input->mtx); DBG(" %s[%d,%d] iteration %d work %p data %p\n", - sp->name, lp->setnum, sp->stagenum, iteration, workp, workp->data); + sp->name, lp->setnum, sp->stagenum, iteration, workp, workp->data); /* Do our stuff with the buffer */ (void) sp->fn(workp->data, lp->isize); @@ -264,7 +267,7 @@ manager_fn(void *arg) TAILQ_INSERT_TAIL(&(sp->output->queue), workp, link); if (sp->output->waiters) { DBG(" %s[%d,%d] iteration %d signaling work\n", - sp->name, lp->setnum, sp->stagenum, iteration); + sp->name, lp->setnum, sp->stagenum, iteration); pthread_cond_signal(&sp->output->cnd); } pthread_mutex_unlock(&sp->output->mtx); @@ -275,27 +278,27 @@ manager_fn(void *arg) return (void *) iteration; } -#define MAX_CACHE_DEPTH 10 +#define MAX_CACHE_DEPTH 10 static void auto_config(int npages, int *nbufs, int *nsets) { - size_t len; - int ncpu; - int llc; - int64_t cacheconfig[MAX_CACHE_DEPTH]; - int64_t cachesize[MAX_CACHE_DEPTH]; + size_t len; + int ncpu; + int llc; + int64_t cacheconfig[MAX_CACHE_DEPTH]; + int64_t cachesize[MAX_CACHE_DEPTH]; mutter("Autoconfiguring...\n"); len = sizeof(cacheconfig); if (sysctlbyname("hw.cacheconfig", - &cacheconfig[0], &len, NULL, 0) != 0) { + &cacheconfig[0], &len, NULL, 0) != 0) { printf("Unable to get hw.cacheconfig, %d\n", errno); exit(1); } len = sizeof(cachesize); if (sysctlbyname("hw.cachesize", - &cachesize[0], &len, NULL, 0) != 0) { + &cachesize[0], &len, NULL, 0) != 0) { printf("Unable to get hw.cachesize, %d\n", errno); exit(1); } @@ -303,9 +306,11 @@ auto_config(int npages, int *nbufs, int *nsets) /* * Find LLC */ - for (llc = MAX_CACHE_DEPTH - 1; llc > 0; llc--) - if (cacheconfig[llc] != 0) + for (llc = MAX_CACHE_DEPTH - 1; llc > 0; llc--) { + if (cacheconfig[llc] != 0) { break; + } + } /* * Calculate number of buffers of size pages*4096 bytes @@ -313,15 +318,15 @@ auto_config(int npages, int *nbufs, int *nsets) */ *nbufs = cachesize[llc] * 9 / (npages * 4096 * 10); mutter(" L%d (LLC) cache %qd bytes: " - "using %d buffers of size %d bytes\n", - llc, cachesize[llc], *nbufs, (npages * 4096)); + "using %d buffers of size %d bytes\n", + llc, cachesize[llc], *nbufs, (npages * 4096)); - /* + /* * Calcalute how many sets: */ - *nsets = cacheconfig[0]/cacheconfig[llc]; + *nsets = cacheconfig[0] / cacheconfig[llc]; mutter(" %qd cpus; %qd cpus per L%d cache: using %d sets\n", - cacheconfig[0], cacheconfig[llc], llc, *nsets); + cacheconfig[0], cacheconfig[llc], llc, *nsets); } void (*producer_fnp)(int *data, int isize) = &writer_fn; @@ -330,22 +335,22 @@ void (*consumer_fnp)(int *data, int isize) = &reader_fn; int main(int argc, char *argv[]) { - int i; - int j; - int pages = 256; /* 1MB */ - int buffers = 2; - int sets = 2; - int stages = 2; - int *status; - line_info_t *line_info; - line_info_t *lp; - stage_info_t *stage_info; - stage_info_t *sp; - kern_return_t ret; - int c; + int i; + int j; + int pages = 256; /* 1MB */ + int buffers = 2; + int sets = 2; + int stages = 2; + int *status; + line_info_t *line_info; + line_info_t *lp; + stage_info_t 
*stage_info; + stage_info_t *sp; + kern_return_t ret; + int c; /* Do switch parsing: */ - while ((c = getopt (argc, argv, "ab:chi:p:s:twv:")) != -1) { + while ((c = getopt(argc, argv, "ab:chi:p:s:twv:")) != -1) { switch (c) { case 'a': affinity = !affinity; @@ -364,8 +369,9 @@ main(int argc, char *argv[]) break; case 's': stages = atoi(optarg); - if (stages >= WORKERS_MAX) + if (stages >= WORKERS_MAX) { usage(); + } break; case 't': halting = TRUE; @@ -383,39 +389,43 @@ main(int argc, char *argv[]) } } argc -= optind; argv += optind; - if (argc > 0) + if (argc > 0) { sets = atoi(*argv); + } - if (cache_config) + if (cache_config) { auto_config(pages, &buffers, &sets); + } pthread_mutex_init(&funnel, NULL); pthread_cond_init(&barrier, NULL); /* - * Fire up the worker threads. + * Fire up the worker threads. */ threads = sets * stages; mutter("Launching %d set%s of %d threads with %saffinity, " - "consumer reads%s data\n", - sets, s_if_plural(sets), stages, affinity? "": "no ", - (consumer_fnp == &reader_writer_fn)? " and writes" : ""); - if (pages < 256) + "consumer reads%s data\n", + sets, s_if_plural(sets), stages, affinity? "": "no ", + (consumer_fnp == &reader_writer_fn)? " and writes" : ""); + if (pages < 256) { mutter(" %dkB bytes per buffer, ", pages * 4); - else + } else { mutter(" %dMB bytes per buffer, ", pages / 256); + } mutter("%d buffer%s per set ", - buffers, s_if_plural(buffers)); - if (buffers * pages < 256) + buffers, s_if_plural(buffers)); + if (buffers * pages < 256) { mutter("(total %dkB)\n", buffers * pages * 4); - else + } else { mutter("(total %dMB)\n", buffers * pages / 256); + } mutter(" processing %d buffer%s...\n", - iterations, s_if_plural(iterations)); + iterations, s_if_plural(iterations)); line_info = (line_info_t *) malloc(sets * sizeof(line_info_t)); stage_info = (stage_info_t *) malloc(sets * stages * sizeof(stage_info_t)); for (i = 0; i < sets; i++) { - work_t *work_array; + work_t *work_array; lp = &line_info[i]; @@ -425,7 +435,7 @@ main(int argc, char *argv[]) /* Set up the queue for the workers of this thread set: */ for (j = 0; j < stages; j++) { - sp = &stage_info[(i*stages) + j]; + sp = &stage_info[(i * stages) + j]; sp->stagenum = j; sp->set = lp; lp->stage[j] = sp; @@ -455,18 +465,19 @@ main(int argc, char *argv[]) /* Set up the buffers on the first worker of the set. 
*/ work_array = (work_t *) malloc(buffers * sizeof(work_t)); for (j = 0; j < buffers; j++) { - work_array[j].data = lp->data + (lp->isize * j); + work_array[j].data = lp->data + (lp->isize * j); TAILQ_INSERT_TAIL(&lp->stage[0]->bufq.queue, &work_array[j], link); DBG(" empty work item %p for set %d data %p\n", - &work_array[j], i, work_array[j].data); + &work_array[j], i, work_array[j].data); } /* Create this set of threads */ for (j = 0; j < stages; j++) { if (ret = pthread_create(&lp->stage[j]->thread, NULL, - &manager_fn, - (void *) lp->stage[j])) - err(1, "pthread_create %d,%d", i, j); + &manager_fn, + (void *) lp->stage[j])) { + err(1, "pthread_create %d,%d", i, j); + } } } @@ -476,8 +487,9 @@ main(int argc, char *argv[]) for (i = 0; i < sets; i++) { lp = &line_info[i]; for (j = 0; j < stages; j++) { - if(ret = pthread_join(lp->stage[j]->thread, (void **)&status)) - err(1, "pthread_join %d,%d", i, j); + if (ret = pthread_join(lp->stage[j]->thread, (void **)&status)) { + err(1, "pthread_join %d,%d", i, j); + } DBG("Thread %d,%d status %d\n", i, j, status); } } @@ -488,7 +500,7 @@ main(int argc, char *argv[]) timer = mach_absolute_time() - timer; timer = timer / 1000000ULL; printf("%d.%03d seconds elapsed.\n", - (int) (timer/1000ULL), (int) (timer % 1000ULL)); + (int) (timer / 1000ULL), (int) (timer % 1000ULL)); return 0; } diff --git a/tools/tests/affinity/tags.c b/tools/tests/affinity/tags.c index f03ef46c5..16a314e49 100644 --- a/tools/tests/affinity/tags.c +++ b/tools/tests/affinity/tags.c @@ -10,45 +10,45 @@ #include #include -int verbosity = 1; +int verbosity = 1; -#define DBG(x...) do { \ - if (verbosity > 1) { \ - printf(x); \ - } \ +#define DBG(x...) do { \ + if (verbosity > 1) { \ + printf(x); \ + } \ } while (0) -#define mutter(x...) do { \ - if (verbosity > 0) { \ - printf(x); \ - } \ +#define mutter(x...) do { \ + if (verbosity > 0) { \ + printf(x); \ + } \ } while (0) -#define s_if_plural(x) (((x) > 1) ? "s" : "") +#define s_if_plural(x) (((x) > 1) ? 
"s" : "") static void usage() { fprintf(stderr, - "usage: tags [-i] interactive/input\n" - " [-v V] verbosity level 0..2 (1)\n" - " [-h] help info\n" - " pid process id of target task\n" - ); + "usage: tags [-i] interactive/input\n" + " [-v V] verbosity level 0..2 (1)\n" + " [-h] help info\n" + " pid process id of target task\n" + ); exit(1); } void thread_tag_set(thread_t thread, int tag) { - kern_return_t ret; - thread_affinity_policy_data_t policy; + kern_return_t ret; + thread_affinity_policy_data_t policy; policy.affinity_tag = tag; ret = thread_policy_set( - thread, THREAD_AFFINITY_POLICY, - (thread_policy_t) &policy, - THREAD_AFFINITY_POLICY_COUNT); + thread, THREAD_AFFINITY_POLICY, + (thread_policy_t) &policy, + THREAD_AFFINITY_POLICY_COUNT); if (ret != KERN_SUCCESS) { printf("thread_policy_set(1) returned %d\n", ret); exit(1); @@ -58,14 +58,14 @@ thread_tag_set(thread_t thread, int tag) int thread_tag_get(thread_t thread) { - kern_return_t ret; - boolean_t get_default = FALSE; - thread_affinity_policy_data_t policy; - mach_msg_type_number_t count = THREAD_AFFINITY_POLICY_COUNT; + kern_return_t ret; + boolean_t get_default = FALSE; + thread_affinity_policy_data_t policy; + mach_msg_type_number_t count = THREAD_AFFINITY_POLICY_COUNT; ret = thread_policy_get( - thread, THREAD_AFFINITY_POLICY, - (thread_policy_t) &policy, &count, &get_default); + thread, THREAD_AFFINITY_POLICY, + (thread_policy_t) &policy, &count, &get_default); if (ret != KERN_SUCCESS) { printf("thread_policy_set(1) returned %d\n", ret); exit(1); @@ -74,19 +74,19 @@ thread_tag_get(thread_t thread) return policy.affinity_tag; } -char input[81]; +char input[81]; int main(int argc, char *argv[]) { - kern_return_t ret; - mach_port_name_t port; - int pid; - int c; - thread_act_t *thread_array; - mach_msg_type_number_t num_threads; - int i; - boolean_t interactive = FALSE; - int tag; + kern_return_t ret; + mach_port_name_t port; + int pid; + int c; + thread_act_t *thread_array; + mach_msg_type_number_t num_threads; + int i; + boolean_t interactive = FALSE; + int tag; if (geteuid() != 0) { printf("Must be run as root\n"); @@ -94,7 +94,7 @@ main(int argc, char *argv[]) } /* Do switch parsing: */ - while ((c = getopt (argc, argv, "hiv:")) != -1) { + while ((c = getopt(argc, argv, "hiv:")) != -1) { switch (c) { case 'i': interactive = TRUE; @@ -109,21 +109,24 @@ main(int argc, char *argv[]) } } argc -= optind; argv += optind; - if (argc > 0) + if (argc > 0) { pid = atoi(*argv); + } ret = task_for_pid(mach_task_self(), pid, &port); - if (ret != KERN_SUCCESS) - err(1, "task_for_pid(,%d,) returned %d", pid, ret); + if (ret != KERN_SUCCESS) { + err(1, "task_for_pid(,%d,) returned %d", pid, ret); + } mutter("task %p\n", port); ret = task_threads(port, &thread_array, &num_threads); - if (ret != KERN_SUCCESS) - err(1, "task_threads() returned %d", pid, ret); - + if (ret != KERN_SUCCESS) { + err(1, "task_threads() returned %d", pid, ret); + } + for (i = 0; i < num_threads; i++) { printf(" %d: thread 0x%08x tag %d\n", - i, thread_array[i], thread_tag_get(thread_array[i])); + i, thread_array[i], thread_tag_get(thread_array[i])); } while (interactive) { @@ -131,7 +134,7 @@ main(int argc, char *argv[]) for (i = 0; i < num_threads; i++) { tag = thread_tag_get(thread_array[i]); printf(" %d: thread 0x%08x tag %d: ", - i, thread_array[i], tag); + i, thread_array[i], tag); fflush(stdout); (void) fgets(input, 20, stdin); if (feof(stdin)) { diff --git a/tools/tests/execperf/exit.c b/tools/tests/execperf/exit.c index e58cbfebb..761f55946 100644 
--- a/tools/tests/execperf/exit.c +++ b/tools/tests/execperf/exit.c @@ -1,14 +1,16 @@ #include -int main(int artc, char *argv[]) { +int +main(int artc, char *argv[]) +{ #if defined(__x86_64__) - asm volatile ("andq $0xfffffffffffffff0, %rsp\n"); + asm volatile ("andq $0xfffffffffffffff0, %rsp\n"); #elif defined(__i386__) - asm volatile ("andl $0xfffffff0, %esp\n"); + asm volatile ("andl $0xfffffff0, %esp\n"); #elif defined(__arm__) || defined(__arm64__) asm volatile (""); #else #error Unsupported architecture #endif - _Exit(42); + _Exit(42); } diff --git a/tools/tests/execperf/printexecinfo.c b/tools/tests/execperf/printexecinfo.c index 1baefb922..b8e03eb4c 100644 --- a/tools/tests/execperf/printexecinfo.c +++ b/tools/tests/execperf/printexecinfo.c @@ -10,24 +10,27 @@ #include __attribute__((constructor)) -void init(int argc, const char *argv[], const char *envp[], const char *appl[], void *vars __attribute__((unused))) { +void +init(int argc, const char *argv[], const char *envp[], const char *appl[], void *vars __attribute__((unused))) +{ int i; printf("argv = %p\n", argv); - for (i=0; argv[i]; i++) { + for (i = 0; argv[i]; i++) { printf("argv[%2d] = %p %.100s%s\n", i, argv[i], argv[i], strlen(argv[i]) > 100 ? "..." : ""); } printf("envp = %p\n", envp); - for (i=0; envp[i]; i++) { + for (i = 0; envp[i]; i++) { printf("envp[%2d] = %p %.100s%s\n", i, envp[i], envp[i], strlen(envp[i]) > 100 ? "..." : ""); } printf("appl = %p\n", appl); - for (i=0; appl[i]; i++) { + for (i = 0; appl[i]; i++) { printf("appl[%2d] = %p %.100s%s\n", i, appl[i], appl[i], strlen(appl[i]) > 100 ? "..." : ""); } } -void printexecinfo(void) +void +printexecinfo(void) { int ret; uint64_t stackaddr; @@ -36,39 +39,42 @@ void printexecinfo(void) printf("executable load address = 0x%016llx\n", (uint64_t)(uintptr_t)&_mh_execute_header); printf("executable cputype 0x%08x cpusubtype 0x%08x (%s:%s)\n", - _mh_execute_header.cputype, - _mh_execute_header.cpusubtype, - arch ? arch->name : "unknown", - arch ? arch->description : "unknown"); + _mh_execute_header.cputype, + _mh_execute_header.cpusubtype, + arch ? arch->name : "unknown", + arch ? 
arch->description : "unknown"); ret = sysctlbyname("kern.usrstack64", &stackaddr, &len, NULL, 0); - if (ret == -1) + if (ret == -1) { err(1, "sysctlbyname"); + } printf(" stack address = 0x%016llx\n", stackaddr); } -void printdyldinfo(void) +void +printdyldinfo(void) { task_dyld_info_data_t info; mach_msg_type_number_t size = TASK_DYLD_INFO_COUNT; kern_return_t kret; struct dyld_all_image_infos *all_image_infos; - + kret = task_info(mach_task_self(), TASK_DYLD_INFO, - (void *)&info, &size); - if (kret != KERN_SUCCESS) + (void *)&info, &size); + if (kret != KERN_SUCCESS) { errx(1, "task_info: %s", mach_error_string(kret)); + } all_image_infos = (struct dyld_all_image_infos *)(uintptr_t)info.all_image_info_addr; printf(" dyld load address = 0x%016llx\n", (uint64_t)(uintptr_t)all_image_infos->dyldImageLoadAddress); printf(" shared cache slide = 0x%016llx\n", (uint64_t)(uintptr_t)all_image_infos->sharedCacheSlide); - } -int main(int argc, char *argv[]) { - +int +main(int argc, char *argv[]) +{ printexecinfo(); printdyldinfo(); diff --git a/tools/tests/execperf/run.c b/tools/tests/execperf/run.c index 79a2bf602..6388659d6 100644 --- a/tools/tests/execperf/run.c +++ b/tools/tests/execperf/run.c @@ -15,76 +15,80 @@ void usage(void); void *work(void *); -int main(int argc, char *argv[]) { - - int i, count, threadcount; - int ret; - pthread_t *threads; - - if (argc < 4) { - usage(); - } - - threadcount = atoi(argv[1]); - count = atoi(argv[2]); - - newargv = &argv[3]; - - threads = (pthread_t *)calloc(threadcount, sizeof(pthread_t)); - for (i=0; i < threadcount; i++) { - ret = pthread_create(&threads[i], NULL, work, (void *)(intptr_t)count); - if (ret) { - err(1, "pthread_create"); - } - } - - for (i=0; i < threadcount; i++) { - ret = pthread_join(threads[i], NULL); - if (ret) { - err(1, "pthread_join"); - } - } - - return 0; +int +main(int argc, char *argv[]) +{ + int i, count, threadcount; + int ret; + pthread_t *threads; + + if (argc < 4) { + usage(); + } + + threadcount = atoi(argv[1]); + count = atoi(argv[2]); + + newargv = &argv[3]; + + threads = (pthread_t *)calloc(threadcount, sizeof(pthread_t)); + for (i = 0; i < threadcount; i++) { + ret = pthread_create(&threads[i], NULL, work, (void *)(intptr_t)count); + if (ret) { + err(1, "pthread_create"); + } + } + + for (i = 0; i < threadcount; i++) { + ret = pthread_join(threads[i], NULL); + if (ret) { + err(1, "pthread_join"); + } + } + + return 0; } -void usage(void) { - fprintf(stderr, "Usage: %s [ [ ...]]\n", - getprogname()); - exit(1); +void +usage(void) +{ + fprintf(stderr, "Usage: %s [ [ ...]]\n", + getprogname()); + exit(1); } -void *work(void *arg) +void * +work(void *arg) { - int count = (int)(intptr_t)arg; - int i; - int ret; - pid_t pid; - - for (i=0; i < count; i++) { - ret = posix_spawn(&pid, newargv[0], NULL, NULL, newargv, environ); - if (ret != 0) { - errc(1, ret, "posix_spawn(%s)", newargv[0]); - } - - while (-1 == waitpid(pid, &ret, 0)) { - if (errno != EINTR) { - err(1, "waitpid(%d)", pid); - } - } - - if (WIFSIGNALED(ret)) { - errx(1, "process exited with signal %d", WTERMSIG(ret)); - } else if (WIFSTOPPED(ret)) { - errx(1, "process stopped with signal %d", WSTOPSIG(ret)); - } else if (WIFEXITED(ret)) { - if (WEXITSTATUS(ret) != 42) { - errx(1, "process exited with unexpected exit code %d", WEXITSTATUS(ret)); - } - } else { - errx(1, "unknown exit condition %x", ret); - } - } - - return NULL; + int count = (int)(intptr_t)arg; + int i; + int ret; + pid_t pid; + + for (i = 0; i < count; i++) { + ret = posix_spawn(&pid, 
newargv[0], NULL, NULL, newargv, environ); + if (ret != 0) { + errc(1, ret, "posix_spawn(%s)", newargv[0]); + } + + while (-1 == waitpid(pid, &ret, 0)) { + if (errno != EINTR) { + err(1, "waitpid(%d)", pid); + } + } + + if (WIFSIGNALED(ret)) { + errx(1, "process exited with signal %d", WTERMSIG(ret)); + } else if (WIFSTOPPED(ret)) { + errx(1, "process stopped with signal %d", WSTOPSIG(ret)); + } else if (WIFEXITED(ret)) { + if (WEXITSTATUS(ret) != 42) { + errx(1, "process exited with unexpected exit code %d", WEXITSTATUS(ret)); + } + } else { + errx(1, "unknown exit condition %x", ret); + } + } + + return NULL; } diff --git a/tools/tests/jitter/timer_jitter.c b/tools/tests/jitter/timer_jitter.c index e6b4dc5d7..f803f0e16 100644 --- a/tools/tests/jitter/timer_jitter.c +++ b/tools/tests/jitter/timer_jitter.c @@ -2,7 +2,7 @@ * Copyright (c) 2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,7 +22,7 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. 
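
/*
 * Illustrative sketch, not part of the patch above: the spawn/reap cycle
 * timed by execperf's run.c work() loop — posix_spawn() a child, retry
 * waitpid() across EINTR, then insist on the magic exit status that
 * exit.c returns (42). argv here is a hypothetical parameter; the status
 * decoding in the real loop also reports signals and stops.
 */
#include <errno.h>
#include <err.h>
#include <spawn.h>
#include <sys/wait.h>

extern char **environ;

static void
spawn_and_reap(char *const argv[])
{
	pid_t pid;
	int status;
	int ret;

	ret = posix_spawn(&pid, argv[0], NULL, NULL, argv, environ);
	if (ret != 0) {
		errc(1, ret, "posix_spawn(%s)", argv[0]);
	}
	while (waitpid(pid, &status, 0) == -1) {
		if (errno != EINTR) {
			err(1, "waitpid(%d)", pid);
		}
	}
	if (!WIFEXITED(status) || WEXITSTATUS(status) != 42) {
		errx(1, "unexpected exit condition %x", status);
	}
}
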
- * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ #include @@ -53,9 +53,9 @@ typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_FIXEDPRI } my_policy_type_t; -#define DEFAULT_MAX_SLEEP_NS 2000000000ll /* Two seconds */ -#define CONSTRAINT_NANOS (20000000ll) /* 20 ms */ -#define COMPUTATION_NANOS (10000000ll) /* 10 ms */ +#define DEFAULT_MAX_SLEEP_NS 2000000000ll /* Two seconds */ +#define CONSTRAINT_NANOS (20000000ll) /* 20 ms */ +#define COMPUTATION_NANOS (10000000ll) /* 10 ms */ struct mach_timebase_info g_mti; @@ -103,38 +103,38 @@ thread_setup(my_policy_type_t pol) int res; switch (pol) { - case MY_POLICY_TIMESHARE: - { - return 0; - } - case MY_POLICY_REALTIME: - { - thread_time_constraint_policy_data_t pol; - - /* Hard-coded realtime parameters (similar to what Digi uses) */ - pol.period = 100000; - pol.constraint = CONSTRAINT_NANOS * g_mti.denom / g_mti.numer; - pol.computation = COMPUTATION_NANOS * g_mti.denom / g_mti.numer; - pol.preemptible = 0; /* Ignored by OS */ - - res = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT); - assert(res == 0, fail); - break; - } - case MY_POLICY_FIXEDPRI: - { - thread_extended_policy_data_t pol; - pol.timeshare = 0; - - res = thread_policy_set(mach_thread_self(), THREAD_EXTENDED_POLICY, (thread_policy_t) &pol, THREAD_EXTENDED_POLICY_COUNT); - assert(res == 0, fail); - break; - } - default: - { - printf("invalid policy type\n"); - return 1; - } + case MY_POLICY_TIMESHARE: + { + return 0; + } + case MY_POLICY_REALTIME: + { + thread_time_constraint_policy_data_t pol; + + /* Hard-coded realtime parameters (similar to what Digi uses) */ + pol.period = 100000; + pol.constraint = CONSTRAINT_NANOS * g_mti.denom / g_mti.numer; + pol.computation = COMPUTATION_NANOS * g_mti.denom / g_mti.numer; + pol.preemptible = 0; /* Ignored by OS */ + + res = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT); + assert(res == 0, fail); + break; + } + case MY_POLICY_FIXEDPRI: + { + thread_extended_policy_data_t pol; + pol.timeshare = 0; + + res = thread_policy_set(mach_thread_self(), THREAD_EXTENDED_POLICY, (thread_policy_t) &pol, THREAD_EXTENDED_POLICY_COUNT); + assert(res == 0, fail); + break; + } + default: + { + printf("invalid policy type\n"); + return 1; + } } return 0; @@ -142,7 +142,7 @@ fail: return 1; } -uint64_t +uint64_t get_random_sleep_length_abs_ns(uint64_t min_sleep_ns, uint64_t max_sleep_ns) { uint64_t tmp; @@ -157,7 +157,7 @@ get_random_sleep_length_abs_ns(uint64_t min_sleep_ns, uint64_t max_sleep_ns) return min_sleep_ns + tmp; } -void +void compute_stats(double *values, uint64_t count, double *average_magnitudep, double *maxp, double *minp, double *stddevp) { uint64_t i; @@ -174,12 +174,12 @@ compute_stats(double *values, uint64_t count, double *average_magnitudep, double } _avg = _sum / (double)count; - + _dev = 0; for (i = 0; i < count; i++) { _dev += pow((values[i] - _avg), 2); } - + _dev /= count; _dev = sqrt(_dev); @@ -206,7 +206,7 @@ print_stats_fract(const char *label, double avg, double max, double min, double printf("Min %s jitter: %.1lf%%\n", label, min * 100); printf("Avg %s jitter: %.1lf%%\n", label, avg * 100); printf("Stddev: %.1lf%%\n", stddev * 100); - putchar('\n'); + putchar('\n'); } int @@ -242,32 +242,32 @@ main(int argc, char **argv) opterr = 0; while ((ch = getopt(argc, argv, "m:n:hs:w")) != -1 && ch != '?') { switch (ch) { - case 's': - /* 
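/*
 * The MY_POLICY_REALTIME case above scales its nanosecond constants by
 * denom/numer because thread_time_constraint_policy expects Mach
 * absolute-time units -- the inverse of the usual ticks-to-nanoseconds
 * direction. A hedged, standalone sketch of that setup (period of 0
 * here, unlike the test's hard-coded 100000):
 */
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <stdint.h>

static kern_return_t
make_thread_realtime(uint64_t computation_ns, uint64_t constraint_ns)
{
    struct mach_timebase_info mti;
    thread_time_constraint_policy_data_t pol;

    mach_timebase_info(&mti);
    pol.period = 0;                 /* aperiodic in this sketch */
    pol.computation = (uint32_t)(computation_ns * mti.denom / mti.numer);
    pol.constraint = (uint32_t)(constraint_ns * mti.denom / mti.numer);
    pol.preemptible = 0;            /* ignored by the OS, per the comment above */

    return thread_policy_set(mach_thread_self(),
        THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t)&pol,
        THREAD_TIME_CONSTRAINT_POLICY_COUNT);
}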
Specified seed for random)() */ - random_seed = (unsigned)atoi(optarg); - srandom(random_seed); - need_seed = FALSE; - break; - case 'm': - /* How long per timer? */ - max_sleep_ns = strtoull(optarg, NULL, 10); - break; - case 'n': - /* How long per timer? */ - min_sleep_ns = strtoull(optarg, NULL, 10); - break; - case 'w': - /* After each timed wait, wakeup another thread */ - wakeup_second_thread = TRUE; - break; - case 'h': - print_usage(); - exit(0); - break; - default: - fprintf(stderr, "Got unexpected result from getopt().\n"); - exit(1); - break; + case 's': + /* Specified seed for random)() */ + random_seed = (unsigned)atoi(optarg); + srandom(random_seed); + need_seed = FALSE; + break; + case 'm': + /* How long per timer? */ + max_sleep_ns = strtoull(optarg, NULL, 10); + break; + case 'n': + /* How long per timer? */ + min_sleep_ns = strtoull(optarg, NULL, 10); + break; + case 'w': + /* After each timed wait, wakeup another thread */ + wakeup_second_thread = TRUE; + break; + case 'h': + print_usage(); + exit(0); + break; + default: + fprintf(stderr, "Got unexpected result from getopt().\n"); + exit(1); + break; } } @@ -296,7 +296,7 @@ main(int argc, char **argv) /* How much jitter is so extreme that we should cut a trace point */ too_much = strtoull(argv[2], NULL, 10); - + /* Array for data */ jitter_arr = (double*)malloc(sizeof(*jitter_arr) * iterations); if (jitter_arr == NULL) { @@ -368,21 +368,21 @@ main(int argc, char **argv) /* For now, do not exit; this call could be locked down */ } - /* - * Repeatedly pick a random timer length and - * try to sleep exactly that long + /* + * Repeatedly pick a random timer length and + * try to sleep exactly that long */ for (i = 0; i < iterations; i++) { sleep_length_abs = (uint64_t) (get_random_sleep_length_abs_ns(min_sleep_ns, max_sleep_ns) * (((double)g_mti.denom) / ((double)g_mti.numer))); target_time = mach_absolute_time() + sleep_length_abs; - + /* Sleep */ kret = mach_wait_until(target_time); wake_time = mach_absolute_time(); - + jitter_arr[i] = (double)(wake_time - target_time); fraction_arr[i] = jitter_arr[i] / ((double)sleep_length_abs); - + /* Too much: cut a tracepoint for a debugger */ if (jitter_arr[i] >= too_much) { kdebug_trace(0xeeeee0 | DBG_FUNC_NONE, 0, 0, 0, 0); @@ -401,12 +401,11 @@ main(int argc, char **argv) if (kret != KERN_SUCCESS) { errx(1, "semaphore_wait"); } - } } /* - * Compute statistics and output results. + * Compute statistics and output results. 
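/*
 * The measurement core of this test is: convert a nanosecond sleep into
 * absolute-time ticks, sleep with mach_wait_until(), and treat however
 * far past the deadline the wakeup lands as jitter. Condensed into one
 * illustrative function:
 */
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <stdint.h>

static double
measure_oversleep_ns(uint64_t sleep_ns)
{
    struct mach_timebase_info mti;
    mach_timebase_info(&mti);

    uint64_t sleep_abs = sleep_ns * mti.denom / mti.numer; /* ns -> ticks */
    uint64_t target = mach_absolute_time() + sleep_abs;

    (void)mach_wait_until(target);          /* returns at or after target */
    uint64_t woke = mach_absolute_time();

    return (double)(woke - target) * mti.numer / mti.denom; /* ticks -> ns */
}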
*/ compute_stats(jitter_arr, iterations, &avg, &max, &min, &stddev); compute_stats(fraction_arr, iterations, &avg_fract, &max_fract, &min_fract, &stddev_fract); @@ -416,20 +415,19 @@ main(int argc, char **argv) print_stats_fract("%", avg_fract, max_fract, min_fract, stddev_fract); if (wakeup_second_thread) { - res = pthread_join(secthread, NULL); if (res) { err(1, "pthread_join"); } compute_stats(wakeup_second_jitter_arr, iterations, &avg, &max, &min, &stddev); - + putchar('\n'); print_stats_us("second jitter", avg, max, min, stddev); putchar('\n'); printf("%llu/%llu (%.1f%%) wakeups on same CPU\n", secargs.woke_on_same_cpu, iterations, - 100.0*((double)secargs.woke_on_same_cpu)/iterations); + 100.0 * ((double)secargs.woke_on_same_cpu) / iterations); } return 0; @@ -452,12 +450,11 @@ second_thread(void *args) exit(1); } - /* - * Repeatedly pick a random timer length and - * try to sleep exactly that long + /* + * Repeatedly pick a random timer length and + * try to sleep exactly that long */ for (i = 0; i < secargs->iterations; i++) { - /* Wake up when poked by main thread */ kret = semaphore_wait(secargs->wakeup_semaphore); if (kret != KERN_SUCCESS) { @@ -468,7 +465,7 @@ second_thread(void *args) cpuno = _os_cpu_number(); if (wake_time < secargs->last_poke_time) { /* Woke in past, unsynchronized mach_absolute_time()? */ - + errx(1, "woke in past %llu (%d) < %llu (%d)", wake_time, cpuno, secargs->last_poke_time, secargs->cpuno); } @@ -477,7 +474,7 @@ second_thread(void *args) } secargs->wakeup_second_jitter_arr[i] = (double)(wake_time - secargs->last_poke_time); - + /* Too much: cut a tracepoint for a debugger */ if (secargs->wakeup_second_jitter_arr[i] >= secargs->too_much) { kdebug_trace(0xeeeee4 | DBG_FUNC_NONE, 0, 0, 0, 0); @@ -487,7 +484,6 @@ second_thread(void *args) if (kret != KERN_SUCCESS) { errx(1, "semaphore_signal %d", kret); } - } return NULL; diff --git a/tools/tests/mktimer/mktimer_test.c b/tools/tests/mktimer/mktimer_test.c index 43326a1b7..357b6e10a 100644 --- a/tools/tests/mktimer/mktimer_test.c +++ b/tools/tests/mktimer/mktimer_test.c @@ -54,7 +54,9 @@ uint32_t report = 1000; uint64_t on, lastfire = 0, totaljitter = 0, max_jitter = 0, min_jitter = ~0ULL, jiterations = 0, leeway_ns = 0, leeway_abs = 0; uint64_t deadline; -void cfmcb(CFMachPortRef port, void *msg, CFIndex size, void *msginfo) { +void +cfmcb(CFMachPortRef port, void *msg, CFIndex size, void *msginfo) +{ uint64_t ctime = mach_absolute_time(); uint64_t jitter = 0; @@ -70,7 +72,7 @@ void cfmcb(CFMachPortRef port, void *msg, CFIndex size, void *msginfo) { totaljitter += jitter; if ((++jiterations % report) == 0) { - printf("max_jitter: %g (ns), min_jitter: %g (ns), average_jitter: %g (ns)\n", max_jitter * conversion, min_jitter * conversion, ((double)totaljitter/(double)jiterations) * conversion); + printf("max_jitter: %g (ns), min_jitter: %g (ns), average_jitter: %g (ns)\n", max_jitter * conversion, min_jitter * conversion, ((double)totaljitter / (double)jiterations) * conversion); max_jitter = 0; min_jitter = ~0ULL; jiterations = 0; totaljitter = 0; } } @@ -84,7 +86,9 @@ void cfmcb(CFMachPortRef port, void *msg, CFIndex size, void *msginfo) { } } -int main(int argc, char **argv) { +int +main(int argc, char **argv) +{ if (argc != 4) { printf("Usage: mktimer_test \n"); return 0; @@ -120,7 +124,7 @@ int main(int argc, char **argv) { if (use_leeway) { mk_timer_arm_leeway(timerPort, MK_TIMER_CRITICAL, mach_absolute_time() + interval_abs, leeway_abs); - } else { + } else { mk_timer_arm(timerPort, 
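/*
 * mktimer_test drives a (private) mk_timer port through CoreFoundation:
 * cfmcb fires from the run loop whenever the timer message arrives. The
 * public-API side of that wiring, sketched with an assumed pre-created
 * Mach port:
 */
#include <CoreFoundation/CoreFoundation.h>
#include <mach/mach.h>

static void
timer_cb(CFMachPortRef port, void *msg, CFIndex size, void *info)
{
    (void)port; (void)msg; (void)size; (void)info;
    /* record a timestamp and re-arm the timer here */
}

static void
service_timer_port(mach_port_t timer_port)
{
    CFMachPortContext ctx = { 0, NULL, NULL, NULL, NULL };
    CFMachPortRef cfport = CFMachPortCreateWithPort(kCFAllocatorDefault,
        timer_port, timer_cb, &ctx, NULL);
    CFRunLoopSourceRef src = CFMachPortCreateRunLoopSource(
        kCFAllocatorDefault, cfport, 0);

    CFRunLoopAddSource(CFRunLoopGetCurrent(), src, kCFRunLoopDefaultMode);
    CFRunLoopRun();                 /* dispatches timer_cb until stopped */
}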
mach_absolute_time() + interval_abs); } diff --git a/tools/tests/perf_index/PerfIndex_COPS_Module/PITest.h b/tools/tests/perf_index/PerfIndex_COPS_Module/PITest.h index 7722852d9..dc1392ca6 100644 --- a/tools/tests/perf_index/PerfIndex_COPS_Module/PITest.h +++ b/tools/tests/perf_index/PerfIndex_COPS_Module/PITest.h @@ -11,19 +11,19 @@ @interface PITest : NSObject { - int (*setup_func)(int, long long, int, void**); - int (*execute_func)(int, int, long long, int, void**); - void (*cleanup_func)(int, long long); + int (*setup_func)(int, long long, int, void**); + int (*execute_func)(int, int, long long, int, void**); + void (*cleanup_func)(int, long long); - long long length; - int numThreads; - int readyThreadCount; - int testArgc; - void** testArgv; - pthread_mutex_t readyThreadCountLock; - pthread_cond_t threadsReadyCvar; - pthread_cond_t startCvar; - pthread_t* threads; + long long length; + int numThreads; + int readyThreadCount; + int testArgc; + void** testArgv; + pthread_mutex_t readyThreadCountLock; + pthread_cond_t threadsReadyCvar; + pthread_cond_t startCvar; + pthread_t* threads; } @property NSString* testName; diff --git a/tools/tests/perf_index/md5.c b/tools/tests/perf_index/md5.c index 8b2775ad4..0e66c7303 100644 --- a/tools/tests/perf_index/md5.c +++ b/tools/tests/perf_index/md5.c @@ -14,138 +14,140 @@ #define CCMD5_BLOCK_SIZE 64 -#define F(x,y,z) (z ^ (x & (y ^ z))) -#define G(x,y,z) (y ^ (z & (y ^ x))) -#define H(x,y,z) (x^y^z) -#define I(x,y,z) (y^(x|(~z))) +#define F(x, y, z) (z ^ (x & (y ^ z))) +#define G(x, y, z) (y ^ (z & (y ^ x))) +#define H(x, y, z) (x^y^z) +#define I(x, y, z) (y^(x|(~z))) -#define CC_ROLc(X,s) (((X) << (s)) | ((X) >> (32 - (s)))) +#define CC_ROLc(X, s) (((X) << (s)) | ((X) >> (32 - (s)))) -#define FF(a,b,c,d,M,s,t) \ +#define FF(a, b, c, d, M, s, t) \ a = (a + F(b,c,d) + M + t); a = CC_ROLc(a, s) + b; -#define GG(a,b,c,d,M,s,t) \ +#define GG(a, b, c, d, M, s, t) \ a = (a + G(b,c,d) + M + t); a = CC_ROLc(a, s) + b; -#define HH(a,b,c,d,M,s,t) \ +#define HH(a, b, c, d, M, s, t) \ a = (a + H(b,c,d) + M + t); a = CC_ROLc(a, s) + b; -#define II(a,b,c,d,M,s,t) \ +#define II(a, b, c, d, M, s, t) \ a = (a + I(b,c,d) + M + t); a = CC_ROLc(a, s) + b; -static void md5_compress(uint32_t *state, unsigned long nblocks, const void *in) +static void +md5_compress(uint32_t *state, unsigned long nblocks, const void *in) { - uint32_t i, W[16], a, b, c, d; - uint32_t *s = state; - const unsigned char *buf = in; - - while(nblocks--) { - - /* copy the state into 512-bits into W[0..15] */ - for (i = 0; i < 16; i++) { - W[i] = ((uint32_t*)buf)[i]; - } - - /* copy state */ - a = s[0]; - b = s[1]; - c = s[2]; - d = s[3]; - - FF(a,b,c,d,W[0],7,0xd76aa478) - FF(d,a,b,c,W[1],12,0xe8c7b756) - FF(c,d,a,b,W[2],17,0x242070db) - FF(b,c,d,a,W[3],22,0xc1bdceee) - FF(a,b,c,d,W[4],7,0xf57c0faf) - FF(d,a,b,c,W[5],12,0x4787c62a) - FF(c,d,a,b,W[6],17,0xa8304613) - FF(b,c,d,a,W[7],22,0xfd469501) - FF(a,b,c,d,W[8],7,0x698098d8) - FF(d,a,b,c,W[9],12,0x8b44f7af) - FF(c,d,a,b,W[10],17,0xffff5bb1) - FF(b,c,d,a,W[11],22,0x895cd7be) - FF(a,b,c,d,W[12],7,0x6b901122) - FF(d,a,b,c,W[13],12,0xfd987193) - FF(c,d,a,b,W[14],17,0xa679438e) - FF(b,c,d,a,W[15],22,0x49b40821) - GG(a,b,c,d,W[1],5,0xf61e2562) - GG(d,a,b,c,W[6],9,0xc040b340) - GG(c,d,a,b,W[11],14,0x265e5a51) - GG(b,c,d,a,W[0],20,0xe9b6c7aa) - GG(a,b,c,d,W[5],5,0xd62f105d) - GG(d,a,b,c,W[10],9,0x02441453) - GG(c,d,a,b,W[15],14,0xd8a1e681) - GG(b,c,d,a,W[4],20,0xe7d3fbc8) - GG(a,b,c,d,W[9],5,0x21e1cde6) - GG(d,a,b,c,W[14],9,0xc33707d6) - 
GG(c,d,a,b,W[3],14,0xf4d50d87) - GG(b,c,d,a,W[8],20,0x455a14ed) - GG(a,b,c,d,W[13],5,0xa9e3e905) - GG(d,a,b,c,W[2],9,0xfcefa3f8) - GG(c,d,a,b,W[7],14,0x676f02d9) - GG(b,c,d,a,W[12],20,0x8d2a4c8a) - HH(a,b,c,d,W[5],4,0xfffa3942) - HH(d,a,b,c,W[8],11,0x8771f681) - HH(c,d,a,b,W[11],16,0x6d9d6122) - HH(b,c,d,a,W[14],23,0xfde5380c) - HH(a,b,c,d,W[1],4,0xa4beea44) - HH(d,a,b,c,W[4],11,0x4bdecfa9) - HH(c,d,a,b,W[7],16,0xf6bb4b60) - HH(b,c,d,a,W[10],23,0xbebfbc70) - HH(a,b,c,d,W[13],4,0x289b7ec6) - HH(d,a,b,c,W[0],11,0xeaa127fa) - HH(c,d,a,b,W[3],16,0xd4ef3085) - HH(b,c,d,a,W[6],23,0x04881d05) - HH(a,b,c,d,W[9],4,0xd9d4d039) - HH(d,a,b,c,W[12],11,0xe6db99e5) - HH(c,d,a,b,W[15],16,0x1fa27cf8) - HH(b,c,d,a,W[2],23,0xc4ac5665) - II(a,b,c,d,W[0],6,0xf4292244) - II(d,a,b,c,W[7],10,0x432aff97) - II(c,d,a,b,W[14],15,0xab9423a7) - II(b,c,d,a,W[5],21,0xfc93a039) - II(a,b,c,d,W[12],6,0x655b59c3) - II(d,a,b,c,W[3],10,0x8f0ccc92) - II(c,d,a,b,W[10],15,0xffeff47d) - II(b,c,d,a,W[1],21,0x85845dd1) - II(a,b,c,d,W[8],6,0x6fa87e4f) - II(d,a,b,c,W[15],10,0xfe2ce6e0) - II(c,d,a,b,W[6],15,0xa3014314) - II(b,c,d,a,W[13],21,0x4e0811a1) - II(a,b,c,d,W[4],6,0xf7537e82) - II(d,a,b,c,W[11],10,0xbd3af235) - II(c,d,a,b,W[2],15,0x2ad7d2bb) - II(b,c,d,a,W[9],21,0xeb86d391) - - /* store state */ - s[0] += a; - s[1] += b; - s[2] += c; - s[3] += d; - - buf+=CCMD5_BLOCK_SIZE; - } + uint32_t i, W[16], a, b, c, d; + uint32_t *s = state; + const unsigned char *buf = in; + + while (nblocks--) { + /* copy the state into 512-bits into W[0..15] */ + for (i = 0; i < 16; i++) { + W[i] = ((uint32_t*)buf)[i]; + } + + /* copy state */ + a = s[0]; + b = s[1]; + c = s[2]; + d = s[3]; + + FF(a, b, c, d, W[0], 7, 0xd76aa478) + FF(d, a, b, c, W[1], 12, 0xe8c7b756) + FF(c, d, a, b, W[2], 17, 0x242070db) + FF(b, c, d, a, W[3], 22, 0xc1bdceee) + FF(a, b, c, d, W[4], 7, 0xf57c0faf) + FF(d, a, b, c, W[5], 12, 0x4787c62a) + FF(c, d, a, b, W[6], 17, 0xa8304613) + FF(b, c, d, a, W[7], 22, 0xfd469501) + FF(a, b, c, d, W[8], 7, 0x698098d8) + FF(d, a, b, c, W[9], 12, 0x8b44f7af) + FF(c, d, a, b, W[10], 17, 0xffff5bb1) + FF(b, c, d, a, W[11], 22, 0x895cd7be) + FF(a, b, c, d, W[12], 7, 0x6b901122) + FF(d, a, b, c, W[13], 12, 0xfd987193) + FF(c, d, a, b, W[14], 17, 0xa679438e) + FF(b, c, d, a, W[15], 22, 0x49b40821) + GG(a, b, c, d, W[1], 5, 0xf61e2562) + GG(d, a, b, c, W[6], 9, 0xc040b340) + GG(c, d, a, b, W[11], 14, 0x265e5a51) + GG(b, c, d, a, W[0], 20, 0xe9b6c7aa) + GG(a, b, c, d, W[5], 5, 0xd62f105d) + GG(d, a, b, c, W[10], 9, 0x02441453) + GG(c, d, a, b, W[15], 14, 0xd8a1e681) + GG(b, c, d, a, W[4], 20, 0xe7d3fbc8) + GG(a, b, c, d, W[9], 5, 0x21e1cde6) + GG(d, a, b, c, W[14], 9, 0xc33707d6) + GG(c, d, a, b, W[3], 14, 0xf4d50d87) + GG(b, c, d, a, W[8], 20, 0x455a14ed) + GG(a, b, c, d, W[13], 5, 0xa9e3e905) + GG(d, a, b, c, W[2], 9, 0xfcefa3f8) + GG(c, d, a, b, W[7], 14, 0x676f02d9) + GG(b, c, d, a, W[12], 20, 0x8d2a4c8a) + HH(a, b, c, d, W[5], 4, 0xfffa3942) + HH(d, a, b, c, W[8], 11, 0x8771f681) + HH(c, d, a, b, W[11], 16, 0x6d9d6122) + HH(b, c, d, a, W[14], 23, 0xfde5380c) + HH(a, b, c, d, W[1], 4, 0xa4beea44) + HH(d, a, b, c, W[4], 11, 0x4bdecfa9) + HH(c, d, a, b, W[7], 16, 0xf6bb4b60) + HH(b, c, d, a, W[10], 23, 0xbebfbc70) + HH(a, b, c, d, W[13], 4, 0x289b7ec6) + HH(d, a, b, c, W[0], 11, 0xeaa127fa) + HH(c, d, a, b, W[3], 16, 0xd4ef3085) + HH(b, c, d, a, W[6], 23, 0x04881d05) + HH(a, b, c, d, W[9], 4, 0xd9d4d039) + HH(d, a, b, c, W[12], 11, 0xe6db99e5) + HH(c, d, a, b, W[15], 16, 0x1fa27cf8) + HH(b, c, d, a, W[2], 23, 0xc4ac5665) + II(a, b, c, d, 
W[0], 6, 0xf4292244) + II(d, a, b, c, W[7], 10, 0x432aff97) + II(c, d, a, b, W[14], 15, 0xab9423a7) + II(b, c, d, a, W[5], 21, 0xfc93a039) + II(a, b, c, d, W[12], 6, 0x655b59c3) + II(d, a, b, c, W[3], 10, 0x8f0ccc92) + II(c, d, a, b, W[10], 15, 0xffeff47d) + II(b, c, d, a, W[1], 21, 0x85845dd1) + II(a, b, c, d, W[8], 6, 0x6fa87e4f) + II(d, a, b, c, W[15], 10, 0xfe2ce6e0) + II(c, d, a, b, W[6], 15, 0xa3014314) + II(b, c, d, a, W[13], 21, 0x4e0811a1) + II(a, b, c, d, W[4], 6, 0xf7537e82) + II(d, a, b, c, W[11], 10, 0xbd3af235) + II(c, d, a, b, W[2], 15, 0x2ad7d2bb) + II(b, c, d, a, W[9], 21, 0xeb86d391) + + /* store state */ + s[0] += a; + s[1] += b; + s[2] += c; + s[3] += d; + + buf += CCMD5_BLOCK_SIZE; + } } -void md5_hash(uint8_t *message, uint64_t len, uint32_t *hash) { +void +md5_hash(uint8_t *message, uint64_t len, uint32_t *hash) +{ hash[0] = 0x67452301; hash[1] = 0xEFCDAB89; hash[2] = 0x98BADCFE; hash[3] = 0x10325476; - - md5_compress(hash, len/64, message); - + + md5_compress(hash, len / 64, message); + uint32_t blockbuff[16]; uint8_t *byteptr = (uint8_t*)blockbuff; - + int left = len % 64; - memcpy(byteptr, message + len-left, left); - + memcpy(byteptr, message + len - left, left); + byteptr[left] = 0x80; left++; - if (64 - left >= 8) + if (64 - left >= 8) { bzero(byteptr + left, 56 - left); - else { + } else { memset(byteptr + left, 0, 64 - left); md5_compress(hash, 1, blockbuff); bzero(blockbuff, 56); diff --git a/tools/tests/perf_index/perf_index.c b/tools/tests/perf_index/perf_index.c index 195328836..8d71a0c8f 100644 --- a/tools/tests/perf_index/perf_index.c +++ b/tools/tests/perf_index/perf_index.c @@ -12,19 +12,19 @@ #include "fail.h" typedef struct parsed_args_struct { - char* my_name; - char* test_name; - int num_threads; - long long length; - int test_argc; - void** test_argv; + char* my_name; + char* test_name; + int num_threads; + long long length; + int test_argc; + void** test_argv; } parsed_args_t; typedef struct test_struct { - int (*setup)(int, long long, int, void**); - int (*execute)(int, int, long long, int, void**); - int (*cleanup)(int, long long); - char** error_str_ptr; + int (*setup)(int, long long, int, void**); + int (*execute)(int, int, long long, int, void**); + int (*cleanup)(int, long long); + char** error_str_ptr; } test_t; parsed_args_t args; @@ -34,181 +34,202 @@ pthread_mutex_t ready_thread_count_lock; pthread_cond_t start_cvar; pthread_cond_t threads_ready_cvar; -int parse_args(int argc, char** argv, parsed_args_t* parsed_args) { - if(argc != 4) { - return -1; - } - - parsed_args->my_name = argv[0]; - parsed_args->test_name = argv[1]; - parsed_args->num_threads = atoi(argv[2]); - parsed_args->length = strtoll(argv[3], NULL, 10); - parsed_args->test_argc = 0; - parsed_args->test_argv = NULL; - return 0; +int +parse_args(int argc, char** argv, parsed_args_t* parsed_args) +{ + if (argc != 4) { + return -1; + } + + parsed_args->my_name = argv[0]; + parsed_args->test_name = argv[1]; + parsed_args->num_threads = atoi(argv[2]); + parsed_args->length = strtoll(argv[3], NULL, 10); + parsed_args->test_argc = 0; + parsed_args->test_argv = NULL; + return 0; } -void print_usage(char** argv) { - printf("Usage: %s test_name threads length\n", argv[0]); +void +print_usage(char** argv) +{ + printf("Usage: %s test_name threads length\n", argv[0]); } -int find_test(char* test_name, char* test_path) { - char binpath[MAXPATHLEN]; - char* dirpath; - uint32_t size = sizeof(binpath); - int retval; - - retval = _NSGetExecutablePath(binpath, &size); - assert(retval == 0); 
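/*
 * md5_hash() above finishes with the standard Merkle-Damgard padding:
 * append one 0x80 byte, then zero-fill until the length is congruent to
 * 56 mod 64, leaving the last eight bytes of the final block for the
 * bit length (per RFC 1321; the length store falls outside this hunk).
 * The padding arithmetic in isolation:
 */
#include <stddef.h>
#include <stdint.h>

/* Zero bytes needed between the 0x80 marker and the 8-byte length field
 * for a message of len bytes. */
static size_t
md5_pad_zeros(uint64_t len)
{
    size_t rem = (size_t)(len % 64);

    /* one byte for 0x80, eight reserved for the length */
    return (rem < 56) ? (55 - rem) : (119 - rem);
}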
- dirpath = dirname(binpath); - - snprintf(test_path, MAXPATHLEN, "%s/perfindex-%s.dylib", dirpath, test_name); - if(access(test_path, F_OK) == 0) - return 0; - else - return -1; +int +find_test(char* test_name, char* test_path) +{ + char binpath[MAXPATHLEN]; + char* dirpath; + uint32_t size = sizeof(binpath); + int retval; + + retval = _NSGetExecutablePath(binpath, &size); + assert(retval == 0); + dirpath = dirname(binpath); + + snprintf(test_path, MAXPATHLEN, "%s/perfindex-%s.dylib", dirpath, test_name); + if (access(test_path, F_OK) == 0) { + return 0; + } else { + return -1; + } } -int load_test(char* path, test_t* test) { - void* handle; - void* p; +int +load_test(char* path, test_t* test) +{ + void* handle; + void* p; - handle = dlopen(path, RTLD_NOW | RTLD_LOCAL); - if(!handle) { - return -1; - } + handle = dlopen(path, RTLD_NOW | RTLD_LOCAL); + if (!handle) { + return -1; + } - p = dlsym(handle, "setup"); - test->setup = (int (*)(int, long long, int, void **))p; + p = dlsym(handle, "setup"); + test->setup = (int (*)(int, long long, int, void **))p; - p = dlsym(handle, "execute"); - test->execute = (int (*)(int, int, long long, int, void **))p; - if(p == NULL) - return -1; + p = dlsym(handle, "execute"); + test->execute = (int (*)(int, int, long long, int, void **))p; + if (p == NULL) { + return -1; + } - p = dlsym(handle, "cleanup"); - test->cleanup = (int (*)(int, long long))p; + p = dlsym(handle, "cleanup"); + test->cleanup = (int (*)(int, long long))p; - p = dlsym(handle, "error_str"); - test->error_str_ptr = (char**)p; + p = dlsym(handle, "error_str"); + test->error_str_ptr = (char**)p; - return 0; + return 0; } -void start_timer(struct timeval *tp) { - gettimeofday(tp, NULL); +void +start_timer(struct timeval *tp) +{ + gettimeofday(tp, NULL); } -void end_timer(struct timeval *tp) { - struct timeval tend; - gettimeofday(&tend, NULL); - if(tend.tv_usec >= tp->tv_usec) { - tp->tv_sec = tend.tv_sec - tp->tv_sec; - tp->tv_usec = tend.tv_usec - tp->tv_usec; - } - else { - tp->tv_sec = tend.tv_sec - tp->tv_sec - 1; - tp->tv_usec = tend.tv_usec - tp->tv_usec + 1000000; - } +void +end_timer(struct timeval *tp) +{ + struct timeval tend; + gettimeofday(&tend, NULL); + if (tend.tv_usec >= tp->tv_usec) { + tp->tv_sec = tend.tv_sec - tp->tv_sec; + tp->tv_usec = tend.tv_usec - tp->tv_usec; + } else { + tp->tv_sec = tend.tv_sec - tp->tv_sec - 1; + tp->tv_usec = tend.tv_usec - tp->tv_usec + 1000000; + } } -void print_timer(struct timeval *tp) { - printf("%ld.%06d\n", tp->tv_sec, tp->tv_usec); +void +print_timer(struct timeval *tp) +{ + printf("%ld.%06d\n", tp->tv_sec, tp->tv_usec); } -static void* thread_setup(void *arg) { - int my_index = (int)arg; - long long work_size = args.length / args.num_threads; - int work_remainder = args.length % args.num_threads; - - if(work_remainder > my_index) { - work_size++; - } - - pthread_mutex_lock(&ready_thread_count_lock); - ready_thread_count++; - if(ready_thread_count == args.num_threads) - pthread_cond_signal(&threads_ready_cvar); - pthread_cond_wait(&start_cvar, &ready_thread_count_lock); - pthread_mutex_unlock(&ready_thread_count_lock); - test.execute(my_index, args.num_threads, work_size, args.test_argc, args.test_argv); - return NULL; +static void* +thread_setup(void *arg) +{ + int my_index = (int)arg; + long long work_size = args.length / args.num_threads; + int work_remainder = args.length % args.num_threads; + + if (work_remainder > my_index) { + work_size++; + } + + pthread_mutex_lock(&ready_thread_count_lock); + ready_thread_count++; + if 
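/*
 * load_test() above resolves its entry points from a dylib at runtime.
 * The same dlopen()/dlsym() plugin pattern in miniature, with the
 * customary dlerror() report (symbol name and type are illustrative):
 */
#include <dlfcn.h>
#include <stdio.h>

typedef int (*execute_fn)(int, int, long long, int, void **);

static execute_fn
load_execute(const char *path)
{
    void *handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
    if (handle == NULL) {
        fprintf(stderr, "dlopen: %s\n", dlerror());
        return NULL;
    }
    /* dlsym() returns NULL for missing symbols, as load_test checks */
    return (execute_fn)dlsym(handle, "execute");
}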
(ready_thread_count == args.num_threads) { + pthread_cond_signal(&threads_ready_cvar); + } + pthread_cond_wait(&start_cvar, &ready_thread_count_lock); + pthread_mutex_unlock(&ready_thread_count_lock); + test.execute(my_index, args.num_threads, work_size, args.test_argc, args.test_argv); + return NULL; } -int main(int argc, char** argv) { - int retval; - int thread_index; - struct timeval timer; - pthread_t* threads; - int thread_retval; - void* thread_retval_ptr = &thread_retval; - char test_path[MAXPATHLEN]; - - retval = parse_args(argc, argv, &args); - if(retval) { - print_usage(argv); - return -1; - } - - retval = find_test(args.test_name, test_path); - if(retval) { - printf("Unable to find test %s\n", args.test_name); - return -1; - } - - load_test(test_path, &test); - if(retval) { - printf("Unable to load test %s\n", args.test_name); - return -1; - } - - pthread_cond_init(&threads_ready_cvar, NULL); - pthread_cond_init(&start_cvar, NULL); - pthread_mutex_init(&ready_thread_count_lock, NULL); - ready_thread_count = 0; - - if(test.setup) { - retval = test.setup(args.num_threads, args.length, 0, NULL); - if(retval == PERFINDEX_FAILURE) { - fprintf(stderr, "Test setup failed: %s\n", *test.error_str_ptr); - return -1; - } - } - - threads = (pthread_t*)malloc(sizeof(pthread_t)*args.num_threads); - for(thread_index = 0; thread_index < args.num_threads; thread_index++) { - retval = pthread_create(&threads[thread_index], NULL, thread_setup, (void*)(long)thread_index); - assert(retval == 0); - } - - pthread_mutex_lock(&ready_thread_count_lock); - if(ready_thread_count != args.num_threads) { - pthread_cond_wait(&threads_ready_cvar, &ready_thread_count_lock); - } - pthread_mutex_unlock(&ready_thread_count_lock); - - start_timer(&timer); - pthread_cond_broadcast(&start_cvar); - for(thread_index = 0; thread_index < args.num_threads; thread_index++) { - pthread_join(threads[thread_index], &thread_retval_ptr); - if(**test.error_str_ptr) { - printf("Test failed: %s\n", *test.error_str_ptr); - } - } - end_timer(&timer); - - if(test.cleanup) - retval = test.cleanup(args.num_threads, args.length); - if(retval == PERFINDEX_FAILURE) { - fprintf(stderr, "Test cleanup failed: %s\n", *test.error_str_ptr); - free(threads); - return -1; - } - - print_timer(&timer); - - free(threads); - - return 0; +int +main(int argc, char** argv) +{ + int retval; + int thread_index; + struct timeval timer; + pthread_t* threads; + int thread_retval; + void* thread_retval_ptr = &thread_retval; + char test_path[MAXPATHLEN]; + + retval = parse_args(argc, argv, &args); + if (retval) { + print_usage(argv); + return -1; + } + + retval = find_test(args.test_name, test_path); + if (retval) { + printf("Unable to find test %s\n", args.test_name); + return -1; + } + + load_test(test_path, &test); + if (retval) { + printf("Unable to load test %s\n", args.test_name); + return -1; + } + + pthread_cond_init(&threads_ready_cvar, NULL); + pthread_cond_init(&start_cvar, NULL); + pthread_mutex_init(&ready_thread_count_lock, NULL); + ready_thread_count = 0; + + if (test.setup) { + retval = test.setup(args.num_threads, args.length, 0, NULL); + if (retval == PERFINDEX_FAILURE) { + fprintf(stderr, "Test setup failed: %s\n", *test.error_str_ptr); + return -1; + } + } + + threads = (pthread_t*)malloc(sizeof(pthread_t) * args.num_threads); + for (thread_index = 0; thread_index < args.num_threads; thread_index++) { + retval = pthread_create(&threads[thread_index], NULL, thread_setup, (void*)(long)thread_index); + assert(retval == 0); + } + + 
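/*
 * The ready_thread_count handshake above is a start gate built from one
 * mutex and two condition variables: each worker checks in, the last one
 * signals the coordinator, and all workers block until the coordinator
 * broadcasts. Sketched in isolation with illustrative names:
 */
#include <pthread.h>

static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_ready = PTHREAD_COND_INITIALIZER;
static pthread_cond_t go = PTHREAD_COND_INITIALIZER;
static int ready_count = 0;

static void
worker_wait_at_gate(int nthreads)
{
    pthread_mutex_lock(&gate_lock);
    if (++ready_count == nthreads) {
        pthread_cond_signal(&all_ready);
    }
    pthread_cond_wait(&go, &gate_lock);  /* releases gate_lock while blocked */
    pthread_mutex_unlock(&gate_lock);
}

static void
coordinator_release(int nthreads)
{
    pthread_mutex_lock(&gate_lock);
    while (ready_count != nthreads) {
        pthread_cond_wait(&all_ready, &gate_lock);
    }
    pthread_mutex_unlock(&gate_lock);
    pthread_cond_broadcast(&go);         /* all workers are parked by now */
}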
pthread_mutex_lock(&ready_thread_count_lock); + if (ready_thread_count != args.num_threads) { + pthread_cond_wait(&threads_ready_cvar, &ready_thread_count_lock); + } + pthread_mutex_unlock(&ready_thread_count_lock); + + start_timer(&timer); + pthread_cond_broadcast(&start_cvar); + for (thread_index = 0; thread_index < args.num_threads; thread_index++) { + pthread_join(threads[thread_index], &thread_retval_ptr); + if (**test.error_str_ptr) { + printf("Test failed: %s\n", *test.error_str_ptr); + } + } + end_timer(&timer); + + if (test.cleanup) { + retval = test.cleanup(args.num_threads, args.length); + } + if (retval == PERFINDEX_FAILURE) { + fprintf(stderr, "Test cleanup failed: %s\n", *test.error_str_ptr); + free(threads); + return -1; + } + + print_timer(&timer); + + free(threads); + + return 0; } diff --git a/tools/tests/perf_index/perfindex-compile.c b/tools/tests/perf_index/perfindex-compile.c index b7743f8eb..5585df4de 100644 --- a/tools/tests/perf_index/perfindex-compile.c +++ b/tools/tests/perf_index/perfindex-compile.c @@ -7,48 +7,50 @@ static const char *src_dst = "/tmp/perf_index_compile_code"; static const char *src_root = "/Network/Servers/xs1/release/Software/Zin/Projects/xnu/xnu-2050.7.9"; DECL_SETUP { - char* cmd; - int retval; - const char *src = src_root; - if(test_argc >= 1) - src = (char*)test_argv[0]; + char* cmd; + int retval; + const char *src = src_root; + if (test_argc >= 1) { + src = (char*)test_argv[0]; + } - retval = asprintf(&cmd, "ditto \"%s\" \"%s\"", src, src_dst); - VERIFY(retval > 0, "asprintf failed"); + retval = asprintf(&cmd, "ditto \"%s\" \"%s\"", src, src_dst); + VERIFY(retval > 0, "asprintf failed"); - retval = system(cmd); - VERIFY(retval == 0, "ditto command failed"); + retval = system(cmd); + VERIFY(retval == 0, "ditto command failed"); - free(cmd); + free(cmd); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } DECL_TEST { - char* cmd; - int retval; + char* cmd; + int retval; - if(thread_id != 0) - return 0; + if (thread_id != 0) { + return 0; + } - retval = asprintf(&cmd, "make -C \"%s\" MAKEJOBS=-j%d", src_dst, num_threads); - VERIFY(retval > 0, "asprintf failed"); + retval = asprintf(&cmd, "make -C \"%s\" MAKEJOBS=-j%d", src_dst, num_threads); + VERIFY(retval > 0, "asprintf failed"); - retval = system(cmd); - VERIFY(retval == 0, "make command failed"); + retval = system(cmd); + VERIFY(retval == 0, "make command failed"); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } DECL_CLEANUP { - char* cmd; - int retval; + char* cmd; + int retval; - retval = asprintf(&cmd, "rm -rf \"%s\"", src_dst); - VERIFY(retval > 0, "asprintf failed"); + retval = asprintf(&cmd, "rm -rf \"%s\"", src_dst); + VERIFY(retval > 0, "asprintf failed"); - retval = system(cmd); - VERIFY(retval == 0, "rm command failed"); + retval = system(cmd); + VERIFY(retval == 0, "rm command failed"); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } diff --git a/tools/tests/perf_index/perfindex-cpu.c b/tools/tests/perf_index/perfindex-cpu.c index e95e4640d..9a0d5f90f 100644 --- a/tools/tests/perf_index/perfindex-cpu.c +++ b/tools/tests/perf_index/perfindex-cpu.c @@ -5,10 +5,10 @@ #include DECL_TEST { - long long i; - uint32_t digest[4]; - for(i=0; i DECL_SETUP { - VERIFY(test_argc > 0, "missing argument"); + VERIFY(test_argc > 0, "missing argument"); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } DECL_TEST { - char* cmd; - int retval; + char* cmd; + int retval; - retval = asprintf(&cmd, "iperf -c \"%s\" -n %lld > /dev/null", test_argv[0], length); - 
VERIFY(retval > 0, "asprintf failed"); + retval = asprintf(&cmd, "iperf -c \"%s\" -n %lld > /dev/null", test_argv[0], length); + VERIFY(retval > 0, "asprintf failed"); - retval = system(cmd); - VERIFY(retval == 0, "iperf command failed"); + retval = system(cmd); + VERIFY(retval == 0, "iperf command failed"); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } diff --git a/tools/tests/perf_index/perfindex-memory.c b/tools/tests/perf_index/perfindex-memory.c index 759468ed9..3a4ea088d 100644 --- a/tools/tests/perf_index/perfindex-memory.c +++ b/tools/tests/perf_index/perfindex-memory.c @@ -8,77 +8,79 @@ static char *memblock; static size_t memsize; -size_t hw_memsize(void) { - int mib[2]; - size_t len; - size_t my_memsize; - int retval; +size_t +hw_memsize(void) +{ + int mib[2]; + size_t len; + size_t my_memsize; + int retval; - mib[0] = CTL_HW; - mib[1] = HW_MEMSIZE; - len = sizeof(my_memsize); + mib[0] = CTL_HW; + mib[1] = HW_MEMSIZE; + len = sizeof(my_memsize); - retval = sysctl(mib, 2, &my_memsize, &len, NULL, 0); + retval = sysctl(mib, 2, &my_memsize, &len, NULL, 0); - if(retval != 0) - return 0; + if (retval != 0) { + return 0; + } - return my_memsize; + return my_memsize; } DECL_SETUP { - char *memblockfiller; - long long i; - int pgsz = getpagesize(); - - /* Heuristic: use half the physical memory, hopefully this should work on all - * devices. We use the amount of physical memory, rather than some softer - * metric, like amount of free memory, so that the memory allocated is always - * consistent for a given device. - */ - memsize = hw_memsize(); - VERIFY(memsize > 0, "hw_memsize failed"); - memsize = memsize/2; - - memblock = (char*)malloc(memsize); - VERIFY(memblock != NULL, "malloc failed"); - - memblockfiller = memblock; - - /* Do this manually, to make sure everything is paged in */ - for(i=0; i 0, "hw_memsize failed"); + memsize = memsize / 2; + + memblock = (char*)malloc(memsize); + VERIFY(memblock != NULL, "malloc failed"); + + memblockfiller = memblock; + + /* Do this manually, to make sure everything is paged in */ + for (i = 0; i < memsize; i += pgsz) { + memblockfiller[i] = 1; + } + + return PERFINDEX_SUCCESS; } /* figures out what region of memory to copy, so it does interfere with other -threads, */ + * threads, */ DECL_TEST { - long long left = length; - long long region_len = memsize / num_threads / 2; - long long region_start = memsize / num_threads * thread_id / 2; - long long copy_len; - - if(thread_id < memsize / 2 % num_threads) { - region_start += thread_id; - region_len++; - } - else { - region_start += memsize / 2 % num_threads; - } - - while(left>0) { - copy_len = region_len < left ? region_len : left; - memcpy(memblock+region_start+memsize/2, memblock+region_start, copy_len); - left -= copy_len; - } - - return PERFINDEX_SUCCESS; + long long left = length; + long long region_len = memsize / num_threads / 2; + long long region_start = memsize / num_threads * thread_id / 2; + long long copy_len; + + if (thread_id < memsize / 2 % num_threads) { + region_start += thread_id; + region_len++; + } else { + region_start += memsize / 2 % num_threads; + } + + while (left > 0) { + copy_len = region_len < left ? 
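/*
 * hw_memsize() above queries physical memory through the numeric
 * sysctl(CTL_HW, HW_MEMSIZE) interface; the string-named front end does
 * the same thing and reads a little more directly:
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/sysctl.h>

static uint64_t
physical_memory_bytes(void)
{
    uint64_t memsize = 0;
    size_t len = sizeof(memsize);

    if (sysctlbyname("hw.memsize", &memsize, &len, NULL, 0) != 0) {
        return 0;       /* callers above treat 0 as failure */
    }
    return memsize;
}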
region_len : left; + memcpy(memblock + region_start + memsize / 2, memblock + region_start, copy_len); + left -= copy_len; + } + + return PERFINDEX_SUCCESS; } DECL_CLEANUP { - free(memblock); - return PERFINDEX_SUCCESS; + free(memblock); + return PERFINDEX_SUCCESS; } diff --git a/tools/tests/perf_index/perfindex-ram_file_create.c b/tools/tests/perf_index/perfindex-ram_file_create.c index 7c24b4a4b..9df49326c 100644 --- a/tools/tests/perf_index/perfindex-ram_file_create.c +++ b/tools/tests/perf_index/perfindex-ram_file_create.c @@ -9,25 +9,25 @@ const char ramdisk_name[] = "StressRAMDisk"; char ramdisk_path[MAXPATHLEN]; DECL_SETUP { - int retval; + int retval; - retval = setup_ram_volume(ramdisk_name, ramdisk_path); - VERIFY(retval == PERFINDEX_SUCCESS, "setup_ram_volume failed"); + retval = setup_ram_volume(ramdisk_name, ramdisk_path); + VERIFY(retval == PERFINDEX_SUCCESS, "setup_ram_volume failed"); - printf("ramdisk: %s\n", ramdisk_path); + printf("ramdisk: %s\n", ramdisk_path); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } DECL_TEST { - return test_file_create(ramdisk_path, thread_id, num_threads, length); + return test_file_create(ramdisk_path, thread_id, num_threads, length); } DECL_CLEANUP { - int retval; + int retval; - retval = cleanup_ram_volume(ramdisk_path); - VERIFY(retval == 0, "cleanup_ram_volume failed"); + retval = cleanup_ram_volume(ramdisk_path); + VERIFY(retval == 0, "cleanup_ram_volume failed"); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } diff --git a/tools/tests/perf_index/perfindex-ram_file_read.c b/tools/tests/perf_index/perfindex-ram_file_read.c index e547cec42..10e804d7f 100644 --- a/tools/tests/perf_index/perfindex-ram_file_read.c +++ b/tools/tests/perf_index/perfindex-ram_file_read.c @@ -9,28 +9,28 @@ const char ramdisk_name[] = "StressRAMDisk"; char ramdisk_path[MAXPATHLEN]; DECL_SETUP { - int retval; + int retval; - retval = setup_ram_volume(ramdisk_name, ramdisk_path); - VERIFY(retval == PERFINDEX_SUCCESS, "setup_ram_volume failed"); + retval = setup_ram_volume(ramdisk_name, ramdisk_path); + VERIFY(retval == PERFINDEX_SUCCESS, "setup_ram_volume failed"); - printf("ramdisk: %s\n", ramdisk_path); + printf("ramdisk: %s\n", ramdisk_path); - return test_file_read_setup(ramdisk_path, num_threads, length, 0L); + return test_file_read_setup(ramdisk_path, num_threads, length, 0L); } DECL_TEST { - return test_file_read(ramdisk_path, thread_id, num_threads, length, 0L); + return test_file_read(ramdisk_path, thread_id, num_threads, length, 0L); } DECL_CLEANUP { - int retval; + int retval; - retval = test_file_read_cleanup(ramdisk_path, num_threads, length); - VERIFY(retval == PERFINDEX_SUCCESS, "test_file_read_cleanup failed"); + retval = test_file_read_cleanup(ramdisk_path, num_threads, length); + VERIFY(retval == PERFINDEX_SUCCESS, "test_file_read_cleanup failed"); - retval = cleanup_ram_volume(ramdisk_path); - VERIFY(retval == 0, "cleanup_ram_volume failed"); + retval = cleanup_ram_volume(ramdisk_path); + VERIFY(retval == 0, "cleanup_ram_volume failed"); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } diff --git a/tools/tests/perf_index/perfindex-ram_file_write.c b/tools/tests/perf_index/perfindex-ram_file_write.c index e8c596bad..8250ea482 100644 --- a/tools/tests/perf_index/perfindex-ram_file_write.c +++ b/tools/tests/perf_index/perfindex-ram_file_write.c @@ -9,28 +9,28 @@ const char ramdisk_name[] = "StressRAMDisk"; char ramdisk_path[MAXPATHLEN]; DECL_SETUP { - int retval; + int retval; - retval = setup_ram_volume(ramdisk_name, 
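/*
 * The region math in DECL_TEST above partitions a buffer across threads,
 * with the first (total % nthreads) threads absorbing one extra byte so
 * the remainder is covered. The same split in its usual closed form:
 */
static void
split_range(long long total, int nthreads, int tid,
    long long *start, long long *len)
{
    long long base = total / nthreads;
    long long extra = total % nthreads;

    *len = base + (tid < extra ? 1 : 0);
    /* every earlier thread below `extra` already took one extra byte */
    *start = base * tid + (tid < extra ? tid : extra);
}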
ramdisk_path); - VERIFY(retval == PERFINDEX_SUCCESS, "setup_ram_volume failed"); + retval = setup_ram_volume(ramdisk_name, ramdisk_path); + VERIFY(retval == PERFINDEX_SUCCESS, "setup_ram_volume failed"); - printf("ramdisk: %s\n", ramdisk_path); + printf("ramdisk: %s\n", ramdisk_path); - return test_file_write_setup(ramdisk_path, num_threads, length); + return test_file_write_setup(ramdisk_path, num_threads, length); } DECL_TEST { - return test_file_write(ramdisk_path, thread_id, num_threads, length, 0L); + return test_file_write(ramdisk_path, thread_id, num_threads, length, 0L); } DECL_CLEANUP { - int retval; + int retval; - retval = test_file_write_cleanup(ramdisk_path, num_threads, length); - VERIFY(retval == PERFINDEX_SUCCESS, "test_file_read_cleanup failed"); + retval = test_file_write_cleanup(ramdisk_path, num_threads, length); + VERIFY(retval == PERFINDEX_SUCCESS, "test_file_read_cleanup failed"); - retval = cleanup_ram_volume(ramdisk_path); - VERIFY(retval == 0, "cleanup_ram_volume failed"); + retval = cleanup_ram_volume(ramdisk_path); + VERIFY(retval == 0, "cleanup_ram_volume failed"); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } diff --git a/tools/tests/perf_index/perfindex-syscall.c b/tools/tests/perf_index/perfindex-syscall.c index 757c5c8ad..e092a4845 100644 --- a/tools/tests/perf_index/perfindex-syscall.c +++ b/tools/tests/perf_index/perfindex-syscall.c @@ -3,9 +3,9 @@ #include DECL_TEST { - long long i; - for(i=0; i #include -int setup_ram_volume(const char* name, char* path) { - char *cmd; - int retval; - - retval = asprintf(&cmd, "diskutil erasevolume HFS+ '%s' `hdiutil attach -nomount ram://1500000` >/dev/null", name); - VERIFY(retval > 0, "asprintf failed"); +int +setup_ram_volume(const char* name, char* path) +{ + char *cmd; + int retval; - retval = system(cmd); - VERIFY(retval == 0, "diskutil command failed"); + retval = asprintf(&cmd, "diskutil erasevolume HFS+ '%s' `hdiutil attach -nomount ram://1500000` >/dev/null", name); + VERIFY(retval > 0, "asprintf failed"); - snprintf(path, MAXPATHLEN, "/Volumes/%s", name); + retval = system(cmd); + VERIFY(retval == 0, "diskutil command failed"); - free(cmd); + snprintf(path, MAXPATHLEN, "/Volumes/%s", name); - return PERFINDEX_SUCCESS; + free(cmd); + + return PERFINDEX_SUCCESS; } -int cleanup_ram_volume(char* path) { - char *cmd; - int retval; +int +cleanup_ram_volume(char* path) +{ + char *cmd; + int retval; - retval = asprintf(&cmd, "umount -f '%s' >/dev/null", path); - VERIFY(retval > 0, "asprintf failed"); + retval = asprintf(&cmd, "umount -f '%s' >/dev/null", path); + VERIFY(retval > 0, "asprintf failed"); - retval = system(cmd); - VERIFY(retval == 0, "diskutil command failed"); + retval = system(cmd); + VERIFY(retval == 0, "diskutil command failed"); - free(cmd); + free(cmd); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } diff --git a/tools/tests/perf_index/test_fault_helper.c b/tools/tests/perf_index/test_fault_helper.c index 439757774..ab4fbadc7 100644 --- a/tools/tests/perf_index/test_fault_helper.c +++ b/tools/tests/perf_index/test_fault_helper.c @@ -14,77 +14,80 @@ static char* memblock; -int test_fault_setup() { - char *ptr; - int pgsz = getpagesize(); - int retval; +int +test_fault_setup() +{ + char *ptr; + int pgsz = getpagesize(); + int retval; - memblock = (char *)mmap(NULL, MEMSIZE, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); - VERIFY(memblock != MAP_FAILED, "mmap failed"); + memblock = (char *)mmap(NULL, MEMSIZE, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); + 
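/*
 * test_fault_setup() maps an anonymous region and then (just below)
 * walks it one byte per page so every page is faulted in before the
 * timed phase begins. That warm-up idiom, standalone:
 */
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static char *
map_and_touch(size_t size)
{
    char *block = mmap(NULL, size, PROT_READ | PROT_WRITE,
        MAP_ANON | MAP_PRIVATE, -1, 0);
    if (block == MAP_FAILED) {
        return NULL;
    }
    /* one write per page forces the fault now, not during the test */
    for (size_t off = 0; off < size; off += (size_t)getpagesize()) {
        block[off] = 1;
    }
    return block;
}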
VERIFY(memblock != MAP_FAILED, "mmap failed"); - /* make sure memory is paged */ - for(ptr = memblock; ptr= 0, "open failed"); - - close(fd); - } - - for(i=0; i= 0, "open failed"); + + close(fd); + } + + for (i = 0; i < length; i++) { + snprintf(filepath, MAXPATHLEN, "%s/file_create-%d-%lld", path, thread_id, i); + retval = unlink(filepath); + VERIFY(retval == 0, "unlink failed"); + } + + return PERFINDEX_SUCCESS; } -int test_file_read_setup(char* path, int num_threads, long long length, long long max_file_size) { - int fd; - char filepath[MAXPATHLEN]; - long long left; - int retval; - size_t writelen; +int +test_file_read_setup(char* path, int num_threads, long long length, long long max_file_size) +{ + int fd; + char filepath[MAXPATHLEN]; + long long left; + int retval; + size_t writelen; - if(max_file_size == 0) - max_file_size = MAXFILESIZE; + if (max_file_size == 0) { + max_file_size = MAXFILESIZE; + } - left = MIN(length, max_file_size/num_threads); + left = MIN(length, max_file_size / num_threads); - snprintf(filepath, sizeof(filepath), "%s/file_read", path); - fd = open(filepath, O_CREAT | O_EXCL | O_WRONLY, 0644); - printf("%d\n", fd); - VERIFY(fd >= 0, "open failed"); + snprintf(filepath, sizeof(filepath), "%s/file_read", path); + fd = open(filepath, O_CREAT | O_EXCL | O_WRONLY, 0644); + printf("%d\n", fd); + VERIFY(fd >= 0, "open failed"); - bzero(readbuff, sizeof(readbuff)); + bzero(readbuff, sizeof(readbuff)); - while(left > 0) { - writelen = sizeof(readbuff) < left ? sizeof(readbuff) : left; - retval = write(fd, readbuff, writelen); - VERIFY(retval == writelen, "write failed"); - left -= writelen; - } + while (left > 0) { + writelen = sizeof(readbuff) < left ? sizeof(readbuff) : left; + retval = write(fd, readbuff, writelen); + VERIFY(retval == writelen, "write failed"); + left -= writelen; + } - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } -int test_file_read(char* path, int thread_id, int num_threads, long long length, long long max_file_size) { - long long left; - size_t file_offset = 0; - int readlen; - int fd; - int retval; - char filepath[MAXPATHLEN]; - long long filesize; - - - if(max_file_size == 0) - max_file_size = MAXFILESIZE; - filesize = MIN(length, max_file_size/num_threads); - - snprintf(filepath, sizeof(filepath), "%s/file_read", path); - fd = open(filepath, O_RDONLY); - VERIFY(fd >= 0, "open failed"); - - for(left=length; left>0;) { - readlen = sizeof(readbuff) < left ? sizeof(readbuff) : left; - if(file_offset+readlen > filesize) { - retval = lseek(fd, 0, SEEK_SET); - - - VERIFY(retval >= 0, "lseek failed"); - - file_offset = 0; - continue; - } - retval = read(fd, readbuff, readlen); - VERIFY(retval == readlen, "read failed"); - left -= readlen; - file_offset += readlen; - } - return PERFINDEX_SUCCESS; +int +test_file_read(char* path, int thread_id, int num_threads, long long length, long long max_file_size) +{ + long long left; + size_t file_offset = 0; + int readlen; + int fd; + int retval; + char filepath[MAXPATHLEN]; + long long filesize; + + + if (max_file_size == 0) { + max_file_size = MAXFILESIZE; + } + filesize = MIN(length, max_file_size / num_threads); + + snprintf(filepath, sizeof(filepath), "%s/file_read", path); + fd = open(filepath, O_RDONLY); + VERIFY(fd >= 0, "open failed"); + + for (left = length; left > 0;) { + readlen = sizeof(readbuff) < left ? 
sizeof(readbuff) : left; + if (file_offset + readlen > filesize) { + retval = lseek(fd, 0, SEEK_SET); + + + VERIFY(retval >= 0, "lseek failed"); + + file_offset = 0; + continue; + } + retval = read(fd, readbuff, readlen); + VERIFY(retval == readlen, "read failed"); + left -= readlen; + file_offset += readlen; + } + return PERFINDEX_SUCCESS; } -int test_file_read_cleanup(char* path, int num_threads, long long length) { - char filepath[MAXPATHLEN]; - int retval; +int +test_file_read_cleanup(char* path, int num_threads, long long length) +{ + char filepath[MAXPATHLEN]; + int retval; - snprintf(filepath, sizeof(filepath), "%s/file_read", path); - retval = unlink(filepath); - VERIFY(retval == 0, "unlink failed"); + snprintf(filepath, sizeof(filepath), "%s/file_read", path); + retval = unlink(filepath); + VERIFY(retval == 0, "unlink failed"); - return PERFINDEX_SUCCESS; + return PERFINDEX_SUCCESS; } -int test_file_write_setup(char* path, int num_threads, long long length) { - int i; - char filepath[MAXPATHLEN]; - - if(fds == NULL) { - fds = (int*)malloc(sizeof(int)*num_threads); - VERIFY(fds, "malloc failed"); - } - - for(i=0; i0;) { - writelen = sizeof(writebuff) < left ? sizeof(writebuff) : left; - retval = write(fd, writebuff, writelen); - VERIFY(retval == writelen, "write failed"); - - left -= writelen; - file_offset += writelen; - if(file_offset>max_file_size/num_threads) { - retval = lseek(fd, 0, SEEK_SET); - VERIFY(retval >= 0, "leeks failed"); - file_offset = 0; - } - } - - return PERFINDEX_SUCCESS; +int +test_file_write(char* path, int thread_id, int num_threads, long long length, long long max_file_size) +{ + long long left; + size_t file_offset = 0; + int writelen; + int retval; + int fd = fds[thread_id]; + + if (max_file_size == 0) { + max_file_size = MAXFILESIZE; + } + + for (left = length; left > 0;) { + writelen = sizeof(writebuff) < left ? 
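/*
 * Both file loops above stream a logical length through a bounded file,
 * rewinding to the start whenever the offset would pass the per-thread
 * cap. The rewind step, factored out for clarity:
 */
#include <err.h>
#include <sys/types.h>
#include <unistd.h>

static void
rewind_if_past(int fd, size_t *file_offset, size_t cap)
{
    if (*file_offset > cap) {
        if (lseek(fd, 0, SEEK_SET) < 0) {
            err(1, "lseek");
        }
        *file_offset = 0;
    }
}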
sizeof(writebuff) : left; + retval = write(fd, writebuff, writelen); + VERIFY(retval == writelen, "write failed"); + + left -= writelen; + file_offset += writelen; + if (file_offset > max_file_size / num_threads) { + retval = lseek(fd, 0, SEEK_SET); + VERIFY(retval >= 0, "leeks failed"); + file_offset = 0; + } + } + + return PERFINDEX_SUCCESS; } -int test_file_write_cleanup(char* path, int num_threads, long long length) { - int i; - char filepath[MAXPATHLEN]; - int retval; +int +test_file_write_cleanup(char* path, int num_threads, long long length) +{ + int i; + char filepath[MAXPATHLEN]; + int retval; - for(i=0; ipersona_id); ki->persona_info_version = PERSONA_INFO_V1; ret = kpersona_dealloc(ki->persona_id); - if (ret < 0) + if (ret < 0) { err_print("destroy failed!"); + } return ret; } -static int persona_op_lookup(struct kpersona_info *ki, pid_t pid, uid_t uid) +static int +persona_op_lookup(struct kpersona_info *ki, pid_t pid, uid_t uid) { int ret; @@ -89,29 +93,33 @@ static int persona_op_lookup(struct kpersona_info *ki, pid_t pid, uid_t uid) if (pid > 0) { ki->persona_info_version = PERSONA_INFO_V1; ret = kpersona_pidinfo(pid, ki); - if (ret < 0) + if (ret < 0) { err_print("pidinfo failed!"); - else + } else { dump_kpersona("Persona-for-pid:", ki); + } } else { int np = 0; uid_t personas[128]; size_t npersonas = ARRAY_SZ(personas); const char *name = NULL; - if (ki->persona_name[0] != 0) + if (ki->persona_name[0] != 0) { name = ki->persona_name; + } np = kpersona_find(name, uid, personas, &npersonas); - if (np < 0) + if (np < 0) { err("kpersona_find returned %d (errno:%d)", np, errno); + } info("Found %zu persona%c", npersonas, npersonas != 1 ? 's' : ' '); np = npersonas; while (np--) { info("\tpersona[%d]=%d...", np, personas[np]); ki->persona_info_version = PERSONA_INFO_V1; ret = kpersona_info(personas[np], ki); - if (ret < 0) + if (ret < 0) { err("kpersona_info failed (errno:%d) for persona[%d]", errno, personas[np]); + } dump_kpersona(NULL, ki); } } @@ -119,7 +127,8 @@ static int persona_op_lookup(struct kpersona_info *ki, pid_t pid, uid_t uid) return ret; } -static int persona_op_support(void) +static int +persona_op_support(void) { uid_t pna_id = -1; int ret = kpersona_get(&pna_id); @@ -139,17 +148,20 @@ static int persona_op_support(void) * * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */ -static void usage_main(const char *progname, const char *msg, int verbose) +static void +usage_main(const char *progname, const char *msg, int verbose) { const char *nm = basename((char *)progname); - if (msg) + if (msg) { printf("%s\n\n", msg); + } printf("%s v%d.%d\n", PROG_NAME, PROG_VMAJOR, PROG_VMINOR); printf("usage: %s [op] [-v] [-i id] [-t type] [-p pid] [-u uid] [-g gid] [-l login] [-G {groupspec}] [-m gmuid]\n", nm); - if (!verbose) + if (!verbose) { exit(1); + } printf("\t%-15s\tOne of: create | destroy | lookup | support\n", "[op]"); printf("\t%-15s\tBe verbose\n", "-v"); @@ -168,7 +180,8 @@ static void usage_main(const char *progname, const char *msg, int verbose) exit(1); } -int main(int argc, char **argv) +int +main(int argc, char **argv) { char ch; int ret; @@ -184,27 +197,31 @@ int main(int argc, char **argv) */ g.verbose = 0; - if (geteuid() != 0) + if (geteuid() != 0) { err("%s must be run as root", argv[0] ? 
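/*
 * persona_op_lookup() above drives the kpersona_* calls, which are
 * private Apple SPI; the signatures below are taken from this file, not
 * from any public header, so treat this purely as a reading aid:
 * enumerate matching persona IDs, then fetch details per ID.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/persona.h>        /* private header providing struct kpersona_info */

static void
list_personas(const char *name, uid_t uid)
{
    uid_t ids[128];
    size_t nids = sizeof(ids) / sizeof(ids[0]);

    if (kpersona_find(name, uid, ids, &nids) < 0) {
        return;         /* no match, or SPI unavailable */
    }
    for (size_t i = 0; i < nids; i++) {
        struct kpersona_info ki = { .persona_info_version = PERSONA_INFO_V1 };
        if (kpersona_info(ids[i], &ki) == 0) {
            printf("persona %d: \"%s\"\n", ki.persona_id, ki.persona_name);
        }
    }
}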
basename(argv[0]) : PROG_NAME); + } - if (argc < 2) + if (argc < 2) { usage_main(argv[0], "Not enough arguments", 0); + } op_str = argv[1]; - if (strcmp(op_str, "create") == 0) + if (strcmp(op_str, "create") == 0) { persona_op = PERSONA_OP_CREATE; - else if (strcmp(op_str, "destroy") == 0) + } else if (strcmp(op_str, "destroy") == 0) { persona_op = PERSONA_OP_DESTROY; - else if (strcmp(op_str, "lookup") == 0) + } else if (strcmp(op_str, "lookup") == 0) { persona_op = PERSONA_OP_LOOKUP; - else if (strcmp(op_str, "support") == 0) + } else if (strcmp(op_str, "support") == 0) { persona_op = PERSONA_OP_SUPPORT; - else if (strcmp(op_str, "help") == 0 || strcmp(op_str, "-h") == 0) + } else if (strcmp(op_str, "help") == 0 || strcmp(op_str, "-h") == 0) { usage_main(argv[0], NULL, 1); + } - if (persona_op <= 0 || persona_op > PERSONA_OP_MAX) + if (persona_op <= 0 || persona_op > PERSONA_OP_MAX) { usage_main(argv[0], "Invalid [op]", 0); + } memset(&kinfo, 0, sizeof(kinfo)); kinfo.persona_gmuid = KAUTH_UID_NONE; @@ -241,8 +258,9 @@ int main(int argc, char **argv) break; case 'p': ret = atoi(optarg); - if (ret <= 0) + if (ret <= 0) { err("Invalid PID: %s", optarg); + } pid = (pid_t)ret; break; case 'u': @@ -255,21 +273,24 @@ int main(int argc, char **argv) break; case 'g': kinfo.persona_gid = (gid_t)atoi(optarg); - if (kinfo.persona_gid <= 500) + if (kinfo.persona_gid <= 500) { err("Invalid GID: %d", kinfo.persona_gid); + } break; case 'l': strncpy(kinfo.persona_name, optarg, MAXLOGNAME); break; case 'G': ret = parse_groupspec(&kinfo, optarg); - if (ret < 0) + if (ret < 0) { err("Invalid groupspec: \"%s\"", optarg); + } break; case 'm': ret = atoi(optarg); - if (ret < 0) + if (ret < 0) { err("Invalid group membership ID: %s", optarg); + } kinfo.persona_gmuid = (uid_t)ret; break; case 'v': @@ -295,14 +316,16 @@ int main(int argc, char **argv) * least one group: make it equal to either the GID or UID */ kinfo.persona_ngroups = 1; - if (kinfo.persona_gid) + if (kinfo.persona_gid) { kinfo.persona_groups[0] = kinfo.persona_gid; - else + } else { kinfo.persona_groups[0] = kinfo.persona_id; + } } - if (g.verbose) + if (g.verbose) { dump_kpersona("Input persona:", &kinfo); + } switch (persona_op) { case PERSONA_OP_CREATE: diff --git a/tools/tests/personas/persona_spawn.c b/tools/tests/personas/persona_spawn.c index 521871576..618fa76a9 100644 --- a/tools/tests/personas/persona_spawn.c +++ b/tools/tests/personas/persona_spawn.c @@ -58,7 +58,8 @@ static pthread_mutex_t g_child_mtx; static TAILQ_HEAD(, child) g_children = TAILQ_HEAD_INITIALIZER(g_children); static int g_nchildren = 0; -static pid_t spawn_child(int argc, char **argv, struct persona_args *pa) +static pid_t +spawn_child(int argc, char **argv, struct persona_args *pa) { int ret; uint32_t persona_flags = 0; @@ -99,11 +100,13 @@ static pid_t spawn_child(int argc, char **argv, struct persona_args *pa) goto out_err; } - if (pa->flags & PA_SHOULD_VERIFY) + if (pa->flags & PA_SHOULD_VERIFY) { persona_flags |= POSIX_SPAWN_PERSONA_FLAGS_VERIFY; + } - if (pa->flags & PA_OVERRIDE) + if (pa->flags & PA_OVERRIDE) { persona_flags |= POSIX_SPAWN_PERSONA_FLAGS_OVERRIDE; + } ret = posix_spawnattr_set_persona_np(&attr, pa->kinfo.persona_id, persona_flags); if (ret != 0) { @@ -166,14 +169,16 @@ out_err: static int child_should_exit = 0; -static void child_sighandler(int sig) +static void +child_sighandler(int sig) { (void)sig; dbg("PID: %d received sig %d", getpid(), sig); child_should_exit = 1; } -static int child_main_loop(int argc, char **argv) +static int 
+child_main_loop(int argc, char **argv) { char ch; sigset_t sigset; @@ -224,17 +229,20 @@ static int child_main_loop(int argc, char **argv) kinfo.persona_info_version = PERSONA_INFO_V1; err = kpersona_info(persona_id, &kinfo); - if (err == 0) + if (err == 0) { dump_kpersona("Child: kpersona_info", &kinfo); - else + } else { info("Child: ERROR grabbing kpersona_info: %d", errno); + } - if (child_should_exit) + if (child_should_exit) { return rval; + } infov("Child Sleeping!"); - while (!child_should_exit) + while (!child_should_exit) { sleep(1); + } infov("Child exiting!"); return rval; @@ -247,7 +255,8 @@ static int child_main_loop(int argc, char **argv) * * = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */ -static void main_sighandler(int sig) +static void +main_sighandler(int sig) { dbg("PID: %d received sig %d", getpid(), sig); if (sig == SIGCHLD) { @@ -255,15 +264,17 @@ static void main_sighandler(int sig) } } -static void usage_main(const char *progname, int verbose) +static void +usage_main(const char *progname, int verbose) { const char *nm = basename((char *)progname); printf("%s v%d.%d\n", PERSONA_TEST_NAME, PERSONA_TEST_VMAJOR, PERSONA_TEST_VMINOR); printf("usage: %s [-I id] [-V] [-u uid] [-g gid] [-vw] progname [args...]\n", nm); printf(" Spawn a new process into a new or existing persona.\n"); - if (!verbose) + if (!verbose) { exit(1); + } printf("\t%-10s\tID of the persona\n", "-I id"); printf("\t%-10s\tVerify persona parameters against existing persona (given by -I)\n", "-V"); @@ -278,7 +289,8 @@ static void usage_main(const char *progname, int verbose) exit(1); } -int main(int argc, char **argv) +int +main(int argc, char **argv) { char ch; int ret; @@ -294,8 +306,9 @@ int main(int argc, char **argv) if (argc > 1 && strcmp(argv[1], "child") == 0) { optind = 2; ret = child_main_loop(argc, argv); - if (ret != 1) + if (ret != 1) { exit(ret); + } if (strcmp(argv[optind], "spawn") != 0) { printf("child exiting (%s).\n", argv[optind]); exit(0); @@ -310,8 +323,9 @@ int main(int argc, char **argv) */ } - if (geteuid() != 0) + if (geteuid() != 0) { err("%s must be run as root", argv[0] ? 
basename(argv[0]) : PERSONA_TEST_NAME); + } struct persona_args pa; memset(&pa, 0, sizeof(pa)); @@ -334,15 +348,17 @@ int main(int argc, char **argv) break; case 'G': ret = parse_groupspec(&pa.kinfo, optarg); - if (ret < 0) + if (ret < 0) { err("Invalid groupspec: \"%s\"", optarg); + } pa.flags |= PA_HAS_GROUPS; pa.flags |= PA_OVERRIDE; break; case 'I': pa.kinfo.persona_id = atoi(optarg); - if (pa.kinfo.persona_id == 0) + if (pa.kinfo.persona_id == 0) { err("Invalid Persona ID: %s", optarg); + } pa.flags |= PA_HAS_ID; break; case 'u': @@ -366,8 +382,9 @@ int main(int argc, char **argv) } } - if (pa.flags & PA_SHOULD_VERIFY) + if (pa.flags & PA_SHOULD_VERIFY) { pa.flags = ~PA_OVERRIDE; + } if (optind >= argc) { printf("No program given!\n"); @@ -382,8 +399,9 @@ int main(int argc, char **argv) argv[argc] = NULL; ret = spawn_child(argc, argv, &pa); - if (ret < 0) + if (ret < 0) { return ret; + } pid_t child_pid = (pid_t)ret; int status = 0; @@ -398,9 +416,10 @@ int main(int argc, char **argv) waitpid(child_pid, &status, 0); if (WIFEXITED(status)) { status = WEXITSTATUS(status); - if (status != 0) + if (status != 0) { errc(ERR_CHILD_FAIL, - "Child exited with status: %d", status); + "Child exited with status: %d", status); + } } } diff --git a/tools/tests/personas/persona_test.h b/tools/tests/personas/persona_test.h index e88d08ad7..c3c14e3c3 100644 --- a/tools/tests/personas/persona_test.h +++ b/tools/tests/personas/persona_test.h @@ -46,63 +46,63 @@ struct persona_args { #define err(fmt, ...) \ do { \ - fflush(NULL); \ - fprintf(stderr, "[%4d] [ERROR(%d:%s)] %s:%d: " fmt "\n", \ - getuid(), errno, strerror(errno), \ - __func__, __LINE__, ## __VA_ARGS__ ); \ - fflush(stderr); \ - exit(ERR_SYSTEM); \ + fflush(NULL); \ + fprintf(stderr, "[%4d] [ERROR(%d:%s)] %s:%d: " fmt "\n", \ + getuid(), errno, strerror(errno), \ + __func__, __LINE__, ## __VA_ARGS__ ); \ + fflush(stderr); \ + exit(ERR_SYSTEM); \ } while (0) #define errc(code, fmt, ...) \ do { \ - fflush(NULL); \ - fprintf(stderr, "[%4d] [ERROR(%d)] %s:%d: " fmt "\n", \ - getuid(), code, \ - __func__, __LINE__, ## __VA_ARGS__ ); \ - fflush(stderr); \ - exit(code ? code : ERR_SYSTEM); \ + fflush(NULL); \ + fprintf(stderr, "[%4d] [ERROR(%d)] %s:%d: " fmt "\n", \ + getuid(), code, \ + __func__, __LINE__, ## __VA_ARGS__ ); \ + fflush(stderr); \ + exit(code ? code : ERR_SYSTEM); \ } while (0) #define err_print(fmt, ...) \ do { \ - fflush(NULL); \ - fprintf(stderr, "[%4d] [ERROR(%d:%s)] %s:%d: " fmt "\n", \ - getuid(), errno, strerror(errno), \ - __func__, __LINE__, ## __VA_ARGS__ ); \ - fflush(stderr); \ + fflush(NULL); \ + fprintf(stderr, "[%4d] [ERROR(%d:%s)] %s:%d: " fmt "\n", \ + getuid(), errno, strerror(errno), \ + __func__, __LINE__, ## __VA_ARGS__ ); \ + fflush(stderr); \ } while (0) #define err__start(fmt, ...) \ do { \ - fprintf(stderr, "[%4d] [ERROR] " fmt, getuid(), ## __VA_ARGS__); \ - fflush(stderr); \ + fprintf(stderr, "[%4d] [ERROR] " fmt, getuid(), ## __VA_ARGS__); \ + fflush(stderr); \ } while (0) #define err__cont(fmt, ...) \ do { \ - fprintf(stderr, fmt, ## __VA_ARGS__); \ - fflush(stderr); \ + fprintf(stderr, fmt, ## __VA_ARGS__); \ + fflush(stderr); \ } while (0) #define err__finish(fmt, ...) \ do { \ - fprintf(stderr, fmt "\n", ## __VA_ARGS__); \ - fflush(stderr); \ + fprintf(stderr, fmt "\n", ## __VA_ARGS__); \ + fflush(stderr); \ } while (0) #ifdef DEBUG #define dbg(fmt, ...) 
\ do { \ - fprintf(stdout, "[%4d] [DEBUG] " fmt "\n", getuid(), ## __VA_ARGS__ ); \ - fflush(NULL); \ + fprintf(stdout, "[%4d] [DEBUG] " fmt "\n", getuid(), ## __VA_ARGS__ ); \ + fflush(NULL); \ } while (0) #define warn(fmt, ...) \ do { \ - fprintf(stdout, "[%4d] [WARN ] " fmt "\n", getuid(), ## __VA_ARGS__ ); \ - fflush(NULL); \ + fprintf(stdout, "[%4d] [WARN ] " fmt "\n", getuid(), ## __VA_ARGS__ ); \ + fflush(NULL); \ } while (0) #else #define dbg(...) @@ -111,40 +111,42 @@ struct persona_args { #define info(fmt, ...) \ do { \ - fprintf(stdout, "[%4d] [INFO ] " fmt "\n", getuid(), ## __VA_ARGS__ ); \ - fflush(NULL); \ + fprintf(stdout, "[%4d] [INFO ] " fmt "\n", getuid(), ## __VA_ARGS__ ); \ + fflush(NULL); \ } while (0) #define info_start(fmt, ...) \ do { \ - fprintf(stdout, "[%4d] [INFO ] " fmt, getuid(), ## __VA_ARGS__ ); \ + fprintf(stdout, "[%4d] [INFO ] " fmt, getuid(), ## __VA_ARGS__ ); \ } while (0) #define info_cont(fmt, ...) \ do { \ - fprintf(stdout, fmt, ## __VA_ARGS__ ); \ + fprintf(stdout, fmt, ## __VA_ARGS__ ); \ } while (0) #define info_end() \ do { \ - fprintf(stdout, "\n"); \ - fflush(NULL); \ + fprintf(stdout, "\n"); \ + fflush(NULL); \ } while (0) #define infov(fmt, ...) \ if (g.verbose) { \ - fprintf(stdout, "[%4d] [vINFO] " fmt "\n", getuid(), ## __VA_ARGS__ ); \ - fflush(NULL); \ + fprintf(stdout, "[%4d] [vINFO] " fmt "\n", getuid(), ## __VA_ARGS__ ); \ + fflush(NULL); \ } #define ARRAY_SZ(a) \ (sizeof(a) / sizeof((a)[0])) -static inline void _dump_kpersona(const char *msg, uint32_t flags, const struct kpersona_info *ki) +static inline void +_dump_kpersona(const char *msg, uint32_t flags, const struct kpersona_info *ki) { - if (msg) + if (msg) { info("%s", msg); + } info("\t kpersona_info (v%d) {", ki->persona_info_version); info("\t\t %cid: %d", flags & PA_HAS_ID ? '+' : '-', ki->persona_id); info("\t\t %ctype: %d", flags & PA_HAS_TYPE ? '+' : '-', ki->persona_type); @@ -152,15 +154,18 @@ static inline void _dump_kpersona(const char *msg, uint32_t flags, const struct info_start("\t\t ngroups: %d", ki->persona_ngroups); for (int i = 0; i < ki->persona_ngroups; i++) { - if (i == 0) info_cont(" {"); + if (i == 0) { + info_cont(" {"); + } info_cont(" %d", ki->persona_groups[i]); } - if (ki->persona_ngroups > 0) + if (ki->persona_ngroups > 0) { info_cont(" }"); + } info_end(); info("\t\t %cgmuid: %d (0x%x)", flags & PA_HAS_GROUPS ? '+' : '-', - (int)ki->persona_gmuid, ki->persona_gmuid); + (int)ki->persona_gmuid, ki->persona_gmuid); info("\t\t %clogin: \"%s\"", flags & PA_HAS_LOGIN ? '+' : '-', ki->persona_name); info("\t }"); } @@ -168,33 +173,39 @@ static inline void _dump_kpersona(const char *msg, uint32_t flags, const struct #define dump_kpersona(msg, ki) \ _dump_kpersona(msg, 0xffffffff, ki) -static inline void dump_persona_args(const char *msg, const struct persona_args *pa) +static inline void +dump_persona_args(const char *msg, const struct persona_args *pa) { const struct kpersona_info *ki = &pa->kinfo; - if (msg) + if (msg) { info("%s", msg); + } info("\t flags: 0x%x", pa->flags); info("\t %cuid: %d", pa->flags & PA_HAS_UID ? 
'+' : '-', pa->override_uid); _dump_kpersona(NULL, pa->flags, ki); } -static int parse_groupspec(struct kpersona_info *kinfo, char *spec) +static int +parse_groupspec(struct kpersona_info *kinfo, char *spec) { int idx = 0; int grp; char *s, *e; - if (!spec) + if (!spec) { return -1; + } s = e = spec; while (*s) { int comma = 0; e = s; - while (*e && *e != ',') + while (*e && *e != ',') { e++; - if (*e) + } + if (*e) { comma = 1; + } *e = 0; grp = atoi(s); if (comma) { @@ -203,8 +214,9 @@ static int parse_groupspec(struct kpersona_info *kinfo, char *spec) } else { s = e; } - if (grp < 0) + if (grp < 0) { return -1; + } kinfo->persona_groups[idx] = grp; idx++; } diff --git a/tools/tests/personas/persona_test_run.sh b/tools/tests/personas/persona_test_run.sh index 77ee923d4..b07ec376c 100755 --- a/tools/tests/personas/persona_test_run.sh +++ b/tools/tests/personas/persona_test_run.sh @@ -317,12 +317,12 @@ function spawn_child() { validate_child_info "${TMPDIR}/$file" "$pna_id" "$uid" "$gid" "$groups" $line - # TODO: validate that the first child spawned into a persona *cannot* spawn - # into a different persona... - #if [ $uid -eq 0 ]; then - # ${PERSONA_SPAWN} -v $spawn_args ${PERSONA_SPAWN} child -v -E -R -v -I 99 /bin/echo "This is running in the system persona" - # expect_failure "Spawned child that re-execs into non-default persona" $line - #fi + ## TODO: validate that the first child spawned into a persona *cannot* spawn + ## into a different persona... + ##if [ $uid -eq 0 ]; then + ## ${PERSONA_SPAWN} -v $spawn_args ${PERSONA_SPAWN} child -v -E -R -v -I 99 /bin/echo "This is running in the system persona" + ## expect_failure "Spawned child that re-execs into non-default persona" $line + ##fi return 0 } diff --git a/tools/tests/superpages/measure_tlbs.c b/tools/tests/superpages/measure_tlbs.c index 02097c588..d413f0bab 100644 --- a/tools/tests/superpages/measure_tlbs.c +++ b/tools/tests/superpages/measure_tlbs.c @@ -19,106 +19,110 @@ #define RUNS2 (RUNS0/20) clock_t -testt(boolean_t superpages, int mode, int write, int kb) { +testt(boolean_t superpages, int mode, int write, int kb) +{ static int sum; char *data; unsigned int run, p, p2, i, res; mach_vm_address_t addr = 0; - int pages = kb/4; - mach_vm_size_t size = SUPERPAGE_ROUND_UP(pages*PAGE_SIZE); /* allocate full superpages */ + int pages = kb / 4; + mach_vm_size_t size = SUPERPAGE_ROUND_UP(pages * PAGE_SIZE); /* allocate full superpages */ int kr; kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE | (superpages? 
VM_FLAGS_SUPERPAGE_SIZE_2MB : VM_FLAGS_SUPERPAGE_NONE)); - if (!addr) + if (!addr) { return 0; + } data = (char*)(long)addr; /* touch every base page to make sure everything is mapped and zero-filled */ - for (p = 0; p1) { + if (argc > 1) { if (!strcmp(argv[1], "-h")) { printf("Usage: %s \n", argv[0]); printf("\tmode = 0: test all cases\n"); @@ -640,9 +792,9 @@ int main(int argc, char **argv) { printf("\tmode > 0: run test \n"); exit(0); } - mode=atoi(argv[1]); + mode = atoi(argv[1]); } - + /* install SIGBUS handler */ struct sigaction my_sigaction; my_sigaction.sa_handler = test_signal_handler; @@ -650,25 +802,27 @@ int main(int argc, char **argv) { my_sigaction.sa_mask = 0; sigaction( SIGBUS, &my_sigaction, NULL ); sigaction( SIGSEGV, &my_sigaction, NULL ); - - if (mode>0) /* one specific test */ - testit(mode-1); - if (mode==0) { /* test all cases */ + if (mode > 0) { /* one specific test */ + testit(mode - 1); + } + + if (mode == 0) { /* test all cases */ printf("Running %d tests:\n", TESTS); - for (i=0; itlock); IOSimpleLockUnlock(self->tlock); } -static void thread_call_test_func2(thread_call_param_t param0, - thread_call_param_t param1) +static void +thread_call_test_func2(thread_call_param_t param0, + thread_call_param_t param1) { testthreadcall *self = (testthreadcall *)param0; - + IOLog("thread_call_test_func2 %p %p\n", param0, param1); - + IOLockWakeup(self->tlock2, &my_event, false); } diff --git a/tools/tests/testkext/testthreadcall.h b/tools/tests/testkext/testthreadcall.h index c2a03b806..d34a8cf30 100644 --- a/tools/tests/testkext/testthreadcall.h +++ b/tools/tests/testkext/testthreadcall.h @@ -8,10 +8,10 @@ #include class testthreadcall : public IOService { - OSDeclareDefaultStructors(testthreadcall); - - virtual bool start( IOService * provider ); - + OSDeclareDefaultStructors(testthreadcall); + + virtual bool start( IOService * provider ); + public: thread_call_t tcall; thread_call_t tcall2; diff --git a/tools/tests/testkext/testvmx.cpp b/tools/tests/testkext/testvmx.cpp index 4ca0e1fab..3e66aa89d 100644 --- a/tools/tests/testkext/testvmx.cpp +++ b/tools/tests/testkext/testvmx.cpp @@ -20,29 +20,29 @@ OSDefineMetaClassAndStructors(testvmx, super); bool testvmx::start( IOService * provider ) { - int ret; - - IOLog("%s\n", __PRETTY_FUNCTION__); - - if (!super::start(provider)) { - return false; - } - - IOLog("Attempting host_vmxon\n"); - ret = host_vmxon(FALSE); - IOLog("host_vmxon: %d\n", ret); - - return true; + int ret; + + IOLog("%s\n", __PRETTY_FUNCTION__); + + if (!super::start(provider)) { + return false; + } + + IOLog("Attempting host_vmxon\n"); + ret = host_vmxon(FALSE); + IOLog("host_vmxon: %d\n", ret); + + return true; } void testvmx::stop( IOService * provider ) { - IOLog("%s\n", __PRETTY_FUNCTION__); - - super::stop(provider); - - IOLog("Attempting host_vmxoff\n"); - host_vmxoff(); - IOLog("host_vmxoff called\n"); + IOLog("%s\n", __PRETTY_FUNCTION__); + + super::stop(provider); + + IOLog("Attempting host_vmxoff\n"); + host_vmxoff(); + IOLog("host_vmxoff called\n"); } diff --git a/tools/tests/testkext/testvmx.h b/tools/tests/testkext/testvmx.h index f8937b248..769bcc509 100644 --- a/tools/tests/testkext/testvmx.h +++ b/tools/tests/testkext/testvmx.h @@ -8,10 +8,9 @@ #include class testvmx : public IOService { - OSDeclareDefaultStructors(testvmx); - - virtual bool start( IOService * provider ); - - virtual void stop( IOService * provider ); - + OSDeclareDefaultStructors(testvmx); + + virtual bool start( IOService * provider ); + + virtual void stop( IOService * 
provider );
 };
diff --git a/tools/tests/zero-to-n/zero-to-n.c b/tools/tests/zero-to-n/zero-to-n.c
index f31b58df5..39b7cd915 100644
--- a/tools/tests/zero-to-n/zero-to-n.c
+++ b/tools/tests/zero-to-n/zero-to-n.c
@@ -2,7 +2,7 @@
  * Copyright (c) 2009 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #include 
@@ -55,6 +55,8 @@
 #include 
 
+#include 
+
 typedef enum wake_type { WAKE_BROADCAST_ONESEM, WAKE_BROADCAST_PERTHREAD, WAKE_CHAIN, WAKE_HOP } wake_type_t;
 typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_FIXEDPRI } my_policy_type_t;
 
@@ -62,16 +64,14 @@ typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY
 #define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
 #define assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)
 
-#define CONSTRAINT_NANOS (20000000ll) /* 20 ms */
-#define COMPUTATION_NANOS (10000000ll) /* 10 ms */
-#define TRACEWORTHY_NANOS (10000000ll) /* 10 ms */
-
-#define DEBUG 0
+#define CONSTRAINT_NANOS  (20000000ll) /* 20 ms */
+#define COMPUTATION_NANOS (10000000ll) /* 10 ms */
+#define TRACEWORTHY_NANOS (10000000ll) /* 10 ms */
 
 #if DEBUG
-#define debug_log(args...) printf(args)
+#define debug_log(args ...) printf(args)
 #else
-#define debug_log(args...) do { } while(0)
+#define debug_log(args ...) do { } while(0)
 #endif
 
 /* Declarations */
@@ -88,6 +88,8 @@ static __attribute__((aligned(128))) _Atomic uint64_t g_churn_stopped_at = 0;
 
 /* Global variables (general) */
 static uint32_t g_numcpus;
+static uint32_t g_nphysicalcpu;
+static uint32_t g_nlogicalcpu;
 static uint32_t g_numthreads;
 static wake_type_t g_waketype;
 static policy_t g_policy;
@@ -120,6 +122,15 @@ static boolean_t g_do_all_spin = FALSE;
 /* Every thread backgrounds temporarily before parking */
 static boolean_t g_drop_priority = FALSE;
 
+/* Test whether realtime threads are scheduled on the separate CPUs */
+static boolean_t g_test_rt = FALSE;
+
+/* On SMT machines, test whether realtime threads are scheduled on the correct CPUs */
+static boolean_t g_test_rt_smt = FALSE;
+
+/* Test whether realtime threads are successfully avoiding CPU 0 on Intel */
+static boolean_t g_test_rt_avoid0 = FALSE;
+
 /* One randomly chosen thread holds up the train for a certain duration.
*/ static boolean_t g_do_one_long_spin = FALSE; static uint32_t g_one_long_spin_id = 0; @@ -140,6 +151,14 @@ static semaphore_t g_donesem; /* Global variables (chain) */ static semaphore_t *g_semarr; +typedef struct { + __attribute__((aligned(128))) uint32_t current; + uint32_t accum; +} histogram_t; + +static histogram_t *g_cpu_histogram; +static _Atomic uint64_t *g_cpu_map; + static uint64_t abs_to_nanos(uint64_t abstime) { @@ -156,9 +175,9 @@ inline static void yield(void) { #if defined(__arm__) || defined(__arm64__) - asm volatile("yield"); + asm volatile ("yield"); #elif defined(__x86_64__) || defined(__i386__) - asm volatile("pause"); + asm volatile ("pause"); #else #error Unrecognized architecture #endif @@ -175,7 +194,7 @@ churn_thread(__unused void *arg) */ while (g_churn_stop == FALSE && - mach_absolute_time() < (g_starttime_abs + NSEC_PER_SEC)) { + mach_absolute_time() < (g_starttime_abs + NSEC_PER_SEC)) { spin_count++; yield(); } @@ -189,8 +208,9 @@ churn_thread(__unused void *arg) static void create_churn_threads() { - if (g_churn_count == 0) + if (g_churn_count == 0) { g_churn_count = g_numcpus - 1; + } errno_t err; @@ -201,45 +221,53 @@ create_churn_threads() g_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_churn_count); assert(g_churn_threads); - if ((err = pthread_attr_init(&attr))) + if ((err = pthread_attr_init(&attr))) { errc(EX_OSERR, err, "pthread_attr_init"); + } - if ((err = pthread_attr_setschedparam(&attr, ¶m))) + if ((err = pthread_attr_setschedparam(&attr, ¶m))) { errc(EX_OSERR, err, "pthread_attr_setschedparam"); + } - if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) + if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) { errc(EX_OSERR, err, "pthread_attr_setschedpolicy"); + } - for (uint32_t i = 0 ; i < g_churn_count ; i++) { + for (uint32_t i = 0; i < g_churn_count; i++) { pthread_t new_thread; - if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL))) + if ((err = pthread_create(&new_thread, &attr, churn_thread, NULL))) { errc(EX_OSERR, err, "pthread_create"); + } g_churn_threads[i] = new_thread; } - if ((err = pthread_attr_destroy(&attr))) + if ((err = pthread_attr_destroy(&attr))) { errc(EX_OSERR, err, "pthread_attr_destroy"); + } } static void join_churn_threads(void) { - if (atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst) != 0) + if (atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst) != 0) { printf("Warning: Some of the churn threads may have stopped early: %lld\n", - g_churn_stopped_at); + g_churn_stopped_at); + } atomic_store_explicit(&g_churn_stop, TRUE, memory_order_seq_cst); /* Rejoin churn threads */ for (uint32_t i = 0; i < g_churn_count; i++) { errno_t err = pthread_join(g_churn_threads[i], NULL); - if (err) errc(EX_OSERR, err, "pthread_join %d", i); + if (err) { + errc(EX_OSERR, err, "pthread_join %d", i); + } } } /* - * Figure out what thread policy to use + * Figure out what thread policy to use */ static my_policy_type_t parse_thread_policy(const char *str) @@ -259,7 +287,7 @@ parse_thread_policy(const char *str) * Figure out what wakeup pattern to use */ static wake_type_t -parse_wakeup_pattern(const char *str) +parse_wakeup_pattern(const char *str) { if (strcmp(str, "chain") == 0) { return WAKE_CHAIN; @@ -286,34 +314,38 @@ thread_setup(uint32_t my_id) if (g_priority) { int policy = SCHED_OTHER; - if (g_policy == MY_POLICY_FIXEDPRI) + if (g_policy == MY_POLICY_FIXEDPRI) { policy = SCHED_RR; + } struct sched_param param = {.sched_priority = (int)g_priority}; - if ((ret = 
pthread_setschedparam(pthread_self(), policy, ¶m))) + if ((ret = pthread_setschedparam(pthread_self(), policy, ¶m))) { errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id); + } } switch (g_policy) { - case MY_POLICY_TIMESHARE: - break; - case MY_POLICY_REALTIME: - /* Hard-coded realtime parameters (similar to what Digi uses) */ - pol.period = 100000; - pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS); - pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS); - pol.preemptible = 0; /* Ignored by OS */ - - kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY, - (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT); - mach_assert_zero_t(my_id, kr); - break; - case MY_POLICY_FIXEDPRI: - ret = pthread_set_fixedpriority_self(); - if (ret) errc(EX_OSERR, ret, "pthread_set_fixedpriority_self"); - break; - default: - errx(EX_USAGE, "invalid policy type %d", g_policy); + case MY_POLICY_TIMESHARE: + break; + case MY_POLICY_REALTIME: + /* Hard-coded realtime parameters (similar to what Digi uses) */ + pol.period = 100000; + pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS); + pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS); + pol.preemptible = 0; /* Ignored by OS */ + + kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY, + (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT); + mach_assert_zero_t(my_id, kr); + break; + case MY_POLICY_FIXEDPRI: + ret = pthread_set_fixedpriority_self(); + if (ret) { + errc(EX_OSERR, ret, "pthread_set_fixedpriority_self"); + } + break; + default: + errx(EX_USAGE, "invalid policy type %d", g_policy); } if (g_do_affinity) { @@ -322,7 +354,7 @@ thread_setup(uint32_t my_id) affinity.affinity_tag = my_id % 2; kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY, - (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT); + (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT); mach_assert_zero_t(my_id, kr); } @@ -352,8 +384,9 @@ worker_thread(void *arg) */ /* Give the worker threads undisturbed time to finish before waiting on them */ - if (g_do_sleep) + if (g_do_sleep) { usleep(g_iteration_sleeptime_us); + } debug_log("%d Leader thread wait for ready\n", i); @@ -363,13 +396,22 @@ worker_thread(void *arg) * TODO: Invent 'semaphore wait for N signals' */ - for (uint32_t j = 0 ; j < g_numthreads - 1; j++) { + for (uint32_t j = 0; j < g_numthreads - 1; j++) { kr = semaphore_wait(g_readysem); mach_assert_zero_t(my_id, kr); } debug_log("%d Leader thread wait\n", i); + if (i > 0) { + for (int cpuid = 0; cpuid < g_numcpus; cpuid++) { + if (g_cpu_histogram[cpuid].current == 1) { + atomic_fetch_or_explicit(&g_cpu_map[i - 1], (1UL << cpuid), memory_order_relaxed); + g_cpu_histogram[cpuid].current = 0; + } + } + } + /* Signal main thread and wait for start of iteration */ kr = semaphore_wait_signal(g_leadersem, g_main_sem); @@ -407,7 +449,7 @@ worker_thread(void *arg) * records when she wakes up, and possibly * wakes up a friend. 
*/ - switch(g_waketype) { + switch (g_waketype) { case WAKE_BROADCAST_ONESEM: kr = semaphore_wait_signal(g_broadcastsem, g_readysem); mach_assert_zero_t(my_id, kr); @@ -457,7 +499,11 @@ worker_thread(void *arg) } } - debug_log("Thread %p woke up for iteration %d.\n", pthread_self(), i); + unsigned int cpuid = _os_cpu_number(); + assert(cpuid < g_numcpus); + debug_log("Thread %p woke up on CPU %d for iteration %d.\n", pthread_self(), cpuid, i); + g_cpu_histogram[cpuid].current = 1; + g_cpu_histogram[cpuid].accum++; if (g_do_one_long_spin && g_one_long_spin_id == my_id) { /* One randomly chosen thread holds up the train for a while. */ @@ -487,7 +533,9 @@ worker_thread(void *arg) if (g_drop_priority) { /* Drop priority to BG momentarily */ errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG); - if (ret) errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG"); + if (ret) { + errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG"); + } } if (g_do_all_spin) { @@ -502,7 +550,9 @@ worker_thread(void *arg) if (g_drop_priority) { /* Restore normal priority */ errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, 0); - if (ret) errc(EX_OSERR, ret, "setpriority 0"); + if (ret) { + errc(EX_OSERR, ret, "setpriority 0"); + } } debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i); @@ -510,17 +560,25 @@ worker_thread(void *arg) if (my_id == 0) { /* Give the worker threads undisturbed time to finish before waiting on them */ - if (g_do_sleep) + if (g_do_sleep) { usleep(g_iteration_sleeptime_us); + } /* Wait for the worker threads to finish */ - for (uint32_t i = 0 ; i < g_numthreads - 1; i++) { + for (uint32_t i = 0; i < g_numthreads - 1; i++) { kr = semaphore_wait(g_readysem); mach_assert_zero_t(my_id, kr); } /* Tell everyone and the main thread that the last iteration is done */ - debug_log("%d Leader thread done\n", i); + debug_log("%d Leader thread done\n", g_iterations - 1); + + for (int cpuid = 0; cpuid < g_numcpus; cpuid++) { + if (g_cpu_histogram[cpuid].current == 1) { + atomic_fetch_or_explicit(&g_cpu_map[g_iterations - 1], (1UL << cpuid), memory_order_relaxed); + g_cpu_histogram[cpuid].current = 0; + } + } kr = semaphore_signal_all(g_main_sem); mach_assert_zero_t(my_id, kr); @@ -543,8 +601,8 @@ compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t _sum = 0; uint64_t _max = 0; uint64_t _min = UINT64_MAX; - float _avg = 0; - float _dev = 0; + float _avg = 0; + float _dev = 0; for (i = 0; i < count; i++) { _sum += values[i]; @@ -553,12 +611,12 @@ compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, } _avg = ((float)_sum) / ((float)count); - + _dev = 0; for (i = 0; i < count; i++) { _dev += powf((((float)values[i]) - _avg), 2); } - + _dev /= count; _dev = sqrtf(_dev); @@ -574,18 +632,23 @@ main(int argc, char **argv) errno_t ret; kern_return_t kr; - pthread_t *threads; - uint64_t *worst_latencies_ns; - uint64_t *worst_latencies_from_first_ns; - uint64_t max, min; - float avg, stddev; + pthread_t *threads; + uint64_t *worst_latencies_ns; + uint64_t *worst_latencies_from_first_ns; + uint64_t max, min; + float avg, stddev; - for (int i = 0; i < argc; i++) - if (strcmp(argv[i], "--switched_apptype") == 0) + bool test_fail = false; + + for (int i = 0; i < argc; i++) { + if (strcmp(argv[i], "--switched_apptype") == 0) { g_seen_apptype = TRUE; + } + } - if (!g_seen_apptype) + if (!g_seen_apptype) { selfexec_with_apptype(argc, argv); + } parse_args(argc, argv); @@ -595,10 +658,63 @@ main(int argc, char **argv) size_t ncpu_size = 
sizeof(g_numcpus); ret = sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0); - if (ret) err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)"); + if (ret) { + err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)"); + } + assert(g_numcpus <= 64); /* g_cpu_map needs to be extended for > 64 cpus */ + + size_t physicalcpu_size = sizeof(g_nphysicalcpu); + ret = sysctlbyname("hw.physicalcpu", &g_nphysicalcpu, &physicalcpu_size, NULL, 0); + if (ret) { + err(EX_OSERR, "Failed sysctlbyname(hw.physicalcpu)"); + } - if (g_do_each_spin) + size_t logicalcpu_size = sizeof(g_nlogicalcpu); + ret = sysctlbyname("hw.logicalcpu", &g_nlogicalcpu, &logicalcpu_size, NULL, 0); + if (ret) { + err(EX_OSERR, "Failed sysctlbyname(hw.logicalcpu)"); + } + + if (g_test_rt) { + if (g_numthreads == 0) { + g_numthreads = g_numcpus; + } + g_policy = MY_POLICY_REALTIME; + g_do_all_spin = TRUE; + } else if (g_test_rt_smt) { + if (g_nlogicalcpu != 2 * g_nphysicalcpu) { + /* Not SMT */ + printf("Attempt to run --test-rt-smt on a non-SMT device\n"); + exit(0); + } + + if (g_numthreads == 0) { + g_numthreads = g_nphysicalcpu; + } + g_policy = MY_POLICY_REALTIME; + g_do_all_spin = TRUE; + } else if (g_test_rt_avoid0) { +#if defined(__x86_64__) || defined(__i386__) + if (g_numthreads == 0) { + g_numthreads = g_nphysicalcpu - 1; + } + if (g_numthreads == 0) { + printf("Attempt to run --test-rt-avoid0 on a uniprocessor\n"); + exit(0); + } + g_policy = MY_POLICY_REALTIME; + g_do_all_spin = TRUE; +#else + printf("Attempt to run --test-rt-avoid0 on a non-Intel device\n"); + exit(0); +#endif + } else if (g_numthreads == 0) { + g_numthreads = g_numcpus; + } + + if (g_do_each_spin) { g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns); + } /* Configure the long-spin thread to take up half of its computation */ if (g_do_one_long_spin) { @@ -610,10 +726,12 @@ main(int argc, char **argv) g_iteration_sleeptime_us = g_numthreads * 20; uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1; - if (g_do_each_spin) + if (g_do_each_spin) { g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC); - if (g_do_one_long_spin) + } + if (g_do_one_long_spin) { g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC; + } /* Arrays for threads and their wakeup times */ threads = (pthread_t*) valloc(sizeof(pthread_t) * g_numthreads); @@ -626,7 +744,9 @@ main(int argc, char **argv) /* Ensure the allocation is pre-faulted */ ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size); - if (ret) errc(EX_OSERR, ret, "memset_s endtimes"); + if (ret) { + errc(EX_OSERR, ret, "memset_s endtimes"); + } size_t latencies_size = sizeof(uint64_t) * g_iterations; @@ -635,14 +755,36 @@ main(int argc, char **argv) /* Ensure the allocation is pre-faulted */ ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size); - if (ret) errc(EX_OSERR, ret, "memset_s latencies"); + if (ret) { + errc(EX_OSERR, ret, "memset_s latencies"); + } worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size); assert(worst_latencies_from_first_ns); /* Ensure the allocation is pre-faulted */ ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size); - if (ret) errc(EX_OSERR, ret, "memset_s latencies_from_first"); + if (ret) { + errc(EX_OSERR, ret, "memset_s latencies_from_first"); + } + + size_t histogram_size = sizeof(histogram_t) * g_numcpus; + g_cpu_histogram = (histogram_t *)valloc(histogram_size); + assert(g_cpu_histogram); + /* Ensure the allocation is pre-faulted */ + ret = 
memset_s(g_cpu_histogram, histogram_size, 0, histogram_size); + if (ret) { + errc(EX_OSERR, ret, "memset_s g_cpu_histogram"); + } + + size_t map_size = sizeof(uint64_t) * g_iterations; + g_cpu_map = (_Atomic uint64_t *)valloc(map_size); + assert(g_cpu_map); + /* Ensure the allocation is pre-faulted */ + ret = memset_s(g_cpu_map, map_size, 0, map_size); + if (ret) { + errc(EX_OSERR, ret, "memset_s g_cpu_map"); + } kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0); mach_assert_zero(kr); @@ -651,7 +793,6 @@ main(int argc, char **argv) if (g_waketype == WAKE_CHAIN || g_waketype == WAKE_BROADCAST_PERTHREAD || g_waketype == WAKE_HOP) { - g_semarr = valloc(sizeof(semaphore_t) * g_numthreads); assert(g_semarr); @@ -681,34 +822,41 @@ main(int argc, char **argv) /* Create the threads */ for (uint32_t i = 0; i < g_numthreads; i++) { ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i); - if (ret) errc(EX_OSERR, ret, "pthread_create %d", i); + if (ret) { + errc(EX_OSERR, ret, "pthread_create %d", i); + } } ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL); - if (ret) errc(EX_OSERR, ret, "setpriority"); + if (ret) { + errc(EX_OSERR, ret, "setpriority"); + } thread_setup(0); g_starttime_abs = mach_absolute_time(); - if (g_churn_pri) + if (g_churn_pri) { create_churn_threads(); + } /* Let everyone get settled */ kr = semaphore_wait(g_main_sem); mach_assert_zero(kr); /* Give the system a bit more time to settle */ - if (g_do_sleep) + if (g_do_sleep) { usleep(g_iteration_sleeptime_us); + } /* Go! */ for (uint32_t i = 0; i < g_iterations; i++) { uint32_t j; uint64_t worst_abs = 0, best_abs = UINT64_MAX; - if (g_do_one_long_spin) + if (g_do_one_long_spin) { g_one_long_spin_id = (uint32_t)rand() % g_numthreads; + } debug_log("%d Main thread reset\n", i); @@ -734,13 +882,13 @@ main(int argc, char **argv) latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs; worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs; } - + worst_latencies_ns[i] = abs_to_nanos(worst_abs); worst_abs = 0; for (j = 1; j < g_numthreads; j++) { uint64_t latency_abs; - + latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0]; worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs; best_abs = best_abs > latency_abs ? 
latency_abs : best_abs;
@@ -755,23 +903,28 @@ main(int argc, char **argv)
 		/* Ariadne's ad-hoc test signpost */
 		kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latencies_from_first_ns[i], g_traceworthy_latency_ns, 0, 0);
 
-		if (g_verbose)
+		if (g_verbose) {
 			printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
+		}
 		}
 
 		/* Give the system a bit more time to settle */
-		if (g_do_sleep)
+		if (g_do_sleep) {
 			usleep(g_iteration_sleeptime_us);
+		}
 	}
 
 	/* Rejoin threads */
 	for (uint32_t i = 0; i < g_numthreads; i++) {
 		ret = pthread_join(threads[i], NULL);
-		if (ret) errc(EX_OSERR, ret, "pthread_join %d", i);
+		if (ret) {
+			errc(EX_OSERR, ret, "pthread_join %d", i);
+		}
 	}
 
-	if (g_churn_pri)
+	if (g_churn_pri) {
 		join_churn_threads();
+	}
 
 	compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
 	printf("Results (from a stop):\n");
@@ -795,12 +948,55 @@
 	}
 #endif
 
+	if (g_test_rt || g_test_rt_smt || g_test_rt_avoid0) {
+		putchar('\n');
+
+		for (uint32_t i = 0; i < g_numcpus; i++) {
+			printf("%d\t%d\n", i, g_cpu_histogram[i].accum);
+		}
+
+#define PRIMARY   0x5555555555555555ULL
+#define SECONDARY 0xaaaaaaaaaaaaaaaaULL
+
+		int fail_count = 0;
+
+		for (uint32_t i = 0; i < g_iterations; i++) {
+			bool secondary = false;
+			bool fail = false;
+			uint64_t map = g_cpu_map[i];
+			if (g_test_rt_smt) {
+				/* Test for one or more threads running on secondary cores unexpectedly (WARNING) */
+				secondary = (map & SECONDARY);
+				/* Test for threads running on both primary and secondary cpus of the same core (FAIL) */
+				fail = ((map & PRIMARY) & ((map & SECONDARY) >> 1));
+			} else if (g_test_rt) {
+				fail = __builtin_popcountll(map) != g_numthreads;
+			} else if (g_test_rt_avoid0) {
+				fail = ((map & 0x1) == 0x1);
+			}
+			if (secondary || fail) {
+				printf("Iteration %d: 0x%llx%s%s\n", i, map,
+				    secondary ? " SECONDARY" : "",
+				    fail ? " FAIL" : "");
+			}
+			test_fail |= fail;
+			fail_count += fail;
+		}
+
+		if (test_fail && (g_iterations >= 100) && (fail_count <= g_iterations / 100)) {
+			printf("99%% or better success rate\n");
+			test_fail = 0;
+		}
+	}
+
 	free(threads);
 	free(g_thread_endtimes_abs);
 	free(worst_latencies_ns);
 	free(worst_latencies_from_first_ns);
+	free(g_cpu_histogram);
+	free(g_cpu_map);
 
-	return 0;
+	return test_fail;
 }
 
 /*
@@ -820,26 +1016,36 @@ selfexec_with_apptype(int argc, char *argv[])
 
 	uint32_t prog_size = PATH_MAX;
 	ret = _NSGetExecutablePath(prog, &prog_size);
-	if (ret) err(EX_OSERR, "_NSGetExecutablePath");
+	if (ret) {
+		err(EX_OSERR, "_NSGetExecutablePath");
+	}
 
-	for (i=0; i < argc; i++) {
+	for (i = 0; i < argc; i++) {
 		new_argv[i] = argv[i];
 	}
 
 	new_argv[i] = "--switched_apptype";
-	new_argv[i+1] = NULL;
+	new_argv[i + 1] = NULL;
 
 	ret = posix_spawnattr_init(&attr);
-	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_init");
+	if (ret) {
+		errc(EX_OSERR, ret, "posix_spawnattr_init");
+	}
 
 	ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
-	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_setflags");
+	if (ret) {
+		errc(EX_OSERR, ret, "posix_spawnattr_setflags");
+	}
 
 	ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
-	if (ret) errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
+	if (ret) {
+		errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
+	}
 
 	ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
-	if (ret) errc(EX_OSERR, ret, "posix_spawn");
+	if (ret) {
+		errc(EX_OSERR, ret, "posix_spawn");
+	}
 }
 
 /*
@@ -849,11 +1055,11 @@ static void __attribute__((noreturn))
 usage()
 {
 	errx(EX_USAGE, "Usage: %s "
-	    " \n\t\t"
-	    "[--trace ] "
-	    "[--verbose] [--spin-one] [--spin-all] [--spin-time ] [--affinity]\n\t\t"
-	    "[--no-sleep] [--drop-priority] [--churn-pri ] [--churn-count ]",
-	    getprogname());
+	    " \n\t\t"
+	    "[--trace ] "
+	    "[--verbose] [--spin-one] [--spin-all] [--spin-time ] [--affinity]\n\t\t"
+	    "[--no-sleep] [--drop-priority] [--churn-pri ] [--churn-count ]",
+	    getprogname());
 }
 
 static struct option* g_longopts;
 
@@ -867,9 +1073,10 @@ read_dec_arg()
 
 	uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);
 
-	if (cp == optarg || *cp)
+	if (cp == optarg || *cp) {
 		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
-		    g_longopts[option_index].name, optarg);
+		    g_longopts[option_index].name, optarg);
+	}
 
 	return arg_val;
 }
@@ -887,6 +1094,7 @@ parse_args(int argc, char *argv[])
 	};
 
 	static struct option longopts[] = {
+		/* BEGIN IGNORE CODESTYLE */
 		{ "spin-time",          required_argument,      NULL,                           OPT_SPIN_TIME },
 		{ "trace",              required_argument,      NULL,                           OPT_TRACE },
 		{ "priority",           required_argument,      NULL,                           OPT_PRIORITY },
@@ -898,9 +1106,13 @@ parse_args(int argc, char *argv[])
 		{ "affinity",           no_argument,            (int*)&g_do_affinity,           TRUE },
 		{ "no-sleep",           no_argument,            (int*)&g_do_sleep,              FALSE },
 		{ "drop-priority",      no_argument,            (int*)&g_drop_priority,         TRUE },
+		{ "test-rt",            no_argument,            (int*)&g_test_rt,               TRUE },
+		{ "test-rt-smt",        no_argument,            (int*)&g_test_rt_smt,           TRUE },
+		{ "test-rt-avoid0",     no_argument,            (int*)&g_test_rt_avoid0,        TRUE },
 		{ "verbose",            no_argument,            (int*)&g_verbose,               TRUE },
 		{ "help",               no_argument,            NULL,                           'h' },
 		{ NULL,                 0,                      NULL,                           0 }
+		/* END IGNORE CODESTYLE */
 	};
 
 	g_longopts = longopts;
@@ -958,11 +1170,9 @@ parse_args(int argc, char *argv[])
 
 	/* How many threads? */
 	g_numthreads = (uint32_t)strtoull(argv[0], &cp, 10);
-	if (cp == argv[0] || *cp)
+	if (cp == argv[0] || *cp) {
 		errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[0]);
-
-	if (g_numthreads < 1)
-		errx(EX_USAGE, "Must use at least one thread");
+	}
 
 	/* What wakeup pattern? */
 	g_waketype = parse_wakeup_pattern(argv[1]);
@@ -973,17 +1183,19 @@ parse_args(int argc, char *argv[])
 
 	/* Iterations */
 	g_iterations = (uint32_t)strtoull(argv[3], &cp, 10);
-	if (cp == argv[3] || *cp)
+	if (cp == argv[3] || *cp) {
 		errx(EX_USAGE, "numiterations requires a decimal number, found \"%s\"", argv[3]);
+	}
 
-	if (g_iterations < 1)
+	if (g_iterations < 1) {
 		errx(EX_USAGE, "Must have at least one iteration");
+	}
 
-	if (g_numthreads == 1 && g_waketype == WAKE_CHAIN)
+	if (g_numthreads == 1 && g_waketype == WAKE_CHAIN) {
 		errx(EX_USAGE, "chain mode requires more than one thread");
+	}
 
-	if (g_numthreads == 1 && g_waketype == WAKE_HOP)
+	if (g_numthreads == 1 && g_waketype == WAKE_HOP) {
 		errx(EX_USAGE, "hop mode requires more than one thread");
+	}
 }
-
-
-- 
2.45.2
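The persona_test changes above all revolve around the private kpersona_*() SPI. A minimal sketch of the query side, assuming the private <sys/persona.h> header that declares kpersona_info() and struct kpersona_info; the helper name print_persona is illustrative, not from the test:

	#include <errno.h>
	#include <stdio.h>
	#include <sys/persona.h>   /* assumed: private SPI header declaring kpersona_info() */

	/* Hypothetical helper: fetch and print one persona, as persona_test does. */
	static int
	print_persona(int persona_id)
	{
		struct kpersona_info kinfo = { 0 };

		/* Callers must stamp the struct version before the call. */
		kinfo.persona_info_version = PERSONA_INFO_V1;
		if (kpersona_info(persona_id, &kinfo) != 0) {
			fprintf(stderr, "kpersona_info failed: errno %d\n", errno);
			return -1;
		}
		printf("persona %d: login \"%s\", %d group(s)\n",
		    kinfo.persona_id, kinfo.persona_name, kinfo.persona_ngroups);
		return 0;
	}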
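The --test-rt validation added to zero-to-n.c reduces to three checks over a 64-bit per-iteration CPU map in which bit N means some realtime thread ran on CPU N. A self-contained sketch of just those checks, under the same assumption the patch makes that even CPU numbers are SMT primaries and odd numbers their paired secondaries:

	#include <stdbool.h>
	#include <stdint.h>

	#define PRIMARY   0x5555555555555555ULL  /* even-numbered (primary) CPUs */
	#define SECONDARY 0xaaaaaaaaaaaaaaaaULL  /* odd-numbered (SMT secondary) CPUs */

	/* Returns true if one iteration's CPU map violates the selected policy. */
	static bool
	rt_iteration_failed(uint64_t map, uint32_t numthreads,
	    bool test_rt, bool test_rt_smt, bool test_rt_avoid0)
	{
		if (test_rt_smt) {
			/* Shift the secondary bits down onto their primaries and
			 * look for overlap: both halves of one core ran RT work. */
			return ((map & PRIMARY) & ((map & SECONDARY) >> 1)) != 0;
		}
		if (test_rt) {
			/* Every thread should have landed on its own CPU. */
			return __builtin_popcountll(map) != (int)numthreads;
		}
		if (test_rt_avoid0) {
			/* No realtime thread may run on CPU 0 (Intel). */
			return (map & 0x1) != 0;
		}
		return false;
	}

In the test itself the map is built by the workers: each wakeup marks g_cpu_histogram[cpuid].current, and the leader folds those bits into g_cpu_map[i] with atomic_fetch_or_explicit(). The histogram entries are 128-byte aligned so concurrent updates from different CPUs land on separate cache lines.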
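zero-to-n's selfexec_with_apptype() exists so the benchmark is scheduled as a foreground app: POSIX_SPAWN_SETEXEC turns posix_spawn() into an exec of the current process, so on success the call never returns. A sketch of that pattern in isolation; posix_spawnattr_setprocesstype_np() and POSIX_SPAWN_PROC_TYPE_APP_DEFAULT are Apple private SPI, assumed available as in the test's build environment (the prototype below is reproduced only so the sketch is self-contained, and is an assumption):

	#include <err.h>
	#include <spawn.h>
	#include <sysexits.h>

	extern char **environ;

	/* Assumed prototype for the private SPI used by the test. */
	extern int posix_spawnattr_setprocesstype_np(posix_spawnattr_t *, int);

	/* Re-exec the current binary with an "app" process type; never returns on success. */
	static void
	reexec_as_app(char **argv)
	{
		posix_spawnattr_t attr;
		int ret;

		if ((ret = posix_spawnattr_init(&attr)) != 0) {
			errc(EX_OSERR, ret, "posix_spawnattr_init");
		}
		/* SETEXEC: posix_spawn() replaces this process image instead of forking. */
		if ((ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC)) != 0) {
			errc(EX_OSERR, ret, "posix_spawnattr_setflags");
		}
		if ((ret = posix_spawnattr_setprocesstype_np(&attr,
		    POSIX_SPAWN_PROC_TYPE_APP_DEFAULT)) != 0) {
			errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
		}
		if ((ret = posix_spawn(NULL, argv[0], NULL, &attr, argv, environ)) != 0) {
			errc(EX_OSERR, ret, "posix_spawn");
		}
	}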